-rw-r--r--  system/easy-kernel/0000-README                             |     40
-rw-r--r--  system/easy-kernel/0100-linux-6.6.58.patch                 | 506489
-rw-r--r--  system/easy-kernel/0100-linux-6.6.6.patch                  |  64421
-rw-r--r--  system/easy-kernel/0120-XATTR_USER_PREFIX.patch            |     31
-rw-r--r--  system/easy-kernel/0204-amd-deserialised-MSR-access.patch  |    134
-rw-r--r--  system/easy-kernel/0204-sparc-warray-fix.patch             |     17
-rw-r--r--  system/easy-kernel/0208-gcc14-objtool-fix.patch            |     41
-rw-r--r--  system/easy-kernel/0250-expose-per-process-ksm.patch       |      4
-rw-r--r--  system/easy-kernel/0252-rectify-ksm-inheritance.patch      |   1059
-rw-r--r--  system/easy-kernel/0300-correct-max98388-includes.patch    |     39
-rw-r--r--  system/easy-kernel/0300-tmp513-regression-fix.patch        |     30
-rw-r--r--  system/easy-kernel/0302-i915-gcc14-fix.patch               |     37
-rw-r--r--  system/easy-kernel/0302-iwlwifi-rfkill-fix.patch           |    170
-rw-r--r--  system/easy-kernel/0304-fix-powerbook6-5-audio.patch (renamed from system/easy-kernel/0210-fix-powerbook6-5-audio.patch) | 0
-rw-r--r--  system/easy-kernel/0502-gcc9-kcflags.patch                 |     68
-rw-r--r--  system/easy-kernel/0504-update-zstd-to-v1_5_6.patch (renamed from system/easy-kernel/0504-update-zstd-to-v1_5_5.patch) | 6745
-rw-r--r--  system/easy-kernel/1000-version.patch                      |      6
-rw-r--r--  system/easy-kernel/APKBUILD                                |     58
-rw-r--r--  system/easy-kernel/config-aarch64                          |     73
-rw-r--r--  system/easy-kernel/config-armv7                            |     44
-rw-r--r--  system/easy-kernel/config-m68k                             |     40
-rw-r--r--  system/easy-kernel/config-pmmx                             |     60
-rw-r--r--  system/easy-kernel/config-ppc                              |     44
-rw-r--r--  system/easy-kernel/config-ppc64                            |     49
-rw-r--r--  system/easy-kernel/config-sparc64                          |     43
-rw-r--r--  system/easy-kernel/config-x86_64                           |     66
-rw-r--r--  system/easy-kernel/no-require-gnu-tar.patch                |      4
27 files changed, 513869 insertions, 65943 deletions
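
The numeric prefixes on the patch files above encode the order in which the
series is applied; the 0000-README below documents this "Patchset Sequence".
As an illustrative sketch only (the kernel tree path and repository checkout
location are assumptions, and the APKBUILD performs the equivalent steps
during a package build), the series could be applied by hand like so::

    # Apply the easy-kernel series to a pristine kernel tree, in numeric order.
    cd linux-6.6
    for p in /path/to/packages/system/easy-kernel/[0-9]*.patch; do
        patch -p1 < "$p"
    done
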
diff --git a/system/easy-kernel/0000-README b/system/easy-kernel/0000-README
index ba2ebb04d..67d75ca60 100644
--- a/system/easy-kernel/0000-README
+++ b/system/easy-kernel/0000-README
@@ -28,10 +28,6 @@ Patchset Sequence
Individual Patch Descriptions (0120-1000):
--------------------------------------------------------------------------
-File: 0120-XATTR_USER_PREFIX.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Support for namespace user.pax.* on tmpfs.
-
File: 0122-link-security-restrictions.patch
From: Ben Hutchings <ben@decadent.org.uk>
Desc: Enable link security restrictions by default.
@@ -52,18 +48,22 @@ File: 0202-parisc-disable-prctl.patch
From: Helge Deller <deller@gmx.de>
Desc: Disables prctl on PA-RISC/HPPA due to this platform needing executable stacks.
-File: 0204-amd-deserialised-MSR-access.patch
-From: Borislav Petkov <bp@alien8.de>
-Desc: Reduces performance penalty on AMD64 processors (Opteron, K8, Athlon64, Sempron) by removing unnecessary synchronisation barrier.
+File: 0204-sparc-warray-fix.patch
+From: Gustavo A. R. Silva <gustavoars@kernel.org>
+Desc: Fixes issues with SPARC compilation due to -Warray-bounds.
-File: 0210-fix-powerbook-6-5-audio.patch
-From: Horst Burkhardt <horst@burkhardt.com.au>
-Desc: Enables audio in PowerBook6,4 and PowerBook6,5 iBooks on PowerPC.
+File: 0208-gcc14-objtool-fix.patch
+From: Sam James <sam@gentoo.org>
+Desc: Fixes a calloc call so gcc14 does not chuck a -Walloc-size hissy fit.
File: 0250-expose-per-process-ksm.patch
From: Oleksandr Natalenko <oleksandr@natalenko.name>
Desc: Provides a non-prctl interface for per-process KSM to support uksmd.
+File: 0252-rectify-ksm-inheritance.patch
+From: Stefan Roesch <shr@devkernel.io>
+Desc: Extends prctl interface for per-process KSM to allow proper inheritance of KSM state.
+
File: 0260-reduce-swappiness.patch
From: Horst Burkhardt <horst@burkhardt.com.au> - originally from -ck patchset by Con Kolivas
Desc: Reduces the proclivity of the kernel to page out memory contents to disk.
@@ -72,13 +72,17 @@ File: 0262-boot-order.patch
From: Peter Jung <admin@ptr1337.dev>
Desc: Changes graphics bringup to occur after ATA initialisation, saving some time at boot.
-File: 0300-tmp513-regression-fix.patch
-From: Mike Pagano <mpagano@gentoo.org>
-Desc: Fix to regression in Kconfig from kernel 5.5.6 to enable tmp513 hardware monitoring module to build.
+File: 0300-correct-max98388-includes.patch
+From: Linus Walleij <linus.walleij@linaro.org>
+Desc: Fixes includes in MAX98388 ASoC ALSA driver.
+
+File: 0302-i915-gcc14-fix.patch
+From: Sam James <sam@gentoo.org>
+Desc: Adjusts alloc size in drm/i915 to prevent gcc14 hissy fit.
-File: 0302-iwlwifi-rfkill-fix.patch
-From: Johannes Berg <johannes.berg@intel.com>
-Desc: Fix issue where rfkill results in kernel lock-up.
+File: 0304-fix-powerbook6-5-audio.patch
+From: Horst Burkhardt <horst@burkhardt.com.au>
+Desc: Enables audio in PowerBook6,4 and PowerBook6,5 iBooks on PowerPC.
File: 0400-reduce-pageblock-size-nonhugetlb.patch
From: Sultan Alsawaf <sultan@kerneltoast.com>
@@ -100,9 +104,9 @@ File: 0502-gcc9-kcflags.patch
From: graysky <therealgraysky@proton.me>
Desc: Enables gcc >=9.1 optimizations for the very latest x86_64 CPUs.
-File: 0504-update-zstd-to-v1_5_5.patch
+File: 0504-update-zstd-to-v1_5_6.patch
From: Piotr Gorski <lucjan.lucjanov@gmail.com>
-Desc: Updates kernel Zstandard compression code to upstream 1.5.5 from Meta.
+Desc: Updates kernel Zstandard compression code to upstream 1.5.6 from Meta.
File: 1000-version.patch
From: Horst Burkhardt <horst@burkhardt.com.au>
diff --git a/system/easy-kernel/0100-linux-6.6.58.patch b/system/easy-kernel/0100-linux-6.6.58.patch
new file mode 100644
index 000000000..83409c5f7
--- /dev/null
+++ b/system/easy-kernel/0100-linux-6.6.58.patch
@@ -0,0 +1,506489 @@
+diff --git a/.gitignore b/.gitignore
+index 0bbae167bf93e9..d1a8ab3f98aaf1 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -135,7 +135,6 @@ GTAGS
+ # id-utils files
+ ID
+
+-*.orig
+ *~
+ \#*#
+
+diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
+index 1fe9a553c37b71..f0025d1c3d5acd 100644
+--- a/Documentation/ABI/stable/sysfs-block
++++ b/Documentation/ABI/stable/sysfs-block
+@@ -101,6 +101,16 @@ Description:
+ devices that support receiving integrity metadata.
+
+
++What: /sys/block/<disk>/partscan
++Date: May 2024
++Contact: Christoph Hellwig <hch@lst.de>
++Description:
++ The /sys/block/<disk>/partscan file reports if partition
++ scanning is enabled for the disk. It returns "1" if partition
++ scanning is enabled, or "0" if not. The value type is a 32-bit
++ unsigned integer, but only "0" and "1" are valid values.
++
++
+ What: /sys/block/<disk>/<partition>/alignment_offset
+ Date: April 2009
+ Contact: Martin K. Petersen <martin.petersen@oracle.com>
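
As a quick illustration of the new attribute documented above (the disk name
sda is a placeholder, not taken from this commit)::

    # Prints "1" if partition scanning is enabled for the disk, "0" if not.
    cat /sys/block/sda/partscan
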
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
+index 31dbb390573ff2..c431f0a13cf502 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
++++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
+@@ -3,7 +3,7 @@ KernelVersion:
+ Contact: linux-iio@vger.kernel.org
+ Description:
+ Reading this returns the valid values that can be written to the
+- on_altvoltage0_mode attribute:
++ filter_mode attribute:
+
+ - auto -> Adjust bandpass filter to track changes in input clock rate.
+ - manual -> disable/unregister the clock rate notifier / input clock tracking.
+diff --git a/Documentation/ABI/testing/sysfs-bus-optee-devices b/Documentation/ABI/testing/sysfs-bus-optee-devices
+index 0f58701367b66a..af31e5a22d89fc 100644
+--- a/Documentation/ABI/testing/sysfs-bus-optee-devices
++++ b/Documentation/ABI/testing/sysfs-bus-optee-devices
+@@ -6,3 +6,12 @@ Description:
+ OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
+ matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
+ are free to create needed API under optee-ta-<uuid> directory.
++
++What: /sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
++Date: November 2023
++KernelVersion: 6.7
++Contact: op-tee@lists.trustedfirmware.org
++Description:
++ Allows one to distinguish whether an OP-TEE based TA/device requires user-space
++ tee-supplicant to function properly or not. This attribute will be present for
++ devices which depend on tee-supplicant to be running.
+diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
+index 5e6b74f304062a..1e7e0bb4c14ecb 100644
+--- a/Documentation/ABI/testing/sysfs-class-devfreq
++++ b/Documentation/ABI/testing/sysfs-class-devfreq
+@@ -52,6 +52,9 @@ Description:
+
+ echo 0 > /sys/class/devfreq/.../trans_stat
+
++ If the transition table is bigger than PAGE_SIZE, reading
++ this will return an -EFBIG error.
++
+ What: /sys/class/devfreq/.../available_frequencies
+ Date: October 2012
+ Contact: Nishanth Menon <nm@ti.com>
+diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
+index b2ff0012c0f2b8..2e24ac3bd7efa4 100644
+--- a/Documentation/ABI/testing/sysfs-class-led
++++ b/Documentation/ABI/testing/sysfs-class-led
+@@ -59,15 +59,6 @@ Description:
+ brightness. Reading this file when no hw brightness change
+ event has happened will return an ENODATA error.
+
+-What: /sys/class/leds/<led>/color
+-Date: June 2023
+-KernelVersion: 6.5
+-Description:
+- Color of the LED.
+-
+- This is a read-only file. Reading this file returns the color
+- of the LED as a string (e.g: "red", "green", "multicolor").
+-
+ What: /sys/class/leds/<led>/trigger
+ Date: March 2006
+ KernelVersion: 2.6.17
+diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
+index 906ff3ca928ac1..5bff64d256c207 100644
+--- a/Documentation/ABI/testing/sysfs-class-net-queues
++++ b/Documentation/ABI/testing/sysfs-class-net-queues
+@@ -1,4 +1,4 @@
+-What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
++What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
+ Date: March 2010
+ KernelVersion: 2.6.35
+ Contact: netdev@vger.kernel.org
+@@ -8,7 +8,7 @@ Description:
+ network device queue. Possible values depend on the number
+ of available CPU(s) in the system.
+
+-What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
++What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
+ Date: April 2010
+ KernelVersion: 2.6.35
+ Contact: netdev@vger.kernel.org
+@@ -16,7 +16,7 @@ Description:
+ Number of Receive Packet Steering flows being currently
+ processed by this particular network device receive queue.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
++What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
+ Date: November 2011
+ KernelVersion: 3.3
+ Contact: netdev@vger.kernel.org
+@@ -24,7 +24,7 @@ Description:
+ Indicates the number of transmit timeout events seen by this
+ network interface transmit queue.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
++What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
+ Date: March 2015
+ KernelVersion: 4.1
+ Contact: netdev@vger.kernel.org
+@@ -32,7 +32,7 @@ Description:
+ A Mbps max-rate set for the queue, a value of zero means disabled,
+ default is disabled.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
++What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
+ Date: November 2010
+ KernelVersion: 2.6.38
+ Contact: netdev@vger.kernel.org
+@@ -42,7 +42,7 @@ Description:
+ network device transmit queue. Possible values depend on the
+ number of available CPU(s) in the system.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
++What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
+ Date: June 2018
+ KernelVersion: 4.18.0
+ Contact: netdev@vger.kernel.org
+@@ -53,7 +53,7 @@ Description:
+ number of available receive queue(s) in the network device.
+ Default is disabled.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
++What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
+ Date: November 2011
+ KernelVersion: 3.3
+ Contact: netdev@vger.kernel.org
+@@ -62,7 +62,7 @@ Description:
+ of this particular network device transmit queue.
+ Default value is 1000.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
++What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
+ Date: November 2011
+ KernelVersion: 3.3
+ Contact: netdev@vger.kernel.org
+@@ -70,7 +70,7 @@ Description:
+ Indicates the number of bytes (objects) in flight on this
+ network device transmit queue.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
++What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
+ Date: November 2011
+ KernelVersion: 3.3
+ Contact: netdev@vger.kernel.org
+@@ -79,7 +79,7 @@ Description:
+ on this network device transmit queue. This value is clamped
+ to be within the bounds defined by limit_max and limit_min.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
++What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
+ Date: November 2011
+ KernelVersion: 3.3
+ Contact: netdev@vger.kernel.org
+@@ -88,7 +88,7 @@ Description:
+ queued on this network device transmit queue. See
+ include/linux/dynamic_queue_limits.h for the default value.
+
+-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
++What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
+ Date: November 2011
+ KernelVersion: 3.3
+ Contact: netdev@vger.kernel.org
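
The corrected paths above are ordinary sysfs files; for instance, receive
packet steering is configured by writing a CPU bitmask to rps_cpus. A hedged
sketch, with the interface name eth0 as a stand-in::

    # Allow CPUs 0-3 (bitmask 0xf) to handle RPS work for receive queue 0.
    echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
    cat /sys/class/net/eth0/queues/rx-0/rps_cpus
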
+diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
+index 55db27815361b2..53e508c6936a51 100644
+--- a/Documentation/ABI/testing/sysfs-class-net-statistics
++++ b/Documentation/ABI/testing/sysfs-class-net-statistics
+@@ -1,4 +1,4 @@
+-What: /sys/class/<iface>/statistics/collisions
++What: /sys/class/net/<iface>/statistics/collisions
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -6,7 +6,7 @@ Description:
+ Indicates the number of collisions seen by this network device.
+ This value might not be relevant with all MAC layers.
+
+-What: /sys/class/<iface>/statistics/multicast
++What: /sys/class/net/<iface>/statistics/multicast
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -14,7 +14,7 @@ Description:
+ Indicates the number of multicast packets received by this
+ network device.
+
+-What: /sys/class/<iface>/statistics/rx_bytes
++What: /sys/class/net/<iface>/statistics/rx_bytes
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -23,7 +23,7 @@ Description:
+ See the network driver for the exact meaning of when this
+ value is incremented.
+
+-What: /sys/class/<iface>/statistics/rx_compressed
++What: /sys/class/net/<iface>/statistics/rx_compressed
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -32,7 +32,7 @@ Description:
+ network device. This value might only be relevant for interfaces
+ that support packet compression (e.g: PPP).
+
+-What: /sys/class/<iface>/statistics/rx_crc_errors
++What: /sys/class/net/<iface>/statistics/rx_crc_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -41,7 +41,7 @@ Description:
+ by this network device. Note that the specific meaning might
+ depend on the MAC layer used by the interface.
+
+-What: /sys/class/<iface>/statistics/rx_dropped
++What: /sys/class/net/<iface>/statistics/rx_dropped
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -51,7 +51,7 @@ Description:
+ packet processing. See the network driver for the exact
+ meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_errors
++What: /sys/class/net/<iface>/statistics/rx_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -59,7 +59,7 @@ Description:
+ Indicates the number of receive errors on this network device.
+ See the network driver for the exact meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_fifo_errors
++What: /sys/class/net/<iface>/statistics/rx_fifo_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -68,7 +68,7 @@ Description:
+ network device. See the network driver for the exact
+ meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_frame_errors
++What: /sys/class/net/<iface>/statistics/rx_frame_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -78,7 +78,7 @@ Description:
+ on the MAC layer protocol used. See the network driver for
+ the exact meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_length_errors
++What: /sys/class/net/<iface>/statistics/rx_length_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -87,7 +87,7 @@ Description:
+ error, oversized or undersized. See the network driver for the
+ exact meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_missed_errors
++What: /sys/class/net/<iface>/statistics/rx_missed_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -96,7 +96,7 @@ Description:
+ due to lack of capacity in the receive side. See the network
+ driver for the exact meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_nohandler
++What: /sys/class/net/<iface>/statistics/rx_nohandler
+ Date: February 2016
+ KernelVersion: 4.6
+ Contact: netdev@vger.kernel.org
+@@ -104,7 +104,7 @@ Description:
+ Indicates the number of received packets that were dropped on
+ an inactive device by the network core.
+
+-What: /sys/class/<iface>/statistics/rx_over_errors
++What: /sys/class/net/<iface>/statistics/rx_over_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -114,7 +114,7 @@ Description:
+ (e.g: larger than MTU). See the network driver for the exact
+ meaning of this value.
+
+-What: /sys/class/<iface>/statistics/rx_packets
++What: /sys/class/net/<iface>/statistics/rx_packets
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -122,7 +122,7 @@ Description:
+ Indicates the total number of good packets received by this
+ network device.
+
+-What: /sys/class/<iface>/statistics/tx_aborted_errors
++What: /sys/class/net/<iface>/statistics/tx_aborted_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -132,7 +132,7 @@ Description:
+ a medium collision). See the network driver for the exact
+ meaning of this value.
+
+-What: /sys/class/<iface>/statistics/tx_bytes
++What: /sys/class/net/<iface>/statistics/tx_bytes
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -143,7 +143,7 @@ Description:
+ transmitted packets or all packets that have been queued for
+ transmission.
+
+-What: /sys/class/<iface>/statistics/tx_carrier_errors
++What: /sys/class/net/<iface>/statistics/tx_carrier_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -152,7 +152,7 @@ Description:
+ because of carrier errors (e.g: physical link down). See the
+ network driver for the exact meaning of this value.
+
+-What: /sys/class/<iface>/statistics/tx_compressed
++What: /sys/class/net/<iface>/statistics/tx_compressed
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -161,7 +161,7 @@ Description:
+ this might only be relevant for devices that support
+ compression (e.g: PPP).
+
+-What: /sys/class/<iface>/statistics/tx_dropped
++What: /sys/class/net/<iface>/statistics/tx_dropped
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -170,7 +170,7 @@ Description:
+ See the driver for the exact reasons as to why the packets were
+ dropped.
+
+-What: /sys/class/<iface>/statistics/tx_errors
++What: /sys/class/net/<iface>/statistics/tx_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -179,7 +179,7 @@ Description:
+ a network device. See the driver for the exact reasons as to
+ why the packets were dropped.
+
+-What: /sys/class/<iface>/statistics/tx_fifo_errors
++What: /sys/class/net/<iface>/statistics/tx_fifo_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -188,7 +188,7 @@ Description:
+ FIFO error. See the driver for the exact reasons as to why the
+ packets were dropped.
+
+-What: /sys/class/<iface>/statistics/tx_heartbeat_errors
++What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -197,7 +197,7 @@ Description:
+ reported as heartbeat errors. See the driver for the exact
+ reasons as to why the packets were dropped.
+
+-What: /sys/class/<iface>/statistics/tx_packets
++What: /sys/class/net/<iface>/statistics/tx_packets
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+@@ -206,7 +206,7 @@ Description:
+ device. See the driver for whether this reports the number of all
+ attempted or successful transmissions.
+
+-What: /sys/class/<iface>/statistics/tx_window_errors
++What: /sys/class/net/<iface>/statistics/tx_window_errors
+ Date: April 2005
+ KernelVersion: 2.6.12
+ Contact: netdev@vger.kernel.org
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index 7ecd5c8161a610..657bdee28d845a 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -519,6 +519,7 @@ What: /sys/devices/system/cpu/vulnerabilities
+ /sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
++ /sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
+ /sys/devices/system/cpu/vulnerabilities/retbleed
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+@@ -564,7 +565,8 @@ Description: Control Symmetric Multi Threading (SMT)
+ ================ =========================================
+
+ If control status is "forceoff" or "notsupported" writes
+- are rejected.
++ are rejected. Note that enabling SMT on PowerPC skips
++ offline cores.
+
+ What: /sys/devices/system/cpu/cpuX/power/energy_perf_bias
+ Date: March 2019
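
The SMT control file amended above is a plain read/write sysfs knob; a
minimal sketch (as the text notes, writes are rejected while the status is
"forceoff" or "notsupported")::

    cat /sys/devices/system/cpu/smt/control      # e.g. "on" or "notsupported"
    echo off > /sys/devices/system/cpu/smt/control
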
+diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
+index ef6d6c57105efb..96834d103a09e2 100644
+--- a/Documentation/ABI/testing/sysfs-driver-qat
++++ b/Documentation/ABI/testing/sysfs-driver-qat
+@@ -29,6 +29,8 @@ Description: (RW) Reports the current configuration of the QAT device.
+ services
+ * asym;sym: identical to sym;asym
+ * dc: the device is configured for running compression services
++ * dcc: identical to dc but enables the dc chaining feature,
++ hash then compression. If this is not required, choose dc.
+ * sym: the device is configured for running symmetric crypto
+ services
+ * asym: the device is configured for running asymmetric crypto
+diff --git a/Documentation/admin-guide/abi-obsolete.rst b/Documentation/admin-guide/abi-obsolete.rst
+index d095867899c59a..594e697aa1b2f4 100644
+--- a/Documentation/admin-guide/abi-obsolete.rst
++++ b/Documentation/admin-guide/abi-obsolete.rst
+@@ -7,5 +7,5 @@ marked to be removed at some later point in time.
+ The description of the interface will document the reason why it is
+ obsolete and when it can be expected to be removed.
+
+-.. kernel-abi:: $srctree/Documentation/ABI/obsolete
++.. kernel-abi:: ABI/obsolete
+ :rst:
+diff --git a/Documentation/admin-guide/abi-removed.rst b/Documentation/admin-guide/abi-removed.rst
+index f7e9e43023c136..f9e000c81828e5 100644
+--- a/Documentation/admin-guide/abi-removed.rst
++++ b/Documentation/admin-guide/abi-removed.rst
+@@ -1,5 +1,5 @@
+ ABI removed symbols
+ ===================
+
+-.. kernel-abi:: $srctree/Documentation/ABI/removed
++.. kernel-abi:: ABI/removed
+ :rst:
+diff --git a/Documentation/admin-guide/abi-stable.rst b/Documentation/admin-guide/abi-stable.rst
+index 70490736e0d301..fc3361d847b123 100644
+--- a/Documentation/admin-guide/abi-stable.rst
++++ b/Documentation/admin-guide/abi-stable.rst
+@@ -10,5 +10,5 @@ for at least 2 years.
+ Most interfaces (like syscalls) are expected to never change and always
+ be available.
+
+-.. kernel-abi:: $srctree/Documentation/ABI/stable
++.. kernel-abi:: ABI/stable
+ :rst:
+diff --git a/Documentation/admin-guide/abi-testing.rst b/Documentation/admin-guide/abi-testing.rst
+index b205b16a72d08a..19767926b34407 100644
+--- a/Documentation/admin-guide/abi-testing.rst
++++ b/Documentation/admin-guide/abi-testing.rst
+@@ -16,5 +16,5 @@ Programs that use these interfaces are strongly encouraged to add their
+ name to the description of these interfaces, so that the kernel
+ developers can easily notify them if any changes occur.
+
+-.. kernel-abi:: $srctree/Documentation/ABI/testing
++.. kernel-abi:: ABI/testing
+ :rst:
+diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
+index 5f936b4b601881..3de599cf0779a9 100644
+--- a/Documentation/admin-guide/cifs/usage.rst
++++ b/Documentation/admin-guide/cifs/usage.rst
+@@ -722,40 +722,26 @@ Configuration pseudo-files:
+ ======================= =======================================================
+ SecurityFlags Flags which control security negotiation and
+ also packet signing. Authentication (may/must)
+- flags (e.g. for NTLM and/or NTLMv2) may be combined with
++ flags (e.g. for NTLMv2) may be combined with
+ the signing flags. Specifying two different password
+ hashing mechanisms (as "must use") on the other hand
+ does not make much sense. Default flags are::
+
+- 0x07007
+-
+- (NTLM, NTLMv2 and packet signing allowed). The maximum
+- allowable flags if you want to allow mounts to servers
+- using weaker password hashes is 0x37037 (lanman,
+- plaintext, ntlm, ntlmv2, signing allowed). Some
+- SecurityFlags require the corresponding menuconfig
+- options to be enabled. Enabling plaintext
+- authentication currently requires also enabling
+- lanman authentication in the security flags
+- because the cifs module only supports sending
+- laintext passwords using the older lanman dialect
+- form of the session setup SMB. (e.g. for authentication
+- using plain text passwords, set the SecurityFlags
+- to 0x30030)::
++ 0x00C5
++
++ (NTLMv2 and packet signing allowed). Some SecurityFlags
++ may require enabling a corresponding menuconfig option.
+
+ may use packet signing 0x00001
+ must use packet signing 0x01001
+- may use NTLM (most common password hash) 0x00002
+- must use NTLM 0x02002
+ may use NTLMv2 0x00004
+ must use NTLMv2 0x04004
+- may use Kerberos security 0x00008
+- must use Kerberos 0x08008
+- may use lanman (weak) password hash 0x00010
+- must use lanman password hash 0x10010
+- may use plaintext passwords 0x00020
+- must use plaintext passwords 0x20020
+- (reserved for future packet encryption) 0x00040
++ may use Kerberos security (krb5) 0x00008
++ must use Kerberos 0x08008
++ may use NTLMSSP 0x00080
++ must use NTLMSSP 0x80080
++ seal (packet encryption) 0x00040
++ must seal 0x40040
+
+ cifsFYI If set to non-zero value, additional debug information
+ will be logged to the system error log. This field
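
Because SecurityFlags is a bitmask, the "must" values compose by OR-ing them
together: requiring both NTLMv2 (0x04004) and packet signing (0x01001) yields
0x05005. A sketch of inspecting and tightening the flags at runtime, assuming
the cifs module is loaded::

    cat /proc/fs/cifs/SecurityFlags              # default 0xC5 per the new text
    echo 0x05005 > /proc/fs/cifs/SecurityFlags   # require NTLMv2 and signing
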
+diff --git a/Documentation/admin-guide/features.rst b/Documentation/admin-guide/features.rst
+index 8c167082a84f9e..7651eca38227d0 100644
+--- a/Documentation/admin-guide/features.rst
++++ b/Documentation/admin-guide/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features
++.. kernel-feat:: features
+diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+index cf1eeefdfc32f3..a92e10ec402e7d 100644
+--- a/Documentation/admin-guide/hw-vuln/core-scheduling.rst
++++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+@@ -67,8 +67,8 @@ arg4:
+ will be performed for all tasks in the task group of ``pid``.
+
+ arg5:
+- userspace pointer to an unsigned long for storing the cookie returned by
+- ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
++ userspace pointer to an unsigned long long for storing the cookie returned
++ by ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+
+ In order for a process to push a cookie to, or pull a cookie from a process, it
+ is required to have the ptrace access mode: `PTRACE_MODE_READ_REALCREDS` to the
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index de99caabf65a3f..ff0b440ef2dc90 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -21,3 +21,4 @@ are configurable at compile, boot or run time.
+ cross-thread-rsb
+ srso
+ gather_data_sampling
++ reg-file-data-sampling
+diff --git a/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+new file mode 100644
+index 00000000000000..0585d02b9a6cbc
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+@@ -0,0 +1,104 @@
++==================================
++Register File Data Sampling (RFDS)
++==================================
++
++Register File Data Sampling (RFDS) is a microarchitectural vulnerability that
++only affects Intel Atom parts(also branded as E-cores). RFDS may allow
++a malicious actor to infer data values previously used in floating point
++registers, vector registers, or integer registers. RFDS does not provide the
++ability to choose which data is inferred. CVE-2023-28746 is assigned to RFDS.
++
++Affected Processors
++===================
++Below is the list of affected Intel processors [#f1]_:
++
++ =================== ============
++ Common name Family_Model
++ =================== ============
++ ATOM_GOLDMONT 06_5CH
++ ATOM_GOLDMONT_D 06_5FH
++ ATOM_GOLDMONT_PLUS 06_7AH
++ ATOM_TREMONT_D 06_86H
++ ATOM_TREMONT 06_96H
++ ALDERLAKE 06_97H
++ ALDERLAKE_L 06_9AH
++ ATOM_TREMONT_L 06_9CH
++ RAPTORLAKE 06_B7H
++ RAPTORLAKE_P 06_BAH
++ ATOM_GRACEMONT 06_BEH
++ RAPTORLAKE_S 06_BFH
++ =================== ============
++
++As an exception to this table, Intel Xeon E family parts ALDERLAKE(06_97H) and
++RAPTORLAKE(06_B7H) codenamed Catlow are not affected. They are reported as
++vulnerable in Linux because they share the same family/model with an affected
++part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
++CPUID.HYBRID. This information could be used to distinguish between the
++affected and unaffected parts, but it is deemed not worth adding complexity as
++the reporting is fixed automatically when these parts enumerate RFDS_NO.
++
++Mitigation
++==========
++Intel released a microcode update that enables software to clear sensitive
++information using the VERW instruction. Like MDS, RFDS deploys the same
++mitigation strategy to force the CPU to clear the affected buffers before an
++attacker can extract the secrets. This is achieved by using the otherwise
++unused and obsolete VERW instruction in combination with a microcode update.
++The microcode clears the affected CPU buffers when the VERW instruction is
++executed.
++
++Mitigation points
++-----------------
++VERW is executed by the kernel before returning to user space, and by KVM
++before VMentry. None of the affected cores support SMT, so VERW is not required
++at C-state transitions.
++
++New bits in IA32_ARCH_CAPABILITIES
++----------------------------------
++Newer processors and microcode update on existing affected processors added new
++bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate
++vulnerability and mitigation capability:
++
++- Bit 27 - RFDS_NO - When set, processor is not affected by RFDS.
++- Bit 28 - RFDS_CLEAR - When set, processor is affected by RFDS, and has the
++ microcode that clears the affected buffers on VERW execution.
++
++Mitigation control on the kernel command line
++---------------------------------------------
++The kernel command line allows RFDS mitigation to be controlled at boot time with the
++parameter "reg_file_data_sampling=". The valid arguments are:
++
++ ========== =================================================================
++ on If the CPU is vulnerable, enable mitigation; CPU buffer clearing
++ on exit to userspace and before entering a VM.
++ off Disables mitigation.
++ ========== =================================================================
++
++Mitigation default is selected by CONFIG_MITIGATION_RFDS.
++
++Mitigation status information
++-----------------------------
++The Linux kernel provides a sysfs interface to enumerate the current
++vulnerability status of the system: whether the system is vulnerable, and
++which mitigations are active. The relevant sysfs file is:
++
++ /sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
++
++The possible values in this file are:
++
++ .. list-table::
++
++ * - 'Not affected'
++ - The processor is not vulnerable
++ * - 'Vulnerable'
++ - The processor is vulnerable, but no mitigation enabled
++ * - 'Vulnerable: No microcode'
++ - The processor is vulnerable but microcode is not updated.
++ * - 'Mitigation: Clear Register File'
++ - The processor is vulnerable and the CPU buffer clearing mitigation is
++ enabled.
++
++References
++----------
++.. [#f1] Affected Processors
++ https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
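
Checking a running system against the sysfs interface this new document
describes needs nothing beyond the file it names::

    cat /sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
    # e.g. "Not affected" or "Mitigation: Clear Register File"

Booting with reg_file_data_sampling=off on the kernel command line then
overrides the CONFIG_MITIGATION_RFDS default, as described above.
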
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 32a8893e561776..e0a1be97fa7598 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -138,11 +138,10 @@ associated with the source address of the indirect branch. Specifically,
+ the BHB might be shared across privilege levels even in the presence of
+ Enhanced IBRS.
+
+-Currently the only known real-world BHB attack vector is via
+-unprivileged eBPF. Therefore, it's highly recommended to not enable
+-unprivileged eBPF, especially when eIBRS is used (without retpolines).
+-For a full mitigation against BHB attacks, it's recommended to use
+-retpolines (or eIBRS combined with retpolines).
++Previously the only known real-world BHB attack vector was via unprivileged
++eBPF. Further research has found attacks that don't require unprivileged eBPF.
++For a full mitigation against BHB attacks it is recommended to set BHI_DIS_S or
++use the BHB clearing sequence.
+
+ Attack scenarios
+ ----------------
+@@ -430,6 +429,23 @@ The possible values in this file are:
+ 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
+ =========================== =======================================================
+
++ - Branch History Injection (BHI) protection status:
++
++.. list-table::
++
++ * - BHI: Not affected
++ - System is not affected
++ * - BHI: Retpoline
++ - System is protected by retpoline
++ * - BHI: BHI_DIS_S
++ - System is protected by BHI_DIS_S
++ * - BHI: SW loop, KVM SW loop
++ - System is protected by software clearing sequence
++ * - BHI: Vulnerable
++ - System is vulnerable to BHI
++ * - BHI: Vulnerable, KVM: SW loop
++ - System is vulnerable; KVM is protected by software clearing sequence
++
+ Full mitigation might require a microcode update from the CPU
+ vendor. When the necessary microcode is not available, the kernel will
+ report vulnerability.
+@@ -484,7 +500,11 @@ Spectre variant 2
+
+ Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
+ boot, by setting the IBRS bit, and they're automatically protected against
+- Spectre v2 variant attacks.
++ some Spectre v2 variant attacks. The BHB can still influence the choice of
++ indirect branch predictor entry, and although branch predictor entries are
++ isolated between modes when eIBRS is enabled, the BHB itself is not isolated
++ between modes. Systems which support BHI_DIS_S will set it to protect against
++ BHI attacks.
+
+ On Intel's enhanced IBRS systems, this includes cross-thread branch target
+ injections on SMT systems (STIBP). In other words, Intel eIBRS enables
+@@ -638,6 +658,18 @@ kernel command line.
+ spectre_v2=off. Spectre variant 1 mitigations
+ cannot be disabled.
+
++ spectre_bhi=
++
++ [X86] Control mitigation of Branch History Injection
++ (BHI) vulnerability. This setting affects the deployment
++ of the HW BHI control and the SW BHB clearing sequence.
++
++ on
++ (default) Enable the HW or SW mitigation as
++ needed.
++ off
++ Disable the mitigation.
++
+ For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
+
+ Mitigation selection guide
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index b6cfb51cb0b469..e715bfc09879a7 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -46,12 +46,22 @@ The possible values in this file are:
+
+ The processor is not vulnerable
+
+- * 'Vulnerable: no microcode':
++* 'Vulnerable':
++
++ The processor is vulnerable and no mitigations have been applied.
++
++ * 'Vulnerable: No microcode':
+
+ The processor is vulnerable, no microcode extending IBPB
+ functionality to address the vulnerability has been applied.
+
+- * 'Mitigation: microcode':
++ * 'Vulnerable: Safe RET, no microcode':
++
++ The "Safe RET" mitigation (see below) has been applied to protect the
++ kernel, but the IBPB-extending microcode has not been applied. User
++ space tasks may still be vulnerable.
++
++ * 'Vulnerable: Microcode, no safe RET':
+
+ Extended IBPB functionality microcode patch has been applied. It does
+ not address User->Kernel and Guest->Host transitions protection but it
+@@ -72,11 +82,11 @@ The possible values in this file are:
+
+ (spec_rstack_overflow=microcode)
+
+- * 'Mitigation: safe RET':
++ * 'Mitigation: Safe RET':
+
+- Software-only mitigation. It complements the extended IBPB microcode
+- patch functionality by addressing User->Kernel and Guest->Host
+- transitions protection.
++ Combined microcode/software mitigation. It complements the
++ extended IBPB microcode patch functionality by addressing
++ User->Kernel and Guest->Host transitions protection.
+
+ Selected by default or by spec_rstack_overflow=safe-ret
+
+@@ -129,7 +139,7 @@ an indrect branch prediction barrier after having applied the required
+ microcode patch for one's system. This mitigation comes also at
+ a performance cost.
+
+-Mitigation: safe RET
++Mitigation: Safe RET
+ --------------------
+
+ The mitigation works by ensuring all RET instructions speculate to
+diff --git a/Documentation/admin-guide/kdump/vmcoreinfo.rst b/Documentation/admin-guide/kdump/vmcoreinfo.rst
+index 599e8d3bcbc318..9235cf4fbabff0 100644
+--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
++++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
+@@ -172,7 +172,7 @@ variables.
+ Offset of the free_list's member. This value is used to compute the number
+ of free pages.
+
+-Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
++Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
+ The free_list represents a linked list of free page blocks.
+
+ (list_head, next|prev)
+@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
+ information. Makedumpfile gets the start address of the vmalloc region
+ from this.
+
+-(zone.free_area, MAX_ORDER + 1)
+--------------------------------
++(zone.free_area, NR_PAGE_ORDERS)
++--------------------------------
+
+ Free areas descriptor. User-space tools use this value to iterate the
+ free_area ranges. MAX_ORDER is used by the zone buddy allocator.
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 0a1731a0f0ef37..d83a3f47e20074 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -664,12 +664,6 @@
+ loops can be debugged more effectively on production
+ systems.
+
+- clocksource.max_cswd_read_retries= [KNL]
+- Number of clocksource_watchdog() retries due to
+- external delays before the clock will be marked
+- unstable. Defaults to two retries, that is,
+- three attempts to read the clock under test.
+-
+ clocksource.verify_n_cpus= [KNL]
+ Limit the number of CPUs checked for clocksources
+ marked with CLOCK_SOURCE_VERIFY_PERCPU that
+@@ -1133,6 +1127,26 @@
+ The filter can be disabled or changed to another
+ driver later using sysfs.
+
++ reg_file_data_sampling=
++ [X86] Controls mitigation for Register File Data
++ Sampling (RFDS) vulnerability. RFDS is a CPU
++ vulnerability which may allow userspace to infer
++ kernel data values previously stored in floating point
++ registers, vector registers, or integer registers.
++ RFDS only affects Intel Atom processors.
++
++ on: Turns ON the mitigation.
++ off: Turns OFF the mitigation.
++
++ This parameter overrides the compile time default set
++ by CONFIG_MITIGATION_RFDS. Mitigation cannot be
++ disabled when other VERW based mitigations (like MDS)
++ are enabled. In order to disable RFDS mitigation all
++ VERW based mitigations need to be disabled.
++
++ For details see:
++ Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
++
+ driver_async_probe= [KNL]
+ List of driver names to be probed asynchronously. *
+ matches with all driver names. If * is specified, the
+@@ -3249,9 +3263,7 @@
+
+ mem_encrypt= [X86-64] AMD Secure Memory Encryption (SME) control
+ Valid arguments: on, off
+- Default (depends on kernel configuration option):
+- on (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
+- off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
++ Default: off
+ mem_encrypt=on: Activate SME
+ mem_encrypt=off: Do not activate SME
+
+@@ -3305,6 +3317,9 @@
+ arch-independent options, each of which is an
+ aggregation of existing arch-specific options.
+
++ Note, "mitigations" is supported if and only if the
++ kernel was built with CPU_MITIGATIONS=y.
++
+ off
+ Disable all optional CPU mitigations. This
+ improves system performance, but it may also
+@@ -3322,8 +3337,10 @@
+ nospectre_bhb [ARM64]
+ nospectre_v1 [X86,PPC]
+ nospectre_v2 [X86,PPC,S390,ARM64]
++ reg_file_data_sampling=off [X86]
+ retbleed=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
++ spectre_bhi=off [X86]
+ spectre_v2_user=off [X86]
+ srbds=off [X86,INTEL]
+ ssbd=force-off [ARM64]
+@@ -4622,6 +4639,16 @@
+ printk.time= Show timing data prefixed to each printk message line
+ Format: <bool> (1/Y/y=enable, 0/N/n=disable)
+
++ proc_mem.force_override= [KNL]
++ Format: {always | ptrace | never}
++ Traditionally /proc/pid/mem allows memory permissions to be
++ overridden without restrictions. This option may be set to
++ restrict that. Can be one of:
++ - 'always': traditional behavior always allows mem overrides.
++ - 'ptrace': only allow mem overrides for active ptracers.
++ - 'never': never allow mem overrides.
++ If not specified, default is the CONFIG_PROC_MEM_* choice.
++
+ processor.max_cstate= [HW,ACPI]
+ Limit processor to maximum C-state
+ max_cstate=9 overrides any DMI blacklist limit.
+@@ -4632,11 +4659,9 @@
+
+ profile= [KNL] Enable kernel profiling via /proc/profile
+ Format: [<profiletype>,]<number>
+- Param: <profiletype>: "schedule", "sleep", or "kvm"
++ Param: <profiletype>: "schedule" or "kvm"
+ [defaults to kernel profiling]
+ Param: "schedule" - profile schedule points.
+- Param: "sleep" - profile D-state sleeping (millisecs).
+- Requires CONFIG_SCHEDSTATS
+ Param: "kvm" - profile VM exits.
+ Param: <number> - step/bucket size as a power of 2 for
+ statistical time based profiling.
+@@ -5858,6 +5883,13 @@
+ This feature may be more efficiently disabled
+ using the csdlock_debug- kernel parameter.
+
++ smp.panic_on_ipistall= [KNL]
++ If a csd_lock_timeout extends for more than
++ the specified number of milliseconds, panic the
++ system. By default, let CSD-lock acquisition
++ take as long as it takes. Specifying 300,000
++ for this value provides a 5-minute timeout.
++
+ smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
+ smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
+ smsc-ircc2.ircc_sir= [HW] SIR base I/O port
+@@ -5894,6 +5926,15 @@
+ sonypi.*= [HW] Sony Programmable I/O Control Device driver
+ See Documentation/admin-guide/laptops/sonypi.rst
+
++ spectre_bhi= [X86] Control mitigation of Branch History Injection
++ (BHI) vulnerability. This setting affects the
++ deployment of the HW BHI control and the SW BHB
++ clearing sequence.
++
++ on - (default) Enable the HW or SW mitigation
++ as needed.
++ off - Disable the mitigation.
++
+ spectre_v2= [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability.
+ The default operation protects the kernel from
+@@ -6817,6 +6858,9 @@
+ pause after every control message);
+ o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
+ delay after resetting its port);
++ p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
++ (Reduce timeout of the SET_ADDRESS
++ request from 5000 ms to 500 ms);
+ Example: quirks=0781:5580:bk,0a5c:5834:gij
+
+ usbhid.mousepoll=
+diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
+index 8da1b728182733..9285f69f4f7355 100644
+--- a/Documentation/admin-guide/mm/damon/usage.rst
++++ b/Documentation/admin-guide/mm/damon/usage.rst
+@@ -389,7 +389,7 @@ pages of all memory cgroups except ``/having_care_already``.::
+ # # further filter out all cgroups except one at '/having_care_already'
+ echo memcg > 1/type
+ echo /having_care_already > 1/memcg_path
+- echo N > 1/matching
++ echo Y > 1/matching
+
+ Note that ``anon`` and ``memcg`` filters are currently supported only when
+ ``paddr`` `implementation <sysfs_contexts>` is being used.
+diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
+index 4877563241f3bd..5f1748f33d9a20 100644
+--- a/Documentation/admin-guide/sysctl/net.rst
++++ b/Documentation/admin-guide/sysctl/net.rst
+@@ -205,6 +205,11 @@ Will increase power usage.
+
+ Default: 0 (off)
+
++mem_pcpu_rsv
++------------
++
++Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
++
+ rmem_default
+ ------------
+
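
The new knob sits alongside the other net.core sysctls; a hedged example of
reading and raising it (the value is counted in pages, so the byte size
depends on the system page size)::

    sysctl net.core.mem_pcpu_rsv          # read the current per-CPU reserve
    sysctl -w net.core.mem_pcpu_rsv=512   # 2 MiB per CPU with 4 KiB pages
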
+diff --git a/Documentation/arch/arc/features.rst b/Documentation/arch/arc/features.rst
+index b793583d688a46..49ff446ff744cc 100644
+--- a/Documentation/arch/arc/features.rst
++++ b/Documentation/arch/arc/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features arc
++.. kernel-feat:: features arc
+diff --git a/Documentation/arch/arm/features.rst b/Documentation/arch/arm/features.rst
+index 7414ec03dd157c..0e76aaf68ecab2 100644
+--- a/Documentation/arch/arm/features.rst
++++ b/Documentation/arch/arm/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features arm
++.. kernel-feat:: features arm
+diff --git a/Documentation/arch/arm64/features.rst b/Documentation/arch/arm64/features.rst
+index dfa4cb3cd3efa5..03321f4309d0be 100644
+--- a/Documentation/arch/arm64/features.rst
++++ b/Documentation/arch/arm64/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features arm64
++.. kernel-feat:: features arm64
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index f47f63bcf67c91..3cf806733083c7 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -54,6 +54,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+ +----------------+-----------------+-----------------+-----------------------------+
+@@ -71,6 +73,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A510 | #3117295 | ARM64_ERRATUM_3117295 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A520 | #2966298 | ARM64_ERRATUM_2966298 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+@@ -117,32 +121,72 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N1 | #1349291 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-500 | #841119,826419 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-600 | #1076982,1209401| N/A |
+@@ -233,3 +277,12 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ASR | ASR8601 | #8601001 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft | Azure Cobalt 100| #2139208 | ARM64_ERRATUM_2139208 |
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft | Azure Cobalt 100| #2067961 | ARM64_ERRATUM_2067961 |
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft | Azure Cobalt 100| #2253138 | ARM64_ERRATUM_2253138 |
+++----------------+-----------------+-----------------+-----------------------------+
++| Microsoft | Azure Cobalt 100| #3324339 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Documentation/arch/ia64/features.rst b/Documentation/arch/ia64/features.rst
+index d7226fdcf5f8c0..056838d2ab55c5 100644
+--- a/Documentation/arch/ia64/features.rst
++++ b/Documentation/arch/ia64/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features ia64
++.. kernel-feat:: features ia64
+diff --git a/Documentation/arch/loongarch/features.rst b/Documentation/arch/loongarch/features.rst
+index ebacade3ea454e..009f44c7951f8a 100644
+--- a/Documentation/arch/loongarch/features.rst
++++ b/Documentation/arch/loongarch/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features loongarch
++.. kernel-feat:: features loongarch
+diff --git a/Documentation/arch/m68k/features.rst b/Documentation/arch/m68k/features.rst
+index 5107a21194724e..de7f0ccf7fc8ed 100644
+--- a/Documentation/arch/m68k/features.rst
++++ b/Documentation/arch/m68k/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features m68k
++.. kernel-feat:: features m68k
+diff --git a/Documentation/arch/mips/features.rst b/Documentation/arch/mips/features.rst
+index 1973d729b29a98..6e0ffe3e735400 100644
+--- a/Documentation/arch/mips/features.rst
++++ b/Documentation/arch/mips/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features mips
++.. kernel-feat:: features mips
+diff --git a/Documentation/arch/nios2/features.rst b/Documentation/arch/nios2/features.rst
+index 8449e63f69b2b4..89913810ccb5a0 100644
+--- a/Documentation/arch/nios2/features.rst
++++ b/Documentation/arch/nios2/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features nios2
++.. kernel-feat:: features nios2
+diff --git a/Documentation/arch/openrisc/features.rst b/Documentation/arch/openrisc/features.rst
+index 3f7c40d219f2cc..bae2e25adfd642 100644
+--- a/Documentation/arch/openrisc/features.rst
++++ b/Documentation/arch/openrisc/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features openrisc
++.. kernel-feat:: features openrisc
+diff --git a/Documentation/arch/parisc/features.rst b/Documentation/arch/parisc/features.rst
+index 501d7c45003790..b3aa4d243b9362 100644
+--- a/Documentation/arch/parisc/features.rst
++++ b/Documentation/arch/parisc/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features parisc
++.. kernel-feat:: features parisc
+diff --git a/Documentation/arch/s390/features.rst b/Documentation/arch/s390/features.rst
+index 57c296a9d8f30d..2883dc95068173 100644
+--- a/Documentation/arch/s390/features.rst
++++ b/Documentation/arch/s390/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features s390
++.. kernel-feat:: features s390
+diff --git a/Documentation/arch/sh/features.rst b/Documentation/arch/sh/features.rst
+index f722af3b6c9934..fae48fe81e9bd0 100644
+--- a/Documentation/arch/sh/features.rst
++++ b/Documentation/arch/sh/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features sh
++.. kernel-feat:: features sh
+diff --git a/Documentation/arch/sparc/features.rst b/Documentation/arch/sparc/features.rst
+index c0c92468b0fe90..96835b6d598a1a 100644
+--- a/Documentation/arch/sparc/features.rst
++++ b/Documentation/arch/sparc/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features sparc
++.. kernel-feat:: features sparc
+diff --git a/Documentation/arch/x86/amd-memory-encryption.rst b/Documentation/arch/x86/amd-memory-encryption.rst
+index 934310ce725829..bace87cc9ca2ce 100644
+--- a/Documentation/arch/x86/amd-memory-encryption.rst
++++ b/Documentation/arch/x86/amd-memory-encryption.rst
+@@ -87,14 +87,14 @@ The state of SME in the Linux kernel can be documented as follows:
+ kernel is non-zero).
+
+ SME can also be enabled and activated in the BIOS. If SME is enabled and
+-activated in the BIOS, then all memory accesses will be encrypted and it will
+-not be necessary to activate the Linux memory encryption support. If the BIOS
+-merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
+-memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
+-by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
+-not enable SME, then Linux will not be able to activate memory encryption, even
+-if configured to do so by default or the mem_encrypt=on command line parameter
+-is specified.
++activated in the BIOS, then all memory accesses will be encrypted and it
++will not be necessary to activate the Linux memory encryption support.
++
++If the BIOS merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG),
++then memory encryption can be enabled by supplying mem_encrypt=on on the
++kernel command line. However, if the BIOS does not enable SME, then
++Linux will not be able to activate memory encryption, even if it is
++configured to do so by default or the mem_encrypt=on command line
++parameter is specified.
+
+ Secure Nested Paging (SNP)
+ ==========================
+diff --git a/Documentation/arch/x86/features.rst b/Documentation/arch/x86/features.rst
+index b663f15053ce85..a33616346a388c 100644
+--- a/Documentation/arch/x86/features.rst
++++ b/Documentation/arch/x86/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features x86
++.. kernel-feat:: features x86
+diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst
+index e73fdff62c0aa1..c58c72362911cd 100644
+--- a/Documentation/arch/x86/mds.rst
++++ b/Documentation/arch/x86/mds.rst
+@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
+
+ mds_clear_cpu_buffers()
+
++The macro CLEAR_CPU_BUFFERS can also be used in ASM late in the
++exit-to-user path. Other than CFLAGS.ZF, it doesn't clobber any registers.
++
+ The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+ (idle) transitions.
+
+@@ -138,17 +141,30 @@ Mitigation points
+
+ When transitioning from kernel to user space the CPU buffers are flushed
+ on affected CPUs when the mitigation is not disabled on the kernel
+- command line. The migitation is enabled through the static key
+- mds_user_clear.
+-
+- The mitigation is invoked in prepare_exit_to_usermode() which covers
+- all but one of the kernel to user space transitions. The exception
+- is when we return from a Non Maskable Interrupt (NMI), which is
+- handled directly in do_nmi().
+-
+- (The reason that NMI is special is that prepare_exit_to_usermode() can
+- enable IRQs. In NMI context, NMIs are blocked, and we don't want to
+- enable IRQs with NMIs blocked.)
++ command line. The mitigation is enabled through the feature flag
++ X86_FEATURE_CLEAR_CPU_BUF.
++
++ The mitigation is invoked just before transitioning to userspace after
++ user registers are restored. This is done to minimize the window in
++ which kernel data could be accessed after VERW, e.g. via an NMI
++ arriving after the VERW.
++
++ **Corner case not handled**
++ Interrupts returning to kernel don't clear CPU buffers since the
++ exit-to-user path is expected to do that anyway. But there could be
++ a case when an NMI is generated in the kernel after the exit-to-user
++ path has cleared the buffers. This case is not handled, and NMIs
++ returning to the kernel don't clear CPU buffers, because:
++
++ 1. It is rare to get an NMI after VERW, but before returning to userspace.
++ 2. For an unprivileged user, there is no known way to make that NMI
++    less rare or target it.
++ 3. It would take a large number of these precisely-timed NMIs to mount
++    an actual attack. There's presumably not enough bandwidth.
++ 4. The NMI in question occurs after a VERW, i.e. when user state is
++    restored and most interesting data is already scrubbed. What's left
++    is only the data that the NMI touches, and that may or may not be
++    of any interest.
+
+
+ 2. C-State transition
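Note: the buffer clearing discussed in the mds.rst hunk above boils down to
executing VERW with a valid, writable data segment selector, which microcode
overloads to flush the affected CPU buffers. A minimal sketch of the idea,
assuming x86-64 kernel context; the real helper lives in
arch/x86/include/asm/nospec-branch.h and may differ in detail::

    #include <linux/types.h>

    static inline void mds_clear_cpu_buffers_sketch(void)
    {
            static const u16 ds = __KERNEL_DS;

            /*
             * VERW against a valid selector triggers the microcode-based
             * flush of store buffers, fill buffers and load ports.
             */
            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }
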
+diff --git a/Documentation/arch/xtensa/features.rst b/Documentation/arch/xtensa/features.rst
+index 6b92c7bfa19daa..28dcce1759be4b 100644
+--- a/Documentation/arch/xtensa/features.rst
++++ b/Documentation/arch/xtensa/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features xtensa
++.. kernel-feat:: features xtensa
+diff --git a/Documentation/bpf/map_lpm_trie.rst b/Documentation/bpf/map_lpm_trie.rst
+index 74d64a30f50073..f9cd579496c9ce 100644
+--- a/Documentation/bpf/map_lpm_trie.rst
++++ b/Documentation/bpf/map_lpm_trie.rst
+@@ -17,7 +17,7 @@ significant byte.
+
+ LPM tries may be created with a maximum prefix length that is a multiple
+ of 8, in the range from 8 to 2048. The key used for lookup and update
+-operations is a ``struct bpf_lpm_trie_key``, extended by
++operations is a ``struct bpf_lpm_trie_key_u8``, extended by
+ ``max_prefixlen/8`` bytes.
+
+ - For IPv4 addresses the data length is 4 bytes
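The key layout described in the map_lpm_trie.rst hunk above is easiest to see
as a concrete type. A sketch for an IPv4 map key, assuming a userspace program
with the uapi headers available; the struct name is illustrative, but the
fixed-header-plus-data layout follows the documented scheme::

    #include <linux/types.h>

    /* 4-byte prefix header, extended by max_prefixlen/8 = 4 data bytes */
    struct ipv4_lpm_key {
            __u32 prefixlen;   /* up to 32 for IPv4 */
            __u8  data[4];     /* network-byte-order address bytes */
    };
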
+diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst
+index 7964fe134277b8..6c1303cff159e1 100644
+--- a/Documentation/cdrom/cdrom-standard.rst
++++ b/Documentation/cdrom/cdrom-standard.rst
+@@ -217,7 +217,7 @@ current *struct* is::
+ int (*media_changed)(struct cdrom_device_info *, int);
+ int (*tray_move)(struct cdrom_device_info *, int);
+ int (*lock_door)(struct cdrom_device_info *, int);
+- int (*select_speed)(struct cdrom_device_info *, int);
++ int (*select_speed)(struct cdrom_device_info *, unsigned long);
+ int (*get_last_session) (struct cdrom_device_info *,
+ struct cdrom_multisession *);
+ int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
+@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.
+
+ ::
+
+- int select_speed(struct cdrom_device_info *cdi, int speed)
++ int select_speed(struct cdrom_device_info *cdi, unsigned long speed)
+
+ Some CD-ROM drives are capable of changing their head-speed. There
+ are several reasons for changing the speed of a CD-ROM drive. Badly
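A driver hook matching the corrected prototype in the cdrom-standard.rst hunk
above now takes the speed as an unsigned long. A hedged sketch, where
demo_program_speed() stands in for whatever register access a real driver
performs::

    #include <linux/cdrom.h>

    static int demo_program_speed(void *hw, unsigned long speed)
    {
            /* hypothetical hardware helper; real drivers differ */
            return 0;
    }

    static int demo_select_speed(struct cdrom_device_info *cdi,
                                 unsigned long speed)
    {
            return demo_program_speed(cdi->handle, speed);
    }
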
+diff --git a/Documentation/conf.py b/Documentation/conf.py
+index d4fdf6a3875a83..e385e24fe9e72e 100644
+--- a/Documentation/conf.py
++++ b/Documentation/conf.py
+@@ -345,9 +345,9 @@ sys.stderr.write("Using %s theme\n" % html_theme)
+ html_static_path = ['sphinx-static']
+
+ # If true, Docutils "smart quotes" will be used to convert quotes and dashes
+-# to typographically correct entities. This will convert "--" to "—",
+-# which is not always what we want, so disable it.
+-smartquotes = False
++# to typographically correct entities. However, conversion of "--" to "—"
++# is not always what we want, so enable only quotes.
++smartquotes_action = 'q'
+
+ # Custom sidebar templates, maps document names to template names.
+ # Note that the RTD theme ignores this
+@@ -383,6 +383,12 @@ latex_elements = {
+ verbatimhintsturnover=false,
+ ''',
+
++ #
++ # Some of our authors are fond of deep nesting; tell latex to
++ # cope.
++ #
++ 'maxlistdepth': '10',
++
+ # For CJK One-half spacing, need to be in front of hyperref
+ 'extrapackages': r'\usepackage{setspace}',
+
+diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
+index deede972f25479..3ae1b3677d7f3a 100644
+--- a/Documentation/dev-tools/kselftest.rst
++++ b/Documentation/dev-tools/kselftest.rst
+@@ -255,9 +255,21 @@ Contributing new tests (details)
+
+ TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
+ executable which is not tested by default.
++
+ TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
+ test.
+
++ TEST_INCLUDES is similar to TEST_FILES; it lists files which should be
++ included when exporting or installing the tests, with the following
++ differences:
++
++ * symlinks to files in other directories are preserved
++ * the part of paths below tools/testing/selftests/ is preserved when
++ copying the files to the output directory
++
++ TEST_INCLUDES is meant to list dependencies located in other directories of
++ the selftests hierarchy.
++
+ * First use the headers inside the kernel source and/or git repo, and then the
+ system headers. Headers for the kernel release as opposed to headers
+ installed by the distro on the system should be the primary focus to be able
+diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
+index adbfaea3234354..90f31beb80c224 100644
+--- a/Documentation/devicetree/bindings/arm/qcom.yaml
++++ b/Documentation/devicetree/bindings/arm/qcom.yaml
+@@ -136,7 +136,7 @@ description: |
+ There are many devices in the list below that run the standard ChromeOS
+ bootloader setup and use the open source depthcharge bootloader to boot the
+ OS. These devices do not use the scheme described above. For details, see:
+- https://docs.kernel.org/arm/google/chromebook-boot-flow.html
++ https://docs.kernel.org/arch/arm/google/chromebook-boot-flow.html
+
+ properties:
+ $nodename:
+diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
+index 0999ea07f47bb6..e4576546bf0dbb 100644
+--- a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
++++ b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
+@@ -127,6 +127,7 @@ patternProperties:
+ - qcom,dsi-phy-20nm
+ - qcom,dsi-phy-28nm-8226
+ - qcom,dsi-phy-28nm-hpm
++ - qcom,dsi-phy-28nm-hpm-fam-b
+ - qcom,dsi-phy-28nm-lp
+ - qcom,hdmi-phy-8084
+ - qcom,hdmi-phy-8660
+diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+index 437db0c62339fa..e1b4b910044b0e 100644
+--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
++++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+@@ -47,8 +47,8 @@ properties:
+ - 3
+
+ dma-channels:
+- minItems: 1
+- maxItems: 64
++ minimum: 1
++ maximum: 64
+
+ clocks:
+ minItems: 1
+diff --git a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
+index c1060e5fcef3a9..d3d8a2e143ed25 100644
+--- a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
++++ b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
+@@ -126,7 +126,7 @@ examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+- gpio@e000a000 {
++ gpio@a0020000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ reg = <0xa0020000 0x10000>;
+ #gpio-cells = <2>;
+diff --git a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+index 6adedd3ec399b9..c22e459c175abf 100644
+--- a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+@@ -75,7 +75,7 @@ required:
+ - clocks
+
+ allOf:
+- - $ref: i2c-controller.yaml
++ - $ref: /schemas/i2c/i2c-controller.yaml#
+ - if:
+ properties:
+ compatible:
+diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+index ab151c9db21913..580003cdfff59e 100644
+--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
++++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+@@ -21,7 +21,7 @@ description: |
+ google,cros-ec-spi or google,cros-ec-i2c.
+
+ allOf:
+- - $ref: i2c-controller.yaml#
++ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+ properties:
+ compatible:
+diff --git a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
+index 9996dd93f84b29..e1f450b80db278 100644
+--- a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
++++ b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
+@@ -28,6 +28,9 @@ properties:
+ reg:
+ maxItems: 1
+
++ clocks:
++ maxItems: 1
++
+ dmas:
+ maxItems: 1
+
+@@ -39,12 +42,16 @@ properties:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ A reference to a the actual ADC to which this FPGA ADC interfaces to.
++ deprecated: true
++
++ '#io-backend-cells':
++ const: 0
+
+ required:
+ - compatible
+ - dmas
+ - reg
+- - adi,adc-dev
++ - clocks
+
+ additionalProperties: false
+
+@@ -55,7 +62,7 @@ examples:
+ reg = <0x44a00000 0x10000>;
+ dmas = <&rx_dma 0>;
+ dma-names = "rx";
+-
+- adi,adc-dev = <&spi_adc>;
++ clocks = <&axi_clk>;
++ #io-backend-cells = <0>;
+ };
+ ...
+diff --git a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
+index c13c10c8d65da2..eed0df9d3a2322 100644
+--- a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
++++ b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml
+@@ -42,7 +42,7 @@ allOf:
+ properties:
+ compatible:
+ contains:
+- const: maxim,max30100
++ const: maxim,max30102
+ then:
+ properties:
+ maxim,green-led-current-microamp: false
+diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
+index 9790f75fc669ef..fe5145d3b73cf2 100644
+--- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
++++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
+@@ -23,7 +23,6 @@ properties:
+ - ak8963
+ - ak09911
+ - ak09912
+- - ak09916
+ deprecated: true
+
+ reg:
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+index 509d20c091af82..6a206111d4e0f0 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
++++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+@@ -62,6 +62,9 @@ properties:
+ - description: MPM pin number
+ - description: GIC SPI number for the MPM pin
+
++ '#power-domain-cells':
++ const: 0
++
+ required:
+ - compatible
+ - reg
+@@ -93,4 +96,5 @@ examples:
+ <86 183>,
+ <90 260>,
+ <91 260>;
++ #power-domain-cells = <0>;
+ };
+diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+index cf456f8d9ddcb8..c87677f5e2a250 100644
+--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
++++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+@@ -37,15 +37,15 @@ properties:
+ active low.
+ maxItems: 1
+
+- dovdd-supply:
++ DOVDD-supply:
+ description:
+ Definition of the regulator used as interface power supply.
+
+- avdd-supply:
++ AVDD-supply:
+ description:
+ Definition of the regulator used as analog power supply.
+
+- dvdd-supply:
++ DVDD-supply:
+ description:
+ Definition of the regulator used as digital power supply.
+
+@@ -59,9 +59,9 @@ required:
+ - reg
+ - clocks
+ - clock-names
+- - dovdd-supply
+- - avdd-supply
+- - dvdd-supply
++ - DOVDD-supply
++ - AVDD-supply
++ - DVDD-supply
+ - reset-gpios
+ - port
+
+@@ -82,9 +82,9 @@ examples:
+ clock-names = "xvclk";
+ reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+
+- dovdd-supply = <&sw2_reg>;
+- dvdd-supply = <&sw2_reg>;
+- avdd-supply = <&reg_peri_3p15v>;
++ DOVDD-supply = <&sw2_reg>;
++ DVDD-supply = <&sw2_reg>;
++ AVDD-supply = <&reg_peri_3p15v>;
+
+ port {
+ ov2680_to_mipi: endpoint {
+diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
+index 7032c7e1503900..3e128733ef535b 100644
+--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
++++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml
+@@ -61,6 +61,9 @@ properties:
+ - description: used for 1st data pipe from RDMA
+ - description: used for 2nd data pipe from RDMA
+
++ '#dma-cells':
++ const: 1
++
+ required:
+ - compatible
+ - reg
+@@ -70,6 +73,7 @@ required:
+ - clocks
+ - iommus
+ - mboxes
++ - '#dma-cells'
+
+ additionalProperties: false
+
+@@ -80,16 +84,17 @@ examples:
+ #include <dt-bindings/power/mt8183-power.h>
+ #include <dt-bindings/memory/mt8183-larb-port.h>
+
+- mdp3_rdma0: mdp3-rdma0@14001000 {
+- compatible = "mediatek,mt8183-mdp3-rdma";
+- reg = <0x14001000 0x1000>;
+- mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+- mediatek,gce-events = <CMDQ_EVENT_MDP_RDMA0_SOF>,
+- <CMDQ_EVENT_MDP_RDMA0_EOF>;
+- power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+- clocks = <&mmsys CLK_MM_MDP_RDMA0>,
+- <&mmsys CLK_MM_MDP_RSZ1>;
+- iommus = <&iommu>;
+- mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>,
+- <&gce 21 CMDQ_THR_PRIO_LOWEST>;
++ dma-controller@14001000 {
++ compatible = "mediatek,mt8183-mdp3-rdma";
++ reg = <0x14001000 0x1000>;
++ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
++ mediatek,gce-events = <CMDQ_EVENT_MDP_RDMA0_SOF>,
++ <CMDQ_EVENT_MDP_RDMA0_EOF>;
++ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
++ clocks = <&mmsys CLK_MM_MDP_RDMA0>,
++ <&mmsys CLK_MM_MDP_RSZ1>;
++ iommus = <&iommu>;
++ mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>,
++ <&gce 21 CMDQ_THR_PRIO_LOWEST>;
++ #dma-cells = <1>;
+ };
+diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
+index 0baa77198fa217..64ea98aa05928e 100644
+--- a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
++++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml
+@@ -50,6 +50,9 @@ properties:
+ iommus:
+ maxItems: 1
+
++ '#dma-cells':
++ const: 1
++
+ required:
+ - compatible
+ - reg
+@@ -58,6 +61,7 @@ required:
+ - power-domains
+ - clocks
+ - iommus
++ - '#dma-cells'
+
+ additionalProperties: false
+
+@@ -68,13 +72,14 @@ examples:
+ #include <dt-bindings/power/mt8183-power.h>
+ #include <dt-bindings/memory/mt8183-larb-port.h>
+
+- mdp3_wrot0: mdp3-wrot0@14005000 {
+- compatible = "mediatek,mt8183-mdp3-wrot";
+- reg = <0x14005000 0x1000>;
+- mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
+- mediatek,gce-events = <CMDQ_EVENT_MDP_WROT0_SOF>,
+- <CMDQ_EVENT_MDP_WROT0_EOF>;
+- power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+- clocks = <&mmsys CLK_MM_MDP_WROT0>;
+- iommus = <&iommu>;
++ dma-controller@14005000 {
++ compatible = "mediatek,mt8183-mdp3-wrot";
++ reg = <0x14005000 0x1000>;
++ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
++ mediatek,gce-events = <CMDQ_EVENT_MDP_WROT0_SOF>,
++ <CMDQ_EVENT_MDP_WROT0_EOF>;
++ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
++ clocks = <&mmsys CLK_MM_MDP_WROT0>;
++ iommus = <&iommu>;
++ #dma-cells = <1>;
+ };
+diff --git a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+index e466dff8286d2b..afcaa427d48b09 100644
+--- a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
++++ b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+@@ -90,15 +90,16 @@ properties:
+ description: connection point for input on the parallel interface
+
+ properties:
+- bus-type:
+- enum: [5, 6]
+-
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+- required:
+- - bus-type
++ properties:
++ bus-type:
++ enum: [5, 6]
++
++ required:
++ - bus-type
+
+ anyOf:
+ - required:
+diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
+index 294693a8906cf5..10540aa7afa1af 100644
+--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
++++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
+@@ -22,8 +22,9 @@ compatible:
+ "mediatek,mt6323" for PMIC MT6323
+ "mediatek,mt6331" for PMIC MT6331 and MT6332
+ "mediatek,mt6357" for PMIC MT6357
+- "mediatek,mt6358" for PMIC MT6358 and MT6366
++ "mediatek,mt6358" for PMIC MT6358
+ "mediatek,mt6359" for PMIC MT6359
++ "mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366
+ "mediatek,mt6397" for PMIC MT6397
+
+ Optional subnodes:
+@@ -40,6 +41,7 @@ Optional subnodes:
+ - compatible: "mediatek,mt6323-regulator"
+ see ../regulator/mt6323-regulator.txt
+ - compatible: "mediatek,mt6358-regulator"
++ - compatible: "mediatek,mt6366-regulator", "mediatek-mt6358-regulator"
+ see ../regulator/mt6358-regulator.txt
+ - compatible: "mediatek,mt6397-regulator"
+ see ../regulator/mt6397-regulator.txt
+diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
+index e74502a0afe867..3202dc7967c5b6 100644
+--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
++++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
+@@ -337,8 +337,8 @@ allOf:
+ minItems: 4
+
+ clocks:
+- minItems: 34
+- maxItems: 34
++ minItems: 24
++ maxItems: 24
+
+ clock-names:
+ items:
+@@ -351,18 +351,6 @@ allOf:
+ - const: ethwarp_wocpu1
+ - const: ethwarp_wocpu0
+ - const: esw
+- - const: netsys0
+- - const: netsys1
+- - const: sgmii_tx250m
+- - const: sgmii_rx250m
+- - const: sgmii2_tx250m
+- - const: sgmii2_rx250m
+- - const: top_usxgmii0_sel
+- - const: top_usxgmii1_sel
+- - const: top_sgm0_sel
+- - const: top_sgm1_sel
+- - const: top_xfi_phy0_xtal_sel
+- - const: top_xfi_phy1_xtal_sel
+ - const: top_eth_gmii_sel
+ - const: top_eth_refck_50m_sel
+ - const: top_eth_sys_200m_sel
+@@ -375,16 +363,10 @@ allOf:
+ - const: top_netsys_sync_250m_sel
+ - const: top_netsys_ppefb_250m_sel
+ - const: top_netsys_warp_sel
+- - const: wocpu1
+- - const: wocpu0
+ - const: xgp1
+ - const: xgp2
+ - const: xgp3
+
+- mediatek,sgmiisys:
+- minItems: 2
+- maxItems: 2
+-
+ patternProperties:
+ "^mac@[0-1]$":
+ type: object
+diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+index ddf9522a5dc230..5c2769dc689af7 100644
+--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
++++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+@@ -394,6 +394,11 @@ properties:
+ When a PFC frame is received with priorities matching the bitmask,
+ the queue is blocked from transmitting for the pause time specified
+ in the PFC frame.
++
++ snps,coe-unsupported:
++ type: boolean
++ description: TX checksum offload is unsupported by the TX queue.
++
+ allOf:
+ - if:
+ required:
+diff --git a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml
+index 1d33d80af11c3c..652d696bc9e90b 100644
+--- a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml
++++ b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml
+@@ -34,6 +34,7 @@ properties:
+ and length of the AXI DMA controller IO space, unless
+ axistream-connected is specified, in which case the reg
+ attribute of the node referenced by it is used.
++ minItems: 1
+ maxItems: 2
+
+ interrupts:
+@@ -165,7 +166,7 @@ examples:
+ clock-names = "s_axi_lite_clk", "axis_clk", "ref_clk", "mgt_clk";
+ clocks = <&axi_clk>, <&axi_clk>, <&pl_enet_ref_clk>, <&mgt_clk>;
+ phy-mode = "mii";
+- reg = <0x00 0x40000000 0x00 0x40000>;
++ reg = <0x40000000 0x40000>;
+ xlnx,rxcsum = <0x2>;
+ xlnx,rxmem = <0x800>;
+ xlnx,txcsum = <0x2>;
+diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+index a9b822aeaa7edb..e436650f0faf7d 100644
+--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
++++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
+@@ -14,9 +14,11 @@ allOf:
+
+ properties:
+ compatible:
+- enum:
+- - fsl,imx23-ocotp
+- - fsl,imx28-ocotp
++ items:
++ - enum:
++ - fsl,imx23-ocotp
++ - fsl,imx28-ocotp
++ - const: fsl,ocotp
+
+ reg:
+ maxItems: 1
+@@ -34,7 +36,7 @@ unevaluatedProperties: false
+ examples:
+ - |
+ ocotp: efuse@8002c000 {
+- compatible = "fsl,imx28-ocotp";
++ compatible = "fsl,imx28-ocotp", "fsl,ocotp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x8002c000 0x2000>;
+diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+index 8fdfbc763d7045..835b6db00c2796 100644
+--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
++++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+@@ -68,6 +68,18 @@ properties:
+ phy-names:
+ const: pcie
+
++ vpcie1v5-supply:
++ description: The 1.5v regulator to use for PCIe.
++
++ vpcie3v3-supply:
++ description: The 3.3v regulator to use for PCIe.
++
++ vpcie12v-supply:
++ description: The 12v regulator to use for PCIe.
++
++ iommu-map: true
++ iommu-map-mask: true
++
+ required:
+ - compatible
+ - reg
+@@ -121,5 +133,7 @@ examples:
+ clock-names = "pcie", "pcie_bus";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
++ vpcie3v3-supply = <&pcie_3v3>;
++ vpcie12v-supply = <&pcie_12v>;
+ };
+ };
+diff --git a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
+index 531008f0b6ac32..002b728cbc7184 100644
+--- a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
+@@ -37,6 +37,7 @@ properties:
+ description: This property is needed if using 24MHz OSC for RC's PHY.
+
+ ep-gpios:
++ maxItems: 1
+ description: pre-reset GPIO
+
+ vpcie12v-supply:
+diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+index d981d77e82e40a..a6244c33faf614 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+@@ -71,7 +71,6 @@ required:
+ - reg
+ - clocks
+ - clock-names
+- - power-domains
+ - resets
+ - reset-names
+ - vdda-phy-supply
+@@ -130,6 +129,21 @@ allOf:
+ clock-names:
+ maxItems: 1
+
++ - if:
++ properties:
++ compatible:
++ contains:
++ enum:
++ - qcom,msm8996-qmp-ufs-phy
++ - qcom,msm8998-qmp-ufs-phy
++ then:
++ properties:
++ power-domains:
++ false
++ else:
++ required:
++ - power-domains
++
+ additionalProperties: false
+
+ examples:
+diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+index 9af203dc8793f3..fa7408eb74895b 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+@@ -62,12 +62,12 @@ properties:
+ "#clock-cells":
+ const: 1
+ description:
+- See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
++ See include/dt-bindings/phy/phy-qcom-qmp.h
+
+ "#phy-cells":
+ const: 1
+ description:
+- See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
++ See include/dt-bindings/phy/phy-qcom-qmp.h
+
+ orientation-switch:
+ description:
+diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+index 029569d5fcf35f..24c733c10e0e92 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+@@ -32,6 +32,27 @@ properties:
+
+ vdd3-supply: true
+
++ qcom,tune-usb2-disc-thres:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed disconnect threshold
++ minimum: 0
++ maximum: 7
++ default: 0
++
++ qcom,tune-usb2-amplitude:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed transmit amplitude
++ minimum: 0
++ maximum: 15
++ default: 8
++
++ qcom,tune-usb2-preem:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed TX pre-emphasis tuning
++ minimum: 0
++ maximum: 7
++ default: 5
++
+ required:
+ - compatible
+ - reg
+diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+index 0f200e3f97a9a3..fce7f8a19e9c0a 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+@@ -15,9 +15,6 @@ description: |
+ properties:
+ compatible:
+ oneOf:
+- - enum:
+- - qcom,sc8180x-usb-hs-phy
+- - qcom,usb-snps-femto-v2-phy
+ - items:
+ - enum:
+ - qcom,sa8775p-usb-hs-phy
+@@ -26,6 +23,7 @@ properties:
+ - items:
+ - enum:
+ - qcom,sc7280-usb-hs-phy
++ - qcom,sc8180x-usb-hs-phy
+ - qcom,sdx55-usb-hs-phy
+ - qcom,sdx65-usb-hs-phy
+ - qcom,sm6375-usb-hs-phy
+diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+index bd72a326e6e069..60f30a59f3853e 100644
+--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+@@ -97,7 +97,8 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [emmc, emmc_rst]
++ items:
++ enum: [emmc, emmc_rst]
+ - if:
+ properties:
+ function:
+@@ -105,8 +106,9 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
+- rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
++ items:
++ enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
++ rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
+ - if:
+ properties:
+ function:
+@@ -123,10 +125,11 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
+- i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
+- i2s1_out_data, i2s2_out_data, i2s3_out_data,
+- i2s4_out_data]
++ items:
++ enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
++ i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
++ i2s1_out_data, i2s2_out_data, i2s3_out_data,
++ i2s4_out_data]
+ - if:
+ properties:
+ function:
+@@ -159,10 +162,11 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
+- pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
+- pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
+- pcie_wake, pcie_clkreq]
++ items:
++ enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
++ pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
++ pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
++ pcie_wake, pcie_clkreq]
+ - if:
+ properties:
+ function:
+@@ -178,11 +182,12 @@ patternProperties:
+ then:
+ properties:
+ groups:
+- enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
+- pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
+- pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
+- pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
+- pwm_ch7_0, pwm_0, pwm_1]
++ items:
++ enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
++ pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
++ pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
++ pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
++ pwm_ch7_0, pwm_0, pwm_1]
+ - if:
+ properties:
+ function:
+@@ -260,33 +265,34 @@ patternProperties:
+ pins:
+ description:
+ An array of strings. Each string contains the name of a pin.
+- enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
+- RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
+- I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
+- I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
+- G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
+- G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
+- NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
+- MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
+- MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
+- MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
+- MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
+- PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
+- GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
+- PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
+- AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
+- PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
+- WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
+- WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
+- EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
+- EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
+- WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
+- UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
+- UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
+- PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
+- GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
+- TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
+- WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
++ items:
++ enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
++ RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
++ I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
++ I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
++ G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
++ G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
++ NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
++ MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
++ MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
++ MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
++ MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
++ PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
++ GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
++ PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
++ AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
++ PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
++ WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
++ WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
++ EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
++ EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
++ WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
++ UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
++ UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
++ PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
++ GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
++ TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
++ WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
+
+ bias-disable: true
+
+diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
+index 303a443d9e29b2..9418fd66a8e95a 100644
+--- a/Documentation/devicetree/bindings/serial/rs485.yaml
++++ b/Documentation/devicetree/bindings/serial/rs485.yaml
+@@ -29,6 +29,10 @@ properties:
+ default: 0
+ maximum: 100
+
++ rs485-rts-active-high:
++ description: drive RTS high when sending (this is the default).
++ $ref: /schemas/types.yaml#/definitions/flag
++
+ rs485-rts-active-low:
+ description: drive RTS low when sending (default is high).
+ $ref: /schemas/types.yaml#/definitions/flag
+diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
+index ea277560a59664..5727bd549deca4 100644
+--- a/Documentation/devicetree/bindings/serial/serial.yaml
++++ b/Documentation/devicetree/bindings/serial/serial.yaml
+@@ -96,7 +96,7 @@ then:
+ rts-gpios: false
+
+ patternProperties:
+- "^bluetooth|gnss|gps|mcu$":
++ "^(bluetooth|gnss|gps|mcu)$":
+ if:
+ type: object
+ then:
+diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+index e4fa6a07b4fa2c..be6ffec2b07497 100644
+--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
++++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+@@ -163,6 +163,7 @@ allOf:
+ unevaluatedProperties: false
+
+ pcie-phy:
++ type: object
+ description:
+ Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt
+
+diff --git a/Documentation/devicetree/bindings/sound/rt5645.txt b/Documentation/devicetree/bindings/sound/rt5645.txt
+index 41a62fd2ae1ffb..c1fa379f5f3ea1 100644
+--- a/Documentation/devicetree/bindings/sound/rt5645.txt
++++ b/Documentation/devicetree/bindings/sound/rt5645.txt
+@@ -20,6 +20,11 @@ Optional properties:
+ a GPIO spec for the external headphone detect pin. If jd-mode = 0,
+ we will get the JD status by getting the value of hp-detect-gpios.
+
++- cbj-sleeve-gpios:
++ a GPIO spec to control the external combo jack circuit, tying the
++ sleeve/ring2 contacts to ground or leaving them floating. This helps
++ avoid electrical noise from the active speaker jacks.
++
+ - realtek,in2-differential
+ Boolean. Indicate MIC2 input are differential, rather than single-ended.
+
+@@ -68,6 +73,7 @@ codec: rt5650@1a {
+ compatible = "realtek,rt5650";
+ reg = <0x1a>;
+ hp-detect-gpios = <&gpio 19 0>;
++ cbj-sleeve-gpios = <&gpio 20 0>;
+ interrupt-parent = <&gpio>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ realtek,dmic-en = "true";
+diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
+index 7fd59114548001..902db92da83207 100644
+--- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
++++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
+@@ -15,12 +15,19 @@ allOf:
+
+ properties:
+ compatible:
+- enum:
+- - nxp,imx8dxl-fspi
+- - nxp,imx8mm-fspi
+- - nxp,imx8mp-fspi
+- - nxp,imx8qxp-fspi
+- - nxp,lx2160a-fspi
++ oneOf:
++ - enum:
++ - nxp,imx8dxl-fspi
++ - nxp,imx8mm-fspi
++ - nxp,imx8mp-fspi
++ - nxp,imx8qxp-fspi
++ - nxp,imx8ulp-fspi
++ - nxp,lx2160a-fspi
++ - items:
++ - enum:
++ - nxp,imx93-fspi
++ - nxp,imx95-fspi
++ - const: nxp,imx8mm-fspi
+
+ reg:
+ items:
+diff --git a/Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml b/Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
+index f882903769f954..eee7c8d4cf4a29 100644
+--- a/Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
++++ b/Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
+@@ -14,7 +14,7 @@ description: |
+ It is a MIPI System Power Management (SPMI) controller.
+
+ The PMIC part is provided by
+- ./Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml.
++ Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml.
+
+ allOf:
+ - $ref: spmi.yaml#
+@@ -48,7 +48,7 @@ patternProperties:
+ PMIC properties, which are specific to the used SPMI PMIC device(s).
+ When used in combination with HiSilicon 6421v600, the properties
+ are documented at
+- drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml.
++ Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
+
+ unevaluatedProperties: false
+
+diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+index 7538469997f9e1..ca81c8afba79c6 100644
+--- a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
++++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+@@ -10,28 +10,55 @@ maintainers:
+ - zhanghongchen <zhanghongchen@loongson.cn>
+ - Yinbo Zhu <zhuyinbo@loongson.cn>
+
++allOf:
++ - $ref: /schemas/thermal/thermal-sensor.yaml#
++
+ properties:
+ compatible:
+ oneOf:
+ - enum:
+ - loongson,ls2k1000-thermal
++ - loongson,ls2k2000-thermal
+ - items:
+ - enum:
+- - loongson,ls2k2000-thermal
++ - loongson,ls2k0500-thermal
+ - const: loongson,ls2k1000-thermal
+
+ reg:
+- maxItems: 1
++ minItems: 1
++ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
++ '#thermal-sensor-cells':
++ const: 1
++
+ required:
+ - compatible
+ - reg
+ - interrupts
++ - '#thermal-sensor-cells'
++
++if:
++ properties:
++ compatible:
++ contains:
++ enum:
++ - loongson,ls2k2000-thermal
++
++then:
++ properties:
++ reg:
++ minItems: 2
++ maxItems: 2
++
++else:
++ properties:
++ reg:
++ maxItems: 1
+
+-additionalProperties: false
++unevaluatedProperties: false
+
+ examples:
+ - |
+@@ -41,4 +68,5 @@ examples:
+ reg = <0x1fe01500 0x30>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
++ #thermal-sensor-cells = <1>;
+ };
+diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+index 4f3acdc4dec0ef..98cdd98212c495 100644
+--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
++++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+@@ -49,7 +49,10 @@ properties:
+ to take when the temperature crosses those thresholds.
+
+ patternProperties:
+- "^[a-zA-Z][a-zA-Z0-9\\-]{1,12}-thermal$":
++ # Node name is limited in size due to Linux kernel requirements - 19
++ # characters in total (see THERMAL_NAME_LENGTH, including terminating NUL
++ # byte):
++ "^[a-zA-Z][a-zA-Z0-9\\-]{1,10}-thermal$":
+ type: object
+ description:
+ Each thermal zone node contains information about how frequently it
+diff --git a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+index bffdab0b01859b..fbac40b958ddea 100644
+--- a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
++++ b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+@@ -169,27 +169,27 @@ properties:
+ - const: tgib0
+ - const: tgic0
+ - const: tgid0
+- - const: tgiv0
++ - const: tciv0
+ - const: tgie0
+ - const: tgif0
+ - const: tgia1
+ - const: tgib1
+- - const: tgiv1
+- - const: tgiu1
++ - const: tciv1
++ - const: tciu1
+ - const: tgia2
+ - const: tgib2
+- - const: tgiv2
+- - const: tgiu2
++ - const: tciv2
++ - const: tciu2
+ - const: tgia3
+ - const: tgib3
+ - const: tgic3
+ - const: tgid3
+- - const: tgiv3
++ - const: tciv3
+ - const: tgia4
+ - const: tgib4
+ - const: tgic4
+ - const: tgid4
+- - const: tgiv4
++ - const: tciv4
+ - const: tgiu5
+ - const: tgiv5
+ - const: tgiw5
+@@ -197,18 +197,18 @@ properties:
+ - const: tgib6
+ - const: tgic6
+ - const: tgid6
+- - const: tgiv6
++ - const: tciv6
+ - const: tgia7
+ - const: tgib7
+ - const: tgic7
+ - const: tgid7
+- - const: tgiv7
++ - const: tciv7
+ - const: tgia8
+ - const: tgib8
+ - const: tgic8
+ - const: tgid8
+- - const: tgiv8
+- - const: tgiu8
++ - const: tciv8
++ - const: tciu8
+
+ clocks:
+ maxItems: 1
+@@ -285,16 +285,16 @@ examples:
+ <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
+- interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
++ interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
+ "tgif0",
+- "tgia1", "tgib1", "tgiv1", "tgiu1",
+- "tgia2", "tgib2", "tgiv2", "tgiu2",
+- "tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
+- "tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
++ "tgia1", "tgib1", "tciv1", "tciu1",
++ "tgia2", "tgib2", "tciv2", "tciu2",
++ "tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
++ "tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
+ "tgiu5", "tgiv5", "tgiw5",
+- "tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
+- "tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
+- "tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
++ "tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
++ "tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
++ "tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
+ clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
+diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+index ff3a1707ef570f..6d4cfd943f5847 100644
+--- a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
++++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+@@ -36,7 +36,11 @@ properties:
+
+ vdd-supply:
+ description:
+- VDD power supply to the hub
++ 3V3 power supply to the hub
++
++ vdd2-supply:
++ description:
++ 1V2 power supply to the hub
+
+ peer-hub:
+ $ref: /schemas/types.yaml#/definitions/phandle
+@@ -62,6 +66,7 @@ allOf:
+ properties:
+ reset-gpios: false
+ vdd-supply: false
++ vdd2-supply: false
+ peer-hub: false
+ i2c-bus: false
+ else:
+diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
+index 60420853409533..833f68fb070089 100644
+--- a/Documentation/driver-api/fpga/fpga-bridge.rst
++++ b/Documentation/driver-api/fpga/fpga-bridge.rst
+@@ -6,9 +6,12 @@ API to implement a new FPGA bridge
+
+ * struct fpga_bridge - The FPGA Bridge structure
+ * struct fpga_bridge_ops - Low level Bridge driver ops
+-* fpga_bridge_register() - Create and register a bridge
++* __fpga_bridge_register() - Create and register a bridge
+ * fpga_bridge_unregister() - Unregister a bridge
+
++The helper macro ``fpga_bridge_register()`` automatically sets
++the module that registers the FPGA bridge as the owner.
++
+ .. kernel-doc:: include/linux/fpga/fpga-bridge.h
+ :functions: fpga_bridge
+
+@@ -16,7 +19,7 @@ API to implement a new FPGA bridge
+ :functions: fpga_bridge_ops
+
+ .. kernel-doc:: drivers/fpga/fpga-bridge.c
+- :functions: fpga_bridge_register
++ :functions: __fpga_bridge_register
+
+ .. kernel-doc:: drivers/fpga/fpga-bridge.c
+ :functions: fpga_bridge_unregister
+diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
+index 49c0a951265320..8d2b79f696c1fb 100644
+--- a/Documentation/driver-api/fpga/fpga-mgr.rst
++++ b/Documentation/driver-api/fpga/fpga-mgr.rst
+@@ -24,7 +24,8 @@ How to support a new FPGA device
+ --------------------------------
+
+ To add another FPGA manager, write a driver that implements a set of ops. The
+-probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
++probe function calls ``fpga_mgr_register()`` or ``fpga_mgr_register_full()``,
++such as::
+
+ static const struct fpga_manager_ops socfpga_fpga_ops = {
+ .write_init = socfpga_fpga_ops_configure_init,
+@@ -69,10 +70,11 @@ probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
+ }
+
+ Alternatively, the probe function could call one of the resource managed
+-register functions, devm_fpga_mgr_register() or devm_fpga_mgr_register_full().
+-When these functions are used, the parameter syntax is the same, but the call
+-to fpga_mgr_unregister() should be removed. In the above example, the
+-socfpga_fpga_remove() function would not be required.
++register functions, ``devm_fpga_mgr_register()`` or
++``devm_fpga_mgr_register_full()``. When these functions are used, the
++parameter syntax is the same, but the call to ``fpga_mgr_unregister()`` should be
++removed. In the above example, the ``socfpga_fpga_remove()`` function would not be
++required.
+
+ The ops will implement whatever device specific register writes are needed to
+ do the programming sequence for this particular FPGA. These ops return 0 for
+@@ -125,15 +127,19 @@ API for implementing a new FPGA Manager driver
+ * struct fpga_manager - the FPGA manager struct
+ * struct fpga_manager_ops - Low level FPGA manager driver ops
+ * struct fpga_manager_info - Parameter structure for fpga_mgr_register_full()
+-* fpga_mgr_register_full() - Create and register an FPGA manager using the
++* __fpga_mgr_register_full() - Create and register an FPGA manager using the
+ fpga_mgr_info structure to provide the full flexibility of options
+-* fpga_mgr_register() - Create and register an FPGA manager using standard
++* __fpga_mgr_register() - Create and register an FPGA manager using standard
+ arguments
+-* devm_fpga_mgr_register_full() - Resource managed version of
+- fpga_mgr_register_full()
+-* devm_fpga_mgr_register() - Resource managed version of fpga_mgr_register()
++* __devm_fpga_mgr_register_full() - Resource managed version of
++ __fpga_mgr_register_full()
++* __devm_fpga_mgr_register() - Resource managed version of __fpga_mgr_register()
+ * fpga_mgr_unregister() - Unregister an FPGA manager
+
++Helper macros ``fpga_mgr_register_full()``, ``fpga_mgr_register()``,
++``devm_fpga_mgr_register_full()``, and ``devm_fpga_mgr_register()`` are available
++to ease the registration.
++
+ .. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :functions: fpga_mgr_states
+
+@@ -147,16 +153,16 @@ API for implementing a new FPGA Manager driver
+ :functions: fpga_manager_info
+
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+- :functions: fpga_mgr_register_full
++ :functions: __fpga_mgr_register_full
+
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+- :functions: fpga_mgr_register
++ :functions: __fpga_mgr_register
+
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+- :functions: devm_fpga_mgr_register_full
++ :functions: __devm_fpga_mgr_register_full
+
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+- :functions: devm_fpga_mgr_register
++ :functions: __devm_fpga_mgr_register
+
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: fpga_mgr_unregister
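The resource-managed path described in the fpga-mgr.rst hunk above removes the
need for an explicit unregister. A minimal probe sketch, assuming an ops table
like the socfpga example quoted earlier in that file; names here are
illustrative, not a complete driver::

    #include <linux/err.h>
    #include <linux/fpga/fpga-mgr.h>
    #include <linux/platform_device.h>

    static int demo_fpga_probe(struct platform_device *pdev)
    {
            struct fpga_manager *mgr;

            /* demo_fpga_ops: an ops table as in the socfpga example */
            mgr = devm_fpga_mgr_register(&pdev->dev, "Demo FPGA Manager",
                                         &demo_fpga_ops, NULL);

            /* no fpga_mgr_unregister() needed on the devm path */
            return PTR_ERR_OR_ZERO(mgr);
    }
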
+diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
+index dc55d60a0b4a51..2d03b5fb765755 100644
+--- a/Documentation/driver-api/fpga/fpga-region.rst
++++ b/Documentation/driver-api/fpga/fpga-region.rst
+@@ -46,13 +46,16 @@ API to add a new FPGA region
+ ----------------------------
+
+ * struct fpga_region - The FPGA region struct
+-* struct fpga_region_info - Parameter structure for fpga_region_register_full()
+-* fpga_region_register_full() - Create and register an FPGA region using the
++* struct fpga_region_info - Parameter structure for __fpga_region_register_full()
++* __fpga_region_register_full() - Create and register an FPGA region using the
+ fpga_region_info structure to provide the full flexibility of options
+-* fpga_region_register() - Create and register an FPGA region using standard
++* __fpga_region_register() - Create and register an FPGA region using standard
+ arguments
+ * fpga_region_unregister() - Unregister an FPGA region
+
++Helper macros ``fpga_region_register()`` and ``fpga_region_register_full()``
++automatically set the module that registers the FPGA region as the owner.
++
+ The FPGA region's probe function will need to get a reference to the FPGA
+ Manager it will be using to do the programming. This usually would happen
+ during the region's probe function.
+@@ -82,10 +85,10 @@ following APIs to handle building or tearing down that list.
+ :functions: fpga_region_info
+
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+- :functions: fpga_region_register_full
++ :functions: __fpga_region_register_full
+
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+- :functions: fpga_region_register
++ :functions: __fpga_region_register
+
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+ :functions: fpga_region_unregister
+diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst
+index e224e47b6b0944..dfa021eacd63c4 100644
+--- a/Documentation/driver-api/ipmi.rst
++++ b/Documentation/driver-api/ipmi.rst
+@@ -540,7 +540,7 @@ at module load time (for a module) with::
+ alerts_broken
+
+ The addresses are normal I2C addresses. The adapter is the string
+-name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
++name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
+ It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
+ spaces, so if the name is "This is an I2C chip" you can say
+ adapter_name=ThisisanI2cchip. This is because it's hard to pass in
+diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
+index 44deb52beeb476..d0b241628cf13d 100644
+--- a/Documentation/driver-api/pci/p2pdma.rst
++++ b/Documentation/driver-api/pci/p2pdma.rst
+@@ -83,19 +83,9 @@ this to include other types of resources like doorbells.
+ Client Drivers
+ --------------
+
+-A client driver typically only has to conditionally change its DMA map
+-routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead
+-of the usual :c:func:`dma_map_sg()` function. Memory mapped in this
+-way does not need to be unmapped.
+-
+-The client may also, optionally, make use of
+-:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping
+-functions and when to use the regular mapping functions. In some
+-situations, it may be more appropriate to use a flag to indicate a
+-given request is P2P memory and map appropriately. It is important to
+-ensure that struct pages that back P2P memory stay out of code that
+-does not have support for them as other code may treat the pages as
+-regular memory which may not be appropriate.
++A client driver only has to use the usual mapping API functions,
++:c:func:`dma_map_sg()` and :c:func:`dma_unmap_sg()`, and the
++implementation will do the right thing for the P2P capable memory.
+
+
+ Orchestrator Drivers
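In other words, the client-side flow described in the p2pdma.rst hunk above
collapses to the ordinary streaming DMA calls. A sketch of that flow, with
error handling abbreviated and the device/scatterlist setup assumed to exist::

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int demo_issue_transfer(struct device *dev,
                                   struct scatterlist *sgl, int nents)
    {
            int mapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

            if (!mapped)
                    return -EIO;

            /* ... hand the mapped list to the hardware ... */

            dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
            return 0;
    }
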
+diff --git a/Documentation/driver-api/pwm.rst b/Documentation/driver-api/pwm.rst
+index 3fdc95f7a1d159..ed5ec98165381f 100644
+--- a/Documentation/driver-api/pwm.rst
++++ b/Documentation/driver-api/pwm.rst
+@@ -41,7 +41,7 @@ the getter, devm_pwm_get() and devm_fwnode_pwm_get(), also exist.
+
+ After being requested, a PWM has to be configured using::
+
+- int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
++ int pwm_apply_might_sleep(struct pwm_device *pwm, struct pwm_state *state);
+
+ This API controls both the PWM period/duty_cycle config and the
+ enable/disable state.
+@@ -57,13 +57,13 @@ If supported by the driver, the signal can be optimized, for example to improve
+ EMI by phase shifting the individual channels of a chip.
+
+ The pwm_config(), pwm_enable() and pwm_disable() functions are just wrappers
+-around pwm_apply_state() and should not be used if the user wants to change
++around pwm_apply_might_sleep() and should not be used if the user wants to change
+ several parameter at once. For example, if you see pwm_config() and
+ pwm_{enable,disable}() calls in the same function, this probably means you
+-should switch to pwm_apply_state().
++should switch to pwm_apply_might_sleep().
+
+ The PWM user API also allows one to query the PWM state that was passed to the
+-last invocation of pwm_apply_state() using pwm_get_state(). Note this is
++last invocation of pwm_apply_might_sleep() using pwm_get_state(). Note this is
+ different to what the driver has actually implemented if the request cannot be
+ satisfied exactly with the hardware in use. There is currently no way for
+ consumers to get the actually implemented settings.
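The single-call model described in the pwm.rst hunk above is clearest in code.
A consumer-side sketch, assuming the PWM device was already obtained with one
of the getters mentioned earlier; the period values are arbitrary examples::

    #include <linux/pwm.h>

    static int demo_pwm_setup(struct pwm_device *pwm)
    {
            struct pwm_state state;

            pwm_init_state(pwm, &state);    /* seed from args/defaults */
            state.period = 1000000;         /* 1 ms, i.e. 1 kHz */
            state.duty_cycle = 500000;      /* 50% duty */
            state.enabled = true;

            /* one call instead of pwm_config() + pwm_enable() */
            return pwm_apply_might_sleep(pwm, &state);
    }
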
+diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst
+index dccd61c7c5c3be..193c22687851a7 100644
+--- a/Documentation/filesystems/directory-locking.rst
++++ b/Documentation/filesystems/directory-locking.rst
+@@ -22,13 +22,16 @@ exclusive.
+ 3) object removal. Locking rules: caller locks parent, finds victim,
+ locks victim and calls the method. Locks are exclusive.
+
+-4) rename() that is _not_ cross-directory. Locking rules: caller locks the
+-parent and finds source and target. We lock both (provided they exist). If we
+-need to lock two inodes of different type (dir vs non-dir), we lock directory
+-first. If we need to lock two inodes of the same type, lock them in inode
+-pointer order. Then call the method. All locks are exclusive.
+-NB: we might get away with locking the source (and target in exchange
+-case) shared.
++4) rename() that is _not_ cross-directory. Locking rules: caller locks
++the parent and finds source and target. Then we decide which of the
++source and target need to be locked. Source needs to be locked if it's a
++non-directory; target - if it's a non-directory or about to be removed.
++Take the locks that need to be taken, in inode pointer order if need
++to take both (that can happen only when both source and target are
++non-directories - the source because it wouldn't be locked otherwise
++and the target because mixing directory and non-directory is allowed
++only with RENAME_EXCHANGE, and that won't be removing the target).
++After the locks have been taken, call the method. All locks are exclusive.
+
+ 5) link creation. Locking rules:
+
+@@ -44,20 +47,17 @@ rules:
+
+ * lock the filesystem
+ * lock parents in "ancestors first" order. If one is not ancestor of
+- the other, lock them in inode pointer order.
++ the other, lock the parent of source first.
+ * find source and target.
+ * if old parent is equal to or is a descendent of target
+ fail with -ENOTEMPTY
+ * if new parent is equal to or is a descendent of source
+ fail with -ELOOP
+- * Lock both the source and the target provided they exist. If we
+- need to lock two inodes of different type (dir vs non-dir), we lock
+- the directory first. If we need to lock two inodes of the same type,
+- lock them in inode pointer order.
++ * Lock subdirectories involved (source before target).
++ * Lock non-directories involved, in inode pointer order.
+ * call the method.
+
+-All ->i_rwsem are taken exclusive. Again, we might get away with locking
+-the source (and target in exchange case) shared.
++All ->i_rwsem are taken exclusive.
+
+ The rules above obviously guarantee that all directories that are going to be
+ read, modified or removed by method will be locked by caller.
+@@ -67,6 +67,7 @@ If no directory is its own ancestor, the scheme above is deadlock-free.
+
+ Proof:
+
++[XXX: will be updated once we are done massaging the lock_rename()]
+ First of all, at any moment we have a linear ordering of the
+ objects - A < B iff (A is an ancestor of B) or (B is not an ancestor
+ of A and ptr(A) < ptr(B)).
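+
+ To make rule (4) above concrete, a simplified C-style sketch of which
+ victims get ->i_rwsem in a same-directory rename (illustrative only:
+ will_be_removed() is a hypothetical predicate, and the real logic lives
+ in fs/namei.c)::
+
+     #include <linux/fs.h>
+
+     static void lock_rename_victims(struct inode *src, struct inode *tgt)
+     {
+             bool lock_src = !S_ISDIR(src->i_mode);
+             bool lock_tgt = tgt &&
+                     (!S_ISDIR(tgt->i_mode) || will_be_removed(tgt));
+
+             if (lock_src && lock_tgt) {
+                     /* both are non-directories: pointer order */
+                     if (src > tgt)
+                             swap(src, tgt);
+                     inode_lock(src);
+                     inode_lock_nested(tgt, I_MUTEX_NONDIR2);
+             } else if (lock_src) {
+                     inode_lock(src);
+             } else if (lock_tgt) {
+                     inode_lock(tgt);
+             }
+     }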
+diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
+index d32c6209685d64..dbfbbe9ab28b12 100644
+--- a/Documentation/filesystems/f2fs.rst
++++ b/Documentation/filesystems/f2fs.rst
+@@ -126,9 +126,7 @@ norecovery Disable the roll-forward recovery routine, mounted read-
+ discard/nodiscard Enable/disable real-time discard in f2fs, if discard is
+ enabled, f2fs will issue discard/TRIM commands when a
+ segment is cleaned.
+-no_heap Disable heap-style segment allocation which finds free
+- segments for data from the beginning of main area, while
+- for node from the end of main area.
++heap/no_heap Deprecated.
+ nouser_xattr Disable Extended User Attributes. Note: xattr is enabled
+ by default if CONFIG_F2FS_FS_XATTR is selected.
+ noacl Disable POSIX Access Control List. Note: acl is enabled
+@@ -228,8 +226,6 @@ mode=%s Control block allocation mode which supports "adaptive"
+ option for more randomness.
+ Please, use these options for your experiments and we strongly
+ recommend to re-format the filesystem after using these options.
+-io_bits=%u Set the bit size of write IO requests. It should be set
+- with "mode=lfs".
+ usrquota Enable plain user disk quota accounting.
+ grpquota Enable plain group disk quota accounting.
+ prjquota Enable plain project quota accounting.
+diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
+index 7be2900806c853..bd12f2f850ad3a 100644
+--- a/Documentation/filesystems/locking.rst
++++ b/Documentation/filesystems/locking.rst
+@@ -101,7 +101,7 @@ symlink: exclusive
+ mkdir: exclusive
+ unlink: exclusive (both)
+ rmdir: exclusive (both)(see below)
+-rename: exclusive (all) (see below)
++rename: exclusive (both parents, some children) (see below)
+ readlink: no
+ get_link: no
+ setattr: exclusive
+@@ -123,6 +123,9 @@ get_offset_ctx no
+ Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_rwsem
+ exclusive on victim.
+ cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
++ ->unlink() and ->rename() have ->i_rwsem exclusive on all non-directories
++ involved.
++ ->rename() has ->i_rwsem exclusive on any subdirectory that changes parent.
+
+ See Documentation/filesystems/directory-locking.rst for more detailed discussion
+ of the locking scheme for directory operations.
+diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
+index 5b93268e400f4c..56a5ad7a4fbd20 100644
+--- a/Documentation/filesystems/overlayfs.rst
++++ b/Documentation/filesystems/overlayfs.rst
+@@ -344,10 +344,11 @@ escaping the colons with a single backslash. For example:
+
+ mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
+
+-Since kernel version v6.5, directory names containing colons can also
+-be provided as lower layer using the fsconfig syscall from new mount api:
++Since kernel version v6.8, directory names containing colons can also
++be configured as lower layer using the "lowerdir+" mount options and the
++fsconfig syscall from new mount api. For example:
+
+- fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
++ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/a:lower::dir", 0);
+
+ In the latter case, colons in lower layer directory names will be escaped
+ as octal characters (\072) when displayed in /proc/self/mountinfo.
+@@ -416,6 +417,16 @@ Only the data of the files in the "data-only" lower layers may be visible
+ when a "metacopy" file in one of the lower layers above it, has a "redirect"
+ to the absolute path of the "lower data" file in the "data-only" lower layer.
+
++Since kernel version v6.8, "data-only" lower layers can also be added using
++the "datadir+" mount options and the fsconfig syscall from new mount api.
++For example:
++
++ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l1", 0);
++ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l2", 0);
++ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l3", 0);
++ fsconfig(fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do1", 0);
++ fsconfig(fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do2", 0);
++
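+
+ Put together, a minimal userspace sketch of building such a stack with the
+ new mount api (kernel v6.8+; the layer paths and mountpoint are
+ placeholders, and error handling is omitted)::
+
+     #include <fcntl.h>
+     #include <unistd.h>
+     #include <sys/syscall.h>
+     #include <linux/mount.h>
+
+     /* fsopen(2)/fsconfig(2)/fsmount(2)/move_mount(2) have no libc
+      * wrappers, hence raw syscall(). */
+     int mount_overlay(void)
+     {
+             int fs_fd = syscall(SYS_fsopen, "overlay", FSOPEN_CLOEXEC);
+             int mnt_fd;
+
+             syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l1", 0);
+             syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l2", 0);
+             syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do1", 0);
+             syscall(SYS_fsconfig, fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+             mnt_fd = syscall(SYS_fsmount, fs_fd, FSMOUNT_CLOEXEC, 0);
+             return syscall(SYS_move_mount, mnt_fd, "", AT_FDCWD, "/merged",
+                            MOVE_MOUNT_F_EMPTY_PATH);
+     }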
+
+ fs-verity support
+ ----------------------
+diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
+index 4d05b9862451ea..41d964b48e6578 100644
+--- a/Documentation/filesystems/porting.rst
++++ b/Documentation/filesystems/porting.rst
+@@ -1045,3 +1045,21 @@ filesystem type is now moved to a later point when the devices are closed:
+ As this is a VFS level change it has no practical consequences for filesystems
+ other than that all of them must use one of the provided kill_litter_super(),
+ kill_anon_super(), or kill_block_super() helpers.
++
++---
++
++**mandatory**
++
++If ->rename() update of .. on cross-directory move needs an exclusion with
++directory modifications, do *not* lock the subdirectory in question in your
++->rename() - it's done by the caller now [that item should've been added in
++28eceeda130f "fs: Lock moved directories"].
++
++---
++
++**mandatory**
++
++On same-directory ->rename() the (tautological) update of .. is not protected
++by any locks; just don't do it if the old parent is the same as the new one.
++We really can't lock two subdirectories in same-directory rename - not without
++deadlocks.
+diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
+index a0c83fc481264e..690d2ffe720eff 100644
+--- a/Documentation/gpu/drm-kms.rst
++++ b/Documentation/gpu/drm-kms.rst
+@@ -546,6 +546,8 @@ Plane Composition Properties
+ .. kernel-doc:: drivers/gpu/drm/drm_blend.c
+ :doc: overview
+
++.. _damage_tracking_properties:
++
+ Damage Tracking Properties
+ --------------------------
+
+diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
+index 03fe5d1247be28..85bbe05436098a 100644
+--- a/Documentation/gpu/todo.rst
++++ b/Documentation/gpu/todo.rst
+@@ -337,8 +337,8 @@ connector register/unregister fixes
+
+ Level: Intermediate
+
+-Remove load/unload callbacks from all non-DRIVER_LEGACY drivers
+----------------------------------------------------------------
++Remove load/unload callbacks
++----------------------------
+
+ The load/unload callbacks in struct &drm_driver are very much midlayers, plus
+ for historical reasons they get the ordering wrong (and we can't fix that)
+@@ -347,8 +347,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().
+ - Rework drivers to no longer use the load/unload callbacks, directly coding the
+ load/unload sequence into the driver's probe function.
+
+-- Once all non-DRIVER_LEGACY drivers are converted, disallow the load/unload
+- callbacks for all modern drivers.
++- Once all drivers are converted, remove the load/unload callbacks.
+
+ Contact: Daniel Vetter
+
+diff --git a/Documentation/hwmon/corsair-psu.rst b/Documentation/hwmon/corsair-psu.rst
+index 16db34d464dd6b..7ed794087f8489 100644
+--- a/Documentation/hwmon/corsair-psu.rst
++++ b/Documentation/hwmon/corsair-psu.rst
+@@ -15,11 +15,11 @@ Supported devices:
+
+ Corsair HX850i
+
+- Corsair HX1000i (Series 2022 and 2023)
++ Corsair HX1000i (Legacy and Series 2023)
+
+- Corsair HX1200i
++ Corsair HX1200i (Legacy and Series 2023)
+
+- Corsair HX1500i (Series 2022 and 2023)
++ Corsair HX1500i (Legacy and Series 2023)
+
+ Corsair RM550i
+
+diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
+index e76e68ccf7182c..10eced6c2e4625 100644
+--- a/Documentation/i2c/busses/i2c-i801.rst
++++ b/Documentation/i2c/busses/i2c-i801.rst
+@@ -47,6 +47,7 @@ Supported adapters:
+ * Intel Alder Lake (PCH)
+ * Intel Raptor Lake (PCH)
+ * Intel Meteor Lake (SOC and PCH)
++ * Intel Birch Stream (SOC)
+
+ Datasheets: Publicly available at the Intel website
+
+diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
+index a1f3eb7a43e235..131863142cbb35 100644
+--- a/Documentation/kbuild/modules.rst
++++ b/Documentation/kbuild/modules.rst
+@@ -128,7 +128,7 @@ executed to make module versioning work.
+
+ modules_install
+ Install the external module(s). The default location is
+- /lib/modules/<kernel_release>/extra/, but a prefix may
++ /lib/modules/<kernel_release>/updates/, but a prefix may
+ be added with INSTALL_MOD_PATH (discussed in section 5).
+
+ clean
+@@ -417,7 +417,7 @@ directory:
+
+ And external modules are installed in:
+
+- /lib/modules/$(KERNELRELEASE)/extra/
++ /lib/modules/$(KERNELRELEASE)/updates/
+
+ 5.1 INSTALL_MOD_PATH
+ --------------------
+@@ -438,10 +438,10 @@ And external modules are installed in:
+ -------------------
+
+ External modules are by default installed to a directory under
+- /lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
++ /lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
+ locate modules for a specific functionality in a separate
+ directory. For this purpose, use INSTALL_MOD_DIR to specify an
+- alternative name to "extra."::
++ alternative name to "updates."::
+
+ $ make INSTALL_MOD_DIR=gandalf -C $KDIR \
+ M=$PWD modules_install
+diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
+index 6f03713b700390..2ffaa3cbd63f16 100644
+--- a/Documentation/locking/hwspinlock.rst
++++ b/Documentation/locking/hwspinlock.rst
+@@ -85,6 +85,17 @@ is already free).
+
+ Should be called from a process context (might sleep).
+
++::
++
++ int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
++
++After verifying the owner of the hwspinlock, release a previously acquired
++hwspinlock; returns 0 on success, or an appropriate error code on failure
++(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
++hwspinlock).
++
++Should be called from a process context (might sleep).
++
+ ::
+
+ int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
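+
+ A usage sketch covering the new bust helper above (HWLOCK_ID and
+ REMOTE_OWNER_ID are placeholders; illustrative only)::
+
+     #include <linux/hwspinlock.h>
+
+     #define HWLOCK_ID 0
+
+     static int demo(void)
+     {
+             struct hwspinlock *hwlock;
+             int ret;
+
+             hwlock = hwspin_lock_request_specific(HWLOCK_ID);
+             if (!hwlock)
+                     return -EBUSY;
+
+             ret = hwspin_lock_timeout(hwlock, 100);   /* ms, might sleep */
+             if (!ret)
+                     hwspin_unlock(hwlock);
+
+             /* if a remote owner died while holding the lock:
+              *   hwspin_lock_bust(hwlock, REMOTE_OWNER_ID);
+              */
+
+             hwspin_lock_free(hwlock);
+             return ret;
+     }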
+diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
+index c82e3ee20e51ee..8c71249e7c4d05 100644
+--- a/Documentation/mm/arch_pgtable_helpers.rst
++++ b/Documentation/mm/arch_pgtable_helpers.rst
+@@ -142,7 +142,8 @@ PMD Page Table Helpers
+ +---------------------------+--------------------------------------------------+
+ | pmd_swp_clear_soft_dirty | Clears a soft dirty swapped PMD |
+ +---------------------------+--------------------------------------------------+
+-| pmd_mkinvalid | Invalidates a mapped PMD [1] |
++| pmd_mkinvalid | Invalidates a present PMD; do not call for |
++| | non-present PMD [1] |
+ +---------------------------+--------------------------------------------------+
+ | pmd_set_huge | Creates a PMD huge mapping |
+ +---------------------------+--------------------------------------------------+
+@@ -198,7 +199,8 @@ PUD Page Table Helpers
+ +---------------------------+--------------------------------------------------+
+ | pud_mkdevmap | Creates a ZONE_DEVICE mapped PUD |
+ +---------------------------+--------------------------------------------------+
+-| pud_mkinvalid | Invalidates a mapped PUD [1] |
++| pud_mkinvalid | Invalidates a present PUD; do not call for |
++| | non-present PUD [1] |
+ +---------------------------+--------------------------------------------------+
+ | pud_set_huge | Creates a PUD huge mapping |
+ +---------------------------+--------------------------------------------------+
+diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst
+index c12838ce6b8de2..c59f22eb6a0f9a 100644
+--- a/Documentation/mm/page_table_check.rst
++++ b/Documentation/mm/page_table_check.rst
+@@ -14,7 +14,7 @@ Page table check performs extra verifications at the time when new pages become
+ accessible from the userspace by getting their page table entries (PTEs PMDs
+ etc.) added into the table.
+
+-In case of detected corruption, the kernel is crashed. There is a small
++In most cases of detected corruption, the kernel is crashed. There is a small
+ performance and memory overhead associated with the page table check. Therefore,
+ it is disabled by default, but can be optionally enabled on systems where the
+ extra hardening outweighs the performance costs. Also, because page table check
+@@ -22,6 +22,13 @@ is synchronous, it can help with debugging double map memory corruption issues,
+ by crashing the kernel at the time the wrong mapping occurs instead of later,
+ which is often the case with memory corruption bugs.
+
++It can also be used to check page table entries against various flags, and
++to dump warnings when illegal combinations of entry flags are detected.
++Currently, userfaultfd is the only user of this mechanism, to sanity-check
++the wr-protect bit against any writable flags. Illegal flag combinations
++will not immediately cause data corruption in this case, but they will make
++read-only data writable, leading to corruption when the page content is
++later modified.
++
+ Double mapping detection logic
+ ==============================
+
+diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+index 5eaa3ab6c73e7f..b842bcb14255b5 100644
+--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
++++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+@@ -54,6 +54,7 @@ ena_common_defs.h Common definitions for ena_com layer.
+ ena_regs_defs.h Definition of ENA PCI memory-mapped (MMIO) registers.
+ ena_netdev.[ch] Main Linux kernel driver.
+ ena_ethtool.c ethtool callbacks.
++ena_xdp.[ch] XDP files
+ ena_pci_id_tbl.h Supported device IDs.
+ ================= ======================================================
+
+diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
+index e33ad2401ad70c..562f46b4127449 100644
+--- a/Documentation/networking/devlink/devlink-port.rst
++++ b/Documentation/networking/devlink/devlink-port.rst
+@@ -126,7 +126,7 @@ Users may also set the RoCE capability of the function using
+ `devlink port function set roce` command.
+
+ Users may also set the function as migratable using
+-'devlink port function set migratable' command.
++`devlink port function set migratable` command.
+
+ Users may also set the IPsec crypto capability of the function using
+ `devlink port function set ipsec_crypto` command.
+diff --git a/Documentation/powerpc/features.rst b/Documentation/powerpc/features.rst
+index aeae73df86b0c5..ee4b95e04202d3 100644
+--- a/Documentation/powerpc/features.rst
++++ b/Documentation/powerpc/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features powerpc
++.. kernel-feat:: features powerpc
+diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
+index b48da698d6f253..bb96ca0f774b9a 100644
+--- a/Documentation/process/changes.rst
++++ b/Documentation/process/changes.rst
+@@ -31,7 +31,7 @@ you probably needn't concern yourself with pcmciautils.
+ ====================== =============== ========================================
+ GNU C 5.1 gcc --version
+ Clang/LLVM (optional) 11.0.0 clang --version
+-Rust (optional) 1.71.1 rustc --version
++Rust (optional) 1.73.0 rustc --version
+ bindgen (optional) 0.65.1 bindgen --version
+ GNU make 3.82 make --version
+ bash 4.2 bash --version
+diff --git a/Documentation/riscv/features.rst b/Documentation/riscv/features.rst
+index c70ef6ac2368c9..36e90144adabd1 100644
+--- a/Documentation/riscv/features.rst
++++ b/Documentation/riscv/features.rst
+@@ -1,3 +1,3 @@
+ .. SPDX-License-Identifier: GPL-2.0
+
+-.. kernel-feat:: $srctree/Documentation/features riscv
++.. kernel-feat:: features riscv
+diff --git a/Documentation/sound/soc/dapm.rst b/Documentation/sound/soc/dapm.rst
+index 8e44107933abf5..c3154ce6e1b273 100644
+--- a/Documentation/sound/soc/dapm.rst
++++ b/Documentation/sound/soc/dapm.rst
+@@ -234,7 +234,7 @@ corresponding soft power control. In this case it is necessary to create
+ a virtual widget - a widget with no control bits e.g.
+ ::
+
+- SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_DAPM_NOPM, 0, 0, NULL, 0),
++ SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ This can be used to merge two signal paths together in software.
+
+diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
+index a99716bf44b553..de5d132d94c595 100644
+--- a/Documentation/sphinx/cdomain.py
++++ b/Documentation/sphinx/cdomain.py
+@@ -93,7 +93,7 @@ def markup_ctype_refs(match):
+ #
+ RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')
+ def markup_c_expr(match):
+- return '\ ``' + match.group(2) + '``\ '
++ return '\\ ``' + match.group(2) + '``\\ '
+
+ #
+ # Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
+diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
+index b5feb5b1d90548..5911bd0d796571 100644
+--- a/Documentation/sphinx/kernel_abi.py
++++ b/Documentation/sphinx/kernel_abi.py
+@@ -39,8 +39,6 @@ import sys
+ import re
+ import kernellog
+
+-from os import path
+-
+ from docutils import nodes, statemachine
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives, Directive
+@@ -73,60 +71,26 @@ class KernelCmd(Directive):
+ }
+
+ def run(self):
+-
+ doc = self.state.document
+ if not doc.settings.file_insertion_enabled:
+ raise self.warning("docutils: file insertion disabled")
+
+- env = doc.settings.env
+- cwd = path.dirname(doc.current_source)
+- cmd = "get_abi.pl rest --enable-lineno --dir "
+- cmd += self.arguments[0]
+-
+- if 'rst' in self.options:
+- cmd += " --rst-source"
++ srctree = os.path.abspath(os.environ["srctree"])
+
+- srctree = path.abspath(os.environ["srctree"])
++ args = [
++ os.path.join(srctree, 'scripts/get_abi.pl'),
++ 'rest',
++ '--enable-lineno',
++ '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]),
++ ]
+
+- fname = cmd
+-
+- # extend PATH with $(srctree)/scripts
+- path_env = os.pathsep.join([
+- srctree + os.sep + "scripts",
+- os.environ["PATH"]
+- ])
+- shell_env = os.environ.copy()
+- shell_env["PATH"] = path_env
+- shell_env["srctree"] = srctree
++ if 'rst' in self.options:
++ args.append('--rst-source')
+
+- lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
++ lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
+ nodeList = self.nestedParse(lines, self.arguments[0])
+ return nodeList
+
+- def runCmd(self, cmd, **kwargs):
+- u"""Run command ``cmd`` and return its stdout as unicode."""
+-
+- try:
+- proc = subprocess.Popen(
+- cmd
+- , stdout = subprocess.PIPE
+- , stderr = subprocess.PIPE
+- , **kwargs
+- )
+- out, err = proc.communicate()
+-
+- out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+-
+- if proc.returncode != 0:
+- raise self.severe(
+- u"command '%s' failed with return code %d"
+- % (cmd, proc.returncode)
+- )
+- except OSError as exc:
+- raise self.severe(u"problems with '%s' directive: %s."
+- % (self.name, ErrorString(exc)))
+- return out
+-
+ def nestedParse(self, lines, fname):
+ env = self.state.document.settings.env
+ content = ViewList()
+@@ -138,7 +102,7 @@ class KernelCmd(Directive):
+ code_block += "\n " + l
+ lines = code_block + "\n\n"
+
+- line_regex = re.compile("^\.\. LINENO (\S+)\#([0-9]+)$")
++ line_regex = re.compile(r"^\.\. LINENO (\S+)\#([0-9]+)$")
+ ln = 0
+ n = 0
+ f = fname
+diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
+index 27b701ed3681ed..03ace5f01b5c02 100644
+--- a/Documentation/sphinx/kernel_feat.py
++++ b/Documentation/sphinx/kernel_feat.py
+@@ -37,8 +37,6 @@ import re
+ import subprocess
+ import sys
+
+-from os import path
+-
+ from docutils import nodes, statemachine
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives, Directive
+@@ -76,35 +74,28 @@ class KernelFeat(Directive):
+ self.state.document.settings.env.app.warn(message, prefix="")
+
+ def run(self):
+-
+ doc = self.state.document
+ if not doc.settings.file_insertion_enabled:
+ raise self.warning("docutils: file insertion disabled")
+
+ env = doc.settings.env
+- cwd = path.dirname(doc.current_source)
+- cmd = "get_feat.pl rest --enable-fname --dir "
+- cmd += self.arguments[0]
+-
+- if len(self.arguments) > 1:
+- cmd += " --arch " + self.arguments[1]
+
+- srctree = path.abspath(os.environ["srctree"])
++ srctree = os.path.abspath(os.environ["srctree"])
+
+- fname = cmd
++ args = [
++ os.path.join(srctree, 'scripts/get_feat.pl'),
++ 'rest',
++ '--enable-fname',
++ '--dir',
++ os.path.join(srctree, 'Documentation', self.arguments[0]),
++ ]
+
+- # extend PATH with $(srctree)/scripts
+- path_env = os.pathsep.join([
+- srctree + os.sep + "scripts",
+- os.environ["PATH"]
+- ])
+- shell_env = os.environ.copy()
+- shell_env["PATH"] = path_env
+- shell_env["srctree"] = srctree
++ if len(self.arguments) > 1:
++ args.extend(['--arch', self.arguments[1]])
+
+- lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
++ lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
+
+- line_regex = re.compile("^\.\. FILE (\S+)$")
++ line_regex = re.compile(r"^\.\. FILE (\S+)$")
+
+ out_lines = ""
+
+@@ -118,33 +109,9 @@ class KernelFeat(Directive):
+ else:
+ out_lines += line + "\n"
+
+- nodeList = self.nestedParse(out_lines, fname)
++ nodeList = self.nestedParse(out_lines, self.arguments[0])
+ return nodeList
+
+- def runCmd(self, cmd, **kwargs):
+- u"""Run command ``cmd`` and return its stdout as unicode."""
+-
+- try:
+- proc = subprocess.Popen(
+- cmd
+- , stdout = subprocess.PIPE
+- , stderr = subprocess.PIPE
+- , **kwargs
+- )
+- out, err = proc.communicate()
+-
+- out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+-
+- if proc.returncode != 0:
+- raise self.severe(
+- u"command '%s' failed with return code %d"
+- % (cmd, proc.returncode)
+- )
+- except OSError as exc:
+- raise self.severe(u"problems with '%s' directive: %s."
+- % (self.name, ErrorString(exc)))
+- return out
+-
+ def nestedParse(self, lines, fname):
+ content = ViewList()
+ node = nodes.section()
+diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
+index abe7680883771d..6387624423363d 100755
+--- a/Documentation/sphinx/kernel_include.py
++++ b/Documentation/sphinx/kernel_include.py
+@@ -97,7 +97,6 @@ class KernelInclude(Include):
+ # HINT: this is the only line I had to change / commented out:
+ #path = utils.relative_path(None, path)
+
+- path = nodes.reprunicode(path)
+ encoding = self.options.get(
+ 'encoding', self.state.document.settings.input_encoding)
+ e_handler=self.state.document.settings.input_encoding_error_handler
+diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
+index 9395892c7ba38b..8dc134904b9077 100644
+--- a/Documentation/sphinx/kerneldoc.py
++++ b/Documentation/sphinx/kerneldoc.py
+@@ -130,7 +130,7 @@ class KernelDocDirective(Directive):
+ result = ViewList()
+
+ lineoffset = 0;
+- line_regex = re.compile("^\.\. LINENO ([0-9]+)$")
++ line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
+ for line in lines:
+ match = line_regex.search(line)
+ if match:
+diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
+index 328b3631a585cd..dcad0fff4723ee 100755
+--- a/Documentation/sphinx/maintainers_include.py
++++ b/Documentation/sphinx/maintainers_include.py
+@@ -77,7 +77,7 @@ class MaintainersInclude(Include):
+ line = line.rstrip()
+
+ # Linkify all non-wildcard refs to ReST files in Documentation/.
+- pat = '(Documentation/([^\s\?\*]*)\.rst)'
++ pat = r'(Documentation/([^\s\?\*]*)\.rst)'
+ m = re.search(pat, line)
+ if m:
+ # maintainers.rst is in a subdirectory, so include "../".
+@@ -90,11 +90,11 @@ class MaintainersInclude(Include):
+ output = "| %s" % (line.replace("\\", "\\\\"))
+ # Look for and record field letter to field name mappings:
+ # R: Designated *reviewer*: FullName <address@domain>
+- m = re.search("\s(\S):\s", line)
++ m = re.search(r"\s(\S):\s", line)
+ if m:
+ field_letter = m.group(1)
+ if field_letter and not field_letter in fields:
+- m = re.search("\*([^\*]+)\*", line)
++ m = re.search(r"\*([^\*]+)\*", line)
+ if m:
+ fields[field_letter] = m.group(1)
+ elif subsystems:
+@@ -112,7 +112,7 @@ class MaintainersInclude(Include):
+ field_content = ""
+
+ # Collapse whitespace in subsystem name.
+- heading = re.sub("\s+", " ", line)
++ heading = re.sub(r"\s+", " ", line)
+ output = output + "%s\n%s" % (heading, "~" * len(heading))
+ field_prev = ""
+ else:
+diff --git a/Documentation/translations/zh_CN/arch/loongarch/features.rst b/Documentation/translations/zh_CN/arch/loongarch/features.rst
+index 82bfac180bdc04..cec38dda8298c1 100644
+--- a/Documentation/translations/zh_CN/arch/loongarch/features.rst
++++ b/Documentation/translations/zh_CN/arch/loongarch/features.rst
+@@ -5,4 +5,4 @@
+ :Original: Documentation/arch/loongarch/features.rst
+ :Translator: Huacai Chen <chenhuacai@loongson.cn>
+
+-.. kernel-feat:: $srctree/Documentation/features loongarch
++.. kernel-feat:: features loongarch
+diff --git a/Documentation/translations/zh_CN/arch/mips/features.rst b/Documentation/translations/zh_CN/arch/mips/features.rst
+index da1b956e4a40f6..0d6df97db069bb 100644
+--- a/Documentation/translations/zh_CN/arch/mips/features.rst
++++ b/Documentation/translations/zh_CN/arch/mips/features.rst
+@@ -10,4 +10,4 @@
+
+ .. _cn_features:
+
+-.. kernel-feat:: $srctree/Documentation/features mips
++.. kernel-feat:: features mips
+diff --git a/Documentation/translations/zh_TW/dev-tools/index.rst b/Documentation/translations/zh_TW/dev-tools/index.rst
+new file mode 100644
+index 00000000000000..8f101db5a07fff
+--- /dev/null
++++ b/Documentation/translations/zh_TW/dev-tools/index.rst
+@@ -0,0 +1,40 @@
++.. include:: ../disclaimer-zh_TW.rst
++
++:Original: Documentation/dev-tools/index.rst
++:Translator: Min-Hua Chen <minhuadotchen@gmail.com>
++
++============
++內核開發工具
++============
++
++本文檔是有關內核開發工具文檔的合集。
++目前這些文檔已經整理在一起,不需要再花費額外的精力。
++歡迎任何補丁。
++
++有關測試專用工具的簡要概述,參見
++Documentation/dev-tools/testing-overview.rst
++
++.. class:: toc-title
++
++ 目錄
++
++.. toctree::
++ :maxdepth: 2
++
++ sparse
++
++Todolist:
++
++ - coccinelle
++ - kcov
++ - ubsan
++ - kmemleak
++ - kcsan
++ - kfence
++ - kgdb
++ - kselftest
++ - kunit/index
++ - testing-overview
++ - gcov
++ - kasan
++ - gdb-kernel-debugging
+diff --git a/Documentation/translations/zh_TW/dev-tools/sparse.txt b/Documentation/translations/zh_TW/dev-tools/sparse.txt
+new file mode 100644
+index 00000000000000..35d3d1d748e6f1
+--- /dev/null
++++ b/Documentation/translations/zh_TW/dev-tools/sparse.txt
+@@ -0,0 +1,91 @@
++Chinese translated version of Documentation/dev-tools/sparse.rst
++
++If you have any comment or update to the content, please contact the
++original document maintainer directly. However, if you have a problem
++communicating in English you can also ask the Chinese maintainer for
++help. Contact the Chinese maintainer if this translation is outdated
++or if there is a problem with the translation.
++
++Traditional Chinese maintainer: Hu Haowen <src.res.211@gmail.com>
++---------------------------------------------------------------------
++Documentation/dev-tools/sparse.rst 的繁體中文翻譯
++
++如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
++交流有困難的話,也可以向繁體中文版維護者求助。如果本翻譯更新不及時或
++者翻譯存在問題,請聯繫繁體中文版維護者。
++
++繁體中文版維護者: 胡皓文 Hu Haowen <src.res.211@gmail.com>
++繁體中文版翻譯者: 胡皓文 Hu Haowen <src.res.211@gmail.com>
++
++以下爲正文
++---------------------------------------------------------------------
++
++Copyright 2004 Linus Torvalds
++Copyright 2004 Pavel Machek <pavel@ucw.cz>
++Copyright 2006 Bob Copeland <me@bobcopeland.com>
++
++使用 sparse 工具做類型檢查
++~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++"__bitwise" 是一種類型屬性,所以你應該這樣使用它:
++
++ typedef int __bitwise pm_request_t;
++
++ enum pm_request {
++ PM_SUSPEND = (__force pm_request_t) 1,
++ PM_RESUME = (__force pm_request_t) 2
++ };
++
++這樣會使 PM_SUSPEND 和 PM_RESUME 成爲位方式(bitwise)整數(使用"__force"
++是因爲 sparse 會抱怨改變位方式的類型轉換,但是這裡我們確實需要強制進行轉
++換)。而且因爲所有枚舉值都使用了相同的類型,這裡的"enum pm_request"也將
++會使用那個類型做爲底層實現。
++
++而且使用 gcc 編譯的時候,所有的 __bitwise/__force 都會消失,最後在 gcc
++看來它們只不過是普通的整數。
++
++坦白來說,你並不需要使用枚舉類型。上面那些實際都可以濃縮成一個特殊的"int
++__bitwise"類型。
++
++所以更簡單的辦法只要這樣做:
++
++ typedef int __bitwise pm_request_t;
++
++ #define PM_SUSPEND ((__force pm_request_t) 1)
++ #define PM_RESUME ((__force pm_request_t) 2)
++
++現在你就有了嚴格的類型檢查所需要的所有基礎架構。
++
++一個小提醒:常數整數"0"是特殊的。你可以直接把常數零當作位方式整數使用而
++不用擔心 sparse 會抱怨。這是因爲"bitwise"(恰如其名)是用來確保不同位方
++式類型不會被弄混(小尾模式,大尾模式,cpu尾模式,或者其他),對他們來說
++常數"0"確實是特殊的。
++
++獲取 sparse 工具
++~~~~~~~~~~~~~~~~
++
++你可以從 Sparse 的主頁獲取最新的發布版本:
++
++ https://www.kernel.org/pub/software/devel/sparse/dist/
++
++或者,你也可以使用 git 克隆最新的 sparse 開發版本:
++
++ git://git.kernel.org/pub/scm/devel/sparse/sparse.git
++
++一旦你下載了源碼,只要以普通用戶身份運行:
++
++ make
++ make install
++
++它將會被自動安裝到你的 ~/bin 目錄下。
++
++使用 sparse 工具
++~~~~~~~~~~~~~~~~
++
++用"make C=1"命令來編譯內核,會對所有重新編譯的 C 文件使用 sparse 工具。
++或者使用"make C=2"命令,無論文件是否被重新編譯都會對其使用 sparse 工具。
++如果你已經編譯了內核,用後一種方式可以很快地檢查整個源碼樹。
++
++make 的可選變量 CHECKFLAGS 可以用來向 sparse 工具傳遞參數。編譯系統會自
++動向 sparse 工具傳遞 -Wbitwise 參數。
++
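+
+ For reference, the C pattern documented by the translated text above,
+ taken verbatim from the file contents::
+
+     typedef int __bitwise pm_request_t;
+
+     #define PM_SUSPEND ((__force pm_request_t) 1)
+     #define PM_RESUME  ((__force pm_request_t) 2)
+
+ Building with "make C=1" then makes sparse flag any mixing of
+ pm_request_t with plain integers, while gcc still sees ordinary ints.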
+diff --git a/Documentation/translations/zh_TW/index.rst b/Documentation/translations/zh_TW/index.rst
+index d1cf0b4d8e46d9..ffcaf3272fe70a 100644
+--- a/Documentation/translations/zh_TW/index.rst
++++ b/Documentation/translations/zh_TW/index.rst
+@@ -55,11 +55,11 @@ TODOList:
+ :maxdepth: 1
+
+ process/license-rules
++ dev-tools/index
+
+ TODOList:
+
+ * doc-guide/index
+-* dev-tools/index
+ * dev-tools/testing-overview
+ * kernel-hacking/index
+ * rust/index
+diff --git a/Documentation/translations/zh_TW/sparse.txt b/Documentation/translations/zh_TW/sparse.txt
+deleted file mode 100644
+index 35d3d1d748e6f1..00000000000000
+--- a/Documentation/translations/zh_TW/sparse.txt
++++ /dev/null
+@@ -1,91 +0,0 @@
+-Chinese translated version of Documentation/dev-tools/sparse.rst
+-
+-If you have any comment or update to the content, please contact the
+-original document maintainer directly. However, if you have a problem
+-communicating in English you can also ask the Chinese maintainer for
+-help. Contact the Chinese maintainer if this translation is outdated
+-or if there is a problem with the translation.
+-
+-Traditional Chinese maintainer: Hu Haowen <src.res.211@gmail.com>
+----------------------------------------------------------------------
+-Documentation/dev-tools/sparse.rst 的繁體中文翻譯
+-
+-如果想評論或更新本文的內容,請直接聯繫原文檔的維護者。如果你使用英文
+-交流有困難的話,也可以向繁體中文版維護者求助。如果本翻譯更新不及時或
+-者翻譯存在問題,請聯繫繁體中文版維護者。
+-
+-繁體中文版維護者: 胡皓文 Hu Haowen <src.res.211@gmail.com>
+-繁體中文版翻譯者: 胡皓文 Hu Haowen <src.res.211@gmail.com>
+-
+-以下爲正文
+----------------------------------------------------------------------
+-
+-Copyright 2004 Linus Torvalds
+-Copyright 2004 Pavel Machek <pavel@ucw.cz>
+-Copyright 2006 Bob Copeland <me@bobcopeland.com>
+-
+-使用 sparse 工具做類型檢查
+-~~~~~~~~~~~~~~~~~~~~~~~~~~
+-
+-"__bitwise" 是一種類型屬性,所以你應該這樣使用它:
+-
+- typedef int __bitwise pm_request_t;
+-
+- enum pm_request {
+- PM_SUSPEND = (__force pm_request_t) 1,
+- PM_RESUME = (__force pm_request_t) 2
+- };
+-
+-這樣會使 PM_SUSPEND 和 PM_RESUME 成爲位方式(bitwise)整數(使用"__force"
+-是因爲 sparse 會抱怨改變位方式的類型轉換,但是這裡我們確實需要強制進行轉
+-換)。而且因爲所有枚舉值都使用了相同的類型,這裡的"enum pm_request"也將
+-會使用那個類型做爲底層實現。
+-
+-而且使用 gcc 編譯的時候,所有的 __bitwise/__force 都會消失,最後在 gcc
+-看來它們只不過是普通的整數。
+-
+-坦白來說,你並不需要使用枚舉類型。上面那些實際都可以濃縮成一個特殊的"int
+-__bitwise"類型。
+-
+-所以更簡單的辦法只要這樣做:
+-
+- typedef int __bitwise pm_request_t;
+-
+- #define PM_SUSPEND ((__force pm_request_t) 1)
+- #define PM_RESUME ((__force pm_request_t) 2)
+-
+-現在你就有了嚴格的類型檢查所需要的所有基礎架構。
+-
+-一個小提醒:常數整數"0"是特殊的。你可以直接把常數零當作位方式整數使用而
+-不用擔心 sparse 會抱怨。這是因爲"bitwise"(恰如其名)是用來確保不同位方
+-式類型不會被弄混(小尾模式,大尾模式,cpu尾模式,或者其他),對他們來說
+-常數"0"確實是特殊的。
+-
+-獲取 sparse 工具
+-~~~~~~~~~~~~~~~~
+-
+-你可以從 Sparse 的主頁獲取最新的發布版本:
+-
+- https://www.kernel.org/pub/software/devel/sparse/dist/
+-
+-或者,你也可以使用 git 克隆最新的 sparse 開發版本:
+-
+- git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+-
+-一旦你下載了源碼,只要以普通用戶身份運行:
+-
+- make
+- make install
+-
+-它將會被自動安裝到你的 ~/bin 目錄下。
+-
+-使用 sparse 工具
+-~~~~~~~~~~~~~~~~
+-
+-用"make C=1"命令來編譯內核,會對所有重新編譯的 C 文件使用 sparse 工具。
+-或者使用"make C=2"命令,無論文件是否被重新編譯都會對其使用 sparse 工具。
+-如果你已經編譯了內核,用後一種方式可以很快地檢查整個源碼樹。
+-
+-make 的可選變量 CHECKFLAGS 可以用來向 sparse 工具傳遞參數。編譯系統會自
+-動向 sparse 工具傳遞 -Wbitwise 參數。
+-
+diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
+index 0ffeece1e0c8e9..6332e8395263b0 100644
+--- a/Documentation/userspace-api/media/mediactl/media-types.rst
++++ b/Documentation/userspace-api/media/mediactl/media-types.rst
+@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements
+ are origins of links.
+
+ * - ``MEDIA_PAD_FL_MUST_CONNECT``
+- - If this flag is set and the pad is linked to any other pad, then
+- at least one of those links must be enabled for the entity to be
+- able to stream. There could be temporary reasons (e.g. device
+- configuration dependent) for the pad to need enabled links even
+- when this flag isn't set; the absence of the flag doesn't imply
+- there is none.
++ - If this flag is set, then for this pad to be able to stream, it must
++ be connected by at least one enabled link. There could be temporary
++ reasons (e.g. device configuration dependent) for the pad to need
++ enabled links even when this flag isn't set; the absence of the flag
++ doesn't imply there is none.
+
+
+ One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``
+diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
+index 3a034db5e55f89..887d9d2fed492b 100644
+--- a/Documentation/virt/kvm/locking.rst
++++ b/Documentation/virt/kvm/locking.rst
+@@ -9,7 +9,7 @@ KVM Lock Overview
+
+ The acquisition orders for mutexes are as follows:
+
+-- cpus_read_lock() is taken outside kvm_lock
++- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock
+
+ - kvm->lock is taken outside vcpu->mutex
+
+@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows:
+ are taken on the waiting side when modifying memslots, so MMU notifiers
+ must not take either kvm->slots_lock or kvm->slots_arch_lock.
+
++cpus_read_lock() vs kvm_lock:
++
++- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that
++ being the official ordering, as it is quite easy to unknowingly trigger
++ cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list,
++ e.g. avoid complex operations when possible.
++
+ For SRCU:
+
+ - ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
+@@ -228,10 +235,17 @@ time it will be set using the Dirty tracking mechanism described above.
+ :Type: mutex
+ :Arch: any
+ :Protects: - vm_list
+- - kvm_usage_count
++
++``kvm_usage_lock``
++^^^^^^^^^^^^^^^^^^
++
++:Type: mutex
++:Arch: any
++:Protects: - kvm_usage_count
+ - hardware virtualization enable/disable
+-:Comment: KVM also disables CPU hotplug via cpus_read_lock() during
+- enable/disable.
++:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment
++ on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via
++ cpus_read_lock() when enabling/disabling virtualization.
+
+ ``kvm->mn_invalidate_lock``
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+@@ -291,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above.
+ wakeup.
+
+ ``vendor_module_lock``
+-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++^^^^^^^^^^^^^^^^^^^^^^
+ :Type: mutex
+ :Arch: x86
+ :Protects: loading a vendor module (kvm_amd or kvm_intel)
+-:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
+- taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
+- many operations need to take cpu_hotplug_lock when loading a vendor module,
+- e.g. updating static calls.
++:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken
++ in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
++ cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
++ operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
++ updating static calls.
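+
+ An ordering sketch matching the mutex rules above (illustrative only, not
+ code from this patch)::
+
+     #include <linux/cpu.h>
+     #include <linux/kvm_host.h>
+
+     static void walk_vms(void)
+     {
+             struct kvm *kvm;
+
+             cpus_read_lock();               /* outermost */
+             mutex_lock(&kvm_lock);          /* nests inside */
+             list_for_each_entry(kvm, &vm_list, vm_list)
+                     ;                       /* keep the walk simple */
+             mutex_unlock(&kvm_lock);
+             cpus_read_unlock();
+     }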
+diff --git a/MAINTAINERS b/MAINTAINERS
+index dd5de540ec0b52..ae4c0cec507360 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -8142,7 +8142,7 @@ M: Geoffrey D. Bennett <g@b4.vu>
+ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+ S: Maintained
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+-F: sound/usb/mixer_scarlett_gen2.c
++F: sound/usb/mixer_scarlett2.c
+
+ FORCEDETH GIGABIT ETHERNET DRIVER
+ M: Rain River <rain.1986.08.12@gmail.com>
+@@ -10157,6 +10157,14 @@ L: linux-media@vger.kernel.org
+ S: Maintained
+ F: drivers/media/rc/iguanair.c
+
++IIO BACKEND FRAMEWORK
++M: Nuno Sa <nuno.sa@analog.com>
++R: Olivier Moysan <olivier.moysan@foss.st.com>
++L: linux-iio@vger.kernel.org
++S: Maintained
++F: drivers/iio/industrialio-backend.c
++F: include/linux/iio/backend.h
++
+ IIO DIGITAL POTENTIOMETER DAC
+ M: Peter Rosin <peda@axentia.se>
+ L: linux-iio@vger.kernel.org
+@@ -13694,7 +13702,7 @@ M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ M: "Paul E. McKenney" <paulmck@kernel.org>
+ L: linux-kernel@vger.kernel.org
+ S: Supported
+-F: arch/powerpc/include/asm/membarrier.h
++F: arch/*/include/asm/membarrier.h
+ F: include/uapi/linux/membarrier.h
+ F: kernel/sched/membarrier.c
+
+@@ -17380,7 +17388,7 @@ F: drivers/video/backlight/pwm_bl.c
+ F: include/dt-bindings/pwm/
+ F: include/linux/pwm.h
+ F: include/linux/pwm_backlight.h
+-K: pwm_(config|apply_state|ops)
++K: pwm_(config|apply_might_sleep|ops)
+
+ PXA GPIO DRIVER
+ M: Robert Jarzmik <robert.jarzmik@free.fr>
+@@ -23630,6 +23638,7 @@ F: include/xen/arm/swiotlb-xen.h
+ F: include/xen/swiotlb-xen.h
+
+ XFS FILESYSTEM
++M: Catherine Hoang <catherine.hoang@oracle.com>
+ M: Chandan Babu R <chandan.babu@oracle.com>
+ R: Darrick J. Wong <djwong@kernel.org>
+ L: linux-xfs@vger.kernel.org
+diff --git a/Makefile b/Makefile
+index 5c418efbe89b6c..f80e78c7cf2006 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,9 +1,9 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 0
++SUBLEVEL = 58
+ EXTRAVERSION =
+-NAME = Hurr durr I'ma ninja sloth
++NAME = Pinguïn Aangedreven
+
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+@@ -951,7 +951,6 @@ endif
+ ifdef CONFIG_LTO_CLANG
+ ifdef CONFIG_LTO_CLANG_THIN
+ CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit
+-KBUILD_LDFLAGS += --thinlto-cache-dir=$(extmod_prefix).thinlto-cache
+ else
+ CC_FLAGS_LTO := -flto
+ endif
+@@ -1317,6 +1316,14 @@ scripts_unifdef: scripts_basic
+ quiet_cmd_install = INSTALL $(INSTALL_PATH)
+ cmd_install = unset sub_make_done; $(srctree)/scripts/install.sh
+
++# ---------------------------------------------------------------------------
++# vDSO install
++
++PHONY += vdso_install
++vdso_install: export INSTALL_FILES = $(vdso-install-y)
++vdso_install:
++ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vdsoinst
++
+ # ---------------------------------------------------------------------------
+ # Tools
+
+@@ -1474,7 +1481,7 @@ endif # CONFIG_MODULES
+ # Directories & files removed with 'make clean'
+ CLEAN_FILES += vmlinux.symvers modules-only.symvers \
+ modules.builtin modules.builtin.modinfo modules.nsdeps \
+- compile_commands.json .thinlto-cache rust/test \
++ compile_commands.json rust/test \
+ rust-project.json .vmlinux.objs .vmlinux.export.c
+
+ # Directories & files removed with 'make mrproper'
+@@ -1560,6 +1567,7 @@ help:
+ @echo '* vmlinux - Build the bare kernel'
+ @echo '* modules - Build all modules'
+ @echo ' modules_install - Install all modules to INSTALL_MOD_PATH (default: /)'
++ @echo ' vdso_install - Install unstripped vdso to INSTALL_MOD_PATH (default: /)'
+ @echo ' dir/ - Build all files in dir and below'
+ @echo ' dir/file.[ois] - Build specified target only'
+ @echo ' dir/file.ll - Build the LLVM assembly file'
+@@ -1777,7 +1785,7 @@ PHONY += compile_commands.json
+
+ clean-dirs := $(KBUILD_EXTMOD)
+ clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
+- $(KBUILD_EXTMOD)/compile_commands.json $(KBUILD_EXTMOD)/.thinlto-cache
++ $(KBUILD_EXTMOD)/compile_commands.json
+
+ PHONY += prepare
+ # now expand this into a simple variable to reduce the cost of shell evaluations
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 12d51495caec18..09603e0bc2cc16 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -9,6 +9,14 @@
+ #
+ source "arch/$(SRCARCH)/Kconfig"
+
++config ARCH_CONFIGURES_CPU_MITIGATIONS
++ bool
++
++if !ARCH_CONFIGURES_CPU_MITIGATIONS
++config CPU_MITIGATIONS
++ def_bool y
++endif
++
+ menu "General architecture-dependent options"
+
+ config ARCH_HAS_SUBPAGE_FAULTS
+@@ -681,6 +689,7 @@ config SHADOW_CALL_STACK
+ bool "Shadow Call Stack"
+ depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
+ depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
++ depends on MMU
+ help
+ This option enables the compiler's Shadow Call Stack, which
+ uses a shadow stack to protect function return addresses from
+diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
+index fb3025396ac964..cfdf90bc8b3f86 100644
+--- a/arch/alpha/kernel/rtc.c
++++ b/arch/alpha/kernel/rtc.c
+@@ -80,7 +80,7 @@ init_rtc_epoch(void)
+ static int
+ alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ {
+- int ret = mc146818_get_time(tm);
++ int ret = mc146818_get_time(tm, 10);
+
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
+index c80258ec332ffe..85a679ce061c25 100644
+--- a/arch/alpha/kernel/setup.c
++++ b/arch/alpha/kernel/setup.c
+@@ -131,6 +131,7 @@ static void determine_cpu_caches (unsigned int);
+
+ static char __initdata command_line[COMMAND_LINE_SIZE];
+
++#ifdef CONFIG_VGA_CONSOLE
+ /*
+ * The format of "screen_info" is strange, and due to early
+ * i386-setup code. This is just enough to make the console
+@@ -147,6 +148,7 @@ struct screen_info screen_info = {
+ };
+
+ EXPORT_SYMBOL(screen_info);
++#endif
+
+ /*
+ * The direct map I/O window, if any. This should be the same
+diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
+index 7c420d8dac53d9..7de8a5d2d20667 100644
+--- a/arch/alpha/kernel/sys_sio.c
++++ b/arch/alpha/kernel/sys_sio.c
+@@ -57,11 +57,13 @@ sio_init_irq(void)
+ static inline void __init
+ alphabook1_init_arch(void)
+ {
++#ifdef CONFIG_VGA_CONSOLE
+ /* The AlphaBook1 has LCD video fixed at 800x600,
+ 37 rows and 100 cols. */
+ screen_info.orig_y = 37;
+ screen_info.orig_video_cols = 100;
+ screen_info.orig_video_lines = 37;
++#endif
+
+ lca_init_arch();
+ }
+diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
+index 6691f425507788..41b980df862b14 100644
+--- a/arch/arc/boot/dts/hsdk.dts
++++ b/arch/arc/boot/dts/hsdk.dts
+@@ -205,7 +205,6 @@ dmac_cfg_clk: dmac-gpu-cfg-clk {
+ };
+
+ gmac: ethernet@8000 {
+- #interrupt-cells = <1>;
+ compatible = "snps,dwmac";
+ reg = <0x8000 0x2000>;
+ interrupts = <10>;
+diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
+index bd5b1a9a054402..6fc74500a9f52a 100644
+--- a/arch/arc/include/asm/cacheflush.h
++++ b/arch/arc/include/asm/cacheflush.h
+@@ -40,6 +40,7 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
+
+ /* TBD: optimize this */
+ #define flush_cache_vmap(start, end) flush_cache_all()
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) flush_cache_all()
+
+ #define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
+diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
+index 9d96180797396b..a339223d9e052b 100644
+--- a/arch/arc/include/asm/jump_label.h
++++ b/arch/arc/include/asm/jump_label.h
+@@ -31,7 +31,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+ {
+- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
++ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ "1: \n"
+ "nop \n"
+ ".pushsection __jump_table, \"aw\" \n"
+@@ -47,7 +47,7 @@ static __always_inline bool arch_static_branch(struct static_key *key,
+ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+ {
+- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
++ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ "1: \n"
+ "b %l[l_yes] \n"
+ ".pushsection __jump_table, \"aw\" \n"
+diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
+index 4dcf8589b708ac..d08a5092c2b4d4 100644
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -153,7 +153,7 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
+ {
+ int n = 0;
+ #ifdef CONFIG_ISA_ARCV2
+- const char *release, *cpu_nm, *isa_nm = "ARCv2";
++ const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
+ int dual_issue = 0, dual_enb = 0, mpy_opt, present;
+ int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
+ char mpy_nm[16], lpb_nm[32];
+@@ -172,8 +172,6 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
+ * releases only update it.
+ */
+
+- cpu_nm = "HS38";
+-
+ if (info->arcver > 0x50 && info->arcver <= 0x53) {
+ release = arc_hs_rel[info->arcver - 0x51].str;
+ } else {
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index 0b3bb529d24632..8f6f4a5429646f 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -62,7 +62,7 @@ struct rt_sigframe {
+ unsigned int sigret_magic;
+ };
+
+-static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+ {
+ int err = 0;
+ #ifndef CONFIG_ISA_ARCOMPACT
+@@ -75,12 +75,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+ #else
+ v2abi.r58 = v2abi.r59 = 0;
+ #endif
+- err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
++ err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
+ #endif
+ return err;
+ }
+
+-static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
+ {
+ int err = 0;
+ #ifndef CONFIG_ISA_ARCOMPACT
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 9557808e8937b1..57c0448d017a13 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -589,8 +589,8 @@ source "arch/arm/mm/Kconfig"
+
+ config IWMMXT
+ bool "Enable iWMMXt support"
+- depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
+- default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
++ depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
++ default y if PXA27x || PXA3xx || ARCH_MMP
+ help
+ Enable support for iWMMXt context switching at run time if
+ running on a CPU that supports it.
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 547e5856eaa0d3..5ba42f69f8ce0c 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -304,11 +304,7 @@ $(INSTALL_TARGETS): KBUILD_IMAGE = $(boot)/$(patsubst %install,%Image,$@)
+ $(INSTALL_TARGETS):
+ $(call cmd,install)
+
+-PHONY += vdso_install
+-vdso_install:
+-ifeq ($(CONFIG_VDSO),y)
+- $(Q)$(MAKE) $(build)=arch/arm/vdso $@
+-endif
++vdso-install-$(CONFIG_VDSO) += arch/arm/vdso/vdso.so.dbg
+
+ # My testing targets (bypasses dependencies)
+ bp:; $(Q)$(MAKE) $(build)=$(boot) $(boot)/bootpImage
+@@ -331,7 +327,6 @@ define archhelp
+ echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
+ echo ' install to $$(INSTALL_PATH) and run lilo'
+- echo ' vdso_install - Install unstripped vdso.so to $$(INSTALL_MOD_PATH)/vdso'
+ echo
+ echo ' multi_v7_lpae_defconfig - multi_v7_defconfig with CONFIG_ARM_LPAE enabled'
+ endef
+diff --git a/arch/arm/boot/dts/allwinner/Makefile b/arch/arm/boot/dts/allwinner/Makefile
+index eebb5a0c873ad4..296be33ec93465 100644
+--- a/arch/arm/boot/dts/allwinner/Makefile
++++ b/arch/arm/boot/dts/allwinner/Makefile
+@@ -259,68 +259,6 @@ dtb-$(CONFIG_MACH_SUN8I) += \
+ sun8i-v3s-licheepi-zero.dtb \
+ sun8i-v3s-licheepi-zero-dock.dtb \
+ sun8i-v40-bananapi-m2-berry.dtb
+-dtb-$(CONFIG_MACH_SUN8I) += \
+- sun8i-a23-evb.dtb \
+- sun8i-a23-gt90h-v4.dtb \
+- sun8i-a23-inet86dz.dtb \
+- sun8i-a23-ippo-q8h-v5.dtb \
+- sun8i-a23-ippo-q8h-v1.2.dtb \
+- sun8i-a23-polaroid-mid2407pxe03.dtb \
+- sun8i-a23-polaroid-mid2809pxe04.dtb \
+- sun8i-a23-q8-tablet.dtb \
+- sun8i-a33-et-q8-v1.6.dtb \
+- sun8i-a33-ga10h-v1.1.dtb \
+- sun8i-a33-inet-d978-rev2.dtb \
+- sun8i-a33-ippo-q8h-v1.2.dtb \
+- sun8i-a33-olinuxino.dtb \
+- sun8i-a33-q8-tablet.dtb \
+- sun8i-a33-sinlinx-sina33.dtb \
+- sun8i-a83t-allwinner-h8homlet-v2.dtb \
+- sun8i-a83t-bananapi-m3.dtb \
+- sun8i-a83t-cubietruck-plus.dtb \
+- sun8i-a83t-tbs-a711.dtb \
+- sun8i-h2-plus-bananapi-m2-zero.dtb \
+- sun8i-h2-plus-libretech-all-h3-cc.dtb \
+- sun8i-h2-plus-orangepi-r1.dtb \
+- sun8i-h2-plus-orangepi-zero.dtb \
+- sun8i-h3-bananapi-m2-plus.dtb \
+- sun8i-h3-bananapi-m2-plus-v1.2.dtb \
+- sun8i-h3-beelink-x2.dtb \
+- sun8i-h3-libretech-all-h3-cc.dtb \
+- sun8i-h3-mapleboard-mp130.dtb \
+- sun8i-h3-nanopi-duo2.dtb \
+- sun8i-h3-nanopi-m1.dtb\
+- \
+- sun8i-h3-nanopi-m1-plus.dtb \
+- sun8i-h3-nanopi-neo.dtb \
+- sun8i-h3-nanopi-neo-air.dtb \
+- sun8i-h3-nanopi-r1.dtb \
+- sun8i-h3-orangepi-2.dtb \
+- sun8i-h3-orangepi-lite.dtb \
+- sun8i-h3-orangepi-one.dtb \
+- sun8i-h3-orangepi-pc.dtb \
+- sun8i-h3-orangepi-pc-plus.dtb \
+- sun8i-h3-orangepi-plus.dtb \
+- sun8i-h3-orangepi-plus2e.dtb \
+- sun8i-h3-orangepi-zero-plus2.dtb \
+- sun8i-h3-rervision-dvk.dtb \
+- sun8i-h3-zeropi.dtb \
+- sun8i-h3-emlid-neutis-n5h3-devboard.dtb \
+- sun8i-r16-bananapi-m2m.dtb \
+- sun8i-r16-nintendo-nes-classic.dtb \
+- sun8i-r16-nintendo-super-nes-classic.dtb \
+- sun8i-r16-parrot.dtb \
+- sun8i-r40-bananapi-m2-ultra.dtb \
+- sun8i-r40-oka40i-c.dtb \
+- sun8i-s3-elimo-initium.dtb \
+- sun8i-s3-lichee-zero-plus.dtb \
+- sun8i-s3-pinecube.dtb \
+- sun8i-t113s-mangopi-mq-r-t113.dtb \
+- sun8i-t3-cqa3t-bv3.dtb \
+- sun8i-v3-sl631-imx179.dtb \
+- sun8i-v3s-licheepi-zero.dtb \
+- sun8i-v3s-licheepi-zero-dock.dtb \
+- sun8i-v40-bananapi-m2-berry.dtb
+ dtb-$(CONFIG_MACH_SUN9I) += \
+ sun9i-a80-optimus.dtb \
+ sun9i-a80-cubieboard4.dtb
+diff --git a/arch/arm/boot/dts/amazon/alpine.dtsi b/arch/arm/boot/dts/amazon/alpine.dtsi
+index ff68dfb4eb7874..90bd12feac0101 100644
+--- a/arch/arm/boot/dts/amazon/alpine.dtsi
++++ b/arch/arm/boot/dts/amazon/alpine.dtsi
+@@ -167,7 +167,6 @@ pcie@fbc00000 {
+ msix: msix@fbe00000 {
+ compatible = "al,alpine-msix";
+ reg = <0x0 0xfbe00000 0x0 0x100000>;
+- interrupt-controller;
+ msi-controller;
+ al,msi-base-spi = <96>;
+ al,msi-num-spis = <64>;
+diff --git a/arch/arm/boot/dts/arm/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
+index efed325af88d20..d99bac02232b37 100644
+--- a/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
++++ b/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
+@@ -451,7 +451,7 @@ pb1176_serial3: serial@1010f000 {
+
+ /* Direct-mapped development chip ROM */
+ pb1176_rom@10200000 {
+- compatible = "direct-mapped";
++ compatible = "mtd-rom";
+ reg = <0x10200000 0x4000>;
+ bank-width = <1>;
+ };
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
+index e899de681f4752..5be0e8fd2633c2 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
+@@ -45,8 +45,8 @@ spi1_gpio: spi1-gpio {
+ num-chipselects = <1>;
+ cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
+
+- tpmdev@0 {
+- compatible = "tcg,tpm_tis-spi";
++ tpm@0 {
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
+index a677c827e758fe..5a8169bbda8792 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
+@@ -80,8 +80,8 @@ spi_gpio: spi {
+ gpio-miso = <&gpio ASPEED_GPIO(R, 5) GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+
+- tpmdev@0 {
+- compatible = "tcg,tpm_tis-spi";
++ tpm@0 {
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+index 3f6010ef2b86f2..213023bc5aec41 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+@@ -456,7 +456,7 @@ &i2c1 {
+ status = "okay";
+
+ tpm: tpm@2e {
+- compatible = "tcg,tpm-tis-i2c";
++ compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
+index 530491ae5eb260..857cb26ed6d7e8 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
++++ b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
+@@ -466,7 +466,6 @@ i2c_ic: interrupt-controller@0 {
+ i2c0: i2c-bus@40 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x40 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -482,7 +481,6 @@ i2c0: i2c-bus@40 {
+ i2c1: i2c-bus@80 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x80 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -498,7 +496,6 @@ i2c1: i2c-bus@80 {
+ i2c2: i2c-bus@c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0xc0 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -515,7 +512,6 @@ i2c2: i2c-bus@c0 {
+ i2c3: i2c-bus@100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x100 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -532,7 +528,6 @@ i2c3: i2c-bus@100 {
+ i2c4: i2c-bus@140 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x140 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -549,7 +544,6 @@ i2c4: i2c-bus@140 {
+ i2c5: i2c-bus@180 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x180 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -566,7 +560,6 @@ i2c5: i2c-bus@180 {
+ i2c6: i2c-bus@1c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x1c0 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -583,7 +576,6 @@ i2c6: i2c-bus@1c0 {
+ i2c7: i2c-bus@300 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x300 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -600,7 +592,6 @@ i2c7: i2c-bus@300 {
+ i2c8: i2c-bus@340 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x340 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -617,7 +608,6 @@ i2c8: i2c-bus@340 {
+ i2c9: i2c-bus@380 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x380 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -634,7 +624,6 @@ i2c9: i2c-bus@380 {
+ i2c10: i2c-bus@3c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x3c0 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -651,7 +640,6 @@ i2c10: i2c-bus@3c0 {
+ i2c11: i2c-bus@400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x400 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -668,7 +656,6 @@ i2c11: i2c-bus@400 {
+ i2c12: i2c-bus@440 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x440 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+@@ -685,7 +672,6 @@ i2c12: i2c-bus@440 {
+ i2c13: i2c-bus@480 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x480 0x40>;
+ compatible = "aspeed,ast2400-i2c-bus";
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
+index 04f98d1dbb97c8..e6f3cf3c721e57 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
++++ b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
+@@ -363,6 +363,7 @@ sgpio: sgpio@1e780200 {
+ interrupts = <40>;
+ reg = <0x1e780200 0x0100>;
+ clocks = <&syscon ASPEED_CLK_APB>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ bus-frequency = <12000000>;
+ pinctrl-names = "default";
+@@ -594,7 +595,6 @@ i2c_ic: interrupt-controller@0 {
+ i2c0: i2c-bus@40 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x40 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -610,7 +610,6 @@ i2c0: i2c-bus@40 {
+ i2c1: i2c-bus@80 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x80 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -626,7 +625,6 @@ i2c1: i2c-bus@80 {
+ i2c2: i2c-bus@c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0xc0 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -643,7 +641,6 @@ i2c2: i2c-bus@c0 {
+ i2c3: i2c-bus@100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x100 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -660,7 +657,6 @@ i2c3: i2c-bus@100 {
+ i2c4: i2c-bus@140 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x140 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -677,7 +673,6 @@ i2c4: i2c-bus@140 {
+ i2c5: i2c-bus@180 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x180 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -694,7 +689,6 @@ i2c5: i2c-bus@180 {
+ i2c6: i2c-bus@1c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x1c0 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -711,7 +705,6 @@ i2c6: i2c-bus@1c0 {
+ i2c7: i2c-bus@300 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x300 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -728,7 +721,6 @@ i2c7: i2c-bus@300 {
+ i2c8: i2c-bus@340 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x340 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -745,7 +737,6 @@ i2c8: i2c-bus@340 {
+ i2c9: i2c-bus@380 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x380 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -762,7 +753,6 @@ i2c9: i2c-bus@380 {
+ i2c10: i2c-bus@3c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x3c0 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -779,7 +769,6 @@ i2c10: i2c-bus@3c0 {
+ i2c11: i2c-bus@400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x400 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -796,7 +785,6 @@ i2c11: i2c-bus@400 {
+ i2c12: i2c-bus@440 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x440 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+@@ -813,7 +801,6 @@ i2c12: i2c-bus@440 {
+ i2c13: i2c-bus@480 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+
+ reg = <0x480 0x40>;
+ compatible = "aspeed,ast2500-i2c-bus";
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+index c4d1faade8be33..29f94696d8b189 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
++++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+@@ -474,6 +474,7 @@ sgpiom0: sgpiom@1e780500 {
+ reg = <0x1e780500 0x100>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_APB2>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ bus-frequency = <12000000>;
+ pinctrl-names = "default";
+@@ -488,6 +489,7 @@ sgpiom1: sgpiom@1e780600 {
+ reg = <0x1e780600 0x100>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_APB2>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ bus-frequency = <12000000>;
+ pinctrl-names = "default";
+@@ -902,7 +904,6 @@ &i2c {
+ i2c0: i2c-bus@80 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -917,7 +918,6 @@ i2c0: i2c-bus@80 {
+ i2c1: i2c-bus@100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x100 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -932,7 +932,6 @@ i2c1: i2c-bus@100 {
+ i2c2: i2c-bus@180 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x180 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -947,7 +946,6 @@ i2c2: i2c-bus@180 {
+ i2c3: i2c-bus@200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x200 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -962,7 +960,6 @@ i2c3: i2c-bus@200 {
+ i2c4: i2c-bus@280 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x280 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -977,7 +974,6 @@ i2c4: i2c-bus@280 {
+ i2c5: i2c-bus@300 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x300 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -992,7 +988,6 @@ i2c5: i2c-bus@300 {
+ i2c6: i2c-bus@380 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x380 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1007,7 +1002,6 @@ i2c6: i2c-bus@380 {
+ i2c7: i2c-bus@400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x400 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1022,7 +1016,6 @@ i2c7: i2c-bus@400 {
+ i2c8: i2c-bus@480 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x480 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1037,7 +1030,6 @@ i2c8: i2c-bus@480 {
+ i2c9: i2c-bus@500 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x500 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1052,7 +1044,6 @@ i2c9: i2c-bus@500 {
+ i2c10: i2c-bus@580 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x580 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1067,7 +1058,6 @@ i2c10: i2c-bus@580 {
+ i2c11: i2c-bus@600 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x600 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1082,7 +1072,6 @@ i2c11: i2c-bus@600 {
+ i2c12: i2c-bus@680 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x680 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1097,7 +1086,6 @@ i2c12: i2c-bus@680 {
+ i2c13: i2c-bus@700 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x700 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1112,7 +1100,6 @@ i2c13: i2c-bus@700 {
+ i2c14: i2c-bus@780 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x780 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+@@ -1127,7 +1114,6 @@ i2c14: i2c-bus@780 {
+ i2c15: i2c-bus@800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- #interrupt-cells = <1>;
+ reg = <0x800 0x80>;
+ compatible = "aspeed,ast2600-i2c-bus";
+ clocks = <&syscon ASPEED_CLK_APB2>;
+diff --git a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
+index 31590d3186a2e0..00e5887c926f18 100644
+--- a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
++++ b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
+@@ -35,8 +35,8 @@ spi_gpio: spi {
+ gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
+ gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
+
+- tpmdev@0 {
+- compatible = "tcg,tpm_tis-spi";
++ tpm@0 {
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi b/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
+index f9f79ed825181b..07ca0d993c9fdb 100644
+--- a/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
+@@ -167,6 +167,7 @@ gpio_crmu: gpio@3024800 {
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&mailbox>;
+ interrupts = <0>;
+ };
+@@ -247,6 +248,7 @@ gpio_ccm: gpio@1800a000 {
+ gpio-controller;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ };
+
+ i2c1: i2c@1800b000 {
+@@ -518,6 +520,7 @@ gpio_asiu: gpio@180a5000 {
+ gpio-controller;
+
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 42 1>,
+ <&pinctrl 1 44 3>,
+diff --git a/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi b/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
+index 788a6806191a33..75545b10ef2fa6 100644
+--- a/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
+@@ -200,6 +200,7 @@ gpiob: gpio@30000 {
+ gpio-controller;
+ ngpios = <4>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi b/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
+index 9d20ba3b1ffb13..6a4482c9316741 100644
+--- a/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
+@@ -180,6 +180,7 @@ gpioa: gpio@20 {
+ gpio-controller;
+ ngpios = <32>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ };
+@@ -352,6 +353,7 @@ gpiob: gpio@30000 {
+ gpio-controller;
+ ngpios = <4>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts b/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
+index 1ab8184302db44..5a2869a18bd555 100644
+--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
++++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
+@@ -36,9 +36,7 @@ &led_pwr {
+ gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;
+ };
+
+-&leds {
+- /delete-node/ led_act;
+-};
++/delete-node/ &led_act;
+
+ &pm {
+ /delete-property/ system-power-controller;
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
+index 42bcbf10957c40..9f9084269ef58b 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
++++ b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
+@@ -181,5 +181,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
+index e04d2e5ea51aa4..72e960c888ac86 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
+@@ -85,5 +85,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
+index a399800139d9ce..750e17482371cf 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
+@@ -88,5 +88,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
+index fad3473810a2e5..2bdbc7d18b0eb1 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
+@@ -122,5 +122,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
+index 5b2b7b8b3b123f..b226bef3369cf7 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
+@@ -145,6 +145,14 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
+index d0a26b643b82fe..192b8db5a89c39 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
+@@ -145,5 +145,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
+index 9f21d6d6d35b75..0198b5f9e4a750 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
+@@ -81,5 +81,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
+index 2561072917021c..73ff1694a4a0b3 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
+@@ -148,5 +148,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
+index 707c561703ed81..55fc9f44cbc7f5 100644
+--- a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
+@@ -227,6 +227,14 @@ port@4 {
+ label = "wan";
+ };
+
++ port@5 {
++ status = "disabled";
++ };
++
++ port@7 {
++ status = "disabled";
++ };
++
+ port@8 {
+ label = "cpu";
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
+index c914569ddd5ecc..e6d26987865d02 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
+@@ -144,6 +144,14 @@ port@4 {
+ label = "wan";
+ };
+
++ port@5 {
++ status = "disabled";
++ };
++
++ port@7 {
++ status = "disabled";
++ };
++
+ port@8 {
+ label = "cpu";
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
+index f050acbea0b207..3124dfd01b9447 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
+@@ -192,6 +192,14 @@ port@4 {
+ label = "wan";
+ };
+
++ port@5 {
++ status = "disabled";
++ };
++
++ port@7 {
++ status = "disabled";
++ };
++
+ port@8 {
+ label = "cpu";
+ phy-mode = "rgmii";
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
+index e8991d4e248ce2..e374062eb5b762 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
+@@ -107,5 +107,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
+index afc635c8cdebbc..badafa024d24c5 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
+@@ -120,5 +120,13 @@ port@1 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
+index 7cfa4607ef311f..cf95af9db1e66d 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
+@@ -107,5 +107,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
+index d55e10095eae79..992c19e1cfa173 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
+@@ -75,5 +75,13 @@ port@0 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
+index ccf031c0e276d6..4d0ba315a2049e 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
+@@ -147,5 +147,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
+index e28f7a3501179f..83c429afc2974d 100644
+--- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
++++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
+@@ -158,5 +158,13 @@ port@4 {
+ port@5 {
+ label = "cpu";
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+index 03ad614e6b7214..0bf5106f7012c9 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
++++ b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
+@@ -124,6 +124,14 @@ fixed-link {
+ full-duplex;
+ };
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+index 26c12bfb0bdd4a..25eeacf6a2484c 100644
+--- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
++++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
+@@ -185,6 +185,14 @@ fixed-link {
+ full-duplex;
+ };
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm953012er.dts b/arch/arm/boot/dts/broadcom/bcm953012er.dts
+index 4fe3b365337670..d939ec9f4a9e79 100644
+--- a/arch/arm/boot/dts/broadcom/bcm953012er.dts
++++ b/arch/arm/boot/dts/broadcom/bcm953012er.dts
+@@ -84,6 +84,14 @@ port@5 {
+ label = "cpu";
+ ethernet = <&gmac0>;
+ };
++
++ port@7 {
++ status = "disabled";
++ };
++
++ port@8 {
++ status = "disabled";
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
+index 4d70f6afd13ab5..6d5e69035f94dc 100644
+--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
++++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
+@@ -60,6 +60,8 @@ pci@c0000000 {
+ * We have slots (IDSEL) 1 and 2 with one assigned IRQ
+ * each handling all IRQs.
+ */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map =
+ /* IDSEL 1 */
+ <0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 1 is irq 11 */
+diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
+index 9ec0169bacf8c2..5f4c849915db71 100644
+--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
++++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
+@@ -89,6 +89,8 @@ pci@c0000000 {
+ * The slots have Ethernet, Ethernet, NEC and MPCI.
+ * The IDSELs are 11, 12, 13, 14.
+ */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map =
+ /* IDSEL 11 - Ethernet A */
+ <0x5800 0 0 1 &gpio0 4 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 11 is irq 4 */
+diff --git a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
+index dffb9f84e67c50..c841eb8e7fb1d0 100644
+--- a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
++++ b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
+@@ -65,6 +65,7 @@ i2c@11000 {
+ gpio2: gpio-expander@20 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
++ interrupt-controller;
+ compatible = "semtech,sx1505q";
+ reg = <0x20>;
+
+@@ -79,6 +80,7 @@ gpio2: gpio-expander@20 {
+ gpio3: gpio-expander@21 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
++ interrupt-controller;
+ compatible = "semtech,sx1505q";
+ reg = <0x21>;
+
+diff --git a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
+index 04f1ae1382e7a3..bc64348b821851 100644
+--- a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
++++ b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
+@@ -28,7 +28,7 @@ &uart3 {
+ &twsi1 {
+ status = "okay";
+ pmic: max8925@3c {
+- compatible = "maxium,max8925";
++ compatible = "maxim,max8925";
+ reg = <0x3c>;
+ interrupts = <1>;
+ interrupt-parent = <&intcmux4>;
+diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+index 217e9b96c61e5d..20b2497657ae48 100644
+--- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
++++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+@@ -293,7 +293,7 @@ vddcore: VDD_CORE {
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+- regulator-suspend-voltage = <1150000>;
++ regulator-suspend-microvolt = <1150000>;
+ regulator-mode = <4>;
+ };
+
+@@ -314,7 +314,7 @@ vddcpu: VDD_OTHER {
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+- regulator-suspend-voltage = <1050000>;
++ regulator-suspend-microvolt = <1050000>;
+ regulator-mode = <4>;
+ };
+
+@@ -331,7 +331,7 @@ vldo1: LDO1 {
+ regulator-always-on;
+
+ regulator-state-standby {
+- regulator-suspend-voltage = <1800000>;
++ regulator-suspend-microvolt = <1800000>;
+ regulator-on-in-suspend;
+ };
+
+@@ -346,7 +346,7 @@ vldo2: LDO2 {
+ regulator-max-microvolt = <3700000>;
+
+ regulator-state-standby {
+- regulator-suspend-voltage = <1800000>;
++ regulator-suspend-microvolt = <1800000>;
+ regulator-on-in-suspend;
+ };
+
+diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi
+index 73d570a172690c..1705c96f4221e8 100644
+--- a/arch/arm/boot/dts/microchip/sam9x60.dtsi
++++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi
+@@ -1312,7 +1312,7 @@ rtt: rtc@fffffe20 {
+ compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
+ reg = <0xfffffe20 0x20>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+- clocks = <&clk32k 0>;
++ clocks = <&clk32k 1>;
+ };
+
+ pit: timer@fffffe40 {
+@@ -1338,7 +1338,7 @@ rtc: rtc@fffffea8 {
+ compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
+ reg = <0xfffffea8 0x100>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+- clocks = <&clk32k 0>;
++ clocks = <&clk32k 1>;
+ };
+
+ watchdog: watchdog@ffffff80 {
+diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi
+index 269e0a3ca269cd..7a95464bb78d83 100644
+--- a/arch/arm/boot/dts/microchip/sama7g5.dtsi
++++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi
+@@ -272,7 +272,7 @@ rtt: rtc@e001d020 {
+ compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
+ reg = <0xe001d020 0x30>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&clk32k 0>;
++ clocks = <&clk32k 1>;
+ };
+
+ clk32k: clock-controller@e001d050 {
+diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
+index fd671c7a1e5d64..6e1f0f164cb4f5 100644
+--- a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
++++ b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
+@@ -120,6 +120,7 @@ gpio0: gpio@0 {
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
+ <3 IRQ_TYPE_LEVEL_HIGH>,
+ <4 IRQ_TYPE_LEVEL_HIGH>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+@@ -128,6 +129,7 @@ gpio1: gpio@1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
+index 1640763fd4af22..ff0d684622f74d 100644
+--- a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
++++ b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
+@@ -997,7 +997,6 @@ touchscreen@41 {
+ compatible = "st,stmpe811";
+ reg = <0x41>;
+ irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
+- interrupt-controller;
+ id = <0>;
+ blocks = <0x5>;
+ irq-trigger = <0x1>;
+diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi b/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
+index 3b6fad273cabf1..d38f1dd38a9068 100644
+--- a/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
++++ b/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
+@@ -980,7 +980,6 @@ touchscreen@41 {
+ compatible = "st,stmpe811";
+ reg = <0x41>;
+ irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
+- interrupt-controller;
+ id = <0>;
+ blocks = <0x5>;
+ irq-trigger = <0x1>;
+diff --git a/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi b/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
+index 4eb526fe9c5588..81c8a5fd92ccea 100644
+--- a/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
++++ b/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
+@@ -861,7 +861,6 @@ touchscreen@41 {
+ compatible = "st,stmpe811";
+ reg = <0x41>;
+ irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
+- interrupt-controller;
+ id = <0>;
+ blocks = <0x5>;
+ irq-trigger = <0x1>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx1-ads.dts b/arch/arm/boot/dts/nxp/imx/imx1-ads.dts
+index 5833fb6f15d88a..2c817c4a4c68f8 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx1-ads.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx1-ads.dts
+@@ -65,7 +65,7 @@ &weim {
+ pinctrl-0 = <&pinctrl_weim>;
+ status = "okay";
+
+- nor: nor@0,0 {
++ nor: flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0x00000000 0x02000000>;
+ bank-width = <4>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx1-apf9328.dts b/arch/arm/boot/dts/nxp/imx/imx1-apf9328.dts
+index 1f11e9542a72de..e66eef87a7a4fd 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx1-apf9328.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx1-apf9328.dts
+@@ -45,7 +45,7 @@ &weim {
+ pinctrl-0 = <&pinctrl_weim>;
+ status = "okay";
+
+- nor: nor@0,0 {
++ nor: flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0x00000000 0x02000000>;
+ bank-width = <2>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx1.dtsi b/arch/arm/boot/dts/nxp/imx/imx1.dtsi
+index e312f1e74e2fe6..4aeb74479f44e9 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx1.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx1.dtsi
+@@ -268,9 +268,12 @@ weim: weim@220000 {
+ status = "disabled";
+ };
+
+- esram: esram@300000 {
++ esram: sram@300000 {
+ compatible = "mmio-sram";
+ reg = <0x00300000 0x20000>;
++ ranges = <0 0x00300000 0x20000>;
++ #address-cells = <1>;
++ #size-cells = <1>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-cpuimx25.dtsi b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-cpuimx25.dtsi
+index 0703f62d10d1cb..93a6e4e680b451 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-cpuimx25.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-cpuimx25.dtsi
+@@ -27,7 +27,7 @@ &i2c1 {
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+- pcf8563@51 {
++ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
+index fc8a502fc957f0..6cddb2cc36fe2a 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-cmo-qvga.dts
+@@ -16,7 +16,7 @@ cmo_qvga: display {
+ bus-width = <18>;
+ display-timings {
+ native-mode = <&qvga_timings>;
+- qvga_timings: 320x240 {
++ qvga_timings: timing0 {
+ clock-frequency = <6500000>;
+ hactive = <320>;
+ vactive = <240>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
+index 80a7f96de4c6ac..64b2ffac463b2a 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-svga.dts
+@@ -16,7 +16,7 @@ dvi_svga: display {
+ bus-width = <18>;
+ display-timings {
+ native-mode = <&dvi_svga_timings>;
+- dvi_svga_timings: 800x600 {
++ dvi_svga_timings: timing0 {
+ clock-frequency = <40000000>;
+ hactive = <800>;
+ vactive = <600>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
+index 24027a1fb46d11..fb074bfdaa8dc2 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dts
+@@ -16,7 +16,7 @@ dvi_vga: display {
+ bus-width = <18>;
+ display-timings {
+ native-mode = <&dvi_vga_timings>;
+- dvi_vga_timings: 640x480 {
++ dvi_vga_timings: timing0 {
+ clock-frequency = <31250000>;
+ hactive = <640>;
+ vactive = <480>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx25-pdk.dts b/arch/arm/boot/dts/nxp/imx/imx25-pdk.dts
+index 04f4b127a17257..e93bf3b7115fac 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx25-pdk.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx25-pdk.dts
+@@ -68,7 +68,7 @@ wvga: display {
+ bus-width = <18>;
+ display-timings {
+ native-mode = <&wvga_timings>;
+- wvga_timings: 640x480 {
++ wvga_timings: timing0 {
+ hactive = <640>;
+ vactive = <480>;
+ hback-porch = <45>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx25.dtsi b/arch/arm/boot/dts/nxp/imx/imx25.dtsi
+index 5f90d72b840b0e..5ac4549286bd7f 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx25.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx25.dtsi
+@@ -543,7 +543,7 @@ pwm1: pwm@53fe0000 {
+ };
+
+ iim: efuse@53ff0000 {
+- compatible = "fsl,imx25-iim", "fsl,imx27-iim";
++ compatible = "fsl,imx25-iim";
+ reg = <0x53ff0000 0x4000>;
+ interrupts = <19>;
+ clocks = <&clks 99>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27-apf27dev.dts b/arch/arm/boot/dts/nxp/imx/imx27-apf27dev.dts
+index a21f1f7c24b88d..849306cb4532db 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27-apf27dev.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx27-apf27dev.dts
+@@ -16,7 +16,7 @@ display: display {
+ fsl,pcr = <0xfae80083>; /* non-standard but required */
+ display-timings {
+ native-mode = <&timing0>;
+- timing0: 800x480 {
++ timing0: timing0 {
+ clock-frequency = <33000033>;
+ hactive = <800>;
+ vactive = <480>;
+@@ -47,7 +47,7 @@ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- user {
++ led-user {
+ label = "Heartbeat";
+ gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27-eukrea-cpuimx27.dtsi b/arch/arm/boot/dts/nxp/imx/imx27-eukrea-cpuimx27.dtsi
+index 74110bbcd9d4f2..c7e92358487826 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27-eukrea-cpuimx27.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx27-eukrea-cpuimx27.dtsi
+@@ -33,7 +33,7 @@ &i2c1 {
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+- pcf8563@51 {
++ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+@@ -90,7 +90,7 @@ &usbotg {
+ &weim {
+ status = "okay";
+
+- nor: nor@0,0 {
++ nor: flash@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27-eukrea-mbimxsd27-baseboard.dts b/arch/arm/boot/dts/nxp/imx/imx27-eukrea-mbimxsd27-baseboard.dts
+index 145e459625b32d..d78793601306cf 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27-eukrea-mbimxsd27-baseboard.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx27-eukrea-mbimxsd27-baseboard.dts
+@@ -16,7 +16,7 @@ display0: CMO-QVGA {
+
+ display-timings {
+ native-mode = <&timing0>;
+- timing0: 320x240 {
++ timing0: timing0 {
+ clock-frequency = <6500000>;
+ hactive = <320>;
+ vactive = <240>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-rdk.dts
+index 25442eba21c1e0..27c93b9fe0499f 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-rdk.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycard-s-rdk.dts
+@@ -19,7 +19,7 @@ display: display {
+ fsl,pcr = <0xf0c88080>; /* non-standard but required */
+ display-timings {
+ native-mode = <&timing0>;
+- timing0: 640x480 {
++ timing0: timing0 {
+ hactive = <640>;
+ vactive = <480>;
+ hback-porch = <112>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-rdk.dts b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-rdk.dts
+index 7f0cd4d3ec2de4..67b235044b708c 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-rdk.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-rdk.dts
+@@ -19,7 +19,7 @@ display0: LQ035Q7 {
+
+ display-timings {
+ native-mode = <&timing0>;
+- timing0: 240x320 {
++ timing0: timing0 {
+ clock-frequency = <5500000>;
+ hactive = <240>;
+ vactive = <320>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-som.dtsi
+index 7191e10712b956..efce284b57969b 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx27-phytec-phycore-som.dtsi
+@@ -314,7 +314,7 @@ &usbotg {
+ &weim {
+ status = "okay";
+
+- nor: nor@0,0 {
++ nor: flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0x00000000 0x02000000>;
+ bank-width = <2>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx27.dtsi b/arch/arm/boot/dts/nxp/imx/imx27.dtsi
+index faba12ee7465eb..cac4b3d68986a0 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx27.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx27.dtsi
+@@ -588,6 +588,9 @@ weim: weim@d8002000 {
+ iram: sram@ffff4c00 {
+ compatible = "mmio-sram";
+ reg = <0xffff4c00 0xb400>;
++ ranges = <0 0xffff4c00 0xb400>;
++ #address-cells = <1>;
++ #size-cells = <1>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi
+index 3be38a3c4bb11c..c32ea040fecdda 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi
+@@ -117,17 +117,9 @@ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- phy_port2: phy@1 {
+- reg = <1>;
+- };
+-
+- phy_port3: phy@2 {
+- reg = <2>;
+- };
+-
+ switch@10 {
+ compatible = "qca,qca8334";
+- reg = <10>;
++ reg = <0x10>;
+ reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+
+ switch_ports: ports {
+@@ -149,15 +141,30 @@ fixed-link {
+ eth2: port@2 {
+ reg = <2>;
+ label = "eth2";
++ phy-mode = "internal";
+ phy-handle = <&phy_port2>;
+ };
+
+ eth1: port@3 {
+ reg = <3>;
+ label = "eth1";
++ phy-mode = "internal";
+ phy-handle = <&phy_port3>;
+ };
+ };
++
++ mdio {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ phy_port2: ethernet-phy@1 {
++ reg = <1>;
++ };
++
++ phy_port3: ethernet-phy@2 {
++ reg = <2>;
++ };
++ };
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi
+index 52a0f6ee426f97..bcf4d9c870ec97 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi
+@@ -274,24 +274,24 @@ leds: led-controller@30 {
+
+ led@0 {
+ chan-name = "R";
+- led-cur = /bits/ 8 <0x20>;
+- max-cur = /bits/ 8 <0x60>;
++ led-cur = /bits/ 8 <0x6e>;
++ max-cur = /bits/ 8 <0xc8>;
+ reg = <0>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@1 {
+ chan-name = "G";
+- led-cur = /bits/ 8 <0x20>;
+- max-cur = /bits/ 8 <0x60>;
++ led-cur = /bits/ 8 <0xbe>;
++ max-cur = /bits/ 8 <0xc8>;
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@2 {
+ chan-name = "B";
+- led-cur = /bits/ 8 <0x20>;
+- max-cur = /bits/ 8 <0x60>;
++ led-cur = /bits/ 8 <0xbe>;
++ max-cur = /bits/ 8 <0xc8>;
+ reg = <2>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
+index 717decda0cebd5..3ac7a45016205a 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
+@@ -76,6 +76,7 @@ reg_can1_supply: regulator-can1-supply {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enable_can1_power>;
+ regulator-name = "can1_supply";
++ startup-delay-us = <1000>;
+ };
+
+ reg_can2_supply: regulator-can2-supply {
+@@ -85,6 +86,7 @@ reg_can2_supply: regulator-can2-supply {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enable_can2_power>;
+ regulator-name = "can2_supply";
++ startup-delay-us = <1000>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts b/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
+index db8c332df6a1d5..cad112e054758f 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
+@@ -227,7 +227,6 @@ bridge@1,0 {
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+- #interrupt-cells = <1>;
+
+ bridge@2,1 {
+ compatible = "pci10b5,8605";
+@@ -235,7 +234,6 @@ bridge@2,1 {
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+- #interrupt-cells = <1>;
+
+ /* Intel Corporation I210 Gigabit Network Connection */
+ ethernet@3,0 {
+@@ -250,7 +248,6 @@ bridge@2,2 {
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+- #interrupt-cells = <1>;
+
+ /* Intel Corporation I210 Gigabit Network Connection */
+ switch_nic: ethernet@4,0 {
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
+index 99f4f6ac71d4a1..c1ae7c47b44227 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
+@@ -245,6 +245,7 @@ pca9539: pca9539@74 {
+ reg = <0x74>;
+ gpio-controller;
+ #gpio-cells = <2>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupt-parent = <&gpio2>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+@@ -390,7 +391,6 @@ pci_root: root@0,0 {
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+- #interrupt-cells = <1>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi
+index 4d6a0c3e8455f9..ff062f4fd726eb 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi
+@@ -5,31 +5,8 @@
+
+ #include "imx6q.dtsi"
+ #include "imx6qdl-kontron-samx6i.dtsi"
+-#include <dt-bindings/gpio/gpio.h>
+
+ / {
+ model = "Kontron SMARC sAMX6i Quad/Dual";
+ compatible = "kontron,imx6q-samx6i", "fsl,imx6q";
+ };
+-
+-/* Quad/Dual SoMs have 3 chip-select signals */
+-&ecspi4 {
+- cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>,
+- <&gpio3 29 GPIO_ACTIVE_LOW>,
+- <&gpio3 25 GPIO_ACTIVE_LOW>;
+-};
+-
+-&pinctrl_ecspi4 {
+- fsl,pins = <
+- MX6QDL_PAD_EIM_D21__ECSPI4_SCLK 0x100b1
+- MX6QDL_PAD_EIM_D28__ECSPI4_MOSI 0x100b1
+- MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1
+-
+- /* SPI4_IMX_CS2# - connected to internal flash */
+- MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0
+- /* SPI4_IMX_CS0# - connected to SMARC SPI0_CS0# */
+- MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0
+- /* SPI4_CS3# - connected to SMARC SPI0_CS1# */
+- MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0
+- >;
+-};
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts b/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
+index a3f247c722b438..0342a79ccd5db2 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
+@@ -37,9 +37,9 @@ panel_in: endpoint {
+
+ &clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
++ <&clks IMX6QDL_CLK_LDB_DI1_SEL>, <&clks IMX6QDL_CLK_ENET_REF_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
++ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, <&clk50m_phy>;
+ };
+
+ &hdmi {
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+index 4cc965277c5219..dcb4f6a32f8092 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+@@ -619,7 +619,6 @@ stmpe811@41 {
+ blocks = <0x5>;
+ id = <0>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+- interrupt-controller;
+ interrupt-parent = <&gpio4>;
+ irq-trigger = <0x1>;
+ pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
+index 11d9c7a2dacb14..6cc4d6fd5f28be 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
+@@ -543,7 +543,6 @@ stmpe811@41 {
+ blocks = <0x5>;
+ interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio6>;
+- interrupt-controller;
+ id = <0>;
+ irq-trigger = <0x1>;
+ pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
+index a63e73adc1fc53..42b2ba23aefc9e 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
+@@ -225,7 +225,6 @@ da9063: pmic@58 {
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+- interrupt-controller;
+
+ onkey {
+ compatible = "dlg,da9063-onkey";
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi
+index 85aeebc9485dd3..668d33d1ff0c1c 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi
+@@ -244,7 +244,8 @@ &ecspi4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi4>;
+ cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>,
+- <&gpio3 29 GPIO_ACTIVE_LOW>;
++ <&gpio3 29 GPIO_ACTIVE_LOW>,
++ <&gpio3 25 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ /* default boot source: workaround #1 for errata ERR006282 */
+@@ -259,7 +260,7 @@ smarc_flash: flash@0 {
+ &fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+- phy-mode = "rgmii";
++ phy-connection-type = "rgmii-id";
+ phy-handle = <&ethphy>;
+
+ mdio {
+@@ -269,7 +270,7 @@ mdio {
+ ethphy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+- reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
++ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ };
+ };
+@@ -464,6 +465,8 @@ MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1
+ MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0
+ /* SPI_IMX_CS0# - connected to SMARC SPI0_CS0# */
+ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0
++ /* SPI4_CS3# - connected to SMARC SPI0_CS1# */
++ MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0
+ >;
+ };
+
+@@ -516,7 +519,7 @@ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+- MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 /* RST_GBE0_PHY# */
++ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0 /* RST_GBE0_PHY# */
+ >;
+ };
+
+@@ -729,7 +732,7 @@ &pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ wake-up-gpio = <&gpio6 18 GPIO_ACTIVE_HIGH>;
+- reset-gpio = <&gpio3 13 GPIO_ACTIVE_HIGH>;
++ reset-gpio = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ };
+
+ /* LCD_BKLT_PWM */
+@@ -817,5 +820,6 @@ &wdog1 {
+ /* CPLD is feeded by watchdog (hardwired) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog1>;
++ fsl,ext-reset-output;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
+index 113974520d544b..c0c47adc5866e3 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
+@@ -124,6 +124,7 @@ pmic@58 {
+ reg = <0x58>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>; /* active-low GPIO2_9 */
++ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ regulators {
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
+index 86b4269e0e0117..85e278eb201610 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
+@@ -100,6 +100,7 @@ pmic: pmic@58 {
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
+index 875ae699c5cb80..ce9f4c22672939 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
+@@ -366,7 +366,7 @@ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0
+ };
+
+ pinctrl_tsc: tscgrp {
+- fsl,pin = <
++ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
+ MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
+ MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
+index 4ffe99ed55ca2c..07dcecbe485dca 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
+@@ -121,6 +121,8 @@ ethphy1: ethernet-phy@1 {
+ max-speed = <100>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&clks IMX6UL_CLK_ENET_REF>;
++ clock-names = "rmii-ref";
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
+index ea627638e40cf6..7dd1fe5a2fb768 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
+@@ -121,7 +121,7 @@ &ecspi1 {
+ tpm_tis: tpm@1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm>;
+- compatible = "tcg,tpm_tis-spi";
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <1>;
+ spi-max-frequency = <20000000>;
+ interrupt-parent = <&gpio5>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
+index 3fdece5bd31f9d..5248a058230c86 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6ull-tarragon-common.dtsi
+@@ -805,6 +805,7 @@ &usbotg1 {
+ &pinctrl_usb_pwr>;
+ dr_mode = "host";
+ power-active-high;
++ over-current-active-low;
+ disable-over-current;
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
+index 3a723843d5626f..9984b343cdf0ca 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
+@@ -130,7 +130,7 @@ &ecspi4 {
+ * TCG specification - Section 6.4.1 Clocking:
+ * TPM shall support a SPI clock frequency range of 10-24 MHz.
+ */
+- st33htph: tpm-tis@0 {
++ st33htph: tpm@0 {
+ compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <24000000>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
+index 12361fcbe24aff..1b965652291bfa 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
+@@ -63,6 +63,7 @@ pca9554: io-expander@25 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
++ interrupt-controller;
+ reg = <0x25>;
+ };
+
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts
+index 521493342fe972..8f5566027c25a2 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts
+@@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59
+
+ &iomuxc_lpsr {
+ pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
+- fsl,phy = <
++ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
+ >;
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7d.dtsi b/arch/arm/boot/dts/nxp/imx/imx7d.dtsi
+index 4b94b8afb55d91..0484e349e064e4 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7d.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx7d.dtsi
+@@ -217,9 +217,6 @@ fec2: ethernet@30bf0000 {
+ };
+
+ &ca_funnel_in_ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+ port@1 {
+ reg = <1>;
+ ca_funnel_in_port1: endpoint {
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+index ba7231b364bb8c..7bab113ca6da79 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
+@@ -210,6 +210,7 @@ ov2680_to_mipi: endpoint {
+ remote-endpoint = <&mipi_from_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1>;
++ link-frequencies = /bits/ 64 <330000000>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+index e152d08f27d49e..39e9f1411ebb80 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+@@ -190,7 +190,11 @@ funnel@30041000 {
+ clock-names = "apb_pclk";
+
+ ca_funnel_in_ports: in-ports {
+- port {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ port@0 {
++ reg = <0>;
+ ca_funnel_in_port0: endpoint {
+ remote-endpoint = <&etm0_out_port>;
+ };
+@@ -454,7 +458,7 @@ iomuxc_lpsr: pinctrl@302c0000 {
+ };
+
+ gpt1: timer@302d0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302d0000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
+@@ -463,7 +467,7 @@ gpt1: timer@302d0000 {
+ };
+
+ gpt2: timer@302e0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302e0000 0x10000>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
+@@ -473,7 +477,7 @@ gpt2: timer@302e0000 {
+ };
+
+ gpt3: timer@302f0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302f0000 0x10000>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
+@@ -483,7 +487,7 @@ gpt3: timer@302f0000 {
+ };
+
+ gpt4: timer@30300000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x30300000 0x10000>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
+@@ -814,7 +818,7 @@ csi_from_csi_mux: endpoint {
+ };
+
+ lcdif: lcdif@30730000 {
+- compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
++ compatible = "fsl,imx7d-lcdif", "fsl,imx6sx-lcdif";
+ reg = <0x30730000 0x10000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
+@@ -1278,7 +1282,7 @@ dma_apbh: dma-controller@33000000 {
+ gpmi: nand-controller@33002000 {
+ compatible = "fsl,imx7d-gpmi-nand";
+ #address-cells = <1>;
+- #size-cells = <1>;
++ #size-cells = <0>;
+ reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
+ reg-names = "gpmi-nand", "bch";
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
+index 46057d9bf555b6..c2efcc20ae8026 100644
+--- a/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
++++ b/arch/arm/boot/dts/nxp/mxs/imx23-sansa.dts
+@@ -175,10 +175,8 @@ i2c-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "i2c-gpio";
+- gpios = <
+- &gpio1 24 0 /* SDA */
+- &gpio1 22 0 /* SCL */
+- >;
++ sda-gpios = <&gpio1 24 0>;
++ scl-gpios = <&gpio1 22 0>;
+ i2c-gpio,delay-us = <2>; /* ~100 kHz */
+ };
+
+@@ -186,10 +184,8 @@ i2c-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "i2c-gpio";
+- gpios = <
+- &gpio0 31 0 /* SDA */
+- &gpio0 30 0 /* SCL */
+- >;
++ sda-gpios = <&gpio0 31 0>;
++ scl-gpios = <&gpio0 30 0>;
+ i2c-gpio,delay-us = <2>; /* ~100 kHz */
+
+ touch: touch@20 {
+diff --git a/arch/arm/boot/dts/nxp/mxs/imx23.dtsi b/arch/arm/boot/dts/nxp/mxs/imx23.dtsi
+index 5eca942a52fd44..14c07b585f8220 100644
+--- a/arch/arm/boot/dts/nxp/mxs/imx23.dtsi
++++ b/arch/arm/boot/dts/nxp/mxs/imx23.dtsi
+@@ -412,7 +412,7 @@ emi@80020000 {
+ status = "disabled";
+ };
+
+- dma_apbx: dma-apbx@80024000 {
++ dma_apbx: dma-controller@80024000 {
+ compatible = "fsl,imx23-dma-apbx";
+ reg = <0x80024000 0x2000>;
+ interrupts = <7>, <5>, <9>, <26>,
+diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts b/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
+index a400c108f66a2d..6c5e6856648af9 100644
+--- a/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
++++ b/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
+@@ -8,6 +8,7 @@
+ #include "imx28-lwe.dtsi"
+
+ / {
++ model = "Liebherr XEA board";
+ compatible = "lwn,imx28-xea", "fsl,imx28";
+ };
+
+diff --git a/arch/arm/boot/dts/nxp/mxs/imx28.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28.dtsi
+index 763adeb995ee76..9b73130887ea14 100644
+--- a/arch/arm/boot/dts/nxp/mxs/imx28.dtsi
++++ b/arch/arm/boot/dts/nxp/mxs/imx28.dtsi
+@@ -990,7 +990,7 @@ etm: etm@80022000 {
+ status = "disabled";
+ };
+
+- dma_apbx: dma-apbx@80024000 {
++ dma_apbx: dma-controller@80024000 {
+ compatible = "fsl,imx28-dma-apbx";
+ reg = <0x80024000 0x2000>;
+ interrupts = <78>, <79>, <66>, <0>,
+diff --git a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
+index 16b4e06c4efad3..a248b8a4534210 100644
+--- a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
++++ b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
+@@ -338,6 +338,7 @@ gpio6: io-expander@22 {
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupt-parent = <&gpio3>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
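A node marked interrupt-controller must also declare #interrupt-cells so clients know how to encode their interrupt specifiers; with two cells, a client passes the line number and the trigger type. A hypothetical client of this expander:

	some-device {
		interrupt-parent = <&gpio6>;
		interrupts = <3 IRQ_TYPE_EDGE_FALLING>;	/* two cells, per #interrupt-cells = <2> */
	};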
+diff --git a/arch/arm/boot/dts/qcom/pm8226.dtsi b/arch/arm/boot/dts/qcom/pm8226.dtsi
+new file mode 100644
+index 00000000000000..2413778f371507
+--- /dev/null
++++ b/arch/arm/boot/dts/qcom/pm8226.dtsi
+@@ -0,0 +1,180 @@
++// SPDX-License-Identifier: BSD-3-Clause
++#include <dt-bindings/iio/qcom,spmi-vadc.h>
++#include <dt-bindings/input/linux-event-codes.h>
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/spmi/spmi.h>
++
++/ {
++ thermal-zones {
++ pm8226-thermal {
++ polling-delay-passive = <100>;
++ polling-delay = <0>;
++ thermal-sensors = <&pm8226_temp>;
++
++ trips {
++ trip0 {
++ temperature = <105000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ trip1 {
++ temperature = <125000>;
++ hysteresis = <2000>;
++ type = "hot";
++ };
++
++ crit {
++ temperature = <145000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++ };
++ };
++};
++
++&spmi_bus {
++ pm8226_0: pm8226@0 {
++ compatible = "qcom,pm8226", "qcom,spmi-pmic";
++ reg = <0x0 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ pon@800 {
++ compatible = "qcom,pm8916-pon";
++ reg = <0x800>;
++
++ pwrkey {
++ compatible = "qcom,pm8941-pwrkey";
++ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
++ debounce = <15625>;
++ bias-pull-up;
++ linux,code = <KEY_POWER>;
++ };
++
++ pm8226_resin: resin {
++ compatible = "qcom,pm8941-resin";
++ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
++ debounce = <15625>;
++ bias-pull-up;
++ status = "disabled";
++ };
++ };
++
++ smbb: charger@1000 {
++ compatible = "qcom,pm8226-charger";
++ reg = <0x1000>;
++ interrupts = <0x0 0x10 7 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x10 5 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x10 4 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x12 1 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x12 0 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x13 2 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x13 1 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x14 1 IRQ_TYPE_EDGE_BOTH>;
++ interrupt-names = "chg-done",
++ "chg-fast",
++ "chg-trkl",
++ "bat-temp-ok",
++ "bat-present",
++ "chg-gone",
++ "usb-valid",
++ "dc-valid";
++
++ chg_otg: otg-vbus { };
++ };
++
++ pm8226_temp: temp-alarm@2400 {
++ compatible = "qcom,spmi-temp-alarm";
++ reg = <0x2400>;
++ interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
++ io-channels = <&pm8226_vadc VADC_DIE_TEMP>;
++ io-channel-names = "thermal";
++ #thermal-sensor-cells = <0>;
++ };
++
++ pm8226_vadc: adc@3100 {
++ compatible = "qcom,spmi-vadc";
++ reg = <0x3100>;
++ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #io-channel-cells = <1>;
++
++ channel@7 {
++ reg = <VADC_VSYS>;
++ qcom,pre-scaling = <1 3>;
++ label = "vph_pwr";
++ };
++ channel@8 {
++ reg = <VADC_DIE_TEMP>;
++ label = "die_temp";
++ };
++ channel@9 {
++ reg = <VADC_REF_625MV>;
++ label = "ref_625mv";
++ };
++ channel@a {
++ reg = <VADC_REF_1250MV>;
++ label = "ref_1250mv";
++ };
++ channel@e {
++ reg = <VADC_GND_REF>;
++ };
++ channel@f {
++ reg = <VADC_VDD_VADC>;
++ };
++ };
++
++ pm8226_iadc: adc@3600 {
++ compatible = "qcom,pm8226-iadc", "qcom,spmi-iadc";
++ reg = <0x3600>;
++ interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
++ };
++
++ rtc@6000 {
++ compatible = "qcom,pm8941-rtc";
++ reg = <0x6000>, <0x6100>;
++ reg-names = "rtc", "alarm";
++ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
++ };
++
++ pm8226_mpps: mpps@a000 {
++ compatible = "qcom,pm8226-mpp", "qcom,spmi-mpp";
++ reg = <0xa000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ gpio-ranges = <&pm8226_mpps 0 0 8>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pm8226_gpios: gpio@c000 {
++ compatible = "qcom,pm8226-gpio", "qcom,spmi-gpio";
++ reg = <0xc000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ gpio-ranges = <&pm8226_gpios 0 0 8>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++ };
++
++ pm8226_1: pm8226@1 {
++ compatible = "qcom,pm8226", "qcom,spmi-pmic";
++ reg = <0x1 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ pm8226_spmi_regulators: regulators {
++ compatible = "qcom,pm8226-regulators";
++ };
++
++ pm8226_vib: vibrator@c000 {
++ compatible = "qcom,pm8916-vib";
++ reg = <0xc000>;
++ status = "disabled";
++ };
++ };
++};
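Dropping the qcom- prefix brings these PMIC dtsi files in line with their arm64 counterparts, which the sdx55/sdx65 board files later in this patch already pull in via <arm64/qcom/...>. A board includes the file next to its SoC dtsi and enables the disabled-by-default nodes it uses; a hedged sketch of typical usage (the key code is an assumption, not from this patch):

	#include "qcom-msm8226.dtsi"
	#include "pm8226.dtsi"

	&pm8226_resin {
		linux,code = <KEY_VOLUMEDOWN>;
		status = "okay";
	};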
+diff --git a/arch/arm/boot/dts/qcom/pm8841.dtsi b/arch/arm/boot/dts/qcom/pm8841.dtsi
+new file mode 100644
+index 00000000000000..3bf2ce5c86a641
+--- /dev/null
++++ b/arch/arm/boot/dts/qcom/pm8841.dtsi
+@@ -0,0 +1,68 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/spmi/spmi.h>
++
++
++/ {
++ thermal-zones {
++ pm8841-thermal {
++ polling-delay-passive = <100>;
++ polling-delay = <0>;
++ thermal-sensors = <&pm8841_temp>;
++
++ trips {
++ trip0 {
++ temperature = <105000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ trip1 {
++ temperature = <125000>;
++ hysteresis = <2000>;
++ type = "hot";
++ };
++
++ crit {
++ temperature = <140000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++ };
++ };
++};
++
++&spmi_bus {
++
++ pm8841_0: pm8841@4 {
++ compatible = "qcom,pm8841", "qcom,spmi-pmic";
++ reg = <0x4 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ pm8841_mpps: mpps@a000 {
++ compatible = "qcom,pm8841-mpp", "qcom,spmi-mpp";
++ reg = <0xa000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ gpio-ranges = <&pm8841_mpps 0 0 4>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pm8841_temp: temp-alarm@2400 {
++ compatible = "qcom,spmi-temp-alarm";
++ reg = <0x2400>;
++ interrupts = <4 0x24 0 IRQ_TYPE_EDGE_RISING>;
++ #thermal-sensor-cells = <0>;
++ };
++ };
++
++ pm8841_1: pm8841@5 {
++ compatible = "qcom,pm8841", "qcom,spmi-pmic";
++ reg = <0x5 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++};
+diff --git a/arch/arm/boot/dts/qcom/pm8941.dtsi b/arch/arm/boot/dts/qcom/pm8941.dtsi
+new file mode 100644
+index 00000000000000..ed0ba591c75581
+--- /dev/null
++++ b/arch/arm/boot/dts/qcom/pm8941.dtsi
+@@ -0,0 +1,254 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <dt-bindings/iio/qcom,spmi-vadc.h>
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/spmi/spmi.h>
++
++
++/ {
++ thermal-zones {
++ pm8941-thermal {
++ polling-delay-passive = <100>;
++ polling-delay = <0>;
++ thermal-sensors = <&pm8941_temp>;
++
++ trips {
++ trip0 {
++ temperature = <105000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ trip1 {
++ temperature = <125000>;
++ hysteresis = <2000>;
++ type = "hot";
++ };
++
++ crit {
++ temperature = <145000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++ };
++ };
++};
++
++&spmi_bus {
++
++ pm8941_0: pm8941@0 {
++ compatible = "qcom,pm8941", "qcom,spmi-pmic";
++ reg = <0x0 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rtc@6000 {
++ compatible = "qcom,pm8941-rtc";
++ reg = <0x6000>,
++ <0x6100>;
++ reg-names = "rtc", "alarm";
++ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
++ };
++
++ pon@800 {
++ compatible = "qcom,pm8941-pon";
++ reg = <0x800>;
++
++ pwrkey {
++ compatible = "qcom,pm8941-pwrkey";
++ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
++ debounce = <15625>;
++ bias-pull-up;
++ };
++
++ pm8941_resin: resin {
++ compatible = "qcom,pm8941-resin";
++ interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
++ debounce = <15625>;
++ bias-pull-up;
++ status = "disabled";
++ };
++ };
++
++ usb_id: usb-detect@900 {
++ compatible = "qcom,pm8941-misc";
++ reg = <0x900>;
++ interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
++ interrupt-names = "usb_id";
++ };
++
++ smbb: charger@1000 {
++ compatible = "qcom,pm8941-charger";
++ reg = <0x1000>;
++ interrupts = <0x0 0x10 7 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x10 5 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x10 4 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x12 1 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x12 0 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x13 2 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x13 1 IRQ_TYPE_EDGE_BOTH>,
++ <0x0 0x14 1 IRQ_TYPE_EDGE_BOTH>;
++ interrupt-names = "chg-done",
++ "chg-fast",
++ "chg-trkl",
++ "bat-temp-ok",
++ "bat-present",
++ "chg-gone",
++ "usb-valid",
++ "dc-valid";
++
++ usb-otg-in-supply = <&pm8941_5vs1>;
++
++ chg_otg: otg-vbus { };
++ };
++
++ pm8941_gpios: gpio@c000 {
++ compatible = "qcom,pm8941-gpio", "qcom,spmi-gpio";
++ reg = <0xc000>;
++ gpio-controller;
++ gpio-ranges = <&pm8941_gpios 0 0 36>;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++
++ boost_bypass_n_pin: boost-bypass-state {
++ pins = "gpio21";
++ function = "normal";
++ };
++ };
++
++ pm8941_mpps: mpps@a000 {
++ compatible = "qcom,pm8941-mpp", "qcom,spmi-mpp";
++ reg = <0xa000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ gpio-ranges = <&pm8941_mpps 0 0 8>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pm8941_temp: temp-alarm@2400 {
++ compatible = "qcom,spmi-temp-alarm";
++ reg = <0x2400>;
++ interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
++ io-channels = <&pm8941_vadc VADC_DIE_TEMP>;
++ io-channel-names = "thermal";
++ #thermal-sensor-cells = <0>;
++ };
++
++ pm8941_vadc: adc@3100 {
++ compatible = "qcom,spmi-vadc";
++ reg = <0x3100>;
++ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #io-channel-cells = <1>;
++
++
++ channel@6 {
++ reg = <VADC_VBAT_SNS>;
++ };
++
++ channel@8 {
++ reg = <VADC_DIE_TEMP>;
++ };
++
++ channel@9 {
++ reg = <VADC_REF_625MV>;
++ };
++
++ channel@a {
++ reg = <VADC_REF_1250MV>;
++ };
++
++ channel@e {
++ reg = <VADC_GND_REF>;
++ };
++
++ channel@f {
++ reg = <VADC_VDD_VADC>;
++ };
++
++ channel@30 {
++ reg = <VADC_LR_MUX1_BAT_THERM>;
++ };
++ };
++
++ pm8941_iadc: adc@3600 {
++ compatible = "qcom,pm8941-iadc", "qcom,spmi-iadc";
++ reg = <0x3600>;
++ interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
++ qcom,external-resistor-micro-ohms = <10000>;
++ };
++
++ pm8941_coincell: charger@2800 {
++ compatible = "qcom,pm8941-coincell";
++ reg = <0x2800>;
++ status = "disabled";
++ };
++ };
++
++ pm8941_1: pm8941@1 {
++ compatible = "qcom,pm8941", "qcom,spmi-pmic";
++ reg = <0x1 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ pm8941_lpg: pwm {
++ compatible = "qcom,pm8941-lpg";
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #pwm-cells = <2>;
++
++ status = "disabled";
++ };
++
++ pm8941_vib: vibrator@c000 {
++ compatible = "qcom,pm8916-vib";
++ reg = <0xc000>;
++ status = "disabled";
++ };
++
++ pm8941_wled: wled@d800 {
++ compatible = "qcom,pm8941-wled";
++ reg = <0xd800>;
++ label = "backlight";
++
++ status = "disabled";
++ };
++
++ regulators {
++ compatible = "qcom,pm8941-regulators";
++ interrupts = <0x1 0x83 0x2 0>, <0x1 0x84 0x2 0>;
++ interrupt-names = "ocp-5vs1", "ocp-5vs2";
++ vin_5vs-supply = <&pm8941_5v>;
++
++ pm8941_5v: s4 {
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-enable-ramp-delay = <500>;
++ };
++
++ pm8941_5vs1: 5vs1 {
++ regulator-enable-ramp-delay = <1000>;
++ regulator-pull-down;
++ regulator-over-current-protection;
++ qcom,ocp-max-retries = <10>;
++ qcom,ocp-retry-delay = <30>;
++ qcom,vs-soft-start-strength = <0>;
++ regulator-initial-mode = <1>;
++ };
++
++ pm8941_5vs2: 5vs2 {
++ regulator-enable-ramp-delay = <1000>;
++ regulator-pull-down;
++ regulator-over-current-protection;
++ qcom,ocp-max-retries = <10>;
++ qcom,ocp-retry-delay = <30>;
++ qcom,vs-soft-start-strength = <0>;
++ regulator-initial-mode = <1>;
++ };
++ };
++ };
++};
+diff --git a/arch/arm/boot/dts/qcom/pma8084.dtsi b/arch/arm/boot/dts/qcom/pma8084.dtsi
+new file mode 100644
+index 00000000000000..2985f4805b93ee
+--- /dev/null
++++ b/arch/arm/boot/dts/qcom/pma8084.dtsi
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <dt-bindings/iio/qcom,spmi-vadc.h>
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/spmi/spmi.h>
++
++&spmi_bus {
++
++ pma8084_0: pma8084@0 {
++ compatible = "qcom,pma8084", "qcom,spmi-pmic";
++ reg = <0x0 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rtc@6000 {
++ compatible = "qcom,pm8941-rtc";
++ reg = <0x6000>,
++ <0x6100>;
++ reg-names = "rtc", "alarm";
++ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
++ };
++
++ pwrkey@800 {
++ compatible = "qcom,pm8941-pwrkey";
++ reg = <0x800>;
++ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
++ debounce = <15625>;
++ bias-pull-up;
++ };
++
++ pma8084_gpios: gpio@c000 {
++ compatible = "qcom,pma8084-gpio", "qcom,spmi-gpio";
++ reg = <0xc000>;
++ gpio-controller;
++ gpio-ranges = <&pma8084_gpios 0 0 22>;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pma8084_mpps: mpps@a000 {
++ compatible = "qcom,pma8084-mpp", "qcom,spmi-mpp";
++ reg = <0xa000>;
++ gpio-controller;
++ #gpio-cells = <2>;
++ gpio-ranges = <&pma8084_mpps 0 0 8>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pma8084_temp: temp-alarm@2400 {
++ compatible = "qcom,spmi-temp-alarm";
++ reg = <0x2400>;
++ interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
++ #thermal-sensor-cells = <0>;
++ io-channels = <&pma8084_vadc VADC_DIE_TEMP>;
++ io-channel-names = "thermal";
++ };
++
++ pma8084_vadc: adc@3100 {
++ compatible = "qcom,spmi-vadc";
++ reg = <0x3100>;
++ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #io-channel-cells = <1>;
++
++ channel@8 {
++ reg = <VADC_DIE_TEMP>;
++ };
++
++ channel@9 {
++ reg = <VADC_REF_625MV>;
++ };
++
++ channel@a {
++ reg = <VADC_REF_1250MV>;
++ };
++
++ channel@c {
++ reg = <VADC_SPARE1>;
++ };
++
++ channel@e {
++ reg = <VADC_GND_REF>;
++ };
++
++ channel@f {
++ reg = <VADC_VDD_VADC>;
++ };
++ };
++ };
++
++ pma8084_1: pma8084@1 {
++ compatible = "qcom,pma8084", "qcom,spmi-pmic";
++ reg = <0x1 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++};
+diff --git a/arch/arm/boot/dts/qcom/pmx55.dtsi b/arch/arm/boot/dts/qcom/pmx55.dtsi
+new file mode 100644
+index 00000000000000..da0851173c6997
+--- /dev/null
++++ b/arch/arm/boot/dts/qcom/pmx55.dtsi
+@@ -0,0 +1,85 @@
++// SPDX-License-Identifier: BSD-3-Clause
++
++/*
++ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2020, Linaro Limited
++ */
++
++#include <dt-bindings/iio/qcom,spmi-vadc.h>
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/spmi/spmi.h>
++
++&spmi_bus {
++ pmic@8 {
++ compatible = "qcom,pmx55", "qcom,spmi-pmic";
++ reg = <0x8 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ pon@800 {
++ compatible = "qcom,pm8916-pon";
++ reg = <0x0800>;
++
++ status = "disabled";
++ };
++
++ pmx55_temp: temp-alarm@2400 {
++ compatible = "qcom,spmi-temp-alarm";
++ reg = <0x2400>;
++ interrupts = <0x8 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
++ io-channels = <&pmx55_adc ADC5_DIE_TEMP>;
++ io-channel-names = "thermal";
++ #thermal-sensor-cells = <0>;
++ };
++
++ pmx55_adc: adc@3100 {
++ compatible = "qcom,spmi-adc5";
++ reg = <0x3100>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #io-channel-cells = <1>;
++ interrupts = <0x8 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
++
++ channel@0 {
++ reg = <ADC5_REF_GND>;
++ qcom,pre-scaling = <1 1>;
++ label = "ref_gnd";
++ };
++
++ channel@1 {
++ reg = <ADC5_1P25VREF>;
++ qcom,pre-scaling = <1 1>;
++ label = "vref_1p25";
++ };
++
++ channel@6 {
++ reg = <ADC5_DIE_TEMP>;
++ qcom,pre-scaling = <1 1>;
++ label = "die_temp";
++ };
++
++ channel@9 {
++ reg = <ADC5_CHG_TEMP>;
++ qcom,pre-scaling = <1 1>;
++ label = "chg_temp";
++ };
++ };
++
++ pmx55_gpios: gpio@c000 {
++ compatible = "qcom,pmx55-gpio", "qcom,spmi-gpio";
++ reg = <0xc000>;
++ gpio-controller;
++ gpio-ranges = <&pmx55_gpios 0 0 11>;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++ };
++
++ pmic@9 {
++ compatible = "qcom,pmx55", "qcom,spmi-pmic";
++ reg = <0x9 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++};
+diff --git a/arch/arm/boot/dts/qcom/pmx65.dtsi b/arch/arm/boot/dts/qcom/pmx65.dtsi
+new file mode 100644
+index 00000000000000..1c7fdf59c1f56a
+--- /dev/null
++++ b/arch/arm/boot/dts/qcom/pmx65.dtsi
+@@ -0,0 +1,33 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/spmi/spmi.h>
++
++&spmi_bus {
++ pmic@1 {
++ compatible = "qcom,pmx65", "qcom,spmi-pmic";
++ reg = <1 SPMI_USID>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ pmx65_temp: temp-alarm@a00 {
++ compatible = "qcom,spmi-temp-alarm";
++ reg = <0xa00>;
++ interrupts = <0x1 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
++ #thermal-sensor-cells = <0>;
++ };
++
++ pmx65_gpios: gpio@8800 {
++ compatible = "qcom,pmx65-gpio", "qcom,spmi-gpio";
++ reg = <0x8800>;
++ gpio-controller;
++ gpio-ranges = <&pmx65_gpios 0 0 16>;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++ };
++};
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-asus-sparrow.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-asus-sparrow.dts
+index aa0e0e8d2a973e..a39f5a161b03bb 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8026-asus-sparrow.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8026-asus-sparrow.dts
+@@ -6,7 +6,7 @@
+ /dts-v1/;
+
+ #include "qcom-msm8226.dtsi"
+-#include "qcom-pm8226.dtsi"
++#include "pm8226.dtsi"
+
+ /delete-node/ &adsp_region;
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-huawei-sturgeon.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-huawei-sturgeon.dts
+index de19640efe5538..59b218042d32dd 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8026-huawei-sturgeon.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8026-huawei-sturgeon.dts
+@@ -6,7 +6,7 @@
+ /dts-v1/;
+
+ #include "qcom-msm8226.dtsi"
+-#include "qcom-pm8226.dtsi"
++#include "pm8226.dtsi"
+ #include <dt-bindings/input/ti-drv260x.h>
+
+ /delete-node/ &adsp_region;
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-lg-lenok.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-lg-lenok.dts
+index b887e5361ec3a2..feb78afef3a6e0 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8026-lg-lenok.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8026-lg-lenok.dts
+@@ -6,7 +6,7 @@
+ /dts-v1/;
+
+ #include "qcom-msm8226.dtsi"
+-#include "qcom-pm8226.dtsi"
++#include "pm8226.dtsi"
+
+ /delete-node/ &adsp_region;
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+index 884d99297d4cf1..cffc069712b2f1 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
+@@ -7,7 +7,7 @@
+
+ #include <dt-bindings/input/input.h>
+ #include "qcom-msm8226.dtsi"
+-#include "qcom-pm8226.dtsi"
++#include "pm8226.dtsi"
+
+ /delete-node/ &adsp_region;
+ /delete-node/ &smem_region;
+@@ -45,11 +45,11 @@ gpio-hall-sensor {
+
+ event-hall-sensor {
+ label = "Hall Effect Sensor";
+- gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+- interrupts = <&tlmm 110 IRQ_TYPE_EDGE_FALLING>;
++ gpios = <&tlmm 110 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ debounce-interval = <15>;
++ linux,can-disable;
+ wakeup-source;
+ };
+ };
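With the explicit interrupts line dropped, gpio-keys derives the interrupt from the gpios property itself, so the GPIO_ACTIVE_LOW flag now carries the polarity, and linux,can-disable lets userspace mask the line. A sketch of the minimal resulting pattern (illustrative):

	gpio-keys {
		compatible = "gpio-keys";

		switch-lid {
			gpios = <&tlmm 110 GPIO_ACTIVE_LOW>;	/* IRQ requested from the GPIO line */
			linux,input-type = <EV_SW>;
			linux,code = <SW_LID>;
		};
	};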
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom/qcom-apq8060-dragonboard.dts
+index db4c791b2e2fb2..48fd1a1feea342 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8060-dragonboard.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8060-dragonboard.dts
+@@ -72,7 +72,7 @@ cm3605 {
+ /* Trig on both edges - getting close or far away */
+ interrupts-extended = <&pm8058_gpio 34 IRQ_TYPE_EDGE_BOTH>;
+ /* MPP05 analog input to the XOADC */
+- io-channels = <&xoadc 0x00 0x05>;
++ io-channels = <&pm8058_xoadc 0x00 0x05>;
+ io-channel-names = "aout";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_cm3605_gpios>, <&dragon_cm3605_mpps>;
+@@ -945,7 +945,7 @@ irq-pins {
+ };
+ };
+
+-&xoadc {
++&pm8058_xoadc {
+ /* Reference voltage 2.2 V */
+ xoadc-ref-supply = <&pm8058_l18>;
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+index 516f0d2495e2d0..950adb63af7016 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-apq8064.dtsi
+@@ -738,7 +738,7 @@ pwrkey@1c {
+
+ xoadc: xoadc@197 {
+ compatible = "qcom,pm8921-adc";
+- reg = <197>;
++ reg = <0x197>;
+ interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <2>;
+ #size-cells = <0>;
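The reg fix here is subtler than it looks: unit addresses in node names are hexadecimal, so xoadc@197 must pair with reg = <0x197>; a decimal <197> (0xc5) silently mismatches the node name. Illustrative form:

	xoadc: xoadc@197 {
		reg = <0x197>;	/* must match the hex unit address in the node name */
	};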
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts
+index 6d1b2439ae3ace..950fa652f9856a 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8074-dragonboard.dts
+@@ -4,8 +4,8 @@
+ #include <dt-bindings/leds/common.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+ #include "qcom-msm8974.dtsi"
+-#include "qcom-pm8841.dtsi"
+-#include "qcom-pm8941.dtsi"
++#include "pm8841.dtsi"
++#include "pm8941.dtsi"
+
+ /delete-node/ &mpss_region;
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8084-ifc6540.dts b/arch/arm/boot/dts/qcom/qcom-apq8084-ifc6540.dts
+index 116e59a3b76d01..1df24c922be9f3 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8084-ifc6540.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8084-ifc6540.dts
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-apq8084.dtsi"
+-#include "qcom-pma8084.dtsi"
++#include "pma8084.dtsi"
+
+ / {
+ model = "Qualcomm APQ8084/IFC6540";
+diff --git a/arch/arm/boot/dts/qcom/qcom-apq8084-mtp.dts b/arch/arm/boot/dts/qcom/qcom-apq8084-mtp.dts
+index c6b6680248a69e..d4e6aee034afd1 100644
+--- a/arch/arm/boot/dts/qcom/qcom-apq8084-mtp.dts
++++ b/arch/arm/boot/dts/qcom/qcom-apq8084-mtp.dts
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-apq8084.dtsi"
+-#include "qcom-pma8084.dtsi"
++#include "pma8084.dtsi"
+
+ / {
+ model = "Qualcomm APQ 8084-MTP";
+diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615-wp8548.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615-wp8548.dtsi
+index 92c8003dac252d..dac3aa793f7115 100644
+--- a/arch/arm/boot/dts/qcom/qcom-mdm9615-wp8548.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-mdm9615-wp8548.dtsi
+@@ -76,7 +76,7 @@ reset-out-pins {
+ };
+ };
+
+-&pmicgpio {
++&pm8018_gpio {
+ usb_vbus_5v_pins: usb-vbus-5v-state {
+ pins = "gpio4";
+ function = "normal";
+diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+index fc4f52f9e9f7dc..c0a60bae703b11 100644
+--- a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
+@@ -47,14 +47,12 @@ cxo_board: cxo_board {
+ };
+ };
+
+- regulators {
+- vsdcc_fixed: vsdcc-regulator {
+- compatible = "regulator-fixed";
+- regulator-name = "SDCC Power";
+- regulator-min-microvolt = <2700000>;
+- regulator-max-microvolt = <2700000>;
+- regulator-always-on;
+- };
++ vsdcc_fixed: vsdcc-regulator {
++ compatible = "regulator-fixed";
++ regulator-name = "SDCC Power";
++ regulator-min-microvolt = <2700000>;
++ regulator-max-microvolt = <2700000>;
++ regulator-always-on;
+ };
+
+ soc: soc {
+@@ -263,7 +261,7 @@ qcom,ssbi@500000 {
+ reg = <0x500000 0x1000>;
+ qcom,controller-type = "pmic-arbiter";
+
+- pmicintc: pmic {
++ pm8018: pmic {
+ compatible = "qcom,pm8018", "qcom,pm8921";
+ interrupts = <GIC_PPI 226 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+@@ -274,38 +272,38 @@ pmicintc: pmic {
+ pwrkey@1c {
+ compatible = "qcom,pm8018-pwrkey", "qcom,pm8921-pwrkey";
+ reg = <0x1c>;
+- interrupt-parent = <&pmicintc>;
++ interrupt-parent = <&pm8018>;
+ interrupts = <50 IRQ_TYPE_EDGE_RISING>,
+ <51 IRQ_TYPE_EDGE_RISING>;
+ debounce = <15625>;
+ pull-up;
+ };
+
+- pmicmpp: mpps@50 {
++ pm8018_mpps: mpps@50 {
+ compatible = "qcom,pm8018-mpp", "qcom,ssbi-mpp";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x50>;
+ gpio-controller;
+ #gpio-cells = <2>;
+- gpio-ranges = <&pmicmpp 0 0 6>;
++ gpio-ranges = <&pm8018_mpps 0 0 6>;
+ };
+
+ rtc@11d {
+ compatible = "qcom,pm8018-rtc", "qcom,pm8921-rtc";
+- interrupt-parent = <&pmicintc>;
++ interrupt-parent = <&pm8018>;
+ interrupts = <39 IRQ_TYPE_EDGE_RISING>;
+ reg = <0x11d>;
+ allow-set-time;
+ };
+
+- pmicgpio: gpio@150 {
++ pm8018_gpio: gpio@150 {
+ compatible = "qcom,pm8018-gpio", "qcom,ssbi-gpio";
+ reg = <0x150>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+- gpio-ranges = <&pmicgpio 0 0 6>;
++ gpio-ranges = <&pm8018_gpio 0 0 6>;
+ #gpio-cells = <2>;
+ };
+ };
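These mdm9615 hunks rename the generic pmicintc/pmicgpio/pmicmpp labels to chip-specific pm8018 ones, which is what the wp8548 hunk above relies on; board files can then extend the PMIC blocks by label. A hedged sketch of such a board-side reference (pin name and state assumed, not from this patch):

	&pm8018_gpio {
		led_pins: led-state {
			pins = "gpio2";
			function = "normal";
		};
	};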
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+index 44f3f0127fd709..78738371f634cf 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-msm8226.dtsi
+@@ -404,8 +404,8 @@ mmcc: clock-controller@fd8c0000 {
+ <&gcc GPLL0_VOTE>,
+ <&gcc GPLL1_VOTE>,
+ <&rpmcc RPM_SMD_GFX3D_CLK_SRC>,
+- <0>,
+- <0>;
++ <&mdss_dsi0_phy 1>,
++ <&mdss_dsi0_phy 0>;
+ clock-names = "xo",
+ "mmss_gpll0_vote",
+ "gpll0_vote",
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi
+index 78023ed2fdf71f..9217ced108c42f 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-msm8660.dtsi
+@@ -80,13 +80,13 @@ sleep-clk {
+ */
+ iio-hwmon {
+ compatible = "iio-hwmon";
+- io-channels = <&xoadc 0x00 0x01>, /* Battery */
+- <&xoadc 0x00 0x02>, /* DC in (charger) */
+- <&xoadc 0x00 0x04>, /* VPH the main system voltage */
+- <&xoadc 0x00 0x0b>, /* Die temperature */
+- <&xoadc 0x00 0x0c>, /* Reference voltage 1.25V */
+- <&xoadc 0x00 0x0d>, /* Reference voltage 0.625V */
+- <&xoadc 0x00 0x0e>; /* Reference voltage 0.325V */
++ io-channels = <&pm8058_xoadc 0x00 0x01>, /* Battery */
++ <&pm8058_xoadc 0x00 0x02>, /* DC in (charger) */
++ <&pm8058_xoadc 0x00 0x04>, /* VPH the main system voltage */
++ <&pm8058_xoadc 0x00 0x0b>, /* Die temperature */
++ <&pm8058_xoadc 0x00 0x0c>, /* Reference voltage 1.25V */
++ <&pm8058_xoadc 0x00 0x0d>, /* Reference voltage 0.625V */
++ <&pm8058_xoadc 0x00 0x0e>; /* Reference voltage 0.325V */
+ };
+
+ soc: soc {
+@@ -390,7 +390,7 @@ pm8058_keypad: keypad@148 {
+ row-hold = <91500>;
+ };
+
+- xoadc: xoadc@197 {
++ pm8058_xoadc: xoadc@197 {
+ compatible = "qcom,pm8058-adc";
+ reg = <0x197>;
+ interrupts-extended = <&pm8058 76 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts
+index 60bdfddeae69eb..da99f770d4f57b 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974-lge-nexus5-hammerhead.dts
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-msm8974.dtsi"
+-#include "qcom-pm8841.dtsi"
+-#include "qcom-pm8941.dtsi"
++#include "pm8841.dtsi"
++#include "pm8941.dtsi"
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/leds/common.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974-sony-xperia-rhine.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974-sony-xperia-rhine.dtsi
+index 68a2f9094e536f..23ae474698aa7b 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974-sony-xperia-rhine.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974-sony-xperia-rhine.dtsi
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-msm8974.dtsi"
+-#include "qcom-pm8841.dtsi"
+-#include "qcom-pm8941.dtsi"
++#include "pm8841.dtsi"
++#include "pm8941.dtsi"
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/leds/common.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+index 706fef53767e10..4a8eb8b423290f 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+@@ -1194,7 +1194,7 @@ restart@fc4ab000 {
+
+ qfprom: qfprom@fc4bc000 {
+ compatible = "qcom,msm8974-qfprom", "qcom,qfprom";
+- reg = <0xfc4bc000 0x1000>;
++ reg = <0xfc4bc000 0x2100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-fairphone-fp2.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-fairphone-fp2.dts
+index 42d253b75dad02..6c4153689b39e5 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-fairphone-fp2.dts
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-fairphone-fp2.dts
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-msm8974pro.dtsi"
+-#include "qcom-pm8841.dtsi"
+-#include "qcom-pm8941.dtsi"
++#include "pm8841.dtsi"
++#include "pm8941.dtsi"
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/leds/common.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-oneplus-bacon.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-oneplus-bacon.dts
+index 8230d0e1d95d1d..c0ca264d8140db 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-oneplus-bacon.dts
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-oneplus-bacon.dts
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-msm8974pro.dtsi"
+-#include "qcom-pm8841.dtsi"
+-#include "qcom-pm8941.dtsi"
++#include "pm8841.dtsi"
++#include "pm8941.dtsi"
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts
+index 3e2c86591ee2f7..325feb89b343ab 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-samsung-klte.dts
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-msm8974pro.dtsi"
+-#include "qcom-pma8084.dtsi"
++#include "pma8084.dtsi"
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+ #include <dt-bindings/leds/common.h>
+diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+index 11468d1409f722..0798cce3dbea01 100644
+--- a/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
++++ b/arch/arm/boot/dts/qcom/qcom-msm8974pro-sony-xperia-shinano-castor.dts
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "qcom-msm8974pro.dtsi"
+-#include "qcom-pm8841.dtsi"
+-#include "qcom-pm8941.dtsi"
++#include "pm8841.dtsi"
++#include "pm8941.dtsi"
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/leds/common.h>
+ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+diff --git a/arch/arm/boot/dts/qcom/qcom-pm8226.dtsi b/arch/arm/boot/dts/qcom/qcom-pm8226.dtsi
+deleted file mode 100644
+index 2413778f371507..00000000000000
+--- a/arch/arm/boot/dts/qcom/qcom-pm8226.dtsi
++++ /dev/null
+@@ -1,180 +0,0 @@
+-// SPDX-License-Identifier: BSD-3-Clause
+-#include <dt-bindings/iio/qcom,spmi-vadc.h>
+-#include <dt-bindings/input/linux-event-codes.h>
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/spmi/spmi.h>
+-
+-/ {
+- thermal-zones {
+- pm8226-thermal {
+- polling-delay-passive = <100>;
+- polling-delay = <0>;
+- thermal-sensors = <&pm8226_temp>;
+-
+- trips {
+- trip0 {
+- temperature = <105000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- trip1 {
+- temperature = <125000>;
+- hysteresis = <2000>;
+- type = "hot";
+- };
+-
+- crit {
+- temperature = <145000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+- };
+- };
+-};
+-
+-&spmi_bus {
+- pm8226_0: pm8226@0 {
+- compatible = "qcom,pm8226", "qcom,spmi-pmic";
+- reg = <0x0 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- pon@800 {
+- compatible = "qcom,pm8916-pon";
+- reg = <0x800>;
+-
+- pwrkey {
+- compatible = "qcom,pm8941-pwrkey";
+- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+- debounce = <15625>;
+- bias-pull-up;
+- linux,code = <KEY_POWER>;
+- };
+-
+- pm8226_resin: resin {
+- compatible = "qcom,pm8941-resin";
+- interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+- debounce = <15625>;
+- bias-pull-up;
+- status = "disabled";
+- };
+- };
+-
+- smbb: charger@1000 {
+- compatible = "qcom,pm8226-charger";
+- reg = <0x1000>;
+- interrupts = <0x0 0x10 7 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x10 5 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x10 4 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x12 1 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x12 0 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x13 2 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x13 1 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x14 1 IRQ_TYPE_EDGE_BOTH>;
+- interrupt-names = "chg-done",
+- "chg-fast",
+- "chg-trkl",
+- "bat-temp-ok",
+- "bat-present",
+- "chg-gone",
+- "usb-valid",
+- "dc-valid";
+-
+- chg_otg: otg-vbus { };
+- };
+-
+- pm8226_temp: temp-alarm@2400 {
+- compatible = "qcom,spmi-temp-alarm";
+- reg = <0x2400>;
+- interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
+- io-channels = <&pm8226_vadc VADC_DIE_TEMP>;
+- io-channel-names = "thermal";
+- #thermal-sensor-cells = <0>;
+- };
+-
+- pm8226_vadc: adc@3100 {
+- compatible = "qcom,spmi-vadc";
+- reg = <0x3100>;
+- interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- #io-channel-cells = <1>;
+-
+- channel@7 {
+- reg = <VADC_VSYS>;
+- qcom,pre-scaling = <1 3>;
+- label = "vph_pwr";
+- };
+- channel@8 {
+- reg = <VADC_DIE_TEMP>;
+- label = "die_temp";
+- };
+- channel@9 {
+- reg = <VADC_REF_625MV>;
+- label = "ref_625mv";
+- };
+- channel@a {
+- reg = <VADC_REF_1250MV>;
+- label = "ref_1250mv";
+- };
+- channel@e {
+- reg = <VADC_GND_REF>;
+- };
+- channel@f {
+- reg = <VADC_VDD_VADC>;
+- };
+- };
+-
+- pm8226_iadc: adc@3600 {
+- compatible = "qcom,pm8226-iadc", "qcom,spmi-iadc";
+- reg = <0x3600>;
+- interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
+- };
+-
+- rtc@6000 {
+- compatible = "qcom,pm8941-rtc";
+- reg = <0x6000>, <0x6100>;
+- reg-names = "rtc", "alarm";
+- interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+- };
+-
+- pm8226_mpps: mpps@a000 {
+- compatible = "qcom,pm8226-mpp", "qcom,spmi-mpp";
+- reg = <0xa000>;
+- gpio-controller;
+- #gpio-cells = <2>;
+- gpio-ranges = <&pm8226_mpps 0 0 8>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- pm8226_gpios: gpio@c000 {
+- compatible = "qcom,pm8226-gpio", "qcom,spmi-gpio";
+- reg = <0xc000>;
+- gpio-controller;
+- #gpio-cells = <2>;
+- gpio-ranges = <&pm8226_gpios 0 0 8>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+- };
+-
+- pm8226_1: pm8226@1 {
+- compatible = "qcom,pm8226", "qcom,spmi-pmic";
+- reg = <0x1 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- pm8226_spmi_regulators: regulators {
+- compatible = "qcom,pm8226-regulators";
+- };
+-
+- pm8226_vib: vibrator@c000 {
+- compatible = "qcom,pm8916-vib";
+- reg = <0xc000>;
+- status = "disabled";
+- };
+- };
+-};
+diff --git a/arch/arm/boot/dts/qcom/qcom-pm8841.dtsi b/arch/arm/boot/dts/qcom/qcom-pm8841.dtsi
+deleted file mode 100644
+index 3bf2ce5c86a641..00000000000000
+--- a/arch/arm/boot/dts/qcom/qcom-pm8841.dtsi
++++ /dev/null
+@@ -1,68 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/spmi/spmi.h>
+-
+-
+-/ {
+- thermal-zones {
+- pm8841-thermal {
+- polling-delay-passive = <100>;
+- polling-delay = <0>;
+- thermal-sensors = <&pm8841_temp>;
+-
+- trips {
+- trip0 {
+- temperature = <105000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- trip1 {
+- temperature = <125000>;
+- hysteresis = <2000>;
+- type = "hot";
+- };
+-
+- crit {
+- temperature = <140000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+- };
+- };
+-};
+-
+-&spmi_bus {
+-
+- pm8841_0: pm8841@4 {
+- compatible = "qcom,pm8841", "qcom,spmi-pmic";
+- reg = <0x4 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- pm8841_mpps: mpps@a000 {
+- compatible = "qcom,pm8841-mpp", "qcom,spmi-mpp";
+- reg = <0xa000>;
+- gpio-controller;
+- #gpio-cells = <2>;
+- gpio-ranges = <&pm8841_mpps 0 0 4>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- pm8841_temp: temp-alarm@2400 {
+- compatible = "qcom,spmi-temp-alarm";
+- reg = <0x2400>;
+- interrupts = <4 0x24 0 IRQ_TYPE_EDGE_RISING>;
+- #thermal-sensor-cells = <0>;
+- };
+- };
+-
+- pm8841_1: pm8841@5 {
+- compatible = "qcom,pm8841", "qcom,spmi-pmic";
+- reg = <0x5 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- };
+-};
+diff --git a/arch/arm/boot/dts/qcom/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom/qcom-pm8941.dtsi
+deleted file mode 100644
+index ed0ba591c75581..00000000000000
+--- a/arch/arm/boot/dts/qcom/qcom-pm8941.dtsi
++++ /dev/null
+@@ -1,254 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-#include <dt-bindings/iio/qcom,spmi-vadc.h>
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/spmi/spmi.h>
+-
+-
+-/ {
+- thermal-zones {
+- pm8941-thermal {
+- polling-delay-passive = <100>;
+- polling-delay = <0>;
+- thermal-sensors = <&pm8941_temp>;
+-
+- trips {
+- trip0 {
+- temperature = <105000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- trip1 {
+- temperature = <125000>;
+- hysteresis = <2000>;
+- type = "hot";
+- };
+-
+- crit {
+- temperature = <145000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+- };
+- };
+-};
+-
+-&spmi_bus {
+-
+- pm8941_0: pm8941@0 {
+- compatible = "qcom,pm8941", "qcom,spmi-pmic";
+- reg = <0x0 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- rtc@6000 {
+- compatible = "qcom,pm8941-rtc";
+- reg = <0x6000>,
+- <0x6100>;
+- reg-names = "rtc", "alarm";
+- interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+- };
+-
+- pon@800 {
+- compatible = "qcom,pm8941-pon";
+- reg = <0x800>;
+-
+- pwrkey {
+- compatible = "qcom,pm8941-pwrkey";
+- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+- debounce = <15625>;
+- bias-pull-up;
+- };
+-
+- pm8941_resin: resin {
+- compatible = "qcom,pm8941-resin";
+- interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+- debounce = <15625>;
+- bias-pull-up;
+- status = "disabled";
+- };
+- };
+-
+- usb_id: usb-detect@900 {
+- compatible = "qcom,pm8941-misc";
+- reg = <0x900>;
+- interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
+- interrupt-names = "usb_id";
+- };
+-
+- smbb: charger@1000 {
+- compatible = "qcom,pm8941-charger";
+- reg = <0x1000>;
+- interrupts = <0x0 0x10 7 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x10 5 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x10 4 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x12 1 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x12 0 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x13 2 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x13 1 IRQ_TYPE_EDGE_BOTH>,
+- <0x0 0x14 1 IRQ_TYPE_EDGE_BOTH>;
+- interrupt-names = "chg-done",
+- "chg-fast",
+- "chg-trkl",
+- "bat-temp-ok",
+- "bat-present",
+- "chg-gone",
+- "usb-valid",
+- "dc-valid";
+-
+- usb-otg-in-supply = <&pm8941_5vs1>;
+-
+- chg_otg: otg-vbus { };
+- };
+-
+- pm8941_gpios: gpio@c000 {
+- compatible = "qcom,pm8941-gpio", "qcom,spmi-gpio";
+- reg = <0xc000>;
+- gpio-controller;
+- gpio-ranges = <&pm8941_gpios 0 0 36>;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+-
+- boost_bypass_n_pin: boost-bypass-state {
+- pins = "gpio21";
+- function = "normal";
+- };
+- };
+-
+- pm8941_mpps: mpps@a000 {
+- compatible = "qcom,pm8941-mpp", "qcom,spmi-mpp";
+- reg = <0xa000>;
+- gpio-controller;
+- #gpio-cells = <2>;
+- gpio-ranges = <&pm8941_mpps 0 0 8>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- pm8941_temp: temp-alarm@2400 {
+- compatible = "qcom,spmi-temp-alarm";
+- reg = <0x2400>;
+- interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
+- io-channels = <&pm8941_vadc VADC_DIE_TEMP>;
+- io-channel-names = "thermal";
+- #thermal-sensor-cells = <0>;
+- };
+-
+- pm8941_vadc: adc@3100 {
+- compatible = "qcom,spmi-vadc";
+- reg = <0x3100>;
+- interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- #io-channel-cells = <1>;
+-
+-
+- channel@6 {
+- reg = <VADC_VBAT_SNS>;
+- };
+-
+- channel@8 {
+- reg = <VADC_DIE_TEMP>;
+- };
+-
+- channel@9 {
+- reg = <VADC_REF_625MV>;
+- };
+-
+- channel@a {
+- reg = <VADC_REF_1250MV>;
+- };
+-
+- channel@e {
+- reg = <VADC_GND_REF>;
+- };
+-
+- channel@f {
+- reg = <VADC_VDD_VADC>;
+- };
+-
+- channel@30 {
+- reg = <VADC_LR_MUX1_BAT_THERM>;
+- };
+- };
+-
+- pm8941_iadc: adc@3600 {
+- compatible = "qcom,pm8941-iadc", "qcom,spmi-iadc";
+- reg = <0x3600>;
+- interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
+- qcom,external-resistor-micro-ohms = <10000>;
+- };
+-
+- pm8941_coincell: charger@2800 {
+- compatible = "qcom,pm8941-coincell";
+- reg = <0x2800>;
+- status = "disabled";
+- };
+- };
+-
+- pm8941_1: pm8941@1 {
+- compatible = "qcom,pm8941", "qcom,spmi-pmic";
+- reg = <0x1 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- pm8941_lpg: pwm {
+- compatible = "qcom,pm8941-lpg";
+-
+- #address-cells = <1>;
+- #size-cells = <0>;
+- #pwm-cells = <2>;
+-
+- status = "disabled";
+- };
+-
+- pm8941_vib: vibrator@c000 {
+- compatible = "qcom,pm8916-vib";
+- reg = <0xc000>;
+- status = "disabled";
+- };
+-
+- pm8941_wled: wled@d800 {
+- compatible = "qcom,pm8941-wled";
+- reg = <0xd800>;
+- label = "backlight";
+-
+- status = "disabled";
+- };
+-
+- regulators {
+- compatible = "qcom,pm8941-regulators";
+- interrupts = <0x1 0x83 0x2 0>, <0x1 0x84 0x2 0>;
+- interrupt-names = "ocp-5vs1", "ocp-5vs2";
+- vin_5vs-supply = <&pm8941_5v>;
+-
+- pm8941_5v: s4 {
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- regulator-enable-ramp-delay = <500>;
+- };
+-
+- pm8941_5vs1: 5vs1 {
+- regulator-enable-ramp-delay = <1000>;
+- regulator-pull-down;
+- regulator-over-current-protection;
+- qcom,ocp-max-retries = <10>;
+- qcom,ocp-retry-delay = <30>;
+- qcom,vs-soft-start-strength = <0>;
+- regulator-initial-mode = <1>;
+- };
+-
+- pm8941_5vs2: 5vs2 {
+- regulator-enable-ramp-delay = <1000>;
+- regulator-pull-down;
+- regulator-over-current-protection;
+- qcom,ocp-max-retries = <10>;
+- qcom,ocp-retry-delay = <30>;
+- qcom,vs-soft-start-strength = <0>;
+- regulator-initial-mode = <1>;
+- };
+- };
+- };
+-};
+diff --git a/arch/arm/boot/dts/qcom/qcom-pma8084.dtsi b/arch/arm/boot/dts/qcom/qcom-pma8084.dtsi
+deleted file mode 100644
+index 2985f4805b93ee..00000000000000
+--- a/arch/arm/boot/dts/qcom/qcom-pma8084.dtsi
++++ /dev/null
+@@ -1,99 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-#include <dt-bindings/iio/qcom,spmi-vadc.h>
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/spmi/spmi.h>
+-
+-&spmi_bus {
+-
+- pma8084_0: pma8084@0 {
+- compatible = "qcom,pma8084", "qcom,spmi-pmic";
+- reg = <0x0 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- rtc@6000 {
+- compatible = "qcom,pm8941-rtc";
+- reg = <0x6000>,
+- <0x6100>;
+- reg-names = "rtc", "alarm";
+- interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+- };
+-
+- pwrkey@800 {
+- compatible = "qcom,pm8941-pwrkey";
+- reg = <0x800>;
+- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+- debounce = <15625>;
+- bias-pull-up;
+- };
+-
+- pma8084_gpios: gpio@c000 {
+- compatible = "qcom,pma8084-gpio", "qcom,spmi-gpio";
+- reg = <0xc000>;
+- gpio-controller;
+- gpio-ranges = <&pma8084_gpios 0 0 22>;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- pma8084_mpps: mpps@a000 {
+- compatible = "qcom,pma8084-mpp", "qcom,spmi-mpp";
+- reg = <0xa000>;
+- gpio-controller;
+- #gpio-cells = <2>;
+- gpio-ranges = <&pma8084_mpps 0 0 8>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+-
+- pma8084_temp: temp-alarm@2400 {
+- compatible = "qcom,spmi-temp-alarm";
+- reg = <0x2400>;
+- interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
+- #thermal-sensor-cells = <0>;
+- io-channels = <&pma8084_vadc VADC_DIE_TEMP>;
+- io-channel-names = "thermal";
+- };
+-
+- pma8084_vadc: adc@3100 {
+- compatible = "qcom,spmi-vadc";
+- reg = <0x3100>;
+- interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- #io-channel-cells = <1>;
+-
+- channel@8 {
+- reg = <VADC_DIE_TEMP>;
+- };
+-
+- channel@9 {
+- reg = <VADC_REF_625MV>;
+- };
+-
+- channel@a {
+- reg = <VADC_REF_1250MV>;
+- };
+-
+- channel@c {
+- reg = <VADC_SPARE1>;
+- };
+-
+- channel@e {
+- reg = <VADC_GND_REF>;
+- };
+-
+- channel@f {
+- reg = <VADC_VDD_VADC>;
+- };
+- };
+- };
+-
+- pma8084_1: pma8084@1 {
+- compatible = "qcom,pma8084", "qcom,spmi-pmic";
+- reg = <0x1 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- };
+-};
+diff --git a/arch/arm/boot/dts/qcom/qcom-pmx55.dtsi b/arch/arm/boot/dts/qcom/qcom-pmx55.dtsi
+deleted file mode 100644
+index da0851173c6997..00000000000000
+--- a/arch/arm/boot/dts/qcom/qcom-pmx55.dtsi
++++ /dev/null
+@@ -1,85 +0,0 @@
+-// SPDX-License-Identifier: BSD-3-Clause
+-
+-/*
+- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+- * Copyright (c) 2020, Linaro Limited
+- */
+-
+-#include <dt-bindings/iio/qcom,spmi-vadc.h>
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/spmi/spmi.h>
+-
+-&spmi_bus {
+- pmic@8 {
+- compatible = "qcom,pmx55", "qcom,spmi-pmic";
+- reg = <0x8 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- pon@800 {
+- compatible = "qcom,pm8916-pon";
+- reg = <0x0800>;
+-
+- status = "disabled";
+- };
+-
+- pmx55_temp: temp-alarm@2400 {
+- compatible = "qcom,spmi-temp-alarm";
+- reg = <0x2400>;
+- interrupts = <0x8 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+- io-channels = <&pmx55_adc ADC5_DIE_TEMP>;
+- io-channel-names = "thermal";
+- #thermal-sensor-cells = <0>;
+- };
+-
+- pmx55_adc: adc@3100 {
+- compatible = "qcom,spmi-adc5";
+- reg = <0x3100>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- #io-channel-cells = <1>;
+- interrupts = <0x8 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+-
+- channel@0 {
+- reg = <ADC5_REF_GND>;
+- qcom,pre-scaling = <1 1>;
+- label = "ref_gnd";
+- };
+-
+- channel@1 {
+- reg = <ADC5_1P25VREF>;
+- qcom,pre-scaling = <1 1>;
+- label = "vref_1p25";
+- };
+-
+- channel@6 {
+- reg = <ADC5_DIE_TEMP>;
+- qcom,pre-scaling = <1 1>;
+- label = "die_temp";
+- };
+-
+- channel@9 {
+- reg = <ADC5_CHG_TEMP>;
+- qcom,pre-scaling = <1 1>;
+- label = "chg_temp";
+- };
+- };
+-
+- pmx55_gpios: gpio@c000 {
+- compatible = "qcom,pmx55-gpio", "qcom,spmi-gpio";
+- reg = <0xc000>;
+- gpio-controller;
+- gpio-ranges = <&pmx55_gpios 0 0 11>;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+- };
+-
+- pmic@9 {
+- compatible = "qcom,pmx55", "qcom,spmi-pmic";
+- reg = <0x9 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- };
+-};
+diff --git a/arch/arm/boot/dts/qcom/qcom-pmx65.dtsi b/arch/arm/boot/dts/qcom/qcom-pmx65.dtsi
+deleted file mode 100644
+index 1c7fdf59c1f56a..00000000000000
+--- a/arch/arm/boot/dts/qcom/qcom-pmx65.dtsi
++++ /dev/null
+@@ -1,33 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+- */
+-
+-#include <dt-bindings/interrupt-controller/irq.h>
+-#include <dt-bindings/spmi/spmi.h>
+-
+-&spmi_bus {
+- pmic@1 {
+- compatible = "qcom,pmx65", "qcom,spmi-pmic";
+- reg = <1 SPMI_USID>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- pmx65_temp: temp-alarm@a00 {
+- compatible = "qcom,spmi-temp-alarm";
+- reg = <0xa00>;
+- interrupts = <0x1 0xa 0x0 IRQ_TYPE_EDGE_BOTH>;
+- #thermal-sensor-cells = <0>;
+- };
+-
+- pmx65_gpios: gpio@8800 {
+- compatible = "qcom,pmx65-gpio", "qcom,spmi-gpio";
+- reg = <0x8800>;
+- gpio-controller;
+- gpio-ranges = <&pmx65_gpios 0 0 16>;
+- #gpio-cells = <2>;
+- interrupt-controller;
+- #interrupt-cells = <2>;
+- };
+- };
+-};
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55-mtp.dts b/arch/arm/boot/dts/qcom/qcom-sdx55-mtp.dts
+index 7e97ad5803d87b..2470693619090b 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55-mtp.dts
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55-mtp.dts
+@@ -9,7 +9,7 @@
+ #include "qcom-sdx55.dtsi"
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ #include <arm64/qcom/pm8150b.dtsi>
+-#include "qcom-pmx55.dtsi"
++#include "pmx55.dtsi"
+
+ / {
+ model = "Qualcomm Technologies, Inc. SDX55 MTP";
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55-t55.dts b/arch/arm/boot/dts/qcom/qcom-sdx55-t55.dts
+index 51058b06527979..082f7ed1a01fb8 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55-t55.dts
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55-t55.dts
+@@ -8,7 +8,7 @@
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ #include "qcom-sdx55.dtsi"
+-#include "qcom-pmx55.dtsi"
++#include "pmx55.dtsi"
+
+ / {
+ model = "Thundercomm T55 Development Kit";
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55-telit-fn980-tlb.dts b/arch/arm/boot/dts/qcom/qcom-sdx55-telit-fn980-tlb.dts
+index 8fadc6e70692a5..e336a15b45c4c6 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55-telit-fn980-tlb.dts
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55-telit-fn980-tlb.dts
+@@ -8,7 +8,7 @@
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ #include "qcom-sdx55.dtsi"
+-#include "qcom-pmx55.dtsi"
++#include "pmx55.dtsi"
+
+ / {
+ model = "Telit FN980 TLB";
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+index 55ce87b7525394..f9ad5abfbd28bf 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+@@ -345,10 +345,10 @@ pcie_rc: pcie@1c00000 {
+ "msi8";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+- interrupt-map = <0 0 0 1 &intc 0 0 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+- <0 0 0 2 &intc 0 0 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+- <0 0 0 3 &intc 0 0 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+- <0 0 0 4 &intc 0 0 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
++ interrupt-map = <0 0 0 1 &intc 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
++ <0 0 0 2 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
++ <0 0 0 3 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
++ <0 0 0 4 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_PCIE_PIPE_CLK>,
+ <&gcc GCC_PCIE_AUX_CLK>,
+@@ -592,10 +592,10 @@ usb: usb@a6f8800 {
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 51 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -619,7 +619,7 @@ pdc: interrupt-controller@b210000 {
+ compatible = "qcom,sdx55-pdc", "qcom,pdc";
+ reg = <0x0b210000 0x30000>;
+ qcom,pdc-ranges = <0 179 52>;
+- #interrupt-cells = <3>;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
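Reducing the PDC to two interrupt cells matches how the usb node above now refers to it: each specifier is just the PDC pin and the trigger type. Illustrative consumer line (values taken from the usb hunk above):

	interrupts-extended = <&pdc 51 IRQ_TYPE_LEVEL_HIGH>;	/* two cells: pin, trigger */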
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts b/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts
+index fcf1c51c5e7a7a..b87c5434cc29e4 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts
++++ b/arch/arm/boot/dts/qcom/qcom-sdx65-mtp.dts
+@@ -8,7 +8,7 @@
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ #include <arm64/qcom/pmk8350.dtsi>
+ #include <arm64/qcom/pm7250b.dtsi>
+-#include "qcom-pmx65.dtsi"
++#include "pmx65.dtsi"
+
+ / {
+ model = "Qualcomm Technologies, Inc. SDX65 MTP";
+diff --git a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
+index 1a3583029a649e..271899c861c01c 100644
+--- a/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-sdx65.dtsi
+@@ -338,7 +338,7 @@ pcie_ep: pcie-ep@1c00000 {
+ power-domains = <&gcc PCIE_GDSC>;
+
+ phys = <&pcie_phy>;
+- phy-names = "pcie-phy";
++ phy-names = "pciephy";
+
+ max-link-speed = <3>;
+ num-lanes = <2>;
+@@ -530,7 +530,7 @@ restart@c264000 {
+ reg = <0x0c264000 0x1000>;
+ };
+
+- spmi_bus: qcom,spmi@c440000 {
++ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0xc440000 0xd00>,
+ <0xc600000 0x2000000>,
+diff --git a/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts
+index e81a7213d30477..4282bafbb50431 100644
+--- a/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts
++++ b/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts
+@@ -209,6 +209,18 @@ &cmt1 {
+ status = "okay";
+ };
+
++&extal1_clk {
++ clock-frequency = <26000000>;
++};
++
++&extal2_clk {
++ clock-frequency = <48000000>;
++};
++
++&extalr_clk {
++ clock-frequency = <32768>;
++};
++
+ &pfc {
+ scifa0_pins: scifa0 {
+ groups = "scifa0_data";
+diff --git a/arch/arm/boot/dts/renesas/r8a73a4.dtsi b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
+index c39066967053f0..d1f4cbd099efb4 100644
+--- a/arch/arm/boot/dts/renesas/r8a73a4.dtsi
++++ b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
+@@ -450,17 +450,20 @@ clocks {
+ extalr_clk: extalr {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+- clock-frequency = <32768>;
++ /* This value must be overridden by the board. */
++ clock-frequency = <0>;
+ };
+ extal1_clk: extal1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+- clock-frequency = <25000000>;
++ /* This value must be overridden by the board. */
++ clock-frequency = <0>;
+ };
+ extal2_clk: extal2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+- clock-frequency = <48000000>;
++ /* This value must be overridden by the board. */
++ clock-frequency = <0>;
+ };
+ fsiack_clk: fsiack {
+ compatible = "fixed-clock";
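Zeroing the external-crystal rates in the SoC dtsi forces each board to state what it actually fits, as the ape6evm hunk earlier in this patch does; a fixed-clock left at 0 Hz fails visibly instead of running at a silently wrong default. Board-side override, mirroring that hunk:

	&extal1_clk {
		clock-frequency = <26000000>;	/* rate of the crystal fitted on this board */
	};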
+diff --git a/arch/arm/boot/dts/renesas/r8a7790-lager.dts b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
+index 5ad5349a50dc9b..ab7e9fa90b9fe2 100644
+--- a/arch/arm/boot/dts/renesas/r8a7790-lager.dts
++++ b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
+@@ -437,6 +437,7 @@ pmic@58 {
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+diff --git a/arch/arm/boot/dts/renesas/r8a7790-stout.dts b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
+index fe14727eefe1ec..25956661a87541 100644
+--- a/arch/arm/boot/dts/renesas/r8a7790-stout.dts
++++ b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
+@@ -332,6 +332,7 @@ pmic@58 {
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ onkey {
+ compatible = "dlg,da9063-onkey";
+diff --git a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
+index 26a40782cc899b..4a76be68887b43 100644
+--- a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
++++ b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
+@@ -800,6 +800,7 @@ pmic@58 {
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+index ec0a20d5130d6f..fcc9a2313e1dfd 100644
+--- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts
++++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+@@ -389,6 +389,7 @@ pmic@5a {
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ watchdog {
+ compatible = "dlg,da9063-watchdog";
+diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+index c66de9dd12dfca..20963c9bbf0ada 100644
+--- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
++++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+@@ -239,7 +239,7 @@ du1_pins: du1 {
+ };
+
+ keyboard_pins: keyboard {
+- pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
++ pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2";
+ bias-pull-up;
+ };
+
+@@ -330,6 +330,7 @@ pmic@58 {
+ interrupt-parent = <&irqc>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+diff --git a/arch/arm/boot/dts/renesas/r8a7793-gose.dts b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
+index 79b537b2464266..9358fc7d0e9f6f 100644
+--- a/arch/arm/boot/dts/renesas/r8a7793-gose.dts
++++ b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
+@@ -735,6 +735,7 @@ pmic@58 {
+ interrupt-parent = <&irqc0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+diff --git a/arch/arm/boot/dts/renesas/r8a7794-alt.dts b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
+index 4d93319674c6ef..3a9db455ddec94 100644
+--- a/arch/arm/boot/dts/renesas/r8a7794-alt.dts
++++ b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
+@@ -458,6 +458,7 @@ pmic@58 {
+ interrupt-parent = <&gpio3>;
+ interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+diff --git a/arch/arm/boot/dts/renesas/r8a7794-silk.dts b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
+index b7af1befa126ba..b825f2e25dd060 100644
+--- a/arch/arm/boot/dts/renesas/r8a7794-silk.dts
++++ b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
+@@ -424,6 +424,7 @@ pmic@58 {
+ interrupt-parent = <&gpio3>;
+ interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ onkey {
+ compatible = "dlg,da9063-onkey";
+diff --git a/arch/arm/boot/dts/rockchip/rk3036.dtsi b/arch/arm/boot/dts/rockchip/rk3036.dtsi
+index 78686fc72ce69a..c420c7c642cb0b 100644
+--- a/arch/arm/boot/dts/rockchip/rk3036.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk3036.dtsi
+@@ -402,12 +402,20 @@ hdmi: hdmi@20034000 {
+ pinctrl-0 = <&hdmi_ctl>;
+ status = "disabled";
+
+- hdmi_in: port {
++ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- hdmi_in_vop: endpoint@0 {
++
++ hdmi_in: port@0 {
+ reg = <0>;
+- remote-endpoint = <&vop_out_hdmi>;
++
++ hdmi_in_vop: endpoint {
++ remote-endpoint = <&vop_out_hdmi>;
++ };
++ };
++
++ hdmi_out: port@1 {
++ reg = <1>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/rockchip/rk3066a.dtsi b/arch/arm/boot/dts/rockchip/rk3066a.dtsi
+index de9915d946f74f..b98d5e357baf35 100644
+--- a/arch/arm/boot/dts/rockchip/rk3066a.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk3066a.dtsi
+@@ -123,6 +123,7 @@ hdmi: hdmi@10116000 {
+ pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+ power-domains = <&power RK3066_PD_VIO>;
+ rockchip,grf = <&grf>;
++ #sound-dai-cells = <0>;
+ status = "disabled";
+
+ ports {
+diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
+index 88a4b0d6d928d4..80d81af5fe0efe 100644
+--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
+@@ -795,7 +795,7 @@ sdmmc_wp: sdmmc-wp {
+ };
+
+ sdmmc_pwren: sdmmc-pwren {
+- rockchip,pins = <1 RK_PB6 1 &pcfg_pull_default>;
++ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_default>;
+ };
+
+ sdmmc_bus4: sdmmc-bus4 {
+diff --git a/arch/arm/boot/dts/rockchip/rk322x.dtsi b/arch/arm/boot/dts/rockchip/rk322x.dtsi
+index ffc16d6b97e1bd..03d9baddcbabaa 100644
+--- a/arch/arm/boot/dts/rockchip/rk322x.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk322x.dtsi
+@@ -732,14 +732,20 @@ hdmi: hdmi@200a0000 {
+ status = "disabled";
+
+ ports {
+- hdmi_in: port {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- hdmi_in_vop: endpoint@0 {
+- reg = <0>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ hdmi_in: port@0 {
++ reg = <0>;
++
++ hdmi_in_vop: endpoint {
+ remote-endpoint = <&vop_out_hdmi>;
+ };
+ };
++
++ hdmi_out: port@1 {
++ reg = <1>;
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/rockchip/rk3288.dtsi b/arch/arm/boot/dts/rockchip/rk3288.dtsi
+index cb9cdaddffd429..8593a835993767 100644
+--- a/arch/arm/boot/dts/rockchip/rk3288.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk3288.dtsi
+@@ -1231,27 +1231,37 @@ hdmi: hdmi@ff980000 {
+ compatible = "rockchip,rk3288-dw-hdmi";
+ reg = <0x0 0xff980000 0x0 0x20000>;
+ reg-io-width = <4>;
+- #sound-dai-cells = <0>;
+- rockchip,grf = <&grf>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
+ clock-names = "iahb", "isfr", "cec";
+ power-domains = <&power RK3288_PD_VIO>;
++ rockchip,grf = <&grf>;
++ #sound-dai-cells = <0>;
+ status = "disabled";
+
+ ports {
+- hdmi_in: port {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ hdmi_in: port@0 {
++ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
++
+ hdmi_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_hdmi>;
+ };
++
+ hdmi_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_hdmi>;
+ };
+ };
++
++ hdmi_out: port@1 {
++ reg = <1>;
++ };
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/rockchip/rv1108.dtsi b/arch/arm/boot/dts/rockchip/rv1108.dtsi
+index abf3006f0a8424..f3291f3bbc6fd2 100644
+--- a/arch/arm/boot/dts/rockchip/rv1108.dtsi
++++ b/arch/arm/boot/dts/rockchip/rv1108.dtsi
+@@ -196,7 +196,6 @@ spi: spi@10270000 {
+ pwm4: pwm@10280000 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x10280000 0x10>;
+- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -208,7 +207,6 @@ pwm4: pwm@10280000 {
+ pwm5: pwm@10280010 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x10280010 0x10>;
+- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -220,7 +218,6 @@ pwm5: pwm@10280010 {
+ pwm6: pwm@10280020 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x10280020 0x10>;
+- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -232,7 +229,6 @@ pwm6: pwm@10280020 {
+ pwm7: pwm@10280030 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x10280030 0x10>;
+- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -386,7 +382,6 @@ i2c0: i2c@20000000 {
+ pwm0: pwm@20040000 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x20040000 0x10>;
+- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -398,7 +393,6 @@ pwm0: pwm@20040000 {
+ pwm1: pwm@20040010 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x20040010 0x10>;
+- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -410,7 +404,6 @@ pwm1: pwm@20040010 {
+ pwm2: pwm@20040020 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x20040020 0x10>;
+- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+@@ -422,7 +415,6 @@ pwm2: pwm@20040020 {
+ pwm3: pwm@20040030 {
+ compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
+ reg = <0x20040030 0x10>;
+- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
+ clock-names = "pwm", "pclk";
+ pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/samsung/exynos4.dtsi b/arch/arm/boot/dts/samsung/exynos4.dtsi
+index f775b9377a38b5..7f981b5c0d64b5 100644
+--- a/arch/arm/boot/dts/samsung/exynos4.dtsi
++++ b/arch/arm/boot/dts/samsung/exynos4.dtsi
+@@ -203,16 +203,16 @@ dsi_0: dsi@11c80000 {
+
+ camera: camera@11800000 {
+ compatible = "samsung,fimc";
++ ranges = <0x0 0x11800000 0xa0000>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #clock-cells = <1>;
+ clock-output-names = "cam_a_clkout", "cam_b_clkout";
+- ranges;
+
+- fimc_0: fimc@11800000 {
++ fimc_0: fimc@0 {
+ compatible = "samsung,exynos4210-fimc";
+- reg = <0x11800000 0x1000>;
++ reg = <0x0 0x1000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_FIMC0>,
+ <&clock CLK_SCLK_FIMC0>;
+@@ -223,9 +223,9 @@ fimc_0: fimc@11800000 {
+ status = "disabled";
+ };
+
+- fimc_1: fimc@11810000 {
++ fimc_1: fimc@10000 {
+ compatible = "samsung,exynos4210-fimc";
+- reg = <0x11810000 0x1000>;
++ reg = <0x00010000 0x1000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_FIMC1>,
+ <&clock CLK_SCLK_FIMC1>;
+@@ -236,9 +236,9 @@ fimc_1: fimc@11810000 {
+ status = "disabled";
+ };
+
+- fimc_2: fimc@11820000 {
++ fimc_2: fimc@20000 {
+ compatible = "samsung,exynos4210-fimc";
+- reg = <0x11820000 0x1000>;
++ reg = <0x00020000 0x1000>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_FIMC2>,
+ <&clock CLK_SCLK_FIMC2>;
+@@ -249,9 +249,9 @@ fimc_2: fimc@11820000 {
+ status = "disabled";
+ };
+
+- fimc_3: fimc@11830000 {
++ fimc_3: fimc@30000 {
+ compatible = "samsung,exynos4210-fimc";
+- reg = <0x11830000 0x1000>;
++ reg = <0x00030000 0x1000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_FIMC3>,
+ <&clock CLK_SCLK_FIMC3>;
+@@ -262,9 +262,9 @@ fimc_3: fimc@11830000 {
+ status = "disabled";
+ };
+
+- csis_0: csis@11880000 {
++ csis_0: csis@80000 {
+ compatible = "samsung,exynos4210-csis";
+- reg = <0x11880000 0x4000>;
++ reg = <0x00080000 0x4000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_CSIS0>,
+ <&clock CLK_SCLK_CSIS0>;
+@@ -278,9 +278,9 @@ csis_0: csis@11880000 {
+ #size-cells = <0>;
+ };
+
+- csis_1: csis@11890000 {
++ csis_1: csis@90000 {
+ compatible = "samsung,exynos4210-csis";
+- reg = <0x11890000 0x4000>;
++ reg = <0x00090000 0x4000>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_CSIS1>,
+ <&clock CLK_SCLK_CSIS1>;
+diff --git a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+index a9ec1f6c1dea15..a076a1dfe41f8f 100644
+--- a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+@@ -527,6 +527,14 @@ vtcam_reg: LDO12 {
+ regulator-name = "VT_CAM_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++
++ /*
++ * Force-enable this regulator; otherwise the
++ * kernel hangs very early in the boot process
++ * for about 12 seconds, without apparent
++ * reason.
++ */
++ regulator-always-on;
+ };
+
+ vcclcd_reg: LDO13 {
+diff --git a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
+index b566f878ed84f9..18f4f494093ba8 100644
+--- a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
++++ b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
+@@ -88,7 +88,7 @@ eeprom@52 {
+ &keypad {
+ samsung,keypad-num-rows = <2>;
+ samsung,keypad-num-columns = <8>;
+- linux,keypad-no-autorepeat;
++ linux,input-no-autorepeat;
+ wakeup-source;
+ pinctrl-names = "default";
+ pinctrl-0 = <&keypad_rows &keypad_cols>;
+diff --git a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+index ce81e42bf5eb3d..39469b708f910b 100644
+--- a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
++++ b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+@@ -435,6 +435,7 @@ &exynos_usbphy {
+ };
+
+ &fimd {
++ samsung,invert-vclk;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/samsung/exynos4412-origen.dts b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
+index 23b151645d6686..10ab7bc90f502f 100644
+--- a/arch/arm/boot/dts/samsung/exynos4412-origen.dts
++++ b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
+@@ -453,7 +453,7 @@ buck9_reg: BUCK9 {
+ &keypad {
+ samsung,keypad-num-rows = <3>;
+ samsung,keypad-num-columns = <2>;
+- linux,keypad-no-autorepeat;
++ linux,input-no-autorepeat;
+ wakeup-source;
+ pinctrl-0 = <&keypad_rows &keypad_cols>;
+ pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
+index 715dfcba141743..e16df9e75fcb0d 100644
+--- a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
++++ b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
+@@ -69,7 +69,7 @@ cooling_map1: map1 {
+ &keypad {
+ samsung,keypad-num-rows = <3>;
+ samsung,keypad-num-columns = <8>;
+- linux,keypad-no-autorepeat;
++ linux,input-no-autorepeat;
+ wakeup-source;
+ pinctrl-0 = <&keypad_rows &keypad_cols>;
+ pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/samsung/exynos4x12.dtsi b/arch/arm/boot/dts/samsung/exynos4x12.dtsi
+index 84c1db221c984b..83d9d0a0a61754 100644
+--- a/arch/arm/boot/dts/samsung/exynos4x12.dtsi
++++ b/arch/arm/boot/dts/samsung/exynos4x12.dtsi
+@@ -451,14 +451,15 @@ &combiner {
+ };
+
+ &camera {
++ ranges = <0x0 0x11800000 0xba1000>;
+ clocks = <&clock CLK_SCLK_CAM0>, <&clock CLK_SCLK_CAM1>,
+ <&clock CLK_PIXELASYNCM0>, <&clock CLK_PIXELASYNCM1>;
+ clock-names = "sclk_cam0", "sclk_cam1", "pxl_async0", "pxl_async1";
+
+ /* fimc_[0-3] are configured outside, under phandles */
+- fimc_lite_0: fimc-lite@12390000 {
++ fimc_lite_0: fimc-lite@b90000 {
+ compatible = "samsung,exynos4212-fimc-lite";
+- reg = <0x12390000 0x1000>;
++ reg = <0x00b90000 0x1000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pd_isp>;
+ clocks = <&isp_clock CLK_ISP_FIMC_LITE0>;
+@@ -467,9 +468,9 @@ fimc_lite_0: fimc-lite@12390000 {
+ status = "disabled";
+ };
+
+- fimc_lite_1: fimc-lite@123a0000 {
++ fimc_lite_1: fimc-lite@ba0000 {
+ compatible = "samsung,exynos4212-fimc-lite";
+- reg = <0x123a0000 0x1000>;
++ reg = <0x00ba0000 0x1000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pd_isp>;
+ clocks = <&isp_clock CLK_ISP_FIMC_LITE1>;
+@@ -478,9 +479,9 @@ fimc_lite_1: fimc-lite@123a0000 {
+ status = "disabled";
+ };
+
+- fimc_is: fimc-is@12000000 {
++ fimc_is: fimc-is@800000 {
+ compatible = "samsung,exynos4212-fimc-is";
+- reg = <0x12000000 0x260000>;
++ reg = <0x00800000 0x260000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pd_isp>;
+@@ -525,9 +526,9 @@ pmu@10020000 {
+ reg = <0x10020000 0x3000>;
+ };
+
+- i2c1_isp: i2c-isp@12140000 {
++ i2c1_isp: i2c-isp@940000 {
+ compatible = "samsung,exynos4212-i2c-isp";
+- reg = <0x12140000 0x100>;
++ reg = <0x00940000 0x100>;
+ clocks = <&isp_clock CLK_ISP_I2C1_ISP>;
+ clock-names = "i2c_isp";
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/samsung/s5pv210.dtsi b/arch/arm/boot/dts/samsung/s5pv210.dtsi
+index f7de5b5f2f3837..ed560c9a3aa1ef 100644
+--- a/arch/arm/boot/dts/samsung/s5pv210.dtsi
++++ b/arch/arm/boot/dts/samsung/s5pv210.dtsi
+@@ -549,17 +549,17 @@ i2c1: i2c@fab00000 {
+
+ camera: camera@fa600000 {
+ compatible = "samsung,fimc";
++ ranges = <0x0 0xfa600000 0xe01000>;
+ clocks = <&clocks SCLK_CAM0>, <&clocks SCLK_CAM1>;
+ clock-names = "sclk_cam0", "sclk_cam1";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #clock-cells = <1>;
+ clock-output-names = "cam_a_clkout", "cam_b_clkout";
+- ranges;
+
+- csis0: csis@fa600000 {
++ csis0: csis@0 {
+ compatible = "samsung,s5pv210-csis";
+- reg = <0xfa600000 0x4000>;
++ reg = <0x00000000 0x4000>;
+ interrupt-parent = <&vic2>;
+ interrupts = <29>;
+ clocks = <&clocks CLK_CSIS>,
+@@ -572,9 +572,9 @@ csis0: csis@fa600000 {
+ #size-cells = <0>;
+ };
+
+- fimc0: fimc@fb200000 {
++ fimc0: fimc@c00000 {
+ compatible = "samsung,s5pv210-fimc";
+- reg = <0xfb200000 0x1000>;
++ reg = <0x00c00000 0x1000>;
+ interrupts = <5>;
+ interrupt-parent = <&vic2>;
+ clocks = <&clocks CLK_FIMC0>,
+@@ -586,9 +586,9 @@ fimc0: fimc@fb200000 {
+ samsung,cam-if;
+ };
+
+- fimc1: fimc@fb300000 {
++ fimc1: fimc@d00000 {
+ compatible = "samsung,s5pv210-fimc";
+- reg = <0xfb300000 0x1000>;
++ reg = <0x00d00000 0x1000>;
+ interrupt-parent = <&vic2>;
+ interrupts = <6>;
+ clocks = <&clocks CLK_FIMC1>,
+@@ -602,9 +602,9 @@ fimc1: fimc@fb300000 {
+ samsung,lcd-wb;
+ };
+
+- fimc2: fimc@fb400000 {
++ fimc2: fimc@e00000 {
+ compatible = "samsung,s5pv210-fimc";
+- reg = <0xfb400000 0x1000>;
++ reg = <0x00e00000 0x1000>;
+ interrupt-parent = <&vic2>;
+ interrupts = <7>;
+ clocks = <&clocks CLK_FIMC2>,
+diff --git a/arch/arm/boot/dts/st/stm32429i-eval.dts b/arch/arm/boot/dts/st/stm32429i-eval.dts
+index 576235ec3c516e..afa417b34b25ff 100644
+--- a/arch/arm/boot/dts/st/stm32429i-eval.dts
++++ b/arch/arm/boot/dts/st/stm32429i-eval.dts
+@@ -222,7 +222,6 @@ stmpe1600: stmpe1600@42 {
+ reg = <0x42>;
+ interrupts = <8 3>;
+ interrupt-parent = <&gpioi>;
+- interrupt-controller;
+ wakeup-source;
+
+ stmpegpio: stmpe_gpio {
+diff --git a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
+index 65480a9f5cc4e1..842f2b17c4a81c 100644
+--- a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
++++ b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
+@@ -376,7 +376,6 @@ pins2 {
+ };
+ };
+
+-
+ ltdc_pins_a: ltdc-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 4, AF14)>, /* LCD_B0 */
+diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
+index 61508917521c36..aec7fa5ab5d8c6 100644
+--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
+@@ -50,6 +50,7 @@ timer {
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&intc>;
++ arm,no-tick-in-suspend;
+ };
+
+ clocks {
+diff --git a/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
+index afcd6285890cc0..c27963898b5e6c 100644
+--- a/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
++++ b/arch/arm/boot/dts/st/stm32mp157a-dk1-scmi.dts
+@@ -11,7 +11,7 @@
+
+ / {
+ model = "STMicroelectronics STM32MP157A-DK1 SCMI Discovery Board";
+- compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157a-dk1", "st,stm32mp157";
++ compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157";
+
+ reserved-memory {
+ optee@de000000 {
+diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
+index 39358d90200031..62261894313407 100644
+--- a/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
++++ b/arch/arm/boot/dts/st/stm32mp157c-dk2-scmi.dts
+@@ -11,7 +11,7 @@
+
+ / {
+ model = "STMicroelectronics STM32MP157C-DK2 SCMI Discovery Board";
+- compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157c-dk2", "st,stm32mp157";
++ compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157";
+
+ reserved-memory {
+ optee@de000000 {
+diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
+index 510cca5acb79ca..7a701f7ef0c704 100644
+--- a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
++++ b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
+@@ -64,7 +64,6 @@ touchscreen@38 {
+ reg = <0x38>;
+ interrupts = <2 2>;
+ interrupt-parent = <&gpiof>;
+- interrupt-controller;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <800>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
+index 07ea765a4553a5..c7c4d7e89d6123 100644
+--- a/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
++++ b/arch/arm/boot/dts/st/stm32mp157c-ed1-scmi.dts
+@@ -11,7 +11,7 @@
+
+ / {
+ model = "STMicroelectronics STM32MP157C-ED1 SCMI eval daughter";
+- compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157";
++ compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157";
+
+ reserved-memory {
+ optee@fe000000 {
+diff --git a/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts b/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
+index 813086ec248959..2ab77e64f1bbb9 100644
+--- a/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
++++ b/arch/arm/boot/dts/st/stm32mp157c-ev1-scmi.dts
+@@ -11,8 +11,7 @@
+
+ / {
+ model = "STMicroelectronics STM32MP157C-EV1 SCMI eval daughter on eval mother";
+- compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ev1", "st,stm32mp157c-ed1",
+- "st,stm32mp157";
++ compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157";
+
+ reserved-memory {
+ optee@fe000000 {
+diff --git a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
+index b8730aa52ce6fe..a59331aa58e55e 100644
+--- a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
++++ b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
+@@ -217,7 +217,7 @@ &spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+
+- tpm_spi_tis@0 {
++ tpm@0 {
+ compatible = "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+diff --git a/arch/arm/boot/dts/ti/omap/am33xx.dtsi b/arch/arm/boot/dts/ti/omap/am33xx.dtsi
+index 1a2cd5baf40210..5b9e01a8aa5d5a 100644
+--- a/arch/arm/boot/dts/ti/omap/am33xx.dtsi
++++ b/arch/arm/boot/dts/ti/omap/am33xx.dtsi
+@@ -359,6 +359,7 @@ usb: target-module@47400000 {
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
++ ti,sysc-delay-us = <2>;
+ clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/ti/omap/am3517-evm.dts b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
+index af9df15274bed1..866f68c5b504dc 100644
+--- a/arch/arm/boot/dts/ti/omap/am3517-evm.dts
++++ b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
+@@ -271,13 +271,6 @@ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */
+ >;
+ };
+
+- leds_pins: leds-pins {
+- pinctrl-single,pins = <
+- OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
+- OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
+- >;
+- };
+-
+ mmc1_pins: mmc1-pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
+@@ -355,3 +348,12 @@ OMAP3430_CORE2_IOPAD(0x25e2, PIN_INPUT | MUX_MODE3) /* etk_d3.hsusb1_data7 */
+ >;
+ };
+ };
++
++&omap3_pmx_wkup {
++ leds_pins: leds-pins {
++ pinctrl-single,pins = <
++ OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
++ OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
++ >;
++ };
++};
+diff --git a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
+index 9a234dc1431d12..5b240769d300e5 100644
+--- a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
++++ b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
+@@ -415,7 +415,6 @@ stmpe811@41 {
+ reg = <0x41>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio2>;
+- interrupt-controller;
+ id = <0>;
+ blocks = <0x5>;
+ irq-trigger = <0x1>;
+diff --git a/arch/arm/boot/dts/ti/omap/dra7.dtsi b/arch/arm/boot/dts/ti/omap/dra7.dtsi
+index 3f3e52e3b37526..6509c742fb58c9 100644
+--- a/arch/arm/boot/dts/ti/omap/dra7.dtsi
++++ b/arch/arm/boot/dts/ti/omap/dra7.dtsi
+@@ -147,7 +147,7 @@ ocp: ocp {
+
+ l3-noc@44000000 {
+ compatible = "ti,dra7-l3-noc";
+- reg = <0x44000000 0x1000>,
++ reg = <0x44000000 0x1000000>,
+ <0x45000000 0x1000>;
+ interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/ti/omap/omap3-n900.dts b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+index d3348534125173..036e472b77bebb 100644
+--- a/arch/arm/boot/dts/ti/omap/omap3-n900.dts
++++ b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+@@ -781,7 +781,7 @@ accelerometer@1d {
+
+ mount-matrix = "-1", "0", "0",
+ "0", "1", "0",
+- "0", "0", "1";
++ "0", "0", "-1";
+ };
+
+ cam1: camera@3e {
+diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
+index 0a90583f9f017e..8f9dbe8d90291e 100644
+--- a/arch/arm/configs/imx_v6_v7_defconfig
++++ b/arch/arm/configs/imx_v6_v7_defconfig
+@@ -297,6 +297,7 @@ CONFIG_FB_MODE_HELPERS=y
+ CONFIG_LCD_CLASS_DEVICE=y
+ CONFIG_LCD_L4F00242T03=y
+ CONFIG_LCD_PLATFORM=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
+ CONFIG_BACKLIGHT_PWM=y
+ CONFIG_BACKLIGHT_GPIO=y
+ CONFIG_FRAMEBUFFER_CONSOLE=y
+diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
+index bddc82f7894211..a83d29fed17563 100644
+--- a/arch/arm/configs/sunxi_defconfig
++++ b/arch/arm/configs/sunxi_defconfig
+@@ -110,6 +110,7 @@ CONFIG_DRM_PANEL_LVDS=y
+ CONFIG_DRM_PANEL_SIMPLE=y
+ CONFIG_DRM_PANEL_EDP=y
+ CONFIG_DRM_SIMPLE_BRIDGE=y
++CONFIG_DRM_DW_HDMI=y
+ CONFIG_DRM_LIMA=y
+ CONFIG_FB_SIMPLE=y
+ CONFIG_BACKLIGHT_CLASS_DEVICE=y
+diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
+index b668c97663ec0c..f5b66f4cf45d96 100644
+--- a/arch/arm/crypto/aes-ce-glue.c
++++ b/arch/arm/crypto/aes-ce-glue.c
+@@ -711,7 +711,7 @@ static int __init aes_init(void)
+ algname = aes_algs[i].base.cra_name + 2;
+ drvname = aes_algs[i].base.cra_driver_name + 2;
+ basename = aes_algs[i].base.cra_driver_name;
+- simd = simd_skcipher_create_compat(algname, drvname, basename);
++ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
+index f00f042ef3570e..0ca94b90bc4ec5 100644
+--- a/arch/arm/crypto/aes-neonbs-glue.c
++++ b/arch/arm/crypto/aes-neonbs-glue.c
+@@ -539,7 +539,7 @@ static int __init aes_init(void)
+ algname = aes_algs[i].base.cra_name + 2;
+ drvname = aes_algs[i].base.cra_driver_name + 2;
+ basename = aes_algs[i].base.cra_driver_name;
+- simd = simd_skcipher_create_compat(algname, drvname, basename);
++ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto unregister_simds;
+diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
+index 433ee4ddce6c81..f85933fdec75fd 100644
+--- a/arch/arm/crypto/sha256_glue.c
++++ b/arch/arm/crypto/sha256_glue.c
+@@ -24,8 +24,8 @@
+
+ #include "sha256_glue.h"
+
+-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+- unsigned int num_blks);
++asmlinkage void sha256_block_data_order(struct sha256_state *state,
++ const u8 *data, int num_blks);
+
+ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -33,23 +33,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ /* make sure casting to sha256_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+
+- return sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order);
++ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_update);
+
+ static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
+ {
+- sha256_base_do_finalize(desc,
+- (sha256_block_fn *)sha256_block_data_order);
++ sha256_base_do_finalize(desc, sha256_block_data_order);
+ return sha256_base_finish(desc, out);
+ }
+
+ int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ return crypto_sha256_arm_final(desc, out);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_finup);
+diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
+index 0635a65aa488ba..1be5bd498af36f 100644
+--- a/arch/arm/crypto/sha512-glue.c
++++ b/arch/arm/crypto/sha512-glue.c
+@@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512");
+ MODULE_ALIAS_CRYPTO("sha384-arm");
+ MODULE_ALIAS_CRYPTO("sha512-arm");
+
+-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
++asmlinkage void sha512_block_data_order(struct sha512_state *state,
++ u8 const *src, int blocks);
+
+ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+ {
+- return sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order);
++ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ }
+
+ static int sha512_arm_final(struct shash_desc *desc, u8 *out)
+ {
+- sha512_base_do_finalize(desc,
+- (sha512_block_fn *)sha512_block_data_order);
++ sha512_base_do_finalize(desc, sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ return sha512_arm_final(desc, out);
+ }
+
+diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
+index 72529f5e2bed95..a41b503b7dcde0 100644
+--- a/arch/arm/include/asm/arm_pmuv3.h
++++ b/arch/arm/include/asm/arm_pmuv3.h
+@@ -23,6 +23,8 @@
+ #define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
+ #define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
+ #define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
++#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
++#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
+ #define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
+ #define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
+
+@@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
+ return read_sysreg(PMCCNTR);
+ }
+
+-static inline void write_pmxevcntr(u32 val)
+-{
+- write_sysreg(val, PMXEVCNTR);
+-}
+-
+-static inline u32 read_pmxevcntr(void)
+-{
+- return read_sysreg(PMXEVCNTR);
+-}
+-
+-static inline void write_pmxevtyper(u32 val)
+-{
+- write_sysreg(val, PMXEVTYPER);
+-}
+-
+ static inline void write_pmcntenset(u32 val)
+ {
+ write_sysreg(val, PMCNTENSET);
+@@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
+ write_sysreg(val, PMUSERENR);
+ }
+
+-static inline u32 read_pmceid0(void)
+-{
+- return read_sysreg(PMCEID0);
+-}
+-
+-static inline u32 read_pmceid1(void)
+-{
+- return read_sysreg(PMCEID1);
+-}
+-
+ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+ static inline void kvm_clr_pmu_events(u32 clr) {}
+ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+@@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+ /* PMU Version in DFR Register */
+ #define ARMV8_PMU_DFR_VER_NI 0
++#define ARMV8_PMU_DFR_VER_V3P1 0x4
+ #define ARMV8_PMU_DFR_VER_V3P4 0x5
+ #define ARMV8_PMU_DFR_VER_V3P5 0x6
+ #define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
+@@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver)
+ return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+ }
+
++static inline u64 read_pmceid0(void)
++{
++ u64 val = read_sysreg(PMCEID0);
++
++ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
++ val |= (u64)read_sysreg(PMCEID2) << 32;
++
++ return val;
++}
++
++static inline u64 read_pmceid1(void)
++{
++ u64 val = read_sysreg(PMCEID1);
++
++ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
++ val |= (u64)read_sysreg(PMCEID3) << 32;
++
++ return val;
++}
++
+ #endif
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index f6181f69577fe5..1075534b0a2eeb 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -340,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+ dsb(ishst);
+ }
+
++#define flush_cache_vmap_early(start, end) do { } while (0)
++
+ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+ {
+ if (!cache_is_vipt_nonaliasing())
+diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
+index c6aded1b069cf0..e2a1916013e75e 100644
+--- a/arch/arm/include/asm/dma.h
++++ b/arch/arm/include/asm/dma.h
+@@ -12,6 +12,9 @@
+ extern phys_addr_t arm_dma_zone_size; \
+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
+ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
++
++extern phys_addr_t arm_dma_limit;
++#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
+ #endif
+
+ #ifdef CONFIG_ISA_DMA_API
+diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
+index 58e039a851af03..3c82975d46db35 100644
+--- a/arch/arm/include/asm/exception.h
++++ b/arch/arm/include/asm/exception.h
+@@ -10,10 +10,6 @@
+
+ #include <linux/interrupt.h>
+
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ #define __exception_irq_entry __irq_entry
+-#else
+-#define __exception_irq_entry
+-#endif
+
+ #endif /* __ASM_ARM_EXCEPTION_H */
+diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
+index 3149e4dc1b5405..8895999834cc0b 100644
+--- a/arch/arm/include/asm/irq_work.h
++++ b/arch/arm/include/asm/irq_work.h
+@@ -9,6 +9,4 @@ static inline bool arch_irq_work_has_interrupt(void)
+ return is_smp();
+ }
+
+-extern void arch_irq_work_raise(void);
+-
+ #endif /* _ASM_ARM_IRQ_WORK_H */
+diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
+index e12d7d096fc034..e4eb54f6cd9fef 100644
+--- a/arch/arm/include/asm/jump_label.h
++++ b/arch/arm/include/asm/jump_label.h
+@@ -11,7 +11,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ WASM(nop) "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+@@ -25,7 +25,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ WASM(b) " %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
+index e62832dcba7600..a8287e7ab9d41a 100644
+--- a/arch/arm/include/asm/kexec.h
++++ b/arch/arm/include/asm/kexec.h
+@@ -2,8 +2,6 @@
+ #ifndef _ARM_KEXEC_H
+ #define _ARM_KEXEC_H
+
+-#ifdef CONFIG_KEXEC
+-
+ /* Maximum physical address we can use pages from */
+ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+ /* Maximum address we can reach in physical address mode */
+@@ -82,6 +80,4 @@ static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+
+ #endif /* __ASSEMBLY__ */
+
+-#endif /* CONFIG_KEXEC */
+-
+ #endif /* _ARM_KEXEC_H */
+diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
+new file mode 100644
+index 00000000000000..2189e507c8e08b
+--- /dev/null
++++ b/arch/arm/include/asm/mman.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_MMAN_H__
++#define __ASM_MMAN_H__
++
++#include <asm/system_info.h>
++#include <uapi/asm/mman.h>
++
++static inline bool arch_memory_deny_write_exec_supported(void)
++{
++ return cpu_architecture() >= CPU_ARCH_ARMv6;
++}
++#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
++
++#endif /* __ASM_MMAN_H__ */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 16b02f44c7d312..d657b84b6bf706 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -151,6 +151,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
++#define pgdp_get(pgpd) READ_ONCE(*pgdp)
++
+ #define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
+ #define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
+
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index bb5c8182311774..c28f5ec21e417b 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -109,16 +109,6 @@ extern int __get_user_64t_1(void *);
+ extern int __get_user_64t_2(void *);
+ extern int __get_user_64t_4(void *);
+
+-#define __GUP_CLOBBER_1 "lr", "cc"
+-#ifdef CONFIG_CPU_USE_DOMAINS
+-#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+-#else
+-#define __GUP_CLOBBER_2 "lr", "cc"
+-#endif
+-#define __GUP_CLOBBER_4 "lr", "cc"
+-#define __GUP_CLOBBER_32t_8 "lr", "cc"
+-#define __GUP_CLOBBER_8 "lr", "cc"
+-
+ #define __get_user_x(__r2, __p, __e, __l, __s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
+@@ -126,7 +116,7 @@ extern int __get_user_64t_4(void *);
+ "bl __get_user_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+ : "0" (__p), "r" (__l) \
+- : __GUP_CLOBBER_##__s)
++ : "ip", "lr", "cc")
+
+ /* narrowing a double-word get into a single 32bit word register: */
+ #ifdef __ARMEB__
+@@ -148,7 +138,7 @@ extern int __get_user_64t_4(void *);
+ "bl __get_user_64t_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+ : "0" (__p), "r" (__l) \
+- : __GUP_CLOBBER_##__s)
++ : "ip", "lr", "cc")
+ #else
+ #define __get_user_x_64t __get_user_x
+ #endif
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index d53f56d6f84085..ae2f2b2b4e5abc 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -59,7 +59,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
+ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o patch.o
+ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o patch.o
+ obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
+-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
++obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
+ # Main staffs in KPROBES are in arch/arm/probes/ .
+ obj-$(CONFIG_KPROBES) += patch.o insn.o
+ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
+@@ -75,8 +75,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
+ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
+ obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
+-obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
+-obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
+ obj-$(CONFIG_IWMMXT) += iwmmxt.o
+ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
+ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
+diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
+index a0218c4867b9b6..4a335d3c59690b 100644
+--- a/arch/arm/kernel/iwmmxt.S
++++ b/arch/arm/kernel/iwmmxt.S
+@@ -18,18 +18,6 @@
+ #include <asm/assembler.h>
+ #include "iwmmxt.h"
+
+-#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
+-#define PJ4(code...) code
+-#define XSC(code...)
+-#elif defined(CONFIG_CPU_MOHAWK) || \
+- defined(CONFIG_CPU_XSC3) || \
+- defined(CONFIG_CPU_XSCALE)
+-#define PJ4(code...)
+-#define XSC(code...) code
+-#else
+-#error "Unsupported iWMMXt architecture"
+-#endif
+-
+ #define MMX_WR0 (0x00)
+ #define MMX_WR1 (0x08)
+ #define MMX_WR2 (0x10)
+@@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
+ ENTRY(iwmmxt_task_enable)
+ inc_preempt_count r10, r3
+
+- XSC(mrc p15, 0, r2, c15, c1, 0)
+- PJ4(mrc p15, 0, r2, c1, c0, 2)
++ mrc p15, 0, r2, c15, c1, 0
+ @ CP0 and CP1 accessible?
+- XSC(tst r2, #0x3)
+- PJ4(tst r2, #0xf)
++ tst r2, #0x3
+ bne 4f @ if so no business here
+ @ enable access to CP0 and CP1
+- XSC(orr r2, r2, #0x3)
+- XSC(mcr p15, 0, r2, c15, c1, 0)
+- PJ4(orr r2, r2, #0xf)
+- PJ4(mcr p15, 0, r2, c1, c0, 2)
++ orr r2, r2, #0x3
++ mcr p15, 0, r2, c15, c1, 0
+
+ ldr r3, =concan_owner
+ ldr r2, [r0, #S_PC] @ current task pc value
+@@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
+ bne 1f @ no: quit
+
+ @ enable access to CP0 and CP1
+- XSC(mrc p15, 0, r4, c15, c1, 0)
+- XSC(orr r4, r4, #0x3)
+- XSC(mcr p15, 0, r4, c15, c1, 0)
+- PJ4(mrc p15, 0, r4, c1, c0, 2)
+- PJ4(orr r4, r4, #0xf)
+- PJ4(mcr p15, 0, r4, c1, c0, 2)
++ mrc p15, 0, r4, c15, c1, 0
++ orr r4, r4, #0x3
++ mcr p15, 0, r4, c15, c1, 0
+
+ mov r0, #0 @ nothing to load
+ str r0, [r3] @ no more current owner
+@@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
+ bl concan_save
+
+ @ disable access to CP0 and CP1
+- XSC(bic r4, r4, #0x3)
+- XSC(mcr p15, 0, r4, c15, c1, 0)
+- PJ4(bic r4, r4, #0xf)
+- PJ4(mcr p15, 0, r4, c1, c0, 2)
++ bic r4, r4, #0x3
++ mcr p15, 0, r4, c15, c1, 0
+
+ mrc p15, 0, r2, c2, c0, 0
+ mov r2, r2 @ cpwait
+@@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
+ */
+ ENTRY(iwmmxt_task_switch)
+
+- XSC(mrc p15, 0, r1, c15, c1, 0)
+- PJ4(mrc p15, 0, r1, c1, c0, 2)
++ mrc p15, 0, r1, c15, c1, 0
+ @ CP0 and CP1 accessible?
+- XSC(tst r1, #0x3)
+- PJ4(tst r1, #0xf)
++ tst r1, #0x3
+ bne 1f @ yes: block them for next task
+
+ ldr r2, =concan_owner
+@@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
+ retne lr @ no: leave Concan disabled
+
+ 1: @ flip Concan access
+- XSC(eor r1, r1, #0x3)
+- XSC(mcr p15, 0, r1, c15, c1, 0)
+- PJ4(eor r1, r1, #0xf)
+- PJ4(mcr p15, 0, r1, c1, c0, 2)
++ eor r1, r1, #0x3
++ mcr p15, 0, r1, c15, c1, 0
+
+ mrc p15, 0, r1, c2, c0, 0
+ sub pc, lr, r1, lsr #32 @ cpwait and return
+diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
+index 7147edbe56c67c..1d230ac9d0eb5c 100644
+--- a/arch/arm/kernel/perf_callchain.c
++++ b/arch/arm/kernel/perf_callchain.c
+@@ -85,8 +85,7 @@ static bool
+ callchain_trace(void *data, unsigned long pc)
+ {
+ struct perf_callchain_entry_ctx *entry = data;
+- perf_callchain_store(entry, pc);
+- return true;
++ return perf_callchain_store(entry, pc) == 0;
+ }
+
+ void
+diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
+deleted file mode 100644
+index 4bca8098c4ff55..00000000000000
+--- a/arch/arm/kernel/pj4-cp0.c
++++ /dev/null
+@@ -1,135 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * linux/arch/arm/kernel/pj4-cp0.c
+- *
+- * PJ4 iWMMXt coprocessor context switching and handling
+- *
+- * Copyright (c) 2010 Marvell International Inc.
+- */
+-
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/signal.h>
+-#include <linux/sched.h>
+-#include <linux/init.h>
+-#include <linux/io.h>
+-#include <asm/thread_notify.h>
+-#include <asm/cputype.h>
+-
+-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+-{
+- struct thread_info *thread = t;
+-
+- switch (cmd) {
+- case THREAD_NOTIFY_FLUSH:
+- /*
+- * flush_thread() zeroes thread->fpstate, so no need
+- * to do anything here.
+- *
+- * FALLTHROUGH: Ensure we don't try to overwrite our newly
+- * initialised state information on the first fault.
+- */
+-
+- case THREAD_NOTIFY_EXIT:
+- iwmmxt_task_release(thread);
+- break;
+-
+- case THREAD_NOTIFY_SWITCH:
+- iwmmxt_task_switch(thread);
+- break;
+- }
+-
+- return NOTIFY_DONE;
+-}
+-
+-static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
+- .notifier_call = iwmmxt_do,
+-};
+-
+-
+-static u32 __init pj4_cp_access_read(void)
+-{
+- u32 value;
+-
+- __asm__ __volatile__ (
+- "mrc p15, 0, %0, c1, c0, 2\n\t"
+- : "=r" (value));
+- return value;
+-}
+-
+-static void __init pj4_cp_access_write(u32 value)
+-{
+- u32 temp;
+-
+- __asm__ __volatile__ (
+- "mcr p15, 0, %1, c1, c0, 2\n\t"
+-#ifdef CONFIG_THUMB2_KERNEL
+- "isb\n\t"
+-#else
+- "mrc p15, 0, %0, c1, c0, 2\n\t"
+- "mov %0, %0\n\t"
+- "sub pc, pc, #4\n\t"
+-#endif
+- : "=r" (temp) : "r" (value));
+-}
+-
+-static int __init pj4_get_iwmmxt_version(void)
+-{
+- u32 cp_access, wcid;
+-
+- cp_access = pj4_cp_access_read();
+- pj4_cp_access_write(cp_access | 0xf);
+-
+- /* check if coprocessor 0 and 1 are available */
+- if ((pj4_cp_access_read() & 0xf) != 0xf) {
+- pj4_cp_access_write(cp_access);
+- return -ENODEV;
+- }
+-
+- /* read iWMMXt coprocessor id register p1, c0 */
+- __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
+-
+- pj4_cp_access_write(cp_access);
+-
+- /* iWMMXt v1 */
+- if ((wcid & 0xffffff00) == 0x56051000)
+- return 1;
+- /* iWMMXt v2 */
+- if ((wcid & 0xffffff00) == 0x56052000)
+- return 2;
+-
+- return -EINVAL;
+-}
+-
+-/*
+- * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+- * switch code handle iWMMXt context switching.
+- */
+-static int __init pj4_cp0_init(void)
+-{
+- u32 __maybe_unused cp_access;
+- int vers;
+-
+- if (!cpu_is_pj4())
+- return 0;
+-
+- vers = pj4_get_iwmmxt_version();
+- if (vers < 0)
+- return 0;
+-
+-#ifndef CONFIG_IWMMXT
+- pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
+-#else
+- cp_access = pj4_cp_access_read() & ~0xf;
+- pj4_cp_access_write(cp_access);
+-
+- pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
+- elf_hwcap |= HWCAP_IWMMXT;
+- thread_register_notifier(&iwmmxt_notifier_block);
+- register_iwmmxt_undef_handler();
+-#endif
+-
+- return 0;
+-}
+-
+-late_initcall(pj4_cp0_init);
+diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
+index a86a1d4f34618c..93afd1005b43c9 100644
+--- a/arch/arm/kernel/sleep.S
++++ b/arch/arm/kernel/sleep.S
+@@ -127,6 +127,10 @@ cpu_resume_after_mmu:
+ instr_sync
+ #endif
+ bl cpu_init @ restore the und/abt/irq banked regs
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
++ mov r0, sp
++ bl kasan_unpoison_task_stack_below
++#endif
+ mov r0, #0 @ return zero on success
+ ldmfd sp!, {r4 - r11, pc}
+ ENDPROC(cpu_resume_after_mmu)
+diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
+index d71ab61430b261..de75ae4d5ab41c 100644
+--- a/arch/arm/lib/memset.S
++++ b/arch/arm/lib/memset.S
+@@ -17,6 +17,7 @@ ENTRY(__memset)
+ ENTRY(mmioset)
+ WEAK(memset)
+ UNWIND( .fnstart )
++ and r1, r1, #255 @ cast to unsigned char
+ ands r3, r0, #3 @ 1 unaligned?
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
+diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
+index 4316e1370627cf..2a8a9fe46586d2 100644
+--- a/arch/arm/mach-davinci/Kconfig
++++ b/arch/arm/mach-davinci/Kconfig
+@@ -4,12 +4,14 @@ menuconfig ARCH_DAVINCI
+ bool "TI DaVinci"
+ depends on ARCH_MULTI_V5
+ depends on CPU_LITTLE_ENDIAN
++ select CPU_ARM926T
+ select DAVINCI_TIMER
+ select ZONE_DMA
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
++ select PINCTRL
+ select PINCTRL_SINGLE
+
+ if ARCH_DAVINCI
+diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
+index 8aa39db095d761..2c5155bd376ba8 100644
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -61,7 +61,7 @@ static void davinci_pm_suspend(void)
+
+ /* Configure sleep count in deep sleep register */
+ val = __raw_readl(pm_config.deepsleep_reg);
+- val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
++ val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
+ val |= pm_config.sleepcount;
+ __raw_writel(val, pm_config.deepsleep_reg);
+
+diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
+index 85a496ddc6197e..e9f72a529b5089 100644
+--- a/arch/arm/mach-ep93xx/clock.c
++++ b/arch/arm/mach-ep93xx/clock.c
+@@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
+ u32 val = __raw_readl(psc->reg);
+ u8 index = (val & psc->mask) >> psc->shift;
+
+- if (index > psc->num_div)
++ if (index >= psc->num_div)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);
+diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
+index 71b1139764204c..8b1ec60a9a467a 100644
+--- a/arch/arm/mach-ep93xx/core.c
++++ b/arch/arm/mach-ep93xx/core.c
+@@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("G", 0, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
++ { }
+ },
+ };
+
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index 2157493b78a9bd..df69af9323754f 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -501,6 +501,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+
+ name = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "mmdc%d", ret);
++ if (!name) {
++ ret = -ENOMEM;
++ goto pmu_release_id;
++ }
+
+ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+@@ -523,9 +527,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+
+ pmu_register_err:
+ pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+- ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ hrtimer_cancel(&pmu_mmdc->hrtimer);
++pmu_release_id:
++ ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ pmu_free:
+ kfree(pmu_mmdc);
+ return ret;
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index 8e3b5068d4ab07..b45a3879eb344c 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
+ static struct gpiod_lookup_table tusb_gpio_table = {
+ .dev_id = "musb-tusb",
+ .table = {
+- GPIO_LOOKUP("gpio-0-15", 0, "enable",
+- GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("gpio-48-63", 10, "int",
+- GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
+ { }
+ },
+ };
+@@ -140,12 +138,11 @@ static int slot1_cover_open;
+ static int slot2_cover_open;
+ static struct device *mmc_device;
+
+-static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
++static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
+ .dev_id = "mmci-omap.0",
+ .table = {
+ /* Slot switch, GPIO 96 */
+- GPIO_LOOKUP("gpio-80-111", 16,
+- "switch", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
+ { }
+ },
+ };
+@@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
+ static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
+ .dev_id = "mmci-omap.0",
+ .table = {
++ /* Slot switch, GPIO 96 */
++ GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
+ /* Slot index 1, VSD power, GPIO 23 */
+- GPIO_LOOKUP_IDX("gpio-16-31", 7,
+- "vsd", 1, GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
+ /* Slot index 1, VIO power, GPIO 9 */
+- GPIO_LOOKUP_IDX("gpio-0-15", 9,
+- "vio", 1, GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
+ { }
+ },
+ };
+@@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
+
+ static void __init n8x0_mmc_init(void)
+ {
+- gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);
+-
+ if (board_is_n810()) {
+ mmc1_data.slots[0].name = "external";
+
+@@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
+ mmc1_data.slots[1].name = "internal";
+ mmc1_data.slots[1].ban_openended = 1;
+ gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
++ } else {
++ gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
+ }
+
+ mmc1_data.nr_slots = 2;
+diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
+index 98999aa8cc0c09..7f387706368a68 100644
+--- a/arch/arm/mach-omap2/id.c
++++ b/arch/arm/mach-omap2/id.c
+@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
+
+ soc_dev_attr->machine = soc_name;
+ soc_dev_attr->family = omap_get_family();
++ if (!soc_dev_attr->family) {
++ kfree(soc_dev_attr);
++ return;
++ }
+ soc_dev_attr->revision = soc_rev;
+ soc_dev_attr->custom_attr_group = omap_soc_groups[0];
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
++ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return;
+ }
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index c1c0121f478d66..b947bacf23a376 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -275,9 +275,19 @@ static struct platform_device pandora_backlight = {
+ .id = -1,
+ };
+
++static struct gpiod_lookup_table pandora_soc_audio_gpios = {
++ .dev_id = "soc-audio",
++ .table = {
++ GPIO_LOOKUP("gpio-112-127", 6, "dac", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("gpio-0-15", 14, "amp", GPIO_ACTIVE_HIGH),
++ { }
++ },
++};
++
+ static void __init omap3_pandora_legacy_init(void)
+ {
+ platform_device_register(&pandora_backlight);
++ gpiod_add_lookup_table(&pandora_soc_audio_gpios);
+ }
+ #endif /* CONFIG_ARCH_OMAP3 */
+
+diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
+index cc691b199429ca..a42417de53f74d 100644
+--- a/arch/arm/mach-pxa/spitz.c
++++ b/arch/arm/mach-pxa/spitz.c
+@@ -520,10 +520,8 @@ static struct gpiod_lookup_table spitz_ads7846_gpio_table = {
+ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
+ .dev_id = "spi2.1",
+ .table = {
+- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_CONT,
+- "BL_CONT", GPIO_ACTIVE_LOW),
+- GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_ON,
+- "BL_ON", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("sharp-scoop.1", 6, "BL_CONT", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("sharp-scoop.1", 7, "BL_ON", GPIO_ACTIVE_HIGH),
+ { },
+ },
+ };
+@@ -531,10 +529,8 @@ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = {
+ static struct gpiod_lookup_table akita_lcdcon_gpio_table = {
+ .dev_id = "spi2.1",
+ .table = {
+- GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_CONT,
+- "BL_CONT", GPIO_ACTIVE_LOW),
+- GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_ON,
+- "BL_ON", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("i2c-max7310", 3, "BL_ON", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("i2c-max7310", 4, "BL_CONT", GPIO_ACTIVE_LOW),
+ { },
+ },
+ };
+@@ -941,12 +937,9 @@ static inline void spitz_i2c_init(void) {}
+ static struct gpiod_lookup_table spitz_audio_gpio_table = {
+ .dev_id = "spitz-audio",
+ .table = {
+- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+- "mute-l", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+- "mute-r", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE,
+- "mic", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("sharp-scoop.0", 3, "mute-l", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("sharp-scoop.0", 4, "mute-r", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("sharp-scoop.1", 8, "mic", GPIO_ACTIVE_HIGH),
+ { },
+ },
+ };
+@@ -954,12 +947,9 @@ static struct gpiod_lookup_table spitz_audio_gpio_table = {
+ static struct gpiod_lookup_table akita_audio_gpio_table = {
+ .dev_id = "spitz-audio",
+ .table = {
+- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE,
+- "mute-l", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE,
+- "mute-r", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE,
+- "mic", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("sharp-scoop.0", 3, "mute-l", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("sharp-scoop.0", 4, "mute-r", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("i2c-max7310", 2, "mic", GPIO_ACTIVE_HIGH),
+ { },
+ },
+ };
+diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
+index cb63921232a6f8..277f6aa8e6c25f 100644
+--- a/arch/arm/mach-sunxi/mc_smp.c
++++ b/arch/arm/mach-sunxi/mc_smp.c
+@@ -803,16 +803,16 @@ static int __init sunxi_mc_smp_init(void)
+ for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
+ ret = of_property_match_string(node, "enable-method",
+ sunxi_mc_smp_data[i].enable_method);
+- if (!ret)
++ if (ret >= 0)
+ break;
+ }
+
+- is_a83t = sunxi_mc_smp_data[i].is_a83t;
+-
+ of_node_put(node);
+- if (ret)
++ if (ret < 0)
+ return -ENODEV;
+
++ is_a83t = sunxi_mc_smp_data[i].is_a83t;
++
+ if (!sunxi_mc_smp_cpu_table_init())
+ return -EINVAL;
+
+diff --git a/arch/arm/mach-versatile/platsmp-realview.c b/arch/arm/mach-versatile/platsmp-realview.c
+index 5d363385c80192..059d796b26bc8e 100644
+--- a/arch/arm/mach-versatile/platsmp-realview.c
++++ b/arch/arm/mach-versatile/platsmp-realview.c
+@@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
+ return;
+ }
+ map = syscon_node_to_regmap(np);
++ of_node_put(np);
+ if (IS_ERR(map)) {
+ pr_err("PLATSMP: No syscon regmap\n");
+ return;
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index d19d140a10c7d5..0749cf8a66371b 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
+ return;
+
+ folio = page_folio(pfn_to_page(pfn));
++ if (folio_test_reserved(folio))
++ return;
++
+ if (cache_is_vipt_aliasing())
+ mapping = folio_flush_mapping(folio);
+ else
+diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
+index 515ca33b854c1a..d761bd2e2f4072 100644
+--- a/arch/arm/vdso/Makefile
++++ b/arch/arm/vdso/Makefile
+@@ -63,28 +63,3 @@ quiet_cmd_vdsold_and_vdso_check = LD $@
+
+ quiet_cmd_vdsomunge = MUNGE $@
+ cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
+-
+-#
+-# Install the unstripped copy of vdso.so.dbg. If our toolchain
+-# supports build-id, install .build-id links as well.
+-#
+-# Cribbed from arch/x86/vdso/Makefile.
+-#
+-quiet_cmd_vdso_install = INSTALL $<
+-define cmd_vdso_install
+- cp $< "$(MODLIB)/vdso/vdso.so"; \
+- if readelf -n $< | grep -q 'Build ID'; then \
+- buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+- first=`echo $$buildid | cut -b-2`; \
+- last=`echo $$buildid | cut -b3-`; \
+- mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+- ln -sf "../../vdso.so" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+- fi
+-endef
+-
+-$(MODLIB)/vdso: FORCE
+- @mkdir -p $(MODLIB)/vdso
+-
+-PHONY += vdso_install
+-vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h
+index 3c7938fd40aad6..32090b0fb250b8 100644
+--- a/arch/arm/vfp/vfpinstr.h
++++ b/arch/arm/vfp/vfpinstr.h
+@@ -64,33 +64,37 @@
+
+ #ifdef CONFIG_AS_VFP_VMRS_FPINST
+
+-#define fmrx(_vfp_) ({ \
+- u32 __v; \
+- asm(".fpu vfpv2\n" \
+- "vmrs %0, " #_vfp_ \
+- : "=r" (__v) : : "cc"); \
+- __v; \
+- })
+-
+-#define fmxr(_vfp_,_var_) \
+- asm(".fpu vfpv2\n" \
+- "vmsr " #_vfp_ ", %0" \
+- : : "r" (_var_) : "cc")
++#define fmrx(_vfp_) ({ \
++ u32 __v; \
++ asm volatile (".fpu vfpv2\n" \
++ "vmrs %0, " #_vfp_ \
++ : "=r" (__v) : : "cc"); \
++ __v; \
++})
++
++#define fmxr(_vfp_, _var_) ({ \
++ asm volatile (".fpu vfpv2\n" \
++ "vmsr " #_vfp_ ", %0" \
++ : : "r" (_var_) : "cc"); \
++})
+
+ #else
+
+ #define vfpreg(_vfp_) #_vfp_
+
+-#define fmrx(_vfp_) ({ \
+- u32 __v; \
+- asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \
+- : "=r" (__v) : : "cc"); \
+- __v; \
+- })
+-
+-#define fmxr(_vfp_,_var_) \
+- asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \
+- : : "r" (_var_) : "cc")
++#define fmrx(_vfp_) ({ \
++ u32 __v; \
++ asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
++ "cr0, 0 @ fmrx %0, " #_vfp_ \
++ : "=r" (__v) : : "cc"); \
++ __v; \
++})
++
++#define fmxr(_vfp_, _var_) ({ \
++ asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
++ "cr0, 0 @ fmxr " #_vfp_ ", %0" \
++ : : "r" (_var_) : "cc"); \
++})
+
+ #endif
+
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index c392e18f1e4317..a395b6c0aae2a9 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
+ BUG_ON(err);
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+- if (!xen_kernel_unmapped_at_usr())
+- xen_setup_runstate_info(cpu);
+-
+ after_register_vcpu_info:
+ enable_percpu_irq(xen_events_irq, 0);
+ return 0;
+@@ -487,7 +484,8 @@ static int __init xen_guest_init(void)
+ * for secondary CPUs as they are brought up.
+ * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ */
+- xen_vcpu_info = alloc_percpu(struct vcpu_info);
++ xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
++ 1 << fls(sizeof(struct vcpu_info) - 1));
+ if (xen_vcpu_info == NULL)
+ return -ENOMEM;
+
+@@ -523,9 +521,6 @@ static int __init xen_guest_init(void)
+ return -EINVAL;
+ }
+
+- if (!xen_kernel_unmapped_at_usr())
+- xen_time_setup_guest();
+-
+ if (xen_initial_domain())
+ pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
+
+@@ -535,7 +530,13 @@ static int __init xen_guest_init(void)
+ }
+ early_initcall(xen_guest_init);
+
+-static int __init xen_pm_init(void)
++static int xen_starting_runstate_cpu(unsigned int cpu)
++{
++ xen_setup_runstate_info(cpu);
++ return 0;
++}
++
++static int __init xen_late_init(void)
+ {
+ if (!xen_domain())
+ return -ENODEV;
+@@ -548,9 +549,16 @@ static int __init xen_pm_init(void)
+ do_settimeofday64(&ts);
+ }
+
+- return 0;
++ if (xen_kernel_unmapped_at_usr())
++ return 0;
++
++ xen_time_setup_guest();
++
++ return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
++ "arm/xen_runstate:starting",
++ xen_starting_runstate_cpu, NULL);
+ }
+-late_initcall(xen_pm_init);
++late_initcall(xen_late_init);
+
+
+ /* empty stubs */
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 78f20e6327120e..eab866d6903347 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -191,12 +191,13 @@ config ARM64
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+- if $(cc-option,-fpatchable-function-entry=2)
++ if (GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS || \
++ CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS)
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+ if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
+ select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
+ if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
+- !CC_OPTIMIZE_FOR_SIZE)
++ (CC_IS_CLANG || !CC_OPTIMIZE_FOR_SIZE))
+ select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
+ if DYNAMIC_FTRACE_WITH_ARGS
+ select HAVE_SAMPLE_FTRACE_DIRECT
+@@ -262,12 +263,10 @@ config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
+ def_bool CC_IS_CLANG
+ # https://github.com/ClangBuiltLinux/linux/issues/1507
+ depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+- select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+ config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS
+ def_bool CC_IS_GCC
+ depends on $(cc-option,-fpatchable-function-entry=2)
+- select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+ config 64BIT
+ def_bool y
+@@ -420,7 +419,7 @@ config AMPERE_ERRATUM_AC03_CPU_38
+ default y
+ help
+ This option adds an alternative code sequence to work around Ampere
+- erratum AC03_CPU_38 on AmpereOne.
++ errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
+
+ The affected design reports FEAT_HAFDBS as not implemented in
+ ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
+@@ -1037,8 +1036,12 @@ config ARM64_ERRATUM_2645198
+
+ If unsure, say Y.
+
++config ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++ bool
++
+ config ARM64_ERRATUM_2966298
+ bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
++ select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A520 erratum 2966298.
+@@ -1050,6 +1053,60 @@ config ARM64_ERRATUM_2966298
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_3117295
++ bool "Cortex-A510: 3117295: workaround for speculatively executed unprivileged load"
++ select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++ default y
++ help
++ This option adds the workaround for ARM Cortex-A510 erratum 3117295.
++
++ On an affected Cortex-A510 core, a speculatively executed unprivileged
++ load might leak data from a privileged level via a cache side channel.
++
++ Work around this problem by executing a TLBI before returning to EL0.
++
++ If unsure, say Y.
++
++config ARM64_ERRATUM_3194386
++ bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
++ default y
++ help
++ This option adds the workaround for the following errata:
++
++ * ARM Cortex-A76 erratum 3324349
++ * ARM Cortex-A77 erratum 3324348
++ * ARM Cortex-A78 erratum 3324344
++ * ARM Cortex-A78C erratum 3324346
++ * ARM Cortex-A78C erratum 3324347
++	  * ARM Cortex-A710 erratum 3324338
++	  * ARM Cortex-A715 erratum 3456084
++ * ARM Cortex-A720 erratum 3456091
++ * ARM Cortex-A725 erratum 3456106
++ * ARM Cortex-X1 erratum 3324344
++ * ARM Cortex-X1C erratum 3324346
++ * ARM Cortex-X2 erratum 3324338
++ * ARM Cortex-X3 erratum 3324335
++ * ARM Cortex-X4 erratum 3194386
++ * ARM Cortex-X925 erratum 3324334
++ * ARM Neoverse-N1 erratum 3324349
++ * ARM Neoverse N2 erratum 3324339
++ * ARM Neoverse-N3 erratum 3456111
++ * ARM Neoverse-V1 erratum 3324341
++ * ARM Neoverse V2 erratum 3324336
++ * ARM Neoverse-V3 erratum 3312417
++
++ On affected cores "MSR SSBS, #0" instructions may not affect
++	  subsequent speculative instructions, which may permit unexpected
++ speculative store bypassing.
++
++ Work around this problem by placing a Speculation Barrier (SB) or
++ Instruction Synchronization Barrier (ISB) after kernel changes to
++ SSBS. The presence of the SSBS special-purpose register is hidden
++ from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
++ will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+@@ -1368,6 +1425,8 @@ choice
+ config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ depends on !LD_IS_LLD || LLD_VERSION >= 130000
++ # https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
++ depends on AS_IS_GNU || AS_VERSION >= 150000
+ help
+ Say Y if you plan on running a kernel with a big-endian userspace.
+
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 2d49aea0ff67a8..9a2d3723cd0fa9 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -158,7 +158,7 @@ endif
+
+ all: $(notdir $(KBUILD_IMAGE))
+
+-
++vmlinuz.efi: Image
+ Image vmlinuz.efi: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+@@ -169,12 +169,6 @@ install: KBUILD_IMAGE := $(boot)/Image
+ install zinstall:
+ $(call cmd,install)
+
+-PHONY += vdso_install
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+- $(if $(CONFIG_COMPAT_VDSO), \
+- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
+-
+ archprepare:
+ $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
+@@ -205,6 +199,9 @@ ifdef CONFIG_COMPAT_VDSO
+ endif
+ endif
+
++vdso-install-y += arch/arm64/kernel/vdso/vdso.so.dbg
++vdso-install-$(CONFIG_COMPAT_VDSO) += arch/arm64/kernel/vdso32/vdso.so.dbg:vdso32.so
++
+ include $(srctree)/scripts/Makefile.defconf
+
+ PHONY += virtconfig
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 9ec49ac2f6fd5d..381d58cea092d9 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -291,6 +291,8 @@ sw {
+ };
+
+ &spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&spdif_tx_pin>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
+index 4903d6358112de..855b7d43bc503a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
+@@ -166,6 +166,8 @@ &r_ir {
+ };
+
+ &spdif {
++ pinctrl-names = "default";
++ pinctrl-0 = <&spdif_tx_pin>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+index ca1d287a0a01d9..d11e5041bae9a4 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+@@ -406,6 +406,7 @@ spi1_cs_pin: spi1-cs-pin {
+ function = "spi1";
+ };
+
++ /omit-if-no-ref/
+ spdif_tx_pin: spdif-tx-pin {
+ pins = "PH7";
+ function = "spdif";
+@@ -655,10 +656,8 @@ spdif: spdif@5093000 {
+ clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
+ clock-names = "apb", "spdif";
+ resets = <&ccu RST_BUS_SPDIF>;
+- dmas = <&dma 2>;
+- dma-names = "tx";
+- pinctrl-names = "default";
+- pinctrl-0 = <&spdif_tx_pin>;
++ dmas = <&dma 2>, <&dma 2>;
++ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
+index 15290e6892fca4..fc7315b9440659 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero.dtsi
+@@ -68,10 +68,7 @@ &ehci1 {
+ &emac0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ext_rgmii_pins>;
+- phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+- allwinner,rx-delay-ps = <3100>;
+- allwinner,tx-delay-ps = <700>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
+index d83852e72f0634..b5d713926a341a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-zero2.dts
+@@ -13,6 +13,9 @@ / {
+ };
+
+ &emac0 {
++ allwinner,rx-delay-ps = <3100>;
++ allwinner,tx-delay-ps = <700>;
++ phy-mode = "rgmii";
+ phy-supply = <&reg_dcdce>;
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
+index 00fe28caac939a..b3b1b8692125f9 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h618-orangepi-zero3.dts
+@@ -13,6 +13,8 @@ / {
+ };
+
+ &emac0 {
++ allwinner,tx-delay-ps = <700>;
++ phy-mode = "rgmii-rxid";
+ phy-supply = <&reg_dldo1>;
+ };
+
+diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+index dccbba6e7f98e4..dbf2dce8d1d68a 100644
+--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
++++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+@@ -145,7 +145,6 @@ pci@fbc00000 {
+ msix: msix@fbe00000 {
+ compatible = "al,alpine-msix";
+ reg = <0x0 0xfbe00000 0x0 0x100000>;
+- interrupt-controller;
+ msi-controller;
+ al,msi-base-spi = <160>;
+ al,msi-num-spis = <160>;
+diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+index 39481d7fd7d4da..3ea178acdddfe2 100644
+--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
++++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+@@ -355,7 +355,6 @@ pcie@fbd00000 {
+ msix: msix@fbe00000 {
+ compatible = "al,alpine-msix";
+ reg = <0x0 0xfbe00000 0x0 0x100000>;
+- interrupt-controller;
+ msi-controller;
+ al,msi-base-spi = <336>;
+ al,msi-num-spis = <959>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index ff68b911b72971..0ff0d090548d0e 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -215,6 +215,11 @@ hdmi_tx: hdmi-tx@0 {
+ #sound-dai-cells = <0>;
+ status = "disabled";
+
++ assigned-clocks = <&clkc CLKID_HDMI_SEL>,
++ <&clkc CLKID_HDMI>;
++ assigned-clock-parents = <&xtal>, <0>;
++ assigned-clock-rates = <0>, <24000000>;
++
+ /* VPU VENC Input */
+ hdmi_tx_venc_port: port@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
+index 6a1f4dcf64885f..7b655e07e80cf6 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
+@@ -367,6 +367,10 @@ &ethmac {
+ power-domains = <&pwrc PWRC_G12A_ETH_ID>;
+ };
+
++&hdmi_tx {
++ power-domains = <&pwrc PWRC_G12A_VPU_ID>;
++};
++
+ &vpu {
+ power-domains = <&pwrc PWRC_G12A_VPU_ID>;
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+index 12ef6e81c8bd63..ed00e67e6923a0 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+@@ -311,10 +311,16 @@ &hdmi_tx {
+ <&reset RESET_HDMI_SYSTEM_RESET>,
+ <&reset RESET_HDMI_TX>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+- clocks = <&clkc CLKID_HDMI_PCLK>,
+- <&clkc CLKID_CLK81>,
++ clocks = <&clkc CLKID_HDMI>,
++ <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+ clock-names = "isfr", "iahb", "venci";
++ power-domains = <&pwrc PWRC_GXBB_VPU_ID>;
++
++ assigned-clocks = <&clkc CLKID_HDMI_SEL>,
++ <&clkc CLKID_HDMI>;
++ assigned-clock-parents = <&xtal>, <0>;
++ assigned-clock-rates = <0>, <24000000>;
+ };
+
+ &sysctrl {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index 17bcfa4702e170..f58d1790de1cb4 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -323,10 +323,16 @@ &hdmi_tx {
+ <&reset RESET_HDMI_SYSTEM_RESET>,
+ <&reset RESET_HDMI_TX>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+- clocks = <&clkc CLKID_HDMI_PCLK>,
+- <&clkc CLKID_CLK81>,
++ clocks = <&clkc CLKID_HDMI>,
++ <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+ clock-names = "isfr", "iahb", "venci";
++ power-domains = <&pwrc PWRC_GXBB_VPU_ID>;
++
++ assigned-clocks = <&clkc CLKID_HDMI_SEL>,
++ <&clkc CLKID_HDMI>;
++ assigned-clock-parents = <&xtal>, <0>;
++ assigned-clock-rates = <0>, <24000000>;
+ };
+
+ &sysctrl {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
+index 8ffbcb2b1ac594..bbd3c05cbd9089 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-s4-s805x2-aq222.dts
+@@ -15,7 +15,7 @@ / {
+ #size-cells = <2>;
+
+ aliases {
+- serial0 = &uart_B;
++ serial0 = &uart_b;
+ };
+
+ memory@0 {
+@@ -25,6 +25,6 @@ memory@0 {
+
+ };
+
+-&uart_B {
++&uart_b {
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+index f24460186d3d82..a781eabe21f04a 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+@@ -61,10 +61,15 @@ xtal: xtal-clk {
+ #clock-cells = <0>;
+ };
+
+- pwrc: power-controller {
+- compatible = "amlogic,meson-s4-pwrc";
+- #power-domain-cells = <1>;
+- status = "okay";
++ firmware {
++ sm: secure-monitor {
++ compatible = "amlogic,meson-gxbb-sm";
++
++ pwrc: power-controller {
++ compatible = "amlogic,meson-s4-pwrc";
++ #power-domain-cells = <1>;
++ };
++ };
+ };
+
+ soc {
+@@ -118,14 +123,14 @@ gpio_intc: interrupt-controller@4080 {
+ <10 11 12 13 14 15 16 17 18 19 20 21>;
+ };
+
+- uart_B: serial@7a000 {
++ uart_b: serial@7a000 {
+ compatible = "amlogic,meson-s4-uart",
+ "amlogic,meson-ao-uart";
+ reg = <0x0 0x7a000 0x0 0x18>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_EDGE_RISING>;
+- status = "disabled";
+ clocks = <&xtal>, <&xtal>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
++ status = "disabled";
+ };
+
+ reset: reset-controller@2000 {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+index 643f94d9d08e10..13e742ba00bea0 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+@@ -339,7 +339,7 @@ tdmin_lb: audio-controller@3c0 {
+ };
+
+ spdifin: audio-controller@400 {
+- compatible = "amlogic,g12a-spdifin",
++ compatible = "amlogic,sm1-spdifin",
+ "amlogic,axg-spdifin";
+ reg = <0x0 0x400 0x0 0x30>;
+ #sound-dai-cells = <0>;
+@@ -353,7 +353,7 @@ spdifin: audio-controller@400 {
+ };
+
+ spdifout_a: audio-controller@480 {
+- compatible = "amlogic,g12a-spdifout",
++ compatible = "amlogic,sm1-spdifout",
+ "amlogic,axg-spdifout";
+ reg = <0x0 0x480 0x0 0x50>;
+ #sound-dai-cells = <0>;
+@@ -518,6 +518,10 @@ &gpio_intc {
+ "amlogic,meson-gpio-intc";
+ };
+
++&hdmi_tx {
++ power-domains = <&pwrc PWRC_SM1_VPU_ID>;
++};
++
+ &pcie {
+ power-domains = <&pwrc PWRC_SM1_PCIE_ID>;
+ };
+diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+index 2f124b027bbf0a..aadfa0ae052526 100644
+--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+@@ -227,9 +227,6 @@ ethernet-switch@0 {
+ brcm,num-gphy = <5>;
+ brcm,num-rgmii-ports = <2>;
+
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+ ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+index 9dcd25ec2c0418..896d1f33b5b617 100644
+--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
++++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+@@ -586,6 +586,7 @@ gpio_g: gpio@660a0000 {
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+index f049687d6b96d2..d8516ec0dae745 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+@@ -450,6 +450,7 @@ gpio_hsls: gpio@d0000 {
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinmux 0 0 16>,
+ <&pinmux 16 71 2>,
+diff --git a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts
+index 47a389d9ff7d71..9d74fa6bfed9fb 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts
++++ b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts
+@@ -32,7 +32,7 @@ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x3da00000>,
+ <0x0 0xc0000000 0x40000000>,
+- <0x8 0x80000000 0x40000000>;
++ <0x8 0x80000000 0x80000000>;
+ };
+
+ gpio-keys {
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index d2f5345d056007..717288bbdb8b63 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -1186,26 +1186,34 @@ sata1: sata@3210000 {
+ dma-coherent;
+ };
+
+- usb0: usb@3100000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3100000 0x0 0x10000>;
+- interrupts = <0 80 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+- };
++ bus: bus {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ compatible = "simple-bus";
++ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++ usb0: usb@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ status = "disabled";
++ };
+
+- usb1: usb@3110000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3110000 0x0 0x10000>;
+- interrupts = <0 81 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ usb1: usb@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ status = "disabled";
++ };
+ };
+
+ ccn@4000000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
+index 9b1b522517f8ef..0878a15acc1ba5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
+@@ -82,12 +82,9 @@ reg_module_wifi: regulator-module-wifi {
+ pinctrl-0 = <&pinctrl_wifi_pdn>;
+ gpio = <&lsio_gpio1 28 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
++ regulator-always-on;
+ regulator-name = "wifi_pwrdn_fake_regulator";
+ regulator-settling-time-us = <100>;
+-
+- regulator-state-mem {
+- regulator-off-in-suspend;
+- };
+ };
+
+ reg_pcie_switch: regulator-pcie-switch {
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+index fc1a5d34382b7e..49298cd9eb0da0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+@@ -41,7 +41,7 @@ usbotg1: usb@5b0d0000 {
+ interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc1 0>;
+- clocks = <&usb2_lpcg 0>;
++ clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
+ ahb-burst-config = <0x0>;
+ tx-burst-size-dword = <0x10>;
+ rx-burst-size-dword = <0x10>;
+@@ -58,7 +58,7 @@ usbmisc1: usbmisc@5b0d0200 {
+ usbphy1: usbphy@5b100000 {
+ compatible = "fsl,imx7ulp-usbphy";
+ reg = <0x5b100000 0x1000>;
+- clocks = <&usb2_lpcg 1>;
++ clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
+ power-domains = <&pd IMX_SC_R_USB_0_PHY>;
+ status = "disabled";
+ };
+@@ -67,8 +67,8 @@ usdhc1: mmc@5b010000 {
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc0_lpcg IMX_LPCG_CLK_0>,
+- <&sdhc0_lpcg IMX_LPCG_CLK_5>;
++ <&sdhc0_lpcg IMX_LPCG_CLK_5>,
++ <&sdhc0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ status = "disabled";
+@@ -78,8 +78,8 @@ usdhc2: mmc@5b020000 {
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b020000 0x10000>;
+ clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc1_lpcg IMX_LPCG_CLK_0>,
+- <&sdhc1_lpcg IMX_LPCG_CLK_5>;
++ <&sdhc1_lpcg IMX_LPCG_CLK_5>,
++ <&sdhc1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_1>;
+ fsl,tuning-start-tap = <20>;
+@@ -91,8 +91,8 @@ usdhc3: mmc@5b030000 {
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b030000 0x10000>;
+ clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
+- <&sdhc2_lpcg IMX_LPCG_CLK_0>,
+- <&sdhc2_lpcg IMX_LPCG_CLK_5>;
++ <&sdhc2_lpcg IMX_LPCG_CLK_5>,
++ <&sdhc2_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "per";
+ power-domains = <&pd IMX_SC_R_SDHC_2>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+index adb98a72bdfd91..89857e14c46140 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+@@ -27,8 +27,8 @@ lpspi0: spi@5a000000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi0_lpcg 0>,
+- <&spi0_lpcg 1>;
++ clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
++ <&spi0_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -43,8 +43,8 @@ lpspi1: spi@5a010000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi1_lpcg 0>,
+- <&spi1_lpcg 1>;
++ clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
++ <&spi1_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -59,8 +59,8 @@ lpspi2: spi@5a020000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi2_lpcg 0>,
+- <&spi2_lpcg 1>;
++ clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
++ <&spi2_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -75,8 +75,8 @@ lpspi3: spi@5a030000 {
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&spi3_lpcg 0>,
+- <&spi3_lpcg 1>;
++ clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
++ <&spi3_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <60000000>;
+@@ -282,8 +282,8 @@ adc0: adc@5a880000 {
+ reg = <0x5a880000 0x10000>;
+ interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&adc0_lpcg 0>,
+- <&adc0_lpcg 1>;
++ clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
++ <&adc0_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+@@ -297,8 +297,8 @@ adc1: adc@5a890000 {
+ reg = <0x5a890000 0x10000>;
+ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&adc1_lpcg 0>,
+- <&adc1_lpcg 1>;
++ clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
++ <&adc1_lpcg IMX_LPCG_CLK_4>;
+ clock-names = "per", "ipg";
+ assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+@@ -311,8 +311,8 @@ flexcan1: can@5a8d0000 {
+ reg = <0x5a8d0000 0x10000>;
+ interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+- clocks = <&can0_lpcg 1>,
+- <&can0_lpcg 0>;
++ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
++ <&can0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <40000000>;
+@@ -332,8 +332,8 @@ flexcan2: can@5a8e0000 {
+ * CAN1 shares CAN0's clock and to enable CAN0's clock it
+ * has to be powered on.
+ */
+- clocks = <&can0_lpcg 1>,
+- <&can0_lpcg 0>;
++ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
++ <&can0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <40000000>;
+@@ -353,8 +353,8 @@ flexcan3: can@5a8f0000 {
+ * CAN2 shares CAN0's clock and to enable CAN0's clock it
+ * has to be powered on.
+ */
+- clocks = <&can0_lpcg 1>,
+- <&can0_lpcg 0>;
++ clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
++ <&can0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per";
+ assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <40000000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+index ea8c93757521b3..c66449798efce1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+@@ -32,11 +32,12 @@ lsio_pwm0: pwm@5d000000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d000000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm0_lpcg 4>,
+- <&pwm0_lpcg 1>;
++ clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
++ <&pwm0_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+- #pwm-cells = <2>;
++ #pwm-cells = <3>;
++ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+@@ -44,11 +45,12 @@ lsio_pwm1: pwm@5d010000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d010000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm1_lpcg 4>,
+- <&pwm1_lpcg 1>;
++ clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
++ <&pwm1_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+- #pwm-cells = <2>;
++ #pwm-cells = <3>;
++ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+@@ -56,11 +58,12 @@ lsio_pwm2: pwm@5d020000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d020000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm2_lpcg 4>,
+- <&pwm2_lpcg 1>;
++ clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
++ <&pwm2_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+- #pwm-cells = <2>;
++ #pwm-cells = <3>;
++ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+@@ -68,11 +71,12 @@ lsio_pwm3: pwm@5d030000 {
+ compatible = "fsl,imx27-pwm";
+ reg = <0x5d030000 0x10000>;
+ clock-names = "ipg", "per";
+- clocks = <&pwm3_lpcg 4>,
+- <&pwm3_lpcg 1>;
++ clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
++ <&pwm3_lpcg IMX_LPCG_CLK_1>;
+ assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+- #pwm-cells = <2>;
++ #pwm-cells = <3>;
++ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
+index 8b16bd68576c0b..d9fa0deea7002e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
+@@ -294,8 +294,8 @@ MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x19
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+- MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
++ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083
++ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083
+ >;
+ };
+
+@@ -313,19 +313,19 @@ MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x19
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140
+- MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140
+- MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140
+- MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140
++ MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0
++ MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0
++ MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0
++ MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140
+- MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140
+- MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140
+- MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140
++ MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0
++ MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0
++ MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0
++ MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0
+ >;
+ };
+
+@@ -337,40 +337,40 @@ MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x19
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
++ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
+- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
++ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
++ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
++ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
+- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
++ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
++ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
++ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
+- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
++ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
++ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ >;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
+index dcec57c20399ed..aab8e24216501e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
+@@ -279,8 +279,8 @@ MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x19
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+- MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
++ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083
++ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083
+ >;
+ };
+
+@@ -292,19 +292,19 @@ MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x19
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140
+- MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140
+- MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140
+- MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140
++ MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0
++ MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0
++ MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0
++ MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140
+- MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140
+- MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140
+- MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140
++ MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0
++ MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0
++ MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0
++ MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0
+ >;
+ };
+
+@@ -316,40 +316,40 @@ MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x19
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
++ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
+- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
++ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
++ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
++ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
+- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
++ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
++ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
++ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
+- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
++ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
++ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ >;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
+index 6e75ab879bf59c..60abcb636cedf3 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
+@@ -210,7 +210,7 @@ rv3028: rtc@52 {
+ reg = <0x52>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+- interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_LOW>;
+ trickle-diode-disable;
+ };
+ };
+@@ -252,8 +252,8 @@ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+- MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
++ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083
++ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083
+ >;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
+index 1f8326613ee9e3..2076148e08627a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
+@@ -237,8 +237,8 @@ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+- MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+- MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
++ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083
++ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083
+ >;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+index 0ce60ad9c7d50f..26d4afdbca6f40 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+@@ -47,17 +47,6 @@ pps {
+ gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+-
+- reg_usb_otg1_vbus: regulator-usb-otg1 {
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_reg_usb1_en>;
+- compatible = "regulator-fixed";
+- regulator-name = "usb_otg1_vbus";
+- gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+- enable-active-high;
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- };
+ };
+
+ /* off-board header */
+@@ -145,9 +134,10 @@ &uart3 {
+ };
+
+ &usbotg1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usbotg1>;
+ dr_mode = "otg";
+ over-current-active-low;
+- vbus-supply = <&reg_usb_otg1_vbus>;
+ status = "okay";
+ };
+
+@@ -205,14 +195,6 @@ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
+ >;
+ };
+
+- pinctrl_reg_usb1_en: regusb1grp {
+- fsl,pins = <
+- MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41
+- MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
+- MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+- >;
+- };
+-
+ pinctrl_spi2: spi2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+@@ -235,4 +217,11 @@ MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
+ >;
+ };
++
++ pinctrl_usbotg1: usbotg1grp {
++ fsl,pins = <
++ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
++ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
++ >;
++ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 6f0811587142d2..14d20a33af8e15 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -929,7 +929,7 @@ pinctrl_gpio8: gpio8grp {
+ /* Verdin GPIO_9_DSI (pulled-up as active-low) */
+ pinctrl_gpio_9_dsi: gpio9dsigrp {
+ fsl,pins =
+- <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
++ <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
+ };
+
+ /* Verdin GPIO_10_DSI (pulled-up as active-low) */
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 236fe44f779df3..54faf83cb436e5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -399,6 +399,7 @@ micfil: audio-controller@30080000 {
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+@@ -1407,7 +1408,7 @@ gpu_3d: gpu@38000000 {
+ assigned-clocks = <&clk IMX8MM_CLK_GPU3D_CORE>,
+ <&clk IMX8MM_GPU_PLL_OUT>;
+ assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>;
+- assigned-clock-rates = <0>, <1000000000>;
++ assigned-clock-rates = <0>, <800000000>;
+ power-domains = <&pgc_gpu>;
+ };
+
+@@ -1422,7 +1423,7 @@ gpu_2d: gpu@38008000 {
+ assigned-clocks = <&clk IMX8MM_CLK_GPU2D_CORE>,
+ <&clk IMX8MM_GPU_PLL_OUT>;
+ assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>;
+- assigned-clock-rates = <0>, <1000000000>;
++ assigned-clock-rates = <0>, <800000000>;
+ power-domains = <&pgc_gpu>;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index aa38dd6dc9ba54..1bb1d0c1bae4de 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -371,6 +371,7 @@ micfil: audio-controller@30080000 {
+ "pll8k", "pll11k", "clkext3";
+ dmas = <&sdma2 24 25 0x80000000>;
+ dma-names = "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+index acd265d8b58ed9..e094f409028ddb 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+@@ -163,13 +163,12 @@ sound-wm8962 {
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai3>;
++ frame-master;
++ bitclock-master;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&wm8962>;
+- clocks = <&clk IMX8MP_CLK_IPP_DO_CLKO1>;
+- frame-master;
+- bitclock-master;
+ };
+ };
+ };
+@@ -381,10 +380,9 @@ &pcie_phy {
+ &sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+- assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
+- <&clk IMX8MP_AUDIO_PLL2> ;
+- assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+- assigned-clock-rates = <12288000>, <361267200>;
++ assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
++ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
++ assigned-clock-rates = <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+index 13674dc64be9d3..cd44bf83745cae 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+@@ -121,7 +121,7 @@ &ecspi1 {
+ flash@0 { /* W25Q128JVEI */
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+- spi-max-frequency = <100000000>; /* Up to 133 MHz */
++ spi-max-frequency = <40000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ };
+@@ -484,7 +484,7 @@ &uart3 { /* A53 Debug */
+ &uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+- status = "okay";
++ status = "disabled";
+ };
+
+ &usb3_phy0 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+index 28db9349ed62c4..267ceffc02d840 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
+@@ -284,7 +284,6 @@ &usb_dwc3_1 {
+ usb_hub_2_x: hub@1 {
+ compatible = "usbbda,5411";
+ reg = <1>;
+- reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_usb_hub>;
+ peer-hub = <&usb_hub_3_x>;
+ };
+@@ -293,7 +292,6 @@ usb_hub_2_x: hub@1 {
+ usb_hub_3_x: hub@2 {
+ compatible = "usbbda,411";
+ reg = <2>;
+- reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_usb_hub>;
+ peer-hub = <&usb_hub_2_x>;
+ };
+@@ -443,7 +441,6 @@ MX8MP_IOMUXC_UART4_TXD__UART4_DCE_TX 0x49
+ pinctrl_usb1: usb1grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x10
+- MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x19
+ >;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+index cb1953d14aa907..eae39c1cb98568 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+@@ -251,8 +251,8 @@ tc_bridge: bridge@f {
+ <&clk IMX8MP_CLK_CLKOUT2>,
+ <&clk IMX8MP_AUDIO_PLL2_OUT>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+- assigned-clock-rates = <13000000>, <13000000>, <156000000>;
+- reset-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
++ assigned-clock-rates = <13000000>, <13000000>, <208000000>;
++ reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+
+ ports {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+index cc9d468b43ab8d..92f8cc05fe9da1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+@@ -23,7 +23,7 @@ hdmi-connector {
+
+ port {
+ hdmi_connector_in: endpoint {
+- remote-endpoint = <&adv7533_out>;
++ remote-endpoint = <&adv7535_out>;
+ };
+ };
+ };
+@@ -107,6 +107,13 @@ reg_usdhc2_vmmc: regulator-usdhc2 {
+ enable-active-high;
+ };
+
++ reg_vext_3v3: regulator-vext-3v3 {
++ compatible = "regulator-fixed";
++ regulator-name = "VEXT_3V3";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ };
++
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "wm8960-audio";
+@@ -342,7 +349,7 @@ BUCK4 {
+ regulator-always-on;
+ };
+
+- BUCK5 {
++ reg_buck5: BUCK5 {
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <1950000>;
+@@ -393,14 +400,16 @@ &i2c2 {
+
+ hdmi@3d {
+ compatible = "adi,adv7535";
+- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
+- reg-names = "main", "cec", "edid", "packet";
++ reg = <0x3d>;
++ interrupt-parent = <&gpio1>;
++ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ adi,dsi-lanes = <4>;
+- adi,input-depth = <8>;
+- adi,input-colorspace = "rgb";
+- adi,input-clock = "1x";
+- adi,input-style = <1>;
+- adi,input-justification = "evenly";
++ avdd-supply = <&reg_buck5>;
++ dvdd-supply = <&reg_buck5>;
++ pvdd-supply = <&reg_buck5>;
++ a2vdd-supply = <&reg_buck5>;
++ v3p3-supply = <&reg_vext_3v3>;
++ v1p2-supply = <&reg_buck5>;
+
+ ports {
+ #address-cells = <1>;
+@@ -409,7 +418,7 @@ ports {
+ port@0 {
+ reg = <0>;
+
+- adv7533_in: endpoint {
++ adv7535_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+@@ -417,7 +426,7 @@ adv7533_in: endpoint {
+ port@1 {
+ reg = <1>;
+
+- adv7533_out: endpoint {
++ adv7535_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+@@ -502,7 +511,7 @@ port@1 {
+ reg = <1>;
+
+ dsi_out: endpoint {
+- remote-endpoint = <&adv7533_in>;
++ remote-endpoint = <&adv7535_in>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+index 4240e20d38ac32..258e90cc16ff3a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+@@ -168,6 +168,13 @@ reg_vcc_12v0: regulator-12v0 {
+ enable-active-high;
+ };
+
++ reg_vcc_1v8: regulator-1v8 {
++ compatible = "regulator-fixed";
++ regulator-name = "VCC_1V8";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ };
++
+ reg_vcc_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+@@ -464,7 +471,7 @@ tlv320aic3x04: audio-codec@18 {
+ clock-names = "mclk";
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1>;
+ reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
+- iov-supply = <&reg_vcc_3v3>;
++ iov-supply = <&reg_vcc_1v8>;
+ ldoin-supply = <&reg_vcc_3v3>;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+index 68c62def4c06e1..d27bfba1b4b8c4 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+@@ -161,7 +161,7 @@ &uart3 {
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+- shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
++ shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index 83d907294fbc73..d1488ebfef3f02 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -785,6 +785,23 @@ pgc_usb2_phy: power-domain@3 {
+ reg = <IMX8MP_POWER_DOMAIN_USB2_PHY>;
+ };
+
++ pgc_mlmix: power-domain@4 {
++ #power-domain-cells = <0>;
++ reg = <IMX8MP_POWER_DOMAIN_MLMIX>;
++ clocks = <&clk IMX8MP_CLK_ML_AXI>,
++ <&clk IMX8MP_CLK_ML_AHB>,
++ <&clk IMX8MP_CLK_NPU_ROOT>;
++ assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
++ <&clk IMX8MP_CLK_ML_AXI>,
++ <&clk IMX8MP_CLK_ML_AHB>;
++ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
++ <&clk IMX8MP_SYS_PLL1_800M>,
++ <&clk IMX8MP_SYS_PLL1_800M>;
++ assigned-clock-rates = <800000000>,
++ <800000000>,
++ <300000000>;
++ };
++
+ pgc_audio: power-domain@5 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_AUDIOMIX>;
+@@ -817,6 +834,12 @@ pgc_gpumix: power-domain@7 {
+ assigned-clock-rates = <800000000>, <400000000>;
+ };
+
++ pgc_vpumix: power-domain@8 {
++ #power-domain-cells = <0>;
++ reg = <IMX8MP_POWER_DOMAIN_VPUMIX>;
++ clocks = <&clk IMX8MP_CLK_VPU_ROOT>;
++ };
++
+ pgc_gpu3d: power-domain@9 {
+ #power-domain-cells = <0>;
+ reg = <IMX8MP_POWER_DOMAIN_GPU3D>;
+@@ -832,60 +855,64 @@ pgc_mediamix: power-domain@10 {
+ <&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
+ };
+
+- pgc_mipi_phy2: power-domain@16 {
++ pgc_vpu_g1: power-domain@11 {
+ #power-domain-cells = <0>;
+- reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
++ power-domains = <&pgc_vpumix>;
++ reg = <IMX8MP_POWER_DOMAIN_VPU_G1>;
++ clocks = <&clk IMX8MP_CLK_VPU_G1_ROOT>;
+ };
+
+- pgc_hsiomix: power-domain@17 {
++ pgc_vpu_g2: power-domain@12 {
+ #power-domain-cells = <0>;
+- reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
+- clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+- <&clk IMX8MP_CLK_HSIO_ROOT>;
+- assigned-clocks = <&clk IMX8MP_CLK_HSIO_AXI>;
+- assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>;
+- assigned-clock-rates = <500000000>;
++ power-domains = <&pgc_vpumix>;
++ reg = <IMX8MP_POWER_DOMAIN_VPU_G2>;
++ clocks = <&clk IMX8MP_CLK_VPU_G2_ROOT>;
++
+ };
+
+- pgc_ispdwp: power-domain@18 {
++ pgc_vpu_vc8000e: power-domain@13 {
+ #power-domain-cells = <0>;
+- reg = <IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP>;
+- clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>;
++ power-domains = <&pgc_vpumix>;
++ reg = <IMX8MP_POWER_DOMAIN_VPU_VC8000E>;
++ clocks = <&clk IMX8MP_CLK_VPU_VC8KE_ROOT>;
+ };
+
+- pgc_vpumix: power-domain@19 {
++ pgc_hdmimix: power-domain@14 {
+ #power-domain-cells = <0>;
+- reg = <IMX8MP_POWER_DOMAIN_VPUMIX>;
+- clocks = <&clk IMX8MP_CLK_VPU_ROOT>;
++ reg = <IMX8MP_POWER_DOMAIN_HDMIMIX>;
++ clocks = <&clk IMX8MP_CLK_HDMI_ROOT>,
++ <&clk IMX8MP_CLK_HDMI_APB>;
++ assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
++ <&clk IMX8MP_CLK_HDMI_APB>;
++ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>,
++ <&clk IMX8MP_SYS_PLL1_133M>;
++ assigned-clock-rates = <500000000>, <133000000>;
+ };
+
+- pgc_vpu_g1: power-domain@20 {
++ pgc_hdmi_phy: power-domain@15 {
+ #power-domain-cells = <0>;
+- power-domains = <&pgc_vpumix>;
+- reg = <IMX8MP_POWER_DOMAIN_VPU_G1>;
+- clocks = <&clk IMX8MP_CLK_VPU_G1_ROOT>;
++ reg = <IMX8MP_POWER_DOMAIN_HDMI_PHY>;
+ };
+
+- pgc_vpu_g2: power-domain@21 {
++ pgc_mipi_phy2: power-domain@16 {
+ #power-domain-cells = <0>;
+- power-domains = <&pgc_vpumix>;
+- reg = <IMX8MP_POWER_DOMAIN_VPU_G2>;
+- clocks = <&clk IMX8MP_CLK_VPU_G2_ROOT>;
++ reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
+ };
+
+- pgc_vpu_vc8000e: power-domain@22 {
++ pgc_hsiomix: power-domain@17 {
+ #power-domain-cells = <0>;
+- power-domains = <&pgc_vpumix>;
+- reg = <IMX8MP_POWER_DOMAIN_VPU_VC8000E>;
+- clocks = <&clk IMX8MP_CLK_VPU_VC8KE_ROOT>;
++ reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
++ clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
++ <&clk IMX8MP_CLK_HSIO_ROOT>;
++ assigned-clocks = <&clk IMX8MP_CLK_HSIO_AXI>;
++ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_500M>;
++ assigned-clock-rates = <500000000>;
+ };
+
+- pgc_mlmix: power-domain@24 {
++ pgc_ispdwp: power-domain@18 {
+ #power-domain-cells = <0>;
+- reg = <IMX8MP_POWER_DOMAIN_MLMIX>;
+- clocks = <&clk IMX8MP_CLK_ML_AXI>,
+- <&clk IMX8MP_CLK_ML_AHB>,
+- <&clk IMX8MP_CLK_NPU_ROOT>;
++ reg = <IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP>;
++ clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>;
+ };
+ };
+ };
+@@ -1831,6 +1858,27 @@ hsio_blk_ctrl: blk-ctrl@32f10000 {
+ #power-domain-cells = <1>;
+ #clock-cells = <0>;
+ };
++
++ hdmi_blk_ctrl: blk-ctrl@32fc0000 {
++ compatible = "fsl,imx8mp-hdmi-blk-ctrl", "syscon";
++ reg = <0x32fc0000 0x1000>;
++ clocks = <&clk IMX8MP_CLK_HDMI_APB>,
++ <&clk IMX8MP_CLK_HDMI_ROOT>,
++ <&clk IMX8MP_CLK_HDMI_REF_266M>,
++ <&clk IMX8MP_CLK_HDMI_24M>,
++ <&clk IMX8MP_CLK_HDMI_FDCC_TST>;
++ clock-names = "apb", "axi", "ref_266m", "ref_24m", "fdcc";
++ power-domains = <&pgc_hdmimix>, <&pgc_hdmimix>,
++ <&pgc_hdmimix>, <&pgc_hdmimix>,
++ <&pgc_hdmimix>, <&pgc_hdmimix>,
++ <&pgc_hdmimix>, <&pgc_hdmi_phy>,
++ <&pgc_hdmimix>, <&pgc_hdmimix>;
++ power-domain-names = "bus", "irqsteer", "lcdif",
++ "pai", "pvi", "trng",
++ "hdmi-tx", "hdmi-tx-phy",
++ "hdcp", "hrv";
++ #power-domain-cells = <1>;
++ };
+ };
+
+ pcie: pcie@33800000 {
+@@ -1970,6 +2018,18 @@ vpumix_blk_ctrl: blk-ctrl@38330000 {
+ interconnect-names = "g1", "g2", "vc8000e";
+ };
+
++ npu: npu@38500000 {
++ compatible = "vivante,gc";
++ reg = <0x38500000 0x200000>;
++ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clk IMX8MP_CLK_NPU_ROOT>,
++ <&clk IMX8MP_CLK_NPU_ROOT>,
++ <&clk IMX8MP_CLK_ML_AXI>,
++ <&clk IMX8MP_CLK_ML_AHB>;
++ clock-names = "core", "shader", "bus", "reg";
++ power-domains = <&pgc_mlmix>;
++ };
++
+ gic: interrupt-controller@38800000 {
+ compatible = "arm,gic-v3";
+ reg = <0x38800000 0x10000>,
+@@ -2030,6 +2090,7 @@ usb_dwc3_0: usb@38100000 {
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
++ snps,parkmode-disable-ss-quirk;
+ };
+
+ };
+@@ -2072,6 +2133,7 @@ usb_dwc3_1: usb@38200000 {
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
++ snps,parkmode-disable-ss-quirk;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 35f07dfb4ca8df..052ba9baa400f8 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -1649,6 +1649,7 @@ usb_dwc3_0: usb@38100000 {
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg1>;
++ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+@@ -1680,6 +1681,7 @@ usb_dwc3_1: usb@38200000 {
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg2>;
++ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+index 0b34cc2250e14c..a9ab87699f3d56 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+@@ -36,7 +36,7 @@ reg_usdhc2_vmmc: usdhc2-vmmc {
+ regulator-name = "SD1_SPWR";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+- gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
++ gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+index e9b198c13b2fd8..d896135f31fcd8 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+@@ -49,15 +49,15 @@ &flexcan1 {
+ };
+
+ &flexcan2 {
+- clocks = <&can1_lpcg 1>,
+- <&can1_lpcg 0>;
++ clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
++ <&can1_lpcg IMX_LPCG_CLK_0>;
+ assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
+ fsl,clk-source = /bits/ 8 <1>;
+ };
+
+ &flexcan3 {
+- clocks = <&can2_lpcg 1>,
+- <&can2_lpcg 0>;
++ clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
++ <&can2_lpcg IMX_LPCG_CLK_0>;
+ assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
+ fsl,clk-source = /bits/ 8 <1>;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+index 7764b4146e0ab4..2bbdacb1313f9d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
+@@ -8,5 +8,5 @@ &jpegdec {
+ };
+
+ &jpegenc {
+- compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc";
++ compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+index cafd39130eb887..a06ca740f540c7 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+@@ -168,7 +168,6 @@ &usdhc2 {
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+- no-sdio;
+ no-mmc;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+index f06139bdff97e3..aaf9685ef0fbbb 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+@@ -437,7 +437,7 @@ &usdhc2 {
+ pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
+- cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
++ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ bus-width = <4>;
+ no-sdio;
+@@ -577,7 +577,7 @@ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX93_PAD_UART2_TXD__LPUART2_TX 0x31e
+ MX93_PAD_UART2_RXD__LPUART2_RX 0x31e
+- MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x31e
++ MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x51e
+ >;
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+index f6e422dc2663e9..b6f3c076fe54a1 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+@@ -19,7 +19,7 @@ reserved-memory {
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+- alloc-ranges = <0 0x60000000 0 0x40000000>;
++ alloc-ranges = <0 0x80000000 0 0x40000000>;
+ size = <0 0x10000000>;
+ linux,cma-default;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index dcf6e4846ac9de..35155b009dd249 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -373,7 +373,7 @@ mediamix: power-domain@44462400 {
+ compatible = "fsl,imx93-src-slice";
+ reg = <0x44462400 0x400>, <0x44465800 0x400>;
+ #power-domain-cells = <0>;
+- clocks = <&clk IMX93_CLK_MEDIA_AXI>,
++ clocks = <&clk IMX93_CLK_NIC_MEDIA_GATE>,
+ <&clk IMX93_CLK_MEDIA_APB>;
+ };
+ };
+@@ -786,6 +786,8 @@ fec: ethernet@42890000 {
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ fsl,stop-mode = <&wakeupmix_gpr 0x0c 1>;
++ nvmem-cells = <&eth_mac1>;
++ nvmem-cell-names = "mac-address";
+ status = "disabled";
+ };
+
+@@ -807,7 +809,9 @@ eqos: ethernet@428a0000 {
+ <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
+ assigned-clock-rates = <100000000>, <250000000>;
+ intf_mode = <&wakeupmix_gpr 0x28>;
+- snps,clk-csr = <0>;
++ snps,clk-csr = <6>;
++ nvmem-cells = <&eth_mac2>;
++ nvmem-cell-names = "mac-address";
+ status = "disabled";
+ };
+
+@@ -888,6 +892,15 @@ ocotp: efuse@47510000 {
+ reg = <0x47510000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
++
++ eth_mac1: mac-address@4ec {
++ reg = <0x4ec 0x6>;
++ };
++
++ eth_mac2: mac-address@4f2 {
++ reg = <0x4f2 0x6>;
++ };
++
+ };
+
+ s4muap: mailbox@47520000 {
+diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+index ed1b5a7a606786..d01023401d7e3f 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+@@ -58,7 +58,7 @@ cpu@3 {
+ gic: interrupt-controller@f1001000 {
+ compatible = "arm,gic-400";
+ reg = <0x0 0xf1001000 0x0 0x1000>, /* GICD */
+- <0x0 0xf1002000 0x0 0x100>; /* GICC */
++ <0x0 0xf1002000 0x0 0x2000>; /* GICC */
+ #address-cells = <0>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
+index 970047f2dabd51..c06e011a6c3ffc 100644
+--- a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi
+@@ -25,9 +25,6 @@ pmic: pmic@0 {
+ gpios = <&gpio28 0 0>;
+
+ regulators {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+ ldo3: ldo3 { /* HDMI */
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1500000>;
+diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
+index 48ec4ebec0a83e..b864ffa74ea8b6 100644
+--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
++++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
+@@ -126,7 +126,6 @@ eth0: ethernet@c1b00000 {
+ amba {
+ #address-cells = <2>;
+ #size-cells = <1>;
+- #interrupt-cells = <3>;
+
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
+index 3869460aa5dcb5..996fb39bb50c1f 100644
+--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
++++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
+@@ -126,7 +126,6 @@ eth0: ethernet@c3700000 {
+ amba {
+ #address-cells = <2>;
+ #size-cells = <1>;
+- #interrupt-cells = <3>;
+
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index 9eab2bb221348a..805ef2d79b4012 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -130,7 +130,7 @@ rtc@6f {
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
+ interrupt-parent = <&gpiosb>;
+- interrupts = <5 0>; /* GPIO2_5 */
++ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index e300145ad1a6f5..1cc3fa1c354de8 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -431,14 +431,14 @@ xor11 {
+ crypto: crypto@90000 {
+ compatible = "inside-secure,safexcel-eip97ies";
+ reg = <0x90000 0x20000>;
+- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "mem", "ring0", "ring1",
+- "ring2", "ring3", "eip";
++ <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "ring0", "ring1", "ring2",
++ "ring3", "eip", "mem";
+ clocks = <&nb_periph_clk 15>;
+ };
+
+diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+index 2c920e22cec2b5..7ec7c789d87eff 100644
+--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+@@ -138,7 +138,6 @@ pmu {
+
+ odmi: odmi@300000 {
+ compatible = "marvell,odmi-controller";
+- interrupt-controller;
+ msi-controller;
+ marvell,odmi-frames = <4>;
+ reg = <0x300000 0x4000>,
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+index 4ec1aae0a3a9c3..7e595ac80043aa 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+@@ -511,14 +511,14 @@ CP11X_LABEL(sdhci0): mmc@780000 {
+ CP11X_LABEL(crypto): crypto@800000 {
+ compatible = "inside-secure,safexcel-eip197b";
+ reg = <0x800000 0x200000>;
+- interrupts = <87 IRQ_TYPE_LEVEL_HIGH>,
+- <88 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts = <88 IRQ_TYPE_LEVEL_HIGH>,
+ <89 IRQ_TYPE_LEVEL_HIGH>,
+ <90 IRQ_TYPE_LEVEL_HIGH>,
+ <91 IRQ_TYPE_LEVEL_HIGH>,
+- <92 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "mem", "ring0", "ring1",
+- "ring2", "ring3", "eip";
++ <92 IRQ_TYPE_LEVEL_HIGH>,
++ <87 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "ring0", "ring1", "ring2", "ring3",
++ "eip", "mem";
+ clock-names = "core", "reg";
+ clocks = <&CP11X_LABEL(clk) 1 26>,
+ <&CP11X_LABEL(clk) 1 17>;
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+index 32cfb3e2efc3a4..47d45ff3d6f578 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
+@@ -120,7 +120,7 @@ cp0_sdhci_pins: cp0-sdhi-pins-0 {
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+- cp0_spi0_pins: cp0-spi-pins-0 {
++ cp0_spi1_pins: cp0-spi-pins-1 {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+@@ -170,7 +170,7 @@ &cp0_sdhci0 {
+
+ &cp0_spi1 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&cp0_spi0_pins>;
++ pinctrl-0 = <&cp0_spi1_pins>;
+ reg = <0x700680 0x50>, /* control */
+ <0x2000000 0x1000000>; /* CS0 */
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+index c7de1ea0d470a9..6eb6a175de38d5 100644
+--- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
++++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
+@@ -307,7 +307,7 @@ &cp0_sdhci0 {
+ &cp0_spi1 {
+ status = "disabled";
+ pinctrl-names = "default";
+- pinctrl-0 = <&cp0_spi0_pins>;
++ pinctrl-0 = <&cp0_spi1_pins>;
+ reg = <0x700680 0x50>;
+
+ flash@0 {
+@@ -371,7 +371,7 @@ cp0_sdhci_pins: cp0-sdhi-pins-0 {
+ "mpp59", "mpp60", "mpp61";
+ marvell,function = "sdio";
+ };
+- cp0_spi0_pins: cp0-spi-pins-0 {
++ cp0_spi1_pins: cp0-spi-pins-1 {
+ marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
+ marvell,function = "spi1";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+index fffdb7bbf889e4..2d0ef6f23b3a93 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+@@ -129,7 +129,7 @@ ethernet_phy0: ethernet-phy@5 {
+ };
+
+ &pio {
+- eth_default: eth_default {
++ eth_default: eth-default-pins {
+ tx_pins {
+ pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
+ <MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
+@@ -156,7 +156,7 @@ mdio_pins {
+ };
+ };
+
+- eth_sleep: eth_sleep {
++ eth_sleep: eth-sleep-pins {
+ tx_pins {
+ pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
+ <MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
+@@ -182,14 +182,14 @@ mdio_pins {
+ };
+ };
+
+- usb0_id_pins_float: usb0_iddig {
++ usb0_id_pins_float: usb0-iddig-pins {
+ pins_iddig {
+ pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
+ bias-pull-up;
+ };
+ };
+
+- usb1_id_pins_float: usb1_iddig {
++ usb1_id_pins_float: usb1-iddig-pins {
+ pins_iddig {
+ pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
+ bias-pull-up;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+index ed1a9d31941530..f767f921bdee16 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -249,10 +249,11 @@ topckgen: syscon@10000000 {
+ #clock-cells = <1>;
+ };
+
+- infracfg: syscon@10001000 {
++ infracfg: clock-controller@10001000 {
+ compatible = "mediatek,mt2712-infracfg", "syscon";
+ reg = <0 0x10001000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ pericfg: syscon@10003000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+index 86cedb0bf1a900..15838c1ee8cc33 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -73,8 +73,9 @@ led-1 {
+ };
+ };
+
+- memory {
++ memory@40000000 {
+ reg = <0 0x40000000 0 0x40000000>;
++ device_type = "memory";
+ };
+
+ reg_1p8v: regulator-1p8v {
+@@ -317,8 +318,8 @@ asm_sel {
+ /* eMMC is shared pin with parallel NAND */
+ emmc_pins_default: emmc-pins-default {
+ mux {
+- function = "emmc", "emmc_rst";
+- groups = "emmc";
++ function = "emmc";
++ groups = "emmc", "emmc_rst";
+ };
+
+ /* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+index dad8e683aac5bc..0a14ef1da60de8 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -55,8 +55,9 @@ key-wps {
+ };
+ };
+
+- memory {
++ memory@40000000 {
+ reg = <0 0x40000000 0 0x20000000>;
++ device_type = "memory";
+ };
+
+ reg_1p8v: regulator-1p8v {
+@@ -243,8 +244,8 @@ &pio {
+ /* eMMC is shared pin with parallel NAND */
+ emmc_pins_default: emmc-pins-default {
+ mux {
+- function = "emmc", "emmc_rst";
+- groups = "emmc";
++ function = "emmc";
++ groups = "emmc", "emmc_rst";
+ };
+
+ /* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7",
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 3ee9266fa8e985..917fa39a74f8d7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -252,7 +252,7 @@ scpsys: power-controller@10006000 {
+ clock-names = "hif_sel";
+ };
+
+- cir: cir@10009000 {
++ cir: ir-receiver@10009000 {
+ compatible = "mediatek,mt7622-cir";
+ reg = <0 0x10009000 0 0x1000>;
+ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
+@@ -283,16 +283,14 @@ thermal_calibration: calib@198 {
+ };
+ };
+
+- apmixedsys: apmixedsys@10209000 {
+- compatible = "mediatek,mt7622-apmixedsys",
+- "syscon";
++ apmixedsys: clock-controller@10209000 {
++ compatible = "mediatek,mt7622-apmixedsys";
+ reg = <0 0x10209000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+- topckgen: topckgen@10210000 {
+- compatible = "mediatek,mt7622-topckgen",
+- "syscon";
++ topckgen: clock-controller@10210000 {
++ compatible = "mediatek,mt7622-topckgen";
+ reg = <0 0x10210000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+@@ -515,7 +513,6 @@ thermal: thermal@1100b000 {
+ <&pericfg CLK_PERI_AUXADC_PD>;
+ clock-names = "therm", "auxadc";
+ resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
+- reset-names = "therm";
+ mediatek,auxadc = <&auxadc>;
+ mediatek,apmixedsys = <&apmixedsys>;
+ nvmem-cells = <&thermal_calibration>;
+@@ -734,9 +731,8 @@ wmac: wmac@18000000 {
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ };
+
+- ssusbsys: ssusbsys@1a000000 {
+- compatible = "mediatek,mt7622-ssusbsys",
+- "syscon";
++ ssusbsys: clock-controller@1a000000 {
++ compatible = "mediatek,mt7622-ssusbsys";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+@@ -793,9 +789,8 @@ u2port1: usb-phy@1a0c5000 {
+ };
+ };
+
+- pciesys: pciesys@1a100800 {
+- compatible = "mediatek,mt7622-pciesys",
+- "syscon";
++ pciesys: clock-controller@1a100800 {
++ compatible = "mediatek,mt7622-pciesys";
+ reg = <0 0x1a100800 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+@@ -921,12 +916,13 @@ sata_port: sata-phy@1a243000 {
+ };
+ };
+
+- hifsys: syscon@1af00000 {
+- compatible = "mediatek,mt7622-hifsys", "syscon";
++ hifsys: clock-controller@1af00000 {
++ compatible = "mediatek,mt7622-hifsys";
+ reg = <0 0x1af00000 0 0x70>;
++ #clock-cells = <1>;
+ };
+
+- ethsys: syscon@1b000000 {
++ ethsys: clock-controller@1b000000 {
+ compatible = "mediatek,mt7622-ethsys",
+ "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+@@ -966,9 +962,7 @@ wed1: wed@1020b000 {
+ };
+
+ eth: ethernet@1b100000 {
+- compatible = "mediatek,mt7622-eth",
+- "mediatek,mt2701-eth",
+- "syscon";
++ compatible = "mediatek,mt7622-eth";
+ reg = <0 0x1b100000 0 0x20000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+index af4a4309bda4b9..aba6686eb34a3a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+@@ -43,7 +43,7 @@ fan: pwm-fan {
+ #cooling-cells = <2>;
+ /* cooling level (0, 1, 2) - pwm inverted */
+ cooling-levels = <255 96 0>;
+- pwms = <&pwm 0 10000 0>;
++ pwms = <&pwm 0 10000>;
+ status = "okay";
+ };
+
+@@ -126,6 +126,7 @@ sfp1: sfp-1 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ los-gpios = <&pio 46 GPIO_ACTIVE_HIGH>;
++ maximum-power-milliwatt = <3000>;
+ mod-def0-gpios = <&pio 49 GPIO_ACTIVE_LOW>;
+ tx-disable-gpios = <&pio 20 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
+@@ -137,6 +138,7 @@ sfp2: sfp-2 {
+ i2c-bus = <&i2c_sfp2>;
+ los-gpios = <&pio 31 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&pio 47 GPIO_ACTIVE_LOW>;
++ maximum-power-milliwatt = <3000>;
+ tx-disable-gpios = <&pio 15 GPIO_ACTIVE_HIGH>;
+ tx-fault-gpios = <&pio 48 GPIO_ACTIVE_HIGH>;
+ };
+@@ -144,22 +146,22 @@ sfp2: sfp-2 {
+
+ &cpu_thermal {
+ cooling-maps {
+- cpu-active-high {
++ map-cpu-active-high {
+ /* active: set fan to cooling level 2 */
+ cooling-device = <&fan 2 2>;
+ trip = <&cpu_trip_active_high>;
+ };
+
+- cpu-active-low {
++ map-cpu-active-med {
+ /* active: set fan to cooling level 1 */
+ cooling-device = <&fan 1 1>;
+- trip = <&cpu_trip_active_low>;
++ trip = <&cpu_trip_active_med>;
+ };
+
+- cpu-passive {
+- /* passive: set fan to cooling level 0 */
++ map-cpu-active-low {
++ /* active: set fan to cooling level 0 */
+ cooling-device = <&fan 0 0>;
+- trip = <&cpu_trip_passive>;
++ trip = <&cpu_trip_active_low>;
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
+index 3ef371ca254e81..2f884c24f1eb46 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
+@@ -237,12 +237,13 @@ &spi0 {
+ pinctrl-0 = <&spi_flash_pins>;
+ cs-gpios = <0>, <0>;
+ status = "okay";
+- spi_nand: spi_nand@0 {
++
++ spi_nand: flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+- spi-tx-buswidth = <4>;
+- spi-rx-buswidth = <4>;
++ spi-tx-bus-width = <4>;
++ spi-rx-bus-width = <4>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index 24eda00e320d3a..559990dcd1d179 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -16,49 +16,49 @@ / {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+- clk40m: oscillator-40m {
+- compatible = "fixed-clock";
+- clock-frequency = <40000000>;
+- #clock-cells = <0>;
+- clock-output-names = "clkxtal";
+- };
+-
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu0: cpu@0 {
+- device_type = "cpu";
+ compatible = "arm,cortex-a53";
+- enable-method = "psci";
+ reg = <0x0>;
++ device_type = "cpu";
++ enable-method = "psci";
+ #cooling-cells = <2>;
+ };
+
+ cpu1: cpu@1 {
+- device_type = "cpu";
+ compatible = "arm,cortex-a53";
+- enable-method = "psci";
+ reg = <0x1>;
++ device_type = "cpu";
++ enable-method = "psci";
+ #cooling-cells = <2>;
+ };
+
+ cpu2: cpu@2 {
+- device_type = "cpu";
+ compatible = "arm,cortex-a53";
+- enable-method = "psci";
+ reg = <0x2>;
++ device_type = "cpu";
++ enable-method = "psci";
+ #cooling-cells = <2>;
+ };
+
+ cpu3: cpu@3 {
+- device_type = "cpu";
+- enable-method = "psci";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
++ device_type = "cpu";
++ enable-method = "psci";
+ #cooling-cells = <2>;
+ };
+ };
+
++ clk40m: oscillator-40m {
++ compatible = "fixed-clock";
++ clock-frequency = <40000000>;
++ #clock-cells = <0>;
++ clock-output-names = "clkxtal";
++ };
++
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+@@ -121,38 +121,30 @@ wo_boot: wo-boot@15194000 {
+
+ };
+
+- timer {
+- compatible = "arm,armv8-timer";
+- interrupt-parent = <&gic>;
+- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+- };
+-
+ soc {
+- #address-cells = <2>;
+- #size-cells = <2>;
+ compatible = "simple-bus";
+ ranges;
++ #address-cells = <2>;
++ #size-cells = <2>;
+
+ gic: interrupt-controller@c000000 {
+ compatible = "arm,gic-v3";
+- #interrupt-cells = <3>;
+- interrupt-parent = <&gic>;
+- interrupt-controller;
+ reg = <0 0x0c000000 0 0x10000>, /* GICD */
+ <0 0x0c080000 0 0x80000>, /* GICR */
+ <0 0x0c400000 0 0x2000>, /* GICC */
+ <0 0x0c410000 0 0x1000>, /* GICH */
+ <0 0x0c420000 0 0x2000>; /* GICV */
++ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-controller;
++ #interrupt-cells = <3>;
+ };
+
+ infracfg: infracfg@10001000 {
+ compatible = "mediatek,mt7986-infracfg", "syscon";
+ reg = <0 0x10001000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ wed_pcie: wed-pcie@10003000 {
+@@ -202,6 +194,19 @@ pio: pinctrl@1001f000 {
+ #interrupt-cells = <2>;
+ };
+
++ pwm: pwm@10048000 {
++ compatible = "mediatek,mt7986-pwm";
++ reg = <0 0x10048000 0 0x1000>;
++ #pwm-cells = <2>;
++ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&topckgen CLK_TOP_PWM_SEL>,
++ <&infracfg CLK_INFRA_PWM_STA>,
++ <&infracfg CLK_INFRA_PWM1_CK>,
++ <&infracfg CLK_INFRA_PWM2_CK>;
++ clock-names = "top", "main", "pwm1", "pwm2";
++ status = "disabled";
++ };
++
+ sgmiisys0: syscon@10060000 {
+ compatible = "mediatek,mt7986-sgmiisys_0",
+ "syscon";
+@@ -234,26 +239,11 @@ crypto: crypto@10320000 {
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ring0", "ring1", "ring2", "ring3";
+ clocks = <&infracfg CLK_INFRA_EIP97_CK>;
+- clock-names = "infra_eip97_ck";
+ assigned-clocks = <&topckgen CLK_TOP_EIP_B_SEL>;
+ assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>;
+ status = "disabled";
+ };
+
+- pwm: pwm@10048000 {
+- compatible = "mediatek,mt7986-pwm";
+- reg = <0 0x10048000 0 0x1000>;
+- #clock-cells = <1>;
+- #pwm-cells = <2>;
+- interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&topckgen CLK_TOP_PWM_SEL>,
+- <&infracfg CLK_INFRA_PWM_STA>,
+- <&infracfg CLK_INFRA_PWM1_CK>,
+- <&infracfg CLK_INFRA_PWM2_CK>;
+- clock-names = "top", "main", "pwm1", "pwm2";
+- status = "disabled";
+- };
+-
+ uart0: serial@11002000 {
+ compatible = "mediatek,mt7986-uart",
+ "mediatek,mt6577-uart";
+@@ -311,9 +301,9 @@ i2c0: i2c@11008000 {
+
+ spi0: spi@1100a000 {
+ compatible = "mediatek,mt7986-spi-ipm", "mediatek,spi-ipm";
++ reg = <0 0x1100a000 0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- reg = <0 0x1100a000 0 0x100>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_MPLL_D2>,
+ <&topckgen CLK_TOP_SPI_SEL>,
+@@ -325,9 +315,9 @@ spi0: spi@1100a000 {
+
+ spi1: spi@1100b000 {
+ compatible = "mediatek,mt7986-spi-ipm", "mediatek,spi-ipm";
++ reg = <0 0x1100b000 0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- reg = <0 0x1100b000 0 0x100>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topckgen CLK_TOP_MPLL_D2>,
+ <&topckgen CLK_TOP_SPIM_MST_SEL>,
+@@ -337,6 +327,20 @@ spi1: spi@1100b000 {
+ status = "disabled";
+ };
+
++ thermal: thermal@1100c800 {
++ compatible = "mediatek,mt7986-thermal";
++ reg = <0 0x1100c800 0 0x800>;
++ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&infracfg CLK_INFRA_THERM_CK>,
++ <&infracfg CLK_INFRA_ADC_26M_CK>;
++ clock-names = "therm", "auxadc";
++ nvmem-cells = <&thermal_calibration>;
++ nvmem-cell-names = "calibration-data";
++ #thermal-sensor-cells = <1>;
++ mediatek,auxadc = <&auxadc>;
++ mediatek,apmixedsys = <&apmixedsys>;
++ };
++
+ auxadc: adc@1100d000 {
+ compatible = "mediatek,mt7986-auxadc";
+ reg = <0 0x1100d000 0 0x1000>;
+@@ -374,6 +378,10 @@ mmc0: mmc@11230000 {
+ reg = <0 0x11230000 0 0x1000>,
+ <0 0x11c20000 0 0x1000>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++ assigned-clocks = <&topckgen CLK_TOP_EMMC_416M_SEL>,
++ <&topckgen CLK_TOP_EMMC_250M_SEL>;
++ assigned-clock-parents = <&apmixedsys CLK_APMIXED_MPLL>,
++ <&topckgen CLK_TOP_NET1PLL_D5_D2>;
+ clocks = <&topckgen CLK_TOP_EMMC_416M_SEL>,
+ <&infracfg CLK_INFRA_MSDC_HCK_CK>,
+ <&infracfg CLK_INFRA_MSDC_CK>,
+@@ -384,39 +392,23 @@ mmc0: mmc@11230000 {
+ status = "disabled";
+ };
+
+- thermal: thermal@1100c800 {
+- #thermal-sensor-cells = <1>;
+- compatible = "mediatek,mt7986-thermal";
+- reg = <0 0x1100c800 0 0x800>;
+- interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&infracfg CLK_INFRA_THERM_CK>,
+- <&infracfg CLK_INFRA_ADC_26M_CK>,
+- <&infracfg CLK_INFRA_ADC_FRC_CK>;
+- clock-names = "therm", "auxadc", "adc_32k";
+- mediatek,auxadc = <&auxadc>;
+- mediatek,apmixedsys = <&apmixedsys>;
+- nvmem-cells = <&thermal_calibration>;
+- nvmem-cell-names = "calibration-data";
+- };
+-
+ pcie: pcie@11280000 {
+ compatible = "mediatek,mt7986-pcie",
+ "mediatek,mt8192-pcie";
++ reg = <0x00 0x11280000 0x00 0x4000>;
++ reg-names = "pcie-mac";
++ ranges = <0x82000000 0x00 0x20000000 0x00
++ 0x20000000 0x00 0x10000000>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+- reg = <0x00 0x11280000 0x00 0x4000>;
+- reg-names = "pcie-mac";
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x00 0xff>;
+- ranges = <0x82000000 0x00 0x20000000 0x00
+- 0x20000000 0x00 0x10000000>;
+ clocks = <&infracfg CLK_INFRA_IPCIE_PIPE_CK>,
+ <&infracfg CLK_INFRA_IPCIE_CK>,
+ <&infracfg CLK_INFRA_IPCIER_CK>,
+ <&infracfg CLK_INFRA_IPCIEB_CK>;
+ clock-names = "pl_250m", "tl_26m", "peri_26m", "top_133m";
+- status = "disabled";
+
+ phys = <&pcie_port PHY_TYPE_PCIE>;
+ phy-names = "pcie-phy";
+@@ -427,6 +419,8 @@ pcie: pcie@11280000 {
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
++ status = "disabled";
++
+ pcie_intc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+@@ -437,9 +431,9 @@ pcie_intc: interrupt-controller {
+ pcie_phy: t-phy {
+ compatible = "mediatek,mt7986-tphy",
+ "mediatek,generic-tphy-v2";
++ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+- ranges;
+ status = "disabled";
+
+ pcie_port: pcie-phy@11c00000 {
+@@ -464,9 +458,9 @@ thermal_calibration: calib@274 {
+ usb_phy: t-phy@11e10000 {
+ compatible = "mediatek,mt7986-tphy",
+ "mediatek,generic-tphy-v2";
++ ranges = <0 0 0x11e10000 0x1700>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+- ranges = <0 0 0x11e10000 0x1700>;
+ status = "disabled";
+
+ u2port0: usb-phy@0 {
+@@ -494,8 +488,6 @@ u2port1: usb-phy@1000 {
+ };
+
+ ethsys: syscon@15000000 {
+- #address-cells = <1>;
+- #size-cells = <1>;
+ compatible = "mediatek,mt7986-ethsys",
+ "syscon";
+ reg = <0 0x15000000 0 0x1000>;
+@@ -529,20 +521,6 @@ wed1: wed@15011000 {
+ mediatek,wo-ccif = <&wo_ccif1>;
+ };
+
+- wo_ccif0: syscon@151a5000 {
+- compatible = "mediatek,mt7986-wo-ccif", "syscon";
+- reg = <0 0x151a5000 0 0x1000>;
+- interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+- };
+-
+- wo_ccif1: syscon@151ad000 {
+- compatible = "mediatek,mt7986-wo-ccif", "syscon";
+- reg = <0 0x151ad000 0 0x1000>;
+- interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+- };
+-
+ eth: ethernet@15100000 {
+ compatible = "mediatek,mt7986-eth";
+ reg = <0 0x15100000 0 0x80000>;
+@@ -575,26 +553,39 @@ eth: ethernet@15100000 {
+ <&topckgen CLK_TOP_SGM_325M_SEL>;
+ assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
+ <&apmixedsys CLK_APMIXED_SGMPLL>;
++ #address-cells = <1>;
++ #size-cells = <0>;
+ mediatek,ethsys = <&ethsys>;
+ mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ mediatek,wed-pcie = <&wed_pcie>;
+ mediatek,wed = <&wed0>, <&wed1>;
+- #reset-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+ status = "disabled";
+ };
+
++ wo_ccif0: syscon@151a5000 {
++ compatible = "mediatek,mt7986-wo-ccif", "syscon";
++ reg = <0 0x151a5000 0 0x1000>;
++ interrupt-parent = <&gic>;
++ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ wo_ccif1: syscon@151ad000 {
++ compatible = "mediatek,mt7986-wo-ccif", "syscon";
++ reg = <0 0x151ad000 0 0x1000>;
++ interrupt-parent = <&gic>;
++ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
+ wifi: wifi@18000000 {
+ compatible = "mediatek,mt7986-wmac";
++ reg = <0 0x18000000 0 0x1000000>,
++ <0 0x10003000 0 0x1000>,
++ <0 0x11d10000 0 0x1000>;
+ resets = <&watchdog MT7986_TOPRGU_CONSYS_SW_RST>;
+ reset-names = "consys";
+ clocks = <&topckgen CLK_TOP_CONN_MCUSYS_SEL>,
+ <&topckgen CLK_TOP_AP2CNN_HOST_SEL>;
+ clock-names = "mcu", "ap2conn";
+- reg = <0 0x18000000 0 0x1000000>,
+- <0 0x10003000 0 0x1000>,
+- <0 0x11d10000 0 0x1000>;
+ interrupts = <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+@@ -610,24 +601,45 @@ cpu_thermal: cpu-thermal {
+ thermal-sensors = <&thermal 0>;
+
+ trips {
++ cpu_trip_crit: crit {
++ temperature = <125000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++
++ cpu_trip_hot: hot {
++ temperature = <120000>;
++ hysteresis = <2000>;
++ type = "hot";
++ };
++
+ cpu_trip_active_high: active-high {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+- cpu_trip_active_low: active-low {
++ cpu_trip_active_med: active-med {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+- cpu_trip_passive: passive {
+- temperature = <40000>;
++ cpu_trip_active_low: active-low {
++ temperature = <60000>;
+ hysteresis = <2000>;
+- type = "passive";
++ type = "active";
+ };
+ };
+ };
+ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupt-parent = <&gic>;
++ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ };
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
+index dde190442e3866..57dcaeef31d7fc 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
+@@ -152,12 +152,13 @@ &spi0 {
+ pinctrl-0 = <&spi_flash_pins>;
+ cs-gpios = <0>, <0>;
+ status = "okay";
+- spi_nand: spi_nand@0 {
++
++ spi_nand: flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+- spi-tx-buswidth = <4>;
+- spi-rx-buswidth = <4>;
++ spi-tx-bus-width = <4>;
++ spi-rx-bus-width = <4>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 5122963d8743ab..d258c80213b264 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -44,7 +44,7 @@ extcon_usb: extcon_iddig {
+ id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+ };
+
+- usb_p1_vbus: regulator@0 {
++ usb_p1_vbus: regulator-usb-p1 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -53,7 +53,7 @@ usb_p1_vbus: regulator@0 {
+ enable-active-high;
+ };
+
+- usb_p0_vbus: regulator@1 {
++ usb_p0_vbus: regulator-usb-p0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-min-microvolt = <5000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+index d8bd5180768327..77f9ab94c00bd9 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+@@ -31,14 +31,14 @@ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+- scp_mem_reserved: scp_mem_region {
++ scp_mem_reserved: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+ };
+
+- ntc@0 {
++ thermal-sensor {
+ compatible = "murata,ncp03wf104";
+ pullup-uv = <1800000>;
+ pullup-ohm = <390000>;
+@@ -155,8 +155,8 @@ &mt6358_vsram_gpu_reg {
+ };
+
+ &pio {
+- i2c_pins_0: i2c0{
+- pins_i2c{
++ i2c_pins_0: i2c0 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+@@ -164,8 +164,8 @@ pins_i2c{
+ };
+ };
+
+- i2c_pins_1: i2c1{
+- pins_i2c{
++ i2c_pins_1: i2c1 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+@@ -173,8 +173,8 @@ pins_i2c{
+ };
+ };
+
+- i2c_pins_2: i2c2{
+- pins_i2c{
++ i2c_pins_2: i2c2 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ mediatek,pull-up-adv = <3>;
+@@ -182,8 +182,8 @@ pins_i2c{
+ };
+ };
+
+- i2c_pins_3: i2c3{
+- pins_i2c{
++ i2c_pins_3: i2c3 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+@@ -191,8 +191,8 @@ pins_i2c{
+ };
+ };
+
+- i2c_pins_4: i2c4{
+- pins_i2c{
++ i2c_pins_4: i2c4 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ mediatek,pull-up-adv = <3>;
+@@ -200,8 +200,8 @@ pins_i2c{
+ };
+ };
+
+- i2c_pins_5: i2c5{
+- pins_i2c{
++ i2c_pins_5: i2c5 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+@@ -209,8 +209,8 @@ pins_i2c{
+ };
+ };
+
+- spi_pins_0: spi0{
+- pins_spi{
++ spi_pins_0: spi0 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+ <PINMUX_GPIO86__FUNC_SPI0_CSB>,
+ <PINMUX_GPIO87__FUNC_SPI0_MO>,
+@@ -324,8 +324,8 @@ pins_clk {
+ };
+ };
+
+- spi_pins_1: spi1{
+- pins_spi{
++ spi_pins_1: spi1 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+ <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+ <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+@@ -334,8 +334,8 @@ pins_spi{
+ };
+ };
+
+- spi_pins_2: spi2{
+- pins_spi{
++ spi_pins_2: spi2 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+ <PINMUX_GPIO1__FUNC_SPI2_MO>,
+ <PINMUX_GPIO2__FUNC_SPI2_CLK>,
+@@ -344,8 +344,8 @@ pins_spi{
+ };
+ };
+
+- spi_pins_3: spi3{
+- pins_spi{
++ spi_pins_3: spi3 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
+ <PINMUX_GPIO22__FUNC_SPI3_CSB>,
+ <PINMUX_GPIO23__FUNC_SPI3_MO>,
+@@ -354,8 +354,8 @@ pins_spi{
+ };
+ };
+
+- spi_pins_4: spi4{
+- pins_spi{
++ spi_pins_4: spi4 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
+ <PINMUX_GPIO18__FUNC_SPI4_CSB>,
+ <PINMUX_GPIO19__FUNC_SPI4_MO>,
+@@ -364,8 +364,8 @@ pins_spi{
+ };
+ };
+
+- spi_pins_5: spi5{
+- pins_spi{
++ spi_pins_5: spi5 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
+ <PINMUX_GPIO14__FUNC_SPI5_CSB>,
+ <PINMUX_GPIO15__FUNC_SPI5_MO>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi
+index 2c69e7658dba6d..b9a6fd4f86d4a0 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi
+@@ -28,7 +28,7 @@ da7219_aad {
+ dlg,btn-cfg = <50>;
+ dlg,mic-det-thr = <500>;
+ dlg,jack-ins-deb = <20>;
+- dlg,jack-det-rate = "32ms_64ms";
++ dlg,jack-det-rate = "32_64";
+ dlg,jack-rem-deb = <1>;
+
+ dlg,a-d-btn-thr = <0xa>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index bf97b60ae4d17e..32f6899f885ef7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -91,6 +91,8 @@ cros_ec_pwm: pwm {
+
+ &dsi0 {
+ status = "okay";
++ /delete-property/#size-cells;
++ /delete-property/#address-cells;
+ /delete-node/panel@0;
+ ports {
+ port {
+@@ -154,21 +156,24 @@ anx_bridge: anx7625@58 {
+ vdd18-supply = <&pp1800_mipibrdg>;
+ vdd33-supply = <&vddio_mipibrdg>;
+
+- #address-cells = <1>;
+- #size-cells = <0>;
+- port@0 {
+- reg = <0>;
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
+
+- anx7625_in: endpoint {
+- remote-endpoint = <&dsi_out>;
++ port@0 {
++ reg = <0>;
++
++ anx7625_in: endpoint {
++ remote-endpoint = <&dsi_out>;
++ };
+ };
+- };
+
+- port@1 {
+- reg = <1>;
++ port@1 {
++ reg = <1>;
+
+- anx7625_out: endpoint {
+- remote-endpoint = <&panel_in>;
++ anx7625_out: endpoint {
++ remote-endpoint = <&panel_in>;
++ };
+ };
+ };
+
+@@ -441,20 +446,20 @@ pins2 {
+ };
+
+ touchscreen_pins: touchscreen-pins {
+- touch_int_odl {
++ touch-int-odl {
+ pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
+ input-enable;
+ bias-pull-up;
+ };
+
+- touch_rst_l {
++ touch-rst-l {
+ pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
+ output-high;
+ };
+ };
+
+ trackpad_pins: trackpad-pins {
+- trackpad_int {
++ trackpad-int {
+ pinmux = <PINMUX_GPIO7__FUNC_GPIO7>;
+ input-enable;
+ bias-disable; /* pulled externally */
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+index a11adeb29b1f2e..0d3c7b8162ff0b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+@@ -373,6 +373,10 @@ pen_eject {
+ };
+
+ &cros_ec {
++ cbas {
++ compatible = "google,cros-cbas";
++ };
++
+ keyboard-controller {
+ compatible = "google,cros-ec-keyb-switches";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+index 4864c39e53a4fd..e73113cb51f538 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+@@ -340,6 +340,10 @@ touch_pin_reset: pin_reset {
+ };
+
+ &cros_ec {
++ cbas {
++ compatible = "google,cros-cbas";
++ };
++
+ keyboard-controller {
+ compatible = "google,cros-ec-keyb-switches";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+index d5f41c6c98814a..181da69d18f46a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+@@ -344,6 +344,10 @@ rst_pin {
+ };
+
+ &cros_ec {
++ cbas {
++ compatible = "google,cros-cbas";
++ };
++
+ keyboard-controller {
+ compatible = "google,cros-ec-keyb-switches";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 6ce16a265e0530..2c6587f260f82b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -108,7 +108,7 @@ reserved_memory: reserved-memory {
+ #size-cells = <2>;
+ ranges;
+
+- scp_mem_reserved: scp_mem_region {
++ scp_mem_reserved: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+@@ -405,7 +405,6 @@ &mt6358codec {
+ };
+
+ &mt6358_vgpu_reg {
+- regulator-min-microvolt = <625000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-coupled-with = <&mt6358_vsram_gpu_reg>;
+@@ -432,7 +431,7 @@ &mt6358_vsram_gpu_reg {
+
+ &pio {
+ aud_pins_default: audiopins {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO97__FUNC_I2S2_MCK>,
+ <PINMUX_GPIO98__FUNC_I2S2_BCK>,
+ <PINMUX_GPIO101__FUNC_I2S2_LRCK>,
+@@ -454,7 +453,7 @@ pins_bus {
+ };
+
+ aud_pins_tdm_out_on: audiotdmouton {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO169__FUNC_TDM_BCK_2ND>,
+ <PINMUX_GPIO170__FUNC_TDM_LRCK_2ND>,
+ <PINMUX_GPIO171__FUNC_TDM_DATA0_2ND>,
+@@ -466,7 +465,7 @@ pins_bus {
+ };
+
+ aud_pins_tdm_out_off: audiotdmoutoff {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO169__FUNC_GPIO169>,
+ <PINMUX_GPIO170__FUNC_GPIO170>,
+ <PINMUX_GPIO171__FUNC_GPIO171>,
+@@ -480,13 +479,13 @@ pins_bus {
+ };
+
+ bt_pins: bt-pins {
+- pins_bt_en {
++ pins-bt-en {
+ pinmux = <PINMUX_GPIO120__FUNC_GPIO120>;
+ output-low;
+ };
+ };
+
+- ec_ap_int_odl: ec_ap_int_odl {
++ ec_ap_int_odl: ec-ap-int-odl {
+ pins1 {
+ pinmux = <PINMUX_GPIO151__FUNC_GPIO151>;
+ input-enable;
+@@ -494,7 +493,7 @@ pins1 {
+ };
+ };
+
+- h1_int_od_l: h1_int_od_l {
++ h1_int_od_l: h1-int-od-l {
+ pins1 {
+ pinmux = <PINMUX_GPIO153__FUNC_GPIO153>;
+ input-enable;
+@@ -502,7 +501,7 @@ pins1 {
+ };
+
+ i2c0_pins: i2c0 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+@@ -511,7 +510,7 @@ pins_bus {
+ };
+
+ i2c1_pins: i2c1 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+@@ -520,7 +519,7 @@ pins_bus {
+ };
+
+ i2c2_pins: i2c2 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ bias-disable;
+@@ -529,7 +528,7 @@ pins_bus {
+ };
+
+ i2c3_pins: i2c3 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+@@ -538,7 +537,7 @@ pins_bus {
+ };
+
+ i2c4_pins: i2c4 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ bias-disable;
+@@ -547,7 +546,7 @@ pins_bus {
+ };
+
+ i2c5_pins: i2c5 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+@@ -556,7 +555,7 @@ pins_bus {
+ };
+
+ i2c6_pins: i2c6 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO11__FUNC_SCL6>,
+ <PINMUX_GPIO12__FUNC_SDA6>;
+ bias-disable;
+@@ -564,7 +563,7 @@ pins_bus {
+ };
+
+ mmc0_pins_default: mmc0-pins-default {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+@@ -579,13 +578,13 @@ pins_cmd_dat {
+ mediatek,pull-up-adv = <01>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+- pins_rst {
++ pins-rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <01>;
+@@ -593,7 +592,7 @@ pins_rst {
+ };
+
+ mmc0_pins_uhs: mmc0-pins-uhs {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+@@ -608,19 +607,19 @@ pins_cmd_dat {
+ mediatek,pull-up-adv = <01>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+- pins_ds {
++ pins-ds {
+ pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+- pins_rst {
++ pins-rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-up-adv = <01>;
+@@ -628,7 +627,7 @@ pins_rst {
+ };
+
+ mmc1_pins_default: mmc1-pins-default {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+@@ -638,7 +637,7 @@ pins_cmd_dat {
+ mediatek,pull-up-adv = <10>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ input-enable;
+ mediatek,pull-down-adv = <10>;
+@@ -646,7 +645,7 @@ pins_clk {
+ };
+
+ mmc1_pins_uhs: mmc1-pins-uhs {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+@@ -657,7 +656,7 @@ pins_cmd_dat {
+ mediatek,pull-up-adv = <10>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ mediatek,pull-down-adv = <10>;
+@@ -665,15 +664,15 @@ pins_clk {
+ };
+ };
+
+- panel_pins_default: panel_pins_default {
+- panel_reset {
++ panel_pins_default: panel-pins-default {
++ panel-reset {
+ pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
+ output-low;
+ bias-pull-up;
+ };
+ };
+
+- pwm0_pin_default: pwm0_pin_default {
++ pwm0_pin_default: pwm0-pin-default {
+ pins1 {
+ pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
+ output-high;
+@@ -685,14 +684,14 @@ pins2 {
+ };
+
+ scp_pins: scp {
+- pins_scp_uart {
++ pins-scp-uart {
+ pinmux = <PINMUX_GPIO110__FUNC_TP_URXD1_AO>,
+ <PINMUX_GPIO112__FUNC_TP_UTXD1_AO>;
+ };
+ };
+
+ spi0_pins: spi0 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+ <PINMUX_GPIO86__FUNC_GPIO86>,
+ <PINMUX_GPIO87__FUNC_SPI0_MO>,
+@@ -702,7 +701,7 @@ pins_spi{
+ };
+
+ spi1_pins: spi1 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+ <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+ <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+@@ -712,20 +711,20 @@ pins_spi{
+ };
+
+ spi2_pins: spi2 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+ <PINMUX_GPIO1__FUNC_SPI2_MO>,
+ <PINMUX_GPIO2__FUNC_SPI2_CLK>;
+ bias-disable;
+ };
+- pins_spi_mi {
++ pins-spi-mi {
+ pinmux = <PINMUX_GPIO94__FUNC_SPI2_MI>;
+ mediatek,pull-down-adv = <00>;
+ };
+ };
+
+ spi3_pins: spi3 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
+ <PINMUX_GPIO22__FUNC_SPI3_CSB>,
+ <PINMUX_GPIO23__FUNC_SPI3_MO>,
+@@ -735,7 +734,7 @@ pins_spi{
+ };
+
+ spi4_pins: spi4 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
+ <PINMUX_GPIO18__FUNC_SPI4_CSB>,
+ <PINMUX_GPIO19__FUNC_SPI4_MO>,
+@@ -745,7 +744,7 @@ pins_spi{
+ };
+
+ spi5_pins: spi5 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
+ <PINMUX_GPIO14__FUNC_SPI5_CSB>,
+ <PINMUX_GPIO15__FUNC_SPI5_MO>,
+@@ -755,63 +754,61 @@ pins_spi{
+ };
+
+ uart0_pins_default: uart0-pins-default {
+- pins_rx {
++ pins-rx {
+ pinmux = <PINMUX_GPIO95__FUNC_URXD0>;
+ input-enable;
+ bias-pull-up;
+ };
+- pins_tx {
++ pins-tx {
+ pinmux = <PINMUX_GPIO96__FUNC_UTXD0>;
+ };
+ };
+
+ uart1_pins_default: uart1-pins-default {
+- pins_rx {
++ pins-rx {
+ pinmux = <PINMUX_GPIO121__FUNC_URXD1>;
+ input-enable;
+ bias-pull-up;
+ };
+- pins_tx {
++ pins-tx {
+ pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
+ };
+- pins_rts {
++ pins-rts {
+ pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+- output-enable;
+ };
+- pins_cts {
++ pins-cts {
+ pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+ input-enable;
+ };
+ };
+
+ uart1_pins_sleep: uart1-pins-sleep {
+- pins_rx {
++ pins-rx {
+ pinmux = <PINMUX_GPIO121__FUNC_GPIO121>;
+ input-enable;
+ bias-pull-up;
+ };
+- pins_tx {
++ pins-tx {
+ pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
+ };
+- pins_rts {
++ pins-rts {
+ pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+- output-enable;
+ };
+- pins_cts {
++ pins-cts {
+ pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+ input-enable;
+ };
+ };
+
+ wifi_pins_pwrseq: wifi-pins-pwrseq {
+- pins_wifi_enable {
++ pins-wifi-enable {
+ pinmux = <PINMUX_GPIO119__FUNC_GPIO119>;
+ output-low;
+ };
+ };
+
+ wifi_pins_wakeup: wifi-pins-wakeup {
+- pins_wifi_wakeup {
++ pins-wifi-wakeup {
+ pinmux = <PINMUX_GPIO113__FUNC_GPIO113>;
+ input-enable;
+ };
+@@ -907,10 +904,6 @@ usbc_extcon: extcon0 {
+ google,usb-port-id = <0>;
+ };
+
+- cbas {
+- compatible = "google,cros-cbas";
+- };
+-
+ typec {
+ compatible = "google,cros-ec-typec";
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+index 526bcae7a3f8ff..b5784a60c315d3 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+@@ -193,7 +193,7 @@ &mt6358_vsram_gpu_reg {
+
+ &pio {
+ i2c_pins_0: i2c0 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+@@ -202,7 +202,7 @@ pins_i2c{
+ };
+
+ i2c_pins_1: i2c1 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+@@ -211,7 +211,7 @@ pins_i2c{
+ };
+
+ i2c_pins_2: i2c2 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ mediatek,pull-up-adv = <3>;
+@@ -220,7 +220,7 @@ pins_i2c{
+ };
+
+ i2c_pins_3: i2c3 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+@@ -229,7 +229,7 @@ pins_i2c{
+ };
+
+ i2c_pins_4: i2c4 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ mediatek,pull-up-adv = <3>;
+@@ -238,7 +238,7 @@ pins_i2c{
+ };
+
+ i2c_pins_5: i2c5 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 5169779d01dfb4..8721a5ffca30a7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1210,127 +1210,6 @@ thermal: thermal@1100b000 {
+ nvmem-cell-names = "calibration-data";
+ };
+
+- thermal_zones: thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <100>;
+- polling-delay = <500>;
+- thermal-sensors = <&thermal 0>;
+- sustainable-power = <5000>;
+-
+- trips {
+- threshold: trip-point0 {
+- temperature = <68000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- target: trip-point1 {
+- temperature = <80000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- cpu_crit: cpu-crit {
+- temperature = <115000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&target>;
+- cooling-device = <&cpu0
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu1
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu2
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu3
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- contribution = <3072>;
+- };
+- map1 {
+- trip = <&target>;
+- cooling-device = <&cpu4
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu5
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu6
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu7
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- contribution = <1024>;
+- };
+- };
+- };
+-
+- /* The tzts1 ~ tzts6 don't need to polling */
+- /* The tzts1 ~ tzts6 don't need to thermal throttle */
+-
+- tzts1: tzts1 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 1>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts2: tzts2 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 2>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts3: tzts3 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 3>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts4: tzts4 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 4>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts5: tzts5 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 5>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tztsABB: tztsABB {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 6>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+- };
+-
+ pwm0: pwm@1100e000 {
+ compatible = "mediatek,mt8183-disp-pwm";
+ reg = <0 0x1100e000 0 0x1000>;
+@@ -1749,6 +1628,7 @@ mfgcfg: syscon@13000000 {
+ compatible = "mediatek,mt8183-mfgcfg", "syscon";
+ reg = <0 0x13000000 0 0x1000>;
+ #clock-cells = <1>;
++ power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
+ };
+
+ gpu: gpu@13040000 {
+@@ -1781,7 +1661,7 @@ mmsys: syscon@14000000 {
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
+ };
+
+- mdp3-rdma0@14001000 {
++ dma-controller0@14001000 {
+ compatible = "mediatek,mt8183-mdp3-rdma";
+ reg = <0 0x14001000 0 0x1000>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+@@ -1793,6 +1673,7 @@ mdp3-rdma0@14001000 {
+ iommus = <&iommu M4U_PORT_MDP_RDMA0>;
+ mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST 0>,
+ <&gce 21 CMDQ_THR_PRIO_LOWEST 0>;
++ #dma-cells = <1>;
+ };
+
+ mdp3-rsz0@14003000 {
+@@ -1813,7 +1694,7 @@ mdp3-rsz1@14004000 {
+ clocks = <&mmsys CLK_MM_MDP_RSZ1>;
+ };
+
+- mdp3-wrot0@14005000 {
++ dma-controller@14005000 {
+ compatible = "mediatek,mt8183-mdp3-wrot";
+ reg = <0 0x14005000 0 0x1000>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>;
+@@ -1822,6 +1703,7 @@ mdp3-wrot0@14005000 {
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ clocks = <&mmsys CLK_MM_MDP_WROT0>;
+ iommus = <&iommu M4U_PORT_MDP_WROT0>;
++ #dma-cells = <1>;
+ };
+
+ mdp3-wdma@14006000 {
+@@ -2105,4 +1987,125 @@ larb3: larb@1a002000 {
+ power-domains = <&spm MT8183_POWER_DOMAIN_CAM>;
+ };
+ };
++
++ thermal_zones: thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <100>;
++ polling-delay = <500>;
++ thermal-sensors = <&thermal 0>;
++ sustainable-power = <5000>;
++
++ trips {
++ threshold: trip-point0 {
++ temperature = <68000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ target: trip-point1 {
++ temperature = <80000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ cpu_crit: cpu-crit {
++ temperature = <115000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&target>;
++ cooling-device = <&cpu0
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu1
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu2
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu3
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ contribution = <3072>;
++ };
++ map1 {
++ trip = <&target>;
++ cooling-device = <&cpu4
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu5
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu6
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu7
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ contribution = <1024>;
++ };
++ };
++ };
++
++ /* The tzts1 ~ tzts6 don't need to polling */
++ /* The tzts1 ~ tzts6 don't need to thermal throttle */
++
++ tzts1: tzts1 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 1>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts2: tzts2 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 2>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts3: tzts3 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 3>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts4: tzts4 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 4>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts5: tzts5 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 5>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tztsABB: tztsABB {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 6>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++ };
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+index f04ae70c470aa3..2c184f9e0fc390 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+@@ -22,7 +22,7 @@ / {
+
+ aliases {
+ ovl0 = &ovl0;
+- ovl_2l0 = &ovl_2l0;
++ ovl-2l0 = &ovl_2l0;
+ rdma0 = &rdma0;
+ rdma1 = &rdma1;
+ };
+@@ -731,7 +731,7 @@ opp-850000000 {
+ opp-900000000-3 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <850000>;
+- opp-supported-hw = <0x8>;
++ opp-supported-hw = <0xcf>;
+ };
+
+ opp-900000000-4 {
+@@ -743,13 +743,13 @@ opp-900000000-4 {
+ opp-900000000-5 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <825000>;
+- opp-supported-hw = <0x30>;
++ opp-supported-hw = <0x20>;
+ };
+
+ opp-950000000-3 {
+ opp-hz = /bits/ 64 <950000000>;
+ opp-microvolt = <900000>;
+- opp-supported-hw = <0x8>;
++ opp-supported-hw = <0xcf>;
+ };
+
+ opp-950000000-4 {
+@@ -761,13 +761,13 @@ opp-950000000-4 {
+ opp-950000000-5 {
+ opp-hz = /bits/ 64 <950000000>;
+ opp-microvolt = <850000>;
+- opp-supported-hw = <0x30>;
++ opp-supported-hw = <0x20>;
+ };
+
+ opp-1000000000-3 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <950000>;
+- opp-supported-hw = <0x8>;
++ opp-supported-hw = <0xcf>;
+ };
+
+ opp-1000000000-4 {
+@@ -779,7 +779,7 @@ opp-1000000000-4 {
+ opp-1000000000-5 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <875000>;
+- opp-supported-hw = <0x30>;
++ opp-supported-hw = <0x20>;
+ };
+ };
+
+@@ -924,17 +924,24 @@ power-domain@MT8186_POWER_DOMAIN_CSIRX_TOP {
+ reg = <MT8186_POWER_DOMAIN_CSIRX_TOP>;
+ clocks = <&topckgen CLK_TOP_SENINF>,
+ <&topckgen CLK_TOP_SENINF1>;
+- clock-names = "csirx_top0", "csirx_top1";
++ clock-names = "subsys-csirx-top0",
++ "subsys-csirx-top1";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8186_POWER_DOMAIN_SSUSB {
+ reg = <MT8186_POWER_DOMAIN_SSUSB>;
++ clocks = <&topckgen CLK_TOP_USB_TOP>,
++ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_REF>;
++ clock-names = "sys_ck", "ref_ck";
+ #power-domain-cells = <0>;
+ };
+
+ power-domain@MT8186_POWER_DOMAIN_SSUSB_P1 {
+ reg = <MT8186_POWER_DOMAIN_SSUSB_P1>;
++ clocks = <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_SYS>,
++ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_REF>;
++ clock-names = "sys_ck", "ref_ck";
+ #power-domain-cells = <0>;
+ };
+
+@@ -942,7 +949,8 @@ power-domain@MT8186_POWER_DOMAIN_ADSP_AO {
+ reg = <MT8186_POWER_DOMAIN_ADSP_AO>;
+ clocks = <&topckgen CLK_TOP_AUDIODSP>,
+ <&topckgen CLK_TOP_ADSP_BUS>;
+- clock-names = "audioadsp", "adsp_bus";
++ clock-names = "audioadsp",
++ "subsys-adsp-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #power-domain-cells = <1>;
+@@ -975,8 +983,11 @@ power-domain@MT8186_POWER_DOMAIN_DIS {
+ <&mmsys CLK_MM_SMI_COMMON>,
+ <&mmsys CLK_MM_SMI_GALS>,
+ <&mmsys CLK_MM_SMI_IOMMU>;
+- clock-names = "disp", "mdp", "smi_infra", "smi_common",
+- "smi_gals", "smi_iommu";
++ clock-names = "disp", "mdp",
++ "subsys-smi-infra",
++ "subsys-smi-common",
++ "subsys-smi-gals",
++ "subsys-smi-iommu";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -993,15 +1004,17 @@ power-domain@MT8186_POWER_DOMAIN_VDEC {
+
+ power-domain@MT8186_POWER_DOMAIN_CAM {
+ reg = <MT8186_POWER_DOMAIN_CAM>;
+- clocks = <&topckgen CLK_TOP_CAM>,
+- <&topckgen CLK_TOP_SENINF>,
++ clocks = <&topckgen CLK_TOP_SENINF>,
+ <&topckgen CLK_TOP_SENINF1>,
+ <&topckgen CLK_TOP_SENINF2>,
+ <&topckgen CLK_TOP_SENINF3>,
++ <&camsys CLK_CAM2MM_GALS>,
+ <&topckgen CLK_TOP_CAMTM>,
+- <&camsys CLK_CAM2MM_GALS>;
+- clock-names = "cam-top", "cam0", "cam1", "cam2",
+- "cam3", "cam-tm", "gals";
++ <&topckgen CLK_TOP_CAM>;
++ clock-names = "cam0", "cam1", "cam2",
++ "cam3", "gals",
++ "subsys-cam-tm",
++ "subsys-cam-top";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -1020,9 +1033,9 @@ power-domain@MT8186_POWER_DOMAIN_CAM_RAWA {
+
+ power-domain@MT8186_POWER_DOMAIN_IMG {
+ reg = <MT8186_POWER_DOMAIN_IMG>;
+- clocks = <&topckgen CLK_TOP_IMG1>,
+- <&imgsys1 CLK_IMG1_GALS_IMG1>;
+- clock-names = "img-top", "gals";
++ clocks = <&imgsys1 CLK_IMG1_GALS_IMG1>,
++ <&topckgen CLK_TOP_IMG1>;
++ clock-names = "gals", "subsys-img-top";
+ mediatek,infracfg = <&infracfg_ao>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -1041,8 +1054,11 @@ power-domain@MT8186_POWER_DOMAIN_IPE {
+ <&ipesys CLK_IPE_LARB20>,
+ <&ipesys CLK_IPE_SMI_SUBCOM>,
+ <&ipesys CLK_IPE_GALS_IPE>;
+- clock-names = "ipe-top", "ipe-larb0", "ipe-larb1",
+- "ipe-smi", "ipe-gals";
++ clock-names = "subsys-ipe-top",
++ "subsys-ipe-larb0",
++ "subsys-ipe-larb1",
++ "subsys-ipe-smi",
++ "subsys-ipe-gals";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -1051,7 +1067,7 @@ power-domain@MT8186_POWER_DOMAIN_VENC {
+ reg = <MT8186_POWER_DOMAIN_VENC>;
+ clocks = <&topckgen CLK_TOP_VENC>,
+ <&vencsys CLK_VENC_CKE1_VENC>;
+- clock-names = "venc0", "larb";
++ clock-names = "venc0", "subsys-larb";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -1061,7 +1077,9 @@ power-domain@MT8186_POWER_DOMAIN_WPE {
+ clocks = <&topckgen CLK_TOP_WPE>,
+ <&wpesys CLK_WPE_SMI_LARB8_CK_EN>,
+ <&wpesys CLK_WPE_SMI_LARB8_PCLK_EN>;
+- clock-names = "wpe0", "larb-ck", "larb-pclk";
++ clock-names = "wpe0",
++ "subsys-larb-ck",
++ "subsys-larb-pclk";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -1148,14 +1166,14 @@ adsp: adsp@10680000 {
+ status = "disabled";
+ };
+
+- adsp_mailbox0: mailbox@10686000 {
++ adsp_mailbox0: mailbox@10686100 {
+ compatible = "mediatek,mt8186-adsp-mbox";
+ #mbox-cells = <0>;
+ reg = <0 0x10686100 0 0x1000>;
+ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH 0>;
+ };
+
+- adsp_mailbox1: mailbox@10687000 {
++ adsp_mailbox1: mailbox@10687100 {
+ compatible = "mediatek,mt8186-adsp-mbox";
+ #mbox-cells = <0>;
+ reg = <0 0x10687100 0 0x1000>;
+@@ -1518,8 +1536,9 @@ ssusb0: usb@11201000 {
+ clocks = <&topckgen CLK_TOP_USB_TOP>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_REF>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_HCLK>,
+- <&infracfg_ao CLK_INFRA_AO_ICUSB>;
+- clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck";
++ <&infracfg_ao CLK_INFRA_AO_ICUSB>,
++ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_XHCI>;
++ clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&u2port0 PHY_TYPE_USB2>;
+ power-domains = <&spm MT8186_POWER_DOMAIN_SSUSB>;
+@@ -1583,8 +1602,9 @@ ssusb1: usb@11281000 {
+ clocks = <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_SYS>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_REF>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_HCLK>,
+- <&clk26m>;
+- clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck";
++ <&clk26m>,
++ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_XHCI>;
++ clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH 0>;
+ phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>;
+ power-domains = <&spm MT8186_POWER_DOMAIN_SSUSB_P1>;
+@@ -1656,7 +1676,7 @@ efuse: efuse@11cb0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+- gpu_speedbin: gpu-speed-bin@59c {
++ gpu_speedbin: gpu-speedbin@59c {
+ reg = <0x59c 0x4>;
+ bits = <0 3>;
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index 0e8b341170907d..6b4b7a7cd35efb 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -147,6 +147,7 @@ pp3300_mipibrdg: regulator-3v3-mipibrdg {
+ regulator-boot-on;
+ gpio = <&pio 127 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&pp3300_g>;
++ off-on-delay-us = <500000>;
+ };
+
+ /* separately switched 3.3V power rail */
+@@ -1308,10 +1309,6 @@ cros_ec: ec@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- base_detection: cbas {
+- compatible = "google,cros-cbas";
+- };
+-
+ cros_ec_pwm: pwm {
+ compatible = "google,cros-ec-pwm";
+ #pwm-cells = <1>;
+@@ -1396,7 +1393,7 @@ regulators {
+ mt6315_6_vbuck1: vbuck1 {
+ regulator-compatible = "vbuck1";
+ regulator-name = "Vbcpu";
+- regulator-min-microvolt = <300000>;
++ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-allowed-modes = <0 1 2>;
+@@ -1406,7 +1403,7 @@ mt6315_6_vbuck1: vbuck1 {
+ mt6315_6_vbuck3: vbuck3 {
+ regulator-compatible = "vbuck3";
+ regulator-name = "Vlcpu";
+- regulator-min-microvolt = <300000>;
++ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-allowed-modes = <0 1 2>;
+@@ -1423,7 +1420,7 @@ regulators {
+ mt6315_7_vbuck1: vbuck1 {
+ regulator-compatible = "vbuck1";
+ regulator-name = "Vgpu";
+- regulator-min-microvolt = <606250>;
++ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <800000>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-allowed-modes = <0 1 2>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index 69f4cded5dbbf2..b1443adc55aab5 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -1412,6 +1412,7 @@ mutex: mutex@14001000 {
+ reg = <0 0x14001000 0 0x1000>;
+ interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
++ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
+ <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
+ power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;
+@@ -1770,7 +1771,7 @@ vcodec_enc: vcodec@17020000 {
+ mediatek,scp = <&scp>;
+ power-domains = <&spm MT8192_POWER_DOMAIN_VENC>;
+ clocks = <&vencsys CLK_VENC_SET1_VENC>;
+- clock-names = "venc-set1";
++ clock-names = "venc_sel";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D4>;
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
+index 2d5e8f371b6def..a82d716f10d449 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
+@@ -23,3 +23,7 @@ &sound {
+ &ts_10 {
+ status = "okay";
+ };
++
++&watchdog {
++ /delete-property/ mediatek,disable-extrst;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
+index 2586c32ce6e6fe..2fe20e0dad836d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
+@@ -43,3 +43,7 @@ &sound {
+ &ts_10 {
+ status = "okay";
+ };
++
++&watchdog {
++ /delete-property/ mediatek,disable-extrst;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
+index f54f9477b99dad..dd294ca98194cc 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
+@@ -44,3 +44,7 @@ &sound {
+ &ts_10 {
+ status = "okay";
+ };
++
++&watchdog {
++ /delete-property/ mediatek,disable-extrst;
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 37a3e9de90ff70..34e18eb5d7f450 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -114,6 +114,77 @@ ppvar_sys: regulator-ppvar-sys {
+ regulator-boot-on;
+ };
+
++ /* Murata NCP03WF104F05RL */
++ tboard_thermistor1: thermal-sensor-t1 {
++ compatible = "generic-adc-thermal";
++ #thermal-sensor-cells = <0>;
++ io-channels = <&auxadc 0>;
++ io-channel-names = "sensor-channel";
++ temperature-lookup-table = < (-10000) 1553
++ (-5000) 1485
++ 0 1406
++ 5000 1317
++ 10000 1219
++ 15000 1115
++ 20000 1007
++ 25000 900
++ 30000 796
++ 35000 697
++ 40000 605
++ 45000 523
++ 50000 449
++ 55000 384
++ 60000 327
++ 65000 279
++ 70000 237
++ 75000 202
++ 80000 172
++ 85000 147
++ 90000 125
++ 95000 107
++ 100000 92
++ 105000 79
++ 110000 68
++ 115000 59
++ 120000 51
++ 125000 44>;
++ };
++
++ tboard_thermistor2: thermal-sensor-t2 {
++ compatible = "generic-adc-thermal";
++ #thermal-sensor-cells = <0>;
++ io-channels = <&auxadc 1>;
++ io-channel-names = "sensor-channel";
++ temperature-lookup-table = < (-10000) 1553
++ (-5000) 1485
++ 0 1406
++ 5000 1317
++ 10000 1219
++ 15000 1115
++ 20000 1007
++ 25000 900
++ 30000 796
++ 35000 697
++ 40000 605
++ 45000 523
++ 50000 449
++ 55000 384
++ 60000 327
++ 65000 279
++ 70000 237
++ 75000 202
++ 80000 172
++ 85000 147
++ 90000 125
++ 95000 107
++ 100000 92
++ 105000 79
++ 110000 68
++ 115000 59
++ 120000 51
++ 125000 44>;
++ };
++
+ usb_vbus: regulator-5v0-usb-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb-vbus";
+@@ -176,6 +247,42 @@ &afe {
+ memory-region = <&afe_mem>;
+ };
+
++&auxadc {
++ status = "okay";
++};
++
++&cpu0 {
++ cpu-supply = <&mt6359_vcore_buck_reg>;
++};
++
++&cpu1 {
++ cpu-supply = <&mt6359_vcore_buck_reg>;
++};
++
++&cpu2 {
++ cpu-supply = <&mt6359_vcore_buck_reg>;
++};
++
++&cpu3 {
++ cpu-supply = <&mt6359_vcore_buck_reg>;
++};
++
++&cpu4 {
++ cpu-supply = <&mt6315_6_vbuck1>;
++};
++
++&cpu5 {
++ cpu-supply = <&mt6315_6_vbuck1>;
++};
++
++&cpu6 {
++ cpu-supply = <&mt6315_6_vbuck1>;
++};
++
++&cpu7 {
++ cpu-supply = <&mt6315_6_vbuck1>;
++};
++
+ &dp_intf0 {
+ status = "okay";
+
+@@ -362,7 +469,7 @@ &i2c7 {
+ pinctrl-0 = <&i2c7_pins>;
+
+ pmic@34 {
+- #interrupt-cells = <1>;
++ #interrupt-cells = <2>;
+ compatible = "mediatek,mt6360";
+ reg = <0x34>;
+ interrupt-controller;
+@@ -1098,7 +1205,7 @@ regulators {
+ mt6315_6_vbuck1: vbuck1 {
+ regulator-compatible = "vbuck1";
+ regulator-name = "Vbcpu";
+- regulator-min-microvolt = <300000>;
++ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-ramp-delay = <6250>;
+@@ -1116,7 +1223,7 @@ regulators {
+ mt6315_7_vbuck1: vbuck1 {
+ regulator-compatible = "vbuck1";
+ regulator-name = "Vgpu";
+- regulator-min-microvolt = <625000>;
++ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1193750>;
+ regulator-enable-ramp-delay = <256>;
+ regulator-ramp-delay = <6250>;
+@@ -1127,6 +1234,36 @@ mt6315_7_vbuck1: vbuck1 {
+ };
+ };
+
++&thermal_zones {
++ soc-area-thermal {
++ polling-delay = <1000>;
++ polling-delay-passive = <250>;
++ thermal-sensors = <&tboard_thermistor1>;
++
++ trips {
++ trip-crit {
++ temperature = <84000>;
++ hysteresis = <1000>;
++ type = "critical";
++ };
++ };
++ };
++
++ pmic-area-thermal {
++ polling-delay = <1000>;
++ polling-delay-passive = <0>;
++ thermal-sensors = <&tboard_thermistor2>;
++
++ trips {
++ trip-crit {
++ temperature = <84000>;
++ hysteresis = <1000>;
++ type = "critical";
++ };
++ };
++ };
++};
++
+ &u3phy0 {
+ status = "okay";
+ };
+@@ -1175,6 +1312,7 @@ &xhci3 {
+ usb2-lpm-disable;
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ vbus-supply = <&usb_vbus>;
++ mediatek,u3p-dis-msk = <1>;
+ };
+
+ #include <arm/cros-ec-keyboard.dtsi>
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index 5d635085fe3fd0..9079e48aea23ea 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -128,6 +128,7 @@ mt6360: pmic@34 {
+ compatible = "mediatek,mt6360";
+ reg = <0x34>;
+ interrupt-controller;
++ #interrupt-cells = <1>;
+ interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-names = "IRQB";
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index 54c674c45b49a2..d21ba00a5bd5df 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -627,6 +627,8 @@ power-domain@MT8195_POWER_DOMAIN_VDEC1 {
+
+ power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
+ reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
++ clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
++ clock-names = "venc1-larb";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -689,6 +691,8 @@ power-domain@MT8195_POWER_DOMAIN_VDEC2 {
+
+ power-domain@MT8195_POWER_DOMAIN_VENC {
+ reg = <MT8195_POWER_DOMAIN_VENC>;
++ clocks = <&vencsys CLK_VENC_LARB>;
++ clock-names = "venc0-larb";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -1959,6 +1963,7 @@ vppsys0: syscon@14000000 {
+ compatible = "mediatek,mt8195-vppsys0", "syscon";
+ reg = <0 0x14000000 0 0x1000>;
+ #clock-cells = <1>;
++ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
+ };
+
+ mutex@1400f000 {
+@@ -2073,6 +2078,7 @@ vppsys1: syscon@14f00000 {
+ compatible = "mediatek,mt8195-vppsys1", "syscon";
+ reg = <0 0x14f00000 0 0x1000>;
+ #clock-cells = <1>;
++ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
+ };
+
+ mutex@14f01000 {
+@@ -2619,6 +2625,7 @@ vdosys0: syscon@1c01a000 {
+ reg = <0 0x1c01a000 0 0x1000>;
+ mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
+ #clock-cells = <1>;
++ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
+ };
+
+
+@@ -2665,7 +2672,7 @@ larb20: larb@1b010000 {
+ reg = <0 0x1b010000 0 0x1000>;
+ mediatek,larb-id = <20>;
+ mediatek,smi = <&smi_common_vpp>;
+- clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>,
++ clocks = <&vencsys_core1 CLK_VENC_CORE1_VENC>,
+ <&vencsys_core1 CLK_VENC_CORE1_GALS>,
+ <&vppsys0 CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1>;
+ clock-names = "apb", "smi", "gals";
+@@ -2759,10 +2766,10 @@ dp_intf0: dp-intf@1c015000 {
+ compatible = "mediatek,mt8195-dp-intf";
+ reg = <0 0x1c015000 0 0x1000>;
+ interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>;
+- clocks = <&vdosys0 CLK_VDO0_DP_INTF0>,
+- <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
++ clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
++ <&vdosys0 CLK_VDO0_DP_INTF0>,
+ <&apmixedsys CLK_APMIXED_TVDPLL1>;
+- clock-names = "engine", "pixel", "pll";
++ clock-names = "pixel", "engine", "pll";
+ status = "disabled";
+ };
+
+@@ -2772,6 +2779,7 @@ mutex: mutex@1c016000 {
+ interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
+ clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
++ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
+ };
+
+@@ -2842,6 +2850,7 @@ mutex1: mutex@1c101000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ clocks = <&vdosys1 CLK_VDO1_DISP_MUTEX>;
+ clock-names = "vdo1_mutex";
++ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x1000 0x1000>;
+ mediatek,gce-events = <CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0>;
+ };
+
+@@ -2869,7 +2878,7 @@ larb3: larb@1c103000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ };
+
+- vdo1_rdma0: rdma@1c104000 {
++ vdo1_rdma0: dma-controller@1c104000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c104000 0 0x1000>;
+ interrupts = <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2877,9 +2886,10 @@ vdo1_rdma0: rdma@1c104000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x4000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma1: rdma@1c105000 {
++ vdo1_rdma1: dma-controller@1c105000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c105000 0 0x1000>;
+ interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2887,9 +2897,10 @@ vdo1_rdma1: rdma@1c105000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA1>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x5000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma2: rdma@1c106000 {
++ vdo1_rdma2: dma-controller@1c106000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c106000 0 0x1000>;
+ interrupts = <GIC_SPI 497 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2897,9 +2908,10 @@ vdo1_rdma2: rdma@1c106000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA2>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x6000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma3: rdma@1c107000 {
++ vdo1_rdma3: dma-controller@1c107000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c107000 0 0x1000>;
+ interrupts = <GIC_SPI 498 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2907,9 +2919,10 @@ vdo1_rdma3: rdma@1c107000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA3>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x7000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma4: rdma@1c108000 {
++ vdo1_rdma4: dma-controller@1c108000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c108000 0 0x1000>;
+ interrupts = <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2917,9 +2930,10 @@ vdo1_rdma4: rdma@1c108000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA4>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x8000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma5: rdma@1c109000 {
++ vdo1_rdma5: dma-controller@1c109000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c109000 0 0x1000>;
+ interrupts = <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2927,9 +2941,10 @@ vdo1_rdma5: rdma@1c109000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA5>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0x9000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma6: rdma@1c10a000 {
++ vdo1_rdma6: dma-controller@1c10a000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c10a000 0 0x1000>;
+ interrupts = <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2937,9 +2952,10 @@ vdo1_rdma6: rdma@1c10a000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vdo M4U_PORT_L2_MDP_RDMA6>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xa000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+- vdo1_rdma7: rdma@1c10b000 {
++ vdo1_rdma7: dma-controller@1c10b000 {
+ compatible = "mediatek,mt8195-vdo1-rdma";
+ reg = <0 0x1c10b000 0 0x1000>;
+ interrupts = <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH 0>;
+@@ -2947,6 +2963,7 @@ vdo1_rdma7: rdma@1c10b000 {
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+ iommus = <&iommu_vpp M4U_PORT_L3_MDP_RDMA7>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xb000 0x1000>;
++ #dma-cells = <1>;
+ };
+
+ merge1: vpp-merge@1c10c000 {
+@@ -3019,10 +3036,10 @@ dp_intf1: dp-intf@1c113000 {
+ reg = <0 0x1c113000 0 0x1000>;
+ interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
+- clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
+- <&vdosys1 CLK_VDO1_DPINTF>,
++ clocks = <&vdosys1 CLK_VDO1_DPINTF>,
++ <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
+ <&apmixedsys CLK_APMIXED_TVDPLL2>;
+- clock-names = "engine", "pixel", "pll";
++ clock-names = "pixel", "engine", "pll";
+ status = "disabled";
+ };
+
+@@ -3378,7 +3395,7 @@ vpu1_crit: trip-crit {
+ };
+ };
+
+- gpu0-thermal {
++ gpu-thermal {
+ polling-delay = <1000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&lvts_ap MT8195_AP_GPU0>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+index bbc2e9bef08da5..441216eda487f8 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+@@ -9,8 +9,8 @@ / {
+ compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
+
+ aliases {
+- rtc0 = "/i2c@7000d000/as3722@40";
+- rtc1 = "/rtc@7000e000";
++ rtc0 = &as3722;
++ rtc1 = &tegra_rtc;
+ serial0 = &uarta;
+ };
+
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+index 8b78be8f4f9d0b..4b5435f5832344 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+@@ -570,7 +570,7 @@ spi@7000de00 {
+ status = "disabled";
+ };
+
+- rtc@7000e000 {
++ tegra_rtc: rtc@7000e000 {
+ compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
+ reg = <0x0 0x7000e000 0x0 0x100>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+index 4413a9b6da87a2..bf2ccc8ff93c4b 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+@@ -174,7 +174,7 @@ ethernet@6800000 {
+ status = "okay";
+
+ phy-handle = <&mgbe0_phy>;
+- phy-mode = "usxgmii";
++ phy-mode = "10gbase-r";
+
+ mdio {
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
+index 5f592f1d81e2ee..fe08e131b7b9ef 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
+@@ -28,7 +28,7 @@ spi@3270000 {
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+- spi-max-frequency = <136000000>;
++ spi-max-frequency = <102000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+@@ -42,7 +42,7 @@ flash@0 {
+ mmc@3400000 {
+ status = "okay";
+ bus-width = <4>;
+- cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_HIGH>;
++ cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>;
+ disable-wp;
+ };
+
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 95524e5bce8262..ac69eacf8a6ba9 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -43,12 +43,12 @@ timer@2080000 {
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+index 4f5541e9be0e98..dabe9f42a63ade 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+@@ -172,6 +172,9 @@ adv_bridge: bridge@39 {
+ pd-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
+
+ avdd-supply = <&pm8916_l6>;
++ a2vdd-supply = <&pm8916_l6>;
++ dvdd-supply = <&pm8916_l6>;
++ pvdd-supply = <&pm8916_l6>;
+ v1p2-supply = <&pm8916_l6>;
+ v3p3-supply = <&pm8916_l17>;
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+index 8bfc2db44624af..e40c55adff23d1 100644
+--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+@@ -135,7 +135,7 @@ smem@4a800000 {
+ reg = <0x0 0x4a800000 0x0 0x100000>;
+ no-map;
+
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 47b8b1d6730ac6..fea16f3271c007 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -211,7 +211,7 @@ q6_region: memory@4ab00000 {
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_region>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc@0 {
+@@ -393,7 +393,7 @@ gcc: gcc@1800000 {
+
+ tcsr_mutex: hwlock@1905000 {
+ compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
+- reg = <0x0 0x01905000 0x0 0x1000>;
++ reg = <0x0 0x01905000 0x0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+@@ -565,7 +565,7 @@ usb3: usb@8af8800 {
+ <&gcc GCC_USB0_MOCK_UTMI_CLK>;
+ assigned-clock-rates = <133330000>,
+ <133330000>,
+- <20000000>;
++ <24000000>;
+
+ resets = <&gcc GCC_USB0_BCR>;
+ status = "disabled";
+@@ -579,6 +579,7 @@ dwc_0: usb@8a00000 {
+ clocks = <&xo>;
+ clock-names = "ref";
+ tx-fifo-resize;
++ snps,parkmode-disable-ss-quirk;
+ snps,is-utmi-l1-suspend;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,dis_u2_susphy_quirk;
+@@ -767,10 +768,10 @@ pcie0: pci@20000000 {
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+- interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+- <0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+- <0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+- <0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
++ interrupt-map = <0 0 0 1 &intc 0 0 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
++ <0 0 0 2 &intc 0 0 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
++ <0 0 0 3 &intc 0 0 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
++ <0 0 0 4 &intc 0 0 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+ <&gcc GCC_PCIE0_AXI_M_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 00ed71936b4723..e5993a365870c1 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -101,7 +101,7 @@ smem@4ab00000 {
+ reg = <0x0 0x4ab00000 0x0 0x100000>;
+ no-map;
+
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ memory@4ac00000 {
+@@ -641,6 +641,7 @@ dwc_0: usb@8a00000 {
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&qusb_phy_0>, <&usb0_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
++ snps,parkmode-disable-ss-quirk;
+ snps,is-utmi-l1-suspend;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,dis_u2_susphy_quirk;
+@@ -683,6 +684,7 @@ dwc_1: usb@8c00000 {
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&qusb_phy_1>, <&usb1_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
++ snps,parkmode-disable-ss-quirk;
+ snps,is-utmi-l1-suspend;
+ snps,hird-threshold = /bits/ 8 <0x0>;
+ snps,dis_u2_susphy_quirk;
+@@ -817,13 +819,13 @@ pcie1: pci@10000000 {
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+- interrupt-map = <0 0 0 1 &intc 0 142
++ interrupt-map = <0 0 0 1 &intc 0 0 142
+ IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+- <0 0 0 2 &intc 0 143
++ <0 0 0 2 &intc 0 0 143
+ IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+- <0 0 0 3 &intc 0 144
++ <0 0 0 3 &intc 0 0 144
+ IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+- <0 0 0 4 &intc 0 145
++ <0 0 0 4 &intc 0 0 145
+ IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_SYS_NOC_PCIE1_AXI_CLK>,
+@@ -879,13 +881,13 @@ pcie0: pci@20000000 {
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+- interrupt-map = <0 0 0 1 &intc 0 75
++ interrupt-map = <0 0 0 1 &intc 0 0 75
+ IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+- <0 0 0 2 &intc 0 78
++ <0 0 0 2 &intc 0 0 78
+ IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+- <0 0 0 3 &intc 0 79
++ <0 0 0 3 &intc 0 0 79
+ IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+- <0 0 0 4 &intc 0 83
++ <0 0 0 4 &intc 0 0 83
+ IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+
+ clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 51aba071c1eb30..8a72ad4afd0320 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -195,7 +195,7 @@ tz_region: tz@4a600000 {
+ smem@4aa00000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x4aa00000 0x0 0x100000>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ no-map;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+index 3892ad4f639a8f..4efc534b1d6e74 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+@@ -88,6 +88,7 @@ led-controller@45 {
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l17>;
++ vio-supply = <&pm8916_l6>;
+
+ led@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+index 8e238976ab1cef..43078b890d8605 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+@@ -118,6 +118,7 @@ led-controller@45 {
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l16>;
++ vio-supply = <&pm8916_l5>;
+
+ led@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 33fb65d7310461..961ceb83a91fae 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1813,7 +1813,7 @@ apps_iommu: iommu@1ef0000 {
+ #size-cells = <1>;
+ #iommu-cells = <1>;
+ compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+- ranges = <0 0x01e20000 0x40000>;
++ ranges = <0 0x01e20000 0x20000>;
+ reg = <0x01ef0000 0x3000>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+@@ -2085,6 +2085,7 @@ blsp_dma: dma-controller@7884000 {
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
++ qcom,controlled-remotely;
+ };
+
+ blsp_uart1: serial@78af000 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 6e24f0f2374fe5..3fd64cafe99c5f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1447,7 +1447,7 @@ opp-19200000 {
+ apps_iommu: iommu@1ef0000 {
+ compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
+ reg = <0x01ef0000 0x3000>;
+- ranges = <0 0x01e20000 0x40000>;
++ ranges = <0 0x01e20000 0x20000>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+ clock-names = "iface", "bus";
+@@ -1661,6 +1661,7 @@ blsp_dma: dma-controller@7884000 {
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
++ qcom,controlled-remotely;
+ };
+
+ blsp_uart1: serial@78af000 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
+index ed95d09cedb1e3..6b9245cd8b0c3f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
+@@ -111,6 +111,7 @@ led-controller@45 {
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
+index 61ff629c9bf345..9ac4f507e321a6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
+@@ -104,6 +104,7 @@ led-controller@45 {
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+index 1a1d3f92a51168..b0588f30f8f1a7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+@@ -113,6 +113,7 @@ led-controller@45 {
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+index f9f5afbcc52bba..4c5be22b47feea 100644
+--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+@@ -379,7 +379,7 @@ adsp_smp2p_in: slave-kernel {
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
+- qcom,ipc = <&apcs 8 13>;
++ qcom,ipc = <&apcs 8 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+@@ -402,7 +402,7 @@ modem_smp2p_in: slave-kernel {
+ smp2p-wcnss {
+ compatible = "qcom,smp2p";
+ interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
+- qcom,ipc = <&apcs 8 17>;
++ qcom,ipc = <&apcs 8 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+@@ -428,9 +428,9 @@ smsm {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- qcom,ipc-1 = <&apcs 8 12>;
++ qcom,ipc-1 = <&apcs 8 13>;
+ qcom,ipc-2 = <&apcs 8 9>;
+- qcom,ipc-3 = <&apcs 8 18>;
++ qcom,ipc-3 = <&apcs 8 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+index fcca1ba94da699..5fe5de9ceef99f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
++++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+@@ -109,11 +109,6 @@ rmtfs_mem: rmtfs@ca100000 {
+ qcom,client-id = <1>;
+ };
+
+- audio_mem: audio@cb400000 {
+- reg = <0 0xcb000000 0 0x400000>;
+- no-mem;
+- };
+-
+ qseecom_mem: qseecom@cb400000 {
+ reg = <0 0xcb400000 0 0x1c00000>;
+ no-mem;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+index 06f8ff624181fc..d5b35ff0175cd4 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+@@ -405,7 +405,6 @@ &usb3_dwc3 {
+
+ &hsusb_phy1 {
+ status = "okay";
+- extcon = <&typec>;
+
+ vdda-pll-supply = <&vreg_l12a_1p8>;
+ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index c8e0986425ab4f..1f7cbb35886db5 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -443,6 +443,19 @@ memory@80000000 {
+ reg = <0x0 0x80000000 0x0 0x0>;
+ };
+
++ etm {
++ compatible = "qcom,coresight-remote-etm";
++
++ out-ports {
++ port {
++ modem_etm_out_funnel_in2: endpoint {
++ remote-endpoint =
++ <&funnel_in2_in_modem_etm>;
++ };
++ };
++ };
++ };
++
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+@@ -2077,7 +2090,7 @@ ufshc: ufshc@624000 {
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+- <0 0>,
++ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+@@ -2643,6 +2656,14 @@ funnel@3023000 {
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
++ in-ports {
++ port {
++ funnel_in2_in_modem_etm: endpoint {
++ remote-endpoint =
++ <&modem_etm_out_funnel_in2>;
++ };
++ };
++ };
+
+ out-ports {
+ port {
+@@ -3061,6 +3082,7 @@ usb3_dwc3: usb@6a00000 {
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ snps,is-utmi-l1-suspend;
++ snps,parkmode-disable-ss-quirk;
+ tx-fifo-resize;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index f180047cacb057..7fcc15b6946ae8 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -1588,7 +1588,6 @@ adreno_smmu: iommu@5040000 {
+ * SoC VDDMX RPM Power Domain in the Adreno driver.
+ */
+ power-domains = <&gpucc GPU_GX_GDSC>;
+- status = "disabled";
+ };
+
+ gpucc: clock-controller@5065000 {
+@@ -2034,9 +2033,11 @@ etm5: etm@7c40000 {
+
+ cpu = <&CPU4>;
+
+- port {
+- etm4_out: endpoint {
+- remote-endpoint = <&apss_funnel_in4>;
++ out-ports {
++ port {
++ etm4_out: endpoint {
++ remote-endpoint = <&apss_funnel_in4>;
++ };
+ };
+ };
+ };
+@@ -2051,9 +2052,11 @@ etm6: etm@7d40000 {
+
+ cpu = <&CPU5>;
+
+- port {
+- etm5_out: endpoint {
+- remote-endpoint = <&apss_funnel_in5>;
++ out-ports {
++ port {
++ etm5_out: endpoint {
++ remote-endpoint = <&apss_funnel_in5>;
++ };
+ };
+ };
+ };
+@@ -2068,9 +2071,11 @@ etm7: etm@7e40000 {
+
+ cpu = <&CPU6>;
+
+- port {
+- etm6_out: endpoint {
+- remote-endpoint = <&apss_funnel_in6>;
++ out-ports {
++ port {
++ etm6_out: endpoint {
++ remote-endpoint = <&apss_funnel_in6>;
++ };
+ };
+ };
+ };
+@@ -2085,9 +2090,11 @@ etm8: etm@7f40000 {
+
+ cpu = <&CPU7>;
+
+- port {
+- etm7_out: endpoint {
+- remote-endpoint = <&apss_funnel_in7>;
++ out-ports {
++ port {
++ etm7_out: endpoint {
++ remote-endpoint = <&apss_funnel_in7>;
++ };
+ };
+ };
+ };
+@@ -2152,7 +2159,8 @@ usb3_dwc3: usb@a800000 {
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+- phys = <&qusb2phy>, <&usb1_ssphy>;
++ snps,parkmode-disable-ss-quirk;
++ phys = <&qusb2phy>, <&usb3phy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+@@ -2161,33 +2169,26 @@ usb3_dwc3: usb@a800000 {
+
+ usb3phy: phy@c010000 {
+ compatible = "qcom,msm8998-qmp-usb3-phy";
+- reg = <0x0c010000 0x18c>;
+- status = "disabled";
+- #address-cells = <1>;
+- #size-cells = <1>;
+- ranges;
++ reg = <0x0c010000 0x1000>;
+
+ clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
++ <&gcc GCC_USB3_CLKREF_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+- <&gcc GCC_USB3_CLKREF_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref";
++ <&gcc GCC_USB3_PHY_PIPE_CLK>;
++ clock-names = "aux",
++ "ref",
++ "cfg_ahb",
++ "pipe";
++ clock-output-names = "usb3_phy_pipe_clk_src";
++ #clock-cells = <0>;
++ #phy-cells = <0>;
+
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+- reset-names = "phy", "common";
++ reset-names = "phy",
++ "phy_phy";
+
+- usb1_ssphy: phy@c010200 {
+- reg = <0xc010200 0x128>,
+- <0xc010400 0x200>,
+- <0xc010c00 0x20c>,
+- <0xc010600 0x128>,
+- <0xc010800 0x200>;
+- #phy-cells = <0>;
+- #clock-cells = <0>;
+- clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_phy_pipe_clk_src";
+- };
++ status = "disabled";
+ };
+
+ qusb2phy: phy@c012000 {
+diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+index d46e591e72b5c9..40a8506553ef5d 100644
+--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+@@ -418,6 +418,11 @@ tcsr_mutex: hwlock@340000 {
+ #hwlock-cells = <1>;
+ };
+
++ tcsr_regs: syscon@3c0000 {
++ compatible = "qcom,qcm2290-tcsr", "syscon";
++ reg = <0x0 0x003c0000 0x0 0x40000>;
++ };
++
+ tlmm: pinctrl@500000 {
+ compatible = "qcom,qcm2290-tlmm";
+ reg = <0x0 0x00500000 0x0 0x300000>;
+@@ -665,6 +670,8 @@ usb_qmpphy: phy@1615000 {
+
+ #phy-cells = <0>;
+
++ qcom,tcsr-reg = <&tcsr_regs 0xb244>;
++
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+index 10655401528e4e..a22b4501ce1ef5 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+@@ -62,7 +62,7 @@ bluetooth {
+ vddrf-supply = <&vreg_l1_1p3>;
+ vddch0-supply = <&vdd_ch0_3p3>;
+
+- local-bd-address = [ 02 00 00 00 5a ad ];
++ local-bd-address = [ 00 00 00 00 00 00 ];
+
+ max-speed = <3200000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+index 1c0e5d271e91bb..dbdc06be6260b7 100644
+--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
++++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+@@ -1452,7 +1452,21 @@ system-cache-controller@19200000 {
+ "llcc_broadcast_base",
+ "multi_channel_register";
+ interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+- multi-ch-bit-off = <24 2>;
++
++ nvmem-cells = <&multi_chan_ddr>;
++ nvmem-cell-names = "multi-chan-ddr";
++ };
++
++ sec_qfprom: efuse@221c8000 {
++ compatible = "qcom,qdu1000-sec-qfprom", "qcom,sec-qfprom";
++ reg = <0 0x221c8000 0 0x1000>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ multi_chan_ddr: multi-chan-ddr@12b {
++ reg = <0x12b 0x1>;
++ bits = <0 2>;
++ };
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+index eadba066972e87..37abb83ea46476 100644
+--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
++++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+@@ -13,7 +13,7 @@ / {
+ compatible = "qcom,qrb2210-rb1", "qcom,qrb2210", "qcom,qcm2290";
+
+ aliases {
+- serial0 = &uart0;
++ serial0 = &uart4;
+ sdhc1 = &sdhc_1;
+ sdhc2 = &sdhc_2;
+ };
+@@ -150,15 +150,15 @@ regulators {
+
+ pm2250_s3: s3 {
+ /* 0.4V-1.6625V -> 1.3V (Power tree requirements) */
+- regulator-min-microvolts = <1350000>;
+- regulator-max-microvolts = <1350000>;
++ regulator-min-microvolt = <1352000>;
++ regulator-max-microvolt = <1352000>;
+ regulator-boot-on;
+ };
+
+ pm2250_s4: s4 {
+ /* 1.2V-2.35V -> 2.05V (Power tree requirements) */
+- regulator-min-microvolts = <2072000>;
+- regulator-max-microvolts = <2072000>;
++ regulator-min-microvolt = <2072000>;
++ regulator-max-microvolt = <2072000>;
+ regulator-boot-on;
+ };
+
+@@ -166,47 +166,47 @@ pm2250_s4: s4 {
+
+ pm2250_l2: l2 {
+ /* LPDDR4X VDD2 */
+- regulator-min-microvolts = <1136000>;
+- regulator-max-microvolts = <1136000>;
++ regulator-min-microvolt = <1136000>;
++ regulator-max-microvolt = <1136000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm2250_l3: l3 {
+ /* LPDDR4X VDDQ */
+- regulator-min-microvolts = <616000>;
+- regulator-max-microvolts = <616000>;
++ regulator-min-microvolt = <616000>;
++ regulator-max-microvolt = <616000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm2250_l4: l4 {
+- /* max = 3.05V -> max = just below 3V (SDHCI2) */
+- regulator-min-microvolts = <1648000>;
+- regulator-max-microvolts = <2992000>;
++ /* max = 3.05V -> max = 2.7 to disable 3V signaling (SDHCI2) */
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <2700000>;
+ regulator-allow-set-load;
+ };
+
+ pm2250_l5: l5 {
+ /* CSI/DSI */
+- regulator-min-microvolts = <1232000>;
+- regulator-max-microvolts = <1232000>;
++ regulator-min-microvolt = <1232000>;
++ regulator-max-microvolt = <1232000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l6: l6 {
+ /* DRAM PLL */
+- regulator-min-microvolts = <928000>;
+- regulator-max-microvolts = <928000>;
++ regulator-min-microvolt = <928000>;
++ regulator-max-microvolt = <928000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm2250_l7: l7 {
+ /* Wi-Fi CX/MX */
+- regulator-min-microvolts = <664000>;
+- regulator-max-microvolts = <664000>;
++ regulator-min-microvolt = <664000>;
++ regulator-max-microvolt = <664000>;
+ };
+
+ /*
+@@ -216,37 +216,37 @@ pm2250_l7: l7 {
+
+ pm2250_l10: l10 {
+ /* Wi-Fi RFA */
+- regulator-min-microvolts = <1300000>;
+- regulator-max-microvolts = <1300000>;
++ regulator-min-microvolt = <1304000>;
++ regulator-max-microvolt = <1304000>;
+ };
+
+ pm2250_l11: l11 {
+ /* GPS RF1 */
+- regulator-min-microvolts = <1000000>;
+- regulator-max-microvolts = <1000000>;
++ regulator-min-microvolt = <1000000>;
++ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ };
+
+ pm2250_l12: l12 {
+ /* USB PHYs */
+- regulator-min-microvolts = <928000>;
+- regulator-max-microvolts = <928000>;
++ regulator-min-microvolt = <928000>;
++ regulator-max-microvolt = <928000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l13: l13 {
+ /* USB/QFPROM/PLLs */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l14: l14 {
+ /* SDHCI1 VQMMC */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ /* Broken hardware, never turn it off! */
+ regulator-always-on;
+@@ -254,8 +254,8 @@ pm2250_l14: l14 {
+
+ pm2250_l15: l15 {
+ /* WCD/DSI/BT VDDIO */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ regulator-always-on;
+ regulator-boot-on;
+@@ -263,47 +263,47 @@ pm2250_l15: l15 {
+
+ pm2250_l16: l16 {
+ /* GPS RF2 */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ };
+
+ pm2250_l17: l17 {
+- regulator-min-microvolts = <3000000>;
+- regulator-max-microvolts = <3000000>;
++ regulator-min-microvolt = <3000000>;
++ regulator-max-microvolt = <3000000>;
+ };
+
+ pm2250_l18: l18 {
+ /* VDD_PXn */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ };
+
+ pm2250_l19: l19 {
+ /* VDD_PXn */
+- regulator-min-microvolts = <1800000>;
+- regulator-max-microvolts = <1800000>;
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
+ };
+
+ pm2250_l20: l20 {
+ /* SDHCI1 VMMC */
+- regulator-min-microvolts = <2856000>;
+- regulator-max-microvolts = <2856000>;
++ regulator-min-microvolt = <2400000>;
++ regulator-max-microvolt = <3600000>;
+ regulator-allow-set-load;
+ };
+
+ pm2250_l21: l21 {
+ /* SDHCI2 VMMC */
+- regulator-min-microvolts = <2960000>;
+- regulator-max-microvolts = <3300000>;
++ regulator-min-microvolt = <2960000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-allow-set-load;
+ regulator-boot-on;
+ };
+
+ pm2250_l22: l22 {
+ /* Wi-Fi */
+- regulator-min-microvolts = <3312000>;
+- regulator-max-microvolts = <3312000>;
++ regulator-min-microvolt = <3312000>;
++ regulator-max-microvolt = <3312000>;
+ };
+ };
+ };
+@@ -357,7 +357,7 @@ key_volp_n: key-volp-n-state {
+ };
+
+ /* UART connected to the Micro-USB port via a FTDI chip */
+-&uart0 {
++&uart4 {
+ compatible = "qcom,geni-debug-uart";
+ status = "okay";
+ };
+@@ -366,6 +366,16 @@ &usb {
+ status = "okay";
+ };
+
++&usb_qmpphy {
++ vdda-phy-supply = <&pm2250_l12>;
++ vdda-pll-supply = <&pm2250_l13>;
++ status = "okay";
++};
++
++&usb_dwc3 {
++ dr_mode = "host";
++};
++
+ &usb_hsphy {
+ vdd-supply = <&pm2250_l12>;
+ vdda-pll-supply = <&pm2250_l13>;
+diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+index a7278a9472ed9b..5def8c1154ceb3 100644
+--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
++++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+@@ -57,6 +57,17 @@ hdmi_con: endpoint {
+ };
+ };
+
++ i2c2_gpio: i2c {
++ compatible = "i2c-gpio";
++
++ sda-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
++ scl-gpios = <&tlmm 7 GPIO_ACTIVE_HIGH>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ status = "disabled";
++ };
++
+ leds {
+ compatible = "gpio-leds";
+
+@@ -187,7 +198,7 @@ zap-shader {
+ };
+ };
+
+-&i2c2 {
++&i2c2_gpio {
+ clock-frequency = <400000>;
+ status = "okay";
+
+@@ -353,6 +364,8 @@ vreg_l8a_0p664: l8 {
+ vreg_l9a_1p8: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
++ regulator-always-on;
++ regulator-boot-on;
+ };
+
+ vreg_l10a_1p8: l10 {
+@@ -518,7 +531,6 @@ &usb {
+
+ &usb_dwc3 {
+ maximum-speed = "super-speed";
+- dr_mode = "peripheral";
+ };
+
+ &usb_hsphy {
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index dfa8ee5c75af63..e95a004c33919a 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -63,8 +63,8 @@ led-user4 {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+- linux,default-trigger = "panic-indicator";
+ default-state = "off";
++ panic-indicator;
+ };
+
+ led-wlan {
+diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+index 5e4287f8c8cd19..b2cf2c988336c0 100644
+--- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
++++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts
+@@ -367,6 +367,16 @@ queue0 {
+ };
+ };
+
++&pmm8155au_1_gpios {
++ pmm8155au_1_sdc2_cd: sdc2-cd-default-state {
++ pins = "gpio4";
++ function = "normal";
++ input-enable;
++ bias-pull-up;
++ power-source = <0>;
++ };
++};
++
+ &qupv3_id_1 {
+ status = "okay";
+ };
+@@ -384,10 +394,10 @@ &remoteproc_cdsp {
+ &sdhc_2 {
+ status = "okay";
+
+- cd-gpios = <&tlmm 4 GPIO_ACTIVE_LOW>;
++ cd-gpios = <&pmm8155au_1_gpios 4 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default", "sleep";
+- pinctrl-0 = <&sdc2_on>;
+- pinctrl-1 = <&sdc2_off>;
++ pinctrl-0 = <&sdc2_on &pmm8155au_1_sdc2_cd>;
++ pinctrl-1 = <&sdc2_off &pmm8155au_1_sdc2_cd>;
+ vqmmc-supply = <&vreg_l13c_2p96>; /* IO line power */
+ vmmc-supply = <&vreg_l17a_2p96>; /* Card power line */
+ bus-width = <4>;
+@@ -505,13 +515,6 @@ data-pins {
+ bias-pull-up; /* pull up */
+ drive-strength = <16>; /* 16 MA */
+ };
+-
+- sd-cd-pins {
+- pins = "gpio96";
+- function = "gpio";
+- bias-pull-up; /* pull up */
+- drive-strength = <2>; /* 2 MA */
+- };
+ };
+
+ sdc2_off: sdc2-off-state {
+@@ -532,13 +535,6 @@ data-pins {
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+-
+- sd-cd-pins {
+- pins = "gpio96";
+- function = "gpio";
+- bias-pull-up; /* pull up */
+- drive-strength = <2>; /* 2 MA */
+- };
+ };
+
+ usb2phy_ac_en1_default: usb2phy-ac-en1-default-state {
+diff --git a/arch/arm64/boot/dts/qcom/sa8540p.dtsi b/arch/arm64/boot/dts/qcom/sa8540p.dtsi
+index 96b2c59ad02b4d..23888029cc1179 100644
+--- a/arch/arm64/boot/dts/qcom/sa8540p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8540p.dtsi
+@@ -168,6 +168,9 @@ opp-2592000000 {
+ };
+
+ &gpucc {
++ /* SA8295P and SA8540P doesn't provide gfx.lvl */
++ /delete-property/ power-domains;
++
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index 9f4f58e831a4a6..f6766fa8df34d3 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -1602,8 +1602,8 @@ usb_0: usb@a6f8800 {
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 14 IRQ_TYPE_EDGE_RISING>,
+- <&pdc 15 IRQ_TYPE_EDGE_RISING>,
++ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "dp_hs_phy_irq",
+@@ -1689,8 +1689,8 @@ usb_1: usb@a8f8800 {
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 8 IRQ_TYPE_EDGE_RISING>,
+- <&pdc 7 IRQ_TYPE_EDGE_RISING>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 7 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "pwr_event",
+ "dp_hs_phy_irq",
+@@ -1752,8 +1752,8 @@ usb_2: usb@a4f8800 {
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 10 IRQ_TYPE_EDGE_RISING>,
+- <&pdc 9 IRQ_TYPE_EDGE_RISING>;
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "pwr_event",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq";
+@@ -1951,6 +1951,7 @@ apps_smmu: iommu@15000000 {
+ reg = <0x0 0x15000000 0x0 0x100000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
++ dma-coherent;
+
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+@@ -2089,6 +2090,7 @@ pcie_smmu: iommu@15200000 {
+ reg = <0x0 0x15200000 0x0 0x80000>;
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
++ dma-coherent;
+
+ interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>,
+@@ -2173,7 +2175,7 @@ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sa8775p", "qcom,kpss-wdt";
+ reg = <0x0 0x17c10000 0x0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ memtimer: timer@17c20000 {
+@@ -2350,6 +2352,7 @@ ethernet1: ethernet@23000000 {
+ phy-names = "serdes";
+
+ iommus = <&apps_smmu 0x140 0xf>;
++ dma-coherent;
+
+ snps,tso;
+ snps,pbl = <32>;
+@@ -2383,6 +2386,7 @@ ethernet0: ethernet@23040000 {
+ phy-names = "serdes";
+
+ iommus = <&apps_smmu 0x120 0xf>;
++ dma-coherent;
+
+ snps,tso;
+ snps,pbl = <32>;
+@@ -2398,7 +2402,7 @@ arch_timer: timer {
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+- <GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
++ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ pcie0: pci@1c00000{
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
+index dbb48934d49950..3342cb0480385f 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
++++ b/arch/arm64/boot/dts/qcom/sc7180-acer-aspire1.dts
+@@ -209,9 +209,22 @@ alc5682: codec@1a {
+ AVDD-supply = <&vreg_l15a_1p8>;
+ MICVDD-supply = <&reg_codec_3p3>;
+ VBAT-supply = <&reg_codec_3p3>;
++ DBVDD-supply = <&vreg_l15a_1p8>;
++ LDO1-IN-supply = <&vreg_l15a_1p8>;
++
++ /*
++ * NOTE: The board has a path from this codec to the
++ * DMIC microphones in the lid, however some of the option
++ * resistors are absent and the microphones are connected
++ * to the SoC instead.
++ *
++ * If the resistors were to be changed by the user to
++ * connect the codec, the following could be used:
++ *
++ * realtek,dmic1-data-pin = <1>;
++ * realtek,dmic1-clk-pin = <1>;
++ */
+
+- realtek,dmic1-data-pin = <1>;
+- realtek,dmic1-clk-pin = <1>;
+ realtek,jd-src = <1>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+index 5a33e16a8b6776..c2f5e9f6679d69 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+@@ -970,6 +970,8 @@ bluetooth: bluetooth {
+ vddrf-supply = <&pp1300_l2c>;
+ vddch0-supply = <&pp3300_l10c>;
+ max-speed = <3200000>;
++
++ qcom,local-bd-address-broken;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index a79c0f2e18799c..68b1c017a9fd5f 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -15,6 +15,7 @@
+ #include <dt-bindings/interconnect/qcom,osm-l3.h>
+ #include <dt-bindings/interconnect/qcom,sc7180.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/phy/phy-qcom-qmp.h>
+ #include <dt-bindings/phy/phy-qcom-qusb2.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+@@ -2795,49 +2796,28 @@ usb_1_hsphy: phy@88e3000 {
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ };
+
+- usb_1_qmpphy: phy-wrapper@88e9000 {
++ usb_1_qmpphy: phy@88e8000 {
+ compatible = "qcom,sc7180-qmp-usb3-dp-phy";
+- reg = <0 0x088e9000 0 0x18c>,
+- <0 0x088e8000 0 0x3c>,
+- <0 0x088ea000 0 0x18c>;
++ reg = <0 0x088e8000 0 0x3000>;
+ status = "disabled";
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "com_aux";
++ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
++ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
++ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
++ clock-names = "aux",
++ "ref",
++ "com_aux",
++ "usb3_pipe",
++ "cfg_ahb";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+- usb_1_ssphy: usb3-phy@88e9200 {
+- reg = <0 0x088e9200 0 0x128>,
+- <0 0x088e9400 0 0x200>,
+- <0 0x088e9c00 0 0x218>,
+- <0 0x088e9600 0 0x128>,
+- <0 0x088e9800 0 0x200>,
+- <0 0x088e9a00 0 0x18>;
+- #clock-cells = <0>;
+- #phy-cells = <0>;
+- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_phy_pipe_clk_src";
+- };
+-
+- dp_phy: dp-phy@88ea200 {
+- reg = <0 0x088ea200 0 0x200>,
+- <0 0x088ea400 0 0x200>,
+- <0 0x088eaa00 0 0x200>,
+- <0 0x088ea600 0 0x200>,
+- <0 0x088ea800 0 0x200>;
+- #clock-cells = <1>;
+- #phy-cells = <0>;
+- };
++ #clock-cells = <1>;
++ #phy-cells = <1>;
+ };
+
+ pmu@90b6300 {
+@@ -2978,8 +2958,8 @@ usb_1: usb@a6f8800 {
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 8 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 9 IRQ_TYPE_LEVEL_HIGH>;
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -3001,7 +2981,8 @@ usb_1_dwc3: usb@a600000 {
+ iommus = <&apps_smmu 0x540 0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+- phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
++ snps,parkmode-disable-ss-quirk;
++ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ phy-names = "usb2-phy", "usb3-phy";
+ maximum-speed = "super-speed";
+ };
+@@ -3307,8 +3288,9 @@ mdss_dp: displayport-controller@ae90000 {
+ "ctrl_link_iface", "stream_pixel";
+ assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
+- assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
+- phys = <&dp_phy>;
++ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
++ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ operating-points-v2 = <&dp_opp_table>;
+@@ -3365,8 +3347,8 @@ dispcc: clock-controller@af00000 {
+ <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+ <&mdss_dsi0_phy 0>,
+ <&mdss_dsi0_phy 1>,
+- <&dp_phy 0>,
+- <&dp_phy 1>;
++ <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
+ clock-names = "bi_tcxo",
+ "gcc_disp_gpll0_clk_src",
+ "dsi0_phy_pll_out_byteclk",
+@@ -3587,7 +3569,7 @@ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
+index 2e1cd219fc1822..5d462ae14ba122 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi
+@@ -46,6 +46,26 @@ wpss_mem: memory@9ae00000 {
+ };
+ };
+
++&lpass_aon {
++ status = "okay";
++};
++
++&lpass_core {
++ status = "okay";
++};
++
++&lpass_hm {
++ status = "okay";
++};
++
++&lpasscc {
++ status = "okay";
++};
++
++&pdc_reset {
++ status = "okay";
++};
++
+ /* The PMIC PON code isn't compatible w/ how Chrome EC/BIOS handle things. */
+ &pmk8350_pon {
+ status = "disabled";
+@@ -84,6 +104,10 @@ &scm {
+ dma-coherent;
+ };
+
++&watchdog {
++ status = "okay";
++};
++
+ &wifi {
+ status = "okay";
+
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 925428a5f6aea2..149c7962f2cbb7 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -18,6 +18,7 @@
+ #include <dt-bindings/interconnect/qcom,sc7280.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
++#include <dt-bindings/phy/phy-qcom-qmp.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+ #include <dt-bindings/reset/qcom,sdm845-pdc.h>
+@@ -649,18 +650,6 @@ cpu7_opp_3014mhz: opp-3014400000 {
+ };
+ };
+
+- eud_typec: connector {
+- compatible = "usb-c-connector";
+-
+- ports {
+- port@0 {
+- con_eud: endpoint {
+- remote-endpoint = <&eud_con>;
+- };
+- };
+- };
+- };
+-
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+@@ -869,7 +858,8 @@ gcc: clock-controller@100000 {
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
+ <0>, <&pcie1_lane>,
+- <0>, <0>, <0>, <0>;
++ <0>, <0>, <0>,
++ <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
+ "pcie_0_pipe_clk", "pcie_1_pipe_clk",
+ "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
+@@ -936,6 +926,7 @@ sdhc_1: mmc@7c4000 {
+
+ bus-width = <8>;
+ supports-cqe;
++ dma-coherent;
+
+ qcom,dll-config = <0x0007642c>;
+ qcom,ddr-config = <0x80040868>;
+@@ -2108,8 +2099,16 @@ pcie1: pci@1c08000 {
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+
+- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "msi";
++ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "msi0", "msi1", "msi2", "msi3",
++ "msi4", "msi5", "msi6", "msi7";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
+@@ -2266,6 +2265,7 @@ lpasscc: lpasscc@3000000 {
+ clocks = <&gcc GCC_CFG_NOC_LPASS_CLK>;
+ clock-names = "iface";
+ #clock-cells = <1>;
++ status = "reserved"; /* Owned by ADSP firmware */
+ };
+
+ lpass_rx_macro: codec@3200000 {
+@@ -2417,6 +2417,7 @@ lpass_aon: clock-controller@3380000 {
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "iface";
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
++ status = "reserved"; /* Owned by ADSP firmware */
+ };
+
+ lpass_core: clock-controller@3900000 {
+@@ -2427,6 +2428,7 @@ lpass_core: clock-controller@3900000 {
+ power-domains = <&lpass_hm LPASS_CORE_CC_LPASS_CORE_HM_GDSC>;
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
++ status = "reserved"; /* Owned by ADSP firmware */
+ };
+
+ lpass_cpu: audio@3987000 {
+@@ -2497,6 +2499,7 @@ lpass_hm: clock-controller@3c00000 {
+ clock-names = "bi_tcxo";
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
++ status = "reserved"; /* Owned by ADSP firmware */
+ };
+
+ lpass_ag_noc: interconnect@3c40000 {
+@@ -2565,7 +2568,8 @@ gpu: gpu@3d00000 {
+ "cx_mem",
+ "cx_dbgc";
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+- iommus = <&adreno_smmu 0 0x401>;
++ iommus = <&adreno_smmu 0 0x400>,
++ <&adreno_smmu 1 0x400>;
+ operating-points-v2 = <&gpu_opp_table>;
+ qcom,gmu = <&gmu>;
+ interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
+@@ -2739,6 +2743,7 @@ adreno_smmu: iommu@3da0000 {
+ "gpu_cc_hub_aon_clk";
+
+ power-domains = <&gpucc GPU_CC_CX_GDSC>;
++ dma-coherent;
+ };
+
+ remoteproc_mpss: remoteproc@4080000 {
+@@ -3296,6 +3301,7 @@ sdhc_2: mmc@8804000 {
+ operating-points-v2 = <&sdhc2_opp_table>;
+
+ bus-width = <4>;
++ dma-coherent;
+
+ qcom,dll-config = <0x0007642c>;
+
+@@ -3346,49 +3352,26 @@ usb_2_hsphy: phy@88e4000 {
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ };
+
+- usb_1_qmpphy: phy-wrapper@88e9000 {
+- compatible = "qcom,sc7280-qmp-usb3-dp-phy",
+- "qcom,sm8250-qmp-usb3-dp-phy";
+- reg = <0 0x088e9000 0 0x200>,
+- <0 0x088e8000 0 0x40>,
+- <0 0x088ea000 0 0x200>;
++ usb_1_qmpphy: phy@88e8000 {
++ compatible = "qcom,sc7280-qmp-usb3-dp-phy";
++ reg = <0 0x088e8000 0 0x3000>;
+ status = "disabled";
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+- clock-names = "aux", "ref_clk_src", "com_aux";
++ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
++ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
++ clock-names = "aux",
++ "ref",
++ "com_aux",
++ "usb3_pipe";
+
+ resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+- usb_1_ssphy: usb3-phy@88e9200 {
+- reg = <0 0x088e9200 0 0x200>,
+- <0 0x088e9400 0 0x200>,
+- <0 0x088e9c00 0 0x400>,
+- <0 0x088e9600 0 0x200>,
+- <0 0x088e9800 0 0x200>,
+- <0 0x088e9a00 0 0x100>;
+- #clock-cells = <0>;
+- #phy-cells = <0>;
+- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_phy_pipe_clk_src";
+- };
+-
+- dp_phy: dp-phy@88ea200 {
+- reg = <0 0x088ea200 0 0x200>,
+- <0 0x088ea400 0 0x200>,
+- <0 0x088eaa00 0 0x200>,
+- <0 0x088ea600 0 0x200>,
+- <0 0x088ea800 0 0x200>;
+- #phy-cells = <0>;
+- #clock-cells = <1>;
+- };
++ #clock-cells = <1>;
++ #phy-cells = <1>;
+ };
+
+ usb_2: usb@8cf8800 {
+@@ -3416,8 +3399,8 @@ usb_2: usb@8cf8800 {
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 12 IRQ_TYPE_EDGE_RISING>,
+- <&pdc 13 IRQ_TYPE_EDGE_RISING>;
++ <&pdc 12 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 13 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq";
+@@ -3624,6 +3607,8 @@ eud: eud@88e0000 {
+ <0 0x88e2000 0 0x1000>;
+ interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+
++ status = "disabled";
++
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -3634,13 +3619,6 @@ eud_ep: endpoint {
+ remote-endpoint = <&usb2_role_switch>;
+ };
+ };
+-
+- port@1 {
+- reg = <1>;
+- eud_con: endpoint {
+- remote-endpoint = <&con_eud>;
+- };
+- };
+ };
+ };
+
+@@ -3676,9 +3654,9 @@ usb_1: usb@a6f8800 {
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 14 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+- <&pdc 17 IRQ_TYPE_EDGE_BOTH>;
++ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
+@@ -3702,7 +3680,8 @@ usb_1_dwc3: usb@a600000 {
+ iommus = <&apps_smmu 0xe0 0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+- phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
++ snps,parkmode-disable-ss-quirk;
++ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ phy-names = "usb2-phy", "usb3-phy";
+ maximum-speed = "super-speed";
+ };
+@@ -3807,8 +3786,8 @@ dispcc: clock-controller@af00000 {
+ <&gcc GCC_DISP_GPLL0_CLK_SRC>,
+ <&mdss_dsi_phy 0>,
+ <&mdss_dsi_phy 1>,
+- <&dp_phy 0>,
+- <&dp_phy 1>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
+ <&mdss_edp_phy 0>,
+ <&mdss_edp_phy 1>;
+ clock-names = "bi_tcxo",
+@@ -4144,8 +4123,9 @@ mdss_dp: displayport-controller@ae90000 {
+ "stream_pixel";
+ assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
+- assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
+- phys = <&dp_phy>;
++ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
++ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ operating-points-v2 = <&dp_opp_table>;
+@@ -4215,6 +4195,7 @@ pdc_reset: reset-controller@b5e0000 {
+ compatible = "qcom,sc7280-pdc-global";
+ reg = <0 0x0b5e0000 0 0x20000>;
+ #reset-cells = <1>;
++ status = "reserved"; /* Owned by firmware */
+ };
+
+ tsens0: thermal-sensor@c263000 {
+@@ -5211,11 +5192,12 @@ msi-controller@17a40000 {
+ };
+ };
+
+- watchdog@17c10000 {
++ watchdog: watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
++ status = "reserved"; /* Owned by Gunyah hyp */
+ };
+
+ timer@17c20000 {
+@@ -5363,6 +5345,14 @@ cpufreq_hw: cpufreq@18591000 {
+ reg = <0 0x18591000 0 0x1000>,
+ <0 0x18592000 0 0x1000>,
+ <0 0x18593000 0 0x1000>;
++
++ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "dcvsh-irq-0",
++ "dcvsh-irq-1",
++ "dcvsh-irq-2";
++
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+ #freq-domain-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
+index 834e6f9fb7c825..ae008c3b0aed93 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
++++ b/arch/arm64/boot/dts/qcom/sc8180x-primus.dts
+@@ -42,7 +42,7 @@ gpio-keys {
+ pinctrl-0 = <&hall_int_active_state>;
+
+ lid-switch {
+- gpios = <&tlmm 121 GPIO_ACTIVE_HIGH>;
++ gpios = <&tlmm 121 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ wakeup-source;
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+index 486f7ffef43b2d..92b85de7706d39 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -289,7 +289,7 @@ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x40000004>;
+- entry-latency-us = <241>;
++ entry-latency-us = <2411>;
+ exit-latency-us = <1461>;
+ min-residency-us = <4488>;
+ local-timer-stop;
+@@ -297,7 +297,15 @@ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ };
+
+ domain-idle-states {
+- CLUSTER_SLEEP_0: cluster-sleep-0 {
++ CLUSTER_SLEEP_APSS_OFF: cluster-sleep-0 {
++ compatible = "domain-idle-state";
++ arm,psci-suspend-param = <0x41000044>;
++ entry-latency-us = <3300>;
++ exit-latency-us = <3300>;
++ min-residency-us = <6000>;
++ };
++
++ CLUSTER_SLEEP_AOSS_SLEEP: cluster-sleep-1 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x4100a344>;
+ entry-latency-us = <3263>;
+@@ -581,7 +589,7 @@ CPU_PD7: power-domain-cpu7 {
+
+ CLUSTER_PD: power-domain-cpu-cluster0 {
+ #power-domain-cells = <0>;
+- domain-idle-states = <&CLUSTER_SLEEP_0>;
++ domain-idle-states = <&CLUSTER_SLEEP_APSS_OFF &CLUSTER_SLEEP_AOSS_SLEEP>;
+ };
+ };
+
+@@ -781,6 +789,7 @@ gcc: clock-controller@100000 {
+ clock-names = "bi_tcxo",
+ "bi_tcxo_ao",
+ "sleep_clk";
++ power-domains = <&rpmhpd SC8180X_CX>;
+ };
+
+ qupv3_id_0: geniqup@8c0000 {
+@@ -1749,23 +1758,29 @@ pcie0: pci@1c00000 {
+ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+- phys = <&pcie0_lane>;
++ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
++ dma-coherent;
+
+ status = "disabled";
+ };
+
+- pcie0_phy: phy-wrapper@1c06000 {
++ pcie0_phy: phy@1c06000 {
+ compatible = "qcom,sc8180x-qmp-pcie-phy";
+- reg = <0 0x1c06000 0 0x1c0>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x01c06000 0 0x1000>;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_0_CLKREF_CLK>,
+- <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "refgen";
++ <&gcc GCC_PCIE0_PHY_REFGEN_CLK>,
++ <&gcc GCC_PCIE_0_PIPE_CLK>;
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen",
++ "pipe";
++ #clock-cells = <0>;
++ clock-output-names = "pcie_0_pipe_clk";
++ #phy-cells = <0>;
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+@@ -1774,21 +1789,6 @@ pcie0_phy: phy-wrapper@1c06000 {
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+-
+- pcie0_lane: phy@1c06200 {
+- reg = <0 0x1c06200 0 0x170>, /* tx0 */
+- <0 0x1c06400 0 0x200>, /* rx0 */
+- <0 0x1c06a00 0 0x1f0>, /* pcs */
+- <0 0x1c06600 0 0x170>, /* tx1 */
+- <0 0x1c06800 0 0x200>, /* rx1 */
+- <0 0x1c06e00 0 0xf4>; /* pcs_com */
+- clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+- clock-names = "pipe0";
+-
+- #clock-cells = <0>;
+- clock-output-names = "pcie_0_pipe_clk";
+- #phy-cells = <0>;
+- };
+ };
+
+ pcie3: pci@1c08000 {
+@@ -1853,26 +1853,33 @@ pcie3: pci@1c08000 {
+ power-domains = <&gcc PCIE_3_GDSC>;
+
+ interconnects = <&aggre2_noc MASTER_PCIE_3 0 &mc_virt SLAVE_EBI_CH0 0>,
+- <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
++ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_3 0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+- phys = <&pcie3_lane>;
++ phys = <&pcie3_phy>;
+ phy-names = "pciephy";
++ dma-coherent;
+
+ status = "disabled";
+ };
+
+- pcie3_phy: phy-wrapper@1c0c000 {
++ pcie3_phy: phy@1c0c000 {
+ compatible = "qcom,sc8180x-qmp-pcie-phy";
+- reg = <0 0x1c0c000 0 0x1c0>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x01c0c000 0 0x1000>;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_3_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_3_CLKREF_CLK>,
+- <&gcc GCC_PCIE2_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "refgen";
++ <&gcc GCC_PCIE3_PHY_REFGEN_CLK>,
++ <&gcc GCC_PCIE_3_PIPE_CLK>;
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen",
++ "pipe";
++ #clock-cells = <0>;
++ clock-output-names = "pcie_3_pipe_clk";
++
++ #phy-cells = <0>;
+
+ resets = <&gcc GCC_PCIE_3_PHY_BCR>;
+ reset-names = "phy";
+@@ -1881,21 +1888,6 @@ pcie3_phy: phy-wrapper@1c0c000 {
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+-
+- pcie3_lane: phy@1c0c200 {
+- reg = <0 0x1c0c200 0 0x170>, /* tx0 */
+- <0 0x1c0c400 0 0x200>, /* rx0 */
+- <0 0x1c0ca00 0 0x1f0>, /* pcs */
+- <0 0x1c0c600 0 0x170>, /* tx1 */
+- <0 0x1c0c800 0 0x200>, /* rx1 */
+- <0 0x1c0ce00 0 0xf4>; /* pcs_com */
+- clocks = <&gcc GCC_PCIE_3_PIPE_CLK>;
+- clock-names = "pipe0";
+-
+- #clock-cells = <0>;
+- clock-output-names = "pcie_3_pipe_clk";
+- #phy-cells = <0>;
+- };
+ };
+
+ pcie1: pci@1c10000 {
+@@ -1960,26 +1952,33 @@ pcie1: pci@1c10000 {
+ power-domains = <&gcc PCIE_1_GDSC>;
+
+ interconnects = <&aggre2_noc MASTER_PCIE_1 0 &mc_virt SLAVE_EBI_CH0 0>,
+- <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
++ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_1 0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+- phys = <&pcie1_lane>;
++ phys = <&pcie1_phy>;
+ phy-names = "pciephy";
++ dma-coherent;
+
+ status = "disabled";
+ };
+
+- pcie1_phy: phy-wrapper@1c16000 {
++ pcie1_phy: phy@1c16000 {
+ compatible = "qcom,sc8180x-qmp-pcie-phy";
+- reg = <0 0x1c16000 0 0x1c0>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x01c16000 0 0x1000>;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_1_CLKREF_CLK>,
+- <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "refgen";
++ <&gcc GCC_PCIE1_PHY_REFGEN_CLK>,
++ <&gcc GCC_PCIE_1_PIPE_CLK>;
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen",
++ "pipe";
++ #clock-cells = <0>;
++ clock-output-names = "pcie_1_pipe_clk";
++
++ #phy-cells = <0>;
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+@@ -1988,21 +1987,6 @@ pcie1_phy: phy-wrapper@1c16000 {
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+-
+- pcie1_lane: phy@1c0e200 {
+- reg = <0 0x1c16200 0 0x170>, /* tx0 */
+- <0 0x1c16400 0 0x200>, /* rx0 */
+- <0 0x1c16a00 0 0x1f0>, /* pcs */
+- <0 0x1c16600 0 0x170>, /* tx1 */
+- <0 0x1c16800 0 0x200>, /* rx1 */
+- <0 0x1c16e00 0 0xf4>; /* pcs_com */
+- clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+- clock-names = "pipe0";
+- #clock-cells = <0>;
+- clock-output-names = "pcie_1_pipe_clk";
+-
+- #phy-cells = <0>;
+- };
+ };
+
+ pcie2: pci@1c18000 {
+@@ -2067,26 +2051,33 @@ pcie2: pci@1c18000 {
+ power-domains = <&gcc PCIE_2_GDSC>;
+
+ interconnects = <&aggre2_noc MASTER_PCIE_2 0 &mc_virt SLAVE_EBI_CH0 0>,
+- <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>;
++ <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_2 0>;
+ interconnect-names = "pcie-mem", "cpu-pcie";
+
+- phys = <&pcie2_lane>;
++ phys = <&pcie2_phy>;
+ phy-names = "pciephy";
++ dma-coherent;
+
+ status = "disabled";
+ };
+
+- pcie2_phy: phy-wrapper@1c1c000 {
++ pcie2_phy: phy@1c1c000 {
+ compatible = "qcom,sc8180x-qmp-pcie-phy";
+- reg = <0 0x1c1c000 0 0x1c0>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x01c1c000 0 0x1000>;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_2_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_2_CLKREF_CLK>,
+- <&gcc GCC_PCIE2_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "refgen";
++ <&gcc GCC_PCIE2_PHY_REFGEN_CLK>,
++ <&gcc GCC_PCIE_2_PIPE_CLK>;
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen",
++ "pipe";
++ #clock-cells = <0>;
++ clock-output-names = "pcie_2_pipe_clk";
++
++ #phy-cells = <0>;
+
+ resets = <&gcc GCC_PCIE_2_PHY_BCR>;
+ reset-names = "phy";
+@@ -2095,22 +2086,6 @@ pcie2_phy: phy-wrapper@1c1c000 {
+ assigned-clock-rates = <100000000>;
+
+ status = "disabled";
+-
+- pcie2_lane: phy@1c0e200 {
+- reg = <0 0x1c1c200 0 0x170>, /* tx0 */
+- <0 0x1c1c400 0 0x200>, /* rx0 */
+- <0 0x1c1ca00 0 0x1f0>, /* pcs */
+- <0 0x1c1c600 0 0x170>, /* tx1 */
+- <0 0x1c1c800 0 0x200>, /* rx1 */
+- <0 0x1c1ce00 0 0xf4>; /* pcs_com */
+- clocks = <&gcc GCC_PCIE_2_PIPE_CLK>;
+- clock-names = "pipe0";
+-
+- #clock-cells = <0>;
+- clock-output-names = "pcie_2_pipe_clk";
+-
+- #phy-cells = <0>;
+- };
+ };
+
+ ufs_mem_hc: ufshc@1d84000 {
+@@ -2118,7 +2093,7 @@ ufs_mem_hc: ufshc@1d84000 {
+ "jedec,ufs-2.0";
+ reg = <0 0x01d84000 0 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+- phys = <&ufs_mem_phy_lanes>;
++ phys = <&ufs_mem_phy>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ #reset-cells = <1>;
+@@ -2157,10 +2132,8 @@ ufs_mem_hc: ufshc@1d84000 {
+
+ ufs_mem_phy: phy-wrapper@1d87000 {
+ compatible = "qcom,sc8180x-qmp-ufs-phy";
+- reg = <0 0x01d87000 0 0x1c0>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x01d87000 0 0x1000>;
++
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ clock-names = "ref",
+@@ -2168,16 +2141,12 @@ ufs_mem_phy: phy-wrapper@1d87000 {
+
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+- status = "disabled";
+
+- ufs_mem_phy_lanes: phy@1d87400 {
+- reg = <0 0x01d87400 0 0x108>,
+- <0 0x01d87600 0 0x1e0>,
+- <0 0x01d87c00 0 0x1dc>,
+- <0 0x01d87800 0 0x108>,
+- <0 0x01d87a00 0 0x1e0>;
+- #phy-cells = <0>;
+- };
++ power-domains = <&gcc UFS_PHY_GDSC>;
++
++ #phy-cells = <0>;
++
++ status = "disabled";
+ };
+
+ ipa_virt: interconnect@1e00000 {
+@@ -2576,11 +2545,14 @@ usb_sec_dpphy: dp-phy@88ef200 {
+
+ system-cache-controller@9200000 {
+ compatible = "qcom,sc8180x-llcc";
+- reg = <0 0x09200000 0 0x50000>, <0 0x09280000 0 0x50000>,
+- <0 0x09300000 0 0x50000>, <0 0x09380000 0 0x50000>,
+- <0 0x09600000 0 0x50000>;
++ reg = <0 0x09200000 0 0x58000>, <0 0x09280000 0 0x58000>,
++ <0 0x09300000 0 0x58000>, <0 0x09380000 0 0x58000>,
++ <0 0x09400000 0 0x58000>, <0 0x09480000 0 0x58000>,
++ <0 0x09500000 0 0x58000>, <0 0x09580000 0 0x58000>,
++ <0 0x09600000 0 0x58000>;
+ reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
+- "llcc3_base", "llcc_broadcast_base";
++ "llcc3_base", "llcc4_base", "llcc5_base",
++ "llcc6_base", "llcc7_base", "llcc_broadcast_base";
+ interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+@@ -2594,10 +2566,10 @@ gem_noc: interconnect@9680000 {
+ usb_prim: usb@a6f8800 {
+ compatible = "qcom,sc8180x-dwc3", "qcom,dwc3";
+ reg = <0 0x0a6f8800 0 0x400>;
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+@@ -2668,10 +2640,10 @@ usb_sec: usb@a8f8800 {
+ "xo";
+ resets = <&gcc GCC_USB30_SEC_BCR>;
+ power-domains = <&gcc USB30_SEC_GDSC>;
+- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 40 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -2756,10 +2728,8 @@ mdss_mdp: mdp@ae01000 {
+ "core",
+ "vsync";
+
+- assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
+- <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+- assigned-clock-rates = <460000000>,
+- <19200000>;
++ assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
++ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+ power-domains = <&rpmhpd SC8180X_MMCX>;
+@@ -3219,7 +3189,7 @@ edp_phy: phy@aec2a00 {
+ <&dispcc DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "aux", "cfg_ahb";
+
+- power-domains = <&dispcc MDSS_GDSC>;
++ power-domains = <&rpmhpd SC8180X_MX>;
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+@@ -3245,6 +3215,7 @@ dispcc: clock-controller@af00000 {
+ "edp_phy_pll_link_clk",
+ "edp_phy_pll_vco_div_clk";
+ power-domains = <&rpmhpd SC8180X_MMCX>;
++ required-opps = <&rpmhpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+@@ -3283,7 +3254,7 @@ tsens1: thermal-sensor@c265000 {
+
+ aoss_qmp: power-controller@c300000 {
+ compatible = "qcom,sc8180x-aoss-qmp", "qcom,aoss-qmp";
+- reg = <0x0 0x0c300000 0x0 0x100000>;
++ reg = <0x0 0x0c300000 0x0 0x400>;
+ interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&apss_shared 0>;
+
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+index e4861c61a65bdc..ffc4406422ae2f 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+@@ -458,6 +458,8 @@ mdss0_dp3_out: endpoint {
+ };
+
+ &mdss0_dp3_phy {
++ compatible = "qcom,sc8280xp-edp-phy";
++
+ vdda-phy-supply = <&vreg_l6b>;
+ vdda-pll-supply = <&vreg_l3b>;
+
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index 38edaf51aa3457..5c2894fcfa4a08 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -82,6 +82,9 @@ switch-lid {
+ leds {
+ compatible = "gpio-leds";
+
++ pinctrl-names = "default";
++ pinctrl-0 = <&cam_indicator_en>;
++
+ led-camera-indicator {
+ label = "white:camera-indicator";
+ function = LED_FUNCTION_INDICATOR;
+@@ -601,6 +604,7 @@ mdss0_dp3_out: endpoint {
+ };
+
+ &mdss0_dp3_phy {
++ compatible = "qcom,sc8280xp-edp-phy";
+ vdda-phy-supply = <&vreg_l6b>;
+ vdda-pll-supply = <&vreg_l3b>;
+
+@@ -615,15 +619,16 @@ &i2c4 {
+
+ status = "okay";
+
+- /* FIXME: verify */
+ touchscreen@10 {
+- compatible = "hid-over-i2c";
++ compatible = "elan,ekth5015m", "elan,ekth6915";
+ reg = <0x10>;
+
+- hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 175 IRQ_TYPE_LEVEL_LOW>;
+- vdd-supply = <&vreg_misc_3p3>;
+- vddl-supply = <&vreg_s10b>;
++ reset-gpios = <&tlmm 99 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
++ no-reset-on-power-off;
++
++ vcc33-supply = <&vreg_misc_3p3>;
++ vccio-supply = <&vreg_misc_3p3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts0_default>;
+@@ -717,6 +722,8 @@ &pcie3a_phy {
+ };
+
+ &pcie4 {
++ max-link-speed = <2>;
++
+ perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
+
+@@ -1277,6 +1284,13 @@ hstp-sw-ctrl-pins {
+ };
+ };
+
++ cam_indicator_en: cam-indicator-en-state {
++ pins = "gpio28";
++ function = "gpio";
++ drive-strength = <2>;
++ bias-disable;
++ };
++
+ edp_reg_en: edp-reg-en-state {
+ pins = "gpio25";
+ function = "gpio";
+@@ -1438,8 +1452,8 @@ int-n-pins {
+ reset-n-pins {
+ pins = "gpio99";
+ function = "gpio";
+- output-high;
+- drive-strength = <16>;
++ drive-strength = <2>;
++ bias-disable;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index cad59af7ccef1b..6425c74edd60cc 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1773,6 +1773,7 @@ pcie4: pcie@1c00000 {
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_4_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ phys = <&pcie4_phy>;
+ phy-names = "pciephy";
+@@ -1797,6 +1798,7 @@ pcie4_phy: phy@1c06000 {
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_4_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_PCIE_4_PHY_BCR>;
+ reset-names = "phy";
+@@ -1871,6 +1873,7 @@ pcie3b: pcie@1c08000 {
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_3B_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ phys = <&pcie3b_phy>;
+ phy-names = "pciephy";
+@@ -1895,6 +1898,7 @@ pcie3b_phy: phy@1c0e000 {
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_3B_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_PCIE_3B_PHY_BCR>;
+ reset-names = "phy";
+@@ -1969,6 +1973,7 @@ pcie3a: pcie@1c10000 {
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_3A_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ phys = <&pcie3a_phy>;
+ phy-names = "pciephy";
+@@ -1994,6 +1999,7 @@ pcie3a_phy: phy@1c14000 {
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_3A_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_PCIE_3A_PHY_BCR>;
+ reset-names = "phy";
+@@ -2070,6 +2076,7 @@ pcie2b: pcie@1c18000 {
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_2B_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ phys = <&pcie2b_phy>;
+ phy-names = "pciephy";
+@@ -2094,6 +2101,7 @@ pcie2b_phy: phy@1c1e000 {
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_2B_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_PCIE_2B_PHY_BCR>;
+ reset-names = "phy";
+@@ -2168,6 +2176,7 @@ pcie2a: pcie@1c20000 {
+ reset-names = "pci";
+
+ power-domains = <&gcc PCIE_2A_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ phys = <&pcie2a_phy>;
+ phy-names = "pciephy";
+@@ -2193,6 +2202,7 @@ pcie2a_phy: phy@1c24000 {
+ assigned-clock-rates = <100000000>;
+
+ power-domains = <&gcc PCIE_2A_GDSC>;
++ required-opps = <&rpmhpd_opp_nom>;
+
+ resets = <&gcc GCC_PCIE_2A_PHY_BCR>;
+ reset-names = "phy";
+@@ -4225,7 +4235,7 @@ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sc8280xp", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index ec6003212c4d5f..0f3f57fb860ec8 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -1258,6 +1258,7 @@ usb3_dwc3: usb@a800000 {
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
++ snps,parkmode-disable-ss-quirk;
+
+ /*
+ * SDM630 technically supports USB3 but I
+diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+index 84cd2e39266fed..730c8351bcaa33 100644
+--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+@@ -1295,10 +1295,10 @@ usb_1: usb@a6f8800 {
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -1328,7 +1328,8 @@ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,sdm670-pdc", "qcom,pdc";
+ reg = <0 0x0b220000 0 0x30000>;
+ qcom,pdc-ranges = <0 480 40>, <41 521 7>, <49 529 4>,
+- <54 534 24>, <79 559 30>, <115 630 7>;
++ <54 534 24>, <79 559 15>, <94 609 15>,
++ <115 630 7>;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index f86e7acdfd99f4..0ab5e8f53ac9f8 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -143,16 +143,20 @@ panel_in_edp: endpoint {
+ };
+ };
+
++&cpufreq_hw {
++ /delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */
++};
++
+ &psci {
+- /delete-node/ cpu0;
+- /delete-node/ cpu1;
+- /delete-node/ cpu2;
+- /delete-node/ cpu3;
+- /delete-node/ cpu4;
+- /delete-node/ cpu5;
+- /delete-node/ cpu6;
+- /delete-node/ cpu7;
+- /delete-node/ cpu-cluster0;
++ /delete-node/ power-domain-cpu0;
++ /delete-node/ power-domain-cpu1;
++ /delete-node/ power-domain-cpu2;
++ /delete-node/ power-domain-cpu3;
++ /delete-node/ power-domain-cpu4;
++ /delete-node/ power-domain-cpu5;
++ /delete-node/ power-domain-cpu6;
++ /delete-node/ power-domain-cpu7;
++ /delete-node/ power-domain-cluster;
+ };
+
+ &cpus {
+@@ -275,6 +279,14 @@ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ };
+
++&lmh_cluster0 {
++ status = "disabled";
++};
++
++&lmh_cluster1 {
++ status = "disabled";
++};
++
+ /*
+ * Reserved memory changes
+ *
+@@ -338,6 +350,8 @@ flash@0 {
+
+
+ &apps_rsc {
++ /delete-property/ power-domains;
++
+ regulators-0 {
+ compatible = "qcom,pm8998-rpmh-regulators";
+ qcom,pmic-id = "a";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c7eba6c491be2b..0a891a0122446c 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -67,8 +67,8 @@ led-0 {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pm8998_gpios 13 GPIO_ACTIVE_HIGH>;
+- linux,default-trigger = "panic-indicator";
+ default-state = "off";
++ panic-indicator;
+ };
+
+ led-1 {
+@@ -580,7 +580,7 @@ &mss_pil {
+ &pcie0 {
+ status = "okay";
+ perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
+- enable-gpio = <&tlmm 134 GPIO_ACTIVE_HIGH>;
++ wake-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>;
+
+ vddpe-3v3-supply = <&pcie0_3p3v_dual>;
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+index b3c27a5247429f..1516113391edc3 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+@@ -716,6 +716,8 @@ &wifi {
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
++
++ qcom,snoc-host-cap-8bit-quirk;
+ };
+
+ /* PINCTRL - additions to nodes defined in sdm845.dtsi */
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+index 122c7128dea9da..9322b92a1e6825 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+@@ -485,13 +485,13 @@ &pmi8998_charger {
+ };
+
+ &q6afedai {
+- qi2s@22 {
+- reg = <22>;
++ dai@22 {
++ reg = <QUATERNARY_MI2S_RX>;
+ qcom,sd-lines = <1>;
+ };
+
+- qi2s@23 {
+- reg = <23>;
++ dai@23 {
++ reg = <QUATERNARY_MI2S_TX>;
+ qcom,sd-lines = <0>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 055ca80c007578..dcdc8a0cd1819f 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -18,6 +18,7 @@
+ #include <dt-bindings/interconnect/qcom,osm-l3.h>
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/phy/phy-qcom-qmp.h>
+ #include <dt-bindings/phy/phy-qcom-qusb2.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ #include <dt-bindings/reset/qcom,sdm845-aoss.h>
+@@ -2634,6 +2635,8 @@ ufs_mem_phy: phy@1d87000 {
+ clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
++ power-domains = <&gcc UFS_PHY_GDSC>;
++
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+ status = "disabled";
+@@ -3363,8 +3366,8 @@ slpi_pas: remoteproc@5c00000 {
+
+ qcom,qmp = <&aoss_qmp>;
+
+- power-domains = <&rpmhpd SDM845_CX>,
+- <&rpmhpd SDM845_MX>;
++ power-domains = <&rpmhpd SDM845_LCX>,
++ <&rpmhpd SDM845_LMX>;
+ power-domain-names = "lcx", "lmx";
+
+ memory-region = <&slpi_mem>;
+@@ -3555,11 +3558,8 @@ etf_out: endpoint {
+ };
+
+ in-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+- port@1 {
+- reg = <1>;
++ port {
+ etf_in: endpoint {
+ remote-endpoint =
+ <&merge_funnel_out>;
+@@ -3984,80 +3984,54 @@ usb_2_hsphy: phy@88e3000 {
+ nvmem-cells = <&qusb2s_hstx_trim>;
+ };
+
+- usb_1_qmpphy: phy@88e9000 {
++ usb_1_qmpphy: phy@88e8000 {
+ compatible = "qcom,sdm845-qmp-usb3-dp-phy";
+- reg = <0 0x088e9000 0 0x18c>,
+- <0 0x088e8000 0 0x38>,
+- <0 0x088ea000 0 0x40>;
++ reg = <0 0x088e8000 0 0x3000>;
+ status = "disabled";
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "com_aux";
++ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
++ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
++ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
++ clock-names = "aux",
++ "ref",
++ "com_aux",
++ "usb3_pipe",
++ "cfg_ahb";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+- usb_1_ssphy: usb3-phy@88e9200 {
+- reg = <0 0x088e9200 0 0x128>,
+- <0 0x088e9400 0 0x200>,
+- <0 0x088e9c00 0 0x218>,
+- <0 0x088e9600 0 0x128>,
+- <0 0x088e9800 0 0x200>,
+- <0 0x088e9a00 0 0x100>;
+- #clock-cells = <0>;
+- #phy-cells = <0>;
+- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_phy_pipe_clk_src";
+- };
+-
+- dp_phy: dp-phy@88ea200 {
+- reg = <0 0x088ea200 0 0x200>,
+- <0 0x088ea400 0 0x200>,
+- <0 0x088eaa00 0 0x200>,
+- <0 0x088ea600 0 0x200>,
+- <0 0x088ea800 0 0x200>;
+- #clock-cells = <1>;
+- #phy-cells = <0>;
+- };
++ #clock-cells = <1>;
++ #phy-cells = <1>;
+ };
+
+ usb_2_qmpphy: phy@88eb000 {
+ compatible = "qcom,sdm845-qmp-usb3-uni-phy";
+- reg = <0 0x088eb000 0 0x18c>;
+- status = "disabled";
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x088eb000 0 0x1000>;
+
+ clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_SEC_CLKREF_CLK>,
+- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
+- clock-names = "aux", "cfg_ahb", "ref", "com_aux";
++ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
++ <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "com_aux",
++ "pipe";
++ clock-output-names = "usb3_uni_phy_pipe_clk_src";
++ #clock-cells = <0>;
++ #phy-cells = <0>;
+
+- resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
+- <&gcc GCC_USB3_PHY_SEC_BCR>;
+- reset-names = "phy", "common";
++ resets = <&gcc GCC_USB3_PHY_SEC_BCR>,
++ <&gcc GCC_USB3PHY_PHY_SEC_BCR>;
++ reset-names = "phy",
++ "phy_phy";
+
+- usb_2_ssphy: phy@88eb200 {
+- reg = <0 0x088eb200 0 0x128>,
+- <0 0x088eb400 0 0x1fc>,
+- <0 0x088eb800 0 0x218>,
+- <0 0x088eb600 0 0x70>;
+- #clock-cells = <0>;
+- #phy-cells = <0>;
+- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+- clock-names = "pipe0";
+- clock-output-names = "usb3_uni_phy_pipe_clk_src";
+- };
++ status = "disabled";
+ };
+
+ usb_1: usb@a6f8800 {
+@@ -4084,10 +4058,10 @@ usb_1: usb@a6f8800 {
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -4106,7 +4080,8 @@ usb_1_dwc3: usb@a600000 {
+ iommus = <&apps_smmu 0x740 0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+- phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
++ snps,parkmode-disable-ss-quirk;
++ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+@@ -4135,10 +4110,10 @@ usb_2: usb@a8f8800 {
+ <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 7 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -4157,7 +4132,8 @@ usb_2_dwc3: usb@a800000 {
+ iommus = <&apps_smmu 0x760 0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+- phys = <&usb_2_hsphy>, <&usb_2_ssphy>;
++ snps,parkmode-disable-ss-quirk;
++ phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+@@ -4574,8 +4550,9 @@ mdss_dp: displayport-controller@ae90000 {
+ "ctrl_link_iface", "stream_pixel";
+ assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
+- assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
+- phys = <&dp_phy>;
++ assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
++ phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
+ phy-names = "dp";
+
+ operating-points-v2 = <&dp_opp_table>;
+@@ -4913,8 +4890,8 @@ dispcc: clock-controller@af00000 {
+ <&mdss_dsi0_phy 1>,
+ <&mdss_dsi1_phy 0>,
+ <&mdss_dsi1_phy 1>,
+- <&dp_phy 0>,
+- <&dp_phy 1>;
++ <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
++ <&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
+ clock-names = "bi_tcxo",
+ "gcc_disp_gpll0_clk_src",
+ "gcc_disp_gpll0_div_clk_src",
+@@ -5118,7 +5095,7 @@ watchdog@17980000 {
+ compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt";
+ reg = <0 0x17980000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ apss_shared: mailbox@17990000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index 92a812b5f4238e..fe5c12da666e40 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -488,6 +488,7 @@ ecsh: hid@5c {
+ &ipa {
+ qcom,gsi-loader = "self";
+ memory-region = <&ipa_fw_mem>;
++ firmware-name = "qcom/sdm850/LENOVO/81JL/ipa_fws.elf";
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+index 10d15871f2c48e..a14e0650c4a8aa 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts
++++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
+@@ -44,7 +44,7 @@ vreg_bob_3p3: pmx75-bob {
+ };
+
+ &apps_rsc {
+- pmx75-rpmh-regulators {
++ regulators-0 {
+ compatible = "qcom,pmx75-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index 839c6035124034..821db9b8518557 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -591,6 +591,11 @@ tcsr_mutex: hwlock@340000 {
+ #hwlock-cells = <1>;
+ };
+
++ tcsr_regs: syscon@3c0000 {
++ compatible = "qcom,sm6115-tcsr", "syscon";
++ reg = <0x0 0x003c0000 0x0 0x40000>;
++ };
++
+ tlmm: pinctrl@500000 {
+ compatible = "qcom,sm6115-tlmm";
+ reg = <0x0 0x00500000 0x0 0x400000>,
+@@ -856,6 +861,8 @@ usb_qmpphy: phy@1615000 {
+
+ #phy-cells = <0>;
+
++ qcom,tcsr-reg = <&tcsr_regs 0xb244>;
++
+ status = "disabled";
+ };
+
+@@ -1036,6 +1043,8 @@ ufs_mem_phy: phy@4807000 {
+ clocks = <&gcc GCC_UFS_CLKREF_CLK>, <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ clock-names = "ref", "ref_aux";
+
++ power-domains = <&gcc GCC_UFS_PHY_GDSC>;
++
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index d7c1a40617c647..07081088ba1463 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -1165,6 +1165,10 @@ usb3: usb@4ef8800 {
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <66666667>;
+
++ interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "hs_phy_irq", "ss_phy_irq";
++
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+ qcom,select-utmi-as-pipe-clk;
+ status = "disabled";
+@@ -1208,7 +1212,7 @@ spmi_bus: spmi@1c40000 {
+
+ apps_smmu: iommu@c600000 {
+ compatible = "qcom,sm6125-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+- reg = <0xc600000 0x80000>;
++ reg = <0x0c600000 0x80000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 8fd6f4d0349001..2efceb49a3218e 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -1197,6 +1197,8 @@ ufs_mem_phy: phy@1d87000 {
+ clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
++ power-domains = <&gcc UFS_PHY_GDSC>;
++
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+
+@@ -1297,6 +1299,7 @@ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "adsp";
++ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+@@ -1557,6 +1560,7 @@ fastrpc {
+ compatible = "qcom,fastrpc";
+ qcom,glink-channels = "fastrpcglink-apps-dsp";
+ label = "cdsp";
++ qcom,non-secure-domain;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+@@ -1864,6 +1868,7 @@ usb_1_dwc3: usb@a600000 {
+ snps,dis_enblslpm_quirk;
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
++ snps,parkmode-disable-ss-quirk;
+ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+@@ -2524,7 +2529,7 @@ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sm6350", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+index e7ff55443da702..e56f7ea4ebc6ae 100644
+--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+@@ -311,6 +311,25 @@ scm {
+ };
+ };
+
++ mpm: interrupt-controller {
++ compatible = "qcom,mpm";
++ qcom,rpm-msg-ram = <&apss_mpm>;
++ interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
++ mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_SMP2P>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ #power-domain-cells = <0>;
++ interrupt-parent = <&intc>;
++ qcom,mpm-pin-count = <96>;
++ qcom,mpm-pin-map = <5 296>, /* Soundwire wake_irq */
++ <12 422>, /* DWC3 ss_phy_irq */
++ <86 183>, /* MPM wake, SPMI */
++ <89 314>, /* TSENS0 0C */
++ <90 315>, /* TSENS1 0C */
++ <93 164>, /* DWC3 dm_hs_phy_irq */
++ <94 165>; /* DWC3 dp_hs_phy_irq */
++ };
++
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+@@ -486,6 +505,7 @@ CPU_PD7: power-domain-cpu7 {
+
+ CLUSTER_PD: power-domain-cpu-cluster0 {
+ #power-domain-cells = <0>;
++ power-domains = <&mpm>;
+ domain-idle-states = <&CLUSTER_SLEEP_0>;
+ };
+ };
+@@ -808,7 +828,7 @@ tlmm: pinctrl@500000 {
+ reg = <0 0x00500000 0 0x800000>;
+ interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&tlmm 0 0 157>;
+- /* TODO: Hook up MPM as wakeup-parent when it's there */
++ wakeup-parent = <&mpm>;
+ interrupt-controller;
+ gpio-controller;
+ #interrupt-cells = <2>;
+@@ -930,7 +950,7 @@ spmi_bus: spmi@1c40000 {
+ <0 0x01c0a000 0 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&mpm 86 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+@@ -962,8 +982,15 @@ tsens1: thermal-sensor@4413000 {
+ };
+
+ rpm_msg_ram: sram@45f0000 {
+- compatible = "qcom,rpm-msg-ram";
++ compatible = "qcom,rpm-msg-ram", "mmio-sram";
+ reg = <0 0x045f0000 0 0x7000>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0 0x0 0x045f0000 0x7000>;
++
++ apss_mpm: sram@1b8 {
++ reg = <0x1b8 0x48>;
++ };
+ };
+
+ sram@4690000 {
+@@ -1360,10 +1387,10 @@ usb_1: usb@4ef8800 {
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <133333333>;
+
+- interrupts = <GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 302 IRQ_TYPE_LEVEL_HIGH>,
++ <&mpm 12 IRQ_TYPE_LEVEL_HIGH>,
++ <&mpm 93 IRQ_TYPE_EDGE_BOTH>,
++ <&mpm 94 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+index bb161b536da466..f4c6e1309a7e99 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts
+@@ -127,8 +127,6 @@ vdda_qrefs_0p875_5:
+ vdda_sp_sensor:
+ vdda_ufs_2ln_core_1:
+ vdda_ufs_2ln_core_2:
+- vdda_usb_ss_dp_core_1:
+- vdda_usb_ss_dp_core_2:
+ vdda_qlink_lv:
+ vdda_qlink_lv_ck:
+ vreg_l5a_0p875: ldo5 {
+@@ -210,6 +208,12 @@ vreg_l17a_3p0: ldo17 {
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
++
++ vreg_l18a_0p8: ldo18 {
++ regulator-min-microvolt = <880000>;
++ regulator-max-microvolt = <880000>;
++ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++ };
+ };
+
+ regulators-1 {
+@@ -445,13 +449,13 @@ &usb_2_hsphy {
+ &usb_1_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++ vdda-pll-supply = <&vreg_l18a_0p8>;
+ };
+
+ &usb_2_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l3c_1p2>;
+- vdda-pll-supply = <&vdda_usb_ss_dp_core_1>;
++ vdda-pll-supply = <&vreg_l5a_0p875>;
+ };
+
+ &usb_1 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 06c53000bb74d4..73ef228ff26891 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1876,8 +1876,8 @@ pcie0: pci@1c00000 {
+ phys = <&pcie0_lane>;
+ phy-names = "pciephy";
+
+- perst-gpio = <&tlmm 35 GPIO_ACTIVE_HIGH>;
+- enable-gpio = <&tlmm 37 GPIO_ACTIVE_HIGH>;
++ perst-gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>;
++ wake-gpios = <&tlmm 37 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_default_state>;
+@@ -1893,8 +1893,12 @@ pcie0_phy: phy@1c06000 {
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
++ <&gcc GCC_PCIE_0_CLKREF_CLK>,
+ <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "refgen";
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen";
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "phy";
+@@ -1974,7 +1978,7 @@ pcie1: pci@1c08000 {
+ phys = <&pcie1_lane>;
+ phy-names = "pciephy";
+
+- perst-gpio = <&tlmm 102 GPIO_ACTIVE_HIGH>;
++ perst-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&tlmm 104 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+@@ -1991,8 +1995,12 @@ pcie1_phy: phy@1c0e000 {
+ ranges;
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
++ <&gcc GCC_PCIE_1_CLKREF_CLK>,
+ <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
+- clock-names = "aux", "cfg_ahb", "refgen";
++ clock-names = "aux",
++ "cfg_ahb",
++ "ref",
++ "refgen";
+
+ resets = <&gcc GCC_PCIE_1_PHY_BCR>;
+ reset-names = "phy";
+@@ -2965,11 +2973,8 @@ replicator1_out: endpoint {
+ };
+
+ in-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+- port@1 {
+- reg = <1>;
++ port {
+ replicator1_in: endpoint {
+ remote-endpoint = <&replicator_out1>;
+ };
+@@ -3584,10 +3589,10 @@ usb_1: usb@a6f8800 {
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -3637,10 +3642,10 @@ usb_2: usb@a8f8800 {
+ <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 7 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 10 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 11 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -3951,6 +3956,7 @@ dispcc: clock-controller@af00000 {
+ "dp_phy_pll_link_clk",
+ "dp_phy_pll_vco_div_clk";
+ power-domains = <&rpmhpd SM8150_MMCX>;
++ required-opps = <&rpmhpd_opp_low_svs>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+@@ -4189,7 +4195,7 @@ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index a4e58ad731c34f..b522d19f3a1327 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2169,7 +2169,7 @@ ufs_mem_hc: ufshc@1d84000 {
+ "jedec,ufs-2.0";
+ reg = <0 0x01d84000 0 0x3000>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+- phys = <&ufs_mem_phy_lanes>;
++ phys = <&ufs_mem_phy>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ #reset-cells = <1>;
+@@ -2217,10 +2217,8 @@ ufs_mem_hc: ufshc@1d84000 {
+
+ ufs_mem_phy: phy@1d87000 {
+ compatible = "qcom,sm8250-qmp-ufs-phy";
+- reg = <0 0x01d87000 0 0x1c0>;
+- #address-cells = <2>;
+- #size-cells = <2>;
+- ranges;
++ reg = <0 0x01d87000 0 0x1000>;
++
+ clock-names = "ref",
+ "ref_aux";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+@@ -2228,16 +2226,12 @@ ufs_mem_phy: phy@1d87000 {
+
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+- status = "disabled";
+
+- ufs_mem_phy_lanes: phy@1d87400 {
+- reg = <0 0x01d87400 0 0x16c>,
+- <0 0x01d87600 0 0x200>,
+- <0 0x01d87c00 0 0x200>,
+- <0 0x01d87800 0 0x16c>,
+- <0 0x01d87a00 0 0x200>;
+- #phy-cells = <0>;
+- };
++ power-domains = <&gcc UFS_PHY_GDSC>;
++
++ #phy-cells = <0>;
++
++ status = "disabled";
+ };
+
+ cryptobam: dma-controller@1dc4000 {
+@@ -2830,11 +2824,8 @@ tpda@6004000 {
+ clock-names = "apb_pclk";
+
+ out-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+- port@0 {
+- reg = <0>;
++ port {
+ tpda_out_funnel_qatb: endpoint {
+ remote-endpoint = <&funnel_qatb_in_tpda>;
+ };
+@@ -2877,11 +2868,7 @@ funnel_qatb_out_funnel_in0: endpoint {
+ };
+
+ in-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- port@0 {
+- reg = <0>;
++ port {
+ funnel_qatb_in_tpda: endpoint {
+ remote-endpoint = <&tpda_out_funnel_qatb>;
+ };
+@@ -3090,11 +3077,8 @@ etf_out: endpoint {
+ };
+
+ in-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+- port@0 {
+- reg = <0>;
++ port {
+ etf_in_funnel_swao_out: endpoint {
+ remote-endpoint = <&funnel_swao_out_etf>;
+ };
+@@ -3178,8 +3162,6 @@ funnel@6c2d000 {
+ clock-names = "apb_pclk";
+
+ out-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+ port {
+ tpdm_mm_out_tpda9: endpoint {
+ remote-endpoint = <&tpda_9_in_tpdm_mm>;
+@@ -3445,11 +3427,7 @@ funnel_apss_merg_out_funnel_in1: endpoint {
+ };
+
+ in-ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- port@0 {
+- reg = <0>;
++ port {
+ funnel_apss_merg_in_funnel_apss: endpoint {
+ remote-endpoint = <&funnel_apss_out_funnel_apss_merg>;
+ };
+@@ -5664,7 +5642,7 @@ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sm8250", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ timer@17c20000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 00604bf7724f42..d4f1b36c7aebe4 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -918,9 +918,9 @@ spi19: spi@894000 {
+ };
+ };
+
+- gpi_dma0: dma-controller@9800000 {
++ gpi_dma0: dma-controller@900000 {
+ compatible = "qcom,sm8350-gpi-dma", "qcom,sm6350-gpi-dma";
+- reg = <0 0x09800000 0 0x60000>;
++ reg = <0 0x00900000 0 0x60000>;
+ interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1731,6 +1731,8 @@ ufs_mem_phy: phy@1d87000 {
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
++ power-domains = <&gcc UFS_PHY_GDSC>;
++
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+ status = "disabled";
+@@ -2020,7 +2022,7 @@ mpss: remoteproc@4080000 {
+ compatible = "qcom,sm8350-mpss-pas";
+ reg = <0x0 0x04080000 0x0 0x4040>;
+
+- interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>,
+@@ -2062,7 +2064,7 @@ slpi: remoteproc@5c00000 {
+ compatible = "qcom,sm8350-slpi-pas";
+ reg = <0 0x05c00000 0 0x4000>;
+
+- interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts-extended = <&pdc 9 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>,
+@@ -2964,7 +2966,7 @@ qup_uart6_default: qup-uart6-default-state {
+ };
+
+ qup_uart18_default: qup-uart18-default-state {
+- pins = "gpio58", "gpio59";
++ pins = "gpio68", "gpio69";
+ function = "qup18";
+ drive-strength = <2>;
+ bias-disable;
+@@ -3206,7 +3208,7 @@ adsp: remoteproc@17300000 {
+ compatible = "qcom,sm8350-adsp-pas";
+ reg = <0 0x17300000 0 0x100>;
+
+- interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+@@ -3511,7 +3513,7 @@ cdsp: remoteproc@98900000 {
+ compatible = "qcom,sm8350-cdsp-pas";
+ reg = <0 0x98900000 0 0x1400000>;
+
+- interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
++ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 2a60cf8bd891c2..a34f460240a076 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -1025,6 +1025,12 @@ uart20: serial@894000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_uart20_default>;
+ interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
++ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
++ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
++ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
++ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
++ interconnect-names = "qup-core",
++ "qup-config";
+ status = "disabled";
+ };
+
+@@ -1417,6 +1423,12 @@ uart7: serial@99c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_uart7_tx>, <&qup_uart7_rx>;
+ interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
++ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
++ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
++ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
++ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
++ interconnect-names = "qup-core",
++ "qup-config";
+ status = "disabled";
+ };
+ };
+@@ -1762,12 +1774,8 @@ pcie0: pci@1c00000 {
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+
+- /*
+- * MSIs for BDF (1:0.0) only works with Device ID 0x5980.
+- * Hence, the IDs are swapped.
+- */
+- msi-map = <0x0 &gic_its 0x5981 0x1>,
+- <0x100 &gic_its 0x5980 0x1>;
++ msi-map = <0x0 &gic_its 0x5980 0x1>,
++ <0x100 &gic_its 0x5981 0x1>;
+ msi-map-mask = <0xff00>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+@@ -1876,12 +1884,8 @@ pcie1: pci@1c08000 {
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+
+- /*
+- * MSIs for BDF (1:0.0) only works with Device ID 0x5a00.
+- * Hence, the IDs are swapped.
+- */
+- msi-map = <0x0 &gic_its 0x5a01 0x1>,
+- <0x100 &gic_its 0x5a00 0x1>;
++ msi-map = <0x0 &gic_its 0x5a00 0x1>,
++ <0x100 &gic_its 0x5a01 0x1>;
+ msi-map-mask = <0xff00>;
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+@@ -2176,7 +2180,7 @@ wsa2macro: codec@31e0000 {
+ #sound-dai-cells = <1>;
+ };
+
+- swr4: soundwire-controller@31f0000 {
++ swr4: soundwire@31f0000 {
+ compatible = "qcom,soundwire-v1.7.0";
+ reg = <0 0x031f0000 0 0x2000>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2224,7 +2228,7 @@ rxmacro: codec@3200000 {
+ #sound-dai-cells = <1>;
+ };
+
+- swr1: soundwire-controller@3210000 {
++ swr1: soundwire@3210000 {
+ compatible = "qcom,soundwire-v1.7.0";
+ reg = <0 0x03210000 0 0x2000>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2291,7 +2295,7 @@ wsamacro: codec@3240000 {
+ #sound-dai-cells = <1>;
+ };
+
+- swr0: soundwire-controller@3250000 {
++ swr0: soundwire@3250000 {
+ compatible = "qcom,soundwire-v1.7.0";
+ reg = <0 0x03250000 0 0x2000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2318,14 +2322,14 @@ swr0: soundwire-controller@3250000 {
+ status = "disabled";
+ };
+
+- swr2: soundwire-controller@33b0000 {
++ swr2: soundwire@33b0000 {
+ compatible = "qcom,soundwire-v1.7.0";
+ reg = <0 0x033b0000 0 0x2000>;
+ interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "core", "wakeup";
+
+- clocks = <&vamacro>;
++ clocks = <&txmacro>;
+ clock-names = "iface";
+ label = "TX";
+
+@@ -4196,6 +4200,8 @@ ufs_mem_phy: phy@1d87000 {
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
+ <&gcc GCC_UFS_0_CLKREF_EN>;
+
++ power-domains = <&gcc UFS_PHY_GDSC>;
++
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+index f29cce5186acd5..c4bfe43471f7ca 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+@@ -743,7 +743,7 @@ &swr2 {
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+- qcom,tx-port-mapping = <1 1 2 3>;
++ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+index 2c09ce8aeafd9b..7a70cc59427979 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+@@ -835,7 +835,7 @@ &swr2 {
+ wcd_tx: codec@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+- qcom,tx-port-mapping = <1 1 2 3>;
++ qcom,tx-port-mapping = <2 2 3 4>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+index d115960bdeec8a..90e6cd239f5699 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+@@ -283,9 +283,9 @@ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "silver-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+- entry-latency-us = <800>;
++ entry-latency-us = <550>;
+ exit-latency-us = <750>;
+- min-residency-us = <4090>;
++ min-residency-us = <6700>;
+ local-timer-stop;
+ };
+
+@@ -294,8 +294,18 @@ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ idle-state-name = "gold-rail-power-collapse";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <600>;
+- exit-latency-us = <1550>;
+- min-residency-us = <4791>;
++ exit-latency-us = <1300>;
++ min-residency-us = <8136>;
++ local-timer-stop;
++ };
++
++ PRIME_CPU_SLEEP_0: cpu-sleep-2-0 {
++ compatible = "arm,idle-state";
++ idle-state-name = "goldplus-rail-power-collapse";
++ arm,psci-suspend-param = <0x40000004>;
++ entry-latency-us = <500>;
++ exit-latency-us = <1350>;
++ min-residency-us = <7480>;
+ local-timer-stop;
+ };
+ };
+@@ -304,17 +314,17 @@ domain-idle-states {
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+- entry-latency-us = <1050>;
+- exit-latency-us = <2500>;
+- min-residency-us = <5309>;
++ entry-latency-us = <750>;
++ exit-latency-us = <2350>;
++ min-residency-us = <9144>;
+ };
+
+ CLUSTER_SLEEP_1: cluster-sleep-1 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x4100c344>;
+- entry-latency-us = <2700>;
+- exit-latency-us = <3500>;
+- min-residency-us = <13959>;
++ entry-latency-us = <2800>;
++ exit-latency-us = <4400>;
++ min-residency-us = <10150>;
+ };
+ };
+ };
+@@ -398,7 +408,7 @@ CPU_PD6: power-domain-cpu6 {
+ CPU_PD7: power-domain-cpu7 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+- domain-idle-states = <&BIG_CPU_SLEEP_0>;
++ domain-idle-states = <&PRIME_CPU_SLEEP_0>;
+ };
+
+ CLUSTER_PD: power-domain-cluster {
+@@ -2034,7 +2044,7 @@ lpass_wsa2macro: codec@6aa0000 {
+ #sound-dai-cells = <1>;
+ };
+
+- swr3: soundwire-controller@6ab0000 {
++ swr3: soundwire@6ab0000 {
+ compatible = "qcom,soundwire-v2.0.0";
+ reg = <0 0x06ab0000 0 0x10000>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2080,7 +2090,7 @@ lpass_rxmacro: codec@6ac0000 {
+ #sound-dai-cells = <1>;
+ };
+
+- swr1: soundwire-controller@6ad0000 {
++ swr1: soundwire@6ad0000 {
+ compatible = "qcom,soundwire-v2.0.0";
+ reg = <0 0x06ad0000 0 0x10000>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2145,7 +2155,7 @@ lpass_wsamacro: codec@6b00000 {
+ #sound-dai-cells = <1>;
+ };
+
+- swr0: soundwire-controller@6b10000 {
++ swr0: soundwire@6b10000 {
+ compatible = "qcom,soundwire-v2.0.0";
+ reg = <0 0x06b10000 0 0x10000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2172,13 +2182,13 @@ swr0: soundwire-controller@6b10000 {
+ status = "disabled";
+ };
+
+- swr2: soundwire-controller@6d30000 {
++ swr2: soundwire@6d30000 {
+ compatible = "qcom,soundwire-v2.0.0";
+ reg = <0 0x06d30000 0 0x10000>;
+ interrupts = <GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "core", "wakeup";
+- clocks = <&lpass_vamacro>;
++ clocks = <&lpass_txmacro>;
+ clock-names = "iface";
+ label = "TX";
+
+@@ -2893,8 +2903,8 @@ usb_1: usb@a6f8800 {
+
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 15 IRQ_TYPE_EDGE_RISING>,
+- <&pdc 14 IRQ_TYPE_EDGE_RISING>;
++ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 14 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+@@ -3007,7 +3017,7 @@ sram@c3f0000 {
+ spmi_bus: spmi@c400000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0 0x0c400000 0 0x3000>,
+- <0 0x0c500000 0 0x4000000>,
++ <0 0x0c500000 0 0x400000>,
+ <0 0x0c440000 0 0x80000>,
+ <0 0x0c4c0000 0 0x20000>,
+ <0 0x0c42d000 0 0x4000>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+index 4e67a03564971b..84e0eb48a1b8ac 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+@@ -658,7 +658,7 @@ channel7 {
+ avb0: ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen4";
+- reg = <0 0xe6800000 0 0x800>;
++ reg = <0 0xe6800000 0 0x1000>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+@@ -706,7 +706,7 @@ avb0: ethernet@e6800000 {
+ avb1: ethernet@e6810000 {
+ compatible = "renesas,etheravb-r8a779a0",
+ "renesas,etheravb-rcar-gen4";
+- reg = <0 0xe6810000 0 0x800>;
++ reg = <0 0xe6810000 0 0x1000>;
+ interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+@@ -2910,6 +2910,9 @@ timer {
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++ "hyp-virt";
+ };
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+index ecdd5a523fa344..555fff9364e35c 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+@@ -1181,7 +1181,10 @@ timer {
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++ "hyp-virt";
+ };
+
+ ufs30_clk: ufs30-clk {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
+index bb4a5270f71b6a..913f70fe6c5cd2 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi
+@@ -187,6 +187,9 @@ &extalr_clk {
+ };
+
+ &hscif0 {
++ pinctrl-0 = <&hscif0_pins>;
++ pinctrl-names = "default";
++
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+index d3d25e077c5d50..87fbc53316906c 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+@@ -161,11 +161,6 @@ L3_CA76_1: cache-controller-1 {
+ };
+ };
+
+- psci {
+- compatible = "arm,psci-1.0", "arm,psci-0.2";
+- method = "smc";
+- };
+-
+ extal_clk: extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+@@ -185,13 +180,24 @@ pmu_a76 {
+ interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+- /* External SCIF clock - to be overridden by boards that provide it */
++ psci {
++ compatible = "arm,psci-1.0", "arm,psci-0.2";
++ method = "smc";
++ };
++
++ /* External SCIF clocks - to be overridden by boards that provide them */
+ scif_clk: scif {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
++ scif_clk2: scif2 {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <0>;
++ };
++
+ soc: soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+@@ -681,7 +687,7 @@ hscif2: serial@e6560000 {
+ interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+ <&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>,
+- <&scif_clk>;
++ <&scif_clk2>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x35>, <&dmac0 0x34>,
+ <&dmac1 0x35>, <&dmac1 0x34>;
+@@ -761,7 +767,7 @@ channel7 {
+ avb0: ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a779g0",
+ "renesas,etheravb-rcar-gen4";
+- reg = <0 0xe6800000 0 0x800>;
++ reg = <0 0xe6800000 0 0x1000>;
+ interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+@@ -808,7 +814,7 @@ avb0: ethernet@e6800000 {
+ avb1: ethernet@e6810000 {
+ compatible = "renesas,etheravb-r8a779g0",
+ "renesas,etheravb-rcar-gen4";
+- reg = <0 0xe6810000 0 0x800>;
++ reg = <0 0xe6810000 0 0x1000>;
+ interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1057,7 +1063,7 @@ scif4: serial@e6c40000 {
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 705>,
+ <&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>,
+- <&scif_clk>;
++ <&scif_clk2>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x59>, <&dmac0 0x58>,
+ <&dmac1 0x59>, <&dmac1 0x58>;
+@@ -1777,6 +1783,37 @@ ssi0: ssi-0 {
+ };
+ };
+
++ mmc0: mmc@ee140000 {
++ compatible = "renesas,sdhi-r8a779g0",
++ "renesas,rcar-gen4-sdhi";
++ reg = <0 0xee140000 0 0x2000>;
++ interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&cpg CPG_MOD 706>,
++ <&cpg CPG_CORE R8A779G0_CLK_SD0H>;
++ clock-names = "core", "clkh";
++ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
++ resets = <&cpg 706>;
++ max-frequency = <200000000>;
++ iommus = <&ipmmu_ds0 32>;
++ status = "disabled";
++ };
++
++ rpc: spi@ee200000 {
++ compatible = "renesas,r8a779g0-rpc-if",
++ "renesas,rcar-gen4-rpc-if";
++ reg = <0 0xee200000 0 0x200>,
++ <0 0x08000000 0 0x04000000>,
++ <0 0xee208000 0 0x100>;
++ reg-names = "regs", "dirmap", "wbuf";
++ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&cpg CPG_MOD 629>;
++ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
++ resets = <&cpg 629>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++ };
++
+ ipmmu_rt0: iommu@ee480000 {
+ compatible = "renesas,ipmmu-r8a779g0",
+ "renesas,rcar-gen4-ipmmu-vmsa";
+@@ -1886,37 +1923,6 @@ ipmmu_mm: iommu@eefc0000 {
+ #iommu-cells = <1>;
+ };
+
+- mmc0: mmc@ee140000 {
+- compatible = "renesas,sdhi-r8a779g0",
+- "renesas,rcar-gen4-sdhi";
+- reg = <0 0xee140000 0 0x2000>;
+- interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&cpg CPG_MOD 706>,
+- <&cpg CPG_CORE R8A779G0_CLK_SD0H>;
+- clock-names = "core", "clkh";
+- power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+- resets = <&cpg 706>;
+- max-frequency = <200000000>;
+- iommus = <&ipmmu_ds0 32>;
+- status = "disabled";
+- };
+-
+- rpc: spi@ee200000 {
+- compatible = "renesas,r8a779g0-rpc-if",
+- "renesas,rcar-gen4-rpc-if";
+- reg = <0 0xee200000 0 0x200>,
+- <0 0x08000000 0 0x04000000>,
+- <0 0xee208000 0 0x100>;
+- reg-names = "regs", "dirmap", "wbuf";
+- interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&cpg CPG_MOD 629>;
+- power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+- resets = <&cpg 629>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+- status = "disabled";
+- };
+-
+ gic: interrupt-controller@f1000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+@@ -2344,6 +2350,9 @@ timer {
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++ "hyp-virt";
+ };
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+index 2ab231572d95ff..71d51febabc1e8 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+@@ -50,7 +50,10 @@ timer {
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++ "hyp-virt";
+ };
+ };
+
+@@ -109,7 +112,13 @@ irqc: interrupt-controller@110a0000 {
+ <SOC_PERIPHERAL_IRQ(473) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(474) IRQ_TYPE_LEVEL_HIGH>,
+ <SOC_PERIPHERAL_IRQ(475) IRQ_TYPE_LEVEL_HIGH>,
+- <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>;
++ <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>,
++ <SOC_PERIPHERAL_IRQ(34) IRQ_TYPE_EDGE_RISING>,
++ <SOC_PERIPHERAL_IRQ(35) IRQ_TYPE_EDGE_RISING>,
++ <SOC_PERIPHERAL_IRQ(36) IRQ_TYPE_EDGE_RISING>,
++ <SOC_PERIPHERAL_IRQ(37) IRQ_TYPE_EDGE_RISING>,
++ <SOC_PERIPHERAL_IRQ(38) IRQ_TYPE_EDGE_RISING>,
++ <SOC_PERIPHERAL_IRQ(39) IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "nmi",
+ "irq0", "irq1", "irq2", "irq3",
+ "irq4", "irq5", "irq6", "irq7",
+@@ -121,7 +130,9 @@ irqc: interrupt-controller@110a0000 {
+ "tint20", "tint21", "tint22", "tint23",
+ "tint24", "tint25", "tint26", "tint27",
+ "tint28", "tint29", "tint30", "tint31",
+- "bus-err";
++ "bus-err", "ec7tie1-0", "ec7tie2-0",
++ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
++ "ec7tiovf-1";
+ clocks = <&cpg CPG_MOD R9A07G043_IA55_CLK>,
+ <&cpg CPG_MOD R9A07G043_IA55_PCLK>;
+ clock-names = "clk", "pclk";
+@@ -134,8 +145,8 @@ gic: interrupt-controller@11900000 {
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+- reg = <0x0 0x11900000 0 0x40000>,
+- <0x0 0x11940000 0 0x60000>;
++ reg = <0x0 0x11900000 0 0x20000>,
++ <0x0 0x11940000 0 0x40000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+index 66f68fc2b24118..edc942c8463959 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+@@ -905,7 +905,27 @@ irqc: interrupt-controller@110a0000 {
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
++ interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
++ "irq4", "irq5", "irq6", "irq7",
++ "tint0", "tint1", "tint2", "tint3",
++ "tint4", "tint5", "tint6", "tint7",
++ "tint8", "tint9", "tint10", "tint11",
++ "tint12", "tint13", "tint14", "tint15",
++ "tint16", "tint17", "tint18", "tint19",
++ "tint20", "tint21", "tint22", "tint23",
++ "tint24", "tint25", "tint26", "tint27",
++ "tint28", "tint29", "tint30", "tint31",
++ "bus-err", "ec7tie1-0", "ec7tie2-0",
++ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
++ "ec7tiovf-1";
+ clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
+ <&cpg CPG_MOD R9A07G044_IA55_PCLK>;
+ clock-names = "clk", "pclk";
+@@ -977,8 +997,8 @@ gic: interrupt-controller@11900000 {
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+- reg = <0x0 0x11900000 0 0x40000>,
+- <0x0 0x11940000 0 0x60000>;
++ reg = <0x0 0x11900000 0 0x20000>,
++ <0x0 0x11940000 0 0x40000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+@@ -1268,6 +1288,9 @@ timer {
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++ "hyp-virt";
+ };
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+index 1f1d481dc7830d..d61f7894e55cdd 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+@@ -912,7 +912,27 @@ irqc: interrupt-controller@110a0000 {
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
++ interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
++ "irq4", "irq5", "irq6", "irq7",
++ "tint0", "tint1", "tint2", "tint3",
++ "tint4", "tint5", "tint6", "tint7",
++ "tint8", "tint9", "tint10", "tint11",
++ "tint12", "tint13", "tint14", "tint15",
++ "tint16", "tint17", "tint18", "tint19",
++ "tint20", "tint21", "tint22", "tint23",
++ "tint24", "tint25", "tint26", "tint27",
++ "tint28", "tint29", "tint30", "tint31",
++ "bus-err", "ec7tie1-0", "ec7tie2-0",
++ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
++ "ec7tiovf-1";
+ clocks = <&cpg CPG_MOD R9A07G054_IA55_CLK>,
+ <&cpg CPG_MOD R9A07G054_IA55_PCLK>;
+ clock-names = "clk", "pclk";
+@@ -984,8 +1004,8 @@ gic: interrupt-controller@11900000 {
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+- reg = <0x0 0x11900000 0 0x40000>,
+- <0x0 0x11940000 0 0x60000>;
++ reg = <0x0 0x11900000 0 0x20000>,
++ <0x0 0x11940000 0 0x40000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+@@ -1275,6 +1295,9 @@ timer {
+ interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>,
++ <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "sec-phys", "phys", "virt", "hyp-phys",
++ "hyp-virt";
+ };
+ };
+diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+index 3885ef3454ff6e..50de17e4fb3f25 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+@@ -234,6 +234,7 @@ gpio_exp_74: gpio@74 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+
+@@ -294,6 +295,7 @@ gpio_exp_75: gpio@75 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ };
+@@ -314,6 +316,7 @@ gpio_exp_76: gpio@76 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ };
+@@ -324,6 +327,7 @@ gpio_exp_77: gpio@77 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index 42ce78beb4134d..20955556b624d0 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -632,6 +632,7 @@ spi0: spi@ff1d0000 {
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 12>, <&dmac 13>;
+ dma-names = "tx", "rx";
++ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
+ #address-cells = <1>;
+@@ -647,6 +648,7 @@ spi1: spi@ff1d8000 {
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 14>, <&dmac 15>;
+ dma-names = "tx", "rx";
++ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+index e9810d2f04071c..5ca0cc19f92c84 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+@@ -5,6 +5,8 @@
+ */
+
+ /dts-v1/;
++
++#include <dt-bindings/leds/common.h>
+ #include "rk3308.dtsi"
+
+ / {
+@@ -15,6 +17,7 @@ aliases {
+ ethernet0 = &gmac;
+ mmc0 = &emmc;
+ mmc1 = &sdmmc;
++ mmc2 = &sdio;
+ };
+
+ chosen {
+@@ -24,17 +27,21 @@ chosen {
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+- pinctrl-0 = <&green_led_gio>, <&heartbeat_led_gpio>;
++ pinctrl-0 = <&green_led>, <&heartbeat_led>;
+
+ green-led {
++ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
++ function = LED_FUNCTION_POWER;
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ label = "rockpis:green:power";
+ linux,default-trigger = "default-on";
+ };
+
+ blue-led {
++ color = <LED_COLOR_ID_BLUE>;
+ default-state = "on";
++ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ label = "rockpis:blue:user";
+ linux,default-trigger = "heartbeat";
+@@ -126,21 +133,37 @@ &cpu0 {
+ };
+
+ &emmc {
+- bus-width = <4>;
+ cap-mmc-highspeed;
+- mmc-hs200-1_8v;
++ cap-sd-highspeed;
++ no-sdio;
+ non-removable;
++ pinctrl-names = "default";
++ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
+ vmmc-supply = <&vcc_io>;
+ status = "okay";
+ };
+
+ &gmac {
+ clock_in_out = "output";
++ phy-handle = <&rtl8201f>;
+ phy-supply = <&vcc_io>;
+- snps,reset-gpio = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+- snps,reset-active-low;
+- snps,reset-delays-us = <0 50000 50000>;
+ status = "okay";
++
++ mdio {
++ compatible = "snps,dwmac-mdio";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ rtl8201f: ethernet-phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c22";
++ reg = <1>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&mac_rst>;
++ reset-assert-us = <20000>;
++ reset-deassert-us = <50000>;
++ reset-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
++ };
++ };
+ };
+
+ &i2c1 {
+@@ -151,12 +174,32 @@ &pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_32k>;
+
++ bluetooth {
++ bt_reg_on: bt-reg-on {
++ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
++
++ bt_wake_host: bt-wake-host {
++ rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
++ };
++
++ host_wake_bt: host-wake-bt {
++ rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
++ };
++
++ gmac {
++ mac_rst: mac-rst {
++ rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
++ };
++
+ leds {
+- green_led_gio: green-led-gpio {
++ green_led: green-led {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+- heartbeat_led_gpio: heartbeat-led-gpio {
++ heartbeat_led: heartbeat-led {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+@@ -194,15 +237,31 @@ &sdio {
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+- max-frequency = <1000000>;
++ max-frequency = <100000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
++ no-mmc;
++ no-sd;
+ non-removable;
+- sd-uhs-sdr104;
++ sd-uhs-sdr50;
++ vmmc-supply = <&vcc_io>;
++ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
++
++ rtl8723ds: wifi@1 {
++ reg = <1>;
++ interrupt-parent = <&gpio0>;
++ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "host-wake";
++ pinctrl-names = "default";
++ pinctrl-0 = <&wifi_host_wake>;
++ };
+ };
+
+ &sdmmc {
++ cap-mmc-highspeed;
+ cap-sd-highspeed;
++ disable-wp;
++ vmmc-supply = <&vcc_io>;
+ status = "okay";
+ };
+
+@@ -221,16 +280,22 @@ u2phy_otg: otg-port {
+ };
+
+ &uart0 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&uart0_xfer>;
+ status = "okay";
+ };
+
+ &uart4 {
++ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+- compatible = "realtek,rtl8723bs-bt";
+- device-wake-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
++ compatible = "realtek,rtl8723ds-bt";
++ device-wake-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>;
++ enable-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>;
+ host-wake-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&bt_reg_on &bt_wake_host &host_wake_bt>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+index 5d7d567283e525..4237f2ee8fee33 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+@@ -26,9 +26,11 @@ yt8531c: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+
++ motorcomm,auto-sleep-disabled;
+ motorcomm,clk-out-frequency-hz = <125000000>;
+ motorcomm,keep-pll-enabled;
+- motorcomm,auto-sleep-disabled;
++ motorcomm,rx-clk-drv-microamp = <5020>;
++ motorcomm,rx-data-drv-microamp = <5020>;
+
+ pinctrl-0 = <&eth_phy_reset_pin>;
+ pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+index 018a3a5075c72e..66443d52cd34d8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+@@ -186,8 +186,8 @@ &i2c1 {
+ rk805: pmic@18 {
+ compatible = "rockchip,rk805";
+ reg = <0x18>;
+- interrupt-parent = <&gpio2>;
+- interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-parent = <&gpio0>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk805-clkout2";
+ gpio-controller;
+@@ -332,7 +332,7 @@ led_pin: led-pin {
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+- rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
++ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index e729e7a22b23a6..126165ba1ea260 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -668,7 +668,7 @@ vpu_mmu: iommu@ff350800 {
+
+ vdec: video-codec@ff360000 {
+ compatible = "rockchip,rk3328-vdec", "rockchip,rk3399-vdec";
+- reg = <0x0 0xff360000 0x0 0x400>;
++ reg = <0x0 0xff360000 0x0 0x480>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>,
+ <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
+@@ -743,11 +743,20 @@ hdmi: hdmi@ff3c0000 {
+ status = "disabled";
+
+ ports {
+- hdmi_in: port {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ hdmi_in: port@0 {
++ reg = <0>;
++
+ hdmi_in_vop: endpoint {
+ remote-endpoint = <&vop_out_hdmi>;
+ };
+ };
++
++ hdmi_out: port@1 {
++ reg = <1>;
++ };
+ };
+ };
+
+@@ -813,8 +822,8 @@ cru: clock-controller@ff440000 {
+ <0>, <24000000>,
+ <24000000>, <24000000>,
+ <15000000>, <15000000>,
+- <100000000>, <100000000>,
+- <100000000>, <100000000>,
++ <300000000>, <100000000>,
++ <400000000>, <100000000>,
+ <50000000>, <100000000>,
+ <100000000>, <100000000>,
+ <50000000>, <50000000>,
+diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+index a4c5aaf1f45794..cac58ad951b2e3 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -790,6 +790,7 @@ spdif: spdif@ff880000 {
+ dma-names = "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx>;
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+@@ -801,6 +802,7 @@ i2s_2ch: i2s-2ch@ff890000 {
+ clocks = <&cru SCLK_I2S_2CH>, <&cru HCLK_I2S_2CH>;
+ dmas = <&dmac_bus 6>, <&dmac_bus 7>;
+ dma-names = "tx", "rx";
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+@@ -814,6 +816,7 @@ i2s_8ch: i2s-8ch@ff898000 {
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_8ch_bus>;
++ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index 5c1929d41cc0b7..cacbad35cfc854 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -509,8 +509,7 @@ wacky_spi_audio: spi2@0 {
+ &pci_rootport {
+ mvl_wifi: wifi@0,0 {
+ compatible = "pci1b4b,2b42";
+- reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
+- 0x83010000 0x0 0x00100000 0x0 0x00100000>;
++ reg = <0x0000 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
+index 853e88455e750e..9e4b12ed62cbed 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
+@@ -34,8 +34,8 @@ &mipi_panel {
+ &pci_rootport {
+ wifi@0,0 {
+ compatible = "qcom,ath10k";
+- reg = <0x00010000 0x0 0x00000000 0x0 0x00000000>,
+- <0x03010010 0x0 0x00000000 0x0 0x00200000>;
++ reg = <0x00000000 0x0 0x00000000 0x0 0x00000000>,
++ <0x03000010 0x0 0x00000000 0x0 0x00200000>;
+ qcom,ath10k-calibration-variant = "GO_DUMO";
+ };
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+index c9bf1d5c3a4264..3cd63d1e8f15bb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+@@ -450,7 +450,7 @@ da7219_aad {
+ dlg,btn-cfg = <50>;
+ dlg,mic-det-thr = <500>;
+ dlg,jack-ins-deb = <20>;
+- dlg,jack-det-rate = "32ms_64ms";
++ dlg,jack-det-rate = "32_64";
+ dlg,jack-rem-deb = <1>;
+
+ dlg,a-d-btn-thr = <0xa>;
+@@ -489,6 +489,7 @@ pci_rootport: pcie@0,0 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
++ device_type = "pci";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+index 054c6a4d1a45f7..f5e124b235c83c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+@@ -32,12 +32,12 @@ chosen {
+ backlight: edp-backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&vcc_12v>;
+- pwms = <&pwm0 0 740740 0>;
++ pwms = <&pwm0 0 125000 0>;
+ };
+
+ bat: battery {
+ compatible = "simple-battery";
+- charge-full-design-microamp-hours = <9800000>;
++ charge-full-design-microamp-hours = <10000000>;
+ voltage-max-design-microvolt = <4350000>;
+ voltage-min-design-microvolt = <3000000>;
+ };
+@@ -779,7 +779,6 @@ &pcie_phy {
+ };
+
+ &pcie0 {
+- bus-scan-delay-ms = <1000>;
+ ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 20e3f41efe97fa..aba2748fe54c77 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -119,6 +119,22 @@ &emmc_phy {
+ drive-impedance-ohm = <33>;
+ };
+
++&gpio3 {
++ /*
++ * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
++ * eMMC and SPI flash powered-down initially (in fact it keeps the
++ * reset signal asserted). The BIOS_DISABLE_OVERRIDE pin allows that
++ * signal to be overridden so that eMMC and SPI can be used regardless
++ * of its state.
++ */
++ bios-disable-override-hog {
++ gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
++ gpio-hog;
++ line-name = "bios_disable_override";
++ output-high;
++ };
++};
++
+ &gmac {
+ assigned-clocks = <&cru SCLK_RMII_SRC>;
+ assigned-clock-parents = <&clkin_gmac>;
+@@ -374,6 +390,7 @@ vdd_cpu_b: regulator@60 {
+
+ &i2s0 {
+ pinctrl-0 = <&i2s0_2ch_bus>;
++ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
+ rockchip,playback-channels = <2>;
+ rockchip,capture-channels = <2>;
+ status = "okay";
+@@ -382,8 +399,8 @@ &i2s0 {
+ /*
+ * As Q7 specifies neither a global nor an RX clock for I2S, these
+ * signals are not used. Furthermore, I2S0_LRCK_RX is used as a GPIO.
+- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent
+- * conflicts.
++ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
++ * definitions to prevent conflicts.
+ */
+ &i2s0_2ch_bus {
+ rockchip,pins =
+@@ -393,6 +410,14 @@ &i2s0_2ch_bus {
+ <3 RK_PD7 1 &pcfg_pull_none>;
+ };
+
++&i2s0_2ch_bus_bclk_off {
++ rockchip,pins =
++ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
++ <3 RK_PD2 1 &pcfg_pull_none>,
++ <3 RK_PD3 1 &pcfg_pull_none>,
++ <3 RK_PD7 1 &pcfg_pull_none>;
++};
++
+ &io_domains {
+ status = "okay";
+ bt656-supply = <&vcc_1v8>;
+@@ -401,16 +426,27 @@ &io_domains {
+ gpio1830-supply = <&vcc_1v8>;
+ };
+
+-&pmu_io_domains {
+- status = "okay";
+- pmu1830-supply = <&vcc_1v8>;
+-};
+-
+-&pwm2 {
+- status = "okay";
++&pcie_clkreqn_cpm {
++ rockchip,pins =
++ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ &pinctrl {
++ pinctrl-names = "default";
++ pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
++
++ gpios {
++ bios_disable_override_hog_pin: bios-disable-override-hog-pin {
++ rockchip,pins =
++ <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
++ };
++
++ q7_thermal_pin: q7-thermal-pin {
++ rockchip,pins =
++ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
++ };
++ };
++
+ i2c8 {
+ i2c8_xfer_a: i2c8-xfer {
+ rockchip,pins =
+@@ -443,11 +479,20 @@ vcc5v0_host_en: vcc5v0-host-en {
+ usb3 {
+ usb3_id: usb3-id {
+ rockchip,pins =
+- <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
++ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+ };
+
++&pmu_io_domains {
++ status = "okay";
++ pmu1830-supply = <&vcc_1v8>;
++};
++
++&pwm2 {
++ status = "okay";
++};
++
+ &sdhci {
+ /*
+ * Signal integrity isn't great at 200MHz but 100MHz has proven stable
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 5bc2d4faeea6df..fb1ea84c2b14fe 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1109,7 +1109,9 @@ power-domain@RK3399_PD_VCODEC {
+ power-domain@RK3399_PD_VDU {
+ reg = <RK3399_PD_VDU>;
+ clocks = <&cru ACLK_VDU>,
+- <&cru HCLK_VDU>;
++ <&cru HCLK_VDU>,
++ <&cru SCLK_VDU_CA>,
++ <&cru SCLK_VDU_CORE>;
+ pm_qos = <&qos_video_m1_r>,
+ <&qos_video_m1_w>;
+ #power-domain-cells = <0>;
+@@ -1385,7 +1387,7 @@ vpu_mmu: iommu@ff650800 {
+
+ vdec: video-codec@ff660000 {
+ compatible = "rockchip,rk3399-vdec";
+- reg = <0x0 0xff660000 0x0 0x400>;
++ reg = <0x0 0xff660000 0x0 0x480>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
+@@ -1951,6 +1953,7 @@ simple-audio-card,codec {
+ hdmi: hdmi@ff940000 {
+ compatible = "rockchip,rk3399-dw-hdmi";
+ reg = <0x0 0xff940000 0x0 0x20000>;
++ reg-io-width = <4>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_HDMI_CTRL>,
+ <&cru SCLK_HDMI_SFR>,
+@@ -1959,13 +1962,16 @@ hdmi: hdmi@ff940000 {
+ <&cru PLL_VPLL>;
+ clock-names = "iahb", "isfr", "cec", "grf", "ref";
+ power-domains = <&power RK3399_PD_HDCP>;
+- reg-io-width = <4>;
+ rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+
+ ports {
+- hdmi_in: port {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ hdmi_in: port@0 {
++ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+@@ -1978,6 +1984,10 @@ hdmi_in_vopl: endpoint@1 {
+ remote-endpoint = <&vopl_out_hdmi>;
+ };
+ };
++
++ hdmi_out: port@1 {
++ reg = <1>;
++ };
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+index 1c6d83b47cd217..6ecdf5d283390a 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+@@ -455,7 +455,7 @@ &pcie2x1 {
+ &pinctrl {
+ leds {
+ sys_led_pin: sys-status-led-pin {
+- rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
++ rockchip,pins = <0 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+index 2d92713be2a09f..6195937aa6dc5f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+@@ -289,7 +289,7 @@ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+- regulator-min-microvolt = <900000>;
++ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
+index 938092fce18661..68a72ac24cd4b4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
+@@ -268,7 +268,7 @@ rk809: pmic@20 {
+ vcc9-supply = <&vcc3v3_sys>;
+
+ codec {
+- mic-in-differential;
++ rockchip,mic-in-differential;
+ };
+
+ regulators {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+index f9127ddfbb7dfd..dc5892d25c1000 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+@@ -416,6 +416,8 @@ regulator-state-mem {
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
++ regulator-always-on;
++ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+@@ -525,9 +527,9 @@ &mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- switch@0 {
++ switch@1f {
+ compatible = "mediatek,mt7531";
+- reg = <0>;
++ reg = <0x1f>;
+
+ ports {
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+index 19f8fc369b1308..8c3ab07d380797 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+@@ -475,7 +475,7 @@ regulator-state-mem {
+ };
+
+ codec {
+- mic-in-differential;
++ rockchip,mic-in-differential;
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
+index 58ab7e9971dbce..b5e67990dd0f8b 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
+@@ -11,6 +11,10 @@ aliases {
+ };
+ };
+
++&pmu_io_domains {
++ vccio3-supply = <&vccio_sd>;
++};
++
+ &sdmmc0 {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
+index 89e84e3a92629a..25c49bdbadbcba 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
+@@ -39,9 +39,9 @@ status_led: led-status {
+ };
+ };
+
+- dc_12v: dc-12v-regulator {
++ vcc12v_dcin: vcc12v-dcin-regulator {
+ compatible = "regulator-fixed";
+- regulator-name = "dc_12v";
++ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+@@ -65,7 +65,7 @@ vcc3v3_sys: vcc3v3-sys-regulator {
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+- vin-supply = <&dc_12v>;
++ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+@@ -75,16 +75,7 @@ vcc5v0_sys: vcc5v0-sys-regulator {
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+- vin-supply = <&dc_12v>;
+- };
+-
+- vcc5v0_usb_host: vcc5v0-usb-host-regulator {
+- compatible = "regulator-fixed";
+- regulator-name = "vcc5v0_usb_host";
+- regulator-always-on;
+- regulator-boot-on;
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
++ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb_otg: vcc5v0-usb-otg-regulator {
+@@ -94,8 +85,9 @@ vcc5v0_usb_otg: vcc5v0-usb-otg-regulator {
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb_otg_en>;
+ regulator-name = "vcc5v0_usb_otg";
+- regulator-always-on;
+- regulator-boot-on;
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ vin-supply = <&vcc5v0_sys>;
+ };
+ };
+
+@@ -123,6 +115,10 @@ &cpu3 {
+ cpu-supply = <&vdd_cpu>;
+ };
+
++&display_subsystem {
++ status = "disabled";
++};
++
+ &gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+@@ -405,8 +401,8 @@ vcc5v0_usb_otg_en: vcc5v0-usb-otg-en {
+ &pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+- vccio1-supply = <&vccio_acodec>;
+- vccio3-supply = <&vccio_sd>;
++ vccio1-supply = <&vcc_3v3>;
++ vccio2-supply = <&vcc_1v8>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_1v8>;
+@@ -429,28 +425,12 @@ &uart2 {
+ status = "okay";
+ };
+
+-&usb_host0_ehci {
+- status = "okay";
+-};
+-
+-&usb_host0_ohci {
+- status = "okay";
+-};
+-
+ &usb_host0_xhci {
+ dr_mode = "host";
+ extcon = <&usb2phy0>;
+ status = "okay";
+ };
+
+-&usb_host1_ehci {
+- status = "okay";
+-};
+-
+-&usb_host1_ohci {
+- status = "okay";
+-};
+-
+ &usb_host1_xhci {
+ status = "okay";
+ };
+@@ -460,7 +440,7 @@ &usb2phy0 {
+ };
+
+ &usb2phy0_host {
+- phy-supply = <&vcc5v0_usb_host>;
++ phy-supply = <&vcc5v0_sys>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
+index e1fe5e442689a0..ce2a5e1ccefc3f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
+@@ -39,7 +39,7 @@ &gmac0_tx_bus2
+ &gmac0_rx_bus2
+ &gmac0_rgmii_clk
+ &gmac0_rgmii_bus>;
+- snps,reset-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>;
++ snps,reset-gpio = <&gpio1 RK_PB0 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ /* Reset time is 15ms, 50ms for rtl8211f */
+ snps,reset-delays-us = <0 15000 50000>;
+@@ -61,7 +61,7 @@ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus>;
+- snps,reset-gpio = <&gpio0 RK_PB1 GPIO_ACTIVE_LOW>;
++ snps,reset-gpio = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ /* Reset time is 15ms, 50ms for rtl8211f */
+ snps,reset-delays-us = <0 15000 50000>;
+@@ -71,18 +71,18 @@ &gmac1m1_rgmii_clk
+ };
+
+ &mdio0 {
+- rgmii_phy0: ethernet-phy@0 {
++ rgmii_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+- reg = <0>;
++ reg = <0x1>;
+ pinctrl-0 = <&eth_phy0_reset_pin>;
+ pinctrl-names = "default";
+ };
+ };
+
+ &mdio1 {
+- rgmii_phy1: ethernet-phy@0 {
++ rgmii_phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+- reg = <0>;
++ reg = <0x1>;
+ pinctrl-0 = <&eth_phy1_reset_pin>;
+ pinctrl-names = "default";
+ };
+@@ -102,6 +102,10 @@ eth_phy1_reset_pin: eth-phy1-reset-pin {
+ };
+ };
+
++&pmu_io_domains {
++ vccio3-supply = <&vcc_3v3>;
++};
++
+ &sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
+index a337f547caf538..6a02db4f073f29 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
+@@ -13,7 +13,7 @@
+
+ / {
+ model = "Hardkernel ODROID-M1";
+- compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568";
++ compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
+
+ aliases {
+ ethernet0 = &gmac0;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+index e05ab11981f554..17830e8c9a59be 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+@@ -530,10 +530,6 @@ regulator-state-mem {
+ };
+ };
+ };
+-
+- codec {
+- mic-in-differential;
+- };
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index abee88911982d4..2f885bc3665b5c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -597,6 +597,7 @@ vpu: video-codec@fdea0400 {
+ compatible = "rockchip,rk3568-vpu";
+ reg = <0x0 0xfdea0000 0x0 0x800>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "vdpu";
+ clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+ clock-names = "aclk", "hclk";
+ iommus = <&vdpu_mmu>;
+@@ -748,6 +749,7 @@ vop_mmu: iommu@fe043e00 {
+ clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
++ power-domains = <&power RK3568_PD_VO>;
+ status = "disabled";
+ };
+
+@@ -970,7 +972,7 @@ pcie2x1: pcie@fe260000 {
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "sys", "pmc", "msi", "legacy", "err";
++ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
+ bus-range = <0x0 0xf>;
+ clocks = <&cru ACLK_PCIE20_MST>, <&cru ACLK_PCIE20_SLV>,
+ <&cru ACLK_PCIE20_DBI>, <&cru PCLK_PCIE20>,
+@@ -1116,7 +1118,7 @@ i2s2_2ch: i2s@fe420000 {
+ dmas = <&dmac1 4>, <&dmac1 5>;
+ dma-names = "tx", "rx";
+ resets = <&cru SRST_M_I2S2_2CH>;
+- reset-names = "m";
++ reset-names = "tx-m";
+ rockchip,grf = <&grf>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2m0_sclktx
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+index 229a9111f5eb05..fa8286a325af74 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+@@ -215,6 +215,7 @@ pmic@0 {
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ pinctrl-names = "default";
+ spi-max-frequency = <1000000>;
++ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
+index 0bd80e51575448..97af4f91282854 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
+@@ -137,6 +137,18 @@ vbus5v0_typec: vbus5v0-typec-regulator {
+ vin-supply = <&vcc5v0_sys>;
+ };
+
++ vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie_m2_1_pwren>;
++ regulator-name = "vcc3v3_pcie2x1l0";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ vin-supply = <&vcc5v0_sys>;
++ };
++
+ vcc3v3_pcie30: vcc3v3-pcie30-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+@@ -421,6 +433,14 @@ &pcie2x1l0 {
+ status = "okay";
+ };
+
++&pcie2x1l1 {
++ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
++ vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie2_1_rst>;
++ status = "okay";
++};
++
+ &pcie2x1l2 {
+ reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc_3v3_pcie20>;
+@@ -467,6 +487,10 @@ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
++ pcie2_1_rst: pcie2-1-rst {
++ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
++
+ pcie2_2_rst: pcie2-2-rst {
+ rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+@@ -474,6 +498,10 @@ pcie2_2_rst: pcie2-2-rst {
+ pcie_m2_0_pwren: pcie-m20-pwren {
+ rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
++
++ pcie_m2_1_pwren: pcie-m21-pwren {
++ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
++ };
+ };
+
+ usb {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+index d1503a4b233a39..9299fa7e3e2150 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+@@ -163,13 +163,13 @@ &gpio0 {
+
+ &gpio1 {
+ gpio-line-names = /* GPIO1 A0-A7 */
+- "HEADER_27_3v3", "HEADER_28_3v3", "", "",
++ "HEADER_27_3v3", "", "", "",
+ "HEADER_29_1v8", "", "HEADER_7_1v8", "",
+ /* GPIO1 B0-B7 */
+ "", "HEADER_31_1v8", "HEADER_33_1v8", "",
+ "HEADER_11_1v8", "HEADER_13_1v8", "", "",
+ /* GPIO1 C0-C7 */
+- "", "", "", "",
++ "", "HEADER_28_3v3", "", "",
+ "", "", "", "",
+ /* GPIO1 D0-D7 */
+ "", "", "", "",
+@@ -193,11 +193,11 @@ &gpio3 {
+
+ &gpio4 {
+ gpio-line-names = /* GPIO4 A0-A7 */
+- "", "", "HEADER_37_3v3", "HEADER_32_3v3",
+- "HEADER_36_3v3", "", "HEADER_35_3v3", "HEADER_38_3v3",
++ "", "", "HEADER_37_3v3", "HEADER_8_3v3",
++ "HEADER_10_3v3", "", "HEADER_32_3v3", "HEADER_35_3v3",
+ /* GPIO4 B0-B7 */
+ "", "", "", "HEADER_40_3v3",
+- "HEADER_8_3v3", "HEADER_10_3v3", "", "",
++ "HEADER_38_3v3", "HEADER_36_3v3", "", "",
+ /* GPIO4 C0-C7 */
+ "", "", "", "",
+ "", "", "", "",
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
+index 48181671eacb0d..0933652bafc301 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
+@@ -369,7 +369,7 @@ emmc_cmd: emmc-cmd {
+ emmc_data_strobe: emmc-data-strobe {
+ rockchip,pins =
+ /* emmc_data_strobe */
+- <2 RK_PA2 1 &pcfg_pull_none>;
++ <2 RK_PA2 1 &pcfg_pull_down>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+index 8347adcbd00301..68763714f7f7bc 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+@@ -390,6 +390,7 @@ pmic@0 {
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
++ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+index 5544f66c6ff411..aa18cf1d1afaa1 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+@@ -890,6 +890,7 @@ power-domain@RK3588_PD_USB {
+ reg = <RK3588_PD_USB>;
+ clocks = <&cru PCLK_PHP_ROOT>,
+ <&cru ACLK_USB_ROOT>,
++ <&cru ACLK_USB>,
+ <&cru HCLK_USB_ROOT>,
+ <&cru HCLK_HOST0>,
+ <&cru HCLK_HOST_ARB0>,
+@@ -1541,7 +1542,6 @@ i2s2_2ch: i2s@fe490000 {
+ dmas = <&dmac1 0>, <&dmac1 1>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3588_PD_AUDIO>;
+- rockchip,trcm-sync-tx-only;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2m1_lrck
+ &i2s2m1_sclk
+@@ -1562,7 +1562,6 @@ i2s3_2ch: i2s@fe4a0000 {
+ dmas = <&dmac1 2>, <&dmac1 3>;
+ dma-names = "tx", "rx";
+ power-domains = <&power RK3588_PD_AUDIO>;
+- rockchip,trcm-sync-tx-only;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s3_lrck
+ &i2s3_sclk
+diff --git a/arch/arm64/boot/dts/sprd/ums512.dtsi b/arch/arm64/boot/dts/sprd/ums512.dtsi
+index 024be594c47d17..cc4459551e05e1 100644
+--- a/arch/arm64/boot/dts/sprd/ums512.dtsi
++++ b/arch/arm64/boot/dts/sprd/ums512.dtsi
+@@ -96,7 +96,7 @@ CPU5: cpu@500 {
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+- compatible = "arm,cortex-a55";
++ compatible = "arm,cortex-a75";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD>;
+@@ -104,7 +104,7 @@ CPU6: cpu@600 {
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+- compatible = "arm,cortex-a55";
++ compatible = "arm,cortex-a75";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD>;
+@@ -113,7 +113,7 @@ CPU7: cpu@700 {
+
+ idle-states {
+ entry-method = "psci";
+- CORE_PD: core-pd {
++ CORE_PD: cpu-pd {
+ compatible = "arm,idle-state";
+ entry-latency-us = <4000>;
+ exit-latency-us = <4000>;
+@@ -291,6 +291,7 @@ anlg_phy_gc_regs: syscon@323e0000 {
+ pll2: clock-controller@0 {
+ compatible = "sprd,ums512-gc-pll";
+ reg = <0x0 0x100>;
++ clocks = <&ext_26m>;
+ clock-names = "ext-26m";
+ #clock-cells = <1>;
+ };
+diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
+index e7b8e2e7f083db..8bd5acc6d68351 100644
+--- a/arch/arm64/boot/dts/ti/Makefile
++++ b/arch/arm64/boot/dts/ti/Makefile
+@@ -9,6 +9,8 @@
+ # alphabetically.
+
+ # Boards with AM62x SoC
++k3-am625-sk-hdmi-audio-dtbs := k3-am625-sk.dtb k3-am62x-sk-hdmi-audio.dtbo
++k3-am62-lp-sk-hdmi-audio-dtbs := k3-am62-lp-sk.dtb k3-am62x-sk-hdmi-audio.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-beagleplay.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-phyboard-lyra-rdk.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-sk.dtb
+@@ -19,7 +21,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dahlia.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dev.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-yavia.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk.dtb
+-dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo
++dtb-$(CONFIG_ARCH_K3) += k3-am625-sk-hdmi-audio.dtb
++dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk-hdmi-audio.dtb
+
+ # Boards with AM62Ax SoC
+ dtb-$(CONFIG_ARCH_K3) += k3-am62a7-sk.dtb
+@@ -66,6 +69,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-common-proc-board.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-gesi-exp-board.dtbo
++k3-j721s2-evm-dtbs := k3-j721s2-common-proc-board.dtb k3-j721s2-evm-gesi-exp-board.dtbo
++dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm.dtb
+
+ # Boards with J784s4 SoC
+ dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 284b90c94da8a2..a9b47ab92a02c7 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -613,6 +613,8 @@ usb0: usb@31000000 {
+ interrupt-names = "host", "peripheral";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
++ snps,usb2-gadget-lpm-disable;
++ snps,usb2-lpm-disable;
+ };
+ };
+
+@@ -636,6 +638,8 @@ usb1: usb@31100000 {
+ interrupt-names = "host", "peripheral";
+ maximum-speed = "high-speed";
+ dr_mode = "otg";
++ snps,usb2-gadget-lpm-disable;
++ snps,usb2-lpm-disable;
+ };
+ };
+
+@@ -743,9 +747,10 @@ dss: dss@30200000 {
+ <0x00 0x30207000 0x00 0x1000>, /* ovr1 */
+ <0x00 0x30208000 0x00 0x1000>, /* ovr2 */
+ <0x00 0x3020a000 0x00 0x1000>, /* vp1: Used for OLDI */
+- <0x00 0x3020b000 0x00 0x1000>; /* vp2: Used as DPI Out */
++ <0x00 0x3020b000 0x00 0x1000>, /* vp2: Used as DPI Out */
++ <0x00 0x30201000 0x00 0x1000>; /* common1 */
+ reg-names = "common", "vidl1", "vid",
+- "ovr1", "ovr2", "vp1", "vp2";
++ "ovr1", "ovr2", "vp1", "vp2", "common1";
+ power-domains = <&k3_pds 186 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 186 6>,
+ <&dss_vp1_clk>,
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+index 40992e7e4c3084..0a5634ca005dfb 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+@@ -60,7 +60,7 @@ verdin_key_wakeup: key-wakeup {
+
+ memory@80000000 {
+ device_type = "memory";
+- reg = <0x00000000 0x80000000 0x00000000 0x40000000>; /* 1G RAM */
++ reg = <0x00000000 0x80000000 0x00000000 0x80000000>; /* 2G RAM */
+ };
+
+ opp-table {
+@@ -1061,6 +1061,7 @@ dsi_bridge: dsi@e {
+ vddc-supply = <&reg_1v2_dsi>;
+ vddmipi-supply = <&reg_1v2_dsi>;
+ vddio-supply = <&reg_1v8_dsi>;
++ status = "disabled";
+
+ dsi_bridge_ports: ports {
+ #address-cells = <1>;
+@@ -1308,8 +1309,6 @@ &mcasp0 {
+ 0 0 0 0
+ >;
+ tdm-slots = <2>;
+- rx-num-evt = <32>;
+- tx-num-evt = <32>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+@@ -1326,8 +1325,6 @@ &mcasp1 {
+ 0 0 0 0
+ >;
+ tdm-slots = <2>;
+- rx-num-evt = <32>;
+- tx-num-evt = <32>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+index 7cfdf562b53bfe..3560349d630513 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
++++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+@@ -58,7 +58,7 @@ reserved-memory {
+
+ ramoops: ramoops@9ca00000 {
+ compatible = "ramoops";
+- reg = <0x00 0x9c700000 0x00 0x00100000>;
++ reg = <0x00 0x9ca00000 0x00 0x00100000>;
+ record-size = <0x8000>;
+ console-size = <0x8000>;
+ ftrace-size = <0x00>;
+@@ -903,6 +903,4 @@ &mcasp1 {
+ 0 0 0 0
+ 0 0 0 0
+ >;
+- tx-num-evt = <32>;
+- rx-num-evt = <32>;
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index 3198af08fb9fad..de36abb243f104 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -462,7 +462,7 @@ main_gpio0: gpio@600000 {
+ <193>, <194>, <195>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+- ti,ngpio = <87>;
++ ti,ngpio = <92>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 77 0>;
+@@ -480,7 +480,7 @@ main_gpio1: gpio@601000 {
+ <183>, <184>, <185>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+- ti,ngpio = <88>;
++ ti,ngpio = <52>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 78 0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index cff283c75f8ecd..99f2878de4c677 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -250,7 +250,7 @@ &main_i2c1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+- clock-frequency = <400000>;
++ clock-frequency = <100000>;
+
+ exp1: gpio@22 {
+ compatible = "ti,tca6424";
+diff --git a/arch/arm64/boot/dts/ti/k3-am62p.dtsi b/arch/arm64/boot/dts/ti/k3-am62p.dtsi
+index 294ab73ec98b7b..dc0a8e94e9ace0 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62p.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62p.dtsi
+@@ -71,7 +71,7 @@ cbass_main: bus@f0000 {
+ <0x00 0x43600000 0x00 0x43600000 0x00 0x00010000>, /* SA3 sproxy data */
+ <0x00 0x44043000 0x00 0x44043000 0x00 0x00000fe0>, /* TI SCI DEBUG */
+ <0x00 0x44860000 0x00 0x44860000 0x00 0x00040000>, /* SA3 sproxy config */
+- <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>, /* DMSS */
++ <0x00 0x48000000 0x00 0x48000000 0x00 0x06408000>, /* DMSS */
+ <0x00 0x60000000 0x00 0x60000000 0x00 0x08000000>, /* FSS0 DAT1 */
+ <0x00 0x70000000 0x00 0x70000000 0x00 0x00010000>, /* OCSRAM */
+ <0x01 0x00000000 0x01 0x00000000 0x00 0x00310000>, /* A53 PERIPHBASE */
+diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+index 677ff8de4b6ecf..0f8c0f6a0f573e 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+@@ -481,8 +481,6 @@ &mcasp1 {
+ 0 0 0 0
+ 0 0 0 0
+ >;
+- tx-num-evt = <32>;
+- rx-num-evt = <32>;
+ };
+
+ &dss {
+diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+index 0df54a74182474..064eb062bb54a0 100644
+--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+@@ -612,6 +612,10 @@ sdhci0: mmc@fa10000 {
+ ti,otap-del-sel-mmc-hs = <0x0>;
+ ti,otap-del-sel-ddr52 = <0x6>;
+ ti,otap-del-sel-hs200 = <0x7>;
++ ti,itap-del-sel-legacy = <0x10>;
++ ti,itap-del-sel-mmc-hs = <0xa>;
++ ti,itap-del-sel-ddr52 = <0x3>;
++ status = "disabled";
+ };
+
+ sdhci1: mmc@fa00000 {
+@@ -623,13 +627,18 @@ sdhci1: mmc@fa00000 {
+ clock-names = "clk_ahb", "clk_xin";
+ ti,trm-icp = <0x2>;
+ ti,otap-del-sel-legacy = <0x0>;
+- ti,otap-del-sel-sd-hs = <0xf>;
++ ti,otap-del-sel-sd-hs = <0x0>;
+ ti,otap-del-sel-sdr12 = <0xf>;
+ ti,otap-del-sel-sdr25 = <0xf>;
+ ti,otap-del-sel-sdr50 = <0xc>;
+ ti,otap-del-sel-sdr104 = <0x6>;
+ ti,otap-del-sel-ddr50 = <0x9>;
++ ti,itap-del-sel-legacy = <0x0>;
++ ti,itap-del-sel-sd-hs = <0x0>;
++ ti,itap-del-sel-sdr12 = <0x0>;
++ ti,itap-del-sel-sdr25 = <0x0>;
+ ti,clkbuf-sel = <0x7>;
++ status = "disabled";
+ };
+
+ cpsw3g: ethernet@8000000 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+index 1c2c8f0daca9ff..1dcbf1fe7fae47 100644
+--- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+@@ -200,6 +200,7 @@ flash@0 {
+ };
+
+ &sdhci0 {
++ status = "okay";
+ bus-width = <8>;
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+index b4a1f73d4fb17a..91d726ef7594a3 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+@@ -35,6 +35,7 @@ aliases {
+ };
+
+ memory@80000000 {
++ bootph-all;
+ device_type = "memory";
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+@@ -108,6 +109,7 @@ rtos_ipc_memory_region: ipc-memories@a5000000 {
+
+ evm_12v0: regulator-0 {
+ /* main DC jack */
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "evm_12v0";
+ regulator-min-microvolt = <12000000>;
+@@ -129,6 +131,7 @@ vsys_5v0: regulator-1 {
+
+ vsys_3v3: regulator-2 {
+ /* output of LM5140 */
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_3v3";
+ regulator-min-microvolt = <3300000>;
+@@ -140,6 +143,7 @@ vsys_3v3: regulator-2 {
+
+ vdd_mmc1: regulator-3 {
+ /* TPS2051BD */
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ regulator-min-microvolt = <3300000>;
+@@ -161,6 +165,7 @@ vddb: regulator-4 {
+ };
+
+ vtt_supply: regulator-5 {
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "vtt";
+ pinctrl-names = "default";
+@@ -251,6 +256,7 @@ AM64X_IOPAD(0x0244, PIN_OUTPUT, 0) /* (E14) UART1_TXD */
+ };
+
+ main_uart0_pins_default: main-uart0-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0238, PIN_INPUT, 0) /* (B16) UART0_CTSn */
+ AM64X_IOPAD(0x023c, PIN_OUTPUT, 0) /* (A16) UART0_RTSn */
+@@ -269,6 +275,7 @@ AM64X_IOPAD(0x0218, PIN_INPUT, 0) /* (A14) SPI0_D1 */
+ };
+
+ main_i2c0_pins_default: main-i2c0-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0260, PIN_INPUT_PULLUP, 0) /* (A18) I2C0_SCL */
+ AM64X_IOPAD(0x0264, PIN_INPUT_PULLUP, 0) /* (B18) I2C0_SDA */
+@@ -276,6 +283,7 @@ AM64X_IOPAD(0x0264, PIN_INPUT_PULLUP, 0) /* (B18) I2C0_SDA */
+ };
+
+ main_i2c1_pins_default: main-i2c1-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */
+ AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */
+@@ -283,6 +291,7 @@ AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */
+ };
+
+ mdio1_pins_default: mdio1-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x01fc, PIN_OUTPUT, 4) /* (R2) PRG0_PRU1_GPO19.MDIO0_MDC */
+ AM64X_IOPAD(0x01f8, PIN_INPUT, 4) /* (P5) PRG0_PRU1_GPO18.MDIO0_MDIO */
+@@ -290,6 +299,7 @@ AM64X_IOPAD(0x01f8, PIN_INPUT, 4) /* (P5) PRG0_PRU1_GPO18.MDIO0_MDIO */
+ };
+
+ rgmii1_pins_default: rgmii1-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x01cc, PIN_INPUT, 4) /* (W5) PRG0_PRU1_GPO7.RGMII1_RD0 */
+ AM64X_IOPAD(0x01d4, PIN_INPUT, 4) /* (Y5) PRG0_PRU1_GPO9.RGMII1_RD1 */
+@@ -307,6 +317,7 @@ AM64X_IOPAD(0x00dc, PIN_OUTPUT, 4) /* (U15) PRG1_PRU0_GPO9.RGMII1_TX_CTL */
+ };
+
+ rgmii2_pins_default: rgmii2-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0108, PIN_INPUT, 4) /* (W11) PRG1_PRU1_GPO0.RGMII2_RD0 */
+ AM64X_IOPAD(0x010c, PIN_INPUT, 4) /* (V11) PRG1_PRU1_GPO1.RGMII2_RD1 */
+@@ -324,6 +335,7 @@ AM64X_IOPAD(0x0144, PIN_OUTPUT, 4) /* (Y11) PRG1_PRU1_GPO15.RGMII2_TX_CTL */
+ };
+
+ main_usb0_pins_default: main-usb0-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x02a8, PIN_OUTPUT, 0) /* (E19) USB0_DRVVBUS */
+ >;
+@@ -366,6 +378,7 @@ AM64X_IOPAD(0x0258, PIN_OUTPUT, 0) /* (C17) MCAN1_TX */
+ };
+
+ ddr_vtt_pins_default: ddr-vtt-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0030, PIN_OUTPUT_PULLUP, 7) /* (L18) OSPI0_CSN1.GPIO0_12 */
+ >;
+@@ -373,6 +386,7 @@ AM64X_IOPAD(0x0030, PIN_OUTPUT_PULLUP, 7) /* (L18) OSPI0_CSN1.GPIO0_12 */
+ };
+
+ &main_uart0 {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+@@ -387,6 +401,7 @@ &main_uart1 {
+ };
+
+ &main_i2c0 {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+@@ -400,12 +415,14 @@ eeprom@50 {
+ };
+
+ &main_i2c1 {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+ clock-frequency = <400000>;
+
+ exp1: gpio@22 {
++ bootph-all;
+ compatible = "ti,tca6424";
+ reg = <0x22>;
+ gpio-controller;
+@@ -438,6 +455,10 @@ display@3c {
+ };
+ };
+
++&main_gpio0 {
++ bootph-all;
++};
++
+ /* mcu_gpio0 is reserved for mcu firmware usage */
+ &mcu_gpio0 {
+ status = "reserved";
+@@ -457,16 +478,19 @@ eeprom@0 {
+ };
+ };
+
++/* eMMC */
+ &sdhci0 {
+- /* emmc */
++ status = "okay";
+ bus-width = <8>;
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+ disable-wp;
+ };
+
++/* SD/MMC */
+ &sdhci1 {
+- /* SD/MMC */
++ bootph-all;
++ status = "okay";
+ vmmc-supply = <&vdd_mmc1>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+@@ -476,11 +500,13 @@ &sdhci1 {
+ };
+
+ &usbss0 {
++ bootph-all;
+ ti,vbus-divider;
+ ti,usb2-only;
+ };
+
+ &usb0 {
++ bootph-all;
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ pinctrl-names = "default";
+@@ -488,11 +514,13 @@ &usb0 {
+ };
+
+ &cpsw3g {
++ bootph-all;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii1_pins_default>, <&rgmii2_pins_default>;
+ };
+
+ &cpsw_port1 {
++ bootph-all;
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy0>;
+ };
+@@ -503,11 +531,13 @@ &cpsw_port2 {
+ };
+
+ &cpsw3g_mdio {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio1_pins_default>;
+
+ cpsw3g_phy0: ethernet-phy@0 {
++ bootph-all;
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+index 9175e96842d821..53b64e55413f99 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+@@ -264,6 +264,7 @@ &main_uart1 {
+ };
+
+ &sdhci1 {
++ status = "okay";
+ vmmc-supply = <&vcc_3v3_mmc>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc1_pins_default>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+index 722fd285a34eca..bffbd234f715ad 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+@@ -34,6 +34,7 @@ aliases {
+ };
+
+ memory@80000000 {
++ bootph-pre-ram;
+ device_type = "memory";
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+@@ -107,6 +108,7 @@ rtos_ipc_memory_region: ipc-memories@a5000000 {
+
+ vusb_main: regulator-0 {
+ /* USB MAIN INPUT 5V DC */
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "vusb_main5v0";
+ regulator-min-microvolt = <5000000>;
+@@ -117,6 +119,7 @@ vusb_main: regulator-0 {
+
+ vcc_3v3_sys: regulator-1 {
+ /* output of LP8733xx */
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_sys";
+ regulator-min-microvolt = <3300000>;
+@@ -128,6 +131,7 @@ vcc_3v3_sys: regulator-1 {
+
+ vdd_mmc1: regulator-2 {
+ /* TPS2051BD */
++ bootph-all;
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ regulator-min-microvolt = <3300000>;
+@@ -234,6 +238,7 @@ led-7 {
+
+ &main_pmx0 {
+ main_mmc1_pins_default: main-mmc1-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x029c, PIN_INPUT_PULLUP, 0) /* (C20) MMC1_SDWP */
+ AM64X_IOPAD(0x0298, PIN_INPUT_PULLUP, 0) /* (D19) MMC1_SDCD */
+@@ -248,6 +253,7 @@ AM64X_IOPAD(0x027c, PIN_INPUT_PULLUP, 0) /* (K18) MMC1_DAT3 */
+ };
+
+ main_uart0_pins_default: main-uart0-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0238, PIN_INPUT, 0) /* (B16) UART0_CTSn */
+ AM64X_IOPAD(0x023c, PIN_OUTPUT, 0) /* (A16) UART0_RTSn */
+@@ -257,6 +263,7 @@ AM64X_IOPAD(0x0234, PIN_OUTPUT, 0) /* (C16) UART0_TXD */
+ };
+
+ main_uart1_pins_default: main-uart1-default-pins {
++ bootph-pre-ram;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0248, PIN_INPUT, 0) /* (D16) UART1_CTSn */
+ AM64X_IOPAD(0x024c, PIN_OUTPUT, 0) /* (E16) UART1_RTSn */
+@@ -266,12 +273,14 @@ AM64X_IOPAD(0x0244, PIN_OUTPUT, 0) /* (E14) UART1_TXD */
+ };
+
+ main_usb0_pins_default: main-usb0-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x02a8, PIN_OUTPUT, 0) /* (E19) USB0_DRVVBUS */
+ >;
+ };
+
+ main_i2c0_pins_default: main-i2c0-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0260, PIN_INPUT_PULLUP, 0) /* (A18) I2C0_SCL */
+ AM64X_IOPAD(0x0264, PIN_INPUT_PULLUP, 0) /* (B18) I2C0_SDA */
+@@ -279,6 +288,7 @@ AM64X_IOPAD(0x0264, PIN_INPUT_PULLUP, 0) /* (B18) I2C0_SDA */
+ };
+
+ main_i2c1_pins_default: main-i2c1-default-pins {
++ bootph-all;
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */
+ AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */
+@@ -367,6 +377,7 @@ AM64X_IOPAD(0x00bc, PIN_INPUT, 7) /* (U8) GPIO0_46 */
+ };
+
+ &main_uart0 {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+@@ -375,12 +386,14 @@ &main_uart0 {
+
+ &main_uart1 {
+ /* main_uart1 is reserved for firmware usage */
++ bootph-pre-ram;
+ status = "reserved";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart1_pins_default>;
+ };
+
+ &main_i2c0 {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+@@ -393,12 +406,14 @@ eeprom@51 {
+ };
+
+ &main_i2c1 {
++ bootph-all;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+ clock-frequency = <400000>;
+
+ exp1: gpio@70 {
++ bootph-all;
+ compatible = "nxp,pca9538";
+ reg = <0x70>;
+ gpio-controller;
+@@ -424,6 +439,7 @@ &mcu_gpio0 {
+ };
+
+ &sdhci0 {
++ status = "okay";
+ vmmc-supply = <&wlan_en>;
+ bus-width = <4>;
+ non-removable;
+@@ -443,8 +459,10 @@ wlcore: wlcore@2 {
+ };
+ };
+
++/* SD/MMC */
+ &sdhci1 {
+- /* SD/MMC */
++ bootph-all;
++ status = "okay";
+ vmmc-supply = <&vdd_mmc1>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+@@ -454,11 +472,22 @@ &sdhci1 {
+ };
+
+ &serdes_ln_ctrl {
++ bootph-all;
+ idle-states = <AM64_SERDES0_LANE0_USB>;
+ };
+
++&serdes_refclk {
++ bootph-all;
++};
++
++&serdes_wiz0 {
++ bootph-all;
++};
++
+ &serdes0 {
++ bootph-all;
+ serdes0_usb_link: phy@0 {
++ bootph-all;
+ reg = <0>;
+ cdns,num-lanes = <1>;
+ #phy-cells = <0>;
+@@ -468,10 +497,12 @@ serdes0_usb_link: phy@0 {
+ };
+
+ &usbss0 {
++ bootph-all;
+ ti,vbus-divider;
+ };
+
+ &usb0 {
++ bootph-all;
+ dr_mode = "host";
+ maximum-speed = "super-speed";
+ pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
+index 04c15b64f0b776..76ff44e71ec177 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
++++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
+@@ -420,7 +420,6 @@ &sdhci1 {
+ ti,driver-strength-ohm = <50>;
+ ti,fails-without-test-cd;
+ /* Enabled by overlay */
+- status = "disabled";
+ };
+
+ &tscadc0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
+index 6229849b5d9682..65dbbff64ed961 100644
+--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
+@@ -207,6 +207,7 @@ partitions {
+ };
+
+ &sdhci0 {
++ status = "okay";
+ non-removable;
+ disable-wp;
+ no-sdio;
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
+index ba1c14a54acf48..b849648d51f91b 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
+@@ -14,6 +14,16 @@
+
+ / {
+ aliases {
++ serial0 = &wkup_uart0;
++ serial1 = &mcu_uart0;
++ serial2 = &main_uart0;
++ serial3 = &main_uart1;
++ i2c0 = &wkup_i2c0;
++ i2c1 = &mcu_i2c0;
++ i2c2 = &main_i2c0;
++ i2c3 = &main_i2c1;
++ i2c4 = &main_i2c2;
++ i2c5 = &main_i2c3;
+ spi0 = &mcu_spi0;
+ mmc0 = &sdhci1;
+ mmc1 = &sdhci0;
+diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+index bc460033a37a86..57befcce93b976 100644
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -1013,9 +1013,10 @@ dss: dss@4a00000 {
+ <0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
+ <0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
+ <0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
+- <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
++ <0x0 0x04a0b000 0x0 0x1000>, /* vp2 */
++ <0x0 0x04a01000 0x0 0x1000>; /* common1 */
+ reg-names = "common", "vidl1", "vid",
+- "ovr1", "ovr2", "vp1", "vp2";
++ "ovr1", "ovr2", "vp1", "vp2", "common1";
+
+ ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+
+@@ -1034,7 +1035,7 @@ dss: dss@4a00000 {
+ assigned-clocks = <&k3_clks 67 2>;
+ assigned-clock-parents = <&k3_clks 67 5>;
+
+- interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
++ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+
+ dma-coherent;
+
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+index cee2b4b0eb87da..7a0c599f2b1c35 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+@@ -91,24 +91,25 @@ vdd_sd_dv: gpio-regulator-TLV71033 {
+ };
+
+ &wkup_pmx0 {
++};
++
++&wkup_pmx2 {
+ mcu_uart0_pins_default: mcu-uart0-default-pins {
+ pinctrl-single,pins = <
+- J721E_WKUP_IOPAD(0xf4, PIN_INPUT, 0) /* (D20) MCU_UART0_RXD */
+- J721E_WKUP_IOPAD(0xf0, PIN_OUTPUT, 0) /* (D19) MCU_UART0_TXD */
+- J721E_WKUP_IOPAD(0xf8, PIN_INPUT, 0) /* (E20) MCU_UART0_CTSn */
+- J721E_WKUP_IOPAD(0xfc, PIN_OUTPUT, 0) /* (E21) MCU_UART0_RTSn */
++ J721E_WKUP_IOPAD(0x90, PIN_INPUT, 0) /* (E20) MCU_UART0_CTSn */
++ J721E_WKUP_IOPAD(0x94, PIN_OUTPUT, 0) /* (E21) MCU_UART0_RTSn */
++ J721E_WKUP_IOPAD(0x8c, PIN_INPUT, 0) /* (D20) MCU_UART0_RXD */
++ J721E_WKUP_IOPAD(0x88, PIN_OUTPUT, 0) /* (D19) MCU_UART0_TXD */
+ >;
+ };
+
+ wkup_uart0_pins_default: wkup-uart0-default-pins {
+ pinctrl-single,pins = <
+- J721E_WKUP_IOPAD(0xb0, PIN_INPUT, 0) /* (B14) WKUP_UART0_RXD */
+- J721E_WKUP_IOPAD(0xb4, PIN_OUTPUT, 0) /* (A14) WKUP_UART0_TXD */
++ J721E_WKUP_IOPAD(0x48, PIN_INPUT, 0) /* (B14) WKUP_UART0_RXD */
++ J721E_WKUP_IOPAD(0x4c, PIN_OUTPUT, 0) /* (A14) WKUP_UART0_TXD */
+ >;
+ };
+-};
+
+-&wkup_pmx2 {
+ mcu_cpsw_pins_default: mcu-cpsw-default-pins {
+ pinctrl-single,pins = <
+ J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+@@ -210,7 +211,6 @@ &mcu_uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_uart0_pins_default>;
+- clock-frequency = <96000000>;
+ };
+
+ &main_uart0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
+index 2f954729f35338..7897323376a5b9 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts
+@@ -123,7 +123,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 {
+ no-map;
+ };
+
+- c66_1_dma_memory_region: c66-dma-memory@a6000000 {
++ c66_0_dma_memory_region: c66-dma-memory@a6000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa6000000 0x00 0x100000>;
+ no-map;
+@@ -135,7 +135,7 @@ c66_0_memory_region: c66-memory@a6100000 {
+ no-map;
+ };
+
+- c66_0_dma_memory_region: c66-dma-memory@a7000000 {
++ c66_1_dma_memory_region: c66-dma-memory@a7000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa7000000 0x00 0x100000>;
+ no-map;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+index 42fe8eee9ec8c7..ccacb65683b5b0 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+@@ -119,7 +119,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 {
+ no-map;
+ };
+
+- c66_1_dma_memory_region: c66-dma-memory@a6000000 {
++ c66_0_dma_memory_region: c66-dma-memory@a6000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa6000000 0x00 0x100000>;
+ no-map;
+@@ -131,7 +131,7 @@ c66_0_memory_region: c66-memory@a6100000 {
+ no-map;
+ };
+
+- c66_0_dma_memory_region: c66-dma-memory@a7000000 {
++ c66_1_dma_memory_region: c66-dma-memory@a7000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0xa7000000 0x00 0x100000>;
+ no-map;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+index c6b85bbf9a179b..1ba1f53c72d03b 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+@@ -190,8 +190,6 @@ J721S2_IOPAD(0x038, PIN_OUTPUT, 0) /* (AB28) MCASP0_ACLKX.MCAN5_TX */
+ &wkup_pmx2 {
+ wkup_uart0_pins_default: wkup-uart0-default-pins {
+ pinctrl-single,pins = <
+- J721S2_WKUP_IOPAD(0x070, PIN_INPUT, 0) /* (E25) WKUP_GPIO0_6.WKUP_UART0_CTSn */
+- J721S2_WKUP_IOPAD(0x074, PIN_OUTPUT, 0) /* (F28) WKUP_GPIO0_7.WKUP_UART0_RTSn */
+ J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (D28) WKUP_UART0_RXD */
+ J721S2_WKUP_IOPAD(0x04c, PIN_OUTPUT, 0) /* (D27) WKUP_UART0_TXD */
+ >;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+index 2ddad931855416..71324fec415ae2 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+@@ -652,7 +652,7 @@ wkup_vtm0: temperature-sensor@42040000 {
+ compatible = "ti,j7200-vtm";
+ reg = <0x00 0x42040000 0x0 0x350>,
+ <0x00 0x42050000 0x0 0x350>;
+- power-domains = <&k3_pds 154 TI_SCI_PD_SHARED>;
++ power-domains = <&k3_pds 180 TI_SCI_PD_SHARED>;
+ #thermal-sensor-cells = <1>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+index 5991c2e1d994c8..39f99ee39dab99 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+@@ -296,8 +296,6 @@ &wkup_pmx2 {
+ wkup_uart0_pins_default: wkup-uart0-default-pins {
+ bootph-all;
+ pinctrl-single,pins = <
+- J721S2_WKUP_IOPAD(0x070, PIN_INPUT, 0) /* (L37) WKUP_GPIO0_6.WKUP_UART0_CTSn */
+- J721S2_WKUP_IOPAD(0x074, PIN_INPUT, 0) /* (L36) WKUP_GPIO0_7.WKUP_UART0_RTSn */
+ J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (K35) WKUP_UART0_RXD */
+ J721S2_WKUP_IOPAD(0x04c, PIN_INPUT, 0) /* (K34) WKUP_UART0_TXD */
+ >;
+diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+index 4ab4018d369538..8d26daf7fa3d12 100644
+--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+@@ -616,7 +616,7 @@ wkup_vtm0: temperature-sensor@42040000 {
+ compatible = "ti,j7200-vtm";
+ reg = <0x00 0x42040000 0x00 0x350>,
+ <0x00 0x42050000 0x00 0x350>;
+- power-domains = <&k3_pds 154 TI_SCI_PD_SHARED>;
++ power-domains = <&k3_pds 243 TI_SCI_PD_SHARED>;
+ #thermal-sensor-cells = <1>;
+ };
+
+diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
+index 5e40c0b4fa0a90..1068b0fa8e9847 100644
+--- a/arch/arm64/boot/dts/xilinx/Makefile
++++ b/arch/arm64/boot/dts/xilinx/Makefile
+@@ -22,11 +22,10 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-sm-k26-revA.dtb
+ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA.dtb
+
+ zynqmp-sm-k26-revA-sck-kv-g-revA-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kv-g-revA.dtbo
++dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-sm-k26-revA-sck-kv-g-revA.dtb
+ zynqmp-sm-k26-revA-sck-kv-g-revB-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kv-g-revB.dtbo
++dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-sm-k26-revA-sck-kv-g-revB.dtb
+ zynqmp-smk-k26-revA-sck-kv-g-revA-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kv-g-revA.dtbo
++dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA-sck-kv-g-revA.dtb
+ zynqmp-smk-k26-revA-sck-kv-g-revB-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kv-g-revB.dtbo
+-
+-zynqmp-sm-k26-revA-sck-kr-g-revA-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kr-g-revA.dtbo
+-zynqmp-sm-k26-revA-sck-kr-g-revB-dtbs := zynqmp-sm-k26-revA.dtb zynqmp-sck-kr-g-revB.dtbo
+-zynqmp-smk-k26-revA-sck-kr-g-revA-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kr-g-revA.dtbo
+-zynqmp-smk-k26-revA-sck-kr-g-revB-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kr-g-revB.dtbo
++dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA-sck-kv-g-revB.dtb
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso
+index ae1b9b2bdbee27..92f4190d564db1 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revA.dtso
+@@ -21,57 +21,57 @@
+ /dts-v1/;
+ /plugin/;
+
+-&i2c1 { /* I2C_SCK C23/C24 - MIO from SOM */
+- #address-cells = <1>;
+- #size-cells = <0>;
+- pinctrl-names = "default", "gpio";
+- pinctrl-0 = <&pinctrl_i2c1_default>;
+- pinctrl-1 = <&pinctrl_i2c1_gpio>;
+- scl-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+- sda-gpios = <&gpio 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+-
+- /* u14 - 0x40 - ina260 */
+- /* u27 - 0xe0 - STDP4320 DP/HDMI splitter */
+-};
+-
+-&amba {
+- si5332_0: si5332_0 { /* u17 */
++&{/} {
++ si5332_0: si5332-0 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+- si5332_1: si5332_1 { /* u17 */
++ si5332_1: si5332-1 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+- si5332_2: si5332_2 { /* u17 */
++ si5332_2: si5332-2 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+
+- si5332_3: si5332_3 { /* u17 */
++ si5332_3: si5332-3 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+- si5332_4: si5332_4 { /* u17 */
++ si5332_4: si5332-4 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+
+- si5332_5: si5332_5 { /* u17 */
++ si5332_5: si5332-5 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
++&i2c1 { /* I2C_SCK C23/C24 - MIO from SOM */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ pinctrl-names = "default", "gpio";
++ pinctrl-0 = <&pinctrl_i2c1_default>;
++ pinctrl-1 = <&pinctrl_i2c1_gpio>;
++ scl-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++ sda-gpios = <&gpio 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++
++ /* u14 - 0x40 - ina260 */
++ /* u27 - 0xe0 - STDP4320 DP/HDMI splitter */
++};
++
+ /* DP/USB 3.0 and SATA */
+ &psgtr {
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso
+index b59e48be6465a5..f88b71f5b07a63 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-sck-kv-g-revB.dtso
+@@ -16,58 +16,58 @@
+ /dts-v1/;
+ /plugin/;
+
+-&i2c1 { /* I2C_SCK C23/C24 - MIO from SOM */
+- #address-cells = <1>;
+- #size-cells = <0>;
+- pinctrl-names = "default", "gpio";
+- pinctrl-0 = <&pinctrl_i2c1_default>;
+- pinctrl-1 = <&pinctrl_i2c1_gpio>;
+- scl-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+- sda-gpios = <&gpio 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+-
+- /* u14 - 0x40 - ina260 */
+- /* u43 - 0x2d - usb5744 */
+- /* u27 - 0xe0 - STDP4320 DP/HDMI splitter */
+-};
+-
+-&amba {
+- si5332_0: si5332_0 { /* u17 */
++&{/} {
++ si5332_0: si5332-0 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+- si5332_1: si5332_1 { /* u17 */
++ si5332_1: si5332-1 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+- si5332_2: si5332_2 { /* u17 */
++ si5332_2: si5332-2 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+
+- si5332_3: si5332_3 { /* u17 */
++ si5332_3: si5332-3 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+- si5332_4: si5332_4 { /* u17 */
++ si5332_4: si5332-4 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+
+- si5332_5: si5332_5 { /* u17 */
++ si5332_5: si5332-5 { /* u17 */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+ };
+
++&i2c1 { /* I2C_SCK C23/C24 - MIO from SOM */
++ #address-cells = <1>;
++ #size-cells = <0>;
++ pinctrl-names = "default", "gpio";
++ pinctrl-0 = <&pinctrl_i2c1_default>;
++ pinctrl-1 = <&pinctrl_i2c1_gpio>;
++ scl-gpios = <&gpio 24 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++ sda-gpios = <&gpio 25 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
++
++ /* u14 - 0x40 - ina260 */
++ /* u43 - 0x2d - usb5744 */
++ /* u27 - 0xe0 - STDP4320 DP/HDMI splitter */
++};
++
+ /* DP/USB 3.0 */
+ &psgtr {
+ status = "okay";
+diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
+index 7399d706967a4f..9b7a09808a3dda 100755
+--- a/arch/arm64/boot/install.sh
++++ b/arch/arm64/boot/install.sh
+@@ -17,7 +17,8 @@
+ # $3 - kernel map file
+ # $4 - default install path (blank if root directory)
+
+-if [ "$(basename $2)" = "Image.gz" ]; then
++if [ "$(basename $2)" = "Image.gz" ] || [ "$(basename $2)" = "vmlinuz.efi" ]
++then
+ # Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index a789119e6483b5..60af93c04b45a7 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -623,6 +623,7 @@ CONFIG_GPIO_RCAR=y
+ CONFIG_GPIO_UNIPHIER=y
+ CONFIG_GPIO_VISCONTI=y
+ CONFIG_GPIO_WCD934X=m
++CONFIG_GPIO_VF610=y
+ CONFIG_GPIO_XGENE=y
+ CONFIG_GPIO_XGENE_SB=y
+ CONFIG_GPIO_MAX732X=y
+diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
+index bac4cabef6073e..467ac2f768ac2b 100644
+--- a/arch/arm64/crypto/aes-neonbs-glue.c
++++ b/arch/arm64/crypto/aes-neonbs-glue.c
+@@ -227,8 +227,19 @@ static int ctr_encrypt(struct skcipher_request *req)
+ src += blocks * AES_BLOCK_SIZE;
+ }
+ if (nbytes && walk.nbytes == walk.total) {
++ u8 buf[AES_BLOCK_SIZE];
++ u8 *d = dst;
++
++ if (unlikely(nbytes < AES_BLOCK_SIZE))
++ src = dst = memcpy(buf + sizeof(buf) - nbytes,
++ src, nbytes);
++
+ neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
+ nbytes, walk.iv);
++
++ if (unlikely(nbytes < AES_BLOCK_SIZE))
++ memcpy(d, dst, nbytes);
++
+ nbytes = 0;
+ }
+ kernel_neon_end();
+diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
+index 6792a1f83f2ad4..a407f9cd549edc 100644
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
+ return acpi_cpu_get_madt_gicc(cpu)->uid;
+ }
+
++static inline int get_cpu_for_acpi_id(u32 uid)
++{
++ int cpu;
++
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
++ if (acpi_cpu_get_madt_gicc(cpu) &&
++ uid == get_acpi_id_for_cpu(cpu))
++ return cpu;
++
++ return -EINVAL;
++}
++
+ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+ void __init acpi_init_cpus(void);
+ int apei_claim_sea(struct pt_regs *regs);
+diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
+index 94b486192e1f15..a3652b6bb740d9 100644
+--- a/arch/arm64/include/asm/alternative-macros.h
++++ b/arch/arm64/include/asm/alternative-macros.h
+@@ -229,7 +229,7 @@ alternative_has_cap_likely(const unsigned long cpucap)
+ compiletime_assert(cpucap < ARM64_NCAPS,
+ "cpucap must be < ARM64_NCAPS");
+
+- asm_volatile_goto(
++ asm goto(
+ ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
+ :
+ : [cpucap] "i" (cpucap)
+@@ -247,7 +247,7 @@ alternative_has_cap_unlikely(const unsigned long cpucap)
+ compiletime_assert(cpucap < ARM64_NCAPS,
+ "cpucap must be < ARM64_NCAPS");
+
+- asm_volatile_goto(
++ asm goto(
+ ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
+ :
+ : [cpucap] "i" (cpucap)
+diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
+index 18dc2fb3d7b7b2..c27404fa4418ad 100644
+--- a/arch/arm64/include/asm/arm_pmuv3.h
++++ b/arch/arm64/include/asm/arm_pmuv3.h
+@@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ }
+
+-static inline void write_pmcr(u32 val)
++static inline void write_pmcr(u64 val)
+ {
+ write_sysreg(val, pmcr_el0);
+ }
+
+-static inline u32 read_pmcr(void)
++static inline u64 read_pmcr(void)
+ {
+ return read_sysreg(pmcr_el0);
+ }
+@@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
+ return read_sysreg(pmccntr_el0);
+ }
+
+-static inline void write_pmxevcntr(u32 val)
+-{
+- write_sysreg(val, pmxevcntr_el0);
+-}
+-
+-static inline u32 read_pmxevcntr(void)
+-{
+- return read_sysreg(pmxevcntr_el0);
+-}
+-
+-static inline void write_pmxevtyper(u32 val)
+-{
+- write_sysreg(val, pmxevtyper_el0);
+-}
+-
+ static inline void write_pmcntenset(u32 val)
+ {
+ write_sysreg(val, pmcntenset_el0);
+@@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
+ write_sysreg(val, pmintenclr_el1);
+ }
+
+-static inline void write_pmccfiltr(u32 val)
++static inline void write_pmccfiltr(u64 val)
+ {
+ write_sysreg(val, pmccfiltr_el0);
+ }
+@@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
+ write_sysreg(val, pmuserenr_el0);
+ }
+
+-static inline u32 read_pmceid0(void)
++static inline u64 read_pmceid0(void)
+ {
+ return read_sysreg(pmceid0_el0);
+ }
+
+-static inline u32 read_pmceid1(void)
++static inline u64 read_pmceid1(void)
+ {
+ return read_sysreg(pmceid1_el0);
+ }
+diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
+index c762038ba40093..6e73809f6492a2 100644
+--- a/arch/arm64/include/asm/asm-bug.h
++++ b/arch/arm64/include/asm/asm-bug.h
+@@ -28,6 +28,7 @@
+ 14470: .long 14471f - .; \
+ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+ .short flags; \
++ .align 2; \
+ .popsection; \
+ 14471:
+ #else
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index cf2987464c1860..1ca947d5c93963 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -40,6 +40,10 @@
+ */
+ #define dgh() asm volatile("hint #6" : : : "memory")
+
++#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
++ SB_BARRIER_INSN"nop\n", \
++ ARM64_HAS_SB))
++
+ #ifdef CONFIG_ARM64_PSEUDO_NMI
+ #define pmr_sync() \
+ do { \
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 74d00feb62f03e..488f8e75134959 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -61,6 +61,7 @@
+ #define ARM_CPU_IMP_HISI 0x48
+ #define ARM_CPU_IMP_APPLE 0x61
+ #define ARM_CPU_IMP_AMPERE 0xC0
++#define ARM_CPU_IMP_MICROSOFT 0x6D
+
+ #define ARM_CPU_PART_AEM_V8 0xD0F
+ #define ARM_CPU_PART_FOUNDATION 0xD00
+@@ -85,8 +86,18 @@
+ #define ARM_CPU_PART_CORTEX_X2 0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2 0xD49
+ #define ARM_CPU_PART_CORTEX_A78C 0xD4B
+-
+-#define APM_CPU_PART_POTENZA 0x000
++#define ARM_CPU_PART_CORTEX_X1C 0xD4C
++#define ARM_CPU_PART_CORTEX_X3 0xD4E
++#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
++#define ARM_CPU_PART_CORTEX_A720 0xD81
++#define ARM_CPU_PART_CORTEX_X4 0xD82
++#define ARM_CPU_PART_NEOVERSE_V3 0xD84
++#define ARM_CPU_PART_CORTEX_X925 0xD85
++#define ARM_CPU_PART_CORTEX_A725 0xD87
++#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
++
++#define APM_CPU_PART_XGENE 0x000
++#define APM_CPU_VAR_POTENZA 0x00
+
+ #define CAVIUM_CPU_PART_THUNDERX 0x0A1
+ #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+@@ -133,6 +144,9 @@
+ #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
+
+ #define AMPERE_CPU_PART_AMPERE1 0xAC3
++#define AMPERE_CPU_PART_AMPERE1A 0xAC4
++
++#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
+
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+@@ -155,6 +169,15 @@
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+ #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
++#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
++#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
++#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
++#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
++#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
++#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
++#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
++#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+@@ -192,6 +215,8 @@
+ #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
+ #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
+ #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
++#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A)
++#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
+
+ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
+diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
+index ae35939f395bb1..1cdae1b4f03bee 100644
+--- a/arch/arm64/include/asm/esr.h
++++ b/arch/arm64/include/asm/esr.h
+@@ -10,63 +10,63 @@
+ #include <asm/memory.h>
+ #include <asm/sysreg.h>
+
+-#define ESR_ELx_EC_UNKNOWN (0x00)
+-#define ESR_ELx_EC_WFx (0x01)
++#define ESR_ELx_EC_UNKNOWN UL(0x00)
++#define ESR_ELx_EC_WFx UL(0x01)
+ /* Unallocated EC: 0x02 */
+-#define ESR_ELx_EC_CP15_32 (0x03)
+-#define ESR_ELx_EC_CP15_64 (0x04)
+-#define ESR_ELx_EC_CP14_MR (0x05)
+-#define ESR_ELx_EC_CP14_LS (0x06)
+-#define ESR_ELx_EC_FP_ASIMD (0x07)
+-#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
+-#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
++#define ESR_ELx_EC_CP15_32 UL(0x03)
++#define ESR_ELx_EC_CP15_64 UL(0x04)
++#define ESR_ELx_EC_CP14_MR UL(0x05)
++#define ESR_ELx_EC_CP14_LS UL(0x06)
++#define ESR_ELx_EC_FP_ASIMD UL(0x07)
++#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
++#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
+ /* Unallocated EC: 0x0A - 0x0B */
+-#define ESR_ELx_EC_CP14_64 (0x0C)
+-#define ESR_ELx_EC_BTI (0x0D)
+-#define ESR_ELx_EC_ILL (0x0E)
++#define ESR_ELx_EC_CP14_64 UL(0x0C)
++#define ESR_ELx_EC_BTI UL(0x0D)
++#define ESR_ELx_EC_ILL UL(0x0E)
+ /* Unallocated EC: 0x0F - 0x10 */
+-#define ESR_ELx_EC_SVC32 (0x11)
+-#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
+-#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
++#define ESR_ELx_EC_SVC32 UL(0x11)
++#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */
++#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */
+ /* Unallocated EC: 0x14 */
+-#define ESR_ELx_EC_SVC64 (0x15)
+-#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
+-#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
+-#define ESR_ELx_EC_SYS64 (0x18)
+-#define ESR_ELx_EC_SVE (0x19)
+-#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
++#define ESR_ELx_EC_SVC64 UL(0x15)
++#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */
++#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */
++#define ESR_ELx_EC_SYS64 UL(0x18)
++#define ESR_ELx_EC_SVE UL(0x19)
++#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */
+ /* Unallocated EC: 0x1B */
+-#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+-#define ESR_ELx_EC_SME (0x1D)
++#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */
++#define ESR_ELx_EC_SME UL(0x1D)
+ /* Unallocated EC: 0x1E */
+-#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
+-#define ESR_ELx_EC_IABT_LOW (0x20)
+-#define ESR_ELx_EC_IABT_CUR (0x21)
+-#define ESR_ELx_EC_PC_ALIGN (0x22)
++#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */
++#define ESR_ELx_EC_IABT_LOW UL(0x20)
++#define ESR_ELx_EC_IABT_CUR UL(0x21)
++#define ESR_ELx_EC_PC_ALIGN UL(0x22)
+ /* Unallocated EC: 0x23 */
+-#define ESR_ELx_EC_DABT_LOW (0x24)
+-#define ESR_ELx_EC_DABT_CUR (0x25)
+-#define ESR_ELx_EC_SP_ALIGN (0x26)
+-#define ESR_ELx_EC_MOPS (0x27)
+-#define ESR_ELx_EC_FP_EXC32 (0x28)
++#define ESR_ELx_EC_DABT_LOW UL(0x24)
++#define ESR_ELx_EC_DABT_CUR UL(0x25)
++#define ESR_ELx_EC_SP_ALIGN UL(0x26)
++#define ESR_ELx_EC_MOPS UL(0x27)
++#define ESR_ELx_EC_FP_EXC32 UL(0x28)
+ /* Unallocated EC: 0x29 - 0x2B */
+-#define ESR_ELx_EC_FP_EXC64 (0x2C)
++#define ESR_ELx_EC_FP_EXC64 UL(0x2C)
+ /* Unallocated EC: 0x2D - 0x2E */
+-#define ESR_ELx_EC_SERROR (0x2F)
+-#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+-#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+-#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+-#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+-#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+-#define ESR_ELx_EC_WATCHPT_CUR (0x35)
++#define ESR_ELx_EC_SERROR UL(0x2F)
++#define ESR_ELx_EC_BREAKPT_LOW UL(0x30)
++#define ESR_ELx_EC_BREAKPT_CUR UL(0x31)
++#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32)
++#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33)
++#define ESR_ELx_EC_WATCHPT_LOW UL(0x34)
++#define ESR_ELx_EC_WATCHPT_CUR UL(0x35)
+ /* Unallocated EC: 0x36 - 0x37 */
+-#define ESR_ELx_EC_BKPT32 (0x38)
++#define ESR_ELx_EC_BKPT32 UL(0x38)
+ /* Unallocated EC: 0x39 */
+-#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
++#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */
+ /* Unallocated EC: 0x3B */
+-#define ESR_ELx_EC_BRK64 (0x3C)
++#define ESR_ELx_EC_BRK64 UL(0x3C)
+ /* Unallocated EC: 0x3D - 0x3F */
+-#define ESR_ELx_EC_MAX (0x3F)
++#define ESR_ELx_EC_MAX UL(0x3F)
+
+ #define ESR_ELx_EC_SHIFT (26)
+ #define ESR_ELx_EC_WIDTH (6)
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index 8df46f186c64b8..7415c63b41874d 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -36,13 +36,13 @@
+ * When we defined the maximum SVE vector length we defined the ABI so
+ * that the maximum vector length included all the reserved for future
+ * expansion bits in ZCR rather than those just currently defined by
+- * the architecture. While SME follows a similar pattern the fact that
+- * it includes a square matrix means that any allocations that attempt
+- * to cover the maximum potential vector length (such as happen with
+- * the regset used for ptrace) end up being extremely large. Define
+- * the much lower actual limit for use in such situations.
++ * the architecture. Using this length to allocate worst size buffers
++ * results in excessively large allocations, and this effect is even
++ * more pronounced for SME due to ZA. Define more suitable VLs for
++ * these situations.
+ */
+-#define SME_VQ_MAX 16
++#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
++#define SME_VQ_MAX ((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
+
+ struct task_struct;
+
+@@ -360,6 +360,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
+ extern unsigned int sme_get_vl(void);
+ extern int sme_set_current_vl(unsigned long arg);
+ extern int sme_get_current_vl(void);
++extern void sme_suspend_exit(void);
+
+ /*
+ * Return how many bytes of memory are required to store the full SME
+@@ -395,6 +396,7 @@ static inline int sme_max_vl(void) { return 0; }
+ static inline int sme_max_virtualisable_vl(void) { return 0; }
+ static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
+ static inline int sme_get_current_vl(void) { return -EINVAL; }
++static inline void sme_suspend_exit(void) { }
+
+ static inline size_t sme_state_size(struct task_struct const *task)
+ {
+diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
+index 81bbfa3a035bd2..a1020285ea7504 100644
+--- a/arch/arm64/include/asm/irq_work.h
++++ b/arch/arm64/include/asm/irq_work.h
+@@ -2,8 +2,6 @@
+ #ifndef __ASM_IRQ_WORK_H
+ #define __ASM_IRQ_WORK_H
+
+-extern void arch_irq_work_raise(void);
+-
+ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ return true;
+diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
+index 48ddc0f45d2283..4b99159150829b 100644
+--- a/arch/arm64/include/asm/jump_label.h
++++ b/arch/arm64/include/asm/jump_label.h
+@@ -13,12 +13,13 @@
+ #include <linux/types.h>
+ #include <asm/insn.h>
+
++#define HAVE_JUMP_LABEL_BATCH
+ #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+ static __always_inline bool arch_static_branch(struct static_key * const key,
+ const bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ "1: nop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align 3 \n\t"
+@@ -35,7 +36,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
+ static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+ const bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ "1: b %l[l_yes] \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align 3 \n\t"
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 7f7d9b1df4e5ad..07bdf5dd8ebef5 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -826,6 +826,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
++ /*
++ * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
++ * dirtiness again.
++ */
++ if (pte_sw_dirty(pte))
++ pte = pte_mkdirty(pte);
+ return pte;
+ }
+
+diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
+index f4af547ef54caa..2e4d7da74fb87a 100644
+--- a/arch/arm64/include/asm/setup.h
++++ b/arch/arm64/include/asm/setup.h
+@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
+ extern bool rodata_enabled;
+ extern bool rodata_full;
+
+- if (arg && !strcmp(arg, "full")) {
++ if (!arg)
++ return false;
++
++ if (!strcmp(arg, "full")) {
++ rodata_enabled = rodata_full = true;
++ return true;
++ }
++
++ if (!strcmp(arg, "off")) {
++ rodata_enabled = rodata_full = false;
++ return true;
++ }
++
++ if (!strcmp(arg, "on")) {
+ rodata_enabled = true;
+- rodata_full = true;
++ rodata_full = false;
+ return true;
+ }
+
+diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
+index 17f687510c4851..7a0e7b59be9b9f 100644
+--- a/arch/arm64/include/asm/syscall_wrapper.h
++++ b/arch/arm64/include/asm/syscall_wrapper.h
+@@ -44,9 +44,6 @@
+ return sys_ni_syscall(); \
+ }
+
+-#define COMPAT_SYS_NI(name) \
+- SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
+-
+ #endif /* CONFIG_COMPAT */
+
+ #define __SYSCALL_DEFINEx(x, name, ...) \
+@@ -82,6 +79,5 @@
+ }
+
+ asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
+-#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
+
+ #endif /* __ASM_SYSCALL_WRAPPER_H */
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index b149cf9f91bc96..b73baaf8ae47be 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -152,12 +152,18 @@ static inline unsigned long get_trans_granule(void)
+ #define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)
+
+ /*
+- * Generate 'num' values from -1 to 30 with -1 rejected by the
+- * __flush_tlb_range() loop below.
++ * Generate 'num' values from -1 to 31 with -1 rejected by the
++ * __flush_tlb_range() loop below. Its return value is only
++ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
++ * 'pages' is more than that, you must iterate over the overall
++ * range.
+ */
+-#define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
+-#define __TLBI_RANGE_NUM(pages, scale) \
+- ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
++#define __TLBI_RANGE_NUM(pages, scale) \
++ ({ \
++ int __pages = min((pages), \
++ __TLBI_RANGE_PAGES(31, (scale))); \
++ (__pages >> (5 * (scale) + 1)) - 1; \
++ })
+
+ /*
+ * TLB Invalidation
+@@ -351,29 +357,25 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+ * entries one by one at the granularity of 'stride'. If the TLB
+ * range ops are supported, then:
+ *
+- * 1. If 'pages' is odd, flush the first page through non-range
+- * operations;
+- *
+- * 2. For remaining pages: the minimum range granularity is decided
+- * by 'scale', so multiple range TLBI operations may be required.
+- * Start from scale = 0, flush the corresponding number of pages
+- * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+- * until no pages left.
++ * 1. The minimum range granularity is decided by 'scale', so multiple range
++ * TLBI operations may be required. Start from scale = 3, flush the largest
++ * possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
++ * requested range, then decrement scale and continue until one or zero pages
++ * are left.
+ *
+- * Note that certain ranges can be represented by either num = 31 and
+- * scale or num = 0 and scale + 1. The loop below favours the latter
+- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
++ * 2. If there is 1 page remaining, flush it through non-range operations. Range
++ * operations can only span an even number of pages.
+ */
+ #define __flush_tlb_range_op(op, start, pages, stride, \
+ asid, tlb_level, tlbi_user) \
+ do { \
+ int num = 0; \
+- int scale = 0; \
++ int scale = 3; \
+ unsigned long addr; \
+ \
+ while (pages > 0) { \
+ if (!system_supports_tlb_range() || \
+- pages % 2 == 1) { \
++ pages == 1) { \
+ addr = __TLBI_VADDR(start, asid); \
+ __tlbi_level(op, addr, tlb_level); \
+ if (tlbi_user) \
+@@ -393,7 +395,7 @@ do { \
+ start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+ pages -= __TLBI_RANGE_PAGES(num, scale); \
+ } \
+- scale++; \
++ scale--; \
+ } \
+ } while (0)
+
+diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
+index 78b68311ec8192..545a4a7b5371ce 100644
+--- a/arch/arm64/include/asm/unistd32.h
++++ b/arch/arm64/include/asm/unistd32.h
+@@ -840,7 +840,7 @@ __SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
+ #define __NR_ppoll_time64 414
+ __SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
+ #define __NR_io_pgetevents_time64 416
+-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
++__SYSCALL(__NR_io_pgetevents_time64, compat_sys_io_pgetevents_time64)
+ #define __NR_recvmmsg_time64 417
+ __SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
+ #define __NR_mq_timedsend_time64 418
+diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
+index 2b09495499c618..014b02897f8e22 100644
+--- a/arch/arm64/include/asm/uprobes.h
++++ b/arch/arm64/include/asm/uprobes.h
+@@ -10,11 +10,9 @@
+ #include <asm/insn.h>
+ #include <asm/probes.h>
+
+-#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
+-
+ #define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
+ #define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
+-#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
++#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
+
+ typedef __le32 uprobe_opcode_t;
+
+@@ -23,8 +21,8 @@ struct arch_uprobe_task {
+
+ struct arch_uprobe {
+ union {
+- u8 insn[MAX_UINSN_BYTES];
+- u8 ixol[MAX_UINSN_BYTES];
++ __le32 insn;
++ __le32 ixol;
+ };
+ struct arch_probe_insn api;
+ bool simulate;
+diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
+index f23c1dc3f002fe..8f003db7a6967a 100644
+--- a/arch/arm64/include/uapi/asm/sigcontext.h
++++ b/arch/arm64/include/uapi/asm/sigcontext.h
+@@ -312,10 +312,10 @@ struct zt_context {
+ ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
+
+-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
++#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
+
+ #define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
+- (SVE_SIG_ZREG_SIZE(vq) * n))
++ (SVE_SIG_ZREG_SIZE(vq) * (n)))
+
+ #define ZA_SIG_CONTEXT_SIZE(vq) \
+ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
+@@ -326,7 +326,7 @@ struct zt_context {
+
+ #define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
+
+-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
++#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
+
+ #define ZT_SIG_CONTEXT_SIZE(n) \
+ (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
+diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
+index e51535a5f939a9..2465f291c7e17c 100644
+--- a/arch/arm64/kernel/acpi_numa.c
++++ b/arch/arm64/kernel/acpi_numa.c
+@@ -27,24 +27,13 @@
+
+ #include <asm/numa.h>
+
+-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
++static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
+
+ int __init acpi_numa_get_nid(unsigned int cpu)
+ {
+ return acpi_early_node_map[cpu];
+ }
+
+-static inline int get_cpu_for_acpi_id(u32 uid)
+-{
+- int cpu;
+-
+- for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+- if (uid == get_acpi_id_for_cpu(cpu))
+- return cpu;
+-
+- return -EINVAL;
+-}
+-
+ static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
+ const unsigned long end)
+ {
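Note on the initializer fix: "{ NUMA_NO_NODE }" sets only element 0, and C zero-fills the rest, so every other CPU silently mapped to node 0 instead of "no node". The GNU range designator fills the whole array. A small demonstration (NUMA_NO_NODE is -1 in the kernel, reproduced here for the sketch):

    #include <assert.h>

    #define NUMA_NO_NODE (-1)
    #define NR 4

    static int partial[NR] = { NUMA_NO_NODE };
    static int full[NR]    = { [0 ... NR - 1] = NUMA_NO_NODE };

    int main(void)
    {
            assert(partial[0] == -1 && partial[1] == 0);  /* looks like node 0 */
            assert(full[1] == -1 && full[NR - 1] == -1);
            return 0;
    }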
+diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
+index e459cfd3371171..d6b711e56df972 100644
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -464,6 +464,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
+ for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
+ struct insn_emulation *insn = insn_emulations[i];
+ bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
++ if (insn->status == INSN_UNAVAILABLE)
++ continue;
++
+ if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
+ pr_warn("CPU[%u] cannot support the emulation of %s",
+ cpu, insn->name);
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 5706e74c55786a..463b48d0f92500 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -390,6 +390,7 @@ static const struct midr_range erratum_1463225[] = {
+ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_2139208
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_2119858
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+@@ -403,6 +404,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
+ static const struct midr_range tsb_flush_fail_cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_2067961
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_2054223
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+@@ -415,6 +417,7 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
+ static struct midr_range trbe_write_out_of_range_cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_2253138
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_2224489
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+@@ -432,6 +435,54 @@ static struct midr_range broken_aarch32_aes[] = {
+ };
+ #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
+
++#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++static const struct midr_range erratum_spec_unpriv_load_list[] = {
++#ifdef CONFIG_ARM64_ERRATUM_3117295
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_2966298
++ /* Cortex-A520 r0p0 to r0p1 */
++ MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
++#endif
++ {},
++};
++#endif
++
++#ifdef CONFIG_ARM64_ERRATUM_3194386
++static const struct midr_range erratum_spec_ssbs_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
++ {}
++};
++#endif
++
++#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
++static const struct midr_range erratum_ac03_cpu_38_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
++ MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
++ {},
++};
++#endif
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+ {
+@@ -730,19 +781,26 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .cpu_enable = cpu_clear_bf16_from_user_emulation,
+ },
+ #endif
+-#ifdef CONFIG_ARM64_ERRATUM_2966298
++#ifdef CONFIG_ARM64_ERRATUM_3194386
++ {
++ .desc = "SSBS not fully self-synchronizing",
++ .capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
++ ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
++ },
++#endif
++#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ {
+- .desc = "ARM erratum 2966298",
+- .capability = ARM64_WORKAROUND_2966298,
++ .desc = "ARM errata 2966298, 3117295",
++ .capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
+ /* Cortex-A520 r0p0 - r0p1 */
+- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
++ ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
+ },
+ #endif
+ #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+ {
+ .desc = "AmpereOne erratum AC03_CPU_38",
+ .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
+- ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
++ ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
+ },
+ #endif
+ {
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 444a73c2e63858..7e96604559004b 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2190,6 +2190,17 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ }
+ #endif /* CONFIG_ARM64_MTE */
+
++static void user_feature_fixup(void)
++{
++ if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
++ struct arm64_ftr_reg *regp;
++
++ regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
++ if (regp)
++ regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK;
++ }
++}
++
+ static void elf_hwcap_fixup(void)
+ {
+ #ifdef CONFIG_ARM64_ERRATUM_1742098
+@@ -3345,6 +3356,7 @@ void __init setup_cpu_features(void)
+ u32 cwg;
+
+ setup_system_capabilities();
++ user_feature_fixup();
+ setup_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (system_supports_32bit_el0()) {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index a6030913cd58c4..7fcbee0f6c0e4e 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -428,16 +428,9 @@ alternative_else_nop_endif
+ ldp x28, x29, [sp, #16 * 14]
+
+ .if \el == 0
+-alternative_if ARM64_WORKAROUND_2966298
+- tlbi vale1, xzr
+- dsb nsh
+-alternative_else_nop_endif
+-alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+- ldr lr, [sp, #S_LR]
+- add sp, sp, #PT_REGS_SIZE // restore sp
+- eret
+-alternative_else_nop_endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++ alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0
++
+ msr far_el1, x29
+
+ ldr_this_cpu x30, this_cpu_vector, x29
+@@ -446,7 +439,18 @@ alternative_else_nop_endif
+ ldr lr, [sp, #S_LR] // restore x30
+ add sp, sp, #PT_REGS_SIZE // restore sp
+ br x29
++
++.L_skip_tramp_exit_\@:
+ #endif
++ ldr lr, [sp, #S_LR]
++ add sp, sp, #PT_REGS_SIZE // restore sp
++
++ /* This must be after the last explicit memory access */
++alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
++ tlbi vale1, xzr
++ dsb nsh
++alternative_else_nop_endif
++ eret
+ .else
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #PT_REGS_SIZE // restore sp
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 91e44ac7150f90..5cdfcc9e3e54b9 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1280,8 +1280,10 @@ void fpsimd_release_task(struct task_struct *dead_task)
+ */
+ void sme_alloc(struct task_struct *task, bool flush)
+ {
+- if (task->thread.sme_state && flush) {
+- memset(task->thread.sme_state, 0, sme_state_size(task));
++ if (task->thread.sme_state) {
++ if (flush)
++ memset(task->thread.sme_state, 0,
++ sme_state_size(task));
+ return;
+ }
+
+@@ -1404,6 +1406,22 @@ void __init sme_setup(void)
+ get_sme_default_vl());
+ }
+
++void sme_suspend_exit(void)
++{
++ u64 smcr = 0;
++
++ if (!system_supports_sme())
++ return;
++
++ if (system_supports_fa64())
++ smcr |= SMCR_ELx_FA64;
++ if (system_supports_sme2())
++ smcr |= SMCR_ELx_EZT0;
++
++ write_sysreg_s(smcr, SYS_SMCR_EL1);
++ write_sysreg_s(0, SYS_SMPRI_EL1);
++}
++
+ #endif /* CONFIG_ARM64_SME */
+
+ static void sve_init_regs(void)
+@@ -1684,7 +1702,7 @@ void fpsimd_preserve_current_state(void)
+ void fpsimd_signal_preserve_current_state(void)
+ {
+ fpsimd_preserve_current_state();
+- if (test_thread_flag(TIF_SVE))
++ if (current->thread.fp_type == FP_STATE_SVE)
+ sve_to_fpsimd(current);
+ }
+
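Note: the sme_alloc() restructuring closes an allocate-twice path: with storage already present and flush false, the old combined test fell through to the allocation below and leaked the existing buffer. The shape of the fix, reduced to plain C with illustrative names:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    static char *state;

    static void alloc_state(int flush, size_t size)
    {
            if (state) {            /* storage exists: never reallocate */
                    if (flush)
                            memset(state, 0, size);
                    return;
            }
            state = calloc(1, size);
    }

    int main(void)
    {
            alloc_state(1, 64);
            char *first = state;

            alloc_state(0, 64);     /* the old code would leak 'first' here */
            assert(state == first);
            return 0;
    }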
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 7b236994f0e150..6517bf2644a08b 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+ adr_l x1, __hyp_text_end
+ adr_l x2, dcache_clean_poc
+ blr x2
++
++ mov_q x0, INIT_SCTLR_EL2_MMU_OFF
++ pre_disable_mmu_workaround
++ msr sctlr_el2, x0
++ isb
+ 0:
+ mov_q x0, HCR_HOST_NVHE_FLAGS
+ msr hcr_el2, x0
+diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
+index 6ad5c6ef532962..85087e2df56498 100644
+--- a/arch/arm64/kernel/irq.c
++++ b/arch/arm64/kernel/irq.c
+@@ -22,6 +22,7 @@
+ #include <linux/vmalloc.h>
+ #include <asm/daifflags.h>
+ #include <asm/exception.h>
++#include <asm/numa.h>
+ #include <asm/softirq_stack.h>
+ #include <asm/stacktrace.h>
+ #include <asm/vmap_stack.h>
+@@ -47,17 +48,17 @@ static void init_irq_scs(void)
+
+ for_each_possible_cpu(cpu)
+ per_cpu(irq_shadow_call_stack_ptr, cpu) =
+- scs_alloc(cpu_to_node(cpu));
++ scs_alloc(early_cpu_to_node(cpu));
+ }
+
+ #ifdef CONFIG_VMAP_STACK
+-static void init_irq_stacks(void)
++static void __init init_irq_stacks(void)
+ {
+ int cpu;
+ unsigned long *p;
+
+ for_each_possible_cpu(cpu) {
+- p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
++ p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, early_cpu_to_node(cpu));
+ per_cpu(irq_stack_ptr, cpu) = p;
+ }
+ }
+diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
+index faf88ec9c48e8a..f63ea915d6ad25 100644
+--- a/arch/arm64/kernel/jump_label.c
++++ b/arch/arm64/kernel/jump_label.c
+@@ -7,11 +7,12 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/jump_label.h>
++#include <linux/smp.h>
+ #include <asm/insn.h>
+ #include <asm/patching.h>
+
+-void arch_jump_label_transform(struct jump_entry *entry,
+- enum jump_label_type type)
++bool arch_jump_label_transform_queue(struct jump_entry *entry,
++ enum jump_label_type type)
+ {
+ void *addr = (void *)jump_entry_code(entry);
+ u32 insn;
+@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
+ }
+
+ aarch64_insn_patch_text_nosync(addr, insn);
++ return true;
++}
++
++void arch_jump_label_transform_apply(void)
++{
++ kick_all_cpus_sync();
+ }
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index bd69a4e7cd6055..79200f21e12393 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+ switch (ELF64_R_TYPE(rela[i].r_info)) {
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- break;
+-
+ /*
+ * We only have to consider branch targets that resolve
+ * to symbols that are defined in a different section.
+@@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
+ {
+ int i = 0, j = numrels - 1;
+
+- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- return 0;
+-
+ while (i < j) {
+ if (branch_rela_needs_plt(syms, &rela[i], dstidx))
+ i++;
+diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
+index 968d5fffe23302..3496d6169e59b2 100644
+--- a/arch/arm64/kernel/probes/decode-insn.c
++++ b/arch/arm64/kernel/probes/decode-insn.c
+@@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
+ aarch64_insn_is_blr(insn) ||
+ aarch64_insn_is_ret(insn)) {
+ api->handler = simulate_br_blr_ret;
+- } else if (aarch64_insn_is_ldr_lit(insn)) {
+- api->handler = simulate_ldr_literal;
+- } else if (aarch64_insn_is_ldrsw_lit(insn)) {
+- api->handler = simulate_ldrsw_literal;
+ } else {
+ /*
+ * Instruction cannot be stepped out-of-line and we don't
+@@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+ probe_opcode_t insn = le32_to_cpu(*addr);
+ probe_opcode_t *scan_end = NULL;
+ unsigned long size = 0, offset = 0;
++ struct arch_probe_insn *api = &asi->api;
++
++ if (aarch64_insn_is_ldr_lit(insn)) {
++ api->handler = simulate_ldr_literal;
++ decoded = INSN_GOOD_NO_SLOT;
++ } else if (aarch64_insn_is_ldrsw_lit(insn)) {
++ api->handler = simulate_ldrsw_literal;
++ decoded = INSN_GOOD_NO_SLOT;
++ } else {
++ decoded = arm_probe_decode_insn(insn, &asi->api);
++ }
+
+ /*
+ * If there's a symbol defined in front of and near enough to
+@@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
+ else
+ scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
+ }
+- decoded = arm_probe_decode_insn(insn, &asi->api);
+
+ if (decoded != INSN_REJECTED && scan_end)
+ if (is_probed_address_atomic(addr - 1, scan_end))
+diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
+index 22d0b32524763e..b65334ab79d2b0 100644
+--- a/arch/arm64/kernel/probes/simulate-insn.c
++++ b/arch/arm64/kernel/probes/simulate-insn.c
+@@ -171,17 +171,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
+ void __kprobes
+ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+ {
+- u64 *load_addr;
++ unsigned long load_addr;
+ int xn = opcode & 0x1f;
+- int disp;
+
+- disp = ldr_displacement(opcode);
+- load_addr = (u64 *) (addr + disp);
++ load_addr = addr + ldr_displacement(opcode);
+
+ if (opcode & (1 << 30)) /* x0-x30 */
+- set_x_reg(regs, xn, *load_addr);
++ set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
+ else /* w0-w30 */
+- set_w_reg(regs, xn, *load_addr);
++ set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+ }
+@@ -189,14 +187,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
+ void __kprobes
+ simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
+ {
+- s32 *load_addr;
++ unsigned long load_addr;
+ int xn = opcode & 0x1f;
+- int disp;
+
+- disp = ldr_displacement(opcode);
+- load_addr = (s32 *) (addr + disp);
++ load_addr = addr + ldr_displacement(opcode);
+
+- set_x_reg(regs, xn, *load_addr);
++ set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
+
+ instruction_pointer_set(regs, instruction_pointer(regs) + 4);
+ }
+diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
+index d49aef2657cdf7..a2f137a595fc1c 100644
+--- a/arch/arm64/kernel/probes/uprobes.c
++++ b/arch/arm64/kernel/probes/uprobes.c
+@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ return -EINVAL;
+
+- insn = *(probe_opcode_t *)(&auprobe->insn[0]);
++ insn = le32_to_cpu(auprobe->insn);
+
+ switch (arm_probe_decode_insn(insn, &auprobe->api)) {
+ case INSN_REJECTED:
+@@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ if (!auprobe->simulate)
+ return false;
+
+- insn = *(probe_opcode_t *)(&auprobe->insn[0]);
++ insn = le32_to_cpu(auprobe->insn);
+ addr = instruction_pointer(regs);
+
+ if (auprobe->api.handler)
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index 05f40c4e18fda2..57503dc4b22faf 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -558,6 +558,18 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+
+ /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+ set_pstate_ssbs(0);
++
++ /*
++ * SSBS is self-synchronizing and is intended to affect subsequent
++ * speculative instructions, but some CPUs can speculate with a stale
++ * value of SSBS.
++ *
++ * Mitigate this with an unconditional speculation barrier, as CPUs
++ * could mis-speculate branches and bypass a conditional barrier.
++ */
++ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
++ spec_bar();
++
+ return SPECTRE_MITIGATED;
+ }
+
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 20d7ef82de90aa..d95416b93a9dd5 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -728,7 +728,6 @@ static void sve_init_header_from_task(struct user_sve_header *header,
+ {
+ unsigned int vq;
+ bool active;
+- bool fpsimd_only;
+ enum vec_type task_type;
+
+ memset(header, 0, sizeof(*header));
+@@ -744,12 +743,10 @@ static void sve_init_header_from_task(struct user_sve_header *header,
+ case ARM64_VEC_SVE:
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+- fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
+ break;
+ case ARM64_VEC_SME:
+ if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+- fpsimd_only = false;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+@@ -757,7 +754,7 @@ static void sve_init_header_from_task(struct user_sve_header *header,
+ }
+
+ if (active) {
+- if (fpsimd_only) {
++ if (target->thread.fp_type == FP_STATE_FPSIMD) {
+ header->flags |= SVE_PT_REGS_FPSIMD;
+ } else {
+ header->flags |= SVE_PT_REGS_SVE;
+@@ -1107,12 +1104,13 @@ static int za_set(struct task_struct *target,
+ }
+ }
+
+- /* Allocate/reinit ZA storage */
+- sme_alloc(target, true);
+- if (!target->thread.sme_state) {
+- ret = -ENOMEM;
+- goto out;
+- }
++ /*
++ * Only flush the storage if PSTATE.ZA was not already set,
++ * otherwise preserve any existing data.
++ */
++ sme_alloc(target, !thread_za_enabled(&target->thread));
++ if (!target->thread.sme_state)
++ return -ENOMEM;
+
+ /* If there is no data then disable ZA */
+ if (!count) {
+@@ -1498,7 +1496,8 @@ static const struct user_regset aarch64_regsets[] = {
+ #ifdef CONFIG_ARM64_SVE
+ [REGSET_SVE] = { /* Scalable Vector Extension */
+ .core_note_type = NT_ARM_SVE,
+- .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
++ .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
++ SVE_PT_REGS_SVE),
+ SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 417a8a86b2db59..c583d1f335f8c7 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -371,9 +371,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
+ smp_init_cpus();
+ smp_build_mpidr_hash();
+
+- /* Init percpu seeds for random tags after cpus are set up. */
+- kasan_init_sw_tags();
+-
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ /*
+ * Make sure init_thread_info.ttbr0 always generates translation
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 0e8beb3349ea2a..425b1bc17a3f6d 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -242,7 +242,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
+ vl = task_get_sme_vl(current);
+ vq = sve_vq_from_vl(vl);
+ flags |= SVE_SIG_FLAG_SM;
+- } else if (test_thread_flag(TIF_SVE)) {
++ } else if (current->thread.fp_type == FP_STATE_SVE) {
+ vq = sve_vq_from_vl(vl);
+ }
+
+@@ -878,7 +878,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
+ if (system_supports_sve() || system_supports_sme()) {
+ unsigned int vq = 0;
+
+- if (add_all || test_thread_flag(TIF_SVE) ||
++ if (add_all || current->thread.fp_type == FP_STATE_SVE ||
+ thread_sm_enabled(&current->thread)) {
+ int vl = max(sve_max_vl(), sme_max_vl());
+
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 960b98b43506dd..14365ef8424402 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -459,6 +459,8 @@ void __init smp_prepare_boot_cpu(void)
+ init_gic_priority_masking();
+
+ kasan_init_hw_tags();
++ /* Init percpu seeds for random tags after cpus are set up. */
++ kasan_init_sw_tags();
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 0fbdf5fe64d8da..045af2bfd656a0 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -12,6 +12,7 @@
+ #include <asm/daifflags.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/exec.h>
++#include <asm/fpsimd.h>
+ #include <asm/mte.h>
+ #include <asm/memory.h>
+ #include <asm/mmu_context.h>
+@@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
+ */
+ spectre_v4_enable_mitigation(NULL);
+
++ sme_suspend_exit();
++
+ /* Restore additional feature-specific configuration */
+ ptrauth_suspend_exit();
+ }
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 9a70d9746b661b..f090e39f69bc4a 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -56,17 +56,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ syscall_set_return_value(current, regs, 0, ret);
+
+ /*
+- * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+- * but not enough for arm64 stack utilization comfort. To keep
+- * reasonable stack head room, reduce the maximum offset to 9 bits.
++ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
++ * bits. The actual entropy will be further reduced by the compiler
++ * when applying stack alignment constraints: the AAPCS mandates a
++ * 16-byte aligned SP at function boundaries, which will remove the
++ * 4 low bits from any entropy chosen here.
+ *
+- * The actual entropy will be further reduced by the compiler when
+- * applying stack alignment constraints: the AAPCS mandates a
+- * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
+- *
+- * The resulting 5 bits of entropy is seen in SP[8:4].
++ * The resulting 6 bits of entropy is seen in SP[9:4].
+ */
+- choose_random_kstack_offset(get_random_u16() & 0x1FF);
++ choose_random_kstack_offset(get_random_u16());
+ }
+
+ static inline bool has_syscall_work(unsigned long flags)
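Note: the updated comment's arithmetic can be checked directly: the generic KSTACK_OFFSET_MAX() mask keeps 10 bits, and the AAPCS 16-byte SP alignment then discards bits [3:0], leaving 64 distinct offsets in SP[9:4]. A sketch assuming the generic 0x3FF mask from randomize_kstack.h:

    #include <assert.h>

    #define KSTACK_OFFSET_MAX(x)    ((x) & 0x3FF)           /* 10 bits */

    int main(void)
    {
            unsigned int raw = 0xABCD;                      /* e.g. get_random_u16() */
            unsigned int off = KSTACK_OFFSET_MAX(raw);      /* 0..1023 */
            unsigned int sp_delta = off & ~0xFU;            /* 16-byte aligned */

            assert(off <= 0x3FF);
            assert((sp_delta & 0xF) == 0);
            /* 1024 / 16 = 64 distinct SP values: 6 bits in SP[9:4]. */
            assert(((0x3FF & ~0xFU) / 16) + 1 == 64);
            return 0;
    }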
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index fe7a53c6781f15..8818287f10955c 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -78,13 +78,3 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ # Actual build commands
+ quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
+-
+-# Install commands for the unstripped file
+-quiet_cmd_vdso_install = INSTALL $@
+- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+-
+-vdso.so: $(obj)/vdso.so.dbg
+- @mkdir -p $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-vdso_install: vdso.so
+diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
+index 2f73e5bca213f8..1f911a76c5af39 100644
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -172,13 +172,3 @@ gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
+ quiet_cmd_vdsosym = VDSOSYM $@
+ # The AArch64 nm should be able to read an AArch32 binary
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+-
+-# Install commands for the unstripped file
+-quiet_cmd_vdso_install = INSTALL32 $@
+- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
+-
+-vdso.so: $(obj)/vdso.so.dbg
+- @mkdir -p $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-vdso_install: vdso.so
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 4866b3f7b4ea38..685cc436146a5a 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -407,7 +407,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_timer_vcpu_terminate(vcpu);
+ kvm_pmu_vcpu_destroy(vcpu);
+-
++ kvm_vgic_vcpu_destroy(vcpu);
+ kvm_arm_vcpu_destroy(vcpu);
+ }
+
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 95f6945c443252..efe82cc86bd1f3 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ case PSR_AA32_MODE_SVC:
+ case PSR_AA32_MODE_ABT:
+ case PSR_AA32_MODE_UND:
++ case PSR_AA32_MODE_SYS:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
+@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ int i, nr_reg;
+
+- switch (*vcpu_cpsr(vcpu)) {
++ switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
+ /*
+ * Either we are dealing with user mode, and only the
+ * first 15 registers (+ PC) must be narrowed to 32bit.
+@@ -874,7 +875,7 @@ u32 __attribute_const__ kvm_target_cpu(void)
+ break;
+ case ARM_CPU_IMP_APM:
+ switch (part_number) {
+- case APM_CPU_PART_POTENZA:
++ case APM_CPU_PART_XGENE:
+ return KVM_ARM_TARGET_XGENE_POTENZA;
+ }
+ break;
+diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
+index f98cbe2626a1cb..19efb41aab805d 100644
+--- a/arch/arm64/kvm/hyp/aarch32.c
++++ b/arch/arm64/kvm/hyp/aarch32.c
+@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+ u32 cpsr_cond;
+ int cond;
+
+- /* Top two bits non-zero? Unconditional. */
+- if (kvm_vcpu_get_esr(vcpu) >> 30)
++ /*
++ * These are the exception classes that could fire with a
++ * conditional instruction.
++ */
++ switch (kvm_vcpu_trap_get_class(vcpu)) {
++ case ESR_ELx_EC_CP15_32:
++ case ESR_ELx_EC_CP15_64:
++ case ESR_ELx_EC_CP14_MR:
++ case ESR_ELx_EC_CP14_LS:
++ case ESR_ELx_EC_FP_ASIMD:
++ case ESR_ELx_EC_CP10_ID:
++ case ESR_ELx_EC_CP14_64:
++ case ESR_ELx_EC_SVC32:
++ break;
++ default:
+ return true;
++ }
+
+ /* Is condition field valid? */
+ cond = kvm_vcpu_get_condition(vcpu);
+diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+index fe5472a184a37d..97c527ef53c2ad 100644
+--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
++++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+@@ -16,7 +16,7 @@ struct hyp_pool {
+ * API at EL2.
+ */
+ hyp_spinlock_t lock;
+- struct list_head free_area[MAX_ORDER + 1];
++ struct list_head free_area[NR_PAGE_ORDERS];
+ phys_addr_t range_start;
+ phys_addr_t range_end;
+ unsigned short max_order;
+diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
+index 6e4dba9eadef52..8d21ab904f1a98 100644
+--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
++++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
+@@ -415,9 +415,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
+ return;
+ }
+
+-static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+- struct arm_smccc_res *res,
+- struct kvm_cpu_context *ctxt)
++static void __do_ffa_mem_xfer(const u64 func_id,
++ struct arm_smccc_res *res,
++ struct kvm_cpu_context *ctxt)
+ {
+ DECLARE_REG(u32, len, ctxt, 1);
+ DECLARE_REG(u32, fraglen, ctxt, 2);
+@@ -428,9 +428,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+ u32 offset, nr_ranges;
+ int ret = 0;
+
+- BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
+- func_id != FFA_FN64_MEM_LEND);
+-
+ if (addr_mbz || npages_mbz || fraglen > len ||
+ fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+@@ -449,6 +446,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+ goto out_unlock;
+ }
+
++ if (len > ffa_desc_buf.len) {
++ ret = FFA_RET_NO_MEMORY;
++ goto out_unlock;
++ }
++
+ buf = hyp_buffers.tx;
+ memcpy(buf, host_buffers.tx, fraglen);
+
+@@ -498,6 +500,13 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+ goto out_unlock;
+ }
+
++#define do_ffa_mem_xfer(fid, res, ctxt) \
++ do { \
++ BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \
++ (fid) != FFA_FN64_MEM_LEND); \
++ __do_ffa_mem_xfer((fid), (res), (ctxt)); \
++ } while (0);
++
+ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+ {
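Note: dropping __always_inline means func_id is no longer guaranteed to be a compile-time constant inside the function body, so the BUILD_BUG_ON moves out into a wrapper macro, where the argument is still a literal at every call site. The pattern reduced to standard C, with _Static_assert standing in for the kernel's BUILD_BUG_ON:

    #include <stdio.h>

    #define BUILD_BUG_ON(cond) _Static_assert(!(cond), "BUILD_BUG_ON")

    enum { FN_MEM_SHARE = 0x73, FN_MEM_LEND = 0x74 };       /* example IDs */

    static void __do_xfer(int func_id)
    {
            printf("xfer %#x\n", func_id);
    }

    #define do_xfer(fid)                                    \
            do {                                            \
                    BUILD_BUG_ON((fid) != FN_MEM_SHARE &&   \
                                 (fid) != FN_MEM_LEND);     \
                    __do_xfer(fid);                         \
            } while (0)

    int main(void)
    {
            do_xfer(FN_MEM_SHARE);  /* compiles */
            /* do_xfer(0x99); */    /* would fail at compile time */
            return 0;
    }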
+diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
+index f155b8c9e98c7f..ca0bf0b92ca09e 100644
+--- a/arch/arm64/kvm/hyp/pgtable.c
++++ b/arch/arm64/kvm/hyp/pgtable.c
+@@ -523,7 +523,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
+
+ kvm_clear_pte(ctx->ptep);
+ dsb(ishst);
+- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
++ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), 0);
+ } else {
+ if (ctx->end - ctx->addr < granule)
+ return -EINVAL;
+@@ -805,12 +805,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
+ * Perform the appropriate TLB invalidation based on the
+ * evicted pte value (if any).
+ */
+- if (kvm_pte_table(ctx->old, ctx->level))
+- kvm_tlb_flush_vmid_range(mmu, ctx->addr,
+- kvm_granule_size(ctx->level));
+- else if (kvm_pte_valid(ctx->old))
++ if (kvm_pte_table(ctx->old, ctx->level)) {
++ u64 size = kvm_granule_size(ctx->level);
++ u64 addr = ALIGN_DOWN(ctx->addr, size);
++
++ kvm_tlb_flush_vmid_range(mmu, addr, size);
++ } else if (kvm_pte_valid(ctx->old)) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+ ctx->addr, ctx->level);
++ }
+ }
+
+ if (stage2_pte_is_counted(ctx->old))
+@@ -858,9 +861,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
+ if (kvm_pte_valid(ctx->old)) {
+ kvm_clear_pte(ctx->ptep);
+
+- if (!stage2_unmap_defer_tlb_flush(pgt))
+- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+- ctx->addr, ctx->level);
++ if (kvm_pte_table(ctx->old, ctx->level)) {
++ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
++ 0);
++ } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
++ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
++ ctx->level);
++ }
+ }
+
+ mm_ops->put_page(ctx->ptep);
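Note: the stage-2 change is conceptually a one-liner: when a table entry is evicted, the invalidation must cover the whole block that entry mapped, so the base is rounded down to the granule before flushing. For power-of-two sizes ALIGN_DOWN is just a mask; a quick check (the kernel's macro lives in <linux/align.h>):

    #include <assert.h>

    #define ALIGN_DOWN(x, a)        ((x) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long granule = 1UL << 21;              /* 2 MiB block */
            unsigned long addr = (5UL << 21) + 0x3000;      /* inside block 5 */

            assert(ALIGN_DOWN(addr, granule) == 5UL << 21);
            return 0;
    }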
+diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
+index 6ff3ec18c92584..b2c8084cdb95de 100644
+--- a/arch/arm64/kvm/pkvm.c
++++ b/arch/arm64/kvm/pkvm.c
+@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
+ hyp_mem_base);
+ }
+
++static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
++{
++ if (host_kvm->arch.pkvm.handle) {
++ WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
++ host_kvm->arch.pkvm.handle));
++ }
++
++ host_kvm->arch.pkvm.handle = 0;
++ free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
++}
++
+ /*
+ * Allocates and donates memory for hypervisor VM structs at EL2.
+ *
+@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
+ return 0;
+
+ destroy_vm:
+- pkvm_destroy_hyp_vm(host_kvm);
++ __pkvm_destroy_hyp_vm(host_kvm);
+ return ret;
+ free_vm:
+ free_pages_exact(hyp_vm, hyp_vm_sz);
+@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
+ {
+ int ret = 0;
+
+- mutex_lock(&host_kvm->lock);
++ mutex_lock(&host_kvm->arch.config_lock);
+ if (!host_kvm->arch.pkvm.handle)
+ ret = __pkvm_create_hyp_vm(host_kvm);
+- mutex_unlock(&host_kvm->lock);
++ mutex_unlock(&host_kvm->arch.config_lock);
+
+ return ret;
+ }
+
+ void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+ {
+- if (host_kvm->arch.pkvm.handle) {
+- WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
+- host_kvm->arch.pkvm.handle));
+- }
+-
+- host_kvm->arch.pkvm.handle = 0;
+- free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
++ mutex_lock(&host_kvm->arch.config_lock);
++ __pkvm_destroy_hyp_vm(host_kvm);
++ mutex_unlock(&host_kvm->arch.config_lock);
+ }
+
+ int pkvm_init_host_vm(struct kvm *host_kvm)
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 0afd6136e2759c..b233a64df2956a 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -32,6 +32,7 @@
+ #include <trace/events/kvm.h>
+
+ #include "sys_regs.h"
++#include "vgic/vgic.h"
+
+ #include "trace.h"
+
+@@ -301,6 +302,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ {
+ bool g1;
+
++ if (!kvm_has_gicv3(vcpu->kvm)) {
++ kvm_inject_undefined(vcpu);
++ return false;
++ }
++
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p, r);
+
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index c8c3cb81278321..a2b439ad387c80 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -355,7 +355,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+
+ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
+- vgic_v3_free_redist_region(rdreg);
++ vgic_v3_free_redist_region(kvm, rdreg);
+ INIT_LIST_HEAD(&dist->rd_regions);
+ } else {
+ dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
+@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+ vgic_v4_teardown(kvm);
+ }
+
+-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+@@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ vgic_flush_pending_lpis(vcpu);
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+- vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
++ vgic_unregister_redist_iodev(vcpu);
++ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
++ }
+ }
+
+-static void __kvm_vgic_destroy(struct kvm *kvm)
++void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
++{
++ struct kvm *kvm = vcpu->kvm;
++
++ mutex_lock(&kvm->slots_lock);
++ __kvm_vgic_vcpu_destroy(vcpu);
++ mutex_unlock(&kvm->slots_lock);
++}
++
++void kvm_vgic_destroy(struct kvm *kvm)
+ {
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+- lockdep_assert_held(&kvm->arch.config_lock);
++ mutex_lock(&kvm->slots_lock);
+
+ vgic_debug_destroy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+- kvm_vgic_vcpu_destroy(vcpu);
++ __kvm_vgic_vcpu_destroy(vcpu);
++
++ mutex_lock(&kvm->arch.config_lock);
+
+ kvm_vgic_dist_destroy(kvm);
+-}
+
+-void kvm_vgic_destroy(struct kvm *kvm)
+-{
+- mutex_lock(&kvm->arch.config_lock);
+- __kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->arch.config_lock);
++ mutex_unlock(&kvm->slots_lock);
+ }
+
+ /**
+@@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ type = VGIC_V3;
+ }
+
+- if (ret) {
+- __kvm_vgic_destroy(kvm);
++ if (ret)
+ goto out;
+- }
++
+ dist->ready = true;
+ dist_base = dist->vgic_dist_base;
+ mutex_unlock(&kvm->arch.config_lock);
+
+ ret = vgic_register_dist_iodev(kvm, dist_base, type);
+- if (ret) {
++ if (ret)
+ kvm_err("Unable to register VGIC dist MMIO regions\n");
+- kvm_vgic_destroy(kvm);
+- }
+- mutex_unlock(&kvm->slots_lock);
+- return ret;
+
++ goto out_slots;
+ out:
+ mutex_unlock(&kvm->arch.config_lock);
++out_slots:
+ mutex_unlock(&kvm->slots_lock);
++
++ if (ret)
++ kvm_vgic_destroy(kvm);
++
+ return ret;
+ }
+
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index 5fe2365a629f25..4f9084ba7949c0 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -462,6 +462,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
+ }
+
+ irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
++ if (!irq)
++ continue;
++
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ irq->pending_latch = pendmask & (1U << bit_nr);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+@@ -584,7 +587,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
++
+ irq = __vgic_its_check_cache(dist, db, devid, eventid);
++ if (irq)
++ vgic_get_irq_kref(irq);
++
+ raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+
+ return irq;
+@@ -763,6 +770,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(kvm, irq, flags);
++ vgic_put_irq(kvm, irq);
+
+ return 0;
+ }
+@@ -1422,6 +1430,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
+
+ for (i = 0; i < irq_count; i++) {
+ irq = vgic_get_irq(kvm, NULL, intids[i]);
++ if (!irq)
++ continue;
+
+ update_affinity(irq, vcpu2);
+
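Note: both vgic-its hunks are the same shape of bug: either a lookup result was used without a NULL check, or a pointer escaped the lock that keeps it alive without its reference count being raised first. A reduced userspace sketch of the get-under-lock rule, with pthreads in place of the kernel's raw spinlock and krefs, and illustrative names throughout:

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct irq {
            int refcount;           /* protected by table_lock here */
            int intid;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct irq *table_slot;  /* one-entry "translation cache" */

    static struct irq *check_cache(int intid)
    {
            struct irq *irq = NULL;

            pthread_mutex_lock(&table_lock);
            if (table_slot && table_slot->intid == intid) {
                    irq = table_slot;
                    irq->refcount++;        /* the added vgic_get_irq_kref() */
            }
            pthread_mutex_unlock(&table_lock);
            return irq;                     /* caller owns a reference */
    }

    static void put_irq(struct irq *irq)
    {
            pthread_mutex_lock(&table_lock);
            if (--irq->refcount == 0)
                    free(irq);
            pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
            table_slot = calloc(1, sizeof(*table_slot));
            table_slot->refcount = 1;       /* the cache's own reference */
            table_slot->intid = 42;

            struct irq *irq = check_cache(42);

            assert(irq && irq->refcount == 2);
            put_irq(irq);                   /* the added vgic_put_irq() */
            return 0;
    }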
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index 212b73a715c1c2..2f9e8c611f6421 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -337,16 +337,12 @@ int kvm_register_vgic_device(unsigned long type)
+ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
+ struct vgic_reg_attr *reg_attr)
+ {
+- int cpuid;
++ int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
+
+- cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+- KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+-
+- if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+- return -EINVAL;
+-
+- reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
++ reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
++ if (!reg_attr->vcpu)
++ return -EINVAL;
+
+ return 0;
+ }
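Note: FIELD_GET() derives the shift from the mask, so the two can no longer drift apart the way an open-coded mask-and-shift pair can. A minimal reimplementation for illustration; the kernel's type-checked version lives in <linux/bitfield.h>, and the field layout below is an example, not the real KVM one:

    #include <assert.h>

    #define FIELD_GET(mask, val) \
            (((val) & (mask)) >> __builtin_ctzll(mask))

    #define CPUID_MASK 0xfc000000ULL        /* example layout: bits 31:26 */

    int main(void)
    {
            unsigned long long attr = (0x2aULL << 26) | 0x1234;

            assert(((attr & CPUID_MASK) >> 26) == 42);      /* open-coded */
            assert(FIELD_GET(CPUID_MASK, attr) == 42);      /* derived shift */
            return 0;
    }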
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 188d2187eede93..48e8b60ff1e338 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -365,19 +365,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+- if (test_bit(i, &val)) {
+- /*
+- * pending_latch is set irrespective of irq type
+- * (level or edge) to avoid dependency that VM should
+- * restore irq config before pending info.
+- */
+- irq->pending_latch = true;
+- vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+- } else {
++
++ /*
++ * pending_latch is set irrespective of irq type
++ * (level or edge) to avoid dependency that VM should
++ * restore irq config before pending info.
++ */
++ irq->pending_latch = test_bit(i, &val);
++
++ if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
++ irq_set_irqchip_state(irq->host_irq,
++ IRQCHIP_STATE_PENDING,
++ irq->pending_latch);
+ irq->pending_latch = false;
+- raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
+
++ if (irq->pending_latch)
++ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
++ else
++ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
++
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+
+@@ -820,7 +827,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
+ return ret;
+ }
+
+-static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+
+@@ -935,8 +942,19 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
+ return ret;
+ }
+
+-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
++void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
+ {
++ struct kvm_vcpu *vcpu;
++ unsigned long c;
++
++ lockdep_assert_held(&kvm->arch.config_lock);
++
++ /* Garbage collect the region */
++ kvm_for_each_vcpu(c, vcpu, kvm) {
++ if (vcpu->arch.vgic_cpu.rdreg == rdreg)
++ vcpu->arch.vgic_cpu.rdreg = NULL;
++ }
++
+ list_del(&rdreg->list);
+ kfree(rdreg);
+ }
+@@ -961,7 +979,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
+
+ mutex_lock(&kvm->arch.config_lock);
+ rdreg = vgic_v3_rdist_region_from_index(kvm, index);
+- vgic_v3_free_redist_region(rdreg);
++ vgic_v3_free_redist_region(kvm, rdreg);
+ mutex_unlock(&kvm->arch.config_lock);
+ return ret;
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 0ab09b0d44404b..07e48f8a4f23b3 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -241,6 +241,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
+ int vgic_v3_save_pending_tables(struct kvm *kvm);
+ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
+ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
++void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
+ bool vgic_v3_check_base(struct kvm *kvm);
+
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+@@ -309,7 +310,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
+
+ struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
+ u32 index);
+-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
++void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
+
+ bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
+
+@@ -342,4 +343,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
+ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+
++static inline bool kvm_has_gicv3(struct kvm *kvm)
++{
++ return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
++ irqchip_in_kernel(kvm) &&
++ kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
++}
++
+ #endif
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index 8e2017ba5f1b11..0a62f458c5cb02 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -29,8 +29,8 @@ bool can_set_direct_map(void)
+ *
+ * KFENCE pool requires page-granular mapping if initialized late.
+ */
+- return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+- arm64_kfence_can_set_direct_map();
++ return rodata_full || debug_pagealloc_enabled() ||
++ arm64_kfence_can_set_direct_map();
+ }
+
+ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
+@@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
+ * If we are manipulating read-only permissions, apply the same
+ * change to the linear mapping of the pages that back this VM area.
+ */
+- if (rodata_enabled &&
+- rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
++ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+ pgprot_val(clear_mask) == PTE_RDONLY)) {
+ for (i = 0; i < area->nr_pages; i++) {
+ __change_memory_common((u64)page_address(area->pages[i]),
+@@ -220,9 +219,6 @@ bool kernel_page_present(struct page *page)
+ pte_t *ptep;
+ unsigned long addr = (unsigned long)page_address(page);
+
+- if (!can_set_direct_map())
+- return true;
+-
+ pgdp = pgd_offset_k(addr);
+ if (pgd_none(READ_ONCE(*pgdp)))
+ return false;
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 150d1c6543f7f1..166619348b98e4 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -876,7 +876,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ emit(A64_UXTH(is64, dst, dst), ctx);
+ break;
+ case 32:
+- emit(A64_REV32(is64, dst, dst), ctx);
++ emit(A64_REV32(0, dst, dst), ctx);
+ /* upper 32 bits already cleared */
+ break;
+ case 64:
+@@ -1189,7 +1189,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ if (sign_extend)
+- emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
++ emit(A64_LDRSW(dst, src, tmp), ctx);
+ else
+ emit(A64_LDR32(dst, src, tmp), ctx);
+ }
+@@ -1738,15 +1738,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
+
+ emit_call(enter_prog, ctx);
+
++ /* save return value to callee saved register x20 */
++ emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
++
+ /* if (__bpf_prog_enter(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+ branch = ctx->image + ctx->idx;
+ emit(A64_NOP, ctx);
+
+- /* save return value to callee saved register x20 */
+- emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
+-
+ emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
+ if (!p->jited)
+ emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
+diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
+index dea3dc89234b02..c251ef3caae560 100644
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -84,7 +84,6 @@ WORKAROUND_2077057
+ WORKAROUND_2457168
+ WORKAROUND_2645198
+ WORKAROUND_2658417
+-WORKAROUND_2966298
+ WORKAROUND_AMPERE_AC03_CPU_38
+ WORKAROUND_TRBE_OVERWRITE_FILL_MODE
+ WORKAROUND_TSB_FLUSH_FAILURE
+@@ -100,3 +99,5 @@ WORKAROUND_NVIDIA_CARMEL_CNP
+ WORKAROUND_QCOM_FALKOR_E1003
+ WORKAROUND_REPEAT_TLBI
+ WORKAROUND_SPECULATIVE_AT
++WORKAROUND_SPECULATIVE_SSBS
++WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
+index 908d8b0bc4fdc6..d011a81575d21e 100644
+--- a/arch/csky/abiv1/inc/abi/cacheflush.h
++++ b/arch/csky/abiv1/inc/abi/cacheflush.h
+@@ -43,6 +43,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
+ */
+ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+ #define flush_cache_vmap(start, end) cache_wbinv_all()
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) cache_wbinv_all()
+
+ #define flush_icache_range(start, end) cache_wbinv_range(start, end)
+diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
+index 40be16907267d6..6513ac5d257888 100644
+--- a/arch/csky/abiv2/inc/abi/cacheflush.h
++++ b/arch/csky/abiv2/inc/abi/cacheflush.h
+@@ -41,6 +41,7 @@ void flush_icache_mm_range(struct mm_struct *mm,
+ void flush_icache_deferred(struct mm_struct *mm);
+
+ #define flush_cache_vmap(start, end) do { } while (0)
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) do { } while (0)
+
+ #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+diff --git a/arch/csky/include/asm/irq_work.h b/arch/csky/include/asm/irq_work.h
+index 33aaf39d6f94f6..d39fcc1f5395f6 100644
+--- a/arch/csky/include/asm/irq_work.h
++++ b/arch/csky/include/asm/irq_work.h
+@@ -7,5 +7,5 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ return true;
+ }
+-extern void arch_irq_work_raise(void);
++
+ #endif /* __ASM_CSKY_IRQ_WORK_H */
+diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
+index d488ba6084bc6b..ef2e37a10a0feb 100644
+--- a/arch/csky/include/asm/jump_label.h
++++ b/arch/csky/include/asm/jump_label.h
+@@ -12,7 +12,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ "1: nop32 \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+@@ -29,7 +29,7 @@ static __always_inline bool arch_static_branch(struct static_key *key,
+ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ "1: bsr32 %l[label] \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+@@ -43,5 +43,10 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ return true;
+ }
+
++enum jump_label_type;
++void arch_jump_label_transform_static(struct jump_entry *entry,
++ enum jump_label_type type);
++#define arch_jump_label_transform_static arch_jump_label_transform_static
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ASM_CSKY_JUMP_LABEL_H */
+diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
+index 7ff6a2466af10d..e0594b6370a658 100644
+--- a/arch/csky/include/uapi/asm/unistd.h
++++ b/arch/csky/include/uapi/asm/unistd.h
+@@ -6,6 +6,7 @@
+ #define __ARCH_WANT_SYS_CLONE3
+ #define __ARCH_WANT_SET_GET_RLIMIT
+ #define __ARCH_WANT_TIME32_SYSCALLS
++#define __ARCH_WANT_SYNC_FILE_RANGE2
+ #include <asm-generic/unistd.h>
+
+ #define __NR_set_thread_area (__NR_arch_specific_syscall + 0)
+diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
+index 834cffcfbce320..7ba4b98076de1e 100644
+--- a/arch/csky/kernel/probes/ftrace.c
++++ b/arch/csky/kernel/probes/ftrace.c
+@@ -12,6 +12,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct kprobe_ctlblk *kcb;
+ struct pt_regs *regs;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+diff --git a/arch/hexagon/include/asm/syscalls.h b/arch/hexagon/include/asm/syscalls.h
+new file mode 100644
+index 00000000000000..40f2d08bec92cc
+--- /dev/null
++++ b/arch/hexagon/include/asm/syscalls.h
+@@ -0,0 +1,6 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#include <asm-generic/syscalls.h>
++
++asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
++ u32 a2, u32 a3, u32 a4, u32 a5);
+diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
+index 432c4db1b62392..21ae22306b5dce 100644
+--- a/arch/hexagon/include/uapi/asm/unistd.h
++++ b/arch/hexagon/include/uapi/asm/unistd.h
+@@ -36,5 +36,6 @@
+ #define __ARCH_WANT_SYS_VFORK
+ #define __ARCH_WANT_SYS_FORK
+ #define __ARCH_WANT_TIME32_SYSCALLS
++#define __ARCH_WANT_SYNC_FILE_RANGE2
+
+ #include <asm-generic/unistd.h>
+diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
+index 0fadd582cfc77f..5d98bdc494ec29 100644
+--- a/arch/hexagon/kernel/syscalltab.c
++++ b/arch/hexagon/kernel/syscalltab.c
+@@ -14,6 +14,13 @@
+ #undef __SYSCALL
+ #define __SYSCALL(nr, call) [nr] = (call),
+
++SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
++ SC_ARG64(offset), SC_ARG64(len))
++{
++ return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
++}
++#define sys_fadvise64_64 sys_hexagon_fadvise64_64
++
+ void *sys_call_table[__NR_syscalls] = {
+ #include <asm/unistd.h>
+ };
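Note: on a 32-bit ABI a 64-bit syscall argument arrives as a register pair, which is what SC_ARG64()/SC_VAL64() declare and reassemble (with an endianness-dependent half order). A minimal sketch of the reassembly, using illustrative names rather than the kernel's:

    #include <assert.h>
    #include <stdint.h>

    /* Little-endian register pair -> 64-bit value. */
    static int64_t val64(uint32_t lo, uint32_t hi)
    {
            return (int64_t)(((uint64_t)hi << 32) | lo);
    }

    int main(void)
    {
            int64_t offset = 0x123456789ALL;
            uint32_t lo = (uint32_t)offset;
            uint32_t hi = (uint32_t)((uint64_t)offset >> 32);

            assert(val64(lo, hi) == offset);
            return 0;
    }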
+diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
+index 1140051a0c455d..1150b77fa281ce 100644
+--- a/arch/hexagon/kernel/vmlinux.lds.S
++++ b/arch/hexagon/kernel/vmlinux.lds.S
+@@ -63,6 +63,7 @@ SECTIONS
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
++ .hexagon.attributes 0 : { *(.hexagon.attributes) }
+
+ DISCARDS
+ }
+diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
+index 5a55ac82c13a47..d2c66efdde560e 100644
+--- a/arch/ia64/kernel/setup.c
++++ b/arch/ia64/kernel/setup.c
+@@ -86,9 +86,13 @@ EXPORT_SYMBOL(local_per_cpu_offset);
+ #endif
+ unsigned long ia64_cycles_per_usec;
+ struct ia64_boot_param *ia64_boot_param;
++#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_EFI)
+ struct screen_info screen_info;
++#endif
++#ifdef CONFIG_VGA_CONSOLE
+ unsigned long vga_console_iobase;
+ unsigned long vga_console_membase;
++#endif
+
+ static struct resource data_resource = {
+ .name = "Kernel data",
+@@ -497,6 +501,7 @@ early_console_setup (char *cmdline)
+ static void __init
+ screen_info_setup(void)
+ {
++#ifdef CONFIG_VGA_CONSOLE
+ unsigned int orig_x, orig_y, num_cols, num_rows, font_height;
+
+ memset(&screen_info, 0, sizeof(screen_info));
+@@ -525,6 +530,7 @@ screen_info_setup(void)
+ screen_info.orig_video_mode = 3; /* XXX fake */
+ screen_info.orig_video_isVGA = 1; /* XXX fake */
+ screen_info.orig_video_ega_bx = 3; /* XXX fake */
++#endif
+ }
+
+ static inline void
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index e14396a2ddcbfc..9fd8644a9a4c6a 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -11,6 +11,7 @@ config LOONGARCH
+ select ARCH_DISABLE_KASAN_INLINE
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
++ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_FORTIFY_SOURCE
+@@ -97,6 +98,7 @@ config LOONGARCH
+ select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KGDB if PERF_EVENTS
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
++ select HAVE_ARCH_SECCOMP
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+@@ -603,23 +605,6 @@ config RANDOMIZE_BASE_MAX_OFFSET
+
+ This is limited by the size of the lower address memory, 256MB.
+
+-config SECCOMP
+- bool "Enable seccomp to safely compute untrusted bytecode"
+- depends on PROC_FS
+- default y
+- help
+- This kernel feature is useful for number crunching applications
+- that may need to compute untrusted bytecode during their
+- execution. By using pipes or other transports made available to
+- the process as file descriptors supporting the read/write
+- syscalls, it's possible to isolate those applications in
+- their own address space using seccomp. Once seccomp is
+- enabled via /proc/<pid>/seccomp, it cannot be disabled
+- and the task is only allowed to execute a few safe syscalls
+- defined by each seccomp mode.
+-
+- If unsure, say Y. Only embedded should say N here.
+-
+ endmenu
+
+ config ARCH_SELECT_MEMORY_MODEL
+@@ -638,10 +623,6 @@ config ARCH_SPARSEMEM_ENABLE
+ or have huge holes in the physical address space for other reasons.
+ See <file:Documentation/mm/numa.rst> for more.
+
+-config ARCH_ENABLE_THP_MIGRATION
+- def_bool y
+- depends on TRANSPARENT_HUGEPAGE
+-
+ config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
+index fb0fada43197e4..81e8089c9c4f18 100644
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -80,7 +80,7 @@ endif
+
+ ifeq ($(CONFIG_RELOCATABLE),y)
+ KBUILD_CFLAGS_KERNEL += -fPIE
+-LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
++LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
+ endif
+
+ cflags-y += $(call cc-option, -mno-check-zero-division)
+@@ -136,12 +136,12 @@ vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/loongarch/vdso include/generated/vdso-offsets.h
+ endif
+
+-PHONY += vdso_install
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/loongarch/vdso $@
++vdso-install-y += arch/loongarch/vdso/vdso.so.dbg
+
+ all: $(notdir $(KBUILD_IMAGE))
+
++vmlinuz.efi: vmlinux.efi
++
+ vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
+
+diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
+index a3b52aaa83b336..e5f70642ed2062 100644
+--- a/arch/loongarch/configs/loongson3_defconfig
++++ b/arch/loongarch/configs/loongson3_defconfig
+@@ -83,7 +83,6 @@ CONFIG_ZPOOL=y
+ CONFIG_ZSWAP=y
+ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+ CONFIG_ZBUD=y
+-CONFIG_Z3FOLD=y
+ CONFIG_ZSMALLOC=m
+ # CONFIG_COMPAT_BRK is not set
+ CONFIG_MEMORY_HOTPLUG=y
+diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c
+index 1f2a2c3839bcbf..1e8ff57a46ca6d 100644
+--- a/arch/loongarch/crypto/crc32-loongarch.c
++++ b/arch/loongarch/crypto/crc32-loongarch.c
+@@ -44,7 +44,6 @@ static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
+
+ CRC32(crc, value, w);
+ p += sizeof(u32);
+- len -= sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+@@ -80,7 +79,6 @@ static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
+
+ CRC32C(crc, value, w);
+ p += sizeof(u32);
+- len -= sizeof(u32);
+ }
+
+ if (len & sizeof(u16)) {
+diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
+index 93783fa24f6e9b..dede0b422cfb91 100644
+--- a/arch/loongarch/include/asm/Kbuild
++++ b/arch/loongarch/include/asm/Kbuild
+@@ -4,6 +4,7 @@ generic-y += mcs_spinlock.h
+ generic-y += parport.h
+ generic-y += early_ioremap.h
+ generic-y += qrwlock.h
++generic-y += qspinlock.h
+ generic-y += rwsem.h
+ generic-y += segment.h
+ generic-y += user.h
+diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
+index 8de6c4b83a61a8..49e29b29996f0f 100644
+--- a/arch/loongarch/include/asm/acpi.h
++++ b/arch/loongarch/include/asm/acpi.h
+@@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
+ return true;
+ }
+
++#define MAX_CORE_PIC 256
++
+ extern struct list_head acpi_wakeup_device_list;
+-extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
++extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
+
+ extern int __init parse_acpi_topology(void);
+
+diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
+index c9544f358c3399..655db7d7a42796 100644
+--- a/arch/loongarch/include/asm/asmmacro.h
++++ b/arch/loongarch/include/asm/asmmacro.h
+@@ -609,8 +609,7 @@
+ lu32i.d \reg, 0
+ lu52i.d \reg, \reg, 0
+ .pushsection ".la_abs", "aw", %progbits
+- 768:
+- .dword 768b-766b
++ .dword 766b
+ .dword \sym
+ .popsection
+ #endif
+diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
+deleted file mode 100644
+index 75ccd808a2af39..00000000000000
+--- a/arch/loongarch/include/asm/dma-direct.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+- */
+-#ifndef _LOONGARCH_DMA_DIRECT_H
+-#define _LOONGARCH_DMA_DIRECT_H
+-
+-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+-
+-#endif /* _LOONGARCH_DMA_DIRECT_H */
+diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h
+index 091897d40b0375..eddc8e79b3fae7 100644
+--- a/arch/loongarch/include/asm/efi.h
++++ b/arch/loongarch/include/asm/efi.h
+@@ -32,6 +32,4 @@ static inline unsigned long efi_get_kimg_min_align(void)
+
+ #define EFI_KIMG_PREFERRED_ADDRESS PHYSADDR(VMLINUX_LOAD_ADDRESS)
+
+-unsigned long kernel_entry_address(void);
+-
+ #endif /* _ASM_LOONGARCH_EFI_H */
+diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
+index b9a4ab54285c11..f16bd42456e4cc 100644
+--- a/arch/loongarch/include/asm/elf.h
++++ b/arch/loongarch/include/asm/elf.h
+@@ -241,8 +241,6 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
+ do { \
+ current->thread.vdso = &vdso_info; \
+ \
+- loongarch_set_personality_fcsr(state); \
+- \
+ if (personality(current->personality) != PER_LINUX) \
+ set_personality(PER_LINUX); \
+ } while (0)
+@@ -259,7 +257,6 @@ do { \
+ clear_thread_flag(TIF_32BIT_ADDR); \
+ \
+ current->thread.vdso = &vdso_info; \
+- loongarch_set_personality_fcsr(state); \
+ \
+ p = personality(current->personality); \
+ if (p != PER_LINUX32 && p != PER_LINUX) \
+@@ -293,7 +290,7 @@ extern const char *__elf_platform;
+ #define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
+ _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
+- _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
++ _r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0; \
+ _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
+ _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
+ _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
+@@ -340,6 +337,4 @@ extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
+ struct arch_elf_state *state);
+
+-extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
+-
+ #endif /* _ASM_ELF_H */
+diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
+index 21447fb1efc778..d78330916bd18a 100644
+--- a/arch/loongarch/include/asm/hw_breakpoint.h
++++ b/arch/loongarch/include/asm/hw_breakpoint.h
+@@ -75,6 +75,8 @@ do { \
+ #define CSR_MWPC_NUM 0x3f
+
+ #define CTRL_PLV_ENABLE 0x1e
++#define CTRL_PLV0_ENABLE 0x02
++#define CTRL_PLV3_ENABLE 0x10
+
+ #define MWPnCFG3_LoadEn 8
+ #define MWPnCFG3_StoreEn 9
+@@ -101,7 +103,7 @@ struct perf_event;
+ struct perf_event_attr;
+
+ extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+- int *gen_len, int *gen_type, int *offset);
++ int *gen_len, int *gen_type);
+ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+ extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
+index af4f4e8fbd858f..8156ffb6741591 100644
+--- a/arch/loongarch/include/asm/hw_irq.h
++++ b/arch/loongarch/include/asm/hw_irq.h
+@@ -9,6 +9,8 @@
+
+ extern atomic_t irq_err_count;
+
++#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE
++
+ /*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+ * machines, we'll see ...
+diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
+index c486c2341b6623..4a8adcca329b81 100644
+--- a/arch/loongarch/include/asm/io.h
++++ b/arch/loongarch/include/asm/io.h
+@@ -71,6 +71,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
+ #define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
+ #define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
+
++#define __io_aw() mmiowb()
++
+ #include <asm-generic/io.h>
+
+ #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
+index 3cea299a5ef583..29acfe3de3faae 100644
+--- a/arch/loongarch/include/asm/jump_label.h
++++ b/arch/loongarch/include/asm/jump_label.h
+@@ -22,7 +22,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ "1: nop \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i"(&((char *)key)[branch]) : : l_yes);
+@@ -35,7 +35,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key, co
+
+ static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ "1: b %l[l_yes] \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i"(&((char *)key)[branch]) : : l_yes);
+diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
+index 27f319b4986257..b5f9de9f102e44 100644
+--- a/arch/loongarch/include/asm/numa.h
++++ b/arch/loongarch/include/asm/numa.h
+@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
+ static inline void early_numa_add_cpu(int cpuid, s16 node) { }
+ static inline void numa_add_cpu(unsigned int cpu) { }
+ static inline void numa_remove_cpu(unsigned int cpu) { }
++static inline void set_cpuid_to_node(int cpuid, s16 node) { }
+
+ static inline int early_cpu_to_node(int cpu)
+ {
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index b9f567e6601668..7e804140500f15 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -29,10 +29,15 @@ static inline void set_my_cpu_offset(unsigned long off)
+ __my_cpu_offset = off;
+ csr_write64(off, PERCPU_BASE_KS);
+ }
+-#define __my_cpu_offset __my_cpu_offset
++
++#define __my_cpu_offset \
++({ \
++ __asm__ __volatile__("":"+r"(__my_cpu_offset)); \
++ __my_cpu_offset; \
++})
+
+ #define PERCPU_OP(op, asm_op, c_op) \
+-static inline unsigned long __percpu_##op(void *ptr, \
++static __always_inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+ { \
+ unsigned long ret; \
+@@ -63,7 +68,7 @@ PERCPU_OP(and, and, &)
+ PERCPU_OP(or, or, |)
+ #undef PERCPU_OP
+
+-static inline unsigned long __percpu_read(void *ptr, int size)
++static __always_inline unsigned long __percpu_read(void *ptr, int size)
+ {
+ unsigned long ret;
+
+@@ -100,7 +105,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
+ return ret;
+ }
+
+-static inline void __percpu_write(void *ptr, unsigned long val, int size)
++static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ switch (size) {
+ case 1:
+@@ -132,8 +137,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ }
+ }
+
+-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+- int size)
++static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
++ int size)
+ {
+ switch (size) {
+ case 1:
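+
+The __my_cpu_offset change above wraps the read in an empty asm with a
+"+r" constraint. A sketch of the idiom with illustrative names (the
+real macro operates on the global register variable itself):
+
+    static unsigned long cpu_offset_var;
+
+    static inline unsigned long read_cpu_offset(void)
+    {
+            unsigned long off = cpu_offset_var;
+
+            /* "+r" tells the compiler the value may have changed, so
+             * it cannot reuse a previously cached per-CPU offset
+             * across a point where the task might have migrated. */
+            __asm__ __volatile__("" : "+r"(off));
+            return off;
+    }
+
+The accompanying s/inline/__always_inline/ changes ensure these
+accessors are always expanded in place rather than becoming real
+function calls.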
+diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
+index 2a35a0bc2aaabf..f948a0676daf80 100644
+--- a/arch/loongarch/include/asm/perf_event.h
++++ b/arch/loongarch/include/asm/perf_event.h
+@@ -7,6 +7,13 @@
+ #ifndef __LOONGARCH_PERF_EVENT_H__
+ #define __LOONGARCH_PERF_EVENT_H__
+
++#include <asm/ptrace.h>
++
+ #define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
+
++#define perf_arch_fetch_caller_regs(regs, __ip) { \
++ (regs)->csr_era = (__ip); \
++ (regs)->regs[3] = (unsigned long) __builtin_frame_address(0); \
++}
++
+ #endif /* __LOONGARCH_PERF_EVENT_H__ */
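+
+perf_arch_fetch_caller_regs() gains a LoongArch definition above. A
+sketch of what it records, with stand-in types (csr_era is the
+exception return address, regs[3] the stack pointer):
+
+    #include <stdint.h>
+
+    struct regs_sketch { uint64_t csr_era; uint64_t sp; };
+
+    static inline void fetch_caller_regs(struct regs_sketch *r,
+                                         uint64_t ip)
+    {
+            r->csr_era = ip;
+            r->sp = (uint64_t)(uintptr_t)__builtin_frame_address(0);
+    }
+
+That pair of values is enough for perf to begin a stack walk from the
+call site.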
+diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
+deleted file mode 100644
+index 34f43f8ad5912b..00000000000000
+--- a/arch/loongarch/include/asm/qspinlock.h
++++ /dev/null
+@@ -1,18 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_QSPINLOCK_H
+-#define _ASM_QSPINLOCK_H
+-
+-#include <asm-generic/qspinlock_types.h>
+-
+-#define queued_spin_unlock queued_spin_unlock
+-
+-static inline void queued_spin_unlock(struct qspinlock *lock)
+-{
+- compiletime_assert_atomic_type(lock->locked);
+- c_sync();
+- WRITE_ONCE(lock->locked, 0);
+-}
+-
+-#include <asm-generic/qspinlock.h>
+-
+-#endif /* _ASM_QSPINLOCK_H */
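+
+With this override deleted (and the generic header pulled in via the
+Kbuild hunk earlier), LoongArch inherits the common
+queued_spin_unlock(), which in mainline is a release store. A C11
+userspace sketch of the two orderings, for contrast only:
+
+    #include <stdatomic.h>
+
+    static _Atomic unsigned char locked_byte;
+
+    /* old override: full barrier (c_sync) then a plain store */
+    static void unlock_full_barrier(void)
+    {
+            atomic_thread_fence(memory_order_seq_cst);
+            atomic_store_explicit(&locked_byte, 0, memory_order_relaxed);
+    }
+
+    /* generic form: a single release store */
+    static void unlock_release(void)
+    {
+            atomic_store_explicit(&locked_byte, 0, memory_order_release);
+    }
+
+The release store is sufficient for lock hand-off and avoids paying
+for a full barrier on every unlock.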
+diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
+index a0bc159ce8bdc0..ee52fb1e996316 100644
+--- a/arch/loongarch/include/asm/setup.h
++++ b/arch/loongarch/include/asm/setup.h
+@@ -25,7 +25,7 @@ extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len
+ #ifdef CONFIG_RELOCATABLE
+
+ struct rela_la_abs {
+- long offset;
++ long pc;
+ long symvalue;
+ };
+
+diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
+index 4fb1e6408b982a..efc8c42290d019 100644
+--- a/arch/loongarch/include/asm/stackframe.h
++++ b/arch/loongarch/include/asm/stackframe.h
+@@ -41,7 +41,7 @@
+ .macro JUMP_VIRT_ADDR temp1 temp2
+ li.d \temp1, CACHE_BASE
+ pcaddi \temp2, 0
+- or \temp1, \temp1, \temp2
++ bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
+ jirl zero, \temp1, 0xc
+ .endm
+
+diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h
+index fcb668984f0336..b344b1f917153b 100644
+--- a/arch/loongarch/include/uapi/asm/unistd.h
++++ b/arch/loongarch/include/uapi/asm/unistd.h
+@@ -1,4 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#define __ARCH_WANT_NEW_STAT
+ #define __ARCH_WANT_SYS_CLONE
+ #define __ARCH_WANT_SYS_CLONE3
+
+diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
+index 8e00a754e54894..55d6a48c76a821 100644
+--- a/arch/loongarch/kernel/acpi.c
++++ b/arch/loongarch/kernel/acpi.c
+@@ -29,11 +29,9 @@ int disabled_cpus;
+
+ u64 acpi_saved_sp;
+
+-#define MAX_CORE_PIC 256
+-
+ #define PREFIX "ACPI: "
+
+-struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
++struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
+
+ void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
+ {
+diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
+index 9fc10cea21e10e..de4f3def4af0b9 100644
+--- a/arch/loongarch/kernel/efi.c
++++ b/arch/loongarch/kernel/efi.c
+@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ }
+
++bool efi_poweroff_required(void)
++{
++ return efi_enabled(EFI_RUNTIME_SERVICES) &&
++ (acpi_gbl_reduced_hardware || acpi_no_s5);
++}
++
+ unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+ static void __init init_screen_info(void)
+diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
+index 183e94fc9c69ce..0fa81ced28dcdd 100644
+--- a/arch/loongarch/kernel/elf.c
++++ b/arch/loongarch/kernel/elf.c
+@@ -23,8 +23,3 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ {
+ return 0;
+ }
+-
+-void loongarch_set_personality_fcsr(struct arch_elf_state *state)
+-{
+- current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+-}
+diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
+index 73858c9029cc9e..bff058317062e3 100644
+--- a/arch/loongarch/kernel/ftrace_dyn.c
++++ b/arch/loongarch/kernel/ftrace_dyn.c
+@@ -287,6 +287,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
+index 53b883db078620..e336fbc4eb9675 100644
+--- a/arch/loongarch/kernel/head.S
++++ b/arch/loongarch/kernel/head.S
+@@ -22,7 +22,7 @@
+ _head:
+ .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .org 0x8
+- .dword kernel_entry /* Kernel entry point */
++ .dword _kernel_entry /* Kernel entry point (physical address) */
+ .dword _kernel_asize /* Kernel image effective size */
+ .quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
+ .org 0x38 /* 0x20 ~ 0x37 reserved */
+@@ -34,7 +34,6 @@ pe_header:
+
+ SYM_DATA(kernel_asize, .long _kernel_asize);
+ SYM_DATA(kernel_fsize, .long _kernel_fsize);
+-SYM_DATA(kernel_offset, .long _kernel_offset);
+
+ #endif
+
+diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
+index fc55c4de2a11ff..a6e4b605bfa8d6 100644
+--- a/arch/loongarch/kernel/hw_breakpoint.c
++++ b/arch/loongarch/kernel/hw_breakpoint.c
+@@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+ static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+ {
+- u32 ctrl;
++ u32 ctrl, privilege;
+ int i, max_slots, enable;
++ struct pt_regs *regs;
+ struct perf_event **slots;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
++ if (arch_check_bp_in_kernelspace(info))
++ privilege = CTRL_PLV0_ENABLE;
++ else
++ privilege = CTRL_PLV3_ENABLE;
++
++ /* Whether bp belongs to a task. */
++ if (bp->hw.target)
++ regs = task_pt_regs(bp->hw.target);
++
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ slots = this_cpu_ptr(bp_on_reg);
+@@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /* Set the FWPnCFG/MWPnCFG 1~4 register. */
+- write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+- write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+- write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+- write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+- write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
++ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
++ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
++ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
++ write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
+ } else {
++ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
++ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
++ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ ctrl = encode_ctrl_reg(info->ctrl);
+- write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
++ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
+ }
+ enable = csr_read64(LOONGARCH_CSR_CRMD);
+ csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
++ if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH))
++ regs->csr_prmd |= CSR_PRMD_PWE;
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the FWPnCFG/MWPnCFG 1~4 register. */
+- write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+- write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+- write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+- write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+- write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+- write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
++ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
++ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
++ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
++ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
++ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
++ } else {
++ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
++ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
++ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
++ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
++ }
++ if (bp->hw.target)
++ regs->csr_prmd &= ~CSR_PRMD_PWE;
+ break;
+ }
+
+@@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+ * to generic breakpoint descriptions.
+ */
+ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+- int *gen_len, int *gen_type, int *offset)
++ int *gen_len, int *gen_type)
+ {
+ /* Type */
+ switch (ctrl.type) {
+@@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ return -EINVAL;
+ }
+
+- if (!ctrl.len)
+- return -EINVAL;
+-
+- *offset = __ffs(ctrl.len);
+-
+ /* Len */
+ switch (ctrl.len) {
+ case LOONGARCH_BREAKPOINT_LEN_1:
+@@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ struct arch_hw_breakpoint *hw)
+ {
+ int ret;
+- u64 alignment_mask, offset;
++ u64 alignment_mask;
+
+ /* Build the arch_hw_breakpoint. */
+ ret = arch_build_bp_info(bp, attr, hw);
+ if (ret)
+ return ret;
+
+- if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
+- alignment_mask = 0x7;
+- else
++ if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ alignment_mask = 0x3;
+- offset = hw->address & alignment_mask;
+-
+- hw->address &= ~alignment_mask;
+- hw->ctrl.len <<= offset;
++ hw->address &= ~alignment_mask;
++ }
+
+ return 0;
+ }
+@@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
+ slots = this_cpu_ptr(bp_on_reg);
+
+ for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
+- bp = slots[i];
+- if (bp == NULL)
+- continue;
+- perf_bp_event(bp, regs);
++ if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
++ bp = slots[i];
++ if (bp == NULL)
++ continue;
++ perf_bp_event(bp, regs);
++ csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
++ update_bp_registers(regs, 0, 0);
++ }
+ }
+- update_bp_registers(regs, 0, 0);
+ }
+ NOKPROBE_SYMBOL(breakpoint_handler);
+
+@@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
+ slots = this_cpu_ptr(wp_on_reg);
+
+ for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
+- wp = slots[i];
+- if (wp == NULL)
+- continue;
+- perf_bp_event(wp, regs);
++ if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
++ wp = slots[i];
++ if (wp == NULL)
++ continue;
++ perf_bp_event(wp, regs);
++ csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
++ update_bp_registers(regs, 0, 1);
++ }
+ }
+- update_bp_registers(regs, 0, 1);
+ }
+ NOKPROBE_SYMBOL(watchpoint_handler);
+
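+Both handlers above now consult the FWPS/MWPS status registers, report
+only the slots whose bit is set, and acknowledge each serviced bit by
+writing it back. A generic sketch of that write-1-to-clear dispatch
+pattern (names and NSLOTS are illustrative):
+
+    #include <stdint.h>
+
+    #define NSLOTS 8
+
+    typedef void (*slot_fn)(int slot);
+
+    static void dispatch(uint32_t status, volatile uint32_t *ack,
+                         slot_fn handlers[NSLOTS])
+    {
+            for (int i = 0; i < NSLOTS; i++) {
+                    if (!(status & (1u << i)))
+                            continue;
+                    if (handlers[i])
+                            handlers[i](i);
+                    *ack = 1u << i;   /* write-1-to-clear this bit */
+            }
+    }
+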
+diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h
+index e561989d02de93..b12f8810f19916 100644
+--- a/arch/loongarch/kernel/image-vars.h
++++ b/arch/loongarch/kernel/image-vars.h
+@@ -11,7 +11,6 @@ __efistub_strcmp = strcmp;
+ __efistub_kernel_entry = kernel_entry;
+ __efistub_kernel_asize = kernel_asize;
+ __efistub_kernel_fsize = kernel_fsize;
+-__efistub_kernel_offset = kernel_offset;
+ __efistub_screen_info = screen_info;
+
+ #endif
+diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
+index 883e5066ae445f..df42c063f6c430 100644
+--- a/arch/loongarch/kernel/irq.c
++++ b/arch/loongarch/kernel/irq.c
+@@ -122,9 +122,6 @@ void __init init_IRQ(void)
+ panic("IPI IRQ request failed\n");
+ #endif
+
+- for (i = 0; i < NR_IRQS; i++)
+- irq_set_noprobe(i);
+-
+ for_each_possible_cpu(i) {
+ page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
+
+diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
+index 0491bf453cd496..cac7cba81b65f7 100644
+--- a/arch/loongarch/kernel/perf_event.c
++++ b/arch/loongarch/kernel/perf_event.c
+@@ -884,4 +884,4 @@ static int __init init_hw_perf_events(void)
+
+ return 0;
+ }
+-early_initcall(init_hw_perf_events);
++pure_initcall(init_hw_perf_events);
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index 767d94cce0de07..f2ff8b5d591e4f 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -85,6 +85,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+ regs->csr_euen = euen;
+ lose_fpu(0);
+ lose_lbt(0);
++ current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+
+ clear_thread_flag(TIF_LSX_CTX_LIVE);
+ clear_thread_flag(TIF_LASX_CTX_LIVE);
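+
+The line added to start_thread() pairs with the removal of
+loongarch_set_personality_fcsr() from the ELF hooks earlier in this
+patch: the default FPU control/status word is now applied once per
+exec, next to the rest of the FPU reset. A sketch with stand-in types:
+
+    struct fpu_sketch { unsigned int fcsr; };
+
+    static void start_thread_sketch(struct fpu_sketch *fpu,
+                                    unsigned int boot_fcsr0)
+    {
+            /* ...euen setup, lose_fpu()/lose_lbt() equivalents... */
+            fpu->fcsr = boot_fcsr0;  /* default FCSR set on every exec */
+    }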
+diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
+index c114c5ef13325a..19dc6eff45ccc8 100644
+--- a/arch/loongarch/kernel/ptrace.c
++++ b/arch/loongarch/kernel/ptrace.c
+@@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
+ struct arch_hw_breakpoint_ctrl ctrl,
+ struct perf_event_attr *attr)
+ {
+- int err, len, type, offset;
++ int err, len, type;
+
+- err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
++ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+- switch (note_type) {
+- case NT_LOONGARCH_HW_BREAK:
+- if ((type & HW_BREAKPOINT_X) != type)
+- return -EINVAL;
+- break;
+- case NT_LOONGARCH_HW_WATCH:
+- if ((type & HW_BREAKPOINT_RW) != type)
+- return -EINVAL;
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+ attr->bp_len = len;
+ attr->bp_type = type;
+- attr->bp_addr += offset;
+
+ return 0;
+ }
+@@ -603,16 +589,36 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint_ctrl ctrl;
++ struct thread_info *ti = task_thread_info(tsk);
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+- decode_ctrl_reg(uctrl, &ctrl);
+- err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+- if (err)
+- return err;
++
++ switch (note_type) {
++ case NT_LOONGARCH_HW_BREAK:
++ ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
++ ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
++ break;
++ case NT_LOONGARCH_HW_WATCH:
++ decode_ctrl_reg(uctrl, &ctrl);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (uctrl & CTRL_PLV_ENABLE) {
++ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
++ if (err)
++ return err;
++ attr.disabled = 0;
++ set_ti_thread_flag(ti, TIF_LOAD_WATCH);
++ } else {
++ attr.disabled = 1;
++ clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
++ }
+
+ return modify_user_hw_breakpoint(bp, &attr);
+ }
+@@ -643,6 +649,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
++ /* Kernel-space address cannot be monitored by user-space */
++ if ((unsigned long)addr >= XKPRANGE)
++ return -EINVAL;
++
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
+index 6c3eff9af9fb1e..0eddd4a66b8745 100644
+--- a/arch/loongarch/kernel/relocate.c
++++ b/arch/loongarch/kernel/relocate.c
+@@ -13,6 +13,7 @@
+ #include <asm/bootinfo.h>
+ #include <asm/early_ioremap.h>
+ #include <asm/inst.h>
++#include <asm/io.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
+
+@@ -52,7 +53,7 @@ static inline void __init relocate_absolute(long random_offset)
+ for (p = begin; (void *)p < end; p++) {
+ long v = p->symvalue;
+ uint32_t lu12iw, ori, lu32id, lu52id;
+- union loongarch_instruction *insn = (void *)p - p->offset;
++ union loongarch_instruction *insn = (void *)p->pc;
+
+ lu12iw = (v >> 12) & 0xfffff;
+ ori = v & 0xfff;
+@@ -102,6 +103,14 @@ static inline __init unsigned long get_random_boot(void)
+ return hash;
+ }
+
++static int __init nokaslr(char *p)
++{
++ pr_info("KASLR is disabled.\n");
++
++ return 0; /* Print a notice and silence the boot warning */
++}
++early_param("nokaslr", nokaslr);
++
+ static inline __init bool kaslr_disabled(void)
+ {
+ char *str;
+@@ -162,7 +171,7 @@ unsigned long __init relocate_kernel(void)
+ unsigned long kernel_length;
+ unsigned long random_offset = 0;
+ void *location_new = _text; /* Default to original kernel start */
+- char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
++ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+
+@@ -174,6 +183,7 @@ unsigned long __init relocate_kernel(void)
+ random_offset = (unsigned long)location_new - (unsigned long)(_text);
+ #endif
+ reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
++ early_memunmap(cmdline, COMMAND_LINE_SIZE);
+
+ if (random_offset) {
+ kernel_length = (long)(_end) - (long)(_text);
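+
+Together with the setup.h hunk that renames the struct field, the
+relocator above now reads an absolute patch address from each .la_abs
+record instead of deriving it from a self-relative offset. A sketch:
+
+    struct rela_la_abs_sketch { long pc; long symvalue; };
+
+    static void *insn_to_patch(const struct rela_la_abs_sketch *p)
+    {
+            return (void *)p->pc;   /* was: (void *)p - p->offset */
+    }
+
+The firmware command line is also now mapped read-only via
+early_memremap_ro() and released with early_memunmap() once it has
+been copied into boot_command_line.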
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index aed65915e932e2..6748d7f3f22198 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -57,7 +57,9 @@
+ #define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+ #define LOONGSON_EFI_ENABLE (1 << 3)
+
++#ifdef CONFIG_EFI
+ struct screen_info screen_info __section(".data");
++#endif
+
+ unsigned long fw_arg0, fw_arg1, fw_arg2;
+ DEFINE_PER_CPU(unsigned long, kernelsp);
+@@ -367,6 +369,8 @@ void __init platform_init(void)
+ acpi_gbl_use_default_register_widths = false;
+ acpi_boot_table_init();
+ #endif
++
++ early_init_fdt_scan_reserved_mem();
+ unflatten_and_copy_device_tree();
+
+ #ifdef CONFIG_NUMA
+@@ -400,8 +404,6 @@ static void __init arch_mem_init(char **cmdline_p)
+
+ check_kernel_sections_mem();
+
+- early_init_fdt_scan_reserved_mem();
+-
+ /*
+ * In order to reduce the possibility of kernel panic when failed to
+ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index ef35c871244f08..d74dfe1206ed04 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -88,6 +88,73 @@ void show_ipi_list(struct seq_file *p, int prec)
+ }
+ }
+
++static inline void set_cpu_core_map(int cpu)
++{
++ int i;
++
++ cpumask_set_cpu(cpu, &cpu_core_setup_map);
++
++ for_each_cpu(i, &cpu_core_setup_map) {
++ if (cpu_data[cpu].package == cpu_data[i].package) {
++ cpumask_set_cpu(i, &cpu_core_map[cpu]);
++ cpumask_set_cpu(cpu, &cpu_core_map[i]);
++ }
++ }
++}
++
++static inline void set_cpu_sibling_map(int cpu)
++{
++ int i;
++
++ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
++
++ for_each_cpu(i, &cpu_sibling_setup_map) {
++ if (cpus_are_siblings(cpu, i)) {
++ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
++ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
++ }
++ }
++}
++
++static inline void clear_cpu_sibling_map(int cpu)
++{
++ int i;
++
++ for_each_cpu(i, &cpu_sibling_setup_map) {
++ if (cpus_are_siblings(cpu, i)) {
++ cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
++ cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
++ }
++ }
++
++ cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
++}
++
++/*
++ * Calculate a new cpu_foreign_map mask whenever a
++ * new cpu appears or disappears.
++ */
++void calculate_cpu_foreign_map(void)
++{
++ int i, k, core_present;
++ cpumask_t temp_foreign_map;
++
++ /* Re-calculate the mask */
++ cpumask_clear(&temp_foreign_map);
++ for_each_online_cpu(i) {
++ core_present = 0;
++ for_each_cpu(k, &temp_foreign_map)
++ if (cpus_are_siblings(i, k))
++ core_present = 1;
++ if (!core_present)
++ cpumask_set_cpu(i, &temp_foreign_map);
++ }
++
++ for_each_online_cpu(i)
++ cpumask_andnot(&cpu_foreign_map[i],
++ &temp_foreign_map, &cpu_sibling_map[i]);
++}
++
+ /* Send mailbox buffer via Mail_Send */
+ static void csr_mail_send(uint64_t data, int cpu, int mailbox)
+ {
+@@ -195,7 +262,6 @@ static void __init fdt_smp_setup(void)
+
+ if (cpuid == loongson_sysconf.boot_cpu_id) {
+ cpu = 0;
+- numa_add_cpu(cpu);
+ } else {
+ cpu = cpumask_next_zero(-1, cpu_present_mask);
+ }
+@@ -205,6 +271,9 @@ static void __init fdt_smp_setup(void)
+ set_cpu_present(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
++
++ early_numa_add_cpu(cpu, 0);
++ set_cpuid_to_node(cpuid, 0);
+ }
+
+ loongson_sysconf.nr_cpus = num_processors;
+@@ -300,6 +369,7 @@ int loongson_cpu_disable(void)
+ numa_remove_cpu(cpu);
+ #endif
+ set_cpu_online(cpu, false);
++ clear_cpu_sibling_map(cpu);
+ calculate_cpu_foreign_map();
+ local_irq_save(flags);
+ irq_migrate_all_off_this_cpu();
+@@ -334,6 +404,7 @@ void __noreturn arch_cpu_idle_dead(void)
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
++ local_irq_disable();
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+@@ -376,59 +447,6 @@ static int __init ipi_pm_init(void)
+ core_initcall(ipi_pm_init);
+ #endif
+
+-static inline void set_cpu_sibling_map(int cpu)
+-{
+- int i;
+-
+- cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+-
+- for_each_cpu(i, &cpu_sibling_setup_map) {
+- if (cpus_are_siblings(cpu, i)) {
+- cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+- cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+- }
+- }
+-}
+-
+-static inline void set_cpu_core_map(int cpu)
+-{
+- int i;
+-
+- cpumask_set_cpu(cpu, &cpu_core_setup_map);
+-
+- for_each_cpu(i, &cpu_core_setup_map) {
+- if (cpu_data[cpu].package == cpu_data[i].package) {
+- cpumask_set_cpu(i, &cpu_core_map[cpu]);
+- cpumask_set_cpu(cpu, &cpu_core_map[i]);
+- }
+- }
+-}
+-
+-/*
+- * Calculate a new cpu_foreign_map mask whenever a
+- * new cpu appears or disappears.
+- */
+-void calculate_cpu_foreign_map(void)
+-{
+- int i, k, core_present;
+- cpumask_t temp_foreign_map;
+-
+- /* Re-calculate the mask */
+- cpumask_clear(&temp_foreign_map);
+- for_each_online_cpu(i) {
+- core_present = 0;
+- for_each_cpu(k, &temp_foreign_map)
+- if (cpus_are_siblings(i, k))
+- core_present = 1;
+- if (!core_present)
+- cpumask_set_cpu(i, &temp_foreign_map);
+- }
+-
+- for_each_online_cpu(i)
+- cpumask_andnot(&cpu_foreign_map[i],
+- &temp_foreign_map, &cpu_sibling_map[i]);
+-}
+-
+ /* Preload SMP state for boot cpu */
+ void smp_prepare_boot_cpu(void)
+ {
+@@ -437,6 +455,7 @@ void smp_prepare_boot_cpu(void)
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+ set_my_cpu_offset(per_cpu_offset(0));
++ numa_add_cpu(0);
+
+ rr_node = first_node(node_online_map);
+ for_each_possible_cpu(cpu) {
+@@ -504,7 +523,7 @@ asmlinkage void start_secondary(void)
+ unsigned int cpu;
+
+ sync_counter();
+- cpu = smp_processor_id();
++ cpu = raw_smp_processor_id();
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
+ cpu_probe();
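+
+The sibling/core map helpers move ahead of the hotplug code so that
+loongson_cpu_disable() can call the new clear_cpu_sibling_map(), which
+undoes exactly what the set helper established. A bitmask sketch of
+that symmetry (uint8_t stands in for cpumask_t; the sibling rule is a
+placeholder):
+
+    #include <stdint.h>
+
+    #define NCPUS 8
+
+    static uint8_t sibling_map[NCPUS];
+    static uint8_t setup_map;
+
+    static int are_siblings(int a, int b) { return a / 2 == b / 2; }
+
+    static void set_sibling_map_sketch(int cpu)
+    {
+            setup_map |= 1u << cpu;
+            for (int i = 0; i < NCPUS; i++)
+                    if ((setup_map & (1u << i)) && are_siblings(cpu, i)) {
+                            sibling_map[cpu] |= 1u << i;
+                            sibling_map[i]   |= 1u << cpu;
+                    }
+    }
+
+    static void clear_sibling_map_sketch(int cpu)
+    {
+            for (int i = 0; i < NCPUS; i++)
+                    if ((setup_map & (1u << i)) && are_siblings(cpu, i)) {
+                            sibling_map[cpu] &= ~(1u << i);
+                            sibling_map[i]   &= ~(1u << cpu);
+                    }
+            setup_map &= ~(1u << cpu);
+    }
+
+Keeping the masks symmetric across hotplug cycles means an offlined
+CPU also disappears from its siblings' masks, not just its own.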
+diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
+index 92270f14db9482..f623feb2129f12 100644
+--- a/arch/loongarch/kernel/stacktrace.c
++++ b/arch/loongarch/kernel/stacktrace.c
+@@ -32,7 +32,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ }
+
+ for (unwind_start(&state, task, regs);
+- !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
++ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || !consume_entry(cookie, addr))
+ break;
+diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
+index 3064af94db9c2e..e7015f7b70e37c 100644
+--- a/arch/loongarch/kernel/time.c
++++ b/arch/loongarch/kernel/time.c
+@@ -58,14 +58,16 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
+ return 0;
+ }
+
+-static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
++static int constant_set_state_periodic(struct clock_event_device *evt)
+ {
++ unsigned long period;
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+- timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+- timer_config &= ~CSR_TCFG_EN;
++ period = const_clock_freq / HZ;
++ timer_config = period & CSR_TCFG_VAL;
++ timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+@@ -73,16 +75,14 @@ static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
+ return 0;
+ }
+
+-static int constant_set_state_periodic(struct clock_event_device *evt)
++static int constant_set_state_shutdown(struct clock_event_device *evt)
+ {
+- unsigned long period;
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+- period = const_clock_freq / HZ;
+- timer_config = period & CSR_TCFG_VAL;
+- timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
++ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
++ timer_config &= ~CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+@@ -90,11 +90,6 @@ static int constant_set_state_periodic(struct clock_event_device *evt)
+ return 0;
+ }
+
+-static int constant_set_state_shutdown(struct clock_event_device *evt)
+-{
+- return 0;
+-}
+-
+ static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
+ {
+ unsigned long timer_config;
+@@ -161,7 +156,7 @@ int constant_clockevent_init(void)
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = constant_set_state_oneshot;
+- cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
++ cd->set_state_oneshot_stopped = constant_set_state_shutdown;
+ cd->set_state_periodic = constant_set_state_periodic;
+ cd->set_state_shutdown = constant_set_state_shutdown;
+ cd->set_next_event = constant_timer_next_event;
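+
+The timer state handlers are reshuffled so that "shutdown" actually
+stops the timer (previously it was an empty stub) and the
+oneshot-stopped hook reuses it. A sketch with an illustrative variable
+standing in for LOONGARCH_CSR_TCFG:
+
+    #include <stdint.h>
+
+    #define TCFG_EN     (1ull << 0)
+    #define TCFG_PERIOD (1ull << 1)
+
+    static uint64_t tcfg;   /* stand-in for the timer-config CSR */
+
+    static void timer_shutdown_sketch(void)
+    {
+            tcfg &= ~TCFG_EN;              /* really stop the timer */
+    }
+
+    static void timer_periodic_sketch(uint64_t ticks_per_period)
+    {
+            tcfg = ticks_per_period | TCFG_PERIOD | TCFG_EN;
+    }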
+diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c
+index ba324ba76fa156..a463d6961344c0 100644
+--- a/arch/loongarch/kernel/unwind.c
++++ b/arch/loongarch/kernel/unwind.c
+@@ -28,6 +28,5 @@ bool default_next_frame(struct unwind_state *state)
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+- state->error = true;
+ return false;
+ }
+diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
+index 55afc27320e12a..929ae240280a5f 100644
+--- a/arch/loongarch/kernel/unwind_prologue.c
++++ b/arch/loongarch/kernel/unwind_prologue.c
+@@ -227,7 +227,7 @@ static bool next_frame(struct unwind_state *state)
+ } while (!get_stack_info(state->sp, state->task, info));
+
+ out:
+- state->error = true;
++ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+ }
+
+diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
+index bb2ec86f37a8eb..d5afd0c80a4999 100644
+--- a/arch/loongarch/kernel/vmlinux.lds.S
++++ b/arch/loongarch/kernel/vmlinux.lds.S
+@@ -5,6 +5,7 @@
+
+ #define PAGE_SIZE _PAGE_SIZE
+ #define RO_EXCEPTION_TABLE_ALIGN 4
++#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
+
+ /*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+@@ -139,11 +140,11 @@ SECTIONS
+
+ #ifdef CONFIG_EFI_STUB
+ /* header symbols */
+- _kernel_asize = _end - _text;
+- _kernel_fsize = _edata - _text;
+- _kernel_vsize = _end - __initdata_begin;
+- _kernel_rsize = _edata - __initdata_begin;
+- _kernel_offset = kernel_offset - _text;
++ _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
++ _kernel_asize = ABSOLUTE(_end - _text);
++ _kernel_fsize = ABSOLUTE(_edata - _text);
++ _kernel_vsize = ABSOLUTE(_end - __initdata_begin);
++ _kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
+ #endif
+
+ .gptab.sdata : {
+diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
+index 1fc2f6813ea027..97b40defde0608 100644
+--- a/arch/loongarch/mm/fault.c
++++ b/arch/loongarch/mm/fault.c
+@@ -202,10 +202,10 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+- if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
+- goto bad_area;
+ if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
+ goto bad_area;
++ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
++ goto bad_area;
+ }
+
+ /*
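+
+The reordered read-fault check above tolerates faults on mappings that
+are writable but lack VM_READ, since the hardware cannot express
+write-only pages. A sketch of the resulting policy (the flag values
+match the kernel's vm_flags bits):
+
+    #include <stdbool.h>
+
+    #define VM_READ  0x1UL
+    #define VM_WRITE 0x2UL
+    #define VM_EXEC  0x4UL
+
+    static bool fault_access_ok(unsigned long vm_flags, bool is_write,
+                                bool fault_at_pc)
+    {
+            if (is_write)
+                    return vm_flags & VM_WRITE;
+            if (fault_at_pc)                 /* instruction fetch */
+                    return vm_flags & VM_EXEC;
+            return vm_flags & (VM_READ | VM_WRITE);
+    }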
+diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
+index cc3e81fe0186f4..c608adc9984581 100644
+--- a/arch/loongarch/mm/kasan_init.c
++++ b/arch/loongarch/mm/kasan_init.c
+@@ -44,6 +44,9 @@ void *kasan_mem_to_shadow(const void *addr)
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
++ if (maddr >= FIXADDR_START)
++ return (void *)(kasan_early_shadow_page);
++
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
+index 71d0539e2d0b02..2aae72e638713a 100644
+--- a/arch/loongarch/mm/pgtable.c
++++ b/arch/loongarch/mm/pgtable.c
+@@ -13,13 +13,13 @@ struct page *dmw_virt_to_page(unsigned long kaddr)
+ {
+ return pfn_to_page(virt_to_pfn(kaddr));
+ }
+-EXPORT_SYMBOL_GPL(dmw_virt_to_page);
++EXPORT_SYMBOL(dmw_virt_to_page);
+
+ struct page *tlb_virt_to_page(unsigned long kaddr)
+ {
+ return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+ }
+-EXPORT_SYMBOL_GPL(tlb_virt_to_page);
++EXPORT_SYMBOL(tlb_virt_to_page);
+
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
+index 2c0a411f23aa77..56bf1dd5358aa1 100644
+--- a/arch/loongarch/mm/tlb.c
++++ b/arch/loongarch/mm/tlb.c
+@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
+ set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+- }
++ } else {
++ int vec_sz __maybe_unused;
++ void *addr __maybe_unused;
++ struct page *page __maybe_unused;
++
++ /* Avoid lockdep warning */
++ rcu_cpu_starting(cpu);
++
+ #ifdef CONFIG_NUMA
+- else {
+- void *addr;
+- struct page *page;
+- const int vec_sz = sizeof(exception_handlers);
++ vec_sz = sizeof(exception_handlers);
+
+ if (pcpu_handlers[cpu])
+ return;
+@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
+ csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
+- }
+ #endif
++ }
+ }
+
+ void tlb_init(int cpu)
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index db9342b2d0e660..9eb7753d117dfb 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -461,7 +461,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ const u8 dst = regmap[insn->dst_reg];
+ const s16 off = insn->off;
+ const s32 imm = insn->imm;
+- const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+ const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
+
+ switch (code) {
+@@ -855,8 +854,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+- emit_sext_32(ctx, regmap[BPF_REG_0], true);
+-
+ if (i == ctx->prog->len - 1)
+ break;
+
+@@ -867,8 +864,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
++ {
++ const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
++
+ move_imm(ctx, dst, imm64, is32);
+ return 1;
++ }
+
+ /* dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+@@ -907,14 +908,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ }
+ break;
+ case BPF_DW:
+- if (is_signed_imm12(off)) {
+- emit_insn(ctx, ldd, dst, src, off);
+- } else if (is_signed_imm14(off)) {
+- emit_insn(ctx, ldptrd, dst, src, off);
+- } else {
+- move_imm(ctx, t1, off, is32);
+- emit_insn(ctx, ldxd, dst, src, t1);
+- }
++ move_imm(ctx, t1, off, is32);
++ emit_insn(ctx, ldxd, dst, src, t1);
+ break;
+ }
+
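+Moving the imm64 assembly out of the shared declarations and into the
+BPF_LD | BPF_IMM | BPF_DW case avoids dereferencing (insn + 1) for
+every instruction, which read past the end of the program when insn
+was the final one. A sketch of the now case-local access:
+
+    #include <stdint.h>
+
+    struct insn_sketch { uint8_t code; int32_t imm; };
+
+    /* valid only when the verifier guarantees a second half-insn */
+    static uint64_t ld_imm64_sketch(const struct insn_sketch *insn)
+    {
+            return ((uint64_t)(uint32_t)insn[1].imm << 32) |
+                   (uint32_t)insn[0].imm;
+    }
+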
+diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
+index 365f7de771cbb9..1da4dc46df43e5 100644
+--- a/arch/loongarch/pci/acpi.c
++++ b/arch/loongarch/pci/acpi.c
+@@ -225,6 +225,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+ if (bus) {
+ memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
+ kfree(info);
++ kfree(root_ops);
+ } else {
+ struct pci_bus *child;
+
+diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
+index 5c97d146332821..f597cd08a96be0 100644
+--- a/arch/loongarch/vdso/Makefile
++++ b/arch/loongarch/vdso/Makefile
+@@ -2,6 +2,7 @@
+ # Objects to go into the VDSO.
+
+ KASAN_SANITIZE := n
++UBSAN_SANITIZE := n
+ KCOV_INSTRUMENT := n
+
+ # Include the generic Makefile to check the built vdso.
+@@ -83,13 +84,3 @@ $(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE
+ obj-y += vdso.o
+
+ $(obj)/vdso.o : $(obj)/vdso.so
+-
+-# install commands for the unstripped file
+-quiet_cmd_vdso_install = INSTALL $@
+- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+-
+-vdso.so: $(obj)/vdso.so.dbg
+- @mkdir -p $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-vdso_install: vdso.so
+diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
+index 3137b45750dfce..b7cb28f5ee290a 100644
+--- a/arch/m68k/amiga/config.c
++++ b/arch/m68k/amiga/config.c
+@@ -180,6 +180,15 @@ int __init amiga_parse_bootinfo(const struct bi_record *record)
+ dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
+ dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
+ dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
++
++ /* CS-LAB Warp 1260 workaround */
++ if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) &&
++ dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) {
++
++ /* turn off all interrupts */
++ pr_info("Warp 1260 card detected: applying interrupt storm workaround\n");
++ *(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff;
++ }
+ } else
+ pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n");
+ #endif /* CONFIG_ZORRO */
+diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
+index 56f02ea2c248d8..715d1e0d973e61 100644
+--- a/arch/m68k/atari/ataints.c
++++ b/arch/m68k/atari/ataints.c
+@@ -302,11 +302,7 @@ void __init atari_init_IRQ(void)
+
+ if (ATARIHW_PRESENT(SCU)) {
+ /* init the SCU if present */
+- tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and
+- * disable HSYNC interrupts (who
+- * needs them?) MFP and SCC are
+- * enabled in VME mask
+- */
++ tt_scu.sys_mask = 0x0; /* disable all interrupts */
+ tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */
+ } else {
+ /* If no SCU and no Hades, the HSYNC interrupt needs to be
+diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
+index ed12358c4783b4..9a71b0148461a4 100644
+--- a/arch/m68k/include/asm/cacheflush_mm.h
++++ b/arch/m68k/include/asm/cacheflush_mm.h
+@@ -191,6 +191,7 @@ extern void cache_push_v(unsigned long vaddr, int len);
+ #define flush_cache_all() __flush_cache_all()
+
+ #define flush_cache_vmap(start, end) flush_cache_all()
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) flush_cache_all()
+
+ static inline void flush_cache_mm(struct mm_struct *mm)
+diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
+index d7f3de9c5d6f79..4ba14f3535fcbe 100644
+--- a/arch/m68k/include/asm/cmpxchg.h
++++ b/arch/m68k/include/asm/cmpxchg.h
+@@ -32,7 +32,7 @@ static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, in
+ x = tmp;
+ break;
+ default:
+- tmp = __invalid_xchg_size(x, ptr, size);
++ x = __invalid_xchg_size(x, ptr, size);
+ break;
+ }
+
+diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
+index 4dd2fd7acba9ea..2e1e9ad4f98ca7 100644
+--- a/arch/m68k/kernel/entry.S
++++ b/arch/m68k/kernel/entry.S
+@@ -433,7 +433,9 @@ resume:
+ movec %a0,%dfc
+
+ /* restore status register */
+- movew %a1@(TASK_THREAD+THREAD_SR),%sr
++ movew %a1@(TASK_THREAD+THREAD_SR),%d0
++ oriw #0x0700,%d0
++ movew %d0,%sr
+
+ rts
+
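+The resume path above now raises the interrupt priority level while
+restoring the status register instead of writing the saved value back
+directly. In C terms (0x0700 is the 68k SR interrupt-mask field):
+
+    static unsigned short sr_to_restore(unsigned short saved_sr)
+    {
+            return saved_sr | 0x0700;  /* force IPL to 7: all masked */
+    }
+
+This keeps interrupts off over the tail of the context switch, where
+taking one against half-restored state would be unsafe.
+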
+diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
+index e06ce147c0b7fc..fb87219fc3b469 100644
+--- a/arch/m68k/kernel/process.c
++++ b/arch/m68k/kernel/process.c
+@@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
+ {
+ /* regs will be equal to current_pt_regs() */
+ struct kernel_clone_args args = {
+- .flags = regs->d1 & ~CSIGNAL,
++ .flags = (u32)(regs->d1) & ~CSIGNAL,
+ .pidfd = (int __user *)regs->d3,
+ .child_tid = (int __user *)regs->d4,
+ .parent_tid = (int __user *)regs->d3,
+diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
+index c7cb29f0ff0163..29e06f46ab511f 100644
+--- a/arch/m68k/mac/misc.c
++++ b/arch/m68k/mac/misc.c
+@@ -451,30 +451,18 @@ void mac_poweroff(void)
+
+ void mac_reset(void)
+ {
+- if (macintosh_config->adb_type == MAC_ADB_II &&
+- macintosh_config->ident != MAC_MODEL_SE30) {
+- /* need ROMBASE in booter */
+- /* indeed, plus need to MAP THE ROM !! */
+-
+- if (mac_bi_data.rombase == 0)
+- mac_bi_data.rombase = 0x40800000;
+-
+- /* works on some */
+- rom_reset = (void *) (mac_bi_data.rombase + 0xa);
+-
+- local_irq_disable();
+- rom_reset();
+ #ifdef CONFIG_ADB_CUDA
+- } else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
+- macintosh_config->adb_type == MAC_ADB_CUDA) {
++ if (macintosh_config->adb_type == MAC_ADB_EGRET ||
++ macintosh_config->adb_type == MAC_ADB_CUDA) {
+ cuda_restart();
++ } else
+ #endif
+ #ifdef CONFIG_ADB_PMU
+- } else if (macintosh_config->adb_type == MAC_ADB_PB2) {
++ if (macintosh_config->adb_type == MAC_ADB_PB2) {
+ pmu_restart();
++ } else
+ #endif
+- } else if (CPU_IS_030) {
+-
++ if (CPU_IS_030) {
+ /* 030-specific reset routine. The idea is general, but the
+ * specific registers to reset are '030-specific. Until I
+ * have a non-030 machine, I can't test anything else.
+@@ -522,6 +510,18 @@ void mac_reset(void)
+ "jmp %/a0@\n\t" /* jump to the reset vector */
+ ".chip 68k"
+ : : "r" (offset), "a" (rombase) : "a0");
++ } else {
++ /* need ROMBASE in booter */
++ /* indeed, plus need to MAP THE ROM !! */
++
++ if (mac_bi_data.rombase == 0)
++ mac_bi_data.rombase = 0x40800000;
++
++ /* works on some */
++ rom_reset = (void *)(mac_bi_data.rombase + 0xa);
++
++ local_irq_disable();
++ rom_reset();
+ }
+
+ /* should never get here */
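+
+mac_reset() is restructured so the raw ROM reset vector becomes the
+last resort instead of the first branch tried. The "} else" placed
+before each #endif lets the chain compose under any combination of
+the two config options. A skeleton of the pattern, handlers elided:
+
+    static void reset_chain_sketch(int adb_type, int cpu_is_030)
+    {
+    #ifdef CONFIG_ADB_CUDA
+            if (adb_type == 1 /* CUDA/EGRET */) {
+                    /* cuda_restart() */
+            } else
+    #endif
+    #ifdef CONFIG_ADB_PMU
+            if (adb_type == 2 /* PB2 */) {
+                    /* pmu_restart() */
+            } else
+    #endif
+            if (cpu_is_030) {
+                    /* 030-specific reset */
+            } else {
+                    /* jump through the ROM reset vector */
+            }
+    }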
+diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
+index 4393bee64eaf80..85c4d29ef43e9e 100644
+--- a/arch/microblaze/kernel/Makefile
++++ b/arch/microblaze/kernel/Makefile
+@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code and low level code
+ CFLAGS_REMOVE_timer.o = -pg
+ CFLAGS_REMOVE_intc.o = -pg
+-CFLAGS_REMOVE_early_printk.o = -pg
+ CFLAGS_REMOVE_ftrace.o = -pg
+ CFLAGS_REMOVE_process.o = -pg
+ endif
+diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
+index 85dbda4a08a81f..03da36dc6d9c92 100644
+--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
++++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
+@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
+ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
+
+ #define err_printk(x) \
+- early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
++ pr_err("ERROR: Microblaze " x "-different for kernel and DTS\n");
+
+ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
+ {
+diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
+index 3827dc76edd823..4520c57415797f 100644
+--- a/arch/microblaze/mm/init.c
++++ b/arch/microblaze/mm/init.c
+@@ -193,11 +193,6 @@ asmlinkage void __init mmu_init(void)
+ {
+ unsigned int kstart, ksize;
+
+- if (!memblock.reserved.cnt) {
+- pr_emerg("Error memory count\n");
+- machine_restart(NULL);
+- }
+-
+ if ((u32) memblock.memory.regions[0].size < 0x400000) {
+ pr_emerg("Memory must be greater than 4MB\n");
+ machine_restart(NULL);
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index bc8421859006fa..91c3a502156b31 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -482,6 +482,7 @@ config MACH_LOONGSON2EF
+
+ config MACH_LOONGSON64
+ bool "Loongson 64-bit family of machines"
++ select ARCH_DMA_DEFAULT_COHERENT
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+@@ -1273,6 +1274,7 @@ config CPU_LOONGSON64
+ select CPU_SUPPORTS_MSA
+ select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
+ select CPU_MIPSR2_IRQ_VI
++ select DMA_NONCOHERENT
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ select MIPS_ASID_BITS_VARIABLE
+diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
+index f521874ebb07b2..67f067706af273 100644
+--- a/arch/mips/alchemy/devboards/db1200.c
++++ b/arch/mips/alchemy/devboards/db1200.c
+@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
+ i2c_register_board_info(0, db1200_i2c_devs,
+ ARRAY_SIZE(db1200_i2c_devs));
+ spi_register_board_info(db1200_spi_devs,
+- ARRAY_SIZE(db1200_i2c_devs));
++ ARRAY_SIZE(db1200_spi_devs));
+
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index fd91d9c9a2525d..6c6837181f5555 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -589,7 +589,7 @@ int __init db1550_dev_setup(void)
+ i2c_register_board_info(0, db1550_i2c_devs,
+ ARRAY_SIZE(db1550_i2c_devs));
+ spi_register_board_info(db1550_spi_devs,
+- ARRAY_SIZE(db1550_i2c_devs));
++ ARRAY_SIZE(db1550_spi_devs));
+
+ c = clk_get(NULL, "psc0_intclk");
+ if (!IS_ERR(c)) {
+diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
+index ec180ab92eaa83..66a8ba19c28722 100644
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
+ * RAC flush causes kernel panics on BCM6358 when booting from TP1
+ * because the bootloader is not initializing it properly.
+ */
+- bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
++ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
++ !!BMIPS_GET_CBR();
+ }
+
+ static void bcm6368_quirks(void)
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index f878f47e4501bc..cc7747c5f21f35 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -23,14 +23,6 @@ cpu0: cpu@0 {
+ };
+ };
+
+- memory@200000 {
+- compatible = "memory";
+- device_type = "memory";
+- reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */
+- <0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */
+- <0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352MB */
+- };
+-
+ cpu_clk: cpu_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+@@ -52,6 +44,13 @@ package0: bus@10000000 {
+ 0 0x40000000 0 0x40000000 0 0x40000000
+ 0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>;
+
++ isa@18000000 {
++ compatible = "isa";
++ #size-cells = <1>;
++ #address-cells = <2>;
++ ranges = <1 0x0 0x0 0x18000000 0x4000>;
++ };
++
+ pm: reset-controller@1fe07000 {
+ compatible = "loongson,ls2k-pm";
+ reg = <0 0x1fe07000 0 0x422>;
+@@ -100,8 +99,8 @@ liointc1: interrupt-controller@1fe11440 {
+ rtc0: rtc@1fe07800 {
+ compatible = "loongson,ls2k1000-rtc";
+ reg = <0 0x1fe07800 0 0x78>;
+- interrupt-parent = <&liointc0>;
+- interrupts = <60 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-parent = <&liointc1>;
++ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0: serial@1fe00000 {
+@@ -109,7 +108,7 @@ uart0: serial@1fe00000 {
+ reg = <0 0x1fe00000 0 0x8>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&liointc0>;
+- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+@@ -118,7 +117,6 @@ pci@1a000000 {
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+- #interrupt-cells = <2>;
+
+ reg = <0 0x1a000000 0 0x02000000>,
+ <0xfe 0x00000000 0 0x20000000>;
+@@ -130,15 +128,15 @@ gmac@3,0 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+- "pciclass0c03",
+- "loongson, pci-gmac";
++ "pciclass0c03";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+- interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+- <13 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
++ <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
++ phy-handle = <&phy1>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -157,11 +155,12 @@ gmac@3,1 {
+ "loongson, pci-gmac";
+
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+- interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+- <15 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
++ <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
++ phy-handle = <&phy1>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -179,7 +178,7 @@ ehci@4,1 {
+ "pciclass0c03";
+
+ reg = <0x2100 0x0 0x0 0x0 0x0>;
+- interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ };
+
+@@ -190,7 +189,7 @@ ohci@4,2 {
+ "pciclass0c03";
+
+ reg = <0x2200 0x0 0x0 0x0 0x0>;
+- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ };
+
+@@ -201,97 +200,121 @@ sata@8,0 {
+ "pciclass0106";
+
+ reg = <0x4000 0x0 0x0 0x0 0x0>;
+- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc0>;
+ };
+
+- pci_bridge@9,0 {
++ pcie@9,0 {
+ compatible = "pci0014,7a19.0",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x4800 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_HIGH>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@a,0 {
++ pcie@a,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5000 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@b,0 {
++ pcie@b,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5800 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@c,0 {
++ pcie@c,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6000 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@d,0 {
++ pcie@d,0 {
+ compatible = "pci0014,7a19.0",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6800 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@e,0 {
++ pcie@e,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x7000 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
++ ranges;
+ external-facing;
+ };
+
+diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+index 7c69e8245c2f10..cce9428afc41fc 100644
+--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
++++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+@@ -193,8 +193,7 @@ gmac@3,0 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass020000",
+- "pciclass0200",
+- "loongson, pci-gmac";
++ "pciclass0200";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
+index f36c2519ed9768..1f14132b3fc98a 100644
+--- a/arch/mips/include/asm/cacheflush.h
++++ b/arch/mips/include/asm/cacheflush.h
+@@ -97,6 +97,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+ __flush_cache_vmap();
+ }
+
++#define flush_cache_vmap_early(start, end) do { } while (0)
++
+ extern void (*__flush_cache_vunmap)(void);
+
+ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
+index 4044eaf989ac7d..0921ddda11a4b3 100644
+--- a/arch/mips/include/asm/checksum.h
++++ b/arch/mips/include/asm/checksum.h
+@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ " .set pop"
+ : "=&r" (sum), "=&r" (tmp)
+ : "r" (saddr), "r" (daddr),
+- "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
++ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
++ : "memory");
+
+ return csum_fold(sum);
+ }
+diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h
+index 27415a288adf56..dc397f630c6608 100644
+--- a/arch/mips/include/asm/dmi.h
++++ b/arch/mips/include/asm/dmi.h
+@@ -5,7 +5,7 @@
+ #include <linux/io.h>
+ #include <linux/memblock.h>
+
+-#define dmi_early_remap(x, l) ioremap_cache(x, l)
++#define dmi_early_remap(x, l) ioremap(x, l)
+ #define dmi_early_unmap(x, l) iounmap(x)
+ #define dmi_remap(x, l) ioremap_cache(x, l)
+ #define dmi_unmap(x) iounmap(x)
+diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
+index c5c6864e64bc43..405c85173f2c16 100644
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -36,7 +36,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
++ asm goto("1:\t" B_INSN " 2f\n\t"
+ "2:\t.insn\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+@@ -50,7 +50,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
++ asm goto("1:\t" J_INSN " %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+ ".popsection\n\t"
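Annotation: the two hunks above are part of the tree-wide rename of the old asm_volatile_goto() wrapper macro to plain "asm goto", which newer compilers accept directly. A minimal standalone illustration of the construct (a GCC/Clang extension; this demo is not kernel code):

    #include <stdio.h>

    static int take_branch(void)
    {
            /* An empty asm body that is allowed to jump to the label. */
            asm goto("" : : : : l_yes);
            return 0;               /* fall-through path */
    l_yes:
            return 1;               /* reached only if the asm jumps */
    }

    int main(void)
    {
            printf("%d\n", take_branch()); /* prints 0: an empty asm never jumps */
            return 0;
    }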
+diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
+index 035b1a69e2d00d..9218b3ae338322 100644
+--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
++++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
+@@ -14,7 +14,11 @@
+ #define ADAPTER_ROM 8
+ #define ACPI_TABLE 9
+ #define SMBIOS_TABLE 10
+-#define MAX_MEMORY_TYPE 11
++#define UMA_VIDEO_RAM 11
++#define VUMA_VIDEO_RAM 12
++#define MAX_MEMORY_TYPE 13
++
++#define MEM_SIZE_IS_IN_BYTES (1 << 31)
+
+ #define LOONGSON3_BOOT_MEM_MAP_MAX 128
+ struct efi_memory_map_loongson {
+@@ -38,12 +42,14 @@ enum loongson_cpu_type {
+ Legacy_1B = 0x5,
+ Legacy_2G = 0x6,
+ Legacy_2H = 0x7,
++ Legacy_2K = 0x8,
+ Loongson_1A = 0x100,
+ Loongson_1B = 0x101,
+ Loongson_2E = 0x200,
+ Loongson_2F = 0x201,
+ Loongson_2G = 0x202,
+ Loongson_2H = 0x203,
++ Loongson_2K = 0x204,
+ Loongson_3A = 0x300,
+ Loongson_3B = 0x301
+ };
+@@ -117,7 +123,8 @@ struct irq_source_routing_table {
+ u64 pci_io_start_addr;
+ u64 pci_io_end_addr;
+ u64 pci_config_addr;
+- u32 dma_mask_bits;
++ u16 dma_mask_bits;
++ u16 dma_noncoherent;
+ } __packed;
+
+ struct interface_info {
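Annotation: newer firmware can report memory-map entries in bytes rather than megabytes, signalled by the new MEM_SIZE_IS_IN_BYTES flag in bit 31 of mem_size; szmem() in arch/mips/loongson64/init.c, further down in this patch, decodes it. The decode, as a small userspace sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define MEM_SIZE_IS_IN_BYTES (1U << 31)

    /* Return the region size in bytes for a firmware-reported mem_size. */
    static uint64_t decode_mem_size(uint64_t mem_size)
    {
            if (mem_size & MEM_SIZE_IS_IN_BYTES)
                    return mem_size & ~(uint64_t)MEM_SIZE_IS_IN_BYTES;
            return mem_size << 20;  /* legacy firmware counts megabytes */
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)decode_mem_size(256)); /* 256 MB in bytes */
            return 0;
    }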
+diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
+index 23c67c0871b17c..696b40beb774f5 100644
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -228,6 +228,10 @@ GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
+ GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
+ #define CM_GCR_CPC_STATUS_EX BIT(0)
+
++/* GCR_ACCESS - Controls core/IOCU access to GCRs */
++GCR_ACCESSOR_RW(32, 0x120, access_cm3)
++#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0)
++
+ /* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
+ GCR_ACCESSOR_RW(32, 0x130, l2_config)
+ #define CM_GCR_L2_CONFIG_BYPASS BIT(20)
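Annotation: the new CM_GCR_ACCESS_ACCESSEN mask is GENMASK(7, 0), i.e. bits 7..0 set. For reference, a simplified 32-bit expansion of the macro (the real one in include/linux/bits.h also covers 64-bit values and adds build-time sanity checks):

    #include <stdio.h>

    /* Set bits h..l inclusive, 32-bit only. */
    #define GENMASK32(h, l) (((~0U) >> (31 - (h))) & ((~0U) << (l)))

    int main(void)
    {
            printf("0x%x\n", GENMASK32(7, 0));   /* 0xff - the ACCESSEN field */
            printf("0x%x\n", GENMASK32(20, 20)); /* 0x100000 - same as BIT(20) */
            return 0;
    }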
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index daf3cf244ea972..4a2b40ce39e091 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+ {
+ regs->cp0_epc = val;
++ regs->cp0_cause &= ~CAUSEF_BD;
+ }
+
+ /* Query offset/name of register from its name/offset */
+@@ -154,9 +155,11 @@ static inline long regs_return_value(struct pt_regs *regs)
+ }
+
+ #define instruction_pointer(regs) ((regs)->cp0_epc)
++extern unsigned long exception_ip(struct pt_regs *regs);
++#define exception_ip(regs) exception_ip(regs)
+ #define profile_pc(regs) instruction_pointer(regs)
+
+-extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
++extern asmlinkage long syscall_trace_enter(struct pt_regs *regs);
+ extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
+
+ extern void die(const char *, struct pt_regs *) __noreturn;
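Annotation: pairing an extern declaration with "#define exception_ip(regs) exception_ip(regs)" looks redundant, but the self-referencing define is a common kernel idiom: it lets generic code test "#ifdef exception_ip" and fall back to instruction_pointer() on architectures that do not provide the hook. A toy version of the pattern (the names here are hypothetical, not from the patch):

    #include <stdio.h>

    /* "arch" header: provide a hook and advertise it to generic code. */
    static long arch_hook(long x) { return x + 1; }
    #define arch_hook(x) arch_hook(x)

    /* "generic" header: supply a fallback when no arch hook exists. */
    #ifndef arch_hook
    #define arch_hook(x) (x)
    #endif

    int main(void)
    {
            printf("%ld\n", arch_hook(41L)); /* 42: the arch version was picked */
            return 0;
    }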
+diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
+index d1b11f66f748f0..cb1045ebab0621 100644
+--- a/arch/mips/kernel/asm-offsets.c
++++ b/arch/mips/kernel/asm-offsets.c
+@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_REGS, thread_info, regs);
++ OFFSET(TI_SYSCALL, thread_info, syscall);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 368e8475870f08..5f6e9e2ebbdbb8 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+
+- /*
+- * With vectored interrupts things are getting platform specific.
+- * get_c0_compare_int is a hook to allow a platform to return the
+- * interrupt number of its liking.
+- */
+- irq = get_c0_compare_int();
+-
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+@@ -320,7 +313,6 @@ int r4k_clockevent_init(void)
+ min_delta = calculate_min_delta();
+
+ cd->rating = 300;
+- cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->event_handler = mips_event_handler;
+@@ -332,6 +324,13 @@ int r4k_clockevent_init(void)
+
+ cp0_timer_irq_installed = 1;
+
++ /*
++ * With vectored interrupts things are getting platform specific.
++ * get_c0_compare_int is a hook to allow a platform to return the
++ * interrupt number of its liking.
++ */
++ irq = get_c0_compare_int();
++
+ if (request_irq(irq, c0_compare_interrupt, flags, "timer",
+ c0_compare_interrupt))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index b406d8bfb15a36..c7fee72ea60679 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1725,12 +1725,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
++ change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
++ LOONGSON_CONF6_INTIMER);
+ break;
+ case PRID_IMP_LOONGSON_64G:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ decode_cpucfg(c);
++ change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
++ LOONGSON_CONF6_INTIMER);
+ break;
+ default:
+ panic("Unknown Loongson Processor ID!");
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index 5582a4ca1e9e36..7aa2c2360ff602 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -11,6 +11,7 @@
+
+ #include <asm/cpu-features.h>
+ #include <asm/cpu-info.h>
++#include <asm/fpu.h>
+
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+
+@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
+ struct cpuinfo_mips *c = &boot_cpu_data;
+ struct task_struct *t = current;
+
++ /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
++ * we are preempted before the lose_fpu(0) in start_thread.
++ */
++ lose_fpu(0);
++
+ t->thread.fpu.fcr31 = c->fpu_csr31;
+ switch (state->nan_2008) {
+ case 0:
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 5387ed0a51862b..b630604c577f9f 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
++
++ /*
++ * New tasks lose permission to use the fpu. This accelerates context
++ * switching for most programs since they don't use the fpu.
++ */
++ clear_tsk_thread_flag(p, TIF_USEDFPU);
++ clear_tsk_thread_flag(p, TIF_USEDMSA);
++ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
++
++#ifdef CONFIG_MIPS_MT_FPAFF
++ clear_tsk_thread_flag(p, TIF_FPUBOUND);
++#endif /* CONFIG_MIPS_MT_FPAFF */
++
+ if (unlikely(args->fn)) {
+ /* kernel thread */
+ unsigned long status = p->thread.cp0_status;
+@@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ p->thread.reg29 = (unsigned long) childregs;
+ p->thread.reg31 = (unsigned long) ret_from_fork;
+
+- /*
+- * New tasks lose permission to use the fpu. This accelerates context
+- * switching for most programs since they don't use the fpu.
+- */
+ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+- clear_tsk_thread_flag(p, TIF_USEDFPU);
+- clear_tsk_thread_flag(p, TIF_USEDMSA);
+- clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+-
+-#ifdef CONFIG_MIPS_MT_FPAFF
+- clear_tsk_thread_flag(p, TIF_FPUBOUND);
+-#endif /* CONFIG_MIPS_MT_FPAFF */
+-
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ #endif
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index d9df543f7e2c4c..61503a36067e9e 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -31,6 +31,7 @@
+ #include <linux/seccomp.h>
+ #include <linux/ftrace.h>
+
++#include <asm/branch.h>
+ #include <asm/byteorder.h>
+ #include <asm/cpu.h>
+ #include <asm/cpu-info.h>
+@@ -48,6 +49,12 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/syscalls.h>
+
++unsigned long exception_ip(struct pt_regs *regs)
++{
++ return exception_epc(regs);
++}
++EXPORT_SYMBOL(exception_ip);
++
+ /*
+ * Called by kernel/ptrace.c when detaching..
+ *
+@@ -1310,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request,
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+-asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
++asmlinkage long syscall_trace_enter(struct pt_regs *regs)
+ {
+ user_exit();
+
+- current_thread_info()->syscall = syscall;
+-
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ if (ptrace_report_syscall_entry(regs))
+ return -1;
+- syscall = current_thread_info()->syscall;
+ }
+
+ #ifdef CONFIG_SECCOMP
+@@ -1328,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ struct seccomp_data sd;
+ unsigned long args[6];
+
+- sd.nr = syscall;
++ sd.nr = current_thread_info()->syscall;
+ sd.arch = syscall_get_arch(current);
+ syscall_get_arguments(current, regs, args);
+ for (i = 0; i < 6; i++)
+@@ -1338,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ ret = __secure_computing(&sd);
+ if (ret == -1)
+ return ret;
+- syscall = current_thread_info()->syscall;
+ }
+ #endif
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
+
+- audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
++ audit_syscall_entry(current_thread_info()->syscall,
++ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+
+ /*
+ * Negative syscall numbers are mistaken for rejected syscalls, but
+ * won't have had the return value set appropriately, so we do so now.
+ */
+- if (syscall < 0)
++ if (current_thread_info()->syscall < 0)
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
+- return syscall;
++ return current_thread_info()->syscall;
+ }
+
+ /*
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index 18dc9b34505614..2c604717e63080 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -77,6 +77,18 @@ loads_done:
+ PTR_WD load_a7, bad_stack_a7
+ .previous
+
++ /*
++ * syscall number is in v0 unless we called syscall(__NR_###)
++ * where the real syscall number is in a0
++ */
++ subu t2, v0, __NR_O32_Linux
++ bnez t2, 1f /* __NR_syscall at offset 0 */
++ LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
++ b 2f
++1:
++ LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
++2:
++
+ lw t0, TI_FLAGS($28) # syscall tracing enabled?
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ and t0, t1
+@@ -114,16 +126,7 @@ syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+
+- /*
+- * syscall number is in v0 unless we called syscall(__NR_###)
+- * where the real syscall number is in a0
+- */
+- move a1, v0
+- subu t2, v0, __NR_O32_Linux
+- bnez t2, 1f /* __NR_syscall at offset 0 */
+- lw a1, PT_R4(sp)
+-
+-1: jal syscall_trace_enter
++ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
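Annotation: with this change the o32 entry path records the syscall number in thread_info->syscall before any tracing runs, covering the indirect syscall(__NR_###) case where the real number arrives in register a0 rather than v0. The selection the assembly performs is roughly this, in C (a sketch of the o32 convention, not kernel code):

    #include <stdio.h>

    #define __NR_O32_Linux 4000 /* base of the o32 table; __NR_syscall sits at offset 0 */

    static long pick_syscall_nr(long v0, long a0)
    {
            if (v0 == __NR_O32_Linux)  /* indirect syscall(2) */
                    return a0;         /* real number passed as first argument */
            return v0;
    }

    int main(void)
    {
            printf("%ld\n", pick_syscall_nr(4000, 4003)); /* 4003 (read) */
            printf("%ld\n", pick_syscall_nr(4003, 0));    /* 4003 */
            return 0;
    }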
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 97456b2ca7dc32..97788859238c34 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
++ LONG_S v0, TI_SYSCALL($28) # Store syscall number
++
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+@@ -72,7 +74,6 @@ syscall_common:
+ n32_syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+- move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
+index e6264aa62e457f..be11ea5cc67e04 100644
+--- a/arch/mips/kernel/scall64-n64.S
++++ b/arch/mips/kernel/scall64-n64.S
+@@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp)
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
++ LONG_S v0, TI_SYSCALL($28) # Store syscall number
++
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+@@ -82,7 +84,6 @@ n64_syscall_exit:
+ syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+- move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index d3c2616cba2269..7a5abb73e53127 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -79,6 +79,22 @@ loads_done:
+ PTR_WD load_a7, bad_stack_a7
+ .previous
+
++ /*
++ * absolute syscall number is in v0 unless we called syscall(__NR_###)
++ * where the real syscall number is in a0
++ * note: NR_syscall is the first O32 syscall but the macro is
++ * only defined when compiling with -mabi=32 (CONFIG_32BIT)
++ * therefore __NR_O32_Linux is used (4000)
++ */
++
++ subu t2, v0, __NR_O32_Linux
++ bnez t2, 1f /* __NR_syscall at offset 0 */
++ LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number
++ b 2f
++1:
++ LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number
++2:
++
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+@@ -113,22 +129,7 @@ trace_a_syscall:
+ sd a7, PT_R11(sp) # For indirect syscalls
+
+ move a0, sp
+- /*
+- * absolute syscall number is in v0 unless we called syscall(__NR_###)
+- * where the real syscall number is in a0
+- * note: NR_syscall is the first O32 syscall but the macro is
+- * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+- * therefore __NR_O32_Linux is used (4000)
+- */
+- .set push
+- .set reorder
+- subu t1, v0, __NR_O32_Linux
+- move a1, v0
+- bnez t1, 1f /* __NR_syscall at offset 0 */
+- ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+- .set pop
+-
+-1: jal syscall_trace_enter
++ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index cb871eb784a7c1..3f45b72561db9c 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -54,7 +54,7 @@ struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
+
+ EXPORT_SYMBOL(cpu_data);
+
+-#ifdef CONFIG_VT
++#ifdef CONFIG_VGA_CONSOLE
+ struct screen_info screen_info;
+ #endif
+
+@@ -326,11 +326,11 @@ static void __init bootmem_init(void)
+ panic("Incorrect memory mapping !!!");
+
+ if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
++ max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ #ifdef CONFIG_HIGHMEM
+- highstart_pfn = PFN_DOWN(HIGHMEM_START);
++ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
+ #else
+- max_low_pfn = PFN_DOWN(HIGHMEM_START);
+ max_pfn = max_low_pfn;
+ #endif
+ }
+diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
+index dd55d59b88db34..d445f8e849abdc 100644
+--- a/arch/mips/kernel/smp-cps.c
++++ b/arch/mips/kernel/smp-cps.c
+@@ -222,7 +222,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
+ write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
+
+ /* Ensure the core can access the GCRs */
+- set_gcr_access(1 << core);
++ if (mips_cm_revision() < CM_REV_CM3)
++ set_gcr_access(1 << core);
++ else
++ set_gcr_access_cm3(1 << core);
+
+ if (mips_cpc_present()) {
+ /* Reset the core */
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 8fbef537fb8859..81f6c4f8fbc154 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -351,10 +351,11 @@ early_initcall(mips_smp_ipi_init);
+ */
+ asmlinkage void start_secondary(void)
+ {
+- unsigned int cpu;
++ unsigned int cpu = raw_smp_processor_id();
+
+ cpu_probe();
+ per_cpu_trap_init(false);
++ rcu_cpu_starting(cpu);
+ mips_clockevent_init();
+ mp_ops->init_secondary();
+ cpu_report();
+@@ -366,7 +367,6 @@ asmlinkage void start_secondary(void)
+ */
+
+ calibrate_delay();
+- cpu = smp_processor_id();
+ cpu_data[cpu].udelay_val = loops_per_jiffy;
+
+ set_cpu_sibling_map(cpu);
+diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
+index 152034b8e0a0f3..4a296124604a15 100644
+--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
+@@ -354,7 +354,7 @@
+ 412 n32 utimensat_time64 sys_utimensat
+ 413 n32 pselect6_time64 compat_sys_pselect6_time64
+ 414 n32 ppoll_time64 compat_sys_ppoll_time64
+-416 n32 io_pgetevents_time64 sys_io_pgetevents
++416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64
+ 417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
+ 418 n32 mq_timedsend_time64 sys_mq_timedsend
+ 419 n32 mq_timedreceive_time64 sys_mq_timedreceive
+diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
+index 1a646813afdca4..1ee62a861380a2 100644
+--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
+@@ -27,7 +27,7 @@
+ 17 o32 break sys_ni_syscall
+ # 18 was sys_stat
+ 18 o32 unused18 sys_ni_syscall
+-19 o32 lseek sys_lseek
++19 o32 lseek sys_lseek compat_sys_lseek
+ 20 o32 getpid sys_getpid
+ 21 o32 mount sys_mount
+ 22 o32 umount sys_oldumount
+@@ -403,7 +403,7 @@
+ 412 o32 utimensat_time64 sys_utimensat sys_utimensat
+ 413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+ 414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+-416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
++416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+ 417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+ 418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+ 419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 246c6a6b02614c..5b778995d44831 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];
+
+ void reserve_exception_space(phys_addr_t addr, unsigned long size)
+ {
+- memblock_reserve(addr, size);
++ /*
++ * reserve exception space on CPUs other than CPU0
++ * is too late, since memblock is unavailable when APs
++ * up
++ */
++ if (smp_processor_id() == 0)
++ memblock_reserve(addr, size);
+ }
+
+ void __init *set_except_vector(int n, void *addr)
+diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
+index a3cf293658581e..0c45767eacf674 100644
+--- a/arch/mips/lantiq/prom.c
++++ b/arch/mips/lantiq/prom.c
+@@ -108,10 +108,9 @@ void __init prom_init(void)
+ prom_init_cmdline();
+
+ #if defined(CONFIG_MIPS_MT_SMP)
+- if (cpu_has_mipsmt) {
+- lantiq_smp_ops = vsmp_smp_ops;
++ lantiq_smp_ops = vsmp_smp_ops;
++ if (cpu_has_mipsmt)
+ lantiq_smp_ops.init_secondary = lantiq_init_secondary;
+- register_smp_ops(&lantiq_smp_ops);
+- }
++ register_smp_ops(&lantiq_smp_ops);
+ #endif
+ }
+diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
+index c961e2999f15ac..09ff052698614d 100644
+--- a/arch/mips/loongson64/env.c
++++ b/arch/mips/loongson64/env.c
+@@ -13,6 +13,8 @@
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
++
++#include <linux/dma-map-ops.h>
+ #include <linux/export.h>
+ #include <linux/pci_ids.h>
+ #include <asm/bootinfo.h>
+@@ -86,6 +88,12 @@ void __init prom_lefi_init_env(void)
+ cpu_clock_freq = ecpu->cpu_clock_freq;
+ loongson_sysconf.cputype = ecpu->cputype;
+ switch (ecpu->cputype) {
++ case Legacy_2K:
++ case Loongson_2K:
++ smp_group[0] = 0x900000001fe11000;
++ loongson_sysconf.cores_per_node = 2;
++ loongson_sysconf.cores_per_package = 2;
++ break;
+ case Legacy_3A:
+ case Loongson_3A:
+ loongson_sysconf.cores_per_node = 4;
+@@ -147,8 +155,14 @@ void __init prom_lefi_init_env(void)
+
+ loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
+ if (loongson_sysconf.dma_mask_bits < 32 ||
+- loongson_sysconf.dma_mask_bits > 64)
++ loongson_sysconf.dma_mask_bits > 64) {
+ loongson_sysconf.dma_mask_bits = 32;
++ dma_default_coherent = true;
++ } else {
++ dma_default_coherent = !eirq_source->dma_noncoherent;
++ }
++
++ pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
+
+ loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
+ loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
+@@ -213,6 +227,8 @@ void __init prom_lefi_init_env(void)
+ default:
+ break;
+ }
++ } else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) {
++ loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin;
+ } else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G) {
+ if (loongson_sysconf.bridgetype == LS7A)
+ loongson_fdt_blob = __dtb_loongson64g_4core_ls7a_begin;
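Annotation: the boot_param.h change earlier in this patch splits the old 32-bit dma_mask_bits field into a 16-bit mask width plus a dma_noncoherent flag; here the kernel distrusts implausible widths and otherwise takes the coherence setting from firmware. The clamping logic as a standalone sketch (struct trimmed down):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fw_irq_route {                /* stand-in for irq_source_routing_table */
            uint16_t dma_mask_bits;
            uint16_t dma_noncoherent;
    };

    static bool dma_default_coherent;

    static unsigned int decode_dma_mask(const struct fw_irq_route *src)
    {
            if (src->dma_mask_bits < 32 || src->dma_mask_bits > 64) {
                    dma_default_coherent = true; /* bogus value: assume coherent, 32-bit DMA */
                    return 32;
            }
            dma_default_coherent = !src->dma_noncoherent;
            return src->dma_mask_bits;
    }

    int main(void)
    {
            struct fw_irq_route r = { .dma_mask_bits = 64, .dma_noncoherent = 1 };
            printf("%u coherent=%d\n", decode_dma_mask(&r), dma_default_coherent);
            return 0;
    }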
+diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
+index ee8de1735b7c04..f25caa6aa9d306 100644
+--- a/arch/mips/loongson64/init.c
++++ b/arch/mips/loongson64/init.c
+@@ -49,8 +49,7 @@ void virtual_early_config(void)
+ void __init szmem(unsigned int node)
+ {
+ u32 i, mem_type;
+- static unsigned long num_physpages;
+- u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
++ phys_addr_t node_id, mem_start, mem_size;
+
+ /* Otherwise come from DTB */
+ if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
+@@ -64,30 +63,46 @@ void __init szmem(unsigned int node)
+
+ mem_type = loongson_memmap->map[i].mem_type;
+ mem_size = loongson_memmap->map[i].mem_size;
+- mem_start = loongson_memmap->map[i].mem_start;
++
++ /* Memory size comes in MB if MEM_SIZE_IS_IN_BYTES not set */
++ if (mem_size & MEM_SIZE_IS_IN_BYTES)
++ mem_size &= ~MEM_SIZE_IS_IN_BYTES;
++ else
++ mem_size = mem_size << 20;
++
++ mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start;
+
+ switch (mem_type) {
+ case SYSTEM_RAM_LOW:
+ case SYSTEM_RAM_HIGH:
+- start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
+- node_psize = (mem_size << 20) >> PAGE_SHIFT;
+- end_pfn = start_pfn + node_psize;
+- num_physpages += node_psize;
+- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+- (u32)node_id, mem_type, mem_start, mem_size);
+- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+- start_pfn, end_pfn, num_physpages);
+- memblock_add_node(PFN_PHYS(start_pfn),
+- PFN_PHYS(node_psize), node,
++ case UMA_VIDEO_RAM:
++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes usable\n",
++ (u32)node_id, mem_type, &mem_start, &mem_size);
++ memblock_add_node(mem_start, mem_size, node,
+ MEMBLOCK_NONE);
+ break;
+ case SYSTEM_RAM_RESERVED:
+- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+- (u32)node_id, mem_type, mem_start, mem_size);
+- memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
++ case VIDEO_ROM:
++ case ADAPTER_ROM:
++ case ACPI_TABLE:
++ case SMBIOS_TABLE:
++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes reserved\n",
++ (u32)node_id, mem_type, &mem_start, &mem_size);
++ memblock_reserve(mem_start, mem_size);
++ break;
++ /* We should not reserve VUMA_VIDEO_RAM as it overlaps with MMIO */
++ case VUMA_VIDEO_RAM:
++ default:
++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes unhandled\n",
++ (u32)node_id, mem_type, &mem_start, &mem_size);
+ break;
+ }
+ }
++
++ /* Reserve vgabios if it comes from firmware */
++ if (loongson_sysconf.vgabios_addr)
++ memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
++ SZ_256K);
+ }
+
+ #ifndef CONFIG_NUMA
+diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
+index e420800043b089..2a8e4cd72605d0 100644
+--- a/arch/mips/loongson64/reset.c
++++ b/arch/mips/loongson64/reset.c
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+ #include <linux/kexec.h>
+ #include <linux/pm.h>
++#include <linux/reboot.h>
+ #include <linux/slab.h>
+
+ #include <asm/bootinfo.h>
+@@ -21,36 +22,21 @@
+ #include <loongson.h>
+ #include <boot_param.h>
+
+-static void loongson_restart(char *command)
++static int firmware_restart(struct sys_off_data *unusedd)
+ {
+
+ void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
+
+ fw_restart();
+- while (1) {
+- if (cpu_wait)
+- cpu_wait();
+- }
++ return NOTIFY_DONE;
+ }
+
+-static void loongson_poweroff(void)
++static int firmware_poweroff(struct sys_off_data *unused)
+ {
+ void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
+
+ fw_poweroff();
+- while (1) {
+- if (cpu_wait)
+- cpu_wait();
+- }
+-}
+-
+-static void loongson_halt(void)
+-{
+- pr_notice("\n\n** You can safely turn off the power now **\n\n");
+- while (1) {
+- if (cpu_wait)
+- cpu_wait();
+- }
++ return NOTIFY_DONE;
+ }
+
+ #ifdef CONFIG_KEXEC
+@@ -154,9 +140,17 @@ static void loongson_crash_shutdown(struct pt_regs *regs)
+
+ static int __init mips_reboot_setup(void)
+ {
+- _machine_restart = loongson_restart;
+- _machine_halt = loongson_halt;
+- pm_power_off = loongson_poweroff;
++ if (loongson_sysconf.restart_addr) {
++ register_sys_off_handler(SYS_OFF_MODE_RESTART,
++ SYS_OFF_PRIO_FIRMWARE,
++ firmware_restart, NULL);
++ }
++
++ if (loongson_sysconf.poweroff_addr) {
++ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
++ SYS_OFF_PRIO_FIRMWARE,
++ firmware_poweroff, NULL);
++ }
+
+ #ifdef CONFIG_KEXEC
+ kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
+diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
+index e015a26a40f7a5..979993679d913e 100644
+--- a/arch/mips/loongson64/smp.c
++++ b/arch/mips/loongson64/smp.c
+@@ -466,12 +466,25 @@ static void loongson3_smp_finish(void)
+ static void __init loongson3_smp_setup(void)
+ {
+ int i = 0, num = 0; /* i: physical id, num: logical id */
++ int max_cpus = 0;
+
+ init_cpu_possible(cpu_none_mask);
+
++ for (i = 0; i < ARRAY_SIZE(smp_group); i++) {
++ if (!smp_group[i])
++ break;
++ max_cpus += loongson_sysconf.cores_per_node;
++ }
++
++ if (max_cpus < loongson_sysconf.nr_cpus) {
++ pr_err("SMP Groups are less than the number of CPUs\n");
++ loongson_sysconf.nr_cpus = max_cpus ? max_cpus : 1;
++ }
++
+ /* For unified kernel, NR_CPUS is the maximum possible value,
+ * loongson_sysconf.nr_cpus is the really present value
+ */
++ i = 0;
+ while (i < loongson_sysconf.nr_cpus) {
+ if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
+ /* Reserved physical CPU cores */
+@@ -492,14 +505,14 @@ static void __init loongson3_smp_setup(void)
+ __cpu_logical_map[num] = -1;
+ num++;
+ }
+-
+ csr_ipi_probe();
+ ipi_set0_regs_init();
+ ipi_clear0_regs_init();
+ ipi_status0_regs_init();
+ ipi_en0_regs_init();
+ ipi_mailbox_buf_init();
+- ipi_write_enable(0);
++ if (smp_group[0])
++ ipi_write_enable(0);
+
+ cpu_set_core(&cpu_data[0],
+ cpu_logical_map(0) % loongson_sysconf.cores_per_package);
+@@ -818,6 +831,9 @@ static int loongson3_disable_clock(unsigned int cpu)
+ uint64_t core_id = cpu_core(&cpu_data[cpu]);
+ uint64_t package_id = cpu_data[cpu].package;
+
++ if (!loongson_chipcfg[package_id] || !loongson_freqctrl[package_id])
++ return 0;
++
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
+ LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
+ } else {
+@@ -832,6 +848,9 @@ static int loongson3_enable_clock(unsigned int cpu)
+ uint64_t core_id = cpu_core(&cpu_data[cpu]);
+ uint64_t package_id = cpu_data[cpu].package;
+
++ if (!loongson_chipcfg[package_id] || !loongson_freqctrl[package_id])
++ return 0;
++
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
+ LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
+ } else {
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 02042100e26718..7f830634dbe7db 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -117,7 +117,7 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
+ * get faulted into the tlb (and thus flushed) anyways.
+ */
+ for (i = 0; i < nr; i++) {
+- addr = (unsigned long)kmap_local_page(page + i);
++ addr = (unsigned long)kmap_local_page(nth_page(page, i));
+ flush_data_cache_page(addr);
+ kunmap_local((void *)addr);
+ }
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index 5dcb525a899543..6e368a4658b544 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -422,7 +422,12 @@ void __init paging_init(void)
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ }
++
++ max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
++#else
++ max_mapnr = max_low_pfn;
+ #endif
++ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ free_area_init(max_zone_pfns);
+ }
+@@ -458,13 +463,6 @@ void __init mem_init(void)
+ */
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
+
+-#ifdef CONFIG_HIGHMEM
+- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+-#else
+- max_mapnr = max_low_pfn;
+-#endif
+- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+-
+ maar_init();
+ memblock_free_all();
+ setup_zero_pages(); /* Setup zeroed pages. */
+diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
+index 874ed6df97683a..34b9323bdabb0e 100644
+--- a/arch/mips/pci/ops-rc32434.c
++++ b/arch/mips/pci/ops-rc32434.c
+@@ -112,8 +112,8 @@ static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ * gives them time to settle
+ */
+ if (where == PCI_VENDOR_ID) {
+- if (ret == 0xffffffff || ret == 0x00000000 ||
+- ret == 0x0000ffff || ret == 0xffff0000) {
++ if (*val == 0xffffffff || *val == 0x00000000 ||
++ *val == 0x0000ffff || *val == 0xffff0000) {
+ if (delay > 4)
+ return 0;
+ delay *= 2;
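Annotation: the fix is subtle — the retry loop meant to give a just-reset device time to settle was testing ret (the function's status code) instead of *val (the config data actually read back), so the all-ones/all-zeros "not ready" patterns were never matched. The intended shape, sketched with a stubbed read:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static int attempts;

    /* Stub: garbage for the first two reads, then a valid vendor ID. */
    static void read_vendor_id(uint32_t *val)
    {
            *val = (++attempts < 3) ? 0xffffffff : 0x1234abcd;
    }

    static bool probe_vendor_id(uint32_t *val)
    {
            int delay = 1;

            while (delay <= 4) {
                    read_vendor_id(val);
                    /* Test the data read back, not a status code. */
                    if (*val != 0xffffffff && *val != 0x00000000 &&
                        *val != 0x0000ffff && *val != 0xffff0000)
                            return true;
                    delay *= 2; /* the driver waits briefly here, then retries */
            }
            return false;
    }

    int main(void)
    {
            uint32_t id = 0;
            printf("%d 0x%x\n", probe_vendor_id(&id), id); /* 1 0x1234abcd */
            return 0;
    }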
+diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
+index c9edd3fb380df5..9eaacd3d338805 100644
+--- a/arch/mips/pci/pcie-octeon.c
++++ b/arch/mips/pci/pcie-octeon.c
+@@ -230,12 +230,18 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
+ {
+ union cvmx_pcie_address pcie_addr;
+ union cvmx_pciercx_cfg006 pciercx_cfg006;
++ union cvmx_pciercx_cfg032 pciercx_cfg032;
+
+ pciercx_cfg006.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
+ if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
+ return 0;
+
++ pciercx_cfg032.u32 =
++ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
++ if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1))
++ return 0;
++
+ pcie_addr.u64 = 0;
+ pcie_addr.config.upper = 2;
+ pcie_addr.config.io = 1;
+diff --git a/arch/mips/sgi-ip30/ip30-console.c b/arch/mips/sgi-ip30/ip30-console.c
+index b91f8c4fdc7860..a087b7ebe12936 100644
+--- a/arch/mips/sgi-ip30/ip30-console.c
++++ b/arch/mips/sgi-ip30/ip30-console.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+
+ #include <linux/io.h>
++#include <linux/processor.h>
+
+ #include <asm/sn/ioc3.h>
+
+diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
+index 76683993cdd3ad..37df504d3ecbb0 100644
+--- a/arch/mips/sibyte/swarm/setup.c
++++ b/arch/mips/sibyte/swarm/setup.c
+@@ -129,7 +129,7 @@ void __init plat_mem_setup(void)
+ if (m41t81_probe())
+ swarm_rtc_type = RTC_M41T81;
+
+-#ifdef CONFIG_VT
++#ifdef CONFIG_VGA_CONSOLE
+ screen_info = (struct screen_info) {
+ .orig_video_page = 52,
+ .orig_video_mode = 3,
+diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
+index efad85c8c823b9..9984cf91be7d04 100644
+--- a/arch/mips/sni/setup.c
++++ b/arch/mips/sni/setup.c
+@@ -38,7 +38,7 @@ extern void sni_machine_power_off(void);
+
+ static void __init sni_display_setup(void)
+ {
+-#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
++#if defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
+ struct screen_info *si = &screen_info;
+ DISPLAY_STATUS *di;
+
+diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
+index 348cea0977927a..81484a776b333a 100644
+--- a/arch/nios2/include/asm/cacheflush.h
++++ b/arch/nios2/include/asm/cacheflush.h
+@@ -38,6 +38,7 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ #define flush_icache_pages flush_icache_pages
+
+ #define flush_cache_vmap(start, end) flush_dcache_range(start, end)
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) flush_dcache_range(start, end)
+
+ extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
+index 9cf7fb60441f89..be56eaafc8b957 100644
+--- a/arch/openrisc/kernel/setup.c
++++ b/arch/openrisc/kernel/setup.c
+@@ -255,6 +255,9 @@ void calibrate_delay(void)
+
+ void __init setup_arch(char **cmdline_p)
+ {
++ /* setup memblock allocator */
++ setup_memory();
++
+ unflatten_and_copy_device_tree();
+
+ setup_cpuinfo();
+@@ -278,9 +281,6 @@ void __init setup_arch(char **cmdline_p)
+ }
+ #endif
+
+- /* setup memblock allocator */
+- setup_memory();
+-
+ /* paging_init() sets up the MMU and marks all pages as reserved */
+ paging_init();
+
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
+index 9370888c9a7e31..90554a5558fbca 100644
+--- a/arch/openrisc/kernel/traps.c
++++ b/arch/openrisc/kernel/traps.c
+@@ -180,29 +180,39 @@ asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector)
+
+ asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address)
+ {
+- int code = FPE_FLTUNK;
+- unsigned long fpcsr = regs->fpcsr;
+-
+- if (fpcsr & SPR_FPCSR_IVF)
+- code = FPE_FLTINV;
+- else if (fpcsr & SPR_FPCSR_OVF)
+- code = FPE_FLTOVF;
+- else if (fpcsr & SPR_FPCSR_UNF)
+- code = FPE_FLTUND;
+- else if (fpcsr & SPR_FPCSR_DZF)
+- code = FPE_FLTDIV;
+- else if (fpcsr & SPR_FPCSR_IXF)
+- code = FPE_FLTRES;
+-
+- /* Clear all flags */
+- regs->fpcsr &= ~SPR_FPCSR_ALLF;
+-
+- force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
++ if (user_mode(regs)) {
++ int code = FPE_FLTUNK;
++ unsigned long fpcsr = regs->fpcsr;
++
++ if (fpcsr & SPR_FPCSR_IVF)
++ code = FPE_FLTINV;
++ else if (fpcsr & SPR_FPCSR_OVF)
++ code = FPE_FLTOVF;
++ else if (fpcsr & SPR_FPCSR_UNF)
++ code = FPE_FLTUND;
++ else if (fpcsr & SPR_FPCSR_DZF)
++ code = FPE_FLTDIV;
++ else if (fpcsr & SPR_FPCSR_IXF)
++ code = FPE_FLTRES;
++
++ /* Clear all flags */
++ regs->fpcsr &= ~SPR_FPCSR_ALLF;
++
++ force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
++ } else {
++ pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc);
++ die("Die:", regs, SIGFPE);
++ }
+ }
+
+ asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
+ {
+- force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
++ if (user_mode(regs)) {
++ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
++ } else {
++ pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc);
++ die("Die:", regs, SIGILL);
++ }
+ }
+
+ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index a15ab147af2e07..a077e6bf9475f5 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -14,9 +14,11 @@ config PARISC
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_NO_SG_CHAIN
++ select ARCH_SPLIT_ARG64 if !64BIT
+ select ARCH_SUPPORTS_HUGETLBFS if PA20
+ select ARCH_SUPPORTS_MEMORY_FAILURE
+ select ARCH_STACKWALK
++ select ARCH_HAS_CACHE_LINE_SIZE
+ select ARCH_HAS_DEBUG_VM_PGTABLE
+ select HAVE_RELIABLE_STACKTRACE
+ select DMA_OPS
+@@ -24,7 +26,6 @@ config PARISC
+ select RTC_DRV_GENERIC
+ select INIT_ALL_POSSIBLE
+ select BUG
+- select BUILDTIME_TABLE_SORT
+ select HAVE_PCI
+ select HAVE_PERF_EVENTS
+ select HAVE_KERNEL_BZIP2
+@@ -83,6 +84,7 @@ config PARISC
+ select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
+ select TRACE_IRQFLAGS_SUPPORT
+ select HAVE_FUNCTION_DESCRIPTORS if 64BIT
++ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
+
+ help
+ The PA-RISC microprocessor is designed by Hewlett-Packard and used
+@@ -113,9 +115,12 @@ config ARCH_HAS_ILOG2_U64
+ default n
+
+ config GENERIC_BUG
+- bool
+- default y
++ def_bool y
+ depends on BUG
++ select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
++
++config GENERIC_BUG_RELATIVE_POINTERS
++ bool
+
+ config GENERIC_HWEIGHT
+ bool
+@@ -138,11 +143,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default 8
+
+ config ARCH_MMAP_RND_BITS_MAX
+- default 24 if 64BIT
+- default 17
++ default 18 if 64BIT
++ default 13
+
+ config ARCH_MMAP_RND_COMPAT_BITS_MAX
+- default 17
++ default 13
+
+ # unless you want to implement ACPI on PA-RISC ... ;-)
+ config PM
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index 968ebe17494c5f..920db57b6b4cc8 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -177,12 +177,8 @@ vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h
+ endif
+
+-PHONY += vdso_install
+-
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@
+- $(if $(CONFIG_COMPAT_VDSO), \
+- $(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@)
++vdso-install-y += arch/parisc/kernel/vdso32/vdso32.so
++vdso-install-$(CONFIG_64BIT) += arch/parisc/kernel/vdso64/vdso64.so
+
+ install: KBUILD_IMAGE := vmlinux
+ zinstall: KBUILD_IMAGE := vmlinuz
+diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
+index 1ed45fd085d3b8..1eb488f25b8380 100644
+--- a/arch/parisc/include/asm/alternative.h
++++ b/arch/parisc/include/asm/alternative.h
+@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* Alternative SMP implementation. */
+ #define ALTERNATIVE(cond, replacement) "!0:" \
+- ".section .altinstructions, \"aw\" !" \
++ ".section .altinstructions, \"a\" !" \
++ ".align 4 !" \
+ ".word (0b-4-.) !" \
+ ".hword 1, " __stringify(cond) " !" \
+ ".word " __stringify(replacement) " !" \
+@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* to replace one single instructions by a new instruction */
+ #define ALTERNATIVE(from, to, cond, replacement)\
+- .section .altinstructions, "aw" ! \
++ .section .altinstructions, "a" ! \
++ .align 4 ! \
+ .word (from - .) ! \
+ .hword (to - from)/4, cond ! \
+ .word replacement ! \
+@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* to replace multiple instructions by new code */
+ #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
+- .section .altinstructions, "aw" ! \
++ .section .altinstructions, "a" ! \
++ .align 4 ! \
+ .word (from - .) ! \
+ .hword -num_instructions, cond ! \
+ .word (new_instr_ptr - .) ! \
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 75677b526b2bb7..000a28e1c5e8d4 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -97,26 +97,28 @@
+ * version takes two arguments: a src and destination register.
+ * However, the source and destination registers can not be
+ * the same register.
++ *
++ * We use add,l to avoid clobbering the C/B bits in the PSW.
+ */
+
+ .macro tophys grvirt, grphys
+- ldil L%(__PAGE_OFFSET), \grphys
+- sub \grvirt, \grphys, \grphys
++ ldil L%(-__PAGE_OFFSET), \grphys
++ addl \grvirt, \grphys, \grphys
+ .endm
+-
++
+ .macro tovirt grphys, grvirt
+ ldil L%(__PAGE_OFFSET), \grvirt
+- add \grphys, \grvirt, \grvirt
++ addl \grphys, \grvirt, \grvirt
+ .endm
+
+ .macro tophys_r1 gr
+- ldil L%(__PAGE_OFFSET), %r1
+- sub \gr, %r1, \gr
++ ldil L%(-__PAGE_OFFSET), %r1
++ addl \gr, %r1, \gr
+ .endm
+-
++
+ .macro tovirt_r1 gr
+ ldil L%(__PAGE_OFFSET), %r1
+- add \gr, %r1, \gr
++ addl \gr, %r1, \gr
+ .endm
+
+ .macro delay value
+@@ -574,7 +576,9 @@
+ */
+ #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
+ .section __ex_table,"aw" ! \
++ .align 4 ! \
+ .word (fault_addr - .), (except_addr - .) ! \
++ or %r0,%r0,%r0 ! \
+ .previous
+
+
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index 4b6d60b941247e..833555f74ffa72 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -17,24 +17,27 @@
+ #define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
+ #define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
+
+-#if defined(CONFIG_64BIT)
+-#define ASM_WORD_INSN ".dword\t"
++#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
++# define __BUG_REL(val) ".word " __stringify(val) " - ."
+ #else
+-#define ASM_WORD_INSN ".word\t"
++# define __BUG_REL(val) ".word " __stringify(val)
+ #endif
+
++
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define BUG() \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
+- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+- "\t.short %c1, %c2\n" \
+- "\t.org 2b+%c3\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align 4\n" \
++ "2:\t" __BUG_REL(1b) "\n" \
++ "\t" __BUG_REL(%c0) "\n" \
++ "\t.short %1, %2\n" \
++ "\t.blockz %3-2*4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+- "i" (0), "i" (sizeof(struct bug_entry)) ); \
++ "i" (0), "i" (sizeof(struct bug_entry)) ); \
+ unreachable(); \
+ } while(0)
+
+@@ -51,10 +54,12 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
+- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+- "\t.short %c1, %c2\n" \
+- "\t.org 2b+%c3\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align 4\n" \
++ "2:\t" __BUG_REL(1b) "\n" \
++ "\t" __BUG_REL(%c0) "\n" \
++ "\t.short %1, %2\n" \
++ "\t.blockz %3-2*4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_WARNING|(flags)), \
+@@ -65,10 +70,11 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
+- "2:\t" ASM_WORD_INSN "1b\n" \
+- "\t.short %c0\n" \
+- "\t.org 2b+%c1\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align 4\n" \
++ "2:\t" __BUG_REL(1b) "\n" \
++ "\t.short %0\n" \
++ "\t.blockz %1-4-2\n" \
+ "\t.popsection" \
+ : : "i" (BUGFLAG_WARNING|(flags)), \
+ "i" (sizeof(struct bug_entry)) ); \
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 2a60d7a72f1fa8..a3f0f100f21949 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -20,7 +20,16 @@
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
++#ifdef CONFIG_PA20
++#define ARCH_DMA_MINALIGN 128
++#else
++#define ARCH_DMA_MINALIGN 32
++#endif
++#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
++
++#define arch_slab_minalign() ((unsigned)dcache_stride)
++#define cache_line_size() dcache_stride
++#define dma_get_cache_alignment cache_line_size
+
+ #define __read_mostly __section(".data..read_mostly")
+
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index b4006f2a97052d..8394718870e1a2 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -31,17 +31,17 @@ void flush_cache_all_local(void);
+ void flush_cache_all(void);
+ void flush_cache_mm(struct mm_struct *mm);
+
+-void flush_kernel_dcache_page_addr(const void *addr);
+-
+ #define flush_kernel_dcache_range(start,size) \
+ flush_kernel_dcache_range_asm((start), (start)+(size));
+
++/* The only way to flush a vmap range is to flush whole cache */
+ #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
+ void flush_kernel_vmap_range(void *vaddr, int size);
+ void invalidate_kernel_vmap_range(void *vaddr, int size);
+
+-#define flush_cache_vmap(start, end) flush_cache_all()
+-#define flush_cache_vunmap(start, end) flush_cache_all()
++void flush_cache_vmap(unsigned long start, unsigned long end);
++#define flush_cache_vmap_early(start, end) do { } while (0)
++void flush_cache_vunmap(unsigned long start, unsigned long end);
+
+ void flush_dcache_folio(struct folio *folio);
+ #define flush_dcache_folio flush_dcache_folio
+@@ -76,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+-/* defined in pacache.S exported in cache.c used by flush_anon_page */
+-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+-
+ #define ARCH_HAS_FLUSH_ANON_PAGE
+ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
+
+ #define ARCH_HAS_FLUSH_ON_KUNMAP
+-static inline void kunmap_flush_on_unmap(const void *addr)
+-{
+- flush_kernel_dcache_page_addr(addr);
+-}
++void kunmap_flush_on_unmap(const void *addr);
+
+ #endif /* _PARISC_CACHEFLUSH_H */
+
+diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
+index 3c43baca7b397d..2aceebcd695c80 100644
+--- a/arch/parisc/include/asm/checksum.h
++++ b/arch/parisc/include/asm/checksum.h
+@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+ " addc %0, %5, %0\n"
+ " addc %0, %3, %0\n"
+ "1: ldws,ma 4(%1), %3\n"
+-" addib,< 0, %2, 1b\n"
++" addib,> -1, %2, 1b\n"
+ " addc %0, %3, %0\n"
+ "\n"
+ " extru %0, 31, 16, %4\n"
+@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ ** Try to keep 4 registers with "live" values ahead of the ALU.
+ */
+
++" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */
+ " ldd,ma 8(%1), %4\n" /* get 1st saddr word */
+ " ldd,ma 8(%2), %5\n" /* get 1st daddr word */
+ " add %4, %0, %0\n"
+@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ " add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
+ " extrd,u %0, 31, 32, %4\n"/* copy upper half down */
+ " depdi 0, 31, 32, %0\n"/* clear upper half */
+-" add %4, %0, %0\n" /* fold into 32-bits */
+-" addc 0, %0, %0\n" /* add carry */
++" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */
++" addc 0, %0, %0\n" /* add final carry */
+
+ #else
+
+@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ " ldw,ma 4(%2), %7\n" /* 4th daddr */
+ " addc %6, %0, %0\n"
+ " addc %7, %0, %0\n"
+-" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
++" addc %3, %0, %0\n" /* fold in proto+len */
++" addc 0, %0, %0\n" /* add carry */
+
+ #endif
+ : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
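Annotation: both halves of this hunk address dropped carries while folding the IPv6 pseudo-header sum down to 16 bits ("add,dc" adds with the carry bit left by the preceding add). Detached from the PA-RISC assembly, the folding step looks like this in portable C, assuming the usual ones-complement checksum definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit ones-complement accumulator down to a 16-bit checksum. */
    static uint16_t csum_fold(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16); /* fold; may carry into bit 16 */
            sum = (sum & 0xffff) + (sum >> 16); /* fold that carry back in */
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* Dropping the carry here would yield 0xffff instead of 0xfffe. */
            printf("0x%04x\n", csum_fold(0xffff0001u));
            return 0;
    }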
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 140eaa97bf215d..2d73d3c3cd37f8 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -349,15 +349,7 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_HWCAP 0
+
+-/* Masks for stack and mmap randomization */
+-#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
+-#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
+-#define STACK_RND_MASK MMAP_RND_MASK
+-
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *);
+-#define arch_randomize_brk arch_randomize_brk
+-
++#define STACK_RND_MASK 0x7ff /* 8MB of VA */
+
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h
+new file mode 100644
+index 00000000000000..4ea23e3d79dc90
+--- /dev/null
++++ b/arch/parisc/include/asm/extable.h
+@@ -0,0 +1,64 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __PARISC_EXTABLE_H
++#define __PARISC_EXTABLE_H
++
++#include <asm/ptrace.h>
++#include <linux/compiler.h>
++
++/*
++ * The exception table consists of three addresses:
++ *
++ * - A relative address to the instruction that is allowed to fault.
++ * - A relative address at which the program should continue (fixup routine)
++ * - An asm statement which specifies which CPU register will
++ * receive -EFAULT when an exception happens if the lowest bit in
++ * the fixup address is set.
++ *
++ * Note: The register specified in the err_opcode instruction will be
++ * modified at runtime if a fault happens. Register %r0 will be ignored.
++ *
++ * Since relative addresses are used, 32bit values are sufficient even on
++ * 64bit kernel.
++ */
++
++struct pt_regs;
++int fixup_exception(struct pt_regs *regs);
++
++#define ARCH_HAS_RELATIVE_EXTABLE
++struct exception_table_entry {
++ int insn; /* relative address of insn that is allowed to fault. */
++ int fixup; /* relative address of fixup routine */
++ int err_opcode; /* sample opcode with register which holds error code */
++};
++
++#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\
++ ".section __ex_table,\"aw\"\n" \
++ ".align 4\n" \
++ ".word (" #fault_addr " - .), (" #except_addr " - .)\n" \
++ opcode "\n" \
++ ".previous\n"
++
++/*
++ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
++ * (with lowest bit set) for which the fault handler in fixup_exception() will
++ * load -EFAULT on fault into the register specified by the err_opcode instruction,
++ * and zeroes the target register in case of a read fault in get_user().
++ */
++#define ASM_EXCEPTIONTABLE_VAR(__err_var) \
++ int __err_var = 0
++#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\
++ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register)
++
++static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
++ struct exception_table_entry *b,
++ struct exception_table_entry tmp,
++ int delta)
++{
++ a->fixup = b->fixup + delta;
++ b->fixup = tmp.fixup - delta;
++ a->err_opcode = b->err_opcode;
++ b->err_opcode = tmp.err_opcode;
++}
++#define swap_ex_entry_fixup swap_ex_entry_fixup
++
++#endif
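Annotation: with ARCH_HAS_RELATIVE_EXTABLE each field stores a 32-bit offset relative to its own address, so the table needs no load-time relocation and stays half the size on 64-bit kernels; the absolute address is recovered by adding the offset to the field's address. A userspace demonstration of the encoding (workable here because both objects live in the same image, so the difference fits in 32 bits):

    #include <stdint.h>
    #include <stdio.h>

    static int target;       /* stands in for a faulting instruction */
    static int32_t rel_insn; /* stands in for exception_table_entry.insn */

    static void *rel_to_abs(int32_t *field)
    {
            return (char *)field + *field;
    }

    int main(void)
    {
            rel_insn = (int32_t)((intptr_t)&target - (intptr_t)&rel_insn);
            printf("%d\n", rel_to_abs(&rel_insn) == (void *)&target); /* 1 */
            return 0;
    }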
+diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
+index af2a598bc0f819..317ebc5edc9fe9 100644
+--- a/arch/parisc/include/asm/jump_label.h
++++ b/arch/parisc/include/asm/jump_label.h
+@@ -12,13 +12,15 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
++ ".align %1\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+- : : "i" (&((char *)key)[branch]) : : l_yes);
++ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++ : : l_yes);
+
+ return false;
+ l_yes:
+@@ -27,13 +29,15 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "b,n %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
++ ".align %1\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+- : : "i" (&((char *)key)[branch]) : : l_yes);
++ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++ : : l_yes);
+
+ return false;
+ l_yes:
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index ee9e071859b2f4..47ebc4c91eaff3 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -55,7 +55,7 @@
+ })
+
+ #ifdef CONFIG_SMP
+-# define __lock_aligned __section(".data..lock_aligned")
++# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
+ #endif
+
+ #endif /* __PARISC_LDCW_H */
+diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
+new file mode 100644
+index 00000000000000..89b6beeda0b869
+--- /dev/null
++++ b/arch/parisc/include/asm/mman.h
+@@ -0,0 +1,28 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __ASM_MMAN_H__
++#define __ASM_MMAN_H__
++
++#include <uapi/asm/mman.h>
++
++/* PARISC cannot allow mdwe as it needs writable stacks */
++static inline bool arch_memory_deny_write_exec_supported(void)
++{
++ return false;
++}
++#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
++
++static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
++{
++ /*
++ * The stack on parisc grows upwards, so if userspace requests memory
++ * for a stack, mark it with VM_GROWSUP so that the stack expansion in
++ * the fault handler will work.
++ */
++ if (flags & MAP_STACK)
++ return VM_GROWSUP;
++
++ return 0;
++}
++#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
++
++#endif /* __ASM_MMAN_H__ */
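Annotation: the parisc stack grows upward, so a MAP_STACK request must yield a VM_GROWSUP mapping or the fault handler could never expand it. The decode is a one-liner; shown here with the generic Linux flag values (a sketch compiled standalone rather than against kernel headers):

    #include <stdio.h>

    #define MAP_STACK  0x20000    /* generic value from asm-generic/mman-common.h */
    #define VM_GROWSUP 0x00000200

    static unsigned long calc_vm_flag_bits(unsigned long flags)
    {
            return (flags & MAP_STACK) ? VM_GROWSUP : 0;
    }

    int main(void)
    {
            printf("0x%lx\n", calc_vm_flag_bits(MAP_STACK)); /* 0x200 */
            return 0;
    }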
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index 667e703c0e8f69..d6ad1812866a08 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -16,6 +16,7 @@
+ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 974accac05cd34..babf65751e8180 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+ return pte;
+ }
+
++static inline pte_t ptep_get(pte_t *ptep)
++{
++ return READ_ONCE(*ptep);
++}
++#define ptep_get ptep_get
++
+ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+ {
+ pte_t pte;
+
+- if (!pte_young(*ptep))
+- return 0;
+-
+- pte = *ptep;
++ pte = ptep_get(ptep);
+ if (!pte_young(pte)) {
+ return 0;
+ }
+@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
+ return 1;
+ }
+
+-struct mm_struct;
+-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+-{
+- pte_t old_pte;
+-
+- old_pte = *ptep;
+- set_pte(ptep, __pte(0));
+-
+- return old_pte;
+-}
++int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
++pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+
++struct mm_struct;
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ set_pte(ptep, pte_wrprotect(*ptep));
+@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+ #define __HAVE_ARCH_PTE_SAME
+
+diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
+index ff6cbdb6903bca..ece4b3046515c6 100644
+--- a/arch/parisc/include/asm/processor.h
++++ b/arch/parisc/include/asm/processor.h
+@@ -47,6 +47,8 @@
+
+ #ifndef __ASSEMBLY__
+
++struct rlimit;
++unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
+ unsigned long calc_max_stack_size(unsigned long stack_max);
+
+ /*
+diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
+index 715c96ba2ec81c..e84883c6b4c7a0 100644
+--- a/arch/parisc/include/asm/signal.h
++++ b/arch/parisc/include/asm/signal.h
+@@ -4,23 +4,11 @@
+
+ #include <uapi/asm/signal.h>
+
+-#define _NSIG 64
+-/* bits-per-word, where word apparently means 'long' not 'int' */
+-#define _NSIG_BPW BITS_PER_LONG
+-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+-
+ # ifndef __ASSEMBLY__
+
+ /* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+-typedef unsigned long old_sigset_t; /* at least 32 bits */
+-
+-typedef struct {
+- /* next_signal() assumes this is a long - no choice */
+- unsigned long sig[_NSIG_WORDS];
+-} sigset_t;
+-
+ #include <asm/sigcontext.h>
+
+ #endif /* !__ASSEMBLY */
+diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
+index c822bd0c0e3c6c..51f40eaf778065 100644
+--- a/arch/parisc/include/asm/special_insns.h
++++ b/arch/parisc/include/asm/special_insns.h
+@@ -8,7 +8,8 @@
+ "copy %%r0,%0\n" \
+ "8:\tlpa %%r0(%1),%0\n" \
+ "9:\n" \
+- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
++ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
++ "or %%r0,%%r0,%%r0") \
+ : "=&r" (pa) \
+ : "r" (va) \
+ : "memory" \
+@@ -22,7 +23,8 @@
+ "copy %%r0,%0\n" \
+ "8:\tlpa %%r0(%%sr3,%1),%0\n" \
+ "9:\n" \
+- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
++ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
++ "or %%r0,%%r0,%%r0") \
+ : "=&r" (pa) \
+ : "r" (va) \
+ : "memory" \
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 2bf660eabe421e..88d0ae5769dde5 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -7,6 +7,7 @@
+ */
+ #include <asm/page.h>
+ #include <asm/cache.h>
++#include <asm/extable.h>
+
+ #include <linux/bug.h>
+ #include <linux/string.h>
+@@ -26,36 +27,6 @@
+ #define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr)
+ #endif
+
+-/*
+- * The exception table contains two values: the first is the relative offset to
+- * the address of the instruction that is allowed to fault, and the second is
+- * the relative offset to the address of the fixup routine. Since relative
+- * addresses are used, 32bit values are sufficient even on 64bit kernel.
+- */
+-
+-#define ARCH_HAS_RELATIVE_EXTABLE
+-struct exception_table_entry {
+- int insn; /* relative address of insn that is allowed to fault. */
+- int fixup; /* relative address of fixup routine */
+-};
+-
+-#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+- ".section __ex_table,\"aw\"\n" \
+- ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+- ".previous\n"
+-
+-/*
+- * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+- * (with lowest bit set) for which the fault handler in fixup_exception() will
+- * load -EFAULT into %r29 for a read or write fault, and zeroes the target
+- * register in case of a read fault in get_user().
+- */
+-#define ASM_EXCEPTIONTABLE_REG 29
+-#define ASM_EXCEPTIONTABLE_VAR(__variable) \
+- register long __variable __asm__ ("r29") = 0
+-#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+- ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+-
+ #define __get_user_internal(sr, val, ptr) \
+ ({ \
+ ASM_EXCEPTIONTABLE_VAR(__gu_err); \
+@@ -82,7 +53,7 @@ struct exception_table_entry {
+ \
+ __asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \
+ "9:\n" \
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
+ : "=r"(__gu_val), "+r"(__gu_err) \
+ : "i"(sr), "r"(ptr)); \
+ \
+@@ -114,8 +85,8 @@ struct exception_table_entry {
+ "1: ldw 0(%%sr%2,%3),%0\n" \
+ "2: ldw 4(%%sr%2,%3),%R0\n" \
+ "9:\n" \
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \
+ : "=&r"(__gu_tmp.l), "+r"(__gu_err) \
+ : "i"(sr), "r"(ptr)); \
+ \
+@@ -173,7 +144,7 @@ struct exception_table_entry {
+ __asm__ __volatile__ ( \
+ "1: " stx " %1,0(%%sr%2,%3)\n" \
+ "9:\n" \
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
+ : "+r"(__pu_err) \
+ : "r"(x), "i"(sr), "r"(ptr))
+
+@@ -185,15 +156,14 @@ struct exception_table_entry {
+ "1: stw %1,0(%%sr%2,%3)\n" \
+ "2: stw %R1,4(%%sr%2,%3)\n" \
+ "9:\n" \
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \
+ : "+r"(__pu_err) \
+ : "r"(__val), "i"(sr), "r"(ptr)); \
+ } while (0)
+
+ #endif /* !defined(CONFIG_64BIT) */
+
+-
+ /*
+ * Complex access routines -- external declarations
+ */
+@@ -215,7 +185,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+ #define INLINE_COPY_TO_USER
+ #define INLINE_COPY_FROM_USER
+
+-struct pt_regs;
+-int fixup_exception(struct pt_regs *regs);
+-
+ #endif /* __PARISC_UACCESS_H */
+diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784ec..8d94739d75c67c 100644
+--- a/arch/parisc/include/uapi/asm/errno.h
++++ b/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+
+ /* We now return you to your regularly scheduled HPUX. */
+
+-#define ENOSYM 215 /* symbol does not exist in executable */
+ #define ENOTSOCK 216 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 217 /* Destination address required */
+ #define EMSGSIZE 218 /* Message too long */
+@@ -101,7 +100,6 @@
+ #define ETIMEDOUT 238 /* Connection timed out */
+ #define ECONNREFUSED 239 /* Connection refused */
+ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+-#define EREMOTERELEASE 240 /* Remote peer released connection */
+ #define EHOSTDOWN 241 /* Host is down */
+ #define EHOSTUNREACH 242 /* No route to host */
+
+diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
+index 7a90070136e823..8e38a86996fc60 100644
+--- a/arch/parisc/include/uapi/asm/pdc.h
++++ b/arch/parisc/include/uapi/asm/pdc.h
+@@ -472,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */
+ unsigned long arch_rev;
+ unsigned long pot_key;
+ unsigned long curr_key;
++ unsigned long width; /* default of PSW_W bit (1=enabled) */
+ };
+
+ struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
+diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
+index 8e4895c5ea5d37..40d7a574c5dd19 100644
+--- a/arch/parisc/include/uapi/asm/signal.h
++++ b/arch/parisc/include/uapi/asm/signal.h
+@@ -57,10 +57,20 @@
+
+ #include <asm-generic/signal-defs.h>
+
++#define _NSIG 64
++#define _NSIG_BPW (sizeof(unsigned long) * 8)
++#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
++
+ # ifndef __ASSEMBLY__
+
+ # include <linux/types.h>
+
++typedef unsigned long old_sigset_t; /* at least 32 bits */
++
++typedef struct {
++ unsigned long sig[_NSIG_WORDS];
++} sigset_t;
++
+ /* Avoid too many header ordering problems. */
+ struct siginfo;
+
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 268d90a9325b46..f7953b0391cf60 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -20,6 +20,7 @@
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/syscalls.h>
++#include <linux/vmalloc.h>
+ #include <asm/pdc.h>
+ #include <asm/cache.h>
+ #include <asm/cacheflush.h>
+@@ -31,20 +32,31 @@
+ #include <asm/mmu_context.h>
+ #include <asm/cachectl.h>
+
++#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
++
++/*
++ * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
++ * of page flushes done by flush_cache_page_if_present. There are some
++ * pros and cons in using this option. It may increase the risk of
++ * random segmentation faults.
++ */
++#define CONFIG_FLUSH_PAGE_ACCESSED 0
++
+ int split_tlb __ro_after_init;
+ int dcache_stride __ro_after_init;
+ int icache_stride __ro_after_init;
+ EXPORT_SYMBOL(dcache_stride);
+
++/* Internal implementation in arch/parisc/kernel/pacache.S */
+ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+ EXPORT_SYMBOL(flush_dcache_page_asm);
+ void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+ void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+-
+-/* Internal implementation in arch/parisc/kernel/pacache.S */
+ void flush_data_cache_local(void *); /* flushes local data-cache only */
+ void flush_instruction_cache_local(void); /* flushes local code-cache only */
+
++static void flush_kernel_dcache_page_addr(const void *addr);
++
+ /* On some machines (i.e., ones with the Merced bus), there can be
+ * only a single PxTLB broadcast at a time; this must be guaranteed
+ * by software. We need a spinlock around all TLB flushes to ensure
+@@ -58,7 +70,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
+
+ struct pdc_cache_info cache_info __ro_after_init;
+ #ifndef CONFIG_PA20
+-struct pdc_btlb_info btlb_info __ro_after_init;
++struct pdc_btlb_info btlb_info;
+ #endif
+
+ DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
+@@ -317,6 +329,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ {
+ if (!static_branch_likely(&parisc_has_cache))
+ return;
++
++ /*
++ * The TLB is the engine of coherence on parisc. The CPU is
++ * entitled to speculate any page with a TLB mapping, so here
++	 * we kill the mapping then flush the page along a special
++	 * flush-only alias mapping. This guarantees that the page is no
++	 * longer in the cache for any process, nor may it be speculatively
++	 * read in (until the user or kernel specifically accesses it,
++ * of course).
++ */
++ flush_tlb_page(vma, vmaddr);
++
+ preempt_disable();
+ flush_dcache_page_asm(physaddr, vmaddr);
+ if (vma->vm_flags & VM_EXEC)
+@@ -324,46 +348,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ preempt_enable();
+ }
+
+-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
++static void flush_kernel_dcache_page_addr(const void *addr)
+ {
+- unsigned long flags, space, pgd, prot;
+-#ifdef CONFIG_TLB_PTLOCK
+- unsigned long pgd_lock;
+-#endif
++ unsigned long vaddr = (unsigned long)addr;
++ unsigned long flags;
+
+- vmaddr &= PAGE_MASK;
++ /* Purge TLB entry to remove translation on all CPUs */
++ purge_tlb_start(flags);
++ pdtlb(SR_KERNEL, addr);
++ purge_tlb_end(flags);
+
++ /* Use tmpalias flush to prevent data cache move-in */
+ preempt_disable();
++ flush_dcache_page_asm(__pa(vaddr), vaddr);
++ preempt_enable();
++}
+
+- /* Set context for flush */
+- local_irq_save(flags);
+- prot = mfctl(8);
+- space = mfsp(SR_USER);
+- pgd = mfctl(25);
+-#ifdef CONFIG_TLB_PTLOCK
+- pgd_lock = mfctl(28);
+-#endif
+- switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
+- local_irq_restore(flags);
+-
+- flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+- if (vma->vm_flags & VM_EXEC)
+- flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+- flush_tlb_page(vma, vmaddr);
++static void flush_kernel_icache_page_addr(const void *addr)
++{
++ unsigned long vaddr = (unsigned long)addr;
++ unsigned long flags;
+
+- /* Restore previous context */
+- local_irq_save(flags);
+-#ifdef CONFIG_TLB_PTLOCK
+- mtctl(pgd_lock, 28);
+-#endif
+- mtctl(pgd, 25);
+- mtsp(space, SR_USER);
+- mtctl(prot, 8);
+- local_irq_restore(flags);
++ /* Purge TLB entry to remove translation on all CPUs */
++ purge_tlb_start(flags);
++ pdtlb(SR_KERNEL, addr);
++ purge_tlb_end(flags);
+
++ /* Use tmpalias flush to prevent instruction cache move-in */
++ preempt_disable();
++ flush_icache_page_asm(__pa(vaddr), vaddr);
+ preempt_enable();
+ }
+
++void kunmap_flush_on_unmap(const void *addr)
++{
++ flush_kernel_dcache_page_addr(addr);
++}
++EXPORT_SYMBOL(kunmap_flush_on_unmap);
++
+ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ unsigned int nr)
+ {
+@@ -371,13 +393,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+
+ for (;;) {
+ flush_kernel_dcache_page_addr(kaddr);
+- flush_kernel_icache_page(kaddr);
++ flush_kernel_icache_page_addr(kaddr);
+ if (--nr == 0)
+ break;
+ kaddr += PAGE_SIZE;
+ }
+ }
+
++/*
++ * Walk page directory for MM to find PTEP pointer for address ADDR.
++ */
+ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
+ {
+ pte_t *ptep = NULL;
+@@ -406,6 +431,41 @@ static inline bool pte_needs_flush(pte_t pte)
+ == (_PAGE_PRESENT | _PAGE_ACCESSED);
+ }
+
++/*
++ * Return user physical address. Returns 0 if page is not present.
++ */
++static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
++{
++ unsigned long flags, space, pgd, prot, pa;
++#ifdef CONFIG_TLB_PTLOCK
++ unsigned long pgd_lock;
++#endif
++
++ /* Save context */
++ local_irq_save(flags);
++ prot = mfctl(8);
++ space = mfsp(SR_USER);
++ pgd = mfctl(25);
++#ifdef CONFIG_TLB_PTLOCK
++ pgd_lock = mfctl(28);
++#endif
++
++ /* Set context for lpa_user */
++ switch_mm_irqs_off(NULL, mm, NULL);
++ pa = lpa_user(addr);
++
++ /* Restore previous context */
++#ifdef CONFIG_TLB_PTLOCK
++ mtctl(pgd_lock, 28);
++#endif
++ mtctl(pgd, 25);
++ mtsp(space, SR_USER);
++ mtctl(prot, 8);
++ local_irq_restore(flags);
++
++ return pa;
++}
++
+ void flush_dcache_folio(struct folio *folio)
+ {
+ struct address_space *mapping = folio_flush_mapping(folio);
+@@ -454,50 +514,23 @@ void flush_dcache_folio(struct folio *folio)
+ if (addr + nr * PAGE_SIZE > vma->vm_end)
+ nr = (vma->vm_end - addr) / PAGE_SIZE;
+
+- if (parisc_requires_coherency()) {
+- for (i = 0; i < nr; i++) {
+- pte_t *ptep = get_ptep(vma->vm_mm,
+- addr + i * PAGE_SIZE);
+- if (!ptep)
+- continue;
+- if (pte_needs_flush(*ptep))
+- flush_user_cache_page(vma,
+- addr + i * PAGE_SIZE);
+- /* Optimise accesses to the same table? */
+- pte_unmap(ptep);
+- }
+- } else {
++ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
++ != (addr & (SHM_COLOUR - 1))) {
++ for (i = 0; i < nr; i++)
++ __flush_cache_page(vma,
++ addr + i * PAGE_SIZE,
++ (pfn + i) * PAGE_SIZE);
+ /*
+- * The TLB is the engine of coherence on parisc:
+- * The CPU is entitled to speculate any page
+- * with a TLB mapping, so here we kill the
+- * mapping then flush the page along a special
+- * flush only alias mapping. This guarantees that
+- * the page is no-longer in the cache for any
+- * process and nor may it be speculatively read
+- * in (until the user or kernel specifically
+- * accesses it, of course)
++ * Software is allowed to have any number
++ * of private mappings to a page.
+ */
+- for (i = 0; i < nr; i++)
+- flush_tlb_page(vma, addr + i * PAGE_SIZE);
+- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+- != (addr & (SHM_COLOUR - 1))) {
+- for (i = 0; i < nr; i++)
+- __flush_cache_page(vma,
+- addr + i * PAGE_SIZE,
+- (pfn + i) * PAGE_SIZE);
+- /*
+- * Software is allowed to have any number
+- * of private mappings to a page.
+- */
+- if (!(vma->vm_flags & VM_SHARED))
+- continue;
+- if (old_addr)
+- pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+- old_addr, addr, vma->vm_file);
+- if (nr == folio_nr_pages(folio))
+- old_addr = addr;
+- }
++ if (!(vma->vm_flags & VM_SHARED))
++ continue;
++ if (old_addr)
++ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
++ old_addr, addr, vma->vm_file);
++ if (nr == folio_nr_pages(folio))
++ old_addr = addr;
+ }
+ WARN_ON(++count == 4096);
+ }
+@@ -587,35 +620,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
+ extern void clear_user_page_asm(void *, unsigned long);
+ extern void copy_user_page_asm(void *, void *, unsigned long);
+
+-void flush_kernel_dcache_page_addr(const void *addr)
+-{
+- unsigned long flags;
+-
+- flush_kernel_dcache_page_asm(addr);
+- purge_tlb_start(flags);
+- pdtlb(SR_KERNEL, addr);
+- purge_tlb_end(flags);
+-}
+-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+-
+ static void flush_cache_page_if_present(struct vm_area_struct *vma,
+- unsigned long vmaddr, unsigned long pfn)
++ unsigned long vmaddr)
+ {
++#if CONFIG_FLUSH_PAGE_ACCESSED
+ bool needs_flush = false;
+- pte_t *ptep;
++ pte_t *ptep, pte;
+
+- /*
+- * The pte check is racy and sometimes the flush will trigger
+- * a non-access TLB miss. Hopefully, the page has already been
+- * flushed.
+- */
+ ptep = get_ptep(vma->vm_mm, vmaddr);
+ if (ptep) {
+- needs_flush = pte_needs_flush(*ptep);
++ pte = ptep_get(ptep);
++ needs_flush = pte_needs_flush(pte);
+ pte_unmap(ptep);
+ }
+ if (needs_flush)
+- flush_cache_page(vma, vmaddr, pfn);
++ __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
++#else
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long physaddr = get_upa(mm, vmaddr);
++
++ if (physaddr)
++ __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
++#endif
+ }
+
+ void copy_user_highpage(struct page *to, struct page *from,
+@@ -625,7 +651,7 @@ void copy_user_highpage(struct page *to, struct page *from,
+
+ kfrom = kmap_local_page(from);
+ kto = kmap_local_page(to);
+- flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
++ __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
+ copy_page_asm(kto, kfrom);
+ kunmap_local(kto);
+ kunmap_local(kfrom);
+@@ -634,16 +660,17 @@ void copy_user_highpage(struct page *to, struct page *from,
+ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)
+ {
+- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
++ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
+ memcpy(dst, src, len);
+- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
++ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
+ }
+
+ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long user_vaddr, void *dst, void *src, int len)
+ {
+- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
++ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
+ memcpy(dst, src, len);
++ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
+ }
+
+ /* __flush_tlb_range()
+@@ -677,32 +704,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
+
+ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ {
+- unsigned long addr, pfn;
+- pte_t *ptep;
+-
+- for (addr = start; addr < end; addr += PAGE_SIZE) {
+- bool needs_flush = false;
+- /*
+- * The vma can contain pages that aren't present. Although
+- * the pte search is expensive, we need the pte to find the
+- * page pfn and to check whether the page should be flushed.
+- */
+- ptep = get_ptep(vma->vm_mm, addr);
+- if (ptep) {
+- needs_flush = pte_needs_flush(*ptep);
+- pfn = pte_pfn(*ptep);
+- pte_unmap(ptep);
+- }
+- if (needs_flush) {
+- if (parisc_requires_coherency()) {
+- flush_user_cache_page(vma, addr);
+- } else {
+- if (WARN_ON(!pfn_valid(pfn)))
+- return;
+- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+- }
+- }
+- }
++ unsigned long addr;
++
++ for (addr = start; addr < end; addr += PAGE_SIZE)
++ flush_cache_page_if_present(vma, addr);
+ }
+
+ static inline unsigned long mm_total_size(struct mm_struct *mm)
+@@ -753,21 +758,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+ return;
+ flush_tlb_range(vma, start, end);
+- flush_cache_all();
++ if (vma->vm_flags & VM_EXEC)
++ flush_cache_all();
++ else
++ flush_data_cache();
+ return;
+ }
+
+- flush_cache_pages(vma, start, end);
++ flush_cache_pages(vma, start & PAGE_MASK, end);
+ }
+
+ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+ {
+- if (WARN_ON(!pfn_valid(pfn)))
+- return;
+- if (parisc_requires_coherency())
+- flush_user_cache_page(vma, vmaddr);
+- else
+- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
++ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+
+ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+@@ -775,34 +778,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
+ if (!PageAnon(page))
+ return;
+
+- if (parisc_requires_coherency()) {
+- if (vma->vm_flags & VM_SHARED)
+- flush_data_cache();
+- else
+- flush_user_cache_page(vma, vmaddr);
++ __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
++}
++
++int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep)
++{
++ pte_t pte = ptep_get(ptep);
++
++ if (!pte_young(pte))
++ return 0;
++ set_pte(ptep, pte_mkold(pte));
++#if CONFIG_FLUSH_PAGE_ACCESSED
++ __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
++#endif
++ return 1;
++}
++
++/*
++ * After a PTE is cleared, we have no way to flush the cache for
++ * the physical page. On PA8800 and PA8900 processors, these lines
++ * can cause random cache corruption. Thus, we must flush the cache
++ * as well as the TLB when clearing a PTE that's valid.
++ */
++pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep)
++{
++ struct mm_struct *mm = (vma)->vm_mm;
++ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
++ unsigned long pfn = pte_pfn(pte);
++
++ if (pfn_valid(pfn))
++ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
++ else if (pte_accessible(mm, pte))
++ flush_tlb_page(vma, addr);
++
++ return pte;
++}
++
++/*
++ * The physical address for pages in the ioremap case can be obtained
++ * from the vm_struct struct. I wasn't able to successfully handle the
++ * vmalloc and vmap cases. We have an array of struct page pointers in
++ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
++ */
++void flush_cache_vmap(unsigned long start, unsigned long end)
++{
++ unsigned long addr, physaddr;
++ struct vm_struct *vm;
++
++ /* Prevent cache move-in */
++ flush_tlb_kernel_range(start, end);
++
++ if (end - start >= parisc_cache_flush_threshold) {
++ flush_cache_all();
+ return;
+ }
+
+- flush_tlb_page(vma, vmaddr);
+- preempt_disable();
+- flush_dcache_page_asm(page_to_phys(page), vmaddr);
+- preempt_enable();
++ if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
++ flush_cache_all();
++ return;
++ }
++
++ vm = find_vm_area((void *)start);
++ if (WARN_ON_ONCE(!vm)) {
++ flush_cache_all();
++ return;
++ }
++
++ /* The physical addresses of IOREMAP regions are contiguous */
++ if (vm->flags & VM_IOREMAP) {
++ physaddr = vm->phys_addr;
++ for (addr = start; addr < end; addr += PAGE_SIZE) {
++ preempt_disable();
++ flush_dcache_page_asm(physaddr, start);
++ flush_icache_page_asm(physaddr, start);
++ preempt_enable();
++ physaddr += PAGE_SIZE;
++ }
++ return;
++ }
++
++ flush_cache_all();
+ }
++EXPORT_SYMBOL(flush_cache_vmap);
+
++/*
++ * The vm_struct has been retired and the page table is set up. The
++ * last page in the range is a guard page. Its physical address can't
++ * be determined using lpa, so there is no way to flush the range
++ * using flush_dcache_page_asm.
++ */
++void flush_cache_vunmap(unsigned long start, unsigned long end)
++{
++ /* Prevent cache move-in */
++ flush_tlb_kernel_range(start, end);
++ flush_data_cache();
++}
++EXPORT_SYMBOL(flush_cache_vunmap);
++
++/*
++ * On systems with PA8800/PA8900 processors, there is no way to flush
++ * a vmap range other than using the architected loop to flush the
++ * entire cache. The page directory is not set up, so we can't use
++ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
++ * L2 is physically indexed but FDCE/FICE instructions in virtual
++ * mode output their virtual address on the core bus, not their
++ * real address. As a result, the L2 cache index formed from the
++ * virtual address will most likely not be the same as the L2 index
++ * formed from the real address.
++ */
+ void flush_kernel_vmap_range(void *vaddr, int size)
+ {
+ unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
+
+- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+- (unsigned long)size >= parisc_cache_flush_threshold) {
+- flush_tlb_kernel_range(start, end);
+- flush_data_cache();
++ flush_tlb_kernel_range(start, end);
++
++ if (!static_branch_likely(&parisc_has_dcache))
++ return;
++
++ /* If interrupts are disabled, we can only do local flush */
++ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
++ flush_data_cache_local(NULL);
+ return;
+ }
+
+- flush_kernel_dcache_range_asm(start, end);
+- flush_tlb_kernel_range(start, end);
++ flush_data_cache();
+ }
+ EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+@@ -814,15 +916,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
+ /* Ensure DMA is complete */
+ asm_syncdma();
+
+- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+- (unsigned long)size >= parisc_cache_flush_threshold) {
+- flush_tlb_kernel_range(start, end);
+- flush_data_cache();
++ flush_tlb_kernel_range(start, end);
++
++ if (!static_branch_likely(&parisc_has_dcache))
++ return;
++
++ /* If interrupts are disabled, we can only do local flush */
++ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
++ flush_data_cache_local(NULL);
+ return;
+ }
+
+- purge_kernel_dcache_range_asm(start, end);
+- flush_tlb_kernel_range(start, end);
++ flush_data_cache();
+ }
+ EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+
+@@ -850,7 +955,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+ #endif
+ " fic,m %3(%4,%0)\n"
+ "2: sync\n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
+ : "+r" (start), "+r" (error)
+ : "r" (end), "r" (dcache_stride), "i" (SR_USER));
+ }
+@@ -865,7 +970,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+ #endif
+ " fdc,m %3(%4,%0)\n"
+ "2: sync\n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
+ : "+r" (start), "+r" (error)
+ : "r" (end), "r" (icache_stride), "i" (SR_USER));
+ }
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index ed8b759480614b..8be4558ef33c0e 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -1004,6 +1004,9 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
+
+ pr_info("\n");
+
++ /* Prevent hung task messages when printing on serial console */
++ cond_resched();
++
+ pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
+ hpa, parisc_hardware_description(&dev->id));
+
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index ae03b8679696e7..ea57bcc21dc5fe 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -36,6 +36,24 @@
+ .level 2.0
+ #endif
+
++/*
++ * We need seven instructions after a TLB insert for it to take effect.
++ * The PA8800/PA8900 processors are an exception and need 12 instructions.
++ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
++ */
++#ifdef CONFIG_64BIT
++#define NUM_PIPELINE_INSNS 12
++#else
++#define NUM_PIPELINE_INSNS 7
++#endif
++
++ /* Insert num nops */
++ .macro insert_nops num
++ .rept \num
++ nop
++ .endr
++ .endm
++
+ /* Get aligned page_table_lock address for this mm from cr28/tr4 */
+ .macro get_ptl reg
+ mfctl %cr28,\reg
+@@ -415,24 +433,20 @@
+ 3:
+ .endm
+
+- /* Release page_table_lock without reloading lock address.
+- We use an ordered store to ensure all prior accesses are
+- performed prior to releasing the lock. */
+- .macro ptl_unlock0 spc,tmp,tmp2
++	/* Release page_table_lock if it is for user space. We use an
++	   ordered store to ensure all prior accesses are performed prior
++	   to releasing the lock. Note the stw may not be executed, so we
++	   provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
++ .macro ptl_unlock spc,tmp,tmp2
+ #ifdef CONFIG_TLB_PTLOCK
+-98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
++98: get_ptl \tmp
++ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+ or,COND(=) %r0,\spc,%r0
+ stw,ma \tmp2,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- .endm
+-
+- /* Release page_table_lock. */
+- .macro ptl_unlock1 spc,tmp,tmp2
+-#ifdef CONFIG_TLB_PTLOCK
+-98: get_ptl \tmp
+- ptl_unlock0 \spc,\tmp,\tmp2
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++ insert_nops NUM_PIPELINE_INSNS - 4
++#else
++ insert_nops NUM_PIPELINE_INSNS - 1
+ #endif
+ .endm
+
+@@ -461,13 +475,13 @@
+ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+ #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
++ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ .macro convert_for_tlb_insert20 pte,tmp
+ #ifdef CONFIG_HUGETLB_PAGE
+ copy \pte,\tmp
+- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
+
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+@@ -475,8 +489,7 @@
+ depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+ #else /* Huge pages disabled */
+- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ #endif
+@@ -1038,8 +1051,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
+ STREG %r16, PT_ISR(%r29)
+ STREG %r17, PT_IOR(%r29)
+
+-#if 0 && defined(CONFIG_64BIT)
+- /* Revisit when we have 64-bit code above 4Gb */
++#if defined(CONFIG_64BIT)
+ b,n intr_save2
+
+ skip_save_ior:
+@@ -1047,8 +1059,7 @@ skip_save_ior:
+ * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
+ * above.
+ */
+- extrd,u,* %r8,PSW_W_BIT,1,%r1
+- cmpib,COND(=),n 1,%r1,intr_save2
++ bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
+ LDREG PT_IASQ0(%r29), %r16
+ LDREG PT_IAOQ0(%r29), %r17
+ /* adjust iasq/iaoq */
+@@ -1124,7 +1135,7 @@ dtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1133,6 +1144,7 @@ dtlb_check_alias_20w:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1150,7 +1162,7 @@ nadtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1159,6 +1171,7 @@ nadtlb_check_alias_20w:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1184,7 +1197,7 @@ dtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1194,6 +1207,7 @@ dtlb_check_alias_11:
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1217,7 +1231,7 @@ nadtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1227,6 +1241,7 @@ nadtlb_check_alias_11:
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1246,7 +1261,7 @@ dtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1255,6 +1270,7 @@ dtlb_check_alias_20:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1274,7 +1290,7 @@ nadtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1283,6 +1299,7 @@ nadtlb_check_alias_20:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1319,7 +1336,7 @@ itlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1343,7 +1360,7 @@ naitlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1352,6 +1369,7 @@ naitlb_check_alias_20w:
+
+ iitlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1377,7 +1395,7 @@ itlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1401,7 +1419,7 @@ naitlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1411,6 +1429,7 @@ naitlb_check_alias_11:
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1431,7 +1450,7 @@ itlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1451,7 +1470,7 @@ naitlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1460,6 +1479,7 @@ naitlb_check_alias_20:
+
+ iitlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1481,7 +1501,7 @@ dbit_trap_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+ #else
+@@ -1507,7 +1527,7 @@ dbit_trap_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1527,7 +1547,7 @@ dbit_trap_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+ #endif
+diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
+index 81078abec521a7..56e694f6acc2d7 100644
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address)
+ #ifdef CONFIG_64BIT
+ if(unlikely(parisc_narrow_firmware)) {
+ if((address & 0xff000000) == 0xf0000000)
+- return 0xf0f0f0f000000000UL | (u32)address;
++ return (0xfffffff0UL << 32) | (u32)address;
+
+ if((address & 0xf0000000) == 0xf0000000)
+- return 0xffffffff00000000UL | (u32)address;
++ return (0xffffffffUL << 32) | (u32)address;
+ }
+ #endif
+ return address;
+diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
+index d1defb9ede70c0..c91f9c2e61ed25 100644
+--- a/arch/parisc/kernel/ftrace.c
++++ b/arch/parisc/kernel/ftrace.c
+@@ -78,7 +78,7 @@ asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
+ #endif
+ }
+
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
+ int ftrace_enable_ftrace_graph_caller(void)
+ {
+ static_key_enable(&ftrace_graph_enable.key);
+@@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct kprobe *p;
+ int bit;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index a171bf3c6b318d..96e0264ac96163 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -70,9 +70,8 @@ $bss_loop:
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
+-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+- /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+- * and halt kernel if we detect a PA1.x CPU. */
++#if defined(CONFIG_PA20)
++ /* check for 64-bit capable CPU as required by current kernel */
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index 2f81bfd4f15e17..dff66be65d2900 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -498,7 +498,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
+
+ old_regs = set_irq_regs(regs);
+ local_irq_disable();
+- irq_enter();
++ irq_enter_rcu();
+
+ eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
+ if (!eirr_val)
+@@ -533,7 +533,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
+ #endif /* CONFIG_IRQSTACKS */
+
+ out:
+- irq_exit();
++ irq_exit_rcu();
+ set_irq_regs(old_regs);
+ return;
+
+diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
+index 6f0c92e8149d82..dcf61cbd314708 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -22,6 +22,7 @@ EXPORT_SYMBOL(memset);
+ #include <linux/atomic.h>
+ EXPORT_SYMBOL(__xchg8);
+ EXPORT_SYMBOL(__xchg32);
++EXPORT_SYMBOL(__cmpxchg_u8);
+ EXPORT_SYMBOL(__cmpxchg_u32);
+ EXPORT_SYMBOL(__cmpxchg_u64);
+ #ifdef CONFIG_SMP
+diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
+index 1fc89fa2c2d214..e37c48770b5854 100644
+--- a/arch/parisc/kernel/processor.c
++++ b/arch/parisc/kernel/processor.c
+@@ -172,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev)
+ p->cpu_num = cpu_info.cpu_num;
+ p->cpu_loc = cpu_info.cpu_loc;
+
+- set_cpu_possible(cpuid, true);
+ store_cpu_topology(cpuid);
+
+ #ifdef CONFIG_SMP
+@@ -474,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = {
+ */
+ void __init processor_init(void)
+ {
+- unsigned int cpu;
+-
+ reset_cpu_topology();
+-
+- /* reset possible mask. We will mark those which are possible. */
+- for_each_possible_cpu(cpu)
+- set_cpu_possible(cpu, false);
+-
+ register_parisc_driver(&cpu_driver);
+ }
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index ab896eff7a1de9..98af719d5f85b2 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
+ * indicating that "current" should be used instead of a passed-in
+ * value from the exec bprm as done with arch_pick_mmap_layout().
+ */
+-static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
++unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+ {
+ unsigned long stack_base;
+
+diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
+index 2a12a547b447bd..826c8e51b5853b 100644
+--- a/arch/parisc/kernel/sys_parisc32.c
++++ b/arch/parisc/kernel/sys_parisc32.c
+@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
+ current->comm, current->pid, r20);
+ return -ENOSYS;
+ }
+-
+-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+- compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+- const char __user * pathname)
+-{
+- return sys_fanotify_mark(fanotify_fd, flags,
+- ((__u64)mask1 << 32) | mask0,
+- dfd, pathname);
+-}
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 1f51aa9c8230cc..0fa81bf1466b15 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -243,10 +243,10 @@ linux_gateway_entry:
+
+ #ifdef CONFIG_64BIT
+ ldil L%sys_call_table, %r1
+- or,= %r2,%r2,%r2
+- addil L%(sys_call_table64-sys_call_table), %r1
++ or,ev %r2,%r2,%r2
++ ldil L%sys_call_table64, %r1
+ ldo R%sys_call_table(%r1), %r19
+- or,= %r2,%r2,%r2
++ or,ev %r2,%r2,%r2
+ ldo R%sys_call_table64(%r1), %r19
+ #else
+ load32 sys_call_table, %r19
+@@ -379,10 +379,10 @@ tracesys_next:
+ extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
+
+ ldil L%sys_call_table, %r1
+- or,= %r2,%r2,%r2
+- addil L%(sys_call_table64-sys_call_table), %r1
++ or,ev %r2,%r2,%r2
++ ldil L%sys_call_table64, %r1
+ ldo R%sys_call_table(%r1), %r19
+- or,= %r2,%r2,%r2
++ or,ev %r2,%r2,%r2
+ ldo R%sys_call_table64(%r1), %r19
+ #else
+ load32 sys_call_table, %r19
+@@ -1327,6 +1327,8 @@ ENTRY(sys_call_table)
+ END(sys_call_table)
+
+ #ifdef CONFIG_64BIT
++#undef __SYSCALL_WITH_COMPAT
++#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+ .align 8
+ ENTRY(sys_call_table64)
+ #include <asm/syscall_table_64.h> /* 64-bit syscalls */
+diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
+index e97c175b56f965..73f560e309573a 100644
+--- a/arch/parisc/kernel/syscalls/syscall.tbl
++++ b/arch/parisc/kernel/syscalls/syscall.tbl
+@@ -108,7 +108,7 @@
+ 95 common fchown sys_fchown
+ 96 common getpriority sys_getpriority
+ 97 common setpriority sys_setpriority
+-98 common recv sys_recv
++98 common recv sys_recv compat_sys_recv
+ 99 common statfs sys_statfs compat_sys_statfs
+ 100 common fstatfs sys_fstatfs compat_sys_fstatfs
+ 101 common stat64 sys_stat64
+@@ -135,7 +135,7 @@
+ 120 common clone sys_clone_wrapper
+ 121 common setdomainname sys_setdomainname
+ 122 common sendfile sys_sendfile compat_sys_sendfile
+-123 common recvfrom sys_recvfrom
++123 common recvfrom sys_recvfrom compat_sys_recvfrom
+ 124 32 adjtimex sys_adjtimex_time32
+ 124 64 adjtimex sys_adjtimex
+ 125 common mprotect sys_mprotect
+@@ -364,7 +364,7 @@
+ 320 common accept4 sys_accept4
+ 321 common prlimit64 sys_prlimit64
+ 322 common fanotify_init sys_fanotify_init
+-323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
++323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+ 324 32 clock_adjtime sys_clock_adjtime32
+ 324 64 clock_adjtime sys_clock_adjtime
+ 325 common name_to_handle_at sys_name_to_handle_at
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index ce25acfe4889d0..a8e75e5b884a70 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -120,8 +120,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
+ "2: ldbs 1(%%sr1,%3), %0\n"
+ " depw %2, 23, 24, %0\n"
+ "3: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+ : "+r" (val), "+r" (ret), "=&r" (temp1)
+ : "r" (saddr), "r" (regs->isr) );
+
+@@ -152,8 +152,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ " mtctl %2,11\n"
+ " vshd %0,%3,%0\n"
+ "3: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
+ : "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
+ : "r" (saddr), "r" (regs->isr) );
+
+@@ -169,6 +169,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ {
+ unsigned long saddr = regs->ior;
++ unsigned long shift, temp1;
+ __u64 val = 0;
+ ASM_EXCEPTIONTABLE_VAR(ret);
+
+@@ -180,25 +181,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+
+ #ifdef CONFIG_64BIT
+ __asm__ __volatile__ (
+-" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
+-" mtsp %4, %%sr1\n"
+-" depd %%r0,63,3,%3\n"
+-"1: ldd 0(%%sr1,%3),%0\n"
+-"2: ldd 8(%%sr1,%3),%%r20\n"
+-" subi 64,%%r19,%%r19\n"
+-" mtsar %%r19\n"
+-" shrpd %0,%%r20,%%sar,%0\n"
++" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */
++" mtsp %5, %%sr1\n"
++" depd %%r0,63,3,%2\n"
++"1: ldd 0(%%sr1,%2),%0\n"
++"2: ldd 8(%%sr1,%2),%4\n"
++" subi 64,%3,%3\n"
++" mtsar %3\n"
++" shrpd %0,%4,%%sar,%0\n"
+ "3: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+- : "=r" (val), "+r" (ret)
+- : "0" (val), "r" (saddr), "r" (regs->isr)
+- : "r19", "r20" );
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
++ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
++ : "r" (regs->isr) );
+ #else
+- {
+- unsigned long shift, temp1;
+ __asm__ __volatile__ (
+-" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */
++" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */
+ " mtsp %5, %%sr1\n"
+ " dep %%r0,31,2,%2\n"
+ "1: ldw 0(%%sr1,%2),%0\n"
+@@ -209,12 +207,11 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+ " vshd %0,%R0,%0\n"
+ " vshd %R0,%4,%R0\n"
+ "4: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
+ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
+ : "r" (regs->isr) );
+- }
+ #endif
+
+ DPRINTF("val = 0x%llx\n", val);
+@@ -244,8 +241,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
+ "1: stb %1, 0(%%sr1, %3)\n"
+ "2: stb %2, 1(%%sr1, %3)\n"
+ "3: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
+ : "+r" (ret), "=&r" (temp1)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr) );
+
+@@ -285,8 +282,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+ " stw %%r20,0(%%sr1,%2)\n"
+ " stw %%r21,4(%%sr1,%2)\n"
+ "3: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
+ : "+r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1" );
+@@ -329,10 +326,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ "3: std %%r20,0(%%sr1,%2)\n"
+ "4: std %%r21,8(%%sr1,%2)\n"
+ "5: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0")
+ : "+r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1" );
+@@ -357,11 +354,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ "4: stw %%r1,4(%%sr1,%2)\n"
+ "5: stw %R1,8(%%sr1,%2)\n"
+ "6: \n"
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
+- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0")
++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0")
+ : "+r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r1" );
+diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
+index 27ae40a443b80c..f7e0fee5ee55a3 100644
+--- a/arch/parisc/kernel/unwind.c
++++ b/arch/parisc/kernel/unwind.c
+@@ -228,10 +228,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
+ #ifdef CONFIG_IRQSTACKS
+ extern void * const _call_on_stack;
+ #endif /* CONFIG_IRQSTACKS */
+- void *ptr;
+
+- ptr = dereference_kernel_function_descriptor(&handle_interruption);
+- if (pc_is_kernel_fn(pc, ptr)) {
++ if (pc_is_kernel_fn(pc, handle_interruption)) {
+ struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
+ dbg("Unwinding through handle_interruption()\n");
+ info->prev_sp = regs->gr[30];
+@@ -239,13 +237,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
+ return 1;
+ }
+
+- if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
+- pc_is_kernel_fn(pc, syscall_exit)) {
++ if (pc == (unsigned long)&ret_from_kernel_thread ||
++ pc == (unsigned long)&syscall_exit) {
+ info->prev_sp = info->prev_ip = 0;
+ return 1;
+ }
+
+- if (pc_is_kernel_fn(pc, intr_return)) {
++ if (pc == (unsigned long)&intr_return) {
+ struct pt_regs *regs;
+
+ dbg("Found intr_return()\n");
+@@ -257,14 +255,14 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
+ }
+
+ if (pc_is_kernel_fn(pc, _switch_to) ||
+- pc_is_kernel_fn(pc, _switch_to_ret)) {
++ pc == (unsigned long)&_switch_to_ret) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ return 1;
+ }
+
+ #ifdef CONFIG_IRQSTACKS
+- if (pc_is_kernel_fn(pc, _call_on_stack)) {
++ if (pc == (unsigned long)&_call_on_stack) {
+ info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+ info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
+ return 1;
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index 58694d1989c233..548051b0b4aff6 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -130,6 +130,7 @@ SECTIONS
+ RO_DATA(8)
+
+ /* unwind info */
++ . = ALIGN(4);
+ .PARISC.unwind : {
+ __start___unwind = .;
+ *(.PARISC.unwind)
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 2fe5b44986e092..c39de84e98b051 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
+ * Fix up get_user() and put_user().
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+ * bit in the relative address of the fixup routine to indicate
+- * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
+- * -EFAULT to report a userspace access error.
++ * that the register encoded in the "or %r0,%r0,register"
++ * opcode should be loaded with -EFAULT to report a userspace
++ * access error.
+ */
+ if (fix->fixup & 1) {
+- regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
++ int fault_error_reg = fix->err_opcode & 0x1f;
++ if (!WARN_ON(!fault_error_reg))
++ regs->gr[fault_error_reg] = -EFAULT;
++ pr_debug("Unalignment fixup of register %d at %pS\n",
++ fault_error_reg, (void*)regs->iaoq[0]);
+
+ /* zero target register for get_user() */
+ if (parisc_acctyp(0, regs->iir) == VM_READ) {
+diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
+index d6ee2fd4555037..7b9cb3cda27eeb 100644
+--- a/arch/parisc/net/bpf_jit_core.c
++++ b/arch/parisc/net/bpf_jit_core.c
+@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ jit_data->header =
+ bpf_jit_binary_alloc(prog_size + extable_size,
+ &jit_data->image,
+- sizeof(u32),
++ sizeof(long),
+ bpf_fill_ill_insns);
+ if (!jit_data->header) {
+ prog = orig_prog;
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index d5d5388973ac76..2fe51e0ad63713 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -607,10 +607,10 @@ config ARCH_SUPPORTS_KEXEC
+ def_bool PPC_BOOK3S || PPC_E500 || (44x && !SMP)
+
+ config ARCH_SUPPORTS_KEXEC_FILE
+- def_bool PPC64 && CRYPTO=y && CRYPTO_SHA256=y
++ def_bool PPC64
+
+ config ARCH_SUPPORTS_KEXEC_PURGATORY
+- def_bool KEXEC_FILE
++ def_bool y
+
+ config ARCH_SELECTS_KEXEC_FILE
+ def_bool y
+@@ -857,6 +857,7 @@ config THREAD_SHIFT
+ int "Thread shift" if EXPERT
+ range 13 15
+ default "15" if PPC_256K_PAGES
++ default "15" if PPC_PSERIES || PPC_POWERNV
+ default "14" if PPC64
+ default "13"
+ help
+diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
+index 267d6524caac47..d07796fdf91aa7 100644
+--- a/arch/powerpc/boot/simple_alloc.c
++++ b/arch/powerpc/boot/simple_alloc.c
+@@ -112,8 +112,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
+ return ptr;
+
+ new = simple_malloc(size);
+- memcpy(new, ptr, p->size);
+- simple_free(ptr);
++ if (new) {
++ memcpy(new, ptr, p->size);
++ simple_free(ptr);
++ }
++
+ return new;
+ }
+
+diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
+index 524db76f47b737..8aff8321739778 100644
+--- a/arch/powerpc/configs/85xx-hw.config
++++ b/arch/powerpc/configs/85xx-hw.config
+@@ -24,6 +24,7 @@ CONFIG_FS_ENET=y
+ CONFIG_FSL_CORENET_CF=y
+ CONFIG_FSL_DMA=y
+ CONFIG_FSL_HV_MANAGER=y
++CONFIG_FSL_IFC=y
+ CONFIG_FSL_PQ_MDIO=y
+ CONFIG_FSL_RIO=y
+ CONFIG_FSL_XGMAC_MDIO=y
+@@ -58,6 +59,7 @@ CONFIG_INPUT_FF_MEMLESS=m
+ CONFIG_MARVELL_PHY=y
+ CONFIG_MDIO_BUS_MUX_GPIO=y
+ CONFIG_MDIO_BUS_MUX_MMIOREG=y
++CONFIG_MEMORY=y
+ CONFIG_MMC_SDHCI_OF_ESDHC=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+ CONFIG_MMC_SDHCI=y
+diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
+index 6e7b9e8fd2251a..65e518dde2c2fe 100644
+--- a/arch/powerpc/configs/ppc64_defconfig
++++ b/arch/powerpc/configs/ppc64_defconfig
+@@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y
+ CONFIG_PARTITION_ADVANCED=y
+ CONFIG_BINFMT_MISC=m
+ CONFIG_ZSWAP=y
+-CONFIG_Z3FOLD=y
+ CONFIG_ZSMALLOC=y
+ # CONFIG_SLAB_MERGE_DEFAULT is not set
+ CONFIG_SLAB_FREELIST_RANDOM=y
+diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
+index 8d3eacb50d5601..9d44e6630908d2 100644
+--- a/arch/powerpc/configs/skiroot_defconfig
++++ b/arch/powerpc/configs/skiroot_defconfig
+@@ -301,7 +301,6 @@ CONFIG_WQ_WATCHDOG=y
+ CONFIG_DEBUG_SG=y
+ CONFIG_DEBUG_NOTIFIERS=y
+ CONFIG_BUG_ON_DATA_CORRUPTION=y
+-CONFIG_DEBUG_CREDENTIALS=y
+ # CONFIG_FTRACE is not set
+ CONFIG_XMON=y
+ # CONFIG_RUNTIME_TESTING_MENU is not set
+diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
+index 6fc2248ca56166..fccf742c55c2c3 100644
+--- a/arch/powerpc/crypto/Kconfig
++++ b/arch/powerpc/crypto/Kconfig
+@@ -96,6 +96,7 @@ config CRYPTO_AES_PPC_SPE
+
+ config CRYPTO_AES_GCM_P10
+ tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
++ depends on BROKEN
+ depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
+ select CRYPTO_LIB_AES
+ select CRYPTO_ALGAPI
+diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
+index 4b6e899895e7be..f62ee54076c06d 100644
+--- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
++++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
+@@ -37,7 +37,7 @@ asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
+ void *rkey, u8 *iv, void *Xi);
+ asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
+ void *rkey, u8 *iv, void *Xi);
+-asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
++asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
+ asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
+ unsigned char *aad, unsigned int alen);
+
+diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
+index 74fb86b0d2097c..7c728755852e1a 100644
+--- a/arch/powerpc/crypto/chacha-p10-glue.c
++++ b/arch/powerpc/crypto/chacha-p10-glue.c
+@@ -197,6 +197,9 @@ static struct skcipher_alg algs[] = {
+
+ static int __init chacha_p10_init(void)
+ {
++ if (!cpu_has_feature(CPU_FTR_ARCH_31))
++ return 0;
++
+ static_branch_enable(&have_p10);
+
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+@@ -204,10 +207,13 @@ static int __init chacha_p10_init(void)
+
+ static void __exit chacha_p10_exit(void)
+ {
++ if (!static_branch_likely(&have_p10))
++ return;
++
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+ }
+
+-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
++module_init(chacha_p10_init);
+ module_exit(chacha_p10_exit);
+
+ MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
+diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
+index 2bc53c646ccd7d..83848b534cb171 100644
+--- a/arch/powerpc/include/asm/asm-compat.h
++++ b/arch/powerpc/include/asm/asm-compat.h
+@@ -39,6 +39,12 @@
+ #define STDX_BE stringify_in_c(stdbrx)
+ #endif
+
++#ifdef CONFIG_CC_IS_CLANG
++#define DS_FORM_CONSTRAINT "Z<>"
++#else
++#define DS_FORM_CONSTRAINT "YZ<>"
++#endif
++
+ #else /* 32-bit */
+
+ /* operations for longs and pointers */
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index 5bf6a4d49268c7..d1ea554c33ed7e 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -11,6 +11,7 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/barrier.h>
+ #include <asm/asm-const.h>
++#include <asm/asm-compat.h>
+
+ /*
+ * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
+@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
++ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
+
+ return t;
+ }
+@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
++ __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
+ }
+
+ #define ATOMIC64_OP(op, asm_op) \
+diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
+index 9e5a39b6a3114b..107fc5a4845696 100644
+--- a/arch/powerpc/include/asm/ftrace.h
++++ b/arch/powerpc/include/asm/ftrace.h
+@@ -20,14 +20,6 @@
+ #ifndef __ASSEMBLY__
+ extern void _mcount(void);
+
+-static inline unsigned long ftrace_call_adjust(unsigned long addr)
+-{
+- if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
+- addr += MCOUNT_INSN_SIZE;
+-
+- return addr;
+-}
+-
+ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp);
+
+@@ -142,8 +134,10 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
+ #ifdef CONFIG_FUNCTION_TRACER
+ extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+ void ftrace_free_init_tramp(void);
++unsigned long ftrace_call_adjust(unsigned long addr);
+ #else
+ static inline void ftrace_free_init_tramp(void) { }
++static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
+ #endif
+ #endif /* !__ASSEMBLY__ */
+
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index c099780385dd3d..218488407ac00e 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -494,7 +494,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+ * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
+ */
+ #define PLPAR_HCALL_BUFSIZE 4
+-long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
+
+ /**
+ * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+@@ -508,7 +508,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
+ * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+ * calculate hypervisor call statistics.
+ */
+-long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
+
+ /**
+ * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+@@ -519,8 +519,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+ * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+ */
+ #define PLPAR_HCALL9_BUFSIZE 9
+-long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
++long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
+
+ /* pseries hcall tracing */
+ extern struct static_key hcall_tracepoint_key;
+@@ -540,7 +540,7 @@ struct hvcall_mpp_data {
+ unsigned long backing_mem;
+ };
+
+-int h_get_mpp(struct hvcall_mpp_data *);
++long h_get_mpp(struct hvcall_mpp_data *mpp_data);
+
+ struct hvcall_mpp_x_data {
+ unsigned long coalesced_bytes;
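
The retbuf[static PLPAR_HCALL_BUFSIZE] declarators above use the C99 static array-parameter qualifier: the prototype now promises the callee a non-NULL buffer of at least that many elements, and recent gcc/clang can diagnose callers that pass a smaller array. A small self-contained illustration:

#define BUFSIZE 4

/* callers must supply at least BUFSIZE writable elements */
static long fill(unsigned long out[static BUFSIZE])
{
        for (int i = 0; i < BUFSIZE; i++)
                out[i] = i;
        return 0;
}

static void caller(void)
{
        unsigned long ok[BUFSIZE];
        unsigned long small[2];

        fill(ok);       /* fine */
        fill(small);    /* diagnosed by recent gcc/clang */
}
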
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index a4196ab1d0167c..5f9d61b2159cc5 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+ if (IS_ENABLED(CONFIG_KASAN))
+ return;
+
++ /*
++ * Likewise, do not use it in real mode if the percpu first chunk is not
++ * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, there is
++ * a chance that the percpu allocation comes from the vmalloc area.
++ */
++ if (percpu_first_chunk_is_paged)
++ return;
++
+ /* Otherwise, it should be safe to call it */
+ nmi_enter();
+ }
+@@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
+ // no nmi_exit for a pseries hash guest taking a real mode exception
+ } else if (IS_ENABLED(CONFIG_KASAN)) {
+ // no nmi_exit for KASAN in real mode
++ } else if (percpu_first_chunk_is_paged) {
++ // no nmi_exit if percpu first chunk is not embedded
+ } else {
+ nmi_exit();
+ }
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index 0732b743e09962..99419e87f5556e 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -37,7 +37,7 @@ extern struct pci_dev *isa_bridge_pcidev;
+ * define properly based on the platform
+ */
+ #ifndef CONFIG_PCI
+-#define _IO_BASE 0
++#define _IO_BASE POISON_POINTER_DELTA
+ #define _ISA_MEM_BASE 0
+ #define PCI_DRAM_OFFSET 0
+ #elif defined(CONFIG_PPC32)
+@@ -585,12 +585,12 @@ __do_out_asm(_rec_outl, "stwbrx")
+ #define __do_inw(port) _rec_inw(port)
+ #define __do_inl(port) _rec_inl(port)
+ #else /* CONFIG_PPC32 */
+-#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port);
+-#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port);
+-#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port);
++#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_inb(port) readb((PCI_IO_ADDR)(_IO_BASE + port));
++#define __do_inw(port) readw((PCI_IO_ADDR)(_IO_BASE + port));
++#define __do_inl(port) readl((PCI_IO_ADDR)(_IO_BASE + port));
+ #endif /* !CONFIG_PPC32 */
+
+ #ifdef CONFIG_EEH
+@@ -606,12 +606,12 @@ __do_out_asm(_rec_outl, "stwbrx")
+ #define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
+ #define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
+
+-#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+-#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+-#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
++#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
++#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
++#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+
+ #define __do_memset_io(addr, c, n) \
+ _memset_io(PCI_FIX_ADDR(addr), c, n)
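
The io.h hunks above fix an operator-precedence hazard: in (PCI_IO_ADDR)_IO_BASE + port the cast binds before the addition, so the arithmetic happens on a pointer, and with _IO_BASE defined as 0 that is null-pointer arithmetic (undefined behaviour, and flagged by clang's -Wnull-pointer-arithmetic). Parenthesising keeps the addition in integer space, and the !CONFIG_PCI base becomes POISON_POINTER_DELTA so a stray access faults visibly instead of touching page zero. The pitfall in two lines, with illustrative names:

#define IO_BASE 0UL

/* cast first: pointer arithmetic on a null pointer (UB, clang warns) */
#define PORT_ADDR_BAD(port)  ((void *)IO_BASE + (port))
/* add first, cast the integer sum once: well defined */
#define PORT_ADDR_GOOD(port) ((void *)(IO_BASE + (port)))
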
+diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
+index b8b0be8f1a07ee..c6d3078bd8c3b4 100644
+--- a/arch/powerpc/include/asm/irq_work.h
++++ b/arch/powerpc/include/asm/irq_work.h
+@@ -6,6 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ return true;
+ }
+-extern void arch_irq_work_raise(void);
+
+ #endif /* _ASM_POWERPC_IRQ_WORK_H */
+diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
+index 93ce3ec253877d..2f2a86ed2280aa 100644
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -17,7 +17,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "nop # arch_static_branch\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+@@ -32,7 +32,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "b %l[l_yes] # arch_static_branch_jump\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
+index 52cc25864a1be2..d8b7e246a32f59 100644
+--- a/arch/powerpc/include/asm/mmu.h
++++ b/arch/powerpc/include/asm/mmu.h
+@@ -412,5 +412,9 @@ extern void *abatron_pteptrs[2];
+ #include <asm/nohash/mmu.h>
+ #endif
+
++#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
++#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_MMU_H_ */
+diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
+index 4c6c6dbd182f45..da827d2d08666e 100644
+--- a/arch/powerpc/include/asm/mmzone.h
++++ b/arch/powerpc/include/asm/mmzone.h
+@@ -42,14 +42,6 @@ u64 memory_hotplug_max(void);
+ #else
+ #define memory_hotplug_max() memblock_end_of_DRAM()
+ #endif /* CONFIG_NUMA */
+-#ifdef CONFIG_FA_DUMP
+-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+-#endif
+-
+-#ifdef CONFIG_MEMORY_HOTPLUG
+-extern int create_section_mapping(unsigned long start, unsigned long end,
+- int nid, pgprot_t prot);
+-#endif
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_MMZONE_H_ */
+diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+index 6fe46e7545566c..0b4e5f8ce3e8a9 100644
+--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
++++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
+@@ -69,9 +69,6 @@
+
+ #define _PTE_NONE_MASK 0
+
+-/* Until my rework is finished, 40x still needs atomic PTE updates */
+-#define PTE_ATOMIC_UPDATES 1
+-
+ #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+ #define _PAGE_BASE (_PAGE_BASE_NC)
+
+diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
+index 6ddced0415cb5c..7dc24b8632d7c2 100644
+--- a/arch/powerpc/include/asm/nohash/mmu-e500.h
++++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
+@@ -303,8 +303,7 @@ extern unsigned long linear_map_top;
+ extern int book3e_htw_mode;
+
+ #define PPC_HTW_NONE 0
+-#define PPC_HTW_IBM 1
+-#define PPC_HTW_E6500 2
++#define PPC_HTW_E6500 1
+
+ /*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
+index 8e5b7d0b851c61..634970ce13c6b9 100644
+--- a/arch/powerpc/include/asm/percpu.h
++++ b/arch/powerpc/include/asm/percpu.h
+@@ -15,6 +15,16 @@
+ #endif /* CONFIG_SMP */
+ #endif /* __powerpc64__ */
+
++#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
++#include <linux/jump_label.h>
++DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
++
++#define percpu_first_chunk_is_paged \
++ (static_key_enabled(&__percpu_first_chunk_is_paged.key))
++#else
++#define percpu_first_chunk_is_paged false
++#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK && CONFIG_SMP */
++
+ #include <asm-generic/percpu.h>
+
+ #include <asm/paca.h>
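
percpu_first_chunk_is_paged is backed by a static key rather than an ordinary global: it flips once during boot (see the setup_per_cpu_areas() hunk later in this patch) and is then read on interrupt entry paths, where a static key compiles to a runtime-patched branch instead of a memory load. The general pattern, assuming hypothetical probe_hw() and slow_extra_work() helpers:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(feature_on);

void __init detect_feature(void)
{
        if (probe_hw())                 /* hypothetical boot-time probe */
                static_branch_enable(&feature_on);
}

void hot_path(void)
{
        /* folds to a nop/branch patched at enable time, no load */
        if (static_branch_unlikely(&feature_on))
                slow_extra_work();      /* hypothetical */
}
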
+diff --git a/arch/powerpc/include/asm/plpks.h b/arch/powerpc/include/asm/plpks.h
+index 23b77027c91637..7a84069759b032 100644
+--- a/arch/powerpc/include/asm/plpks.h
++++ b/arch/powerpc/include/asm/plpks.h
+@@ -44,9 +44,8 @@
+ #define PLPKS_MAX_DATA_SIZE 4000
+
+ // Timeouts for PLPKS operations
+-#define PLPKS_MAX_TIMEOUT 5000 // msec
+-#define PLPKS_FLUSH_SLEEP 10 // msec
+-#define PLPKS_FLUSH_SLEEP_RANGE 400
++#define PLPKS_MAX_TIMEOUT (5 * USEC_PER_SEC)
++#define PLPKS_FLUSH_SLEEP 10000 // usec
+
+ struct plpks_var {
+ char *component;
+diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
+index d9fcff5750271d..2689e7139b9ea7 100644
+--- a/arch/powerpc/include/asm/ppc-pci.h
++++ b/arch/powerpc/include/asm/ppc-pci.h
+@@ -30,6 +30,16 @@ void *pci_traverse_device_nodes(struct device_node *start,
+ void *data);
+ extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+
++#if defined(CONFIG_IOMMU_API) && (defined(CONFIG_PPC_PSERIES) || \
++ defined(CONFIG_PPC_POWERNV))
++extern void ppc_iommu_register_device(struct pci_controller *phb);
++extern void ppc_iommu_unregister_device(struct pci_controller *phb);
++#else
++static inline void ppc_iommu_register_device(struct pci_controller *phb) { }
++static inline void ppc_iommu_unregister_device(struct pci_controller *phb) { }
++#endif
++
++
+ /* From rtas_pci.h */
+ extern void init_pci_config_tokens (void);
+ extern unsigned long get_phb_buid (struct device_node *);
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 4ae4ab9090a2d4..ade5f094dbd222 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -617,6 +617,8 @@
+ #endif
+ #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+ #define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
++#define SPRN_HID2_G2_LE 0x3F3 /* G2_LE HID2 Register */
++#define HID2_G2_LE_HBE (1<<18) /* High BAT Enable (G2_LE) */
+ #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
+ #define SPRN_IABR2 0x3FA /* 83xx */
+ #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
+diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
+index a21f529c43d96b..8359c06d92d9f8 100644
+--- a/arch/powerpc/include/asm/reg_fsl_emb.h
++++ b/arch/powerpc/include/asm/reg_fsl_emb.h
+@@ -12,9 +12,16 @@
+ #ifndef __ASSEMBLY__
+ /* Performance Monitor Registers */
+ #define mfpmr(rn) ({unsigned int rval; \
+- asm volatile("mfpmr %0," __stringify(rn) \
++ asm volatile(".machine push; " \
++ ".machine e300; " \
++ "mfpmr %0," __stringify(rn) ";" \
++ ".machine pop; " \
+ : "=r" (rval)); rval;})
+-#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
++#define mtpmr(rn, v) asm volatile(".machine push; " \
++ ".machine e300; " \
++ "mtpmr " __stringify(rn) ",%0; " \
++ ".machine pop; " \
++ : : "r" (v))
+ #endif /* __ASSEMBLY__ */
+
+ /* Freescale Book E Performance Monitor APU Registers */
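
mfpmr/mtpmr exist only on Freescale e300-family cores, and newer binutils reject them when assembling for a generic PowerPC target. The .machine push / .machine e300 / .machine pop bracket above widens the assembler's accepted instruction set for just that one instruction and then restores the previous target. The idiom standalone (the PMR number is illustrative):

static inline unsigned int read_pmr0(void)
{
        unsigned int v;

        asm volatile(".machine push; "
                     ".machine e300; "
                     "mfpmr %0, 0; "    /* performance monitor register 0 */
                     ".machine pop; "
                     : "=r" (v));
        return v;
}
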
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index c697c3c746946d..33024a2874a691 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -68,7 +68,7 @@ enum rtas_function_index {
+ RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE,
+ RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2,
+ RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW,
+- RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS,
++ RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW,
+ RTAS_FNIDX__IBM_SCAN_LOG_DUMP,
+ RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR,
+ RTAS_FNIDX__IBM_SET_EEH_OPTION,
+@@ -163,7 +163,7 @@ typedef struct {
+ #define RTAS_FN_IBM_READ_SLOT_RESET_STATE rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE)
+ #define RTAS_FN_IBM_READ_SLOT_RESET_STATE2 rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2)
+ #define RTAS_FN_IBM_REMOVE_PE_DMA_WINDOW rtas_fn_handle(RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW)
+-#define RTAS_FN_IBM_RESET_PE_DMA_WINDOWS rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS)
++#define RTAS_FN_IBM_RESET_PE_DMA_WINDOW rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW)
+ #define RTAS_FN_IBM_SCAN_LOG_DUMP rtas_fn_handle(RTAS_FNIDX__IBM_SCAN_LOG_DUMP)
+ #define RTAS_FN_IBM_SET_DYNAMIC_INDICATOR rtas_fn_handle(RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR)
+ #define RTAS_FN_IBM_SET_EEH_OPTION rtas_fn_handle(RTAS_FNIDX__IBM_SET_EEH_OPTION)
+diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
+index ea26665f82cfc8..f43f3a6b0051cf 100644
+--- a/arch/powerpc/include/asm/sections.h
++++ b/arch/powerpc/include/asm/sections.h
+@@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
+
+ extern char __head_end[];
+ extern char __srwx_boundary[];
++extern char __exittext_begin[], __exittext_end[];
+
+ /* Patch sites */
+ extern s32 patch__call_flush_branch_caches1;
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index bf5dde1a411471..15c5691dd21844 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -14,7 +14,7 @@
+
+ #ifdef __KERNEL__
+
+-#ifdef CONFIG_KASAN
++#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15
+ #define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
+ #else
+ #define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index f4e6f2dd04b731..16bacfe8c7a2ca 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -145,6 +145,7 @@ static inline int cpu_to_coregroup_id(int cpu)
+
+ #ifdef CONFIG_HOTPLUG_SMT
+ #include <linux/cpu_smt.h>
++#include <linux/cpumask.h>
+ #include <asm/cputhreads.h>
+
+ static inline bool topology_is_primary_thread(unsigned int cpu)
+@@ -156,6 +157,18 @@ static inline bool topology_smt_thread_allowed(unsigned int cpu)
+ {
+ return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
+ }
++
++#define topology_is_core_online topology_is_core_online
++static inline bool topology_is_core_online(unsigned int cpu)
++{
++ int i, first_cpu = cpu_first_thread_sibling(cpu);
++
++ for (i = first_cpu; i < first_cpu + threads_per_core; ++i) {
++ if (cpu_online(i))
++ return true;
++ }
++ return false;
++}
+ #endif
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index fb725ec77926e1..a81bd825087cda 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -6,6 +6,7 @@
+ #include <asm/page.h>
+ #include <asm/extable.h>
+ #include <asm/kup.h>
++#include <asm/asm-compat.h>
+
+ #ifdef __powerpc64__
+ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+@@ -74,7 +75,7 @@ __pu_failed: \
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ #ifdef CONFIG_PPC_KERNEL_PREFIXED
+ #define __put_user_asm_goto(x, addr, label, op) \
+- asm_volatile_goto( \
++ asm goto( \
+ "1: " op " %0,0(%1) # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+@@ -83,7 +84,7 @@ __pu_failed: \
+ : label)
+ #else
+ #define __put_user_asm_goto(x, addr, label, op) \
+- asm_volatile_goto( \
++ asm goto( \
+ "1: " op "%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+@@ -93,11 +94,21 @@ __pu_failed: \
+ #endif
+
+ #ifdef __powerpc64__
++#ifdef CONFIG_PPC_KERNEL_PREFIXED
+ #define __put_user_asm2_goto(x, ptr, label) \
+ __put_user_asm_goto(x, ptr, label, "std")
++#else
++#define __put_user_asm2_goto(x, addr, label) \
++ asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
++ EX_TABLE(1b, %l2) \
++ : \
++ : "r" (x), DS_FORM_CONSTRAINT (*addr) \
++ : \
++ : label)
++#endif // CONFIG_PPC_KERNEL_PREFIXED
+ #else /* __powerpc64__ */
+ #define __put_user_asm2_goto(x, addr, label) \
+- asm_volatile_goto( \
++ asm goto( \
+ "1: stw%X1 %0, %1\n" \
+ "2: stw%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+@@ -146,7 +157,7 @@ do { \
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ #ifdef CONFIG_PPC_KERNEL_PREFIXED
+ #define __get_user_asm_goto(x, addr, label, op) \
+- asm_volatile_goto( \
++ asm_goto_output( \
+ "1: "op" %0,0(%1) # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+@@ -155,7 +166,7 @@ do { \
+ : label)
+ #else
+ #define __get_user_asm_goto(x, addr, label, op) \
+- asm_volatile_goto( \
++ asm_goto_output( \
+ "1: "op"%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+@@ -169,7 +180,7 @@ do { \
+ __get_user_asm_goto(x, addr, label, "ld")
+ #else /* __powerpc64__ */
+ #define __get_user_asm2_goto(x, addr, label) \
+- asm_volatile_goto( \
++ asm_goto_output( \
+ "1: lwz%X1 %0, %1\n" \
+ "2: lwz%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
+index a585c8e538ff0f..939daf6b695ef1 100644
+--- a/arch/powerpc/include/asm/vdso_datapage.h
++++ b/arch/powerpc/include/asm/vdso_datapage.h
+@@ -111,6 +111,21 @@ extern struct vdso_arch_data *vdso_data;
+ addi \ptr, \ptr, (_vdso_datapage - 999b)@l
+ .endm
+
++#include <asm/asm-offsets.h>
++#include <asm/page.h>
++
++.macro get_realdatapage ptr scratch
++ get_datapage \ptr
++#ifdef CONFIG_TIME_NS
++ lwz \scratch, VDSO_CLOCKMODE_OFFSET(\ptr)
++ xoris \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@h
++ xori \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@l
++ cntlzw \scratch, \scratch
++ rlwinm \scratch, \scratch, PAGE_SHIFT - 5, 1 << PAGE_SHIFT
++ add \ptr, \ptr, \scratch
++#endif
++.endm
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __KERNEL__ */
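
get_realdatapage selects between the vDSO data page and the adjacent time-namespace page without branching: XORing the stored clock_mode with VDSO_CLOCKMODE_TIMENS gives zero exactly when a time namespace is active, cntlzw maps zero (and only zero) to 32 = 1 << 5, and the rlwinm rotates that left by PAGE_SHIFT - 5 while masking bit PAGE_SHIFT, yielding PAGE_SIZE or 0 to add to the pointer. The same arithmetic in C, as a sketch:

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* PAGE_SIZE when mode == timens_val, else 0 -- no branch on hardware */
static uint32_t timens_offset(uint32_t mode, uint32_t timens_val)
{
        uint32_t x = mode ^ timens_val;                 /* 0 iff equal */
        uint32_t lz = x ? __builtin_clz(x) : 32;        /* cntlzw(0) == 32 */

        return (lz << (PAGE_SHIFT - 5)) & (1u << PAGE_SHIFT);
}
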
+diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
+index 4c69ece52a31e5..59ed89890c902b 100644
+--- a/arch/powerpc/include/asm/vmalloc.h
++++ b/arch/powerpc/include/asm/vmalloc.h
+@@ -7,14 +7,14 @@
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+ #define arch_vmap_pud_supported arch_vmap_pud_supported
+-static inline bool arch_vmap_pud_supported(pgprot_t prot)
++static __always_inline bool arch_vmap_pud_supported(pgprot_t prot)
+ {
+ /* HPT does not cope with large pages in the vmalloc area */
+ return radix_enabled();
+ }
+
+ #define arch_vmap_pmd_supported arch_vmap_pmd_supported
+-static inline bool arch_vmap_pmd_supported(pgprot_t prot)
++static __always_inline bool arch_vmap_pmd_supported(pgprot_t prot)
+ {
+ return radix_enabled();
+ }
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 9f14d95b8b32fd..2affd30468bc4c 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -348,6 +348,8 @@ int main(void)
+ #else
+ OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map);
+ #endif
++ OFFSET(VDSO_CLOCKMODE_OFFSET, vdso_arch_data, data[0].clock_mode);
++ DEFINE(VDSO_CLOCKMODE_TIMENS, VDSO_CLOCKMODE_TIMENS);
+
+ #ifdef CONFIG_BUG
+ DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
+diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
+index f29ce3dd6140f4..bfd3f442e5eb9d 100644
+--- a/arch/powerpc/kernel/cpu_setup_6xx.S
++++ b/arch/powerpc/kernel/cpu_setup_6xx.S
+@@ -26,6 +26,15 @@ BEGIN_FTR_SECTION
+ bl __init_fpu_registers
+ END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
+ bl setup_common_caches
++
++ /*
++ * This assumes that all cores using __setup_cpu_603 with
++ * MMU_FTR_USE_HIGH_BATS are G2_LE compatible
++ */
++BEGIN_MMU_FTR_SECTION
++ bl setup_g2_le_hid2
++END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
++
+ mtlr r5
+ blr
+ _GLOBAL(__setup_cpu_604)
+@@ -115,6 +124,16 @@ SYM_FUNC_START_LOCAL(setup_604_hid0)
+ blr
+ SYM_FUNC_END(setup_604_hid0)
+
++/* Enable high BATs for G2_LE and derivatives like e300cX */
++SYM_FUNC_START_LOCAL(setup_g2_le_hid2)
++ mfspr r11,SPRN_HID2_G2_LE
++ oris r11,r11,HID2_G2_LE_HBE@h
++ mtspr SPRN_HID2_G2_LE,r11
++ sync
++ isync
++ blr
++SYM_FUNC_END(setup_g2_le_hid2)
++
+ /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
+ * errata we work around here.
+ * Moto MPC710CE.pdf describes them, those are errata
+@@ -495,4 +514,3 @@ _GLOBAL(__restore_cpu_setup)
+ mtcr r7
+ blr
+ _ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)
+-
+diff --git a/arch/powerpc/kernel/cpu_specs_e500mc.h b/arch/powerpc/kernel/cpu_specs_e500mc.h
+index ceb06b109f8313..2ae8e9a7b461c8 100644
+--- a/arch/powerpc/kernel/cpu_specs_e500mc.h
++++ b/arch/powerpc/kernel/cpu_specs_e500mc.h
+@@ -8,7 +8,8 @@
+
+ #ifdef CONFIG_PPC64
+ #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+- PPC_FEATURE_HAS_FPU | PPC_FEATURE_64)
++ PPC_FEATURE_HAS_FPU | PPC_FEATURE_64 | \
++ PPC_FEATURE_BOOKE)
+ #else
+ #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_BOOKE)
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index e0ce812796241a..7d1b50599dd6c2 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+ {
+ struct eeh_dev *edev;
+ struct pci_dev *pdev;
++ struct pci_bus *bus = NULL;
+
+ if (pe->type & EEH_PE_PHB)
+ return pe->phb->bus;
+@@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+
+ /* Retrieve the parent PCI bus of first (top) PCI device */
+ edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
++ pci_lock_rescan_remove();
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (pdev)
+- return pdev->bus;
++ bus = pdev->bus;
++ pci_unlock_rescan_remove();
+
+- return NULL;
++ return bus;
+ }
+diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
+index 6a9acfb690c9f5..2f8f3f93cbb67e 100644
+--- a/arch/powerpc/kernel/fpu.S
++++ b/arch/powerpc/kernel/fpu.S
+@@ -23,6 +23,15 @@
+ #include <asm/feature-fixups.h>
+
+ #ifdef CONFIG_VSX
++#define __REST_1FPVSR(n,c,base) \
++BEGIN_FTR_SECTION \
++ b 2f; \
++END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
++ REST_FPR(n,base); \
++ b 3f; \
++2: REST_VSR(n,c,base); \
++3:
++
+ #define __REST_32FPVSRS(n,c,base) \
+ BEGIN_FTR_SECTION \
+ b 2f; \
+@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ 2: SAVE_32VSRS(n,c,base); \
+ 3:
+ #else
++#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
+ #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
+ #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
+ #endif
++#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
+ #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+ #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+
+@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
+ SAVE_32FPVSRS(0, R4, R3)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r3)
++ REST_1FPVSR(0, R4, R3)
+ blr
+ EXPORT_SYMBOL(store_fp_state)
+
+@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
+ 2: SAVE_32FPVSRS(0, R4, R6)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r6)
++ REST_1FPVSR(0, R4, R6)
+ blr
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 4690c219bfa4df..63432a33ec49ac 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -647,8 +647,9 @@ __after_prom_start:
+ * Note: This process overwrites the OF exception vectors.
+ */
+ LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
+- mr. r4,r26 /* In some cases the loader may */
+- beq 9f /* have already put us at zero */
++ mr r4,r26 /* Load the virtual source address into r4 */
++ cmpld r3,r4 /* Check if source == dest */
++ beq 9f /* If so skip the copy */
+ li r6,0x100 /* Start offset, the first 0x100 */
+ /* bytes were copied earlier. */
+
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 647b0b445e89db..0c94db8e2bdedb 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -41,12 +41,12 @@
+ #include "head_32.h"
+
+ .macro compare_to_kernel_boundary scratch, addr
+-#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
++#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000
+ /* By simply checking Address >= 0x80000000, we know if its a kernel address */
+ not. \scratch, \addr
+ #else
+ rlwinm \scratch, \addr, 16, 0xfff8
+- cmpli cr0, \scratch, PAGE_OFFSET@h
++ cmpli cr0, \scratch, TASK_SIZE@h
+ #endif
+ .endm
+
+@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
+ mfspr r10, SPRN_SRR0
+ mtspr SPRN_MD_EPN, r10
+ rlwinm r11, r10, 16, 0xfff8
+- cmpli cr1, r11, PAGE_OFFSET@h
++ cmpli cr1, r11, TASK_SIZE@h
+ mfspr r11, SPRN_M_TWB /* Get level 1 table */
+ blt+ cr1, 3f
+
+diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
+index bd863702d81218..1ad059a9e2fef3 100644
+--- a/arch/powerpc/kernel/interrupt_64.S
++++ b/arch/powerpc/kernel/interrupt_64.S
+@@ -52,7 +52,8 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+- std r11,_NIP(r1)
++ std r11,_LINK(r1)
++ std r11,_NIP(r1) /* Saved LR is also the next instruction */
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+@@ -70,7 +71,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+- std r11,_LINK(r1)
+ std r11,_CTR(r1)
+
+ li r11,\trapnr
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 14251bc5219eba..efaca0c6eff9d0 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -1344,7 +1344,7 @@ static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
+ struct pci_controller *hose;
+
+ if (!dev_is_pci(dev))
+- return ERR_PTR(-EPERM);
++ return ERR_PTR(-ENODEV);
+
+ pdev = to_pci_dev(dev);
+ hose = pdev->bus->sysdata;
+@@ -1393,6 +1393,21 @@ static const struct attribute_group *spapr_tce_iommu_groups[] = {
+ NULL,
+ };
+
++void ppc_iommu_register_device(struct pci_controller *phb)
++{
++ iommu_device_sysfs_add(&phb->iommu, phb->parent,
++ spapr_tce_iommu_groups, "iommu-phb%04x",
++ phb->global_number);
++ iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
++ phb->parent);
++}
++
++void ppc_iommu_unregister_device(struct pci_controller *phb)
++{
++ iommu_device_unregister(&phb->iommu);
++ iommu_device_sysfs_remove(&phb->iommu);
++}
++
+ /*
+ * This registers IOMMU devices of PHBs. This needs to happen
+ * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
+@@ -1403,11 +1418,7 @@ static int __init spapr_tce_setup_phb_iommus_initcall(void)
+ struct pci_controller *hose;
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+- iommu_device_sysfs_add(&hose->iommu, hose->parent,
+- spapr_tce_iommu_groups, "iommu-phb%04x",
+- hose->global_number);
+- iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
+- hose->parent);
++ ppc_iommu_register_device(hose);
+ }
+ return 0;
+ }
+diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
+index 938e66829eae65..d5c48d1b0a31ea 100644
+--- a/arch/powerpc/kernel/irq_64.c
++++ b/arch/powerpc/kernel/irq_64.c
+@@ -230,7 +230,7 @@ notrace __no_kcsan void arch_local_irq_restore(unsigned long mask)
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+- asm_volatile_goto(
++ asm goto(
+ "1: \n"
+ " lbz 9,%0(13) \n"
+ " cmpwi 9,0 \n"
+diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
+index 072ebe7f290ba7..f8208c027148fd 100644
+--- a/arch/powerpc/kernel/kprobes-ftrace.c
++++ b/arch/powerpc/kernel/kprobes-ftrace.c
+@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
+ struct pt_regs *regs;
+ int bit;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(nip, parent_nip);
+ if (bit < 0)
+ return;
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index b68898ac07e199..9452a54d356c97 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)
+
+ usermsr = current->thread.regs->msr;
+
++ /* Caller has enabled FP/VEC/VSX/TM in MSR */
+ if (usermsr & MSR_FP)
+- save_fpu(current);
+-
++ __giveup_fpu(current);
+ if (usermsr & MSR_VEC)
+- save_altivec(current);
++ __giveup_altivec(current);
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (usermsr & MSR_TM) {
+@@ -2258,6 +2258,22 @@ unsigned long __get_wchan(struct task_struct *p)
+ return ret;
+ }
+
++static bool empty_user_regs(struct pt_regs *regs, struct task_struct *tsk)
++{
++ unsigned long stack_page;
++
++ // A non-empty pt_regs should never have a zero MSR or TRAP value.
++ if (regs->msr || regs->trap)
++ return false;
++
++ // Check it sits at the very base of the stack
++ stack_page = (unsigned long)task_stack_page(tsk);
++ if ((unsigned long)(regs + 1) != stack_page + THREAD_SIZE)
++ return false;
++
++ return true;
++}
++
+ static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
+
+ void __no_sanitize_address show_stack(struct task_struct *tsk,
+@@ -2322,9 +2338,13 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
+ lr = regs->link;
+ printk("%s--- interrupt: %lx at %pS\n",
+ loglvl, regs->trap, (void *)regs->nip);
+- __show_regs(regs);
+- printk("%s--- interrupt: %lx\n",
+- loglvl, regs->trap);
++
++ // Detect the case of an empty pt_regs at the very base
++ // of the stack and suppress showing it in full.
++ if (!empty_user_regs(regs, tsk)) {
++ __show_regs(regs);
++ printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
++ }
+
+ firstframe = 1;
+ }
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 0b5878c3125b1c..bf6d8ad3819e99 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -327,6 +327,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ void *data)
+ {
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
++ const __be32 *cpu_version = NULL;
+ const __be32 *prop;
+ const __be32 *intserv;
+ int i, nthreads;
+@@ -375,6 +376,18 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ if (IS_ENABLED(CONFIG_PPC64))
+ boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+
++ if (nr_cpu_ids % nthreads != 0) {
++ set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
++ pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
++ nr_cpu_ids);
++ }
++
++ if (boot_cpuid >= nr_cpu_ids) {
++ set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
++ pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
++ boot_cpuid, nr_cpu_ids);
++ }
++
+ /*
+ * PAPR defines "logical" PVR values for cpus that
+ * meet various levels of the architecture:
+@@ -398,7 +411,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
+ identify_cpu(0, be32_to_cpup(prop));
+- seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
++ cpu_version = prop;
+ }
+
+ check_cpu_feature_properties(node);
+@@ -409,6 +422,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ }
+
+ identical_pvr_fixup(node);
++
++ // We can now add the CPU name & PVR to the hardware description
++ seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
++ if (cpu_version)
++ seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(cpu_version));
++
+ init_mmu_slb_size(node);
+
+ #ifdef CONFIG_PPC64
+@@ -846,9 +865,6 @@ void __init early_init_devtree(void *params)
+
+ dt_cpu_ftrs_scan();
+
+- // We can now add the CPU name & PVR to the hardware description
+- seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
+-
+ /* Retrieve CPU related information from the flat tree
+ * (altivec support, boot CPU ID, ...)
+ */
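
The early_init_dt_scan_cpus() hunk above rounds nr_cpu_ids up to a whole number of SMT threads per core (and grows it to cover the boot CPU), because later code iterates sibling threads core by core and cannot cope with a partial core. ALIGN() is the kernel's power-of-two round-up; a minimal equivalent:

/* round x up to the next multiple of a; a must be a power of two */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* with 8 threads per core: ALIGN_UP(13, 8) == 16, ALIGN_UP(16, 8) == 16 */
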
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index eddc031c4b95f4..46b9476d758249 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -310,8 +310,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
+ [RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
+ .name = "ibm,remove-pe-dma-window",
+ },
+- [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS] = {
+- .name = "ibm,reset-pe-dma-windows",
++ [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
++ /*
++ * Note: PAPR+ v2.13 7.3.31.4.1 spells this as
++ * "ibm,reset-pe-dma-windows" (plural), but RTAS
++ * implementations use the singular form in practice.
++ */
++ .name = "ibm,reset-pe-dma-window",
+ },
+ [RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
+ .name = "ibm,scan-log-dump",
+@@ -544,6 +549,21 @@ static int __init rtas_token_to_function_xarray_init(void)
+ }
+ arch_initcall(rtas_token_to_function_xarray_init);
+
++/*
++ * For use by sys_rtas(), where the token value is provided by user
++ * space and we don't want to warn on failed lookups.
++ */
++static const struct rtas_function *rtas_token_to_function_untrusted(s32 token)
++{
++ return xa_load(&rtas_token_to_function_xarray, token);
++}
++
++/*
++ * Reverse lookup for deriving the function descriptor from a
++ * known-good token value in contexts where the descriptor is not
++ * already at hand. @token must be valid, e.g. derived from the result of a
++ * prior lookup against the function table.
++ */
+ static const struct rtas_function *rtas_token_to_function(s32 token)
+ {
+ const struct rtas_function *func;
+@@ -551,7 +571,7 @@ static const struct rtas_function *rtas_token_to_function(s32 token)
+ if (WARN_ONCE(token < 0, "invalid token %d", token))
+ return NULL;
+
+- func = xa_load(&rtas_token_to_function_xarray, token);
++ func = rtas_token_to_function_untrusted(token);
+
+ if (WARN_ONCE(!func, "unexpected failed lookup for token %d", token))
+ return NULL;
+@@ -1726,7 +1746,7 @@ static bool block_rtas_call(int token, int nargs,
+ * If this token doesn't correspond to a function the kernel
+ * understands, you're not allowed to call it.
+ */
+- func = rtas_token_to_function(token);
++ func = rtas_token_to_function_untrusted(token);
+ if (!func)
+ goto err;
+ /*
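
block_rtas_call() (in the hunk just above) validates a token that arrived from user space through sys_rtas(), so it switches to the _untrusted lookup; the WARN_ONCE-ing rtas_token_to_function() stays reserved for tokens the kernel derived itself, where a miss indicates a bug. The trusted/untrusted split in miniature (a sketch; the struct and table names are illustrative):

struct entry;
static struct xarray table;     /* illustrative key -> entry map */

/* plain lookup: NULL on an unknown key, no noise (user-supplied input) */
static const struct entry *lookup_untrusted(s32 key)
{
        return xa_load(&table, key);
}

/* kernel-derived keys only: a failed lookup here is a bug, so warn */
static const struct entry *lookup(s32 key)
{
        const struct entry *e = lookup_untrusted(key);

        WARN_ONCE(!e, "unexpected failed lookup for key %d", key);
        return e;
}
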
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 20f72cd1d8138d..03eaad5949f141 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -950,6 +950,7 @@ void __init setup_arch(char **cmdline_p)
+ mem_topology_setup();
+ /* Set max_mapnr before paging_init() */
+ set_max_mapnr(max_pfn);
++ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+
+ /*
+ * Release secondary cpus out of their spinloops at 0x60 now that
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 246201d0d879ef..394f209536cee0 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
+
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
++DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+
+ void __init setup_per_cpu_areas(void)
+ {
+@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
+ if (rc < 0)
+ panic("cannot initialize percpu area (err=%d)", rc);
+
++ static_key_enable(&__percpu_first_chunk_is_paged.key);
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
+index 20e50586e8a26c..40f6751271d3d8 100644
+--- a/arch/powerpc/kernel/syscalls/syscall.tbl
++++ b/arch/powerpc/kernel/syscalls/syscall.tbl
+@@ -230,8 +230,10 @@
+ 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+ 179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
+ 179 64 pread64 sys_pread64
++179 spu pread64 sys_pread64
+ 180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
+ 180 64 pwrite64 sys_pwrite64
++180 spu pwrite64 sys_pwrite64
+ 181 common chown sys_chown
+ 182 common getcwd sys_getcwd
+ 183 common capget sys_capget
+@@ -246,6 +248,7 @@
+ 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
+ 191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
+ 191 64 readahead sys_readahead
++191 spu readahead sys_readahead
+ 192 32 mmap2 sys_mmap2 compat_sys_mmap2
+ 193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
+ 194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
+@@ -293,6 +296,7 @@
+ 232 nospu set_tid_address sys_set_tid_address
+ 233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
+ 233 64 fadvise64 sys_fadvise64
++233 spu fadvise64 sys_fadvise64
+ 234 nospu exit_group sys_exit_group
+ 235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+ 236 common epoll_create sys_epoll_create
+@@ -502,7 +506,7 @@
+ 412 32 utimensat_time64 sys_utimensat sys_utimensat
+ 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+ 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
++416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+ 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+ 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+ 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
+index 82010629cf887c..d8d6b4fd9a14cb 100644
+--- a/arch/powerpc/kernel/trace/ftrace.c
++++ b/arch/powerpc/kernel/trace/ftrace.c
+@@ -27,10 +27,22 @@
+ #include <asm/ftrace.h>
+ #include <asm/syscall.h>
+ #include <asm/inst.h>
++#include <asm/sections.h>
+
+ #define NUM_FTRACE_TRAMPS 2
+ static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
+
++unsigned long ftrace_call_adjust(unsigned long addr)
++{
++ if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
++ return 0;
++
++ if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
++ addr += MCOUNT_INSN_SIZE;
++
++ return addr;
++}
++
+ static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
+ {
+ ppc_inst_t op;
+diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c
+index 7b85c3b460a3c0..12fab1803bcf45 100644
+--- a/arch/powerpc/kernel/trace/ftrace_64_pg.c
++++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c
+@@ -37,6 +37,11 @@
+ #define NUM_FTRACE_TRAMPS 8
+ static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
+
++unsigned long ftrace_call_adjust(unsigned long addr)
++{
++ return addr;
++}
++
+ static ppc_inst_t
+ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
+ {
+diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S
+index 90701885762cf1..40677416d7b262 100644
+--- a/arch/powerpc/kernel/trace/ftrace_entry.S
++++ b/arch/powerpc/kernel/trace/ftrace_entry.S
+@@ -62,7 +62,7 @@
+ .endif
+
+ /* Save previous stack pointer (r1) */
+- addi r8, r1, SWITCH_FRAME_SIZE
++ addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ PPC_STL r8, GPR1(r1)
+
+ .if \allregs == 1
+@@ -182,7 +182,7 @@ ftrace_no_trace:
+ mflr r3
+ mtctr r3
+ REST_GPR(3, r1)
+- addi r1, r1, SWITCH_FRAME_SIZE
++ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ mtlr r0
+ bctr
+ #endif
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 64ff37721fd06f..2de7f6dcd32b06 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1164,6 +1164,7 @@ void emulate_single_step(struct pt_regs *regs)
+ __single_step_exception(regs);
+ }
+
++#ifdef CONFIG_PPC_FPU_REGS
+ static inline int __parse_fpscr(unsigned long fpscr)
+ {
+ int ret = FPE_FLTUNK;
+@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
+
+ return ret;
+ }
++#endif
+
+ static void parse_fpe(struct pt_regs *regs)
+ {
+@@ -1437,10 +1439,12 @@ static int emulate_instruction(struct pt_regs *regs)
+ return -EINVAL;
+ }
+
++#ifdef CONFIG_GENERIC_BUG
+ int is_valid_bugaddr(unsigned long addr)
+ {
+ return is_kernel_addr(addr);
+ }
++#endif
+
+ #ifdef CONFIG_MATH_EMULATION
+ static int emulate_math(struct pt_regs *regs)
+diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
+index 0085ae464dac9c..3b2479bd2f9a1d 100644
+--- a/arch/powerpc/kernel/vdso/cacheflush.S
++++ b/arch/powerpc/kernel/vdso/cacheflush.S
+@@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
+ #ifdef CONFIG_PPC64
+ mflr r12
+ .cfi_register lr,r12
+- get_datapage r10
++ get_realdatapage r10, r11
+ mtlr r12
+ .cfi_restore lr
+ #endif
+diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S
+index db8e167f01667e..2b19b6201a33a8 100644
+--- a/arch/powerpc/kernel/vdso/datapage.S
++++ b/arch/powerpc/kernel/vdso/datapage.S
+@@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
+ mflr r12
+ .cfi_register lr,r12
+ mr. r4,r3
+- get_datapage r3
++ get_realdatapage r3, r11
+ mtlr r12
+ #ifdef __powerpc64__
+ addi r3,r3,CFG_SYSCALL_MAP64
+@@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
+ .cfi_startproc
+ mflr r12
+ .cfi_register lr,r12
+- get_datapage r3
++ get_realdatapage r3, r11
+ #ifndef __powerpc64__
+ lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
+ #endif
+diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
+index 48fc6658053aa4..894cb939cd2b31 100644
+--- a/arch/powerpc/kernel/vdso/gettimeofday.S
++++ b/arch/powerpc/kernel/vdso/gettimeofday.S
+@@ -38,11 +38,7 @@
+ .else
+ addi r4, r5, VDSO_DATA_OFFSET
+ .endif
+-#ifdef __powerpc64__
+ bl CFUNC(DOTSYM(\funct))
+-#else
+- bl \funct
+-#endif
+ PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+ #ifdef __powerpc64__
+ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
+index 426e1ccc6971a3..8f57107000a247 100644
+--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
++++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
+@@ -74,6 +74,8 @@ SECTIONS
+ .got : { *(.got) } :text
+ .plt : { *(.plt) }
+
++ .rela.dyn : { *(.rela .rela*) }
++
+ _end = .;
+ __end = .;
+ PROVIDE(end = .);
+@@ -87,7 +89,7 @@ SECTIONS
+ *(.branch_lt)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+- *(.got1 .glink .iplt .rela*)
++ *(.got1 .glink .iplt)
+ }
+ }
+
+diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
+index bda6c8cdd459c0..400819258c06b7 100644
+--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
++++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
+@@ -69,7 +69,7 @@ SECTIONS
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+ .gcc_except_table : { *(.gcc_except_table) }
+- .rela.dyn ALIGN(8) : { *(.rela.dyn) }
++ .rela.dyn ALIGN(8) : { *(.rela .rela*) }
+
+ .got ALIGN(8) : { *(.got .toc) }
+
+@@ -86,7 +86,7 @@ SECTIONS
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ *(.opd)
+- *(.glink .iplt .plt .rela*)
++ *(.glink .iplt .plt)
+ }
+ }
+
+diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
+index 4094e4c4c77a77..80b3f6e476b66b 100644
+--- a/arch/powerpc/kernel/vector.S
++++ b/arch/powerpc/kernel/vector.S
+@@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
+ mfvscr v0
+ li r4, VRSTATE_VSCR
+ stvx v0, r4, r3
++ lvx v0, 0, r3
+ blr
+ EXPORT_SYMBOL(store_vr_state)
+
+@@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
+ mfvscr v0
+ li r4,VRSTATE_VSCR
+ stvx v0,r4,r7
++ lvx v0,0,r7
+ blr
+
+ #ifdef CONFIG_VSX
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 1c5970df323366..f420df7888a75c 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -281,7 +281,9 @@ SECTIONS
+ * to deal with references from __bug_table
+ */
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
++ __exittext_begin = .;
+ EXIT_TEXT
++ __exittext_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
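
The linker-script hunk above brackets EXIT_TEXT with __exittext_begin/__exittext_end, and the new ftrace_call_adjust() (earlier in this patch) uses them to return 0 for call sites in .exit.text, so ftrace never patches code that may be discarded. The standard way to consume such markers from C:

/* provided by the linker script around the section of interest */
extern char __exittext_begin[], __exittext_end[];

static bool in_exit_text(unsigned long addr)
{
        return addr >= (unsigned long)__exittext_begin &&
               addr < (unsigned long)__exittext_end;
}
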
+diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
+index de64c796299121..005269ac3244c4 100644
+--- a/arch/powerpc/kexec/core.c
++++ b/arch/powerpc/kexec/core.c
+@@ -74,6 +74,9 @@ void arch_crash_save_vmcoreinfo(void)
+ VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
+ VMCOREINFO_OFFSET(mmu_psize_def, shift);
+ #endif
++ VMCOREINFO_SYMBOL(cur_cpu_spec);
++ VMCOREINFO_OFFSET(cpu_spec, mmu_features);
++ vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+ }
+
+diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
+index a79e28c91e2be3..e465e448773768 100644
+--- a/arch/powerpc/kexec/core_64.c
++++ b/arch/powerpc/kexec/core_64.c
+@@ -26,6 +26,7 @@
+ #include <asm/paca.h>
+ #include <asm/mmu.h>
+ #include <asm/sections.h> /* _end */
++#include <asm/setup.h>
+ #include <asm/smp.h>
+ #include <asm/hw_breakpoint.h>
+ #include <asm/svm.h>
+@@ -316,6 +317,16 @@ void default_machine_kexec(struct kimage *image)
+ if (!kdump_in_progress())
+ kexec_prepare_cpus();
+
++#ifdef CONFIG_PPC_PSERIES
++ /*
++ * This must be done after other CPUs have shut down, otherwise they
++ * could execute the 'scv' instruction, which is not supported with
++ * reloc disabled (see configure_exceptions()).
++ */
++ if (firmware_has_feature(FW_FEATURE_SET_MODE))
++ pseries_disable_reloc_on_exc();
++#endif
++
+ printk("kexec: Starting switchover sequence.\n");
+
+ /* switch to a statically allocated stack. Based on irq stack code.
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index efd0ebf70a5e60..fdfc2a62dd67df 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -28,6 +28,7 @@
+ #include <asm/pte-walk.h>
+
+ #include "book3s.h"
++#include "book3s_hv.h"
+ #include "trace_hv.h"
+
+ //#define DEBUG_RESIZE_HPT 1
+@@ -347,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ unsigned long v, orig_v, gr;
+ __be64 *hptep;
+ long int index;
+- int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
++ int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
+
+ if (kvm_is_radix(vcpu->kvm))
+ return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
+@@ -385,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+
+ /* Get PP bits and key for permission check */
+ pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
+- key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
++ key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ key &= slb_v;
+
+ /* Calculate permissions */
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+index 572707858d65d4..10aacbf92466a5 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+@@ -15,6 +15,7 @@
+
+ #include <asm/kvm_ppc.h>
+ #include <asm/kvm_book3s.h>
++#include "book3s_hv.h"
+ #include <asm/page.h>
+ #include <asm/mmu.h>
+ #include <asm/pgalloc.h>
+@@ -294,9 +295,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ } else {
+ if (!(pte & _PAGE_PRIVILEGED)) {
+ /* Check AMR/IAMR to see if strict mode is in force */
+- if (vcpu->arch.amr & (1ul << 62))
++ if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
+ gpte->may_read = 0;
+- if (vcpu->arch.amr & (1ul << 63))
++ if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
+ gpte->may_write = 0;
+ if (vcpu->arch.iamr & (1ul << 62))
+ gpte->may_execute = 0;
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 93b695b289e99a..395659f2f4c8ef 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -129,14 +129,16 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ }
+ rcu_read_unlock();
+
+- fdput(f);
+-
+- if (!found)
++ if (!found) {
++ fdput(f);
+ return -EINVAL;
++ }
+
+ table_group = iommu_group_get_iommudata(grp);
+- if (WARN_ON(!table_group))
++ if (WARN_ON(!table_group)) {
++ fdput(f);
+ return -EFAULT;
++ }
+
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ struct iommu_table *tbltmp = table_group->tables[i];
+@@ -157,8 +159,10 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ break;
+ }
+ }
+- if (!tbl)
++ if (!tbl) {
++ fdput(f);
+ return -EINVAL;
++ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+@@ -169,6 +173,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ /* stit is being destroyed */
+ iommu_tce_table_put(tbl);
+ rcu_read_unlock();
++ fdput(f);
+ return -ENOTTY;
+ }
+ /*
+@@ -176,6 +181,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ * its KVM reference counter and can return.
+ */
+ rcu_read_unlock();
++ fdput(f);
+ return 0;
+ }
+ rcu_read_unlock();
+@@ -183,6 +189,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ stit = kzalloc(sizeof(*stit), GFP_KERNEL);
+ if (!stit) {
+ iommu_tce_table_put(tbl);
++ fdput(f);
+ return -ENOMEM;
+ }
+
+@@ -191,6 +198,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+
+ list_add_rcu(&stit->next, &stt->iommu_tables);
+
++ fdput(f);
+ return 0;
+ }
+
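
The book3s_64_vio.c hunks above fix a reference leak: the original code dropped the file reference with fdput() early while the function kept using data reachable through it, so the fix holds the reference and releases it on every return path instead. When exit paths multiply like this, the usual kernel idiom is a single cleanup label; a sketch, with looks_valid() and do_attach() as hypothetical stand-ins:

long attach(int tablefd)
{
        struct fd f = fdget(tablefd);
        long ret = -EINVAL;

        if (!f.file)
                return -EBADF;

        if (!looks_valid(f.file))       /* hypothetical validation */
                goto out;

        ret = do_attach(f.file);        /* hypothetical work */
out:
        fdput(f);                       /* one release covers every path */
        return ret;
}
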
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 130bafdb143088..1bb00c72154407 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -868,7 +868,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+ /* Guests can't breakpoint the hypervisor */
+ if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ return H_P3;
+- vcpu->arch.ciabr = value1;
++ kvmppc_set_ciabr_hv(vcpu, value1);
+ return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_SET_DAWR0:
+ if (!kvmppc_power8_compatible(vcpu))
+@@ -879,8 +879,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+ return H_UNSUPPORTED_FLAG_START;
+ if (value2 & DABRX_HYP)
+ return H_P4;
+- vcpu->arch.dawr0 = value1;
+- vcpu->arch.dawrx0 = value2;
++ kvmppc_set_dawr0_hv(vcpu, value1);
++ kvmppc_set_dawrx0_hv(vcpu, value2);
+ return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_SET_DAWR1:
+ if (!kvmppc_power8_compatible(vcpu))
+@@ -895,8 +895,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+ return H_UNSUPPORTED_FLAG_START;
+ if (value2 & DABRX_HYP)
+ return H_P4;
+- vcpu->arch.dawr1 = value1;
+- vcpu->arch.dawrx1 = value2;
++ kvmppc_set_dawr1_hv(vcpu, value1);
++ kvmppc_set_dawrx1_hv(vcpu, value2);
+ return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
+ /*
+@@ -1370,7 +1370,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+ */
+ static void kvmppc_cede(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.shregs.msr |= MSR_EE;
++ __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
+ vcpu->arch.ceded = 1;
+ smp_mb();
+ if (vcpu->arch.prodded) {
+@@ -1544,7 +1544,7 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
+ return EMULATE_FAIL;
+
+- vcpu->arch.hfscr |= HFSCR_PM;
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM);
+
+ return RESUME_GUEST;
+ }
+@@ -1554,7 +1554,7 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
+ return EMULATE_FAIL;
+
+- vcpu->arch.hfscr |= HFSCR_EBB;
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB);
+
+ return RESUME_GUEST;
+ }
+@@ -1564,7 +1564,7 @@ static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+ return EMULATE_FAIL;
+
+- vcpu->arch.hfscr |= HFSCR_TM;
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
+
+ return RESUME_GUEST;
+ }
+@@ -1585,7 +1585,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ * That can happen due to a bug, or due to a machine check
+ * occurring at just the wrong time.
+ */
+- if (vcpu->arch.shregs.msr & MSR_HV) {
++ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
+ printk(KERN_EMERG "KVM trap in HV mode!\n");
+ printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+@@ -1636,7 +1636,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ * so that it knows that the machine check occurred.
+ */
+ if (!vcpu->kvm->arch.fwnmi_enabled) {
+- ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
++ ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
+ (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
+ kvmppc_core_queue_machine_check(vcpu, flags);
+ r = RESUME_GUEST;
+@@ -1666,7 +1666,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ * as a result of a hypervisor emulation interrupt
+ * (e40) getting turned into a 700 by BML RTAS.
+ */
+- flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
++ flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
+ (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+@@ -1676,7 +1676,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ {
+ int i;
+
+- if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
++ if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
+ /*
+ * Guest userspace executed sc 1. This can only be
+ * reached by the P9 path because the old path
+@@ -1754,7 +1754,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ break;
+ }
+
+- if (!(vcpu->arch.shregs.msr & MSR_DR))
++ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+@@ -1778,7 +1778,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ long err;
+
+ vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
+- vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
++ vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
+ DSISR_SRR1_MATCH_64S;
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+@@ -1787,7 +1787,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
++ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ r = RESUME_PAGE_FAULT;
+ break;
+@@ -1801,7 +1801,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ break;
+ }
+
+- if (!(vcpu->arch.shregs.msr & MSR_IR))
++ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+@@ -1863,7 +1863,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ * Otherwise, we just generate a program interrupt to the guest.
+ */
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
+- u64 cause = vcpu->arch.hfscr >> 56;
++ u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56;
+
+ r = EMULATE_FAIL;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+@@ -1891,7 +1891,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
+ kvmppc_dump_regs(vcpu);
+ printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+- vcpu->arch.shregs.msr);
++ __kvmppc_get_msr_hv(vcpu));
+ run->hw.hardware_exit_reason = vcpu->arch.trap;
+ r = RESUME_HOST;
+ break;
+@@ -1915,11 +1915,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
+ * That can happen due to a bug, or due to a machine check
+ * occurring at just the wrong time.
+ */
+- if (vcpu->arch.shregs.msr & MSR_HV) {
++ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
+ pr_emerg("KVM trap in HV mode while nested!\n");
+ pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+- vcpu->arch.shregs.msr);
++ __kvmppc_get_msr_hv(vcpu));
+ kvmppc_dump_regs(vcpu);
+ return RESUME_HOST;
+ }
+@@ -1976,7 +1976,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
+ vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
+ vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
+ DSISR_SRR1_MATCH_64S;
+- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
++ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvmhv_nested_page_fault(vcpu);
+@@ -2207,64 +2207,64 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, vcpu->arch.dabrx);
+ break;
+ case KVM_REG_PPC_DSCR:
+- *val = get_reg_val(id, vcpu->arch.dscr);
++ *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu));
+ break;
+ case KVM_REG_PPC_PURR:
+- *val = get_reg_val(id, vcpu->arch.purr);
++ *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu));
+ break;
+ case KVM_REG_PPC_SPURR:
+- *val = get_reg_val(id, vcpu->arch.spurr);
++ *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu));
+ break;
+ case KVM_REG_PPC_AMR:
+- *val = get_reg_val(id, vcpu->arch.amr);
++ *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu));
+ break;
+ case KVM_REG_PPC_UAMOR:
+- *val = get_reg_val(id, vcpu->arch.uamor);
++ *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu));
+ break;
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
+ i = id - KVM_REG_PPC_MMCR0;
+- *val = get_reg_val(id, vcpu->arch.mmcr[i]);
++ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i));
+ break;
+ case KVM_REG_PPC_MMCR2:
+- *val = get_reg_val(id, vcpu->arch.mmcr[2]);
++ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2));
+ break;
+ case KVM_REG_PPC_MMCRA:
+- *val = get_reg_val(id, vcpu->arch.mmcra);
++ *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu));
+ break;
+ case KVM_REG_PPC_MMCRS:
+ *val = get_reg_val(id, vcpu->arch.mmcrs);
+ break;
+ case KVM_REG_PPC_MMCR3:
+- *val = get_reg_val(id, vcpu->arch.mmcr[3]);
++ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3));
+ break;
+ case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
+ i = id - KVM_REG_PPC_PMC1;
+- *val = get_reg_val(id, vcpu->arch.pmc[i]);
++ *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i));
+ break;
+ case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+ i = id - KVM_REG_PPC_SPMC1;
+ *val = get_reg_val(id, vcpu->arch.spmc[i]);
+ break;
+ case KVM_REG_PPC_SIAR:
+- *val = get_reg_val(id, vcpu->arch.siar);
++ *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
+ break;
+ case KVM_REG_PPC_SDAR:
+- *val = get_reg_val(id, vcpu->arch.sdar);
++ *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu));
+ break;
+ case KVM_REG_PPC_SIER:
+- *val = get_reg_val(id, vcpu->arch.sier[0]);
++ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
+ break;
+ case KVM_REG_PPC_SIER2:
+- *val = get_reg_val(id, vcpu->arch.sier[1]);
++ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1));
+ break;
+ case KVM_REG_PPC_SIER3:
+- *val = get_reg_val(id, vcpu->arch.sier[2]);
++ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2));
+ break;
+ case KVM_REG_PPC_IAMR:
+- *val = get_reg_val(id, vcpu->arch.iamr);
++ *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu));
+ break;
+ case KVM_REG_PPC_PSPB:
+- *val = get_reg_val(id, vcpu->arch.pspb);
++ *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu));
+ break;
+ case KVM_REG_PPC_DPDES:
+ /*
+@@ -2282,19 +2282,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, vcpu->arch.vcore->vtb);
+ break;
+ case KVM_REG_PPC_DAWR:
+- *val = get_reg_val(id, vcpu->arch.dawr0);
++ *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu));
+ break;
+ case KVM_REG_PPC_DAWRX:
+- *val = get_reg_val(id, vcpu->arch.dawrx0);
++ *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu));
+ break;
+ case KVM_REG_PPC_DAWR1:
+- *val = get_reg_val(id, vcpu->arch.dawr1);
++ *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu));
+ break;
+ case KVM_REG_PPC_DAWRX1:
+- *val = get_reg_val(id, vcpu->arch.dawrx1);
++ *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
+ break;
+ case KVM_REG_PPC_CIABR:
+- *val = get_reg_val(id, vcpu->arch.ciabr);
++ *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
+ break;
+ case KVM_REG_PPC_CSIGR:
+ *val = get_reg_val(id, vcpu->arch.csigr);
+@@ -2312,7 +2312,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, vcpu->arch.acop);
+ break;
+ case KVM_REG_PPC_WORT:
+- *val = get_reg_val(id, vcpu->arch.wort);
++ *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu));
+ break;
+ case KVM_REG_PPC_TIDR:
+ *val = get_reg_val(id, vcpu->arch.tid);
+@@ -2345,7 +2345,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ break;
+ case KVM_REG_PPC_PPR:
+- *val = get_reg_val(id, vcpu->arch.ppr);
++ *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu));
+ break;
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case KVM_REG_PPC_TFHAR:
+@@ -2425,6 +2425,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ case KVM_REG_PPC_PTCR:
+ *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
+ break;
++ case KVM_REG_PPC_FSCR:
++ *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu));
++ break;
+ default:
+ r = -EINVAL;
+ break;
+@@ -2453,64 +2456,64 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
+ break;
+ case KVM_REG_PPC_DSCR:
+- vcpu->arch.dscr = set_reg_val(id, *val);
++ kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_PURR:
+- vcpu->arch.purr = set_reg_val(id, *val);
++ kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_SPURR:
+- vcpu->arch.spurr = set_reg_val(id, *val);
++ kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_AMR:
+- vcpu->arch.amr = set_reg_val(id, *val);
++ kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_UAMOR:
+- vcpu->arch.uamor = set_reg_val(id, *val);
++ kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
+ i = id - KVM_REG_PPC_MMCR0;
+- vcpu->arch.mmcr[i] = set_reg_val(id, *val);
++ kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_MMCR2:
+- vcpu->arch.mmcr[2] = set_reg_val(id, *val);
++ kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_MMCRA:
+- vcpu->arch.mmcra = set_reg_val(id, *val);
++ kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_MMCRS:
+ vcpu->arch.mmcrs = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MMCR3:
+- *val = get_reg_val(id, vcpu->arch.mmcr[3]);
++ kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
+ i = id - KVM_REG_PPC_PMC1;
+- vcpu->arch.pmc[i] = set_reg_val(id, *val);
++ kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+ i = id - KVM_REG_PPC_SPMC1;
+ vcpu->arch.spmc[i] = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_SIAR:
+- vcpu->arch.siar = set_reg_val(id, *val);
++ kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_SDAR:
+- vcpu->arch.sdar = set_reg_val(id, *val);
++ kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_SIER:
+- vcpu->arch.sier[0] = set_reg_val(id, *val);
++ kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_SIER2:
+- vcpu->arch.sier[1] = set_reg_val(id, *val);
++ kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_SIER3:
+- vcpu->arch.sier[2] = set_reg_val(id, *val);
++ kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_IAMR:
+- vcpu->arch.iamr = set_reg_val(id, *val);
++ kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_PSPB:
+- vcpu->arch.pspb = set_reg_val(id, *val);
++ kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_DPDES:
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+@@ -2522,22 +2525,22 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ vcpu->arch.vcore->vtb = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DAWR:
+- vcpu->arch.dawr0 = set_reg_val(id, *val);
++ kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_DAWRX:
+- vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
++ kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
+ break;
+ case KVM_REG_PPC_DAWR1:
+- vcpu->arch.dawr1 = set_reg_val(id, *val);
++ kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_DAWRX1:
+- vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
++ kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
+ break;
+ case KVM_REG_PPC_CIABR:
+- vcpu->arch.ciabr = set_reg_val(id, *val);
++ kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
+ /* Don't allow setting breakpoints in hypervisor code */
+- if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+- vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
++ if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER)
++ kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV);
+ break;
+ case KVM_REG_PPC_CSIGR:
+ vcpu->arch.csigr = set_reg_val(id, *val);
+@@ -2555,7 +2558,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ vcpu->arch.acop = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_WORT:
+- vcpu->arch.wort = set_reg_val(id, *val);
++ kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val));
+ break;
+ case KVM_REG_PPC_TIDR:
+ vcpu->arch.tid = set_reg_val(id, *val);
+@@ -2615,7 +2618,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
+ break;
+ case KVM_REG_PPC_PPR:
+- vcpu->arch.ppr = set_reg_val(id, *val);
++ kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val));
+ break;
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case KVM_REG_PPC_TFHAR:
+@@ -2699,6 +2702,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ case KVM_REG_PPC_PTCR:
+ vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
+ break;
++ case KVM_REG_PPC_FSCR:
++ kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val));
++ break;
+ default:
+ r = -EINVAL;
+ break;
+@@ -2916,19 +2922,20 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
+ vcpu->arch.shared_big_endian = false;
+ #endif
+ #endif
+- vcpu->arch.mmcr[0] = MMCR0_FC;
++ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
++
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+- vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
+- vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
++ kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
++ kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
+ }
+
+- vcpu->arch.ctrl = CTRL_RUNLATCH;
++ kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH);
+ /* default to host PVR, since we can't spoof it */
+ kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
+ spin_lock_init(&vcpu->arch.vpa_update_lock);
+ spin_lock_init(&vcpu->arch.tbacct_lock);
+ vcpu->arch.busy_preempt = TB_NIL;
+- vcpu->arch.shregs.msr = MSR_ME;
++ __kvmppc_set_msr_hv(vcpu, MSR_ME);
+ vcpu->arch.intr_msr = MSR_SF | MSR_ME;
+
+ /*
+@@ -2938,29 +2945,30 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
+ * don't set the HFSCR_MSGP bit, and that causes those instructions
+ * to trap and then we emulate them.
+ */
+- vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+- HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
++ kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
++ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP);
+
+ /* On POWER10 and later, allow prefixed instructions */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+- vcpu->arch.hfscr |= HFSCR_PREFIX;
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX);
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+- vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR));
++
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+- vcpu->arch.hfscr |= HFSCR_TM;
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
+ #endif
+ }
+ if (cpu_has_feature(CPU_FTR_TM_COMP))
+ vcpu->arch.hfscr |= HFSCR_TM;
+
+- vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
++ vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
+
+ /*
+ * PM, EBB, TM are demand-faulted so start with it clear.
+ */
+- vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
++ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM));
+
+ kvmppc_mmu_book3s_hv_init(vcpu);
+
+@@ -4176,7 +4184,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ __this_cpu_write(cpu_in_guest, NULL);
+
+ if (trap == BOOK3S_INTERRUPT_SYSCALL &&
+- !(vcpu->arch.shregs.msr & MSR_PR)) {
++ !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /*
+@@ -4655,13 +4663,19 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
+
+ if (!nested) {
+ kvmppc_core_prepare_to_enter(vcpu);
+- if (vcpu->arch.shregs.msr & MSR_EE) {
+- if (xive_interrupt_pending(vcpu))
++ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
++ &vcpu->arch.pending_exceptions) ||
++ xive_interrupt_pending(vcpu)) {
++ /*
++ * For nested HV, don't synthesize but always pass MER,
++ * the L0 will be able to optimise that more
++ * effectively than manipulating registers directly.
++ */
++ if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE))
+ kvmppc_inject_interrupt_hv(vcpu,
+- BOOK3S_INTERRUPT_EXTERNAL, 0);
+- } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+- &vcpu->arch.pending_exceptions)) {
+- lpcr |= LPCR_MER;
++ BOOK3S_INTERRUPT_EXTERNAL, 0);
++ else
++ lpcr |= LPCR_MER;
+ }
+ } else if (vcpu->arch.pending_exceptions ||
+ vcpu->arch.doorbell_request ||
+@@ -4844,7 +4858,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
+ msr |= MSR_VSX;
+ if ((cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+- (vcpu->arch.hfscr & HFSCR_TM))
++ (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM))
+ msr |= MSR_TM;
+ msr = msr_check_and_set(msr);
+
+@@ -4868,7 +4882,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+ accumulate_time(vcpu, &vcpu->arch.hcall);
+
+- if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
++ if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
+ /*
+ * These should have been caught reflected
+ * into the guest by now. Final sanity check:
+diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
+index 2f2e59d7d433a6..95241764dfb4eb 100644
+--- a/arch/powerpc/kvm/book3s_hv.h
++++ b/arch/powerpc/kvm/book3s_hv.h
+@@ -50,3 +50,71 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
+ #define start_timing(vcpu, next) do {} while (0)
+ #define end_timing(vcpu) do {} while (0)
+ #endif
++
++static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
++{
++ vcpu->arch.shregs.msr = val;
++}
++
++static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.shregs.msr;
++}
++
++#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
++static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
++{ \
++ vcpu->arch.reg = val; \
++}
++
++#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
++static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
++{ \
++ return vcpu->arch.reg; \
++}
++
++#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size) \
++ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
++ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
++
++#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
++static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
++{ \
++ vcpu->arch.reg[i] = val; \
++}
++
++#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
++static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
++{ \
++ return vcpu->arch.reg[i]; \
++}
++
++#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size) \
++ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
++ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
++
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64)
++
++KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64)
++KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64)
++KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32)
++
++KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)
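
The accessor block above is the heart of this series of hunks: direct pokes at vcpu->arch fields are funnelled through generated getters and setters so every access goes through one place. As a minimal standalone sketch of how such token-pasting accessors expand and how the read-modify-write call sites above use them (a userspace model with placeholder fields and names, not the kernel's definitions):

/* Standalone model of the KVMPPC_BOOK3S_HV_VCPU_ACCESSOR pattern above.
 * The struct fields and the ACCESSOR name are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct vcpu_arch { uint64_t hfscr; };
struct kvm_vcpu { struct vcpu_arch arch; };

#define ACCESSOR(reg, size)                                          \
static inline void set_##reg(struct kvm_vcpu *v, uint##size##_t val) \
{ v->arch.reg = val; }                                               \
static inline uint##size##_t get_##reg(struct kvm_vcpu *v)           \
{ return v->arch.reg; }

ACCESSOR(hfscr, 64)

int main(void)
{
	struct kvm_vcpu vcpu = { { 0 } };

	/* Mirrors the read-modify-write call sites in the hunks above,
	 * e.g. kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
	 * the bit used below is arbitrary, chosen only for the demo. */
	set_hfscr(&vcpu, get_hfscr(&vcpu) | (1ULL << 58));
	printf("hfscr = %#llx\n", (unsigned long long)get_hfscr(&vcpu));
	return 0;
}
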
+diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
+index 0f5b021fa5590b..663f5222f3d06d 100644
+--- a/arch/powerpc/kvm/book3s_hv_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
+@@ -32,6 +32,7 @@
+
+ #include "book3s_xics.h"
+ #include "book3s_xive.h"
++#include "book3s_hv.h"
+
+ /*
+ * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
+@@ -510,7 +511,7 @@ void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+ */
+ if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+ msr &= ~MSR_TS_MASK;
+- vcpu->arch.shregs.msr = msr;
++ __kvmppc_set_msr_hv(vcpu, msr);
+ kvmppc_end_cede(vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
+@@ -548,7 +549,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+ kvmppc_set_srr0(vcpu, pc);
+ kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+ kvmppc_set_pc(vcpu, new_pc);
+- vcpu->arch.shregs.msr = new_msr;
++ __kvmppc_set_msr_hv(vcpu, new_msr);
+ }
+
+ void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 7197c8256668b8..6cef200c2404df 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1990,8 +1990,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ break;
+
+ r = -ENXIO;
+- if (!xive_enabled())
++ if (!xive_enabled()) {
++ fdput(f);
+ break;
++ }
+
+ r = -EPERM;
+ dev = kvm_device_from_filp(f.file);
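
The powerpc.c hunk above plugs a reference leak: the early `break` on !xive_enabled() previously skipped the fdput() that balances the earlier fdget(). A small userspace sketch of the bug class and the fix (hypothetical `_sketch` names; the real fdget()/fdput() manage struct file references, not stdio handles):

#include <stdio.h>

struct fd { FILE *file; };

static struct fd fdget_sketch(const char *path)
{
	struct fd f = { fopen(path, "r") };
	return f;
}

static void fdput_sketch(struct fd f)
{
	if (f.file)
		fclose(f.file);
}

static int enable_cap_sketch(const char *path, int feature_enabled)
{
	struct fd f = fdget_sketch(path);

	if (!f.file)
		return -1;
	if (!feature_enabled) {
		fdput_sketch(f);	/* the fix: drop the reference before bailing out */
		return -1;
	}
	/* ... use f.file ... */
	fdput_sketch(f);
	return 0;
}

int main(void)
{
	return enable_cap_sketch("/etc/hostname", 0) == -1 ? 0 : 1;
}
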
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 51ad0397c17abf..0ab65eeb93ee3a 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -45,7 +45,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+ # so it is only needed for modules, and only for older linkers which
+ # do not support --save-restore-funcs
+ ifndef CONFIG_LD_IS_BFD
+-extra-$(CONFIG_PPC64) += crtsavres.o
++always-$(CONFIG_PPC64) += crtsavres.o
+ endif
+
+ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
+@@ -76,7 +76,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
+ obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
+
+ obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
+-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
++CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
+ # Enable <altivec.h>
+ CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
+
+diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
+index 6dd2f46bd3ef64..8830267789c9c0 100644
+--- a/arch/powerpc/lib/qspinlock.c
++++ b/arch/powerpc/lib/qspinlock.c
+@@ -715,7 +715,15 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
+ }
+
+ release:
+- qnodesp->count--; /* release the node */
++ /*
++ * Clear the lock before releasing the node, as another CPU might see stale
++ * values if an interrupt occurs after we increment qnodesp->count
++ * but before node->lock is initialized. The barrier ensures that
++ * there are no further stores to the node after it has been released.
++ */
++ node->lock = NULL;
++ barrier();
++ qnodesp->count--;
+ }
+
+ void queued_spin_lock_slowpath(struct qspinlock *lock)
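
The qspinlock hunk reorders the node teardown: the node must be scrubbed while the CPU still owns it, because decrementing qnodesp->count publishes the slot for reuse by an interrupt arriving on the same CPU. A compiler barrier suffices since the hazard is same-CPU reordering, not cross-CPU visibility. A minimal userspace model of the ordering (types are stand-ins):

#include <stddef.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct qnode  { void *lock; };
struct qnodes { int count; struct qnode nodes[4]; };

static void release_node(struct qnodes *qnodesp, struct qnode *node)
{
	node->lock = NULL;	/* scrub while this CPU still owns the slot */
	barrier();		/* keep stores to *node from sinking below */
	qnodesp->count--;	/* the slot may now be reused by an IRQ */
}

int main(void)
{
	struct qnodes q = { .count = 1 };

	release_node(&q, &q.nodes[0]);
	return q.count;	/* 0 */
}
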
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index a4ab8625061a66..6af97dc0f6d5a8 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -586,6 +586,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea,
+ } u;
+
+ nb = GETSIZE(op->type);
++ if (nb > sizeof(u))
++ return -EINVAL;
+ if (!address_ok(regs, ea, nb))
+ return -EFAULT;
+ rn = op->reg;
+@@ -636,6 +638,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea,
+ } u;
+
+ nb = GETSIZE(op->type);
++ if (nb > sizeof(u))
++ return -EINVAL;
+ if (!address_ok(regs, ea, nb))
+ return -EFAULT;
+ rn = op->reg;
+@@ -680,6 +684,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
+ u8 b[sizeof(__vector128)];
+ } u = {};
+
++ if (size > sizeof(u))
++ return -EINVAL;
++
+ if (!address_ok(regs, ea & ~0xfUL, 16))
+ return -EFAULT;
+ /* align to multiple of size */
+@@ -707,6 +714,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
+ u8 b[sizeof(__vector128)];
+ } u;
+
++ if (size > sizeof(u))
++ return -EINVAL;
++
+ if (!address_ok(regs, ea & ~0xfUL, 16))
+ return -EFAULT;
+ /* align to multiple of size */
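
The sstep.c hunks add defensive bounds checks: the length comes from GETSIZE() on a decoded instruction, so the emulator now rejects sizes larger than the on-stack union before copying into it. A sketch of the pattern under simplified types (not the kernel's actual union layout):

#include <errno.h>
#include <stdint.h>
#include <string.h>

union fp_buf {
	double   d;
	uint64_t l;
	uint8_t  b[2 * sizeof(double)];
};

static int load_into(union fp_buf *u, const void *src, size_t nb)
{
	if (nb > sizeof(*u))	/* reject an oversized decoded length */
		return -EINVAL;
	memcpy(u->b, src, nb);
	return 0;
}

int main(void)
{
	union fp_buf u;
	uint8_t src[16] = { 0 };

	return load_into(&u, src, sizeof(src));	/* 16 bytes fits; returns 0 */
}
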
+diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
+index 8f8a62d3ff4de4..5b4cbb25d9cf77 100644
+--- a/arch/powerpc/mm/book3s64/pgtable.c
++++ b/arch/powerpc/mm/book3s64/pgtable.c
+@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
+
+ WARN_ON(pte_hw_valid(pud_pte(*pudp)));
+ assert_spin_locked(pud_lockptr(mm, pudp));
+- WARN_ON(!(pud_large(pud)));
++ WARN_ON(!(pud_leaf(pud)));
+ #endif
+ trace_hugepage_set_pud(addr, pud_val(pud));
+ return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
+@@ -170,6 +170,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ {
+ unsigned long old_pmd;
+
++ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ return __pmd(old_pmd);
+@@ -542,6 +543,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /*
+ * For hash translation mode, we use the deposited table to store hash slot
+ * information and they are stored at PTRS_PER_PMD offset from related pmd
+@@ -563,6 +565,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+
+ return true;
+ }
++#endif
+
+ /*
+ * Does the CPU support tlbie?
+diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
+index 119ef491f79760..d3a7726ecf512c 100644
+--- a/arch/powerpc/mm/init-common.c
++++ b/arch/powerpc/mm/init-common.c
+@@ -126,7 +126,7 @@ void pgtable_cache_add(unsigned int shift)
+ * as to leave enough 0 bits in the address to contain it. */
+ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+ HUGEPD_SHIFT_MASK + 1);
+- struct kmem_cache *new;
++ struct kmem_cache *new = NULL;
+
+ /* It would be nice if this was a BUILD_BUG_ON(), but at the
+ * moment, gcc doesn't seem to recognize is_power_of_2 as a
+@@ -139,7 +139,8 @@ void pgtable_cache_add(unsigned int shift)
+
+ align = max_t(unsigned long, align, minalign);
+ name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+- new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
++ if (name)
++ new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
+ if (!new)
+ panic("Could not allocate pgtable cache for order %d", shift);
+
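
The init-common.c hunk guards two allocation failures: `new` is NULL-initialised so the `!new` panic check is well defined even when kasprintf() fails, and the cache is only created when a name was actually allocated. A userspace sketch of the same shape (stand-in allocators, not the kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *cache_create(const char *name)
{
	return strdup(name);	/* stand-in for kmem_cache_create() */
}

static char *name_alloc(int shift)
{
	char *s = malloc(32);	/* stand-in for kasprintf() */

	if (s)
		snprintf(s, 32, "pgtable-2^%d", shift);
	return s;
}

static int pgtable_cache_add_sketch(int shift)
{
	void *new = NULL;	/* the fix: NULL-init so `!new` is well defined */
	char *name = name_alloc(shift);

	if (name)		/* the fix: only create the cache when named */
		new = cache_create(name);
	free(name);
	if (!new)
		return -1;	/* the kernel panics here instead */
	free(new);
	return 0;
}

int main(void)
{
	return pgtable_cache_add_sketch(12);
}
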
+diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c
+index a70828a6d9357d..aa9aa11927b2f8 100644
+--- a/arch/powerpc/mm/kasan/init_32.c
++++ b/arch/powerpc/mm/kasan/init_32.c
+@@ -64,6 +64,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
+ if (ret)
+ return ret;
+
++ k_start = k_start & PAGE_MASK;
+ block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ if (!block)
+ return -ENOMEM;
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 07e8f4f1e07f89..9dbef559af4cbf 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -287,8 +287,6 @@ void __init mem_init(void)
+ swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
+ #endif
+
+- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+-
+ kasan_late_init();
+
+ memblock_free_all();
+diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
+index 7f9ff0640124af..72341b9fb5521f 100644
+--- a/arch/powerpc/mm/mmu_decl.h
++++ b/arch/powerpc/mm/mmu_decl.h
+@@ -181,3 +181,8 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
+ {
+ return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
+ }
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++int create_section_mapping(unsigned long start, unsigned long end,
++ int nid, pgprot_t prot);
++#endif
+diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
+index a642a79298929d..a947dff35d6517 100644
+--- a/arch/powerpc/mm/nohash/8xx.c
++++ b/arch/powerpc/mm/nohash/8xx.c
+@@ -92,7 +92,8 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
+ return -EINVAL;
+
+ set_huge_pte_at(&init_mm, va, ptep,
+- pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);
++ pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)),
++ 1UL << mmu_psize_to_shift(psize));
+
+ return 0;
+ }
+@@ -148,11 +149,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
+
+ mmu_mapin_immr();
+
+- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
++ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true);
+ if (debug_pagealloc_enabled_or_kfence()) {
+ top = boundary;
+ } else {
+- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
++ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true);
+ mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
+ }
+
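
The 8xx hunk fixes a units mismatch: set_huge_pte_at() takes a size in bytes, while `psize` is an MMU page-size index, hence the `1UL << mmu_psize_to_shift(psize)` conversion. A tiny sketch of the index-to-bytes conversion (illustrative shift table; the kernel reads the shifts from mmu_psize_defs[]):

#include <stdio.h>

/* Illustrative shifts only, not the real mmu_psize_defs[] contents. */
static const unsigned int psize_shift[] = { 12, 14, 19, 23 };

static unsigned long psize_to_bytes(int psize)
{
	return 1UL << psize_shift[psize];	/* index -> size in bytes */
}

int main(void)
{
	printf("psize 0 = %lu bytes\n", psize_to_bytes(0));	/* 4096 */
	return 0;
}
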
+diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
+index f3894e79d5f700..24b445a5fcaccd 100644
+--- a/arch/powerpc/mm/nohash/Makefile
++++ b/arch/powerpc/mm/nohash/Makefile
+@@ -3,7 +3,7 @@
+ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
+
+ obj-y += mmu_context.o tlb.o tlb_low.o kup.o
+-obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
++obj-$(CONFIG_PPC_BOOK3E_64) += tlb_64e.o tlb_low_64e.o book3e_pgtable.o
+ obj-$(CONFIG_40x) += 40x.o
+ obj-$(CONFIG_44x) += 44x.o
+ obj-$(CONFIG_PPC_8xx) += 8xx.o
+diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
+index 5ffa0af4328af8..f57dc721d0636a 100644
+--- a/arch/powerpc/mm/nohash/tlb.c
++++ b/arch/powerpc/mm/nohash/tlb.c
+@@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+ };
+ #endif
+
+-/* The variables below are currently only used on 64-bit Book3E
+- * though this will probably be made common with other nohash
+- * implementations at some point
+- */
+-#ifdef CONFIG_PPC64
+-
+-int mmu_pte_psize; /* Page size used for PTE pages */
+-int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
+-int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
+-unsigned long linear_map_top; /* Top of linear mapping */
+-
+-
+-/*
+- * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
+- * exceptions. This is used for bolted and e6500 TLB miss handlers which
+- * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
+- * this is set to zero.
+- */
+-int extlb_level_exc;
+-
+-#endif /* CONFIG_PPC64 */
+-
+ #ifdef CONFIG_PPC_E500
+ /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
+ DEFINE_PER_CPU(int, next_tlbcam_idx);
+@@ -358,381 +336,7 @@ void tlb_flush(struct mmu_gather *tlb)
+ flush_tlb_mm(tlb->mm);
+ }
+
+-/*
+- * Below are functions specific to the 64-bit variant of Book3E though that
+- * may change in the future
+- */
+-
+-#ifdef CONFIG_PPC64
+-
+-/*
+- * Handling of virtual linear page tables or indirect TLB entries
+- * flushing when PTE pages are freed
+- */
+-void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
+-{
+- int tsize = mmu_psize_defs[mmu_pte_psize].enc;
+-
+- if (book3e_htw_mode != PPC_HTW_NONE) {
+- unsigned long start = address & PMD_MASK;
+- unsigned long end = address + PMD_SIZE;
+- unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
+-
+- /* This isn't the most optimal, ideally we would factor out the
+- * while preempt & CPU mask mucking around, or even the IPI but
+- * it will do for now
+- */
+- while (start < end) {
+- __flush_tlb_page(tlb->mm, start, tsize, 1);
+- start += size;
+- }
+- } else {
+- unsigned long rmask = 0xf000000000000000ul;
+- unsigned long rid = (address & rmask) | 0x1000000000000000ul;
+- unsigned long vpte = address & ~rmask;
+-
+- vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
+- vpte |= rid;
+- __flush_tlb_page(tlb->mm, vpte, tsize, 0);
+- }
+-}
+-
+-static void __init setup_page_sizes(void)
+-{
+- unsigned int tlb0cfg;
+- unsigned int tlb0ps;
+- unsigned int eptcfg;
+- int i, psize;
+-
+-#ifdef CONFIG_PPC_E500
+- unsigned int mmucfg = mfspr(SPRN_MMUCFG);
+- int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
+-
+- if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+- unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
+- unsigned int min_pg, max_pg;
+-
+- min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+- max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+-
+- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+- struct mmu_psize_def *def;
+- unsigned int shift;
+-
+- def = &mmu_psize_defs[psize];
+- shift = def->shift;
+-
+- if (shift == 0 || shift & 1)
+- continue;
+-
+- /* adjust to be in terms of 4^shift Kb */
+- shift = (shift - 10) >> 1;
+-
+- if ((shift >= min_pg) && (shift <= max_pg))
+- def->flags |= MMU_PAGE_SIZE_DIRECT;
+- }
+-
+- goto out;
+- }
+-
+- if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+- u32 tlb1cfg, tlb1ps;
+-
+- tlb0cfg = mfspr(SPRN_TLB0CFG);
+- tlb1cfg = mfspr(SPRN_TLB1CFG);
+- tlb1ps = mfspr(SPRN_TLB1PS);
+- eptcfg = mfspr(SPRN_EPTCFG);
+-
+- if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+- book3e_htw_mode = PPC_HTW_E6500;
+-
+- /*
+- * We expect 4K subpage size and unrestricted indirect size.
+- * The lack of a restriction on indirect size is a Freescale
+- * extension, indicated by PSn = 0 but SPSn != 0.
+- */
+- if (eptcfg != 2)
+- book3e_htw_mode = PPC_HTW_NONE;
+-
+- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+- struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-
+- if (!def->shift)
+- continue;
+-
+- if (tlb1ps & (1U << (def->shift - 10))) {
+- def->flags |= MMU_PAGE_SIZE_DIRECT;
+-
+- if (book3e_htw_mode && psize == MMU_PAGE_2M)
+- def->flags |= MMU_PAGE_SIZE_INDIRECT;
+- }
+- }
+-
+- goto out;
+- }
+-#endif
+-
+- tlb0cfg = mfspr(SPRN_TLB0CFG);
+- tlb0ps = mfspr(SPRN_TLB0PS);
+- eptcfg = mfspr(SPRN_EPTCFG);
+-
+- /* Look for supported direct sizes */
+- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+- struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-
+- if (tlb0ps & (1U << (def->shift - 10)))
+- def->flags |= MMU_PAGE_SIZE_DIRECT;
+- }
+-
+- /* Indirect page sizes supported ? */
+- if ((tlb0cfg & TLBnCFG_IND) == 0 ||
+- (tlb0cfg & TLBnCFG_PT) == 0)
+- goto out;
+-
+- book3e_htw_mode = PPC_HTW_IBM;
+-
+- /* Now, we only deal with one IND page size for each
+- * direct size. Hopefully all implementations today are
+- * unambiguous, but we might want to be careful in the
+- * future.
+- */
+- for (i = 0; i < 3; i++) {
+- unsigned int ps, sps;
+-
+- sps = eptcfg & 0x1f;
+- eptcfg >>= 5;
+- ps = eptcfg & 0x1f;
+- eptcfg >>= 5;
+- if (!ps || !sps)
+- continue;
+- for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+- struct mmu_psize_def *def = &mmu_psize_defs[psize];
+-
+- if (ps == (def->shift - 10))
+- def->flags |= MMU_PAGE_SIZE_INDIRECT;
+- if (sps == (def->shift - 10))
+- def->ind = ps + 10;
+- }
+- }
+-
+-out:
+- /* Cleanup array and print summary */
+- pr_info("MMU: Supported page sizes\n");
+- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+- struct mmu_psize_def *def = &mmu_psize_defs[psize];
+- const char *__page_type_names[] = {
+- "unsupported",
+- "direct",
+- "indirect",
+- "direct & indirect"
+- };
+- if (def->flags == 0) {
+- def->shift = 0;
+- continue;
+- }
+- pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
+- __page_type_names[def->flags & 0x3]);
+- }
+-}
+-
+-static void __init setup_mmu_htw(void)
+-{
+- /*
+- * If we want to use HW tablewalk, enable it by patching the TLB miss
+- * handlers to branch to the one dedicated to it.
+- */
+-
+- switch (book3e_htw_mode) {
+- case PPC_HTW_IBM:
+- patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
+- patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
+- break;
+-#ifdef CONFIG_PPC_E500
+- case PPC_HTW_E6500:
+- extlb_level_exc = EX_TLB_SIZE;
+- patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+- patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+- break;
+-#endif
+- }
+- pr_info("MMU: Book3E HW tablewalk %s\n",
+- book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
+-}
+-
+-/*
+- * Early initialization of the MMU TLB code
+- */
+-static void early_init_this_mmu(void)
+-{
+- unsigned int mas4;
+-
+- /* Set MAS4 based on page table setting */
+-
+- mas4 = 0x4 << MAS4_WIMGED_SHIFT;
+- switch (book3e_htw_mode) {
+- case PPC_HTW_E6500:
+- mas4 |= MAS4_INDD;
+- mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+- mas4 |= MAS4_TLBSELD(1);
+- mmu_pte_psize = MMU_PAGE_2M;
+- break;
+-
+- case PPC_HTW_IBM:
+- mas4 |= MAS4_INDD;
+- mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
+- mmu_pte_psize = MMU_PAGE_1M;
+- break;
+-
+- case PPC_HTW_NONE:
+- mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
+- mmu_pte_psize = mmu_virtual_psize;
+- break;
+- }
+- mtspr(SPRN_MAS4, mas4);
+-
+-#ifdef CONFIG_PPC_E500
+- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+- unsigned int num_cams;
+- bool map = true;
+-
+- /* use a quarter of the TLBCAM for bolted linear map */
+- num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+-
+- /*
+- * Only do the mapping once per core, or else the
+- * transient mapping would cause problems.
+- */
+-#ifdef CONFIG_SMP
+- if (hweight32(get_tensr()) > 1)
+- map = false;
+-#endif
+-
+- if (map)
+- linear_map_top = map_mem_in_cams(linear_map_top,
+- num_cams, false, true);
+- }
+-#endif
+-
+- /* A sync won't hurt us after mucking around with
+- * the MMU configuration
+- */
+- mb();
+-}
+-
+-static void __init early_init_mmu_global(void)
+-{
+- /* XXX This should be decided at runtime based on supported
+- * page sizes in the TLB, but for now let's assume 16M is
+- * always there and a good fit (which it probably is)
+- *
+- * Freescale booke only supports 4K pages in TLB0, so use that.
+- */
+- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+- mmu_vmemmap_psize = MMU_PAGE_4K;
+- else
+- mmu_vmemmap_psize = MMU_PAGE_16M;
+-
+- /* XXX This code only checks for TLB 0 capabilities and doesn't
+- * check what page size combos are supported by the HW. It
+- * also doesn't handle the case where a separate array holds
+- * the IND entries from the array loaded by the PT.
+- */
+- /* Look for supported page sizes */
+- setup_page_sizes();
+-
+- /* Look for HW tablewalk support */
+- setup_mmu_htw();
+-
+-#ifdef CONFIG_PPC_E500
+- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+- if (book3e_htw_mode == PPC_HTW_NONE) {
+- extlb_level_exc = EX_TLB_SIZE;
+- patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+- patch_exception(0x1e0,
+- exc_instruction_tlb_miss_bolted_book3e);
+- }
+- }
+-#endif
+-
+- /* Set the global containing the top of the linear mapping
+- * for use by the TLB miss code
+- */
+- linear_map_top = memblock_end_of_DRAM();
+-
+- ioremap_bot = IOREMAP_BASE;
+-}
+-
+-static void __init early_mmu_set_memory_limit(void)
+-{
+-#ifdef CONFIG_PPC_E500
+- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+- /*
+- * Limit memory so we dont have linear faults.
+- * Unlike memblock_set_current_limit, which limits
+- * memory available during early boot, this permanently
+- * reduces the memory available to Linux. We need to
+- * do this because highmem is not supported on 64-bit.
+- */
+- memblock_enforce_memory_limit(linear_map_top);
+- }
+-#endif
+-
+- memblock_set_current_limit(linear_map_top);
+-}
+-
+-/* boot cpu only */
+-void __init early_init_mmu(void)
+-{
+- early_init_mmu_global();
+- early_init_this_mmu();
+- early_mmu_set_memory_limit();
+-}
+-
+-void early_init_mmu_secondary(void)
+-{
+- early_init_this_mmu();
+-}
+-
+-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+- phys_addr_t first_memblock_size)
+-{
+- /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
+- * the bolted TLB entry. We know for now that only 1G
+- * entries are supported though that may eventually
+- * change.
+- *
+- * on FSL Embedded 64-bit, usually all RAM is bolted, but with
+- * unusual memory sizes it's possible for some RAM to not be mapped
+- * (such RAM is not used at all by Linux, since we don't support
+- * highmem on 64-bit). We limit ppc64_rma_size to what would be
+- * mappable if this memblock is the only one. Additional memblocks
+- * can only increase, not decrease, the amount that ends up getting
+- * mapped. We still limit max to 1G even if we'll eventually map
+- * more. This is due to what the early init code is set up to do.
+- *
+- * We crop it to the size of the first MEMBLOCK to
+- * avoid going over total available memory just in case...
+- */
+-#ifdef CONFIG_PPC_E500
+- if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+- unsigned long linear_sz;
+- unsigned int num_cams;
+-
+- /* use a quarter of the TLBCAM for bolted linear map */
+- num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
+-
+- linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
+- true, true);
+-
+- ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
+- } else
+-#endif
+- ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+-
+- /* Finally limit subsequent allocations */
+- memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+-}
+-#else /* ! CONFIG_PPC64 */
++#ifndef CONFIG_PPC64
+ void __init early_init_mmu(void)
+ {
+ unsigned long root = of_get_flat_dt_root();
+diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c
+new file mode 100644
+index 00000000000000..b6af3ec4d001dc
+--- /dev/null
++++ b/arch/powerpc/mm/nohash/tlb_64e.c
+@@ -0,0 +1,361 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
++ * IBM Corp.
++ *
++ * Derived from arch/ppc/mm/init.c:
++ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
++ *
++ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
++ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
++ * Copyright (C) 1996 Paul Mackerras
++ *
++ * Derived from "arch/i386/mm/init.c"
++ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
++ */
++
++#include <linux/kernel.h>
++#include <linux/export.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/memblock.h>
++
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/tlb.h>
++#include <asm/code-patching.h>
++#include <asm/cputhreads.h>
++
++#include <mm/mmu_decl.h>
++
++/* The variables below are currently only used on 64-bit Book3E
++ * though this will probably be made common with other nohash
++ * implementations at some point
++ */
++static int mmu_pte_psize; /* Page size used for PTE pages */
++int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
++int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
++unsigned long linear_map_top; /* Top of linear mapping */
++
++
++/*
++ * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
++ * exceptions. This is used for bolted and e6500 TLB miss handlers which
++ * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
++ * this is set to zero.
++ */
++int extlb_level_exc;
++
++/*
++ * Handling of virtual linear page tables or indirect TLB entries
++ * flushing when PTE pages are freed
++ */
++void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
++{
++ int tsize = mmu_psize_defs[mmu_pte_psize].enc;
++
++ if (book3e_htw_mode != PPC_HTW_NONE) {
++ unsigned long start = address & PMD_MASK;
++ unsigned long end = address + PMD_SIZE;
++ unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
++
++ /* This isn't the most optimal, ideally we would factor out the
++	 * whole preempt & CPU mask mucking around, or even the IPI, but
++	 * it will do for now.
++ */
++ while (start < end) {
++ __flush_tlb_page(tlb->mm, start, tsize, 1);
++ start += size;
++ }
++ } else {
++ unsigned long rmask = 0xf000000000000000ul;
++ unsigned long rid = (address & rmask) | 0x1000000000000000ul;
++ unsigned long vpte = address & ~rmask;
++
++ vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
++ vpte |= rid;
++ __flush_tlb_page(tlb->mm, vpte, tsize, 0);
++ }
++}
++
++static void __init setup_page_sizes(void)
++{
++ unsigned int tlb0cfg;
++ unsigned int eptcfg;
++ int psize;
++
++#ifdef CONFIG_PPC_E500
++ unsigned int mmucfg = mfspr(SPRN_MMUCFG);
++ int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
++
++ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
++ unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
++ unsigned int min_pg, max_pg;
++
++ min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
++ max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
++
++ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
++ struct mmu_psize_def *def;
++ unsigned int shift;
++
++ def = &mmu_psize_defs[psize];
++ shift = def->shift;
++
++ if (shift == 0 || shift & 1)
++ continue;
++
++ /* adjust to be in terms of 4^shift Kb */
++ shift = (shift - 10) >> 1;
++
++ if ((shift >= min_pg) && (shift <= max_pg))
++ def->flags |= MMU_PAGE_SIZE_DIRECT;
++ }
++
++ goto out;
++ }
++
++ if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
++ u32 tlb1cfg, tlb1ps;
++
++ tlb0cfg = mfspr(SPRN_TLB0CFG);
++ tlb1cfg = mfspr(SPRN_TLB1CFG);
++ tlb1ps = mfspr(SPRN_TLB1PS);
++ eptcfg = mfspr(SPRN_EPTCFG);
++
++ if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
++ book3e_htw_mode = PPC_HTW_E6500;
++
++ /*
++ * We expect 4K subpage size and unrestricted indirect size.
++ * The lack of a restriction on indirect size is a Freescale
++ * extension, indicated by PSn = 0 but SPSn != 0.
++ */
++ if (eptcfg != 2)
++ book3e_htw_mode = PPC_HTW_NONE;
++
++ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
++ struct mmu_psize_def *def = &mmu_psize_defs[psize];
++
++ if (!def->shift)
++ continue;
++
++ if (tlb1ps & (1U << (def->shift - 10))) {
++ def->flags |= MMU_PAGE_SIZE_DIRECT;
++
++ if (book3e_htw_mode && psize == MMU_PAGE_2M)
++ def->flags |= MMU_PAGE_SIZE_INDIRECT;
++ }
++ }
++
++ goto out;
++ }
++#endif
++out:
++ /* Cleanup array and print summary */
++ pr_info("MMU: Supported page sizes\n");
++ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
++ struct mmu_psize_def *def = &mmu_psize_defs[psize];
++ const char *__page_type_names[] = {
++ "unsupported",
++ "direct",
++ "indirect",
++ "direct & indirect"
++ };
++ if (def->flags == 0) {
++ def->shift = 0;
++ continue;
++ }
++ pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
++ __page_type_names[def->flags & 0x3]);
++ }
++}
++
++static void __init setup_mmu_htw(void)
++{
++ /*
++ * If we want to use HW tablewalk, enable it by patching the TLB miss
++ * handlers to branch to the one dedicated to it.
++ */
++
++ switch (book3e_htw_mode) {
++#ifdef CONFIG_PPC_E500
++ case PPC_HTW_E6500:
++ extlb_level_exc = EX_TLB_SIZE;
++ patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
++ patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
++ break;
++#endif
++ }
++ pr_info("MMU: Book3E HW tablewalk %s\n",
++ book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
++}
++
++/*
++ * Early initialization of the MMU TLB code
++ */
++static void early_init_this_mmu(void)
++{
++ unsigned int mas4;
++
++ /* Set MAS4 based on page table setting */
++
++ mas4 = 0x4 << MAS4_WIMGED_SHIFT;
++ switch (book3e_htw_mode) {
++ case PPC_HTW_E6500:
++ mas4 |= MAS4_INDD;
++ mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
++ mas4 |= MAS4_TLBSELD(1);
++ mmu_pte_psize = MMU_PAGE_2M;
++ break;
++
++ case PPC_HTW_NONE:
++ mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
++ mmu_pte_psize = mmu_virtual_psize;
++ break;
++ }
++ mtspr(SPRN_MAS4, mas4);
++
++#ifdef CONFIG_PPC_E500
++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++ unsigned int num_cams;
++ bool map = true;
++
++ /* use a quarter of the TLBCAM for bolted linear map */
++ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
++
++ /*
++ * Only do the mapping once per core, or else the
++ * transient mapping would cause problems.
++ */
++#ifdef CONFIG_SMP
++ if (hweight32(get_tensr()) > 1)
++ map = false;
++#endif
++
++ if (map)
++ linear_map_top = map_mem_in_cams(linear_map_top,
++ num_cams, false, true);
++ }
++#endif
++
++ /* A sync won't hurt us after mucking around with
++ * the MMU configuration
++ */
++ mb();
++}
++
++static void __init early_init_mmu_global(void)
++{
++ /* XXX This should be decided at runtime based on supported
++ * page sizes in the TLB, but for now let's assume 16M is
++ * always there and a good fit (which it probably is)
++ *
++ * Freescale booke only supports 4K pages in TLB0, so use that.
++ */
++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
++ mmu_vmemmap_psize = MMU_PAGE_4K;
++ else
++ mmu_vmemmap_psize = MMU_PAGE_16M;
++
++ /* XXX This code only checks for TLB 0 capabilities and doesn't
++ * check what page size combos are supported by the HW. It
++ * also doesn't handle the case where a separate array holds
++ * the IND entries from the array loaded by the PT.
++ */
++ /* Look for supported page sizes */
++ setup_page_sizes();
++
++ /* Look for HW tablewalk support */
++ setup_mmu_htw();
++
++#ifdef CONFIG_PPC_E500
++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++ if (book3e_htw_mode == PPC_HTW_NONE) {
++ extlb_level_exc = EX_TLB_SIZE;
++ patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
++ patch_exception(0x1e0,
++ exc_instruction_tlb_miss_bolted_book3e);
++ }
++ }
++#endif
++
++ /* Set the global containing the top of the linear mapping
++ * for use by the TLB miss code
++ */
++ linear_map_top = memblock_end_of_DRAM();
++
++ ioremap_bot = IOREMAP_BASE;
++}
++
++static void __init early_mmu_set_memory_limit(void)
++{
++#ifdef CONFIG_PPC_E500
++ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++ /*
++	 * Limit memory so we don't have linear faults.
++ * Unlike memblock_set_current_limit, which limits
++ * memory available during early boot, this permanently
++ * reduces the memory available to Linux. We need to
++ * do this because highmem is not supported on 64-bit.
++ */
++ memblock_enforce_memory_limit(linear_map_top);
++ }
++#endif
++
++ memblock_set_current_limit(linear_map_top);
++}
++
++/* boot cpu only */
++void __init early_init_mmu(void)
++{
++ early_init_mmu_global();
++ early_init_this_mmu();
++ early_mmu_set_memory_limit();
++}
++
++void early_init_mmu_secondary(void)
++{
++ early_init_this_mmu();
++}
++
++void setup_initial_memory_limit(phys_addr_t first_memblock_base,
++ phys_addr_t first_memblock_size)
++{
++ /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
++ * the bolted TLB entry. We know for now that only 1G
++ * entries are supported though that may eventually
++ * change.
++ *
++ * on FSL Embedded 64-bit, usually all RAM is bolted, but with
++ * unusual memory sizes it's possible for some RAM to not be mapped
++ * (such RAM is not used at all by Linux, since we don't support
++ * highmem on 64-bit). We limit ppc64_rma_size to what would be
++ * mappable if this memblock is the only one. Additional memblocks
++ * can only increase, not decrease, the amount that ends up getting
++ * mapped. We still limit max to 1G even if we'll eventually map
++ * more. This is due to what the early init code is set up to do.
++ *
++ * We crop it to the size of the first MEMBLOCK to
++ * avoid going over total available memory just in case...
++ */
++#ifdef CONFIG_PPC_E500
++ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
++ unsigned long linear_sz;
++ unsigned int num_cams;
++
++ /* use a quarter of the TLBCAM for bolted linear map */
++ num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
++
++ linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
++ true, true);
++
++ ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
++ } else
++#endif
++ ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
++
++ /* Finally limit subsequent allocations */
++ memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
++}
+diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
+index 7e0b8fe1c27975..b0eb3f7eaed149 100644
+--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
++++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
+@@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault:
+ TLB_MISS_EPILOG_ERROR
+ b exc_data_storage_book3e
+
+-
+-/**************************************************************
+- * *
+- * TLB miss handling for Book3E with hw page table support *
+- * *
+- **************************************************************/
+-
+-
+-/* Data TLB miss */
+- START_EXCEPTION(data_tlb_miss_htw)
+- TLB_MISS_PROLOG
+-
+- /* Now we handle the fault proper. We only save DEAR in normal
+- * fault case since that's the only interesting values here.
+- * We could probably also optimize by not saving SRR0/1 in the
+- * linear mapping case but I'll leave that for later
+- */
+- mfspr r14,SPRN_ESR
+- mfspr r16,SPRN_DEAR /* get faulting address */
+- srdi r11,r16,44 /* get region */
+- xoris r11,r11,0xc
+- cmpldi cr0,r11,0 /* linear mapping ? */
+- beq tlb_load_linear /* yes -> go to linear map load */
+- cmpldi cr1,r11,1 /* vmalloc mapping ? */
+-
+- /* We do the user/kernel test for the PID here along with the RW test
+- */
+- srdi. r11,r16,60 /* Check for user region */
+- ld r15,PACAPGD(r13) /* Load user pgdir */
+- beq htw_tlb_miss
+-
+- /* XXX replace the RMW cycles with immediate loads + writes */
+-1: mfspr r10,SPRN_MAS1
+- rlwinm r10,r10,0,16,1 /* Clear TID */
+- mtspr SPRN_MAS1,r10
+- ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
+- beq+ cr1,htw_tlb_miss
+-
+- /* We got a crappy address, just fault with whatever DEAR and ESR
+- * are here
+- */
+- TLB_MISS_EPILOG_ERROR
+- b exc_data_storage_book3e
+-
+-/* Instruction TLB miss */
+- START_EXCEPTION(instruction_tlb_miss_htw)
+- TLB_MISS_PROLOG
+-
+- /* If we take a recursive fault, the second level handler may need
+- * to know whether we are handling a data or instruction fault in
+- * order to get to the right store fault handler. We provide that
+- * info by keeping a crazy value for ESR in r14
+- */
+- li r14,-1 /* store to exception frame is done later */
+-
+- /* Now we handle the fault proper. We only save DEAR in the non
+- * linear mapping case since we know the linear mapping case will
+- * not re-enter. We could indeed optimize and also not save SRR0/1
+- * in the linear mapping case but I'll leave that for later
+- *
+- * Faulting address is SRR0 which is already in r16
+- */
+- srdi r11,r16,44 /* get region */
+- xoris r11,r11,0xc
+- cmpldi cr0,r11,0 /* linear mapping ? */
+- beq tlb_load_linear /* yes -> go to linear map load */
+- cmpldi cr1,r11,1 /* vmalloc mapping ? */
+-
+- /* We do the user/kernel test for the PID here along with the RW test
+- */
+- srdi. r11,r16,60 /* Check for user region */
+- ld r15,PACAPGD(r13) /* Load user pgdir */
+- beq htw_tlb_miss
+-
+- /* XXX replace the RMW cycles with immediate loads + writes */
+-1: mfspr r10,SPRN_MAS1
+- rlwinm r10,r10,0,16,1 /* Clear TID */
+- mtspr SPRN_MAS1,r10
+- ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
+- beq+ htw_tlb_miss
+-
+- /* We got a crappy address, just fault */
+- TLB_MISS_EPILOG_ERROR
+- b exc_instruction_storage_book3e
+-
+-
+-/*
+- * This is the guts of the second-level TLB miss handler for direct
+- * misses. We are entered with:
+- *
+- * r16 = virtual page table faulting address
+- * r15 = PGD pointer
+- * r14 = ESR
+- * r13 = PACA
+- * r12 = TLB exception frame in PACA
+- * r11 = crap (free to use)
+- * r10 = crap (free to use)
+- *
+- * It can be re-entered by the linear mapping miss handler. However, to
+- * avoid too much complication, it will save/restore things for us
+- */
+-htw_tlb_miss:
+-#ifdef CONFIG_PPC_KUAP
+- mfspr r10,SPRN_MAS1
+- rlwinm. r10,r10,0,0x3fff0000
+- beq- htw_tlb_miss_fault /* KUAP fault */
+-#endif
+- /* Search if we already have a TLB entry for that virtual address, and
+- * if we do, bail out.
+- *
+- * MAS1:IND should be already set based on MAS4
+- */
+- PPC_TLBSRX_DOT(0,R16)
+- beq htw_tlb_miss_done
+-
+- /* Now, we need to walk the page tables. First check if we are in
+- * range.
+- */
+- rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+- bne- htw_tlb_miss_fault
+-
+- /* Get the PGD pointer */
+- cmpldi cr0,r15,0
+- beq- htw_tlb_miss_fault
+-
+- /* Get to PGD entry */
+- rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
+- clrrdi r10,r11,3
+- ldx r15,r10,r15
+- cmpdi cr0,r15,0
+- bge htw_tlb_miss_fault
+-
+- /* Get to PUD entry */
+- rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
+- clrrdi r10,r11,3
+- ldx r15,r10,r15
+- cmpdi cr0,r15,0
+- bge htw_tlb_miss_fault
+-
+- /* Get to PMD entry */
+- rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
+- clrrdi r10,r11,3
+- ldx r15,r10,r15
+- cmpdi cr0,r15,0
+- bge htw_tlb_miss_fault
+-
+- /* Ok, we're all right, we can now create an indirect entry for
+- * a 1M or 256M page.
+- *
+- * The last trick is now that because we use "half" pages for
+- * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
+- * for an added LSB bit to the RPN. For 64K pages, there is no
+- * problem as we already use 32K arrays (half PTE pages), but for
+- * 4K page we need to extract a bit from the virtual address and
+- * insert it into the "PA52" bit of the RPN.
+- */
+- rlwimi r15,r16,32-9,20,20
+- /* Now we build the MAS:
+- *
+- * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
+- * MAS 1 : Almost fully setup
+- * - PID already updated by caller if necessary
+- * - TSIZE for now is base ind page size always
+- * MAS 2 : Use defaults
+- * MAS 3+7 : Needs to be done
+- */
+- ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+-
+- srdi r16,r10,32
+- mtspr SPRN_MAS3,r10
+- mtspr SPRN_MAS7,r16
+-
+- tlbwe
+-
+-htw_tlb_miss_done:
+- /* We don't bother with restoring DEAR or ESR since we know we are
+- * level 0 and just going back to userland. They are only needed
+- * if you are going to take an access fault
+- */
+- TLB_MISS_EPILOG_SUCCESS
+- rfi
+-
+-htw_tlb_miss_fault:
+- /* We need to check if it was an instruction miss. We know this
+- * though because r14 would contain -1
+- */
+- cmpdi cr0,r14,-1
+- beq 1f
+- mtspr SPRN_DEAR,r16
+- mtspr SPRN_ESR,r14
+- TLB_MISS_EPILOG_ERROR
+- b exc_data_storage_book3e
+-1: TLB_MISS_EPILOG_ERROR
+- b exc_instruction_storage_book3e
+-
+ /*
+ * This is the guts of "any" level TLB miss handler for kernel linear
+ * mapping misses. We are entered with:
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index 7f91ea064c0874..06f886850a9327 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -851,6 +851,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+
+ /* Get offset into TMP_REG */
+ EMIT(PPC_RAW_LI(tmp_reg, off));
++ /*
++ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
++ * before and after the operation.
++ *
++ * This is a requirement in the Linux Kernel Memory Model.
++ * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
++ */
++ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
++ EMIT(PPC_RAW_SYNC());
+ tmp_idx = ctx->idx * 4;
+ /* load value from memory into r0 */
+ EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
+@@ -904,6 +913,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+
+ /* For the BPF_FETCH variant, get old data into src_reg */
+ if (imm & BPF_FETCH) {
++ /* Emit 'sync' to enforce full ordering */
++ if (IS_ENABLED(CONFIG_SMP))
++ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_MR(ret_reg, ax_reg));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
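The ordering rule motivating the new 'sync' instructions (applied both in this 32-bit JIT and in the 64-bit JIT below) can be restated in plain kernel C. A minimal sketch, assuming only the generic atomics API; the helper name is illustrative, not part of the patch:

#include <linux/atomic.h>

/* Under the Linux Kernel Memory Model, a value-returning atomic such as
 * a BPF_FETCH operation must be fully ordered. On powerpc that means a
 * 'sync' before the ll/sc loop and another after it, i.e. the relaxed
 * operation bracketed by full barriers:
 */
static inline int fetch_add_fully_ordered(atomic_t *v, int i)
{
	int old;

	smp_mb();	/* leading 'sync' emitted by the JIT */
	old = atomic_fetch_add_relaxed(i, v);
	smp_mb();	/* trailing 'sync' emitted by the JIT */
	return old;
}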
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 0f8048f6dad630..2239ce5e8501cd 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -803,6 +803,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+
+ /* Get offset into TMP_REG_1 */
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
++ /*
++ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
++ * before and after the operation.
++ *
++ * This is a requirement in the Linux Kernel Memory Model.
++ * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
++ */
++ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
++ EMIT(PPC_RAW_SYNC());
+ tmp_idx = ctx->idx * 4;
+ /* load value from memory into TMP_REG_2 */
+ if (size == BPF_DW)
+@@ -865,6 +874,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ if (imm & BPF_FETCH) {
++ /* Emit 'sync' to enforce full ordering */
++ if (IS_ENABLED(CONFIG_SMP))
++ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_MR(ret_reg, _R0));
+ /*
+ * Skip unnecessary zero-extension for 32-bit cmpxchg.
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 8c1f7def596e4a..10b946e9c6e756 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ /*
+ * Disable instruction sampling if it was enabled
+ */
+- if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
+- val &= ~MMCRA_SAMPLE_ENABLE;
++ val &= ~MMCRA_SAMPLE_ENABLE;
+
+ /* Disable BHRB via mmcra (BHRBRD) for p10 */
+ if (ppmu->flags & PPMU_ARCH_31)
+@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ * instruction sampling or BHRB.
+ */
+ if (val != mmcra) {
+- mtspr(SPRN_MMCRA, mmcra);
++ mtspr(SPRN_MMCRA, val);
+ mb();
+ isync();
+ }
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 39dbe6b348df28..241551d1282f80 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -534,6 +534,9 @@ static ssize_t affinity_domain_via_partition_show(struct device *dev, struct dev
+ if (!ret)
+ goto parse_result;
+
++ if (ret && (ret != H_PARAMETER))
++ goto out;
++
+ /*
+ * ret value as 'H_PARAMETER' implies that the current buffer size
+ * can't accommodate all the information, and a partial buffer
+@@ -692,6 +695,20 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
+
+ ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
+ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
++
++ /*
++	 * A ret value of 'H_PARAMETER' with detail_rc 'GEN_BUF_TOO_SMALL'
++	 * indicates that the current buffer size cannot accommodate
++	 * all the information and a partial buffer was returned.
++	 * Since this function only accesses data for a given starting index,
++	 * we don't need the whole buffer and can get the required count by
++	 * accessing the first entry's data.
++	 * Hence the hcall fails only in case the ret value is other than H_SUCCESS
++	 * or H_PARAMETER with a detail_rc value of GEN_BUF_TOO_SMALL(0x1B).
++ */
++ if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B)
++ ret = 0;
++
+ if (ret) {
+ pr_devel("hcall failed: 0x%lx\n", ret);
+ goto out;
+@@ -756,6 +773,7 @@ static int h_gpci_event_init(struct perf_event *event)
+ {
+ u64 count;
+ u8 length;
++ unsigned long ret;
+
+ /* Not our event */
+ if (event->attr.type != event->pmu->type)
+@@ -786,13 +804,23 @@ static int h_gpci_event_init(struct perf_event *event)
+ }
+
+ /* check if the request works... */
+- if (single_gpci_request(event_get_request(event),
++ ret = single_gpci_request(event_get_request(event),
+ event_get_starting_index(event),
+ event_get_secondary_index(event),
+ event_get_counter_info_version(event),
+ event_get_offset(event),
+ length,
+- &count)) {
++ &count);
++
++ /*
++	 * A ret value of H_AUTHORITY implies that the partition is not permitted
++	 * to retrieve performance information and is required to set the
++	 * "Enable Performance Information Collection" option.
++ */
++ if (ret == H_AUTHORITY)
++ return -EPERM;
++
++ if (ret) {
+ pr_devel("gpci hcall failed\n");
+ return -EINVAL;
+ }
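Taken together, the hunks above amount to a small return-code policy: H_PARAMETER with detail_rc GEN_BUF_TOO_SMALL means a partial buffer, which is acceptable because only the first entry is consumed, while H_AUTHORITY means the platform forbids the query. A hedged sketch of that policy as a standalone helper (the function name and the macro are illustrative):

#include <linux/errno.h>
#include <asm/hvcall.h>

#define HV_GEN_BUF_TOO_SMALL	0x1B	/* detail_rc value cited above */

static long gpci_ret_to_errno(long ret, u32 detail_rc)
{
	if (ret == H_SUCCESS)
		return 0;
	/* Partial buffer is fine: only the first entry's data is read */
	if (ret == H_PARAMETER && detail_rc == HV_GEN_BUF_TOO_SMALL)
		return 0;
	/* "Enable Performance Information Collection" is not set */
	if (ret == H_AUTHORITY)
		return -EPERM;
	return -EINVAL;
}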
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 9d229ef7f86efa..56d82f7f9734e0 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
+ * core and trace-imc
+ */
+ static struct imc_pmu_ref imc_global_refc = {
+- .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
+ .id = 0,
+ .refc = 0,
+ };
+@@ -299,6 +299,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ attr_group->attrs = attrs;
+ do {
+ ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
++ if (!ev_val_str)
++ continue;
+ dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
+ if (!dev_str)
+ continue;
+@@ -306,6 +308,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+ attrs[j++] = dev_str;
+ if (pmu->events[i].scale) {
+ ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
++ if (!ev_scale_str)
++ continue;
+ dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
+ if (!dev_str)
+ continue;
+@@ -315,6 +319,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+
+ if (pmu->events[i].unit) {
+ ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
++ if (!ev_unit_str)
++ continue;
+ dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
+ if (!dev_str)
+ continue;
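The same hardening recurs throughout this patch, in imc-pmu.c here and later in opal-irqchip.c, opal-powercap.c, opal-xscom.c and icp-native.c: kasprintf() allocates and can return NULL, so every result is now checked before use. A minimal sketch of the pattern, with an illustrative helper name:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int add_event_attr(u32 value)
{
	char *ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", value);

	if (!ev_val_str)	/* allocation failed: skip, don't oops */
		return -ENOMEM;
	/* ... create the device attribute from ev_val_str ... */
	return 0;
}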
+diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
+index 1624ebf95497ba..35a1f4b9f8272b 100644
+--- a/arch/powerpc/platforms/44x/Kconfig
++++ b/arch/powerpc/platforms/44x/Kconfig
+@@ -173,6 +173,7 @@ config ISS4xx
+ config CURRITUCK
+ bool "IBM Currituck (476fpe) Support"
+ depends on PPC_47x
++ select I2C
+ select SWIOTLB
+ select 476FPE
+ select FORCE_PCI
+diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
+index 77ea9335fd049c..f381b177ea06ad 100644
+--- a/arch/powerpc/platforms/book3s/vas-api.c
++++ b/arch/powerpc/platforms/book3s/vas-api.c
+@@ -4,6 +4,8 @@
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
++#define pr_fmt(fmt) "vas-api: " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+- pr_err("VAS: pid(%d): mm_struct is not found\n",
++ pr_err("pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+- pid_vnr(pid), rc);
++ pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
+ }
+
+ void vas_dump_crb(struct coprocessor_request_block *crb)
+@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
++ pr_err("copy_from_user() returns %d\n", rc);
+ return -EFAULT;
+ }
+
+@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+- pr_err("%s() VAS window open failed, %ld\n", __func__,
++ pr_err_ratelimited("VAS window open failed rc=%ld\n",
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ * window is not opened. Shouldn't expect this error.
+ */
+ if (!cp_inst || !cp_inst->txwin) {
+- pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
+- __func__);
++ pr_err("Unexpected fault on paste address with TX window closed\n");
+ return VM_FAULT_SIGBUS;
+ }
+
+@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+ * issue NX request.
+ */
+ if (txwin->task_ref.vma != vmf->vma) {
+- pr_err("%s(): No previous mapping with paste address\n",
+- __func__);
++ pr_err("No previous mapping with paste address\n");
+ return VM_FAULT_SIGBUS;
+ }
+
+@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
++ pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+- pr_err("%s(): No send window open?\n", __func__);
++ pr_err("No send window open?\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+- pr_err("%s(): VAS API is not registered\n", __func__);
++ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ */
+ mutex_lock(&txwin->task_ref.mmap_mutex);
+ if (txwin->status != VAS_WIN_ACTIVE) {
+- pr_err("%s(): Window is not active\n", __func__);
++ pr_err("Window is not active\n");
+ rc = -EACCES;
+ goto out;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+- pr_err("%s(): Window paste address failed\n", __func__);
++ pr_err("Window paste address failed\n");
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+- paste_addr, vma->vm_start, rc);
++ pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
++ vma->vm_start, rc);
+
+ txwin->task_ref.vma = vma;
+ vma->vm_ops = &vas_vm_ops;
+@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ goto err;
+ }
+
+- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+- MINOR(devno));
++ pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
+
+ return 0;
+
+diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
+index 9c10aac40c7b11..e265f026eee2a9 100644
+--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
++++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
+@@ -99,9 +99,6 @@ static void __init linkstation_init_IRQ(void)
+ mpic_init(mpic);
+ }
+
+-extern void avr_uart_configure(void);
+-extern void avr_uart_send(const char);
+-
+ static void __noreturn linkstation_restart(char *cmd)
+ {
+ local_irq_disable();
+diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
+index 5ad12023e56280..ebc258fa4858d0 100644
+--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
++++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
+@@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose);
+ /* For MPC107 boards that use the built-in openpic */
+ void mpc10x_set_openpic(void);
+
++void avr_uart_configure(void);
++void avr_uart_send(const char c);
++
+ #endif /* __PPC_KERNEL_MPC10X_H */
+diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
+index f9a7001dacb7a1..56a1f7ce78d2c7 100644
+--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
++++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
+@@ -275,6 +275,8 @@ int __init opal_event_init(void)
+ else
+ name = kasprintf(GFP_KERNEL, "opal");
+
++ if (!name)
++ continue;
+ /* Install interrupt handler */
+ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK,
+ name, NULL);
+diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
+index 7bfe4cbeb35a99..ea917266aa1725 100644
+--- a/arch/powerpc/platforms/powernv/opal-powercap.c
++++ b/arch/powerpc/platforms/powernv/opal-powercap.c
+@@ -196,6 +196,12 @@ void __init opal_powercap_init(void)
+
+ j = 0;
+ pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
++ if (!pcaps[i].pg.name) {
++ kfree(pcaps[i].pattrs);
++ kfree(pcaps[i].pg.attrs);
++ goto out_pcaps_pattrs;
++ }
++
+ if (has_min) {
+ powercap_add_attr(min, "powercap-min",
+ &pcaps[i].pattrs[j]);
+diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
+index 262cd6fac90714..748c2b97fa5370 100644
+--- a/arch/powerpc/platforms/powernv/opal-xscom.c
++++ b/arch/powerpc/platforms/powernv/opal-xscom.c
+@@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
+ ent->chip = chip;
+ snprintf(ent->name, 16, "%08x", chip);
+ ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
++ if (!ent->path.data) {
++ kfree(ent);
++ return -ENOMEM;
++ }
++
+ ent->path.size = strlen((char *)ent->path.data);
+
+ dir = debugfs_create_dir(ent->name, root);
+diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
+index a44869e5ea70f8..1bd1b0b49bc62a 100644
+--- a/arch/powerpc/platforms/ps3/Kconfig
++++ b/arch/powerpc/platforms/ps3/Kconfig
+@@ -67,6 +67,7 @@ config PS3_VUART
+ config PS3_PS3AV
+ depends on PPC_PS3
+ tristate "PS3 AV settings driver" if PS3_ADVANCED
++ select VIDEO
+ select PS3_VUART
+ default y
+ help
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 47f8eabd1bee31..9873b916b23704 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -334,23 +334,6 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+ {
+ int rc;
+
+- /* pseries error logs are in BE format, convert to cpu type */
+- switch (hp_elog->id_type) {
+- case PSERIES_HP_ELOG_ID_DRC_COUNT:
+- hp_elog->_drc_u.drc_count =
+- be32_to_cpu(hp_elog->_drc_u.drc_count);
+- break;
+- case PSERIES_HP_ELOG_ID_DRC_INDEX:
+- hp_elog->_drc_u.drc_index =
+- be32_to_cpu(hp_elog->_drc_u.drc_index);
+- break;
+- case PSERIES_HP_ELOG_ID_DRC_IC:
+- hp_elog->_drc_u.ic.count =
+- be32_to_cpu(hp_elog->_drc_u.ic.count);
+- hp_elog->_drc_u.ic.index =
+- be32_to_cpu(hp_elog->_drc_u.ic.index);
+- }
+-
+ switch (hp_elog->resource) {
+ case PSERIES_HP_ELOG_RESOURCE_MEM:
+ rc = dlpar_memory(hp_elog);
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index e62835a12d73fc..6838a0fcda296b 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -757,7 +757,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
+ u32 drc_index;
+ int rc;
+
+- drc_index = hp_elog->_drc_u.drc_index;
++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
+
+ lock_device_hotplug();
+
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index aa4042dcd6d40e..95ff84c55cb144 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -435,14 +435,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
+ }
+ }
+
+- if (!lmb_found)
++ if (!lmb_found) {
++ pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
+ rc = -EINVAL;
+-
+- if (rc)
++ } else if (rc) {
+ pr_debug("Failed to hot-remove memory at %llx\n",
+ lmb->base_addr);
+- else
++ } else {
+ pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
++ }
+
+ return rc;
+ }
+@@ -810,16 +811,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+ case PSERIES_HP_ELOG_ACTION_ADD:
+ switch (hp_elog->id_type) {
+ case PSERIES_HP_ELOG_ID_DRC_COUNT:
+- count = hp_elog->_drc_u.drc_count;
++ count = be32_to_cpu(hp_elog->_drc_u.drc_count);
+ rc = dlpar_memory_add_by_count(count);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_INDEX:
+- drc_index = hp_elog->_drc_u.drc_index;
++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
+ rc = dlpar_memory_add_by_index(drc_index);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_IC:
+- count = hp_elog->_drc_u.ic.count;
+- drc_index = hp_elog->_drc_u.ic.index;
++ count = be32_to_cpu(hp_elog->_drc_u.ic.count);
++ drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
+ rc = dlpar_memory_add_by_ic(count, drc_index);
+ break;
+ default:
+@@ -831,16 +832,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+ case PSERIES_HP_ELOG_ACTION_REMOVE:
+ switch (hp_elog->id_type) {
+ case PSERIES_HP_ELOG_ID_DRC_COUNT:
+- count = hp_elog->_drc_u.drc_count;
++ count = be32_to_cpu(hp_elog->_drc_u.drc_count);
+ rc = dlpar_memory_remove_by_count(count);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_INDEX:
+- drc_index = hp_elog->_drc_u.drc_index;
++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
+ rc = dlpar_memory_remove_by_index(drc_index);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_IC:
+- count = hp_elog->_drc_u.ic.count;
+- drc_index = hp_elog->_drc_u.ic.index;
++ count = be32_to_cpu(hp_elog->_drc_u.ic.count);
++ drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
+ rc = dlpar_memory_remove_by_ic(count, drc_index);
+ break;
+ default:
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 16d93b580f61f1..b1e6d275cda9eb 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -574,29 +574,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
+
+ struct iommu_table_ops iommu_table_lpar_multi_ops;
+
+-/*
+- * iommu_table_setparms_lpar
+- *
+- * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
+- */
+-static void iommu_table_setparms_lpar(struct pci_controller *phb,
+- struct device_node *dn,
+- struct iommu_table *tbl,
+- struct iommu_table_group *table_group,
+- const __be32 *dma_window)
+-{
+- unsigned long offset, size, liobn;
+-
+- of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
+-
+- iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
+- &iommu_table_lpar_multi_ops);
+-
+-
+- table_group->tce32_start = offset;
+- table_group->tce32_size = size;
+-}
+-
+ struct iommu_table_ops iommu_table_pseries_ops = {
+ .set = tce_build_pSeries,
+ .clear = tce_free_pSeries,
+@@ -724,26 +701,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
+ * dynamic 64bit DMA window, walking up the device tree.
+ */
+ static struct device_node *pci_dma_find(struct device_node *dn,
+- const __be32 **dma_window)
++ struct dynamic_dma_window_prop *prop)
+ {
+- const __be32 *dw = NULL;
++ const __be32 *default_prop = NULL;
++ const __be32 *ddw_prop = NULL;
++ struct device_node *rdn = NULL;
++ bool default_win = false, ddw_win = false;
+
+ for ( ; dn && PCI_DN(dn); dn = dn->parent) {
+- dw = of_get_property(dn, "ibm,dma-window", NULL);
+- if (dw) {
+- if (dma_window)
+- *dma_window = dw;
+- return dn;
++ default_prop = of_get_property(dn, "ibm,dma-window", NULL);
++ if (default_prop) {
++ rdn = dn;
++ default_win = true;
+ }
+- dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+- if (dw)
+- return dn;
+- dw = of_get_property(dn, DMA64_PROPNAME, NULL);
+- if (dw)
+- return dn;
++ ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
++ if (ddw_prop) {
++ rdn = dn;
++ ddw_win = true;
++ break;
++ }
++ ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
++ if (ddw_prop) {
++ rdn = dn;
++ ddw_win = true;
++ break;
++ }
++
++ /* At least found default window, which is the case for normal boot */
++ if (default_win)
++ break;
+ }
+
+- return NULL;
++ /* For PCI devices there will always be a DMA window, either on the device
++	 * or the parent bus.
++ */
++ WARN_ON(!(default_win | ddw_win));
++
++ /* caller doesn't want to get DMA window property */
++ if (!prop)
++ return rdn;
++
++	 /* Parse the DMA window property. During normal system boot, only the
++	 * default DMA window is passed in OF. But, for kdump, a dedicated adapter
++	 * might have both the default window and a DDW in the FDT. In this
++	 * scenario, the DDW takes precedence over the default window.
++ */
++ if (ddw_win) {
++ struct dynamic_dma_window_prop *p;
++
++ p = (struct dynamic_dma_window_prop *)ddw_prop;
++ prop->liobn = p->liobn;
++ prop->dma_base = p->dma_base;
++ prop->tce_shift = p->tce_shift;
++ prop->window_shift = p->window_shift;
++ } else if (default_win) {
++ unsigned long offset, size, liobn;
++
++ of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
++
++ prop->liobn = cpu_to_be32((u32)liobn);
++ prop->dma_base = cpu_to_be64(offset);
++ prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
++ prop->window_shift = cpu_to_be32(order_base_2(size));
++ }
++
++ return rdn;
+ }
+
+ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
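pci_dma_find() now hands back a normalised dynamic_dma_window_prop regardless of which property it found, with every field still big-endian. A hedged sketch of how a caller unpacks it, mirroring the iommu_table_setparms_common() calls in the hunks below (illustrative helper, assuming the struct layout used in iommu.c):

static void decode_dma_window(const struct dynamic_dma_window_prop *prop)
{
	u32 liobn = be32_to_cpu(prop->liobn);
	u64 dma_base = be64_to_cpu(prop->dma_base);
	u32 tce_shift = be32_to_cpu(prop->tce_shift);
	u64 window_size = 1ULL << be32_to_cpu(prop->window_shift);

	pr_debug("liobn=%x base=%llx tce_shift=%u size=%llx\n",
		 liobn, dma_base, tce_shift, window_size);
}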
+@@ -751,17 +773,28 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+ struct iommu_table *tbl;
+ struct device_node *dn, *pdn;
+ struct pci_dn *ppci;
+- const __be32 *dma_window = NULL;
++ struct dynamic_dma_window_prop prop;
+
+ dn = pci_bus_to_OF_node(bus);
+
+ pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
+ dn);
+
+- pdn = pci_dma_find(dn, &dma_window);
++ pdn = pci_dma_find(dn, &prop);
++
++	 /* On the PPC architecture, there will always be a DMA window on the bus
++	 * or one of the parent buses. During reboot, there will be an
++	 * ibm,dma-window property to define the DMA window. For kdump, there will
++	 * at least be a default window, a DDW, or both.
++	 * There is an exception to the above: in case the PE goes into the frozen
++	 * state, firmware may not provide the ibm,dma-window property at the time
++	 * of LPAR boot up.
++ */
+
+- if (dma_window == NULL)
++ if (!pdn) {
+ pr_debug(" no ibm,dma-window property !\n");
++ return;
++ }
+
+ ppci = PCI_DN(pdn);
+
+@@ -771,13 +804,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
+ if (!ppci->table_group) {
+ ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
+ tbl = ppci->table_group->tables[0];
+- if (dma_window) {
+- iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
+- ppci->table_group, dma_window);
+
+- if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+- panic("Failed to initialize iommu table");
+- }
++ iommu_table_setparms_common(tbl, ppci->phb->bus->number,
++ be32_to_cpu(prop.liobn),
++ be64_to_cpu(prop.dma_base),
++ 1ULL << be32_to_cpu(prop.window_shift),
++ be32_to_cpu(prop.tce_shift), NULL,
++ &iommu_table_lpar_multi_ops);
++
++		 /* These are only used for a normal boot with the default window. It
++		  * doesn't matter if we set them from a 64-bit DDW during kdump, since
++		  * they will not be used during kdump.
++ */
++ ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
++ ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
++
++ if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
++ panic("Failed to initialize iommu table");
++
+ iommu_register_group(ppci->table_group,
+ pci_domain_nr(bus), 0);
+ pr_debug(" created table: %p\n", ppci->table_group);
+@@ -914,7 +958,8 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_
+ return 0;
+ }
+
+-static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
++static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift,
++ bool *direct_mapping)
+ {
+ struct dma_win *window;
+ const struct dynamic_dma_window_prop *dma64;
+@@ -927,6 +972,7 @@ static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *windo
+ dma64 = window->prop;
+ *dma_addr = be64_to_cpu(dma64->dma_base);
+ *window_shift = be32_to_cpu(dma64->window_shift);
++ *direct_mapping = window->direct;
+ found = true;
+ break;
+ }
+@@ -966,6 +1012,12 @@ static void find_existing_ddw_windows_named(const char *name)
+ continue;
+ }
+
++ /* If at the time of system initialization, there are DDWs in OF,
++ * it means this is during kexec. DDW could be direct or dynamic.
++	 * We will just mark DDWs as "dynamic" since this is the kdump path;
++	 * no need to worry about performance. ddw_list_new_entry() will
++ * set window->direct = false.
++ */
+ window = ddw_list_new_entry(pdn, dma64);
+ if (!window) {
+ of_node_put(pdn);
+@@ -1270,10 +1322,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+
+ mutex_lock(&dma_win_init_mutex);
+
+- if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
+- direct_mapping = (len >= max_ram_len);
++ if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len, &direct_mapping))
+ goto out_unlock;
+- }
+
+ /*
+ * If we already went through this for a previous function of
+@@ -1524,8 +1574,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ {
+ struct device_node *pdn, *dn;
+ struct iommu_table *tbl;
+- const __be32 *dma_window = NULL;
+ struct pci_dn *pci;
++ struct dynamic_dma_window_prop prop;
+
+ pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
+
+@@ -1538,7 +1588,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ dn = pci_device_to_OF_node(dev);
+ pr_debug(" node is %pOF\n", dn);
+
+- pdn = pci_dma_find(dn, &dma_window);
++ pdn = pci_dma_find(dn, &prop);
+ if (!pdn || !PCI_DN(pdn)) {
+ printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
+ "no DMA window found for pci dev=%s dn=%pOF\n",
+@@ -1551,8 +1601,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+ if (!pci->table_group) {
+ pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
+ tbl = pci->table_group->tables[0];
+- iommu_table_setparms_lpar(pci->phb, pdn, tbl,
+- pci->table_group, dma_window);
++
++ iommu_table_setparms_common(tbl, pci->phb->bus->number,
++ be32_to_cpu(prop.liobn),
++ be64_to_cpu(prop.dma_base),
++ 1ULL << be32_to_cpu(prop.window_shift),
++ be32_to_cpu(prop.tce_shift), NULL,
++ &iommu_table_lpar_multi_ops);
++
++		 /* These are only used for a normal boot with the default window. It
++		  * doesn't matter if we set them from a 64-bit DDW during kdump, since
++		  * they will not be used during kdump.
++ */
++ pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
++ pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
+
+ iommu_init_table(tbl, pci->phb->node, 0, 0);
+ iommu_register_group(pci->table_group,
+diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
+index 096d09ed89f673..431be156ca9bb3 100644
+--- a/arch/powerpc/platforms/pseries/kexec.c
++++ b/arch/powerpc/platforms/pseries/kexec.c
+@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+ } else
+ xics_kexec_teardown_cpu(secondary);
+ }
+-
+-void pseries_machine_kexec(struct kimage *image)
+-{
+- if (firmware_has_feature(FW_FEATURE_SET_MODE))
+- pseries_disable_reloc_on_exc();
+-
+- default_machine_kexec(image);
+-}
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index f2cb62148f36f4..c3585e90c6db6b 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -526,8 +526,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
+
+ if (cmd) {
+ rc = init_cpu_associativity();
+- if (rc)
++ if (rc) {
++ destroy_cpu_associativity();
+ goto out;
++ }
+
+ for_each_possible_cpu(cpu) {
+ disp = per_cpu_ptr(&vcpu_disp_data, cpu);
+@@ -660,8 +662,12 @@ u64 pseries_paravirt_steal_clock(int cpu)
+ {
+ struct lppaca *lppaca = &lppaca_of(cpu);
+
+- return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
+- be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb));
++ /*
++ * VPA steal time counters are reported at TB frequency. Hence do a
++	 * conversion to ns before returning.
++ */
++ return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
++ be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)));
+ }
+ #endif
+
+@@ -1880,10 +1886,10 @@ notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+ * h_get_mpp
+ * H_GET_MPP hcall returns info in 7 parms
+ */
+-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
++long h_get_mpp(struct hvcall_mpp_data *mpp_data)
+ {
+- int rc;
+- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
++ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
++ long rc;
+
+ rc = plpar_hcall9(H_GET_MPP, retbuf);
+
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index 1c151d77e74b34..11d5208817b9d4 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -113,8 +113,8 @@ struct hvcall_ppp_data {
+ */
+ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
+ {
+- unsigned long rc;
+- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
++ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
++ long rc;
+
+ rc = plpar_hcall9(H_GET_PPP, retbuf);
+
+@@ -193,7 +193,7 @@ static void parse_ppp_data(struct seq_file *m)
+ struct hvcall_ppp_data ppp_data;
+ struct device_node *root;
+ const __be32 *perf_level;
+- int rc;
++ long rc;
+
+ rc = h_get_ppp(&ppp_data);
+ if (rc)
+@@ -357,8 +357,8 @@ static int read_dt_lpar_name(struct seq_file *m)
+
+ static void read_lpar_name(struct seq_file *m)
+ {
+- if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
+- pr_err_once("Error can't get the LPAR name");
++ if (read_rtas_lpar_name(m))
++ read_dt_lpar_name(m);
+ }
+
+ #define SPLPAR_MAXLENGTH 1026*(sizeof(char))
+diff --git a/arch/powerpc/platforms/pseries/papr-sysparm.c b/arch/powerpc/platforms/pseries/papr-sysparm.c
+index fedc61599e6cc7..a1e7aeac741616 100644
+--- a/arch/powerpc/platforms/pseries/papr-sysparm.c
++++ b/arch/powerpc/platforms/pseries/papr-sysparm.c
+@@ -23,6 +23,46 @@ void papr_sysparm_buf_free(struct papr_sysparm_buf *buf)
+ kfree(buf);
+ }
+
++static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf)
++{
++ return be16_to_cpu(buf->len);
++}
++
++static void papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length)
++{
++ WARN_ONCE(length > sizeof(buf->val),
++ "bogus length %zu, clamping to safe value", length);
++ length = min(sizeof(buf->val), length);
++ buf->len = cpu_to_be16(length);
++}
++
++/*
++ * For use on buffers returned from ibm,get-system-parameter before
++ * returning them to callers. Ensures the encoded length of valid data
++ * cannot overrun buf->val[].
++ */
++static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf)
++{
++ papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf));
++}
++
++/*
++ * Perform some basic diligence on the system parameter buffer before
++ * submitting it to RTAS.
++ */
++static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf)
++{
++ /*
++ * Firmware ought to reject buffer lengths that exceed the
++ * maximum specified in PAPR, but there's no reason for the
++ * kernel to allow them either.
++ */
++ if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val))
++ return false;
++
++ return true;
++}
++
+ /**
+ * papr_sysparm_get() - Retrieve the value of a PAPR system parameter.
+ * @param: PAPR system parameter token as described in
+@@ -63,6 +103,9 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
++ if (!papr_sysparm_buf_can_submit(buf))
++ return -EINVAL;
++
+ work_area = rtas_work_area_alloc(sizeof(*buf));
+
+ memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
+@@ -77,6 +120,7 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
+ case 0:
+ ret = 0;
+ memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf));
++ papr_sysparm_buf_clamp_length(buf);
+ break;
+ case -3: /* parameter not implemented */
+ ret = -EOPNOTSUPP;
+@@ -115,6 +159,9 @@ int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf)
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
++ if (!papr_sysparm_buf_can_submit(buf))
++ return -EINVAL;
++
+ work_area = rtas_work_area_alloc(sizeof(*buf));
+
+ memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
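Both entry points now validate buffers on the way in (papr_sysparm_buf_can_submit) and clamp them on the way out (papr_sysparm_buf_clamp_length), so an ill-encoded length can never index past buf->val[]. A small sketch of the invariant this maintains, assuming the helpers above (the check itself is illustrative, not part of the patch):

static void assert_sysparm_len_ok(const struct papr_sysparm_buf *buf)
{
	/* holds after papr_sysparm_buf_clamp_length() has run */
	WARN_ON(be16_to_cpu(buf->len) > sizeof(buf->val));
}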
+diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
+index 526c621b098bec..eea2041b270b54 100644
+--- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c
++++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
+@@ -101,10 +101,12 @@ static int papr_get_attr(u64 id, struct energy_scale_attribute *esi)
+ esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs);
+
+ temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL);
+- if (temp_buf)
++ if (temp_buf) {
+ buf = temp_buf;
+- else
+- return -ENOMEM;
++ } else {
++ ret = -ENOMEM;
++ goto out_buf;
++ }
+
+ goto retry;
+ }
+diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
+index 4ba82456811921..4448386268d991 100644
+--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
+@@ -35,6 +35,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
+
+ pseries_msi_allocate_domains(phb);
+
++ ppc_iommu_register_device(phb);
++
+ /* Create EEH devices for the PHB */
+ eeh_phb_pe_create(phb);
+
+@@ -76,6 +78,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
+ }
+ }
+
++ ppc_iommu_unregister_device(phb);
++
+ pseries_msi_free_domains(phb);
+
+ /* Keep a reference so phb isn't freed yet */
+diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
+index 2d40304eb6c164..ed492d38f6ad6b 100644
+--- a/arch/powerpc/platforms/pseries/plpks.c
++++ b/arch/powerpc/platforms/pseries/plpks.c
+@@ -415,8 +415,7 @@ static int plpks_confirm_object_flushed(struct label *label,
+ break;
+ }
+
+- usleep_range(PLPKS_FLUSH_SLEEP,
+- PLPKS_FLUSH_SLEEP + PLPKS_FLUSH_SLEEP_RANGE);
++ fsleep(PLPKS_FLUSH_SLEEP);
+ timeout = timeout + PLPKS_FLUSH_SLEEP;
+ } while (timeout < PLPKS_MAX_TIMEOUT);
+
+@@ -464,9 +463,10 @@ int plpks_signed_update_var(struct plpks_var *var, u64 flags)
+
+ continuetoken = retbuf[0];
+ if (pseries_status_to_err(rc) == -EBUSY) {
+- int delay_ms = get_longbusy_msecs(rc);
+- mdelay(delay_ms);
+- timeout += delay_ms;
++ int delay_us = get_longbusy_msecs(rc) * 1000;
++
++ fsleep(delay_us);
++ timeout += delay_us;
+ }
+ rc = pseries_status_to_err(rc);
+ } while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT);
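Both plpks changes swap fixed or busy waits for fsleep(), which picks udelay(), usleep_range() or msleep() based on the requested duration; note that the H_BUSY hint from get_longbusy_msecs() is in milliseconds and must be scaled to microseconds. A hedged sketch of the backoff step (illustrative helper):

#include <linux/delay.h>
#include <asm/hvcall.h>

static unsigned int longbusy_backoff_us(long rc)
{
	unsigned int delay_us = get_longbusy_msecs(rc) * 1000;

	fsleep(delay_us);	/* sleeps instead of spinning for long waits */
	return delay_us;	/* caller adds this to its timeout budget */
}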
+diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
+index 3c290b9ed01b39..0f1d45f32e4a44 100644
+--- a/arch/powerpc/platforms/pseries/pmem.c
++++ b/arch/powerpc/platforms/pseries/pmem.c
+@@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
+ return -EINVAL;
+ }
+
+- drc_index = hp_elog->_drc_u.drc_index;
++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
+
+ lock_device_hotplug();
+
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index 8376f03f932a45..dd6c569f680687 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
+ #endif
+
+ extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+-void pseries_machine_kexec(struct kimage *image);
+
+ extern void pSeries_final_fixup(void);
+
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index ecea85c74c43fa..1feb6b919bd970 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
+ {
+ void (*ctor)(void *) = get_dtl_cache_ctor();
+
+- dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+- DISPATCH_LOG_BYTES, 0, ctor);
++ dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
++ DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
+ if (!dtl_cache) {
+ pr_warn("Failed to create dispatch trace log buffer cache\n");
+ pr_warn("Stolen time statistics will be unreliable\n");
+@@ -1153,7 +1153,6 @@ define_machine(pseries) {
+ .machine_check_exception = pSeries_machine_check_exception,
+ .machine_check_log_err = pSeries_machine_check_log_err,
+ #ifdef CONFIG_KEXEC_CORE
+- .machine_kexec = pseries_machine_kexec,
+ .kexec_cpu_down = pseries_kexec_cpu_down,
+ #endif
+ #ifdef CONFIG_MEMORY_HOTPLUG
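The switch to kmem_cache_create_usercopy() in the first hunk matters because dispatch trace log buffers are copied out to user space: with hardened usercopy enabled, a copy from a slab object must fall within a region the cache has declared. A minimal sketch of the call shape, with an illustrative cache name:

#include <linux/slab.h>

static struct kmem_cache *dtl_like_cache(unsigned int bytes, void (*ctor)(void *))
{
	/* useroffset 0 with usersize == object size whitelists the whole
	 * object for copy_to_user() under CONFIG_HARDENED_USERCOPY
	 */
	return kmem_cache_create_usercopy("example-dtl", bytes, bytes, 0,
					  0, bytes, ctor);
}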
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index e25ac52acf5073..71d52a670d951b 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -341,7 +341,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+
+ if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
+ atomic_read(&cop_feat_caps->nr_total_credits)) {
+- pr_err("Credits are not available to allocate window\n");
++ pr_err_ratelimited("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -385,11 +385,15 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ * same fault IRQ is not freed by the OS before.
+ */
+ mutex_lock(&vas_pseries_mutex);
+- if (migration_in_progress)
++ if (migration_in_progress) {
+ rc = -EBUSY;
+- else
++ } else {
+ rc = allocate_setup_window(txwin, (u64 *)&domain[0],
+ cop_feat_caps->win_type);
++ if (!rc)
++ caps->nr_open_wins_progress++;
++ }
++
+ mutex_unlock(&vas_pseries_mutex);
+ if (rc)
+ goto out;
+@@ -404,8 +408,17 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ goto out_free;
+
+ txwin->win_type = cop_feat_caps->win_type;
+- mutex_lock(&vas_pseries_mutex);
++
+ /*
++	 * The migration SUSPEND thread sets migration_in_progress and
++	 * closes all open windows from the list. But a window is
++	 * added to the list only after the open and modify HCALLs. So it
++	 * is possible that migration_in_progress is set before the modify
++	 * HCALL, which may leave some windows open when the hypervisor
++	 * initiates the migration.
++	 * So check the migration_in_progress flag again and close all
++	 * open windows.
++ *
+ * Possible to lose the acquired credit with DLPAR core
+ * removal after the window is opened. So if there are any
+ * closed windows (means with lost credits), do not give new
+@@ -413,9 +426,11 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ * after the existing windows are reopened when credits are
+ * available.
+ */
+- if (!caps->nr_close_wins) {
++ mutex_lock(&vas_pseries_mutex);
++ if (!caps->nr_close_wins && !migration_in_progress) {
+ list_add(&txwin->win_list, &caps->list);
+ caps->nr_open_windows++;
++ caps->nr_open_wins_progress--;
+ mutex_unlock(&vas_pseries_mutex);
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
+ return &txwin->vas_win;
+@@ -424,7 +439,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+
+ put_vas_user_win_ref(&txwin->vas_win.task_ref);
+ rc = -EBUSY;
+- pr_err("No credit is available to allocate window\n");
++ pr_err_ratelimited("No credit is available to allocate window\n");
+
+ out_free:
+ /*
+@@ -433,6 +448,12 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ */
+ free_irq_setup(txwin);
+ h_deallocate_vas_window(txwin->vas_win.winid);
++ /*
++ * Hold mutex and reduce nr_open_wins_progress counter.
++ */
++ mutex_lock(&vas_pseries_mutex);
++ caps->nr_open_wins_progress--;
++ mutex_unlock(&vas_pseries_mutex);
+ out:
+ atomic_dec(&cop_feat_caps->nr_used_credits);
+ kfree(txwin);
+@@ -937,14 +958,14 @@ int vas_migration_handler(int action)
+ struct vas_caps *vcaps;
+ int i, rc = 0;
+
++ pr_info("VAS migration event %d\n", action);
++
+ /*
+ * NX-GZIP is not enabled. Nothing to do for migration.
+ */
+ if (!copypaste_feat)
+ return rc;
+
+- mutex_lock(&vas_pseries_mutex);
+-
+ if (action == VAS_SUSPEND)
+ migration_in_progress = true;
+ else
+@@ -990,12 +1011,27 @@ int vas_migration_handler(int action)
+
+ switch (action) {
+ case VAS_SUSPEND:
++ mutex_lock(&vas_pseries_mutex);
+ rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows,
+ true);
++ /*
++	 * Windows are added to the list only after a successful
++	 * open. So wait here for these in-progress opens to be
++	 * closed in vas_allocate_window(), which happens once
++	 * migration_in_progress is set.
++ */
++ while (vcaps->nr_open_wins_progress) {
++ mutex_unlock(&vas_pseries_mutex);
++ msleep(10);
++ mutex_lock(&vas_pseries_mutex);
++ }
++ mutex_unlock(&vas_pseries_mutex);
+ break;
+ case VAS_RESUME:
++ mutex_lock(&vas_pseries_mutex);
+ atomic_set(&caps->nr_total_credits, new_nr_creds);
+ rc = reconfig_open_windows(vcaps, new_nr_creds, true);
++ mutex_unlock(&vas_pseries_mutex);
+ break;
+ default:
+ /* should not happen */
+@@ -1011,8 +1047,9 @@ int vas_migration_handler(int action)
+ goto out;
+ }
+
++ pr_info("VAS migration event (%d) successful\n", action);
++
+ out:
+- mutex_unlock(&vas_pseries_mutex);
+ return rc;
+ }
+
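The SUSPEND path above waits for in-flight opens by repeatedly dropping the mutex, sleeping, and re-checking the counter: holding the lock across the sleep would deadlock against vas_allocate_window(), which must take it to decrement nr_open_wins_progress. The pattern in isolation, as a hedged sketch assuming the vas_caps structure from this patch:

#include <linux/delay.h>
#include <linux/mutex.h>

static void wait_for_inflight_opens(struct vas_caps *vcaps, struct mutex *lock)
{
	mutex_lock(lock);
	while (vcaps->nr_open_wins_progress) {
		/* let openers finish and decrement the counter */
		mutex_unlock(lock);
		msleep(10);
		mutex_lock(lock);
	}
	mutex_unlock(lock);
}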
+diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
+index 7115043ec48830..45567cd1317837 100644
+--- a/arch/powerpc/platforms/pseries/vas.h
++++ b/arch/powerpc/platforms/pseries/vas.h
+@@ -91,6 +91,8 @@ struct vas_cop_feat_caps {
+ struct vas_caps {
+ struct vas_cop_feat_caps caps;
+ struct list_head list; /* List of open windows */
++ int nr_open_wins_progress; /* Number of open windows in */
++ /* progress. Used in migration */
+ int nr_close_wins; /* closed windows in the hypervisor for DLPAR */
+ int nr_open_windows; /* Number of successful open windows */
+ u8 feat; /* Feature type */
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index 57978a44d55b69..ce9895633c4e0f 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -568,10 +568,12 @@ static const struct fsl_msi_feature ipic_msi_feature = {
+ .msiir_offset = 0x38,
+ };
+
++#ifdef CONFIG_EPAPR_PARAVIRT
+ static const struct fsl_msi_feature vmpic_msi_feature = {
+ .fsl_pic_ip = FSL_PIC_IP_VMPIC,
+ .msiir_offset = 0,
+ };
++#endif
+
+ static const struct of_device_id fsl_of_msi_ids[] = {
+ {
+diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
+index f6ec6dba92dcbc..700b67476a7d8d 100644
+--- a/arch/powerpc/sysdev/xics/icp-native.c
++++ b/arch/powerpc/sysdev/xics/icp-native.c
+@@ -236,6 +236,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
+ rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
+ cpu, hw_id);
+
++ if (!rname)
++ return -ENOMEM;
+ if (!request_mem_region(addr, size, rname)) {
+ pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
+ cpu, hw_id);
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 9f0af4d795d886..f1c0fa6ece21d0 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -802,7 +802,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ if (out_qpage)
+ *out_qpage = be64_to_cpu(qpage);
+ if (out_qsize)
+- *out_qsize = be32_to_cpu(qsize);
++ *out_qsize = be64_to_cpu(qsize);
+ if (out_qeoi_page)
+ *out_qeoi_page = be64_to_cpu(qeoi_page);
+ if (out_escalate_irq)
+diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
+index 75fa98221d485d..af105e1bc3fca4 100644
+--- a/arch/powerpc/xmon/ppc-dis.c
++++ b/arch/powerpc/xmon/ppc-dis.c
+@@ -122,32 +122,21 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
+ bool insn_is_short;
+ ppc_cpu_t dialect;
+
+- dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON
+- | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
++ dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON;
+
+- if (cpu_has_feature(CPU_FTRS_POWER5))
+- dialect |= PPC_OPCODE_POWER5;
++ if (IS_ENABLED(CONFIG_PPC64))
++ dialect |= PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_CELL |
++ PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 |
++ PPC_OPCODE_POWER9;
+
+- if (cpu_has_feature(CPU_FTRS_CELL))
+- dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC);
++ if (cpu_has_feature(CPU_FTR_TM))
++ dialect |= PPC_OPCODE_HTM;
+
+- if (cpu_has_feature(CPU_FTRS_POWER6))
+- dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC);
++ if (cpu_has_feature(CPU_FTR_ALTIVEC))
++ dialect |= PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2;
+
+- if (cpu_has_feature(CPU_FTRS_POWER7))
+- dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
+- | PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX);
+-
+- if (cpu_has_feature(CPU_FTRS_POWER8))
+- dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
+- | PPC_OPCODE_POWER8 | PPC_OPCODE_HTM
+- | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX);
+-
+- if (cpu_has_feature(CPU_FTRS_POWER9))
+- dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
+- | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
+- | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
+- | PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
++ if (cpu_has_feature(CPU_FTR_VSX))
++ dialect |= PPC_OPCODE_VSX | PPC_OPCODE_VSX3;
+
+ /* Get the major opcode of the insn. */
+ opcode = NULL;
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index b3b94cd3771373..1d815405a3b4f2 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1352,7 +1352,7 @@ static int cpu_cmd(void)
+ }
+ termch = cpu;
+
+- if (!scanhex(&cpu)) {
++ if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
+ /* print cpus waiting or in xmon */
+ printf("cpus stopped:");
+ last_cpu = first_cpu = NR_CPUS;
+@@ -2774,7 +2774,7 @@ static void dump_pacas(void)
+
+ termch = c; /* Put c back, it wasn't 'a' */
+
+- if (scanhex(&num))
++ if (scanhex(&num) && num < num_possible_cpus())
+ dump_one_paca(num);
+ else
+ dump_one_paca(xmon_owner);
+@@ -2847,7 +2847,7 @@ static void dump_xives(void)
+
+ termch = c; /* Put c back, it wasn't 'a' */
+
+- if (scanhex(&num))
++ if (scanhex(&num) && num < num_possible_cpus())
+ dump_one_xive(num);
+ else
+ dump_one_xive(xmon_owner);
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 9c48fecc671918..1304992232adbe 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -27,6 +27,7 @@ config RISCV
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_KCOV
++ select ARCH_HAS_MEMBARRIER_CALLBACKS
+ select ARCH_HAS_MMIOWB
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PMEM_API
+@@ -258,6 +259,11 @@ config GENERIC_HWEIGHT
+ config FIX_EARLYCON_MEM
+ def_bool MMU
+
++config ILLEGAL_POINTER_VALUE
++ hex
++ default 0 if 32BIT
++ default 0xdead000000000000 if 64BIT
++
+ config PGTABLE_LEVELS
+ int
+ default 5 if 64BIT
+@@ -287,7 +293,6 @@ config AS_HAS_OPTION_ARCH
+ # https://reviews.llvm.org/D123515
+ def_bool y
+ depends on $(as-instr, .option arch$(comma) +m)
+- depends on !$(as-instr, .option arch$(comma) -i)
+
+ source "arch/riscv/Kconfig.socs"
+ source "arch/riscv/Kconfig.errata"
+@@ -490,8 +495,8 @@ config RISCV_ISA_SVPBMT
+ config TOOLCHAIN_HAS_V
+ bool
+ default y
+- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
+- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
+ depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
+ depends on AS_HAS_OPTION_ARCH
+
+@@ -628,8 +633,7 @@ config IRQ_STACKS
+ config THREAD_SIZE_ORDER
+ int "Kernel stack size (in power-of-two numbers of page size)" if VMAP_STACK && EXPERT
+ range 0 4
+- default 1 if 32BIT && !KASAN
+- default 3 if 64BIT && KASAN
++ default 1 if 32BIT
+ default 2
+ help
+ Specify the Pages of thread stack size (from 4KB to 64KB), which also
+@@ -669,7 +673,7 @@ config RISCV_BOOT_SPINWAIT
+ If unsure what to do here, say N.
+
+ config ARCH_SUPPORTS_KEXEC
+- def_bool MMU
++ def_bool y
+
+ config ARCH_SELECTS_KEXEC
+ def_bool y
+@@ -677,7 +681,7 @@ config ARCH_SELECTS_KEXEC
+ select HOTPLUG_CPU if SMP
+
+ config ARCH_SUPPORTS_KEXEC_FILE
+- def_bool 64BIT && MMU
++ def_bool 64BIT
+
+ config ARCH_SELECTS_KEXEC_FILE
+ def_bool y
+@@ -686,9 +690,7 @@ config ARCH_SELECTS_KEXEC_FILE
+ select KEXEC_ELF
+
+ config ARCH_SUPPORTS_KEXEC_PURGATORY
+- def_bool KEXEC_FILE
+- depends on CRYPTO=y
+- depends on CRYPTO_SHA256=y
++ def_bool ARCH_SUPPORTS_KEXEC_FILE
+
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
+index 6833d01e2e707b..30fd6a51282853 100644
+--- a/arch/riscv/Kconfig.socs
++++ b/arch/riscv/Kconfig.socs
+@@ -29,6 +29,7 @@ config SOC_STARFIVE
+ bool "StarFive SoCs"
+ select PINCTRL
+ select RESET_CONTROLLER
++ select ARM_AMBA
+ help
+ This enables support for StarFive SoC platform hardware.
+
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index b43a6bb7e4dcb6..4d06f340267401 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -130,12 +130,6 @@ endif
+ libs-y += arch/riscv/lib/
+ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+-PHONY += vdso_install
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+- $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+- $(build)=arch/riscv/kernel/compat_vdso compat_$@)
+-
+ ifeq ($(KBUILD_EXTMOD),)
+ ifeq ($(CONFIG_MMU),y)
+ prepare: vdso_prepare
+@@ -147,6 +141,9 @@ vdso_prepare: prepare0
+ endif
+ endif
+
++vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
++vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg:../compat_vdso/compat_vdso.so
++
+ ifneq ($(CONFIG_XIP_KERNEL),y)
+ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
+ KBUILD_IMAGE := $(boot)/loader.bin
+diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
+index 22b13947bd131e..8e7fc0edf21d3e 100644
+--- a/arch/riscv/boot/Makefile
++++ b/arch/riscv/boot/Makefile
+@@ -17,6 +17,7 @@
+ KCOV_INSTRUMENT := n
+
+ OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
++OBJCOPYFLAGS_loader.bin :=-O binary
+ OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+ targets := Image Image.* loader loader.o loader.lds loader.bin
+diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+index 8275630af977d2..b8684312593e5b 100644
+--- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
++++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+@@ -30,7 +30,6 @@ cpu0: cpu@0 {
+ cpu0_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+- #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+index 07387f9c135ca7..72b87b08ab444e 100644
+--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
++++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+@@ -123,6 +123,7 @@ pmic@58 {
+ interrupt-parent = <&gpio>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
++ #interrupt-cells = <2>;
+
+ onkey {
+ compatible = "dlg,da9063-onkey";
+diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+index 2c02358abd711a..4874e3bb42ab10 100644
+--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
++++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+@@ -162,7 +162,6 @@ &i2c5 {
+ axp15060: pmic@36 {
+ compatible = "x-powers,axp15060";
+ reg = <0x36>;
+- interrupts = <0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+@@ -205,6 +204,8 @@ &i2c6 {
+
+ &mmc0 {
+ max-frequency = <100000000>;
++ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
++ assigned-clock-rates = <50000000>;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+@@ -221,6 +222,8 @@ &mmc0 {
+
+ &mmc1 {
+ max-frequency = <100000000>;
++ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>;
++ assigned-clock-rates = <50000000>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+@@ -440,40 +443,6 @@ GPOEN_ENABLE,
+ };
+ };
+
+- tdm_pins: tdm-0 {
+- tx-pins {
+- pinmux = <GPIOMUX(44, GPOUT_SYS_TDM_TXD,
+- GPOEN_ENABLE,
+- GPI_NONE)>;
+- bias-pull-up;
+- drive-strength = <2>;
+- input-disable;
+- input-schmitt-disable;
+- slew-rate = <0>;
+- };
+-
+- rx-pins {
+- pinmux = <GPIOMUX(61, GPOUT_HIGH,
+- GPOEN_DISABLE,
+- GPI_SYS_TDM_RXD)>;
+- input-enable;
+- };
+-
+- sync-pins {
+- pinmux = <GPIOMUX(63, GPOUT_HIGH,
+- GPOEN_DISABLE,
+- GPI_SYS_TDM_SYNC)>;
+- input-enable;
+- };
+-
+- pcmclk-pins {
+- pinmux = <GPIOMUX(38, GPOUT_HIGH,
+- GPOEN_DISABLE,
+- GPI_SYS_TDM_CLK)>;
+- input-enable;
+- };
+- };
+-
+ uart0_pins: uart0-0 {
+ tx-pins {
+ pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
+@@ -499,12 +468,6 @@ GPOEN_DISABLE,
+ };
+ };
+
+-&tdm {
+- pinctrl-names = "default";
+- pinctrl-0 = <&tdm_pins>;
+- status = "okay";
+-};
+-
+ &uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
+index 197db68cc8daf7..17a90486972468 100644
+--- a/arch/riscv/errata/andes/errata.c
++++ b/arch/riscv/errata/andes/errata.c
+@@ -38,29 +38,35 @@ static long ax45mp_iocp_sw_workaround(void)
+ return ret.error ? 0 : ret.value;
+ }
+
+-static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
++static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
+ {
++ static bool done;
++
+ if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
+- return false;
++ return;
++
++ if (done)
++ return;
++
++ done = true;
+
+ if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
+- return false;
++ return;
+
+ if (!ax45mp_iocp_sw_workaround())
+- return false;
++ return;
+
+ /* Set this just to make core cbo code happy */
+ riscv_cbom_block_size = 1;
+ riscv_noncoherent_supported();
+-
+- return true;
+ }
+
+ void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage)
+ {
+- errata_probe_iocp(stage, archid, impid);
++ if (stage == RISCV_ALTERNATIVES_BOOT)
++ errata_probe_iocp(stage, archid, impid);
+
+ /* we have nothing to patch here ATM so just return back */
+ }
+diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
+index 61ba8ed43d8feb..36b955c762ba08 100644
+--- a/arch/riscv/include/asm/asm-prototypes.h
++++ b/arch/riscv/include/asm/asm-prototypes.h
+@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+ DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+ DECLARE_DO_ERROR_INFO(do_trap_break);
+
+-asmlinkage unsigned long get_overflow_stack(void);
+ asmlinkage void handle_bad_stack(struct pt_regs *regs);
+ asmlinkage void do_page_fault(struct pt_regs *regs);
+ asmlinkage void do_irq(struct pt_regs *regs);
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 114bbadaef41eb..b5b84c6be01e16 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -82,6 +82,28 @@
+ .endr
+ .endm
+
++#ifdef CONFIG_SMP
++#ifdef CONFIG_32BIT
++#define PER_CPU_OFFSET_SHIFT 2
++#else
++#define PER_CPU_OFFSET_SHIFT 3
++#endif
++
++.macro asm_per_cpu dst sym tmp
++ REG_L \tmp, TASK_TI_CPU_NUM(tp)
++ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
++ la \dst, __per_cpu_offset
++ add \dst, \dst, \tmp
++ REG_L \tmp, 0(\dst)
++ la \dst, \sym
++ add \dst, \dst, \tmp
++.endm
++#else /* CONFIG_SMP */
++.macro asm_per_cpu dst sym tmp
++ la \dst, \sym
++.endm
++#endif /* CONFIG_SMP */
++
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
+@@ -142,6 +164,16 @@
+ REG_L x31, PT_T6(sp)
+ .endm
+
++/* Annotate a function as being unsuitable for kprobes. */
++#ifdef CONFIG_KPROBES
++#define ASM_NOKPROBE(name) \
++ .pushsection "_kprobe_blacklist", "aw"; \
++ RISCV_PTR name; \
++ .popsection
++#else
++#define ASM_NOKPROBE(name)
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _ASM_RISCV_ASM_H */
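The new asm_per_cpu macro is easier to follow next to its C equivalent: shift the cpu number by the pointer size to index __per_cpu_offset[], then add that cpu's offset to the symbol's link-time address. A hedged C analog, with an illustrative name:

static inline void *asm_per_cpu_equivalent(void *sym, unsigned int cpu)
{
	extern unsigned long __per_cpu_offset[];

	/* same arithmetic the macro performs with slli/add and REG_L */
	return (char *)sym + __per_cpu_offset[cpu];
}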
+diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
+index 3cb53c4df27cfe..a129dac4521d35 100644
+--- a/arch/riscv/include/asm/cacheflush.h
++++ b/arch/riscv/include/asm/cacheflush.h
+@@ -37,7 +37,8 @@ static inline void flush_dcache_page(struct page *page)
+ flush_icache_mm(vma->vm_mm, 0)
+
+ #ifdef CONFIG_64BIT
+-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
++#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
++#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
+ #endif
+
+ #ifndef CONFIG_SMP
+diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
+index b55b434f005910..d3f3c237adad74 100644
+--- a/arch/riscv/include/asm/errata_list.h
++++ b/arch/riscv/include/asm/errata_list.h
+@@ -44,11 +44,21 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+ #else /* !__ASSEMBLY__ */
+
+-#define ALT_FLUSH_TLB_PAGE(x) \
++#define ALT_SFENCE_VMA_ASID(asid) \
++asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID, \
++ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
++ : : "r" (asid) : "memory")
++
++#define ALT_SFENCE_VMA_ADDR(addr) \
+ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
+ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
+ : : "r" (addr) : "memory")
+
++#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid) \
++asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID, \
++ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
++ : : "r" (addr), "r" (asid) : "memory")
++
+ /*
+ * _val is marked as "will be overwritten", so need to set it to 0
+ * in the default case.
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 2b2f5df7ef2c7d..42777f91a9c580 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -25,6 +25,11 @@
+
+ #define ARCH_SUPPORTS_FTRACE_OPS 1
+ #ifndef __ASSEMBLY__
++
++extern void *return_address(unsigned int level);
++
++#define ftrace_return_address(n) return_address(n)
++
+ void MCOUNT_NAME(void);
+ static inline unsigned long ftrace_call_adjust(unsigned long addr)
+ {
+diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
+index 4c5b0e929890fa..22deb7a2a6ec4e 100644
+--- a/arch/riscv/include/asm/hugetlb.h
++++ b/arch/riscv/include/asm/hugetlb.h
+@@ -11,6 +11,11 @@ static inline void arch_clear_hugepage_flags(struct page *page)
+ }
+ #define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
++#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
++bool arch_hugetlb_migration_supported(struct hstate *h);
++#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
++#endif
++
+ #ifdef CONFIG_RISCV_ISA_SVNAPOT
+ #define __HAVE_ARCH_HUGE_PTE_CLEAR
+ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
+index b7b58258f6c7c0..f4157034efa9cb 100644
+--- a/arch/riscv/include/asm/hwcap.h
++++ b/arch/riscv/include/asm/hwcap.h
+@@ -98,7 +98,7 @@ riscv_has_extension_likely(const unsigned long ext)
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+- asm_volatile_goto(
++ asm goto(
+ ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+@@ -121,7 +121,7 @@ riscv_has_extension_unlikely(const unsigned long ext)
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+- asm_volatile_goto(
++ asm goto(
+ ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
+index 78936f4ff51330..7cad513538d8d0 100644
+--- a/arch/riscv/include/asm/hwprobe.h
++++ b/arch/riscv/include/asm/hwprobe.h
+@@ -10,4 +10,9 @@
+
+ #define RISCV_HWPROBE_MAX_KEY 5
+
++static inline bool riscv_hwprobe_key_is_valid(__s64 key)
++{
++ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
++}
++
+ #endif
+diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
+index 06e439eeef9ada..09fde95a5e8f75 100644
+--- a/arch/riscv/include/asm/insn.h
++++ b/arch/riscv/include/asm/insn.h
+@@ -145,7 +145,7 @@
+
+ /* parts of opcode for RVF, RVD and RVQ */
+ #define RVFDQ_FL_FS_WIDTH_OFF 12
+-#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
++#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0)
+ #define RVFDQ_FL_FS_WIDTH_W 2
+ #define RVFDQ_FL_FS_WIDTH_D 3
+ #define RVFDQ_LS_FS_WIDTH_Q 4
+diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
+index b53891964ae037..b27a4d64fc6a04 100644
+--- a/arch/riscv/include/asm/irq_work.h
++++ b/arch/riscv/include/asm/irq_work.h
+@@ -6,5 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ return IS_ENABLED(CONFIG_SMP);
+ }
+-extern void arch_irq_work_raise(void);
++
+ #endif /* _ASM_RISCV_IRQ_WORK_H */
+diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
+index 14a5ea8d8ef0f4..4a35d787c01914 100644
+--- a/arch/riscv/include/asm/jump_label.h
++++ b/arch/riscv/include/asm/jump_label.h
+@@ -17,7 +17,7 @@
+ static __always_inline bool arch_static_branch(struct static_key * const key,
+ const bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+@@ -39,7 +39,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
+ static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+ const bool branch)
+ {
+- asm_volatile_goto(
++ asm goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
+index 0bbffd528096d9..7388edd88986f9 100644
+--- a/arch/riscv/include/asm/kfence.h
++++ b/arch/riscv/include/asm/kfence.h
+@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (protect)
+- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
++ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
+ else
+- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
++ set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
+index 395518a1664e00..a50a1d23523fea 100644
+--- a/arch/riscv/include/asm/kvm_vcpu_pmu.h
++++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
+@@ -10,6 +10,7 @@
+ #define __KVM_VCPU_RISCV_PMU_H
+
+ #include <linux/perf/riscv_pmu.h>
++#include <asm/kvm_vcpu_insn.h>
+ #include <asm/sbi.h>
+
+ #ifdef CONFIG_RISCV_PMU_SBI
+@@ -57,11 +58,11 @@ struct kvm_pmu {
+
+ #if defined(CONFIG_32BIT)
+ #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+-{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
++{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
++{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
+ #else
+ #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
++{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
+ #endif
+
+ int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
+@@ -92,8 +93,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
+ struct kvm_pmu {
+ };
+
++static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
++ unsigned long *val, unsigned long new_val,
++ unsigned long wr_mask)
++{
++ if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
++ *val = 0;
++ return KVM_INSN_CONTINUE_NEXT_SEPC;
++ } else {
++ return KVM_INSN_ILLEGAL_TRAP;
++ }
++}
++
+ #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+-{.base = 0, .count = 0, .func = NULL },
++{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
+
+ static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
+ static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
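
With the SBI PMU compiled out, the stub path now emulates the two legacy counter CSRs (reading as zero and continuing past the instruction) rather than raising an illegal-instruction trap on every read. A userspace model of the dispatch; the CSR numbers are the architectural ones, the return codes are illustrative:

#include <stdio.h>

enum { CSR_CYCLE = 0xC00, CSR_INSTRET = 0xC02 };
enum { INSN_CONTINUE = 0, INSN_ILLEGAL = -1 };

/* Legacy fallback: cycle/instret read as zero, everything else traps. */
static int pmu_read_legacy(unsigned int csr, unsigned long *val)
{
	if (csr == CSR_CYCLE || csr == CSR_INSTRET) {
		*val = 0;
		return INSN_CONTINUE;
	}
	return INSN_ILLEGAL;
}

int main(void)
{
	unsigned long v = 1;
	int rc = pmu_read_legacy(CSR_CYCLE, &v);

	printf("cycle: rc=%d val=%lu\n", rc, v);		/* rc=0 val=0 */
	printf("hpm3:  rc=%d\n", pmu_read_legacy(0xC03, &v));	/* rc=-1 */
	return 0;
}
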
+diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h
+new file mode 100644
+index 00000000000000..6c016ebb5020af
+--- /dev/null
++++ b/arch/riscv/include/asm/membarrier.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++#ifndef _ASM_RISCV_MEMBARRIER_H
++#define _ASM_RISCV_MEMBARRIER_H
++
++static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
++ struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ /*
++ * Only need the full barrier when switching between processes.
++ * Barrier when switching from kernel to userspace is not
++ * required here, given that it is implied by mmdrop(). Barrier
++ * when switching from userspace to kernel is not needed after
++ * store to rq->curr.
++ */
++ if (IS_ENABLED(CONFIG_SMP) &&
++ likely(!(atomic_read(&next->membarrier_state) &
++ (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
++ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
++ return;
++
++ /*
++ * The membarrier system call requires a full memory barrier
++ * after storing to rq->curr, before going back to user-space.
++ * Matches a full barrier in the proximity of the membarrier
++ * system call entry.
++ */
++ smp_mb();
++}
++
++#endif /* _ASM_RISCV_MEMBARRIER_H */
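
The new header lets RISC-V take the membarrier fast path: switch_mm() only issues a full smp_mb() when the incoming mm has registered for expedited membarrier and a previous mm exists. The predicate, modelled in plain C with illustrative flag values:

#include <stdbool.h>
#include <stdio.h>

#define MEMBARRIER_PRIVATE_EXPEDITED	(1 << 0)
#define MEMBARRIER_GLOBAL_EXPEDITED	(1 << 1)

/* Mirror of the early-return test in membarrier_arch_switch_mm(). */
static bool needs_switch_mm_barrier(int membarrier_state, bool have_prev)
{
	return have_prev &&
	       (membarrier_state & (MEMBARRIER_PRIVATE_EXPEDITED |
				    MEMBARRIER_GLOBAL_EXPEDITED));
}

int main(void)
{
	printf("%d\n", needs_switch_mm_barrier(MEMBARRIER_PRIVATE_EXPEDITED, true)); /* 1 */
	printf("%d\n", needs_switch_mm_barrier(0, true));                            /* 0 */
	return 0;
}
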
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index 5488ecc337b63f..94b3d6930fc370 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -33,8 +33,8 @@
+ #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+ #endif
+ /*
+- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
+- * define the PAGE_OFFSET value for SV39.
++ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
++ * define the PAGE_OFFSET value for SV48 and SV39.
+ */
+ #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
+ #define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+@@ -89,7 +89,7 @@ typedef struct page *pgtable_t;
+ #define PTE_FMT "%08lx"
+ #endif
+
+-#ifdef CONFIG_64BIT
++#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+ /*
+ * We override this value as its generic definition uses __pa too early in
+ * the boot process (before kernel_map.va_pa_offset is set).
+diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
+index 7a5097202e1570..3272ca7a5270bf 100644
+--- a/arch/riscv/include/asm/pgtable-64.h
++++ b/arch/riscv/include/asm/pgtable-64.h
+@@ -198,7 +198,7 @@ static inline int pud_user(pud_t pud)
+
+ static inline void set_pud(pud_t *pudp, pud_t pud)
+ {
+- *pudp = pud;
++ WRITE_ONCE(*pudp, pud);
+ }
+
+ static inline void pud_clear(pud_t *pudp)
+@@ -274,7 +274,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
+ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+ {
+ if (pgtable_l4_enabled)
+- *p4dp = p4d;
++ WRITE_ONCE(*p4dp, p4d);
+ else
+ set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
+ }
+@@ -336,18 +336,12 @@ static inline struct page *p4d_page(p4d_t p4d)
+ #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+ #define pud_offset pud_offset
+-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+-{
+- if (pgtable_l4_enabled)
+- return p4d_pgtable(*p4d) + pud_index(address);
+-
+- return (pud_t *)p4d;
+-}
++pud_t *pud_offset(p4d_t *p4d, unsigned long address);
+
+ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
+ if (pgtable_l5_enabled)
+- *pgdp = pgd;
++ WRITE_ONCE(*pgdp, pgd);
+ else
+ set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
+ }
+@@ -400,12 +394,6 @@ static inline struct page *pgd_page(pgd_t pgd)
+ #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+ #define p4d_offset p4d_offset
+-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+-{
+- if (pgtable_l5_enabled)
+- return pgd_pgtable(*pgd) + p4d_index(address);
+-
+- return (p4d_t *)pgd;
+-}
++p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
+
+ #endif /* _ASM_RISCV_PGTABLE_64_H */
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index b2ba3f79cfe9a7..37829dab4a0a48 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -84,7 +84,7 @@
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+-#define vmemmap ((struct page *)VMEMMAP_START)
++#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
+
+ #define PCI_IO_SIZE SZ_16M
+ #define PCI_IO_END VMEMMAP_START
+@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)
+
+ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
+- *pmdp = pmd;
++ WRITE_ONCE(*pmdp, pmd);
+ }
+
+ static inline void pmd_clear(pmd_t *pmdp)
+@@ -438,6 +438,12 @@ static inline pte_t pte_mkhuge(pte_t pte)
+ return pte;
+ }
+
++#ifdef CONFIG_RISCV_ISA_SVNAPOT
++#define pte_leaf_size(pte) (pte_napot(pte) ? \
++ napot_cont_size(napot_cont_order(pte)) :\
++ PAGE_SIZE)
++#endif
++
+ #ifdef CONFIG_NUMA_BALANCING
+ /*
+ * See the comment in include/asm-generic/pgtable.h
+@@ -509,7 +515,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
+ */
+ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ {
+- *ptep = pteval;
++ WRITE_ONCE(*ptep, pteval);
+ }
+
+ void flush_icache_pte(pte_t pte);
+@@ -543,19 +549,12 @@ static inline void pte_clear(struct mm_struct *mm,
+ __set_pte_at(ptep, __pte(0));
+ }
+
+-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+- unsigned long address, pte_t *ptep,
+- pte_t entry, int dirty)
+-{
+- if (!pte_same(*ptep, entry))
+- __set_pte_at(ptep, entry);
+- /*
+- * update_mmu_cache will unconditionally execute, handling both
+- * the case that the PTE changed and the spurious fault case.
+- */
+- return true;
+-}
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
++extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
++ pte_t *ptep, pte_t entry, int dirty);
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */
++extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
++ pte_t *ptep);
+
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+@@ -568,16 +567,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ return pte;
+ }
+
+-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+- unsigned long address,
+- pte_t *ptep)
+-{
+- if (!pte_young(*ptep))
+- return 0;
+- return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+-}
+-
+ #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+ static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+@@ -880,7 +869,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+ #define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+ #ifdef CONFIG_COMPAT
+-#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
++#define TASK_SIZE_32 (_AC(0x80000000, UL))
+ #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+ #else
+@@ -897,8 +886,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+ #define PAGE_SHARED __pgprot(0)
+ #define PAGE_KERNEL __pgprot(0)
+ #define swapper_pg_dir NULL
+-#define TASK_SIZE 0xffffffffUL
+-#define VMALLOC_START 0
++#define TASK_SIZE _AC(-1, UL)
++#define VMALLOC_START _AC(0, UL)
+ #define VMALLOC_END TASK_SIZE
+
+ #endif /* !CONFIG_MMU */
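
Two threads run through the pgtable.h hunks: page-table slots are now written with WRITE_ONCE() so lockless walkers never observe torn stores, and vmemmap is rebased by phys_ram_base so pfn_to_page() stays valid when RAM does not start at physical address 0. The vmemmap arithmetic, worked through with made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
struct page { char pad[64]; };	/* size is illustrative */

int main(void)
{
	unsigned long vmemmap_start = 0xffff8000UL;	/* stand-in for VMEMMAP_START */
	unsigned long phys_ram_base = 0x40000000UL;	/* RAM begins at 1 GiB */
	unsigned long pfn = phys_ram_base >> PAGE_SHIFT;
	/* vmemmap = VMEMMAP_START minus the pfn of the RAM base, in struct page units */
	unsigned long vmemmap = vmemmap_start - pfn * sizeof(struct page);

	/* the struct page of the first RAM pfn lands exactly on VMEMMAP_START */
	printf("%#lx\n", vmemmap + pfn * sizeof(struct page));	/* 0xffff8000 */
	return 0;
}
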
+diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
+index 3e23e1786d0521..4f6af8c6cfa060 100644
+--- a/arch/riscv/include/asm/processor.h
++++ b/arch/riscv/include/asm/processor.h
+@@ -15,7 +15,7 @@
+
+ #ifdef CONFIG_64BIT
+ #define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+-#define STACK_TOP_MAX TASK_SIZE_64
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define arch_get_mmap_end(addr, len, flags) \
+ ({ \
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
+index 5b4a1bf5f4395c..3ed853b8a8c858 100644
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -273,9 +273,6 @@ void sbi_set_timer(uint64_t stime_value);
+ void sbi_shutdown(void);
+ void sbi_send_ipi(unsigned int cpu);
+ int sbi_remote_fence_i(const struct cpumask *cpu_mask);
+-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
+- unsigned long start,
+- unsigned long size);
+
+ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
+ unsigned long start,
+@@ -330,6 +327,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1
+ static inline void sbi_init(void) {}
+ #endif /* CONFIG_RISCV_SBI */
+
++unsigned long riscv_get_mvendorid(void);
++unsigned long riscv_get_marchid(void);
+ unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+ unsigned long riscv_cached_marchid(unsigned int cpu_id);
+ unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
+index 32336e8a17cb07..a393d5035c5433 100644
+--- a/arch/riscv/include/asm/sections.h
++++ b/arch/riscv/include/asm/sections.h
+@@ -13,6 +13,7 @@ extern char _start_kernel[];
+ extern char __init_data_begin[], __init_data_end[];
+ extern char __init_text_begin[], __init_text_end[];
+ extern char __alt_start[], __alt_end[];
++extern char __exittext_begin[], __exittext_end[];
+
+ static inline bool is_va_kernel_text(uintptr_t va)
+ {
+diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
+index 63acaecc337478..2f901a410586d0 100644
+--- a/arch/riscv/include/asm/sparsemem.h
++++ b/arch/riscv/include/asm/sparsemem.h
+@@ -7,7 +7,7 @@
+ #ifdef CONFIG_64BIT
+ #define MAX_PHYSMEM_BITS 56
+ #else
+-#define MAX_PHYSMEM_BITS 34
++#define MAX_PHYSMEM_BITS 32
+ #endif /* CONFIG_64BIT */
+ #define SECTION_SIZE_BITS 27
+ #endif /* CONFIG_SPARSEMEM */
+diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
+index f7e8ef2418b99f..b1495a7e06ce69 100644
+--- a/arch/riscv/include/asm/stacktrace.h
++++ b/arch/riscv/include/asm/stacktrace.h
+@@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+ }
+
++
++#ifdef CONFIG_VMAP_STACK
++DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
++#endif /* CONFIG_VMAP_STACK */
++
+ #endif /* _ASM_RISCV_STACKTRACE_H */
+diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
+index 1d7942c8a6cbae..eeec04b7dae67b 100644
+--- a/arch/riscv/include/asm/syscall_wrapper.h
++++ b/arch/riscv/include/asm/syscall_wrapper.h
+@@ -46,9 +46,6 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
+ return sys_ni_syscall(); \
+ }
+
+-#define COMPAT_SYS_NI(name) \
+- SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);
+-
+ #endif /* CONFIG_COMPAT */
+
+ #define __SYSCALL_DEFINEx(x, name, ...) \
+@@ -82,6 +79,4 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
+ return sys_ni_syscall(); \
+ }
+
+-#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);
+-
+ #endif /* __ASM_SYSCALL_WRAPPER_H */
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 1833beb00489c3..8c72d1bcdf141e 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -12,7 +12,12 @@
+ #include <linux/const.h>
+
+ /* thread information allocation */
+-#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER
++#ifdef CONFIG_KASAN
++#define KASAN_STACK_ORDER 1
++#else
++#define KASAN_STACK_ORDER 0
++#endif
++#define THREAD_SIZE_ORDER (CONFIG_THREAD_SIZE_ORDER + KASAN_STACK_ORDER)
+ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+ /*
+@@ -28,15 +33,11 @@
+
+ #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
+ #define OVERFLOW_STACK_SIZE SZ_4K
+-#define SHADOW_OVERFLOW_STACK_SIZE (1024)
+
+ #define IRQ_STACK_SIZE THREAD_SIZE
+
+ #ifndef __ASSEMBLY__
+
+-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+-extern unsigned long spin_shadow_stack;
+-
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+
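
The thread_info.h change doubles the kernel stack under KASAN, since instrumented frames are markedly larger: THREAD_SIZE becomes PAGE_SIZE << (CONFIG_THREAD_SIZE_ORDER + KASAN_STACK_ORDER). The arithmetic, for a base order of 2:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long thread_size(int base_order, int kasan)
{
	int kasan_stack_order = kasan ? 1 : 0;

	return PAGE_SIZE << (base_order + kasan_stack_order);
}

int main(void)
{
	printf("plain: %lu\n", thread_size(2, 0));	/* 16384 */
	printf("kasan: %lu\n", thread_size(2, 1));	/* 32768 */
	return 0;
}
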
+diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
+index 120bcf2ed8a878..50b63b5c15bd8b 100644
+--- a/arch/riscv/include/asm/tlb.h
++++ b/arch/riscv/include/asm/tlb.h
+@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
+
+ static inline void tlb_flush(struct mmu_gather *tlb)
+ {
+- flush_tlb_mm(tlb->mm);
++#ifdef CONFIG_MMU
++ if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
++ flush_tlb_mm(tlb->mm);
++ else
++ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
++ tlb_get_unmap_size(tlb));
++#endif
+ }
+
+ #endif /* _ASM_RISCV_TLB_H */
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index a09196f8de688e..97711d5bd8ef9a 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -11,6 +11,9 @@
+ #include <asm/smp.h>
+ #include <asm/errata_list.h>
+
++#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1)
++#define FLUSH_TLB_NO_ASID ((unsigned long)-1)
++
+ #ifdef CONFIG_MMU
+ extern unsigned long asid_mask;
+
+@@ -19,10 +22,27 @@ static inline void local_flush_tlb_all(void)
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+ }
+
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++ if (asid != FLUSH_TLB_NO_ASID)
++ ALT_SFENCE_VMA_ASID(asid);
++ else
++ local_flush_tlb_all();
++}
++
+ /* Flush one page from local TLB */
+ static inline void local_flush_tlb_page(unsigned long addr)
+ {
+- ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
++ ALT_SFENCE_VMA_ADDR(addr);
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++ unsigned long asid)
++{
++ if (asid != FLUSH_TLB_NO_ASID)
++ ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
++ else
++ local_flush_tlb_page(addr);
+ }
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all() do { } while (0)
+@@ -32,9 +52,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
+ #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
+ void flush_tlb_all(void);
+ void flush_tlb_mm(struct mm_struct *mm);
++void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
++ unsigned long end, unsigned int page_size);
+ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
++void flush_tlb_kernel_range(unsigned long start, unsigned long end);
++void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+@@ -51,14 +75,16 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
+ local_flush_tlb_all();
+ }
+
+-#define flush_tlb_mm(mm) flush_tlb_all()
+-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+-
+ /* Flush a range of kernel pages */
+ static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+ {
+- flush_tlb_all();
++ local_flush_tlb_all();
+ }
+
++#define flush_tlb_mm(mm) flush_tlb_all()
++#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
++#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
++#endif /* !CONFIG_SMP || !CONFIG_MMU */
++
+ #endif /* _ASM_RISCV_TLBFLUSH_H */
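
The header now builds its ASID-aware local flushes around the FLUSH_TLB_NO_ASID sentinel: a real ASID selects the narrower sfence.vma form, the sentinel falls back to flushing every address space. A stubbed C model (prints stand in for the fence instructions):

#include <stdio.h>

#define FLUSH_TLB_NO_ASID ((unsigned long)-1)

static void local_flush_tlb_page_asid(unsigned long addr, unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		printf("sfence.vma %#lx, asid %lu\n", addr, asid);
	else
		printf("sfence.vma %#lx (all ASIDs)\n", addr);
}

int main(void)
{
	local_flush_tlb_page_asid(0x1000, 7);
	local_flush_tlb_page_asid(0x1000, FLUSH_TLB_NO_ASID);
	return 0;
}
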
+diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
+index ec0cab9fbddd0d..72ec1d9bd3f312 100644
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
+
+ #define __get_kernel_nofault(dst, src, type, err_label) \
+ do { \
+- long __kr_err; \
++ long __kr_err = 0; \
+ \
+ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+ if (unlikely(__kr_err)) \
+@@ -328,7 +328,7 @@ do { \
+
+ #define __put_kernel_nofault(dst, src, type, err_label) \
+ do { \
+- long __kr_err; \
++ long __kr_err = 0; \
+ \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
+ if (unlikely(__kr_err)) \
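
Initialising __kr_err to 0 matters because the accessors' success path never stores to it: the fault fixup is the only writer, so an indeterminate starting value could send a successful access down the error label. A minimal analogue of the pattern:

#include <stdio.h>

/* simulate_fault plays the role of the exception fixup path. */
static int read_nofault(int *dst, const int *src, int simulate_fault)
{
	long err = 0;		/* the fix: start from "no error" */

	if (simulate_fault)
		err = -14;	/* fixup stores -EFAULT */
	else
		*dst = *src;	/* success path never touches err */

	return err ? -1 : 0;
}

int main(void)
{
	int v = 0, src = 42;
	int rc = read_nofault(&v, &src, 0);

	printf("%d %d\n", rc, v);			/* 0 42 */
	printf("%d\n", read_nofault(&v, &src, 1));	/* -1 */
	return 0;
}
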
+diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
+index 14f5d27783b858..96b65a5396dfcf 100644
+--- a/arch/riscv/include/asm/vdso/processor.h
++++ b/arch/riscv/include/asm/vdso/processor.h
+@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ #endif
+
+-#ifdef __riscv_zihintpause
++#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
+index 924d01b56c9a1e..51f6dfe19745aa 100644
+--- a/arch/riscv/include/asm/vmalloc.h
++++ b/arch/riscv/include/asm/vmalloc.h
+@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+ return true;
+ }
+
+-#ifdef CONFIG_RISCV_ISA_SVNAPOT
+-#include <linux/pgtable.h>
++#endif
+
+-#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+-static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+- u64 pfn, unsigned int max_page_shift)
+-{
+- unsigned long map_size = PAGE_SIZE;
+- unsigned long size, order;
+-
+- if (!has_svnapot())
+- return map_size;
+-
+- for_each_napot_order_rev(order) {
+- if (napot_cont_shift(order) > max_page_shift)
+- continue;
+-
+- size = napot_cont_size(order);
+- if (end - addr < size)
+- continue;
+-
+- if (!IS_ALIGNED(addr, size))
+- continue;
+-
+- if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+- continue;
+-
+- map_size = size;
+- break;
+- }
+-
+- return map_size;
+-}
+-
+-#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+-static inline int arch_vmap_pte_supported_shift(unsigned long size)
+-{
+- int shift = PAGE_SHIFT;
+- unsigned long order;
+-
+- if (!has_svnapot())
+- return shift;
+-
+- WARN_ON_ONCE(size >= PMD_SIZE);
+-
+- for_each_napot_order_rev(order) {
+- if (napot_cont_size(order) > size)
+- continue;
+-
+- if (!IS_ALIGNED(size, napot_cont_size(order)))
+- continue;
+-
+- shift = napot_cont_shift(order);
+- break;
+- }
+-
+- return shift;
+-}
+-
+-#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+ #endif /* _ASM_RISCV_VMALLOC_H */
+diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
+index d4ffc3c37649ff..b65bf6306f69c6 100644
+--- a/arch/riscv/include/asm/xip_fixup.h
++++ b/arch/riscv/include/asm/xip_fixup.h
+@@ -13,7 +13,7 @@
+ add \reg, \reg, t0
+ .endm
+ .macro XIP_FIXUP_FLASH_OFFSET reg
+- la t1, __data_loc
++ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
+index 10aaa83db89ef7..95050ebe9ad00b 100644
+--- a/arch/riscv/include/uapi/asm/auxvec.h
++++ b/arch/riscv/include/uapi/asm/auxvec.h
+@@ -34,7 +34,7 @@
+ #define AT_L3_CACHEGEOMETRY 47
+
+ /* entries in ARCH_DLINFO */
+-#define AT_VECTOR_SIZE_ARCH 9
++#define AT_VECTOR_SIZE_ARCH 10
+ #define AT_MINSIGSTKSZ 51
+
+ #endif /* _UAPI_ASM_RISCV_AUXVEC_H */
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index 95cf25d484052e..03968c06258ceb 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
+ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
+ endif
+ CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
+ CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+@@ -46,6 +47,7 @@ obj-y += irq.o
+ obj-y += process.o
+ obj-y += ptrace.o
+ obj-y += reset.o
++obj-y += return_address.o
+ obj-y += setup.o
+ obj-y += signal.o
+ obj-y += syscall_table.o
+diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
+index d6a75aac1d27a5..9f535d5de33f93 100644
+--- a/arch/riscv/kernel/asm-offsets.c
++++ b/arch/riscv/kernel/asm-offsets.c
+@@ -39,6 +39,7 @@ void asm_offsets(void)
+ OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+ OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+
++ OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+ OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
+ OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
+ OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
+diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
+index b86e5e2c3aea94..62fa393b2eb2ea 100644
+--- a/arch/riscv/kernel/compat_vdso/Makefile
++++ b/arch/riscv/kernel/compat_vdso/Makefile
+@@ -76,13 +76,3 @@ quiet_cmd_compat_vdsold = VDSOLD $@
+ # actual build commands
+ quiet_cmd_compat_vdsoas = VDSOAS $@
+ cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
+-
+-# install commands for the unstripped file
+-quiet_cmd_compat_vdso_install = INSTALL $@
+- cmd_compat_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/compat_vdso/$@
+-
+-compat_vdso.so: $(obj)/compat_vdso.so.dbg
+- @mkdir -p $(MODLIB)/compat_vdso
+- $(call cmd,compat_vdso_install)
+-
+-compat_vdso_install: compat_vdso.so
+diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
+index c17dacb1141cb3..88732abecd0230 100644
+--- a/arch/riscv/kernel/cpu.c
++++ b/arch/riscv/kernel/cpu.c
+@@ -125,19 +125,48 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
+ */
+ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
+ {
+- int rc;
+-
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv")) {
+- rc = riscv_of_processor_hartid(node, hartid);
+- if (!rc)
+- return 0;
++ *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
++ if (*hartid == ~0UL) {
++ pr_warn("Found CPU without hart ID\n");
++ return -ENODEV;
++ }
++ return 0;
+ }
+ }
+
+ return -1;
+ }
+
++unsigned long __init riscv_get_marchid(void)
++{
++ struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
++
++#if IS_ENABLED(CONFIG_RISCV_SBI)
++ ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
++#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
++ ci->marchid = csr_read(CSR_MARCHID);
++#else
++ ci->marchid = 0;
++#endif
++ return ci->marchid;
++}
++
++unsigned long __init riscv_get_mvendorid(void)
++{
++ struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
++
++#if IS_ENABLED(CONFIG_RISCV_SBI)
++ ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
++#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
++ ci->mvendorid = csr_read(CSR_MVENDORID);
++#else
++ ci->mvendorid = 0;
++#endif
++ return ci->mvendorid;
++}
++
+ DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+
+ unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
+@@ -169,12 +198,16 @@ static int riscv_cpuinfo_starting(unsigned int cpu)
+ struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+
+ #if IS_ENABLED(CONFIG_RISCV_SBI)
+- ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+- ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
++ if (!ci->mvendorid)
++ ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
++ if (!ci->marchid)
++ ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+ ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
+ #elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+- ci->mvendorid = csr_read(CSR_MVENDORID);
+- ci->marchid = csr_read(CSR_MARCHID);
++ if (!ci->mvendorid)
++ ci->mvendorid = csr_read(CSR_MVENDORID);
++ if (!ci->marchid)
++ ci->marchid = csr_read(CSR_MARCHID);
+ ci->mimpid = csr_read(CSR_MIMPID);
+ #else
+ ci->mvendorid = 0;
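
riscv_get_mvendorid()/riscv_get_marchid() let early boot fetch each ID once and stash it in riscv_cpuinfo; the hotplug callback above then reuses the cached value instead of repeating the SBI call per CPU. The caching shape, with a hypothetical firmware stub:

#include <stdio.h>

static unsigned long fake_sbi_get_marchid(void)
{
	puts("firmware call");
	return 0x1;
}

/* Zero doubles as "not cached yet", matching the !ci->marchid test. */
static unsigned long get_marchid_cached(void)
{
	static unsigned long marchid;

	if (!marchid)
		marchid = fake_sbi_get_marchid();
	return marchid;
}

int main(void)
{
	get_marchid_cached();	/* one firmware call */
	get_marchid_cached();	/* served from the cache */
	return 0;
}
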
+diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
+index efa0f0816634c4..93cbc38d180571 100644
+--- a/arch/riscv/kernel/cpu_ops_sbi.c
++++ b/arch/riscv/kernel/cpu_ops_sbi.c
+@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+ /* Make sure tidle is updated */
+ smp_mb();
+ bdata->task_ptr = tidle;
+- bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
++ bdata->stack_ptr = task_pt_regs(tidle);
+ /* Make sure boot data is updated */
+ smp_mb();
+ hsm_data = __pa(bdata);
+diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
+index d98d19226b5f51..691e0c5366d2bd 100644
+--- a/arch/riscv/kernel/cpu_ops_spinwait.c
++++ b/arch/riscv/kernel/cpu_ops_spinwait.c
+@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
+
+ /* Make sure tidle is updated */
+ smp_mb();
+- WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
+- task_stack_page(tidle) + THREAD_SIZE);
++ WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
+ WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
+ }
+
+diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
+index 1cfbba65d11ae3..bb5fb2b820a21e 100644
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -21,6 +21,7 @@
+ #include <asm/hwprobe.h>
+ #include <asm/patch.h>
+ #include <asm/processor.h>
++#include <asm/sbi.h>
+ #include <asm/vector.h>
+
+ #include "copy-unaligned.h"
+@@ -350,6 +351,8 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
+ struct acpi_table_header *rhct;
+ acpi_status status;
+ unsigned int cpu;
++ u64 boot_vendorid;
++ u64 boot_archid;
+
+ if (!acpi_disabled) {
+ status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+@@ -357,6 +360,9 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
+ return;
+ }
+
++ boot_vendorid = riscv_get_mvendorid();
++ boot_archid = riscv_get_marchid();
++
+ for_each_possible_cpu(cpu) {
+ struct riscv_isainfo *isainfo = &hart_isa[cpu];
+ unsigned long this_hwcap = 0;
+@@ -396,6 +402,19 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
+ set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
+ }
+
++ /*
++ * "V" in ISA strings is ambiguous in practice: it should mean
++ * just the standard V-1.0 but vendors aren't well behaved.
++ * Many vendors with T-Head CPU cores which implement the 0.7.1
++ * version of the vector specification put "v" into their DTs.
++ * CPU cores with the ratified spec will contain non-zero
++ * marchid.
++ */
++ if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
++ this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
++ clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
++ }
++
+ /*
+ * All "okay" hart should have same isa. Set HWCAP based on
+ * common capabilities of every "okay" hart, in case they don't
+@@ -568,6 +587,10 @@ void check_unaligned_access(int cpu)
+ void *src;
+ long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
++ /* We are already set since the last check */
++ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
++ return;
++
+ page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+ if (!page) {
+ pr_warn("Can't alloc pages to measure memcpy performance");
+diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c
+index 55f1d7856b5448..8706736fd4e2dc 100644
+--- a/arch/riscv/kernel/crash_core.c
++++ b/arch/riscv/kernel/crash_core.c
+@@ -5,17 +5,19 @@
+
+ void arch_crash_save_vmcoreinfo(void)
+ {
+- VMCOREINFO_NUMBER(VA_BITS);
+ VMCOREINFO_NUMBER(phys_ram_base);
+
+ vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET);
+ vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
+ vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
++#ifdef CONFIG_MMU
++ VMCOREINFO_NUMBER(VA_BITS);
+ vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
+ vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
+ #ifdef CONFIG_64BIT
+ vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
+ vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
++#endif
+ #endif
+ vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+ vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
+diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
+index aa6209a74c83ff..b64bf1624a0529 100644
+--- a/arch/riscv/kernel/efi.c
++++ b/arch/riscv/kernel/efi.c
+@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
+ {
+ efi_memory_desc_t *md = data;
+- pte_t pte = READ_ONCE(*ptep);
++ pte_t pte = ptep_get(ptep);
+ unsigned long val;
+
+ if (md->attribute & EFI_MEMORY_RO) {
+diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
+index e60fbd8660c4a5..8c32bf1eedda08 100644
+--- a/arch/riscv/kernel/elf_kexec.c
++++ b/arch/riscv/kernel/elf_kexec.c
+@@ -444,6 +444,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
+ ENCODE_CJTYPE_IMM(val - addr);
+ break;
++ case R_RISCV_ADD16:
++ *(u16 *)loc += val;
++ break;
++ case R_RISCV_SUB16:
++ *(u16 *)loc -= val;
++ break;
+ case R_RISCV_ADD32:
+ *(u32 *)loc += val;
+ break;
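
The purgatory relocator gains R_RISCV_ADD16/SUB16, which simply add or subtract the resolved symbol value on a 16-bit word in place. Demonstrated on a plain buffer:

#include <stdint.h>
#include <stdio.h>

static void apply_add16(uint16_t *loc, uint64_t val) { *loc += (uint16_t)val; }
static void apply_sub16(uint16_t *loc, uint64_t val) { *loc -= (uint16_t)val; }

int main(void)
{
	uint16_t word = 100;

	apply_add16(&word, 28);
	apply_sub16(&word, 8);
	printf("%u\n", word);	/* 120 */
	return 0;
}
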
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 143a2bb3e69760..1f90fee24a8ba8 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -10,9 +10,13 @@
+ #include <asm/asm.h>
+ #include <asm/csr.h>
+ #include <asm/unistd.h>
++#include <asm/page.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/errata_list.h>
++#include <linux/sizes.h>
++
++ .section .irqentry.text, "ax"
+
+ SYM_CODE_START(handle_exception)
+ /*
+@@ -101,6 +105,7 @@ _save_context:
+ 1:
+ tail do_trap_unknown
+ SYM_CODE_END(handle_exception)
++ASM_NOKPROBE(handle_exception)
+
+ /*
+ * The ret_from_exception must be called with interrupt disabled. Here is the
+@@ -167,70 +172,19 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
+ sret
+ #endif
+ SYM_CODE_END(ret_from_exception)
++ASM_NOKPROBE(ret_from_exception)
+
+ #ifdef CONFIG_VMAP_STACK
+ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
+- /*
+-	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
+- * harts are concurrently overflowing their kernel stacks. We could
+- * store any value here, but since we're overflowing the kernel stack
+- * already we only have SP to use as a scratch register. So we just
+-	 * swap in the address of the spinlock, as that's definitely non-zero.
+- *
+- * Pairs with a store_release in handle_bad_stack().
+- */
+-1: la sp, spin_shadow_stack
+- REG_AMOSWAP_AQ sp, sp, (sp)
+- bnez sp, 1b
+-
+- la sp, shadow_stack
+- addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+-
+- //save caller register to shadow stack
+- addi sp, sp, -(PT_SIZE_ON_STACK)
+- REG_S x1, PT_RA(sp)
+- REG_S x5, PT_T0(sp)
+- REG_S x6, PT_T1(sp)
+- REG_S x7, PT_T2(sp)
+- REG_S x10, PT_A0(sp)
+- REG_S x11, PT_A1(sp)
+- REG_S x12, PT_A2(sp)
+- REG_S x13, PT_A3(sp)
+- REG_S x14, PT_A4(sp)
+- REG_S x15, PT_A5(sp)
+- REG_S x16, PT_A6(sp)
+- REG_S x17, PT_A7(sp)
+- REG_S x28, PT_T3(sp)
+- REG_S x29, PT_T4(sp)
+- REG_S x30, PT_T5(sp)
+- REG_S x31, PT_T6(sp)
++ /* we reach here from kernel context, sscratch must be 0 */
++ csrrw x31, CSR_SCRATCH, x31
++ asm_per_cpu sp, overflow_stack, x31
++ li x31, OVERFLOW_STACK_SIZE
++ add sp, sp, x31
++ /* zero out x31 again and restore x31 */
++ xor x31, x31, x31
++ csrrw x31, CSR_SCRATCH, x31
+
+- la ra, restore_caller_reg
+- tail get_overflow_stack
+-
+-restore_caller_reg:
+- //save per-cpu overflow stack
+- REG_S a0, -8(sp)
+- //restore caller register from shadow_stack
+- REG_L x1, PT_RA(sp)
+- REG_L x5, PT_T0(sp)
+- REG_L x6, PT_T1(sp)
+- REG_L x7, PT_T2(sp)
+- REG_L x10, PT_A0(sp)
+- REG_L x11, PT_A1(sp)
+- REG_L x12, PT_A2(sp)
+- REG_L x13, PT_A3(sp)
+- REG_L x14, PT_A4(sp)
+- REG_L x15, PT_A5(sp)
+- REG_L x16, PT_A6(sp)
+- REG_L x17, PT_A7(sp)
+- REG_L x28, PT_T3(sp)
+- REG_L x29, PT_T4(sp)
+- REG_L x30, PT_T5(sp)
+- REG_L x31, PT_T6(sp)
+-
+- //load per-cpu overflow stack
+- REG_L sp, -8(sp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+
+ //save context to overflow stack
+@@ -254,6 +208,7 @@ restore_caller_reg:
+ move a0, sp
+ tail handle_bad_stack
+ SYM_CODE_END(handle_kernel_stack_overflow)
++ASM_NOKPROBE(handle_kernel_stack_overflow)
+ #endif
+
+ SYM_CODE_START(ret_from_fork)
+@@ -264,8 +219,8 @@ SYM_CODE_START(ret_from_fork)
+ jalr s0
+ 1:
+ move a0, sp /* pt_regs */
+- la ra, ret_from_exception
+- tail syscall_exit_to_user_mode
++ call syscall_exit_to_user_mode
++ j ret_from_exception
+ SYM_CODE_END(ret_from_fork)
+
+ /*
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index 3710ea5d160f30..9691fa8f2faa7b 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -88,6 +88,7 @@ relocate_enable_mmu:
+ /* Compute satp for kernel page tables, but don't load it yet */
+ srl a2, a0, PAGE_SHIFT
+ la a1, satp_mode
++ XIP_FIXUP_OFFSET a1
+ REG_L a1, 0(a1)
+ or a2, a2, a1
+
+@@ -304,6 +305,9 @@ clear_bss_done:
+ #else
+ mv a0, a1
+ #endif /* CONFIG_BUILTIN_DTB */
++ /* Set trap vector to spin forever to help debug */
++ la a3, .Lsecondary_park
++ csrw CSR_TVEC, a3
+ call setup_vm
+ #ifdef CONFIG_MMU
+ la a0, early_pg_dir
+diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
+index 2d139b724bc842..ccb0c5d5c63c42 100644
+--- a/arch/riscv/kernel/machine_kexec.c
++++ b/arch/riscv/kernel/machine_kexec.c
+@@ -147,20 +147,12 @@ static void machine_kexec_mask_interrupts(void)
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+- int ret;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+- /*
+- * First try to remove the active state. If this
+- * fails, try to EOI the interrupt.
+- */
+- ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+-
+- if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+- chip->irq_eoi)
++ if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
+index 7c651d55fcbd2f..df4f6fec5d1740 100644
+--- a/arch/riscv/kernel/module.c
++++ b/arch/riscv/kernel/module.c
+@@ -440,7 +440,8 @@ void *module_alloc(unsigned long size)
+ {
+ return __vmalloc_node_range(size, 1, MODULES_VADDR,
+ MODULES_END, GFP_KERNEL,
+- PAGE_KERNEL, 0, NUMA_NO_NODE,
++ PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
++ NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
+ #endif
+diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
+index 13ee7bf589a15e..30e12b310cab73 100644
+--- a/arch/riscv/kernel/patch.c
++++ b/arch/riscv/kernel/patch.c
+@@ -14,6 +14,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/ftrace.h>
+ #include <asm/patch.h>
++#include <asm/sections.h>
+
+ struct patch_insn {
+ void *addr;
+@@ -25,6 +26,14 @@ struct patch_insn {
+ int riscv_patch_in_stop_machine = false;
+
+ #ifdef CONFIG_MMU
++
++static inline bool is_kernel_exittext(uintptr_t addr)
++{
++ return system_state < SYSTEM_RUNNING &&
++ addr >= (uintptr_t)__exittext_begin &&
++ addr < (uintptr_t)__exittext_end;
++}
++
+ /*
+ * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
+ * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
+@@ -35,7 +44,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
+ uintptr_t uintaddr = (uintptr_t) addr;
+ struct page *page;
+
+- if (core_kernel_text(uintaddr))
++ if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+@@ -71,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
+ */
+ lockdep_assert_held(&text_mutex);
+
++ preempt_disable();
++
+ if (across_pages)
+ patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
+
+@@ -83,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
+ if (across_pages)
+ patch_unmap(FIX_TEXT_POKE1);
+
++ preempt_enable();
++
+ return 0;
+ }
+ NOKPROBE_SYMBOL(__patch_insn_set);
+@@ -113,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
+ if (!riscv_patch_in_stop_machine)
+ lockdep_assert_held(&text_mutex);
+
++ preempt_disable();
++
+ if (across_pages)
+ patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
+
+@@ -125,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
+ if (across_pages)
+ patch_unmap(FIX_TEXT_POKE1);
+
++ preempt_enable();
++
+ return ret;
+ }
+ NOKPROBE_SYMBOL(__patch_insn_write);
+diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
+index 3348a61de7d998..2932791e938821 100644
+--- a/arch/riscv/kernel/perf_callchain.c
++++ b/arch/riscv/kernel/perf_callchain.c
+@@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ perf_callchain_store(entry, regs->epc);
+
+ fp = user_backtrace(entry, fp, regs->ra);
+- while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
++ while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
+ fp = user_backtrace(entry, fp, 0);
+ }
+
+diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
+index 68e786c84c949b..f6d4dedffb8422 100644
+--- a/arch/riscv/kernel/pi/cmdline_early.c
++++ b/arch/riscv/kernel/pi/cmdline_early.c
+@@ -38,8 +38,7 @@ static char *get_early_cmdline(uintptr_t dtb_pa)
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ fdt_cmdline_size == 0 /* CONFIG_CMDLINE_FALLBACK */) {
+- strncat(early_cmdline, CONFIG_CMDLINE,
+- COMMAND_LINE_SIZE - fdt_cmdline_size);
++ strlcat(early_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ }
+
+ return early_cmdline;
+diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
+index 7142ec42e889f9..a69dfa610aa857 100644
+--- a/arch/riscv/kernel/probes/ftrace.c
++++ b/arch/riscv/kernel/probes/ftrace.c
+@@ -11,6 +11,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct kprobe_ctlblk *kcb;
+ int bit;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
+index 2f08c14a933d05..fecbbcf40ac3fe 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -28,9 +28,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+
+ p->ainsn.api.restore = (unsigned long)p->addr + offset;
+
+- patch_text(p->ainsn.api.insn, &p->opcode, 1);
+- patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
+- &insn, 1);
++ patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
++ patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1);
+ }
+
+ static void __kprobes arch_prepare_simulate(struct kprobe *p)
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index d3099d67816d05..6c166029079c42 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
+ unsigned long val)
+ {
+ if (index == 0)
+- return false;
++ return true;
+ else if (index <= 31)
+ *((unsigned long *)regs + index) = val;
+ else
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index 194f166b2cc40e..4b3dc8beaf77d3 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -3,6 +3,7 @@
+ #include <linux/highmem.h>
+ #include <linux/ptrace.h>
+ #include <linux/uprobes.h>
++#include <asm/insn.h>
+
+ #include "decode-insn.h"
+
+@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
+ #endif
+ }
+
++bool is_trap_insn(uprobe_opcode_t *insn)
++{
++ return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
++}
++
+ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ {
+ return instruction_pointer(regs);
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index e32d737e039fd4..83e223318822ae 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -26,8 +26,6 @@
+ #include <asm/cpuidle.h>
+ #include <asm/vector.h>
+
+-register unsigned long gp_in_global __asm__("gp");
+-
+ #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ #include <linux/stackprotector.h>
+ unsigned long __stack_chk_guard __read_mostly;
+@@ -186,7 +184,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ if (unlikely(args->fn)) {
+ /* Kernel thread */
+ memset(childregs, 0, sizeof(struct pt_regs));
+- childregs->gp = gp_in_global;
+ /* Supervisor/Machine, irqs on: */
+ childregs->status = SR_PP | SR_PIE;
+
+diff --git a/arch/riscv/kernel/return_address.c b/arch/riscv/kernel/return_address.c
+new file mode 100644
+index 00000000000000..c8115ec8fb304b
+--- /dev/null
++++ b/arch/riscv/kernel/return_address.c
+@@ -0,0 +1,48 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * This code comes from arch/arm64/kernel/return_address.c
++ *
++ * Copyright (C) 2023 SiFive.
++ */
++
++#include <linux/export.h>
++#include <linux/kprobes.h>
++#include <linux/stacktrace.h>
++
++struct return_address_data {
++ unsigned int level;
++ void *addr;
++};
++
++static bool save_return_addr(void *d, unsigned long pc)
++{
++ struct return_address_data *data = d;
++
++ if (!data->level) {
++ data->addr = (void *)pc;
++ return false;
++ }
++
++ --data->level;
++
++ return true;
++}
++NOKPROBE_SYMBOL(save_return_addr);
++
++noinline void *return_address(unsigned int level)
++{
++ struct return_address_data data;
++
++ data.level = level + 3;
++ data.addr = NULL;
++
++ arch_stack_walk(save_return_addr, &data, current, NULL);
++
++ if (!data.level)
++ return data.addr;
++ else
++ return NULL;
++
++}
++EXPORT_SYMBOL_GPL(return_address);
++NOKPROBE_SYMBOL(return_address);
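
return_address(level) drives arch_stack_walk() with a countdown callback: skip level frames (plus three for the walker's own), record the next PC, stop. The callback protocol, exercised over a fake frame list:

#include <stdbool.h>
#include <stdio.h>

struct ra_data {
	unsigned int level;
	unsigned long addr;
};

/* Returns false to stop the walk once the wanted frame is reached. */
static bool save_ra(void *d, unsigned long pc)
{
	struct ra_data *data = d;

	if (!data->level) {
		data->addr = pc;
		return false;
	}
	--data->level;
	return true;
}

int main(void)
{
	unsigned long frames[] = { 0x100, 0x200, 0x300, 0x400 };
	struct ra_data data = { .level = 2, .addr = 0 };

	for (unsigned int i = 0; i < 4 && save_ra(&data, frames[i]); i++)
		;
	printf("%#lx\n", data.addr);	/* 0x300 */
	return 0;
}
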
+diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
+index c672c8ba9a2a6b..5a62ed1da45332 100644
+--- a/arch/riscv/kernel/sbi.c
++++ b/arch/riscv/kernel/sbi.c
+@@ -11,6 +11,7 @@
+ #include <linux/reboot.h>
+ #include <asm/sbi.h>
+ #include <asm/smp.h>
++#include <asm/tlbflush.h>
+
+ /* default SBI version is 0.1 */
+ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
+@@ -376,32 +377,15 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
+ }
+ EXPORT_SYMBOL(sbi_remote_fence_i);
+
+-/**
+- * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
+- * harts for the specified virtual address range.
+- * @cpu_mask: A cpu mask containing all the target harts.
+- * @start: Start of the virtual address
+- * @size: Total size of the virtual address range.
+- *
+- * Return: 0 on success, appropriate linux error code otherwise.
+- */
+-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
+- unsigned long start,
+- unsigned long size)
+-{
+- return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+- cpu_mask, start, size, 0, 0);
+-}
+-EXPORT_SYMBOL(sbi_remote_sfence_vma);
+-
+ /**
+ * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
+- * remote harts for a virtual address range belonging to a specific ASID.
++ * remote harts for a virtual address range belonging to a specific ASID or not.
+ *
+ * @cpu_mask: A cpu mask containing all the target harts.
+ * @start: Start of the virtual address
+ * @size: Total size of the virtual address range.
+- * @asid: The value of address space identifier (ASID).
++ * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
++ * for flushing all address spaces.
+ *
+ * Return: 0 on success, appropriate linux error code otherwise.
+ */
+@@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
+ unsigned long size,
+ unsigned long asid)
+ {
+- return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+- cpu_mask, start, size, asid, 0);
++ if (asid == FLUSH_TLB_NO_ASID)
++ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
++ cpu_mask, start, size, 0, 0);
++ else
++ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
++ cpu_mask, start, size, asid, 0);
+ }
+ EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
+
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index aac853ae4eb74e..ddadee6621f0da 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -40,15 +40,8 @@
+
+ #include "head.h"
+
+-#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
+-struct screen_info screen_info __section(".data") = {
+- .orig_video_lines = 30,
+- .orig_video_cols = 80,
+- .orig_video_mode = 0,
+- .orig_video_ega_bx = 0,
+- .orig_video_isVGA = 1,
+- .orig_video_points = 8
+-};
++#if defined(CONFIG_EFI)
++struct screen_info screen_info __section(".data");
+ #endif
+
+ /*
+@@ -173,6 +166,19 @@ static void __init init_resources(void)
+ if (ret < 0)
+ goto error;
+
++#ifdef CONFIG_KEXEC_CORE
++ if (crashk_res.start != crashk_res.end) {
++ ret = add_resource(&iomem_resource, &crashk_res);
++ if (ret < 0)
++ goto error;
++ }
++ if (crashk_low_res.start != crashk_low_res.end) {
++ ret = add_resource(&iomem_resource, &crashk_low_res);
++ if (ret < 0)
++ goto error;
++ }
++#endif
++
+ #ifdef CONFIG_CRASH_DUMP
+ if (elfcorehdr_size > 0) {
+ elfcorehdr_res.start = elfcorehdr_addr;
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index 21a4d0e111bc5f..88b6220b260879 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -384,30 +384,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+- /* Are we from a system call? */
+- if (regs->cause == EXC_SYSCALL) {
+- /* Avoid additional syscall restarting via ret_from_exception */
+- regs->cause = -1UL;
+- /* If so, check system call restarting.. */
+- switch (regs->a0) {
+- case -ERESTART_RESTARTBLOCK:
+- case -ERESTARTNOHAND:
+- regs->a0 = -EINTR;
+- break;
+-
+- case -ERESTARTSYS:
+- if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+- regs->a0 = -EINTR;
+- break;
+- }
+- fallthrough;
+- case -ERESTARTNOINTR:
+- regs->a0 = regs->orig_a0;
+- regs->epc -= 0x4;
+- break;
+- }
+- }
+-
+ rseq_signal_deliver(ksig, regs);
+
+ /* Set up the stack frame */
+@@ -421,35 +397,66 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+
+ void arch_do_signal_or_restart(struct pt_regs *regs)
+ {
++ unsigned long continue_addr = 0, restart_addr = 0;
++ int retval = 0;
+ struct ksignal ksig;
++ bool syscall = (regs->cause == EXC_SYSCALL);
+
+- if (get_signal(&ksig)) {
+- /* Actually deliver the signal */
+- handle_signal(&ksig, regs);
+- return;
+- }
++ /* If we were from a system call, check for system call restarting */
++ if (syscall) {
++ continue_addr = regs->epc;
++ restart_addr = continue_addr - 4;
++ retval = regs->a0;
+
+- /* Did we come from a system call? */
+- if (regs->cause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->cause = -1UL;
+
+- /* Restart the system call - no handlers present */
+- switch (regs->a0) {
++ /*
++ * Prepare for system call restart. We do this here so that a
++ * debugger will see the already changed PC.
++ */
++ switch (retval) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+- regs->a0 = regs->orig_a0;
+- regs->epc -= 0x4;
+- break;
+ case -ERESTART_RESTARTBLOCK:
+- regs->a0 = regs->orig_a0;
+- regs->a7 = __NR_restart_syscall;
+- regs->epc -= 0x4;
++ regs->a0 = regs->orig_a0;
++ regs->epc = restart_addr;
+ break;
+ }
+ }
+
++ /*
++ * Get the signal to deliver. When running under ptrace, at this point
++ * the debugger may change all of our registers.
++ */
++ if (get_signal(&ksig)) {
++ /*
++ * Depending on the signal settings, we may need to revert the
++ * decision to restart the system call, but skip this if a
++ * debugger has chosen to restart at a different PC.
++ */
++ if (regs->epc == restart_addr &&
++ (retval == -ERESTARTNOHAND ||
++ retval == -ERESTART_RESTARTBLOCK ||
++ (retval == -ERESTARTSYS &&
++ !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
++ regs->a0 = -EINTR;
++ regs->epc = continue_addr;
++ }
++
++ /* Actually deliver the signal */
++ handle_signal(&ksig, regs);
++ return;
++ }
++
++ /*
++ * Handle restarting a different system call. As above, if a debugger
++ * has chosen to restart at a different PC, ignore the restart.
++ */
++ if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK)
++ regs->a7 = __NR_restart_syscall;
++
+ /*
+ * If there is no signal to deliver, we just put the saved
+ * sigmask back.
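
The reworked flow mirrors arm64: rewind the PC to the restart address before get_signal() so an attached debugger observes the final PC, then revert to the continue address only when a handler without SA_RESTART is about to run. The core decision in miniature (errno values are the kernel's; everything else is illustrative):

#include <stdio.h>

#define ERESTARTSYS	512
#define EINTR		4

struct regs { unsigned long epc; long a0; };

static void do_signal(struct regs *r, long sys_ret, int have_handler, int sa_restart)
{
	unsigned long cont = r->epc, restart = cont - 4;

	if (sys_ret == -ERESTARTSYS)
		r->epc = restart;	/* assume restart up front */

	if (have_handler && r->epc == restart &&
	    sys_ret == -ERESTARTSYS && !sa_restart) {
		r->a0 = -EINTR;		/* revert: the handler interrupts */
		r->epc = cont;
	}
}

int main(void)
{
	struct regs r = { .epc = 0x1004, .a0 = -ERESTARTSYS };

	do_signal(&r, -ERESTARTSYS, 1, 0);
	printf("epc=%#lx a0=%ld\n", r.epc, r.a0);	/* epc=0x1004 a0=-4 */
	return 0;
}
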
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 64a9c093aef93a..10e311b2759d39 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -18,10 +18,21 @@
+
+ extern asmlinkage void ret_from_exception(void);
+
++static inline int fp_is_valid(unsigned long fp, unsigned long sp)
++{
++ unsigned long low, high;
++
++ low = sp + sizeof(struct stackframe);
++ high = ALIGN(sp, THREAD_SIZE);
++
++ return !(fp < low || fp > high || fp & 0x07);
++}
++
+ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(void *, unsigned long), void *arg)
+ {
+ unsigned long fp, sp, pc;
++ int graph_idx = 0;
+ int level = 0;
+
+ if (regs) {
+@@ -41,26 +52,24 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ }
+
+ for (;;) {
+- unsigned long low, high;
+ struct stackframe *frame;
+
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
+ break;
+
+- /* Validate frame pointer */
+- low = sp + sizeof(struct stackframe);
+- high = ALIGN(sp, THREAD_SIZE);
+- if (unlikely(fp < low || fp > high || fp & 0x7))
++ if (unlikely(!fp_is_valid(fp, sp)))
+ break;
++
+ /* Unwind stack frame */
+ frame = (struct stackframe *)fp - 1;
+ sp = fp;
+- if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
++ if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
++		/* We hit a function where ra is not saved on the stack */
+ fp = frame->ra;
+ pc = regs->ra;
+ } else {
+ fp = frame->fp;
+- pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
++ pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+ &frame->ra);
+ if (pc == (unsigned long)ret_from_exception) {
+ if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+@@ -148,7 +157,7 @@ unsigned long __get_wchan(struct task_struct *task)
+ return pc;
+ }
+
+-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
++noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
+ {
+ walk_stackframe(task, regs, consume_entry, cookie);
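/*
 * A standalone sketch of the fp_is_valid() bounds check introduced above,
 * compilable outside the kernel. THREAD_SIZE is an illustrative value;
 * the kernel derives it from the configured stack size.
 */
#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE	16384UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct stackframe { uintptr_t fp, ra; };

static bool fp_is_valid(uintptr_t fp, uintptr_t sp)
{
	uintptr_t low  = sp + sizeof(struct stackframe); /* past the current record */
	uintptr_t high = ALIGN_UP(sp, THREAD_SIZE);      /* top of this kernel stack */

	/* the saved fp must lie within the stack and be 8-byte aligned */
	return !(fp < low || fp > high || (fp & 0x07));
}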
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index fae8f610d867fd..2158b7a65d74f7 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -311,6 +311,7 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
+
+ regs->epc += 4;
+ regs->orig_a0 = regs->a0;
++ regs->a0 = -ENOSYS;
+
+ riscv_v_vstate_discard(regs);
+
+@@ -318,8 +319,6 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
+
+ if (syscall >= 0 && syscall < NR_syscalls)
+ syscall_handler(regs, syscall);
+- else if (syscall != -1)
+- regs->a0 = -ENOSYS;
+
+ syscall_exit_to_user_mode(regs);
+ } else {
+@@ -410,48 +409,14 @@ int is_valid_bugaddr(unsigned long pc)
+ #endif /* CONFIG_GENERIC_BUG */
+
+ #ifdef CONFIG_VMAP_STACK
+-/*
+- * Extra stack space that allows us to provide panic messages when the kernel
+- * has overflowed its stack.
+- */
+-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
++DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+ overflow_stack)__aligned(16);
+-/*
+- * A temporary stack for use by handle_kernel_stack_overflow. This is used so
+- * we can call into C code to get the per-hart overflow stack. Usage of this
+- * stack must be protected by spin_shadow_stack.
+- */
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
+-
+-/*
+- * A pseudo spinlock to protect the shadow stack from being used by multiple
+- * harts concurrently. This isn't a real spinlock because the lock side must
+- * be taken without a valid stack and only a single register, it's only taken
+- * while in the process of panicing anyway so the performance and error
+- * checking a proper spinlock gives us doesn't matter.
+- */
+-unsigned long spin_shadow_stack;
+-
+-asmlinkage unsigned long get_overflow_stack(void)
+-{
+- return (unsigned long)this_cpu_ptr(overflow_stack) +
+- OVERFLOW_STACK_SIZE;
+-}
+
+ asmlinkage void handle_bad_stack(struct pt_regs *regs)
+ {
+ unsigned long tsk_stk = (unsigned long)current->stack;
+ unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+
+- /*
+- * We're done with the shadow stack by this point, as we're on the
+- * overflow stack. Tell any other concurrent overflowing harts that
+- * they can proceed with panicing by releasing the pseudo-spinlock.
+- *
+- * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+- */
+- smp_store_release(&spin_shadow_stack, 0);
+-
+ console_verbose();
+
+ pr_emerg("Insufficient stack space to handle exception!\n");
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 378f5b15144356..e867fe465164e2 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -151,51 +151,19 @@
+ #define PRECISION_S 0
+ #define PRECISION_D 1
+
+-#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
+-static inline type load_##type(const type *addr) \
+-{ \
+- type val; \
+- asm (#insn " %0, %1" \
+- : "=&r" (val) : "m" (*addr)); \
+- return val; \
+-}
+-
+-#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
+-static inline void store_##type(type *addr, type val) \
+-{ \
+- asm volatile (#insn " %0, %1\n" \
+- : : "r" (val), "m" (*addr)); \
+-}
++static inline u8 load_u8(const u8 *addr)
++{
++ u8 val;
+
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
+-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
+-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
+-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
+-#if defined(CONFIG_64BIT)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
+-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
+-#else
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
+-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
++ asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+
+-static inline u64 load_u64(const u64 *addr)
+-{
+- return load_u32((u32 *)addr)
+- + ((u64)load_u32((u32 *)addr + 1) << 32);
++ return val;
+ }
+
+-static inline void store_u64(u64 *addr, u64 val)
++static inline void store_u8(u8 *addr, u8 val)
+ {
+- store_u32((u32 *)addr, val);
+- store_u32((u32 *)addr + 1, val >> 32);
++ asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+ }
+-#endif
+
+ static inline ulong get_insn(ulong mepc)
+ {
+@@ -342,16 +310,14 @@ int handle_misaligned_store(struct pt_regs *regs)
+ } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ len = 8;
+ val.data_ulong = GET_RS2S(insn, regs);
+- } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+- ((insn >> SH_RD) & 0x1f)) {
++ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
+ len = 8;
+ val.data_ulong = GET_RS2C(insn, regs);
+ #endif
+ } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ len = 4;
+ val.data_ulong = GET_RS2S(insn, regs);
+- } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+- ((insn >> SH_RD) & 0x1f)) {
++ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
+ len = 4;
+ val.data_ulong = GET_RS2C(insn, regs);
+ } else {
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 6b1dba11bf6dcd..e8aa7c38000755 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -73,13 +73,3 @@ quiet_cmd_vdsold = VDSOLD $@
+ cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
+ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+-
+-# install commands for the unstripped file
+-quiet_cmd_vdso_install = INSTALL $@
+- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+-
+-vdso.so: $(obj)/vdso.so.dbg
+- @mkdir -p $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-vdso_install: vdso.so
+diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
+index d40bec6ac07866..cadf725ef79837 100644
+--- a/arch/riscv/kernel/vdso/hwprobe.c
++++ b/arch/riscv/kernel/vdso/hwprobe.c
+@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+
+ /* This is something we can handle, fill out the pairs. */
+ while (p < end) {
+- if (p->key <= RISCV_HWPROBE_MAX_KEY) {
++ if (riscv_hwprobe_key_is_valid(p->key)) {
+ p->value = avd->all_cpu_hwprobe_values[p->key];
+
+ } else {
+diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
+index 50767647fbc649..8c3daa1b05313a 100644
+--- a/arch/riscv/kernel/vmlinux-xip.lds.S
++++ b/arch/riscv/kernel/vmlinux-xip.lds.S
+@@ -29,10 +29,12 @@ SECTIONS
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ /* we have to discard exit text and such at runtime, not link time */
++ __exittext_begin = .;
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
++ __exittext_end = .;
+
+ .text : {
+ _text = .;
+diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
+index 492dd4b8f3d69a..002ca58dd998cb 100644
+--- a/arch/riscv/kernel/vmlinux.lds.S
++++ b/arch/riscv/kernel/vmlinux.lds.S
+@@ -69,10 +69,12 @@ SECTIONS
+ __soc_builtin_dtb_table_end = .;
+ }
+ /* we have to discard exit text and such at runtime, not link time */
++ __exittext_begin = .;
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
++ __exittext_end = .;
+
+ __init_text_end = .;
+ . = ALIGN(SECTION_ALIGN);
+diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
+index 39e72aa016a4cc..b467ba5ed91000 100644
+--- a/arch/riscv/kvm/aia_aplic.c
++++ b/arch/riscv/kvm/aia_aplic.c
+@@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+
+ sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
+- if (!pending &&
+- ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
+- (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
++ if (sm == APLIC_SOURCECFG_SM_INACTIVE)
+ goto skip_write_pending;
+
++ if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
++ sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
++ if (!pending)
++ goto skip_write_pending;
++ if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
++ sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
++ goto skip_write_pending;
++ if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
++ sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
++ goto skip_write_pending;
++ }
++
+ if (pending)
+ irqd->state |= APLIC_IRQ_STATE_PENDING;
+ else
+@@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
+
+ static bool aplic_read_input(struct aplic *aplic, u32 irq)
+ {
+- bool ret;
+- unsigned long flags;
++ u32 sourcecfg, sm, raw_input, irq_inverted;
+ struct aplic_irq *irqd;
++ unsigned long flags;
++ bool ret = false;
+
+ if (!irq || aplic->nr_irqs <= irq)
+ return false;
+ irqd = &aplic->irqs[irq];
+
+ raw_spin_lock_irqsave(&irqd->lock, flags);
+- ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
++
++ sourcecfg = irqd->sourcecfg;
++ if (sourcecfg & APLIC_SOURCECFG_D)
++ goto skip;
++
++ sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
++ if (sm == APLIC_SOURCECFG_SM_INACTIVE)
++ goto skip;
++
++ raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
++ irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
++ sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
++ ret = !!(raw_input ^ irq_inverted);
++
++skip:
+ raw_spin_unlock_irqrestore(&irqd->lock, flags);
+
+ return ret;
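/*
 * A pure-function sketch of the effective-input computation that
 * aplic_read_input() now performs — not kernel code, and the delegation
 * (APLIC_SOURCECFG_D) check is omitted. The enum values are illustrative;
 * only the XOR with the active-low/falling-edge inversion mirrors the hunk.
 */
#include <stdbool.h>

enum source_mode { SM_INACTIVE, SM_EDGE_RISE, SM_EDGE_FALL, SM_LEVEL_HIGH, SM_LEVEL_LOW };

static bool effective_input(bool raw_input, enum source_mode sm)
{
	bool inverted;

	if (sm == SM_INACTIVE)
		return false; /* inactive sources always read as 0 */

	/* low-level and falling-edge modes see the wire inverted */
	inverted = (sm == SM_LEVEL_LOW || sm == SM_EDGE_FALL);
	return raw_input ^ inverted;
}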
+diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
+index 0eb689351b7d04..5cd407c6a8e4f8 100644
+--- a/arch/riscv/kvm/aia_device.c
++++ b/arch/riscv/kvm/aia_device.c
+@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
+
+ static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
+ {
+- u32 hart, group = 0;
++ u32 hart = 0, group = 0;
+
+- hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+- GENMASK_ULL(aia->nr_hart_bits - 1, 0);
++ if (aia->nr_hart_bits)
++ hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
++ GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+ if (aia->nr_group_bits)
+ group = (addr >> aia->nr_group_shift) &
+ GENMASK_ULL(aia->nr_group_bits - 1, 0);
+diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
+index 6cf23b8adb7129..e808723a85f1b1 100644
+--- a/arch/riscv/kvm/aia_imsic.c
++++ b/arch/riscv/kvm/aia_imsic.c
+@@ -55,6 +55,7 @@ struct imsic {
+ /* IMSIC SW-file */
+ struct imsic_mrif *swfile;
+ phys_addr_t swfile_pa;
++ spinlock_t swfile_extirq_lock;
+ };
+
+ #define imsic_vs_csr_read(__c) \
+@@ -613,12 +614,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
+ {
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ struct imsic_mrif *mrif = imsic->swfile;
++ unsigned long flags;
++
++ /*
++ * The critical section is necessary during external interrupt
++ * updates to avoid the risk of losing interrupts due to potential
++ * interruptions between reading topei and updating pending status.
++ */
++
++ spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
+
+ if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
+ imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
+ kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
+ else
+ kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
++
++ spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
+ }
+
+ static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
+@@ -1039,6 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
+ }
+ imsic->swfile = page_to_virt(swfile_page);
+ imsic->swfile_pa = page_to_phys(swfile_page);
++ spin_lock_init(&imsic->swfile_extirq_lock);
+
+ /* Setup IO device */
+ kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
+diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
+index 068c7459387102..a9e2fd7245e1e9 100644
+--- a/arch/riscv/kvm/mmu.c
++++ b/arch/riscv/kvm/mmu.c
+@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
+ *ptep_level = current_level;
+ ptep = (pte_t *)kvm->arch.pgd;
+ ptep = &ptep[gstage_pte_index(addr, current_level)];
+- while (ptep && pte_val(*ptep)) {
++ while (ptep && pte_val(ptep_get(ptep))) {
+ if (gstage_pte_leaf(ptep)) {
+ *ptep_level = current_level;
+ *ptepp = ptep;
+@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
+ if (current_level) {
+ current_level--;
+ *ptep_level = current_level;
+- ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
++ ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ ptep = &ptep[gstage_pte_index(addr, current_level)];
+ } else {
+ ptep = NULL;
+@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
+ if (gstage_pte_leaf(ptep))
+ return -EEXIST;
+
+- if (!pte_val(*ptep)) {
++ if (!pte_val(ptep_get(ptep))) {
+ if (!pcache)
+ return -ENOMEM;
+ next_ptep = kvm_mmu_memory_cache_alloc(pcache);
+ if (!next_ptep)
+ return -ENOMEM;
+- *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
+- __pgprot(_PAGE_TABLE));
++ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
++ __pgprot(_PAGE_TABLE)));
+ } else {
+ if (gstage_pte_leaf(ptep))
+ return -EEXIST;
+- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
++ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ }
+
+ current_level--;
+ ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+ }
+
+- *ptep = *new_pte;
++ set_pte(ptep, *new_pte);
+ if (gstage_pte_leaf(ptep))
+ gstage_remote_tlb_flush(kvm, current_level, addr);
+
+@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
+
+ BUG_ON(addr & (page_size - 1));
+
+- if (!pte_val(*ptep))
++ if (!pte_val(ptep_get(ptep)))
+ return;
+
+ if (ptep_level && !gstage_pte_leaf(ptep)) {
+- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
++ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ next_ptep_level = ptep_level - 1;
+ ret = gstage_level_to_page_size(next_ptep_level,
+ &next_page_size);
+@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
+ if (op == GSTAGE_OP_CLEAR)
+ set_pte(ptep, __pte(0));
+ else if (op == GSTAGE_OP_WP)
+- set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
++ set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
+ gstage_remote_tlb_flush(kvm, ptep_level, addr);
+ }
+ }
+@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+ &ptep, &ptep_level))
+ return false;
+
+- return pte_young(*ptep);
++ return pte_young(ptep_get(ptep));
+ }
+
+ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
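/*
 * The hunks above replace direct page-table dereferences (*ptep) with
 * ptep_get() and raw assignments with set_pte(). A minimal standalone
 * model of the read side, assuming a pte is a single machine word: the
 * accessor forces one atomic, non-torn load instead of letting the
 * compiler re-read the entry mid-walk. Sketch only, not the kernel's
 * definition.
 */
#include <stdint.h>

typedef struct { uintptr_t pte; } pte_t;

static inline pte_t ptep_get_model(pte_t *ptep)
{
	pte_t v;

	/* read the entry exactly once, atomically at word granularity */
	v.pte = __atomic_load_n(&ptep->pte, __ATOMIC_RELAXED);
	return v;
}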
+diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
+index b7e0e03c69b1e5..d520b25d856167 100644
+--- a/arch/riscv/kvm/vcpu_onereg.c
++++ b/arch/riscv/kvm/vcpu_onereg.c
+@@ -614,9 +614,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_ISA_SINGLE:
+ return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
+- case KVM_REG_RISCV_SBI_MULTI_EN:
++ case KVM_REG_RISCV_ISA_MULTI_EN:
+ return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
+- case KVM_REG_RISCV_SBI_MULTI_DIS:
++ case KVM_REG_RISCV_ISA_MULTI_DIS:
+ return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
+ default:
+ return -ENOENT;
+diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
+index 86391a5061dda9..cee1b9ca4ec481 100644
+--- a/arch/riscv/kvm/vcpu_pmu.c
++++ b/arch/riscv/kvm/vcpu_pmu.c
+@@ -39,7 +39,7 @@ static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
+ u64 sample_period;
+
+ if (!pmc->counter_val)
+- sample_period = counter_val_mask + 1;
++ sample_period = counter_val_mask;
+ else
+ sample_period = (-pmc->counter_val) & counter_val_mask;
+
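/*
 * A worked example for the sample_period change above, runnable as plain
 * C. With a full-width 64-bit counter, counter_val_mask is ~0ULL, so the
 * old "counter_val_mask + 1" wraps to 0 — an invalid sample period.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t counter_val_mask = UINT64_MAX; /* 64-bit wide counter */
	uint64_t counter_val = 0;

	assert(counter_val_mask + 1 == 0); /* the old formula wraps */

	uint64_t sample_period = counter_val ?
		(-counter_val) & counter_val_mask : counter_val_mask;
	assert(sample_period != 0); /* the new formula cannot */
	return 0;
}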
+diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
+index 9cd97091c72330..7a7fe40d0930be 100644
+--- a/arch/riscv/kvm/vcpu_sbi.c
++++ b/arch/riscv/kvm/vcpu_sbi.c
+@@ -91,8 +91,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ run->riscv_sbi.args[3] = cp->a3;
+ run->riscv_sbi.args[4] = cp->a4;
+ run->riscv_sbi.args[5] = cp->a5;
+- run->riscv_sbi.ret[0] = cp->a0;
+- run->riscv_sbi.ret[1] = cp->a1;
++ run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
++ run->riscv_sbi.ret[1] = 0;
+ }
+
+ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
+index 9c454f90fd3da2..2c869f8026a889 100644
+--- a/arch/riscv/mm/Makefile
++++ b/arch/riscv/mm/Makefile
+@@ -13,10 +13,9 @@ endif
+ KCOV_INSTRUMENT_init.o := n
+
+ obj-y += init.o
+-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
++obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
+ obj-y += cacheflush.o
+ obj-y += context.o
+-obj-y += pgtable.o
+ obj-y += pmem.o
+
+ ifeq ($(CONFIG_MMU),y)
+@@ -36,3 +35,4 @@ endif
+
+ obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+ obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
++obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o
+diff --git a/arch/riscv/mm/cache-ops.c b/arch/riscv/mm/cache-ops.c
+new file mode 100644
+index 00000000000000..a993ad11d0eca9
+--- /dev/null
++++ b/arch/riscv/mm/cache-ops.c
+@@ -0,0 +1,17 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
++ */
++
++#include <asm/dma-noncoherent.h>
++
++struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init;
++
++void
++riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
++{
++ if (!ops)
++ return;
++ noncoherent_cache_ops = *ops;
++}
++EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
+diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
+index 217fd4de613422..ba8eb3944687cf 100644
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ if (unlikely(prev == next))
+ return;
+
++ membarrier_arch_switch_mm(prev, next, task);
++
+ /*
+ * Mark the current MM context as inactive, and the next as
+ * active. This is at least used by the icache flushing
+diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
+index b76e7e192eb183..341bd6706b4c56 100644
+--- a/arch/riscv/mm/dma-noncoherent.c
++++ b/arch/riscv/mm/dma-noncoherent.c
+@@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init;
+ int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
+ EXPORT_SYMBOL_GPL(dma_cache_alignment);
+
+-struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
+- .wback = NULL,
+- .inv = NULL,
+- .wback_inv = NULL,
+-};
+-
+ static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
+ {
+ void *vaddr = phys_to_virt(paddr);
+@@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void)
+ if (!noncoherent_supported)
+ dma_cache_alignment = 1;
+ }
+-
+-void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+-{
+- if (!ops)
+- return;
+-
+- noncoherent_cache_ops = *ops;
+-}
+-EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index 90d4ba36d1d062..8960f4c844976f 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
+
+ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
+ {
++ if (!user_mode(regs)) {
++ no_context(regs, addr);
++ return;
++ }
++
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return the userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+- if (!user_mode(regs)) {
+- no_context(regs, addr);
+- return;
+- }
+ pagefault_out_of_memory();
+ return;
+ } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
+ /* Kernel mode? Handle exceptions or die */
+- if (!user_mode(regs)) {
+- no_context(regs, addr);
+- return;
+- }
+ do_trap(regs, SIGBUS, BUS_ADRERR, addr);
+ return;
++ } else if (fault & VM_FAULT_SIGSEGV) {
++ do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
++ return;
+ }
++
+ BUG();
+ }
+
+@@ -136,24 +137,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
+ pgd = (pgd_t *)pfn_to_virt(pfn) + index;
+ pgd_k = init_mm.pgd + index;
+
+- if (!pgd_present(*pgd_k)) {
++ if (!pgd_present(pgdp_get(pgd_k))) {
+ no_context(regs, addr);
+ return;
+ }
+- set_pgd(pgd, *pgd_k);
++ set_pgd(pgd, pgdp_get(pgd_k));
+
+ p4d_k = p4d_offset(pgd_k, addr);
+- if (!p4d_present(*p4d_k)) {
++ if (!p4d_present(p4dp_get(p4d_k))) {
+ no_context(regs, addr);
+ return;
+ }
+
+ pud_k = pud_offset(p4d_k, addr);
+- if (!pud_present(*pud_k)) {
++ if (!pud_present(pudp_get(pud_k))) {
+ no_context(regs, addr);
+ return;
+ }
+- if (pud_leaf(*pud_k))
++ if (pud_leaf(pudp_get(pud_k)))
+ goto flush_tlb;
+
+ /*
+@@ -161,11 +162,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
+ * to copy individual PTEs
+ */
+ pmd_k = pmd_offset(pud_k, addr);
+- if (!pmd_present(*pmd_k)) {
++ if (!pmd_present(pmdp_get(pmd_k))) {
+ no_context(regs, addr);
+ return;
+ }
+- if (pmd_leaf(*pmd_k))
++ if (pmd_leaf(pmdp_get(pmd_k)))
+ goto flush_tlb;
+
+ /*
+@@ -175,7 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
+ * silently loop forever.
+ */
+ pte_k = pte_offset_kernel(pmd_k, addr);
+- if (!pte_present(*pte_k)) {
++ if (!pte_present(ptep_get(pte_k))) {
+ no_context(regs, addr);
+ return;
+ }
+diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
+index b52f0210481fac..5ef2a6891158a6 100644
+--- a/arch/riscv/mm/hugetlbpage.c
++++ b/arch/riscv/mm/hugetlbpage.c
+@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ }
+
+ if (sz == PMD_SIZE) {
+- if (want_pmd_share(vma, addr) && pud_none(*pud))
++ if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
+ pte = huge_pmd_share(mm, vma, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ pmd_t *pmd;
+
+ pgd = pgd_offset(mm, addr);
+- if (!pgd_present(*pgd))
++ if (!pgd_present(pgdp_get(pgd)))
+ return NULL;
+
+ p4d = p4d_offset(pgd, addr);
+- if (!p4d_present(*p4d))
++ if (!p4d_present(p4dp_get(p4d)))
+ return NULL;
+
+ pud = pud_offset(p4d, addr);
+@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ /* must be pud huge, non-present or none */
+ return (pte_t *)pud;
+
+- if (!pud_present(*pud))
++ if (!pud_present(pudp_get(pud)))
+ return NULL;
+
+ pmd = pmd_offset(pud, addr);
+@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ /* must be pmd huge, non-present or none */
+ return (pte_t *)pmd;
+
+- if (!pmd_present(*pmd))
++ if (!pmd_present(pmdp_get(pmd)))
+ return NULL;
+
+ for_each_napot_order(order) {
+@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ return pte;
+ }
+
++unsigned long hugetlb_mask_last_page(struct hstate *h)
++{
++ unsigned long hp_size = huge_page_size(h);
++
++ switch (hp_size) {
++#ifndef __PAGETABLE_PMD_FOLDED
++ case PUD_SIZE:
++ return P4D_SIZE - PUD_SIZE;
++#endif
++ case PMD_SIZE:
++ return PUD_SIZE - PMD_SIZE;
++ case napot_cont_size(NAPOT_CONT64KB_ORDER):
++ return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
++ default:
++ break;
++ }
++
++ return 0UL;
++}
++
+ static pte_t get_clear_contig(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
+ return entry;
+ }
+
++static void clear_flush(struct mm_struct *mm,
++ unsigned long addr,
++ pte_t *ptep,
++ unsigned long pgsize,
++ unsigned long ncontig)
++{
++ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
++ unsigned long i, saddr = addr;
++
++ for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
++ ptep_get_and_clear(mm, addr, ptep);
++
++ flush_tlb_range(&vma, saddr, addr);
++}
++
++/*
++ * When dealing with NAPOT mappings, the privileged specification indicates that
++ * "if an update needs to be made, the OS generally should first mark all of the
++ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
++ * within the range, [...] then update the PTE(s), as described in Section
++ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
++ * arm64.
++ */
+ void set_huge_pte_at(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+ pte_t pte,
+ unsigned long sz)
+ {
+- unsigned long hugepage_shift;
++ unsigned long hugepage_shift, pgsize;
+ int i, pte_num;
+
+ if (sz >= PGDIR_SIZE)
+@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
+ hugepage_shift = PAGE_SHIFT;
+
+ pte_num = sz >> hugepage_shift;
+- for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
++ pgsize = 1 << hugepage_shift;
++
++ if (!pte_present(pte)) {
++ for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
++ set_ptes(mm, addr, ptep, pte, 1);
++ return;
++ }
++
++ if (!pte_napot(pte)) {
++ set_ptes(mm, addr, ptep, pte, 1);
++ return;
++ }
++
++ clear_flush(mm, addr, ptep, pgsize, pte_num);
++
++ for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+ set_pte_at(mm, addr, ptep, pte);
+ }
+
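/*
 * A standalone model of the Break-Before-Make ordering that the NAPOT
 * path of set_huge_pte_at() now follows (the non-present and non-NAPOT
 * shortcuts are omitted). The array and fence stand in for real PTEs and
 * for flush_tlb_range(); sizes are illustrative.
 */
#include <stdint.h>

#define PTE_NUM 16 /* e.g. a 64 KiB NAPOT range of 4 KiB PTEs */

static uint64_t ptes[PTE_NUM];

static void fence_model(void)
{
	/* stands in for flush_tlb_range(): no hart may keep stale entries */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

static void set_napot_range(uint64_t newval)
{
	int i;

	for (i = 0; i < PTE_NUM; i++)
		ptes[i] = 0;      /* break: invalidate every constituent PTE */
	fence_model();            /* SFENCE.VMA over the whole range */
	for (i = 0; i < PTE_NUM; i++)
		ptes[i] = newval; /* make: install the new mapping */
}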
+@@ -293,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
+ pte_t *ptep,
+ unsigned long sz)
+ {
+- pte_t pte = READ_ONCE(*ptep);
++ pte_t pte = ptep_get(ptep);
+ int i, pte_num;
+
+ if (!pte_napot(pte)) {
+@@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
+ pte_clear(mm, addr, ptep);
+ }
+
+-static __init bool is_napot_size(unsigned long size)
++static bool is_napot_size(unsigned long size)
+ {
+ unsigned long order;
+
+@@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);
+
+ #else
+
+-static __init bool is_napot_size(unsigned long size)
++static bool is_napot_size(unsigned long size)
+ {
+ return false;
+ }
+@@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
+ return pmd_leaf(pmd);
+ }
+
+-bool __init arch_hugetlb_valid_size(unsigned long size)
++static bool __hugetlb_valid_size(unsigned long size)
+ {
+ if (size == HPAGE_SIZE)
+ return true;
+@@ -363,6 +421,18 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
+ return false;
+ }
+
++bool __init arch_hugetlb_valid_size(unsigned long size)
++{
++ return __hugetlb_valid_size(size);
++}
++
++#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
++bool arch_hugetlb_migration_supported(struct hstate *h)
++{
++ return __hugetlb_valid_size(huge_page_size(h));
++}
++#endif
++
+ #ifdef CONFIG_CONTIG_ALLOC
+ static __init int gigantic_pages_init(void)
+ {
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 0798bd861dcb9a..3245bb525212e3 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -172,6 +172,9 @@ void __init mem_init(void)
+
+ /* Limit the memory size via mem. */
+ static phys_addr_t memory_limit;
++#ifdef CONFIG_XIP_KERNEL
++#define memory_limit (*(phys_addr_t *)XIP_FIXUP(&memory_limit))
++#endif /* CONFIG_XIP_KERNEL */
+
+ static int __init early_mem(char *p)
+ {
+@@ -214,8 +217,6 @@ static void __init setup_bootmem(void)
+ */
+ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+
+- phys_ram_end = memblock_end_of_DRAM();
+-
+ /*
+ * Make sure we align the start of the memory on a PMD boundary so that
+ * at worst, we map the linear mapping with PMD mappings.
+@@ -227,24 +228,36 @@ static void __init setup_bootmem(void)
+ * In 64-bit, any use of __va/__pa before this point is wrong as we
+ * did not know the start of DRAM before.
+ */
+- if (IS_ENABLED(CONFIG_64BIT))
++ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
+ kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
+
+ /*
+- * memblock allocator is not aware of the fact that last 4K bytes of
+- * the addressable memory can not be mapped because of IS_ERR_VALUE
+- * macro. Make sure that last 4k bytes are not usable by memblock
+- * if end of dram is equal to maximum addressable memory. For 64-bit
+- * kernel, this problem can't happen here as the end of the virtual
+- * address space is occupied by the kernel mapping then this check must
+- * be done as soon as the kernel mapping base address is determined.
++ * The size of the linear page mapping may restrict the amount of
++ * usable RAM.
++ */
++ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
++ max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
++ memblock_cap_memory_range(phys_ram_base,
++ max_mapped_addr - phys_ram_base);
++ }
++
++ /*
++ * Reserve physical address space that would be mapped to virtual
++ * addresses greater than (void *)(-PAGE_SIZE) because:
++ * - This memory would overlap with ERR_PTR
++ * - This memory belongs to high memory, which is not supported
++ *
++	 * This is not applicable to the 64-bit kernel, because virtual
++	 * addresses after (void *)(-PAGE_SIZE) are not linearly mapped: they
++	 * are occupied by the kernel mapping. Also, it is unrealistic for
++	 * high memory to exist on 64-bit platforms.
+ */
+ if (!IS_ENABLED(CONFIG_64BIT)) {
+- max_mapped_addr = __pa(~(ulong)0);
+- if (max_mapped_addr == (phys_ram_end - 1))
+- memblock_set_current_limit(max_mapped_addr - 4096);
++ max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
++ memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
+ }
+
++ phys_ram_end = memblock_end_of_DRAM();
+ min_low_pfn = PFN_UP(phys_ram_base);
+ max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
+ high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
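/*
 * Why the last virtual page is off limits on 32-bit, shown as a runnable
 * model: the kernel encodes errno values as pointers in the top 4095
 * bytes of the address space (the IS_ERR_VALUE range), so linearly
 * mapping RAM up to (void *)(-PAGE_SIZE) would make valid pointers look
 * like errors. Constants are the usual ones; sketch, not kernel code.
 */
#include <assert.h>
#include <stdint.h>

#define MAX_ERRNO	4095UL
#define PAGE_SIZE	4096UL

static int is_err_value(uintptr_t x)
{
	return x >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	/* every address in the last page except its first byte collides */
	assert(is_err_value((uintptr_t)-1));
	assert(is_err_value((uintptr_t)-MAX_ERRNO));
	assert(!is_err_value((uintptr_t)-PAGE_SIZE));
	return 0;
}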
+@@ -664,16 +677,19 @@ void __init create_pgd_mapping(pgd_t *pgdp,
+ static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
+ phys_addr_t size)
+ {
+- if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
+- return PGDIR_SIZE;
++ if (debug_pagealloc_enabled())
++ return PAGE_SIZE;
+
+- if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
++ if (pgtable_l5_enabled &&
++ !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+ return P4D_SIZE;
+
+- if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
++ if (pgtable_l4_enabled &&
++ !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+ return PUD_SIZE;
+
+- if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
++ if (IS_ENABLED(CONFIG_64BIT) &&
++ !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+ return PMD_SIZE;
+
+ return PAGE_SIZE;
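/*
 * The alignment test that best_map_size() applies at each level, reduced
 * to one standalone helper (the l4/l5 and debug_pagealloc gates above are
 * omitted). A mapping size is usable only when both addresses are aligned
 * to it and the remaining region is at least that big.
 */
#include <stdbool.h>
#include <stdint.h>

static bool can_use_map_size(uint64_t pa, uint64_t va, uint64_t size,
			     uint64_t map_size)
{
	return !(pa & (map_size - 1)) && /* physical alignment */
	       !(va & (map_size - 1)) && /* virtual alignment */
	       size >= map_size;         /* enough bytes left */
}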
+@@ -896,7 +912,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
+ PMD_SIZE, PAGE_KERNEL_EXEC);
+
+ /* Map the data in RAM */
+- end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
++ end_va = kernel_map.virt_addr + kernel_map.size;
+ for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
+ create_pgd_mapping(pgdir, va,
+ kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
+@@ -950,7 +966,7 @@ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
+ * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
+ * kernel is mapped in the linear mapping, that makes no difference.
+ */
+- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
++ dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+ #endif
+
+ dtb_early_pa = dtb_pa;
+@@ -1053,18 +1069,23 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ #endif
+
+ kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
+- kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
+
+ #ifdef CONFIG_XIP_KERNEL
++#ifdef CONFIG_64BIT
++ kernel_map.page_offset = PAGE_OFFSET_L3;
++#else
++ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
++#endif
+ kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
+ kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+
+ phys_ram_base = CONFIG_PHYS_RAM_BASE;
+ kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+- kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
++ kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
+
+ kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
+ #else
++ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
+ kernel_map.phys_addr = (uintptr_t)(&_start);
+ kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
+ #endif
+@@ -1257,8 +1278,6 @@ static void __init create_linear_mapping_page_table(void)
+ if (start <= __pa(PAGE_OFFSET) &&
+ __pa(PAGE_OFFSET) < end)
+ start = __pa(PAGE_OFFSET);
+- if (end >= __pa(PAGE_OFFSET) + memory_limit)
+- end = __pa(PAGE_OFFSET) + memory_limit;
+
+ create_linear_mapping_range(start, end, 0);
+ }
+@@ -1494,6 +1513,10 @@ void __init misc_mem_init(void)
+ early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
+ arch_numa_init();
+ sparse_init();
++#ifdef CONFIG_SPARSEMEM_VMEMMAP
++ /* The entire VMEMMAP region has been populated. Flush TLB for this region */
++ local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
++#endif
+ zone_sizes_init();
+ reserve_crashkernel();
+ memblock_dump_all();
+diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
+index 5e39dcf23fdbc1..e962518530373d 100644
+--- a/arch/riscv/mm/kasan_init.c
++++ b/arch/riscv/mm/kasan_init.c
+@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
+ phys_addr_t phys_addr;
+ pte_t *ptep, *p;
+
+- if (pmd_none(*pmd)) {
++ if (pmd_none(pmdp_get(pmd))) {
+ p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ }
+@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
+ ptep = pte_offset_kernel(pmd, vaddr);
+
+ do {
+- if (pte_none(*ptep)) {
++ if (pte_none(ptep_get(ptep))) {
+ phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
+@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
+ pmd_t *pmdp, *p;
+ unsigned long next;
+
+- if (pud_none(*pud)) {
++ if (pud_none(pudp_get(pud))) {
+ p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ }
+@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
+ do {
+ next = pmd_addr_end(vaddr, end);
+
+- if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
++ if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
++ (next - vaddr) >= PMD_SIZE) {
+ phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
+ if (phys_addr) {
+ set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
+ pud_t *pudp, *p;
+ unsigned long next;
+
+- if (p4d_none(*p4d)) {
++ if (p4d_none(p4dp_get(p4d))) {
+ p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+ set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ }
+@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
+ do {
+ next = pud_addr_end(vaddr, end);
+
+- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
++ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
++ (next - vaddr) >= PUD_SIZE) {
+ phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
+ if (phys_addr) {
+ set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
+@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
+ p4d_t *p4dp, *p;
+ unsigned long next;
+
+- if (pgd_none(*pgd)) {
++ if (pgd_none(pgdp_get(pgd))) {
+ p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
+ set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ }
+@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
+ do {
+ next = p4d_addr_end(vaddr, end);
+
+- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
++ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
++ (next - vaddr) >= P4D_SIZE) {
+ phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
+ if (phys_addr) {
+ set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
+@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
+ do {
+ next = pgd_addr_end(vaddr, end);
+
+- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
++ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ (next - vaddr) >= PGDIR_SIZE) {
+ phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+ if (phys_addr) {
+@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
+ if (!pgtable_l4_enabled) {
+ pudp = (pud_t *)p4dp;
+ } else {
+- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
++ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
+ pudp = base_pud + pud_index(vaddr);
+ }
+
+@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
+ if (!pgtable_l5_enabled) {
+ p4dp = (p4d_t *)pgdp;
+ } else {
+- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
++ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
+ p4dp = base_p4d + p4d_index(vaddr);
+ }
+
+@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
+ if (!pgtable_l4_enabled) {
+ pudp = (pud_t *)p4dp;
+ } else {
+- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
++ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
+ pudp = base_pud + pud_index(vaddr);
+ }
+
+ do {
+ next = pud_addr_end(vaddr, end);
+
+- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
++ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ (next - vaddr) >= PUD_SIZE) {
+ phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
+ set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
+@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
+ if (!pgtable_l5_enabled) {
+ p4dp = (p4d_t *)pgdp;
+ } else {
+- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
++ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
+ p4dp = base_p4d + p4d_index(vaddr);
+ }
+
+ do {
+ next = p4d_addr_end(vaddr, end);
+
+- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
++ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ (next - vaddr) >= P4D_SIZE) {
+ phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
+ set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
+@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
+ do {
+ next = pgd_addr_end(vaddr, end);
+
+- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
++ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ (next - vaddr) >= PGDIR_SIZE) {
+ phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
+ set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
+@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
+ do {
+ next = pud_addr_end(vaddr, end);
+
+- if (pud_none(*pud_k)) {
++ if (pud_none(pudp_get(pud_k))) {
+ p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ continue;
+@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
+ do {
+ next = p4d_addr_end(vaddr, end);
+
+- if (p4d_none(*p4d_k)) {
++ if (p4d_none(p4dp_get(p4d_k))) {
+ p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ continue;
+@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
+ do {
+ next = pgd_addr_end(vaddr, end);
+
+- if (pgd_none(*pgd_k)) {
++ if (pgd_none(pgdp_get(pgd_k))) {
+ p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+ continue;
+@@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void)
+
+ /* Copy the last p4d since it is shared with the kernel mapping. */
+ if (pgtable_l5_enabled) {
+- ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
++ ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
+ memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
+ set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
+ pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
+@@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void)
+
+ /* Copy the last pud since it is shared with the kernel mapping. */
+ if (pgtable_l4_enabled) {
+- ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
++ ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
+ memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
+ set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
+ pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 161d0b34c2cb28..271d01a5ba4da1 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -5,6 +5,7 @@
+
+ #include <linux/pagewalk.h>
+ #include <linux/pgtable.h>
++#include <linux/vmalloc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/bitops.h>
+ #include <asm/set_memory.h>
+@@ -25,23 +26,10 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+ return new_val;
+ }
+
+-static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+- unsigned long next, struct mm_walk *walk)
+-{
+- pgd_t val = READ_ONCE(*pgd);
+-
+- if (pgd_leaf(val)) {
+- val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+- set_pgd(pgd, val);
+- }
+-
+- return 0;
+-}
+-
+ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+ {
+- p4d_t val = READ_ONCE(*p4d);
++ p4d_t val = p4dp_get(p4d);
+
+ if (p4d_leaf(val)) {
+ val = __p4d(set_pageattr_masks(p4d_val(val), walk));
+@@ -54,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+ {
+- pud_t val = READ_ONCE(*pud);
++ pud_t val = pudp_get(pud);
+
+ if (pud_leaf(val)) {
+ val = __pud(set_pageattr_masks(pud_val(val), walk));
+@@ -67,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+ {
+- pmd_t val = READ_ONCE(*pmd);
++ pmd_t val = pmdp_get(pmd);
+
+ if (pmd_leaf(val)) {
+ val = __pmd(set_pageattr_masks(pmd_val(val), walk));
+@@ -80,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+ {
+- pte_t val = READ_ONCE(*pte);
++ pte_t val = ptep_get(pte);
+
+ val = __pte(set_pageattr_masks(pte_val(val), walk));
+ set_pte(pte, val);
+@@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ }
+
+ static const struct mm_walk_ops pageattr_ops = {
+- .pgd_entry = pageattr_pgd_entry,
+ .p4d_entry = pageattr_p4d_entry,
+ .pud_entry = pageattr_pud_entry,
+ .pmd_entry = pageattr_pmd_entry,
+@@ -105,12 +92,181 @@ static const struct mm_walk_ops pageattr_ops = {
+ .walk_lock = PGWALK_RDLOCK,
+ };
+
++#ifdef CONFIG_64BIT
++static int __split_linear_mapping_pmd(pud_t *pudp,
++ unsigned long vaddr, unsigned long end)
++{
++ pmd_t *pmdp;
++ unsigned long next;
++
++ pmdp = pmd_offset(pudp, vaddr);
++
++ do {
++ next = pmd_addr_end(vaddr, end);
++
++ if (next - vaddr >= PMD_SIZE &&
++ vaddr <= (vaddr & PMD_MASK) && end >= next)
++ continue;
++
++ if (pmd_leaf(pmdp_get(pmdp))) {
++ struct page *pte_page;
++ unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
++ pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
++ pte_t *ptep_new;
++ int i;
++
++ pte_page = alloc_page(GFP_KERNEL);
++ if (!pte_page)
++ return -ENOMEM;
++
++ ptep_new = (pte_t *)page_address(pte_page);
++ for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
++ set_pte(ptep_new, pfn_pte(pfn + i, prot));
++
++ smp_wmb();
++
++ set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
++ }
++ } while (pmdp++, vaddr = next, vaddr != end);
++
++ return 0;
++}
++
++static int __split_linear_mapping_pud(p4d_t *p4dp,
++ unsigned long vaddr, unsigned long end)
++{
++ pud_t *pudp;
++ unsigned long next;
++ int ret;
++
++ pudp = pud_offset(p4dp, vaddr);
++
++ do {
++ next = pud_addr_end(vaddr, end);
++
++ if (next - vaddr >= PUD_SIZE &&
++ vaddr <= (vaddr & PUD_MASK) && end >= next)
++ continue;
++
++ if (pud_leaf(pudp_get(pudp))) {
++ struct page *pmd_page;
++ unsigned long pfn = _pud_pfn(pudp_get(pudp));
++ pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
++ pmd_t *pmdp_new;
++ int i;
++
++ pmd_page = alloc_page(GFP_KERNEL);
++ if (!pmd_page)
++ return -ENOMEM;
++
++ pmdp_new = (pmd_t *)page_address(pmd_page);
++ for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
++ set_pmd(pmdp_new,
++ pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
++
++ smp_wmb();
++
++ set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
++ }
++
++ ret = __split_linear_mapping_pmd(pudp, vaddr, next);
++ if (ret)
++ return ret;
++ } while (pudp++, vaddr = next, vaddr != end);
++
++ return 0;
++}
++
++static int __split_linear_mapping_p4d(pgd_t *pgdp,
++ unsigned long vaddr, unsigned long end)
++{
++ p4d_t *p4dp;
++ unsigned long next;
++ int ret;
++
++ p4dp = p4d_offset(pgdp, vaddr);
++
++ do {
++ next = p4d_addr_end(vaddr, end);
++
++ /*
++	 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't need
++	 * to split: we'll change the protections on the whole P4D.
++ */
++ if (next - vaddr >= P4D_SIZE &&
++ vaddr <= (vaddr & P4D_MASK) && end >= next)
++ continue;
++
++ if (p4d_leaf(p4dp_get(p4dp))) {
++ struct page *pud_page;
++ unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
++ pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
++ pud_t *pudp_new;
++ int i;
++
++ pud_page = alloc_page(GFP_KERNEL);
++ if (!pud_page)
++ return -ENOMEM;
++
++ /*
++ * Fill the pud level with leaf puds that have the same
++ * protections as the leaf p4d.
++ */
++ pudp_new = (pud_t *)page_address(pud_page);
++ for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
++ set_pud(pudp_new,
++ pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
++
++ /*
++ * Make sure the pud filling is not reordered with the
++	 * p4d store, which could result in seeing a partially
++ * filled pud level.
++ */
++ smp_wmb();
++
++ set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
++ }
++
++ ret = __split_linear_mapping_pud(p4dp, vaddr, next);
++ if (ret)
++ return ret;
++ } while (p4dp++, vaddr = next, vaddr != end);
++
++ return 0;
++}
++
++static int __split_linear_mapping_pgd(pgd_t *pgdp,
++ unsigned long vaddr,
++ unsigned long end)
++{
++ unsigned long next;
++ int ret;
++
++ do {
++ next = pgd_addr_end(vaddr, end);
++ /* We never use PGD mappings for the linear mapping */
++ ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
++ if (ret)
++ return ret;
++ } while (pgdp++, vaddr = next, vaddr != end);
++
++ return 0;
++}
++
++static int split_linear_mapping(unsigned long start, unsigned long end)
++{
++ return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
++}
++#endif /* CONFIG_64BIT */
++
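/*
 * When a leaf entry is split, the freshly filled lower-level table must
 * cover exactly the same physical range as the old leaf. A runnable
 * model of the PFN arithmetic for the PMD case above (the PUD/P4D cases
 * stride by PMD_SIZE/PUD_SIZE instead); the leaf PFN is hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PTRS_PER_PTE	512

int main(void)
{
	uint64_t leaf_pfn = 0x80000; /* a 2 MiB-aligned leaf mapping */
	uint64_t pte_pfn[PTRS_PER_PTE];
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		pte_pfn[i] = leaf_pfn + i; /* consecutive 4 KiB pages */

	/* the 512 new PTEs span one PMD leaf exactly */
	assert((pte_pfn[PTRS_PER_PTE - 1] - leaf_pfn + 1) << PAGE_SHIFT == PMD_SIZE);
	return 0;
}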
+ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+ pgprot_t clear_mask)
+ {
+ int ret;
+ unsigned long start = addr;
+ unsigned long end = start + PAGE_SIZE * numpages;
++ unsigned long __maybe_unused lm_start;
++ unsigned long __maybe_unused lm_end;
+ struct pageattr_masks masks = {
+ .set_mask = set_mask,
+ .clear_mask = clear_mask
+@@ -120,11 +276,72 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+ return 0;
+
+ mmap_write_lock(&init_mm);
++
++#ifdef CONFIG_64BIT
++ /*
++	 * We are about to change the permissions of a kernel mapping, so we must
++ * apply the same changes to its linear mapping alias, which may imply
++ * splitting a huge mapping.
++ */
++
++ if (is_vmalloc_or_module_addr((void *)start)) {
++ struct vm_struct *area = NULL;
++ int i, page_start;
++
++ area = find_vm_area((void *)start);
++ page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;
++
++ for (i = page_start; i < page_start + numpages; ++i) {
++ lm_start = (unsigned long)page_address(area->pages[i]);
++ lm_end = lm_start + PAGE_SIZE;
++
++ ret = split_linear_mapping(lm_start, lm_end);
++ if (ret)
++ goto unlock;
++
++ ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
++ &pageattr_ops, NULL, &masks);
++ if (ret)
++ goto unlock;
++ }
++ } else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
++ if (is_kernel_mapping(start)) {
++ lm_start = (unsigned long)lm_alias(start);
++ lm_end = (unsigned long)lm_alias(end);
++ } else {
++ lm_start = start;
++ lm_end = end;
++ }
++
++ ret = split_linear_mapping(lm_start, lm_end);
++ if (ret)
++ goto unlock;
++
++ ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
++ &pageattr_ops, NULL, &masks);
++ if (ret)
++ goto unlock;
++ }
++
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ &masks);
++
++unlock:
++ mmap_write_unlock(&init_mm);
++
++ /*
++	 * We can't use flush_tlb_kernel_range() here as we may have split a
++	 * hugepage larger than the range being flushed, so let's flush everything.
++ */
++ flush_tlb_all();
++#else
++ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
++ &masks);
++
+ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
++#endif
+
+ return ret;
+ }
+@@ -159,50 +376,44 @@ int set_memory_nx(unsigned long addr, int numpages)
+
+ int set_direct_map_invalid_noflush(struct page *page)
+ {
+- int ret;
+- unsigned long start = (unsigned long)page_address(page);
+- unsigned long end = start + PAGE_SIZE;
+- struct pageattr_masks masks = {
+- .set_mask = __pgprot(0),
+- .clear_mask = __pgprot(_PAGE_PRESENT)
+- };
+-
+- mmap_read_lock(&init_mm);
+- ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+- mmap_read_unlock(&init_mm);
+-
+- return ret;
++ return __set_memory((unsigned long)page_address(page), 1,
++ __pgprot(0), __pgprot(_PAGE_PRESENT));
+ }
+
+ int set_direct_map_default_noflush(struct page *page)
+ {
+- int ret;
+- unsigned long start = (unsigned long)page_address(page);
+- unsigned long end = start + PAGE_SIZE;
+- struct pageattr_masks masks = {
+- .set_mask = PAGE_KERNEL,
+- .clear_mask = __pgprot(0)
+- };
++ return __set_memory((unsigned long)page_address(page), 1,
++ PAGE_KERNEL, __pgprot(_PAGE_EXEC));
++}
+
+- mmap_read_lock(&init_mm);
+- ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+- mmap_read_unlock(&init_mm);
++#ifdef CONFIG_DEBUG_PAGEALLOC
++static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
++{
++ int enable = *(int *)data;
+
+- return ret;
++ unsigned long val = pte_val(ptep_get(pte));
++
++ if (enable)
++ val |= _PAGE_PRESENT;
++ else
++ val &= ~_PAGE_PRESENT;
++
++ set_pte(pte, __pte(val));
++
++ return 0;
+ }
+
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ void __kernel_map_pages(struct page *page, int numpages, int enable)
+ {
+ if (!debug_pagealloc_enabled())
+ return;
+
+- if (enable)
+- __set_memory((unsigned long)page_address(page), numpages,
+- __pgprot(_PAGE_PRESENT), __pgprot(0));
+- else
+- __set_memory((unsigned long)page_address(page), numpages,
+- __pgprot(0), __pgprot(_PAGE_PRESENT));
++ unsigned long start = (unsigned long)page_address(page);
++ unsigned long size = PAGE_SIZE * numpages;
++
++ apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
++
++ flush_tlb_kernel_range(start, start + size);
+ }
+ #endif
+
+@@ -216,29 +427,29 @@ bool kernel_page_present(struct page *page)
+ pte_t *pte;
+
+ pgd = pgd_offset_k(addr);
+- if (!pgd_present(*pgd))
++ if (!pgd_present(pgdp_get(pgd)))
+ return false;
+- if (pgd_leaf(*pgd))
++ if (pgd_leaf(pgdp_get(pgd)))
+ return true;
+
+ p4d = p4d_offset(pgd, addr);
+- if (!p4d_present(*p4d))
++ if (!p4d_present(p4dp_get(p4d)))
+ return false;
+- if (p4d_leaf(*p4d))
++ if (p4d_leaf(p4dp_get(p4d)))
+ return true;
+
+ pud = pud_offset(p4d, addr);
+- if (!pud_present(*pud))
++ if (!pud_present(pudp_get(pud)))
+ return false;
+- if (pud_leaf(*pud))
++ if (pud_leaf(pudp_get(pud)))
+ return true;
+
+ pmd = pmd_offset(pud, addr);
+- if (!pmd_present(*pmd))
++ if (!pmd_present(pmdp_get(pmd)))
+ return false;
+- if (pmd_leaf(*pmd))
++ if (pmd_leaf(pmdp_get(pmd)))
+ return true;
+
+ pte = pte_offset_kernel(pmd, addr);
+- return pte_present(*pte);
++ return pte_present(ptep_get(pte));
+ }
+diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
+index fef4e7328e4905..ef887efcb67900 100644
+--- a/arch/riscv/mm/pgtable.c
++++ b/arch/riscv/mm/pgtable.c
+@@ -5,6 +5,47 @@
+ #include <linux/kernel.h>
+ #include <linux/pgtable.h>
+
++int ptep_set_access_flags(struct vm_area_struct *vma,
++ unsigned long address, pte_t *ptep,
++ pte_t entry, int dirty)
++{
++ if (!pte_same(ptep_get(ptep), entry))
++ __set_pte_at(ptep, entry);
++ /*
++ * update_mmu_cache will unconditionally execute, handling both
++ * the case that the PTE changed and the spurious fault case.
++ */
++ return true;
++}
++
++int ptep_test_and_clear_young(struct vm_area_struct *vma,
++ unsigned long address,
++ pte_t *ptep)
++{
++ if (!pte_young(ptep_get(ptep)))
++ return 0;
++ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
++}
++EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
++
++#ifdef CONFIG_64BIT
++pud_t *pud_offset(p4d_t *p4d, unsigned long address)
++{
++ if (pgtable_l4_enabled)
++ return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
++
++ return (pud_t *)p4d;
++}
++
++p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
++{
++ if (pgtable_l5_enabled)
++ return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
++
++ return (p4d_t *)pgd;
++}
++#endif
++
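/*
 * A standalone sketch of the pattern used by ptep_test_and_clear_young()
 * above: a cheap racy read first, then a real atomic read-modify-write
 * only when the bit might be set. The bit position matches riscv's
 * accessed bit, but this is a model, not the kernel helper.
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_ACCESSED	(1UL << 6)

static bool test_and_clear_young(uint64_t *pte)
{
	if (!(__atomic_load_n(pte, __ATOMIC_RELAXED) & PAGE_ACCESSED))
		return false; /* not young: skip the costlier atomic RMW */

	/* atomically clear the bit and report whether it was still set */
	return __atomic_fetch_and(pte, ~PAGE_ACCESSED, __ATOMIC_RELAXED)
	       & PAGE_ACCESSED;
}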
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+
+ int pud_clear_huge(pud_t *pud)
+ {
+- if (!pud_leaf(READ_ONCE(*pud)))
++ if (!pud_leaf(pudp_get(pud)))
+ return 0;
+ pud_clear(pud);
+ return 1;
+@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
+
+ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+- pmd_t *pmd = pud_pgtable(*pud);
++ pmd_t *pmd = pud_pgtable(pudp_get(pud));
+ int i;
+
+ pud_clear(pud);
+@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+
+ int pmd_clear_huge(pmd_t *pmd)
+ {
+- if (!pmd_leaf(READ_ONCE(*pmd)))
++ if (!pmd_leaf(pmdp_get(pmd)))
+ return 0;
+ pmd_clear(pmd);
+ return 1;
+@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
+
+ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+- pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
++ pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
+
+ pmd_clear(pmd);
+
+@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+- VM_BUG_ON(pmd_trans_huge(*pmdp));
++ VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
+ /*
+ * When leaf PTE entries (regular pages) are collapsed into a leaf
+ * PMD entry (huge page), a valid non-leaf PTE is converted into a
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 20a9f991a6d746..e9090b38f8117c 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -384,6 +384,9 @@ static int __init ptdump_init(void)
+
+ kernel_ptd_info.base_addr = KERN_VIRT_START;
+
++ pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
++ pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
++
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
+ pg_level[i].mask |= pte_bits[j].mask;
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 77be59aadc735e..324e8cd9b50228 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -6,30 +6,29 @@
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
++/*
++ * Flush entire TLB if the number of entries to be flushed is greater
++ * than the threshold below.
++ */
++static unsigned long tlb_flush_all_threshold __read_mostly = 64;
++
++static void local_flush_tlb_range_threshold_asid(unsigned long start,
++ unsigned long size,
++ unsigned long stride,
++ unsigned long asid)
+ {
+- __asm__ __volatile__ ("sfence.vma x0, %0"
+- :
+- : "r" (asid)
+- : "memory");
+-}
++ unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
++ int i;
+
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+- unsigned long asid)
+-{
+- __asm__ __volatile__ ("sfence.vma %0, %1"
+- :
+- : "r" (addr), "r" (asid)
+- : "memory");
+-}
++ if (nr_ptes_in_range > tlb_flush_all_threshold) {
++ local_flush_tlb_all_asid(asid);
++ return;
++ }
+
+-static inline void local_flush_tlb_range(unsigned long start,
+- unsigned long size, unsigned long stride)
+-{
+- if (size <= stride)
+- local_flush_tlb_page(start);
+- else
+- local_flush_tlb_all();
++ for (i = 0; i < nr_ptes_in_range; ++i) {
++ local_flush_tlb_page_asid(start, asid);
++ start += stride;
++ }
+ }
+
+ static inline void local_flush_tlb_range_asid(unsigned long start,
+@@ -37,8 +36,16 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
+ {
+ if (size <= stride)
+ local_flush_tlb_page_asid(start, asid);
+- else
++ else if (size == FLUSH_TLB_MAX_SIZE)
+ local_flush_tlb_all_asid(asid);
++ else
++ local_flush_tlb_range_threshold_asid(start, size, stride, asid);
++}
++
++/* Flush a range of kernel pages without broadcasting */
++void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
++{
++ local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+ }
+
+ static void __ipi_flush_tlb_all(void *info)
+@@ -51,7 +58,7 @@ void flush_tlb_all(void)
+ if (riscv_use_ipi_for_rfence())
+ on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
+ else
+- sbi_remote_sfence_vma(NULL, 0, -1);
++ sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
+ }
+
+ struct flush_tlb_range_data {
+@@ -68,68 +75,62 @@ static void __ipi_flush_tlb_range_asid(void *info)
+ local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
+ }
+
+-static void __ipi_flush_tlb_range(void *info)
+-{
+- struct flush_tlb_range_data *d = info;
+-
+- local_flush_tlb_range(d->start, d->size, d->stride);
+-}
+-
+ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long size, unsigned long stride)
+ {
+ struct flush_tlb_range_data ftd;
+- struct cpumask *cmask = mm_cpumask(mm);
+- unsigned int cpuid;
++ const struct cpumask *cmask;
++ unsigned long asid = FLUSH_TLB_NO_ASID;
+ bool broadcast;
+
+- if (cpumask_empty(cmask))
+- return;
++ if (mm) {
++ unsigned int cpuid;
+
+- cpuid = get_cpu();
+- /* check if the tlbflush needs to be sent to other CPUs */
+- broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+- if (static_branch_unlikely(&use_asid_allocator)) {
+- unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
+-
+- if (broadcast) {
+- if (riscv_use_ipi_for_rfence()) {
+- ftd.asid = asid;
+- ftd.start = start;
+- ftd.size = size;
+- ftd.stride = stride;
+- on_each_cpu_mask(cmask,
+- __ipi_flush_tlb_range_asid,
+- &ftd, 1);
+- } else
+- sbi_remote_sfence_vma_asid(cmask,
+- start, size, asid);
+- } else {
+- local_flush_tlb_range_asid(start, size, stride, asid);
+- }
++ cmask = mm_cpumask(mm);
++ if (cpumask_empty(cmask))
++ return;
++
++ cpuid = get_cpu();
++ /* check if the tlbflush needs to be sent to other CPUs */
++ broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
++
++ if (static_branch_unlikely(&use_asid_allocator))
++ asid = atomic_long_read(&mm->context.id) & asid_mask;
+ } else {
+- if (broadcast) {
+- if (riscv_use_ipi_for_rfence()) {
+- ftd.asid = 0;
+- ftd.start = start;
+- ftd.size = size;
+- ftd.stride = stride;
+- on_each_cpu_mask(cmask,
+- __ipi_flush_tlb_range,
+- &ftd, 1);
+- } else
+- sbi_remote_sfence_vma(cmask, start, size);
+- } else {
+- local_flush_tlb_range(start, size, stride);
+- }
++ cmask = cpu_online_mask;
++ broadcast = true;
+ }
+
+- put_cpu();
++ if (broadcast) {
++ if (riscv_use_ipi_for_rfence()) {
++ ftd.asid = asid;
++ ftd.start = start;
++ ftd.size = size;
++ ftd.stride = stride;
++ on_each_cpu_mask(cmask,
++ __ipi_flush_tlb_range_asid,
++ &ftd, 1);
++ } else
++ sbi_remote_sfence_vma_asid(cmask,
++ start, size, asid);
++ } else {
++ local_flush_tlb_range_asid(start, size, stride, asid);
++ }
++
++ if (mm)
++ put_cpu();
+ }
+
+ void flush_tlb_mm(struct mm_struct *mm)
+ {
+- __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
++ __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
++}
++
++void flush_tlb_mm_range(struct mm_struct *mm,
++ unsigned long start, unsigned long end,
++ unsigned int page_size)
++{
++ __flush_tlb_range(mm, start, end - start, page_size);
+ }
+
+ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+@@ -142,6 +143,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ {
+ __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+ }
++
++void flush_tlb_kernel_range(unsigned long start, unsigned long end)
++{
++ __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
++}
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 8581693e62d396..2f041b5cea970e 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -516,33 +516,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ break;
+ /* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
+ case BPF_ADD | BPF_FETCH:
+- emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
+- rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
++ rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ case BPF_AND | BPF_FETCH:
+- emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
+- rv_amoand_w(rs, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
++ rv_amoand_w(rs, rs, rd, 1, 1), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ case BPF_OR | BPF_FETCH:
+- emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
+- rv_amoor_w(rs, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
++ rv_amoor_w(rs, rs, rd, 1, 1), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ case BPF_XOR | BPF_FETCH:
+- emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
+- rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
++ rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+ /* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
+ case BPF_XCHG:
+- emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
+- rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
++ rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
+ if (!is64)
+ emit_zext_32(rs, ctx);
+ break;
+@@ -740,6 +740,9 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
+ if (ret)
+ return ret;
+
++ /* store prog start time */
++ emit_mv(RV_REG_S1, RV_REG_A0, ctx);
++
+ /* if (__bpf_prog_enter(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+@@ -747,9 +750,6 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
+ /* nop reserved for conditional jump */
+ emit(rv_nop(), ctx);
+
+- /* store prog start time */
+- emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+-
+ /* arg1: &args_off */
+ emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
+ if (!p->jited)
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index ae29e4392664ad..bd4782f23f66df 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -252,13 +252,13 @@ config ARCH_SUPPORTS_KEXEC
+ def_bool y
+
+ config ARCH_SUPPORTS_KEXEC_FILE
+- def_bool CRYPTO && CRYPTO_SHA256 && CRYPTO_SHA256_S390
++ def_bool y
+
+ config ARCH_SUPPORTS_KEXEC_SIG
+ def_bool MODULE_SIG_FORMAT
+
+ config ARCH_SUPPORTS_KEXEC_PURGATORY
+- def_bool KEXEC_FILE
++ def_bool y
+
+ config ARCH_SUPPORTS_CRASH_DUMP
+ def_bool y
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index a53a36ee0731bb..73873e4516866a 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -138,9 +138,6 @@ bzImage: vmlinux
+ zfcpdump:
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
+-
+ archheaders:
+ $(Q)$(MAKE) $(build)=$(syscalls) uapi
+
+@@ -160,6 +157,9 @@ vdso_prepare: prepare0
+ $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+ $(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h)
+
++vdso-install-y += arch/s390/kernel/vdso64/vdso64.so.dbg
++vdso-install-$(CONFIG_COMPAT) += arch/s390/kernel/vdso32/vdso32.so.dbg
++
+ ifdef CONFIG_EXPOLINE_EXTERN
+ modules_prepare: expoline_prepare
+ expoline_prepare: scripts
+diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
+index 7b7521762633f9..4230144645bc5d 100644
+--- a/arch/s390/boot/ipl_parm.c
++++ b/arch/s390/boot/ipl_parm.c
+@@ -272,7 +272,7 @@ void parse_boot_command_line(void)
+ memory_limit = round_down(memparse(val, NULL), PAGE_SIZE);
+
+ if (!strcmp(param, "vmalloc") && val) {
+- vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);
++ vmalloc_size = round_up(memparse(val, NULL), _SEGMENT_SIZE);
+ vmalloc_size_set = 1;
+ }
+
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index d3e48bd9c3944f..655bbcff81ffda 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -31,7 +31,6 @@ unsigned long __bootdata_preserved(max_mappable);
+ unsigned long __bootdata(ident_map_size);
+
+ u64 __bootdata_preserved(stfle_fac_list[16]);
+-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+ struct oldmem_data __bootdata_preserved(oldmem_data);
+
+ struct machine_info machine;
+@@ -212,7 +211,8 @@ static unsigned long setup_kernel_memory_layout(void)
+ VMALLOC_END = MODULES_VADDR;
+
+ /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
+- vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
++ vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
++ vmalloc_size = min(vmalloc_size, vsize);
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
+
+ /* split remaining virtual space between 1:1 mapping & vmemmap array */
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index 442a74f113cbfd..14e1a73ffcfe63 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -360,7 +360,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
+ }
+ pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
+ pud_populate(&init_mm, pud, pmd);
+- } else if (pud_large(*pud)) {
++ } else if (pud_leaf(*pud)) {
+ continue;
+ }
+ pgtable_pmd_populate(pud, addr, next, mode);
+diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
+index 438cd92e60801b..dd06086293106e 100644
+--- a/arch/s390/configs/debug_defconfig
++++ b/arch/s390/configs/debug_defconfig
+@@ -834,7 +834,6 @@ CONFIG_DEBUG_IRQFLAGS=y
+ CONFIG_DEBUG_LIST=y
+ CONFIG_DEBUG_SG=y
+ CONFIG_DEBUG_NOTIFIERS=y
+-CONFIG_DEBUG_CREDENTIALS=y
+ CONFIG_RCU_TORTURE_TEST=m
+ CONFIG_RCU_REF_SCALE_TEST=m
+ CONFIG_RCU_CPU_STALL_TIMEOUT=300
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index c773820e4af90a..c6fe5405de4a4c 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -597,7 +597,9 @@ static int ctr_aes_crypt(struct skcipher_request *req)
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+- cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
++ cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
+ AES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
+index 8b541e44151d4d..55ee5567a5ea92 100644
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -693,9 +693,11 @@ static int ctr_paes_crypt(struct skcipher_request *req)
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
+ while (1) {
+ if (cpacf_kmctr(ctx->fc, &param, buf,
+- walk.src.virt.addr, AES_BLOCK_SIZE,
++ buf, AES_BLOCK_SIZE,
+ walk.iv) == AES_BLOCK_SIZE)
+ break;
+ if (__paes_convert_key(ctx))
+diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
+index b378e2b57ad875..c786538e397c08 100644
+--- a/arch/s390/include/asm/cpacf.h
++++ b/arch/s390/include/asm/cpacf.h
+@@ -166,28 +166,86 @@
+
+ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+
+-/**
+- * cpacf_query() - check if a specific CPACF function is available
+- * @opcode: the opcode of the crypto instruction
+- * @func: the function code to test for
+- *
+- * Executes the query function for the given crypto instruction @opcode
+- * and checks if @func is available
+- *
+- * Returns 1 if @func is available for @opcode, 0 otherwise
++/*
++ * Prototype for a nonexistent function to produce a link
++ * error if __cpacf_query() or __cpacf_check_opcode() is used
++ * with an invalid compile time const opcode.
+ */
+-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
++void __cpacf_bad_opcode(void);
++
++static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
++ cpacf_mask_t *mask)
+ {
+ asm volatile(
+- " lghi 0,0\n" /* query function */
+- " lgr 1,%[mask]\n"
+- " spm 0\n" /* pckmo doesn't change the cc */
+- /* Parameter regs are ignored, but must be nonzero and unique */
+- "0: .insn rrf,%[opc] << 16,2,4,6,0\n"
+- " brc 1,0b\n" /* handle partial completion */
+- : "=m" (*mask)
+- : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
+- : "cc", "0", "1");
++ " la %%r1,%[mask]\n"
++ " xgr %%r0,%%r0\n"
++ " .insn rre,%[opc] << 16,%[r1],%[r2]\n"
++ : [mask] "=R" (*mask)
++ : [opc] "i" (opc),
++ [r1] "i" (r1), [r2] "i" (r2)
++ : "cc", "r0", "r1");
++}
++
++static __always_inline void __cpacf_query_rrf(u32 opc,
++ u8 r1, u8 r2, u8 r3, u8 m4,
++ cpacf_mask_t *mask)
++{
++ asm volatile(
++ " la %%r1,%[mask]\n"
++ " xgr %%r0,%%r0\n"
++ " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
++ : [mask] "=R" (*mask)
++ : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
++ [r3] "i" (r3), [m4] "i" (m4)
++ : "cc", "r0", "r1");
++}
++
++static __always_inline void __cpacf_query(unsigned int opcode,
++ cpacf_mask_t *mask)
++{
++ switch (opcode) {
++ case CPACF_KDSA:
++ __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
++ break;
++ case CPACF_KIMD:
++ __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
++ break;
++ case CPACF_KLMD:
++ __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
++ break;
++ case CPACF_KM:
++ __cpacf_query_rre(CPACF_KM, 2, 4, mask);
++ break;
++ case CPACF_KMA:
++ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
++ break;
++ case CPACF_KMAC:
++ __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
++ break;
++ case CPACF_KMC:
++ __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
++ break;
++ case CPACF_KMCTR:
++ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
++ break;
++ case CPACF_KMF:
++ __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
++ break;
++ case CPACF_KMO:
++ __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
++ break;
++ case CPACF_PCC:
++ __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
++ break;
++ case CPACF_PCKMO:
++ __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
++ break;
++ case CPACF_PRNO:
++ __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
++ break;
++ default:
++ __cpacf_bad_opcode();
++ }
+ }
+
+ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
+@@ -211,10 +269,21 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
+ case CPACF_KMA:
+ return test_facility(146); /* check for MSA8 */
+ default:
+- BUG();
++ __cpacf_bad_opcode();
++ return 0;
+ }
+ }
+
++/**
++ * cpacf_query() - check if a specific CPACF function is available
++ * @opcode: the opcode of the crypto instruction
++ * @func: the function code to test for
++ *
++ * Executes the query function for the given crypto instruction @opcode
++ * and checks if @func is available
++ *
++ * Returns 1 if @func is available for @opcode, 0 otherwise
++ */
+ static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+ {
+ if (__cpacf_check_opcode(opcode)) {
+diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
+index 4f21ae561e4ddc..390906b8e386e6 100644
+--- a/arch/s390/include/asm/dwarf.h
++++ b/arch/s390/include/asm/dwarf.h
+@@ -9,6 +9,7 @@
+ #define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+ #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+ #define CFI_RESTORE .cfi_restore
++#define CFI_REL_OFFSET .cfi_rel_offset
+
+ #ifdef CONFIG_AS_CFI_VAL_OFFSET
+ #define CFI_VAL_OFFSET .cfi_val_offset
+diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
+index fdd319a622b065..622cd08e5f50fd 100644
+--- a/arch/s390/include/asm/entry-common.h
++++ b/arch/s390/include/asm/entry-common.h
+@@ -55,7 +55,7 @@ static __always_inline void arch_exit_to_user_mode(void)
+ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+ {
+- choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
++ choose_random_kstack_offset(get_tod_clock_fast());
+ }
+
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index 94b6919026dfb8..953d42205ea83e 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -60,8 +60,10 @@ static inline int test_facility(unsigned long nr)
+ unsigned long facilities_als[] = { FACILITIES_ALS };
+
+ if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
+- if (__test_facility(nr, &facilities_als))
+- return 1;
++ if (__test_facility(nr, &facilities_als)) {
++ if (!__is_defined(__DECOMPRESSOR))
++ return 1;
++ }
+ }
+ return __test_facility(nr, &stfle_fac_list);
+ }
+diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
+index b714ed0ef68853..9acf48e53a87fb 100644
+--- a/arch/s390/include/asm/fpu/api.h
++++ b/arch/s390/include/asm/fpu/api.h
+@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
+ #define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
+
+ #define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
+-#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
++#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
+
+ struct kernel_fpu;
+
+diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
+index 5cc46e0dde620d..9725586f42597c 100644
+--- a/arch/s390/include/asm/gmap.h
++++ b/arch/s390/include/asm/gmap.h
+@@ -146,7 +146,7 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
+
+ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+ unsigned long gaddr, unsigned long vmaddr);
+-int gmap_mark_unmergeable(void);
++int s390_disable_cow_sharing(void);
+ void s390_unlist_old_asce(struct gmap *gmap);
+ int s390_replace_asce(struct gmap *gmap);
+ void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
+diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
+index 4453ad7c11aced..36a9c7740c437a 100644
+--- a/arch/s390/include/asm/io.h
++++ b/arch/s390/include/asm/io.h
+@@ -16,8 +16,10 @@
+ #include <asm/pci_io.h>
+
+ #define xlate_dev_mem_ptr xlate_dev_mem_ptr
++#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
+ void *xlate_dev_mem_ptr(phys_addr_t phys);
+ #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
++#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
+
+ #define IO_SPACE_LIMIT 0
+diff --git a/arch/s390/include/asm/irq_work.h b/arch/s390/include/asm/irq_work.h
+index 603783766d0abb..f00c9f610d5a8e 100644
+--- a/arch/s390/include/asm/irq_work.h
++++ b/arch/s390/include/asm/irq_work.h
+@@ -7,6 +7,4 @@ static inline bool arch_irq_work_has_interrupt(void)
+ return true;
+ }
+
+-void arch_irq_work_raise(void);
+-
+ #endif /* _ASM_S390_IRQ_WORK_H */
+diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
+index 895f774bbcc553..bf78cf381dfcda 100644
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -25,7 +25,7 @@
+ */
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("0: brcl 0,%l[label]\n"
++ asm goto("0: brcl 0,%l[label]\n"
+ ".pushsection __jump_table,\"aw\"\n"
+ ".balign 8\n"
+ ".long 0b-.,%l[label]-.\n"
+@@ -39,7 +39,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("0: brcl 15,%l[label]\n"
++ asm goto("0: brcl 15,%l[label]\n"
+ ".pushsection __jump_table,\"aw\"\n"
+ ".balign 8\n"
+ ".long 0b-.,%l[label]-.\n"
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index 427f9528a7b694..b039881c277a79 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
+ u64 instruction_io_other;
+ u64 instruction_lpsw;
+ u64 instruction_lpswe;
++ u64 instruction_lpswey;
+ u64 instruction_pfmf;
+ u64 instruction_ptff;
+ u64 instruction_sck;
+@@ -777,6 +778,13 @@ struct kvm_vm_stat {
+ u64 inject_service_signal;
+ u64 inject_virtio;
+ u64 aen_forward;
++ u64 gmap_shadow_create;
++ u64 gmap_shadow_reuse;
++ u64 gmap_shadow_r1_entry;
++ u64 gmap_shadow_r2_entry;
++ u64 gmap_shadow_r3_entry;
++ u64 gmap_shadow_sg_entry;
++ u64 gmap_shadow_pg_entry;
+ };
+
+ struct kvm_arch_memory_slot {
+diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
+index 829d68e2c68582..a9e5db0f2836e4 100644
+--- a/arch/s390/include/asm/mmu.h
++++ b/arch/s390/include/asm/mmu.h
+@@ -33,6 +33,11 @@ typedef struct {
+ unsigned int uses_skeys:1;
+ /* The mmu context uses CMM. */
+ unsigned int uses_cmm:1;
++ /*
++ * The mmu context allows COW-sharing of memory pages (KSM, zeropage).
++ * Note that COW-sharing during fork() is currently always allowed.
++ */
++ unsigned int allow_cow_sharing:1;
+ /* The gmaps associated with this context are allowed to use huge pages. */
+ unsigned int allow_gmap_hpage_1m:1;
+ } mm_context_t;
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 2a38af5a00c2df..8df6d09e9ca871 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -36,6 +36,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ mm->context.has_pgste = 0;
+ mm->context.uses_skeys = 0;
+ mm->context.uses_cmm = 0;
++ mm->context.allow_cow_sharing = 1;
+ mm->context.allow_gmap_hpage_1m = 0;
+ #endif
+ switch (mm->context.asce_limit) {
+diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
+index 287bb88f76986e..2686bee800e3d5 100644
+--- a/arch/s390/include/asm/pci_io.h
++++ b/arch/s390/include/asm/pci_io.h
+@@ -11,6 +11,8 @@
+ /* I/O size constraints */
+ #define ZPCI_MAX_READ_SIZE 8
+ #define ZPCI_MAX_WRITE_SIZE 128
++#define ZPCI_BOUNDARY_SIZE (1 << 12)
++#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
+
+ /* I/O Map */
+ #define ZPCI_IOMAP_SHIFT 48
+@@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
+ int zpci_write_block(volatile void __iomem *dst, const void *src,
+ unsigned long len);
+
+-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
++static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
+ {
+- int count = len > max ? max : len, size = 1;
++ int offset = dst & ZPCI_BOUNDARY_MASK;
++ int size;
+
+- while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+- dst = dst >> 1;
+- src = src >> 1;
+- size = size << 1;
+- }
+- return size;
++ size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
++ if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
++ return size;
++
++ if (size >= 8)
++ return 8;
++ return rounddown_pow_of_two(size);
+ }
+
+ static inline int zpci_memcpy_fromio(void *dst,
+@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
+ int size, rc = 0;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) src,
+- (u64) dst, n,
+- ZPCI_MAX_READ_SIZE);
++ size = zpci_get_max_io_size((u64 __force) src,
++ (u64) dst, n,
++ ZPCI_MAX_READ_SIZE);
+ rc = zpci_read_single(dst, src, size);
+ if (rc)
+ break;
+@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ return -EINVAL;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) dst,
+- (u64) src, n,
+- ZPCI_MAX_WRITE_SIZE);
++ size = zpci_get_max_io_size((u64 __force) dst,
++ (u64) src, n,
++ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = zpci_write_block(dst, src, size);
+ else
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index fb3ee7758b7650..da2e91b5b19250 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -565,10 +565,20 @@ static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
+ }
+
+ /*
+- * In the case that a guest uses storage keys
+- * faults should no longer be backed by zero pages
++ * As soon as the guest uses storage keys or enables PV, we deduplicate all
++ * mapped shared zeropages and prevent new shared zeropages from getting
++ * mapped.
+ */
+-#define mm_forbids_zeropage mm_has_pgste
++#define mm_forbids_zeropage mm_forbids_zeropage
++static inline int mm_forbids_zeropage(struct mm_struct *mm)
++{
++#ifdef CONFIG_PGSTE
++ if (!mm->context.allow_cow_sharing)
++ return 1;
++#endif
++ return 0;
++}
++
+ static inline int mm_uses_skeys(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_PGSTE
+@@ -729,7 +739,7 @@ static inline int pud_bad(pud_t pud)
+ {
+ unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
+
+- if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
++ if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
+ return 1;
+ if (type < _REGION_ENTRY_TYPE_R3)
+ return 0;
+@@ -1396,7 +1406,7 @@ static inline unsigned long pud_deref(pud_t pud)
+ unsigned long origin_mask;
+
+ origin_mask = _REGION_ENTRY_ORIGIN;
+- if (pud_large(pud))
++ if (pud_leaf(pud))
+ origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
+ return (unsigned long)__va(pud_val(pud) & origin_mask);
+ }
+@@ -1764,8 +1774,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+ {
+- pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
++ pmd_t pmd;
+
++ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
++ pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+ return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ }
+
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index dc17896a001a92..e7338ed540d8fc 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -308,8 +308,8 @@ static inline void __load_psw(psw_t psw)
+ */
+ static __always_inline void __load_psw_mask(unsigned long mask)
+ {
++ psw_t psw __uninitialized;
+ unsigned long addr;
+- psw_t psw;
+
+ psw.mask = mask;
+
+diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
+index 9286430fe7290b..35c1d1b860d88a 100644
+--- a/arch/s390/include/asm/syscall_wrapper.h
++++ b/arch/s390/include/asm/syscall_wrapper.h
+@@ -63,10 +63,6 @@
+ cond_syscall(__s390x_sys_##name); \
+ cond_syscall(__s390_sys_##name)
+
+-#define SYS_NI(name) \
+- SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
+- SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
+-
+ #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ long __s390_compat_sys##name(struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
+@@ -85,15 +81,11 @@
+
+ /*
+ * As some compat syscalls may not be implemented, we need to expand
+- * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
+- * kernel/time/posix-stubs.c to cover this case as well.
++ * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
+ */
+ #define COND_SYSCALL_COMPAT(name) \
+ cond_syscall(__s390_compat_sys_##name)
+
+-#define COMPAT_SYS_NI(name) \
+- SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)
+-
+ #define __S390_SYS_STUBx(x, name, ...) \
+ long __s390_sys##name(struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
+@@ -124,9 +116,6 @@
+ #define COND_SYSCALL(name) \
+ cond_syscall(__s390x_sys_##name)
+
+-#define SYS_NI(name) \
+- SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers)
+-
+ #define __S390_SYS_STUBx(x, fullname, name, ...)
+
+ #endif /* CONFIG_COMPAT */
+diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
+index 0e7bd3873907f7..b2e2f9a4163c5c 100644
+--- a/arch/s390/include/asm/uv.h
++++ b/arch/s390/include/asm/uv.h
+@@ -442,7 +442,10 @@ static inline int share(unsigned long addr, u16 cmd)
+
+ if (!uv_call(0, (u64)&uvcb))
+ return 0;
+- return -EINVAL;
++ pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
++ uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
++ uvcb.header.rc, uvcb.header.rrc);
++ panic("System security cannot be guaranteed unless the system panics now.\n");
+ }
+
+ /*
+diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
+index 56254fa06f9906..4f266903022091 100644
+--- a/arch/s390/kernel/cache.c
++++ b/arch/s390/kernel/cache.c
+@@ -166,5 +166,6 @@ int populate_cache_leaves(unsigned int cpu)
+ ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
+ }
+ }
++ this_cpu_ci->cpu_map_populated = true;
+ return 0;
+ }
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 442ce0489e1a1e..3a54733e4fc65b 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -258,15 +258,9 @@ static inline void save_vector_registers(void)
+ #endif
+ }
+
+-static inline void setup_control_registers(void)
++static inline void setup_low_address_protection(void)
+ {
+- unsigned long reg;
+-
+- __ctl_store(reg, 0, 0);
+- reg |= CR0_LOW_ADDRESS_PROTECTION;
+- reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
+- reg |= CR0_EXTERNAL_CALL_SUBMASK;
+- __ctl_load(reg, 0, 0);
++ __ctl_set_bit(0, 28);
+ }
+
+ static inline void setup_access_registers(void)
+@@ -314,7 +308,7 @@ void __init startup_init(void)
+ save_vector_registers();
+ setup_topology();
+ sclp_early_detect();
+- setup_control_registers();
++ setup_low_address_protection();
+ setup_access_registers();
+ lockdep_on();
+ }
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 49a11f6dd7ae9a..26c08ee8774077 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -653,6 +653,7 @@ SYM_DATA_START_LOCAL(daton_psw)
+ SYM_DATA_END(daton_psw)
+
+ .section .rodata, "a"
++ .balign 8
+ #define SYSCALL(esame,emu) .quad __s390x_ ## esame
+ SYM_DATA_START(sys_call_table)
+ #include "asm/syscall_table.h"
+diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
+index c46381ea04ecb1..7f6f8c438c2654 100644
+--- a/arch/s390/kernel/ftrace.c
++++ b/arch/s390/kernel/ftrace.c
+@@ -296,6 +296,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct kprobe *p;
+ int bit;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 05e51666db033f..a3d3cb39b021a7 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -666,6 +666,7 @@ static int __init ipl_init(void)
+ &ipl_ccw_attr_group_lpar);
+ break;
+ case IPL_TYPE_ECKD:
++ case IPL_TYPE_ECKD_DUMP:
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
+ break;
+ case IPL_TYPE_FCP:
+@@ -961,8 +962,8 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
+ scpdata_len += padding;
+ }
+
+- reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
+- reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
++ reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len;
++ reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len;
+ reipl_block_nvme->nvme.scp_data_len = scpdata_len;
+
+ return count;
+@@ -1857,9 +1858,9 @@ static int __init dump_nvme_init(void)
+ }
+ dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
+ dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
+- dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
+- dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
+- dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
++ dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
++ dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
++ dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
+ dump_capabilities |= DUMP_TYPE_NVME;
+ return 0;
+ }
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 850c11ea631a6b..5466e7bada03d2 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -556,25 +556,31 @@ static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth)
+ struct cf_trailer_entry *trailer_start, *trailer_stop;
+ struct cf_ctrset_entry *ctrstart, *ctrstop;
+ size_t offset = 0;
++ int i;
+
+- auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
+- do {
++ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+ ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset);
+ ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset);
+
++ /* Counter set not authorized */
++ if (!(auth & cpumf_ctr_ctl[i]))
++ continue;
++ /* Counter set size zero was not saved */
++ if (!cpum_cf_read_setsize(i))
++ continue;
++
+ if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
+ pr_err_once("cpum_cf_diag counter set compare error "
+ "in set %i\n", ctrstart->set);
+ return 0;
+ }
+- auth &= ~cpumf_ctr_ctl[ctrstart->set];
+ if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
+ cfdiag_diffctrset((u64 *)(ctrstart + 1),
+ (u64 *)(ctrstop + 1), ctrstart->ctr);
+ offset += ctrstart->ctr * sizeof(u64) +
+ sizeof(*ctrstart);
+ }
+- } while (ctrstart->def && auth);
++ }
+
+ /* Save time_stamp from start of event in stop's trailer */
+ trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset);
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 06efad5b4f931b..a3169193775f71 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1463,7 +1463,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ unsigned long range, i, range_scan, idx, head, base, offset;
+ struct hws_trailer_entry *te;
+
+- if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
++ if (handle->head & ~PAGE_MASK)
+ return -EINVAL;
+
+ aux->head = handle->head >> PAGE_SHIFT;
+@@ -1642,7 +1642,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
+ unsigned long num_sdb;
+
+ aux = perf_get_aux(handle);
+- if (WARN_ON_ONCE(!aux))
++ if (!aux)
+ return;
+
+ /* Inform user space new data arrived */
+@@ -1661,7 +1661,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
+ num_sdb);
+ break;
+ }
+- if (WARN_ON_ONCE(!aux))
++ if (!aux)
+ return;
+
+ /* Update head and alert_mark to new position */
+@@ -1896,12 +1896,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
+ {
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
+
+- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
++ if (!(event->hw.state & PERF_HES_STOPPED))
+ return;
+-
+- if (flags & PERF_EF_RELOAD)
+- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+-
+ perf_pmu_disable(event->pmu);
+ event->hw.state = 0;
+ cpuhw->lsctl.cs = 1;
+diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
+index fe7d1774ded184..4a4e914c283c80 100644
+--- a/arch/s390/kernel/perf_pai_crypto.c
++++ b/arch/s390/kernel/perf_pai_crypto.c
+@@ -646,7 +646,7 @@ static int __init attr_event_init(void)
+ for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
+ ret = attr_event_init_one(attrs, i);
+ if (ret) {
+- attr_event_free(attrs, i - 1);
++ attr_event_free(attrs, i);
+ return ret;
+ }
+ }
+diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
+index c57c1a203256fb..b5febe22d05464 100644
+--- a/arch/s390/kernel/perf_pai_ext.c
++++ b/arch/s390/kernel/perf_pai_ext.c
+@@ -607,7 +607,7 @@ static int __init attr_event_init(void)
+ for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
+ ret = attr_event_init_one(attrs, i);
+ if (ret) {
+- attr_event_free(attrs, i - 1);
++ attr_event_free(attrs, i);
+ return ret;
+ }
+ }
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index ea244a73efad9d..512b8147375935 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -385,6 +385,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+ /*
+ * floating point control reg. is in the thread structure
+ */
++ save_fpu_regs();
+ if ((unsigned int) data != 0 ||
+ test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+ return -EINVAL;
+@@ -741,6 +742,7 @@ static int __poke_user_compat(struct task_struct *child,
+ /*
+ * floating point control reg. is in the thread structure
+ */
++ save_fpu_regs();
+ if (test_fp_ctl(tmp))
+ return -EINVAL;
+ child->thread.fpu.fpc = data;
+@@ -904,9 +906,7 @@ static int s390_fpregs_set(struct task_struct *target,
+ int rc = 0;
+ freg_t fprs[__NUM_FPRS];
+
+- if (target == current)
+- save_fpu_regs();
+-
++ save_fpu_regs();
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ else
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index de6ad0fb2328a1..d48c7afe97e628 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -155,7 +155,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support);
+ EXPORT_SYMBOL(zlib_dfltcc_support);
+ u64 __bootdata_preserved(stfle_fac_list[16]);
+ EXPORT_SYMBOL(stfle_fac_list);
+-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
++u64 alt_stfle_fac_list[16];
+ struct oldmem_data __bootdata_preserved(oldmem_data);
+
+ unsigned long VMALLOC_START;
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index a4edb7ea66ea76..c63be2efd68952 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -1013,12 +1013,12 @@ void __init smp_fill_possible_mask(void)
+
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+- /* request the 0x1201 emergency signal external interrupt */
+ if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
+ panic("Couldn't request external interrupt 0x1201");
+- /* request the 0x1202 external call external interrupt */
++ ctl_set_bit(0, 14);
+ if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
+ panic("Couldn't request external interrupt 0x1202");
++ ctl_set_bit(0, 13);
+ }
+
+ void __init smp_prepare_boot_cpu(void)
+diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
+index 0122cc156952cf..51cc3616d5f98a 100644
+--- a/arch/s390/kernel/syscalls/syscall.tbl
++++ b/arch/s390/kernel/syscalls/syscall.tbl
+@@ -418,7 +418,7 @@
+ 412 32 utimensat_time64 - sys_utimensat
+ 413 32 pselect6_time64 - compat_sys_pselect6_time64
+ 414 32 ppoll_time64 - compat_sys_ppoll_time64
+-416 32 io_pgetevents_time64 - sys_io_pgetevents
++416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
+ 417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
+ 418 32 mq_timedsend_time64 - sys_mq_timedsend
+ 419 32 mq_timedreceive_time64 - sys_mq_timedreceive
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index fc07bc39e69839..81fdee22a497df 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -181,36 +181,36 @@ int uv_convert_owned_from_secure(unsigned long paddr)
+ }
+
+ /*
+- * Calculate the expected ref_count for a page that would otherwise have no
++ * Calculate the expected ref_count for a folio that would otherwise have no
+ * further pins. This was cribbed from similar functions in other places in
+ * the kernel, but with some slight modifications. We know that a secure
+- * page can not be a huge page for example.
++ * folio can not be a large folio, for example.
+ */
+-static int expected_page_refs(struct page *page)
++static int expected_folio_refs(struct folio *folio)
+ {
+ int res;
+
+- res = page_mapcount(page);
+- if (PageSwapCache(page)) {
++ res = folio_mapcount(folio);
++ if (folio_test_swapcache(folio)) {
+ res++;
+- } else if (page_mapping(page)) {
++ } else if (folio_mapping(folio)) {
+ res++;
+- if (page_has_private(page))
++ if (folio->private)
+ res++;
+ }
+ return res;
+ }
+
+-static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
++static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
+ {
+ int expected, cc = 0;
+
+- if (PageWriteback(page))
++ if (folio_test_writeback(folio))
+ return -EAGAIN;
+- expected = expected_page_refs(page);
+- if (!page_ref_freeze(page, expected))
++ expected = expected_folio_refs(folio);
++ if (!folio_ref_freeze(folio, expected))
+ return -EBUSY;
+- set_bit(PG_arch_1, &page->flags);
++ set_bit(PG_arch_1, &folio->flags);
+ /*
+ * If the UVC does not succeed or fail immediately, we don't want to
+ * loop for long, or we might get stall notifications.
+@@ -220,9 +220,9 @@ static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+ * -EAGAIN and we let the callers deal with it.
+ */
+ cc = __uv_call(0, (u64)uvcb);
+- page_ref_unfreeze(page, expected);
++ folio_ref_unfreeze(folio, expected);
+ /*
+- * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
++ * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
+ * If busy or partially completed, return -EAGAIN.
+ */
+ if (cc == UVC_CC_OK)
+@@ -277,7 +277,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ bool local_drain = false;
+ spinlock_t *ptelock;
+ unsigned long uaddr;
+- struct page *page;
++ struct folio *folio;
+ pte_t *ptep;
+ int rc;
+
+@@ -306,15 +306,26 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ if (!ptep)
+ goto out;
+ if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
+- page = pte_page(*ptep);
++ folio = page_folio(pte_page(*ptep));
++ rc = -EINVAL;
++ if (folio_test_large(folio))
++ goto unlock;
+ rc = -EAGAIN;
+- if (trylock_page(page)) {
++ if (folio_trylock(folio)) {
+ if (should_export_before_import(uvcb, gmap->mm))
+- uv_convert_from_secure(page_to_phys(page));
+- rc = make_page_secure(page, uvcb);
+- unlock_page(page);
++ uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
++ rc = make_folio_secure(folio, uvcb);
++ folio_unlock(folio);
+ }
++
++ /*
++ * Once we drop the PTL, the folio may get unmapped and
++ * freed immediately. We need a temporary reference.
++ */
++ if (rc == -EAGAIN)
++ folio_get(folio);
+ }
++unlock:
+ pte_unmap_unlock(ptep, ptelock);
+ out:
+ mmap_read_unlock(gmap->mm);
+@@ -324,10 +335,11 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ * If we are here because the UVC returned busy or partial
+ * completion, this is just a useless check, but it is safe.
+ */
+- wait_on_page_writeback(page);
++ folio_wait_writeback(folio);
++ folio_put(folio);
+ } else if (rc == -EBUSY) {
+ /*
+- * If we have tried a local drain and the page refcount
++ * If we have tried a local drain and the folio refcount
+ * still does not match our expected safe value, try with a
+ * system wide drain. This is needed if the pagevecs holding
+ * the page are on a different CPU.
+@@ -338,7 +350,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
+ return -EAGAIN;
+ }
+ /*
+- * We are here if the page refcount does not match the
++ * We are here if the folio refcount does not match the
+ * expected safe value. The main culprits are usually
+ * pagevecs. With lru_add_drain() we drain the pagevecs
+ * on the local CPU so that hopefully the refcount will
+diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
+index 23e868b79a6c99..4800d80decee69 100644
+--- a/arch/s390/kernel/vdso32/Makefile
++++ b/arch/s390/kernel/vdso32/Makefile
+@@ -19,10 +19,12 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_32 += -m31 -s
+
+ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
++KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
+-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
++KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
+
+-LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
++LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
+ --hash-style=both --build-id=sha1 -melf_s390 -T
+
+ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+@@ -61,16 +63,6 @@ quiet_cmd_vdso32as = VDSO32A $@
+ quiet_cmd_vdso32cc = VDSO32C $@
+ cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $<
+
+-# install commands for the unstripped file
+-quiet_cmd_vdso_install = INSTALL $@
+- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+-
+-vdso32.so: $(obj)/vdso32.so.dbg
+- @mkdir -p $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-vdso_install: vdso32.so
+-
+ # Generate VDSO offsets using helper script
+ gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+ quiet_cmd_vdsosym = VDSOSYM $@
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index fc1c6ff8178f59..2f2e4e997030c3 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -24,9 +24,12 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_64 += -m64
+
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
++KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
+ KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
+-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+-ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
++KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
++ldflags-y := -shared -soname=linux-vdso64.so.1 \
+ --hash-style=both --build-id=sha1 -T
+
+ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
+@@ -70,16 +73,6 @@ quiet_cmd_vdso64as = VDSO64A $@
+ quiet_cmd_vdso64cc = VDSO64C $@
+ cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
+
+-# install commands for the unstripped file
+-quiet_cmd_vdso_install = INSTALL $@
+- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+-
+-vdso64.so: $(obj)/vdso64.so.dbg
+- @mkdir -p $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-vdso_install: vdso64.so
+-
+ # Generate VDSO offsets using helper script
+ gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+ quiet_cmd_vdsosym = VDSOSYM $@
+diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+index 57f62596e53b95..85247ef5a41b89 100644
+--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
++++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+@@ -24,8 +24,10 @@ __kernel_\func:
+ CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
+ stg %r14,STACK_FRAME_OVERHEAD(%r15)
++ CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
+ brasl %r14,__s390_vdso_\func
+ lg %r14,STACK_FRAME_OVERHEAD(%r15)
++ CFI_RESTORE 14
+ aghi %r15,WRAPPER_FRAME_SIZE
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+ CFI_RESTORE 15
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 2ae201ebf90b97..de5f9f623f5b27 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -71,6 +71,15 @@ SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ __end_ro_after_init = .;
+
++ .data.rel.ro : {
++ *(.data.rel.ro .data.rel.ro.*)
++ }
++ .got : {
++ __got_start = .;
++ *(.got)
++ __got_end = .;
++ }
++
+ RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
+ BOOT_DATA_PRESERVED
+
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index e0a88dcaf5cb7a..24a18e5ef6e8e3 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -210,13 +210,13 @@ void vtime_flush(struct task_struct *tsk)
+ virt_timer_expire();
+
+ steal = S390_lowcore.steal_timer;
+- avg_steal = S390_lowcore.avg_steal_timer / 2;
++ avg_steal = S390_lowcore.avg_steal_timer;
+ if ((s64) steal > 0) {
+ S390_lowcore.steal_timer = 0;
+ account_steal_time(cputime_to_nsecs(steal));
+ avg_steal += steal;
+ }
+- S390_lowcore.avg_steal_timer = avg_steal;
++ S390_lowcore.avg_steal_timer = avg_steal / 2;
+ }
+
+ static u64 vtime_delta(void)
+diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
+index 3c65b8258ae67a..2cc3ec034046c9 100644
+--- a/arch/s390/kvm/diag.c
++++ b/arch/s390/kvm/diag.c
+@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
+ vcpu->stat.instruction_diagnose_258++;
+ if (vcpu->run->s.regs.gprs[rx] & 7)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+- rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
++ rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
+diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
+index 6d6bc19b37dcbd..090dc38334336b 100644
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -1001,6 +1001,8 @@ static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
+ const gfn_t gfn = gpa_to_gfn(gpa);
+ int rc;
+
++ if (!gfn_to_memslot(kvm, gfn))
++ return PGM_ADDRESSING;
+ if (mode == GACC_STORE)
+ rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
+ else
+@@ -1158,6 +1160,8 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
+ gra += fragment_len;
+ data += fragment_len;
+ }
++ if (rc > 0)
++ vcpu->arch.pgm.code = rc;
+ return rc;
+ }
+
+@@ -1382,6 +1386,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ unsigned long *pgt, int *dat_protection,
+ int *fake)
+ {
++ struct kvm *kvm;
+ struct gmap *parent;
+ union asce asce;
+ union vaddress vaddr;
+@@ -1390,6 +1395,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+
+ *fake = 0;
+ *dat_protection = 0;
++ kvm = sg->private;
+ parent = sg->parent;
+ vaddr.addr = saddr;
+ asce.val = sg->orig_asce;
+@@ -1450,6 +1456,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
+ if (rc)
+ return rc;
++ kvm->stat.gmap_shadow_r1_entry++;
+ }
+ fallthrough;
+ case ASCE_TYPE_REGION2: {
+@@ -1478,6 +1485,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
+ if (rc)
+ return rc;
++ kvm->stat.gmap_shadow_r2_entry++;
+ }
+ fallthrough;
+ case ASCE_TYPE_REGION3: {
+@@ -1515,6 +1523,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
+ if (rc)
+ return rc;
++ kvm->stat.gmap_shadow_r3_entry++;
+ }
+ fallthrough;
+ case ASCE_TYPE_SEGMENT: {
+@@ -1548,6 +1557,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
+ if (rc)
+ return rc;
++ kvm->stat.gmap_shadow_sg_entry++;
+ }
+ }
+ /* Return the parent address of the page table */
+@@ -1618,6 +1628,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+ pte.p |= dat_protection;
+ if (!rc)
+ rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
++ vcpu->kvm->stat.gmap_shadow_pg_entry++;
+ ipte_unlock(vcpu->kvm);
+ mmap_read_unlock(sg->mm);
+ return rc;
+diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
+index b320d12aa04934..3fde45a151f22e 100644
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @data (kernel space) to @gra (guest real address).
+- * It is up to the caller to ensure that the entire guest memory range is
+- * valid memory before calling this function.
+ * Guest low address and key protection are not checked.
+ *
+- * Returns zero on success or -EFAULT on error.
++ * Returns zero on success, -EFAULT when copying from @data failed, or
++ * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
++ * is also stored to allow injecting into the guest (if applicable) using
++ * kvm_s390_inject_prog_cond().
+ *
+ * If an error occurs data may have been copied partially to guest memory.
+ */
+@@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from @gra (guest real address) to @data (kernel space).
+- * It is up to the caller to ensure that the entire guest memory range is
+- * valid memory before calling this function.
+ * Guest key protection is not checked.
+ *
+- * Returns zero on success or -EFAULT on error.
++ * Returns zero on success, -EFAULT when copying to @data failed, or
++ * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
++ * is also stored to allow injecting into the guest (if applicable) using
++ * kvm_s390_inject_prog_cond().
+ *
+ * If an error occurs data may have been copied partially to kernel space.
+ */
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index b3f17e014cab5d..348d030d2660ca 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -66,7 +66,14 @@ const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ STATS_DESC_COUNTER(VM, inject_pfault_done),
+ STATS_DESC_COUNTER(VM, inject_service_signal),
+ STATS_DESC_COUNTER(VM, inject_virtio),
+- STATS_DESC_COUNTER(VM, aen_forward)
++ STATS_DESC_COUNTER(VM, aen_forward),
++ STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
++ STATS_DESC_COUNTER(VM, gmap_shadow_create),
++ STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
++ STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
++ STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
++ STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
++ STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
+ };
+
+ const struct kvm_stats_header kvm_vm_stats_header = {
+@@ -125,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ STATS_DESC_COUNTER(VCPU, instruction_lpswe),
++ STATS_DESC_COUNTER(VCPU, instruction_lpswey),
+ STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ STATS_DESC_COUNTER(VCPU, instruction_sck),
+@@ -2625,9 +2633,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
+ if (r)
+ break;
+
+- mmap_write_lock(current->mm);
+- r = gmap_mark_unmergeable();
+- mmap_write_unlock(current->mm);
++ r = s390_disable_cow_sharing();
+ if (r)
+ break;
+
+@@ -4307,10 +4313,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+
+ vcpu_load(vcpu);
+
+- if (test_fp_ctl(fpu->fpc)) {
+- ret = -EINVAL;
+- goto out;
+- }
+ vcpu->run->s.regs.fpc = fpu->fpc;
+ if (MACHINE_HAS_VX)
+ convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+@@ -4318,7 +4320,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ else
+ memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+
+-out:
+ vcpu_put(vcpu);
+ return ret;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index a7ea80cfa445e1..0c0f47ec634477 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -120,6 +120,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+ }
+
++static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
++{
++ u32 base1 = vcpu->arch.sie_block->ipb >> 28;
++ s64 disp1;
++
++ /* The displacement is a 20bit _SIGNED_ value */
++ disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
++ ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
++
++ if (ar)
++ *ar = base1;
++
++ return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
++}
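
A quick way to sanity-check the displacement decode above is to replay it in userspace. This sketch redefines sign_extend64() locally (the kernel helper lives in <linux/bitops.h>) and builds a hypothetical ipb with base register 5 and all twenty displacement bits set, which must decode to -1:

#include <stdint.h>
#include <stdio.h>

/* The SIY format splits a 20-bit signed displacement into a low
 * 12-bit field and a high 8-bit field; sign_extend64() below is a
 * local stand-in for the kernel helper of the same name. */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* hypothetical ipb: base register 5, all 20 displacement bits
	 * set, which must decode to a displacement of -1 */
	uint32_t ipb = (5u << 28) | (0xfffu << 16) | (0xffu << 8);
	int64_t disp = sign_extend64(((ipb & 0x0fff0000) >> 16) +
				     ((ipb & 0xff00) << 4), 19);

	printf("base=%u disp=%lld\n", ipb >> 28, (long long)disp);
	return 0;
}
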
++
+ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+ u64 *address1, u64 *address2,
+ u8 *ar_b1, u8 *ar_b2)
+@@ -234,7 +249,12 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
+
+ static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
+ {
+- u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);
++ u32 gd;
++
++ if (!kvm->arch.gisa_int.origin)
++ return 0;
++
++ gd = virt_to_phys(kvm->arch.gisa_int.origin);
+
+ if (gd && sclp.has_gisaf)
+ gd |= GISA_FORMAT1;
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index dc4cfa8795c08c..e5b220e686b09a 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -793,6 +793,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
++static int handle_lpswey(struct kvm_vcpu *vcpu)
++{
++ psw_t new_psw;
++ u64 addr;
++ int rc;
++ u8 ar;
++
++ vcpu->stat.instruction_lpswey++;
++
++ if (!test_kvm_facility(vcpu->kvm, 193))
++ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
++
++ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
++ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
++
++ addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
++ if (addr & 7)
++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
++ if (rc)
++ return kvm_s390_inject_prog_cond(vcpu, rc);
++
++ vcpu->arch.sie_block->gpsw = new_psw;
++ if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++ return 0;
++}
++
+ static int handle_stidp(struct kvm_vcpu *vcpu)
+ {
+ u64 stidp_data = vcpu->kvm->arch.model.cpuid;
+@@ -1458,6 +1488,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
+ case 0x61:
+ case 0x62:
+ return handle_ri(vcpu);
++ case 0x71:
++ return handle_lpswey(vcpu);
+ default:
+ return -EOPNOTSUPP;
+ }
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index 61499293c2ac3b..db9a180de65f1f 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -587,10 +587,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
+
+ if (!gmap_is_shadow(gmap))
+ return;
+- if (start >= 1UL << 31)
+- /* We are only interested in prefix pages */
+- return;
+-
+ /*
+ * Only new shadow blocks are added to the list during runtime,
+ * therefore we can safely reference them all the time.
+@@ -1214,15 +1210,17 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
+ * we're holding has been unshadowed. If the gmap is still valid,
+ * we can safely reuse it.
+ */
+- if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
++ if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) {
++ vcpu->kvm->stat.gmap_shadow_reuse++;
+ return 0;
++ }
+
+ /* release the old shadow - if any, and mark the prefix as unmapped */
+ release_gmap_shadow(vsie_page);
+ gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
+ if (IS_ERR(gmap))
+ return PTR_ERR(gmap);
+- gmap->private = vcpu->kvm;
++ vcpu->kvm->stat.gmap_shadow_create++;
+ WRITE_ONCE(vsie_page->gmap, gmap);
+ return 0;
+ }
+diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
+index f47515313226c4..9af4d829649444 100644
+--- a/arch/s390/mm/cmm.c
++++ b/arch/s390/mm/cmm.c
+@@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
+ (*counter)++;
+ spin_unlock(&cmm_lock);
+ nr--;
++ cond_resched();
+ }
+ return nr;
+ }
+
+-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
++static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+ {
+ struct cmm_page_array *pa;
+ unsigned long addr;
+@@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+ return nr;
+ }
+
++static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
++{
++ long inc = 0;
++
++ while (nr) {
++ inc = min(256L, nr);
++ nr -= inc;
++ inc = __cmm_free_pages(inc, counter, list);
++ if (inc)
++ break;
++ cond_resched();
++ }
++ return nr + inc;
++}
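
Both cmm hunks apply the same pattern: split a potentially huge loop into bounded batches with a voluntary reschedule point between them, and report back how much was left undone. A generic userspace sketch of the cmm_free_pages() shape, where process_batch() stands in for __cmm_free_pages() and cond_resched() has no userspace equivalent:

#include <stdio.h>

#define BATCH 256L	/* same bound the cmm code uses */

/* stand-in for __cmm_free_pages(): returns how many of the requested
 * items could NOT be processed (0 means the batch fully succeeded) */
static long process_batch(long n)
{
	(void)n;
	return 0;
}

static long process_all(long nr)
{
	long left = 0;

	while (nr) {
		long chunk = nr < BATCH ? nr : BATCH;

		nr -= chunk;
		left = process_batch(chunk);
		if (left)
			break;	/* stop and report the remainder */
		/* cond_resched() sits here in the kernel version */
	}
	return nr + left;
}

int main(void)
{
	printf("remaining: %ld\n", process_all(1000));
	return 0;
}
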
++
+ static int cmm_oom_notify(struct notifier_block *self,
+ unsigned long dummy, void *parm)
+ {
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index b678295931c315..1a231181a413bd 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -331,14 +331,16 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
+ do_no_context(regs, fault);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
+- } else if (fault & VM_FAULT_SIGBUS) {
++ } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs, fault);
+ else
+ do_sigbus(regs);
+- } else
++ } else {
++ pr_emerg("Unexpected fault flags: %08x\n", fault);
+ BUG();
++ }
+ break;
+ }
+ }
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 906a7bfc2a7874..1a656db09c9fe3 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -21,10 +21,22 @@
+
+ #include <asm/pgalloc.h>
+ #include <asm/gmap.h>
++#include <asm/page.h>
+ #include <asm/tlb.h>
+
+ #define GMAP_SHADOW_FAKE_TABLE 1ULL
+
++static struct page *gmap_alloc_crst(void)
++{
++ struct page *page;
++
++ page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ if (!page)
++ return NULL;
++ arch_set_page_dat(page, CRST_ALLOC_ORDER);
++ return page;
++}
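
The new helper centralizes a two-step allocation: in userspace terms it is an allocate-then-tag constructor, so no call site can forget the arch_set_page_dat() step. A minimal stand-in, with memset() playing the role of the tagging and the size purely illustrative:

#include <stdlib.h>
#include <string.h>

static void *alloc_crst_stub(size_t size)
{
	void *p = malloc(size);

	if (!p)
		return NULL;
	memset(p, 0, size);	/* the mandatory post-allocation step */
	return p;
}

int main(void)
{
	void *table = alloc_crst_stub(4 * 4096);	/* order-2: four pages */

	free(table);
	return 0;
}
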
++
+ /**
+ * gmap_alloc - allocate and initialize a guest address space
+ * @limit: maximum address of the gmap address space
+@@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
+ spin_lock_init(&gmap->guest_table_lock);
+ spin_lock_init(&gmap->shadow_lock);
+ refcount_set(&gmap->ref_count, 1);
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ goto out_free;
+ page->index = 0;
+@@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
+ unsigned long *new;
+
+ /* since we dont free the gmap table until gmap_free we can unlock */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ new = page_to_virt(page);
+@@ -584,7 +596,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
+ pud = pud_offset(p4d, vmaddr);
+ VM_BUG_ON(pud_none(*pud));
+ /* large puds cannot yet be handled */
+- if (pud_large(*pud))
++ if (pud_leaf(*pud))
+ return -EFAULT;
+ pmd = pmd_offset(pud, vmaddr);
+ VM_BUG_ON(pmd_none(*pmd));
+@@ -1679,6 +1691,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
+ return ERR_PTR(-ENOMEM);
+ new->mm = parent->mm;
+ new->parent = gmap_get(parent);
++ new->private = parent->private;
+ new->orig_asce = asce;
+ new->edat_level = edat_level;
+ new->initialized = false;
+@@ -1759,7 +1772,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+
+ BUG_ON(!gmap_is_shadow(sg));
+ /* Allocate a shadow region second table */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = r2t & _REGION_ENTRY_ORIGIN;
+@@ -1843,7 +1856,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+
+ BUG_ON(!gmap_is_shadow(sg));
+ /* Allocate a shadow region second table */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = r3t & _REGION_ENTRY_ORIGIN;
+@@ -1927,7 +1940,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+
+ BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
+ /* Allocate a shadow segment table */
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = sgt & _REGION_ENTRY_ORIGIN;
+@@ -2534,41 +2547,6 @@ static inline void thp_split_mm(struct mm_struct *mm)
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+-/*
+- * Remove all empty zero pages from the mapping for lazy refaulting
+- * - This must be called after mm->context.has_pgste is set, to avoid
+- * future creation of zero pages
+- * - This must be called after THP was disabled.
+- *
+- * mm contracts with s390, that even if mm were to remove a page table,
+- * racing with the loop below and so causing pte_offset_map_lock() to fail,
+- * it will never insert a page table containing empty zero pages once
+- * mm_forbids_zeropage(mm) i.e. mm->context.has_pgste is set.
+- */
+-static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
+- unsigned long end, struct mm_walk *walk)
+-{
+- unsigned long addr;
+-
+- for (addr = start; addr != end; addr += PAGE_SIZE) {
+- pte_t *ptep;
+- spinlock_t *ptl;
+-
+- ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+- if (!ptep)
+- break;
+- if (is_zero_pfn(pte_pfn(*ptep)))
+- ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
+- pte_unmap_unlock(ptep, ptl);
+- }
+- return 0;
+-}
+-
+-static const struct mm_walk_ops zap_zero_walk_ops = {
+- .pmd_entry = __zap_zero_pages,
+- .walk_lock = PGWALK_WRLOCK,
+-};
+-
+ /*
+ * switch on pgstes for its userspace process (for kvm)
+ */
+@@ -2586,22 +2564,142 @@ int s390_enable_sie(void)
+ mm->context.has_pgste = 1;
+ /* split thp mappings and disable thp for future mappings */
+ thp_split_mm(mm);
+- walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
+ mmap_write_unlock(mm);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(s390_enable_sie);
+
+-int gmap_mark_unmergeable(void)
++static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
++ unsigned long end, struct mm_walk *walk)
++{
++ unsigned long *found_addr = walk->private;
++
++	/* Return 1 if the page is a zeropage. */
++ if (is_zero_pfn(pte_pfn(*pte))) {
++ /*
++ * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
++ * right thing and likely don't care: FAULT_FLAG_UNSHARE
++ * currently only works in COW mappings, which is also where
++ * mm_forbids_zeropage() is checked.
++ */
++ if (!is_cow_mapping(walk->vma->vm_flags))
++ return -EFAULT;
++
++ *found_addr = addr;
++ return 1;
++ }
++ return 0;
++}
++
++static const struct mm_walk_ops find_zeropage_ops = {
++ .pte_entry = find_zeropage_pte_entry,
++ .walk_lock = PGWALK_WRLOCK,
++};
++
++/*
++ * Unshare all shared zeropages, replacing them by anonymous pages. Note that
++ * we cannot simply zap all shared zeropages, because this could later
++ * trigger unexpected userfaultfd missing events.
++ *
++ * This must be called after mm->context.allow_cow_sharing was
++ * set to 0, to avoid future mappings of shared zeropages.
++ *
++ * mm contracts with s390 that, even if mm were to remove a page table
++ * while racing with walk_page_range_vma() and thereby making its
++ * pte_offset_map_lock() fail, it will never insert a page table
++ * containing empty zero pages once mm_forbids_zeropage(mm) holds, i.e.
++ * once mm->context.allow_cow_sharing is set to 0.
++ */
++static int __s390_unshare_zeropages(struct mm_struct *mm)
++{
++ struct vm_area_struct *vma;
++ VMA_ITERATOR(vmi, mm, 0);
++ unsigned long addr;
++ vm_fault_t fault;
++ int rc;
++
++ for_each_vma(vmi, vma) {
++ /*
++ * We could only look at COW mappings, but it's more future
++ * proof to catch unexpected zeropages in other mappings and
++ * fail.
++ */
++ if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
++ continue;
++ addr = vma->vm_start;
++
++retry:
++ rc = walk_page_range_vma(vma, addr, vma->vm_end,
++ &find_zeropage_ops, &addr);
++ if (rc < 0)
++ return rc;
++ else if (!rc)
++ continue;
++
++ /* addr was updated by find_zeropage_pte_entry() */
++ fault = handle_mm_fault(vma, addr,
++ FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
++ NULL);
++ if (fault & VM_FAULT_OOM)
++ return -ENOMEM;
++ /*
++ * See break_ksm(): even after handle_mm_fault() returned 0, we
++ * must start the lookup from the current address, because
++ * handle_mm_fault() may back out if there's any difficulty.
++ *
++ * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
++ * maybe they could trigger in the future on concurrent
++ * truncation. In that case, the shared zeropage would be gone
++ * and we can simply retry and make progress.
++ */
++ cond_resched();
++ goto retry;
++ }
++
++ return 0;
++}
++
++static int __s390_disable_cow_sharing(struct mm_struct *mm)
+ {
++ int rc;
++
++ if (!mm->context.allow_cow_sharing)
++ return 0;
++
++ mm->context.allow_cow_sharing = 0;
++
++ /* Replace all shared zeropages by anonymous pages. */
++ rc = __s390_unshare_zeropages(mm);
+ /*
+ * Make sure to disable KSM (if enabled for the whole process or
+ * individual VMAs). Note that nothing currently hinders user space
+ * from re-enabling it.
+ */
+- return ksm_disable(current->mm);
++ if (!rc)
++ rc = ksm_disable(mm);
++ if (rc)
++ mm->context.allow_cow_sharing = 1;
++ return rc;
+ }
+-EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
++
++/*
++ * Disable most COW-sharing of memory pages for the whole process:
++ * (1) Disable KSM and unmerge/unshare any KSM pages.
++ * (2) Disallow shared zeropages and unshare any zeropages that are mapped.
++ *
++ * Note that we currently don't bother with COW-shared pages that are shared
++ * with parent/child processes due to fork().
++ */
++int s390_disable_cow_sharing(void)
++{
++ int rc;
++
++ mmap_write_lock(current->mm);
++ rc = __s390_disable_cow_sharing(current->mm);
++ mmap_write_unlock(current->mm);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
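
The ordering in __s390_disable_cow_sharing() is the point of the function: clear the allow_cow_sharing flag first, so no new shared pages appear while the existing ones are being unshared, then roll the flag back if any step fails. A toy model of that rollback, with all names illustrative:

#include <stdio.h>

static int allow_cow_sharing = 1;

static int unshare_zeropages_stub(void) { return 0; }
static int ksm_disable_stub(void)       { return 0; }

static int disable_cow_sharing(void)
{
	int rc;

	if (!allow_cow_sharing)
		return 0;	/* already disabled */
	allow_cow_sharing = 0;	/* block new sharing before unsharing */

	rc = unshare_zeropages_stub();
	if (!rc)
		rc = ksm_disable_stub();
	if (rc)
		allow_cow_sharing = 1;	/* roll back on failure */
	return rc;
}

int main(void)
{
	printf("rc=%d allow=%d\n", disable_cow_sharing(), allow_cow_sharing);
	return 0;
}
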
+
+ /*
+ * Enable storage key handling from now on and initialize the storage
+@@ -2646,7 +2744,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
+ return 0;
+
+ start = pmd_val(*pmd) & HPAGE_MASK;
+- end = start + HPAGE_SIZE - 1;
++ end = start + HPAGE_SIZE;
+ __storage_key_init_range(start, end);
+ set_bit(PG_arch_1, &page->flags);
+ cond_resched();
+@@ -2670,7 +2768,7 @@ int s390_enable_skey(void)
+ goto out_up;
+
+ mm->context.uses_skeys = 1;
+- rc = gmap_mark_unmergeable();
++ rc = __s390_disable_cow_sharing(mm);
+ if (rc) {
+ mm->context.uses_skeys = 0;
+ goto out_up;
+@@ -2855,7 +2953,7 @@ int s390_replace_asce(struct gmap *gmap)
+ if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
+ return -EINVAL;
+
+- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
++ page = gmap_alloc_crst();
+ if (!page)
+ return -ENOMEM;
+ page->index = 0;
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index 297a6d897d5a0c..763469e518eec8 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -139,7 +139,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+ }
+
+ if (!test_and_set_bit(PG_arch_1, &page->flags))
+- __storage_key_init_range(paddr, paddr + size - 1);
++ __storage_key_init_range(paddr, paddr + size);
+ }
+
+ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
+ if (p4d_present(*p4dp)) {
+ pudp = pud_offset(p4dp, addr);
+ if (pud_present(*pudp)) {
+- if (pud_large(*pudp))
++ if (pud_leaf(*pudp))
+ return (pte_t *) pudp;
+ pmdp = pmd_offset(pudp, addr);
+ }
+@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd)
+
+ int pud_huge(pud_t pud)
+ {
+- return pud_large(pud);
++ return pud_leaf(pud);
+ }
+
+ bool __init arch_hugetlb_valid_size(unsigned long size)
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index 1e2ea706aa2289..79a037f49f7070 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
+ continue;
+ if (!pud_folded(*pud)) {
+ page = phys_to_page(pud_val(*pud));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pmd(pud, addr, next);
+@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
+ continue;
+ if (!p4d_folded(*p4d)) {
+ page = phys_to_page(p4d_val(*p4d));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pud(p4d, addr, next);
+@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
+ continue;
+ if (!pgd_folded(*pgd)) {
+ page = phys_to_page(pgd_val(*pgd));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_p4d(pgd, addr, next);
+@@ -181,6 +181,12 @@ void __init cmma_init_nodat(void)
+ return;
+ /* Mark pages used in kernel page tables */
+ mark_kernel_pgd();
++ page = virt_to_page(&swapper_pg_dir);
++ for (i = 0; i < 4; i++)
++ set_bit(PG_arch_1, &page[i].flags);
++ page = virt_to_page(&invalid_pg_dir);
++ for (i = 0; i < 4; i++)
++ set_bit(PG_arch_1, &page[i].flags);
+
+ /* Set all kernel pages not used for page tables to stable/no-dat */
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
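
The loop bounds above changed from 3 to 4 because an order-N buddy allocation spans 1 << N base pages, and a region table (CRST_ALLOC_ORDER, which is 2 on s390) is 16 KiB, i.e. four 4 KiB pages; the old bound left the fourth page struct unmarked. As a one-liner:

#include <stdio.h>

int main(void)
{
	unsigned int order = 2;	/* CRST_ALLOC_ORDER on s390 */

	/* pages spanned by an order-N allocation */
	printf("pages per crst table: %lu\n", 1UL << order);
	return 0;
}
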
+diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
+index b87e96c64b61d2..441f654d048d20 100644
+--- a/arch/s390/mm/pageattr.c
++++ b/arch/s390/mm/pageattr.c
+@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
+ if (pud_none(*pudp))
+ return -EINVAL;
+ next = pud_addr_end(addr, end);
+- if (pud_large(*pudp)) {
++ if (pud_leaf(*pudp)) {
+ need_split = !!(flags & SET_MEMORY_4K);
+ need_split |= !!(addr & ~PUD_MASK);
+ need_split |= !!(addr + PUD_SIZE > next);
+diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
+index 07fc660a24aa2f..9355fbe5f51e94 100644
+--- a/arch/s390/mm/pgalloc.c
++++ b/arch/s390/mm/pgalloc.c
+@@ -53,6 +53,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
+
+ void crst_table_free(struct mm_struct *mm, unsigned long *table)
+ {
++ if (!table)
++ return;
+ pagetable_free(virt_to_ptdesc(table));
+ }
+
+@@ -146,6 +148,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
+ ptdesc = pagetable_alloc(GFP_KERNEL, 0);
+ if (ptdesc) {
+ table = (u64 *)ptdesc_to_virt(ptdesc);
++ arch_set_page_dat(virt_to_page(table), 0);
+ memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
+ }
+@@ -499,6 +502,8 @@ static unsigned long *base_crst_alloc(unsigned long val)
+
+ static void base_crst_free(unsigned long *table)
+ {
++ if (!table)
++ return;
+ pagetable_free(virt_to_ptdesc(table));
+ }
+
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 3bd2ab2a9a3449..5e349869590a83 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -479,7 +479,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
+ return -ENOENT;
+
+ /* Large PUDs are not supported yet. */
+- if (pud_large(*pud))
++ if (pud_leaf(*pud))
+ return -EFAULT;
+
+ *pmdp = pmd_offset(pud, addr);
+@@ -756,7 +756,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+ pte_clear(mm, addr, ptep);
+ }
+ if (reset)
+- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
++ pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
+ pgste_set_unlock(ptep, pgste);
+ preempt_enable();
+ }
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 6957d2ed97bf0e..2d3f65da56eeaa 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -12,6 +12,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/slab.h>
+ #include <linux/sort.h>
++#include <asm/page-states.h>
+ #include <asm/cacheflush.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/pgalloc.h>
+@@ -45,8 +46,11 @@ void *vmem_crst_alloc(unsigned long val)
+ unsigned long *table;
+
+ table = vmem_alloc_pages(CRST_ALLOC_ORDER);
+- if (table)
+- crst_table_init(table, val);
++ if (!table)
++ return NULL;
++ crst_table_init(table, val);
++ if (slab_is_available())
++ arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
+ return table;
+ }
+
+@@ -318,7 +322,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
+ if (!add) {
+ if (pud_none(*pud))
+ continue;
+- if (pud_large(*pud)) {
++ if (pud_leaf(*pud)) {
+ if (IS_ALIGNED(addr, PUD_SIZE) &&
+ IS_ALIGNED(next, PUD_SIZE)) {
+ pud_clear(pud);
+@@ -339,7 +343,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
+ if (!pmd)
+ goto out;
+ pud_populate(&init_mm, pud, pmd);
+- } else if (pud_large(*pud)) {
++ } else if (pud_leaf(*pud)) {
+ continue;
+ }
+ ret = modify_pmd_table(pud, addr, next, add, direct);
+@@ -582,7 +586,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
+ if (!pmd)
+ goto out;
+ pud_populate(&init_mm, pud, pmd);
+- } else if (WARN_ON_ONCE(pud_large(*pud))) {
++ } else if (WARN_ON_ONCE(pud_leaf(*pud))) {
+ goto out;
+ }
+ pmd = pmd_offset(pud, addr);
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index e507692e51e71e..62ee557d4b4996 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -516,11 +516,12 @@ static void bpf_skip(struct bpf_jit *jit, int size)
+ * PLT for hotpatchable calls. The calling convention is the same as for the
+ * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
+ */
+-extern const char bpf_plt[];
+-extern const char bpf_plt_ret[];
+-extern const char bpf_plt_target[];
+-extern const char bpf_plt_end[];
+-#define BPF_PLT_SIZE 32
++struct bpf_plt {
++ char code[16];
++ void *ret;
++ void *target;
++} __packed;
++extern const struct bpf_plt bpf_plt;
+ asm(
+ ".pushsection .rodata\n"
+ " .balign 8\n"
+@@ -531,15 +532,14 @@ asm(
+ " .balign 8\n"
+ "bpf_plt_ret: .quad 0\n"
+ "bpf_plt_target: .quad 0\n"
+- "bpf_plt_end:\n"
+ " .popsection\n"
+ );
+
+-static void bpf_jit_plt(void *plt, void *ret, void *target)
++static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
+ {
+- memcpy(plt, bpf_plt, BPF_PLT_SIZE);
+- *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
+- *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
++ memcpy(plt, &bpf_plt, sizeof(*plt));
++ plt->ret = ret;
++ plt->target = target;
+ }
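
The rewrite above replaces extern char markers plus pointer arithmetic with a fixed-layout struct, so the two 8-byte PLT slots become plain addressable fields. A userspace check of the layout; on an LP64 target the struct is 32 bytes, matching the retired BPF_PLT_SIZE:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bpf_plt {
	char code[16];
	void *ret;
	void *target;
} __attribute__((packed));

static void fill_plt(struct bpf_plt *plt, void *ret, void *target)
{
	memset(plt->code, 0x07, sizeof(plt->code));	/* stand-in body */
	plt->ret = ret;
	plt->target = target;
}

int main(void)
{
	struct bpf_plt plt;

	fill_plt(&plt, (void *)0x1000, (void *)0x2000);
	printf("sizeof(struct bpf_plt) = %zu\n", sizeof(plt));
	return 0;
}
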
+
+ /*
+@@ -662,9 +662,9 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
+ jit->prg = ALIGN(jit->prg, 8);
+ jit->prologue_plt = jit->prg;
+ if (jit->prg_buf)
+- bpf_jit_plt(jit->prg_buf + jit->prg,
++ bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
+ jit->prg_buf + jit->prologue_plt_ret, NULL);
+- jit->prg += BPF_PLT_SIZE;
++ jit->prg += sizeof(struct bpf_plt);
+ }
+
+ static int get_probe_mem_regno(const u8 *insn)
+@@ -1311,8 +1311,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
+ (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
+ src_reg, dst_reg, off); \
+- if (is32 && (insn->imm & BPF_FETCH)) \
+- EMIT_ZERO(src_reg); \
++ if (insn->imm & BPF_FETCH) { \
++ /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
++ _EMIT2(0x07e0); \
++ if (is32) \
++ EMIT_ZERO(src_reg); \
++ } \
+ } while (0)
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+@@ -1901,9 +1905,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ struct bpf_jit jit;
+ int pass;
+
+- if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
+- return orig_fp;
+-
+ if (!fp->jit_requested)
+ return orig_fp;
+
+@@ -2009,14 +2010,11 @@ bool bpf_jit_supports_far_kfunc_call(void)
+ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *old_addr, void *new_addr)
+ {
++ struct bpf_plt expected_plt, current_plt, new_plt, *plt;
+ struct {
+ u16 opc;
+ s32 disp;
+ } __packed insn;
+- char expected_plt[BPF_PLT_SIZE];
+- char current_plt[BPF_PLT_SIZE];
+- char new_plt[BPF_PLT_SIZE];
+- char *plt;
+ char *ret;
+ int err;
+
+@@ -2035,18 +2033,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ */
+ } else {
+ /* Verify the PLT. */
+- plt = (char *)ip + (insn.disp << 1);
+- err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
++ plt = ip + (insn.disp << 1);
++ err = copy_from_kernel_nofault(&current_plt, plt,
++ sizeof(current_plt));
+ if (err < 0)
+ return err;
+ ret = (char *)ip + 6;
+- bpf_jit_plt(expected_plt, ret, old_addr);
+- if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
++ bpf_jit_plt(&expected_plt, ret, old_addr);
++ if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
+ return -EINVAL;
+ /* Adjust the call address. */
+- bpf_jit_plt(new_plt, ret, new_addr);
+- s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
+- new_plt + (bpf_plt_target - bpf_plt),
++ bpf_jit_plt(&new_plt, ret, new_addr);
++ s390_kernel_write(&plt->target, &new_plt.target,
+ sizeof(void *));
+ }
+
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index d34d5813d00660..777362cb4ea80b 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -241,7 +241,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ /* combine single writes by using store-block insn */
+ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+ {
+- zpci_memcpy_toio(to, from, count);
++ zpci_memcpy_toio(to, from, count * 8);
+ }
+
+ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
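
The one-line change above fixes a unit bug: __iowrite64_copy() receives a count of 64-bit words, while zpci_memcpy_toio() expects bytes, so the old call copied only an eighth of the data. A sketch of the corrected conversion, with memcpy() standing in for the MMIO primitive:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void memcpy_toio_stub(void *to, const void *from, size_t bytes)
{
	memcpy(to, from, bytes);
}

static void iowrite64_copy_demo(void *to, const void *from, size_t count)
{
	memcpy_toio_stub(to, from, count * 8);	/* words -> bytes */
}

int main(void)
{
	uint64_t src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };

	iowrite64_copy_demo(dst, src, 4);	/* 4 words, not 4 bytes */
	printf("dst[3] = %llu\n", (unsigned long long)dst[3]);
	return 0;
}
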
+diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
+index ff8f24854c6462..84482a92133220 100644
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -268,33 +268,20 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
+ }
+ }
+
+-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
++static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs,
++ unsigned long *bit)
+ {
+- struct zpci_dev *zdev = to_zpci(pdev);
+- unsigned int hwirq, msi_vecs, cpu;
+- unsigned long bit;
+- struct msi_desc *msi;
+- struct msi_msg msg;
+- int cpu_addr;
+- int rc, irq;
+-
+- zdev->aisb = -1UL;
+- zdev->msi_first_bit = -1U;
+- if (type == PCI_CAP_ID_MSI && nvec > 1)
+- return 1;
+- msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
+-
+ if (irq_delivery == DIRECTED) {
+ /* Allocate cpu vector bits */
+- bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
+- if (bit == -1UL)
++ *bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
++ if (*bit == -1UL)
+ return -EIO;
+ } else {
+ /* Allocate adapter summary indicator bit */
+- bit = airq_iv_alloc_bit(zpci_sbv);
+- if (bit == -1UL)
++ *bit = airq_iv_alloc_bit(zpci_sbv);
++ if (*bit == -1UL)
+ return -EIO;
+- zdev->aisb = bit;
++ zdev->aisb = *bit;
+
+ /* Create adapter interrupt vector */
+ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
+@@ -302,27 +289,66 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ return -ENOMEM;
+
+ /* Wire up shortcut pointer */
+- zpci_ibv[bit] = zdev->aibv;
++ zpci_ibv[*bit] = zdev->aibv;
+ /* Each function has its own interrupt vector */
+- bit = 0;
++ *bit = 0;
+ }
++ return 0;
++}
+
+- /* Request MSI interrupts */
++int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
++{
++ unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu;
++ struct zpci_dev *zdev = to_zpci(pdev);
++ struct msi_desc *msi;
++ struct msi_msg msg;
++ unsigned long bit;
++ int cpu_addr;
++ int rc, irq;
++
++ zdev->aisb = -1UL;
++ zdev->msi_first_bit = -1U;
++
++ msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
++ if (msi_vecs < nvec) {
++ pr_info("%s requested %d irqs, allocate system limit of %d",
++ pci_name(pdev), nvec, zdev->max_msi);
++ }
++
++ rc = __alloc_airq(zdev, msi_vecs, &bit);
++ if (rc < 0)
++ return rc;
++
++ /*
++ * Request MSI interrupts:
++ * When using MSI, nvec_used interrupt sources and their irq
++ * descriptors are controlled through one msi descriptor.
++ * Thus the outer loop over msi descriptors shall run only once,
++ * while two inner loops iterate over the interrupt vectors.
++ * When using MSI-X, each interrupt vector/irq descriptor
++ * is bound to exactly one msi descriptor (nvec_used is one).
++ * So the inner loops are executed once, while the outer iterates
++ * over the MSI-X descriptors.
++ */
+ hwirq = bit;
+ msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
+- rc = -EIO;
+ if (hwirq - bit >= msi_vecs)
+ break;
+- irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
+- (irq_delivery == DIRECTED) ?
+- msi->affinity : NULL);
++ irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used);
++ irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE,
++ (irq_delivery == DIRECTED) ?
++ msi->affinity : NULL);
+ if (irq < 0)
+ return -ENOMEM;
+- rc = irq_set_msi_desc(irq, msi);
+- if (rc)
+- return rc;
+- irq_set_chip_and_handler(irq, &zpci_irq_chip,
+- handle_percpu_irq);
++
++ for (i = 0; i < irqs_per_msi; i++) {
++ rc = irq_set_msi_desc_off(irq, i, msi);
++ if (rc)
++ return rc;
++ irq_set_chip_and_handler(irq + i, &zpci_irq_chip,
++ handle_percpu_irq);
++ }
++
+ msg.data = hwirq - bit;
+ if (irq_delivery == DIRECTED) {
+ if (msi->affinity)
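
The long comment in the hunk above describes loop structures that invert between MSI and MSI-X; a toy model makes it concrete. With MSI one descriptor carries all the vectors (the outer loop runs once); with MSI-X every descriptor carries exactly one vector (the inner loop runs once). Both walks cover the same hwirq range:

#include <stdio.h>

struct msi_desc_stub { int nvec_used; };

static void wire_up(const struct msi_desc_stub *descs, int ndesc)
{
	int hwirq = 0, d, i;

	for (d = 0; d < ndesc; d++) {	/* outer: msi descriptors */
		for (i = 0; i < descs[d].nvec_used; i++)	/* inner: vectors */
			printf("desc %d: hwirq %d\n", d, hwirq + i);
		hwirq += descs[d].nvec_used;
	}
}

int main(void)
{
	const struct msi_desc_stub msi[]  = { { 4 } };	/* MSI: one desc, 4 vectors */
	const struct msi_desc_stub msix[] = { { 1 }, { 1 }, { 1 }, { 1 } };	/* MSI-X */

	wire_up(msi, 1);
	wire_up(msix, 4);
	return 0;
}
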
+@@ -335,31 +361,35 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ msg.address_lo |= (cpu_addr << 8);
+
+ for_each_possible_cpu(cpu) {
+- airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
++ for (i = 0; i < irqs_per_msi; i++)
++ airq_iv_set_data(zpci_ibv[cpu],
++ hwirq + i, irq + i);
+ }
+ } else {
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+- airq_iv_set_data(zdev->aibv, hwirq, irq);
++ for (i = 0; i < irqs_per_msi; i++)
++ airq_iv_set_data(zdev->aibv, hwirq + i, irq + i);
+ }
+ msg.address_hi = zdev->msi_addr >> 32;
+ pci_write_msi_msg(irq, &msg);
+- hwirq++;
++ hwirq += irqs_per_msi;
+ }
+
+ zdev->msi_first_bit = bit;
+- zdev->msi_nr_irqs = msi_vecs;
++ zdev->msi_nr_irqs = hwirq - bit;
+
+ rc = zpci_set_irq(zdev);
+ if (rc)
+ return rc;
+
+- return (msi_vecs == nvec) ? 0 : msi_vecs;
++ return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs;
+ }
+
+ void arch_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct msi_desc *msi;
++ unsigned int i;
+ int rc;
+
+ /* Disable interrupts */
+@@ -369,8 +399,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
+
+ /* Release MSI interrupts */
+ msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
+- irq_set_msi_desc(msi->irq, NULL);
+- irq_free_desc(msi->irq);
++ for (i = 0; i < msi->nvec_used; i++) {
++ irq_set_msi_desc(msi->irq + i, NULL);
++ irq_free_desc(msi->irq + i);
++ }
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+@@ -410,7 +442,7 @@ static void __init cpu_enable_directed_irq(void *unused)
+ union zpci_sic_iib iib = {{0}};
+ union zpci_sic_iib ziib = {{0}};
+
+- iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
++ iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
+
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index 5880893329310d..a90499c087f0c5 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -97,9 +97,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
+ return -EINVAL;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) dst,
+- (u64 __force) src, n,
+- ZPCI_MAX_WRITE_SIZE);
++ size = zpci_get_max_io_size((u64 __force) dst,
++ (u64 __force) src, n,
++ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = __pcistb_mio_inuser(dst, src, size, &status);
+ else
+@@ -242,9 +242,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
+ u8 status;
+
+ while (n > 0) {
+- size = zpci_get_max_write_size((u64 __force) src,
+- (u64 __force) dst, n,
+- ZPCI_MAX_READ_SIZE);
++ size = zpci_get_max_io_size((u64 __force) src,
++ (u64 __force) dst, n,
++ ZPCI_MAX_READ_SIZE);
+ rc = __pcilg_mio_inuser(dst, src, size, &status);
+ if (rc)
+ break;
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index c449e7c1b20ff5..8bcd6c1431a95b 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -22,6 +22,17 @@ config STACK_DEBUG
+ every function call and will therefore incur a major
+ performance hit. Most users should say N.
+
++config EARLY_PRINTK
++ bool "Early printk"
++ depends on SH_STANDARD_BIOS
++ help
++ Say Y here to redirect kernel printk messages to the serial port
++ used by the SH-IPL bootloader, starting very early in the boot
++ process and ending when the kernel's serial console is initialised.
++ This option is only useful while porting the kernel to a new machine,
++ when the kernel may crash or hang before the serial console is
++ initialised. If unsure, say N.
++
+ config 4KSTACKS
+ bool "Use 4Kb for kernel stacks instead of 8Kb"
+ depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
+diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
+index 3be293335de545..7a788d44cc7349 100644
+--- a/arch/sh/boards/mach-ecovec24/setup.c
++++ b/arch/sh/boards/mach-ecovec24/setup.c
+@@ -1220,7 +1220,7 @@ static int __init arch_setup(void)
+ lcdc_info.ch[0].num_modes = ARRAY_SIZE(ecovec_dvi_modes);
+
+ /* No backlight */
+- gpio_backlight_data.fbdev = NULL;
++ gpio_backlight_data.dev = NULL;
+
+ gpio_set_value(GPIO_PTA2, 1);
+ gpio_set_value(GPIO_PTU1, 1);
+diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
+index 878b6b551bd2d0..51112f54552b32 100644
+--- a/arch/sh/include/asm/cacheflush.h
++++ b/arch/sh/include/asm/cacheflush.h
+@@ -90,6 +90,7 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
+ unsigned long len);
+
+ #define flush_cache_vmap(start, end) local_flush_cache_all(NULL)
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) local_flush_cache_all(NULL)
+
+ #define flush_dcache_mmap_lock(mapping) do { } while (0)
+diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
+index aed1ea8e2c2f06..74051b8ddf3e7b 100644
+--- a/arch/sh/kernel/kprobes.c
++++ b/arch/sh/kernel/kprobes.c
+@@ -44,17 +44,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ if (OPCODE_RTE(opcode))
+ return -EFAULT; /* Bad breakpoint */
+
++ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = opcode;
+
+ return 0;
+ }
+
+-void __kprobes arch_copy_kprobe(struct kprobe *p)
+-{
+- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+- p->opcode = *p->addr;
+-}
+-
+ void __kprobes arch_arm_kprobe(struct kprobe *p)
+ {
+ *p->addr = BREAKPOINT_INSTRUCTION;
+diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
+index 9dca568509a5e5..d6f4afcb0e8705 100644
+--- a/arch/sh/kernel/sys_sh32.c
++++ b/arch/sh/kernel/sys_sh32.c
+@@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+ (u64)len0 << 32 | len1, advice);
+ #endif
+ }
++
++/*
++ * swap the arguments the way that libc wants them instead of
++ * moving flags ahead of the 64-bit nbytes argument
++ */
++SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
++ SC_ARG64(nbytes), unsigned int, flags)
++{
++ return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
++ SC_VAL64(loff_t, nbytes), flags);
++}
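
What SC_ARG64/SC_VAL64 expand to in spirit: on a 32-bit ABI a 64-bit syscall argument arrives as two 32-bit halves, and the wrapper glues them back together before calling the generic ksys_ helper. A minimal sketch of the reassembly; the half order is ABI-dependent, and high word first is assumed here, matching the fadvise64_64 wrapper earlier in this file:

#include <stdint.h>
#include <stdio.h>

static int64_t arg64(uint32_t hi, uint32_t lo)
{
	return (int64_t)((uint64_t)hi << 32 | lo);
}

int main(void)
{
	/* 0x1:0x80000000 reassembles to 6442450944 */
	printf("offset = %lld\n", (long long)arg64(0x1, 0x80000000u));
	return 0;
}
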
+diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
+index e90d585c4d3e73..7e1ceb2ba57211 100644
+--- a/arch/sh/kernel/syscalls/syscall.tbl
++++ b/arch/sh/kernel/syscalls/syscall.tbl
+@@ -321,7 +321,7 @@
+ 311 common set_robust_list sys_set_robust_list
+ 312 common get_robust_list sys_get_robust_list
+ 313 common splice sys_splice
+-314 common sync_file_range sys_sync_file_range
++314 common sync_file_range sys_sh_sync_file_range6
+ 315 common tee sys_tee
+ 316 common vmsplice sys_vmsplice
+ 317 common move_pages sys_move_pages
+@@ -395,6 +395,7 @@
+ 385 common pkey_alloc sys_pkey_alloc
+ 386 common pkey_free sys_pkey_free
+ 387 common rseq sys_rseq
++388 common sync_file_range2 sys_sync_file_range2
+ # room for arch specific syscalls
+ 393 common semget sys_semget
+ 394 common semctl sys_semctl
+diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
+index 3e07074e009813..06fed5a21e8baa 100644
+--- a/arch/sh/lib/checksum.S
++++ b/arch/sh/lib/checksum.S
+@@ -33,7 +33,8 @@
+ */
+
+ /*
+- * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
++ * unsigned int csum_partial(const unsigned char *buf, int len,
++ * unsigned int sum);
+ */
+
+ .text
+@@ -45,31 +46,11 @@ ENTRY(csum_partial)
+ * Fortunately, it is easy to convert 2-byte alignment to 4-byte
+ * alignment for the unrolled loop.
+ */
++ mov r5, r1
+ mov r4, r0
+- tst #3, r0 ! Check alignment.
+- bt/s 2f ! Jump if alignment is ok.
+- mov r4, r7 ! Keep a copy to check for alignment
++ tst #2, r0 ! Check alignment.
++ bt 2f ! Jump if alignment is ok.
+ !
+- tst #1, r0 ! Check alignment.
+- bt 21f ! Jump if alignment is boundary of 2bytes.
+-
+- ! buf is odd
+- tst r5, r5
+- add #-1, r5
+- bt 9f
+- mov.b @r4+, r0
+- extu.b r0, r0
+- addc r0, r6 ! t=0 from previous tst
+- mov r6, r0
+- shll8 r6
+- shlr16 r0
+- shlr8 r0
+- or r0, r6
+- mov r4, r0
+- tst #2, r0
+- bt 2f
+-21:
+- ! buf is 2 byte aligned (len could be 0)
+ add #-2, r5 ! Alignment uses up two bytes.
+ cmp/pz r5 !
+ bt/s 1f ! Jump if we had at least two bytes.
+@@ -77,17 +58,16 @@ ENTRY(csum_partial)
+ bra 6f
+ add #2, r5 ! r5 was < 2. Deal with it.
+ 1:
++ mov r5, r1 ! Save new len for later use.
+ mov.w @r4+, r0
+ extu.w r0, r0
+ addc r0, r6
+ bf 2f
+ add #1, r6
+ 2:
+- ! buf is 4 byte aligned (len could be 0)
+- mov r5, r1
+ mov #-5, r0
+- shld r0, r1
+- tst r1, r1
++ shld r0, r5
++ tst r5, r5
+ bt/s 4f ! if it's =0, go to 4f
+ clrt
+ .align 2
+@@ -109,31 +89,30 @@ ENTRY(csum_partial)
+ addc r0, r6
+ addc r2, r6
+ movt r0
+- dt r1
++ dt r5
+ bf/s 3b
+ cmp/eq #1, r0
+- ! here, we know r1==0
+- addc r1, r6 ! add carry to r6
++ ! here, we know r5==0
++ addc r5, r6 ! add carry to r6
+ 4:
+- mov r5, r0
++ mov r1, r0
+ and #0x1c, r0
+ tst r0, r0
+- bt 6f
+- ! 4 bytes or more remaining
+- mov r0, r1
+- shlr2 r1
++ bt/s 6f
++ mov r0, r5
++ shlr2 r5
+ mov #0, r2
+ 5:
+ addc r2, r6
+ mov.l @r4+, r2
+ movt r0
+- dt r1
++ dt r5
+ bf/s 5b
+ cmp/eq #1, r0
+ addc r2, r6
+- addc r1, r6 ! r1==0 here, so it means add carry-bit
++ addc r5, r6 ! r5==0 here, so it means add carry-bit
+ 6:
+- ! 3 bytes or less remaining
++ mov r1, r5
+ mov #3, r0
+ and r0, r5
+ tst r5, r5
+@@ -159,16 +138,6 @@ ENTRY(csum_partial)
+ mov #0, r0
+ addc r0, r6
+ 9:
+- ! Check if the buffer was misaligned, if so realign sum
+- mov r7, r0
+- tst #1, r0
+- bt 10f
+- mov r6, r0
+- shll8 r6
+- shlr16 r0
+- shlr8 r0
+- or r0, r6
+-10:
+ rts
+ mov r6, r0
+
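
The reworked csum_partial above is easiest to sanity-check against a portable reference: 16-bit words accumulated with the carries folded back in, per RFC 1071. A sketch for spot-checking small buffers; words are loaded little-endian here, the common case for SH, and a big-endian build would swap the two bytes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_partial_ref(const uint8_t *buf, size_t len,
				 uint32_t sum)
{
	uint64_t acc = sum;

	while (len > 1) {
		acc += (uint16_t)(buf[0] | (unsigned int)buf[1] << 8);
		buf += 2;
		len -= 2;
	}
	if (len)
		acc += buf[0];	/* trailing odd byte in the low half */
	while (acc >> 32)	/* fold carries back into 32 bits */
		acc = (acc & 0xffffffffu) + (acc >> 32);
	return (uint32_t)acc;
}

int main(void)
{
	const uint8_t data[] = { 0x45, 0x00, 0x00, 0x54 };

	printf("%08x\n", csum_partial_ref(data, sizeof(data), 0));
	return 0;
}
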
+diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
+index 7417345c6639a3..2a03daa68f2857 100644
+--- a/arch/sparc/Makefile
++++ b/arch/sparc/Makefile
+@@ -60,7 +60,7 @@ libs-y += arch/sparc/prom/
+ libs-y += arch/sparc/lib/
+
+ drivers-$(CONFIG_PM) += arch/sparc/power/
+-drivers-$(CONFIG_FB) += arch/sparc/video/
++drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
+
+ boot := arch/sparc/boot
+
+@@ -76,9 +76,8 @@ install:
+ archheaders:
+ $(Q)$(MAKE) $(build)=arch/sparc/kernel/syscalls all
+
+-PHONY += vdso_install
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/sparc/vdso $@
++vdso-install-$(CONFIG_SPARC64) += arch/sparc/vdso/vdso64.so.dbg
++vdso-install-$(CONFIG_COMPAT) += arch/sparc/vdso/vdso32.so.dbg
+
+ # This is the image used for packaging
+ KBUILD_IMAGE := $(boot)/zImage
+diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
+index f3b7270bf71b26..9fee0ccfccb8e1 100644
+--- a/arch/sparc/include/asm/cacheflush_32.h
++++ b/arch/sparc/include/asm/cacheflush_32.h
+@@ -48,6 +48,7 @@ static inline void flush_dcache_page(struct page *page)
+ #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+ #define flush_cache_vmap(start, end) flush_cache_all()
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) flush_cache_all()
+
+ /* When a context switch happens we must flush all user windows so that
+diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
+index 0e879004efff16..2b1261b77ecd1b 100644
+--- a/arch/sparc/include/asm/cacheflush_64.h
++++ b/arch/sparc/include/asm/cacheflush_64.h
+@@ -75,6 +75,7 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
+ #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+ #define flush_cache_vmap(start, end) do { } while (0)
++#define flush_cache_vmap_early(start, end) do { } while (0)
+ #define flush_cache_vunmap(start, end) do { } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
+index 94eb529dcb7762..2718cbea826a7d 100644
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -10,7 +10,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+@@ -26,7 +26,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "b %l[l_yes]\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
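
The hunks above are a pure rename: newer compilers treat asm goto as implicitly volatile, so the asm_volatile_goto() wrapper macro was retired. For reference, a minimal standalone asm goto (the construct static branches are built on): an empty body that falls through at run time but forces the compiler to keep the C label reachable. Requires GCC or Clang:

#include <stdio.h>

static int maybe_branch(void)
{
	/* No instructions emitted; the compiler must still assume a
	 * jump to "taken" is possible. */
	asm goto("" : : : : taken);
	return 0;
taken:
	return 1;	/* reachable only through the asm goto label list */
}

int main(void)
{
	printf("%d\n", maybe_branch());	/* prints 0: the empty asm falls through */
	return 0;
}
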
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index a67abebd43592f..1b86d02a84556a 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -247,6 +247,7 @@ void prom_sun4v_guest_soft_state(void);
+ int prom_ihandle2path(int handle, char *buffer, int bufsize);
+
+ /* Client interface level routines. */
++void prom_cif_init(void *cif_handler);
+ void p1275_cmd_direct(unsigned long *);
+
+ #endif /* !(__SPARC64_OPLIB_H) */
+diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
+index 0a7ffcfd59cda0..e2eed8f97665fb 100644
+--- a/arch/sparc/include/asm/parport.h
++++ b/arch/sparc/include/asm/parport.h
+@@ -1,256 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* parport.h: sparc64 specific parport initialization and dma.
+- *
+- * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
+- */
++#ifndef ___ASM_SPARC_PARPORT_H
++#define ___ASM_SPARC_PARPORT_H
+
+-#ifndef _ASM_SPARC64_PARPORT_H
+-#define _ASM_SPARC64_PARPORT_H 1
+-
+-#include <linux/of.h>
+-#include <linux/platform_device.h>
+-
+-#include <asm/ebus_dma.h>
+-#include <asm/ns87303.h>
+-#include <asm/prom.h>
+-
+-#define PARPORT_PC_MAX_PORTS PARPORT_MAX
+-
+-/*
+- * While sparc64 doesn't have an ISA DMA API, we provide something that looks
+- * close enough to make parport_pc happy
+- */
+-#define HAS_DMA
+-
+-#ifdef CONFIG_PARPORT_PC_FIFO
+-static DEFINE_SPINLOCK(dma_spin_lock);
+-
+-#define claim_dma_lock() \
+-({ unsigned long flags; \
+- spin_lock_irqsave(&dma_spin_lock, flags); \
+- flags; \
+-})
+-
+-#define release_dma_lock(__flags) \
+- spin_unlock_irqrestore(&dma_spin_lock, __flags);
++#if defined(__sparc__) && defined(__arch64__)
++#include <asm/parport_64.h>
++#else
++#include <asm-generic/parport.h>
++#endif
+ #endif
+
+-static struct sparc_ebus_info {
+- struct ebus_dma_info info;
+- unsigned int addr;
+- unsigned int count;
+- int lock;
+-
+- struct parport *port;
+-} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
+-
+-static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
+-
+-static inline int request_dma(unsigned int dmanr, const char *device_id)
+-{
+- if (dmanr >= PARPORT_PC_MAX_PORTS)
+- return -EINVAL;
+- if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
+- return -EBUSY;
+- return 0;
+-}
+-
+-static inline void free_dma(unsigned int dmanr)
+-{
+- if (dmanr >= PARPORT_PC_MAX_PORTS) {
+- printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
+- return;
+- }
+- if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
+- printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
+- return;
+- }
+-}
+-
+-static inline void enable_dma(unsigned int dmanr)
+-{
+- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
+-
+- if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
+- sparc_ebus_dmas[dmanr].addr,
+- sparc_ebus_dmas[dmanr].count))
+- BUG();
+-}
+-
+-static inline void disable_dma(unsigned int dmanr)
+-{
+- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
+-}
+-
+-static inline void clear_dma_ff(unsigned int dmanr)
+-{
+- /* nothing */
+-}
+-
+-static inline void set_dma_mode(unsigned int dmanr, char mode)
+-{
+- ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
+-}
+-
+-static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
+-{
+- sparc_ebus_dmas[dmanr].addr = addr;
+-}
+-
+-static inline void set_dma_count(unsigned int dmanr, unsigned int count)
+-{
+- sparc_ebus_dmas[dmanr].count = count;
+-}
+-
+-static inline unsigned int get_dma_residue(unsigned int dmanr)
+-{
+- return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
+-}
+-
+-static int ecpp_probe(struct platform_device *op)
+-{
+- unsigned long base = op->resource[0].start;
+- unsigned long config = op->resource[1].start;
+- unsigned long d_base = op->resource[2].start;
+- unsigned long d_len;
+- struct device_node *parent;
+- struct parport *p;
+- int slot, err;
+-
+- parent = op->dev.of_node->parent;
+- if (of_node_name_eq(parent, "dma")) {
+- p = parport_pc_probe_port(base, base + 0x400,
+- op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
+- op->dev.parent->parent, 0);
+- if (!p)
+- return -ENOMEM;
+- dev_set_drvdata(&op->dev, p);
+- return 0;
+- }
+-
+- for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
+- if (!test_and_set_bit(slot, dma_slot_map))
+- break;
+- }
+- err = -ENODEV;
+- if (slot >= PARPORT_PC_MAX_PORTS)
+- goto out_err;
+-
+- spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
+-
+- d_len = (op->resource[2].end - d_base) + 1UL;
+- sparc_ebus_dmas[slot].info.regs =
+- of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
+-
+- if (!sparc_ebus_dmas[slot].info.regs)
+- goto out_clear_map;
+-
+- sparc_ebus_dmas[slot].info.flags = 0;
+- sparc_ebus_dmas[slot].info.callback = NULL;
+- sparc_ebus_dmas[slot].info.client_cookie = NULL;
+- sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
+- strcpy(sparc_ebus_dmas[slot].info.name, "parport");
+- if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
+- goto out_unmap_regs;
+-
+- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
+-
+- /* Configure IRQ to Push Pull, Level Low */
+- /* Enable ECP, set bit 2 of the CTR first */
+- outb(0x04, base + 0x02);
+- ns87303_modify(config, PCR,
+- PCR_EPP_ENABLE |
+- PCR_IRQ_ODRAIN,
+- PCR_ECP_ENABLE |
+- PCR_ECP_CLK_ENA |
+- PCR_IRQ_POLAR);
+-
+- /* CTR bit 5 controls direction of port */
+- ns87303_modify(config, PTR,
+- 0, PTR_LPT_REG_DIR);
+-
+- p = parport_pc_probe_port(base, base + 0x400,
+- op->archdata.irqs[0],
+- slot,
+- op->dev.parent,
+- 0);
+- err = -ENOMEM;
+- if (!p)
+- goto out_disable_irq;
+-
+- dev_set_drvdata(&op->dev, p);
+-
+- return 0;
+-
+-out_disable_irq:
+- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+- ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+-
+-out_unmap_regs:
+- of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
+-
+-out_clear_map:
+- clear_bit(slot, dma_slot_map);
+-
+-out_err:
+- return err;
+-}
+-
+-static int ecpp_remove(struct platform_device *op)
+-{
+- struct parport *p = dev_get_drvdata(&op->dev);
+- int slot = p->dma;
+-
+- parport_pc_unregister_port(p);
+-
+- if (slot != PARPORT_DMA_NOFIFO) {
+- unsigned long d_base = op->resource[2].start;
+- unsigned long d_len;
+-
+- d_len = (op->resource[2].end - d_base) + 1UL;
+-
+- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+- ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+- of_iounmap(&op->resource[2],
+- sparc_ebus_dmas[slot].info.regs,
+- d_len);
+- clear_bit(slot, dma_slot_map);
+- }
+-
+- return 0;
+-}
+-
+-static const struct of_device_id ecpp_match[] = {
+- {
+- .name = "ecpp",
+- },
+- {
+- .name = "parallel",
+- .compatible = "ecpp",
+- },
+- {
+- .name = "parallel",
+- .compatible = "ns87317-ecpp",
+- },
+- {
+- .name = "parallel",
+- .compatible = "pnpALI,1533,3",
+- },
+- {},
+-};
+-
+-static struct platform_driver ecpp_driver = {
+- .driver = {
+- .name = "ecpp",
+- .of_match_table = ecpp_match,
+- },
+- .probe = ecpp_probe,
+- .remove = ecpp_remove,
+-};
+-
+-static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+-{
+- return platform_driver_register(&ecpp_driver);
+-}
+-
+-#endif /* !(_ASM_SPARC64_PARPORT_H */
+diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h
+new file mode 100644
+index 00000000000000..0a7ffcfd59cda0
+--- /dev/null
++++ b/arch/sparc/include/asm/parport_64.h
+@@ -0,0 +1,256 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* parport.h: sparc64 specific parport initialization and dma.
++ *
++ * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
++ */
++
++#ifndef _ASM_SPARC64_PARPORT_H
++#define _ASM_SPARC64_PARPORT_H 1
++
++#include <linux/of.h>
++#include <linux/platform_device.h>
++
++#include <asm/ebus_dma.h>
++#include <asm/ns87303.h>
++#include <asm/prom.h>
++
++#define PARPORT_PC_MAX_PORTS PARPORT_MAX
++
++/*
++ * While sparc64 doesn't have an ISA DMA API, we provide something that looks
++ * close enough to make parport_pc happy
++ */
++#define HAS_DMA
++
++#ifdef CONFIG_PARPORT_PC_FIFO
++static DEFINE_SPINLOCK(dma_spin_lock);
++
++#define claim_dma_lock() \
++({ unsigned long flags; \
++ spin_lock_irqsave(&dma_spin_lock, flags); \
++ flags; \
++})
++
++#define release_dma_lock(__flags) \
++ spin_unlock_irqrestore(&dma_spin_lock, __flags);
++#endif
++
++static struct sparc_ebus_info {
++ struct ebus_dma_info info;
++ unsigned int addr;
++ unsigned int count;
++ int lock;
++
++ struct parport *port;
++} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
++
++static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
++
++static inline int request_dma(unsigned int dmanr, const char *device_id)
++{
++ if (dmanr >= PARPORT_PC_MAX_PORTS)
++ return -EINVAL;
++ if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
++ return -EBUSY;
++ return 0;
++}
++
++static inline void free_dma(unsigned int dmanr)
++{
++ if (dmanr >= PARPORT_PC_MAX_PORTS) {
++ printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
++ return;
++ }
++ if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
++ printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
++ return;
++ }
++}
++
++static inline void enable_dma(unsigned int dmanr)
++{
++ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
++
++ if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
++ sparc_ebus_dmas[dmanr].addr,
++ sparc_ebus_dmas[dmanr].count))
++ BUG();
++}
++
++static inline void disable_dma(unsigned int dmanr)
++{
++ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
++}
++
++static inline void clear_dma_ff(unsigned int dmanr)
++{
++ /* nothing */
++}
++
++static inline void set_dma_mode(unsigned int dmanr, char mode)
++{
++ ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
++}
++
++static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
++{
++ sparc_ebus_dmas[dmanr].addr = addr;
++}
++
++static inline void set_dma_count(unsigned int dmanr, unsigned int count)
++{
++ sparc_ebus_dmas[dmanr].count = count;
++}
++
++static inline unsigned int get_dma_residue(unsigned int dmanr)
++{
++ return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
++}
++
++static int ecpp_probe(struct platform_device *op)
++{
++ unsigned long base = op->resource[0].start;
++ unsigned long config = op->resource[1].start;
++ unsigned long d_base = op->resource[2].start;
++ unsigned long d_len;
++ struct device_node *parent;
++ struct parport *p;
++ int slot, err;
++
++ parent = op->dev.of_node->parent;
++ if (of_node_name_eq(parent, "dma")) {
++ p = parport_pc_probe_port(base, base + 0x400,
++ op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
++ op->dev.parent->parent, 0);
++ if (!p)
++ return -ENOMEM;
++ dev_set_drvdata(&op->dev, p);
++ return 0;
++ }
++
++ for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
++ if (!test_and_set_bit(slot, dma_slot_map))
++ break;
++ }
++ err = -ENODEV;
++ if (slot >= PARPORT_PC_MAX_PORTS)
++ goto out_err;
++
++ spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
++
++ d_len = (op->resource[2].end - d_base) + 1UL;
++ sparc_ebus_dmas[slot].info.regs =
++ of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
++
++ if (!sparc_ebus_dmas[slot].info.regs)
++ goto out_clear_map;
++
++ sparc_ebus_dmas[slot].info.flags = 0;
++ sparc_ebus_dmas[slot].info.callback = NULL;
++ sparc_ebus_dmas[slot].info.client_cookie = NULL;
++ sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
++ strcpy(sparc_ebus_dmas[slot].info.name, "parport");
++ if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
++ goto out_unmap_regs;
++
++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
++
++ /* Configure IRQ to Push Pull, Level Low */
++ /* Enable ECP, set bit 2 of the CTR first */
++ outb(0x04, base + 0x02);
++ ns87303_modify(config, PCR,
++ PCR_EPP_ENABLE |
++ PCR_IRQ_ODRAIN,
++ PCR_ECP_ENABLE |
++ PCR_ECP_CLK_ENA |
++ PCR_IRQ_POLAR);
++
++ /* CTR bit 5 controls direction of port */
++ ns87303_modify(config, PTR,
++ 0, PTR_LPT_REG_DIR);
++
++ p = parport_pc_probe_port(base, base + 0x400,
++ op->archdata.irqs[0],
++ slot,
++ op->dev.parent,
++ 0);
++ err = -ENOMEM;
++ if (!p)
++ goto out_disable_irq;
++
++ dev_set_drvdata(&op->dev, p);
++
++ return 0;
++
++out_disable_irq:
++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
++ ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
++
++out_unmap_regs:
++ of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
++
++out_clear_map:
++ clear_bit(slot, dma_slot_map);
++
++out_err:
++ return err;
++}
++
++static int ecpp_remove(struct platform_device *op)
++{
++ struct parport *p = dev_get_drvdata(&op->dev);
++ int slot = p->dma;
++
++ parport_pc_unregister_port(p);
++
++ if (slot != PARPORT_DMA_NOFIFO) {
++ unsigned long d_base = op->resource[2].start;
++ unsigned long d_len;
++
++ d_len = (op->resource[2].end - d_base) + 1UL;
++
++ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
++ ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
++ of_iounmap(&op->resource[2],
++ sparc_ebus_dmas[slot].info.regs,
++ d_len);
++ clear_bit(slot, dma_slot_map);
++ }
++
++ return 0;
++}
++
++static const struct of_device_id ecpp_match[] = {
++ {
++ .name = "ecpp",
++ },
++ {
++ .name = "parallel",
++ .compatible = "ecpp",
++ },
++ {
++ .name = "parallel",
++ .compatible = "ns87317-ecpp",
++ },
++ {
++ .name = "parallel",
++ .compatible = "pnpALI,1533,3",
++ },
++ {},
++};
++
++static struct platform_driver ecpp_driver = {
++ .driver = {
++ .name = "ecpp",
++ .of_match_table = ecpp_match,
++ },
++ .probe = ecpp_probe,
++ .remove = ecpp_remove,
++};
++
++static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
++{
++ return platform_driver_register(&ecpp_driver);
++}
++
++#endif /* !(_ASM_SPARC64_PARPORT_H */
+diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
+index 505b6700805dd6..0964fede0b2cc6 100644
+--- a/arch/sparc/include/asm/smp_64.h
++++ b/arch/sparc/include/asm/smp_64.h
+@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+ int hard_smp_processor_id(void);
+ #define raw_smp_processor_id() (current_thread_info()->cpu)
+
+-void smp_fill_in_cpu_possible_map(void);
+ void smp_fill_in_sib_core_maps(void);
+ void __noreturn cpu_play_dead(void);
+
+@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
+ #define smp_fill_in_sib_core_maps() do { } while (0)
+ #define smp_fetch_global_regs() do { } while (0)
+ #define smp_fetch_global_pmu() do { } while (0)
+-#define smp_fill_in_cpu_possible_map() do { } while (0)
+ #define smp_init_cpu_poke() do { } while (0)
+ #define scheduler_poke() do { } while (0)
+
+diff --git a/arch/sparc/include/uapi/asm/termbits.h b/arch/sparc/include/uapi/asm/termbits.h
+index 4321322701fcfd..0da2b1adc0f526 100644
+--- a/arch/sparc/include/uapi/asm/termbits.h
++++ b/arch/sparc/include/uapi/asm/termbits.h
+@@ -10,16 +10,6 @@ typedef unsigned int tcflag_t;
+ typedef unsigned long tcflag_t;
+ #endif
+
+-#define NCC 8
+-struct termio {
+- unsigned short c_iflag; /* input mode flags */
+- unsigned short c_oflag; /* output mode flags */
+- unsigned short c_cflag; /* control mode flags */
+- unsigned short c_lflag; /* local mode flags */
+- unsigned char c_line; /* line discipline */
+- unsigned char c_cc[NCC]; /* control characters */
+-};
+-
+ #define NCCS 17
+ struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+diff --git a/arch/sparc/include/uapi/asm/termios.h b/arch/sparc/include/uapi/asm/termios.h
+index ee86f4093d83e9..cceb32260881e7 100644
+--- a/arch/sparc/include/uapi/asm/termios.h
++++ b/arch/sparc/include/uapi/asm/termios.h
+@@ -40,5 +40,14 @@ struct winsize {
+ unsigned short ws_ypixel;
+ };
+
++#define NCC 8
++struct termio {
++ unsigned short c_iflag; /* input mode flags */
++ unsigned short c_oflag; /* output mode flags */
++ unsigned short c_cflag; /* control mode flags */
++ unsigned short c_lflag; /* local mode flags */
++ unsigned char c_line; /* line discipline */
++ unsigned char c_cc[NCC]; /* control characters */
++};
+
+ #endif /* _UAPI_SPARC_TERMIOS_H */
+diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
+index 8700a0e3b0df7d..b2b639bee06848 100644
+--- a/arch/sparc/kernel/leon_pci_grpci1.c
++++ b/arch/sparc/kernel/leon_pci_grpci1.c
+@@ -697,7 +697,7 @@ static int grpci1_of_probe(struct platform_device *ofdev)
+ return err;
+ }
+
+-static const struct of_device_id grpci1_of_match[] __initconst = {
++static const struct of_device_id grpci1_of_match[] = {
+ {
+ .name = "GAISLER_PCIFBRG",
+ },
+diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
+index 60b6bdf7761fb3..ac2acd62a24ece 100644
+--- a/arch/sparc/kernel/leon_pci_grpci2.c
++++ b/arch/sparc/kernel/leon_pci_grpci2.c
+@@ -889,7 +889,7 @@ static int grpci2_of_probe(struct platform_device *ofdev)
+ return err;
+ }
+
+-static const struct of_device_id grpci2_of_match[] __initconst = {
++static const struct of_device_id grpci2_of_match[] = {
+ {
+ .name = "GAISLER_GRPCI2",
+ },
+diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
+index 17cdfdbf1f3b78..149adc09475306 100644
+--- a/arch/sparc/kernel/nmi.c
++++ b/arch/sparc/kernel/nmi.c
+@@ -279,7 +279,7 @@ static int __init setup_nmi_watchdog(char *str)
+ if (!strncmp(str, "panic", 5))
+ panic_on_timeout = 1;
+
+- return 0;
++ return 1;
+ }
+ __setup("nmi_watchdog=", setup_nmi_watchdog);
+
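+Both early-parameter fixes in this patch (the nmi_watchdog= hunk here and the vdso= hunk in arch/sparc/vdso/vma.c further down) rest on the same __setup() convention: a handler returns 1 to mark the option as consumed, while returning 0 lets it fall through to init as an unknown argument and provokes the "Unknown kernel command line parameters" warning. A minimal kernel-context sketch of the idiom, using hypothetical names (example_setup, example_enabled):
+
+    static unsigned long example_enabled;
+
+    static int __init example_setup(char *str)
+    {
+            unsigned long val;
+
+            if (!kstrtoul(str, 10, &val))
+                    example_enabled = val; /* ignore malformed values */
+            return 1;                      /* consumed, even on a parse error */
+    }
+    __setup("example=", example_setup);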
+diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
+index 998aa693d49125..ba82884cb92aa3 100644
+--- a/arch/sparc/kernel/prom_64.c
++++ b/arch/sparc/kernel/prom_64.c
+@@ -483,7 +483,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+ ncpus_probed++;
+ #ifdef CONFIG_SMP
+ set_cpu_present(cpuid, true);
+- set_cpu_possible(cpuid, true);
++
++ if (num_possible_cpus() < nr_cpu_ids)
++ set_cpu_possible(cpuid, true);
+ #endif
+ return NULL;
+ }
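+This guard replaces smp_fill_in_cpu_possible_map(), whose removal from setup_64.c and smp_64.c follows below: the nr_cpus= limit is now enforced as CPUs are discovered rather than by trimming the possible map afterwards. A standalone userspace model of the guard, with made-up sizes:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    #define NR_CPUS 64
+    static bool cpu_possible[NR_CPUS];
+    static int nr_cpu_ids = 4;      /* e.g. from nr_cpus=4 on the command line */
+
+    static int num_possible_cpus(void)
+    {
+            int i, n = 0;
+
+            for (i = 0; i < NR_CPUS; i++)
+                    n += cpu_possible[i];
+            return n;
+    }
+
+    static void record_one_cpu(int cpuid)
+    {
+            if (num_possible_cpus() < nr_cpu_ids)   /* the guard above */
+                    cpu_possible[cpuid] = true;
+    }
+
+    int main(void)
+    {
+            int id;
+
+            for (id = 0; id < 16; id++)     /* firmware reports 16 CPUs */
+                    record_one_cpu(id);
+            printf("possible cpus: %d\n", num_possible_cpus()); /* prints 4 */
+            return 0;
+    }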
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 6546ca9d4d3f1f..bda81f314bc257 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -684,7 +684,6 @@ void __init setup_arch(char **cmdline_p)
+
+ paging_init();
+ init_sparc64_elf_hwcap();
+- smp_fill_in_cpu_possible_map();
+ /*
+ * Once the OF device tree and MDESC have been setup and nr_cpus has
+ * been parsed, we know the list of possible cpus. Therefore we can
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index f3969a3600dbfe..e50c38eba2b876 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1220,20 +1220,6 @@ void __init smp_setup_processor_id(void)
+ xcall_deliver_impl = hypervisor_xcall_deliver;
+ }
+
+-void __init smp_fill_in_cpu_possible_map(void)
+-{
+- int possible_cpus = num_possible_cpus();
+- int i;
+-
+- if (possible_cpus > nr_cpu_ids)
+- possible_cpus = nr_cpu_ids;
+-
+- for (i = 0; i < possible_cpus; i++)
+- set_cpu_possible(i, true);
+- for (; i < NR_CPUS; i++)
+- set_cpu_possible(i, false);
+-}
+-
+ void smp_fill_in_sib_core_maps(void)
+ {
+ unsigned int i;
+diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
+index a45f0f31fe51ab..a3d308f2043e5f 100644
+--- a/arch/sparc/kernel/sys32.S
++++ b/arch/sparc/kernel/sys32.S
+@@ -18,224 +18,3 @@ sys32_mmap2:
+ sethi %hi(sys_mmap), %g1
+ jmpl %g1 + %lo(sys_mmap), %g0
+ sllx %o5, 12, %o5
+-
+- .align 32
+- .globl sys32_socketcall
+-sys32_socketcall: /* %o0=call, %o1=args */
+- cmp %o0, 1
+- bl,pn %xcc, do_einval
+- cmp %o0, 18
+- bg,pn %xcc, do_einval
+- sub %o0, 1, %o0
+- sllx %o0, 5, %o0
+- sethi %hi(__socketcall_table_begin), %g2
+- or %g2, %lo(__socketcall_table_begin), %g2
+- jmpl %g2 + %o0, %g0
+- nop
+-do_einval:
+- retl
+- mov -EINVAL, %o0
+-
+- .align 32
+-__socketcall_table_begin:
+-
+- /* Each entry is exactly 32 bytes. */
+-do_sys_socket: /* sys_socket(int, int, int) */
+-1: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_socket), %g1
+-2: ldswa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(sys_socket), %g0
+-3: ldswa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
+-4: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_bind), %g1
+-5: ldswa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(sys_bind), %g0
+-6: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
+-7: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_connect), %g1
+-8: ldswa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(sys_connect), %g0
+-9: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_listen: /* sys_listen(int, int) */
+-10: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_listen), %g1
+- jmpl %g1 + %lo(sys_listen), %g0
+-11: ldswa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+- nop
+-do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
+-12: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_accept), %g1
+-13: lduwa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(sys_accept), %g0
+-14: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
+-15: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_getsockname), %g1
+-16: lduwa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(sys_getsockname), %g0
+-17: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
+-18: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_getpeername), %g1
+-19: lduwa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(sys_getpeername), %g0
+-20: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
+-21: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_socketpair), %g1
+-22: ldswa [%o1 + 0x8] %asi, %o2
+-23: lduwa [%o1 + 0xc] %asi, %o3
+- jmpl %g1 + %lo(sys_socketpair), %g0
+-24: ldswa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+-do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
+-25: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_send), %g1
+-26: lduwa [%o1 + 0x8] %asi, %o2
+-27: lduwa [%o1 + 0xc] %asi, %o3
+- jmpl %g1 + %lo(sys_send), %g0
+-28: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+-do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
+-29: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_recv), %g1
+-30: lduwa [%o1 + 0x8] %asi, %o2
+-31: lduwa [%o1 + 0xc] %asi, %o3
+- jmpl %g1 + %lo(sys_recv), %g0
+-32: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+-do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
+-33: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_sendto), %g1
+-34: lduwa [%o1 + 0x8] %asi, %o2
+-35: lduwa [%o1 + 0xc] %asi, %o3
+-36: lduwa [%o1 + 0x10] %asi, %o4
+-37: ldswa [%o1 + 0x14] %asi, %o5
+- jmpl %g1 + %lo(sys_sendto), %g0
+-38: lduwa [%o1 + 0x4] %asi, %o1
+-do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
+-39: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_recvfrom), %g1
+-40: lduwa [%o1 + 0x8] %asi, %o2
+-41: lduwa [%o1 + 0xc] %asi, %o3
+-42: lduwa [%o1 + 0x10] %asi, %o4
+-43: lduwa [%o1 + 0x14] %asi, %o5
+- jmpl %g1 + %lo(sys_recvfrom), %g0
+-44: lduwa [%o1 + 0x4] %asi, %o1
+-do_sys_shutdown: /* sys_shutdown(int, int) */
+-45: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_shutdown), %g1
+- jmpl %g1 + %lo(sys_shutdown), %g0
+-46: ldswa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+- nop
+-do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
+-47: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_setsockopt), %g1
+-48: ldswa [%o1 + 0x8] %asi, %o2
+-49: lduwa [%o1 + 0xc] %asi, %o3
+-50: ldswa [%o1 + 0x10] %asi, %o4
+- jmpl %g1 + %lo(sys_setsockopt), %g0
+-51: ldswa [%o1 + 0x4] %asi, %o1
+- nop
+-do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
+-52: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_getsockopt), %g1
+-53: ldswa [%o1 + 0x8] %asi, %o2
+-54: lduwa [%o1 + 0xc] %asi, %o3
+-55: lduwa [%o1 + 0x10] %asi, %o4
+- jmpl %g1 + %lo(sys_getsockopt), %g0
+-56: ldswa [%o1 + 0x4] %asi, %o1
+- nop
+-do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
+-57: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(compat_sys_sendmsg), %g1
+-58: lduwa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(compat_sys_sendmsg), %g0
+-59: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
+-60: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(compat_sys_recvmsg), %g1
+-61: lduwa [%o1 + 0x8] %asi, %o2
+- jmpl %g1 + %lo(compat_sys_recvmsg), %g0
+-62: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+- nop
+-do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
+-63: ldswa [%o1 + 0x0] %asi, %o0
+- sethi %hi(sys_accept4), %g1
+-64: lduwa [%o1 + 0x8] %asi, %o2
+-65: ldswa [%o1 + 0xc] %asi, %o3
+- jmpl %g1 + %lo(sys_accept4), %g0
+-66: lduwa [%o1 + 0x4] %asi, %o1
+- nop
+- nop
+-
+- .section __ex_table,"a"
+- .align 4
+- .word 1b, __retl_efault, 2b, __retl_efault
+- .word 3b, __retl_efault, 4b, __retl_efault
+- .word 5b, __retl_efault, 6b, __retl_efault
+- .word 7b, __retl_efault, 8b, __retl_efault
+- .word 9b, __retl_efault, 10b, __retl_efault
+- .word 11b, __retl_efault, 12b, __retl_efault
+- .word 13b, __retl_efault, 14b, __retl_efault
+- .word 15b, __retl_efault, 16b, __retl_efault
+- .word 17b, __retl_efault, 18b, __retl_efault
+- .word 19b, __retl_efault, 20b, __retl_efault
+- .word 21b, __retl_efault, 22b, __retl_efault
+- .word 23b, __retl_efault, 24b, __retl_efault
+- .word 25b, __retl_efault, 26b, __retl_efault
+- .word 27b, __retl_efault, 28b, __retl_efault
+- .word 29b, __retl_efault, 30b, __retl_efault
+- .word 31b, __retl_efault, 32b, __retl_efault
+- .word 33b, __retl_efault, 34b, __retl_efault
+- .word 35b, __retl_efault, 36b, __retl_efault
+- .word 37b, __retl_efault, 38b, __retl_efault
+- .word 39b, __retl_efault, 40b, __retl_efault
+- .word 41b, __retl_efault, 42b, __retl_efault
+- .word 43b, __retl_efault, 44b, __retl_efault
+- .word 45b, __retl_efault, 46b, __retl_efault
+- .word 47b, __retl_efault, 48b, __retl_efault
+- .word 49b, __retl_efault, 50b, __retl_efault
+- .word 51b, __retl_efault, 52b, __retl_efault
+- .word 53b, __retl_efault, 54b, __retl_efault
+- .word 55b, __retl_efault, 56b, __retl_efault
+- .word 57b, __retl_efault, 58b, __retl_efault
+- .word 59b, __retl_efault, 60b, __retl_efault
+- .word 61b, __retl_efault, 62b, __retl_efault
+- .word 63b, __retl_efault, 64b, __retl_efault
+- .word 65b, __retl_efault, 66b, __retl_efault
+- .previous
+diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
+index 4ed06c71c43fb7..d0f535230ad8ba 100644
+--- a/arch/sparc/kernel/syscalls/syscall.tbl
++++ b/arch/sparc/kernel/syscalls/syscall.tbl
+@@ -117,7 +117,7 @@
+ 90 common dup2 sys_dup2
+ 91 32 setfsuid32 sys_setfsuid
+ 92 common fcntl sys_fcntl compat_sys_fcntl
+-93 common select sys_select
++93 common select sys_select compat_sys_select
+ 94 32 setfsgid32 sys_setfsgid
+ 95 common fsync sys_fsync
+ 96 common setpriority sys_setpriority
+@@ -155,7 +155,7 @@
+ 123 32 fchown sys_fchown16
+ 123 64 fchown sys_fchown
+ 124 common fchmod sys_fchmod
+-125 common recvfrom sys_recvfrom
++125 common recvfrom sys_recvfrom compat_sys_recvfrom
+ 126 32 setreuid sys_setreuid16
+ 126 64 setreuid sys_setreuid
+ 127 32 setregid sys_setregid16
+@@ -247,7 +247,7 @@
+ 204 32 readdir sys_old_readdir compat_sys_old_readdir
+ 204 64 readdir sys_nis_syscall
+ 205 common readahead sys_readahead compat_sys_readahead
+-206 common socketcall sys_socketcall sys32_socketcall
++206 common socketcall sys_socketcall compat_sys_socketcall
+ 207 common syslog sys_syslog
+ 208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+ 209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
+@@ -461,7 +461,7 @@
+ 412 32 utimensat_time64 sys_utimensat sys_utimensat
+ 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+ 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+-416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
++416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+ 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+ 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+ 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
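+Each syscall.tbl row is <number> <abi> <name> <native entry point> [<compat entry point>]; the hunks above fill in compat entries that were missing, so 32-bit tasks on a 64-bit kernel get argument translation instead of the native handler. The reason translation is needed at all is plain ABI layout; for example, select's struct timeval is half the size under ILP32. A small userspace illustration (type names invented for the demo):
+
+    #include <stdio.h>
+
+    struct timeval64 { long tv_sec; long tv_usec; }; /* LP64 layout  */
+    struct timeval32 { int  tv_sec; int  tv_usec; }; /* ILP32 layout */
+
+    int main(void)
+    {
+            /* 16 vs 8 bytes: reading a 32-bit task's buffer with the
+             * 64-bit layout would misparse every field past the first. */
+            printf("64-bit timeval: %zu bytes\n", sizeof(struct timeval64));
+            printf("32-bit timeval: %zu bytes\n", sizeof(struct timeval32));
+            return 0;
+    }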
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 08ffd17d5ec340..523a6e5ee92519 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
+
+ /* Now allocate error trap reporting scoreboard. */
+ sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ if ((PAGE_SIZE << order) >= sz)
+ break;
+ }
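+Context for the bound change: this kernel series introduces NR_PAGE_ORDERS as the count of valid page orders (one more than the largest order), so the old inclusive order <= MAX_ORDER loop becomes the exclusive order < NR_PAGE_ORDERS with identical coverage. The loop itself just picks the smallest order whose block covers sz; a standalone rendering with assumed sparc64-ish constants:
+
+    #include <stdio.h>
+
+    #define PAGE_SIZE      8192UL  /* sparc64 uses 8 KiB pages */
+    #define NR_PAGE_ORDERS 13      /* assumed: orders 0 .. 12 */
+
+    int main(void)
+    {
+            unsigned long sz = 64 * 1024;   /* stand-in for the scoreboard size */
+            int order;
+
+            for (order = 0; order < NR_PAGE_ORDERS; order++)
+                    if ((PAGE_SIZE << order) >= sz)
+                            break;
+            printf("order %d (%lu bytes) covers %lu\n",
+                   order, PAGE_SIZE << order, sz);
+            return 0;
+    }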
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index f83017992eaaeb..d7db4e737218c2 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr)
+ if (pud_none(*pud))
+ return false;
+
+- if (pud_large(*pud))
++ if (pud_leaf(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
+ pmd = pmd_offset(pud, addr);
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index b44d79d778c718..ef69127d7e5e8b 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -249,6 +249,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ {
+ pmd_t old, entry;
+
++ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+ old = pmdp_establish(vma, address, pmdp, entry);
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index 103aa910431856..f7b8a1a865b8fe 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,9 +26,6 @@ phandle prom_chosen_node;
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
+ */
+-
+-extern void prom_cif_init(void *);
+-
+ void __init prom_init(void *cif_handler)
+ {
+ phandle node;
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index 889aa602f8d860..51c3f984bbf728 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -49,7 +49,7 @@ void p1275_cmd_direct(unsigned long *args)
+ local_irq_restore(flags);
+ }
+
+-void prom_cif_init(void *cif_handler, void *cif_stack)
++void prom_cif_init(void *cif_handler)
+ {
+ p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+ }
+diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
+index 77d7b9032158c7..d08c3a0443f3a7 100644
+--- a/arch/sparc/vdso/Makefile
++++ b/arch/sparc/vdso/Makefile
+@@ -116,30 +116,3 @@ quiet_cmd_vdso = VDSO $@
+
+ VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic
+ GCOV_PROFILE := n
+-
+-#
+-# Install the unstripped copies of vdso*.so. If our toolchain supports
+-# build-id, install .build-id links as well.
+-#
+-quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+-define cmd_vdso_install
+- cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+- if readelf -n $< |grep -q 'Build ID'; then \
+- buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+- first=`echo $$buildid | cut -b-2`; \
+- last=`echo $$buildid | cut -b3-`; \
+- mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+- ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+- fi
+-endef
+-
+-vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+-
+-$(MODLIB)/vdso: FORCE
+- @mkdir -p $(MODLIB)/vdso
+-
+-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+- $(call cmd,vdso_install)
+-
+-PHONY += vdso_install $(vdso_img_insttargets)
+-vdso_install: $(vdso_img_insttargets) FORCE
+diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
+index 136c78f28f8ba2..1bbf4335de4540 100644
+--- a/arch/sparc/vdso/vma.c
++++ b/arch/sparc/vdso/vma.c
+@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
+ unsigned long val;
+
+ err = kstrtoul(s, 10, &val);
+- if (err)
+- return err;
+- vdso_enabled = val;
+- return 0;
++ if (!err)
++ vdso_enabled = val;
++ return 1;
+ }
+ __setup("vdso=", vdso_setup);
+diff --git a/arch/sparc/video/Makefile b/arch/sparc/video/Makefile
+index 6baddbd58e4db3..d4d83f1702c61f 100644
+--- a/arch/sparc/video/Makefile
++++ b/arch/sparc/video/Makefile
+@@ -1,3 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+
+-obj-$(CONFIG_FB) += fbdev.o
++obj-$(CONFIG_FB_CORE) += fbdev.o
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 82f05f25063480..34957dcb88b9c3 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -115,7 +115,9 @@ archprepare:
+ $(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
+
+ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
+-LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
++ifdef CONFIG_LD_SCRIPT_DYN
++LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie
++endif
+ LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
+
+ CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index b98545f3edb503..2ba4e0d4e26b06 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ parse_chan_pair(NULL, line, n, opts, error_out);
+ err = 0;
+ }
++ *error_out = "configured as 'none'";
+ } else {
+ char *new = kstrdup(init, GFP_KERNEL);
+ if (!new) {
+@@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ }
+ }
+ if (err) {
++ *error_out = "failed to parse channel pair";
+ line->init_str = NULL;
+ line->valid = 0;
+ kfree(new);
+@@ -673,24 +675,26 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
+ goto cleanup;
+ }
+
+- *winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list),
+- .fd = fd,
++ *winch = ((struct winch) { .fd = fd,
+ .tty_fd = tty_fd,
+ .pid = pid,
+ .port = port,
+ .stack = stack });
+
++ spin_lock(&winch_handler_lock);
++ list_add(&winch->list, &winch_handlers);
++ spin_unlock(&winch_handler_lock);
++
+ if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
+ IRQF_SHARED, "winch", winch) < 0) {
+ printk(KERN_ERR "register_winch_irq - failed to register "
+ "IRQ\n");
++ spin_lock(&winch_handler_lock);
++ list_del(&winch->list);
++ spin_unlock(&winch_handler_lock);
+ goto out_free;
+ }
+
+- spin_lock(&winch_handler_lock);
+- list_add(&winch->list, &winch_handlers);
+- spin_unlock(&winch_handler_lock);
+-
+ return;
+
+ out_free:
+diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
+index 3d7836c4650701..cabcc501b448a3 100644
+--- a/arch/um/drivers/net_kern.c
++++ b/arch/um/drivers/net_kern.c
+@@ -204,7 +204,7 @@ static int uml_net_close(struct net_device *dev)
+ return 0;
+ }
+
+-static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct uml_net_private *lp = netdev_priv(dev);
+ unsigned long flags;
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 50206feac577d5..ef7b4b911a455a 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -456,43 +456,31 @@ static int bulk_req_safe_read(
+ return n;
+ }
+
+-/* Called without dev->lock held, and only in interrupt context. */
+-static void ubd_handler(void)
++static void ubd_end_request(struct io_thread_req *io_req)
+ {
+- int n;
+- int count;
+-
+- while(1){
+- n = bulk_req_safe_read(
+- thread_fd,
+- irq_req_buffer,
+- &irq_remainder,
+- &irq_remainder_size,
+- UBD_REQ_BUFFER_SIZE
+- );
+- if (n < 0) {
+- if(n == -EAGAIN)
+- break;
+- printk(KERN_ERR "spurious interrupt in ubd_handler, "
+- "err = %d\n", -n);
+- return;
+- }
+- for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+- struct io_thread_req *io_req = (*irq_req_buffer)[count];
+-
+- if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
+- blk_queue_max_discard_sectors(io_req->req->q, 0);
+- blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
+- }
+- blk_mq_end_request(io_req->req, io_req->error);
+- kfree(io_req);
+- }
++ if (io_req->error == BLK_STS_NOTSUPP) {
++ if (req_op(io_req->req) == REQ_OP_DISCARD)
++ blk_queue_max_discard_sectors(io_req->req->q, 0);
++ else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)
++ blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
+ }
++ blk_mq_end_request(io_req->req, io_req->error);
++ kfree(io_req);
+ }
+
+ static irqreturn_t ubd_intr(int irq, void *dev)
+ {
+- ubd_handler();
++ int len, i;
++
++ while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
++ &irq_remainder, &irq_remainder_size,
++ UBD_REQ_BUFFER_SIZE)) >= 0) {
++ for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
++ ubd_end_request((*irq_req_buffer)[i]);
++ }
++
++ if (len < 0 && len != -EAGAIN)
++ pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
+ return IRQ_HANDLED;
+ }
+
+@@ -1099,7 +1087,7 @@ static int __init ubd_init(void)
+
+ if (irq_req_buffer == NULL) {
+ printk(KERN_ERR "Failed to initialize ubd buffering\n");
+- return -1;
++ return -ENOMEM;
+ }
+ io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
+ sizeof(struct io_thread_req *),
+@@ -1110,7 +1098,7 @@ static int __init ubd_init(void)
+
+ if (io_req_buffer == NULL) {
+ printk(KERN_ERR "Failed to initialize ubd buffering\n");
+- return -1;
++ return -ENOMEM;
+ }
+ platform_driver_register(&ubd_driver);
+ mutex_lock(&ubd_lock);
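+The reworked ubd_intr() above drains the I/O-thread fd with a read-until-EAGAIN loop and treats any other negative return as a real error rather than "no more data". The same shape in plain POSIX against a non-blocking descriptor (illustrative only; /dev/null simply yields EOF here):
+
+    #include <errno.h>
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+            char buf[512];
+            ssize_t n;
+            int fd = open("/dev/null", O_RDONLY | O_NONBLOCK);
+
+            if (fd < 0)
+                    return 1;
+            while ((n = read(fd, buf, sizeof(buf))) > 0)
+                    ;                        /* handle n bytes per round */
+            if (n < 0 && errno != EAGAIN)
+                    perror("read");          /* a real error, not "drained" */
+            close(fd);
+            return 0;
+    }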
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index 131b7cb2957672..94a4dfac6c2368 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -141,7 +141,7 @@ static bool get_bpf_flash(struct arglist *def)
+
+ if (allow != NULL) {
+ if (kstrtoul(allow, 10, &result) == 0)
+- return (allow > 0);
++ return result > 0;
+ }
+ return false;
+ }
+diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
+index ffe2ee8a02465b..97a37c06299721 100644
+--- a/arch/um/drivers/virt-pci.c
++++ b/arch/um/drivers/virt-pci.c
+@@ -971,7 +971,7 @@ static long um_pci_map_platform(unsigned long offset, size_t size,
+ *ops = &um_pci_device_bar_ops;
+ *priv = &um_pci_platform_device->resptr[0];
+
+- return 0;
++ return offset;
+ }
+
+ static const struct logic_iomem_region_ops um_pci_platform_ops = {
+diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
+index 4b6d1b526bc121..66fe06db872f05 100644
+--- a/arch/um/include/asm/cpufeature.h
++++ b/arch/um/include/asm/cpufeature.h
+@@ -75,7 +75,7 @@ extern void setup_clear_cpu_cap(unsigned int bit);
+ */
+ static __always_inline bool _static_cpu_has(u16 bit)
+ {
+- asm_volatile_goto("1: jmp 6f\n"
++ asm goto("1: jmp 6f\n"
+ "2:\n"
+ ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ "((5f-4f) - (2b-1b)),0x90\n"
+diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h
+index 0d6547f4ec85c4..f97bb1f7b8514a 100644
+--- a/arch/um/include/asm/kasan.h
++++ b/arch/um/include/asm/kasan.h
+@@ -24,7 +24,6 @@
+
+ #ifdef CONFIG_KASAN
+ void kasan_init(void);
+-void kasan_map_memory(void *start, unsigned long len);
+ extern int kasan_um_is_ready;
+
+ #ifdef CONFIG_STATIC_LINK
+diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
+index 5b072aba5b658f..a7cb380c0b5c07 100644
+--- a/arch/um/include/asm/mmu.h
++++ b/arch/um/include/asm/mmu.h
+@@ -15,8 +15,6 @@ typedef struct mm_context {
+ struct page *stub_pages[2];
+ } mm_context_t;
+
+-extern void __switch_mm(struct mm_id * mm_idp);
+-
+ /* Avoid tangled inclusion with asm/ldt.h */
+ extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+ extern void free_ldt(struct mm_context *mm);
+diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
+index 7414154b8e9aea..d34169883dbf0a 100644
+--- a/arch/um/include/asm/processor-generic.h
++++ b/arch/um/include/asm/processor-generic.h
+@@ -95,7 +95,6 @@ extern struct cpuinfo_um boot_cpu_data;
+ #define current_cpu_data boot_cpu_data
+ #define cache_line_size() (boot_cpu_data.cache_alignment)
+
+-extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
+ #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
+ extern unsigned long __get_wchan(struct task_struct *p);
+
+diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
+index d8b8b4f07e429d..7372746c168757 100644
+--- a/arch/um/include/shared/kern_util.h
++++ b/arch/um/include/shared/kern_util.h
+@@ -50,7 +50,7 @@ extern void do_uml_exitcalls(void);
+ * Are we disallowed to sleep? Used to choose between GFP_KERNEL and
+ * GFP_ATOMIC.
+ */
+-extern int __cant_sleep(void);
++extern int __uml_cant_sleep(void);
+ extern int get_current_pid(void);
+ extern int copy_from_user_proc(void *to, void *from, int size);
+ extern char *uml_strdup(const char *string);
+@@ -67,4 +67,6 @@ extern void fatal_sigsegv(void) __attribute__ ((noreturn));
+
+ void um_idle_sleep(void);
+
++void kasan_map_memory(void *start, size_t len);
++
+ #endif
+diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
+index e82e203f5f4194..92dbf727e3842a 100644
+--- a/arch/um/include/shared/skas/mm_id.h
++++ b/arch/um/include/shared/skas/mm_id.h
+@@ -15,4 +15,6 @@ struct mm_id {
+ int kill;
+ };
+
++void __switch_mm(struct mm_id *mm_idp);
++
+ #endif
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 106b7da2f8d6f7..6daffb9d8a8d74 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -220,7 +220,7 @@ void arch_cpu_idle(void)
+ um_idle_sleep();
+ }
+
+-int __cant_sleep(void) {
++int __uml_cant_sleep(void) {
+ return in_atomic() || irqs_disabled() || in_interrupt();
+ /* Is in_interrupt() really needed? */
+ }
+diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
+index fddd1dec27e6d3..c8c4ef94c753f1 100644
+--- a/arch/um/kernel/time.c
++++ b/arch/um/kernel/time.c
+@@ -432,9 +432,29 @@ static void time_travel_update_time(unsigned long long next, bool idle)
+ time_travel_del_event(&ne);
+ }
+
++static void time_travel_update_time_rel(unsigned long long offs)
++{
++ unsigned long flags;
++
++ /*
++ * Disable interrupts before calculating the new time so
++ * that a real timer interrupt (signal) can't happen at
++ * a bad time e.g. after we read time_travel_time but
++ * before we've completed updating the time.
++ */
++ local_irq_save(flags);
++ time_travel_update_time(time_travel_time + offs, false);
++ local_irq_restore(flags);
++}
++
+ void time_travel_ndelay(unsigned long nsec)
+ {
+- time_travel_update_time(time_travel_time + nsec, false);
++ /*
++ * Not strictly needed to use _rel() version since this is
++ * only used in INFCPU/EXT modes, but it doesn't hurt and
++ * is more readable too.
++ */
++ time_travel_update_time_rel(nsec);
+ }
+ EXPORT_SYMBOL(time_travel_ndelay);
+
+@@ -568,7 +588,11 @@ static void time_travel_set_start(void)
+ #define time_travel_time 0
+ #define time_travel_ext_waiting 0
+
+-static inline void time_travel_update_time(unsigned long long ns, bool retearly)
++static inline void time_travel_update_time(unsigned long long ns, bool idle)
++{
++}
++
++static inline void time_travel_update_time_rel(unsigned long long offs)
+ {
+ }
+
+@@ -720,9 +744,7 @@ static u64 timer_read(struct clocksource *cs)
+ */
+ if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
+ !time_travel_ext_waiting)
+- time_travel_update_time(time_travel_time +
+- TIMER_MULTIPLIER,
+- false);
++ time_travel_update_time_rel(TIMER_MULTIPLIER);
+ return time_travel_time / TIMER_MULTIPLIER;
+ }
+
+@@ -852,9 +874,9 @@ int setup_time_travel_start(char *str)
+ return 1;
+ }
+
+-__setup("time-travel-start", setup_time_travel_start);
++__setup("time-travel-start=", setup_time_travel_start);
+ __uml_help(setup_time_travel_start,
+-"time-travel-start=<seconds>\n"
++"time-travel-start=<nanoseconds>\n"
+ "Configure the UML instance's wall clock to start at this value rather than\n"
+ "the host's wall clock at the time of UML boot.\n");
+ #endif
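+The point of time_travel_update_time_rel() is that reading time_travel_time and storing the advanced value must be one unit with respect to the timer signal. In userspace terms the same protection is blocking the signal around the read-modify-write; a runnable analogue, with SIGALRM standing in for the timer interrupt:
+
+    #include <signal.h>
+    #include <stdio.h>
+
+    static unsigned long long fake_time;
+
+    static void update_time_rel(unsigned long long offs)
+    {
+            sigset_t set, old;
+
+            sigemptyset(&set);
+            sigaddset(&set, SIGALRM);
+            sigprocmask(SIG_BLOCK, &set, &old);   /* ~ local_irq_save()     */
+            fake_time = fake_time + offs;         /* safe read-modify-write */
+            sigprocmask(SIG_SETMASK, &old, NULL); /* ~ local_irq_restore()  */
+    }
+
+    int main(void)
+    {
+            update_time_rel(1000);
+            printf("time = %llu\n", fake_time);
+            return 0;
+    }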
+diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
+index b459745f52e248..3cb8ac63be6ed9 100644
+--- a/arch/um/os-Linux/helper.c
++++ b/arch/um/os-Linux/helper.c
+@@ -46,7 +46,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
+ unsigned long stack, sp;
+ int pid, fds[2], ret, n;
+
+- stack = alloc_stack(0, __cant_sleep());
++ stack = alloc_stack(0, __uml_cant_sleep());
+ if (stack == 0)
+ return -ENOMEM;
+
+@@ -70,7 +70,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
+ data.pre_data = pre_data;
+ data.argv = argv;
+ data.fd = fds[1];
+- data.buf = __cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
++ data.buf = __uml_cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
+ uml_kmalloc(PATH_MAX, UM_GFP_KERNEL);
+ pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
+ if (pid < 0) {
+@@ -121,7 +121,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
+ unsigned long stack, sp;
+ int pid, status, err;
+
+- stack = alloc_stack(0, __cant_sleep());
++ stack = alloc_stack(0, __uml_cant_sleep());
+ if (stack == 0)
+ return -ENOMEM;
+
+diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
+index 8530b2e086049b..c6c9495b143212 100644
+--- a/arch/um/os-Linux/mem.c
++++ b/arch/um/os-Linux/mem.c
+@@ -15,6 +15,7 @@
+ #include <sys/vfs.h>
+ #include <linux/magic.h>
+ #include <init.h>
++#include <kern_util.h>
+ #include <os.h>
+
+ /*
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index 24a403a70a0201..850d21e6473eec 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -8,6 +8,7 @@
+
+ #include <stdlib.h>
+ #include <stdarg.h>
++#include <stdbool.h>
+ #include <errno.h>
+ #include <signal.h>
+ #include <string.h>
+@@ -65,9 +66,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
+
+ int signals_enabled;
+ #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+-static int signals_blocked;
+-#else
+-#define signals_blocked 0
++static int signals_blocked, signals_blocked_pending;
+ #endif
+ static unsigned int signals_pending;
+ static unsigned int signals_active = 0;
+@@ -76,14 +75,27 @@ void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
+ {
+ int enabled = signals_enabled;
+
+- if ((signals_blocked || !enabled) && (sig == SIGIO)) {
++#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
++ if ((signals_blocked ||
++ __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
++ (sig == SIGIO)) {
++ /* increment so unblock will do another round */
++ __atomic_add_fetch(&signals_blocked_pending, 1,
++ __ATOMIC_SEQ_CST);
++ return;
++ }
++#endif
++
++ if (!enabled && (sig == SIGIO)) {
+ /*
+ * In TT_MODE_EXTERNAL, need to still call time-travel
+- * handlers unless signals are also blocked for the
+- * external time message processing. This will mark
+- * signals_pending by itself (only if necessary.)
++ * handlers. This will mark signals_pending by itself
++ * (only if necessary.)
++ * Note we won't get here if signals are hard-blocked
++ * (which is handled above), in that case the hard-
++ * unblock will handle things.
+ */
+- if (!signals_blocked && time_travel_mode == TT_MODE_EXTERNAL)
++ if (time_travel_mode == TT_MODE_EXTERNAL)
+ sigio_run_timetravel_handlers();
+ else
+ signals_pending |= SIGIO_MASK;
+@@ -380,33 +392,99 @@ int um_set_signals_trace(int enable)
+ #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
+ void mark_sigio_pending(void)
+ {
++ /*
++ * It would seem that this should be atomic so
++ * it isn't a read-modify-write with a signal
++ * that could happen in the middle, losing the
++ * value set by the signal.
++ *
++ * However, this function is only called when in
++ * time-travel=ext simulation mode, in which case
++ * the only signal ever pending is SIGIO, which
++ * is blocked while this can be called, and the
++ * timer signal (SIGALRM) cannot happen.
++ */
+ signals_pending |= SIGIO_MASK;
+ }
+
+ void block_signals_hard(void)
+ {
+- if (signals_blocked)
+- return;
+- signals_blocked = 1;
++ signals_blocked++;
+ barrier();
+ }
+
+ void unblock_signals_hard(void)
+ {
++ static bool unblocking;
++
+ if (!signals_blocked)
++ panic("unblocking signals while not blocked");
++
++ if (--signals_blocked)
+ return;
+- /* Must be set to 0 before we check the pending bits etc. */
+- signals_blocked = 0;
++ /*
++ * Must be set to 0 before we check pending so the
++ * SIGIO handler will run as normal unless we're still
++ * going to process signals_blocked_pending.
++ */
+ barrier();
+
+- if (signals_pending && signals_enabled) {
+- /* this is a bit inefficient, but that's not really important */
+- block_signals();
+- unblock_signals();
+- } else if (signals_pending & SIGIO_MASK) {
+- /* we need to run time-travel handlers even if not enabled */
+- sigio_run_timetravel_handlers();
++ /*
++ * Note that block_signals_hard()/unblock_signals_hard() can be called
++ * within the unblock_signals()/sigio_run_timetravel_handlers() below.
++ * This would still be prone to race conditions since it's actually a
++ * call _within_ e.g. vu_req_read_message(), where we observed this
++ * issue, which loops. Thus, if the inner call handles the recorded
++ * pending signals, we can get out of the inner call with the real
++ * signal handler no longer blocked, and still have a race. Thus don't
++ * handle unblocking in the inner call, if it happens, but only in
++ * the outermost call - 'unblocking' serves as an ownership for the
++ * signals_blocked_pending decrement.
++ */
++ if (unblocking)
++ return;
++ unblocking = true;
++
++ while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
++ if (signals_enabled) {
++ /* signals are enabled so we can touch this */
++ signals_pending |= SIGIO_MASK;
++ /*
++ * this is a bit inefficient, but that's
++ * not really important
++ */
++ block_signals();
++ unblock_signals();
++ } else {
++ /*
++ * we need to run time-travel handlers even
++ * if not enabled
++ */
++ sigio_run_timetravel_handlers();
++ }
++
++ /*
++ * The decrement of signals_blocked_pending must be atomic so
++ * that the signal handler will either happen before or after
++ * the decrement, not during a read-modify-write:
++ * - If it happens before, it can increment it and we'll
++ * decrement it and do another round in the loop.
++ * - If it happens after it'll see 0 for both signals_blocked
++ * and signals_blocked_pending and thus run the handler as
++ * usual (subject to signals_enabled, but that's unrelated.)
++ *
++ * Note that a call to unblock_signals_hard() within the calls
++ * to unblock_signals() or sigio_run_timetravel_handlers() above
++ * will do nothing due to the 'unblocking' state, so this cannot
++ * underflow as the only one decrementing will be the outermost
++ * one.
++ */
++ if (__atomic_sub_fetch(&signals_blocked_pending, 1,
++ __ATOMIC_SEQ_CST) < 0)
++ panic("signals_blocked_pending underflow");
+ }
++
++ unblocking = false;
+ }
+ #endif
+
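+A compressed userspace model of the scheme above, in case the moving parts are hard to see in diff form: while hard-blocked, the handler only bumps an atomic pending counter; the outermost unblock then drains that counter with atomic decrements, so a signal landing mid-drain simply buys another loop iteration. Variable names mirror the patch; the nesting and 'unblocking' ownership details are simplified away:
+
+    #include <signal.h>
+    #include <stdio.h>
+
+    static volatile int signals_blocked;
+    static int signals_blocked_pending;     /* touched only via __atomic_* */
+
+    static void handler(int sig)
+    {
+            if (signals_blocked) {          /* defer: just record one round */
+                    __atomic_add_fetch(&signals_blocked_pending, 1,
+                                       __ATOMIC_SEQ_CST);
+                    return;
+            }
+            /* normal delivery path would run here */
+    }
+
+    int main(void)
+    {
+            signal(SIGALRM, handler);
+
+            signals_blocked++;              /* block_signals_hard()   */
+            raise(SIGALRM);                 /* handler only counts it */
+            raise(SIGALRM);
+
+            if (--signals_blocked == 0) {   /* unblock_signals_hard() */
+                    while (__atomic_load_n(&signals_blocked_pending,
+                                           __ATOMIC_SEQ_CST)) {
+                            puts("running one deferred round");
+                            __atomic_sub_fetch(&signals_blocked_pending, 1,
+                                               __ATOMIC_SEQ_CST);
+                    }
+            }
+            return 0;
+    }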
+diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
+index fc0f2a9dee5af9..1dca4ffbd572f7 100644
+--- a/arch/um/os-Linux/util.c
++++ b/arch/um/os-Linux/util.c
+@@ -173,23 +173,38 @@ __uml_setup("quiet", quiet_cmd_param,
+ "quiet\n"
+ " Turns off information messages during boot.\n\n");
+
++/*
++ * The os_info/os_warn functions will be called by helper threads. These
++ * have a very limited stack size and using the libc formatting functions
++ * may overflow the stack.
++ * So pull in the kernel vscnprintf and use that instead with a fixed
++ * on-stack buffer.
++ */
++int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
++
+ void os_info(const char *fmt, ...)
+ {
++ char buf[256];
+ va_list list;
++ int len;
+
+ if (quiet_info)
+ return;
+
+ va_start(list, fmt);
+- vfprintf(stderr, fmt, list);
++ len = vscnprintf(buf, sizeof(buf), fmt, list);
++ fwrite(buf, len, 1, stderr);
+ va_end(list);
+ }
+
+ void os_warn(const char *fmt, ...)
+ {
++ char buf[256];
+ va_list list;
++ int len;
+
+ va_start(list, fmt);
+- vfprintf(stderr, fmt, list);
++ len = vscnprintf(buf, sizeof(buf), fmt, list);
++ fwrite(buf, len, 1, stderr);
+ va_end(list);
+ }
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 66bfabae881491..82d12c93feabe6 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -62,6 +62,7 @@ config X86
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+ select ARCH_32BIT_OFF_T if X86_32
+ select ARCH_CLOCKSOURCE_INIT
++ select ARCH_CONFIGURES_CPU_MITIGATIONS
+ select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
+ select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
+@@ -1514,19 +1515,6 @@ config AMD_MEM_ENCRYPT
+ This requires an AMD processor that supports Secure Memory
+ Encryption (SME).
+
+-config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
+- bool "Activate AMD Secure Memory Encryption (SME) by default"
+- depends on AMD_MEM_ENCRYPT
+- help
+- Say yes to have system memory encrypted by default if running on
+- an AMD processor that supports Secure Memory Encryption (SME).
+-
+- If set to Y, then the encryption of system memory can be
+- deactivated with the mem_encrypt=off command line option.
+-
+- If set to N, then the encryption of system memory can be
+- activated with the mem_encrypt=on command line option.
+-
+ # Common NUMA Features
+ config NUMA
+ bool "NUMA Memory Allocation and Scheduler Support"
+@@ -2034,7 +2022,7 @@ config ARCH_SUPPORTS_KEXEC
+ def_bool y
+
+ config ARCH_SUPPORTS_KEXEC_FILE
+- def_bool X86_64 && CRYPTO && CRYPTO_SHA256
++ def_bool X86_64
+
+ config ARCH_SELECTS_KEXEC_FILE
+ def_bool y
+@@ -2042,7 +2030,7 @@ config ARCH_SELECTS_KEXEC_FILE
+ select HAVE_IMA_KEXEC if IMA
+
+ config ARCH_SUPPORTS_KEXEC_PURGATORY
+- def_bool KEXEC_FILE
++ def_bool y
+
+ config ARCH_SUPPORTS_KEXEC_SIG
+ def_bool y
+@@ -2434,17 +2422,21 @@ config PREFIX_SYMBOLS
+ def_bool y
+ depends on CALL_PADDING && !CFI_CLANG
+
+-menuconfig SPECULATION_MITIGATIONS
+- bool "Mitigations for speculative execution vulnerabilities"
++menuconfig CPU_MITIGATIONS
++ bool "Mitigations for CPU vulnerabilities"
+ default y
+ help
+- Say Y here to enable options which enable mitigations for
+- speculative execution hardware vulnerabilities.
++ Say Y here to enable options which enable mitigations for hardware
++ vulnerabilities (usually related to speculative execution).
++ Mitigations can be disabled or restricted to SMT systems at runtime
++ via the "mitigations" kernel parameter.
+
+- If you say N, all mitigations will be disabled. You really
+- should know what you are doing to say so.
++ If you say N, all mitigations will be disabled. This CANNOT be
++ overridden at runtime.
+
+-if SPECULATION_MITIGATIONS
++ Say 'Y', unless you really know what you are doing.
++
++if CPU_MITIGATIONS
+
+ config PAGE_TABLE_ISOLATION
+ bool "Remove the kernel mapping in user mode"
+@@ -2568,6 +2560,27 @@ config GDS_FORCE_MITIGATION
+
+ If in doubt, say N.
+
++config MITIGATION_RFDS
++ bool "RFDS Mitigation"
++ depends on CPU_SUP_INTEL
++ default y
++ help
++ Enable mitigation for Register File Data Sampling (RFDS) by default.
++ RFDS is a hardware vulnerability which affects Intel Atom CPUs. It
++ allows unprivileged speculative access to stale data previously
++ stored in floating point, vector and integer registers.
++ See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
++
++config MITIGATION_SPECTRE_BHI
++ bool "Mitigate Spectre-BHB (Branch History Injection)"
++ depends on CPU_SUP_INTEL
++ default y
++ help
++ Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
++ where the branch history buffer is poisoned to speculatively steer
++ indirect branches.
++ See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
++
+ endif
+
+ config ARCH_HAS_ADD_PAGES
+diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
+index 8ad41da301e53c..16d0b022d6fff6 100644
+--- a/arch/x86/Kconfig.assembler
++++ b/arch/x86/Kconfig.assembler
+@@ -26,6 +26,6 @@ config AS_GFNI
+ Supported by binutils >= 2.30 and LLVM integrated assembler
+
+ config AS_WRUSS
+- def_bool $(as-instr,wrussq %rax$(comma)(%rbx))
++ def_bool $(as-instr64,wrussq %rax$(comma)(%rbx))
+ help
+ Supported by binutils >= 2.31 and LLVM integrated assembler
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 00468adf180f1d..87396575cfa777 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -375,7 +375,7 @@ config X86_CMOV
+ config X86_MINIMUM_CPU_FAMILY
+ int
+ default "64" if X86_64
+- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
+ default "5" if X86_32 && X86_CMPXCHG64
+ default "4"
+
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index c5d614d28a7599..74777a97e394aa 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -248,6 +248,7 @@ config UNWINDER_ORC
+
+ config UNWINDER_FRAME_POINTER
+ bool "Frame pointer unwinder"
++ select ARCH_WANT_FRAME_POINTERS
+ select FRAME_POINTER
+ help
+ This option enables the frame pointer unwinder for unwinding kernel
+@@ -271,7 +272,3 @@ config UNWINDER_GUESS
+ overhead.
+
+ endchoice
+-
+-config FRAME_POINTER
+- depends on !UNWINDER_ORC && !UNWINDER_GUESS
+- bool
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 5bfe5caaa444b3..3ff53a2d4ff084 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -291,9 +291,10 @@ PHONY += install
+ install:
+ $(call cmd,install)
+
+-PHONY += vdso_install
+-vdso_install:
+- $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
++vdso-install-$(CONFIG_X86_64) += arch/x86/entry/vdso/vdso64.so.dbg
++vdso-install-$(CONFIG_X86_X32_ABI) += arch/x86/entry/vdso/vdsox32.so.dbg
++vdso-install-$(CONFIG_X86_32) += arch/x86/entry/vdso/vdso32.so.dbg
++vdso-install-$(CONFIG_IA32_EMULATION) += arch/x86/entry/vdso/vdso32.so.dbg
+
+ archprepare: checkbin
+ checkbin:
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index f33e45ed143765..3cece19b74732f 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -89,7 +89,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
+
+ SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
+
+-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|efi32_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|efi.._stub_entry\|efi\(32\)\?_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|_e\?data\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+
+ quiet_cmd_zoffset = ZOFFSET $@
+ cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 71fc531b95b4ee..658e9ec065c476 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -84,7 +84,7 @@ LDFLAGS_vmlinux += -T
+ hostprogs := mkpiggy
+ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+
+-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
++sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+
+ quiet_cmd_voffset = VOFFSET $@
+ cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
+@@ -116,9 +116,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
+
+ vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
+ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
+-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
++vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
++$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
+ $(call if_changed,ld)
+
+ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
+index f4e22ef774ab6b..876fc6d46a1318 100644
+--- a/arch/x86/boot/compressed/efi_mixed.S
++++ b/arch/x86/boot/compressed/efi_mixed.S
+@@ -15,10 +15,12 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/asm-offsets.h>
+ #include <asm/msr.h>
+ #include <asm/page_types.h>
+ #include <asm/processor-flags.h>
+ #include <asm/segment.h>
++#include <asm/setup.h>
+
+ .code64
+ .text
+@@ -49,6 +51,11 @@ SYM_FUNC_START(startup_64_mixed_mode)
+ lea efi32_boot_args(%rip), %rdx
+ mov 0(%rdx), %edi
+ mov 4(%rdx), %esi
++
++ /* Switch to the firmware's stack */
++ movl efi32_boot_sp(%rip), %esp
++ andl $~7, %esp
++
+ #ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ mov 8(%rdx), %edx // saved bootparams pointer
+ test %edx, %edx
+@@ -144,6 +151,7 @@ SYM_FUNC_END(__efi64_thunk)
+ SYM_FUNC_START(efi32_stub_entry)
+ call 1f
+ 1: popl %ecx
++ leal (efi32_boot_args - 1b)(%ecx), %ebx
+
+ /* Clear BSS */
+ xorl %eax, %eax
+@@ -158,6 +166,7 @@ SYM_FUNC_START(efi32_stub_entry)
+ popl %ecx
+ popl %edx
+ popl %esi
++ movl %esi, 8(%ebx)
+ jmp efi32_entry
+ SYM_FUNC_END(efi32_stub_entry)
+ #endif
+@@ -234,8 +243,6 @@ SYM_FUNC_END(efi_enter32)
+ *
+ * Arguments: %ecx image handle
+ * %edx EFI system table pointer
+- * %esi struct bootparams pointer (or NULL when not using
+- * the EFI handover protocol)
+ *
+ * Since this is the point of no return for ordinary execution, no registers
+ * are considered live except for the function parameters. [Note that the EFI
+@@ -254,13 +261,25 @@ SYM_FUNC_START_LOCAL(efi32_entry)
+ /* Store firmware IDT descriptor */
+ sidtl (efi32_boot_idt - 1b)(%ebx)
+
++ /* Store firmware stack pointer */
++ movl %esp, (efi32_boot_sp - 1b)(%ebx)
++
+ /* Store boot arguments */
+ leal (efi32_boot_args - 1b)(%ebx), %ebx
+ movl %ecx, 0(%ebx)
+ movl %edx, 4(%ebx)
+- movl %esi, 8(%ebx)
+ movb $0x0, 12(%ebx) // efi_is64
+
++ /*
++ * Allocate some memory for a temporary struct boot_params, which only
++ * needs the minimal pieces that startup_32() relies on.
++ */
++ subl $PARAM_SIZE, %esp
++ movl %esp, %esi
++ movl $PAGE_SIZE, BP_kernel_alignment(%esi)
++ movl $_end - 1b, BP_init_size(%esi)
++ subl $startup_32 - 1b, BP_init_size(%esi)
++
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+@@ -286,8 +305,7 @@ SYM_FUNC_START(efi32_pe_entry)
+
+ movl 8(%ebp), %ecx // image_handle
+ movl 12(%ebp), %edx // sys_table
+- xorl %esi, %esi
+- jmp efi32_entry // pass %ecx, %edx, %esi
++ jmp efi32_entry // pass %ecx, %edx
+ // no other registers remain live
+
+ 2: popl %edi // restore callee-save registers
+@@ -318,5 +336,6 @@ SYM_DATA_END(efi32_boot_idt)
+
+ SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
+ SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
++SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
+ SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
+ SYM_DATA(efi_is64, .byte 1)
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index bf4a10a5794f1c..1dcb794c5479ed 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -398,6 +398,11 @@ SYM_CODE_START(startup_64)
+ call sev_enable
+ #endif
+
++ /* Preserve only the CR4 bits that must be preserved, and clear the rest */
++ movq %cr4, %rax
++ andl $(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
++ movq %rax, %cr4
++
+ /*
+ * configure_5level_paging() updates the number of paging levels using
+ * a trampoline in 32-bit addressable memory if the current number does
+diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
+index 08f93b0401bbd3..aead80ec70a0bf 100644
+--- a/arch/x86/boot/compressed/ident_map_64.c
++++ b/arch/x86/boot/compressed/ident_map_64.c
+@@ -385,3 +385,8 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
+ */
+ kernel_add_identity_map(address, end);
+ }
++
++void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
++{
++ /* Empty handler to ignore NMI during early boot */
++}
+diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
+index 3cdf94b4145674..d100284bbef47b 100644
+--- a/arch/x86/boot/compressed/idt_64.c
++++ b/arch/x86/boot/compressed/idt_64.c
+@@ -61,6 +61,7 @@ void load_stage2_idt(void)
+ boot_idt_desc.address = (unsigned long)boot_idt;
+
+ set_idt_entry(X86_TRAP_PF, boot_page_fault);
++ set_idt_entry(X86_TRAP_NMI, boot_nmi_trap);
+
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+diff --git a/arch/x86/boot/compressed/idt_handlers_64.S b/arch/x86/boot/compressed/idt_handlers_64.S
+index 22890e199f5b44..4d03c8562f637d 100644
+--- a/arch/x86/boot/compressed/idt_handlers_64.S
++++ b/arch/x86/boot/compressed/idt_handlers_64.S
+@@ -70,6 +70,7 @@ SYM_FUNC_END(\name)
+ .code64
+
+ EXCEPTION_HANDLER boot_page_fault do_boot_page_fault error_code=1
++EXCEPTION_HANDLER boot_nmi_trap do_boot_nmi_trap error_code=0
+
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ EXCEPTION_HANDLER boot_stage1_vc do_vc_no_ghcb error_code=1
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index f711f2a85862e9..b5ecbd32a46fac 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -330,6 +330,7 @@ static size_t parse_elf(void *output)
+ return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
+ }
+
++const unsigned long kernel_text_size = VO___start_rodata - VO__text;
+ const unsigned long kernel_total_size = VO__end - VO__text;
+
+ static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
+@@ -357,6 +358,19 @@ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+ return entry;
+ }
+
++/*
++ * Set the memory encryption xloadflag based on the mem_encrypt= command line
++ * parameter, if provided.
++ */
++static void parse_mem_encrypt(struct setup_header *hdr)
++{
++ int on = cmdline_find_option_bool("mem_encrypt=on");
++ int off = cmdline_find_option_bool("mem_encrypt=off");
++
++ if (on > off)
++ hdr->xloadflags |= XLF_MEM_ENCRYPTION;
++}
++
+ /*
+ * The compressed kernel image (ZO), has been moved so that its position
+ * is against the end of the buffer used to hold the uncompressed kernel
+@@ -387,6 +401,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
+ /* Clear flags intended for solely in-kernel use. */
+ boot_params->hdr.loadflags &= ~KASLR_FLAG;
+
++ parse_mem_encrypt(&boot_params->hdr);
++
+ sanitize_boot_params(boot_params);
+
+ if (boot_params->screen_info.orig_video_mode == 7) {
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index cc70d3fb90497e..aae1a2db425103 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -197,6 +197,7 @@ static inline void cleanup_exception_handling(void) { }
+
+ /* IDT Entry Points */
+ void boot_page_fault(void);
++void boot_nmi_trap(void);
+ void boot_stage1_vc(void);
+ void boot_stage2_vc(void);
+
+diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
+index 80d76aea1f7bf1..0a49218a516a28 100644
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -116,6 +116,9 @@ static bool fault_in_kernel_space(unsigned long address)
+ #undef __init
+ #define __init
+
++#undef __head
++#define __head
++
+ #define __BOOT_COMPRESSED
+
+ /* Basic instruction decoding support needed */
+diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
+index b22f34b8684a72..083ec6d7722a16 100644
+--- a/arch/x86/boot/compressed/vmlinux.lds.S
++++ b/arch/x86/boot/compressed/vmlinux.lds.S
+@@ -43,11 +43,13 @@ SECTIONS
+ *(.rodata.*)
+ _erodata = . ;
+ }
+- .data : {
++ .data : ALIGN(0x1000) {
+ _data = . ;
+ *(.data)
+ *(.data.*)
+- *(.bss.efistub)
++
++ /* Add 4 bytes of extra space for a CRC-32 checksum */
++ . = ALIGN(. + 4, 0x200);
+ _edata = . ;
+ }
+ . = ALIGN(L1_CACHE_BYTES);
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index b04ca8e2b213c6..a1bbedd989e42e 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -36,66 +36,20 @@ SYSSEG = 0x1000 /* historical load address >> 4 */
+ #define ROOT_RDONLY 1
+ #endif
+
++ .set salign, 0x1000
++ .set falign, 0x200
++
+ .code16
+ .section ".bstext", "ax"
+-
+- .global bootsect_start
+-bootsect_start:
+ #ifdef CONFIG_EFI_STUB
+ # "MZ", MS-DOS header
+ .word MZ_MAGIC
+-#endif
+-
+- # Normalize the start address
+- ljmp $BOOTSEG, $start2
+-
+-start2:
+- movw %cs, %ax
+- movw %ax, %ds
+- movw %ax, %es
+- movw %ax, %ss
+- xorw %sp, %sp
+- sti
+- cld
+-
+- movw $bugger_off_msg, %si
+-
+-msg_loop:
+- lodsb
+- andb %al, %al
+- jz bs_die
+- movb $0xe, %ah
+- movw $7, %bx
+- int $0x10
+- jmp msg_loop
+-
+-bs_die:
+- # Allow the user to press a key, then reboot
+- xorw %ax, %ax
+- int $0x16
+- int $0x19
+-
+- # int 0x19 should never return. In case it does anyway,
+- # invoke the BIOS reset code...
+- ljmp $0xf000,$0xfff0
+-
+-#ifdef CONFIG_EFI_STUB
+ .org 0x38
+ #
+ # Offset to the PE header.
+ #
+ .long LINUX_PE_MAGIC
+ .long pe_header
+-#endif /* CONFIG_EFI_STUB */
+-
+- .section ".bsdata", "a"
+-bugger_off_msg:
+- .ascii "Use a boot loader.\r\n"
+- .ascii "\n"
+- .ascii "Remove disk and press any key to reboot...\r\n"
+- .byte 0
+-
+-#ifdef CONFIG_EFI_STUB
+ pe_header:
+ .long PE_MAGIC
+
+@@ -124,30 +78,26 @@ optional_header:
+ .byte 0x02 # MajorLinkerVersion
+ .byte 0x14 # MinorLinkerVersion
+
+- # Filled in by build.c
+- .long 0 # SizeOfCode
++ .long ZO__data # SizeOfCode
+
+- .long 0 # SizeOfInitializedData
++ .long ZO__end - ZO__data # SizeOfInitializedData
+ .long 0 # SizeOfUninitializedData
+
+- # Filled in by build.c
+- .long 0x0000 # AddressOfEntryPoint
++ .long setup_size + ZO_efi_pe_entry # AddressOfEntryPoint
+
+- .long 0x0200 # BaseOfCode
++ .long setup_size # BaseOfCode
+ #ifdef CONFIG_X86_32
+ .long 0 # data
+ #endif
+
+ extra_header_fields:
+- # PE specification requires ImageBase to be 64k aligned
+- .set image_base, (LOAD_PHYSICAL_ADDR + 0xffff) & ~0xffff
+ #ifdef CONFIG_X86_32
+- .long image_base # ImageBase
++ .long 0 # ImageBase
+ #else
+- .quad image_base # ImageBase
++ .quad 0 # ImageBase
+ #endif
+- .long 0x20 # SectionAlignment
+- .long 0x20 # FileAlignment
++ .long salign # SectionAlignment
++ .long falign # FileAlignment
+ .word 0 # MajorOperatingSystemVersion
+ .word 0 # MinorOperatingSystemVersion
+ .word LINUX_EFISTUB_MAJOR_VERSION # MajorImageVersion
+@@ -156,12 +106,9 @@ extra_header_fields:
+ .word 0 # MinorSubsystemVersion
+ .long 0 # Win32VersionValue
+
+- #
+- # The size of the bzImage is written in tools/build.c
+- #
+- .long 0 # SizeOfImage
++ .long setup_size + ZO__end # SizeOfImage
+
+- .long 0x200 # SizeOfHeaders
++ .long salign # SizeOfHeaders
+ .long 0 # CheckSum
+ .word IMAGE_SUBSYSTEM_EFI_APPLICATION # Subsystem (EFI application)
+ #ifdef CONFIG_EFI_DXE_MEM_ATTRIBUTES
+@@ -192,87 +139,77 @@ extra_header_fields:
+
+ # Section table
+ section_table:
+- #
+- # The offset & size fields are filled in by build.c.
+- #
+ .ascii ".setup"
+ .byte 0
+ .byte 0
+- .long 0
+- .long 0x0 # startup_{32,64}
+- .long 0 # Size of initialized data
+- # on disk
+- .long 0x0 # startup_{32,64}
+- .long 0 # PointerToRelocations
+- .long 0 # PointerToLineNumbers
+- .word 0 # NumberOfRelocations
+- .word 0 # NumberOfLineNumbers
+- .long IMAGE_SCN_CNT_CODE | \
+- IMAGE_SCN_MEM_READ | \
+- IMAGE_SCN_MEM_EXECUTE | \
+- IMAGE_SCN_ALIGN_16BYTES # Characteristics
++ .long pecompat_fstart - salign # VirtualSize
++ .long salign # VirtualAddress
++ .long pecompat_fstart - salign # SizeOfRawData
++ .long salign # PointerToRawData
+
+- #
+- # The EFI application loader requires a relocation section
+- # because EFI applications must be relocatable. The .reloc
+- # offset & size fields are filled in by build.c.
+- #
+- .ascii ".reloc"
+- .byte 0
+- .byte 0
+- .long 0
+- .long 0
+- .long 0 # SizeOfRawData
+- .long 0 # PointerToRawData
+- .long 0 # PointerToRelocations
+- .long 0 # PointerToLineNumbers
+- .word 0 # NumberOfRelocations
+- .word 0 # NumberOfLineNumbers
++ .long 0, 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+- IMAGE_SCN_MEM_DISCARDABLE | \
+- IMAGE_SCN_ALIGN_1BYTES # Characteristics
++ IMAGE_SCN_MEM_DISCARDABLE # Characteristics
+
+ #ifdef CONFIG_EFI_MIXED
+- #
+- # The offset & size fields are filled in by build.c.
+- #
+ .asciz ".compat"
+- .long 0
+- .long 0x0
+- .long 0 # Size of initialized data
+- # on disk
+- .long 0x0
+- .long 0 # PointerToRelocations
+- .long 0 # PointerToLineNumbers
+- .word 0 # NumberOfRelocations
+- .word 0 # NumberOfLineNumbers
++
++ .long pecompat_fsize # VirtualSize
++ .long pecompat_fstart # VirtualAddress
++ .long pecompat_fsize # SizeOfRawData
++ .long pecompat_fstart # PointerToRawData
++
++ .long 0, 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+- IMAGE_SCN_MEM_DISCARDABLE | \
+- IMAGE_SCN_ALIGN_1BYTES # Characteristics
++ IMAGE_SCN_MEM_DISCARDABLE # Characteristics
++
++ /*
++ * Put the IA-32 machine type and the associated entry point address in
++ * the .compat section, so loaders can figure out which other execution
++ * modes this image supports.
++ */
++ .pushsection ".pecompat", "a", @progbits
++ .balign salign
++ .globl pecompat_fstart
++pecompat_fstart:
++ .byte 0x1 # Version
++ .byte 8 # Size
++ .word IMAGE_FILE_MACHINE_I386 # PE machine type
++ .long setup_size + ZO_efi32_pe_entry # Entrypoint
++ .byte 0x0 # Sentinel
++ .popsection
++#else
++ .set pecompat_fstart, setup_size
+ #endif
+-
+- #
+- # The offset & size fields are filled in by build.c.
+- #
+ .ascii ".text"
+ .byte 0
+ .byte 0
+ .byte 0
+- .long 0
+- .long 0x0 # startup_{32,64}
+- .long 0 # Size of initialized data
++ .long ZO__data
++ .long setup_size
++ .long ZO__data # Size of initialized data
+ # on disk
+- .long 0x0 # startup_{32,64}
++ .long setup_size
+ .long 0 # PointerToRelocations
+ .long 0 # PointerToLineNumbers
+ .word 0 # NumberOfRelocations
+ .word 0 # NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+- IMAGE_SCN_MEM_EXECUTE | \
+- IMAGE_SCN_ALIGN_16BYTES # Characteristics
++ IMAGE_SCN_MEM_EXECUTE # Characteristics
++
++ .ascii ".data\0\0\0"
++ .long ZO__end - ZO__data # VirtualSize
++ .long setup_size + ZO__data # VirtualAddress
++ .long ZO__edata - ZO__data # SizeOfRawData
++ .long setup_size + ZO__data # PointerToRawData
++
++ .long 0, 0, 0
++ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
++ IMAGE_SCN_MEM_READ | \
++ IMAGE_SCN_MEM_WRITE # Characteristics
+
+ .set section_count, (. - section_table) / 40
+ #endif /* CONFIG_EFI_STUB */
+@@ -286,12 +223,12 @@ sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */
+
+ .globl hdr
+ hdr:
+-setup_sects: .byte 0 /* Filled in by build.c */
++ .byte setup_sects - 1
+ root_flags: .word ROOT_RDONLY
+-syssize: .long 0 /* Filled in by build.c */
++syssize: .long ZO__edata / 16
+ ram_size: .word 0 /* Obsolete */
+ vid_mode: .word SVGA_MODE
+-root_dev: .word 0 /* Filled in by build.c */
++root_dev: .word 0 /* Default to major/minor 0/0 */
+ boot_flag: .word 0xAA55
+
+ # offset 512, entry point
+@@ -579,9 +516,25 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
+ # define INIT_SIZE VO_INIT_SIZE
+ #endif
+
++ .macro __handover_offset
++#ifndef CONFIG_EFI_HANDOVER_PROTOCOL
++ .long 0
++#elif !defined(CONFIG_X86_64)
++ .long ZO_efi32_stub_entry
++#else
++ /* Yes, this is really how we defined it :( */
++ .long ZO_efi64_stub_entry - 0x200
++#ifdef CONFIG_EFI_MIXED
++ .if ZO_efi32_stub_entry != ZO_efi64_stub_entry - 0x200
++ .error "32-bit and 64-bit EFI entry points do not match"
++ .endif
++#endif
++#endif
++ .endm
++
+ init_size: .long INIT_SIZE # kernel initialization size
+-handover_offset: .long 0 # Filled in by build.c
+-kernel_info_offset: .long 0 # Filled in by build.c
++handover_offset: __handover_offset
++kernel_info_offset: .long ZO_kernel_info
+
+ # End of setup header #####################################################
+
+diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
+index c4ea5258ab558f..9049f390d8347f 100644
+--- a/arch/x86/boot/main.c
++++ b/arch/x86/boot/main.c
+@@ -119,8 +119,8 @@ static void init_heap(void)
+ char *stack_end;
+
+ if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
+- asm("leal %P1(%%esp),%0"
+- : "=r" (stack_end) : "i" (-STACK_SIZE));
++ asm("leal %n1(%%esp),%0"
++ : "=r" (stack_end) : "i" (STACK_SIZE));
+
+ heap_end = (char *)
+ ((size_t)boot_params.hdr.heap_end_ptr + 0x200);
+diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
+index 49546c247ae25e..3a2d1360abb016 100644
+--- a/arch/x86/boot/setup.ld
++++ b/arch/x86/boot/setup.ld
+@@ -10,10 +10,11 @@ ENTRY(_start)
+ SECTIONS
+ {
+ . = 0;
+- .bstext : { *(.bstext) }
+- .bsdata : { *(.bsdata) }
++ .bstext : {
++ *(.bstext)
++ . = 495;
++ } =0xffffffff
+
+- . = 495;
+ .header : { *(.header) }
+ .entrytext : { *(.entrytext) }
+ .inittext : { *(.inittext) }
+@@ -23,6 +24,9 @@ SECTIONS
+ .text : { *(.text .text.*) }
+ .text32 : { *(.text32) }
+
++ .pecompat : { *(.pecompat) }
++ PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
++
+ . = ALIGN(16);
+ .rodata : { *(.rodata*) }
+
+@@ -38,8 +42,10 @@ SECTIONS
+ .signature : {
+ setup_sig = .;
+ LONG(0x5a5aaa55)
+- }
+
++ setup_size = ALIGN(ABSOLUTE(.), 4096);
++ setup_sects = ABSOLUTE(setup_size / 512);
++ }
+
+ . = ALIGN(16);
+ .bss :
+diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
+index bd247692b70174..10311d77c67f8f 100644
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -40,10 +40,6 @@ typedef unsigned char u8;
+ typedef unsigned short u16;
+ typedef unsigned int u32;
+
+-#define DEFAULT_MAJOR_ROOT 0
+-#define DEFAULT_MINOR_ROOT 0
+-#define DEFAULT_ROOT_DEV (DEFAULT_MAJOR_ROOT << 8 | DEFAULT_MINOR_ROOT)
+-
+ /* Minimal number of setup sectors */
+ #define SETUP_SECT_MIN 5
+ #define SETUP_SECT_MAX 64
+@@ -51,22 +47,7 @@ typedef unsigned int u32;
+ /* This must be large enough to hold the entire setup */
+ u8 buf[SETUP_SECT_MAX*512];
+
+-#define PECOFF_RELOC_RESERVE 0x20
+-
+-#ifdef CONFIG_EFI_MIXED
+-#define PECOFF_COMPAT_RESERVE 0x20
+-#else
+-#define PECOFF_COMPAT_RESERVE 0x0
+-#endif
+-
+-static unsigned long efi32_stub_entry;
+-static unsigned long efi64_stub_entry;
+-static unsigned long efi_pe_entry;
+-static unsigned long efi32_pe_entry;
+-static unsigned long kernel_info;
+-static unsigned long startup_64;
+-static unsigned long _ehead;
+-static unsigned long _end;
++static unsigned long _edata;
+
+ /*----------------------------------------------------------------------*/
+
+@@ -152,180 +133,6 @@ static void usage(void)
+ die("Usage: build setup system zoffset.h image");
+ }
+
+-#ifdef CONFIG_EFI_STUB
+-
+-static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
+-{
+- unsigned int pe_header;
+- unsigned short num_sections;
+- u8 *section;
+-
+- pe_header = get_unaligned_le32(&buf[0x3c]);
+- num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+-
+-#ifdef CONFIG_X86_32
+- section = &buf[pe_header + 0xa8];
+-#else
+- section = &buf[pe_header + 0xb8];
+-#endif
+-
+- while (num_sections > 0) {
+- if (strncmp((char*)section, section_name, 8) == 0) {
+- /* section header size field */
+- put_unaligned_le32(size, section + 0x8);
+-
+- /* section header vma field */
+- put_unaligned_le32(vma, section + 0xc);
+-
+- /* section header 'size of initialised data' field */
+- put_unaligned_le32(datasz, section + 0x10);
+-
+- /* section header 'file offset' field */
+- put_unaligned_le32(offset, section + 0x14);
+-
+- break;
+- }
+- section += 0x28;
+- num_sections--;
+- }
+-}
+-
+-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+-{
+- update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+-}
+-
+-static void update_pecoff_setup_and_reloc(unsigned int size)
+-{
+- u32 setup_offset = 0x200;
+- u32 reloc_offset = size - PECOFF_RELOC_RESERVE - PECOFF_COMPAT_RESERVE;
+-#ifdef CONFIG_EFI_MIXED
+- u32 compat_offset = reloc_offset + PECOFF_RELOC_RESERVE;
+-#endif
+- u32 setup_size = reloc_offset - setup_offset;
+-
+- update_pecoff_section_header(".setup", setup_offset, setup_size);
+- update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+-
+- /*
+- * Modify .reloc section contents with a single entry. The
+- * relocation is applied to offset 10 of the relocation section.
+- */
+- put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+- put_unaligned_le32(10, &buf[reloc_offset + 4]);
+-
+-#ifdef CONFIG_EFI_MIXED
+- update_pecoff_section_header(".compat", compat_offset, PECOFF_COMPAT_RESERVE);
+-
+- /*
+- * Put the IA-32 machine type (0x14c) and the associated entry point
+- * address in the .compat section, so loaders can figure out which other
+- * execution modes this image supports.
+- */
+- buf[compat_offset] = 0x1;
+- buf[compat_offset + 1] = 0x8;
+- put_unaligned_le16(0x14c, &buf[compat_offset + 2]);
+- put_unaligned_le32(efi32_pe_entry + size, &buf[compat_offset + 4]);
+-#endif
+-}
+-
+-static void update_pecoff_text(unsigned int text_start, unsigned int file_sz,
+- unsigned int init_sz)
+-{
+- unsigned int pe_header;
+- unsigned int text_sz = file_sz - text_start;
+- unsigned int bss_sz = init_sz - file_sz;
+-
+- pe_header = get_unaligned_le32(&buf[0x3c]);
+-
+- /*
+- * The PE/COFF loader may load the image at an address which is
+- * misaligned with respect to the kernel_alignment field in the setup
+- * header.
+- *
+- * In order to avoid relocating the kernel to correct the misalignment,
+- * add slack to allow the buffer to be aligned within the declared size
+- * of the image.
+- */
+- bss_sz += CONFIG_PHYSICAL_ALIGN;
+- init_sz += CONFIG_PHYSICAL_ALIGN;
+-
+- /*
+- * Size of code: Subtract the size of the first sector (512 bytes)
+- * which includes the header.
+- */
+- put_unaligned_le32(file_sz - 512 + bss_sz, &buf[pe_header + 0x1c]);
+-
+- /* Size of image */
+- put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+-
+- /*
+- * Address of entry point for PE/COFF executable
+- */
+- put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
+-
+- update_pecoff_section_header_fields(".text", text_start, text_sz + bss_sz,
+- text_sz, text_start);
+-}
+-
+-static int reserve_pecoff_reloc_section(int c)
+-{
+- /* Reserve 0x20 bytes for .reloc section */
+- memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+- return PECOFF_RELOC_RESERVE;
+-}
+-
+-static void efi_stub_defaults(void)
+-{
+- /* Defaults for old kernel */
+-#ifdef CONFIG_X86_32
+- efi_pe_entry = 0x10;
+-#else
+- efi_pe_entry = 0x210;
+- startup_64 = 0x200;
+-#endif
+-}
+-
+-static void efi_stub_entry_update(void)
+-{
+- unsigned long addr = efi32_stub_entry;
+-
+-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+-#ifdef CONFIG_X86_64
+- /* Yes, this is really how we defined it :( */
+- addr = efi64_stub_entry - 0x200;
+-#endif
+-
+-#ifdef CONFIG_EFI_MIXED
+- if (efi32_stub_entry != addr)
+- die("32-bit and 64-bit EFI entry points do not match\n");
+-#endif
+-#endif
+- put_unaligned_le32(addr, &buf[0x264]);
+-}
+-
+-#else
+-
+-static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
+-static inline void update_pecoff_text(unsigned int text_start,
+- unsigned int file_sz,
+- unsigned int init_sz) {}
+-static inline void efi_stub_defaults(void) {}
+-static inline void efi_stub_entry_update(void) {}
+-
+-static inline int reserve_pecoff_reloc_section(int c)
+-{
+- return 0;
+-}
+-#endif /* CONFIG_EFI_STUB */
+-
+-static int reserve_pecoff_compat_section(int c)
+-{
+- /* Reserve 0x20 bytes for .compat section */
+- memset(buf+c, 0, PECOFF_COMPAT_RESERVE);
+- return PECOFF_COMPAT_RESERVE;
+-}
+-
+ /*
+ * Parse zoffset.h and find the entry points. We could just #include zoffset.h
+ * but that would mean tools/build would have to be rebuilt every time. It's
+@@ -354,14 +161,7 @@ static void parse_zoffset(char *fname)
+ p = (char *)buf;
+
+ while (p && *p) {
+- PARSE_ZOFS(p, efi32_stub_entry);
+- PARSE_ZOFS(p, efi64_stub_entry);
+- PARSE_ZOFS(p, efi_pe_entry);
+- PARSE_ZOFS(p, efi32_pe_entry);
+- PARSE_ZOFS(p, kernel_info);
+- PARSE_ZOFS(p, startup_64);
+- PARSE_ZOFS(p, _ehead);
+- PARSE_ZOFS(p, _end);
++ PARSE_ZOFS(p, _edata);
+
+ p = strchr(p, '\n');
+ while (p && (*p == '\r' || *p == '\n'))
+@@ -371,17 +171,14 @@ static void parse_zoffset(char *fname)
+
+ int main(int argc, char ** argv)
+ {
+- unsigned int i, sz, setup_sectors, init_sz;
++ unsigned int i, sz, setup_sectors;
+ int c;
+- u32 sys_size;
+ struct stat sb;
+ FILE *file, *dest;
+ int fd;
+ void *kernel;
+ u32 crc = 0xffffffffUL;
+
+- efi_stub_defaults();
+-
+ if (argc != 5)
+ usage();
+ parse_zoffset(argv[3]);
+@@ -403,72 +200,27 @@ int main(int argc, char ** argv)
+ die("Boot block hasn't got boot flag (0xAA55)");
+ fclose(file);
+
+- c += reserve_pecoff_compat_section(c);
+- c += reserve_pecoff_reloc_section(c);
+-
+ /* Pad unused space with zeros */
+- setup_sectors = (c + 511) / 512;
++ setup_sectors = (c + 4095) / 4096;
++ setup_sectors *= 8;
+ if (setup_sectors < SETUP_SECT_MIN)
+ setup_sectors = SETUP_SECT_MIN;
+ i = setup_sectors*512;
+ memset(buf+c, 0, i-c);
+
+- update_pecoff_setup_and_reloc(i);
+-
+- /* Set the default root device */
+- put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
+-
+ /* Open and stat the kernel file */
+ fd = open(argv[2], O_RDONLY);
+ if (fd < 0)
+ die("Unable to open `%s': %m", argv[2]);
+ if (fstat(fd, &sb))
+ die("Unable to stat `%s': %m", argv[2]);
+- sz = sb.st_size;
++ if (_edata != sb.st_size)
++ die("Unexpected file size `%s': %u != %u", argv[2], _edata,
++ sb.st_size);
++ sz = _edata - 4;
+ kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
+ if (kernel == MAP_FAILED)
+ die("Unable to mmap '%s': %m", argv[2]);
+- /* Number of 16-byte paragraphs, including space for a 4-byte CRC */
+- sys_size = (sz + 15 + 4) / 16;
+-#ifdef CONFIG_EFI_STUB
+- /*
+- * COFF requires minimum 32-byte alignment of sections, and
+- * adding a signature is problematic without that alignment.
+- */
+- sys_size = (sys_size + 1) & ~1;
+-#endif
+-
+- /* Patch the setup code with the appropriate size parameters */
+- buf[0x1f1] = setup_sectors-1;
+- put_unaligned_le32(sys_size, &buf[0x1f4]);
+-
+- init_sz = get_unaligned_le32(&buf[0x260]);
+-#ifdef CONFIG_EFI_STUB
+- /*
+- * The decompression buffer will start at ImageBase. When relocating
+- * the compressed kernel to its end, we must ensure that the head
+- * section does not get overwritten. The head section occupies
+- * [i, i + _ehead), and the destination is [init_sz - _end, init_sz).
+- *
+- * At present these should never overlap, because 'i' is at most 32k
+- * because of SETUP_SECT_MAX, '_ehead' is less than 1k, and the
+- * calculation of INIT_SIZE in boot/header.S ensures that
+- * 'init_sz - _end' is at least 64k.
+- *
+- * For future-proofing, increase init_sz if necessary.
+- */
+-
+- if (init_sz - _end < i + _ehead) {
+- init_sz = (i + _ehead + _end + 4095) & ~4095;
+- put_unaligned_le32(init_sz, &buf[0x260]);
+- }
+-#endif
+- update_pecoff_text(setup_sectors * 512, i + (sys_size * 16), init_sz);
+-
+- efi_stub_entry_update();
+-
+- /* Update kernel_info offset. */
+- put_unaligned_le32(kernel_info, &buf[0x268]);
+
+ crc = partial_crc32(buf, i, crc);
+ if (fwrite(buf, 1, i, dest) != i)
+@@ -479,13 +231,6 @@ int main(int argc, char ** argv)
+ if (fwrite(kernel, 1, sz, dest) != sz)
+ die("Writing kernel failed");
+
+- /* Add padding leaving 4 bytes for the checksum */
+- while (sz++ < (sys_size*16) - 4) {
+- crc = partial_crc32_one('\0', crc);
+- if (fwrite("\0", 1, 1, dest) != 1)
+- die("Writing padding failed");
+- }
+-
+ /* Write the CRC */
+ put_unaligned_le32(crc, buf);
+ if (fwrite(buf, 1, 4, dest) != 4)
+diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
+index eeec9986570ed0..ddd4efdc79d668 100644
+--- a/arch/x86/coco/core.c
++++ b/arch/x86/coco/core.c
+@@ -3,18 +3,22 @@
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
++ * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+ #include <linux/export.h>
+ #include <linux/cc_platform.h>
++#include <linux/string.h>
++#include <linux/random.h>
+
++#include <asm/archrandom.h>
+ #include <asm/coco.h>
+ #include <asm/processor.h>
+
+ enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
+-static u64 cc_mask __ro_after_init;
++u64 cc_mask __ro_after_init;
+
+ static bool noinstr intel_cc_platform_has(enum cc_attr attr)
+ {
+@@ -149,7 +153,39 @@ u64 cc_mkdec(u64 val)
+ }
+ EXPORT_SYMBOL_GPL(cc_mkdec);
+
+-__init void cc_set_mask(u64 mask)
++__init void cc_random_init(void)
+ {
+- cc_mask = mask;
++ /*
++ * The seed is 32 bytes (in units of longs), which is 256 bits, which
++ * is the security level that the RNG is targeting.
++ */
++ unsigned long rng_seed[32 / sizeof(long)];
++ size_t i, longs;
++
++ if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
++ return;
++
++ /*
++ * Since the CoCo threat model includes the host, the only reliable
++ * source of entropy that can be neither observed nor manipulated is
++ * RDRAND. Usually, RDRAND failure is considered tolerable, but since
++ * CoCo guests have no other unobservable source of entropy, it's
++ * important to at least ensure the RNG gets some initial random seeds.
++ */
++ for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
++ longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);
++
++ /*
++ * A zero return value means that the guest doesn't have RDRAND
++ * or the CPU is physically broken, and in both cases that
++ * means most crypto inside of the CoCo instance will be
++ * broken, defeating the purpose of CoCo in the first place. So
++ * just panic here because it's absolutely unsafe to continue
++ * executing.
++ */
++ if (longs == 0)
++ panic("RDRAND is defective.");
++ }
++ add_device_randomness(rng_seed, sizeof(rng_seed));
++ memzero_explicit(rng_seed, sizeof(rng_seed));
+ }
+diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S
+index b193c0a1d8db38..2eca5f43734feb 100644
+--- a/arch/x86/coco/tdx/tdcall.S
++++ b/arch/x86/coco/tdx/tdcall.S
+@@ -195,6 +195,7 @@ SYM_FUNC_END(__tdx_module_call)
+ xor %r10d, %r10d
+ xor %r11d, %r11d
+ xor %rdi, %rdi
++ xor %rsi, %rsi
+ xor %rdx, %rdx
+
+ /* Restore callee-saved GPRs as mandated by the x86_64 ABI */
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 1d6b863c42b001..905ac8a3f7165c 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -10,9 +10,11 @@
+ #include <asm/coco.h>
+ #include <asm/tdx.h>
+ #include <asm/vmx.h>
++#include <asm/ia32.h>
+ #include <asm/insn.h>
+ #include <asm/insn-eval.h>
+ #include <asm/pgtable.h>
++#include <asm/traps.h>
+
+ /* MMIO direction */
+ #define EPT_READ 0
+@@ -361,7 +363,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val)
+ .r12 = size,
+ .r13 = EPT_READ,
+ .r14 = addr,
+- .r15 = *val,
+ };
+
+ if (__tdx_hypercall_ret(&args))
+@@ -405,6 +406,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
+ return -EINVAL;
+ }
+
++ if (!fault_in_kernel_space(ve->gla)) {
++ WARN_ONCE(1, "Access to userspace address is not supported");
++ return -EINVAL;
++ }
++
+ /*
+ * Reject EPT violation #VEs that split pages.
+ *
+diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
+index ef73a3ab87263e..791386d9a83aa1 100644
+--- a/arch/x86/crypto/nh-avx2-x86_64.S
++++ b/arch/x86/crypto/nh-avx2-x86_64.S
+@@ -154,5 +154,6 @@ SYM_TYPED_FUNC_START(nh_avx2)
+ vpaddq T1, T0, T0
+ vpaddq T4, T0, T0
+ vmovdqu T0, (HASH)
++ vzeroupper
+ RET
+ SYM_FUNC_END(nh_avx2)
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index 44340a1139e0b7..959afa705e95ca 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -24,8 +24,17 @@
+ #include <linux/types.h>
+ #include <crypto/sha1.h>
+ #include <crypto/sha1_base.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+
++static const struct x86_cpu_id module_cpu_ids[] = {
++ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++ {}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha1_block_fn *sha1_xform)
+ {
+@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
+
+ static int __init sha1_ssse3_mod_init(void)
+ {
++ if (!x86_match_cpu(module_cpu_ids))
++ return -ENODEV;
++
+ if (register_sha1_ssse3())
+ goto fail;
+
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 9918212faf914f..0bbec1c75cd0be 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ leaq K256+0*32(%rip), INP ## reuse INP as scratch reg
+ vpaddd (INP, SRND), X0, XFER
+ vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
+- FOUR_ROUNDS_AND_SCHED _XFER + 0*32
++ FOUR_ROUNDS_AND_SCHED (_XFER + 0*32)
+
+ leaq K256+1*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
+ vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
+- FOUR_ROUNDS_AND_SCHED _XFER + 1*32
++ FOUR_ROUNDS_AND_SCHED (_XFER + 1*32)
+
+ leaq K256+2*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
+ vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
+- FOUR_ROUNDS_AND_SCHED _XFER + 2*32
++ FOUR_ROUNDS_AND_SCHED (_XFER + 2*32)
+
+ leaq K256+3*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
+ vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
+- FOUR_ROUNDS_AND_SCHED _XFER + 3*32
++ FOUR_ROUNDS_AND_SCHED (_XFER + 3*32)
+
+ add $4*32, SRND
+ cmp $3*4*32, SRND
+@@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ leaq K256+0*32(%rip), INP
+ vpaddd (INP, SRND), X0, XFER
+ vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
+- DO_4ROUNDS _XFER + 0*32
++ DO_4ROUNDS (_XFER + 0*32)
+
+ leaq K256+1*32(%rip), INP
+ vpaddd (INP, SRND), X1, XFER
+ vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
+- DO_4ROUNDS _XFER + 1*32
++ DO_4ROUNDS (_XFER + 1*32)
+ add $2*32, SRND
+
+ vmovdqa X2, X0
+@@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ xor SRND, SRND
+ .align 16
+ .Lloop3:
+- DO_4ROUNDS _XFER + 0*32 + 16
+- DO_4ROUNDS _XFER + 1*32 + 16
++ DO_4ROUNDS (_XFER + 0*32 + 16)
++ DO_4ROUNDS (_XFER + 1*32 + 16)
+ add $2*32, SRND
+ cmp $4*4*32, SRND
+ jb .Lloop3
+@@ -716,6 +716,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ popq %r13
+ popq %r12
+ popq %rbx
++ vzeroupper
+ RET
+ SYM_FUNC_END(sha256_transform_rorx)
+
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 3a5f6be7dbba4e..d25235f0ccafc3 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -38,11 +38,20 @@
+ #include <crypto/sha2.h>
+ #include <crypto/sha256_base.h>
+ #include <linux/string.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+
+ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
+ const u8 *data, int blocks);
+
++static const struct x86_cpu_id module_cpu_ids[] = {
++ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++ {}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int _sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha256_block_fn *sha256_xform)
+ {
+@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
+
+ static int __init sha256_ssse3_mod_init(void)
+ {
++ if (!x86_match_cpu(module_cpu_ids))
++ return -ENODEV;
++
+ if (register_sha256_ssse3())
+ goto fail;
+
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index f08496cd68708f..24973f42c43ff4 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -680,6 +680,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
+ pop %r12
+ pop %rbx
+
++ vzeroupper
+ RET
+ SYM_FUNC_END(sha512_transform_rorx)
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 93c60c0c9d4a7a..e72dac092245a5 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -25,6 +25,7 @@
+ #include <xen/events.h>
+ #endif
+
++#include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/traps.h>
+ #include <asm/vdso.h>
+@@ -47,7 +48,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
+
+ if (likely(unr < NR_syscalls)) {
+ unr = array_index_nospec(unr, NR_syscalls);
+- regs->ax = sys_call_table[unr](regs);
++ regs->ax = x64_sys_call(regs, unr);
+ return true;
+ }
+ return false;
+@@ -64,7 +65,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
+
+ if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
+ xnr = array_index_nospec(xnr, X32_NR_syscalls);
+- regs->ax = x32_sys_call_table[xnr](regs);
++ regs->ax = x32_sys_call(regs, xnr);
+ return true;
+ }
+ return false;
+@@ -96,6 +97,10 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs)
+ return (int)regs->orig_ax;
+ }
+
++#ifdef CONFIG_IA32_EMULATION
++bool __ia32_enabled __ro_after_init = true;
++#endif
++
+ /*
+ * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
+ */
+@@ -109,13 +114,102 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
+
+ if (likely(unr < IA32_NR_syscalls)) {
+ unr = array_index_nospec(unr, IA32_NR_syscalls);
+- regs->ax = ia32_sys_call_table[unr](regs);
++ regs->ax = ia32_sys_call(regs, unr);
+ } else if (nr != -1) {
+ regs->ax = __ia32_sys_ni_syscall(regs);
+ }
+ }
+
+-/* Handles int $0x80 */
++#ifdef CONFIG_IA32_EMULATION
++static __always_inline bool int80_is_external(void)
++{
++ const unsigned int offs = (0x80 / 32) * 0x10;
++ const u32 bit = BIT(0x80 % 32);
++
++ /* The local APIC on XENPV guests is fake */
++ if (cpu_feature_enabled(X86_FEATURE_XENPV))
++ return false;
++
++ /*
++ * If vector 0x80 is set in the APIC ISR then this is an external
++ * interrupt. Either from broken hardware or injected by a VMM.
++ *
++ * Note: In guest mode this is only valid for secure guests where
++ * the secure module fully controls the vAPIC exposed to the guest.
++ */
++ return apic_read(APIC_ISR + offs) & bit;
++}
++
++/**
++ * do_int80_emulation - 32-bit legacy syscall C entry from asm
++ *
++ * This entry point can be used by 32-bit and 64-bit programs to perform
++ * 32-bit system calls. Instances of INT $0x80 can be found inline in
++ * various programs and libraries. It is also used by the vDSO's
++ * __kernel_vsyscall fallback for hardware that doesn't support a faster
++ * entry method. Restarted 32-bit system calls also fall back to INT
++ * $0x80 regardless of what instruction was originally used to do the
++ * system call.
++ *
++ * This is considered a slow path. It is not used by most libc
++ * implementations on modern hardware except during process startup.
++ *
++ * The arguments for the INT $0x80 based syscall are on stack in the
++ * pt_regs structure:
++ * eax: system call number
++ * ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
++ * ebx, ecx, edx, esi, edi, ebp: arg1 - arg6
++__visible noinstr void do_int80_emulation(struct pt_regs *regs)
++{
++ int nr;
++
++ /* Kernel does not use INT $0x80! */
++ if (unlikely(!user_mode(regs))) {
++ irqentry_enter(regs);
++ instrumentation_begin();
++ panic("Unexpected external interrupt 0x80\n");
++ }
++
++ /*
++ * Establish kernel context for instrumentation, including for
++ * int80_is_external() below which calls into the APIC driver.
++ * Identical for soft and external interrupts.
++ */
++ enter_from_user_mode(regs);
++
++ instrumentation_begin();
++ add_random_kstack_offset();
++
++ /* Validate that this is a soft interrupt to the extent possible */
++ if (unlikely(int80_is_external()))
++ panic("Unexpected external interrupt 0x80\n");
++
++ /*
++ * The low level idtentry code pushed -1 into regs::orig_ax
++ * and regs::ax contains the syscall number.
++ *
++ * User tracing code (ptrace or signal handlers) might assume
++ * that the regs::orig_ax contains a 32-bit number on invoking
++ * a 32-bit syscall.
++ *
++ * Establish the syscall convention by saving the 32-bit truncated
++ * syscall number in regs::orig_ax and by invalidating regs::ax.
++ */
++ regs->orig_ax = regs->ax & GENMASK(31, 0);
++ regs->ax = -ENOSYS;
++
++ nr = syscall_32_enter(regs);
++
++ local_irq_enable();
++ nr = syscall_enter_from_user_mode_work(regs, nr);
++ do_syscall_32_irqs_on(regs, nr);
++
++ instrumentation_end();
++ syscall_exit_to_user_mode(regs);
++}
++#else /* CONFIG_IA32_EMULATION */
++
++/* Handles int $0x80 on a 32-bit kernel */
+ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ {
+ int nr = syscall_32_enter(regs);
+@@ -134,6 +228,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+ }
++#endif /* !CONFIG_IA32_EMULATION */
+
+ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+ {
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index bfb7bcb362bcfc..34eca8015b64bc 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -6,6 +6,11 @@
+ #include <linux/linkage.h>
+ #include <asm/export.h>
+ #include <asm/msr-index.h>
++#include <asm/unwind_hints.h>
++#include <asm/segment.h>
++#include <asm/cache.h>
++#include <asm/cpufeatures.h>
++#include <asm/nospec-branch.h>
+
+ .pushsection .noinstr.text, "ax"
+
+@@ -14,9 +19,32 @@ SYM_FUNC_START(entry_ibpb)
+ movl $PRED_CMD_IBPB, %eax
+ xorl %edx, %edx
+ wrmsr
++
++ /* Make sure IBPB clears return stack predictions too. */
++ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
+ RET
+ SYM_FUNC_END(entry_ibpb)
+ /* For KVM */
+ EXPORT_SYMBOL_GPL(entry_ibpb);
+
+ .popsection
++
++/*
++ * Define the VERW operand that is disguised as entry code so that
++ * it can be referenced with KPTI enabled. This ensures VERW can be
++ * used late in exit-to-user path after page tables are switched.
++ */
++.pushsection .entry.text, "ax"
++
++.align L1_CACHE_BYTES, 0xcc
++SYM_CODE_START_NOALIGN(mds_verw_sel)
++ UNWIND_HINT_UNDEFINED
++ ANNOTATE_NOENDBR
++ .word __KERNEL_DS
++.align L1_CACHE_BYTES, 0xcc
++SYM_CODE_END(mds_verw_sel);
++/* For KVM */
++EXPORT_SYMBOL_GPL(mds_verw_sel);
++
++.popsection
++
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 6e6af42e044a20..3894acc54b79c4 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -875,6 +875,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
+
+ /* Now ready to switch the cr3 */
+ SWITCH_TO_USER_CR3 scratch_reg=%eax
++ /* Clobbers ZF */
++ CLEAR_CPU_BUFFERS
+
+ /*
+ * Restore all flags except IF. (We restore IF separately because
+@@ -954,6 +956,7 @@ restore_all_switch_stack:
+
+ /* Restore user state */
+ RESTORE_REGS pop=4 # skip orig_eax/error_code
++ CLEAR_CPU_BUFFERS
+ .Lirq_return:
+ /*
+ * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
+@@ -1166,6 +1169,7 @@ SYM_CODE_START(asm_exc_nmi)
+
+ CHECK_AND_APPLY_ESPFIX
+ RESTORE_ALL_NMI cr3_reg=%edi pop=4
++ CLEAR_CPU_BUFFERS
+ jmp .Lirq_return
+
+ #ifdef CONFIG_X86_ESPFIX32
+@@ -1207,6 +1211,7 @@ SYM_CODE_START(asm_exc_nmi)
+ * 1 - orig_ax
+ */
+ lss (1+5+6)*4(%esp), %esp # back to espfix stack
++ CLEAR_CPU_BUFFERS
+ jmp .Lirq_return
+ #endif
+ SYM_CODE_END(asm_exc_nmi)
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 43606de225117d..2192b6c33ea009 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
+ /* clobbers %rax, make sure it is after saving the syscall nr */
+ IBRS_ENTER
+ UNTRAIN_RET
++ CLEAR_BRANCH_HISTORY
+
+ call do_syscall_64 /* returns with IRQs disabled */
+
+@@ -166,22 +167,9 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
+ jne swapgs_restore_regs_and_return_to_usermode
+
+ /*
+- * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
+- * restore RF properly. If the slowpath sets it for whatever reason, we
+- * need to restore it correctly.
+- *
+- * SYSRET can restore TF, but unlike IRET, restoring TF results in a
+- * trap from userspace immediately after SYSRET. This would cause an
+- * infinite loop whenever #DB happens with register state that satisfies
+- * the opportunistic SYSRET conditions. For example, single-stepping
+- * this user code:
+- *
+- * movq $stuck_here, %rcx
+- * pushfq
+- * popq %r11
+- * stuck_here:
+- *
+- * would never get past 'stuck_here'.
++ * SYSRET cannot restore RF. It can restore TF, but unlike IRET,
++ * restoring TF results in a trap from userspace immediately after
++ * SYSRET.
+ */
+ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
+ jnz swapgs_restore_regs_and_return_to_usermode
+@@ -223,6 +211,7 @@ syscall_return_via_sysret:
+ SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
+ swapgs
++ CLEAR_CPU_BUFFERS
+ sysretq
+ SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
+@@ -663,6 +652,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
+ /* Restore RDI. */
+ popq %rdi
+ swapgs
++ CLEAR_CPU_BUFFERS
+ jmp .Lnative_iret
+
+
+@@ -774,6 +764,8 @@ native_irq_return_ldt:
+ */
+ popq %rax /* Restore user RAX */
+
++ CLEAR_CPU_BUFFERS
++
+ /*
+ * RSP now points to an ordinary IRET frame, except that the page
+ * is read-only and RSP[31:16] are preloaded with the userspace
+@@ -1502,6 +1494,12 @@ nmi_restore:
+ std
+ movq $0, 5*8(%rsp) /* clear "NMI executing" */
+
++ /*
++ * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
++ * NMI in kernel after user state is restored. For an unprivileged user
++ * these conditions are hard to meet.
++ */
++
+ /*
+ * iretq reads the "iret" frame and exits the NMI stack in a
+ * single instruction. We are returning to kernel mode, so this
+@@ -1516,12 +1514,13 @@ SYM_CODE_END(asm_exc_nmi)
+ * This handles SYSCALL from 32-bit code. There is no way to program
+ * MSRs to fully disable 32-bit SYSCALL.
+ */
+-SYM_CODE_START(ignore_sysret)
++SYM_CODE_START(entry_SYSCALL32_ignore)
+ UNWIND_HINT_END_OF_STACK
+ ENDBR
+ mov $-ENOSYS, %eax
++ CLEAR_CPU_BUFFERS
+ sysretl
+-SYM_CODE_END(ignore_sysret)
++SYM_CODE_END(entry_SYSCALL32_ignore)
+ #endif
+
+ .pushsection .text, "ax"
+@@ -1538,3 +1537,63 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
+ call make_task_dead
+ SYM_CODE_END(rewind_stack_and_make_dead)
+ .popsection
++
++/*
++ * This sequence executes branches in order to remove user branch information
++ * from the branch history tracker in the Branch Predictor, therefore removing
++ * user influence on subsequent BTB lookups.
++ *
++ * It should be used on parts prior to Alder Lake. Newer parts should use the
++ * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
++ * virtualized on newer hardware the VMM should protect against BHI attacks by
++ * setting BHI_DIS_S for the guests.
++ *
++ * CALLs/RETs are necessary to prevent Loop Stream Detector (LSD) from engaging
++ * and not clearing the branch history. The call tree looks like:
++ *
++ * call 1
++ * call 2
++ * call 2
++ * call 2
++ * call 2
++ * call 2
++ * ret
++ * ret
++ * ret
++ * ret
++ * ret
++ * ret
++ *
++ * This means that the stack is non-constant and ORC can't unwind it with %rsp
++ * alone. Therefore we unconditionally set up the frame pointer, which allows
++ * ORC to unwind properly.
++ *
++ * The alignment is for performance and not for safety, and may be safely
++ * refactored in the future if needed.
++ */
++SYM_FUNC_START(clear_bhb_loop)
++ push %rbp
++ mov %rsp, %rbp
++ movl $5, %ecx
++ ANNOTATE_INTRA_FUNCTION_CALL
++ call 1f
++ jmp 5f
++ .align 64, 0xcc
++ ANNOTATE_INTRA_FUNCTION_CALL
++1: call 2f
++ RET
++ .align 64, 0xcc
++2: movl $5, %eax
++3: jmp 4f
++ nop
++4: sub $1, %eax
++ jnz 3b
++ sub $1, %ecx
++ jnz 1b
++ RET
++5: lfence
++ pop %rbp
++ RET
++SYM_FUNC_END(clear_bhb_loop)
++EXPORT_SYMBOL_GPL(clear_bhb_loop)
++STACK_FRAME_NON_STANDARD(clear_bhb_loop)
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 70150298f8bdf5..ebfccadf918cb4 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -90,9 +90,6 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+
+ cld
+
+- IBRS_ENTER
+- UNTRAIN_RET
+-
+ /*
+ * SYSENTER doesn't filter flags, so we need to clear NT and AC
+ * ourselves. To save a few cycles, we can check whether
+@@ -116,6 +113,16 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+ jnz .Lsysenter_fix_flags
+ .Lsysenter_flags_fixed:
+
++ /*
++ * CPU bug mitigation mechanisms can call other functions. They
++ * should be invoked after making sure TF is cleared because
++ * single-step is ignored only for instructions inside the
++ * entry_SYSENTER_compat function.
++ */
++ IBRS_ENTER
++ UNTRAIN_RET
++ CLEAR_BRANCH_HISTORY
++
+ movq %rsp, %rdi
+ call do_SYSENTER_32
+ /* XEN PV guests always use IRET path */
+@@ -209,6 +216,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
+
+ IBRS_ENTER
+ UNTRAIN_RET
++ CLEAR_BRANCH_HISTORY
+
+ movq %rsp, %rdi
+ call do_fast_syscall_32
+@@ -271,6 +279,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
+ xorl %r9d, %r9d
+ xorl %r10d, %r10d
+ swapgs
++ CLEAR_CPU_BUFFERS
+ sysretl
+ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
+@@ -278,78 +287,15 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
+ SYM_CODE_END(entry_SYSCALL_compat)
+
+ /*
+- * 32-bit legacy system call entry.
+- *
+- * 32-bit x86 Linux system calls traditionally used the INT $0x80
+- * instruction. INT $0x80 lands here.
+- *
+- * This entry point can be used by 32-bit and 64-bit programs to perform
+- * 32-bit system calls. Instances of INT $0x80 can be found inline in
+- * various programs and libraries. It is also used by the vDSO's
+- * __kernel_vsyscall fallback for hardware that doesn't support a faster
+- * entry method. Restarted 32-bit system calls also fall back to INT
+- * $0x80 regardless of what instruction was originally used to do the
+- * system call.
+- *
+- * This is considered a slow path. It is not used by most libc
+- * implementations on modern hardware except during process startup.
+- *
+- * Arguments:
+- * eax system call number
+- * ebx arg1
+- * ecx arg2
+- * edx arg3
+- * esi arg4
+- * edi arg5
+- * ebp arg6
++ * int 0x80 is used by 32-bit mode as a system call entry. Normally IDT entries
++ * point to C routines, however since this is a system call interface the branch
++ * history needs to be scrubbed to protect against BHI attacks, and that
++ * scrubbing needs to take place in assembly code prior to entering any C
++ * routines.
+ */
+-SYM_CODE_START(entry_INT80_compat)
+- UNWIND_HINT_ENTRY
+- ENDBR
+- /*
+- * Interrupts are off on entry.
+- */
+- ASM_CLAC /* Do this early to minimize exposure */
+- ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
+-
+- /*
+- * User tracing code (ptrace or signal handlers) might assume that
+- * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+- * syscall. Just in case the high bits are nonzero, zero-extend
+- * the syscall number. (This could almost certainly be deleted
+- * with no ill effects.)
+- */
+- movl %eax, %eax
+-
+- /* switch to thread stack expects orig_ax and rdi to be pushed */
+- pushq %rax /* pt_regs->orig_ax */
+-
+- /* Need to switch before accessing the thread stack. */
+- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+-
+- /* In the Xen PV case we already run on the thread stack. */
+- ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+-
+- movq %rsp, %rax
+- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+-
+- pushq 5*8(%rax) /* regs->ss */
+- pushq 4*8(%rax) /* regs->rsp */
+- pushq 3*8(%rax) /* regs->eflags */
+- pushq 2*8(%rax) /* regs->cs */
+- pushq 1*8(%rax) /* regs->ip */
+- pushq 0*8(%rax) /* regs->orig_ax */
+-.Lint80_keep_stack:
+-
+- PUSH_AND_CLEAR_REGS rax=$-ENOSYS
+- UNWIND_HINT_REGS
+-
+- cld
+-
+- IBRS_ENTER
+- UNTRAIN_RET
+-
+- movq %rsp, %rdi
+- call do_int80_syscall_32
+- jmp swapgs_restore_regs_and_return_to_usermode
+-SYM_CODE_END(entry_INT80_compat)
++SYM_CODE_START(int80_emulation)
++ ANNOTATE_NOENDBR
++ UNWIND_HINT_FUNC
++ CLEAR_BRANCH_HISTORY
++ jmp do_int80_emulation
++SYM_CODE_END(int80_emulation)
+diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
+index 8cfc9bc73e7f8b..c2235bae17ef66 100644
+--- a/arch/x86/entry/syscall_32.c
++++ b/arch/x86/entry/syscall_32.c
+@@ -18,8 +18,25 @@
+ #include <asm/syscalls_32.h>
+ #undef __SYSCALL
+
++/*
++ * The sys_call_table[] is no longer used for system calls, but
++ * kernel/trace/trace_syscalls.c still wants to know the system
++ * call address.
++ */
++#ifdef CONFIG_X86_32
+ #define __SYSCALL(nr, sym) __ia32_##sym,
+-
+-__visible const sys_call_ptr_t ia32_sys_call_table[] = {
++const sys_call_ptr_t sys_call_table[] = {
+ #include <asm/syscalls_32.h>
+ };
++#undef __SYSCALL
++#endif
++
++#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
++
++long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
++{
++ switch (nr) {
++ #include <asm/syscalls_32.h>
++ default: return __ia32_sys_ni_syscall(regs);
++ }
++};
+diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
+index be120eec1fc9f9..33b3f09e6f151e 100644
+--- a/arch/x86/entry/syscall_64.c
++++ b/arch/x86/entry/syscall_64.c
+@@ -11,8 +11,23 @@
+ #include <asm/syscalls_64.h>
+ #undef __SYSCALL
+
++/*
++ * The sys_call_table[] is no longer used for system calls, but
++ * kernel/trace/trace_syscalls.c still wants to know the system
++ * call address.
++ */
+ #define __SYSCALL(nr, sym) __x64_##sym,
+-
+-asmlinkage const sys_call_ptr_t sys_call_table[] = {
++const sys_call_ptr_t sys_call_table[] = {
+ #include <asm/syscalls_64.h>
+ };
++#undef __SYSCALL
++
++#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
++
++long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
++{
++ switch (nr) {
++ #include <asm/syscalls_64.h>
++ default: return __x64_sys_ni_syscall(regs);
++ }
++};
+diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
+index bdd0e03a1265d2..03de4a93213182 100644
+--- a/arch/x86/entry/syscall_x32.c
++++ b/arch/x86/entry/syscall_x32.c
+@@ -11,8 +11,12 @@
+ #include <asm/syscalls_x32.h>
+ #undef __SYSCALL
+
+-#define __SYSCALL(nr, sym) __x64_##sym,
++#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
+
+-asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
+-#include <asm/syscalls_x32.h>
++long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
++{
++ switch (nr) {
++ #include <asm/syscalls_x32.h>
++ default: return __x64_sys_ni_syscall(regs);
++ }
+ };
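
The three dispatcher conversions above (ia32, x64, and x32) replace an indirect call through a table of function pointers with a switch over direct calls, so a user-controlled syscall number no longer steers an indirect branch that the predictor could be trained on (the BHI concern noted in entry_64_compat.S). A toy sketch of the two shapes, using hypothetical syscalls rather than the real kernel ABI:

    #include <stdio.h>

    typedef long (*sys_call_ptr_t)(long);

    /* Hypothetical syscall bodies, standing in for the __x64_sys_*() stubs. */
    static long sys_hello(long arg) { return arg + 1; }
    static long sys_bye(long arg) { return arg - 1; }
    static long sys_ni(long arg) { (void)arg; return -38; /* -ENOSYS */ }

    /*
     * Old shape: one indirect call through a data table. The branch target
     * is derived from the user-supplied number.
     */
    static const sys_call_ptr_t table[] = { sys_hello, sys_bye };

    static long dispatch_table(unsigned int nr, long arg)
    {
        return nr < 2 ? table[nr](arg) : sys_ni(arg);
    }

    /*
     * New shape: a switch lets the compiler emit direct calls (or a bounded
     * branch) instead of an indirect call whose target is loaded from a table.
     */
    static long dispatch_switch(unsigned int nr, long arg)
    {
        switch (nr) {
        case 0: return sys_hello(arg);
        case 1: return sys_bye(arg);
        default: return sys_ni(arg);
        }
    }

    int main(void)
    {
        printf("%ld %ld\n", dispatch_table(0, 41), dispatch_switch(1, 43));
        return 0;
    }
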
+diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
+index 2d0b1bd866ead6..38db5ef2329f3c 100644
+--- a/arch/x86/entry/syscalls/syscall_32.tbl
++++ b/arch/x86/entry/syscalls/syscall_32.tbl
+@@ -420,7 +420,7 @@
+ 412 i386 utimensat_time64 sys_utimensat
+ 413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+ 414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+-416 i386 io_pgetevents_time64 sys_io_pgetevents
++416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
+ 417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+ 418 i386 mq_timedsend_time64 sys_mq_timedsend
+ 419 i386 mq_timedreceive_time64 sys_mq_timedreceive
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 6a1821bd7d5e9b..c197efd829228c 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -190,31 +190,4 @@ GCOV_PROFILE := n
+ quiet_cmd_vdso_and_check = VDSO $@
+ cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check)
+
+-#
+-# Install the unstripped copies of vdso*.so. If our toolchain supports
+-# build-id, install .build-id links as well.
+-#
+-quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+-define cmd_vdso_install
+- cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+- if readelf -n $< |grep -q 'Build ID'; then \
+- buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+- first=`echo $$buildid | cut -b-2`; \
+- last=`echo $$buildid | cut -b3-`; \
+- mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+- ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+- fi
+-endef
+-
+-vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+-
+-$(MODLIB)/vdso: FORCE
+- @mkdir -p $(MODLIB)/vdso
+-
+-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso
+- $(call cmd,vdso_install)
+-
+-PHONY += vdso_install $(vdso_img_insttargets)
+-vdso_install: $(vdso_img_insttargets)
+-
+ clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index e0ca8120aea876..1245000a8792fd 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -98,11 +98,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
+
+ static bool write_ok_or_segv(unsigned long ptr, size_t size)
+ {
+- /*
+- * XXX: if access_ok, get_user, and put_user handled
+- * sig_on_uaccess_err, this could go away.
+- */
+-
+ if (!access_ok((void __user *)ptr, size)) {
+ struct thread_struct *thread = &current->thread;
+
+@@ -120,10 +115,8 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
+ bool emulate_vsyscall(unsigned long error_code,
+ struct pt_regs *regs, unsigned long address)
+ {
+- struct task_struct *tsk;
+ unsigned long caller;
+ int vsyscall_nr, syscall_nr, tmp;
+- int prev_sig_on_uaccess_err;
+ long ret;
+ unsigned long orig_dx;
+
+@@ -172,8 +165,6 @@ bool emulate_vsyscall(unsigned long error_code,
+ goto sigsegv;
+ }
+
+- tsk = current;
+-
+ /*
+ * Check for access_ok violations and find the syscall nr.
+ *
+@@ -234,12 +225,8 @@ bool emulate_vsyscall(unsigned long error_code,
+ goto do_ret; /* skip requested */
+
+ /*
+- * With a real vsyscall, page faults cause SIGSEGV. We want to
+- * preserve that behavior to make writing exploits harder.
++ * With a real vsyscall, page faults cause SIGSEGV.
+ */
+- prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
+- current->thread.sig_on_uaccess_err = 1;
+-
+ ret = -EFAULT;
+ switch (vsyscall_nr) {
+ case 0:
+@@ -262,23 +249,12 @@ bool emulate_vsyscall(unsigned long error_code,
+ break;
+ }
+
+- current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
+-
+ check_fault:
+ if (ret == -EFAULT) {
+ /* Bad news -- userspace fed a bad pointer to a vsyscall. */
+ warn_bad_vsyscall(KERN_INFO, regs,
+ "vsyscall fault (exploit attempt?)");
+-
+- /*
+- * If we failed to generate a signal for any reason,
+- * generate one here. (This should be impossible.)
+- */
+- if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
+- !sigismember(&tsk->pending.signal, SIGSEGV)))
+- goto sigsegv;
+-
+- return true; /* Don't emulate the ret. */
++ goto sigsegv;
+ }
+
+ regs->ax = ret;
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index e24976593a298a..8ed10366c4a27b 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -250,7 +250,7 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
+ /*
+ * AMD Performance Monitor Family 17h and later:
+ */
+-static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
++static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
+ {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+@@ -262,10 +262,24 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
+ };
+
++static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
++{
++ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
++ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
++ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
++ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
++ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
++ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
++};
++
+ static u64 amd_pmu_event_map(int hw_event)
+ {
+- if (boot_cpu_data.x86 >= 0x17)
+- return amd_f17h_perfmon_event_map[hw_event];
++ if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
++ return amd_zen2_perfmon_event_map[hw_event];
++
++ if (cpu_feature_enabled(X86_FEATURE_ZEN1))
++ return amd_zen1_perfmon_event_map[hw_event];
+
+ return amd_perfmon_event_map[hw_event];
+ }
+@@ -604,7 +618,6 @@ static void amd_pmu_cpu_dead(int cpu)
+
+ kfree(cpuhw->lbr_sel);
+ cpuhw->lbr_sel = NULL;
+- amd_pmu_cpu_reset(cpu);
+
+ if (!x86_pmu.amd_nb_constraints)
+ return;
+@@ -905,8 +918,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ if (!status)
+ goto done;
+
+- /* Read branch records before unfreezing */
+- if (status & GLOBAL_STATUS_LBRS_FROZEN) {
++ /* Read branch records */
++ if (x86_pmu.lbr_nr) {
+ amd_pmu_lbr_read();
+ status &= ~GLOBAL_STATUS_LBRS_FROZEN;
+ }
+diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
+index eb31f850841a89..5149830c7c4fa6 100644
+--- a/arch/x86/events/amd/lbr.c
++++ b/arch/x86/events/amd/lbr.c
+@@ -173,9 +173,11 @@ void amd_pmu_lbr_read(void)
+
+ /*
+ * Check if a branch has been logged; if valid = 0, spec = 0
+- * then no branch was recorded
++ * then no branch was recorded; if reserved = 1 then an
++ * erroneous branch was recorded (see Erratum 1452)
+ */
+- if (!entry.to.split.valid && !entry.to.split.spec)
++ if ((!entry.to.split.valid && !entry.to.split.spec) ||
++ entry.to.split.reserved)
+ continue;
+
+ perf_clear_branch_entry_bitfields(br + out);
+@@ -400,10 +402,12 @@ void amd_pmu_lbr_enable_all(void)
+ wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
+ }
+
+- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
++ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
++ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
++ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++ }
+
+- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++ rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
+ }
+
+@@ -416,10 +420,12 @@ void amd_pmu_lbr_disable_all(void)
+ return;
+
+ rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+-
+ wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
+- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++
++ if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
++ rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
++ wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
++ }
+ }
+
+ __init int amd_pmu_lbr_init(void)
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 185f902e5f2859..150a365b4fbc89 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -41,6 +41,8 @@
+ #include <asm/desc.h>
+ #include <asm/ldt.h>
+ #include <asm/unwind.h>
++#include <asm/uprobes.h>
++#include <asm/ibt.h>
+
+ #include "perf_event.h"
+
+@@ -1644,6 +1646,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
+ while (++i < cpuc->n_events) {
+ cpuc->event_list[i-1] = cpuc->event_list[i];
+ cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
++ cpuc->assign[i-1] = cpuc->assign[i];
+ }
+ cpuc->event_constraint[i-1] = NULL;
+ --cpuc->n_events;
+@@ -2546,6 +2549,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ static DEFINE_MUTEX(rdpmc_mutex);
+ unsigned long val;
+ ssize_t ret;
+
+@@ -2559,6 +2563,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
+ if (x86_pmu.attr_rdpmc_broken)
+ return -ENOTSUPP;
+
++ guard(mutex)(&rdpmc_mutex);
++
+ if (val != x86_pmu.attr_rdpmc) {
+ /*
+ * Changing into or out of never available or always available,
+@@ -2812,6 +2818,46 @@ static unsigned long get_segment_base(unsigned int segment)
+ return get_desc_base(desc);
+ }
+
++#ifdef CONFIG_UPROBES
++/*
++ * Heuristic-based check if uprobe is installed at the function entry.
++ *
++ * Under assumption of user code being compiled with frame pointers,
++ * Under the assumption of user code being compiled with frame pointers,
++ *
++ * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
++ * If we get this wrong, the captured stack trace might have one extra bogus
++ * entry, but the rest of stack trace will still be meaningful.
++ */
++static bool is_uprobe_at_func_entry(struct pt_regs *regs)
++{
++ struct arch_uprobe *auprobe;
++
++ if (!current->utask)
++ return false;
++
++ auprobe = current->utask->auprobe;
++ if (!auprobe)
++ return false;
++
++ /* push %rbp/%ebp */
++ if (auprobe->insn[0] == 0x55)
++ return true;
++
++ /* endbr64 (64-bit only) */
++ if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn))
++ return true;
++
++ return false;
++}
++
++#else
++static bool is_uprobe_at_func_entry(struct pt_regs *regs)
++{
++ return false;
++}
++#endif /* CONFIG_UPROBES */
++
+ #ifdef CONFIG_IA32_EMULATION
+
+ #include <linux/compat.h>
+@@ -2823,6 +2869,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ unsigned long ss_base, cs_base;
+ struct stack_frame_ia32 frame;
+ const struct stack_frame_ia32 __user *fp;
++ u32 ret_addr;
+
+ if (user_64bit_mode(regs))
+ return 0;
+@@ -2832,6 +2879,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+
+ fp = compat_ptr(ss_base + regs->bp);
+ pagefault_disable();
++
++ /* see perf_callchain_user() below for why we do this */
++ if (is_uprobe_at_func_entry(regs) &&
++ !get_user(ret_addr, (const u32 __user *)regs->sp))
++ perf_callchain_store(entry, ret_addr);
++
+ while (entry->nr < entry->max_stack) {
+ if (!valid_user_frame(fp, sizeof(frame)))
+ break;
+@@ -2860,6 +2913,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+ {
+ struct stack_frame frame;
+ const struct stack_frame __user *fp;
++ unsigned long ret_addr;
+
+ if (perf_guest_state()) {
+ /* TODO: We don't support guest os callchain now */
+@@ -2883,6 +2937,19 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+ return;
+
+ pagefault_disable();
++
++ /*
++ * If we are called from uprobe handler, and we are indeed at the very
++ * entry to a user function (which is normally a `push %rbp` instruction,
++ * under assumption of application being compiled with frame pointers),
++ * we should read the return address from *regs->sp before proceeding
++ * to follow frame pointers, otherwise we'll skip the immediate caller
++ * as %rbp is not yet set up.
++ */
++ if (is_uprobe_at_func_entry(regs) &&
++ !get_user(ret_addr, (const unsigned long __user *)regs->sp))
++ perf_callchain_store(entry, ret_addr);
++
+ while (entry->nr < entry->max_stack) {
+ if (!valid_user_frame(fp, sizeof(frame)))
+ break;
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index fa355d3658a652..688550e336ce17 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4062,12 +4062,17 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
+ u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
+ int global_ctrl, pebs_enable;
+
++ /*
++ * In addition to obeying exclude_guest/exclude_host, remove bits being
++ * used for PEBS when running a guest, because PEBS writes to virtual
++ * addresses (not physical addresses).
++ */
+ *nr = 0;
+ global_ctrl = (*nr)++;
+ arr[global_ctrl] = (struct perf_guest_switch_msr){
+ .msr = MSR_CORE_PERF_GLOBAL_CTRL,
+ .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
+- .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
++ .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
+ };
+
+ if (!x86_pmu.pebs)
+@@ -4460,6 +4465,25 @@ static u8 adl_get_hybrid_cpu_type(void)
+ return hybrid_big;
+ }
+
++static inline bool erratum_hsw11(struct perf_event *event)
++{
++ return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
++ X86_CONFIG(.event=0xc0, .umask=0x01);
++}
++
++/*
++ * The HSW11 requires a period larger than 100 which is the same as the BDM11.
++ * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL.
++ *
++ * The message 'interrupt took too long' can be observed on any counter which
++ * was armed with a period < 32 and two events expired in the same NMI.
++ * A minimum period of 32 is enforced for the rest of the events.
++ */
++static void hsw_limit_period(struct perf_event *event, s64 *left)
++{
++ *left = max(*left, erratum_hsw11(event) ? 128 : 32);
++}
++
+ /*
+ * Broadwell:
+ *
+@@ -4477,8 +4501,7 @@ static u8 adl_get_hybrid_cpu_type(void)
+ */
+ static void bdw_limit_period(struct perf_event *event, s64 *left)
+ {
+- if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+- X86_CONFIG(.event=0xc0, .umask=0x01)) {
++ if (erratum_hsw11(event)) {
+ if (*left < 128)
+ *left = 128;
+ *left &= ~0x3fULL;
+@@ -6387,6 +6410,7 @@ __init int intel_pmu_init(void)
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = hsw_get_event_constraints;
++ x86_pmu.limit_period = hsw_limit_period;
+ x86_pmu.lbr_double_abort = true;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 96fffb2d521d2a..cc6609cbfc8dad 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -80,7 +80,7 @@
+ * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
+ * perf code: 0x03
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+- * KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
++ * KBL,CML,ICL,TGL,RKL
+ * Scope: Package (physical package)
+ * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
+ * perf code: 0x04
+@@ -89,8 +89,7 @@
+ * Scope: Package (physical package)
+ * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
+ * perf code: 0x05
+- * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+- * ADL,RPL,MTL
++ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
+ * Scope: Package (physical package)
+ * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
+ * perf code: 0x06
+@@ -582,9 +581,7 @@ static const struct cstate_model adl_cstates __initconst = {
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES) |
+- BIT(PERF_CSTATE_PKG_C7_RES) |
+ BIT(PERF_CSTATE_PKG_C8_RES) |
+- BIT(PERF_CSTATE_PKG_C9_RES) |
+ BIT(PERF_CSTATE_PKG_C10_RES),
+ };
+
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index eb8dd8b8a1e860..b592bed9ebcc46 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1236,11 +1236,11 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
+ struct pmu *pmu = event->pmu;
+
+ /*
+- * Make sure we get updated with the first PEBS
+- * event. It will trigger also during removal, but
+- * that does not hurt:
++ * Make sure we get updated with the first PEBS event.
++ * During removal, ->pebs_data_cfg is still valid for
++ * the last PEBS event. Don't clear it.
+ */
+- if (cpuc->n_pebs == 1)
++ if ((cpuc->n_pebs == 1) && add)
+ cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
+
+ if (needed_cb != pebs_needs_sched_cb(cpuc)) {
+@@ -1830,8 +1830,12 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+ set_linear_ip(regs, basic->ip);
+ regs->flags = PERF_EFLAGS_EXACT;
+
+- if ((sample_type & PERF_SAMPLE_WEIGHT_STRUCT) && (x86_pmu.flags & PMU_FL_RETIRE_LATENCY))
+- data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
++ if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
++ if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)
++ data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
++ else
++ data->weight.var3_w = 0;
++ }
+
+ /*
+ * The record for MEMINFO is in front of GP
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index 42a55794004a7a..4110246aba12c3 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -877,7 +877,7 @@ static void pt_update_head(struct pt *pt)
+ */
+ static void *pt_buffer_region(struct pt_buffer *buf)
+ {
+- return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
++ return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
+ }
+
+ /**
+@@ -989,7 +989,7 @@ pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
+ * order allocations, there shouldn't be many of these.
+ */
+ list_for_each_entry(topa, &buf->tables, list) {
+- if (topa->offset + topa->size > pg << PAGE_SHIFT)
++ if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
+ goto found;
+ }
+
+@@ -1602,6 +1602,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
+ * see comment in intel_pt_interrupt().
+ */
+ WRITE_ONCE(pt->handle_nmi, 0);
++ barrier();
+
+ pt_config_stop(event);
+
+@@ -1653,11 +1654,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
+ return 0;
+
+ /*
+- * Here, handle_nmi tells us if the tracing is on
++ * There is no PT interrupt in this mode, so stop the trace and it will
++ * remain stopped while the buffer is copied.
+ */
+- if (READ_ONCE(pt->handle_nmi))
+- pt_config_stop(event);
+-
++ pt_config_stop(event);
+ pt_read_offset(buf);
+ pt_update_head(pt);
+
+@@ -1669,11 +1669,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
+ ret = perf_output_copy_aux(&pt->handle, handle, from, to);
+
+ /*
+- * If the tracing was on when we turned up, restart it.
+- * Compiler barrier not needed as we couldn't have been
+- * preempted by anything that touches pt->handle_nmi.
++	 * Here, handle_nmi tells us if the tracing was on;
++	 * if it was, restart it.
+ */
+- if (pt->handle_nmi)
++ if (READ_ONCE(pt->handle_nmi))
+ pt_config_start(event);
+
+ return ret;
+diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
+index 96906a62aacdad..f5e46c04c145d0 100644
+--- a/arch/x86/events/intel/pt.h
++++ b/arch/x86/events/intel/pt.h
+@@ -33,8 +33,8 @@ struct topa_entry {
+ u64 rsvd2 : 1;
+ u64 size : 4;
+ u64 rsvd3 : 2;
+- u64 base : 36;
+- u64 rsvd4 : 16;
++ u64 base : 40;
++ u64 rsvd4 : 12;
+ };
+
+ /* TSC to Core Crystal Clock Ratio */
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 8250f0f59c2bbe..a8f11e60b98794 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -461,6 +461,7 @@
+ #define SPR_UBOX_DID 0x3250
+
+ /* SPR CHA */
++#define SPR_CHA_EVENT_MASK_EXT 0xffffffff
+ #define SPR_CHA_PMON_CTL_TID_EN (1 << 16)
+ #define SPR_CHA_PMON_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
+ SPR_CHA_PMON_CTL_TID_EN)
+@@ -477,6 +478,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
+ DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
+ DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
+ DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
++DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
+ DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
+ DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+ DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+@@ -5596,7 +5598,7 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
+ struct pci_dev *ubox = NULL;
+ struct pci_dev *dev = NULL;
+ u32 nid, gid;
+- int i, idx, ret = -EPERM;
++ int i, idx, lgc_pkg, ret = -EPERM;
+ struct intel_uncore_topology *upi;
+ unsigned int devfn;
+
+@@ -5614,8 +5616,13 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
+ for (i = 0; i < 8; i++) {
+ if (nid != GIDNIDMAP(gid, i))
+ continue;
++ lgc_pkg = topology_phys_to_logical_pkg(i);
++ if (lgc_pkg < 0) {
++ ret = -EPERM;
++ goto err;
++ }
+ for (idx = 0; idx < type->num_boxes; idx++) {
+- upi = &type->topology[nid][idx];
++ upi = &type->topology[lgc_pkg][idx];
+ devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
+ dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
+ ubox->bus->number,
+@@ -5626,6 +5633,7 @@ static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, i
+ goto err;
+ }
+ }
++ break;
+ }
+ }
+ err:
+@@ -5948,7 +5956,7 @@ static struct intel_uncore_ops spr_uncore_chabox_ops = {
+
+ static struct attribute *spr_uncore_cha_formats_attr[] = {
+ &format_attr_event.attr,
+- &format_attr_umask_ext4.attr,
++ &format_attr_umask_ext5.attr,
+ &format_attr_tid_en2.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+@@ -5984,7 +5992,7 @@ ATTRIBUTE_GROUPS(uncore_alias);
+ static struct intel_uncore_type spr_uncore_chabox = {
+ .name = "cha",
+ .event_mask = SPR_CHA_PMON_EVENT_MASK,
+- .event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
++ .event_mask_ext = SPR_CHA_EVENT_MASK_EXT,
+ .num_shared_regs = 1,
+ .constraints = skx_uncore_chabox_constraints,
+ .ops = &spr_uncore_chabox_ops,
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 21556ad87f4ba8..d1e2d12279e268 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -15,6 +15,7 @@
+ #include <linux/io.h>
+ #include <asm/apic.h>
+ #include <asm/desc.h>
++#include <asm/e820/api.h>
+ #include <asm/sev.h>
+ #include <asm/ibt.h>
+ #include <asm/hypervisor.h>
+@@ -34,7 +35,6 @@
+ #include <clocksource/hyperv_timer.h>
+ #include <linux/highmem.h>
+
+-int hyperv_init_cpuhp;
+ u64 hv_current_partition_id = ~0ull;
+ EXPORT_SYMBOL_GPL(hv_current_partition_id);
+
+@@ -286,15 +286,31 @@ static int hv_cpu_die(unsigned int cpu)
+
+ static int __init hv_pci_init(void)
+ {
+- int gen2vm = efi_enabled(EFI_BOOT);
++ bool gen2vm = efi_enabled(EFI_BOOT);
+
+ /*
+- * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
+- * The purpose is to suppress the harmless warning:
++ * A Generation-2 VM doesn't support legacy PCI/PCIe, so both
++ * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
++ * pcibios_init() doesn't call pcibios_resource_survey() ->
++ * e820__reserve_resources_late(); as a result, any emulated persistent
++ * memory of E820_TYPE_PRAM (12) via the kernel parameter
++ * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
++ * detected by register_e820_pmem(). Fix this by directly calling
++ * e820__reserve_resources_late() here: e820__reserve_resources_late()
++ * depends on e820__reserve_resources(), which has been called earlier
++ * from setup_arch(). Note: e820__reserve_resources_late() also adds
++ * any memory of E820_TYPE_PMEM (7) into iomem_resource, and
++ * acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
++ * region_intersects() returns REGION_INTERSECTS, so the memory of
++ * E820_TYPE_PMEM won't get added twice.
++ *
++ * We return 0 here so that pci_arch_init() won't print the warning:
+ * "PCI: Fatal: No config space access function found"
+ */
+- if (gen2vm)
++ if (gen2vm) {
++ e820__reserve_resources_late();
+ return 0;
++ }
+
+ /* For Generation-1 VM, we'll proceed in pci_arch_init(). */
+ return 1;
+@@ -590,8 +606,6 @@ void __init hyperv_init(void)
+
+ register_syscore_ops(&hv_syscore_ops);
+
+- hyperv_init_cpuhp = cpuhp;
+-
+ if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
+ hv_get_partition_id();
+
+@@ -620,7 +634,7 @@ void __init hyperv_init(void)
+ clean_guest_os_id:
+ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
+- cpuhp_remove_state(cpuhp);
++ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
+ free_ghcb_page:
+ free_percpu(hv_ghcb_pg);
+ free_vp_assist_page:
+diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
+index 999f5ac82fe900..c2f78fabc865bb 100644
+--- a/arch/x86/hyperv/hv_vtl.c
++++ b/arch/x86/hyperv/hv_vtl.c
+@@ -12,10 +12,16 @@
+ #include <asm/i8259.h>
+ #include <asm/mshyperv.h>
+ #include <asm/realmode.h>
++#include <../kernel/smpboot.h>
+
+ extern struct boot_params boot_params;
+ static struct real_mode_header hv_vtl_real_mode_header;
+
++static bool __init hv_vtl_msi_ext_dest_id(void)
++{
++ return true;
++}
++
+ void __init hv_vtl_init_platform(void)
+ {
+ pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
+@@ -38,6 +44,8 @@ void __init hv_vtl_init_platform(void)
+ x86_platform.legacy.warm_reset = 0;
+ x86_platform.legacy.reserve_bios_regions = 0;
+ x86_platform.legacy.devices.pnpbios = 0;
++
++ x86_init.hyper.msi_ext_dest_id = hv_vtl_msi_ext_dest_id;
+ }
+
+ static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
+@@ -57,7 +65,7 @@ static void hv_vtl_ap_entry(void)
+ ((secondary_startup_64_fn)secondary_startup_64)(&boot_params, &boot_params);
+ }
+
+-static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
++static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
+ {
+ u64 status;
+ int ret = 0;
+@@ -71,7 +79,9 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
+ struct ldttss_desc *ldt;
+ struct desc_struct *gdt;
+
+- u64 rsp = current->thread.sp;
++ struct task_struct *idle = idle_thread_get(cpu);
++ u64 rsp = (unsigned long)idle->thread.sp;
++
+ u64 rip = (u64)&hv_vtl_ap_entry;
+
+ native_store_gdt(&gdt_ptr);
+@@ -198,7 +208,15 @@ static int hv_vtl_apicid_to_vp_id(u32 apic_id)
+
+ static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
+ {
+- int vp_id;
++ int vp_id, cpu;
++
++ /* Find the logical CPU for the APIC ID */
++ for_each_present_cpu(cpu) {
++ if (arch_match_cpu_phys_id(cpu, apicid))
++ break;
++ }
++ if (cpu >= nr_cpu_ids)
++ return -EINVAL;
+
+ pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
+ vp_id = hv_vtl_apicid_to_vp_id(apicid);
+@@ -212,7 +230,7 @@ static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
+ return -EINVAL;
+ }
+
+- return hv_vtl_bringup_vcpu(vp_id, start_eip);
++ return hv_vtl_bringup_vcpu(vp_id, cpu, start_eip);
+ }
+
+ int __init hv_vtl_early_init(void)
+diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
+index c8a7fc23f63c67..529c36a98d9ea0 100644
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -16,6 +16,9 @@
+ #include <asm/x86_init.h>
+ #include <asm/cpufeature.h>
+ #include <asm/irq_vectors.h>
++#include <asm/xen/hypervisor.h>
++
++#include <xen/xen.h>
+
+ #ifdef CONFIG_ACPI_APEI
+ # include <asm/pgtable_types.h>
+@@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
+ if (!cpu_has(c, X86_FEATURE_MWAIT) ||
+ boot_option_idle_override == IDLE_NOMWAIT)
+ *cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
++
++ if (xen_initial_domain()) {
++ /*
++ * When Linux is running as Xen dom0, the hypervisor is the
++ * entity in charge of the processor power management, and so
++		 * Xen needs to check that the OS capabilities reported in
++		 * the processor capabilities buffer match what the
++		 * hypervisor driver supports.
++ */
++ xen_sanitize_proc_cap_bits(cap);
++ }
+ }
+
+ static inline bool acpi_has_cpu_in_madt(void)
+@@ -151,6 +165,14 @@ void acpi_generic_reduced_hw_init(void);
+ void x86_default_set_root_pointer(u64 addr);
+ u64 x86_default_get_root_pointer(void);
+
++#ifdef CONFIG_XEN_PV
++/* A Xen PV domain needs a special acpi_os_ioremap() handling. */
++extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
++ acpi_size size);
++void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
++#define acpi_os_ioremap acpi_os_ioremap
++#endif
++
+ #else /* !CONFIG_ACPI */
+
+ #define acpi_lapic 0
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 9c4da699e11aff..cb9ce0f9e78e05 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -58,7 +58,7 @@
+ #define ANNOTATE_IGNORE_ALTERNATIVE \
+ "999:\n\t" \
+ ".pushsection .discard.ignore_alts\n\t" \
+- ".long 999b - .\n\t" \
++ ".long 999b\n\t" \
+ ".popsection\n\t"
+
+ /*
+@@ -288,10 +288,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ * Otherwise, if CPU has feature1, newinstr1 is used.
+ * Otherwise, oldinstr is used.
+ */
+-#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
+- ft_flags2, input...) \
+- asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
+- newinstr2, ft_flags2) \
++#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
++ ft_flags2, input...) \
++ asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
++ newinstr2, ft_flags2) \
+ : : "i" (0), ## input)
+
+ /* Like alternative_input, but with a single output argument */
+@@ -301,7 +301,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+
+ /* Like alternative_io, but for replacing a direct call with another one. */
+ #define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
+- asm_inline volatile (ALTERNATIVE("call %P[old]", "call %P[new]", ft_flags) \
++ asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
+ : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+
+ /*
+@@ -310,12 +310,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+-#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
+- output, input...) \
+- asm_inline volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", ft_flags1,\
+- "call %P[new2]", ft_flags2) \
+- : output, ASM_CALL_CONSTRAINT \
+- : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
++#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
++ output, input...) \
++ asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
++ "call %c[new2]", ft_flags2) \
++ : output, ASM_CALL_CONSTRAINT \
++ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input)
+
+ /*
+@@ -352,7 +352,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ .macro ANNOTATE_IGNORE_ALTERNATIVE
+ .Lannotate_\@:
+ .pushsection .discard.ignore_alts
+- .long .Lannotate_\@ - .
++ .long .Lannotate_\@
+ .popsection
+ .endm
+
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 5af4ec1a0f71cf..33aa0c31c21cf1 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -13,6 +13,7 @@
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
+ #include <asm/hardirq.h>
++#include <asm/io.h>
+
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
+
+@@ -96,7 +97,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
+
+ static inline u32 native_apic_mem_read(u32 reg)
+ {
+- return *((volatile u32 *)(APIC_BASE + reg));
++ return readl((void __iomem *)(APIC_BASE + reg));
+ }
+
+ static inline void native_apic_mem_eoi(void)
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+index b1a98fa38828e2..0e82074517f6b7 100644
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -13,6 +13,7 @@
+ #include <asm/preempt.h>
+ #include <asm/asm.h>
+ #include <asm/gsseg.h>
++#include <asm/nospec-branch.h>
+
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index fbcfec4dc4ccd7..ca8eed1d496ab4 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -113,6 +113,20 @@
+
+ #endif
+
++#ifndef __ASSEMBLY__
++#ifndef __pic__
++static __always_inline __pure void *rip_rel_ptr(void *p)
++{
++ asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
++
++ return p;
++}
++#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
++#else
++#define RIP_REL_REF(var) (var)
++#endif
++#endif
++
+ /*
+ * Macros to generate condition code outputs from inline assembly,
+ * The output operand must be type "bool".
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index 3486d91b8595f1..d510405e4e1de2 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -24,7 +24,7 @@ typedef struct {
+
+ #ifdef CONFIG_X86_CMPXCHG64
+ #define __alternative_atomic64(f, g, out, in...) \
+- asm volatile("call %P[func]" \
++ asm volatile("call %c[func]" \
+ : out : [func] "i" (atomic64_##g##_cx8), ## in)
+
+ #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 35389b2af88ee8..d0795b5fab46ad 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -79,24 +79,9 @@ do { \
+ #define __smp_mb__before_atomic() do { } while (0)
+ #define __smp_mb__after_atomic() do { } while (0)
+
+-#include <asm-generic/barrier.h>
++/* Writing to CR3 provides a full memory barrier in switch_mm(). */
++#define smp_mb__after_switch_mm() do { } while (0)
+
+-/*
+- * Make previous memory operations globally visible before
+- * a WRMSR.
+- *
+- * MFENCE makes writes visible, but only affects load/store
+- * instructions. WRMSR is unfortunately not a load/store
+- * instruction and is unaffected by MFENCE. The LFENCE ensures
+- * that the WRMSR is not reordered.
+- *
+- * Most WRMSRs are full serializing instructions themselves and
+- * do not require this barrier. This is only required for the
+- * IA32_TSC_DEADLINE and X2APIC MSRs.
+- */
+-static inline void weak_wrmsr_fence(void)
+-{
+- asm volatile("mfence; lfence" : : : "memory");
+-}
++#include <asm-generic/barrier.h>
+
+ #endif /* _ASM_X86_BARRIER_H */
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index b3a7cfb0d99e01..c945c893c52e0a 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -81,6 +81,7 @@
+
+ #ifndef __ASSEMBLY__
+ extern unsigned int output_len;
++extern const unsigned long kernel_text_size;
+ extern const unsigned long kernel_total_size;
+
+ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
+index 44b08b53ab32fc..c1d6cd58f80940 100644
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -62,7 +62,7 @@ static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old,
+ asm volatile(_lock "cmpxchg16b %[ptr]" \
+ CC_SET(e) \
+ : CC_OUT(e) (ret), \
+- [ptr] "+m" (*ptr), \
++ [ptr] "+m" (*(_ptr)), \
+ "+a" (o.low), "+d" (o.high) \
+ : "b" (n.low), "c" (n.high) \
+ : "memory"); \
+diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
+index 6ae2d16a7613b7..c72b3553081c3c 100644
+--- a/arch/x86/include/asm/coco.h
++++ b/arch/x86/include/asm/coco.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_X86_COCO_H
+ #define _ASM_X86_COCO_H
+
++#include <asm/asm.h>
+ #include <asm/types.h>
+
+ enum cc_vendor {
+@@ -13,10 +14,19 @@ enum cc_vendor {
+ extern enum cc_vendor cc_vendor;
+
+ #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+-void cc_set_mask(u64 mask);
++extern u64 cc_mask;
++
++static inline void cc_set_mask(u64 mask)
++{
++ RIP_REL_REF(cc_mask) = mask;
++}
++
+ u64 cc_mkenc(u64 val);
+ u64 cc_mkdec(u64 val);
++void cc_random_init(void);
+ #else
++static const u64 cc_mask = 0;
++
+ static inline u64 cc_mkenc(u64 val)
+ {
+ return val;
+@@ -26,6 +36,7 @@ static inline u64 cc_mkdec(u64 val)
+ {
+ return val;
+ }
++static inline void cc_random_init(void) { }
+ #endif
+
+ #endif /* _ASM_X86_COCO_H */
+diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
+index eb8fcede9e3bf4..e8e3dbe7f17306 100644
+--- a/arch/x86/include/asm/cpu_device_id.h
++++ b/arch/x86/include/asm/cpu_device_id.h
+@@ -2,6 +2,39 @@
+ #ifndef _ASM_X86_CPU_DEVICE_ID
+ #define _ASM_X86_CPU_DEVICE_ID
+
++/*
++ * Can't use <linux/bitfield.h> because it generates expressions that
++ * cannot be used in structure initializers. Bitfield construction
++ * here must match the union in struct cpuinfo_x86:
++ * union {
++ * struct {
++ * __u8 x86_model;
++ * __u8 x86;
++ * __u8 x86_vendor;
++ * __u8 x86_reserved;
++ * };
++ * __u32 x86_vfm;
++ * };
++ */
++#define VFM_MODEL_BIT 0
++#define VFM_FAMILY_BIT 8
++#define VFM_VENDOR_BIT 16
++#define VFM_RSVD_BIT 24
++
++#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
++#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
++#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
++
++#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
++#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
++#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
++
++#define VFM_MAKE(_vendor, _family, _model) ( \
++ ((_model) << VFM_MODEL_BIT) | \
++ ((_family) << VFM_FAMILY_BIT) | \
++ ((_vendor) << VFM_VENDOR_BIT) \
++)
++
+ /*
+ * Declare drivers belonging to specific x86 CPUs
+ * Similar in spirit to pci_device_id and related PCI functions
+@@ -20,6 +53,9 @@
+ #define X86_CENTAUR_FAM6_C7_D 0xd
+ #define X86_CENTAUR_FAM6_NANO 0xf
+
++/* x86_cpu_id::flags */
++#define X86_CPU_ID_FLAG_ENTRY_VALID BIT(0)
++
+ #define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
+ /**
+ * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
+@@ -46,6 +82,18 @@
+ .model = _model, \
+ .steppings = _steppings, \
+ .feature = _feature, \
++ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, \
++ .driver_data = (unsigned long) _data \
++}
++
++#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
++ _steppings, _feature, _data) { \
++ .vendor = _vendor, \
++ .family = _family, \
++ .model = _model, \
++ .steppings = _steppings, \
++ .feature = _feature, \
++ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, \
+ .driver_data = (unsigned long) _data \
+ }
+
+@@ -164,6 +212,56 @@
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ steppings, X86_FEATURE_ANY, data)
+
++/**
++ * X86_MATCH_VFM - Match encoded vendor/family/model
++ * @vfm: Encoded 8-bits each for vendor, family, model
++ * @data: Driver specific data or NULL. The internal storage
++ * format is unsigned long. The supplied value, pointer
++ * etc. is cast to unsigned long internally.
++ *
++ * Stepping and feature are set to wildcards
++ */
++#define X86_MATCH_VFM(vfm, data) \
++ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
++ VFM_VENDOR(vfm), \
++ VFM_FAMILY(vfm), \
++ VFM_MODEL(vfm), \
++ X86_STEPPING_ANY, X86_FEATURE_ANY, data)
++
++/**
++ * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
++ * @vfm: Encoded 8-bits each for vendor, family, model
++ * @steppings: Bitmask of steppings to match
++ * @data: Driver specific data or NULL. The internal storage
++ * format is unsigned long. The supplied value, pointer
++ * etc. is cast to unsigned long internally.
++ *
++ * Feature is set to a wildcard
++ */
++#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \
++ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
++ VFM_VENDOR(vfm), \
++ VFM_FAMILY(vfm), \
++ VFM_MODEL(vfm), \
++ steppings, X86_FEATURE_ANY, data)
++
++/**
++ * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
++ * @vfm: Encoded 8-bits each for vendor, family, model
++ * @feature: A X86_FEATURE bit
++ * @data: Driver specific data or NULL. The internal storage
++ * format is unsigned long. The supplied value, pointer
++ * etc. is cast to unsigned long internally.
++ *
++ * Stepping is set to a wildcard
++ */
++#define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
++ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
++ VFM_VENDOR(vfm), \
++ VFM_FAMILY(vfm), \
++ VFM_MODEL(vfm), \
++ X86_STEPPING_ANY, feature, data)
++
+ /*
+ * Match specific microcode revisions.
+ *
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index a26bebbdff87ed..3508f3fc928d4d 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -33,6 +33,8 @@ enum cpuid_leafs
+ CPUID_7_EDX,
+ CPUID_8000_001F_EAX,
+ CPUID_8000_0021_EAX,
++ CPUID_LNX_5,
++ NR_CPUID_WORDS,
+ };
+
+ #define X86_CAP_FMT_NUM "%d:%d"
+@@ -91,8 +93,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
+ REQUIRED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 21))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+
+ #define DISABLED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+@@ -116,8 +119,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
+ DISABLED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 21))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+
+ #define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+@@ -168,8 +172,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
+ */
+ static __always_inline bool _static_cpu_has(u16 bit)
+ {
+- asm_volatile_goto(
+- ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
++ asm goto(
++ ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]")
+ ".pushsection .altinstr_aux,\"ax\"\n"
+ "6:\n"
+ " testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 58cb9495e40f42..55d18eef6775a6 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+ * Defines x86 CPU feature bits
+ */
+-#define NCAPINTS 21 /* N 32-bit words worth of info */
++#define NCAPINTS 22 /* N 32-bit words worth of info */
+ #define NBUGINTS 2 /* N 32-bit bug flags */
+
+ /*
+@@ -97,7 +97,7 @@
+ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+ #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
+-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
++#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
+ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+ #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+@@ -216,9 +216,9 @@
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+ #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+-#define X86_FEATURE_ZEN (7*32+28) /* "" CPU based on Zen microarchitecture */
++#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
+ #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+ #define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
+ #define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
+@@ -308,10 +308,14 @@
+ #define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
+ #define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
+ #define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
+-
+ #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
+ #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
+ #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
++#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
++#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
++#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
++#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
++#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
+@@ -343,6 +347,7 @@
+ #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+ #define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
+ #define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
++#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* "" IBPB clears return address predictor */
+ #define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+@@ -452,6 +457,18 @@
+ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
+
++/*
++ * Extended auxiliary flags: Linux defined - for features scattered in various
++ * CPUID levels like 0x80000022, etc., and Linux-defined features.
++ *
++ * Reuse free bits when adding new feature flags!
++ */
++#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
++#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
++#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
++#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
++#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
++
+ /*
+ * BUG word(s)
+ */
+@@ -498,4 +515,7 @@
+ /* BUG word 2 */
+ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
+ #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
++#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
++#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
++#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index 702d93fdd10e8d..88fcf08458d9cd 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -143,6 +143,7 @@
+ #define DISABLED_MASK18 (DISABLE_IBT)
+ #define DISABLED_MASK19 0
+ #define DISABLED_MASK20 0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
++#define DISABLED_MASK21 0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index c4555b269a1b24..a050d329e34bfd 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -410,7 +410,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
+ struct efi_memory_map_data *data);
+ extern void __efi_memmap_free(u64 phys, unsigned long size,
+ unsigned long flags);
+-#define __efi_memmap_free __efi_memmap_free
+
+ extern int __init efi_memmap_install(struct efi_memory_map_data *data);
+ extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index ce8f50192ae3e4..fb2809b20b0ac4 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -73,25 +73,21 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ #endif
+
+ /*
+- * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+- * but not enough for x86 stack utilization comfort. To keep
+- * reasonable stack head room, reduce the maximum offset to 8 bits.
+- *
+- * The actual entropy will be further reduced by the compiler when
+- * applying stack alignment constraints (see cc_stack_align4/8 in
++ * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
++ * bits. The actual entropy will be further reduced by the compiler
++ * when applying stack alignment constraints (see cc_stack_align4/8 in
+ * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
+ * low bits from any entropy chosen here.
+ *
+- * Therefore, final stack offset entropy will be 5 (x86_64) or
+- * 6 (ia32) bits.
++ * Therefore, final stack offset entropy will be 7 (x86_64) or
++ * 8 (ia32) bits.
+ */
+- choose_random_kstack_offset(rdtsc() & 0xFF);
++ choose_random_kstack_offset(rdtsc());
+ }
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+- mds_user_clear_cpu_buffers();
+ amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
+index 611fa41711affd..eccc75bc9c4f3d 100644
+--- a/arch/x86/include/asm/fpu/signal.h
++++ b/arch/x86/include/asm/fpu/signal.h
+@@ -29,7 +29,7 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
+
+ unsigned long fpu__get_fpstate_size(void);
+
+-extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
++extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size, u32 pkru);
+ extern void fpu__clear_user_states(struct fpu *fpu);
+ extern bool fpu__restore_sig(void __user *buf, int ia32_frame);
+
+diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
+index eb810074f1e745..fd5fb43d920b44 100644
+--- a/arch/x86/include/asm/fpu/types.h
++++ b/arch/x86/include/asm/fpu/types.h
+@@ -589,6 +589,13 @@ struct fpu_state_config {
+ * even without XSAVE support, i.e. legacy features FP + SSE
+ */
+ u64 legacy_features;
++ /*
++ * @independent_features:
++ *
++ * Features that are supported by XSAVES, but not managed as part of
++ * the FPU core, such as LBR
++ */
++ u64 independent_features;
+ };
+
+ /* FPU state configuration information */
+diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
+index 66837b8c67f1a9..f2e245741afc2c 100644
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -63,7 +63,11 @@ extern u64 arch_irq_stat(void);
+ #define local_softirq_pending_ref pcpu_hot.softirq_pending
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+-static inline void kvm_set_cpu_l1tf_flush_l1d(void)
++/*
++ * This function is called from noinstr interrupt contexts
++ * and must be inlined to not get instrumentation.
++ */
++static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
+ {
+ __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
+ }
+@@ -78,7 +82,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
+ return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
+ }
+ #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
+-static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
++static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+ #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
+
+ #endif /* _ASM_X86_HARDIRQ_H */
+diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
+index fada857f0a1edf..9805629479d968 100644
+--- a/arch/x86/include/asm/ia32.h
++++ b/arch/x86/include/asm/ia32.h
+@@ -68,6 +68,27 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
+
+ #endif
+
+-#endif /* CONFIG_IA32_EMULATION */
++extern bool __ia32_enabled;
++
++static inline bool ia32_enabled(void)
++{
++ return __ia32_enabled;
++}
++
++static inline void ia32_disable(void)
++{
++ __ia32_enabled = false;
++}
++
++#else /* !CONFIG_IA32_EMULATION */
++
++static inline bool ia32_enabled(void)
++{
++ return IS_ENABLED(CONFIG_X86_32);
++}
++
++static inline void ia32_disable(void) {}
++
++#endif
+
+ #endif /* _ASM_X86_IA32_H */
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index 05fd175cec7d5c..10603e185111d5 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -13,15 +13,18 @@
+
+ #include <asm/irq_stack.h>
+
++typedef void (*idtentry_t)(struct pt_regs *regs);
++
+ /**
+ * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
+ * No error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+- * Declares three functions:
++ * Declares four functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_##func (maybe unused)
++ * - The C handler called from the FRED event dispatcher (maybe unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it
+@@ -31,6 +34,7 @@
+ #define DECLARE_IDTENTRY(vector, func) \
+ asmlinkage void asm_##func(void); \
+ asmlinkage void xen_asm_##func(void); \
++ void fred_##func(struct pt_regs *regs); \
+ __visible void func(struct pt_regs *regs)
+
+ /**
+@@ -137,6 +141,17 @@ static __always_inline void __##func(struct pt_regs *regs, \
+ #define DEFINE_IDTENTRY_RAW(func) \
+ __visible noinstr void func(struct pt_regs *regs)
+
++/**
++ * DEFINE_FREDENTRY_RAW - Emit code for raw FRED entry points
++ * @func: Function name of the entry point
++ *
++ * @func is called from the FRED event dispatcher with interrupts disabled.
++ *
++ * See @DEFINE_IDTENTRY_RAW for further details.
++ */
++#define DEFINE_FREDENTRY_RAW(func) \
++noinstr void fred_##func(struct pt_regs *regs)
++
+ /**
+ * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
+ * Error code pushed by hardware
+@@ -197,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \
+ irqentry_state_t state = irqentry_enter(regs); \
+ u32 vector = (u32)(u8)error_code; \
+ \
++ kvm_set_cpu_l1tf_flush_l1d(); \
+ instrumentation_begin(); \
+- kvm_set_cpu_l1tf_flush_l1d(); \
+ run_irq_on_irqstack_cond(__##func, regs, vector); \
+ instrumentation_end(); \
+ irqentry_exit(regs, state); \
+@@ -233,17 +248,27 @@ static noinline void __##func(struct pt_regs *regs, u32 vector)
+ #define DEFINE_IDTENTRY_SYSVEC(func) \
+ static void __##func(struct pt_regs *regs); \
+ \
++static __always_inline void instr_##func(struct pt_regs *regs) \
++{ \
++ run_sysvec_on_irqstack_cond(__##func, regs); \
++} \
++ \
+ __visible noinstr void func(struct pt_regs *regs) \
+ { \
+ irqentry_state_t state = irqentry_enter(regs); \
+ \
++ kvm_set_cpu_l1tf_flush_l1d(); \
+ instrumentation_begin(); \
+- kvm_set_cpu_l1tf_flush_l1d(); \
+- run_sysvec_on_irqstack_cond(__##func, regs); \
++ instr_##func (regs); \
+ instrumentation_end(); \
+ irqentry_exit(regs, state); \
+ } \
+ \
++void fred_##func(struct pt_regs *regs) \
++{ \
++ instr_##func (regs); \
++} \
++ \
+ static noinline void __##func(struct pt_regs *regs)
+
+ /**
+@@ -260,19 +285,29 @@ static noinline void __##func(struct pt_regs *regs)
+ #define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
+ static __always_inline void __##func(struct pt_regs *regs); \
+ \
++static __always_inline void instr_##func(struct pt_regs *regs) \
++{ \
++ __irq_enter_raw(); \
++ __##func (regs); \
++ __irq_exit_raw(); \
++} \
++ \
+ __visible noinstr void func(struct pt_regs *regs) \
+ { \
+ irqentry_state_t state = irqentry_enter(regs); \
+ \
++ kvm_set_cpu_l1tf_flush_l1d(); \
+ instrumentation_begin(); \
+- __irq_enter_raw(); \
+- kvm_set_cpu_l1tf_flush_l1d(); \
+- __##func (regs); \
+- __irq_exit_raw(); \
++ instr_##func (regs); \
+ instrumentation_end(); \
+ irqentry_exit(regs, state); \
+ } \
+ \
++void fred_##func(struct pt_regs *regs) \
++{ \
++ instr_##func (regs); \
++} \
++ \
+ static __always_inline void __##func(struct pt_regs *regs)
+
+ /**
+@@ -410,15 +445,18 @@ __visible noinstr void func(struct pt_regs *regs, \
+ /* C-Code mapping */
+ #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
+ #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
++#define DEFINE_FREDENTRY_NMI DEFINE_FREDENTRY_RAW
+
+ #ifdef CONFIG_X86_64
+ #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
+ #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
+ #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
++#define DEFINE_FREDENTRY_MCE DEFINE_FREDENTRY_RAW
+
+ #define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
+ #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
+ #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
++#define DEFINE_FREDENTRY_DEBUG DEFINE_FREDENTRY_RAW
+ #endif
+
+ #else /* !__ASSEMBLY__ */
+@@ -569,6 +607,10 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_UD, exc_invalid_op);
+ DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
+ DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
+
++#if defined(CONFIG_IA32_EMULATION)
++DECLARE_IDTENTRY_RAW(IA32_SYSCALL_VECTOR, int80_emulation);
++#endif
++
+ #ifdef CONFIG_X86_MCE
+ #ifdef CONFIG_X86_64
+ DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
+@@ -651,23 +693,36 @@ DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi);
+ DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot);
+ DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single);
+ DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
++#else
++# define fred_sysvec_reschedule_ipi NULL
++# define fred_sysvec_reboot NULL
++# define fred_sysvec_call_function_single NULL
++# define fred_sysvec_call_function NULL
+ #endif
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ # ifdef CONFIG_X86_MCE_THRESHOLD
+ DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
++# else
++# define fred_sysvec_threshold NULL
+ # endif
+
+ # ifdef CONFIG_X86_MCE_AMD
+ DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error);
++# else
++# define fred_sysvec_deferred_error NULL
+ # endif
+
+ # ifdef CONFIG_X86_THERMAL_VECTOR
+ DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal);
++# else
++# define fred_sysvec_thermal NULL
+ # endif
+
+ # ifdef CONFIG_IRQ_WORK
+ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
++# else
++# define fred_sysvec_irq_work NULL
+ # endif
+ #endif
+
+@@ -675,12 +730,16 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
+ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
+ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
+ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
++#else
++# define fred_sysvec_kvm_posted_intr_ipi NULL
++# define fred_sysvec_kvm_posted_intr_wakeup_ipi NULL
++# define fred_sysvec_kvm_posted_intr_nested_ipi NULL
+ #endif
+
+ #if IS_ENABLED(CONFIG_HYPERV)
+ DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
+ DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
+-DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
++DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
+ #endif
+
+ #if IS_ENABLED(CONFIG_ACRN_GUEST)
+diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
+index 5f1d3c421f6860..cc9ccf61b6bd11 100644
+--- a/arch/x86/include/asm/init.h
++++ b/arch/x86/include/asm/init.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_INIT_H
+ #define _ASM_X86_INIT_H
+
++#define __head __section(".head.text")
++
+ struct x86_mapping_info {
+ void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+ void *context; /* context for alloc_pgt_page */
+diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
+index 798183867d7896..b71ad173f8776f 100644
+--- a/arch/x86/include/asm/irq_stack.h
++++ b/arch/x86/include/asm/irq_stack.h
+@@ -100,7 +100,7 @@
+ }
+
+ #define ASM_CALL_ARG0 \
+- "call %P[__func] \n" \
++ "call %c[__func] \n" \
+ ASM_REACHABLE
+
+ #define ASM_CALL_ARG1 \
+diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
+index 800ffce0db29e3..6b4d36c9516557 100644
+--- a/arch/x86/include/asm/irq_work.h
++++ b/arch/x86/include/asm/irq_work.h
+@@ -9,7 +9,6 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ return boot_cpu_has(X86_FEATURE_APIC);
+ }
+-extern void arch_irq_work_raise(void);
+ #else
+ static inline bool arch_irq_work_has_interrupt(void)
+ {
+diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
+index 071572e23d3a06..cbbef32517f004 100644
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -24,7 +24,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm_volatile_goto("1:"
++ asm goto("1:"
+ "jmp %l[l_yes] # objtool NOPs this \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (2 | branch) : : l_yes);
+@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+
+ static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
+ {
+- asm_volatile_goto("1:"
++ asm goto("1:"
+ ".byte " __stringify(BYTES_NOP5) "\n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (branch) : : l_yes);
+@@ -52,7 +52,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key, co
+
+ static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
+ {
+- asm_volatile_goto("1:"
++ asm goto("1:"
+ "jmp %l[l_yes]\n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (branch) : : l_yes);
+diff --git a/arch/x86/include/asm/kmsan.h b/arch/x86/include/asm/kmsan.h
+index 8fa6ac0e2d7665..d91b37f5b4bb45 100644
+--- a/arch/x86/include/asm/kmsan.h
++++ b/arch/x86/include/asm/kmsan.h
+@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
+ {
+ unsigned long x = (unsigned long)addr;
+ unsigned long y = x - __START_KERNEL_map;
++ bool ret;
+
+ /* use the carry flag to determine if x was < __START_KERNEL_map */
+ if (unlikely(x > y)) {
+@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
+ return false;
+ }
+
+- return pfn_valid(x >> PAGE_SHIFT);
++ /*
++ * pfn_valid() relies on RCU, and may call into the scheduler on exiting
++ * the critical section. However, this would result in recursion with
++ * KMSAN. Therefore, disable preemption here, and re-enable preemption
++ * below while suppressing reschedules to avoid recursion.
++ *
++	 * Note that this occasionally sacrifices scheduling guarantees.
++	 * However, a kernel compiled with KMSAN has already given up any
++	 * performance guarantees due to being heavily instrumented.
++ */
++ preempt_disable();
++ ret = pfn_valid(x >> PAGE_SHIFT);
++ preempt_enable_no_resched();
++
++ return ret;
+ }
+
+ #endif /* !MODULE */
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index e3054e3e46d52d..9b419f0de713cc 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
+ KVM_X86_OP_OPTIONAL(vcpu_unblocking)
+ KVM_X86_OP_OPTIONAL(pi_update_irte)
+ KVM_X86_OP_OPTIONAL(pi_start_assignment)
++KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
+ KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
+ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+ KVM_X86_OP_OPTIONAL(set_hv_timer)
+diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
+index 6c98f4bb4228ba..058bc636356a11 100644
+--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
++++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
+@@ -22,7 +22,7 @@ KVM_X86_PMU_OP(get_msr)
+ KVM_X86_PMU_OP(set_msr)
+ KVM_X86_PMU_OP(refresh)
+ KVM_X86_PMU_OP(init)
+-KVM_X86_PMU_OP(reset)
++KVM_X86_PMU_OP_OPTIONAL(reset)
+ KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
+ KVM_X86_PMU_OP_OPTIONAL(cleanup)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 70d139406bc80d..257bf2e71d0605 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -828,6 +828,7 @@ struct kvm_vcpu_arch {
+ int cpuid_nent;
+ struct kvm_cpuid_entry2 *cpuid_entries;
+ struct kvm_hypervisor_cpuid kvm_cpuid;
++ bool is_amd_compatible;
+
+ /*
+ * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
+@@ -1708,6 +1709,7 @@ struct kvm_x86_ops {
+ int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*pi_start_assignment)(struct kvm *kvm);
++ void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+@@ -1756,7 +1758,7 @@ struct kvm_x86_nested_ops {
+ bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
+ u32 error_code);
+ int (*check_events)(struct kvm_vcpu *vcpu);
+- bool (*has_events)(struct kvm_vcpu *vcpu);
++ bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
+ void (*triple_fault)(struct kvm_vcpu *vcpu);
+ int (*get_state)(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index 473b16d73b4710..76081a34fc231b 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -15,7 +15,8 @@
+ #include <linux/init.h>
+ #include <linux/cc_platform.h>
+
+-#include <asm/bootparam.h>
++#include <asm/asm.h>
++struct boot_params;
+
+ #ifdef CONFIG_X86_MEM_ENCRYPT
+ void __init mem_encrypt_init(void);
+@@ -45,8 +46,8 @@ void __init sme_unmap_bootdata(char *real_mode_data);
+ void __init sme_early_init(void);
+ void __init sev_setup_arch(void);
+
+-void __init sme_encrypt_kernel(struct boot_params *bp);
+-void __init sme_enable(struct boot_params *bp);
++void sme_encrypt_kernel(struct boot_params *bp);
++void sme_enable(struct boot_params *bp);
+
+ int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
+ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+@@ -57,6 +58,11 @@ void __init mem_encrypt_free_decrypted_mem(void);
+
+ void __init sev_es_init_vc_handling(void);
+
++static inline u64 sme_get_me_mask(void)
++{
++ return RIP_REL_REF(sme_me_mask);
++}
++
+ #define __bss_decrypted __section(".bss..decrypted")
+
+ #else /* !CONFIG_AMD_MEM_ENCRYPT */
+@@ -75,8 +81,8 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
+ static inline void __init sme_early_init(void) { }
+ static inline void __init sev_setup_arch(void) { }
+
+-static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+-static inline void __init sme_enable(struct boot_params *bp) { }
++static inline void sme_encrypt_kernel(struct boot_params *bp) { }
++static inline void sme_enable(struct boot_params *bp) { }
+
+ static inline void sev_es_init_vc_handling(void) { }
+
+@@ -89,6 +95,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en
+
+ static inline void mem_encrypt_free_decrypted_mem(void) { }
+
++static inline u64 sme_get_me_mask(void) { return 0; }
++
+ #define __bss_decrypted
+
+ #endif /* CONFIG_AMD_MEM_ENCRYPT */
+@@ -106,11 +114,6 @@ void add_encrypt_protection_map(void);
+
+ extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
+-static inline u64 sme_get_me_mask(void)
+-{
+- return sme_me_mask;
+-}
+-
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __X86_MEM_ENCRYPT_H__ */
+diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
+index 896445edc6a8e9..ec95d6e9f1682c 100644
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -40,7 +40,6 @@ static inline unsigned char hv_get_nmi_reason(void)
+ }
+
+ #if IS_ENABLED(CONFIG_HYPERV)
+-extern int hyperv_init_cpuhp;
+ extern bool hyperv_paravisor_present;
+
+ extern void *hv_hypercall_pg;
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index b37abb55e948b7..24b7bd255e9830 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -50,10 +50,13 @@
+ #define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+ #define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
+ #define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
++#define SPEC_CTRL_BHI_DIS_S_SHIFT 10 /* Disable Branch History Injection behavior */
++#define SPEC_CTRL_BHI_DIS_S BIT(SPEC_CTRL_BHI_DIS_S_SHIFT)
+
+ /* A mask for bits which the kernel toggles when controlling mitigations */
+ #define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+- | SPEC_CTRL_RRSBA_DIS_S)
++ | SPEC_CTRL_RRSBA_DIS_S \
++ | SPEC_CTRL_BHI_DIS_S)
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
+@@ -152,6 +155,10 @@
+ * are restricted to targets in
+ * kernel.
+ */
++#define ARCH_CAP_BHI_NO BIT(20) /*
++ * CPU is not affected by Branch
++ * History Injection.
++ */
+ #define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+@@ -165,6 +172,14 @@
+ * CPU is not vulnerable to Gather
+ * Data Sampling (GDS).
+ */
++#define ARCH_CAP_RFDS_NO BIT(27) /*
++ * Not susceptible to Register
++ * File Data Sampling.
++ */
++#define ARCH_CAP_RFDS_CLEAR BIT(28) /*
++ * VERW clears CPU Register
++ * File.
++ */
+
+ #define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
+ * IA32_XAPIC_DISABLE_STATUS MSR
+@@ -222,6 +237,7 @@
+ #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT)
+ #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
+ #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
++#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9)
+
+ #define MSR_LBR_NHM_FROM 0x00000680
+ #define MSR_LBR_NHM_TO 0x000006c0
+@@ -553,6 +569,7 @@
+ #define MSR_AMD64_CPUID_FN_1 0xc0011004
+ #define MSR_AMD64_LS_CFG 0xc0011020
+ #define MSR_AMD64_DC_CFG 0xc0011022
++#define MSR_AMD64_TW_CFG 0xc0011023
+
+ #define MSR_AMD64_DE_CFG 0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
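[Sketch, not part of the patch] The msr-index.h additions enumerate the BHI_DIS_S control bit plus the BHI_NO, RFDS_NO and RFDS_CLEAR capability bits. A hedged sketch of how mitigation code tests them; the local variable name is an assumption, x86_read_arch_cap_msr() is used elsewhere in this patch:

    u64 caps = x86_read_arch_cap_msr();     /* IA32_ARCH_CAPABILITIES */

    if (!(caps & ARCH_CAP_RFDS_NO) && !(caps & ARCH_CAP_RFDS_CLEAR))
            pr_info("CPU may need a Register File Data Sampling mitigation\n");
    if (!(caps & ARCH_CAP_BHI_NO))
            pr_info("CPU is potentially exposed to Branch History Injection\n");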
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index 778df05f853918..bae83810505bf5 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -115,8 +115,15 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+- if (!need_resched())
+- __mwait(eax, ecx);
++
++ if (!need_resched()) {
++ if (ecx & 1) {
++ __mwait(eax, ecx);
++ } else {
++ __sti_mwait(eax, ecx);
++ raw_local_irq_disable();
++ }
++ }
+ }
+ current_clr_polling();
+ }
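[Annotation, not part of the patch] The mwait_idle_with_hints() change splits on ECX bit 0, MWAIT's "treat interrupts as break events" hint; the SDM semantics below are the assumption:

    /*
     * ecx & 1   -> __mwait(): the break-event hint lets an interrupt wake
     *              MWAIT even with RFLAGS.IF clear, so no STI is needed.
     * !(ecx&1)  -> __sti_mwait(): STI;MWAIT opens the one-instruction
     *              interrupt shadow so a pending interrupt can wake the
     *              core, then raw_local_irq_disable() restores the
     *              IRQs-off state the caller expects.
     */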
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index c55cc243592e94..ee642d26e30457 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -196,7 +196,7 @@
+ .macro ANNOTATE_RETPOLINE_SAFE
+ .Lhere_\@:
+ .pushsection .discard.retpoline_safe
+- .long .Lhere_\@ - .
++ .long .Lhere_\@
+ .popsection
+ .endm
+
+@@ -271,11 +271,20 @@
+ .Lskip_rsb_\@:
+ .endm
+
+-#ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_UNTRAIN_RET "call entry_untrain_ret"
+-#else
+-#define CALL_UNTRAIN_RET ""
++/*
++ * The CALL to srso_alias_untrain_ret() must be patched in directly at
++ * the spot where untraining must be done, ie., srso_alias_untrain_ret()
++ * must be the target of a CALL instruction instead of indirectly
++ * jumping to a wrapper which then calls it. Therefore, this macro is
++ * called outside of __UNTRAIN_RET below, for the time being, before the
++ * kernel can support nested alternatives with arbitrary nesting.
++ */
++.macro CALL_UNTRAIN_RET
++#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
++ ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
++ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ #endif
++.endm
+
+ /*
+ * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
+@@ -288,38 +297,24 @@
+ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+ * where we have a stack but before any RET instruction.
+ */
+-.macro UNTRAIN_RET
+-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+- defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
++.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
++#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
+ VALIDATE_UNRET_END
+- ALTERNATIVE_3 "", \
+- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+- "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
+- __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
++ CALL_UNTRAIN_RET
++ ALTERNATIVE_2 "", \
++ "call entry_ibpb", \ibpb_feature, \
++ __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
+ #endif
+ .endm
+
+-.macro UNTRAIN_RET_VM
+-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+- defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+- VALIDATE_UNRET_END
+- ALTERNATIVE_3 "", \
+- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+- "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \
+- __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
+-#endif
+-.endm
++#define UNTRAIN_RET \
++ __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)
+
+-.macro UNTRAIN_RET_FROM_CALL
+-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+- defined(CONFIG_CALL_DEPTH_TRACKING)
+- VALIDATE_UNRET_END
+- ALTERNATIVE_3 "", \
+- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+- "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
+- __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
+-#endif
+-.endm
++#define UNTRAIN_RET_VM \
++ __UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)
++
++#define UNTRAIN_RET_FROM_CALL \
++ __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)
+
+
+ .macro CALL_DEPTH_ACCOUNT
+@@ -329,12 +324,45 @@
+ #endif
+ .endm
+
++/*
++ * Macro to execute VERW instruction that mitigate transient data sampling
++ * attacks such as MDS. On affected systems a microcode update overloaded VERW
++ * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
++ *
++ * Note: Only the memory operand variant of VERW clears the CPU buffers.
++ */
++.macro CLEAR_CPU_BUFFERS
++#ifdef CONFIG_X86_64
++ ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
++#else
++ /*
++ * In 32bit mode, the memory operand must be a %cs reference. The data
++ * segments may not be usable (vm86 mode), and the stack segment may not
++ * be flat (ESPFIX32).
++ */
++ ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
++#endif
++.endm
++
++#ifdef CONFIG_X86_64
++.macro CLEAR_BRANCH_HISTORY
++ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
++.endm
++
++.macro CLEAR_BRANCH_HISTORY_VMEXIT
++ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
++.endm
++#else
++#define CLEAR_BRANCH_HISTORY
++#define CLEAR_BRANCH_HISTORY_VMEXIT
++#endif
++
+ #else /* __ASSEMBLY__ */
+
+ #define ANNOTATE_RETPOLINE_SAFE \
+ "999:\n\t" \
+ ".pushsection .discard.retpoline_safe\n\t" \
+- ".long 999b - .\n\t" \
++ ".long 999b\n\t" \
+ ".popsection\n\t"
+
+ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+@@ -348,6 +376,22 @@ extern void __x86_return_thunk(void);
+ static inline void __x86_return_thunk(void) {}
+ #endif
+
++#ifdef CONFIG_CPU_UNRET_ENTRY
++extern void retbleed_return_thunk(void);
++#else
++static inline void retbleed_return_thunk(void) {}
++#endif
++
++extern void srso_alias_untrain_ret(void);
++
++#ifdef CONFIG_CPU_SRSO
++extern void srso_return_thunk(void);
++extern void srso_alias_return_thunk(void);
++#else
++static inline void srso_return_thunk(void) {}
++static inline void srso_alias_return_thunk(void) {}
++#endif
++
+ extern void retbleed_return_thunk(void);
+ extern void srso_return_thunk(void);
+ extern void srso_alias_return_thunk(void);
+@@ -359,6 +403,10 @@ extern void srso_alias_untrain_ret(void);
+ extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+
++#ifdef CONFIG_X86_64
++extern void clear_bhb_loop(void);
++#endif
++
+ extern void (*x86_return_thunk)(void);
+
+ #ifdef CONFIG_CALL_DEPTH_TRACKING
+@@ -538,13 +586,14 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+
++extern u16 mds_verw_sel;
++
+ #include <asm/segment.h>
+
+ /**
+@@ -570,17 +619,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
+ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+ }
+
+-/**
+- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+- *
+- * Clear CPU buffers if the corresponding static key is enabled
+- */
+-static __always_inline void mds_user_clear_cpu_buffers(void)
+-{
+- if (static_branch_likely(&mds_user_clear))
+- mds_clear_cpu_buffers();
+-}
+-
+ /**
+ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
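[Sketch, not part of the patch] The nospec-branch.h rework retires the mds_user_clear static key: return-to-user clearing is now done by the CLEAR_CPU_BUFFERS asm macro above (a VERW alternative patched into exit paths), while the idle path keeps its key. For reference, the surviving idle-path helper, sketched from the surrounding context, follows the same shape as the removed user-path one:

    static __always_inline void mds_idle_clear_cpu_buffers(void)
    {
            /* Only the idle path still gates VERW behind a static key. */
            if (static_branch_likely(&mds_idle_clear))
                    mds_clear_cpu_buffers();
    }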
+diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
+index e3bae2b60a0db9..ef2844d691735d 100644
+--- a/arch/x86/include/asm/numa.h
++++ b/arch/x86/include/asm/numa.h
+@@ -12,13 +12,6 @@
+
+ #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+-/*
+- * Too small node sizes may confuse the VM badly. Usually they
+- * result from BIOS bugs. So dont recognize nodes as standalone
+- * NUMA entities that have less than this amount of RAM listed:
+- */
+-#define NODE_MIN_SIZE (4*1024*1024)
+-
+ extern int numa_off;
+
+ /*
+diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
+index d18e5c332cb9f4..1b93ff80b43bcc 100644
+--- a/arch/x86/include/asm/page.h
++++ b/arch/x86/include/asm/page.h
+@@ -66,10 +66,14 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ * virt_addr_valid(kaddr) returns true.
+ */
+ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+ extern bool __virt_addr_valid(unsigned long kaddr);
+ #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
+
++static __always_inline void *pfn_to_kaddr(unsigned long pfn)
++{
++ return __va(pfn << PAGE_SHIFT);
++}
++
+ static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
+ {
+ return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
+index cc6b8e087192e4..9dab85aba7afd9 100644
+--- a/arch/x86/include/asm/page_64.h
++++ b/arch/x86/include/asm/page_64.h
+@@ -17,6 +17,7 @@ extern unsigned long phys_base;
+ extern unsigned long page_offset_base;
+ extern unsigned long vmalloc_base;
+ extern unsigned long vmemmap_base;
++extern unsigned long physmem_end;
+
+ static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
+ {
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index e02b179ec65989..d03fe4fb41f43c 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -387,23 +387,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
+ #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+ static inline int pte_uffd_wp(pte_t pte)
+ {
+- bool wp = pte_flags(pte) & _PAGE_UFFD_WP;
+-
+-#ifdef CONFIG_DEBUG_VM
+- /*
+- * Having write bit for wr-protect-marked present ptes is fatal,
+- * because it means the uffd-wp bit will be ignored and write will
+- * just go through.
+- *
+- * Use any chance of pgtable walking to verify this (e.g., when
+- * page swapped out or being migrated for all purposes). It means
+- * something is already wrong. Tell the admin even before the
+- * process crashes. We also nail it with wrong pgtable setup.
+- */
+- WARN_ON_ONCE(wp && pte_write(pte));
+-#endif
+-
+- return wp;
++ return pte_flags(pte) & _PAGE_UFFD_WP;
+ }
+
+ static inline pte_t pte_mkuffd_wp(pte_t pte)
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 38b54b992f32e3..35c416f061552b 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -140,6 +140,10 @@ extern unsigned int ptrs_per_p4d;
+ # define VMEMMAP_START __VMEMMAP_BASE_L4
+ #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
+
++#ifdef CONFIG_RANDOMIZE_MEMORY
++# define PHYSMEM_END physmem_end
++#endif
++
+ /*
+ * End of the region for which vmalloc page tables are pre-allocated.
+ * For non-KMSAN builds, this is the same as VMALLOC_END.
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 0b748ee16b3d94..b786449626267e 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -148,7 +148,7 @@
+ #define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+ _PAGE_SPECIAL | _PAGE_ACCESSED | \
+ _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \
+- _PAGE_DEVMAP | _PAGE_ENC | _PAGE_UFFD_WP)
++ _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP)
+ #define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
+ #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
+
+@@ -173,6 +173,7 @@ enum page_cache_mode {
+ };
+ #endif
+
++#define _PAGE_CC (_AT(pteval_t, cc_mask))
+ #define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
+
+ #define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
+@@ -566,6 +567,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
+ extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level);
++pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
++ unsigned int *level, bool *nx, bool *rw);
+ extern pmd_t *lookup_pmd_address(unsigned long address);
+ extern phys_addr_t slow_virt_to_phys(void *__address);
+ extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
+diff --git a/arch/x86/include/asm/posted_intr.h b/arch/x86/include/asm/posted_intr.h
+new file mode 100644
+index 00000000000000..f0324c56f7af51
+--- /dev/null
++++ b/arch/x86/include/asm/posted_intr.h
+@@ -0,0 +1,88 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _X86_POSTED_INTR_H
++#define _X86_POSTED_INTR_H
++
++#define POSTED_INTR_ON 0
++#define POSTED_INTR_SN 1
++
++#define PID_TABLE_ENTRY_VALID 1
++
++/* Posted-Interrupt Descriptor */
++struct pi_desc {
++ u32 pir[8]; /* Posted interrupt requested */
++ union {
++ struct {
++ /* bit 256 - Outstanding Notification */
++ u16 on : 1,
++ /* bit 257 - Suppress Notification */
++ sn : 1,
++ /* bit 271:258 - Reserved */
++ rsvd_1 : 14;
++ /* bit 279:272 - Notification Vector */
++ u8 nv;
++ /* bit 287:280 - Reserved */
++ u8 rsvd_2;
++ /* bit 319:288 - Notification Destination */
++ u32 ndst;
++ };
++ u64 control;
++ };
++ u32 rsvd[6];
++} __aligned(64);
++
++static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
++{
++ return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
++}
++
++static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
++{
++ return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
++}
++
++static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
++{
++ return test_and_clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
++}
++
++static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
++{
++ return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
++}
++
++static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
++{
++ return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
++}
++
++static inline void pi_set_sn(struct pi_desc *pi_desc)
++{
++ set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
++}
++
++static inline void pi_set_on(struct pi_desc *pi_desc)
++{
++ set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
++}
++
++static inline void pi_clear_on(struct pi_desc *pi_desc)
++{
++ clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
++}
++
++static inline void pi_clear_sn(struct pi_desc *pi_desc)
++{
++ clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
++}
++
++static inline bool pi_test_on(struct pi_desc *pi_desc)
++{
++ return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
++}
++
++static inline bool pi_test_sn(struct pi_desc *pi_desc)
++{
++ return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
++}
++
++#endif /* _X86_POSTED_INTR_H */
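[Sketch, not part of the patch] The new posted_intr.h hoists the posted-interrupt descriptor out of KVM-private code. A hedged sketch of the canonical posting sequence built from these accessors; the function name is illustrative and the notification send is elided:

    static void post_interrupt(struct pi_desc *pid, int vector)
    {
            /* 1. Mark the vector pending in the 256-bit PIR. */
            if (pi_test_and_set_pir(vector, pid))
                    return;                 /* already pending */

            /* 2. Set Outstanding Notification; if it was already set,
             *    a notification is in flight and will pick this up. */
            if (pi_test_and_set_on(pid))
                    return;

            /* 3. Send pid->nv as an IPI to pid->ndst (platform code). */
    }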
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index a3669a7774edbb..67ad64efa9263f 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -399,7 +399,7 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
+ return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
+ }
+
+-extern asmlinkage void ignore_sysret(void);
++extern asmlinkage void entry_SYSCALL32_ignore(void);
+
+ /* Save actual FS/GS selectors and bases to current->thread */
+ void current_save_fsgs(void);
+@@ -464,7 +464,6 @@ struct thread_struct {
+ unsigned long iopl_emul;
+
+ unsigned int iopl_warn:1;
+- unsigned int sig_on_uaccess_err:1;
+
+ /*
+ * Protection Keys Register for Userspace. Loaded immediately on
+@@ -734,4 +733,22 @@ bool arch_is_platform_page(u64 paddr);
+
+ extern bool gds_ucode_mitigated(void);
+
++/*
++ * Make previous memory operations globally visible before
++ * a WRMSR.
++ *
++ * MFENCE makes writes visible, but only affects load/store
++ * instructions. WRMSR is unfortunately not a load/store
++ * instruction and is unaffected by MFENCE. The LFENCE ensures
++ * that the WRMSR is not reordered.
++ *
++ * Most WRMSRs are full serializing instructions themselves and
++ * do not require this barrier. This is only required for the
++ * IA32_TSC_DEADLINE and X2APIC MSRs.
++ */
++static inline void weak_wrmsr_fence(void)
++{
++ alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
++}
++
+ #endif /* _ASM_X86_PROCESSOR_H */
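[Sketch, not part of the patch] weak_wrmsr_fence() pairs with the X86_FEATURE_APIC_MSRS_FENCE synthetic flag (cleared for AMD later in this patch), so the MFENCE;LFENCE pair is patched out on CPUs whose MSR writes are already ordered. Sketch of the intended call pattern; the wrapper name is assumed, the real callers live in the APIC code:

    static void arm_tsc_deadline(u64 deadline)
    {
            weak_wrmsr_fence();     /* make prior stores visible to the write */
            wrmsrl(MSR_IA32_TSC_DEADLINE, deadline);
    }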
+diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
+index 12ef86b19910d3..84294b66b91625 100644
+--- a/arch/x86/include/asm/proto.h
++++ b/arch/x86/include/asm/proto.h
+@@ -32,10 +32,6 @@ void entry_SYSCALL_compat(void);
+ void entry_SYSCALL_compat_safe_stack(void);
+ void entry_SYSRETL_compat_unsafe_stack(void);
+ void entry_SYSRETL_compat_end(void);
+-void entry_INT80_compat(void);
+-#ifdef CONFIG_XEN_PV
+-void xen_entry_INT80_compat(void);
+-#endif
+ #endif
+
+ void x86_configure_nx(void);
+diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
+index cde8357bb226d1..e897046c5d2c63 100644
+--- a/arch/x86/include/asm/qspinlock.h
++++ b/arch/x86/include/asm/qspinlock.h
+@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)
+
+ #ifdef CONFIG_PARAVIRT
+ /*
+- * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
++ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+- * Native (and PV wanting native due to vCPU pinning) should disable this key.
+- * It is done in this backwards fashion to only have a single direction change,
+- * which removes ordering between native_pv_spin_init() and HV setup.
++ * Native (and PV wanting native due to vCPU pinning) should keep this key
++ * disabled. Native does not touch the key.
++ *
++ * When in a guest then native_pv_lock_init() enables the key first and
++ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
++DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+ /*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
+index 7ba1726b71c7b8..e9187ddd3d1fdc 100644
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -99,6 +99,7 @@
+ #define REQUIRED_MASK18 0
+ #define REQUIRED_MASK19 0
+ #define REQUIRED_MASK20 0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
++#define REQUIRED_MASK21 0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
+index 4b081e0d3306b7..363266cbcadaf2 100644
+--- a/arch/x86/include/asm/rmwcc.h
++++ b/arch/x86/include/asm/rmwcc.h
+@@ -13,7 +13,7 @@
+ #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
+ ({ \
+ bool c = false; \
+- asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
++ asm goto (fullop "; j" #cc " %l[cc_label]" \
+ : : [var] "m" (_var), ## __VA_ARGS__ \
+ : clobbers : cc_label); \
+ if (0) { \
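[Sketch, not part of the patch] The asm_volatile_goto -> asm goto rename tracks the compiler.h cleanup; behaviour is unchanged. A self-contained illustration of the construct __GEN_RMWcc wraps, not the kernel macro itself:

    /* Decrement *v and report whether it hit zero, without SETcc. */
    static inline bool dec_and_test(int *v)
    {
            asm goto("decl %[var]; jz %l[zero]"
                     : : [var] "m" (*v) : "memory", "cc" : zero);
            return false;
    zero:
            return true;
    }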
+diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
+index 5b4a1ce3d36808..75a5388d40681e 100644
+--- a/arch/x86/include/asm/sev.h
++++ b/arch/x86/include/asm/sev.h
+@@ -199,16 +199,16 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
+ struct snp_guest_request_ioctl;
+
+ void setup_ghcb(void);
+-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+- unsigned long npages);
+-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+- unsigned long npages);
+-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
++void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
++ unsigned long npages);
++void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
++ unsigned long npages);
+ void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
+ void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
+ void snp_set_wakeup_secondary_cpu(void);
+ bool snp_init(struct boot_params *bp);
+-void __init __noreturn snp_abort(void);
++void __noreturn snp_abort(void);
++void snp_dmi_setup(void);
+ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
+ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+ u64 snp_get_unsupported_features(u64 status);
+@@ -227,12 +227,12 @@ static inline void __init
+ early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+ static inline void __init
+ early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
+-static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
+ static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
+ static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
+ static inline void snp_set_wakeup_secondary_cpu(void) { }
+ static inline bool snp_init(struct boot_params *bp) { return false; }
+ static inline void snp_abort(void) { }
++static inline void snp_dmi_setup(void) { }
+ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
+ {
+ return -ENOTTY;
+diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h
+index 42fee8959df7be..896909f306e306 100644
+--- a/arch/x86/include/asm/shstk.h
++++ b/arch/x86/include/asm/shstk.h
+@@ -21,6 +21,7 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clon
+ void shstk_free(struct task_struct *p);
+ int setup_signal_shadow_stack(struct ksignal *ksig);
+ int restore_signal_shadow_stack(void);
++int shstk_update_last_frame(unsigned long val);
+ #else
+ static inline long shstk_prctl(struct task_struct *task, int option,
+ unsigned long arg2) { return -EINVAL; }
+@@ -31,6 +32,7 @@ static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p,
+ static inline void shstk_free(struct task_struct *p) {}
+ static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; }
+ static inline int restore_signal_shadow_stack(void) { return 0; }
++static inline int shstk_update_last_frame(unsigned long val) { return 0; }
+ #endif /* CONFIG_X86_USER_SHADOW_STACK */
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index d6cd9344f6c78e..48f8dd47cf6882 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -205,7 +205,7 @@ static inline void clwb(volatile void *__p)
+ #ifdef CONFIG_X86_USER_SHADOW_STACK
+ static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+ {
+- asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
++ asm goto("1: wrussq %[val], (%[addr])\n"
+ _ASM_EXTABLE(1b, %l[fail])
+ :: [addr] "r" (addr), [val] "r" (val)
+ :: fail);
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index a800abb1a99255..d8416b3bf832e4 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -12,11 +12,6 @@
+
+ /* image of the saved processor state */
+ struct saved_context {
+- /*
+- * On x86_32, all segment registers except gs are saved at kernel
+- * entry in pt_regs.
+- */
+- u16 gs;
+ unsigned long cr0, cr2, cr3, cr4;
+ u64 misc_enable;
+ struct saved_msrs saved_msrs;
+@@ -27,6 +22,11 @@ struct saved_context {
+ unsigned long tr;
+ unsigned long safety;
+ unsigned long return_address;
++ /*
++ * On x86_32, all segment registers except gs are saved at kernel
++ * entry in pt_regs.
++ */
++ u16 gs;
+ bool misc_enable_saved;
+ } __attribute__((packed));
+
+diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
+index 4fb36fba4b5a1f..228a42585d5c97 100644
+--- a/arch/x86/include/asm/syscall.h
++++ b/arch/x86/include/asm/syscall.h
+@@ -16,19 +16,17 @@
+ #include <asm/thread_info.h> /* for TS_COMPAT */
+ #include <asm/unistd.h>
+
++/* This is used purely for kernel/trace/trace_syscalls.c */
+ typedef long (*sys_call_ptr_t)(const struct pt_regs *);
+ extern const sys_call_ptr_t sys_call_table[];
+
+-#if defined(CONFIG_X86_32)
+-#define ia32_sys_call_table sys_call_table
+-#else
+ /*
+ * These may not exist, but still put the prototypes in so we
+ * can use IS_ENABLED().
+ */
+-extern const sys_call_ptr_t ia32_sys_call_table[];
+-extern const sys_call_ptr_t x32_sys_call_table[];
+-#endif
++extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
++extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
++extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
+
+ /*
+ * Only the low 32 bits of orig_ax are meaningful, so we return int.
+@@ -84,7 +82,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+ {
+- memcpy(args, &regs->bx, 6 * sizeof(args[0]));
++ args[0] = regs->bx;
++ args[1] = regs->cx;
++ args[2] = regs->dx;
++ args[3] = regs->si;
++ args[4] = regs->di;
++ args[5] = regs->bp;
+ }
+
+ static inline int syscall_get_arch(struct task_struct *task)
+@@ -127,6 +130,7 @@ static inline int syscall_get_arch(struct task_struct *task)
+ }
+
+ void do_syscall_64(struct pt_regs *regs, int nr);
++void do_int80_emulation(struct pt_regs *regs);
+
+ #endif /* CONFIG_X86_32 */
+
+diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
+index fd2669b1cb2d95..7e88705e907f41 100644
+--- a/arch/x86/include/asm/syscall_wrapper.h
++++ b/arch/x86/include/asm/syscall_wrapper.h
+@@ -58,12 +58,29 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ ,,regs->di,,regs->si,,regs->dx \
+ ,,regs->r10,,regs->r8,,regs->r9) \
+
++
++/* SYSCALL_PT_ARGS is Adapted from s390x */
++#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
++ SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
++#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \
++ SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
++#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4) \
++ SYSCALL_PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
++#define SYSCALL_PT_ARG3(m, t1, t2, t3) \
++ SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
++#define SYSCALL_PT_ARG2(m, t1, t2) \
++ SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
++#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
++#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
++
++#define __SC_COMPAT_CAST(t, a) \
++ (__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U))) \
++ (unsigned int)a
++
+ /* Mapping of registers to parameters for syscalls on i386 */
+ #define SC_IA32_REGS_TO_ARGS(x, ...) \
+- __MAP(x,__SC_ARGS \
+- ,,(unsigned int)regs->bx,,(unsigned int)regs->cx \
+- ,,(unsigned int)regs->dx,,(unsigned int)regs->si \
+- ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
++ SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST, \
++ __MAP(x, __SC_TYPE, __VA_ARGS__)) \
+
+ #define __SYS_STUB0(abi, name) \
+ long __##abi##_##name(const struct pt_regs *regs); \
+@@ -86,9 +103,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ return sys_ni_syscall(); \
+ }
+
+-#define __SYS_NI(abi, name) \
+- SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);
+-
+ #ifdef CONFIG_X86_64
+ #define __X64_SYS_STUB0(name) \
+ __SYS_STUB0(x64, sys_##name)
+@@ -100,13 +114,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ #define __X64_COND_SYSCALL(name) \
+ __COND_SYSCALL(x64, sys_##name)
+
+-#define __X64_SYS_NI(name) \
+- __SYS_NI(x64, sys_##name)
+ #else /* CONFIG_X86_64 */
+ #define __X64_SYS_STUB0(name)
+ #define __X64_SYS_STUBx(x, name, ...)
+ #define __X64_COND_SYSCALL(name)
+-#define __X64_SYS_NI(name)
+ #endif /* CONFIG_X86_64 */
+
+ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+@@ -120,13 +131,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ #define __IA32_COND_SYSCALL(name) \
+ __COND_SYSCALL(ia32, sys_##name)
+
+-#define __IA32_SYS_NI(name) \
+- __SYS_NI(ia32, sys_##name)
+ #else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+ #define __IA32_SYS_STUB0(name)
+ #define __IA32_SYS_STUBx(x, name, ...)
+ #define __IA32_COND_SYSCALL(name)
+-#define __IA32_SYS_NI(name)
+ #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+ #ifdef CONFIG_IA32_EMULATION
+@@ -135,8 +143,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
+ * ia32 regs in the proper order for shared or "common" syscalls. As some
+ * syscalls may not be implemented, we need to expand COND_SYSCALL in
+- * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
+- * case as well.
++ * kernel/sys_ni.c to cover this case as well.
+ */
+ #define __IA32_COMPAT_SYS_STUB0(name) \
+ __SYS_STUB0(ia32, compat_sys_##name)
+@@ -148,14 +155,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ #define __IA32_COMPAT_COND_SYSCALL(name) \
+ __COND_SYSCALL(ia32, compat_sys_##name)
+
+-#define __IA32_COMPAT_SYS_NI(name) \
+- __SYS_NI(ia32, compat_sys_##name)
+-
+ #else /* CONFIG_IA32_EMULATION */
+ #define __IA32_COMPAT_SYS_STUB0(name)
+ #define __IA32_COMPAT_SYS_STUBx(x, name, ...)
+ #define __IA32_COMPAT_COND_SYSCALL(name)
+-#define __IA32_COMPAT_SYS_NI(name)
+ #endif /* CONFIG_IA32_EMULATION */
+
+
+@@ -175,13 +178,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ #define __X32_COMPAT_COND_SYSCALL(name) \
+ __COND_SYSCALL(x64, compat_sys_##name)
+
+-#define __X32_COMPAT_SYS_NI(name) \
+- __SYS_NI(x64, compat_sys_##name)
+ #else /* CONFIG_X86_X32_ABI */
+ #define __X32_COMPAT_SYS_STUB0(name)
+ #define __X32_COMPAT_SYS_STUBx(x, name, ...)
+ #define __X32_COMPAT_COND_SYSCALL(name)
+-#define __X32_COMPAT_SYS_NI(name)
+ #endif /* CONFIG_X86_X32_ABI */
+
+
+@@ -212,17 +212,12 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+
+ /*
+ * As some compat syscalls may not be implemented, we need to expand
+- * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
+- * kernel/time/posix-stubs.c to cover this case as well.
++ * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
+ */
+ #define COND_SYSCALL_COMPAT(name) \
+ __IA32_COMPAT_COND_SYSCALL(name) \
+ __X32_COMPAT_COND_SYSCALL(name)
+
+-#define COMPAT_SYS_NI(name) \
+- __IA32_COMPAT_SYS_NI(name) \
+- __X32_COMPAT_SYS_NI(name)
+-
+ #endif /* CONFIG_COMPAT */
+
+ #define __SYSCALL_DEFINEx(x, name, ...) \
+@@ -243,8 +238,8 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
+ * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
+ * hurt, we only need to re-define it here to keep the naming congruent to
+- * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
+- * macros to work correctly.
++ * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
++ * to work correctly.
+ */
+ #define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+@@ -257,10 +252,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+ __X64_COND_SYSCALL(name) \
+ __IA32_COND_SYSCALL(name)
+
+-#define SYS_NI(name) \
+- __X64_SYS_NI(name) \
+- __IA32_SYS_NI(name)
+-
+
+ /*
+ * For VSYSCALLS, we need to declare these three syscalls with the new
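[Annotation, not part of the patch] The SYSCALL_PT_ARGS/__SC_COMPAT_CAST machinery replaces blanket (unsigned int) casts with per-parameter truncate-then-retype. A worked expansion, assuming a 3-argument ia32 syscall:

    /*
     * SC_IA32_REGS_TO_ARGS(3, int, fd, void __user *, buf, size_t, count)
     * expands (roughly) to:
     *
     *   __SC_COMPAT_CAST(int,           (regs->bx)),
     *   __SC_COMPAT_CAST(void __user *, (regs->cx)),
     *   __SC_COMPAT_CAST(size_t,        (regs->dx))
     *
     * Each register is first truncated to unsigned int; the
     * __builtin_choose_expr then picks a signed 32-bit intermediate for
     * long-typed parameters (sign-extend) and an unsigned one otherwise
     * (zero-extend), so stale upper halves of 64-bit registers cannot
     * leak into long-sized arguments.
     */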
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 8bae40a662827d..3a7755c1a44102 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
+ int __ret_gu; \
+ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
+ __chk_user_ptr(ptr); \
+- asm volatile("call __" #fn "_%P4" \
++ asm volatile("call __" #fn "_%c[size]" \
+ : "=a" (__ret_gu), "=r" (__val_gu), \
+ ASM_CALL_CONSTRAINT \
+- : "0" (ptr), "i" (sizeof(*(ptr)))); \
++ : "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
+ instrument_get_user(__val_gu); \
+ (x) = (__force __typeof__(*(ptr))) __val_gu; \
+ __builtin_expect(__ret_gu, 0); \
+@@ -133,7 +133,7 @@ extern int __get_user_bad(void);
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_goto_u64(x, addr, label) \
+- asm_volatile_goto("\n" \
++ asm goto("\n" \
+ "1: movl %%eax,0(%1)\n" \
+ "2: movl %%edx,4(%1)\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
+@@ -177,7 +177,7 @@ extern void __put_user_nocheck_8(void);
+ __chk_user_ptr(__ptr); \
+ __ptr_pu = __ptr; \
+ __val_pu = __x; \
+- asm volatile("call __" #fn "_%P[size]" \
++ asm volatile("call __" #fn "_%c[size]" \
+ : "=c" (__ret_pu), \
+ ASM_CALL_CONSTRAINT \
+ : "0" (__ptr_pu), \
+@@ -295,7 +295,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, itype, ltype, label) \
+- asm_volatile_goto("\n" \
++ asm_goto_output("\n" \
+ "1: mov"itype" %[umem],%[output]\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
+ : [output] ltype(x) \
+@@ -375,7 +375,7 @@ do { \
+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
+ __typeof__(*(_ptr)) __old = *_old; \
+ __typeof__(*(_ptr)) __new = (_new); \
+- asm_volatile_goto("\n" \
++ asm_goto_output("\n" \
+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
+ _ASM_EXTABLE_UA(1b, %l[label]) \
+ : CC_OUT(z) (success), \
+@@ -394,7 +394,7 @@ do { \
+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
+ __typeof__(*(_ptr)) __old = *_old; \
+ __typeof__(*(_ptr)) __new = (_new); \
+- asm_volatile_goto("\n" \
++ asm_goto_output("\n" \
+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
+ _ASM_EXTABLE_UA(1b, %l[label]) \
+ : CC_OUT(z) (success), \
+@@ -477,7 +477,7 @@ struct __large_struct { unsigned long buf[100]; };
+ * aliasing issues.
+ */
+ #define __put_user_goto(x, addr, itype, ltype, label) \
+- asm_volatile_goto("\n" \
++ asm goto("\n" \
+ "1: mov"itype" %0,%1\n" \
+ _ASM_EXTABLE_UA(1b, %l2) \
+ : : ltype(x), "m" (__m(addr)) \
+@@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
+ #define copy_mc_to_kernel copy_mc_to_kernel
+
+ unsigned long __must_check
+-copy_mc_to_user(void *to, const void *from, unsigned len);
++copy_mc_to_user(void __user *to, const void *from, unsigned len);
+ #endif
+
+ /*
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index ab60a71a8dcb98..472f0263dbc612 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/seqlock.h>
+ #include <uapi/asm/vsyscall.h>
++#include <asm/page_types.h>
+
+ #ifdef CONFIG_X86_VSYSCALL_EMULATION
+ extern void map_vsyscall(void);
+@@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code,
+ }
+ #endif
+
++/*
++ * The (legacy) vsyscall page is the long page in the kernel portion
++ * of the address space that has user-accessible permissions.
++ */
++static inline bool is_vsyscall_vaddr(unsigned long vaddr)
++{
++ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
++}
++
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 5240d88db52a70..0fe4e482a97b17 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -30,12 +30,13 @@ struct x86_init_mpparse {
+ * @reserve_resources: reserve the standard resources for the
+ * platform
+ * @memory_setup: platform specific memory setup
+- *
++ * @dmi_setup: platform specific DMI setup
+ */
+ struct x86_init_resources {
+ void (*probe_roms)(void);
+ void (*reserve_resources)(void);
+ char *(*memory_setup)(void);
++ void (*dmi_setup)(void);
+ };
+
+ /**
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index 7048dfacc04b24..64fbd2dbc5b761 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -62,6 +62,11 @@ void xen_arch_unregister_cpu(int num);
+ #ifdef CONFIG_PVH
+ void __init xen_pvh_init(struct boot_params *boot_params);
+ void __init mem_map_via_hcall(struct boot_params *boot_params_p);
++#ifdef CONFIG_XEN_PVH
++void __init xen_reserve_extra_memory(struct boot_params *bootp);
++#else
++static inline void xen_reserve_extra_memory(struct boot_params *bootp) { }
++#endif
+ #endif
+
+ /* Lazy mode for batching updates / context switch */
+@@ -100,4 +105,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
+
+ enum xen_lazy_mode xen_get_lazy_mode(void);
+
++#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
++void xen_sanitize_proc_cap_bits(uint32_t *buf);
++#else
++static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
++{
++ BUG();
++}
++#endif
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
+diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
+index 01d19fc223463d..eeea058cf6028e 100644
+--- a/arch/x86/include/uapi/asm/bootparam.h
++++ b/arch/x86/include/uapi/asm/bootparam.h
+@@ -38,6 +38,7 @@
+ #define XLF_EFI_KEXEC (1<<4)
+ #define XLF_5LEVEL (1<<5)
+ #define XLF_5LEVEL_ENABLED (1<<6)
++#define XLF_MEM_ENCRYPTION (1<<7)
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index c55c0ef47a187e..49c39f5dc1c992 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1901,3 +1901,14 @@ u64 x86_default_get_root_pointer(void)
+ {
+ return boot_params.acpi_rsdp_addr;
+ }
++
++#ifdef CONFIG_XEN_PV
++void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
++{
++ return ioremap_cache(phys, size);
++}
++
++void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
++ x86_acpi_os_ioremap;
++EXPORT_SYMBOL_GPL(acpi_os_ioremap);
++#endif
+diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
+index 8d8752b44f1139..ff8f25faca3dd1 100644
+--- a/arch/x86/kernel/acpi/cppc.c
++++ b/arch/x86/kernel/acpi/cppc.c
+@@ -20,7 +20,7 @@ bool cpc_supported_by_cpu(void)
+ (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
+ return true;
+ else if (boot_cpu_data.x86 == 0x17 &&
+- boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
++ boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
+ return true;
+ return boot_cpu_has(X86_FEATURE_CPPC);
+ }
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 73be3931e4f060..aae7456ece0700 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -255,6 +255,16 @@ static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
+ }
+ }
+
++static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ optimize_nops(instr, len);
++ sync_core();
++ local_irq_restore(flags);
++}
++
+ /*
+ * In this context, "source" is where the instructions are placed in the
+ * section .altinstr_replacement, for example during kernel build by the
+@@ -438,7 +448,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+ * patch if feature is *NOT* present.
+ */
+ if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
+- optimize_nops(instr, a->instrlen);
++ optimize_nops_inplace(instr, a->instrlen);
+ continue;
+ }
+
+@@ -1685,8 +1695,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
+ } else {
+ local_irq_save(flags);
+ memcpy(addr, opcode, len);
+- local_irq_restore(flags);
+ sync_core();
++ local_irq_restore(flags);
+
+ /*
+ * Could also do a CLFLUSH here to speed up CPU recovery; but
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 356de955e78ddc..6dabb53f58a445 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -26,6 +26,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
+ #define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a
+ #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
++#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
+ #define PCI_DEVICE_ID_AMD_MI200_ROOT 0x14bb
+
+ #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
+@@ -61,6 +62,7 @@ static const struct pci_device_id amd_root_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
+ {}
+ };
+@@ -92,6 +94,8 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
+ {}
+ };
+@@ -112,6 +116,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
+@@ -206,7 +213,14 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
+
+ int amd_smn_read(u16 node, u32 address, u32 *value)
+ {
+- return __amd_smn_rw(node, address, value, false);
++ int err = __amd_smn_rw(node, address, value, false);
++
++ if (PCI_POSSIBLE_ERROR(*value)) {
++ err = -ENODEV;
++ *value = 0;
++ }
++
++ return err;
+ }
+ EXPORT_SYMBOL_GPL(amd_smn_read);
+
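[Sketch, not part of the patch] amd_smn_read() now treats an all-ones read as a missing device: PCI_POSSIBLE_ERROR() catches the ~0 master-abort pattern, the value is zeroed and -ENODEV returned. Caller-side sketch; the register name and use() are illustrative:

    u32 val;
    int err = amd_smn_read(node, SMN_SOME_REG, &val);

    if (err)
            return err;     /* val == 0; no 0xffffffff sentinel to misread */
    use(val);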
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 760adac3d1a824..00ca9c3c1d8bf8 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -36,6 +36,8 @@
+ #include <linux/smp.h>
+ #include <linux/mm.h>
+
++#include <xen/xen.h>
++
+ #include <asm/trace/irq_vectors.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/pc-conf-reg.h>
+@@ -471,7 +473,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
+ v = apic_read(APIC_LVTT);
+ v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write(APIC_LVTT, v);
+- apic_write(APIC_TMICT, 0);
++
++ /*
++ * Setting APIC_LVT_MASKED (above) should be enough to tell
++ * the hardware that this timer will never fire. But AMD
++ * erratum 411 and some Intel CPU behavior circa 2024 say
++ * otherwise. Time for belt and suspenders programming: mask
++ * the timer _and_ zero the counter registers:
++ */
++ if (v & APIC_LVT_TIMER_TSCDEADLINE)
++ wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
++ else
++ apic_write(APIC_TMICT, 0);
++
+ return 0;
+ }
+
+@@ -1722,11 +1736,11 @@ static int x2apic_state;
+
+ static bool x2apic_hw_locked(void)
+ {
+- u64 ia32_cap;
++ u64 x86_arch_cap_msr;
+ u64 msr;
+
+- ia32_cap = x86_read_arch_cap_msr();
+- if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
++ x86_arch_cap_msr = x86_read_arch_cap_msr();
++ if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
+ rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
+ return (msr & LEGACY_XAPIC_DISABLED);
+ }
+@@ -1806,16 +1820,13 @@ void x2apic_setup(void)
+ __x2apic_enable();
+ }
+
+-static __init void apic_set_fixmap(void);
++static __init void apic_set_fixmap(bool read_apic);
+
+ static __init void x2apic_disable(void)
+ {
+- u32 x2apic_id, state = x2apic_state;
++ u32 x2apic_id;
+
+- x2apic_mode = 0;
+- x2apic_state = X2APIC_DISABLED;
+-
+- if (state != X2APIC_ON)
++ if (x2apic_state < X2APIC_ON)
+ return;
+
+ x2apic_id = read_apic_id();
+@@ -1828,7 +1839,16 @@ static __init void x2apic_disable(void)
+ }
+
+ __x2apic_disable();
+- apic_set_fixmap();
++
++ x2apic_mode = 0;
++ x2apic_state = X2APIC_DISABLED;
++
++ /*
++ * Don't reread the APIC ID as it was already done from
++ * check_x2apic() and the APIC driver still is a x2APIC variant,
++ * which fails to do the read after x2APIC was disabled.
++ */
++ apic_set_fixmap(false);
+ }
+
+ static __init void x2apic_enable(void)
+@@ -2093,13 +2113,14 @@ void __init init_apic_mappings(void)
+ }
+ }
+
+-static __init void apic_set_fixmap(void)
++static __init void apic_set_fixmap(bool read_apic)
+ {
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+ apic_mmio_base = APIC_BASE;
+ apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+ apic_mmio_base, mp_lapic_addr);
+- apic_read_boot_cpu_id(false);
++ if (read_apic)
++ apic_read_boot_cpu_id(false);
+ }
+
+ void __init register_lapic_address(unsigned long address)
+@@ -2109,7 +2130,7 @@ void __init register_lapic_address(unsigned long address)
+ mp_lapic_addr = address;
+
+ if (!x2apic_mode)
+- apic_set_fixmap();
++ apic_set_fixmap(true);
+ }
+
+ /*
+@@ -2344,6 +2365,15 @@ static int __init smp_init_primary_thread_mask(void)
+ {
+ unsigned int cpu;
+
++ /*
++ * XEN/PV provides either none or useless topology information.
++ * Pretend that all vCPUs are primary threads.
++ */
++ if (xen_pv_domain()) {
++ cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
++ return 0;
++ }
++
+ for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
+ cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
+ return 0;
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 00da6cf6b07dcb..d0c5325d175102 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -352,27 +352,26 @@ static void ioapic_mask_entry(int apic, int pin)
+ * shared ISA-space IRQs, so we have to support them. We are super
+ * fast in the common case, and fast for shared ISA-space IRQs.
+ */
+-static int __add_pin_to_irq_node(struct mp_chip_data *data,
+- int node, int apic, int pin)
++static bool add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin)
+ {
+ struct irq_pin_list *entry;
+
+- /* don't allow duplicates */
+- for_each_irq_pin(entry, data->irq_2_pin)
++ /* Don't allow duplicates */
++ for_each_irq_pin(entry, data->irq_2_pin) {
+ if (entry->apic == apic && entry->pin == pin)
+- return 0;
++ return true;
++ }
+
+ entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
+ if (!entry) {
+- pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
+- node, apic, pin);
+- return -ENOMEM;
++ pr_err("Cannot allocate irq_pin_list (%d,%d,%d)\n", node, apic, pin);
++ return false;
+ }
++
+ entry->apic = apic;
+ entry->pin = pin;
+ list_add_tail(&entry->list, &data->irq_2_pin);
+-
+- return 0;
++ return true;
+ }
+
+ static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
+@@ -387,13 +386,6 @@ static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
+ }
+ }
+
+-static void add_pin_to_irq_node(struct mp_chip_data *data,
+- int node, int apic, int pin)
+-{
+- if (__add_pin_to_irq_node(data, node, apic, pin))
+- panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
+-}
+-
+ /*
+ * Reroute an IRQ to a different pin.
+ */
+@@ -1002,8 +994,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
+ if (irq_data && irq_data->parent_data) {
+ if (!mp_check_pin_attr(irq, info))
+ return -EBUSY;
+- if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
+- info->ioapic.pin))
++ if (!add_pin_to_irq_node(irq_data->chip_data, node, ioapic, info->ioapic.pin))
+ return -ENOMEM;
+ } else {
+ info->flags |= X86_IRQ_ALLOC_LEGACY;
+@@ -3037,10 +3028,8 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ return -ENOMEM;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
+- if (ret < 0) {
+- kfree(data);
+- return ret;
+- }
++ if (ret < 0)
++ goto free_data;
+
+ INIT_LIST_HEAD(&data->irq_2_pin);
+ irq_data->hwirq = info->ioapic.pin;
+@@ -3049,7 +3038,10 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ irq_data->chip_data = data;
+ mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);
+
+- add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
++ if (!add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin)) {
++ ret = -ENOMEM;
++ goto free_irqs;
++ }
+
+ mp_preconfigure_entry(data);
+ mp_register_handler(virq, data->is_level);
+@@ -3064,6 +3056,12 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ ioapic, mpc_ioapic_id(ioapic), pin, virq,
+ data->is_level, data->active_low);
+ return 0;
++
++free_irqs:
++ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
++free_data:
++ kfree(data);
++ return ret;
+ }
+
+ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 6b6b711678fe03..d9651f15ae4f70 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+ * caused by the non-atomic update of the address/data pair.
+ *
+ * Direct update is possible when:
+- * - The MSI is maskable (remapped MSI does not use this code path)).
+- * The quirk bit is not set in this case.
++ * - The MSI is maskable (remapped MSI does not use this code path).
++ * The reservation mode bit is set in this case.
+ * - The new vector is the same as the old vector
+ * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ * - The interrupt is not yet started up
+ * - The new destination CPU is the same as the old destination CPU
+ */
+- if (!irqd_msi_nomask_quirk(irqd) ||
++ if (!irqd_can_reserve(irqd) ||
+ cfg->vector == old_cfg.vector ||
+ old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ !irqd_is_started(irqd) ||
+@@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+ info->chip->irq_set_affinity = msi_set_affinity;
+- /* See msi_set_affinity() for the gory details */
+- info->flags |= MSI_FLAG_NOMASK_QUIRK;
+ break;
+ case DOMAIN_BUS_DMAR:
+ case DOMAIN_BUS_AMDVI:
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 319448d87b99a7..218ef9072c0c61 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -1036,7 +1036,8 @@ static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
+ add_timer_on(&cl->timer, cpu);
+ }
+ } else {
+- apicd->prev_vector = 0;
++ pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
++ free_moved_vector(apicd);
+ }
+ raw_spin_unlock(&vector_lock);
+ }
+@@ -1073,6 +1074,7 @@ void irq_complete_move(struct irq_cfg *cfg)
+ */
+ void irq_force_complete_move(struct irq_desc *desc)
+ {
++ unsigned int cpu = smp_processor_id();
+ struct apic_chip_data *apicd;
+ struct irq_data *irqd;
+ unsigned int vector;
+@@ -1097,10 +1099,11 @@ void irq_force_complete_move(struct irq_desc *desc)
+ goto unlock;
+
+ /*
+- * If prev_vector is empty, no action required.
++ * If prev_vector is empty or the descriptor is neither currently
++ * nor previously on the outgoing CPU no action required.
+ */
+ vector = apicd->prev_vector;
+- if (!vector)
++ if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
+ goto unlock;
+
+ /*
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index ece2b5b7b0fe4e..145c81c68394be 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -66,20 +66,6 @@ static const int amd_erratum_400[] =
+ static const int amd_erratum_383[] =
+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+
+-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+-static const int amd_erratum_1054[] =
+- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+-
+-static const int amd_zenbleed[] =
+- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+- AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+- AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
+- AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+-
+-static const int amd_div0[] =
+- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
+- AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+-
+ static const int amd_erratum_1485[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
+ AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
+@@ -620,6 +606,49 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ }
+
+ resctrl_cpu_detect(c);
++
++ /* Figure out Zen generations: */
++ switch (c->x86) {
++ case 0x17: {
++ switch (c->x86_model) {
++ case 0x00 ... 0x2f:
++ case 0x50 ... 0x5f:
++ setup_force_cpu_cap(X86_FEATURE_ZEN1);
++ break;
++ case 0x30 ... 0x4f:
++ case 0x60 ... 0x7f:
++ case 0x90 ... 0x91:
++ case 0xa0 ... 0xaf:
++ setup_force_cpu_cap(X86_FEATURE_ZEN2);
++ break;
++ default:
++ goto warn;
++ }
++ break;
++ }
++ case 0x19: {
++ switch (c->x86_model) {
++ case 0x00 ... 0x0f:
++ case 0x20 ... 0x5f:
++ setup_force_cpu_cap(X86_FEATURE_ZEN3);
++ break;
++ case 0x10 ... 0x1f:
++ case 0x60 ... 0xaf:
++ setup_force_cpu_cap(X86_FEATURE_ZEN4);
++ break;
++ default:
++ goto warn;
++ }
++ break;
++ }
++ default:
++ break;
++ }
++
++ return;
++
++warn:
++ WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
+ }
+
+ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+@@ -945,6 +974,19 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ clear_rdrand_cpuid_bit(c);
+ }
+
++static void fix_erratum_1386(struct cpuinfo_x86 *c)
++{
++ /*
++ * Work around Erratum 1386. The XSAVES instruction malfunctions in
++ * certain circumstances on Zen1/2 uarch, and not all parts have had
++ * updated microcode at the time of writing (March 2023).
++ *
++ * Affected parts all have no supervisor XSAVE states, meaning that
++ * the XSAVEC instruction (which works fine) is equivalent.
++ */
++ clear_cpu_cap(c, X86_FEATURE_XSAVES);
++}
++
+ void init_spectral_chicken(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+@@ -965,24 +1007,19 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
+ }
+ }
+ #endif
+- /*
+- * Work around Erratum 1386. The XSAVES instruction malfunctions in
+- * certain circumstances on Zen1/2 uarch, and not all parts have had
+- * updated microcode at the time of writing (March 2023).
+- *
+- * Affected parts all have no supervisor XSAVE states, meaning that
+- * the XSAVEC instruction (which works fine) is equivalent.
+- */
+- clear_cpu_cap(c, X86_FEATURE_XSAVES);
+ }
+
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
+- set_cpu_cap(c, X86_FEATURE_ZEN);
+-
++ setup_force_cpu_cap(X86_FEATURE_ZEN);
+ #ifdef CONFIG_NUMA
+ node_reclaim_distance = 32;
+ #endif
++}
++
++static void init_amd_zen1(struct cpuinfo_x86 *c)
++{
++ fix_erratum_1386(c);
+
+ /* Fix up CPUID bits, but only if not virtualised. */
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+@@ -999,6 +1036,9 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
+ if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
+ set_cpu_cap(c, X86_FEATURE_BTC_NO);
+ }
++
++ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
++ setup_force_cpu_bug(X86_BUG_DIV0);
+ }
+
+ static bool cpu_has_zenbleed_microcode(void)
+@@ -1006,11 +1046,11 @@ static bool cpu_has_zenbleed_microcode(void)
+ u32 good_rev = 0;
+
+ switch (boot_cpu_data.x86_model) {
+- case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
+- case 0x60 ... 0x67: good_rev = 0x0860010b; break;
+- case 0x68 ... 0x6f: good_rev = 0x08608105; break;
+- case 0x70 ... 0x7f: good_rev = 0x08701032; break;
+- case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
++ case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
++ case 0x60 ... 0x67: good_rev = 0x0860010c; break;
++ case 0x68 ... 0x6f: good_rev = 0x08608107; break;
++ case 0x70 ... 0x7f: good_rev = 0x08701033; break;
++ case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
+
+ default:
+ return false;
+@@ -1023,11 +1063,8 @@ static bool cpu_has_zenbleed_microcode(void)
+ return true;
+ }
+
+-static void zenbleed_check(struct cpuinfo_x86 *c)
++static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
+ {
+- if (!cpu_has_amd_erratum(c, amd_zenbleed))
+- return;
+-
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return;
+
+@@ -1042,6 +1079,20 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
+ }
+ }
+
++static void init_amd_zen2(struct cpuinfo_x86 *c)
++{
++ fix_erratum_1386(c);
++ zen2_zenbleed_check(c);
++}
++
++static void init_amd_zen3(struct cpuinfo_x86 *c)
++{
++}
++
++static void init_amd_zen4(struct cpuinfo_x86 *c)
++{
++}
++
+ static void init_amd(struct cpuinfo_x86 *c)
+ {
+ early_init_amd(c);
+@@ -1080,6 +1131,15 @@ static void init_amd(struct cpuinfo_x86 *c)
+ case 0x19: init_amd_zn(c); break;
+ }
+
++ if (boot_cpu_has(X86_FEATURE_ZEN1))
++ init_amd_zen1(c);
++ else if (boot_cpu_has(X86_FEATURE_ZEN2))
++ init_amd_zen2(c);
++ else if (boot_cpu_has(X86_FEATURE_ZEN3))
++ init_amd_zen3(c);
++ else if (boot_cpu_has(X86_FEATURE_ZEN4))
++ init_amd_zen4(c);
++
+ /*
+ * Enable workaround for FXSAVE leak on CPUs
+ * without a XSaveErPtr feature
+@@ -1131,7 +1191,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ * Counter May Be Inaccurate".
+ */
+ if (cpu_has(c, X86_FEATURE_IRPERF) &&
+- !cpu_has_amd_erratum(c, amd_erratum_1054))
++ (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
+ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+
+ check_null_seg_clears_base(c);
+@@ -1147,16 +1207,12 @@ static void init_amd(struct cpuinfo_x86 *c)
+ cpu_has(c, X86_FEATURE_AUTOIBRS))
+ WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
+
+- zenbleed_check(c);
+-
+- if (cpu_has_amd_erratum(c, amd_div0)) {
+- pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+- setup_force_cpu_bug(X86_BUG_DIV0);
+- }
+-
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+ cpu_has_amd_erratum(c, amd_erratum_1485))
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
++
++ /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
++ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -1310,12 +1366,16 @@ static void zenbleed_check_cpu(void *unused)
+ {
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+- zenbleed_check(c);
++ zen2_zenbleed_check(c);
+ }
+
+ void amd_check_microcode(void)
+ {
+- on_each_cpu(zenbleed_check_cpu, NULL, 1);
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++ return;
++
++ if (cpu_feature_enabled(X86_FEATURE_ZEN2))
++ on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 10499bcd4e3962..7b5ba5b8592a25 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -61,9 +61,11 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
++static u64 __ro_after_init x86_arch_cap_msr;
++
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
+-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
++void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
+
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+@@ -111,9 +113,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ /* Control unconditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+-/* Control MDS CPU buffer clear before returning to user space */
+-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+-EXPORT_SYMBOL_GPL(mds_user_clear);
+ /* Control MDS CPU buffer clear before idling (halt, mwait) */
+ DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+ EXPORT_SYMBOL_GPL(mds_idle_clear);
+@@ -147,6 +146,8 @@ void __init cpu_select_mitigations(void)
+ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
+ }
+
++ x86_arch_cap_msr = x86_read_arch_cap_msr();
++
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
+ spectre_v2_select_mitigation();
+@@ -252,7 +253,7 @@ static void __init mds_select_mitigation(void)
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VMWERV;
+
+- static_branch_enable(&mds_user_clear);
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
+ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+@@ -304,8 +305,6 @@ static const char * const taa_strings[] = {
+
+ static void __init taa_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+@@ -344,9 +343,8 @@ static void __init taa_select_mitigation(void)
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+- ia32_cap = x86_read_arch_cap_msr();
+- if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+- !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
++ if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
++ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+@@ -356,7 +354,7 @@ static void __init taa_select_mitigation(void)
+ * For guests that can't determine whether the correct microcode is
+ * present on host, enable the mitigation for UCODE_NEEDED as well.
+ */
+- static_branch_enable(&mds_user_clear);
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+@@ -404,8 +402,6 @@ static const char * const mmio_strings[] = {
+
+ static void __init mmio_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+ cpu_mitigations_off()) {
+@@ -416,15 +412,20 @@ static void __init mmio_select_mitigation(void)
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return;
+
+- ia32_cap = x86_read_arch_cap_msr();
+-
+ /*
+ * Enable CPU buffer clear mitigation for host and VMM, if also affected
+ * by MDS or TAA. Otherwise, enable mitigation for VMM only.
+ */
+ if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
+ boot_cpu_has(X86_FEATURE_RTM)))
+- static_branch_enable(&mds_user_clear);
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++
++ /*
++	 * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW-based
++	 * mitigations; disable the KVM-only mitigation in that case.
++ */
++ if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
++ static_branch_disable(&mmio_stale_data_clear);
+ else
+ static_branch_enable(&mmio_stale_data_clear);
+
+@@ -433,7 +434,7 @@ static void __init mmio_select_mitigation(void)
+ * be propagated to uncore buffers, clearing the Fill buffers on idle
+ * is required irrespective of SMT state.
+ */
+- if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
++ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+ static_branch_enable(&mds_idle_clear);
+
+ /*
+@@ -443,10 +444,10 @@ static void __init mmio_select_mitigation(void)
+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ * affected systems.
+ */
+- if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
++ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+- !(ia32_cap & ARCH_CAP_MDS_NO)))
++ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+ else
+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+@@ -476,6 +477,57 @@ static int __init mmio_stale_data_parse_cmdline(char *str)
+ }
+ early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
+
++#undef pr_fmt
++#define pr_fmt(fmt) "Register File Data Sampling: " fmt
++
++enum rfds_mitigations {
++ RFDS_MITIGATION_OFF,
++ RFDS_MITIGATION_VERW,
++ RFDS_MITIGATION_UCODE_NEEDED,
++};
++
++/* Default mitigation for Register File Data Sampling */
++static enum rfds_mitigations rfds_mitigation __ro_after_init =
++ IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
++
++static const char * const rfds_strings[] = {
++ [RFDS_MITIGATION_OFF] = "Vulnerable",
++ [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
++ [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
++};
++
++static void __init rfds_select_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
++ rfds_mitigation = RFDS_MITIGATION_OFF;
++ return;
++ }
++ if (rfds_mitigation == RFDS_MITIGATION_OFF)
++ return;
++
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++ else
++ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
++}
++
++static __init int rfds_parse_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!boot_cpu_has_bug(X86_BUG_RFDS))
++ return 0;
++
++ if (!strcmp(str, "off"))
++ rfds_mitigation = RFDS_MITIGATION_OFF;
++ else if (!strcmp(str, "on"))
++ rfds_mitigation = RFDS_MITIGATION_VERW;
++
++ return 0;
++}
++early_param("reg_file_data_sampling", rfds_parse_cmdline);
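++
++/*
++ * Illustrative usage (not spelled out in the patch): booting with
++ * "reg_file_data_sampling=off" opts out on affected CPUs, while
++ * "reg_file_data_sampling=on" requests the VERW-based mitigation, which
++ * still requires ARCH_CAP_RFDS_CLEAR microcode support to take effect.
++ */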
++
+ #undef pr_fmt
+ #define pr_fmt(fmt) "" fmt
+
+@@ -484,12 +536,12 @@ static void __init md_clear_update_mitigation(void)
+ if (cpu_mitigations_off())
+ return;
+
+- if (!static_key_enabled(&mds_user_clear))
++ if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
+ goto out;
+
+ /*
+- * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
+- * mitigation, if necessary.
++ * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
++ * Stale Data mitigation, if necessary.
+ */
+ if (mds_mitigation == MDS_MITIGATION_OFF &&
+ boot_cpu_has_bug(X86_BUG_MDS)) {
+@@ -501,11 +553,19 @@ static void __init md_clear_update_mitigation(void)
+ taa_mitigation = TAA_MITIGATION_VERW;
+ taa_select_mitigation();
+ }
+- if (mmio_mitigation == MMIO_MITIGATION_OFF &&
+- boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
++ /*
++ * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
++ * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
++ */
++ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+ mmio_select_mitigation();
+ }
++ if (rfds_mitigation == RFDS_MITIGATION_OFF &&
++ boot_cpu_has_bug(X86_BUG_RFDS)) {
++ rfds_mitigation = RFDS_MITIGATION_VERW;
++ rfds_select_mitigation();
++ }
+ out:
+ if (boot_cpu_has_bug(X86_BUG_MDS))
+ pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+@@ -515,6 +575,8 @@ static void __init md_clear_update_mitigation(void)
+ pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
++ if (boot_cpu_has_bug(X86_BUG_RFDS))
++ pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
+ }
+
+ static void __init md_clear_select_mitigation(void)
+@@ -522,11 +584,12 @@ static void __init md_clear_select_mitigation(void)
+ mds_select_mitigation();
+ taa_select_mitigation();
+ mmio_select_mitigation();
++ rfds_select_mitigation();
+
+ /*
+- * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
+- * and print their mitigation after MDS, TAA and MMIO Stale Data
+- * mitigation selection is done.
++	 * As these mitigations are inter-related and rely on the VERW
++	 * instruction to clear the microarchitectural buffers, update and
++	 * print their status after mitigation selection is done for each
++	 * of these vulnerabilities.
+ */
+ md_clear_update_mitigation();
+ }
+@@ -593,8 +656,6 @@ void update_srbds_msr(void)
+
+ static void __init srbds_select_mitigation(void)
+ {
+- u64 ia32_cap;
+-
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+@@ -603,8 +664,7 @@ static void __init srbds_select_mitigation(void)
+ * are only exposed to SRBDS when TSX is enabled or when CPU is affected
+ * by Processor MMIO Stale Data vulnerability.
+ */
+- ia32_cap = x86_read_arch_cap_msr();
+- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
++ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+@@ -747,7 +807,7 @@ static void __init gds_select_mitigation(void)
+ /* Will verify below that mitigation _can_ be disabled */
+
+ /* No microcode */
+- if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
++ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
+ if (gds_mitigation == GDS_MITIGATION_FORCE) {
+ /*
+ * This only needs to be done on the boot CPU so do it
+@@ -1042,8 +1102,7 @@ static void __init retbleed_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+- if (IS_ENABLED(CONFIG_RETHUNK))
+- x86_return_thunk = retbleed_return_thunk;
++ x86_return_thunk = retbleed_return_thunk;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+@@ -1054,8 +1113,25 @@ static void __init retbleed_select_mitigation(void)
+
+ case RETBLEED_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++
++ /*
++ * IBPB on entry already obviates the need for
++	 * software-based untraining, so clear those in case some
++ * other mitigation like SRSO has selected them.
++ */
++ setup_clear_cpu_cap(X86_FEATURE_UNRET);
++ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ mitigate_smt = true;
++
++ /*
++ * There is no need for RSB filling: entry_ibpb() ensures
++ * all predictions, including the RSB, are invalidated,
++ * regardless of IBPB implementation.
++ */
++ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
++
+ break;
+
+ case RETBLEED_MITIGATION_STUFF:
+@@ -1478,20 +1554,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
+ return SPECTRE_V2_RETPOLINE;
+ }
+
++static bool __ro_after_init rrsba_disabled;
++
+ /* Disable in-kernel use of non-RSB RET predictors */
+ static void __init spec_ctrl_disable_kernel_rrsba(void)
+ {
+- u64 ia32_cap;
++ if (rrsba_disabled)
++ return;
+
+- if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
++ if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
++ rrsba_disabled = true;
+ return;
++ }
+
+- ia32_cap = x86_read_arch_cap_msr();
++ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
++ return;
+
+- if (ia32_cap & ARCH_CAP_RRSBA) {
+- x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+- update_spec_ctrl(x86_spec_ctrl_base);
+- }
++ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
++ update_spec_ctrl(x86_spec_ctrl_base);
++ rrsba_disabled = true;
+ }
+
+ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+@@ -1541,6 +1622,74 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
+ dump_stack();
+ }
+
++/*
++ * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by
++ * branch history in userspace. Not needed if BHI_NO is set.
++ */
++static bool __init spec_ctrl_bhi_dis(void)
++{
++ if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
++ return false;
++
++ x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
++ update_spec_ctrl(x86_spec_ctrl_base);
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
++
++ return true;
++}
++
++enum bhi_mitigations {
++ BHI_MITIGATION_OFF,
++ BHI_MITIGATION_ON,
++};
++
++static enum bhi_mitigations bhi_mitigation __ro_after_init =
++ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
++
++static int __init spectre_bhi_parse_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ bhi_mitigation = BHI_MITIGATION_OFF;
++ else if (!strcmp(str, "on"))
++ bhi_mitigation = BHI_MITIGATION_ON;
++ else
++ pr_err("Ignoring unknown spectre_bhi option (%s)", str);
++
++ return 0;
++}
++early_param("spectre_bhi", spectre_bhi_parse_cmdline);
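++
++/*
++ * Illustrative usage: "spectre_bhi=off" disables the BHI mitigation;
++ * "spectre_bhi=on" (the default when CONFIG_MITIGATION_SPECTRE_BHI=y)
++ * lets bhi_select_mitigation() pick hardware BHI_DIS_S or the SW
++ * clearing loop below.
++ */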
++
++static void __init bhi_select_mitigation(void)
++{
++ if (bhi_mitigation == BHI_MITIGATION_OFF)
++ return;
++
++ /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
++ if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
++ spec_ctrl_disable_kernel_rrsba();
++ if (rrsba_disabled)
++ return;
++ }
++
++ if (spec_ctrl_bhi_dis())
++ return;
++
++ if (!IS_ENABLED(CONFIG_X86_64))
++ return;
++
++ /* Mitigate KVM by default */
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
++ pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
++
++ /* Mitigate syscalls when the mitigation is forced =on */
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
++ pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -1652,6 +1801,9 @@ static void __init spectre_v2_select_mitigation(void)
+ mode == SPECTRE_V2_RETPOLINE)
+ spec_ctrl_disable_kernel_rrsba();
+
++ if (boot_cpu_has(X86_BUG_BHI))
++ bhi_select_mitigation();
++
+ spectre_v2_enabled = mode;
+ pr_info("%s\n", spectre_v2_strings[mode]);
+
+@@ -1766,8 +1918,6 @@ static void update_indir_branch_cond(void)
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+- u64 ia32_cap = x86_read_arch_cap_msr();
+-
+ /*
+ * Enable the idle clearing if SMT is active on CPUs which are
+ * affected only by MSBDS and not any other MDS variant.
+@@ -1782,7 +1932,7 @@ static void update_mds_branch_idle(void)
+ if (sched_smt_active()) {
+ static_branch_enable(&mds_idle_clear);
+ } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+- (ia32_cap & ARCH_CAP_FBSDP_NO)) {
++ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+ static_branch_disable(&mds_idle_clear);
+ }
+ }
+@@ -2353,6 +2503,8 @@ early_param("l1tf", l1tf_cmdline);
+
+ enum srso_mitigation {
+ SRSO_MITIGATION_NONE,
++ SRSO_MITIGATION_UCODE_NEEDED,
++ SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
+ SRSO_MITIGATION_MICROCODE,
+ SRSO_MITIGATION_SAFE_RET,
+ SRSO_MITIGATION_IBPB,
+@@ -2368,11 +2520,13 @@ enum srso_mitigation_cmd {
+ };
+
+ static const char * const srso_strings[] = {
+- [SRSO_MITIGATION_NONE] = "Vulnerable",
+- [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode",
+- [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET",
+- [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
+- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
++ [SRSO_MITIGATION_NONE] = "Vulnerable",
++ [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
++ [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
++ [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
++ [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
++ [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
++ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+ };
+
+ static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+@@ -2409,10 +2563,7 @@ static void __init srso_select_mitigation(void)
+ if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
+ goto pred_cmd;
+
+- if (!has_microcode) {
+- pr_warn("IBPB-extending microcode not applied!\n");
+- pr_warn(SRSO_NOTICE);
+- } else {
++ if (has_microcode) {
+ /*
+ * Zen1/2 with SMT off aren't vulnerable after the right
+ * IBPB microcode has been applied.
+@@ -2421,14 +2572,17 @@ static void __init srso_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+ return;
+ }
+- }
+
+- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+- if (has_microcode) {
+- pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
++ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+- goto pred_cmd;
++ goto out;
+ }
++ } else {
++ pr_warn("IBPB-extending microcode not applied!\n");
++ pr_warn(SRSO_NOTICE);
++
++ /* may be overwritten by SRSO_CMD_SAFE_RET below */
++ srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
+ }
+
+ switch (srso_cmd) {
+@@ -2458,7 +2612,10 @@ static void __init srso_select_mitigation(void)
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ x86_return_thunk = srso_return_thunk;
+ }
+- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++ if (has_microcode)
++ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
++ else
++ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+ goto pred_cmd;
+@@ -2470,6 +2627,14 @@ static void __init srso_select_mitigation(void)
+ if (has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ srso_mitigation = SRSO_MITIGATION_IBPB;
++
++ /*
++ * IBPB on entry already obviates the need for
++ * software-based untraining so clear those in case some
++	 * software-based untraining, so clear those in case some
++ */
++ setup_clear_cpu_cap(X86_FEATURE_UNRET);
++ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ }
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+@@ -2482,6 +2647,13 @@ static void __init srso_select_mitigation(void)
+ if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
++
++ /*
++ * There is no need for RSB filling: entry_ibpb() ensures
++ * all predictions, including the RSB, are invalidated,
++ * regardless of IBPB implementation.
++ */
++ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ }
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+@@ -2493,10 +2665,11 @@ static void __init srso_select_mitigation(void)
+ break;
+ }
+
+- pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
++out:
++ pr_info("%s\n", srso_strings[srso_mitigation]);
+
+ pred_cmd:
+- if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
++ if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
+ boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ }
+@@ -2608,6 +2781,11 @@ static ssize_t mmio_stale_data_show_state(char *buf)
+ sched_smt_active() ? "vulnerable" : "disabled");
+ }
+
++static ssize_t rfds_show_state(char *buf)
++{
++ return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
++}
++
+ static char *stibp_state(void)
+ {
+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+@@ -2616,15 +2794,15 @@ static char *stibp_state(void)
+
+ switch (spectre_v2_user_stibp) {
+ case SPECTRE_V2_USER_NONE:
+- return ", STIBP: disabled";
++ return "; STIBP: disabled";
+ case SPECTRE_V2_USER_STRICT:
+- return ", STIBP: forced";
++ return "; STIBP: forced";
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
+- return ", STIBP: always-on";
++ return "; STIBP: always-on";
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ if (static_key_enabled(&switch_to_cond_stibp))
+- return ", STIBP: conditional";
++ return "; STIBP: conditional";
+ }
+ return "";
+ }
+@@ -2633,10 +2811,10 @@ static char *ibpb_state(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ if (static_key_enabled(&switch_mm_always_ibpb))
+- return ", IBPB: always-on";
++ return "; IBPB: always-on";
+ if (static_key_enabled(&switch_mm_cond_ibpb))
+- return ", IBPB: conditional";
+- return ", IBPB: disabled";
++ return "; IBPB: conditional";
++ return "; IBPB: disabled";
+ }
+ return "";
+ }
+@@ -2646,14 +2824,32 @@ static char *pbrsb_eibrs_state(void)
+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+ if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+ boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
+- return ", PBRSB-eIBRS: SW sequence";
++ return "; PBRSB-eIBRS: SW sequence";
+ else
+- return ", PBRSB-eIBRS: Vulnerable";
++ return "; PBRSB-eIBRS: Vulnerable";
+ } else {
+- return ", PBRSB-eIBRS: Not affected";
++ return "; PBRSB-eIBRS: Not affected";
+ }
+ }
+
++static const char *spectre_bhi_state(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_BHI))
++ return "; BHI: Not affected";
++ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
++ return "; BHI: BHI_DIS_S";
++ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
++ return "; BHI: SW loop, KVM: SW loop";
++ else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
++ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
++ rrsba_disabled)
++ return "; BHI: Retpoline";
++ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
++ return "; BHI: Vulnerable, KVM: SW loop";
++
++ return "; BHI: Vulnerable";
++}
++
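++/*
++ * With the "; " separators above, a fully mitigated eIBRS system might
++ * report (illustrative example, exact strings depend on configuration):
++ * "Mitigation: Enhanced / Automatic IBRS; IBPB: conditional;
++ * STIBP: always-on; RSB filling; PBRSB-eIBRS: Not affected;
++ * BHI: BHI_DIS_S"
++ */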
+ static ssize_t spectre_v2_show_state(char *buf)
+ {
+ if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
+@@ -2666,13 +2862,15 @@ static ssize_t spectre_v2_show_state(char *buf)
+ spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+ return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
+
+- return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
++ return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
+ spectre_v2_strings[spectre_v2_enabled],
+ ibpb_state(),
+- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
+ stibp_state(),
+- boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
+ pbrsb_eibrs_state(),
++ spectre_bhi_state(),
++ /* this should always be at the end */
+ spectre_v2_module_string());
+ }
+
+@@ -2704,9 +2902,7 @@ static ssize_t srso_show_state(char *buf)
+ if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+ return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+
+- return sysfs_emit(buf, "%s%s\n",
+- srso_strings[srso_mitigation],
+- boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
++ return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
+ }
+
+ static ssize_t gds_show_state(char *buf)
+@@ -2769,6 +2965,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ case X86_BUG_GDS:
+ return gds_show_state(buf);
+
++ case X86_BUG_RFDS:
++ return rfds_show_state(buf);
++
+ default:
+ break;
+ }
+@@ -2843,4 +3042,9 @@ ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *bu
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
+ }
++
++ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
++}
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 4e5ffc8b0e469d..7a1e58fb43a033 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1165,6 +1165,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_SPECTRE_V2 BIT(8)
+ #define NO_MMIO BIT(9)
+ #define NO_EIBRS_PBRSB BIT(10)
++#define NO_BHI BIT(11)
+
+ #define VULNWL(vendor, family, model, whitelist) \
+ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
+@@ -1227,18 +1228,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+
+ /* AMD Family 0xf - 0x12 */
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
+
+ /* Zhaoxin Family 7 */
+- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
++ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
++ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
+ {}
+ };
+
+@@ -1269,6 +1270,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ #define SRSO BIT(5)
+ /* CPU is affected by GDS */
+ #define GDS BIT(6)
++/* CPU is affected by Register File Data Sampling */
++#define RFDS BIT(7)
+
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+@@ -1296,9 +1299,18 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
+ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
+- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO),
+- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
++ VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS),
++ VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS),
+
+ VULNBL_AMD(0x15, RETBLEED),
+ VULNBL_AMD(0x16, RETBLEED),
+@@ -1317,28 +1329,46 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
+
+ u64 x86_read_arch_cap_msr(void)
+ {
+- u64 ia32_cap = 0;
++ u64 x86_arch_cap_msr = 0;
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
+
+- return ia32_cap;
++ return x86_arch_cap_msr;
+ }
+
+-static bool arch_cap_mmio_immune(u64 ia32_cap)
++static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
+ {
+- return (ia32_cap & ARCH_CAP_FBSDP_NO &&
+- ia32_cap & ARCH_CAP_PSDP_NO &&
+- ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
++ return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
++ x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
++ x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
++}
++
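++/*
++ * Decision order in vulnerable_to_rfds() below, summarised
++ * (illustrative): ARCH_CAP_RFDS_NO wins outright; otherwise
++ * ARCH_CAP_RFDS_CLEAR (set by VMMs) means vulnerable; CPUs that
++ * enumerate neither bit fall back to the model blacklist.
++ */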
++static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
++{
++ /* The "immunity" bit trumps everything else: */
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
++ return false;
++
++ /*
++	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist
++	 * to indicate that the mitigation is needed because the guest is
++	 * running on vulnerable hardware or may migrate to such hardware:
++ */
++ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
++ return true;
++
++ /* Only consult the blacklist when there is no enumeration: */
++ return cpu_matches(cpu_vuln_blacklist, RFDS);
+ }
+
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+- u64 ia32_cap = x86_read_arch_cap_msr();
++ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
+
+ /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+- !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++ !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
+ setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+@@ -1350,7 +1380,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+@@ -1358,15 +1388,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+ * flag and protect from vendor-specific bugs via the whitelist.
+ */
+- if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
++ if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+- !(ia32_cap & ARCH_CAP_PBRSB_NO))
++ !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ }
+
+ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+- !(ia32_cap & ARCH_CAP_MDS_NO)) {
++ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
+ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+@@ -1385,9 +1415,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * TSX_CTRL check alone is not sufficient for cases when the microcode
+ * update is not present or running as guest that don't get TSX_CTRL.
+ */
+- if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
++ if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
+ (cpu_has(c, X86_FEATURE_RTM) ||
+- (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
++ (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
+ setup_force_cpu_bug(X86_BUG_TAA);
+
+ /*
+@@ -1413,7 +1443,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ */
+- if (!arch_cap_mmio_immune(ia32_cap)) {
++ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
+ if (cpu_matches(cpu_vuln_blacklist, MMIO))
+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+@@ -1421,7 +1451,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ }
+
+ if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+- if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
++ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
+ setup_force_cpu_bug(X86_BUG_RETBLEED);
+ }
+
+@@ -1439,15 +1469,28 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+ * which means that AVX will be disabled.
+ */
+- if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
++ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
+ boot_cpu_has(X86_FEATURE_AVX))
+ setup_force_cpu_bug(X86_BUG_GDS);
+
++ if (vulnerable_to_rfds(x86_arch_cap_msr))
++ setup_force_cpu_bug(X86_BUG_RFDS);
++
++	/* When virtualized, eIBRS could be hidden; assume vulnerable */
++ if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
++ !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
++ (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
++ boot_cpu_has(X86_FEATURE_HYPERVISOR)))
++ setup_force_cpu_bug(X86_BUG_BHI);
++
++ if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
++ setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
++
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
+
+ /* Rogue Data Cache Load? No! */
+- if (ia32_cap & ARCH_CAP_RDCL_NO)
++ if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+@@ -1858,6 +1901,13 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+ #endif
+
++
++ /*
++ * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
++ * Hygon will clear it in ->c_init() below.
++ */
++ set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
++
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+@@ -2087,7 +2137,7 @@ void syscall_init(void)
+ (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
+ #else
+- wrmsrl_cstar((unsigned long)ignore_sysret);
++ wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
+ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
+diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
+index e462c1d3800a6c..6fb6d8a57cecaf 100644
+--- a/arch/x86/kernel/cpu/cpuid-deps.c
++++ b/arch/x86/kernel/cpu/cpuid-deps.c
+@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_F16C, X86_FEATURE_XMM2, },
+ { X86_FEATURE_AES, X86_FEATURE_XMM2 },
+ { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
++ { X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
+ { X86_FEATURE_FMA, X86_FEATURE_AVX },
++ { X86_FEATURE_VAES, X86_FEATURE_AVX },
++ { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
+ { X86_FEATURE_AVX2, X86_FEATURE_AVX, },
+ { X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
+ { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
+@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
+- { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
+- { X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
+- { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index defdc594be14df..6e738759779e81 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -87,8 +87,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
+- /* Socket ID is ApicId[6] for these processors. */
+- c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++ /*
++	 * Socket ID is ApicId[6] for processors with model <= 0x3
++	 * when running on the host.
++ */
++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
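++		/* e.g. with APICID_SOCKET_ID_BIT == 6, apicid 0x45 >> 6 gives socket 1 (illustrative) */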
+
+ cacheinfo_hygon_init_llc_id(c, cpu);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+@@ -344,6 +348,9 @@ static void init_hygon(struct cpuinfo_x86 *c)
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+ check_null_seg_clears_base(c);
++
++ /* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
++ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
+ }
+
+ static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index be4045628fd33b..aa3e7ed0eb3d7f 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -184,6 +184,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+ return false;
+ }
+
++#define MSR_IA32_TME_ACTIVATE 0x982
++
++/* Helpers to access TME_ACTIVATE MSR */
++#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
++#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
++
++#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
++#define TME_ACTIVATE_POLICY_AES_XTS_128 0
++
++#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
++
++#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
++#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
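++
++/*
++ * Illustrative decode with a hypothetical MSR value: tme_activate =
++ * 0x0001000200000003 gives LOCKED = 1, ENABLED = 1, POLICY = 0
++ * (AES-XTS-128), KEYID_BITS = 2 (so nr_keyids = (1 << 2) - 1 = 3) and
++ * CRYPTO_ALGS = 0x0001 (AES-XTS-128 supported).
++ */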
++
++/* Values for mktme_status (SW only construct) */
++#define MKTME_ENABLED 0
++#define MKTME_DISABLED 1
++#define MKTME_UNINITIALIZED 2
++static int mktme_status = MKTME_UNINITIALIZED;
++
++static void detect_tme_early(struct cpuinfo_x86 *c)
++{
++ u64 tme_activate, tme_policy, tme_crypto_algs;
++ int keyid_bits = 0, nr_keyids = 0;
++ static u64 tme_activate_cpu0 = 0;
++
++ rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
++
++ if (mktme_status != MKTME_UNINITIALIZED) {
++ if (tme_activate != tme_activate_cpu0) {
++ /* Broken BIOS? */
++ pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
++ pr_err_once("x86/tme: MKTME is not usable\n");
++ mktme_status = MKTME_DISABLED;
++
++ /* Proceed. We may need to exclude bits from x86_phys_bits. */
++ }
++ } else {
++ tme_activate_cpu0 = tme_activate;
++ }
++
++ if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
++ pr_info_once("x86/tme: not enabled by BIOS\n");
++ mktme_status = MKTME_DISABLED;
++ return;
++ }
++
++ if (mktme_status != MKTME_UNINITIALIZED)
++ goto detect_keyid_bits;
++
++ pr_info("x86/tme: enabled by BIOS\n");
++
++ tme_policy = TME_ACTIVATE_POLICY(tme_activate);
++ if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
++ pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
++
++ tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
++ if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
++ pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
++ tme_crypto_algs);
++ mktme_status = MKTME_DISABLED;
++ }
++detect_keyid_bits:
++ keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
++ nr_keyids = (1UL << keyid_bits) - 1;
++ if (nr_keyids) {
++ pr_info_once("x86/mktme: enabled by BIOS\n");
++ pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
++ } else {
++ pr_info_once("x86/mktme: disabled by BIOS\n");
++ }
++
++ if (mktme_status == MKTME_UNINITIALIZED) {
++ /* MKTME is usable */
++ mktme_status = MKTME_ENABLED;
++ }
++
++ /*
++ * KeyID bits effectively lower the number of physical address
++ * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
++ */
++ c->x86_phys_bits -= keyid_bits;
++}
++
+ static void early_init_intel(struct cpuinfo_x86 *c)
+ {
+ u64 misc_enable;
+@@ -335,6 +419,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ */
+ if (detect_extended_topology_early(c) < 0)
+ detect_ht_early(c);
++
++ /*
++ * Adjust the number of physical bits early because it affects the
++ * valid bits of the MTRR mask registers.
++ */
++ if (cpu_has(c, X86_FEATURE_TME))
++ detect_tme_early(c);
+ }
+
+ static void bsp_init_intel(struct cpuinfo_x86 *c)
+@@ -495,90 +586,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
+ #endif
+ }
+
+-#define MSR_IA32_TME_ACTIVATE 0x982
+-
+-/* Helpers to access TME_ACTIVATE MSR */
+-#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
+-#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
+-
+-#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
+-#define TME_ACTIVATE_POLICY_AES_XTS_128 0
+-
+-#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
+-
+-#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
+-#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
+-
+-/* Values for mktme_status (SW only construct) */
+-#define MKTME_ENABLED 0
+-#define MKTME_DISABLED 1
+-#define MKTME_UNINITIALIZED 2
+-static int mktme_status = MKTME_UNINITIALIZED;
+-
+-static void detect_tme(struct cpuinfo_x86 *c)
+-{
+- u64 tme_activate, tme_policy, tme_crypto_algs;
+- int keyid_bits = 0, nr_keyids = 0;
+- static u64 tme_activate_cpu0 = 0;
+-
+- rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+-
+- if (mktme_status != MKTME_UNINITIALIZED) {
+- if (tme_activate != tme_activate_cpu0) {
+- /* Broken BIOS? */
+- pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+- pr_err_once("x86/tme: MKTME is not usable\n");
+- mktme_status = MKTME_DISABLED;
+-
+- /* Proceed. We may need to exclude bits from x86_phys_bits. */
+- }
+- } else {
+- tme_activate_cpu0 = tme_activate;
+- }
+-
+- if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+- pr_info_once("x86/tme: not enabled by BIOS\n");
+- mktme_status = MKTME_DISABLED;
+- return;
+- }
+-
+- if (mktme_status != MKTME_UNINITIALIZED)
+- goto detect_keyid_bits;
+-
+- pr_info("x86/tme: enabled by BIOS\n");
+-
+- tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+- if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+- pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+-
+- tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+- if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+- pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+- tme_crypto_algs);
+- mktme_status = MKTME_DISABLED;
+- }
+-detect_keyid_bits:
+- keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+- nr_keyids = (1UL << keyid_bits) - 1;
+- if (nr_keyids) {
+- pr_info_once("x86/mktme: enabled by BIOS\n");
+- pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+- } else {
+- pr_info_once("x86/mktme: disabled by BIOS\n");
+- }
+-
+- if (mktme_status == MKTME_UNINITIALIZED) {
+- /* MKTME is usable */
+- mktme_status = MKTME_ENABLED;
+- }
+-
+- /*
+- * KeyID bits effectively lower the number of physical address
+- * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
+- */
+- c->x86_phys_bits -= keyid_bits;
+-}
+-
+ static void init_cpuid_fault(struct cpuinfo_x86 *c)
+ {
+ u64 msr;
+@@ -715,9 +722,6 @@ static void init_intel(struct cpuinfo_x86 *c)
+
+ init_ia32_feat_ctl(c);
+
+- if (cpu_has(c, X86_FEATURE_TME))
+- detect_tme(c);
+-
+ init_intel_misc_features(c);
+
+ split_lock_init();
+diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
+index ad6776081e60da..ae71b8ef909c9a 100644
+--- a/arch/x86/kernel/cpu/match.c
++++ b/arch/x86/kernel/cpu/match.c
+@@ -39,9 +39,7 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
+ const struct x86_cpu_id *m;
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+- for (m = match;
+- m->vendor | m->family | m->model | m->steppings | m->feature;
+- m++) {
++ for (m = match; m->flags & X86_CPU_ID_FLAG_ENTRY_VALID; m++) {
+ if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
+ continue;
+ if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 6f35f724cc142e..e103c227acd3ae 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -44,6 +44,7 @@
+ #include <linux/sync_core.h>
+ #include <linux/task_work.h>
+ #include <linux/hardirq.h>
++#include <linux/kexec.h>
+
+ #include <asm/intel-family.h>
+ #include <asm/processor.h>
+@@ -233,6 +234,7 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
+ struct llist_node *pending;
+ struct mce_evt_llist *l;
+ int apei_err = 0;
++ struct page *p;
+
+ /*
+ * Allow instrumentation around external facilities usage. Not that it
+@@ -286,6 +288,20 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
+ if (!fake_panic) {
+ if (panic_timeout == 0)
+ panic_timeout = mca_cfg.panic_timeout;
++
++ /*
++ * Kdump skips the poisoned page in order to avoid
++ * touching the error bits again. Poison the page even
++ * if the error is fatal and the machine is about to
++ * panic.
++ */
++ if (kexec_crash_loaded()) {
++ if (final && (final->status & MCI_STATUS_ADDRV)) {
++ p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
++ if (p)
++ SetPageHWPoison(p);
++ }
++ }
+ panic(msg);
+ } else
+ pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
+@@ -2452,12 +2468,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
+ return -EINVAL;
+
+ b = &per_cpu(mce_banks_array, s->id)[bank];
+-
+ if (!b->init)
+ return -ENODEV;
+
+ b->ctl = new;
++
++ mutex_lock(&mce_sysfs_mutex);
+ mce_restart();
++ mutex_unlock(&mce_sysfs_mutex);
+
+ return size;
+ }
+diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
+index 4d8d4bcf915ddd..72f0695c3dc1dd 100644
+--- a/arch/x86/kernel/cpu/mce/inject.c
++++ b/arch/x86/kernel/cpu/mce/inject.c
+@@ -746,6 +746,7 @@ static void check_hw_inj_possible(void)
+
+ wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status);
+ rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status);
++ wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0);
+
+ if (!status) {
+ hw_injection_possible = false;
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 6cc7a2c181da5f..a4ebd5e0ae8287 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -208,6 +208,11 @@ static int __init save_microcode_in_initrd(void)
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ int ret = -EINVAL;
+
++ if (dis_ucode_ldr) {
++ ret = 0;
++ goto out;
++ }
++
+ switch (c->x86_vendor) {
+ case X86_VENDOR_INTEL:
+ if (c->x86 >= 6)
+@@ -221,6 +226,7 @@ static int __init save_microcode_in_initrd(void)
+ break;
+ }
+
++out:
+ initrd_gone = true;
+
+ return ret;
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index e6bba12c759cb7..bcb2d640a0cd85 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -199,8 +199,8 @@ static void hv_machine_shutdown(void)
+ * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
+ * corrupts the old VP Assist Pages and can crash the kexec kernel.
+ */
+- if (kexec_in_progress && hyperv_init_cpuhp > 0)
+- cpuhp_remove_state(hyperv_init_cpuhp);
++ if (kexec_in_progress)
++ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
+
+ /* The function calls stop_other_cpus(). */
+ native_machine_shutdown();
+@@ -423,6 +423,7 @@ static void __init ms_hyperv_init_platform(void)
+ ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
+ x86_platform.calibrate_tsc = hv_get_tsc_khz;
+ x86_platform.calibrate_cpu = hv_get_tsc_khz;
++ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ }
+
+ if (ms_hyperv.priv_high & HV_ISOLATION) {
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
+index 767bf1c71aadda..2a2fc14955cd3b 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
+@@ -609,7 +609,7 @@ void mtrr_save_state(void)
+ {
+ int first_cpu;
+
+- if (!mtrr_enabled())
++ if (!mtrr_enabled() || !mtrr_state.have_fixed)
+ return;
+
+ first_cpu = cpumask_first(cpu_online_mask);
+diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
+index 030d3b409768de..10830995eadab6 100644
+--- a/arch/x86/kernel/cpu/resctrl/core.c
++++ b/arch/x86/kernel/cpu/resctrl/core.c
+@@ -193,7 +193,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
+ return false;
+ }
+
+-static bool __get_mem_config_intel(struct rdt_resource *r)
++static __init bool __get_mem_config_intel(struct rdt_resource *r)
+ {
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+ union cpuid_0x10_3_eax eax;
+@@ -227,12 +227,10 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
+ return true;
+ }
+
+-static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
++static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+ {
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+- union cpuid_0x10_3_eax eax;
+- union cpuid_0x10_x_edx edx;
+- u32 ebx, ecx, subleaf;
++ u32 eax, ebx, ecx, edx, subleaf;
+
+ /*
+ * Query CPUID_Fn80000020_EDX_x01 for MBA and
+@@ -240,9 +238,9 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+ */
+ subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;
+
+- cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full);
+- hw_res->num_closid = edx.split.cos_max + 1;
+- r->default_ctrl = MAX_MBA_BW_AMD;
++ cpuid_count(0x80000020, subleaf, &eax, &ebx, &ecx, &edx);
++ hw_res->num_closid = edx + 1;
++ r->default_ctrl = 1 << eax;
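++	/* e.g. eax == 11 gives default_ctrl = 0x800, matching the old fixed MAX_MBA_BW_AMD (illustrative) */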
+
+ /* AMD does not use delay */
+ r->membw.delay_linear = false;
+diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
+index 85ceaf9a31ac20..566386abb877f9 100644
+--- a/arch/x86/kernel/cpu/resctrl/internal.h
++++ b/arch/x86/kernel/cpu/resctrl/internal.h
+@@ -18,7 +18,6 @@
+ #define MBM_OVERFLOW_INTERVAL 1000
+ #define MAX_MBA_BW 100u
+ #define MBA_IS_LINEAR 0x4
+-#define MAX_MBA_BW_AMD 0x800
+ #define MBM_CNTR_WIDTH_OFFSET_AMD 20
+
+ #define RMID_VAL_ERROR BIT_ULL(63)
+@@ -296,14 +295,10 @@ struct rftype {
+ * struct mbm_state - status for each MBM counter in each domain
+ * @prev_bw_bytes: Previous bytes value read for bandwidth calculation
+ * @prev_bw: The most recent bandwidth in MBps
+- * @delta_bw: Difference between the current and previous bandwidth
+- * @delta_comp: Indicates whether to compute the delta_bw
+ */
+ struct mbm_state {
+ u64 prev_bw_bytes;
+ u32 prev_bw;
+- u32 delta_bw;
+- bool delta_comp;
+ };
+
+ /**
+@@ -395,6 +390,8 @@ struct rdt_parse_data {
+ * @msr_update: Function pointer to update QOS MSRs
+ * @mon_scale: cqm counter * mon_scale = occupancy in bytes
+ * @mbm_width: Monitor width, to detect and correct for overflow.
++ * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth
++ * Monitoring Event Configuration (BMEC) is supported.
+ * @cdp_enabled: CDP state of this resource
+ *
+ * Members of this structure are either private to the architecture
+@@ -409,6 +406,7 @@ struct rdt_hw_resource {
+ struct rdt_resource *r);
+ unsigned int mon_scale;
+ unsigned int mbm_width;
++ unsigned int mbm_cfg_mask;
+ bool cdp_enabled;
+ };
+
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index f136ac046851c8..3a6c069614eb84 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -440,9 +440,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
+
+ cur_bw = bytes / SZ_1M;
+
+- if (m->delta_comp)
+- m->delta_bw = abs(cur_bw - m->prev_bw);
+- m->delta_comp = false;
+ m->prev_bw = cur_bw;
+ }
+
+@@ -520,11 +517,11 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+ {
+ u32 closid, rmid, cur_msr_val, new_msr_val;
+ struct mbm_state *pmbm_data, *cmbm_data;
+- u32 cur_bw, delta_bw, user_bw;
+ struct rdt_resource *r_mba;
+ struct rdt_domain *dom_mba;
+ struct list_head *head;
+ struct rdtgroup *entry;
++ u32 cur_bw, user_bw;
+
+ if (!is_mbm_local_enabled())
+ return;
+@@ -543,7 +540,6 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+
+ cur_bw = pmbm_data->prev_bw;
+ user_bw = dom_mba->mbps_val[closid];
+- delta_bw = pmbm_data->delta_bw;
+
+ /* MBA resource doesn't support CDP */
+ cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
+@@ -555,49 +551,31 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+ list_for_each_entry(entry, head, mon.crdtgrp_list) {
+ cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+ cur_bw += cmbm_data->prev_bw;
+- delta_bw += cmbm_data->delta_bw;
+ }
+
+ /*
+ * Scale up/down the bandwidth linearly for the ctrl group. The
+ * bandwidth step is the bandwidth granularity specified by the
+ * hardware.
+- *
+- * The delta_bw is used when increasing the bandwidth so that we
+- * dont alternately increase and decrease the control values
+- * continuously.
+- *
+- * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
+- * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
+- * switching between 90 and 110 continuously if we only check
+- * cur_bw < user_bw.
++ * Always increase throttling if current bandwidth is above the
++	 * target set by the user.
++ * But avoid thrashing up and down on every poll by checking
++ * whether a decrease in throttling is likely to push the group
++ * back over target. E.g. if currently throttling to 30% of bandwidth
++ * on a system with 10% granularity steps, check whether moving to
++ * 40% would go past the limit by multiplying current bandwidth by
++ * "(30 + 10) / 30".
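++	 * With cur_bw = 600 MBps that check reads: relax throttling only
++	 * if user_bw > 600 * 40 / 30 = 800 MBps (illustrative numbers).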
+ */
+ if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
+ new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
+ } else if (cur_msr_val < MAX_MBA_BW &&
+- (user_bw > (cur_bw + delta_bw))) {
++ (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
+ new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
+ } else {
+ return;
+ }
+
+ resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
+-
+- /*
+- * Delta values are updated dynamically package wise for each
+- * rdtgrp every time the throttle MSR changes value.
+- *
+- * This is because (1)the increase in bandwidth is not perfectly
+- * linear and only "approximately" linear even when the hardware
+- * says it is linear.(2)Also since MBA is a core specific
+- * mechanism, the delta values vary based on number of cores used
+- * by the rdtgrp.
+- */
+- pmbm_data->delta_comp = true;
+- list_for_each_entry(entry, head, mon.crdtgrp_list) {
+- cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+- cmbm_data->delta_comp = true;
+- }
+ }
+
+ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
+@@ -813,6 +791,12 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
+ return ret;
+
+ if (rdt_cpu_has(X86_FEATURE_BMEC)) {
++ u32 eax, ebx, ecx, edx;
++
++ /* Detect list of bandwidth sources that can be tracked */
++ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx);
++ hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
++
+ if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
+ mbm_total_event.configurable = true;
+ mbm_config_rftype_init("mbm_total_bytes_config");
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 725344048f85da..d82d5de183b107 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -1553,12 +1553,6 @@ static int mbm_config_write_domain(struct rdt_resource *r,
+ struct mon_config_info mon_info = {0};
+ int ret = 0;
+
+- /* mon_config cannot be more than the supported set of events */
+- if (val > MAX_EVT_CONFIG_BITS) {
+- rdt_last_cmd_puts("Invalid event configuration\n");
+- return -EINVAL;
+- }
+-
+ /*
+ * Read the current config value first. If both are the same then
+ * no need to write it again.
+@@ -1596,6 +1590,7 @@ static int mbm_config_write_domain(struct rdt_resource *r,
+
+ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
+ {
++ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+ char *dom_str = NULL, *id_str;
+ unsigned long dom_id, val;
+ struct rdt_domain *d;
+@@ -1619,6 +1614,13 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
+ return -EINVAL;
+ }
+
++ /* Value from user cannot be more than the supported set of events */
++ if ((val & hw_res->mbm_cfg_mask) != val) {
++ rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
++ hw_res->mbm_cfg_mask);
++ return -EINVAL;
++ }
++
+ list_for_each_entry(d, &r->domains, list) {
+ if (d->id == dom_id) {
+ ret = mbm_config_write_domain(r, d, evtid, val);
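The validity check moves from mbm_config_write_domain() to mon_config_write() and becomes a subset test against the detected mask rather than a magnitude bound, so a numerically small value that sets an unsupported bit is now rejected. A stand-alone sketch with a hypothetical mask value:

#include <assert.h>
#include <stdint.h>

/* Old test: only bounds the magnitude of the value. */
static int old_check(uint32_t val) { return val <= 0x7f; }

/* New test: every set bit must also be set in the supported mask. */
static int new_check(uint32_t val, uint32_t mask) { return (val & mask) == val; }

int main(void)
{
        uint32_t mbm_cfg_mask = 0x33;   /* hypothetical: bits 0-1, 4-5 supported */

        assert(old_check(0x0c));                /* 0x0c <= 0x7f: old check passes */
        assert(!new_check(0x0c, mbm_cfg_mask)); /* bits 2-3 unsupported: rejected */
        assert(new_check(0x31, mbm_cfg_mask));  /* subset of the mask: accepted */
        return 0;
}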
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index 0dad49a09b7a9e..af5aa2c754c222 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
+ { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
++ { X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
+ { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
+ { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
+ { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
+@@ -49,6 +50,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
+ { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
+ { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
++ { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
+diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
+index 166692f2d50111..c7f8c3200e8d7f 100644
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -474,24 +474,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
+ {
+ struct sgx_epc_page *page;
+ int nid_of_current = numa_node_id();
+- int nid = nid_of_current;
++ int nid_start, nid;
+
+- if (node_isset(nid_of_current, sgx_numa_mask)) {
+- page = __sgx_alloc_epc_page_from_node(nid_of_current);
+- if (page)
+- return page;
+- }
+-
+- /* Fall back to the non-local NUMA nodes: */
+- while (true) {
+- nid = next_node_in(nid, sgx_numa_mask);
+- if (nid == nid_of_current)
+- break;
++ /*
++ * Try local node first. If it doesn't have an EPC section,
++ * fall back to the non-local NUMA nodes.
++ */
++ if (node_isset(nid_of_current, sgx_numa_mask))
++ nid_start = nid_of_current;
++ else
++ nid_start = next_node_in(nid_of_current, sgx_numa_mask);
+
++ nid = nid_start;
++ do {
+ page = __sgx_alloc_epc_page_from_node(nid);
+ if (page)
+ return page;
+- }
++
++ nid = next_node_in(nid, sgx_numa_mask);
++ } while (nid != nid_start);
+
+ return ERR_PTR(-ENOMEM);
+ }
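The rewritten allocator picks a valid starting node up front (the local node when it has EPC, otherwise the next node that does) and then walks the node set exactly once with a do/while, rather than special-casing the local node before the loop. A toy model of that traversal; next_node() is a stand-in for the kernel's next_node_in():

#include <stdio.h>

#define NNODES 8

/* Wrap-around search for the next node set in the mask, as next_node_in(). */
static int next_node(int nid, unsigned int mask)
{
        for (int i = 1; i <= NNODES; i++) {
                int n = (nid + i) % NNODES;
                if (mask & (1u << n))
                        return n;
        }
        return nid;
}

int main(void)
{
        unsigned int sgx_numa_mask = 0x0a;      /* nodes 1 and 3 have EPC */
        int nid_of_current = 0;                 /* local node has none */
        int nid_start, nid;

        nid_start = (sgx_numa_mask & (1u << nid_of_current)) ?
                    nid_of_current : next_node(nid_of_current, sgx_numa_mask);
        nid = nid_start;
        do {
                printf("try node %d\n", nid);   /* allocation attempt */
                nid = next_node(nid, sgx_numa_mask);
        } while (nid != nid_start);             /* each node visited once */
        return 0;
}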
+diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
+index 87d38f17ff5c9c..c13c9cb40b9b46 100644
+--- a/arch/x86/kernel/devicetree.c
++++ b/arch/x86/kernel/devicetree.c
+@@ -82,7 +82,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
+
+ ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (ret)
+- return ret;
++ return pcibios_err_to_errno(ret);
+ if (!pin)
+ return 0;
+
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index fb8cf953380dab..b66f540de054a7 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
+ e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ /*
+- * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
+- * to be reserved.
++ * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
++ * kexec and do not need to be reserved.
+ */
+- if (data->type != SETUP_EFI && data->type != SETUP_IMA)
++ if (data->type != SETUP_EFI &&
++ data->type != SETUP_IMA &&
++ data->type != SETUP_RNG_SEED)
+ e820__range_update_kexec(pa_data,
+ sizeof(*data) + data->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
+index e963344b044902..53935b4d62e305 100644
+--- a/arch/x86/kernel/eisa.c
++++ b/arch/x86/kernel/eisa.c
+@@ -2,6 +2,7 @@
+ /*
+ * EISA specific code
+ */
++#include <linux/cc_platform.h>
+ #include <linux/ioport.h>
+ #include <linux/eisa.h>
+ #include <linux/io.h>
+@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
+ {
+ void __iomem *p;
+
+- if (xen_pv_domain() && !xen_initial_domain())
++ if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return 0;
+
+ p = ioremap(0x0FFFD9, 4);
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index a21a4d0ecc345b..4b414b0ab0692a 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -145,8 +145,8 @@ void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+- "fildl %P[addr]" /* set F?P to defined value */
+- : : [addr] "m" (fpstate));
++ "fildl %[addr]" /* set F?P to defined value */
++ : : [addr] "m" (*fpstate));
+ }
+
+ if (use_xsave()) {
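The constraint fix matters because "m" (fpstate) names the stack slot holding the pointer variable itself, while "m" (*fpstate) names the pointed-to object that fildl should actually load from; the %P output modifier was papering over the mismatch. A stand-alone illustration of the corrected form, assuming GCC or Clang on x86-64:

#include <stdio.h>

int main(void)
{
        int value = 42, out;
        int *p = &value;

        /* Correct form: the operand is *p, so the load reads 'value'. */
        asm volatile("movl %[addr], %[out]"
                     : [out] "=r" (out)
                     : [addr] "m" (*p));
        printf("%d\n", out);    /* prints 42 */
        return 0;
}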
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 558076dbde5bfc..2b3b9e140dd41b 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -156,7 +156,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
+ return !err;
+ }
+
+-static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
++static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+ if (use_xsave())
+ return xsave_to_user_sigframe(buf);
+@@ -185,7 +185,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+ * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
+ * indicating the absence/presence of the extended state to the user.
+ */
+-bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
++bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru)
+ {
+ struct task_struct *tsk = current;
+ struct fpstate *fpstate = tsk->thread.fpu.fpstate;
+@@ -228,7 +228,7 @@ bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
+ fpregs_restore_userregs();
+
+ pagefault_disable();
+- ret = copy_fpregs_to_sigframe(buf_fx);
++ ret = copy_fpregs_to_sigframe(buf_fx, pkru);
+ pagefault_enable();
+ fpregs_unlock();
+
+@@ -274,12 +274,13 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
+ * Attempt to restore the FPU registers directly from user memory.
+ * Pagefaults are handled and any errors returned are fatal.
+ */
+-static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
+- bool fx_only, unsigned int size)
++static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
+ {
+ struct fpu *fpu = &current->thread.fpu;
+ int ret;
+
++ /* Restore enabled features only. */
++ xrestore &= fpu->fpstate->user_xfeatures;
+ retry:
+ fpregs_lock();
+ /* Ensure that XFD is up to date */
+@@ -309,7 +310,7 @@ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
+ if (ret != X86_TRAP_PF)
+ return false;
+
+- if (!fault_in_readable(buf, size))
++ if (!fault_in_readable(buf, fpu->fpstate->user_size))
+ goto retry;
+ return false;
+ }
+@@ -339,7 +340,6 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ struct user_i387_ia32_struct env;
+ bool success, fx_only = false;
+ union fpregs_state *fpregs;
+- unsigned int state_size;
+ u64 user_xfeatures = 0;
+
+ if (use_xsave()) {
+@@ -349,17 +349,14 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ return false;
+
+ fx_only = !fx_sw_user.magic1;
+- state_size = fx_sw_user.xstate_size;
+ user_xfeatures = fx_sw_user.xfeatures;
+ } else {
+ user_xfeatures = XFEATURE_MASK_FPSSE;
+- state_size = fpu->fpstate->user_size;
+ }
+
+ if (likely(!ia32_fxstate)) {
+ /* Restore the FPU registers directly from user memory. */
+- return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+- state_size);
++ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index ef6906107c541d..255ff8f6c52705 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -178,10 +178,11 @@ void fpu__init_cpu_xstate(void)
+ * Must happen after CR4 setup and before xsetbv() to allow KVM
+ * lazy passthrough. Write independent of the dynamic state static
+ * key as that does not work on the boot CPU. This also ensures
+- * that any stale state is wiped out from XFD.
++	 * that any stale state is wiped out from XFD. Reset the per-CPU
++ * xfd cache too.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_XFD))
+- wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);
++ xfd_set_state(init_fpstate.xfd);
+
+ /*
+ * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
+@@ -787,6 +788,9 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
+ goto out_disable;
+ }
+
++ fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features &
++ XFEATURE_MASK_INDEPENDENT;
++
+ /*
+ * Clear XSAVE features that are disabled in the normal CPUID.
+ */
+diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
+index 3518fb26d06b02..544224611e23c5 100644
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -64,9 +64,9 @@ static inline u64 xfeatures_mask_supervisor(void)
+ static inline u64 xfeatures_mask_independent(void)
+ {
+ if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR))
+- return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
++ return fpu_kernel_cfg.independent_features & ~XFEATURE_MASK_LBR;
+
+- return XFEATURE_MASK_INDEPENDENT;
++ return fpu_kernel_cfg.independent_features;
+ }
+
+ /* XSAVE/XRSTOR wrapper functions */
+@@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
+ #endif
+
+ #ifdef CONFIG_X86_64
++static inline void xfd_set_state(u64 xfd)
++{
++ wrmsrl(MSR_IA32_XFD, xfd);
++ __this_cpu_write(xfd_state, xfd);
++}
++
+ static inline void xfd_update_state(struct fpstate *fpstate)
+ {
+ if (fpu_state_size_dynamic()) {
+ u64 xfd = fpstate->xfd;
+
+- if (__this_cpu_read(xfd_state) != xfd) {
+- wrmsrl(MSR_IA32_XFD, xfd);
+- __this_cpu_write(xfd_state, xfd);
+- }
++ if (__this_cpu_read(xfd_state) != xfd)
++ xfd_set_state(xfd);
+ }
+ }
+
+ extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
+ #else
++static inline void xfd_set_state(u64 xfd) { }
++
+ static inline void xfd_update_state(struct fpstate *fpstate) { }
+
+ static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {
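Factoring xfd_set_state() out of xfd_update_state() gives the boot path a way to write MSR_IA32_XFD and refresh the per-CPU cache in one step, so the cache can never go stale after the early reset. A toy write-through model of the pattern; names mirror the kernel's but the types are simplified:

#include <assert.h>
#include <stdint.h>

static uint64_t hw_xfd;         /* stands in for MSR_IA32_XFD */
static uint64_t xfd_cache;      /* stands in for the per-CPU xfd_state */

static void xfd_set_state(uint64_t xfd)
{
        hw_xfd = xfd;           /* wrmsrl(MSR_IA32_XFD, xfd) */
        xfd_cache = xfd;        /* __this_cpu_write(xfd_state, xfd) */
}

static void xfd_update_state(uint64_t xfd)
{
        if (xfd_cache != xfd)   /* skip the expensive MSR write on a hit */
                xfd_set_state(xfd);
}

int main(void)
{
        xfd_set_state(0);       /* boot-time reset: cache is now valid too */
        xfd_update_state(0);    /* no-op: cache hit */
        xfd_update_state(4);
        assert(hw_xfd == 4 && xfd_cache == 4);
        return 0;
}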
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 49f7629b17f734..c58213bce294e9 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -41,6 +41,7 @@
+ #include <asm/trapnr.h>
+ #include <asm/sev.h>
+ #include <asm/tdx.h>
++#include <asm/init.h>
+
+ /*
+ * Manage page tables very early on.
+@@ -80,12 +81,10 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
+ * while the kernel still uses a direct mapping.
+ */
+ static struct desc_ptr startup_gdt_descr = {
+- .size = sizeof(startup_gdt),
++ .size = sizeof(startup_gdt)-1,
+ .address = 0,
+ };
+
+-#define __head __section(".head.text")
+-
+ static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
+ {
+ return ptr - (void *)_text + (void *)physaddr;
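The startup_gdt_descr change above is the classic descriptor-table convention: the limit field of an lgdt/lidt pseudo-descriptor holds the offset of the last valid byte, i.e. the table size minus one. A sketch of the corrected initializer with simplified stand-in types:

#include <assert.h>
#include <stdint.h>

struct desc_struct { uint64_t raw; };            /* one 8-byte GDT entry */

struct desc_ptr {                                /* lgdt operand layout */
        uint16_t size;                           /* limit = last byte offset */
        uint64_t address;
} __attribute__((packed));

int main(void)
{
        struct desc_struct startup_gdt[16];
        struct desc_ptr gdtr = {
                .size = sizeof(startup_gdt) - 1, /* 16 entries * 8 bytes - 1 */
                .address = (uint64_t)(uintptr_t)startup_gdt,
        };

        /* limit 0x7f covers bytes 0..127: exactly 16 entries of 8 bytes */
        assert(gdtr.size == 16 * 8 - 1);
        return 0;
}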
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index ea6995920b7aa9..e6eaee8509ceed 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -256,6 +256,22 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
+ testl $X2APIC_ENABLE, %eax
+ jnz .Lread_apicid_msr
+
++#ifdef CONFIG_X86_X2APIC
++ /*
++ * If system is in X2APIC mode then MMIO base might not be
++ * mapped causing the MMIO read below to fault. Faults can't
++ * be handled at that point.
++ */
++ cmpl $0, x2apic_mode(%rip)
++ jz .Lread_apicid_mmio
++
++ /* Force the AP into X2APIC mode. */
++ orl $X2APIC_ENABLE, %eax
++ wrmsr
++ jmp .Lread_apicid_msr
++#endif
++
++.Lread_apicid_mmio:
+ /* Read the APIC ID from the fix-mapped MMIO space. */
+ movq apic_mmio_base(%rip), %rcx
+ addq $APIC_ID, %rcx
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 1648aa0204d97d..046bc9d57e9966 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -1438,7 +1438,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+ memset(&curr_time, 0, sizeof(struct rtc_time));
+
+ if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
+- if (unlikely(mc146818_get_time(&curr_time) < 0)) {
++ if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) {
+ pr_err_ratelimited("unable to read current time from RTC\n");
+ return IRQ_HANDLED;
+ }
+diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
+index b786d48f5a0faf..fc77a96040b7ea 100644
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -117,7 +117,7 @@ static const __initconst struct idt_data def_idts[] = {
+
+ SYSG(X86_TRAP_OF, asm_exc_overflow),
+ #if defined(CONFIG_IA32_EMULATION)
+- SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat),
++ SYSG(IA32_SYSCALL_VECTOR, asm_int80_emulation),
+ #elif defined(CONFIG_X86_32)
+ SYSG(IA32_SYSCALL_VECTOR, entry_INT80_32),
+ #endif
+diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
+index 578d16fc040fa1..5481c7c5db301b 100644
+--- a/arch/x86/kernel/jailhouse.c
++++ b/arch/x86/kernel/jailhouse.c
+@@ -12,6 +12,7 @@
+ #include <linux/kernel.h>
+ #include <linux/reboot.h>
+ #include <linux/serial_8250.h>
++#include <linux/acpi.h>
+ #include <asm/apic.h>
+ #include <asm/io_apic.h>
+ #include <asm/acpi.h>
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index e8babebad7b888..a6a3475e1d6097 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -335,7 +335,16 @@ static int can_probe(unsigned long paddr)
+ kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+ bool *on_func_entry)
+ {
+- if (is_endbr(*(u32 *)addr)) {
++ u32 insn;
++
++ /*
++ * Since 'addr' is not guaranteed to be safe to access, use
++ * copy_from_kernel_nofault() to read the instruction:
++ */
++ if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
++ return NULL;
++
++ if (is_endbr(insn)) {
+ *on_func_entry = !offset || offset == 4;
+ if (*on_func_entry)
+ offset = 4;
+@@ -576,7 +585,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
+ {
+ unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
+
+- int3_emulate_call(regs, regs_get_register(regs, offs));
++ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
++ int3_emulate_jmp(regs, regs_get_register(regs, offs));
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
+
+diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
+index dd2ec14adb77ba..15af7e98e161a4 100644
+--- a/arch/x86/kernel/kprobes/ftrace.c
++++ b/arch/x86/kernel/kprobes/ftrace.c
+@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct kprobe_ctlblk *kcb;
+ int bit;
+
++ if (unlikely(kprobe_ftrace_disabled))
++ return;
++
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index fb8f52149be9ad..f2fff625576d56 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -24,8 +24,8 @@
+
+ static int kvmclock __initdata = 1;
+ static int kvmclock_vsyscall __initdata = 1;
+-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
+-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
++static int msr_kvm_system_time __ro_after_init;
++static int msr_kvm_wall_clock __ro_after_init;
+ static u64 kvm_sched_clock_offset __ro_after_init;
+
+ static int __init parse_no_kvmclock(char *arg)
+@@ -195,7 +195,8 @@ static void kvm_setup_secondary_clock(void)
+
+ void kvmclock_disable(void)
+ {
+- native_write_msr(msr_kvm_system_time, 0, 0);
++ if (msr_kvm_system_time)
++ native_write_msr(msr_kvm_system_time, 0, 0);
+ }
+
+ static void __init kvmclock_init_mem(void)
+@@ -294,7 +295,10 @@ void __init kvmclock_init(void)
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
+ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
+- } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
++ } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
++ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
++ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
++ } else {
+ return;
+ }
+
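Starting the MSR variables at zero and assigning them only once a clocksource feature is confirmed lets kvmclock_disable() treat zero as a "never initialized" sentinel instead of unconditionally writing the legacy MSR. The pattern in isolation; the MSR numbers follow the KVM paravirt ABI:

#include <stdio.h>

#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01u
#define MSR_KVM_SYSTEM_TIME     0x00000012u

static unsigned int msr_kvm_system_time;        /* 0 == never initialized */

static void native_write_msr(unsigned int msr, unsigned int lo, unsigned int hi)
{
        printf("wrmsr 0x%x <- %u:%u\n", msr, hi, lo);
}

static void kvmclock_init(int has_new, int has_old)
{
        if (has_new)
                msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
        else if (has_old)
                msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
        /* else: leave the sentinel, kvmclock stays disabled */
}

static void kvmclock_disable(void)
{
        if (msr_kvm_system_time)                /* skip if never set up */
                native_write_msr(msr_kvm_system_time, 0, 0);
}

int main(void)
{
        kvmclock_init(0, 0);    /* no clocksource feature at all */
        kvmclock_disable();     /* correctly a no-op now */
        return 0;
}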
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 1a3e2c05a8a5b6..2fa12d1dc67602 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -28,6 +28,7 @@
+ #include <asm/setup.h>
+ #include <asm/set_memory.h>
+ #include <asm/cpu.h>
++#include <asm/efi.h>
+
+ #ifdef CONFIG_ACPI
+ /*
+@@ -90,6 +91,8 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p)
+ {
+ #ifdef CONFIG_EFI
+ unsigned long mstart, mend;
++ void *kaddr;
++ int ret;
+
+ if (!efi_enabled(EFI_BOOT))
+ return 0;
+@@ -105,6 +108,30 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p)
+ if (!mstart)
+ return 0;
+
++ ret = kernel_ident_mapping_init(info, level4p, mstart, mend);
++ if (ret)
++ return ret;
++
++ kaddr = memremap(mstart, mend - mstart, MEMREMAP_WB);
++ if (!kaddr) {
++ pr_err("Could not map UEFI system table\n");
++ return -ENOMEM;
++ }
++
++ mstart = efi_config_table;
++
++ if (efi_enabled(EFI_64BIT)) {
++ efi_system_table_64_t *stbl = (efi_system_table_64_t *)kaddr;
++
++ mend = mstart + sizeof(efi_config_table_64_t) * stbl->nr_tables;
++ } else {
++ efi_system_table_32_t *stbl = (efi_system_table_32_t *)kaddr;
++
++ mend = mstart + sizeof(efi_config_table_32_t) * stbl->nr_tables;
++ }
++
++ memunmap(kaddr);
++
+ return kernel_ident_mapping_init(info, level4p, mstart, mend);
+ #endif
+ return 0;
+@@ -298,8 +325,15 @@ void machine_kexec_cleanup(struct kimage *image)
+ void machine_kexec(struct kimage *image)
+ {
+ unsigned long page_list[PAGES_NR];
+- void *control_page;
++ unsigned int host_mem_enc_active;
+ int save_ftrace_enabled;
++ void *control_page;
++
++ /*
++	 * This must be done before load_segments(), because if call depth
++	 * tracking is in use, GS must be valid to make any function calls.
++ */
++ host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
+
+ #ifdef CONFIG_KEXEC_JUMP
+ if (image->preserve_context)
+@@ -361,7 +395,7 @@ void machine_kexec(struct kimage *image)
+ (unsigned long)page_list,
+ image->start,
+ image->preserve_context,
+- cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
++ host_mem_enc_active);
+
+ #ifdef CONFIG_KEXEC_JUMP
+ if (image->preserve_context)
+diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
+index c94dec6a18345a..1f54eedc3015e9 100644
+--- a/arch/x86/kernel/mmconf-fam10h_64.c
++++ b/arch/x86/kernel/mmconf-fam10h_64.c
+@@ -9,6 +9,7 @@
+ #include <linux/pci.h>
+ #include <linux/dmi.h>
+ #include <linux/range.h>
++#include <linux/acpi.h>
+
+ #include <asm/pci-direct.h>
+ #include <linux/sort.h>
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index a0c551846b35f1..87aee638e1a5d8 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -507,12 +507,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
+ }
+ this_cpu_write(nmi_state, NMI_EXECUTING);
+ this_cpu_write(nmi_cr2, read_cr2());
++
++nmi_restart:
+ if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
+ WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
+ WARN_ON_ONCE(!(nsp->idt_seq & 0x1));
+ WRITE_ONCE(nsp->recv_jiffies, jiffies);
+ }
+-nmi_restart:
+
+ /*
+ * Needs to happen before DR7 is accessed, because the hypervisor can
+@@ -548,16 +549,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
+
+ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+ write_cr2(this_cpu_read(nmi_cr2));
+- if (this_cpu_dec_return(nmi_state))
+- goto nmi_restart;
+-
+- if (user_mode(regs))
+- mds_user_clear_cpu_buffers();
+ if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
+ WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
+ WARN_ON_ONCE(nsp->idt_seq & 0x1);
+ WRITE_ONCE(nsp->recv_jiffies, jiffies);
+ }
++ if (this_cpu_dec_return(nmi_state))
++ goto nmi_restart;
+ }
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+@@ -631,7 +629,7 @@ void nmi_backtrace_stall_check(const struct cpumask *btp)
+ msgp = nmi_check_stall_msg[idx];
+ if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
+ modp = ", but OK because ignore_nmis was set";
+- if (nmi_seq & ~0x1)
++ if (nmi_seq & 0x1)
+ msghp = " (CPU currently in NMI handler function)";
+ else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
+ msghp = " (CPU exited one NMI handler function)";
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 97f1436c1a2034..8d51c86caa415f 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -71,13 +71,12 @@ DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
+ DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
+ #endif
+
+-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
++DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+ void __init native_pv_lock_init(void)
+ {
+- if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+- !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+- static_branch_disable(&virt_spin_lock_key);
++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
++ static_branch_enable(&virt_spin_lock_key);
+ }
+
+ static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
+diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
+index 319fef37d9dce4..cc2c34ba7228ac 100644
+--- a/arch/x86/kernel/probe_roms.c
++++ b/arch/x86/kernel/probe_roms.c
+@@ -203,16 +203,6 @@ void __init probe_roms(void)
+ unsigned char c;
+ int i;
+
+- /*
+- * The ROM memory range is not part of the e820 table and is therefore not
+- * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
+- * memory, and SNP requires encrypted memory to be validated before access.
+- * Do that here.
+- */
+- snp_prep_memory(video_rom_resource.start,
+- ((system_rom_resource.end + 1) - video_rom_resource.start),
+- SNP_PAGE_STATE_PRIVATE);
+-
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index b6f4e8399fca20..5351f293f770b5 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -1030,7 +1030,10 @@ unsigned long arch_align_stack(unsigned long sp)
+
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+- return randomize_page(mm->brk, 0x02000000);
++ if (mmap_is_ia32())
++ return randomize_page(mm->brk, SZ_32M);
++
++ return randomize_page(mm->brk, SZ_1G);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 33b268747bb7bb..d595ef7c1de05e 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -138,7 +138,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+ log_lvl, d3, d6, d7);
+ }
+
+- if (cpu_feature_enabled(X86_FEATURE_OSPKE))
++ if (cr4 & X86_CR4_PKE)
+ printk("%sPKRU: %08x\n", log_lvl, read_pkru());
+ }
+
+@@ -750,6 +750,27 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
+
+ #define LAM_U57_BITS 6
+
++static void enable_lam_func(void *__mm)
++{
++ struct mm_struct *mm = __mm;
++
++ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) {
++ write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
++ set_tlbstate_lam_mode(mm);
++ }
++}
++
++static void mm_enable_lam(struct mm_struct *mm)
++{
++ /*
++ * Even though the process must still be single-threaded at this
++ * point, kernel threads may be using the mm. IPI those kernel
++ * threads if they exist.
++ */
++ on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
++ set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
++}
++
+ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
+ {
+ if (!cpu_feature_enabled(X86_FEATURE_LAM))
+@@ -766,6 +787,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
++ /*
++ * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from
++	 * being enabled unless the process is single-threaded:
++ */
+ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) {
+ mmap_write_unlock(mm);
+ return -EBUSY;
+@@ -782,9 +807,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
+ return -EINVAL;
+ }
+
+- write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
+- set_tlbstate_lam_mode(mm);
+- set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
++ mm_enable_lam(mm);
+
+ mmap_write_unlock(mm);
+
+diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
+index 1309b9b053386b..2e7066980f3e8b 100644
+--- a/arch/x86/kernel/rtc.c
++++ b/arch/x86/kernel/rtc.c
+@@ -67,7 +67,7 @@ void mach_get_cmos_time(struct timespec64 *now)
+ return;
+ }
+
+- if (mc146818_get_time(&tm)) {
++ if (mc146818_get_time(&tm, 1000)) {
+ pr_err("Unable to read current time from RTC\n");
+ now->tv_sec = now->tv_nsec = 0;
+ return;
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index b098b1fa247081..eb129277dcdd64 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -9,7 +9,6 @@
+ #include <linux/console.h>
+ #include <linux/crash_dump.h>
+ #include <linux/dma-map-ops.h>
+-#include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/ima.h>
+ #include <linux/init_ohci1394_dma.h>
+@@ -36,6 +35,7 @@
+ #include <asm/bios_ebda.h>
+ #include <asm/bugs.h>
+ #include <asm/cacheinfo.h>
++#include <asm/coco.h>
+ #include <asm/cpu.h>
+ #include <asm/efi.h>
+ #include <asm/gart.h>
+@@ -1029,7 +1029,7 @@ void __init setup_arch(char **cmdline_p)
+ efi_init();
+
+ reserve_ibft_region();
+- dmi_setup();
++ x86_init.resources.dmi_setup();
+
+ /*
+ * VMware detection requires dmi to be available, so this
+@@ -1121,6 +1121,7 @@ void __init setup_arch(char **cmdline_p)
+ * memory size.
+ */
+ sev_setup_arch();
++ cc_random_init();
+
+ efi_fake_memmap();
+ efi_find_mirror();
+diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
+index ccb0915e84e10c..acbec4de3ec31a 100644
+--- a/arch/x86/kernel/sev-shared.c
++++ b/arch/x86/kernel/sev-shared.c
+@@ -89,7 +89,8 @@ static bool __init sev_es_check_cpu_features(void)
+ return true;
+ }
+
+-static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
++static void __head __noreturn
++sev_es_terminate(unsigned int set, unsigned int reason)
+ {
+ u64 val = GHCB_MSR_TERM_REQ;
+
+@@ -326,13 +327,7 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid
+ */
+ static const struct snp_cpuid_table *snp_cpuid_get_table(void)
+ {
+- void *ptr;
+-
+- asm ("lea cpuid_table_copy(%%rip), %0"
+- : "=r" (ptr)
+- : "p" (&cpuid_table_copy));
+-
+- return ptr;
++ return &RIP_REL_REF(cpuid_table_copy);
+ }
+
+ /*
+@@ -391,7 +386,7 @@ static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
+ return xsave_size;
+ }
+
+-static bool
++static bool __head
+ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
+ {
+ const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+@@ -528,7 +523,8 @@ static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+ * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
+ * should be treated as fatal by caller.
+ */
+-static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
++static int __head
++snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+ {
+ const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+
+@@ -556,9 +552,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
+ leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
+
+ /* Skip post-processing for out-of-range zero leafs. */
+- if (!(leaf->fn <= cpuid_std_range_max ||
+- (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
+- (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
++ if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
++ (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
++ (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
+ return 0;
+ }
+
+@@ -570,7 +566,7 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
+ * page yet, so it only supports the MSR based communication with the
+ * hypervisor and only the CPUID exit-code.
+ */
+-void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
++void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ {
+ unsigned int subfn = lower_bits(regs->cx, 32);
+ unsigned int fn = lower_bits(regs->ax, 32);
+@@ -1016,7 +1012,8 @@ struct cc_setup_data {
+ * Search for a Confidential Computing blob passed in as a setup_data entry
+ * via the Linux Boot Protocol.
+ */
+-static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
++static __head
++struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
+ {
+ struct cc_setup_data *sd = NULL;
+ struct setup_data *hdr;
+@@ -1043,7 +1040,7 @@ static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
+ * mapping needs to be updated in sync with all the changes to virtual memory
+ * layout and related mapping facilities throughout the boot process.
+ */
+-static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
++static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+ {
+ const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
+ int i;
+@@ -1063,11 +1060,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+ const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
+
+ if (fn->eax_in == 0x0)
+- cpuid_std_range_max = fn->eax;
++ RIP_REL_REF(cpuid_std_range_max) = fn->eax;
+ else if (fn->eax_in == 0x40000000)
+- cpuid_hyp_range_max = fn->eax;
++ RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
+ else if (fn->eax_in == 0x80000000)
+- cpuid_ext_range_max = fn->eax;
++ RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
+ }
+ }
+
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 6395bfd87b68b5..9905dc0e0b0960 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -23,8 +23,10 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/psp-sev.h>
++#include <linux/dmi.h>
+ #include <uapi/linux/sev-guest.h>
+
++#include <asm/init.h>
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+ #include <asm/sev.h>
+@@ -682,8 +684,9 @@ static u64 __init get_jump_table_addr(void)
+ return ret;
+ }
+
+-static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+- unsigned long npages, enum psc_op op)
++static void __head
++early_set_pages_state(unsigned long vaddr, unsigned long paddr,
++ unsigned long npages, enum psc_op op)
+ {
+ unsigned long paddr_end;
+ u64 val;
+@@ -739,7 +742,7 @@ static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ }
+
+-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
++void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages)
+ {
+ /*
+@@ -748,7 +751,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /*
+@@ -767,28 +770,13 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
+ * This eliminates worries about jump tables or checking boot_cpu_data
+ * in the cc_platform_has() function.
+ */
+- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
+ return;
+
+ /* Ask hypervisor to mark the memory pages shared in the RMP table. */
+ early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
+ }
+
+-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
+-{
+- unsigned long vaddr, npages;
+-
+- vaddr = (unsigned long)__va(paddr);
+- npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
+-
+- if (op == SNP_PAGE_STATE_PRIVATE)
+- early_snp_set_memory_private(vaddr, paddr, npages);
+- else if (op == SNP_PAGE_STATE_SHARED)
+- early_snp_set_memory_shared(vaddr, paddr, npages);
+- else
+- WARN(1, "invalid memory op %d\n", op);
+-}
+-
+ static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
+ unsigned long vaddr_end, int op)
+ {
+@@ -1234,10 +1222,6 @@ void setup_ghcb(void)
+ if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+ return;
+
+- /* First make sure the hypervisor talks a supported protocol. */
+- if (!sev_es_negotiate_protocol())
+- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+-
+ /*
+ * Check whether the runtime #VC exception handler is active. It uses
+ * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
+@@ -1254,6 +1238,13 @@ void setup_ghcb(void)
+ return;
+ }
+
++ /*
++ * Make sure the hypervisor talks a supported protocol.
++ * This gets called only in the BSP boot phase.
++ */
++ if (!sev_es_negotiate_protocol())
++ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
++
+ /*
+ * Clear the boot_ghcb. The first exception comes in before the bss
+ * section is cleared.
+@@ -2056,7 +2047,7 @@ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
+ *
+ * Scan for the blob in that order.
+ */
+-static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
++static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+ {
+ struct cc_blob_sev_info *cc_info;
+
+@@ -2082,7 +2073,7 @@ static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+ return cc_info;
+ }
+
+-bool __init snp_init(struct boot_params *bp)
++bool __head snp_init(struct boot_params *bp)
+ {
+ struct cc_blob_sev_info *cc_info;
+
+@@ -2104,11 +2095,22 @@ bool __init snp_init(struct boot_params *bp)
+ return true;
+ }
+
+-void __init __noreturn snp_abort(void)
++void __head __noreturn snp_abort(void)
+ {
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+ }
+
++/*
++ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES is
++ * enabled, as the alternative (fallback) logic for DMI probing in the legacy
++ * ROM region can cause a crash since this region is not pre-validated.
++ */
++void __init snp_dmi_setup(void)
++{
++ if (efi_enabled(EFI_CONFIG_TABLES))
++ dmi_setup();
++}
++
+ static void dump_cpuid_table(void)
+ {
+ const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
+diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
+index 59e15dd8d0f866..19e4db582fb69a 100644
+--- a/arch/x86/kernel/shstk.c
++++ b/arch/x86/kernel/shstk.c
+@@ -577,3 +577,14 @@ long shstk_prctl(struct task_struct *task, int option, unsigned long arg2)
+ return wrss_control(true);
+ return -EINVAL;
+ }
++
++int shstk_update_last_frame(unsigned long val)
++{
++ unsigned long ssp;
++
++ if (!features_enabled(ARCH_SHSTK_SHSTK))
++ return 0;
++
++ ssp = get_user_shstk_addr();
++ return write_user_shstk_64((u64 __user *)ssp, (u64)val);
++}
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 65fe2094da59b8..876d3b30c2c774 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -83,6 +83,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size,
+ unsigned long math_size = 0;
+ unsigned long sp = regs->sp;
+ unsigned long buf_fx = 0;
++ u32 pkru = read_pkru();
+
+ /* redzone */
+ if (!ia32_frame)
+@@ -138,7 +139,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size,
+ }
+
+ /* save i387 and extended state */
+- if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
++ if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size, pkru))
+ return (void __user *)-1L;
+
+ return (void __user *)sp;
+diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
+index cacf2ede62175d..449a6ed0b8c982 100644
+--- a/arch/x86/kernel/signal_64.c
++++ b/arch/x86/kernel/signal_64.c
+@@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+ frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
+ uc_flags = frame_uc_flags(regs);
+
+- if (setup_signal_shadow_stack(ksig))
+- return -EFAULT;
+-
+ if (!user_access_begin(frame, sizeof(*frame)))
+ return -EFAULT;
+
+@@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+ return -EFAULT;
+ }
+
++ if (setup_signal_shadow_stack(ksig))
++ return -EFAULT;
++
+ /* Set up registers for signal handler */
+ regs->di = ksig->sig;
+ /* In case the signal handler was declared without prototypes */
+@@ -260,13 +260,13 @@ SYSCALL_DEFINE0(rt_sigreturn)
+
+ set_current_blocked(&set);
+
+- if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
++ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+- if (restore_signal_shadow_stack())
++ if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
+ goto badframe;
+
+- if (restore_altstack(&frame->uc.uc_stack))
++ if (restore_signal_shadow_stack())
+ goto badframe;
+
+ return regs->ax;
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 2a187c0cbd5b11..ce77dac9a0202a 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -60,6 +60,7 @@
+ #include <linux/stackprotector.h>
+ #include <linux/cpuhotplug.h>
+ #include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
+
+ #include <asm/acpi.h>
+ #include <asm/cacheinfo.h>
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index e42faa792c0793..52e1f3f0b361ce 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -27,25 +27,7 @@
+
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+- unsigned long pc = instruction_pointer(regs);
+-
+- if (!user_mode(regs) && in_lock_functions(pc)) {
+-#ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
+-#else
+- unsigned long *sp = (unsigned long *)regs->sp;
+- /*
+- * Return address is either directly at stack pointer
+- * or above a saved flags. Eflags has bits 22-31 zero,
+- * kernel addresses don't.
+- */
+- if (sp[0] >> 22)
+- return sp[0];
+- if (sp[1] >> 22)
+- return sp[1];
+-#endif
+- }
+- return pc;
++ return instruction_pointer(regs);
+ }
+ EXPORT_SYMBOL(profile_pc);
+
+diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
+index 1123ef3ccf9011..4334033658edfb 100644
+--- a/arch/x86/kernel/tsc_sync.c
++++ b/arch/x86/kernel/tsc_sync.c
+@@ -193,11 +193,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
+ cur->warned = false;
+
+ /*
+- * If a non-zero TSC value for socket 0 may be valid then the default
+- * adjusted value cannot assumed to be zero either.
++ * The default adjust value cannot be assumed to be zero on any socket.
+ */
+- if (tsc_async_resets)
+- cur->adjusted = bootval;
++ cur->adjusted = bootval;
+
+ /*
+ * Check whether this CPU is the first in a package to come up. In
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 6c07f6daaa227a..6402fb3089d262 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -1076,8 +1076,13 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
+ return orig_ret_vaddr;
+
+ nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
+- if (likely(!nleft))
++ if (likely(!nleft)) {
++ if (shstk_update_last_frame(trampoline_vaddr)) {
++ force_sig(SIGSEGV);
++ return -1;
++ }
+ return orig_ret_vaddr;
++ }
+
+ if (nleft != rasize) {
+ pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index f15fb71f280e24..54a5596adaa61e 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -139,10 +139,7 @@ SECTIONS
+ STATIC_CALL_TEXT
+
+ ALIGN_ENTRY_TEXT_BEGIN
+-#ifdef CONFIG_CPU_SRSO
+ *(.text..__x86.rethunk_untrain)
+-#endif
+-
+ ENTRY_TEXT
+
+ #ifdef CONFIG_CPU_SRSO
+@@ -520,12 +517,12 @@ INIT_PER_CPU(irq_stack_backing_store);
+ "fixed_percpu_data is not at start of per-cpu area");
+ #endif
+
+-#ifdef CONFIG_RETHUNK
++#ifdef CONFIG_CPU_UNRET_ENTRY
+ . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+-. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+
+ #ifdef CONFIG_CPU_SRSO
++. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ /*
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index a37ebd3b47736d..268627a17cf0d8 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -3,10 +3,12 @@
+ *
+ * For licencing details see kernel-base/COPYING
+ */
++#include <linux/dmi.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+ #include <linux/export.h>
+ #include <linux/pci.h>
++#include <linux/acpi.h>
+
+ #include <asm/acpi.h>
+ #include <asm/bios_ebda.h>
+@@ -66,6 +68,7 @@ struct x86_init_ops x86_init __initdata = {
+ .probe_roms = probe_roms,
+ .reserve_resources = reserve_standard_io_resources,
+ .memory_setup = e820__memory_setup_default,
++ .dmi_setup = dmi_setup,
+ },
+
+ .mpparse = {
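Routing dmi_setup() through the x86_init ops table lets a platform such as SEV-SNP (via snp_dmi_setup() earlier in this patch) substitute its own implementation before the call site in setup_arch() runs. The indirection reduced to its essentials:

#include <stdio.h>

struct x86_init_resources {
        void (*dmi_setup)(void);        /* platform hook, default below */
};

static void dmi_setup(void) { puts("generic DMI scan"); }
static void snp_dmi_setup(void) { puts("SNP: DMI scan only if EFI tables"); }

static struct x86_init_resources resources = {
        .dmi_setup = dmi_setup,         /* default wired up at build time */
};

int main(void)
{
        int snp_guest = 1;              /* pretend detection said SEV-SNP */

        if (snp_guest)
                resources.dmi_setup = snp_dmi_setup;    /* early override */
        resources.dmi_setup();          /* call site stays unchanged */
        return 0;
}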
+diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
+index 80e3fe184d17e6..a99ffc3f3a3fdb 100644
+--- a/arch/x86/kvm/Makefile
++++ b/arch/x86/kvm/Makefile
+@@ -26,6 +26,10 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
+ vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
+ kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
+
++ifdef CONFIG_HYPERV
++kvm-intel-y += vmx/vmx_onhyperv.o
++endif
++
+ kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
+ svm/sev.o svm/hyperv.o
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 773132c3bf5af7..ac042a9a61f576 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -362,6 +362,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+
+ kvm_update_pv_runtime(vcpu);
+
++ vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
+
+@@ -677,6 +678,11 @@ void kvm_set_cpu_caps(void)
+ F(AMX_COMPLEX)
+ );
+
++ kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
++ F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
++ F(BHI_CTRL) | F(MCDT_NO)
++ );
++
+ kvm_cpu_cap_mask(CPUID_D_1_EAX,
+ F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
+ );
+@@ -956,13 +962,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ break;
+ /* function 7 has additional index. */
+ case 7:
+- entry->eax = min(entry->eax, 1u);
++ max_idx = entry->eax = min(entry->eax, 2u);
+ cpuid_entry_override(entry, CPUID_7_0_EBX);
+ cpuid_entry_override(entry, CPUID_7_ECX);
+ cpuid_entry_override(entry, CPUID_7_EDX);
+
+- /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
+- if (entry->eax == 1) {
++ /* KVM only supports up to 0x7.2, capped above via min(). */
++ if (max_idx >= 1) {
+ entry = do_host_cpuid(array, function, 1);
+ if (!entry)
+ goto out;
+@@ -972,6 +978,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ entry->ebx = 0;
+ entry->ecx = 0;
+ }
++ if (max_idx >= 2) {
++ entry = do_host_cpuid(array, function, 2);
++ if (!entry)
++ goto out;
++
++ cpuid_entry_override(entry, CPUID_7_2_EDX);
++ entry->ecx = 0;
++ entry->ebx = 0;
++ entry->eax = 0;
++ }
+ break;
+ case 0xa: { /* Architectural Performance Monitoring */
+ union cpuid10_eax eax;
+@@ -1196,9 +1212,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ entry->eax = entry->ebx = entry->ecx = 0;
+ break;
+ case 0x80000008: {
+- unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+- unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+- unsigned phys_as = entry->eax & 0xff;
++ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
++ unsigned int phys_as;
+
+ /*
+ * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
+@@ -1206,16 +1221,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ * reductions in MAXPHYADDR for memory encryption affect shadow
+ * paging, too.
+ *
+- * If TDP is enabled but an explicit guest MAXPHYADDR is not
+- * provided, use the raw bare metal MAXPHYADDR as reductions to
+- * the HPAs do not affect GPAs.
++ * If TDP is enabled, use the raw bare metal MAXPHYADDR as
++ * reductions to the HPAs do not affect GPAs.
+ */
+- if (!tdp_enabled)
+- g_phys_as = boot_cpu_data.x86_phys_bits;
+- else if (!g_phys_as)
+- g_phys_as = phys_as;
++ if (!tdp_enabled) {
++ phys_as = boot_cpu_data.x86_phys_bits;
++ } else {
++ phys_as = entry->eax & 0xff;
++ }
+
+- entry->eax = g_phys_as | (virt_as << 8);
++ entry->eax = phys_as | (virt_as << 8);
+ entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
+ entry->edx = 0;
+ cpuid_entry_override(entry, CPUID_8000_0008_EBX);
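CPUID leaf 0x80000008 packs the physical-address width into EAX[7:0] and the virtual width into EAX[15:8]; the rewrite drops the separate guest-MAXPHYADDR variable and recombines the two fields the same way. A helper showing the encoding with sample widths:

#include <assert.h>
#include <stdint.h>

static uint32_t encode_0x80000008_eax(unsigned int phys_as, unsigned int virt_as)
{
        if (virt_as < 48)               /* max((eax >> 8) & 0xff, 48U) */
                virt_as = 48;
        return (phys_as & 0xff) | ((virt_as & 0xff) << 8);
}

int main(void)
{
        uint32_t eax = encode_0x80000008_eax(46, 48);

        assert((eax & 0xff) == 46);             /* MAXPHYADDR */
        assert(((eax >> 8) & 0xff) == 48);      /* virtual bits */
        return 0;
}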
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 284fa4704553da..57ee789ada1411 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -125,6 +125,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
+ return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
+ }
+
++static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.is_amd_compatible;
++}
++
++static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
++{
++ return !guest_cpuid_is_amd_compatible(vcpu);
++}
++
+ static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 7c2dac6824e262..238afd7335e46d 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
+
+ stimer_cleanup(stimer);
+ stimer->count = count;
+- if (stimer->count == 0)
+- stimer->config.enable = 0;
+- else if (stimer->config.auto_enable)
+- stimer->config.enable = 1;
++ if (!host) {
++ if (stimer->count == 0)
++ stimer->config.enable = 0;
++ else if (stimer->config.auto_enable)
++ stimer->config.enable = 1;
++ }
+
+ if (stimer->config.enable)
+ stimer_mark_pending(stimer, false);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 3e977dbbf9933d..1380f34897770d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -41,6 +41,7 @@
+ #include "ioapic.h"
+ #include "trace.h"
+ #include "x86.h"
++#include "xen.h"
+ #include "cpuid.h"
+ #include "hyperv.h"
+ #include "smm.h"
+@@ -499,8 +500,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
+ }
+
+ /* Check if there are APF page ready requests pending */
+- if (enabled)
++ if (enabled) {
+ kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
++ kvm_xen_sw_enable_lapic(apic->vcpu);
++ }
+ }
+
+ static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
+@@ -2440,26 +2443,49 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+
++#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
++
++int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
++{
++ if (data & X2APIC_ICR_RESERVED_BITS)
++ return 1;
++
++ /*
++ * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
++ * only AMD requires it to be zero, Intel essentially just ignores the
++ * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
++ * the CPU performs the reserved bits checks, i.e. the underlying CPU
++ * behavior will "win". Arbitrarily clear the BUSY bit, as there is no
++ * sane way to provide consistent behavior with respect to hardware.
++ */
++ data &= ~APIC_ICR_BUSY;
++
++ kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
++ kvm_lapic_set_reg64(apic, APIC_ICR, data);
++ trace_kvm_apic_write(APIC_ICR, data);
++ return 0;
++}
++
+ /* emulate APIC access in a trap manner */
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+- u64 val;
+
+ /*
+- * ICR is a single 64-bit register when x2APIC is enabled. For legacy
+- * xAPIC, ICR writes need to go down the common (slightly slower) path
+- * to get the upper half from ICR2.
++	 * ICR is a single 64-bit register when x2APIC is enabled, all other
++ * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
++ * go down the common path to get the upper half from ICR2.
++ *
++ * Note, using the write helpers may incur an unnecessary write to the
++ * virtual APIC state, but KVM needs to conditionally modify the value
++ * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
++ * conditional branches is likely a wash relative to the cost of the
++	 * maybe-unnecessary write, and both are in the noise anyway.
+ */
+- if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
+- val = kvm_lapic_get_reg64(apic, APIC_ICR);
+- kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+- trace_kvm_apic_write(APIC_ICR, val);
+- } else {
+- /* TODO: optimize to just emulate side effect w/o one more write */
+- val = kvm_lapic_get_reg(apic, offset);
+- kvm_lapic_reg_write(apic, offset, (u32)val);
+- }
++ if (apic_x2apic_mode(apic) && offset == APIC_ICR)
++ WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)));
++ else
++ kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+
+@@ -2670,6 +2696,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ u64 msr_val;
+ int i;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ if (!init_event) {
+ msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
+ if (kvm_vcpu_is_reset_bsp(vcpu))
+@@ -2767,7 +2795,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+ trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
+
+ r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+- if (r && lvt_type == APIC_LVTPC)
++ if (r && lvt_type == APIC_LVTPC &&
++ guest_cpuid_is_intel_compatible(apic->vcpu))
+ kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+ return r;
+ }
+@@ -2981,6 +3010,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ int r;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
+ /* set SPIV separately to get count of SW disabled APICs right */
+ apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
+@@ -3145,16 +3176,6 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+ return 0;
+ }
+
+-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+-{
+- data &= ~APIC_ICR_BUSY;
+-
+- kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+- kvm_lapic_set_reg64(apic, APIC_ICR, data);
+- trace_kvm_apic_write(APIC_ICR, data);
+- return 0;
+-}
+-
+ static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
+ {
+ u32 low;
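The new X2APIC_ICR_RESERVED_BITS mask faults writes that set bits 31:20, 17:16, or 13, while the BUSY bit (bit 12) is merely cleared, since Intel and AMD disagree on whether setting it is an error. A quick check of the mask arithmetic with toy GENMASK_ULL/BIT macros:

#include <assert.h>
#include <stdint.h>

#define BIT(n)              (1ull << (n))
#define GENMASK_ULL(h, l)   (((~0ull) >> (63 - (h))) & ~((1ull << (l)) - 1))

#define APIC_ICR_BUSY       BIT(12)
#define X2APIC_ICR_RESERVED_BITS \
        (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))

static int icr_write(uint64_t data)
{
        if (data & X2APIC_ICR_RESERVED_BITS)
                return 1;                       /* inject #GP */
        data &= ~APIC_ICR_BUSY;                 /* tolerated, just cleared */
        /* kvm_apic_send_ipi() would use the sanitized data here */
        return 0;
}

int main(void)
{
        assert(icr_write(BIT(13)) == 1);        /* reserved bit -> fault */
        assert(icr_write(APIC_ICR_BUSY) == 0);  /* busy bit -> ignored */
        assert((X2APIC_ICR_RESERVED_BITS & APIC_ICR_BUSY) == 0);
        return 0;
}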
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index f7901cb4d2fa4b..294775b7383b42 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3120,7 +3120,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
+ if (pud_none(pud) || !pud_present(pud))
+ goto out;
+
+- if (pud_large(pud)) {
++ if (pud_leaf(pud)) {
+ level = PG_LEVEL_1G;
+ goto out;
+ }
+@@ -4788,7 +4788,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+ context->cpu_role.base.level, is_efer_nx(context),
+ guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+ is_cr4_pse(context),
+- guest_cpuid_is_amd_or_hygon(vcpu));
++ guest_cpuid_is_amd_compatible(vcpu));
+ }
+
+ static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 6cd4dd631a2fac..8eef3ed5fe04e2 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1506,6 +1506,16 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
+ }
+ }
+
++static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
++{
++ /*
++ * All TDP MMU shadow pages share the same role as their root, aside
++ * from level, so it is valid to key off any shadow page to determine if
++ * write protection is needed for an entire tree.
++ */
++ return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
++}
++
+ /*
+ * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
+ * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
+@@ -1516,7 +1526,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
+ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ gfn_t start, gfn_t end)
+ {
+- u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
++ const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
++ shadow_dirty_mask;
+ struct tdp_iter iter;
+ bool spte_set = false;
+
+@@ -1530,7 +1541,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ if (!is_shadow_present_pte(iter.old_spte))
+ continue;
+
+- KVM_MMU_WARN_ON(kvm_ad_enabled() &&
++ KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
+ spte_ad_need_write_protect(iter.old_spte));
+
+ if (!(iter.old_spte & dbit))
+@@ -1578,8 +1589,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
+ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
+ gfn_t gfn, unsigned long mask, bool wrprot)
+ {
+- u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
+- shadow_dirty_mask;
++ const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
++ shadow_dirty_mask;
+ struct tdp_iter iter;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+@@ -1591,7 +1602,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
+ if (!mask)
+ break;
+
+- KVM_MMU_WARN_ON(kvm_ad_enabled() &&
++ KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
+ spte_ad_need_write_protect(iter.old_spte));
+
+ if (iter.level > PG_LEVEL_4K ||
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index 9ae07db6f0f648..da2d82e3a8735e 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -250,6 +250,24 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
+ return true;
+ }
+
++static void pmc_release_perf_event(struct kvm_pmc *pmc)
++{
++ if (pmc->perf_event) {
++ perf_event_release_kernel(pmc->perf_event);
++ pmc->perf_event = NULL;
++ pmc->current_config = 0;
++ pmc_to_pmu(pmc)->event_count--;
++ }
++}
++
++static void pmc_stop_counter(struct kvm_pmc *pmc)
++{
++ if (pmc->perf_event) {
++ pmc->counter = pmc_read_counter(pmc);
++ pmc_release_perf_event(pmc);
++ }
++}
++
+ static int filter_cmp(const void *pa, const void *pb, u64 mask)
+ {
+ u64 a = *(u64 *)pa & mask;
+@@ -639,22 +657,79 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ return 0;
+ }
+
+-/* refresh PMU settings. This function generally is called when underlying
+- * settings are changed (such as changes of PMU CPUID by guest VMs), which
+- * should rarely happen.
++void kvm_pmu_reset(struct kvm_vcpu *vcpu)
++{
++ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
++ struct kvm_pmc *pmc;
++ int i;
++
++ pmu->need_cleanup = false;
++
++ bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
++
++ for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
++ pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
++ if (!pmc)
++ continue;
++
++ pmc_stop_counter(pmc);
++ pmc->counter = 0;
++
++ if (pmc_is_gp(pmc))
++ pmc->eventsel = 0;
++ }
++
++ pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
++
++ static_call_cond(kvm_x86_pmu_reset)(vcpu);
++}
++
++
++/*
++ * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
++ * and/or PERF_CAPABILITIES.
+ */
+ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
+ {
++ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
++
+ if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
+ return;
+
+- bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
++ /*
++ * Stop/release all existing counters/events before realizing the new
++ * vPMU model.
++ */
++ kvm_pmu_reset(vcpu);
++
++ pmu->version = 0;
++ pmu->nr_arch_gp_counters = 0;
++ pmu->nr_arch_fixed_counters = 0;
++ pmu->counter_bitmask[KVM_PMC_GP] = 0;
++ pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
++ pmu->reserved_bits = 0xffffffff00200000ull;
++ pmu->raw_event_mask = X86_RAW_EVENT_MASK;
++ pmu->global_ctrl_mask = ~0ull;
++ pmu->global_status_mask = ~0ull;
++ pmu->fixed_ctr_ctrl_mask = ~0ull;
++ pmu->pebs_enable_mask = ~0ull;
++ pmu->pebs_data_cfg_mask = ~0ull;
++ bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
++
++ if (!vcpu->kvm->arch.enable_pmu)
++ return;
++
+ static_call(kvm_x86_pmu_refresh)(vcpu);
+-}
+
+-void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+-{
+- static_call(kvm_x86_pmu_reset)(vcpu);
++ /*
++ * At RESET, both Intel and AMD CPUs set all enable bits for general
++ * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
++ * was written for v1 PMUs doesn't unknowingly leave GP counters disabled
++ * in the global controls). Emulate that behavior when refreshing the
++ * PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
++ */
++ if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
++ pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
+ }
+
+ void kvm_pmu_init(struct kvm_vcpu *vcpu)
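The GENMASK_ULL() assignment above emulates the architectural RESET value of IA32_PERF_GLOBAL_CTRL: one enable bit per general-purpose counter. A quick worked example, with GENMASK_ULL re-derived here to match the kernel macro's semantics:

	#include <assert.h>

	/* Same shape as the kernel's GENMASK_ULL(h, l): bits h..l set. */
	#define GENMASK_ULL(h, l) ((~0ull >> (63 - (h))) & (~0ull << (l)))

	int main(void)
	{
		assert(GENMASK_ULL(6 - 1, 0) == 0x3full);  /* 6 GP counters -> 0x3f */
		assert(GENMASK_ULL(8 - 1, 0) == 0xffull);  /* 8 GP counters -> 0xff */
		return 0;
	}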
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 1d64113de4883e..a46aa9b25150f5 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -80,24 +80,6 @@ static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+ pmc->counter &= pmc_bitmask(pmc);
+ }
+
+-static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
+-{
+- if (pmc->perf_event) {
+- perf_event_release_kernel(pmc->perf_event);
+- pmc->perf_event = NULL;
+- pmc->current_config = 0;
+- pmc_to_pmu(pmc)->event_count--;
+- }
+-}
+-
+-static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+-{
+- if (pmc->perf_event) {
+- pmc->counter = pmc_read_counter(pmc);
+- pmc_release_perf_event(pmc);
+- }
+-}
+-
+ static inline bool pmc_is_gp(struct kvm_pmc *pmc)
+ {
+ return pmc->type == KVM_PMC_GP;
+diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
+index b816506783755a..2f4e155080badc 100644
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs {
+ CPUID_7_1_EDX,
+ CPUID_8000_0007_EDX,
+ CPUID_8000_0022_EAX,
++ CPUID_7_2_EDX,
+ NR_KVM_CPU_CAPS,
+
+ NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+@@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs {
+ #define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
+ #define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
+
++/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
++#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
++#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
++#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
++#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
++#define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
++#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
++
+ /* CPUID level 0x80000007 (EDX). */
+ #define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
+
+@@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ [CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
+ [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
+ [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
++ [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
+ };
+
+ /*
+@@ -92,10 +102,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ */
+ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
+ {
++ BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
++ BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
+ BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+ BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+ }
+@@ -106,18 +118,20 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
+ */
+ static __always_inline u32 __feature_translate(int x86_feature)
+ {
+- if (x86_feature == X86_FEATURE_SGX1)
+- return KVM_X86_FEATURE_SGX1;
+- else if (x86_feature == X86_FEATURE_SGX2)
+- return KVM_X86_FEATURE_SGX2;
+- else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
+- return KVM_X86_FEATURE_SGX_EDECCSSA;
+- else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
+- return KVM_X86_FEATURE_CONSTANT_TSC;
+- else if (x86_feature == X86_FEATURE_PERFMON_V2)
+- return KVM_X86_FEATURE_PERFMON_V2;
+-
+- return x86_feature;
++#define KVM_X86_TRANSLATE_FEATURE(f) \
++ case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
++
++ switch (x86_feature) {
++ KVM_X86_TRANSLATE_FEATURE(SGX1);
++ KVM_X86_TRANSLATE_FEATURE(SGX2);
++ KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
++ KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
++ KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
++ KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
++ KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
++ default:
++ return x86_feature;
++ }
+ }
+
+ static __always_inline u32 __feature_leaf(int x86_feature)
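Each KVM_X86_TRANSLATE_FEATURE(f) line above expands to a case label that remaps a Linux-defined feature bit onto KVM's synthetic CPUID word. A sketch of the expansion, using the kernel's word*32+bit encoding but hypothetical word numbers:

	/* Sketch: feature number = (word * 32) + bit, as in the kernel. */
	#define FEAT(word, bit)             ((word) * 32 + (bit))
	#define X86_FEATURE_RRSBA_CTRL      FEAT(12, 2)  /* hypothetical word numbers */
	#define KVM_X86_FEATURE_RRSBA_CTRL  FEAT(21, 2)

	#define KVM_X86_TRANSLATE_FEATURE(f) \
		case X86_FEATURE_##f: return KVM_X86_FEATURE_##f

	static unsigned int feature_translate(int x86_feature)
	{
		switch (x86_feature) {
		KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
		default:
			return x86_feature;
		}
	}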
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 3fea8c47679e68..60891b9ce25f61 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -247,18 +247,6 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
+ kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
+ }
+
+-static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
+-{
+- /* Nested FLUSHBYASID is not supported yet. */
+- switch(tlb_ctl) {
+- case TLB_CONTROL_DO_NOTHING:
+- case TLB_CONTROL_FLUSH_ALL_ASID:
+- return true;
+- default:
+- return false;
+- }
+-}
+-
+ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcb_ctrl_area_cached *control)
+ {
+@@ -278,9 +266,6 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ IOPM_SIZE)))
+ return false;
+
+- if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
+- return false;
+-
+ if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
+ !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
+ return false;
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index 373ff6a6687b3a..3fd47de14b38a3 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -233,21 +233,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
+ }
+ }
+
+-static void amd_pmu_reset(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+- int i;
+-
+- for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
+- struct kvm_pmc *pmc = &pmu->gp_counters[i];
+-
+- pmc_stop_counter(pmc);
+- pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
+- }
+-
+- pmu->global_ctrl = pmu->global_status = 0;
+-}
+-
+ struct kvm_pmu_ops amd_pmu_ops __initdata = {
+ .hw_event_available = amd_hw_event_available,
+ .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
+@@ -259,7 +244,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
+ .set_msr = amd_pmu_set_msr,
+ .refresh = amd_pmu_refresh,
+ .init = amd_pmu_init,
+- .reset = amd_pmu_reset,
+ .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
+ .MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
+ .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 4900c078045acc..99e72b8a96ac0b 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -57,7 +57,7 @@ static bool sev_es_enabled = true;
+ module_param_named(sev_es, sev_es_enabled, bool, 0444);
+
+ /* enable/disable SEV-ES DebugSwap support */
+-static bool sev_es_debug_swap_enabled = true;
++static bool sev_es_debug_swap_enabled = false;
+ module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
+ #else
+ #define sev_enabled false
+@@ -84,9 +84,10 @@ struct enc_region {
+ };
+
+ /* Called with the sev_bitmap_lock held, or on shutdown */
+-static int sev_flush_asids(int min_asid, int max_asid)
++static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
+ {
+- int ret, asid, error = 0;
++ int ret, error = 0;
++ unsigned int asid;
+
+ /* Check if there are any ASIDs to reclaim before performing a flush */
+ asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
+@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
+ }
+
+ /* Must be called with the sev_bitmap_lock held */
+-static bool __sev_recycle_asids(int min_asid, int max_asid)
++static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
+ {
+ if (sev_flush_asids(min_asid, max_asid))
+ return false;
+@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
+
+ static int sev_asid_new(struct kvm_sev_info *sev)
+ {
+- int asid, min_asid, max_asid, ret;
++ /*
++ * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
++ * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
++ * Note: min ASID can end up larger than the max if basic SEV support is
++ * effectively disabled by disallowing use of ASIDs for SEV guests.
++ */
++ unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
++ unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
++ unsigned int asid;
+ bool retry = true;
++ int ret;
++
++ if (min_asid > max_asid)
++ return -ENOTTY;
+
+ WARN_ON(sev->misc_cg);
+ sev->misc_cg = get_current_misc_cg();
+@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)
+
+ mutex_lock(&sev_bitmap_lock);
+
+- /*
+- * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
+- * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
+- */
+- min_asid = sev->es_active ? 1 : min_sev_asid;
+- max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+ again:
+ asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+ if (asid > max_asid) {
+@@ -187,7 +194,7 @@ static int sev_asid_new(struct kvm_sev_info *sev)
+ return ret;
+ }
+
+-static int sev_get_asid(struct kvm *kvm)
++static unsigned int sev_get_asid(struct kvm *kvm)
+ {
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+@@ -284,8 +291,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+ {
++ unsigned int asid = sev_get_asid(kvm);
+ struct sev_data_activate activate;
+- int asid = sev_get_asid(kvm);
+ int ret;
+
+ /* activate ASID on the given handle */
+@@ -612,8 +619,11 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
+ save->xss = svm->vcpu.arch.ia32_xss;
+ save->dr6 = svm->vcpu.arch.dr6;
+
+- if (sev_es_debug_swap_enabled)
++ if (sev_es_debug_swap_enabled) {
+ save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
++ pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
++ "This will not work starting with Linux 6.10\n");
++ }
+
+ pr_debug("Virtual Machine Save Area (VMSA):\n");
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
+@@ -654,6 +664,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ return ret;
+
+ vcpu->arch.guest_state_protected = true;
++
++ /*
++ * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
++ * only after setting guest_state_protected because KVM_SET_MSRS allows
++ * dynamic toggling of LBRV (for performance reasons) on write access to
++ * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
++ */
++ svm_enable_lbrv(vcpu);
+ return 0;
+ }
+
+@@ -1975,20 +1993,22 @@ int sev_mem_enc_register_region(struct kvm *kvm,
+ goto e_free;
+ }
+
+- region->uaddr = range->addr;
+- region->size = range->size;
+-
+- list_add_tail(&region->list, &sev->regions_list);
+- mutex_unlock(&kvm->lock);
+-
+ /*
+ * The guest may change the memory encryption attribute from C=0 -> C=1
+ * or vice versa for this memory range. Let's make sure caches are
+ * flushed to ensure that guest data gets written into memory with
+- * correct C-bit.
++ * correct C-bit. Note, this must be done before dropping kvm->lock,
++ * as region and its array of pages can be freed by a different task
++ * once kvm->lock is released.
+ */
+ sev_clflush_pages(region->pages, region->npages);
+
++ region->uaddr = range->addr;
++ region->size = range->size;
++
++ list_add_tail(&region->list, &sev->regions_list);
++ mutex_unlock(&kvm->lock);
++
+ return ret;
+
+ e_free:
+@@ -2229,8 +2249,10 @@ void __init sev_hardware_setup(void)
+ goto out;
+ }
+
+- sev_asid_count = max_sev_asid - min_sev_asid + 1;
+- WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
++ if (min_sev_asid <= max_sev_asid) {
++ sev_asid_count = max_sev_asid - min_sev_asid + 1;
++ WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
++ }
+ sev_supported = true;
+
+ /* SEV-ES support requested? */
+@@ -2250,6 +2272,12 @@ void __init sev_hardware_setup(void)
+ if (!boot_cpu_has(X86_FEATURE_SEV_ES))
+ goto out;
+
++ if (!lbrv) {
++ WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
++ "LBRV must be present for SEV-ES support");
++ goto out;
++ }
++
+ /* Has the system been allocated ASIDs for SEV-ES? */
+ if (min_sev_asid == 1)
+ goto out;
+@@ -2261,7 +2289,9 @@ void __init sev_hardware_setup(void)
+ out:
+ if (boot_cpu_has(X86_FEATURE_SEV))
+ pr_info("SEV %s (ASIDs %u - %u)\n",
+- sev_supported ? "enabled" : "disabled",
++ sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
++ "unusable" :
++ "disabled",
+ min_sev_asid, max_sev_asid);
+ if (boot_cpu_has(X86_FEATURE_SEV_ES))
+ pr_info("SEV-ES %s (ASIDs %u - %u)\n",
+@@ -2309,7 +2339,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
+ */
+ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
+ {
+- int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
++ unsigned int asid = sev_get_asid(vcpu->kvm);
+
+ /*
+ * Note! The address must be a kernel address, as regular page walk
+@@ -2627,7 +2657,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
+ void pre_sev_run(struct vcpu_svm *svm, int cpu)
+ {
+ struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
+- int asid = sev_get_asid(svm->vcpu.kvm);
++ unsigned int asid = sev_get_asid(svm->vcpu.kvm);
+
+ /* Assign the asid allocated with this SEV guest */
+ svm->asid = asid;
+@@ -2972,6 +3002,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+
+ set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
+ }
++
++ /*
++ * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
++ * the host/guest supports its use.
++ *
++ * guest_can_use() checks a number of requirements on the host/guest to
++ * ensure that MSR_IA32_XSS is available, but it might report true even
++ * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
++ * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
++ * to further check that the guest CPUID actually supports
++ * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
++ * guests will still get intercepted and caught in the normal
++ * kvm_emulate_rdmsr()/kvm_emulate_wrmsr() paths.
++ */
++ if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
++ else
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
+ }
+
+ void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+@@ -2994,7 +3043,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
+- svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+
+ /*
+ * An SEV-ES guest requires a VMSA area that is separate from the
+@@ -3046,10 +3094,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ /* Clear intercepts on selected MSRs */
+ set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ }
+
+ void sev_init_vmcb(struct vcpu_svm *svm)
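The reworked sev_asid_new() above computes the usable ASID window before taking any locks, so the empty-window case fails fast. The window logic in isolation, with made-up hardware bounds:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int min_sev_asid = 1;   /* example values, not real hardware */
	static unsigned int max_sev_asid = 509;

	static bool asid_window(bool es_active, unsigned int *lo, unsigned int *hi)
	{
		/* SEV-ES uses [1, min_sev_asid - 1]; plain SEV uses
		 * [min_sev_asid, max_sev_asid]. With min_sev_asid == 1 the
		 * SEV-ES window is empty (1 > 0) and allocation must fail. */
		*lo = es_active ? 1 : min_sev_asid;
		*hi = es_active ? min_sev_asid - 1 : max_sev_asid;
		return *lo <= *hi;
	}

	int main(void)
	{
		unsigned int lo, hi;

		if (!asid_window(true, &lo, &hi))
			printf("SEV-ES effectively unusable: no ASIDs\n");
		return 0;
	}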
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index beea99c8e8e05e..413f1f2aadd1a3 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -99,10 +99,12 @@ static const struct svm_direct_access_msrs {
+ { .index = MSR_IA32_SPEC_CTRL, .always = false },
+ { .index = MSR_IA32_PRED_CMD, .always = false },
+ { .index = MSR_IA32_FLUSH_CMD, .always = false },
++ { .index = MSR_IA32_DEBUGCTLMSR, .always = false },
+ { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
+ { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
+ { .index = MSR_IA32_LASTINTFROMIP, .always = false },
+ { .index = MSR_IA32_LASTINTTOIP, .always = false },
++ { .index = MSR_IA32_XSS, .always = false },
+ { .index = MSR_EFER, .always = false },
+ { .index = MSR_IA32_CR_PAT, .always = false },
+ { .index = MSR_AMD64_SEV_ES_GHCB, .always = true },
+@@ -214,7 +216,7 @@ int vgif = true;
+ module_param(vgif, int, 0444);
+
+ /* enable/disable LBR virtualization */
+-static int lbrv = true;
++int lbrv = true;
+ module_param(lbrv, int, 0444);
+
+ static int tsc_scaling = true;
+@@ -1007,7 +1009,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
+ vmcb_mark_dirty(to_vmcb, VMCB_LBR);
+ }
+
+-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+@@ -1017,6 +1019,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+
++ if (sev_es_guest(vcpu->kvm))
++ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
++
+ /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
+ if (is_guest_mode(vcpu))
+ svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
+@@ -1026,6 +1031,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+
++ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
++
+ svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+@@ -1873,15 +1880,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ bool old_paging = is_paging(vcpu);
+
+ #ifdef CONFIG_X86_64
+- if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
++ if (vcpu->arch.efer & EFER_LME) {
+ if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
+ vcpu->arch.efer |= EFER_LMA;
+- svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
++ if (!vcpu->arch.guest_state_protected)
++ svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+ }
+
+ if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
+ vcpu->arch.efer &= ~EFER_LMA;
+- svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
++ if (!vcpu->arch.guest_state_protected)
++ svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+ }
+ }
+ #endif
+@@ -2860,6 +2869,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_CSTAR:
+ msr_info->data = svm->vmcb01.ptr->save.cstar;
+ break;
++ case MSR_GS_BASE:
++ msr_info->data = svm->vmcb01.ptr->save.gs.base;
++ break;
++ case MSR_FS_BASE:
++ msr_info->data = svm->vmcb01.ptr->save.fs.base;
++ break;
+ case MSR_KERNEL_GS_BASE:
+ msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
+ break;
+@@ -3081,6 +3096,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ case MSR_CSTAR:
+ svm->vmcb01.ptr->save.cstar = data;
+ break;
++ case MSR_GS_BASE:
++ svm->vmcb01.ptr->save.gs.base = data;
++ break;
++ case MSR_FS_BASE:
++ svm->vmcb01.ptr->save.fs.base = data;
++ break;
+ case MSR_KERNEL_GS_BASE:
+ svm->vmcb01.ptr->save.kernel_gs_base = data;
+ break;
+@@ -3854,16 +3875,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /*
+- * KVM should never request an NMI window when vNMI is enabled, as KVM
+- * allows at most one to-be-injected NMI and one pending NMI, i.e. if
+- * two NMIs arrive simultaneously, KVM will inject one and set
+- * V_NMI_PENDING for the other. WARN, but continue with the standard
+- * single-step approach to try and salvage the pending NMI.
++ * If NMIs are outright masked, i.e. the vCPU is already handling an
++ * NMI, and KVM has not yet intercepted an IRET, then there is nothing
++ * more to do at this time as KVM has already enabled IRET intercepts.
++ * If KVM has already intercepted IRET, then single-step over the IRET,
++ * as NMIs aren't architecturally unmasked until the IRET completes.
++ *
++ * If vNMI is enabled, KVM should never request an NMI window if NMIs
++ * are masked, as KVM allows at most one to-be-injected NMI and one
++ * pending NMI. If two NMIs arrive simultaneously, KVM will inject one
++ * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
++ * unmasked. KVM _will_ request an NMI window in some situations, e.g.
++ * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
++ * inject the NMI. In those situations, KVM needs to single-step over
++ * the STI shadow or intercept STGI.
+ */
+- WARN_ON_ONCE(is_vnmi_enabled(svm));
++ if (svm_get_nmi_mask(vcpu)) {
++ WARN_ON_ONCE(is_vnmi_enabled(svm));
+
+- if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
+- return; /* IRET will cause a vm exit */
++ if (!svm->awaiting_iret_completion)
++ return; /* IRET will cause a vm exit */
++ }
+
+ /*
+ * SEV-ES guests are responsible for signaling when a vCPU is ready to
+@@ -5146,6 +5178,9 @@ static __init void svm_set_cpu_caps(void)
+
+ /* CPUID 0x8000001F (SME/SEV features) */
+ sev_set_cpu_caps();
++
++ /* Don't advertise Bus Lock Detect to guest if SVM support is absent */
++ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
+ }
+
+ static __init int svm_hardware_setup(void)
+@@ -5235,6 +5270,12 @@ static __init int svm_hardware_setup(void)
+
+ nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+
++ if (lbrv) {
++ if (!boot_cpu_has(X86_FEATURE_LBRV))
++ lbrv = false;
++ else
++ pr_info("LBR virtualization supported\n");
++ }
+ /*
+ * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
+ * may be modified by svm_adjust_mmio_mask()), as well as nrips.
+@@ -5288,14 +5329,6 @@ static __init int svm_hardware_setup(void)
+ svm_x86_ops.set_vnmi_pending = NULL;
+ }
+
+-
+- if (lbrv) {
+- if (!boot_cpu_has(X86_FEATURE_LBRV))
+- lbrv = false;
+- else
+- pr_info("LBR virtualization supported\n");
+- }
+-
+ if (!enable_pmu)
+ pr_info("PMU virtualization is disabled\n");
+
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index be67ab7fdd104e..37ada9808d9b57 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -30,7 +30,7 @@
+ #define IOPM_SIZE PAGE_SIZE * 3
+ #define MSRPM_SIZE PAGE_SIZE * 2
+
+-#define MAX_DIRECT_ACCESS_MSRS 46
++#define MAX_DIRECT_ACCESS_MSRS 48
+ #define MSRPM_OFFSETS 32
+ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
+ extern bool npt_enabled;
+@@ -39,6 +39,7 @@ extern int vgif;
+ extern bool intercept_smi;
+ extern bool x2avic_enabled;
+ extern bool vnmi;
++extern int lbrv;
+
+ /*
+ * Clean bits in VMCB.
+@@ -541,6 +542,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
+ void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
+ void svm_vcpu_free_msrpm(u32 *msrpm);
+ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
++void svm_enable_lbrv(struct kvm_vcpu *vcpu);
+ void svm_update_lbrv(struct kvm_vcpu *vcpu);
+
+ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
+index 36c8af87a707ac..4e725854c63a10 100644
+--- a/arch/x86/kvm/svm/svm_ops.h
++++ b/arch/x86/kvm/svm/svm_ops.h
+@@ -8,7 +8,7 @@
+
+ #define svm_asm(insn, clobber...) \
+ do { \
+- asm_volatile_goto("1: " __stringify(insn) "\n\t" \
++ asm goto("1: " __stringify(insn) "\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+ ::: clobber : fault); \
+ return; \
+@@ -18,7 +18,7 @@ fault: \
+
+ #define svm_asm1(insn, op1, clobber...) \
+ do { \
+- asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
++ asm goto("1: " __stringify(insn) " %0\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+ :: op1 : clobber : fault); \
+ return; \
+@@ -28,7 +28,7 @@ fault: \
+
+ #define svm_asm2(insn, op1, op2, clobber...) \
+ do { \
+- asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
++ asm goto("1: " __stringify(insn) " %1, %0\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+ :: op1, op2 : clobber : fault); \
+ return; \
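asm_volatile_goto() was a compiler-workaround macro; current kernels spell it asm goto directly (asm goto is implicitly volatile). A minimal user-space illustration of the construct these macros build on, minus the kernel's _ASM_EXTABLE fault routing:

	/* GCC/Clang "asm goto": the asm may jump to a C label. */
	static int is_zero(unsigned long x)
	{
		asm goto("test %0, %0\n\t"
			 "jz %l[zero]"
			 : /* plain asm goto takes no outputs; cf. asm_goto_output */
			 : "r"(x)
			 : "cc"
			 : zero);
		return 0;
	zero:
		return 1;
	}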
+diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
+index 83843379813ee3..b82e6ed4f02417 100644
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -732,13 +732,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
+ * Tracepoint for nested #vmexit because of interrupt pending
+ */
+ TRACE_EVENT(kvm_invlpga,
+- TP_PROTO(__u64 rip, int asid, u64 address),
++ TP_PROTO(__u64 rip, unsigned int asid, u64 address),
+ TP_ARGS(rip, asid, address),
+
+ TP_STRUCT__entry(
+- __field( __u64, rip )
+- __field( int, asid )
+- __field( __u64, address )
++ __field( __u64, rip )
++ __field( unsigned int, asid )
++ __field( __u64, address )
+ ),
+
+ TP_fast_assign(
+@@ -747,7 +747,7 @@ TRACE_EVENT(kvm_invlpga,
+ __entry->address = address;
+ ),
+
+- TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
++ TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
+ __entry->rip, __entry->asid, __entry->address)
+ );
+
+diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c
+index 313b8bb5b8a7cb..de13dc14fe1d2f 100644
+--- a/arch/x86/kvm/vmx/hyperv.c
++++ b/arch/x86/kvm/vmx/hyperv.c
+@@ -13,111 +13,6 @@
+
+ #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
+
+-/*
+- * Enlightened VMCSv1 doesn't support these:
+- *
+- * POSTED_INTR_NV = 0x00000002,
+- * GUEST_INTR_STATUS = 0x00000810,
+- * APIC_ACCESS_ADDR = 0x00002014,
+- * POSTED_INTR_DESC_ADDR = 0x00002016,
+- * EOI_EXIT_BITMAP0 = 0x0000201c,
+- * EOI_EXIT_BITMAP1 = 0x0000201e,
+- * EOI_EXIT_BITMAP2 = 0x00002020,
+- * EOI_EXIT_BITMAP3 = 0x00002022,
+- * GUEST_PML_INDEX = 0x00000812,
+- * PML_ADDRESS = 0x0000200e,
+- * VM_FUNCTION_CONTROL = 0x00002018,
+- * EPTP_LIST_ADDRESS = 0x00002024,
+- * VMREAD_BITMAP = 0x00002026,
+- * VMWRITE_BITMAP = 0x00002028,
+- *
+- * TSC_MULTIPLIER = 0x00002032,
+- * PLE_GAP = 0x00004020,
+- * PLE_WINDOW = 0x00004022,
+- * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
+- *
+- * Currently unsupported in KVM:
+- * GUEST_IA32_RTIT_CTL = 0x00002814,
+- */
+-#define EVMCS1_SUPPORTED_PINCTRL \
+- (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
+- PIN_BASED_EXT_INTR_MASK | \
+- PIN_BASED_NMI_EXITING | \
+- PIN_BASED_VIRTUAL_NMIS)
+-
+-#define EVMCS1_SUPPORTED_EXEC_CTRL \
+- (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
+- CPU_BASED_HLT_EXITING | \
+- CPU_BASED_CR3_LOAD_EXITING | \
+- CPU_BASED_CR3_STORE_EXITING | \
+- CPU_BASED_UNCOND_IO_EXITING | \
+- CPU_BASED_MOV_DR_EXITING | \
+- CPU_BASED_USE_TSC_OFFSETTING | \
+- CPU_BASED_MWAIT_EXITING | \
+- CPU_BASED_MONITOR_EXITING | \
+- CPU_BASED_INVLPG_EXITING | \
+- CPU_BASED_RDPMC_EXITING | \
+- CPU_BASED_INTR_WINDOW_EXITING | \
+- CPU_BASED_CR8_LOAD_EXITING | \
+- CPU_BASED_CR8_STORE_EXITING | \
+- CPU_BASED_RDTSC_EXITING | \
+- CPU_BASED_TPR_SHADOW | \
+- CPU_BASED_USE_IO_BITMAPS | \
+- CPU_BASED_MONITOR_TRAP_FLAG | \
+- CPU_BASED_USE_MSR_BITMAPS | \
+- CPU_BASED_NMI_WINDOW_EXITING | \
+- CPU_BASED_PAUSE_EXITING | \
+- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+-
+-#define EVMCS1_SUPPORTED_2NDEXEC \
+- (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
+- SECONDARY_EXEC_WBINVD_EXITING | \
+- SECONDARY_EXEC_ENABLE_VPID | \
+- SECONDARY_EXEC_ENABLE_EPT | \
+- SECONDARY_EXEC_UNRESTRICTED_GUEST | \
+- SECONDARY_EXEC_DESC | \
+- SECONDARY_EXEC_ENABLE_RDTSCP | \
+- SECONDARY_EXEC_ENABLE_INVPCID | \
+- SECONDARY_EXEC_ENABLE_XSAVES | \
+- SECONDARY_EXEC_RDSEED_EXITING | \
+- SECONDARY_EXEC_RDRAND_EXITING | \
+- SECONDARY_EXEC_TSC_SCALING | \
+- SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
+- SECONDARY_EXEC_PT_USE_GPA | \
+- SECONDARY_EXEC_PT_CONCEAL_VMX | \
+- SECONDARY_EXEC_BUS_LOCK_DETECTION | \
+- SECONDARY_EXEC_NOTIFY_VM_EXITING | \
+- SECONDARY_EXEC_ENCLS_EXITING)
+-
+-#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)
+-
+-#define EVMCS1_SUPPORTED_VMEXIT_CTRL \
+- (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \
+- VM_EXIT_SAVE_DEBUG_CONTROLS | \
+- VM_EXIT_ACK_INTR_ON_EXIT | \
+- VM_EXIT_HOST_ADDR_SPACE_SIZE | \
+- VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
+- VM_EXIT_SAVE_IA32_PAT | \
+- VM_EXIT_LOAD_IA32_PAT | \
+- VM_EXIT_SAVE_IA32_EFER | \
+- VM_EXIT_LOAD_IA32_EFER | \
+- VM_EXIT_CLEAR_BNDCFGS | \
+- VM_EXIT_PT_CONCEAL_PIP | \
+- VM_EXIT_CLEAR_IA32_RTIT_CTL)
+-
+-#define EVMCS1_SUPPORTED_VMENTRY_CTRL \
+- (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \
+- VM_ENTRY_LOAD_DEBUG_CONTROLS | \
+- VM_ENTRY_IA32E_MODE | \
+- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
+- VM_ENTRY_LOAD_IA32_PAT | \
+- VM_ENTRY_LOAD_IA32_EFER | \
+- VM_ENTRY_LOAD_BNDCFGS | \
+- VM_ENTRY_PT_CONCEAL_PIP | \
+- VM_ENTRY_LOAD_IA32_RTIT_CTL)
+-
+-#define EVMCS1_SUPPORTED_VMFUNC (0)
+-
+ #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
+ #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
+ {EVMCS1_OFFSET(name), clean_field}
+@@ -608,40 +503,6 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
+ return 0;
+ }
+
+-#if IS_ENABLED(CONFIG_HYPERV)
+-DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
+-
+-/*
+- * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
+- * is: in case a feature has corresponding fields in eVMCS described and it was
+- * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a
+- * feature which has no corresponding eVMCS field, this likely means that KVM
+- * needs to be updated.
+- */
+-#define evmcs_check_vmcs_conf(field, ctrl) \
+- do { \
+- typeof(vmcs_conf->field) unsupported; \
+- \
+- unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \
+- if (unsupported) { \
+- pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
+- (u64)unsupported); \
+- vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \
+- } \
+- } \
+- while (0)
+-
+-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
+-{
+- evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
+- evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
+- evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
+- evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
+- evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
+- evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
+-}
+-#endif
+-
+ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+ {
+diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h
+index 9623fe1651c48b..9401dbfaea7cef 100644
+--- a/arch/x86/kvm/vmx/hyperv.h
++++ b/arch/x86/kvm/vmx/hyperv.h
+@@ -14,12 +14,113 @@
+ #include "vmcs.h"
+ #include "vmcs12.h"
+
+-struct vmcs_config;
+-
+-#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
+-
+ #define KVM_EVMCS_VERSION 1
+
++/*
++ * Enlightened VMCSv1 doesn't support these:
++ *
++ * POSTED_INTR_NV = 0x00000002,
++ * GUEST_INTR_STATUS = 0x00000810,
++ * APIC_ACCESS_ADDR = 0x00002014,
++ * POSTED_INTR_DESC_ADDR = 0x00002016,
++ * EOI_EXIT_BITMAP0 = 0x0000201c,
++ * EOI_EXIT_BITMAP1 = 0x0000201e,
++ * EOI_EXIT_BITMAP2 = 0x00002020,
++ * EOI_EXIT_BITMAP3 = 0x00002022,
++ * GUEST_PML_INDEX = 0x00000812,
++ * PML_ADDRESS = 0x0000200e,
++ * VM_FUNCTION_CONTROL = 0x00002018,
++ * EPTP_LIST_ADDRESS = 0x00002024,
++ * VMREAD_BITMAP = 0x00002026,
++ * VMWRITE_BITMAP = 0x00002028,
++ *
++ * TSC_MULTIPLIER = 0x00002032,
++ * PLE_GAP = 0x00004020,
++ * PLE_WINDOW = 0x00004022,
++ * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
++ *
++ * Currently unsupported in KVM:
++ * GUEST_IA32_RTIT_CTL = 0x00002814,
++ */
++#define EVMCS1_SUPPORTED_PINCTRL \
++ (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
++ PIN_BASED_EXT_INTR_MASK | \
++ PIN_BASED_NMI_EXITING | \
++ PIN_BASED_VIRTUAL_NMIS)
++
++#define EVMCS1_SUPPORTED_EXEC_CTRL \
++ (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
++ CPU_BASED_HLT_EXITING | \
++ CPU_BASED_CR3_LOAD_EXITING | \
++ CPU_BASED_CR3_STORE_EXITING | \
++ CPU_BASED_UNCOND_IO_EXITING | \
++ CPU_BASED_MOV_DR_EXITING | \
++ CPU_BASED_USE_TSC_OFFSETTING | \
++ CPU_BASED_MWAIT_EXITING | \
++ CPU_BASED_MONITOR_EXITING | \
++ CPU_BASED_INVLPG_EXITING | \
++ CPU_BASED_RDPMC_EXITING | \
++ CPU_BASED_INTR_WINDOW_EXITING | \
++ CPU_BASED_CR8_LOAD_EXITING | \
++ CPU_BASED_CR8_STORE_EXITING | \
++ CPU_BASED_RDTSC_EXITING | \
++ CPU_BASED_TPR_SHADOW | \
++ CPU_BASED_USE_IO_BITMAPS | \
++ CPU_BASED_MONITOR_TRAP_FLAG | \
++ CPU_BASED_USE_MSR_BITMAPS | \
++ CPU_BASED_NMI_WINDOW_EXITING | \
++ CPU_BASED_PAUSE_EXITING | \
++ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
++
++#define EVMCS1_SUPPORTED_2NDEXEC \
++ (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
++ SECONDARY_EXEC_WBINVD_EXITING | \
++ SECONDARY_EXEC_ENABLE_VPID | \
++ SECONDARY_EXEC_ENABLE_EPT | \
++ SECONDARY_EXEC_UNRESTRICTED_GUEST | \
++ SECONDARY_EXEC_DESC | \
++ SECONDARY_EXEC_ENABLE_RDTSCP | \
++ SECONDARY_EXEC_ENABLE_INVPCID | \
++ SECONDARY_EXEC_ENABLE_XSAVES | \
++ SECONDARY_EXEC_RDSEED_EXITING | \
++ SECONDARY_EXEC_RDRAND_EXITING | \
++ SECONDARY_EXEC_TSC_SCALING | \
++ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
++ SECONDARY_EXEC_PT_USE_GPA | \
++ SECONDARY_EXEC_PT_CONCEAL_VMX | \
++ SECONDARY_EXEC_BUS_LOCK_DETECTION | \
++ SECONDARY_EXEC_NOTIFY_VM_EXITING | \
++ SECONDARY_EXEC_ENCLS_EXITING)
++
++#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)
++
++#define EVMCS1_SUPPORTED_VMEXIT_CTRL \
++ (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \
++ VM_EXIT_SAVE_DEBUG_CONTROLS | \
++ VM_EXIT_ACK_INTR_ON_EXIT | \
++ VM_EXIT_HOST_ADDR_SPACE_SIZE | \
++ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
++ VM_EXIT_SAVE_IA32_PAT | \
++ VM_EXIT_LOAD_IA32_PAT | \
++ VM_EXIT_SAVE_IA32_EFER | \
++ VM_EXIT_LOAD_IA32_EFER | \
++ VM_EXIT_CLEAR_BNDCFGS | \
++ VM_EXIT_PT_CONCEAL_PIP | \
++ VM_EXIT_CLEAR_IA32_RTIT_CTL)
++
++#define EVMCS1_SUPPORTED_VMENTRY_CTRL \
++ (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \
++ VM_ENTRY_LOAD_DEBUG_CONTROLS | \
++ VM_ENTRY_IA32E_MODE | \
++ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
++ VM_ENTRY_LOAD_IA32_PAT | \
++ VM_ENTRY_LOAD_IA32_EFER | \
++ VM_ENTRY_LOAD_BNDCFGS | \
++ VM_ENTRY_PT_CONCEAL_PIP | \
++ VM_ENTRY_LOAD_IA32_RTIT_CTL)
++
++#define EVMCS1_SUPPORTED_VMFUNC (0)
++
+ struct evmcs_field {
+ u16 offset;
+ u16 clean_field;
+@@ -65,114 +166,6 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
+ return vmcs12_read_any((void *)evmcs, field, offset);
+ }
+
+-#if IS_ENABLED(CONFIG_HYPERV)
+-
+-DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
+-
+-static __always_inline bool kvm_is_using_evmcs(void)
+-{
+- return static_branch_unlikely(&__kvm_is_using_evmcs);
+-}
+-
+-static __always_inline int get_evmcs_offset(unsigned long field,
+- u16 *clean_field)
+-{
+- int offset = evmcs_field_offset(field, clean_field);
+-
+- WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
+- return offset;
+-}
+-
+-static __always_inline void evmcs_write64(unsigned long field, u64 value)
+-{
+- u16 clean_field;
+- int offset = get_evmcs_offset(field, &clean_field);
+-
+- if (offset < 0)
+- return;
+-
+- *(u64 *)((char *)current_evmcs + offset) = value;
+-
+- current_evmcs->hv_clean_fields &= ~clean_field;
+-}
+-
+-static __always_inline void evmcs_write32(unsigned long field, u32 value)
+-{
+- u16 clean_field;
+- int offset = get_evmcs_offset(field, &clean_field);
+-
+- if (offset < 0)
+- return;
+-
+- *(u32 *)((char *)current_evmcs + offset) = value;
+- current_evmcs->hv_clean_fields &= ~clean_field;
+-}
+-
+-static __always_inline void evmcs_write16(unsigned long field, u16 value)
+-{
+- u16 clean_field;
+- int offset = get_evmcs_offset(field, &clean_field);
+-
+- if (offset < 0)
+- return;
+-
+- *(u16 *)((char *)current_evmcs + offset) = value;
+- current_evmcs->hv_clean_fields &= ~clean_field;
+-}
+-
+-static __always_inline u64 evmcs_read64(unsigned long field)
+-{
+- int offset = get_evmcs_offset(field, NULL);
+-
+- if (offset < 0)
+- return 0;
+-
+- return *(u64 *)((char *)current_evmcs + offset);
+-}
+-
+-static __always_inline u32 evmcs_read32(unsigned long field)
+-{
+- int offset = get_evmcs_offset(field, NULL);
+-
+- if (offset < 0)
+- return 0;
+-
+- return *(u32 *)((char *)current_evmcs + offset);
+-}
+-
+-static __always_inline u16 evmcs_read16(unsigned long field)
+-{
+- int offset = get_evmcs_offset(field, NULL);
+-
+- if (offset < 0)
+- return 0;
+-
+- return *(u16 *)((char *)current_evmcs + offset);
+-}
+-
+-static inline void evmcs_load(u64 phys_addr)
+-{
+- struct hv_vp_assist_page *vp_ap =
+- hv_get_vp_assist_page(smp_processor_id());
+-
+- if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
+- vp_ap->nested_control.features.directhypercall = 1;
+- vp_ap->current_nested_vmcs = phys_addr;
+- vp_ap->enlighten_vmentry = 1;
+-}
+-
+-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
+-#else /* !IS_ENABLED(CONFIG_HYPERV) */
+-static __always_inline bool kvm_is_using_evmcs(void) { return false; }
+-static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
+-static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
+-static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
+-static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
+-static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
+-static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
+-static inline void evmcs_load(u64 phys_addr) {}
+-#endif /* IS_ENABLED(CONFIG_HYPERV) */
+-
+ #define EVMPTR_INVALID (-1ULL)
+ #define EVMPTR_MAP_PENDING (-2ULL)
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index c5ec0ef51ff78f..0ad66b9207e85f 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -12,6 +12,7 @@
+ #include "mmu.h"
+ #include "nested.h"
+ #include "pmu.h"
++#include "posted_intr.h"
+ #include "sgx.h"
+ #include "trace.h"
+ #include "vmx.h"
+@@ -3830,8 +3831,8 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return 0;
+
+- max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
+- if (max_irr != 256) {
++ max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
++ if (max_irr > 0) {
+ vapic_page = vmx->nested.virtual_apic_map.hva;
+ if (!vapic_page)
+ goto mmio_needed;
+@@ -3962,10 +3963,42 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
+ to_vmx(vcpu)->nested.preemption_timer_expired;
+ }
+
+-static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
++static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
+ {
+- return nested_vmx_preemption_timer_pending(vcpu) ||
+- to_vmx(vcpu)->nested.mtf_pending;
++ struct vcpu_vmx *vmx = to_vmx(vcpu);
++ void *vapic = vmx->nested.virtual_apic_map.hva;
++ int max_irr, vppr;
++
++ if (nested_vmx_preemption_timer_pending(vcpu) ||
++ vmx->nested.mtf_pending)
++ return true;
++
++ /*
++ * Virtual Interrupt Delivery doesn't require manual injection. Either
++ * the interrupt is already in GUEST_RVI and will be recognized by CPU
++ * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move
++ * the interrupt from the PIR to RVI prior to entering the guest.
++ */
++ if (for_injection)
++ return false;
++
++ if (!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
++ __vmx_interrupt_blocked(vcpu))
++ return false;
++
++ if (!vapic)
++ return false;
++
++ vppr = *((u32 *)(vapic + APIC_PROCPRI));
++
++ if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
++ pi_test_on(vmx->nested.pi_desc)) {
++ max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
++ if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0))
++ return true;
++ }
++
++ return false;
+ }
+
+ /*
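The new vmx_has_nested_events() path compares only the upper nibbles of the vector numbers: a pending posted interrupt matters only if its 16-vector priority class beats the virtual PPR's class. A sketch of that check, with pi_find_highest_vector() reimplemented here purely for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	/* Highest vector set in a 256-bit PIR, or -1 if none. */
	static int pir_highest_vector(const uint64_t pir[4])
	{
		for (int word = 3; word >= 0; word--)
			if (pir[word])
				return word * 64 + 63 - __builtin_clzll(pir[word]);
		return -1;
	}

	static bool nested_pi_deliverable(const uint64_t pir[4], uint32_t vppr)
	{
		int max_irr = pir_highest_vector(pir);

		/* Compare priority classes (vector >> 4), as the hunk above does. */
		return max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0);
	}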
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 820d3e1f6b4f82..48a2f77f62ef35 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -71,7 +71,7 @@ static int fixed_pmc_events[] = {
+ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+ {
+ struct kvm_pmc *pmc;
+- u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
++ u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
+ int i;
+
+ pmu->fixed_ctr_ctrl = data;
+@@ -493,19 +493,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
+ u64 counter_mask;
+ int i;
+
+- pmu->nr_arch_gp_counters = 0;
+- pmu->nr_arch_fixed_counters = 0;
+- pmu->counter_bitmask[KVM_PMC_GP] = 0;
+- pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+- pmu->version = 0;
+- pmu->reserved_bits = 0xffffffff00200000ull;
+- pmu->raw_event_mask = X86_RAW_EVENT_MASK;
+- pmu->global_ctrl_mask = ~0ull;
+- pmu->global_status_mask = ~0ull;
+- pmu->fixed_ctr_ctrl_mask = ~0ull;
+- pmu->pebs_enable_mask = ~0ull;
+- pmu->pebs_data_cfg_mask = ~0ull;
+-
+ memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
+
+ /*
+@@ -517,8 +504,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
+ return;
+
+ entry = kvm_find_cpuid_entry(vcpu, 0xa);
+- if (!entry || !vcpu->kvm->arch.enable_pmu)
++ if (!entry)
+ return;
++
+ eax.full = entry->eax;
+ edx.full = entry->edx;
+
+@@ -632,26 +620,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
+
+ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
+ {
+- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+- struct kvm_pmc *pmc = NULL;
+- int i;
+-
+- for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
+- pmc = &pmu->gp_counters[i];
+-
+- pmc_stop_counter(pmc);
+- pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
+- }
+-
+- for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
+- pmc = &pmu->fixed_counters[i];
+-
+- pmc_stop_counter(pmc);
+- pmc->counter = pmc->prev_counter = 0;
+- }
+-
+- pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
+-
+ intel_pmu_release_guest_lbr_event(vcpu);
+ }
+
+diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
+index 26992076552ef1..1715d2ab07be5d 100644
+--- a/arch/x86/kvm/vmx/posted_intr.h
++++ b/arch/x86/kvm/vmx/posted_intr.h
+@@ -2,97 +2,8 @@
+ #ifndef __KVM_X86_VMX_POSTED_INTR_H
+ #define __KVM_X86_VMX_POSTED_INTR_H
+
+-#define POSTED_INTR_ON 0
+-#define POSTED_INTR_SN 1
+-
+-#define PID_TABLE_ENTRY_VALID 1
+-
+-/* Posted-Interrupt Descriptor */
+-struct pi_desc {
+- u32 pir[8]; /* Posted interrupt requested */
+- union {
+- struct {
+- /* bit 256 - Outstanding Notification */
+- u16 on : 1,
+- /* bit 257 - Suppress Notification */
+- sn : 1,
+- /* bit 271:258 - Reserved */
+- rsvd_1 : 14;
+- /* bit 279:272 - Notification Vector */
+- u8 nv;
+- /* bit 287:280 - Reserved */
+- u8 rsvd_2;
+- /* bit 319:288 - Notification Destination */
+- u32 ndst;
+- };
+- u64 control;
+- };
+- u32 rsvd[6];
+-} __aligned(64);
+-
+-static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
+-{
+- return test_and_set_bit(POSTED_INTR_ON,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
+-{
+- return test_and_clear_bit(POSTED_INTR_ON,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
+-{
+- return test_and_clear_bit(POSTED_INTR_SN,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+-{
+- return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+-}
+-
+-static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
+-{
+- return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
+-}
+-
+-static inline void pi_set_sn(struct pi_desc *pi_desc)
+-{
+- set_bit(POSTED_INTR_SN,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline void pi_set_on(struct pi_desc *pi_desc)
+-{
+- set_bit(POSTED_INTR_ON,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline void pi_clear_on(struct pi_desc *pi_desc)
+-{
+- clear_bit(POSTED_INTR_ON,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline void pi_clear_sn(struct pi_desc *pi_desc)
+-{
+- clear_bit(POSTED_INTR_SN,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline bool pi_test_on(struct pi_desc *pi_desc)
+-{
+- return test_bit(POSTED_INTR_ON,
+- (unsigned long *)&pi_desc->control);
+-}
+-
+-static inline bool pi_test_sn(struct pi_desc *pi_desc)
+-{
+- return test_bit(POSTED_INTR_SN,
+- (unsigned long *)&pi_desc->control);
+-}
++#include <linux/find.h>
++#include <asm/posted_intr.h>
+
+ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
+ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu);
+@@ -103,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void vmx_pi_start_assignment(struct kvm *kvm);
+
++static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
++{
++ int vec;
++
++ vec = find_last_bit((unsigned long *)pi_desc->pir, 256);
++ return vec < 256 ? vec : -1;
++}
++
+ #endif /* __KVM_X86_VMX_POSTED_INTR_H */
+diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
+index edc3f16cc1896f..6a9bfdfbb6e59b 100644
+--- a/arch/x86/kvm/vmx/run_flags.h
++++ b/arch/x86/kvm/vmx/run_flags.h
+@@ -2,7 +2,10 @@
+ #ifndef __KVM_X86_VMX_RUN_FLAGS_H
+ #define __KVM_X86_VMX_RUN_FLAGS_H
+
+-#define VMX_RUN_VMRESUME (1 << 0)
+-#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
++#define VMX_RUN_VMRESUME_SHIFT 0
++#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
++
++#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
++#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+
+ #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index be275a0410a899..9522d46567f81b 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -139,7 +139,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
+ mov (%_ASM_SP), %_ASM_AX
+
+ /* Check if vmlaunch or vmresume is needed */
+- test $VMX_RUN_VMRESUME, %ebx
++ bt $VMX_RUN_VMRESUME_SHIFT, %ebx
+
+ /* Load guest registers. Don't clobber flags. */
+ mov VCPU_RCX(%_ASM_AX), %_ASM_CX
+@@ -161,8 +161,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
+ /* Load guest RAX. This kills the @regs pointer! */
+ mov VCPU_RAX(%_ASM_AX), %_ASM_AX
+
+- /* Check EFLAGS.ZF from 'test VMX_RUN_VMRESUME' above */
+- jz .Lvmlaunch
++ /* Clobbers EFLAGS.ZF */
++ CLEAR_CPU_BUFFERS
++
++ /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
++ jnc .Lvmlaunch
+
+ /*
+ * After a successful VMRESUME/VMLAUNCH, control flow "magically"
+@@ -272,6 +275,8 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
+
+ call vmx_spec_ctrl_restore_host
+
++ CLEAR_BRANCH_HISTORY_VMEXIT
++
+ /* Put return value in AX */
+ mov %_ASM_BX, %_ASM_AX
+
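The test -> bt switch above is about which flag survives: CLEAR_CPU_BUFFERS expands to a VERW that clobbers ZF, whereas bt latches the tested bit into CF, which VERW leaves intact. A user-space sketch of the same pattern (the VERW itself is elided; the flag name is reused from run_flags.h):

	#define VMX_RUN_VMRESUME_SHIFT 0

	/* Test a flag via "bt" so the result lives in CF and survives
	 * ZF-clobbering instructions placed between the test and the branch. */
	static int needs_vmresume(unsigned int flags)
	{
		asm goto("bt %1, %0\n\t"
			 /* ZF-clobbering work (e.g. VERW) could sit here */
			 "jnc %l[vmlaunch]"
			 : : "r"(flags), "i"(VMX_RUN_VMRESUME_SHIFT) : "cc" : vmlaunch);
		return 1;	/* CF=1: VMRESUME path */
	vmlaunch:
		return 0;	/* CF=0: VMLAUNCH path */
	}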
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 72e3943f36935c..2e0106d9d371cf 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -66,6 +66,8 @@
+ #include "vmx.h"
+ #include "x86.h"
+ #include "smm.h"
++#include "vmx_onhyperv.h"
++#include "posted_intr.h"
+
+ MODULE_AUTHOR("Qumranet");
+ MODULE_LICENSE("GPL");
+@@ -387,7 +389,16 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
+
+ static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+ {
+- vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
++ /*
++ * Disable VERW's behavior of clearing CPU buffers for the guest if the
++ * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
++ * the mitigation. Disabling the clearing behavior provides a
++ * performance boost for guests that aren't aware that manually clearing
++ * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
++ * and VM-Exit.
++ */
++ vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
++ (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+ !boot_cpu_has_bug(X86_BUG_MDS) &&
+ !boot_cpu_has_bug(X86_BUG_TAA);
+
+@@ -745,7 +756,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
+ */
+ static int kvm_cpu_vmxoff(void)
+ {
+- asm_volatile_goto("1: vmxoff\n\t"
++ asm goto("1: vmxoff\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "cc", "memory" : fault);
+
+@@ -2789,7 +2800,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
+
+ cr4_set_bits(X86_CR4_VMXE);
+
+- asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
++ asm goto("1: vmxon %[vmxon_pointer]\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ : : [vmxon_pointer] "m"(vmxon_pointer)
+ : : fault);
+@@ -5039,14 +5050,19 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+ return !vmx_nmi_blocked(vcpu);
+ }
+
++bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
++{
++ return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
++ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
++ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
++}
++
+ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
+ {
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ return false;
+
+- return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
+- (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
++ return __vmx_interrupt_blocked(vcpu);
+ }
+
+ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+@@ -6912,7 +6928,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+ }
+
+-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
++static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+@@ -7226,11 +7242,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+
+ guest_state_enter_irqoff();
+
+- /* L1D Flush includes CPU buffer clear to mitigate MDS */
++ /*
++ * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
++ * mitigation for MDS is done late in VMentry and is still
++ * executed in spite of L1D Flush. This is because an extra VERW
++ * should not matter much after the big hammer L1D Flush.
++ */
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
+- else if (static_branch_unlikely(&mds_user_clear))
+- mds_clear_cpu_buffers();
+ else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+ kvm_arch_has_assigned_device(vcpu->kvm))
+ mds_clear_cpu_buffers();
+@@ -7846,8 +7865,28 @@ static u64 vmx_get_perf_capabilities(void)
+
+ if (vmx_pebs_supported()) {
+ perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+- if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
+- perf_cap &= ~PERF_CAP_PEBS_BASELINE;
++
++ /*
++ * Disallow adaptive PEBS as it is functionally broken, can be
++ * used by the guest to read *host* LBRs, and can be used to
++ * bypass userspace event filters. To correctly and safely
++ * support adaptive PEBS, KVM needs to:
++ *
++ * 1. Account for the ADAPTIVE flag when (re)programming fixed
++ * counters.
++ *
++ * 2. Gain support from perf (or take direct control of counter
++ * programming) to support events without adaptive PEBS
++ * enabled for the hardware counter.
++ *
++ * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
++ * adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
++ *
++ * 4. Document which PMU events are effectively exposed to the
++ * guest via adaptive PEBS, and make adaptive PEBS mutually
++ * exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
++ */
++ perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+ }
+
+ return perf_cap;
+@@ -8286,7 +8325,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+- .apicv_post_state_restore = vmx_apicv_post_state_restore,
++ .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index c2130d2c8e24bb..6be1627d888e5a 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -7,10 +7,10 @@
+ #include <asm/kvm.h>
+ #include <asm/intel_pt.h>
+ #include <asm/perf_event.h>
++#include <asm/posted_intr.h>
+
+ #include "capabilities.h"
+ #include "../kvm_cache_regs.h"
+-#include "posted_intr.h"
+ #include "vmcs.h"
+ #include "vmx_ops.h"
+ #include "../cpuid.h"
+@@ -400,6 +400,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
+ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
+ bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
++bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
+ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
+ bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.c b/arch/x86/kvm/vmx/vmx_onhyperv.c
+new file mode 100644
+index 00000000000000..b9a8b91166d020
+--- /dev/null
++++ b/arch/x86/kvm/vmx/vmx_onhyperv.c
+@@ -0,0 +1,36 @@
++// SPDX-License-Identifier: GPL-2.0-only
++
++#include "capabilities.h"
++#include "vmx_onhyperv.h"
++
++DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
++
++/*
++ * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
++ * is: in case a feature has corresponding fields in eVMCS described and it was
++ * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a
++ * feature which has no corresponding eVMCS field, this likely means that KVM
++ * needs to be updated.
++ */
++#define evmcs_check_vmcs_conf(field, ctrl) \
++ do { \
++ typeof(vmcs_conf->field) unsupported; \
++ \
++ unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \
++ if (unsupported) { \
++ pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
++ (u64)unsupported); \
++ vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \
++ } \
++ } \
++ while (0)
++
++void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
++{
++ evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
++ evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
++ evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
++ evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
++ evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
++ evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
++}
+diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h
+new file mode 100644
+index 00000000000000..11541d272dbd8c
+--- /dev/null
++++ b/arch/x86/kvm/vmx/vmx_onhyperv.h
+@@ -0,0 +1,124 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
++#define __ARCH_X86_KVM_VMX_ONHYPERV_H__
++
++#include <asm/hyperv-tlfs.h>
++
++#include <linux/jump_label.h>
++
++#include "capabilities.h"
++#include "hyperv.h"
++#include "vmcs12.h"
++
++#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
++
++#if IS_ENABLED(CONFIG_HYPERV)
++
++DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
++
++static __always_inline bool kvm_is_using_evmcs(void)
++{
++ return static_branch_unlikely(&__kvm_is_using_evmcs);
++}
++
++static __always_inline int get_evmcs_offset(unsigned long field,
++ u16 *clean_field)
++{
++ int offset = evmcs_field_offset(field, clean_field);
++
++ WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
++ return offset;
++}
++
++static __always_inline void evmcs_write64(unsigned long field, u64 value)
++{
++ u16 clean_field;
++ int offset = get_evmcs_offset(field, &clean_field);
++
++ if (offset < 0)
++ return;
++
++ *(u64 *)((char *)current_evmcs + offset) = value;
++
++ current_evmcs->hv_clean_fields &= ~clean_field;
++}
++
++static __always_inline void evmcs_write32(unsigned long field, u32 value)
++{
++ u16 clean_field;
++ int offset = get_evmcs_offset(field, &clean_field);
++
++ if (offset < 0)
++ return;
++
++ *(u32 *)((char *)current_evmcs + offset) = value;
++ current_evmcs->hv_clean_fields &= ~clean_field;
++}
++
++static __always_inline void evmcs_write16(unsigned long field, u16 value)
++{
++ u16 clean_field;
++ int offset = get_evmcs_offset(field, &clean_field);
++
++ if (offset < 0)
++ return;
++
++ *(u16 *)((char *)current_evmcs + offset) = value;
++ current_evmcs->hv_clean_fields &= ~clean_field;
++}
++
++static __always_inline u64 evmcs_read64(unsigned long field)
++{
++ int offset = get_evmcs_offset(field, NULL);
++
++ if (offset < 0)
++ return 0;
++
++ return *(u64 *)((char *)current_evmcs + offset);
++}
++
++static __always_inline u32 evmcs_read32(unsigned long field)
++{
++ int offset = get_evmcs_offset(field, NULL);
++
++ if (offset < 0)
++ return 0;
++
++ return *(u32 *)((char *)current_evmcs + offset);
++}
++
++static __always_inline u16 evmcs_read16(unsigned long field)
++{
++ int offset = get_evmcs_offset(field, NULL);
++
++ if (offset < 0)
++ return 0;
++
++ return *(u16 *)((char *)current_evmcs + offset);
++}
++
++static inline void evmcs_load(u64 phys_addr)
++{
++ struct hv_vp_assist_page *vp_ap =
++ hv_get_vp_assist_page(smp_processor_id());
++
++ if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
++ vp_ap->nested_control.features.directhypercall = 1;
++ vp_ap->current_nested_vmcs = phys_addr;
++ vp_ap->enlighten_vmentry = 1;
++}
++
++void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
++#else /* !IS_ENABLED(CONFIG_HYPERV) */
++static __always_inline bool kvm_is_using_evmcs(void) { return false; }
++static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
++static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
++static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
++static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
++static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
++static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
++static inline void evmcs_load(u64 phys_addr) {}
++#endif /* IS_ENABLED(CONFIG_HYPERV) */
++
++#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */
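
All of the evmcs_read*/evmcs_write* helpers above share one shape: evmcs_field_offset() turns the VMCS field encoding into a byte offset into struct hv_enlightened_vmcs, the value is accessed at that offset, and on writes the matching bit is cleared in hv_clean_fields so the hypervisor reloads that field group on the next VM entry. A sketch of that shape over a toy struct (layout and bit assignment are illustrative, not the real eVMCS):

    #include <stdint.h>
    #include <string.h>

    struct toy_evmcs {
        uint64_t guest_rip;
        uint16_t clean_fields;      /* cleared bit => group must be reloaded */
    };

    static void toy_write64(struct toy_evmcs *e, int offset,
                            uint16_t clean_bit, uint64_t value)
    {
        if (offset < 0)             /* unsupported field: drop the write */
            return;
        memcpy((char *)e + offset, &value, sizeof(value));
        e->clean_fields &= ~clean_bit;
    }
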
+diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
+index 33af7b4c6eb4a6..8060e5fc6dbd83 100644
+--- a/arch/x86/kvm/vmx/vmx_ops.h
++++ b/arch/x86/kvm/vmx/vmx_ops.h
+@@ -6,7 +6,7 @@
+
+ #include <asm/vmx.h>
+
+-#include "hyperv.h"
++#include "vmx_onhyperv.h"
+ #include "vmcs.h"
+ #include "../x86.h"
+
+@@ -94,7 +94,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
+
+ #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+- asm_volatile_goto("1: vmread %[field], %[output]\n\t"
++ asm_goto_output("1: vmread %[field], %[output]\n\t"
+ "jna %l[do_fail]\n\t"
+
+ _ASM_EXTABLE(1b, %l[do_exception])
+@@ -188,7 +188,7 @@ static __always_inline unsigned long vmcs_readl(unsigned long field)
+
+ #define vmx_asm1(insn, op1, error_args...) \
+ do { \
+- asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
++ asm goto("1: " __stringify(insn) " %0\n\t" \
+ ".byte 0x2e\n\t" /* branch not taken hint */ \
+ "jna %l[error]\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+@@ -205,7 +205,7 @@ fault: \
+
+ #define vmx_asm2(insn, op1, op2, error_args...) \
+ do { \
+- asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
++ asm goto("1: " __stringify(insn) " %1, %0\n\t" \
+ ".byte 0x2e\n\t" /* branch not taken hint */ \
+ "jna %l[error]\n\t" \
+ _ASM_EXTABLE(1b, %l[fault]) \
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 41cce5031126a0..50cc822e129007 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1620,7 +1620,8 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
+ ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
+ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+- ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
++ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
++ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
+
+ static u64 kvm_get_arch_capabilities(void)
+ {
+@@ -1652,6 +1653,8 @@ static u64 kvm_get_arch_capabilities(void)
+ data |= ARCH_CAP_SSB_NO;
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
+ data |= ARCH_CAP_MDS_NO;
++ if (!boot_cpu_has_bug(X86_BUG_RFDS))
++ data |= ARCH_CAP_RFDS_NO;
+
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
+@@ -3314,7 +3317,7 @@ static bool is_mci_status_msr(u32 msr)
+ static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+ {
+ /* McStatusWrEn enabled? */
+- if (guest_cpuid_is_amd_or_hygon(vcpu))
++ if (guest_cpuid_is_amd_compatible(vcpu))
+ return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+
+ return false;
+@@ -3641,6 +3644,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_PATCH_LOADER:
+ case MSR_AMD64_BU_CFG2:
+ case MSR_AMD64_DC_CFG:
++ case MSR_AMD64_TW_CFG:
+ case MSR_F15H_EX_CFG:
+ break;
+
+@@ -4065,6 +4069,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_BU_CFG2:
+ case MSR_IA32_PERF_CTL:
+ case MSR_AMD64_DC_CFG:
++ case MSR_AMD64_TW_CFG:
+ case MSR_F15H_EX_CFG:
+ /*
+ * Intel Sandy Bridge CPUs must support the RAPL (running average power
+@@ -5298,7 +5303,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
+ if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
+ vcpu->arch.nmi_pending = 0;
+ atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
+- kvm_make_request(KVM_REQ_NMI, vcpu);
++ if (events->nmi.pending)
++ kvm_make_request(KVM_REQ_NMI, vcpu);
+ }
+ static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
+
+@@ -5823,7 +5829,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
+ break;
+
++ kvm_vcpu_srcu_read_lock(vcpu);
+ r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
++ kvm_vcpu_srcu_read_unlock(vcpu);
+ break;
+ }
+ case KVM_GET_DEBUGREGS: {
+@@ -7834,6 +7842,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+
+ if (r < 0)
+ return X86EMUL_UNHANDLEABLE;
++
++ /*
++ * Mark the page dirty _before_ checking whether or not the CMPXCHG was
++ * successful, as the old value is written back on failure. Note, for
++ * live migration, this is unnecessarily conservative as CMPXCHG writes
++ * back the original value and the access is atomic, but KVM's ABI is
++ * that all writes are dirty logged, regardless of the value written.
++ */
++ kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));
++
+ if (r)
+ return X86EMUL_CMPXCHG_FAILED;
+
+@@ -10238,7 +10256,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
+
+ if (is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->has_events &&
+- kvm_x86_ops.nested_ops->has_events(vcpu))
++ kvm_x86_ops.nested_ops->has_events(vcpu, true))
+ *req_immediate_exit = true;
+
+ /*
+@@ -10440,13 +10458,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
+
+ bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+
++ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
++
+ if (irqchip_split(vcpu->kvm))
+ kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
+- else {
+- static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+- if (ioapic_in_kernel(vcpu->kvm))
+- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+- }
++ else if (ioapic_in_kernel(vcpu->kvm))
++ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+
+ if (is_guest_mode(vcpu))
+ vcpu->arch.load_eoi_exitmap_pending = true;
+@@ -12867,7 +12884,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+
+ if (is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->has_events &&
+- kvm_x86_ops.nested_ops->has_events(vcpu))
++ kvm_x86_ops.nested_ops->has_events(vcpu, false))
+ return true;
+
+ if (kvm_xen_has_pending_events(vcpu))
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index 40edf4d1974c53..0ea6016ad132a2 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -471,7 +471,7 @@ void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
+ kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
+ }
+
+-static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+ {
+ struct kvm_lapic_irq irq = { };
+ int r;
+diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
+index f8f1fe22d09069..f5841d9000aebd 100644
+--- a/arch/x86/kvm/xen.h
++++ b/arch/x86/kvm/xen.h
+@@ -18,6 +18,7 @@ extern struct static_key_false_deferred kvm_xen_enabled;
+
+ int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
+ void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
++void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
+ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
+ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
+@@ -36,6 +37,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
+ const struct kvm_irq_routing_entry *ue);
+ void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);
+
++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
++{
++ /*
++ * The local APIC is being enabled. If the per-vCPU upcall vector is
++ * set and the vCPU's evtchn_upcall_pending flag is set, inject the
++ * interrupt.
++ */
++ if (static_branch_unlikely(&kvm_xen_enabled.key) &&
++ vcpu->arch.xen.vcpu_info_cache.active &&
++ vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
++ kvm_xen_inject_vcpu_vector(vcpu);
++}
++
+ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+ {
+ return static_branch_unlikely(&kvm_xen_enabled.key) &&
+@@ -101,6 +115,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
+ {
+ }
+
++static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
++{
++}
++
+ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
+ {
+ return false;
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index ea3a28e7b613cc..f0dae4fb6d0716 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -14,19 +14,6 @@ ifdef CONFIG_KCSAN
+ CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
+ endif
+
+-# Early boot use of cmdline; don't instrument it
+-ifdef CONFIG_AMD_MEM_ENCRYPT
+-KCOV_INSTRUMENT_cmdline.o := n
+-KASAN_SANITIZE_cmdline.o := n
+-KCSAN_SANITIZE_cmdline.o := n
+-
+-ifdef CONFIG_FUNCTION_TRACER
+-CFLAGS_REMOVE_cmdline.o = -pg
+-endif
+-
+-CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
+-endif
+-
+ inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
+ inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
+ quiet_cmd_inat_tables = GEN $@
+diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
+index 80efd45a776173..6e8b7e600def57 100644
+--- a/arch/x86/lib/copy_mc.c
++++ b/arch/x86/lib/copy_mc.c
+@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
+ }
+ EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
+
+-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
++unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
+ {
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ __uaccess_begin();
+- ret = copy_mc_fragile(dst, src, len);
++ ret = copy_mc_fragile((__force void *)dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ __uaccess_begin();
+- ret = copy_mc_enhanced_fast_string(dst, src, len);
++ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
+ __uaccess_end();
+ return ret;
+ }
+
+- return copy_user_generic(dst, src, len);
++ return copy_user_generic((__force void *)dst, src, len);
+ }
+diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
+index cea25ca8b8cf69..c9dae65ac01b5a 100644
+--- a/arch/x86/lib/csum-partial_64.c
++++ b/arch/x86/lib/csum-partial_64.c
+@@ -11,26 +11,23 @@
+ #include <asm/checksum.h>
+ #include <asm/word-at-a-time.h>
+
+-static inline unsigned short from32to16(unsigned a)
++static inline __wsum csum_finalize_sum(u64 temp64)
+ {
+- unsigned short b = a >> 16;
+- asm("addw %w2,%w0\n\t"
+- "adcw $0,%w0\n"
+- : "=r" (b)
+- : "0" (b), "r" (a));
+- return b;
++ return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
+ }
+
+-static inline __wsum csum_tail(u64 temp64, int odd)
++static inline unsigned long update_csum_40b(unsigned long sum, const unsigned long m[5])
+ {
+- unsigned int result;
+-
+- result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
+- if (unlikely(odd)) {
+- result = from32to16(result);
+- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+- }
+- return (__force __wsum)result;
++ asm("addq %1,%0\n\t"
++ "adcq %2,%0\n\t"
++ "adcq %3,%0\n\t"
++ "adcq %4,%0\n\t"
++ "adcq %5,%0\n\t"
++ "adcq $0,%0"
++ :"+r" (sum)
++ :"m" (m[0]), "m" (m[1]), "m" (m[2]),
++ "m" (m[3]), "m" (m[4]));
++ return sum;
+ }
+
+ /*
+@@ -47,64 +44,32 @@ static inline __wsum csum_tail(u64 temp64, int odd)
+ __wsum csum_partial(const void *buff, int len, __wsum sum)
+ {
+ u64 temp64 = (__force u64)sum;
+- unsigned odd;
+
+- odd = 1 & (unsigned long) buff;
+- if (unlikely(odd)) {
+- if (unlikely(len == 0))
+- return sum;
+- temp64 = ror32((__force u32)sum, 8);
+- temp64 += (*(unsigned char *)buff << 8);
+- len--;
+- buff++;
++ /* Do two 40-byte chunks in parallel to get better ILP */
++ if (likely(len >= 80)) {
++ u64 temp64_2 = 0;
++ do {
++ temp64 = update_csum_40b(temp64, buff);
++ temp64_2 = update_csum_40b(temp64_2, buff + 40);
++ buff += 80;
++ len -= 80;
++ } while (len >= 80);
++
++ asm("addq %1,%0\n\t"
++ "adcq $0,%0"
++ :"+r" (temp64): "r" (temp64_2));
+ }
+
+ /*
+- * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
+- * has noticeable negative affect on codegen for all other cases with
+- * minimal performance benefit here.
++ * len == 40 is the hot case due to IPv6 headers, so return
++ * early for that exact case without checking the tail bytes.
+ */
+- if (len == 40) {
+- asm("addq 0*8(%[src]),%[res]\n\t"
+- "adcq 1*8(%[src]),%[res]\n\t"
+- "adcq 2*8(%[src]),%[res]\n\t"
+- "adcq 3*8(%[src]),%[res]\n\t"
+- "adcq 4*8(%[src]),%[res]\n\t"
+- "adcq $0,%[res]"
+- : [res] "+r"(temp64)
+- : [src] "r"(buff), "m"(*(const char(*)[40])buff));
+- return csum_tail(temp64, odd);
+- }
+- if (unlikely(len >= 64)) {
+- /*
+- * Extra accumulators for better ILP in the loop.
+- */
+- u64 tmp_accum, tmp_carries;
+-
+- asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
+- "xorl %k[tmp_carries],%k[tmp_carries]\n\t"
+- "subl $64, %[len]\n\t"
+- "1:\n\t"
+- "addq 0*8(%[src]),%[res]\n\t"
+- "adcq 1*8(%[src]),%[res]\n\t"
+- "adcq 2*8(%[src]),%[res]\n\t"
+- "adcq 3*8(%[src]),%[res]\n\t"
+- "adcl $0,%k[tmp_carries]\n\t"
+- "addq 4*8(%[src]),%[tmp_accum]\n\t"
+- "adcq 5*8(%[src]),%[tmp_accum]\n\t"
+- "adcq 6*8(%[src]),%[tmp_accum]\n\t"
+- "adcq 7*8(%[src]),%[tmp_accum]\n\t"
+- "adcl $0,%k[tmp_carries]\n\t"
+- "addq $64, %[src]\n\t"
+- "subl $64, %[len]\n\t"
+- "jge 1b\n\t"
+- "addq %[tmp_accum],%[res]\n\t"
+- "adcq %[tmp_carries],%[res]\n\t"
+- "adcq $0,%[res]"
+- : [tmp_accum] "=&r"(tmp_accum),
+- [tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
+- [len] "+r"(len), [src] "+r"(buff)
+- : "m"(*(const char *)buff));
++ if (len >= 40) {
++ temp64 = update_csum_40b(temp64, buff);
++ len -= 40;
++ if (!len)
++ return csum_finalize_sum(temp64);
++ buff += 40;
+ }
+
+ if (len & 32) {
+@@ -143,7 +108,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
+ : [res] "+r"(temp64)
+ : [trail] "r"(trail));
+ }
+- return csum_tail(temp64, odd);
++ return csum_finalize_sum(temp64);
+ }
+ EXPORT_SYMBOL(csum_partial);
+
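
Two things changed in csum_partial() above. First, the main loop now runs two independent 40-byte accumulator chains (temp64 and temp64_2), so the carry-propagating adcq chains overlap in the pipeline instead of serialising. Second, csum_finalize_sum() replaces csum_tail()/from32to16(): adding ror64(x, 32) to x places hi32 + lo32, with the end-around carry already folded in, into the top half of the result, so a single shift finishes the fold. A standalone sketch of that fold (the 32-to-16 fold still happens in the callers, as before):

    #include <stdint.h>

    static uint64_t ror64(uint64_t x, unsigned int n)
    {
        return (x >> n) | (x << (64 - n));
    }

    /* Fold a 64-bit one's-complement sum to 32 bits in one add + shift. */
    static uint32_t fold64(uint64_t sum64)
    {
        return (uint32_t)((sum64 + ror64(sum64, 32)) >> 32);
    }
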
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index 9c63713477bbb7..6913fbce6544fb 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -44,7 +44,11 @@
+ or %rdx, %rax
+ .else
+ cmp $TASK_SIZE_MAX-\size+1, %eax
++.if \size != 8
+ jae .Lbad_get_user
++.else
++ jae .Lbad_get_user_8
++.endif
+ sbb %edx, %edx /* array_index_mask_nospec() */
+ and %edx, %eax
+ .endif
+@@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
+ #ifdef CONFIG_X86_32
+ SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
+ ASM_CLAC
+-bad_get_user_8:
++.Lbad_get_user_8:
+ xor %edx,%edx
+ xor %ecx,%ecx
+ mov $(-EFAULT),%_ASM_AX
+@@ -163,23 +167,23 @@ SYM_CODE_END(__get_user_8_handle_exception)
+ #endif
+
+ /* get_user */
+- _ASM_EXTABLE(1b, __get_user_handle_exception)
+- _ASM_EXTABLE(2b, __get_user_handle_exception)
+- _ASM_EXTABLE(3b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(1b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(2b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(3b, __get_user_handle_exception)
+ #ifdef CONFIG_X86_64
+- _ASM_EXTABLE(4b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(4b, __get_user_handle_exception)
+ #else
+- _ASM_EXTABLE(4b, __get_user_8_handle_exception)
+- _ASM_EXTABLE(5b, __get_user_8_handle_exception)
++ _ASM_EXTABLE_UA(4b, __get_user_8_handle_exception)
++ _ASM_EXTABLE_UA(5b, __get_user_8_handle_exception)
+ #endif
+
+ /* __get_user */
+- _ASM_EXTABLE(6b, __get_user_handle_exception)
+- _ASM_EXTABLE(7b, __get_user_handle_exception)
+- _ASM_EXTABLE(8b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(6b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(7b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(8b, __get_user_handle_exception)
+ #ifdef CONFIG_X86_64
+- _ASM_EXTABLE(9b, __get_user_handle_exception)
++ _ASM_EXTABLE_UA(9b, __get_user_handle_exception)
+ #else
+- _ASM_EXTABLE(9b, __get_user_8_handle_exception)
+- _ASM_EXTABLE(10b, __get_user_8_handle_exception)
++ _ASM_EXTABLE_UA(9b, __get_user_8_handle_exception)
++ _ASM_EXTABLE_UA(10b, __get_user_8_handle_exception)
+ #endif
+diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
+index e0411a3774d495..5eecb45d05d5da 100644
+--- a/arch/x86/lib/iomem.c
++++ b/arch/x86/lib/iomem.c
+@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
+
+ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+ {
++ const void *orig_to = to;
++ const size_t orig_n = n;
++
+ if (unlikely(!n))
+ return;
+
+@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
+ }
+ rep_movs(to, (const void *)from, n);
+ /* KMSAN must treat values read from devices as initialized. */
+- kmsan_unpoison_memory(to, n);
++ kmsan_unpoison_memory(orig_to, orig_n);
+ }
+
+ static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
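
The iomem.c fix above is a saved-cursor bug: the unaligned-prefix copying advances both to and n before rep_movs runs, so the later kmsan_unpoison_memory(to, n) missed the bytes consumed by the prefix. Bookkeeping after a cursor-advancing copy must use the saved originals; a minimal sketch of the bug class (mark_initialized() is a hypothetical stand-in for kmsan_unpoison_memory()):

    #include <stddef.h>

    void mark_initialized(const void *p, size_t n);   /* hypothetical */

    static void copy_then_mark(char *to, const char *from, size_t n)
    {
        char *orig_to = to;
        const size_t orig_n = n;

        while (n--)                 /* cursor-advancing copy */
            *to++ = *from++;

        mark_initialized(orig_to, orig_n);  /* not (to, n): both are spent */
    }
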
+diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
+index 92cd8ecc3a2c8c..40b81c338ae5b9 100644
+--- a/arch/x86/lib/misc.c
++++ b/arch/x86/lib/misc.c
+@@ -8,7 +8,7 @@
+ */
+ int num_digits(int val)
+ {
+- int m = 10;
++ long long m = 10;
+ int d = 1;
+
+ if (val < 0) {
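
The misc.c hunk is a one-line overflow fix: num_digits() grows m by factors of ten while comparing it against val, and for values near INT_MAX the comparison needs m to reach 10^10, which does not fit in a 32-bit int. Widening m to long long keeps the multiplication defined. The hunk truncates the function body; a reconstruction consistent with the visible lines (a sketch, not the verbatim kernel source):

    /* Count decimal digits, including the sign for negative values. */
    int num_digits(int val)
    {
        long long m = 10;   /* must be able to out-range |val| * 10 */
        int d = 1;

        if (val < 0) {
            d++;
            val = -val;
        }
        while (val >= m) {
            m *= 10;
            d++;
        }
        return d;
    }
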
+diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
+index 235bbda6fc8230..512dc58c938b81 100644
+--- a/arch/x86/lib/putuser.S
++++ b/arch/x86/lib/putuser.S
+@@ -134,15 +134,15 @@ SYM_CODE_START_LOCAL(__put_user_handle_exception)
+ RET
+ SYM_CODE_END(__put_user_handle_exception)
+
+- _ASM_EXTABLE(1b, __put_user_handle_exception)
+- _ASM_EXTABLE(2b, __put_user_handle_exception)
+- _ASM_EXTABLE(3b, __put_user_handle_exception)
+- _ASM_EXTABLE(4b, __put_user_handle_exception)
+- _ASM_EXTABLE(5b, __put_user_handle_exception)
+- _ASM_EXTABLE(6b, __put_user_handle_exception)
+- _ASM_EXTABLE(7b, __put_user_handle_exception)
+- _ASM_EXTABLE(9b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(1b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(2b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(3b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(4b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(5b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(6b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(7b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(9b, __put_user_handle_exception)
+ #ifdef CONFIG_X86_32
+- _ASM_EXTABLE(8b, __put_user_handle_exception)
+- _ASM_EXTABLE(10b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(8b, __put_user_handle_exception)
++ _ASM_EXTABLE_UA(10b, __put_user_handle_exception)
+ #endif
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index cd86aeb5fdd3ea..ffa51f392e17a3 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -126,12 +126,13 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
+ #include <asm/GEN-for-each-reg.h>
+ #undef GEN
+ #endif
+-/*
+- * This function name is magical and is used by -mfunction-return=thunk-extern
+- * for the compiler to generate JMPs to it.
+- */
++
+ #ifdef CONFIG_RETHUNK
+
++ .section .text..__x86.return_thunk
++
++#ifdef CONFIG_CPU_SRSO
++
+ /*
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+ * special addresses:
+@@ -147,9 +148,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
+ *
+ * As a result, srso_alias_safe_ret() becomes a safe return.
+ */
+-#ifdef CONFIG_CPU_SRSO
+- .section .text..__x86.rethunk_untrain
+-
++ .pushsection .text..__x86.rethunk_untrain
+ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+@@ -158,17 +157,9 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ jmp srso_alias_return_thunk
+ SYM_FUNC_END(srso_alias_untrain_ret)
+ __EXPORT_THUNK(srso_alias_untrain_ret)
++ .popsection
+
+- .section .text..__x86.rethunk_safe
+-#else
+-/* dummy definition for alternatives */
+-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+- ANNOTATE_UNRET_SAFE
+- ret
+- int3
+-SYM_FUNC_END(srso_alias_untrain_ret)
+-#endif
+-
++ .pushsection .text..__x86.rethunk_safe
+ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ lea 8(%_ASM_SP), %_ASM_SP
+ UNWIND_HINT_FUNC
+@@ -177,14 +168,69 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ int3
+ SYM_FUNC_END(srso_alias_safe_ret)
+
+- .section .text..__x86.return_thunk
+-
+-SYM_CODE_START(srso_alias_return_thunk)
++SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ call srso_alias_safe_ret
+ ud2
+ SYM_CODE_END(srso_alias_return_thunk)
++ .popsection
++
++/*
++ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
++ * above. On kernel entry, srso_untrain_ret() is executed which is a
++ *
++ * movabs $0xccccc30824648d48,%rax
++ *
++ * and when the return thunk executes the inner label srso_safe_ret()
++ * later, it is a stack manipulation and a RET which is mispredicted and
++ * thus a "safe" one to use.
++ */
++ .align 64
++ .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
++SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
++ ANNOTATE_NOENDBR
++ .byte 0x48, 0xb8
++
++/*
++ * This forces the function return instruction to speculate into a trap
++ * (UD2 in srso_return_thunk() below). This RET will then mispredict
++ * and execution will continue at the return site read from the top of
++ * the stack.
++ */
++SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
++ lea 8(%_ASM_SP), %_ASM_SP
++ ret
++ int3
++ int3
++ /* end of movabs */
++ lfence
++ call srso_safe_ret
++ ud2
++SYM_CODE_END(srso_safe_ret)
++SYM_FUNC_END(srso_untrain_ret)
++
++SYM_CODE_START(srso_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ call srso_safe_ret
++ ud2
++SYM_CODE_END(srso_return_thunk)
++
++#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
++#else /* !CONFIG_CPU_SRSO */
++#define JMP_SRSO_UNTRAIN_RET "ud2"
++/* Dummy for the alternative in CALL_UNTRAIN_RET. */
++SYM_CODE_START(srso_alias_untrain_ret)
++ ANNOTATE_UNRET_SAFE
++ ANNOTATE_NOENDBR
++ ret
++ int3
++SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
++#endif /* CONFIG_CPU_SRSO */
++
++#ifdef CONFIG_CPU_UNRET_ENTRY
+
+ /*
+ * Some generic notes on the untraining sequences:
+@@ -266,65 +312,19 @@ SYM_CODE_END(retbleed_return_thunk)
+ SYM_FUNC_END(retbleed_untrain_ret)
+ __EXPORT_THUNK(retbleed_untrain_ret)
+
+-/*
+- * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+- * above. On kernel entry, srso_untrain_ret() is executed which is a
+- *
+- * movabs $0xccccc30824648d48,%rax
+- *
+- * and when the return thunk executes the inner label srso_safe_ret()
+- * later, it is a stack manipulation and a RET which is mispredicted and
+- * thus a "safe" one to use.
+- */
+- .align 64
+- .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+-SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+- ANNOTATE_NOENDBR
+- .byte 0x48, 0xb8
+-
+-/*
+- * This forces the function return instruction to speculate into a trap
+- * (UD2 in srso_return_thunk() below). This RET will then mispredict
+- * and execution will continue at the return site read from the top of
+- * the stack.
+- */
+-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+- lea 8(%_ASM_SP), %_ASM_SP
+- ret
+- int3
+- int3
+- /* end of movabs */
+- lfence
+- call srso_safe_ret
+- ud2
+-SYM_CODE_END(srso_safe_ret)
+-SYM_FUNC_END(srso_untrain_ret)
+-__EXPORT_THUNK(srso_untrain_ret)
++#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
++#else /* !CONFIG_CPU_UNRET_ENTRY */
++#define JMP_RETBLEED_UNTRAIN_RET "ud2"
++#endif /* CONFIG_CPU_UNRET_ENTRY */
+
+-SYM_CODE_START(srso_return_thunk)
+- UNWIND_HINT_FUNC
+- ANNOTATE_NOENDBR
+- call srso_safe_ret
+- ud2
+-SYM_CODE_END(srso_return_thunk)
++#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+
+ SYM_FUNC_START(entry_untrain_ret)
+- ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+- "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+- "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++ ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
+ SYM_FUNC_END(entry_untrain_ret)
+ __EXPORT_THUNK(entry_untrain_ret)
+
+-SYM_CODE_START(__x86_return_thunk)
+- UNWIND_HINT_FUNC
+- ANNOTATE_NOENDBR
+- ANNOTATE_UNRET_SAFE
+- ret
+- int3
+-SYM_CODE_END(__x86_return_thunk)
+-EXPORT_SYMBOL(__x86_return_thunk)
+-
+-#endif /* CONFIG_RETHUNK */
++#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
+
+ #ifdef CONFIG_CALL_DEPTH_TRACKING
+
+@@ -359,3 +359,22 @@ SYM_FUNC_START(__x86_return_skl)
+ SYM_FUNC_END(__x86_return_skl)
+
+ #endif /* CONFIG_CALL_DEPTH_TRACKING */
++
++/*
++ * This function name is magical and is used by -mfunction-return=thunk-extern
++ * for the compiler to generate JMPs to it.
++ *
++ * This code is only used during kernel boot or module init. All
++ * 'JMP __x86_return_thunk' sites are changed to something else by
++ * apply_returns().
++ */
++SYM_CODE_START(__x86_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ ANNOTATE_UNRET_SAFE
++ ret
++ int3
++SYM_CODE_END(__x86_return_thunk)
++EXPORT_SYMBOL(__x86_return_thunk)
++
++#endif /* CONFIG_RETHUNK */
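
The magic constant in the srso_untrain_ret() comment above is the whole trick: the eight immediate bytes of the movabs are themselves a valid instruction stream, so the same bytes decode two ways depending on the entry point. Decoding 0xccccc30824648d48 little-endian:

    /* immediate bytes of: movabs $0xccccc30824648d48,%rax
     *
     *   48 8d 64 24 08    lea   0x8(%rsp),%rsp
     *   c3                ret
     *   cc cc             int3; int3
     *
     * Entering at srso_untrain_ret executes the harmless movabs;
     * entering at the inner label srso_safe_ret lands on the lea,
     * giving the "stack manipulation and a RET" the comment describes. */
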
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index 5168ee0360b246..d1ccd06c531278 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -148,7 +148,7 @@ AVXcode:
+ 65: SEG=GS (Prefix)
+ 66: Operand-Size (Prefix)
+ 67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
++68: PUSH Iz
+ 69: IMUL Gv,Ev,Iz
+ 6a: PUSH Ib (d64)
+ 6b: IMUL Gv,Ev,Ib
+@@ -698,10 +698,10 @@ AVXcode: 2
+ 4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+ 4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+ 4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-50: vpdpbusd Vx,Hx,Wx (66),(ev)
+-51: vpdpbusds Vx,Hx,Wx (66),(ev)
+-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
++50: vpdpbusd Vx,Hx,Wx (66)
++51: vpdpbusds Vx,Hx,Wx (66)
++52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
++53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+ 54: vpopcntb/w Vx,Wx (66),(ev)
+ 55: vpopcntd/q Vx,Wx (66),(ev)
+ 58: vpbroadcastd Vx,Wx (66),(v)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index ab778eac195205..6529b3e2cff3cc 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address)
+ goto bad;
+
+ pr_cont("PUD %lx ", pud_val(*pud));
+- if (!pud_present(*pud) || pud_large(*pud))
++ if (!pud_present(*pud) || pud_leaf(*pud))
+ goto out;
+
+ pmd = pmd_offset(pud, address);
+@@ -717,39 +717,8 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
+ WARN_ON_ONCE(user_mode(regs));
+
+ /* Are we prepared to handle this kernel fault? */
+- if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
+- /*
+- * Any interrupt that takes a fault gets the fixup. This makes
+- * the below recursive fault logic only apply to a faults from
+- * task context.
+- */
+- if (in_interrupt())
+- return;
+-
+- /*
+- * Per the above we're !in_interrupt(), aka. task context.
+- *
+- * In this case we need to make sure we're not recursively
+- * faulting through the emulate_vsyscall() logic.
+- */
+- if (current->thread.sig_on_uaccess_err && signal) {
+- sanitize_error_code(address, &error_code);
+-
+- set_signal_archinfo(address, error_code);
+-
+- if (si_code == SEGV_PKUERR) {
+- force_sig_pkuerr((void __user *)address, pkey);
+- } else {
+- /* XXX: hwpoison faults will set the wrong code. */
+- force_sig_fault(signal, si_code, (void __user *)address);
+- }
+- }
+-
+- /*
+- * Barring that, we can do the fixup and be happy.
+- */
++ if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
+ return;
+- }
+
+ /*
+ * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
+@@ -798,15 +767,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
+ show_opcodes(regs, loglvl);
+ }
+
+-/*
+- * The (legacy) vsyscall page is the long page in the kernel portion
+- * of the address space that has user-accessible permissions.
+- */
+-static bool is_vsyscall_vaddr(unsigned long vaddr)
+-{
+- return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+-}
+-
+ static void
+ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, u32 pkey, int si_code)
+@@ -1046,7 +1006,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
+ if (!pud_present(*pud))
+ return 0;
+
+- if (pud_large(*pud))
++ if (pud_leaf(*pud))
+ return spurious_kernel_fault_check(error_code, (pte_t *) pud);
+
+ pmd = pmd_offset(pud, address);
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 679893ea5e6873..6215dfa23578da 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -261,21 +261,17 @@ static void __init probe_page_size_mask(void)
+ }
+ }
+
+-#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
+- .family = 6, \
+- .model = _model, \
+- }
+ /*
+ * INVLPG may not properly flush Global entries
+ * on these CPUs when PCIDs are enabled.
+ */
+ static const struct x86_cpu_id invlpg_miss_ids[] = {
+- INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
+- INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
+- INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ),
+- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
+- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
+- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0),
+ {}
+ };
+
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index a190aae8ceaf70..aa69353da49f24 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+ }
+
+ if (!pud_none(*pud)) {
+- if (!pud_large(*pud)) {
++ if (!pud_leaf(*pud)) {
+ pmd = pmd_offset(pud, 0);
+ paddr_last = phys_pmd_init(pmd, paddr,
+ paddr_end,
+@@ -950,8 +950,12 @@ static void update_end_of_memory_vars(u64 start, u64 size)
+ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params)
+ {
++ unsigned long end = ((start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+ int ret;
+
++ if (WARN_ON_ONCE(end > PHYSMEM_END))
++ return -ERANGE;
++
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
+ WARN_ON_ONCE(ret);
+
+@@ -1163,7 +1167,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
+ if (!pud_present(*pud))
+ continue;
+
+- if (pud_large(*pud) &&
++ if (pud_leaf(*pud) &&
+ IS_ALIGNED(addr, PUD_SIZE) &&
+ IS_ALIGNED(next, PUD_SIZE)) {
+ spin_lock(&init_mm.page_table_lock);
+diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
+index 0302491d799d1b..fcf508c52bdc5c 100644
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+ pud = pud_offset(p4d, addr);
+ do {
+ next = pud_addr_end(addr, end);
+- if (!pud_large(*pud))
++ if (!pud_leaf(*pud))
+ kasan_populate_pud(pud, addr, next, nid);
+ } while (pud++, addr = next, addr != end);
+ }
+diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
+index 37db264866b648..230f1dee4f0954 100644
+--- a/arch/x86/mm/kaslr.c
++++ b/arch/x86/mm/kaslr.c
+@@ -47,13 +47,24 @@ static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
+ */
+ static __initdata struct kaslr_memory_region {
+ unsigned long *base;
++ unsigned long *end;
+ unsigned long size_tb;
+ } kaslr_regions[] = {
+- { &page_offset_base, 0 },
+- { &vmalloc_base, 0 },
+- { &vmemmap_base, 0 },
++ {
++ .base = &page_offset_base,
++ .end = &physmem_end,
++ },
++ {
++ .base = &vmalloc_base,
++ },
++ {
++ .base = &vmemmap_base,
++ },
+ };
+
++/* The end of the possible address space for physical memory */
++unsigned long physmem_end __ro_after_init;
++
+ /* Get size in bytes used by the memory region */
+ static inline unsigned long get_padding(struct kaslr_memory_region *region)
+ {
+@@ -82,6 +93,8 @@ void __init kernel_randomize_memory(void)
+ BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
+ BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
+
++ /* Preset the end of the possible address space for physical memory */
++ physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
+ if (!kaslr_memory_enabled())
+ return;
+
+@@ -128,11 +141,18 @@ void __init kernel_randomize_memory(void)
+ vaddr += entropy;
+ *kaslr_regions[i].base = vaddr;
+
++ /* Calculate the end of the region */
++ vaddr += get_padding(&kaslr_regions[i]);
+ /*
+- * Jump the region and add a minimum padding based on
+- * randomization alignment.
++ * KASLR trims the maximum possible size of the
++ * direct-map. Update the physmem_end boundary.
++ * No rounding required as the region starts
++ * PUD aligned and size is in units of TB.
+ */
+- vaddr += get_padding(&kaslr_regions[i]);
++ if (kaslr_regions[i].end)
++ *kaslr_regions[i].end = __pa_nodebug(vaddr - 1);
++
++ /* Add a minimum padding based on randomization alignment. */
+ vaddr = round_up(vaddr + 1, PUD_SIZE);
+ remain_entropy -= entropy;
+ }
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 5a53c2cc169cc9..42115ac079cfe6 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -3,18 +3,37 @@
+ #include <linux/uaccess.h>
+ #include <linux/kernel.h>
+
++#include <asm/vsyscall.h>
++
+ #ifdef CONFIG_X86_64
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ {
+ unsigned long vaddr = (unsigned long)unsafe_src;
+
+ /*
+- * Range covering the highest possible canonical userspace address
+- * as well as non-canonical address range. For the canonical range
+- * we also need to include the userspace guard page.
++ * Do not allow userspace addresses. This disallows
++ * normal userspace and the userspace guard page:
++ */
++ if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
++ return false;
++
++ /*
++ * Reading from the vsyscall page may cause an unhandled fault in
++ * certain cases. Though it is at an address above TASK_SIZE_MAX, it is
++ * usually considered a user space address.
+ */
+- return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+- __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
++ if (is_vsyscall_vaddr(vaddr))
++ return false;
++
++ /*
++ * Allow everything during early boot before 'x86_virt_bits'
++ * is initialized. Needed for instruction decoding in early
++ * exception handlers.
++ */
++ if (!boot_cpu_data.x86_virt_bits)
++ return true;
++
++ return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
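
copy_from_kernel_nofault_allowed() above replaces one combined range check with explicit tests: reject user addresses (including the guard page), reject the vsyscall page (above TASK_SIZE_MAX but user-visible), allow everything while x86_virt_bits is still zero during early boot, and otherwise require a canonical address. A sketch of a canonicality test equivalent to __is_canonical_address(): sign-extending from bit vaddr_bits - 1 must reproduce the original value:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_canonical(uint64_t vaddr, unsigned int vaddr_bits)
    {
        /* shift the significant bits to the top, then arithmetic-shift
         * back down: a canonical address comes back unchanged */
        return ((int64_t)(vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits))
                == (int64_t)vaddr;
    }
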
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 6faea41e99b6bb..1873a65b565578 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -34,6 +34,7 @@
+ #include <asm/msr.h>
+ #include <asm/cmdline.h>
+ #include <asm/sev.h>
++#include <asm/ia32.h>
+
+ #include "mm_internal.h"
+
+@@ -517,6 +518,34 @@ void __init sme_early_init(void)
+ */
+ if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
+ x86_cpuinit.parallel_bringup = false;
++
++ /*
++ * The VMM is capable of injecting interrupt 0x80 and triggering the
++ * compatibility syscall path.
++ *
++ * By default, the 32-bit emulation is disabled in order to ensure
++ * the safety of the VM.
++ */
++ if (sev_status & MSR_AMD64_SEV_ENABLED)
++ ia32_disable();
++
++ /*
++ * Override init functions that scan the ROM region in SEV-SNP guests,
++ * as this memory is not pre-validated and would thus cause a crash.
++ */
++ if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
++ x86_init.mpparse.find_smp_config = x86_init_noop;
++ x86_init.pci.init_irq = x86_init_noop;
++ x86_init.resources.probe_roms = x86_init_noop;
++
++ /*
++ * DMI setup behavior for SEV-SNP guests depends on
++ * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
++ * parsed yet. snp_dmi_setup() will run after that
++ * parsing has happened.
++ */
++ x86_init.resources.dmi_setup = snp_dmi_setup;
++ }
+ }
+
+ void __init mem_encrypt_free_decrypted_mem(void)
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index d73aeb16417fcf..cc47a818a640af 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -41,9 +41,9 @@
+ #include <linux/mem_encrypt.h>
+ #include <linux/cc_platform.h>
+
++#include <asm/init.h>
+ #include <asm/setup.h>
+ #include <asm/sections.h>
+-#include <asm/cmdline.h>
+ #include <asm/coco.h>
+ #include <asm/sev.h>
+
+@@ -95,11 +95,7 @@ struct sme_populate_pgd_data {
+ */
+ static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
+
+-static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+-static char sme_cmdline_on[] __initdata = "on";
+-static char sme_cmdline_off[] __initdata = "off";
+-
+-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
++static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ unsigned long pgd_start, pgd_end, pgd_size;
+ pgd_t *pgd_p;
+@@ -114,7 +110,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ memset(pgd_p, 0, pgd_size);
+ }
+
+-static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
++static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ pgd_t *pgd;
+ p4d_t *p4d;
+@@ -145,13 +141,13 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
+ }
+
+- if (pud_large(*pud))
++ if (pud_leaf(*pud))
+ return NULL;
+
+ return pud;
+ }
+
+-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
++static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+ {
+ pud_t *pud;
+ pmd_t *pmd;
+@@ -167,7 +163,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+ set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
+ }
+
+-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
++static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ pud_t *pud;
+ pmd_t *pmd;
+@@ -193,7 +189,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+ set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
+ }
+
+-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
++static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+ {
+ while (ppd->vaddr < ppd->vaddr_end) {
+ sme_populate_pgd_large(ppd);
+@@ -203,7 +199,7 @@ static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+ }
+ }
+
+-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
++static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+ {
+ while (ppd->vaddr < ppd->vaddr_end) {
+ sme_populate_pgd(ppd);
+@@ -213,7 +209,7 @@ static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+ }
+ }
+
+-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
++static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
+ pmdval_t pmd_flags, pteval_t pte_flags)
+ {
+ unsigned long vaddr_end;
+@@ -237,22 +233,22 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+ __sme_map_range_pte(ppd);
+ }
+
+-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+ {
+ __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
+ }
+
+-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+ {
+ __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
+ }
+
+-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
++static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+ {
+ __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
+ }
+
+-static unsigned long __init sme_pgtable_calc(unsigned long len)
++static unsigned long __head sme_pgtable_calc(unsigned long len)
+ {
+ unsigned long entries = 0, tables = 0;
+
+@@ -289,7 +285,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
+ return entries + tables;
+ }
+
+-void __init sme_encrypt_kernel(struct boot_params *bp)
++void __head sme_encrypt_kernel(struct boot_params *bp)
+ {
+ unsigned long workarea_start, workarea_end, workarea_len;
+ unsigned long execute_start, execute_end, execute_len;
+@@ -305,7 +301,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ * instrumentation or checking boot_cpu_data in the cc_platform_has()
+ * function.
+ */
+- if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
++ if (!sme_get_me_mask() ||
++ RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
+ return;
+
+ /*
+@@ -323,9 +320,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ * memory from being cached.
+ */
+
+- /* Physical addresses gives us the identity mapped virtual addresses */
+- kernel_start = __pa_symbol(_text);
+- kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);
++ kernel_start = (unsigned long)RIP_REL_REF(_text);
++ kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
+ kernel_len = kernel_end - kernel_start;
+
+ initrd_start = 0;
+@@ -342,14 +338,6 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ }
+ #endif
+
+- /*
+- * We're running identity mapped, so we must obtain the address to the
+- * SME encryption workarea using rip-relative addressing.
+- */
+- asm ("lea sme_workarea(%%rip), %0"
+- : "=r" (workarea_start)
+- : "p" (sme_workarea));
+-
+ /*
+ * Calculate required number of workarea bytes needed:
+ * executable encryption area size:
+@@ -359,7 +347,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ * pagetable structures for the encryption of the kernel
+ * pagetable structures for workarea (in case not currently mapped)
+ */
+- execute_start = workarea_start;
++ execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
+ execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
+ execute_len = execute_end - execute_start;
+
+@@ -502,14 +490,11 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
+ native_write_cr3(__native_read_cr3());
+ }
+
+-void __init sme_enable(struct boot_params *bp)
++void __head sme_enable(struct boot_params *bp)
+ {
+- const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned long feature_mask;
+- bool active_by_default;
+ unsigned long me_mask;
+- char buffer[16];
+ bool snp;
+ u64 msr;
+
+@@ -543,15 +528,18 @@ void __init sme_enable(struct boot_params *bp)
+ me_mask = 1UL << (ebx & 0x3f);
+
+ /* Check the SEV MSR whether SEV or SME is enabled */
+- sev_status = __rdmsr(MSR_AMD64_SEV);
+- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++ RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
++ feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
+ /* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
+- if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
++ if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
+ snp_abort();
+
+ /* Check if memory encryption is enabled */
+ if (feature_mask == AMD_SME_BIT) {
++ if (!(bp->hdr.xloadflags & XLF_MEM_ENCRYPTION))
++ return;
++
+ /*
+ * No SME if Hypervisor bit is set. This check is here to
+ * prevent a guest from trying to enable SME. For running as a
+@@ -571,48 +559,10 @@ void __init sme_enable(struct boot_params *bp)
+ msr = __rdmsr(MSR_AMD64_SYSCFG);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
+ return;
+- } else {
+- /* SEV state cannot be controlled by a command line option */
+- sme_me_mask = me_mask;
+- goto out;
+ }
+
+- /*
+- * Fixups have not been applied to phys_base yet and we're running
+- * identity mapped, so we must obtain the address to the SME command
+- * line argument data using rip-relative addressing.
+- */
+- asm ("lea sme_cmdline_arg(%%rip), %0"
+- : "=r" (cmdline_arg)
+- : "p" (sme_cmdline_arg));
+- asm ("lea sme_cmdline_on(%%rip), %0"
+- : "=r" (cmdline_on)
+- : "p" (sme_cmdline_on));
+- asm ("lea sme_cmdline_off(%%rip), %0"
+- : "=r" (cmdline_off)
+- : "p" (sme_cmdline_off));
+-
+- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
+- active_by_default = true;
+- else
+- active_by_default = false;
+-
+- cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+- ((u64)bp->ext_cmd_line_ptr << 32));
+-
+- if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+- return;
+-
+- if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+- sme_me_mask = me_mask;
+- else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+- sme_me_mask = 0;
+- else
+- sme_me_mask = active_by_default ? me_mask : 0;
+-out:
+- if (sme_me_mask) {
+- physical_mask &= ~sme_me_mask;
+- cc_vendor = CC_VENDOR_AMD;
+- cc_set_mask(sme_me_mask);
+- }
++ RIP_REL_REF(sme_me_mask) = me_mask;
++ physical_mask &= ~me_mask;
++ cc_vendor = CC_VENDOR_AMD;
++ cc_set_mask(me_mask);
+ }
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 2aadb2019b4f23..c7fa5396c0f05c 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -11,6 +11,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/sched.h>
+ #include <linux/topology.h>
++#include <linux/sort.h>
+
+ #include <asm/e820/api.h>
+ #include <asm/proto.h>
+@@ -601,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
+ if (start >= end)
+ continue;
+
+- /*
+- * Don't confuse VM with a node that doesn't have the
+- * minimum amount of memory:
+- */
+- if (end && (end - start) < NODE_MIN_SIZE)
+- continue;
+-
+ alloc_node_data(nid);
+ }
+
+@@ -961,4 +955,78 @@ int memory_add_physaddr_to_nid(u64 start)
+ return nid;
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++
+ #endif
++
++static int __init cmp_memblk(const void *a, const void *b)
++{
++ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
++ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
++
++ return (ma->start > mb->start) - (ma->start < mb->start);
++}
++
++static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
++
++/**
++ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
++ * @start: address to begin fill
++ * @end: address to end fill
++ *
++ * Find and extend numa_meminfo memblks to cover the physical
++ * address range @start-@end
++ *
++ * RETURNS:
++ * 0 : Success
++ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
++ */
++int __init numa_fill_memblks(u64 start, u64 end)
++{
++ struct numa_memblk **blk = &numa_memblk_list[0];
++ struct numa_meminfo *mi = &numa_meminfo;
++ int count = 0;
++ u64 prev_end;
++
++ /*
++ * Create a list of pointers to numa_meminfo memblks that
++ * overlap start, end. The list is used to make in-place
++ * changes that fill out the numa_meminfo memblks.
++ */
++ for (int i = 0; i < mi->nr_blks; i++) {
++ struct numa_memblk *bi = &mi->blk[i];
++
++ if (memblock_addrs_overlap(start, end - start, bi->start,
++ bi->end - bi->start)) {
++ blk[count] = &mi->blk[i];
++ count++;
++ }
++ }
++ if (!count)
++ return NUMA_NO_MEMBLK;
++
++ /* Sort the list of pointers in memblk->start order */
++ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
++
++ /* Make sure the first/last memblks include start/end */
++ blk[0]->start = min(blk[0]->start, start);
++ blk[count - 1]->end = max(blk[count - 1]->end, end);
++
++ /*
++ * Fill any gaps by tracking the previous memblk's
++ * end address and backfilling to it if needed.
++ */
++ prev_end = blk[0]->end;
++ for (int i = 1; i < count; i++) {
++ struct numa_memblk *curr = blk[i];
++
++ if (prev_end >= curr->start) {
++ if (prev_end < curr->end)
++ prev_end = curr->end;
++ } else {
++ curr->start = prev_end;
++ prev_end = curr->end;
++ }
++ }
++ return 0;
++}
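
numa_fill_memblks() above is easiest to follow on a concrete case. With memblks [4, 8) and [12, 16) and a fill request for [0, 20): both blocks overlap the request, the min/max fixup widens them to [0, 8) and [12, 20), and the backfill pass pulls the second block's start down to the previous end, leaving [0, 8) and [8, 20) -- a contiguous cover. A standalone sketch of the backfill pass:

    #include <stdint.h>
    #include <stdio.h>

    struct blk { uint64_t start, end; };

    /* Close gaps between sorted blocks, as the loop above does. */
    static void backfill(struct blk *b, int count)
    {
        uint64_t prev_end = b[0].end;

        for (int i = 1; i < count; i++) {
            if (prev_end >= b[i].start) {
                if (prev_end < b[i].end)
                    prev_end = b[i].end;
            } else {
                b[i].start = prev_end;      /* extend back over the gap */
                prev_end = b[i].end;
            }
        }
    }

    int main(void)
    {
        struct blk b[] = { { 0, 8 }, { 12, 20 } };  /* after min/max fixup */

        backfill(b, 2);
        printf("[%llu,%llu) [%llu,%llu)\n",          /* [0,8) [8,20) */
               (unsigned long long)b[0].start, (unsigned long long)b[0].end,
               (unsigned long long)b[1].start, (unsigned long long)b[1].end);
        return 0;
    }
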
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index de10800cd4dd48..e7b9ac63bb02aa 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -950,6 +950,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
+ memtype_free(paddr, paddr + size);
+ }
+
++static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
++ pgprot_t *pgprot)
++{
++ unsigned long prot;
++
++ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
++
++ /*
++ * We need the starting PFN and cachemode used for track_pfn_remap()
++ * that covered the whole VMA. For most mappings, we can obtain that
++ * information from the page tables. For COW mappings, we might now
++ * suddenly have anon folios mapped and follow_phys() will fail.
++ *
++ * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
++ * detect the PFN. If we need the cachemode as well, we're out of luck
++ * for now and have to fail fork().
++ */
++ if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
++ if (pgprot)
++ *pgprot = __pgprot(prot);
++ return 0;
++ }
++ if (is_cow_mapping(vma->vm_flags)) {
++ if (pgprot)
++ return -EINVAL;
++ *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
++ return 0;
++ }
++ WARN_ON_ONCE(1);
++ return -EINVAL;
++}
++
+ /*
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+@@ -960,20 +992,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
+ int track_pfn_copy(struct vm_area_struct *vma)
+ {
+ resource_size_t paddr;
+- unsigned long prot;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ pgprot_t pgprot;
+
+ if (vma->vm_flags & VM_PAT) {
+- /*
+- * reserve the whole chunk covered by vma. We need the
+- * starting address and protection from pte.
+- */
+- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+- WARN_ON_ONCE(1);
++ if (get_pat_info(vma, &paddr, &pgprot))
+ return -EINVAL;
+- }
+- pgprot = __pgprot(prot);
++ /* reserve the whole chunk covered by vma. */
+ return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+ }
+
+@@ -1048,7 +1073,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size, bool mm_wr_locked)
+ {
+ resource_size_t paddr;
+- unsigned long prot;
+
+ if (vma && !(vma->vm_flags & VM_PAT))
+ return;
+@@ -1056,11 +1080,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ /* free the chunk starting from pfn or the whole chunk */
+ paddr = (resource_size_t)pfn << PAGE_SHIFT;
+ if (!paddr && !size) {
+- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+- WARN_ON_ONCE(1);
++ if (get_pat_info(vma, &paddr, NULL))
+ return;
+- }
+-
+ size = vma->vm_end - vma->vm_start;
+ }
+ free_pfn_range(paddr, size);
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index bda9f129835e95..2d850f6bae701c 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
+ * Validate strict W^X semantics.
+ */
+ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
+- unsigned long pfn, unsigned long npg)
++ unsigned long pfn, unsigned long npg,
++ bool nx, bool rw)
+ {
+ unsigned long end;
+
+@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
+ if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
+ return new;
+
++ /* Non-leaf translation entries can disable writing or execution. */
++ if (!rw || nx)
++ return new;
++
+ end = start + npg * PAGE_SIZE - 1;
+ WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
+ (unsigned long long)pgprot_val(old),
+@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
+
+ /*
+ * Lookup the page table entry for a virtual address in a specific pgd.
+- * Return a pointer to the entry and the level of the mapping.
++ * Return a pointer to the entry, the level of the mapping, and the effective
++ * NX and RW bits of all page table levels.
+ */
+-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+- unsigned int *level)
++pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
++ unsigned int *level, bool *nx, bool *rw)
+ {
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ *level = PG_LEVEL_NONE;
++ *nx = false;
++ *rw = true;
+
+ if (pgd_none(*pgd))
+ return NULL;
+
++ *nx |= pgd_flags(*pgd) & _PAGE_NX;
++ *rw &= pgd_flags(*pgd) & _PAGE_RW;
++
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d))
+ return NULL;
+@@ -679,14 +690,20 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ if (p4d_large(*p4d) || !p4d_present(*p4d))
+ return (pte_t *)p4d;
+
++ *nx |= p4d_flags(*p4d) & _PAGE_NX;
++ *rw &= p4d_flags(*p4d) & _PAGE_RW;
++
+ pud = pud_offset(p4d, address);
+ if (pud_none(*pud))
+ return NULL;
+
+ *level = PG_LEVEL_1G;
+- if (pud_large(*pud) || !pud_present(*pud))
++ if (pud_leaf(*pud) || !pud_present(*pud))
+ return (pte_t *)pud;
+
++ *nx |= pud_flags(*pud) & _PAGE_NX;
++ *rw &= pud_flags(*pud) & _PAGE_RW;
++
+ pmd = pmd_offset(pud, address);
+ if (pmd_none(*pmd))
+ return NULL;
+@@ -695,11 +712,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ if (pmd_large(*pmd) || !pmd_present(*pmd))
+ return (pte_t *)pmd;
+
++ *nx |= pmd_flags(*pmd) & _PAGE_NX;
++ *rw &= pmd_flags(*pmd) & _PAGE_RW;
++
+ *level = PG_LEVEL_4K;
+
+ return pte_offset_kernel(pmd, address);
+ }
+
++/*
++ * Lookup the page table entry for a virtual address in a specific pgd.
++ * Return a pointer to the entry and the level of the mapping.
++ */
++pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
++ unsigned int *level)
++{
++ bool nx, rw;
++
++ return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
++}
++
+ /*
+ * Lookup the page table entry for a virtual address. Return a pointer
+ * to the entry and the level of the mapping.
+@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
+ EXPORT_SYMBOL_GPL(lookup_address);
+
+ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
+- unsigned int *level)
++ unsigned int *level, bool *nx, bool *rw)
+ {
+- if (cpa->pgd)
+- return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+- address, level);
++ pgd_t *pgd;
++
++ if (!cpa->pgd)
++ pgd = pgd_offset_k(address);
++ else
++ pgd = cpa->pgd + pgd_index(address);
+
+- return lookup_address(address, level);
++ return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
+ }
+
+ /*
+@@ -743,7 +778,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
+ return NULL;
+
+ pud = pud_offset(p4d, address);
+- if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
++ if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
+ return NULL;
+
+ return pmd_offset(pud, address);
+@@ -845,12 +880,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ pgprot_t old_prot, new_prot, req_prot, chk_prot;
+ pte_t new_pte, *tmp;
+ enum pg_level level;
++ bool nx, rw;
+
+ /*
+ * Check for races, another CPU might have split this page
+ * up already:
+ */
+- tmp = _lookup_address_cpa(cpa, address, &level);
++ tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ if (tmp != kpte)
+ return 1;
+
+@@ -961,7 +997,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
+ psize, CPA_DETECT);
+
+- new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
++ new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
++ nx, rw);
+
+ /*
+ * If there is a conflict, split the large page.
+@@ -1042,6 +1079,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ pte_t *pbase = (pte_t *)page_address(base);
+ unsigned int i, level;
+ pgprot_t ref_prot;
++ bool nx, rw;
+ pte_t *tmp;
+
+ spin_lock(&pgd_lock);
+@@ -1049,7 +1087,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ * Check for races, another CPU might have split this page
+ * up for us already:
+ */
+- tmp = _lookup_address_cpa(cpa, address, &level);
++ tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ if (tmp != kpte) {
+ spin_unlock(&pgd_lock);
+ return 1;
+@@ -1274,7 +1312,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
+ */
+ while (end - start >= PUD_SIZE) {
+
+- if (pud_large(*pud))
++ if (pud_leaf(*pud))
+ pud_clear(pud);
+ else
+ unmap_pmd_range(pud, start, start + PUD_SIZE);
+@@ -1590,10 +1628,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ int do_split, err;
+ unsigned int level;
+ pte_t *kpte, old_pte;
++ bool nx, rw;
+
+ address = __cpa_addr(cpa, cpa->curpage);
+ repeat:
+- kpte = _lookup_address_cpa(cpa, address, &level);
++ kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ if (!kpte)
+ return __cpa_process_fault(cpa, address, primary);
+
+@@ -1615,7 +1654,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ new_prot = static_protections(new_prot, address, pfn, 1, 0,
+ CPA_PROTECT);
+
+- new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
++ new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
++ nx, rw);
+
+ new_prot = pgprot_clear_protnone_bits(new_prot);
+
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 9deadf517f14a9..b18f5a71e679e2 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -628,6 +628,8 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+ {
++ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
++
+ /*
+ * No flush is necessary. Once an invalid PTE is established, the PTE's
+ * access and dirty bits cannot be updated.
+@@ -774,7 +776,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+ */
+ int pud_clear_huge(pud_t *pud)
+ {
+- if (pud_large(*pud)) {
++ if (pud_leaf(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 78414c6d1b5ed1..83a6bdf0b498ef 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+
+ pud = pud_offset(p4d, address);
+ /* The user page tables do not use large mappings: */
+- if (pud_large(*pud)) {
++ if (pud_leaf(*pud)) {
+ WARN_ON(1);
+ return NULL;
+ }
+@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
+ {
+ gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ pmd_t *pmd;
+@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+ if (!pmd)
+ return NULL;
+
+- /* We can't do anything sensible if we hit a large mapping. */
++ /* Large PMD mapping found */
+ if (pmd_large(*pmd)) {
+- WARN_ON(1);
+- return NULL;
++ /* Clear the PMD if we hit a large mapping from the first round */
++ if (late_text) {
++ set_pmd(pmd, __pmd(0));
++ } else {
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
+ }
+
+ if (pmd_none(*pmd)) {
+@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
+ if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+ return;
+
+- target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
++ target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
+ if (WARN_ON(!target_pte))
+ return;
+
+@@ -301,7 +306,7 @@ enum pti_clone_level {
+
+ static void
+ pti_clone_pgtable(unsigned long start, unsigned long end,
+- enum pti_clone_level level)
++ enum pti_clone_level level, bool late_text)
+ {
+ unsigned long addr;
+
+@@ -374,14 +379,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ */
+ *target_pmd = *pmd;
+
+- addr += PMD_SIZE;
++ addr = round_up(addr + 1, PMD_SIZE);
+
+ } else if (level == PTI_CLONE_PTE) {
+
+ /* Walk the page-table down to the pte level */
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte)) {
+- addr += PAGE_SIZE;
++ addr = round_up(addr + 1, PAGE_SIZE);
+ continue;
+ }
+
+@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ return;
+
+ /* Allocate PTE in the user page-table */
+- target_pte = pti_user_pagetable_walk_pte(addr);
++ target_pte = pti_user_pagetable_walk_pte(addr, late_text);
+ if (WARN_ON(!target_pte))
+ return;
+
+@@ -401,7 +406,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ /* Clone the PTE */
+ *target_pte = *pte;
+
+- addr += PAGE_SIZE;
++ addr = round_up(addr + 1, PAGE_SIZE);
+
+ } else {
+ BUG();
+@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
+ phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+ pte_t *target_pte;
+
+- target_pte = pti_user_pagetable_walk_pte(va);
++ target_pte = pti_user_pagetable_walk_pte(va, false);
+ if (WARN_ON(!target_pte))
+ return;
+
+@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
+ start = CPU_ENTRY_AREA_BASE;
+ end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+
+- pti_clone_pgtable(start, end, PTI_CLONE_PMD);
++ pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
+ }
+ #endif /* CONFIG_X86_64 */
+
+@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
+ /*
+ * Clone the populated PMDs of the entry text and force it RO.
+ */
+-static void pti_clone_entry_text(void)
++static void pti_clone_entry_text(bool late)
+ {
+ pti_clone_pgtable((unsigned long) __entry_text_start,
+ (unsigned long) __entry_text_end,
+- PTI_CLONE_PMD);
++ PTI_LEVEL_KERNEL_IMAGE, late);
+ }
+
+ /*
+@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
+ * pti_set_kernel_image_nonglobal() did to clear the
+ * global bit.
+ */
+- pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
++ pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
+
+ /*
+ * pti_clone_pgtable() will set the global bit in any PMDs
+@@ -638,8 +643,15 @@ void __init pti_init(void)
+
+ /* Undo all global bits from the init pagetables in head_64.S: */
+ pti_set_kernel_image_nonglobal();
++
+ /* Replace some of the global bits just for shared entry text: */
+- pti_clone_entry_text();
++ /*
++ * This is very early in boot. Device and Late initcalls can do
++ * modprobe before free_initmem() and mark_readonly(). This
++ * pti_clone_entry_text() allows those user-mode-helpers to function,
++ * but notably the text is still RW.
++ */
++ pti_clone_entry_text(false);
+ pti_setup_espfix64();
+ pti_setup_vsyscall();
+ }
+@@ -656,10 +668,11 @@ void pti_finalize(void)
+ if (!boot_cpu_has(X86_FEATURE_PTI))
+ return;
+ /*
+- * We need to clone everything (again) that maps parts of the
+- * kernel image.
++ * This is after free_initmem() (all initcalls are done) and we've done
++ * mark_readonly(). Text is now NX which might've split some PMDs
++ * relative to the early clone.
+ */
+- pti_clone_entry_text();
++ pti_clone_entry_text(true);
+ pti_clone_kernel_text();
+
+ debug_checkwx_user();
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 453ea95b667dad..2fbae48f0b470a 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -497,9 +497,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ {
+ struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
+ u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+- unsigned long new_lam = mm_lam_cr3_mask(next);
+ bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
+ unsigned cpu = smp_processor_id();
++ unsigned long new_lam;
+ u64 next_tlb_gen;
+ bool need_flush;
+ u16 new_asid;
+@@ -622,9 +622,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ }
+
+- /*
+- * Start remote flushes and then read tlb_gen.
+- */
++ /* Start receiving IPIs and then read tlb_gen (and LAM below) */
+ if (next != &init_mm)
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+@@ -636,6 +634,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ barrier();
+ }
+
++ new_lam = mm_lam_cr3_mask(next);
+ set_tlbstate_lam_mode(next);
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index a5930042139d3b..a50c99e9b5c01f 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -58,6 +58,56 @@ static bool is_imm8(int value)
+ return value <= 127 && value >= -128;
+ }
+
++/*
++ * Let us limit the positive offset to be <= 123.
++ * This is to ensure eventual jit convergence for the following patterns:
++ * ...
++ * pass4, final_proglen=4391:
++ * ...
++ * 20e: 48 85 ff test rdi,rdi
++ * 211: 74 7d je 0x290
++ * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
++ * ...
++ * 289: 48 85 ff test rdi,rdi
++ * 28c: 74 17 je 0x2a5
++ * 28e: e9 7f ff ff ff jmp 0x212
++ * 293: bf 03 00 00 00 mov edi,0x3
++ * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (+125)
++ * and insn at 0x28e is 5-byte jmp insn with offset -129.
++ *
++ * pass5, final_proglen=4392:
++ * ...
++ * 20e: 48 85 ff test rdi,rdi
++ * 211: 0f 84 80 00 00 00 je 0x297
++ * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
++ * ...
++ * 28d: 48 85 ff test rdi,rdi
++ * 290: 74 1a je 0x2ac
++ * 292: eb 84 jmp 0x218
++ * 294: bf 03 00 00 00 mov edi,0x3
++ * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
++ * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
++ * At the same time, insn at 0x292 is a 2-byte insn since its offset is
++ * -124.
++ *
++ * pass6 will repeat the same code as in pass4 and this will prevent
++ * eventual convergence.
++ *
++ * To fix this issue, we need to break the je (2->6 bytes) <-> jmp (5->2 bytes)
++ * cycle above. In the above example, a je offset <= 0x7c should work.
++ *
++ * For other cases, je <-> je needs offset <= 0x7b to avoid a non-convergence
++ * issue. For jmp <-> je and jmp <-> jmp cases, a jmp offset <= 0x7c should
++ * avoid a non-convergence issue.
++ *
++ * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
++ * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
++ */
++static bool is_imm8_jmp_offset(int value)
++{
++ return value <= 123 && value >= -128;
++}
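
A standalone check of the two range tests above; nothing is assumed beyond the bounds stated in the comment. Offsets 124..127 still fit a signed byte but are now rejected for jump encoding so the passes can converge:

#include <stdbool.h>
#include <stdio.h>

static bool is_imm8(int v)            { return v <= 127 && v >= -128; }
static bool is_imm8_jmp_offset(int v) { return v <= 123 && v >= -128; }

int main(void)
{
	int samples[] = { -129, -128, 0, 123, 124, 127, 128 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4d: imm8=%d jmp_imm8=%d\n", samples[i],
		       is_imm8(samples[i]), is_imm8_jmp_offset(samples[i]));
	return 0; /* 124..127 fit a signed byte but are rejected for jumps */
}
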
++
+ static bool is_simm32(s64 value)
+ {
+ return value == (s64)(s32)value;
+@@ -344,7 +394,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
+ static int emit_rsb_call(u8 **pprog, void *func, void *ip)
+ {
+ OPTIMIZER_HIDE_VAR(func);
+- x86_call_depth_emit_accounting(pprog, func);
++ ip += x86_call_depth_emit_accounting(pprog, func);
+ return emit_patch(pprog, func, ip, 0xE8);
+ }
+
+@@ -1018,6 +1068,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
+
+ #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+
++/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
++#define RESTORE_TAIL_CALL_CNT(stack) \
++ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
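
The macro emits an rbp-relative load of the tail-call counter, which is spilled in the 8 bytes just below the rounded-up stack area. A small sketch of the displacement arithmetic, with round_up() written in its usual power-of-two form (an assumption made for this illustration):

#include <stdio.h>

/* round_up() in its usual power-of-two form (assumption for this sketch) */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	/* rbp-relative displacements for "mov rax, [rbp - round_up(stack, 8) - 8]" */
	int depths[] = { 0, 1, 8, 12, 16 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("stack_depth=%2d -> disp=%d\n",
		       depths[i], -round_up(depths[i], 8) - 8);
	return 0; /* 0 -> -8, 1 -> -16, 8 -> -16, 12 -> -24, 16 -> -24 */
}
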
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
+ int oldproglen, struct jit_context *ctx, bool jmp_padding)
+ {
+@@ -1454,36 +1508,41 @@ st: if (is_imm8(insn->off))
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
+ /* Conservatively check that src_reg + insn->off is a kernel address:
+- * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
+- * src_reg is used as scratch for src_reg += insn->off and restored
+- * after emit_ldx if necessary
++ * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
++ * and
++ * src_reg + insn->off < VSYSCALL_ADDR
+ */
+
+- u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
++ u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
+ u8 *end_of_jmp;
+
+- /* At end of these emitted checks, insn->off will have been added
+- * to src_reg, so no need to do relative load with insn->off offset
+- */
+- insn_off = 0;
++ /* movabsq r10, VSYSCALL_ADDR */
++ emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
++ (u32)(long)VSYSCALL_ADDR);
+
+- /* movabsq r11, limit */
+- EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
+- EMIT((u32)limit, 4);
+- EMIT(limit >> 32, 4);
++ /* mov src_reg, r11 */
++ EMIT_mov(AUX_REG, src_reg);
+
+ if (insn->off) {
+- /* add src_reg, insn->off */
+- maybe_emit_1mod(&prog, src_reg, true);
+- EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
++ /* add r11, insn->off */
++ maybe_emit_1mod(&prog, AUX_REG, true);
++ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
+ }
+
+- /* cmp src_reg, r11 */
+- maybe_emit_mod(&prog, src_reg, AUX_REG, true);
+- EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
++ /* sub r11, r10 */
++ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
++ EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
++
++ /* movabsq r10, limit */
++ emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
++ (u32)(long)limit);
+
+- /* if unsigned '>=', goto load */
+- EMIT2(X86_JAE, 0);
++ /* cmp r10, r11 */
++ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
++ EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
++
++ /* if unsigned '>', goto load */
++ EMIT2(X86_JA, 0);
+ end_of_jmp = prog;
+
+ /* xor dst_reg, dst_reg */
+@@ -1509,18 +1568,6 @@ st: if (is_imm8(insn->off))
+ /* populate jmp_offset for JMP above */
+ start_of_ldx[-1] = prog - start_of_ldx;
+
+- if (insn->off && src_reg != dst_reg) {
+- /* sub src_reg, insn->off
+- * Restore src_reg after "add src_reg, insn->off" in prev
+- * if statement. But if src_reg == dst_reg, emit_ldx
+- * above already clobbered src_reg, so no need to restore.
+- * If add src_reg, insn->off was unnecessary, no need to
+- * restore either.
+- */
+- maybe_emit_1mod(&prog, src_reg, true);
+- EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
+- }
+-
+ if (!bpf_prog->aux->extable)
+ break;
+
+@@ -1623,9 +1670,7 @@ st: if (is_imm8(insn->off))
+
+ func = (u8 *) __bpf_call_base + imm32;
+ if (tail_call_reachable) {
+- /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+- EMIT3_off32(0x48, 0x8B, 0x85,
+- -round_up(bpf_prog->aux->stack_depth, 8) - 8);
++ RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
+ if (!imm32)
+ return -EINVAL;
+ offs = 7 + x86_call_depth_emit_accounting(&prog, func);
+@@ -1779,7 +1824,7 @@ st: if (is_imm8(insn->off))
+ return -EFAULT;
+ }
+ jmp_offset = addrs[i + insn->off] - addrs[i];
+- if (is_imm8(jmp_offset)) {
++ if (is_imm8_jmp_offset(jmp_offset)) {
+ if (jmp_padding) {
+ /* To keep the jmp_offset valid, the extra bytes are
+ * padded before the jump insn, so we subtract the
+@@ -1861,7 +1906,7 @@ st: if (is_imm8(insn->off))
+ break;
+ }
+ emit_jmp:
+- if (is_imm8(jmp_offset)) {
++ if (is_imm8_jmp_offset(jmp_offset)) {
+ if (jmp_padding) {
+ /* To avoid breaking jmp_offset, the extra bytes
+ * are padded before the actual jmp insn, so
+@@ -2400,6 +2445,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ * [ ... ]
+ * [ stack_arg2 ]
+ * RBP - arg_stack_off [ stack_arg1 ]
++ * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
+ */
+
+ /* room for return value of orig_call or fentry prog */
+@@ -2464,6 +2510,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ else
+ /* sub rsp, stack_size */
+ EMIT4(0x48, 0x83, 0xEC, stack_size);
++ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++ EMIT1(0x50); /* push rax */
+ /* mov QWORD PTR [rbp - rbx_off], rbx */
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
+
+@@ -2516,9 +2564,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ restore_regs(m, &prog, regs_off);
+ save_args(m, &prog, arg_stack_off, true);
+
++ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++ /* Before calling the original function, restore the
++ * tail_call_cnt from stack to rax.
++ */
++ RESTORE_TAIL_CALL_CNT(stack_size);
++
+ if (flags & BPF_TRAMP_F_ORIG_STACK) {
+- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
+- EMIT2(0xff, 0xd0); /* call *rax */
++ emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
++ EMIT2(0xff, 0xd3); /* call *rbx */
+ } else {
+ /* call original function */
+ if (emit_rsb_call(&prog, orig_call, prog)) {
+@@ -2569,7 +2623,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ ret = -EINVAL;
+ goto cleanup;
+ }
+- }
++ } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
++ /* Before running the original function, restore the
++ * tail_call_cnt from stack to rax.
++ */
++ RESTORE_TAIL_CALL_CNT(stack_size);
++
+ /* restore return value of orig_call or fentry prog back into RAX */
+ if (save_ret)
+ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+@@ -2913,3 +2972,49 @@ void bpf_jit_free(struct bpf_prog *prog)
+
+ bpf_prog_unlock_free(prog);
+ }
++
++void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++ struct bpf_prog *new, struct bpf_prog *old)
++{
++ u8 *old_addr, *new_addr, *old_bypass_addr;
++ int ret;
++
++ old_bypass_addr = old ? NULL : poke->bypass_addr;
++ old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
++ new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
++
++ /*
++ * On program loading or teardown, the program's kallsym entry
++ * might not be in place, so we use __bpf_arch_text_poke to skip
++ * the kallsyms check.
++ */
++ if (new) {
++ ret = __bpf_arch_text_poke(poke->tailcall_target,
++ BPF_MOD_JUMP,
++ old_addr, new_addr);
++ BUG_ON(ret < 0);
++ if (!old) {
++ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
++ BPF_MOD_JUMP,
++ poke->bypass_addr,
++ NULL);
++ BUG_ON(ret < 0);
++ }
++ } else {
++ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
++ BPF_MOD_JUMP,
++ old_bypass_addr,
++ poke->bypass_addr);
++ BUG_ON(ret < 0);
++ /* Let other CPUs finish executing the program
++ * so that they cannot be exposed to an invalid
++ * nop, stack unwind, or nop state.
++ */
++ if (!ret)
++ synchronize_rcu();
++ ret = __bpf_arch_text_poke(poke->tailcall_target,
++ BPF_MOD_JUMP,
++ old_addr, NULL);
++ BUG_ON(ret < 0);
++ }
++}
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e3ec02e6ac9feb..98a9bb92d75c88 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -3,9 +3,11 @@
+ * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/dmi.h>
+ #include <linux/pci.h>
++#include <linux/suspend.h>
+ #include <linux/vgaarb.h>
+ #include <asm/amd_nb.h>
+ #include <asm/hpet.h>
+@@ -904,3 +906,108 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
++
++/*
++ * Disable D3cold on Asus B1400 PCI-NVMe bridge
++ *
++ * On this platform with VMD off, the NVMe device cannot successfully power
++ * back on from D3cold. This appears to be an untested transition by the
++ * vendor: Windows leaves the NVMe and parent bridge in D0 during suspend.
++ *
++ * We disable D3cold on the parent bridge for simplicity, and the fact that
++ * both parent bridge and NVMe device share the same power resource.
++ *
++ * This is only needed on BIOS versions before 308; the newer versions flip
++ * StorageD3Enable from 1 to 0.
++ */
++static const struct dmi_system_id asus_nvme_broken_d3cold_table[] = {
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.304"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.305"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.306"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.307"),
++ },
++ },
++ {}
++};
++
++static void asus_disable_nvme_d3cold(struct pci_dev *pdev)
++{
++ if (dmi_check_system(asus_nvme_broken_d3cold_table) > 0)
++ pci_d3cold_disable(pdev);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x9a09, asus_disable_nvme_d3cold);
++
++#ifdef CONFIG_SUSPEND
++/*
++ * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
++ * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
++ * Root Ports don't generate wakeup interrupts for USB devices.
++ *
++ * When suspending, remove D3hot and D3cold from the PME_Support advertised
++ * by the Root Port so we don't use those states if we're expecting wakeup
++ * interrupts. Restore the advertised PME_Support when resuming.
++ */
++static void amd_rp_pme_suspend(struct pci_dev *dev)
++{
++ struct pci_dev *rp;
++
++ /*
++ * PM_SUSPEND_ON means we're doing runtime suspend, which means
++ * amd-pmc will not be involved, so PMEs during D3 work as advertised.
++ *
++ * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
++ * sleep state, but we assume amd-pmc is always present.
++ */
++ if (pm_suspend_target_state == PM_SUSPEND_ON)
++ return;
++
++ rp = pcie_find_root_port(dev);
++ if (!rp || !rp->pm_cap)
++ return;
++
++ rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
++ PCI_PM_CAP_PME_SHIFT);
++ dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
++}
++
++static void amd_rp_pme_resume(struct pci_dev *dev)
++{
++ struct pci_dev *rp;
++ u16 pmc;
++
++ rp = pcie_find_root_port(dev);
++ if (!rp || !rp->pm_cap)
++ return;
++
++ pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
++ rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
++}
++/* Rembrandt (yellow_carp) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
++/* Phoenix (pink_sardine) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
++#endif /* CONFIG_SUSPEND */
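
A quick userspace rendering of the bit manipulation these two quirks perform on the cached PME_Support field; the constants are assumed to follow the PCI PM capability layout (PCI_PM_CAP_PME_* as in pci_regs.h):

#include <stdio.h>

#define PCI_PM_CAP_PME_SHIFT  11
#define PCI_PM_CAP_PME_MASK   0xF800 /* PME_Support field of the PMC register */
#define PCI_PM_CAP_PME_D3hot  0x4000
#define PCI_PM_CAP_PME_D3cold 0x8000

int main(void)
{
	unsigned short pmc = PCI_PM_CAP_PME_MASK; /* all wake states advertised */
	unsigned short pme = pmc >> PCI_PM_CAP_PME_SHIFT;

	/* suspend quirk: drop D3hot/D3cold from the cached field */
	pme &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
		 >> PCI_PM_CAP_PME_SHIFT);
	printf("suspended pme_support=%#x\n", pme); /* 0x7 */

	/* resume quirk: re-derive the field from PMC, as FIELD_GET() does */
	pme = (pmc & PCI_PM_CAP_PME_MASK) >> PCI_PM_CAP_PME_SHIFT;
	printf("restored  pme_support=%#x\n", pme); /* 0x1f */
	return 0;
}
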
+diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
+index 8edd6220660446..722a33be08a186 100644
+--- a/arch/x86/pci/intel_mid_pci.c
++++ b/arch/x86/pci/intel_mid_pci.c
+@@ -233,9 +233,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
+ return 0;
+
+ ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+- if (ret < 0) {
++ if (ret) {
+ dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
+- return ret;
++ return pcibios_err_to_errno(ret);
+ }
+
+ id = x86_match_cpu(intel_mid_cpu_ids);
+diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
+index 4b3efaa82ab7c1..8447d1e2e1961e 100644
+--- a/arch/x86/pci/mmconfig-shared.c
++++ b/arch/x86/pci/mmconfig-shared.c
+@@ -525,7 +525,36 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
+ static bool __ref
+ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
+ {
+- if (!early && !acpi_disabled) {
++ struct resource *conflict;
++
++ if (early) {
++
++ /*
++ * Don't try to do this check unless configuration type 1
++ * is available. How about type 2?
++ */
++
++ /*
++ * 946f2ee5c731 ("Check that MCFG points to an e820
++ * reserved area") added this E820 check in 2006 to work
++ * around BIOS defects.
++ *
++ * Per PCI Firmware r3.3, sec 4.1.2, ECAM space must be
++ * reserved by a PNP0C02 resource, but it need not be
++ * mentioned in E820. Before the ACPI interpreter is
++ * available, we can't check for PNP0C02 resources, so
++ * there's no reliable way to verify the region in this
++ * early check. Keep it only for the old machines that
++ * motivated 946f2ee5c731.
++ */
++ if (dmi_get_bios_year() < 2016 && raw_pci_ops)
++ return is_mmconf_reserved(e820__mapped_all, cfg, dev,
++ "E820 entry");
++
++ return true;
++ }
++
++ if (!acpi_disabled) {
+ if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
+ "ACPI motherboard resource"))
+ return true;
+@@ -542,8 +571,17 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
+ &cfg->res);
+
+ if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
+- "EfiMemoryMappedIO"))
++ "EfiMemoryMappedIO")) {
++ conflict = insert_resource_conflict(&iomem_resource,
++ &cfg->res);
++ if (conflict)
++ pr_warn("MMCONFIG %pR conflicts with %s %pR\n",
++ &cfg->res, conflict->name, conflict);
++ else
++ pr_info("MMCONFIG %pR reserved to work around lack of ACPI motherboard _CRS\n",
++ &cfg->res);
+ return true;
++ }
+ }
+
+ /*
+@@ -552,16 +590,7 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
+ * For MCFG information constructed from hotpluggable host bridge's
+ * _CBA method, just assume it's reserved.
+ */
+- if (pci_mmcfg_running_state)
+- return true;
+-
+- /* Don't try to do this check unless configuration
+- type 1 is available. how about type 2 ?*/
+- if (raw_pci_ops)
+- return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+- "E820 entry");
+-
+- return false;
++ return pci_mmcfg_running_state;
+ }
+
+ static void __init pci_mmcfg_reject_broken(int early)
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 652cd53e77f641..0f2fe524f60dcd 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -38,10 +38,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
+ u8 gsi;
+
+ rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+- if (rc < 0) {
++ if (rc) {
+ dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+ rc);
+- return rc;
++ return pcibios_err_to_errno(rc);
+ }
+ /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
+ pirq = gsi;
+diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
+index 4ef20b49eb5e72..6ed1935504b96e 100644
+--- a/arch/x86/platform/efi/memmap.c
++++ b/arch/x86/platform/efi/memmap.c
+@@ -92,12 +92,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
+ */
+ int __init efi_memmap_install(struct efi_memory_map_data *data)
+ {
++ unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
++ unsigned long flags = efi.memmap.flags;
++ u64 phys = efi.memmap.phys_map;
++ int ret;
++
+ efi_memmap_unmap();
+
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+
+- return __efi_memmap_init(data);
++ ret = __efi_memmap_init(data);
++ if (ret)
++ return ret;
++
++ __efi_memmap_free(phys, size, flags);
++ return 0;
+ }
+
+ /**
+diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
+index fdd49d70b43738..c81cea208c2c43 100644
+--- a/arch/x86/platform/intel/iosf_mbi.c
++++ b/arch/x86/platform/intel/iosf_mbi.c
+@@ -62,7 +62,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+
+ fail_read:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+- return result;
++ return pcibios_err_to_errno(result);
+ }
+
+ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+@@ -91,7 +91,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+
+ fail_write:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+- return result;
++ return pcibios_err_to_errno(result);
+ }
+
+ int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
+index 00a92cb2c81474..a12117f3d4de72 100644
+--- a/arch/x86/platform/pvh/enlighten.c
++++ b/arch/x86/platform/pvh/enlighten.c
+@@ -74,6 +74,9 @@ static void __init init_pvh_bootparams(bool xen_guest)
+ } else
+ xen_raw_printk("Warning: Can fit ISA range into e820\n");
+
++ if (xen_guest)
++ xen_reserve_extra_memory(&pvh_bootparams);
++
+ pvh_bootparams.hdr.cmd_line_ptr =
+ pvh_start_info.cmdline_paddr;
+
+diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
+index 6f955eb1e1631a..d8af46e6775034 100644
+--- a/arch/x86/power/hibernate.c
++++ b/arch/x86/power/hibernate.c
+@@ -170,7 +170,7 @@ int relocate_restore_code(void)
+ goto out;
+ }
+ pud = pud_offset(p4d, relocated_restore_code);
+- if (pud_large(*pud)) {
++ if (pud_leaf(*pud)) {
+ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+ goto out;
+ }
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 08aa0f25f12a0f..8d1c82795ea1da 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -42,7 +42,8 @@ KCOV_INSTRUMENT := n
+ # make up the standalone purgatory.ro
+
+ PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
++PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
++PURGATORY_CFLAGS += -fpic -fvisibility=hidden
+ PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+ PURGATORY_CFLAGS += -fno-stack-protector
+
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index d30949e25ebd9b..a2cfd19c11eea1 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -653,6 +653,14 @@ static void print_absolute_relocs(void)
+ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ continue;
+ }
++ /*
++ * Do not perform relocations in .notes section; any
++ * values there are meant for pre-boot consumption (e.g.
++ * startup_xen).
++ */
++ if (sec_applies->shdr.sh_type == SHT_NOTE) {
++ continue;
++ }
+ sh_symtab = sec_symtab->symtab;
+ sym_strtab = sec_symtab->link->strtab;
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+@@ -738,6 +746,15 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ continue;
+ }
++
++ /*
++ * Do not perform relocations in .notes sections; any
++ * values there are meant for pre-boot consumption (e.g.
++ * startup_xen).
++ */
++ if (sec_applies->shdr.sh_type == SHT_NOTE)
++ continue;
++
+ sh_symtab = sec_symtab->symtab;
+ sym_strtab = sec_symtab->link->strtab;
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+diff --git a/arch/x86/um/shared/sysdep/archsetjmp.h b/arch/x86/um/shared/sysdep/archsetjmp.h
+index 166cedbab9266f..8c81d1a604a942 100644
+--- a/arch/x86/um/shared/sysdep/archsetjmp.h
++++ b/arch/x86/um/shared/sysdep/archsetjmp.h
+@@ -1,6 +1,13 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __X86_UM_SYSDEP_ARCHSETJMP_H
++#define __X86_UM_SYSDEP_ARCHSETJMP_H
++
+ #ifdef __i386__
+ #include "archsetjmp_32.h"
+ #else
+ #include "archsetjmp_64.h"
+ #endif
++
++unsigned long get_thread_reg(int reg, jmp_buf *buf);
++
++#endif /* __X86_UM_SYSDEP_ARCHSETJMP_H */
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index 9b1ec5d8c99c8d..a65fc2ae15b496 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -9,6 +9,7 @@ config XEN
+ select PARAVIRT_CLOCK
+ select X86_HV_CALLBACK_VECTOR
+ depends on X86_64 || (X86_32 && X86_PAE)
++ depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
+ depends on X86_LOCAL_APIC && X86_TSC
+ help
+ This is the Linux Xen port. Enabling this will allow the
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 0337392a312141..b88722dfc4f867 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -6,6 +6,7 @@
+ #include <linux/console.h>
+ #include <linux/cpu.h>
+ #include <linux/kexec.h>
++#include <linux/memblock.h>
+ #include <linux/slab.h>
+ #include <linux/panic_notifier.h>
+
+@@ -33,9 +34,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
+ * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
+ * but during boot it is switched to point to xen_vcpu_info.
+ * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
++ * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
++ * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
++ * which matches the cache line size of 64-bit x86 processors).
+ */
+ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+-DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+
+ /* Linux <-> Xen vCPU id mapping */
+ DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
+@@ -160,6 +164,7 @@ void xen_vcpu_setup(int cpu)
+ int err;
+ struct vcpu_info *vcpup;
+
++ BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+ /*
+@@ -346,3 +351,67 @@ void xen_arch_unregister_cpu(int num)
+ }
+ EXPORT_SYMBOL(xen_arch_unregister_cpu);
+ #endif
++
++/* Amount of extra memory space we add to the e820 ranges */
++struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
++
++void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
++{
++ unsigned int i;
++
++ /*
++ * No need to check for zero size: it should happen rarely, and would only
++ * write a new entry that is regarded as unused due to its zero size.
++ */
++ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
++ /* Add new region. */
++ if (xen_extra_mem[i].n_pfns == 0) {
++ xen_extra_mem[i].start_pfn = start_pfn;
++ xen_extra_mem[i].n_pfns = n_pfns;
++ break;
++ }
++ /* Append to existing region. */
++ if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
++ start_pfn) {
++ xen_extra_mem[i].n_pfns += n_pfns;
++ break;
++ }
++ }
++ if (i == XEN_EXTRA_MEM_MAX_REGIONS)
++ printk(KERN_WARNING "Warning: not enough extra memory regions\n");
++
++ memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
++}
++
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++int __init arch_xen_unpopulated_init(struct resource **res)
++{
++ unsigned int i;
++
++ if (!xen_domain())
++ return -ENODEV;
++
++ /* Must be set strictly before calling xen_free_unpopulated_pages(). */
++ *res = &iomem_resource;
++
++ /*
++ * Initialize with pages from the extra memory regions (see
++ * arch/x86/xen/setup.c).
++ */
++ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
++ unsigned int j;
++
++ for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
++ struct page *pg =
++ pfn_to_page(xen_extra_mem[i].start_pfn + j);
++
++ xen_free_unpopulated_pages(1, &pg);
++ }
++
++ /* Zero so region is not also added to the balloon driver. */
++ xen_extra_mem[i].n_pfns = 0;
++ }
++
++ return 0;
++}
++#endif
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index bbbfdd495ebd3a..aeb33e0a3f7633 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -704,7 +704,7 @@ static struct trap_array_entry trap_array[] = {
+ TRAP_ENTRY(exc_int3, false ),
+ TRAP_ENTRY(exc_overflow, false ),
+ #ifdef CONFIG_IA32_EMULATION
+- { entry_INT80_compat, xen_entry_INT80_compat, false },
++ TRAP_ENTRY(int80_emulation, false ),
+ #endif
+ TRAP_ENTRY(exc_page_fault, false ),
+ TRAP_ENTRY(exc_divide_error, false ),
+diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
+index ada3868c02c231..c28f073c1df524 100644
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/acpi.h>
+ #include <linux/export.h>
++#include <linux/mm.h>
+
+ #include <xen/hvc-console.h>
+
+@@ -72,3 +73,70 @@ void __init mem_map_via_hcall(struct boot_params *boot_params_p)
+ }
+ boot_params_p->e820_entries = memmap.nr_entries;
+ }
++
++/*
++ * Reserve e820 UNUSABLE regions to inflate the memory balloon.
++ *
++ * On PVH dom0 the host memory map is used; RAM regions available to dom0 are
++ * located at the same place as in the native memory map, but since dom0 gets
++ * less memory than the total amount of host RAM, the ranges that can't be
++ * populated are converted from RAM -> UNUSABLE. Use such regions (up to the
++ * ratio signaled in EXTRA_MEM_RATIO) in order to inflate the balloon driver at
++ * boot. Doing so prevents the guest (even if just temporarily) from using holes
++ * in the memory map in order to map grants or foreign addresses, and
++ * hopefully limits the risk of a clash with a device MMIO region. Ideally the
++ * hypervisor should notify us which memory ranges are suitable for creating
++ * foreign mappings, but that's not yet implemented.
++ */
++void __init xen_reserve_extra_memory(struct boot_params *bootp)
++{
++ unsigned int i, ram_pages = 0, extra_pages;
++
++ for (i = 0; i < bootp->e820_entries; i++) {
++ struct boot_e820_entry *e = &bootp->e820_table[i];
++
++ if (e->type != E820_TYPE_RAM)
++ continue;
++ ram_pages += PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr);
++ }
++
++ /* Max amount of extra memory. */
++ extra_pages = EXTRA_MEM_RATIO * ram_pages;
++
++ /*
++ * Convert UNUSABLE ranges to RAM and reserve them for foreign mapping
++ * purposes.
++ */
++ for (i = 0; i < bootp->e820_entries && extra_pages; i++) {
++ struct boot_e820_entry *e = &bootp->e820_table[i];
++ unsigned long pages;
++
++ if (e->type != E820_TYPE_UNUSABLE)
++ continue;
++
++ pages = min(extra_pages,
++ PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr));
++
++ if (pages != (PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr))) {
++ struct boot_e820_entry *next;
++
++ if (bootp->e820_entries ==
++ ARRAY_SIZE(bootp->e820_table))
++ /* No space left to split - skip region. */
++ continue;
++
++ /* Split entry. */
++ next = e + 1;
++ memmove(next, e,
++ (bootp->e820_entries - i) * sizeof(*e));
++ bootp->e820_entries++;
++ next->addr = PAGE_ALIGN(e->addr) + PFN_PHYS(pages);
++ e->size = next->addr - e->addr;
++ next->size -= e->size;
++ }
++ e->type = E820_TYPE_RAM;
++ extra_pages -= pages;
++
++ xen_add_extra_mem(PFN_UP(e->addr), pages);
++ }
++}
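
Both the RAM accounting and the clamping of an UNUSABLE entry above count only the whole pages fully contained in a byte range, via PFN_DOWN(end) - PFN_UP(start). A tiny sketch of that arithmetic with an intentionally unaligned, made-up range:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	/* Whole pages fully contained in an unaligned byte range */
	uint64_t addr = 0x1800, size = 0x5000;
	uint64_t pages = PFN_DOWN(addr + size) - PFN_UP(addr);

	printf("range [%#llx, %#llx): %llu full page(s)\n",
	       (unsigned long long)addr, (unsigned long long)(addr + size),
	       (unsigned long long)pages);
	return 0; /* prints 4 full pages for this 0x5000-byte range */
}
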
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index b6830554ff6905..6b201e64d8abc8 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
+ pmd_t *pmd_tbl;
+ int i;
+
+- if (pud_large(*pud)) {
++ if (pud_leaf(*pud)) {
+ pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
+ xen_free_ro_pages(pa, PUD_SIZE);
+ return;
+@@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
+ if (!pud_present(pud))
+ return 0;
+ pa = pud_val(pud) & PTE_PFN_MASK;
+- if (pud_large(pud))
++ if (pud_leaf(pud))
+ return pa + (vaddr & ~PUD_MASK);
+
+ pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
+@@ -2019,10 +2019,7 @@ void __init xen_reserve_special_pages(void)
+
+ void __init xen_pt_check_e820(void)
+ {
+- if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
+- xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
+- BUG();
+- }
++ xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
+ }
+
+ static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 9bdc3b656b2c49..11b5c042d4faef 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -70,6 +70,7 @@
+ #include <linux/memblock.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
++#include <linux/acpi.h>
+
+ #include <asm/cache.h>
+ #include <asm/setup.h>
+@@ -80,6 +81,7 @@
+ #include <asm/xen/hypervisor.h>
+ #include <xen/balloon.h>
+ #include <xen/grant_table.h>
++#include <xen/hvc-console.h>
+
+ #include "multicalls.h"
+ #include "xen-ops.h"
+@@ -731,7 +733,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ * immediate unmapping.
+ */
+ map_ops[i].status = GNTST_general_error;
+- unmap[0].host_addr = map_ops[i].host_addr,
++ unmap[0].host_addr = map_ops[i].host_addr;
+ unmap[0].handle = map_ops[i].handle;
+ map_ops[i].handle = INVALID_GRANT_HANDLE;
+ if (map_ops[i].flags & GNTMAP_device_map)
+@@ -741,7 +743,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+
+ if (kmap_ops) {
+ kmap_ops[i].status = GNTST_general_error;
+- unmap[1].host_addr = kmap_ops[i].host_addr,
++ unmap[1].host_addr = kmap_ops[i].host_addr;
+ unmap[1].handle = kmap_ops[i].handle;
+ kmap_ops[i].handle = INVALID_GRANT_HANDLE;
+ if (kmap_ops[i].flags & GNTMAP_device_map)
+@@ -794,6 +796,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ return ret;
+ }
+
++/* Remapped non-RAM areas */
++#define NR_NONRAM_REMAP 4
++static struct nonram_remap {
++ phys_addr_t maddr;
++ phys_addr_t paddr;
++ size_t size;
++} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
++static unsigned int nr_nonram_remap __ro_after_init;
++
++/*
++ * Do the real remapping of non-RAM regions as specified in the
++ * xen_nonram_remap[] array.
++ * In case of an error just crash the system.
++ */
++void __init xen_do_remap_nonram(void)
++{
++ unsigned int i;
++ unsigned int remapped = 0;
++ const struct nonram_remap *remap = xen_nonram_remap;
++ unsigned long pfn, mfn, end_pfn;
++
++ for (i = 0; i < nr_nonram_remap; i++) {
++ end_pfn = PFN_UP(remap->paddr + remap->size);
++ pfn = PFN_DOWN(remap->paddr);
++ mfn = PFN_DOWN(remap->maddr);
++ while (pfn < end_pfn) {
++ if (!set_phys_to_machine(pfn, mfn))
++ panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
++ pfn, mfn);
++
++ pfn++;
++ mfn++;
++ remapped++;
++ }
++
++ remap++;
++ }
++
++ pr_info("Remapped %u non-RAM page(s)\n", remapped);
++}
++
++#ifdef CONFIG_ACPI
++/*
++ * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
++ * regions into account.
++ * Any attempt to map an area crossing a remap boundary will produce a
++ * WARN() splat.
++ * phys is related to remap->maddr on input and will be rebased to remap->paddr.
++ */
++static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
++ acpi_size size)
++{
++ unsigned int i;
++ const struct nonram_remap *remap = xen_nonram_remap;
++
++ for (i = 0; i < nr_nonram_remap; i++) {
++ if (phys + size > remap->maddr &&
++ phys < remap->maddr + remap->size) {
++ WARN_ON(phys < remap->maddr ||
++ phys + size > remap->maddr + remap->size);
++ phys += remap->paddr - remap->maddr;
++ break;
++ }
++ }
++
++ return x86_acpi_os_ioremap(phys, size);
++}
++#endif /* CONFIG_ACPI */
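
The wrapper only adjusts addresses that fall inside a recorded remap window, shifting them by paddr - maddr before calling the regular x86 ioremap path. A hedged sketch of just that address math, with invented window values:

#include <stdint.h>
#include <stdio.h>

struct remap { uint64_t maddr, paddr, size; };

/* Rebase a machine address into the remapped physical window, as the
 * wrapper above does before handing off to the regular ioremap path. */
static uint64_t rebase(uint64_t phys, uint64_t size, const struct remap *r)
{
	if (phys + size > r->maddr && phys < r->maddr + r->size)
		phys += r->paddr - r->maddr;
	return phys;
}

int main(void)
{
	struct remap r = { .maddr = 0x80000000, .paddr = 0x40000000, .size = 0x10000 };

	printf("%#llx\n", (unsigned long long)rebase(0x80001000, 0x100, &r));
	return 0; /* prints 0x40001000 */
}
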
++
++/*
++ * Add a new non-RAM remap entry.
++ * If no free entry is found, just crash the system.
++ */
++void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
++ unsigned long size)
++{
++ BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));
++
++ if (nr_nonram_remap == NR_NONRAM_REMAP) {
++ xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
++ BUG();
++ }
++
++#ifdef CONFIG_ACPI
++ /* Switch to the Xen acpi_os_ioremap() variant. */
++ if (nr_nonram_remap == 0)
++ acpi_os_ioremap = xen_acpi_os_ioremap;
++#endif
++
++ xen_nonram_remap[nr_nonram_remap].maddr = maddr;
++ xen_nonram_remap[nr_nonram_remap].paddr = paddr;
++ xen_nonram_remap[nr_nonram_remap].size = size;
++
++ nr_nonram_remap++;
++}
++
+ #ifdef CONFIG_XEN_DEBUG_FS
+ #include <linux/debugfs.h>
+ #include "debugfs.h"
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index b3e37961065a2c..dc822124cacb9c 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -15,12 +15,12 @@
+ #include <linux/cpuidle.h>
+ #include <linux/cpufreq.h>
+ #include <linux/memory_hotplug.h>
++#include <linux/acpi.h>
+
+ #include <asm/elf.h>
+ #include <asm/vdso.h>
+ #include <asm/e820/api.h>
+ #include <asm/setup.h>
+-#include <asm/acpi.h>
+ #include <asm/numa.h>
+ #include <asm/idtentry.h>
+ #include <asm/xen/hypervisor.h>
+@@ -38,9 +38,6 @@
+
+ #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
+
+-/* Amount of extra memory space we add to the e820 ranges */
+-struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+-
+ /* Number of pages released from the initial allocation. */
+ unsigned long xen_released_pages;
+
+@@ -50,6 +47,9 @@ bool xen_pv_pci_possible;
+ /* E820 map used during setting up memory. */
+ static struct e820_table xen_e820_table __initdata;
+
++/* Number of initially usable memory pages. */
++static unsigned long ini_nr_pages __initdata;
++
+ /*
+ * Buffer used to remap identity mapped pages. We only need the virtual space.
+ * The physical page behind this address is remapped as needed to different
+@@ -64,18 +64,6 @@ static struct {
+ } xen_remap_buf __initdata __aligned(PAGE_SIZE);
+ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
+
+-/*
+- * The maximum amount of extra memory compared to the base size. The
+- * main scaling factor is the size of struct page. At extreme ratios
+- * of base:extra, all the base memory can be filled with page
+- * structures for the extra memory, leaving no space for anything
+- * else.
+- *
+- * 10x seems like a reasonable balance between scaling flexibility and
+- * leaving a practically usable system.
+- */
+-#define EXTRA_MEM_RATIO (10)
+-
+ static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
+
+ static void __init xen_parse_512gb(void)
+@@ -96,35 +84,6 @@ static void __init xen_parse_512gb(void)
+ xen_512gb_limit = val;
+ }
+
+-static void __init xen_add_extra_mem(unsigned long start_pfn,
+- unsigned long n_pfns)
+-{
+- int i;
+-
+- /*
+- * No need to check for zero size, should happen rarely and will only
+- * write a new entry regarded to be unused due to zero size.
+- */
+- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+- /* Add new region. */
+- if (xen_extra_mem[i].n_pfns == 0) {
+- xen_extra_mem[i].start_pfn = start_pfn;
+- xen_extra_mem[i].n_pfns = n_pfns;
+- break;
+- }
+- /* Append to existing region. */
+- if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
+- start_pfn) {
+- xen_extra_mem[i].n_pfns += n_pfns;
+- break;
+- }
+- }
+- if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+- printk(KERN_WARNING "Warning: not enough extra memory regions\n");
+-
+- memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
+-}
+-
+ static void __init xen_del_extra_mem(unsigned long start_pfn,
+ unsigned long n_pfns)
+ {
+@@ -257,7 +216,7 @@ static int __init xen_free_mfn(unsigned long mfn)
+ * as a fallback if the remapping fails.
+ */
+ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+- unsigned long end_pfn, unsigned long nr_pages)
++ unsigned long end_pfn)
+ {
+ unsigned long pfn, end;
+ int ret;
+@@ -265,7 +224,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+ WARN_ON(start_pfn > end_pfn);
+
+ /* Release pages first. */
+- end = min(end_pfn, nr_pages);
++ end = min(end_pfn, ini_nr_pages);
+ for (pfn = start_pfn; pfn < end; pfn++) {
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+@@ -386,15 +345,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
+ * to Xen and not remapped.
+ */
+ static unsigned long __init xen_set_identity_and_remap_chunk(
+- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+- unsigned long remap_pfn)
++ unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
+ {
+ unsigned long pfn;
+ unsigned long i = 0;
+ unsigned long n = end_pfn - start_pfn;
+
+ if (remap_pfn == 0)
+- remap_pfn = nr_pages;
++ remap_pfn = ini_nr_pages;
+
+ while (i < n) {
+ unsigned long cur_pfn = start_pfn + i;
+@@ -403,19 +361,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
+ unsigned long remap_range_size;
+
+ /* Do not remap pages beyond the current allocation */
+- if (cur_pfn >= nr_pages) {
++ if (cur_pfn >= ini_nr_pages) {
+ /* Identity map remaining pages */
+ set_phys_range_identity(cur_pfn, cur_pfn + size);
+ break;
+ }
+- if (cur_pfn + size > nr_pages)
+- size = nr_pages - cur_pfn;
++ if (cur_pfn + size > ini_nr_pages)
++ size = ini_nr_pages - cur_pfn;
+
+ remap_range_size = xen_find_pfn_range(&remap_pfn);
+ if (!remap_range_size) {
+ pr_warn("Unable to find available pfn range, not remapping identity pages\n");
+ xen_set_identity_and_release_chunk(cur_pfn,
+- cur_pfn + left, nr_pages);
++ cur_pfn + left);
+ break;
+ }
+ /* Adjust size to fit in current e820 RAM region */
+@@ -442,18 +400,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
+ }
+
+ static unsigned long __init xen_count_remap_pages(
+- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
++ unsigned long start_pfn, unsigned long end_pfn,
+ unsigned long remap_pages)
+ {
+- if (start_pfn >= nr_pages)
++ if (start_pfn >= ini_nr_pages)
+ return remap_pages;
+
+- return remap_pages + min(end_pfn, nr_pages) - start_pfn;
++ return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
+ }
+
+-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
++static unsigned long __init xen_foreach_remap_area(
+ unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+- unsigned long nr_pages, unsigned long last_val))
++ unsigned long last_val))
+ {
+ phys_addr_t start = 0;
+ unsigned long ret_val = 0;
+@@ -481,8 +439,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+ end_pfn = PFN_UP(entry->addr);
+
+ if (start_pfn < end_pfn)
+- ret_val = func(start_pfn, end_pfn, nr_pages,
+- ret_val);
++ ret_val = func(start_pfn, end_pfn, ret_val);
+ start = end;
+ }
+ }
+@@ -539,6 +496,8 @@ void __init xen_remap_memory(void)
+ set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
+
+ pr_info("Remapped %ld page(s)\n", remapped);
++
++ xen_do_remap_nonram();
+ }
+
+ static unsigned long __init xen_get_pages_limit(void)
+@@ -612,7 +571,7 @@ static void __init xen_ignore_unusable(void)
+ }
+ }
+
+-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
++static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+ {
+ struct e820_entry *entry;
+ unsigned mapcnt;
+@@ -669,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
+ return 0;
+ }
+
++/*
++ * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
++ * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
++ * The adaptation of the P2M must be deferred until page allocation is possible.
++ */
++static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
++{
++ struct e820_entry *entry;
++ unsigned int mapcnt;
++ phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
++ phys_addr_t swap_addr, swap_size, entry_end;
++
++ swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
++ swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
++ entry = xen_e820_table.entries;
++
++ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
++ entry_end = entry->addr + entry->size;
++ if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
++ entry_end - swap_size >= mem_end) {
++ /* Reduce RAM entry by needed space (whole pages). */
++ entry->size -= swap_size;
++
++ /* Add new entry at the end of E820 map. */
++ entry = xen_e820_table.entries +
++ xen_e820_table.nr_entries;
++ xen_e820_table.nr_entries++;
++
++ /* Fill new entry (keep size and page offset). */
++ entry->type = swap_entry->type;
++ entry->addr = entry_end - swap_size +
++ swap_addr - swap_entry->addr;
++ entry->size = swap_entry->size;
++
++ /* Convert old entry to RAM, align to pages. */
++ swap_entry->type = E820_TYPE_RAM;
++ swap_entry->addr = swap_addr;
++ swap_entry->size = swap_size;
++
++ /* Remember PFN<->MFN relation for P2M update. */
++ xen_add_remap_nonram(swap_addr, entry_end - swap_size,
++ swap_size);
++
++ /* Order E820 table and merge entries. */
++ e820__update_table(&xen_e820_table);
++
++ return;
++ }
++
++ entry++;
++ }
++
++ xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
++ BUG();
++}
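
Before searching for a RAM donor, the swap first widens the entry to whole pages while keeping the sub-page offset intact. A small sketch of that alignment step, assuming the usual PAGE_ALIGN/PAGE_ALIGN_DOWN definitions and a 4 KiB page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)      (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Widen an arbitrary byte range to the whole pages the swap moves */
	uint64_t addr = 0x12345, size = 0x800;
	uint64_t swap_addr = PAGE_ALIGN_DOWN(addr);
	uint64_t swap_size = PAGE_ALIGN(addr - swap_addr + size);

	printf("swap region: %#llx + %#llx\n",
	       (unsigned long long)swap_addr, (unsigned long long)swap_size);
	return 0; /* prints 0x12000 + 0x1000 */
}
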
++
++/*
++ * Look for non-RAM memory types in a specific guest physical area and move
++ * those away if possible (ACPI NVS only for now).
++ */
++static void __init xen_e820_resolve_conflicts(phys_addr_t start,
++ phys_addr_t size)
++{
++ struct e820_entry *entry;
++ unsigned int mapcnt;
++ phys_addr_t end;
++
++ if (!size)
++ return;
++
++ end = start + size;
++ entry = xen_e820_table.entries;
++
++ for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
++ if (entry->addr >= end)
++ return;
++
++ if (entry->addr + entry->size > start &&
++ entry->type == E820_TYPE_NVS)
++ xen_e820_swap_entry_with_ram(entry);
++
++ entry++;
++ }
++}
++
++/*
++ * Check that an area in physical memory is usable for non-movable purposes.
++ * An area is considered usable if the used E820 map lists it as RAM or as
++ * some other type which can be moved to higher PFNs while keeping the MFNs.
++ * In case the area is not usable, crash the system with an error message.
++ */
++void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
++ const char *component)
++{
++ xen_e820_resolve_conflicts(start, size);
++
++ if (!xen_is_e820_reserved(start, size))
++ return;
++
++ xen_raw_console_write("Xen hypervisor allocated ");
++ xen_raw_console_write(component);
++ xen_raw_console_write(" memory conflicts with E820 map\n");
++ BUG();
++}
++
+ /*
+ * Like memcpy, but with physical addresses for dest and src.
+ */
+@@ -728,20 +792,20 @@ static void __init xen_reserve_xen_mfnlist(void)
+ **/
+ char * __init xen_memory_setup(void)
+ {
+- unsigned long max_pfn, pfn_s, n_pfns;
++ unsigned long pfn_s, n_pfns;
+ phys_addr_t mem_end, addr, size, chunk_size;
+ u32 type;
+ int rc;
+ struct xen_memory_map memmap;
+ unsigned long max_pages;
+ unsigned long extra_pages = 0;
++ unsigned long maxmem_pages;
+ int i;
+ int op;
+
+ xen_parse_512gb();
+- max_pfn = xen_get_pages_limit();
+- max_pfn = min(max_pfn, xen_start_info->nr_pages);
+- mem_end = PFN_PHYS(max_pfn);
++ ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
++ mem_end = PFN_PHYS(ini_nr_pages);
+
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
+ set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
+@@ -791,13 +855,35 @@ char * __init xen_memory_setup(void)
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ e820__update_table(&xen_e820_table);
+
++ /*
++ * Check whether the kernel itself conflicts with the target E820 map.
++ * Failing now is better than running into weird problems later due
++ * to relocating (and even reusing) pages with kernel text or data.
++ */
++ xen_chk_is_e820_usable(__pa_symbol(_text),
++ __pa_symbol(_end) - __pa_symbol(_text),
++ "kernel");
++
++ /*
++ * Check for a conflict of the xen_start_info memory with the target
++ * E820 map.
++ */
++ xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
++ "xen_start_info");
++
++ /*
++ * Check for a conflict of the hypervisor supplied page tables with
++ * the target E820 map.
++ */
++ xen_pt_check_e820();
++
+ max_pages = xen_get_max_pages();
+
+ /* How many extra pages do we need due to remapping? */
+- max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
++ max_pages += xen_foreach_remap_area(xen_count_remap_pages);
+
+- if (max_pages > max_pfn)
+- extra_pages += max_pages - max_pfn;
++ if (max_pages > ini_nr_pages)
++ extra_pages += max_pages - ini_nr_pages;
+
+ /*
+ * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+@@ -806,8 +892,8 @@ char * __init xen_memory_setup(void)
+ * Make sure we have no memory above max_pages, as this area
+ * isn't handled by the p2m management.
+ */
+- extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+- extra_pages, max_pages - max_pfn);
++ maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
++ extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
+ i = 0;
+ addr = xen_e820_table.entries[0].addr;
+ size = xen_e820_table.entries[0].size;
+@@ -863,23 +949,6 @@ char * __init xen_memory_setup(void)
+
+ e820__update_table(e820_table);
+
+- /*
+- * Check whether the kernel itself conflicts with the target E820 map.
+- * Failing now is better than running into weird problems later due
+- * to relocating (and even reusing) pages with kernel text or data.
+- */
+- if (xen_is_e820_reserved(__pa_symbol(_text),
+- __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
+- xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
+- BUG();
+- }
+-
+- /*
+- * Check for a conflict of the hypervisor supplied page tables with
+- * the target E820 map.
+- */
+- xen_pt_check_e820();
+-
+ xen_reserve_xen_mfnlist();
+
+ /* Check for a conflict of the initrd with the target E820 map. */
+@@ -907,7 +976,7 @@ char * __init xen_memory_setup(void)
+ * Set identity map on non-RAM pages and prepare remapping the
+ * underlying RAM.
+ */
+- xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
++ xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
+
+ pr_info("Released %ld page(s)\n", xen_released_pages);
+
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 4b0d6fff88de5a..1fb9a1644d944b 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
+ char *resched_name, *callfunc_name, *debug_name;
+
+ resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
++ if (!resched_name)
++ goto fail_mem;
+ per_cpu(xen_resched_irq, cpu).name = resched_name;
+ rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+ cpu,
+@@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
+ per_cpu(xen_resched_irq, cpu).irq = rc;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
++ if (!callfunc_name)
++ goto fail_mem;
+ per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+ cpu,
+@@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
+
+ if (!xen_fifo_events) {
+ debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
++ if (!debug_name)
++ goto fail_mem;
++
+ per_cpu(xen_debug_irq, cpu).name = debug_name;
+ rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
+ xen_debug_interrupt,
+@@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
+ }
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
++ if (!callfunc_name)
++ goto fail_mem;
++
+ per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ cpu,
+@@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
+
+ return 0;
+
++ fail_mem:
++ rc = -ENOMEM;
+ fail:
+ xen_smp_intr_free(cpu);
+ return rc;
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index 9e5e680087853a..1a9cd18dfbd312 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -156,7 +156,7 @@ xen_pv_trap asm_xenpv_exc_machine_check
+ #endif /* CONFIG_X86_MCE */
+ xen_pv_trap asm_exc_simd_coprocessor_error
+ #ifdef CONFIG_IA32_EMULATION
+-xen_pv_trap entry_INT80_compat
++xen_pv_trap asm_int80_emulation
+ #endif
+ xen_pv_trap asm_exc_xen_unknown_trap
+ xen_pv_trap asm_exc_xen_hypervisor_callback
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 408a2aa66c6922..a6a21dd0552700 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+-DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+ DECLARE_PER_CPU(unsigned long, xen_cr3);
+ DECLARE_PER_CPU(unsigned long, xen_current_cr3);
+
+@@ -43,8 +43,12 @@ void xen_mm_unpin_all(void);
+ #ifdef CONFIG_X86_64
+ void __init xen_relocate_p2m(void);
+ #endif
++void __init xen_do_remap_nonram(void);
++void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
++ unsigned long size);
+
+-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
++void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
++ const char *component);
+ unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
+ void __init xen_inv_extra_mem(void);
+ void __init xen_remap_memory(void);
+@@ -163,4 +167,18 @@ void xen_hvm_post_suspend(int suspend_cancelled);
+ static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
+ #endif
+
++/*
++ * The maximum amount of extra memory compared to the base size. The
++ * main scaling factor is the size of struct page. At extreme ratios
++ * of base:extra, all the base memory can be filled with page
++ * structures for the extra memory, leaving no space for anything
++ * else.
++ *
++ * 10x seems like a reasonable balance between scaling flexibility and
++ * leaving a practically usable system.
++ */
++#define EXTRA_MEM_RATIO (10)
++
++void xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns);
++
+ #endif /* XEN_OPS_H */
+diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
+index 785a00ce83c11e..38bcecb0e457d9 100644
+--- a/arch/xtensa/include/asm/cacheflush.h
++++ b/arch/xtensa/include/asm/cacheflush.h
+@@ -116,8 +116,9 @@ void flush_cache_page(struct vm_area_struct*,
+ #define flush_cache_mm(mm) flush_cache_all()
+ #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+-#define flush_cache_vmap(start,end) flush_cache_all()
+-#define flush_cache_vunmap(start,end) flush_cache_all()
++#define flush_cache_vmap(start,end) flush_cache_all()
++#define flush_cache_vmap_early(start,end) do { } while (0)
++#define flush_cache_vunmap(start,end) flush_cache_all()
+
+ void flush_dcache_folio(struct folio *folio);
+ #define flush_dcache_folio flush_dcache_folio
+@@ -140,6 +141,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
+ #define flush_cache_dup_mm(mm) do { } while (0)
+
+ #define flush_cache_vmap(start,end) do { } while (0)
++#define flush_cache_vmap_early(start,end) do { } while (0)
+ #define flush_cache_vunmap(start,end) do { } while (0)
+
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
+index c812bf85021c02..46c8596259d2d9 100644
+--- a/arch/xtensa/include/asm/jump_label.h
++++ b/arch/xtensa/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+ static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+ {
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ "_nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ * make it reachable and wrap both into a no-transform block
+ * to avoid any assembler interference with this.
+ */
+- asm_volatile_goto("1:\n\t"
++ asm goto("1:\n\t"
+ ".begin no-transform\n\t"
+ "_j %l[l_yes]\n\t"
+ "2:\n\t"
+diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
+index d008a153a2b9f7..7ed1a2085bd728 100644
+--- a/arch/xtensa/include/asm/processor.h
++++ b/arch/xtensa/include/asm/processor.h
+@@ -115,9 +115,9 @@
+ #define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
+
+ /* Convert return address to a valid pc
+- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
++ * Note: 'text' is the address within the same 1GB range as the ra
+ */
+-#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
++#define MAKE_PC_FROM_RA(ra, text) (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000))
+
+ #elif defined(__XTENSA_CALL0_ABI__)
+
+@@ -127,9 +127,9 @@
+ #define MAKE_RA_FOR_CALL(ra, ws) (ra)
+
+ /* Convert return address to a valid pc
+- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
++ * Note: 'text' is not used as 'ra' is always the full address
+ */
+-#define MAKE_PC_FROM_RA(ra, sp) (ra)
++#define MAKE_PC_FROM_RA(ra, text) (ra)
+
+ #else
+ #error Unsupported Xtensa ABI
+diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
+index a270467556dc84..86c70117371bb7 100644
+--- a/arch/xtensa/include/asm/ptrace.h
++++ b/arch/xtensa/include/asm/ptrace.h
+@@ -87,7 +87,7 @@ struct pt_regs {
+ # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+ # define instruction_pointer(regs) ((regs)->pc)
+ # define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
+- (regs)->areg[1]))
++ (regs)->pc))
+
+ # ifndef CONFIG_SMP
+ # define profile_pc(regs) instruction_pointer(regs)
+diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
+index a815577d25fd02..7bd66677f7b6de 100644
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -47,6 +47,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/regs.h>
+ #include <asm/hw_breakpoint.h>
++#include <asm/sections.h>
+ #include <asm/traps.h>
+
+ extern void ret_from_fork(void);
+@@ -380,7 +381,7 @@ unsigned long __get_wchan(struct task_struct *p)
+ int count = 0;
+
+ sp = p->thread.sp;
+- pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
++ pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
+
+ do {
+ if (sp < stack_page + sizeof(struct task_struct) ||
+@@ -392,7 +393,7 @@ unsigned long __get_wchan(struct task_struct *p)
+
+ /* Stack layout: sp-4: ra, sp-3: sp' */
+
+- pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
++ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text);
+ sp = SPILL_SLOT(sp, 1);
+ } while (count++ < 16);
+ return 0;
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 831ffb648bda7e..ed324fdf2a2f91 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -13,6 +13,7 @@
+ #include <linux/stacktrace.h>
+
+ #include <asm/ftrace.h>
++#include <asm/sections.h>
+ #include <asm/stacktrace.h>
+ #include <asm/traps.h>
+ #include <linux/uaccess.h>
+@@ -189,7 +190,7 @@ void walk_stackframe(unsigned long *sp,
+ if (a1 <= (unsigned long)sp)
+ break;
+
+- frame.pc = MAKE_PC_FROM_RA(a0, a1);
++ frame.pc = MAKE_PC_FROM_RA(a0, _text);
+ frame.sp = a1;
+
+ if (fn(&frame, data))
+diff --git a/block/bdev.c b/block/bdev.c
+index f3b13aa1b7d428..5a54977518eeae 100644
+--- a/block/bdev.c
++++ b/block/bdev.c
+@@ -425,6 +425,8 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
+
+ void bdev_add(struct block_device *bdev, dev_t dev)
+ {
++ if (bdev_stable_writes(bdev))
++ mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+ bdev->bd_dev = dev;
+ bdev->bd_inode->i_rdev = dev;
+ bdev->bd_inode->i_ino = dev;
+@@ -829,6 +831,25 @@ struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ }
+ EXPORT_SYMBOL(blkdev_get_by_dev);
+
++struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
++ const struct blk_holder_ops *hops)
++{
++ struct bdev_handle *handle = kmalloc(sizeof(*handle), GFP_KERNEL);
++ struct block_device *bdev;
++
++ if (!handle)
++ return ERR_PTR(-ENOMEM);
++ bdev = blkdev_get_by_dev(dev, mode, holder, hops);
++ if (IS_ERR(bdev)) {
++ kfree(handle);
++ return ERR_CAST(bdev);
++ }
++ handle->bdev = bdev;
++ handle->holder = holder;
++ return handle;
++}
++EXPORT_SYMBOL(bdev_open_by_dev);
++
+ /**
+ * blkdev_get_by_path - open a block device by name
+ * @path: path to the block device to open
+@@ -867,6 +888,28 @@ struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
+ }
+ EXPORT_SYMBOL(blkdev_get_by_path);
+
++struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
++ void *holder, const struct blk_holder_ops *hops)
++{
++ struct bdev_handle *handle;
++ dev_t dev;
++ int error;
++
++ error = lookup_bdev(path, &dev);
++ if (error)
++ return ERR_PTR(error);
++
++ handle = bdev_open_by_dev(dev, mode, holder, hops);
++ if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
++ bdev_read_only(handle->bdev)) {
++ bdev_release(handle);
++ return ERR_PTR(-EACCES);
++ }
++
++ return handle;
++}
++EXPORT_SYMBOL(bdev_open_by_path);
++
+ void blkdev_put(struct block_device *bdev, void *holder)
+ {
+ struct gendisk *disk = bdev->bd_disk;
+@@ -903,6 +946,13 @@ void blkdev_put(struct block_device *bdev, void *holder)
+ }
+ EXPORT_SYMBOL(blkdev_put);
+
++void bdev_release(struct bdev_handle *handle)
++{
++ blkdev_put(handle->bdev, handle->holder);
++ kfree(handle);
++}
++EXPORT_SYMBOL(bdev_release);
++
+ /**
+ * lookup_bdev() - Look up a struct block_device by name.
+ * @pathname: Name of the block device in the filesystem.
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 3cce6de464a7b7..7e0dcded5713a0 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
+
+ /* if a merge has already been setup, then proceed with that first */
+- if (bfqq->new_bfqq)
+- return bfqq->new_bfqq;
++ new_bfqq = bfqq->new_bfqq;
++ if (new_bfqq) {
++ while (new_bfqq->new_bfqq)
++ new_bfqq = new_bfqq->new_bfqq;
++ return new_bfqq;
++ }
+
+ /*
+ * Check delayed stable merge for rotational or non-queueing
+@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ bfq_put_queue(bfqq);
+ }
+
+-static void
+-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+- struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq)
+ {
++ struct bfq_queue *new_bfqq = bfqq->new_bfqq;
++
+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+ (unsigned long)new_bfqq->pid);
+ /* Save weight raising and idle window of the merged queues */
+@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+ bfq_reassign_last_bfqq(bfqq, new_bfqq);
+
+ bfq_release_process_ref(bfqd, bfqq);
++
++ return new_bfqq;
+ }
+
+ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
+ * fulfilled, i.e., bic can be redirected to new_bfqq
+ * and bfqq can be put.
+ */
+- bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
+- new_bfqq);
+- /*
+- * If we get here, bio will be queued into new_queue,
+- * so use new_bfqq to decide whether bio and rq can be
+- * merged.
+- */
+- bfqq = new_bfqq;
++ while (bfqq != new_bfqq)
++ bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
+
+ /*
+ * Change also bqfd->bio_bfqq, as
+@@ -5699,9 +5701,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ * state before killing it.
+ */
+ bfqq->bic = bic;
+- bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
+-
+- return new_bfqq;
++ return bfq_merge_bfqqs(bfqd, bic, bfqq);
+ }
+
+ /*
+@@ -6156,6 +6156,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ bool waiting, idle_timer_disabled = false;
+
+ if (new_bfqq) {
++ struct bfq_queue *old_bfqq = bfqq;
+ /*
+ * Release the request's reference to the old bfqq
+ * and make sure one is taken to the shared queue.
+@@ -6172,18 +6173,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+ * new_bfqq.
+ */
+ if (bic_to_bfqq(RQ_BIC(rq), true,
+- bfq_actuator_index(bfqd, rq->bio)) == bfqq)
+- bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
+- bfqq, new_bfqq);
++ bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
++ while (bfqq != new_bfqq)
++ bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
++ }
+
+- bfq_clear_bfqq_just_created(bfqq);
++ bfq_clear_bfqq_just_created(old_bfqq);
+ /*
+ * rq is about to be enqueued into new_bfqq,
+ * release rq reference on bfqq
+ */
+- bfq_put_queue(bfqq);
++ bfq_put_queue(old_bfqq);
+ rq->elv.priv[1] = new_bfqq;
+- bfqq = new_bfqq;
+ }
+
+ bfq_update_io_thinktime(bfqd, bfqq);
+@@ -6721,7 +6722,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
+
+- if (bfqq_process_refs(bfqq) == 1) {
++ if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
+ bfqq->pid = current->pid;
+ bfq_clear_bfqq_coop(bfqq);
+ bfq_clear_bfqq_split_coop(bfqq);
+@@ -6819,6 +6820,31 @@ static void bfq_prepare_request(struct request *rq)
+ rq->elv.priv[0] = rq->elv.priv[1] = NULL;
+ }
+
++static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *new_bfqq = bfqq->new_bfqq;
++ struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
++
++ if (!waker_bfqq)
++ return NULL;
++
++ while (new_bfqq) {
++ if (new_bfqq == waker_bfqq) {
++ /*
++ * If waker_bfqq is in the merge chain and current is
++ * the only process referencing it, waker_bfqq can be
++ * freed; don't use it.
++ */
++ if (bfqq_process_refs(waker_bfqq) == 1)
++ return NULL;
++ break;
++ }
++
++ new_bfqq = new_bfqq->new_bfqq;
++ }
++
++ return waker_bfqq;
++}
++
+ /*
+ * If needed, init rq, allocate bfq data structures associated with
+ * rq, and increment reference counters in the destination bfq_queue
+@@ -6880,7 +6906,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ /* If the queue was seeky for too long, break it apart. */
+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
+ !bic->bfqq_data[a_idx].stably_merged) {
+- struct bfq_queue *old_bfqq = bfqq;
++ struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
+
+ /* Update bic before losing reference to bfqq */
+ if (bfq_bfqq_in_large_burst(bfqq))
+@@ -6900,7 +6926,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ bfqq_already_existing = true;
+
+ if (!bfqq_already_existing) {
+- bfqq->waker_bfqq = old_bfqq->waker_bfqq;
++ bfqq->waker_bfqq = waker_bfqq;
+ bfqq->tentative_waker_bfqq = NULL;
+
+ /*
+@@ -6910,7 +6936,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ * woken_list of the waker. See
+ * bfq_check_waker for details.
+ */
+- if (bfqq->waker_bfqq)
++ if (waker_bfqq)
+ hlist_add_head(&bfqq->woken_list_node,
+ &bfqq->waker_bfqq->woken_list);
+ }
+@@ -6932,7 +6958,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
+ * addition, if the queue has also just been split, we have to
+ * resume its state.
+ */
+- if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++ if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
++ bfqq_process_refs(bfqq) == 1) {
+ bfqq->bic = bic;
+ if (split) {
+ /*
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index ec8ac8cf6e1b98..15e444b2fcc123 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -217,6 +217,7 @@ bool bio_integrity_prep(struct bio *bio)
+ unsigned long start, end;
+ unsigned int len, nr_pages;
+ unsigned int bytes, offset, i;
++ gfp_t gfp = GFP_NOIO;
+
+ if (!bi)
+ return true;
+@@ -239,11 +240,19 @@ bool bio_integrity_prep(struct bio *bio)
+ if (!bi->profile->generate_fn ||
+ !(bi->flags & BLK_INTEGRITY_GENERATE))
+ return true;
++
++ /*
++ * Zero the allocated memory so as not to leak uninitialized kernel
++ * memory to disk. For PI this only affects the app tag, but
++ * for non-integrity metadata it affects the entire metadata
++ * buffer.
++ */
++ gfp |= __GFP_ZERO;
+ }
+
+ /* Allocate kernel buffer for protection data */
+ len = bio_integrity_bytes(bi, bio_sectors(bio));
+- buf = kmalloc(len, GFP_NOIO);
++ buf = kmalloc(len, gfp);
+ if (unlikely(buf == NULL)) {
+ printk(KERN_ERR "could not allocate integrity buffer\n");
+ goto err_end_io;
+diff --git a/block/bio.c b/block/bio.c
+index 816d412c06e9b4..62419aa09d7319 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -944,7 +944,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
+
+ if ((addr1 | mask) != (addr2 | mask))
+ return false;
+- if (bv->bv_len + len > queue_max_segment_size(q))
++ if (len > queue_max_segment_size(q) - bv->bv_len)
+ return false;
+ return bvec_try_merge_page(bv, page, len, offset, same_page);
+ }
+@@ -1145,13 +1145,23 @@ EXPORT_SYMBOL(bio_add_folio);
+
+ void __bio_release_pages(struct bio *bio, bool mark_dirty)
+ {
+- struct bvec_iter_all iter_all;
+- struct bio_vec *bvec;
++ struct folio_iter fi;
++
++ bio_for_each_folio_all(fi, bio) {
++ struct page *page;
++ size_t nr_pages;
+
+- bio_for_each_segment_all(bvec, bio, iter_all) {
+- if (mark_dirty && !PageCompound(bvec->bv_page))
+- set_page_dirty_lock(bvec->bv_page);
+- bio_release_page(bio, bvec->bv_page);
++ if (mark_dirty) {
++ folio_lock(fi.folio);
++ folio_mark_dirty(fi.folio);
++ folio_unlock(fi.folio);
++ }
++ page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
++ nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
++ fi.offset / PAGE_SIZE + 1;
++ do {
++ bio_release_page(bio, page++);
++ } while (--nr_pages != 0);
+ }
+ }
+ EXPORT_SYMBOL_GPL(__bio_release_pages);
+@@ -1439,18 +1449,12 @@ EXPORT_SYMBOL(bio_free_pages);
+ * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
+ * for performing direct-IO in BIOs.
+ *
+- * The problem is that we cannot run set_page_dirty() from interrupt context
++ * The problem is that we cannot run folio_mark_dirty() from interrupt context
+ * because the required locks are not interrupt-safe. So what we can do is to
+ * mark the pages dirty _before_ performing IO. And in interrupt context,
+ * check that the pages are still dirty. If so, fine. If not, redirty them
+ * in process context.
+ *
+- * We special-case compound pages here: normally this means reads into hugetlb
+- * pages. The logic in here doesn't really work right for compound pages
+- * because the VM does not uniformly chase down the head page in all cases.
+- * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
+- * handle them at all. So we skip compound pages here at an early stage.
+- *
+ * Note that this code is very hard to test under normal circumstances because
+ * direct-io pins the pages with get_user_pages(). This makes
+ * is_page_cache_freeable return false, and the VM will not clean the pages.
+@@ -1466,12 +1470,12 @@ EXPORT_SYMBOL(bio_free_pages);
+ */
+ void bio_set_pages_dirty(struct bio *bio)
+ {
+- struct bio_vec *bvec;
+- struct bvec_iter_all iter_all;
++ struct folio_iter fi;
+
+- bio_for_each_segment_all(bvec, bio, iter_all) {
+- if (!PageCompound(bvec->bv_page))
+- set_page_dirty_lock(bvec->bv_page);
++ bio_for_each_folio_all(fi, bio) {
++ folio_lock(fi.folio);
++ folio_mark_dirty(fi.folio);
++ folio_unlock(fi.folio);
+ }
+ }
+ EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
+@@ -1515,12 +1519,11 @@ static void bio_dirty_fn(struct work_struct *work)
+
+ void bio_check_pages_dirty(struct bio *bio)
+ {
+- struct bio_vec *bvec;
++ struct folio_iter fi;
+ unsigned long flags;
+- struct bvec_iter_all iter_all;
+
+- bio_for_each_segment_all(bvec, bio, iter_all) {
+- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
++ bio_for_each_folio_all(fi, bio) {
++ if (!folio_test_dirty(fi.folio))
+ goto defer;
+ }
+
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 4a42ea2972ad85..4fb045d26bd5ad 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -323,6 +323,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
+ blkg->q = disk->queue;
+ INIT_LIST_HEAD(&blkg->q_node);
+ blkg->blkcg = blkcg;
++ blkg->iostat.blkg = blkg;
+ #ifdef CONFIG_BLK_CGROUP_PUNT_BIO
+ spin_lock_init(&blkg->async_bio_lock);
+ bio_list_init(&blkg->async_bios);
+@@ -577,6 +578,7 @@ static void blkg_destroy_all(struct gendisk *disk)
+ struct request_queue *q = disk->queue;
+ struct blkcg_gq *blkg, *n;
+ int count = BLKG_DESTROY_BATCH_SIZE;
++ int i;
+
+ restart:
+ spin_lock_irq(&q->queue_lock);
+@@ -602,16 +604,61 @@ static void blkg_destroy_all(struct gendisk *disk)
+ }
+ }
+
++ /*
++ * Mark the policies deactivated: policy offline has been done and
++ * the free is scheduled, so a future blkcg_deactivate_policy() can
++ * be bypassed.
++ */
++ for (i = 0; i < BLKCG_MAX_POLS; i++) {
++ struct blkcg_policy *pol = blkcg_policy[i];
++
++ if (pol)
++ __clear_bit(pol->plid, q->blkcg_pols);
++ }
++
+ q->root_blkg = NULL;
+ spin_unlock_irq(&q->queue_lock);
+ }
+
++static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
++{
++ int i;
++
++ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
++ dst->bytes[i] = src->bytes[i];
++ dst->ios[i] = src->ios[i];
++ }
++}
++
++static void __blkg_clear_stat(struct blkg_iostat_set *bis)
++{
++ struct blkg_iostat cur = {0};
++ unsigned long flags;
++
++ flags = u64_stats_update_begin_irqsave(&bis->sync);
++ blkg_iostat_set(&bis->cur, &cur);
++ blkg_iostat_set(&bis->last, &cur);
++ u64_stats_update_end_irqrestore(&bis->sync, flags);
++}
++
++static void blkg_clear_stat(struct blkcg_gq *blkg)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
++
++ __blkg_clear_stat(s);
++ }
++ __blkg_clear_stat(&blkg->iostat);
++}
++
+ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 val)
+ {
+ struct blkcg *blkcg = css_to_blkcg(css);
+ struct blkcg_gq *blkg;
+- int i, cpu;
++ int i;
+
+ mutex_lock(&blkcg_pol_mutex);
+ spin_lock_irq(&blkcg->lock);
+@@ -622,18 +669,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ * anyway. If you get hit by a race, retry.
+ */
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+- for_each_possible_cpu(cpu) {
+- struct blkg_iostat_set *bis =
+- per_cpu_ptr(blkg->iostat_cpu, cpu);
+- memset(bis, 0, sizeof(*bis));
+-
+- /* Re-initialize the cleared blkg_iostat_set */
+- u64_stats_init(&bis->sync);
+- bis->blkg = blkg;
+- }
+- memset(&blkg->iostat, 0, sizeof(blkg->iostat));
+- u64_stats_init(&blkg->iostat.sync);
+-
++ blkg_clear_stat(blkg);
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+
+@@ -936,16 +972,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
+ }
+ EXPORT_SYMBOL_GPL(blkg_conf_exit);
+
+-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+-{
+- int i;
+-
+- for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+- dst->bytes[i] = src->bytes[i];
+- dst->ios[i] = src->ios[i];
+- }
+-}
+-
+ static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
+ {
+ int i;
+@@ -1011,7 +1037,19 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
+ struct blkg_iostat cur;
+ unsigned int seq;
+
++ /*
++ * Order the assignment of `next_bisc` from `bisc->lnode.next` in
++ * llist_for_each_entry_safe against the clearing of `bisc->lqueued`,
++ * so that `next_bisc` cannot pick up a new next pointer added in
++ * blk_cgroup_bio_start() in case of re-ordering.
++ *
++ * The pair barrier is implied in llist_add() in blk_cgroup_bio_start().
++ */
++ smp_mb();
++
+ WRITE_ONCE(bisc->lqueued, false);
++ if (bisc == &blkg->iostat)
++ goto propagate_up; /* propagate up to parent only */
+
+ /* fetch the current per-cpu values */
+ do {
+@@ -1021,10 +1059,24 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
+
+ blkcg_iostat_update(blkg, &cur, &bisc->last);
+
++propagate_up:
+ /* propagate global delta to parent (unless that's root) */
+- if (parent && parent->parent)
++ if (parent && parent->parent) {
+ blkcg_iostat_update(parent, &blkg->iostat.cur,
+ &blkg->iostat.last);
++ /*
++ * Queue parent->iostat to its blkcg's lockless
++ * list to propagate up to the grandparent if the
++ * iostat hasn't been queued yet.
++ */
++ if (!parent->iostat.lqueued) {
++ struct llist_head *plhead;
++
++ plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
++ llist_add(&parent->iostat.lnode, plhead);
++ parent->iostat.lqueued = true;
++ }
++ }
+ }
+ raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
+ out:
+@@ -1396,6 +1448,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
+ return 0;
+ }
+
++void blkg_init_queue(struct request_queue *q)
++{
++ INIT_LIST_HEAD(&q->blkg_list);
++ mutex_init(&q->blkcg_mutex);
++}
++
+ int blkcg_init_disk(struct gendisk *disk)
+ {
+ struct request_queue *q = disk->queue;
+@@ -1403,9 +1461,6 @@ int blkcg_init_disk(struct gendisk *disk)
+ bool preloaded;
+ int ret;
+
+- INIT_LIST_HEAD(&q->blkg_list);
+- mutex_init(&q->blkcg_mutex);
+-
+ new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ if (!new_blkg)
+ return -ENOMEM;
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index 624c03c8fe64e3..5b0bdc268ade9f 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -188,6 +188,7 @@ struct blkcg_policy {
+ extern struct blkcg blkcg_root;
+ extern bool blkcg_debug_stats;
+
++void blkg_init_queue(struct request_queue *q);
+ int blkcg_init_disk(struct gendisk *disk);
+ void blkcg_exit_disk(struct gendisk *disk);
+
+@@ -249,12 +250,11 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+ {
+ struct blkcg_gq *blkg;
+
+- WARN_ON_ONCE(!rcu_read_lock_held());
+-
+ if (blkcg == &blkcg_root)
+ return q->root_blkg;
+
+- blkg = rcu_dereference(blkcg->blkg_hint);
++ blkg = rcu_dereference_check(blkcg->blkg_hint,
++ lockdep_is_held(&q->queue_lock));
+ if (blkg && blkg->q == q)
+ return blkg;
+
+@@ -482,6 +482,7 @@ struct blkcg {
+ };
+
+ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
++static inline void blkg_init_queue(struct request_queue *q) { }
+ static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
+ static inline void blkcg_exit_disk(struct gendisk *disk) { }
+ static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 9d51e9894ece78..4f25d2c4bc7055 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -49,6 +49,7 @@
+ #include "blk-pm.h"
+ #include "blk-cgroup.h"
+ #include "blk-throttle.h"
++#include "blk-ioprio.h"
+
+ struct dentry *blk_debugfs_root;
+
+@@ -430,6 +431,8 @@ struct request_queue *blk_alloc_queue(int node_id)
+ init_waitqueue_head(&q->mq_freeze_wq);
+ mutex_init(&q->mq_freeze_lock);
+
++ blkg_init_queue(q);
++
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+ * See blk_register_queue() for details.
+@@ -501,9 +504,17 @@ static inline void bio_check_ro(struct bio *bio)
+ if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return;
++
++ if (bio->bi_bdev->bd_ro_warned)
++ return;
++
++ bio->bi_bdev->bd_ro_warned = true;
++ /*
++ * Using an ioctl to set the underlying disk of a raid/dm
++ * device to read-only will trigger this.
++ */
+ pr_warn("Trying to write to read-only block-device %pg\n",
+ bio->bi_bdev);
+- /* Older lvm-tools actually trigger this */
+ }
+ }
+
+@@ -809,6 +820,14 @@ void submit_bio_noacct(struct bio *bio)
+ }
+ EXPORT_SYMBOL(submit_bio_noacct);
+
++static void bio_set_ioprio(struct bio *bio)
++{
++ /* Nobody set ioprio so far? Initialize it based on task's nice value */
++ if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
++ bio->bi_ioprio = get_current_ioprio();
++ blkcg_set_ioprio(bio);
++}
++
+ /**
+ * submit_bio - submit a bio to the block device layer for I/O
+ * @bio: The &struct bio which describes the I/O
+@@ -831,6 +850,7 @@ void submit_bio(struct bio *bio)
+ count_vm_events(PGPGOUT, bio_sectors(bio));
+ }
+
++ bio_set_ioprio(bio);
+ submit_bio_noacct(bio);
+ }
+ EXPORT_SYMBOL(submit_bio);
+@@ -940,10 +960,11 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
+ unsigned long stamp;
+ again:
+ stamp = READ_ONCE(part->bd_stamp);
+- if (unlikely(time_after(now, stamp))) {
+- if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
+- __part_stat_add(part, io_ticks, end ? now - stamp : 1);
+- }
++ if (unlikely(time_after(now, stamp)) &&
++ likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
++ (end || part_in_flight(part)))
++ __part_stat_add(part, io_ticks, now - stamp);
++
+ if (part->bd_partno) {
+ part = bdev_whole(part);
+ goto again;
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index e73dc22d05c1d1..313f0ffcce42e7 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -183,7 +183,7 @@ static void blk_flush_complete_seq(struct request *rq,
+ /* queue for flush */
+ if (list_empty(pending))
+ fq->flush_pending_since = jiffies;
+- list_move_tail(&rq->queuelist, pending);
++ list_add_tail(&rq->queuelist, pending);
+ break;
+
+ case REQ_FSEQ_DATA:
+@@ -261,6 +261,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+ unsigned int seq = blk_flush_cur_seq(rq);
+
+ BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
++ list_del_init(&rq->queuelist);
+ blk_flush_complete_seq(rq, fq, seq, error);
+ }
+
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index d4e9b4556d14b2..5276c556a9df91 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -396,8 +396,6 @@ void blk_integrity_unregister(struct gendisk *disk)
+ if (!bi->profile)
+ return;
+
+- /* ensure all bios are off the integrity workqueue */
+- blk_flush_integrity();
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+ memset(bi, 0, sizeof(*bi));
+ }
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 089fcb9cfce370..c3cb9c20b306cf 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1347,16 +1347,24 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
+ {
+ struct ioc *ioc = iocg->ioc;
+ struct blkcg_gq *blkg = iocg_to_blkg(iocg);
+- u64 tdelta, delay, new_delay;
++ u64 tdelta, delay, new_delay, shift;
+ s64 vover, vover_pct;
+ u32 hwa;
+
+ lockdep_assert_held(&iocg->waitq.lock);
+
++ /*
++ * If the delay is set by another CPU, we may be in the past. No need to
++ * change anything if so. This avoids decay calculation underflow.
++ */
++ if (time_before64(now->now, iocg->delay_at))
++ return false;
++
+ /* calculate the current delay in effect - 1/2 every second */
+ tdelta = now->now - iocg->delay_at;
+- if (iocg->delay)
+- delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
++ shift = div64_u64(tdelta, USEC_PER_SEC);
++ if (iocg->delay && shift < BITS_PER_LONG)
++ delay = iocg->delay >> shift;
+ else
+ delay = 0;
+
+@@ -1431,8 +1439,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
+ lockdep_assert_held(&iocg->ioc->lock);
+ lockdep_assert_held(&iocg->waitq.lock);
+
+- /* make sure that nobody messed with @iocg */
+- WARN_ON_ONCE(list_empty(&iocg->active_list));
++ /*
++ * make sure that nobody messed with @iocg. Check iocg->pd.online
++ * to avoid a warning when removing a blkcg or disk.
++ */
++ WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
+ WARN_ON_ONCE(iocg->inuse > 1);
+
+ iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
+@@ -2065,7 +2076,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
+ struct ioc_now *now)
+ {
+ struct ioc_gq *iocg;
+- u64 dur, usage_pct, nr_cycles;
++ u64 dur, usage_pct, nr_cycles, nr_cycles_shift;
+
+ /* if no debtor, reset the cycle */
+ if (!nr_debtors) {
+@@ -2127,10 +2138,12 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
+ old_debt = iocg->abs_vdebt;
+ old_delay = iocg->delay;
+
++ nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1);
+ if (iocg->abs_vdebt)
+- iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
++ iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1;
++
+ if (iocg->delay)
+- iocg->delay = iocg->delay >> nr_cycles ?: 1;
++ iocg->delay = iocg->delay >> nr_cycles_shift ?: 1;
+
+ iocg_kick_waitq(iocg, true, now);
+
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 8584babf3ea0ca..71210cdb34426d 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -205,12 +205,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
+ /*
+ * success
+ */
+- if ((iov_iter_rw(iter) == WRITE &&
+- (!map_data || !map_data->null_mapped)) ||
+- (map_data && map_data->from_user)) {
++ if (iov_iter_rw(iter) == WRITE &&
++ (!map_data || !map_data->null_mapped)) {
+ ret = bio_copy_from_iter(bio, iter);
+ if (ret)
+ goto cleanup;
++ } else if (map_data && map_data->from_user) {
++ struct iov_iter iter2 = *iter;
++
++ /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
++ iter2.data_source = ITER_SOURCE;
++ ret = bio_copy_from_iter(bio, &iter2);
++ if (ret)
++ goto cleanup;
+ } else {
+ if (bmd->is_our_pages)
+ zero_fill_bio(bio);
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 65e75efa9bd366..07bf758c523a9c 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -783,6 +783,8 @@ static void blk_account_io_merge_request(struct request *req)
+ if (blk_do_io_stat(req)) {
+ part_stat_lock();
+ part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
++ part_stat_local_dec(req->part,
++ in_flight[op_is_write(req_op(req))]);
+ part_stat_unlock();
+ }
+ }
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index cc57e2dd9a0bb3..2cafcf11ee8bee 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -38,6 +38,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+ unsigned int users;
++ unsigned long flags;
+ struct blk_mq_tags *tags = hctx->tags;
+
+ /*
+@@ -56,11 +57,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ return;
+ }
+
+- spin_lock_irq(&tags->lock);
++ spin_lock_irqsave(&tags->lock, flags);
+ users = tags->active_queues + 1;
+ WRITE_ONCE(tags->active_queues, users);
+ blk_mq_update_wake_batch(tags, users);
+- spin_unlock_irq(&tags->lock);
++ spin_unlock_irqrestore(&tags->lock, flags);
+ }
+
+ /*
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 1fafd54dce3cb9..733d72f4d1cc9d 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -40,7 +40,6 @@
+ #include "blk-stat.h"
+ #include "blk-mq-sched.h"
+ #include "blk-rq-qos.h"
+-#include "blk-ioprio.h"
+
+ static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+ static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
+@@ -447,6 +446,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
+ if (data->cmd_flags & REQ_NOWAIT)
+ data->flags |= BLK_MQ_REQ_NOWAIT;
+
++retry:
++ data->ctx = blk_mq_get_ctx(q);
++ data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
++
+ if (q->elevator) {
+ /*
+ * All requests use scheduler tags when an I/O scheduler is
+@@ -468,13 +471,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
+ if (ops->limit_depth)
+ ops->limit_depth(data->cmd_flags, data);
+ }
+- }
+-
+-retry:
+- data->ctx = blk_mq_get_ctx(q);
+- data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+- if (!(data->rq_flags & RQF_SCHED_TAGS))
++ } else {
+ blk_mq_tag_busy(data->hctx);
++ }
+
+ if (data->flags & BLK_MQ_REQ_RESERVED)
+ data->rq_flags |= RQF_RESV;
+@@ -994,6 +993,8 @@ static inline void blk_account_io_done(struct request *req, u64 now)
+ update_io_ticks(req->part, jiffies, true);
+ part_stat_inc(req->part, ios[sgrp]);
+ part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
++ part_stat_local_dec(req->part,
++ in_flight[op_is_write(req_op(req))]);
+ part_stat_unlock();
+ }
+ }
+@@ -1016,6 +1017,8 @@ static inline void blk_account_io_start(struct request *req)
+
+ part_stat_lock();
+ update_io_ticks(req->part, jiffies, false);
++ part_stat_local_inc(req->part,
++ in_flight[op_is_write(req_op(req))]);
+ part_stat_unlock();
+ }
+ }
+@@ -1511,14 +1514,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+ }
+ EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
++static bool blk_is_flush_data_rq(struct request *rq)
++{
++ return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
++}
++
+ static bool blk_mq_rq_inflight(struct request *rq, void *priv)
+ {
+ /*
+ * If we find a request that isn't idle we know the queue is busy
+ * as it's checked in the iter.
+ * Return false to stop the iteration.
++ *
++ * In case of queue quiesce, if a flush data request has completed,
++ * don't count it as inflight: the flush sequence is suspended and
++ * the original flush data request is invisible to the driver, just
++ * like other requests pending because of the quiesce.
+ */
+- if (blk_mq_request_started(rq)) {
++ if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
++ blk_is_flush_data_rq(rq) &&
++ blk_mq_request_completed(rq))) {
+ bool *busy = priv;
+
+ *busy = true;
+@@ -1858,6 +1873,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
+ wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(wq, wait);
+
++ /*
++ * Add one explicit barrier since blk_mq_get_driver_tag() may
++ * not imply barrier in case of failure.
++ *
++ * Order adding us to wait queue and allocating driver tag.
++ *
++ * The pair is the one implied in sbitmap_queue_wake_up() which
++ * orders clearing sbitmap tag bits and waitqueue_active() in
++ * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless
++ *
++ * Otherwise, re-order of adding wait queue and getting driver tag
++ * may cause __sbitmap_queue_wake_up() to wake up nothing because
++ * the waitqueue_active() may not observe us in wait queue.
++ */
++ smp_mb();
++
+ /*
+ * It's possible that a tag was freed in the window between the
+ * allocation failure and adding the hardware queue to the wait
+@@ -2875,11 +2906,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ };
+ struct request *rq;
+
+- if (unlikely(bio_queue_enter(bio)))
+- return NULL;
+-
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+- goto queue_exit;
++ return NULL;
+
+ rq_qos_throttle(q, bio);
+
+@@ -2895,35 +2923,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ rq_qos_cleanup(q, bio);
+ if (bio->bi_opf & REQ_NOWAIT)
+ bio_wouldblock_error(bio);
+-queue_exit:
+- blk_queue_exit(q);
+ return NULL;
+ }
+
+-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+- struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
++/* return true if this @rq can be used for @bio */
++static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
++ struct bio *bio)
+ {
+- struct request *rq;
+- enum hctx_type type, hctx_type;
+-
+- if (!plug)
+- return NULL;
+- rq = rq_list_peek(&plug->cached_rq);
+- if (!rq || rq->q != q)
+- return NULL;
++ enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
++ enum hctx_type hctx_type = rq->mq_hctx->type;
+
+- if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+- *bio = NULL;
+- return NULL;
+- }
++ WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+
+- type = blk_mq_get_hctx_type((*bio)->bi_opf);
+- hctx_type = rq->mq_hctx->type;
+ if (type != hctx_type &&
+ !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
+- return NULL;
+- if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+- return NULL;
++ return false;
++ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
++ return false;
+
+ /*
+ * If any qos ->throttle() end up blocking, we will have flushed the
+@@ -2931,20 +2947,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ * before we throttle.
+ */
+ plug->cached_rq = rq_list_next(rq);
+- rq_qos_throttle(q, *bio);
++ rq_qos_throttle(rq->q, bio);
+
+ blk_mq_rq_time_init(rq, 0);
+- rq->cmd_flags = (*bio)->bi_opf;
++ rq->cmd_flags = bio->bi_opf;
+ INIT_LIST_HEAD(&rq->queuelist);
+- return rq;
+-}
+-
+-static void bio_set_ioprio(struct bio *bio)
+-{
+- /* Nobody set ioprio so far? Initialize it based on task's nice value */
+- if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
+- bio->bi_ioprio = get_current_ioprio();
+- blkcg_set_ioprio(bio);
++ return true;
+ }
+
+ /**
+@@ -2966,31 +2974,50 @@ void blk_mq_submit_bio(struct bio *bio)
+ struct blk_plug *plug = blk_mq_plug(bio);
+ const int is_sync = op_is_sync(bio->bi_opf);
+ struct blk_mq_hw_ctx *hctx;
+- struct request *rq;
++ struct request *rq = NULL;
+ unsigned int nr_segs = 1;
+ blk_status_t ret;
+
+ bio = blk_queue_bounce(bio, q);
+- if (bio_may_exceed_limits(bio, &q->limits)) {
+- bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+- if (!bio)
++
++ if (plug) {
++ rq = rq_list_peek(&plug->cached_rq);
++ if (rq && rq->q != q)
++ rq = NULL;
++ }
++ if (rq) {
++ if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
++ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
++ if (!bio)
++ return;
++ }
++ if (!bio_integrity_prep(bio))
++ return;
++ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
++ return;
++ if (blk_mq_can_use_cached_rq(rq, plug, bio))
++ goto done;
++ percpu_ref_get(&q->q_usage_counter);
++ } else {
++ if (unlikely(bio_queue_enter(bio)))
+ return;
++ if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
++ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
++ if (!bio)
++ goto fail;
++ }
++ if (!bio_integrity_prep(bio))
++ goto fail;
+ }
+
+- if (!bio_integrity_prep(bio))
++ rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
++ if (unlikely(!rq)) {
++fail:
++ blk_queue_exit(q);
+ return;
+-
+- bio_set_ioprio(bio);
+-
+- rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+- if (!rq) {
+- if (!bio)
+- return;
+- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+- if (unlikely(!rq))
+- return;
+ }
+
++done:
+ trace_block_getrq(bio);
+
+ rq_qos_track(q, rq, bio);
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index dd7310c94713c9..dc510f493ba572 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -219,8 +219,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
+
+ data->got_token = true;
+ smp_wmb();
+- list_del_init(&curr->entry);
+ wake_up_process(data->task);
++ list_del_init_careful(&curr->entry);
+ return 1;
+ }
+
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 0046b447268f91..7019b8e204d965 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -686,6 +686,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ t->zone_write_granularity = max(t->zone_write_granularity,
+ b->zone_write_granularity);
+ t->zoned = max(t->zoned, b->zoned);
++ if (!t->zoned) {
++ t->zone_write_granularity = 0;
++ t->max_zone_append_sectors = 0;
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL(blk_stack_limits);
+diff --git a/block/blk-stat.c b/block/blk-stat.c
+index 7ff76ae6c76a95..e42c263e53fb99 100644
+--- a/block/blk-stat.c
++++ b/block/blk-stat.c
+@@ -27,7 +27,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat)
+ /* src is a per-cpu stat, mean isn't initialized */
+ void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+ {
+- if (!src->nr_samples)
++ if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
+ return;
+
+ dst->min = min(dst->min, src->min);
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 13e4377a8b2865..16f5766620a410 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1320,6 +1320,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
+ tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
+ tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
+
++ rcu_read_lock();
+ /*
+ * Update has_rules[] flags for the updated tg's subtree. A tg is
+ * considered to have rules if either the tg itself or any of its
+@@ -1347,6 +1348,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
+ this_tg->latency_target = max(this_tg->latency_target,
+ parent_tg->latency_target);
+ }
++ rcu_read_unlock();
+
+ /*
+ * We're already holding queue_lock and know @tg is valid. Let's
+diff --git a/block/blk-wbt.c b/block/blk-wbt.c
+index 0bb613139becbb..f8fda9cf583e1c 100644
+--- a/block/blk-wbt.c
++++ b/block/blk-wbt.c
+@@ -165,9 +165,9 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
+ */
+ static bool wb_recent_wait(struct rq_wb *rwb)
+ {
+- struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
++ struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
+
+- return time_before(jiffies, wb->dirty_sleep + HZ);
++ return time_before(jiffies, bdi->last_bdp_sleep + HZ);
+ }
+
+ static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
+diff --git a/block/blk.h b/block/blk.h
+index 08a358bc0919e2..67915b04b3c179 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -344,6 +344,7 @@ static inline bool blk_do_io_stat(struct request *rq)
+ }
+
+ void update_io_ticks(struct block_device *part, unsigned long now, bool end);
++unsigned int part_in_flight(struct block_device *part);
+
+ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
+ {
+diff --git a/block/fops.c b/block/fops.c
+index 73e42742543f6a..1df187b3067920 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -387,7 +387,7 @@ static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+
+ iomap->bdev = bdev;
+ iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
+- if (iomap->offset >= isize)
++ if (offset >= isize)
+ return -EIO;
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = iomap->offset;
+diff --git a/block/genhd.c b/block/genhd.c
+index cc32a0c704eb84..203c880c3e1cd2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -118,7 +118,7 @@ static void part_stat_read_all(struct block_device *part,
+ }
+ }
+
+-static unsigned int part_in_flight(struct block_device *part)
++unsigned int part_in_flight(struct block_device *part)
+ {
+ unsigned int inflight = 0;
+ int cpu;
+@@ -345,9 +345,7 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
+ struct block_device *bdev;
+ int ret = 0;
+
+- if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
+- return -EINVAL;
+- if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++ if (!disk_has_partscan(disk))
+ return -EINVAL;
+ if (disk->open_partitions)
+ return -EBUSY;
+@@ -432,7 +430,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ DISK_MAX_PARTS);
+ disk->minors = DISK_MAX_PARTS;
+ }
+- if (disk->first_minor + disk->minors > MINORMASK + 1)
++ if (disk->first_minor > MINORMASK ||
++ disk->minors > MINORMASK + 1 ||
++ disk->first_minor + disk->minors > MINORMASK + 1)
+ goto out_exit_elevator;
+ } else {
+ if (WARN_ON(disk->minors))
+@@ -501,8 +501,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ goto out_unregister_bdi;
+
+ /* Make sure the first partition scan will be proceed */
+- if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
+- !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++ if (get_capacity(disk) && disk_has_partscan(disk))
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+
+ bdev_add(disk->part0, ddev->devt);
+@@ -542,6 +541,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ kobject_put(disk->part0->bd_holder_dir);
+ out_del_block_link:
+ sysfs_remove_link(block_depr, dev_name(ddev));
++ pm_runtime_set_memalloc_noio(ddev, false);
+ out_device_del:
+ device_del(ddev);
+ out_free_ext_minor:
+@@ -655,12 +655,12 @@ void del_gendisk(struct gendisk *disk)
+ */
+ if (!test_bit(GD_DEAD, &disk->state))
+ blk_report_disk_dead(disk, false);
+- __blk_mark_disk_dead(disk);
+
+ /*
+ * Drop all partitions now that the disk is marked dead.
+ */
+ mutex_lock(&disk->open_mutex);
++ __blk_mark_disk_dead(disk);
+ xa_for_each_start(&disk->part_tbl, idx, part, 1)
+ drop_partition(part);
+ mutex_unlock(&disk->open_mutex);
+@@ -1037,6 +1037,12 @@ static ssize_t diskseq_show(struct device *dev,
+ return sprintf(buf, "%llu\n", disk->diskseq);
+ }
+
++static ssize_t partscan_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%u\n", disk_has_partscan(dev_to_disk(dev)));
++}
++
+ static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
+ static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
+ static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
+@@ -1050,6 +1056,7 @@ static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
+ static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
+ static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
+ static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
++static DEVICE_ATTR(partscan, 0444, partscan_show, NULL);
+
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+ ssize_t part_fail_show(struct device *dev,
+@@ -1096,6 +1103,7 @@ static struct attribute *disk_attrs[] = {
+ &dev_attr_events_async.attr,
+ &dev_attr_events_poll_msecs.attr,
+ &dev_attr_diskseq.attr,
++ &dev_attr_partscan.attr,
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+ &dev_attr_fail.attr,
+ #endif
+diff --git a/block/ioctl.c b/block/ioctl.c
+index d5f5cd61efd7fc..3786033342848d 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -18,10 +18,8 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ {
+ struct gendisk *disk = bdev->bd_disk;
+ struct blkpg_partition p;
+- long long start, length;
++ sector_t start, length;
+
+- if (disk->flags & GENHD_FL_NO_PART)
+- return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
+@@ -35,14 +33,17 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ if (op == BLKPG_DEL_PARTITION)
+ return bdev_del_partition(disk, p.pno);
+
++ if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
++ return -EINVAL;
++ /* Check that the partition is aligned to the block size */
++ if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
++ return -EINVAL;
++
+ start = p.start >> SECTOR_SHIFT;
+ length = p.length >> SECTOR_SHIFT;
+
+ switch (op) {
+ case BLKPG_ADD_PARTITION:
+- /* check if partition is aligned to blocksize */
+- if (p.start & (bdev_logical_block_size(bdev) - 1))
+- return -EINVAL;
+ return bdev_add_partition(disk, p.pno, start, length);
+ case BLKPG_RESIZE_PARTITION:
+ return bdev_resize_partition(disk, p.pno, start, length);
+@@ -88,7 +89,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
+ unsigned long arg)
+ {
+ uint64_t range[2];
+- uint64_t start, len;
++ uint64_t start, len, end;
+ struct inode *inode = bdev->bd_inode;
+ int err;
+
+@@ -109,7 +110,8 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
+ if (len & 511)
+ return -EINVAL;
+
+- if (start + len > bdev_nr_bytes(bdev))
++ if (check_add_overflow(start, len, &end) ||
++ end > bdev_nr_bytes(bdev))
+ return -EINVAL;
+
+ filemap_invalidate_lock(inode->i_mapping);
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index f958e79277b8bc..78a8aa204c1565 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -621,6 +621,20 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ return rq;
+ }
+
++/*
++ * 'depth' is a number in the range 1..INT_MAX representing a number of
++ * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
++ * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
++ * Values larger than q->nr_requests have the same effect as q->nr_requests.
++ */
++static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
++{
++ struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
++ const unsigned int nrr = hctx->queue->nr_requests;
++
++ return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
++}
++
+ /*
+ * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
+ * function is used by __blk_mq_get_tag().
+@@ -637,7 +651,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
+ * Throttle asynchronous requests and writes such that these requests
+ * do not block the allocation of synchronous requests.
+ */
+- data->shallow_depth = dd->async_depth;
++ data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
+ }
+
+ /* Called by blk_mq_update_nr_requests(). */
+@@ -646,11 +660,10 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
+ struct request_queue *q = hctx->queue;
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct blk_mq_tags *tags = hctx->sched_tags;
+- unsigned int shift = tags->bitmap_tags.sb.shift;
+
+- dd->async_depth = max(1U, 3 * (1U << shift) / 4);
++ dd->async_depth = q->nr_requests;
+
+- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
++ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
+ }
+
+ /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
+diff --git a/block/opal_proto.h b/block/opal_proto.h
+index dec7ce3a3edb70..d247a457bf6e3f 100644
+--- a/block/opal_proto.h
++++ b/block/opal_proto.h
+@@ -71,6 +71,7 @@ enum opal_response_token {
+ #define SHORT_ATOM_BYTE 0xBF
+ #define MEDIUM_ATOM_BYTE 0xDF
+ #define LONG_ATOM_BYTE 0xE3
++#define EMPTY_ATOM_BYTE 0xFF
+
+ #define OPAL_INVAL_PARAM 12
+ #define OPAL_MANUFACTURED_INACTIVE 0x08
+diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
+index c03bc105e57539..152c85df92b20e 100644
+--- a/block/partitions/cmdline.c
++++ b/block/partitions/cmdline.c
+@@ -70,8 +70,8 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+ }
+
+ if (*partdef == '(') {
+- int length;
+- char *next = strchr(++partdef, ')');
++ partdef++;
++ char *next = strsep(&partdef, ")");
+
+ if (!next) {
+ pr_warn("cmdline partition format is invalid.");
+@@ -79,11 +79,7 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+ goto fail;
+ }
+
+- length = min_t(int, next - partdef,
+- sizeof(new_subpart->name) - 1);
+- strscpy(new_subpart->name, partdef, length);
+-
+- partdef = ++next;
++ strscpy(new_subpart->name, next, sizeof(new_subpart->name));
+ } else
+ new_subpart->name[0] = '\0';
+
+@@ -117,14 +113,12 @@ static void free_subpart(struct cmdline_parts *parts)
+ }
+ }
+
+-static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
++static int parse_parts(struct cmdline_parts **parts, char *bdevdef)
+ {
+ int ret = -EINVAL;
+ char *next;
+- int length;
+ struct cmdline_subpart **next_subpart;
+ struct cmdline_parts *newparts;
+- char buf[BDEVNAME_SIZE + 32 + 4];
+
+ *parts = NULL;
+
+@@ -132,28 +126,19 @@ static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+ if (!newparts)
+ return -ENOMEM;
+
+- next = strchr(bdevdef, ':');
++ next = strsep(&bdevdef, ":");
+ if (!next) {
+ pr_warn("cmdline partition has no block device.");
+ goto fail;
+ }
+
+- length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+- strscpy(newparts->name, bdevdef, length);
++ strscpy(newparts->name, next, sizeof(newparts->name));
+ newparts->nr_subparts = 0;
+
+ next_subpart = &newparts->subpart;
+
+- while (next && *(++next)) {
+- bdevdef = next;
+- next = strchr(bdevdef, ',');
+-
+- length = (!next) ? (sizeof(buf) - 1) :
+- min_t(int, next - bdevdef, sizeof(buf) - 1);
+-
+- strscpy(buf, bdevdef, length);
+-
+- ret = parse_subpart(next_subpart, buf);
++ while ((next = strsep(&bdevdef, ","))) {
++ ret = parse_subpart(next_subpart, next);
+ if (ret)
+ goto fail;
+
+@@ -199,24 +184,17 @@ static int cmdline_parts_parse(struct cmdline_parts **parts,
+
+ *parts = NULL;
+
+- next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
++ pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ next_parts = parts;
+
+- while (next && *pbuf) {
+- next = strchr(pbuf, ';');
+- if (next)
+- *next = '\0';
+-
+- ret = parse_parts(next_parts, pbuf);
++ while ((next = strsep(&pbuf, ";"))) {
++ ret = parse_parts(next_parts, next);
+ if (ret)
+ goto fail;
+
+- if (next)
+- pbuf = ++next;
+-
+ next_parts = &(*next_parts)->next_parts;
+ }
+
+@@ -250,7 +228,6 @@ static struct cmdline_parts *bdev_parts;
+ static int add_part(int slot, struct cmdline_subpart *subpart,
+ struct parsed_partitions *state)
+ {
+- int label_min;
+ struct partition_meta_info *info;
+ char tmp[sizeof(info->volname) + 4];
+
+@@ -262,9 +239,7 @@ static int add_part(int slot, struct cmdline_subpart *subpart,
+
+ info = &state->parts[slot].info;
+
+- label_min = min_t(int, sizeof(info->volname) - 1,
+- sizeof(subpart->name));
+- strscpy(info->volname, subpart->name, label_min);
++ strscpy(info->volname, subpart->name, sizeof(info->volname));
+
+ snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+diff --git a/block/partitions/core.c b/block/partitions/core.c
+index e137a87f4db0d3..fc0ab5d8ab705b 100644
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -458,6 +458,11 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+ goto out;
+ }
+
++ if (disk->flags & GENHD_FL_NO_PART) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ if (partition_overlaps(disk, start, length, -1)) {
+ ret = -EBUSY;
+ goto out;
+@@ -569,9 +574,11 @@ static bool blk_add_partition(struct gendisk *disk,
+
+ part = add_partition(disk, p, from, size, state->parts[p].flags,
+ &state->parts[p].info);
+- if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
+- printk(KERN_ERR " %s: p%d could not be added: %ld\n",
+- disk->disk_name, p, -PTR_ERR(part));
++ if (IS_ERR(part)) {
++ if (PTR_ERR(part) != -ENXIO) {
++ printk(KERN_ERR " %s: p%d could not be added: %pe\n",
++ disk->disk_name, p, part);
++ }
+ return true;
+ }
+
+@@ -587,10 +594,7 @@ static int blk_add_partitions(struct gendisk *disk)
+ struct parsed_partitions *state;
+ int ret = -EAGAIN, p;
+
+- if (disk->flags & GENHD_FL_NO_PART)
+- return 0;
+-
+- if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++ if (!disk_has_partscan(disk))
+ return 0;
+
+ state = check_partition(disk);
+diff --git a/block/sed-opal.c b/block/sed-opal.c
+index 04f38a3f5d9597..1a1cb35bf4b798 100644
+--- a/block/sed-opal.c
++++ b/block/sed-opal.c
+@@ -313,7 +313,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
+ &key_type_user, key_name, true);
+
+ if (IS_ERR(kref))
+- ret = PTR_ERR(kref);
++ return PTR_ERR(kref);
+
+ key = key_ref_to_ptr(kref);
+ down_read(&key->sem);
+@@ -1055,16 +1055,20 @@ static int response_parse(const u8 *buf, size_t length,
+ token_length = response_parse_medium(iter, pos);
+ else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
+ token_length = response_parse_long(iter, pos);
++ else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
++ token_length = 1;
+ else /* TOKEN */
+ token_length = response_parse_token(iter, pos);
+
+ if (token_length < 0)
+ return token_length;
+
++ if (pos[0] != EMPTY_ATOM_BYTE)
++ num_entries++;
++
+ pos += token_length;
+ total -= token_length;
+ iter++;
+- num_entries++;
+ }
+
+ resp->num = num_entries;
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 650b1b3620d818..fc0f75d8be01d2 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1291,10 +1291,11 @@ config CRYPTO_JITTERENTROPY
+
+ A non-physical non-deterministic ("true") RNG (e.g., an entropy source
+ compliant with NIST SP800-90B) intended to provide a seed to a
+- deterministic RNG (e.g. per NIST SP800-90C).
++ deterministic RNG (e.g., per NIST SP800-90C).
+ This RNG does not perform any cryptographic whitening of the generated
++ random numbers.
+
+- See https://www.chronox.de/jent.html
++ See https://www.chronox.de/jent/
+
+ config CRYPTO_JITTERENTROPY_TESTINTERFACE
+ bool "CPU Jitter RNG Test Interface"
+diff --git a/crypto/aead.c b/crypto/aead.c
+index d5ba204ebdbfa6..ecab683016b7df 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -45,8 +45,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+ alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ memcpy(alignbuffer, key, keylen);
+ ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
+- memset(alignbuffer, 0, keylen);
+- kfree(buffer);
++ kfree_sensitive(buffer);
+ return ret;
+ }
+
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index ea6fb8e89d0653..68cc9290cabe9a 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -1116,9 +1116,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
+ void af_alg_free_resources(struct af_alg_async_req *areq)
+ {
+ struct sock *sk = areq->sk;
++ struct af_alg_ctx *ctx;
+
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
++
++ ctx = alg_sk(sk)->private;
++ ctx->inflight = false;
+ }
+ EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
+@@ -1188,11 +1192,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
+ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ unsigned int areqlen)
+ {
+- struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
++ struct af_alg_ctx *ctx = alg_sk(sk)->private;
++ struct af_alg_async_req *areq;
++
++ /* Only one AIO request can be in flight. */
++ if (ctx->inflight)
++ return ERR_PTR(-EBUSY);
+
++ areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+ if (unlikely(!areq))
+ return ERR_PTR(-ENOMEM);
+
++ ctx->inflight = true;
++
+ areq->areqlen = areqlen;
+ areq->sk = sk;
+ areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 4fe95c44804733..85bc279b4233fa 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -341,6 +341,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
+ }
+
+ if (!strcmp(q->cra_driver_name, alg->cra_name) ||
++ !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ !strcmp(q->cra_name, alg->cra_driver_name))
+ goto err;
+ }
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 82c44d4899b967..e24c829d7a0154 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -91,13 +91,13 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+ if (!(msg->msg_flags & MSG_MORE)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+- goto unlock_free;
++ goto unlock_free_result;
+ ahash_request_set_crypt(&ctx->req, NULL,
+ ctx->result, 0);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
+ if (err)
+- goto unlock_free;
++ goto unlock_free_result;
+ }
+ goto done_more;
+ }
+@@ -170,6 +170,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+
+ unlock_free:
+ af_alg_free_sg(&ctx->sgl);
++unlock_free_result:
+ hash_free_result(sk, ctx);
+ ctx->more = false;
+ goto unlock;
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 1ef3b46d6f6e5c..684767ab23e24d 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select MPILIB
+ select CRYPTO_HASH_INFO
+ select CRYPTO_AKCIPHER
++ select CRYPTO_SIG
+ select CRYPTO_HASH
+ help
+ This option provides support for asymmetric public key type handling.
+@@ -76,7 +77,7 @@ config SIGNED_PE_FILE_VERIFICATION
+ signed PE binary.
+
+ config FIPS_SIGNATURE_SELFTEST
+- bool "Run FIPS selftests on the X.509+PKCS7 signature verification"
++ tristate "Run FIPS selftests on the X.509+PKCS7 signature verification"
+ help
+ This option causes some selftests to be run on the signature
+ verification code, using some built in data. This is required
+@@ -84,5 +85,8 @@ config FIPS_SIGNATURE_SELFTEST
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
++ depends on X509_CERTIFICATE_PARSER
++ depends on CRYPTO_RSA
++ depends on CRYPTO_SHA256
+
+ endif # ASYMMETRIC_KEY_TYPE
+diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
+index 0d1fa1b692c6b2..1a273d6df3ebf4 100644
+--- a/crypto/asymmetric_keys/Makefile
++++ b/crypto/asymmetric_keys/Makefile
+@@ -22,7 +22,8 @@ x509_key_parser-y := \
+ x509_cert_parser.o \
+ x509_loader.o \
+ x509_public_key.o
+-x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o
++obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o
++x509_selftest-y += selftest.o
+
+ $(obj)/x509_cert_parser.o: \
+ $(obj)/x509.asn1.h \
+diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
+index a5da8ccd353ef7..43af5fa510c09f 100644
+--- a/crypto/asymmetric_keys/asymmetric_type.c
++++ b/crypto/asymmetric_keys/asymmetric_type.c
+@@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring,
+ char *req, *p;
+ int len;
+
+- WARN_ON(!id_0 && !id_1 && !id_2);
+-
+ if (id_0) {
+ lookup = id_0->data;
+ len = id_0->len;
+ } else if (id_1) {
+ lookup = id_1->data;
+ len = id_1->len;
+- } else {
++ } else if (id_2) {
+ lookup = id_2->data;
+ len = id_2->len;
++ } else {
++ WARN_ON(1);
++ return ERR_PTR(-EINVAL);
+ }
+
+ /* Construct an identifier "id:<keyid>". */
+diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
+index fa0bf7f2428495..c50da7ef90ae99 100644
+--- a/crypto/asymmetric_keys/selftest.c
++++ b/crypto/asymmetric_keys/selftest.c
+@@ -4,10 +4,11 @@
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+-#include <linux/kernel.h>
++#include <crypto/pkcs7.h>
+ #include <linux/cred.h>
++#include <linux/kernel.h>
+ #include <linux/key.h>
+-#include <crypto/pkcs7.h>
++#include <linux/module.h>
+ #include "x509_parser.h"
+
+ struct certs_test {
+@@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = {
+ TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
+ };
+
+-int __init fips_signature_selftest(void)
++static int __init fips_signature_selftest(void)
+ {
+ struct key *keyring;
+ int ret, i;
+@@ -222,3 +223,9 @@ int __init fips_signature_selftest(void)
+ key_put(keyring);
+ return 0;
+ }
++
++late_initcall(fips_signature_selftest);
++
++MODULE_DESCRIPTION("X.509 self tests");
++MODULE_AUTHOR("Red Hat, Inc.");
++MODULE_LICENSE("GPL");
+diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
+index a299c9c56f409e..97a886cbe01c3d 100644
+--- a/crypto/asymmetric_keys/x509_parser.h
++++ b/crypto/asymmetric_keys/x509_parser.h
+@@ -40,15 +40,6 @@ struct x509_certificate {
+ bool blacklisted;
+ };
+
+-/*
+- * selftest.c
+- */
+-#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST
+-extern int __init fips_signature_selftest(void);
+-#else
+-static inline int fips_signature_selftest(void) { return 0; }
+-#endif
+-
+ /*
+ * x509_cert_parser.c
+ */
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 7c71db3ac23d48..6a4f00be22fc10 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = {
+ /*
+ * Module stuff
+ */
+-extern int __init certs_selftest(void);
+ static int __init x509_key_init(void)
+ {
+- int ret;
+-
+- ret = register_asymmetric_key_parser(&x509_key_parser);
+- if (ret < 0)
+- return ret;
+- return fips_signature_selftest();
++ return register_asymmetric_key_parser(&x509_key_parser);
+ }
+
+ static void __exit x509_key_exit(void)
+diff --git a/crypto/cipher.c b/crypto/cipher.c
+index 47c77a3e597833..40cae908788eca 100644
+--- a/crypto/cipher.c
++++ b/crypto/cipher.c
+@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
+ alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ memcpy(alignbuffer, key, keylen);
+ ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
+- memset(alignbuffer, 0, keylen);
+- kfree(buffer);
++ kfree_sensitive(buffer);
+ return ret;
+
+ }
+diff --git a/crypto/ecdh.c b/crypto/ecdh.c
+index 80afee3234fbe7..3049f147e0117b 100644
+--- a/crypto/ecdh.c
++++ b/crypto/ecdh.c
+@@ -33,6 +33,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ params.key_size > sizeof(u64) * ctx->ndigits)
+ return -EINVAL;
+
++ memset(ctx->private_key, 0, sizeof(ctx->private_key));
++
+ if (!params.key || !params.key_size)
+ return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
+ ctx->private_key);
+diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
+index fbd76498aba834..3f9ec273a121fd 100644
+--- a/crypto/ecdsa.c
++++ b/crypto/ecdsa.c
+@@ -373,4 +373,7 @@ module_exit(ecdsa_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
+ MODULE_DESCRIPTION("ECDSA generic algorithm");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p192");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p256");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p384");
+ MODULE_ALIAS_CRYPTO("ecdsa-generic");
+diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
+index f3c6b5e15e75ba..3811f3805b5d88 100644
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+ MODULE_DESCRIPTION("EC-RDSA generic algorithm");
++MODULE_ALIAS_CRYPTO("ecrdsa");
+ MODULE_ALIAS_CRYPTO("ecrdsa-generic");
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index 8c1d0ca412137f..d0d954fe9d54f3 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
+ err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
+ if (!err)
+ return -EINPROGRESS;
++ if (err == -EBUSY)
++ return -EAGAIN;
+
+ return err;
+ }
+@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
+ err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
+ if (!err)
+ return -EINPROGRESS;
++ if (err == -EBUSY)
++ return -EAGAIN;
+
+ return err;
+ }
+diff --git a/crypto/rsa.c b/crypto/rsa.c
+index c79613cdce6e44..b9cd11fb7d3672 100644
+--- a/crypto/rsa.c
++++ b/crypto/rsa.c
+@@ -220,6 +220,8 @@ static int rsa_check_exponent_fips(MPI e)
+ }
+
+ e_max = mpi_alloc(0);
++ if (!e_max)
++ return -ENOMEM;
+ mpi_set_bit(e_max, 256);
+
+ if (mpi_cmp(e, e_max) >= 0) {
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+index 442a82c9de7def..b108a30a760014 100644
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -117,6 +117,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+ struct scomp_scratch *scratch;
++ unsigned int dlen;
+ int ret;
+
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+@@ -128,6 +129,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+ req->dlen = SCOMP_SCRATCH_SIZE;
+
++ dlen = req->dlen;
++
+ scratch = raw_cpu_ptr(&scomp_scratch);
+ spin_lock(&scratch->lock);
+
+@@ -145,6 +148,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ ret = -ENOMEM;
+ goto out;
+ }
++ } else if (req->dlen > dlen) {
++ ret = -ENOSPC;
++ goto out;
+ }
+ scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
+ 1);
+diff --git a/crypto/simd.c b/crypto/simd.c
+index edaa479a1ec5e5..d109866641a265 100644
+--- a/crypto/simd.c
++++ b/crypto/simd.c
+@@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
+ return 0;
+ }
+
+-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
++struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
++ const char *algname,
+ const char *drvname,
+ const char *basename)
+ {
+ struct simd_skcipher_alg *salg;
+- struct crypto_skcipher *tfm;
+- struct skcipher_alg *ialg;
+ struct skcipher_alg *alg;
+ int err;
+
+- tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
+- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+- if (IS_ERR(tfm))
+- return ERR_CAST(tfm);
+-
+- ialg = crypto_skcipher_alg(tfm);
+-
+ salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+ if (!salg) {
+ salg = ERR_PTR(-ENOMEM);
+- goto out_put_tfm;
++ goto out;
+ }
+
+ salg->ialg_name = basename;
+@@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+ if (err)
+ goto out_free_salg;
+
+-out_put_tfm:
+- crypto_free_skcipher(tfm);
++out:
+ return salg;
+
+ out_free_salg:
+ kfree(salg);
+ salg = ERR_PTR(err);
+- goto out_put_tfm;
++ goto out;
+ }
+ EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
+
+-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+- const char *basename)
+-{
+- char drvname[CRYPTO_MAX_ALG_NAME];
+-
+- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+- CRYPTO_MAX_ALG_NAME)
+- return ERR_PTR(-ENAMETOOLONG);
+-
+- return simd_skcipher_create_compat(algname, drvname, basename);
+-}
+-EXPORT_SYMBOL_GPL(simd_skcipher_create);
+-
+ void simd_skcipher_free(struct simd_skcipher_alg *salg)
+ {
+ crypto_unregister_skcipher(&salg->alg);
+@@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+ algname = algs[i].base.cra_name + 2;
+ drvname = algs[i].base.cra_driver_name + 2;
+ basename = algs[i].base.cra_driver_name;
+- simd = simd_skcipher_create_compat(algname, drvname, basename);
++ simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto err_unregister;
+@@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm)
+ return 0;
+ }
+
+-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+- const char *drvname,
+- const char *basename)
++static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg,
++ const char *algname,
++ const char *drvname,
++ const char *basename)
+ {
+ struct simd_aead_alg *salg;
+- struct crypto_aead *tfm;
+- struct aead_alg *ialg;
+ struct aead_alg *alg;
+ int err;
+
+- tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
+- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+- if (IS_ERR(tfm))
+- return ERR_CAST(tfm);
+-
+- ialg = crypto_aead_alg(tfm);
+-
+ salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+ if (!salg) {
+ salg = ERR_PTR(-ENOMEM);
+- goto out_put_tfm;
++ goto out;
+ }
+
+ salg->ialg_name = basename;
+@@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+ if (err)
+ goto out_free_salg;
+
+-out_put_tfm:
+- crypto_free_aead(tfm);
++out:
+ return salg;
+
+ out_free_salg:
+ kfree(salg);
+ salg = ERR_PTR(err);
+- goto out_put_tfm;
+-}
+-EXPORT_SYMBOL_GPL(simd_aead_create_compat);
+-
+-struct simd_aead_alg *simd_aead_create(const char *algname,
+- const char *basename)
+-{
+- char drvname[CRYPTO_MAX_ALG_NAME];
+-
+- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+- CRYPTO_MAX_ALG_NAME)
+- return ERR_PTR(-ENAMETOOLONG);
+-
+- return simd_aead_create_compat(algname, drvname, basename);
++ goto out;
+ }
+-EXPORT_SYMBOL_GPL(simd_aead_create);
+
+-void simd_aead_free(struct simd_aead_alg *salg)
++static void simd_aead_free(struct simd_aead_alg *salg)
+ {
+ crypto_unregister_aead(&salg->alg);
+ kfree(salg);
+ }
+-EXPORT_SYMBOL_GPL(simd_aead_free);
+
+ int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs)
+@@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ algname = algs[i].base.cra_name + 2;
+ drvname = algs[i].base.cra_driver_name + 2;
+ basename = algs[i].base.cra_driver_name;
+- simd = simd_aead_create_compat(algname, drvname, basename);
++ simd = simd_aead_create_compat(algs + i, algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto err_unregister;
+diff --git a/crypto/xor.c b/crypto/xor.c
+index 8e72e5d5db0ded..56aa3169e87171 100644
+--- a/crypto/xor.c
++++ b/crypto/xor.c
+@@ -83,33 +83,30 @@ static void __init
+ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
+ {
+ int speed;
+- int i, j;
+- ktime_t min, start, diff;
++ unsigned long reps;
++ ktime_t min, start, t0;
+
+ tmpl->next = template_list;
+ template_list = tmpl;
+
+ preempt_disable();
+
+- min = (ktime_t)S64_MAX;
+- for (i = 0; i < 3; i++) {
+- start = ktime_get();
+- for (j = 0; j < REPS; j++) {
+- mb(); /* prevent loop optimization */
+- tmpl->do_2(BENCH_SIZE, b1, b2);
+- mb();
+- }
+- diff = ktime_sub(ktime_get(), start);
+- if (diff < min)
+- min = diff;
+- }
++ reps = 0;
++ t0 = ktime_get();
++ /* delay start until time has advanced */
++ while ((start = ktime_get()) == t0)
++ cpu_relax();
++ do {
++ mb(); /* prevent loop optimization */
++ tmpl->do_2(BENCH_SIZE, b1, b2);
++ mb();
++ } while (reps++ < REPS || (t0 = ktime_get()) == start);
++ min = ktime_sub(t0, start);
+
+ preempt_enable();
+
+ // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+- if (!min)
+- min = 1;
+- speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
++ speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
+ tmpl->speed = speed;
+
+ pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
+diff --git a/crypto/xts.c b/crypto/xts.c
+index 548b302c6c6a00..038f60dd512d9f 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -28,7 +28,7 @@ struct xts_tfm_ctx {
+
+ struct xts_instance_ctx {
+ struct crypto_skcipher_spawn spawn;
+- char name[CRYPTO_MAX_ALG_NAME];
++ struct crypto_cipher_spawn tweak_spawn;
+ };
+
+ struct xts_request_ctx {
+@@ -306,7 +306,7 @@ static int xts_init_tfm(struct crypto_skcipher *tfm)
+
+ ctx->child = child;
+
+- tweak = crypto_alloc_cipher(ictx->name, 0, 0);
++ tweak = crypto_spawn_cipher(&ictx->tweak_spawn);
+ if (IS_ERR(tweak)) {
+ crypto_free_skcipher(ctx->child);
+ return PTR_ERR(tweak);
+@@ -333,11 +333,13 @@ static void xts_free_instance(struct skcipher_instance *inst)
+ struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ictx->spawn);
++ crypto_drop_cipher(&ictx->tweak_spawn);
+ kfree(inst);
+ }
+
+ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ {
++ char name[CRYPTO_MAX_ALG_NAME];
+ struct skcipher_instance *inst;
+ struct xts_instance_ctx *ctx;
+ struct skcipher_alg *alg;
+@@ -363,13 +365,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ cipher_name, 0, mask);
+ if (err == -ENOENT) {
+ err = -ENAMETOOLONG;
+- if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
++ if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+ cipher_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ err = crypto_grab_skcipher(&ctx->spawn,
+ skcipher_crypto_instance(inst),
+- ctx->name, 0, mask);
++ name, 0, mask);
+ }
+
+ if (err)
+@@ -398,23 +400,28 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
+ if (!strncmp(cipher_name, "ecb(", 4)) {
+ int len;
+
+- len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
++ len = strscpy(name, cipher_name + 4, sizeof(name));
+ if (len < 2)
+ goto err_free_inst;
+
+- if (ctx->name[len - 1] != ')')
++ if (name[len - 1] != ')')
+ goto err_free_inst;
+
+- ctx->name[len - 1] = 0;
++ name[len - 1] = 0;
+
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+- "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
++ "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_free_inst;
+ }
+ } else
+ goto err_free_inst;
+
++ err = crypto_grab_cipher(&ctx->tweak_spawn,
++ skcipher_crypto_instance(inst), name, 0, mask);
++ if (err)
++ goto err_free_inst;
++
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
+index 4a9baf02439e42..8827cb78ca9d8c 100644
+--- a/drivers/accel/drm_accel.c
++++ b/drivers/accel/drm_accel.c
+@@ -8,7 +8,7 @@
+
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
+-#include <linux/idr.h>
++#include <linux/xarray.h>
+
+ #include <drm/drm_accel.h>
+ #include <drm/drm_debugfs.h>
+@@ -17,8 +17,7 @@
+ #include <drm/drm_ioctl.h>
+ #include <drm/drm_print.h>
+
+-static DEFINE_SPINLOCK(accel_minor_lock);
+-static struct idr accel_minors_idr;
++DEFINE_XARRAY_ALLOC(accel_minors_xa);
+
+ static struct dentry *accel_debugfs_root;
+ static struct class *accel_class;
+@@ -120,99 +119,6 @@ void accel_set_device_instance_params(struct device *kdev, int index)
+ kdev->type = &accel_sysfs_device_minor;
+ }
+
+-/**
+- * accel_minor_alloc() - Allocates a new accel minor
+- *
+- * This function access the accel minors idr and allocates from it
+- * a new id to represent a new accel minor
+- *
+- * Return: A new id on success or error code in case idr_alloc failed
+- */
+-int accel_minor_alloc(void)
+-{
+- unsigned long flags;
+- int r;
+-
+- spin_lock_irqsave(&accel_minor_lock, flags);
+- r = idr_alloc(&accel_minors_idr, NULL, 0, ACCEL_MAX_MINORS, GFP_NOWAIT);
+- spin_unlock_irqrestore(&accel_minor_lock, flags);
+-
+- return r;
+-}
+-
+-/**
+- * accel_minor_remove() - Remove an accel minor
+- * @index: The minor id to remove.
+- *
+- * This function access the accel minors idr and removes from
+- * it the member with the id that is passed to this function.
+- */
+-void accel_minor_remove(int index)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(&accel_minor_lock, flags);
+- idr_remove(&accel_minors_idr, index);
+- spin_unlock_irqrestore(&accel_minor_lock, flags);
+-}
+-
+-/**
+- * accel_minor_replace() - Replace minor pointer in accel minors idr.
+- * @minor: Pointer to the new minor.
+- * @index: The minor id to replace.
+- *
+- * This function access the accel minors idr structure and replaces the pointer
+- * that is associated with an existing id. Because the minor pointer can be
+- * NULL, we need to explicitly pass the index.
+- *
+- * Return: 0 for success, negative value for error
+- */
+-void accel_minor_replace(struct drm_minor *minor, int index)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(&accel_minor_lock, flags);
+- idr_replace(&accel_minors_idr, minor, index);
+- spin_unlock_irqrestore(&accel_minor_lock, flags);
+-}
+-
+-/*
+- * Looks up the given minor-ID and returns the respective DRM-minor object. The
+- * refence-count of the underlying device is increased so you must release this
+- * object with accel_minor_release().
+- *
+- * The object can be only a drm_minor that represents an accel device.
+- *
+- * As long as you hold this minor, it is guaranteed that the object and the
+- * minor->dev pointer will stay valid! However, the device may get unplugged and
+- * unregistered while you hold the minor.
+- */
+-static struct drm_minor *accel_minor_acquire(unsigned int minor_id)
+-{
+- struct drm_minor *minor;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&accel_minor_lock, flags);
+- minor = idr_find(&accel_minors_idr, minor_id);
+- if (minor)
+- drm_dev_get(minor->dev);
+- spin_unlock_irqrestore(&accel_minor_lock, flags);
+-
+- if (!minor) {
+- return ERR_PTR(-ENODEV);
+- } else if (drm_dev_is_unplugged(minor->dev)) {
+- drm_dev_put(minor->dev);
+- return ERR_PTR(-ENODEV);
+- }
+-
+- return minor;
+-}
+-
+-static void accel_minor_release(struct drm_minor *minor)
+-{
+- drm_dev_put(minor->dev);
+-}
+-
+ /**
+ * accel_open - open method for ACCEL file
+ * @inode: device inode
+@@ -230,7 +136,7 @@ int accel_open(struct inode *inode, struct file *filp)
+ struct drm_minor *minor;
+ int retcode;
+
+- minor = accel_minor_acquire(iminor(inode));
++ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+
+@@ -249,7 +155,7 @@ int accel_open(struct inode *inode, struct file *filp)
+
+ err_undo:
+ atomic_dec(&dev->open_count);
+- accel_minor_release(minor);
++ drm_minor_release(minor);
+ return retcode;
+ }
+ EXPORT_SYMBOL_GPL(accel_open);
+@@ -260,7 +166,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
+ struct drm_minor *minor;
+ int err;
+
+- minor = accel_minor_acquire(iminor(inode));
++ minor = drm_minor_acquire(&accel_minors_xa, iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+
+@@ -277,7 +183,7 @@ static int accel_stub_open(struct inode *inode, struct file *filp)
+ err = 0;
+
+ out:
+- accel_minor_release(minor);
++ drm_minor_release(minor);
+
+ return err;
+ }
+@@ -293,15 +199,13 @@ void accel_core_exit(void)
+ unregister_chrdev(ACCEL_MAJOR, "accel");
+ debugfs_remove(accel_debugfs_root);
+ accel_sysfs_destroy();
+- idr_destroy(&accel_minors_idr);
++ WARN_ON(!xa_empty(&accel_minors_xa));
+ }
+
+ int __init accel_core_init(void)
+ {
+ int ret;
+
+- idr_init(&accel_minors_idr);
+-
+ ret = accel_sysfs_init();
+ if (ret < 0) {
+ DRM_ERROR("Cannot create ACCEL class: %d\n", ret);
+diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
+index 9e84a47a21dcf4..7d733e4d506114 100644
+--- a/drivers/accel/habanalabs/common/debugfs.c
++++ b/drivers/accel/habanalabs/common/debugfs.c
+@@ -1645,19 +1645,19 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
+ &hl_data64b_fops);
+
+ debugfs_create_file("set_power_state",
+- 0200,
++ 0644,
+ root,
+ dev_entry,
+ &hl_power_fops);
+
+ debugfs_create_file("device",
+- 0200,
++ 0644,
+ root,
+ dev_entry,
+ &hl_device_fops);
+
+ debugfs_create_file("clk_gate",
+- 0200,
++ 0644,
+ root,
+ dev_entry,
+ &hl_clk_gate_fops);
+@@ -1669,13 +1669,13 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
+ &hl_stop_on_err_fops);
+
+ debugfs_create_file("dump_security_violations",
+- 0644,
++ 0400,
+ root,
+ dev_entry,
+ &hl_security_violations_fops);
+
+ debugfs_create_file("dump_razwi_events",
+- 0644,
++ 0400,
+ root,
+ dev_entry,
+ &hl_razwi_check_fops);
+@@ -1708,7 +1708,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
+ &hdev->reset_info.skip_reset_on_timeout);
+
+ debugfs_create_file("state_dump",
+- 0600,
++ 0644,
+ root,
+ dev_entry,
+ &hl_state_dump_fops);
+@@ -1726,7 +1726,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
+
+ for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ debugfs_create_file(hl_debugfs_list[i].name,
+- 0444,
++ 0644,
+ root,
+ entry,
+ &hl_debugfs_fops);
+diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
+index b97339d1f7c6ea..ebef56478e1852 100644
+--- a/drivers/accel/habanalabs/common/device.c
++++ b/drivers/accel/habanalabs/common/device.c
+@@ -808,6 +808,10 @@ static int device_early_init(struct hl_device *hdev)
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
+ break;
++ case ASIC_GAUDI2C:
++ gaudi2_set_asic_funcs(hdev);
++ strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
++ break;
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
+index 2f027d5a820647..179e5e7013a120 100644
+--- a/drivers/accel/habanalabs/common/habanalabs.h
++++ b/drivers/accel/habanalabs/common/habanalabs.h
+@@ -1220,6 +1220,7 @@ struct hl_dec {
+ * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
+ * @ASIC_GAUDI2: Gaudi2 device.
+ * @ASIC_GAUDI2B: Gaudi2B device.
++ * @ASIC_GAUDI2C: Gaudi2C device.
+ */
+ enum hl_asic_type {
+ ASIC_INVALID,
+@@ -1228,6 +1229,7 @@ enum hl_asic_type {
+ ASIC_GAUDI_SEC,
+ ASIC_GAUDI2,
+ ASIC_GAUDI2B,
++ ASIC_GAUDI2C,
+ };
+
+ struct hl_cs_parser;
+@@ -2506,7 +2508,7 @@ struct hl_state_dump_specs {
+ * DEVICES
+ */
+
+-#define HL_STR_MAX 32
++#define HL_STR_MAX 64
+
+ #define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
+
+diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
+index 7263e84c1a4dc3..010bf63fcca394 100644
+--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
++++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
+@@ -101,6 +101,9 @@ static enum hl_asic_type get_asic_type(struct hl_device *hdev)
+ case REV_ID_B:
+ asic_type = ASIC_GAUDI2B;
+ break;
++ case REV_ID_C:
++ asic_type = ASIC_GAUDI2C;
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+index 6a45a92344e9bd..a7f6c54c123eff 100644
+--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
++++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+@@ -682,7 +682,7 @@ static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+ if (!sec_attest_info)
+ return -ENOMEM;
+
+- info = kmalloc(sizeof(*info), GFP_KERNEL);
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto free_sec_attest_info;
+diff --git a/drivers/accel/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c
+index b1010d206c2ef1..813315cea4a7b0 100644
+--- a/drivers/accel/habanalabs/common/irq.c
++++ b/drivers/accel/habanalabs/common/irq.c
+@@ -271,6 +271,9 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
+ free_node->cq_cb = pend->ts_reg_info.cq_cb;
+ list_add(&free_node->free_objects_node, *free_list);
+
++ /* Mark TS record as free */
++ pend->ts_reg_info.in_use = false;
++
+ return 0;
+ }
+
+diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
+index 4fc72a07d2f59a..5b7d9a351133fe 100644
+--- a/drivers/accel/habanalabs/common/memory.c
++++ b/drivers/accel/habanalabs/common/memory.c
+@@ -1878,16 +1878,16 @@ static int export_dmabuf(struct hl_ctx *ctx,
+
+ static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
+ {
+- if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
++ if (!PAGE_ALIGNED(device_addr)) {
+ dev_dbg(hdev->dev,
+- "exported device memory address 0x%llx should be aligned to 0x%lx\n",
++ "exported device memory address 0x%llx should be aligned to PAGE_SIZE 0x%lx\n",
+ device_addr, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+- if (size < PAGE_SIZE) {
++ if (!size || !PAGE_ALIGNED(size)) {
+ dev_dbg(hdev->dev,
+- "exported device memory size %llu should be equal to or greater than %lu\n",
++ "exported device memory size %llu should be a multiple of PAGE_SIZE %lu\n",
+ size, PAGE_SIZE);
+ return -EINVAL;
+ }
+@@ -1938,6 +1938,13 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s
+ if (rc)
+ return rc;
+
++ if (!PAGE_ALIGNED(offset)) {
++ dev_dbg(hdev->dev,
++ "exported device memory offset %llu should be a multiple of PAGE_SIZE %lu\n",
++ offset, PAGE_SIZE);
++ return -EINVAL;
++ }
++
+ if ((offset + size) > phys_pg_pack->total_size) {
+ dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
+ offset, size, phys_pg_pack->total_size);
+diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
+index b2145716c60533..b654302a68fc08 100644
+--- a/drivers/accel/habanalabs/common/mmu/mmu.c
++++ b/drivers/accel/habanalabs/common/mmu/mmu.c
+@@ -596,6 +596,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
+ break;
+ case ASIC_GAUDI2:
+ case ASIC_GAUDI2B:
++ case ASIC_GAUDI2C:
+ /* MMUs in Gaudi2 are always host resident */
+ hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
+ break;
+diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
+index 01f89f029355e6..27860637305556 100644
+--- a/drivers/accel/habanalabs/common/sysfs.c
++++ b/drivers/accel/habanalabs/common/sysfs.c
+@@ -251,6 +251,9 @@ static ssize_t device_type_show(struct device *dev,
+ case ASIC_GAUDI2B:
+ str = "GAUDI2B";
+ break;
++ case ASIC_GAUDI2C:
++ str = "GAUDI2C";
++ break;
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+index 20c4583f12b0d2..31c74ca70a2e5c 100644
+--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
++++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
+@@ -8149,11 +8149,11 @@ static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u
+ eng_id[num_of_eng] = razwi_info[i].eng_id;
+ base[num_of_eng] = razwi_info[i].rtr_ctrl;
+ if (!num_of_eng)
+- str_size += snprintf(eng_name + str_size,
++ str_size += scnprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
+ razwi_info[i].eng_name);
+ else
+- str_size += snprintf(eng_name + str_size,
++ str_size += scnprintf(eng_name + str_size,
+ PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
+ razwi_info[i].eng_name);
+ num_of_eng++;
+diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+index 2742b1f801eb2a..493e556cd31b74 100644
+--- a/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
++++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
+@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
+ mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
+ mmDCORE0_EDMA0_CORE_CTX_IDX,
+ mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
++ mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
+ mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_0,
+ mmDCORE0_EDMA0_QM_CQ_CFG0_1,
+@@ -1601,6 +1602,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
+ mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
+ mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
++ mmDCORE0_TPC0_CFG_TPC_COUNT,
+ mmDCORE0_TPC0_CFG_TPC_ID,
+ mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
+ mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,
+diff --git a/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
+index f5d497dc9bdc17..4f951cada07766 100644
+--- a/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
++++ b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
+@@ -25,6 +25,7 @@ enum hl_revision_id {
+ REV_ID_INVALID = 0x00,
+ REV_ID_A = 0x01,
+ REV_ID_B = 0x02,
++ REV_ID_C = 0x03
+ };
+
+ #endif /* INCLUDE_PCI_GENERAL_H_ */
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 7e9359611d69ca..5619980d9edda7 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -467,9 +467,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
+ /* Clear any pending errors */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
+
+- /* VPU 37XX does not require 10m D3hot delay */
+- if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
+- pdev->d3hot_delay = 0;
++ /* NPU does not require 10m D3hot delay */
++ pdev->d3hot_delay = 0;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+@@ -518,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
+ vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
+ vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
+ atomic64_set(&vdev->unique_id_counter, 0);
+- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
++ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+
+diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
+index 2adc349126bb66..6853dfe1c7e585 100644
+--- a/drivers/accel/ivpu/ivpu_drv.h
++++ b/drivers/accel/ivpu/ivpu_drv.h
+@@ -76,6 +76,11 @@
+
+ #define IVPU_WA(wa_name) (vdev->wa.wa_name)
+
++#define IVPU_PRINT_WA(wa_name) do { \
++ if (IVPU_WA(wa_name)) \
++ ivpu_dbg(vdev, MISC, "Using WA: " #wa_name "\n"); \
++} while (0)
++
+ struct ivpu_wa_table {
+ bool punit_disabled;
+ bool clear_runtime_mem;
+diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
+index a277bbae78fc45..3b35d262ddd43a 100644
+--- a/drivers/accel/ivpu/ivpu_fw.c
++++ b/drivers/accel/ivpu/ivpu_fw.c
+@@ -55,6 +55,10 @@ static struct {
+ { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+ };
+
++/* Production fw_names from the table above */
++MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
++MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
++
+ static int ivpu_fw_request(struct ivpu_device *vdev)
+ {
+ int ret = -ENOENT;
+diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
+index 18be8b98e9a8b3..c0de7c0c991f56 100644
+--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
+@@ -53,10 +53,12 @@
+
+ #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
+
+-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+- (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
++#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+
++#define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \
++ (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)))
++
+ #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+ #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
+
+@@ -102,8 +104,17 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+ vdev->wa.clear_runtime_mem = false;
+ vdev->wa.d3hot_after_power_off = true;
+
+- if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
++ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
++ if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
++ /* Writing 1s does not clear the interrupt status register */
+ vdev->wa.interrupt_clear_with_0 = true;
++ REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
++ }
++
++ IVPU_PRINT_WA(punit_disabled);
++ IVPU_PRINT_WA(clear_runtime_mem);
++ IVPU_PRINT_WA(d3hot_after_power_off);
++ IVPU_PRINT_WA(interrupt_clear_with_0);
+ }
+
+ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+@@ -536,12 +547,22 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+ return ret;
+ }
+
++static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
++{
++ ivpu_boot_dpu_active_drive(vdev, false);
++ ivpu_boot_pwr_island_isolation_drive(vdev, true);
++ ivpu_boot_pwr_island_trickle_drive(vdev, false);
++ ivpu_boot_pwr_island_drive(vdev, false);
++
++ return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
++}
++
+ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+ {
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
++ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+@@ -625,30 +646,26 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
+ ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
+
++ ivpu_hw_read_platform(vdev);
++ ivpu_hw_wa_init(vdev);
++ ivpu_hw_timeouts_init(vdev);
++
+ return 0;
+ }
+
+ static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
+ {
+- int ret;
+- u32 val;
+-
+- if (IVPU_WA(punit_disabled))
+- return 0;
++ int ret = 0;
+
+- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+- if (ret) {
+- ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+- return ret;
++ if (ivpu_boot_pwr_domain_disable(vdev)) {
++ ivpu_err(vdev, "Failed to disable power domain\n");
++ ret = -EIO;
+ }
+
+- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+- val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+- REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+-
+- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+- if (ret)
+- ivpu_err(vdev, "Timed out waiting for RESET completion\n");
++ if (ivpu_pll_disable(vdev)) {
++ ivpu_err(vdev, "Failed to disable PLL\n");
++ ret = -EIO;
++ }
+
+ return ret;
+ }
+@@ -681,14 +698,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
+ {
+ int ret;
+
+- ivpu_hw_read_platform(vdev);
+- ivpu_hw_wa_init(vdev);
+- ivpu_hw_timeouts_init(vdev);
+-
+- ret = ivpu_hw_37xx_reset(vdev);
+- if (ret)
+- ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
+-
+ ret = ivpu_hw_37xx_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+@@ -756,11 +765,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
+ {
+ int ret = 0;
+
+- if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
+- ivpu_err(vdev, "Failed to reset the VPU\n");
++ if (!ivpu_hw_37xx_is_idle(vdev))
++ ivpu_warn(vdev, "VPU not idle during power down\n");
+
+- if (ivpu_pll_disable(vdev)) {
+- ivpu_err(vdev, "Failed to disable PLL\n");
++ if (ivpu_hw_37xx_reset(vdev)) {
++ ivpu_err(vdev, "Failed to reset VPU\n");
+ ret = -EIO;
+ }
+
+diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
+index 85171a408363fa..cced6278c4f893 100644
+--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
+@@ -24,7 +24,7 @@
+ #define SKU_HW_ID_SHIFT 16u
+ #define SKU_HW_ID_MASK 0xffff0000u
+
+-#define PLL_CONFIG_DEFAULT 0x1
++#define PLL_CONFIG_DEFAULT 0x0
+ #define PLL_CDYN_DEFAULT 0x80
+ #define PLL_EPP_DEFAULT 0x80
+ #define PLL_REF_CLK_FREQ (50 * 1000000)
+@@ -125,6 +125,10 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+
+ if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
+ vdev->wa.disable_clock_relinquish = true;
++
++ IVPU_PRINT_WA(punit_disabled);
++ IVPU_PRINT_WA(clear_runtime_mem);
++ IVPU_PRINT_WA(disable_clock_relinquish);
+ }
+
+ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+@@ -519,7 +523,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
+- val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
++ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+@@ -693,7 +697,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+ {
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 tile_disable;
+- u32 tile_enable;
+ u32 fuse;
+
+ fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
+@@ -714,10 +717,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+ else
+ ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
+
+- tile_enable = (~tile_disable) & TILE_MAX_MASK;
+-
+- hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
+- hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
+ hw->tile_fuse = tile_disable;
+ hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+
+@@ -728,6 +727,10 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+ ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
+ ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+
++ ivpu_hw_read_platform(vdev);
++ ivpu_hw_wa_init(vdev);
++ ivpu_hw_timeouts_init(vdev);
++
+ return 0;
+ }
+
+@@ -819,10 +822,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
+ return ret;
+ }
+
+- ivpu_hw_read_platform(vdev);
+- ivpu_hw_wa_init(vdev);
+- ivpu_hw_timeouts_init(vdev);
+-
+ ret = ivpu_hw_40xx_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
+index de9e69f70af7e2..76f468c9f761bf 100644
+--- a/drivers/accel/ivpu/ivpu_job.c
++++ b/drivers/accel/ivpu/ivpu_job.c
+@@ -618,6 +618,5 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev)
+
+ void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+ {
+- kthread_stop(vdev->job_done_thread);
+- put_task_struct(vdev->job_done_thread);
++ kthread_stop_put(vdev->job_done_thread);
+ }
+diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
+index baefaf7bb3cbb9..d04a28e0524855 100644
+--- a/drivers/accel/ivpu/ivpu_mmu.c
++++ b/drivers/accel/ivpu/ivpu_mmu.c
+@@ -491,7 +491,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
+ mmu->cmdq.cons = 0;
+
+ memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
+- clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
+ mmu->evtq.prod = 0;
+ mmu->evtq.cons = 0;
+
+@@ -805,8 +804,6 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
+ if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ return NULL;
+
+- clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
+-
+ evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+
+diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
+index 5036e58e7235bd..1405623b03e4ee 100644
+--- a/drivers/accel/qaic/mhi_controller.c
++++ b/drivers/accel/qaic/mhi_controller.c
+@@ -404,8 +404,21 @@ static struct mhi_controller_config aic100_config = {
+
+ static int mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 *out)
+ {
+- u32 tmp = readl_relaxed(addr);
++ u32 tmp;
+
++ /*
++ * SOC_HW_VERSION quirk
++ * The SOC_HW_VERSION register (offset 0x224) is not reliable and
++ * may contain uninitialized values, including 0xFFFFFFFF. This could
++ * cause a false positive link down error. Instead, intercept any
++ * reads and provide the correct value of the register.
++ */
++ if (addr - mhi_cntrl->regs == 0x224) {
++ *out = 0x60110200;
++ return 0;
++ }
++
++ tmp = readl_relaxed(addr);
+ if (tmp == U32_MAX)
+ return -EIO;
+
+diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
+index f4b06792c6f1c0..ed1a5af434f246 100644
+--- a/drivers/accel/qaic/qaic_data.c
++++ b/drivers/accel/qaic/qaic_data.c
+@@ -766,7 +766,6 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_
+ struct dma_buf_attachment *attach;
+ struct drm_gem_object *obj;
+ struct qaic_bo *bo;
+- size_t size;
+ int ret;
+
+ bo = qaic_alloc_init_bo();
+@@ -784,13 +783,12 @@ struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_
+ goto attach_fail;
+ }
+
+- size = PAGE_ALIGN(attach->dmabuf->size);
+- if (size == 0) {
++ if (!attach->dmabuf->size) {
+ ret = -EINVAL;
+ goto size_align_fail;
+ }
+
+- drm_gem_private_object_init(dev, obj, size);
++ drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
+ /*
+ * skipping dma_buf_map_attachment() as we do not know the direction
+ * just yet. Once the direction is known in the subsequent IOCTL to
+diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
+index 1fbc9b921c4fcc..f677ad2177c2f2 100644
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
+ }
+ attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
+ buf[cnt++] = attr_ch;
+- while (tmpx < vc->vc_cols - 1) {
++ while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) {
+ tmp_pos += 2;
+ tmpx++;
+ ch = get_char(vc, (u_short *)tmp_pos, &temp);
+diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
+index eea2a2fa4f0159..45f90610313382 100644
+--- a/drivers/accessibility/speakup/synth.c
++++ b/drivers/accessibility/speakup/synth.c
+@@ -208,8 +208,10 @@ void spk_do_flush(void)
+ wake_up_process(speakup_task);
+ }
+
+-void synth_write(const char *buf, size_t count)
++void synth_write(const char *_buf, size_t count)
+ {
++ const unsigned char *buf = (const unsigned char *) _buf;
++
+ while (count--)
+ synth_buffer_add(*buf++);
+ synth_start();
+diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
+index e120a96e1eaee8..ca87a093913599 100644
+--- a/drivers/acpi/acpi_extlog.c
++++ b/drivers/acpi/acpi_extlog.c
+@@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
+ static u32 err_seq;
+
+ estatus = extlog_elog_entry_check(cpu, bank);
+- if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC))
++ if (!estatus)
+ return NOTIFY_DONE;
+
++ if (mce->kflags & MCE_HANDLED_CEC) {
++ estatus->block_status = 0;
++ return NOTIFY_DONE;
++ }
++
+ memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN);
+ /* clear record status to enable BIOS to update it again */
+ estatus->block_status = 0;
+@@ -303,9 +308,10 @@ static int __init extlog_init(void)
+ static void __exit extlog_exit(void)
+ {
+ mce_unregister_decode_chain(&extlog_mce_dec);
+- ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
+- if (extlog_l1_addr)
++ if (extlog_l1_addr) {
++ ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
+ acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
++ }
+ if (elog_addr)
+ acpi_os_unmap_iomem(elog_addr, elog_size);
+ release_mem_region(elog_base, elog_size);
+diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
+index a2056c4c8cb709..271092f2700a1e 100644
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_header = (void *)subtable_header + offset;
+ offset += record_header->length;
+
++ if (!record_header->length) {
++ pr_err(FW_BUG "Zero-length record found in FPTD.\n");
++ result = -EINVAL;
++ goto err;
++ }
++
+ switch (record_header->type) {
+ case RECORD_S3_RESUME:
+ if (subtable_type != SUBTABLE_S3PT) {
+ pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+- return -EINVAL;
++ result = -EINVAL;
++ goto err;
+ }
+ if (record_resume) {
+ pr_err("Duplicate resume performance record found.\n");
+@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_resume = (struct resume_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+ case RECORD_S3_SUSPEND:
+ if (subtable_type != SUBTABLE_S3PT) {
+@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_suspend = (struct suspend_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+ case RECORD_BOOT:
+ if (subtable_type != SUBTABLE_FBPT) {
+ pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ record_header->type, signature);
+- return -EINVAL;
++ result = -EINVAL;
++ goto err;
+ }
+ if (record_boot) {
+ pr_err("Duplicate boot performance record found.\n");
+@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_boot = (struct boot_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+
+ default:
+@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ }
+ }
+ return 0;
++
++err:
++ if (record_boot)
++ sysfs_remove_group(fpdt_kobj, &boot_attr_group);
++
++ if (record_suspend)
++ sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
++
++ if (record_resume)
++ sysfs_remove_group(fpdt_kobj, &resume_attr_group);
++
++ return result;
+ }
+
+ static int __init acpi_init_fpdt(void)
+@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
+ struct acpi_table_header *header;
+ struct fpdt_subtable_entry *subtable;
+ u32 offset = sizeof(*header);
++ int result;
+
+ status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+
+@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
+
+ fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ if (!fpdt_kobj) {
+- acpi_put_table(header);
+- return -ENOMEM;
++ result = -ENOMEM;
++ goto err_nomem;
+ }
+
+ while (offset < header->length) {
+@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
+ switch (subtable->type) {
+ case SUBTABLE_FBPT:
+ case SUBTABLE_S3PT:
+- fpdt_process_subtable(subtable->address,
++ result = fpdt_process_subtable(subtable->address,
+ subtable->type);
++ if (result)
++ goto err_subtable;
+ break;
+ default:
+ /* Other types are reserved in ACPI 6.4 spec. */
+@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
+ offset += sizeof(*subtable);
+ }
+ return 0;
++err_subtable:
++ kobject_put(fpdt_kobj);
++
++err_nomem:
++ acpi_put_table(header);
++ return result;
+ }
+
+ fs_initcall(acpi_init_fpdt);
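
The fpdt error path above tears down only the sysfs groups whose records were actually created before handing `result` back to the caller. A minimal stand-alone sketch of that conditional-unwind idiom (hypothetical names, not the kernel's own helpers):

    #include <stdio.h>

    /* 0 on success, negative on failure, in errno style. */
    static int create_group(const char *name)
    {
        printf("create %s\n", name);
        return 0;
    }

    static void remove_group(const char *name)
    {
        printf("remove %s\n", name);
    }

    static int process_subtables(void)
    {
        int have_boot = 0, have_resume = 0;
        int result;

        result = create_group("boot");
        if (result)
            goto err;
        have_boot = 1;

        result = create_group("resume");
        if (result)
            goto err;
        have_resume = 1;

        return 0;

    err:
        /* Tear down only what was actually set up. */
        if (have_resume)
            remove_group("resume");
        if (have_boot)
            remove_group("boot");
        return result;
    }

    int main(void)
    {
        return process_subtables() ? 1 : 0;
    }
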
+diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
+index c5598b6d5db8b0..794962c5c88e95 100644
+--- a/drivers/acpi/acpi_lpit.c
++++ b/drivers/acpi/acpi_lpit.c
+@@ -105,7 +105,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
+ return;
+
+ info->frequency = lpit_native->counter_frequency ?
+- lpit_native->counter_frequency : tsc_khz * 1000;
++ lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U);
+ if (!info->frequency)
+ info->frequency = 1;
+
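
The lpit change above matters because `tsc_khz * 1000` is evaluated as a 32-bit multiply and truncated before the product ever reaches the 64-bit frequency field; `mul_u32_u32()` widens the operands first. A stand-alone demonstration, with a userspace stand-in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's mul_u32_u32(). */
    static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
    {
        return (uint64_t)a * b;    /* widen before multiplying */
    }

    int main(void)
    {
        uint32_t tsc_khz = 4400000;              /* a 4.4 GHz TSC */
        uint64_t wrong = tsc_khz * 1000U;        /* 32-bit multiply, wraps */
        uint64_t right = mul_u32_u32(tsc_khz, 1000U);

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }

This prints wrong=105032704 right=4400000000: anything above roughly 4.29 GHz overflows the narrow multiply.
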
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 539e700de4d289..98a2ab3b684428 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -333,6 +333,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
+
+ static const struct property_entry bsw_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
++ PROPERTY_ENTRY_U32("num-cs", 2),
+ { }
+ };
+
+@@ -465,8 +466,9 @@ static int register_device_clock(struct acpi_device *adev,
+ if (!clk_name)
+ return -ENOMEM;
+ clk = clk_register_fractional_divider(NULL, clk_name, parent,
++ 0, prv_base, 1, 15, 16, 15,
+ CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+- prv_base, 1, 15, 16, 15, 0, NULL);
++ NULL);
+ parent = clk_name;
+
+ clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
+diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
+index 7a453c5ff303a9..71e25c79897628 100644
+--- a/drivers/acpi/acpi_pad.c
++++ b/drivers/acpi/acpi_pad.c
+@@ -131,8 +131,10 @@ static void exit_round_robin(unsigned int tsk_index)
+ {
+ struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+
+- cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+- tsk_in_cpu[tsk_index] = -1;
++ if (tsk_in_cpu[tsk_index] != -1) {
++ cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
++ tsk_in_cpu[tsk_index] = -1;
++ }
+ }
+
+ static unsigned int idle_pct = 5; /* percentage */
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 0f5218e361df5c..7053f1b9fc1ddc 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -415,7 +415,7 @@ static int acpi_processor_add(struct acpi_device *device,
+
+ result = acpi_processor_get_info(device);
+ if (result) /* Processor is not physically present or unavailable */
+- return 0;
++ goto err_clear_driver_data;
+
+ BUG_ON(pr->id >= nr_cpu_ids);
+
+@@ -430,7 +430,7 @@ static int acpi_processor_add(struct acpi_device *device,
+ "BIOS reported wrong ACPI id %d for the processor\n",
+ pr->id);
+ /* Give up, but do not abort the namespace scan. */
+- goto err;
++ goto err_clear_driver_data;
+ }
+ /*
+ * processor_device_array is not cleared on errors to allow buggy BIOS
+@@ -442,12 +442,12 @@ static int acpi_processor_add(struct acpi_device *device,
+ dev = get_cpu_device(pr->id);
+ if (!dev) {
+ result = -ENODEV;
+- goto err;
++ goto err_clear_per_cpu;
+ }
+
+ result = acpi_bind_one(dev, device);
+ if (result)
+- goto err;
++ goto err_clear_per_cpu;
+
+ pr->dev = dev;
+
+@@ -458,10 +458,11 @@ static int acpi_processor_add(struct acpi_device *device,
+ dev_err(dev, "Processor driver could not be attached\n");
+ acpi_unbind_one(dev);
+
+- err:
+- free_cpumask_var(pr->throttling.shared_cpu_map);
+- device->driver_data = NULL;
++ err_clear_per_cpu:
+ per_cpu(processors, pr->id) = NULL;
++ err_clear_driver_data:
++ device->driver_data = NULL;
++ free_cpumask_var(pr->throttling.shared_cpu_map);
+ err_free_pr:
+ kfree(pr);
+ return result;
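
The relabelled ladder above restores the rule that error labels undo setup steps in exactly the reverse order they were performed, and that a failure jumps to the label matching what has been completed so far. The same shape, reduced to a sketch with hypothetical resources:

    #include <stdlib.h>

    struct ctx { void *a, *b; };

    static int ctx_setup(struct ctx *c)
    {
        int result;

        c->a = malloc(16);
        if (!c->a) {
            result = -1;
            goto err;
        }

        c->b = malloc(16);
        if (!c->b) {
            result = -1;
            goto err_free_a;    /* undo only the step that succeeded */
        }

        return 0;

    err_free_a:                 /* labels run in reverse setup order */
        free(c->a);
        c->a = NULL;
    err:
        return result;
    }

    int main(void)
    {
        struct ctx c = { 0, 0 };
        int ret = ctx_setup(&c);

        free(c.b);
        free(c.a);
        return ret ? 1 : 0;
    }
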
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index b411948594ff89..a971770e24ff90 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
+ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+ {
+- struct acpi_device *device = cooling_dev->devdata;
+- struct acpi_video_device *video = acpi_driver_data(device);
++ struct acpi_video_device *video = cooling_dev->devdata;
+
+ *state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
+ return 0;
+@@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
+ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+ {
+- struct acpi_device *device = cooling_dev->devdata;
+- struct acpi_video_device *video = acpi_driver_data(device);
++ struct acpi_video_device *video = cooling_dev->devdata;
+ unsigned long long level;
+ int offset;
+
+@@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ static int
+ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
+ {
+- struct acpi_device *device = cooling_dev->devdata;
+- struct acpi_video_device *video = acpi_driver_data(device);
++ struct acpi_video_device *video = cooling_dev->devdata;
+ int level;
+
+ if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
+@@ -503,6 +500,15 @@ static const struct dmi_system_id video_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
+ },
+ },
++ {
++ .callback = video_set_report_key_events,
++ .driver_data = (void *)((uintptr_t)REPORT_BRIGHTNESS_KEY_EVENTS),
++ .ident = "COLORFUL X15 AT 23",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "COLORFUL"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X15 AT 23"),
++ },
++ },
+ /*
+ * Some machines change the brightness themselves when a brightness
+ * hotkey gets pressed, despite us telling them not to. In this case
+@@ -1125,7 +1131,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
+
+ strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+- device->driver_data = data;
+
+ data->device_id = device_id;
+ data->video = video;
+@@ -1717,12 +1722,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+ return;
+ count++;
+
+- acpi_get_parent(device->dev->handle, &acpi_parent);
+-
+- pdev = acpi_get_pci_dev(acpi_parent);
+- if (pdev) {
+- parent = &pdev->dev;
+- pci_dev_put(pdev);
++ if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) {
++ pdev = acpi_get_pci_dev(acpi_parent);
++ if (pdev) {
++ parent = &pdev->dev;
++ pci_dev_put(pdev);
++ }
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+@@ -1747,8 +1752,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+ device->backlight->props.brightness =
+ acpi_video_get_brightness(device->backlight);
+
+- device->cooling_dev = thermal_cooling_device_register("LCD",
+- device->dev, &video_cooling_ops);
++ device->cooling_dev = thermal_cooling_device_register("LCD", device,
++ &video_cooling_ops);
+ if (IS_ERR(device->cooling_dev)) {
+ /*
+ * Set cooling_dev to NULL so we don't crash trying to free it.
+@@ -2031,7 +2036,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
+ * HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
+ * evaluated to have functional panel brightness control.
+ */
+- acpi_device_fix_up_power_extended(device);
++ acpi_device_fix_up_power_children(device);
+
+ pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
+ ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
+diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
+index 30f3fc13c29d12..8d18af396de92c 100644
+--- a/drivers/acpi/acpica/Makefile
++++ b/drivers/acpi/acpica/Makefile
+@@ -5,6 +5,7 @@
+
+ ccflags-y := -D_LINUX -DBUILDING_ACPICA
+ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
++CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation)
+
+ # use acpi.o to put all files here into acpi.o modparam namespace
+ obj-y += acpi.o
+diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
+index ddd072cbc738d4..1c5218b79fc2ac 100644
+--- a/drivers/acpi/acpica/acevents.h
++++ b/drivers/acpi/acpica/acevents.h
+@@ -188,7 +188,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked);
+
+ void
+-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
++acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
+ acpi_adr_space_type space_id, u32 function);
+
+ acpi_status
+diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
+index 2b84ac093698a3..8dbab693204998 100644
+--- a/drivers/acpi/acpica/dbconvert.c
++++ b/drivers/acpi/acpica/dbconvert.c
+@@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
+ elements =
+ ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS *
+ sizeof(union acpi_object));
++ if (!elements)
++ return (AE_NO_MEMORY);
+
+ this = string;
+ for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) {
+diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
+index b91155ea9c343c..c9131259f717b0 100644
+--- a/drivers/acpi/acpica/dbnames.c
++++ b/drivers/acpi/acpica/dbnames.c
+@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
+ ACPI_FREE(buffer.pointer);
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+- acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+-
++ status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
++ if (ACPI_FAILURE(status)) {
++ acpi_os_printf("Could Not evaluate object %p\n",
++ obj_handle);
++ return (AE_OK);
++ }
+ /*
+ * Since this is a field unit, surround the output in braces
+ */
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index 18fdf2bc2d499a..cf53b9535f18e0 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -65,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
+ acpi_gbl_default_address_spaces
+ [i])) {
+ acpi_ev_execute_reg_methods(acpi_gbl_root_node,
++ ACPI_UINT32_MAX,
+ acpi_gbl_default_address_spaces
+ [i], ACPI_REG_CONNECT);
+ }
+@@ -672,6 +673,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
+ * FUNCTION: acpi_ev_execute_reg_methods
+ *
+ * PARAMETERS: node - Namespace node for the device
++ * max_depth - Depth to which to search for _REG
+ * space_id - The address space ID
+ * function - Passed to _REG: On (1) or Off (0)
+ *
+@@ -683,7 +685,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
+ ******************************************************************************/
+
+ void
+-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
++acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
+ acpi_adr_space_type space_id, u32 function)
+ {
+ struct acpi_reg_walk_info info;
+@@ -717,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+ * regions and _REG methods. (i.e. handlers must be installed for all
+ * regions of this Space ID before we can run any _REG methods)
+ */
+- (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
++ (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
+ ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
+ &info, NULL);
+
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index 3197e6303c5b08..95f78383bbdba1 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device,
+ /* Run all _REG methods for this address space */
+
+ if (run_reg) {
+- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
++ acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
++ ACPI_REG_CONNECT);
+ }
+
+ unlock_and_exit:
+@@ -263,6 +264,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
+ * FUNCTION: acpi_execute_reg_methods
+ *
+ * PARAMETERS: device - Handle for the device
++ * max_depth - Depth to which to search for _REG
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+@@ -271,7 +273,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
+ *
+ ******************************************************************************/
+ acpi_status
+-acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
++acpi_execute_reg_methods(acpi_handle device, u32 max_depth,
++ acpi_adr_space_type space_id)
+ {
+ struct acpi_namespace_node *node;
+ acpi_status status;
+@@ -296,7 +299,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
+
+ /* Run all _REG methods for this address space */
+
+- acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
++ acpi_ev_execute_reg_methods(node, max_depth, space_id,
++ ACPI_REG_CONNECT);
+ } else {
+ status = AE_BAD_PARAMETER;
+ }
+diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
+index 08196fa17080e2..82b1fa2d201fed 100644
+--- a/drivers/acpi/acpica/exprep.c
++++ b/drivers/acpi/acpica/exprep.c
+@@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
+
+ if (info->connection_node) {
+ second_desc = info->connection_node->object;
++ if (second_desc == NULL) {
++ break;
++ }
+ if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status =
+ acpi_ds_get_buffer_arguments(second_desc);
+diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
+index 8907b8bf42672a..c49b9f8de723d8 100644
+--- a/drivers/acpi/acpica/exregion.c
++++ b/drivers/acpi/acpica/exregion.c
+@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
+ struct acpi_mem_mapping *mm = mem_info->cur_mm;
+ u32 length;
+ acpi_size map_length;
+- acpi_size page_boundary_map_length;
+ #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+ u32 remainder;
+ #endif
+@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
+ map_length = (acpi_size)
+ ((mem_info->address + mem_info->length) - address);
+
+- /*
+- * If mapping the entire remaining portion of the region will cross
+- * a page boundary, just map up to the page boundary, do not cross.
+- * On some systems, crossing a page boundary while mapping regions
+- * can cause warnings if the pages have different attributes
+- * due to resource management.
+- *
+- * This has the added benefit of constraining a single mapping to
+- * one page, which is similar to the original code that used a 4k
+- * maximum window.
+- */
+- page_boundary_map_length = (acpi_size)
+- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
+- if (page_boundary_map_length == 0) {
+- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
+- }
+-
+- if (map_length > page_boundary_map_length) {
+- map_length = page_boundary_map_length;
+- }
++ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
++ map_length = ACPI_DEFAULT_PAGE_SIZE;
+
+ /* Create a new mapping starting at the address given */
+
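
The exregion hunk above replaces the round-up-to-page-boundary arithmetic with a flat one-page cap on each mapping: a single mapping is still bounded to a page's worth of bytes, it just no longer stops at the boundary itself. The clamp in isolation (hypothetical values):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long region_end = 0x10000, address = 0xC100;
        unsigned long map_length = region_end - address;    /* 0x3F00 */

        /* Map at most one page per iteration. */
        if (map_length > PAGE_SIZE)
            map_length = PAGE_SIZE;

        printf("map %lu bytes at 0x%lx\n", map_length, address);
        return 0;
    }
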
+diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
+index 422c074ed2897b..28582adfc0acaf 100644
+--- a/drivers/acpi/acpica/psargs.c
++++ b/drivers/acpi/acpica/psargs.c
+@@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
+ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
+ *parser_state);
+
++static void acpi_ps_free_field_list(union acpi_parse_object *start);
++
+ /*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_package_length
+@@ -683,6 +685,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
+ return_PTR(field);
+ }
+
++/*******************************************************************************
++ *
++ * FUNCTION: acpi_ps_free_field_list
++ *
++ * PARAMETERS: start - First Op in field list
++ *
++ * RETURN: None.
++ *
++ * DESCRIPTION: Free all Op objects inside a field list.
++ *
++ ******************************************************************************/
++
++static void acpi_ps_free_field_list(union acpi_parse_object *start)
++{
++ union acpi_parse_object *cur = start;
++ union acpi_parse_object *next;
++ union acpi_parse_object *arg;
++
++ while (cur) {
++ next = cur->common.next;
++
++ /* AML_INT_CONNECTION_OP can have a single argument */
++
++ arg = acpi_ps_get_arg(cur, 0);
++ if (arg) {
++ acpi_ps_free_op(arg);
++ }
++
++ acpi_ps_free_op(cur);
++ cur = next;
++ }
++}
++
+ /*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_next_arg
+@@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+ while (parser_state->aml < parser_state->pkg_end) {
+ field = acpi_ps_get_next_field(parser_state);
+ if (!field) {
++ if (arg) {
++ acpi_ps_free_field_list(arg);
++ }
++
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+@@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+ acpi_ps_get_next_namepath(walk_state, parser_state,
+ arg,
+ ACPI_NOT_METHOD_CALL);
++ if (ACPI_FAILURE(status)) {
++ acpi_ps_free_op(arg);
++ return_ACPI_STATUS(status);
++ }
+ } else {
+ /* Single complex argument, nothing returned */
+
+@@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+ acpi_ps_get_next_namepath(walk_state, parser_state,
+ arg,
+ ACPI_POSSIBLE_METHOD_CALL);
++ if (ACPI_FAILURE(status)) {
++ acpi_ps_free_op(arg);
++ return_ACPI_STATUS(status);
++ }
+
+ if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
+
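
acpi_ps_free_field_list() above is the standard teardown for a partially built singly linked list: grab the next pointer before freeing each node, and release the node's optional child first. A stand-alone sketch with hypothetical types:

    #include <stdlib.h>

    struct op {
        struct op *next;
        struct op *arg;    /* optional single argument, may be NULL */
    };

    static void free_field_list(struct op *cur)
    {
        while (cur) {
            struct op *next = cur->next;    /* save before freeing */

            free(cur->arg);                 /* free(NULL) is a no-op */
            free(cur);
            cur = next;
        }
    }

    int main(void)
    {
        struct op *a = calloc(1, sizeof(*a));
        struct op *b = calloc(1, sizeof(*b));

        if (!a || !b)
            return 1;
        a->next = b;
        b->arg = calloc(1, sizeof(struct op));
        free_field_list(a);
        return 0;
    }
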
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index ef59d6ea16da0f..ab2a82cb1b0b48 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -101,6 +101,20 @@ static inline bool is_hest_type_generic_v2(struct ghes *ghes)
+ return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
+ }
+
++/*
++ * A platform may describe one error source for the handling of synchronous
++ * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI
++ * or External Interrupt). On x86, the HEST notifications are always
++ * asynchronous, so only SEA on ARM is delivered as a synchronous
++ * notification.
++ */
++static inline bool is_hest_sync_notify(struct ghes *ghes)
++{
++ u8 notify_type = ghes->generic->notify.type;
++
++ return notify_type == ACPI_HEST_NOTIFY_SEA;
++}
++
+ /*
+ * This driver isn't really modular, however for the time being,
+ * continuing to use module_param is the easiest way to remain
+@@ -209,6 +223,20 @@ int ghes_estatus_pool_init(unsigned int num_ghes)
+ return -ENOMEM;
+ }
+
++/**
++ * ghes_estatus_pool_region_free - free previously allocated memory
++ * from the ghes_estatus_pool.
++ * @addr: address of memory to free.
++ * @size: size of memory to free.
++ *
++ * Returns none.
++ */
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
++{
++ gen_pool_free(ghes_estatus_pool, addr, size);
++}
++EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
++
+ static int map_gen_v2(struct ghes *ghes)
+ {
+ return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
+@@ -475,7 +503,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
+ }
+
+ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+- int sev)
++ int sev, bool sync)
+ {
+ int flags = -1;
+ int sec_sev = ghes_severity(gdata->error_severity);
+@@ -489,7 +517,7 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+ flags = MF_SOFT_OFFLINE;
+ if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+- flags = 0;
++ flags = sync ? MF_ACTION_REQUIRED : 0;
+
+ if (flags != -1)
+ return ghes_do_memory_failure(mem_err->physical_addr, flags);
+@@ -497,9 +525,11 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+ return false;
+ }
+
+-static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
++static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
++ int sev, bool sync)
+ {
+ struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
++ int flags = sync ? MF_ACTION_REQUIRED : 0;
+ bool queued = false;
+ int sec_sev, i;
+ char *p;
+@@ -524,7 +554,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int s
+ * and don't filter out 'corrected' error here.
+ */
+ if (is_cache && has_pa) {
+- queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
++ queued = ghes_do_memory_failure(err_info->physical_fault_addr, flags);
+ p += err_info->length;
+ continue;
+ }
+@@ -564,6 +594,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
++ u8 *aer_info;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+@@ -577,11 +608,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
++ aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
++ sizeof(struct aer_capability_regs));
++ if (!aer_info)
++ return;
++ memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
++
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+- pcie_err->aer_info);
++ aer_info);
+ }
+ #endif
+ }
+@@ -645,6 +682,7 @@ static bool ghes_do_proc(struct ghes *ghes,
+ const guid_t *fru_id = &guid_null;
+ char *fru_text = "";
+ bool queued = false;
++ bool sync = is_hest_sync_notify(ghes);
+
+ sev = ghes_severity(estatus->error_severity);
+ apei_estatus_for_each_section(estatus, gdata) {
+@@ -662,13 +700,13 @@ static bool ghes_do_proc(struct ghes *ghes,
+ atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);
+
+ arch_apei_report_mem_error(sev, mem_err);
+- queued = ghes_handle_memory_failure(gdata, sev);
++ queued = ghes_handle_memory_failure(gdata, sev, sync);
+ }
+ else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ ghes_handle_aer(gdata);
+ }
+ else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+- queued = ghes_handle_arm_hw_error(gdata, sev);
++ queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else {
+ void *err = acpi_hest_get_payload(gdata);
+
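
The ghes changes above thread a single `sync` bit, derived from the notification type, down into the memory-failure flags: a synchronous notification means the faulting task must not be allowed to continue, so action is required. Condensed into a stand-alone sketch (the real code also weighs per-section severities; the flag values here are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative values; the kernel defines its own MF_* flags. */
    #define MF_ACTION_REQUIRED 0x1
    #define MF_SOFT_OFFLINE    0x2

    static int memory_failure_flags(bool sync, bool threshold_exceeded)
    {
        if (threshold_exceeded)
            return MF_SOFT_OFFLINE;    /* corrected, but page wearing out */
        return sync ? MF_ACTION_REQUIRED : 0;
    }

    int main(void)
    {
        printf("sync (SEA):  0x%x\n", memory_failure_flags(true, false));
        printf("async (SCI): 0x%x\n", memory_failure_flags(false, false));
        return 0;
    }
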
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 969bf81e8d546a..e3cbaf3c3bbc15 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -678,12 +678,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
+ return count;
+ }
+
+-static const struct device_attribute alarm_attr = {
++static struct device_attribute alarm_attr = {
+ .attr = {.name = "alarm", .mode = 0644},
+ .show = acpi_battery_alarm_show,
+ .store = acpi_battery_alarm_store,
+ };
+
++static struct attribute *acpi_battery_attrs[] = {
++ &alarm_attr.attr,
++ NULL
++};
++ATTRIBUTE_GROUPS(acpi_battery);
++
+ /*
+ * The Battery Hooking API
+ *
+@@ -697,28 +703,35 @@ static LIST_HEAD(acpi_battery_list);
+ static LIST_HEAD(battery_hook_list);
+ static DEFINE_MUTEX(hook_mutex);
+
+-static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
++static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
+ {
+ struct acpi_battery *battery;
++
+ /*
+ * In order to remove a hook, we first need to
+ * de-register all the batteries that are registered.
+ */
+- if (lock)
+- mutex_lock(&hook_mutex);
+ list_for_each_entry(battery, &acpi_battery_list, list) {
+ if (!hook->remove_battery(battery->bat, hook))
+ power_supply_changed(battery->bat);
+ }
+- list_del(&hook->list);
+- if (lock)
+- mutex_unlock(&hook_mutex);
++ list_del_init(&hook->list);
++
+ pr_info("extension unregistered: %s\n", hook->name);
+ }
+
+ void battery_hook_unregister(struct acpi_battery_hook *hook)
+ {
+- __battery_hook_unregister(hook, 1);
++ mutex_lock(&hook_mutex);
++ /*
++ * Ignore already unregistered battery hooks. This might happen
++ * if a battery hook was previously unloaded due to an error when
++ * adding a new battery.
++ */
++ if (!list_empty(&hook->list))
++ battery_hook_unregister_unlocked(hook);
++
++ mutex_unlock(&hook_mutex);
+ }
+ EXPORT_SYMBOL_GPL(battery_hook_unregister);
+
+@@ -727,7 +740,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
+ struct acpi_battery *battery;
+
+ mutex_lock(&hook_mutex);
+- INIT_LIST_HEAD(&hook->list);
+ list_add(&hook->list, &battery_hook_list);
+ /*
+ * Now that the driver is registered, we need
+@@ -744,7 +756,7 @@ void battery_hook_register(struct acpi_battery_hook *hook)
+ * hooks.
+ */
+ pr_err("extension failed to load: %s", hook->name);
+- __battery_hook_unregister(hook, 0);
++ battery_hook_unregister_unlocked(hook);
+ goto end;
+ }
+
+@@ -783,7 +795,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
+ */
+ pr_err("error in extension, unloading: %s",
+ hook_node->name);
+- __battery_hook_unregister(hook_node, 0);
++ battery_hook_unregister_unlocked(hook_node);
+ }
+ }
+ mutex_unlock(&hook_mutex);
+@@ -816,14 +828,17 @@ static void __exit battery_hook_exit(void)
+ * need to remove the hooks.
+ */
+ list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
+- __battery_hook_unregister(hook, 1);
++ battery_hook_unregister(hook);
+ }
+ mutex_destroy(&hook_mutex);
+ }
+
+ static int sysfs_add_battery(struct acpi_battery *battery)
+ {
+- struct power_supply_config psy_cfg = { .drv_data = battery, };
++ struct power_supply_config psy_cfg = {
++ .drv_data = battery,
++ .attr_grp = acpi_battery_groups,
++ };
+ bool full_cap_broken = false;
+
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+@@ -868,7 +883,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
+ return result;
+ }
+ battery_hook_add_battery(battery);
+- return device_create_file(&battery->bat->dev, &alarm_attr);
++ return 0;
+ }
+
+ static void sysfs_remove_battery(struct acpi_battery *battery)
+@@ -879,7 +894,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
+ return;
+ }
+ battery_hook_remove_battery(battery);
+- device_remove_file(&battery->bat->dev, &alarm_attr);
+ power_supply_unregister(battery->bat);
+ battery->bat = NULL;
+ mutex_unlock(&battery->sysfs_lock);
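
With the battery change above, the alarm attribute is handed to the power-supply core as part of an attribute group at registration time, so its sysfs file is created and removed together with the device instead of by hand. The NULL-terminated-table idea behind attribute groups, as a toy framework (not sysfs itself):

    #include <stdio.h>

    struct attr { const char *name; };

    static struct attr alarm_attr = { "alarm" };

    /* NULL-terminated table: one walk serves create and remove alike. */
    static struct attr *battery_attrs[] = { &alarm_attr, NULL };

    static void register_attrs(struct attr **attrs)
    {
        for (; *attrs; attrs++)
            printf("create %s\n", (*attrs)->name);
    }

    static void unregister_attrs(struct attr **attrs)
    {
        for (; *attrs; attrs++)
            printf("remove %s\n", (*attrs)->name);
    }

    int main(void)
    {
        register_attrs(battery_attrs);   /* core does this at registration */
        unregister_attrs(battery_attrs); /* and this at teardown */
        return 0;
    }
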
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 7ff269a78c2088..7aced0b9bad7cc 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -100,6 +100,11 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
+ (cpc)->cpc_entry.reg.space_id == \
+ ACPI_ADR_SPACE_PLATFORM_COMM)
+
++/* Check if a CPC register is in FFH */
++#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
++ (cpc)->cpc_entry.reg.space_id == \
++ ACPI_ADR_SPACE_FIXED_HARDWARE)
++
+ /* Check if a CPC register is in SystemMemory */
+ #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
+ (cpc)->cpc_entry.reg.space_id == \
+@@ -163,6 +168,16 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+
++/* Check for valid access_width, otherwise, fallback to using bit_width */
++#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
++
++/* Shift and apply the mask for CPC reads/writes */
++#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
++ GENMASK(((reg)->bit_width) - 1, 0))
++#define MASK_VAL_WRITE(reg, prev_val, val) \
++ ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
++ ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \
++
+ static ssize_t show_feedback_ctrs(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+ {
+@@ -777,6 +792,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (gas_t->address) {
+ void __iomem *addr;
++ size_t access_width;
+
+ if (!osc_cpc_flexible_adr_space_confirmed) {
+ pr_debug("Flexible address space capability not supported\n");
+@@ -784,7 +800,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+ goto out_free;
+ }
+
+- addr = ioremap(gas_t->address, gas_t->bit_width/8);
++ access_width = GET_BIT_WIDTH(gas_t) / 8;
++ addr = ioremap(gas_t->address, access_width);
+ if (!addr)
+ goto out_free;
+ cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
+@@ -843,6 +860,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
+
+ /* Store CPU Logical ID */
+ cpc_ptr->cpu_id = pr->id;
++ spin_lock_init(&cpc_ptr->rmw_lock);
+
+ /* Parse PSD data for this CPU */
+ ret = acpi_get_psd(cpc_ptr, handle);
+@@ -980,6 +998,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
+ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ {
+ void __iomem *vaddr = NULL;
++ int size;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+
+@@ -989,14 +1008,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ }
+
+ *val = 0;
++ size = GET_BIT_WIDTH(reg);
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+- u32 width = 8 << (reg->access_width - 1);
+ u32 val_u32;
+ acpi_status status;
+
+ status = acpi_os_read_port((acpi_io_address)reg->address,
+- &val_u32, width);
++ &val_u32, size);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("Error: Failed to read SystemIO port %llx\n",
+ reg->address);
+@@ -1005,17 +1024,24 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+
+ *val = val_u32;
+ return 0;
+- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
++ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
++ /*
++ * For registers in PCC space, the register size is determined
++ * by the bit width field; the access size is used to indicate
++ * the PCC subspace id.
++ */
++ size = reg->bit_width;
+ vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
++ }
+ else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ vaddr = reg_res->sys_mem_vaddr;
+ else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
+ return cpc_read_ffh(cpu, reg, val);
+ else
+ return acpi_os_read_memory((acpi_physical_address)reg->address,
+- val, reg->bit_width);
++ val, size);
+
+- switch (reg->bit_width) {
++ switch (size) {
+ case 8:
+ *val = readb_relaxed(vaddr);
+ break;
+@@ -1029,27 +1055,39 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
+ *val = readq_relaxed(vaddr);
+ break;
+ default:
+- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+- reg->bit_width, pcc_ss_id);
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
++ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
++ size, reg->address);
++ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
++ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
++ size, pcc_ss_id);
++ }
+ return -EFAULT;
+ }
+
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++ *val = MASK_VAL_READ(reg, *val);
++
+ return 0;
+ }
+
+ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ {
+ int ret_val = 0;
++ int size;
++ u64 prev_val;
+ void __iomem *vaddr = NULL;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_reg *reg = &reg_res->cpc_entry.reg;
++ struct cpc_desc *cpc_desc;
++
++ size = GET_BIT_WIDTH(reg);
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+- u32 width = 8 << (reg->access_width - 1);
+ acpi_status status;
+
+ status = acpi_os_write_port((acpi_io_address)reg->address,
+- (u32)val, width);
++ (u32)val, size);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("Error: Failed to write SystemIO port %llx\n",
+ reg->address);
+@@ -1057,17 +1095,53 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ }
+
+ return 0;
+- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
++ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
++ /*
++ * For registers in PCC space, the register size is determined
++ * by the bit width field; the access size is used to indicate
++ * the PCC subspace id.
++ */
++ size = reg->bit_width;
+ vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
++ }
+ else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ vaddr = reg_res->sys_mem_vaddr;
+ else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
+ return cpc_write_ffh(cpu, reg, val);
+ else
+ return acpi_os_write_memory((acpi_physical_address)reg->address,
+- val, reg->bit_width);
++ val, size);
++
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
++ cpc_desc = per_cpu(cpc_desc_ptr, cpu);
++ if (!cpc_desc) {
++ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
++ return -ENODEV;
++ }
++
++ spin_lock(&cpc_desc->rmw_lock);
++ switch (size) {
++ case 8:
++ prev_val = readb_relaxed(vaddr);
++ break;
++ case 16:
++ prev_val = readw_relaxed(vaddr);
++ break;
++ case 32:
++ prev_val = readl_relaxed(vaddr);
++ break;
++ case 64:
++ prev_val = readq_relaxed(vaddr);
++ break;
++ default:
++ spin_unlock(&cpc_desc->rmw_lock);
++ return -EFAULT;
++ }
++ val = MASK_VAL_WRITE(reg, prev_val, val);
++ val |= prev_val;
++ }
+
+- switch (reg->bit_width) {
++ switch (size) {
+ case 8:
+ writeb_relaxed(val, vaddr);
+ break;
+@@ -1081,12 +1155,20 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
+ writeq_relaxed(val, vaddr);
+ break;
+ default:
+- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+- reg->bit_width, pcc_ss_id);
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
++ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
++ size, reg->address);
++ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
++ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
++ size, pcc_ss_id);
++ }
+ ret_val = -EFAULT;
+ break;
+ }
+
++ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++ spin_unlock(&cpc_desc->rmw_lock);
++
+ return ret_val;
+ }
+
+@@ -1154,6 +1236,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+ return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+ }
+
++/**
++ * cppc_get_highest_perf - Get the highest performance register value.
++ * @cpunum: CPU from which to get highest performance.
++ * @highest_perf: Return address.
++ *
++ * Return: 0 for success, -EIO otherwise.
++ */
++int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
++{
++ return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
++}
++EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
++
+ /**
+ * cppc_get_epp_perf - Get the epp register value.
+ * @cpunum: CPU from which to get epp preference value.
+@@ -1424,9 +1519,12 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
++ } else if (osc_cpc_flexible_adr_space_confirmed &&
++ CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
++ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
+ } else {
+ ret = -ENOTSUPP;
+- pr_debug("_CPC in PCC is not supported\n");
++ pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
+ }
+
+ return ret;
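
The MASK_VAL_READ()/MASK_VAL_WRITE() macros introduced in the cppc hunk above are ordinary bit-field extract and merge operations keyed off `bit_offset` and `bit_width`. A stand-alone worked example with GENMASK() spelled out (values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* 64-bit GENMASK(h, l): bits h..l set, everything else clear. */
    #define GENMASK(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

    int main(void)
    {
        unsigned int bit_offset = 4, bit_width = 8;
        uint64_t reg = 0xABCD;    /* raw register contents */

        /* READ: shift the field down, then mask to bit_width bits. */
        uint64_t field = (reg >> bit_offset) & GENMASK(bit_width - 1, 0);

        /* WRITE: place a new value in the field, preserving other bits. */
        uint64_t newval = 0x5A;
        uint64_t merged =
            ((newval & GENMASK(bit_width - 1, 0)) << bit_offset) |
            (reg & ~(GENMASK(bit_width - 1, 0) << bit_offset));

        printf("field=0x%llx merged=0x%llx\n",
               (unsigned long long)field, (unsigned long long)merged);
        return 0;
    }

This prints field=0xbc merged=0xa5ad: the eight field bits are replaced while the surrounding nibbles survive.
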
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index f007116a842762..3b4d048c494173 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
+ }
+ EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
+
++/**
++ * acpi_device_fix_up_power_children - Force a device's children into D0.
++ * @adev: Parent device object whose children's power state is to be fixed up.
++ *
++ * Call acpi_device_fix_up_power() for @adev's children so long as they
++ * are reported as present and enabled.
++ */
++void acpi_device_fix_up_power_children(struct acpi_device *adev)
++{
++ acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
++}
++EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
++
+ int acpi_device_update_power(struct acpi_device *device, int *state_p)
+ {
+ int state;
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index b9bbf074619921..6ed5e9e56be2f4 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -158,8 +158,8 @@ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalia
+ return 0;
+
+ len = snprintf(modalias, size, "acpi:");
+- if (len <= 0)
+- return len;
++ if (len >= size)
++ return -ENOMEM;
+
+ size -= len;
+
+@@ -212,8 +212,10 @@ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias
+ len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+ ACPI_FREE(buf.pointer);
+
+- if (len <= 0)
+- return len;
++ if (len >= size)
++ return -ENOMEM;
++
++ size -= len;
+
+ of_compatible = acpi_dev->data.of_compatible;
+ if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+@@ -542,8 +544,9 @@ int acpi_device_setup_files(struct acpi_device *dev)
+ * If device has _STR, 'description' file is created
+ */
+ if (acpi_has_method(dev->handle, "_STR")) {
+- status = acpi_evaluate_object(dev->handle, "_STR",
+- NULL, &buffer);
++ status = acpi_evaluate_object_typed(dev->handle, "_STR",
++ NULL, &buffer,
++ ACPI_TYPE_BUFFER);
+ if (ACPI_FAILURE(status))
+ buffer.pointer = NULL;
+ dev->pnp.str_obj = buffer.pointer;
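
The modalias fixes above hinge on snprintf() semantics: it returns the length the output would have had, so `len <= 0` never catches truncation while `len >= size` does. Demonstrated stand-alone:

    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        int len = snprintf(buf, sizeof(buf), "acpi:%s", "VERYLONGID");

        if (len >= (int)sizeof(buf))
            printf("truncated: wanted %d bytes, have %zu\n",
                   len, sizeof(buf));
        return 0;
    }

Here len is 15, the full length of "acpi:VERYLONGID", even though only 7 characters and a NUL were stored.
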
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index c95d0edb0be9e5..115994dfefec1e 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -783,6 +783,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ unsigned long tmp;
+ int ret = 0;
+
++ if (t->rdata)
++ memset(t->rdata, 0, t->rlen);
++
+ /* start transaction */
+ spin_lock_irqsave(&ec->lock, tmp);
+ /* Enable GPE for command processing (IBF=0/OBF=1) */
+@@ -819,8 +822,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+
+ if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
+ return -EINVAL;
+- if (t->rdata)
+- memset(t->rdata, 0, t->rlen);
+
+ mutex_lock(&ec->mutex);
+ if (ec->global_lock) {
+@@ -847,7 +848,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
+ .wdata = NULL, .rdata = &d,
+ .wlen = 0, .rlen = 1};
+
+- return acpi_ec_transaction(ec, &t);
++ return acpi_ec_transaction_unlocked(ec, &t);
+ }
+
+ static int acpi_ec_burst_disable(struct acpi_ec *ec)
+@@ -857,7 +858,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
+ .wlen = 0, .rlen = 0};
+
+ return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
+- acpi_ec_transaction(ec, &t) : 0;
++ acpi_ec_transaction_unlocked(ec, &t) : 0;
+ }
+
+ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
+@@ -873,6 +874,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
+ return result;
+ }
+
++static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
++{
++ int result;
++ u8 d;
++ struct transaction t = {.command = ACPI_EC_COMMAND_READ,
++ .wdata = &address, .rdata = &d,
++ .wlen = 1, .rlen = 1};
++
++ result = acpi_ec_transaction_unlocked(ec, &t);
++ *data = d;
++ return result;
++}
++
+ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
+ {
+ u8 wdata[2] = { address, data };
+@@ -883,6 +897,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
+ return acpi_ec_transaction(ec, &t);
+ }
+
++static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
++{
++ u8 wdata[2] = { address, data };
++ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
++ .wdata = wdata, .rdata = NULL,
++ .wlen = 2, .rlen = 0};
++
++ return acpi_ec_transaction_unlocked(ec, &t);
++}
++
+ int ec_read(u8 addr, u8 *val)
+ {
+ int err;
+@@ -1323,6 +1347,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ struct acpi_ec *ec = handler_context;
+ int result = 0, i, bytes = bits / 8;
+ u8 *value = (u8 *)value64;
++ u32 glk;
+
+ if ((address > 0xFF) || !value || !handler_context)
+ return AE_BAD_PARAMETER;
+@@ -1330,17 +1355,38 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
++ mutex_lock(&ec->mutex);
++
++ if (ec->global_lock) {
++ acpi_status status;
++
++ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
++ if (ACPI_FAILURE(status)) {
++ result = -ENODEV;
++ goto unlock;
++ }
++ }
++
+ if (ec->busy_polling || bits > 8)
+ acpi_ec_burst_enable(ec);
+
+- for (i = 0; i < bytes; ++i, ++address, ++value)
++ for (i = 0; i < bytes; ++i, ++address, ++value) {
+ result = (function == ACPI_READ) ?
+- acpi_ec_read(ec, address, value) :
+- acpi_ec_write(ec, address, *value);
++ acpi_ec_read_unlocked(ec, address, value) :
++ acpi_ec_write_unlocked(ec, address, *value);
++ if (result < 0)
++ break;
++ }
+
+ if (ec->busy_polling || bits > 8)
+ acpi_ec_burst_disable(ec);
+
++ if (ec->global_lock)
++ acpi_release_global_lock(glk);
++
++unlock:
++ mutex_unlock(&ec->mutex);
++
+ switch (result) {
+ case -EINVAL:
+ return AE_BAD_PARAMETER;
+@@ -1348,8 +1394,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ return AE_NOT_FOUND;
+ case -ETIME:
+ return AE_TIME;
+- default:
++ case 0:
+ return AE_OK;
++ default:
++ return AE_ERROR;
+ }
+ }
+
+@@ -1487,8 +1535,10 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ acpi_ec_start(ec, false);
+
+ if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
++ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
++
+ acpi_ec_enter_noirq(ec);
+- status = acpi_install_address_space_handler_no_reg(ec->handle,
++ status = acpi_install_address_space_handler_no_reg(scope_handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+ NULL, ec);
+@@ -1497,11 +1547,10 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ return -ENODEV;
+ }
+ set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+- ec->address_space_handler_holder = ec->handle;
+ }
+
+ if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
+- acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
++ acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
+ set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
+ }
+
+@@ -1553,10 +1602,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+
+ static void ec_remove_handlers(struct acpi_ec *ec)
+ {
++ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
++
+ if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ if (ACPI_FAILURE(acpi_remove_address_space_handler(
+- ec->address_space_handler_holder,
+- ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
++ scope_handle,
++ ACPI_ADR_SPACE_EC,
++ &acpi_ec_space_handler)))
+ pr_err("failed to remove space handler\n");
+ clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+ }
+@@ -1595,14 +1647,18 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca
+ {
+ int ret;
+
+- ret = ec_install_handlers(ec, device, call_reg);
+- if (ret)
+- return ret;
+-
+ /* First EC capable of handling transactions */
+ if (!first_ec)
+ first_ec = ec;
+
++ ret = ec_install_handlers(ec, device, call_reg);
++ if (ret) {
++ if (ec == first_ec)
++ first_ec = NULL;
++
++ return ret;
++ }
++
+ pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
+ ec->data_addr);
+
+@@ -1709,6 +1765,12 @@ static void acpi_ec_remove(struct acpi_device *device)
+ }
+ }
+
++void acpi_ec_register_opregions(struct acpi_device *adev)
++{
++ if (first_ec && first_ec->handle != adev->handle)
++ acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC);
++}
++
+ static acpi_status
+ ec_parse_io_ports(struct acpi_resource *resource, void *context)
+ {
+@@ -1924,6 +1986,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+ },
+ },
++ {
++ /*
++ * HP 250 G7 Notebook PC
++ */
++ .callback = ec_honor_dsdt_gpe,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
++ },
++ },
+ {
+ /*
+ * Samsung hardware
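
The ec.c rework above takes the EC mutex (and, when the firmware requires it, the ACPI global lock) once around the whole multi-byte operation and runs the `_unlocked` transaction helpers inside the loop, rather than locking per byte. The locking shape, sketched with a pthread mutex standing in for both locks and a fake transaction:

    #include <pthread.h>

    static pthread_mutex_t ec_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must hold ec_mutex; fake one-byte transaction. */
    static int ec_read_unlocked(unsigned char addr, unsigned char *val)
    {
        *val = (unsigned char)(addr ^ 0x5A);
        return 0;
    }

    static int ec_read_region(unsigned char addr, unsigned char *buf, int bytes)
    {
        int i, result = 0;

        pthread_mutex_lock(&ec_mutex);
        for (i = 0; i < bytes; i++) {
            result = ec_read_unlocked(addr + i, buf + i);
            if (result < 0)
                break;    /* stop on the first failed byte */
        }
        pthread_mutex_unlock(&ec_mutex);
        return result;
    }

    int main(void)
    {
        unsigned char buf[4];

        return ec_read_region(0x10, buf, sizeof(buf));
    }
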
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 866c7c4ed23317..1e8ee97fc85f38 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -167,7 +167,6 @@ enum acpi_ec_event_state {
+
+ struct acpi_ec {
+ acpi_handle handle;
+- acpi_handle address_space_handler_holder;
+ int gpe;
+ int irq;
+ unsigned long command_addr;
+@@ -205,6 +204,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data);
+ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
++void acpi_ec_register_opregions(struct acpi_device *adev);
+
+ #ifdef CONFIG_PM_SLEEP
+ void acpi_ec_flush_work(void);
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 1f4fc5f8a819d3..a44c0761fd1c06 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -183,7 +183,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
+ int i, j;
+ int d = slit->locality_count;
+ for (i = 0; i < d; i++) {
+- for (j = 0; j < d; j++) {
++ for (j = 0; j < d; j++) {
+ u8 val = slit->entry[d*i + j];
+ if (i == j) {
+ if (val != LOCAL_DISTANCE)
+@@ -206,6 +206,11 @@ int __init srat_disabled(void)
+ return acpi_numa < 0;
+ }
+
++__weak int __init numa_fill_memblks(u64 start, u64 end)
++{
++ return NUMA_NO_MEMBLK;
++}
++
+ #if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+ /*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+@@ -310,11 +315,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ start = cfmws->base_hpa;
+ end = cfmws->base_hpa + cfmws->window_size;
+
+- /* Skip if the SRAT already described the NUMA details for this HPA */
+- node = phys_to_target_node(start);
+- if (node != NUMA_NO_NODE)
++ /*
++ * The SRAT may have already described NUMA details for all,
++ * or a portion of, this CFMWS HPA range. Extend the memblks
++ * found for any portion of the window to cover the entire
++ * window.
++ */
++ if (!numa_fill_memblks(start, end))
+ return 0;
+
++ /* No SRAT description. Create a new node. */
+ node = acpi_map_pxm_to_node(*fake_pxm);
+
+ if (node == NUMA_NO_NODE) {
+@@ -527,7 +537,7 @@ int __init acpi_numa_init(void)
+ */
+
+ /* fake_pxm is the next unused PXM value after SRAT parsing */
+- for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
++ for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] > fake_pxm)
+ fake_pxm = node_to_pxm_map[i];
+ }
+diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
+index ebd03e4729555a..0d1a82eeb4b0b6 100644
+--- a/drivers/acpi/pmic/tps68470_pmic.c
++++ b/drivers/acpi/pmic/tps68470_pmic.c
+@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
+ struct tps68470_pmic_opregion *opregion;
+ acpi_status status;
+
+- if (!dev || !tps68470_regmap) {
+- dev_warn(dev, "dev or regmap is NULL\n");
+- return -EINVAL;
+- }
++ if (!tps68470_regmap)
++ return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
+
+ if (!handle) {
+ dev_warn(dev, "acpi handle is NULL\n");
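
dev_err_probe(), used above, folds "log the failure, then return the error code" into one expression and stays quiet for probe deferral. A userspace sketch of that shape (err_probe() here is a hypothetical stand-in, not the kernel API):

    #include <stdio.h>

    #define EPROBE_DEFER 517    /* deferral is expected, so not logged */

    static int err_probe(int err, const char *msg)
    {
        if (err != -EPROBE_DEFER)
            fprintf(stderr, "error %d: %s\n", err, msg);
        return err;
    }

    static int probe(const void *regmap)
    {
        if (!regmap)
            return err_probe(-22, "regmap is missing");    /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        return probe(0) ? 1 : 0;
    }
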
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 3a34a8c425fe4a..831fa4a1215985 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,7 +16,6 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h> /* need_resched() */
+-#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ }
+
+-static int acpi_cst_latency_cmp(const void *a, const void *b)
++static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
+ {
+- const struct acpi_processor_cx *x = a, *y = b;
++ int i, j, k;
+
+- if (!(x->valid && y->valid))
+- return 0;
+- if (x->latency > y->latency)
+- return 1;
+- if (x->latency < y->latency)
+- return -1;
+- return 0;
+-}
+-static void acpi_cst_latency_swap(void *a, void *b, int n)
+-{
+- struct acpi_processor_cx *x = a, *y = b;
++ for (i = 1; i < length; i++) {
++ if (!states[i].valid)
++ continue;
+
+- if (!(x->valid && y->valid))
+- return;
+- swap(x->latency, y->latency);
++ for (j = i - 1, k = i; j >= 0; j--) {
++ if (!states[j].valid)
++ continue;
++
++ if (states[j].latency > states[k].latency)
++ swap(states[j].latency, states[k].latency);
++
++ k = j;
++ }
++ }
+ }
+
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+
+ if (buggy_latency) {
+ pr_notice("FW issue: working around C-state latencies out of order\n");
+- sort(&pr->power.states[1], max_cstate,
+- sizeof(struct acpi_processor_cx),
+- acpi_cst_latency_cmp,
+- acpi_cst_latency_swap);
++ acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
+ }
+
+ lapic_timer_propagate_broadcast(pr);
+@@ -592,7 +587,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+ while (1) {
+
+ if (cx->entry_method == ACPI_CSTATE_HALT)
+- safe_halt();
++ raw_safe_halt();
+ else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
+ io_idle(cx->address);
+ } else
+@@ -1430,6 +1425,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
+ acpi_processor_registered--;
+ if (acpi_processor_registered == 0)
+ cpuidle_unregister_driver(&acpi_idle_driver);
++
++ kfree(dev);
+ }
+
+ pr->flags.power_setup_done = 0;
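
The open-coded replacement above for sort() is an insertion-style pass that compares and swaps only the latency values of valid C-states, skipping invalid slots entirely, which is what the old comparator/swap pair could not express safely. Reproduced stand-alone with a hypothetical struct but the same loop shape:

    #include <stdbool.h>
    #include <stdio.h>

    struct cx { bool valid; unsigned int latency; };

    #define SWAP(a, b) do { unsigned int t = (a); (a) = (b); (b) = t; } while (0)

    static void latency_sort(struct cx *states, int length)
    {
        int i, j, k;

        for (i = 1; i < length; i++) {
            if (!states[i].valid)
                continue;
            /* Bubble states[i]'s latency left past larger valid entries. */
            for (j = i - 1, k = i; j >= 0; j--) {
                if (!states[j].valid)
                    continue;
                if (states[j].latency > states[k].latency)
                    SWAP(states[j].latency, states[k].latency);
                k = j;
            }
        }
    }

    int main(void)
    {
        struct cx s[] = { {true, 30}, {false, 0}, {true, 10}, {true, 20} };
        int i;

        latency_sort(s, 4);
        for (i = 0; i < 4; i++)
            printf("%u%s ", s[i].latency, s[i].valid ? "" : "(inv)");
        printf("\n");    /* prints: 10 0(inv) 20 30 */
        return 0;
    }
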
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 413e4fcadcaf7b..4d958a165da058 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -851,6 +851,7 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
++ * (may be NULL)
+ *
+ * Find property with @name, verify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+@@ -907,6 +908,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ if (!device)
+ return -EINVAL;
+
++ if (!args)
++ return 0;
++
+ args->fwnode = acpi_fwnode_handle(device);
+ args->nargs = 0;
+ return 0;
+@@ -1102,25 +1106,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ switch (proptype) {
+ case DEV_PROP_STRING:
+ break;
+- case DEV_PROP_U8 ... DEV_PROP_U64:
++ default:
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (nval > obj->buffer.length)
+ return -EOVERFLOW;
+- break;
++ } else {
++ if (nval > obj->package.count)
++ return -EOVERFLOW;
+ }
+- fallthrough;
+- default:
+- if (nval > obj->package.count)
+- return -EOVERFLOW;
+ break;
+ }
+ if (nval == 0)
+ return -EINVAL;
+
+- if (obj->type != ACPI_TYPE_BUFFER)
+- items = obj->package.elements;
+- else
++ if (obj->type == ACPI_TYPE_BUFFER) {
++ if (proptype != DEV_PROP_U8)
++ return -EPROTO;
+ items = obj;
++ } else {
++ items = obj->package.elements;
++ }
+
+ switch (proptype) {
+ case DEV_PROP_U8:
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 297a88587031e6..95233b413c1ac5 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+ },
+ },
++ {
++ /* Asus Vivobook X1704VAP */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"),
++ },
++ },
+ {
+ .ident = "Asus ExpertBook B1402CBA",
+ .matches = {
+@@ -446,6 +453,13 @@ static const struct dmi_system_id asus_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+ },
+ },
++ {
++ /* Asus ExpertBook B1402CVA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
++ },
++ },
+ {
+ .ident = "Asus ExpertBook B1502CBA",
+ .matches = {
+@@ -495,6 +509,38 @@ static const struct dmi_system_id maingear_laptop[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ }
+ },
++ {
++ /* Asus ExpertBook B2502CVA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"),
++ },
++ },
++ {
++ /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
++ },
++ },
++ {
++ /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
++ },
++ },
++ {
++ /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
++ DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
++ },
++ },
++ {
++ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
++ },
++ },
+ {
+ .ident = "MAINGEAR Vector Pro 2 17",
+ .matches = {
+@@ -524,6 +570,39 @@ static const struct dmi_system_id pcspecialist_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
+ },
+ },
++ {
++ /* Infinity E15-5A165-BM */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"),
++ },
++ },
++ {
++ /* Infinity E15-5A305-1M */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"),
++ },
++ },
++ {
++ /* Lunnen Ground 15 / AMD Ryzen 5 5500U */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
++ DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"),
++ },
++ },
++ {
++ /* Lunnen Ground 16 / AMD Ryzen 7 5800U */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
++ DMI_MATCH(DMI_BOARD_NAME, "LL6FA"),
++ },
++ },
++ {
++ /* MAIBENBEN X577 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"),
++ DMI_MATCH(DMI_BOARD_NAME, "X577"),
++ },
++ },
+ { }
+ };
+
+@@ -535,6 +614,18 @@ static const struct dmi_system_id lg_laptop[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
+ },
+ },
++ {
++ /* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
++ },
++ },
++ {
++ /* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
+index 94e3c000df2e16..fdeb46ed21d69f 100644
+--- a/drivers/acpi/sbs.c
++++ b/drivers/acpi/sbs.c
+@@ -77,7 +77,6 @@ struct acpi_battery {
+ u16 spec;
+ u8 id;
+ u8 present:1;
+- u8 have_sysfs_alarm:1;
+ };
+
+ #define to_acpi_battery(x) power_supply_get_drvdata(x)
+@@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
+ return count;
+ }
+
+-static const struct device_attribute alarm_attr = {
++static struct device_attribute alarm_attr = {
+ .attr = {.name = "alarm", .mode = 0644},
+ .show = acpi_battery_alarm_show,
+ .store = acpi_battery_alarm_store,
+ };
+
++static struct attribute *acpi_battery_attrs[] = {
++ &alarm_attr.attr,
++ NULL
++};
++ATTRIBUTE_GROUPS(acpi_battery);
++
+ /* --------------------------------------------------------------------------
+ Driver Interface
+ -------------------------------------------------------------------------- */
+@@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery)
+ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
+ {
+ struct acpi_battery *battery = &sbs->battery[id];
+- struct power_supply_config psy_cfg = { .drv_data = battery, };
++ struct power_supply_config psy_cfg = {
++ .drv_data = battery,
++ .attr_grp = acpi_battery_groups,
++ };
+ int result;
+
+ battery->id = id;
+@@ -548,10 +556,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
+ goto end;
+ }
+
+- result = device_create_file(&battery->bat->dev, &alarm_attr);
+- if (result)
+- goto end;
+- battery->have_sysfs_alarm = 1;
+ end:
+ pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
+ ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
+@@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
+ {
+ struct acpi_battery *battery = &sbs->battery[id];
+
+- if (battery->bat) {
+- if (battery->have_sysfs_alarm)
+- device_remove_file(&battery->bat->dev, &alarm_attr);
++ if (battery->bat)
+ power_supply_unregister(battery->bat);
+- }
+ }
+
+ static int acpi_charger_add(struct acpi_sbs *sbs)
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 691d4b7686ee7e..c0c5c5c58ae1e7 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -314,18 +314,14 @@ static int acpi_scan_device_check(struct acpi_device *adev)
+ * again).
+ */
+ if (adev->handler) {
+- dev_warn(&adev->dev, "Already enumerated\n");
+- return -EALREADY;
++ dev_dbg(&adev->dev, "Already enumerated\n");
++ return 0;
+ }
+ error = acpi_bus_scan(adev->handle);
+ if (error) {
+ dev_warn(&adev->dev, "Namespace scan failure\n");
+ return error;
+ }
+- if (!adev->handler) {
+- dev_warn(&adev->dev, "Enumeration failure\n");
+- error = -ENODEV;
+- }
+ } else {
+ error = acpi_scan_device_not_present(adev);
+ }
+@@ -1568,17 +1564,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
+ int err;
+ const struct iommu_ops *ops;
+
++ /* Serialise to make dev->iommu stable under our potential fwspec */
++ mutex_lock(&iommu_probe_device_lock);
+ /*
+ * If we already translated the fwspec there is nothing left to do,
+ * return the iommu_ops.
+ */
+ ops = acpi_iommu_fwspec_ops(dev);
+- if (ops)
++ if (ops) {
++ mutex_unlock(&iommu_probe_device_lock);
+ return ops;
++ }
+
+ err = iort_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
+ err = viot_iommu_configure(dev);
++ mutex_unlock(&iommu_probe_device_lock);
+
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+@@ -1797,7 +1798,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
+ if (dep->honor_dep)
+ adev->flags.honor_deps = 1;
+
+- adev->dep_unmet++;
++ if (!dep->met)
++ adev->dep_unmet++;
+ }
+ }
+ }
+@@ -2196,6 +2198,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
+ if (device->handler)
+ goto ok;
+
++ acpi_ec_register_opregions(device);
++
+ if (!device->flags.initialized) {
+ device->flags.power_manageable =
+ device->power.states[ACPI_STATE_D0].flags.valid;
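
The scan.c hunk above brackets the fwspec lookup and configuration in iommu_probe_device_lock, so a concurrent probe can neither miss nor repeat the one-time setup. A minimal userspace model of that check-under-lock pattern, using pthreads (all names below are illustrative stand-ins, not kernel APIs):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;
static void *configured_ops; /* stands in for the per-device fwspec ops */

/* Take the lock first, then test whether another thread already finished
 * the configuration; only configure if not. */
static void *configure_once(void)
{
    void *ops;

    pthread_mutex_lock(&probe_lock);
    ops = configured_ops;
    if (!ops) {
        /* expensive one-time setup, done while still holding the lock */
        ops = malloc(1);
        configured_ops = ops;
    }
    pthread_mutex_unlock(&probe_lock);
    return ops;
}

static void *worker(void *arg)
{
    printf("thread %ld got ops %p\n", (long)arg, configure_once());
    return NULL;
}

int main(void)
{
    pthread_t t[4];

    for (long i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, worker, (void *)i);
    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    return 0;
}

Every thread prints the same pointer: whichever thread wins the lock does the setup once, and the rest observe the finished result.
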
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 808484d1120976..728acfeb774d83 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
+ },
+ },
+- /*
+- * ASUS B1400CEAE hangs on resume from suspend (see
+- * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
+- */
+- {
+- .callback = init_default_s3,
+- .ident = "ASUS B1400CEAE",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
+- },
+- },
+ {},
+ };
+
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index 312730f8272eec..8263508415a8d0 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -778,9 +778,9 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
+
+ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+ {
++ thermal_zone_device_disable(tz->thermal_zone);
+ acpi_thermal_zone_sysfs_remove(tz);
+ thermal_zone_device_unregister(tz->thermal_zone);
+- kfree(tz->trip_table);
+ tz->thermal_zone = NULL;
+ }
+
+@@ -985,7 +985,7 @@ static void acpi_thermal_remove(struct acpi_device *device)
+
+ flush_workqueue(acpi_thermal_pm_queue);
+ acpi_thermal_unregister_thermal_zone(tz);
+-
++ kfree(tz->trip_table);
+ kfree(tz);
+ }
+
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 442396f6ed1f9c..e96afb1622f95f 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int video_detect_portege_r100(const struct dmi_system_id *d)
++{
++ struct pci_dev *dev;
++ /* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
++ dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
++ if (dev)
++ acpi_backlight_dmi = acpi_backlight_vendor;
++ return 0;
++}
++
+ static const struct dmi_system_id video_detect_dmi_table[] = {
+ /*
+ * Models which should use the vendor backlight interface,
+@@ -250,6 +260,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"),
+ },
+ },
++ {
++ .callback = video_detect_force_vendor,
++ /* Panasonic Toughbook CF-18 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita Electric Industrial"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"),
++ },
++ },
+
+ /*
+ * Toshiba models with Transflective display, these need to use
+@@ -270,6 +288,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ },
+ },
+
++ /*
++	 * The Toshiba Portégé R100 has both the acpi_video and toshiba_acpi
++	 * vendor drivers available, but neither gets activated because its
++	 * VGA chip (Trident CyberBlade XP4m32) has no kernel driver.
++	 * The DMI strings are generic, so check for the VGA chip in the callback.
++ */
++ {
++ .callback = video_detect_portege_r100,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
++ DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
++ },
++ },
++
+ /*
+ * Models which need acpi_video backlight control where the GPU drivers
+ * do not call acpi_video_register_backlight() because no internal panel
+@@ -479,6 +513,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+ },
+ },
++ {
++ .callback = video_detect_force_native,
++ /* Lenovo Slim 7 16ARH7 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "82UX"),
++ },
++ },
+ {
+ .callback = video_detect_force_native,
+ /* Lenovo ThinkPad X131e (3371 AMD version) */
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 63d834dd381122..e035cec614dc8f 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -198,16 +198,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+ }
+
+ /*
+- * AMD systems from Renoir and Lucienne *require* that the NVME controller
++ * AMD systems from Renoir onwards *require* that the NVME controller
+ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+ *
+ * This is "typically" accomplished using the `StorageD3Enable`
+ * property in the _DSD that is checked via the `acpi_storage_d3` function
+- * but this property was introduced after many of these systems launched
+- * and most OEM systems don't have it in their BIOS.
++ * but some OEM systems still don't have it in their BIOS.
+ *
+ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+- * a hardcoded allowlist for D3 support, which was used for these platforms.
++ * a hardcoded allowlist for D3 support as well as a registry key to override
++ * the BIOS, which has been used for these cases.
+ *
+ * This allows quirking on Linux in a similar fashion.
+ *
+@@ -220,19 +220,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216773
+ * https://bugzilla.kernel.org/show_bug.cgi?id=217003
+ * 2) On at least one HP system StorageD3Enable is missing on the second NVME
+- disk in the system.
++ * disk in the system.
++ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
++ * NVME device.
+ */
+-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
+- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
+- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
+- X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
+- {}
+-};
+-
+ bool force_storage_d3(void)
+ {
+- return x86_match_cpu(storage_d3_cpu_ids);
++ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
++ return false;
++ return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
+ }
+
+ /*
+@@ -261,9 +257,10 @@ bool force_storage_d3(void)
+ #define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
+ #define ACPI_QUIRK_UART1_SKIP BIT(1)
+ #define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2)
+-#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3)
+-#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4)
+-#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5)
++#define ACPI_QUIRK_PNP_UART1_SKIP BIT(3)
++#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(4)
++#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(5)
++#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(6)
+
+ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ /*
+@@ -343,6 +340,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++ ACPI_QUIRK_PNP_UART1_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+@@ -429,7 +427,7 @@ bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+ }
+ EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);
+
+-int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
++static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+ {
+ struct acpi_device *adev = ACPI_COMPANION(controller_parent);
+ const struct dmi_system_id *dmi_id;
+@@ -437,20 +435,22 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
+ u64 uid;
+ int ret;
+
+- *skip = false;
+-
+ ret = acpi_dev_uid_to_integer(adev, &uid);
+ if (ret)
+ return 0;
+
+- /* to not match on PNP enumerated debug UARTs */
+- if (!dev_is_platform(controller_parent))
+- return 0;
+-
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (dmi_id)
+ quirks = (unsigned long)dmi_id->driver_data;
+
++ if (!dev_is_platform(controller_parent)) {
++ /* PNP enumerated UARTs */
++ if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
++ *skip = true;
++
++ return 0;
++ }
++
+ if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
+ *skip = true;
+
+@@ -464,7 +464,6 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
+ bool acpi_quirk_skip_gpio_event_handlers(void)
+ {
+@@ -479,8 +478,21 @@ bool acpi_quirk_skip_gpio_event_handlers(void)
+ return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
+ }
+ EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
++#else
++static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
++{
++ return 0;
++}
+ #endif
+
++int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
++{
++ *skip = false;
++
++ return acpi_dmi_skip_serdev_enumeration(controller_parent, skip);
++}
++EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
++
+ /* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
+ static const struct {
+ const char *hid;
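
The utils.c rework replaces the per-model CPU allowlist with two runtime facts: the CPU is an AMD Zen part and the firmware advertises low-power S0 idle in the FADT. A standalone sketch of the resulting predicate (both inputs are stubbed here; in the kernel they come from cpu_feature_enabled() and acpi_gbl_FADT.flags):

#include <stdbool.h>
#include <stdio.h>

/* Stub inputs -- in the kernel these are cpu_feature_enabled(X86_FEATURE_ZEN)
 * and (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0). */
static bool cpu_is_zen = true;
static bool fadt_low_power_s0 = true;

/* NVMe must be forced into D3 over suspend-to-idle only on AMD Zen
 * platforms that actually use Modern Standby (low-power S0 idle). */
static bool force_storage_d3(void)
{
    if (!cpu_is_zen)
        return false;
    return fadt_low_power_s0;
}

int main(void)
{
    printf("force NVMe D3: %s\n", force_storage_d3() ? "yes" : "no");
    return 0;
}

Deriving the answer from the platform's own firmware flags means new Zen models need no allowlist entry at all.
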
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 92128aae2d0601..94f10c6eb336a5 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ {
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
+ binder_enqueue_work_ilocked(work, &thread->todo);
++
++ /* (e)poll-based threads require an explicit wakeup signal when
++ * queuing their own work; they rely on these events to consume
++	 * messages without blocking on I/O. Without the wakeup, such
++	 * threads risk waiting indefinitely without handling the work.
++ */
++ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
++ thread->pid == current->pid && !thread->process_todo)
++ wake_up_interruptible_sync(&thread->wait);
++
+ thread->process_todo = true;
+ }
+
+@@ -560,9 +570,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
+ static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
+ {
+ return !thread->transaction_stack &&
+- binder_worklist_empty_ilocked(&thread->todo) &&
+- (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+- BINDER_LOOPER_STATE_REGISTERED));
++ binder_worklist_empty_ilocked(&thread->todo);
+ }
+
+ static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+@@ -1698,8 +1706,10 @@ static size_t binder_get_object(struct binder_proc *proc,
+ size_t object_size = 0;
+
+ read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+- if (offset > buffer->data_size || read_size < sizeof(*hdr))
++ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
++ !IS_ALIGNED(offset, sizeof(u32)))
+ return 0;
++
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+@@ -3332,6 +3342,7 @@ static void binder_transaction(struct binder_proc *proc,
+ */
+ copy_size = object_offset - user_offset;
+ if (copy_size && (user_offset > object_offset ||
++ object_offset > tr->data_size ||
+ binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+@@ -5030,7 +5041,7 @@ static __poll_t binder_poll(struct file *filp,
+
+ thread = binder_get_thread(proc);
+ if (!thread)
+- return POLLERR;
++ return EPOLLERR;
+
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+@@ -5356,7 +5367,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ goto err;
+ break;
+ case BINDER_SET_MAX_THREADS: {
+- int max_threads;
++ u32 max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
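
The binder.c wakeup fix fires only when three conditions line up: the thread is an (e)poll waiter, it queued work to itself, and nothing was pending before. A compact standalone demo of that predicate (the flag value and struct are illustrative, not binder's real definitions):

#include <stdbool.h>
#include <stdio.h>

#define LOOPER_STATE_POLL 0x1 /* illustrative bit, not binder's value */

struct thread_state {
    unsigned looper;
    int pid;
    bool process_todo;
};

/* Wake an (e)poll-based thread that queues work to itself while it had
 * no work pending -- otherwise it may sleep forever on its own message. */
static bool needs_self_wakeup(const struct thread_state *t, int current_pid)
{
    return (t->looper & LOOPER_STATE_POLL) &&
           t->pid == current_pid && !t->process_todo;
}

int main(void)
{
    struct thread_state polling = { LOOPER_STATE_POLL, 42, false };
    struct thread_state busy    = { LOOPER_STATE_POLL, 42, true  };

    printf("idle poller, self-queue:    %d\n", needs_self_wakeup(&polling, 42));
    printf("busy poller, self-queue:    %d\n", needs_self_wakeup(&busy, 42));
    printf("idle poller, other thread:  %d\n", needs_self_wakeup(&polling, 7));
    return 0;
}

Only the first case wakes: a thread with work already pending, or work queued by someone else, is signalled through the existing paths.
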
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index e3db8297095a2f..34c27223cb7dd2 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ }
+ if (mm) {
+ mmap_write_unlock(mm);
+- mmput(mm);
++ mmput_async(mm);
+ }
+ return 0;
+
+@@ -304,7 +304,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ err_no_vma:
+ if (mm) {
+ mmap_write_unlock(mm);
+- mmput(mm);
++ mmput_async(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+ }
+@@ -344,8 +344,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+- total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+- + sizeof(struct binder_buffer);
++ total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
+ num_buffers++;
+ }
+
+@@ -407,17 +406,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+- if (is_async &&
+- alloc->free_async_space < size + sizeof(struct binder_buffer)) {
++
++ /* Pad 0-size buffers so they get assigned unique addresses */
++ size = max(size, sizeof(void *));
++
++ if (is_async && alloc->free_async_space < size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+- /* Pad 0-size buffers so they get assigned unique addresses */
+- size = max(size, sizeof(void *));
+-
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+@@ -519,7 +518,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ buffer->pid = pid;
+ buffer->oneway_spam_suspect = false;
+ if (is_async) {
+- alloc->free_async_space -= size + sizeof(struct binder_buffer);
++ alloc->free_async_space -= size;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+@@ -557,7 +556,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+- * Return: The allocated buffer or %NULL if error
++ * Return: The allocated buffer or %ERR_PTR(-errno) if error
+ */
+ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+@@ -657,8 +656,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+- alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
+-
++ alloc->free_async_space += buffer_size;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+@@ -706,7 +704,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
+ /*
+ * We could eliminate the call to binder_alloc_clear_buf()
+ * from binder_alloc_deferred_release() by moving this to
+- * binder_alloc_free_buf_locked(). However, that could
++ * binder_free_buf_locked(). However, that could
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
+ * needed for correctness here.
+@@ -1005,7 +1003,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ goto err_mmget;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+- vma = binder_alloc_get_vma(alloc);
++ vma = vma_lookup(mm, page_addr);
++ if (vma && vma != binder_alloc_get_vma(alloc))
++ goto err_invalid_vma;
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+@@ -1031,6 +1031,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED_RETRY;
+
++err_invalid_vma:
++ mmap_read_unlock(mm);
+ err_mmap_read_lock_failed:
+ mmput_async(mm);
+ err_mmget:
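
The binder_alloc.c hunks move the zero-size padding ahead of the async-space check, so the amount checked, charged, and later refunded is always the same padded value. A toy model of that pad-then-charge discipline (all sizes here are made up):

#include <stdio.h>

#define PTR_PAD 8 /* stand-in for sizeof(void *): pad 0-size buffers */

int main(void)
{
    long free_async = 8;   /* async space remaining */
    long req = 0;          /* caller asks for a zero-size buffer */

    /* Pad first, then check and charge the identical value, so the
     * later refund on free balances the books exactly. */
    long size = req > PTR_PAD ? req : PTR_PAD;

    if (free_async < size) {
        printf("rejected: no async space left\n");
        return 0;
    }
    free_async -= size;    /* allocate: charge the padded size */
    printf("charged %ld, %ld left\n", size, free_async);

    free_async += size;    /* free: refund the same padded size */
    printf("refunded %ld, %ld left\n", size, free_async);
    return 0;
}
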
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index 7270d4d2220702..5b7c80b99ae865 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -421,7 +421,7 @@ struct binder_proc {
+ struct list_head todo;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+- int max_threads;
++ u32 max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int tmp_ref;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 08745e7db8201f..6e76780fb43083 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -48,6 +48,7 @@ enum {
+ enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
++ board_ahci_43bit_dma,
+ board_ahci_ign_iferr,
+ board_ahci_low_power,
+ board_ahci_no_debounce_delay,
+@@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
++ [board_ahci_43bit_dma] = {
++ AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
+ [board_ahci_ign_iferr] = {
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ .flags = AHCI_FLAG_COMMON,
+@@ -596,14 +604,19 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
+
+- /* Asmedia */
+- { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
+- { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
+- { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+- { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
+- { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
+- { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
++ /* ASMedia */
++ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
++ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
++ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
++ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
++ { PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
++ { PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
++ { PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */
++ { PCI_VDEVICE(ASMEDIA, 0x1165), board_ahci }, /* ASM1165 */
++ { PCI_VDEVICE(ASMEDIA, 0x1166), board_ahci }, /* ASM1166 */
+
+ /*
+ * Samsung SSDs found on some macbooks. NCQ times out if MSI is
+@@ -654,6 +667,87 @@ static int mobile_lpm_policy = -1;
+ module_param(mobile_lpm_policy, int, 0644);
+ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+
++static char *ahci_mask_port_map;
++module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
++MODULE_PARM_DESC(mask_port_map,
++	 "32-bit port map masks to ignore controller ports. "
++ "Valid values are: "
++ "\"<mask>\" to apply the same mask to all AHCI controller "
++ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
++ "specify different masks for the controllers specified, "
++ "where <pci_dev> is the PCI ID of an AHCI controller in the "
++ "form \"domain:bus:dev.func\"");
++
++static void ahci_apply_port_map_mask(struct device *dev,
++ struct ahci_host_priv *hpriv, char *mask_s)
++{
++ unsigned int mask;
++
++ if (kstrtouint(mask_s, 0, &mask)) {
++ dev_err(dev, "Invalid port map mask\n");
++ return;
++ }
++
++ hpriv->mask_port_map = mask;
++}
++
++static void ahci_get_port_map_mask(struct device *dev,
++ struct ahci_host_priv *hpriv)
++{
++ char *param, *end, *str, *mask_s;
++ char *name;
++
++ if (!strlen(ahci_mask_port_map))
++ return;
++
++ str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
++ if (!str)
++ return;
++
++ /* Handle single mask case */
++ if (!strchr(str, '=')) {
++ ahci_apply_port_map_mask(dev, hpriv, str);
++ goto free;
++ }
++
++ /*
++ * Mask list case: parse the parameter to apply the mask only if
++ * the device name matches.
++ */
++ param = str;
++ end = param + strlen(param);
++ while (param && param < end && *param) {
++ name = param;
++ param = strchr(name, '=');
++ if (!param)
++ break;
++
++ *param = '\0';
++ param++;
++ if (param >= end)
++ break;
++
++ if (strcmp(dev_name(dev), name) != 0) {
++ param = strchr(param, ',');
++ if (param)
++ param++;
++ continue;
++ }
++
++ mask_s = param;
++ param = strchr(mask_s, ',');
++ if (param) {
++ *param = '\0';
++ param++;
++ }
++
++ ahci_apply_port_map_mask(dev, hpriv, mask_s);
++ }
++
++free:
++ kfree(str);
++}
++
+ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
+ {
+@@ -676,6 +770,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
+ }
+
++ /* Handle port map masks passed as module parameter. */
++ if (ahci_mask_port_map)
++ ahci_get_port_map_mask(&pdev->dev, hpriv);
++
+ ahci_save_initial_config(&pdev->dev, hpriv);
+ }
+
+@@ -943,11 +1041,20 @@ static int ahci_pci_device_resume(struct device *dev)
+
+ #endif /* CONFIG_PM */
+
+-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
++static int ahci_configure_dma_masks(struct pci_dev *pdev,
++ struct ahci_host_priv *hpriv)
+ {
+- const int dma_bits = using_dac ? 64 : 32;
++ int dma_bits;
+ int rc;
+
++ if (hpriv->cap & HOST_CAP_64) {
++ dma_bits = 64;
++ if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
++ dma_bits = 43;
++ } else {
++ dma_bits = 32;
++ }
++
+ /*
+ * If the device fixup already set the dma_mask to some non-standard
+ * value, don't extend it here. This happens on STA2X11, for example.
+@@ -1868,8 +1975,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+- if (!host)
+- return -ENOMEM;
++ if (!host) {
++ rc = -ENOMEM;
++ goto err_rm_sysfs_file;
++ }
+ host->private_data = hpriv;
+
+ if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+@@ -1920,13 +2029,13 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ ahci_gtf_filter_workaround(host);
+
+ /* initialize adapter */
+- rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
++ rc = ahci_configure_dma_masks(pdev, hpriv);
+ if (rc)
+- return rc;
++ goto err_rm_sysfs_file;
+
+ rc = ahci_pci_reset_controller(host);
+ if (rc)
+- return rc;
++ goto err_rm_sysfs_file;
+
+ ahci_pci_init_controller(host);
+ ahci_pci_print_info(host);
+@@ -1935,10 +2044,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ rc = ahci_host_activate(host, &ahci_sht);
+ if (rc)
+- return rc;
++ goto err_rm_sysfs_file;
+
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
++
++err_rm_sysfs_file:
++ sysfs_remove_file_from_group(&pdev->dev.kobj,
++ &dev_attr_remapped_nvme.attr, NULL);
++ return rc;
+ }
+
+ static void ahci_shutdown_one(struct pci_dev *pdev)
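
ahci_get_port_map_mask() accepts either a bare mask or a comma-separated list of <pci_dev>=<mask> entries. A standalone re-implementation of the same strchr()/NUL-termination walk, with dev_name() stubbed, that can be compiled and tested outside the kernel:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *dev_name = "0000:00:17.0"; /* stub for dev_name(dev) */

/* Walk "name=mask,name=mask,..." and report the mask matching dev_name. */
static void parse_port_map_masks(const char *arg)
{
    char *str = strdup(arg);
    char *param = str, *end = str + strlen(str);

    while (param && param < end && *param) {
        char *name = param;

        param = strchr(name, '=');
        if (!param)
            break;
        *param++ = '\0';
        if (param >= end)
            break;

        if (strcmp(dev_name, name) != 0) {
            /* not our controller: skip past this entry's mask */
            param = strchr(param, ',');
            if (param)
                param++;
            continue;
        }

        char *mask_s = param;
        param = strchr(mask_s, ',');
        if (param)
            *param++ = '\0';
        printf("%s -> mask %#lx\n", name, strtoul(mask_s, NULL, 0));
    }
    free(str);
}

int main(void)
{
    parse_port_map_masks("0000:00:17.0=0x1,0000:03:00.0=0x3");
    return 0;
}

A boot argument such as ahci.mask_port_map=0x3 applies one mask to every AHCI controller, while the list form above targets only the controller whose PCI address matches (the addresses in the demo are illustrative).
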
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 4bae95b06ae3c9..df8f8a1a3a34c3 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -247,6 +247,7 @@ enum {
+ AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
+ suspend/resume */
+ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
++ AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
+
+ /* ap->flags bits */
+
+diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
+index 64f7f7d6ba84e0..11a2c199a7c246 100644
+--- a/drivers/ata/ahci_ceva.c
++++ b/drivers/ata/ahci_ceva.c
+@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
+ u32 axicc;
+ bool is_cci_enabled;
+ int flags;
+- struct reset_control *rst;
+ };
+
+ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
+@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+ };
+
++static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
++{
++ int rc, i;
++
++ rc = ahci_platform_enable_regulators(hpriv);
++ if (rc)
++ return rc;
++
++ rc = ahci_platform_enable_clks(hpriv);
++ if (rc)
++ goto disable_regulator;
++
++ /* Assert the controller reset */
++ rc = ahci_platform_assert_rsts(hpriv);
++ if (rc)
++ goto disable_clks;
++
++ for (i = 0; i < hpriv->nports; i++) {
++ rc = phy_init(hpriv->phys[i]);
++ if (rc)
++ goto disable_rsts;
++ }
++
++ /* De-assert the controller reset */
++ ahci_platform_deassert_rsts(hpriv);
++
++ for (i = 0; i < hpriv->nports; i++) {
++ rc = phy_power_on(hpriv->phys[i]);
++ if (rc) {
++ phy_exit(hpriv->phys[i]);
++ goto disable_phys;
++ }
++ }
++
++ return 0;
++
++disable_rsts:
++ ahci_platform_deassert_rsts(hpriv);
++
++disable_phys:
++ while (--i >= 0) {
++ phy_power_off(hpriv->phys[i]);
++ phy_exit(hpriv->phys[i]);
++ }
++
++disable_clks:
++ ahci_platform_disable_clks(hpriv);
++
++disable_regulator:
++ ahci_platform_disable_regulators(hpriv);
++
++ return rc;
++}
++
+ static int ceva_ahci_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
+@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ cevapriv->ahci_pdev = pdev;
+-
+- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+- NULL);
+- if (IS_ERR(cevapriv->rst))
+- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
+- "failed to get reset\n");
+-
+ hpriv = ahci_platform_get_resources(pdev, 0);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+- if (!cevapriv->rst) {
+- rc = ahci_platform_enable_resources(hpriv);
+- if (rc)
+- return rc;
+- } else {
+- int i;
++ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
++ NULL);
++ if (IS_ERR(hpriv->rsts))
++ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
++ "failed to get reset\n");
+
+- rc = ahci_platform_enable_clks(hpriv);
+- if (rc)
+- return rc;
+- /* Assert the controller reset */
+- reset_control_assert(cevapriv->rst);
+-
+- for (i = 0; i < hpriv->nports; i++) {
+- rc = phy_init(hpriv->phys[i]);
+- if (rc)
+- return rc;
+- }
+-
+- /* De-assert the controller reset */
+- reset_control_deassert(cevapriv->rst);
+-
+- for (i = 0; i < hpriv->nports; i++) {
+- rc = phy_power_on(hpriv->phys[i]);
+- if (rc) {
+- phy_exit(hpriv->phys[i]);
+- return rc;
+- }
+- }
+- }
++ rc = ceva_ahci_platform_enable_resources(hpriv);
++ if (rc)
++ return rc;
+
+ if (of_property_read_bool(np, "ceva,broken-gen2"))
+ cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
+@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
+ if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
+ (u8 *)&cevapriv->pp2c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
+ (u8 *)&cevapriv->pp2c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ 	/* Read OOB timing value for COMWAKE from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
+ (u8 *)&cevapriv->pp3c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
+ (u8 *)&cevapriv->pp3c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /* Read phy BURST timing value from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-burst-params",
+ (u8 *)&cevapriv->pp4c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-burst-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-burst-params",
+ (u8 *)&cevapriv->pp4c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-burst-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /* Read phy RETRY interval timing value from device-tree */
+ if (of_property_read_u16_array(np, "ceva,p0-retry-params",
+ (u16 *)&cevapriv->pp5c[0], 2) < 0) {
+ dev_warn(dev, "ceva,p0-retry-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u16_array(np, "ceva,p1-retry-params",
+ (u16 *)&cevapriv->pp5c[1], 2) < 0) {
+ dev_warn(dev, "ceva,p1-retry-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /*
+@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+- rc = ahci_platform_enable_resources(hpriv);
++ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
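
The ceva_ahci_platform_enable_resources() helper introduced above uses the classic goto-unwind layout: acquire resources in order and, on failure, release them in reverse starting from the label matching the failed step. A self-contained demonstration of that layout with stubbed resources:

#include <stdio.h>

/* Stubs standing in for the regulator/clock/reset/phy enable steps. */
static int step(const char *what, int fail)
{
    printf("enable %s\n", what);
    return fail;
}

static void undo(const char *what)
{
    printf("disable %s\n", what);
}

/* fail_at selects which step reports an error, to exercise each path. */
static int enable_resources(int fail_at)
{
    int rc;

    if ((rc = step("regulators", fail_at == 1)))
        return rc;
    if ((rc = step("clocks", fail_at == 2)))
        goto disable_regulators;
    if ((rc = step("resets", fail_at == 3)))
        goto disable_clks;
    if ((rc = step("phys", fail_at == 4)))
        goto disable_rsts;
    return 0;

disable_rsts:
    undo("resets");
disable_clks:
    undo("clocks");
disable_regulators:
    undo("regulators");
    return rc;
}

int main(void)
{
    printf("rc=%d\n", enable_resources(3)); /* fail at the reset step */
    return 0;
}

Falling through the labels gives strict last-in, first-out teardown, which is exactly why the probe-path error returns in the hunk now jump to disable_resources instead of returning directly.
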
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d8cc1e27a125f0..4ed90d46a017a8 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2034,6 +2034,10 @@ void ata_dev_power_set_active(struct ata_device *dev)
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
++ /* If the device is already sleeping, do nothing. */
++ if (dev->flags & ATA_DFLAG_SLEEPING)
++ return;
++
+ /*
+ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ * if supported by the device.
+@@ -2489,7 +2493,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
+ bool cdl_enabled;
+ u64 val;
+
+- if (ata_id_major_version(dev->id) < 12)
++ if (ata_id_major_version(dev->id) < 11)
+ goto not_supported;
+
+ if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
+@@ -5495,6 +5499,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ return ap;
+ }
+
++void ata_port_free(struct ata_port *ap)
++{
++ if (!ap)
++ return;
++
++ kfree(ap->pmp_link);
++ kfree(ap->slave_link);
++ kfree(ap->ncq_sense_buf);
++ kfree(ap);
++}
++EXPORT_SYMBOL_GPL(ata_port_free);
++
+ static void ata_devres_release(struct device *gendev, void *res)
+ {
+ struct ata_host *host = dev_get_drvdata(gendev);
+@@ -5521,12 +5537,7 @@ static void ata_host_release(struct kref *kref)
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+- struct ata_port *ap = host->ports[i];
+-
+- kfree(ap->pmp_link);
+- kfree(ap->slave_link);
+- kfree(ap->ncq_sense_buf);
+- kfree(ap);
++ ata_port_free(host->ports[i]);
+ host->ports[i] = NULL;
+ }
+ kfree(host);
+@@ -5576,12 +5587,16 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ if (!host)
+ return NULL;
+
+- if (!devres_open_group(dev, NULL, GFP_KERNEL))
+- goto err_free;
++ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
++ kfree(host);
++ return NULL;
++ }
+
+ dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
+- if (!dr)
++ if (!dr) {
++ kfree(host);
+ goto err_out;
++ }
+
+ devres_add(dev, dr);
+ dev_set_drvdata(dev, host);
+@@ -5609,8 +5624,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+
+ err_out:
+ devres_release_group(dev, NULL);
+- err_free:
+- kfree(host);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(ata_host_alloc);
+@@ -5909,7 +5922,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
+ * allocation time.
+ */
+ for (i = host->n_ports; host->ports[i]; i++)
+- kfree(host->ports[i]);
++ ata_port_free(host->ports[i]);
+
+ /* give ports names and add SCSI hosts */
+ for (i = 0; i < host->n_ports; i++) {
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 5686353e442cf4..a96566e1b2b84c 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -618,6 +618,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
+ struct ata_queued_cmd *qc;
+
++ /*
++ * If the scmd was added to EH, via ata_qc_schedule_eh() ->
++ * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
++ * have set DID_TIME_OUT (since libata does not have an abort
++ * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
++ */
++ set_host_byte(scmd, DID_OK);
++
+ ata_qc_for_each_raw(ap, qc, i) {
+ if (qc->flags & ATA_QCFLAG_ACTIVE &&
+ qc->scsicmd == scmd)
+@@ -700,8 +708,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+ ehc->saved_ncq_enabled |= 1 << devno;
+
+ /* If we are resuming, wake up the device */
+- if (ap->pflags & ATA_PFLAG_RESUMING)
++ if (ap->pflags & ATA_PFLAG_RESUMING) {
++ dev->flags |= ATA_DFLAG_RESUMING;
+ ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
++ }
+ }
+ }
+
+@@ -3170,6 +3180,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ return 0;
+
+ err:
++ dev->flags &= ~ATA_DFLAG_RESUMING;
+ *r_failed_dev = dev;
+ return rc;
+ }
+@@ -4038,10 +4049,20 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
+
+ WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+
+- /* Set all devices attached to the port in standby mode */
+- ata_for_each_link(link, ap, HOST_FIRST) {
+- ata_for_each_dev(dev, link, ENABLED)
+- ata_dev_power_set_standby(dev);
++ /*
++ * We will reach this point for all of the PM events:
++ * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set)
++ * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
++ *
++ * We do not want to perform disk spin down for PM_EVENT_FREEZE.
++ * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
++ */
++ if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
++ /* Set all devices attached to the port in standby mode */
++ ata_for_each_link(link, ap, HOST_FIRST) {
++ ata_for_each_dev(dev, link, ENABLED)
++ ata_dev_power_set_standby(dev);
++ }
+ }
+
+ /*
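
The suspend hunk keys disk spin-down off the PM message: the freeze phase of hibernation skips standby because the subsequent PM_EVENT_HIBERNATE pass spins disks down anyway. A small demo of the event test (the event bit values below mirror common linux/pm.h values but are shown here only for illustration):

#include <stdio.h>

/* Illustrative event bits -- the kernel's live in linux/pm.h. */
#define PM_EVENT_FREEZE    0x0001
#define PM_EVENT_SUSPEND   0x0002
#define PM_EVENT_HIBERNATE 0x0004

static void port_suspend(unsigned event)
{
    if (!(event & PM_EVENT_FREEZE))
        printf("event %#x: spin disks down to standby\n", event);
    else
        printf("event %#x: skip spin-down, hibernate pass will do it\n", event);
}

int main(void)
{
    port_suspend(PM_EVENT_SUSPEND);
    port_suspend(PM_EVENT_FREEZE);
    port_suspend(PM_EVENT_HIBERNATE);
    return 0;
}
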
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 3a957c4da40927..5377d094bf7548 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -230,6 +230,87 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
+ SCSI_SENSE_BUFFERSIZE, information);
+ }
+
++/**
++ * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
++ * @qc: ATA PASS-THROUGH command.
++ *
++ * Populates "ATA Status Return sense data descriptor" / "Fixed format
++ * sense data" with ATA taskfile fields.
++ *
++ * LOCKING:
++ * None.
++ */
++static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
++{
++ struct ata_device *dev = qc->dev;
++ struct scsi_cmnd *cmd = qc->scsicmd;
++ struct ata_taskfile *tf = &qc->result_tf;
++ unsigned char *sb = cmd->sense_buffer;
++
++ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
++ ata_dev_dbg(dev,
++ "missing result TF: can't set ATA PT sense fields\n");
++ return;
++ }
++
++ if ((sb[0] & 0x7f) >= 0x72) {
++ unsigned char *desc;
++ u8 len;
++
++ /* descriptor format */
++ len = sb[7];
++ desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
++ if (!desc) {
++ if (SCSI_SENSE_BUFFERSIZE < len + 14)
++ return;
++ sb[7] = len + 14;
++ desc = sb + 8 + len;
++ }
++ desc[0] = 9;
++ desc[1] = 12;
++ /*
++ * Copy registers into sense buffer.
++ */
++ desc[2] = 0x00;
++ desc[3] = tf->error;
++ desc[5] = tf->nsect;
++ desc[7] = tf->lbal;
++ desc[9] = tf->lbam;
++ desc[11] = tf->lbah;
++ desc[12] = tf->device;
++ desc[13] = tf->status;
++
++ /*
++ * Fill in Extend bit, and the high order bytes
++ * if applicable.
++ */
++ if (tf->flags & ATA_TFLAG_LBA48) {
++ desc[2] |= 0x01;
++ desc[4] = tf->hob_nsect;
++ desc[6] = tf->hob_lbal;
++ desc[8] = tf->hob_lbam;
++ desc[10] = tf->hob_lbah;
++ }
++ } else {
++ /* Fixed sense format */
++ sb[0] |= 0x80;
++ sb[3] = tf->error;
++ sb[4] = tf->status;
++ sb[5] = tf->device;
++ sb[6] = tf->nsect;
++ if (tf->flags & ATA_TFLAG_LBA48) {
++ sb[8] |= 0x80;
++ if (tf->hob_nsect)
++ sb[8] |= 0x40;
++ if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
++ sb[8] |= 0x20;
++ }
++ sb[9] = tf->lbal;
++ sb[10] = tf->lbam;
++ sb[11] = tf->lbah;
++ }
++}
++
+ static void ata_scsi_set_invalid_field(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u16 field, u8 bit)
+ {
+@@ -837,10 +918,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
+ * ata_gen_passthru_sense - Generate check condition sense block.
+ * @qc: Command that completed.
+ *
+- * This function is specific to the ATA descriptor format sense
+- * block specified for the ATA pass through commands. Regardless
+- * of whether the command errored or not, return a sense
+- * block. Copy all controller registers into the sense
++ * This function is specific to the ATA pass through commands.
++ * Regardless of whether the command errored or not, return a sense
+ * block. If there was no error, we get the request from an ATA
+ * passthrough command, so we use the following sense data:
+ * sk = RECOVERED ERROR
+@@ -852,13 +931,16 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
+ */
+ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ {
++ struct ata_device *dev = qc->dev;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+- unsigned char *sb = cmd->sense_buffer;
+- unsigned char *desc = sb + 8;
+ u8 sense_key, asc, ascq;
+
+- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
++ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
++ ata_dev_dbg(dev,
++ "missing result TF: can't generate ATA PT sense data\n");
++ return;
++ }
+
+ /*
+ * Use ata_to_sense_error() to map status register bits
+@@ -872,66 +954,18 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ } else {
+ /*
+ * ATA PASS-THROUGH INFORMATION AVAILABLE
+- * Always in descriptor format sense.
++ *
++ * Note: we are supposed to call ata_scsi_set_sense(), which
++ * respects the D_SENSE bit, instead of unconditionally
++ * generating the sense data in descriptor format. However,
++ * because hdparm, hddtemp, and udisks incorrectly assume sense
++ * data in descriptor format, without even looking at the
++ * RESPONSE CODE field in the returned sense data (to see which
++ * format the returned sense data is in), we are stuck with
++ * being bug compatible with older kernels.
+ */
+ scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
+ }
+-
+- if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
+- u8 len;
+-
+- /* descriptor format */
+- len = sb[7];
+- desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+- if (!desc) {
+- if (SCSI_SENSE_BUFFERSIZE < len + 14)
+- return;
+- sb[7] = len + 14;
+- desc = sb + 8 + len;
+- }
+- desc[0] = 9;
+- desc[1] = 12;
+- /*
+- * Copy registers into sense buffer.
+- */
+- desc[2] = 0x00;
+- desc[3] = tf->error;
+- desc[5] = tf->nsect;
+- desc[7] = tf->lbal;
+- desc[9] = tf->lbam;
+- desc[11] = tf->lbah;
+- desc[12] = tf->device;
+- desc[13] = tf->status;
+-
+- /*
+- * Fill in Extend bit, and the high order bytes
+- * if applicable.
+- */
+- if (tf->flags & ATA_TFLAG_LBA48) {
+- desc[2] |= 0x01;
+- desc[4] = tf->hob_nsect;
+- desc[6] = tf->hob_lbal;
+- desc[8] = tf->hob_lbam;
+- desc[10] = tf->hob_lbah;
+- }
+- } else {
+- /* Fixed sense format */
+- desc[0] = tf->error;
+- desc[1] = tf->status;
+- desc[2] = tf->device;
+- desc[3] = tf->nsect;
+- desc[7] = 0;
+- if (tf->flags & ATA_TFLAG_LBA48) {
+- desc[8] |= 0x80;
+- if (tf->hob_nsect)
+- desc[8] |= 0x40;
+- if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+- desc[8] |= 0x20;
+- }
+- desc[9] = tf->lbal;
+- desc[10] = tf->lbam;
+- desc[11] = tf->lbah;
+- }
+ }
+
+ /**
+@@ -953,14 +987,19 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
+ u64 block;
+ u8 sense_key, asc, ascq;
+
+- memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+-
+ if (ata_dev_disabled(dev)) {
+ /* Device disabled after error recovery */
+ /* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
+ ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
+ return;
+ }
++
++ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
++ ata_dev_dbg(dev,
++ "missing result TF: can't generate sense data\n");
++ return;
++ }
++
+ /* Use ata_to_sense_error() to map status register bits
+ * onto sense key, asc & ascq.
+ */
+@@ -1055,9 +1094,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ * and resume and shutdown only. For system level suspend/resume,
+ * devices power state is handled directly by libata EH.
++ * Given that disks are always spun up on system resume, also
++ * make sure that the sd driver forces runtime suspended disks
++ * to be resumed to correctly reflect the power state of the
++ * device.
+ */
+- sdev->manage_runtime_start_stop = true;
+- sdev->manage_shutdown = true;
++ sdev->manage_runtime_start_stop = 1;
++ sdev->manage_shutdown = 1;
++ sdev->force_runtime_start_on_system_start = 1;
+ }
+
+ /*
+@@ -1659,26 +1703,29 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+ {
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ u8 *cdb = cmd->cmnd;
+- int need_sense = (qc->err_mask != 0) &&
+- !(qc->flags & ATA_QCFLAG_SENSE_VALID);
++ bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
++ bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12;
++ bool is_ck_cond_request = cdb[2] & 0x20;
++ bool is_error = qc->err_mask != 0;
+
+ /* For ATA pass thru (SAT) commands, generate a sense block if
+ * user mandated it or if there's an error. Note that if we
+- * generate because the user forced us to [CK_COND =1], a check
++ * generate because the user forced us to [CK_COND=1], a check
+ * condition is generated and the ATA register values are returned
+ * whether the command completed successfully or not. If there
+- * was no error, we use the following sense data:
++ * was no error, and CK_COND=1, we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
+ */
+- if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
+- ((cdb[2] & 0x20) || need_sense))
+- ata_gen_passthru_sense(qc);
+- else if (need_sense)
++ if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) {
++ if (!have_sense)
++ ata_gen_passthru_sense(qc);
++ ata_scsi_set_passthru_sense_fields(qc);
++ if (is_ck_cond_request)
++ set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
++ } else if (is_error && !have_sense) {
+ ata_gen_ata_sense(qc);
+- else
+- /* Keep the SCSI ML and status byte, clear host byte. */
+- cmd->result &= 0x0000ffff;
++ }
+
+ ata_qc_done(qc);
+ }
+@@ -2343,7 +2390,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
+ case ALL_SUB_MPAGES:
+ n = ata_msense_control_spg0(dev, buf, changeable);
+ n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+- n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
++ n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
+ n += ata_msense_control_ata_feature(dev, buf + n);
+ return n;
+ default:
+@@ -2617,14 +2664,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
+ /* handle completion from EH */
+ if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+
+- if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+- /* FIXME: not quite right; we don't want the
+- * translation of taskfile registers into a
+- * sense descriptors, since that's only
+- * correct for ATA, not ATAPI
+- */
++ if (!(qc->flags & ATA_QCFLAG_SENSE_VALID))
+ ata_gen_passthru_sense(qc);
+- }
+
+ /* SCSI EH automatically locks door if sdev->locked is
+ * set. Sometimes door lock request continues to
+@@ -4760,6 +4801,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned long flags;
++ bool do_resume;
+ int ret = 0;
+
+ mutex_lock(&ap->scsi_scan_mutex);
+@@ -4774,25 +4816,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
+ * bail out.
+ */
+ if (ap->pflags & ATA_PFLAG_SUSPENDED)
+- goto unlock;
++ goto unlock_ap;
+
+ if (!sdev)
+ continue;
+ if (scsi_device_get(sdev))
+ continue;
+
++ do_resume = dev->flags & ATA_DFLAG_RESUMING;
++
+ spin_unlock_irqrestore(ap->lock, flags);
++ if (do_resume) {
++ ret = scsi_resume_device(sdev);
++ if (ret == -EWOULDBLOCK)
++ goto unlock_scan;
++ dev->flags &= ~ATA_DFLAG_RESUMING;
++ }
+ ret = scsi_rescan_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ret)
+- goto unlock;
++ goto unlock_ap;
+ }
+ }
+
+-unlock:
++unlock_ap:
+ spin_unlock_irqrestore(ap->lock, flags);
++unlock_scan:
+ mutex_unlock(&ap->scsi_scan_mutex);
+
+ /* Reschedule with a delay if scsi_rescan_device() returned an error */
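
The rewritten ata_scsi_qc_complete() path boils down to four booleans. A standalone sketch of the resulting decision table (CDB parsing is elided; the action strings summarise, rather than reproduce, the kernel calls):

#include <stdbool.h>
#include <stdio.h>

/* Decide what completion does for a finished command. */
static const char *completion_action(bool is_passthru, bool ck_cond,
                                     bool is_error, bool have_sense)
{
    if (is_passthru && (ck_cond || is_error || have_sense))
        return ck_cond ? "fill ATA fields + CHECK CONDITION"
                       : "fill ATA fields in sense data";
    if (is_error && !have_sense)
        return "generate regular ATA sense";
    return "complete as-is";
}

int main(void)
{
    /* CK_COND=1 forces register return even on success. */
    printf("PT ok,  CK_COND=1:    %s\n", completion_action(true, true, false, false));
    printf("PT err, CK_COND=0:    %s\n", completion_action(true, false, true, false));
    printf("non-PT err, no sense: %s\n", completion_action(false, false, true, false));
    printf("non-PT ok:            %s\n", completion_action(false, false, false, false));
    return 0;
}

The key behavioural change is the first row: a successful pass-through with CK_COND=1 still gets its taskfile registers copied into the sense buffer and a CHECK CONDITION status, so userspace tools can read the device registers back.
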
+diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
+index 25a63d043c8e1f..0f77e042406619 100644
+--- a/drivers/ata/pata_isapnp.c
++++ b/drivers/ata/pata_isapnp.c
+@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
+ if (pnp_port_valid(idev, 1)) {
+ ctl_addr = devm_ioport_map(&idev->dev,
+ pnp_port_start(idev, 1), 1);
++ if (!ctl_addr)
++ return -ENOMEM;
++
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ap->ops = &isapnp_port_ops;
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index 448a511cbc179a..e7ac142c2423dd 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+ static struct legacy_probe probe_list[NR_HOST];
+ static struct legacy_data legacy_data[NR_HOST];
+ static struct ata_host *legacy_host[NR_HOST];
+-static int nr_legacy_host;
+-
+
+ /**
+ * legacy_probe_add - Add interface to probe list
+@@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void)
+ {
+ int i;
+
+- for (i = 0; i < nr_legacy_host; i++) {
++ for (i = 0; i < NR_HOST; i++) {
+ struct legacy_data *ld = &legacy_data[i];
+- ata_host_detach(legacy_host[i]);
++
++ if (legacy_host[i])
++ ata_host_detach(legacy_host[i]);
+ platform_device_unregister(ld->platform_dev);
+ }
+ }
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index 17f6ccee53c7c2..ffbb2e8591cefc 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -541,7 +541,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+
+ while (sg_len) {
+ /* table overflow should never happen */
+- BUG_ON (pi++ >= MAX_DCMDS);
++ if (WARN_ON_ONCE(pi >= MAX_DCMDS))
++ return AC_ERR_SYSTEM;
+
+ len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+ table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
+@@ -553,11 +554,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ addr += len;
+ sg_len -= len;
+ ++table;
++ ++pi;
+ }
+ }
+
+ /* Should never happen according to Tejun */
+- BUG_ON(!pi);
++ if (WARN_ON_ONCE(!pi))
++ return AC_ERR_SYSTEM;
+
+ /* Convert the last command to an input/output */
+ table--;
+diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
+index 549ff24a982311..4edddf6bcc1507 100644
+--- a/drivers/ata/pata_serverworks.c
++++ b/drivers/ata/pata_serverworks.c
+@@ -46,10 +46,11 @@
+ #define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
+ #define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
+
+-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
+- * can overrun their FIFOs when used with the CSB5 */
+-
+-static const char *csb_bad_ata100[] = {
++/*
++ * Seagate Barracuda ATA IV Family drives in UDMA mode 5
++ * can overrun their FIFOs when used with the CSB5.
++ */
++static const char * const csb_bad_ata100[] = {
+ "ST320011A",
+ "ST340016A",
+ "ST360021A",
+@@ -163,10 +164,11 @@ static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned in
+ * @adev: ATA device
+ * @mask: Mask of proposed modes
+ *
+- * Check the blacklist and disable UDMA5 if matched
++ * Check the list of devices with broken UDMA5 and
++ * disable UDMA5 if matched.
+ */
+-
+-static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask)
++static unsigned int serverworks_csb_filter(struct ata_device *adev,
++ unsigned int mask)
+ {
+ const char *p;
+ char model_num[ATA_ID_PROD_LEN + 1];
+diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
+index 400b22ee99c33a..4c270999ba3ccd 100644
+--- a/drivers/ata/sata_gemini.c
++++ b/drivers/ata/sata_gemini.c
+@@ -200,7 +200,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
+ pclk = sg->sata0_pclk;
+ else
+ pclk = sg->sata1_pclk;
+- clk_enable(pclk);
++ ret = clk_enable(pclk);
++ if (ret)
++ return ret;
++
+ msleep(10);
+
+ /* Do not keep clocking a bridge that is not online */
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 45e48d653c60b5..80a45e11fb5b60 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
+ },
+ };
+
+-static const struct pci_device_id mv_pci_tbl[] = {
+- { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+- { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+- { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+- { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+- /* RocketRAID 1720/174x have different identifiers */
+- { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+- { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+- { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+-
+- { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+- { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+- { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+- { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+- { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+-
+- { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+-
+- /* Adaptec 1430SA */
+- { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+-
+- /* Marvell 7042 support */
+- { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+-
+- /* Highpoint RocketRAID PCIe series */
+- { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+- { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+-
+- { } /* terminate list */
+-};
+-
+ static const struct mv_hw_ops mv5xxx_ops = {
+ .phy_errata = mv5_phy_errata,
+ .enable_leds = mv5_enable_leds,
+@@ -4300,6 +4269,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
+ static int mv_pci_device_resume(struct pci_dev *pdev);
+ #endif
+
++static const struct pci_device_id mv_pci_tbl[] = {
++ { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
++ { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
++ { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
++ { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
++ /* RocketRAID 1720/174x have different identifiers */
++ { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
++ { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
++ { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
++
++ { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
++ { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
++ { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
++ { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
++ { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
++
++ { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
++
++ /* Adaptec 1430SA */
++ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
++
++ /* Marvell 7042 support */
++ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
++
++ /* Highpoint RocketRAID PCIe series */
++ { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
++ { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
++
++ { } /* terminate list */
++};
+
+ static struct pci_driver mv_pci_driver = {
+ .name = DRV_NAME,
+@@ -4312,6 +4311,7 @@ static struct pci_driver mv_pci_driver = {
+ #endif
+
+ };
++MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
+
+ /**
+ * mv_print_info - Dump key info to kernel log for perusal.
+@@ -4484,7 +4484,6 @@ static void __exit mv_exit(void)
+ MODULE_AUTHOR("Brett Russ");
+ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
+ MODULE_LICENSE("GPL v2");
+-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
+ MODULE_VERSION(DRV_VERSION);
+ MODULE_ALIAS("platform:" DRV_NAME);
+
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index cc77c024828431..df095659bae0f5 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
+ static const struct sil_drivelist {
+ const char *product;
+ unsigned int quirk;
+-} sil_blacklist [] = {
++} sil_quirks[] = {
+ { "ST320012AS", SIL_QUIRK_MOD15WRITE },
+ { "ST330013AS", SIL_QUIRK_MOD15WRITE },
+ { "ST340017AS", SIL_QUIRK_MOD15WRITE },
+@@ -600,8 +600,8 @@ static void sil_thaw(struct ata_port *ap)
+ * list, and apply the fixups to only the specific
+ * devices/hosts/firmwares that need it.
+ *
+- * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
+- * The Maxtor quirk is in the blacklist, but I'm keeping the original
++ * 20040111 - Seagate drives affected by the Mod15Write bug are quirked.
++ * The Maxtor quirk is in sil_quirks, but I'm keeping the original
+ * pessimistic fix for the following reasons...
+ * - There seems to be less info on it, only one device gleaned off the
+ * Windows driver, maybe only one is affected. More info would be greatly
+@@ -620,9 +620,9 @@ static void sil_dev_config(struct ata_device *dev)
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+- for (n = 0; sil_blacklist[n].product; n++)
+- if (!strcmp(sil_blacklist[n].product, model_num)) {
+- quirks = sil_blacklist[n].quirk;
++ for (n = 0; sil_quirks[n].product; n++)
++ if (!strcmp(sil_quirks[n].product, model_num)) {
++ quirks = sil_quirks[n].quirk;
+ break;
+ }
+
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
+index b51d7a9d0d90ce..a482741eb181ff 100644
+--- a/drivers/ata/sata_sx4.c
++++ b/drivers/ata/sata_sx4.c
+@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
+
+ offset -= (idx * window_size);
+ idx++;
+- dist = ((long) (window_size - (offset + size))) >= 0 ? size :
+- (long) (window_size - offset);
++ dist = min(size, window_size - offset);
+ memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
+
+ psource += dist;
+@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ offset -= (idx * window_size);
+ idx++;
+- dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
+- (long) (window_size - offset);
++ dist = min(size, window_size - offset);
+ memcpy_toio(dimm_mmio + offset / 4, psource, dist);
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
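
The sata_sx4.c change swaps a signed-cast ternary for min(). Both expressions compute how many bytes of the request fit in the current DIMM window; a quick standalone check that they agree on in-range inputs:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned long window_size = 0x1000;

    /* One case that fits entirely, one that is cut off by the window.
     * The old and new expressions agree; min() just drops the fragile
     * signed cast that the original relied on. */
    unsigned long cases[][2] = { { 0x0, 0x800 }, { 0xe00, 0x800 } };

    for (int i = 0; i < 2; i++) {
        unsigned long offset = cases[i][0], size = cases[i][1];
        unsigned long old = (long)(window_size - (offset + size)) >= 0
                          ? size : window_size - offset;
        unsigned long new = min_ul(size, window_size - offset);

        printf("offset=%#lx size=%#lx -> old=%#lx new=%#lx\n",
               offset, size, old, new);
    }
    return 0;
}

Since "offset + size <= window_size" is equivalent to "size <= window_size - offset" (offset never exceeds window_size here), min() expresses the same clamp directly.
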
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index e327a0229dc173..a876024d8a05f9 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ rpp->len += skb->len;
+
+ if (stat & SAR_RSQE_EPDU) {
++ unsigned int len, truesize;
+ unsigned char *l1l2;
+- unsigned int len;
+
+ l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
+
+@@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+
++ truesize = skb->truesize;
+ vcc->push(vcc, skb);
+ atomic_inc(&vcc->stats->rx);
+
+- if (skb->truesize > SAR_FB_SIZE_3)
++ if (truesize > SAR_FB_SIZE_3)
+ add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+- else if (skb->truesize > SAR_FB_SIZE_2)
++ else if (truesize > SAR_FB_SIZE_2)
+ add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
+- else if (skb->truesize > SAR_FB_SIZE_1)
++ else if (truesize > SAR_FB_SIZE_1)
+ add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
+ else
+ add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
+@@ -2930,6 +2931,8 @@ open_card_ubr0(struct idt77252_dev *card)
+ vc->scq = alloc_scq(card, vc->class);
+ if (!vc->scq) {
+ printk("%s: can't get SCQ.\n", card->name);
++ kfree(card->vcs[0]);
++ card->vcs[0] = NULL;
+ return -ENOMEM;
+ }
+
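
The dequeue_rx() hunk caches skb->truesize in a local before calling vcc->push(): push() hands the skb to the ATM stack, which may free it, so reading skb->truesize afterwards would be a use-after-free. A minimal userspace sketch of the same discipline, with a hypothetical consumer that takes ownership:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { size_t truesize; char data[64]; };

    static void push(struct buf *b) { free(b); } /* consumer owns and frees b */

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        if (!b)
            return 1;
        b->truesize = sizeof(*b);

        size_t truesize = b->truesize; /* capture before ownership moves */
        push(b);                       /* b must not be touched after this */

        printf("refill pool using truesize=%zu\n", truesize);
        return 0;
    }
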
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 32414868695305..9bba8f280a4d4c 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
+ static int reset_sar(struct atm_dev *dev)
+ {
+ IADEV *iadev;
+- int i, error = 1;
++ int i, error;
+ unsigned int pci[64];
+
+ iadev = INPH_IA_DEV(dev);
+- for(i=0; i<64; i++)
+- if ((error = pci_read_config_dword(iadev->pci,
+- i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
+- return error;
++ for (i = 0; i < 64; i++) {
++ error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
++ if (error != PCIBIOS_SUCCESSFUL)
++ return error;
++ }
+ writel(0, iadev->reg+IPHASE5575_EXT_RESET);
+- for(i=0; i<64; i++)
+- if ((error = pci_write_config_dword(iadev->pci,
+- i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
+- return error;
++ for (i = 0; i < 64; i++) {
++ error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
++ if (error != PCIBIOS_SUCCESSFUL)
++ return error;
++ }
+ udelay(5);
+ return 0;
+ }
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 94fbc3abe60e6a..d3c30a28c410ea 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
+ struct sk_buff *skb;
+ unsigned int len;
+
+- spin_lock(&card->cli_queue_lock);
++ spin_lock_bh(&card->cli_queue_lock);
+ skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
+- spin_unlock(&card->cli_queue_lock);
++ spin_unlock_bh(&card->cli_queue_lock);
+ if(skb == NULL)
+ return sprintf(buf, "No data.\n");
+
+@@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc)
+ struct pkt_hdr *header;
+
+ /* Remove any yet-to-be-transmitted packets from the pending queue */
+- spin_lock(&card->tx_queue_lock);
++ spin_lock_bh(&card->tx_queue_lock);
+ skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
+ if (SKB_CB(skb)->vcc == vcc) {
+ skb_unlink(skb, &card->tx_queue[port]);
+ solos_pop(vcc, skb);
+ }
+ }
+- spin_unlock(&card->tx_queue_lock);
++ spin_unlock_bh(&card->tx_queue_lock);
+
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
+ if (!skb) {
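
The solos-pci hunks switch to the _bh lock variants because cli_queue_lock and tx_queue_lock appear to also be taken from the card's tasklet (softirq) path; if a plain spin_lock() holder in process context is interrupted by that softirq on the same CPU, the CPU spins on a lock it already holds. The shape of the fix, as a sketch:

    /* Process context sharing a lock with softirq context must also
     * disable bottom halves; otherwise a softirq arriving on this CPU
     * can deadlock against the lock held below it. */
    spin_lock_bh(&card->tx_queue_lock);
    /* ... walk or modify the queue ... */
    spin_unlock_bh(&card->tx_queue_lock);
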
+diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
+index 3a2d883872249e..b360ddefc4238b 100644
+--- a/drivers/auxdisplay/ht16k33.c
++++ b/drivers/auxdisplay/ht16k33.c
+@@ -507,6 +507,7 @@ static int ht16k33_led_probe(struct device *dev, struct led_classdev *led,
+ led->max_brightness = MAX_BRIGHTNESS;
+
+ err = devm_led_classdev_register_ext(dev, led, &init_data);
++ fwnode_handle_put(init_data.fwnode);
+ if (err)
+ dev_err(dev, "Failed to register LED\n");
+
+diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
+index eaa31e567d1ece..5b59d133b6af4f 100644
+--- a/drivers/base/arch_numa.c
++++ b/drivers/base/arch_numa.c
+@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
+
+-static int __init early_cpu_to_node(int cpu)
++int __init early_cpu_to_node(int cpu)
+ {
+ return cpu_to_node_map[cpu];
+ }
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index eb4c0ace924201..a8e3d8165232fd 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -192,11 +192,14 @@ extern struct kset *devices_kset;
+ void devices_kset_move_last(struct device *dev);
+
+ #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
+-void module_add_driver(struct module *mod, struct device_driver *drv);
++int module_add_driver(struct module *mod, struct device_driver *drv);
+ void module_remove_driver(struct device_driver *drv);
+ #else
+-static inline void module_add_driver(struct module *mod,
+- struct device_driver *drv) { }
++static inline int module_add_driver(struct module *mod,
++ struct device_driver *drv)
++{
++ return 0;
++}
+ static inline void module_remove_driver(struct device_driver *drv) { }
+ #endif
+
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 84a21084d67d16..d4361ad3b433f5 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
+ {
+ struct bus_attribute *bus_attr = to_bus_attr(attr);
+ struct subsys_private *subsys_priv = to_subsys_private(kobj);
+- ssize_t ret = 0;
++ /* return -EIO for reading a bus attribute without show() */
++ ssize_t ret = -EIO;
+
+ if (bus_attr->show)
+ ret = bus_attr->show(subsys_priv->bus, buf);
+@@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
+ {
+ struct bus_attribute *bus_attr = to_bus_attr(attr);
+ struct subsys_private *subsys_priv = to_subsys_private(kobj);
+- ssize_t ret = 0;
++ /* return -EIO for writing a bus attribute without store() */
++ ssize_t ret = -EIO;
+
+ if (bus_attr->store)
+ ret = bus_attr->store(subsys_priv->bus, buf, count);
+@@ -674,7 +676,12 @@ int bus_add_driver(struct device_driver *drv)
+ if (error)
+ goto out_del_list;
+ }
+- module_add_driver(drv->owner, drv);
++ error = module_add_driver(drv->owner, drv);
++ if (error) {
++ printk(KERN_ERR "%s: failed to create module links for %s\n",
++ __func__, drv->name);
++ goto out_detach;
++ }
+
+ error = driver_create_file(drv, &driver_attr_uevent);
+ if (error) {
+@@ -699,6 +706,8 @@ int bus_add_driver(struct device_driver *drv)
+
+ return 0;
+
++out_detach:
++ driver_detach(drv);
+ out_del_list:
+ klist_del(&priv->knode_bus);
+ out_unregister:
+@@ -913,6 +922,8 @@ int bus_register(const struct bus_type *bus)
+ bus_remove_file(bus, &bus_attr_uevent);
+ bus_uevent_fail:
+ kset_unregister(&priv->subsys);
++ /* Above kset_unregister() will kfree @priv */
++ priv = NULL;
+ out:
+ kfree(priv);
+ return retval;
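
In bus_register(), kset_unregister() drops the last reference and frees priv, yet control then falls through to the shared kfree(priv) at out:; NULLing the pointer turns that into a harmless kfree(NULL). The same idiom in plain C, where free(NULL) is likewise defined as a no-op:

    #include <stdlib.h>

    int main(void)
    {
        char *priv = malloc(16);

        /* error path: some teardown call already freed the object */
        free(priv);     /* stands in for kset_unregister() freeing priv */
        priv = NULL;    /* poison the local so shared cleanup stays safe */

        /* shared exit path */
        free(priv);     /* free(NULL): defined, does nothing */
        return 0;
    }
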
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index 05d9df90f621be..9cd489a5770866 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -215,6 +215,7 @@ int class_register(const struct class *cls)
+ return 0;
+
+ err_out:
++ lockdep_unregister_key(key);
+ kfree(cp);
+ return error;
+ }
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 4d8b315c48a15a..60a0a4630a5bb2 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -25,6 +25,7 @@
+ #include <linux/mutex.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/netdevice.h>
++#include <linux/rcupdate.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/mm.h>
+ #include <linux/string_helpers.h>
+@@ -44,6 +45,7 @@ static bool fw_devlink_is_permissive(void);
+ static void __fw_devlink_link_to_consumers(struct device *dev);
+ static bool fw_devlink_drv_reg_done;
+ static bool fw_devlink_best_effort;
++static struct workqueue_struct *device_link_wq;
+
+ /**
+ * __fwnode_link_add - Create a link between two fwnode_handles.
+@@ -283,10 +285,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
+ return false;
+ }
+
++#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
++ DL_FLAG_CYCLE | \
++ DL_FLAG_MANAGED)
+ static inline bool device_link_flag_is_sync_state_only(u32 flags)
+ {
+- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
+- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
++ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
+ }
+
+ /**
+@@ -529,12 +533,26 @@ static void devlink_dev_release(struct device *dev)
+ /*
+ * It may take a while to complete this work because of the SRCU
+ * synchronization in device_link_release_fn() and if the consumer or
+- * supplier devices get deleted when it runs, so put it into the "long"
+- * workqueue.
++ * supplier devices get deleted when it runs, so put it into the
++ * dedicated workqueue.
+ */
+- queue_work(system_long_wq, &link->rm_work);
++ queue_work(device_link_wq, &link->rm_work);
+ }
+
++/**
++ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
++ */
++void device_link_wait_removal(void)
++{
++ /*
++ * devlink removal jobs are queued in the dedicated work queue.
++ * To be sure that all removal jobs are terminated, ensure that any
++ * scheduled work has run to completion.
++ */
++ flush_workqueue(device_link_wq);
++}
++EXPORT_SYMBOL_GPL(device_link_wait_removal);
++
+ static struct class devlink_class = {
+ .name = "devlink",
+ .dev_groups = devlink_groups,
+@@ -2057,9 +2075,14 @@ static int fw_devlink_create_devlink(struct device *con,
+
+ /*
+ * SYNC_STATE_ONLY device links don't block probing and supports cycles.
+- * So cycle detection isn't necessary and shouldn't be done.
++ * So, one might expect that cycle detection isn't necessary for them.
++ * However, if the device link was marked as SYNC_STATE_ONLY because
++ * it's part of a cycle, then we still need to do cycle detection. This
++ * is because the consumer and supplier might be part of multiple cycles
++ * and we need to detect all those cycles.
+ */
+- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
++ if (!device_link_flag_is_sync_state_only(flags) ||
++ flags & DL_FLAG_CYCLE) {
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(con, sup_handle)) {
+ __fwnode_link_cycle(link);
+@@ -2543,6 +2566,7 @@ static const char *dev_uevent_name(const struct kobject *kobj)
+ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+ {
+ const struct device *dev = kobj_to_dev(kobj);
++ struct device_driver *driver;
+ int retval = 0;
+
+ /* add device node properties if present */
+@@ -2571,8 +2595,12 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
+ if (dev->type && dev->type->name)
+ add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
+
+- if (dev->driver)
+- add_uevent_var(env, "DRIVER=%s", dev->driver->name);
++ /* Synchronize with module_remove_driver() */
++ rcu_read_lock();
++ driver = READ_ONCE(dev->driver);
++ if (driver)
++ add_uevent_var(env, "DRIVER=%s", driver->name);
++ rcu_read_unlock();
+
+ /* Add common DT information about the device */
+ of_device_uevent(dev, env);
+@@ -4083,9 +4111,14 @@ int __init devices_init(void)
+ sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
+ if (!sysfs_dev_char_kobj)
+ goto char_kobj_err;
++ device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
++ if (!device_link_wq)
++ goto wq_err;
+
+ return 0;
+
++ wq_err:
++ kobject_put(sysfs_dev_char_kobj);
+ char_kobj_err:
+ kobject_put(sysfs_dev_block_kobj);
+ block_kobj_err:
+@@ -4452,9 +4485,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
+ */
+ int device_rename(struct device *dev, const char *new_name)
+ {
++ struct subsys_private *sp = NULL;
+ struct kobject *kobj = &dev->kobj;
+ char *old_device_name = NULL;
+ int error;
++ bool is_link_renamed = false;
+
+ dev = get_device(dev);
+ if (!dev)
+@@ -4469,7 +4504,7 @@ int device_rename(struct device *dev, const char *new_name)
+ }
+
+ if (dev->class) {
+- struct subsys_private *sp = class_to_subsys(dev->class);
++ sp = class_to_subsys(dev->class);
+
+ if (!sp) {
+ error = -EINVAL;
+@@ -4478,16 +4513,19 @@ int device_rename(struct device *dev, const char *new_name)
+
+ error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
+ new_name, kobject_namespace(kobj));
+- subsys_put(sp);
+ if (error)
+ goto out;
++
++ is_link_renamed = true;
+ }
+
+ error = kobject_rename(kobj, new_name);
+- if (error)
+- goto out;
+-
+ out:
++ if (error && is_link_renamed)
++ sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
++ old_device_name, kobject_namespace(kobj));
++ subsys_put(sp);
++
+ put_device(dev);
+
+ kfree(old_device_name);
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 9ea22e165acd67..ef427ee787a99b 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+ #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ #include <linux/kexec.h>
+
+ static ssize_t crash_notes_show(struct device *dev,
+@@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
+ #endif
+
+ static const struct attribute_group *common_cpu_attr_groups[] = {
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ &crash_note_cpu_attr_group,
+ #endif
+ NULL
+ };
+
+ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ &crash_note_cpu_attr_group,
+ #endif
+ NULL
+@@ -565,6 +565,7 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
+ CPU_SHOW_VULN_FALLBACK(retbleed);
+ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+ CPU_SHOW_VULN_FALLBACK(gds);
++CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -579,6 +580,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
++static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+@@ -594,6 +596,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_retbleed.attr,
+ &dev_attr_spec_rstack_overflow.attr,
+ &dev_attr_gather_data_sampling.attr,
++ &dev_attr_reg_file_data_sampling.attr,
+ NULL
+ };
+
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index a528cec24264ab..0c3725c3eefa46 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+
+- device_links_driver_cleanup(dev);
+ device_unbind_cleanup(dev);
++ device_links_driver_cleanup(dev);
+
+ klist_remove(&dev->p->knode_driver);
+ device_pm_check_callbacks(dev);
+diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
+index 91536ee05f144e..7e2d1f0d903a6e 100644
+--- a/drivers/base/devcoredump.c
++++ b/drivers/base/devcoredump.c
+@@ -362,6 +362,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
+ devcd->devcd_dev.class = &devcd_class;
+
+ mutex_lock(&devcd->mutex);
++ dev_set_uevent_suppress(&devcd->devcd_dev, true);
+ if (device_add(&devcd->devcd_dev))
+ goto put_device;
+
+@@ -376,6 +377,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
+ "devcoredump"))
+ dev_warn(dev, "devcoredump create_link failed\n");
+
++ dev_set_uevent_suppress(&devcd->devcd_dev, false);
++ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+ mutex_unlock(&devcd->mutex);
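
The devcoredump change registers the device with uevents suppressed, creates the data and failing_device links, and only then emits KOBJ_ADD, so udev never reacts to a half-populated device. A hedged sketch of the general pattern (error handling elided, names generic):

    dev_set_uevent_suppress(&dev, true);   /* register quietly */
    err = device_add(&dev);
    /* ... create the attributes and links consumers expect ... */
    dev_set_uevent_suppress(&dev, false);
    kobject_uevent(&dev.kobj, KOBJ_ADD);   /* announce the finished device */
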
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 3df0025d12aa48..e9b0d94aeabd90 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -567,6 +567,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+ grp->id = grp;
+ if (id)
+ grp->id = id;
++ grp->color = 0;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ add_dr(dev, &grp->node[0]);
+@@ -896,9 +897,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
+ /*
+ * Otherwise: allocate new, larger chunk. We need to allocate before
+ * taking the lock as most probably the caller uses GFP_KERNEL.
++ * alloc_dr() will call check_dr_size() to reserve extra memory
++ * for struct devres automatically, so size @new_size user request
++ * is delivered to it directly as devm_kmalloc() does.
+ */
+ new_dr = alloc_dr(devm_kmalloc_release,
+- total_new_size, gfp, dev_to_node(dev));
++ new_size, gfp, dev_to_node(dev));
+ if (!new_dr)
+ return NULL;
+
+@@ -1222,7 +1226,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+ */
+ void devm_free_percpu(struct device *dev, void __percpu *pdata)
+ {
+- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
++ /*
++ * Use devres_release() to prevent memory leakage as
++ * devm_free_pages() does.
++ */
++ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
+ (__force void *)pdata));
+ }
+ EXPORT_SYMBOL_GPL(devm_free_percpu);
+diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
+index b58c42f1b1ce65..0b18c6b46e65d8 100644
+--- a/drivers/base/firmware_loader/main.c
++++ b/drivers/base/firmware_loader/main.c
+@@ -844,6 +844,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
+ {}
+ #endif
+
++/*
++ * Reject firmware file names with ".." path components.
++ * There are drivers that construct firmware file names from device-supplied
++ * strings, and we don't want some device to be able to tell us "I would like to
++ * be sent my firmware from ../../../etc/shadow, please".
++ *
++ * Search for ".." surrounded by either '/' or start/end of string.
++ *
++ * This intentionally only looks at the firmware name, not at the firmware base
++ * directory or at symlink contents.
++ */
++static bool name_contains_dotdot(const char *name)
++{
++ size_t name_len = strlen(name);
++
++ return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
++ strstr(name, "/../") != NULL ||
++ (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
++}
++
+ /* called from request_firmware() and request_firmware_work_func() */
+ static int
+ _request_firmware(const struct firmware **firmware_p, const char *name,
+@@ -864,6 +884,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ goto out;
+ }
+
++ if (name_contains_dotdot(name)) {
++ dev_warn(device,
++ "Firmware load for '%s' refused, path contains '..' component\n",
++ name);
++ ret = -EINVAL;
++ goto out;
++ }
++
+ ret = _request_firmware_prepare(&fw, name, device, buf, size,
+ offset, opt_flags);
+ if (ret <= 0) /* error or already assigned */
+@@ -941,6 +969,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ * @name will be used as $FIRMWARE in the uevent environment and
+ * should be distinctive enough not to be confused with any other
+ * firmware image for this or any other device.
++ * It must not contain any ".." path components - "foo/bar..bin" is
++ * allowed, but "foo/../bar.bin" is not.
+ *
+ * Caller must hold the reference count of @device.
+ *
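
name_contains_dotdot() above is pure string handling, so it can be exercised directly in userspace; a small harness with the function body lifted verbatim from the hunk (the test names are illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool name_contains_dotdot(const char *name)
    {
        size_t name_len = strlen(name);

        return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
               strstr(name, "/../") != NULL ||
               (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
    }

    int main(void)
    {
        assert(name_contains_dotdot(".."));
        assert(name_contains_dotdot("../etc/shadow"));
        assert(name_contains_dotdot("fw/../../etc/shadow"));
        assert(name_contains_dotdot("fw/.."));
        assert(!name_contains_dotdot("foo/bar..bin")); /* allowed per the kerneldoc */
        assert(!name_contains_dotdot("vendor/fw.bin"));
        printf("all cases behave as documented\n");
        return 0;
    }
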
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index f3b9a4d0fa3bb2..8a13babd826ce3 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -180,6 +180,9 @@ static inline unsigned long memblk_nr_poison(struct memory_block *mem)
+ }
+ #endif
+
++/*
++ * Must acquire mem_hotplug_lock in write mode.
++ */
+ static int memory_block_online(struct memory_block *mem)
+ {
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+@@ -204,10 +207,11 @@ static int memory_block_online(struct memory_block *mem)
+ if (mem->altmap)
+ nr_vmemmap_pages = mem->altmap->free;
+
++ mem_hotplug_begin();
+ if (nr_vmemmap_pages) {
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ ret = online_pages(start_pfn + nr_vmemmap_pages,
+@@ -215,7 +219,7 @@ static int memory_block_online(struct memory_block *mem)
+ if (ret) {
+ if (nr_vmemmap_pages)
+ mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+- return ret;
++ goto out;
+ }
+
+ /*
+@@ -227,9 +231,14 @@ static int memory_block_online(struct memory_block *mem)
+ nr_vmemmap_pages);
+
+ mem->zone = zone;
++out:
++ mem_hotplug_done();
+ return ret;
+ }
+
++/*
++ * Must acquire mem_hotplug_lock in write mode.
++ */
+ static int memory_block_offline(struct memory_block *mem)
+ {
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+@@ -247,6 +256,7 @@ static int memory_block_offline(struct memory_block *mem)
+ if (mem->altmap)
+ nr_vmemmap_pages = mem->altmap->free;
+
++ mem_hotplug_begin();
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ -nr_vmemmap_pages);
+@@ -258,13 +268,15 @@ static int memory_block_offline(struct memory_block *mem)
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn),
+ mem->group, nr_vmemmap_pages);
+- return ret;
++ goto out;
+ }
+
+ if (nr_vmemmap_pages)
+ mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+
+ mem->zone = NULL;
++out:
++ mem_hotplug_done();
+ return ret;
+ }
+
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index 46ad4d636731dd..0d5c5da367f720 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -7,6 +7,7 @@
+ #include <linux/errno.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/rcupdate.h>
+ #include "base.h"
+
+ static char *make_driver_name(struct device_driver *drv)
+@@ -30,14 +31,14 @@ static void module_create_drivers_dir(struct module_kobject *mk)
+ mutex_unlock(&drivers_dir_mutex);
+ }
+
+-void module_add_driver(struct module *mod, struct device_driver *drv)
++int module_add_driver(struct module *mod, struct device_driver *drv)
+ {
+ char *driver_name;
+- int no_warn;
+ struct module_kobject *mk = NULL;
++ int ret;
+
+ if (!drv)
+- return;
++ return 0;
+
+ if (mod)
+ mk = &mod->mkobj;
+@@ -56,17 +57,41 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
+ }
+
+ if (!mk)
+- return;
++ return 0;
++
++ ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
++ if (ret)
++ return ret;
+
+- /* Don't check return codes; these calls are idempotent */
+- no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+ driver_name = make_driver_name(drv);
+- if (driver_name) {
+- module_create_drivers_dir(mk);
+- no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
+- driver_name);
+- kfree(driver_name);
++ if (!driver_name) {
++ ret = -ENOMEM;
++ goto out_remove_kobj;
+ }
++
++ module_create_drivers_dir(mk);
++ if (!mk->drivers_dir) {
++ ret = -EINVAL;
++ goto out_free_driver_name;
++ }
++
++ ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
++ if (ret)
++ goto out_remove_drivers_dir;
++
++ kfree(driver_name);
++
++ return 0;
++
++out_remove_drivers_dir:
++ sysfs_remove_link(mk->drivers_dir, driver_name);
++
++out_free_driver_name:
++ kfree(driver_name);
++
++out_remove_kobj:
++ sysfs_remove_link(&drv->p->kobj, "module");
++ return ret;
+ }
+
+ void module_remove_driver(struct device_driver *drv)
+@@ -77,6 +102,9 @@ void module_remove_driver(struct device_driver *drv)
+ if (!drv)
+ return;
+
++ /* Synchronize with dev_uevent() */
++ synchronize_rcu();
++
+ sysfs_remove_link(&drv->p->kobj, "module");
+
+ if (drv->owner)
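
The module_add_driver() rework above is a textbook goto unwind ladder: each failure releases exactly what the earlier steps acquired, in reverse order. A runnable miniature with hypothetical resources standing in for the sysfs links:

    #include <stdio.h>

    static int acquire(const char *what, int fail)
    {
        if (fail) {
            fprintf(stderr, "failed to acquire %s\n", what);
            return -1;
        }
        printf("acquired %s\n", what);
        return 0;
    }

    static void release(const char *what) { printf("released %s\n", what); }

    static int add(int fail_at) /* fail_at picks which step fails */
    {
        if (acquire("module link", fail_at == 1))
            return -1;
        if (acquire("driver name", fail_at == 2))
            goto out_remove_link;
        if (acquire("drivers dir entry", fail_at == 3))
            goto out_free_name;
        return 0; /* on success the caller releases these later */

    out_free_name:
        release("driver name");
    out_remove_link:
        release("module link");
        return -1;
    }

    int main(void)
    {
        for (int f = 0; f <= 3; f++)
            printf("add(fail_at=%d) -> %d\n", f, add(f));
        return 0;
    }
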
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 493d533f837556..4d588f4658c85c 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -868,11 +868,15 @@ int __register_one_node(int nid)
+ {
+ int error;
+ int cpu;
++ struct node *node;
+
+- node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+- if (!node_devices[nid])
++ node = kzalloc(sizeof(struct node), GFP_KERNEL);
++ if (!node)
+ return -ENOMEM;
+
++ INIT_LIST_HEAD(&node->access_list);
++ node_devices[nid] = node;
++
+ error = register_node(node_devices[nid], nid);
+
+ /* link cpu under this node */
+@@ -881,7 +885,6 @@ int __register_one_node(int nid)
+ register_cpu_under_node(cpu, nid);
+ }
+
+- INIT_LIST_HEAD(&node_devices[nid]->access_list);
+ node_init_caches(nid);
+
+ return error;
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 5cb2023581d4db..582564f8dde6f9 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1102,7 +1102,7 @@ static int __init genpd_power_off_unused(void)
+
+ return 0;
+ }
+-late_initcall(genpd_power_off_unused);
++late_initcall_sync(genpd_power_off_unused);
+
+ #ifdef CONFIG_PM_SLEEP
+
+@@ -3135,7 +3135,7 @@ static int genpd_summary_one(struct seq_file *s,
+ else
+ snprintf(state, sizeof(state), "%s",
+ status_lookup[genpd->status]);
+- seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
++ seq_printf(s, "%-30s %-49s %u", genpd->name, state, genpd->performance_state);
+
+ /*
+ * Modifications on the list require holding locks on both
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index f85f3515c258fc..9c5a5f4dba5a6e 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ }
+
+ /**
+- * device_resume_noirq - Execute a "noirq resume" callback for given device.
++ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -655,7 +655,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
+ Out:
+ complete_all(&dev->power.completion);
+ TRACE_RESUME(error);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_noirq++;
++ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
++ }
+ }
+
+ static bool is_async(struct device *dev)
+@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ {
+ reinit_completion(&dev->power.completion);
+
+- if (is_async(dev)) {
+- get_device(dev);
+- async_schedule_dev(func, dev);
++ if (!is_async(dev))
++ return false;
++
++ get_device(dev);
++
++ if (async_schedule_dev_nocall(func, dev))
+ return true;
+- }
++
++ put_device(dev);
+
+ return false;
+ }
+@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+ static void async_resume_noirq(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_noirq(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_noirq(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_noirq(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_noirq))
++ return;
++
++ __device_resume_noirq(dev, pm_transition, false);
++}
++
+ static void dpm_noirq_resume_devices(pm_message_t state)
+ {
+ struct device *dev;
+@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+- dpm_async_fn(dev, async_resume_noirq);
+-
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
+ get_device(dev);
+@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_noirq(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_noirq++;
+- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " noirq", error);
+- }
+- }
++ device_resume_noirq(dev);
+
+ put_device(dev);
+
+@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state)
+ }
+
+ /**
+- * device_resume_early - Execute an "early resume" callback for given device.
++ * __device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -811,21 +807,31 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
+
+ pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_early++;
++ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async early" : " early", error);
++ }
+ }
+
+ static void async_resume_early(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_early(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_early(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_early(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_early))
++ return;
++
++ __device_resume_early(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state)
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+- dpm_async_fn(dev, async_resume_early);
+-
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
+ get_device(dev);
+@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state)
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_early(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_early++;
+- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " early", error);
+- }
+- }
++ device_resume_early(dev);
+
+ put_device(dev);
+
+@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state)
+ EXPORT_SYMBOL_GPL(dpm_resume_start);
+
+ /**
+- * device_resume - Execute "resume" callbacks for given device.
++ * __device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+-static int device_resume(struct device *dev, pm_message_t state, bool async)
++static void __device_resume(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
+
+ TRACE_RESUME(error);
+
+- return error;
++ if (error) {
++ suspend_stats.failed_resume++;
++ dpm_save_failed_step(SUSPEND_RESUME);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async" : "", error);
++ }
+ }
+
+ static void async_resume(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+
+- error = device_resume(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
++ __device_resume(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume))
++ return;
++
++ __device_resume(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
+ pm_transition = state;
+ async_error = 0;
+
+- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+- dpm_async_fn(dev, async_resume);
+-
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
++
+ get_device(dev);
+- if (!is_async(dev)) {
+- int error;
+
+- mutex_unlock(&dpm_list_mtx);
++ mutex_unlock(&dpm_list_mtx);
++
++ device_resume(dev);
+
+- error = device_resume(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume++;
+- dpm_save_failed_step(SUSPEND_RESUME);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, "", error);
+- }
++ mutex_lock(&dpm_list_mtx);
+
+- mutex_lock(&dpm_list_mtx);
+- }
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index 72b7a92337b188..cd6e559648b21b 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -120,7 +120,7 @@ static unsigned int read_magic_time(void)
+ struct rtc_time time;
+ unsigned int val;
+
+- if (mc146818_get_time(&time) < 0) {
++ if (mc146818_get_time(&time, 1000) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index 42171f766dcba6..5a5a9e978e85f3 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
++ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
+ enable_irq(wirq->irq);
++ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
++ }
+ }
+
+ /**
+diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
+index 41edd6a430eb45..0b6c2277128b42 100644
+--- a/drivers/base/regmap/regcache-maple.c
++++ b/drivers/base/regmap/regcache-maple.c
+@@ -110,9 +110,10 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ struct maple_tree *mt = map->cache;
+ MA_STATE(mas, mt, min, max);
+ unsigned long *entry, *lower, *upper;
+- unsigned long lower_index, lower_last;
++ /* initialized to work around false-positive -Wuninitialized warning */
++ unsigned long lower_index = 0, lower_last = 0;
+ unsigned long upper_index, upper_last;
+- int ret;
++ int ret = 0;
+
+ lower = NULL;
+ upper = NULL;
+@@ -145,7 +146,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
+ upper_index = max + 1;
+ upper_last = mas.last;
+
+- upper = kmemdup(&entry[max + 1],
++ upper = kmemdup(&entry[max - mas.index + 1],
+ ((mas.last - max) *
+ sizeof(unsigned long)),
+ map->alloc_flags);
+@@ -244,7 +245,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
+ unsigned long lmin = min;
+ unsigned long lmax = max;
+ unsigned int r, v, sync_start;
+- int ret;
++ int ret = 0;
+ bool sync_needed = false;
+
+ map->cache_bypass = true;
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index c5d151e9c48159..ac63a73ccdaaa2 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
+ return 0;
+ }
+
++static int rbtree_all(const void *key, const struct rb_node *node)
++{
++ return 0;
++}
++
+ /**
+ * regcache_sync - Sync the register cache with the hardware.
+ *
+@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
+ unsigned int i;
+ const char *name;
+ bool bypass;
++ struct rb_node *node;
+
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+@@ -392,6 +398,29 @@ int regcache_sync(struct regmap *map)
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ map->no_sync_defaults = false;
++
++ /*
++ * If we did any paging with cache bypassed and a cached
++ * paging register then the register and cache state might
++ * have gone out of sync, force writes of all the paging
++ * registers.
++ */
++ rb_for_each(node, 0, &map->range_tree, rbtree_all) {
++ struct regmap_range_node *this =
++ rb_entry(node, struct regmap_range_node, node);
++
++ /* If there's nothing in the cache there's nothing to sync */
++ if (regcache_read(map, this->selector_reg, &i) != 0)
++ continue;
++
++ ret = _regmap_write(map, this->selector_reg, i);
++ if (ret != 0) {
++ dev_err(map->dev, "Failed to write %x = %x: %d\n",
++ this->selector_reg, i, ret);
++ break;
++ }
++ }
++
+ map->unlock(map->lock_arg);
+
+ regmap_async_complete(map);
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index f36027591e1a8d..bdd80b73c3e6c1 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
+ name = map->dev->driver->name;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+- if (ret < 0) {
++ if (ret >= PAGE_SIZE) {
+ kfree(buf);
+ return ret;
+ }
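
The regmap-debugfs fix relies on C99 snprintf() semantics: it returns the length the output would have had, so a return value greater than or equal to the buffer size, not a negative value, is the truncation signal. Demonstrable in standard C:

    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        int ret = snprintf(buf, sizeof(buf), "%s\n", "a-rather-long-name");

        /* snprintf() reports the untruncated length and does not return
         * a negative value for ordinary format strings. */
        printf("ret=%d buf=\"%s\" truncated=%s\n", ret, buf,
               ret >= (int)sizeof(buf) ? "yes" : "no");
        return 0;
    }
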
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 3ec611dc0c09fb..a905e955bbfc78 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+
+ if (quirks->max_write_len &&
+ (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+- max_write = quirks->max_write_len;
++ max_write = quirks->max_write_len -
++ (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+
+ if (max_read || max_write) {
+ ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
+index 264d29b3fced0b..5f1e914646cd91 100644
+--- a/drivers/base/regmap/regmap-kunit.c
++++ b/drivers/base/regmap/regmap-kunit.c
+@@ -9,6 +9,23 @@
+
+ #define BLOCK_TEST_SIZE 12
+
++static void get_changed_bytes(void *orig, void *new, size_t size)
++{
++ char *o = orig;
++ char *n = new;
++ int i;
++
++ get_random_bytes(new, size);
++
++ /*
++ * This could be nicer and more efficient but we shouldn't
++ * super care.
++ */
++ for (i = 0; i < size; i++)
++ while (n[i] == o[i])
++ get_random_bytes(&n[i], 1);
++}
++
+ static const struct regmap_config test_regmap_config = {
+ .max_register = BLOCK_TEST_SIZE,
+ .reg_stride = 1,
+@@ -1131,7 +1148,7 @@ static void raw_sync(struct kunit *test)
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+- u16 val[2];
++ u16 val[3];
+ u16 *hw_buf;
+ unsigned int rval;
+ int i;
+@@ -1145,17 +1162,13 @@ static void raw_sync(struct kunit *test)
+
+ hw_buf = (u16 *)data->vals;
+
+- get_random_bytes(&val, sizeof(val));
++ get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
+
+ /* Do a regular write and a raw write in cache only mode */
+ regcache_cache_only(map, true);
+- KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
+- if (config.val_format_endian == REGMAP_ENDIAN_BIG)
+- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
+- be16_to_cpu(val[0])));
+- else
+- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
+- le16_to_cpu(val[0])));
++ KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
++ sizeof(u16) * 2));
++ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
+
+ /* We should read back the new values, and defaults for the rest */
+ for (i = 0; i < config.max_register + 1; i++) {
+@@ -1164,24 +1177,34 @@ static void raw_sync(struct kunit *test)
+ switch (i) {
+ case 2:
+ case 3:
+- case 6:
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
+ KUNIT_EXPECT_EQ(test, rval,
+- be16_to_cpu(val[i % 2]));
++ be16_to_cpu(val[i - 2]));
+ } else {
+ KUNIT_EXPECT_EQ(test, rval,
+- le16_to_cpu(val[i % 2]));
++ le16_to_cpu(val[i - 2]));
+ }
+ break;
++ case 4:
++ KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
++ break;
+ default:
+ KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
+ break;
+ }
+ }
++
++ /*
++ * The value written via _write() was translated by the core,
++ * translate the original copy for comparison purposes.
++ */
++ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
++ val[2] = cpu_to_be16(val[2]);
++ else
++ val[2] = cpu_to_le16(val[2]);
+
+ /* The values should not appear in the "hardware" */
+- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
+- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
++ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
+
+ for (i = 0; i < config.max_register + 1; i++)
+ data->written[i] = false;
+@@ -1192,8 +1215,7 @@ static void raw_sync(struct kunit *test)
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* The values should now appear in the "hardware" */
+- KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
+- KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
++ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
+
+ regmap_exit(map);
+ }
+diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
+index 37ab23a9d0345a..7f14c5ed1e2294 100644
+--- a/drivers/base/regmap/regmap-spi.c
++++ b/drivers/base/regmap/regmap-spi.c
+@@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ return ERR_PTR(-ENOMEM);
+
+ max_msg_size = spi_max_message_size(spi);
+- reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+- + config->pad_bits / BITS_PER_BYTE;
++ reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 234a84ecde8b1b..c5b5241891a5a6 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1620,17 +1620,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ }
+
+ if (!map->cache_bypass && map->format.parse_val) {
+- unsigned int ival;
++ unsigned int ival, offset;
+ int val_bytes = map->format.val_bytes;
+- for (i = 0; i < val_len / val_bytes; i++) {
+- ival = map->format.parse_val(val + (i * val_bytes));
+- ret = regcache_write(map,
+- reg + regmap_get_offset(map, i),
+- ival);
++
++ /* Cache the last written value for noinc writes */
++ i = noinc ? val_len - val_bytes : 0;
++ for (; i < val_len; i += val_bytes) {
++ ival = map->format.parse_val(val + i);
++ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
++ ret = regcache_write(map, reg + offset, ival);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+- reg + regmap_get_offset(map, i), ret);
++ reg + offset, ret);
+ return ret;
+ }
+ }
+@@ -2834,6 +2836,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
+ }
+ EXPORT_SYMBOL_GPL(regmap_read);
+
++/**
++ * regmap_read_bypassed() - Read a value from a single register direct
++ * from the device, bypassing the cache
++ *
++ * @map: Register map to read from
++ * @reg: Register to be read from
++ * @val: Pointer to store read value
++ *
++ * A value of zero will be returned on success, a negative errno will
++ * be returned in error cases.
++ */
++int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
++{
++ int ret;
++ bool bypass, cache_only;
++
++ if (!IS_ALIGNED(reg, map->reg_stride))
++ return -EINVAL;
++
++ map->lock(map->lock_arg);
++
++ bypass = map->cache_bypass;
++ cache_only = map->cache_only;
++ map->cache_bypass = true;
++ map->cache_only = false;
++
++ ret = _regmap_read(map, reg, val);
++
++ map->cache_bypass = bypass;
++ map->cache_only = cache_only;
++
++ map->unlock(map->lock_arg);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(regmap_read_bypassed);
++
+ /**
+ * regmap_raw_read() - Read raw data from the device
+ *
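
The new regmap_read_bypassed() helper saves cache_bypass and cache_only under the map lock, forces a read that goes to the hardware, then restores both flags, so the hunk suggests it is meant for reads that must hit the device even while the regmap is in cache-only mode. A hedged usage sketch; map is an existing struct regmap and the register address is hypothetical:

    unsigned int status;
    int ret;

    /* read live hardware state even if the cache is in cache-only mode;
     * the helper toggles and restores the cache flags internally */
    ret = regmap_read_bypassed(map, 0x04 /* hypothetical status reg */, &status);
    if (ret)
        return ret;
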
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 1886995a0b3a30..079bd14bdedc7c 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -541,6 +541,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
++ if (!args)
++ return 0;
++
+ args->fwnode = software_node_get(refnode);
+ args->nargs = nargs;
+
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index cf6883756155a3..37eff1c9745155 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
+ struct gendisk *gd;
+ mempool_t *mp;
+ struct blk_mq_tag_set *set;
++ sector_t ssize;
+ ulong flags;
+ int late = 0;
+ int err;
+@@ -395,7 +396,7 @@ aoeblk_gdalloc(void *vp)
+ gd->minors = AOE_PARTITIONS;
+ gd->fops = &aoe_bdops;
+ gd->private_data = d;
+- set_capacity(gd, d->ssize);
++ ssize = d->ssize;
+ snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
+ d->aoemajor, d->aoeminor);
+
+@@ -404,6 +405,8 @@ aoeblk_gdalloc(void *vp)
+
+ spin_unlock_irqrestore(&d->lock, flags);
+
++ set_capacity(gd, ssize);
++
+ err = device_add_disk(NULL, gd, aoe_attr_groups);
+ if (err)
+ goto out_disk_cleanup;
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index d7317425be510d..d1f4ddc576451a 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -361,6 +361,7 @@ ata_rw_frameinit(struct frame *f)
+ }
+
+ ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
++ dev_hold(t->ifp->nd);
+ skb->dev = t->ifp->nd;
+ }
+
+@@ -401,6 +402,8 @@ aoecmd_ata_rw(struct aoedev *d)
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, skb);
+ aoenet_xmit(&queue);
++ } else {
++ dev_put(f->t->ifp->nd);
+ }
+ return 1;
+ }
+@@ -419,13 +422,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ifp) {
+ dev_hold(ifp);
+- if (!is_aoe_netif(ifp))
+- goto cont;
++ if (!is_aoe_netif(ifp)) {
++ dev_put(ifp);
++ continue;
++ }
+
+ skb = new_skb(sizeof *h + sizeof *ch);
+ if (skb == NULL) {
+ printk(KERN_INFO "aoe: skb alloc failure\n");
+- goto cont;
++ dev_put(ifp);
++ continue;
+ }
+ skb_put(skb, sizeof *h + sizeof *ch);
+ skb->dev = ifp;
+@@ -440,9 +446,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
+ h->major = cpu_to_be16(aoemajor);
+ h->minor = aoeminor;
+ h->cmd = AOECMD_CFG;
+-
+-cont:
+- dev_put(ifp);
+ }
+ rcu_read_unlock();
+ }
+@@ -483,10 +486,13 @@ resend(struct aoedev *d, struct frame *f)
+ memcpy(h->dst, t->addr, sizeof h->dst);
+ memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
+
++ dev_hold(t->ifp->nd);
+ skb->dev = t->ifp->nd;
+ skb = skb_clone(skb, GFP_ATOMIC);
+- if (skb == NULL)
++ if (skb == NULL) {
++ dev_put(t->ifp->nd);
+ return;
++ }
+ f->sent = ktime_get();
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, skb);
+@@ -617,6 +623,8 @@ probe(struct aoetgt *t)
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, skb);
+ aoenet_xmit(&queue);
++ } else {
++ dev_put(f->t->ifp->nd);
+ }
+ }
+
+@@ -1395,6 +1403,7 @@ aoecmd_ata_id(struct aoedev *d)
+ ah->cmdstat = ATA_CMD_ID_ATA;
+ ah->lba3 = 0xa0;
+
++ dev_hold(t->ifp->nd);
+ skb->dev = t->ifp->nd;
+
+ d->rttavg = RTTAVG_INIT;
+@@ -1404,6 +1413,8 @@ aoecmd_ata_id(struct aoedev *d)
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb)
+ f->sent = ktime_get();
++ else
++ dev_put(t->ifp->nd);
+
+ return skb;
+ }
+diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
+index 63773a90581dd3..1e66c7a188a121 100644
+--- a/drivers/block/aoe/aoenet.c
++++ b/drivers/block/aoe/aoenet.c
+@@ -64,6 +64,7 @@ tx(int id) __must_hold(&txlock)
+ pr_warn("aoe: packet could not be sent on %s. %s\n",
+ ifp ? ifp->name : "netif",
+ "consider increasing tx_queue_len");
++ dev_put(ifp);
+ spin_lock_irq(&txlock);
+ }
+ return 0;
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 6bc86106c7b2ab..102cc3034412d9 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -3392,10 +3392,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
+ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
+ {
+ unsigned long flags;
+- if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
++ spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
++ if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
++ spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
+ return;
++ }
+
+- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
+ if (val == 0) {
+ drbd_uuid_move_history(device);
+ device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
+diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
+index 287a8d1d3f707f..87cf5883078f5b 100644
+--- a/drivers/block/drbd/drbd_state.c
++++ b/drivers/block/drbd/drbd_state.c
+@@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
+ ns.disk == D_OUTDATED)
+ rv = SS_CONNECTED_OUTDATES;
+
+- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
++ else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ (nc->verify_alg[0] == 0))
+ rv = SS_NO_VERIFY_ALG;
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 9f2d412fc560e1..886c6359903779 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -165,39 +165,37 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+ return get_size(lo->lo_offset, lo->lo_sizelimit, file);
+ }
+
++/*
++ * We support direct I/O only if lo_offset is aligned with the logical I/O size
++ * of the backing device, and the logical block size of the loop device is at
++ * least as large as that of the backing device.
++ */
++static bool lo_bdev_can_use_dio(struct loop_device *lo,
++ struct block_device *backing_bdev)
++{
++ unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
++
++ if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
++ return false;
++ if (lo->lo_offset & (sb_bsize - 1))
++ return false;
++ return true;
++}
++
+ static void __loop_update_dio(struct loop_device *lo, bool dio)
+ {
+ struct file *file = lo->lo_backing_file;
+- struct address_space *mapping = file->f_mapping;
+- struct inode *inode = mapping->host;
+- unsigned short sb_bsize = 0;
+- unsigned dio_align = 0;
++ struct inode *inode = file->f_mapping->host;
++ struct block_device *backing_bdev = NULL;
+ bool use_dio;
+
+- if (inode->i_sb->s_bdev) {
+- sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
+- dio_align = sb_bsize - 1;
+- }
++ if (S_ISBLK(inode->i_mode))
++ backing_bdev = I_BDEV(inode);
++ else if (inode->i_sb->s_bdev)
++ backing_bdev = inode->i_sb->s_bdev;
+
+- /*
+- * We support direct I/O only if lo_offset is aligned with the
+- * logical I/O size of backing device, and the logical block
+- * size of loop is bigger than the backing device's.
+- *
+- * TODO: the above condition may be loosed in the future, and
+- * direct I/O may be switched runtime at that time because most
+- * of requests in sane applications should be PAGE_SIZE aligned
+- */
+- if (dio) {
+- if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
+- !(lo->lo_offset & dio_align) &&
+- (file->f_mode & FMODE_CAN_ODIRECT))
+- use_dio = true;
+- else
+- use_dio = false;
+- } else {
+- use_dio = false;
+- }
++ use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
++ (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
+
+ if (lo->use_dio == use_dio)
+ return;
+@@ -213,13 +211,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
+ if (lo->lo_state == Lo_bound)
+ blk_mq_freeze_queue(lo->lo_queue);
+ lo->use_dio = use_dio;
+- if (use_dio) {
+- blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
++ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+- } else {
+- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
++ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+- }
+ if (lo->lo_state == Lo_bound)
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ }
+@@ -2040,14 +2035,6 @@ static int loop_add(int i)
+
+ blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+
+- /*
+- * By default, we do buffer IO, so it doesn't make sense to enable
+- * merge because the I/O submitted to backing file is handled page by
+- * page. For directio mode, merge does help to dispatch bigger request
+- * to underlayer disk. We will enable merge once directio is enabled.
+- */
+- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+-
+ /*
+ * Disable partition scanning by default. The in-kernel partition
+ * scanning can be requested individually per-device during its
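
lo_bdev_can_use_dio() above rejects unaligned offsets with lo->lo_offset & (sb_bsize - 1), a mask test equivalent to offset % bsize != 0 only because logical block sizes are powers of two. A quick standalone check:

    #include <stdbool.h>
    #include <stdio.h>

    static bool aligned(unsigned long long offset, unsigned int bsize)
    {
        /* valid only for power-of-two bsize */
        return (offset & (bsize - 1)) == 0;
    }

    int main(void)
    {
        printf("%d\n", aligned(4096, 512));  /* 1: multiple of 512 */
        printf("%d\n", aligned(4096, 4096)); /* 1 */
        printf("%d\n", aligned(2048, 4096)); /* 0: misaligned for 4K sectors */
        return 0;
    }
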
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 800f131222fc8f..96b349148e5788 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -67,6 +67,7 @@ struct nbd_sock {
+ struct recv_thread_args {
+ struct work_struct work;
+ struct nbd_device *nbd;
++ struct nbd_sock *nsock;
+ int index;
+ };
+
+@@ -180,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+ {
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
++ lockdep_assert_held(&cmd->lock);
++
++ /*
++	 * Clear the INFLIGHT flag so that this cmd won't be completed in
++	 * the normal completion path.
++	 *
++	 * The INFLIGHT flag will be set again the next time the cmd is
++	 * queued to nbd.
++ */
++ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
++
+ if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+ blk_mq_requeue_request(req, true);
+ }
+@@ -250,7 +262,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ struct gendisk *disk = nbd->disk;
+
+ del_gendisk(disk);
+- put_disk(disk);
+ blk_mq_free_tag_set(&nbd->tag_set);
+
+ /*
+@@ -261,7 +272,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
+ idr_remove(&nbd_index_idr, nbd->index);
+ mutex_unlock(&nbd_index_mutex);
+ destroy_workqueue(nbd->recv_workq);
+- kfree(nbd);
++ put_disk(disk);
+ }
+
+ static void nbd_dev_remove_work(struct work_struct *work)
+@@ -396,6 +407,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
+ }
+ }
+
++static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
++{
++ if (refcount_inc_not_zero(&nbd->config_refs)) {
++ /*
++		 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
++		 * and reading nbd->config are ordered. It pairs with the barrier
++		 * in nbd_alloc_and_init_config() and prevents nbd->config_refs
++		 * from appearing set before nbd->config is assigned.
++ */
++ smp_mb__after_atomic();
++ return nbd->config;
++ }
++
++ return NULL;
++}
++
+ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
+ {
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+@@ -410,13 +437,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
+ return BLK_EH_DONE;
+ }
+
+- if (!refcount_inc_not_zero(&nbd->config_refs)) {
++ config = nbd_get_config_unlocked(nbd);
++ if (!config) {
+ cmd->status = BLK_STS_TIMEOUT;
+ __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ mutex_unlock(&cmd->lock);
+ goto done;
+ }
+- config = nbd->config;
+
+ if (config->num_connections > 1 ||
+ (config->num_connections == 1 && nbd->tag_set.timeout)) {
+@@ -445,8 +472,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ mutex_unlock(&nsock->tx_lock);
+ }
+- mutex_unlock(&cmd->lock);
+ nbd_requeue_cmd(cmd);
++ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+ return BLK_EH_DONE;
+ }
+@@ -490,17 +517,11 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
+ return BLK_EH_DONE;
+ }
+
+-/*
+- * Send or receive packet. Return a positive value on success and
+- * negtive value on failue, and never return 0.
+- */
+-static int sock_xmit(struct nbd_device *nbd, int index, int send,
+- struct iov_iter *iter, int msg_flags, int *sent)
++static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
++ struct iov_iter *iter, int msg_flags, int *sent)
+ {
+- struct nbd_config *config = nbd->config;
+- struct socket *sock = config->socks[index]->sock;
+ int result;
+- struct msghdr msg;
++	struct msghdr msg = {};
+ unsigned int noreclaim_flag;
+
+ if (unlikely(!sock)) {
+@@ -516,10 +537,6 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
+ do {
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+ sock->sk->sk_use_task_frag = false;
+- msg.msg_name = NULL;
+- msg.msg_namelen = 0;
+- msg.msg_control = NULL;
+- msg.msg_controllen = 0;
+ msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+ if (send)
+@@ -541,6 +558,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
+ return result;
+ }
+
++/*
++ * Send or receive a packet. Return a positive value on success and
++ * a negative value on failure; never returns 0.
++ */
++static int sock_xmit(struct nbd_device *nbd, int index, int send,
++ struct iov_iter *iter, int msg_flags, int *sent)
++{
++ struct nbd_config *config = nbd->config;
++ struct socket *sock = config->socks[index]->sock;
++
++ return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
++}
++
+ /*
+ * Different settings for sk->sk_sndtimeo can result in different return values
+ * if there is a signal pending when we enter sendmsg, because reasons?
+@@ -550,7 +580,10 @@ static inline int was_interrupted(int result)
+ return result == -ERESTARTSYS || result == -EINTR;
+ }
+
+-/* always call with the tx_lock held */
++/*
++ * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
++ * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
++ */
+ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ {
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+@@ -567,6 +600,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ u32 nbd_cmd_flags = 0;
+ int sent = nsock->sent, skip = 0;
+
++ lockdep_assert_held(&cmd->lock);
++ lockdep_assert_held(&nsock->tx_lock);
++
+ iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
+
+ type = req_to_nbd_cmd_type(req);
+@@ -631,7 +667,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ nsock->sent = sent;
+ }
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
+- return BLK_STS_RESOURCE;
++ return (__force int)BLK_STS_RESOURCE;
+ }
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Send control failed (result %d)\n", result);
+@@ -672,7 +708,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
+- return BLK_STS_RESOURCE;
++ return (__force int)BLK_STS_RESOURCE;
+ }
+ dev_err(disk_to_dev(nbd->disk),
+ "Send data failed (result %d)\n",
+@@ -697,7 +733,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ return 0;
+ }
+
+-static int nbd_read_reply(struct nbd_device *nbd, int index,
++static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
+ struct nbd_reply *reply)
+ {
+ struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
+@@ -706,7 +742,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
+
+ reply->magic = 0;
+ iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
+- result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
++ result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
+ if (result < 0) {
+ if (!nbd_disconnected(nbd->config))
+ dev_err(disk_to_dev(nbd->disk),
+@@ -830,14 +866,14 @@ static void recv_work(struct work_struct *work)
+ struct nbd_device *nbd = args->nbd;
+ struct nbd_config *config = nbd->config;
+ struct request_queue *q = nbd->disk->queue;
+- struct nbd_sock *nsock;
++ struct nbd_sock *nsock = args->nsock;
+ struct nbd_cmd *cmd;
+ struct request *rq;
+
+ while (1) {
+ struct nbd_reply reply;
+
+- if (nbd_read_reply(nbd, args->index, &reply))
++ if (nbd_read_reply(nbd, nsock->sock, &reply))
+ break;
+
+ /*
+@@ -872,7 +908,6 @@ static void recv_work(struct work_struct *work)
+ percpu_ref_put(&q->q_usage_counter);
+ }
+
+- nsock = config->socks[args->index];
+ mutex_lock(&nsock->tx_lock);
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ mutex_unlock(&nsock->tx_lock);
+@@ -970,7 +1005,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
+ return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
+ }
+
+-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
++static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ {
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct nbd_device *nbd = cmd->nbd;
+@@ -978,18 +1013,20 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ struct nbd_sock *nsock;
+ int ret;
+
+- if (!refcount_inc_not_zero(&nbd->config_refs)) {
++ lockdep_assert_held(&cmd->lock);
++
++ config = nbd_get_config_unlocked(nbd);
++ if (!config) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Socks array is empty\n");
+- return -EINVAL;
++ return BLK_STS_IOERR;
+ }
+- config = nbd->config;
+
+ if (index >= config->num_connections) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on invalid socket\n");
+ nbd_config_put(nbd);
+- return -EINVAL;
++ return BLK_STS_IOERR;
+ }
+ cmd->status = BLK_STS_OK;
+ again:
+@@ -1012,7 +1049,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ */
+ sock_shutdown(nbd);
+ nbd_config_put(nbd);
+- return -EIO;
++ return BLK_STS_IOERR;
+ }
+ goto again;
+ }
+@@ -1025,7 +1062,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ blk_mq_start_request(req);
+ if (unlikely(nsock->pending && nsock->pending != req)) {
+ nbd_requeue_cmd(cmd);
+- ret = 0;
++ ret = BLK_STS_OK;
+ goto out;
+ }
+ /*
+@@ -1044,19 +1081,19 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ nbd_requeue_cmd(cmd);
+- ret = 0;
++ ret = BLK_STS_OK;
+ }
+ out:
+ mutex_unlock(&nsock->tx_lock);
+ nbd_config_put(nbd);
+- return ret;
++ return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+ }
+
+ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+ {
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+- int ret;
++ blk_status_t ret;
+
+ /*
+ * Since we look at the bio's to send the request over the network we
+@@ -1076,10 +1113,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ * appropriate.
+ */
+ ret = nbd_handle_cmd(cmd, hctx->queue_num);
+- if (ret < 0)
+- ret = BLK_STS_IOERR;
+- else if (!ret)
+- ret = BLK_STS_OK;
+ mutex_unlock(&cmd->lock);
+
+ return ret;
+@@ -1216,6 +1249,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
+ INIT_WORK(&args->work, recv_work);
+ args->index = i;
+ args->nbd = nbd;
++ args->nsock = nsock;
+ nsock->cookie++;
+ mutex_unlock(&nsock->tx_lock);
+ sockfd_put(old);
+@@ -1398,6 +1432,7 @@ static int nbd_start_device(struct nbd_device *nbd)
+ refcount_inc(&nbd->config_refs);
+ INIT_WORK(&args->work, recv_work);
+ args->nbd = nbd;
++ args->nsock = config->socks[i];
+ args->index = i;
+ queue_work(nbd->recv_workq, &args->work);
+ }
+@@ -1531,17 +1566,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
+ return error;
+ }
+
+-static struct nbd_config *nbd_alloc_config(void)
++static int nbd_alloc_and_init_config(struct nbd_device *nbd)
+ {
+ struct nbd_config *config;
+
++ if (WARN_ON(nbd->config))
++ return -EINVAL;
++
+ if (!try_module_get(THIS_MODULE))
+- return ERR_PTR(-ENODEV);
++ return -ENODEV;
+
+ config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
+ if (!config) {
+ module_put(THIS_MODULE);
+- return ERR_PTR(-ENOMEM);
++ return -ENOMEM;
+ }
+
+ atomic_set(&config->recv_threads, 0);
+@@ -1549,12 +1587,24 @@ static struct nbd_config *nbd_alloc_config(void)
+ init_waitqueue_head(&config->conn_wait);
+ config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
+ atomic_set(&config->live_connections, 0);
+- return config;
++
++ nbd->config = config;
++ /*
++	 * Order refcount_set(&nbd->config_refs, 1) after the nbd->config
++	 * assignment; its pair is the barrier in nbd_get_config_unlocked(),
++	 * so nbd_get_config_unlocked() won't see nbd->config as NULL after
++	 * refcount_inc_not_zero() succeeds.
++ */
++ smp_mb__before_atomic();
++ refcount_set(&nbd->config_refs, 1);
++
++ return 0;
+ }
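Taken together with nbd_get_config_unlocked() earlier in this series of hunks, this forms a publish/consume pair: the writer stores nbd->config before setting config_refs to 1, and the reader orders the refcount check before the pointer load. A minimal sketch of the same pattern, with hypothetical obj/obj_refs names standing in for the driver's fields:

#include <linux/refcount.h>

struct foo {
	int data;
};

static struct foo *obj;		/* published pointer */
static refcount_t obj_refs;	/* stays 0 until obj is valid */

/* Writer: make *f visible before the refcount can be seen as non-zero. */
static void publish(struct foo *f)
{
	obj = f;
	smp_mb__before_atomic();	/* order the store to obj... */
	refcount_set(&obj_refs, 1);	/* ...before publishing the refcount */
}

/* Reader: a successful refcount_inc_not_zero() guarantees obj != NULL. */
static struct foo *get(void)
{
	if (refcount_inc_not_zero(&obj_refs)) {
		smp_mb__after_atomic();	/* pairs with smp_mb__before_atomic() */
		return obj;
	}
	return NULL;
}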
+
+ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
+ {
+ struct nbd_device *nbd;
++ struct nbd_config *config;
+ int ret = 0;
+
+ mutex_lock(&nbd_index_mutex);
+@@ -1567,27 +1617,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
+ ret = -ENXIO;
+ goto out;
+ }
+- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+- struct nbd_config *config;
+
++ config = nbd_get_config_unlocked(nbd);
++ if (!config) {
+ mutex_lock(&nbd->config_lock);
+ if (refcount_inc_not_zero(&nbd->config_refs)) {
+ mutex_unlock(&nbd->config_lock);
+ goto out;
+ }
+- config = nbd_alloc_config();
+- if (IS_ERR(config)) {
+- ret = PTR_ERR(config);
++ ret = nbd_alloc_and_init_config(nbd);
++ if (ret) {
+ mutex_unlock(&nbd->config_lock);
+ goto out;
+ }
+- nbd->config = config;
+- refcount_set(&nbd->config_refs, 1);
++
+ refcount_inc(&nbd->refs);
+ mutex_unlock(&nbd->config_lock);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+- } else if (nbd_disconnected(nbd->config)) {
++ } else if (nbd_disconnected(config)) {
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+ }
+@@ -1608,6 +1656,13 @@ static void nbd_release(struct gendisk *disk)
+ nbd_put(nbd);
+ }
+
++static void nbd_free_disk(struct gendisk *disk)
++{
++ struct nbd_device *nbd = disk->private_data;
++
++ kfree(nbd);
++}
++
+ static const struct block_device_operations nbd_fops =
+ {
+ .owner = THIS_MODULE,
+@@ -1615,6 +1670,7 @@ static const struct block_device_operations nbd_fops =
+ .release = nbd_release,
+ .ioctl = nbd_ioctl,
+ .compat_ioctl = nbd_ioctl,
++ .free_disk = nbd_free_disk,
+ };
+
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+@@ -1983,22 +2039,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
+ pr_err("nbd%d already in use\n", index);
+ return -EBUSY;
+ }
+- if (WARN_ON(nbd->config)) {
+- mutex_unlock(&nbd->config_lock);
+- nbd_put(nbd);
+- return -EINVAL;
+- }
+- config = nbd_alloc_config();
+- if (IS_ERR(config)) {
++
++ ret = nbd_alloc_and_init_config(nbd);
++ if (ret) {
+ mutex_unlock(&nbd->config_lock);
+ nbd_put(nbd);
+ pr_err("couldn't allocate config\n");
+- return PTR_ERR(config);
++ return ret;
+ }
+- nbd->config = config;
+- refcount_set(&nbd->config_refs, 1);
+- set_bit(NBD_RT_BOUND, &config->runtime_flags);
+
++ config = nbd->config;
++ set_bit(NBD_RT_BOUND, &config->runtime_flags);
+ ret = nbd_genl_size_set(info, nbd);
+ if (ret)
+ goto out;
+@@ -2201,7 +2252,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
+ }
+ mutex_unlock(&nbd_index_mutex);
+
+- if (!refcount_inc_not_zero(&nbd->config_refs)) {
++ config = nbd_get_config_unlocked(nbd);
++ if (!config) {
+ dev_err(nbd_to_dev(nbd),
+ "not configured, cannot reconfigure\n");
+ nbd_put(nbd);
+@@ -2209,7 +2261,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ mutex_lock(&nbd->config_lock);
+- config = nbd->config;
+ if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
+ !nbd->pid) {
+ dev_err(nbd_to_dev(nbd),
+@@ -2401,6 +2452,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
++ if (!dev_list) {
++ nlmsg_free(reply);
++ ret = -EMSGSIZE;
++ goto out;
++ }
++
+ if (index == -1) {
+ ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
+ if (ret) {
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 968090935eb23f..97ed3bd9707f41 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -392,13 +392,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
+ static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+ {
+- return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
++ int ret;
++
++ mutex_lock(&lock);
++ ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
++ mutex_unlock(&lock);
++
++ return ret;
+ }
+
+ static int nullb_apply_poll_queues(struct nullb_device *dev,
+ unsigned int poll_queues)
+ {
+- return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
++ int ret;
++
++ mutex_lock(&lock);
++ ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
++ mutex_unlock(&lock);
++
++ return ret;
+ }
+
+ NULLB_DEVICE_ATTR(size, ulong, NULL);
+@@ -444,28 +456,32 @@ static ssize_t nullb_device_power_store(struct config_item *item,
+ if (ret < 0)
+ return ret;
+
++ ret = count;
++ mutex_lock(&lock);
+ if (!dev->power && newp) {
+ if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
+- return count;
++ goto out;
++
+ ret = null_add_dev(dev);
+ if (ret) {
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+- return ret;
++ goto out;
+ }
+
+ set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ dev->power = newp;
++ ret = count;
+ } else if (dev->power && !newp) {
+ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+- mutex_lock(&lock);
+ dev->power = newp;
+ null_del_dev(dev->nullb);
+- mutex_unlock(&lock);
+ }
+ clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ }
+
+- return count;
++out:
++ mutex_unlock(&lock);
++ return ret;
+ }
+
+ CONFIGFS_ATTR(nullb_device_, power);
+@@ -1819,7 +1835,7 @@ static void null_del_dev(struct nullb *nullb)
+
+ dev = nullb->dev;
+
+- ida_simple_remove(&nullb_indexes, nullb->index);
++ ida_free(&nullb_indexes, nullb->index);
+
+ list_del_init(&nullb->list);
+
+@@ -2013,8 +2029,8 @@ static int null_validate_conf(struct nullb_device *dev)
+ return -EINVAL;
+ }
+
+- dev->blocksize = round_down(dev->blocksize, 512);
+- dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
++ if (blk_validate_block_size(dev->blocksize))
++ return -EINVAL;
+
+ if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ if (dev->submit_queues != nr_online_nodes)
+@@ -2153,22 +2169,17 @@ static int null_add_dev(struct nullb_device *dev)
+ nullb->q->queuedata = nullb;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+
+- mutex_lock(&lock);
+- rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+- if (rv < 0) {
+- mutex_unlock(&lock);
++ rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
++ if (rv < 0)
+ goto out_cleanup_zone;
+- }
++
+ nullb->index = rv;
+ dev->index = rv;
+- mutex_unlock(&lock);
+
+ blk_queue_logical_block_size(nullb->q, dev->blocksize);
+ blk_queue_physical_block_size(nullb->q, dev->blocksize);
+- if (!dev->max_sectors)
+- dev->max_sectors = queue_max_hw_sectors(nullb->q);
+- dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
+- blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
++ if (dev->max_sectors)
++ blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+
+ if (dev->virt_boundary)
+ blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
+@@ -2187,9 +2198,7 @@ static int null_add_dev(struct nullb_device *dev)
+ if (rv)
+ goto out_ida_free;
+
+- mutex_lock(&lock);
+ list_add_tail(&nullb->list, &nullb_list);
+- mutex_unlock(&lock);
+
+ pr_info("disk %s created\n", nullb->disk_name);
+
+@@ -2238,7 +2247,9 @@ static int null_create_dev(void)
+ if (!dev)
+ return -ENOMEM;
+
++ mutex_lock(&lock);
+ ret = null_add_dev(dev);
++ mutex_unlock(&lock);
+ if (ret) {
+ null_free_dev(dev);
+ return ret;
+@@ -2268,12 +2279,6 @@ static int __init null_init(void)
+ g_bs = PAGE_SIZE;
+ }
+
+- if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
+- pr_warn("invalid max sectors\n");
+- pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
+- g_max_sectors = BLK_DEF_MAX_SECTORS;
+- }
+-
+ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+ pr_err("invalid home_node value\n");
+ g_home_node = NUMA_NO_NODE;
+@@ -2360,10 +2365,13 @@ static void __exit null_exit(void)
+
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
+ blk_mq_free_tag_set(&tag_set);
++
++ mutex_destroy(&lock);
+ }
+
+ module_init(null_init);
+ module_exit(null_exit);
+
+ MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
++MODULE_DESCRIPTION("multi queue aware block test driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index 55c5b48bc276fe..d057f7099e7f7e 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -83,6 +83,17 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+ return -EINVAL;
+ }
+
++ /*
++	 * If a smaller zone capacity was requested, do not allow a smaller last
++	 * zone at the same time, as such a zone configuration does not
++	 * correspond to any real zoned device.
++ */
++ if (dev->zone_capacity != dev->zone_size &&
++ dev->size & (dev->zone_size - 1)) {
++ pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
++ return -EINVAL;
++ }
++
+ zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ dev_capacity_sects = mb_to_sects(dev->size);
+ dev->zone_size_sects = mb_to_sects(dev->zone_size);
+@@ -112,7 +123,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+ if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+ dev->zone_max_open = dev->zone_max_active;
+ pr_info("changed the maximum number of open zones to %u\n",
+- dev->nr_zones);
++ dev->zone_max_open);
+ } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+ dev->zone_max_open = 0;
+ pr_info("zone_max_open limit disabled, limit >= zone count\n");
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index a999b698b131f7..6fcd7f0fe4f03e 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -362,7 +362,7 @@ enum rbd_watch_state {
+ enum rbd_lock_state {
+ RBD_LOCK_STATE_UNLOCKED,
+ RBD_LOCK_STATE_LOCKED,
+- RBD_LOCK_STATE_RELEASING,
++ RBD_LOCK_STATE_QUIESCING,
+ };
+
+ /* WatchNotify::ClientId */
+@@ -422,7 +422,7 @@ struct rbd_device {
+ struct list_head running_list;
+ struct completion acquire_wait;
+ int acquire_err;
+- struct completion releasing_wait;
++ struct completion quiescing_wait;
+
+ spinlock_t object_map_lock;
+ u8 *object_map;
+@@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
+ lockdep_assert_held(&rbd_dev->lock_rwsem);
+
+ return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
+- rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
++ rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING;
+ }
+
+ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
+@@ -3452,17 +3452,19 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
+ static void rbd_lock_del_request(struct rbd_img_request *img_req)
+ {
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
+- bool need_wakeup;
++ bool need_wakeup = false;
+
+ lockdep_assert_held(&rbd_dev->lock_rwsem);
+ spin_lock(&rbd_dev->lock_lists_lock);
+- rbd_assert(!list_empty(&img_req->lock_item));
+- list_del_init(&img_req->lock_item);
+- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+- list_empty(&rbd_dev->running_list));
++ if (!list_empty(&img_req->lock_item)) {
++ rbd_assert(!list_empty(&rbd_dev->running_list));
++ list_del_init(&img_req->lock_item);
++ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
++ list_empty(&rbd_dev->running_list));
++ }
+ spin_unlock(&rbd_dev->lock_lists_lock);
+ if (need_wakeup)
+- complete(&rbd_dev->releasing_wait);
++ complete(&rbd_dev->quiescing_wait);
+ }
+
+ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
+@@ -3475,11 +3477,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
+ if (rbd_lock_add_request(img_req))
+ return 1;
+
+- if (rbd_dev->opts->exclusive) {
+- WARN_ON(1); /* lock got released? */
+- return -EROFS;
+- }
+-
+ /*
+ * Note the use of mod_delayed_work() in rbd_acquire_lock()
+ * and cancel_delayed_work() in wake_lock_waiters().
+@@ -3842,14 +3839,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
+ return;
+ }
+
+- list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
++ while (!list_empty(&rbd_dev->acquiring_list)) {
++ img_req = list_first_entry(&rbd_dev->acquiring_list,
++ struct rbd_img_request, lock_item);
+ mutex_lock(&img_req->state_mutex);
+ rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
++ if (!result)
++ list_move_tail(&img_req->lock_item,
++ &rbd_dev->running_list);
++ else
++ list_del_init(&img_req->lock_item);
+ rbd_img_schedule(img_req, result);
+ mutex_unlock(&img_req->state_mutex);
+ }
+-
+- list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
+ }
+
+ static bool locker_equal(const struct ceph_locker *lhs,
+@@ -4175,16 +4177,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
+ /*
+ * Ensure that all in-flight IO is flushed.
+ */
+- rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
+- rbd_assert(!completion_done(&rbd_dev->releasing_wait));
++ rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING;
++ rbd_assert(!completion_done(&rbd_dev->quiescing_wait));
+ if (list_empty(&rbd_dev->running_list))
+ return true;
+
+ up_write(&rbd_dev->lock_rwsem);
+- wait_for_completion(&rbd_dev->releasing_wait);
++ wait_for_completion(&rbd_dev->quiescing_wait);
+
+ down_write(&rbd_dev->lock_rwsem);
+- if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
++ if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING)
+ return false;
+
+ rbd_assert(list_empty(&rbd_dev->running_list));
+@@ -4595,6 +4597,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
+ rbd_warn(rbd_dev, "failed to update lock cookie: %d",
+ ret);
+
++ if (rbd_dev->opts->exclusive)
++ rbd_warn(rbd_dev,
++ "temporarily releasing lock on exclusive mapping");
++
+ /*
+ * Lock cookie cannot be updated on older OSDs, so do
+ * a manual release and queue an acquire.
+@@ -5376,7 +5382,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
+ INIT_LIST_HEAD(&rbd_dev->acquiring_list);
+ INIT_LIST_HEAD(&rbd_dev->running_list);
+ init_completion(&rbd_dev->acquire_wait);
+- init_completion(&rbd_dev->releasing_wait);
++ init_completion(&rbd_dev->quiescing_wait);
+
+ spin_lock_init(&rbd_dev->object_map_lock);
+
+@@ -6582,11 +6588,6 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
+ if (ret)
+ return ret;
+
+- /*
+- * The lock may have been released by now, unless automatic lock
+- * transitions are disabled.
+- */
+- rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
+ return 0;
+ }
+
+diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
+index c186df0ec641c8..b67e39a34010b1 100644
+--- a/drivers/block/rnbd/rnbd-srv.c
++++ b/drivers/block/rnbd/rnbd-srv.c
+@@ -585,6 +585,7 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ {
+ char *full_path;
+ char *a, *b;
++ int len;
+
+ full_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!full_path)
+@@ -596,19 +597,19 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ */
+ a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
+ if (a) {
+- int len = a - dev_search_path;
++ len = a - dev_search_path;
+
+ len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
+ dev_search_path, srv_sess->sessname, dev_name);
+- if (len >= PATH_MAX) {
+- pr_err("Too long path: %s, %s, %s\n",
+- dev_search_path, srv_sess->sessname, dev_name);
+- kfree(full_path);
+- return ERR_PTR(-EINVAL);
+- }
+ } else {
+- snprintf(full_path, PATH_MAX, "%s/%s",
+- dev_search_path, dev_name);
++ len = snprintf(full_path, PATH_MAX, "%s/%s",
++ dev_search_path, dev_name);
++ }
++ if (len >= PATH_MAX) {
++ pr_err("Too long path: %s, %s, %s\n",
++ dev_search_path, srv_sess->sessname, dev_name);
++ kfree(full_path);
++ return ERR_PTR(-EINVAL);
+ }
+
+	/* eliminate duplicated slashes */
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 630ddfe6657bc9..f31607a24f5735 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -68,9 +68,6 @@ struct ublk_rq_data {
+ struct llist_node node;
+
+ struct kref ref;
+- __u64 sector;
+- __u32 operation;
+- __u32 nr_zones;
+ };
+
+ struct ublk_uring_cmd_pdu {
+@@ -115,6 +112,9 @@ struct ublk_uring_cmd_pdu {
+ */
+ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+
++/* atomic RW with ubq->cancel_lock */
++#define UBLK_IO_FLAG_CANCELED 0x80000000
++
+ struct ublk_io {
+ /* userspace buffer address from io cmd */
+ __u64 addr;
+@@ -139,6 +139,7 @@ struct ublk_queue {
+ bool force_abort;
+ bool timeout;
+ unsigned short nr_io_ready; /* how many ios setup */
++ spinlock_t cancel_lock;
+ struct ublk_device *dev;
+ struct ublk_io ios[];
+ };
+@@ -211,6 +212,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
+
+ #ifdef CONFIG_BLK_DEV_ZONED
+
++struct ublk_zoned_report_desc {
++ __u64 sector;
++ __u32 operation;
++ __u32 nr_zones;
++};
++
++static DEFINE_XARRAY(ublk_zoned_report_descs);
++
++static int ublk_zoned_insert_report_desc(const struct request *req,
++ struct ublk_zoned_report_desc *desc)
++{
++ return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
++ desc, GFP_KERNEL);
++}
++
++static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
++ const struct request *req)
++{
++ return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
++}
++
++static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
++ const struct request *req)
++{
++ return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
++}
++
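These helpers key a global xarray by the request pointer, replacing the pdu fields removed above. A condensed, illustrative view of the intended lifecycle assembled from the hunks below (hypothetical wrapper name; desc stays valid on the caller's stack because blk_execute_rq() is synchronous):

/* Sketch of the lifecycle; error handling trimmed for brevity. */
static int report_zones_once(struct request *req, sector_t sector,
			     unsigned int zones_in_request)
{
	struct ublk_zoned_report_desc desc = {
		.operation = UBLK_IO_OP_REPORT_ZONES,
		.sector    = sector,
		.nr_zones  = zones_in_request,
	};
	int ret;

	/* 1. Attach the descriptor before the request is dispatched. */
	ret = ublk_zoned_insert_report_desc(req, &desc);
	if (ret)
		return ret;

	/* 2. ublk_setup_iod_zoned() looks it up via ublk_zoned_get_report_desc(). */
	ret = blk_status_to_errno(blk_execute_rq(req, 0));

	/* 3. Detach before the request (and this stack slot) can go away. */
	ublk_zoned_erase_report_desc(req);
	return ret;
}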
+ static int ublk_get_nr_zones(const struct ublk_device *ub)
+ {
+ const struct ublk_param_basic *p = &ub->params.basic;
+@@ -317,7 +345,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int zones_in_request =
+ min_t(unsigned int, remaining_zones, max_zones_per_request);
+ struct request *req;
+- struct ublk_rq_data *pdu;
++ struct ublk_zoned_report_desc desc;
+ blk_status_t status;
+
+ memset(buffer, 0, buffer_length);
+@@ -328,20 +356,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
+ goto out;
+ }
+
+- pdu = blk_mq_rq_to_pdu(req);
+- pdu->operation = UBLK_IO_OP_REPORT_ZONES;
+- pdu->sector = sector;
+- pdu->nr_zones = zones_in_request;
++ desc.operation = UBLK_IO_OP_REPORT_ZONES;
++ desc.sector = sector;
++ desc.nr_zones = zones_in_request;
++ ret = ublk_zoned_insert_report_desc(req, &desc);
++ if (ret)
++ goto free_req;
+
+ ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
+ GFP_KERNEL);
+- if (ret) {
+- blk_mq_free_request(req);
+- goto out;
+- }
++ if (ret)
++ goto erase_desc;
+
+ status = blk_execute_rq(req, 0);
+ ret = blk_status_to_errno(status);
++erase_desc:
++ ublk_zoned_erase_report_desc(req);
++free_req:
+ blk_mq_free_request(req);
+ if (ret)
+ goto out;
+@@ -375,7 +406,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
+ {
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_io *io = &ubq->ios[req->tag];
+- struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
++ struct ublk_zoned_report_desc *desc;
+ u32 ublk_op;
+
+ switch (req_op(req)) {
+@@ -398,12 +429,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
+ ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
+ break;
+ case REQ_OP_DRV_IN:
+- ublk_op = pdu->operation;
++ desc = ublk_zoned_get_report_desc(req);
++ if (!desc)
++ return BLK_STS_IOERR;
++ ublk_op = desc->operation;
+ switch (ublk_op) {
+ case UBLK_IO_OP_REPORT_ZONES:
+ iod->op_flags = ublk_op | ublk_req_build_flags(req);
+- iod->nr_zones = pdu->nr_zones;
+- iod->start_sector = pdu->sector;
++ iod->nr_zones = desc->nr_zones;
++ iod->start_sector = desc->sector;
+ return BLK_STS_OK;
+ default:
+ return BLK_STS_IOERR;
+@@ -1477,28 +1511,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+ return ubq->nr_io_ready == ubq->q_depth;
+ }
+
+-static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+-{
+- io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+-}
+-
+ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ {
+ int i;
+
+- if (!ublk_queue_ready(ubq))
+- return;
+-
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+- if (io->flags & UBLK_IO_FLAG_ACTIVE)
+- io_uring_cmd_complete_in_task(io->cmd,
+- ublk_cmd_cancel_cb);
+- }
++ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
++ bool done;
+
+- /* all io commands are canceled */
+- ubq->nr_io_ready = 0;
++ spin_lock(&ubq->cancel_lock);
++ done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
++ if (!done)
++ io->flags |= UBLK_IO_FLAG_CANCELED;
++ spin_unlock(&ubq->cancel_lock);
++
++ if (!done)
++ io_uring_cmd_done(io->cmd,
++ UBLK_IO_RES_ABORT, 0,
++ IO_URING_F_UNLOCKED);
++ }
++ }
+ }
+
+ /* Cancel all pending commands, must be called after del_gendisk() returns */
+@@ -1545,7 +1579,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ublk_wait_tagset_rqs_idle(ub);
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+- ublk_cancel_dev(ub);
+ /* we are going to release task_struct of ubq_daemon and resets
+ * ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
+ * Besides, monitor_work is not necessary in QUIESCED state since we have
+@@ -1568,6 +1601,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
+ __ublk_quiesce_dev(ub);
+ unlock:
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ }
+
+ static void ublk_unquiesce_dev(struct ublk_device *ub)
+@@ -1607,8 +1641,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ put_disk(ub->ub_disk);
+ ub->ub_disk = NULL;
+ unlock:
+- ublk_cancel_dev(ub);
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ cancel_delayed_work_sync(&ub->monitor_work);
+ }
+
+@@ -1962,6 +1996,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
+ void *ptr;
+ int size;
+
++ spin_lock_init(&ubq->cancel_lock);
+ ubq->flags = ub->dev_info.flags;
+ ubq->q_id = q_id;
+ ubq->q_depth = ub->dev_info.queue_depth;
+@@ -2292,10 +2327,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
+ * TODO: provide forward progress for RECOVERY handler, so that
+ * unprivileged device can benefit from it
+ */
+- if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
++ if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
+ info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
+ UBLK_F_USER_RECOVERY);
+
++ /*
++		 * For USER_COPY, we depend on userspace to fill the request
++		 * buffer via pwrite() to the ublk char device, which can't be
++		 * used for an unprivileged device.
++ */
++ if (info.flags & UBLK_F_USER_COPY)
++ return -EINVAL;
++ }
++
+ /* the created device is always owned by current user */
+ ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
+
+@@ -2569,8 +2613,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ int i;
+
+ WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
++
+ /* All old ioucmds have to be completed */
+- WARN_ON_ONCE(ubq->nr_io_ready);
++ ubq->nr_io_ready = 0;
+ /* old daemon is PF_EXITING, put it now */
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+@@ -2597,6 +2642,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
+ mutex_lock(&ub->mutex);
+ if (!ublk_can_use_recovery(ub))
+ goto out_unlock;
++ if (!ub->nr_queues_ready)
++ goto out_unlock;
+ /*
+	 * START_RECOVERY is only allowed after:
+ *
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 1fe011676d070e..41b2fd7e1b9e50 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1021,12 +1021,12 @@ static void virtblk_config_changed(struct virtio_device *vdev)
+ static int init_vq(struct virtio_blk *vblk)
+ {
+ int err;
+- int i;
++ unsigned short i;
+ vq_callback_t **callbacks;
+ const char **names;
+ struct virtqueue **vqs;
+ unsigned short num_vqs;
+- unsigned int num_poll_vqs;
++ unsigned short num_poll_vqs;
+ struct virtio_device *vdev = vblk->vdev;
+ struct irq_affinity desc = { 0, };
+
+@@ -1070,13 +1070,13 @@ static int init_vq(struct virtio_blk *vblk)
+
+ for (i = 0; i < num_vqs - num_poll_vqs; i++) {
+ callbacks[i] = virtblk_done;
+- snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
++ snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
+ names[i] = vblk->vqs[i].name;
+ }
+
+ for (; i < num_vqs; i++) {
+ callbacks[i] = NULL;
+- snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
++ snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
+ names[i] = vblk->vqs[i].name;
+ }
+
+@@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
+ u16 min_io_size;
+ u8 physical_block_exp, alignment_offset;
+ unsigned int queue_depth;
++ size_t max_dma_size;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+@@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
+ /* No real sector limit. */
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+
+- max_size = virtio_max_dma_size(vdev);
++ max_dma_size = virtio_max_dma_size(vdev);
++ max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
+
+ /* Host can optionally specify maximum segment size and number of
+ * segments. */
+@@ -1627,14 +1629,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
+ {
+ struct virtio_blk *vblk = vdev->priv;
+
++ /* Ensure no requests in virtqueues before deleting vqs. */
++ blk_mq_freeze_queue(vblk->disk->queue);
++
+ /* Ensure we don't receive any more interrupts */
+ virtio_reset_device(vdev);
+
+ /* Make sure no work handler is accessing the device. */
+ flush_work(&vblk->config_work);
+
+- blk_mq_quiesce_queue(vblk->disk->queue);
+-
+ vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
+
+@@ -1652,7 +1655,7 @@ static int virtblk_restore(struct virtio_device *vdev)
+
+ virtio_device_ready(vdev);
+
+- blk_mq_unquiesce_queue(vblk->disk->queue);
++ blk_mq_unfreeze_queue(vblk->disk->queue);
+ return 0;
+ }
+ #endif
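Unlike blk_mq_quiesce_queue(), which only stops new dispatch, blk_mq_freeze_queue() also waits for requests already queued in the virtqueues to complete, so del_vqs() can no longer free a virtqueue with I/O in flight. A condensed view of the pairing (hypothetical q):

/* Suspend: drain, then tear down. */
blk_mq_freeze_queue(q);		/* blocks until q_usage_counter drains */
/* ...virtio_reset_device(), flush_work(), del_vqs(): nothing in flight... */

/* Resume: re-create the virtqueues, then admit new I/O. */
blk_mq_unfreeze_queue(q);	/* pairs with blk_mq_freeze_queue() */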
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 06673c6ca25555..606f388c7a5716 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1983,6 +1983,13 @@ static void zram_destroy_comps(struct zram *zram)
+ zcomp_destroy(comp);
+ zram->num_active_comps--;
+ }
++
++ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
++ /* Do not free statically defined compression algorithms */
++ if (zram->comp_algs[prio] != default_compressor)
++ kfree(zram->comp_algs[prio]);
++ zram->comp_algs[prio] = NULL;
++ }
+ }
+
+ static void zram_reset_device(struct zram *zram)
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 88262d3a93923a..ce97b336fbfb8a 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -3,7 +3,6 @@
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ */
+
+-
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+@@ -128,7 +127,6 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+ * for AR3012
+ */
+ static const struct usb_device_id ath3k_blist_tbl[] = {
+-
+ /* Atheros AR3012 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+@@ -202,7 +200,7 @@ static inline void ath3k_log_failed_loading(int err, int len, int size,
+ #define TIMEGAP_USEC_MAX 100
+
+ static int ath3k_load_firmware(struct usb_device *udev,
+- const struct firmware *firmware)
++ const struct firmware *firmware)
+ {
+ u8 *send_buf;
+ int len = 0;
+@@ -237,9 +235,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
+ memcpy(send_buf, firmware->data + sent, size);
+
+ err = usb_bulk_msg(udev, pipe, send_buf, size,
+- &len, 3000);
++ &len, 3000);
+
+- if (err || (len != size)) {
++ if (err || len != size) {
+ ath3k_log_failed_loading(err, len, size, count);
+ goto error;
+ }
+@@ -262,7 +260,7 @@ static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
+ }
+
+ static int ath3k_get_version(struct usb_device *udev,
+- struct ath3k_version *version)
++ struct ath3k_version *version)
+ {
+ return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+@@ -271,7 +269,7 @@ static int ath3k_get_version(struct usb_device *udev,
+ }
+
+ static int ath3k_load_fwfile(struct usb_device *udev,
+- const struct firmware *firmware)
++ const struct firmware *firmware)
+ {
+ u8 *send_buf;
+ int len = 0;
+@@ -310,8 +308,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
+ memcpy(send_buf, firmware->data + sent, size);
+
+ err = usb_bulk_msg(udev, pipe, send_buf, size,
+- &len, 3000);
+- if (err || (len != size)) {
++ &len, 3000);
++ if (err || len != size) {
+ ath3k_log_failed_loading(err, len, size, count);
+ kfree(send_buf);
+ return err;
+@@ -425,7 +423,6 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ }
+
+ switch (fw_version.ref_clock) {
+-
+ case ATH3K_XTAL_FREQ_26M:
+ clk_value = 26;
+ break;
+@@ -441,7 +438,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ }
+
+ snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+- le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
++ le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
+
+ ret = request_firmware(&firmware, filename, &udev->dev);
+ if (ret < 0) {
+@@ -456,7 +453,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ }
+
+ static int ath3k_probe(struct usb_interface *intf,
+- const struct usb_device_id *id)
++ const struct usb_device_id *id)
+ {
+ const struct firmware *firmware;
+ struct usb_device *udev = interface_to_usbdev(intf);
+@@ -505,10 +502,10 @@ static int ath3k_probe(struct usb_interface *intf,
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ BT_ERR("Firmware file \"%s\" not found",
+- ATH3K_FIRMWARE);
++ ATH3K_FIRMWARE);
+ else
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+- ATH3K_FIRMWARE, ret);
++ ATH3K_FIRMWARE, ret);
+ return ret;
+ }
+
+diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
+index 2462796a512a5f..a936219aebb81a 100644
+--- a/drivers/bluetooth/btintel.c
++++ b/drivers/bluetooth/btintel.c
+@@ -26,21 +26,11 @@
+ #define ECDSA_OFFSET 644
+ #define ECDSA_HEADER_LEN 320
+
+-#define BTINTEL_PPAG_NAME "PPAG"
+-
+ enum {
+ DSM_SET_WDISABLE2_DELAY = 1,
+ DSM_SET_RESET_METHOD = 3,
+ };
+
+-/* structure to store the PPAG data read from ACPI table */
+-struct btintel_ppag {
+- u32 domain;
+- u32 mode;
+- acpi_status status;
+- struct hci_dev *hdev;
+-};
+-
+ #define CMD_WRITE_BOOT_PARAMS 0xfc0e
+ struct cmd_write_boot_params {
+ __le32 boot_addr;
+@@ -441,7 +431,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
+ return PTR_ERR(skb);
+ }
+
+- if (skb->len != sizeof(*ver)) {
++ if (!skb || skb->len != sizeof(*ver)) {
+ bt_dev_err(hdev, "Intel version event size mismatch");
+ kfree_skb(skb);
+ return -EILSEQ;
+@@ -1312,65 +1302,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
+ return 0;
+ }
+
+-static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
+- void **ret)
+-{
+- acpi_status status;
+- size_t len;
+- struct btintel_ppag *ppag = data;
+- union acpi_object *p, *elements;
+- struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
+- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+- struct hci_dev *hdev = ppag->hdev;
+-
+- status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+- if (ACPI_FAILURE(status)) {
+- bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
+- return status;
+- }
+-
+- len = strlen(string.pointer);
+- if (len < strlen(BTINTEL_PPAG_NAME)) {
+- kfree(string.pointer);
+- return AE_OK;
+- }
+-
+- if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
+- kfree(string.pointer);
+- return AE_OK;
+- }
+- kfree(string.pointer);
+-
+- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- ppag->status = status;
+- bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
+- return status;
+- }
+-
+- p = buffer.pointer;
+- ppag = (struct btintel_ppag *)data;
+-
+- if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
+- kfree(buffer.pointer);
+- bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
+- p->type, p->package.count);
+- ppag->status = AE_ERROR;
+- return AE_ERROR;
+- }
+-
+- elements = p->package.elements;
+-
+- /* PPAG table is located at element[1] */
+- p = &elements[1];
+-
+- ppag->domain = (u32)p->package.elements[0].integer.value;
+- ppag->mode = (u32)p->package.elements[1].integer.value;
+- ppag->status = AE_OK;
+- kfree(buffer.pointer);
+- return AE_CTRL_TERMINATE;
+-}
+-
+ static int btintel_set_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+ {
+@@ -2399,10 +2330,13 @@ static int btintel_configure_offload(struct hci_dev *hdev)
+
+ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
+ {
+- struct btintel_ppag ppag;
+ struct sk_buff *skb;
+ struct hci_ppag_enable_cmd ppag_cmd;
+ acpi_handle handle;
++ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
++ union acpi_object *p, *elements;
++ u32 domain, mode;
++ acpi_status status;
+
+ /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
+ switch (ver->cnvr_top & 0xFFF) {
+@@ -2420,22 +2354,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
+ return;
+ }
+
+- memset(&ppag, 0, sizeof(ppag));
+-
+- ppag.hdev = hdev;
+- ppag.status = AE_NOT_FOUND;
+- acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
+- btintel_ppag_callback, &ppag, NULL);
+-
+- if (ACPI_FAILURE(ppag.status)) {
+- if (ppag.status == AE_NOT_FOUND) {
++ status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer);
++ if (ACPI_FAILURE(status)) {
++ if (status == AE_NOT_FOUND) {
+ bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
+ return;
+ }
++ bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
+ return;
+ }
+
+- if (ppag.domain != 0x12) {
++ p = buffer.pointer;
++ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
++ bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
++ p->type, p->package.count);
++ kfree(buffer.pointer);
++ return;
++ }
++
++ elements = p->package.elements;
++
++ /* PPAG table is located at element[1] */
++ p = &elements[1];
++
++ domain = (u32)p->package.elements[0].integer.value;
++ mode = (u32)p->package.elements[1].integer.value;
++ kfree(buffer.pointer);
++
++ if (domain != 0x12) {
+ bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware");
+ return;
+ }
+@@ -2446,19 +2392,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
+ * BIT 1 : 0 Disabled in China
+ * 1 Enabled in China
+ */
+- if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) {
+- bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS");
++ mode &= 0x03;
++
++ if (!mode) {
++ bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS");
+ return;
+ }
+
+- ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode);
++ ppag_cmd.ppag_enable_flags = cpu_to_le32(mode);
+
+- skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
++ skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd),
++ &ppag_cmd, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
+ return;
+ }
+- bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode);
++ bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode);
+ kfree_skb(skb);
+ }
+
+@@ -2896,6 +2845,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
+ btintel_set_dsm_reset_method(hdev, &ver_tlv);
+
+ err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
++ if (err)
++ goto exit_error;
++
+ btintel_register_devcoredump_support(hdev);
+ break;
+ default:
+diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
+index 9658b33c824a70..18f34998a1204a 100644
+--- a/drivers/bluetooth/btmrvl_main.c
++++ b/drivers/bluetooth/btmrvl_main.c
+@@ -121,13 +121,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
+ ((event->data[2] == MODULE_BROUGHT_UP) ||
+ (event->data[2] == MODULE_ALREADY_UP)) ?
+ "Bring-up succeed" : "Bring-up failed");
+-
+- if (event->length > 3 && event->data[3])
+- priv->btmrvl_dev.dev_type = HCI_AMP;
+- else
+- priv->btmrvl_dev.dev_type = HCI_PRIMARY;
+-
+- BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
+ } else if (priv->btmrvl_dev.sendcmdflag &&
+ event->data[1] == MODULE_SHUTDOWN_REQ) {
+ BT_DBG("EVENT:%s", (event->data[2]) ?
+@@ -686,8 +679,6 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
+ hdev->wakeup = btmrvl_wakeup;
+ SET_HCIDEV_DEV(hdev, &card->func->dev);
+
+- hdev->dev_type = priv->btmrvl_dev.dev_type;
+-
+ ret = hci_register_dev(hdev);
+ if (ret < 0) {
+ BT_ERR("Can not register HCI device");
+diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
+index d76c799553aaa1..468e4165c7cc0e 100644
+--- a/drivers/bluetooth/btmrvl_sdio.c
++++ b/drivers/bluetooth/btmrvl_sdio.c
+@@ -92,7 +92,7 @@ static int btmrvl_sdio_probe_of(struct device *dev,
+ } else {
+ ret = devm_request_irq(dev, cfg->irq_bt,
+ btmrvl_wake_irq_bt,
+- 0, "bt_wake", card);
++ IRQF_NO_AUTOEN, "bt_wake", card);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request irq_bt %d (%d)\n",
+@@ -101,7 +101,6 @@ static int btmrvl_sdio_probe_of(struct device *dev,
+
+ /* Configure wakeup (enabled by default) */
+ device_init_wakeup(dev, true);
+- disable_irq(cfg->irq_bt);
+ }
+ }
+
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index aaabb732082cd8..812fd2a8f853e1 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -372,14 +372,18 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+ struct btmediatek_data *data = hci_get_priv(hdev);
+ int err;
+
+- if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
++ if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
++ kfree_skb(skb);
+ return 0;
++ }
+
+ switch (data->cd_info.state) {
+ case HCI_DEVCOREDUMP_IDLE:
+ err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
+- if (err < 0)
++ if (err < 0) {
++ kfree_skb(skb);
+ break;
++ }
+ data->cd_info.cnt = 0;
+
+ /* It is supposed coredump can be done within 5 seconds */
+@@ -405,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+ break;
+ }
+
+- if (err < 0)
+- kfree_skb(skb);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(btmtk_process_coredump);
+@@ -420,5 +421,6 @@ MODULE_LICENSE("GPL");
+ MODULE_FIRMWARE(FIRMWARE_MT7622);
+ MODULE_FIRMWARE(FIRMWARE_MT7663);
+ MODULE_FIRMWARE(FIRMWARE_MT7668);
++MODULE_FIRMWARE(FIRMWARE_MT7922);
+ MODULE_FIRMWARE(FIRMWARE_MT7961);
+ MODULE_FIRMWARE(FIRMWARE_MT7925);
+diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
+index 56f5502baadf9f..cbcdb99a22e6dd 100644
+--- a/drivers/bluetooth/btmtk.h
++++ b/drivers/bluetooth/btmtk.h
+@@ -4,6 +4,7 @@
+ #define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
+ #define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+ #define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
++#define FIRMWARE_MT7922 "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
+ #define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+ #define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin"
+
+diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
+index 935feab815d973..203a000a84e341 100644
+--- a/drivers/bluetooth/btmtkuart.c
++++ b/drivers/bluetooth/btmtkuart.c
+@@ -336,7 +336,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
+ return data;
+ }
+
+-static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
++static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+ {
+ struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
+ const unsigned char *p_left = data, *p_h4;
+@@ -375,25 +375,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
+ bt_dev_err(bdev->hdev,
+ "Frame reassembly failed (%d)", err);
+ bdev->rx_skb = NULL;
+- return err;
++ return;
+ }
+
+ sz_left -= sz_h4;
+ p_left += sz_h4;
+ }
+-
+- return 0;
+ }
+
+ static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ size_t count)
+ {
+ struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
+- int err;
+
+- err = btmtkuart_recv(bdev->hdev, data, count);
+- if (err < 0)
+- return err;
++ btmtkuart_recv(bdev->hdev, data, count);
+
+ bdev->hdev->stat.byte_rx += count;
+
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index b7e66b7ac57022..5ee9a8b8dcfdb8 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -29,6 +29,7 @@
+ #define BTNXPUART_CHECK_BOOT_SIGNATURE 3
+ #define BTNXPUART_SERDEV_OPEN 4
+ #define BTNXPUART_IR_IN_PROGRESS 5
++#define BTNXPUART_FW_DOWNLOAD_ABORT 6
+
+ /* NXP HW err codes */
+ #define BTNXPUART_IR_HW_ERR 0xb0
+@@ -126,6 +127,7 @@ struct ps_data {
+ struct hci_dev *hdev;
+ struct work_struct work;
+ struct timer_list ps_timer;
++ struct mutex ps_lock;
+ };
+
+ struct wakeup_cmd_payload {
+@@ -158,6 +160,7 @@ struct btnxpuart_dev {
+ u8 fw_name[MAX_FW_FILE_NAME_LEN];
+ u32 fw_dnld_v1_offset;
+ u32 fw_v1_sent_bytes;
++ u32 fw_dnld_v3_offset;
+ u32 fw_v3_offset_correction;
+ u32 fw_v1_expected_len;
+ u32 boot_reg_offset;
+@@ -186,6 +189,11 @@ struct btnxpuart_dev {
+ #define NXP_NAK_V3 0x7b
+ #define NXP_CRC_ERROR_V3 0x7c
+
++/* Bootloader signature error codes */
++#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */
++#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */
++#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */
++
+ #define HDR_LEN 16
+
+ #define NXP_RECV_CHIP_VER_V1 \
+@@ -276,11 +284,22 @@ struct nxp_bootloader_cmd {
+ __be32 crc;
+ } __packed;
+
++struct nxp_v3_rx_timeout_nak {
++ u8 nak;
++ __le32 offset;
++ u8 crc;
++} __packed;
++
++union nxp_v3_rx_timeout_nak_u {
++ struct nxp_v3_rx_timeout_nak pkt;
++ u8 buf[6];
++};
++
+ static u8 crc8_table[CRC8_TABLE_SIZE];
+
+ /* Default configurations */
+ #define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK
+-#define DEFAULT_PS_MODE PS_MODE_DISABLE
++#define DEFAULT_PS_MODE PS_MODE_ENABLE
+ #define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE
+
+ static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
+@@ -317,6 +336,9 @@ static void ps_start_timer(struct btnxpuart_dev *nxpdev)
+
+ if (psdata->cur_psmode == PS_MODE_ENABLE)
+ mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval));
++
++ if (psdata->ps_state == PS_STATE_AWAKE && psdata->ps_cmd == PS_CMD_ENTER_PS)
++ cancel_work_sync(&psdata->work);
+ }
+
+ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
+@@ -324,7 +346,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ flush_work(&psdata->work);
+- del_timer_sync(&psdata->ps_timer);
++ timer_shutdown_sync(&psdata->ps_timer);
+ }
+
+ static void ps_control(struct hci_dev *hdev, u8 ps_state)
+@@ -337,6 +359,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
+ !test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
+ return;
+
++ mutex_lock(&psdata->ps_lock);
+ switch (psdata->cur_h2c_wakeupmode) {
+ case WAKEUP_METHOD_DTR:
+ if (ps_state == PS_STATE_AWAKE)
+@@ -350,12 +373,15 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
+ status = serdev_device_break_ctl(nxpdev->serdev, 0);
+ else
+ status = serdev_device_break_ctl(nxpdev->serdev, -1);
++ msleep(20); /* Allow chip to detect UART-break and enter sleep */
+ bt_dev_dbg(hdev, "Set UART break: %s, status=%d",
+ str_on_off(ps_state == PS_STATE_SLEEP), status);
+ break;
+ }
+ if (!status)
+ psdata->ps_state = ps_state;
++ mutex_unlock(&psdata->ps_lock);
++
+ if (ps_state == PS_STATE_AWAKE)
+ btnxpuart_tx_wakeup(nxpdev);
+ }
+@@ -391,17 +417,42 @@ static void ps_setup(struct hci_dev *hdev)
+
+ psdata->hdev = hdev;
+ INIT_WORK(&psdata->work, ps_work_func);
++ mutex_init(&psdata->ps_lock);
+ timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
+ }
+
+-static void ps_wakeup(struct btnxpuart_dev *nxpdev)
++static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
+ {
+ struct ps_data *psdata = &nxpdev->psdata;
++ u8 ps_state;
++
++ mutex_lock(&psdata->ps_lock);
++ ps_state = psdata->ps_state;
++ mutex_unlock(&psdata->ps_lock);
+
+- if (psdata->ps_state != PS_STATE_AWAKE) {
++ if (ps_state != PS_STATE_AWAKE) {
+ psdata->ps_cmd = PS_CMD_EXIT_PS;
+ schedule_work(&psdata->work);
++ return true;
+ }
++ return false;
++}
++
++static void ps_cleanup(struct btnxpuart_dev *nxpdev)
++{
++ struct ps_data *psdata = &nxpdev->psdata;
++ u8 ps_state;
++
++ mutex_lock(&psdata->ps_lock);
++ ps_state = psdata->ps_state;
++ mutex_unlock(&psdata->ps_lock);
++
++ if (ps_state != PS_STATE_AWAKE)
++ ps_control(psdata->hdev, PS_STATE_AWAKE);
++
++ ps_cancel_timer(nxpdev);
++ cancel_work_sync(&psdata->work);
++ mutex_destroy(&psdata->ps_lock);
+ }
+
+ static int send_ps_cmd(struct hci_dev *hdev, void *data)
+@@ -534,6 +585,7 @@ static int nxp_download_firmware(struct hci_dev *hdev)
+ nxpdev->fw_v1_sent_bytes = 0;
+ nxpdev->fw_v1_expected_len = HDR_LEN;
+ nxpdev->boot_reg_offset = 0;
++ nxpdev->fw_dnld_v3_offset = 0;
+ nxpdev->fw_v3_offset_correction = 0;
+ nxpdev->baudrate_changed = false;
+ nxpdev->timeout_changed = false;
+@@ -548,14 +600,23 @@ static int nxp_download_firmware(struct hci_dev *hdev)
+ !test_bit(BTNXPUART_FW_DOWNLOADING,
+ &nxpdev->tx_state),
+ msecs_to_jiffies(60000));
++
++ release_firmware(nxpdev->fw);
++ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
++
+ if (err == 0) {
+- bt_dev_err(hdev, "FW Download Timeout.");
++ bt_dev_err(hdev, "FW Download Timeout. offset: %d",
++ nxpdev->fw_dnld_v1_offset ?
++ nxpdev->fw_dnld_v1_offset :
++ nxpdev->fw_dnld_v3_offset);
+ return -ETIMEDOUT;
+ }
++ if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) {
++ bt_dev_err(hdev, "FW Download Aborted");
++ return -EINTR;
++ }
+
+ serdev_device_set_flow_control(nxpdev->serdev, true);
+- release_firmware(nxpdev->fw);
+- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+
+ /* Allow the downloaded FW to initialize */
+ msleep(1200);
+@@ -883,6 +944,32 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
+ return 0;
+ }
+
++static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req)
++{
++ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
++ __u32 offset = __le32_to_cpu(req->offset);
++ __u16 err = __le16_to_cpu(req->error);
++ union nxp_v3_rx_timeout_nak_u nak_tx_buf;
++
++ switch (err) {
++ case NXP_ACK_RX_TIMEOUT:
++ case NXP_HDR_RX_TIMEOUT:
++ case NXP_DATA_RX_TIMEOUT:
++ nak_tx_buf.pkt.nak = NXP_NAK_V3;
++ nak_tx_buf.pkt.offset = __cpu_to_le32(offset);
++ nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf,
++ sizeof(nak_tx_buf) - 1, 0xff);
++ serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf,
++ sizeof(nak_tx_buf));
++ break;
++ default:
++ bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err);
++		break;
++	}
++}
++
+ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+@@ -897,7 +984,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
+ if (!req || !nxpdev->fw)
+ goto free_skb;
+
+- nxp_send_ack(NXP_ACK_V3, hdev);
++ if (!req->error) {
++ nxp_send_ack(NXP_ACK_V3, hdev);
++ } else {
++ nxp_handle_fw_download_error(hdev, req);
++ goto free_skb;
++ }
+
+ len = __le16_to_cpu(req->len);
+
+@@ -924,9 +1016,6 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
+ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
+ goto free_skb;
+ }
+- if (req->error)
+- bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip",
+- req->error);
+
+ offset = __le32_to_cpu(req->offset);
+ if (offset < nxpdev->fw_v3_offset_correction) {
+@@ -938,8 +1027,9 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
+ goto free_skb;
+ }
+
+- serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset -
+- nxpdev->fw_v3_offset_correction, len);
++ nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction;
++ serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data +
++ nxpdev->fw_dnld_v3_offset, len);
+
+ free_skb:
+ kfree_skb(skb);
+@@ -1171,7 +1261,6 @@ static struct sk_buff *nxp_dequeue(void *data)
+ {
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data;
+
+- ps_wakeup(nxpdev);
+ ps_start_timer(nxpdev);
+ return skb_dequeue(&nxpdev->txq);
+ }
+@@ -1186,6 +1275,9 @@ static void btnxpuart_tx_work(struct work_struct *work)
+ struct sk_buff *skb;
+ int len;
+
++ if (ps_wakeup(nxpdev))
++ return;
++
+ while ((skb = nxp_dequeue(nxpdev))) {
+ len = serdev_device_write_buf(serdev, skb->data, skb->len);
+ hdev->stat.byte_tx += len;
+@@ -1232,8 +1324,12 @@ static int btnxpuart_close(struct hci_dev *hdev)
+ {
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+- ps_wakeup(nxpdev);
+ serdev_device_close(nxpdev->serdev);
++ skb_queue_purge(&nxpdev->txq);
++ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
++ kfree_skb(nxpdev->rx_skb);
++ nxpdev->rx_skb = NULL;
++ }
+ clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
+ return 0;
+ }
+@@ -1248,8 +1344,10 @@ static int btnxpuart_flush(struct hci_dev *hdev)
+
+ cancel_work_sync(&nxpdev->tx_work);
+
+- kfree_skb(nxpdev->rx_skb);
+- nxpdev->rx_skb = NULL;
++ if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
++ kfree_skb(nxpdev->rx_skb);
++ nxpdev->rx_skb = NULL;
++ }
+
+ return 0;
+ }
+@@ -1276,11 +1374,10 @@ static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data,
+ if (IS_ERR(nxpdev->rx_skb)) {
+ int err = PTR_ERR(nxpdev->rx_skb);
+ /* Safe to ignore out-of-sync bootloader signatures */
+- if (is_fw_downloading(nxpdev))
+- return count;
+- bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
++ if (!is_fw_downloading(nxpdev))
++ bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
+ nxpdev->rx_skb = NULL;
+- return err;
++ return count;
+ }
+ if (!is_fw_downloading(nxpdev))
+ nxpdev->hdev->stat.byte_rx += count;
+@@ -1366,16 +1463,22 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
+ struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
+ struct hci_dev *hdev = nxpdev->hdev;
+
+- /* Restore FW baudrate to fw_init_baudrate if changed.
+- * This will ensure FW baudrate is in sync with
+- * driver baudrate in case this driver is re-inserted.
+- */
+- if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+- nxp_set_baudrate_cmd(hdev, NULL);
++ if (is_fw_downloading(nxpdev)) {
++ set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state);
++ clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
++ wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
++ wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
++ } else {
++ /* Restore FW baudrate to fw_init_baudrate if changed.
++ * This will ensure FW baudrate is in sync with
++ * driver baudrate in case this driver is re-inserted.
++ */
++ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
++ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
++ nxp_set_baudrate_cmd(hdev, NULL);
++ }
+ }
+-
+- ps_cancel_timer(nxpdev);
++ ps_cleanup(nxpdev);
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 5a35ac4138c6cf..35fb26cbf22941 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -99,7 +99,8 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ {
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+- char cmd, build_label[QCA_FW_BUILD_VER_LEN];
++ char *build_label;
++ char cmd;
+ int build_lbl_len, err = 0;
+
+ bt_dev_dbg(hdev, "QCA read fw build info");
+@@ -114,6 +115,11 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ return err;
+ }
+
++ if (skb->len < sizeof(*edl)) {
++ err = -EILSEQ;
++ goto out;
++ }
++
+ edl = (struct edl_event_hdr *)(skb->data);
+ if (!edl) {
+ bt_dev_err(hdev, "QCA read fw build info with no header");
+@@ -129,14 +135,27 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ goto out;
+ }
+
++ if (skb->len < sizeof(*edl) + 1) {
++ err = -EILSEQ;
++ goto out;
++ }
++
+ build_lbl_len = edl->data[0];
+- if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
+- memcpy(build_label, edl->data + 1, build_lbl_len);
+- *(build_label + build_lbl_len) = '\0';
++
++ if (skb->len < sizeof(*edl) + 1 + build_lbl_len) {
++ err = -EILSEQ;
++ goto out;
++ }
++
++ build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
++ if (!build_label) {
++ err = -ENOMEM;
++ goto out;
+ }
+
+ hci_set_fw_info(hdev, "%s", build_label);
+
++ kfree(build_label);
+ out:
+ kfree_skb(skb);
+ return err;
+@@ -152,7 +171,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
+ bt_dev_dbg(hdev, "QCA Patch config");
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
+- cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
++ cmd, 0, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
+@@ -205,6 +224,49 @@ static int qca_send_reset(struct hci_dev *hdev)
+ return 0;
+ }
+
++static int qca_read_fw_board_id(struct hci_dev *hdev, u16 *bid)
++{
++ u8 cmd;
++ struct sk_buff *skb;
++ struct edl_event_hdr *edl;
++ int err = 0;
++
++ cmd = EDL_GET_BID_REQ_CMD;
++ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
++ &cmd, 0, HCI_INIT_TIMEOUT);
++ if (IS_ERR(skb)) {
++ err = PTR_ERR(skb);
++ bt_dev_err(hdev, "Reading QCA board ID failed (%d)", err);
++ return err;
++ }
++
++ edl = skb_pull_data(skb, sizeof(*edl));
++ if (!edl) {
++ bt_dev_err(hdev, "QCA read board ID with no header");
++ err = -EILSEQ;
++ goto out;
++ }
++
++ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
++ edl->rtype != EDL_GET_BID_REQ_CMD) {
++ bt_dev_err(hdev, "QCA Wrong packet: %d %d", edl->cresp, edl->rtype);
++ err = -EIO;
++ goto out;
++ }
++
++ if (skb->len < 3) {
++ err = -EILSEQ;
++ goto out;
++ }
++
++ *bid = (edl->data[1] << 8) + edl->data[2];
++ bt_dev_dbg(hdev, "%s: bid = %x", __func__, *bid);
++
++out:
++ kfree_skb(skb);
++ return err;
++}
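++/* Note the big-endian assembly above: edl->data[1] is the high byte and
++ * edl->data[2] the low byte, so data[1] = 0x01, data[2] = 0x04 yields
++ * bid 0x0104; the board ID later selects the NVM file name suffix in
++ * qca_uart_setup().
++ */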
++
+ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ {
+ struct sk_buff *skb;
+@@ -227,9 +289,10 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ }
+ EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
+-static void qca_tlv_check_data(struct hci_dev *hdev,
++static int qca_tlv_check_data(struct hci_dev *hdev,
+ struct qca_fw_config *config,
+- u8 *fw_data, enum qca_btsoc_type soc_type)
++ u8 *fw_data, size_t fw_size,
++ enum qca_btsoc_type soc_type)
+ {
+ const u8 *data;
+ u32 type_len;
+@@ -239,12 +302,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ struct tlv_type_patch *tlv_patch;
+ struct tlv_type_nvm *tlv_nvm;
+ uint8_t nvm_baud_rate = config->user_baud_rate;
++ u8 type;
+
+ config->dnld_mode = QCA_SKIP_EVT_NONE;
+ config->dnld_type = QCA_SKIP_EVT_NONE;
+
+ switch (config->type) {
+ case ELF_TYPE_PATCH:
++ if (fw_size < 7)
++ return -EINVAL;
++
+ config->dnld_mode = QCA_SKIP_EVT_VSE_CC;
+ config->dnld_type = QCA_SKIP_EVT_VSE_CC;
+
+@@ -253,6 +320,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]);
+ break;
+ case TLV_TYPE_PATCH:
++ if (fw_size < sizeof(struct tlv_type_hdr) + sizeof(struct tlv_type_patch))
++ return -EINVAL;
++
+ tlv = (struct tlv_type_hdr *)fw_data;
+ type_len = le32_to_cpu(tlv->type_len);
+ tlv_patch = (struct tlv_type_patch *)tlv->data;
+@@ -292,25 +362,64 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ break;
+
+ case TLV_TYPE_NVM:
++ if (fw_size < sizeof(struct tlv_type_hdr))
++ return -EINVAL;
++
+ tlv = (struct tlv_type_hdr *)fw_data;
+
+ type_len = le32_to_cpu(tlv->type_len);
+- length = (type_len >> 8) & 0x00ffffff;
++ length = type_len >> 8;
++ type = type_len & 0xff;
+
+- BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
++ /* Some NVM files carry more than one set of tags; when there is
++ * more than one set, they are wrapped in an enclosing header of
++ * type 4. For now, parse only the first set, and only if it has
++ * type 2.
++ */
++ if (type == 4) {
++ if (fw_size < 2 * sizeof(struct tlv_type_hdr))
++ return -EINVAL;
++
++ tlv++;
++
++ type_len = le32_to_cpu(tlv->type_len);
++ length = type_len >> 8;
++ type = type_len & 0xff;
++ }
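++ /* Sketch of the framing assumed here: type_len is a LE32 whose
++ * low byte is the tag type and whose upper 24 bits are the
++ * length, so e.g. type_len 0x00012c02 decodes to type 2 (NVM)
++ * with a 300-byte payload.
++ */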
++
++ BT_DBG("TLV Type\t\t : 0x%x", type);
+ BT_DBG("Length\t\t : %d bytes", length);
+
++ if (type != 2)
++ break;
++
++ if (fw_size < length + (tlv->data - fw_data))
++ return -EINVAL;
++
+ idx = 0;
+ data = tlv->data;
+- while (idx < length) {
++ while (idx < length - sizeof(struct tlv_type_nvm)) {
+ tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+ tag_id = le16_to_cpu(tlv_nvm->tag_id);
+ tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
++ if (length < idx + sizeof(struct tlv_type_nvm) + tag_len)
++ return -EINVAL;
++
+ /* Update NVM tags as needed */
+ switch (tag_id) {
++ case EDL_TAG_ID_BD_ADDR:
++ if (tag_len != sizeof(bdaddr_t))
++ return -EINVAL;
++
++ memcpy(&config->bdaddr, tlv_nvm->data, sizeof(bdaddr_t));
++
++ break;
++
+ case EDL_TAG_ID_HCI:
++ if (tag_len < 3)
++ return -EINVAL;
++
+ /* HCI transport layer parameters
+ * enabling software inband sleep
+ * onto controller side.
+@@ -326,6 +435,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ break;
+
+ case EDL_TAG_ID_DEEP_SLEEP:
++ if (tag_len < 1)
++ return -EINVAL;
++
+ /* Sleep enable mask
+ * enabling deep sleep feature on controller.
+ */
+@@ -334,14 +446,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
+ break;
+ }
+
+- idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
++ idx += sizeof(struct tlv_type_nvm) + tag_len;
+ }
+ break;
+
+ default:
+ BT_ERR("Unknown TLV type %d", config->type);
+- break;
++ return -EINVAL;
+ }
++
++ return 0;
+ }
+
+ static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
+@@ -491,7 +605,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ memcpy(data, fw->data, size);
+ release_firmware(fw);
+
+- qca_tlv_check_data(hdev, config, data, soc_type);
++ ret = qca_tlv_check_data(hdev, config, data, size, soc_type);
++ if (ret)
++ goto out;
+
+ segment = data;
+ remain = size;
+@@ -574,14 +690,64 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+ }
+ EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
++static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *config)
++{
++ struct hci_rp_read_bd_addr *bda;
++ struct sk_buff *skb;
++ int err;
++
++ if (bacmp(&hdev->public_addr, BDADDR_ANY))
++ return 0;
++
++ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
++ HCI_INIT_TIMEOUT);
++ if (IS_ERR(skb)) {
++ err = PTR_ERR(skb);
++ bt_dev_err(hdev, "Failed to read device address (%d)", err);
++ return err;
++ }
++
++ if (skb->len != sizeof(*bda)) {
++ bt_dev_err(hdev, "Device address length mismatch");
++ kfree_skb(skb);
++ return -EIO;
++ }
++
++ bda = (struct hci_rp_read_bd_addr *)skb->data;
++ if (!bacmp(&bda->bdaddr, &config->bdaddr))
++ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
++
++ kfree_skb(skb);
++
++ return 0;
++}
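++/* If the controller still reports the default address carried in the
++ * NVM's EDL_TAG_ID_BD_ADDR tag (captured into config->bdaddr by
++ * qca_tlv_check_data() above), HCI_QUIRK_USE_BDADDR_PROPERTY makes the
++ * stack take the address from the devicetree property instead.
++ */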
++
++static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
++ struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
++{
++ const char *variant;
++
++ /* hsp gf chip */
++ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
++ variant = "g";
++ else
++ variant = "";
++
++ if (bid == 0x0)
++ snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
++ else
++ snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
++}
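++/* For example, assuming the GF mask above matches, a SoC with
++ * rom_ver 0x21 maps to "qca/hpnv21g.bin" when no board ID is known,
++ * or to "qca/hpnv21g.8" for board ID 0x8.
++ */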
++
+ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
+ const char *firmware_name)
+ {
+- struct qca_fw_config config;
++ struct qca_fw_config config = {};
+ int err;
+ u8 rom_ver = 0;
+ u32 soc_ver;
++ u16 boardid = 0;
+
+ bt_dev_dbg(hdev, "QCA setup on UART");
+
+@@ -615,6 +781,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ break;
++ case QCA_QCA2066:
++ snprintf(config.fwname, sizeof(config.fwname),
++ "qca/hpbtfw%02x.tlv", rom_ver);
++ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
+@@ -649,6 +819,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ /* Give the controller some time to get ready to receive the NVM */
+ msleep(10);
+
++ if (soc_type == QCA_QCA2066)
++ qca_read_fw_board_id(hdev, &boardid);
++
+ /* Download NVM configuration */
+ config.type = TLV_TYPE_NVM;
+ if (firmware_name) {
+@@ -671,6 +844,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apnv%02x.bin", rom_ver);
+ break;
++ case QCA_QCA2066:
++ qca_generate_hsp_nvm_name(config.fwname,
++ sizeof(config.fwname), ver, rom_ver, boardid);
++ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htnv%02x.bin", rom_ver);
+@@ -702,6 +879,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+
+ switch (soc_type) {
+ case QCA_WCN3991:
++ case QCA_QCA2066:
+ case QCA_QCA6390:
+ case QCA_WCN6750:
+ case QCA_WCN6855:
+@@ -750,6 +928,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ break;
+ }
+
++ err = qca_check_bdaddr(hdev, &config);
++ if (err)
++ return err;
++
+ bt_dev_info(hdev, "QCA setup on UART is completed");
+
+ return 0;
+@@ -758,11 +940,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
+
+ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+ {
++ bdaddr_t bdaddr_swapped;
+ struct sk_buff *skb;
+ int err;
+
+- skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
+- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
++ baswap(&bdaddr_swapped, bdaddr);
++
++ skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
++ &bdaddr_swapped, HCI_EV_VENDOR,
++ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
+diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
+index 03bff5c0059def..215433fd76a106 100644
+--- a/drivers/bluetooth/btqca.h
++++ b/drivers/bluetooth/btqca.h
+@@ -12,6 +12,7 @@
+ #define EDL_PATCH_VER_REQ_CMD (0x19)
+ #define EDL_PATCH_TLV_REQ_CMD (0x1E)
+ #define EDL_GET_BUILD_INFO_CMD (0x20)
++#define EDL_GET_BID_REQ_CMD (0x23)
+ #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
+ #define EDL_PATCH_CONFIG_CMD (0x28)
+ #define MAX_SIZE_PER_TLV_SEGMENT (243)
+@@ -28,6 +29,7 @@
+ #define EDL_PATCH_CONFIG_RES_EVT (0x00)
+ #define QCA_DISABLE_LOGGING_SUB_OP (0x14)
+
++#define EDL_TAG_ID_BD_ADDR 2
+ #define EDL_TAG_ID_HCI (17)
+ #define EDL_TAG_ID_DEEP_SLEEP (27)
+
+@@ -46,8 +48,8 @@
+ #define get_soc_ver(soc_id, rom_ver) \
+ ((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver)))
+
+-#define QCA_FW_BUILD_VER_LEN 255
+-
++#define QCA_HSP_GF_SOC_ID 0x1200
++#define QCA_HSP_GF_SOC_MASK 0x0000ff00
+
+ enum qca_baudrate {
+ QCA_BAUDRATE_115200 = 0,
+@@ -92,6 +94,7 @@ struct qca_fw_config {
+ uint8_t user_baud_rate;
+ enum qca_tlv_dnld_mode dnld_mode;
+ enum qca_tlv_dnld_mode dnld_type;
++ bdaddr_t bdaddr;
+ };
+
+ struct edl_event_hdr {
+@@ -146,6 +149,7 @@ enum qca_btsoc_type {
+ QCA_WCN3990,
+ QCA_WCN3998,
+ QCA_WCN3991,
++ QCA_QCA2066,
+ QCA_QCA6390,
+ QCA_WCN6750,
+ QCA_WCN6855,
+diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
+index 634cf8f5ed2dbd..0c91d7635ac39e 100644
+--- a/drivers/bluetooth/btrsi.c
++++ b/drivers/bluetooth/btrsi.c
+@@ -134,7 +134,6 @@ static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops)
+ hdev->bus = HCI_USB;
+
+ hci_set_drvdata(hdev, h_adapter);
+- hdev->dev_type = HCI_PRIMARY;
+ hdev->open = rsi_hci_open;
+ hdev->close = rsi_hci_close;
+ hdev->flush = rsi_hci_flush;
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 277d039ecbb429..1e7c1f9db9e4b9 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -1285,6 +1285,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
+ btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP);
+
+ if (btrtl_dev->project_id == CHIP_ID_8852A ||
++ btrtl_dev->project_id == CHIP_ID_8852B ||
+ btrtl_dev->project_id == CHIP_ID_8852C)
+ set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
+
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index f19d31ee37ea89..fdcfe9c50313ea 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -32,9 +32,6 @@ static const struct sdio_device_id btsdio_table[] = {
+ /* Generic Bluetooth Type-B SDIO device */
+ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
+
+- /* Generic Bluetooth AMP controller */
+- { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
+-
+ { } /* Terminating entry */
+ };
+
+@@ -319,11 +316,6 @@ static int btsdio_probe(struct sdio_func *func,
+ hdev->bus = HCI_SDIO;
+ hci_set_drvdata(hdev, data);
+
+- if (id->class == SDIO_CLASS_BT_AMP)
+- hdev->dev_type = HCI_AMP;
+- else
+- hdev->dev_type = HCI_PRIMARY;
+-
+ data->hdev = hdev;
+
+ SET_HCIDEV_DEV(hdev, &func->dev);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 499f4809fcdf3d..b3a9b93f027a90 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -537,14 +537,26 @@ static const struct usb_device_id quirks_table[] = {
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0489, 0xe122), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+
+ /* Realtek 8852BE Bluetooth devices */
+ { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+
+ /* Realtek Bluetooth devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
+@@ -1342,7 +1354,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
+ if (!urb)
+ return -ENOMEM;
+
+- size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
++ if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
++ le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
++ /* Fake CSR devices don't seem to support short transfers */
++ size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
++ else
++ /* Use the maximum HCI Event size so the USB stack handles
++ * ZLP/short-transfer automatically.
++ */
++ size = HCI_MAX_EVENT_SIZE;
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+@@ -2818,6 +2838,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ goto err_free_wc;
+ }
+
++ if (data->evt_skb == NULL)
++ goto err_free_wc;
++
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+@@ -3260,7 +3283,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
+- struct sk_buff *skb_cd;
+
+ switch (handle) {
+ case 0xfc6f: /* Firmware dump from device */
+@@ -3273,9 +3295,12 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
+ * for backward compatibility, so we have to clone the packet
+ * extraly for the in-kernel coredump support.
+ */
+- skb_cd = skb_clone(skb, GFP_ATOMIC);
+- if (skb_cd)
+- btmtk_process_coredump(hdev, skb_cd);
++ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
++ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
++
++ if (skb_cd)
++ btmtk_process_coredump(hdev, skb_cd);
++ }
+
+ fallthrough;
+ case 0x05ff: /* Firmware debug logging 1 */
+@@ -3448,13 +3473,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
+
+ static void btusb_coredump_qca(struct hci_dev *hdev)
+ {
++ int err;
+ static const u8 param[] = { 0x26 };
+- struct sk_buff *skb;
+
+- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
+- if (IS_ERR(skb))
+- bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
+- kfree_skb(skb);
++ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
++ if (err < 0)
++ bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
+ }
+
+ /*
+@@ -4298,11 +4322,6 @@ static int btusb_probe(struct usb_interface *intf,
+ hdev->bus = HCI_USB;
+ hci_set_drvdata(hdev, data);
+
+- if (id->driver_info & BTUSB_AMP)
+- hdev->dev_type = HCI_AMP;
+- else
+- hdev->dev_type = HCI_PRIMARY;
+-
+ data->hdev = hdev;
+
+ SET_HCIDEV_DEV(hdev, &intf->dev);
+@@ -4468,6 +4487,7 @@ static int btusb_probe(struct usb_interface *intf,
+ set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
++ set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
+ }
+
+ if (!reset)
+diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
+index 19ad0e78864629..0dc3ca3d410736 100644
+--- a/drivers/bluetooth/hci_bcm4377.c
++++ b/drivers/bluetooth/hci_bcm4377.c
+@@ -32,7 +32,7 @@ enum bcm4377_chip {
+ #define BCM4378_DEVICE_ID 0x5f69
+ #define BCM4387_DEVICE_ID 0x5f71
+
+-#define BCM4377_TIMEOUT 1000
++#define BCM4377_TIMEOUT msecs_to_jiffies(1000)
+
+ /*
+ * These devices only support DMA transactions inside a 32bit window
+@@ -512,6 +512,7 @@ struct bcm4377_hw {
+ unsigned long disable_aspm : 1;
+ unsigned long broken_ext_scan : 1;
+ unsigned long broken_mws_transport_config : 1;
++ unsigned long broken_le_coded : 1;
+
+ int (*send_calibration)(struct bcm4377_data *bcm4377);
+ int (*send_ptb)(struct bcm4377_data *bcm4377,
+@@ -715,7 +716,7 @@ static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
+ ring->events[msgid] = NULL;
+ }
+
+- bitmap_release_region(ring->msgids, msgid, ring->n_entries);
++ bitmap_release_region(ring->msgids, msgid, 0);
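++ /* bitmap_release_region() takes an allocation order, not a count:
++ * order 0 frees exactly the single msgid bit reserved for this
++ * message, whereas the old ring->n_entries argument released
++ * 2^n_entries bits.
++ */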
+
+ unlock:
+ spin_unlock_irqrestore(&ring->lock, flags);
+@@ -1416,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
+- set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
++ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
+
+ kfree_skb(skb);
+ return 0;
+@@ -2360,18 +2361,18 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ bcm4377->hdev = hdev;
+
+ hdev->bus = HCI_PCI;
+- hdev->dev_type = HCI_PRIMARY;
+ hdev->open = bcm4377_hci_open;
+ hdev->close = bcm4377_hci_close;
+ hdev->send = bcm4377_hci_send_frame;
+ hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
+ hdev->setup = bcm4377_hci_setup;
+
+- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ if (bcm4377->hw->broken_mws_transport_config)
+ set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ if (bcm4377->hw->broken_ext_scan)
+ set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
++ if (bcm4377->hw->broken_le_coded)
++ set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+
+ pci_set_drvdata(pdev, bcm4377);
+ hci_set_drvdata(hdev, bcm4377);
+@@ -2461,6 +2462,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ .bar0_core2_window2 = 0x18107000,
+ .has_bar0_core2_window2 = true,
+ .broken_mws_transport_config = true,
++ .broken_le_coded = true,
+ .send_calibration = bcm4378_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
+@@ -2474,6 +2476,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ .has_bar0_core2_window2 = true,
+ .clear_pciecfg_subsystem_ctrl_bit19 = true,
+ .broken_mws_transport_config = true,
++ .broken_le_coded = true,
+ .send_calibration = bcm4387_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index 71e748a9477e44..c0436881a533c5 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -113,6 +113,7 @@ struct h5_vnd {
+ int (*suspend)(struct h5 *h5);
+ int (*resume)(struct h5 *h5);
+ const struct acpi_gpio_mapping *acpi_gpio_map;
++ int sizeof_priv;
+ };
+
+ struct h5_device_data {
+@@ -863,7 +864,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
+ if (IS_ERR(h5->device_wake_gpio))
+ return PTR_ERR(h5->device_wake_gpio);
+
+- return hci_uart_register_device(&h5->serdev_hu, &h5p);
++ return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
++ h5->vnd->sizeof_priv);
+ }
+
+ static void h5_serdev_remove(struct serdev_device *serdev)
+@@ -1070,6 +1072,7 @@ static struct h5_vnd rtl_vnd = {
+ .suspend = h5_btrtl_suspend,
+ .resume = h5_btrtl_resume,
+ .acpi_gpio_map = acpi_btrtl_gpios,
++ .sizeof_priv = sizeof(struct btrealtek_data),
+ };
+
+ static const struct h5_device_data h5_data_rtl8822cs = {
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index a26367e9fb197c..17a2f158a0dfab 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -667,11 +667,6 @@ static int hci_uart_register_dev(struct hci_uart *hu)
+ if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+
+- if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+- hdev->dev_type = HCI_AMP;
+- else
+- hdev->dev_type = HCI_PRIMARY;
+-
+ /* Only call open() for the protocol after hdev is fully initialized as
+ * open() (or a timer/workqueue it starts) may attempt to reference it.
+ */
+@@ -722,7 +717,6 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
+ {
+ unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) |
+ BIT(HCI_UART_RESET_ON_INIT) |
+- BIT(HCI_UART_CREATE_AMP) |
+ BIT(HCI_UART_INIT_PENDING) |
+ BIT(HCI_UART_EXT_CONFIG) |
+ BIT(HCI_UART_VND_DETECT);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 4b57e15f9c7a7a..7a552387129ef0 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -225,6 +225,7 @@ struct qca_serdev {
+ struct qca_power *bt_power;
+ u32 init_speed;
+ u32 oper_speed;
++ bool bdaddr_property_broken;
+ const char *firmware_name;
+ };
+
+@@ -1089,6 +1090,7 @@ static void qca_controller_memdump(struct work_struct *work)
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
++ clear_bit(QCA_IBS_DISABLED, &qca->flags);
+ mutex_unlock(&qca->hci_memdump_lock);
+ return;
+ }
+@@ -1671,6 +1673,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ bool wakeup;
+
++ if (!hu->serdev)
++ return true;
++
+ /* BT SoC attached through the serial bus is handled by the serdev driver.
+ * So we need to use the device handle of the serdev driver to check
+ * whether the device may wake up.
+@@ -1806,13 +1811,12 @@ static int qca_power_on(struct hci_dev *hdev)
+
+ static void hci_coredump_qca(struct hci_dev *hdev)
+ {
++ int err;
+ static const u8 param[] = { 0x26 };
+- struct sk_buff *skb;
+
+- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
+- if (IS_ERR(skb))
+- bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
+- kfree_skb(skb);
++ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
++ if (err < 0)
++ bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
+ }
+
+ static int qca_setup(struct hci_uart *hu)
+@@ -1825,6 +1829,7 @@ static int qca_setup(struct hci_uart *hu)
+ const char *firmware_name = qca_get_firmware_name(hu);
+ int ret;
+ struct qca_btsoc_version ver;
++ struct qca_serdev *qcadev;
+ const char *soc_name;
+
+ ret = qca_check_speeds(hu);
+@@ -1841,6 +1846,10 @@ static int qca_setup(struct hci_uart *hu)
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+
+ switch (soc_type) {
++ case QCA_QCA2066:
++ soc_name = "qca2066";
++ break;
++
+ case QCA_WCN3988:
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+@@ -1882,7 +1891,10 @@ static int qca_setup(struct hci_uart *hu)
+ case QCA_WCN6750:
+ case QCA_WCN6855:
+ case QCA_WCN7850:
+- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
++ qcadev = serdev_device_get_drvdata(hu->serdev);
++ if (qcadev->bdaddr_property_broken)
++ set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
++
+ hci_set_aosp_capable(hdev);
+
+ ret = qca_read_soc_version(hdev, &ver, soc_type);
+@@ -1929,8 +1941,10 @@ static int qca_setup(struct hci_uart *hu)
+ qca_debugfs_init(hdev);
+ hu->hdev->hw_error = qca_hw_error;
+ hu->hdev->cmd_timeout = qca_cmd_timeout;
+- if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+- hu->hdev->wakeup = qca_wakeup;
++ if (hu->serdev) {
++ if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
++ hu->hdev->wakeup = qca_wakeup;
++ }
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
+ set_bit(QCA_ROM_FW, &qca->flags);
+@@ -2032,9 +2046,15 @@ static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
+ .num_vregs = 4,
+ };
+
++static const struct qca_device_data qca_soc_data_qca2066 __maybe_unused = {
++ .soc_type = QCA_QCA2066,
++ .num_vregs = 0,
++};
++
+ static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
+ .soc_type = QCA_QCA6390,
+ .num_vregs = 0,
++ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
+ };
+
+ static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = {
+@@ -2253,6 +2273,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ if (!qcadev->oper_speed)
+ BT_DBG("UART will pick default operating speed");
+
++ qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
++ "qcom,local-bd-address-broken");
++
+ if (data)
+ qcadev->btsoc_type = data->soc_type;
+ else
+@@ -2284,20 +2307,25 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+
+ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+- if (IS_ERR_OR_NULL(qcadev->bt_en) &&
++ if (IS_ERR(qcadev->bt_en) &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855)) {
+ dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
+- power_ctrl_enabled = false;
++ return PTR_ERR(qcadev->bt_en);
+ }
+
++ if (!qcadev->bt_en)
++ power_ctrl_enabled = false;
++
+ qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
+ GPIOD_IN);
+- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
++ if (IS_ERR(qcadev->sw_ctrl) &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855 ||
+- data->soc_type == QCA_WCN7850))
+- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
++ data->soc_type == QCA_WCN7850)) {
++ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
++ return PTR_ERR(qcadev->sw_ctrl);
++ }
+
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+@@ -2315,11 +2343,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
+ default:
+ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+- if (IS_ERR_OR_NULL(qcadev->bt_en)) {
+- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
+- power_ctrl_enabled = false;
++ if (IS_ERR(qcadev->bt_en)) {
++ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
++ return PTR_ERR(qcadev->bt_en);
+ }
+
++ if (!qcadev->bt_en)
++ power_ctrl_enabled = false;
++
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_warn(&serdev->dev, "failed to acquire clk\n");
+@@ -2398,15 +2429,27 @@ static void qca_serdev_shutdown(struct device *dev)
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
+ struct hci_dev *hdev = hu->hdev;
+- struct qca_data *qca = hu->priv;
+ const u8 ibs_wake_cmd[] = { 0xFD };
+ const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+
+ if (qcadev->btsoc_type == QCA_QCA6390) {
+- if (test_bit(QCA_BT_OFF, &qca->flags) ||
+- !test_bit(HCI_RUNNING, &hdev->flags))
++ /* The purpose of sending the VSC is to reset the SoC into its
++ * initial state, which ensures that the next hdev->setup() will
++ * succeed. If HCI_QUIRK_NON_PERSISTENT_SETUP is set, hdev->setup()
++ * can do its job regardless of SoC state, so the VSC is not
++ * needed. If HCI_SETUP is set, hdev->setup() was never invoked and
++ * the SoC is already in its initial state, so the VSC is not
++ * needed either.
++ */
++ if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
++ hci_dev_test_flag(hdev, HCI_SETUP))
+ return;
+
++ /* The serdev must be in the open state when control reaches this
++ * point; this also fixes a use-after-free caused by flushing or
++ * writing to the serdev after it has been closed.
++ */
+ serdev_device_write_flush(serdev);
+ ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ sizeof(ibs_wake_cmd));
+@@ -2559,6 +2602,7 @@ static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
+
+ #ifdef CONFIG_OF
+ static const struct of_device_id qca_bluetooth_of_match[] = {
++ { .compatible = "qcom,qca2066-bt", .data = &qca_soc_data_qca2066},
+ { .compatible = "qcom,qca6174-bt" },
+ { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
+ { .compatible = "qcom,qca9377-bt" },
+@@ -2576,6 +2620,7 @@ MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
+
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
++ { "QCOM2066", (kernel_ulong_t)&qca_soc_data_qca2066 },
+ { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+ { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index f16fd79bc02b8a..1165831570e3c4 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -300,8 +300,9 @@ static const struct serdev_device_ops hci_serdev_client_ops = {
+ .write_wakeup = hci_uart_write_wakeup,
+ };
+
+-int hci_uart_register_device(struct hci_uart *hu,
+- const struct hci_uart_proto *p)
++int hci_uart_register_device_priv(struct hci_uart *hu,
++ const struct hci_uart_proto *p,
++ int sizeof_priv)
+ {
+ int err;
+ struct hci_dev *hdev;
+@@ -325,7 +326,7 @@ int hci_uart_register_device(struct hci_uart *hu,
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
+
+ /* Initialize and register HCI device */
+- hdev = hci_alloc_dev();
++ hdev = hci_alloc_dev_priv(sizeof_priv);
+ if (!hdev) {
+ BT_ERR("Can't allocate HCI device");
+ err = -ENOMEM;
+@@ -365,11 +366,6 @@ int hci_uart_register_device(struct hci_uart *hu,
+ if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
+ set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+
+- if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+- hdev->dev_type = HCI_AMP;
+- else
+- hdev->dev_type = HCI_PRIMARY;
+-
+ if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return 0;
+
+@@ -394,7 +390,7 @@ int hci_uart_register_device(struct hci_uart *hu,
+ percpu_free_rwsem(&hu->proto_lock);
+ return err;
+ }
+-EXPORT_SYMBOL_GPL(hci_uart_register_device);
++EXPORT_SYMBOL_GPL(hci_uart_register_device_priv);
+
+ void hci_uart_unregister_device(struct hci_uart *hu)
+ {
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index fb4a2d0d8cc802..00bf7ae82c5b72 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -37,7 +37,6 @@
+
+ #define HCI_UART_RAW_DEVICE 0
+ #define HCI_UART_RESET_ON_INIT 1
+-#define HCI_UART_CREATE_AMP 2
+ #define HCI_UART_INIT_PENDING 3
+ #define HCI_UART_EXT_CONFIG 4
+ #define HCI_UART_VND_DETECT 5
+@@ -97,7 +96,17 @@ struct hci_uart {
+
+ int hci_uart_register_proto(const struct hci_uart_proto *p);
+ int hci_uart_unregister_proto(const struct hci_uart_proto *p);
+-int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
++
++int hci_uart_register_device_priv(struct hci_uart *hu,
++ const struct hci_uart_proto *p,
++ int sizeof_priv);
++
++static inline int hci_uart_register_device(struct hci_uart *hu,
++ const struct hci_uart_proto *p)
++{
++ return hci_uart_register_device_priv(hu, p, 0);
++}
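++/* Vendors that need per-device private data pass its size instead; for
++ * example, the hci_h5.c hunk above passes h5->vnd->sizeof_priv (set to
++ * sizeof(struct btrealtek_data) for the Realtek variant), which is
++ * forwarded to hci_alloc_dev_priv() when the hci_dev is allocated.
++ */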
++
+ void hci_uart_unregister_device(struct hci_uart *hu);
+
+ int hci_uart_tx_wakeup(struct hci_uart *hu);
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index f3892e9ce800ff..28750a40f0ed52 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -11,6 +11,7 @@
+ #include <linux/module.h>
+ #include <asm/unaligned.h>
+
++#include <linux/atomic.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -44,6 +45,7 @@ struct vhci_data {
+ bool wakeup;
+ __u16 msft_opcode;
+ bool aosp_capable;
++ atomic_t initialized;
+ };
+
+ static int vhci_open_dev(struct hci_dev *hdev)
+@@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+- mutex_lock(&data->open_mutex);
+ skb_queue_tail(&data->readq, skb);
+- mutex_unlock(&data->open_mutex);
+
+- wake_up_interruptible(&data->read_wait);
++ if (atomic_read(&data->initialized))
++ wake_up_interruptible(&data->read_wait);
+ return 0;
+ }
+
+@@ -383,17 +384,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ {
+ struct hci_dev *hdev;
+ struct sk_buff *skb;
+- __u8 dev_type;
+
+ if (data->hdev)
+ return -EBADFD;
+
+- /* bits 0-1 are dev_type (Primary or AMP) */
+- dev_type = opcode & 0x03;
+-
+- if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
+- return -EINVAL;
+-
+ /* bits 2-5 are reserved (must be zero) */
+ if (opcode & 0x3c)
+ return -EINVAL;
+@@ -411,7 +405,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ data->hdev = hdev;
+
+ hdev->bus = HCI_VIRTUAL;
+- hdev->dev_type = dev_type;
+ hci_set_drvdata(hdev, data);
+
+ hdev->open = vhci_open_dev;
+@@ -464,7 +457,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ skb_put_u8(skb, 0xff);
+ skb_put_u8(skb, opcode);
+ put_unaligned_le16(hdev->id, skb_put(skb, 2));
+- skb_queue_tail(&data->readq, skb);
++ skb_queue_head(&data->readq, skb);
++ atomic_inc(&data->initialized);
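++ /* vhci_send_frame() only wakes readers once 'initialized' is
++ * non-zero, so queueing the setup packet at the head and then
++ * bumping the counter guarantees it is the first packet read.
++ */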
+
+ wake_up_interruptible(&data->read_wait);
+ return 0;
+@@ -632,7 +626,7 @@ static void vhci_open_timeout(struct work_struct *work)
+ struct vhci_data *data = container_of(work, struct vhci_data,
+ open_timeout.work);
+
+- vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY);
++ vhci_create_device(data, 0x00);
+ }
+
+ static int vhci_open(struct inode *inode, struct file *file)
+diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
+index 2ac70b560c46db..18208e152a3675 100644
+--- a/drivers/bluetooth/virtio_bt.c
++++ b/drivers/bluetooth/virtio_bt.c
+@@ -274,7 +274,6 @@ static int virtbt_probe(struct virtio_device *vdev)
+
+ switch (type) {
+ case VIRTIO_BT_CONFIG_TYPE_PRIMARY:
+- case VIRTIO_BT_CONFIG_TYPE_AMP:
+ break;
+ default:
+ return -EINVAL;
+@@ -303,7 +302,6 @@ static int virtbt_probe(struct virtio_device *vdev)
+ vbt->hdev = hdev;
+
+ hdev->bus = HCI_VIRTIO;
+- hdev->dev_type = type;
+ hci_set_drvdata(hdev, vbt);
+
+ hdev->open = virtbt_open;
+diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
+index c98dd6ca26297f..fab27506d945e9 100644
+--- a/drivers/bus/Kconfig
++++ b/drivers/bus/Kconfig
+@@ -186,11 +186,12 @@ config SUNXI_RSB
+
+ config TEGRA_ACONNECT
+ tristate "Tegra ACONNECT Bus Driver"
+- depends on ARCH_TEGRA_210_SOC
++ depends on ARCH_TEGRA
+ depends on OF && PM
+ help
+ Driver for the Tegra ACONNECT bus which is used to interface with
+- the devices inside the Audio Processing Engine (APE) for Tegra210.
++ the devices inside the Audio Processing Engine (APE) for
++ Tegra210 and later.
+
+ config TEGRA_GMI
+ tristate "Tegra Generic Memory Interface bus driver"
+diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
+index b715c8ab36e8bd..a65c79b08804f4 100644
+--- a/drivers/bus/arm-integrator-lm.c
++++ b/drivers/bus/arm-integrator-lm.c
+@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
++ of_node_put(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index 42c9386a7b423f..f9fd1582f150de 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -117,7 +117,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
+ i++;
+ }
+
+- if (i == 0 || i % 4)
++ if (i == 0)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
+index a2125fa5fe2f97..accbf3a51d0cf5 100644
+--- a/drivers/bus/mhi/ep/internal.h
++++ b/drivers/bus/mhi/ep/internal.h
+@@ -159,6 +159,7 @@ struct mhi_ep_chan {
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+ enum mhi_ch_state state;
+ enum dma_data_direction dir;
++ size_t rd_offset;
+ u64 tre_loc;
+ u32 tre_size;
+ u32 tre_bytes_left;
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 600881808982aa..c48f4d9f2c690b 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -71,45 +71,77 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+ {
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
++
++ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
++ event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
++ event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+- event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
+- event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
++ ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
++ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+- return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
++ return ret;
+ }
+
+ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+ {
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
+
+- event.dword[0] = MHI_SC_EV_DWORD0(state);
+- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
++ event->dword[0] = MHI_SC_EV_DWORD0(state);
++ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
++
++ return ret;
+ }
+
+ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+ {
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
++
++ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
++
++ event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
++ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+- event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
+- event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
++ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++ return ret;
+ }
+
+ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+ {
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+- struct mhi_ring_element event = {};
++ struct mhi_ring_element *event;
++ int ret;
+
+- event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+- event.dword[0] = MHI_CC_EV_DWORD0(code);
+- event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
++ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
++
++ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
++ event->dword[0] = MHI_CC_EV_DWORD0(code);
++ event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+- return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
++ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
++ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
++
++ return ret;
+ }
+
+ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+@@ -151,6 +183,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+
+ goto err_unlock;
+ }
++
++ mhi_chan->rd_offset = ch_ring->rd_offset;
+ }
+
+ /* Set channel state to RUNNING */
+@@ -280,22 +314,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+- return !!(ring->rd_offset == ring->wr_offset);
++ return !!(mhi_chan->rd_offset == ring->wr_offset);
+ }
+ EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+
++static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
++{
++ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
++ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
++ struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
++ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
++ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
++ struct mhi_result result = {};
++ int ret;
++
++ if (mhi_chan->xfer_cb) {
++ result.buf_addr = buf_info->cb_buf;
++ result.dir = mhi_chan->dir;
++ result.bytes_xferd = buf_info->size;
++
++ mhi_chan->xfer_cb(mhi_dev, &result);
++ }
++
++ /*
++ * The host will split the data packet into multiple TREs if it can't fit
++ * the packet in a single TRE. In that case, the CHAIN flag will be set by the
++ * host for all TREs except the last one.
++ */
++ if (buf_info->code != MHI_EV_CC_OVERFLOW) {
++ if (MHI_TRE_DATA_GET_CHAIN(el)) {
++ /*
++ * IEOB (Interrupt on End of Block) flag will be set by the host if
++ * it expects the completion event for all TREs of a TD.
++ */
++ if (MHI_TRE_DATA_GET_IEOB(el)) {
++ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
++ MHI_TRE_DATA_GET_LEN(el),
++ MHI_EV_CC_EOB);
++ if (ret < 0) {
++ dev_err(&mhi_chan->mhi_dev->dev,
++ "Error sending transfer compl. event\n");
++ goto err_free_tre_buf;
++ }
++ }
++ } else {
++ /*
++ * IEOT (Interrupt on End of Transfer) flag will be set by the host
++ * for the last TRE of the TD and expects the completion event for
++ * the same.
++ */
++ if (MHI_TRE_DATA_GET_IEOT(el)) {
++ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
++ MHI_TRE_DATA_GET_LEN(el),
++ MHI_EV_CC_EOT);
++ if (ret < 0) {
++ dev_err(&mhi_chan->mhi_dev->dev,
++ "Error sending transfer compl. event\n");
++ goto err_free_tre_buf;
++ }
++ }
++ }
++ }
++
++ mhi_ep_ring_inc_index(ring);
++
++err_free_tre_buf:
++ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
++}
++
+ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+- struct mhi_ep_ring *ring,
+- struct mhi_result *result,
+- u32 len)
++ struct mhi_ep_ring *ring)
+ {
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t tr_len, read_offset, write_offset;
++ struct mhi_ep_buf_info buf_info = {};
++ u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ring_element *el;
+ bool tr_done = false;
+- void *write_addr;
+- u64 read_addr;
++ void *buf_addr;
+ u32 buf_left;
+ int ret;
+
+@@ -308,7 +405,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ return -ENODEV;
+ }
+
+- el = &ring->ring_cache[ring->rd_offset];
++ el = &ring->ring_cache[mhi_chan->rd_offset];
+
+ /* Check if there is data pending to be read from previous read operation */
+ if (mhi_chan->tre_bytes_left) {
+@@ -324,81 +421,51 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+
+ read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ write_offset = len - buf_left;
+- read_addr = mhi_chan->tre_loc + read_offset;
+- write_addr = result->buf_addr + write_offset;
++
++ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
++ if (!buf_addr)
++ return -ENOMEM;
++
++ buf_info.host_addr = mhi_chan->tre_loc + read_offset;
++ buf_info.dev_addr = buf_addr + write_offset;
++ buf_info.size = tr_len;
++ buf_info.cb = mhi_ep_read_completion;
++ buf_info.cb_buf = buf_addr;
++ buf_info.mhi_dev = mhi_chan->mhi_dev;
++
++ if (mhi_chan->tre_bytes_left - tr_len)
++ buf_info.code = MHI_EV_CC_OVERFLOW;
+
+ dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+- ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
++ ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
+- return ret;
++ goto err_free_buf_addr;
+ }
+
+ buf_left -= tr_len;
+ mhi_chan->tre_bytes_left -= tr_len;
+
+- /*
+- * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
+- * read completely:
+- *
+- * 1. Send completion event to the host based on the flags set in TRE.
+- * 2. Increment the local read offset of the transfer ring.
+- */
+ if (!mhi_chan->tre_bytes_left) {
+- /*
+- * The host will split the data packet into multiple TREs if it can't fit
+- * the packet in a single TRE. In that case, CHAIN flag will be set by the
+- * host for all TREs except the last one.
+- */
+- if (MHI_TRE_DATA_GET_CHAIN(el)) {
+- /*
+- * IEOB (Interrupt on End of Block) flag will be set by the host if
+- * it expects the completion event for all TREs of a TD.
+- */
+- if (MHI_TRE_DATA_GET_IEOB(el)) {
+- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+- MHI_TRE_DATA_GET_LEN(el),
+- MHI_EV_CC_EOB);
+- if (ret < 0) {
+- dev_err(&mhi_chan->mhi_dev->dev,
+- "Error sending transfer compl. event\n");
+- return ret;
+- }
+- }
+- } else {
+- /*
+- * IEOT (Interrupt on End of Transfer) flag will be set by the host
+- * for the last TRE of the TD and expects the completion event for
+- * the same.
+- */
+- if (MHI_TRE_DATA_GET_IEOT(el)) {
+- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+- MHI_TRE_DATA_GET_LEN(el),
+- MHI_EV_CC_EOT);
+- if (ret < 0) {
+- dev_err(&mhi_chan->mhi_dev->dev,
+- "Error sending transfer compl. event\n");
+- return ret;
+- }
+- }
+-
++ if (MHI_TRE_DATA_GET_IEOT(el))
+ tr_done = true;
+- }
+
+- mhi_ep_ring_inc_index(ring);
++ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
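++ /* rd_offset here is the channel-local cursor; the shared
++ * ring->rd_offset only advances in mhi_ep_read_completion()
++ * once the asynchronous read has actually finished.
++ */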
+ }
+-
+- result->bytes_xferd += tr_len;
+ } while (buf_left && !tr_done);
+
+ return 0;
++
++err_free_buf_addr:
++ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
++
++ return ret;
+ }
+
+-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
++static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
+ {
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct mhi_result result = {};
+- u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ep_chan *mhi_chan;
+ int ret;
+
+@@ -419,44 +486,59 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+- result.buf_addr = kzalloc(len, GFP_KERNEL);
+- if (!result.buf_addr)
+- return -ENOMEM;
+-
+ do {
+- ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
++ ret = mhi_ep_read_channel(mhi_cntrl, ring);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+- kfree(result.buf_addr);
+ return ret;
+ }
+
+- result.dir = mhi_chan->dir;
+- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+- result.bytes_xferd = 0;
+- memset(result.buf_addr, 0, len);
+-
+ /* Read until the ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+-
+- kfree(result.buf_addr);
+ }
+
+ return 0;
+ }
+
++static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
++{
++ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
++ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
++ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
++ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
++ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
++ struct device *dev = &mhi_dev->dev;
++ struct mhi_result result = {};
++ int ret;
++
++ if (mhi_chan->xfer_cb) {
++ result.buf_addr = buf_info->cb_buf;
++ result.dir = mhi_chan->dir;
++ result.bytes_xferd = buf_info->size;
++
++ mhi_chan->xfer_cb(mhi_dev, &result);
++ }
++
++ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
++ buf_info->code);
++ if (ret) {
++ dev_err(dev, "Error sending transfer completion event\n");
++ return;
++ }
++
++ mhi_ep_ring_inc_index(ring);
++}
++
+ /* TODO: Handle partially formed TDs */
+ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+ {
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
++ struct mhi_ep_buf_info buf_info = {};
+ struct mhi_ring_element *el;
+ u32 buf_left, read_offset;
+ struct mhi_ep_ring *ring;
+- enum mhi_ev_ccs code;
+- void *read_addr;
+- u64 write_addr;
+ size_t tr_len;
+ u32 tre_len;
+ int ret;
+@@ -480,40 +562,44 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+ goto err_exit;
+ }
+
+- el = &ring->ring_cache[ring->rd_offset];
++ el = &ring->ring_cache[mhi_chan->rd_offset];
+ tre_len = MHI_TRE_DATA_GET_LEN(el);
+
+ tr_len = min(buf_left, tre_len);
+ read_offset = skb->len - buf_left;
+- read_addr = skb->data + read_offset;
+- write_addr = MHI_TRE_DATA_GET_PTR(el);
+
+- dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+- ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
+- if (ret < 0) {
+- dev_err(dev, "Error writing to the channel\n");
+- goto err_exit;
+- }
++ buf_info.dev_addr = skb->data + read_offset;
++ buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
++ buf_info.size = tr_len;
++ buf_info.cb = mhi_ep_skb_completion;
++ buf_info.cb_buf = skb;
++ buf_info.mhi_dev = mhi_dev;
+
+- buf_left -= tr_len;
+ /*
+ * For all TREs queued by the host for DL channel, only the EOT flag will be set.
+ * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
+ * the host so that the host can adjust the packet boundary to next TREs. Else send
+ * the EOT event to the host indicating the packet boundary.
+ */
+- if (buf_left)
+- code = MHI_EV_CC_OVERFLOW;
++ if (buf_left - tr_len)
++ buf_info.code = MHI_EV_CC_OVERFLOW;
+ else
+- code = MHI_EV_CC_EOT;
++ buf_info.code = MHI_EV_CC_EOT;
+
+- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+- if (ret) {
+- dev_err(dev, "Error sending transfer completion event\n");
++ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
++ ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
++ if (ret < 0) {
++ dev_err(dev, "Error writing to the channel\n");
+ goto err_exit;
+ }
+
+- mhi_ep_ring_inc_index(ring);
++ buf_left -= tr_len;
++
++ /*
++ * Update the read offset cached in mhi_chan. The actual ring read
++ * offset is advanced by the completion handler.
++ */
++ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
+ } while (buf_left);
+
+ mutex_unlock(&mhi_chan->lock);
+@@ -714,7 +800,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_item *itr, *tmp;
+- struct mhi_ring_element *el;
+ struct mhi_ep_ring *ring;
+ struct mhi_ep_chan *chan;
+ unsigned long flags;
+@@ -748,31 +833,29 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ mutex_unlock(&chan->lock);
+- kfree(itr);
++ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+- if (ring->rd_offset == ring->wr_offset) {
++ if (chan->rd_offset == ring->wr_offset) {
+ mutex_unlock(&chan->lock);
+- kfree(itr);
++ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+- el = &ring->ring_cache[ring->rd_offset];
+-
+ dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+- ret = mhi_ep_process_ch_ring(ring, el);
++ ret = mhi_ep_process_ch_ring(ring);
+ if (ret) {
+ dev_err(dev, "Error processing ring for channel (%u): %d\n",
+ ring->ch_id, ret);
+ mutex_unlock(&chan->lock);
+- kfree(itr);
++ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+ mutex_unlock(&chan->lock);
+- kfree(itr);
++ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ }
+ }
+
+@@ -828,7 +911,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon
+ u32 ch_id = ch_idx + i;
+
+ ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+- item = kzalloc(sizeof(*item), GFP_ATOMIC);
++ item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
+ if (!item)
+ return;
+
+@@ -1375,6 +1458,29 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ goto err_free_ch;
+ }
+
++ mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
++ sizeof(struct mhi_ring_element), 0,
++ 0, NULL);
++ if (!mhi_cntrl->ev_ring_el_cache) {
++ ret = -ENOMEM;
++ goto err_free_cmd;
++ }
++
++ mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
++ 0, NULL);
++ if (!mhi_cntrl->tre_buf_cache) {
++ ret = -ENOMEM;
++ goto err_destroy_ev_ring_el_cache;
++ }
++
++ mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
++ sizeof(struct mhi_ep_ring_item), 0,
++ 0, NULL);
++ if (!mhi_cntrl->ring_item_cache) {
++ ret = -ENOMEM;
++ goto err_destroy_tre_buf_cache;
++ }
++
+ INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+ INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+ INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
+@@ -1383,7 +1489,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ if (!mhi_cntrl->wq) {
+ ret = -ENOMEM;
+- goto err_free_cmd;
++ goto err_destroy_ring_item_cache;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+@@ -1442,6 +1548,12 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+ err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->wq);
++err_destroy_ring_item_cache:
++ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
++err_destroy_ev_ring_el_cache:
++ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
++err_destroy_tre_buf_cache:
++ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+ err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+ err_free_ch:
+@@ -1463,6 +1575,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
++ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
++ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
++ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_chan);
+
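The registration path above now carves its fixed-size allocations (event ring elements, TRE buffers, ring items) out of dedicated slab caches, and the error-unwind ladder grows matching err_destroy_* labels so that each failure point tears down exactly the caches created before it, in reverse order. Below is a minimal plain-C sketch of that acquire-in-order, release-in-reverse goto pattern; the cache names echo the patch, but everything here is illustrative rather than kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for kmem_cache_create()/kmem_cache_destroy(); malloc(1)
 * essentially never fails here, the labels document the unwind order. */
static void *acquire(const char *name) { printf("create %s\n", name); return malloc(1); }
static void release(void *p, const char *name) { printf("destroy %s\n", name); free(p); }

static int register_controller(void)
{
	void *ev_cache, *tre_cache, *item_cache;
	int ret = 0;

	ev_cache = acquire("ev_ring_el_cache");
	if (!ev_cache)
		return -1;

	tre_cache = acquire("tre_buf_cache");
	if (!tre_cache) {
		ret = -1;
		goto err_destroy_ev;
	}

	item_cache = acquire("ring_item_cache");
	if (!item_cache) {
		ret = -1;
		goto err_destroy_tre;
	}

	return 0;

err_destroy_tre:
	release(tre_cache, "tre_buf_cache");
err_destroy_ev:
	release(ev_cache, "ev_ring_el_cache");
	return ret;
}

int main(void)
{
	return register_controller() ? 1 : 0;
}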
+diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
+index 115518ec76a43a..ba9f696d1aa80e 100644
+--- a/drivers/bus/mhi/ep/ring.c
++++ b/drivers/bus/mhi/ep/ring.c
+@@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+ {
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+- size_t start, copy_size;
++ struct mhi_ep_buf_info buf_info = {};
++ size_t start;
+ int ret;
+
+ /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
+@@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+
+ start = ring->wr_offset;
+ if (start < end) {
+- copy_size = (end - start) * sizeof(struct mhi_ring_element);
+- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+- (start * sizeof(struct mhi_ring_element)),
+- &ring->ring_cache[start], copy_size);
++ buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
++ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
++ buf_info.dev_addr = &ring->ring_cache[start];
++
++ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
+ if (ret < 0)
+ return ret;
+ } else {
+- copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
+- (start * sizeof(struct mhi_ring_element)),
+- &ring->ring_cache[start], copy_size);
++ buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
++ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
++ buf_info.dev_addr = &ring->ring_cache[start];
++
++ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
+ if (ret < 0)
+ return ret;
+
+ if (end) {
+- ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
+- &ring->ring_cache[0],
+- end * sizeof(struct mhi_ring_element));
++ buf_info.host_addr = ring->rbase;
++ buf_info.dev_addr = &ring->ring_cache[0];
++ buf_info.size = end * sizeof(struct mhi_ring_element);
++
++ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+- dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
++ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
+
+ return 0;
+ }
+@@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
+ {
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
++ struct mhi_ep_buf_info buf_info = {};
+ size_t old_offset = 0;
+ u32 num_free_elem;
+ __le64 rp;
+@@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
+ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+ memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+
+- ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
+- sizeof(*el));
+- if (ret < 0)
+- return ret;
++ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
++ buf_info.dev_addr = el;
++ buf_info.size = sizeof(*el);
+
+- return 0;
++ return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ }
+
+ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
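Both hunks in ring.c stop passing host address, device buffer and size as positional arguments and instead fill a zero-initialised mhi_ep_buf_info descriptor handed to read_sync()/write_sync(); the descriptor is what later lets the async path attach a completion callback without touching every call site. A hedged userspace sketch of the descriptor-struct idiom follows; the struct layout and helper are assumptions for illustration, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of mhi_ep_buf_info: one struct instead of several
 * positional arguments, with room for an optional completion callback. */
struct buf_info {
	uint64_t host_addr;
	void *dev_addr;
	size_t size;
	void (*cb)(struct buf_info *bi);	/* unused by the sync path */
};

static int read_sync(const struct buf_info *bi)
{
	/* A real backend would DMA from host_addr into dev_addr. */
	printf("read %zu bytes from %#llx\n", bi->size,
	       (unsigned long long)bi->host_addr);
	return 0;
}

int main(void)
{
	char ring_cache[64];
	struct buf_info bi = { 0 };	/* the kernel code zeroes it with = {} */

	bi.host_addr = 0x1000;
	bi.dev_addr = ring_cache;
	bi.size = sizeof(ring_cache);
	return read_sync(&bi);
}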
+diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
+index f78aefd2d7a362..cfd17c02fe20ef 100644
+--- a/drivers/bus/mhi/host/init.c
++++ b/drivers/bus/mhi/host/init.c
+@@ -62,6 +62,7 @@ static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
++ [MHI_PM_STATE_SYS_ERR_FAIL] = "SYS ERROR Failure",
+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+ };
+diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
+index 2e139e76de4c03..d2858236af52b1 100644
+--- a/drivers/bus/mhi/host/internal.h
++++ b/drivers/bus/mhi/host/internal.h
+@@ -88,6 +88,7 @@ enum mhi_pm_state {
+ MHI_PM_STATE_FW_DL_ERR,
+ MHI_PM_STATE_SYS_ERR_DETECT,
+ MHI_PM_STATE_SYS_ERR_PROCESS,
++ MHI_PM_STATE_SYS_ERR_FAIL,
+ MHI_PM_STATE_SHUTDOWN_PROCESS,
+ MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ MHI_PM_STATE_MAX
+@@ -104,14 +105,16 @@ enum mhi_pm_state {
+ #define MHI_PM_FW_DL_ERR BIT(7)
+ #define MHI_PM_SYS_ERR_DETECT BIT(8)
+ #define MHI_PM_SYS_ERR_PROCESS BIT(9)
+-#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
++#define MHI_PM_SYS_ERR_FAIL BIT(10)
++#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
+ /* link not accessible */
+-#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
++#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)
+
+ #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
++ MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
++ MHI_PM_FW_DL_ERR)))
+ #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+ #define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+ #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
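Since each PM state is a distinct BIT() in an ordered sequence, inserting MHI_PM_SYS_ERR_FAIL at BIT(10) pushes every later state up one bit, and every mask built from those states (such as MHI_REG_ACCESS_VALID above) must be updated in the same patch. A small self-contained illustration of why the ordering-based tests keep working after the insertion; the macro names are simplified stand-ins.

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Simplified stand-ins: SYS_ERR_FAIL is inserted at BIT(10), pushing
 * SHUTDOWN_PROCESS and LD_ERR_FATAL up by one bit each. */
#define PM_FW_DL_ERR		BIT(7)
#define PM_SYS_ERR_DETECT	BIT(8)
#define PM_SYS_ERR_PROCESS	BIT(9)
#define PM_SYS_ERR_FAIL		BIT(10)
#define PM_SHUTDOWN_PROCESS	BIT(11)
#define PM_LD_ERR_FATAL		BIT(12)

/* Ordering-based test in the style of MHI_PM_IN_ERROR_STATE(): everything
 * at or above FW_DL_ERR counts as an error, including the inserted state. */
#define PM_IN_ERROR_STATE(s)	((s) >= PM_FW_DL_ERR)

int main(void)
{
	printf("SYS_ERR_FAIL is an error state: %s\n",
	       PM_IN_ERROR_STATE(PM_SYS_ERR_FAIL) ? "yes" : "no");
	printf("a low (M-state) bit is:         %s\n",
	       PM_IN_ERROR_STATE(BIT(2)) ? "yes" : "no");
	return 0;
}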
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index dcf627b36e829e..d6653cbcf94a2e 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -268,7 +268,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+
+ static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+ {
+- return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
++ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
++ !(addr & (sizeof(struct mhi_ring_element) - 1));
+ }
+
+ int mhi_destroy_device(struct device *dev, void *data)
+@@ -642,6 +643,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
++ read_unlock_bh(&mhi_chan->lock);
++
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+@@ -667,6 +670,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ kfree(buf_info->cb_buf);
+ }
+ }
++
++ read_lock_bh(&mhi_chan->lock);
+ }
+ break;
+ } /* CC_EOT */
+@@ -1122,17 +1127,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+-
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+- if (unlikely(ret)) {
+- ret = -EAGAIN;
+- goto exit_unlock;
+- }
++ if (unlikely(ret))
++ return -EAGAIN;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+- goto exit_unlock;
++ return ret;
++
++ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ /* Packet is queued, take a usage ref to exit M3 if necessary
+ * for host->device buffer, balanced put is done on buffer completion
+@@ -1152,7 +1155,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ if (dir == DMA_FROM_DEVICE)
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+-exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
+@@ -1204,6 +1206,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ int eot, eob, chain, bei;
+ int ret;
+
++ /* Protect accesses for reading and incrementing WP */
++ write_lock_bh(&mhi_chan->lock);
++
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+@@ -1221,8 +1226,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+- if (ret)
++ if (ret) {
++ write_unlock_bh(&mhi_chan->lock);
+ return ret;
++ }
+ }
+
+ eob = !!(flags & MHI_EOB);
+@@ -1239,6 +1246,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
++ write_unlock_bh(&mhi_chan->lock);
++
+ return 0;
+ }
+
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index 08f3f039dbddcf..154841916f5652 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -578,6 +578,15 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
+ .mru_default = 32768,
+ };
+
++static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
++ .name = "telit-fe990a",
++ .config = &modem_telit_fn990_config,
++ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
++ .dma_data_width = 32,
++ .sideband_wake = false,
++ .mru_default = 32768,
++};
++
+ /* Keep the list sorted based on the PID. New VID should be added as the last entry */
+ static const struct pci_device_id mhi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+@@ -595,9 +604,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
+ /* Telit FN990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+- /* Telit FE990 */
++ /* Telit FE990A */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
+- .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
++ .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
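The FE990A gets its own profile because PCI_DEVICE_SUB rows match on subsystem IDs and sit above the generic VID/PID row for the same chip; the first matching row wins, so the modem-specific entry is selected before the SDX65 fallback. A self-contained sketch of that first-match table lookup, with the IDs copied from the table and the matching logic merely illustrative:

#include <stdio.h>

/* Minimal stand-in for a PCI match table: subsystem-specific rows must
 * precede the generic vendor/device row because the first match wins. */
struct id { unsigned short vid, pid, sub_vid, sub_pid; const char *name; };
#define ANY_ID 0xffff

static const struct id table[] = {
	{ 0x17cb, 0x0308, 0x1c5d, 0x2010, "telit-fn990"  },
	{ 0x17cb, 0x0308, 0x1c5d, 0x2015, "telit-fe990a" },
	{ 0x17cb, 0x0308, ANY_ID, ANY_ID, "qcom-sdx65"   },	/* generic fallback */
};

static const char *match(unsigned short v, unsigned short p,
			 unsigned short sv, unsigned short sp)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct id *e = &table[i];

		if (e->vid == v && e->pid == p &&
		    (e->sub_vid == ANY_ID || e->sub_vid == sv) &&
		    (e->sub_pid == ANY_ID || e->sub_pid == sp))
			return e->name;
	}
	return "no match";
}

int main(void)
{
	printf("%s\n", match(0x17cb, 0x0308, 0x1c5d, 0x2015));	/* telit-fe990a */
	printf("%s\n", match(0x17cb, 0x0308, 0x0000, 0x0000));	/* qcom-sdx65 */
	return 0;
}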
+diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
+index 8a4362d75fc437..27f8a40f288cfd 100644
+--- a/drivers/bus/mhi/host/pm.c
++++ b/drivers/bus/mhi/host/pm.c
+@@ -36,7 +36,10 @@
+ * M0 <--> M0
+ * M0 -> FW_DL_ERR
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
++ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
++ * SYS_ERR_PROCESS -> SYS_ERR_FAIL
++ * SYS_ERR_FAIL -> SYS_ERR_DETECT
++ * SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ * SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+@@ -93,7 +96,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
++ MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
++ MHI_PM_LD_ERR_FATAL_DETECT
++ },
++ {
++ MHI_PM_SYS_ERR_FAIL,
++ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+@@ -624,7 +632,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
+ !in_reset, timeout);
+ if (!ret || in_reset) {
+ dev_err(dev, "Device failed to exit MHI Reset state\n");
+- goto exit_sys_error_transition;
++ write_lock_irq(&mhi_cntrl->pm_lock);
++ cur_state = mhi_tryset_pm_state(mhi_cntrl,
++ MHI_PM_SYS_ERR_FAIL);
++ write_unlock_irq(&mhi_cntrl->pm_lock);
++		/* Shutdown may have occurred; otherwise clean up now */
++ if (cur_state != MHI_PM_SYS_ERR_FAIL)
++ goto exit_sys_error_transition;
+ }
+
+ /*
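The new SYS_ERR_FAIL state is wired into the dev_state_transitions table, where each row pairs a current state with a bitmask of legal successors and mhi_tryset_pm_state() rejects anything else; that is what lets the error worker park in SYS_ERR_FAIL and leave only via a fresh SYS_ERR_DETECT or a shutdown. A minimal runnable model of such a table-driven state machine (states and rows are illustrative, not the kernel's full table):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define ST_SYS_ERR_DETECT	BIT(0)
#define ST_SYS_ERR_PROCESS	BIT(1)
#define ST_SYS_ERR_FAIL		BIT(2)
#define ST_POR			BIT(3)

/* Transition table in the style of dev_state_transitions: each row is a
 * state plus a bitmask of the states it may legally move to. */
static const struct { unsigned int from, to_mask; } transitions[] = {
	{ ST_SYS_ERR_DETECT,  ST_SYS_ERR_PROCESS },
	{ ST_SYS_ERR_PROCESS, ST_POR | ST_SYS_ERR_FAIL },
	{ ST_SYS_ERR_FAIL,    ST_SYS_ERR_DETECT },
};

static unsigned int tryset(unsigned int cur, unsigned int next)
{
	for (unsigned int i = 0; i < sizeof(transitions) / sizeof(transitions[0]); i++)
		if (transitions[i].from == cur && (transitions[i].to_mask & next))
			return next;	/* transition allowed */
	return cur;			/* rejected: state unchanged */
}

int main(void)
{
	unsigned int s = ST_SYS_ERR_PROCESS;

	s = tryset(s, ST_SYS_ERR_FAIL);	/* allowed by the new row */
	printf("state %#x\n", s);
	s = tryset(s, ST_POR);		/* not reachable from FAIL */
	printf("state %#x\n", s);
	return 0;
}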
+diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
+index 5eb0fe73ddc45b..e384fbc6c1d931 100644
+--- a/drivers/bus/moxtet.c
++++ b/drivers/bus/moxtet.c
+@@ -755,7 +755,7 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
+ moxtet->irq.masked = ~0;
+
+ ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
+- IRQF_ONESHOT, "moxtet", moxtet);
++ IRQF_SHARED | IRQF_ONESHOT, "moxtet", moxtet);
+ if (ret < 0)
+ goto err_free;
+
+@@ -830,6 +830,12 @@ static void moxtet_remove(struct spi_device *spi)
+ mutex_destroy(&moxtet->lock);
+ }
+
++static const struct spi_device_id moxtet_spi_ids[] = {
++ { "moxtet" },
++ { },
++};
++MODULE_DEVICE_TABLE(spi, moxtet_spi_ids);
++
+ static const struct of_device_id moxtet_dt_ids[] = {
+ { .compatible = "cznic,moxtet" },
+ {},
+@@ -841,6 +847,7 @@ static struct spi_driver moxtet_spi_driver = {
+ .name = "moxtet",
+ .of_match_table = moxtet_dt_ids,
+ },
++ .id_table = moxtet_spi_ids,
+ .probe = moxtet_probe,
+ .remove = moxtet_remove,
+ };
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index d57bc066dce6b4..9ed9239b1228f6 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata)
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+- /* Flush posted write */
++
++ /*
++ * Some devices need a delay before reading registers
++ * after reset. Presumably a srst_udelay is not needed
++ * for devices that use a rstctrl register reset.
++ */
++ if (ddata->cfg.srst_udelay)
++ fsleep(ddata->cfg.srst_udelay);
++
++ /*
++ * Flush posted write. For devices needing srst_udelay
++ * this should trigger an interconnect error if the
++ * srst_udelay value is needed but not configured.
++ */
+ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+- if (ddata->cfg.srst_udelay)
+- fsleep(ddata->cfg.srst_udelay);
+-
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
+
+diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
+index 57186c58dc849c..1d7dd3d2c101cd 100644
+--- a/drivers/cache/ax45mp_cache.c
++++ b/drivers/cache/ax45mp_cache.c
+@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
+ unsigned long line_size;
+ unsigned long flags;
+
++ if (unlikely(start == end))
++ return;
++
+ line_size = ax45mp_priv.ax45mp_cache_line_size;
+ start = start & (~(line_size - 1));
++ end = ((end + line_size - 1) & (~(line_size - 1)));
+ local_irq_save(flags);
+ ax45mp_cpu_dcache_wb_range(start, end);
+ local_irq_restore(flags);
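The writeback fix rounds the range outward: start was already rounded down to a cache line, and the patch adds the matching round-up of end plus an early return for the empty range, so a partial trailing line can no longer be skipped. A runnable sketch of the rounding arithmetic, assuming a power-of-two line size:

#include <stdio.h>

/* Round an address range outward to cache-line boundaries, as the ax45mp
 * writeback fix does: start rounds down, end rounds up, and an empty range
 * is skipped entirely. line_size must be a power of two. */
static void wb_range(unsigned long start, unsigned long end,
		     unsigned long line_size)
{
	if (start == end)
		return;

	start &= ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	printf("writeback [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	wb_range(0x1234, 0x1301, 64);	/* -> [0x1200, 0x1340) */
	wb_range(0x1000, 0x1000, 64);	/* empty: nothing to do */
	return 0;
}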
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index cc283980598333..01f46caf1f88b7 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ return -EFAULT;
+
+ tmp_info.media_flags = 0;
+- if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
++ if (cdi->last_media_change_ms > tmp_info.last_media_change)
+ tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+
+ tmp_info.last_media_change = cdi->last_media_change_ms;
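The cdrom change compares the two timestamps directly instead of testing the sign of their difference; subtraction-based comparisons misbehave when the operands are unsigned (the difference is never negative) or when the signed subtraction overflows, and the direct comparison sidesteps the whole class. A tiny runnable demonstration of the unsigned variant of the pitfall, with illustrative variable names:

#include <stdio.h>

int main(void)
{
	unsigned long long last_media_change = 250, previously_reported = 100;

	/* Broken shape: an unsigned difference is never negative, so this
	 * branch cannot fire no matter what the values are. */
	if (previously_reported - last_media_change < 0)
		printf("unreachable\n");

	/* Fixed shape, as in the cdrom patch: compare directly. */
	if (last_media_change > previously_reported)
		printf("media changed\n");
	return 0;
}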
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index c6f181702b9a7b..edbc4d33811776 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -38,7 +38,7 @@ static struct _parisc_agp_info {
+
+ int lba_cap_offset;
+
+- u64 *gatt;
++ __le64 *gatt;
+ u64 gatt_entries;
+
+ u64 gart_base;
+@@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+ int i;
+
+ for (i = 0; i < info->gatt_entries; i++) {
+- info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
++ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ return 0;
+@@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+ for (k = 0;
+ k < info->io_pages_per_kpage;
+ k++, j++, paddr += info->io_page_size) {
+- info->gatt[j] =
++ info->gatt[j] = cpu_to_le64(
+ parisc_agp_mask_memory(agp_bridge,
+- paddr, type);
++ paddr, type));
+ asm_io_fdc(&info->gatt[j]);
+ }
+ }
+@@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+- info->gatt[i] = agp_bridge->scratch_page;
++ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+@@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+
+- return cpu_to_le64(pa);
++ /* return native (big-endian) PDIR entry */
++ return pa;
+ }
+
+ static void
+@@ -251,7 +252,8 @@ static int __init
+ agp_ioc_init(void __iomem *ioc_regs)
+ {
+ struct _parisc_agp_info *info = &parisc_agp_info;
+- u64 iova_base, *io_pdir, io_tlb_ps;
++ u64 iova_base, io_tlb_ps;
++ __le64 *io_pdir;
+ int io_tlb_shift;
+
+ printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
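The parisc-agp hunks retype the GATT and IO PDIR pointers as __le64 and perform cpu_to_le64() at the points where entries are stored, rather than inside the mask helper, so values stay in native order internally and sparse can verify the conversion happens exactly once at the device-visible boundary. A hedged userspace model of that convert-at-the-boundary rule; the swap helper is illustrative and relies on a GCC/Clang builtin.

#include <stdint.h>
#include <stdio.h>

/* Illustrative cpu_to_le64(): swap only on a big-endian host, which is the
 * case the parisc fix cares about. PDIR entries are built in native order
 * and converted once, where they become device-visible. */
static uint64_t my_cpu_to_le64(uint64_t v)
{
	const union { uint16_t u16; uint8_t u8; } probe = { .u16 = 1 };

	if (probe.u8)			/* little-endian host: identity */
		return v;
	return __builtin_bswap64(v);	/* big-endian host: byte swap */
}

int main(void)
{
	uint64_t pdir_entry = 0x123456789abcdef0ull;	 /* native order */
	uint64_t gatt_slot = my_cpu_to_le64(pdir_entry); /* device-visible */

	printf("stored %#llx\n", (unsigned long long)gatt_slot);
	return 0;
}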
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index ee71376f174b70..3bc1d9243dbd05 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -289,8 +289,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ if (!devp->hd_ireqfreq)
+ return -EIO;
+
+- if (count < sizeof(unsigned long))
+- return -EINVAL;
++ if (in_compat_syscall()) {
++ if (count < sizeof(compat_ulong_t))
++ return -EINVAL;
++ } else {
++ if (count < sizeof(unsigned long))
++ return -EINVAL;
++ }
+
+ add_wait_queue(&devp->hd_waitqueue, &wait);
+
+@@ -314,9 +319,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ schedule();
+ }
+
+- retval = put_user(data, (unsigned long __user *)buf);
+- if (!retval)
+- retval = sizeof(unsigned long);
++ if (in_compat_syscall()) {
++ retval = put_user(data, (compat_ulong_t __user *)buf);
++ if (!retval)
++ retval = sizeof(compat_ulong_t);
++ } else {
++ retval = put_user(data, (unsigned long __user *)buf);
++ if (!retval)
++ retval = sizeof(unsigned long);
++ }
++
+ out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&devp->hd_waitqueue, &wait);
+@@ -671,12 +683,24 @@ struct compat_hpet_info {
+ unsigned short hi_timer;
+ };
+
++/* 32-bit types would lead to different command codes which should be
++ * translated into 64-bit ones before being passed to hpet_ioctl_common
++ */
++#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info)
++#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, compat_ulong_t)
++
+ static long
+ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ struct hpet_info info;
+ int err;
+
++ if (cmd == COMPAT_HPET_INFO)
++ cmd = HPET_INFO;
++
++ if (cmd == COMPAT_HPET_IRQFREQ)
++ cmd = HPET_IRQFREQ;
++
+ mutex_lock(&hpet_mutex);
+ err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ mutex_unlock(&hpet_mutex);
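Two compat issues are handled above: read() sizes its user copy by compat_ulong_t when in_compat_syscall(), and the ioctl command codes differ because _IOW() encodes the argument size into the command word, so a 32-bit HPET_IRQFREQ arrives as a different number and is translated before dispatch. A runnable illustration of the size-in-the-command encoding; the IOC macro is a simplified stand-in for the kernel's _IOC():

#include <stdio.h>

/* Simplified _IOW-style encoding: the argument size is part of the ioctl
 * number, so 32-bit and 64-bit `unsigned long` produce different codes. */
#define IOC(dir, type, nr, size) \
	(((unsigned int)(dir) << 30) | ((unsigned int)(size) << 16) | \
	 ((type) << 8) | (nr))

int main(void)
{
	unsigned int native = IOC(1, 'h', 0x6, sizeof(unsigned long));	/* 8 on LP64 */
	unsigned int compat = IOC(1, 'h', 0x6, 4);			/* 32-bit ulong */

	printf("native %#x compat %#x -> %s\n", native, compat,
	       native == compat ? "same" : "must translate");
	return 0;
}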
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
+index 86162a13681e67..9a24d19236dc70 100644
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void)
+
+ found:
+ err = pci_read_config_dword(pdev, 0x58, &pmbase);
+- if (err)
++ if (err) {
++ err = pcibios_err_to_errno(err);
+ goto put_dev;
++ }
+
+ pmbase &= 0x0000FF00;
+ if (pmbase == 0) {
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index e19b0f9f48b97f..57a80ec93badac 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -70,7 +70,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
+ if (!wait)
+ return 0;
+- hwrng_msleep(rng, 1000);
++ hwrng_yield(rng);
+ }
+
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
+@@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng)
+ return ret;
+
+ ret = reset_control_reset(priv->reset);
+- if (ret)
++ if (ret) {
++ clk_disable_unprepare(priv->clk);
+ return ret;
++ }
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
+index 1abbff04a015a5..a55f5f2d35dff7 100644
+--- a/drivers/char/hw_random/cctrng.c
++++ b/drivers/char/hw_random/cctrng.c
+@@ -624,6 +624,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
+ /* wait for Cryptocell reset completion */
+ if (!cctrng_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
++ clk_disable_unprepare(drvdata->clk);
+ return -EBUSY;
+ }
+
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index e3598ec9cfca8b..a182fe794f9855 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -23,10 +23,13 @@
+ #include <linux/sched.h>
+ #include <linux/sched/signal.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+
+ #define RNG_MODULE_NAME "hw_random"
+
++#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
++
+ static struct hwrng *current_rng;
+ /* the current rng has been explicitly chosen by user via sysfs */
+ static int cur_rng_set_by_user;
+@@ -58,7 +61,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+
+ static size_t rng_buffer_size(void)
+ {
+- return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++ return RNG_BUFFER_SIZE;
+ }
+
+ static void add_early_randomness(struct hwrng *rng)
+@@ -171,7 +174,6 @@ static int hwrng_init(struct hwrng *rng)
+ reinit_completion(&rng->cleanup_done);
+
+ skip_init:
+- rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
+ current_quality = rng->quality; /* obsolete */
+
+ return 0;
+@@ -209,6 +211,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+ {
++ u8 buffer[RNG_BUFFER_SIZE];
+ ssize_t ret = 0;
+ int err = 0;
+ int bytes_read, len;
+@@ -236,34 +239,37 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock_reading;
++ } else if (bytes_read == 0 &&
++ (filp->f_flags & O_NONBLOCK)) {
++ err = -EAGAIN;
++ goto out_unlock_reading;
+ }
++
+ data_avail = bytes_read;
+ }
+
+- if (!data_avail) {
+- if (filp->f_flags & O_NONBLOCK) {
+- err = -EAGAIN;
+- goto out_unlock_reading;
+- }
+- } else {
+- len = data_avail;
++ len = data_avail;
++ if (len) {
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+- if (copy_to_user(buf + ret, rng_buffer + data_avail,
+- len)) {
++ memcpy(buffer, rng_buffer + data_avail, len);
++ }
++ mutex_unlock(&reading_mutex);
++ put_rng(rng);
++
++ if (len) {
++ if (copy_to_user(buf + ret, buffer, len)) {
+ err = -EFAULT;
+- goto out_unlock_reading;
++ goto out;
+ }
+
+ size -= len;
+ ret += len;
+ }
+
+- mutex_unlock(&reading_mutex);
+- put_rng(rng);
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+@@ -274,6 +280,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ }
+ }
+ out:
++ memzero_explicit(buffer, sizeof(buffer));
+ return ret ? : err;
+
+ out_unlock_reading:
+@@ -555,6 +562,9 @@ int hwrng_register(struct hwrng *rng)
+ complete(&rng->cleanup_done);
+ init_completion(&rng->dying);
+
++ /* Adjust quality field to always have a proper value */
++ rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
++
+ if (!current_rng ||
+ (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
+ /*
+@@ -678,6 +688,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+ }
+ EXPORT_SYMBOL_GPL(hwrng_msleep);
+
++long hwrng_yield(struct hwrng *rng)
++{
++ return wait_for_completion_interruptible_timeout(&rng->dying, 1);
++}
++EXPORT_SYMBOL_GPL(hwrng_yield);
++
+ static int __init hwrng_modinit(void)
+ {
+ int ret;
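The rng_dev_read() rework copies into an on-stack buffer inside the reading_mutex critical section, releases the mutex and the rng reference, and only then calls copy_to_user(), which can fault and sleep; the stack copy is wiped with memzero_explicit() before returning. A runnable userspace analogue of that short-critical-section-then-copy-out shape (the volatile loop stands in for memzero_explicit()):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t reading_mutex = PTHREAD_MUTEX_INITIALIZER;
static char shared_pool[32] = "entropy-entropy-entropy-entropy";

/* Copy out of the shared buffer into a stack buffer while locked, drop the
 * lock before the potentially blocking copy to the caller, then wipe the
 * stack copy. */
static size_t read_random(char *dst, size_t len)
{
	char local[sizeof(shared_pool)];
	volatile char *p = local;
	size_t i;

	if (len > sizeof(local))
		len = sizeof(local);

	pthread_mutex_lock(&reading_mutex);
	memcpy(local, shared_pool, len);	/* short, bounded critical section */
	pthread_mutex_unlock(&reading_mutex);

	memcpy(dst, local, len);		/* stand-in for copy_to_user() */
	for (i = 0; i < sizeof(local); i++)
		p[i] = 0;
	return len;
}

int main(void)
{
	char out[16];

	printf("read %zu bytes\n", read_random(out, sizeof(out)));
	return 0;
}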
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index 12fbe809183190..159baf00a86755 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -58,7 +58,8 @@ struct amd_geode_priv {
+
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+- void __iomem *mem = (void __iomem *)rng->priv;
++ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++ void __iomem *mem = priv->membase;
+
+ *data = readl(mem + GEODE_RNG_DATA_REG);
+
+@@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+
+ static int geode_rng_data_present(struct hwrng *rng, int wait)
+ {
+- void __iomem *mem = (void __iomem *)rng->priv;
++ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
++ void __iomem *mem = priv->membase;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c
+index 38474d48a25e16..b1f94e3c0c6a4a 100644
+--- a/drivers/char/hw_random/jh7110-trng.c
++++ b/drivers/char/hw_random/jh7110-trng.c
+@@ -300,7 +300,7 @@ static int starfive_trng_probe(struct platform_device *pdev)
+ ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name,
+ (void *)trng);
+ if (ret)
+- return dev_err_probe(&pdev->dev, irq,
++ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register interrupt handler\n");
+
+ trng->hclk = devm_clk_get(&pdev->dev, "hclk");
+diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
+index aa993753ab120b..1e3048f2bb38f0 100644
+--- a/drivers/char/hw_random/mtk-rng.c
++++ b/drivers/char/hw_random/mtk-rng.c
+@@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
+ dev_set_drvdata(&pdev->dev, priv);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+- pm_runtime_enable(&pdev->dev);
++ devm_pm_runtime_enable(&pdev->dev);
+
+ dev_info(&pdev->dev, "registered RNG driver\n");
+
+diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
+index 56346fb328727e..ab4e87a99f0874 100644
+--- a/drivers/char/ipmi/ssif_bmc.c
++++ b/drivers/char/ipmi/ssif_bmc.c
+@@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t
+ unsigned long flags;
+ ssize_t ret;
+
+- if (count > sizeof(struct ipmi_ssif_msg))
++ if (count < sizeof(msg.len) ||
++ count > sizeof(struct ipmi_ssif_msg))
+ return -EINVAL;
+
+ if (copy_from_user(&msg, buf, count))
+ return -EFAULT;
+
+- if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
++ if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX ||
++ count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ssif_bmc->lock, flags);
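The write handler now validates in two stages: count must be at least large enough to hold the len field and no larger than the whole message, and after the copy the embedded msg.len must be non-zero, within IPMI_SSIF_PAYLOAD_MAX, and consistent with count. A self-contained sketch of that validate-a-user-supplied-length pattern; PAYLOAD_MAX and the struct layout here are illustrative.

#include <stdio.h>
#include <string.h>

#define PAYLOAD_MAX 254	/* illustrative stand-in for IPMI_SSIF_PAYLOAD_MAX */

struct ssif_msg {
	unsigned int len;
	unsigned char payload[PAYLOAD_MAX];
};

/* Reject a write that is too short to contain the length field, too long
 * for the structure, or whose embedded length disagrees with the number of
 * bytes actually supplied. memcpy() stands in for copy_from_user(). */
static int validate(const void *buf, size_t count)
{
	struct ssif_msg msg;

	if (count < sizeof(msg.len) || count > sizeof(msg))
		return -1;
	memcpy(&msg, buf, count);
	if (!msg.len || msg.len > PAYLOAD_MAX ||
	    count < sizeof(msg.len) + msg.len)
		return -1;
	return 0;
}

int main(void)
{
	struct ssif_msg m = { .len = 4, .payload = "abcd" };

	printf("full message: %s\n",
	       validate(&m, sizeof(m.len) + 4) ? "reject" : "ok");
	printf("truncated:    %s\n", validate(&m, 3) ? "reject" : "ok");
	return 0;
}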
+diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
+index 4c188e9e477cdf..58e9dcc2a30874 100644
+--- a/drivers/char/ppdev.c
++++ b/drivers/char/ppdev.c
+@@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp)
+ if (!port) {
+ pr_warn("%s: no associated port!\n", name);
+ rc = -ENXIO;
+- goto err;
++ goto err_free_name;
++ }
++
++ index = ida_alloc(&ida_index, GFP_KERNEL);
++ if (index < 0) {
++ pr_warn("%s: failed to get index!\n", name);
++ rc = index;
++ goto err_put_port;
+ }
+
+- index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+ ppdev_cb.irq_func = pp_irq;
+ ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+ ppdev_cb.private = pp;
+ pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
+- parport_put_port(port);
+
+ if (!pdev) {
+ pr_warn("%s: failed to register device!\n", name);
+ rc = -ENXIO;
+- ida_simple_remove(&ida_index, index);
+- goto err;
++ ida_free(&ida_index, index);
++ goto err_put_port;
+ }
+
+ pp->pdev = pdev;
+ pp->index = index;
+ dev_dbg(&pdev->dev, "registered pardevice\n");
+-err:
++err_put_port:
++ parport_put_port(port);
++err_free_name:
+ kfree(name);
+ return rc;
+ }
+@@ -750,7 +757,7 @@ static int pp_release(struct inode *inode, struct file *file)
+
+ if (pp->pdev) {
+ parport_unregister_device(pp->pdev);
+- ida_simple_remove(&ida_index, pp->index);
++ ida_free(&ida_index, pp->index);
+ pp->pdev = NULL;
+ pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
+ }
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 3cb37760dfec23..7b5d4822fa3ae1 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
+
+ static void __cold _credit_init_bits(size_t bits)
+ {
+- static struct execute_work set_ready;
++ static DECLARE_WORK(set_ready, crng_set_ready);
+ unsigned int new, orig, add;
+ unsigned long flags;
+
+@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
+
+ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
+ crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+- if (static_key_initialized)
+- execute_in_process_context(crng_set_ready, &set_ready);
++ if (static_key_initialized && system_unbound_wq)
++ queue_work(system_unbound_wq, &set_ready);
+ atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+@@ -890,8 +890,8 @@ void __init random_init(void)
+
+ /*
+ * If we were initialized by the cpu or bootloader before jump labels
+- * are initialized, then we should enable the static branch here, where
+- * it's guaranteed that jump labels have been initialized.
++ * or workqueues are initialized, then we should enable the static
++ * branch here, where it's guaranteed that these have been initialized.
+ */
+ if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+ crng_set_ready(NULL);
+diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
+index 639c3f395a5afc..4c0bbba64ee500 100644
+--- a/drivers/char/tpm/eventlog/common.c
++++ b/drivers/char/tpm/eventlog/common.c
+@@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode,
+ if (!err) {
+ seq = file->private_data;
+ seq->private = chip;
++ } else {
++ put_device(&chip->dev);
+ }
+
+ return err;
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 30b4c288c1bbc3..c3fbbf4d3db79a 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
+
+ if (!ret)
+ ret = tpm2_commit_space(chip, space, buf, &len);
++ else
++ tpm2_flush_space(chip);
+
+ out_rc:
+ return ret ? ret : len;
+diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
+index 363afdd4d1d306..d4d1007fe8117e 100644
+--- a/drivers/char/tpm/tpm2-space.c
++++ b/drivers/char/tpm/tpm2-space.c
+@@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
+ struct tpm_space *space = &chip->work_space;
+ int i;
+
++ if (!space)
++ return;
++
+ for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
+ if (space->context_tbl[i] && ~space->context_tbl[i])
+ tpm2_flush_context(chip, space->context_tbl[i]);
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 1b350412d8a6be..f6aa0dfadb93ee 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -919,8 +919,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ int rc;
+ u32 int_status;
+
+- INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
+-
+ rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
+ tis_int_handler, IRQF_ONESHOT | flags,
+ dev_name(&chip->dev), chip);
+@@ -1022,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
+ interrupt = 0;
+
+ tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+- flush_work(&priv->free_irq_work);
++ if (priv->free_irq_work.func)
++ flush_work(&priv->free_irq_work);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+@@ -1132,6 +1131,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ priv->phy_ops = phy_ops;
+ priv->locality_count = 0;
+ mutex_init(&priv->locality_count_mutex);
++ INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
+index c5c3197ee29f04..4bdad9e3667fa5 100644
+--- a/drivers/char/tpm/tpm_tis_spi_main.c
++++ b/drivers/char/tpm/tpm_tis_spi_main.c
+@@ -37,6 +37,7 @@
+ #include "tpm_tis_spi.h"
+
+ #define MAX_SPI_FRAMESIZE 64
++#define SPI_HDRSIZE 4
+
+ /*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+ int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ int irq, const struct tpm_tis_phy_ops *phy_ops)
+ {
+- phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
++ phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 680d1ef2a21794..796ab9a4e48fa1 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -2052,25 +2052,27 @@ static int virtcons_probe(struct virtio_device *vdev)
+ multiport = true;
+ }
+
+- err = init_vqs(portdev);
+- if (err < 0) {
+- dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+- goto free_chrdev;
+- }
+-
+ spin_lock_init(&portdev->ports_lock);
+ INIT_LIST_HEAD(&portdev->ports);
+ INIT_LIST_HEAD(&portdev->list);
+
+- virtio_device_ready(portdev->vdev);
+-
+ INIT_WORK(&portdev->config_work, &config_work_handler);
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+ if (multiport) {
+ spin_lock_init(&portdev->c_ivq_lock);
+ spin_lock_init(&portdev->c_ovq_lock);
++ }
+
++ err = init_vqs(portdev);
++ if (err < 0) {
++ dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
++ goto free_chrdev;
++ }
++
++ virtio_device_ready(portdev->vdev);
++
++ if (multiport) {
+ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ if (err < 0) {
+ dev_err(&vdev->dev,
+diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
+index 5a5afa14ca8cb8..45771b1a3716a2 100644
+--- a/drivers/char/xillybus/xillyusb.c
++++ b/drivers/char/xillybus/xillyusb.c
+@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
+ static const char xillyname[] = "xillyusb";
+
+ static unsigned int fifo_buf_order;
++static struct workqueue_struct *wakeup_wq;
+
+ #define USB_VENDOR_ID_XILINX 0x03fd
+ #define USB_VENDOR_ID_ALTERA 0x09fb
+@@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
+ * errors if executed. The mechanism relies on that xdev->error is assigned
+ * a non-zero value by report_io_error() prior to queueing wakeup_all(),
+ * which prevents bulk_in_work() from calling process_bulk_in().
+- *
+- * The fact that wakeup_all() and bulk_in_work() are queued on the same
+- * workqueue makes their concurrent execution very unlikely, however the
+- * kernel's API doesn't seem to ensure this strictly.
+ */
+
+ static void wakeup_all(struct work_struct *work)
+@@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,
+
+ if (do_once) {
+ kref_get(&xdev->kref); /* xdev is used by work item */
+- queue_work(xdev->workq, &xdev->wakeup_workitem);
++ queue_work(wakeup_wq, &xdev->wakeup_workitem);
+ }
+ }
+
+@@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {
+
+ static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
+ {
++ struct usb_device *udev = xdev->udev;
++
++	/* Verify that the device has the two fundamental bulk in/out endpoints */
++ if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
++ usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
++ return -ENODEV;
++
+ xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
+ bulk_out_work, 1, 2);
+ if (!xdev->msg_ep)
+@@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
+ __le16 *chandesc,
+ int num_channels)
+ {
+- struct xillyusb_channel *chan;
++ struct usb_device *udev = xdev->udev;
++ struct xillyusb_channel *chan, *new_channels;
+ int i;
+
+ chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+- xdev->channels = chan;
++ new_channels = chan;
+
+ for (i = 0; i < num_channels; i++, chan++) {
+ unsigned int in_desc = le16_to_cpu(*chandesc++);
+@@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
+ */
+
+ if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
++ if (usb_pipe_type_check(udev,
++ usb_sndbulkpipe(udev, i + 2))) {
++ dev_err(xdev->dev,
++ "Missing BULK OUT endpoint %d\n",
++ i + 2);
++ kfree(new_channels);
++ return -ENODEV;
++ }
++
+ chan->writable = 1;
+ chan->out_synchronous = !!(out_desc & 0x40);
+ chan->out_seekable = !!(out_desc & 0x20);
+@@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
+ }
+ }
+
++ xdev->channels = new_channels;
+ return 0;
+ }
+
+@@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
+ * just after responding with the IDT, there is no reason for any
+ * work item to be running now. To be sure that xdev->channels
+ * is updated on anything that might run in parallel, flush the
+- * workqueue, which rarely does anything.
++ * device's workqueue and the wakeup work item. This rarely
++ * does anything.
+ */
+ flush_workqueue(xdev->workq);
++ flush_work(&xdev->wakeup_workitem);
+
+ xdev->num_channels = num_channels;
+
+@@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
+ {
+ int rc = 0;
+
++ wakeup_wq = alloc_workqueue(xillyname, 0, 0);
++ if (!wakeup_wq)
++ return -ENOMEM;
++
+ if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
+ fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
+ else
+@@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)
+
+ rc = usb_register(&xillyusb_driver);
+
++ if (rc)
++ destroy_workqueue(wakeup_wq);
++
+ return rc;
+ }
+
+ static void __exit xillyusb_exit(void)
+ {
+ usb_deregister(&xillyusb_driver);
++
++ destroy_workqueue(wakeup_wq);
+ }
+
+ module_init(xillyusb_init);
+diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
+index 91b5c6f1481964..4e9594714b1428 100644
+--- a/drivers/clk/at91/sama7g5.c
++++ b/drivers/clk/at91/sama7g5.c
+@@ -66,6 +66,7 @@ enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
++ PLL_COMPID_MAX,
+ };
+
+ /*
+@@ -165,7 +166,7 @@ static struct sama7g5_pll {
+ u8 t;
+ u8 eid;
+ u8 safe_div;
+-} sama7g5_plls[][PLL_ID_MAX] = {
++} sama7g5_plls[][PLL_COMPID_MAX] = {
+ [PLL_ID_CPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+@@ -1038,7 +1039,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
+ sama7g5_pmc->chws[PMC_MAIN] = hw;
+
+ for (i = 0; i < PLL_ID_MAX; i++) {
+- for (j = 0; j < 3; j++) {
++ for (j = 0; j < PLL_COMPID_MAX; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7g5_plls[i][j].n)
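The sama7g5 bug was a table dimensioned by PLL_ID_MAX (the number of PLLs) where the number of per-PLL components belonged, next to a hard-coded loop bound of 3; the PLL_COMPID_MAX sentinel ties the array size and every loop to the enum itself. A runnable illustration of the sentinel idiom:

#include <stdio.h>

/* Sentinel-terminated enum used to size arrays: adding PLL_COMPID_MAX
 * keeps the table and every loop bound in sync when components change. */
enum pll_component_id {
	PLL_COMPID_FRAC,
	PLL_COMPID_DIV0,
	PLL_COMPID_DIV1,
	PLL_COMPID_MAX,		/* always last: the number of components */
};

static const char *names[PLL_COMPID_MAX] = {
	[PLL_COMPID_FRAC] = "fracck",
	[PLL_COMPID_DIV0] = "divpmcck0",
	[PLL_COMPID_DIV1] = "divpmcck1",
};

int main(void)
{
	for (int j = 0; j < PLL_COMPID_MAX; j++)	/* not a hard-coded 3 */
		printf("%d: %s\n", j, names[j]);
	return 0;
}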
+diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
+index e4fbbf3c40fe2b..3cb235df9d379f 100644
+--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
++++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
+@@ -56,6 +56,8 @@ static int clk_dvp_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
++ data->num = NR_CLOCKS;
++
+ data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev,
+ "hdmi0-108MHz",
+ &clk_dvp_parent, 0,
+@@ -76,7 +78,6 @@ static int clk_dvp_probe(struct platform_device *pdev)
+ goto unregister_clk0;
+ }
+
+- data->num = NR_CLOCKS;
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ data);
+ if (ret)
+diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
+index 84f2af736ee8a6..83ef41d618be37 100644
+--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
++++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
+@@ -112,7 +112,7 @@ static void bcm53573_ilp_init(struct device_node *np)
+ goto err_free_ilp;
+ }
+
+- ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
++ ilp->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(ilp->regmap)) {
+ err = PTR_ERR(ilp->regmap);
+ goto err_free_ilp;
+diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
+index 829406dc44a202..4d411408e4afef 100644
+--- a/drivers/clk/bcm/clk-raspberrypi.c
++++ b/drivers/clk/bcm/clk-raspberrypi.c
+@@ -371,8 +371,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+- data->hws[clks->id] = hw;
+ data->num = clks->id + 1;
++ data->hws[clks->id] = hw;
+ }
+
+ clks++;
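The two clk fixes above reorder the same pair of statements: clk_hw_onecell_data.hws[] is a flexible array that newer kernels annotate with __counted_by(num), so fortified builds bounds-check each hws[i] access against the current ->num, which therefore has to be valid before the first slot is written. A plain-C sketch of the shape, without the attribute, so the ordering is shown by convention only:

#include <stdio.h>
#include <stdlib.h>

/* In the kernel, hws[] is bounds-checked against ->num when __counted_by()
 * is in effect, so ->num must be assigned before any element is touched. */
struct onecell_data {
	unsigned int num;
	void *hws[];		/* flexible array, logically counted by num */
};

int main(void)
{
	struct onecell_data *d = malloc(sizeof(*d) + 2 * sizeof(void *));
	int clk0, clk1;

	if (!d)
		return 1;
	d->num = 2;		/* publish the count first */
	d->hws[0] = &clk0;	/* only then index the array */
	d->hws[1] = &clk1;
	printf("registered %u clocks\n", d->num);
	free(d);
	return 0;
}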
+diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
+index 7cde328495e2b6..7914e60f3d6c56 100644
+--- a/drivers/clk/clk-en7523.c
++++ b/drivers/clk/clk-en7523.c
+@@ -40,6 +40,7 @@ struct en_clk_desc {
+ u8 div_shift;
+ u16 div_val0;
+ u8 div_step;
++ u8 div_offset;
+ };
+
+ struct en_clk_gate {
+@@ -67,6 +68,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
++ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_EMI,
+ .name = "emi",
+@@ -80,6 +82,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
++ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_BUS,
+ .name = "bus",
+@@ -93,6 +96,7 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
++ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_SLIC,
+ .name = "slic",
+@@ -133,13 +137,14 @@ static const struct en_clk_desc en7523_base_clks[] = {
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
++ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_CRYPTO,
+ .name = "crypto",
+
+ .base_reg = REG_CRYPTO_CLKSRC,
+ .base_bits = 1,
+- .base_shift = 8,
++ .base_shift = 0,
+ .base_values = emi_base,
+ .n_base_values = ARRAY_SIZE(emi_base),
+ }
+@@ -184,7 +189,7 @@ static u32 en7523_get_div(void __iomem *base, int i)
+ if (!val && desc->div_val0)
+ return desc->div_val0;
+
+- return (val + 1) * desc->div_step;
++ return (val + desc->div_offset) * desc->div_step;
+ }
+
+ static int en7523_pci_is_enabled(struct clk_hw *hw)
+diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
+index e319cfa51a8a3f..030186def9c69a 100644
+--- a/drivers/clk/clk-npcm7xx.c
++++ b/drivers/clk/clk-npcm7xx.c
+@@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
+ return;
+
+ npcm7xx_init_fail:
+- kfree(npcm7xx_clk_data->hws);
++ kfree(npcm7xx_clk_data);
+ npcm7xx_init_np_err:
+ iounmap(clk_base);
+ npcm7xx_init_error:
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index 7d7b2cb7531804..b00c38469cfadb 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -24,10 +24,12 @@
+ #define RS9_REG_SS_AMP_0V7 0x1
+ #define RS9_REG_SS_AMP_0V8 0x2
+ #define RS9_REG_SS_AMP_0V9 0x3
++#define RS9_REG_SS_AMP_DEFAULT RS9_REG_SS_AMP_0V8
+ #define RS9_REG_SS_AMP_MASK 0x3
+ #define RS9_REG_SS_SSC_100 0
+ #define RS9_REG_SS_SSC_M025 (1 << 3)
+ #define RS9_REG_SS_SSC_M050 (3 << 3)
++#define RS9_REG_SS_SSC_DEFAULT RS9_REG_SS_SSC_100
+ #define RS9_REG_SS_SSC_MASK (3 << 3)
+ #define RS9_REG_SS_SSC_LOCK BIT(5)
+ #define RS9_REG_SR 0x2
+@@ -163,7 +165,7 @@ static u8 rs9_calc_dif(const struct rs9_driver_data *rs9, int idx)
+ enum rs9_model model = rs9->chip_info->model;
+
+ if (model == RENESAS_9FGV0241)
+- return BIT(idx) + 1;
++ return BIT(idx + 1);
+ else if (model == RENESAS_9FGV0441)
+ return BIT(idx);
+
+@@ -211,8 +213,8 @@ static int rs9_get_common_config(struct rs9_driver_data *rs9)
+ int ret;
+
+ /* Set defaults */
+- rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
+- rs9->pll_ssc = RS9_REG_SS_SSC_100;
++ rs9->pll_amplitude = RS9_REG_SS_AMP_DEFAULT;
++ rs9->pll_ssc = RS9_REG_SS_SSC_DEFAULT;
+
+ /* Output clock amplitude */
+ ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
+@@ -253,13 +255,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
+ int i;
+
+ /* If amplitude is non-default, update it. */
+- if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
++ if (rs9->pll_amplitude != RS9_REG_SS_AMP_DEFAULT) {
+ regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
+ rs9->pll_amplitude);
+ }
+
+ /* If SSC is non-default, update it. */
+- if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
++ if (rs9->pll_ssc != RS9_REG_SS_SSC_DEFAULT) {
+ regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
+ rs9->pll_ssc);
+ }
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 2c7a830ce3080f..fdec715c9ba9b3 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
+ sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ if (!sclk->info) {
+ dev_dbg(dev, "invalid clock info for idx %d\n", idx);
++ devm_kfree(dev, sclk);
+ continue;
+ }
+
+diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
+index ef4ba467e747bc..5886bc54aa0e78 100644
+--- a/drivers/clk/clk-si521xx.c
++++ b/drivers/clk/clk-si521xx.c
+@@ -282,7 +282,7 @@ static int si521xx_probe(struct i2c_client *client)
+ const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
+ const struct clk_parent_data clk_parent_data = { .index = 0 };
+ const u8 data[3] = { SI521XX_REG_BC, 1, 1 };
+- unsigned char name[6] = "DIFF0";
++ unsigned char name[16] = "DIFF0";
+ struct clk_init_data init = {};
+ struct si521xx *si;
+ int i, ret;
+@@ -316,7 +316,7 @@ static int si521xx_probe(struct i2c_client *client)
+ /* Register clock */
+ for (i = 0; i < hweight16(chip_info); i++) {
+ memset(&init, 0, sizeof(init));
+- snprintf(name, 6, "DIFF%d", i);
++ snprintf(name, sizeof(name), "DIFF%d", i);
+ init.name = name;
+ init.ops = &si521xx_diff_clk_ops;
+ init.parent_data = &clk_parent_data;
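With a 6-byte buffer, snprintf(name, 6, "DIFF%d", i) silently truncates once the index needs two digits, and GCC's -Wformat-truncation flags exactly that; the fix widens the buffer and sizes the call with sizeof(). A runnable demonstration of the truncation:

#include <stdio.h>

int main(void)
{
	char small[6], big[16];

	/* "DIFF10" needs 7 bytes including the NUL; a 6-byte buffer silently
	 * truncates, and snprintf's return value reports the intended length. */
	int n = snprintf(small, sizeof(small), "DIFF%d", 10);

	printf("small=\"%s\" (wanted %d bytes)\n", small, n + 1);
	snprintf(big, sizeof(big), "DIFF%d", 10);
	printf("big=\"%s\"\n", big);
	return 0;
}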
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index 9599857842c72c..2920fe2e5e8bef 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -895,10 +895,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ r[0] = r_div ? (r_div & 0xff) : 1;
+ r[1] = (r_div >> 8) & 0xff;
+ r[2] = (r_div >> 16) & 0xff;
+- err = regmap_bulk_write(output->data->regmap,
++ return regmap_bulk_write(output->data->regmap,
+ SI5341_OUT_R_REG(output), r, 3);
+-
+- return 0;
+ }
+
+ static int si5341_output_reparent(struct clk_si5341_output *output, u8 index)
+diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
+index 01d3c4c7b0b23f..7cb7d501d7a6eb 100644
+--- a/drivers/clk/clk-sp7021.c
++++ b/drivers/clk/clk-sp7021.c
+@@ -604,14 +604,14 @@ static int sp7021_clk_probe(struct platform_device *pdev)
+ int i;
+
+ clk_base = devm_platform_ioremap_resource(pdev, 0);
+- if (!clk_base)
+- return -ENXIO;
++ if (IS_ERR(clk_base))
++ return PTR_ERR(clk_base);
+ pll_base = devm_platform_ioremap_resource(pdev, 1);
+- if (!pll_base)
+- return -ENXIO;
++ if (IS_ERR(pll_base))
++ return PTR_ERR(pll_base);
+ sys_base = devm_platform_ioremap_resource(pdev, 2);
+- if (!sys_base)
+- return -ENXIO;
++ if (IS_ERR(sys_base))
++ return PTR_ERR(sys_base);
+
+ /* enable default clks */
+ for (i = 0; i < ARRAY_SIZE(sp_clken); i++)
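devm_platform_ioremap_resource() never returns NULL on failure; it returns an ERR_PTR()-encoded errno, so the probe's old `!base` checks passed failed mappings straight through, which the IS_ERR()/PTR_ERR() conversion above corrects. A userspace model of the ERR_PTR convention, with macros simplified from the kernel's err.h:

#include <stdio.h>

/* Errors are encoded as small negative values cast into the pointer
 * itself, so callers must test IS_ERR(), never NULL. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *ioremap_resource(int fail)
{
	static char mmio[64];

	return fail ? ERR_PTR(-6) /* -ENXIO */ : (void *)mmio;
}

int main(void)
{
	void *base = ioremap_resource(1);

	if (IS_ERR(base)) {		/* a `!base` check would miss this */
		printf("map failed: %ld\n", PTR_ERR(base));
		return 1;
	}
	return 0;
}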
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 473563bc74960f..f8776065ad1f19 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
+ static HLIST_HEAD(clk_orphan_list);
+ static LIST_HEAD(clk_notifier_list);
+
++/* List of registered clks that use runtime PM */
++static HLIST_HEAD(clk_rpm_list);
++static DEFINE_MUTEX(clk_rpm_list_lock);
++
+ static const struct hlist_head *all_lists[] = {
+ &clk_root_list,
+ &clk_orphan_list,
+@@ -59,6 +63,7 @@ struct clk_core {
+ struct clk_hw *hw;
+ struct module *owner;
+ struct device *dev;
++ struct hlist_node rpm_node;
+ struct device_node *of_node;
+ struct clk_core *parent;
+ struct clk_parent_map *parents;
+@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
+ pm_runtime_put_sync(core->dev);
+ }
+
++/**
++ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
++ *
++ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
++ * that disabling unused clks avoids a deadlock where a device is runtime PM
++ * resuming/suspending and the runtime PM callback is trying to grab the
++ * prepare_lock for something like clk_prepare_enable() while
++ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
++ * PM resume/suspend the device as well.
++ *
++ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
++ * success. Otherwise the lock is released on failure.
++ *
++ * Return: 0 on success, negative errno otherwise.
++ */
++static int clk_pm_runtime_get_all(void)
++{
++ int ret;
++ struct clk_core *core, *failed;
++
++ /*
++ * Grab the list lock to prevent any new clks from being registered
++ * or unregistered until clk_pm_runtime_put_all().
++ */
++ mutex_lock(&clk_rpm_list_lock);
++
++ /*
++ * Runtime PM "get" all the devices that are needed for the clks
++ * currently registered. Do this without holding the prepare_lock, to
++ * avoid the deadlock.
++ */
++ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
++ ret = clk_pm_runtime_get(core);
++ if (ret) {
++ failed = core;
++ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
++ dev_name(failed->dev), failed->name);
++ goto err;
++ }
++ }
++
++ return 0;
++
++err:
++ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
++ if (core == failed)
++ break;
++
++ clk_pm_runtime_put(core);
++ }
++ mutex_unlock(&clk_rpm_list_lock);
++
++ return ret;
++}
++
++/**
++ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
++ *
++ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
++ * the 'clk_rpm_list_lock'.
++ */
++static void clk_pm_runtime_put_all(void)
++{
++ struct clk_core *core;
++
++ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
++ clk_pm_runtime_put(core);
++ mutex_unlock(&clk_rpm_list_lock);
++}
++
++static void clk_pm_runtime_init(struct clk_core *core)
++{
++ struct device *dev = core->dev;
++
++ if (dev && pm_runtime_enabled(dev)) {
++ core->rpm_enabled = true;
++
++ mutex_lock(&clk_rpm_list_lock);
++ hlist_add_head(&core->rpm_node, &clk_rpm_list);
++ mutex_unlock(&clk_rpm_list_lock);
++ }
++}
++
+ /*** locking ***/
+ static void clk_prepare_lock(void)
+ {
+@@ -418,6 +506,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
++ if (!hw)
++ return NULL;
++
+ return hw->core;
+ }
+
+@@ -1359,9 +1450,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+ if (core->flags & CLK_IGNORE_UNUSED)
+ return;
+
+- if (clk_pm_runtime_get(core))
+- return;
+-
+ if (clk_core_is_prepared(core)) {
+ trace_clk_unprepare(core);
+ if (core->ops->unprepare_unused)
+@@ -1370,8 +1458,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+ core->ops->unprepare(core->hw);
+ trace_clk_unprepare_complete(core);
+ }
+-
+- clk_pm_runtime_put(core);
+ }
+
+ static void __init clk_disable_unused_subtree(struct clk_core *core)
+@@ -1387,9 +1473,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_prepare_enable(core->parent);
+
+- if (clk_pm_runtime_get(core))
+- goto unprepare_out;
+-
+ flags = clk_enable_lock();
+
+ if (core->enable_count)
+@@ -1414,8 +1497,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
+
+ unlock_out:
+ clk_enable_unlock(flags);
+- clk_pm_runtime_put(core);
+-unprepare_out:
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_disable_unprepare(core->parent);
+ }
+@@ -1431,6 +1512,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
+ static int __init clk_disable_unused(void)
+ {
+ struct clk_core *core;
++ int ret;
+
+ if (clk_ignore_unused) {
+ pr_warn("clk: Not disabling unused clocks\n");
+@@ -1439,6 +1521,13 @@ static int __init clk_disable_unused(void)
+
+ pr_info("clk: Disabling unused clocks\n");
+
++ ret = clk_pm_runtime_get_all();
++ if (ret)
++ return ret;
++ /*
++ * Grab the prepare lock to keep the clk topology stable while iterating
++ * over clks.
++ */
+ clk_prepare_lock();
+
+ hlist_for_each_entry(core, &clk_root_list, child_node)
+@@ -1455,6 +1544,8 @@ static int __init clk_disable_unused(void)
+
+ clk_prepare_unlock();
+
++ clk_pm_runtime_put_all();
++
+ return 0;
+ }
+ late_initcall_sync(clk_disable_unused);
+@@ -3188,28 +3279,41 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+ int level)
+ {
+ int phase;
++ struct clk *clk_user;
++ int multi_node = 0;
+
+- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
++ seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
+ level * 3 + 1, "",
+- 30 - level * 3, c->name,
++ 35 - level * 3, c->name,
+ c->enable_count, c->prepare_count, c->protect_count,
+ clk_core_get_rate_recalc(c),
+ clk_core_get_accuracy_recalc(c));
+
+ phase = clk_core_get_phase(c);
+ if (phase >= 0)
+- seq_printf(s, "%5d", phase);
++ seq_printf(s, "%-5d", phase);
+ else
+ seq_puts(s, "-----");
+
+- seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
++ seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
+
+ if (c->ops->is_enabled)
+- seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
++ seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
+ else if (!c->ops->enable)
+- seq_printf(s, " %9c\n", 'Y');
++ seq_printf(s, " %5c ", 'Y');
+ else
+- seq_printf(s, " %9c\n", '?');
++ seq_printf(s, " %5c ", '?');
++
++ hlist_for_each_entry(clk_user, &c->clks, clks_node) {
++ seq_printf(s, "%*s%-*s %-25s\n",
++ level * 3 + 2 + 105 * multi_node, "",
++ 30,
++ clk_user->dev_id ? clk_user->dev_id : "deviceless",
++ clk_user->con_id ? clk_user->con_id : "no_connection_id");
++
++ multi_node = 1;
++ }
++
+ }
+
+ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
+@@ -3217,9 +3321,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
+ {
+ struct clk_core *child;
+
+- clk_pm_runtime_get(c);
+ clk_summary_show_one(s, c, level);
+- clk_pm_runtime_put(c);
+
+ hlist_for_each_entry(child, &c->children, child_node)
+ clk_summary_show_subtree(s, child, level + 1);
+@@ -3229,10 +3331,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
+ {
+ struct clk_core *c;
+ struct hlist_head **lists = s->private;
++ int ret;
+
+- seq_puts(s, " enable prepare protect duty hardware\n");
+- seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
+- seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
++ seq_puts(s, " enable prepare protect duty hardware connection\n");
++ seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
++ seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
++
++ ret = clk_pm_runtime_get_all();
++ if (ret)
++ return ret;
+
+ clk_prepare_lock();
+
+@@ -3241,6 +3348,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
+ clk_summary_show_subtree(s, c, 0);
+
+ clk_prepare_unlock();
++ clk_pm_runtime_put_all();
+
+ return 0;
+ }
+@@ -3288,8 +3396,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
+ struct clk_core *c;
+ bool first_node = true;
+ struct hlist_head **lists = s->private;
++ int ret;
++
++ ret = clk_pm_runtime_get_all();
++ if (ret)
++ return ret;
+
+ seq_putc(s, '{');
++
+ clk_prepare_lock();
+
+ for (; *lists; lists++) {
+@@ -3302,6 +3416,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
+ }
+
+ clk_prepare_unlock();
++ clk_pm_runtime_put_all();
+
+ seq_puts(s, "}\n");
+ return 0;
+@@ -3916,8 +4031,6 @@ static int __clk_core_init(struct clk_core *core)
+ }
+
+ clk_core_reparent_orphans_nolock();
+-
+- kref_init(&core->ref);
+ out:
+ clk_pm_runtime_put(core);
+ unlock:
+@@ -4146,6 +4259,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
+ kfree(core->parents);
+ }
+
++/* Free memory allocated for a struct clk_core */
++static void __clk_release(struct kref *ref)
++{
++ struct clk_core *core = container_of(ref, struct clk_core, ref);
++
++ if (core->rpm_enabled) {
++ mutex_lock(&clk_rpm_list_lock);
++ hlist_del(&core->rpm_node);
++ mutex_unlock(&clk_rpm_list_lock);
++ }
++
++ clk_core_free_parent_map(core);
++ kfree_const(core->name);
++ kfree(core);
++}
++
+ static struct clk *
+ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ {
+@@ -4166,6 +4295,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ goto fail_out;
+ }
+
++ kref_init(&core->ref);
++
+ core->name = kstrdup_const(init->name, GFP_KERNEL);
+ if (!core->name) {
+ ret = -ENOMEM;
+@@ -4178,9 +4309,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ }
+ core->ops = init->ops;
+
+- if (dev && pm_runtime_enabled(dev))
+- core->rpm_enabled = true;
+ core->dev = dev;
++ clk_pm_runtime_init(core);
+ core->of_node = np;
+ if (dev && dev->driver)
+ core->owner = dev->driver->owner;
+@@ -4220,12 +4350,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
+ hw->clk = NULL;
+
+ fail_create_clk:
+- clk_core_free_parent_map(core);
+ fail_parents:
+ fail_ops:
+- kfree_const(core->name);
+ fail_name:
+- kfree(core);
++ kref_put(&core->ref, __clk_release);
+ fail_out:
+ return ERR_PTR(ret);
+ }
+@@ -4305,18 +4433,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
+ }
+ EXPORT_SYMBOL_GPL(of_clk_hw_register);
+
+-/* Free memory allocated for a clock. */
+-static void __clk_release(struct kref *ref)
+-{
+- struct clk_core *core = container_of(ref, struct clk_core, ref);
+-
+- lockdep_assert_held(&prepare_lock);
+-
+- clk_core_free_parent_map(core);
+- kfree_const(core->name);
+- kfree(core);
+-}
+-
+ /*
+ * Empty clk_ops for unregistered clocks. These are used temporarily
+ * after clk_unregister() was called on a clock and until last clock
+@@ -4407,7 +4523,8 @@ void clk_unregister(struct clk *clk)
+ if (ops == &clk_nodrv_ops) {
+ pr_err("%s: unregistered clock: %s\n", __func__,
+ clk->core->name);
+- goto unlock;
++ clk_prepare_unlock();
++ return;
+ }
+ /*
+ * Assign empty clock ops for consumers that might still hold
+@@ -4441,11 +4558,10 @@ void clk_unregister(struct clk *clk)
+ if (clk->core->protect_count)
+ pr_warn("%s: unregistering protected clock: %s\n",
+ __func__, clk->core->name);
++ clk_prepare_unlock();
+
+ kref_put(&clk->core->ref, __clk_release);
+ free_clk(clk);
+-unlock:
+- clk_prepare_unlock();
+ }
+ EXPORT_SYMBOL_GPL(clk_unregister);
+
+@@ -4604,13 +4720,11 @@ void __clk_put(struct clk *clk)
+ if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+ clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
+
+- owner = clk->core->owner;
+- kref_put(&clk->core->ref, __clk_release);
+-
+ clk_prepare_unlock();
+
++ owner = clk->core->owner;
++ kref_put(&clk->core->ref, __clk_release);
+ module_put(owner);
+-
+ free_clk(clk);
+ }
+
+diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
+index ee37d0be6877db..9cd80522ca2d77 100644
+--- a/drivers/clk/clkdev.c
++++ b/drivers/clk/clkdev.c
+@@ -144,7 +144,7 @@ void clkdev_add_table(struct clk_lookup *cl, size_t num)
+ mutex_unlock(&clocks_mutex);
+ }
+
+-#define MAX_DEV_ID 20
++#define MAX_DEV_ID 24
+ #define MAX_CON_ID 16
+
+ struct clk_lookup_alloc {
+diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
+index e5b2cdfe88ce18..dff7ca35536cc1 100644
+--- a/drivers/clk/davinci/da8xx-cfgchip.c
++++ b/drivers/clk/davinci/da8xx-cfgchip.c
+@@ -508,7 +508,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
+ const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
+ struct clk *fck_clk;
+ struct da8xx_usb0_clk48 *usb0;
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ int ret;
+
+ fck_clk = devm_clk_get(dev, "fck");
+@@ -583,7 +583,7 @@ da8xx_cfgchip_register_usb1_clk48(struct device *dev,
+ {
+ const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
+ struct da8xx_usb1_clk48 *usb1;
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ int ret;
+
+ usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
+diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
+index b871872d9960db..141b727ff60d64 100644
+--- a/drivers/clk/hisilicon/clk-hi3519.c
++++ b/drivers/clk/hisilicon/clk-hi3519.c
+@@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev)
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3519_gate_clks,
+- ARRAY_SIZE(hi3519_mux_clks),
++ ARRAY_SIZE(hi3519_gate_clks),
+ crg->clk_data);
+ hisi_clk_unregister_mux(hi3519_mux_clks,
+ ARRAY_SIZE(hi3519_mux_clks),
+diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
+index ff4ca0edce06a3..4623befafaec4d 100644
+--- a/drivers/clk/hisilicon/clk-hi3559a.c
++++ b/drivers/clk/hisilicon/clk-hi3559a.c
+@@ -491,7 +491,6 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
+
+ clk = clk_register(NULL, &p_clk->hw);
+ if (IS_ERR(clk)) {
+- devm_kfree(dev, p_clk);
+ dev_err(dev, "%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
+index 2d7186905abdc0..5d0226530fdb2f 100644
+--- a/drivers/clk/hisilicon/clk-hi3620.c
++++ b/drivers/clk/hisilicon/clk-hi3620.c
+@@ -466,8 +466,10 @@ static void __init hi3620_mmc_clk_init(struct device_node *node)
+ return;
+
+ clk_data->clks = kcalloc(num, sizeof(*clk_data->clks), GFP_KERNEL);
+- if (!clk_data->clks)
++ if (!clk_data->clks) {
++ kfree(clk_data);
+ return;
++ }
+
+ for (i = 0; i < num; i++) {
+ struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i];
+diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
+index f6b82e0b9703a7..db3bca5f4ec9c4 100644
+--- a/drivers/clk/imx/Kconfig
++++ b/drivers/clk/imx/Kconfig
+@@ -96,6 +96,7 @@ config CLK_IMX8QXP
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+ select MXC_CLK_SCU
++ select MXC_CLK
+ help
+ Build the driver for IMX8QXP SCU based clocks.
+
+diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
+index e208ddc511339e..db7f40b07d1abf 100644
+--- a/drivers/clk/imx/clk-composite-7ulp.c
++++ b/drivers/clk/imx/clk-composite-7ulp.c
+@@ -14,6 +14,7 @@
+ #include "../clk-fractional-divider.h"
+ #include "clk.h"
+
++#define PCG_PR_MASK BIT(31)
+ #define PCG_PCS_SHIFT 24
+ #define PCG_PCS_MASK 0x7
+ #define PCG_CGC_SHIFT 30
+@@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
+ struct clk_hw *hw;
+ u32 val;
+
++ val = readl(reg);
++ if (!(val & PCG_PR_MASK)) {
++ pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
++ return 0;
++ }
++
+ if (mux_present) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
+index 27a08c50ac1d84..ac5e9d60acb833 100644
+--- a/drivers/clk/imx/clk-composite-8m.c
++++ b/drivers/clk/imx/clk-composite-8m.c
+@@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
+ .determine_rate = imx8m_clk_composite_mux_determine_rate,
+ };
+
++static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ unsigned long flags;
++ u32 val;
++
++ spin_lock_irqsave(gate->lock, flags);
++
++ val = readl(gate->reg);
++ val |= BIT(gate->bit_idx);
++ writel(val, gate->reg);
++
++ spin_unlock_irqrestore(gate->lock, flags);
++
++ return 0;
++}
++
++static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
++{
++ /* composite clk requires the disable hook */
++}
++
++static const struct clk_ops imx8m_clk_composite_gate_ops = {
++ .enable = imx8m_clk_composite_gate_enable,
++ .disable = imx8m_clk_composite_gate_disable,
++ .is_enabled = clk_gate_is_enabled,
++};
++
+ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
+ const char * const *parent_names,
+ int num_parents, void __iomem *reg,
+@@ -217,10 +245,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
+ struct clk_mux *mux = NULL;
+ const struct clk_ops *divider_ops;
+ const struct clk_ops *mux_ops;
++ const struct clk_ops *gate_ops;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+- goto fail;
++ return ERR_CAST(hw);
+
+ mux_hw = &mux->hw;
+ mux->reg = reg;
+@@ -230,7 +259,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+- goto fail;
++ goto free_mux;
+
+ div_hw = &div->hw;
+ div->reg = reg;
+@@ -257,28 +286,32 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
+ div->flags = CLK_DIVIDER_ROUND_CLOSEST;
+
+ /* skip registering the gate ops if M4 is enabled */
+- if (!mcore_booted) {
+- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+- if (!gate)
+- goto fail;
+-
+- gate_hw = &gate->hw;
+- gate->reg = reg;
+- gate->bit_idx = PCG_CGC_SHIFT;
+- gate->lock = &imx_ccm_lock;
+- }
++ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
++ if (!gate)
++ goto free_div;
++
++ gate_hw = &gate->hw;
++ gate->reg = reg;
++ gate->bit_idx = PCG_CGC_SHIFT;
++ gate->lock = &imx_ccm_lock;
++ if (!mcore_booted)
++ gate_ops = &clk_gate_ops;
++ else
++ gate_ops = &imx8m_clk_composite_gate_ops;
+
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux_hw, mux_ops, div_hw,
+- divider_ops, gate_hw, &clk_gate_ops, flags);
++ divider_ops, gate_hw, gate_ops, flags);
+ if (IS_ERR(hw))
+- goto fail;
++ goto free_gate;
+
+ return hw;
+
+-fail:
++free_gate:
+ kfree(gate);
++free_div:
+ kfree(div);
++free_mux:
+ kfree(mux);
+ return ERR_CAST(hw);
+ }
+diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
+index 81164bdcd6cc9a..6c6c5a30f3282d 100644
+--- a/drivers/clk/imx/clk-composite-93.c
++++ b/drivers/clk/imx/clk-composite-93.c
+@@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
+
+ static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
+ {
++ /*
++	 * Skip disabling the root clock gate if the mcore is enabled.
++ * The root clock may be used by the mcore.
++ */
++ if (mcore_booted)
++ return;
++
+ imx93_clk_composite_gate_endisable(hw, 0);
+ }
+
+@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux_hw, &clk_mux_ro_ops, div_hw,
+ &clk_divider_ro_ops, NULL, NULL, flags);
+- } else if (!mcore_booted) {
++ } else {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto fail;
+@@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
+ &imx93_clk_composite_divider_ops, gate_hw,
+ &imx93_clk_composite_gate_ops,
+ flags | CLK_SET_RATE_NO_REPARENT);
+- } else {
+- hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+- mux_hw, &imx93_clk_composite_mux_ops, div_hw,
+- &imx93_clk_composite_divider_ops, NULL,
+- &imx93_clk_composite_gate_ops,
+- flags | CLK_SET_RATE_NO_REPARENT);
+ }
+
+ if (IS_ERR(hw))
+diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
+index 44462ab50e513c..1becba2b62d0be 100644
+--- a/drivers/clk/imx/clk-fracn-gppll.c
++++ b/drivers/clk/imx/clk-fracn-gppll.c
+@@ -291,6 +291,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
+ if (val & POWERUP_MASK)
+ return 0;
+
++ if (pll->flags & CLK_FRACN_GPPLL_FRACN)
++ writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR),
++ pll->base + PLL_NUMERATOR);
++
+ val |= CLKMUX_BYPASS;
+ writel_relaxed(val, pll->base + PLL_CTRL);
+
+diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
+index f9394e94f69d73..05c7a82b751f3c 100644
+--- a/drivers/clk/imx/clk-imx6ul.c
++++ b/drivers/clk/imx/clk-imx6ul.c
+@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
+
+ clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
+
+- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
+- clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
++ clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk);
++ clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
+
+ imx_register_uart_clocks();
+ }
+diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
+index 2b77d1fc7bb946..1e1296e748357b 100644
+--- a/drivers/clk/imx/clk-imx7d.c
++++ b/drivers/clk/imx/clk-imx7d.c
+@@ -498,9 +498,9 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
+ hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
+- hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
++ hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
+ hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
+- hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
++ hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
+ hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
+diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
+index 1e82f72b75c674..1c95ae905eec82 100644
+--- a/drivers/clk/imx/clk-imx8-acm.c
++++ b/drivers/clk/imx/clk-imx8-acm.c
+@@ -279,8 +279,10 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
+
+ for (i = 0; i < dev_pm->num_domains; i++) {
+ dev_pm->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+- if (IS_ERR(dev_pm->pd_dev[i]))
+- return PTR_ERR(dev_pm->pd_dev[i]);
++ if (IS_ERR(dev_pm->pd_dev[i])) {
++ ret = PTR_ERR(dev_pm->pd_dev[i]);
++ goto detach_pm;
++ }
+
+ dev_pm->pd_dev_link[i] = device_link_add(dev,
+ dev_pm->pd_dev[i],
+@@ -371,7 +373,7 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
+ sels[i].shift, sels[i].width,
+ 0, NULL, NULL);
+ if (IS_ERR(hws[sels[i].clkid])) {
+- pm_runtime_disable(&pdev->dev);
++ ret = PTR_ERR(hws[sels[i].clkid]);
+ goto err_clk_register;
+ }
+ }
+@@ -381,12 +383,16 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
+ if (ret < 0) {
+ dev_err(dev, "failed to register hws for ACM\n");
+- pm_runtime_disable(&pdev->dev);
++ goto err_clk_register;
+ }
+
+-err_clk_register:
++ pm_runtime_put_sync(&pdev->dev);
++ return 0;
+
++err_clk_register:
+ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ clk_imx_acm_detach_pm_domains(&pdev->dev, &priv->dev_pm);
+
+ return ret;
+ }
+diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
+index e4300df88f1acc..ab2a028b3027d3 100644
+--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
++++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
+@@ -18,7 +18,12 @@
+
+ #define CLKEN0 0x000
+ #define CLKEN1 0x004
+-#define SAI_MCLK_SEL(n) (0x300 + 4 * (n)) /* n in 0..5 */
++#define SAI1_MCLK_SEL 0x300
++#define SAI2_MCLK_SEL 0x304
++#define SAI3_MCLK_SEL 0x308
++#define SAI5_MCLK_SEL 0x30C
++#define SAI6_MCLK_SEL 0x310
++#define SAI7_MCLK_SEL 0x314
+ #define PDM_SEL 0x318
+ #define SAI_PLL_GNRL_CTL 0x400
+
+@@ -95,13 +100,13 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1_SEL, {}, \
+ clk_imx8mp_audiomix_sai##n##_mclk1_parents, \
+ ARRAY_SIZE(clk_imx8mp_audiomix_sai##n##_mclk1_parents), \
+- SAI_MCLK_SEL(n), 1, 0 \
++ SAI##n##_MCLK_SEL, 1, 0 \
+ }, { \
+ "sai"__stringify(n)"_mclk2_sel", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2_SEL, {}, \
+ clk_imx8mp_audiomix_sai_mclk2_parents, \
+ ARRAY_SIZE(clk_imx8mp_audiomix_sai_mclk2_parents), \
+- SAI_MCLK_SEL(n), 4, 1 \
++ SAI##n##_MCLK_SEL, 4, 1 \
+ }, { \
+ "sai"__stringify(n)"_ipg_cg", \
+ IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG, \
+@@ -141,6 +146,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
+ PDM_SEL, 2, 0 \
+ }
+
++#define CLK_GATE_PARENT(gname, cname, pname) \
++ { \
++ gname"_cg", \
++ IMX8MP_CLK_AUDIOMIX_##cname, \
++ { .fw_name = pname, .name = pname }, NULL, 1, \
++ CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
++ 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
++ }
++
+ struct clk_imx8mp_audiomix_sel {
+ const char *name;
+ int clkid;
+@@ -158,14 +172,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
+ CLK_GATE("earc", EARC_IPG),
+ CLK_GATE("ocrama", OCRAMA_IPG),
+ CLK_GATE("aud2htx", AUD2HTX_IPG),
+- CLK_GATE("earc_phy", EARC_PHY),
++ CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
+ CLK_GATE("sdma2", SDMA2_ROOT),
+ CLK_GATE("sdma3", SDMA3_ROOT),
+ CLK_GATE("spba2", SPBA2_ROOT),
+ CLK_GATE("dsp", DSP_ROOT),
+ CLK_GATE("dspdbg", DSPDBG_ROOT),
+ CLK_GATE("edma", EDMA_ROOT),
+- CLK_GATE("audpll", AUDPLL_ROOT),
++ CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
+ CLK_GATE("mu2", MU2_ROOT),
+ CLK_GATE("mu3", MU3_ROOT),
+ CLK_PDM,
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 670aa2bab3017e..e561ff7b135fb5 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -551,8 +551,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+
+ hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
+
+- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
+- hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
++ hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
++ hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
+ hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
+ hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
+ hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 4bd65879fcd347..f70ed231b92d63 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ void __iomem *base;
+ int err;
+
+- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+- IMX8MQ_CLK_END), GFP_KERNEL);
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL);
+ if (WARN_ON(!clk_hw_data))
+ return -ENOMEM;
+
+@@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ hws[IMX8MQ_CLK_EXT4] = imx_get_clk_hw_by_name(np, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
+- base = of_iomap(np, 0);
++ base = devm_of_iomap(dev, np, 0, NULL);
+ of_node_put(np);
+- if (WARN_ON(!base))
+- return -ENOMEM;
++ if (WARN_ON(IS_ERR(base))) {
++ err = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+@@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+
+ np = dev->of_node;
+ base = devm_platform_ioremap_resource(pdev, 0);
+- if (WARN_ON(IS_ERR(base)))
+- return PTR_ERR(base);
++ if (WARN_ON(IS_ERR(base))) {
++ err = PTR_ERR(base);
++ goto unregister_hws;
++ }
+
+ /* CORE */
+ hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
+index cadcbb318f5cf5..6d458995f3887d 100644
+--- a/drivers/clk/imx/clk-imx8qxp.c
++++ b/drivers/clk/imx/clk-imx8qxp.c
+@@ -66,6 +66,22 @@ static const char * const lcd_pxl_sels[] = {
+ "lcd_pxl_bypass_div_clk",
+ };
+
++static const char *const lvds0_sels[] = {
++ "clk_dummy",
++ "clk_dummy",
++ "clk_dummy",
++ "clk_dummy",
++ "mipi0_lvds_bypass_clk",
++};
++
++static const char *const lvds1_sels[] = {
++ "clk_dummy",
++ "clk_dummy",
++ "clk_dummy",
++ "clk_dummy",
++ "mipi1_lvds_bypass_clk",
++};
++
+ static const char * const mipi_sels[] = {
+ "clk_dummy",
+ "clk_dummy",
+@@ -147,10 +163,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("adc1_clk", IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER);
+ imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
++ imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+- imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
+ imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
+- imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
++ imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
+
+ /* Audio SS */
+ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
+@@ -183,26 +199,26 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+ imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
+
+ /* Display controller SS */
+- imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
+- imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
+ imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS);
++ imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
++ imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
+ imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
+
+- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
+- imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
+ imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL);
+ imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS);
++ imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
++ imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
+ imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
+
+ /* MIPI-LVDS SS */
+ imx_clk_scu("mipi0_bypass_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu("mipi0_pixel_clk", IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PER);
+- imx_clk_scu("mipi0_lvds_pixel_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu("mipi0_lvds_bypass_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_BYPASS);
+- imx_clk_scu("mipi0_lvds_phy_clk", IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
++ imx_clk_scu2("mipi0_lvds_pixel_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC2);
++ imx_clk_scu2("mipi0_lvds_phy_clk", lvds0_sels, ARRAY_SIZE(lvds0_sels), IMX_SC_R_LVDS_0, IMX_SC_PM_CLK_MISC3);
+ imx_clk_scu2("mipi0_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_MST_BUS);
+ imx_clk_scu2("mipi0_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_SLV_BUS);
+ imx_clk_scu2("mipi0_dsi_phy_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_0, IMX_SC_PM_CLK_PHY);
+@@ -212,9 +228,9 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
+
+ imx_clk_scu("mipi1_bypass_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_BYPASS);
+ imx_clk_scu("mipi1_pixel_clk", IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_PER);
+- imx_clk_scu("mipi1_lvds_pixel_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
+ imx_clk_scu("mipi1_lvds_bypass_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_BYPASS);
+- imx_clk_scu("mipi1_lvds_phy_clk", IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
++ imx_clk_scu2("mipi1_lvds_pixel_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC2);
++ imx_clk_scu2("mipi1_lvds_phy_clk", lvds1_sels, ARRAY_SIZE(lvds1_sels), IMX_SC_R_LVDS_1, IMX_SC_PM_CLK_MISC3);
+
+ imx_clk_scu2("mipi1_dsi_tx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_MST_BUS);
+ imx_clk_scu2("mipi1_dsi_rx_esc_clk", mipi_sels, ARRAY_SIZE(mipi_sels), IMX_SC_R_MIPI_1, IMX_SC_PM_CLK_SLV_BUS);
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index ee5c72369334ff..6bbdd4705d71fd 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
+
+ clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
+ mask, 0, NULL);
+- if (clk) {
+- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- } else {
++ if (IS_ERR(clk)) {
+ pr_err("%s: error registering divider %s\n", __func__, clk_name);
+ iounmap(reg);
++ return;
+ }
++
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
+
+@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
+ clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
+ ARRAY_SIZE(parents) , 0, reg, shift, mask,
+ 0, NULL);
+- if (clk)
+- of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- else
++ if (IS_ERR(clk)) {
+ pr_err("%s: error registering mux %s\n", __func__, clk_name);
++ return;
++ }
++
++ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+ CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
+
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index c81f3e33ce568f..12d9560eb4ba22 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -667,6 +667,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+@@ -747,6 +749,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return;
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -774,6 +778,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+@@ -890,6 +896,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index 1f4c8d0c041abe..9c7f7407d7980b 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -737,6 +737,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+@@ -769,6 +771,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+@@ -807,6 +811,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
+ ARRAY_SIZE(ifr_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 3ee2f5a2319a0e..ffedb1fe3c672d 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -1217,6 +1217,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+@@ -1237,6 +1239,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index 2ebd25f0ce71df..f12d4e9ff0bbaf 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -390,6 +390,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
+ clk_data);
+@@ -545,6 +547,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return;
+
+ for (i = 0; i < CLK_INFRA_NR; i++)
+ infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+@@ -570,6 +574,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+
+ if (!infra_clk_data) {
+ infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
++ if (!infra_clk_data)
++ return -ENOMEM;
+ } else {
+ for (i = 0; i < CLK_INFRA_NR; i++) {
+ if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
+diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+index 9cffd278e9a43e..1b8f859b6b6ccd 100644
+--- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
++++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+@@ -127,7 +127,6 @@ static void clk_mt7622_apmixed_remove(struct platform_device *pdev)
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+- mtk_free_clk_data(clk_data);
+ }
+
+ static const struct of_device_id of_match_clk_mt7622_apmixed[] = {
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index fe714debdc9ece..1bfedc988cfe89 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -77,6 +77,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ CLK_ETH_NR_CLK, clk_data);
+@@ -100,6 +102,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
+ CLK_SGMII_NR_CLK, clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index 2882107d0f2403..b8a1f01bc974d2 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -555,6 +555,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+@@ -579,6 +581,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
+ struct clk_hw_onecell_data *clk_data;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
+@@ -602,6 +606,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
++ if (!clk_data)
++ return -ENOMEM;
+
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
+diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
+index 682f4ca9e89ada..493aa11d3a175f 100644
+--- a/drivers/clk/mediatek/clk-mt7981-topckgen.c
++++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
+@@ -357,8 +357,9 @@ static const struct mtk_mux top_muxes[] = {
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
+ sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
+ 0x1C0, 21),
+- MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
+- 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22),
++ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
++ 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22,
++ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP97B_SEL, "eip97b_sel", eip97b_parents,
+ 0x050, 0x054, 0x058, 24, 3, 31, 0x1C0, 23),
+ /* CLK_CFG_6 */
+diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+index d1239b4b3db74b..41bb2d2e2ea740 100644
+--- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
++++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+@@ -59,7 +59,7 @@ static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+- return ret;
++ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+@@ -69,6 +69,8 @@ static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
+
+ unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
++free_clk_data:
++ mtk_free_clk_data(clk_data);
+
+ return ret;
+ }
+diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+index ba504e19d42031..62d876e150e117 100644
+--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
+ static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
++ .need_runtime_pm = true,
+ };
+
+ static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
+index 6e23461a04559c..934d5a15acfc58 100644
+--- a/drivers/clk/mediatek/clk-mt8183.c
++++ b/drivers/clk/mediatek/clk-mt8183.c
+@@ -790,7 +790,7 @@ static const struct mtk_gate infra_clks[] = {
+ /* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. */
+ GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL),
+ /* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */
+- GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL),
++ GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "clk32k", 4, CLK_IS_CRITICAL),
+ GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
+ GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
+ GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7),
+diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
+index 01a2ef8f594ef5..3f62ec75073367 100644
+--- a/drivers/clk/mediatek/clk-mt8365-mm.c
++++ b/drivers/clk/mediatek/clk-mt8365-mm.c
+@@ -53,7 +53,7 @@ static const struct mtk_gate mm_clks[] = {
+ GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
+ GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
+ GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
+- GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
++ GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "dpi0_sel", 20),
+ GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
+ GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
+ GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index 2e55368dc4d820..ba1d1c495bc2bf 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -13,6 +13,7 @@
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
+ #include <linux/slab.h>
+
+ #include "clk-mtk.h"
+@@ -494,6 +495,18 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
+ }
+
++
++ if (mcd->need_runtime_pm) {
++ devm_pm_runtime_enable(&pdev->dev);
++ /*
++		 * Do a pm_runtime_resume_and_get() to work around a possible
++ * deadlock between clk_register() and the genpd framework.
++ */
++ r = pm_runtime_resume_and_get(&pdev->dev);
++ if (r)
++ return r;
++ }
++
+ /* Calculate how many clk_hw_onecell_data entries to allocate */
+ num_clks = mcd->num_clks + mcd->num_composite_clks;
+ num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
+@@ -574,6 +587,9 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ goto unregister_clks;
+ }
+
++ if (mcd->need_runtime_pm)
++ pm_runtime_put(&pdev->dev);
++
+ return r;
+
+ unregister_clks:
+@@ -604,6 +620,9 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ free_base:
+ if (mcd->shared_io && base)
+ iounmap(base);
++
++ if (mcd->need_runtime_pm)
++ pm_runtime_put(&pdev->dev);
+ return r;
+ }
+
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 22096501a60a7b..c17fe1c2d732da 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -237,6 +237,8 @@ struct mtk_clk_desc {
+
+ int (*clk_notifier_func)(struct device *dev, struct clk *clk);
+ unsigned int mfg_clk_idx;
++
++ bool need_runtime_pm;
+ };
+
+ int mtk_clk_pdev_probe(struct platform_device *pdev);
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index a4eca5fd539c83..513ab6b1b32292 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -321,10 +321,8 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
+
+ ret = clk_hw_register(NULL, &pll->hw);
+
+- if (ret) {
+- kfree(pll);
++ if (ret)
+ return ERR_PTR(ret);
+- }
+
+ return &pll->hw;
+ }
+@@ -340,6 +338,8 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
+ return ERR_PTR(-ENOMEM);
+
+ hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
++ if (IS_ERR(hw))
++ kfree(pll);
+
+ return hw;
+ }
+diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
+index 3a2b3f90be25d5..094ec8a26d6683 100644
+--- a/drivers/clk/mediatek/clk-pllfh.c
++++ b/drivers/clk/mediatek/clk-pllfh.c
+@@ -68,7 +68,7 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
+
+ node = of_find_compatible_node(NULL, NULL, compatible_node);
+ if (!node) {
+- pr_err("cannot find \"%s\"\n", compatible_node);
++ pr_warn("cannot find \"%s\"\n", compatible_node);
+ return;
+ }
+
+diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
+index c12f81dfa67453..5f60f2bcca592a 100644
+--- a/drivers/clk/meson/axg.c
++++ b/drivers/clk/meson/axg.c
+@@ -2142,7 +2142,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
+ &axg_vclk_input,
+ &axg_vclk2_input,
+ &axg_vclk_div,
++ &axg_vclk_div1,
+ &axg_vclk2_div,
++ &axg_vclk2_div1,
+ &axg_vclk_div2_en,
+ &axg_vclk_div4_en,
+ &axg_vclk_div6_en,
+diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
+index fb0df64cf053c4..c5a7ba1deaa3a1 100644
+--- a/drivers/clk/mmp/clk-of-pxa168.c
++++ b/drivers/clk/mmp/clk-of-pxa168.c
+@@ -308,18 +308,21 @@ static void __init pxa168_clk_init(struct device_node *np)
+ pxa_unit->mpmu_base = of_iomap(np, 0);
+ if (!pxa_unit->mpmu_base) {
+ pr_err("failed to map mpmu registers\n");
++ kfree(pxa_unit);
+ return;
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
++ kfree(pxa_unit);
+ return;
+ }
+
+ pxa_unit->apbc_base = of_iomap(np, 2);
+ if (!pxa_unit->apbc_base) {
+ pr_err("failed to map apbc registers\n");
++ kfree(pxa_unit);
+ return;
+ }
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 865db5202e4cfe..a79b837583894f 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -131,6 +131,7 @@ config IPQ_APSS_6018
+ tristate "IPQ APSS Clock Controller"
+ select IPQ_APSS_PLL
+ depends on QCOM_APCS_IPC || COMPILE_TEST
++ depends on QCOM_SMEM
+ help
+ Support for APSS clock controller on IPQ platforms. The
+ APSS clock controller manages the Mux and enable block that feeds the
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index e170331858cc14..41279e5437a620 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -68,13 +68,13 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_stromer_ops,
++ .ops = &clk_alpha_pll_stromer_plus_ops,
+ },
+ },
+ };
+
+ static const struct alpha_pll_config ipq5332_pll_config = {
+- .l = 0x3e,
++ .l = 0x2d,
+ .config_ctl_val = 0x4001075b,
+ .config_ctl_hi_val = 0x304,
+ .main_output_mask = BIT(0),
+diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
+index 49f046ea857cbe..c1551de51d4013 100644
+--- a/drivers/clk/qcom/camcc-sc7280.c
++++ b/drivers/clk/qcom/camcc-sc7280.c
+@@ -2260,6 +2260,7 @@ static struct gdsc cam_cc_bps_gdsc = {
+ .name = "cam_cc_bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+
+@@ -2269,6 +2270,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
+ .name = "cam_cc_ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE,
+ };
+
+@@ -2278,6 +2280,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
+ .name = "cam_cc_ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE,
+ };
+
+@@ -2287,6 +2290,7 @@ static struct gdsc cam_cc_ife_2_gdsc = {
+ .name = "cam_cc_ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE,
+ };
+
+@@ -2296,6 +2300,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
+ .name = "cam_cc_ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .parent = &cam_cc_titan_top_gdsc.pd,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ };
+
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index e4ef645f65d1fd..8b3e5f84e89a77 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -40,7 +40,7 @@
+
+ #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+ # define PLL_POST_DIV_SHIFT 8
+-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
++# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
+ # define PLL_ALPHA_EN BIT(24)
+ # define PLL_ALPHA_MODE BIT(25)
+ # define PLL_VCO_SHIFT 20
+@@ -212,7 +212,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+- [PLL_OFF_CONFIG_CTL_U] = 0xff,
+ [PLL_OFF_TEST_CTL] = 0x30,
+ [PLL_OFF_TEST_CTL_U] = 0x34,
+ [PLL_OFF_STATUS] = 0x28,
+@@ -1479,8 +1478,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ }
+
+ return regmap_update_bits(regmap, PLL_USER_CTL(pll),
+- PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+- val << PLL_POST_DIV_SHIFT);
++ PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
++ val << pll->post_div_shift);
+ }
+
+ const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
+@@ -1639,7 +1638,7 @@ static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
+ if (ret < 0)
+ return ret;
+
+- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
++ regmap_update_bits(pll->clkr.regmap, PLL_L_VAL(pll), LUCID_EVO_PLL_L_VAL_MASK, l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ /* Latch the PLL input */
+@@ -1758,6 +1757,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+
++/**
++ * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll
++ *
++ * @pll: clk alpha pll
++ * @regmap: register map
++ * @config: configuration to apply for pll
++ */
++void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
++ const struct alpha_pll_config *config)
++{
++ /*
++ * If the bootloader left the PLL enabled it's likely that there are
++ * RCGs that will lock up if we disable the PLL below.
++ */
++ if (trion_pll_is_enabled(pll, regmap)) {
++ pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n");
++ return;
++ }
++
++ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
++ regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
++ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
++ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
++ config->config_ctl_val);
++ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
++ config->config_ctl_hi_val);
++ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
++ config->config_ctl_hi1_val);
++ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
++ config->user_ctl_val);
++ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
++ config->user_ctl_hi_val);
++ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
++ config->user_ctl_hi1_val);
++ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
++ config->test_ctl_val);
++ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
++ config->test_ctl_hi_val);
++ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
++ config->test_ctl_hi1_val);
++
++ /* Disable PLL output */
++ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
++
++ /* Set operation mode to OFF */
++ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
++
++ /* Place the PLL in STANDBY mode */
++ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
++}
++EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure);
++
+ static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
+ {
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+@@ -2445,6 +2496,8 @@ static int clk_alpha_pll_stromer_set_rate(struct clk_hw *hw, unsigned long rate,
+ rate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_REG_BITWIDTH);
+
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
++
++ a <<= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ a >> ALPHA_BITWIDTH);
+@@ -2479,3 +2532,69 @@ const struct clk_ops clk_alpha_pll_stromer_ops = {
+ .set_rate = clk_alpha_pll_stromer_set_rate,
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_ops);
++
++static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
++ unsigned long rate,
++ unsigned long prate)
++{
++ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
++ u32 l, alpha_width = pll_alpha_width(pll);
++ int ret, pll_mode;
++ u64 a;
++
++ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
++
++ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &pll_mode);
++ if (ret)
++ return ret;
++
++ regmap_write(pll->clkr.regmap, PLL_MODE(pll), 0);
++
++ /* Delay of 2 output clock ticks required until output is disabled */
++ udelay(1);
++
++ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
++
++ if (alpha_width > ALPHA_BITWIDTH)
++ a <<= alpha_width - ALPHA_BITWIDTH;
++
++ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
++ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
++ a >> ALPHA_BITWIDTH);
++
++ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
++ PLL_ALPHA_EN, PLL_ALPHA_EN);
++
++ regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
++
++	/* Wait at least five microseconds */
++ udelay(5);
++ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N,
++ PLL_RESET_N);
++
++	/* The lock time should be less than 50 microseconds in the worst case */
++ usleep_range(50, 60);
++
++ ret = wait_for_pll_enable_lock(pll);
++ if (ret) {
++ pr_err("Wait for PLL enable lock failed [%s] %d\n",
++ clk_hw_get_name(hw), ret);
++ return ret;
++ }
++
++ if (pll_mode & PLL_OUTCTRL)
++ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL,
++ PLL_OUTCTRL);
++
++ return 0;
++}
++
++const struct clk_ops clk_alpha_pll_stromer_plus_ops = {
++ .prepare = clk_alpha_pll_enable,
++ .unprepare = clk_alpha_pll_disable,
++ .is_enabled = clk_alpha_pll_is_enabled,
++ .recalc_rate = clk_alpha_pll_recalc_rate,
++ .determine_rate = clk_alpha_pll_stromer_determine_rate,
++ .set_rate = clk_alpha_pll_stromer_plus_set_rate,
++};
++EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
+index e4bd863027ab63..3fd0ef41c72c89 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.h
++++ b/drivers/clk/qcom/clk-alpha-pll.h
+@@ -152,6 +152,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+ extern const struct clk_ops clk_alpha_pll_huayra_ops;
+ extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
+ extern const struct clk_ops clk_alpha_pll_stromer_ops;
++extern const struct clk_ops clk_alpha_pll_stromer_plus_ops;
+
+ extern const struct clk_ops clk_alpha_pll_fabia_ops;
+ extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
+@@ -197,6 +198,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+
+ void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
++void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
++ const struct alpha_pll_config *config);
+ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+ void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
+index e6d84c8c7989c8..84c497f361bc6b 100644
+--- a/drivers/clk/qcom/clk-rcg.h
++++ b/drivers/clk/qcom/clk-rcg.h
+@@ -176,6 +176,7 @@ extern const struct clk_ops clk_byte2_ops;
+ extern const struct clk_ops clk_pixel_ops;
+ extern const struct clk_ops clk_gfx3d_ops;
+ extern const struct clk_ops clk_rcg2_shared_ops;
++extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
+ extern const struct clk_ops clk_dp_ops;
+
+ struct clk_rcg_dfs_data {
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index e22baf3a7112aa..461f54fe5e4f1f 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
+ static unsigned long
+ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+ {
+- if (hid_div) {
+- rate *= 2;
+- rate /= hid_div + 1;
+- }
++ if (hid_div)
++ rate = mult_frac(rate, 2, hid_div + 1);
+
+- if (mode) {
+- u64 tmp = rate;
+- tmp *= m;
+- do_div(tmp, n);
+- rate = tmp;
+- }
++ if (mode)
++ rate = mult_frac(rate, m, n);
+
+ return rate;
+ }
+@@ -1144,7 +1138,39 @@ clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+ return clk_rcg2_recalc_rate(hw, parent_rate);
+ }
+
++static int clk_rcg2_shared_init(struct clk_hw *hw)
++{
++ /*
++ * This does a few things:
++ *
++ * 1. Sets rcg->parked_cfg to reflect the value at probe so that the
++ * proper parent is reported from clk_rcg2_shared_get_parent().
++ *
++ * 2. Clears the force enable bit of the RCG because we rely on child
++ * clks (branches) to turn the RCG on/off with a hardware feedback
++ * mechanism and only set the force enable bit in the RCG when we
++ * want to make sure the clk stays on for parent switches or
++ * parking.
++ *
++ * 3. Parks shared RCGs on the safe source at registration because we
++ * can't be certain that the parent clk will stay on during boot,
++ * especially if the parent is shared. If this RCG is enabled at
++ * boot, and the parent is turned off, the RCG will get stuck on. A
++	 *    GDSC can wedge if it is turned on and the RCG is stuck on because
++ * the GDSC's controller will hang waiting for the clk status to
++ * toggle on when it never does.
++ *
++ * The safest option here is to "park" the RCG at init so that the clk
++ * can never get stuck on or off. This ensures the GDSC can't get
++ * wedged.
++ */
++ clk_rcg2_shared_disable(hw);
++
++ return 0;
++}
++
+ const struct clk_ops clk_rcg2_shared_ops = {
++ .init = clk_rcg2_shared_init,
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_shared_get_parent,
+@@ -1156,6 +1182,36 @@ const struct clk_ops clk_rcg2_shared_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+
++static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
++{
++ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
++
++ /*
++ * Read the config register so that the parent is properly mapped at
++ * registration time.
++ */
++ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
++
++ return 0;
++}
++
++/*
++ * Like clk_rcg2_shared_ops but skips the init so that the clk frequency is left
++ * unchanged at registration time.
++ */
++const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
++ .init = clk_rcg2_shared_no_init_park,
++ .enable = clk_rcg2_shared_enable,
++ .disable = clk_rcg2_shared_disable,
++ .get_parent = clk_rcg2_shared_get_parent,
++ .set_parent = clk_rcg2_shared_set_parent,
++ .recalc_rate = clk_rcg2_shared_recalc_rate,
++ .determine_rate = clk_rcg2_determine_rate,
++ .set_rate = clk_rcg2_shared_set_rate,
++ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
++};
++EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
++
+ /* Common APIs to be used for DFS based RCGR */
+ static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
+ struct freq_tbl *f)
+diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
+index 4c5b552b47b6a1..a556c9e77d192d 100644
+--- a/drivers/clk/qcom/clk-rpmh.c
++++ b/drivers/clk/qcom/clk-rpmh.c
+@@ -263,6 +263,8 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
+ cmd_state = 0;
+ }
+
++ cmd_state = min(cmd_state, BCM_TCS_CMD_VOTE_MASK);
++
+ if (c->last_sent_aggr_state != cmd_state) {
+ cmd.addr = c->res_addr;
+ cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
+diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
+index 0191fc0dd7dac1..789903a1b3f2b3 100644
+--- a/drivers/clk/qcom/clk-smd-rpm.c
++++ b/drivers/clk/qcom/clk-smd-rpm.c
+@@ -758,6 +758,7 @@ static struct clk_smd_rpm *msm8976_clks[] = {
+
+ static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
+ .clks = msm8976_clks,
++ .num_clks = ARRAY_SIZE(msm8976_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+ };
+diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
+index 735adfefc37983..e792e0b130d333 100644
+--- a/drivers/clk/qcom/dispcc-sdm845.c
++++ b/drivers/clk/qcom/dispcc-sdm845.c
+@@ -759,6 +759,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
+
+ static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
++ .en_few_wait_val = 0x6,
++ .en_rest_wait_val = 0x5,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index ea6f54ed846ece..441f042f5ea459 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -221,26 +221,17 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ },
+ };
+
+-static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+- F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+- { }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x10f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+- .freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
+index e17bb8b543b51b..317a7e2b50bfbc 100644
+--- a/drivers/clk/qcom/dispcc-sm8250.c
++++ b/drivers/clk/qcom/dispcc-sm8250.c
+@@ -851,6 +851,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = {
+ &disp_cc_mdss_dp_link1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -886,6 +887,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1011,6 +1013,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1359,8 +1362,13 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
+ disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL;
+ }
+
+- clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+- clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
++ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
++ clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
++ clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
++ } else {
++ clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
++ clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
++ }
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, 0x8000, 0x10, 0x10);
+diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
+index 2c4aecd75186b0..239cc726c7e296 100644
+--- a/drivers/clk/qcom/dispcc-sm8450.c
++++ b/drivers/clk/qcom/dispcc-sm8450.c
+@@ -309,26 +309,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ },
+ };
+
+-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- { }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -382,13 +373,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -442,13 +432,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -502,13 +491,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
+index aefa19f3c2c514..95b4c0548f50d6 100644
+--- a/drivers/clk/qcom/dispcc-sm8550.c
++++ b/drivers/clk/qcom/dispcc-sm8550.c
+@@ -81,6 +81,10 @@ static const struct alpha_pll_config disp_cc_pll0_config = {
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
++ .test_ctl_val = 0x00000000,
++ .test_ctl_hi_val = 0x00000003,
++ .test_ctl_hi1_val = 0x00009000,
++ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+ };
+@@ -108,6 +112,10 @@ static const struct alpha_pll_config disp_cc_pll1_config = {
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
++ .test_ctl_val = 0x00000000,
++ .test_ctl_hi_val = 0x00000003,
++ .test_ctl_hi1_val = 0x00009000,
++ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+ };
+@@ -188,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+- { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 },
++ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+@@ -205,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = {
+
+ static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+- { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 },
++ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+ };
+
+@@ -337,26 +345,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ },
+ };
+
+-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+- { }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -401,7 +400,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_dp_ops,
++ .ops = &clk_rcg2_ops,
+ },
+ };
+
+@@ -410,13 +409,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -470,13 +468,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -530,13 +527,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_byte2_ops,
+ },
+ };
+
+@@ -566,7 +562,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -581,7 +577,7 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1615,7 +1611,7 @@ static struct gdsc mdss_gdsc = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = HW_CTRL | RETAIN_FF_ENABLE,
++ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc mdss_int2_gdsc = {
+@@ -1624,7 +1620,7 @@ static struct gdsc mdss_int2_gdsc = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = HW_CTRL | RETAIN_FF_ENABLE,
++ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+ };
+
+ static struct clk_regmap *disp_cc_sm8550_clocks[] = {
+diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
+index 19dc2b71cacf00..3136ba1c2a59cc 100644
+--- a/drivers/clk/qcom/gcc-ipq5018.c
++++ b/drivers/clk/qcom/gcc-ipq5018.c
+@@ -128,7 +128,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -143,7 +142,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -158,7 +156,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -859,6 +856,7 @@ static struct clk_rcg2 lpass_sway_clk_src = {
+
+ static const struct freq_tbl ftbl_pcie0_aux_clk_src[] = {
+ F(2000000, P_XO, 12, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 pcie0_aux_clk_src = {
+@@ -1101,6 +1099,7 @@ static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 qpic_io_macro_clk_src = {
+@@ -1196,6 +1195,7 @@ static struct clk_rcg2 ubi0_axi_clk_src = {
+ static const struct freq_tbl ftbl_ubi0_core_clk_src[] = {
+ F(850000000, P_UBI32_PLL, 1, 0, 0),
+ F(1000000000, P_UBI32_PLL, 1, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 ubi0_core_clk_src = {
+@@ -1756,7 +1756,7 @@ static struct clk_branch gcc_gmac0_sys_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .halt_bit = 31,
+ .clkr = {
+- .enable_reg = 0x683190,
++ .enable_reg = 0x68190,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gmac0_sys_clk",
+@@ -2182,7 +2182,7 @@ static struct clk_branch gcc_pcie1_axi_s_clk = {
+ };
+
+ static struct clk_branch gcc_pcie1_pipe_clk = {
+- .halt_reg = 8,
++ .halt_reg = 0x76018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .halt_bit = 31,
+ .clkr = {
+@@ -3634,7 +3634,7 @@ static const struct qcom_reset_map gcc_ipq5018_resets[] = {
+ [GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
+ [GCC_TCSR_BCR] = { 0x28000, 0 },
+ [GCC_TLMM_BCR] = { 0x34000, 0 },
+- [GCC_UBI0_AXI_ARES] = { 0x680},
++ [GCC_UBI0_AXI_ARES] = { 0x68010, 0 },
+ [GCC_UBI0_AHB_ARES] = { 0x68010, 1 },
+ [GCC_UBI0_NC_AXI_ARES] = { 0x68010, 2 },
+ [GCC_UBI0_DBG_ARES] = { 0x68010, 3 },
+diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
+index b02026f8549b2f..6a4877d8882946 100644
+--- a/drivers/clk/qcom/gcc-ipq5332.c
++++ b/drivers/clk/qcom/gcc-ipq5332.c
+@@ -71,7 +71,6 @@ static struct clk_fixed_factor gpll0_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -85,7 +84,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -114,7 +112,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -154,7 +151,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -3392,6 +3388,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
+ [GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr,
+ [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
+ [GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr,
++ [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 6120fbbc5de053..2e4189e770d3ff 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -72,7 +72,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -86,7 +85,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -161,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -192,7 +189,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -243,7 +239,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -274,7 +269,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -1560,6 +1554,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {
+
+ static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
++ { }
+ };
+
+ static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
+@@ -1740,6 +1735,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(216000000, P_GPLL6, 5, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
++ { }
+ };
+
+ static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 63ac2ced76bb95..7bc679871f324f 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -121,7 +120,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -154,7 +152,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -188,7 +185,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -201,7 +197,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -266,7 +261,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -650,6 +644,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
+
+ static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
++ { }
+ };
+
+ static const struct clk_parent_data gcc_xo_gpll0_sleep_clk[] = {
+@@ -801,6 +796,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
++ { }
+ };
+
+ static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 8f430367299e66..cdbbf2cc9c5d19 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -65,7 +65,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = {
+
+ static struct clk_alpha_pll gpll0_main = {
+ .offset = 0x20000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(0),
+@@ -87,14 +87,13 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x20000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+@@ -102,14 +101,13 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll4_main = {
+ .offset = 0x22000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(2),
+@@ -124,7 +122,7 @@ static struct clk_alpha_pll gpll4_main = {
+
+ static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x22000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+@@ -132,14 +130,13 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll2_main = {
+ .offset = 0x21000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(1),
+@@ -154,7 +151,7 @@ static struct clk_alpha_pll gpll2_main = {
+
+ static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x21000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll2",
+@@ -162,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+ };
+@@ -2086,6 +2082,7 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
+ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
+ F(150000000, P_GPLL4, 8, 0, 0),
+ F(300000000, P_GPLL4, 4, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+@@ -2143,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+
+ static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16010,
++ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+- .enable_reg = 0x16010,
+- .enable_mask = BIT(0),
++ .enable_reg = 0xb004,
++ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_crypto_axi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+@@ -2159,9 +2157,10 @@ static struct clk_branch gcc_crypto_axi_clk = {
+
+ static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16014,
++ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+- .enable_reg = 0x16014,
+- .enable_mask = BIT(0),
++ .enable_reg = 0xb004,
++ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_crypto_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index 14dcc3f036683c..e7b03a17514a5d 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -244,71 +244,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ { .hw = &gpll0_early_div.hw }
+ };
+
+-static const struct freq_tbl ftbl_system_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+- F(100000000, P_GPLL0, 6, 0, 0),
+- F(150000000, P_GPLL0, 4, 0, 0),
+- F(200000000, P_GPLL0, 3, 0, 0),
+- F(240000000, P_GPLL0, 2.5, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 system_noc_clk_src = {
+- .cmd_rcgr = 0x0401c,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+- .freq_tbl = ftbl_system_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "system_noc_clk_src",
+- .parent_data = gcc_xo_gpll0_gpll0_early_div,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static const struct freq_tbl ftbl_config_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(37500000, P_GPLL0, 16, 0, 0),
+- F(75000000, P_GPLL0, 8, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 config_noc_clk_src = {
+- .cmd_rcgr = 0x0500c,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .freq_tbl = ftbl_config_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "config_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+-static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
+- F(19200000, P_XO, 1, 0, 0),
+- F(37500000, P_GPLL0, 16, 0, 0),
+- F(50000000, P_GPLL0, 12, 0, 0),
+- F(75000000, P_GPLL0, 8, 0, 0),
+- F(100000000, P_GPLL0, 6, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 periph_noc_clk_src = {
+- .cmd_rcgr = 0x06014,
+- .hid_width = 5,
+- .parent_map = gcc_xo_gpll0_map,
+- .freq_tbl = ftbl_periph_noc_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "periph_noc_clk_src",
+- .parent_data = gcc_xo_gpll0,
+- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+ static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+@@ -1297,11 +1232,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1464,11 +1395,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1498,11 +1424,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1549,11 +1470,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1583,11 +1499,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1617,11 +1528,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1635,11 +1541,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -1977,11 +1878,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2318,11 +2214,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2353,11 +2244,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2370,11 +2256,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2422,11 +2303,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2520,11 +2396,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2537,11 +2408,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2554,11 +2420,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2606,11 +2467,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2623,11 +2479,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2640,11 +2491,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2692,11 +2538,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2709,11 +2550,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2726,11 +2562,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2778,11 +2609,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2829,11 +2655,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3060,11 +2881,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_snoc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3077,11 +2894,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_cnoc_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3094,11 +2907,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3111,11 +2920,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3162,10 +2967,6 @@ static struct clk_branch gcc_dcc_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3178,10 +2979,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3194,11 +2991,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &periph_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3347,10 +3139,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &config_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3363,10 +3151,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3379,10 +3163,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3395,10 +3175,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+- .parent_hws = (const struct clk_hw*[]){
+- &system_noc_clk_src.clkr.hw,
+- },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -3495,9 +3271,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+diff --git a/drivers/clk/qcom/gcc-sa8775p.c b/drivers/clk/qcom/gcc-sa8775p.c
+index 8171d23c96e64d..a54438205698cd 100644
+--- a/drivers/clk/qcom/gcc-sa8775p.c
++++ b/drivers/clk/qcom/gcc-sa8775p.c
+@@ -4305,74 +4305,114 @@ static struct clk_branch gcc_video_axi1_clk = {
+
+ static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0xa9004,
++ .collapse_ctrl = 0x4b104,
++ .collapse_mask = BIT(0),
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x77004,
++ .collapse_ctrl = 0x4b104,
++ .collapse_mask = BIT(1),
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc ufs_card_gdsc = {
+ .gdscr = 0x81004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ufs_card_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x83004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc usb20_prim_gdsc = {
+ .gdscr = 0x1c004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x1b004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc usb30_sec_gdsc = {
+ .gdscr = 0x2f004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc emac0_gdsc = {
+ .gdscr = 0xb6004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "emac0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct gdsc emac1_gdsc = {
+ .gdscr = 0xb4004,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "emac1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+ };
+
+ static struct clk_regmap *gcc_sa8775p_clocks[] = {
+diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
+index 2b661df5de2660..bc81026292fc9b 100644
+--- a/drivers/clk/qcom/gcc-sc7280.c
++++ b/drivers/clk/qcom/gcc-sc7280.c
+@@ -3467,6 +3467,9 @@ static int gcc_sc7280_probe(struct platform_device *pdev)
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13));
+
++ /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
++ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
++
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
+index ae21473815596d..ec0c45881c67a7 100644
+--- a/drivers/clk/qcom/gcc-sc8180x.c
++++ b/drivers/clk/qcom/gcc-sc8180x.c
+@@ -142,6 +142,23 @@ static struct clk_alpha_pll gpll7 = {
+ },
+ };
+
++static struct clk_alpha_pll gpll9 = {
++ .offset = 0x1c000,
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
++ .clkr = {
++ .enable_reg = 0x52000,
++ .enable_mask = BIT(9),
++ .hw.init = &(const struct clk_init_data) {
++ .name = "gpll9",
++ .parent_data = &(const struct clk_parent_data) {
++ .fw_name = "bi_tcxo",
++ },
++ .num_parents = 1,
++ .ops = &clk_alpha_pll_fixed_trion_ops,
++ },
++ },
++};
++
+ static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+@@ -241,7 +258,7 @@ static const struct parent_map gcc_parent_map_7[] = {
+ static const struct clk_parent_data gcc_parents_7[] = {
+ { .fw_name = "bi_tcxo", },
+ { .hw = &gpll0.clkr.hw },
+- { .name = "gppl9" },
++ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ };
+@@ -260,28 +277,6 @@ static const struct clk_parent_data gcc_parents_8[] = {
+ { .hw = &gpll0_out_even.clkr.hw },
+ };
+
+-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+- F(19200000, P_BI_TCXO, 1, 0, 0),
+- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+- { }
+-};
+-
+-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+- .cmd_rcgr = 0x48014,
+- .mnd_width = 0,
+- .hid_width = 5,
+- .parent_map = gcc_parent_map_0,
+- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+- .clkr.hw.init = &(struct clk_init_data){
+- .name = "gcc_cpuss_ahb_clk_src",
+- .parent_data = gcc_parents_0,
+- .num_parents = ARRAY_SIZE(gcc_parents_0),
+- .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
+- },
+-};
+-
+ static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+@@ -916,7 +911,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
++ F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+ };
+
+@@ -939,9 +934,8 @@ static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+- F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+- F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
++ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+ };
+
+@@ -1599,25 +1593,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ },
+ };
+
+-/* For CPUSS functionality the AHB clock needs to be left enabled */
+-static struct clk_branch gcc_cpuss_ahb_clk = {
+- .halt_reg = 0x48000,
+- .halt_check = BRANCH_HALT_VOTED,
+- .clkr = {
+- .enable_reg = 0x52004,
+- .enable_mask = BIT(21),
+- .hw.init = &(struct clk_init_data){
+- .name = "gcc_cpuss_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){
+- &gcc_cpuss_ahb_clk_src.clkr.hw
+- },
+- .num_parents = 1,
+- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+- .ops = &clk_branch2_ops,
+- },
+- },
+-};
+-
+ static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+@@ -3150,25 +3125,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
+ },
+ };
+
+-/* For CPUSS functionality the SYS NOC clock needs to be left enabled */
+-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+- .halt_reg = 0x4819c,
+- .halt_check = BRANCH_HALT_VOTED,
+- .clkr = {
+- .enable_reg = 0x52004,
+- .enable_mask = BIT(0),
+- .hw.init = &(struct clk_init_data){
+- .name = "gcc_sys_noc_cpuss_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){
+- &gcc_cpuss_ahb_clk_src.clkr.hw
+- },
+- .num_parents = 1,
+- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+- .ops = &clk_branch2_ops,
+- },
+- },
+-};
+-
+ static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+@@ -4258,8 +4214,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+@@ -4396,7 +4350,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+@@ -4483,6 +4436,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = {
+ [GPLL1] = &gpll1.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL7] = &gpll7.clkr,
++ [GPLL9] = &gpll9.clkr,
+ };
+
+ static const struct qcom_reset_map gcc_sc8180x_resets[] = {
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index 725cd52d2398ed..ea4c3bf4fb9bf7 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -4037,3 +4037,4 @@ module_exit(gcc_sdm845_exit);
+ MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("platform:gcc-sdm845");
++MODULE_SOFTDEP("pre: rpmhpd");
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index cf4a7b6e0b23ad..0559a33faf00e6 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+- .parent_hws = (const struct clk_hw*[]){
+- &gpll0.clkr.hw,
++ .parent_data = &(const struct clk_parent_data){
++ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+- &gpll0.clkr.hw,
++ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+- .parent_hws = (const struct clk_hw*[]){
+- &gpll0.clkr.hw,
++ .parent_data = &(const struct clk_parent_data){
++ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 41ab210875fb24..05d115c52dfebb 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -774,7 +774,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+- .flags = CLK_SET_RATE_PARENT,
++ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
+index c6c5261264f118..272da807c6945d 100644
+--- a/drivers/clk/qcom/gcc-sm8250.c
++++ b/drivers/clk/qcom/gcc-sm8250.c
+@@ -3226,7 +3226,7 @@ static struct gdsc pcie_0_gdsc = {
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc pcie_1_gdsc = {
+@@ -3234,7 +3234,7 @@ static struct gdsc pcie_1_gdsc = {
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc pcie_2_gdsc = {
+@@ -3242,7 +3242,7 @@ static struct gdsc pcie_2_gdsc = {
+ .pd = {
+ .name = "pcie_2_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc ufs_card_gdsc = {
+diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
+index 56354298255160..4c55df89ddca7d 100644
+--- a/drivers/clk/qcom/gcc-sm8450.c
++++ b/drivers/clk/qcom/gcc-sm8450.c
+@@ -2974,7 +2974,7 @@ static struct gdsc pcie_0_gdsc = {
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc pcie_1_gdsc = {
+@@ -2982,7 +2982,7 @@ static struct gdsc pcie_1_gdsc = {
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ };
+
+ static struct gdsc ufs_phy_gdsc = {
+diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
+index 586126c4dd907c..eb3765c57b6502 100644
+--- a/drivers/clk/qcom/gcc-sm8550.c
++++ b/drivers/clk/qcom/gcc-sm8550.c
+@@ -401,7 +401,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -416,7 +416,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -431,7 +431,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -451,7 +451,7 @@ static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -471,7 +471,7 @@ static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -486,7 +486,7 @@ static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -501,7 +501,7 @@ static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -521,7 +521,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1025,7 +1025,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1048,7 +1048,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1071,7 +1071,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1093,7 +1093,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1114,7 +1114,7 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1136,7 +1136,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+ };
+
+@@ -1174,7 +1174,7 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -1189,7 +1189,7 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -2998,38 +2998,46 @@ static struct clk_branch gcc_video_axi1_clk = {
+
+ static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
++ .collapse_ctrl = 0x52020,
++ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
++ .collapse_ctrl = 0x52020,
++ .collapse_mask = BIT(3),
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
++ .collapse_ctrl = 0x52020,
++ .collapse_mask = BIT(1),
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0x8e000,
++ .collapse_ctrl = 0x52020,
++ .collapse_mask = BIT(4),
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc ufs_phy_gdsc = {
+@@ -3038,7 +3046,7 @@ static struct gdsc ufs_phy_gdsc = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc ufs_mem_phy_gdsc = {
+@@ -3047,7 +3055,7 @@ static struct gdsc ufs_mem_phy_gdsc = {
+ .name = "ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc usb30_prim_gdsc = {
+@@ -3056,7 +3064,7 @@ static struct gdsc usb30_prim_gdsc = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc usb3_phy_gdsc = {
+@@ -3065,7 +3073,7 @@ static struct gdsc usb3_phy_gdsc = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = POLL_CFG_GDSCR,
++ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+ static struct clk_regmap *gcc_sm8550_clocks[] = {
+diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
+index 26ecfa63be1939..0d9a8379efaa83 100644
+--- a/drivers/clk/qcom/gpucc-sa8775p.c
++++ b/drivers/clk/qcom/gpucc-sa8775p.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+@@ -161,7 +161,7 @@ static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -181,7 +181,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -200,7 +200,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -280,7 +280,7 @@ static struct clk_branch gpu_cc_ahb_clk = {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -294,8 +294,7 @@ static struct clk_branch gpu_cc_cb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cb_clk",
+- .flags = CLK_IS_CRITICAL,
+- .ops = &clk_branch2_ops,
++ .ops = &clk_branch2_aon_ops,
+ },
+ },
+ };
+@@ -312,7 +311,7 @@ static struct clk_branch gpu_cc_crc_ahb_clk = {
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -330,7 +329,7 @@ static struct clk_branch gpu_cc_cx_ff_clk = {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -348,7 +347,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+@@ -362,7 +361,6 @@ static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+- .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -380,7 +378,7 @@ static struct clk_branch gpu_cc_cxo_aon_clk = {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -398,7 +396,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
+ &gpu_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -416,7 +414,7 @@ static struct clk_branch gpu_cc_demet_clk = {
+ &gpu_cc_demet_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+@@ -430,7 +428,6 @@ static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+- .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -448,7 +445,7 @@ static struct clk_branch gpu_cc_hub_aon_clk = {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+@@ -466,7 +463,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
++ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+@@ -480,7 +477,6 @@ static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_memnoc_gfx_clk",
+- .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -494,7 +490,6 @@ static struct clk_branch gpu_cc_sleep_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+- .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -528,16 +523,22 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = {
+
+ static struct gdsc cx_gdsc = {
+ .gdscr = 0x9108,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .gds_hw_ctrl = 0x953c,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON,
++ .flags = VOTABLE | RETAIN_FF_ENABLE,
+ };
+
+ static struct gdsc gx_gdsc = {
+ .gdscr = 0x905c,
++ .en_rest_wait_val = 0x2,
++ .en_few_wait_val = 0x2,
++ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
+index 8422fd0474932d..c89a5b59ddb7c2 100644
+--- a/drivers/clk/qcom/gpucc-sm8150.c
++++ b/drivers/clk/qcom/gpucc-sm8150.c
+@@ -37,8 +37,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_val = 0x00000000,
+- .test_ctl_hi_val = 0x00000002,
+- .test_ctl_hi1_val = 0x00000000,
++ .test_ctl_hi_val = 0x00000000,
++ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
+index 8dc54dff983f3a..33c4fb8891caa3 100644
+--- a/drivers/clk/qcom/gpucc-sm8350.c
++++ b/drivers/clk/qcom/gpucc-sm8350.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/clk.h>
+@@ -147,7 +148,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+@@ -169,7 +170,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_shared_ops,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
+index 97358c98c6c98e..d8c1f2b41eeb39 100644
+--- a/drivers/clk/qcom/kpss-xcc.c
++++ b/drivers/clk/qcom/kpss-xcc.c
+@@ -63,9 +63,7 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+- of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, hw);
+-
+- return 0;
++ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, hw);
+ }
+
+ static struct platform_driver kpss_xcc_driver = {
+diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
+index 02fc21208dd14b..c89700ab93f9c6 100644
+--- a/drivers/clk/qcom/mmcc-apq8084.c
++++ b/drivers/clk/qcom/mmcc-apq8084.c
+@@ -348,6 +348,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ F(333430000, P_MMPLL1, 3.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 mmss_axi_clk_src = {
+@@ -372,6 +373,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228570000, P_MMPLL0, 3.5, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 ocmemnoc_clk_src = {
+diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
+index 1f3bd302fe6ed4..6df22a67f02d3e 100644
+--- a/drivers/clk/qcom/mmcc-msm8974.c
++++ b/drivers/clk/qcom/mmcc-msm8974.c
+@@ -290,6 +290,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ F(466800000, P_MMPLL1, 2.5, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 mmss_axi_clk_src = {
+@@ -314,6 +315,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(291750000, P_MMPLL1, 4, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
++ { }
+ };
+
+ static struct clk_rcg2 ocmemnoc_clk_src = {
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index a023c4374be96a..275fb3b71ede4c 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2439,6 +2439,7 @@ static struct clk_branch fd_ahb_clk = {
+
+ static struct clk_branch mnoc_ahb_clk = {
+ .halt_reg = 0x5024,
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+@@ -2454,6 +2455,7 @@ static struct clk_branch mnoc_ahb_clk = {
+
+ static struct clk_branch bimc_smmu_ahb_clk = {
+ .halt_reg = 0xe004,
++ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+@@ -2471,6 +2473,7 @@ static struct clk_branch bimc_smmu_ahb_clk = {
+
+ static struct clk_branch bimc_smmu_axi_clk = {
+ .halt_reg = 0xe008,
++ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe008,
+ .hwcg_bit = 1,
+ .clkr = {
+@@ -2532,6 +2535,8 @@ static struct clk_branch vmem_ahb_clk = {
+
+ static struct gdsc video_top_gdsc = {
+ .gdscr = 0x1024,
++ .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
++ .cxc_count = 3,
+ .pd = {
+ .name = "video_top",
+ },
+@@ -2540,20 +2545,26 @@ static struct gdsc video_top_gdsc = {
+
+ static struct gdsc video_subcore0_gdsc = {
+ .gdscr = 0x1040,
++ .cxcs = (unsigned int []){ 0x1048 },
++ .cxc_count = 1,
+ .pd = {
+ .name = "video_subcore0",
+ },
+ .parent = &video_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = HW_CTRL,
+ };
+
+ static struct gdsc video_subcore1_gdsc = {
+ .gdscr = 0x1044,
++ .cxcs = (unsigned int []){ 0x104c },
++ .cxc_count = 1,
+ .pd = {
+ .name = "video_subcore1",
+ },
+ .parent = &video_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
++ .flags = HW_CTRL,
+ };
+
+ static struct gdsc mdss_gdsc = {
+@@ -2607,11 +2618,13 @@ static struct gdsc camss_cpp_gdsc = {
+ static struct gdsc bimc_smmu_gdsc = {
+ .gdscr = 0xe020,
+ .gds_hw_ctrl = 0xe024,
++ .cxcs = (unsigned int []){ 0xe008 },
++ .cxc_count = 1,
+ .pd = {
+ .name = "bimc_smmu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+- .flags = HW_CTRL | ALWAYS_ON,
++ .flags = VOTABLE,
+ };
+
+ static struct clk_regmap *mmcc_msm8998_clocks[] = {
+diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
+index e45e32804d2c75..d96c96a9089f40 100644
+--- a/drivers/clk/qcom/reset.c
++++ b/drivers/clk/qcom/reset.c
+@@ -22,8 +22,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
+ return 0;
+ }
+
+-static int
+-qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
++static int qcom_reset_set_assert(struct reset_controller_dev *rcdev,
++ unsigned long id, bool assert)
+ {
+ struct qcom_reset_controller *rst;
+ const struct qcom_reset_map *map;
+@@ -33,21 +33,22 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ map = &rst->reset_map[id];
+ mask = map->bitmask ? map->bitmask : BIT(map->bit);
+
+- return regmap_update_bits(rst->regmap, map->reg, mask, mask);
++ regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
++
++ /* Read back the register to ensure write completion, ignore the value */
++ regmap_read(rst->regmap, map->reg, &mask);
++
++ return 0;
+ }
+
+-static int
+-qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
++static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ {
+- struct qcom_reset_controller *rst;
+- const struct qcom_reset_map *map;
+- u32 mask;
+-
+- rst = to_qcom_reset_controller(rcdev);
+- map = &rst->reset_map[id];
+- mask = map->bitmask ? map->bitmask : BIT(map->bit);
++ return qcom_reset_set_assert(rcdev, id, true);
++}
+
+- return regmap_update_bits(rst->regmap, map->reg, mask, 0);
++static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
++{
++ return qcom_reset_set_assert(rcdev, id, false);
+ }
+
+ const struct reset_control_ops qcom_reset_ops = {
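
The consolidated helper above pairs each regmap write with a throwaway read of the same register. A hedged sketch of the general idiom follows; all names are illustrative, not from the patch. On buses with posted writes, the read-back forces the write to complete before the reset is treated as asserted or deasserted.

    #include <linux/regmap.h>

    /* Minimal sketch of the write-then-read-back idiom. */
    static int example_set_reset(struct regmap *regmap, unsigned int reg,
                                 u32 mask, bool assert)
    {
            unsigned int val;

            /* Set or clear the reset bits under the mask. */
            regmap_update_bits(regmap, reg, mask, assert ? mask : 0);

            /* Read back to flush the posted write; the value is discarded. */
            regmap_read(regmap, reg, &val);

            return 0;
    }
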
+diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
+index 1afdbe4a249d62..52a9a453a14326 100644
+--- a/drivers/clk/qcom/videocc-sm8150.c
++++ b/drivers/clk/qcom/videocc-sm8150.c
+@@ -33,6 +33,7 @@ static struct alpha_pll_config video_pll0_config = {
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
++ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000D0,
+@@ -214,6 +215,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
+
+ static const struct qcom_reset_map video_cc_sm8150_resets[] = {
+ [VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
++ [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
++ [VIDEO_CC_MVS0_BCR] = { 0x870 },
++ [VIDEO_CC_MVS1_BCR] = { 0x8b0 },
++ [VIDEO_CC_MVSC_BCR] = { 0x810 },
+ };
+
+ static const struct qcom_cc_desc video_cc_sm8150_desc = {
+diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
+index 1e7991439527a8..50a443bf79ecd3 100644
+--- a/drivers/clk/ralink/clk-mtmips.c
++++ b/drivers/clk/ralink/clk-mtmips.c
+@@ -821,6 +821,10 @@ static const struct mtmips_clk_data mt76x8_clk_data = {
+ };
+
+ static const struct of_device_id mtmips_of_match[] = {
++ {
++ .compatible = "ralink,rt2880-reset",
++ .data = NULL,
++ },
+ {
+ .compatible = "ralink,rt2880-sysc",
+ .data = &rt2880_clk_data,
+@@ -1088,25 +1092,11 @@ static int mtmips_clk_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static const struct of_device_id mtmips_clk_of_match[] = {
+- { .compatible = "ralink,rt2880-reset" },
+- { .compatible = "ralink,rt2880-sysc" },
+- { .compatible = "ralink,rt3050-sysc" },
+- { .compatible = "ralink,rt3052-sysc" },
+- { .compatible = "ralink,rt3352-sysc" },
+- { .compatible = "ralink,rt3883-sysc" },
+- { .compatible = "ralink,rt5350-sysc" },
+- { .compatible = "ralink,mt7620-sysc" },
+- { .compatible = "ralink,mt7628-sysc" },
+- { .compatible = "ralink,mt7688-sysc" },
+- {}
+-};
+-
+ static struct platform_driver mtmips_clk_driver = {
+ .probe = mtmips_clk_probe,
+ .driver = {
+ .name = "mtmips-clk",
+- .of_match_table = mtmips_clk_of_match,
++ .of_match_table = mtmips_of_match,
+ },
+ };
+
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index 4c2872f45387ff..ff3f85e906fe17 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -139,7 +139,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("avb3", 214, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb4", 215, R8A779A0_CLK_S3D2),
+ DEF_MOD("avb5", 216, R8A779A0_CLK_S3D2),
+- DEF_MOD("canfd0", 328, R8A779A0_CLK_CANFD),
++ DEF_MOD("canfd0", 328, R8A779A0_CLK_S3D2),
+ DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
+diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+index f721835c7e2124..cc06127406ab57 100644
+--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+@@ -161,7 +161,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("cmt1", 911, R8A779F0_CLK_R),
+ DEF_MOD("cmt2", 912, R8A779F0_CLK_R),
+ DEF_MOD("cmt3", 913, R8A779F0_CLK_R),
+- DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
++ DEF_MOD("pfc0", 915, R8A779F0_CLK_CPEX),
+ DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
+ DEF_MOD("rswitch2", 1505, R8A779F0_CLK_RSW2),
+ DEF_MOD("ether-serdes", 1506, R8A779F0_CLK_S0D2_HSC),
+diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+index 7cc580d6736261..7999faa9a921be 100644
+--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+@@ -22,7 +22,7 @@
+
+ enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+- LAST_DT_CORE_CLK = R8A779G0_CLK_R,
++ LAST_DT_CORE_CLK = R8A779G0_CLK_CP,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+@@ -141,6 +141,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
+ DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1),
+ DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1),
++ DEF_FIXED("cp", R8A779G0_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1),
+ DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
+ DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
+@@ -230,10 +231,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ DEF_MOD("cmt1", 911, R8A779G0_CLK_R),
+ DEF_MOD("cmt2", 912, R8A779G0_CLK_R),
+ DEF_MOD("cmt3", 913, R8A779G0_CLK_R),
+- DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M),
+- DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
+- DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
+- DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
++ DEF_MOD("pfc0", 915, R8A779G0_CLK_CP),
++ DEF_MOD("pfc1", 916, R8A779G0_CLK_CP),
++ DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
++ DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
+ DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
+diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
+index 1a7a6d60aca44b..6c6bc79b2e9cec 100644
+--- a/drivers/clk/renesas/r9a07g043-cpg.c
++++ b/drivers/clk/renesas/r9a07g043-cpg.c
+@@ -250,6 +250,10 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+ 0x5a8, 1),
+ DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
+ 0x5ac, 0),
++#ifdef CONFIG_RISCV
++ DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
++ 0x608, 0),
++#endif
+ };
+
+ static struct rzg2l_reset r9a07g043_resets[] = {
+@@ -303,6 +307,10 @@ static struct rzg2l_reset r9a07g043_resets[] = {
+ DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1),
+ DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0),
++#ifdef CONFIG_RISCV
++ DEF_RST(R9A07G043_NCEPLIC_ARESETN, 0x908, 0),
++#endif
++
+ };
+
+ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+@@ -312,6 +320,7 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+ #endif
+ #ifdef CONFIG_RISCV
+ MOD_CLK_BASE + R9A07G043_IAX45_CLK,
++ MOD_CLK_BASE + R9A07G043_NCEPLIC_ACLK,
+ #endif
+ MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
+ };
+diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
+index e2e0447de1901d..5a15f8788b9227 100644
+--- a/drivers/clk/renesas/rcar-cpg-lib.c
++++ b/drivers/clk/renesas/rcar-cpg-lib.c
+@@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ #define STPnHCK BIT(9 - SDnSRCFC_SHIFT)
+
+ static const struct clk_div_table cpg_sdh_div_table[] = {
++ /*
++ * These values are recommended by the datasheet. Because they come
++ * first in the table, Linux will only ever program these.
++ */
+ { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+- { STPnHCK | 4, 16 }, { 0, 0 },
++ { STPnHCK | 4, 16 },
++ /*
++ * These values are not recommended because STPnHCK is set incorrectly,
++ * but they have been observed in the wild due to broken firmware. We
++ * therefore support reading them, and Linux will sanitize them via
++ * recalc_rate during initialization.
++ */
++ { STPnHCK | 0, 1 }, { STPnHCK | 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 },
++ /* Sentinel */
++ { 0, 0 }
+ };
+
+ struct clk * __init cpg_sdh_clk_register(const char *name,
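
The table comment above relies on first-match-wins lookup semantics. A small self-contained sketch (plain C, hypothetical names, not the kernel's implementation) of why ordering matters: a linear scan returns the first entry whose register value matches, so the recommended encodings listed first shadow the broken-firmware aliases that follow.

    struct div_entry { unsigned int val; unsigned int div; };

    static unsigned int lookup_div(const struct div_entry *table,
                                   unsigned int regval)
    {
            const struct div_entry *e;

            for (e = table; e->div != 0; e++)   /* { 0, 0 } is the sentinel */
                    if (e->val == regval)
                            return e->div;

            return 1;   /* assume no division for an unknown encoding */
    }
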
+diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
+index 47f488387f33a1..75f9eca020ce57 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.c
++++ b/drivers/clk/renesas/rzg2l-cpg.c
+@@ -11,6 +11,7 @@
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk/renesas.h>
+@@ -38,14 +39,13 @@
+ #define WARN_DEBUG(x) do { } while (0)
+ #endif
+
+-#define DIV_RSMASK(v, s, m) ((v >> s) & m)
+ #define GET_SHIFT(val) ((val >> 12) & 0xff)
+ #define GET_WIDTH(val) ((val >> 8) & 0xf)
+
+-#define KDIV(val) DIV_RSMASK(val, 16, 0xffff)
+-#define MDIV(val) DIV_RSMASK(val, 6, 0x3ff)
+-#define PDIV(val) DIV_RSMASK(val, 0, 0x3f)
+-#define SDIV(val) DIV_RSMASK(val, 0, 0x7)
++#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val))
++#define MDIV(val) FIELD_GET(GENMASK(15, 6), val)
++#define PDIV(val) FIELD_GET(GENMASK(5, 0), val)
++#define SDIV(val) FIELD_GET(GENMASK(2, 0), val)
+
+ #define CLK_ON_R(reg) (reg)
+ #define CLK_MON_R(reg) (0x180 + (reg))
+@@ -188,7 +188,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ u32 off = GET_REG_OFFSET(hwdata->conf);
+ u32 shift = GET_SHIFT(hwdata->conf);
+ const u32 clk_src_266 = 2;
+- u32 bitmask;
++ u32 msk, val, bitmask;
++ unsigned long flags;
++ int ret;
+
+ /*
+ * As per the HW manual, we should not directly switch from 533 MHz to
+@@ -202,26 +204,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ * the index to value mapping is done by adding 1 to the index.
+ */
+ bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
++ msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
++ spin_lock_irqsave(&priv->rmw_lock, flags);
+ if (index != clk_src_266) {
+- u32 msk, val;
+- int ret;
+-
+ writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+
+- msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+-
+- ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+- !(val & msk), 100,
+- CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+- if (ret) {
+- dev_err(priv->dev, "failed to switch clk source\n");
+- return ret;
+- }
++ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++ !(val & msk), 10,
++ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++ if (ret)
++ goto unlock;
+ }
+
+ writel(bitmask | ((index + 1) << shift), priv->base + off);
+
+- return 0;
++ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
++ !(val & msk), 10,
++ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
++unlock:
++ spin_unlock_irqrestore(&priv->rmw_lock, flags);
++
++ if (ret)
++ dev_err(priv->dev, "failed to switch clk source\n");
++
++ return ret;
+ }
+
+ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+@@ -232,14 +238,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+
+ val >>= GET_SHIFT(hwdata->conf);
+ val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+- if (val) {
+- val--;
+- } else {
+- /* Prohibited clk source, change it to 533 MHz(reset value) */
+- rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+- }
+
+- return val;
++ return val ? val - 1 : 0;
+ }
+
+ static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
+@@ -695,18 +695,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzg2l_cpg_priv *priv = pll_clk->priv;
+ unsigned int val1, val2;
+- unsigned int mult = 1;
+- unsigned int div = 1;
++ u64 rate;
+
+ if (pll_clk->type != CLK_TYPE_SAM_PLL)
+ return parent_rate;
+
+ val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
+ val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
+- mult = MDIV(val1) + KDIV(val1) / 65536;
+- div = PDIV(val1) << SDIV(val2);
+
+- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
++ rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
++ 16 + SDIV(val2));
++
++ return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
+ }
+
+ static const struct clk_ops rzg2l_cpg_pll_ops = {
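
The rework above also fixes a precision bug: the deleted code computed the multiplier as MDIV + KDIV / 65536 in integer arithmetic, so the fractional KDIV contribution truncated to zero for any |KDIV| < 65536. A hedged sketch of the new arithmetic, with hypothetical names; the formula is rate = parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV), evaluated in 64 bits before the single division by PDIV.

    #include <linux/math.h>
    #include <linux/math64.h>

    static unsigned long example_pll_rate(unsigned long parent_rate,
                                          u32 mdiv, s16 kdiv, u32 pdiv, u32 sdiv)
    {
            u64 rate;

            /* parent * ((MDIV << 16) + KDIV) >> (16 + SDIV) */
            rate = mul_u64_u32_shr(parent_rate, (mdiv << 16) + kdiv,
                                   16 + sdiv);

            return DIV_ROUND_CLOSEST_ULL(rate, pdiv);
    }
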
+@@ -1105,41 +1105,33 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
+
+ #define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
+
+-static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
+- unsigned long id)
+-{
+- struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+- const struct rzg2l_cpg_info *info = priv->info;
+- unsigned int reg = info->resets[id].off;
+- u32 dis = BIT(info->resets[id].bit);
+- u32 we = dis << 16;
+-
+- dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+-
+- /* Reset module */
+- writel(we, priv->base + CLK_RST_R(reg));
+-
+- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+- udelay(35);
+-
+- /* Release module from reset state */
+- writel(we | dis, priv->base + CLK_RST_R(reg));
+-
+- return 0;
+-}
+-
+ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+ {
+ struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ const struct rzg2l_cpg_info *info = priv->info;
+ unsigned int reg = info->resets[id].off;
+- u32 value = BIT(info->resets[id].bit) << 16;
++ u32 mask = BIT(info->resets[id].bit);
++ s8 monbit = info->resets[id].monbit;
++ u32 value = mask << 16;
+
+ dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+
+ writel(value, priv->base + CLK_RST_R(reg));
+- return 0;
++
++ if (info->has_clk_mon_regs) {
++ reg = CLK_MRST_R(reg);
++ } else if (monbit >= 0) {
++ reg = CPG_RST_MON;
++ mask = BIT(monbit);
++ } else {
++ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
++ udelay(35);
++ return 0;
++ }
++
++ return readl_poll_timeout_atomic(priv->base + reg, value,
++ value & mask, 10, 200);
+ }
+
+ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
+@@ -1148,14 +1140,40 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
+ struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ const struct rzg2l_cpg_info *info = priv->info;
+ unsigned int reg = info->resets[id].off;
+- u32 dis = BIT(info->resets[id].bit);
+- u32 value = (dis << 16) | dis;
++ u32 mask = BIT(info->resets[id].bit);
++ s8 monbit = info->resets[id].monbit;
++ u32 value = (mask << 16) | mask;
+
+ dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
+ CLK_RST_R(reg));
+
+ writel(value, priv->base + CLK_RST_R(reg));
+- return 0;
++
++ if (info->has_clk_mon_regs) {
++ reg = CLK_MRST_R(reg);
++ } else if (monbit >= 0) {
++ reg = CPG_RST_MON;
++ mask = BIT(monbit);
++ } else {
++ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
++ udelay(35);
++ return 0;
++ }
++
++ return readl_poll_timeout_atomic(priv->base + reg, value,
++ !(value & mask), 10, 200);
++}
++
++static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ int ret;
++
++ ret = rzg2l_cpg_assert(rcdev, id);
++ if (ret)
++ return ret;
++
++ return rzg2l_cpg_deassert(rcdev, id);
+ }
+
+ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
+@@ -1163,18 +1181,21 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
+ {
+ struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+ const struct rzg2l_cpg_info *info = priv->info;
+- unsigned int reg = info->resets[id].off;
+- u32 bitmask = BIT(info->resets[id].bit);
+ s8 monbit = info->resets[id].monbit;
++ unsigned int reg;
++ u32 bitmask;
+
+ if (info->has_clk_mon_regs) {
+- return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
++ reg = CLK_MRST_R(info->resets[id].off);
++ bitmask = BIT(info->resets[id].bit);
+ } else if (monbit >= 0) {
+- u32 monbitmask = BIT(monbit);
+-
+- return !!(readl(priv->base + CPG_RST_MON) & monbitmask);
++ reg = CPG_RST_MON;
++ bitmask = BIT(monbit);
++ } else {
++ return -ENOTSUPP;
+ }
+- return -ENOTSUPP;
++
++ return !!(readl(priv->base + reg) & bitmask);
+ }
+
+ static const struct reset_control_ops rzg2l_cpg_reset_ops = {
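
The assert/deassert paths above share a shape worth spelling out: write the reset bit through the write-enable mask in the upper half-word, then either poll a monitor bit for the new state or, where no monitor register exists, fall back to a fixed delay of one RCLK cycle. A minimal sketch under those assumptions (names are illustrative):

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    static int example_assert(void __iomem *rst_reg, void __iomem *mon_reg,
                              u32 mask)
    {
            u32 value;

            /* Upper 16 bits are write-enable; leaving the low bit clear
             * asserts the reset.
             */
            writel(mask << 16, rst_reg);

            if (!mon_reg) {
                    /* No monitor bit: wait one RCLK cycle (~32 kHz). */
                    udelay(35);
                    return 0;
            }

            return readl_poll_timeout_atomic(mon_reg, value, value & mask,
                                             10, 200);
    }
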
+diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
+index 6cee9e56acc722..91e9c2569f801b 100644
+--- a/drivers/clk/renesas/rzg2l-cpg.h
++++ b/drivers/clk/renesas/rzg2l-cpg.h
+@@ -43,7 +43,7 @@
+ #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
+ #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
+
+-#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000
++#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200
+
+ /* n = 0/1/2 for PLL1/4/6 */
+ #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
+diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
+index aa53797dbfc145..75071e0cd3216e 100644
+--- a/drivers/clk/rockchip/clk-rk3128.c
++++ b/drivers/clk/rockchip/clk-rk3128.c
+@@ -138,7 +138,7 @@ PNAME(mux_pll_src_5plls_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480
+ PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "gpll_div2", "usb480m" };
+ PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "gpll_div2" };
+
+-PNAME(mux_aclk_peri_src_p) = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" };
++PNAME(mux_clk_peri_src_p) = { "gpll", "cpll", "gpll_div2", "gpll_div3" };
+ PNAME(mux_mmc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" };
+ PNAME(mux_clk_cif_out_src_p) = { "clk_cif_src", "xin24m" };
+ PNAME(mux_sclk_vop_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" };
+@@ -275,23 +275,17 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+ RK2928_CLKGATE_CON(0), 11, GFLAGS),
+
+ /* PD_PERI */
+- GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED,
++ COMPOSITE(0, "clk_peri_src", mux_clk_peri_src_p, 0,
++ RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS,
+ RK2928_CLKGATE_CON(2), 0, GFLAGS),
+- GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED,
+- RK2928_CLKGATE_CON(2), 0, GFLAGS),
+- GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED,
+- RK2928_CLKGATE_CON(2), 0, GFLAGS),
+- GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED,
+- RK2928_CLKGATE_CON(2), 0, GFLAGS),
+- COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0,
+- RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS),
+- COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
++
++ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "clk_peri_src", 0,
+ RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK2928_CLKGATE_CON(2), 3, GFLAGS),
+- COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
++ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "clk_peri_src", 0,
+ RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK2928_CLKGATE_CON(2), 2, GFLAGS),
+- GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
++ GATE(ACLK_PERI, "aclk_peri", "clk_peri_src", 0,
+ RK2928_CLKGATE_CON(2), 1, GFLAGS),
+
+ GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+@@ -316,7 +310,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+ GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(2), 15, GFLAGS),
+
+- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
++ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+ RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK2928_CLKGATE_CON(2), 11, GFLAGS),
+
+@@ -490,7 +484,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+ GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+- GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
++ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
+ GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index a24a35553e1349..7343d2d7676bca 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
+ RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
+ DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
+ RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
+- MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
++ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
+
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
+index 16dabe2b9c47f4..db713e1526cdc3 100644
+--- a/drivers/clk/rockchip/clk-rk3568.c
++++ b/drivers/clk/rockchip/clk-rk3568.c
+@@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0),
++ RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0),
+ RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
+diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
+index 6994165e03957c..d8ffcaefa480b3 100644
+--- a/drivers/clk/rockchip/clk-rk3588.c
++++ b/drivers/clk/rockchip/clk-rk3588.c
+@@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" };
+ PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" };
+ PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" };
+ PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" };
+-PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" };
++PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" };
+ PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" };
+ PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" };
+ PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" };
+diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
+index 4059d9365ae642..b9d6ffcb340912 100644
+--- a/drivers/clk/rockchip/clk.c
++++ b/drivers/clk/rockchip/clk.c
+@@ -433,12 +433,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+ {
+- struct clk *clk = NULL;
++ struct clk *clk;
+ unsigned int idx;
+ unsigned long flags;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ flags = list->flags;
++ clk = NULL;
+
+ /* catch simple muxes */
+ switch (list->branch_type) {
+diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
+index f7d7427a558ba0..87387d4cbf48a2 100644
+--- a/drivers/clk/samsung/clk-exynos7885.c
++++ b/drivers/clk/samsung/clk-exynos7885.c
+@@ -20,7 +20,7 @@
+ #define CLKS_NR_TOP (CLK_GOUT_FSYS_USB30DRD + 1)
+ #define CLKS_NR_CORE (CLK_GOUT_TREX_P_CORE_PCLK_P_CORE + 1)
+ #define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1)
+-#define CLKS_NR_FSYS (CLK_GOUT_MMC_SDIO_SDCLKIN + 1)
++#define CLKS_NR_FSYS (CLK_MOUT_FSYS_USB30DRD_USER + 1)
+
+ /* ---- CMU_TOP ------------------------------------------------------------- */
+
+diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
+index bdc1eef7d6e548..c7b0b9751307b9 100644
+--- a/drivers/clk/samsung/clk-exynos850.c
++++ b/drivers/clk/samsung/clk-exynos850.c
+@@ -605,7 +605,7 @@ static const struct samsung_div_clock apm_div_clks[] __initconst = {
+
+ static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
+- CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
++ CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
+ "mout_clkcmu_chub_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
+@@ -974,19 +974,19 @@ static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
+ static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
+ CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
+- MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
+- MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
++ MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
++ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0),
++ MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
++ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
+ CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
+- DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
+- DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
++ DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
++ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0),
++ DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
++ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
+@@ -1001,12 +1001,12 @@ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
+- CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
++ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
+- CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
++ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
+@@ -1557,8 +1557,9 @@ static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
+ mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
+ mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
+- MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
+- PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
++ MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user",
++ mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1,
++ CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_div_clock peri_div_clks[] __initconst = {
+@@ -1568,8 +1569,8 @@ static const struct samsung_div_clock peri_div_clks[] __initconst = {
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
+ DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
+- DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
+- CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
++ DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
++ CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+@@ -1611,7 +1612,7 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
+- CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
++ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
+diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
+index e9c06eb93e6665..f04bacacab2cb8 100644
+--- a/drivers/clk/samsung/clk-exynosautov9.c
++++ b/drivers/clk/samsung/clk-exynosautov9.c
+@@ -352,13 +352,13 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk",
++ PLL(pll_0822x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk",
++ PLL(pll_0822x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk",
++ PLL(pll_0822x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk",
++ PLL(pll_0822x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ };
+
+diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
+index af81eb835bc235..b1be6a2d24aa9c 100644
+--- a/drivers/clk/sifive/sifive-prci.c
++++ b/drivers/clk/sifive/sifive-prci.c
+@@ -4,7 +4,6 @@
+ * Copyright (C) 2020 Zong Li
+ */
+
+-#include <linux/clkdev.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+@@ -536,13 +535,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
+ return r;
+ }
+
+- r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+- if (r) {
+- dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+- init.name, r);
+- return r;
+- }
+-
+ pd->hw_clks.hws[i] = &pic->hw;
+ }
+
+diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
+index 75234e0783e1cc..83fe4eb3133cbe 100644
+--- a/drivers/clk/socfpga/stratix10-clk.h
++++ b/drivers/clk/socfpga/stratix10-clk.h
+@@ -7,8 +7,10 @@
+ #define __STRATIX10_CLK_H
+
+ struct stratix10_clock_data {
+- struct clk_hw_onecell_data clk_data;
+ void __iomem *base;
++
++ /* Must be last */
++ struct clk_hw_onecell_data clk_data;
+ };
+
+ struct stratix10_pll_clock {
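
The "Must be last" move above is required because struct clk_hw_onecell_data ends in a flexible array member; any field placed after it would overlap the array once the allocation is sized for the clocks. A hedged sketch of the matching allocation pattern, with hypothetical names:

    #include <linux/clk-provider.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_clock_data {
            void __iomem *base;
            /* Must be last: ends in a flexible array of clk_hw pointers. */
            struct clk_hw_onecell_data clk_data;
    };

    static struct example_clock_data *example_alloc(unsigned int num)
    {
            struct example_clock_data *data;

            /* struct_size() accounts for the trailing clk_data.hws[num]. */
            data = kzalloc(struct_size(data, clk_data.hws, num), GFP_KERNEL);
            if (!data)
                    return NULL;

            data->clk_data.num = num;
            return data;
    }
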
+diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+index 3884eff9fe9315..58ef7c6d69cce9 100644
+--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
++++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+@@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
+ }
+ EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
+
++/*
++ * This clock notifier is called when the rate of the PLL0 clock is about to
++ * change. The cpu_root clock should save the current parent clock and switch
++ * its parent clock to osc before the PLL0 rate changes, then switch back to
++ * the saved parent once the rate change has completed.
++ */
++static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb,
++ unsigned long action, void *data)
++{
++ struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb);
++ struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk;
++ int ret = 0;
++
++ if (action == PRE_RATE_CHANGE) {
++ struct clk *osc = clk_get(priv->dev, "osc");
++
++ priv->original_clk = clk_get_parent(cpu_root);
++ ret = clk_set_parent(cpu_root, osc);
++ clk_put(osc);
++ } else if (action == POST_RATE_CHANGE) {
++ ret = clk_set_parent(cpu_root, priv->original_clk);
++ }
++
++ return notifier_from_errno(ret);
++}
++
+ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
+ {
+ struct jh71x0_clk_priv *priv;
+@@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->pll[0]))
+ return PTR_ERR(priv->pll[0]);
+ } else {
+- clk_put(pllclk);
++ priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb;
++ ret = clk_notifier_register(pllclk, &priv->pll_clk_nb);
++ if (ret)
++ return ret;
+ priv->pll[0] = NULL;
+ }
+
+diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+index 10cc1ec4392517..36340ca42cc7ed 100644
+--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
++++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+@@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
+
+ /* enable power domain and clocks */
+ pm_runtime_enable(priv->dev);
+- ret = pm_runtime_get_sync(priv->dev);
++ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0)
+ return dev_err_probe(priv->dev, ret, "failed to turn on power\n");
+
+diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
+index 34bb11c72eb73b..ebc55b9ef83705 100644
+--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
++++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
+@@ -114,6 +114,8 @@ struct jh71x0_clk_priv {
+ spinlock_t rmw_lock;
+ struct device *dev;
+ void __iomem *base;
++ struct clk *original_clk;
++ struct notifier_block pll_clk_nb;
+ struct clk_hw *pll[3];
+ struct jh71x0_clk reg[];
+ };
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+index 8951ffc14ff52c..6a4b2b9ef30a82 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+@@ -182,6 +182,8 @@ static struct ccu_nkm pll_mipi_clk = {
+ &ccu_nkm_ops,
+ CLK_SET_RATE_UNGATE | CLK_SET_RATE_PARENT),
+ .features = CCU_FEATURE_CLOSEST_RATE,
++ .min_rate = 500000000,
++ .max_rate = 1400000000,
+ },
+ };
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+index 42568c6161814d..892df807275c8e 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+@@ -1181,11 +1181,18 @@ static const u32 usb2_clk_regs[] = {
+ SUN50I_H6_USB3_CLK_REG,
+ };
+
++static struct ccu_mux_nb sun50i_h6_cpu_nb = {
++ .common = &cpux_clk.common,
++ .cm = &cpux_clk.mux,
++ .delay_us = 1,
++ .bypass_index = 0, /* index of 24 MHz oscillator */
++};
++
+ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
+ {
+ void __iomem *reg;
++ int i, ret;
+ u32 val;
+- int i;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+@@ -1252,7 +1259,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
+ val |= BIT(24);
+ writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
+
+- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
++ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
++ if (ret)
++ return ret;
++
++ /* Reparent CPU during PLL CPUX rate changes */
++ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
++ &sun50i_h6_cpu_nb);
++
++ return 0;
+ }
+
+ static const struct of_device_id sun50i_h6_ccu_ids[] = {
+diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
+index 8babce55302f5f..be375ce0149c8b 100644
+--- a/drivers/clk/sunxi-ng/ccu_common.c
++++ b/drivers/clk/sunxi-ng/ccu_common.c
+@@ -44,6 +44,16 @@ bool ccu_is_better_rate(struct ccu_common *common,
+ unsigned long current_rate,
+ unsigned long best_rate)
+ {
++ unsigned long min_rate, max_rate;
++
++ clk_hw_get_rate_range(&common->hw, &min_rate, &max_rate);
++
++ if (current_rate > max_rate)
++ return false;
++
++ if (current_rate < min_rate)
++ return false;
++
+ if (common->features & CCU_FEATURE_CLOSEST_RATE)
+ return abs(current_rate - target_rate) < abs(best_rate - target_rate);
+
+@@ -138,6 +148,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ }
+ }
+
++ for (i = 0; i < desc->num_ccu_clks; i++) {
++ struct ccu_common *cclk = desc->ccu_clks[i];
++
++ if (!cclk)
++ continue;
++
++ if (cclk->max_rate)
++ clk_hw_set_rate_range(&cclk->hw, cclk->min_rate,
++ cclk->max_rate);
++ else
++ WARN(cclk->min_rate,
++ "No max_rate, ignoring min_rate of clock %d - %s\n",
++ i, clk_hw_get_name(&cclk->hw));
++ }
++
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ desc->hw_clks);
+ if (ret)
+diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
+index 942a72c0943744..329734f8cf42b4 100644
+--- a/drivers/clk/sunxi-ng/ccu_common.h
++++ b/drivers/clk/sunxi-ng/ccu_common.h
+@@ -31,6 +31,9 @@ struct ccu_common {
+ u16 lock_reg;
+ u32 prediv;
+
++ unsigned long min_rate;
++ unsigned long max_rate;
++
+ unsigned long features;
+ spinlock_t *lock;
+ struct clk_hw hw;
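
With min_rate/max_rate in struct ccu_common, the probe path registers the range via clk_hw_set_rate_range() and ccu_is_better_rate() rejects out-of-range candidates before any closest-rate comparison. A minimal sketch of the acceptance test (hypothetical helper name):

    #include <linux/clk-provider.h>

    static bool example_rate_ok(struct clk_hw *hw, unsigned long rate)
    {
            unsigned long min_rate, max_rate;

            clk_hw_get_rate_range(hw, &min_rate, &max_rate);

            /* Candidates outside [min, max] never become the best rate. */
            return rate >= min_rate && rate <= max_rate;
    }
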
+diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
+index d964e3affd42ce..0eab7f3e2eab9e 100644
+--- a/drivers/clk/ti/clk-dra7-atl.c
++++ b/drivers/clk/ti/clk-dra7-atl.c
+@@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+ }
+
+ clk = of_clk_get_from_provider(&clkspec);
++ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to get atl clock %d from provider\n",
+ __func__, i);
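
The one-line fix above follows the usual OF reference-counting rule: of_parse_phandle_with_args() takes a reference on args.np, and the caller must drop it once the node has been used, whether or not the provider lookup succeeds. A hedged sketch with hypothetical names:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static struct clk *example_get_clk(struct device_node *np, int index)
    {
            struct of_phandle_args clkspec;
            struct clk *clk;
            int ret;

            ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
                                             index, &clkspec);
            if (ret)
                    return ERR_PTR(ret);

            clk = of_clk_get_from_provider(&clkspec);
            of_node_put(clkspec.np);    /* drop the reference in all cases */

            return clk;
    }
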
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index 768a1f3398b47d..5d5bb123ba9494 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node,
+ u32 flags,
+ struct clk_omap_divider *div)
+ {
+- struct clk *clk;
+ struct clk_init_data init;
+ const char *parent_name;
+ const char *name;
+@@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node,
+ div->hw.init = &init;
+
+ /* register the clock */
+- clk = of_ti_clk_register(node, &div->hw, name);
+-
+- if (IS_ERR(clk))
+- kfree(div);
+-
+- return clk;
++ return of_ti_clk_register(node, &div->hw, name);
+ }
+
+ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
+index 1f3234f2266744..e9cd80e085dc3b 100644
+--- a/drivers/clk/visconti/pll.c
++++ b/drivers/clk/visconti/pll.c
+@@ -329,12 +329,12 @@ struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+- for (i = 0; i < nr_plls; ++i)
+- ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+-
+ ctx->node = np;
+ ctx->reg_base = base;
+ ctx->clk_data.num = nr_plls;
+
++ for (i = 0; i < nr_plls; ++i)
++ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
++
+ return ctx;
+ }
+diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
+index 01d07f1bf01b10..c4bd40676da4bf 100644
+--- a/drivers/clk/visconti/pll.h
++++ b/drivers/clk/visconti/pll.h
+@@ -15,8 +15,10 @@
+
+ struct visconti_pll_provider {
+ void __iomem *reg_base;
+- struct clk_hw_onecell_data clk_data;
+ struct device_node *node;
++
++ /* Must be last */
++ struct clk_hw_onecell_data clk_data;
+ };
+
+ #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
+diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
+index 7bdeaff2bfd68b..c28d3dacf0fb22 100644
+--- a/drivers/clk/zynq/clkc.c
++++ b/drivers/clk/zynq/clkc.c
+@@ -42,6 +42,7 @@ static void __iomem *zynq_clkc_base;
+ #define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
+
+ #define NUM_MIO_PINS 54
++#define CLK_NAME_LEN 16
+
+ #define DBG_CLK_CTRL_CLKACT_TRC BIT(0)
+ #define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1)
+@@ -215,7 +216,7 @@ static void __init zynq_clk_setup(struct device_node *np)
+ int i;
+ u32 tmp;
+ int ret;
+- char *clk_name;
++ char clk_name[CLK_NAME_LEN];
+ unsigned int fclk_enable = 0;
+ const char *clk_output_name[clk_max];
+ const char *cpu_parents[4];
+@@ -426,12 +427,10 @@ static void __init zynq_clk_setup(struct device_node *np)
+ "gem1_emio_mux", CLK_SET_RATE_PARENT,
+ SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
+
+- tmp = strlen("mio_clk_00x");
+- clk_name = kmalloc(tmp, GFP_KERNEL);
+ for (i = 0; i < NUM_MIO_PINS; i++) {
+ int idx;
+
+- snprintf(clk_name, tmp, "mio_clk_%2.2d", i);
++ snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i);
+ idx = of_property_match_string(np, "clock-names", clk_name);
+ if (idx >= 0)
+ can_mio_mux_parents[i] = of_clk_get_parent_name(np,
+@@ -439,7 +438,6 @@ static void __init zynq_clk_setup(struct device_node *np)
+ else
+ can_mio_mux_parents[i] = dummy_nm;
+ }
+- kfree(clk_name);
+ clk_register_mux(NULL, "can_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
+ &canclk_lock);
+diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
+index 60359333f26dbe..9b5d3050b74229 100644
+--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
++++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
+@@ -89,7 +89,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ static const struct clk_ops zynqmp_clk_mux_ops = {
+ .get_parent = zynqmp_clk_mux_get_parent,
+ .set_parent = zynqmp_clk_mux_set_parent,
+- .determine_rate = __clk_mux_determine_rate,
++ .determine_rate = __clk_mux_determine_rate_closest,
+ };
+
+ static const struct clk_ops zynqmp_clk_mux_ro_ops = {
+diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
+index 33a3b2a226595d..5a00487ae408be 100644
+--- a/drivers/clk/zynqmp/divider.c
++++ b/drivers/clk/zynqmp/divider.c
+@@ -110,52 +110,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
+ return DIV_ROUND_UP_ULL(parent_rate, value);
+ }
+
+-static void zynqmp_get_divider2_val(struct clk_hw *hw,
+- unsigned long rate,
+- struct zynqmp_clk_divider *divider,
+- u32 *bestdiv)
+-{
+- int div1;
+- int div2;
+- long error = LONG_MAX;
+- unsigned long div1_prate;
+- struct clk_hw *div1_parent_hw;
+- struct zynqmp_clk_divider *pdivider;
+- struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
+-
+- if (!div2_parent_hw)
+- return;
+-
+- pdivider = to_zynqmp_clk_divider(div2_parent_hw);
+- if (!pdivider)
+- return;
+-
+- div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
+- if (!div1_parent_hw)
+- return;
+-
+- div1_prate = clk_hw_get_rate(div1_parent_hw);
+- *bestdiv = 1;
+- for (div1 = 1; div1 <= pdivider->max_div;) {
+- for (div2 = 1; div2 <= divider->max_div;) {
+- long new_error = ((div1_prate / div1) / div2) - rate;
+-
+- if (abs(new_error) < abs(error)) {
+- *bestdiv = div2;
+- error = new_error;
+- }
+- if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+- div2 = div2 << 1;
+- else
+- div2++;
+- }
+- if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
+- div1 = div1 << 1;
+- else
+- div1++;
+- }
+-}
+-
+ /**
+ * zynqmp_clk_divider_round_rate() - Round rate of divider clock
+ * @hw: handle between common and hardware-specific interfaces
+@@ -174,6 +128,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+ u32 div_type = divider->div_type;
+ u32 bestdiv;
+ int ret;
++ u8 width;
+
+ /* if read only, just return current value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+@@ -193,23 +148,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
+ return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ }
+
+- bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);
+-
+- /*
+- * In case of two divisors, compute best divider values and return
+- * divider2 value based on compute value. div1 will be automatically
+- * set to optimum based on required total divider value.
+- */
+- if (div_type == TYPE_DIV2 &&
+- (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+- zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
+- }
++ width = fls(divider->max_div);
+
+- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
+- bestdiv = rate % *prate ? 1 : bestdiv;
++ rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+
+- bestdiv = min_t(u32, bestdiv, divider->max_div);
+- *prate = rate * bestdiv;
++ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
++ *prate = rate;
+
+ return rate;
+ }
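
The rewrite above replaces the bespoke two-level divisor search with the framework's divider_round_rate(), deriving the register field width from the maximum divisor with fls(). A brief sketch (hypothetical wrapper) of that mapping:

    #include <linux/bitops.h>
    #include <linux/clk-provider.h>

    static long example_round_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long *prate, u32 max_div,
                                   unsigned long flags)
    {
            u8 width = fls(max_div);    /* e.g. fls(0x3f) == 6 bits */

            /* NULL table: plain divisors 1..2^width, as in the patch. */
            return divider_round_rate(hw, rate, prate, NULL, width, flags);
    }
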
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 7dd2c615bce231..071b04f1ee7309 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -836,8 +836,9 @@ static u64 __arch_timer_check_delta(void)
+ * Note that TVAL is signed, thus has only 31 of its
+ * 32 bits to express magnitude.
+ */
+- MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+- APM_CPU_PART_POTENZA)),
++ MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
++ APM_CPU_PART_XGENE),
++ APM_CPU_VAR_POTENZA, 0x0, 0xf),
+ {},
+ };
+
+diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
+index 44a61dc6f93208..22a58d35a41fa5 100644
+--- a/drivers/clocksource/arm_global_timer.c
++++ b/drivers/clocksource/arm_global_timer.c
+@@ -32,7 +32,7 @@
+ #define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
+ #define GT_CONTROL_AUTO_INC BIT(3) /* banked */
+ #define GT_CONTROL_PRESCALER_SHIFT 8
+-#define GT_CONTROL_PRESCALER_MAX 0xF
++#define GT_CONTROL_PRESCALER_MAX 0xFF
+ #define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
+ GT_CONTROL_PRESCALER_SHIFT)
+
+@@ -290,18 +290,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ {
+- int psv;
++ unsigned long psv;
+
+- psv = DIV_ROUND_CLOSEST(ndata->new_rate,
+- gt_target_rate);
+-
+- if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
++ psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
++ if (!psv ||
++ abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ return NOTIFY_BAD;
+
+ psv--;
+
+ /* prescaler within legal range? */
+- if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
++ if (psv > GT_CONTROL_PRESCALER_MAX)
+ return NOTIFY_BAD;
+
+ /*
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index 26919556ef5f0b..b72b36e0abed86 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
+ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+ {
+ struct sh_cmt_channel *ch = dev_id;
++ unsigned long flags;
+
+ /* clear flags */
+ sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
+@@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+
+ ch->flags &= ~FLAG_SKIPEVENT;
+
++ raw_spin_lock_irqsave(&ch->lock, flags);
++
+ if (ch->flags & FLAG_REPROGRAM) {
+ ch->flags &= ~FLAG_REPROGRAM;
+ sh_cmt_clock_event_program_verify(ch, 1);
+@@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
+
+ ch->flags &= ~FLAG_IRQCONTEXT;
+
++ raw_spin_unlock_irqrestore(&ch->lock, flags);
++
+ return IRQ_HANDLED;
+ }
+
+@@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+ {
+ struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
++ unsigned long flags;
+
+ BUG_ON(!clockevent_state_oneshot(ced));
++
++ raw_spin_lock_irqsave(&ch->lock, flags);
++
+ if (likely(ch->flags & FLAG_IRQCONTEXT))
+ ch->next_match_value = delta - 1;
+ else
+- sh_cmt_set_next(ch, delta - 1);
++ __sh_cmt_set_next(ch, delta - 1);
++
++ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ return 0;
+ }
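
The sh_cmt hunks above take ch->lock both in the interrupt handler, around the FLAG_REPROGRAM bookkeeping, and in sh_cmt_clock_event_next(), so the FLAG_IRQCONTEXT test and the next_match_value update can no longer race with each other. The locking pattern, reduced to a hedged sketch (struct and function names invented; only the discipline is taken from the patch):

    #include <linux/spinlock.h>

    struct chan {
            raw_spinlock_t lock;
            unsigned long flags;            /* FLAG_* bits shared with the IRQ handler */
            unsigned long next_match_value;
    };

    static void chan_set_next(struct chan *ch, unsigned long delta)
    {
            unsigned long irqflags;

            /* Same lock the IRQ handler holds, so both sides observe a
             * consistent flags/next_match_value pair. */
            raw_spin_lock_irqsave(&ch->lock, irqflags);
            ch->next_match_value = delta;
            raw_spin_unlock_irqrestore(&ch->lock, irqflags);
    }
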
+diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
+index 27af17c9959004..2a90c92a9182ab 100644
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_WAVE
+ | ATMEL_TC_WAVESEL_UP /* free-run */
++ | ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
+ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
+ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
+ tcaddr + ATMEL_TC_REG(0, CMR));
+diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
+index 28ab4f1a7c713b..6a878d227a13b5 100644
+--- a/drivers/clocksource/timer-imx-gpt.c
++++ b/drivers/clocksource/timer-imx-gpt.c
+@@ -434,12 +434,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
+ return -ENOMEM;
+
+ imxtm->base = of_iomap(np, 0);
+- if (!imxtm->base)
+- return -ENXIO;
++ if (!imxtm->base) {
++ ret = -ENXIO;
++ goto err_kfree;
++ }
+
+ imxtm->irq = irq_of_parse_and_map(np, 0);
+- if (imxtm->irq <= 0)
+- return -EINVAL;
++ if (imxtm->irq <= 0) {
++ ret = -EINVAL;
++ goto err_kfree;
++ }
+
+ imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
+
+@@ -452,11 +456,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
+
+ ret = _mxc_timer_init(imxtm);
+ if (ret)
+- return ret;
++ goto err_kfree;
+
+ initialized = 1;
+
+ return 0;
++
++err_kfree:
++ kfree(imxtm);
++ return ret;
+ }
+
+ static int __init imx1_timer_init_dt(struct device_node *np)
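
The timer-imx-gpt hunk above converts early returns that leaked the freshly allocated imxtm into a single err_kfree unwind label, the standard kernel idiom for cleanup on init failure. The shape of it, as a self-contained sketch (types and helper invented):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct thing { int id; };

    static int step_that_may_fail(void)
    {
            return -ENXIO;  /* stand-in for of_iomap()/IRQ mapping and friends */
    }

    static int probe_like(void)
    {
            struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
            int ret;

            if (!t)
                    return -ENOMEM;

            ret = step_that_may_fail();
            if (ret)
                    goto err_kfree; /* every failure path funnels through here */

            return 0;

    err_kfree:
            kfree(t);
            return ret;
    }
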
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index bd64a8a8427f3c..92c025b70eb62f 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
+ static int tpm_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+ {
+- unsigned long next, now;
++ unsigned long next, prev, now;
+
+- next = tpm_read_counter();
+- next += delta;
++ prev = tpm_read_counter();
++ next = prev + delta;
+ writel(next, timer_base + TPM_C0V);
+ now = tpm_read_counter();
+
++ /*
++ * Need to wait for CNT to increase by at least 1 cycle to make
++ * sure the C0V value has been updated into the HW.
++ */
++ if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
++ while (now == tpm_read_counter())
++ ;
++
+ /*
+ * NOTE: We observed in a very small probability, the bus fabric
+ * contention between GPU and A7 may results a few cycles delay
+ * of writing CNT registers which may cause the min_delta event got
+ * missed, so we need add a ETIME check here in case it happened.
+ */
+- return (int)(next - now) <= 0 ? -ETIME : 0;
++ return (now - prev) >= delta ? -ETIME : 0;
+ }
+
+ static int tpm_set_state_oneshot(struct clock_event_device *evt)
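
In tpm_set_next_event() above, the missed-deadline test changes from a signed comparison on the raw compare value to (now - prev) >= delta, which measures elapsed ticks and, being unsigned, stays correct across counter wraparound. The property in isolation (function name invented):

    #include <linux/types.h>

    /* (now - prev) counts ticks elapsed since 'prev' even if the 32-bit
     * free-running counter wrapped in between. */
    static bool missed_deadline(u32 prev, u32 now, u32 delta)
    {
            return (now - prev) >= delta;
    }
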
+diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
+index c3f54d9912be79..420202bf76e42c 100644
+--- a/drivers/clocksource/timer-of.c
++++ b/drivers/clocksource/timer-of.c
+@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
+
+ struct clock_event_device *clkevt = &to->clkevt;
+
+- if (of_irq->percpu)
+- free_percpu_irq(of_irq->irq, clkevt);
+- else
+- free_irq(of_irq->irq, clkevt);
++ free_irq(of_irq->irq, clkevt);
+ }
+
+ /**
+@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
+ * - Get interrupt number by name
+ * - Get interrupt number by index
+ *
+- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+- * otherwise 'request_irq()' is used.
+- *
+ * Returns 0 on success, < 0 otherwise
+ */
+ static __init int timer_of_irq_init(struct device_node *np,
+@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
+ return -EINVAL;
+ }
+
+- ret = of_irq->percpu ?
+- request_percpu_irq(of_irq->irq, of_irq->handler,
+- np->full_name, clkevt) :
+- request_irq(of_irq->irq, of_irq->handler,
+- of_irq->flags ? of_irq->flags : IRQF_TIMER,
+- np->full_name, clkevt);
++ ret = request_irq(of_irq->irq, of_irq->handler,
++ of_irq->flags ? of_irq->flags : IRQF_TIMER,
++ np->full_name, clkevt);
+ if (ret) {
+ pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
+ return ret;
+diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
+index a5478f3e8589df..01a2c6b7db0659 100644
+--- a/drivers/clocksource/timer-of.h
++++ b/drivers/clocksource/timer-of.h
+@@ -11,7 +11,6 @@
+ struct of_timer_irq {
+ int irq;
+ int index;
+- int percpu;
+ const char *name;
+ unsigned long flags;
+ irq_handler_t handler;
+diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c
+index b4afe3a6758351..eac4c95c6127f2 100644
+--- a/drivers/clocksource/timer-qcom.c
++++ b/drivers/clocksource/timer-qcom.c
+@@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np)
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", &freq)) {
++ iounmap(cpu0_base);
+ pr_err("Unknown frequency\n");
+ return -EINVAL;
+ }
+@@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np)
+ freq /= 4;
+ writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
+
+- return msm_timer_init(freq, 32, irq, !!percpu_offset);
++ ret = msm_timer_init(freq, 32, irq, !!percpu_offset);
++ if (ret)
++ iounmap(cpu0_base);
++
++ return ret;
+ }
+ TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
+ TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 09ab29cb7f6416..56acf26172621f 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -140,6 +140,8 @@ struct dmtimer {
+ struct platform_device *pdev;
+ struct list_head node;
+ struct notifier_block nb;
++ struct notifier_block fclk_nb;
++ unsigned long fclk_rate;
+ };
+
+ static u32 omap_reserved_systimers;
+@@ -181,7 +183,7 @@ static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
+ * dmtimer_write - write timer registers in posted and non-posted mode
+ * @timer: timer pointer over which write operation is to perform
+ * @reg: lowest byte holds the register offset
+- * @value: data to write into the register
++ * @val: data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode, the write
+ * pending bit must be checked. Otherwise a write on a register which has a
+@@ -253,8 +255,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
+ timer->posted = OMAP_TIMER_POSTED;
+ }
+
+-static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+- unsigned long rate)
++static inline void __omap_dm_timer_stop(struct dmtimer *timer)
+ {
+ u32 l;
+
+@@ -269,7 +270,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+- udelay(3500000 / rate + 1);
++ udelay(3500000 / timer->fclk_rate + 1);
+ #endif
+ }
+
+@@ -348,6 +349,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
+ return NOTIFY_OK;
+ }
+
++static int omap_timer_fclk_notifier(struct notifier_block *nb,
++ unsigned long event, void *data)
++{
++ struct clk_notifier_data *clk_data = data;
++ struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
++
++ switch (event) {
++ case POST_RATE_CHANGE:
++ timer->fclk_rate = clk_data->new_rate;
++ return NOTIFY_OK;
++ default:
++ return NOTIFY_DONE;
++ }
++}
++
+ static int omap_dm_timer_reset(struct dmtimer *timer)
+ {
+ u32 l, timeout = 100000;
+@@ -754,7 +770,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+ {
+ struct dmtimer *timer;
+ struct device *dev;
+- unsigned long rate = 0;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+@@ -762,10 +777,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
+
+ dev = &timer->pdev->dev;
+
+- if (!timer->omap1)
+- rate = clk_get_rate(timer->fclk);
+-
+- __omap_dm_timer_stop(timer, rate);
++ __omap_dm_timer_stop(timer);
+
+ pm_runtime_put_sync(dev);
+
+@@ -937,7 +949,7 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
+
+ /**
+ * omap_dm_timer_set_int_disable - disable timer interrupts
+- * @timer: pointer to timer handle
++ * @cookie: pointer to timer cookie
+ * @mask: bit mask of interrupts to be disabled
+ *
+ * Disables the specified timer interrupts for a timer.
+@@ -1124,6 +1136,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ timer->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(timer->fclk))
+ return PTR_ERR(timer->fclk);
++
++ timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
++ ret = devm_clk_notifier_register(dev, timer->fclk,
++ &timer->fclk_nb);
++ if (ret)
++ return ret;
++
++ timer->fclk_rate = clk_get_rate(timer->fclk);
+ } else {
+ timer->fclk = ERR_PTR(-ENODEV);
+ }
+diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
+index 30ea8b53ebf819..05ae9122823f80 100644
+--- a/drivers/comedi/drivers/comedi_test.c
++++ b/drivers/comedi/drivers/comedi_test.c
+@@ -87,6 +87,8 @@ struct waveform_private {
+ struct comedi_device *dev; /* parent comedi device */
+ u64 ao_last_scan_time; /* time of previous AO scan in usec */
+ unsigned int ao_scan_period; /* AO scan period in usec */
++ bool ai_timer_enable:1; /* should AI timer be running? */
++ bool ao_timer_enable:1; /* should AO timer be running? */
+ unsigned short ao_loopbacks[N_CHANS];
+ };
+
+@@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t)
+ time_increment = devpriv->ai_convert_time - now;
+ else
+ time_increment = 1;
+- mod_timer(&devpriv->ai_timer,
+- jiffies + usecs_to_jiffies(time_increment));
++ spin_lock(&dev->spinlock);
++ if (devpriv->ai_timer_enable) {
++ mod_timer(&devpriv->ai_timer,
++ jiffies + usecs_to_jiffies(time_increment));
++ }
++ spin_unlock(&dev->spinlock);
+ }
+
+ overrun:
+@@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
+ * Seem to need an extra jiffy here, otherwise timer expires slightly
+ * early!
+ */
++ spin_lock_bh(&dev->spinlock);
++ devpriv->ai_timer_enable = true;
+ devpriv->ai_timer.expires =
+ jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
+ add_timer(&devpriv->ai_timer);
++ spin_unlock_bh(&dev->spinlock);
+ return 0;
+ }
+
+@@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
+ {
+ struct waveform_private *devpriv = dev->private;
+
++ spin_lock_bh(&dev->spinlock);
++ devpriv->ai_timer_enable = false;
++ spin_unlock_bh(&dev->spinlock);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ai_timer);
+@@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t)
+ unsigned int time_inc = devpriv->ao_last_scan_time +
+ devpriv->ao_scan_period - now;
+
+- mod_timer(&devpriv->ao_timer,
+- jiffies + usecs_to_jiffies(time_inc));
++ spin_lock(&dev->spinlock);
++ if (devpriv->ao_timer_enable) {
++ mod_timer(&devpriv->ao_timer,
++ jiffies + usecs_to_jiffies(time_inc));
++ }
++ spin_unlock(&dev->spinlock);
+ }
+
+ underrun:
+@@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
+ async->inttrig = NULL;
+
+ devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
++ spin_lock_bh(&dev->spinlock);
++ devpriv->ao_timer_enable = true;
+ devpriv->ao_timer.expires =
+ jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
+ add_timer(&devpriv->ao_timer);
++ spin_unlock_bh(&dev->spinlock);
+
+ return 1;
+ }
+@@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
+ struct waveform_private *devpriv = dev->private;
+
+ s->async->inttrig = NULL;
++ spin_lock_bh(&dev->spinlock);
++ devpriv->ao_timer_enable = false;
++ spin_unlock_bh(&dev->spinlock);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ao_timer);
+diff --git a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
+index d55521b5bdcb2d..892a66b2cea665 100644
+--- a/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
++++ b/drivers/comedi/drivers/ni_routing/tools/convert_c_to_py.c
+@@ -140,6 +140,11 @@ int main(void)
+ {
+ FILE *fp = fopen("ni_values.py", "w");
+
++ if (fp == NULL) {
++ fprintf(stderr, "Could not open file!\n");
++ return -1;
++ }
++
+ /* write route register values */
+ fprintf(fp, "ni_route_values = {\n");
+ for (int i = 0; ni_all_route_values[i]; ++i)
+diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
+index 4536ed43f65b27..84dce5184a77ae 100644
+--- a/drivers/comedi/drivers/vmk80xx.c
++++ b/drivers/comedi/drivers/vmk80xx.c
+@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
+- struct usb_endpoint_descriptor *ep_desc;
+- int i;
+-
+- if (iface_desc->desc.bNumEndpoints != 2)
+- return -ENODEV;
+-
+- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+- ep_desc = &iface_desc->endpoint[i].desc;
+-
+- if (usb_endpoint_is_int_in(ep_desc) ||
+- usb_endpoint_is_bulk_in(ep_desc)) {
+- if (!devpriv->ep_rx)
+- devpriv->ep_rx = ep_desc;
+- continue;
+- }
++ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
++ int ret;
+
+- if (usb_endpoint_is_int_out(ep_desc) ||
+- usb_endpoint_is_bulk_out(ep_desc)) {
+- if (!devpriv->ep_tx)
+- devpriv->ep_tx = ep_desc;
+- continue;
+- }
+- }
++ if (devpriv->model == VMK8061_MODEL)
++ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
++ &ep_tx_desc, NULL, NULL);
++ else
++ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
++ &ep_rx_desc, &ep_tx_desc);
+
+- if (!devpriv->ep_rx || !devpriv->ep_tx)
++ if (ret)
+ return -ENODEV;
+
++ devpriv->ep_rx = ep_rx_desc;
++ devpriv->ep_tx = ep_tx_desc;
++
+ if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
+ return -EINVAL;
+
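
The vmk80xx rewrite above replaces the open-coded endpoint walk with usb_find_common_endpoints(), which succeeds only if every requested endpoint is present, so a malicious device exposing two same-direction endpoints can no longer leave ep_rx or ep_tx NULL. Usage in miniature (wrapper invented; the helper and its argument order of bulk in, bulk out, int in, int out is the real USB core API):

    #include <linux/usb.h>

    /* Pick the first bulk-in/bulk-out pair, as the VMK8061 path does.
     * Returns -ENXIO unless both endpoints exist. */
    static int find_bulk_pair(struct usb_host_interface *alt,
                              struct usb_endpoint_descriptor **rx,
                              struct usb_endpoint_descriptor **tx)
    {
            return usb_find_common_endpoints(alt, rx, tx, NULL, NULL);
    }
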
+diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
+index b0f24cf3e891de..4d3de4a35801fc 100644
+--- a/drivers/counter/ti-eqep.c
++++ b/drivers/counter/ti-eqep.c
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/bitops.h>
++#include <linux/clk.h>
+ #include <linux/counter.h>
+ #include <linux/kernel.h>
+ #include <linux/mod_devicetable.h>
+@@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
+ struct counter_device *counter;
+ struct ti_eqep_cnt *priv;
+ void __iomem *base;
++ struct clk *clk;
+ int err;
+
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+@@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
++ clk = devm_clk_get_enabled(dev, NULL);
++ if (IS_ERR(clk))
++ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
++
+ err = counter_add(counter);
+ if (err < 0) {
+ pm_runtime_put_sync(dev);
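
ti-eqep above gains devm_clk_get_enabled(), which combines devm_clk_get() with clk_prepare_enable() and registers the matching disable and put with devres, so neither an explicit error unwind nor remove-path cleanup is needed. A hedged sketch of typical probe-time use (function name invented):

    #include <linux/clk.h>
    #include <linux/device.h>

    static int claim_functional_clock(struct device *dev)
    {
            /* NULL id: take the first/only clock of the device node. */
            struct clk *clk = devm_clk_get_enabled(dev, NULL);

            if (IS_ERR(clk))
                    return dev_err_probe(dev, PTR_ERR(clk),
                                         "failed to enable clock\n");
            return 0;       /* clock is disabled and put automatically on detach */
    }
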
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index 123b4bbfcfee19..c5cecbd89ba9ce 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -173,6 +173,7 @@ config ARM_QCOM_CPUFREQ_NVMEM
+ config ARM_QCOM_CPUFREQ_HW
+ tristate "QCOM CPUFreq HW driver"
+ depends on ARCH_QCOM || COMPILE_TEST
++ depends on COMMON_CLK
+ help
+ Support for the CPUFreq HW driver.
+ Some QCOM chipsets have a HW engine to offload the steps
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 37f1cdf46d2918..4ac3a35dcd983c 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -890,8 +890,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+
+- if (acpi_cpufreq_driver.set_boost)
++ if (acpi_cpufreq_driver.set_boost) {
+ set_boost(policy, acpi_cpufreq_driver.boost_enabled);
++ policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
++ }
+
+ return result;
+
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 9a1e194d5cf882..f461f99eb040c6 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -37,6 +37,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/static_call.h>
+ #include <linux/amd-pstate.h>
++#include <linux/topology.h>
+
+ #include <acpi/processor.h>
+ #include <acpi/cppc_acpi.h>
+@@ -49,6 +50,8 @@
+
+ #define AMD_PSTATE_TRANSITION_LATENCY 20000
+ #define AMD_PSTATE_TRANSITION_DELAY 1000
++#define CPPC_HIGHEST_PERF_PERFORMANCE 196
++#define CPPC_HIGHEST_PERF_DEFAULT 166
+
+ /*
+ * TODO: We need more time to fine tune processors with shared memory solution
+@@ -64,6 +67,7 @@ static struct cpufreq_driver amd_pstate_driver;
+ static struct cpufreq_driver amd_pstate_epp_driver;
+ static int cppc_state = AMD_PSTATE_UNDEFINED;
+ static bool cppc_enabled;
++static bool amd_pstate_prefcore = true;
+
+ /*
+ * AMD Energy Preference Performance (EPP)
+@@ -175,6 +179,26 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+ return index;
+ }
+
++static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
++ u32 des_perf, u32 max_perf, bool fast_switch)
++{
++ if (fast_switch)
++ wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
++ else
++ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
++ READ_ONCE(cpudata->cppc_req_cached));
++}
++
++DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
++
++static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
++ u32 min_perf, u32 des_perf,
++ u32 max_perf, bool fast_switch)
++{
++ static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
++ max_perf, fast_switch);
++}
++
+ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+ {
+ int ret;
+@@ -191,6 +215,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+ if (!ret)
+ cpudata->epp_cached = epp;
+ } else {
++ amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
++ cpudata->max_limit_perf, false);
++
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+@@ -287,6 +314,21 @@ static inline int amd_pstate_enable(bool enable)
+ return static_call(amd_pstate_enable)(enable);
+ }
+
++static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
++{
++ struct cpuinfo_x86 *c = &cpu_data(0);
++
++ /*
++ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
++ * the highest performance level is set to 196.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
++ */
++ if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
++ return CPPC_HIGHEST_PERF_PERFORMANCE;
++
++ return CPPC_HIGHEST_PERF_DEFAULT;
++}
++
+ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ {
+ u64 cap1;
+@@ -297,21 +339,22 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ if (ret)
+ return ret;
+
+- /*
+- * TODO: Introduce AMD specific power feature.
+- *
+- * CPPC entry doesn't indicate the highest performance in some ASICs.
++ /* For platforms that do not support the preferred core feature, the
++ * highest_perf may be configured with 166 or 255, to avoid the max
++ * frequency being calculated wrongly. We take the AMD_CPPC_HIGHEST_PERF(cap1)
++ * value as the default max perf.
+ */
+- highest_perf = amd_get_highest_perf();
+- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
++ if (cpudata->hw_prefcore)
++ highest_perf = amd_pstate_highest_perf_set(cpudata);
++ else
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+-
++ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+-
++ WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ return 0;
+ }
+
+@@ -324,16 +367,18 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
+ if (ret)
+ return ret;
+
+- highest_perf = amd_get_highest_perf();
+- if (highest_perf > cppc_perf.highest_perf)
++ if (cpudata->hw_prefcore)
++ highest_perf = amd_pstate_highest_perf_set(cpudata);
++ else
+ highest_perf = cppc_perf.highest_perf;
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+-
++ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf,
+ cppc_perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
+@@ -360,16 +405,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
+ return static_call(amd_pstate_init_perf)(cpudata);
+ }
+
+-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+- u32 des_perf, u32 max_perf, bool fast_switch)
+-{
+- if (fast_switch)
+- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+- else
+- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+- READ_ONCE(cpudata->cppc_req_cached));
+-}
+-
+ static void cppc_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+@@ -383,16 +418,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata,
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+
+-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+-
+-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+- u32 min_perf, u32 des_perf,
+- u32 max_perf, bool fast_switch)
+-{
+- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+- max_perf, fast_switch);
+-}
+-
+ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
+ {
+ u64 aperf, mperf, tsc;
+@@ -432,6 +457,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+ u64 value = prev;
+
++ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
++ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+
+ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
+@@ -470,6 +499,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+ return 0;
+ }
+
++static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
++{
++ u32 max_limit_perf, min_limit_perf;
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
++ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
++
++ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
++ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
++ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
++
++ return 0;
++}
++
+ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, bool fast_switch)
+ {
+@@ -480,6 +525,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ if (!cpudata->max_freq)
+ return -ENODEV;
+
++ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
++ amd_pstate_update_min_max_limit(policy);
++
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ max_perf = cap_perf;
+@@ -518,7 +566,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
+ static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+ {
+- return amd_pstate_update_freq(policy, target_freq, true);
++ if (!amd_pstate_update_freq(policy, target_freq, true))
++ return target_freq;
++ return policy->cur;
+ }
+
+ static void amd_pstate_adjust_perf(unsigned int cpu,
+@@ -532,6 +582,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ struct amd_cpudata *cpudata = policy->driver_data;
+ unsigned int target_freq;
+
++ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
++ amd_pstate_update_min_max_limit(policy);
++
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
+@@ -540,7 +594,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ if (target_perf < capacity)
+ des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
+
+- min_perf = READ_ONCE(cpudata->highest_perf);
++ min_perf = READ_ONCE(cpudata->lowest_perf);
+ if (_min_perf < capacity)
+ min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+
+@@ -676,6 +730,80 @@ static void amd_perf_ctl_reset(unsigned int cpu)
+ wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ }
+
++/*
++ * Setting amd-pstate preferred core enable can't be done directly from cpufreq callbacks
++ * due to locking, so queue the work for later.
++ */
++static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
++{
++ sched_set_itmt_support();
++}
++static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
++
++/*
++ * Get the highest performance register value.
++ * @cpu: CPU from which to get highest performance.
++ * @highest_perf: address at which to return the highest performance value.
++ *
++ * Return: 0 for success, -EIO otherwise.
++ */
++static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
++{
++ int ret;
++
++ if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ u64 cap1;
++
++ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
++ if (ret)
++ return ret;
++ WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
++ } else {
++ u64 cppc_highest_perf;
++
++ ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
++ if (ret)
++ return ret;
++ WRITE_ONCE(*highest_perf, cppc_highest_perf);
++ }
++
++ return ret;
++}
++
++#define CPPC_MAX_PERF U8_MAX
++
++static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
++{
++ int ret, prio;
++ u32 highest_perf;
++
++ ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
++ if (ret)
++ return;
++
++ cpudata->hw_prefcore = true;
++ /* check if CPPC preferred core feature is enabled */
++ if (highest_perf < CPPC_MAX_PERF)
++ prio = (int)highest_perf;
++ else {
++ pr_debug("AMD CPPC preferred core is unsupported!\n");
++ cpudata->hw_prefcore = false;
++ return;
++ }
++
++ if (!amd_pstate_prefcore)
++ return;
++
++ /*
++ * The priorities can be set regardless of whether or not
++ * sched_set_itmt_support(true) has been called, and it is valid to
++ * update them at any time after it has been called.
++ */
++ sched_set_itmt_core_prio(prio, cpudata->cpu);
++
++ schedule_work(&sched_prefcore_work);
++}
++
+ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ {
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+@@ -697,6 +825,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+
+ cpudata->cpu = policy->cpu;
+
++ amd_pstate_init_prefcore(cpudata);
++
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+@@ -745,6 +875,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
++ cpudata->max_limit_freq = max_freq;
++ cpudata->min_limit_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+@@ -845,16 +977,32 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
+ return sysfs_emit(buf, "%u\n", perf);
+ }
+
++static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
++ char *buf)
++{
++ bool hw_prefcore;
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
++
++ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
++}
++
+ static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+ {
+ int i = 0;
+ int offset = 0;
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
++ return sysfs_emit_at(buf, offset, "%s\n",
++ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
+
+ while (energy_perf_strings[i] != NULL)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+
+- sysfs_emit_at(buf, offset, "\n");
++ offset += sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+ }
+@@ -1037,18 +1185,27 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
+ return ret < 0 ? ret : count;
+ }
+
++static ssize_t prefcore_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
++}
++
+ cpufreq_freq_attr_ro(amd_pstate_max_freq);
+ cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
+
+ cpufreq_freq_attr_ro(amd_pstate_highest_perf);
++cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
+ cpufreq_freq_attr_rw(energy_performance_preference);
+ cpufreq_freq_attr_ro(energy_performance_available_preferences);
+ static DEVICE_ATTR_RW(status);
++static DEVICE_ATTR_RO(prefcore);
+
+ static struct freq_attr *amd_pstate_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
++ &amd_pstate_hw_prefcore,
+ NULL,
+ };
+
+@@ -1056,6 +1213,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
++ &amd_pstate_hw_prefcore,
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+@@ -1063,6 +1221,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
+
+ static struct attribute *pstate_global_attributes[] = {
+ &dev_attr_status.attr,
++ &dev_attr_prefcore.attr,
+ NULL
+ };
+
+@@ -1114,6 +1273,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+ cpudata->cpu = policy->cpu;
+ cpudata->epp_policy = 0;
+
++ amd_pstate_init_prefcore(cpudata);
++
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+@@ -1179,21 +1340,36 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+
+ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+ {
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ if (cpudata) {
++ kfree(cpudata);
++ policy->driver_data = NULL;
++ }
++
+ pr_debug("CPU %d exiting\n", policy->cpu);
+ return 0;
+ }
+
+-static void amd_pstate_epp_init(unsigned int cpu)
++static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+ {
+- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+- u32 max_perf, min_perf;
++ u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
+ u64 value;
+ s16 epp;
+
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
++ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
++ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
++ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
++ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
++
++ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
++ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
++ cpudata->max_limit_perf);
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+@@ -1210,9 +1386,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(0);
+
+- if (cpudata->epp_policy == cpudata->policy)
+- goto skip_epp;
+-
+ cpudata->epp_policy = cpudata->policy;
+
+ /* Get BIOS pre-defined epp value */
+@@ -1222,7 +1395,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
+ * This return value can only be negative for shared_memory
+ * systems where EPP register read/write not supported.
+ */
+- goto skip_epp;
++ return;
+ }
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+@@ -1236,8 +1409,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_set_epp(cpudata, epp);
+-skip_epp:
+- cpufreq_cpu_put(policy);
+ }
+
+ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+@@ -1252,7 +1423,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+
+ cpudata->policy = policy->policy;
+
+- amd_pstate_epp_init(policy->cpu);
++ amd_pstate_epp_update_limit(policy);
+
+ return 0;
+ }
+@@ -1527,7 +1698,17 @@ static int __init amd_pstate_param(char *str)
+
+ return amd_pstate_set_driver(mode_idx);
+ }
++
++static int __init amd_prefcore_param(char *str)
++{
++ if (!strcmp(str, "disable"))
++ amd_pstate_prefcore = false;
++
++ return 0;
++}
++
+ early_param("amd_pstate", amd_pstate_param);
++early_param("amd_prefcore", amd_prefcore_param);
+
+ MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
+ MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
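
Taken together, the amd-pstate additions above rank CPUs for the scheduler: each core's CPPC highest-perf value becomes its ITMT priority, and ITMT support itself is enabled from a work item because sched_set_itmt_support() takes sleeping locks that cpufreq callbacks may not. In outline (loop wrapper invented; amd_pstate_get_highest_perf(), sched_set_itmt_core_prio() and sched_prefcore_work are as in the patch):

    static void rank_cpus_by_highest_perf(void)
    {
            int cpu;
            u32 hp;

            for_each_online_cpu(cpu) {
                    if (amd_pstate_get_highest_perf(cpu, &hp))
                            continue;
                    /* higher CPPC perf means more attractive to the scheduler */
                    sched_set_itmt_core_prio((int)hp, cpu);
            }
            schedule_work(&sched_prefcore_work);    /* enables ITMT support later */
    }
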
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 35fb3a559ea97b..ea8438550b4901 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -481,7 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+ {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+- struct private_data *priv = policy->driver_data;
++ struct private_data *priv;
++
++ if (!policy)
++ return 0;
++
++ priv = policy->driver_data;
+
+ cpufreq_cpu_put(policy);
+
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index fe08ca419b3dc3..1ba3943be8a3dd 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -844,10 +844,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+- struct cppc_cpudata *cpu_data = policy->driver_data;
++ struct cppc_cpudata *cpu_data;
+ u64 delivered_perf;
+ int ret;
+
++ if (!policy)
++ return -ENODEV;
++
++ cpu_data = policy->driver_data;
++
+ cpufreq_cpu_put(policy);
+
+ ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
+@@ -927,10 +932,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
+ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+- struct cppc_cpudata *cpu_data = policy->driver_data;
++ struct cppc_cpudata *cpu_data;
+ u64 desired_perf;
+ int ret;
+
++ if (!policy)
++ return -ENODEV;
++
++ cpu_data = policy->driver_data;
++
+ cpufreq_cpu_put(policy);
+
+ ret = cppc_get_desired_perf(cpu, &desired_perf);
+diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
+index 8bd6e5e8f121ce..2d83bbc65dd0bd 100644
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
+ if (!priv)
+ return -ENOMEM;
+
+- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
++ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(cpu, priv->cpus);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 60ed89000e82dc..df445b44e9ec0b 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -644,14 +644,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
+ if (policy->boost_enabled == enable)
+ return count;
+
++ policy->boost_enabled = enable;
++
+ cpus_read_lock();
+ ret = cpufreq_driver->set_boost(policy, enable);
+ cpus_read_unlock();
+
+- if (ret)
++ if (ret) {
++ policy->boost_enabled = !policy->boost_enabled;
+ return ret;
+-
+- policy->boost_enabled = enable;
++ }
+
+ return count;
+ }
+@@ -1419,6 +1421,10 @@ static int cpufreq_online(unsigned int cpu)
+ goto out_free_policy;
+ }
+
++ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
++ if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
++ policy->boost_enabled = true;
++
+ /*
+ * The initialization has succeeded and the policy is online.
+ * If there is a problem with its frequency table, take it
+@@ -1571,7 +1577,8 @@ static int cpufreq_online(unsigned int cpu)
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
+- if (cpufreq_thermal_control_enabled(cpufreq_driver))
++ /* Register cpufreq cooling only for a new policy */
++ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
+ policy->cdev = of_cpufreq_cooling_register(policy);
+
+ pr_debug("initialization complete\n");
+@@ -1655,11 +1662,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
+ else
+ policy->last_policy = policy->policy;
+
+- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+- cpufreq_cooling_unregister(policy->cdev);
+- policy->cdev = NULL;
+- }
+-
+ if (has_target())
+ cpufreq_exit_governor(policy);
+
+@@ -1669,10 +1671,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
+ */
+ if (cpufreq_driver->offline) {
+ cpufreq_driver->offline(policy);
+- } else if (cpufreq_driver->exit) {
+- cpufreq_driver->exit(policy);
+- policy->freq_table = NULL;
++ return;
+ }
++
++ if (cpufreq_driver->exit)
++ cpufreq_driver->exit(policy);
++
++ policy->freq_table = NULL;
+ }
+
+ static int cpufreq_offline(unsigned int cpu)
+@@ -1720,8 +1725,17 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+ return;
+ }
+
++ /*
++ * Unregister cpufreq cooling once all the CPUs of the policy are
++ * removed.
++ */
++ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
++ cpufreq_cooling_unregister(policy->cdev);
++ policy->cdev = NULL;
++ }
++
+ /* We did light-weight exit earlier, do full tear down now */
+- if (cpufreq_driver->offline)
++ if (cpufreq_driver->offline && cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ up_write(&policy->rwsem);
+@@ -2756,11 +2770,12 @@ int cpufreq_boost_trigger_state(int state)
+
+ cpus_read_lock();
+ for_each_active_policy(policy) {
++ policy->boost_enabled = state;
+ ret = cpufreq_driver->set_boost(policy, state);
+- if (ret)
++ if (ret) {
++ policy->boost_enabled = !policy->boost_enabled;
+ goto err_reset_state;
+-
+- policy->boost_enabled = state;
++ }
+ }
+ cpus_read_unlock();
+
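
Both boost paths in cpufreq.c above now flip policy->boost_enabled before invoking the driver's ->set_boost() and undo the flip if the callback fails, so a driver that consults the per-policy flag from inside set_boost() observes the requested state. The ordering as a small sketch (wrapper invented; the fields are the real ones):

    static int apply_boost(struct cpufreq_policy *policy, bool enable)
    {
            int ret;

            policy->boost_enabled = enable;         /* visible inside ->set_boost() */
            ret = cpufreq_driver->set_boost(policy, enable);
            if (ret)
                    policy->boost_enabled = !enable;        /* roll back on failure */
            return ret;
    }
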
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index a33df3c66c88c2..40a9ff18da068c 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " : ");
+ for (i = 0; i < stats->state_num; i++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
+ }
+- if (len >= PAGE_SIZE)
+- return PAGE_SIZE;
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ for (i = 0; i < stats->state_num; i++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
+
+ for (j = 0; j < stats->state_num; j++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+
+ if (pending)
+@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+
+ len += sysfs_emit_at(buf, len, "%9u ", count);
+ }
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "\n");
+ }
+
+- if (len >= PAGE_SIZE) {
++ if (len >= PAGE_SIZE - 1) {
+ pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
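
The cpufreq_stats comparisons above move from PAGE_SIZE to PAGE_SIZE - 1 because sysfs_emit_at() writes at most PAGE_SIZE - 1 characters (one byte is reserved for the terminating NUL), so the accumulated length saturates at PAGE_SIZE - 1 and the old >= PAGE_SIZE test could never fire. The truncation check in isolation (name invented):

    /* sysfs_emit_at() caps its output at PAGE_SIZE - 1 bytes, hence the bound. */
    static bool stats_buf_full(int len)
    {
            return len >= PAGE_SIZE - 1;
    }
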
+diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
+index c4d4643b6ca650..c17dc51a5a022d 100644
+--- a/drivers/cpufreq/freq_table.c
++++ b/drivers/cpufreq/freq_table.c
+@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+- if (!cpufreq_boost_enabled()
++ if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
+ && (pos->flags & CPUFREQ_BOOST_FREQ))
+ continue;
+
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index 494d044b9e7207..33728c242f66ca 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
+ imx6x_disable_freq_in_opp(dev, 696000000);
+
+ if (of_machine_is_compatible("fsl,imx6ull")) {
+- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
++ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ imx6x_disable_freq_in_opp(dev, 792000000);
+
+ if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index dc50c9fb488dfc..8a4fdf212ce0de 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -356,15 +356,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+- if (ret)
+- return;
+-
+ /*
+- * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+- * In this case we can't use CPPC.highest_perf to enable ITMT.
+- * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
++ * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
++ *
++ * Also, on some systems with overclocking enabled, CPPC.highest_perf is
++ * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
++ * Fall back to MSR_HWP_CAPABILITIES then too.
+ */
+- if (cppc_perf.highest_perf == CPPC_MAX_PERF)
++ if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
+ cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+ /*
+@@ -526,6 +525,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
+ }
+ #endif /* CONFIG_ACPI_CPPC_LIB */
+
++static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
++ unsigned int relation)
++{
++ if (freq == cpu->pstate.turbo_freq)
++ return cpu->pstate.turbo_pstate;
++
++ if (freq == cpu->pstate.max_freq)
++ return cpu->pstate.max_pstate;
++
++ switch (relation) {
++ case CPUFREQ_RELATION_H:
++ return freq / cpu->pstate.scaling;
++ case CPUFREQ_RELATION_C:
++ return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
++ }
++
++ return DIV_ROUND_UP(freq, cpu->pstate.scaling);
++}
++
++static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
++{
++ return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
++}
++
+ /**
+ * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
+ * @cpu: Target CPU.
+@@ -543,6 +566,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+ int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
+ int scaling = cpu->pstate.scaling;
++ int freq;
+
+ pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+@@ -556,16 +580,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+ cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+ perf_ctl_scaling);
+
+- cpu->pstate.max_pstate_physical =
+- DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
+- scaling);
++ freq = perf_ctl_max_phys * perf_ctl_scaling;
++ cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
+
+- cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
++ freq = cpu->pstate.min_pstate * perf_ctl_scaling;
++ cpu->pstate.min_freq = freq;
+ /*
+ * Cast the min P-state value retrieved via pstate_funcs.get_min() to
+ * the effective range of HWP performance levels.
+ */
+- cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
++ cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
+ }
+
+ static inline void update_turbo_state(void)
+@@ -1608,7 +1632,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ }
+
+-static DEFINE_SPINLOCK(hwp_notify_lock);
++static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
+ static cpumask_t hwp_intr_enable_mask;
+
+ void notify_hwp_interrupt(void)
+@@ -1625,7 +1649,7 @@ void notify_hwp_interrupt(void)
+ if (!(value & 0x01))
+ return;
+
+- spin_lock_irqsave(&hwp_notify_lock, flags);
++ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
+
+ if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+ goto ack_intr;
+@@ -1649,13 +1673,13 @@ void notify_hwp_interrupt(void)
+
+ schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+
+- spin_unlock_irqrestore(&hwp_notify_lock, flags);
++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ return;
+
+ ack_intr:
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+- spin_unlock_irqrestore(&hwp_notify_lock, flags);
++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ }
+
+ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+@@ -1668,10 +1692,10 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+ /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+- spin_lock_irqsave(&hwp_notify_lock, flags);
++ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
+ if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
+ cancel_delayed_work(&cpudata->hwp_notify_work);
+- spin_unlock_irqrestore(&hwp_notify_lock, flags);
++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ }
+
+ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+@@ -1680,10 +1704,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+ unsigned long flags;
+
+- spin_lock_irqsave(&hwp_notify_lock, flags);
++ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
+ INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+ cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+- spin_unlock_irqrestore(&hwp_notify_lock, flags);
++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+@@ -2528,13 +2552,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ * abstract values to represent performance rather than pure ratios.
+ */
+ if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+- int scaling = cpu->pstate.scaling;
+ int freq;
+
+ freq = max_policy_perf * perf_ctl_scaling;
+- max_policy_perf = DIV_ROUND_UP(freq, scaling);
++ max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ freq = min_policy_perf * perf_ctl_scaling;
+- min_policy_perf = DIV_ROUND_UP(freq, scaling);
++ min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ }
+
+ pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
+@@ -2908,18 +2931,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+- switch (relation) {
+- case CPUFREQ_RELATION_L:
+- target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+- break;
+- case CPUFREQ_RELATION_H:
+- target_pstate = freqs.new / cpu->pstate.scaling;
+- break;
+- default:
+- target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+- break;
+- }
+-
++ target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
+
+ freqs.new = target_pstate * cpu->pstate.scaling;
+@@ -2937,7 +2949,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+
+ update_turbo_state();
+
+- target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
++ target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
+
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
+
+@@ -2974,6 +2986,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+ if (min_pstate < cpu->min_perf_ratio)
+ min_pstate = cpu->min_perf_ratio;
+
++ if (min_pstate > cpu->max_perf_ratio)
++ min_pstate = cpu->max_perf_ratio;
++
+ max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+ if (max_pstate < min_pstate)
+ max_pstate = min_pstate;
+@@ -3121,10 +3136,10 @@ static void intel_pstate_driver_cleanup(void)
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_clear_update_util_hook(cpu);
+
+- spin_lock(&hwp_notify_lock);
++ raw_spin_lock(&hwp_notify_lock);
+ kfree(all_cpu_data[cpu]);
+ WRITE_ONCE(all_cpu_data[cpu], NULL);
+- spin_unlock(&hwp_notify_lock);
++ raw_spin_unlock(&hwp_notify_lock);
+ }
+ }
+ cpus_read_unlock();
+diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
+index d46afb3c009230..8d097dcddda47d 100644
+--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
++++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
+@@ -13,6 +13,7 @@
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
+ #include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
+
+ #define LUT_MAX_ENTRIES 32U
+@@ -300,7 +301,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
+ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
+ {
+ const void *data;
+- int ret;
++ int ret, cpu;
++ struct device *cpu_dev;
++ struct regulator *cpu_reg;
++
++ /* Make sure that all CPU supplies are available before proceeding. */
++ for_each_possible_cpu(cpu) {
++ cpu_dev = get_cpu_device(cpu);
++ if (!cpu_dev)
++ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
++ "Failed to get cpu%d device\n", cpu);
++
++ cpu_reg = devm_regulator_get(cpu_dev, "cpu");
++ if (IS_ERR(cpu_reg))
++ return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
++ "CPU%d regulator get failed\n", cpu);
++ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
+index 84d7033e5efe83..ef51dfb39baa92 100644
+--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
+@@ -40,10 +40,14 @@ struct qcom_cpufreq_match_data {
+ const char **genpd_names;
+ };
+
++struct qcom_cpufreq_drv_cpu {
++ int opp_token;
++};
++
+ struct qcom_cpufreq_drv {
+- int *opp_tokens;
+ u32 versions;
+ const struct qcom_cpufreq_match_data *data;
++ struct qcom_cpufreq_drv_cpu cpus[];
+ };
+
+ static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+@@ -243,42 +247,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
+ return -ENOENT;
+ }
+
+- drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+- if (!drv)
++ drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
++ GFP_KERNEL);
++ if (!drv) {
++ of_node_put(np);
+ return -ENOMEM;
++ }
+
+ match = pdev->dev.platform_data;
+ drv->data = match->data;
+ if (!drv->data) {
+- ret = -ENODEV;
+- goto free_drv;
++ of_node_put(np);
++ return -ENODEV;
+ }
+
+ if (drv->data->get_version) {
+ speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ if (IS_ERR(speedbin_nvmem)) {
+- ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+- "Could not get nvmem cell\n");
+- goto free_drv;
++ of_node_put(np);
++ return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
++ "Could not get nvmem cell\n");
+ }
+
+ ret = drv->data->get_version(cpu_dev,
+ speedbin_nvmem, &pvs_name, drv);
+ if (ret) {
++ of_node_put(np);
+ nvmem_cell_put(speedbin_nvmem);
+- goto free_drv;
++ return ret;
+ }
+ nvmem_cell_put(speedbin_nvmem);
+ }
+ of_node_put(np);
+
+- drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
+- GFP_KERNEL);
+- if (!drv->opp_tokens) {
+- ret = -ENOMEM;
+- goto free_drv;
+- }
+-
+ for_each_possible_cpu(cpu) {
+ struct dev_pm_opp_config config = {
+ .supported_hw = NULL,
+@@ -304,9 +305,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
+ }
+
+ if (config.supported_hw || config.genpd_names) {
+- drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+- if (drv->opp_tokens[cpu] < 0) {
+- ret = drv->opp_tokens[cpu];
++ drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
++ if (drv->cpus[cpu].opp_token < 0) {
++ ret = drv->cpus[cpu].opp_token;
+ dev_err(cpu_dev, "Failed to set OPP config\n");
+ goto free_opp;
+ }
+@@ -325,11 +326,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
+
+ free_opp:
+ for_each_possible_cpu(cpu)
+- dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+- kfree(drv->opp_tokens);
+-free_drv:
+- kfree(drv);
+-
++ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ return ret;
+ }
+
+@@ -341,10 +338,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
+ platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu)
+- dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+-
+- kfree(drv->opp_tokens);
+- kfree(drv);
++ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
+
+ static struct platform_driver qcom_cpufreq_driver = {
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index f34e6382a4c500..079940c69ee0ba 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -62,9 +62,9 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+ {
+ struct scmi_data *priv = policy->driver_data;
++ unsigned long freq = target_freq;
+
+- if (!perf_ops->freq_set(ph, priv->domain_id,
+- target_freq * 1000, true))
++ if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
+ return target_freq;
+
+ return 0;
+@@ -310,8 +310,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
+
+ #ifdef CONFIG_COMMON_CLK
+ /* dummy clock provider as needed by OPP if clocks property is used */
+- if (of_property_present(dev->of_node, "#clock-cells"))
+- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
++ if (of_property_present(dev->of_node, "#clock-cells")) {
++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
++ if (ret)
++ return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
++ }
+ #endif
+
+ ret = cpufreq_register_driver(&scmi_cpufreq_driver);
+diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
+index 88ef5e57ccd05c..386aed3637b4ef 100644
+--- a/drivers/cpufreq/tegra194-cpufreq.c
++++ b/drivers/cpufreq/tegra194-cpufreq.c
+@@ -450,6 +450,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ if (IS_ERR(opp))
+ continue;
+
++ dev_pm_opp_put(opp);
++
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
+ if (ret < 0)
+ return ret;
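
The tegra194 fix above pairs the successful dev_pm_opp_find_freq_exact() lookup with dev_pm_opp_put(): OPP lookups return a reference-counted object, and skipping the put leaks a reference on every table rebuild. The required pairing (wrapper invented):

    #include <linux/pm_opp.h>

    static void check_opp_exists(struct device *cpu_dev, unsigned long hz)
    {
            struct dev_pm_opp *opp =
                    dev_pm_opp_find_freq_exact(cpu_dev, hz, true);

            if (IS_ERR(opp))
                    return;
            dev_pm_opp_put(opp);    /* balance the reference taken by the lookup */
    }
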
+diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
+index 3c37d78996607f..cb5d1c8fefeb44 100644
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -61,6 +61,9 @@ struct ti_cpufreq_soc_data {
+ unsigned long efuse_shift;
+ unsigned long rev_offset;
+ bool multi_regulator;
+/* Backward compatibility hack: the syscon may be missing */
++#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
++ u8 quirks;
+ };
+
+ struct ti_cpufreq_data {
+@@ -182,6 +185,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
+ .efuse_mask = BIT(3),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
++ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
+ };
+
+ /*
+@@ -209,6 +213,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = {
+ .efuse_mask = BIT(9),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = true,
++ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
+ };
+
+ /*
+@@ -223,6 +228,7 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
+ .efuse_mask = 0,
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
++ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
+ };
+
+ static struct ti_cpufreq_soc_data am625_soc_data = {
+@@ -250,7 +256,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
+
+ ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
+ &efuse);
+- if (ret == -EIO) {
++ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->efuse_offset, 4);
+@@ -291,7 +297,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
+
+ ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
+ &revision);
+- if (ret == -EIO) {
++ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->rev_offset, 4);
+@@ -418,7 +424,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
+
+ ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ if (ret < 0) {
+- dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
++ dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n");
+ goto fail_put_node;
+ }
+
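Before this change, any -EIO from regmap_read() sent every SoC down the ioremap fallback; the hunks above key that fallback to an explicit per-SoC quirk bit instead. A standalone sketch of the gating logic, with the names and the -EIO stand-in local to the example:

    #include <stdbool.h>
    #include <stdio.h>

    #define EIO 5
    /* mirrors TI_QUIRK_SYSCON_MAY_BE_MISSING in the patch above */
    #define QUIRK_SYSCON_MAY_BE_MISSING 0x1

    struct soc_data {
    	const char *name;
    	unsigned char quirks;	/* OR of QUIRK_* bits, 0 for sane SoCs */
    };

    /* Only the legacy OMAP3-era parts opt in; newer SoCs leave quirks at 0. */
    static const struct soc_data omap36xx = { "omap36xx", QUIRK_SYSCON_MAY_BE_MISSING };
    static const struct soc_data am625 = { "am625", 0 };

    static bool use_ioremap_fallback(const struct soc_data *soc, int ret)
    {
    	/* fall back only when the SoC opted in AND the regmap read failed */
    	return (soc->quirks & QUIRK_SYSCON_MAY_BE_MISSING) && ret == -EIO;
    }

    int main(void)
    {
    	printf("omap36xx: %d, am625: %d\n",
    	       use_ioremap_fallback(&omap36xx, -EIO),
    	       use_ioremap_fallback(&am625, -EIO));
    	return 0;
    }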
+diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
+index e66df22f96955f..d8515d5c0853dc 100644
+--- a/drivers/cpuidle/cpuidle-haltpoll.c
++++ b/drivers/cpuidle/cpuidle-haltpoll.c
+@@ -25,13 +25,12 @@ MODULE_PARM_DESC(force, "Load unconditionally");
+ static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
+ static enum cpuhp_state haltpoll_hp_state;
+
+-static int default_enter_idle(struct cpuidle_device *dev,
+- struct cpuidle_driver *drv, int index)
++static __cpuidle int default_enter_idle(struct cpuidle_device *dev,
++ struct cpuidle_driver *drv, int index)
+ {
+- if (current_clr_polling_and_test()) {
+- local_irq_enable();
++ if (current_clr_polling_and_test())
+ return index;
+- }
++
+ arch_cpu_idle();
+ return index;
+ }
+diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
+index e8094fc92491eb..c0fe92409175a4 100644
+--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
+@@ -8,6 +8,7 @@
+
+ #define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+
++#include <linux/cleanup.h>
+ #include <linux/cpuhotplug.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpumask.h>
+@@ -267,19 +268,16 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
+ {
+ struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
+ struct device_node *state_node;
+- struct device_node *cpu_node;
+ u32 *states;
+ int i, ret;
+
+- cpu_node = of_cpu_device_node_get(cpu);
++ struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
+- if (!states) {
+- ret = -ENOMEM;
+- goto fail;
+- }
++ if (!states)
++ return -ENOMEM;
+
+ /* Parse SBI specific details from state DT nodes */
+ for (i = 1; i < state_count; i++) {
+@@ -295,10 +293,8 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
+
+ pr_debug("sbi-state %#x index %d\n", states[i], i);
+ }
+- if (i != state_count) {
+- ret = -ENODEV;
+- goto fail;
+- }
++ if (i != state_count)
++ return -ENODEV;
+
+ /* Initialize optional data, used for the hierarchical topology. */
+ ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
+@@ -308,10 +304,7 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
+ /* Store states in the per-cpu struct. */
+ data->states = states;
+
+-fail:
+- of_node_put(cpu_node);
+-
+- return ret;
++ return 0;
+ }
+
+ static void sbi_cpuidle_deinit_cpu(int cpu)
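The riscv-sbi conversion above replaces goto-based unwinding with the scope-based cleanup helpers from <linux/cleanup.h>. Declaring a variable with __free(device_node) attaches of_node_put() to it (via DEFINE_FREE in the OF headers), so the put runs automatically at every scope exit, including each early return. A kernel-only sketch of the idiom; the function body is illustrative:

    static int count_states(int cpu)
    {
    	/* of_node_put(cpu_node) runs automatically whenever this scope ends */
    	struct device_node *cpu_node __free(device_node) =
    					of_cpu_device_node_get(cpu);

    	if (!cpu_node)
    		return -ENODEV;		/* cleanup skips NULL pointers */

    	if (!of_property_present(cpu_node, "cpu-idle-states"))
    		return 0;		/* of_node_put() still runs here */

    	/* normal path: of_node_put() runs when cpu_node leaves scope */
    	return 1;
    }

This is why the patch can delete the fail: label and return directly from every error branch.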
+diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
+index d9cda7f6ccb98d..cf5873cc45dc8c 100644
+--- a/drivers/cpuidle/driver.c
++++ b/drivers/cpuidle/driver.c
+@@ -16,6 +16,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/tick.h>
+ #include <linux/cpu.h>
++#include <linux/math64.h>
+
+ #include "cpuidle.h"
+
+@@ -187,7 +188,7 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+ s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
+
+ if (s->exit_latency > 0)
+- s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
++ s->exit_latency_ns = mul_u32_u32(s->exit_latency, NSEC_PER_USEC);
+ else if (s->exit_latency_ns < 0)
+ s->exit_latency_ns = 0;
+ else
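s->exit_latency is a u32, so the old expression s->exit_latency * NSEC_PER_USEC multiplied in 32 bits and wrapped for latencies above roughly 4.29 seconds before the already-truncated result was widened into the s64 exit_latency_ns. mul_u32_u32() performs the multiply at full 64-bit width. A host-side sketch, with the kernel helper re-implemented locally for illustration (assumes a 32-bit int, as on the relevant targets):

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-in for the kernel's mul_u32_u32(): widen before multiplying */
    static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
    {
    	return (uint64_t)a * b;
    }

    int main(void)
    {
    	uint32_t exit_latency = 5000000;	/* 5 s expressed in microseconds */

    	uint64_t wrapped = exit_latency * 1000U;	/* 32-bit multiply, wraps */
    	uint64_t correct = mul_u32_u32(exit_latency, 1000);

    	printf("wrapped=%llu correct=%llu\n",
    	       (unsigned long long)wrapped, (unsigned long long)correct);
    	return 0;
    }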
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+index 8d4c42863a621e..d2cf9619018b1a 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+@@ -299,22 +299,6 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
+ return err;
+ }
+
+-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+-{
+- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+- struct sun8i_ce_dev *ce = op->ce;
+- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+- int flow, err;
+-
+- flow = rctx->flow;
+- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+- local_bh_disable();
+- crypto_finalize_skcipher_request(engine, breq, err);
+- local_bh_enable();
+-}
+-
+ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
+ void *async_req)
+ {
+@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
+ dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
+ }
+
++static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
++{
++ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
++ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
++ struct sun8i_ce_dev *ce = op->ce;
++ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
++ int flow, err;
++
++ flow = rctx->flow;
++ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
++ sun8i_ce_cipher_unprepare(engine, areq);
++ local_bh_disable();
++ crypto_finalize_skcipher_request(engine, breq, err);
++ local_bh_enable();
++}
++
+ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
+ {
+ int err = sun8i_ce_cipher_prepare(engine, areq);
+@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
+ return err;
+
+ sun8i_ce_cipher_run(engine, areq);
+- sun8i_ce_cipher_unprepare(engine, areq);
+ return 0;
+ }
+
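Relocating sun8i_ce_cipher_unprepare() above is an ordering fix, not a cleanup: crypto_finalize_skcipher_request() completes the request, after which the driver must not touch it, so the DMA unmapping has to happen first. An annotated outline of the resulting sequence inside the engine callback, mirroring the hunk above:

    err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
    sun8i_ce_cipher_unprepare(engine, areq);	/* 1. unmap DMA while the
    						 *    request is still ours */
    local_bh_disable();
    crypto_finalize_skcipher_request(engine, breq, err);	/* 2. hand it back */
    local_bh_enable();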
+diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
+index 07989bb8c220a4..3fdc64b5a65e7e 100644
+--- a/drivers/crypto/bcm/spu2.c
++++ b/drivers/crypto/bcm/spu2.c
+@@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
+ if (hash_iv_len) {
+ packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
+ packet_dump(" hash IV: ", ptr, hash_iv_len);
+- ptr += ciph_key_len;
++ ptr += hash_iv_len;
+ }
+
+ if (ciph_iv_len) {
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index eba2d750c3b074..066f08a3a040d8 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -575,7 +575,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
+ return -EINVAL;
+
+- ctx->cdata.key_virt = key;
++ memcpy(ctx->key, key, keylen);
++ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 9156bbe038b7b0..a148ff1f0872c4 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -641,7 +641,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ if (keylen != CHACHA_KEY_SIZE + saltlen)
+ return -EINVAL;
+
+- ctx->cdata.key_virt = key;
++ memcpy(ctx->key, key, keylen);
++ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 290c8500c247f9..65785dc5b73b2b 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
+ GFP_KERNEL : GFP_ATOMIC;
+ struct ahash_edesc *edesc;
+
++ sg_num = pad_sg_nents(sg_num);
+ edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
+ if (!edesc)
+ return NULL;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index aa4e1a5006919d..cb8e99936abb72 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+
+ wa->dma.address = dma_map_single(wa->dev, wa->address, len,
+ dir);
+- if (dma_mapping_error(wa->dev, wa->dma.address))
++ if (dma_mapping_error(wa->dev, wa->dma.address)) {
++ kfree(wa->address);
++ wa->address = NULL;
+ return -ENOMEM;
++ }
+
+ wa->dma.length = len;
+ }
+diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
+index 839ea14b9a853f..6f33149ef80df0 100644
+--- a/drivers/crypto/ccp/dbc.c
++++ b/drivers/crypto/ccp/dbc.c
+@@ -205,7 +205,7 @@ int dbc_dev_init(struct psp_device *psp)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
+- dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
++ dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!dbc_dev->mbox) {
+ ret = -ENOMEM;
+ goto cleanup_dev;
+diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
+index 94367bc49e35b8..1b8ed33897332e 100644
+--- a/drivers/crypto/ccp/platform-access.c
++++ b/drivers/crypto/ccp/platform-access.c
+@@ -118,9 +118,16 @@ int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
+ goto unlock;
+ }
+
+- /* Store the status in request header for caller to investigate */
++ /*
++ * Read status from PSP. If status is non-zero, it indicates an error
++ * occurred during "processing" of the command.
++ * If status is zero, it indicates the command was "processed"
++ * successfully, but the result of the command is in the payload.
++ * Return both cases to the caller as -EIO to investigate.
++ */
+ cmd_reg = ioread32(cmd);
+- req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
++ if (FIELD_GET(PSP_CMDRESP_STS, cmd_reg))
++ req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+ if (req->header.status) {
+ ret = -EIO;
+ goto unlock;
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index f97166fba9d930..07e6f782b62252 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -520,10 +520,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
+
+ static int __sev_platform_shutdown_locked(int *error)
+ {
+- struct sev_device *sev = psp_master->sev_data;
++ struct psp_device *psp = psp_master;
++ struct sev_device *sev;
+ int ret;
+
+- if (!sev || sev->state == SEV_STATE_UNINIT)
++ if (!psp || !psp->sev_data)
++ return 0;
++
++ sev = psp->sev_data;
++
++ if (sev->state == SEV_STATE_UNINIT)
+ return 0;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+@@ -1361,6 +1367,8 @@ void sev_pci_init(void)
+ return;
+
+ err:
++ sev_dev_destroy(psp_master);
++
+ psp_master->sev_data = NULL;
+ }
+
+diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
+index 7d79a8744f9a6a..c43ad7e1acf7ea 100644
+--- a/drivers/crypto/ccp/sp-platform.c
++++ b/drivers/crypto/ccp/sp-platform.c
+@@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
+ },
+ };
+
+-#ifdef CONFIG_ACPI
+ static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+-#endif
+
+-#ifdef CONFIG_OF
+ static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+ };
+ MODULE_DEVICE_TABLE(of, sp_of_match);
+-#endif
+
+ static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+ {
+-#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(sp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct sp_dev_vdata *)match->data;
+-#endif
++
+ return NULL;
+ }
+
+ static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+ {
+-#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(sp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct sp_dev_vdata *)match->driver_data;
+-#endif
++
+ return NULL;
+ }
+
+@@ -214,12 +208,8 @@ static int sp_platform_resume(struct platform_device *pdev)
+ static struct platform_driver sp_platform_driver = {
+ .driver = {
+ .name = "ccp",
+-#ifdef CONFIG_ACPI
+ .acpi_match_table = sp_acpi_match,
+-#endif
+-#ifdef CONFIG_OF
+ .of_match_table = sp_of_match,
+-#endif
+ },
+ .probe = sp_platform_probe,
+ .remove = sp_platform_remove,
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index 2cc1591949db7e..bd205f1f2279e4 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -794,8 +794,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
+ {
+ int i;
+
++ if (!dregs)
++ return;
++
+ /* Set the pointer to NULL to prevent double free */
+ for (i = 0; i < reg_len; i++) {
++ if (!dregs[i].regs)
++ continue;
++
+ kfree(dregs[i].regs);
+ dregs[i].regs = NULL;
+ }
+@@ -845,14 +851,21 @@ static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ static int qm_diff_regs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len)
+ {
++ int ret;
++
+ qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+- if (IS_ERR(qm->debug.qm_diff_regs))
+- return PTR_ERR(qm->debug.qm_diff_regs);
++ if (IS_ERR(qm->debug.qm_diff_regs)) {
++ ret = PTR_ERR(qm->debug.qm_diff_regs);
++ qm->debug.qm_diff_regs = NULL;
++ return ret;
++ }
+
+ qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+ if (IS_ERR(qm->debug.acc_diff_regs)) {
+ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+- return PTR_ERR(qm->debug.acc_diff_regs);
++ ret = PTR_ERR(qm->debug.acc_diff_regs);
++ qm->debug.acc_diff_regs = NULL;
++ return ret;
+ }
+
+ return 0;
+@@ -893,7 +906,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
+ static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
+ {
+ dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++ qm->debug.acc_diff_regs = NULL;
+ dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++ qm->debug.qm_diff_regs = NULL;
+ }
+
+ /**
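Both debugfs hunks above enforce the same defensive rule: once a pointer that stays reachable (here the diff-regs tables cached in qm->debug) has been freed, reset it to NULL so a repeated uninit becomes a no-op instead of a double free. A generic standalone sketch of the rule:

    #include <stdlib.h>

    struct ctx {
    	int *regs;
    };

    static void ctx_uninit(struct ctx *c)
    {
    	if (!c->regs)		/* a second call becomes a harmless no-op */
    		return;
    	free(c->regs);
    	c->regs = NULL;		/* key step: poison the dangling pointer */
    }

    int main(void)
    {
    	struct ctx c = { .regs = malloc(16) };

    	ctx_uninit(&c);
    	ctx_uninit(&c);		/* safe: would double-free without the NULLing */
    	return 0;
    }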
+diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
+index 39297ce70f441e..3463f5ee83c0df 100644
+--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
++++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
+@@ -13,9 +13,7 @@
+ #include <linux/uacce.h>
+ #include "hpre.h"
+
+-#define HPRE_QM_ABNML_INT_MASK 0x100004
+ #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
+-#define HPRE_COMM_CNT_CLR_CE 0x0
+ #define HPRE_CTRL_CNT_CLR_CE 0x301000
+ #define HPRE_FSM_MAX_CNT 0x301008
+ #define HPRE_VFG_AXQOS 0x30100c
+@@ -42,7 +40,6 @@
+ #define HPRE_HAC_INT_SET 0x301500
+ #define HPRE_RNG_TIMEOUT_NUM 0x301A34
+ #define HPRE_CORE_INT_ENABLE 0
+-#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
+ #define HPRE_RDCHN_INI_ST 0x301a00
+ #define HPRE_CLSTR_BASE 0x302000
+ #define HPRE_CORE_EN_OFFSET 0x04
+@@ -66,7 +63,6 @@
+ #define HPRE_CLSTR_ADDR_INTRVL 0x1000
+ #define HPRE_CLUSTER_INQURY 0x100
+ #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
+-#define HPRE_TIMEOUT_ABNML_BIT 6
+ #define HPRE_PASID_EN_BIT 9
+ #define HPRE_REG_RD_INTVRL_US 10
+ #define HPRE_REG_RD_TMOUT_US 1000
+@@ -117,8 +113,6 @@
+ #define HPRE_DFX_COMMON2_LEN 0xE
+ #define HPRE_DFX_CORE_LEN 0x43
+
+-#define HPRE_DEV_ALG_MAX_LEN 256
+-
+ static const char hpre_name[] = "hisi_hpre";
+ static struct dentry *hpre_debugfs_root;
+ static const struct pci_device_id hpre_dev_ids[] = {
+@@ -134,12 +128,7 @@ struct hpre_hw_error {
+ const char *msg;
+ };
+
+-struct hpre_dev_alg {
+- u32 alg_msk;
+- const char *alg;
+-};
+-
+-static const struct hpre_dev_alg hpre_dev_algs[] = {
++static const struct qm_dev_alg hpre_dev_algs[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = "rsa\n"
+@@ -209,9 +198,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
+ {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
+ {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+- {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
+- {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
+- {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
++ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
++ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
++ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
+ {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
+@@ -232,6 +221,20 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+ };
+
++enum hpre_pre_store_cap_idx {
++ HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
++ HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
++ HPRE_DRV_ALG_BITMAP_CAP_IDX,
++ HPRE_DEV_ALG_BITMAP_CAP_IDX,
++};
++
++static const u32 hpre_pre_store_caps[] = {
++ HPRE_CLUSTER_NUM_CAP,
++ HPRE_CORE_ENABLE_BITMAP_CAP,
++ HPRE_DRV_ALG_BITMAP_CAP,
++ HPRE_DEV_ALG_BITMAP_CAP,
++};
++
+ static const struct hpre_hw_error hpre_hw_errors[] = {
+ {
+ .int_msk = BIT(0),
+@@ -350,46 +353,19 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
+ },
+ };
+
++static const struct hisi_qm_err_ini hpre_err_ini;
++
+ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+ {
+ u32 cap_val;
+
+- cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
++ cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
+ if (alg & cap_val)
+ return true;
+
+ return false;
+ }
+
+-static int hpre_set_qm_algs(struct hisi_qm *qm)
+-{
+- struct device *dev = &qm->pdev->dev;
+- char *algs, *ptr;
+- u32 alg_msk;
+- int i;
+-
+- if (!qm->use_sva)
+- return 0;
+-
+- algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+- if (!algs)
+- return -ENOMEM;
+-
+- alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+-
+- for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+- if (alg_msk & hpre_dev_algs[i].alg_msk)
+- strcat(algs, hpre_dev_algs[i].alg);
+-
+- ptr = strrchr(algs, '\n');
+- if (ptr)
+- *ptr = '\0';
+-
+- qm->uacce->algs = algs;
+-
+- return 0;
+-}
+-
+ static int hpre_diff_regs_show(struct seq_file *s, void *unused)
+ {
+ struct hisi_qm *qm = s->private;
+@@ -433,8 +409,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ }
+
+@@ -456,16 +435,6 @@ static u32 vfs_num;
+ module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+-static inline int hpre_cluster_num(struct hisi_qm *qm)
+-{
+- return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
+-}
+-
+-static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
+-{
+- return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
+-}
+-
+ struct hisi_qp *hpre_create_qp(u8 type)
+ {
+ int node = cpu_to_node(smp_processor_id());
+@@ -532,13 +501,15 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
+
+ static int hpre_set_cluster(struct hisi_qm *qm)
+ {
+- u32 cluster_core_mask = hpre_cluster_core_mask(qm);
+- u8 clusters_num = hpre_cluster_num(qm);
+ struct device *dev = &qm->pdev->dev;
+ unsigned long offset;
++ u32 cluster_core_mask;
++ u8 clusters_num;
+ u32 val = 0;
+ int ret, i;
+
++ cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
++ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ for (i = 0; i < clusters_num; i++) {
+ offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+@@ -680,11 +651,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
+ writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
+
+- /* HPRE need more time, we close this interrupt */
+- val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
+- val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
+- writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
+-
+ if (qm->ver >= QM_HW_V3)
+ writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
+ qm->io_base + HPRE_TYPES_ENB);
+@@ -693,9 +659,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
+
+ writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
+ writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
+- writel(0x0, qm->io_base + HPRE_INT_MASK);
+ writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
+- writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
+ writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
+
+ writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
+@@ -733,11 +697,12 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
+
+ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
+ {
+- u8 clusters_num = hpre_cluster_num(qm);
+ unsigned long offset;
++ u8 clusters_num;
+ int i;
+
+ /* clear clusterX/cluster_ctrl */
++ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ for (i = 0; i < clusters_num; i++) {
+ offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
+ writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
+@@ -784,7 +749,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
+
+ static void hpre_hw_error_enable(struct hisi_qm *qm)
+ {
+- u32 ce, nfe;
++ u32 ce, nfe, err_en;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+@@ -801,7 +766,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm)
+ hpre_master_ooo_ctrl(qm, true);
+
+ /* enable hpre hw error interrupts */
+- writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
++ err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
++ writel(~err_en, qm->io_base + HPRE_INT_MASK);
+ }
+
+ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
+@@ -1024,16 +990,17 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
+
+ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
+ {
+- u8 clusters_num = hpre_cluster_num(qm);
+ struct device *dev = &qm->pdev->dev;
+ char buf[HPRE_DBGFS_VAL_MAX_LEN];
+ struct debugfs_regset32 *regset;
+ struct dentry *tmp_d;
++ u8 clusters_num;
+ int i, ret;
+
++ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ for (i = 0; i < clusters_num; i++) {
+ ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+- if (ret < 0)
++ if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
+ return -EINVAL;
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
+
+@@ -1135,8 +1102,37 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
+ debugfs_remove_recursive(qm->debug.debug_root);
+ }
+
++static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
++{
++ struct hisi_qm_cap_record *hpre_cap;
++ struct device *dev = &qm->pdev->dev;
++ size_t i, size;
++
++ size = ARRAY_SIZE(hpre_pre_store_caps);
++ hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
++ if (!hpre_cap)
++ return -ENOMEM;
++
++ for (i = 0; i < size; i++) {
++ hpre_cap[i].type = hpre_pre_store_caps[i];
++ hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
++ hpre_pre_store_caps[i], qm->cap_ver);
++ }
++
++ if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
++ dev_err(dev, "Device cluster num %u exceeds the maximum of %d supported by the driver!\n",
++ hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
++ return -EINVAL;
++ }
++
++ qm->cap_tables.dev_cap_table = hpre_cap;
++
++ return 0;
++}
++
+ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ {
++ u64 alg_msk;
+ int ret;
+
+ if (pdev->revision == QM_HW_V1) {
+@@ -1157,6 +1153,9 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &hpre_devices;
++ qm->err_ini = &hpre_err_ini;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ }
+
+ ret = hisi_qm_init(qm);
+@@ -1165,7 +1164,16 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ return ret;
+ }
+
+- ret = hpre_set_qm_algs(qm);
++ /* Fetch and save the value of capability registers */
++ ret = hpre_pre_store_cap_reg(qm);
++ if (ret) {
++ pci_err(pdev, "Failed to pre-store capability registers!\n");
++ hisi_qm_uninit(qm);
++ return ret;
++ }
++
++ alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
++ ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
+ if (ret) {
+ pci_err(pdev, "Failed to set hpre algs!\n");
+ hisi_qm_uninit(qm);
+@@ -1178,11 +1186,12 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
+ {
+ int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
+- u8 clusters_num = hpre_cluster_num(qm);
+ struct qm_debug *debug = &qm->debug;
+ void __iomem *io_base;
++ u8 clusters_num;
+ int i, j, idx;
+
++ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
+ com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ if (!debug->last_words)
+@@ -1219,10 +1228,10 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
+ {
+ int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
+ int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
+- u8 clusters_num = hpre_cluster_num(qm);
+ struct qm_debug *debug = &qm->debug;
+ struct pci_dev *pdev = qm->pdev;
+ void __iomem *io_base;
++ u8 clusters_num;
+ int i, j, idx;
+ u32 val;
+
+@@ -1237,6 +1246,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
+ hpre_com_dfx_regs[i].name, debug->last_words[i], val);
+ }
+
++ clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ for (i = 0; i < clusters_num; i++) {
+ io_base = qm->io_base + hpre_cluster_offsets[i];
+ for (j = 0; j < cluster_dfx_regs_num; j++) {
+@@ -1333,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
+
+ hpre_open_sva_prefetch(qm);
+
+- qm->err_ini = &hpre_err_ini;
+- qm->err_ini->err_info_init(qm);
+ hisi_qm_dev_err_init(qm);
+ ret = hpre_show_last_regs_init(qm);
+ if (ret)
+@@ -1363,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre)
+ return 0;
+ }
+
++static void hpre_probe_uninit(struct hisi_qm *qm)
++{
++ if (qm->fun_type == QM_HW_VF)
++ return;
++
++ hpre_cnt_regs_clear(qm);
++ qm->debug.curr_qm_qp_num = 0;
++ hpre_show_last_regs_uninit(qm);
++ hpre_close_sva_prefetch(qm);
++ hisi_qm_dev_err_uninit(qm);
++}
++
+ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ struct hisi_qm *qm;
+@@ -1388,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+- goto err_with_err_init;
++ goto err_with_probe_init;
+
+ ret = hpre_debugfs_init(qm);
+ if (ret)
+@@ -1425,9 +1445,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ hpre_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
+-err_with_err_init:
+- hpre_show_last_regs_uninit(qm);
+- hisi_qm_dev_err_uninit(qm);
++err_with_probe_init:
++ hpre_probe_uninit(qm);
+
+ err_with_qm_init:
+ hisi_qm_uninit(qm);
+@@ -1448,13 +1467,7 @@ static void hpre_remove(struct pci_dev *pdev)
+ hpre_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
+- if (qm->fun_type == QM_HW_PF) {
+- hpre_cnt_regs_clear(qm);
+- qm->debug.curr_qm_qp_num = 0;
+- hpre_show_last_regs_uninit(qm);
+- hisi_qm_dev_err_uninit(qm);
+- }
+-
++ hpre_probe_uninit(qm);
+ hisi_qm_uninit(qm);
+ }
+
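The bulk of the hpre changes above (and the matching qm/sec/zip hunks below) implement one pattern: read each capability register once at probe time into a cached hisi_qm_cap_record table, then have every consumer index that cache through a stable enum instead of re-reading hardware, which can be unreliable while the device is resetting. Reduced to a hedged standalone sketch, with the register values faked locally:

    #include <stdio.h>
    #include <stdlib.h>

    struct cap_record {
    	unsigned int type;
    	unsigned int cap_val;
    };

    enum { CLUSTER_NUM_IDX, DRV_ALG_IDX, CAP_TABLE_SIZE };

    static const unsigned int pre_store_caps[CAP_TABLE_SIZE] = { 0x313c, 0x3144 };

    /* stand-in for hisi_qm_get_hw_info(): pretend to read a capability register */
    static unsigned int read_hw_cap(unsigned int reg)
    {
    	return reg & 0xff;
    }

    int main(void)
    {
    	struct cap_record *tbl = calloc(CAP_TABLE_SIZE, sizeof(*tbl));

    	if (!tbl)
    		return 1;

    	/* probe time: read each register once and cache the result */
    	for (int i = 0; i < CAP_TABLE_SIZE; i++) {
    		tbl[i].type = pre_store_caps[i];
    		tbl[i].cap_val = read_hw_cap(pre_store_caps[i]);
    	}

    	/* run time: consumers index the cache instead of touching hardware */
    	printf("cluster num cap: 0x%x\n", tbl[CLUSTER_NUM_IDX].cap_val);
    	free(tbl);
    	return 0;
    }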
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index a99fd589445cef..1b00edbbfe26a9 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -206,8 +206,6 @@
+ #define WAIT_PERIOD 20
+ #define REMOVE_WAIT_DELAY 10
+
+-#define QM_DRIVER_REMOVING 0
+-#define QM_RST_SCHED 1
+ #define QM_QOS_PARAM_NUM 2
+ #define QM_QOS_MAX_VAL 1000
+ #define QM_QOS_RATE 100
+@@ -230,6 +228,8 @@
+ #define QM_QOS_MAX_CIR_U 6
+ #define QM_AUTOSUSPEND_DELAY 3000
+
++#define QM_DEV_ALG_MAX_LEN 256
++
+ #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
+ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
+ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
+@@ -308,6 +308,13 @@ enum qm_basic_type {
+ QM_VF_IRQ_NUM_CAP,
+ };
+
++enum qm_pre_store_cap_idx {
++ QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
++ QM_AEQ_IRQ_TYPE_CAP_IDX,
++ QM_ABN_IRQ_TYPE_CAP_IDX,
++ QM_PF2VF_IRQ_TYPE_CAP_IDX,
++};
++
+ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+ {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
+ {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
+@@ -337,6 +344,13 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
+ };
+
++static const u32 qm_pre_store_caps[] = {
++ QM_EQ_IRQ_TYPE_CAP,
++ QM_AEQ_IRQ_TYPE_CAP,
++ QM_ABN_IRQ_TYPE_CAP,
++ QM_PF2VF_IRQ_TYPE_CAP,
++};
++
+ struct qm_mailbox {
+ __le16 w0;
+ __le16 queue_num;
+@@ -441,6 +455,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
+ };
+
+ static void qm_irqs_unregister(struct hisi_qm *qm);
++static int qm_reset_device(struct hisi_qm *qm);
+
+ static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
+ {
+@@ -789,6 +804,40 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
+ }
+
++int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
++ u32 dev_algs_size)
++{
++ struct device *dev = &qm->pdev->dev;
++ char *algs, *ptr;
++ int i;
++
++ if (!qm->uacce)
++ return 0;
++
++ if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
++ dev_err(dev, "algs size %u is equal to or larger than %d.\n",
++ dev_algs_size, QM_DEV_ALG_MAX_LEN);
++ return -EINVAL;
++ }
++
++ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
++ if (!algs)
++ return -ENOMEM;
++
++ for (i = 0; i < dev_algs_size; i++)
++ if (alg_msk & dev_algs[i].alg_msk)
++ strcat(algs, dev_algs[i].alg);
++
++ ptr = strrchr(algs, '\n');
++ if (ptr) {
++ *ptr = '\0';
++ qm->uacce->algs = algs;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(hisi_qm_set_algs);
++
+ static u32 qm_get_irq_num(struct hisi_qm *qm)
+ {
+ if (qm->fun_type == QM_HW_PF)
+@@ -849,53 +898,23 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
+ qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
+ qp->qp_status.cq_head, 0);
+ atomic_dec(&qp->qp_status.used);
++
++ cond_resched();
+ }
+
+ /* set c_flag */
+ qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
+ }
+
+-static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
+-{
+- struct hisi_qm *qm = poll_data->qm;
+- struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+- u16 eq_depth = qm->eq_depth;
+- int eqe_num = 0;
+- u16 cqn;
+-
+- while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+- poll_data->qp_finish_id[eqe_num] = cqn;
+- eqe_num++;
+-
+- if (qm->status.eq_head == eq_depth - 1) {
+- qm->status.eqc_phase = !qm->status.eqc_phase;
+- eqe = qm->eqe;
+- qm->status.eq_head = 0;
+- } else {
+- eqe++;
+- qm->status.eq_head++;
+- }
+-
+- if (eqe_num == (eq_depth >> 1) - 1)
+- break;
+- }
+-
+- qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+-
+- return eqe_num;
+-}
+-
+ static void qm_work_process(struct work_struct *work)
+ {
+ struct hisi_qm_poll_data *poll_data =
+ container_of(work, struct hisi_qm_poll_data, work);
+ struct hisi_qm *qm = poll_data->qm;
++ u16 eqe_num = poll_data->eqe_num;
+ struct hisi_qp *qp;
+- int eqe_num, i;
++ int i;
+
+- /* Get qp id of completed tasks and re-enable the interrupt. */
+- eqe_num = qm_get_complete_eqe_num(poll_data);
+ for (i = eqe_num - 1; i >= 0; i--) {
+ qp = &qm->qp_array[poll_data->qp_finish_id[i]];
+ if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
+@@ -911,39 +930,55 @@ static void qm_work_process(struct work_struct *work)
+ }
+ }
+
+-static bool do_qm_eq_irq(struct hisi_qm *qm)
++static void qm_get_complete_eqe_num(struct hisi_qm *qm)
+ {
+ struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+- struct hisi_qm_poll_data *poll_data;
+- u16 cqn;
++ struct hisi_qm_poll_data *poll_data = NULL;
++ u16 eq_depth = qm->eq_depth;
++ u16 cqn, eqe_num = 0;
+
+- if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
+- return false;
++ if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
++ atomic64_inc(&qm->debug.dfx.err_irq_cnt);
++ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
++ return;
++ }
+
+- if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
++ cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
++ if (unlikely(cqn >= qm->qp_num))
++ return;
++ poll_data = &qm->poll_data[cqn];
++
++ while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+ cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+- poll_data = &qm->poll_data[cqn];
+- queue_work(qm->wq, &poll_data->work);
++ poll_data->qp_finish_id[eqe_num] = cqn;
++ eqe_num++;
++
++ if (qm->status.eq_head == eq_depth - 1) {
++ qm->status.eqc_phase = !qm->status.eqc_phase;
++ eqe = qm->eqe;
++ qm->status.eq_head = 0;
++ } else {
++ eqe++;
++ qm->status.eq_head++;
++ }
+
+- return true;
++ if (eqe_num == (eq_depth >> 1) - 1)
++ break;
+ }
+
+- return false;
++ poll_data->eqe_num = eqe_num;
++ queue_work(qm->wq, &poll_data->work);
++ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+ }
+
+ static irqreturn_t qm_eq_irq(int irq, void *data)
+ {
+ struct hisi_qm *qm = data;
+- bool ret;
+-
+- ret = do_qm_eq_irq(qm);
+- if (ret)
+- return IRQ_HANDLED;
+
+- atomic64_inc(&qm->debug.dfx.err_irq_cnt);
+- qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
++ /* Get qp id of completed tasks and re-enable the interrupt */
++ qm_get_complete_eqe_num(qm);
+
+- return IRQ_NONE;
++ return IRQ_HANDLED;
+ }
+
+ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
+@@ -1025,6 +1060,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
+ u16 aeq_depth = qm->aeq_depth;
+ u32 type, qp_id;
+
++ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
++
+ while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
+ type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
+ qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+@@ -1062,17 +1099,6 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static irqreturn_t qm_aeq_irq(int irq, void *data)
+-{
+- struct hisi_qm *qm = data;
+-
+- atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
+- if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
+- return IRQ_NONE;
+-
+- return IRQ_WAKE_THREAD;
+-}
+-
+ static void qm_init_qp_status(struct hisi_qp *qp)
+ {
+ struct hisi_qp_status *qp_status = &qp->qp_status;
+@@ -2824,7 +2850,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
+ mutex_init(&qm->mailbox_lock);
+ init_rwsem(&qm->qps_lock);
+ qm->qp_in_used = 0;
+- qm->misc_ctl = false;
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
+ if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
+ dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
+@@ -2928,12 +2953,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
+ hisi_qm_set_state(qm, QM_NOT_READY);
+ up_write(&qm->qps_lock);
+
++ qm_remove_uacce(qm);
+ qm_irqs_unregister(qm);
+ hisi_qm_pci_uninit(qm);
+- if (qm->use_sva) {
+- uacce_remove(qm->uacce);
+- qm->uacce = NULL;
+- }
+ }
+ EXPORT_SYMBOL_GPL(hisi_qm_uninit);
+
+@@ -4084,6 +4106,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
+ return -ETIMEDOUT;
+ }
+
++static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
++{
++ u32 nfe_enb = 0;
++
++ /* Kunpeng930 hardware automatically closes master ooo when NFE occurs */
++ if (qm->ver >= QM_HW_V3)
++ return;
++
++ if (!qm->err_status.is_dev_ecc_mbit &&
++ qm->err_status.is_qm_ecc_mbit &&
++ qm->err_ini->close_axi_master_ooo) {
++ qm->err_ini->close_axi_master_ooo(qm);
++ } else if (qm->err_status.is_dev_ecc_mbit &&
++ !qm->err_status.is_qm_ecc_mbit &&
++ !qm->err_ini->close_axi_master_ooo) {
++ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
++ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
++ qm->io_base + QM_RAS_NFE_ENABLE);
++ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
++ }
++}
++
+ static int qm_vf_reset_prepare(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
+ {
+@@ -4148,6 +4192,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
+ return ret;
+ }
+
++ qm_dev_ecc_mbit_handle(qm);
++
+ /* PF obtains the information of VF by querying the register. */
+ qm_cmd_uninit(qm);
+
+@@ -4178,33 +4224,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
+ return 0;
+ }
+
+-static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
++static int qm_master_ooo_check(struct hisi_qm *qm)
+ {
+- u32 nfe_enb = 0;
++ u32 val;
++ int ret;
+
+- /* Kunpeng930 hardware automatically close master ooo when NFE occurs */
+- if (qm->ver >= QM_HW_V3)
+- return;
++ /* Check the device's ooo register before resetting the device. */
++ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
++ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
++ val, (val == ACC_MASTER_TRANS_RETURN_RW),
++ POLL_PERIOD, POLL_TIMEOUT);
++ if (ret)
++ pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
+
+- if (!qm->err_status.is_dev_ecc_mbit &&
+- qm->err_status.is_qm_ecc_mbit &&
+- qm->err_ini->close_axi_master_ooo) {
+- qm->err_ini->close_axi_master_ooo(qm);
+- } else if (qm->err_status.is_dev_ecc_mbit &&
+- !qm->err_status.is_qm_ecc_mbit &&
+- !qm->err_ini->close_axi_master_ooo) {
+- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+- qm->io_base + QM_RAS_NFE_ENABLE);
+- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+- }
++ return ret;
+ }
+
+-static int qm_soft_reset(struct hisi_qm *qm)
++static int qm_soft_reset_prepare(struct hisi_qm *qm)
+ {
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+- u32 val;
+
+ /* Ensure all doorbells and mailboxes received by QM */
+ ret = qm_check_req_recv(qm);
+@@ -4225,30 +4264,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
+ return ret;
+ }
+
+- qm_dev_ecc_mbit_handle(qm);
+-
+- /* OOO register set and check */
+- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+- qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+-
+- /* If bus lock, reset chip */
+- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+- val,
+- (val == ACC_MASTER_TRANS_RETURN_RW),
+- POLL_PERIOD, POLL_TIMEOUT);
+- if (ret) {
+- pci_emerg(pdev, "Bus lock! Please reset system.\n");
++ ret = qm_master_ooo_check(qm);
++ if (ret)
+ return ret;
+- }
+
+ if (qm->err_ini->close_sva_prefetch)
+ qm->err_ini->close_sva_prefetch(qm);
+
+ ret = qm_set_pf_mse(qm, false);
+- if (ret) {
++ if (ret)
+ pci_err(pdev, "Fails to disable pf MSE bit.\n");
+- return ret;
+- }
++
++ return ret;
++}
++
++static int qm_reset_device(struct hisi_qm *qm)
++{
++ struct pci_dev *pdev = qm->pdev;
+
+ /* The reset related sub-control registers are not in PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+@@ -4267,12 +4299,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+- } else {
+- pci_err(pdev, "No reset method!\n");
+- return -EINVAL;
++
++ return 0;
+ }
+
+- return 0;
++ pci_err(pdev, "No reset method!\n");
++ return -EINVAL;
++}
++
++static int qm_soft_reset(struct hisi_qm *qm)
++{
++ int ret;
++
++ ret = qm_soft_reset_prepare(qm);
++ if (ret)
++ return ret;
++
++ return qm_reset_device(qm);
+ }
+
+ static int qm_vf_reset_done(struct hisi_qm *qm)
+@@ -4929,7 +4972,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return;
+
+@@ -4946,7 +4989,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return 0;
+
+@@ -4963,7 +5006,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+@@ -4977,7 +5020,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+ u32 irq_vector, val;
+ int ret;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+@@ -4994,7 +5037,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+@@ -5008,13 +5051,13 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
+ u32 irq_vector, val;
+ int ret;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+- ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+- qm_aeq_thread, 0, qm->dev_name, qm);
++ ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
++ qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+@@ -5026,7 +5069,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+@@ -5040,7 +5083,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
+ u32 irq_vector, val;
+ int ret;
+
+- val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
++ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+@@ -5093,6 +5136,7 @@ static int qm_irqs_register(struct hisi_qm *qm)
+
+ static int qm_get_qp_num(struct hisi_qm *qm)
+ {
++ struct device *dev = &qm->pdev->dev;
+ bool is_db_isolation;
+
+ /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
+@@ -5109,17 +5153,47 @@ static int qm_get_qp_num(struct hisi_qm *qm)
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
+
+- /* check if qp number is valid */
+- if (qm->qp_num > qm->max_qp_num) {
+- dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
++ if (qm->qp_num <= qm->max_qp_num)
++ return 0;
++
++ if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
++ /* Check whether the set qp number is valid */
++ dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
+ qm->qp_num, qm->max_qp_num);
+ return -EINVAL;
+ }
+
++ dev_info(dev, "Default qp num (%u) is too big, resetting it to the function's max qp num (%u)!\n",
++ qm->qp_num, qm->max_qp_num);
++ qm->qp_num = qm->max_qp_num;
++ qm->debug.curr_qm_qp_num = qm->qp_num;
++
++ return 0;
++}
++
++static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
++{
++ struct hisi_qm_cap_record *qm_cap;
++ struct pci_dev *pdev = qm->pdev;
++ size_t i, size;
++
++ size = ARRAY_SIZE(qm_pre_store_caps);
++ qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
++ if (!qm_cap)
++ return -ENOMEM;
++
++ for (i = 0; i < size; i++) {
++ qm_cap[i].type = qm_pre_store_caps[i];
++ qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
++ qm_pre_store_caps[i], qm->cap_ver);
++ }
++
++ qm->cap_tables.qm_cap_table = qm_cap;
++
+ return 0;
+ }
+
+-static void qm_get_hw_caps(struct hisi_qm *qm)
++static int qm_get_hw_caps(struct hisi_qm *qm)
+ {
+ const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+ qm_cap_info_pf : qm_cap_info_vf;
+@@ -5150,6 +5224,9 @@ static void qm_get_hw_caps(struct hisi_qm *qm)
+ if (val)
+ set_bit(cap_info[i].type, &qm->caps);
+ }
++
++ /* Fetch and save the value of irq type related capability registers */
++ return qm_pre_store_irq_type_caps(qm);
+ }
+
+ static int qm_get_pci_res(struct hisi_qm *qm)
+@@ -5171,7 +5248,10 @@ static int qm_get_pci_res(struct hisi_qm *qm)
+ goto err_request_mem_regions;
+ }
+
+- qm_get_hw_caps(qm);
++ ret = qm_get_hw_caps(qm);
++ if (ret)
++ goto err_ioremap;
++
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
+ qm->db_interval = QM_QP_DB_INTERVAL;
+ qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+@@ -5203,6 +5283,35 @@ static int qm_get_pci_res(struct hisi_qm *qm)
+ return ret;
+ }
+
++static int qm_clear_device(struct hisi_qm *qm)
++{
++ acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev);
++ int ret;
++
++ if (qm->fun_type == QM_HW_VF)
++ return 0;
++
++ /* Device does not support reset, return */
++ if (!qm->err_ini->err_info_init)
++ return 0;
++ qm->err_ini->err_info_init(qm);
++
++ if (!handle)
++ return 0;
++
++ /* No reset method, return */
++ if (!acpi_has_method(handle, qm->err_info.acpi_rst))
++ return 0;
++
++ ret = qm_master_ooo_check(qm);
++ if (ret) {
++ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
++ return ret;
++ }
++
++ return qm_reset_device(qm);
++}
++
+ static int hisi_qm_pci_init(struct hisi_qm *qm)
+ {
+ struct pci_dev *pdev = qm->pdev;
+@@ -5232,8 +5341,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
+ goto err_get_pci_res;
+ }
+
++ ret = qm_clear_device(qm);
++ if (ret)
++ goto err_free_vectors;
++
+ return 0;
+
++err_free_vectors:
++ pci_free_irq_vectors(pdev);
+ err_get_pci_res:
+ qm_put_pci_res(qm);
+ err_disable_pcidev:
+@@ -5499,7 +5614,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
+ {
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+- u32 val;
+
+ ret = qm->ops->set_msi(qm, false);
+ if (ret) {
+@@ -5507,18 +5621,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
+ return ret;
+ }
+
+- /* shutdown OOO register */
+- writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+- qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+-
+- ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+- val,
+- (val == ACC_MASTER_TRANS_RETURN_RW),
+- POLL_PERIOD, POLL_TIMEOUT);
+- if (ret) {
+- pci_emerg(pdev, "Bus lock! Please reset system.\n");
++ ret = qm_master_ooo_check(qm);
++ if (ret)
+ return ret;
+- }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret)
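One qm.c change above is worth calling out: the AEQ registration drops its custom primary handler. With a NULL handler, request_threaded_irq() installs the IRQ core's default hard-IRQ handler, and the kernel requires IRQF_ONESHOT in that case so the line stays masked until the thread function returns; without it, a level-triggered source with nothing to quiesce it would re-fire endlessly, and the core rejects the registration outright. The registration shape, quoted from the hunk with the reasoning as comments:

    /* NULL primary handler: the IRQ core wakes qm_aeq_thread() directly.
     * IRQF_ONESHOT is mandatory here; with no primary handler to quiesce
     * the device, the line must stay disabled until the thread completes.
     */
    ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector),
    			   NULL, qm_aeq_thread, IRQF_ONESHOT,
    			   qm->dev_name, qm);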
+diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
+index 1406a422d45517..8e36aa9c681be4 100644
+--- a/drivers/crypto/hisilicon/qm_common.h
++++ b/drivers/crypto/hisilicon/qm_common.h
+@@ -4,7 +4,6 @@
+ #define QM_COMMON_H
+
+ #define QM_DBG_READ_LEN 256
+-#define QM_RESETTING 2
+
+ struct qm_cqe {
+ __le32 rsvd0;
+diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
+index 3e57fc04b37703..410c83712e2851 100644
+--- a/drivers/crypto/hisilicon/sec2/sec.h
++++ b/drivers/crypto/hisilicon/sec2/sec.h
+@@ -220,6 +220,13 @@ enum sec_cap_type {
+ SEC_CORE4_ALG_BITMAP_HIGH,
+ };
+
++enum sec_cap_reg_record_idx {
++ SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
++ SEC_DRV_ALG_BITMAP_HIGH_IDX,
++ SEC_DEV_ALG_BITMAP_LOW_IDX,
++ SEC_DEV_ALG_BITMAP_HIGH_IDX,
++};
++
+ void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+ struct hisi_qp **sec_create_qps(void);
+ int sec_register_to_crypto(struct hisi_qm *qm);
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 074e50ef512c11..932cc277eb3a5e 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -478,8 +478,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
+
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
+- if (ctx->alg_type == SEC_AEAD)
++ if (ctx->alg_type == SEC_AEAD) {
+ sec_free_mac_resource(dev, qp_ctx->res);
++ sec_free_aiv_resource(dev, qp_ctx->res);
++ }
+ }
+
+ static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+@@ -2543,8 +2545,12 @@ static int sec_register_aead(u64 alg_mask)
+
+ int sec_register_to_crypto(struct hisi_qm *qm)
+ {
+- u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
+- int ret;
++ u64 alg_mask;
++ int ret = 0;
++
++ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
++ SEC_DRV_ALG_BITMAP_LOW_IDX);
+
+ ret = sec_register_skcipher(alg_mask);
+ if (ret)
+@@ -2559,7 +2565,10 @@ int sec_register_to_crypto(struct hisi_qm *qm)
+
+ void sec_unregister_from_crypto(struct hisi_qm *qm)
+ {
+- u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
++ u64 alg_mask;
++
++ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
++ SEC_DRV_ALG_BITMAP_LOW_IDX);
+
+ sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index 77f9f131b85035..cf7b6a37e7df7a 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -120,7 +120,6 @@
+ GENMASK_ULL(42, 25))
+ #define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+ GENMASK_ULL(45, 43))
+-#define SEC_DEV_ALG_MAX_LEN 256
+
+ struct sec_hw_error {
+ u32 int_msk;
+@@ -132,11 +131,6 @@ struct sec_dfx_item {
+ u32 offset;
+ };
+
+-struct sec_dev_alg {
+- u64 alg_msk;
+- const char *algs;
+-};
+-
+ static const char sec_name[] = "hisi_sec2";
+ static struct dentry *sec_debugfs_root;
+
+@@ -173,15 +167,22 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
+ {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ };
+
+-static const struct sec_dev_alg sec_dev_algs[] = { {
++static const u32 sec_pre_store_caps[] = {
++ SEC_DRV_ALG_BITMAP_LOW,
++ SEC_DRV_ALG_BITMAP_HIGH,
++ SEC_DEV_ALG_BITMAP_LOW,
++ SEC_DEV_ALG_BITMAP_HIGH,
++};
++
++static const struct qm_dev_alg sec_dev_algs[] = { {
+ .alg_msk = SEC_CIPHER_BITMAP,
+- .algs = "cipher\n",
++ .alg = "cipher\n",
+ }, {
+ .alg_msk = SEC_DIGEST_BITMAP,
+- .algs = "digest\n",
++ .alg = "digest\n",
+ }, {
+ .alg_msk = SEC_AEAD_BITMAP,
+- .algs = "aead\n",
++ .alg = "aead\n",
+ },
+ };
+
+@@ -311,8 +312,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
+
++static bool pf_q_num_flag;
+ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ }
+
+@@ -391,8 +395,8 @@ u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+ {
+ u32 cap_val_h, cap_val_l;
+
+- cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+- cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
++ cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
++ cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;
+
+ return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+ }
+@@ -1057,9 +1061,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
+ struct hisi_qm *qm = &sec->qm;
+ int ret;
+
+- qm->err_ini = &sec_err_ini;
+- qm->err_ini->err_info_init(qm);
+-
+ ret = sec_set_user_domain_and_cache(qm);
+ if (ret)
+ return ret;
+@@ -1074,37 +1075,31 @@ static int sec_pf_probe_init(struct sec_dev *sec)
+ return ret;
+ }
+
+-static int sec_set_qm_algs(struct hisi_qm *qm)
++static int sec_pre_store_cap_reg(struct hisi_qm *qm)
+ {
+- struct device *dev = &qm->pdev->dev;
+- char *algs, *ptr;
+- u64 alg_mask;
+- int i;
+-
+- if (!qm->use_sva)
+- return 0;
++ struct hisi_qm_cap_record *sec_cap;
++ struct pci_dev *pdev = qm->pdev;
++ size_t i, size;
+
+- algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+- if (!algs)
++ size = ARRAY_SIZE(sec_pre_store_caps);
++ sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
++ if (!sec_cap)
+ return -ENOMEM;
+
+- alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+-
+- for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+- if (alg_mask & sec_dev_algs[i].alg_msk)
+- strcat(algs, sec_dev_algs[i].algs);
+-
+- ptr = strrchr(algs, '\n');
+- if (ptr)
+- *ptr = '\0';
++ for (i = 0; i < size; i++) {
++ sec_cap[i].type = sec_pre_store_caps[i];
++ sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
++ sec_pre_store_caps[i], qm->cap_ver);
++ }
+
+- qm->uacce->algs = algs;
++ qm->cap_tables.dev_cap_table = sec_cap;
+
+ return 0;
+ }
+
+ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ {
++ u64 alg_msk;
+ int ret;
+
+ qm->pdev = pdev;
+@@ -1120,6 +1115,9 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &sec_devices;
++ qm->err_ini = &sec_err_ini;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * have no way to get qm configure in VM in v1 hardware,
+@@ -1137,7 +1135,16 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ return ret;
+ }
+
+- ret = sec_set_qm_algs(qm);
++ /* Fetch and save the value of capability registers */
++ ret = sec_pre_store_cap_reg(qm);
++ if (ret) {
++ pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
++ hisi_qm_uninit(qm);
++ return ret;
++ }
++
++ alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
++ ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set sec algs!\n");
+ hisi_qm_uninit(qm);
+@@ -1173,6 +1180,12 @@ static int sec_probe_init(struct sec_dev *sec)
+
+ static void sec_probe_uninit(struct hisi_qm *qm)
+ {
++ if (qm->fun_type == QM_HW_VF)
++ return;
++
++ sec_debug_regs_clear(qm);
++ sec_show_last_regs_uninit(qm);
++ sec_close_sva_prefetch(qm);
+ hisi_qm_dev_err_uninit(qm);
+ }
+
+@@ -1265,7 +1278,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ sec_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+ err_probe_uninit:
+- sec_show_last_regs_uninit(qm);
+ sec_probe_uninit(qm);
+ err_qm_uninit:
+ sec_qm_uninit(qm);
+@@ -1287,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev)
+ sec_debugfs_exit(qm);
+
+ (void)hisi_qm_stop(qm, QM_NORMAL);
+-
+- if (qm->fun_type == QM_HW_PF)
+- sec_debug_regs_clear(qm);
+- sec_show_last_regs_uninit(qm);
+-
+ sec_probe_uninit(qm);
+
+ sec_qm_uninit(qm);
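The sec_main.c changes above replace per-call hisi_qm_get_hw_info() reads with a table of values cached once at probe time. A minimal sketch of that pre-store pattern, assuming the hisi_qm types used in the hunks (the helper name is illustrative):

        /* Read each capability register once and cache type/value pairs so
         * later lookups hit the table instead of the hardware. */
        static int pre_store_caps(struct hisi_qm *qm, const u32 *caps, size_t n,
                                  const struct hisi_qm_cap_info *info)
        {
                struct hisi_qm_cap_record *tbl;
                size_t i;

                tbl = devm_kzalloc(&qm->pdev->dev, sizeof(*tbl) * n, GFP_KERNEL);
                if (!tbl)
                        return -ENOMEM;

                for (i = 0; i < n; i++) {
                        tbl[i].type = caps[i];
                        tbl[i].cap_val = hisi_qm_get_hw_info(qm, info, caps[i],
                                                             qm->cap_ver);
                }

                qm->cap_tables.dev_cap_table = tbl;
                return 0;
        }

The zip_main.c diff below applies the same pattern with its own index enum and zip_pre_store_caps[] table.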
+diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
+index f3ce34198775d8..9d47b3675da7d4 100644
+--- a/drivers/crypto/hisilicon/zip/zip_main.c
++++ b/drivers/crypto/hisilicon/zip/zip_main.c
+@@ -73,7 +73,6 @@
+ #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
+ #define HZIP_WR_PORT BIT(11)
+
+-#define HZIP_DEV_ALG_MAX_LEN 256
+ #define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
+ #define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
+ #define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
+@@ -106,6 +105,14 @@
+ #define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \
+ HZIP_CORE_GATED_OOO_EN)
+
++/* zip comp high performance */
++#define HZIP_HIGH_PERF_OFFSET 0x301208
++
++enum {
++ HZIP_HIGH_COMP_RATE,
++ HZIP_HIGH_COMP_PERF,
++};
++
+ static const char hisi_zip_name[] = "hisi_zip";
+ static struct dentry *hzip_debugfs_root;
+
+@@ -119,23 +126,18 @@ struct zip_dfx_item {
+ u32 offset;
+ };
+
+-struct zip_dev_alg {
+- u32 alg_msk;
+- const char *algs;
+-};
+-
+-static const struct zip_dev_alg zip_dev_algs[] = { {
++static const struct qm_dev_alg zip_dev_algs[] = { {
+ .alg_msk = HZIP_ALG_ZLIB_BIT,
+- .algs = "zlib\n",
++ .alg = "zlib\n",
+ }, {
+ .alg_msk = HZIP_ALG_GZIP_BIT,
+- .algs = "gzip\n",
++ .alg = "gzip\n",
+ }, {
+ .alg_msk = HZIP_ALG_DEFLATE_BIT,
+- .algs = "deflate\n",
++ .alg = "deflate\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+- .algs = "lz77_zstd\n",
++ .alg = "lz77_zstd\n",
+ },
+ };
+
+@@ -246,6 +248,26 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+ {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+ };
+
++enum zip_pre_store_cap_idx {
++ ZIP_CORE_NUM_CAP_IDX = 0x0,
++ ZIP_CLUSTER_COMP_NUM_CAP_IDX,
++ ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
++ ZIP_DECOMP_ENABLE_BITMAP_IDX,
++ ZIP_COMP_ENABLE_BITMAP_IDX,
++ ZIP_DRV_ALG_BITMAP_IDX,
++ ZIP_DEV_ALG_BITMAP_IDX,
++};
++
++static const u32 zip_pre_store_caps[] = {
++ ZIP_CORE_NUM_CAP,
++ ZIP_CLUSTER_COMP_NUM_CAP,
++ ZIP_CLUSTER_DECOMP_NUM_CAP,
++ ZIP_DECOMP_ENABLE_BITMAP,
++ ZIP_COMP_ENABLE_BITMAP,
++ ZIP_DRV_ALG_BITMAP,
++ ZIP_DEV_ALG_BITMAP,
++};
++
+ enum {
+ HZIP_COMP_CORE0,
+ HZIP_COMP_CORE1,
+@@ -351,6 +373,37 @@ static int hzip_diff_regs_show(struct seq_file *s, void *unused)
+ return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);
++
++static int perf_mode_set(const char *val, const struct kernel_param *kp)
++{
++ int ret;
++ u32 n;
++
++ if (!val)
++ return -EINVAL;
++
++ ret = kstrtou32(val, 10, &n);
++ if (ret != 0 || (n != HZIP_HIGH_COMP_PERF &&
++ n != HZIP_HIGH_COMP_RATE))
++ return -EINVAL;
++
++ return param_set_int(val, kp);
++}
++
++static const struct kernel_param_ops zip_com_perf_ops = {
++ .set = perf_mode_set,
++ .get = param_get_int,
++};
++
++/*
++ * perf_mode = 0 means enable high compression rate mode,
++ * perf_mode = 1 means enable high compression performance mode.
++ * These two modes only apply to the compression direction.
++ */
++static u32 perf_mode = HZIP_HIGH_COMP_RATE;
++module_param_cb(perf_mode, &zip_com_perf_ops, &perf_mode, 0444);
++MODULE_PARM_DESC(perf_mode, "ZIP high perf mode 0(default), 1(enable)");
++
+ static const struct kernel_param_ops zip_uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+@@ -364,8 +417,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
+ module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
+ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
+
++static bool pf_q_num_flag;
+ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+ {
++ pf_q_num_flag = true;
++
+ return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ }
+
+@@ -406,40 +462,33 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+ {
+ u32 cap_val;
+
+- cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
++ cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
+ if ((alg & cap_val) == alg)
+ return true;
+
+ return false;
+ }
+
+-static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
++static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+ {
+- struct device *dev = &qm->pdev->dev;
+- char *algs, *ptr;
+- u32 alg_mask;
+- int i;
+-
+- if (!qm->use_sva)
+- return 0;
+-
+- algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+- if (!algs)
+- return -ENOMEM;
+-
+- alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+-
+- for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+- if (alg_mask & zip_dev_algs[i].alg_msk)
+- strcat(algs, zip_dev_algs[i].algs);
+-
+- ptr = strrchr(algs, '\n');
+- if (ptr)
+- *ptr = '\0';
++ u32 val;
++ int ret;
+
+- qm->uacce->algs = algs;
++ val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
++ if (perf_mode == HZIP_HIGH_COMP_PERF)
++ val |= HZIP_HIGH_COMP_PERF;
++ else
++ val &= ~HZIP_HIGH_COMP_PERF;
++
++ /* Set perf mode */
++ writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
++ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
++ val, val == perf_mode, HZIP_DELAY_1_US,
++ HZIP_POLL_TIMEOUT_US);
++ if (ret)
++ pci_err(qm->pdev, "failed to set perf mode\n");
+
+- return 0;
++ return ret;
+ }
+
+ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+@@ -538,10 +587,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
+ }
+
+ /* let's open all compression/decompression cores */
+- dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+- ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+- comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+- ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
++ dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
++ comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
+ writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
+
+ /* enable sqc,cqc writeback */
+@@ -768,9 +815,8 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
+ char buf[HZIP_BUF_SIZE];
+ int i;
+
+- zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+- zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+- qm->cap_ver);
++ zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
++ zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
+@@ -912,7 +958,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
+ u32 zip_core_num;
+ int i, j, idx;
+
+- zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
++ zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+
+ debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+ sizeof(unsigned int), GFP_KERNEL);
+@@ -968,9 +1014,9 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
+ hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ }
+
+- zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+- zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+- qm->cap_ver);
++ zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
++ zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
++
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
+ scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
+@@ -1104,13 +1150,15 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+
+ hisi_zip->ctrl = ctrl;
+ ctrl->hisi_zip = hisi_zip;
+- qm->err_ini = &hisi_zip_err_ini;
+- qm->err_ini->err_info_init(qm);
+
+ ret = hisi_zip_set_user_domain_and_cache(qm);
+ if (ret)
+ return ret;
+
++ ret = hisi_zip_set_high_perf(qm);
++ if (ret)
++ return ret;
++
+ hisi_zip_open_sva_prefetch(qm);
+ hisi_qm_dev_err_init(qm);
+ hisi_zip_debug_regs_clear(qm);
+@@ -1122,8 +1170,31 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+ return ret;
+ }
+
++static int zip_pre_store_cap_reg(struct hisi_qm *qm)
++{
++ struct hisi_qm_cap_record *zip_cap;
++ struct pci_dev *pdev = qm->pdev;
++ size_t i, size;
++
++ size = ARRAY_SIZE(zip_pre_store_caps);
++ zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
++ if (!zip_cap)
++ return -ENOMEM;
++
++ for (i = 0; i < size; i++) {
++ zip_cap[i].type = zip_pre_store_caps[i];
++ zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
++ zip_pre_store_caps[i], qm->cap_ver);
++ }
++
++ qm->cap_tables.dev_cap_table = zip_cap;
++
++ return 0;
++}
++
+ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ {
++ u64 alg_msk;
+ int ret;
+
+ qm->pdev = pdev;
+@@ -1139,6 +1210,9 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ qm->qm_list = &zip_devices;
++ qm->err_ini = &hisi_zip_err_ini;
++ if (pf_q_num_flag)
++ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
+ } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
+ /*
+ * have no way to get qm configure in VM in v1 hardware,
+@@ -1157,7 +1231,16 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+ return ret;
+ }
+
+- ret = hisi_zip_set_qm_algs(qm);
++ /* Fetch and save the value of capability registers */
++ ret = zip_pre_store_cap_reg(qm);
++ if (ret) {
++ pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
++ hisi_qm_uninit(qm);
++ return ret;
++ }
++
++ alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
++ ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set zip algs!\n");
+ hisi_qm_uninit(qm);
+@@ -1194,6 +1277,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
+ return 0;
+ }
+
++static void hisi_zip_probe_uninit(struct hisi_qm *qm)
++{
++ if (qm->fun_type == QM_HW_VF)
++ return;
++
++ hisi_zip_show_last_regs_uninit(qm);
++ hisi_zip_close_sva_prefetch(qm);
++ hisi_qm_dev_err_uninit(qm);
++}
++
+ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ struct hisi_zip *hisi_zip;
+@@ -1220,7 +1313,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+- goto err_dev_err_uninit;
++ goto err_probe_uninit;
+
+ ret = hisi_zip_debugfs_init(qm);
+ if (ret)
+@@ -1257,9 +1350,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ hisi_zip_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
+-err_dev_err_uninit:
+- hisi_zip_show_last_regs_uninit(qm);
+- hisi_qm_dev_err_uninit(qm);
++err_probe_uninit:
++ hisi_zip_probe_uninit(qm);
+
+ err_qm_uninit:
+ hisi_zip_qm_uninit(qm);
+@@ -1280,8 +1372,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
+
+ hisi_zip_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+- hisi_zip_show_last_regs_uninit(qm);
+- hisi_qm_dev_err_uninit(qm);
++ hisi_zip_probe_uninit(qm);
+ hisi_zip_qm_uninit(qm);
+ }
+
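The perf_mode parameter added above follows the usual validated module-parameter recipe: a custom .set hook rejects out-of-range input before param_set_int() stores it. A self-contained sketch of the recipe, with a hypothetical parameter name:

        #include <linux/kernel.h>
        #include <linux/moduleparam.h>

        static int demo_mode_set(const char *val, const struct kernel_param *kp)
        {
                u32 n;
                int ret;

                if (!val)
                        return -EINVAL;

                ret = kstrtou32(val, 10, &n);
                if (ret || n > 1)       /* only 0 and 1 are accepted */
                        return -EINVAL;

                return param_set_int(val, kp);  /* store the validated value */
        }

        static const struct kernel_param_ops demo_mode_ops = {
                .set = demo_mode_set,
                .get = param_get_int,
        };

        static u32 demo_mode;
        module_param_cb(demo_mode, &demo_mode_ops, &demo_mode, 0444);
        MODULE_PARM_DESC(demo_mode, "demo mode: 0 (default), 1 (alternate)");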
+diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
+index 272c28b5a0883e..b83818634ae477 100644
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -742,9 +742,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
+ max(totlen_src, totlen_dst));
+ return -EINVAL;
+ }
+- if (sreq->nr_src > 0)
+- dma_map_sg(priv->dev, src, sreq->nr_src,
+- DMA_BIDIRECTIONAL);
++ if (sreq->nr_src > 0 &&
++ !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL))
++ return -EIO;
+ } else {
+ if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+ dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
+@@ -752,8 +752,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
+ return -EINVAL;
+ }
+
+- if (sreq->nr_src > 0)
+- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
++ if (sreq->nr_src > 0 &&
++ !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE))
++ return -EIO;
+
+ if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
+ dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
+@@ -762,9 +763,11 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
+ goto unmap;
+ }
+
+- if (sreq->nr_dst > 0)
+- dma_map_sg(priv->dev, dst, sreq->nr_dst,
+- DMA_FROM_DEVICE);
++ if (sreq->nr_dst > 0 &&
++ !dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE)) {
++ ret = -EIO;
++ goto unmap;
++ }
+ }
+
+ memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
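The safexcel fix above stops ignoring the return value of dma_map_sg(), which reports the number of mapped entries and 0 on failure. A minimal sketch of the corrected call pattern (the helper is hypothetical):

        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>

        static int demo_map_src(struct device *dev, struct scatterlist *sg,
                                int nents)
        {
                /* dma_map_sg() returns 0 when nothing could be mapped; treat
                 * that as a hard error instead of continuing with an
                 * unmapped buffer. */
                if (nents > 0 && !dma_map_sg(dev, sg, nents, DMA_TO_DEVICE))
                        return -EIO;

                return 0;
        }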
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+index dd4464b7e00b18..615af08832076a 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+@@ -3,6 +3,7 @@
+ #include <linux/iopoll.h>
+ #include <adf_accel_devices.h>
+ #include <adf_cfg.h>
++#include <adf_cfg_services.h>
+ #include <adf_clock.h>
+ #include <adf_common_drv.h>
+ #include <adf_gen4_dc.h>
+@@ -13,6 +14,10 @@
+ #include "adf_4xxx_hw_data.h"
+ #include "icp_qat_hw.h"
+
++#define ADF_AE_GROUP_0 GENMASK(3, 0)
++#define ADF_AE_GROUP_1 GENMASK(7, 4)
++#define ADF_AE_GROUP_2 BIT(8)
++
+ enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+@@ -40,39 +45,45 @@ struct adf_fw_config {
+ };
+
+ static const struct adf_fw_config adf_fw_cy_config[] = {
+- {0xF0, ADF_FW_SYM_OBJ},
+- {0xF, ADF_FW_ASYM_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_dc_config[] = {
+- {0xF0, ADF_FW_DC_OBJ},
+- {0xF, ADF_FW_DC_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_sym_config[] = {
+- {0xF0, ADF_FW_SYM_OBJ},
+- {0xF, ADF_FW_SYM_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_asym_config[] = {
+- {0xF0, ADF_FW_ASYM_OBJ},
+- {0xF, ADF_FW_ASYM_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+- {0xF0, ADF_FW_ASYM_OBJ},
+- {0xF, ADF_FW_DC_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+- {0xF0, ADF_FW_SYM_OBJ},
+- {0xF, ADF_FW_DC_OBJ},
+- {0x100, ADF_FW_ADMIN_OBJ},
++ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
++};
++
++static const struct adf_fw_config adf_fw_dcc_config[] = {
++ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
++ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
++ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
+ };
+
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
+@@ -80,6 +91,7 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
+ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
++static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
+
+ /* Worker thread to service arbiter mappings */
+ static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
+@@ -94,59 +106,18 @@ static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+ 0x0
+ };
+
++static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
++ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
++ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
++ 0x0
++};
++
+ static struct adf_hw_device_class adf_4xxx_class = {
+ .name = ADF_4XXX_DEVICE_NAME,
+ .type = DEV_4XXX,
+ .instances = 0,
+ };
+
+-enum dev_services {
+- SVC_CY = 0,
+- SVC_CY2,
+- SVC_DC,
+- SVC_SYM,
+- SVC_ASYM,
+- SVC_DC_ASYM,
+- SVC_ASYM_DC,
+- SVC_DC_SYM,
+- SVC_SYM_DC,
+-};
+-
+-static const char *const dev_cfg_services[] = {
+- [SVC_CY] = ADF_CFG_CY,
+- [SVC_CY2] = ADF_CFG_ASYM_SYM,
+- [SVC_DC] = ADF_CFG_DC,
+- [SVC_SYM] = ADF_CFG_SYM,
+- [SVC_ASYM] = ADF_CFG_ASYM,
+- [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
+- [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
+- [SVC_DC_SYM] = ADF_CFG_DC_SYM,
+- [SVC_SYM_DC] = ADF_CFG_SYM_DC,
+-};
+-
+-static int get_service_enabled(struct adf_accel_dev *accel_dev)
+-{
+- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+- int ret;
+-
+- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+- ADF_SERVICES_ENABLED, services);
+- if (ret) {
+- dev_err(&GET_DEV(accel_dev),
+- ADF_SERVICES_ENABLED " param not found\n");
+- return ret;
+- }
+-
+- ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
+- services);
+- if (ret < 0)
+- dev_err(&GET_DEV(accel_dev),
+- "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+- services);
+-
+- return ret;
+-}
+-
+ static u32 get_accel_mask(struct adf_hw_device_data *self)
+ {
+ return ADF_4XXX_ACCELERATORS_MASK;
+@@ -212,6 +183,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ {
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
++ u32 capabilities_dcc;
+ u32 fusectl1;
+
+ /* Read accelerator capabilities mask */
+@@ -278,12 +250,20 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+- switch (get_service_enabled(accel_dev)) {
++ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
+ case SVC_DC:
+ return capabilities_dc;
++ case SVC_DCC:
++ /*
++ * Sym capabilities are available for chaining operations,
++ * but sym crypto instances cannot be supported
++ */
++ capabilities_dcc = capabilities_dc | capabilities_sym;
++ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
++ return capabilities_dcc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+@@ -306,9 +286,11 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+
+ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+ {
+- switch (get_service_enabled(accel_dev)) {
++ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ return thrd_to_arb_map_dc;
++ case SVC_DCC:
++ return thrd_to_arb_map_dcc;
+ default:
+ return default_thrd_to_arb_map;
+ }
+@@ -393,38 +375,104 @@ static u32 uof_get_num_objs(void)
+ return ARRAY_SIZE(adf_fw_cy_config);
+ }
+
+-static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+- const char * const fw_objs[], int num_objs)
++static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+ {
+- int id;
+-
+- switch (get_service_enabled(accel_dev)) {
++ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+- id = adf_fw_cy_config[obj_num].obj;
+- break;
++ return adf_fw_cy_config;
+ case SVC_DC:
+- id = adf_fw_dc_config[obj_num].obj;
+- break;
++ return adf_fw_dc_config;
++ case SVC_DCC:
++ return adf_fw_dcc_config;
+ case SVC_SYM:
+- id = adf_fw_sym_config[obj_num].obj;
+- break;
++ return adf_fw_sym_config;
+ case SVC_ASYM:
+- id = adf_fw_asym_config[obj_num].obj;
+- break;
++ return adf_fw_asym_config;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+- id = adf_fw_asym_dc_config[obj_num].obj;
+- break;
++ return adf_fw_asym_dc_config;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+- id = adf_fw_sym_dc_config[obj_num].obj;
+- break;
++ return adf_fw_sym_dc_config;
+ default:
+- id = -EINVAL;
+- break;
++ return NULL;
++ }
++}
++
++enum adf_rp_groups {
++ RP_GROUP_0 = 0,
++ RP_GROUP_1,
++ RP_GROUP_COUNT
++};
++
++static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
++{
++ enum adf_cfg_service_type rps[RP_GROUP_COUNT];
++ const struct adf_fw_config *fw_config;
++ u16 ring_to_svc_map;
++ int i, j;
++
++ fw_config = get_fw_config(accel_dev);
++ if (!fw_config)
++ return 0;
++
++ /* If dcc, all rings handle compression requests */
++ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
++ for (i = 0; i < RP_GROUP_COUNT; i++)
++ rps[i] = COMP;
++ goto set_mask;
+ }
+
++ for (i = 0; i < RP_GROUP_COUNT; i++) {
++ switch (fw_config[i].ae_mask) {
++ case ADF_AE_GROUP_0:
++ j = RP_GROUP_0;
++ break;
++ case ADF_AE_GROUP_1:
++ j = RP_GROUP_1;
++ break;
++ default:
++ return 0;
++ }
++
++ switch (fw_config[i].obj) {
++ case ADF_FW_SYM_OBJ:
++ rps[j] = SYM;
++ break;
++ case ADF_FW_ASYM_OBJ:
++ rps[j] = ASYM;
++ break;
++ case ADF_FW_DC_OBJ:
++ rps[j] = COMP;
++ break;
++ default:
++ rps[j] = 0;
++ break;
++ }
++ }
++
++set_mask:
++ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
++ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
++ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
++ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
++
++ return ring_to_svc_map;
++}
++
++static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
++ const char * const fw_objs[], int num_objs)
++{
++ const struct adf_fw_config *fw_config;
++ int id;
++
++ fw_config = get_fw_config(accel_dev);
++ if (fw_config)
++ id = fw_config[obj_num].obj;
++ else
++ id = -EINVAL;
++
+ if (id < 0 || id > num_objs)
+ return NULL;
+
+@@ -447,26 +495,13 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n
+
+ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+ {
+- switch (get_service_enabled(accel_dev)) {
+- case SVC_CY:
+- return adf_fw_cy_config[obj_num].ae_mask;
+- case SVC_DC:
+- return adf_fw_dc_config[obj_num].ae_mask;
+- case SVC_CY2:
+- return adf_fw_cy_config[obj_num].ae_mask;
+- case SVC_SYM:
+- return adf_fw_sym_config[obj_num].ae_mask;
+- case SVC_ASYM:
+- return adf_fw_asym_config[obj_num].ae_mask;
+- case SVC_ASYM_DC:
+- case SVC_DC_ASYM:
+- return adf_fw_asym_dc_config[obj_num].ae_mask;
+- case SVC_SYM_DC:
+- case SVC_DC_SYM:
+- return adf_fw_sym_dc_config[obj_num].ae_mask;
+- default:
++ const struct adf_fw_config *fw_config;
++
++ fw_config = get_fw_config(accel_dev);
++ if (!fw_config)
+ return 0;
+- }
++
++ return fw_config[obj_num].ae_mask;
+ }
+
+ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+@@ -522,6 +557,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
++ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
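get_ring_to_svc_map() above assigns a service type to each of the four ring pairs, alternating between the two AE groups, and packs the result into a 16-bit map. The packing step in isolation, using the shift constants referenced in the hunk:

        /* Pairs 0/2 follow ring-pair group 0, pairs 1/3 follow group 1. */
        static u16 pack_ring_map(u16 grp0_svc, u16 grp1_svc)
        {
                return grp0_svc << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
                       grp1_svc << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
                       grp0_svc << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
                       grp1_svc << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
        }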
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+index 6d4e2e139ffa24..f6f9e20f74b543 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+@@ -11,6 +11,7 @@
+ #include <adf_heartbeat.h>
+
+ #include "adf_4xxx_hw_data.h"
++#include "adf_cfg_services.h"
+ #include "qat_compression.h"
+ #include "qat_crypto.h"
+ #include "adf_transport_access_macros.h"
+@@ -23,30 +24,6 @@ static const struct pci_device_id adf_pci_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+-enum configs {
+- DEV_CFG_CY = 0,
+- DEV_CFG_DC,
+- DEV_CFG_SYM,
+- DEV_CFG_ASYM,
+- DEV_CFG_ASYM_SYM,
+- DEV_CFG_ASYM_DC,
+- DEV_CFG_DC_ASYM,
+- DEV_CFG_SYM_DC,
+- DEV_CFG_DC_SYM,
+-};
+-
+-static const char * const services_operations[] = {
+- ADF_CFG_CY,
+- ADF_CFG_DC,
+- ADF_CFG_SYM,
+- ADF_CFG_ASYM,
+- ADF_CFG_ASYM_SYM,
+- ADF_CFG_ASYM_DC,
+- ADF_CFG_DC_ASYM,
+- ADF_CFG_SYM_DC,
+- ADF_CFG_DC_SYM,
+-};
+-
+ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+ {
+ if (accel_dev->hw_device) {
+@@ -292,16 +269,17 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+ if (ret)
+ goto err;
+
+- ret = sysfs_match_string(services_operations, services);
++ ret = sysfs_match_string(adf_cfg_services, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+- case DEV_CFG_CY:
+- case DEV_CFG_ASYM_SYM:
++ case SVC_CY:
++ case SVC_CY2:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+- case DEV_CFG_DC:
++ case SVC_DC:
++ case SVC_DCC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ default:
+@@ -485,7 +463,9 @@ module_pci_driver(adf_driver);
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Intel");
+ MODULE_FIRMWARE(ADF_4XXX_FW);
++MODULE_FIRMWARE(ADF_402XX_FW);
+ MODULE_FIRMWARE(ADF_4XXX_MMP);
++MODULE_FIRMWARE(ADF_402XX_MMP);
+ MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+ MODULE_VERSION(ADF_DRV_VERSION);
+ MODULE_SOFTDEP("pre: crypto-intel_qat");
+diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
+index 43622c7fca712c..8dbf146de3fa59 100644
+--- a/drivers/crypto/intel/qat/qat_common/Makefile
++++ b/drivers/crypto/intel/qat/qat_common/Makefile
+@@ -3,6 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+ intel_qat-objs := adf_cfg.o \
+ adf_isr.o \
+ adf_ctl_drv.o \
++ adf_cfg_services.o \
+ adf_dev_mgr.o \
+ adf_init.o \
+ adf_accel_engine.o \
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+index e57abde66f4fb3..79d5a1535eda34 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+@@ -29,7 +29,7 @@
+ #define ADF_PCI_MAX_BARS 3
+ #define ADF_DEVICE_NAME_LENGTH 32
+ #define ADF_ETR_MAX_RINGS_PER_BANK 16
+-#define ADF_MAX_MSIX_VECTOR_NAME 16
++#define ADF_MAX_MSIX_VECTOR_NAME 48
+ #define ADF_DEVICE_NAME_PREFIX "qat_"
+
+ enum adf_accel_capabilities {
+@@ -182,6 +182,7 @@ struct adf_hw_device_data {
+ void (*get_arb_info)(struct arb_info *arb_csrs_info);
+ void (*get_admin_info)(struct admin_info *admin_csrs_info);
+ enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
++ u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
+ int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+ void (*free_irq)(struct adf_accel_dev *accel_dev);
+ void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
+index ff790823b86861..194d64d4b99a1b 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
+@@ -8,6 +8,7 @@
+ #include <linux/dma-mapping.h>
+ #include "adf_accel_devices.h"
+ #include "adf_common_drv.h"
++#include "adf_cfg.h"
+ #include "adf_heartbeat.h"
+ #include "icp_qat_fw_init_admin.h"
+
+@@ -212,6 +213,17 @@ int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
+ return 0;
+ }
+
++static int adf_set_chaining(struct adf_accel_dev *accel_dev)
++{
++ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
++ struct icp_qat_fw_init_admin_resp resp = { };
++ struct icp_qat_fw_init_admin_req req = { };
++
++ req.cmd_id = ICP_QAT_FW_DC_CHAIN_INIT;
++
++ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
++}
++
+ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+ u32 *capabilities)
+ {
+@@ -284,6 +296,19 @@ int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ }
+
++static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
++{
++ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
++ int ret;
++
++ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
++ ADF_SERVICES_ENABLED, services);
++ if (ret)
++ return false;
++
++ return !strcmp(services, "dcc");
++}
++
+ /**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+@@ -297,6 +322,16 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+ u32 dc_capabilities = 0;
+ int ret;
+
++ ret = adf_set_fw_constants(accel_dev);
++ if (ret)
++ return ret;
++
++ if (is_dcc_enabled(accel_dev)) {
++ ret = adf_set_chaining(accel_dev);
++ if (ret)
++ return ret;
++ }
++
+ ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+@@ -304,10 +339,6 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+ }
+ accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
+- ret = adf_set_fw_constants(accel_dev);
+- if (ret)
+- return ret;
+-
+ return adf_init_ae(accel_dev);
+ }
+ EXPORT_SYMBOL_GPL(adf_send_admin_init);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+index 04af32a2811c8f..af495a6f039f6b 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+@@ -92,7 +92,8 @@ static void adf_device_reset_worker(struct work_struct *work)
+ if (adf_dev_restart(accel_dev)) {
+ /* The device hanged and we can't restart it so stop here */
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+- kfree(reset_data);
++ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
++ kfree(reset_data);
+ WARN(1, "QAT: device restart failed. Device is unusable\n");
+ return;
+ }
+@@ -100,10 +101,10 @@ static void adf_device_reset_worker(struct work_struct *work)
+ clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+ /* The dev is back alive. Notify the caller if in sync mode */
+- if (reset_data->mode == ADF_DEV_RESET_SYNC)
+- complete(&reset_data->compl);
+- else
++ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ kfree(reset_data);
++ else
++ complete(&reset_data->compl);
+ }
+
+ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+@@ -135,6 +136,7 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ if (!timeout) {
+ dev_err(&GET_DEV(accel_dev),
+ "Reset device timeout expired\n");
++ cancel_work_sync(&reset_data->reset_work);
+ ret = -EFAULT;
+ }
+ kfree(reset_data);
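The adf_aer.c change above pins down who frees reset_data: the worker frees it only for asynchronous resets, while a synchronous caller always frees it after the wait, cancelling the work first on timeout so the worker cannot run against freed memory. A sketch of that ownership rule, with hypothetical names:

        #include <linux/completion.h>
        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct demo_reset {
                struct work_struct work;
                struct completion compl;
                bool async;
        };

        static void demo_reset_worker(struct work_struct *work)
        {
                struct demo_reset *rd = container_of(work, struct demo_reset, work);

                /* ... perform the reset ... */
                if (rd->async)
                        kfree(rd);              /* async: the worker owns rd */
                else
                        complete(&rd->compl);   /* sync: the waiter still owns rd */
        }

        static int demo_reset_sync(struct demo_reset *rd, unsigned long timeout)
        {
                int ret = 0;

                schedule_work(&rd->work);
                if (!wait_for_completion_timeout(&rd->compl, timeout)) {
                        /* Stop a late-running worker from racing with the
                         * kfree() below. */
                        cancel_work_sync(&rd->work);
                        ret = -EFAULT;
                }
                kfree(rd);                      /* sync: the waiter frees rd */
                return ret;
        }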
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
+index 8836f015c39c41..2cf102ad4ca82d 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
+@@ -290,17 +290,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+ * 3. if the key exists with the same value, then return without doing
+ * anything (the newly created key_val is freed).
+ */
++ down_write(&cfg->lock);
+ if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
+ if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
+ adf_cfg_keyval_remove(key, section);
+ } else {
+ kfree(key_val);
+- return 0;
++ goto out;
+ }
+ }
+
+- down_write(&cfg->lock);
+ adf_cfg_keyval_add(key_val, section);
++
++out:
+ up_write(&cfg->lock);
+ return 0;
+ }
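The adf_cfg.c hunk widens the write lock so the duplicate check and the insert form one critical section; previously another writer could add the same key between the unlocked lookup and the locked add. The shape of the fix, reduced to a generic list (all types here are illustrative):

        #include <linux/list.h>
        #include <linux/rwsem.h>
        #include <linux/slab.h>

        struct demo_entry {
                struct list_head node;
                int key;
        };

        static bool demo_key_present(struct list_head *list, int key)
        {
                struct demo_entry *e;

                list_for_each_entry(e, list, node)
                        if (e->key == key)
                                return true;
                return false;
        }

        static void demo_add_unique(struct rw_semaphore *lock,
                                    struct list_head *list, struct demo_entry *e)
        {
                down_write(lock);
                if (demo_key_present(list, e->key))
                        kfree(e);               /* duplicate: discard */
                else
                        list_add_tail(&e->node, list);
                up_write(lock);                 /* check and insert share one
                                                 * write-side critical section */
        }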
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+new file mode 100644
+index 00000000000000..26805229446843
+--- /dev/null
++++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+@@ -0,0 +1,47 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright(c) 2023 Intel Corporation */
++
++#include <linux/export.h>
++#include <linux/pci.h>
++#include <linux/string.h>
++#include "adf_cfg.h"
++#include "adf_cfg_services.h"
++#include "adf_cfg_strings.h"
++
++const char *const adf_cfg_services[] = {
++ [SVC_CY] = ADF_CFG_CY,
++ [SVC_CY2] = ADF_CFG_ASYM_SYM,
++ [SVC_DC] = ADF_CFG_DC,
++ [SVC_DCC] = ADF_CFG_DCC,
++ [SVC_SYM] = ADF_CFG_SYM,
++ [SVC_ASYM] = ADF_CFG_ASYM,
++ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
++ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
++ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
++ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
++};
++EXPORT_SYMBOL_GPL(adf_cfg_services);
++
++int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
++{
++ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
++ int ret;
++
++ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
++ ADF_SERVICES_ENABLED, services);
++ if (ret) {
++ dev_err(&GET_DEV(accel_dev),
++ ADF_SERVICES_ENABLED " param not found\n");
++ return ret;
++ }
++
++ ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
++ services);
++ if (ret < 0)
++ dev_err(&GET_DEV(accel_dev),
++ "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
++ services);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(adf_get_service_enabled);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+new file mode 100644
+index 00000000000000..c6b0328b0f5b03
+--- /dev/null
++++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+@@ -0,0 +1,28 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright(c) 2023 Intel Corporation */
++#ifndef _ADF_CFG_SERVICES_H_
++#define _ADF_CFG_SERVICES_H_
++
++#include "adf_cfg_strings.h"
++
++struct adf_accel_dev;
++
++enum adf_services {
++ SVC_CY = 0,
++ SVC_CY2,
++ SVC_DC,
++ SVC_DCC,
++ SVC_SYM,
++ SVC_ASYM,
++ SVC_DC_ASYM,
++ SVC_ASYM_DC,
++ SVC_DC_SYM,
++ SVC_SYM_DC,
++ SVC_COUNT
++};
++
++extern const char *const adf_cfg_services[SVC_COUNT];
++
++int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
++
++#endif
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+index 6066dc637352ca..322b76903a737d 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+@@ -32,6 +32,7 @@
+ #define ADF_CFG_DC_ASYM "dc;asym"
+ #define ADF_CFG_SYM_DC "sym;dc"
+ #define ADF_CFG_DC_SYM "dc;sym"
++#define ADF_CFG_DCC "dcc"
+ #define ADF_SERVICES_ENABLED "ServicesEnabled"
+ #define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
+ #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
+index dc0778691eb0ba..eae44969dc84fa 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_clock.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
+@@ -82,6 +82,9 @@ static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
+ }
+
+ delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
++ if (!delta_us)
++ return -EINVAL;
++
+ temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
+ temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
+ /*
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+index 673b5044c62a50..79ff7982378d9f 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+@@ -25,6 +25,8 @@
+ #define ADF_STATUS_AE_STARTED 6
+ #define ADF_STATUS_PF_RUNNING 7
+ #define ADF_STATUS_IRQ_ALLOCATED 8
++#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
++#define ADF_STATUS_COMP_ALGS_REGISTERED 10
+
+ enum adf_dev_reset_mode {
+ ADF_DEV_RESET_ASYNC = 0,
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
+index 70ef1196393814..43af81fcab8686 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
+@@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+- errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
++ /* Update only section of errmsk3 related to VF2PF */
++ errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
++ errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+ /* Return the sources of the (new) interrupt(s) */
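This hunk, like the dh895xcc one further down, fixes a read-modify-write that previously ANDed the whole register image and could clear unrelated bits. The corrected idiom updates only the VF2PF field:

        /* Clear just the field of interest, then OR in its new contents,
         * leaving every other bit of the register image untouched. */
        static u32 demo_update_field(u32 reg, u32 field_mask, u32 new_bits)
        {
                reg &= ~field_mask;
                reg |= new_bits & field_mask;
                return reg;
        }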
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
+index 89001fe92e7629..0f9e2d59ce3857 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
+@@ -97,6 +97,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
+ return -EFAULT;
+ }
+
++ if (hw_data->get_ring_to_svc_map)
++ hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
++
+ if (adf_ae_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to initialise Acceleration Engine\n");
+@@ -231,6 +234,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
++ set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+ if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ dev_err(&GET_DEV(accel_dev),
+@@ -239,6 +243,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ return -EFAULT;
+ }
++ set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+
+ adf_dbgfs_add(accel_dev);
+
+@@ -272,13 +277,17 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
+ clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+ clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+- if (!list_empty(&accel_dev->crypto_list)) {
++ if (!list_empty(&accel_dev->crypto_list) &&
++ test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
+ qat_algs_unregister();
+ qat_asym_algs_unregister();
+ }
++ clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
+
+- if (!list_empty(&accel_dev->compression_list))
++ if (!list_empty(&accel_dev->compression_list) &&
++ test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
+ qat_comp_algs_unregister();
++ clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
+
+ list_for_each(list_itr, &service_table) {
+ service = list_entry(list_itr, struct service_hndl, list);
+@@ -440,13 +449,6 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+
+ mutex_lock(&accel_dev->state_lock);
+
+- if (!adf_dev_started(accel_dev)) {
+- dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+- accel_dev->accel_id);
+- ret = -EINVAL;
+- goto out;
+- }
+-
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
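adf_init.c now records successful algorithm registration in status bits and only unregisters when the matching bit is set, so a partially failed start can no longer trigger an unbalanced unregister on stop. The guard pattern in miniature (the registration calls are hypothetical):

        #include <linux/bitops.h>

        #define DEMO_ALGS_REGISTERED    0       /* illustrative status bit */

        static unsigned long demo_status;

        static int demo_register_algs(void);    /* hypothetical */
        static void demo_unregister_algs(void); /* hypothetical */

        static void demo_start(void)
        {
                if (!demo_register_algs())
                        set_bit(DEMO_ALGS_REGISTERED, &demo_status);
        }

        static void demo_stop(void)
        {
                /* Unregister only what was actually registered. */
                if (test_bit(DEMO_ALGS_REGISTERED, &demo_status))
                        demo_unregister_algs();
                clear_bit(DEMO_ALGS_REGISTERED, &demo_status);
        }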
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+index a74d2f93036709..8f04b0d3c5ac89 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+@@ -5,6 +5,7 @@
+ #include <linux/pci.h>
+ #include "adf_accel_devices.h"
+ #include "adf_cfg.h"
++#include "adf_cfg_services.h"
+ #include "adf_common_drv.h"
+
+ static const char * const state_operations[] = {
+@@ -52,6 +53,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ case DEV_DOWN:
+ dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+
++ if (!adf_dev_started(accel_dev)) {
++ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
++ accel_id);
++
++ break;
++ }
++
+ ret = adf_dev_down(accel_dev, true);
+ if (ret < 0)
+ return -EINVAL;
+@@ -61,7 +69,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+ ret = adf_dev_up(accel_dev, true);
+- if (ret < 0) {
++ if (ret == -EALREADY) {
++ break;
++ } else if (ret) {
+ dev_err(dev, "Failed to start device qat_dev%d\n",
+ accel_id);
+ adf_dev_down(accel_dev, true);
+@@ -75,18 +85,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ return count;
+ }
+
+-static const char * const services_operations[] = {
+- ADF_CFG_CY,
+- ADF_CFG_DC,
+- ADF_CFG_SYM,
+- ADF_CFG_ASYM,
+- ADF_CFG_ASYM_SYM,
+- ADF_CFG_ASYM_DC,
+- ADF_CFG_DC_ASYM,
+- ADF_CFG_SYM_DC,
+- ADF_CFG_DC_SYM,
+-};
+-
+ static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+@@ -121,7 +119,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+- ret = sysfs_match_string(services_operations, buf);
++ ret = sysfs_match_string(adf_cfg_services, buf);
+ if (ret < 0)
+ return ret;
+
+@@ -135,7 +133,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
+ return -EINVAL;
+ }
+
+- ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
++ ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
+ if (ret < 0)
+ return ret;
+
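Both sysfs stores above resolve user input with sysfs_match_string(), which tolerates the trailing newline a sysfs write carries and returns the index of the matching entry. A minimal usage sketch with a hypothetical table:

        #include <linux/string.h>

        static const char * const demo_modes[] = { "off", "on", "auto" };

        static int demo_parse_mode(const char *buf)
        {
                int idx = sysfs_match_string(demo_modes, buf);

                if (idx < 0)
                        return idx;     /* -EINVAL: no entry matched */

                return idx;             /* index into demo_modes[] */
        }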
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+index 08bca1c506c0ef..e2dd568b87b519 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+@@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+ {
+ struct adf_etr_ring_debug_entry *ring_debug;
+- char entry_name[8];
++ char entry_name[16];
+
+ ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+ if (!ring_debug)
+@@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+ {
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct dentry *parent = accel_dev->transport->debug;
+- char name[8];
++ char name[16];
+
+ snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ bank->bank_debug_dir = debugfs_create_dir(name, parent);
+diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+index 3e968a4bcc9cd5..019a6443834e0b 100644
+--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
++++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
+ ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+ ICP_QAT_FW_HEARTBEAT_GET = 8,
+ ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
++ ICP_QAT_FW_DC_CHAIN_INIT = 11,
+ ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+ ICP_QAT_FW_TIMER_GET = 19,
+ ICP_QAT_FW_PM_STATE_CONFIG = 128,
+diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+index bb80455b3e81e2..b97b678823a975 100644
+--- a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
+@@ -40,40 +40,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+ spin_unlock_bh(&backlog->lock);
+ }
+
+-static void qat_alg_backlog_req(struct qat_alg_req *req,
+- struct qat_instance_backlog *backlog)
+-{
+- INIT_LIST_HEAD(&req->list);
+-
+- spin_lock_bh(&backlog->lock);
+- list_add_tail(&req->list, &backlog->list);
+- spin_unlock_bh(&backlog->lock);
+-}
+-
+-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++static bool qat_alg_try_enqueue(struct qat_alg_req *req)
+ {
+ struct qat_instance_backlog *backlog = req->backlog;
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+- /* If any request is already backlogged, then add to backlog list */
++ /* Check if any request is already backlogged */
+ if (!list_empty(&backlog->list))
+- goto enqueue;
++ return false;
+
+- /* If ring is nearly full, then add to backlog list */
++ /* Check if ring is nearly full */
+ if (adf_ring_nearly_full(tx_ring))
+- goto enqueue;
++ return false;
+
+- /* If adding request to HW ring fails, then add to backlog list */
++ /* Try to enqueue to HW ring */
+ if (adf_send_message(tx_ring, fw_req))
+- goto enqueue;
++ return false;
+
+- return -EINPROGRESS;
++ return true;
++}
+
+-enqueue:
+- qat_alg_backlog_req(req, backlog);
+
+- return -EBUSY;
++static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
++{
++ struct qat_instance_backlog *backlog = req->backlog;
++ int ret = -EINPROGRESS;
++
++ if (qat_alg_try_enqueue(req))
++ return ret;
++
++ spin_lock_bh(&backlog->lock);
++ if (!qat_alg_try_enqueue(req)) {
++ list_add_tail(&req->list, &backlog->list);
++ ret = -EBUSY;
++ }
++ spin_unlock_bh(&backlog->lock);
++
++ return ret;
+ }
+
+ int qat_alg_send_message(struct qat_alg_req *req)
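The rewrite above closes a race in the backlog path: the old code decided to backlog without holding the lock, so a request could be queued behind a backlog that was draining concurrently. The new shape tries a lock-free fast path, then retries once under the lock before backlogging; a condensed restatement of the function introduced in the hunk:

        static int demo_send_or_backlog(struct qat_alg_req *req)
        {
                struct qat_instance_backlog *backlog = req->backlog;

                if (qat_alg_try_enqueue(req))           /* lock-free fast path */
                        return -EINPROGRESS;

                spin_lock_bh(&backlog->lock);
                if (qat_alg_try_enqueue(req)) {         /* raced with a drain */
                        spin_unlock_bh(&backlog->lock);
                        return -EINPROGRESS;
                }
                list_add_tail(&req->list, &backlog->list);
                spin_unlock_bh(&backlog->lock);

                return -EBUSY;
        }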
+diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+index 09551f94912653..0e40897cc983a8 100644
+--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+@@ -191,8 +191,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+- errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+- errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
++ /* Update only section of errmsk3 and errmsk5 related to VF2PF */
++ errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
++ errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
++
++ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
++ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
+index a48591af12d025..78217577aa5403 100644
+--- a/drivers/crypto/marvell/Kconfig
++++ b/drivers/crypto/marvell/Kconfig
+@@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
++ select CRYPTO_AUTHENC
+ select CRYPTO_DEV_MARVELL
+ help
+ This driver allows you to utilize the Marvell Cryptographic
+@@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
++ select CRYPTO_AUTHENC
+ select NET_DEVLINK
+ help
+ This driver allows you to utilize the Marvell Cryptographic
+diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+index 1c2c870e887aab..f64b72398eced9 100644
+--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
++++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+@@ -17,7 +17,6 @@
+ #include <crypto/sha2.h>
+ #include <crypto/xts.h>
+ #include <crypto/scatterwalk.h>
+-#include <linux/rtnetlink.h>
+ #include <linux/sort.h>
+ #include <linux/module.h>
+ #include "otx_cptvf.h"
+@@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = {
+ .count = ATOMIC_INIT(0)
+ };
+
++static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
++
+ static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+ {
+ int count, ret = 0;
+@@ -515,44 +516,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
++ switch (ctx->mac_type) {
++ case OTX_CPT_SHA1:
++ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
++ break;
++
++ case OTX_CPT_SHA256:
++ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
++ break;
++
++ case OTX_CPT_SHA384:
++ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
++ break;
++
++ case OTX_CPT_SHA512:
++ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
++ break;
++ }
++
++ if (IS_ERR(ctx->hashalg))
++ return PTR_ERR(ctx->hashalg);
++
++ crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
++
++ if (!ctx->hashalg)
++ return 0;
++
+ /*
+ * When selected cipher is NULL we use HMAC opcode instead of
+ * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
+ * for calculating ipad and opad
+ */
+ if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
+- switch (ctx->mac_type) {
+- case OTX_CPT_SHA1:
+- ctx->hashalg = crypto_alloc_shash("sha1", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
+-
+- case OTX_CPT_SHA256:
+- ctx->hashalg = crypto_alloc_shash("sha256", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
++ int ss = crypto_shash_statesize(ctx->hashalg);
+
+- case OTX_CPT_SHA384:
+- ctx->hashalg = crypto_alloc_shash("sha384", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
++ ctx->ipad = kzalloc(ss, GFP_KERNEL);
++ if (!ctx->ipad) {
++ crypto_free_shash(ctx->hashalg);
++ return -ENOMEM;
++ }
+
+- case OTX_CPT_SHA512:
+- ctx->hashalg = crypto_alloc_shash("sha512", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
++ ctx->opad = kzalloc(ss, GFP_KERNEL);
++ if (!ctx->opad) {
++ kfree(ctx->ipad);
++ crypto_free_shash(ctx->hashalg);
++ return -ENOMEM;
+ }
+ }
+
+- crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
++ ctx->sdesc = alloc_sdesc(ctx->hashalg);
++ if (!ctx->sdesc) {
++ kfree(ctx->opad);
++ kfree(ctx->ipad);
++ crypto_free_shash(ctx->hashalg);
++ return -ENOMEM;
++ }
+
+ return 0;
+ }
+@@ -608,8 +626,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm)
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+- if (ctx->hashalg)
+- crypto_free_shash(ctx->hashalg);
++ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+ }
+
+@@ -705,7 +722,7 @@ static inline void swap_data64(void *buf, u32 len)
+ *dst = cpu_to_be64p(src);
+ }
+
+-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
++static int swap_pad(u8 mac_type, u8 *pad)
+ {
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+@@ -713,22 +730,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+
+ switch (mac_type) {
+ case OTX_CPT_SHA1:
+- sha1 = (struct sha1_state *) in_pad;
++ sha1 = (struct sha1_state *)pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX_CPT_SHA256:
+- sha256 = (struct sha256_state *) in_pad;
++ sha256 = (struct sha256_state *)pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX_CPT_SHA384:
+ case OTX_CPT_SHA512:
+- sha512 = (struct sha512_state *) in_pad;
++ sha512 = (struct sha512_state *)pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+@@ -738,55 +752,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+ return 0;
+ }
+
+-static int aead_hmac_init(struct crypto_aead *cipher)
++static int aead_hmac_init(struct crypto_aead *cipher,
++ struct crypto_authenc_keys *keys)
+ {
+ struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
+- int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+- int authkeylen = ctx->auth_key_len;
++ int authkeylen = keys->authkeylen;
+ u8 *ipad = NULL, *opad = NULL;
+- int ret = 0, icount = 0;
++ int icount = 0;
++ int ret;
+
+- ctx->sdesc = alloc_sdesc(ctx->hashalg);
+- if (!ctx->sdesc)
+- return -ENOMEM;
++ if (authkeylen > bs) {
++ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
++ authkeylen, ctx->key);
++ if (ret)
++ return ret;
++ authkeylen = ds;
++ } else
++ memcpy(ctx->key, keys->authkey, authkeylen);
+
+- ctx->ipad = kzalloc(bs, GFP_KERNEL);
+- if (!ctx->ipad) {
+- ret = -ENOMEM;
+- goto calc_fail;
+- }
++ ctx->enc_key_len = keys->enckeylen;
++ ctx->auth_key_len = authkeylen;
+
+- ctx->opad = kzalloc(bs, GFP_KERNEL);
+- if (!ctx->opad) {
+- ret = -ENOMEM;
+- goto calc_fail;
+- }
++ if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
++ return keys->enckeylen ? -EINVAL : 0;
+
+- ipad = kzalloc(state_size, GFP_KERNEL);
+- if (!ipad) {
+- ret = -ENOMEM;
+- goto calc_fail;
++ switch (keys->enckeylen) {
++ case AES_KEYSIZE_128:
++ ctx->key_type = OTX_CPT_AES_128_BIT;
++ break;
++ case AES_KEYSIZE_192:
++ ctx->key_type = OTX_CPT_AES_192_BIT;
++ break;
++ case AES_KEYSIZE_256:
++ ctx->key_type = OTX_CPT_AES_256_BIT;
++ break;
++ default:
++ /* Invalid key length */
++ return -EINVAL;
+ }
+
+- opad = kzalloc(state_size, GFP_KERNEL);
+- if (!opad) {
+- ret = -ENOMEM;
+- goto calc_fail;
+- }
++ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
+
+- if (authkeylen > bs) {
+- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+- authkeylen, ipad);
+- if (ret)
+- goto calc_fail;
+-
+- authkeylen = ds;
+- } else {
+- memcpy(ipad, ctx->key, authkeylen);
+- }
++ ipad = ctx->ipad;
++ opad = ctx->opad;
+
++ memcpy(ipad, ctx->key, authkeylen);
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
+@@ -804,7 +816,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
++ ret = swap_pad(ctx->mac_type, ipad);
+ if (ret)
+ goto calc_fail;
+
+@@ -812,25 +824,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+- ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+- if (ret)
+- goto calc_fail;
+-
+- kfree(ipad);
+- kfree(opad);
+-
+- return 0;
++ ret = swap_pad(ctx->mac_type, opad);
+
+ calc_fail:
+- kfree(ctx->ipad);
+- ctx->ipad = NULL;
+- kfree(ctx->opad);
+- ctx->opad = NULL;
+- kfree(ipad);
+- kfree(opad);
+- kfree(ctx->sdesc);
+- ctx->sdesc = NULL;
+-
+ return ret;
+ }
+
+@@ -838,57 +834,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+ {
+- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
+- struct crypto_authenc_key_param *param;
+- int enckeylen = 0, authkeylen = 0;
+- struct rtattr *rta = (void *)key;
+- int status = -EINVAL;
+-
+- if (!RTA_OK(rta, keylen))
+- goto badkey;
+-
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- goto badkey;
+-
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
+- goto badkey;
+-
+- param = RTA_DATA(rta);
+- enckeylen = be32_to_cpu(param->enckeylen);
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+- if (keylen < enckeylen)
+- goto badkey;
++ struct crypto_authenc_keys authenc_keys;
++ int status;
+
+- if (keylen > OTX_CPT_MAX_KEY_SIZE)
+- goto badkey;
+-
+- authkeylen = keylen - enckeylen;
+- memcpy(ctx->key, key, keylen);
+-
+- switch (enckeylen) {
+- case AES_KEYSIZE_128:
+- ctx->key_type = OTX_CPT_AES_128_BIT;
+- break;
+- case AES_KEYSIZE_192:
+- ctx->key_type = OTX_CPT_AES_192_BIT;
+- break;
+- case AES_KEYSIZE_256:
+- ctx->key_type = OTX_CPT_AES_256_BIT;
+- break;
+- default:
+- /* Invalid key length */
+- goto badkey;
+- }
+-
+- ctx->enc_key_len = enckeylen;
+- ctx->auth_key_len = authkeylen;
+-
+- status = aead_hmac_init(cipher);
++ status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
+ if (status)
+ goto badkey;
+
+- return 0;
++ status = aead_hmac_init(cipher, &authenc_keys);
++
+ badkey:
+ return status;
+ }
+@@ -897,36 +851,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+ {
+- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
+- struct crypto_authenc_key_param *param;
+- struct rtattr *rta = (void *)key;
+- int enckeylen = 0;
+-
+- if (!RTA_OK(rta, keylen))
+- goto badkey;
+-
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- goto badkey;
+-
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
+- goto badkey;
+-
+- param = RTA_DATA(rta);
+- enckeylen = be32_to_cpu(param->enckeylen);
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+- if (enckeylen != 0)
+- goto badkey;
+-
+- if (keylen > OTX_CPT_MAX_KEY_SIZE)
+- goto badkey;
+-
+- memcpy(ctx->key, key, keylen);
+- ctx->enc_key_len = enckeylen;
+- ctx->auth_key_len = keylen;
+- return 0;
+-badkey:
+- return -EINVAL;
++ return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
+ }
+
+ static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
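The setkey rewrite above drops the hand-rolled rtattr parsing in favour of crypto_authenc_extractkeys(), which splits an authenc key blob into its authentication and encryption parts. Basic usage (the surrounding function is hypothetical):

        #include <crypto/authenc.h>

        static int demo_setkey(const u8 *key, unsigned int keylen)
        {
                struct crypto_authenc_keys keys;
                int err;

                err = crypto_authenc_extractkeys(&keys, key, keylen);
                if (err)
                        return err;     /* malformed authenc blob */

                /* keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen
                 * are now ready for driver-specific validation. */
                return 0;
        }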
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+index 6edd27ff8c4e3c..e4bd3f030ceca7 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+@@ -419,8 +419,8 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ return 0;
+
+ free_iq:
+- otx2_cpt_free_instruction_queues(lfs);
+ cptlf_hw_cleanup(lfs);
++ otx2_cpt_free_instruction_queues(lfs);
+ detach_rsrcs:
+ otx2_cpt_detach_rsrcs_msg(lfs);
+ clear_lfs_num:
+@@ -431,11 +431,13 @@ EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+
+ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+ {
+- lfs->lfs_num = 0;
+ /* Cleanup LFs hardware side */
+ cptlf_hw_cleanup(lfs);
++ /* Free instruction queues */
++ otx2_cpt_free_instruction_queues(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
++ lfs->lfs_num = 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+index e27ddd3c4e5581..4385d3df52b4d4 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+@@ -11,7 +11,6 @@
+ #include <crypto/xts.h>
+ #include <crypto/gcm.h>
+ #include <crypto/scatterwalk.h>
+-#include <linux/rtnetlink.h>
+ #include <linux/sort.h>
+ #include <linux/module.h>
+ #include "otx2_cptvf.h"
+@@ -54,6 +53,8 @@ static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+ };
+
++static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
++
+ static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+ {
+ int count;
+@@ -580,40 +581,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
++ switch (ctx->mac_type) {
++ case OTX2_CPT_SHA1:
++ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
++ break;
++
++ case OTX2_CPT_SHA256:
++ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
++ break;
++
++ case OTX2_CPT_SHA384:
++ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
++ break;
++
++ case OTX2_CPT_SHA512:
++ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
++ break;
++ }
++
++ if (IS_ERR(ctx->hashalg))
++ return PTR_ERR(ctx->hashalg);
++
++ if (ctx->hashalg) {
++ ctx->sdesc = alloc_sdesc(ctx->hashalg);
++ if (!ctx->sdesc) {
++ crypto_free_shash(ctx->hashalg);
++ return -ENOMEM;
++ }
++ }
++
+ /*
+ * When selected cipher is NULL we use HMAC opcode instead of
+ * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
+ * for calculating ipad and opad
+ */
+- if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
+- switch (ctx->mac_type) {
+- case OTX2_CPT_SHA1:
+- ctx->hashalg = crypto_alloc_shash("sha1", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
+-
+- case OTX2_CPT_SHA256:
+- ctx->hashalg = crypto_alloc_shash("sha256", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
++ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
++ int ss = crypto_shash_statesize(ctx->hashalg);
+
+- case OTX2_CPT_SHA384:
+- ctx->hashalg = crypto_alloc_shash("sha384", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
++ ctx->ipad = kzalloc(ss, GFP_KERNEL);
++ if (!ctx->ipad) {
++ kfree(ctx->sdesc);
++ crypto_free_shash(ctx->hashalg);
++ return -ENOMEM;
++ }
+
+- case OTX2_CPT_SHA512:
+- ctx->hashalg = crypto_alloc_shash("sha512", 0,
+- CRYPTO_ALG_ASYNC);
+- if (IS_ERR(ctx->hashalg))
+- return PTR_ERR(ctx->hashalg);
+- break;
++ ctx->opad = kzalloc(ss, GFP_KERNEL);
++ if (!ctx->opad) {
++ kfree(ctx->ipad);
++ kfree(ctx->sdesc);
++ crypto_free_shash(ctx->hashalg);
++ return -ENOMEM;
+ }
+ }
+ switch (ctx->cipher_type) {
+@@ -686,8 +703,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+- if (ctx->hashalg)
+- crypto_free_shash(ctx->hashalg);
++ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+
+ if (ctx->fbk_cipher) {
+@@ -760,7 +776,7 @@ static inline void swap_data64(void *buf, u32 len)
+ cpu_to_be64s(src);
+ }
+
+-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
++static int swap_pad(u8 mac_type, u8 *pad)
+ {
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+@@ -768,22 +784,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+
+ switch (mac_type) {
+ case OTX2_CPT_SHA1:
+- sha1 = (struct sha1_state *) in_pad;
++ sha1 = (struct sha1_state *)pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA256:
+- sha256 = (struct sha256_state *) in_pad;
++ sha256 = (struct sha256_state *)pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA384:
+ case OTX2_CPT_SHA512:
+- sha512 = (struct sha512_state *) in_pad;
++ sha512 = (struct sha512_state *)pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+@@ -793,55 +806,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+ return 0;
+ }
+
+-static int aead_hmac_init(struct crypto_aead *cipher)
++static int aead_hmac_init(struct crypto_aead *cipher,
++ struct crypto_authenc_keys *keys)
+ {
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
+- int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+- int authkeylen = ctx->auth_key_len;
++ int authkeylen = keys->authkeylen;
+ u8 *ipad = NULL, *opad = NULL;
+- int ret = 0, icount = 0;
++ int icount = 0;
++ int ret;
+
+- ctx->sdesc = alloc_sdesc(ctx->hashalg);
+- if (!ctx->sdesc)
+- return -ENOMEM;
++ if (authkeylen > bs) {
++ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
++ authkeylen, ctx->key);
++ if (ret)
++ goto calc_fail;
+
+- ctx->ipad = kzalloc(bs, GFP_KERNEL);
+- if (!ctx->ipad) {
+- ret = -ENOMEM;
+- goto calc_fail;
+- }
++ authkeylen = ds;
++ } else
++ memcpy(ctx->key, keys->authkey, authkeylen);
+
+- ctx->opad = kzalloc(bs, GFP_KERNEL);
+- if (!ctx->opad) {
+- ret = -ENOMEM;
+- goto calc_fail;
+- }
++ ctx->enc_key_len = keys->enckeylen;
++ ctx->auth_key_len = authkeylen;
+
+- ipad = kzalloc(state_size, GFP_KERNEL);
+- if (!ipad) {
+- ret = -ENOMEM;
+- goto calc_fail;
+- }
++ if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
++ return keys->enckeylen ? -EINVAL : 0;
+
+- opad = kzalloc(state_size, GFP_KERNEL);
+- if (!opad) {
+- ret = -ENOMEM;
+- goto calc_fail;
++ switch (keys->enckeylen) {
++ case AES_KEYSIZE_128:
++ ctx->key_type = OTX2_CPT_AES_128_BIT;
++ break;
++ case AES_KEYSIZE_192:
++ ctx->key_type = OTX2_CPT_AES_192_BIT;
++ break;
++ case AES_KEYSIZE_256:
++ ctx->key_type = OTX2_CPT_AES_256_BIT;
++ break;
++ default:
++ /* Invalid key length */
++ return -EINVAL;
+ }
+
+- if (authkeylen > bs) {
+- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+- authkeylen, ipad);
+- if (ret)
+- goto calc_fail;
++ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
+
+- authkeylen = ds;
+- } else {
+- memcpy(ipad, ctx->key, authkeylen);
+- }
++ ipad = ctx->ipad;
++ opad = ctx->opad;
+
++ memcpy(ipad, ctx->key, authkeylen);
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
+@@ -859,7 +871,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
++ ret = swap_pad(ctx->mac_type, ipad);
+ if (ret)
+ goto calc_fail;
+
+@@ -867,25 +879,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+- ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+- if (ret)
+- goto calc_fail;
+-
+- kfree(ipad);
+- kfree(opad);
+-
+- return 0;
++ ret = swap_pad(ctx->mac_type, opad);
+
+ calc_fail:
+- kfree(ctx->ipad);
+- ctx->ipad = NULL;
+- kfree(ctx->opad);
+- ctx->opad = NULL;
+- kfree(ipad);
+- kfree(opad);
+- kfree(ctx->sdesc);
+- ctx->sdesc = NULL;
+-
+ return ret;
+ }
+
+@@ -893,87 +889,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+ {
+- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
+- struct crypto_authenc_key_param *param;
+- int enckeylen = 0, authkeylen = 0;
+- struct rtattr *rta = (void *)key;
+-
+- if (!RTA_OK(rta, keylen))
+- return -EINVAL;
++ struct crypto_authenc_keys authenc_keys;
+
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- return -EINVAL;
+-
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
+- return -EINVAL;
+-
+- param = RTA_DATA(rta);
+- enckeylen = be32_to_cpu(param->enckeylen);
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+- if (keylen < enckeylen)
+- return -EINVAL;
+-
+- if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+- return -EINVAL;
+-
+- authkeylen = keylen - enckeylen;
+- memcpy(ctx->key, key, keylen);
+-
+- switch (enckeylen) {
+- case AES_KEYSIZE_128:
+- ctx->key_type = OTX2_CPT_AES_128_BIT;
+- break;
+- case AES_KEYSIZE_192:
+- ctx->key_type = OTX2_CPT_AES_192_BIT;
+- break;
+- case AES_KEYSIZE_256:
+- ctx->key_type = OTX2_CPT_AES_256_BIT;
+- break;
+- default:
+- /* Invalid key length */
+- return -EINVAL;
+- }
+-
+- ctx->enc_key_len = enckeylen;
+- ctx->auth_key_len = authkeylen;
+-
+- return aead_hmac_init(cipher);
++ return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
++ aead_hmac_init(cipher, &authenc_keys);
+ }
+
+ static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+ {
+- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
+- struct crypto_authenc_key_param *param;
+- struct rtattr *rta = (void *)key;
+- int enckeylen = 0;
+-
+- if (!RTA_OK(rta, keylen))
+- return -EINVAL;
+-
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- return -EINVAL;
+-
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
+- return -EINVAL;
+-
+- param = RTA_DATA(rta);
+- enckeylen = be32_to_cpu(param->enckeylen);
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+- if (enckeylen != 0)
+- return -EINVAL;
+-
+- if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+- return -EINVAL;
+-
+- memcpy(ctx->key, key, keylen);
+- ctx->enc_key_len = enckeylen;
+- ctx->auth_key_len = keylen;
+-
+- return 0;
++ return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
+ }
+
+ static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+index bac729c885f960..215a1b17b6ce0a 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+@@ -249,8 +249,11 @@ static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
+ otx2_cptlf_unregister_interrupts(lfs);
+ /* Cleanup LFs software side */
+ lf_sw_cleanup(lfs);
++ /* Free instruction queues */
++ otx2_cpt_free_instruction_queues(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
++ lfs->lfs_num = 0;
+ }
+
+ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+index 8c143180645e5b..29c9537216fa6d 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
+ theend:
+ pm_runtime_put_autosuspend(rkc->dev);
+
++ rk_hash_unprepare(engine, breq);
++
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, breq, err);
+ local_bh_enable();
+
+- rk_hash_unprepare(engine, breq);
+-
+ return 0;
+ }
+
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index 6238d34f8db2f6..94eb6f6afa2572 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -1869,9 +1869,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
+ crypto_aead_set_flags(ctx->fallback.aead,
+ crypto_aead_get_flags(authenc) &
+ CRYPTO_TFM_REQ_MASK);
+- crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+
+- return 0;
++ return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+ }
+
+ static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
+index 62d93526920f80..8e84dd98a273da 100644
+--- a/drivers/crypto/sahara.c
++++ b/drivers/crypto/sahara.c
+@@ -43,7 +43,6 @@
+ #define FLAGS_MODE_MASK 0x000f
+ #define FLAGS_ENCRYPT BIT(0)
+ #define FLAGS_CBC BIT(1)
+-#define FLAGS_NEW_KEY BIT(3)
+
+ #define SAHARA_HDR_BASE 0x00800000
+ #define SAHARA_HDR_SKHA_ALG_AES 0
+@@ -141,8 +140,6 @@ struct sahara_hw_link {
+ };
+
+ struct sahara_ctx {
+- unsigned long flags;
+-
+ /* AES-specific context */
+ int keylen;
+ u8 key[AES_KEYSIZE_128];
+@@ -151,6 +148,7 @@ struct sahara_ctx {
+
+ struct sahara_aes_reqctx {
+ unsigned long mode;
++ u8 iv_out[AES_BLOCK_SIZE];
+ struct skcipher_request fallback_req; // keep at the end
+ };
+
+@@ -446,27 +444,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ int ret;
+ int i, j;
+ int idx = 0;
++ u32 len;
+
+- /* Copy new key if necessary */
+- if (ctx->flags & FLAGS_NEW_KEY) {
+- memcpy(dev->key_base, ctx->key, ctx->keylen);
+- ctx->flags &= ~FLAGS_NEW_KEY;
++ memcpy(dev->key_base, ctx->key, ctx->keylen);
+
+- if (dev->flags & FLAGS_CBC) {
+- dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+- dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+- } else {
+- dev->hw_desc[idx]->len1 = 0;
+- dev->hw_desc[idx]->p1 = 0;
+- }
+- dev->hw_desc[idx]->len2 = ctx->keylen;
+- dev->hw_desc[idx]->p2 = dev->key_phys_base;
+- dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
++ if (dev->flags & FLAGS_CBC) {
++ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
++ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
++ } else {
++ dev->hw_desc[idx]->len1 = 0;
++ dev->hw_desc[idx]->p1 = 0;
++ }
++ dev->hw_desc[idx]->len2 = ctx->keylen;
++ dev->hw_desc[idx]->p2 = dev->key_phys_base;
++ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
++ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+
+- dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
++ idx++;
+
+- idx++;
+- }
+
+ dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
+ if (dev->nb_in_sg < 0) {
+@@ -488,24 +483,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ DMA_TO_DEVICE);
+ if (!ret) {
+ dev_err(dev->device, "couldn't map in sg\n");
+- goto unmap_in;
++ return -EINVAL;
+ }
++
+ ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ if (!ret) {
+ dev_err(dev->device, "couldn't map out sg\n");
+- goto unmap_out;
++ goto unmap_in;
+ }
+
+ /* Create input links */
+ dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
+ sg = dev->in_sg;
++ len = dev->total;
+ for (i = 0; i < dev->nb_in_sg; i++) {
+- dev->hw_link[i]->len = sg->length;
++ dev->hw_link[i]->len = min(len, sg->length);
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
++ len -= min(len, sg->length);
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+@@ -514,12 +512,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ /* Create output links */
+ dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
+ sg = dev->out_sg;
++ len = dev->total;
+ for (j = i; j < dev->nb_out_sg + i; j++) {
+- dev->hw_link[j]->len = sg->length;
++ dev->hw_link[j]->len = min(len, sg->length);
+ dev->hw_link[j]->p = sg->dma_address;
+ if (j == (dev->nb_out_sg + i - 1)) {
+ dev->hw_link[j]->next = 0;
+ } else {
++ len -= min(len, sg->length);
+ dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
+ sg = sg_next(sg);
+ }
+@@ -538,9 +538,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+
+ return 0;
+
+-unmap_out:
+- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+- DMA_FROM_DEVICE);
+ unmap_in:
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+@@ -548,8 +545,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+ return -EINVAL;
+ }
+
++static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
++{
++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++ /* Update IV buffer to contain the last ciphertext block */
++ if (rctx->mode & FLAGS_ENCRYPT) {
++ sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
++ ivsize, req->cryptlen - ivsize);
++ } else {
++ memcpy(req->iv, rctx->iv_out, ivsize);
++ }
++}
++
+ static int sahara_aes_process(struct skcipher_request *req)
+ {
++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct sahara_dev *dev = dev_ptr;
+ struct sahara_ctx *ctx;
+ struct sahara_aes_reqctx *rctx;
+@@ -571,8 +584,17 @@ static int sahara_aes_process(struct skcipher_request *req)
+ rctx->mode &= FLAGS_MODE_MASK;
+ dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+- if ((dev->flags & FLAGS_CBC) && req->iv)
+- memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
++ if ((dev->flags & FLAGS_CBC) && req->iv) {
++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++ memcpy(dev->iv_base, req->iv, ivsize);
++
++ if (!(dev->flags & FLAGS_ENCRYPT)) {
++ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
++ rctx->iv_out, ivsize,
++ req->cryptlen - ivsize);
++ }
++ }
+
+ /* assign new context to device */
+ dev->ctx = ctx;
+@@ -585,16 +607,20 @@ static int sahara_aes_process(struct skcipher_request *req)
+
+ timeout = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+- if (!timeout) {
+- dev_err(dev->device, "AES timeout\n");
+- return -ETIMEDOUT;
+- }
+
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
++ if (!timeout) {
++ dev_err(dev->device, "AES timeout\n");
++ return -ETIMEDOUT;
++ }
++
++ if ((dev->flags & FLAGS_CBC) && req->iv)
++ sahara_aes_cbc_update_iv(req);
++
+ return 0;
+ }
+
+@@ -608,7 +634,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ /* SAHARA only supports 128bit keys */
+ if (keylen == AES_KEYSIZE_128) {
+ memcpy(ctx->key, key, keylen);
+- ctx->flags |= FLAGS_NEW_KEY;
+ return 0;
+ }
+
+@@ -624,12 +649,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ return crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ }
+
++static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
++{
++ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++ struct sahara_ctx *ctx = crypto_skcipher_ctx(
++ crypto_skcipher_reqtfm(req));
++
++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++ skcipher_request_set_callback(&rctx->fallback_req,
++ req->base.flags,
++ req->base.complete,
++ req->base.data);
++ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++ req->dst, req->cryptlen, req->iv);
++
++ if (mode & FLAGS_ENCRYPT)
++ return crypto_skcipher_encrypt(&rctx->fallback_req);
++
++ return crypto_skcipher_decrypt(&rctx->fallback_req);
++}
++
+ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
+ {
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
++ struct sahara_ctx *ctx = crypto_skcipher_ctx(
++ crypto_skcipher_reqtfm(req));
+ struct sahara_dev *dev = dev_ptr;
+ int err = 0;
+
++ if (!req->cryptlen)
++ return 0;
++
++ if (unlikely(ctx->keylen != AES_KEYSIZE_128))
++ return sahara_aes_fallback(req, mode);
++
+ dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
+ req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+
+@@ -652,81 +705,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
+
+ static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_encrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT);
+ }
+
+ static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_decrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, 0);
+ }
+
+ static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_encrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+ }
+
+ static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
+ {
+- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+- struct sahara_ctx *ctx = crypto_skcipher_ctx(
+- crypto_skcipher_reqtfm(req));
+-
+- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+- skcipher_request_set_callback(&rctx->fallback_req,
+- req->base.flags,
+- req->base.complete,
+- req->base.data);
+- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+- req->dst, req->cryptlen, req->iv);
+- return crypto_skcipher_decrypt(&rctx->fallback_req);
+- }
+-
+ return sahara_aes_crypt(req, FLAGS_CBC);
+ }
+
+@@ -783,6 +776,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ int start)
+ {
+ struct scatterlist *sg;
++ unsigned int len;
+ unsigned int i;
+ int ret;
+
+@@ -804,12 +798,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ if (!ret)
+ return -EFAULT;
+
++ len = rctx->total;
+ for (i = start; i < dev->nb_in_sg + start; i++) {
+- dev->hw_link[i]->len = sg->length;
++ dev->hw_link[i]->len = min(len, sg->length);
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg + start - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
++ len -= min(len, sg->length);
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+@@ -890,24 +886,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+ return 0;
+ }
+
+-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+-{
+- if (!sg || !sg->length)
+- return nbytes;
+-
+- while (nbytes && sg) {
+- if (nbytes <= sg->length) {
+- sg->length = nbytes;
+- sg_mark_end(sg);
+- break;
+- }
+- nbytes -= sg->length;
+- sg = sg_next(sg);
+- }
+-
+- return nbytes;
+-}
+-
+ static int sahara_sha_prepare_request(struct ahash_request *req)
+ {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -944,36 +922,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
+ hash_later, 0);
+ }
+
+- /* nbytes should now be multiple of blocksize */
+- req->nbytes = req->nbytes - hash_later;
+-
+- sahara_walk_and_recalc(req->src, req->nbytes);
+-
++ rctx->total = len - hash_later;
+ /* have data from previous operation and current */
+ if (rctx->buf_cnt && req->nbytes) {
+ sg_init_table(rctx->in_sg_chain, 2);
+ sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+-
+ sg_chain(rctx->in_sg_chain, 2, req->src);
+-
+- rctx->total = req->nbytes + rctx->buf_cnt;
+ rctx->in_sg = rctx->in_sg_chain;
+-
+- req->src = rctx->in_sg_chain;
+ /* only data from previous operation */
+ } else if (rctx->buf_cnt) {
+- if (req->src)
+- rctx->in_sg = req->src;
+- else
+- rctx->in_sg = rctx->in_sg_chain;
+- /* buf was copied into rembuf above */
++ rctx->in_sg = rctx->in_sg_chain;
+ sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+- rctx->total = rctx->buf_cnt;
+ /* no data from previous operation */
+ } else {
+ rctx->in_sg = req->src;
+- rctx->total = req->nbytes;
+- req->src = rctx->in_sg;
+ }
+
+ /* on next call, we only have the remaining data in the buffer */
+@@ -994,7 +956,10 @@ static int sahara_sha_process(struct ahash_request *req)
+ return ret;
+
+ if (rctx->first) {
+- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
++ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
++ if (ret)
++ return ret;
++
+ dev->hw_desc[0]->next = 0;
+ rctx->first = 0;
+ } else {
+@@ -1002,7 +967,10 @@ static int sahara_sha_process(struct ahash_request *req)
+
+ sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+ dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
++ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
++ if (ret)
++ return ret;
++
+ dev->hw_desc[1]->next = 0;
+ }
+
+@@ -1015,18 +983,19 @@ static int sahara_sha_process(struct ahash_request *req)
+
+ timeout = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+- if (!timeout) {
+- dev_err(dev->device, "SHA timeout\n");
+- return -ETIMEDOUT;
+- }
+
+ if (rctx->sg_in_idx)
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
++ if (!timeout) {
++ dev_err(dev->device, "SHA timeout\n");
++ return -ETIMEDOUT;
++ }
++
+ memcpy(rctx->context, dev->context_base, rctx->context_size);
+
+- if (req->result)
++ if (req->result && rctx->last)
+ memcpy(req->result, rctx->context, rctx->digest_size);
+
+ return 0;
+@@ -1170,8 +1139,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
+ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+ {
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+- sizeof(struct sahara_sha_reqctx) +
+- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
++ sizeof(struct sahara_sha_reqctx));
+
+ return 0;
+ }
+diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
+index 08e974e0dd1247..4f5b6818208dc2 100644
+--- a/drivers/crypto/starfive/jh7110-cryp.c
++++ b/drivers/crypto/starfive/jh7110-cryp.c
+@@ -168,7 +168,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)
+ ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
+ (void *)cryp);
+ if (ret)
+- return dev_err_probe(&pdev->dev, irq,
++ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register interrupt handler\n");
+
+ clk_prepare_enable(cryp->hclk);
+@@ -180,12 +180,8 @@ static int starfive_cryp_probe(struct platform_device *pdev)
+ spin_unlock(&dev_list.lock);
+
+ ret = starfive_dma_init(cryp);
+- if (ret) {
+- if (ret == -EPROBE_DEFER)
+- goto err_probe_defer;
+- else
+- goto err_dma_init;
+- }
++ if (ret)
++ goto err_dma_init;
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(&pdev->dev, 1);
+@@ -233,7 +229,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)
+
+ tasklet_kill(&cryp->aes_done);
+ tasklet_kill(&cryp->hash_done);
+-err_probe_defer:
++
+ return ret;
+ }
+
+diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
+index fe011d50473d76..607f70292b215d 100644
+--- a/drivers/crypto/starfive/jh7110-cryp.h
++++ b/drivers/crypto/starfive/jh7110-cryp.h
+@@ -30,6 +30,7 @@
+ #define MAX_KEY_SIZE SHA512_BLOCK_SIZE
+ #define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE
+ #define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE
++#define STARFIVE_RSA_MAX_KEYSZ 256
+
+ union starfive_aes_csr {
+ u32 v;
+@@ -212,12 +213,11 @@ struct starfive_cryp_request_ctx {
+ struct scatterlist *out_sg;
+ struct ahash_request ahash_fbk_req;
+ size_t total;
+- size_t nents;
+ unsigned int blksize;
+ unsigned int digsize;
+ unsigned long in_sg_len;
+ unsigned char *adata;
+- u8 rsa_data[] __aligned(sizeof(u32));
++ u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32));
+ };
+
+ struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);
+diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
+index f31bbd825f883f..1db9a3d02848b5 100644
+--- a/drivers/crypto/starfive/jh7110-rsa.c
++++ b/drivers/crypto/starfive/jh7110-rsa.c
+@@ -37,7 +37,6 @@
+ // A * A * R mod N ==> A
+ #define CRYPTO_CMD_AARN 0x7
+
+-#define STARFIVE_RSA_MAX_KEYSZ 256
+ #define STARFIVE_RSA_RESET 0x2
+
+ static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
+@@ -91,7 +90,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
+ {
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+- int count = rctx->total / sizeof(u32) - 1;
++ int count = (ALIGN(rctx->total, 4) / 4) - 1;
+ int loop;
+ u32 temp;
+ u8 opsize;
+@@ -274,12 +273,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+- int ret = 0;
++ int ret = 0, shift = 0;
+
+ writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+- rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
+- rctx->rsa_data, rctx->total);
++ if (!IS_ALIGNED(rctx->total, sizeof(u32))) {
++ shift = sizeof(u32) - (rctx->total & 0x3);
++ memset(rctx->rsa_data, 0, shift);
++ }
++
++ rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
++ rctx->rsa_data + shift, rctx->total);
+
+ if (enc) {
+ key->bitlen = key->e_bitlen;
+@@ -329,7 +333,6 @@ static int starfive_rsa_enc(struct akcipher_request *req)
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+ rctx->total = req->src_len;
+- rctx->nents = sg_nents(rctx->in_sg);
+ ctx->rctx = rctx;
+
+ return starfive_rsa_enc_core(ctx, 1);
+diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
+index 90a920e7f6642f..c439be1650c84d 100644
+--- a/drivers/crypto/stm32/stm32-crc32.c
++++ b/drivers/crypto/stm32/stm32-crc32.c
+@@ -104,7 +104,7 @@ static struct stm32_crc *stm32_crc_get_next_crc(void)
+ struct stm32_crc *crc;
+
+ spin_lock_bh(&crc_list.lock);
+- crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
++ crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
+ if (crc)
+ list_move_tail(&crc->list, &crc_list.dev_list);
+ spin_unlock_bh(&crc_list.lock);
+diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
+index f095f0065428a9..2f1b82cf10b1c4 100644
+--- a/drivers/crypto/stm32/stm32-cryp.c
++++ b/drivers/crypto/stm32/stm32-cryp.c
+@@ -11,6 +11,7 @@
+ #include <crypto/internal/des.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/scatterwalk.h>
++#include <linux/bottom_half.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+@@ -1665,8 +1666,11 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+ it_mask &= ~IMSCR_OUT;
+ stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
+
+- if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
++ if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) {
++ local_bh_disable();
+ stm32_cryp_finish_req(cryp, 0);
++ local_bh_enable();
++ }
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+index 2621ff8a93764d..de53eddf6796b6 100644
+--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
+ }
+
+ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+- struct virtio_crypto_ctrl_header *header, void *para,
++ struct virtio_crypto_ctrl_header *header,
++ struct virtio_crypto_akcipher_session_para *para,
+ const uint8_t *key, unsigned int keylen)
+ {
+ struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
+
+ ctrl = &vc_ctrl_req->ctrl;
+ memcpy(&ctrl->header, header, sizeof(ctrl->header));
+- memcpy(&ctrl->u, para, sizeof(ctrl->u));
++ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
+index 154590e1f7643d..7059bbe5a2ebaa 100644
+--- a/drivers/crypto/virtio/virtio_crypto_common.h
++++ b/drivers/crypto/virtio/virtio_crypto_common.h
+@@ -10,6 +10,7 @@
+ #include <linux/virtio.h>
+ #include <linux/crypto.h>
+ #include <linux/spinlock.h>
++#include <linux/interrupt.h>
+ #include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/engine.h>
+@@ -28,6 +29,7 @@ struct data_queue {
+ char name[32];
+
+ struct crypto_engine *engine;
++ struct tasklet_struct done_task;
+ };
+
+ struct virtio_crypto {
+diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
+index 43a0838d31ff01..b909c6a2bf1c34 100644
+--- a/drivers/crypto/virtio/virtio_crypto_core.c
++++ b/drivers/crypto/virtio/virtio_crypto_core.c
+@@ -72,27 +72,28 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl
+ return 0;
+ }
+
+-static void virtcrypto_dataq_callback(struct virtqueue *vq)
++static void virtcrypto_done_task(unsigned long data)
+ {
+- struct virtio_crypto *vcrypto = vq->vdev->priv;
++ struct data_queue *data_vq = (struct data_queue *)data;
++ struct virtqueue *vq = data_vq->vq;
+ struct virtio_crypto_request *vc_req;
+- unsigned long flags;
+ unsigned int len;
+- unsigned int qid = vq->index;
+
+- spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+- spin_unlock_irqrestore(
+- &vcrypto->data_vq[qid].lock, flags);
+ if (vc_req->alg_cb)
+ vc_req->alg_cb(vc_req, len);
+- spin_lock_irqsave(
+- &vcrypto->data_vq[qid].lock, flags);
+ }
+ } while (!virtqueue_enable_cb(vq));
+- spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
++}
++
++static void virtcrypto_dataq_callback(struct virtqueue *vq)
++{
++ struct virtio_crypto *vcrypto = vq->vdev->priv;
++ struct data_queue *dq = &vcrypto->data_vq[vq->index];
++
++ tasklet_schedule(&dq->done_task);
+ }
+
+ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+@@ -150,6 +151,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+ ret = -ENOMEM;
+ goto err_engine;
+ }
++ tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
++ (unsigned long)&vi->data_vq[i]);
+ }
+
+ kfree(names);
+@@ -497,12 +500,15 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+ static void virtcrypto_remove(struct virtio_device *vdev)
+ {
+ struct virtio_crypto *vcrypto = vdev->priv;
++ int i;
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+ flush_work(&vcrypto->config_work);
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
++ for (i = 0; i < vcrypto->max_data_queues; i++)
++ tasklet_kill(&vcrypto->data_vq[i].done_task);
+ virtio_reset_device(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ virtcrypto_clear_crypto_engines(vcrypto);
+diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+index ce335578b759ed..84103fc3f66f18 100644
+--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
++++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+@@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine,
+ err = zynqmp_aes_aead_cipher(areq);
+ }
+
++ local_bh_disable();
+ crypto_finalize_aead_request(engine, areq, err);
++ local_bh_enable();
++
+ return 0;
+ }
+
+diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
+index 40d055560e52fd..43195345583098 100644
+--- a/drivers/cxl/acpi.c
++++ b/drivers/cxl/acpi.c
+@@ -194,31 +194,27 @@ struct cxl_cfmws_context {
+ int id;
+ };
+
+-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+- const unsigned long end)
++static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
++ struct cxl_cfmws_context *ctx)
+ {
+ int target_map[CXL_DECODER_MAX_INTERLEAVE];
+- struct cxl_cfmws_context *ctx = arg;
+ struct cxl_port *root_port = ctx->root_port;
+ struct resource *cxl_res = ctx->cxl_res;
+ struct cxl_cxims_context cxims_ctx;
+ struct cxl_root_decoder *cxlrd;
+ struct device *dev = ctx->dev;
+- struct acpi_cedt_cfmws *cfmws;
+ cxl_calc_hb_fn cxl_calc_hb;
+ struct cxl_decoder *cxld;
+ unsigned int ways, i, ig;
+ struct resource *res;
+ int rc;
+
+- cfmws = (struct acpi_cedt_cfmws *) header;
+-
+ rc = cxl_acpi_cfmws_verify(dev, cfmws);
+ if (rc) {
+ dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+- return 0;
++ return rc;
+ }
+
+ rc = eiw_to_ways(cfmws->interleave_ways, &ways);
+@@ -254,7 +250,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+
+ cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
+ if (IS_ERR(cxlrd))
+- return 0;
++ return PTR_ERR(cxlrd);
+
+ cxld = &cxlrd->cxlsd.cxld;
+ cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+@@ -295,16 +291,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ put_device(&cxld->dev);
+ else
+ rc = cxl_decoder_autoremove(dev, cxld);
+- if (rc) {
+- dev_err(dev, "Failed to add decode range: %pr", res);
+- return rc;
+- }
+- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+- dev_name(&cxld->dev),
+- phys_to_target_node(cxld->hpa_range.start),
+- cxld->hpa_range.start, cxld->hpa_range.end);
+-
+- return 0;
++ return rc;
+
+ err_insert:
+ kfree(res->name);
+@@ -313,6 +300,29 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ return -ENOMEM;
+ }
+
++static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
++ const unsigned long end)
++{
++ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
++ struct cxl_cfmws_context *ctx = arg;
++ struct device *dev = ctx->dev;
++ int rc;
++
++ rc = __cxl_parse_cfmws(cfmws, ctx);
++ if (rc)
++ dev_err(dev,
++ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
++ cfmws->base_hpa,
++ cfmws->base_hpa + cfmws->window_size - 1, rc);
++ else
++ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
++ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
++ cfmws->base_hpa + cfmws->window_size - 1);
++
++ /* never fail cxl_acpi load for a single window failure */
++ return 0;
++}
++
+ __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
+ struct device *dev)
+ {
+diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
+index 45e7e044cf4a04..6444cc827c9ceb 100644
+--- a/drivers/cxl/core/core.h
++++ b/drivers/cxl/core/core.h
+@@ -27,7 +27,14 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+ int cxl_region_init(void);
+ void cxl_region_exit(void);
+ int cxl_get_poison_by_endpoint(struct cxl_port *port);
++struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
++
+ #else
++static inline
++struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
++{
++ return NULL;
++}
+ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
+ {
+ return 0;
+@@ -75,6 +82,7 @@ resource_size_t __rcrb_to_component(struct device *dev,
+ enum cxl_rcrb which);
+
+ extern struct rw_semaphore cxl_dpa_rwsem;
++extern struct rw_semaphore cxl_region_rwsem;
+
+ int cxl_memdev_init(void);
+ void cxl_memdev_exit(void);
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index 4449b34a80cc9b..3600b7cbfb5893 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+ struct cxl_dport *dport = NULL;
+ int single_port_map[1];
+ unsigned long index;
++ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
++
++ /*
++ * Capability checks are moot for passthrough decoders, support
++ * any and all possibilities.
++ */
++ cxlhdm->interleave_mask = ~0U;
++ cxlhdm->iw_cap_mask = ~0UL;
+
+ cxlsd = cxl_switch_decoder_alloc(port, 1);
+ if (IS_ERR(cxlsd))
+@@ -79,13 +87,18 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
+ cxlhdm->interleave_mask |= GENMASK(11, 8);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
+ cxlhdm->interleave_mask |= GENMASK(14, 12);
++ cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
++ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
++ cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
++ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
++ cxlhdm->iw_cap_mask |= BIT(16);
+ }
+
+ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
+ struct cxl_component_regs *regs)
+ {
+ struct cxl_register_map map = {
+- .dev = &port->dev,
++ .host = &port->dev,
+ .resource = port->component_reg_phys,
+ .base = crb,
+ .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+@@ -373,10 +386,9 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
+ {
+ resource_size_t base = -1;
+
+- down_read(&cxl_dpa_rwsem);
++ lockdep_assert_held(&cxl_dpa_rwsem);
+ if (cxled->dpa_res)
+ base = cxled->dpa_res->start;
+- up_read(&cxl_dpa_rwsem);
+
+ return base;
+ }
+@@ -575,17 +587,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+ CXL_HDM_DECODER0_CTRL_HOSTONLY);
+ }
+
+-static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
++static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+ {
+ struct cxl_dport **t = &cxlsd->target[0];
+ int ways = cxlsd->cxld.interleave_ways;
+
+- if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+- ways > 8 || ways > cxlsd->nr_targets,
+- "ways: %d overflows targets: %d\n", ways,
+- cxlsd->nr_targets))
+- return -ENXIO;
+-
+ *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+ if (ways > 1)
+ *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+@@ -601,8 +607,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+ *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+ if (ways > 7)
+ *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+-
+- return 0;
+ }
+
+ /*
+@@ -643,13 +647,33 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+- if (port->commit_end + 1 != id) {
++ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+- dev_name(&cxld->dev), port->id, port->commit_end + 1);
++ dev_name(&cxld->dev), port->id,
++ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
++ /*
++ * For endpoint decoders hosted on CXL memory devices that
++ * support the sanitize operation, make sure sanitize is not in-flight.
++ */
++ if (is_endpoint_decoder(&cxld->dev)) {
++ struct cxl_endpoint_decoder *cxled =
++ to_cxl_endpoint_decoder(&cxld->dev);
++ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct cxl_memdev_state *mds =
++ to_cxl_memdev_state(cxlmd->cxlds);
++
++ if (mds && mds->security.sanitize_active) {
++ dev_dbg(&cxlmd->dev,
++ "attempted to commit %s during sanitize\n",
++ dev_name(&cxld->dev));
++ return -EBUSY;
++ }
++ }
++
+ down_read(&cxl_dpa_rwsem);
+ /* common decoder settings */
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+@@ -670,13 +694,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+ void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+ u64 targets;
+
+- rc = cxlsd_set_targets(cxlsd, &targets);
+- if (rc) {
+- dev_dbg(&port->dev, "%s: target configuration error\n",
+- dev_name(&cxld->dev));
+- goto err;
+- }
+-
++ cxlsd_set_targets(cxlsd, &targets);
+ writel(upper_32_bits(targets), tl_hi);
+ writel(lower_32_bits(targets), tl_lo);
+ } else {
+@@ -694,7 +712,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
+
+ port->commit_end++;
+ rc = cxld_await_commit(hdm, cxld->id);
+-err:
+ if (rc) {
+ dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+ dev_name(&cxld->dev), rc);
+@@ -844,7 +861,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ else
+ cxld->target_type = CXL_DECODER_DEVMEM;
+- if (cxld->id != port->commit_end + 1) {
++
++ guard(rwsem_write)(&cxl_region_rwsem);
++ if (cxld->id != cxl_num_decoders_committed(port)) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed out of order\n",
+ port->id, cxld->id);
+diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
+index 4df4f614f490ef..fecaa18f4dd203 100644
+--- a/drivers/cxl/core/mbox.c
++++ b/drivers/cxl/core/mbox.c
+@@ -928,7 +928,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
+ for (cnt = 0; cnt < total; cnt++) {
+ payload->handles[i++] = get_pl->records[cnt].hdr.handle;
+ dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
+- le16_to_cpu(payload->handles[i]));
++ le16_to_cpu(payload->handles[i - 1]));
+
+ if (i == max_handles) {
+ payload->nr_recs = i;
+@@ -959,24 +959,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
+ struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+ struct device *dev = mds->cxlds.dev;
+ struct cxl_get_event_payload *payload;
+- struct cxl_mbox_cmd mbox_cmd;
+ u8 log_type = type;
+ u16 nr_rec;
+
+ mutex_lock(&mds->event.log_lock);
+ payload = mds->event.buf;
+
+- mbox_cmd = (struct cxl_mbox_cmd) {
+- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+- .payload_in = &log_type,
+- .size_in = sizeof(log_type),
+- .payload_out = payload,
+- .size_out = mds->payload_size,
+- .min_out = struct_size(payload, records, 0),
+- };
+-
+ do {
+ int rc, i;
++ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
++ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
++ .payload_in = &log_type,
++ .size_in = sizeof(log_type),
++ .payload_out = payload,
++ .size_out = mds->payload_size,
++ .min_out = struct_size(payload, records, 0),
++ };
+
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ if (rc) {
+@@ -1125,20 +1123,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+
+-/**
+- * cxl_mem_sanitize() - Send a sanitization command to the device.
+- * @mds: The device data for the operation
+- * @cmd: The specific sanitization command opcode
+- *
+- * Return: 0 if the command was executed successfully, regardless of
+- * whether or not the actual security operation is done in the background,
+- * such as for the Sanitize case.
+- * Error return values can be the result of the mailbox command, -EINVAL
+- * when security requirements are not met or invalid contexts.
+- *
+- * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+- */
+-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
++static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+ {
+ int rc;
+ u32 sec_out = 0;
+@@ -1183,7 +1168,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
++
++
++/**
++ * cxl_mem_sanitize() - Send a sanitization command to the device.
++ * @cxlmd: The device for the operation
++ * @cmd: The specific sanitization command opcode
++ *
++ * Return: 0 if the command was executed successfully, regardless of
++ * whether or not the actual security operation is done in the background,
++ * such as for the Sanitize case.
++ * Error return values can be the result of the mailbox command, -EINVAL
++ * when security requirements are not met or invalid contexts, or -EBUSY
++ * if the sanitize operation is already in flight.
++ *
++ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
++ */
++int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
++{
++ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++ struct cxl_port *endpoint;
++ int rc;
++
++ /* synchronize with cxl_mem_probe() and decoder write operations */
++ device_lock(&cxlmd->dev);
++ endpoint = cxlmd->endpoint;
++ down_read(&cxl_region_rwsem);
++ /*
++ * Require an endpoint to be safe otherwise the driver can not
++ * be sure that the device is unmapped.
++ */
++ if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
++ rc = __cxl_mem_sanitize(mds, cmd);
++ else
++ rc = -EBUSY;
++ up_read(&cxl_region_rwsem);
++ device_unlock(&cxlmd->dev);
++
++ return rc;
++}
+
+ static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+@@ -1285,7 +1308,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_mbox_poison_out *po;
+ struct cxl_mbox_poison_in pi;
+- struct cxl_mbox_cmd mbox_cmd;
+ int nr_records = 0;
+ int rc;
+
+@@ -1297,16 +1319,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
+ pi.offset = cpu_to_le64(offset);
+ pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
+
+- mbox_cmd = (struct cxl_mbox_cmd) {
+- .opcode = CXL_MBOX_OP_GET_POISON,
+- .size_in = sizeof(pi),
+- .payload_in = &pi,
+- .size_out = mds->payload_size,
+- .payload_out = po,
+- .min_out = struct_size(po, record, 0),
+- };
+-
+ do {
++ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
++ .opcode = CXL_MBOX_OP_GET_POISON,
++ .size_in = sizeof(pi),
++ .payload_in = &pi,
++ .size_out = mds->payload_size,
++ .payload_out = po,
++ .min_out = struct_size(po, record, 0),
++ };
++
+ rc = cxl_internal_send_cmd(mds, &mbox_cmd);
+ if (rc)
+ break;
+diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
+index 14b547c07f5476..eb895c70043fdf 100644
+--- a/drivers/cxl/core/memdev.c
++++ b/drivers/cxl/core/memdev.c
+@@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev,
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+- u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+- u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
+- u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ unsigned long state = mds->security.state;
++ int rc = 0;
+
+- if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
+- return sysfs_emit(buf, "sanitize\n");
++ /* sync with latest submission state */
++ mutex_lock(&mds->mbox_mutex);
++ if (mds->security.sanitize_active)
++ rc = sysfs_emit(buf, "sanitize\n");
++ mutex_unlock(&mds->mbox_mutex);
++ if (rc)
++ return rc;
+
+ if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
+ return sysfs_emit(buf, "disabled\n");
+@@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev,
+ const char *buf, size_t len)
+ {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+- struct cxl_port *port = cxlmd->endpoint;
+ bool sanitize;
+ ssize_t rc;
+
+ if (kstrtobool(buf, &sanitize) || !sanitize)
+ return -EINVAL;
+
+- if (!port || !is_cxl_endpoint(port))
+- return -EINVAL;
+-
+- /* ensure no regions are mapped to this memdev */
+- if (port->commit_end != -1)
+- return -EBUSY;
+-
+- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
++ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
++ if (rc)
++ return rc;
+
+- return rc ? rc : len;
++ return len;
+ }
+ static struct device_attribute dev_attr_security_sanitize =
+ __ATTR(sanitize, 0200, NULL, security_sanitize_store);
+@@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev,
+ const char *buf, size_t len)
+ {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+- struct cxl_port *port = cxlmd->endpoint;
+ ssize_t rc;
+ bool erase;
+
+ if (kstrtobool(buf, &erase) || !erase)
+ return -EINVAL;
+
+- if (!port || !is_cxl_endpoint(port))
+- return -EINVAL;
+-
+- /* ensure no regions are mapped to this memdev */
+- if (port->commit_end != -1)
+- return -EBUSY;
+-
+- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
++ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
++ if (rc)
++ return rc;
+
+- return rc ? rc : len;
++ return len;
+ }
+ static struct device_attribute dev_attr_security_erase =
+ __ATTR(erase, 0200, NULL, security_erase_store);
+@@ -238,11 +227,17 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
+ if (!port || !is_cxl_endpoint(port))
+ return -EINVAL;
+
+- rc = down_read_interruptible(&cxl_dpa_rwsem);
++ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+- if (port->commit_end == -1) {
++ rc = down_read_interruptible(&cxl_dpa_rwsem);
++ if (rc) {
++ up_read(&cxl_region_rwsem);
++ return rc;
++ }
++
++ if (cxl_num_decoders_committed(port) == 0) {
+ /* No regions mapped to this memdev */
+ rc = cxl_get_poison_by_memdev(cxlmd);
+ } else {
+@@ -250,55 +245,12 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
+ rc = cxl_get_poison_by_endpoint(port);
+ }
+ up_read(&cxl_dpa_rwsem);
++ up_read(&cxl_region_rwsem);
+
+ return rc;
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
+
+-struct cxl_dpa_to_region_context {
+- struct cxl_region *cxlr;
+- u64 dpa;
+-};
+-
+-static int __cxl_dpa_to_region(struct device *dev, void *arg)
+-{
+- struct cxl_dpa_to_region_context *ctx = arg;
+- struct cxl_endpoint_decoder *cxled;
+- u64 dpa = ctx->dpa;
+-
+- if (!is_endpoint_decoder(dev))
+- return 0;
+-
+- cxled = to_cxl_endpoint_decoder(dev);
+- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+- return 0;
+-
+- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+- return 0;
+-
+- dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+- dev_name(&cxled->cxld.region->dev));
+-
+- ctx->cxlr = cxled->cxld.region;
+-
+- return 1;
+-}
+-
+-static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
+-{
+- struct cxl_dpa_to_region_context ctx;
+- struct cxl_port *port;
+-
+- ctx = (struct cxl_dpa_to_region_context) {
+- .dpa = dpa,
+- };
+- port = cxlmd->endpoint;
+- if (port && is_cxl_endpoint(port) && port->commit_end != -1)
+- device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+-
+- return ctx.cxlr;
+-}
+-
+ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
+ {
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+@@ -335,10 +287,16 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+- rc = down_read_interruptible(&cxl_dpa_rwsem);
++ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
++ rc = down_read_interruptible(&cxl_dpa_rwsem);
++ if (rc) {
++ up_read(&cxl_region_rwsem);
++ return rc;
++ }
++
+ rc = cxl_validate_poison_dpa(cxlmd, dpa);
+ if (rc)
+ goto out;
+@@ -366,6 +324,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+ trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
+ out:
+ up_read(&cxl_dpa_rwsem);
++ up_read(&cxl_region_rwsem);
+
+ return rc;
+ }
+@@ -383,10 +342,16 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+- rc = down_read_interruptible(&cxl_dpa_rwsem);
++ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
++ rc = down_read_interruptible(&cxl_dpa_rwsem);
++ if (rc) {
++ up_read(&cxl_region_rwsem);
++ return rc;
++ }
++
+ rc = cxl_validate_poison_dpa(cxlmd, dpa);
+ if (rc)
+ goto out;
+@@ -423,6 +388,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+ trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
+ out:
+ up_read(&cxl_dpa_rwsem);
++ up_read(&cxl_region_rwsem);
+
+ return rc;
+ }
+@@ -556,21 +522,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
+ }
+ EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
+
+-static void cxl_memdev_security_shutdown(struct device *dev)
+-{
+- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+-
+- if (mds->security.poll)
+- cancel_delayed_work_sync(&mds->security.poll_dwork);
+-}
+-
+ static void cxl_memdev_shutdown(struct device *dev)
+ {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+ down_write(&cxl_memdev_rwsem);
+- cxl_memdev_security_shutdown(dev);
+ cxlmd->cxlds = NULL;
+ up_write(&cxl_memdev_rwsem);
+ }
+@@ -580,8 +536,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
+
+- cxl_memdev_shutdown(dev);
+ cdev_device_del(&cxlmd->cdev, dev);
++ cxl_memdev_shutdown(dev);
+ put_device(dev);
+ }
+
+@@ -961,17 +917,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = {
+ .cleanup = cxl_fw_cleanup,
+ };
+
+-static void devm_cxl_remove_fw_upload(void *fwl)
++static void cxl_remove_fw_upload(void *fwl)
+ {
+ firmware_upload_unregister(fwl);
+ }
+
+-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
++int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
+ {
+ struct cxl_dev_state *cxlds = &mds->cxlds;
+ struct device *dev = &cxlds->cxlmd->dev;
+ struct fw_upload *fwl;
+- int rc;
+
+ if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
+ return 0;
+@@ -979,19 +934,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+ fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+ &cxl_memdev_fw_ops, mds);
+ if (IS_ERR(fwl))
+- return dev_err_probe(dev, PTR_ERR(fwl),
+- "Failed to register firmware loader\n");
+-
+- rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
+- fwl);
+- if (rc)
+- dev_err(dev,
+- "Failed to add firmware loader remove action: %d\n",
+- rc);
+-
+- return rc;
++ return PTR_ERR(fwl);
++ return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
+ }
+-EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
++EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
+
+ static const struct file_operations cxl_memdev_fops = {
+ .owner = THIS_MODULE,
+@@ -1002,36 +948,8 @@ static const struct file_operations cxl_memdev_fops = {
+ .llseek = noop_llseek,
+ };
+
+-static void put_sanitize(void *data)
+-{
+- struct cxl_memdev_state *mds = data;
+-
+- sysfs_put(mds->security.sanitize_node);
+-}
+-
+-static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
+-{
+- struct cxl_dev_state *cxlds = cxlmd->cxlds;
+- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+- struct device *dev = &cxlmd->dev;
+- struct kernfs_node *sec;
+-
+- sec = sysfs_get_dirent(dev->kobj.sd, "security");
+- if (!sec) {
+- dev_err(dev, "sysfs_get_dirent 'security' failed\n");
+- return -ENODEV;
+- }
+- mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+- sysfs_put(sec);
+- if (!mds->security.sanitize_node) {
+- dev_err(dev, "sysfs_get_dirent 'state' failed\n");
+- return -ENODEV;
+- }
+-
+- return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
+- }
+-
+-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
++struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
++ struct cxl_dev_state *cxlds)
+ {
+ struct cxl_memdev *cxlmd;
+ struct device *dev;
+@@ -1059,11 +977,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+ if (rc)
+ goto err;
+
+- rc = cxl_memdev_security_init(cxlmd);
+- if (rc)
+- goto err;
+-
+- rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
++ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+ if (rc)
+ return ERR_PTR(rc);
+ return cxlmd;
+@@ -1079,6 +993,50 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+ }
+ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
+
++static void sanitize_teardown_notifier(void *data)
++{
++ struct cxl_memdev_state *mds = data;
++ struct kernfs_node *state;
++
++ /*
++ * Prevent new irq triggered invocations of the workqueue and
++ * flush inflight invocations.
++ */
++ mutex_lock(&mds->mbox_mutex);
++ state = mds->security.sanitize_node;
++ mds->security.sanitize_node = NULL;
++ mutex_unlock(&mds->mbox_mutex);
++
++ cancel_delayed_work_sync(&mds->security.poll_dwork);
++ sysfs_put(state);
++}
++
++int devm_cxl_sanitize_setup_notifier(struct device *host,
++ struct cxl_memdev *cxlmd)
++{
++ struct cxl_dev_state *cxlds = cxlmd->cxlds;
++ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
++ struct kernfs_node *sec;
++
++ if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
++ return 0;
++
++ /*
++ * Note, the expectation is that @cxlmd would have failed to be
++ * created if these sysfs_get_dirent calls fail.
++ */
++ sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
++ if (!sec)
++ return -ENOENT;
++ mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
++ sysfs_put(sec);
++ if (!mds->security.sanitize_node)
++ return -ENOENT;
++
++ return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
++}
++EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
++
+ __init int cxl_memdev_init(void)
+ {
+ dev_t devt;
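+
Editor's note: devm_cxl_sanitize_setup_notifier() above relies on devm_add_action_or_reset() to tie teardown to the lifetime of @host rather than the memdev. A hedged sketch of that general pattern (struct and function names here are illustrative, not part of the patch):

	struct my_state {
		struct delayed_work dwork;
		/* ... */
	};

	static void my_teardown(void *data)
	{
		struct my_state *state = data;

		/* runs automatically when the host driver unbinds */
		cancel_delayed_work_sync(&state->dwork);
	}

	static int my_setup(struct device *host, struct my_state *state)
	{
		/* ... publish state, possibly arm state->dwork ... */

		/*
		 * On success the action fires at host unbind; if
		 * registration fails it fires immediately, hence "or_reset".
		 */
		return devm_add_action_or_reset(host, my_teardown, state);
	}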
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index c7a7887ebdcff8..6edfd054667373 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -388,10 +388,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
+
+ size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
+ if (!size) {
+- info->dvsec_range[i] = (struct range) {
+- .start = 0,
+- .end = CXL_RESOURCE_NONE,
+- };
+ continue;
+ }
+
+@@ -409,12 +405,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
+
+ base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
+
+- info->dvsec_range[i] = (struct range) {
++ info->dvsec_range[ranges++] = (struct range) {
+ .start = base,
+ .end = base + size - 1
+ };
+-
+- ranges++;
+ }
+
+ info->ranges = ranges;
+@@ -475,9 +469,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ allowed++;
+ }
+
+- if (!allowed) {
+- cxl_set_mem_enable(cxlds, 0);
+- info->mem_enabled = 0;
++ if (!allowed && info->mem_enabled) {
++ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
++ return -ENXIO;
+ }
+
+ /*
+diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c
+index 7684c843e5a59c..5d8e06b0ba6e88 100644
+--- a/drivers/cxl/core/pmu.c
++++ b/drivers/cxl/core/pmu.c
+@@ -23,7 +23,7 @@ const struct device_type cxl_pmu_type = {
+
+ static void remove_dev(void *dev)
+ {
+- device_del(dev);
++ device_unregister(dev);
+ }
+
+ int devm_cxl_pmu_add(struct device *parent, struct cxl_pmu_regs *regs,
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index 7ca01a834e188c..c67cc8c9d5cc61 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -28,9 +28,22 @@
+ * instantiated by the core.
+ */
+
++/*
++ * All changes to the interleave configuration occur with this lock held
++ * for write.
++ */
++DECLARE_RWSEM(cxl_region_rwsem);
++
+ static DEFINE_IDA(cxl_port_ida);
+ static DEFINE_XARRAY(cxl_root_buses);
+
++int cxl_num_decoders_committed(struct cxl_port *port)
++{
++ lockdep_assert_held(&cxl_region_rwsem);
++
++ return port->commit_end + 1;
++}
++
+ static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+@@ -159,14 +172,10 @@ static ssize_t target_list_show(struct device *dev,
+ {
+ struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
+ ssize_t offset;
+- unsigned int seq;
+ int rc;
+
+- do {
+- seq = read_seqbegin(&cxlsd->target_lock);
+- rc = emit_target_list(cxlsd, buf);
+- } while (read_seqretry(&cxlsd->target_lock, seq));
+-
++ guard(rwsem_read)(&cxl_region_rwsem);
++ rc = emit_target_list(cxlsd, buf);
+ if (rc < 0)
+ return rc;
+ offset = rc;
+@@ -213,9 +222,9 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
+ char *buf)
+ {
+ struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+- u64 base = cxl_dpa_resource_start(cxled);
+
+- return sysfs_emit(buf, "%#llx\n", base);
++ guard(rwsem_read)(&cxl_dpa_rwsem);
++ return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
+ }
+ static DEVICE_ATTR_RO(dpa_resource);
+
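+
Editor's note: both sysfs show routines above move to the scope-based guard() helper from <linux/cleanup.h>, which drops the read lock automatically on any return path. A minimal hedged sketch (the attribute, lock, and value names are illustrative):

	#include <linux/cleanup.h>

	static DECLARE_RWSEM(example_rwsem);
	static int example_value;

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* lock released automatically when the scope ends */
		guard(rwsem_read)(&example_rwsem);

		return sysfs_emit(buf, "%d\n", example_value);
	}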
+@@ -691,14 +700,14 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
+ return ERR_PTR(rc);
+ }
+
+-static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
++static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
+ resource_size_t component_reg_phys)
+ {
+ if (component_reg_phys == CXL_RESOURCE_NONE)
+ return 0;
+
+ *map = (struct cxl_register_map) {
+- .dev = dev,
++ .host = host,
+ .reg_type = CXL_REGLOC_RBI_COMPONENT,
+ .resource = component_reg_phys,
+ .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
+@@ -716,13 +725,23 @@ static int cxl_port_setup_regs(struct cxl_port *port,
+ component_reg_phys);
+ }
+
+-static int cxl_dport_setup_regs(struct cxl_dport *dport,
++static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
+ resource_size_t component_reg_phys)
+ {
++ int rc;
++
+ if (dev_is_platform(dport->dport_dev))
+ return 0;
+- return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+- component_reg_phys);
++
++ /*
++ * use @dport->dport_dev for the context for error messages during
++ * register probing, and fixup @host after the fact, since @host may be
++ * NULL.
++ */
++ rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
++ component_reg_phys);
++ dport->comp_map.host = host;
++ return rc;
+ }
+
+ static struct cxl_port *__devm_cxl_add_port(struct device *host,
+@@ -983,7 +1002,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ if (!dport)
+ return ERR_PTR(-ENOMEM);
+
+- if (rcrb != CXL_RESOURCE_NONE) {
++ dport->dport_dev = dport_dev;
++ dport->port_id = port_id;
++ dport->port = port;
++
++ if (rcrb == CXL_RESOURCE_NONE) {
++ rc = cxl_dport_setup_regs(&port->dev, dport,
++ component_reg_phys);
++ if (rc)
++ return ERR_PTR(rc);
++ } else {
+ dport->rcrb.base = rcrb;
+ component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
+ CXL_RCRB_DOWNSTREAM);
+@@ -992,6 +1020,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ return ERR_PTR(-ENXIO);
+ }
+
++ /*
++ * RCH @dport is not ready to map until associated with its
++ * memdev
++ */
++ rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
++ if (rc)
++ return ERR_PTR(rc);
++
+ dport->rch = true;
+ }
+
+@@ -999,14 +1035,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
+ dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
+ &component_reg_phys);
+
+- dport->dport_dev = dport_dev;
+- dport->port_id = port_id;
+- dport->port = port;
+-
+- rc = cxl_dport_setup_regs(dport, component_reg_phys);
+- if (rc)
+- return ERR_PTR(rc);
+-
+ cond_cxl_root_lock(port);
+ rc = add_dport(port, dport);
+ cond_cxl_root_unlock(port);
+@@ -1217,35 +1245,39 @@ static struct device *grandparent(struct device *dev)
+ return NULL;
+ }
+
++static struct device *endpoint_host(struct cxl_port *endpoint)
++{
++ struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
++
++ if (is_cxl_root(port))
++ return port->uport_dev;
++ return &port->dev;
++}
++
+ static void delete_endpoint(void *data)
+ {
+ struct cxl_memdev *cxlmd = data;
+ struct cxl_port *endpoint = cxlmd->endpoint;
+- struct cxl_port *parent_port;
+- struct device *parent;
+-
+- parent_port = cxl_mem_find_port(cxlmd, NULL);
+- if (!parent_port)
+- goto out;
+- parent = &parent_port->dev;
++ struct device *host = endpoint_host(endpoint);
+
+- device_lock(parent);
+- if (parent->driver && !endpoint->dead) {
+- devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
+- devm_release_action(parent, cxl_unlink_uport, endpoint);
+- devm_release_action(parent, unregister_port, endpoint);
++ device_lock(host);
++ if (host->driver && !endpoint->dead) {
++ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
++ devm_release_action(host, cxl_unlink_uport, endpoint);
++ devm_release_action(host, unregister_port, endpoint);
+ }
+ cxlmd->endpoint = NULL;
+- device_unlock(parent);
+- put_device(parent);
+-out:
++ device_unlock(host);
+ put_device(&endpoint->dev);
++ put_device(host);
+ }
+
+ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
+ {
++ struct device *host = endpoint_host(endpoint);
+ struct device *dev = &cxlmd->dev;
+
++ get_device(host);
+ get_device(&endpoint->dev);
+ cxlmd->endpoint = endpoint;
+ cxlmd->depth = endpoint->depth;
+@@ -1543,7 +1575,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
+ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
+ struct cxl_port *port, int *target_map)
+ {
+- int i, rc = 0;
++ int i;
+
+ if (!target_map)
+ return 0;
+@@ -1553,19 +1585,16 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
+ if (xa_empty(&port->dports))
+ return -EINVAL;
+
+- write_seqlock(&cxlsd->target_lock);
+- for (i = 0; i < cxlsd->nr_targets; i++) {
++ guard(rwsem_write)(&cxl_region_rwsem);
++ for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
+ struct cxl_dport *dport = find_dport(port, target_map[i]);
+
+- if (!dport) {
+- rc = -ENXIO;
+- break;
+- }
++ if (!dport)
++ return -ENXIO;
+ cxlsd->target[i] = dport;
+ }
+- write_sequnlock(&cxlsd->target_lock);
+
+- return rc;
++ return 0;
+ }
+
+ struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
+@@ -1635,7 +1664,6 @@ static int cxl_switch_decoder_init(struct cxl_port *port,
+ return -EINVAL;
+
+ cxlsd->nr_targets = nr_targets;
+- seqlock_init(&cxlsd->target_lock);
+ return cxl_decoder_init(port, &cxlsd->cxld);
+ }
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 6d63b8798c2992..5060d9802795ee 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -28,12 +28,6 @@
+ * 3. Decoder targets
+ */
+
+-/*
+- * All changes to the interleave configuration occur with this lock held
+- * for write.
+- */
+-static DECLARE_RWSEM(cxl_region_rwsem);
+-
+ static struct cxl_region *to_cxl_region(struct device *dev);
+
+ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+@@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ */
+ rc = cxl_region_invalidate_memregion(cxlr);
+ if (rc)
+- return rc;
++ goto out;
+
+ if (commit) {
+ rc = cxl_region_decode_commit(cxlr);
+@@ -403,7 +397,7 @@ static ssize_t interleave_ways_store(struct device *dev,
+ return rc;
+
+ /*
+- * Even for x3, x9, and x12 interleaves the region interleave must be a
++ * Even for x3, x6, and x12 interleaves the region interleave must be a
+ * power of 2 multiple of the host bridge interleave.
+ */
+ if (!is_power_of_2(val / cxld->interleave_ways) ||
+@@ -531,7 +525,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct resource *res;
+- u32 remainder = 0;
++ u64 remainder = 0;
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+@@ -551,7 +545,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
+ (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ return -ENXIO;
+
+- div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
++ div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
+ if (remainder)
+ return -EINVAL;
+
+@@ -735,12 +729,17 @@ static int match_auto_decoder(struct device *dev, void *data)
+ return 0;
+ }
+
+-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+- struct cxl_region *cxlr)
++static struct cxl_decoder *
++cxl_region_find_decoder(struct cxl_port *port,
++ struct cxl_endpoint_decoder *cxled,
++ struct cxl_region *cxlr)
+ {
+ struct device *dev;
+ int id = 0;
+
++ if (port == cxled_to_port(cxled))
++ return &cxled->cxld;
++
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
+ dev = device_find_child(&port->dev, &cxlr->params,
+ match_auto_decoder);
+@@ -758,8 +757,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
+ return to_cxl_decoder(dev);
+ }
+
+-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
+- struct cxl_region *cxlr)
++static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
++ struct cxl_decoder *cxld)
++{
++ struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
++ struct cxl_decoder *cxld_iter = rr->decoder;
++
++ /*
++ * Allow the out of order assembly of auto-discovered regions.
++ * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
++ * in HPA order. Confirm that the decoder with the lesser HPA
++ * starting address has the lesser id.
++ */
++ dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
++ dev_name(&cxld->dev), cxld->id,
++ dev_name(&cxld_iter->dev), cxld_iter->id);
++
++ if (cxld_iter->id > cxld->id)
++ return true;
++
++ return false;
++}
++
++static struct cxl_region_ref *
++alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
++ struct cxl_endpoint_decoder *cxled)
+ {
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_region_ref *cxl_rr, *iter;
+@@ -769,16 +791,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
+ xa_for_each(&port->regions, index, iter) {
+ struct cxl_region_params *ip = &iter->region->params;
+
+- if (!ip->res)
++ if (!ip->res || ip->res->start < p->res->start)
+ continue;
+
+- if (ip->res->start > p->res->start) {
+- dev_dbg(&cxlr->dev,
+- "%s: HPA order violation %s:%pr vs %pr\n",
+- dev_name(&port->dev),
+- dev_name(&iter->region->dev), ip->res, p->res);
+- return ERR_PTR(-EBUSY);
++ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
++ struct cxl_decoder *cxld;
++
++ cxld = cxl_region_find_decoder(port, cxled, cxlr);
++ if (auto_order_ok(port, iter->region, cxld))
++ continue;
+ }
++ dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
++ dev_name(&port->dev),
++ dev_name(&iter->region->dev), ip->res, p->res);
++
++ return ERR_PTR(-EBUSY);
+ }
+
+ cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
+@@ -858,10 +885,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+ {
+ struct cxl_decoder *cxld;
+
+- if (port == cxled_to_port(cxled))
+- cxld = &cxled->cxld;
+- else
+- cxld = cxl_region_find_decoder(port, cxlr);
++ cxld = cxl_region_find_decoder(port, cxled, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+@@ -958,7 +982,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
+ nr_targets_inc = true;
+ }
+ } else {
+- cxl_rr = alloc_region_ref(port, cxlr);
++ cxl_rr = alloc_region_ref(port, cxlr, cxled);
+ if (IS_ERR(cxl_rr)) {
+ dev_dbg(&cxlr->dev,
+ "%s: failed to allocate region reference\n",
+@@ -973,6 +997,26 @@ static int cxl_port_attach_region(struct cxl_port *port,
+ }
+ cxld = cxl_rr->decoder;
+
++ /*
++ * the number of targets should not exceed the target_count
++ * of the decoder
++ */
++ if (is_switch_decoder(&cxld->dev)) {
++ struct cxl_switch_decoder *cxlsd;
++
++ cxlsd = to_cxl_switch_decoder(&cxld->dev);
++ if (cxl_rr->nr_targets > cxlsd->nr_targets) {
++ dev_dbg(&cxlr->dev,
++ "%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
++ dev_name(port->uport_dev), dev_name(&port->dev),
++ dev_name(&cxld->dev), dev_name(&cxlmd->dev),
++ dev_name(&cxled->cxld.dev), pos,
++ cxlsd->nr_targets);
++ rc = -ENXIO;
++ goto out_erase;
++ }
++ }
++
+ rc = cxl_rr_ep_add(cxl_rr, cxled);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+@@ -1082,6 +1126,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+ return 0;
+ }
+
++static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
++{
++ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
++ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
++ unsigned int interleave_mask;
++ u8 eiw;
++ u16 eig;
++ int high_pos, low_pos;
++
++ if (!test_bit(iw, &cxlhdm->iw_cap_mask))
++ return -ENXIO;
++ /*
++ * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
++ * if eiw < 8:
++ * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
++ * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
++ *
++ * when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
++ * interleave bits are none.
++ *
++ * if eiw >= 8:
++ * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
++ * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
++ *
++ * when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
++ * interleave bits are none.
++ */
++ ways_to_eiw(iw, &eiw);
++ if (eiw == 0 || eiw == 8)
++ return 0;
++
++ granularity_to_eig(ig, &eig);
++ if (eiw > 8)
++ high_pos = eiw + eig - 1;
++ else
++ high_pos = eiw + eig + 7;
++ low_pos = eig + 8;
++ interleave_mask = GENMASK(high_pos, low_pos);
++ if (interleave_mask & ~cxlhdm->interleave_mask)
++ return -ENXIO;
++
++ return 0;
++}
++
+ static int cxl_port_setup_targets(struct cxl_port *port,
+ struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
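+
Editor's note: as a concrete check of the bit positions described in the comment block of check_interleave_cap() above, consider a hypothetical iw = 4, ig = 256 configuration, so eiw = 2 and eig = 0, both in the eiw < 8 case:

	/* Worked example only; mirrors check_interleave_cap() for eiw < 8. */
	u8 eiw;
	u16 eig;
	unsigned int interleave_mask;

	ways_to_eiw(4, &eiw);		/* eiw = 2 */
	granularity_to_eig(256, &eig);	/* eig = 0 */

	/* HPA bits [eig + 8 + eiw - 1 : eig + 8] = [9:8] pick the target */
	interleave_mask = GENMASK(eiw + eig + 7, eig + 8);	/* GENMASK(9, 8) */

	/* supported only if the decoder can route on both bit 8 and bit 9 */
	if (interleave_mask & ~cxlhdm->interleave_mask)
		return -ENXIO;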
+@@ -1133,7 +1221,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ }
+
+ if (is_cxl_root(parent_port)) {
+- parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
++ /*
++ * Root decoder IG is always set to value in CFMWS which
++ * may be different than this region's IG. We can use the
++ * region's IG here since interleave_granularity_store()
++ * does not allow interleaved host-bridges with
++ * root IG != region IG.
++ */
++ parent_ig = p->interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+@@ -1195,6 +1290,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ return rc;
+ }
+
++ if (iw > 8 || iw > cxlsd->nr_targets) {
++ dev_dbg(&cxlr->dev,
++ "%s:%s:%s: ways: %d overflows targets: %d\n",
++ dev_name(port->uport_dev), dev_name(&port->dev),
++ dev_name(&cxld->dev), iw, cxlsd->nr_targets);
++ return -ENXIO;
++ }
++
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ if (cxld->interleave_ways != iw ||
+ cxld->interleave_granularity != ig ||
+@@ -1217,6 +1320,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ return -ENXIO;
+ }
+ } else {
++ rc = check_interleave_cap(cxld, iw, ig);
++ if (rc) {
++ dev_dbg(&cxlr->dev,
++ "%s:%s iw: %d ig: %d is not supported\n",
++ dev_name(port->uport_dev),
++ dev_name(&port->dev), iw, ig);
++ return rc;
++ }
++
+ cxld->interleave_ways = iw;
+ cxld->interleave_granularity = ig;
+ cxld->hpa_range = (struct range) {
+@@ -1416,10 +1528,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr,
+ const struct cxl_dport *dport, int pos)
+ {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd;
++ struct cxl_decoder *cxld = &cxlsd->cxld;
++ int iw = cxld->interleave_ways;
+ struct cxl_port *iter;
+ int rc;
+
+- if (cxlrd->calc_hb(cxlrd, pos) != dport) {
++ if (dport != cxlrd->cxlsd.target[pos % iw]) {
+ dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ dev_name(&cxlrd->cxlsd.cxld.dev));
+@@ -1480,6 +1595,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
+ return 0;
+ }
+
++static int cmp_interleave_pos(const void *a, const void *b)
++{
++ struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
++ struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
++
++ return cxled_a->pos - cxled_b->pos;
++}
++
+ static struct cxl_port *next_port(struct cxl_port *port)
+ {
+ if (!port->parent_dport)
+@@ -1487,119 +1610,127 @@ static struct cxl_port *next_port(struct cxl_port *port)
+ return port->parent_dport->port;
+ }
+
+-static int decoder_match_range(struct device *dev, void *data)
++static int match_switch_decoder_by_range(struct device *dev, void *data)
+ {
+- struct cxl_endpoint_decoder *cxled = data;
+ struct cxl_switch_decoder *cxlsd;
++ struct range *r1, *r2 = data;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+- return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
+-}
++ r1 = &cxlsd->cxld.hpa_range;
+
+-static void find_positions(const struct cxl_switch_decoder *cxlsd,
+- const struct cxl_port *iter_a,
+- const struct cxl_port *iter_b, int *a_pos,
+- int *b_pos)
+-{
+- int i;
+-
+- for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
+- if (cxlsd->target[i] == iter_a->parent_dport)
+- *a_pos = i;
+- else if (cxlsd->target[i] == iter_b->parent_dport)
+- *b_pos = i;
+- if (*a_pos >= 0 && *b_pos >= 0)
+- break;
+- }
++ if (is_root_decoder(dev))
++ return range_contains(r1, r2);
++ return (r1->start == r2->start && r1->end == r2->end);
+ }
+
+-static int cmp_decode_pos(const void *a, const void *b)
++static int find_pos_and_ways(struct cxl_port *port, struct range *range,
++ int *pos, int *ways)
+ {
+- struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
+- struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
+- struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
+- struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
+- struct cxl_port *port_a = cxled_to_port(cxled_a);
+- struct cxl_port *port_b = cxled_to_port(cxled_b);
+- struct cxl_port *iter_a, *iter_b, *port = NULL;
+ struct cxl_switch_decoder *cxlsd;
++ struct cxl_port *parent;
+ struct device *dev;
+- int a_pos, b_pos;
+- unsigned int seq;
+-
+- /* Exit early if any prior sorting failed */
+- if (cxled_a->pos < 0 || cxled_b->pos < 0)
+- return 0;
++ int rc = -ENXIO;
+
+- /*
+- * Walk up the hierarchy to find a shared port, find the decoder that
+- * maps the range, compare the relative position of those dport
+- * mappings.
+- */
+- for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
+- struct cxl_port *next_a, *next_b;
++ parent = next_port(port);
++ if (!parent)
++ return rc;
+
+- next_a = next_port(iter_a);
+- if (!next_a)
+- break;
++ dev = device_find_child(&parent->dev, range,
++ match_switch_decoder_by_range);
++ if (!dev) {
++ dev_err(port->uport_dev,
++ "failed to find decoder mapping %#llx-%#llx\n",
++ range->start, range->end);
++ return rc;
++ }
++ cxlsd = to_cxl_switch_decoder(dev);
++ *ways = cxlsd->cxld.interleave_ways;
+
+- for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
+- next_b = next_port(iter_b);
+- if (next_a != next_b)
+- continue;
+- port = next_a;
++ for (int i = 0; i < *ways; i++) {
++ if (cxlsd->target[i] == port->parent_dport) {
++ *pos = i;
++ rc = 0;
+ break;
+ }
+-
+- if (port)
+- break;
+ }
++ put_device(dev);
+
+- if (!port) {
+- dev_err(cxlmd_a->dev.parent,
+- "failed to find shared port with %s\n",
+- dev_name(cxlmd_b->dev.parent));
+- goto err;
+- }
++ return rc;
++}
+
+- dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
+- if (!dev) {
+- struct range *range = &cxled_a->cxld.hpa_range;
++/**
++ * cxl_calc_interleave_pos() - calculate an endpoint position in a region
++ * @cxled: endpoint decoder member of given region
++ *
++ * The endpoint position is calculated by traversing the topology from
++ * the endpoint to the root decoder and iteratively applying this
++ * calculation:
++ *
++ * position = position * parent_ways + parent_pos;
++ *
++ * ...where @position is inferred from switch and root decoder target lists.
++ *
++ * Return: position >= 0 on success
++ * -ENXIO on failure
++ */
++static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
++{
++ struct cxl_port *iter, *port = cxled_to_port(cxled);
++ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
++ struct range *range = &cxled->cxld.hpa_range;
++ int parent_ways = 0, parent_pos = 0, pos = 0;
++ int rc;
+
+- dev_err(port->uport_dev,
+- "failed to find decoder that maps %#llx-%#llx\n",
+- range->start, range->end);
+- goto err;
+- }
++ /*
++ * Example: the expected interleave order of the 4-way region shown
++ * below is: mem0, mem2, mem1, mem3
++ *
++ * root_port
++ * / \
++ * host_bridge_0 host_bridge_1
++ * | | | |
++ * mem0 mem1 mem2 mem3
++ *
++ * In the example the calculator will iterate twice. The first iteration
++ * uses the mem position in the host-bridge and the ways of the host-
++ * bridge to generate the first, or local, position. The second
++ * iteration uses the host-bridge position in the root_port and the ways
++ * of the root_port to refine the position.
++ *
++ * A trace of the calculation per endpoint looks like this:
++ * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
++ * pos = 0 * 2 + 0 pos = 0 * 2 + 1
++ * pos: 0 pos: 1
++ *
++ * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
++ * pos = 1 * 2 + 0 pos = 1 * 2 + 1
++ * pos: 2 pos = 3
++ *
++ * Note that while this example is simple, the method applies to more
++ * complex topologies, including those with switches.
++ */
+
+- cxlsd = to_cxl_switch_decoder(dev);
+- do {
+- seq = read_seqbegin(&cxlsd->target_lock);
+- find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
+- } while (read_seqretry(&cxlsd->target_lock, seq));
++ /* Iterate from endpoint to root_port refining the position */
++ for (iter = port; iter; iter = next_port(iter)) {
++ if (is_cxl_root(iter))
++ break;
+
+- put_device(dev);
++ rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
++ if (rc)
++ return rc;
+
+- if (a_pos < 0 || b_pos < 0) {
+- dev_err(port->uport_dev,
+- "failed to find shared decoder for %s and %s\n",
+- dev_name(cxlmd_a->dev.parent),
+- dev_name(cxlmd_b->dev.parent));
+- goto err;
++ pos = pos * parent_ways + parent_pos;
+ }
+
+- dev_dbg(port->uport_dev, "%s comes %s %s\n",
+- dev_name(cxlmd_a->dev.parent),
+- a_pos - b_pos < 0 ? "before" : "after",
+- dev_name(cxlmd_b->dev.parent));
++ dev_dbg(&cxlmd->dev,
++ "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
++ dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
++ dev_name(&port->dev), range->start, range->end, pos);
+
+- return a_pos - b_pos;
+-err:
+- cxled_a->pos = -1;
+- return 0;
++ return pos;
+ }
+
+ static int cxl_region_sort_targets(struct cxl_region *cxlr)
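+
Editor's note: the refinement loop in cxl_calc_interleave_pos() reduces to a simple fold, and the mem3 trace from the comment block can be reproduced standalone (the values below are taken from that 4-way example):

	/* Standalone arithmetic check, not kernel code. */
	int pos = 0;
	const int parent_pos[]  = { 1, 1 };	/* mem3: pos in host bridge, then root */
	const int parent_ways[] = { 2, 2 };	/* 2-way interleave at each level */

	for (int i = 0; i < 2; i++)
		pos = pos * parent_ways[i] + parent_pos[i];
	/* pos == 3, matching "mem3: ... pos = 3" in the comment above */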
+@@ -1607,22 +1738,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
+ struct cxl_region_params *p = &cxlr->params;
+ int i, rc = 0;
+
+- sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
+- NULL);
+-
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
++ cxled->pos = cxl_calc_interleave_pos(cxled);
+ /*
+- * Record that sorting failed, but still continue to restore
+- * cxled->pos with its ->targets[] position so that follow-on
+- * code paths can reliably do p->targets[cxled->pos] to
+- * self-reference their entry.
++ * Record that sorting failed, but still continue to calc
++ * cxled->pos so that follow-on code paths can reliably
++ * do p->targets[cxled->pos] to self-reference their entry.
+ */
+ if (cxled->pos < 0)
+ rc = -ENXIO;
+- cxled->pos = i;
+ }
++ /* Keep the cxlr target list in interleave position order */
++ sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
++ cmp_interleave_pos, NULL);
+
+ dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
+ return rc;
+@@ -1638,6 +1768,15 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ struct cxl_dport *dport;
+ int rc = -ENXIO;
+
++ rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
++ p->interleave_granularity);
++ if (rc) {
++ dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
++ dev_name(&cxled->cxld.dev), p->interleave_ways,
++ p->interleave_granularity);
++ return rc;
++ }
++
+ if (cxled->mode != cxlr->mode) {
+ dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
+ dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
+@@ -1658,6 +1797,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ return -ENXIO;
+ }
+
++ if (p->nr_targets >= p->interleave_ways) {
++ dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
++ p->nr_targets);
++ return -EINVAL;
++ }
++
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+@@ -1750,7 +1895,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+- goto err_decrement;
++ return rc;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+@@ -1761,13 +1906,27 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ .end = p->res->end,
+ };
+
+- return 0;
++ if (p->nr_targets != p->interleave_ways)
++ return 0;
+
+-err_decrement:
+- p->nr_targets--;
+- cxled->pos = -1;
+- p->targets[pos] = NULL;
+- return rc;
++ /*
++ * Test the auto-discovery position calculator function
++ * against this successfully created user-defined region.
++ * A fail message here means that this interleave config
++ * will fail when presented as CXL_REGION_F_AUTO.
++ */
++ for (int i = 0; i < p->nr_targets; i++) {
++ struct cxl_endpoint_decoder *cxled = p->targets[i];
++ int test_pos;
++
++ test_pos = cxl_calc_interleave_pos(cxled);
++ dev_dbg(&cxled->cxld.dev,
++ "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
++ (test_pos == cxled->pos) ? "success" : "fail",
++ test_pos, cxled->pos);
++ }
++
++ return 0;
+ }
+
+ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+@@ -2112,15 +2271,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
+ struct device *dev;
+ int rc;
+
+- switch (mode) {
+- case CXL_DECODER_RAM:
+- case CXL_DECODER_PMEM:
+- break;
+- default:
+- dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+- return ERR_PTR(-EINVAL);
+- }
+-
+ cxlr = cxl_region_alloc(cxlrd, id);
+ if (IS_ERR(cxlr))
+ return cxlr;
+@@ -2171,6 +2321,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
+ {
+ int rc;
+
++ switch (mode) {
++ case CXL_DECODER_RAM:
++ case CXL_DECODER_PMEM:
++ break;
++ default:
++ dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
++ return ERR_PTR(-EINVAL);
++ }
++
+ rc = memregion_alloc(GFP_KERNEL);
+ if (rc < 0)
+ return ERR_PTR(rc);
+@@ -2423,10 +2582,6 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
+ struct cxl_poison_context ctx;
+ int rc = 0;
+
+- rc = down_read_interruptible(&cxl_region_rwsem);
+- if (rc)
+- return rc;
+-
+ ctx = (struct cxl_poison_context) {
+ .port = port
+ };
+@@ -2436,10 +2591,64 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
+ rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
+ &ctx);
+
+- up_read(&cxl_region_rwsem);
+ return rc;
+ }
+
++struct cxl_dpa_to_region_context {
++ struct cxl_region *cxlr;
++ u64 dpa;
++};
++
++static int __cxl_dpa_to_region(struct device *dev, void *arg)
++{
++ struct cxl_dpa_to_region_context *ctx = arg;
++ struct cxl_endpoint_decoder *cxled;
++ struct cxl_region *cxlr;
++ u64 dpa = ctx->dpa;
++
++ if (!is_endpoint_decoder(dev))
++ return 0;
++
++ cxled = to_cxl_endpoint_decoder(dev);
++ if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
++ return 0;
++
++ if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
++ return 0;
++
++ /*
++ * Stop the region search (return 1) when an endpoint mapping is
++ * found. The region may not be fully constructed so offering
++ * the cxlr in the context structure is not guaranteed.
++ */
++ cxlr = cxled->cxld.region;
++ if (cxlr)
++ dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
++ dev_name(&cxlr->dev));
++ else
++ dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
++ dev_name(dev));
++
++ ctx->cxlr = cxlr;
++
++ return 1;
++}
++
++struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
++{
++ struct cxl_dpa_to_region_context ctx;
++ struct cxl_port *port;
++
++ ctx = (struct cxl_dpa_to_region_context) {
++ .dpa = dpa,
++ };
++ port = cxlmd->endpoint;
++ if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
++ device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
++
++ return ctx.cxlr;
++}
++
+ static struct lock_class_key cxl_pmem_region_key;
+
+ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
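+
Editor's note: cxl_dpa_to_region() above uses the stock device_for_each_child() search idiom: the query and the result travel in a context struct, and the callback returns 1 to stop the walk at the first hit. A generic hedged sketch with illustrative names:

	struct search_ctx {
		u64 key;		/* what we are looking for */
		struct device *found;	/* first match, or NULL */
	};

	static int match_child(struct device *dev, void *arg)
	{
		struct search_ctx *ctx = arg;

		if (!child_matches(dev, ctx->key))	/* hypothetical predicate */
			return 0;			/* keep iterating */

		ctx->found = dev;
		return 1;	/* non-zero terminates device_for_each_child() */
	}

	static struct device *find_child(struct device *parent, u64 key)
	{
		struct search_ctx ctx = { .key = key };

		device_for_each_child(parent, &ctx, match_child);
		return ctx.found;
	}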
+@@ -2480,6 +2689,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+ if (i == 0) {
+ cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
+ if (!cxl_nvb) {
++ kfree(cxlr_pmem);
+ cxlr_pmem = ERR_PTR(-ENODEV);
+ goto out;
+ }
+@@ -2696,7 +2906,7 @@ static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
+ return rc;
+ }
+
+-static int match_decoder_by_range(struct device *dev, void *data)
++static int match_root_decoder_by_range(struct device *dev, void *data)
+ {
+ struct range *r1, *r2 = data;
+ struct cxl_root_decoder *cxlrd;
+@@ -2827,7 +3037,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+ int rc;
+
+ cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
+- match_decoder_by_range);
++ match_root_decoder_by_range);
+ if (!cxlrd_dev) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
+index 6281127b3e9d97..bab4592db647f7 100644
+--- a/drivers/cxl/core/regs.c
++++ b/drivers/cxl/core/regs.c
+@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
+ struct cxl_component_regs *regs,
+ unsigned long map_mask)
+ {
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+ struct mapinfo {
+ const struct cxl_reg_map *rmap;
+ void __iomem **addr;
+@@ -225,7 +225,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
+ continue;
+ phys_addr = map->resource + mi->rmap->offset;
+ length = mi->rmap->size;
+- *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
++ *(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length);
+ if (!*(mi->addr))
+ return -ENOMEM;
+ }
+@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
+ int cxl_map_device_regs(const struct cxl_register_map *map,
+ struct cxl_device_regs *regs)
+ {
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+ resource_size_t phys_addr = map->resource;
+ struct mapinfo {
+ const struct cxl_reg_map *rmap;
+@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
+
+ addr = phys_addr + mi->rmap->offset;
+ length = mi->rmap->size;
+- *(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
++ *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
+ if (!*(mi->addr))
+ return -ENOMEM;
+ }
+@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
+ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
+ struct cxl_register_map *map)
+ {
++ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
+ u64 offset = ((u64)reg_hi << 32) |
+ (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
+@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
+ if (offset > pci_resource_len(pdev, bar)) {
+ dev_warn(&pdev->dev,
+ "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
+- &pdev->resource[bar], &offset, map->reg_type);
++ &pdev->resource[bar], &offset, reg_type);
+ return false;
+ }
+
+- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
++ map->reg_type = reg_type;
+ map->resource = pci_resource_start(pdev, bar) + offset;
+ map->max_size = pci_resource_len(pdev, bar) - offset;
+ return true;
+@@ -309,7 +310,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ int regloc, i;
+
+ *map = (struct cxl_register_map) {
+- .dev = &pdev->dev,
++ .host = &pdev->dev,
+ .resource = CXL_RESOURCE_NONE,
+ };
+
+@@ -403,15 +404,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
+
+ static int cxl_map_regblock(struct cxl_register_map *map)
+ {
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+
+ map->base = ioremap(map->resource, map->max_size);
+ if (!map->base) {
+- dev_err(dev, "failed to map registers\n");
++ dev_err(host, "failed to map registers\n");
+ return -ENOMEM;
+ }
+
+- dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
++ dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+ return 0;
+ }
+
+@@ -425,28 +426,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
+ {
+ struct cxl_component_reg_map *comp_map;
+ struct cxl_device_reg_map *dev_map;
+- struct device *dev = map->dev;
++ struct device *host = map->host;
+ void __iomem *base = map->base;
+
+ switch (map->reg_type) {
+ case CXL_REGLOC_RBI_COMPONENT:
+ comp_map = &map->component_map;
+- cxl_probe_component_regs(dev, base, comp_map);
+- dev_dbg(dev, "Set up component registers\n");
++ cxl_probe_component_regs(host, base, comp_map);
++ dev_dbg(host, "Set up component registers\n");
+ break;
+ case CXL_REGLOC_RBI_MEMDEV:
+ dev_map = &map->device_map;
+- cxl_probe_device_regs(dev, base, dev_map);
++ cxl_probe_device_regs(host, base, dev_map);
+ if (!dev_map->status.valid || !dev_map->mbox.valid ||
+ !dev_map->memdev.valid) {
+- dev_err(dev, "registers not found: %s%s%s\n",
++ dev_err(host, "registers not found: %s%s%s\n",
+ !dev_map->status.valid ? "status " : "",
+ !dev_map->mbox.valid ? "mbox " : "",
+ !dev_map->memdev.valid ? "memdev " : "");
+ return -ENXIO;
+ }
+
+- dev_dbg(dev, "Probing device registers...\n");
++ dev_dbg(host, "Probing device registers...\n");
+ break;
+ default:
+ break;
+diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
+index a0b5819bc70b30..bdf24867d5174c 100644
+--- a/drivers/cxl/core/trace.h
++++ b/drivers/cxl/core/trace.h
+@@ -252,8 +252,8 @@ TRACE_EVENT(cxl_generic_event,
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+-#define CXL_DPA_FLAGS_MASK 0x3F
+-#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
++#define CXL_DPA_FLAGS_MASK GENMASK(1, 0)
++#define CXL_DPA_MASK GENMASK_ULL(63, 6)
+
+ #define CXL_DPA_VOLATILE BIT(0)
+ #define CXL_DPA_NOT_REPAIRABLE BIT(1)
+@@ -642,18 +642,18 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
+
+ TRACE_EVENT(cxl_poison,
+
+- TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
++ TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
+ const struct cxl_poison_record *record, u8 flags,
+ __le64 overflow_ts, enum cxl_poison_trace_type trace_type),
+
+- TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),
++ TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type),
+
+ TP_STRUCT__entry(
+ __string(memdev, dev_name(&cxlmd->dev))
+ __string(host, dev_name(cxlmd->dev.parent))
+ __field(u64, serial)
+ __field(u8, trace_type)
+- __string(region, region)
++ __string(region, cxlr ? dev_name(&cxlr->dev) : "")
+ __field(u64, overflow_ts)
+ __field(u64, hpa)
+ __field(u64, dpa)
+@@ -673,10 +673,10 @@ TRACE_EVENT(cxl_poison,
+ __entry->source = cxl_poison_record_source(record);
+ __entry->trace_type = trace_type;
+ __entry->flags = flags;
+- if (region) {
+- __assign_str(region, dev_name(&region->dev));
+- memcpy(__entry->uuid, &region->params.uuid, 16);
+- __entry->hpa = cxl_trace_hpa(region, cxlmd,
++ if (cxlr) {
++ __assign_str(region, dev_name(&cxlr->dev));
++ memcpy(__entry->uuid, &cxlr->params.uuid, 16);
++ __entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
+ __entry->dpa);
+ } else {
+ __assign_str(region, "");
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 76d92561af2949..bb3ad219b6b316 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -43,6 +43,8 @@
+ #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
+ #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
+ #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
++#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
++#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
+ #define CXL_HDM_DECODER_CTRL_OFFSET 0x4
+ #define CXL_HDM_DECODER_ENABLE BIT(1)
+ #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
+@@ -247,7 +249,7 @@ struct cxl_pmu_reg_map {
+
+ /**
+ * struct cxl_register_map - DVSEC harvested register block mapping parameters
+- * @dev: device for devm operations and logging
++ * @host: device for devm operations and logging
+ * @base: virtual base of the register-block-BAR + @block_offset
+ * @resource: physical resource base of the register block
+ * @max_size: maximum mapping size to perform register search
+@@ -257,7 +259,7 @@ struct cxl_pmu_reg_map {
+ * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
+ */
+ struct cxl_register_map {
+- struct device *dev;
++ struct device *host;
+ void __iomem *base;
+ resource_size_t resource;
+ resource_size_t max_size;
+@@ -404,7 +406,6 @@ struct cxl_endpoint_decoder {
+ /**
+ * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
+ * @cxld: base cxl_decoder object
+- * @target_lock: coordinate coherent reads of the target list
+ * @nr_targets: number of elements in @target
+ * @target: active ordered target list in current decoder configuration
+ *
+@@ -416,7 +417,6 @@ struct cxl_endpoint_decoder {
+ */
+ struct cxl_switch_decoder {
+ struct cxl_decoder cxld;
+- seqlock_t target_lock;
+ int nr_targets;
+ struct cxl_dport *target[];
+ };
+@@ -679,6 +679,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
+ return port->uport_dev == port->dev.parent;
+ }
+
++int cxl_num_decoders_committed(struct cxl_port *port);
+ bool is_cxl_port(const struct device *dev);
+ struct cxl_port *to_cxl_port(const struct device *dev);
+ struct pci_bus;
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index 706f8a6d1ef43c..edb46123e3eb0f 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
+ return is_cxl_memdev(port->uport_dev);
+ }
+
+-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
++struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
++ struct cxl_dev_state *cxlds);
++int devm_cxl_sanitize_setup_notifier(struct device *host,
++ struct cxl_memdev *cxlmd);
+ struct cxl_memdev_state;
+-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
++int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds);
+ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ resource_size_t base, resource_size_t len,
+ resource_size_t skipped);
+@@ -360,16 +363,16 @@ struct cxl_fw_state {
+ *
+ * @state: state of last security operation
+ * @enabled_cmds: All security commands enabled in the CEL
+- * @poll: polling for sanitization is enabled, device has no mbox irq support
+ * @poll_tmo_secs: polling timeout
++ * @sanitize_active: sanitize completion pending
+ * @poll_dwork: polling work item
+ * @sanitize_node: sanitation sysfs file to notify
+ */
+ struct cxl_security_state {
+ unsigned long state;
+ DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
+- bool poll;
+ int poll_tmo_secs;
++ bool sanitize_active;
+ struct delayed_work poll_dwork;
+ struct kernfs_node *sanitize_node;
+ };
+@@ -535,7 +538,7 @@ enum cxl_opcode {
+ 0x3b, 0x3f, 0x17)
+
+ #define DEFINE_CXL_VENDOR_DEBUG_UUID \
+- UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \
++ UUID_INIT(0x5e1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \
+ 0x40, 0x3d, 0x86)
+
+ struct cxl_mbox_get_supported_logs {
+@@ -883,13 +886,23 @@ static inline void cxl_mem_active_dec(void)
+ }
+ #endif
+
+-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
++int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
+
++/**
++ * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities
++ * @regs: mapped registers, see devm_cxl_setup_hdm()
++ * @decoder_count: number of decoders for this port
++ * @target_count: for switch decoders, max downstream port targets
++ * @interleave_mask: interleave granularity capability, see check_interleave_cap()
++ * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap()
++ * @port: mapped cxl_port, see devm_cxl_setup_hdm()
++ */
+ struct cxl_hdm {
+ struct cxl_component_regs regs;
+ unsigned int decoder_count;
+ unsigned int target_count;
+ unsigned int interleave_mask;
++ unsigned long iw_cap_mask;
+ struct cxl_port *port;
+ };
+
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 44a21ab7add51b..8bece1e2e2491d 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
+ reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+ opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
+ if (opcode == CXL_MBOX_OP_SANITIZE) {
++ mutex_lock(&mds->mbox_mutex);
+ if (mds->security.sanitize_node)
+- sysfs_notify_dirent(mds->security.sanitize_node);
+-
+- dev_dbg(cxlds->dev, "Sanitization operation ended\n");
++ mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
++ mutex_unlock(&mds->mbox_mutex);
+ } else {
+ /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
+ rcuwait_wake_up(&mds->mbox_wait);
+@@ -152,18 +152,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
+ mutex_lock(&mds->mbox_mutex);
+ if (cxl_mbox_background_complete(cxlds)) {
+ mds->security.poll_tmo_secs = 0;
+- put_device(cxlds->dev);
+-
+ if (mds->security.sanitize_node)
+ sysfs_notify_dirent(mds->security.sanitize_node);
++ mds->security.sanitize_active = false;
+
+ dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+ } else {
+ int timeout = mds->security.poll_tmo_secs + 10;
+
+ mds->security.poll_tmo_secs = min(15 * 60, timeout);
+- queue_delayed_work(system_wq, &mds->security.poll_dwork,
+- timeout * HZ);
++ schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
+ }
+ mutex_unlock(&mds->mbox_mutex);
+ }
+@@ -295,18 +293,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
+ * and allow userspace to poll(2) for completion.
+ */
+ if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
+- if (mds->security.poll) {
+- /* hold the device throughout */
+- get_device(cxlds->dev);
+-
+- /* give first timeout a second */
+- timeout = 1;
+- mds->security.poll_tmo_secs = timeout;
+- queue_delayed_work(system_wq,
+- &mds->security.poll_dwork,
+- timeout * HZ);
+- }
+-
++ if (mds->security.sanitize_active)
++ return -EBUSY;
++
++ /* give first timeout a second */
++ timeout = 1;
++ mds->security.poll_tmo_secs = timeout;
++ mds->security.sanitize_active = true;
++ schedule_delayed_work(&mds->security.poll_dwork,
++ timeout * HZ);
+ dev_dbg(dev, "Sanitization operation started\n");
+ goto success;
+ }
+@@ -389,7 +384,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+ const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+ struct device *dev = cxlds->dev;
+ unsigned long timeout;
++ int irq, msgnum;
+ u64 md_status;
++ u32 ctrl;
+
+ timeout = jiffies + mbox_ready_timeout * HZ;
+ do {
+@@ -437,33 +434,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+ dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
+
+ rcuwait_init(&mds->mbox_wait);
++ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+
+- if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
+- u32 ctrl;
+- int irq, msgnum;
+- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+-
+- msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+- irq = pci_irq_vector(pdev, msgnum);
+- if (irq < 0)
+- goto mbox_poll;
+-
+- if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
+- goto mbox_poll;
++ /* background command interrupts are optional */
++ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
++ return 0;
+
+- /* enable background command mbox irq support */
+- ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+- ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+- writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
++ msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
++ irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
++ if (irq < 0)
++ return 0;
+
++ if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
+ return 0;
+- }
+
+-mbox_poll:
+- mds->security.poll = true;
+- INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
++ dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
++ /* enable background command mbox irq support */
++ ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
++ ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
++ writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+
+- dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
+ return 0;
+ }
+
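+
Editor's note: after this change the mailbox polling worker is armed unconditionally and the background-command interrupt is strictly best-effort: every failure to wire it up returns 0 and leaves polling as the fallback. Sketched in outline (the helper names below are placeholders, not the driver's API):

	/* Fallback first: polling always works. */
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);

	if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
		return 0;		/* no irq capability: poll */

	irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
	if (irq < 0)
		return 0;		/* no vector available: poll */

	if (request_the_irq(irq))	/* placeholder for cxl_request_irq() */
		return 0;		/* registration failed: poll */

	enable_bg_irq_in_ctrl();	/* placeholder: set BG_CMD_IRQ in MBOX_CTRL */
	return 0;			/* irq path active; polling stays armed */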
+@@ -484,7 +474,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
+ resource_size_t component_reg_phys;
+
+ *map = (struct cxl_register_map) {
+- .dev = &pdev->dev,
++ .host = &pdev->dev,
+ .resource = CXL_RESOURCE_NONE,
+ };
+
+@@ -882,11 +872,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (rc)
+ return rc;
+
+- cxlmd = devm_cxl_add_memdev(cxlds);
++ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+- rc = cxl_memdev_setup_fw_upload(mds);
++ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
++ if (rc)
++ return rc;
++
++ rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/dax/device.c b/drivers/dax/device.c
+index 93ebedc5ec8ca3..01e89b7ac637f2 100644
+--- a/drivers/dax/device.c
++++ b/drivers/dax/device.c
+@@ -86,7 +86,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+ nr_pages = 1;
+
+ pgoff = linear_page_index(vmf->vma,
+- ALIGN(vmf->address, fault_size));
++ ALIGN_DOWN(vmf->address, fault_size));
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 474d81831ad36b..49c542ecccde3b 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -461,10 +461,14 @@ static void devfreq_monitor(struct work_struct *work)
+ if (err)
+ dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
++ if (devfreq->stop_polling)
++ goto out;
++
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+- mutex_unlock(&devfreq->lock);
+
++out:
++ mutex_unlock(&devfreq->lock);
+ trace_devfreq_monitor(devfreq);
+ }
+
+@@ -483,6 +487,10 @@ void devfreq_monitor_start(struct devfreq *devfreq)
+ if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
+ return;
+
++ mutex_lock(&devfreq->lock);
++ if (delayed_work_pending(&devfreq->work))
++ goto out;
++
+ switch (devfreq->profile->timer) {
+ case DEVFREQ_TIMER_DEFERRABLE:
+ INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+@@ -491,12 +499,16 @@ void devfreq_monitor_start(struct devfreq *devfreq)
+ INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
+ break;
+ default:
+- return;
++ goto out;
+ }
+
+ if (devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
++
++out:
++ devfreq->stop_polling = false;
++ mutex_unlock(&devfreq->lock);
+ }
+ EXPORT_SYMBOL(devfreq_monitor_start);
+
+@@ -513,6 +525,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq)
+ if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
+ return;
+
++ mutex_lock(&devfreq->lock);
++ if (devfreq->stop_polling) {
++ mutex_unlock(&devfreq->lock);
++ return;
++ }
++
++ devfreq->stop_polling = true;
++ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ }
+ EXPORT_SYMBOL(devfreq_monitor_stop);
+@@ -1688,7 +1708,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct devfreq *df = to_devfreq(dev);
+- ssize_t len;
++ ssize_t len = 0;
+ int i, j;
+ unsigned int max_state;
+
+@@ -1697,7 +1717,7 @@ static ssize_t trans_stat_show(struct device *dev,
+ max_state = df->max_state;
+
+ if (max_state == 0)
+- return sprintf(buf, "Not Supported.\n");
++ return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
+
+ mutex_lock(&df->lock);
+ if (!df->stop_polling &&
+@@ -1707,31 +1727,52 @@ static ssize_t trans_stat_show(struct device *dev,
+ }
+ mutex_unlock(&df->lock);
+
+- len = sprintf(buf, " From : To\n");
+- len += sprintf(buf + len, " :");
+- for (i = 0; i < max_state; i++)
+- len += sprintf(buf + len, "%10lu",
+- df->freq_table[i]);
++ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " :");
++ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
++ df->freq_table[i]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+- len += sprintf(buf + len, " time(ms)\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
+ if (df->freq_table[i] == df->previous_freq)
+- len += sprintf(buf + len, "*");
++ len += scnprintf(buf + len, PAGE_SIZE - len, "*");
+ else
+- len += sprintf(buf + len, " ");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " ");
++ if (len >= PAGE_SIZE - 1)
++ break;
++
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
++ df->freq_table[i]);
++ for (j = 0; j < max_state; j++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
++ df->stats.trans_table[(i * max_state) + j]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
++ jiffies64_to_msecs(df->stats.time_in_state[i]));
++ }
+
+- len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
+- for (j = 0; j < max_state; j++)
+- len += sprintf(buf + len, "%10u",
+- df->stats.trans_table[(i * max_state) + j]);
++ if (len < PAGE_SIZE - 1)
++ len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
++ df->stats.total_trans);
+
+- len += sprintf(buf + len, "%10llu\n", (u64)
+- jiffies64_to_msecs(df->stats.time_in_state[i]));
++ if (len >= PAGE_SIZE - 1) {
++ pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
++ return -EFBIG;
+ }
+
+- len += sprintf(buf + len, "Total transition : %u\n",
+- df->stats.total_trans);
+ return len;
+ }
+
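[Editorial note] The trans_stat_show() rework above replaces open-ended sprintf() appends with scnprintf() against a PAGE_SIZE budget. scnprintf() returns the number of bytes actually stored, never more than size - 1, so the running length can be reused as an offset without ever overrunning the buffer. A self-contained sketch of the pattern (the kernel helper's semantics are reproduced here as an assumption, with a tiny buffer standing in for PAGE_SIZE):

#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
    va_list args;
    int i;

    if (size == 0)
        return 0;
    va_start(args, fmt);
    i = vsnprintf(buf, size, fmt, args);
    va_end(args);
    /* vsnprintf() reports what it *would* have written; clamp to what fit. */
    return (i >= (int)size) ? (int)size - 1 : i;
}

int main(void)
{
    char buf[32];                  /* stands in for the PAGE_SIZE buffer */
    int len = 0;

    for (int i = 0; i < 100; i++) {
        if (len >= (int)sizeof(buf) - 1)
            break;                 /* budget exhausted: stop appending */
        len += scnprintf(buf + len, sizeof(buf) - len, "%10lu",
                         (unsigned long)i);
    }
    printf("len=%d buf=\"%s\"\n", len, buf);
    return 0;
}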
+diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
+index 39ac069cabc756..74893c06aa087f 100644
+--- a/drivers/devfreq/event/rockchip-dfi.c
++++ b/drivers/devfreq/event/rockchip-dfi.c
+@@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
+
+- /* try to find the optional reference to the pmu syscon */
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+- if (node) {
+- data->regmap_pmu = syscon_node_to_regmap(node);
+- of_node_put(node);
+- if (IS_ERR(data->regmap_pmu))
+- return PTR_ERR(data->regmap_pmu);
+- }
++ if (!node)
++ return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
++
++ data->regmap_pmu = syscon_node_to_regmap(node);
++ of_node_put(node);
++ if (IS_ERR(data->regmap_pmu))
++ return PTR_ERR(data->regmap_pmu);
++
+ data->dev = dev;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index 38b4110378de05..eb8b733065b24d 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ if ((old->context == fence->context && old_usage >= usage &&
+- dma_fence_is_later(fence, old)) ||
++ dma_fence_is_later_or_same(fence, old)) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
+diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
+index ee899f8e67215f..bea7e574f916e1 100644
+--- a/drivers/dma-buf/heaps/cma_heap.c
++++ b/drivers/dma-buf/heaps/cma_heap.c
+@@ -165,7 +165,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+ struct vm_area_struct *vma = vmf->vma;
+ struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+- if (vmf->pgoff > buffer->pagecount)
++ if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
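[Editorial note] The one-character cma_heap fix above is a classic off-by-one: with pagecount pages the valid indices run 0 through pagecount - 1, so a fault at pgoff == pagecount must already raise SIGBUS. A tiny demonstration of the two predicates:

#include <stdio.h>

int main(void)
{
    unsigned long pagecount = 4;             /* pages[0] .. pages[3] valid */

    for (unsigned long pgoff = 3; pgoff <= 5; pgoff++) {
        int old_check = pgoff > pagecount;   /* lets pgoff == 4 through: OOB */
        int new_check = pgoff >= pagecount;  /* rejects everything past end  */
        printf("pgoff=%lu old=%s new=%s\n", pgoff,
               old_check ? "SIGBUS" : "ok", new_check ? "SIGBUS" : "ok");
    }
    return 0;
}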
+diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
+index c0979c8049b5a3..ed4b323886e430 100644
+--- a/drivers/dma-buf/st-dma-fence-chain.c
++++ b/drivers/dma-buf/st-dma-fence-chain.c
+@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, f, 1);
+- if (!chain)
++ if (chain)
++ dma_fence_enable_sw_signaling(chain);
++ else
+ err = -ENOMEM;
+
+- dma_fence_enable_sw_signaling(chain);
+-
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+@@ -476,10 +476,9 @@ static int find_race(void *arg)
+ for (i = 0; i < ncpus; i++) {
+ int ret;
+
+- ret = kthread_stop(threads[i]);
++ ret = kthread_stop_put(threads[i]);
+ if (ret && !err)
+ err = ret;
+- put_task_struct(threads[i]);
+ }
+ kfree(threads);
+
+@@ -591,8 +590,7 @@ static int wait_forward(void *arg)
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+- err = kthread_stop(tsk);
+- put_task_struct(tsk);
++ err = kthread_stop_put(tsk);
+
+ err:
+ fence_chains_fini(&fc);
+@@ -621,8 +619,7 @@ static int wait_backward(void *arg)
+ for (i = fc.chain_length; i--; )
+ dma_fence_signal(fc.fences[i]);
+
+- err = kthread_stop(tsk);
+- put_task_struct(tsk);
++ err = kthread_stop_put(tsk);
+
+ err:
+ fence_chains_fini(&fc);
+@@ -669,8 +666,7 @@ static int wait_random(void *arg)
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+- err = kthread_stop(tsk);
+- put_task_struct(tsk);
++ err = kthread_stop_put(tsk);
+
+ err:
+ fence_chains_fini(&fc);
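[Editorial note] The selftest hunks above (and the st-dma-fence.c ones below) fold each kthread_stop()/put_task_struct() pair into a single kthread_stop_put() call. Assuming the helper simply combines the two operations, which is all these conversions rely on, its shape would be roughly:

static int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);   /* wait for the thread, fetch its exit code */
	put_task_struct(k);      /* drop the caller's task reference         */
	return ret;
}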
+diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
+index fb6e0a6ae2c96e..6a1bfcd0cc2108 100644
+--- a/drivers/dma-buf/st-dma-fence.c
++++ b/drivers/dma-buf/st-dma-fence.c
+@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
+ t[i].before = pass;
+ t[i].task = kthread_run(thread_signal_callback, &t[i],
+ "dma-fence:%d", i);
++ if (IS_ERR(t[i].task)) {
++ ret = PTR_ERR(t[i].task);
++ while (--i >= 0)
++ kthread_stop_put(t[i].task);
++ return ret;
++ }
+ get_task_struct(t[i].task);
+ }
+
+@@ -548,11 +554,9 @@ static int race_signal_callback(void *arg)
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ int err;
+
+- err = kthread_stop(t[i].task);
++ err = kthread_stop_put(t[i].task);
+ if (err && !ret)
+ ret = err;
+-
+- put_task_struct(t[i].task);
+ }
+ }
+
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 101394f16930f8..237bce21d1e724 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
+
+- spin_lock_irq(&obj->lock);
++ spin_lock(&obj->lock); /* Caller already disabled IRQ. */
+ list_for_each(pos, &obj->pt_list) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+ sync_print_fence(s, &pt->base, false);
+ }
+- spin_unlock_irq(&obj->lock);
++ spin_unlock(&obj->lock);
+ }
+
+ static void sync_print_sync_file(struct seq_file *s,
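[Editorial note] The sync_print_obj() change drops the _irq suffix because, per the added comment, the caller already runs with interrupts disabled; an inner spin_unlock_irq() would unconditionally re-enable interrupts inside the caller's critical section. A toy model of the state clobbering (a bare flag stands in for the CPU interrupt-enable state):

#include <stdio.h>

static int irqs_enabled = 1;     /* toy stand-in for the CPU IRQ flag */

static void lock_irq(void)     { irqs_enabled = 0; }  /* spin_lock_irq()   */
static void unlock_irq(void)   { irqs_enabled = 1; }  /* spin_unlock_irq() */
static void lock_plain(void)   { }                    /* spin_lock()       */
static void unlock_plain(void) { }                    /* spin_unlock()     */

int main(void)
{
    lock_irq();                   /* caller's outer critical section        */
    lock_irq(); unlock_irq();     /* old inner pair: IRQs back on too early */
    printf("after _irq inner pair:  irqs_enabled=%d (want 0)\n", irqs_enabled);
    unlock_irq();

    lock_irq();
    lock_plain(); unlock_plain(); /* new inner pair leaves state untouched  */
    printf("after plain inner pair: irqs_enabled=%d (want 0)\n", irqs_enabled);
    unlock_irq();
    return 0;
}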
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 4ccae1a3b88427..e36506471a4f67 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -380,7 +380,7 @@ config LPC18XX_DMAMUX
+
+ config MCF_EDMA
+ tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+- depends on M5441x || COMPILE_TEST
++ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+@@ -629,16 +629,16 @@ config TEGRA20_APB_DMA
+
+ config TEGRA210_ADMA
+ tristate "NVIDIA Tegra210 ADMA support"
+- depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
++ depends on (ARCH_TEGRA || COMPILE_TEST)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+- Support for the NVIDIA Tegra210 ADMA controller driver. The
+- DMA controller has multiple DMA channels and is used to service
+- various audio clients in the Tegra210 audio processing engine
+- (APE). This DMA controller transfers data from memory to
+- peripheral and vice versa. It does not support memory to
+- memory data transfer.
++ Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
++ controller driver. The DMA controller has multiple DMA channels
++ and is used to service various audio clients in the Tegra210
++ audio processing engine (APE). This DMA controller transfers
++ data from memory to peripheral and vice versa. It does not
++ support memory to memory data transfer.
+
+ config TIMB_DMA
+ tristate "Timberdale FPGA DMA support"
+diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
+index 4153c2edb04901..711e3756a39a52 100644
+--- a/drivers/dma/altera-msgdma.c
++++ b/drivers/dma/altera-msgdma.c
+@@ -233,7 +233,7 @@ static void msgdma_free_descriptor(struct msgdma_device *mdev,
+ struct msgdma_sw_desc *child, *next;
+
+ mdev->desc_free_cnt++;
+- list_add_tail(&desc->node, &mdev->free_list);
++ list_move_tail(&desc->node, &mdev->free_list);
+ list_for_each_entry_safe(child, next, &desc->tx_list, node) {
+ mdev->desc_free_cnt++;
+ list_move_tail(&child->node, &mdev->free_list);
+@@ -583,17 +583,16 @@ static void msgdma_issue_pending(struct dma_chan *chan)
+ static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
+ {
+ struct msgdma_sw_desc *desc, *next;
++ unsigned long irqflags;
+
+ list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
+ struct dmaengine_desc_callback cb;
+
+- list_del(&desc->node);
+-
+ dmaengine_desc_get_callback(&desc->async_tx, &cb);
+ if (dmaengine_desc_callback_valid(&cb)) {
+- spin_unlock(&mdev->lock);
++ spin_unlock_irqrestore(&mdev->lock, irqflags);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+- spin_lock(&mdev->lock);
++ spin_lock_irqsave(&mdev->lock, irqflags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index 3af795635c5ce6..356298e4dd22b3 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -57,6 +57,8 @@
+
+ #define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+
++#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
++#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
+ #define BUS_WIDTH_8BIT 0x00
+ #define BUS_WIDTH_16BIT 0x01
+ #define BUS_WIDTH_32BIT 0x02
+@@ -740,7 +742,8 @@ static int admac_device_config(struct dma_chan *chan,
+ struct admac_data *ad = adchan->host;
+ bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ int wordsize = 0;
+- u32 bus_width = 0;
++ u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
++ ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
+
+ switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
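[Editorial note] The apple-admac change above seeds bus_width from the current register value with the word/frame-size fields masked out, instead of starting from zero, so the subsequent write preserves whatever else lives in REG_BUS_WIDTH. A small sketch of the difference (the register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Field masks as in the patch: GENMASK(3, 0) and GENMASK(7, 4). */
#define BUS_WIDTH_WORD_SIZE  0x0000000fu
#define BUS_WIDTH_FRAME_SIZE 0x000000f0u

int main(void)
{
    uint32_t reg = 0x00000132;   /* hypothetical current register value */
    uint32_t new_word = 0x2;     /* 32-bit word size to program         */

    /* Old code: start from 0 and clobber every other bit. */
    uint32_t clobbered = new_word;

    /* New code: read-modify-write only the size fields.   */
    uint32_t preserved = (reg & ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE))
                       | new_word;

    printf("clobbered=%#010x preserved=%#010x\n", clobbered, preserved);
    return 0;
}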
+diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
+index fc7cdad371616a..4f426be2868843 100644
+--- a/drivers/dma/dma-axi-dmac.c
++++ b/drivers/dma/dma-axi-dmac.c
+@@ -1033,8 +1033,8 @@ static int axi_dmac_remove(struct platform_device *pdev)
+ {
+ struct axi_dmac *dmac = platform_get_drvdata(pdev);
+
+- of_dma_controller_free(pdev->dev.of_node);
+ free_irq(dmac->irq, dmac);
++ of_dma_controller_free(pdev->dev.of_node);
+ tasklet_kill(&dmac->chan.vchan.task);
+ dma_async_device_unregister(&dmac->dma_dev);
+ clk_disable_unprepare(dmac->clk);
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index b7388ae62d7f1f..491b222402216a 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1103,6 +1103,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+ static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+ {
++ if (chan->local == NULL)
++ return;
++
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index dd02f84e404d08..72fb40de58b3f4 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -256,6 +256,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
+ kfree(desc);
+ return NULL;
+ }
++ desc->nr_hw_descs = num;
+
+ return desc;
+ }
+@@ -282,7 +283,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
+ static void axi_desc_put(struct axi_dma_desc *desc)
+ {
+ struct axi_dma_chan *chan = desc->chan;
+- int count = atomic_read(&chan->descs_allocated);
++ int count = desc->nr_hw_descs;
+ struct axi_dma_hw_desc *hw_desc;
+ int descs_put;
+
+@@ -1093,9 +1094,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
+ /* Remove the completed descriptor from issued list before completing */
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+-
+- /* Submit queued descriptors after processing the completed ones */
+- axi_chan_start_first_queued(chan);
+ }
+
+ out:
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+index eb267cb24f6702..8521530a34ec46 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+@@ -104,6 +104,7 @@ struct axi_dma_desc {
+ u32 completed_blocks;
+ u32 length;
+ u32 period_len;
++ u32 nr_hw_descs;
+ };
+
+ struct axi_dma_chan_config {
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
+index b38786f0ad7995..b75fdaffad9a4e 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
+@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
+ dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
+ }
+
++static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
++{
++ /*

++ * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
++ * configuration registers and application memory are normally accessed
++ * over different buses. Ensure LL-data reaches the memory before the
++ * doorbell register is toggled by issuing the dummy-read from the remote
++ * LL memory in the hope that the MRd TLP will return only after the
++ * last MWr TLP is completed.
++ */
++ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
++ readl(chunk->ll_region.vaddr.io);
++}
++
+ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ {
+ struct dw_edma_chan *chan = chunk->chan;
+@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
+ }
++
++ dw_edma_v0_sync_ll_data(chunk);
++
+ /* Doorbell */
+ SET_RW_32(dw, chan->dir, doorbell,
+ FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
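[Editorial note] The new dw_edma_v0_sync_ll_data() (and its HDMA twin later in this patch) uses the standard posted-write flush: a PCIe memory write is posted and carries no completion, while a read from the same target cannot complete until prior writes on that path have landed. A runnable userspace model of the call pattern; the ordering guarantee itself is a property of the bus, assumed per the in-code comment:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel MMIO accessors. */
static void writel(uint32_t v, volatile uint32_t *addr) { *addr = v; }
static uint32_t readl(volatile uint32_t *addr)          { return *addr; }

static volatile uint32_t ll_region[4];   /* remote linked-list memory */
static volatile uint32_t doorbell_reg;   /* device doorbell register  */

static void start_transfer(const uint32_t *ll_data, size_t n)
{
    for (size_t i = 0; i < n; i++)
        writel(ll_data[i], &ll_region[i]);   /* posted MWr TLPs          */

    (void)readl(&ll_region[0]);              /* MRd waits out the writes */

    writel(1, &doorbell_reg);                /* device now sees LL data  */
}

int main(void)
{
    const uint32_t ll[4] = { 1, 2, 3, 4 };

    start_transfer(ll, 4);
    printf("doorbell=%u ll[0]=%u\n", doorbell_reg, ll_region[0]);
    return 0;
}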
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+index 0745d9e7d259b1..406f169b09a75a 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+@@ -176,7 +176,7 @@ dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+ };
+ struct dentry *regs_dent, *ch_dent;
+ int nr_entries, i;
+- char name[16];
++ char name[32];
+
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
+
+@@ -239,7 +239,7 @@ static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
+ };
+ struct dentry *regs_dent, *ch_dent;
+ int nr_entries, i;
+- char name[16];
++ char name[32];
+
+ regs_dent = debugfs_create_dir(READ_STR, dent);
+
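[Editorial note] Both debugfs helpers above (and the dw-hdma and fsl-qdma counterparts later in the patch) grow their name buffers from 16 to 32 bytes; snprintf() silently truncates when the formatted name outgrows the buffer, which modern GCC flags with -Wformat-truncation. A demonstration with a hypothetical entry name in the same style:

#include <stdio.h>

int main(void)
{
    char small[16], large[32];
    /* Hypothetical debugfs entry name, long enough to overflow 16 bytes. */
    int need = snprintf(small, sizeof(small), "%s_ch_%d", "write_abort", 12);

    snprintf(large, sizeof(large), "%s_ch_%d", "write_abort", 12);
    printf("needed %d bytes, got \"%s\" vs \"%s\"\n", need + 1, small, large);
    return 0;
}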
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+index 00b735a0202ab2..e3f8db4fe909a1 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
+@@ -17,8 +17,8 @@ enum dw_hdma_control {
+ DW_HDMA_V0_CB = BIT(0),
+ DW_HDMA_V0_TCB = BIT(1),
+ DW_HDMA_V0_LLP = BIT(2),
+- DW_HDMA_V0_LIE = BIT(3),
+- DW_HDMA_V0_RIE = BIT(4),
++ DW_HDMA_V0_LWIE = BIT(3),
++ DW_HDMA_V0_RWIE = BIT(4),
+ DW_HDMA_V0_CCS = BIT(8),
+ DW_HDMA_V0_LLE = BIT(9),
+ };
+@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
+
+ static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
+ {
+- u32 num_ch = 0;
+- int id;
+-
+- for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
+- if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
+- num_ch++;
+- }
+-
+- if (num_ch > HDMA_V0_MAX_NR_CH)
+- num_ch = HDMA_V0_MAX_NR_CH;
+-
+- return (u16)num_ch;
++ /*
++ * The HDMA IP has no way to know the number of hardware channels
++ * available, so report the maximum and let the platform set the
++ * right number of channels.
++ */
++ return HDMA_V0_MAX_NR_CH;
+ }
+
+ static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
+@@ -201,25 +195,14 @@ static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
+ {
+ struct dw_edma_burst *child;
+- struct dw_edma_chan *chan = chunk->chan;
+ u32 control = 0, i = 0;
+- int j;
+
+ if (chunk->cb)
+ control = DW_HDMA_V0_CB;
+
+- j = chunk->bursts_alloc;
+- list_for_each_entry(child, &chunk->burst->list, list) {
+- j--;
+- if (!j) {
+- control |= DW_HDMA_V0_LIE;
+- if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+- control |= DW_HDMA_V0_RIE;
+- }
+-
++ list_for_each_entry(child, &chunk->burst->list, list)
+ dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
+- }
+
+ control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
+ if (!chunk->cb)
+@@ -228,6 +211,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
+ dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
+ }
+
++static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
++{
++ /*
++ * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
++ * configuration registers and application memory are normally accessed
++ * over different buses. Ensure LL-data reaches the memory before the
++ * doorbell register is toggled by issuing the dummy-read from the remote
++ * LL memory in the hope that the MRd TLP will return only after the
++ * last MWr TLP is completed.
++ */
++ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
++ readl(chunk->ll_region.vaddr.io);
++}
++
+ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ {
+ struct dw_edma_chan *chan = chunk->chan;
+@@ -239,10 +236,13 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ if (first) {
+ /* Enable engine */
+ SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
+- /* Interrupt enable&unmask - done, abort */
+- tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
+- HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
+- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
++ /* Interrupt unmask - stop, abort */
++ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
++ tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
++ /* Interrupt enable - stop, abort */
++ tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
++ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
++ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
+ SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
+ /* Channel control */
+ SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
+@@ -256,6 +256,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ /* Set consumer cycle */
+ SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
+ HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
++
++ dw_hdma_v0_sync_ll_data(chunk);
++
+ /* Doorbell */
+ SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
+ }
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
+index 520c81978b085f..dcdc57fe976c13 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
+@@ -116,7 +116,7 @@ static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+ {
+ struct dentry *regs_dent, *ch_dent;
+- char name[16];
++ char name[32];
+ int i;
+
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
+@@ -133,7 +133,7 @@ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+ static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
+ {
+ struct dentry *regs_dent, *ch_dent;
+- char name[16];
++ char name[32];
+ int i;
+
+ regs_dent = debugfs_create_dir(READ_STR, dent);
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+index a974abdf8aaf5e..eab5fd7177e545 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h
++++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+@@ -15,7 +15,7 @@
+ #define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
+ #define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
+ #define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
+-#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
++#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
+ #define HDMA_V0_ABORT_INT_MASK BIT(2)
+ #define HDMA_V0_STOP_INT_MASK BIT(0)
+ #define HDMA_V0_LINKLIST_EN BIT(0)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 5f7d690e3dbae8..b341a6f1b04383 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -16,6 +16,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/log2.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -621,12 +622,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ struct dw_desc *prev;
+ struct dw_desc *first;
+ u32 ctllo, ctlhi;
+- u8 m_master = dwc->dws.m_master;
+- u8 lms = DWC_LLP_LMS(m_master);
++ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
+ dma_addr_t reg;
+ unsigned int reg_width;
+ unsigned int mem_width;
+- unsigned int data_width = dw->pdata->data_width[m_master];
+ unsigned int i;
+ struct scatterlist *sg;
+ size_t total_len = 0;
+@@ -660,7 +659,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+- mem_width = __ffs(data_width | mem | len);
++ mem_width = __ffs(sconfig->src_addr_width | mem | len);
+
+ slave_sg_todev_fill_desc:
+ desc = dwc_desc_get(dwc);
+@@ -720,7 +719,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ lli_write(desc, sar, reg);
+ lli_write(desc, dar, mem);
+ lli_write(desc, ctlhi, ctlhi);
+- mem_width = __ffs(data_width | mem);
++ mem_width = __ffs(sconfig->dst_addr_width | mem);
+ lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
+ desc->len = dlen;
+
+@@ -780,17 +779,93 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ }
+ EXPORT_SYMBOL_GPL(dw_dma_filter);
+
++static int dwc_verify_p_buswidth(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ u32 reg_width, max_width;
++
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
++ reg_width = dwc->dma_sconfig.dst_addr_width;
++ else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
++ reg_width = dwc->dma_sconfig.src_addr_width;
++ else /* DMA_MEM_TO_MEM */
++ return 0;
++
++ max_width = dw->pdata->data_width[dwc->dws.p_master];
++
++ /* Fall-back to 1-byte transfer width if undefined */
++ if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
++ reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ else if (!is_power_of_2(reg_width) || reg_width > max_width)
++ return -EINVAL;
++ else /* bus width is valid */
++ return 0;
++
++ /* Update undefined addr width value */
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
++ dwc->dma_sconfig.dst_addr_width = reg_width;
++ else /* DMA_DEV_TO_MEM */
++ dwc->dma_sconfig.src_addr_width = reg_width;
++
++ return 0;
++}
++
++static int dwc_verify_m_buswidth(struct dma_chan *chan)
++{
++ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
++ u32 reg_width, reg_burst, mem_width;
++
++ mem_width = dw->pdata->data_width[dwc->dws.m_master];
++
++ /*
++ * It's possible for a portion of data to be locked in the DMA FIFO
++ * when a channel is suspended; disabling the channel afterwards
++ * silently loses that data. To prevent that, keep the src and dst
++ * transfer widths coherent by means of the relation:
++ * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
++ * See the commit message that brought this change for details.
++ *
++ * Note the DMA configs used in the calculations below must already
++ * have been verified to hold correct values before this call.
++ */
++ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
++ reg_width = dwc->dma_sconfig.dst_addr_width;
++ if (mem_width < reg_width)
++ return -EINVAL;
++
++ dwc->dma_sconfig.src_addr_width = mem_width;
++ } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
++ reg_width = dwc->dma_sconfig.src_addr_width;
++ reg_burst = rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
++
++ dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
++ }
++
++ return 0;
++}
++
+ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
++ int ret;
+
+ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+ dwc->dma_sconfig.src_maxburst =
+- clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
++ clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
+ dwc->dma_sconfig.dst_maxburst =
+- clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
++ clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
++
++ ret = dwc_verify_p_buswidth(chan);
++ if (ret)
++ return ret;
++
++ ret = dwc_verify_m_buswidth(chan);
++ if (ret)
++ return ret;
+
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
+ dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
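[Editorial note] dwc_verify_m_buswidth() above enforces the FIFO-coherency relation from the comment; in the DEV_TO_MEM case it caps the memory-side width at reg_width * reg_burst (and at the master's data width). A worked example with illustrative values:

#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long n)
{
    unsigned long p = 1;
    while (p * 2 <= n)
        p *= 2;
    return p;
}

int main(void)
{
    /* DEV_TO_MEM: 2-byte peripheral reads, bursts of 8 beats, 4-byte
     * capable memory master (illustrative values). */
    unsigned long src_addr_width = 2;
    unsigned long src_maxburst   = 8;
    unsigned long mem_width      = 4;

    unsigned long burst = rounddown_pow_of_two(src_maxburst);
    unsigned long dst   = mem_width < src_addr_width * burst
                        ? mem_width : src_addr_width * burst;

    /* SRC_TR_WIDTH * SRC_MSIZE (2*8 = 16) >= DST_TR_WIDTH (4) holds. */
    printf("dst_addr_width=%lu (src %lu x burst %lu >= dst: %s)\n",
           dst, src_addr_width, burst,
           src_addr_width * burst >= dst ? "yes" : "no");
    return 0;
}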
+diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+index a42a37634881b2..da91bc9a8e6f0e 100644
+--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+ if (!dpaa2_chan->fd_pool)
+ goto err;
+
+- dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+- sizeof(struct dpaa2_fl_entry),
+- sizeof(struct dpaa2_fl_entry), 0);
++ dpaa2_chan->fl_pool =
++ dma_pool_create("fl_pool", dev,
++ sizeof(struct dpaa2_fl_entry) * 3,
++ sizeof(struct dpaa2_fl_entry), 0);
++
+ if (!dpaa2_chan->fl_pool)
+ goto err_fd;
+
+ dpaa2_chan->sdd_pool =
+ dma_pool_create("sdd_pool", dev,
+- sizeof(struct dpaa2_qdma_sd_d),
++ sizeof(struct dpaa2_qdma_sd_d) * 2,
+ sizeof(struct dpaa2_qdma_sd_d), 0);
+ if (!dpaa2_chan->sdd_pool)
+ goto err_fl;
+diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
+index 6a3abe5b17908d..53fdfd32a7e772 100644
+--- a/drivers/dma/fsl-edma-common.c
++++ b/drivers/dma/fsl-edma-common.c
+@@ -3,6 +3,7 @@
+ // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+ // Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
++#include <linux/clk.h>
+ #include <linux/dmapool.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -74,18 +75,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+
+ flags = fsl_edma_drvflags(fsl_chan);
+ val = edma_readl_chreg(fsl_chan, ch_sbr);
+- /* Remote/local swapped wrongly on iMX8 QM Audio edma */
+- if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
+- if (!fsl_chan->is_rxchan)
+- val |= EDMA_V3_CH_SBR_RD;
+- else
+- val |= EDMA_V3_CH_SBR_WR;
+- } else {
+- if (fsl_chan->is_rxchan)
+- val |= EDMA_V3_CH_SBR_RD;
+- else
+- val |= EDMA_V3_CH_SBR_WR;
+- }
++ if (fsl_chan->is_rxchan)
++ val |= EDMA_V3_CH_SBR_RD;
++ else
++ val |= EDMA_V3_CH_SBR_WR;
+
+ if (fsl_chan->is_remote)
+ val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
+@@ -97,8 +90,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+ * ch_mux: With the exception of 0, attempts to write a value
+ * already in use will be forced to 0.
+ */
+- if (!edma_readl_chreg(fsl_chan, ch_mux))
+- edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
++ if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
++ edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
+ }
+
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+@@ -134,7 +127,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
+ flags = fsl_edma_drvflags(fsl_chan);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+- edma_writel_chreg(fsl_chan, 0, ch_mux);
++ edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
+
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+@@ -503,7 +496,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ if (fsl_chan->is_multi_fifo) {
+ /* set mloff to support multiple fifo */
+ burst = cfg->direction == DMA_DEV_TO_MEM ?
+- cfg->src_addr_width : cfg->dst_addr_width;
++ cfg->src_maxburst : cfg->dst_maxburst;
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
+ /* enable DMLOE/SMLOE */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+@@ -754,6 +747,8 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
+ fsl_desc->iscyclic = false;
+
+ fsl_chan->is_sw = true;
++ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
++ fsl_chan->is_remote = true;
+
+ /* To match with copy_align and max_seg_size so 1 tcd is enough */
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+@@ -802,6 +797,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+ {
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
++ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
++ clk_prepare_enable(fsl_chan->clk);
++
+ fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+ sizeof(struct fsl_edma_hw_tcd),
+ 32, 0);
+@@ -828,6 +826,10 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ fsl_chan->tcd_pool = NULL;
+ fsl_chan->is_sw = false;
++ fsl_chan->srcid = 0;
++ fsl_chan->is_remote = false;
++ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
++ clk_disable_unprepare(fsl_chan->clk);
+ }
+
+ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
+index 40d50cc3d75a34..6028389de408b0 100644
+--- a/drivers/dma/fsl-edma-common.h
++++ b/drivers/dma/fsl-edma-common.h
+@@ -30,8 +30,9 @@
+ #define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
+ #define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
+
+-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
+-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
++#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
++#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
++#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
+
+ #define EDMA_TCD_CSR_START BIT(0)
+ #define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+@@ -145,6 +146,7 @@ struct fsl_edma_chan {
+ enum dma_data_direction dma_dir;
+ char chan_name[32];
+ struct fsl_edma_hw_tcd __iomem *tcd;
++ void __iomem *mux_addr;
+ u32 real_count;
+ struct work_struct issue_worker;
+ struct platform_device *pdev;
+@@ -176,8 +178,7 @@ struct fsl_edma_desc {
+ #define FSL_EDMA_DRV_HAS_PD BIT(5)
+ #define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
+ #define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
+-/* imx8 QM audio edma remote local swapped */
+-#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
++#define FSL_EDMA_DRV_MEM_REMOTE BIT(8)
+ /* control and status register is in tcd address space, edma3 reg layout */
+ #define FSL_EDMA_DRV_SPLIT_REG BIT(9)
+ #define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
+@@ -206,6 +207,8 @@ struct fsl_edma_drvdata {
+ u32 chreg_off;
+ u32 chreg_space_sz;
+ u32 flags;
++ u32 mux_off; /* channel mux register offset */
++ u32 mux_skip; /* how much skip for each channel */
+ int (*setup_irq)(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma);
+ };
+diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
+index 8c4ed7012e232e..8a0ae90548997c 100644
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -9,6 +9,8 @@
+ * Vybrid and Layerscape SoCs.
+ */
+
++#include <dt-bindings/dma/fsl-edma.h>
++#include <linux/bitfield.h>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/clk.h>
+@@ -23,10 +25,6 @@
+
+ #include "fsl-edma-common.h"
+
+-#define ARGS_RX BIT(0)
+-#define ARGS_REMOTE BIT(1)
+-#define ARGS_MULTI_FIFO BIT(2)
+-
+ static void fsl_edma_synchronize(struct dma_chan *chan)
+ {
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+@@ -155,9 +153,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ i = fsl_chan - fsl_edma->chans;
+
+ fsl_chan->priority = dma_spec->args[1];
+- fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+- fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+- fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
++ fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
++ fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
++ fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
++
++ if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
++ continue;
++
++ if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
++ continue;
+
+ if (!b_chmux && i == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+@@ -336,16 +340,19 @@ static struct fsl_edma_drvdata imx7ulp_data = {
+ };
+
+ static struct fsl_edma_drvdata imx8qm_data = {
+- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
++ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+ };
+
+-static struct fsl_edma_drvdata imx8qm_audio_data = {
+- .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
++static struct fsl_edma_drvdata imx8ulp_data = {
++ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
++ FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
++ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
++ .mux_skip = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+ };
+
+@@ -360,6 +367,8 @@ static struct fsl_edma_drvdata imx93_data4 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
++ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
++ .mux_skip = 0x8000,
+ .setup_irq = fsl_edma3_irq_init,
+ };
+
+@@ -368,7 +377,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
+ { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
+ { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
+ { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
+- { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
++ { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
+ { .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
+ { .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
+ { /* sentinel */ }
+@@ -400,9 +409,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
+ link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+- if (IS_ERR(link)) {
+- dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
+- PTR_ERR(link));
++ if (!link) {
++ dev_err(dev, "Failed to add device_link to %d\n", i);
+ return -EINVAL;
+ }
+
+@@ -424,6 +432,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ struct fsl_edma_engine *fsl_edma;
+ const struct fsl_edma_drvdata *drvdata = NULL;
+ u32 chan_mask[2] = {0, 0};
++ char clk_name[36];
+ struct edma_regs *regs;
+ int chans;
+ int ret, i;
+@@ -537,12 +546,23 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
+ fsl_chan->tcd = fsl_edma->membase
+ + i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
++ fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
++
++ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
++ snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
++ fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
++ (const char *)clk_name);
+
++ if (IS_ERR(fsl_chan->clk))
++ return PTR_ERR(fsl_chan->clk);
++ }
+ fsl_chan->pdev = pdev;
+ vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
+
+ edma_write_tcdreg(fsl_chan, 0, csr);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
++ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
++ clk_disable_unprepare(fsl_chan->clk);
+ }
+
+ ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
+@@ -587,7 +607,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ DMAENGINE_ALIGN_32_BYTES;
+
+ /* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
+- dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
++ dma_set_max_seg_size(fsl_edma->dma_dev.dev,
++ FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
+
+ fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+@@ -640,6 +661,8 @@ static int fsl_edma_suspend_late(struct device *dev)
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
++ if (fsl_edma->chan_masked & BIT(i))
++ continue;
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ /* Make sure chan is idle or will force disable. */
+ if (unlikely(!fsl_chan->idle)) {
+@@ -664,13 +687,16 @@ static int fsl_edma_resume_early(struct device *dev)
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
++ if (fsl_edma->chan_masked & BIT(i))
++ continue;
+ fsl_chan->pm_state = RUNNING;
+ edma_write_tcdreg(fsl_chan, 0, csr);
+ if (fsl_chan->slave_id != 0)
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
+ }
+
+- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
++ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
++ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+
+ return 0;
+ }
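[Editorial note] The dma_set_max_seg_size() change above swaps a hard-coded 0x3fff for FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK): extracting a mask from itself yields the field's maximum value, and for GENMASK(14, 0) that is 0x7fff, so the limit now follows the 15-bit CITER field instead of an understated constant. A sketch with the kernel macros reproduced as assumptions (64-bit shift width):

#include <stdio.h>

/* Userspace reproductions of the kernel macros (assumed semantics). */
#define GENMASK(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(mask, reg) \
    (((reg) & (mask)) >> __builtin_ctzll(mask))   /* contiguous mask */

#define EDMA_TCD_ITER_MASK GENMASK(14, 0)

int main(void)
{
    /* Extracting the mask from itself gives the field's maximum value. */
    printf("max CITER = %#llx\n",
           (unsigned long long)FIELD_GET(EDMA_TCD_ITER_MASK,
                                         EDMA_TCD_ITER_MASK));
    return 0;
}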
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index a8cc8a4bc6102c..b7a2254b0de475 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -109,6 +109,7 @@
+ #define FSL_QDMA_CMD_WTHROTL_OFFSET 20
+ #define FSL_QDMA_CMD_DSEN_OFFSET 19
+ #define FSL_QDMA_CMD_LWC_OFFSET 16
++#define FSL_QDMA_CMD_PF BIT(17)
+
+ /* Field definition for Descriptor status */
+ #define QDMA_CCDF_STATUS_RTE BIT(5)
+@@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
+ qdma_csgf_set_f(csgf_dest, len);
+ /* Descriptor Buffer */
+ cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+- FSL_QDMA_CMD_RWTTYPE_OFFSET);
++ FSL_QDMA_CMD_RWTTYPE_OFFSET) |
++ FSL_QDMA_CMD_PF;
+ sdf->data = QDMA_SDDF_CMD(cmd);
+
+ cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
+@@ -514,11 +516,11 @@ static struct fsl_qdma_queue
+ queue_temp = queue_head + i + (j * queue_num);
+
+ queue_temp->cq =
+- dma_alloc_coherent(&pdev->dev,
+- sizeof(struct fsl_qdma_format) *
+- queue_size[i],
+- &queue_temp->bus_addr,
+- GFP_KERNEL);
++ dmam_alloc_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_format) *
++ queue_size[i],
++ &queue_temp->bus_addr,
++ GFP_KERNEL);
+ if (!queue_temp->cq)
+ return NULL;
+ queue_temp->block_base = fsl_qdma->block_base +
+@@ -563,11 +565,11 @@ static struct fsl_qdma_queue
+ /*
+ * Buffer for queue command
+ */
+- status_head->cq = dma_alloc_coherent(&pdev->dev,
+- sizeof(struct fsl_qdma_format) *
+- status_size,
+- &status_head->bus_addr,
+- GFP_KERNEL);
++ status_head->cq = dmam_alloc_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_format) *
++ status_size,
++ &status_head->bus_addr,
++ GFP_KERNEL);
+ if (!status_head->cq) {
+ devm_kfree(&pdev->dev, status_head);
+ return NULL;
+@@ -805,7 +807,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
+ int i;
+ int cpu;
+ int ret;
+- char irq_name[20];
++ char irq_name[32];
+
+ fsl_qdma->error_irq =
+ platform_get_irq_byname(pdev, "qdma-error");
+@@ -1197,10 +1199,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+ if (!fsl_qdma->queue)
+ return -ENOMEM;
+
+- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+- if (ret)
+- return ret;
+-
+ fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ if (fsl_qdma->irq_base < 0)
+ return fsl_qdma->irq_base;
+@@ -1239,16 +1237,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, fsl_qdma);
+
+- ret = dma_async_device_register(&fsl_qdma->dma_dev);
++ ret = fsl_qdma_reg_init(fsl_qdma);
+ if (ret) {
+- dev_err(&pdev->dev,
+- "Can't register NXP Layerscape qDMA engine.\n");
++ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ return ret;
+ }
+
+- ret = fsl_qdma_reg_init(fsl_qdma);
++ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
++ if (ret)
++ return ret;
++
++ ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ if (ret) {
+- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
++ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+ return ret;
+ }
+
+@@ -1268,8 +1269,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+
+ static int fsl_qdma_remove(struct platform_device *pdev)
+ {
+- int i;
+- struct fsl_qdma_queue *status;
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+
+@@ -1278,11 +1277,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+- for (i = 0; i < fsl_qdma->block_number; i++) {
+- status = fsl_qdma->status[i];
+- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+- status->n_cq, status->cq, status->bus_addr);
+- }
+ return 0;
+ }
+
+diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
+index 0ac634a51c5e35..6fa797fd85017d 100644
+--- a/drivers/dma/idma64.c
++++ b/drivers/dma/idma64.c
+@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
+ u32 status_err;
+ unsigned short i;
+
++ /* Since IRQ may be shared, check if DMA controller is powered on */
++ if (status == GENMASK(31, 0))
++ return IRQ_NONE;
++
+ dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+
+ /* Check if we have any interrupt from the DMA controller */
+@@ -594,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
+
+ idma64->dma.dev = chip->sysdev;
+
+- dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
++ ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
++ if (ret)
++ return ret;
+
+ ret = dma_async_device_register(&idma64->dma);
+ if (ret)
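[Editorial note] The early return added to idma64_irq() is the usual shared-IRQ guard: reads from a powered-down (or hot-removed) MMIO region typically complete as all ones, and GENMASK(31, 0) is never a valid status word here, so the handler bows out with IRQ_NONE instead of decoding garbage. A compact model:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the guard: all-ones is never a valid status here, so treat
 * it as "not our interrupt" and let other handlers on the line run. */
static int handle_irq(uint32_t status)
{
    if (status == UINT32_MAX)
        return 0;              /* IRQ_NONE  */
    /* ... decode per-channel bits ... */
    return 1;                  /* IRQ_HANDLED */
}

int main(void)
{
    printf("powered off -> %d, active -> %d\n",
           handle_irq(0xffffffff), handle_irq(0x00000003));
    return 0;
}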
+diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
+index dc096839ac6374..c5e679070e4633 100644
+--- a/drivers/dma/idxd/Makefile
++++ b/drivers/dma/idxd/Makefile
+@@ -1,12 +1,12 @@
+ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
++obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
++idxd_bus-y := bus.o
++
+ obj-$(CONFIG_INTEL_IDXD) += idxd.o
+ idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
+
+ idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
+
+-obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
+-idxd_bus-y := bus.o
+-
+ obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
+ idxd_compat-y := compat.o
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index d32deb9b4e3dee..c18633ad8455fa 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -342,10 +342,10 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+ if (!evl)
+ return;
+
+- spin_lock(&evl->lock);
++ mutex_lock(&evl->lock);
+ status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ t = status.tail;
+- h = evl->head;
++ h = status.head;
+ size = evl->size;
+
+ while (h != t) {
+@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+ set_bit(h, evl->bmap);
+ h = (h + 1) % size;
+ }
+- spin_unlock(&evl->lock);
+-
+ drain_workqueue(wq->wq);
++ mutex_unlock(&evl->lock);
+ }
+
+ static int idxd_cdev_release(struct inode *node, struct file *filep)
+@@ -401,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ int rc;
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
++
++ /*
++ * Due to an erratum in some of the devices supported by the driver,
++ * direct user submission to the device can be unsafe.
++ * (See the INTEL-SA-01084 security advisory)
++ *
++ * For the devices that exhibit this behavior, require that the user
++ * has the CAP_SYS_RAWIO capability.
++ */
++ if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ rc = check_vma(wq, vma, __func__);
+ if (rc < 0)
+ return rc;
+@@ -415,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ vma->vm_page_prot);
+ }
+
++static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
++ struct dsa_hw_desc __user *udesc)
++{
++ struct idxd_wq *wq = ctx->wq;
++ struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
++ const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
++ void __iomem *portal = idxd_wq_portal_addr(wq);
++ struct dsa_hw_desc descriptor __aligned(64);
++ int rc;
++
++ rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
++ if (rc)
++ return -EFAULT;
++
++ /*
++ * DSA devices are capable of indirect ("batch") command submission.
++ * On devices where direct user submissions are not safe, we cannot
++ * allow this since there is no good way for us to verify these
++ * indirect commands.
++ */
++ if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
++ !wq->idxd->user_submission_safe)
++ return -EINVAL;
++ /*
++ * As per the programming specification, the completion address must be
++ * aligned to 32 or 64 bytes. If this is violated, the hardware
++ * engine can get very confused (security issue).
++ */
++ if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
++ return -EINVAL;
++
++ if (wq_dedicated(wq))
++ iosubmit_cmds512(portal, &descriptor, 1);
++ else {
++ descriptor.priv = 0;
++ descriptor.pasid = ctx->pasid;
++ rc = idxd_enqcmds(wq, portal, &descriptor);
++ if (rc < 0)
++ return rc;
++ }
++
++ return 0;
++}
++
++static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
++ loff_t *unused)
++{
++ struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
++ struct idxd_user_context *ctx = filp->private_data;
++ ssize_t written = 0;
++ int i;
++
++ for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
++ int rc = idxd_submit_user_descriptor(ctx, udesc + i);
++
++ if (rc)
++ return written ? written : rc;
++
++ written += sizeof(struct dsa_hw_desc);
++ }
++
++ return written;
++}
++
+ static __poll_t idxd_cdev_poll(struct file *filp,
+ struct poll_table_struct *wait)
+ {
+@@ -437,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
+ .open = idxd_cdev_open,
+ .release = idxd_cdev_release,
+ .mmap = idxd_cdev_mmap,
++ .write = idxd_cdev_write,
+ .poll = idxd_cdev_poll,
+ };
+
+@@ -501,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ struct idxd_cdev *idxd_cdev;
+
+ idxd_cdev = wq->idxd_cdev;
+- ida_destroy(&file_ida);
+ wq->idxd_cdev = NULL;
+ cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
+ put_device(cdev_dev(idxd_cdev));
+diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
+index 9cfbd9b14c4c43..ad4245cb301d50 100644
+--- a/drivers/dma/idxd/debugfs.c
++++ b/drivers/dma/idxd/debugfs.c
+@@ -66,11 +66,11 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
+ if (!evl || !evl->log)
+ return 0;
+
+- spin_lock(&evl->lock);
++ mutex_lock(&evl->lock);
+
+- h = evl->head;
+ evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ t = evl_status.tail;
++ h = evl_status.head;
+ evl_size = evl->size;
+
+ seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
+@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
+ dump_event_entry(idxd, s, i, &count, processed);
+ }
+
+- spin_unlock(&evl->lock);
++ mutex_unlock(&evl->lock);
+ return 0;
+ }
+
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 8f754f922217de..542d340552dd71 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -770,7 +770,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
+ goto err_alloc;
+ }
+
+- spin_lock(&evl->lock);
++ mutex_lock(&evl->lock);
+ evl->log = addr;
+ evl->dma = dma_addr;
+ evl->log_size = size;
+@@ -791,7 +791,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
+ gencfg.evl_en = 1;
+ iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+- spin_unlock(&evl->lock);
++ mutex_unlock(&evl->lock);
+ return 0;
+
+ err_alloc:
+@@ -802,6 +802,9 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
+
+ static void idxd_device_evl_free(struct idxd_device *idxd)
+ {
++ void *evl_log;
++ unsigned int evl_log_size;
++ dma_addr_t evl_dma;
+ union gencfg_reg gencfg;
+ union genctrl_reg genctrl;
+ struct device *dev = &idxd->pdev->dev;
+@@ -811,7 +814,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+ if (!gencfg.evl_en)
+ return;
+
+- spin_lock(&evl->lock);
++ mutex_lock(&evl->lock);
+ gencfg.evl_en = 0;
+ iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+@@ -822,11 +825,15 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
+ iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
+ iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+- dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
+ bitmap_free(evl->bmap);
++ evl_log = evl->log;
++ evl_log_size = evl->log_size;
++ evl_dma = evl->dma;
+ evl->log = NULL;
+ evl->size = IDXD_EVL_SIZE_MIN;
+- spin_unlock(&evl->lock);
++ mutex_unlock(&evl->lock);
++
++ dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
+ }
+
+ static void idxd_group_config_write(struct idxd_group *group)
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index e269ca1f486255..bea10c5cdb76bb 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -275,18 +275,18 @@ struct idxd_driver_data {
+ int evl_cr_off;
+ int cr_status_off;
+ int cr_result_off;
++ bool user_submission_safe;
+ };
+
+ struct idxd_evl {
+ /* Lock to protect event log access. */
+- spinlock_t lock;
++ struct mutex lock;
+ void *log;
+ dma_addr_t dma;
+ /* Total size of event log = number of entries * entry size. */
+ unsigned int log_size;
+ /* The number of entries in the event log. */
+ u16 size;
+- u16 head;
+ unsigned long *bmap;
+ bool batch_fail[IDXD_MAX_BATCH_IDENT];
+ };
+@@ -361,6 +361,8 @@ struct idxd_device {
+
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_evl_file;
++
++ bool user_submission_safe;
+ };
+
+ static inline unsigned int evl_ent_size(struct idxd_device *idxd)
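[Editorial note] Across cdev.c, debugfs.c, device.c, irq.c and this header, evl->lock turns from a spinlock into a mutex; the drain path earlier in the patch now calls drain_workqueue(), which sleeps, inside the critical section, and sleeping is only legal under a sleeping lock. A trivial userspace analogue of the property being relied on (assumed rationale, inferred from the drain hunk):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t evl_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
    pthread_mutex_lock(&evl_lock);
    usleep(1000);   /* stand-in for a sleeping drain_workqueue() */
    pthread_mutex_unlock(&evl_lock);
    puts("blocking while holding a mutex is fine; under a spinlock it is a bug");
    return 0;
}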
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 0eb1c827a215f9..786afb256b6e0d 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
+ .align = 32,
+ .dev_type = &dsa_device_type,
+ .evl_cr_off = offsetof(struct dsa_evl_entry, cr),
++ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
+ .cr_status_off = offsetof(struct dsa_completion_record, status),
+ .cr_result_off = offsetof(struct dsa_completion_record, result),
+ },
+@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
+ .align = 64,
+ .dev_type = &iax_device_type,
+ .evl_cr_off = offsetof(struct iax_evl_entry, cr),
++ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
+ .cr_status_off = offsetof(struct iax_completion_record, status),
+ .cr_result_off = offsetof(struct iax_completion_record, error_code),
+ },
+@@ -342,7 +344,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
+ static int idxd_init_evl(struct idxd_device *idxd)
+ {
+ struct device *dev = &idxd->pdev->dev;
++ unsigned int evl_cache_size;
+ struct idxd_evl *evl;
++ const char *idxd_name;
+
+ if (idxd->hw.gen_cap.evl_support == 0)
+ return 0;
+@@ -351,12 +355,19 @@ static int idxd_init_evl(struct idxd_device *idxd)
+ if (!evl)
+ return -ENOMEM;
+
+- spin_lock_init(&evl->lock);
++ mutex_init(&evl->lock);
+ evl->size = IDXD_EVL_SIZE_MIN;
+
+- idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
+- sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
+- 0, 0, NULL);
++ idxd_name = dev_name(idxd_confdev(idxd));
++ evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
++ /*
++ * Since completion records in evl_cache are copied to user space
++ * when handling a completion-record page fault, the cache must be
++ * created with usercopy support.
++ */
++ idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
++ 0, 0, 0, evl_cache_size,
++ NULL);
+ if (!idxd->evl_cache) {
+ kfree(evl);
+ return -ENOMEM;
+@@ -758,6 +769,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ idxd->hw.version);
+
++ idxd->user_submission_safe = data->user_submission_safe;
++
+ return 0;
+
+ err_dev_register:
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index b501320a9c7ad0..7efc85b5bad9e9 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -363,13 +363,13 @@ static void process_evl_entries(struct idxd_device *idxd)
+ evl_status.bits = 0;
+ evl_status.int_pending = 1;
+
+- spin_lock(&evl->lock);
++ mutex_lock(&evl->lock);
+ /* Clear interrupt pending bit */
+ iowrite32(evl_status.bits_upper32,
+ idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
+- h = evl->head;
+ evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+ t = evl_status.tail;
++ h = evl_status.head;
+ size = idxd->evl->size;
+
+ while (h != t) {
+@@ -378,10 +378,9 @@ static void process_evl_entries(struct idxd_device *idxd)
+ h = (h + 1) % size;
+ }
+
+- evl->head = h;
+ evl_status.head = h;
+ iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+- spin_unlock(&evl->lock);
++ mutex_unlock(&evl->lock);
+ }
+
+ irqreturn_t idxd_misc_thread(int vec, void *data)
+@@ -612,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
+
+ spin_unlock(&irq_entry->list_lock);
+
+- list_for_each_entry(desc, &flist, list) {
++ list_for_each_entry_safe(desc, n, &flist, list) {
+ /*
+ * Check against the original status as ABORT is software defined
+ * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ */
++ list_del(&desc->list);
++
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ continue;
+diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
+index fdda6d60426295..5e94247e1ea703 100644
+--- a/drivers/dma/idxd/perfmon.c
++++ b/drivers/dma/idxd/perfmon.c
+@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+-
+ /* migrate events if there is a valid target */
+- if (target < nr_cpu_ids)
++ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
+- else
+- target = -1;
+-
+- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
++ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
+index 7b54a3939ea135..cfbcd1adb1d1c2 100644
+--- a/drivers/dma/idxd/registers.h
++++ b/drivers/dma/idxd/registers.h
+@@ -6,9 +6,6 @@
+ #include <uapi/linux/idxd.h>
+
+ /* PCI Config */
+-#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+-#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
+-
+ #define DEVICE_VERSION_1 0x100
+ #define DEVICE_VERSION_2 0x200
+
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index c01db23e3333f7..3f922518e3a525 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -182,13 +182,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+
+ portal = idxd_wq_portal_addr(wq);
+
+- /*
+- * The wmb() flushes writes to coherent DMA data before
+- * possibly triggering a DMA read. The wmb() is necessary
+- * even on UP because the recipient is a device.
+- */
+- wmb();
+-
+ /*
+ * Pending the descriptor to the lockless list for the irq_entry
+ * that we designated the descriptor to.
+@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+ llist_add(&desc->llnode, &ie->pending_llist);
+ }
+
++ /*
++ * The wmb() flushes writes to coherent DMA data before
++ * possibly triggering a DMA read. The wmb() is necessary
++ * even on UP because the recipient is a device.
++ */
++ wmb();
++
+ if (wq_dedicated(wq)) {
+ iosubmit_cmds512(portal, desc->hw, 1);
+ } else {
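[Editorial note] The submit-path reorder above moves the wmb() after the pending-list insertion, immediately before the portal write, so a single barrier orders both the coherent descriptor data and the list linkage ahead of anything the device (and its completion interrupt) can observe. A C11 analogue of the intended ordering, paraphrasing the in-code comment:

#include <stdatomic.h>
#include <stdio.h>

static int desc_ready;            /* coherent DMA data               */
static int on_pending_list;       /* consulted by the IRQ handler    */
static atomic_int doorbell;       /* the portal/doorbell write       */

int main(void)
{
    desc_ready = 1;                               /* prepare descriptor */
    on_pending_list = 1;                          /* llist_add()        */
    atomic_thread_fence(memory_order_release);    /* wmb() equivalent   */
    atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
    printf("doorbell=%d ready=%d listed=%d\n",
           atomic_load(&doorbell), desc_ready, on_pending_list);
    return 0;
}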
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 7caba90d85b31e..1fd5a93045f79e 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
+ static struct device_attribute dev_attr_wq_enqcmds_retries =
+ __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+
++static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
++{
++ ssize_t pos;
++ int i;
++
++ pos = 0;
++ for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
++ unsigned long val = opcap_bmap[i];
++
++ /* On systems where direct user submissions are not safe, we need to clear out
++ * the BATCH capability from the capability mask in sysfs since we cannot support
++ * that command on such systems.
++ */
++ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
++ clear_bit(DSA_OPCODE_BATCH % 64, &val);
++
++ pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
++ pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
++ }
++
++ return pos;
++}
++
+ static ssize_t wq_op_config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
++ return op_cap_show_common(dev, buf, wq->opcap_bmap);
+ }
+
+ static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
+@@ -1421,7 +1444,7 @@ static ssize_t op_cap_show(struct device *dev,
+ {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
++ return op_cap_show_common(dev, buf, idxd->opcap_bmap);
+ }
+ static DEVICE_ATTR_RO(op_cap);
+
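
The new op_cap_show_common() renders the 256-bit operation-capability mask as comma-separated 64-bit words, most significant word first, masking out the BATCH opcode when direct user submission is unsafe. A rough userspace model of just the formatting loop (hex via snprintf instead of the kernel's %*pb specifier; sizes and values invented):

#include <stdio.h>

#define OPCAP_BITS 256

int main(void)
{
	unsigned long long opcap[OPCAP_BITS / 64] = { 0x1, 0x0, 0x0, 0xff };
	char buf[256];
	int pos = 0;

	for (int i = OPCAP_BITS / 64 - 1; i >= 0; i--)
		pos += snprintf(buf + pos, sizeof(buf) - pos, "%016llx%c",
				opcap[i], i == 0 ? '\n' : ',');

	fputs(buf, stdout);	/* most significant word printed first */
	return 0;
}
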
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 9c364e92cb828d..e8f45a7fded435 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -534,18 +534,6 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
+ return err;
+ }
+
+-static int ioat_register(struct ioatdma_device *ioat_dma)
+-{
+- int err = dma_async_device_register(&ioat_dma->dma_dev);
+-
+- if (err) {
+- ioat_disable_interrupts(ioat_dma);
+- dma_pool_destroy(ioat_dma->completion_pool);
+- }
+-
+- return err;
+-}
+-
+ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+ {
+ struct dma_device *dma = &ioat_dma->dma_dev;
+@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+ ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
+
+- err = ioat_register(ioat_dma);
++ err = dma_async_device_register(&ioat_dma->dma_dev);
+ if (err)
+- return err;
++ goto err_disable_interrupts;
+
+ ioat_kobject_add(ioat_dma, &ioat_ktype);
+
+@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+
+ /* disable relaxed ordering */
+ err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
+- if (err)
+- return pcibios_err_to_errno(err);
++ if (err) {
++ err = pcibios_err_to_errno(err);
++ goto err_disable_interrupts;
++ }
+
+ /* clear relaxed ordering enable */
+ val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
+- if (err)
+- return pcibios_err_to_errno(err);
++ if (err) {
++ err = pcibios_err_to_errno(err);
++ goto err_disable_interrupts;
++ }
+
+ if (ioat_dma->cap & IOAT_CAP_DPS)
+ writeb(ioat_pending_level + 1,
+ ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+
+ return 0;
++
++err_disable_interrupts:
++ ioat_disable_interrupts(ioat_dma);
++ dma_pool_destroy(ioat_dma->completion_pool);
++ return err;
+ }
+
+ static void ioat_shutdown(struct pci_dev *pdev)
+@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *device;
++ unsigned int i;
++ u8 version;
+ int err;
+
+ err = pcim_enable_device(pdev);
+@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (!iomap)
+ return -ENOMEM;
+
++ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
++ if (version < IOAT_VER_3_0)
++ return -ENODEV;
++
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ return err;
+@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, device);
+
+- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
++ device->version = version;
+ if (device->version >= IOAT_VER_3_4)
+ ioat_dca_enabled = 0;
+- if (device->version >= IOAT_VER_3_0) {
+- if (is_skx_ioat(pdev))
+- device->version = IOAT_VER_3_2;
+- err = ioat3_dma_probe(device, ioat_dca_enabled);
+- } else
+- return -ENODEV;
+
++ if (is_skx_ioat(pdev))
++ device->version = IOAT_VER_3_2;
++
++ err = ioat3_dma_probe(device, ioat_dca_enabled);
+ if (err) {
++ for (i = 0; i < IOAT_MAX_CHANS; i++)
++ kfree(device->idx[i]);
++ kfree(device);
+ dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ return -ENODEV;
+ }
+@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
+ static void __exit ioat_exit_module(void)
+ {
+ pci_unregister_driver(&ioat_pci_driver);
++ kmem_cache_destroy(ioat_sed_cache);
+ kmem_cache_destroy(ioat_cache);
+ }
+ module_exit(ioat_exit_module);
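
The ioat3_dma_probe() rework replaces bare early returns, which leaked the interrupt and completion-pool setup once ioat_register() was inlined, with a single unwind label. A compact sketch of the idiom with stub functions (all invented for illustration):

static int register_engine(void) { return 0; }	/* stubs for the sketch */
static int tune_pcie(void)       { return 0; }
static void disable_interrupts(void) { }
static void destroy_pool(void)       { }

int probe(void)
{
	int err;

	err = register_engine();
	if (err)
		goto err_disable_interrupts;

	err = tune_pcie();
	if (err)
		goto err_disable_interrupts;

	return 0;

err_disable_interrupts:		/* one exit path undoes both steps */
	disable_interrupts();
	destroy_pool();
	return err;
}
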
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index 384476757c5e3a..3bcf73ef69dc7f 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
+ else
+ regval &= ~val;
+
+- writel(val, pchan->base + reg);
++ writel(regval, pchan->base + reg);
+ }
+
+ static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
+@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
+ else
+ regval &= ~val;
+
+- writel(val, od->base + reg);
++ writel(regval, od->base + reg);
+ }
+
+ static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
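
Both owl-dma hunks fix the same read-modify-write slip: the helpers computed regval from the current register contents but then wrote the bare mask val back, clobbering every other bit. A toy version of the corrected sequence, with a plain variable standing in for the MMIO register:

#include <stdint.h>

static uint32_t fake_reg;

static void reg_update(uint32_t mask, int set)
{
	uint32_t regval = fake_reg;	/* read   */

	if (set)
		regval |= mask;		/* modify */
	else
		regval &= ~mask;

	fake_reg = regval;		/* write back regval, not the mask */
}
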
+diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
+index 1aa65e5de0f3ad..f792407348077d 100644
+--- a/drivers/dma/ptdma/ptdma-dmaengine.c
++++ b/drivers/dma/ptdma/ptdma-dmaengine.c
+@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+
+- dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
+-
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index 1b046d9a3a269a..16d342654da2bf 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
+ dma_addr_t dma;
+ struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
+
+- BUG_ON(sw_desc->nb_desc == 0);
+ for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
+ if (i > 0)
+ dma = sw_desc->hw_desc[i - 1]->ddadr;
+diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
+index 9c121a4b33ad82..f97d80343aea42 100644
+--- a/drivers/dma/sh/shdma.h
++++ b/drivers/dma/sh/shdma.h
+@@ -25,7 +25,7 @@ struct sh_dmae_chan {
+ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+- char dev_id[16]; /* unique name per DMAC of channel */
++ char dev_id[32]; /* unique name per DMAC of channel */
+ int pm_error;
+ dma_addr_t slave_addr;
+ };
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 0b30151fb45c4c..9840594a6aaa1f 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -1249,8 +1249,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+ enum dma_slave_buswidth max_width;
+ struct stm32_dma_desc *desc;
+ size_t xfer_count, offset;
+- u32 num_sgs, best_burst, dma_burst, threshold;
+- int i;
++ u32 num_sgs, best_burst, threshold;
++ int dma_burst, i;
+
+ num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+ desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
+@@ -1268,6 +1268,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+ best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
+ threshold, max_width);
+ dma_burst = stm32_dma_get_burst(chan, best_burst);
++ if (dma_burst < 0) {
++ kfree(desc);
++ return NULL;
++ }
+
+ stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+ desc->sg_req[i].chan_reg.dma_scr =
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index bae08b3f55c73f..f414efdbd809e1 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -489,7 +489,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ src_maxburst = chan->dma_config.src_maxburst;
+ dst_maxburst = chan->dma_config.dst_maxburst;
+
+- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+
+@@ -965,7 +965,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ if (!desc)
+ return NULL;
+
+- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
+index 33b10100110096..674cf630528383 100644
+--- a/drivers/dma/tegra186-gpc-dma.c
++++ b/drivers/dma/tegra186-gpc-dma.c
+@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
+ bytes_xfer = dma_desc->bytes_xfer +
+ sg_req[dma_desc->sg_idx].len - (wcount * 4);
+
++ if (dma_desc->bytes_req == bytes_xfer)
++ return 0;
++
+ residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
+
+ return residual;
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index aa8e2e8ac26098..155c409d2b434d 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2401,9 +2401,14 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 0);
+
+- if (irq >= 0) {
++ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ dev_name(dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err_disable_pm;
++ }
++
+ ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+ ecc);
+ if (ret) {
+@@ -2417,9 +2422,14 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 2);
+
+- if (irq >= 0) {
++ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ dev_name(dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err_disable_pm;
++ }
++
+ ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+ ecc);
+ if (ret) {
+diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
+index 2b6fd6e37c6107..1272b1541f61e2 100644
+--- a/drivers/dma/ti/k3-psil-am62.c
++++ b/drivers/dma/ti/k3-psil-am62.c
+@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = {
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+- /* PDMA_MAIN0 - SPI0-3 */
++ /* PDMA_MAIN0 - SPI0-2 */
++ PSIL_PDMA_XY_PKT(0x4300),
++ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = {
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+- PSIL_PDMA_XY_PKT(0x430c),
+- PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+- /* PDMA_MAIN0 - SPI0-3 */
++ /* PDMA_MAIN0 - SPI0-2 */
++ PSIL_PDMA_XY_PKT(0xc300),
++ PSIL_PDMA_XY_PKT(0xc301),
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = {
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+- PSIL_PDMA_XY_PKT(0xc30c),
+- PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c
+index ca9d71f914220a..4cf9123b0e9326 100644
+--- a/drivers/dma/ti/k3-psil-am62a.c
++++ b/drivers/dma/ti/k3-psil-am62a.c
+@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = {
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+- /* PDMA_MAIN0 - SPI0-3 */
++ /* PDMA_MAIN0 - SPI0-2 */
++ PSIL_PDMA_XY_PKT(0x4300),
++ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = {
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+- PSIL_PDMA_XY_PKT(0x430c),
+- PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+- /* PDMA_MAIN0 - SPI0-3 */
++ /* PDMA_MAIN0 - SPI0-2 */
++ PSIL_PDMA_XY_PKT(0xc300),
++ PSIL_PDMA_XY_PKT(0xc301),
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = {
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+- PSIL_PDMA_XY_PKT(0xc30c),
+- PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index 30fd2f386f36a1..02a1ab04f498e5 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ {
+ struct udma_chan *uc = to_udma_chan(&vc->chan);
+ struct udma_desc *d;
++ u8 status;
+
+ if (!vd)
+ return;
+@@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ if (d->metadata_size)
+ udma_fetch_epib(uc, d);
+
+- /* Provide residue information for the client */
+ if (result) {
+ void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+ if (cppi5_desc_get_type(desc_vaddr) ==
+ CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
++ /* Provide residue information for the client */
+ result->residue = d->residue -
+ cppi5_hdesc_get_pktlen(desc_vaddr);
+ if (result->residue)
+@@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ result->result = DMA_TRANS_NOERROR;
+ } else {
+ result->residue = 0;
+- result->result = DMA_TRANS_NOERROR;
++ /* Propagate TR Response errors to the client */
++ status = d->hwdesc[0].tr_resp_base->status;
++ if (status)
++ result->result = DMA_TRANS_ABORTED;
++ else
++ result->result = DMA_TRANS_NOERROR;
+ }
+ }
+ }
+@@ -4464,7 +4470,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ break;
+ case DMA_TYPE_BCDMA:
+- ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
++ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
++ BCDMA_CAP3_HBCHAN_CNT(cap3) +
++ BCDMA_CAP3_UBCHAN_CNT(cap3);
+ ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = ud->rchan_cnt;
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 84dc5240a8074a..93938ed80fc837 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
+ * @running: true if the channel is running
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+- * @lock: lock to access struct xilinx_dpdma_chan
++ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
++ * @vchan.lock, if both are to be held.
+ * @desc_pool: descriptor allocation pool
+ * @err_task: error IRQ bottom half handler
+ * @desc: References to descriptors being processed
+@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
+ * Complete the active descriptor, if any, promote the pending
+ * descriptor to active, and queue the next transfer, if any.
+ */
++ spin_lock(&chan->vchan.lock);
+ if (chan->desc.active)
+ vchan_cookie_complete(&chan->desc.active->vdesc);
+ chan->desc.active = pending;
+ chan->desc.pending = NULL;
+
+ xilinx_dpdma_chan_queue_transfer(chan);
++ spin_unlock(&chan->vchan.lock);
+
+ out:
+ spin_unlock_irqrestore(&chan->lock, flags);
+@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+- spin_lock_irqsave(&chan->vchan.lock, flags);
++ spin_lock_irqsave(&chan->lock, flags);
++ spin_lock(&chan->vchan.lock);
+ if (vchan_issue_pending(&chan->vchan))
+ xilinx_dpdma_chan_queue_transfer(chan);
+- spin_unlock_irqrestore(&chan->vchan.lock, flags);
++ spin_unlock(&chan->vchan.lock);
++ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ static int xilinx_dpdma_config(struct dma_chan *dchan,
+@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
+ XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+ spin_lock_irqsave(&chan->lock, flags);
++ spin_lock(&chan->vchan.lock);
+ xilinx_dpdma_chan_queue_transfer(chan);
++ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
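
The dpdma changes enforce one lock order everywhere, now recorded in the kerneldoc: chan->lock (outer) before vchan.lock (inner), so no pair of paths can nest them in opposite directions and deadlock. A pthread sketch of the same discipline, names illustrative only:

#include <pthread.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vchan_lock = PTHREAD_MUTEX_INITIALIZER;

void issue_pending(void)
{
	pthread_mutex_lock(&chan_lock);		/* outer lock first  */
	pthread_mutex_lock(&vchan_lock);	/* inner lock second */
	/* ... vchan_issue_pending() + queue_transfer() ... */
	pthread_mutex_unlock(&vchan_lock);
	pthread_mutex_unlock(&chan_lock);
}
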
+diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
+index 61945d3113cc32..446364264e2b18 100644
+--- a/drivers/edac/Makefile
++++ b/drivers/edac/Makefile
+@@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o
+ layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o
+ obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o
+
+-skx_edac-y := skx_common.o skx_base.o
+-obj-$(CONFIG_EDAC_SKX) += skx_edac.o
++skx_edac_common-y := skx_common.o
+
+-i10nm_edac-y := skx_common.o i10nm_base.o
+-obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o
++skx_edac-y := skx_base.o
++obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
++
++i10nm_edac-y := i10nm_base.o
++obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
+
+ obj-$(CONFIG_EDAC_CELL) += cell_edac.o
+ obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 9b6642d0087130..b61c7f02a8c17c 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -80,7 +80,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ amd64_warn("%s: error reading F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+- return err;
++ return pcibios_err_to_errno(err);
+ }
+
+ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+@@ -93,7 +93,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ amd64_warn("%s: error writing to F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+- return err;
++ return pcibios_err_to_errno(err);
+ }
+
+ /*
+@@ -1016,8 +1016,10 @@ static int gpu_get_node_map(void)
+ }
+
+ ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
+- if (ret)
++ if (ret) {
++ ret = pcibios_err_to_errno(ret);
+ goto out;
++ }
+
+ gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
+ gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
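
The amd64_edac fixes matter because pci_read_config_dword() and friends return positive PCIBIOS_* status codes, which callers testing for negative errnos would treat as non-errors; pcibios_err_to_errno() translates them first. A sketch of the mapping idea with invented FAKE_ constants (the real values and full table live in the PCI core):

#include <errno.h>

#define FAKE_PCIBIOS_DEVICE_NOT_FOUND		0x86
#define FAKE_PCIBIOS_BAD_REGISTER_NUMBER	0x87

static int fake_pcibios_err_to_errno(int err)
{
	if (err <= 0)			/* already 0 or a -errno */
		return err;

	switch (err) {
	case FAKE_PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case FAKE_PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	default:
		return -ERANGE;
	}
}
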
+diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
+index 1a18693294db48..a0edb61a5a01ac 100644
+--- a/drivers/edac/igen6_edac.c
++++ b/drivers/edac/igen6_edac.c
+@@ -245,7 +245,7 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc)
+ if (igen6_tom <= _4GB)
+ return eaddr + igen6_tolud - _4GB;
+
+- if (eaddr < _4GB)
++ if (eaddr >= igen6_tom)
+ return eaddr + igen6_tolud - igen6_tom;
+
+ return eaddr;
+@@ -627,7 +627,7 @@ static int errcmd_enable_error_reporting(bool enable)
+
+ rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
+ if (rc)
+- return rc;
++ return pcibios_err_to_errno(rc);
+
+ if (enable)
+ errcmd |= ERRCMD_CE | ERRSTS_UE;
+@@ -636,7 +636,7 @@ static int errcmd_enable_error_reporting(bool enable)
+
+ rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
+ if (rc)
+- return rc;
++ return pcibios_err_to_errno(rc);
+
+ return 0;
+ }
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index ce3e0069e028d0..8d18099fd528cf 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm;
+ static LIST_HEAD(dev_edac_list);
+ static bool skx_mem_cfg_2lm;
+
+-int __init skx_adxl_get(void)
++int skx_adxl_get(void)
+ {
+ const char * const *names;
+ int i, j;
+@@ -110,12 +110,14 @@ int __init skx_adxl_get(void)
+
+ return -ENODEV;
+ }
++EXPORT_SYMBOL_GPL(skx_adxl_get);
+
+-void __exit skx_adxl_put(void)
++void skx_adxl_put(void)
+ {
+ kfree(adxl_values);
+ kfree(adxl_msg);
+ }
++EXPORT_SYMBOL_GPL(skx_adxl_put);
+
+ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
+ {
+@@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
+ {
+ skx_mem_cfg_2lm = mem_cfg_2lm;
+ }
++EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
+
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
+ {
+ driver_decode = decode;
+ skx_show_retry_rd_err_log = show_retry_log;
+ }
++EXPORT_SYMBOL_GPL(skx_set_decode);
+
+ int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+ {
+@@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+ *id = GET_BITFIELD(reg, 12, 14);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(skx_get_src_id);
+
+ int skx_get_node_id(struct skx_dev *d, u8 *id)
+ {
+@@ -219,6 +224,7 @@ int skx_get_node_id(struct skx_dev *d, u8 *id)
+ *id = GET_BITFIELD(reg, 0, 2);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(skx_get_node_id);
+
+ static int get_width(u32 mtr)
+ {
+@@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
+ *list = &dev_edac_list;
+ return ndev;
+ }
++EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings);
+
+ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
+ {
+@@ -323,6 +330,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
++EXPORT_SYMBOL_GPL(skx_get_hi_lo);
+
+ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
+ int minval, int maxval, const char *name)
+@@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
+
+ return 1;
+ }
++EXPORT_SYMBOL_GPL(skx_get_dimm_info);
+
+ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
+ int chan, int dimmno, const char *mod_str)
+@@ -442,6 +451,7 @@ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
+
+ return (size == 0 || size == ~0ull) ? 0 : 1;
+ }
++EXPORT_SYMBOL_GPL(skx_get_nvdimm_info);
+
+ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
+ const char *ctl_name, const char *mod_str,
+@@ -512,6 +522,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
+ imc->mci = NULL;
+ return rc;
+ }
++EXPORT_SYMBOL_GPL(skx_register_mci);
+
+ static void skx_unregister_mci(struct skx_imc *imc)
+ {
+@@ -648,6 +659,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ memset(&res, 0, sizeof(res));
+ res.mce = mce;
+ res.addr = mce->addr & MCI_ADDR_PHYSADDR;
++ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
++ pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
++ return NOTIFY_DONE;
++ }
+
+ /* Try driver decoder first */
+ if (!(driver_decode && driver_decode(&res))) {
+@@ -684,6 +699,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ mce->kflags |= MCE_HANDLED_EDAC;
+ return NOTIFY_DONE;
+ }
++EXPORT_SYMBOL_GPL(skx_mce_check_error);
+
+ void skx_remove(void)
+ {
+@@ -721,3 +737,8 @@ void skx_remove(void)
+ kfree(d);
+ }
+ }
++EXPORT_SYMBOL_GPL(skx_remove);
++
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Tony Luck");
++MODULE_DESCRIPTION("MC Driver for Intel server processors");
+diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
+index b6d3607dffe27b..11faf1db4fa482 100644
+--- a/drivers/edac/skx_common.h
++++ b/drivers/edac/skx_common.h
+@@ -231,8 +231,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
+ typedef bool (*skx_decode_f)(struct decoded_addr *res);
+ typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
+
+-int __init skx_adxl_get(void);
+-void __exit skx_adxl_put(void);
++int skx_adxl_get(void);
++void skx_adxl_put(void);
+ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
+ void skx_set_mem_cfg(bool mem_cfg_2lm);
+
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index c4fc64cbecd0e6..6ddc90d7ba7c2a 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -9,6 +9,8 @@
+ #include <linux/edac.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <linux/spinlock.h>
++#include <linux/sizes.h>
+ #include <linux/interrupt.h>
+ #include <linux/of.h>
+
+@@ -299,6 +301,7 @@ struct synps_ecc_status {
+ /**
+ * struct synps_edac_priv - DDR memory controller private instance data.
+ * @baseaddr: Base address of the DDR controller.
++ * @reglock: Concurrent CSRs access lock.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @p_data: Platform data.
+@@ -313,6 +316,7 @@ struct synps_ecc_status {
+ */
+ struct synps_edac_priv {
+ void __iomem *baseaddr;
++ spinlock_t reglock;
+ char message[SYNPS_EDAC_MSG_SIZE];
+ struct synps_ecc_status stat;
+ const struct synps_platform_data *p_data;
+@@ -334,6 +338,7 @@ struct synps_edac_priv {
+ * @get_mtype: Get mtype.
+ * @get_dtype: Get dtype.
+ * @get_ecc_state: Get ECC state.
++ * @get_mem_info: Get EDAC memory info.
+ * @quirks: To differentiate IPs.
+ */
+ struct synps_platform_data {
+@@ -341,6 +346,9 @@ struct synps_platform_data {
+ enum mem_type (*get_mtype)(const void __iomem *base);
+ enum dev_type (*get_dtype)(const void __iomem *base);
+ bool (*get_ecc_state)(void __iomem *base);
++#ifdef CONFIG_EDAC_DEBUG
++ u64 (*get_mem_info)(struct synps_edac_priv *priv);
++#endif
+ int quirks;
+ };
+
+@@ -399,6 +407,25 @@ static int zynq_get_error_info(struct synps_edac_priv *priv)
+ return 0;
+ }
+
++#ifdef CONFIG_EDAC_DEBUG
++/**
++ * zynqmp_get_mem_info - Get the current memory info.
++ * @priv: DDR memory controller private instance data.
++ *
++ * Return: host interface address.
++ */
++static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
++{
++ u64 hif_addr = 0, linear_addr;
++
++ linear_addr = priv->poison_addr;
++ if (linear_addr >= SZ_32G)
++ linear_addr = linear_addr - SZ_32G + SZ_2G;
++ hif_addr = linear_addr >> 3;
++ return hif_addr;
++}
++#endif
++
+ /**
+ * zynqmp_get_error_info - Get the current ECC error info.
+ * @priv: DDR memory controller private instance data.
+@@ -408,7 +435,8 @@ static int zynq_get_error_info(struct synps_edac_priv *priv)
+ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+ {
+ struct synps_ecc_status *p;
+- u32 regval, clearval = 0;
++ u32 regval, clearval;
++ unsigned long flags;
+ void __iomem *base;
+
+ base = priv->baseaddr;
+@@ -452,10 +480,14 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+ p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
+ p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
+ out:
+- clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
+- clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
++ spin_lock_irqsave(&priv->reglock, flags);
++
++ clearval = readl(base + ECC_CLR_OFST) |
++ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
++ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ writel(clearval, base + ECC_CLR_OFST);
+- writel(0x0, base + ECC_CLR_OFST);
++
++ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ return 0;
+ }
+@@ -515,24 +547,41 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
+
+ static void enable_intr(struct synps_edac_priv *priv)
+ {
++ unsigned long flags;
++
+ /* Enable UE/CE Interrupts */
+- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+- writel(DDR_UE_MASK | DDR_CE_MASK,
+- priv->baseaddr + ECC_CLR_OFST);
+- else
++ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+
++ return;
++ }
++
++ spin_lock_irqsave(&priv->reglock, flags);
++
++ writel(DDR_UE_MASK | DDR_CE_MASK,
++ priv->baseaddr + ECC_CLR_OFST);
++
++ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ static void disable_intr(struct synps_edac_priv *priv)
+ {
++ unsigned long flags;
++
+ /* Disable UE/CE Interrupts */
+- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
+- writel(0x0, priv->baseaddr + ECC_CLR_OFST);
+- else
++ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
++
++ return;
++ }
++
++ spin_lock_irqsave(&priv->reglock, flags);
++
++ writel(0, priv->baseaddr + ECC_CLR_OFST);
++
++ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ /**
+@@ -576,8 +625,6 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
+ /* v3.0 of the controller does not have this register */
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
+ writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+- else
+- enable_intr(priv);
+
+ return IRQ_HANDLED;
+ }
+@@ -899,6 +946,9 @@ static const struct synps_platform_data zynqmp_edac_def = {
+ .get_mtype = zynqmp_get_mtype,
+ .get_dtype = zynqmp_get_dtype,
+ .get_ecc_state = zynqmp_get_ecc_state,
++#ifdef CONFIG_EDAC_DEBUG
++ .get_mem_info = zynqmp_get_mem_info,
++#endif
+ .quirks = (DDR_ECC_INTR_SUPPORT
+ #ifdef CONFIG_EDAC_DEBUG
+ | DDR_ECC_DATA_POISON_SUPPORT
+@@ -952,10 +1002,16 @@ MODULE_DEVICE_TABLE(of, synps_edac_match);
+ static void ddr_poison_setup(struct synps_edac_priv *priv)
+ {
+ int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
++ const struct synps_platform_data *p_data;
+ int index;
+ ulong hif_addr = 0;
+
+- hif_addr = priv->poison_addr >> 3;
++ p_data = priv->p_data;
++
++ if (p_data->get_mem_info)
++ hif_addr = p_data->get_mem_info(priv);
++ else
++ hif_addr = priv->poison_addr >> 3;
+
+ for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
+ if (priv->row_shift[index])
+@@ -1359,6 +1415,7 @@ static int mc_probe(struct platform_device *pdev)
+ priv = mci->pvt_info;
+ priv->baseaddr = baseaddr;
+ priv->p_data = p_data;
++ spin_lock_init(&priv->reglock);
+
+ mc_init(mci, pdev);
+
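
zynqmp_get_mem_info() folds linear addresses at or above 32 GiB down past the 2 GiB DDR hole and then shifts by 3 to obtain the host-interface (HIF) word address the poison registers expect. A userspace check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define SZ_2G	0x80000000ULL
#define SZ_32G	0x800000000ULL

static uint64_t to_hif(uint64_t linear)
{
	if (linear >= SZ_32G)
		linear = linear - SZ_32G + SZ_2G;
	return linear >> 3;		/* 8-byte HIF words */
}

int main(void)
{
	assert(to_hif(0x1000) == 0x1000 >> 3);	/* low range unchanged  */
	assert(to_hif(SZ_32G) == SZ_2G >> 3);	/* folded past the hole */
	return 0;
}
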
+diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
+index b9c5772da959cc..90d46e5c4ff069 100644
+--- a/drivers/edac/thunderx_edac.c
++++ b/drivers/edac/thunderx_edac.c
+@@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
+ decode_register(other, OCX_OTHER_SIZE,
+ ocx_com_errors, ctx->reg_com_int);
+
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+
+ for (lane = 0; lane < OCX_RX_LANES; lane++)
+ if (ctx->reg_com_int & BIT(lane)) {
+@@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
+ lane, ctx->reg_lane_int[lane],
+ lane, ctx->reg_lane_stat11[lane]);
+
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+
+ decode_register(other, OCX_OTHER_SIZE,
+ ocx_lane_errors,
+ ctx->reg_lane_int[lane]);
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+ }
+
+ if (ctx->reg_com_int & OCX_COM_INT_CE)
+@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
+ decode_register(other, OCX_OTHER_SIZE,
+ ocx_com_link_errors, ctx->reg_com_link_int);
+
+- strncat(msg, other, OCX_MESSAGE_SIZE);
++ strlcat(msg, other, OCX_MESSAGE_SIZE);
+
+ if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
+ edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
+@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
+
+ decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
+
+- strncat(msg, other, L2C_MESSAGE_SIZE);
++ strlcat(msg, other, L2C_MESSAGE_SIZE);
+
+ if (ctx->reg_int & mask_ue)
+ edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
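
The strncat() to strlcat() conversion is more than cosmetic: strncat's size argument bounds how many bytes are appended, not the destination size, so repeatedly appending per-lane text into msg could overrun OCX_MESSAGE_SIZE. A portable userspace demo with a local strlcat (BSD semantics; the kernel ships its own implementation):

#include <stdio.h>
#include <string.h>

static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)		/* dst not even terminated */
		return size + slen;
	if (slen < size - dlen) {
		memcpy(dst + dlen, src, slen + 1);
	} else {
		memcpy(dst + dlen, src, size - dlen - 1);
		dst[size - 1] = '\0';	/* truncate, stay in bounds */
	}
	return dlen + slen;
}

int main(void)
{
	char msg[16] = "err:";

	my_strlcat(msg, " lane0 failed badly", sizeof(msg));
	printf("%s\n", msg);		/* safely truncated, no overflow */
	return 0;
}
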
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
+index 8de9023c2a3878..cf472e44c5ff90 100644
+--- a/drivers/extcon/Kconfig
++++ b/drivers/extcon/Kconfig
+@@ -116,7 +116,8 @@ config EXTCON_MAX77843
+
+ config EXTCON_MAX8997
+ tristate "Maxim MAX8997 EXTCON Support"
+- depends on MFD_MAX8997 && IRQ_DOMAIN
++ depends on MFD_MAX8997
++ select IRQ_DOMAIN
+ help
+ If you say yes here you get support for the MUIC device of
+ Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
+index 6f7a60d2ed9161..e7f55c021e562f 100644
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -1280,8 +1280,6 @@ int extcon_dev_register(struct extcon_dev *edev)
+
+ edev->id = ret;
+
+- dev_set_name(&edev->dev, "extcon%d", edev->id);
+-
+ ret = extcon_alloc_cables(edev);
+ if (ret < 0)
+ goto err_alloc_cables;
+@@ -1310,6 +1308,7 @@ int extcon_dev_register(struct extcon_dev *edev)
+ RAW_INIT_NOTIFIER_HEAD(&edev->nh_all);
+
+ dev_set_drvdata(&edev->dev, edev);
++ dev_set_name(&edev->dev, "extcon%d", edev->id);
+ edev->state = 0;
+
+ ret = device_register(&edev->dev);
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 6ac5ff20a2fe22..401a77e3b5fa8e 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
+ */
+ card->bm_generation = generation;
+
+- if (root_device == NULL) {
++ if (card->gap_count == 0) {
++ /*
++ * If self IDs have inconsistent gap counts, do a
++ * bus reset ASAP. The config rom read might never
++ * complete, so don't wait for it. However, still
++ * send a PHY configuration packet prior to the
++ * bus reset. The PHY configuration packet might
++ * fail, but 1394-2008 8.4.5.2 explicitly permits
++ * it in this case, so it should be safe to try.
++ */
++ new_root_id = local_id;
++ /*
++ * We must always send a bus reset if the gap count
++ * is inconsistent, so bypass the 5-reset limit.
++ */
++ card->bm_retries = 0;
++ } else if (root_device == NULL) {
+ /*
+ * Either link_on is false, or we failed to read the
+ * config rom. In either case, pick another root.
+@@ -484,7 +500,19 @@ static void bm_work(struct work_struct *work)
+ fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
+ new_root_id, gap_count);
+ fw_send_phy_config(card, new_root_id, generation, gap_count);
+- reset_bus(card, true);
++ /*
++ * Where possible, use a short bus reset to minimize
++ * disruption to isochronous transfers. But in the event
++ * of a gap count inconsistency, use a long bus reset.
++ *
++ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
++ * may set different gap counts after a bus reset. On a mixed
++ * 1394/1394a bus, a short bus reset can get doubled. Some
++ * nodes may treat the double reset as one bus reset and others
++ * may treat it as two, causing a gap count inconsistency
++ * again. Using a long bus reset prevents this.
++ */
++ reset_bus(card, card->gap_count != 0);
+ /* Will allocate broadcast channel after the reset. */
+ goto out;
+ }
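
The bm_work() changes reduce to a small decision: a gap count of zero means the self-ID packets disagreed, so the local node takes root, the retry limit is bypassed, and a long bus reset is required; in every other case a short reset spares isochronous traffic. Condensed into a sketch (types invented):

#include <stdbool.h>

struct card_state {
	int gap_count;
	int bm_retries;
};

/* returns true for a short bus reset, false for a long one */
static bool pick_reset(struct card_state *card)
{
	if (card->gap_count == 0) {	/* inconsistent gap counts  */
		card->bm_retries = 0;	/* bypass the 5-reset limit */
		return false;		/* long reset, can't double */
	}
	return true;			/* short reset preferred    */
}
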
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 6274b86eb94377..73cc2f2dcbf923 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -598,11 +598,11 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts
+ queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
+
+ break;
++ }
+ default:
+ WARN_ON(1);
+ break;
+ }
+- }
+
+ /* Drop the idr's reference */
+ client_put(client);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index aa597cda0d8874..da8a4c8f287687 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -100,10 +100,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+- * The string is taken from a minimal ASCII text descriptor leaf after
+- * the immediate entry with @key. The string is zero-terminated.
+- * An overlong string is silently truncated such that it and the
+- * zero byte fit into @size.
++ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
++ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
++ * and the zero byte fit into @size.
+ *
+ * Returns strlen(buf) or a negative error code.
+ */
+@@ -717,14 +716,11 @@ static void create_units(struct fw_device *device)
+ fw_unit_attributes,
+ &unit->attribute_group);
+
+- if (device_register(&unit->device) < 0)
+- goto skip_unit;
+-
+ fw_device_get(device);
+- continue;
+-
+- skip_unit:
+- kfree(unit);
++ if (device_register(&unit->device) < 0) {
++ put_device(&unit->device);
++ continue;
++ }
+ }
+ }
+
+diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
+index b0d671db178a85..ea31ac7ac1ca93 100644
+--- a/drivers/firewire/nosy.c
++++ b/drivers/firewire/nosy.c
+@@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length)
+ if (atomic_read(&buffer->size) == 0)
+ return -ENODEV;
+
+- /* FIXME: Check length <= user_length. */
++ length = buffer->head->length;
++
++ if (length > user_length)
++ return 0;
+
+ end = buffer->data + buffer->capacity;
+- length = buffer->head->length;
+
+ if (&buffer->head->data[length] < end) {
+ if (copy_to_user(data, buffer->head->data, length))
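
The nosy change retires the old FIXME by validating the packet length against the reader's buffer before anything is copied out. A simplified model of the fixed path, with memcpy standing in for copy_to_user and the structures trimmed down:

#include <stddef.h>
#include <string.h>

struct packet {
	size_t length;
	unsigned char data[256];
};

static size_t buffer_get(const struct packet *head,
			 unsigned char *out, size_t user_length)
{
	size_t length = head->length;

	if (length > user_length)	/* too big for the caller */
		return 0;

	memcpy(out, head->data, length);
	return length;
}
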
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 7e88fd4897414b..b9ae0340b8a703 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
+ #define QUIRK_TI_SLLZ059 0x20
+ #define QUIRK_IR_WAKE 0x40
+
++// On any AMD Ryzen machine, a VIA VT6306/6307/6308 sitting behind an Asmedia ASM1083/1085
++// PCIe-to-PCI bridge has the inconvenient property that read accesses to the 'Isochronous
++// Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected system
++// reboot. The exact mechanism is unclear, since reads of the other registers (e.g. 'Node
++// ID') are safe; the reboot is probably triggered by detection of some type of PCIe error.
++#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000
++
++#if IS_ENABLED(CONFIG_X86)
++
++static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
++{
++ return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
++}
++
++#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080
++
++static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
++{
++ const struct pci_dev *pcie_to_pci_bridge;
++
++ // Detect any type of AMD Ryzen machine.
++ if (!static_cpu_has(X86_FEATURE_ZEN))
++ return false;
++
++ // Detect VIA VT6306/6307/6308.
++ if (pdev->vendor != PCI_VENDOR_ID_VIA)
++ return false;
++ if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
++ return false;
++
++ // Detect Asmedia ASM1083/1085.
++ pcie_to_pci_bridge = pdev->bus->self;
++ if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
++ return false;
++ if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
++ return false;
++
++ return true;
++}
++
++#else
++#define has_reboot_by_cycle_timer_read_quirk(ohci) false
++#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
++#endif
++
+ /* In case of multiple matches in ohci_quirks[], only the first one is used. */
+ static const struct {
+ unsigned short vendor, device, revision, flags;
+@@ -1511,6 +1556,8 @@ static int handle_at_packet(struct context *context,
+ #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+ #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+
++static u32 get_cycle_time(struct fw_ohci *ohci);
++
+ static void handle_local_rom(struct fw_ohci *ohci,
+ struct fw_packet *packet, u32 csr)
+ {
+@@ -1535,6 +1582,8 @@ static void handle_local_rom(struct fw_ohci *ohci,
+ (void *) ohci->config_rom + i, length);
+ }
+
++ // Timestamping on behalf of the hardware.
++ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
+ fw_core_handle_response(&ohci->card, &response);
+ }
+
+@@ -1583,6 +1632,8 @@ static void handle_local_lock(struct fw_ohci *ohci,
+ fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
+
+ out:
++ // Timestamping on behalf of the hardware.
++ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
+ fw_core_handle_response(&ohci->card, &response);
+ }
+
+@@ -1625,8 +1676,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+ }
+ }
+
+-static u32 get_cycle_time(struct fw_ohci *ohci);
+-
+ static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+ {
+ unsigned long flags;
+@@ -1724,6 +1773,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
+ s32 diff01, diff12;
+ int i;
+
++ if (has_reboot_by_cycle_timer_read_quirk(ohci))
++ return 0;
++
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+ if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+@@ -2012,6 +2064,8 @@ static void bus_reset_work(struct work_struct *work)
+
+ ohci->generation = generation;
+ reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
++ if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
++ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+
+ if (ohci->quirks & QUIRK_RESET_PACKET)
+ ohci->request_generation = generation;
+@@ -2077,12 +2131,14 @@ static irqreturn_t irq_handler(int irq, void *data)
+ return IRQ_NONE;
+
+ /*
+- * busReset and postedWriteErr must not be cleared yet
++ * busReset and postedWriteErr events must not be cleared yet
+ * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+ */
+ reg_write(ohci, OHCI1394_IntEventClear,
+ event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
+ log_irqs(ohci, event);
++ if (event & OHCI1394_busReset)
++ reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
+
+ if (event & OHCI1394_selfIDComplete)
+ queue_work(selfid_workqueue, &ohci->bus_reset_work);
+@@ -3630,6 +3686,9 @@ static int pci_probe(struct pci_dev *dev,
+ if (param_quirks)
+ ohci->quirks = param_quirks;
+
++ if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
++ ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
++
+ /*
+ * Because dma_alloc_coherent() allocates at least one page,
+ * we save space by using a common buffer for the AR request/
+@@ -3722,6 +3781,7 @@ static int pci_probe(struct pci_dev *dev,
+ return 0;
+
+ fail_msi:
++ devm_free_irq(&dev->dev, dev->irq, ohci);
+ pci_disable_msi(dev);
+
+ return err;
+@@ -3749,6 +3809,7 @@ static void pci_remove(struct pci_dev *dev)
+
+ software_reset(ohci);
+
++ devm_free_irq(&dev->dev, dev->irq, ohci);
+ pci_disable_msi(dev);
+
+ dev_notice(&dev->dev, "removing fw-ohci device\n");
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 7edf2c95282fa2..e779d866022b9f 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ sdev->use_10_for_rw = 1;
+
+ if (sbp2_param_exclusive_login) {
+- sdev->manage_system_start_stop = true;
+- sdev->manage_runtime_start_stop = true;
+- sdev->manage_shutdown = true;
++ sdev->manage_system_start_stop = 1;
++ sdev->manage_runtime_start_stop = 1;
++ sdev->manage_shutdown = 1;
+ }
+
+ if (sdev->type == TYPE_ROM)
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index b59e3041fd6275..f0e9f250669e2f 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -229,6 +229,7 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+ config SYSFB
+ bool
+ select BOOT_VESA_SUPPORT
++ select SCREEN_INFO
+
+ config SYSFB_SIMPLEFB
+ bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
+diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
+index 2b8bfcd010f5fd..7865438b36960d 100644
+--- a/drivers/firmware/arm_ffa/bus.c
++++ b/drivers/firmware/arm_ffa/bus.c
+@@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ dev->release = ffa_release_device;
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+
++ ffa_dev->id = id;
+ ffa_dev->vm_id = vm_id;
+ ffa_dev->ops = ops;
+ uuid_copy(&ffa_dev->uuid, uuid);
+diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
+index 121f4fc903cd57..7cd6b1564e8018 100644
+--- a/drivers/firmware/arm_ffa/driver.c
++++ b/drivers/firmware/arm_ffa/driver.c
+@@ -587,17 +587,9 @@ static int ffa_partition_info_get(const char *uuid_str,
+ return 0;
+ }
+
+-static void _ffa_mode_32bit_set(struct ffa_device *dev)
+-{
+- dev->mode_32bit = true;
+-}
+-
+ static void ffa_mode_32bit_set(struct ffa_device *dev)
+ {
+- if (drv_info->version > FFA_VERSION_1_0)
+- return;
+-
+- _ffa_mode_32bit_set(dev);
++ dev->mode_32bit = true;
+ }
+
+ static int ffa_sync_send_receive(struct ffa_device *dev,
+@@ -706,7 +698,7 @@ static void ffa_setup_partitions(void)
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+- _ffa_mode_32bit_set(ffa_dev);
++ ffa_mode_32bit_set(ffa_dev);
+ }
+ kfree(pbuf);
+ }
+diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
+index c46dc5215af7a7..00b165d1f502df 100644
+--- a/drivers/firmware/arm_scmi/common.h
++++ b/drivers/firmware/arm_scmi/common.h
+@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
++bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
+
+ /* declarations for message passing transports */
+ struct scmi_msg_payld;
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 19246ed1f01ff7..b8d470417e8f99 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
+ {
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
++ /*
++ * An A2P IRQ is NOT valid when received while the platform still has
++ * the ownership of the channel, because the platform at first releases
++ * the SMT channel and then sends the completion interrupt.
++ *
++ * This addresses a possible race condition in which a spurious IRQ from
++ * a previous timed-out reply which arrived late could be wrongly
++ * associated with the next pending transaction.
++ */
++ if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
++ dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
++ return;
++ }
++
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
+ }
+
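
The guard in rx_callback() leans on the SMT ownership rule the new comment spells out: the platform releases the shared-memory channel first and only then raises the completion interrupt, so an IRQ observed while the channel is still busy must be a stale leftover. As a sketch, with an invented status constant:

#include <stdbool.h>
#include <stdint.h>

#define CHAN_STAT_FREE	0x1u		/* invented flag bit */

static bool accept_completion_irq(uint32_t channel_status)
{
	if (!(channel_status & CHAN_STAT_FREE))
		return false;	/* platform still owns it: spurious IRQ */
	return true;
}
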
+diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
+index e123de6e8c67a9..aa02392265d326 100644
+--- a/drivers/firmware/arm_scmi/optee.c
++++ b/drivers/firmware/arm_scmi/optee.c
+@@ -467,6 +467,13 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
++ /*
++ * Different protocols might share the same chan info, so a previous
++ * call might have already freed the structure.
++ */
++ if (!channel)
++ return 0;
++
+ mutex_lock(&scmi_optee_private->mu);
+ list_del(&channel->link);
+ mutex_unlock(&scmi_optee_private->mu);
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index 30dedd6ebfde61..dd344506b0a37d 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -145,7 +145,6 @@ struct scmi_msg_resp_perf_describe_levels_v4 {
+ struct perf_dom_info {
+ u32 id;
+ bool set_limits;
+- bool set_perf;
+ bool perf_limit_notify;
+ bool perf_level_notify;
+ bool perf_fastchannels;
+@@ -153,8 +152,8 @@ struct perf_dom_info {
+ u32 opp_count;
+ u32 sustained_freq_khz;
+ u32 sustained_perf_level;
+- u32 mult_factor;
+- char name[SCMI_MAX_STR_SIZE];
++ unsigned long mult_factor;
++ struct scmi_perf_domain_info info;
+ struct scmi_opp opp[MAX_OPPS];
+ struct scmi_fc_info *fc_info;
+ struct xarray opps_by_idx;
+@@ -257,7 +256,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ flags = le32_to_cpu(attr->flags);
+
+ dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
+- dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
++ dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
+ dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+ dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
+@@ -269,14 +268,16 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ dom_info->sustained_perf_level =
+ le32_to_cpu(attr->sustained_perf_level);
+ if (!dom_info->sustained_freq_khz ||
+- !dom_info->sustained_perf_level)
++ !dom_info->sustained_perf_level ||
++ dom_info->level_indexing_mode)
+ /* CPUFreq converts to kHz, hence default 1000 */
+ dom_info->mult_factor = 1000;
+ else
+ dom_info->mult_factor =
+- (dom_info->sustained_freq_khz * 1000) /
+- dom_info->sustained_perf_level;
+- strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
++ (dom_info->sustained_freq_khz * 1000UL)
++ / dom_info->sustained_perf_level;
++ strscpy(dom_info->info.name, attr->name,
++ SCMI_SHORT_NAME_MAX_SIZE);
+ }
+
+ ph->xops->xfer_put(ph, t);
+@@ -288,7 +289,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
+- dom_info->id, dom_info->name,
++ dom_info->id, dom_info->info.name,
+ SCMI_MAX_STR_SIZE);
+
+ if (dom_info->level_indexing_mode) {
+@@ -346,8 +347,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
+ }
+
+ static inline void
+-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
+- unsigned int loop_idx,
++process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
++ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels_v4 *r)
+ {
+ opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
+@@ -358,10 +359,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
+ /* Note that PERF v4 reports always five 32-bit words */
+ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
+ if (dom->level_indexing_mode) {
++ int ret;
++
+ opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
+
+- xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
+- xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
++ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
++ GFP_KERNEL);
++ if (ret)
++ dev_warn(dev,
++ "Failed to add opps_by_idx at %d - ret:%d\n",
++ opp->level_index, ret);
++
++ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
++ if (ret)
++ dev_warn(dev,
++ "Failed to add opps_by_lvl at %d - ret:%d\n",
++ opp->perf, ret);
++
+ hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
+ }
+ }
+@@ -378,7 +392,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+ if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
+ process_response_opp(opp, st->loop_idx, response);
+ else
+- process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
++ process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
+ response);
+ p->perf_dom->opp_count++;
+
+@@ -423,6 +437,36 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
+ return ret;
+ }
+
++static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
++{
++ struct scmi_perf_info *pi = ph->get_priv(ph);
++
++ return pi->num_domains;
++}
++
++static inline struct perf_dom_info *
++scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
++{
++ struct scmi_perf_info *pi = ph->get_priv(ph);
++
++ if (domain >= pi->num_domains)
++ return ERR_PTR(-EINVAL);
++
++ return pi->dom_info + domain;
++}
++
++static const struct scmi_perf_domain_info *
++scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
++{
++ struct perf_dom_info *dom;
++
++ dom = scmi_perf_domain_lookup(ph, domain);
++ if (IS_ERR(dom))
++ return ERR_PTR(-EINVAL);
++
++ return &dom->info;
++}
++
+ static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
+ {
+@@ -446,17 +490,6 @@ static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
+ return ret;
+ }
+
+-static inline struct perf_dom_info *
+-scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
+-{
+- struct scmi_perf_info *pi = ph->get_priv(ph);
+-
+- if (domain >= pi->num_domains)
+- return ERR_PTR(-EINVAL);
+-
+- return pi->dom_info + domain;
+-}
+-
+ static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 max_perf,
+ u32 min_perf)
+@@ -780,7 +813,6 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ {
+ int idx, ret, domain;
+ unsigned long freq;
+- struct scmi_opp *opp;
+ struct perf_dom_info *dom;
+
+ domain = scmi_dev_domain_id(dev);
+@@ -791,28 +823,21 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+- for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
++ for (idx = 0; idx < dom->opp_count; idx++) {
+ if (!dom->level_indexing_mode)
+- freq = opp->perf * dom->mult_factor;
++ freq = dom->opp[idx].perf * dom->mult_factor;
+ else
+- freq = opp->indicative_freq * 1000;
++ freq = dom->opp[idx].indicative_freq * dom->mult_factor;
+
+ ret = dev_pm_opp_add(dev, freq, 0);
+ if (ret) {
+ dev_warn(dev, "failed to add opp %luHz\n", freq);
+-
+- while (idx-- > 0) {
+- if (!dom->level_indexing_mode)
+- freq = (--opp)->perf * dom->mult_factor;
+- else
+- freq = (--opp)->indicative_freq * 1000;
+- dev_pm_opp_remove(dev, freq);
+- }
++ dev_pm_opp_remove_all_dynamic(dev);
+ return ret;
+ }
+
+ dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
+- domain, dom->name, idx, freq);
++ domain, dom->info.name, idx, freq);
+ }
+ return 0;
+ }
+@@ -851,7 +876,8 @@ static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
+ } else {
+ struct scmi_opp *opp;
+
+- opp = LOOKUP_BY_FREQ(dom->opps_by_freq, freq / 1000);
++ opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
++ freq / dom->mult_factor);
+ if (!opp)
+ return -EIO;
+
+@@ -885,7 +911,7 @@ static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
+ if (!opp)
+ return -EIO;
+
+- *freq = opp->indicative_freq * 1000;
++ *freq = opp->indicative_freq * dom->mult_factor;
+ }
+
+ return ret;
+@@ -908,7 +934,7 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ if (!dom->level_indexing_mode)
+ opp_freq = opp->perf * dom->mult_factor;
+ else
+- opp_freq = opp->indicative_freq * 1000;
++ opp_freq = opp->indicative_freq * dom->mult_factor;
+
+ if (opp_freq < *freq)
+ continue;
+@@ -948,6 +974,8 @@ scmi_power_scale_get(const struct scmi_protocol_handle *ph)
+ }
+
+ static const struct scmi_perf_proto_ops perf_proto_ops = {
++ .num_domains_get = scmi_perf_num_domains_get,
++ .info_get = scmi_perf_info_get,
+ .limits_set = scmi_perf_limits_set,
+ .limits_get = scmi_perf_limits_get,
+ .level_set = scmi_perf_level_set,
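
Widening mult_factor to unsigned long (and multiplying by 1000UL) avoids a 32-bit overflow of sustained_freq_khz * 1000 on multi-GHz domains; the level-indexed paths are also switched from a hard-coded 1000 to the same factor. A quick userspace check of the overflow being avoided:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t sustained_freq_khz = 5000000;	/* 5 GHz */
	uint32_t sustained_perf_level = 500;

	uint32_t bad  = sustained_freq_khz * 1000 / sustained_perf_level;
	uint64_t good = (uint64_t)sustained_freq_khz * 1000 /
			sustained_perf_level;

	assert(good == 10000000);	/* 10 MHz per perf level      */
	assert(bad != good);		/* the 32-bit product wrapped */
	return 0;
}
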
+diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
+index 0493aa3c12bf53..130d13e9cd6beb 100644
+--- a/drivers/firmware/arm_scmi/raw_mode.c
++++ b/drivers/firmware/arm_scmi/raw_mode.c
+@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+ rd->raw = raw;
+ filp->private_data = rd;
+
+- return 0;
++ return nonseekable_open(inode, filp);
+ }
+
+ static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .write = scmi_dbg_raw_mode_reset_write,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_notif_read,
+ .poll = scmi_test_dbg_raw_mode_notif_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_errors_read,
+ .poll = scmi_test_dbg_raw_mode_errors_poll,
++ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ };
+
+@@ -1111,7 +1116,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ int i;
+
+ for (i = 0; i < num_chans; i++) {
+- void *xret;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_init(raw);
+@@ -1120,13 +1124,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ goto err_xa;
+ }
+
+- xret = xa_store(&raw->chans_q, channels[i], q,
++ ret = xa_insert(&raw->chans_q, channels[i], q,
+ GFP_KERNEL);
+- if (xa_err(xret)) {
++ if (ret) {
+ dev_err(dev,
+ "Fail to allocate Raw queue 0x%02X\n",
+ channels[i]);
+- ret = xa_err(xret);
+ goto err_xa;
+ }
+ }
+@@ -1322,6 +1325,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+ dev = raw->handle->dev;
+ q = scmi_raw_queue_select(raw, idx,
+ SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
++ if (!q) {
++ dev_warn(dev,
++ "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
++ idx, chan_id);
++ return;
++ }
+
+ /*
+ * Grab the msg_q_lock upfront to avoid a possible race between
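
The raw-mode debugfs interfaces above all get the same two-part treatment: the shared open handler now calls nonseekable_open(), and each file_operations gains .llseek = no_llseek, so lseek(2) fails with -ESPIPE instead of silently moving a meaningless file position. The pattern in miniature (names hypothetical):

    static int demo_open(struct inode *inode, struct file *filp)
    {
            filp->private_data = NULL;      /* per-open state goes here */
            return nonseekable_open(inode, filp);
    }

    static const struct file_operations demo_fops = {
            .owner  = THIS_MODULE,
            .open   = demo_open,
            .llseek = no_llseek,
    };
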
+diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
+index 87b4f4d35f0623..517d52fb3bcbbb 100644
+--- a/drivers/firmware/arm_scmi/shmem.c
++++ b/drivers/firmware/arm_scmi/shmem.c
+@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ }
++
++bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
++{
++ return (ioread32(&shmem->channel_status) &
++ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
++}
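
The new shmem_channel_free() helper simply tests the CHANNEL_FREE bit in the channel status word over MMIO. A hypothetical caller, sketched under the assumption that a transport wants to busy-wait briefly for the platform to release the channel:

    /* Illustrative only - demo_wait_channel_free() is not a driver API. */
    static bool demo_wait_channel_free(struct scmi_shared_mem __iomem *shmem,
                                       unsigned int retries)
    {
            while (retries--) {
                    if (shmem_channel_free(shmem))
                            return true;
                    udelay(10);
            }
            return false;
    }
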
+diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
+index c193516a254d9e..771797b6e26806 100644
+--- a/drivers/firmware/arm_scmi/smc.c
++++ b/drivers/firmware/arm_scmi/smc.c
+@@ -196,6 +196,13 @@ static int smc_chan_free(int id, void *p, void *data)
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
++ /*
++ * Different protocols might share the same chan info, so a previous
++ * smc_chan_free call might have already freed the structure.
++ */
++ if (!scmi_info)
++ return 0;
++
+ /* Ignore any possible further reception on the IRQ path */
+ if (scmi_info->irq > 0)
+ free_irq(scmi_info->irq, scmi_info);
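
The NULL check added above makes smc_chan_free() idempotent: several protocols may share one chan info, so a second call must notice that the transport info was already torn down. The same defensive shape, reduced to hypothetical types:

    struct demo_priv { int irq; };
    struct demo_chan { struct demo_priv *priv; };

    static void demo_chan_free(struct demo_chan *chan)
    {
            struct demo_priv *priv = chan->priv;

            if (!priv)              /* already freed via another owner */
                    return;

            chan->priv = NULL;      /* detach before releasing */
            kfree(priv);
    }
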
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 79d4254d1f9bc5..e62ffffe5fb8d4 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -522,7 +522,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+ {
+ cs_dsp_debugfs_clear(dsp);
+ debugfs_remove_recursive(dsp->debugfs_root);
+- dsp->debugfs_root = NULL;
++ dsp->debugfs_root = ERR_PTR(-ENODEV);
+ }
+ EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+ #else
+@@ -796,6 +796,9 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
++ if (ctl->flags && !(ctl->flags & WMFW_CTL_FLAG_WRITEABLE))
++ return -EPERM;
++
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
+@@ -1053,9 +1056,16 @@ struct cs_dsp_coeff_parsed_coeff {
+ int len;
+ };
+
+-static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
++static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail,
++ const u8 **str)
+ {
+- int length;
++ int length, total_field_len;
++
++ /* String fields are at least one __le32 */
++ if (sizeof(__le32) > avail) {
++ *pos = NULL;
++ return 0;
++ }
+
+ switch (bytes) {
+ case 1:
+@@ -1068,10 +1078,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+ return 0;
+ }
+
++ total_field_len = ((length + bytes) + 3) & ~0x03;
++ if ((unsigned int)total_field_len > avail) {
++ *pos = NULL;
++ return 0;
++ }
++
+ if (str)
+ *str = *pos + bytes;
+
+- *pos += ((length + bytes) + 3) & ~0x03;
++ *pos += total_field_len;
+
+ return length;
+ }
+@@ -1096,71 +1112,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
+ return val;
+ }
+
+-static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
+- struct cs_dsp_coeff_parsed_alg *blk)
++static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp,
++ const struct wmfw_region *region,
++ struct cs_dsp_coeff_parsed_alg *blk)
+ {
+ const struct wmfw_adsp_alg_data *raw;
++ unsigned int data_len = le32_to_cpu(region->len);
++ unsigned int pos;
++ const u8 *tmp;
++
++ raw = (const struct wmfw_adsp_alg_data *)region->data;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+- raw = (const struct wmfw_adsp_alg_data *)*data;
+- *data = raw->data;
++ if (sizeof(*raw) > data_len)
++ return -EOVERFLOW;
+
+ blk->id = le32_to_cpu(raw->id);
+ blk->name = raw->name;
+- blk->name_len = strlen(raw->name);
++ blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
+ blk->ncoeff = le32_to_cpu(raw->ncoeff);
++
++ pos = sizeof(*raw);
+ break;
+ default:
+- blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
+- blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
++ if (sizeof(raw->id) > data_len)
++ return -EOVERFLOW;
++
++ tmp = region->data;
++ blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp);
++ pos = tmp - region->data;
++
++ tmp = &region->data[pos];
++ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
+ &blk->name);
+- cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
+- blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
++ if (!tmp)
++ return -EOVERFLOW;
++
++ pos = tmp - region->data;
++ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
++ if (!tmp)
++ return -EOVERFLOW;
++
++ pos = tmp - region->data;
++ if (sizeof(raw->ncoeff) > (data_len - pos))
++ return -EOVERFLOW;
++
++ blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp);
++ pos += sizeof(raw->ncoeff);
+ break;
+ }
+
++ if ((int)blk->ncoeff < 0)
++ return -EOVERFLOW;
++
+ cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
+ cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
++
++ return pos;
+ }
+
+-static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+- struct cs_dsp_coeff_parsed_coeff *blk)
++static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp,
++ const struct wmfw_region *region,
++ unsigned int pos,
++ struct cs_dsp_coeff_parsed_coeff *blk)
+ {
+ const struct wmfw_adsp_coeff_data *raw;
++ unsigned int data_len = le32_to_cpu(region->len);
++ unsigned int blk_len, blk_end_pos;
+ const u8 *tmp;
+- int length;
++
++ raw = (const struct wmfw_adsp_coeff_data *)&region->data[pos];
++ if (sizeof(raw->hdr) > (data_len - pos))
++ return -EOVERFLOW;
++
++ blk_len = le32_to_cpu(raw->hdr.size);
++ if (blk_len > S32_MAX)
++ return -EOVERFLOW;
++
++ if (blk_len > (data_len - pos - sizeof(raw->hdr)))
++ return -EOVERFLOW;
++
++ blk_end_pos = pos + sizeof(raw->hdr) + blk_len;
++
++ blk->offset = le16_to_cpu(raw->hdr.offset);
++ blk->mem_type = le16_to_cpu(raw->hdr.type);
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+- raw = (const struct wmfw_adsp_coeff_data *)*data;
+- *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
++ if (sizeof(*raw) > (data_len - pos))
++ return -EOVERFLOW;
+
+- blk->offset = le16_to_cpu(raw->hdr.offset);
+- blk->mem_type = le16_to_cpu(raw->hdr.type);
+ blk->name = raw->name;
+- blk->name_len = strlen(raw->name);
++ blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
+ blk->ctl_type = le16_to_cpu(raw->ctl_type);
+ blk->flags = le16_to_cpu(raw->flags);
+ blk->len = le32_to_cpu(raw->len);
+ break;
+ default:
+- tmp = *data;
+- blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
+- blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
+- length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
+- blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
++ pos += sizeof(raw->hdr);
++ tmp = &region->data[pos];
++ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
+ &blk->name);
+- cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
+- cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
++ if (!tmp)
++ return -EOVERFLOW;
++
++ pos = tmp - region->data;
++ cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL);
++ if (!tmp)
++ return -EOVERFLOW;
++
++ pos = tmp - region->data;
++ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
++ if (!tmp)
++ return -EOVERFLOW;
++
++ pos = tmp - region->data;
++ if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) >
++ (data_len - pos))
++ return -EOVERFLOW;
++
+ blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
++ pos += sizeof(raw->ctl_type);
+ blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
++ pos += sizeof(raw->flags);
+ blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
+-
+- *data = *data + sizeof(raw->hdr) + length;
+ break;
+ }
+
+@@ -1170,6 +1249,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+ cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
+ cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
+ cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
++
++ return blk_end_pos;
+ }
+
+ static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
+@@ -1193,12 +1274,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
+ struct cs_dsp_alg_region alg_region = {};
+ struct cs_dsp_coeff_parsed_alg alg_blk;
+ struct cs_dsp_coeff_parsed_coeff coeff_blk;
+- const u8 *data = region->data;
+- int i, ret;
++ int i, pos, ret;
++
++ pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk);
++ if (pos < 0)
++ return pos;
+
+- cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
+ for (i = 0; i < alg_blk.ncoeff; i++) {
+- cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
++ pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk);
++ if (pos < 0)
++ return pos;
+
+ switch (coeff_blk.ctl_type) {
+ case WMFW_CTL_TYPE_BYTES:
+@@ -1267,6 +1352,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+
+ adsp1_sizes = (void *)&firmware->data[pos];
++ if (sizeof(*adsp1_sizes) > firmware->size - pos) {
++ cs_dsp_err(dsp, "%s: file truncated\n", file);
++ return 0;
++ }
+
+ cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
+@@ -1283,6 +1372,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
+ const struct wmfw_adsp2_sizes *adsp2_sizes;
+
+ adsp2_sizes = (void *)&firmware->data[pos];
++ if (sizeof(*adsp2_sizes) > firmware->size - pos) {
++ cs_dsp_err(dsp, "%s: file truncated\n", file);
++ return 0;
++ }
+
+ cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
+@@ -1322,7 +1415,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ struct regmap *regmap = dsp->regmap;
+ unsigned int pos = 0;
+ const struct wmfw_header *header;
+- const struct wmfw_adsp1_sizes *adsp1_sizes;
+ const struct wmfw_footer *footer;
+ const struct wmfw_region *region;
+ const struct cs_dsp_region *mem;
+@@ -1338,10 +1430,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+
+ ret = -EINVAL;
+
+- pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+- if (pos >= firmware->size) {
+- cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
+- file, firmware->size);
++ if (sizeof(*header) >= firmware->size) {
++ ret = -EOVERFLOW;
+ goto out_fw;
+ }
+
+@@ -1369,22 +1459,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+
+ pos = sizeof(*header);
+ pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
++ if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) {
++ ret = -EOVERFLOW;
++ goto out_fw;
++ }
+
+ footer = (void *)&firmware->data[pos];
+ pos += sizeof(*footer);
+
+ if (le32_to_cpu(header->len) != pos) {
+- cs_dsp_err(dsp, "%s: unexpected header length %d\n",
+- file, le32_to_cpu(header->len));
++ ret = -EOVERFLOW;
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ le64_to_cpu(footer->timestamp));
+
+- while (pos < firmware->size &&
+- sizeof(*region) < firmware->size - pos) {
++ while (pos < firmware->size) {
++ /* Is there enough data for a complete block header? */
++ if (sizeof(*region) > firmware->size - pos) {
++ ret = -EOVERFLOW;
++ goto out_fw;
++ }
++
+ region = (void *)&(firmware->data[pos]);
++
++ if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) {
++ ret = -EOVERFLOW;
++ goto out_fw;
++ }
++
+ region_name = "Unknown";
+ reg = 0;
+ text = NULL;
+@@ -1441,16 +1545,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ regions, le32_to_cpu(region->len), offset,
+ region_name);
+
+- if (le32_to_cpu(region->len) >
+- firmware->size - pos - sizeof(*region)) {
+- cs_dsp_err(dsp,
+- "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+- file, regions, region_name,
+- le32_to_cpu(region->len), firmware->size);
+- ret = -EINVAL;
+- goto out_fw;
+- }
+-
+ if (text) {
+ memcpy(text, region->data, le32_to_cpu(region->len));
+ cs_dsp_info(dsp, "%s: %s\n", file, text);
+@@ -1501,6 +1595,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ cs_dsp_buf_free(&buf_list);
+ kfree(text);
+
++ if (ret == -EOVERFLOW)
++ cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
++
+ return ret;
+ }
+
+@@ -2068,10 +2165,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ pos = le32_to_cpu(hdr->len);
+
+ blocks = 0;
+- while (pos < firmware->size &&
+- sizeof(*blk) < firmware->size - pos) {
++ while (pos < firmware->size) {
++ /* Is there enough data for a complete block header? */
++ if (sizeof(*blk) > firmware->size - pos) {
++ ret = -EOVERFLOW;
++ goto out_fw;
++ }
++
+ blk = (void *)(&firmware->data[pos]);
+
++ if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) {
++ ret = -EOVERFLOW;
++ goto out_fw;
++ }
++
+ type = le16_to_cpu(blk->type);
+ offset = le16_to_cpu(blk->offset);
+ version = le32_to_cpu(blk->ver) >> 8;
+@@ -2168,17 +2275,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ }
+
+ if (reg) {
+- if (le32_to_cpu(blk->len) >
+- firmware->size - pos - sizeof(*blk)) {
+- cs_dsp_err(dsp,
+- "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+- file, blocks, region_name,
+- le32_to_cpu(blk->len),
+- firmware->size);
+- ret = -EINVAL;
+- goto out_fw;
+- }
+-
+ buf = cs_dsp_buf_alloc(blk->data,
+ le32_to_cpu(blk->len),
+ &buf_list);
+@@ -2218,6 +2314,10 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ kfree(text);
++
++ if (ret == -EOVERFLOW)
++ cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
++
+ return ret;
+ }
+
+@@ -2246,6 +2346,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
+
+ mutex_init(&dsp->pwr_lock);
+
++#ifdef CONFIG_DEBUG_FS
++ /* Ensure this is invalid if client never provides a debugfs root */
++ dsp->debugfs_root = ERR_PTR(-ENODEV);
++#endif
++
+ return 0;
+ }
+
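
Every reworked cs_dsp parser above follows one rule: verify that the bytes remaining in the region can hold a field before consuming it, and return -EOVERFLOW (logged once on the way out) rather than trusting file-supplied lengths. A condensed, hypothetical helper in the same spirit:

    /* Sketch of the bounds-checked read pattern; not a cs_dsp API. */
    static int demo_parse_u32(const u8 *data, size_t len, size_t *pos, u32 *out)
    {
            __le32 raw;

            if (*pos > len || sizeof(raw) > len - *pos)
                    return -EOVERFLOW;

            memcpy(&raw, &data[*pos], sizeof(raw));   /* alignment-safe */
            *out = le32_to_cpu(raw);
            *pos += sizeof(raw);
            return 0;
    }
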
+diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
+index 5f3a3e913d28fb..d19c78a78ae3ac 100644
+--- a/drivers/firmware/dmi-id.c
++++ b/drivers/firmware/dmi-id.c
+@@ -169,9 +169,14 @@ static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
+ return 0;
+ }
+
++static void dmi_dev_release(struct device *dev)
++{
++ kfree(dev);
++}
++
+ static struct class dmi_class = {
+ .name = "dmi",
+- .dev_release = (void(*)(struct device *)) kfree,
++ .dev_release = dmi_dev_release,
+ .dev_uevent = dmi_dev_uevent,
+ };
+
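
Casting kfree() to a dev_release callback only works by accident of calling convention and trips indirect-call (CFI) checking, which is why the hunk above introduces a properly typed wrapper. The shape, reduced to a minimal hypothetical class:

    static void demo_dev_release(struct device *dev)
    {
            kfree(dev);             /* device was allocated with kzalloc() */
    }

    static struct class demo_class = {
            .name        = "demo",
            .dev_release = demo_dev_release,
    };
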
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 015c95a825d315..ac2a5d2d47463f 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -101,6 +101,17 @@ static void dmi_decode_table(u8 *buf,
+ (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+ const struct dmi_header *dm = (const struct dmi_header *)data;
+
++ /*
++		 * If a short entry is found (less than 4 bytes), not only is it
++		 * invalid, but we cannot reliably locate the next entry.
++ */
++ if (dm->length < sizeof(struct dmi_header)) {
++ pr_warn(FW_BUG
++ "Corrupted DMI table, offset %zd (only %d entries processed)\n",
++ data - buf, i);
++ break;
++ }
++
+ /*
+ * We want to know the total length (formatted area and
+ * strings) before decoding to make sure we won't run off the
+diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
+index 83f5bb57fa4c46..83092d93f36a63 100644
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+- int md_size = md->num_pages << EFI_PAGE_SHIFT;
++ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
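
md->num_pages is a u64, so shifting it left by EFI_PAGE_SHIFT is computed in 64 bits, but assigning the result to an int misrepresented regions of 2 GiB and larger. A standalone demo of the truncation (ordinary C, compilable with any cc):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t num_pages = 0x100000;          /* 4 GiB of 4 KiB pages */
            int bad = (int)(num_pages << 12);       /* truncates to 0 */
            uint64_t good = num_pages << 12;

            printf("int: %d, u64: %llu\n", bad, (unsigned long long)good);
            return 0;
    }
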
+diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
+index 3e8d4b51a8140c..97bafb5f703892 100644
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
+ return -ENOMEM;
+ }
+
+- cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
++ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
+index ef0820f1a9246e..59b0d7197b6852 100644
+--- a/drivers/firmware/efi/efi-init.c
++++ b/drivers/firmware/efi/efi-init.c
+@@ -134,15 +134,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+- /*
+- * Special purpose memory is 'soft reserved', which means it
+- * is set aside initially, but can be hotplugged back in or
+- * be assigned to the dax driver after boot.
+- */
+- if (efi_soft_reserve_enabled() &&
+- (md->attribute & EFI_MEMORY_SP))
+- return false;
+-
+ /*
+ * According to the spec, these regions are no longer reserved
+ * after calling ExitBootServices(). However, we can only use
+@@ -187,6 +178,16 @@ static __init void reserve_regions(void)
+ size = npages << PAGE_SHIFT;
+
+ if (is_memory(md)) {
++ /*
++ * Special purpose memory is 'soft reserved', which
++ * means it is set aside initially. Don't add a memblock
++ * for it now so that it can be hotplugged back in or
++ * be assigned to the dax driver after boot.
++ */
++ if (efi_soft_reserve_enabled() &&
++ (md->attribute & EFI_MEMORY_SP))
++ continue;
++
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (!is_usable_memory(md))
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 1974f0ad32badb..2c1095dcc2f2f8 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -199,6 +199,8 @@ static bool generic_ops_supported(void)
+
+ name_size = sizeof(name);
+
++ if (!efi.get_next_variable)
++ return false;
+ status = efi.get_next_variable(&name_size, &name, &guid);
+ if (status == EFI_UNSUPPORTED)
+ return false;
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index a1157c2a717040..a0f1569b790da5 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
+ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
+ -DEFI_HAVE_STRCMP -fno-builtin -fpic \
+ $(call cc-option,-mno-single-pic-base)
+-cflags-$(CONFIG_RISCV) += -fpic
++cflags-$(CONFIG_RISCV) += -fpic -mno-relax
+ cflags-$(CONFIG_LOONGARCH) += -fpie
+
+ cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
+@@ -108,13 +108,6 @@ lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
+ # https://bugs.llvm.org/show_bug.cgi?id=46480
+ STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
+
+-#
+-# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
+-# .bss section, so the .bss section of the EFI stub needs to be included in the
+-# .data section of the compressed kernel to ensure initialization. Rename the
+-# .bss section here so it's easy to pick out in the linker script.
+-#
+-STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc
+ STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
+ STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64
+
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index bfa30625f5d031..3dc2f9aaf08db0 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -24,6 +24,8 @@ static bool efi_noinitrd;
+ static bool efi_nosoftreserve;
+ static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+
++int efi_mem_encrypt;
++
+ bool __pure __efi_soft_reserve_enabled(void)
+ {
+ return !efi_nosoftreserve;
+@@ -75,6 +77,12 @@ efi_status_t efi_parse_options(char const *cmdline)
+ efi_noinitrd = true;
+ } else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
+ efi_no5lvl = true;
++ } else if (IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT) &&
++ !strcmp(param, "mem_encrypt") && val) {
++ if (parse_option_str(val, "on"))
++ efi_mem_encrypt = 1;
++ else if (parse_option_str(val, "off"))
++ efi_mem_encrypt = -1;
+ } else if (!strcmp(param, "efi") && val) {
+ efi_nochunk = parse_option_str(val, "nochunk");
+ efi_novamap |= parse_option_str(val, "novamap");
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index 212687c30d79c4..fc18fd649ed771 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -37,8 +37,8 @@ extern bool efi_no5lvl;
+ extern bool efi_nochunk;
+ extern bool efi_nokaslr;
+ extern int efi_loglevel;
++extern int efi_mem_encrypt;
+ extern bool efi_novamap;
+-
+ extern const efi_system_table_t *efi_system_table;
+
+ typedef union efi_dxe_services_table efi_dxe_services_table_t;
+@@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
+
+ efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed,
+- int memory_type, unsigned long alloc_limit);
++ int memory_type, unsigned long alloc_min,
++ unsigned long alloc_max);
+
+ efi_status_t efi_random_get_seed(void);
+
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index 70e9789ff9de0a..6a337f1f8787b3 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -335,8 +335,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+
+ fail:
+ efi_free(fdt_size, fdt_addr);
+-
+- efi_bs_call(free_pool, priv.runtime_map);
++ if (!efi_novamap)
++ efi_bs_call(free_pool, priv.runtime_map);
+
+ return EFI_LOAD_ERROR;
+ }
+diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
+index 62d63f7a2645bf..1a9808012abd36 100644
+--- a/drivers/firmware/efi/libstub/kaslr.c
++++ b/drivers/firmware/efi/libstub/kaslr.c
+@@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed,
+- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
++ EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
+index 72c71ae201f0da..736b6aae323d35 100644
+--- a/drivers/firmware/efi/libstub/loongarch-stub.c
++++ b/drivers/firmware/efi/libstub/loongarch-stub.c
+@@ -8,10 +8,10 @@
+ #include <asm/efi.h>
+ #include <asm/addrspace.h>
+ #include "efistub.h"
++#include "loongarch-stub.h"
+
+ extern int kernel_asize;
+ extern int kernel_fsize;
+-extern int kernel_offset;
+ extern int kernel_entry;
+
+ efi_status_t handle_kernel_image(unsigned long *image_addr,
+@@ -24,7 +24,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ efi_status_t status;
+ unsigned long kernel_addr = 0;
+
+- kernel_addr = (unsigned long)&kernel_offset - kernel_offset;
++ kernel_addr = (unsigned long)image->image_base;
+
+ status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize,
+ EFI_KIMG_PREFERRED_ADDRESS, efi_get_kimg_min_align(), 0x0);
+@@ -35,9 +35,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ return status;
+ }
+
+-unsigned long kernel_entry_address(void)
++unsigned long kernel_entry_address(unsigned long kernel_addr,
++ efi_loaded_image_t *image)
+ {
+- unsigned long base = (unsigned long)&kernel_offset - kernel_offset;
++ unsigned long base = (unsigned long)image->image_base;
+
+- return (unsigned long)&kernel_entry - base + VMLINUX_LOAD_ADDRESS;
++ return (unsigned long)&kernel_entry - base + kernel_addr;
+ }
+diff --git a/drivers/firmware/efi/libstub/loongarch-stub.h b/drivers/firmware/efi/libstub/loongarch-stub.h
+new file mode 100644
+index 00000000000000..cd015955a0152b
+--- /dev/null
++++ b/drivers/firmware/efi/libstub/loongarch-stub.h
+@@ -0,0 +1,4 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++unsigned long kernel_entry_address(unsigned long kernel_addr,
++ efi_loaded_image_t *image);
+diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
+index 807cba2693fc17..d0ef93551c44f6 100644
+--- a/drivers/firmware/efi/libstub/loongarch.c
++++ b/drivers/firmware/efi/libstub/loongarch.c
+@@ -8,6 +8,7 @@
+ #include <asm/efi.h>
+ #include <asm/addrspace.h>
+ #include "efistub.h"
++#include "loongarch-stub.h"
+
+ typedef void __noreturn (*kernel_entry_t)(bool efi, unsigned long cmdline,
+ unsigned long systab);
+@@ -37,9 +38,10 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
+ return EFI_SUCCESS;
+ }
+
+-unsigned long __weak kernel_entry_address(void)
++unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
++ efi_loaded_image_t *image)
+ {
+- return *(unsigned long *)(PHYSADDR(VMLINUX_LOAD_ADDRESS) + 8);
++ return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr;
+ }
+
+ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+@@ -73,7 +75,7 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
+ csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
+
+- real_kernel_entry = (void *)kernel_entry_address();
++ real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image);
+
+ real_kernel_entry(true, (unsigned long)cmdline_ptr,
+ (unsigned long)efi_system_table);
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index 674a064b8f7adc..c41e7b2091cdd1 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -17,7 +17,7 @@
+ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align_shift,
+- u64 alloc_limit)
++ u64 alloc_min, u64 alloc_max)
+ {
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
+@@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+- alloc_limit);
++ alloc_max);
+ if (region_end < size)
+ return 0;
+
+- first_slot = round_up(md->phys_addr, align);
++ first_slot = round_up(max(md->phys_addr, alloc_min), align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
+@@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size,
+ unsigned long *addr,
+ unsigned long random_seed,
+ int memory_type,
+- unsigned long alloc_limit)
++ unsigned long alloc_min,
++ unsigned long alloc_max)
+ {
+ unsigned long total_slots = 0, target_slot;
+ unsigned long total_mirrored_slots = 0;
+@@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ unsigned long slots;
+
+- slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
++ slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
++ alloc_max);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
+@@ -118,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
+ continue;
+ }
+
+- target = round_up(md->phys_addr, align) + target_slot * align;
++ target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
+ pages = size / EFI_PAGE_SIZE;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
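
With the new lower bound, only addresses in [max(md->phys_addr, alloc_min), alloc_max] count as candidate slots, aligned to the requested power-of-two. The slot arithmetic above, restated as a hedged sketch (align passed directly instead of as a shift):

    static unsigned long demo_num_slots(u64 start, u64 region_size,
                                        u64 alloc_size, u64 align,
                                        u64 alloc_min, u64 alloc_max)
    {
            u64 first, last, end;

            end = min(start + region_size - 1, alloc_max);
            if (end < alloc_size)
                    return 0;

            first = round_up(max(start, alloc_min), align);
            last = round_down(end - alloc_size + 1, align);

            return first > last ? 0 : (last - first) / align + 1;
    }
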
+diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c
+index a51ec201ca3cbe..5d3a1e32d1776b 100644
+--- a/drivers/firmware/efi/libstub/screen_info.c
++++ b/drivers/firmware/efi/libstub/screen_info.c
+@@ -32,6 +32,8 @@ struct screen_info *__alloc_screen_info(void)
+ if (status != EFI_SUCCESS)
+ return NULL;
+
++ memset(si, 0, sizeof(*si));
++
+ status = efi_bs_call(install_configuration_table,
+ &screen_info_guid, si);
+ if (status == EFI_SUCCESS)
+diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
+index 7acbac16eae0b2..95da291c3083ef 100644
+--- a/drivers/firmware/efi/libstub/tpm.c
++++ b/drivers/firmware/efi/libstub/tpm.c
+@@ -115,7 +115,7 @@ void efi_retrieve_tpm2_eventlog(void)
+ }
+
+ /* Allocate space for the logs and copy them. */
+- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
++ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
+
+ if (status != EFI_SUCCESS) {
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 9d5df683f8821c..b2b06d18b7b4a7 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -21,6 +21,8 @@
+ #include "efistub.h"
+ #include "x86-stub.h"
+
++extern char _bss[], _ebss[];
++
+ const efi_system_table_t *efi_system_table;
+ const efi_dxe_services_table_t *efi_dxe_table;
+ static efi_loaded_image_t *image = NULL;
+@@ -223,8 +225,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
+ }
+ }
+
+-void efi_adjust_memory_range_protection(unsigned long start,
+- unsigned long size)
++efi_status_t efi_adjust_memory_range_protection(unsigned long start,
++ unsigned long size)
+ {
+ efi_status_t status;
+ efi_gcd_memory_space_desc_t desc;
+@@ -236,13 +238,26 @@ void efi_adjust_memory_range_protection(unsigned long start,
+ rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+
+ if (memattr != NULL) {
+- efi_call_proto(memattr, clear_memory_attributes, rounded_start,
+- rounded_end - rounded_start, EFI_MEMORY_XP);
+- return;
++ status = efi_call_proto(memattr, set_memory_attributes,
++ rounded_start,
++ rounded_end - rounded_start,
++ EFI_MEMORY_RO);
++ if (status != EFI_SUCCESS) {
++ efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
++ return status;
++ }
++
++ status = efi_call_proto(memattr, clear_memory_attributes,
++ rounded_start,
++ rounded_end - rounded_start,
++ EFI_MEMORY_XP);
++ if (status != EFI_SUCCESS)
++ efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
++ return status;
+ }
+
+ if (efi_dxe_table == NULL)
+- return;
++ return EFI_SUCCESS;
+
+ /*
+ * Don't modify memory region attributes, they are
+@@ -255,7 +270,7 @@ void efi_adjust_memory_range_protection(unsigned long start,
+ status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
+
+ if (status != EFI_SUCCESS)
+- return;
++ break;
+
+ next = desc.base_address + desc.length;
+
+@@ -280,8 +295,10 @@ void efi_adjust_memory_range_protection(unsigned long start,
+ unprotect_start,
+ unprotect_start + unprotect_size,
+ status);
++ break;
+ }
+ }
++ return EFI_SUCCESS;
+ }
+
+ static void setup_unaccepted_memory(void)
+@@ -307,17 +324,20 @@ static void setup_unaccepted_memory(void)
+ efi_err("Memory acceptance protocol failed\n");
+ }
+
++static efi_char16_t *efistub_fw_vendor(void)
++{
++ unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
++
++ return (efi_char16_t *)vendor;
++}
++
+ static const efi_char16_t apple[] = L"Apple";
+
+ static void setup_quirks(struct boot_params *boot_params)
+ {
+- efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+- efi_table_attr(efi_system_table, fw_vendor);
+-
+- if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+- if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+- retrieve_apple_device_properties(boot_params);
+- }
++ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
++ !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
++ retrieve_apple_device_properties(boot_params);
+ }
+
+ /*
+@@ -449,14 +469,17 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+ {
++ efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ struct boot_params *boot_params;
+ struct setup_header *hdr;
+- void *image_base;
+- efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ int options_size = 0;
+ efi_status_t status;
++ unsigned long alloc;
+ char *cmdline_ptr;
+
++ if (efi_is_native())
++ memset(_bss, 0, _ebss - _bss);
++
+ efi_system_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+@@ -469,58 +492,32 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_exit(handle, status);
+ }
+
+- image_base = efi_table_attr(image, image_base);
+-
+- status = efi_allocate_pages(sizeof(struct boot_params),
+- (unsigned long *)&boot_params, ULONG_MAX);
+- if (status != EFI_SUCCESS) {
+- efi_err("Failed to allocate lowmem for boot params\n");
++ status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX);
++ if (status != EFI_SUCCESS)
+ efi_exit(handle, status);
+- }
+-
+- memset(boot_params, 0x0, sizeof(struct boot_params));
+
+- hdr = &boot_params->hdr;
++ boot_params = memset((void *)alloc, 0x0, PARAM_SIZE);
++ hdr = &boot_params->hdr;
+
+- /* Copy the setup header from the second sector to boot_params */
+- memcpy(&hdr->jump, image_base + 512,
+- sizeof(struct setup_header) - offsetof(struct setup_header, jump));
+-
+- /*
+- * Fill out some of the header fields ourselves because the
+- * EFI firmware loader doesn't load the first sector.
+- */
++ /* Assign the setup_header fields that the kernel actually cares about */
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+- hdr->boot_flag = 0xAA55;
+
+ hdr->type_of_loader = 0x21;
++ hdr->initrd_addr_max = INT_MAX;
+
+ /* Convert unicode cmdline to ascii */
+ cmdline_ptr = efi_convert_cmdline(image, &options_size);
+- if (!cmdline_ptr)
+- goto fail;
+-
+- efi_set_u64_split((unsigned long)cmdline_ptr,
+- &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
+-
+- hdr->ramdisk_image = 0;
+- hdr->ramdisk_size = 0;
++ if (!cmdline_ptr) {
++ efi_free(PARAM_SIZE, alloc);
++ efi_exit(handle, EFI_OUT_OF_RESOURCES);
++ }
+
+- /*
+- * Disregard any setup data that was provided by the bootloader:
+- * setup_data could be pointing anywhere, and we have no way of
+- * authenticating or validating the payload.
+- */
+- hdr->setup_data = 0;
++ efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
++ &boot_params->ext_cmd_line_ptr);
+
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+ /* not reached */
+-
+-fail:
+- efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
+-
+- efi_exit(handle, status);
+ }
+
+ static void add_e820ext(struct boot_params *params,
+@@ -786,6 +783,26 @@ static void error(char *str)
+ efi_warn("Decompression failed: %s\n", str);
+ }
+
++static const char *cmdline_memmap_override;
++
++static efi_status_t parse_options(const char *cmdline)
++{
++ static const char opts[][14] = {
++ "mem=", "memmap=", "efi_fake_mem=", "hugepages="
++ };
++
++ for (int i = 0; i < ARRAY_SIZE(opts); i++) {
++ const char *p = strstr(cmdline, opts[i]);
++
++ if (p == cmdline || (p > cmdline && isspace(p[-1]))) {
++ cmdline_memmap_override = opts[i];
++ break;
++ }
++ }
++
++ return efi_parse_options(cmdline);
++}
++
+ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+ {
+ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+@@ -799,15 +816,34 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
++ static const efi_char16_t ami[] = L"American Megatrends";
+
+ efi_get_seed(seed, sizeof(seed));
+
+ virt_addr += (range * seed[1]) >> 32;
+ virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
++
++ /*
++ * Older Dell systems with AMI UEFI firmware v2.0 may hang
++ * while decompressing the kernel if physical address
++ * randomization is enabled.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=218173
++ */
++ if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
++ !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
++ efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
++ seed[0] = 0;
++ } else if (cmdline_memmap_override) {
++ efi_info("%s detected on the kernel command line - disabling physical KASLR\n",
++ cmdline_memmap_override);
++ seed[0] = 0;
++ }
+ }
+
+ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
+ seed[0], EFI_LOADER_CODE,
++ LOAD_PHYSICAL_ADDR,
+ EFI_X86_KERNEL_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ return status;
+@@ -820,9 +856,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+
+ *kernel_entry = addr + entry;
+
+- efi_adjust_memory_range_protection(addr, kernel_total_size);
+-
+- return EFI_SUCCESS;
++ return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ }
+
+ static void __noreturn enter_kernel(unsigned long kernel_addr,
+@@ -878,7 +912,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ }
+
+ #ifdef CONFIG_CMDLINE_BOOL
+- status = efi_parse_options(CONFIG_CMDLINE);
++ status = parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+@@ -887,13 +921,16 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+- status = efi_parse_options((char *)cmdline_paddr);
++ status = parse_options((char *)cmdline_paddr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+ }
+
++ if (efi_mem_encrypt > 0)
++ hdr->xloadflags |= XLF_MEM_ENCRYPTION;
++
+ status = efi_decompress_kernel(&kernel_entry);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to decompress kernel\n");
+@@ -968,8 +1005,6 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+ {
+- extern char _bss[], _ebss[];
+-
+ memset(_bss, 0, _ebss - _bss);
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+ }
+diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
+index 2748bca192dfb2..4433d0f97441ca 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.h
++++ b/drivers/firmware/efi/libstub/x86-stub.h
+@@ -7,8 +7,8 @@ extern struct boot_params *boot_params_pointer asm("boot_params");
+ extern void trampoline_32bit_src(void *, bool);
+ extern const u16 trampoline_ljmp_imm_offset;
+
+-void efi_adjust_memory_range_protection(unsigned long start,
+- unsigned long size);
++efi_status_t efi_adjust_memory_range_protection(unsigned long start,
++ unsigned long size);
+
+ #ifdef CONFIG_X86_64
+ efi_status_t efi_setup_5level_paging(void);
+diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
+index bdb17eac0cb401..1ceace95675868 100644
+--- a/drivers/firmware/efi/libstub/zboot.c
++++ b/drivers/firmware/efi/libstub/zboot.c
+@@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
+ }
+
+ status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
+- seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
++ seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory\n");
+ goto free_cmdline;
+diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
+index ac8c0ef851581f..af2c82f7bd9024 100644
+--- a/drivers/firmware/efi/libstub/zboot.lds
++++ b/drivers/firmware/efi/libstub/zboot.lds
+@@ -41,6 +41,7 @@ SECTIONS
+ }
+
+ /DISCARD/ : {
++ *(.discard .discard.*)
+ *(.modinfo .init.modinfo)
+ }
+ }
+diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
+index a1180461a445cf..77dd20f9df312c 100644
+--- a/drivers/firmware/efi/memmap.c
++++ b/drivers/firmware/efi/memmap.c
+@@ -15,10 +15,6 @@
+ #include <asm/early_ioremap.h>
+ #include <asm/efi.h>
+
+-#ifndef __efi_memmap_free
+-#define __efi_memmap_free(phys, size, flags) do { } while (0)
+-#endif
+-
+ /**
+ * __efi_memmap_init - Common code for mapping the EFI memory map
+ * @data: EFI memory map data
+@@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
+ return -ENOMEM;
+ }
+
+- if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
+- __efi_memmap_free(efi.memmap.phys_map,
+- efi.memmap.desc_size * efi.memmap.nr_map,
+- efi.memmap.flags);
+-
+ map.phys_map = data->phys_map;
+ map.nr_map = data->size / data->desc_size;
+ map.map_end = map.map + data->size;
+diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
+index 09525fb5c240e6..01f0f90ea41831 100644
+--- a/drivers/firmware/efi/riscv-runtime.c
++++ b/drivers/firmware/efi/riscv-runtime.c
+@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+- int md_size = md->num_pages << EFI_PAGE_SHIFT;
++ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
+index 135278ddaf627b..6c3d84d9bcc16f 100644
+--- a/drivers/firmware/efi/unaccepted_memory.c
++++ b/drivers/firmware/efi/unaccepted_memory.c
+@@ -3,6 +3,7 @@
+ #include <linux/efi.h>
+ #include <linux/memblock.h>
+ #include <linux/spinlock.h>
++#include <linux/nmi.h>
+ #include <asm/unaccepted_memory.h>
+
+ /* Protects unaccepted memory bitmap and accepting_list */
+@@ -100,7 +101,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
+ * overlap on physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+- if (entry->end < range.start)
++ if (entry->end <= range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
+@@ -148,6 +149,9 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
+ }
+
+ list_del(&range.list);
++
++ touch_softlockup_watchdog();
++
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ }
+
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index d9629ff8786199..2328ca58bba61f 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
+
+ static int psci_system_suspend(unsigned long unused)
+ {
++ int err;
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+
+- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
++ err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ pa_cpu_resume, 0, 0);
++ return psci_to_linux_errno(err);
+ }
+
+ static int psci_system_suspend_enter(suspend_state_t state)
+diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
+index 16cf88acfa8ee0..0a2a2c794d0eda 100644
+--- a/drivers/firmware/qcom_scm-smc.c
++++ b/drivers/firmware/qcom_scm-smc.c
+@@ -71,7 +71,7 @@ int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+ struct arm_smccc_res get_wq_res;
+ struct arm_smccc_args get_wq_ctx = {0};
+
+- get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
++ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index 06fe8aca870d7b..7af59985f1c1f9 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -167,6 +167,12 @@ static enum qcom_scm_convention __get_convention(void)
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
+
++ /*
++ * Per the "SMC calling convention specification", the 64-bit calling
++	 * convention can only be used when the client is 64-bit; otherwise
++	 * the system will encounter undefined behaviour.
++ */
++#if IS_ENABLED(CONFIG_ARM64)
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+@@ -187,6 +193,7 @@ static enum qcom_scm_convention __get_convention(void)
+ forced = true;
+ goto found;
+ }
++#endif
+
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+@@ -491,13 +498,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+
+ ret = qcom_scm_bw_enable();
+ if (ret)
+- return ret;
++ goto disable_clk;
+
+ desc.args[1] = mdata_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+ qcom_scm_bw_disable();
++
++disable_clk:
+ qcom_scm_clk_disable();
+
+ out:
+@@ -559,10 +567,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+
+ ret = qcom_scm_bw_enable();
+ if (ret)
+- return ret;
++ goto disable_clk;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
++
++disable_clk:
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+@@ -594,10 +604,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
+
+ ret = qcom_scm_bw_enable();
+ if (ret)
+- return ret;
++ goto disable_clk;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
++
++disable_clk:
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+@@ -628,11 +640,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
+
+ ret = qcom_scm_bw_enable();
+ if (ret)
+- return ret;
++ goto disable_clk;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+ qcom_scm_bw_disable();
++
++disable_clk:
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+@@ -1326,7 +1339,7 @@ static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+ */
+ bool qcom_scm_is_available(void)
+ {
+- return !!__scm;
++ return !!READ_ONCE(__scm);
+ }
+ EXPORT_SYMBOL_GPL(qcom_scm_is_available);
+
+@@ -1407,10 +1420,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ if (!scm)
+ return -ENOMEM;
+
++ scm->dev = &pdev->dev;
+ ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ if (ret < 0)
+ return ret;
+
++ init_completion(&scm->waitq_comp);
+ mutex_init(&scm->scm_bw_lock);
+
+ scm->path = devm_of_icc_get(&pdev->dev, NULL);
+@@ -1442,10 +1457,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
+- __scm = scm;
+- __scm->dev = &pdev->dev;
+-
+- init_completion(&__scm->waitq_comp);
++ /* Let all above stores be available after this */
++ smp_store_release(&__scm, scm);
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
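
The probe path above now fills in every field first and publishes the pointer with smp_store_release(), while qcom_scm_is_available() reads it with READ_ONCE(); a reader therefore sees either NULL or a fully initialised object. The pairing in miniature (hypothetical names):

    struct demo_ctx { bool ready; };

    static struct demo_ctx *demo_ctx;               /* published pointer */

    static void demo_publish(struct demo_ctx *ctx)
    {
            ctx->ready = true;                      /* all prior stores... */
            smp_store_release(&demo_ctx, ctx);      /* ...ordered before this */
    }

    static bool demo_is_available(void)
    {
            return !!READ_ONCE(demo_ctx);
    }
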
+diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
+index f66efaa5196d9d..428ae54d3196c2 100644
+--- a/drivers/firmware/raspberrypi.c
++++ b/drivers/firmware/raspberrypi.c
+@@ -9,6 +9,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/kref.h>
+ #include <linux/mailbox_client.h>
++#include <linux/mailbox_controller.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
+@@ -97,8 +98,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
+ if (size & 3)
+ return -EINVAL;
+
+- buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+- GFP_ATOMIC);
++ buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
++ &bus_addr, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+@@ -126,7 +127,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
+ ret = -EINVAL;
+ }
+
+- dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
++ dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
+
+ return ret;
+ }
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 82fcfd29bc4d29..defd7a36cb08a4 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -77,6 +77,8 @@ static __init int sysfb_init(void)
+ bool compatible;
+ int ret = 0;
+
++ screen_info_apply_fixups();
++
+ mutex_lock(&disable_lock);
+ if (disabled)
+ goto unlock_mutex;
+@@ -128,4 +130,4 @@ static __init int sysfb_init(void)
+ }
+
+ /* must execute after PCI subsystem for EFI quirks */
+-subsys_initcall_sync(sysfb_init);
++device_initcall(sysfb_init);
+diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
+index 6dfe3d34109ee9..b20d04950d99b3 100644
+--- a/drivers/firmware/tegra/bpmp-debugfs.c
++++ b/drivers/firmware/tegra/bpmp-debugfs.c
+@@ -77,7 +77,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
+
+ root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
+ if (!root_path_buf)
+- goto out;
++ return NULL;
+
+ root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+ root_path_buf_len);
+diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
+index 51d062e0c3f129..c3a1dc3449617f 100644
+--- a/drivers/firmware/tegra/bpmp.c
++++ b/drivers/firmware/tegra/bpmp.c
+@@ -24,12 +24,6 @@
+ #define MSG_RING BIT(1)
+ #define TAG_SZ 32
+
+-static inline struct tegra_bpmp *
+-mbox_client_to_bpmp(struct mbox_client *client)
+-{
+- return container_of(client, struct tegra_bpmp, mbox.client);
+-}
+-
+ static inline const struct tegra_bpmp_ops *
+ channel_to_ops(struct tegra_bpmp_channel *channel)
+ {
+@@ -313,6 +307,8 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+ }
+
++static int __maybe_unused tegra_bpmp_resume(struct device *dev);
++
+ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+ {
+@@ -325,6 +321,14 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
++ if (bpmp->suspended) {
++ /* Reset BPMP IPC channels during resume based on flags passed */
++ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
++ tegra_bpmp_resume(bpmp->dev);
++ else
++ return -EAGAIN;
++ }
++
+ channel = bpmp->tx_channel;
+
+ spin_lock(&bpmp->atomic_tx_lock);
+@@ -364,6 +368,14 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
++ if (bpmp->suspended) {
++ /* Reset BPMP IPC channels during resume based on flags passed */
++ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
++ tegra_bpmp_resume(bpmp->dev);
++ else
++ return -EAGAIN;
++ }
++
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+@@ -796,10 +808,21 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
+ return err;
+ }
+
++static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
++{
++ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
++
++ bpmp->suspended = true;
++
++ return 0;
++}
++
+ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+ {
+ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+
++ bpmp->suspended = false;
++
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+@@ -807,6 +830,7 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+ }
+
+ static const struct dev_pm_ops tegra_bpmp_pm_ops = {
++ .suspend_noirq = tegra_bpmp_suspend,
+ .resume_noirq = tegra_bpmp_resume,
+ };
+
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index 26a37f47f4ca54..3b4c9355cb60f6 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ {
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+- char debug_name[50] = "ti_sci_debug@";
++ char debug_name[50];
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+@@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ /* Setup NULL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+- info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
+- sizeof(debug_name) -
+- sizeof("ti_sci_debug@")),
+- 0444, NULL, info, &ti_sci_debug_fops);
++ snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
++ dev_name(dev));
++ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
++ &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+@@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
+ return 0;
+ }
+
+-/**
+- * ti_sci_debugfs_destroy() - clean up log debug file
+- * @pdev: platform device pointer
+- * @info: Pointer to SCI entity information
+- */
+-static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+- struct ti_sci_info *info)
+-{
+- if (IS_ERR(info->debug_region))
+- return;
+-
+- debugfs_remove(info->d);
+-}
+ #else /* CONFIG_DEBUG_FS */
+ static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+@@ -3449,43 +3436,12 @@ static int ti_sci_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int ti_sci_remove(struct platform_device *pdev)
+-{
+- struct ti_sci_info *info;
+- struct device *dev = &pdev->dev;
+- int ret = 0;
+-
+- of_platform_depopulate(dev);
+-
+- info = platform_get_drvdata(pdev);
+-
+- if (info->nb.notifier_call)
+- unregister_restart_handler(&info->nb);
+-
+- mutex_lock(&ti_sci_list_mutex);
+- if (info->users)
+- ret = -EBUSY;
+- else
+- list_del(&info->node);
+- mutex_unlock(&ti_sci_list_mutex);
+-
+- if (!ret) {
+- ti_sci_debugfs_destroy(pdev, info);
+-
+- /* Safe to free channels since no more users */
+- mbox_free_channel(info->chan_tx);
+- mbox_free_channel(info->chan_rx);
+- }
+-
+- return ret;
+-}
+-
+ static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+- .remove = ti_sci_remove,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
++ .suppress_bind_attrs = true,
+ },
+ };
+ module_platform_driver(ti_sci_driver);
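
strncat()'s size argument bounds the bytes appended, not the destination buffer, so deriving it from sizeof(debug_name) invited overflow once dev_name() grew long enough; snprintf() bounds the whole result and always NUL-terminates. A standalone demo of the fixed idiom (the device name string here is made up):

    #include <stdio.h>

    int main(void)
    {
            char name[50];

            snprintf(name, sizeof(name), "ti_sci_debug@%s",
                     "44083000.system-controller");
            puts(name);     /* truncated safely if ever too long */
            return 0;
    }
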
+diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
+index 2de0fb139ce176..3d354ebd38c285 100644
+--- a/drivers/firmware/turris-mox-rwtm.c
++++ b/drivers/firmware/turris-mox-rwtm.c
+@@ -2,7 +2,7 @@
+ /*
+ * Turris Mox rWTM firmware driver
+ *
+- * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
++ * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+ */
+
+ #include <linux/armada-37xx-rwtm-mailbox.h>
+@@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
+ struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
+ struct armada_37xx_rwtm_rx_msg *msg = data;
+
++ if (completion_done(&rwtm->cmd_done))
++ return;
++
+ rwtm->reply = *msg;
+ complete(&rwtm->cmd_done);
+ }
+@@ -199,9 +202,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ if (ret < 0)
+ return ret;
+
+- ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+- if (ret < 0)
+- return ret;
++ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
++ return -ETIMEDOUT;
+
+ ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+ if (ret == -ENODATA) {
+@@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
+ if (ret < 0)
+ return ret;
+
+- ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+- if (ret < 0)
+- return ret;
++ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
++ return -ETIMEDOUT;
+
+ ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+ if (ret == -ENODATA) {
+@@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm)
+ if (ret < 0)
+ return ret;
+
+- ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+- if (ret < 0)
+- return ret;
++ if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
++ return -ETIMEDOUT;
+
+ return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ }
+@@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, rwtm);
+
+ mutex_init(&rwtm->busy);
++ init_completion(&rwtm->cmd_done);
+
+ rwtm->mbox_client.dev = dev;
+ rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
+@@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
+ goto remove_files;
+ }
+
+- init_completion(&rwtm->cmd_done);
+-
+ ret = mox_get_board_info(rwtm);
+ if (ret < 0)
+ dev_warn(dev, "Cannot read board information: %i\n", ret);
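The three wait hunks above fix a broken error check: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and the remaining jiffies (at least 1) on success, so it can never be negative and "if (ret < 0)" never fired. The rx callback also drops late replies via completion_done(), and init_completion() moves before the mailbox channel is requested, closing the window where a reply could land on an uninitialised completion. A sketch of the corrected wait pattern; mox_wait_reply() is an illustrative helper, not a function from the patch:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int mox_wait_reply(struct completion *done)
    {
            /* returns 0 on timeout, remaining jiffies (>= 1) otherwise */
            if (!wait_for_completion_timeout(done, HZ / 2))
                    return -ETIMEDOUT;
            return 0;
    }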
+diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
+index 98b8fd16183e41..80cac3a5f97678 100644
+--- a/drivers/fpga/dfl-pci.c
++++ b/drivers/fpga/dfl-pci.c
+@@ -78,6 +78,7 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
+ #define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
+ #define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
+ /* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
++#define PCIE_SUBDEVICE_ID_INTEL_D5005 0x138d
+ #define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
+ #define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
+ #define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
+@@ -101,6 +102,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
+ {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
+ {PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
++ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index a024be2b84e291..83d35fbb824505 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -55,33 +55,26 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
+ }
+ EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+
+-static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
++static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev,
+ struct fpga_image_info *info)
+ {
+ struct fpga_bridge *bridge;
+- int ret = -ENODEV;
+
+- bridge = to_fpga_bridge(dev);
++ bridge = to_fpga_bridge(bridge_dev);
+
+ bridge->info = info;
+
+- if (!mutex_trylock(&bridge->mutex)) {
+- ret = -EBUSY;
+- goto err_dev;
+- }
++ if (!mutex_trylock(&bridge->mutex))
++ return ERR_PTR(-EBUSY);
+
+- if (!try_module_get(dev->parent->driver->owner))
+- goto err_ll_mod;
++ if (!try_module_get(bridge->br_ops_owner)) {
++ mutex_unlock(&bridge->mutex);
++ return ERR_PTR(-ENODEV);
++ }
+
+ dev_dbg(&bridge->dev, "get\n");
+
+ return bridge;
+-
+-err_ll_mod:
+- mutex_unlock(&bridge->mutex);
+-err_dev:
+- put_device(dev);
+- return ERR_PTR(ret);
+ }
+
+ /**
+@@ -98,13 +91,18 @@ static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ struct fpga_image_info *info)
+ {
+- struct device *dev;
++ struct fpga_bridge *bridge;
++ struct device *bridge_dev;
+
+- dev = class_find_device_by_of_node(&fpga_bridge_class, np);
+- if (!dev)
++ bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np);
++ if (!bridge_dev)
+ return ERR_PTR(-ENODEV);
+
+- return __fpga_bridge_get(dev, info);
++ bridge = __fpga_bridge_get(bridge_dev, info);
++ if (IS_ERR(bridge))
++ put_device(bridge_dev);
++
++ return bridge;
+ }
+ EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+
+@@ -125,6 +123,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
+ struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info)
+ {
++ struct fpga_bridge *bridge;
+ struct device *bridge_dev;
+
+ bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev,
+@@ -132,7 +131,11 @@ struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ if (!bridge_dev)
+ return ERR_PTR(-ENODEV);
+
+- return __fpga_bridge_get(bridge_dev, info);
++ bridge = __fpga_bridge_get(bridge_dev, info);
++ if (IS_ERR(bridge))
++ put_device(bridge_dev);
++
++ return bridge;
+ }
+ EXPORT_SYMBOL_GPL(fpga_bridge_get);
+
+@@ -146,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge)
+ dev_dbg(&bridge->dev, "put\n");
+
+ bridge->info = NULL;
+- module_put(bridge->dev.parent->driver->owner);
++ module_put(bridge->br_ops_owner);
+ mutex_unlock(&bridge->mutex);
+ put_device(&bridge->dev);
+ }
+@@ -316,18 +319,19 @@ static struct attribute *fpga_bridge_attrs[] = {
+ ATTRIBUTE_GROUPS(fpga_bridge);
+
+ /**
+- * fpga_bridge_register - create and register an FPGA Bridge device
++ * __fpga_bridge_register - create and register an FPGA Bridge device
+ * @parent: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
++ * @owner: owner module containing the br_ops
+ *
+ * Return: struct fpga_bridge pointer or ERR_PTR()
+ */
+ struct fpga_bridge *
+-fpga_bridge_register(struct device *parent, const char *name,
+- const struct fpga_bridge_ops *br_ops,
+- void *priv)
++__fpga_bridge_register(struct device *parent, const char *name,
++ const struct fpga_bridge_ops *br_ops,
++ void *priv, struct module *owner)
+ {
+ struct fpga_bridge *bridge;
+ int id, ret;
+@@ -357,6 +361,7 @@ fpga_bridge_register(struct device *parent, const char *name,
+
+ bridge->name = name;
+ bridge->br_ops = br_ops;
++ bridge->br_ops_owner = owner;
+ bridge->priv = priv;
+
+ bridge->dev.groups = br_ops->groups;
+@@ -386,7 +391,7 @@ fpga_bridge_register(struct device *parent, const char *name,
+
+ return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_bridge_register);
++EXPORT_SYMBOL_GPL(__fpga_bridge_register);
+
+ /**
+ * fpga_bridge_unregister - unregister an FPGA bridge
+diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
+index 06651389c59262..0f4035b089a2ea 100644
+--- a/drivers/fpga/fpga-mgr.c
++++ b/drivers/fpga/fpga-mgr.c
+@@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(fpga_mgr);
+
+-static struct fpga_manager *__fpga_mgr_get(struct device *dev)
++static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev)
+ {
+ struct fpga_manager *mgr;
+
+- mgr = to_fpga_manager(dev);
++ mgr = to_fpga_manager(mgr_dev);
+
+- if (!try_module_get(dev->parent->driver->owner))
+- goto err_dev;
++ if (!try_module_get(mgr->mops_owner))
++ mgr = ERR_PTR(-ENODEV);
+
+ return mgr;
+-
+-err_dev:
+- put_device(dev);
+- return ERR_PTR(-ENODEV);
+ }
+
+ static int fpga_mgr_dev_match(struct device *dev, const void *data)
+@@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
+ */
+ struct fpga_manager *fpga_mgr_get(struct device *dev)
+ {
+- struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev,
+- fpga_mgr_dev_match);
++ struct fpga_manager *mgr;
++ struct device *mgr_dev;
++
++ mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match);
+ if (!mgr_dev)
+ return ERR_PTR(-ENODEV);
+
+- return __fpga_mgr_get(mgr_dev);
++ mgr = __fpga_mgr_get(mgr_dev);
++ if (IS_ERR(mgr))
++ put_device(mgr_dev);
++
++ return mgr;
+ }
+ EXPORT_SYMBOL_GPL(fpga_mgr_get);
+
+@@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get);
+ */
+ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+ {
+- struct device *dev;
++ struct fpga_manager *mgr;
++ struct device *mgr_dev;
+
+- dev = class_find_device_by_of_node(&fpga_mgr_class, node);
+- if (!dev)
++ mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node);
++ if (!mgr_dev)
+ return ERR_PTR(-ENODEV);
+
+- return __fpga_mgr_get(dev);
++ mgr = __fpga_mgr_get(mgr_dev);
++ if (IS_ERR(mgr))
++ put_device(mgr_dev);
++
++ return mgr;
+ }
+ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
+
+@@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
+ */
+ void fpga_mgr_put(struct fpga_manager *mgr)
+ {
+- module_put(mgr->dev.parent->driver->owner);
++ module_put(mgr->mops_owner);
+ put_device(&mgr->dev);
+ }
+ EXPORT_SYMBOL_GPL(fpga_mgr_put);
+@@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
+ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
+
+ /**
+- * fpga_mgr_register_full - create and register an FPGA Manager device
++ * __fpga_mgr_register_full - create and register an FPGA Manager device
+ * @parent: fpga manager device from pdev
+ * @info: parameters for fpga manager
++ * @owner: owner module containing the ops
+ *
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register_full() instead is recommended.
+@@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
+ * Return: pointer to struct fpga_manager pointer or ERR_PTR()
+ */
+ struct fpga_manager *
+-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
++__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++ struct module *owner)
+ {
+ const struct fpga_manager_ops *mops = info->mops;
+ struct fpga_manager *mgr;
+@@ -804,6 +813,8 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
+
+ mutex_init(&mgr->ref_mutex);
+
++ mgr->mops_owner = owner;
++
+ mgr->name = info->name;
+ mgr->mops = info->mops;
+ mgr->priv = info->priv;
+@@ -841,14 +852,15 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
+
+ return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
++EXPORT_SYMBOL_GPL(__fpga_mgr_register_full);
+
+ /**
+- * fpga_mgr_register - create and register an FPGA Manager device
++ * __fpga_mgr_register - create and register an FPGA Manager device
+ * @parent: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
++ * @owner: owner module containing the ops
+ *
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register() instead is recommended. This simple
+@@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
+ * Return: pointer to struct fpga_manager pointer or ERR_PTR()
+ */
+ struct fpga_manager *
+-fpga_mgr_register(struct device *parent, const char *name,
+- const struct fpga_manager_ops *mops, void *priv)
++__fpga_mgr_register(struct device *parent, const char *name,
++ const struct fpga_manager_ops *mops, void *priv, struct module *owner)
+ {
+ struct fpga_manager_info info = { 0 };
+
+@@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name,
+ info.mops = mops;
+ info.priv = priv;
+
+- return fpga_mgr_register_full(parent, &info);
++ return __fpga_mgr_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(fpga_mgr_register);
++EXPORT_SYMBOL_GPL(__fpga_mgr_register);
+
+ /**
+ * fpga_mgr_unregister - unregister an FPGA manager
+@@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
+ }
+
+ /**
+- * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
++ * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
+ * @parent: fpga manager device from pdev
+ * @info: parameters for fpga manager
++ * @owner: owner module containing the ops
+ *
+ * Return: fpga manager pointer on success, negative error code otherwise.
+ *
+@@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
+ * function will be called automatically when the managing device is detached.
+ */
+ struct fpga_manager *
+-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
++__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++ struct module *owner)
+ {
+ struct fpga_mgr_devres *dr;
+ struct fpga_manager *mgr;
+@@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+- mgr = fpga_mgr_register_full(parent, info);
++ mgr = __fpga_mgr_register_full(parent, info, owner);
+ if (IS_ERR(mgr)) {
+ devres_free(dr);
+ return mgr;
+@@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
+
+ return mgr;
+ }
+-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
++EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full);
+
+ /**
+- * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
++ * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * @parent: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
++ * @owner: owner module containing the ops
+ *
+ * Return: fpga manager pointer on success, negative error code otherwise.
+ *
+@@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+ * device is detached.
+ */
+ struct fpga_manager *
+-devm_fpga_mgr_register(struct device *parent, const char *name,
+- const struct fpga_manager_ops *mops, void *priv)
++__devm_fpga_mgr_register(struct device *parent, const char *name,
++ const struct fpga_manager_ops *mops, void *priv,
++ struct module *owner)
+ {
+ struct fpga_manager_info info = { 0 };
+
+@@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name,
+ info.mops = mops;
+ info.priv = priv;
+
+- return devm_fpga_mgr_register_full(parent, &info);
++ return __devm_fpga_mgr_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
++EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register);
+
+ static void fpga_mgr_dev_release(struct device *dev)
+ {
+diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
+index b364a929425ce9..753cd142503e0e 100644
+--- a/drivers/fpga/fpga-region.c
++++ b/drivers/fpga/fpga-region.c
+@@ -53,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
+ }
+
+ get_device(dev);
+- if (!try_module_get(dev->parent->driver->owner)) {
++ if (!try_module_get(region->ops_owner)) {
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+ return ERR_PTR(-ENODEV);
+@@ -75,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region)
+
+ dev_dbg(dev, "put\n");
+
+- module_put(dev->parent->driver->owner);
++ module_put(region->ops_owner);
+ put_device(dev);
+ mutex_unlock(&region->mutex);
+ }
+@@ -181,14 +181,16 @@ static struct attribute *fpga_region_attrs[] = {
+ ATTRIBUTE_GROUPS(fpga_region);
+
+ /**
+- * fpga_region_register_full - create and register an FPGA Region device
++ * __fpga_region_register_full - create and register an FPGA Region device
+ * @parent: device parent
+ * @info: parameters for FPGA Region
++ * @owner: module containing the get_bridges function
+ *
+ * Return: struct fpga_region or ERR_PTR()
+ */
+ struct fpga_region *
+-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
++__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
++ struct module *owner)
+ {
+ struct fpga_region *region;
+ int id, ret = 0;
+@@ -213,6 +215,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
+ region->compat_id = info->compat_id;
+ region->priv = info->priv;
+ region->get_bridges = info->get_bridges;
++ region->ops_owner = owner;
+
+ mutex_init(&region->mutex);
+ INIT_LIST_HEAD(&region->bridge_list);
+@@ -241,13 +244,14 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
+
+ return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_region_register_full);
++EXPORT_SYMBOL_GPL(__fpga_region_register_full);
+
+ /**
+- * fpga_region_register - create and register an FPGA Region device
++ * __fpga_region_register - create and register an FPGA Region device
+ * @parent: device parent
+ * @mgr: manager that programs this region
+ * @get_bridges: optional function to get bridges to a list
++ * @owner: module containing the get_bridges function
+ *
+ * This simple version of the register function should be sufficient for most users.
+ * The fpga_region_register_full() function is available for users that need to
+@@ -256,17 +260,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full);
+ * Return: struct fpga_region or ERR_PTR()
+ */
+ struct fpga_region *
+-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+- int (*get_bridges)(struct fpga_region *))
++__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
++ int (*get_bridges)(struct fpga_region *), struct module *owner)
+ {
+ struct fpga_region_info info = { 0 };
+
+ info.mgr = mgr;
+ info.get_bridges = get_bridges;
+
+- return fpga_region_register_full(parent, &info);
++ return __fpga_region_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(fpga_region_register);
++EXPORT_SYMBOL_GPL(__fpga_region_register);
+
+ /**
+ * fpga_region_unregister - unregister an FPGA region
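All three fpga hunks replace module_put()/try_module_get() on dev->parent->driver->owner with an owner module recorded at registration time, presumably because the parent's driver pointer is unsafe to dereference once the parent can be unbound concurrently. The registration functions gain a __ prefix and a struct module argument; the matching header change is not visible in these hunks, but the usual kernel pattern is to keep the old names as macros that capture the caller's module automatically:

    /* Sketch of the expected header-side wrapper (the exact header change
     * is not part of this hunk):
     */
    #define fpga_mgr_register(parent, name, mops, priv) \
            __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)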
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 673bafb8be5887..ebd4e113dc2654 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -691,7 +691,8 @@ config GPIO_UNIPHIER
+ Say yes here to support UniPhier GPIOs.
+
+ config GPIO_VF610
+- def_bool y
++ bool "VF610 GPIO support"
++ default y if SOC_VF610
+ depends on ARCH_MXC
+ select GPIOLIB_IRQCHIP
+ help
+@@ -1506,7 +1507,7 @@ config GPIO_TPS68470
+ are "output only" GPIOs.
+
+ config GPIO_TQMX86
+- tristate "TQ-Systems QTMX86 GPIO"
++ tristate "TQ-Systems TQMx86 GPIO"
+ depends on MFD_TQMX86 || COMPILE_TEST
+ depends on HAS_IOPORT_MAP
+ select GPIOLIB_IRQCHIP
+diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
+index e00c333105170f..753e7be039e4d9 100644
+--- a/drivers/gpio/gpio-74x164.c
++++ b/drivers/gpio/gpio-74x164.c
+@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
+ if (IS_ERR(chip->gpiod_oe))
+ return PTR_ERR(chip->gpiod_oe);
+
+- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+-
+ spi_set_drvdata(spi, chip);
+
+ chip->gpio_chip.label = spi->modalias;
+@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
+ goto exit_destroy;
+ }
+
++ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
++
+ ret = gpiochip_add_data(&chip->gpio_chip, chip);
+ if (!ret)
+ return 0;
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index 58f107194fdafd..76468e6a2899a8 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -406,6 +406,8 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ gpio->dcache[GPIO_BANK(offset)] = reg;
+
+ iowrite32(reg, addr);
++ /* Flush write */
++ ioread32(addr);
+ }
+
+ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
+@@ -1191,7 +1193,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
+ if (!gpio_id)
+ return -EINVAL;
+
+- gpio->clk = of_clk_get(pdev->dev.of_node, 0);
++ gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(gpio->clk)) {
+ dev_warn(&pdev->dev,
+ "Failed to get clock from devicetree, debouncing disabled\n");
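Two independent aspeed fixes above: __aspeed_gpio_set() reads the register back so the posted MMIO write is flushed before the caller proceeds, and probe switches from of_clk_get(), whose reference was never released, to the managed devm_clk_get_enabled(). A sketch of the managed-clock idiom, assuming a generic dev pointer:

    #include <linux/clk.h>

    /* devm_clk_get_enabled() combines devm_clk_get() with
     * clk_prepare_enable(); the clock is disabled, unprepared and put
     * automatically on driver detach, so probe() needs no error-path or
     * remove-path clock handling.
     */
    struct clk *clk = devm_clk_get_enabled(dev, NULL);
    if (IS_ERR(clk))
            return PTR_ERR(clk);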
+diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
+index 1ee62cd58582b6..25db014494a4de 100644
+--- a/drivers/gpio/gpio-crystalcove.c
++++ b/drivers/gpio/gpio-crystalcove.c
+@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
+ case 0x5e:
+ return GPIOPANELCTL;
+ default:
+- return -EOPNOTSUPP;
++ return -ENOTSUPP;
+ }
+ }
+
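This swaps the POSIX-namespace -EOPNOTSUPP back to the kernel-internal -ENOTSUPP (which must never leak to userspace): parts of gpiolib test for -ENOTSUPP explicitly when deciding whether a chip supports an operation, as the cdev debouncer later in this same patch illustrates. A hedged sketch of that consumer side; sw_debounce_setup() is a hypothetical stand-in for the software fallback:

    ret = gpiod_set_debounce(desc, debounce_period_us);
    if (ret && ret != -ENOTSUPP)
            return ret;             /* a real failure */
    if (ret == -ENOTSUPP)           /* hardware cannot do it */
            ret = sw_debounce_setup(desc, debounce_period_us);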
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index 8db5717bdabe56..75107ede3bf8f4 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
+ else
+ nirq = DIV_ROUND_UP(ngpio, 16);
+
++ if (nirq > MAX_INT_PER_BANK) {
++ dev_err(dev, "Too many IRQs!\n");
++ return -EINVAL;
++ }
++
+ chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
+ if (!chips)
+ return -ENOMEM;
+@@ -284,7 +289,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
+ * serve as EDMA event triggers.
+ */
+
+-static void gpio_irq_disable(struct irq_data *d)
++static void gpio_irq_mask(struct irq_data *d)
+ {
+ struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
+@@ -293,7 +298,7 @@ static void gpio_irq_disable(struct irq_data *d)
+ writel_relaxed(mask, &g->clr_rising);
+ }
+
+-static void gpio_irq_enable(struct irq_data *d)
++static void gpio_irq_unmask(struct irq_data *d)
+ {
+ struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
+@@ -319,8 +324,8 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
+
+ static struct irq_chip gpio_irqchip = {
+ .name = "GPIO",
+- .irq_enable = gpio_irq_enable,
+- .irq_disable = gpio_irq_disable,
++ .irq_unmask = gpio_irq_unmask,
++ .irq_mask = gpio_irq_mask,
+ .irq_set_type = gpio_irq_type,
+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ };
+diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
+index c22fcaa44a614c..6b7d47a52b10a1 100644
+--- a/drivers/gpio/gpio-dwapb.c
++++ b/drivers/gpio/gpio-dwapb.c
+@@ -283,13 +283,15 @@ static void dwapb_irq_enable(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+- val = dwapb_read(gpio, GPIO_INTEN);
+- val |= BIT(irqd_to_hwirq(d));
++ val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTEN, val);
++ val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
++ dwapb_write(gpio, GPIO_INTMASK, val);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+
+@@ -297,12 +299,14 @@ static void dwapb_irq_disable(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+- val = dwapb_read(gpio, GPIO_INTEN);
+- val &= ~BIT(irqd_to_hwirq(d));
++ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
++ dwapb_write(gpio, GPIO_INTMASK, val);
++ val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTEN, val);
+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ }
+diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
+index 5320cf1de89c46..b24e349deed5eb 100644
+--- a/drivers/gpio/gpio-eic-sprd.c
++++ b/drivers/gpio/gpio-eic-sprd.c
+@@ -321,20 +321,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ switch (flow_type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ state = sprd_eic_get(chip, offset);
+- if (state)
++ if (state) {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IEV, 0);
+- else
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_DBNC_IC, 1);
++ } else {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IEV, 1);
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_DBNC_IC, 1);
++ }
+ break;
+ default:
+ return -ENOTSUPP;
+@@ -346,20 +353,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ switch (flow_type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ state = sprd_eic_get(chip, offset);
+- if (state)
++ if (state) {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTPOL, 0);
+- else
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_LATCH_INTCLR, 1);
++ } else {
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTPOL, 1);
++ sprd_eic_update(chip, offset,
++ SPRD_EIC_LATCH_INTCLR, 1);
++ }
+ break;
+ default:
+ return -ENOTSUPP;
+@@ -373,29 +387,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ default:
+@@ -408,29 +427,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
++ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+ default:
+diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
+index 5ef8af8249806a..c097e310c9e841 100644
+--- a/drivers/gpio/gpio-lpc32xx.c
++++ b/drivers/gpio/gpio-lpc32xx.c
+@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
+ { .compatible = "nxp,lpc3220-gpio", },
+ { },
+ };
++MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
+
+ static struct platform_driver lpc32xx_gpio_driver = {
+ .driver = {
+diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
+index 7a3e1760fc5b7d..10ea71273c8915 100644
+--- a/drivers/gpio/gpio-mlxbf3.c
++++ b/drivers/gpio/gpio-mlxbf3.c
+@@ -39,6 +39,8 @@
+ #define MLXBF_GPIO_CAUSE_OR_EVTEN0 0x14
+ #define MLXBF_GPIO_CAUSE_OR_CLRCAUSE 0x18
+
++#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
++
+ struct mlxbf3_gpio_context {
+ struct gpio_chip gc;
+
+@@ -82,6 +84,8 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
+ val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
++
++ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+ raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+
+ gpiochip_disable_irq(gc, offset);
+@@ -215,6 +219,8 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
+ gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
+ gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
+ gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
++ if (ret)
++ return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);
+
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+@@ -251,6 +257,15 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
+ return 0;
+ }
+
++static void mlxbf3_gpio_shutdown(struct platform_device *pdev)
++{
++ struct mlxbf3_gpio_context *gs = platform_get_drvdata(pdev);
++
++ /* Disable and clear all interrupts */
++ writel(0, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
++ writel(MLXBF_GPIO_CLR_ALL_INTS, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
++}
++
+ static const struct acpi_device_id mlxbf3_gpio_acpi_match[] = {
+ { "MLNXBF33", 0 },
+ {}
+@@ -263,6 +278,7 @@ static struct platform_driver mlxbf3_gpio_driver = {
+ .acpi_match_table = mlxbf3_gpio_acpi_match,
+ },
+ .probe = mlxbf3_gpio_probe,
++ .shutdown = mlxbf3_gpio_shutdown,
+ };
+ module_platform_driver(mlxbf3_gpio_driver);
+
+diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
+index 74fdf0d87b2c8e..c9f9f4e36c89b5 100644
+--- a/drivers/gpio/gpio-mmio.c
++++ b/drivers/gpio/gpio-mmio.c
+@@ -622,8 +622,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
+ ret = gpiochip_get_ngpios(gc, dev);
+ if (ret)
+ gc->ngpio = gc->bgpio_bits;
+- else
+- gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8));
+
+ ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ if (ret)
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index bdd50a78e4142d..ce9a94e332801f 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -766,6 +766,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
+ int level;
+
+ if (chip->driver_data & PCA_PCAL) {
++ guard(mutex)(&chip->i2c_lock);
++
+ /* Enable latch on interrupt-enabled inputs */
+ pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+
+diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
+index b35b9604413f08..caeb3bdc78f8db 100644
+--- a/drivers/gpio/gpio-rockchip.c
++++ b/drivers/gpio/gpio-rockchip.c
+@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ pctldev = of_pinctrl_get(pctlnp);
++ of_node_put(pctlnp);
+ if (!pctldev)
+ return -EPROBE_DEFER;
+
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index 44bf1709a6488c..a8e5ac95cf1702 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -1438,10 +1438,10 @@ static const struct config_item_type gpio_sim_device_config_group_type = {
+ static struct config_group *
+ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
+ {
+- struct gpio_sim_device *dev __free(kfree) = NULL;
+ int id;
+
+- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ struct gpio_sim_device *dev __free(kfree) = kzalloc(sizeof(*dev),
++ GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
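The gpio-sim hunk merges a __free(kfree)-annotated declaration with its kzalloc() initialisation. With scope-based cleanup from linux/cleanup.h, the pointer is kfree()d automatically when it leaves scope, and initialising at the point of declaration leaves no window in which the annotated variable holds garbage or the cleanup order is surprising. A fragment of the pattern, assuming a function that returns ERR_PTR() on failure and an illustrative struct foo:

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
    if (!f)
            return ERR_PTR(-ENOMEM);        /* f is freed on any return */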
+diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
+index 7ce3eddaed2572..1ce40b7673b114 100644
+--- a/drivers/gpio/gpio-tangier.c
++++ b/drivers/gpio/gpio-tangier.c
+@@ -205,7 +205,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+
+ static void tng_irq_ack(struct irq_data *d)
+ {
+- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct tng_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ unsigned long flags;
+ void __iomem *gisr;
+@@ -241,7 +242,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
+
+ static void tng_irq_mask(struct irq_data *d)
+ {
+- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct tng_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+
+ tng_irq_unmask_mask(priv, gpio, false);
+@@ -250,7 +252,8 @@ static void tng_irq_mask(struct irq_data *d)
+
+ static void tng_irq_unmask(struct irq_data *d)
+ {
+- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct tng_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(&priv->chip, gpio);
+diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
+index d87dd06db40d07..9130c691a2dd32 100644
+--- a/drivers/gpio/gpio-tegra186.c
++++ b/drivers/gpio/gpio-tegra186.c
+@@ -36,12 +36,6 @@
+ #define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
+ #define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
+ #define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
+-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
+- TEGRA186_GPIO_SCR_SEC_REN | \
+- TEGRA186_GPIO_SCR_SEC_G1R | \
+- TEGRA186_GPIO_SCR_SEC_G1W)
+-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
+- TEGRA186_GPIO_SCR_SEC_REN)
+
+ /* control registers */
+ #define TEGRA186_GPIO_ENABLE_CONFIG 0x00
+@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
+
+ value = __raw_readl(secure + TEGRA186_GPIO_SCR);
+
+- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
+- return true;
++ /*
++	 * When SCR_SEC_[R|W]EN is unset, we have full read/write access to all of
++	 * the registers for the given GPIO pin.
++	 * When SCR_SEC_[R|W]EN is set, the accompanying SCR_SEC_G1[R|W] bit must
++	 * also be checked to determine read/write access to the registers for the
++	 * given GPIO pin.
++ */
+
+- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
++ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
++ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
++ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
++ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
+ return true;
+
+ return false;
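The replaced tegra186 check only granted access when security was fully disabled or all four bits were set at once; the new open-coded condition gates reads and writes independently. An equivalent, slightly more readable form of the same logic; scr_accessible() is an illustrative name, the driver keeps it inline:

    static bool scr_accessible(u32 value)
    {
            /* each direction is accessible when its SEC_*EN gate is off,
             * or when the gate is on and the matching group-1 bit is set
             */
            bool readable = !(value & TEGRA186_GPIO_SCR_SEC_REN) ||
                            (value & TEGRA186_GPIO_SCR_SEC_G1R);
            bool writable = !(value & TEGRA186_GPIO_SCR_SEC_WEN) ||
                            (value & TEGRA186_GPIO_SCR_SEC_G1W);

            return readable && writable;
    }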
+diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
+index 3a28c1f273c396..f2e7e8754d95d6 100644
+--- a/drivers/gpio/gpio-tqmx86.c
++++ b/drivers/gpio/gpio-tqmx86.c
+@@ -6,6 +6,7 @@
+ * Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
+ */
+
++#include <linux/bitmap.h>
+ #include <linux/bitops.h>
+ #include <linux/errno.h>
+ #include <linux/gpio/driver.h>
+@@ -28,16 +29,25 @@
+ #define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
+ #define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
+
++#define TQMX86_GPII_NONE 0
+ #define TQMX86_GPII_FALLING BIT(0)
+ #define TQMX86_GPII_RISING BIT(1)
++/* Stored in irq_type as a trigger type, but not actually valid as a
++ * register value, so the name doesn't use "GPII".
++ */
++#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
+ #define TQMX86_GPII_MASK (BIT(0) | BIT(1))
+ #define TQMX86_GPII_BITS 2
++/* Stored in irq_type with GPII bits */
++#define TQMX86_INT_UNMASKED BIT(2)
+
+ struct tqmx86_gpio_data {
+ struct gpio_chip chip;
+ void __iomem *io_base;
+ int irq;
++ /* Lock must be held for accessing output and irq_type fields */
+ raw_spinlock_t spinlock;
++ DECLARE_BITMAP(output, TQMX86_NGPIO);
+ u8 irq_type[TQMX86_NGPI];
+ };
+
+@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ {
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+- u8 val;
+
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+- val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
+- if (value)
+- val |= BIT(offset);
+- else
+- val &= ~BIT(offset);
+- tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
++ __assign_bit(offset, gpio->output, value);
++ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ }
+
+@@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
+ return GPIO_LINE_DIRECTION_OUT;
+ }
+
++static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
++ __must_hold(&gpio->spinlock)
++{
++ u8 type = TQMX86_GPII_NONE, gpiic;
++
++ if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
++ type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
++
++ if (type == TQMX86_INT_BOTH)
++ type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
++ ? TQMX86_GPII_FALLING
++ : TQMX86_GPII_RISING;
++ }
++
++ gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
++ gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
++ gpiic |= type << (offset * TQMX86_GPII_BITS);
++ tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++}
++
+ static void tqmx86_gpio_irq_mask(struct irq_data *data)
+ {
+ unsigned int offset = (data->hwirq - TQMX86_NGPO);
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(
+ irq_data_get_irq_chip_data(data));
+ unsigned long flags;
+- u8 gpiic, mask;
+-
+- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
+
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+- gpiic &= ~mask;
+- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++ gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
++ tqmx86_gpio_irq_config(gpio, offset);
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
++
+ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
+ }
+
+@@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(
+ irq_data_get_irq_chip_data(data));
+ unsigned long flags;
+- u8 gpiic, mask;
+-
+- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
+
+ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
++
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+- gpiic &= ~mask;
+- gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
+- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++ gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
++ tqmx86_gpio_irq_config(gpio, offset);
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ }
+
+@@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ unsigned int offset = (data->hwirq - TQMX86_NGPO);
+ unsigned int edge_type = type & IRQF_TRIGGER_MASK;
+ unsigned long flags;
+- u8 new_type, gpiic;
++ u8 new_type;
+
+ switch (edge_type) {
+ case IRQ_TYPE_EDGE_RISING:
+@@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ new_type = TQMX86_GPII_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+- new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
++ new_type = TQMX86_INT_BOTH;
+ break;
+ default:
+ return -EINVAL; /* not supported */
+ }
+
+- gpio->irq_type[offset] = new_type;
+-
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+- gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
+- gpiic |= new_type << (offset * TQMX86_GPII_BITS);
+- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++ gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
++ gpio->irq_type[offset] |= new_type;
++ tqmx86_gpio_irq_config(gpio, offset);
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
+ return 0;
+@@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+- unsigned long irq_bits;
+- int i = 0;
++ unsigned long irq_bits, flags;
++ int i;
+ u8 irq_status;
+
+ chained_irq_enter(irq_chip, desc);
+@@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
+ tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
+
+ irq_bits = irq_status;
++
++ raw_spin_lock_irqsave(&gpio->spinlock, flags);
++ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
++ /*
++ * Edge-both triggers are implemented by flipping the edge
++ * trigger after each interrupt, as the controller only supports
++ * either rising or falling edge triggers, but not both.
++ *
++ * Internally, the TQMx86 GPIO controller has separate status
++ * registers for rising and falling edge interrupts. GPIIC
++ * configures which bits from which register are visible in the
++ * interrupt status register GPIIS and defines what triggers the
++ * parent IRQ line. Writing to GPIIS always clears both rising
++ * and falling interrupt flags internally, regardless of the
++ * currently configured trigger.
++ *
++ * In consequence, we can cleanly implement the edge-both
++ * trigger in software by first clearing the interrupt and then
++ * setting the new trigger based on the current GPIO input in
++ * tqmx86_gpio_irq_config() - even if an edge arrives between
++ * reading the input and setting the trigger, we will have a new
++ * interrupt pending.
++ */
++ if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
++ tqmx86_gpio_irq_config(gpio, i);
++ }
++ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
++
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
+ generic_handle_domain_irq(gpio->chip.irq.domain,
+ i + TQMX86_NGPO);
+@@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+
+ tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
+
++ /*
++ * Reading the previous output state is not possible with TQMx86 hardware.
++ * Initialize all outputs to 0 to have a defined state that matches the
++ * shadow register.
++ */
++ tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
++
+ chip = &gpio->chip;
+ chip->label = "gpio-tqmx86";
+ chip->owner = THIS_MODULE;
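The TQMx86 output register cannot be read back, so the driver now keeps a shadow bitmap, initialised to a known state in probe, and rewrites the whole byte on every set; bitmap_get_value8() extracts the register image from the bitmap. A reduced sketch of the shadow-register pattern with illustrative names (locking omitted for brevity; the driver holds a raw spinlock around both steps):

    #include <linux/bitmap.h>
    #include <linux/io.h>

    static DECLARE_BITMAP(output, 16);      /* shadow of a write-only port */

    static void port_set(void __iomem *base, unsigned int offset, int value)
    {
            __assign_bit(offset, output, value);            /* update shadow */
            iowrite8(bitmap_get_value8(output, 0), base);   /* write lines 0..7 */
    }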
+diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
+index c18b6b47384f1b..94ca9d03c09494 100644
+--- a/drivers/gpio/gpio-wcove.c
++++ b/drivers/gpio/gpio-wcove.c
+@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
+ unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
+
+ if (gpio >= WCOVE_GPIO_NUM)
+- return -EOPNOTSUPP;
++ return -ENOTSUPP;
+
+ return reg + gpio;
+ }
+diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
+index a0d69387c1532d..2f3c9ebfa78d1d 100644
+--- a/drivers/gpio/gpio-zynqmp-modepin.c
++++ b/drivers/gpio/gpio-zynqmp-modepin.c
+@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
+ { .compatible = "xlnx,zynqmp-gpio-modepin", },
+ { }
+ };
++MODULE_DEVICE_TABLE(of, modepin_platform_id);
+
+ static struct platform_driver modepin_platform_driver = {
+ .driver = {
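Both of_device_id additions above (lpc32xx and this one) exist so the compatible strings reach module autoloading: without MODULE_DEVICE_TABLE() the aliases never land in modules.alias, and udev cannot load the module when the matching firmware node appears. The general shape, with an illustrative "vendor,foo" compatible:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/of.h>

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo", },
            { }
    };
    MODULE_DEVICE_TABLE(of, foo_of_match);  /* emit modalias entries */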
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 51e41676de0b8d..b366b4ca4c40e9 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -128,7 +128,24 @@ static bool acpi_gpio_deferred_req_irqs_done;
+
+ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
+ {
+- return device_match_acpi_handle(&gc->gpiodev->dev, data);
++ /* First check the actual GPIO device */
++ if (device_match_acpi_handle(&gc->gpiodev->dev, data))
++ return true;
++
++ /*
++	 * When the ACPI device is artificially split into banks of GPIOs,
++	 * each of which is represented by a separate GPIO device, the
++	 * firmware node of the physical device may not be shared among the
++	 * banks, as they may require different values for the same property,
++	 * e.g., the number of GPIOs in a certain bank. In such a case the
++	 * ACPI handle of a GPIO device is NULL and cannot be used. Hence we
++	 * have to check the parent device to be sure that there is no match
++	 * before bailing out.
++ */
++ if (gc->parent)
++ return device_match_acpi_handle(gc->parent, data);
++
++ return false;
+ }
+
+ /**
+@@ -1655,6 +1672,40 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
++ {
++ /*
++ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
++ * a "dolby" button. At the ACPI level an _AEI event-handler
++ * is connected which sets an ACPI variable to 1 on both
++ * edges. This variable can be polled + cleared to 0 using
++ * WMI. But since the variable is set on both edges the WMI
++ * interface is pretty useless even when polling.
++ * So instead the x86-android-tablets code instantiates
++ * a gpio-keys platform device for it.
++ * Ignore the _AEI handler for the pin, so that it is not busy.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
++ },
++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++ .ignore_interrupt = "INT33FC:00@3",
++ },
++ },
++ {
++ /*
++ * Spurious wakeups from TP_ATTN# pin
++ * Found in BIOS 0.35
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
++ },
++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++ .ignore_wake = "PNP0C50:00@8",
++ },
++ },
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index e39d344feb2899..545998e9f6ad21 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -5,6 +5,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/build_bug.h>
+ #include <linux/cdev.h>
++#include <linux/cleanup.h>
+ #include <linux/compat.h>
+ #include <linux/compiler.h>
+ #include <linux/device.h>
+@@ -21,6 +22,7 @@
+ #include <linux/mutex.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/poll.h>
++#include <linux/rbtree.h>
+ #include <linux/seq_file.h>
+ #include <linux/spinlock.h>
+ #include <linux/timekeeping.h>
+@@ -130,6 +132,10 @@ struct linehandle_state {
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
++#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
++ (GPIOHANDLE_REQUEST_INPUT | \
++ GPIOHANDLE_REQUEST_OUTPUT)
++
+ static int linehandle_validate_flags(u32 flags)
+ {
+ /* Return an error if an unknown flag is set */
+@@ -210,21 +216,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
+ if (ret)
+ return ret;
+
++ /* Lines must be reconfigured explicitly as input or output. */
++ if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
++ return -EINVAL;
++
+ for (i = 0; i < lh->num_descs; i++) {
+ desc = lh->descs[i];
+- linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
++ linehandle_flags_to_desc_flags(lflags, &desc->flags);
+
+- /*
+- * Lines have to be requested explicitly for input
+- * or output, else the line will be treated "as is".
+- */
+ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ int val = !!gcnf.default_values[i];
+
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ return ret;
+- } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
++ } else {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ return ret;
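The hunk above tightens the v1 character-device uAPI: a GPIOHANDLE_SET_CONFIG_IOCTL must now state a direction explicitly, and a request with neither INPUT nor OUTPUT set fails with -EINVAL instead of silently leaving lines "as is". A sketch of the userspace side under that rule; set_output() and handle_fd are illustrative:

    #include <linux/gpio.h>
    #include <sys/ioctl.h>

    static int set_output(int handle_fd, int value)
    {
            struct gpiohandle_config cfg = {
                    .flags = GPIOHANDLE_REQUEST_OUTPUT,     /* direction is mandatory */
                    .default_values = { [0] = !!value },
            };

            return ioctl(handle_fd, GPIOHANDLE_SET_CONFIG_IOCTL, &cfg);
    }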
+@@ -461,6 +467,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+
+ /**
+ * struct line - contains the state of a requested line
++ * @node: to store the object in supinfo_tree if supplemental
+ * @desc: the GPIO descriptor for this line.
+ * @req: the corresponding line request
+ * @irq: the interrupt triggered in response to events on this GPIO
+@@ -473,6 +480,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ * @line_seqno: the seqno for the current edge event in the sequence of
+ * events for this line.
+ * @work: the worker that implements software debouncing
++ * @debounce_period_us: the debounce period in microseconds
+ * @sw_debounced: flag indicating if the software debouncer is active
+ * @level: the current debounced physical level of the line
+ * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
+@@ -481,6 +489,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ * @last_seqno: the last sequence number before debounce period expires
+ */
+ struct line {
++ struct rb_node node;
+ struct gpio_desc *desc;
+ /*
+ * -- edge detector specific fields --
+@@ -514,6 +523,15 @@ struct line {
+ * -- debouncer specific fields --
+ */
+ struct delayed_work work;
++ /*
++	 * debounce_period_us is accessed by debounce_irq_handler() and
++	 * process_hw_ts(), which are disabled when it is modified by
++	 * debounce_setup(), edge_detector_setup() or edge_detector_stop(),
++	 * and which can live with a stale value when it is updated by
++	 * edge_detector_update().
++	 * The modifying functions are themselves mutually exclusive.
++ */
++ unsigned int debounce_period_us;
+ /*
+ * sw_debounce is accessed by linereq_set_config(), which is the
+ * only setter, and linereq_get_values(), which can live with a
+@@ -546,6 +564,17 @@ struct line {
+ #endif /* CONFIG_HTE */
+ };
+
++/*
++ * An rbtree of the struct lines containing supplemental info.
++ * Used to populate gpio_v2_line_info with cdev specific fields not contained
++ * in the struct gpio_desc.
++ * A line is determined to contain supplemental information by
++ * line_has_supinfo().
++ */
++static struct rb_root supinfo_tree = RB_ROOT;
++/* covers supinfo_tree */
++static DEFINE_SPINLOCK(supinfo_lock);
++
+ /**
+ * struct linereq - contains the state of a userspace line request
+ * @gdev: the GPIO device the line request pertains to
+@@ -559,7 +588,8 @@ struct line {
+ * this line request. Note that this is not used when @num_lines is 1, as
+ * the line_seqno is then the same and is cheaper to calculate.
+ * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
+- * of configuration, particularly multi-step accesses to desc flags.
++ * of configuration, particularly multi-step accesses to desc flags and
++ * changes to supinfo status.
+ * @lines: the lines held by this line request, with @num_lines elements.
+ */
+ struct linereq {
+@@ -575,6 +605,103 @@ struct linereq {
+ struct line lines[];
+ };
+
++static void supinfo_insert(struct line *line)
++{
++ struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
++ struct line *entry;
++
++ guard(spinlock)(&supinfo_lock);
++
++ while (*new) {
++ entry = container_of(*new, struct line, node);
++
++ parent = *new;
++ if (line->desc < entry->desc) {
++ new = &((*new)->rb_left);
++ } else if (line->desc > entry->desc) {
++ new = &((*new)->rb_right);
++ } else {
++ /* this should never happen */
++ WARN(1, "duplicate line inserted");
++ return;
++ }
++ }
++
++ rb_link_node(&line->node, parent, new);
++ rb_insert_color(&line->node, &supinfo_tree);
++}
++
++static void supinfo_erase(struct line *line)
++{
++ guard(spinlock)(&supinfo_lock);
++
++ rb_erase(&line->node, &supinfo_tree);
++}
++
++static struct line *supinfo_find(struct gpio_desc *desc)
++{
++ struct rb_node *node = supinfo_tree.rb_node;
++ struct line *line;
++
++ while (node) {
++ line = container_of(node, struct line, node);
++ if (desc < line->desc)
++ node = node->rb_left;
++ else if (desc > line->desc)
++ node = node->rb_right;
++ else
++ return line;
++ }
++ return NULL;
++}
++
++static void supinfo_to_lineinfo(struct gpio_desc *desc,
++ struct gpio_v2_line_info *info)
++{
++ struct gpio_v2_line_attribute *attr;
++ struct line *line;
++
++ guard(spinlock)(&supinfo_lock);
++
++ line = supinfo_find(desc);
++ if (!line)
++ return;
++
++ attr = &info->attrs[info->num_attrs];
++ attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
++ attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
++ info->num_attrs++;
++}
++
++static inline bool line_has_supinfo(struct line *line)
++{
++ return READ_ONCE(line->debounce_period_us);
++}
++
++/*
++ * Checks line_has_supinfo() before and after the change to avoid unnecessary
++ * supinfo_tree access.
++ * Called indirectly by linereq_create() or linereq_set_config(), so the
++ * line is already protected from concurrent changes.
++ */
++static void line_set_debounce_period(struct line *line,
++ unsigned int debounce_period_us)
++{
++ bool was_suppl = line_has_supinfo(line);
++
++ WRITE_ONCE(line->debounce_period_us, debounce_period_us);
++
++ /* if supinfo status is unchanged then we're done */
++ if (line_has_supinfo(line) == was_suppl)
++ return;
++
++ /* supinfo status has changed, so update the tree */
++ if (was_suppl)
++ supinfo_erase(line);
++ else
++ supinfo_insert(line);
++}
++
+ #define GPIO_V2_LINE_BIAS_FLAGS \
+ (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
+ GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
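supinfo_insert() above open-codes the classic rbtree insertion, keyed on the descriptor pointer (pointer comparison gives a stable total order while the descriptor is requested). Since v5.12 the same walk can be written with the rb_add() helper; a sketch under that assumption, noting that rb_add() does not detect duplicate keys, so the duplicate-detection WARN would have to remain a separate check:

    #include <linux/rbtree.h>

    static bool line_less(struct rb_node *a, const struct rb_node *b)
    {
            const struct line *la = rb_entry(a, struct line, node);
            const struct line *lb = rb_entry(b, struct line, node);

            return la->desc < lb->desc;
    }

    static void supinfo_insert_alt(struct line *line)   /* illustrative */
    {
            guard(spinlock)(&supinfo_lock);
            rb_add(&line->node, &supinfo_tree, line_less);
    }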
+@@ -655,6 +782,25 @@ static u32 line_event_id(int level)
+ GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ }
+
++static inline char *make_irq_label(const char *orig)
++{
++ char *new;
++
++ if (!orig)
++ return NULL;
++
++ new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
++ if (!new)
++ return ERR_PTR(-ENOMEM);
++
++ return new;
++}
++
++static inline void free_irq_label(const char *label)
++{
++ kfree(label);
++}
++
+ #ifdef CONFIG_HTE
+
+ static enum hte_return process_hw_ts_thread(void *p)
+@@ -723,7 +869,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
+ line->total_discard_seq++;
+ line->last_seqno = ts->seq;
+ mod_delayed_work(system_wq, &line->work,
+- usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
++ usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ } else {
+ if (unlikely(ts->seq < line->line_seqno))
+ return HTE_CB_HANDLED;
+@@ -864,7 +1010,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
+ struct line *line = p;
+
+ mod_delayed_work(system_wq, &line->work,
+- usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
++ usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+
+ return IRQ_HANDLED;
+ }
+@@ -942,11 +1088,12 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
+ {
+ unsigned long irqflags;
+ int ret, level, irq;
++ char *label;
+
+ /* try hardware */
+ ret = gpiod_set_debounce(line->desc, debounce_period_us);
+ if (!ret) {
+- WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++ line_set_debounce_period(line, debounce_period_us);
+ return ret;
+ }
+ if (ret != -ENOTSUPP)
+@@ -964,11 +1111,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
+ if (irq < 0)
+ return -ENXIO;
+
++ label = make_irq_label(line->req->label);
++ if (IS_ERR(label))
++ return -ENOMEM;
++
+ irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ ret = request_irq(irq, debounce_irq_handler, irqflags,
+- line->req->label, line);
+- if (ret)
++ label, line);
++ if (ret) {
++ free_irq_label(label);
+ return ret;
++ }
+ line->irq = irq;
+ } else {
+ ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
+@@ -1013,7 +1166,7 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
+ static void edge_detector_stop(struct line *line)
+ {
+ if (line->irq) {
+- free_irq(line->irq, line);
++ free_irq_label(free_irq(line->irq, line));
+ line->irq = 0;
+ }
+
+@@ -1025,8 +1178,7 @@ static void edge_detector_stop(struct line *line)
+ cancel_delayed_work_sync(&line->work);
+ WRITE_ONCE(line->sw_debounced, 0);
+ WRITE_ONCE(line->edflags, 0);
+- if (line->desc)
+- WRITE_ONCE(line->desc->debounce_period_us, 0);
++ line_set_debounce_period(line, 0);
+ /* do not change line->level - see comment in debounced_value() */
+ }
+
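The composed call free_irq_label(free_irq(...)) in the hunk above works because free_irq() returns the devname cookie that was passed to request_irq(), which here is the label built by make_irq_label(), so no extra field is needed to remember what to kfree(). Expanded for clarity, with an illustrative helper name:

    static void line_free_irq(struct line *line)
    {
            /* free_irq() hands back the devname given to request_irq() */
            const void *label = free_irq(line->irq, line);

            free_irq_label(label);  /* which is just kfree() */
            line->irq = 0;
    }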
+@@ -1038,6 +1190,7 @@ static int edge_detector_setup(struct line *line,
+ unsigned long irqflags = 0;
+ u64 eflags;
+ int irq, ret;
++ char *label;
+
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
+ if (eflags && !kfifo_initialized(&line->req->events)) {
+@@ -1051,7 +1204,7 @@ static int edge_detector_setup(struct line *line,
+ ret = debounce_setup(line, debounce_period_us);
+ if (ret)
+ return ret;
+- WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++ line_set_debounce_period(line, debounce_period_us);
+ }
+
+ /* detection disabled or sw debouncer will provide edge detection */
+@@ -1074,11 +1227,17 @@ static int edge_detector_setup(struct line *line,
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+
++ label = make_irq_label(line->req->label);
++ if (IS_ERR(label))
++ return PTR_ERR(label);
++
+ /* Request a thread to read the events */
+ ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
+- irqflags, line->req->label, line);
+- if (ret)
++ irqflags, label, line);
++ if (ret) {
++ free_irq_label(label);
+ return ret;
++ }
+
+ line->irq = irq;
+ return 0;
+@@ -1088,17 +1247,31 @@ static int edge_detector_update(struct line *line,
+ struct gpio_v2_line_config *lc,
+ unsigned int line_idx, u64 edflags)
+ {
++ u64 eflags;
++ int ret;
+ u64 active_edflags = READ_ONCE(line->edflags);
+ unsigned int debounce_period_us =
+ gpio_v2_line_config_debounce_period(lc, line_idx);
+
+ if ((active_edflags == edflags) &&
+- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
++ (READ_ONCE(line->debounce_period_us) == debounce_period_us))
+ return 0;
+
+ /* sw debounced and still will be...*/
+ if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
+- WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++ line_set_debounce_period(line, debounce_period_us);
++ /*
++ * ensure event fifo is initialised if edge detection
++ * is now enabled.
++ */
++ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
++ if (eflags && !kfifo_initialized(&line->req->events)) {
++ ret = kfifo_alloc(&line->req->events,
++ line->req->event_buffer_size,
++ GFP_KERNEL);
++ if (ret)
++ return ret;
++ }
+ return 0;
+ }
+
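
Side note for illustration (not part of the patch): the line_set_debounce_period()/READ_ONCE() pairing above is the usual lockless publish/read pattern, where each access is a single untorn load or store. A rough userspace analogue using C11 relaxed atomics, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

struct line_state {
        _Atomic unsigned int debounce_period_us;
};

static void line_set_debounce_period(struct line_state *l, unsigned int us)
{
        /* ~WRITE_ONCE(): a single, untorn store readable without a lock */
        atomic_store_explicit(&l->debounce_period_us, us, memory_order_relaxed);
}

static unsigned int line_get_debounce_period(struct line_state *l)
{
        /* ~READ_ONCE(): a single, untorn load */
        return atomic_load_explicit(&l->debounce_period_us, memory_order_relaxed);
}

int main(void)
{
        struct line_state l;

        atomic_init(&l.debounce_period_us, 0);
        line_set_debounce_period(&l, 5000);
        printf("%u us\n", line_get_debounce_period(&l));
        return 0;
}
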
+@@ -1392,12 +1565,14 @@ static long linereq_set_config_unlocked(struct linereq *lr,
+ line = &lr->lines[i];
+ desc = lr->lines[i].desc;
+ flags = gpio_v2_line_config_flags(lc, i);
+- gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+- edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
+ /*
+- * Lines have to be requested explicitly for input
+- * or output, else the line will be treated "as is".
++ * Lines not explicitly reconfigured as input or output
++ * are left unchanged.
+ */
++ if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
++ continue;
++ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
++ edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
+ if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
+ int val = gpio_v2_line_config_output_value(lc, i);
+
+@@ -1405,7 +1580,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ return ret;
+- } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
++ } else {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ return ret;
+@@ -1573,6 +1748,7 @@ static ssize_t linereq_read(struct file *file, char __user *buf,
+
+ static void linereq_free(struct linereq *lr)
+ {
++ struct line *line;
+ unsigned int i;
+
+ if (lr->device_unregistered_nb.notifier_call)
+@@ -1580,10 +1756,14 @@ static void linereq_free(struct linereq *lr)
+ &lr->device_unregistered_nb);
+
+ for (i = 0; i < lr->num_lines; i++) {
+- if (lr->lines[i].desc) {
+- edge_detector_stop(&lr->lines[i]);
+- gpiod_free(lr->lines[i].desc);
+- }
++ line = &lr->lines[i];
++ if (!line->desc)
++ continue;
++
++ edge_detector_stop(line);
++ if (line_has_supinfo(line))
++ supinfo_erase(line);
++ gpiod_free(line->desc);
+ }
+ kfifo_free(&lr->events);
+ kfree(lr->label);
+@@ -1943,7 +2123,7 @@ static void lineevent_free(struct lineevent_state *le)
+ blocking_notifier_chain_unregister(&le->gdev->device_notifier,
+ &le->device_unregistered_nb);
+ if (le->irq)
+- free_irq(le->irq, le);
++ free_irq_label(free_irq(le->irq, le));
+ if (le->desc)
+ gpiod_free(le->desc);
+ kfree(le->label);
+@@ -2091,6 +2271,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ int fd;
+ int ret;
+ int irq, irqflags = 0;
++ char *label;
+
+ if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
+ return -EFAULT;
+@@ -2175,15 +2356,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ if (ret)
+ goto out_free_le;
+
++ label = make_irq_label(le->label);
++ if (IS_ERR(label)) {
++ ret = PTR_ERR(label);
++ goto out_free_le;
++ }
++
+ /* Request a thread to read the events */
+ ret = request_threaded_irq(irq,
+ lineevent_irq_handler,
+ lineevent_irq_thread,
+ irqflags,
+- le->label,
++ label,
+ le);
+- if (ret)
++ if (ret) {
++ free_irq_label(label);
+ goto out_free_le;
++ }
+
+ le->irq = irq;
+
+@@ -2274,8 +2463,6 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
+ struct gpio_chip *gc = desc->gdev->chip;
+ bool ok_for_pinctrl;
+ unsigned long flags;
+- u32 debounce_period_us;
+- unsigned int num_attrs = 0;
+
+ memset(info, 0, sizeof(*info));
+ info->offset = gpio_chip_hwgpio(desc);
+@@ -2342,14 +2529,6 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
+ else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+
+- debounce_period_us = READ_ONCE(desc->debounce_period_us);
+- if (debounce_period_us) {
+- info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+- info->attrs[num_attrs].debounce_period_us = debounce_period_us;
+- num_attrs++;
+- }
+- info->num_attrs = num_attrs;
+-
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ }
+
+@@ -2456,6 +2635,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
+ return -EBUSY;
+ }
+ gpio_desc_to_lineinfo(desc, &lineinfo);
++ supinfo_to_lineinfo(desc, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+@@ -2482,10 +2662,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+ return 0;
+ }
+
+-/*
+- * gpio_ioctl() - ioctl handler for the GPIO chardev
+- */
+-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ struct gpio_chardev_data *cdev = file->private_data;
+ struct gpio_device *gdev = cdev->gdev;
+@@ -2522,6 +2699,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ }
+
++/*
++ * gpio_ioctl() - ioctl handler for the GPIO chardev
++ */
++static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct gpio_chardev_data *cdev = file->private_data;
++
++ return call_ioctl_locked(file, cmd, arg, cdev->gdev,
++ gpio_ioctl_unlocked);
++}
++
+ #ifdef CONFIG_COMPAT
+ static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+@@ -2546,6 +2734,7 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
+ chg.event_type = action;
+ chg.timestamp_ns = ktime_get_ns();
+ gpio_desc_to_lineinfo(desc, &chg.info);
++ supinfo_to_lineinfo(desc, &chg.info);
+
+ ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
+ if (ret)
+@@ -2766,11 +2955,11 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
+ struct gpio_chardev_data *cdev = file->private_data;
+ struct gpio_device *gdev = cdev->gdev;
+
+- bitmap_free(cdev->watched_lines);
+ blocking_notifier_chain_unregister(&gdev->device_notifier,
+ &cdev->device_unregistered_nb);
+ blocking_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
++ bitmap_free(cdev->watched_lines);
+ gpio_device_put(gdev);
+ kfree(cdev);
+
+diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
+index fe9ce6b19f15c5..4987e62dcb3d15 100644
+--- a/drivers/gpio/gpiolib-devres.c
++++ b/drivers/gpio/gpiolib-devres.c
+@@ -158,7 +158,7 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+- desc = fwnode_gpiod_get_index(fwnode, con_id, index, flags, label);
++ desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
+ if (IS_ERR(desc)) {
+ devres_free(dr);
+ return desc;
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 531faabead0f40..cec9e8f29bbdfe 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -192,6 +192,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
+ */
+ { "himax,hx8357", "gpios-reset", false },
+ { "himax,hx8369", "gpios-reset", false },
++#endif
++#if IS_ENABLED(CONFIG_PCI_LANTIQ)
++ /*
++ * According to the PCI specification, the RST# pin is an
++ * active-low signal. However, most of the device trees that
++ * have been widely used for a long time incorrectly describe
++ * reset GPIO as active-high, and were also using wrong name
++ * for the property.
++ */
++ { "lantiq,pci-xway", "gpio-reset", false },
++#endif
++#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
++ /*
++ * DTS for Nokia N900 incorrectly specified "active high"
++ * polarity for the reset line, while the chip actually
++ * treats it as "active low".
++ */
++ { "ti,tsc2005", "reset-gpios", false },
+ #endif
+ };
+ unsigned int i;
+@@ -491,9 +509,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ { "reset", "reset-n-io", "marvell,nfc-uart" },
+ { "reset", "reset-n-io", "mrvl,nfc-uart" },
+ #endif
+-#if !IS_ENABLED(CONFIG_PCI_LANTIQ)
++#if IS_ENABLED(CONFIG_PCI_LANTIQ)
+ /* MIPS Lantiq PCI */
+- { "reset", "gpios-reset", "lantiq,pci-xway" },
++ { "reset", "gpio-reset", "lantiq,pci-xway" },
+ #endif
+
+ /*
+@@ -512,6 +530,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ #if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
+ { "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" },
+ #endif
++#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
++ { "i2s1-in-sel-gpio1", NULL, "mediatek,mt2701-cs42448-machine" },
++ { "i2s1-in-sel-gpio2", NULL, "mediatek,mt2701-cs42448-machine" },
++#endif
+ #if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
+ { "reset", "gpio-reset", "ti,tlv320aic3x" },
+ { "reset", "gpio-reset", "ti,tlv320aic33" },
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index 50503a4525eb03..6c27312c627883 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+
+ #include <linux/bitops.h>
++#include <linux/cleanup.h>
+ #include <linux/device.h>
+ #include <linux/idr.h>
+ #include <linux/init.h>
+@@ -474,14 +475,17 @@ static ssize_t export_store(const struct class *class,
+ goto done;
+
+ status = gpiod_set_transitory(desc, false);
+- if (!status) {
+- status = gpiod_export(desc, true);
+- if (status < 0)
+- gpiod_free(desc);
+- else
+- set_bit(FLAG_SYSFS, &desc->flags);
++ if (status) {
++ gpiod_free(desc);
++ goto done;
+ }
+
++ status = gpiod_export(desc, true);
++ if (status < 0)
++ gpiod_free(desc);
++ else
++ set_bit(FLAG_SYSFS, &desc->flags);
++
+ done:
+ if (status)
+ pr_debug("%s: status %d\n", __func__, status);
+@@ -771,15 +775,15 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
+ struct gpio_desc *desc;
+ struct gpio_chip *chip = gdev->chip;
+
+- if (!gdev->mockdev)
+- return;
++ scoped_guard(mutex, &sysfs_lock) {
++ if (!gdev->mockdev)
++ return;
+
+- device_unregister(gdev->mockdev);
++ device_unregister(gdev->mockdev);
+
+- /* prevent further gpiod exports */
+- mutex_lock(&sysfs_lock);
+- gdev->mockdev = NULL;
+- mutex_unlock(&sysfs_lock);
++ /* prevent further gpiod exports */
++ gdev->mockdev = NULL;
++ }
+
+ /* unregister gpiod class devices owned by sysfs */
+ for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {
+@@ -814,7 +818,7 @@ static int __init gpiolib_sysfs_init(void)
+ * gpiochip_sysfs_register() acquires a mutex. This is unsafe
+ * and needs to be fixed.
+ *
+- * Also it would be nice to use gpiochip_find() here so we
++ * Also it would be nice to use gpio_device_find() here so we
+ * can keep gpio_chips local to gpiolib.c, but the yield of
+ * gpio_lock prevents us from doing this.
+ */
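
As an aside (illustration only): scoped_guard() from <linux/cleanup.h>, used in the gpiolib-sysfs hunk above, ties the unlock to block scope so early returns cannot leak the mutex. A userspace approximation built on the GCC/Clang cleanup attribute; the macro and names here are hypothetical and simplified:

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* lock now, unlock automatically when the enclosing scope exits */
#define SCOPED_LOCK(m) \
        __attribute__((cleanup(unlock_cleanup))) pthread_mutex_t *_guard = \
                (pthread_mutex_lock(m), (m))

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;
static int mockdev = 1;

static void unregister_mock(void)
{
        SCOPED_LOCK(&sysfs_lock);
        if (!mockdev)
                return;         /* unlocked here too, by the cleanup */
        mockdev = 0;            /* prevent further exports */
}

int main(void)
{
        unregister_mock();
        unregister_mock();      /* second call takes the early return */
        puts("ok");
        return 0;
}
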
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 40a0022ea71909..5c0016c77d2abe 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -15,6 +15,7 @@
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
+ #include <linux/of.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/seq_file.h>
+@@ -164,7 +165,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc,
+ if (hwnum >= gdev->ngpio)
+ return ERR_PTR(-EINVAL);
+
+- return &gdev->descs[hwnum];
++ return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)];
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_get_desc);
+
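
Illustrative note (not part of the patch): array_index_nospec() above clamps an already-bounds-checked index with branchless arithmetic, so a mispredicted bounds check cannot speculatively index out of range. A simplified userspace model of the idea, not the kernel's exact implementation:

#include <stddef.h>
#include <stdio.h>

/* evaluates to index when index < size, and to 0 otherwise,
 * without a conditional branch */
static size_t index_nospec(size_t index, size_t size)
{
        size_t mask = ~(size_t)0 + (size_t)(index >= size);

        return index & mask;
}

int main(void)
{
        unsigned int descs[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t hwnum = 5;

        if (hwnum >= 8)
                return 1;
        /* even under misprediction of the check above, the clamped
         * index cannot reach past the array */
        printf("%u\n", descs[index_nospec(hwnum, 8)]);
        return 0;
}
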
+@@ -894,11 +895,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+
+ ret = gpiochip_irqchip_init_valid_mask(gc);
+ if (ret)
+- goto err_remove_acpi_chip;
++ goto err_free_hogs;
+
+ ret = gpiochip_irqchip_init_hw(gc);
+ if (ret)
+- goto err_remove_acpi_chip;
++ goto err_remove_irqchip_mask;
+
+ ret = gpiochip_add_irqchip(gc, lock_key, request_key);
+ if (ret)
+@@ -923,13 +924,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ gpiochip_irqchip_remove(gc);
+ err_remove_irqchip_mask:
+ gpiochip_irqchip_free_valid_mask(gc);
+-err_remove_acpi_chip:
++err_free_hogs:
++ gpiochip_free_hogs(gc);
+ acpi_gpiochip_remove(gc);
++ gpiochip_remove_pin_ranges(gc);
+ err_remove_of_chip:
+- gpiochip_free_hogs(gc);
+ of_gpiochip_remove(gc);
+ err_free_gpiochip_mask:
+- gpiochip_remove_pin_ranges(gc);
+ gpiochip_free_valid_mask(gc);
+ if (gdev->dev.release) {
+ /* release() has been registered by gpiochip_setup_dev() */
+@@ -1014,16 +1015,10 @@ void gpiochip_remove(struct gpio_chip *gc)
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_remove);
+
+-/**
+- * gpiochip_find() - iterator for locating a specific gpio_chip
+- * @data: data to pass to match function
+- * @match: Callback function to check gpio_chip
++/*
++ * FIXME: This will be removed soon.
+ *
+- * Similar to bus_find_device. It returns a reference to a gpio_chip as
+- * determined by a user supplied @match callback. The callback should return
+- * 0 if the device doesn't match and non-zero if it does. If the callback is
+- * non-zero, this function will return to the caller and not iterate over any
+- * more gpio_chips.
++ * This function is deprecated, don't use.
+ */
+ struct gpio_chip *gpiochip_find(void *data,
+ int (*match)(struct gpio_chip *gc,
+@@ -1031,21 +1026,62 @@ struct gpio_chip *gpiochip_find(void *data,
+ {
+ struct gpio_device *gdev;
+ struct gpio_chip *gc = NULL;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&gpio_lock, flags);
+- list_for_each_entry(gdev, &gpio_devices, list)
+- if (gdev->chip && match(gdev->chip, data)) {
+- gc = gdev->chip;
+- break;
+- }
+
+- spin_unlock_irqrestore(&gpio_lock, flags);
++ gdev = gpio_device_find(data, match);
++ if (gdev) {
++ gc = gdev->chip;
++ gpio_device_put(gdev);
++ }
+
+ return gc;
+ }
+ EXPORT_SYMBOL_GPL(gpiochip_find);
+
++/**
++ * gpio_device_find() - find a specific GPIO device
++ * @data: data to pass to match function
++ * @match: Callback function to check gpio_chip
++ *
++ * Returns:
++ * New reference to struct gpio_device.
++ *
++ * Similar to bus_find_device(). It returns a reference to a gpio_device as
++ * determined by a user supplied @match callback. The callback should return
++ * 0 if the device doesn't match and non-zero if it does. If the callback
++ * returns non-zero, this function will return to the caller and not iterate
++ * over any more gpio_devices.
++ *
++ * The callback takes the GPIO chip structure as argument. During the execution
++ * of the callback function the chip is protected from being freed. TODO: This
++ * actually has yet to be implemented.
++ *
++ * If the function returns non-NULL, the returned reference must be freed by
++ * the caller using gpio_device_put().
++ */
++struct gpio_device *gpio_device_find(void *data,
++ int (*match)(struct gpio_chip *gc,
++ void *data))
++{
++ struct gpio_device *gdev;
++
++ /*
++ * Not yet but in the future the spinlock below will become a mutex.
++ * Annotate this function before anyone tries to use it in interrupt
++ * context, as happened with gpiochip_find().
++ */
++ might_sleep();
++
++ guard(spinlock_irqsave)(&gpio_lock);
++
++ list_for_each_entry(gdev, &gpio_devices, list) {
++ if (gdev->chip && match(gdev->chip, data))
++ return gpio_device_get(gdev);
++ }
++
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(gpio_device_find);
++
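
For illustration only: gpio_device_find() above follows the "find returns a new reference" contract, taking the refcount while the list lock is held so the returned object cannot be freed between lookup and use. A userspace model with C11 atomics; all names are hypothetical:

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct dev {
        atomic_int refcount;
        int id;
};

static struct dev *dev_get(struct dev *d)
{
        atomic_fetch_add(&d->refcount, 1);
        return d;
}

static void dev_put(struct dev *d)
{
        if (atomic_fetch_sub(&d->refcount, 1) == 1)
                free(d);        /* last reference dropped */
}

/* the list lock would be held across this walk; the reference is
 * taken before returning, so the object cannot vanish under the caller */
static struct dev *dev_find(struct dev **tbl, int n,
                            int (*match)(struct dev *, void *), void *data)
{
        for (int i = 0; i < n; i++)
                if (tbl[i] && match(tbl[i], data))
                        return dev_get(tbl[i]);
        return NULL;
}

static int match_id(struct dev *d, void *data)
{
        return d->id == *(int *)data;
}

int main(void)
{
        struct dev *d = malloc(sizeof(*d));
        int want = 42;
        struct dev *found;

        atomic_init(&d->refcount, 1);
        d->id = 42;

        found = dev_find(&d, 1, match_id, &want);
        if (found) {
                printf("found dev %d\n", found->id);
                dev_put(found); /* drop the reference find gave us */
        }
        dev_put(d);             /* drop the creation reference */
        return 0;
}
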
+ static int gpiochip_match_name(struct gpio_chip *gc, void *data)
+ {
+ const char *name = data;
+@@ -1058,6 +1094,30 @@ static struct gpio_chip *find_chip_by_name(const char *name)
+ return gpiochip_find((void *)name, gpiochip_match_name);
+ }
+
++/**
++ * gpio_device_get() - Increase the reference count of this GPIO device
++ * @gdev: GPIO device to increase the refcount for
++ *
++ * Returns:
++ * Pointer to @gdev.
++ */
++struct gpio_device *gpio_device_get(struct gpio_device *gdev)
++{
++ return to_gpio_device(get_device(&gdev->dev));
++}
++EXPORT_SYMBOL_GPL(gpio_device_get);
++
++/**
++ * gpio_device_put() - Decrease the reference count of this GPIO device and
++ * possibly free all resources associated with it.
++ * @gdev: GPIO device to decrease the reference count for
++ */
++void gpio_device_put(struct gpio_device *gdev)
++{
++ put_device(&gdev->dev);
++}
++EXPORT_SYMBOL_GPL(gpio_device_put);
++
+ #ifdef CONFIG_GPIOLIB_IRQCHIP
+
+ /*
+@@ -3962,13 +4022,13 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
+ return desc;
+ }
+
+-static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+- struct fwnode_handle *fwnode,
+- const char *con_id,
+- unsigned int idx,
+- enum gpiod_flags flags,
+- const char *label,
+- bool platform_lookup_allowed)
++struct gpio_desc *gpiod_find_and_request(struct device *consumer,
++ struct fwnode_handle *fwnode,
++ const char *con_id,
++ unsigned int idx,
++ enum gpiod_flags flags,
++ const char *label,
++ bool platform_lookup_allowed)
+ {
+ unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ struct gpio_desc *desc;
+diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
+index a0a67569300b98..9bbde38238d33a 100644
+--- a/drivers/gpio/gpiolib.h
++++ b/drivers/gpio/gpiolib.h
+@@ -86,16 +86,6 @@ static inline struct gpio_device *to_gpio_device(struct device *dev)
+ return container_of(dev, struct gpio_device, dev);
+ }
+
+-static inline struct gpio_device *gpio_device_get(struct gpio_device *gdev)
+-{
+- return to_gpio_device(get_device(&gdev->dev));
+-}
+-
+-static inline void gpio_device_put(struct gpio_device *gdev)
+-{
+- put_device(&gdev->dev);
+-}
+-
+ /* gpio suffixes used for ACPI and device tree lookup */
+ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
+
+@@ -217,6 +207,14 @@ static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
+ return ret;
+ }
+
++struct gpio_desc *gpiod_find_and_request(struct device *consumer,
++ struct fwnode_handle *fwnode,
++ const char *con_id,
++ unsigned int idx,
++ enum gpiod_flags flags,
++ const char *label,
++ bool platform_lookup_allowed);
++
+ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
+ unsigned long lflags, enum gpiod_flags dflags);
+ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 3caa020391c752..ec4abf9ff47b5f 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -198,7 +198,7 @@ config DRM_TTM
+ config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+- depends on DRM && KUNIT && MMU
++ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+@@ -206,7 +206,8 @@ config DRM_TTM_KUNIT_TEST
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+- developers.
++ developers. It depends on (UML || COMPILE_TEST) since no other driver
++ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+index 2b97b8a96fb494..7fea4f0f495a39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
++++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+@@ -100,7 +100,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+- return r;
++ return 0;
+ }
+
+ static int
+@@ -333,6 +333,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ {
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
++ struct amdgpu_ras *con;
+ int r;
+
+ if (reset_device_list == NULL)
+@@ -358,7 +359,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+- /* Resume RAS */
++ /* Resume RAS, ecc_irq */
++ con = amdgpu_ras_get_context(tmp_adev);
++ if (!amdgpu_sriov_vf(tmp_adev) && con) {
++ if (tmp_adev->sdma.ras &&
++ tmp_adev->sdma.ras->ras_block.ras_late_init) {
++ r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
++ &tmp_adev->sdma.ras->ras_block.ras_comm);
++ if (r) {
++ dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
++ goto end;
++ }
++ }
++
++ if (tmp_adev->gfx.ras &&
++ tmp_adev->gfx.ras->ras_block.ras_late_init) {
++ r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
++ &tmp_adev->gfx.ras->ras_block.ras_comm);
++ if (r) {
++ dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
++ goto end;
++ }
++ }
++ }
++
+ amdgpu_ras_resume(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index a79d53bdbe136a..d59e8536192ca9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1009,6 +1009,8 @@ struct amdgpu_device {
+ bool in_s3;
+ bool in_s4;
+ bool in_s0ix;
++ /* indicate amdgpu suspension status */
++ bool suspend_complete;
+
+ enum pp_mp1_state mp1_state;
+ struct amdgpu_doorbell_index doorbell_index;
+@@ -1367,6 +1369,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ void amdgpu_driver_release_kms(struct drm_device *dev);
+
+ int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
++int amdgpu_device_prepare(struct drm_device *dev);
+ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
+ int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
+ u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+index a4d65973bf7cf4..80771b1480fff5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
+ amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
++ res.clock = clock;
+
+ return res;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 25d5fda5b243e3..af6c6d89e63afb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -335,15 +335,15 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
+ return r;
+ }
+
+-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
++void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
+ {
+- struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
++ struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
+
+- amdgpu_bo_reserve(bo, true);
+- amdgpu_bo_kunmap(bo);
+- amdgpu_bo_unpin(bo);
+- amdgpu_bo_unreserve(bo);
+- amdgpu_bo_unref(&(bo));
++ amdgpu_bo_reserve(*bo, true);
++ amdgpu_bo_kunmap(*bo);
++ amdgpu_bo_unpin(*bo);
++ amdgpu_bo_unreserve(*bo);
++ amdgpu_bo_unref(bo);
+ }
+
+ int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
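
Sketch for illustration (not patch content): switching amdgpu_amdkfd_free_gtt_mem() to take void ** lets the free routine clear the caller's pointer through amdgpu_bo_unref(), so it cannot dangle afterwards. The same shape in plain userspace C:

#include <stdio.h>
#include <stdlib.h>

static void free_and_clear(void **obj)
{
        free(*obj);
        *obj = NULL;    /* the caller's pointer no longer dangles */
}

int main(void)
{
        void *mem = malloc(32);

        free_and_clear(&mem);
        printf("%p\n", mem);    /* null pointer */
        return 0;
}
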
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 2fe9860725bd94..3134e6ad81d1d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -221,7 +221,7 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9);
+-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
++void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
+ int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+ void **mem_obj);
+ void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
+@@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
+ struct kgd_mem *mem, void *drm_priv);
+ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
++int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+ int amdgpu_amdkfd_gpuvm_sync_memory(
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
+ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+index 469785d337911a..1ef758ac5076ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+@@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+ return NULL;
+
+ fence = container_of(f, struct amdgpu_amdkfd_fence, base);
+- if (fence && f->ops == &amdkfd_fence_ops)
++ if (f->ops == &amdkfd_fence_ops)
+ return fence;
+
+ return NULL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index e036011137aa22..a1f35510d53955 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -213,7 +213,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
+- vram_size - reserved_for_pt)) {
++ vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
+ ret = -ENOMEM;
+ goto release;
+ }
+@@ -407,6 +407,10 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
+ "Called with userptr BO"))
+ return -EINVAL;
+
++ /* bo has been pinned, no need to validate it */
++ if (bo->tbo.pin_count)
++ return 0;
++
+ amdgpu_bo_placement_from_domain(bo, domain);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+@@ -733,7 +737,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
+ enum dma_data_direction dir;
+
+ if (unlikely(!ttm->sg)) {
+- pr_err("SG Table of BO is UNEXPECTEDLY NULL");
++ pr_debug("SG Table of BO is NULL");
+ return;
+ }
+
+@@ -1135,7 +1139,8 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
+ int ret;
+
+ ctx->sync = &mem->sync;
+- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
++ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
++ DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(&ctx->exec) {
+ ctx->n_vms = 0;
+ list_for_each_entry(entry, &mem->attachments, list) {
+@@ -1201,8 +1206,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
+ amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+
+ amdgpu_sync_fence(sync, bo_va->last_pt_update);
+-
+- kfd_mem_dmaunmap_attachment(mem, entry);
+ }
+
+ static int update_gpuvm_pte(struct kgd_mem *mem,
+@@ -1257,6 +1260,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
+
+ update_gpuvm_pte_failed:
+ unmap_bo_from_gpuvm(mem, entry, sync);
++ kfd_mem_dmaunmap_attachment(mem, entry);
+ return ret;
+ }
+
+@@ -1785,6 +1789,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ err_bo_create:
+ amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
+ err_reserve_limit:
++ amdgpu_sync_free(&(*mem)->sync);
+ mutex_destroy(&(*mem)->lock);
+ if (gobj)
+ drm_gem_object_put(gobj);
+@@ -1860,8 +1865,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ mem->va + bo_size * (1 + mem->aql_queue));
+
+ /* Remove from VM internal data structures */
+- list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
++ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
++ kfd_mem_dmaunmap_attachment(mem, entry);
+ kfd_mem_detach(entry);
++ }
+
+ ret = unreserve_bo_and_vms(&ctx, false, false);
+
+@@ -2035,6 +2042,37 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ return ret;
+ }
+
++int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
++{
++ struct kfd_mem_attachment *entry;
++ struct amdgpu_vm *vm;
++ int ret;
++
++ vm = drm_priv_to_vm(drm_priv);
++
++ mutex_lock(&mem->lock);
++
++ ret = amdgpu_bo_reserve(mem->bo, true);
++ if (ret)
++ goto out;
++
++ list_for_each_entry(entry, &mem->attachments, list) {
++ if (entry->bo_va->base.vm != vm)
++ continue;
++ if (entry->bo_va->base.bo->tbo.ttm &&
++ !entry->bo_va->base.bo->tbo.ttm->sg)
++ continue;
++
++ kfd_mem_dmaunmap_attachment(mem, entry);
++ }
++
++ amdgpu_bo_unreserve(mem->bo);
++out:
++ mutex_unlock(&mem->lock);
++
++ return ret;
++}
++
+ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
+ {
+@@ -2597,7 +2635,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
+
+ /* keep mem without hmm range at userptr_inval_list */
+ if (!mem->range)
+- continue;
++ continue;
+
+ /* Only check mem with hmm range associated */
+ valid = amdgpu_ttm_tt_get_user_pages_done(
+@@ -2814,9 +2852,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+ if (!attachment->is_mapped)
+ continue;
+
+- if (attachment->bo_va->base.bo->tbo.pin_count)
+- continue;
+-
+ kfd_mem_dmaunmap_attachment(mem, attachment);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+ if (ret) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index dce9e7d5e4ec67..a14a54a734c128 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
+ (u32)le32_to_cpu(*((u32 *)reg_data + j));
+ j++;
+ } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
++ if (i == 0)
++ continue;
+ reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+ reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index fb2681dd6b338c..6521d06c7e4e72 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -211,6 +211,7 @@ union igp_info {
+ struct atom_integrated_system_info_v1_11 v11;
+ struct atom_integrated_system_info_v1_12 v12;
+ struct atom_integrated_system_info_v2_1 v21;
++ struct atom_integrated_system_info_v2_3 v23;
+ };
+
+ union umc_info {
+@@ -359,6 +360,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
++ case 3:
++ mem_channel_number = igp_info->v23.umachannelnumber;
++ if (!mem_channel_number)
++ mem_channel_number = 1;
++ mem_type = igp_info->v23.memorytype;
++ if (mem_type == LpDdr5MemType)
++ mem_channel_width = 32;
++ else
++ mem_channel_width = 64;
++ if (vram_width)
++ *vram_width = mem_channel_number * mem_channel_width;
++ if (vram_type)
++ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
++ break;
+ default:
+ return -EINVAL;
+ }
+@@ -384,7 +399,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ mem_channel_number = vram_info->v30.channel_num;
+ mem_channel_width = vram_info->v30.channel_width;
+ if (vram_width)
+- *vram_width = mem_channel_number * (1 << mem_channel_width);
++ *vram_width = mem_channel_number * 16;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index 38ccec913f0097..f3a09ecb76992b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -29,6 +29,7 @@
+ #include "amdgpu.h"
+ #include "atom.h"
+
++#include <linux/device.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
++ /* ATRM is for on-platform devices only */
++ if (dev_is_removable(&adev->pdev->dev))
++ return false;
++
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index b6298e901cbd4f..9a53ca555e7088 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -183,6 +183,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ }
+
+ rcu_read_unlock();
++ *result = NULL;
+ return -ENOENT;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index b8280be6225d9f..c3d89088123dbd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ struct amdgpu_firmware_info *ucode;
+
+ id = fw_type_convert(cgs_device, type);
++ if (id >= AMDGPU_UCODE_ID_MAXIMUM)
++ return -EINVAL;
++
+ ucode = &adev->firmware.ucode[id];
+ if (ucode->fw == NULL)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index d93a8961274c6a..13c97ba7a820b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -207,7 +207,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ }
+
+ for (i = 0; i < p->nchunks; i++) {
+- struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
++ struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
+ struct drm_amdgpu_cs_chunk user_chunk;
+ uint32_t __user *cdata;
+
+@@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ if (size < sizeof(struct drm_amdgpu_bo_list_in))
+ goto free_partial_kdata;
+
++ /* Only a single BO list is allowed to simplify handling. */
++ if (p->bo_list)
++ goto free_partial_kdata;
++
+ ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
+ if (ret)
+ goto free_partial_kdata;
+@@ -819,7 +823,7 @@ static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
+
+ p->bytes_moved += ctx.bytes_moved;
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+- amdgpu_bo_in_cpu_visible_vram(bo))
++ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ p->bytes_moved_vis += ctx.bytes_moved;
+
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+@@ -1057,6 +1061,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
+ r = amdgpu_ring_parse_cs(ring, p, job, ib);
+ if (r)
+ return r;
++
++ if (ib->sa_bo)
++ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
+@@ -1093,6 +1100,21 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
+ unsigned int i;
+ int r;
+
++ /*
++ * We can't use gang submit with reserved VMIDs when the VM changes
++ * can't be invalidated by more than one engine at the same time.
++ */
++ if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
++ for (i = 0; i < p->gang_size; ++i) {
++ struct drm_sched_entity *entity = p->entities[i];
++ struct drm_gpu_scheduler *sched = entity->rq->sched;
++ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
++
++ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
++ return -EINVAL;
++ }
++ }
++
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (r)
+ return r;
+@@ -1411,7 +1433,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r != -ERESTARTSYS && r != -EAGAIN)
+- DRM_ERROR("Failed to process the buffer list %d!\n", r);
++ DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ goto error_fini;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 76549c2cffebe1..cf8804fa7e9716 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -684,16 +684,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+
+ switch (args->in.op) {
+ case AMDGPU_CTX_OP_ALLOC_CTX:
++ if (args->in.flags)
++ return -EINVAL;
+ r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
+ args->out.alloc.ctx_id = id;
+ break;
+ case AMDGPU_CTX_OP_FREE_CTX:
++ if (args->in.flags)
++ return -EINVAL;
+ r = amdgpu_ctx_free(fpriv, id);
+ break;
+ case AMDGPU_CTX_OP_QUERY_STATE:
++ if (args->in.flags)
++ return -EINVAL;
+ r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
+ break;
+ case AMDGPU_CTX_OP_QUERY_STATE2:
++ if (args->in.flags)
++ return -EINVAL;
+ r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+ break;
+ case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index a4faea4fa0b592..1c2c9ff9d39df0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -638,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
++ if (!adev->didt_rreg)
++ return -EOPNOTSUPP;
++
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+@@ -694,6 +697,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
++ if (!adev->didt_wreg)
++ return -EOPNOTSUPP;
++
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+@@ -748,6 +754,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ ssize_t result = 0;
+ int r;
+
++ if (!adev->smc_rreg)
++ return -EOPNOTSUPP;
++
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+@@ -804,6 +813,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ ssize_t result = 0;
+ int r;
+
++ if (!adev->smc_wreg)
++ return -EOPNOTSUPP;
++
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+@@ -2040,12 +2052,13 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ char reg_offset[11];
+ uint32_t *new = NULL, *tmp = NULL;
+- int ret, i = 0, len = 0;
++ unsigned int len = 0;
++ int ret, i = 0;
+
+ do {
+ memset(reg_offset, 0, 11);
+ if (copy_from_user(reg_offset, buf + len,
+- min(10, ((int)size-len)))) {
++ min(10, (size-len)))) {
+ ret = -EFAULT;
+ goto error_free;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2b8356699f235d..9c99d69b4b083e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -43,6 +43,7 @@
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
++#include <linux/device.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/efi.h>
+@@ -1217,6 +1218,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
+ return true;
+
+ fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
++ release_firmware(adev->pm.fw);
+ if (fw_ver < 0x00160e00)
+ return true;
+ }
+@@ -1547,6 +1549,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
+ } else {
+ pr_info("switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
++ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
+ amdgpu_device_cache_pci_state(pdev);
+ /* Shut down the device */
+@@ -1895,15 +1898,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+
+ adev->firmware.gpu_info_fw = NULL;
+
+- if (adev->mman.discovery_bin) {
+- /*
+- * FIXME: The bounding box is still needed by Navi12, so
+- * temporarily read it from gpu_info firmware. Should be dropped
+- * when DAL no longer needs it.
+- */
+- if (adev->asic_type != CHIP_NAVI12)
+- return 0;
+- }
++ if (adev->mman.discovery_bin)
++ return 0;
+
+ switch (adev->asic_type) {
+ default:
+@@ -2018,7 +2014,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+ */
+ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ {
+- struct drm_device *dev = adev_to_drm(adev);
+ struct pci_dev *parent;
+ int i, r;
+ bool total;
+@@ -2089,7 +2084,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((adev->flags & AMD_IS_APU) == 0) &&
+- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
++ !dev_is_removable(&adev->pdev->dev))
+ adev->flags |= AMD_IS_PX;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+@@ -2103,6 +2098,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
++ if (!amdgpu_device_pcie_dynamic_switching_supported())
++ adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+
+ total = true;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+@@ -3476,10 +3473,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
+ adev->gfx.mcbp = true;
+ else if (amdgpu_mcbp == 0)
+ adev->gfx.mcbp = false;
+- else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+- (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+- adev->gfx.num_gfx_rings)
+- adev->gfx.mcbp = true;
+
+ if (amdgpu_sriov_vf(adev))
+ adev->gfx.mcbp = true;
+@@ -3568,6 +3561,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ mutex_init(&adev->grbm_idx_mutex);
+ mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
++ mutex_init(&adev->virt.rlcg_reg_lock);
+ hash_init(adev->mn_hash);
+ mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
+@@ -3901,7 +3895,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+
+ px = amdgpu_device_supports_px(ddev);
+
+- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_register_client(adev->pdev,
+ &amdgpu_switcheroo_ops, px);
+@@ -4046,7 +4040,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_unregister_client(adev->pdev);
+
+@@ -4102,6 +4096,43 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
+ /*
+ * Suspend & resume.
+ */
++/**
++ * amdgpu_device_prepare - prepare for device suspend
++ *
++ * @dev: drm dev pointer
++ *
++ * Prepare to put the hw in the suspend state (all asics).
++ * Returns 0 for success or an error on failure.
++ * Called at driver suspend.
++ */
++int amdgpu_device_prepare(struct drm_device *dev)
++{
++ struct amdgpu_device *adev = drm_to_adev(dev);
++ int i, r;
++
++ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++ return 0;
++
++ /* Evict the majority of BOs before starting suspend sequence */
++ r = amdgpu_device_evict_resources(adev);
++ if (r)
++ return r;
++
++ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
++
++ for (i = 0; i < adev->num_ip_blocks; i++) {
++ if (!adev->ip_blocks[i].status.valid)
++ continue;
++ if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
++ continue;
++ r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
++ if (r)
++ return r;
++ }
++
++ return 0;
++}
++
+ /**
+ * amdgpu_device_suspend - initiate device suspend
+ *
+@@ -4122,11 +4153,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+
+ adev->in_suspend = true;
+
+- /* Evict the majority of BOs before grabbing the full access */
+- r = amdgpu_device_evict_resources(adev);
+- if (r)
+- return r;
+-
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+@@ -4141,7 +4167,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+
+ cancel_delayed_work_sync(&adev->delayed_init_work);
+- flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
+ amdgpu_ras_suspend(adev);
+
+@@ -4455,7 +4480,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ shadow = vmbo->shadow;
+
+ /* No need to recover an evicted BO */
+- if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
++ if (!shadow->tbo.resource ||
++ shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
+ continue;
+@@ -4661,11 +4687,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
+
+ dev_info(adev->dev, "GPU mode1 reset\n");
+
++ /* Cache the state before bus master disable. The saved config space
++ * values are used in other cases like restore after mode-2 reset.
++ */
++ amdgpu_device_cache_pci_state(adev->pdev);
++
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+- amdgpu_device_cache_pci_state(adev->pdev);
+-
+ if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ dev_info(adev->dev, "GPU smu mode1 reset\n");
+ ret = amdgpu_dpm_mode1_reset(adev);
+@@ -5183,7 +5212,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+- if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
++ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
++ amdgpu_ras_get_context(adev)->reboot) {
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+@@ -5206,7 +5236,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ * to put adev in the 1st position.
+ */
+ INIT_LIST_HEAD(&device_list);
+- if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
++ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (gpu_reset_for_dev_remove && adev->shutdown)
+@@ -5617,7 +5647,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
+ adev->nbio.funcs->enable_doorbell_interrupt)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+
+- if (amdgpu_passthrough(adev) &&
++ if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
+ adev->nbio.funcs->clear_doorbell_interrupt)
+ adev->nbio.funcs->clear_doorbell_interrupt(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 7d5e7ad28ba82a..b04d789bfd1005 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -93,6 +93,7 @@
+ MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+
+ #define mmRCC_CONFIG_MEMSIZE 0xde3
++#define mmMP0_SMN_C2PMSG_33 0x16061
+ #define mmMM_INDEX 0x0
+ #define mmMM_INDEX_HI 0x6
+ #define mmMM_DATA 0x1
+@@ -231,8 +232,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
+ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+ {
+- uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+- int ret = 0;
++ uint64_t vram_size;
++ u32 msg;
++ int i, ret = 0;
++
++ /* It can take up to a second for IFWI init to complete on some dGPUs,
++ * but generally it should be in the 60-100ms range. Normally this starts
++ * as soon as the device gets power so by the time the OS loads this has long
++ * completed. However, when a card is hotplugged via e.g., USB4, we need to
++ * wait for this to complete. Once the C2PMSG is updated, we can
++ * continue.
++ */
++ if (dev_is_removable(&adev->pdev->dev)) {
++ for (i = 0; i < 1000; i++) {
++ msg = RREG32(mmMP0_SMN_C2PMSG_33);
++ if (msg & 0x80000000)
++ break;
++ msleep(1);
++ }
++ }
++ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
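
Illustration only (not from the patch): the hotplug path above polls mmMP0_SMN_C2PMSG_33 up to 1000 times with 1 ms sleeps and proceeds once the ready bit (0x80000000) is set, or after the budget expires. A userspace sketch of that bounded-poll shape, with a fake register standing in for the read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int fake_status_reg;

static unsigned int read_status(void)
{
        /* stand-in for RREG32(mmMP0_SMN_C2PMSG_33) */
        return fake_status_reg;
}

static bool wait_for_ready(int max_polls)
{
        for (int i = 0; i < max_polls; i++) {
                if (read_status() & 0x80000000u)
                        return true;
                usleep(1000);   /* ~msleep(1) */
        }
        return false;           /* timed out; caller decides what to do */
}

int main(void)
{
        fake_status_reg = 0x80000000u;
        puts(wait_for_ready(1000) ? "ready" : "timeout");
        return 0;
}
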
+@@ -1251,11 +1270,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+ * 0b10 : encode is disabled
+ * 0b01 : decode is disabled
+ */
+- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+- ip->revision & 0xc0;
+- ip->revision &= ~0xc0;
+ if (adev->vcn.num_vcn_inst <
+ AMDGPU_MAX_VCN_INSTANCES) {
++ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
++ ip->revision & 0xc0;
+ adev->vcn.num_vcn_inst++;
+ adev->vcn.inst_mask |=
+ (1U << ip->instance_number);
+@@ -1266,6 +1284,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
+ }
++ ip->revision &= ~0xc0;
+ }
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+@@ -1531,7 +1550,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+ break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+- adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
++ adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
+ break;
+ default:
+ dev_err(adev->dev,
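
Aside for illustration: the (uint64_t) cast added above matters because mall_size_per_umc and num_umc are 32-bit, so without it the product is computed, and truncated, in 32 bits before the widening assignment. A small demonstration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mall_size_per_umc = 64 * 1024 * 1024;  /* 64 MiB */
        uint32_t num_umc = 96;

        uint64_t wrong = mall_size_per_umc * num_umc;           /* 32-bit product */
        uint64_t right = (uint64_t)mall_size_per_umc * num_umc; /* widened first */

        printf("wrong: %llu\n", (unsigned long long)wrong);     /* 2147483648 */
        printf("right: %llu\n", (unsigned long long)right);     /* 6442450944 */
        return 0;
}
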
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 363e6a2cad8c20..5fbb9caa7415fd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ adev->have_disp_power_ref = true;
+ return ret;
+ }
+- /* if we have no active crtcs, then drop the power ref
+- * we got before
++ /* if we have no active crtcs, then fall through to
++ * drop the power ref we got before
+ */
+- if (!active && adev->have_disp_power_ref) {
+- pm_runtime_put_autosuspend(dev->dev);
++ if (!active && adev->have_disp_power_ref)
+ adev->have_disp_power_ref = false;
+- }
+-
+ out:
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+@@ -912,8 +909,7 @@ static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+ {
+ u64 micro_tile_mode;
+
+- /* Zero swizzle mode means linear */
+- if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
++ if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
+ return 0;
+
+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+@@ -1037,6 +1033,30 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+ block_width = 256 / format_info->cpp[i];
+ block_height = 1;
+ block_size_log2 = 8;
++ } else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
++ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
++
++ switch (swizzle) {
++ case AMD_FMT_MOD_TILE_GFX12_256B_2D:
++ block_size_log2 = 8;
++ break;
++ case AMD_FMT_MOD_TILE_GFX12_4K_2D:
++ block_size_log2 = 12;
++ break;
++ case AMD_FMT_MOD_TILE_GFX12_64K_2D:
++ block_size_log2 = 16;
++ break;
++ case AMD_FMT_MOD_TILE_GFX12_256K_2D:
++ block_size_log2 = 18;
++ break;
++ default:
++ drm_dbg_kms(rfb->base.dev,
++ "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
++ return -EINVAL;
++ }
++
++ get_block_dimensions(block_size_log2, format_info->cpp[i],
++ &block_width, &block_height);
+ } else {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+@@ -1072,7 +1092,8 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+ return ret;
+ }
+
+- if (AMD_FMT_MOD_GET(DCC, modifier)) {
++ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
++ AMD_FMT_MOD_GET(DCC, modifier)) {
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+ block_size_log2 = get_dcc_block_size(modifier, false, false);
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+index 09f6727e7c73ae..4a8b33f55f6bc3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+@@ -357,8 +357,9 @@ int amdgpu_doorbell_init(struct amdgpu_device *adev);
+ void amdgpu_doorbell_fini(struct amdgpu_device *adev);
+ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev);
+ uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
+- struct amdgpu_bo *db_bo,
+- uint32_t doorbell_index);
++ struct amdgpu_bo *db_bo,
++ uint32_t doorbell_index,
++ uint32_t db_size);
+
+ #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+ #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+index 8eee5d783a92bd..3f3662e8b87103 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+@@ -113,20 +113,25 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
+ *
+ * @adev: amdgpu_device pointer
+ * @db_bo: doorbell object's bo
+- * @db_index: doorbell relative index in this doorbell object
++ * @doorbell_index: doorbell relative index in this doorbell object
++ * @db_size: doorbell size in bytes
+ *
+ * returns doorbell's absolute index in BAR
+ */
+ uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
+- struct amdgpu_bo *db_bo,
+- uint32_t doorbell_index)
++ struct amdgpu_bo *db_bo,
++ uint32_t doorbell_index,
++ uint32_t db_size)
+ {
+ int db_bo_offset;
+
+ db_bo_offset = amdgpu_bo_gpu_offset_no_check(db_bo);
+
+- /* doorbell index is 32 bit but doorbell's size is 64-bit, so *2 */
+- return db_bo_offset / sizeof(u32) + doorbell_index * 2;
++ /* doorbell index is in 32-bit units, but a doorbell can be 32 or
++ * 64 bits wide, so scale by db_size (in bytes) / 4.
++ */
++ return db_bo_offset / sizeof(u32) + doorbell_index *
++ DIV_ROUND_UP(db_size, 4);
+ }
+
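
Worked example (illustrative, not patch content) of the index computation above: the doorbell BAR is addressed in 32-bit units, so the absolute index is the BO offset in dwords plus the relative index scaled by the doorbell width in dwords:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint32_t doorbell_index_on_bar(uint32_t bo_offset_bytes,
                                      uint32_t doorbell_index,
                                      uint32_t db_size_bytes)
{
        return bo_offset_bytes / sizeof(uint32_t) +
               doorbell_index * DIV_ROUND_UP(db_size_bytes, 4);
}

int main(void)
{
        /* 64-bit doorbells: each one advances the index by two dwords */
        printf("%u\n", doorbell_index_on_bar(4096, 3, 8)); /* 1024 + 6 = 1030 */
        /* 32-bit doorbells: each one advances the index by one dword */
        printf("%u\n", doorbell_index_on_bar(4096, 3, 4)); /* 1024 + 3 = 1027 */
        return 0;
}
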
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 81edf66dbea8bd..f9bc38d20ce3ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2195,6 +2195,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
++ pci_wake_from_d3(pdev, TRUE);
++
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+@@ -2384,8 +2386,9 @@ static int amdgpu_pmops_prepare(struct device *dev)
+ /* Return a positive number here so
+ * DPM_FLAG_SMART_SUSPEND works properly
+ */
+- if (amdgpu_device_supports_boco(drm_dev))
+- return pm_runtime_suspended(dev);
++ if (amdgpu_device_supports_boco(drm_dev) &&
++ pm_runtime_suspended(dev))
++ return 1;
+
+ /* if we will not support s3 or s2i for the device
+ * then skip suspend
+@@ -2394,7 +2397,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
+
+- return 0;
++ return amdgpu_device_prepare(drm_dev);
+ }
+
+ static void amdgpu_pmops_complete(struct device *dev)
+@@ -2407,6 +2410,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
++ adev->suspend_complete = false;
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = true;
+ else if (amdgpu_acpi_is_s3_active(adev))
+@@ -2421,6 +2425,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
++ adev->suspend_complete = true;
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+
+@@ -2594,6 +2599,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
++ ret = amdgpu_device_prepare(drm_dev);
++ if (ret)
++ return ret;
+ ret = amdgpu_device_suspend(drm_dev, false);
+ if (ret) {
+ adev->in_runpm = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+index e71768661ca8d2..09a34c7258e226 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+@@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+ * Returns the number of bytes read/written; -errno on error.
+ */
+ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+- u8 *eeprom_buf, u16 buf_size, bool read)
++ u8 *eeprom_buf, u32 buf_size, bool read)
+ {
+ const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
+ u16 limit;
+@@ -225,7 +225,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+
+ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+- u16 bytes)
++ u32 bytes)
+ {
+ return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ true);
+@@ -233,7 +233,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+
+ int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+- u16 bytes)
++ u32 bytes)
+ {
+ return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
+index 6935adb2be1f1c..8083b8253ef433 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
+@@ -28,10 +28,10 @@
+
+ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+- u16 bytes);
++ u32 bytes);
+
+ int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+- u16 bytes);
++ u32 bytes);
+
+ #endif
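
Widening the byte count from u16 to u32 matters because a caller passing a table larger than 64 KiB would otherwise have its length silently truncated at the prototype boundary. A small sketch of that failure mode, assuming a 32-bit length at the call site:

    #include <stdint.h>
    #include <stdio.h>

    /* Old prototype: the length narrows to 16 bits on the call. */
    static uint32_t xfer_u16(uint16_t buf_size) { return buf_size; }
    /* New prototype: the full length survives. */
    static uint32_t xfer_u32(uint32_t buf_size) { return buf_size; }

    int main(void)
    {
            uint32_t bytes = 70000; /* > 64 KiB, e.g. a large bad-page table */

            printf("u16 parameter sees %u bytes\n", xfer_u16(bytes)); /* 4464 */
            printf("u32 parameter sees %u bytes\n", xfer_u32(bytes)); /* 70000 */
            return 0;
    }
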
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+index 6038b5021b27be..792c059ff7b352 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+@@ -105,6 +105,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+ stats.requested_visible_vram/1024UL);
+ drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
+ stats.requested_gtt/1024UL);
++ drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
++ drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
++ drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
++
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ if (!usage[hw_ip])
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+index 73b8cca35bab87..eace2c9d0c3624 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+@@ -34,6 +34,7 @@
+ #include <asm/set_memory.h>
+ #endif
+ #include "amdgpu.h"
++#include "amdgpu_reset.h"
+ #include <drm/drm_drv.h>
+ #include <drm/ttm/ttm_tt.h>
+
+@@ -400,7 +401,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+ return;
+
+ mb();
+- amdgpu_device_flush_hdp(adev, NULL);
++ if (down_read_trylock(&adev->reset_domain->sem)) {
++ amdgpu_device_flush_hdp(adev, NULL);
++ up_read(&adev->reset_domain->sem);
++ }
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+ }
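
The down_read_trylock() here deliberately skips the HDP flush instead of sleeping: if a GPU reset holds the reset-domain semaphore for writing, touching the hardware would race with the reset. A hedged kernel-style sketch of the idiom (the semaphore name is illustrative):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(reset_sem); /* illustrative stand-in */

    static void flush_hw_unless_in_reset(void)
    {
            /* Non-blocking: if a reset holds the lock for writing,
             * skip the hardware access rather than sleeping on it. */
            if (down_read_trylock(&reset_sem)) {
                    /* ... safe to touch the hardware here ... */
                    up_read(&reset_sem);
            }
    }
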
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index ca4d2d430e28c8..a1b15d0d6c4893 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -962,6 +962,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct task_struct *task;
+ struct drm_gem_object *gobj;
++ struct pid *pid;
+ int id;
+
+ /*
+@@ -971,8 +972,9 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+- task = pid_task(file->pid, PIDTYPE_TGID);
+- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
++ pid = rcu_dereference(file->pid);
++ task = pid_task(pid, PIDTYPE_TGID);
++ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
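
file->pid is RCU-managed in the DRM core, so the fix reads it once with rcu_dereference() inside the existing RCU section instead of dereferencing the field twice. A sketch of the access pattern (the surrounding struct is a hypothetical stand-in for drm_file):

    #include <linux/pid.h>
    #include <linux/printk.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    struct file_ctx { /* hypothetical stand-in for drm_file */
            struct pid __rcu *pid;
    };

    static void print_owner(struct file_ctx *ctx)
    {
            struct task_struct *task;
            struct pid *pid;

            rcu_read_lock();
            pid = rcu_dereference(ctx->pid); /* stable for this RCU section */
            task = pid_task(pid, PIDTYPE_TGID);
            pr_info("pid %d command %s\n", pid_nr(pid),
                    task ? task->comm : "<unknown>");
            rcu_read_unlock();
    }
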
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 2382921710ece7..e7b053898f9e90 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -384,9 +384,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring = &kiq->ring;
+ u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+
++#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+ /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+ domain |= AMDGPU_GEM_DOMAIN_VRAM;
++#endif
+
+ /* create MQD for KIQ */
+ if (!adev->enable_mes_kiq && !ring->mqd_obj) {
+@@ -700,8 +702,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++ /* If going to s2idle, no need to wait */
++ if (adev->in_s0ix) {
++ if (!amdgpu_dpm_set_powergating_by_smu(adev,
++ AMD_IP_BLOCK_TYPE_GFX, true))
++ adev->gfx.gfx_off_state = true;
++ } else {
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
++ }
+ }
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
+@@ -784,8 +793,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
+ int r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+- if (!amdgpu_persistent_edc_harvesting_supported(adev))
+- amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
++ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
++ r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
++ if (r)
++ return r;
++ }
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+@@ -929,7 +941,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
+- amdgpu_ring_alloc(ring, 32);
++ r = amdgpu_ring_alloc(ring, 32);
++ if (r)
++ goto failed_unlock;
++
+ amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+@@ -995,7 +1010,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+- amdgpu_ring_alloc(ring, 32);
++ r = amdgpu_ring_alloc(ring, 32);
++ if (r)
++ goto failed_unlock;
++
+ amdgpu_ring_emit_wreg(ring, reg, v);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+@@ -1031,6 +1049,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+
+ failed_undo:
+ amdgpu_ring_undo(ring);
++failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ failed_kiq_write:
+ dev_err(adev->dev, "failed to write reg:%x\n", reg);
+@@ -1175,7 +1194,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ default:
+- break;
++ dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
++ return;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
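
Both KIQ register paths gain the same shape: the amdgpu_ring_alloc() return value is now checked, and the new failed_unlock label releases the spinlock taken just before the allocation. The reduced skeleton of that error path (all names are hypothetical stubs):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ring_lock);

    static int ring_alloc(unsigned int ndw) { return 0; } /* stub */
    static int emit_and_poll(void) { return 0; }          /* stub */
    static void ring_undo(void) { }                       /* stub */

    static int submit_locked(void)
    {
            unsigned long flags;
            int r;

            spin_lock_irqsave(&ring_lock, flags);
            r = ring_alloc(32); /* can fail, e.g. during a reset */
            if (r)
                    goto failed_unlock; /* nothing emitted yet: just unlock */

            r = emit_and_poll();
            if (r)
                    goto failed_undo; /* roll back the allocated ring space */

            spin_unlock_irqrestore(&ring_lock, flags);
            return 0;

    failed_undo:
            ring_undo();
    failed_unlock:
            spin_unlock_irqrestore(&ring_lock, flags);
            return r;
    }
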
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index d78bd97325434f..0b6a0e149f1c4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -650,7 +650,6 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+ struct amdgpu_gmc *gmc = &adev->gmc;
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+- gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(9, 4, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2) ||
+@@ -876,21 +875,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
+ * seconds, so here, we just pick up three parts for emulation.
+ */
+ ret = memcmp(vram_ptr, cptr, 10);
+- if (ret)
+- return ret;
++ if (ret) {
++ ret = -EIO;
++ goto release_buffer;
++ }
+
+ ret = memcmp(vram_ptr + (size / 2), cptr, 10);
+- if (ret)
+- return ret;
++ if (ret) {
++ ret = -EIO;
++ goto release_buffer;
++ }
+
+ ret = memcmp(vram_ptr + size - 10, cptr, 10);
+- if (ret)
+- return ret;
++ if (ret) {
++ ret = -EIO;
++ goto release_buffer;
++ }
+
++release_buffer:
+ amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
+ &vram_ptr);
+
+- return 0;
++ return ret;
+ }
+
+ static ssize_t current_memory_partition_show(
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+index 081267161d4018..57516a8c5db347 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
+ */
+ int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
+ {
++ int r;
++
+ if (bo->kfd_bo)
+- return mmu_interval_notifier_insert(&bo->notifier, current->mm,
++ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ addr, amdgpu_bo_size(bo),
+ &amdgpu_hmm_hsa_ops);
+- return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+- amdgpu_bo_size(bo),
+- &amdgpu_hmm_gfx_ops);
++ else
++ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
++ amdgpu_bo_size(bo),
++ &amdgpu_hmm_gfx_ops);
++ if (r)
++ /*
++ * Make sure amdgpu_hmm_unregister() doesn't call
++ * mmu_interval_notifier_remove() when the notifier isn't properly
++ * initialized.
++ */
++ bo->notifier.mm = NULL;
++
++ return r;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index ff1ea99292fbf0..69dfc699d78b07 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ if (r || !idle)
+ goto error;
+
+- if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
++ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+ r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
+ if (r || !id)
+ goto error;
+@@ -459,6 +459,19 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ return r;
+ }
+
++/**
++ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
++ * @vm: the VM to check
++ * @vmhub: the VMHUB which will be used
++ *
++ * Returns: True if the VM will use a reserved VMID.
++ */
++bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
++{
++ return vm->reserved_vmid[vmhub] ||
++ (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
++}
++
+ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ unsigned vmhub)
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+index fa8c42c83d5d26..240fa675126029 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+@@ -78,6 +78,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
+
+ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id);
++bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ unsigned vmhub);
+ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+index 6c6184f0dbc17e..508f02eb0cf8f9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+@@ -28,7 +28,7 @@
+ #define AMDGPU_IH_MAX_NUM_IVS 32
+
+ #define IH_RING_SIZE (256 * 1024)
+-#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */
++#define IH_SW_RING_SIZE (16 * 1024) /* enough for 512 CAM entries */
+
+ struct amdgpu_device;
+ struct amdgpu_iv_entry;
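
The two comments imply a 32-byte software IH entry: the old 8 KiB ring held 256 CAM entries, and doubling the ring to 16 KiB doubles that to 512. A compile-time check of the arithmetic (the entry size is inferred from the comments, not taken from a header):

    #define IH_SW_RING_SIZE  (16 * 1024)
    #define IH_SW_ENTRY_SIZE 32 /* inferred: 8 KiB previously held 256 entries */

    _Static_assert(IH_SW_RING_SIZE / IH_SW_ENTRY_SIZE == 512,
                   "a 16 KiB software IH ring holds 512 CAM entries");
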
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index fa6d0adcec206c..5978edf7ea71e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -438,6 +438,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+
+ entry.ih = ih;
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
++
++ /*
++ * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
++ * si and tonga), so initialize timestamp and timestamp_src to 0
++ */
++ entry.timestamp = 0;
++ entry.timestamp_src = 0;
++
+ amdgpu_ih_decode_iv(adev, &entry);
+
+ trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 78476bc75b4e1d..99dd86337e8412 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -258,9 +258,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
+ struct dma_fence *fence = NULL;
+ int r;
+
+- /* Ignore soft recovered fences here */
+ r = drm_sched_entity_error(s_entity);
+- if (r && r != -ENODATA)
++ if (r)
+ goto error;
+
+ if (!fence && job->gang_submit)
+@@ -300,12 +299,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ dma_fence_set_error(finished, -ECANCELED);
+
+ if (finished->error < 0) {
+- DRM_INFO("Skip scheduling IBs!\n");
++ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
++ ring->name);
+ } else {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ &fence);
+ if (r)
+- DRM_ERROR("Error scheduling IBs (%d)\n", r);
++ dev_err(adev->dev,
++ "Error scheduling IBs (%d) in ring(%s)", r,
++ ring->name);
+ }
+
+ job->job_run_counter++;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index d30dc0b718c73e..5797055b1148f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -43,6 +43,7 @@
+ #include "amdgpu_gem.h"
+ #include "amdgpu_display.h"
+ #include "amdgpu_ras.h"
++#include "amdgpu_reset.h"
+ #include "amd_pcie.h"
+
+ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
+@@ -722,6 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_READ_MMR_REG: {
++ int ret = 0;
+ unsigned int n, alloc_size;
+ uint32_t *regs;
+ unsigned int se_num = (info->read_mmr_reg.instance >>
+@@ -731,24 +733,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
+ AMDGPU_INFO_MMR_SH_INDEX_MASK;
+
++ if (!down_read_trylock(&adev->reset_domain->sem))
++ return -ENOENT;
++
+ /* set full masks if the userspace set all bits
+ * in the bitfields
+ */
+- if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
++ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
+ se_num = 0xffffffff;
+- else if (se_num >= AMDGPU_GFX_MAX_SE)
+- return -EINVAL;
+- if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
++ } else if (se_num >= AMDGPU_GFX_MAX_SE) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
+ sh_num = 0xffffffff;
+- else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
+- return -EINVAL;
++ } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- if (info->read_mmr_reg.count > 128)
+- return -EINVAL;
++ if (info->read_mmr_reg.count > 128) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
+- if (!regs)
+- return -ENOMEM;
++ if (!regs) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
+ alloc_size = info->read_mmr_reg.count * sizeof(*regs);
+
+ amdgpu_gfx_off_ctrl(adev, false);
+@@ -760,13 +775,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ info->read_mmr_reg.dword_offset + i);
+ kfree(regs);
+ amdgpu_gfx_off_ctrl(adev, true);
+- return -EFAULT;
++ ret = -EFAULT;
++ goto out;
+ }
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
+ n = copy_to_user(out, regs, min(size, alloc_size));
+ kfree(regs);
+- return n ? -EFAULT : 0;
++ ret = (n ? -EFAULT : 0);
++out:
++ up_read(&adev->reset_domain->sem);
++ return ret;
+ }
+ case AMDGPU_INFO_DEV_INFO: {
+ struct drm_amdgpu_info_device *dev_info;
+@@ -1026,7 +1045,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ (void *)&ui32, &ui32_size)) {
+- return -EINVAL;
++ /* fall back to input power for backwards compat */
++ if (amdgpu_dpm_read_sensor(adev,
++ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
++ (void *)&ui32, &ui32_size)) {
++ return -EINVAL;
++ }
+ }
+ ui32 >>= 8;
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index b6015157763af8..c5c55e132af21d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -556,8 +556,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
+ mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
+ mqd_prop.hqd_active = false;
+
++ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++ mutex_lock(&adev->srbm_mutex);
++ amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
++ }
++
+ mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
+
++ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
++ mutex_unlock(&adev->srbm_mutex);
++ }
++
+ amdgpu_bo_unreserve(q->mqd_obj);
+ }
+
+@@ -873,6 +885,11 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
+ op_input.set_shader_debugger.process_context_addr = process_context_addr;
+ op_input.set_shader_debugger.flags.u32all = flags;
++
++	/* use amdgpu_mes_flush_shader_debugger() instead */
++ if (op_input.set_shader_debugger.flags.process_ctx_flush)
++ return -EINVAL;
++
+ op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
+ memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
+ sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
+@@ -892,6 +909,32 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ return r;
+ }
+
++int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
++ uint64_t process_context_addr)
++{
++ struct mes_misc_op_input op_input = {0};
++ int r;
++
++ if (!adev->mes.funcs->misc_op) {
++ DRM_ERROR("mes flush shader debugger is not supported!\n");
++ return -EINVAL;
++ }
++
++ op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
++ op_input.set_shader_debugger.process_context_addr = process_context_addr;
++ op_input.set_shader_debugger.flags.process_ctx_flush = true;
++
++ amdgpu_mes_lock(&adev->mes);
++
++ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
++ if (r)
++		DRM_ERROR("failed to flush_shader_debugger\n");
++
++ amdgpu_mes_unlock(&adev->mes);
++
++ return r;
++}
++
+ static void
+ amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+@@ -993,9 +1036,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ switch (queue_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ ring->funcs = adev->gfx.gfx_ring[0].funcs;
++ ring->me = adev->gfx.gfx_ring[0].me;
++ ring->pipe = adev->gfx.gfx_ring[0].pipe;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ ring->funcs = adev->gfx.compute_ring[0].funcs;
++ ring->me = adev->gfx.compute_ring[0].me;
++ ring->pipe = adev->gfx.compute_ring[0].pipe;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ring->funcs = adev->sdma.instance[0].ring.funcs;
+@@ -1051,6 +1098,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
+ return;
+
+ amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
++ del_timer_sync(&ring->fence_drv.fallback_timer);
+ amdgpu_ring_fini(ring);
+ kfree(ring);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+index a27b424ffe0056..c2c88b772361d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+@@ -291,9 +291,10 @@ struct mes_misc_op_input {
+ uint64_t process_context_addr;
+ union {
+ struct {
+- uint64_t single_memop : 1;
+- uint64_t single_alu_op : 1;
+- uint64_t reserved: 30;
++ uint32_t single_memop : 1;
++ uint32_t single_alu_op : 1;
++ uint32_t reserved: 29;
++ uint32_t process_ctx_flush: 1;
+ };
+ uint32_t u32all;
+ } flags;
+@@ -369,7 +370,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ const uint32_t *tcp_watch_cntl,
+ uint32_t flags,
+ bool trap_en);
+-
++int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
++ uint64_t process_context_addr);
+ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ int queue_type, int idx,
+ struct amdgpu_mes_ctx_data *ctx_data,
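
The bitfield change above is subtle: with uint64_t members the anonymous struct occupied an 8-byte storage unit, so the union's 32-bit u32all view aliased only its low half and there was no clean room for process_ctx_flush. Declaring the bits as uint32_t pins the whole flag set inside one 32-bit word. A userspace demonstration of the size difference:

    #include <stdint.h>
    #include <stdio.h>

    union flags64 {
            struct { uint64_t a : 1, b : 1, rsvd : 30; };
            uint32_t u32all;
    };

    union flags32 {
            struct { uint32_t a : 1, b : 1, rsvd : 29, flush : 1; };
            uint32_t u32all;
    };

    int main(void)
    {
            /* 64-bit bitfields force an 8-byte storage unit... */
            printf("sizeof(flags64) = %zu\n", sizeof(union flags64)); /* 8 */
            /* ...32-bit bitfields keep the union at exactly 4 bytes. */
            printf("sizeof(flags32) = %zu\n", sizeof(union flags32)); /* 4 */
            return 0;
    }
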
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index ace837cfa0a6bc..4e9ae52ef9fdbf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -613,6 +613,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ else
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
+ if (bp->type == ttm_bo_type_kernel)
++ bo->tbo.priority = 2;
++ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
+ bo->tbo.priority = 1;
+
+ if (!bp->destroy)
+@@ -625,8 +627,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
+ return r;
+
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+- amdgpu_bo_in_cpu_visible_vram(bo))
++ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+ ctx.bytes_moved);
+ else
+@@ -1250,7 +1251,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+- * @new_mem: new information of the bufer object
++ * @new_mem: new resource for backing the BO
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
+ * bookkeeping.
+@@ -1261,8 +1262,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+- struct amdgpu_bo *abo;
+ struct ttm_resource *old_mem = bo->resource;
++ struct amdgpu_bo *abo;
+
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return;
+@@ -1273,44 +1274,50 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ amdgpu_bo_kunmap(abo);
+
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+- bo->resource->mem_type != TTM_PL_SYSTEM)
++ old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
+
+- /* remember the eviction */
+- if (evict)
+- atomic64_inc(&adev->num_evictions);
+-
+- /* update statistics */
+- if (!new_mem)
+- return;
+-
+ /* move_notify is called before move happens */
+- trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
++ trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
++ old_mem ? old_mem->mem_type : -1);
+ }
+
+ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+ struct amdgpu_mem_stats *stats)
+ {
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++ struct ttm_resource *res = bo->tbo.resource;
+ uint64_t size = amdgpu_bo_size(bo);
++ struct drm_gem_object *obj;
+ unsigned int domain;
++ bool shared;
+
+ /* Abort if the BO doesn't currently have a backing store */
+- if (!bo->tbo.resource)
++ if (!res)
+ return;
+
+- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
++ obj = &bo->tbo.base;
++ shared = drm_gem_object_is_shared_for_memory_stats(obj);
++
++ domain = amdgpu_mem_type_to_domain(res->mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ stats->vram += size;
+- if (amdgpu_bo_in_cpu_visible_vram(bo))
++ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ stats->visible_vram += size;
++ if (shared)
++ stats->vram_shared += size;
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ stats->gtt += size;
++ if (shared)
++ stats->gtt_shared += size;
+ break;
+ case AMDGPU_GEM_DOMAIN_CPU:
+ default:
+ stats->cpu += size;
++ if (shared)
++ stats->cpu_shared += size;
+ break;
+ }
+
+@@ -1395,10 +1402,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ /* Remember that this BO was accessed by the CPU */
+ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+- if (bo->resource->mem_type != TTM_PL_VRAM)
+- return 0;
+-
+- if (amdgpu_bo_in_cpu_visible_vram(abo))
++ if (amdgpu_res_cpu_visible(adev, bo->resource))
+ return 0;
+
+ /* Can't move a pinned BO to visible VRAM */
+@@ -1422,7 +1426,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+
+ /* this should never happen */
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+- !amdgpu_bo_in_cpu_visible_vram(abo))
++ !amdgpu_res_cpu_visible(adev, bo->resource))
+ return VM_FAULT_SIGBUS;
+
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+@@ -1582,6 +1586,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
+ */
+ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
+ {
++ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct dma_buf_attachment *attachment;
+ struct dma_buf *dma_buf;
+ const char *placement;
+@@ -1590,10 +1595,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
+
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ unsigned int domain;
++
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+- if (amdgpu_bo_in_cpu_visible_vram(bo))
++ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ placement = "VRAM VISIBLE";
+ else
+ placement = "VRAM";
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index d28e21baef16ee..bc42ccbde659ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -138,12 +138,18 @@ struct amdgpu_bo_vm {
+ struct amdgpu_mem_stats {
+ /* current VRAM usage, includes visible VRAM */
+ uint64_t vram;
++ /* current shared VRAM usage, includes visible VRAM */
++ uint64_t vram_shared;
+ /* current visible VRAM usage */
+ uint64_t visible_vram;
+ /* current GTT usage */
+ uint64_t gtt;
++ /* current shared GTT usage */
++ uint64_t gtt_shared;
+ /* current system memory usage */
+ uint64_t cpu;
++ /* current shared system memory usage */
++ uint64_t cpu_shared;
+ /* sum of evicted buffers, includes visible VRAM */
+ uint64_t evicted_vram;
+ /* sum of evicted buffers due to CPU access */
+@@ -244,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
+ }
+
+-/**
+- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
+- */
+-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
+-{
+- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct amdgpu_res_cursor cursor;
+-
+- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
+- return false;
+-
+- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+- while (cursor.remaining) {
+- if (cursor.start < adev->gmc.visible_vram_size)
+- return true;
+-
+- amdgpu_res_next(&cursor, cursor.size);
+- }
+-
+- return false;
+-}
+-
+ /**
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 429ef212c1f25b..a4f9015345ccb5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1336,6 +1336,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+ uint8_t dst_num_links = node_info.num_links;
+
+ hive = amdgpu_get_xgmi_hive(psp->adev);
++ if (WARN_ON(!hive))
++ return;
++
+ list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+ struct psp_xgmi_topology_info *mirror_top_info;
+ int j;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 468a67b302d4c1..9aff579c6abf54 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -166,6 +166,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
+ if (ret)
+ return -EFAULT;
+
++ if (ta_bin_len > PSP_1_MEG)
++ return -EINVAL;
++
+ copy_pos += sizeof(uint32_t);
+
+ ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
+@@ -334,7 +337,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+
+ set_ta_context_funcs(psp, ta_type, &context);
+
+- if (!context->initialized) {
++ if (!context || !context->initialized) {
+ dev_err(adev->dev, "TA is not initialized\n");
+ ret = -EINVAL;
+ goto err_free_shared_buf;
+@@ -362,7 +365,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ }
+ }
+
+- if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
++ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
+ ret = -EFAULT;
+
+ err_free_shared_buf:
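
The invoke path used to copy the TA's response back to offset 0 of the user buffer, clobbering the command words the caller had just written there; copying to &buf[copy_pos] preserves them. A reduced sketch of a read-back at an offset (the layout is assumed, not taken from the driver):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /*
     * Assumed layout: the first copy_pos bytes of ubuf hold the caller's
     * command words; the response must land after them, not at offset 0.
     */
    static int copy_response(char __user *ubuf, size_t copy_pos,
                             const void *shared_buf, size_t shared_len)
    {
            if (copy_to_user(ubuf + copy_pos, shared_buf, shared_len))
                    return -EFAULT;
            return 0;
    }
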
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 163445baa4fc80..7cba98f8bbdca8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1025,6 +1025,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ if (!obj)
+ return -EINVAL;
+
++ if (!info || info->head.block == AMDGPU_RAS_BLOCK_COUNT)
++ return -EINVAL;
++
+ if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
+ amdgpu_ras_get_ecc_info(adev, &err_data);
+ } else {
+@@ -1373,7 +1376,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+- sysfs_remove_file_from_group(&adev->dev->kobj,
++ if (adev->dev->kobj.sd)
++ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &con->badpages_attr.attr,
+ RAS_FS_NAME);
+ }
+@@ -1390,7 +1394,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+ .attrs = attrs,
+ };
+
+- sysfs_remove_group(&adev->dev->kobj, &group);
++ if (adev->dev->kobj.sd)
++ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+ }
+@@ -1437,7 +1442,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+- sysfs_remove_file_from_group(&adev->dev->kobj,
++ if (adev->dev->kobj.sd)
++ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ RAS_FS_NAME);
+ obj->attr_inuse = 0;
+@@ -1774,12 +1780,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
+ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info)
+ {
+- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+- struct ras_ih_data *data = &obj->ih_data;
++ struct ras_manager *obj;
++ struct ras_ih_data *data;
+
++ obj = amdgpu_ras_find_obj(adev, &info->head);
+ if (!obj)
+ return -EINVAL;
+
++ data = &obj->ih_data;
++
+ if (data->inuse == 0)
+ return 0;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+index 595d5e535aca63..9d82701d365bbe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+@@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ return true;
+ case IP_VERSION(13, 0, 0):
++ if (strnstr(atom_ctx->vbios_pn, "D707",
++ sizeof(atom_ctx->vbios_pn)))
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ else
++ control->i2c_address = EEPROM_I2C_MADDR_4;
++ return true;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 80d6e132e4095d..f44b303ae287a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -352,7 +352,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ ring->max_dw = max_dw;
+ ring->hw_prio = hw_prio;
+
+- if (!ring->no_scheduler) {
++ if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
+ hw_ip = ring->funcs->type;
+ num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+@@ -469,8 +469,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+- int r, i;
+ uint32_t value, result, early[3];
++ loff_t i;
++ int r;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+@@ -520,46 +521,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
+ {
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ volatile u32 *mqd;
+- int r;
++ u32 *kbuf;
++ int r, i;
+ uint32_t value, result;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+- result = 0;
++ kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
++ if (!kbuf)
++ return -ENOMEM;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+- return r;
++ goto err_free;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+- if (r) {
+- amdgpu_bo_unreserve(ring->mqd_obj);
+- return r;
+- }
++ if (r)
++ goto err_unreserve;
++
++ /*
++ * Copy to local buffer to avoid put_user(), which might fault
++ * and acquire mmap_sem, under reservation_ww_class_mutex.
++ */
++ for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
++ kbuf[i] = mqd[i];
+
++ amdgpu_bo_kunmap(ring->mqd_obj);
++ amdgpu_bo_unreserve(ring->mqd_obj);
++
++ result = 0;
+ while (size) {
+ if (*pos >= ring->mqd_size)
+- goto done;
++ break;
+
+- value = mqd[*pos/4];
++ value = kbuf[*pos/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+- goto done;
++ goto err_free;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+
+-done:
+- amdgpu_bo_kunmap(ring->mqd_obj);
+- mqd = NULL;
+- amdgpu_bo_unreserve(ring->mqd_obj);
+- if (r)
+- return r;
+-
++ kfree(kbuf);
+ return result;
++
++err_unreserve:
++ amdgpu_bo_unreserve(ring->mqd_obj);
++err_free:
++ kfree(kbuf);
++ return r;
+ }
+
+ static const struct file_operations amdgpu_debugfs_mqd_fops = {
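
The rewritten mqd reader snapshots the mapping into a kmalloc'd buffer before any put_user(): a user-copy can fault and take mmap_lock, which must not nest inside the buffer-object reservation lock. The shape of the fix, reduced to a sketch (locking shown as comments, names hypothetical):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static ssize_t read_snapshot(u32 __user *ubuf, const volatile u32 *mapped,
                                 size_t ndw)
    {
            u32 *kbuf;
            size_t i;

            kbuf = kmalloc_array(ndw, sizeof(u32), GFP_KERNEL);
            if (!kbuf)
                    return -ENOMEM;

            /* reservation lock held only around the snapshot ... */
            for (i = 0; i < ndw; i++)
                    kbuf[i] = mapped[i];
            /* ... and dropped before any faulting copy below */

            if (copy_to_user(ubuf, kbuf, ndw * sizeof(u32))) {
                    kfree(kbuf);
                    return -EFAULT;
            }
            kfree(kbuf);
            return ndw * sizeof(u32);
    }
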
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+index 8ed0e073656f88..41ebe690eeffa7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+@@ -135,6 +135,10 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
+ mutex_unlock(&psp->securedisplay_context.mutex);
+ break;
+ case 2:
++ if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) {
++ dev_err(adev->dev, "Invalid input: %s\n", str);
++ return -EINVAL;
++ }
+ mutex_lock(&psp->securedisplay_context.mutex);
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index dcd8c066bc1f50..1b013a44ca99af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
+
+ /* Never sync to VM updates either. */
+ if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+- owner != AMDGPU_FENCE_OWNER_UNDEFINED)
++ owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
++ owner != AMDGPU_FENCE_OWNER_KFD)
+ return false;
+
+ /* Ignore fences depending on the sync mode */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 4e51dce3aab5d6..8c3fb1562ffef9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -137,7 +137,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+- amdgpu_bo_in_cpu_visible_vram(abo)) {
++ amdgpu_res_cpu_visible(adev, bo->resource)) {
+
+ /* Try evicting to the CPU inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+@@ -408,40 +408,55 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ return r;
+ }
+
+-/*
+- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
++/**
++ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
++ * @adev: amdgpu device
++ * @res: the resource to check
+ *
+- * Called by amdgpu_bo_move()
++ * Returns: true if the full resource is CPU visible, false otherwise.
+ */
+-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
+- struct ttm_resource *mem)
++bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
++ struct ttm_resource *res)
+ {
+- u64 mem_size = (u64)mem->size;
+ struct amdgpu_res_cursor cursor;
+- u64 end;
+
+- if (mem->mem_type == TTM_PL_SYSTEM ||
+- mem->mem_type == TTM_PL_TT)
++ if (!res)
++ return false;
++
++ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
++ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ return true;
+- if (mem->mem_type != TTM_PL_VRAM)
++
++ if (res->mem_type != TTM_PL_VRAM)
+ return false;
+
+- amdgpu_res_first(mem, 0, mem_size, &cursor);
+- end = cursor.start + cursor.size;
++ amdgpu_res_first(res, 0, res->size, &cursor);
+ while (cursor.remaining) {
++ if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
++ return false;
+ amdgpu_res_next(&cursor, cursor.size);
++ }
+
+- if (!cursor.remaining)
+- break;
++ return true;
++}
+
+- /* ttm_resource_ioremap only supports contiguous memory */
+- if (end != cursor.start)
+- return false;
++/*
++ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
++ *
++ * Called by amdgpu_bo_move()
++ */
++static bool amdgpu_res_copyable(struct amdgpu_device *adev,
++ struct ttm_resource *mem)
++{
++ if (!amdgpu_res_cpu_visible(adev, mem))
++ return false;
+
+- end = cursor.start + cursor.size;
+- }
++ /* ttm_resource_ioremap only supports contiguous memory */
++ if (mem->mem_type == TTM_PL_VRAM &&
++ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
++ return false;
+
+- return end <= adev->gmc.visible_vram_size;
++ return true;
+ }
+
+ /*
+@@ -471,14 +486,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
++ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_bo_move_null(bo, new_mem);
+- goto out;
++ return 0;
+ }
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
++ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_bo_move_null(bo, new_mem);
+- goto out;
++ return 0;
+ }
+ if ((old_mem->mem_type == TTM_PL_TT ||
+ old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
+@@ -488,9 +505,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ return r;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
++ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
+- goto out;
++ return 0;
+ }
+
+ if (old_mem->mem_type == AMDGPU_PL_GDS ||
+@@ -502,8 +520,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ new_mem->mem_type == AMDGPU_PL_OA ||
+ new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ /* Nothing to save here */
++ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_bo_move_null(bo, new_mem);
+- goto out;
++ return 0;
+ }
+
+ if (bo->type == ttm_bo_type_device &&
+@@ -515,27 +534,28 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
+- if (adev->mman.buffer_funcs_enabled) {
+- if (((old_mem->mem_type == TTM_PL_SYSTEM &&
+- new_mem->mem_type == TTM_PL_VRAM) ||
+- (old_mem->mem_type == TTM_PL_VRAM &&
+- new_mem->mem_type == TTM_PL_SYSTEM))) {
+- hop->fpfn = 0;
+- hop->lpfn = 0;
+- hop->mem_type = TTM_PL_TT;
+- hop->flags = TTM_PL_FLAG_TEMPORARY;
+- return -EMULTIHOP;
+- }
++ if (adev->mman.buffer_funcs_enabled &&
++ ((old_mem->mem_type == TTM_PL_SYSTEM &&
++ new_mem->mem_type == TTM_PL_VRAM) ||
++ (old_mem->mem_type == TTM_PL_VRAM &&
++ new_mem->mem_type == TTM_PL_SYSTEM))) {
++ hop->fpfn = 0;
++ hop->lpfn = 0;
++ hop->mem_type = TTM_PL_TT;
++ hop->flags = TTM_PL_FLAG_TEMPORARY;
++ return -EMULTIHOP;
++ }
+
++ amdgpu_bo_move_notify(bo, evict, new_mem);
++ if (adev->mman.buffer_funcs_enabled)
+ r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
+- } else {
++ else
+ r = -ENODEV;
+- }
+
+ if (r) {
+ /* Check that all memory is CPU accessible */
+- if (!amdgpu_mem_visible(adev, old_mem) ||
+- !amdgpu_mem_visible(adev, new_mem)) {
++ if (!amdgpu_res_copyable(adev, old_mem) ||
++ !amdgpu_res_copyable(adev, new_mem)) {
+ pr_err("Move buffer fallback to memcpy unavailable\n");
+ return r;
+ }
+@@ -545,10 +565,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ return r;
+ }
+
+-out:
+- /* update statistics */
++ /* update statistics after the move */
++ if (evict)
++ atomic64_inc(&adev->num_evictions);
+ atomic64_add(bo->base.size, &adev->num_bytes_moved);
+- amdgpu_bo_move_notify(bo, evict, new_mem);
+ return 0;
+ }
+
+@@ -561,7 +581,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+- size_t bus_size = (size_t)mem->size;
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+@@ -572,9 +591,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+- /* check if it's visible */
+- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
+- return -EINVAL;
+
+ if (adev->mman.aper_base_kaddr &&
+ mem->placement & TTM_PL_FLAG_CONTIGUOUS)
+@@ -868,6 +884,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ gtt->ttm.dma_address, flags);
+ }
++ gtt->bound = true;
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 65ec82141a8e01..32cf6b6f6efd96 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
+ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
+ uint64_t start);
+
++bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
++ struct ttm_resource *res);
++
+ int amdgpu_ttm_init(struct amdgpu_device *adev);
+ void amdgpu_ttm_fini(struct amdgpu_device *adev);
+ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 8beefc045e1451..bef7541770641c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -1326,9 +1326,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+
+ if (err)
+ return -ENODEV;
++
+ err = amdgpu_ucode_validate(*fw);
+- if (err)
++ if (err) {
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
++ release_firmware(*fw);
++ *fw = NULL;
++ }
+
+ return err;
+ }
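
Without the release, firmware that loaded but failed validation both leaked and left *fw pointing at a blob the caller would treat as usable. The corrected request/validate/release shape, with validate_fw standing in for the amdgpu-specific check:

    #include <linux/errno.h>
    #include <linux/firmware.h>

    static int load_and_validate(const struct firmware **fw, const char *name,
                                 struct device *dev,
                                 int (*validate_fw)(const struct firmware *))
    {
            int err;

            err = request_firmware(fw, name, dev);
            if (err)
                    return -ENODEV;

            err = validate_fw(*fw);
            if (err) {
                    /* don't hand a rejected blob back to the caller */
                    release_firmware(*fw);
                    *fw = NULL;
            }
            return err;
    }
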
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 1904edf6840716..88a3aa36b41d77 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -742,7 +742,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
+ uint32_t created = 0;
+ uint32_t allocated = 0;
+ uint32_t tmp, handle = 0;
+- uint32_t *size = &tmp;
++ uint32_t dummy = 0xffffffff;
++ uint32_t *size = &dummy;
+ unsigned int idx;
+ int i, r = 0;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 36b55d2bd51a91..111350ef1b742a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -135,6 +135,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ }
+ }
+
++ /* from vcn4 and above, only unified queue is used */
++ adev->vcn.using_unified_queue =
++ adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
++
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+
+@@ -259,18 +263,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-/* from vcn4 and above, only unified queue is used */
+-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+-{
+- struct amdgpu_device *adev = ring->adev;
+- bool ret = false;
+-
+- if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
+- ret = true;
+-
+- return ret;
+-}
+-
+ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
+ {
+ bool ret = false;
+@@ -292,8 +284,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ void *ptr;
+ int i, idx;
+
++ bool in_ras_intr = amdgpu_ras_intr_triggered();
++
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
++	/*
++	 * err_event_athub will corrupt the VCPU buffer, so we need to
++	 * restore fw data and clear the buffer in amdgpu_vcn_resume().
++	 */
++ if (in_ras_intr)
++ return 0;
++
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+@@ -373,7 +372,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+
+- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
++ !adev->vcn.using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (fence[j] ||
+@@ -419,7 +420,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+
+- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
++ !adev->vcn.using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+@@ -445,8 +448,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+
+ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
+ {
++ struct amdgpu_device *adev = ring->adev;
++
++	/* Only set DPG pause for VCN3 or below; VCN4 and above are handled by FW */
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
++ !adev->vcn.using_unified_queue)
+ atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&ring->adev->vcn.total_submission_cnt);
+@@ -700,12 +707,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+- bool sq = amdgpu_vcn_using_unified_queue(ring);
+ uint32_t *ib_checksum;
+ uint32_t ib_pack_in_dw;
+ int i, r;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+@@ -718,7 +724,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ ib->length_dw = 0;
+
+ /* single queue headers */
+- if (sq) {
++ if (adev->vcn.using_unified_queue) {
+ ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ + 4 + 2; /* engine info + decoding ib in dw */
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+@@ -737,7 +743,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+@@ -827,15 +833,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ struct dma_fence **fence)
+ {
+ unsigned int ib_size_dw = 16;
++ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
+ uint64_t addr;
+- bool sq = amdgpu_vcn_using_unified_queue(ring);
+ int i, r;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+@@ -849,7 +855,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+
+ ib->length_dw = 0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
+ ib->ptr[ib->length_dw++] = 0x00000018;
+@@ -871,7 +877,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+@@ -894,15 +900,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ struct dma_fence **fence)
+ {
+ unsigned int ib_size_dw = 16;
++ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
+ uint64_t addr;
+- bool sq = amdgpu_vcn_using_unified_queue(ring);
+ int i, r;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+@@ -916,7 +922,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+
+ ib->length_dw = 0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
+ ib->ptr[ib->length_dw++] = 0x00000018;
+@@ -938,7 +944,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index a3eed90b6af090..3dc2cffdae4fca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -284,6 +284,7 @@ struct amdgpu_vcn {
+
+ uint16_t inst_mask;
+ uint8_t num_inst_per_aid;
++ bool using_unified_queue;
+ };
+
+ struct amdgpu_fw_shared_rb_ptrs_struct {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 96857ae7fb5bc6..22575422ca7ec1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -137,8 +137,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
+
+ if (virt->ops && virt->ops->req_full_gpu) {
+ r = virt->ops->req_full_gpu(adev, init);
+- if (r)
++ if (r) {
++ adev->no_hw_access = true;
+ return r;
++ }
+
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ }
+@@ -615,7 +617,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+ vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
+ vf2pf_info->checksum =
+ amd_sriov_msg_checksum(
+- vf2pf_info, vf2pf_info->header.size, 0, 0);
++ vf2pf_info, sizeof(*vf2pf_info), 0, 0);
+
+ return 0;
+ }
+@@ -998,11 +1000,17 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
+ return 0;
+ }
+
++ if (amdgpu_device_skip_hw_access(adev))
++ return 0;
++
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
+ scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
+ scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+ scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
++
++ mutex_lock(&adev->virt.rlcg_reg_lock);
++
+ if (reg_access_ctrl->spare_int)
+ spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+
+@@ -1058,6 +1066,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
+ }
+
+ ret = readl(scratch_reg0);
++
++ mutex_unlock(&adev->virt.rlcg_reg_lock);
++
+ return ret;
+ }
+
+@@ -1067,6 +1078,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ {
+ u32 rlcg_flag;
+
++ if (amdgpu_device_skip_hw_access(adev))
++ return;
++
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
+ amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
+@@ -1084,6 +1098,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ {
+ u32 rlcg_flag;
+
++ if (amdgpu_device_skip_hw_access(adev))
++ return 0;
++
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
+ return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
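
The scratch registers driven by the RLCG path are shared state: two concurrent callers could interleave their writes to scratch_reg0-3 and read back each other's results. The new rlcg_reg_lock makes the whole write/trigger/poll/read sequence atomic with respect to other callers. The guarded shape, as a sketch with a stub sequence:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(rlcg_reg_lock); /* one lock for the shared scratch regs */

    static u32 do_scratch_sequence(u32 offset, u32 value) { return value; } /* stub */

    static u32 rlcg_reg_rw(u32 offset, u32 value)
    {
            u32 ret;

            mutex_lock(&rlcg_reg_lock);
            /* write scratch_reg0..3, ring the spare interrupt and poll for
             * completion; the sequence must appear atomic to other callers */
            ret = do_scratch_sequence(offset, value);
            mutex_unlock(&rlcg_reg_lock);

            return ret;
    }
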
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index fabb83e9d9aec7..23b6efa9d25df8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -263,6 +263,8 @@ struct amdgpu_virt {
+
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
++
++ struct mutex rlcg_reg_lock;
+ };
+
+ struct amdgpu_video_codec_info;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index 7148a216ae2fe4..f417c3393a0904 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -2,6 +2,7 @@
+
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_simple_kms_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_vblank.h>
+
+ #include "amdgpu.h"
+@@ -239,6 +240,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
++ if (!mode)
++ continue;
+ drm_mode_probed_add(connector, mode);
+ }
+
+@@ -311,7 +314,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
+ return 0;
+ }
+ afb = to_amdgpu_framebuffer(new_state->fb);
+- obj = new_state->fb->obj[0];
++
++ obj = drm_gem_fb_get_obj(new_state->fb, 0);
++ if (!obj) {
++ DRM_ERROR("Failed to get obj from framebuffer\n");
++ return -EINVAL;
++ }
++
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+
+@@ -365,12 +374,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+ {
+ struct amdgpu_bo *rbo;
++ struct drm_gem_object *obj;
+ int r;
+
+ if (!old_state->fb)
+ return;
+
+- rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
++ obj = drm_gem_fb_get_obj(old_state->fb, 0);
++ if (!obj) {
++ DRM_ERROR("Failed to get obj from framebuffer\n");
++ return;
++ }
++
++ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
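
drm_gem_fb_get_obj() can return NULL when the framebuffer has no object at the requested plane, so both the prepare and cleanup paths now check the result instead of dereferencing fb->obj[0] directly. A sketch of the guarded lookup:

    #include <drm/drm_framebuffer.h>
    #include <drm/drm_gem_framebuffer_helper.h>
    #include <drm/drm_print.h>

    static struct drm_gem_object *fb_obj_or_null(struct drm_framebuffer *fb)
    {
            struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, 0);

            if (!obj)
                    DRM_ERROR("Failed to get obj from framebuffer\n");
            return obj; /* caller must handle NULL */
    }
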
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 82f25996ff5ef6..f02b6232680f33 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
+ list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
+ struct amdgpu_bo *bo = vm_bo->bo;
+
++ vm_bo->moved = true;
+ if (!bo || bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ else if (bo->parent)
+@@ -417,7 +418,7 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ if (!vm)
+ return result;
+
+- result += vm->generation;
++ result += lower_32_bits(vm->generation);
+ /* Add one if the page tables will be re-generated on next CS */
+ if (drm_sched_entity_error(&vm->delayed))
+ ++result;
+@@ -442,13 +443,14 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
+ {
++ uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *shadow;
+ struct amdgpu_bo *bo;
+ int r;
+
+- if (drm_sched_entity_error(&vm->delayed)) {
+- ++vm->generation;
++ if (vm->generation != new_vm_generation) {
++ vm->generation = new_vm_generation;
+ amdgpu_vm_bo_reset_state_machine(vm);
+ amdgpu_vm_fini_entities(vm);
+ r = amdgpu_vm_init_entities(adev, vm);
+@@ -1095,8 +1097,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
+ bo = gem_to_amdgpu_bo(gobj);
+ }
+ mem = bo->tbo.resource;
+- if (mem->mem_type == TTM_PL_TT ||
+- mem->mem_type == AMDGPU_PL_PREEMPT)
++ if (mem && (mem->mem_type == TTM_PL_TT ||
++ mem->mem_type == AMDGPU_PL_PREEMPT))
+ pages_addr = bo->tbo.ttm->dma_address;
+ }
+
+@@ -1499,6 +1501,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+ }
+
++/* Validate operation parameters to prevent potential abuse */
++static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
++ struct amdgpu_bo *bo,
++ uint64_t saddr,
++ uint64_t offset,
++ uint64_t size)
++{
++ uint64_t tmp, lpfn;
++
++ if (saddr & AMDGPU_GPU_PAGE_MASK
++ || offset & AMDGPU_GPU_PAGE_MASK
++ || size & AMDGPU_GPU_PAGE_MASK)
++ return -EINVAL;
++
++ if (check_add_overflow(saddr, size, &tmp)
++ || check_add_overflow(offset, size, &tmp)
++ || size == 0 /* which also leads to end < begin */)
++ return -EINVAL;
++
++	/* make sure the object fits at this offset */
++ if (bo && offset + size > amdgpu_bo_size(bo))
++ return -EINVAL;
++
++	/* Ensure the last pfn does not exceed max_pfn */
++ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
++ if (lpfn >= adev->vm_manager.max_pfn)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /**
+ * amdgpu_vm_bo_map - map bo inside a vm
+ *
+@@ -1525,21 +1558,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ uint64_t eaddr;
++ int r;
+
+- /* validate the parameters */
+- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+- return -EINVAL;
+- if (saddr + size <= saddr || offset + size <= offset)
+- return -EINVAL;
+-
+- /* make sure object fit at this offset */
+- eaddr = saddr + size - 1;
+- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+- return -EINVAL;
++ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++ if (r)
++ return r;
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+- eaddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ if (tmp) {
+@@ -1592,17 +1618,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ uint64_t eaddr;
+ int r;
+
+- /* validate the parameters */
+- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+- return -EINVAL;
+- if (saddr + size <= saddr || offset + size <= offset)
+- return -EINVAL;
+-
+- /* make sure object fit at this offset */
+- eaddr = saddr + size - 1;
+- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
+- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
+- return -EINVAL;
++ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
++ if (r)
++ return r;
+
+ /* Allocate all the needed memory */
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+@@ -1616,7 +1634,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ }
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+- eaddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+
+ mapping->start = saddr;
+ mapping->last = eaddr;
+@@ -1703,10 +1721,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
+ LIST_HEAD(removed);
+ uint64_t eaddr;
++ int r;
++
++ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
++ if (r)
++ return r;
+
+- eaddr = saddr + size - 1;
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+- eaddr /= AMDGPU_GPU_PAGE_SIZE;
++ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
+
+ /* Allocate all the needed memory */
+ before = kzalloc(sizeof(*before), GFP_KERNEL);
+@@ -2125,7 +2147,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+ * Returns:
+ * 0 for success, error for failure.
+ */
+-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
++int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++ int32_t xcp_id)
+ {
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_bo_vm *root;
+@@ -2144,6 +2167,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ INIT_LIST_HEAD(&vm->done);
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
++ INIT_KFIFO(vm->faults);
+
+ r = amdgpu_vm_init_entities(adev, vm);
+ if (r)
+@@ -2169,7 +2193,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ vm->last_update = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
+ vm->last_tlb_flush = dma_fence_get_stub();
+- vm->generation = 0;
++ vm->generation = amdgpu_vm_generation(adev, NULL);
+
+ mutex_init(&vm->eviction_lock);
+ vm->evicting = false;
+@@ -2178,34 +2202,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ false, &root, xcp_id);
+ if (r)
+ goto error_free_delayed;
+- root_bo = &root->bo;
++
++ root_bo = amdgpu_bo_ref(&root->bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+- if (r)
+- goto error_free_root;
++ if (r) {
++ amdgpu_bo_unref(&root->shadow);
++ amdgpu_bo_unref(&root_bo);
++ goto error_free_delayed;
++ }
+
++ amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
+ r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
+ if (r)
+- goto error_unreserve;
+-
+- amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
++ goto error_free_root;
+
+ r = amdgpu_vm_pt_clear(adev, vm, root, false);
+ if (r)
+- goto error_unreserve;
++ goto error_free_root;
+
+ amdgpu_bo_unreserve(vm->root.bo);
+-
+- INIT_KFIFO(vm->faults);
++ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+
+-error_unreserve:
+- amdgpu_bo_unreserve(vm->root.bo);
+-
+ error_free_root:
+- amdgpu_bo_unref(&root->shadow);
++ amdgpu_vm_pt_free_root(adev, vm);
++ amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_bo_unref(&root_bo);
+- vm->root.bo = NULL;
+
+ error_free_delayed:
+ dma_fence_put(vm->last_tlb_flush);
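The amdgpu_vm_verify_parameters() helper introduced above centralises the mapping checks and swaps the old hand-rolled wrap-around tests for check_add_overflow(). A minimal standalone sketch of the same validation, using the __builtin_add_overflow primitive that the kernel macro is built on (the page size and pfn limit here are illustrative stand-ins, not the real AMDGPU constants):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12                      /* illustrative: 4 KiB GPU pages */
#define GPU_PAGE_SIZE  (1ULL << GPU_PAGE_SHIFT)
#define GPU_PAGE_MASK  (GPU_PAGE_SIZE - 1)
#define MAX_PFN        (1ULL << 36)            /* illustrative address-space limit */

/* Mirrors amdgpu_vm_verify_parameters(): reject unaligned, zero-sized,
 * wrapping, or out-of-range [saddr, saddr + size) mappings. */
static int verify_mapping(uint64_t saddr, uint64_t offset, uint64_t size,
                          uint64_t bo_size /* 0 = no backing object */)
{
    uint64_t tmp, lpfn;

    if ((saddr & GPU_PAGE_MASK) || (offset & GPU_PAGE_MASK) ||
        (size & GPU_PAGE_MASK))
        return -1;                 /* must be page aligned */

    if (__builtin_add_overflow(saddr, size, &tmp) ||
        __builtin_add_overflow(offset, size, &tmp) ||
        size == 0)                 /* zero size would make end < begin */
        return -1;

    if (bo_size && offset + size > bo_size)
        return -1;                 /* mapping must fit inside the object */

    lpfn = (saddr + size - 1) >> GPU_PAGE_SHIFT;
    if (lpfn >= MAX_PFN)
        return -1;                 /* last page frame must stay in range */

    return 0;
}

int main(void)
{
    printf("%d\n", verify_mapping(0x1000, 0, 0x2000, 0x4000));          /* 0: ok */
    printf("%d\n", verify_mapping(UINT64_MAX & ~GPU_PAGE_MASK,
                                  0, 0x1000, 0));                       /* -1: wraps */
    return 0;
}

The related eaddr changes in the callers follow the same idea: the end page is derived as saddr + (size - 1) / page_size after saddr has already been converted to a page index, so the byte-level end address is never recomputed.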
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+index 96d601e209b8bd..026a3db9472983 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -642,13 +642,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
+
+ if (!entry->bo)
+ return;
++
++ entry->bo->vm_bo = NULL;
+ shadow = amdgpu_bo_shadowed(entry->bo);
+ if (shadow) {
+ ttm_bo_set_bulk_move(&shadow->tbo, NULL);
+ amdgpu_bo_unref(&shadow);
+ }
+ ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
+- entry->bo->vm_bo = NULL;
+
+ spin_lock(&entry->vm->status_lock);
+ list_del(&entry->vm_status);
+@@ -765,11 +766,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_bo_base *entry)
+ {
+ struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
+- struct amdgpu_bo *bo = parent->bo, *pbo;
++ struct amdgpu_bo *bo, *pbo;
+ struct amdgpu_vm *vm = params->vm;
+ uint64_t pde, pt, flags;
+ unsigned int level;
+
++ if (WARN_ON(!parent))
++ return -EINVAL;
++
++ bo = parent->bo;
+ for (level = 0, pbo = bo->parent; pbo; ++level)
+ pbo = pbo->parent;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+index 349416e176a127..1cf1498204678b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+@@ -102,6 +102,11 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
+ if (!r)
+ r = amdgpu_sync_push_to_job(&sync, p->job);
+ amdgpu_sync_free(&sync);
++
++ if (r) {
++ p->num_dw_left = 0;
++ amdgpu_job_free(p->job);
++ }
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+index 9a1036aeec2a0b..9142238e7791a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+@@ -179,6 +179,6 @@ amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
+
+ #define for_each_xcp(xcp_mgr, xcp, i) \
+ for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
+- xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
++ ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
+
+ #endif
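The one-character fix to for_each_xcp() above matters because amdgpu_get_next_xcp() apparently searches for the next valid partition at an index greater than or equal to *from and leaves the found index in *from; without the ++i the macro would re-query the same slot forever. A self-contained model of the idiom (all names hypothetical):

#include <stdio.h>

struct xcp { int id; int valid; };

static struct xcp table[] = {
    { 0, 1 }, { 1, 0 }, { 2, 1 }, { 3, 1 },
};
#define NUM_XCP (int)(sizeof(table) / sizeof(table[0]))

/* Returns the first valid partition at index >= *from, or NULL;
 * on success *from is set to the index that was found. */
static struct xcp *get_next_xcp(int *from)
{
    for (int i = *from; i < NUM_XCP; i++) {
        if (table[i].valid) {
            *from = i;
            return &table[i];
        }
    }
    return NULL;
}

/* The ++(i) mirrors the fix: advance past the hit before searching again. */
#define for_each_xcp(xcp, i) \
    for ((i) = 0, (xcp) = get_next_xcp(&(i)); (xcp); \
         ++(i), (xcp) = get_next_xcp(&(i)))

int main(void)
{
    struct xcp *xcp;
    int i;

    for_each_xcp(xcp, i)
        printf("xcp %d\n", xcp->id);   /* prints 0, 2, 3 and terminates */
    return 0;
}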
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+index 104a5ad8397da7..198687545407e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+@@ -209,7 +209,7 @@ struct amd_sriov_msg_pf2vf_info {
+ uint32_t pcie_atomic_ops_support_flags;
+ /* reserved */
+ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
+-};
++} __packed;
+
+ struct amd_sriov_msg_vf2pf_info_header {
+ /* the total structure size in byte */
+@@ -267,7 +267,7 @@ struct amd_sriov_msg_vf2pf_info {
+
+ /* reserved */
+ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
+-};
++} __packed;
+
+ /* mailbox message send from guest to host */
+ enum amd_sriov_mailbox_request_message {
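Tagging the PF2VF/VF2PF message structures __packed fixes their layout so host and guest agree on every field offset regardless of how either side's compiler would pad the structs. A short demonstration of the effect (members invented for illustration):

#include <stdio.h>
#include <stdint.h>

struct msg_plain {            /* compiler may pad 'flag' to align 'value' */
    uint8_t  flag;
    uint32_t value;
};

struct msg_packed {           /* packed: no padding, stable wire layout */
    uint8_t  flag;
    uint32_t value;
} __attribute__((packed));

int main(void)
{
    printf("plain:  %zu bytes\n", sizeof(struct msg_plain));   /* typically 8 */
    printf("packed: %zu bytes\n", sizeof(struct msg_packed));  /* 5 */
    return 0;
}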
+diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+index d0fc62784e8217..6c6f9d9b5d8978 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
++++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+@@ -61,6 +61,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
+ }
+
++static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
++{
++ return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
++}
++
+ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx, struct amdgpu_ring *ring)
+ {
+@@ -86,7 +91,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+- if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
++ if (aqua_vanjaram_xcp_vcn_shared(adev))
+ inst_mask = 1 << (inst_idx * 2);
+ break;
+ default:
+@@ -139,10 +144,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
+
+ aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+- /* VCN is shared by two partitions under CPX MODE */
++ /* VCN may be shared by two partitions under CPX MODE in certain
++ * configs.
++ */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+- adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
++ aqua_vanjaram_xcp_vcn_shared(adev))
+ aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+@@ -493,6 +500,12 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+
+ if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
+ mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
++ if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
++ dev_err(adev->dev,
++ "Invalid config, no compatible compute partition mode found, available memory partitions: %d",
++ adev->gmc.num_mem_partitions);
++ return -EINVAL;
++ }
+ } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
+ dev_err(adev->dev,
+ "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index 9f63ddb89b75c1..1195d37f19fc5c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -313,7 +313,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ DEBUG("IMM 0x%02X\n", val);
+ return val;
+ }
+- return 0;
++ break;
+ case ATOM_ARG_PLL:
+ idx = U8(*ptr);
+ (*ptr)++;
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index d95b2dc7806341..157e898dc3820d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -2065,26 +2065,29 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+- int edid_size =
+- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+- edid = kmalloc(edid_size, GFP_KERNEL);
++ int edid_size;
++
++ if (fake_edid_record->ucFakeEDIDLength == 128)
++ edid_size = fake_edid_record->ucFakeEDIDLength;
++ else
++ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
++ edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0],
++ edid_size, GFP_KERNEL);
+ if (edid) {
+- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+- fake_edid_record->ucFakeEDIDLength);
+-
+ if (drm_edid_is_valid(edid)) {
+ adev->mode_info.bios_hardcoded_edid = edid;
+ adev->mode_info.bios_hardcoded_edid_size = edid_size;
+- } else
++ } else {
+ kfree(edid);
++ }
+ }
++ record += struct_size(fake_edid_record,
++ ucFakeEDIDString,
++ edid_size);
++ } else {
++				/* an empty fake EDID record must be 3 bytes long */
++ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+ }
+- record += fake_edid_record->ucFakeEDIDLength ?
+- struct_size(fake_edid_record,
+- ucFakeEDIDString,
+- fake_edid_record->ucFakeEDIDLength) :
+- /* empty fake edid record must be 3 bytes long */
+- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
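The fake-EDID rework reads ucFakeEDIDLength the way the fix interprets the VBIOS encoding: a value of exactly 128 is a byte count, anything else counts 128-byte EDID blocks; it also collapses kmalloc()+memcpy() into kmemdup(). A userspace model of the size rule (the field semantics are taken from the hunk, everything around them is invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EDID_BLOCK 128

/* ucFakeEDIDLength semantics per the fix: 128 means "128 bytes",
 * any other value means "that many 128-byte blocks". */
static size_t fake_edid_size(unsigned char len)
{
    return len == EDID_BLOCK ? (size_t)len : (size_t)len * EDID_BLOCK;
}

/* Userspace stand-in for kmemdup(). */
static void *memdup(const void *src, size_t n)
{
    void *p = malloc(n);
    return p ? memcpy(p, src, n) : NULL;
}

int main(void)
{
    printf("%zu %zu\n", fake_edid_size(128), fake_edid_size(2)); /* 128 256 */

    unsigned char raw[256] = { 0x00, 0xff };
    void *edid = memdup(raw, fake_edid_size(2));
    free(edid);
    return 0;
}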
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+index 6f7c031dd197a2..f24e34dc33d1de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+@@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(mmIH_RB_CNTL, tmp);
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
++ WREG32(mmIH_RB_CNTL, tmp);
+ }
+ return (wptr & ih->ptr_mask);
+ }
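This cik_ih hunk, repeated for cz_ih, iceland_ih, ih_v6_x and navi10_ih below, acknowledges a ring-buffer overflow by writing WPTR_OVERFLOW_CLEAR to 1 and then immediately back to 0; per the comments in the hunks, leaving the bit set would mask any later overflow. A toy model of the sequence against a simulated register (the bit position and register helpers are stand-ins, not the real MMIO API):

#include <stdio.h>
#include <stdint.h>

#define WPTR_OVERFLOW_CLEAR (1u << 31)   /* illustrative bit position */

static uint32_t ih_rb_cntl;              /* simulated IH_RB_CNTL register */

static uint32_t RREG32(void) { return ih_rb_cntl; }
static void WREG32(uint32_t v) { ih_rb_cntl = v; }

static void ack_overflow(void)
{
    uint32_t tmp = RREG32();

    tmp |= WPTR_OVERFLOW_CLEAR;          /* acknowledge the overflow */
    WREG32(tmp);

    /* Unset the bit immediately so a future overflow produces a fresh
     * transition; leaving it set would hide new overflows. */
    tmp &= ~WPTR_OVERFLOW_CLEAR;
    WREG32(tmp);
}

int main(void)
{
    ack_overflow();
    printf("IH_RB_CNTL = 0x%08x\n", ih_rb_cntl);   /* clear bit left at 0 */
    return 0;
}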
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index b8c47e0cf37ad5..c19681492efa74 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32(mmIH_RB_CNTL, tmp);
+
+ out:
+ return (wptr & ih->ptr_mask);
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+index 5dfab80ffff213..cd298556f7a608 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
+ int fb_channel_number;
+
+ fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
++ if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
++ fb_channel_number = 0;
+
+ return df_v1_7_channel_number[fb_channel_number];
+ }
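df_v1_7_get_hbm_channel_number() now clamps the index read from hardware before using it, so an unexpected register value can no longer read past df_v1_7_channel_number[]. The clamp-before-index idiom in isolation (table contents invented):

#include <stdio.h>

static const int channel_numbers[] = { 1, 2, 0, 4, 0, 8 };
#define TABLE_LEN (int)(sizeof(channel_numbers) / sizeof(channel_numbers[0]))

static int get_channel_number(int fb_channel_number)
{
    /* Fall back to index 0 rather than reading past the table. */
    if (fb_channel_number < 0 || fb_channel_number >= TABLE_LEN)
        fb_channel_number = 0;
    return channel_numbers[fb_channel_number];
}

int main(void)
{
    printf("%d %d\n", get_channel_number(3), get_channel_number(99)); /* 4 1 */
    return 0;
}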
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 9032d7a24d7cd7..53c99bc6abb333 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3989,16 +3989,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+
+ if (!amdgpu_sriov_vf(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+- /* don't check this. There are apparently firmwares in the wild with
+- * incorrect size in the header
+- */
+- if (err == -ENODEV)
+- goto out;
++ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+- dev_dbg(adev->dev,
+- "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
+- fw_name);
++ goto out;
++
++		/* Don't validate this firmware. There are apparently firmware
++		 * images in the wild with an incorrect size in the header.
++ */
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+@@ -4023,8 +4020,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+ err = 0;
+ adev->gfx.mec2_fw = NULL;
+ }
+- amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+- amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
+
+ gfx_v10_0_check_fw_write_wait(adev);
+ out:
+@@ -6457,11 +6452,11 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore mqd with the backup copy */
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset the ring */
+ ring->wptr = 0;
+ *ring->wptr_cpu_addr = 0;
+@@ -6575,7 +6570,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ #ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+ #endif
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+@@ -6735,7 +6730,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
+ /* reset MQD to a clean status */
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+
+ /* reset ring buffer */
+ ring->wptr = 0;
+@@ -6758,7 +6753,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ }
+
+ return 0;
+@@ -6779,11 +6774,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore MQD to a clean status */
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset ring buffer */
+ ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+@@ -7897,22 +7892,15 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+ unsigned int vmid)
+ {
+- u32 reg, data;
++ u32 data;
+
+ /* not for *_SOC15 */
+- reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+- if (amdgpu_sriov_is_pp_one_vf(adev))
+- data = RREG32_NO_KIQ(reg);
+- else
+- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
++ data = RREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+- if (amdgpu_sriov_is_pp_one_vf(adev))
+- WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+- else
+- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
++ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ }
+
+ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+@@ -8760,7 +8748,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
++ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
++ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ }
+
+ static void
+@@ -9162,7 +9152,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
+ 7 + /* PIPELINE_SYNC */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+- 2 + /* VM_FLUSH */
++ 4 + /* VM_FLUSH */
+ 8 + /* FENCE for VM_FLUSH */
+ 20 + /* GDS switch */
+ 4 + /* double SWITCH_BUFFER,
+@@ -9253,7 +9243,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
+ 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+- 2 + /* gfx_v10_0_ring_emit_vm_flush */
+ 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
+ .emit_ib = gfx_v10_0_ring_emit_ib_compute,
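The gfx_v10_0 MQD hunks, mirrored in gfx_v11_0 below, replace plain memcpy() with memcpy_toio()/memcpy_fromio() because the live MQD can sit in device memory mapped through a BAR, which must be accessed through the I/O-memory helpers rather than ordinary loads and stores. A rough userspace model of the split (a volatile pointer merely stands in for the kernel's __iomem annotation):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Model of memcpy_toio(): byte-wise stores through a volatile pointer,
 * standing in for writes to BAR-mapped device memory. */
static void memcpy_toio_model(volatile uint8_t *dst, const uint8_t *src,
                              size_t n)
{
    for (size_t i = 0; i < n; i++)
        dst[i] = src[i];
}

static void memcpy_fromio_model(uint8_t *dst, const volatile uint8_t *src,
                                size_t n)
{
    for (size_t i = 0; i < n; i++)
        dst[i] = src[i];
}

int main(void)
{
    uint8_t vram[16];        /* stand-in for the __iomem MQD */
    uint8_t backup[16];      /* system-memory backup copy */

    memset(backup, 0xAB, sizeof(backup));
    memcpy_toio_model(vram, backup, sizeof(vram));      /* restore path */
    memcpy_fromio_model(backup, vram, sizeof(backup));  /* backup path */
    printf("0x%02x\n", backup[0]);
    return 0;
}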
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 762d7a19f1be16..54ec9b32562c28 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -83,6 +83,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
+
++static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
++ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
++};
++
+ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
+@@ -275,6 +279,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
+ default:
+ break;
+ }
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_11_0,
++ (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
++
+ }
+
+ static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
+@@ -390,7 +398,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
+
+- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+@@ -1608,7 +1616,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
+ active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ }
+
+- active_rb_bitmap |= global_active_rb_bitmap;
++ active_rb_bitmap &= global_active_rb_bitmap;
+ adev->gfx.config.backend_enable_mask = active_rb_bitmap;
+ adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
+ }
+@@ -3684,11 +3692,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore mqd with the backup copy */
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset the ring */
+ ring->wptr = 0;
+ *ring->wptr_cpu_addr = 0;
+@@ -3799,7 +3807,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
+ (order_base_2(prop->queue_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
+- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
++ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+@@ -3977,7 +3985,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
+ /* reset MQD to a clean status */
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+
+ /* reset ring buffer */
+ ring->wptr = 0;
+@@ -4000,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.kiq[0].mqd_backup)
+- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+ }
+
+ return 0;
+@@ -4021,11 +4029,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
++ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ } else {
+ /* restore MQD to a clean status */
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
++ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ /* reset ring buffer */
+ ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+@@ -4261,11 +4269,11 @@ static int gfx_v11_0_hw_init(void *handle)
+ /* RLC autoload sequence 1: Program rlc ram */
+ if (adev->gfx.imu.funcs->program_rlc_ram)
+ adev->gfx.imu.funcs->program_rlc_ram(adev);
++ /* rlc autoload firmware */
++ r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
++ if (r)
++ return r;
+ }
+- /* rlc autoload firmware */
+- r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
+- if (r)
+- return r;
+ } else {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
+@@ -4953,23 +4961,16 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+
+ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+ {
+- u32 reg, data;
++ u32 data;
+
+ amdgpu_gfx_off_ctrl(adev, false);
+
+- reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
+- if (amdgpu_sriov_is_pp_one_vf(adev))
+- data = RREG32_NO_KIQ(reg);
+- else
+- data = RREG32(reg);
++ data = RREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+- if (amdgpu_sriov_is_pp_one_vf(adev))
+- WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
+- else
+- WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
++ WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
+ }
+@@ -5700,7 +5701,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
++ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ WREG32_SOC15(GC, 0, regSQ_CMD, value);
++ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ }
+
+ static void
+@@ -6094,7 +6097,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
+ 7 + /* PIPELINE_SYNC */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+- 2 + /* VM_FLUSH */
++ 4 + /* VM_FLUSH */
+ 8 + /* FENCE for VM_FLUSH */
+ 20 + /* GDS switch */
+ 5 + /* COND_EXEC */
+@@ -6179,7 +6182,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
+ 7 + /* gfx_v11_0_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+- 2 + /* gfx_v11_0_ring_emit_vm_flush */
+ 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
+ .emit_ib = gfx_v11_0_ring_emit_ib_compute,
+@@ -6345,6 +6347,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
++ bitmap = i * adev->gfx.config.max_sh_per_se + j;
++ if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
++ continue;
+ mask = 1;
+ counter = 0;
+ gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 885ebd703260f0..1943beb135c4c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fd61574a737cb1..895060f6948f30 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+@@ -1172,6 +1172,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
++ /* https://bbs.openkylin.top/t/topic/171497 */
++ { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
++ /* HP 705G4 DM with R5 2400G */
++ { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
+ { 0, 0, 0, 0, 0 },
+ };
+
+@@ -3033,6 +3037,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
+
+ gfx_v9_0_cp_gfx_enable(adev, true);
+
++	/* For now, only apply this quirk to the APU gfx9 series; it is
++	 * already confirmed that the APU gfx10/gfx11 parts do not need it.
++ */
++ if (adev->flags & AMD_IS_APU &&
++ adev->in_s3 && !adev->suspend_complete) {
++		DRM_INFO("Will skip the CSB packet resubmit\n");
++ return 0;
++ }
+ r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+@@ -5697,7 +5709,9 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
++ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
++ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ }
+
+ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+@@ -6980,7 +6994,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+ 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+- 2 + /* gfx_v9_0_ring_emit_vm_flush */
+ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+@@ -7018,7 +7031,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
+ 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+- 2 + /* gfx_v9_0_ring_emit_vm_flush */
+ 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
+ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
+ .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
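The two added gfxoff quirk rows are matched against a device's PCI vendor/device/subsystem IDs plus silicon revision to keep GFXOFF disabled on known-bad boards. A minimal sketch of how such a sentinel-terminated table is scanned (the IDs are copied from the hunk, the lookup itself is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct gfxoff_quirk {
    uint16_t chip_vendor, chip_device;
    uint16_t subsys_vendor, subsys_device;
    uint8_t  revision;
};

static const struct gfxoff_quirk quirks[] = {
    { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },  /* openKylin report */
    { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },  /* HP 705G4 DM, R5 2400G */
    { 0, 0, 0, 0, 0 },                         /* sentinel */
};

static bool needs_gfxoff_quirk(uint16_t vendor, uint16_t device,
                               uint16_t sub_vendor, uint16_t sub_device,
                               uint8_t rev)
{
    for (const struct gfxoff_quirk *q = quirks; q->chip_device; q++) {
        if (q->chip_vendor == vendor && q->chip_device == device &&
            q->subsys_vendor == sub_vendor && q->subsys_device == sub_device &&
            q->revision == rev)
            return true;
    }
    return false;
}

int main(void)
{
    printf("%d\n", needs_gfxoff_quirk(0x1002, 0x15dd, 0x103c, 0x8464, 0xd6));
    return 0;
}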
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index 18ce5fe45f6f86..caa04d897c2ded 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -296,8 +296,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+@@ -425,16 +425,16 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
+
+ static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
+ {
+- const char *chip_name;
++ char ucode_prefix[15];
+ int r;
+
+- chip_name = "gc_9_4_3";
++ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+- r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
++ r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
+ if (r)
+ return r;
+
+- r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
++ r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
+ if (r)
+ return r;
+
+@@ -4290,9 +4290,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
+ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ struct amdgpu_cu_info *cu_info)
+ {
+- int i, j, k, counter, xcc_id, active_cu_number = 0;
+- u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
++ int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
++ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
+ unsigned disable_masks[4 * 4];
++ bool is_symmetric_cus;
+
+ if (!adev || !cu_info)
+ return -EINVAL;
+@@ -4310,6 +4311,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
++ is_symmetric_cus = true;
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+@@ -4337,6 +4339,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
+ }
++ if (i && is_symmetric_cus && prev_counter != counter)
++ is_symmetric_cus = false;
++ prev_counter = counter;
++ }
++ if (is_symmetric_cus) {
++ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
++ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
++ tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
++ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
+ }
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+index cdc290a474a927..66c6bab75f8a58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+@@ -102,7 +102,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
++ AMD_APU_IS_RENOIR |
++ AMD_APU_IS_GREEN_SARDINE))
+ /*
+ * Raven2 has a HW issue that it is unable to use the
+ * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+index 0834af7715497d..b50f24f7ea5c99 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+@@ -139,7 +139,9 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
++ AMD_APU_IS_RENOIR |
++ AMD_APU_IS_GREEN_SARDINE))
+ /*
+ * Raven2 has a HW issue that it is unable to use the
+ * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index fa87a85e1017e7..62ecf4d89cb9cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -1141,6 +1141,10 @@ static int gmc_v10_0_hw_fini(void *handle)
+
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+
++ if (adev->gmc.ecc_irq.funcs &&
++ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
++ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index e3b76fd28d158c..3d797a1adef3e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -974,6 +974,11 @@ static int gmc_v11_0_hw_fini(void *handle)
+ }
+
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
++
++ if (adev->gmc.ecc_irq.funcs &&
++ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
++ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
++
+ gmc_v11_0_gart_disable(adev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index 5b837a65fad20c..dfee4aae80393a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -914,8 +914,8 @@ static int gmc_v6_0_hw_init(void *handle)
+
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+- else
+- return r;
++
++ return 0;
+ }
+
+ static int gmc_v6_0_hw_fini(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 6a6929ac27482d..fd905889a4c63b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1103,8 +1103,8 @@ static int gmc_v7_0_hw_init(void *handle)
+
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+- else
+- return r;
++
++ return 0;
+ }
+
+ static int gmc_v7_0_hw_fini(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 5af23520251322..0bebcdbb265807 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1224,8 +1224,8 @@ static int gmc_v8_0_hw_init(void *handle)
+
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+- else
+- return r;
++
++ return 0;
+ }
+
+ static int gmc_v8_0_hw_fini(void *handle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index f9a5a2c0573e41..6d2b9d260d92c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1949,7 +1949,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
+ break;
+ }
+
+- size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT;
++ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+@@ -2220,8 +2220,6 @@ static int gmc_v9_0_sw_fini(void *handle)
+
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ amdgpu_gmc_sysfs_fini(adev);
+- adev->gmc.num_mem_partitions = 0;
+- kfree(adev->gmc.mem_partitions);
+
+ amdgpu_gmc_ras_fini(adev);
+ amdgpu_gem_force_release(adev);
+@@ -2235,6 +2233,9 @@ static int gmc_v9_0_sw_fini(void *handle)
+ amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
+ amdgpu_bo_fini(adev);
+
++ adev->gmc.num_mem_partitions = 0;
++ kfree(adev->gmc.mem_partitions);
++
+ return 0;
+ }
+
+@@ -2372,8 +2373,8 @@ static int gmc_v9_0_hw_init(void *handle)
+
+ if (amdgpu_emu_mode == 1)
+ return amdgpu_gmc_vram_checking(adev);
+- else
+- return r;
++
++ return 0;
+ }
+
+ /**
+@@ -2412,6 +2413,10 @@ static int gmc_v9_0_hw_fini(void *handle)
+
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+
++ if (adev->gmc.ecc_irq.funcs &&
++ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
++ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
++
+ return 0;
+ }
+
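The gmc_v9_0_sw_fini() reordering defers kfree(adev->gmc.mem_partitions) until after amdgpu_bo_fini(), presumably because buffer teardown can still consult the partition table; freeing it first risked a use-after-free. The general teardown rule, sketched with hypothetical names:

#include <stdlib.h>
#include <stdio.h>

struct partition { unsigned long base, size; };

struct dev {
    struct partition *parts;   /* shared lookup table */
    int num_parts;
    int buffers_live;          /* consumers that may read 'parts' */
};

static void buffers_fini(struct dev *d)
{
    /* May still index d->parts while unwinding, so it must run first. */
    d->buffers_live = 0;
}

static void dev_fini(struct dev *d)
{
    buffers_fini(d);           /* 1: tear down all users of the table */
    free(d->parts);            /* 2: only now is the table unreferenced */
    d->parts = NULL;
    d->num_parts = 0;
}

int main(void)
{
    struct dev d = { calloc(4, sizeof(struct partition)), 4, 1 };

    dev_fini(&d);
    printf("parts=%p\n", (void *)d.parts);
    return 0;
}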
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index aecad530b10a61..2c02ae69883d2b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32(mmIH_RB_CNTL, tmp);
+
+ out:
+ return (wptr & ih->ptr_mask);
+diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+index ec0c8f8b465ab2..725b1a585088db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+@@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
++
++ if (enable) {
++ /* Unset the CLEAR_OVERFLOW bit to make sure the next step
++ * is switching the bit from 0 to 1
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++ return -ETIMEDOUT;
++ } else {
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++ }
++
++ /* Clear RB_OVERFLOW bit */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
++ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
++ return -ETIMEDOUT;
++ } else {
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++ }
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ }
++
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+@@ -418,6 +446,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ out:
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+index 8fb05eae340ad2..b8da0fc29378c4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+@@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
+ out:
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+index 4ab90c7852c3ed..ca123ff553477e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+@@ -39,7 +39,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
+
+ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
+ {
+- char fw_name[40];
++ char fw_name[45];
+ char ucode_prefix[30];
+ int err;
+ const struct imu_firmware_header_v1_0 *imu_hdr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+index 77595e9622da34..7ac0228fe532ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+@@ -23,6 +23,7 @@
+
+ #include "amdgpu.h"
+ #include "amdgpu_jpeg.h"
++#include "amdgpu_cs.h"
+ #include "soc15.h"
+ #include "soc15d.h"
+ #include "vcn_v1_0.h"
+@@ -34,6 +35,9 @@
+ static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
++static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib);
+
+ static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+ {
+@@ -300,7 +304,10 @@ static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++ if (ring->funcs->parse_cs)
++ amdgpu_ring_write(ring, 0);
++ else
++ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+@@ -554,6 +561,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
+ .get_rptr = jpeg_v1_0_decode_ring_get_rptr,
+ .get_wptr = jpeg_v1_0_decode_ring_get_wptr,
+ .set_wptr = jpeg_v1_0_decode_ring_set_wptr,
++ .parse_cs = jpeg_v1_dec_ring_parse_cs,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+@@ -612,3 +620,69 @@ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+
+ vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+ }
++
++/**
++ * jpeg_v1_dec_ring_parse_cs - command submission parser
++ *
++ * @parser: Command submission parser context
++ * @job: the job to parse
++ * @ib: the IB to parse
++ *
++ * Parse the command stream; return -EINVAL for an invalid packet,
++ * 0 otherwise.
++ */
++static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib)
++{
++ u32 i, reg, res, cond, type;
++ int ret = 0;
++ struct amdgpu_device *adev = parser->adev;
++
++	for (i = 0; i < ib->length_dw; i += 2) {
++ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
++ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
++ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
++ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
++
++ if (res || cond != PACKETJ_CONDITION_CHECK0) /* only allow 0 for now */
++ return -EINVAL;
++
++ if (reg >= JPEG_V1_REG_RANGE_START && reg <= JPEG_V1_REG_RANGE_END)
++ continue;
++
++ switch (type) {
++ case PACKETJ_TYPE0:
++ if (reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH &&
++ reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW &&
++ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH &&
++ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW &&
++ reg != JPEG_V1_REG_CTX_INDEX &&
++ reg != JPEG_V1_REG_CTX_DATA) {
++ ret = -EINVAL;
++ }
++ break;
++ case PACKETJ_TYPE1:
++ if (reg != JPEG_V1_REG_CTX_DATA)
++ ret = -EINVAL;
++ break;
++ case PACKETJ_TYPE3:
++ if (reg != JPEG_V1_REG_SOFT_RESET)
++ ret = -EINVAL;
++ break;
++ case PACKETJ_TYPE6:
++ if (ib->ptr[i] != CP_PACKETJ_NOP)
++ ret = -EINVAL;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ if (ret) {
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ break;
++ }
++ }
++
++ return ret;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+index bbf33a6a397298..9654d22e03763c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+@@ -29,4 +29,15 @@ int jpeg_v1_0_sw_init(void *handle);
+ void jpeg_v1_0_sw_fini(void *handle);
+ void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+
++#define JPEG_V1_REG_RANGE_START 0x8000
++#define JPEG_V1_REG_RANGE_END 0x803f
++
++#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x8238
++#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x8239
++#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH 0x825a
++#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW 0x825b
++#define JPEG_V1_REG_CTX_INDEX 0x8328
++#define JPEG_V1_REG_CTX_DATA 0x8329
++#define JPEG_V1_REG_SOFT_RESET 0x83a0
++
+ #endif /*__JPEG_V1_0_H__*/
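jpeg_v1_dec_ring_parse_cs() walks the indirect buffer two dwords at a time, decodes each PACKETJ header, and whitelists the handful of registers defined in jpeg_v1_0.h above; anything else fails with -EINVAL before the ring ever executes it. A compact standalone validator in the same shape (the header field layout below is invented for the demo, not the real CP_PACKETJ_GET_* encoding):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical header layout: [31:24] type, [23:8] reg, [7:0] cond. */
#define PKT_TYPE(w)  (((w) >> 24) & 0xff)
#define PKT_REG(w)   (((w) >> 8) & 0xffff)
#define PKT_COND(w)  ((w) & 0xff)

#define TYPE0        0
#define COND_CHECK0  0
#define REG_RANGE_LO 0x8000   /* mirrors JPEG_V1_REG_RANGE_START */
#define REG_RANGE_HI 0x803f   /* mirrors JPEG_V1_REG_RANGE_END */

/* Validate (header, payload) pairs; return -1 on the first bad packet. */
static int parse_cs(const uint32_t *ib, unsigned len_dw)
{
    for (unsigned i = 0; i + 1 < len_dw; i += 2) {
        uint32_t reg = PKT_REG(ib[i]);

        if (PKT_COND(ib[i]) != COND_CHECK0)
            return -1;                    /* only condition 0 allowed */
        if (PKT_TYPE(ib[i]) != TYPE0)
            return -1;                    /* demo: accept TYPE0 only */
        if (reg < REG_RANGE_LO || reg > REG_RANGE_HI)
            return -1;                    /* register not whitelisted */
    }
    return 0;
}

int main(void)
{
    uint32_t good[] = { (TYPE0 << 24) | (0x8010 << 8) | COND_CHECK0, 0xdead };
    uint32_t bad[]  = { (TYPE0 << 24) | (0x4242 << 8) | COND_CHECK0, 0xbeef };

    printf("%d %d\n", parse_cs(good, 2), parse_cs(bad, 2));  /* 0 -1 */
    return 0;
}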
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+index 1c8116d75f63c4..fbe57499495ec5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+@@ -543,11 +543,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+index 1de79d660285d7..78aaaee492e111 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+@@ -23,6 +23,7 @@
+
+ #include "amdgpu.h"
+ #include "amdgpu_jpeg.h"
++#include "amdgpu_cs.h"
+ #include "soc15.h"
+ #include "soc15d.h"
+ #include "jpeg_v4_0_3.h"
+@@ -769,11 +770,15 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++
++ if (ring->funcs->parse_cs)
++ amdgpu_ring_write(ring, 0);
++ else
++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++ amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+@@ -1052,6 +1057,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
+ .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
+ .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
+ .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
++ .parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+@@ -1216,3 +1222,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
+ {
+ adev->jpeg.ras = &jpeg_v4_0_3_ras;
+ }
++
++/**
++ * jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
++ *
++ * @parser: Command submission parser context
++ * @job: the job to parse
++ * @ib: the IB to parse
++ *
++ * Parse the command stream; return -EINVAL for an invalid packet,
++ * 0 otherwise.
++ */
++int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib)
++{
++ uint32_t i, reg, res, cond, type;
++ struct amdgpu_device *adev = parser->adev;
++
++	for (i = 0; i < ib->length_dw; i += 2) {
++ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
++ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
++ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
++ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
++
++ if (res) /* only support 0 at the moment */
++ return -EINVAL;
++
++ switch (type) {
++ case PACKETJ_TYPE0:
++ if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ return -EINVAL;
++ }
++ break;
++ case PACKETJ_TYPE3:
++ if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ return -EINVAL;
++ }
++ break;
++ case PACKETJ_TYPE6:
++ if (ib->ptr[i] == CP_PACKETJ_NOP)
++ continue;
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ return -EINVAL;
++ default:
++			dev_err(adev->dev, "Unknown packet type %d!\n", type);
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+index 22483dc663518f..9598eda9d71564 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+@@ -46,6 +46,12 @@
+
+ #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
++#define JPEG_REG_RANGE_START 0x4000
++#define JPEG_REG_RANGE_END 0x41c2
++
+ extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
+
++int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib);
+ #endif /* __JPEG_V4_0_3_H__ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+index fb91b31056cae7..d25f87fb197148 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+@@ -96,7 +96,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
+ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
++ AMD_APU_IS_RENOIR |
++ AMD_APU_IS_GREEN_SARDINE))
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+index 784c4e07747077..3d8e579d5c4e8a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+@@ -130,6 +130,9 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
+ uint64_t value;
+ int i;
+
++ if (amdgpu_sriov_vf(adev))
++ return;
++
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ /* Program the AGP BAR */
+@@ -139,9 +142,6 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
+ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
+ adev->gmc.agp_end >> 24);
+
+- if (amdgpu_sriov_vf(adev))
+- return;
+-
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+index b6a8478dabf43c..737eff53f54f04 100644
+--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+@@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ out:
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index 4038455d799845..ef368ca79a6686 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -28,6 +28,7 @@
+ #include "nbio/nbio_2_3_offset.h"
+ #include "nbio/nbio_2_3_sh_mask.h"
+ #include <uapi/linux/kfd_ioctl.h>
++#include <linux/device.h>
+ #include <linux/pci.h>
+
+ #define smnPCIE_CONFIG_CNTL 0x11180044
+@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+
+- if (pci_is_thunderbolt_attached(adev->pdev))
++ if (dev_is_removable(&adev->pdev->dev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+- if (pci_is_thunderbolt_attached(adev->pdev))
++ if (dev_is_removable(&adev->pdev->dev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 685abf57ffddc1..977b956bf930a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
+ else
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+- if (!ras->disable_ras_err_cnt_harvest) {
++ if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
+ /*
+ * clear error status after ras_controller_intr
+ * according to hw team and count ue number
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+index f85eec05d21815..0a601336cf697f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+@@ -426,6 +426,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
+ u32 inst_mask;
+ int i;
+
++ if (amdgpu_sriov_vf(adev))
++ adev->rmmio_remap.reg_offset =
++ SOC15_REG_OFFSET(
++ NBIO, 0,
++ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
++ << 2;
+ WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
+ 0xff & ~(adev->gfx.xcc_mask));
+
+@@ -604,11 +610,6 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
+
+ dev_info(adev->dev, "RAS controller interrupt triggered "
+ "by NBIF error\n");
+-
+- /* ras_controller_int is dedicated for nbif ras error,
+- * not the global interrupt for sync flood
+- */
+- amdgpu_ras_reset_gpu(adev);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+index 469eed084976c7..fe1995ed13be7b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+@@ -59,6 +59,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
+ /* Read USB-PD from LFB */
+ #define GFX_CMD_USB_PD_USE_LFB 0x480
+
++/* Retry times for vmbx ready wait */
++#define PSP_VMBX_POLLING_LIMIT 3000
++
+ /* VBIOS gfl defines */
+ #define MBOX_READY_MASK 0x80000000
+ #define MBOX_STATUS_MASK 0x0000FFFF
+@@ -138,7 +141,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
+ struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
+
+- for (retry_loop = 0; retry_loop < 70; retry_loop++) {
++ for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
+ /* Wait for bootloader to signify that is
+ ready having bit 31 of C2PMSG_33 set to 1 */
+ ret = psp_wait_for(
+@@ -158,14 +161,18 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
+ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+ {
+ struct amdgpu_device *adev = psp->adev;
+- int retry_loop, ret;
++ int retry_loop, retry_cnt, ret;
+
++ retry_cnt =
++ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) ?
++ PSP_VMBX_POLLING_LIMIT :
++ 10;
+ /* Wait for bootloader to signify that it is ready having bit 31 of
+ * C2PMSG_35 set to 1. All other bits are expected to be cleared.
+ * If there is an error in processing command, bits[7:0] will be set.
+ * This is applicable for PSP v13.0.6 and newer.
+ */
+- for (retry_loop = 0; retry_loop < 10; retry_loop++) {
++ for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0xffffffff, false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index cd37f45e01a119..0ba9a3d3312f5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2027,10 +2027,13 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+ {
+- uint32_t instance;
++ int instance;
+
+ DRM_DEBUG("IH: SDMA trap\n");
+ instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
++ if (instance < 0)
++ return instance;
++
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+index f413898dda37db..4e8d5e6a65e410 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+@@ -365,7 +365,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
++ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
++ << (ring->me % adev->sdma.num_inst_per_aid);
+
+ sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+@@ -1612,19 +1613,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
+ u32 sdma_cntl;
+
+ sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
+- switch (state) {
+- case AMDGPU_IRQ_STATE_DISABLE:
+- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
+- DRAM_ECC_INT_ENABLE, 0);
+- WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+- break;
+- /* sdma ecc interrupt is enabled by default
+- * driver doesn't need to do anything to
+- * enable the interrupt */
+- case AMDGPU_IRQ_STATE_ENABLE:
+- default:
+- break;
+- }
++ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
++ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
++ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 2b3ebebc4299c0..47d4840c6275c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -188,6 +188,14 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
++ /* SDMA seems to miss doorbells sometimes when powergating kicks in.
++ * Updating the wptr directly will wake it. This is only safe because
++ * we disallow gfxoff in begin_use() and then allow it again in end_use().
++ */
++ WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
++ lower_32_bits(ring->wptr << 2));
++ WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
++ upper_32_bits(ring->wptr << 2));
+ } else {
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+@@ -292,17 +300,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+ u32 ref_and_mask = 0;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+-
+- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+- amdgpu_ring_write(ring, ref_and_mask); /* reference */
+- amdgpu_ring_write(ring, ref_and_mask); /* mask */
+- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++ if (ring->me > 1) {
++ amdgpu_asic_flush_hdp(adev, ring);
++ } else {
++ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
++
++ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
++ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
++ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
++ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
++ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
++ amdgpu_ring_write(ring, ref_and_mask); /* reference */
++ amdgpu_ring_write(ring, ref_and_mask); /* mask */
++ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
++ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
++ }
+ }
+
+ /**
+@@ -1651,6 +1663,36 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
+ *flags |= AMD_CG_SUPPORT_SDMA_LS;
+ }
+
++static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
++ * disallow GFXOFF in some cases leading to
++ * hangs in SDMA. Disallow GFXOFF while SDMA is active.
++ * We can probably just limit this to 5.2.3,
++ * but it shouldn't hurt for other parts since
++ * this GFXOFF will be disallowed anyway when SDMA is
++ * active, this just makes it explicit.
++ * sdma_v5_2_ring_set_wptr() takes advantage of this
++ * to update the wptr because sometimes SDMA seems to miss
++ * doorbells when entering PG. If you remove this, update
++ * sdma_v5_2_ring_set_wptr() as well!
++ */
++ amdgpu_gfx_off_ctrl(adev, false);
++}
++
++static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++
++ /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
++ * disallow GFXOFF in some cases leading to
++ * hangs in SDMA. Allow GFXOFF when SDMA is complete.
++ */
++ amdgpu_gfx_off_ctrl(adev, true);
++}
++
+ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
+ .name = "sdma_v5_2",
+ .early_init = sdma_v5_2_early_init,
+@@ -1698,6 +1740,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
+ .test_ib = sdma_v5_2_ring_test_ib,
+ .insert_nop = sdma_v5_2_ring_insert_nop,
+ .pad_ib = sdma_v5_2_ring_pad_ib,
++ .begin_use = sdma_v5_2_ring_begin_use,
++ .end_use = sdma_v5_2_ring_end_use,
+ .emit_wreg = sdma_v5_2_ring_emit_wreg,
+ .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index 9a24f17a57502e..cada9f300a7f51 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(IH_RB_CNTL, tmp);
++
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
++ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+index 8b8086d5c864bc..896c7e434d3bc8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
++++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+@@ -93,7 +93,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+- return r;
++ return 0;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
+index ae29620b1ea405..a7cef33a2a3da6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
++++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
+@@ -92,7 +92,7 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev)
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+- return r;
++ return 0;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index f5be40d7ba3679..a41ed67ea9feaf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -325,7 +325,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
+- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
++ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
++ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
+ return 10000;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+@@ -573,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
+ return AMD_RESET_METHOD_MODE1;
+ }
+
++static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
++{
++ u32 sol_reg;
++
++ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
++
++ /* Will reset for the following suspend abort cases.
++ * 1) Only reset limit on APU side, dGPU hasn't checked yet.
++ * 2) S3 suspend abort and TOS already launched.
++ */
++ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
++ !adev->suspend_complete &&
++ sol_reg)
++ return true;
++
++ return false;
++}
++
+ static int soc15_asic_reset(struct amdgpu_device *adev)
+ {
+ /* original raven doesn't have full asic reset */
+- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+- (adev->apu_flags & AMD_APU_IS_RAVEN2))
++ /* On the latest Raven, the GPU reset can be performed
++ * successfully. So now, temporarily enable it for the
++ * S3 suspend abort case.
++ */
++ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
++ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
++ !soc15_need_reset_on_resume(adev))
+ return 0;
+
+ switch (soc15_asic_reset_method(adev)) {
+@@ -1159,6 +1183,11 @@ static int soc15_common_early_init(void *handle)
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ adev->external_rev_id = adev->rev_id + 0x46;
++ /* GC 9.4.3 uses MMIO register region hole at a different offset */
++ if (!amdgpu_sriov_vf(adev)) {
++ adev->rmmio_remap.reg_offset = 0x1A000;
++ adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
++ }
+ break;
+ default:
+ /* FIXME: not supported yet */
+@@ -1294,6 +1323,10 @@ static int soc15_common_resume(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (soc15_need_reset_on_resume(adev)) {
++ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
++ soc15_asic_reset(adev);
++ }
+ return soc15_common_hw_init(adev);
+ }
+
+@@ -1416,9 +1449,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+- adev->nbio.funcs->get_clockgating_state(adev, flags);
++ if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
++ adev->nbio.funcs->get_clockgating_state(adev, flags);
+
+- adev->hdp.funcs->get_clock_gating_state(adev, flags);
++ if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
++ adev->hdp.funcs->get_clock_gating_state(adev, flags);
+
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
+
+@@ -1434,9 +1469,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
+ }
+
+ /* AMD_CG_SUPPORT_ROM_MGCG */
+- adev->smuio.funcs->get_clock_gating_state(adev, flags);
++ if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
++ adev->smuio.funcs->get_clock_gating_state(adev, flags);
+
+- adev->df.funcs->get_clockgating_state(adev, flags);
++ if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
++ adev->df.funcs->get_clockgating_state(adev, flags);
+ }
+
+ static int soc15_common_set_powergating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
+index 2357ff39323f05..e74e1983da53aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
++++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
+@@ -76,6 +76,12 @@
+ ((cond & 0xF) << 24) | \
+ ((type & 0xF) << 28))
+
++#define CP_PACKETJ_NOP 0x60000000
++#define CP_PACKETJ_GET_REG(x) ((x) & 0x3FFFF)
++#define CP_PACKETJ_GET_RES(x) (((x) >> 18) & 0x3F)
++#define CP_PACKETJ_GET_COND(x) (((x) >> 24) & 0xF)
++#define CP_PACKETJ_GET_TYPE(x) (((x) >> 28) & 0xF)
++
+ /* Packet 3 types */
+ #define PACKET3_NOP 0x10
+ #define PACKET3_SET_BASE 0x11
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 8b2ff2b281b0ad..4712ffc0a482c8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
+ /* SOC21 */
+ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+ };
+
+ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
++ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
+ };
+
+ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
+@@ -449,10 +449,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
+ {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+- return false;
+ default:
+ return true;
+ }
+@@ -804,10 +802,35 @@ static int soc21_common_suspend(void *handle)
+ return soc21_common_hw_fini(adev);
+ }
+
++static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
++{
++ u32 sol_reg1, sol_reg2;
++
++ /* Will reset for the following suspend abort cases.
++ * 1) Only reset dGPU side.
++ * 2) S3 suspend got aborted and TOS is active.
++ */
++ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
++ !adev->suspend_complete) {
++ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
++ msleep(100);
++ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
++
++ return (sol_reg1 != sol_reg2);
++ }
++
++ return false;
++}
++
+ static int soc21_common_resume(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (soc21_need_reset_on_resume(adev)) {
++ dev_info(adev->dev, "S3 suspend aborted, resetting...");
++ soc21_asic_reset(adev);
++ }
++
+ return soc21_common_hw_init(adev);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index 917707bba7f362..450b6e83150914 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32(mmIH_RB_CNTL, tmp);
++
+ out:
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index d364c6dd152c33..bf68e18e3824b8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
+ out:
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+index dbc99536440f2f..131e7b769519c8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+@@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
++ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
++ * can be detected.
++ */
++ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
++ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
++
+ out:
+ return (wptr & ih->ptr_mask);
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index c37f1fcd2165b5..19d46be6394295 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -417,7 +417,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+
+ err_create_queue:
+ if (wptr_bo)
+- amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
++ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&wptr_bo);
+ err_wptr_map_gart:
+ err_bind_process:
+ err_pdd:
+@@ -778,8 +778,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
+ * nodes, but not more than args->num_of_nodes as that is
+ * the amount of memory allocated by user
+ */
+- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
+- args->num_of_nodes), GFP_KERNEL);
++ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
++ GFP_KERNEL);
+ if (!pa)
+ return -ENOMEM;
+
+@@ -1138,7 +1138,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ goto err_unlock;
+ }
+ offset = dev->adev->rmmio_remap.bus_addr;
+- if (!offset) {
++ if (!offset || (PAGE_SIZE > 4096)) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+@@ -1432,17 +1432,23 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ goto sync_memory_failed;
+ }
+ }
+- mutex_unlock(&p->mutex);
+
+- if (flush_tlb) {
+- /* Flush TLBs after waiting for the page table updates to complete */
+- for (i = 0; i < args->n_devices; i++) {
+- peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+- if (WARN_ON_ONCE(!peer_pdd))
+- continue;
++ /* Flush TLBs after waiting for the page table updates to complete */
++ for (i = 0; i < args->n_devices; i++) {
++ peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
++ if (WARN_ON_ONCE(!peer_pdd))
++ continue;
++ if (flush_tlb)
+ kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
+- }
++
++ /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
++ err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
++ if (err)
++ goto sync_memory_failed;
+ }
++
++ mutex_unlock(&p->mutex);
++
+ kfree(devices_arr);
+
+ return 0;
+@@ -1516,7 +1522,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+
+ /* Find a KFD GPU device that supports the get_dmabuf_info query */
+ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+- if (dev)
++ if (dev && !kfd_devcgroup_check_permission(dev))
+ break;
+ if (!dev)
+ return -EINVAL;
+@@ -1538,7 +1544,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ if (xcp_id >= 0)
+ args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
+ else
+- args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
++ args->gpu_id = dev->id;
+ args->flags = flags;
+
+ /* Copy metadata buffer to user mode */
+@@ -2307,7 +2313,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
+ return -EINVAL;
+ }
+ offset = pdd->dev->adev->rmmio_remap.bus_addr;
+- if (!offset) {
++ if (!offset || (PAGE_SIZE > 4096)) {
+ pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
+ return -ENOMEM;
+ }
+@@ -3348,6 +3354,9 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
++ if (PAGE_SIZE > 4096)
++ return -EINVAL;
++
+ address = dev->adev->rmmio_remap.bus_addr;
+
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index 74c2d7a0d62857..2f54ee08f26961 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -42,8 +42,6 @@
+ #define CRAT_OEMTABLEID_LENGTH 8
+ #define CRAT_RESERVED_LENGTH 6
+
+-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+-
+ /* Compute Unit flags */
+ #define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */
+ #define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+index 9ec750666382fe..94aaf2fc556ca1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+@@ -103,7 +103,8 @@ void debug_event_write_work_handler(struct work_struct *work)
+ struct kfd_process,
+ debug_event_workarea);
+
+- kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
++ if (process->debug_trap_enabled && process->dbg_ev_file)
++ kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
+ }
+
+ /* update process/device/queue exception status, write to descriptor
+@@ -645,6 +646,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target)
+ else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+ target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+
++ cancel_work_sync(&target->debug_event_workarea);
+ fput(target->dbg_ev_file);
+ target->dbg_ev_file = NULL;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 93ce181eb3baa0..9d0b0bf70ad1ea 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -402,15 +402,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
+ f2g = &gfx_v11_kfd2kgd;
+ break;
+ case IP_VERSION(11, 0, 3):
+- if ((adev->pdev->device == 0x7460 &&
+- adev->pdev->revision == 0x00) ||
+- (adev->pdev->device == 0x7461 &&
+- adev->pdev->revision == 0x00))
+- /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
+- gfx_target_version = 110005;
+- else
+- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+- gfx_target_version = 110001;
++ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
++ gfx_target_version = 110001;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
+ default:
+@@ -845,7 +838,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ kfd_doorbell_error:
+ kfd_gtt_sa_fini(kfd);
+ kfd_gtt_sa_init_error:
+- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
++ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
+ alloc_gtt_mem_failure:
+ dev_err(kfd_device,
+ "device %x:%x NOT added due to errors\n",
+@@ -863,7 +856,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
+ kfd_doorbell_fini(kfd);
+ ida_destroy(&kfd->doorbell_ida);
+ kfd_gtt_sa_fini(kfd);
+- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
++ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
+ }
+
+ kfree(kfd);
+@@ -935,7 +928,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+ {
+ struct kfd_node *node;
+ int i;
+- int count;
+
+ if (!kfd->init_complete)
+ return;
+@@ -943,12 +935,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+ /* for runtime suspend, skip locking kfd */
+ if (!run_pm) {
+ mutex_lock(&kfd_processes_mutex);
+- count = ++kfd_locked;
+- mutex_unlock(&kfd_processes_mutex);
+-
+ /* For first KFD device suspend all the KFD processes */
+- if (count == 1)
++ if (++kfd_locked == 1)
+ kfd_suspend_all_processes();
++ mutex_unlock(&kfd_processes_mutex);
+ }
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+@@ -959,7 +949,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+
+ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+ {
+- int ret, count, i;
++ int ret, i;
+
+ if (!kfd->init_complete)
+ return 0;
+@@ -973,12 +963,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+ /* for runtime resume, skip unlocking kfd */
+ if (!run_pm) {
+ mutex_lock(&kfd_processes_mutex);
+- count = --kfd_locked;
+- mutex_unlock(&kfd_processes_mutex);
+-
+- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+- if (count == 0)
++ if (--kfd_locked == 0)
+ ret = kfd_resume_all_processes();
++ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
++ mutex_unlock(&kfd_processes_mutex);
+ }
+
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 0d3d538b64ebc3..4d9a406925e189 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -407,7 +407,8 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
+
+ q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
+ qpd->proc_doorbells,
+- q->doorbell_id);
++ q->doorbell_id,
++ dev->kfd->device_info.doorbell_size);
+ return 0;
+ }
+
+@@ -1979,6 +1980,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
+ while (halt_if_hws_hang)
+ schedule();
++ kfd_hws_hang(dqm);
+ return -ETIME;
+ }
+
+@@ -2608,7 +2610,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
+ {
+ WARN(!mqd, "No hiq sdma mqd trunk to free");
+
+- amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
++ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+ }
+
+ void device_queue_manager_uninit(struct device_queue_manager *dqm)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index 7b38537c7c99bd..05c74887fd6fda 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -161,7 +161,10 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
+ if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+ return NULL;
+
+- *doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
++ *doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev,
++ kfd->doorbells,
++ inx,
++ kfd->device_info.doorbell_size);
+ inx *= 2;
+
+ pr_debug("Get kernel queue doorbell\n"
+@@ -240,7 +243,10 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
+ return 0;
+ }
+
+- first_db_index = amdgpu_doorbell_index_on_bar(adev, pdd->qpd.proc_doorbells, 0);
++ first_db_index = amdgpu_doorbell_index_on_bar(adev,
++ pdd->qpd.proc_doorbells,
++ 0,
++ pdd->dev->kfd->device_info.doorbell_size);
+ return adev->doorbell.base + first_db_index * sizeof(uint32_t);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+index 62b205dac63a05..6604a3f99c5ecf 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
+ pdd->gpuvm_limit =
+ pdd->dev->kfd->shared_resources.gpuvm_size - 1;
+
+- /* dGPUs: the reserved space for kernel
+- * before SVM
+- */
+- pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+- pdd->qpd.ib_base = SVM_IB_BASE;
+-
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+ }
+@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
+ pdd->lds_base = MAKE_LDS_APP_BASE_V9();
+ pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
+
+- pdd->gpuvm_base = PAGE_SIZE;
++ /* Raven needs SVM to support graphic handle, etc. Leave the small
++ * reserved space before SVM on Raven as well, even though we don't
++ * have to.
++ * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
++ * are used in Thunk to reserve SVM.
++ */
++ pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_limit =
+ pdd->dev->kfd->shared_resources.gpuvm_size - 1;
+
+ pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
+ pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+-
+- /*
+- * Place TBA/TMA on opposite side of VM hole to prevent
+- * stray faults from triggering SVM on these pages.
+- */
+- pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
+ }
+
+ int kfd_init_apertures(struct kfd_process *process)
+@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
+ return -EINVAL;
+ }
+ }
++
++ /* dGPUs: the reserved space for kernel
++ * before SVM
++ */
++ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
++ pdd->qpd.ib_base = SVM_IB_BASE;
+ }
+
+ dev_dbg(kfd_device, "node id %u\n", id);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+index c7991e07b6be56..f85ca6cb90f56c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+@@ -268,7 +268,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+@@ -284,7 +284,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+- pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
++ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+@@ -310,7 +310,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
+ ERR_TYPE);
+- pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
++ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+@@ -336,7 +336,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ break;
+ }
+ kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
+- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
++ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
++ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+index f933bd231fb9ce..3ca9c160da7c23 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+@@ -150,7 +150,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
+
+ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
+@@ -165,7 +165,7 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+
+ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+@@ -177,7 +177,7 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+
+ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_warn(
++ pr_warn_ratelimited(
+ "sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
+@@ -325,7 +325,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
+ /* CP */
+ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, context_id0, 32);
+- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
++ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
++ KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_CTXID0_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index 830396b1c3b145..8a6729939ae55f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -333,7 +333,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+@@ -347,7 +347,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+- pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
++ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+@@ -366,7 +366,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+- pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
++ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+@@ -385,7 +385,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ break;
+ }
+ kfd_signal_event_interrupt(pasid, sq_int_data, 24);
+- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
++ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
++ KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 7d82c7da223ab8..3263b5fa182d20 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -516,10 +516,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ start = prange->start << PAGE_SHIFT;
+ end = (prange->last + 1) << PAGE_SHIFT;
+
++ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
++ prange->npages * PAGE_SIZE,
++ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
++ node->xcp ? node->xcp->id : 0);
++ if (r) {
++ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
++ return -ENOSPC;
++ }
++
+ r = svm_range_vram_node_new(node, prange, true);
+ if (r) {
+ dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
+- return r;
++ goto out;
+ }
+ ttm_res_offset = prange->offset << PAGE_SHIFT;
+
+@@ -549,6 +558,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ svm_range_vram_node_free(prange);
+ }
+
++out:
++ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
++ prange->npages * PAGE_SIZE,
++ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
++ node->xcp ? node->xcp->id : 0);
+ return r < 0 ? r : 0;
+ }
+
+@@ -1021,7 +1035,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
+ } else {
+ res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+ if (IS_ERR(res))
+- return -ENOMEM;
++ return PTR_ERR(res);
+ pgmap->range.start = res->start;
+ pgmap->range.end = res->end;
+ pgmap->type = MEMORY_DEVICE_PRIVATE;
+@@ -1037,10 +1051,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
+ r = devm_memremap_pages(adev->dev, pgmap);
+ if (IS_ERR(r)) {
+ pr_err("failed to register HMM device memory\n");
+- /* Disable SVM support capability */
+- pgmap->type = 0;
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+ devm_release_mem_region(adev->dev, res->start, resource_size(res));
++ /* Disable SVM support capability */
++ pgmap->type = 0;
+ return PTR_ERR(r);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+index 447829c22295c6..4c3f379803117e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+@@ -223,7 +223,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+ {
+ if (mqd_mem_obj->gtt_mem) {
+- amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
++ amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
+ kfree(mqd_mem_obj);
+ } else {
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+index 8b7fed91352696..22cbfa1bdaddb9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
++ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 15277f1d5cf0a9..d722cbd317834a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |=
+ ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
++ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+ m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 42d881809dc70e..1ac66c5337df47 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -686,7 +686,7 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ m = get_mqd(mqd + size * xcc);
+ update_mqd(mm, m, q, minfo);
+
+- update_cu_mask(mm, mqd, minfo, xcc);
++ update_cu_mask(mm, m, minfo, xcc);
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ switch (xcc) {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index fa24e1852493dc..67204c3dfbb8f6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -971,7 +971,7 @@ struct kfd_process {
+ struct work_struct debug_event_workarea;
+
+ /* Tracks debug per-vmid request for debug flags */
+- bool dbg_flags;
++ u32 dbg_flags;
+
+ atomic_t poison;
+ /* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
+@@ -1128,7 +1128,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
+ struct kfd_dev *dev = adev->kfd.dev;
+ uint32_t i;
+
+- if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
++ if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3))
+ return dev->nodes[0];
+
+ for (i = 0; i < dev->num_nodes; i++)
+@@ -1466,7 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+
+ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
+ {
+- return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
++ return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
+ }
+@@ -1482,10 +1482,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
+
+ /* Cgroup Support */
+ /* Check with device cgroup if @kfd device is accessible */
+-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
++static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
+ {
+ #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+- struct drm_device *ddev = adev_to_drm(kfd->adev);
++ struct drm_device *ddev;
++
++ if (node->xcp)
++ ddev = node->xcp->ddev;
++ else
++ ddev = adev_to_drm(node->adev);
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
+ ddev->render->index,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index fbf053001af978..43f520b3796700 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -818,9 +818,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
+ mutex_lock(&kfd_processes_mutex);
+
+ if (kfd_is_locked()) {
+- mutex_unlock(&kfd_processes_mutex);
+ pr_debug("KFD is locked! Cannot create process");
+- return ERR_PTR(-EINVAL);
++ process = ERR_PTR(-EINVAL);
++ goto out;
+ }
+
+ /* A prior open of /dev/kfd could have already created the process. */
+@@ -828,6 +828,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
+ if (process) {
+ pr_debug("Process already found\n");
+ } else {
++ /* If the process just called exec(3), it is possible that the
++ * cleanup of the kfd_process (following the release of the mm
++ * of the old process image) is still in the cleanup work queue.
++ * Make sure to drain any job before trying to recreate any
++ * resource for this process.
++ */
++ flush_workqueue(kfd_process_wq);
++
+ process = create_process(thread);
+ if (IS_ERR(process))
+ goto out;
+@@ -1039,7 +1047,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
+
+ if (pdd->dev->kfd->shared_resources.enable_mes)
+ amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
+- pdd->proc_ctx_bo);
++ &pdd->proc_ctx_bo);
+ /*
+ * before destroying pdd, make sure to report availability
+ * for auto suspend
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index adb5e4bdc0b204..0583af4e84fa3f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -28,6 +28,7 @@
+ #include "kfd_priv.h"
+ #include "kfd_kernel_queue.h"
+ #include "amdgpu_amdkfd.h"
++#include "amdgpu_reset.h"
+
+ static inline struct process_queue_node *get_queue_by_qid(
+ struct process_queue_manager *pqm, unsigned int qid)
+@@ -87,6 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+ return;
+
+ dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
++ if (dev->kfd->shared_resources.enable_mes &&
++ down_read_trylock(&dev->adev->reset_domain->sem)) {
++ amdgpu_mes_flush_shader_debugger(dev->adev,
++ pdd->proc_ctx_gpu_addr);
++ up_read(&dev->adev->reset_domain->sem);
++ }
+ pdd->already_dequeued = true;
+ }
+
+@@ -169,16 +176,43 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
+ return 0;
+ }
+
++static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
++ struct process_queue_node *pqn)
++{
++ struct kfd_node *dev;
++ struct kfd_process_device *pdd;
++
++ dev = pqn->q->device;
++
++ pdd = kfd_get_process_device_data(dev, pqm->process);
++ if (!pdd) {
++ pr_err("Process device data doesn't exist\n");
++ return;
++ }
++
++ if (pqn->q->gws) {
++ if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
++ !dev->kfd->shared_resources.enable_mes)
++ amdgpu_amdkfd_remove_gws_from_process(
++ pqm->process->kgd_process_info, pqn->q->gws);
++ pdd->qpd.num_gws = 0;
++ }
++
++ if (dev->kfd->shared_resources.enable_mes) {
++ amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
++ if (pqn->q->wptr_bo)
++ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo);
++ }
++}
++
+ void pqm_uninit(struct process_queue_manager *pqm)
+ {
+ struct process_queue_node *pqn, *next;
+
+ list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
+- if (pqn->q && pqn->q->gws &&
+- KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+- !pqn->q->device->kfd->shared_resources.enable_mes)
+- amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
+- pqn->q->gws);
++ if (pqn->q)
++ pqm_clean_queue_resource(pqm, pqn);
++
+ kfd_procfs_del_queue(pqn->q);
+ uninit_queue(pqn->q);
+ list_del(&pqn->process_queue_list);
+@@ -377,7 +411,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ */
+ uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
+ pdd->qpd.proc_doorbells,
+- 0);
++ 0,
++ pdd->dev->kfd->device_info.doorbell_size);
+
+ *p_doorbell_offset_in_process = (q->properties.doorbell_off
+ - first_db_index) * sizeof(uint32_t);
+@@ -460,22 +495,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ goto err_destroy_queue;
+ }
+
+- if (pqn->q->gws) {
+- if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+- !dev->kfd->shared_resources.enable_mes)
+- amdgpu_amdkfd_remove_gws_from_process(
+- pqm->process->kgd_process_info,
+- pqn->q->gws);
+- pdd->qpd.num_gws = 0;
+- }
+-
+- if (dev->kfd->shared_resources.enable_mes) {
+- amdgpu_amdkfd_free_gtt_mem(dev->adev,
+- pqn->q->gang_ctx_bo);
+- if (pqn->q->wptr_bo)
+- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
+-
+- }
++ pqm_clean_queue_resource(pqm, pqn);
+ uninit_queue(pqn->q);
+ }
+
+@@ -962,6 +982,7 @@ int kfd_criu_restore_queue(struct kfd_process *p,
+ pr_debug("Queue id %d was restored successfully\n", queue_id);
+
+ kfree(q_data);
++ kfree(q_extra_data);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index bb16b795d1bc2a..ce76d455499841 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -391,14 +391,9 @@ static void svm_range_bo_release(struct kref *kref)
+ spin_lock(&svm_bo->list_lock);
+ }
+ spin_unlock(&svm_bo->list_lock);
+- if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
+- /* We're not in the eviction worker.
+- * Signal the fence and synchronize with any
+- * pending eviction work.
+- */
++ if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
++ /* We're not in the eviction worker. Signal the fence. */
+ dma_fence_signal(&svm_bo->eviction_fence->base);
+- cancel_work_sync(&svm_bo->eviction_work);
+- }
+ dma_fence_put(&svm_bo->eviction_fence->base);
+ amdgpu_bo_unref(&svm_bo->bo);
+ kfree(svm_bo);
+@@ -495,11 +490,11 @@ svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
+
+ /* We need a new svm_bo. Spin-loop to wait for concurrent
+ * svm_range_bo_release to finish removing this range from
+- * its range list. After this, it is safe to reuse the
+- * svm_bo pointer and svm_bo_list head.
++ * its range list and set prange->svm_bo to null. After this,
++ * it is safe to reuse the svm_bo pointer and svm_bo_list head.
+ */
+- while (!list_empty_careful(&prange->svm_bo_list))
+- ;
++ while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
++ cond_resched();
+
+ return false;
+ }
+@@ -628,8 +623,15 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
+
+ void svm_range_vram_node_free(struct svm_range *prange)
+ {
+- svm_range_bo_unref(prange->svm_bo);
+- prange->ttm_res = NULL;
++ /* serialize prange->svm_bo unref */
++ mutex_lock(&prange->lock);
++ /* prange->svm_bo has not been unref */
++ if (prange->ttm_res) {
++ prange->ttm_res = NULL;
++ mutex_unlock(&prange->lock);
++ svm_range_bo_unref(prange->svm_bo);
++ } else
++ mutex_unlock(&prange->lock);
+ }
+
+ struct kfd_node *
+@@ -760,7 +762,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
+ prange->flags &= ~attrs[i].value;
+ break;
+ case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+- prange->granularity = attrs[i].value;
++ prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
+ break;
+ default:
+ WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
+@@ -820,7 +822,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
+ }
+ }
+
+- return !prange->is_error_flag;
++ return true;
+ }
+
+ /**
+@@ -1625,18 +1627,24 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+ if (test_bit(gpuidx, prange->bitmap_access))
+ bitmap_set(ctx->bitmap, gpuidx, 1);
+ }
++
++ /*
++ * If prange is already mapped or with always mapped flag,
++ * update mapping on GPUs with ACCESS attribute
++ */
++ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
++ if (prange->mapped_to_gpu ||
++ prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
++ bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
++ }
+ } else {
+ bitmap_or(ctx->bitmap, prange->bitmap_access,
+ prange->bitmap_aip, MAX_GPU_INSTANCE);
+ }
+
+ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+- bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+- if (!prange->mapped_to_gpu ||
+- bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+- r = 0;
+- goto free_ctx;
+- }
++ r = 0;
++ goto free_ctx;
+ }
+
+ if (prange->actual_loc && !prange->ttm_res) {
+@@ -1662,73 +1670,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
+
+ start = prange->start << PAGE_SHIFT;
+ end = (prange->last + 1) << PAGE_SHIFT;
+- for (addr = start; addr < end && !r; ) {
++ for (addr = start; !r && addr < end; ) {
+ struct hmm_range *hmm_range;
+ struct vm_area_struct *vma;
+- unsigned long next;
++ unsigned long next = 0;
+ unsigned long offset;
+ unsigned long npages;
+ bool readonly;
+
+ vma = vma_lookup(mm, addr);
+- if (!vma) {
++ if (vma) {
++ readonly = !(vma->vm_flags & VM_WRITE);
++
++ next = min(vma->vm_end, end);
++ npages = (next - addr) >> PAGE_SHIFT;
++ WRITE_ONCE(p->svms.faulting_task, current);
++ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
++ readonly, owner, NULL,
++ &hmm_range);
++ WRITE_ONCE(p->svms.faulting_task, NULL);
++ if (r) {
++ pr_debug("failed %d to get svm range pages\n", r);
++ if (r == -EBUSY)
++ r = -EAGAIN;
++ }
++ } else {
+ r = -EFAULT;
+- goto unreserve_out;
+- }
+- readonly = !(vma->vm_flags & VM_WRITE);
+-
+- next = min(vma->vm_end, end);
+- npages = (next - addr) >> PAGE_SHIFT;
+- WRITE_ONCE(p->svms.faulting_task, current);
+- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+- readonly, owner, NULL,
+- &hmm_range);
+- WRITE_ONCE(p->svms.faulting_task, NULL);
+- if (r) {
+- pr_debug("failed %d to get svm range pages\n", r);
+- if (r == -EBUSY)
+- r = -EAGAIN;
+- goto unreserve_out;
+ }
+
+- offset = (addr - start) >> PAGE_SHIFT;
+- r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
+- hmm_range->hmm_pfns);
+- if (r) {
+- pr_debug("failed %d to dma map range\n", r);
+- goto unreserve_out;
++ if (!r) {
++ offset = (addr - start) >> PAGE_SHIFT;
++ r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
++ hmm_range->hmm_pfns);
++ if (r)
++ pr_debug("failed %d to dma map range\n", r);
+ }
+
+ svm_range_lock(prange);
+- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
++ if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
+ pr_debug("hmm update the range, need validate again\n");
+ r = -EAGAIN;
+- goto unlock_out;
+ }
+- if (!list_empty(&prange->child_list)) {
++
++ if (!r && !list_empty(&prange->child_list)) {
+ pr_debug("range split by unmap in parallel, validate again\n");
+ r = -EAGAIN;
+- goto unlock_out;
+ }
+
+- r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+- ctx->bitmap, wait, flush_tlb);
++ if (!r)
++ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
++ ctx->bitmap, wait, flush_tlb);
++
++ if (!r && next == end)
++ prange->mapped_to_gpu = true;
+
+-unlock_out:
+ svm_range_unlock(prange);
+
+ addr = next;
+ }
+
+- if (addr == end) {
+- prange->validated_once = true;
+- prange->mapped_to_gpu = true;
+- }
+-
+-unreserve_out:
+ svm_range_unreserve_bos(ctx);
+-
+- prange->is_error_flag = !!r;
+ if (!r)
+ prange->validate_timestamp = ktime_get_boottime();
+
+@@ -2097,7 +2098,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
+ next = interval_tree_iter_next(node, start, last);
+ next_start = min(node->last, last) + 1;
+
+- if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
++ if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
++ prange->mapped_to_gpu) {
+ /* nothing to do */
+ } else if (node->start < start || node->last > last) {
+ /* node intersects the update range and its attributes
+@@ -2341,8 +2343,10 @@ static void svm_range_deferred_list_work(struct work_struct *work)
+ mutex_unlock(&svms->lock);
+ mmap_write_unlock(mm);
+
+- /* Pairs with mmget in svm_range_add_list_work */
+- mmput(mm);
++ /* Pairs with mmget in svm_range_add_list_work. If dropping the
++ * last mm refcount, schedule release work to avoid circular locking
++ */
++ mmput_async(mm);
+
+ spin_lock(&svms->deferred_list_lock);
+ }
+@@ -2653,6 +2657,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+ {
+ struct vm_area_struct *vma;
+ struct interval_tree_node *node;
++ struct rb_node *rb_node;
+ unsigned long start_limit, end_limit;
+
+ vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
+@@ -2672,16 +2677,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
+ if (node) {
+ end_limit = min(end_limit, node->start);
+ /* Last range that ends before the fault address */
+- node = container_of(rb_prev(&node->rb),
+- struct interval_tree_node, rb);
++ rb_node = rb_prev(&node->rb);
+ } else {
+ /* Last range must end before addr because
+ * there was no range after addr
+ */
+- node = container_of(rb_last(&p->svms.objects.rb_root),
+- struct interval_tree_node, rb);
++ rb_node = rb_last(&p->svms.objects.rb_root);
+ }
+- if (node) {
++ if (rb_node) {
++ node = container_of(rb_node, struct interval_tree_node, rb);
+ if (node->last >= addr) {
+ WARN(1, "Overlap with prev node and page fault addr\n");
+ return -EFAULT;
+@@ -3412,18 +3416,19 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
+ r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ *migrated = !r;
+
+- return r;
++ return 0;
+ }
+
+ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
+ {
+- if (!fence)
+- return -EINVAL;
+-
+- if (dma_fence_is_signaled(&fence->base))
+- return 0;
+-
+- if (fence->svm_bo) {
++ /* Dereferencing fence->svm_bo is safe here because the fence hasn't
++ * signaled yet and we're under the protection of the fence->lock.
++ * After the fence is signaled in svm_range_bo_release, we cannot get
++ * here any more.
++ *
++ * Reference is dropped in svm_range_evict_svm_bo_worker.
++ */
++ if (svm_bo_ref_unless_zero(fence->svm_bo)) {
+ WRITE_ONCE(fence->svm_bo->evicting, 1);
+ schedule_work(&fence->svm_bo->eviction_work);
+ }
+@@ -3438,8 +3443,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
+ int r = 0;
+
+ svm_bo = container_of(work, struct svm_range_bo, eviction_work);
+- if (!svm_bo_ref_unless_zero(svm_bo))
+- return; /* svm_bo was freed while eviction was pending */
+
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ mm = svm_bo->eviction_fence->mm;
+@@ -3507,7 +3510,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ struct svm_range *next;
+ bool update_mapping = false;
+ bool flush_tlb;
+- int r = 0;
++ int r, ret = 0;
+
+ pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
+ p->pasid, &p->svms, start, start + size - 1, size);
+@@ -3595,7 +3598,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ out_unlock_range:
+ mutex_unlock(&prange->migrate_mutex);
+ if (r)
+- break;
++ ret = r;
+ }
+
+ dynamic_svm_range_dump(svms);
+@@ -3608,7 +3611,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+ pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
+ &p->svms, start, start + size - 1, r);
+
+- return r;
++ return ret ? ret : r;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+index 9e668eeefb32df..25f71190573865 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+@@ -132,9 +132,7 @@ struct svm_range {
+ struct list_head child_list;
+ DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
+ DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
+- bool validated_once;
+ bool mapped_to_gpu;
+- bool is_error_flag;
+ };
+
+ static inline void svm_range_lock(struct svm_range *prange)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index c8c75ff7cea80d..8362a71ab70752 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -958,8 +958,7 @@ static void kfd_update_system_properties(void)
+ dev = list_last_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ if (dev) {
+- sys_props.platform_id =
+- (*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
++ sys_props.platform_id = dev->oem_id64;
+ sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
+ sys_props.platform_rev = dev->oem_revision;
+ }
+@@ -1342,10 +1341,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
+ num_cpu++;
+ }
+
++ if (list_empty(&kdev->io_link_props))
++ return -ENODATA;
++
+ gpu_link = list_first_entry(&kdev->io_link_props,
+- struct kfd_iolink_properties, list);
+- if (!gpu_link)
+- return -ENOMEM;
++ struct kfd_iolink_properties, list);
+
+ for (i = 0; i < num_cpu; i++) {
+ /* CPU <--> GPU */
+@@ -1423,15 +1423,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
+ peer->gpu->adev))
+ return ret;
+
++ if (list_empty(&kdev->io_link_props))
++ return -ENODATA;
++
+ iolink1 = list_first_entry(&kdev->io_link_props,
+- struct kfd_iolink_properties, list);
+- if (!iolink1)
+- return -ENOMEM;
++ struct kfd_iolink_properties, list);
++
++ if (list_empty(&peer->io_link_props))
++ return -ENODATA;
+
+ iolink2 = list_first_entry(&peer->io_link_props,
+- struct kfd_iolink_properties, list);
+- if (!iolink2)
+- return -ENOMEM;
++ struct kfd_iolink_properties, list);
+
+ props = kfd_alloc_struct(props);
+ if (!props)
+@@ -1449,17 +1451,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
+ /* CPU->CPU link*/
+ cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
+ if (cpu_dev) {
+- list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
+- if (iolink3->node_to == iolink2->node_to)
+- break;
+-
+- props->weight += iolink3->weight;
+- props->min_latency += iolink3->min_latency;
+- props->max_latency += iolink3->max_latency;
+- props->min_bandwidth = min(props->min_bandwidth,
+- iolink3->min_bandwidth);
+- props->max_bandwidth = min(props->max_bandwidth,
+- iolink3->max_bandwidth);
++ list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
++ if (iolink3->node_to != iolink2->node_to)
++ continue;
++
++ props->weight += iolink3->weight;
++ props->min_latency += iolink3->min_latency;
++ props->max_latency += iolink3->max_latency;
++ props->min_bandwidth = min(props->min_bandwidth,
++ iolink3->min_bandwidth);
++ props->max_bandwidth = min(props->max_bandwidth,
++ iolink3->max_bandwidth);
++ break;
++ }
+ } else {
+ WARN(1, "CPU node not found");
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 27386ce9a021da..2d1c9d771bef2d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -154,7 +154,10 @@ struct kfd_topology_device {
+ struct attribute attr_gpuid;
+ struct attribute attr_name;
+ struct attribute attr_props;
+- uint8_t oem_id[CRAT_OEMID_LENGTH];
++ union {
++ uint8_t oem_id[CRAT_OEMID_LENGTH];
++ uint64_t oem_id64;
++ };
+ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
+ };
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 868946dd7ef126..a3f17c572bf06e 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -65,7 +65,6 @@
+ #include "amdgpu_dm_debugfs.h"
+ #endif
+ #include "amdgpu_dm_psr.h"
+-#include "amdgpu_dm_replay.h"
+
+ #include "ivsrcid/ivsrcid_vislands30.h"
+
+@@ -265,7 +264,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+ {
+- u32 v_blank_start, v_blank_end, h_position, v_position;
++ u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
+ struct amdgpu_crtc *acrtc = NULL;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+@@ -715,6 +714,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
+ return;
+ }
+
++ /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
++ if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
++ DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
++ return;
++ }
++
+ link_index = notify->link_index;
+ link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
+@@ -802,7 +807,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
+ */
+ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+ {
+- struct dmub_notification notify;
++ struct dmub_notification notify = {0};
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+@@ -1248,7 +1253,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ /* AGP aperture is disabled */
+ if (agp_bot == agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
++ AMD_APU_IS_RENOIR |
++ AMD_APU_IS_GREEN_SARDINE))
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+@@ -1260,7 +1267,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
++ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
++ AMD_APU_IS_RENOIR |
++ AMD_APU_IS_GREEN_SARDINE))
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+@@ -1692,8 +1701,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ } else {
+- DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
+- dce_version_to_string(adev->dm.dc->ctx->dce_version));
++ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ goto error;
+ }
+
+@@ -1814,21 +1822,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+- goto error;
+- }
+- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
+- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+- goto error;
+- }
+- }
+-
+- /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+- * It is expected that DMUB will resend any pending notifications at this point, for
+- * example HPD from DPIA.
+- */
+- if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
++ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
++ * It is expected that DMUB will resend any pending notifications at this point. Note
++	 * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
++	 * align with the legacy interface initialization sequence. Connection status will be
++	 * proactively detected once in amdgpu_dm_initialize_drm_device.
++ */
+ dc_enable_dmub_outbox(adev->dm.dc);
+
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+@@ -1909,17 +1908,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+- if (adev->dm.dc)
++ if (adev->dm.dc) {
+ dc_deinit_callbacks(adev->dm.dc);
+-
+- if (adev->dm.dc)
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+-
+- if (dc_enable_dmub_notifications(adev->dm.dc)) {
+- kfree(adev->dm.dmub_notify);
+- adev->dm.dmub_notify = NULL;
+- destroy_workqueue(adev->dm.delayed_hpd_wq);
+- adev->dm.delayed_hpd_wq = NULL;
++ if (dc_enable_dmub_notifications(adev->dm.dc)) {
++ kfree(adev->dm.dmub_notify);
++ adev->dm.dmub_notify = NULL;
++ destroy_workqueue(adev->dm.delayed_hpd_wq);
++ adev->dm.delayed_hpd_wq = NULL;
++ }
+ }
+
+ if (adev->dm.dmub_bo)
+@@ -2085,7 +2082,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+- struct dmub_srv_fb_params fb_params;
++ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+@@ -2185,6 +2182,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
++ region_params.is_mailbox_in_inbox = false;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+@@ -2208,10 +2206,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+- memset(&fb_params, 0, sizeof(fb_params));
+- fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+- fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+- fb_params.region_info = &region_info;
++ memset(&memory_params, 0, sizeof(memory_params));
++ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
++ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
++ memory_params.region_info = &region_info;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+@@ -2223,7 +2221,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ return -ENOMEM;
+ }
+
+- status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
++ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+@@ -2253,6 +2251,7 @@ static int dm_sw_fini(void *handle)
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
++ kfree(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+@@ -2635,7 +2634,8 @@ static int dm_suspend(void *handle)
+
+ dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+
+- dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
++ if (dm->cached_dc_state)
++ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+@@ -2963,6 +2963,7 @@ static int dm_resume(void *handle)
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
++ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+@@ -2981,6 +2982,10 @@ static int dm_resume(void *handle)
+ /* Do mst topology probing after resuming cached state*/
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
++ continue;
++
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+@@ -3481,6 +3486,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
++ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
++ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
++ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
++
++ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
++ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
++ }
++
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+@@ -3506,10 +3519,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+-
+- if (adev->dm.hpd_rx_offload_wq)
+- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+- aconnector;
+ }
+ }
+
+@@ -4034,6 +4043,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+
+ #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+ #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
++#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2)
+ #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
+
+ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+@@ -4048,6 +4058,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ return;
+
+ amdgpu_acpi_get_backlight_caps(&caps);
++
++ /* validate the firmware value is sane */
++ if (caps.caps_valid) {
++ int spread = caps.max_input_signal - caps.min_input_signal;
++
++ if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
++ caps.min_input_signal < 0 ||
++ spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT ||
++ spread < AMDGPU_DM_MIN_SPREAD) {
++ DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n",
++ caps.min_input_signal, caps.max_input_signal);
++ caps.caps_valid = false;
++ }
++ }
++
+ if (caps.caps_valid) {
+ dm->backlight_caps[bl_idx].caps_valid = true;
+ if (caps.aux_support)
+@@ -4338,7 +4363,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
+- bool replay_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+@@ -4355,7 +4379,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+- ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
++ if (primary_planes > AMDGPU_MAX_PLANES) {
++ DRM_ERROR("DM: Plane nums out of 6 planes\n");
++ return -EINVAL;
++ }
+
+ /*
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
+@@ -4448,20 +4475,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ }
+ }
+
+- if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+- switch (adev->ip_versions[DCE_HWIP][0]) {
+- case IP_VERSION(3, 1, 4):
+- case IP_VERSION(3, 1, 5):
+- case IP_VERSION(3, 1, 6):
+- case IP_VERSION(3, 2, 0):
+- case IP_VERSION(3, 2, 1):
+- replay_feature_enabled = true;
+- break;
+- default:
+- replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+- break;
+- }
+- }
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
+@@ -4493,6 +4506,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+
+ link = dc_get_link_at_index(dm->dc, i);
+
++ if (dm->hpd_rx_offload_wq)
++ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
++ aconnector;
++
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+@@ -4510,12 +4527,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ setup_backlight_device(dm, aconnector);
+
+- /*
+- * Disable psr if replay can be enabled
+- */
+- if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
+- psr_feature_enabled = false;
+-
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
+
+@@ -5170,6 +5181,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
++ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
++ goto ffu;
++
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ clips = drm_plane_get_damage_clips(new_plane_state);
+
+@@ -5773,6 +5787,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ &aconnector->base.probed_modes :
+ &aconnector->base.modes;
+
++ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
++ return NULL;
++
+ if (aconnector->freesync_vid_base.clock != 0)
+ return &aconnector->freesync_vid_base;
+
+@@ -6087,7 +6104,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ drm_mode_copy(&saved_mode, &mode);
++ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
+ drm_mode_copy(&mode, freesync_mode);
++ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode, scale);
+@@ -6137,19 +6156,25 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
+- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
++ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
++ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
++ stream->signal == SIGNAL_TYPE_EDP) {
++ const struct dc_edid_caps *edid_caps;
++ unsigned int disable_colorimetry = 0;
++
++ if (aconnector->dc_sink) {
++ edid_caps = &aconnector->dc_sink->edid_caps;
++ disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
++ }
++
+ //
+ // should decide stream support vsc sdp colorimetry capability
+ // before building vsc info packet
+ //
+- stream->use_vsc_sdp_for_colorimetry = false;
+- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+- stream->use_vsc_sdp_for_colorimetry =
+- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+- } else {
+- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+- stream->use_vsc_sdp_for_colorimetry = true;
+- }
++ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
++ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
++ !disable_colorimetry;
++
+ if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+@@ -6236,7 +6261,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+- dm_new_state->abm_level = val;
++ dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
+ ret = 0;
+ }
+
+@@ -6281,7 +6306,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+- *val = dm_state->abm_level;
++ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
++ dm_state->abm_level : 0;
+ ret = 0;
+ }
+
+@@ -6354,7 +6380,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+- state->abm_level = amdgpu_dm_abm_level;
++ state->abm_level = amdgpu_dm_abm_level ?:
++ ABM_LEVEL_IMMEDIATE_DISABLE;
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+@@ -6491,7 +6518,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+- dc_sink_retain(aconnector->dc_sink);
++ if (aconnector->dc_sink)
++ dc_sink_retain(aconnector->dc_sink);
+ }
+ }
+
+@@ -6863,8 +6891,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+- if (!mst_state->pbn_div)
+- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
++ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
+
+ if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
+@@ -6876,7 +6903,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ max_bpc);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ clock = adjusted_mode->clock;
+- dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
++ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
+ }
+
+ dm_new_connector_state->vcpi_slots =
+@@ -6904,7 +6931,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, ret;
+- int vcpi, pbn_div, pbn, slot_num = 0;
++ int vcpi, pbn_div, pbn = 0, slot_num = 0;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+
+@@ -6941,7 +6968,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ }
+ }
+
+- if (j == dc_state->stream_count)
++ if (j == dc_state->stream_count || pbn_div == 0)
+ continue;
+
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+@@ -7305,7 +7332,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ drm_add_modes_noedid(connector, 1920, 1080);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+- amdgpu_dm_connector_add_common_modes(encoder, connector);
++ if (encoder)
++ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ }
+ amdgpu_dm_fbc_init(connector);
+@@ -7431,6 +7459,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ int i;
+ int result = -EIO;
+
++ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
++ return result;
++
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+@@ -8286,15 +8317,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ bundle->stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+- } else if (cursor_update && acrtc_state->active_planes > 0 &&
+- acrtc_attach->base.state->event) {
+- drm_crtc_vblank_get(pcrtc);
+-
++ } else if (cursor_update && acrtc_state->active_planes > 0) {
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+-
+- acrtc_attach->event = acrtc_attach->base.state->event;
+- acrtc_attach->base.state->event = NULL;
+-
++ if (acrtc_attach->base.state->event) {
++ drm_crtc_vblank_get(pcrtc);
++ acrtc_attach->event = acrtc_attach->base.state->event;
++ acrtc_attach->base.state->event = NULL;
++ }
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+@@ -8459,6 +8488,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
+ continue;
+
+ notify:
++ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
++ continue;
++
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+@@ -9539,14 +9571,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
++ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+- * TODO: Remove this hack once the checks below are sufficient
+- * enough to determine when we need to reset all the planes on
+- * the stream.
++	 * TODO: Remove this hack for all asics once it is proven that
++	 * fast updates work fine on DCN3.2+.
+ */
+- if (state->allow_modeset)
++ if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+@@ -9892,16 +9924,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ }
+ }
+
++static void
++dm_get_plane_scale(struct drm_plane_state *plane_state,
++ int *out_plane_scale_w, int *out_plane_scale_h)
++{
++ int plane_src_w, plane_src_h;
++
++ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
++ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
++ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
++}
++
+ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+ {
+- struct drm_plane *cursor = crtc->cursor, *underlying;
++ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
++ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+- int cursor_src_w, cursor_src_h;
+- int underlying_src_w, underlying_src_h;
++ bool any_relevant_change = false;
+
+ /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ * cursor per pipe but it's going to inherit the scaling and
+@@ -9909,13 +9952,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ * blending properties match the underlying planes'.
+ */
+
+- new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+- if (!new_cursor_state || !new_cursor_state->fb)
++ /* If no plane was enabled or changed scaling, no need to check again */
++ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
++ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
++
++ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
++ continue;
++
++ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
++ any_relevant_change = true;
++ break;
++ }
++
++ if (new_plane_state->fb == old_plane_state->fb &&
++ new_plane_state->crtc_w == old_plane_state->crtc_w &&
++ new_plane_state->crtc_h == old_plane_state->crtc_h)
++ continue;
++
++ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
++ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
++
++ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
++ any_relevant_change = true;
++ break;
++ }
++ }
++
++ if (!any_relevant_change)
+ return 0;
+
+- dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+- cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+- cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
++ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
++ if (IS_ERR(new_cursor_state))
++ return PTR_ERR(new_cursor_state);
++
++ if (!new_cursor_state->fb)
++ return 0;
++
++ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
++
++ /* Need to check all enabled planes, even if this commit doesn't change
++ * their state
++ */
++ i = drm_atomic_add_affected_planes(state, crtc);
++ if (i)
++ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+@@ -9926,10 +10006,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ if (!new_underlying_state->fb)
+ continue;
+
+- dm_get_oriented_plane_size(new_underlying_state,
+- &underlying_src_w, &underlying_src_h);
+- underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+- underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
++ dm_get_plane_scale(new_underlying_state,
++ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+@@ -10021,7 +10099,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+- struct dsc_mst_fairness_vars vars[MAX_PIPES];
++ struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
+
+ trace_amdgpu_dm_atomic_check_begin(state);
+
+@@ -10321,11 +10399,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ goto fail;
+ }
+
+- ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+- if (ret) {
+- DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+- ret = -EINVAL;
+- goto fail;
++ if (dc_resource_is_dsc_encoding_supported(dc)) {
++ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
++ if (ret) {
++ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
++ ret = -EINVAL;
++ goto fail;
++ }
+ }
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+@@ -10585,6 +10665,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ return ret;
+ }
+
++static void parse_edid_displayid_vrr(struct drm_connector *connector,
++ struct edid *edid)
++{
++ u8 *edid_ext = NULL;
++ int i;
++ int j = 0;
++ u16 min_vfreq;
++ u16 max_vfreq;
++
++ if (edid == NULL || edid->extensions == 0)
++ return;
++
++ /* Find DisplayID extension */
++ for (i = 0; i < edid->extensions; i++) {
++ edid_ext = (void *)(edid + (i + 1));
++ if (edid_ext[0] == DISPLAYID_EXT)
++ break;
++ }
++
++ if (edid_ext == NULL)
++ return;
++
++ while (j < EDID_LENGTH) {
++ /* Get dynamic video timing range from DisplayID if available */
++ if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
++ (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
++ min_vfreq = edid_ext[j+9];
++ if (edid_ext[j+1] & 7)
++ max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
++ else
++ max_vfreq = edid_ext[j+10];
++
++ if (max_vfreq && min_vfreq) {
++ connector->display_info.monitor_range.max_vfreq = max_vfreq;
++ connector->display_info.monitor_range.min_vfreq = min_vfreq;
++
++ return;
++ }
++ }
++ j++;
++ }
++}
++
+ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+@@ -10707,18 +10830,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ if (!adev->dm.freesync_module)
+ goto update;
+
+- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+- || sink->sink_signal == SIGNAL_TYPE_EDP) {
++ /* Some eDP panels only have the refresh rate range info in DisplayID */
++ if ((connector->display_info.monitor_range.min_vfreq == 0 ||
++ connector->display_info.monitor_range.max_vfreq == 0))
++ parse_edid_displayid_vrr(connector, edid);
++
++ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
++ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ bool edid_check_required = false;
+
+- if (edid) {
+- edid_check_required = is_dp_capable_without_timing_msa(
+- adev->dm.dc,
+- amdgpu_dm_connector);
++ if (is_dp_capable_without_timing_msa(adev->dm.dc,
++ amdgpu_dm_connector)) {
++ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
++ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
++ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
++ if (amdgpu_dm_connector->max_vfreq -
++ amdgpu_dm_connector->min_vfreq > 10)
++ freesync_capable = true;
++ } else {
++ edid_check_required = edid->version > 1 ||
++ (edid->version == 1 &&
++ edid->revision > 1);
++ }
+ }
+
+- if (edid_check_required == true && (edid->version > 1 ||
+- (edid->version == 1 && edid->revision > 1))) {
++ if (edid_check_required) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+@@ -10738,14 +10874,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ if (range->flags != 1)
+ continue;
+
+- amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+- amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+- amdgpu_dm_connector->pixel_clock_mhz =
+- range->pixel_clock_mhz * 10;
+-
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
++ if (edid->revision >= 4) {
++ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
++ connector->display_info.monitor_range.min_vfreq += 255;
++ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
++ connector->display_info.monitor_range.max_vfreq += 255;
++ }
++
++ amdgpu_dm_connector->min_vfreq =
++ connector->display_info.monitor_range.min_vfreq;
++ amdgpu_dm_connector->max_vfreq =
++ connector->display_info.monitor_range.max_vfreq;
++ amdgpu_dm_connector->pixel_clock_mhz =
++ range->pixel_clock_mhz * 10;
++
+ break;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 9e4cc5eeda767e..88606b805330d7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -49,7 +49,7 @@
+
+ #define AMDGPU_DM_MAX_NUM_EDP 2
+
+-#define AMDGPU_DMUB_NOTIFICATION_MAX 5
++#define AMDGPU_DMUB_NOTIFICATION_MAX 6
+
+ #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
+ #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 97b7a0b8a1c26c..30d4c6fd95f531 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -29,7 +29,6 @@
+ #include "dc.h"
+ #include "amdgpu.h"
+ #include "amdgpu_dm_psr.h"
+-#include "amdgpu_dm_replay.h"
+ #include "amdgpu_dm_crtc.h"
+ #include "amdgpu_dm_plane.h"
+ #include "amdgpu_dm_trace.h"
+@@ -124,12 +123,7 @@ static void vblank_control_worker(struct work_struct *work)
+ * fill_dc_dirty_rects().
+ */
+ if (vblank_work->stream && vblank_work->stream->link) {
+- /*
+- * Prioritize replay, instead of psr
+- */
+- if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
+- amdgpu_dm_replay_enable(vblank_work->stream, false);
+- else if (vblank_work->enable) {
++ if (vblank_work->enable) {
+ if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+ vblank_work->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+@@ -138,7 +132,6 @@ static void vblank_control_worker(struct work_struct *work)
+ #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
+ #endif
+- vblank_work->stream->link->panel_config.psr.disallow_replay &&
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 7c21e21bcc51a0..c8609595f324b4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -1219,7 +1219,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
+ size_t size, loff_t *pos)
+ {
+ int r;
+- uint8_t data[36];
++ uint8_t data[36] = {0};
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t write_size = 36;
+@@ -1453,7 +1453,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ const uint32_t rd_buf_size = 10;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+- int i, r, str_len = 30;
++ int i, r, str_len = 10;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+@@ -1465,7 +1465,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -1566,7 +1568,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -1651,7 +1655,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -1750,7 +1756,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -1835,7 +1843,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -1934,7 +1944,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2015,7 +2027,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2111,7 +2125,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2190,7 +2206,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2246,7 +2264,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2317,7 +2337,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2388,7 +2410,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream &&
+- pipe_ctx->stream->link == aconnector->dc_link)
++ pipe_ctx->stream->link == aconnector->dc_link &&
++ pipe_ctx->stream->sink &&
++ pipe_ctx->stream->sink == aconnector->dc_sink)
+ break;
+ }
+
+@@ -2905,7 +2929,7 @@ static int psr_read_residency(void *data, u64 *val)
+ {
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+- u32 residency;
++ u32 residency = 0;
+
+ link->dc->link_srv->edp_get_psr_residency(link, &residency);
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 4b230933b28ebf..227a148b0f82a5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -63,6 +63,18 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+ DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ edid_caps->panel_patch.disable_fams = true;
+ break;
++ /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
++ case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
++ case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
++ case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
++ case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
++ DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
++ edid_caps->panel_patch.remove_sink_ext_caps = true;
++ break;
++ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
++ DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
++ edid_caps->panel_patch.disable_colorimetry = true;
++ break;
+ default:
+ return;
+ }
+@@ -113,6 +125,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+
+ edid_caps->edid_hdmi = connector->display_info.is_hdmi;
+
++ apply_edid_quirks(edid_buf, edid_caps);
++
+ sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+ if (sad_count <= 0)
+ return result;
+@@ -139,8 +153,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
+ else
+ edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+- apply_edid_quirks(edid_buf, edid_caps);
+-
+ kfree(sads);
+ kfree(sadb);
+
+@@ -950,6 +962,11 @@ int dm_helper_dmub_aux_transfer_sync(
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+ {
++ if (!link->hpd_status) {
++ *operation_result = AUX_RET_ERROR_HPD_DISCON;
++ return -1;
++ }
++
+ return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
+ operation_result);
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 57230661132bd9..d390e3d62e56e3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -246,7 +246,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
+ aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
+
+ /* synaptics cascaded MST hub case */
+- if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
++ if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ aconnector->dsc_aux = port->mgr->aux;
+
+ if (!aconnector->dsc_aux)
+@@ -606,6 +606,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ &connector->base,
+ dev->mode_config.tile_property,
+ 0);
++ connector->colorspace_property = master->base.colorspace_property;
++ if (connector->colorspace_property)
++ drm_connector_attach_colorspace_property(connector);
+
+ drm_connector_set_path_property(connector, pathprop);
+
+@@ -1112,7 +1115,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+ params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
+ params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
+- dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
++ dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
+ if (!dc_dsc_compute_bandwidth_range(
+ stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+@@ -1263,6 +1266,9 @@ static bool is_dsc_need_re_compute(
+ }
+ }
+
++ if (new_stream_on_link_num == 0)
++ return false;
++
+ /* check current_state if there stream on link but it is not in
+ * new request state
+ */
+@@ -1577,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+ {
+ struct dc_dsc_policy dsc_policy = {0};
+
+- dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
++ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
+ dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+@@ -1598,31 +1604,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ unsigned int max_compressed_bw_in_kbps = 0;
+ struct dc_dsc_bw_range bw_range = {0};
+- struct drm_dp_mst_topology_mgr *mst_mgr;
++ uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
+
+ /*
+- * check if the mode could be supported if DSC pass-through is supported
+- * AND check if there enough bandwidth available to support the mode
+- * with DSC enabled.
++	 * Consider the case where the depth of the mst topology tree is equal to or less than 2
++	 * A. When dsc bitstream can be transmitted along the entire path
++	 *    1. dsc is possible between source and branch/leaf device (common dsc params are possible), AND
++ * 2. dsc passthrough supported at MST branch, or
++ * 3. dsc decoding supported at leaf MST device
++ * Use maximum dsc compression as bw constraint
++ * B. When dsc bitstream cannot be transmitted along the entire path
++ * Use native bw as bw constraint
+ */
+ if (is_dsc_common_config_possible(stream, &bw_range) &&
+- aconnector->mst_output_port->passthrough_aux) {
+- mst_mgr = aconnector->mst_output_port->mgr;
+- mutex_lock(&mst_mgr->lock);
+-
++ (aconnector->mst_output_port->passthrough_aux ||
++ aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
+ cur_link_settings = stream->link->verified_link_cap;
+
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+- &cur_link_settings
+- );
+- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
++ &cur_link_settings);
++ down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
+
+ /* pick the bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ down_link_bw_in_kbps);
+
+- mutex_unlock(&mst_mgr->lock);
+-
+ /*
+ * use the maximum dsc compression bandwidth as the required
+ * bandwidth for the mode
+@@ -1636,9 +1642,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ } else {
+ /* check if mode could be supported within full_pbn */
+ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+-
+- if (pbn > aconnector->mst_output_port->full_pbn)
++ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
++ if (pbn > full_pbn)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index cc74dd69acf2ba..d1329f20b7bd4b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -28,6 +28,7 @@
+ #include <drm/drm_blend.h>
+ #include <drm/drm_gem_atomic_helper.h>
+ #include <drm/drm_plane_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_fourcc.h>
+
+ #include "amdgpu.h"
+@@ -848,10 +849,14 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+- obj = new_state->fb->obj[0];
++ obj = drm_gem_fb_get_obj(new_state->fb, 0);
++ if (!obj) {
++ DRM_ERROR("Failed to get obj from framebuffer\n");
++ return -EINVAL;
++ }
++
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+-
+ r = amdgpu_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+@@ -1276,7 +1281,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
+ adev->dm.dc->caps.color.dpp.gamma_corr)
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
+- attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
++ if (afb)
++ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
+
+ if (crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index 6b319044758151..684b005f564c47 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -667,6 +667,9 @@ static enum bp_result get_ss_info_v3_1(
+ ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
++ if (!ss_table_header_include)
++ return BP_RESULT_UNSUPPORTED;
++
+ table_size =
+ (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+@@ -1036,6 +1039,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ &bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(header, asSpreadSpectrum, 1)));
++ if (!header)
++ return result;
+
+ memset(info, 0, sizeof(struct spread_spectrum_info));
+
+@@ -1109,6 +1114,8 @@ static enum bp_result get_ss_info_from_ss_info_table(
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
++ if (!tbl)
++ return result;
+
+ if (1 != revision.major || 2 > revision.minor)
+ return result;
+@@ -1636,6 +1643,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+ DATA_TABLES(SS_Info));
++ if (!tbl)
++ return number;
+
+ if (1 != revision.major || 2 > revision.minor)
+ return number;
+@@ -1718,6 +1727,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ &bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(header_include, asSpreadSpectrum, 1)));
++ if (!header_include)
++ return 0;
+
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+@@ -1756,6 +1767,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
+ DATA_TABLES(ASIC_InternalSS_Info),
+ struct_size(header_include, asSpreadSpectrum, 1)));
++ if (!header_include)
++ return number;
++
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+@@ -2552,8 +2566,8 @@ static enum bp_result construct_integrated_info(
+
+ /* Sort voltage table from low to high*/
+ if (result == BP_RESULT_OK) {
+- uint32_t i;
+- uint32_t j;
++ int32_t i;
++ int32_t j;
+
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index 484d62bcf2c2e2..384ddb28e6f6d6 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1015,13 +1015,20 @@ static enum bp_result get_ss_info_v4_5(
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
+ break;
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+- ss_info->spread_spectrum_percentage =
++ if (bp->base.integrated_info) {
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
++ ss_info->spread_spectrum_percentage =
++ bp->base.integrated_info->gpuclk_ss_percentage;
++ ss_info->type.CENTER_MODE =
++ bp->base.integrated_info->gpuclk_ss_type;
++ } else {
++ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dp_ss_percentage;
+- ss_info->spread_spectrum_range =
++ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dp_ss_rate_10hz * 10;
+- if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+- ss_info->type.CENTER_MODE = true;
+-
++ if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
++ ss_info->type.CENTER_MODE = true;
++ }
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+@@ -1692,7 +1699,7 @@ static enum bp_result bios_parser_enable_disp_power_gating(
+ static enum bp_result bios_parser_enable_lvtma_control(
+ struct dc_bios *dcb,
+ uint8_t uc_pwr_on,
+- uint8_t panel_instance,
++ uint8_t pwrseq_instance,
+ uint8_t bypass_panel_control_wait)
+ {
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+@@ -1700,7 +1707,7 @@ static enum bp_result bios_parser_enable_lvtma_control(
+ if (!bp->cmd_tbl.enable_lvtma_control)
+ return BP_RESULT_FAILURE;
+
+- return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);
++ return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, pwrseq_instance, bypass_panel_control_wait);
+ }
+
+ static bool bios_parser_is_accelerated_mode(
+@@ -1853,19 +1860,21 @@ static enum bp_result get_firmware_info_v3_2(
+ /* Vega12 */
+ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+ DATA_TABLES(smu_info));
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+ if (!smu_info_v3_2)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
++
+ info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+ } else if (revision.minor == 3) {
+ /* Vega20 */
+ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+ if (!smu_info_v3_3)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
++
+ info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+ }
+
+@@ -2428,10 +2437,11 @@ static enum bp_result get_integrated_info_v11(
+ info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+ DATA_TABLES(integratedsysteminfo));
+
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+ if (info_v11 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
++
+ info->gpu_cap_info =
+ le32_to_cpu(info_v11->gpucapinfo);
+ /*
+@@ -2643,11 +2653,12 @@ static enum bp_result get_integrated_info_v2_1(
+
+ info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
+ DATA_TABLES(integratedsysteminfo));
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
+ if (info_v2_1 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
++
+ info->gpu_cap_info =
+ le32_to_cpu(info_v2_1->gpucapinfo);
+ /*
+@@ -2805,11 +2816,11 @@ static enum bp_result get_integrated_info_v2_2(
+ info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
+ DATA_TABLES(integratedsysteminfo));
+
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+-
+ if (info_v2_2 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
++
+ info->gpu_cap_info =
+ le32_to_cpu(info_v2_2->gpucapinfo);
+ /*
+@@ -2826,6 +2837,8 @@ static enum bp_result get_integrated_info_v2_2(
+ info->ma_channel_number = info_v2_2->umachannelnumber;
+ info->dp_ss_control =
+ le16_to_cpu(info_v2_2->reserved1);
++ info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
++ info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+@@ -2922,8 +2935,11 @@ static enum bp_result construct_integrated_info(
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+
+- uint32_t i;
+- uint32_t j;
++ int32_t i;
++ int32_t j;
++
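++ /* bail out early if the caller passed no info block to fill */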
++ if (!info)
++ return result;
+
+ if (info && DATA_TABLES(integratedsysteminfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+@@ -2948,6 +2964,7 @@ static enum bp_result construct_integrated_info(
+ result = get_integrated_info_v2_1(bp, info);
+ break;
+ case 2:
++ case 3:
+ result = get_integrated_info_v2_2(bp, info);
+ break;
+ default:
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+index 90a02d7bd3da3f..ab0adabf9dd4c6 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+@@ -976,7 +976,7 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
+ static enum bp_result enable_lvtma_control(
+ struct bios_parser *bp,
+ uint8_t uc_pwr_on,
+- uint8_t panel_instance,
++ uint8_t pwrseq_instance,
+ uint8_t bypass_panel_control_wait);
+
+ static void init_enable_lvtma_control(struct bios_parser *bp)
+@@ -989,7 +989,7 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
+ static void enable_lvtma_control_dmcub(
+ struct dc_dmub_srv *dmcub,
+ uint8_t uc_pwr_on,
+- uint8_t panel_instance,
++ uint8_t pwrseq_instance,
+ uint8_t bypass_panel_control_wait)
+ {
+
+@@ -1002,8 +1002,8 @@ static void enable_lvtma_control_dmcub(
+ DMUB_CMD__VBIOS_LVTMA_CONTROL;
+ cmd.lvtma_control.data.uc_pwr_action =
+ uc_pwr_on;
+- cmd.lvtma_control.data.panel_inst =
+- panel_instance;
++ cmd.lvtma_control.data.pwrseq_inst =
++ pwrseq_instance;
+ cmd.lvtma_control.data.bypass_panel_control_wait =
+ bypass_panel_control_wait;
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+@@ -1012,7 +1012,7 @@ static void enable_lvtma_control_dmcub(
+ static enum bp_result enable_lvtma_control(
+ struct bios_parser *bp,
+ uint8_t uc_pwr_on,
+- uint8_t panel_instance,
++ uint8_t pwrseq_instance,
+ uint8_t bypass_panel_control_wait)
+ {
+ enum bp_result result = BP_RESULT_FAILURE;
+@@ -1021,7 +1021,7 @@ static enum bp_result enable_lvtma_control(
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
+ uc_pwr_on,
+- panel_instance,
++ pwrseq_instance,
+ bypass_panel_control_wait);
+ return BP_RESULT_OK;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+index b6d09bf6cf72b6..41c8c014397f29 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+@@ -96,7 +96,7 @@ struct cmd_tbl {
+ struct bios_parser *bp, uint8_t id);
+ enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
+ uint8_t uc_pwr_on,
+- uint8_t panel_instance,
++ uint8_t pwrseq_instance,
+ uint8_t bypass_panel_control_wait);
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index 0c6a4ab72b1d29..97cdc24cef9a5c 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* Modify previous watermark range to cover up to max */
+- ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
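++ /* the first range has no predecessor; guard against indexing reader_wm_sets[-1] */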
++ if (num_valid_sets > 0)
++ ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ }
+ num_valid_sets++;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+index 7326b756584610..2618504e260e47 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+@@ -131,30 +131,27 @@ static int dcn314_get_active_display_cnt_wa(
+ return display_count;
+ }
+
+-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
++static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
++ bool safe_to_lower, bool disable)
+ {
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *pipe = safe_to_lower
++ ? &context->res_ctx.pipe_ctx[i]
++ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+- struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+-
+ if (disable) {
+- if (stream_enc && stream_enc->funcs->disable_fifo)
+- pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
++ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
++ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
+- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+-
+- if (stream_enc && stream_enc->funcs->enable_fifo)
+- pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
+ }
+ }
+@@ -252,11 +249,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+- dcn314_disable_otg_wa(clk_mgr_base, context, true);
++ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+- dcn314_disable_otg_wa(clk_mgr_base, context, false);
++ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+index b2c4f97afc8b4c..d4d3f58a613f7a 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ */
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ if (safe_to_lower) {
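++ /* DTBCLK is only dropped on the safe_to_lower pass; the enable path below mirrors this */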
++ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
++ dcn315_smu_set_dtbclk(clk_mgr, false);
++ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
++ }
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn315_get_active_display_cnt_wa(dc, context);
+@@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
+ }
+ }
+ } else {
++ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
++ dcn315_smu_set_dtbclk(clk_mgr, true);
++ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
++ }
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ union display_idle_optimization_u idle_info = { 0 };
+@@ -334,7 +342,7 @@ static struct wm_table lpddr5_wm_table = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+- .pstate_latency_us = 11.65333,
++ .pstate_latency_us = 129.0,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+@@ -342,7 +350,7 @@ static struct wm_table lpddr5_wm_table = {
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+- .pstate_latency_us = 11.65333,
++ .pstate_latency_us = 129.0,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+@@ -350,7 +358,7 @@ static struct wm_table lpddr5_wm_table = {
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+- .pstate_latency_us = 11.65333,
++ .pstate_latency_us = 129.0,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+@@ -358,7 +366,7 @@ static struct wm_table lpddr5_wm_table = {
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+- .pstate_latency_us = 11.65333,
++ .pstate_latency_us = 129.0,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
+ .valid = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+index 09151cc56ce4f2..a13ead3d21e310 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
+ return display_count;
+ }
+
+-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
++static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
++ bool safe_to_lower, bool disable)
+ {
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *pipe = safe_to_lower
++ ? &context->res_ctx.pipe_ctx[i]
++ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
+- dc_is_virtual_signal(pipe->stream->signal))) {
++ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
++ !pipe->stream->link_enc)) {
+ if (disable) {
+- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
++ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
++ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
++
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+- dcn316_disable_otg_wa(clk_mgr_base, context, true);
++ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+- dcn316_disable_otg_wa(clk_mgr_base, context, false);
++ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index e9345f6554dbcb..2428a4763b85f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -547,8 +547,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
+ * since we calculate mode support based on softmax being the max UCLK
+ * frequency.
+ */
+- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+- dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
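++ /* with DC-mode overwrite disabled, pin both the UCLK hard max and hard min to the absolute max */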
++ if (dc->debug.disable_dc_mode_overwrite) {
++ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
++ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
++ } else
++ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
++ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ } else {
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ }
+@@ -581,8 +585,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
+ /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
+ if (clk_mgr_base->clks.p_state_change_support &&
+ (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
+- !dc->work_arounds.clock_update_disable_mask.uclk)
++ !dc->work_arounds.clock_update_disable_mask.uclk) {
++ if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
++ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
++ max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
++
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
++ }
+
+ if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
+ clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index d08e60dff46deb..c2efe18ceacd07 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -990,7 +990,8 @@ static bool dc_construct(struct dc *dc,
+ /* set i2c speed if not done by the respective dcnxxx__resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+-
++ if (dc->caps.max_optimizable_video_width == 0)
++ dc->caps.max_optimizable_video_width = 5120;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+@@ -1069,53 +1070,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
+ }
+ }
+
+-static void phantom_pipe_blank(
+- struct dc *dc,
+- struct timing_generator *tg,
+- int width,
+- int height)
+-{
+- struct dce_hwseq *hws = dc->hwseq;
+- enum dc_color_space color_space;
+- struct tg_color black_color = {0};
+- struct output_pixel_processor *opp = NULL;
+- uint32_t num_opps, opp_id_src0, opp_id_src1;
+- uint32_t otg_active_width, otg_active_height;
+- uint32_t i;
+-
+- /* program opp dpg blank color */
+- color_space = COLOR_SPACE_SRGB;
+- color_space_to_black_color(dc, color_space, &black_color);
+-
+- otg_active_width = width;
+- otg_active_height = height;
+-
+- /* get the OPTC source */
+- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+-
+- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+- opp = dc->res_pool->opps[i];
+- break;
+- }
+- }
+-
+- if (opp && opp->funcs->opp_set_disp_pattern_generator)
+- opp->funcs->opp_set_disp_pattern_generator(
+- opp,
+- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+- COLOR_DEPTH_UNDEFINED,
+- &black_color,
+- otg_active_width,
+- otg_active_height,
+- 0);
+-
+- if (tg->funcs->is_tg_enabled(tg))
+- hws->funcs.wait_for_blank_complete(opp);
+-}
+-
+ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+ {
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+@@ -1206,7 +1160,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+- phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
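++ /* phantom blanking now goes through the optional blank_phantom hwseq hook */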
++ if (dc->hwss.blank_phantom)
++ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+@@ -1343,6 +1298,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+ return NULL;
+
+ if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
++ dc->caps.linear_pitch_alignment = 64;
+ if (!dc_construct_ctx(dc, init_params))
+ goto destruct_dc;
+ } else {
+@@ -1735,7 +1691,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
+ if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
+ return false;
+
+- if (!se->funcs->dp_get_pixel_format)
++ if (!se || !se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+@@ -1755,6 +1711,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
+ return false;
+ }
+
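++ /* seamless boot timing validation is not supported on 128b/132b channel coding */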
++ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
++ return false;
++
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
+ DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
+ return false;
+@@ -1888,7 +1847,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ disable_dangling_plane(dc, context);
+@@ -1993,9 +1952,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ wait_for_no_pipes_pending(dc, context);
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
++ /* Need to do otg sync again as the otg could be out of sync due to the
++ * otg workaround applied during the clock update
++ */
++ dc_trigger_sync(dc, context);
+ }
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+@@ -2242,7 +2205,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
+
+ dc->hwss.optimize_bandwidth(dc, context);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
+
+@@ -2488,6 +2451,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ }
+
+ static enum surface_update_type get_scaling_info_update_type(
++ const struct dc *dc,
+ const struct dc_surface_update *u)
+ {
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+@@ -2520,6 +2484,12 @@ static enum surface_update_type get_scaling_info_update_type(
+ update_flags->bits.clock_change = 1;
+ }
+
++ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
++ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
++ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
++ /* Changing the clip size of a large surface may result in an MPC slice count change */
++ update_flags->bits.bandwidth_change = 1;
++
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+@@ -2557,7 +2527,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ type = get_plane_info_update_type(u);
+ elevate_update_type(&overall_type, type);
+
+- type = get_scaling_info_update_type(u);
++ type = get_scaling_info_update_type(dc, u);
+ elevate_update_type(&overall_type, type);
+
+ if (u->flip_addr) {
+@@ -3571,7 +3541,7 @@ static void commit_planes_for_stream(struct dc *dc,
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
+- if (dc->debug.enable_double_buffered_dsc_pg_support)
++ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ context_clock_trace(dc, context);
+@@ -3827,7 +3797,8 @@ static void commit_planes_for_stream(struct dc *dc,
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
++ if (top_pipe_to_program &&
++ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+@@ -4374,6 +4345,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
+ update_type,
+ context);
+ } else {
++ if (!stream_update &&
++ dc->hwss.is_pipe_topology_transition_seamless &&
++ !dc->hwss.is_pipe_topology_transition_seamless(
++ dc, dc->current_state, context)) {
++
++ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
++ BREAK_TO_DEBUGGER();
++ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+@@ -4737,7 +4716,8 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+ if (allow == dc->idle_optimizations_allowed)
+ return;
+
+- if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
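++ /* idle power optimizations need a clock manager to be present */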
++ if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
++ dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->idle_optimizations_allowed = allow;
+ }
+
+@@ -4895,18 +4875,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
+ */
+ bool dc_is_dmub_outbox_supported(struct dc *dc)
+ {
+- /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+- if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+- dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+- !dc->debug.dpia_debug.bits.disable_dpia)
+- return true;
++ switch (dc->ctx->asic_id.chip_family) {
+
+- if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
+- !dc->debug.dpia_debug.bits.disable_dpia)
+- return true;
++ case FAMILY_YELLOW_CARP:
++ /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
++ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
++ !dc->debug.dpia_debug.bits.disable_dpia)
++ return true;
++ break;
++
++ case AMDGPU_FAMILY_GC_11_0_1:
++ case AMDGPU_FAMILY_GC_11_5_0:
++ if (!dc->debug.dpia_debug.bits.disable_dpia)
++ return true;
++ break;
++
++ default:
++ break;
++ }
+
+ /* dmub aux needs dmub notifications to be enabled */
+ return dc->debug.enable_dmub_aux_for_legacy_ddc;
++
+ }
+
+ /**
+@@ -5284,3 +5274,24 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
+ properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
+ }
+
++/**
++ *****************************************************************************
++ * dc_set_edp_power() - DM control of eDP power (ON/OFF)
++ *
++ * Called when DM wants to power the eDP panel on or off.
++ * Only works on links that have the skip_implict_edp_power_control flag set.
++ *
++ *****************************************************************************
++ */
++void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
++ bool powerOn)
++{
++ if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
++ return;
++
++ if (edp_link->skip_implict_edp_power_control == false)
++ return;
++
++ edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
++}
++
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+index ed94187c2afa2d..f365773d571485 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+@@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+ link->dc->link_srv->enable_hpd_filter(link, enable);
+ }
+
+-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
++bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+ {
+ return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index f7b51aca602006..99fcd39bb15e0d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -996,7 +996,7 @@ static void adjust_recout_for_visual_confirm(struct rect *recout,
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ int dpp_offset, base_offset;
+
+- if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
++ if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE || !pipe_ctx->plane_res.dpp)
+ return;
+
+ dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
+@@ -2154,6 +2154,8 @@ static bool are_stream_backends_same(
+ bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+ {
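++ /* a missing stream on either side counts as a change */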
++ if (!old_stream || !stream)
++ return false;
+
+ if (!are_stream_backends_same(old_stream, stream))
+ return false;
+@@ -2385,6 +2387,9 @@ static struct audio *find_first_free_audio(
+ {
+ int i, available_audio_count;
+
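++ /* an unknown engine id can never match an audio in the pool */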
++ if (id == ENGINE_ID_UNKNOWN)
++ return NULL;
++
+ available_audio_count = pool->audio_count;
+
+ for (i = 0; i < available_audio_count; i++) {
+@@ -2874,8 +2879,10 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
+ }
+ }
+
+- if (!stream_status)
++ if (!stream_status) {
+ ASSERT(0);
++ return false;
++ }
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+@@ -3924,6 +3931,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+
+ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+ {
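++ /* reject NULL inputs before they are dereferenced below */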
++ if (dc == NULL || stream == NULL)
++ return DC_ERROR_UNEXPECTED;
++
+ struct dc_link *link = stream->link;
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ enum dc_status res = DC_OK;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 01fe2d2fd24172..ebe571fcefe32e 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -582,7 +582,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+- if (res_ctx->pipe_ctx[i].stream != stream)
++ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ continue;
+
+ return tg->funcs->get_frame_count(tg);
+@@ -641,7 +641,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+- if (res_ctx->pipe_ctx[i].stream != stream)
++ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ continue;
+
+ tg->funcs->get_scanoutpos(tg,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+index a80e45300783c0..f4f3ca7aad60e2 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+@@ -154,7 +154,8 @@ const struct dc_plane_status *dc_plane_get_status(
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+- pipe_ctx->plane_state->status.is_flip_pending = false;
++ if (pipe_ctx->plane_state)
++ pipe_ctx->plane_state->status.is_flip_pending = false;
+
+ break;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 31e3183497a7f5..5f2eac868b7472 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -231,6 +231,11 @@ struct dc_caps {
+ uint32_t dmdata_alloc_size;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
++ /*
++ * max video plane width that can safely be assumed to always be
++ * supported by a single DPP pipe.
++ */
++ unsigned int max_optimizable_video_width;
+ unsigned int min_horizontal_blanking_period;
+ int linear_pitch_alignment;
+ bool dcc_const_color;
+@@ -1533,7 +1538,6 @@ struct dc_link {
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+- struct backlight_settings backlight_settings;
+ struct psr_settings psr_settings;
+
+ struct replay_settings replay_settings;
+@@ -1573,6 +1577,7 @@ struct dc_link {
+ struct phy_state phy_state;
+ // BW ALLOCATON USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
++ bool skip_implict_edp_power_control;
+ };
+
+ /* Return an enumerated dc_link.
+@@ -1592,6 +1597,9 @@ void dc_get_edp_links(const struct dc *dc,
+ struct dc_link **edp_links,
+ int *edp_num);
+
++void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
++ bool powerOn);
++
+ /* The function initiates detection handshake over the given link. It first
+ * determines if there are display connections over the link. If so it initiates
+ * detection protocols supported by the connected receiver device. The function
+@@ -2108,11 +2116,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ *
+ * @dc: pointer to dc struct
+ * @stream: pointer to all possible streams
+- * @num_streams: number of valid DPIA streams
++ * @count: number of valid DPIA streams
+ *
+ * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ */
+-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
++bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
+ const unsigned int count);
+
+ /* Sink Interfaces - A sink corresponds to a display output device */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+index be9aa1a71847d7..26940d94d8fb40 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+@@ -140,7 +140,7 @@ struct dc_vbios_funcs {
+ enum bp_result (*enable_lvtma_control)(
+ struct dc_bios *bios,
+ uint8_t uc_pwr_on,
+- uint8_t panel_instance,
++ uint8_t pwrseq_instance,
+ uint8_t bypass_panel_control_wait);
+
+ enum bp_result (*get_soc_bb_info)(
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index cfaa39c5dd16bd..83719f5bea4956 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -1433,6 +1433,12 @@ struct dp_trace {
+ #ifndef DP_TUNNELING_STATUS
+ #define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
+ #endif
++#ifndef DP_TUNNELING_MAX_LINK_RATE
++#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
++#endif
++#ifndef DP_TUNNELING_MAX_LANE_COUNT
++#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
++#endif
+ #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
+ #define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+index fe3078b8789ef1..01c07545ef6b47 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+@@ -100,7 +100,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+ */
+ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+ uint32_t max_target_bpp_limit_override_x16,
+- struct dc_dsc_policy *policy);
++ struct dc_dsc_policy *policy,
++ const enum dc_link_encoding_format link_encoding);
+
+ void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+index 100d62162b717e..00de342e5290b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+@@ -244,7 +244,7 @@ enum pixel_format {
+ #define DC_MAX_DIRTY_RECTS 3
+ struct dc_flip_addrs {
+ struct dc_plane_address address;
+- unsigned int flip_timestamp_in_us;
++ unsigned long long flip_timestamp_in_us;
+ bool flip_immediate;
+ /* TODO: add flip duration for FreeSync */
+ bool triplebuffer_flips;
+@@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
+ struct fixed31_32 v_scale_ratio;
+ enum dc_rotation_angle rotation;
+ bool mirror;
++ struct dc_stream_state *stream;
+ };
+
+ /* IPP related types */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 3697ea1d14c1bf..d5b3e3a32cc6d4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -302,7 +302,6 @@ struct dc_stream_state {
+ bool vblank_synchronized;
+ bool fpo_in_use;
+ struct mall_stream_config mall_stream_config;
+- bool skip_edp_power_down;
+ };
+
+ #define ABM_LEVEL_IMMEDIATE_DISABLE 255
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 445ad79001ce2d..6eaa02a80344bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -189,6 +189,8 @@ struct dc_panel_patch {
+ unsigned int disable_fams;
+ unsigned int skip_avmute;
+ unsigned int mst_start_top_delay;
++ unsigned int remove_sink_ext_caps;
++ unsigned int disable_colorimetry;
+ };
+
+ struct dc_edid_caps {
+@@ -1002,10 +1004,6 @@ struct link_mst_stream_allocation_table {
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+ };
+
+-struct backlight_settings {
+- uint32_t backlight_millinits;
+-};
+-
+ /* PSR feature flags */
+ struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+@@ -1113,21 +1111,25 @@ struct dc_panel_config {
+ } ilr;
+ };
+
++#define MAX_SINKS_PER_LINK 4
++
+ /*
+ * USB4 DPIA BW ALLOCATION STRUCTS
+ */
+ struct dc_dpia_bw_alloc {
+- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
+- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
+- int sink_max_bw; // The Max BW that sink can require/support
++ int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
++ int link_verified_bw; // The BW that the link can allocate and use; it has already been verified
++ int link_max_bw; // The max BW that the link can require/support
++ int allocated_bw; // The Actual Allocated BW for this DPIA
+ int estimated_bw; // The estimated available BW for this DPIA
+ int bw_granularity; // BW Granularity
++ int dp_overhead; // DP overhead in dp tunneling
+ bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
+ bool response_ready; // Response ready from the CM side
++ uint8_t nrd_max_lane_count; // Non-reduced max lane count
++ uint8_t nrd_max_link_rate; // Non-reduced max link rate
+ };
+
+-#define MAX_SINKS_PER_LINK 4
+-
+ enum dc_hpd_enable_select {
+ HPD_EN_FOR_ALL_EDP = 0,
+ HPD_EN_FOR_PRIMARY_EDP_ONLY,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index b87bfecb7755ae..a8e79104b684ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -586,7 +586,8 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+ if (state == PSR_STATE0)
+ break;
+ }
+- fsleep(500);
++ /* must *not* be fsleep - this can be called from high irq levels */
++ udelay(500);
+ }
+
+ /* assert if max retry hit */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+index d3e6544022b787..930fd929e93a4f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+@@ -145,7 +145,11 @@ static bool dmub_abm_save_restore_ex(
+ return ret;
+ }
+
+-static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
++static bool dmub_abm_set_pipe_ex(struct abm *abm,
++ uint32_t otg_inst,
++ uint32_t option,
++ uint32_t panel_inst,
++ uint32_t pwrseq_inst)
+ {
+ bool ret = false;
+ unsigned int feature_support;
+@@ -153,7 +157,7 @@ static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t op
+ feature_support = abm_feature_support(abm, panel_inst);
+
+ if (feature_support == ABM_LCD_SUPPORT)
+- ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst);
++ ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+index 592a8f7a1c6d00..42c802afc4681b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+@@ -254,7 +254,11 @@ bool dmub_abm_save_restore(
+ return true;
+ }
+
+-bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
++bool dmub_abm_set_pipe(struct abm *abm,
++ uint32_t otg_inst,
++ uint32_t option,
++ uint32_t panel_inst,
++ uint32_t pwrseq_inst)
+ {
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+@@ -264,6 +268,7 @@ bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
++ cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+index 853564d7f4714c..07ea6c8d414f3b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+@@ -44,7 +44,7 @@ bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
+-bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
++bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst);
+ bool dmub_abm_set_backlight_level(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 0f24b6fbd22013..4704c9c85ee6f5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -216,7 +216,8 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
+ break;
+ }
+
+- fsleep(500);
++ /* must *not* be fsleep - this can be called from high irq levels */
++ udelay(500);
+ }
+
+ /* assert if max retry hit */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+index 28149e53c2a68f..eeb5b8247c9652 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+@@ -102,7 +102,8 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
+ break;
+ }
+
+- fsleep(500);
++ /* must *not* be fsleep - this can be called from high irq levels */
++ udelay(500);
+ }
+
+ /* assert if max retry hit */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 2a6157555fd1e4..7b5c1498941dd6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -788,7 +788,7 @@ void dce110_edp_power_control(
+ struct dc_context *ctx = link->ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result bp_result;
+- uint8_t panel_instance;
++ uint8_t pwrseq_instance;
+
+
+ if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
+@@ -871,7 +871,7 @@ void dce110_edp_power_control(
+ cntl.coherent = false;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = link->link_enc->hpd_source;
+- panel_instance = link->panel_cntl->inst;
++ pwrseq_instance = link->panel_cntl->pwrseq_inst;
+
+ if (ctx->dc->ctx->dmub_srv &&
+ ctx->dc->debug.dmub_command_table) {
+@@ -879,11 +879,11 @@ void dce110_edp_power_control(
+ if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
+ bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_POWER_ON,
+- panel_instance, link->link_powered_externally);
++ pwrseq_instance, link->link_powered_externally);
+ } else {
+ bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_POWER_OFF,
+- panel_instance, link->link_powered_externally);
++ pwrseq_instance, link->link_powered_externally);
+ }
+ }
+
+@@ -954,7 +954,7 @@ void dce110_edp_backlight_control(
+ {
+ struct dc_context *ctx = link->ctx;
+ struct bp_transmitter_control cntl = { 0 };
+- uint8_t panel_instance;
++ uint8_t pwrseq_instance;
+ unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
+ unsigned int post_T7_delay = OLED_POST_T7_DELAY;
+
+@@ -1007,7 +1007,7 @@ void dce110_edp_backlight_control(
+ */
+ /* dc_service_sleep_in_milliseconds(50); */
+ /*edp 1.2*/
+- panel_instance = link->panel_cntl->inst;
++ pwrseq_instance = link->panel_cntl->pwrseq_inst;
+
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) {
+ if (!link->dc->config.edp_no_power_sequencing)
+@@ -1032,11 +1032,11 @@ void dce110_edp_backlight_control(
+ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
+ ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_LCD_BLON,
+- panel_instance, link->link_powered_externally);
++ pwrseq_instance, link->link_powered_externally);
+ else
+ ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+ LVTMA_CONTROL_LCD_BLOFF,
+- panel_instance, link->link_powered_externally);
++ pwrseq_instance, link->link_powered_externally);
+ }
+
+ link_transmitter_control(ctx->dc_bios, &cntl);
+@@ -1179,9 +1179,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ if (dccg) {
+- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
++ if (dccg && dccg->funcs->set_dtbclk_dto)
++ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+ } else if (dccg && dccg->funcs->disable_symclk_se) {
+ dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
+@@ -1226,7 +1227,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
+ struct dce_hwseq *hws = link->dc->hwseq;
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+- if (!stream->skip_edp_power_down)
++ if (!link->skip_implict_edp_power_control)
+ hws->funcs.edp_backlight_control(link, false);
+ link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ }
+@@ -2124,7 +2125,8 @@ static void dce110_reset_hw_ctx_wrap(
+ BREAK_TO_DEBUGGER();
+ }
+ pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+- pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
++ if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal))
++ pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
+ pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 3538973bd0c6cb..684e30f9cf8985 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -382,6 +382,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
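++ /* i advances by a computed increment and can overshoot the tf_pts arrays */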
++ if (i >= TRANSFER_FUNC_POINTS) {
++ DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
++ i, TRANSFER_FUNC_POINTS);
++ return false;
++ }
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+@@ -566,6 +571,8 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
++ if (i >= TRANSFER_FUNC_POINTS)
++ return false;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9834b75f1837ba..ff38a85c4fa22d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -111,7 +111,8 @@ void dcn10_lock_all_pipes(struct dc *dc,
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream ||
+ (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
+- !tg->funcs->is_tg_enabled(tg))
++ !tg->funcs->is_tg_enabled(tg) ||
++ pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+@@ -1053,7 +1054,8 @@ static void dcn10_reset_back_end_for_pipe(
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
+- pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
++ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+@@ -1830,6 +1832,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ {
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
++ if (!stream)
++ return false;
++
+ if (dpp == NULL)
+ return false;
+
+@@ -1852,8 +1857,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ } else
+ dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
+
+- if (stream != NULL && stream->ctx != NULL &&
+- stream->out_transfer_func != NULL) {
++ if (stream->ctx &&
++ stream->out_transfer_func) {
+ log_tf(stream->ctx,
+ stream->out_transfer_func,
+ dpp->regamma_params.hw_points_num);
+@@ -3406,7 +3411,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+ .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+ .rotation = pipe_ctx->plane_state->rotation,
+- .mirror = pipe_ctx->plane_state->horizontal_mirror
++ .mirror = pipe_ctx->plane_state->horizontal_mirror,
++ .stream = pipe_ctx->stream,
+ };
+ bool pipe_split_on = false;
+ bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
+@@ -3515,7 +3521,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+- pos_cpy.x = temp_x + viewport_width;
++ pos_cpy.x = 2 * viewport_width - temp_x;
+ }
+ }
+ } else {
+@@ -3608,7 +3614,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+- pos_cpy.x = 2 * viewport_width - temp_x;
++ pos_cpy.x = temp_x + viewport_width;
+ }
+ }
+ } else {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+index 994fb732a7cb76..a0d437f0ce2baf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
+ int pair;
+ uint16_t odd_coef, even_coef;
+
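++ /* no coefficients to program without a filter */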
++ if (!filter)
++ return;
++
+ for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ for (pair = 0; pair < tap_pairs; pair++) {
+ even_coef = filter[phase * taps + 2 * pair];
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+index 4566bc7abf17e6..aa252dc2632671 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+@@ -1075,8 +1075,16 @@ void hubp2_cursor_set_position(
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save necessary cursor info x, y position. w, h is saved in attribute func. */
+- hubp->cur_rect.x = src_x_offset + param->viewport.x;
+- hubp->cur_rect.y = src_y_offset + param->viewport.y;
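++ /* for PSR-SU with a rotated cursor, report the full addressable area as the cursor rect */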
++ if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
++ param->rotation != ROTATION_ANGLE_0) {
++ hubp->cur_rect.x = 0;
++ hubp->cur_rect.y = 0;
++ hubp->cur_rect.w = param->stream->timing.h_addressable;
++ hubp->cur_rect.h = param->stream->timing.v_addressable;
++ } else {
++ hubp->cur_rect.x = src_x_offset + param->viewport.x;
++ hubp->cur_rect.y = src_y_offset + param->viewport.y;
++ }
+ }
+
+ void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index aeadc587433fd5..12af2859002f72 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1792,6 +1792,8 @@ void dcn20_program_front_end_for_ctx(
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+ DC_LOGGER_INIT(dc->ctx->logger);
++ unsigned int prev_hubp_count = 0;
++ unsigned int hubp_count = 0;
+
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -1815,6 +1817,20 @@ void dcn20_program_front_end_for_ctx(
+ }
+ }
+
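++ /* count planes in the old and new state to spot a 0 -> N plane enable transition */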
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
++ prev_hubp_count++;
++ if (context->res_ctx.pipe_ctx[i].plane_state)
++ hubp_count++;
++ }
++
++ if (prev_hubp_count == 0 && hubp_count > 0) {
++ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
++ dc->res_pool->hubbub->funcs->force_pstate_change_control(
++ dc->res_pool->hubbub, true, false);
++ udelay(500);
++ }
++
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+@@ -1830,8 +1846,16 @@ void dcn20_program_front_end_for_ctx(
+ dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+- if (tg->funcs->enable_crtc)
++ if (tg->funcs->enable_crtc) {
++ if (dc->hwss.blank_phantom) {
++ int main_pipe_width, main_pipe_height;
++
++ main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
++ main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
++ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
++ }
+ tg->funcs->enable_crtc(tg);
++ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+@@ -1954,6 +1978,10 @@ void dcn20_post_unlock_program_front_end(
+ }
+ }
+
++ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
++ dc->res_pool->hubbub->funcs->force_pstate_change_control(
++ dc->res_pool->hubbub, false, false);
++
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+@@ -2505,7 +2533,8 @@ static void dcn20_reset_back_end_for_pipe(
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+- link->phy_state.symclk_ref_cnts.otg = 0;
++ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
++ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+@@ -2699,18 +2728,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
+ }
+
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
+- dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+-
+- phyd32clk = get_phyd32clk_src(link);
+- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+-
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
++ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
++ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
++
++ phyd32clk = get_phyd32clk_src(link);
++ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+ } else {
+ }
+ if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+index 43463d08f21ba9..1b08749b084b1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+@@ -137,7 +137,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
+ pipe_ctx->stream->dpms_off = true;
+ }
+
+-static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
++static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
++ uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst)
+ {
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+@@ -147,6 +148,7 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
++ cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+@@ -179,7 +181,6 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+-
+ struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+
+ if (dmcu) {
+@@ -190,9 +191,13 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+ if (abm && panel_cntl) {
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
+- panel_cntl->inst);
++ panel_cntl->inst, panel_cntl->pwrseq_inst);
+ } else {
+- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst);
++ dmub_abm_set_pipe(abm,
++ otg_inst,
++ SET_ABM_PIPE_IMMEDIATELY_DISABLE,
++ panel_cntl->inst,
++ panel_cntl->pwrseq_inst);
+ }
+ panel_cntl->funcs->store_backlight_level(panel_cntl);
+ }
+@@ -201,21 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
+ {
+ struct abm *abm = pipe_ctx->stream_res.abm;
+- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
++ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
++ uint32_t otg_inst;
++
++ if (!abm && !tg && !panel_cntl)
++ return;
++
++ otg_inst = tg->inst;
+
+ if (dmcu) {
+ dce110_set_pipe(pipe_ctx);
+ return;
+ }
+
+- if (abm && panel_cntl) {
+- if (abm->funcs && abm->funcs->set_pipe_ex) {
+- abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+- } else {
+- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+- }
++ if (abm->funcs && abm->funcs->set_pipe_ex) {
++ abm->funcs->set_pipe_ex(abm,
++ otg_inst,
++ SET_ABM_PIPE_NORMAL,
++ panel_cntl->inst,
++ panel_cntl->pwrseq_inst);
++ } else {
++ dmub_abm_set_pipe(abm, otg_inst,
++ SET_ABM_PIPE_NORMAL,
++ panel_cntl->inst,
++ panel_cntl->pwrseq_inst);
+ }
+ }
+
+@@ -225,26 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ {
+ struct dc_context *dc = pipe_ctx->stream->ctx;
+ struct abm *abm = pipe_ctx->stream_res.abm;
++ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
++ uint32_t otg_inst;
++
++ if (!abm && !tg && !panel_cntl)
++ return false;
++
++ otg_inst = tg->inst;
+
+ if (dc->dc->res_pool->dmcu) {
+ dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
+ return true;
+ }
+
+- if (abm != NULL) {
+- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+-
+- if (abm && panel_cntl) {
+- if (abm->funcs && abm->funcs->set_pipe_ex) {
+- abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+- } else {
+- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+- }
+- }
++ if (abm->funcs && abm->funcs->set_pipe_ex) {
++ abm->funcs->set_pipe_ex(abm,
++ otg_inst,
++ SET_ABM_PIPE_NORMAL,
++ panel_cntl->inst,
++ panel_cntl->pwrseq_inst);
++ } else {
++ dmub_abm_set_pipe(abm,
++ otg_inst,
++ SET_ABM_PIPE_NORMAL,
++ panel_cntl->inst,
++ panel_cntl->pwrseq_inst);
+ }
+
+- if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
++ if (abm->funcs && abm->funcs->set_backlight_level_pwm)
+ abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
+ frame_ramp, 0, panel_cntl->inst);
+ else
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+index e0df9b0065f9c0..62c02adae7e76e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+@@ -178,6 +178,8 @@ bool cm3_helper_translate_curve_to_hw_format(
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
++ if (i >= TRANSFER_FUNC_POINTS)
++ return false;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+@@ -355,6 +357,8 @@ bool cm3_helper_translate_curve_to_degamma_hw_format(
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
++ if (i >= TRANSFER_FUNC_POINTS)
++ return false;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 255713ec29bb0f..d59af329d0009e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -214,7 +214,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc,
+ }
+ }
+
+- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++ if (mpc->funcs->set_output_gamma)
++ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++ else
++ DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__);
++
+ return ret;
+ }
+
+@@ -619,10 +623,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+ if (pipe_ctx == NULL)
+ return;
+
+- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
++ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
+ pipe_ctx->stream_res.stream_enc,
+ enable);
++
++ /* Wait for two frames to make sure the AV mute is sent out */
++ if (enable) {
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++ }
++ }
+ }
+
+ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index 79d6697d13b678..9485fda890cd71 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -996,7 +996,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
+ vpg = dcn301_vpg_create(ctx, vpg_inst);
+ afmt = dcn301_afmt_create(ctx, afmt_inst);
+
+- if (!enc1 || !vpg || !afmt) {
++ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+index 5b7ad38f85e08f..65e45a0b4ff343 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ x),
+ 25));
+
++ // If y rounds up to an integer, carry it over to x.
++ if (y >> 25) {
++ x += 1;
++ y = 0;
++ }
++
+ switch (stream_encoder_inst) {
+ case 0:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
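The added guard handles the case where the fractional part of the throttled VCP size, held in the low 25 bits of y, rounds up to a full integer; the carry is moved into x so the register fields never see a fraction equal to 1.0. A small self-contained sketch of the same normalisation, assuming a 25-bit fraction:

    #include <stdint.h>
    #include <stdio.h>

    /* Normalise a value split into an integer part x and a 25-bit
     * fractional part y, propagating a carry when y reaches 1.0. */
    static void normalize_q25(uint32_t *x, uint32_t *y)
    {
        if (*y >> 25) {  /* fraction rounded up to >= 1.0 */
            *x += 1;
            *y = 0;
        }
    }

    int main(void)
    {
        uint32_t x = 3, y = 1u << 25;      /* 3 + 1.0 after rounding */
        normalize_q25(&x, &y);
        printf("%u + %u/2^25\n", x, y);    /* prints "4 + 0/2^25"    */
        return 0;
    }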
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+index 2a7f47642a4479..22da2007601ee9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+@@ -523,7 +523,8 @@ static void dcn31_reset_back_end_for_pipe(
+ if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
+ pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+- pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
++ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
++ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+index 217acd4e292a30..d849b1eaa4a5c3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+@@ -50,7 +50,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
+ cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
+ cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO;
+ cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
+- cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
++ cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
+
+ return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ }
+@@ -78,7 +78,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+ cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL;
+ cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT;
+ cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data);
+- cmd.panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
++ cmd.panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
+ cmd.panel_cntl.data.bl_pwm_cntl = panel_cntl->stored_backlight_registers.BL_PWM_CNTL;
+ cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL;
+ cmd.panel_cntl.data.bl_pwm_ref_div1 =
+@@ -157,4 +157,5 @@ void dcn31_panel_cntl_construct(
+ dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
+ dcn31_panel_cntl->base.ctx = init_data->ctx;
+ dcn31_panel_cntl->base.inst = init_data->inst;
++ dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+index 4d2820ffe4682f..33a8626bda7359 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+@@ -476,7 +476,8 @@ void dcn314_disable_link_output(struct dc_link *link,
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+- link->dc->hwss.edp_backlight_control)
++ link->dc->hwss.edp_backlight_control &&
++ !link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 004beed9bd444c..3e65e683db0acf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -869,7 +869,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
+- .minimum_z8_residency_time = 2000,
++ .minimum_z8_residency_time = 2100,
+ .psr_skip_crtc_disable = true,
+ .replay_skip_crtc_disabled = true,
+ .disable_dmcu = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 680e7fa8d18abc..650e1598bddcb1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -77,6 +77,9 @@ void dcn32_dsc_pg_control(
+ if (hws->ctx->dc->debug.disable_dsc_power_gate)
+ return;
+
++ if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support)
++ return;
++
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
+@@ -214,7 +217,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
+ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
+ {
+ int i;
+- uint8_t num_ways = 0;
++ uint32_t num_ways = 0;
+ uint32_t mall_ss_size_bytes = 0;
+
+ mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+@@ -244,7 +247,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
+ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ {
+ union dmub_rb_cmd cmd;
+- uint8_t ways, i;
++ uint8_t i;
++ uint32_t ways;
+ int j;
+ bool mall_ss_unsupported = false;
+ struct dc_plane_state *plane = NULL;
+@@ -304,7 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
+ cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+- cmd.cab.cab_alloc_ways = ways;
++ cmd.cab.cab_alloc_ways = (uint8_t)ways;
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
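Widening num_ways/ways from uint8_t to uint32_t matters because the computed way count can exceed 255 before it is compared against the cache capacity; only the final, validated value is narrowed back for the DMUB command. A reduced illustration of the truncation the change avoids (the capacity value 64 is made up for the example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t required  = 300;               /* ways actually needed */
        uint8_t  truncated = (uint8_t)required; /* wraps to 44          */

        /* With a uint8_t accumulator the capacity check can pass
         * spuriously; with uint32_t it sees the real demand. */
        printf("truncated=%u fits=%d\n", truncated, truncated <= 64);
        printf("widened=%u   fits=%d\n", required,  required  <= 64);
        return 0;
    }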
+@@ -482,8 +486,7 @@ bool dcn32_set_mcm_luts(
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+- plane_state->blend_tf,
++ cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = &dpp_base->regamma_params;
+ }
+@@ -497,8 +500,7 @@ bool dcn32_set_mcm_luts(
+ else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ ASSERT(false);
+- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+- plane_state->in_shaper_func,
++ cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = &dpp_base->shaper_params;
+ }
+@@ -1573,3 +1575,101 @@ void dcn32_init_blank(
+ if (opp)
+ hws->funcs.wait_for_blank_complete(opp);
+ }
++
++void dcn32_blank_phantom(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height)
++{
++ struct dce_hwseq *hws = dc->hwseq;
++ enum dc_color_space color_space;
++ struct tg_color black_color = {0};
++ struct output_pixel_processor *opp = NULL;
++ uint32_t num_opps, opp_id_src0, opp_id_src1;
++ uint32_t otg_active_width, otg_active_height;
++ uint32_t i;
++
++ /* program opp dpg blank color */
++ color_space = COLOR_SPACE_SRGB;
++ color_space_to_black_color(dc, color_space, &black_color);
++
++ otg_active_width = width;
++ otg_active_height = height;
++
++ /* get the OPTC source */
++ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
++ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
++
++ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
++ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
++ opp = dc->res_pool->opps[i];
++ break;
++ }
++ }
++
++ if (opp && opp->funcs->opp_set_disp_pattern_generator)
++ opp->funcs->opp_set_disp_pattern_generator(
++ opp,
++ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
++ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
++ COLOR_DEPTH_UNDEFINED,
++ &black_color,
++ otg_active_width,
++ otg_active_height,
++ 0);
++
++ if (tg->funcs->is_tg_enabled(tg))
++ hws->funcs.wait_for_blank_complete(opp);
++}
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx)
++{
++ int i;
++ const struct pipe_ctx *cur_pipe, *new_pipe;
++ bool is_seamless = true;
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i];
++ new_pipe = &new_ctx->res_ctx.pipe_ctx[i];
++
++ if (resource_is_pipe_type(cur_pipe, FREE_PIPE) ||
++ resource_is_pipe_type(new_pipe, FREE_PIPE))
++ /* adding or removing free pipes is always seamless */
++ continue;
++ else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
++ if (resource_is_pipe_type(new_pipe, OTG_MASTER))
++ if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
++ /* OTG master with the same stream is seamless */
++ continue;
++ } else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
++ if (resource_is_pipe_type(new_pipe, OPP_HEAD)) {
++ if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg)
++ /*
++ * OPP heads sharing the same timing
++ * generator is seamless
++ */
++ continue;
++ }
++ } else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) {
++ if (resource_is_pipe_type(new_pipe, DPP_PIPE)) {
++ if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp)
++ /*
++ * DPP pipes sharing the same OPP head is
++ * seamless
++ */
++ continue;
++ }
++ }
++
++ /*
++ * This pipe's transition doesn't fall under any seamless
++ * conditions
++ */
++ is_seamless = false;
++ break;
++ }
++
++ return is_seamless;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+index 2d2628f31bed7d..9992e40acd217b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+@@ -115,4 +115,13 @@ void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+
++void dcn32_blank_phantom(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height);
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx);
++
+ #endif /* __DC_HWSS_DCN32_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+index c7417147dff19b..1edadff39a5eff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+@@ -115,6 +115,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+ .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
+ .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
++ .blank_phantom = dcn32_blank_phantom,
++ .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
+ };
+
+ static const struct hwseq_private_funcs dcn32_private_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+index 8abb94f60078fc..058dee76054ea7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+@@ -142,6 +142,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
++ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
++ OPTC_SEG0_SRC_SEL, 0xf,
++ OPTC_SEG1_SRC_SEL, 0xf,
++ OPTC_SEG2_SRC_SEL, 0xf,
++ OPTC_SEG3_SRC_SEL, 0xf,
++ OPTC_NUM_OF_INPUT_SEGMENT, 0);
++
++ REG_UPDATE(OPTC_MEMORY_CONFIG,
++ OPTC_MEM_SEL, 0);
++
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+@@ -174,6 +184,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
+ {
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
++ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
++ OPTC_SEG0_SRC_SEL, 0xf,
++ OPTC_SEG1_SRC_SEL, 0xf,
++ OPTC_SEG2_SRC_SEL, 0xf,
++ OPTC_SEG3_SRC_SEL, 0xf,
++ OPTC_NUM_OF_INPUT_SEGMENT, 0);
++
+ REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
+ }
+
+@@ -219,9 +236,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+-
+- // Setup manual flow control for EOF via TRIG_A
+- optc->funcs->setup_manual_trigger(optc);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index 8d73cceb485bf4..aa4c64eec7b3d6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1756,6 +1756,9 @@ static bool dcn321_resource_construct(
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
++ /* Use pipe context based otg sync logic */
++ dc->config.use_pipe_ctx_sync_logic = true;
++
+ dc->config.dc_mode_clk_limit_support = true;
+ /* read VBIOS LTTPR caps */
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+index 77cf5545c94cc8..0ba9a7997d561c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -61,18 +61,22 @@ endif
+ endif
+
+ ifneq ($(CONFIG_FRAME_WARN),0)
++ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
++frame_warn_flag := -Wframe-larger-than=3072
++else
+ frame_warn_flag := -Wframe-larger-than=2048
+ endif
++endif
+
+ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
+-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
++CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
+-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
++CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
+-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
++CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
+ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+index 50b0434354f8f5..c08169de3660c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+@@ -1453,10 +1453,9 @@ void dcn_bw_update_from_pplib_fclks(
+ ASSERT(fclks->num_levels);
+
+ vmin0p65_idx = 0;
+- vmid0p72_idx = fclks->num_levels -
+- (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+- vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+- vmax0p9_idx = fclks->num_levels - 1;
++ vmid0p72_idx = fclks->num_levels > 2 ? fclks->num_levels - 3 : 0;
++ vnom0p8_idx = fclks->num_levels > 1 ? fclks->num_levels - 2 : 0;
++ vmax0p9_idx = fclks->num_levels > 0 ? fclks->num_levels - 1 : 0;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
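The rewritten index selection clamps each voltage-level index to 0 instead of letting an unsigned subtraction wrap when the pplib fclk table is short or empty; with num_levels == 0 the old `num_levels - 1` underflowed. A sketch of the clamped selection, assuming unsigned indices:

    #include <stdio.h>

    /* Pick the vmin/vmid/vnom/vmax indices from an fclk table of
     * num_levels entries, clamping to 0 instead of wrapping. */
    static void pick_levels(unsigned num_levels, unsigned idx[4])
    {
        idx[0] = 0;                                   /* vmin0p65 */
        idx[1] = num_levels > 2 ? num_levels - 3 : 0; /* vmid0p72 */
        idx[2] = num_levels > 1 ? num_levels - 2 : 0; /* vnom0p8  */
        idx[3] = num_levels > 0 ? num_levels - 1 : 0; /* vmax0p9  */
    }

    int main(void)
    {
        unsigned idx[4];

        for (unsigned n = 0; n <= 4; n++) {
            pick_levels(n, idx);
            printf("levels=%u -> %u %u %u %u\n",
                   n, idx[0], idx[1], idx[2], idx[3]);
        }
        return 0;
    }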
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+index 2cbdd75429ffd6..6e669a2c5b2d44 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+@@ -36,7 +36,7 @@
+ * Define the maximum amount of states supported by the ASIC. Every ASIC has a
+ * specific number of states; this macro defines the maximum number of states.
+ */
+-#define DC__VOLTAGE_STATES 20
++#define DC__VOLTAGE_STATES 40
+ #define DC__NUM_DPP__4 1
+ #define DC__NUM_DPP__0_PRESENT 1
+ #define DC__NUM_DPP__1_PRESENT 1
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 5805fb02af14e3..8a5a038fd85578 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -438,7 +438,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ .use_urgent_burst_bw = 0
+ };
+
+-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
++struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
++ .clock_limits = {
++ {
++ .state = 0,
++ .dcfclk_mhz = 560.0,
++ .fabricclk_mhz = 560.0,
++ .dispclk_mhz = 513.0,
++ .dppclk_mhz = 513.0,
++ .phyclk_mhz = 540.0,
++ .socclk_mhz = 560.0,
++ .dscclk_mhz = 171.0,
++ .dram_speed_mts = 1069.0,
++ },
++ {
++ .state = 1,
++ .dcfclk_mhz = 694.0,
++ .fabricclk_mhz = 694.0,
++ .dispclk_mhz = 642.0,
++ .dppclk_mhz = 642.0,
++ .phyclk_mhz = 600.0,
++ .socclk_mhz = 694.0,
++ .dscclk_mhz = 214.0,
++ .dram_speed_mts = 1324.0,
++ },
++ {
++ .state = 2,
++ .dcfclk_mhz = 875.0,
++ .fabricclk_mhz = 875.0,
++ .dispclk_mhz = 734.0,
++ .dppclk_mhz = 734.0,
++ .phyclk_mhz = 810.0,
++ .socclk_mhz = 875.0,
++ .dscclk_mhz = 245.0,
++ .dram_speed_mts = 1670.0,
++ },
++ {
++ .state = 3,
++ .dcfclk_mhz = 1000.0,
++ .fabricclk_mhz = 1000.0,
++ .dispclk_mhz = 1100.0,
++ .dppclk_mhz = 1100.0,
++ .phyclk_mhz = 810.0,
++ .socclk_mhz = 1000.0,
++ .dscclk_mhz = 367.0,
++ .dram_speed_mts = 2000.0,
++ },
++ {
++ .state = 4,
++ .dcfclk_mhz = 1200.0,
++ .fabricclk_mhz = 1200.0,
++ .dispclk_mhz = 1284.0,
++ .dppclk_mhz = 1284.0,
++ .phyclk_mhz = 810.0,
++ .socclk_mhz = 1200.0,
++ .dscclk_mhz = 428.0,
++ .dram_speed_mts = 2000.0,
++ },
++ {
++ .state = 5,
++ .dcfclk_mhz = 1200.0,
++ .fabricclk_mhz = 1200.0,
++ .dispclk_mhz = 1284.0,
++ .dppclk_mhz = 1284.0,
++ .phyclk_mhz = 810.0,
++ .socclk_mhz = 1200.0,
++ .dscclk_mhz = 428.0,
++ .dram_speed_mts = 2000.0,
++ },
++ },
++
++ .num_states = 5,
++ .sr_exit_time_us = 1.9,
++ .sr_enter_plus_exit_time_us = 4.4,
++ .urgent_latency_us = 3.0,
++ .urgent_latency_pixel_data_only_us = 4.0,
++ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
++ .urgent_latency_vm_data_only_us = 4.0,
++ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
++ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
++ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
++ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
++ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
++ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
++ .max_avg_sdp_bw_use_normal_percent = 40.0,
++ .max_avg_dram_bw_use_normal_percent = 40.0,
++ .writeback_latency_us = 12.0,
++ .ideal_dram_bw_after_urgent_percent = 40.0,
++ .max_request_size_bytes = 256,
++ .dram_channel_width_bytes = 16,
++ .fabric_datapath_to_dcn_data_return_bytes = 64,
++ .dcn_downspread_percent = 0.5,
++ .downspread_percent = 0.5,
++ .dram_page_open_time_ns = 50.0,
++ .dram_rw_turnaround_time_ns = 17.5,
++ .dram_return_buffer_per_channel_bytes = 8192,
++ .round_trip_ping_latency_dcfclk_cycles = 131,
++ .urgent_out_of_order_return_per_channel_bytes = 4096,
++ .channel_interleave_bytes = 256,
++ .num_banks = 8,
++ .num_chans = 16,
++ .vmm_page_size_bytes = 4096,
++ .dram_clock_change_latency_us = 45.0,
++ .writeback_dram_clock_change_latency_us = 23.0,
++ .return_bus_width_bytes = 64,
++ .dispclk_dppclk_vco_speed_mhz = 3850,
++ .xfc_bus_transport_time_us = 20,
++ .xfc_xbuf_latency_tolerance_us = 50,
++ .use_urgent_burst_bw = 0,
++};
+
+ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
+ .odm_capable = 1,
+@@ -948,10 +1056,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ {
+ int plane_count;
+ int i;
+- unsigned int min_dst_y_next_start_us;
+
+ plane_count = 0;
+- min_dst_y_next_start_us = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+@@ -973,26 +1079,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ struct dc_stream_status *stream_status = &context->stream_status[0];
+- struct dc_stream_state *current_stream = context->streams[0];
+ int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ bool is_pwrseq0 = link->link_index == 0;
+- bool isFreesyncVideo;
+-
+- isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
+- isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
+- min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
+- break;
+- }
+- }
+
+ /* Don't support multi-plane configurations */
+ if (stream_status->plane_count > 1)
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+
+- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
++ if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+index 0fc9f3e3ffaefd..f603486af6e306 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+
+ static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+ {
+- unsigned int ret_val = 0;
++ unsigned int ret_val = 1;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+index 618f4b682ab1b1..9f28e4d3c664c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+@@ -53,7 +53,7 @@ static void calculate_ttu_cursor(
+
+ static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+ {
+- unsigned int ret_val = 0;
++ unsigned int ret_val = 1;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
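Changing the fallback in get_bytes_per_element() from 0 to 1 (here and in the dcn20v2 copy above) means an unrecognised source format can no longer feed a zero element size into later request/bandwidth math, where it ends up as a divisor. A reduced example of why the old default was dangerous; the stand-in function below is illustrative, not the DML code:

    #include <stdio.h>

    /* Unknown formats now report one byte per element instead of zero. */
    static unsigned bytes_per_element(int format_known)
    {
        unsigned ret_val = 1;  /* was 0: unknown formats divided by zero */

        if (format_known)
            ret_val = 4;       /* e.g. a 32bpp format */
        return ret_val;
    }

    int main(void)
    {
        unsigned bytes = 1024;

        /* Safe even for the unknown-format path now. */
        printf("elements=%u\n", bytes / bytes_per_element(0));
        return 0;
    }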
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+index e2bcd205aa936f..8da97a96b1ceb9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+@@ -304,6 +304,16 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++ * MAX_NUM_DPM_LVL is 8.
++ * dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
++ * DC__VOLTAGE_STATES is 40.
++ */
++ if (num_states > MAX_NUM_DPM_LVL) {
++ ASSERT(0);
++ return;
++ }
++
+ dcn3_02_soc.num_states = num_states;
+ for (i = 0; i < dcn3_02_soc.num_states; i++) {
+ dcn3_02_soc.clock_limits[i].state = i;
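The new guard refuses to copy more DPM levels than the tables were sized for; the same check is repeated in the dcn303, dcn32 and dcn321 update paths below. A generic sketch of the pattern, with the real macro names but a hypothetical copy helper:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    #define MAX_NUM_DPM_LVL 8   /* clk_table entry count in the real code */
    #define DST_STATES      40  /* DC__VOLTAGE_STATES in the real code    */

    struct level { int state; };

    /* Copy num_states levels, bailing out (with a debug assert) rather
     * than indexing past either table. */
    static int copy_levels(struct level *dst, const struct level *src,
                           size_t num_states)
    {
        if (num_states > MAX_NUM_DPM_LVL) {
            assert(0);
            return -1;
        }
        memcpy(dst, src, num_states * sizeof(*src));
        return 0;
    }

    int main(void)
    {
        struct level src[MAX_NUM_DPM_LVL] = { {0} };
        struct level dst[DST_STATES]      = { {0} };

        return copy_levels(dst, src, MAX_NUM_DPM_LVL);
    }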
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+index 3eb3a021ab7d72..c283780ad0621a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+@@ -299,6 +299,16 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++ * MAX_NUM_DPM_LVL is 8.
++ * dcn3_03_soc.clock_limits[DC__VOLTAGE_STATES].
++ * DC__VOLTAGE_STATES is 40.
++ */
++ if (num_states > MAX_NUM_DPM_LVL) {
++ ASSERT(0);
++ return;
++ }
++
+ dcn3_03_soc.num_states = num_states;
+ for (i = 0; i < dcn3_03_soc.num_states; i++) {
+ dcn3_03_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+index deb6d162a2d5c0..7307b7b8d8ad75 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
++ .dispclk_dppclk_vco_speed_mhz = 2400.0,
+ .num_chans = 4,
+ .dummy_pstate_latency_us = 10.0
+ };
+@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
++ .dispclk_dppclk_vco_speed_mhz = 2500.0,
+ };
+
+ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 711d4085b33b8f..3d82cbef12740c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1964,6 +1964,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ int i, pipe_idx, vlevel_temp = 0;
+ double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
++ double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
+@@ -2151,7 +2152,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ }
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+- min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
++ min_dram_speed_mts = dram_speed_from_validation;
+ min_dram_speed_mts_margin = 160;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+@@ -2884,6 +2885,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++ * MAX_NUM_DPM_LVL is 8.
++ * dcn3_2_soc.clock_limits[DC__VOLTAGE_STATES].
++ * DC__VOLTAGE_STATES is 40.
++ */
++ if (num_states > MAX_NUM_DPM_LVL) {
++ ASSERT(0);
++ return;
++ }
++
+ dcn3_2_soc.num_states = num_states;
+ for (i = 0; i < dcn3_2_soc.num_states; i++) {
+ dcn3_2_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index cbdfb762c10c58..0782a34689a00f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -813,6 +813,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+ v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
++ mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
++
+ /* Output */
+ &v->DSTXAfterScaler[k],
+ &v->DSTYAfterScaler[k],
+@@ -3317,6 +3319,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ v->SwathHeightCThisState[k], v->TWait,
+ (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
++ mode_lib->vba.PrefetchModePerState[i][j] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
+
+ /* Output */
+ &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
+@@ -3361,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ &mode_lib->vba.UrgentBurstFactorLumaPre[k],
+ &mode_lib->vba.UrgentBurstFactorChromaPre[k],
+ &mode_lib->vba.NotUrgentLatencyHidingPre[k]);
++
++ v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
++ 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
+ }
+
+ {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+index ecea008f19d3aa..208b89d13d3f6d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+@@ -3423,6 +3423,7 @@ bool dml32_CalculatePrefetchSchedule(
+ unsigned int SwathHeightC,
+ double TWait,
+ double TPreReq,
++ bool ExtendPrefetchIfPossible,
+ /* Output */
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+@@ -3892,12 +3893,32 @@ bool dml32_CalculatePrefetchSchedule(
+ /* Clamp to oto for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_oto;
+ } else {
+- *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+- TimeForFetchingMetaPTE = Tvm_equ;
+- TimeForFetchingRowInVBlank = Tr0_equ;
+- *PrefetchBandwidth = prefetch_bw_equ;
+- /* Clamp to equ for bandwidth calculation */
+- LinesForPrefetchBandwidth = dst_y_prefetch_equ;
++ /* For mode programming we want to extend the prefetch as much as possible
++ * (up to oto, or as long as we can for equ) if we're not already applying
++ * the 60us prefetch requirement. This is to avoid intermittent underflow
++ * issues during prefetch.
++ *
++ * The prefetch extension is applied under the following scenarios:
++ * 1. We're in prefetch mode > 0 (i.e. we don't support MCLK switch in blank)
++ * 2. We're using subvp or drr methods of p-state switch, in which case
++ * we don't care if prefetch takes up more of the blanking time
++ *
++ * Mode programming typically chooses the smallest prefetch time possible
++ * (i.e. highest bandwidth during prefetch) presumably to create margin between
++ * p-states / c-states that happen in vblank and prefetch. Therefore we only
++ * apply this prefetch extension when p-state in vblank is not required (UCLK
++ * p-states take up the most vblank time).
++ */
++ if (ExtendPrefetchIfPossible && TPreReq == 0 && VStartup < MaxVStartup) {
++ MyError = true;
++ } else {
++ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
++ TimeForFetchingMetaPTE = Tvm_equ;
++ TimeForFetchingRowInVBlank = Tr0_equ;
++ *PrefetchBandwidth = prefetch_bw_equ;
++ /* Clamp to equ for bandwidth calculation */
++ LinesForPrefetchBandwidth = dst_y_prefetch_equ;
++ }
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+index 592d174df6c629..5d34735df83db1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+@@ -747,6 +747,7 @@ bool dml32_CalculatePrefetchSchedule(
+ unsigned int SwathHeightC,
+ double TWait,
+ double TPreReq,
++ bool ExtendPrefetchIfPossible,
+ /* Output */
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index b26fcf86014c70..ae2196c36f218b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -789,6 +789,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
++ /* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
++ * MAX_NUM_DPM_LVL is 8.
++ * dcn3_21_soc.clock_limits[DC__VOLTAGE_STATES].
++ * DC__VOLTAGE_STATES is 40.
++ */
++ if (num_states > MAX_NUM_DPM_LVL) {
++ ASSERT(0);
++ return;
++ }
++
+ dcn3_21_soc.num_states = num_states;
+ for (i = 0; i < dcn3_21_soc.num_states; i++) {
+ dcn3_21_soc.clock_limits[i].state = i;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+index 9a3ded31119529..85453bbb4f9b13 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+@@ -1099,8 +1099,13 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
+
+ // Total Available Pipes Support Check
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+- total_pipes += mode_lib->vba.DPPPerPlane[k];
+ pipe_idx = get_pipe_idx(mode_lib, k);
++ if (pipe_idx == -1) {
++ ASSERT(0);
++ continue; // skip inactive planes
++ }
++ total_pipes += mode_lib->vba.DPPPerPlane[k];
++
+ if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0)
+ mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz;
+ else
+diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+index 3966845c769453..9edc9b0e3f082d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+@@ -861,7 +861,7 @@ static bool setup_dsc_config(
+
+ memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
+
+- dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy);
++ dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding);
+ pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
+@@ -1033,7 +1033,12 @@ static bool setup_dsc_config(
+ if (!is_dsc_possible)
+ goto done;
+
+- dsc_cfg->num_slices_v = pic_height/slice_height;
++ if (slice_height > 0) {
++ dsc_cfg->num_slices_v = pic_height / slice_height;
++ } else {
++ is_dsc_possible = false;
++ goto done;
++ }
+
+ if (target_bandwidth_kbps > 0) {
+ is_dsc_possible = decide_dsc_target_bpp_x16(
+@@ -1129,7 +1134,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+
+ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+ uint32_t max_target_bpp_limit_override_x16,
+- struct dc_dsc_policy *policy)
++ struct dc_dsc_policy *policy,
++ const enum dc_link_encoding_format link_encoding)
+ {
+ uint32_t bpc = 0;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+index 3ede6e02c3a786..f2037d78f71abd 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+@@ -56,7 +56,7 @@ struct gpio_service *dal_gpio_service_create(
+ struct dc_context *ctx)
+ {
+ struct gpio_service *service;
+- uint32_t index_of_id;
++ int32_t index_of_id;
+
+ service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+
+@@ -112,7 +112,7 @@ struct gpio_service *dal_gpio_service_create(
+ return service;
+
+ failure_2:
+- while (index_of_id) {
++ while (index_of_id > 0) {
+ --index_of_id;
+ kfree(service->busyness[index_of_id]);
+ }
+@@ -239,6 +239,9 @@ static bool is_pin_busy(
+ enum gpio_id id,
+ uint32_t en)
+ {
++ if (id == GPIO_ID_UNKNOWN)
++ return false;
++
+ return service->busyness[id][en];
+ }
+
+@@ -247,6 +250,9 @@ static void set_pin_busy(
+ enum gpio_id id,
+ uint32_t en)
+ {
++ if (id == GPIO_ID_UNKNOWN)
++ return;
++
+ service->busyness[id][en] = true;
+ }
+
+@@ -255,6 +261,9 @@ static void set_pin_free(
+ enum gpio_id id,
+ uint32_t en)
+ {
++ if (id == GPIO_ID_UNKNOWN)
++ return;
++
+ service->busyness[id][en] = false;
+ }
+
+@@ -263,7 +272,7 @@ enum gpio_result dal_gpio_service_lock(
+ enum gpio_id id,
+ uint32_t en)
+ {
+- if (!service->busyness[id]) {
++ if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+@@ -277,7 +286,7 @@ enum gpio_result dal_gpio_service_unlock(
+ enum gpio_id id,
+ uint32_t en)
+ {
+- if (!service->busyness[id]) {
++ if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
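Making index_of_id signed pairs with the explicit `> 0` test in the failure_2 unwind: a signed counter that is decremented past zero goes negative and stops the loop, whereas a stray decrement of an unsigned zero wraps to a huge "valid" bound. A two-line demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t u = 0;
        int32_t  s = 0;

        u--;  /* wraps: 4294967295, still truthy as a loop bound */
        s--;  /* becomes -1, rejected by a (s > 0) test          */

        printf("unsigned after --: %u\n", (unsigned)u);
        printf("signed   after --: %d, keeps looping? %d\n", (int)s, s > 0);
        return 0;
    }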
+diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+index 25ffc052d53be9..df2cb5279ce51d 100644
+--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+@@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction(
+ const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
+ const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
+ struct i2c_command i2c_command;
+- uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
++ uint8_t offset;
+ struct i2c_payload i2c_payloads[] = {
+- { true, 0, 1, &offset },
++ { true, 0, 1, 0 },
+ /* actual hdcp payload, will be filled later, zeroed for now*/
+ { 0 }
+ };
+
++ if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++ DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++ return false;
++ }
++
++ offset = hdcp_i2c_offsets[message_info->msg_id];
++ i2c_payloads[0].data = &offset;
++
+ switch (message_info->link) {
+ case HDCP_LINK_SECONDARY:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+@@ -310,6 +318,11 @@ static bool dp_11_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+ {
++ if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++ DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++ return false;
++ }
++
+ return dpcd_access_helper(
+ link,
+ message_info->length,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+index 33db15d69f2337..9f521cf0fc5a2b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+@@ -64,7 +64,8 @@ struct abm_funcs {
+ bool (*set_pipe_ex)(struct abm *abm,
+ unsigned int otg_inst,
+ unsigned int option,
+- unsigned int panel_inst);
++ unsigned int panel_inst,
++ unsigned int pwrseq_inst);
+ };
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+index 24af9d80b9373a..248adc1705e357 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+@@ -56,12 +56,14 @@ struct panel_cntl_funcs {
+ struct panel_cntl_init_data {
+ struct dc_context *ctx;
+ uint32_t inst;
++ uint32_t pwrseq_inst;
+ };
+
+ struct panel_cntl {
+ const struct panel_cntl_funcs *funcs;
+ struct dc_context *ctx;
+ uint32_t inst;
++ uint32_t pwrseq_inst;
+ /* registers setting needs to be saved and restored at InitBacklight */
+ struct panel_cntl_backlight_registers stored_backlight_registers;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 02ff99f7bec2ba..66e680902c95c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -388,6 +388,11 @@ struct hw_sequencer_funcs {
+ void (*z10_restore)(const struct dc *dc);
+ void (*z10_save_init)(struct dc *dc);
+
++ void (*blank_phantom)(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height);
++
+ void (*update_visual_confirm_color)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id);
+@@ -396,6 +401,9 @@ struct hw_sequencer_funcs {
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+ void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
++ bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx);
+
+ void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
+index e3e8c76c17cfac..d7685368140ab5 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
+@@ -295,6 +295,7 @@ struct link_service {
+ bool (*edp_receiver_ready_T9)(struct dc_link *link);
+ bool (*edp_receiver_ready_T7)(struct dc_link *link);
+ bool (*edp_power_alpm_dpcd_enable)(struct dc_link *link, bool enable);
++ void (*edp_set_panel_power)(struct dc_link *link, bool powerOn);
+
+
+ /*************************** DP CTS ************************************/
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+index 44649db5f3e32f..5646b7788f02e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
+ info->ext_id);
+ uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+
+- struct timing_generator *tg =
+- dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
++ struct timing_generator *tg;
++
++ if (pipe_offset >= MAX_PIPES)
++ return false;
++
++ tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+
+ if (enable) {
+ if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+index e1257404357b11..cec68c5dba1322 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+@@ -28,6 +28,8 @@
+ #include "dccg.h"
+ #include "clk_mgr.h"
+
++#define DC_LOGGER link->ctx->logger
++
+ void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+ struct fixed31_32 throttled_vcp_size)
+ {
+@@ -108,6 +110,11 @@ void enable_hpo_dp_link_output(struct dc_link *link,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+ {
++ if (!link_res->hpo_dp_link_enc) {
++ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
++ return;
++ }
++
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+@@ -124,6 +131,11 @@ void disable_hpo_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+ {
++ if (!link_res->hpo_dp_link_enc) {
++ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__);
++ return;
++ }
++
+ link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
+ link_res->hpo_dp_link_enc->funcs->disable_link_phy(
+ link_res->hpo_dp_link_enc, signal);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+index b621b97711b617..a7f5b0f6272ce0 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+@@ -162,7 +162,12 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
+ link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link_res->hpo_dp_link_enc, tp_params);
+ }
++
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
++
++ // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1
++ if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE)
++ msleep(30);
+ }
+
+ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+index c9b6676eaf53b9..c7a9e286a5d4d3 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+@@ -876,7 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ (link->dpcd_sink_ext_caps.bits.oled == 1)) {
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+- set_cached_brightness_aux(link);
++ set_default_brightness_aux(link);
+ }
+
+ return true;
+@@ -1085,6 +1085,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
++ if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
++ link->dpcd_sink_ext_caps.raw = 0;
++
+ if (dc_is_hdmi_signal(link->connector_signal))
+ read_scdc_caps(link->ddc, link->local_sink);
+
+@@ -1163,6 +1166,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
++
++ // The sink can only use the supported link rate table, so we are forced to enable it
++ if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
++ link->panel_config.ilr.optimize_edp_link_rate = true;
++ if (edp_is_ilr_optimization_enabled(link))
++ link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link);
+ }
+
+ } else {
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index 79aef205598b7f..4901e27f678bcf 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -873,11 +873,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
+ {
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+- DC_LOGGER_INIT(dsc->ctx->logger);
+
+- if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
++ if (!pipe_ctx->stream->timing.flags.DSC)
++ return false;
++
++ if (!dsc)
+ return false;
+
++ DC_LOGGER_INIT(dsc->ctx->logger);
++
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ uint8_t dsc_packed_pps[128];
+@@ -1055,18 +1059,21 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
+ uint32_t denominator = 1;
+
+ /*
+- * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
++ * The 1.006 factor (margin 5300ppm + 300ppm ~ 0.6% as per spec) is not
++ * required when determining PBN/time slot utilization on the link between
++ * us and the branch, since that overhead is already accounted for in
++ * the get_pbn_per_slot function.
++ *
+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+ * common multiplier to render an integer PBN for all link rate/lane
+ * counts combinations
+ * calculate
+- * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+- * peak_kbps *= 8 convert to bytes
++ * peak_kbps /= (8 * 1000) convert to bytes
+ */
+
+- numerator = 64 * PEAK_FACTOR_X1000;
+- denominator = 54 * 8 * 1000 * 1000;
++ numerator = 64;
++ denominator = 54 * 8 * 1000;
+ kbps *= numerator;
+ peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
+
+@@ -1930,7 +1937,7 @@ static void disable_link_dp(struct dc_link *link,
+ dp_disable_link_phy(link, link_res, signal);
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+- if (!link->dc->config.edp_no_power_sequencing)
++ if (!link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_power_control(link, false);
+ }
+
+@@ -2064,17 +2071,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
+ }
+ }
+
+- /*
+- * If the link is DP-over-USB4 do the following:
+- * - Train with fallback when enabling DPIA link. Conventional links are
++ /* Train with fallback when enabling DPIA link. Conventional links are
+ * trained with fallback during sink detection.
+- * - Allocate only what the stream needs for bw in Gbps. Inform the CM
+- * in case stream needs more or less bw from what has been allocated
+- * earlier at plug time.
+ */
+- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
++ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ do_fallback = true;
+- }
+
+ /*
+ * Temporary w/a to get DP2.0 link rates to work with SST.
+@@ -2140,8 +2141,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+- set_cached_brightness_aux(link);
+-
++ set_default_brightness_aux(link);
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ edp_backlight_enable_aux(link, true);
+@@ -2219,7 +2219,7 @@ static enum dc_status enable_link(
+ * link settings. Need to call disable first before enabling at
+ * new link settings.
+ */
+- if (link->link_status.link_active && !stream->skip_edp_power_down)
++ if (link->link_status.link_active)
+ disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+
+ switch (pipe_ctx->stream->signal) {
+@@ -2257,6 +2257,32 @@ static enum dc_status enable_link(
+ return status;
+ }
+
++static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
++{
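++	/* Stub: the requested bandwidth is currently ignored and the call always succeeds. */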
++ return true;
++}
++
++static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
++{
++ bool ret;
++
++ int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
++ dc_link_get_highest_encoding_format(stream->sink->link));
++
++ ret = allocate_usb4_bandwidth_for_stream(stream, bw);
++
++ return ret;
++}
++
++static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
++{
++ bool ret;
++
++ ret = allocate_usb4_bandwidth_for_stream(stream, 0);
++
++ return ret;
++}
++
+ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ {
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+@@ -2293,6 +2319,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ update_psp_stream_config(pipe_ctx, true);
+ dc->hwss.blank_stream(pipe_ctx);
+
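++	/* release any DP tunneling bandwidth this stream was allocated */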
++ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++ deallocate_usb4_bandwidth(pipe_ctx->stream);
++
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+@@ -2338,9 +2367,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ dc->hwss.disable_stream(pipe_ctx);
+ } else {
+ dc->hwss.disable_stream(pipe_ctx);
+- if (!pipe_ctx->stream->skip_edp_power_down) {
+- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+- }
++ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
+
+ if (pipe_ctx->stream->timing.flags.DSC) {
+@@ -2516,6 +2543,9 @@ void link_set_dpms_on(
+ }
+ }
+
++ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++ allocate_usb4_bandwidth(pipe_ctx->stream);
++
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+index 0895742a310241..eb7c9f226af5cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+@@ -223,6 +223,7 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
+ link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9;
+ link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7;
+ link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable;
++ link_srv->edp_set_panel_power = edp_set_panel_power;
+ }
+
+ /* link dp cts implements dp compliance test automation protocols and manual
+@@ -366,6 +367,27 @@ static enum transmitter translate_encoder_to_transmitter(
+ }
+ }
+
++static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
++{
++ uint8_t pwrseq_inst = 0xF;
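++	/* 0xF is an invalid-instance sentinel; only DIG A/B map to eDP power sequencers */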
++
++ switch (link->eng_id) {
++ case ENGINE_ID_DIGA:
++ pwrseq_inst = 0;
++ break;
++ case ENGINE_ID_DIGB:
++ pwrseq_inst = 1;
++ break;
++ default:
++ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
++ ASSERT(false);
++ break;
++ }
++
++ return pwrseq_inst;
++}
++
+ static void link_destruct(struct dc_link *link)
+ {
+ int i;
+@@ -381,7 +403,7 @@ static void link_destruct(struct dc_link *link)
+ if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+- if (link->link_enc) {
++ if (link->link_enc && !link->is_dig_mapping_flexible) {
+ /* Update link encoder resource tracking variables. These are used for
+ * the dynamic assignment of link encoders to streams. Virtual links
+ * are not assigned encoder resources on creation.
+@@ -593,24 +615,6 @@ static bool construct_phy(struct dc_link *link,
+ link->ddc_hw_inst =
+ dal_ddc_get_line(get_ddc_pin(link->ddc));
+
+-
+- if (link->dc->res_pool->funcs->panel_cntl_create &&
+- (link->link_id.id == CONNECTOR_ID_EDP ||
+- link->link_id.id == CONNECTOR_ID_LVDS)) {
+- panel_cntl_init_data.ctx = dc_ctx;
+- panel_cntl_init_data.inst =
+- panel_cntl_init_data.ctx->dc_edp_id_count;
+- link->panel_cntl =
+- link->dc->res_pool->funcs->panel_cntl_create(
+- &panel_cntl_init_data);
+- panel_cntl_init_data.ctx->dc_edp_id_count++;
+-
+- if (link->panel_cntl == NULL) {
+- DC_ERROR("Failed to create link panel_cntl!\n");
+- goto panel_cntl_create_fail;
+- }
+- }
+-
+ enc_init_data.ctx = dc_ctx;
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+ &enc_init_data.encoder);
+@@ -625,14 +629,14 @@ static bool construct_phy(struct dc_link *link,
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
+
+- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+-
+ if (!link->link_enc) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
++ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
++ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
++
+ /* Update link encoder tracking variables. These are used for the dynamic
+ * assignment of link encoders to streams.
+ */
+@@ -641,6 +645,23 @@ static bool construct_phy(struct dc_link *link,
+ link->dc->res_pool->dig_link_enc_count++;
+
+ link->link_enc_hw_inst = link->link_enc->transmitter;
++
++ if (link->dc->res_pool->funcs->panel_cntl_create &&
++ (link->link_id.id == CONNECTOR_ID_EDP ||
++ link->link_id.id == CONNECTOR_ID_LVDS)) {
++ panel_cntl_init_data.ctx = dc_ctx;
++ panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
++ panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
++ link->panel_cntl =
++ link->dc->res_pool->funcs->panel_cntl_create(
++ &panel_cntl_init_data);
++ panel_cntl_init_data.ctx->dc_edp_id_count++;
++
++ if (link->panel_cntl == NULL) {
++ DC_ERROR("Failed to create link panel_cntl!\n");
++ goto panel_cntl_create_fail;
++ }
++ }
+ for (i = 0; i < 4; i++) {
+ if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+ link->link_id, i,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+index b45fda96eaf649..5b0bc7f6a188cc 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
+ return DC_OK;
+ }
+
++/*
++ * This function calculates the bandwidth required for the stream timing
++ * and aggregates the stream bandwidth for the respective dpia link
++ *
++ * @stream: pointer to the dc_stream_state struct instance
++ * @num_streams: number of streams to be validated
++ *
++ * return: true if validation succeeded
++ */
+ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+ {
+- bool ret = true;
+- int bw_needed[MAX_DPIA_NUM];
+- struct dc_link *link[MAX_DPIA_NUM];
++ int bw_needed[MAX_DPIA_NUM] = {0};
++ struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
++ int num_dpias = 0;
++
++ for (unsigned int i = 0; i < num_streams; ++i) {
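++		/* SST streams each occupy their own DPIA; MST streams share one DPIA link */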
++ if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
++ /* new dpia sst stream, check whether it exceeds max dpia */
++ if (num_dpias >= MAX_DPIA_NUM)
++ return false;
+
+- if (!num_streams || num_streams > MAX_DPIA_NUM)
+- return ret;
++ dpia_link[num_dpias] = stream[i].link;
++ bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
++ dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
++ num_dpias++;
++ } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++ uint8_t j = 0;
++ /* check whether its a known dpia link */
++			/* check whether it's a known dpia link */
++ if (dpia_link[j] == stream[i].link)
++ break;
++ }
++
++ if (j == num_dpias) {
++ /* new dpia mst stream, check whether it exceeds max dpia */
++ if (num_dpias >= MAX_DPIA_NUM)
++ return false;
++ else {
++ dpia_link[j] = stream[i].link;
++ num_dpias++;
++ }
++ }
++
++ bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
++ dc_link_get_highest_encoding_format(dpia_link[j]));
++ }
++ }
+
+- for (uint8_t i = 0; i < num_streams; ++i) {
++ /* Include dp overheads */
++ for (uint8_t i = 0; i < num_dpias; ++i) {
++ int dp_overhead = 0;
+
+- link[i] = stream[i].link;
+- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+- dc_link_get_highest_encoding_format(link[i]));
++ dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
++ bw_needed[i] += dp_overhead;
+ }
+
+- ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+-
+- return ret;
++ return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index 237e0ff955f3cc..3d589072fe307e 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -528,7 +528,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
+ struct dc_link_settings *cur,
+ enum link_training_result training_result)
+ {
+- uint8_t cur_idx = 0, next_idx;
++ uint32_t cur_idx = 0, next_idx;
+ bool found = false;
+
+ if (training_result == LINK_TRAINING_ABORT)
+@@ -707,8 +707,7 @@ bool edp_decide_link_settings(struct dc_link *link,
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+- link->dpcd_caps.edp_supported_link_rates_count == 0) {
++ if (!edp_is_ilr_optimization_enabled(link)) {
+ *link_setting = link->verified_link_cap;
+ return true;
+ }
+@@ -772,8 +771,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+- if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+- link->dpcd_caps.edp_supported_link_rates_count == 0)) {
++ if (!edp_is_ilr_optimization_enabled(link)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+@@ -910,21 +908,17 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
+
+ memset(link_setting, 0, sizeof(*link_setting));
+
+- /* if preferred is specified through AMDDP, use it, if it's enough
+- * to drive the mode
+- */
+- if (link->preferred_link_setting.lane_count !=
+- LANE_COUNT_UNKNOWN &&
+- link->preferred_link_setting.link_rate !=
+- LINK_RATE_UNKNOWN) {
++ if (dc_is_dp_signal(stream->signal) &&
++ link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
++ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) {
++		/* if preferred is specified through AMDDP, use it if it's enough
++ * to drive the mode
++ */
+ *link_setting = link->preferred_link_setting;
+- return true;
+- }
+-
+- /* MST doesn't perform link training for now
+- * TODO: add MST specific link training routine
+- */
+- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++ } else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++ /* MST doesn't perform link training for now
++ * TODO: add MST specific link training routine
++ */
+ decide_mst_link_settings(link, link_setting);
+ } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* enable edp link optimization for DSC eDP case */
+@@ -1586,9 +1580,17 @@ static bool retrieve_link_cap(struct dc_link *link)
+ return false;
+ }
+
+- if (dp_is_lttpr_present(link))
++ if (dp_is_lttpr_present(link)) {
+ configure_lttpr_mode_transparent(link);
+
++ // Echo TOTAL_LTTPR_CNT back downstream
++ core_link_write_dpcd(
++ link,
++ DP_TOTAL_LTTPR_CNT,
++ &link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
++ sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
++ }
++
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
+
+@@ -1938,9 +1940,7 @@ void detect_edp_sink_caps(struct dc_link *link)
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
+- (link->panel_config.ilr.optimize_edp_link_rate ||
+- link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
++ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13) {
+ // Read DPCD 00010h - 0001Fh 16 bytes at one shot
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+@@ -1958,12 +1958,10 @@ void detect_edp_sink_caps(struct dc_link *link)
+ link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
+-
+- if (link->reported_link_cap.link_rate < link_rate)
+- link->reported_link_cap.link_rate = link_rate;
+ }
+ }
+ }
++
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
+ &backlight_adj_cap, sizeof(backlight_adj_cap));
+
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+index 7581023daa4789..5a965c26bf2095 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+@@ -50,15 +50,28 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+ && tmp->hpd_status
+ && tmp->dpia_bw_alloc_config.bw_alloc_enabled);
+ }
++
+ static void reset_bw_alloc_struct(struct dc_link *link)
+ {
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+- link->dpia_bw_alloc_config.sink_verified_bw = 0;
+- link->dpia_bw_alloc_config.sink_max_bw = 0;
++ link->dpia_bw_alloc_config.link_verified_bw = 0;
++ link->dpia_bw_alloc_config.link_max_bw = 0;
++ link->dpia_bw_alloc_config.allocated_bw = 0;
+ link->dpia_bw_alloc_config.estimated_bw = 0;
+ link->dpia_bw_alloc_config.bw_granularity = 0;
++ link->dpia_bw_alloc_config.dp_overhead = 0;
+ link->dpia_bw_alloc_config.response_ready = false;
++ link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
++ link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
++ for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
++ link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
++ DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
+ }
++
++#define BW_GRANULARITY_0 4 // 0.25 Gbps
++#define BW_GRANULARITY_1 2 // 0.5 Gbps
++#define BW_GRANULARITY_2 1 // 1 Gbps
++
+ static uint8_t get_bw_granularity(struct dc_link *link)
+ {
+ uint8_t bw_granularity = 0;
+@@ -71,16 +84,20 @@ static uint8_t get_bw_granularity(struct dc_link *link)
+
+ switch (bw_granularity & 0x3) {
+ case 0:
+- bw_granularity = 4;
++ bw_granularity = BW_GRANULARITY_0;
+ break;
+ case 1:
++ bw_granularity = BW_GRANULARITY_1;
++ break;
++ case 2:
+ default:
+- bw_granularity = 2;
++ bw_granularity = BW_GRANULARITY_2;
+ break;
+ }
+
+ return bw_granularity;
+ }
++
+ static int get_estimated_bw(struct dc_link *link)
+ {
+ uint8_t bw_estimated_bw = 0;
+@@ -93,31 +110,33 @@ static int get_estimated_bw(struct dc_link *link)
+
+ return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ }
+-static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
++
++static int get_non_reduced_max_link_rate(struct dc_link *link)
+ {
+- if (bw_needed > 0)
+- *stream_allocated_bw += bw_needed;
++ uint8_t nrd_max_link_rate = 0;
+
+- return true;
++ core_link_read_dpcd(
++ link,
++ DP_TUNNELING_MAX_LINK_RATE,
++ &nrd_max_link_rate,
++ sizeof(uint8_t));
++
++ return nrd_max_link_rate;
+ }
+-static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
+-{
+- bool ret = false;
+
+- if (*stream_allocated_bw > 0) {
+- *stream_allocated_bw -= bw_to_dealloc;
+- ret = true;
+- } else {
+- //Do nothing for now
+- ret = true;
+- }
++static int get_non_reduced_max_lane_count(struct dc_link *link)
++{
++ uint8_t nrd_max_lane_count = 0;
+
+- // Unplug so reset values
+- if (!link->hpd_status)
+- reset_bw_alloc_struct(link);
++ core_link_read_dpcd(
++ link,
++ DP_TUNNELING_MAX_LANE_COUNT,
++ &nrd_max_lane_count,
++ sizeof(uint8_t));
+
+- return ret;
++ return nrd_max_lane_count;
+ }
++
+ /*
+ * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
+ * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
+@@ -125,10 +144,22 @@ static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, stru
+ */
+ static void init_usb4_bw_struct(struct dc_link *link)
+ {
+- // Init the known values
++ reset_bw_alloc_struct(link);
++
++ /* init the known values */
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
++ link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
++ link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
++
++ DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
++ __func__, link->dpia_bw_alloc_config.bw_granularity,
++ link->dpia_bw_alloc_config.estimated_bw);
++ DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
++ __func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
++ link->dpia_bw_alloc_config.nrd_max_lane_count);
+ }
++
+ static uint8_t get_lowest_dpia_index(struct dc_link *link)
+ {
+ const struct dc *dc_struct = link->dc;
+@@ -141,51 +172,66 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
+ dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+- if (idx > dc_struct->links[i]->link_index)
++ if (idx > dc_struct->links[i]->link_index) {
+ idx = dc_struct->links[i]->link_index;
++ break;
++ }
+ }
+
+ return idx;
+ }
++
+ /*
+- * Get the Max Available BW or Max Estimated BW for each Host Router
++ * Get the maximum DP tunnel bandwidth of the host router
+ *
+- * @link: pointer to the dc_link struct instance
+- * @type: ESTIMATD BW or MAX AVAILABLE BW
++ * @dc: pointer to the dc struct instance
++ * @hr_index: host router index
+ *
+- * return: response_ready flag from dc_link struct
++ * return: host router maximum dp tunnel bandwidth
+ */
+-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
++static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
+ {
+- const struct dc *dc_struct = link->dc;
+- uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+- uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+- struct dc_link *link_temp;
++ uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
++ uint8_t hr_index_temp = 0;
++ struct dc_link *link_dpia_primary, *link_dpia_secondary;
+ int total_bw = 0;
+- int i;
+
+- for (i = 0; i < MAX_PIPES * 2; ++i) {
+-
+- if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+- continue;
++ for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
+
+- link_temp = dc_struct->links[i];
+- if (!link_temp || !link_temp->hpd_status)
++ if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+- idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+-
+- if (idx_temp == idx) {
+-
+- if (type == HOST_ROUTER_BW_ESTIMATED)
+- total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+- else if (type == HOST_ROUTER_BW_ALLOCATED)
+- total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
++ hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
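++		/* DPIAs are paired per host router, hence the divide-by-two */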
++
++ if (hr_index_temp == hr_index) {
++ link_dpia_primary = dc->links[i];
++ link_dpia_secondary = dc->links[i + 1];
++
++ /**
++ * If BW allocation enabled on both DPIAs, then
++ * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
++ * otherwise HR BW = Estimated(bw alloc enabled dpia)
++ */
++ if ((link_dpia_primary->hpd_status &&
++ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
++ (link_dpia_secondary->hpd_status &&
++ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
++ total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
++ link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
++ } else if (link_dpia_primary->hpd_status &&
++ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
++ total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
++ } else if (link_dpia_secondary->hpd_status &&
++ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
++ total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
++ }
++ break;
+ }
+ }
+
+ return total_bw;
+ }
++
+ /*
+ * Cleanup function for when the dpia is unplugged to reset struct
+ * and perform any required clean up
+@@ -194,42 +240,49 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+ *
+ * return: none
+ */
+-static bool dpia_bw_alloc_unplug(struct dc_link *link)
++static void dpia_bw_alloc_unplug(struct dc_link *link)
+ {
+- if (!link)
+- return true;
+-
+- return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+- link->dpia_bw_alloc_config.sink_allocated_bw, link);
++ if (link) {
++ DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
++ __func__, link->link_index);
++ reset_bw_alloc_struct(link);
++ }
+ }
++
+ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+ {
+ uint8_t requested_bw;
+ uint32_t temp;
+
+- // 1. Add check for this corner case #1
+- if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
++	/* Error check: the requested bw must not exceed the estimated bw */
++ if (req_bw > link->dpia_bw_alloc_config.estimated_bw) {
++ DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n",
++ __func__, link->link_index);
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
++ }
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ requested_bw = temp / Kbps_TO_Gbps;
+
+- // Always make sure to add more to account for floating points
++	/* Always round up to account for any fractional remainder */
+ if (temp % Kbps_TO_Gbps)
+ ++requested_bw;
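++	/* e.g. granularity 4 (0.25 Gbps units): req_bw of 4,500,000 kbps encodes as 18 */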
+
+- // 2. Add check for this corner case #2
++ /* Error check whether requested and allocated are equal */
+ req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+- if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
+- return;
++ if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
++ DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
++ __func__, link->link_index);
++ }
+
+- if (core_link_write_dpcd(
++ link->dpia_bw_alloc_config.response_ready = false; // Reset flag
++ core_link_write_dpcd(
+ link,
+ REQUESTED_BW,
+ &requested_bw,
+- sizeof(uint8_t)) == DC_OK)
+- link->dpia_bw_alloc_config.response_ready = false; // Reset flag
++ sizeof(uint8_t));
+ }
++
+ /*
+ * Return the response_ready flag from dc_link struct
+ *
+@@ -241,6 +294,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link)
+ {
+ return link->dpia_bw_alloc_config.response_ready;
+ }
++
+ // ------------------------------------------------------------------
+ // PUBLIC FUNCTIONS
+ // ------------------------------------------------------------------
+@@ -277,27 +331,35 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+ DPTX_BW_ALLOCATION_MODE_CONTROL,
+ &response,
+ sizeof(uint8_t)) != DC_OK) {
+- DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
+- __func__);
++ DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
++ __func__, link->link_index);
+ } else {
+ // SUCCESS Enabled DPtx BW Allocation Mode Support
+- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+- DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n",
+- __func__);
++ DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
++ __func__, link->link_index);
+
+ ret = true;
+ init_usb4_bw_struct(link);
++ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
++
++ /*
++ * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
++		 * During DP tunnel creation, the CM preallocates BW and reduces the estimated
++		 * BW of the other DPIA. The CM releases the preallocation only when allocation
++		 * is complete. Do a zero alloc to make the CM release the preallocation and
++		 * update the estimated BW correctly for all DPIAs per host router
++ */
++ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
+ }
+
+ out:
+ return ret;
+ }
++
+ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result)
+ {
+ int bw_needed = 0;
+ int estimated = 0;
+- int host_router_total_estimated_bw = 0;
+
+ if (!get_bw_alloc_proceed_flag((link)))
+ return;
+@@ -306,14 +368,22 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+
+ case DPIA_BW_REQ_FAILED:
+
+- DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
++ /*
++ * Ideally, we shouldn't run into this case as we always validate available
++ * bandwidth and request within that limit
++ */
++ estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
++
++ DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n",
++ __func__, link->link_index);
++ DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
++ __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
+
+- // Update the new Estimated BW value updated by CM
+- link->dpia_bw_alloc_config.estimated_bw =
+- bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
++ /* Update the new Estimated BW value updated by CM */
++ link->dpia_bw_alloc_config.estimated_bw = estimated;
+
++	/* Re-request using the updated estimated bandwidth */
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
+- link->dpia_bw_alloc_config.response_ready = false;
+
+ /*
+ * If FAIL then it is either:
+@@ -326,68 +396,34 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+
+ case DPIA_BW_REQ_SUCCESS:
+
+- DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
+-
+- // 1. SUCCESS 1st time before any Pruning is done
+- // 2. SUCCESS after prev. FAIL before any Pruning is done
+- // 3. SUCCESS after Pruning is done but before enabling link
+-
+ bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+- // 1.
+- if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
+-
+- allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link);
+- link->dpia_bw_alloc_config.sink_verified_bw =
+- link->dpia_bw_alloc_config.sink_allocated_bw;
+-
+- // SUCCESS from first attempt
+- if (link->dpia_bw_alloc_config.sink_allocated_bw >
+- link->dpia_bw_alloc_config.sink_max_bw)
+- link->dpia_bw_alloc_config.sink_verified_bw =
+- link->dpia_bw_alloc_config.sink_max_bw;
+- }
+- // 3.
+- else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
+-
+- // Find out how much do we need to de-alloc
+- if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed)
+- deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+- link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link);
+- else
+- allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+- bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
+- }
++ DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
++ __func__, link->link_index);
++ DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
++ __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
+
+- // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
+- // => check if estimated_bw changed
++ link->dpia_bw_alloc_config.allocated_bw = bw_needed;
+
+ link->dpia_bw_alloc_config.response_ready = true;
+ break;
+
+ case DPIA_EST_BW_CHANGED:
+
+- DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
+-
+ estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+- host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
+
+- // 1. If due to unplug of other sink
+- if (estimated == host_router_total_estimated_bw) {
+- // First update the estimated & max_bw fields
+- if (link->dpia_bw_alloc_config.estimated_bw < estimated)
+- link->dpia_bw_alloc_config.estimated_bw = estimated;
+- }
+- // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
+- else {
+- // We lost estimated bw usually due to plug event of other dpia
+- link->dpia_bw_alloc_config.estimated_bw = estimated;
+- }
++ DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n",
++ __func__, link->link_index);
++ DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n",
++ __func__, link->dpia_bw_alloc_config.estimated_bw, estimated);
++
++ link->dpia_bw_alloc_config.estimated_bw = estimated;
+ break;
+
+ case DPIA_BW_ALLOC_CAPS_CHANGED:
+
+- DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
++ DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n",
++ __func__, link->link_index);
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+ break;
+ }
+@@ -405,21 +441,21 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+ if (link->hpd_status && peak_bw > 0) {
+
+ // If DP over USB4 then we need to check BW allocation
+- link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
+- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
++ link->dpia_bw_alloc_config.link_max_bw = peak_bw;
++ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
+
+ do {
+- if (!(timeout > 0))
++ if (timeout > 0)
+ timeout--;
+ else
+ break;
+- fsleep(10 * 1000);
++ msleep(10);
+ } while (!get_cm_response_ready_flag(link));
+
+ if (!timeout)
+ ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+- else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+- ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
++ else if (link->dpia_bw_alloc_config.allocated_bw > 0)
++ ret = link->dpia_bw_alloc_config.allocated_bw;
+ }
+ //2. Cold Unplug
+ else if (!link->hpd_status)
+@@ -428,65 +464,102 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+ out:
+ return ret;
+ }
+-int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
++bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+ {
+- int ret = 0;
++ bool ret = false;
+ uint8_t timeout = 10;
+
++ DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
++ __func__, link->link_index, link->hpd_status,
++ link->dpia_bw_alloc_config.allocated_bw, req_bw);
++
+ if (!get_bw_alloc_proceed_flag(link))
+ goto out;
+
+- /*
+- * Sometimes stream uses same timing parameters as the already
+- * allocated max sink bw so no need to re-alloc
+- */
+- if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) {
+- set_usb4_req_bw_req(link, req_bw);
+- do {
+- if (!(timeout > 0))
+- timeout--;
+- else
+- break;
+- udelay(10 * 1000);
+- } while (!get_cm_response_ready_flag(link));
++ set_usb4_req_bw_req(link, req_bw);
++ do {
++ if (timeout > 0)
++ timeout--;
++ else
++ break;
++ msleep(10);
++ } while (!get_cm_response_ready_flag(link));
+
+- if (!timeout)
+- ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+- else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+- ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+- }
++ if (timeout)
++ ret = true;
+
+ out:
++ DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret);
+ return ret;
+ }
++
+ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+ {
+ bool ret = true;
+- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
+- uint8_t lowest_dpia_index = 0, dpia_index = 0;
+- uint8_t i;
++ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
++ uint8_t lowest_dpia_index, i, hr_index;
+
+ if (!num_dpias || num_dpias > MAX_DPIA_NUM)
+ return ret;
+
+- //Get total Host Router BW & Validate against each Host Router max BW
++ lowest_dpia_index = get_lowest_dpia_index(link[0]);
++
++ /* get total Host Router BW with granularity for the given modes */
+ for (i = 0; i < num_dpias; ++i) {
++ int granularity_Gbps = 0;
++ int bw_granularity = 0;
+
+ if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
+ continue;
+
+- lowest_dpia_index = get_lowest_dpia_index(link[i]);
+ if (link[i]->link_index < lowest_dpia_index)
+ continue;
+
+- dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
+- bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
+- if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
++ granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
++ bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
++ ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
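++		/* i.e. bw_needed_per_dpia[i] rounded up to the next multiple of granularity_Gbps */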
+
+- ret = false;
+- break;
++ hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
++ bw_needed_per_hr[hr_index] += bw_granularity;
++ }
++
++ /* validate against each Host Router max BW */
++ for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
++ if (bw_needed_per_hr[hr_index]) {
++ host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
++ if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
++ ret = false;
++ break;
++ }
+ }
+ }
+
+ return ret;
+ }
++
++int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
++{
++ int dp_overhead = 0, link_mst_overhead = 0;
++
++ if (!get_bw_alloc_proceed_flag((link)))
++ return dp_overhead;
++
++	/* if it's an MST link, add MTPH overhead */
++ if ((link->type == dc_connection_mst_branch) &&
++ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
++ /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
++ * MST overhead is 1/64 of link bandwidth (excluding any overhead)
++ */
++ const struct dc_link_settings *link_cap =
++ dc_link_get_link_cap(link);
++ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
++ (uint32_t)link_cap->lane_count *
++ LINK_RATE_REF_FREQ_IN_KHZ * 8;
++ link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
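++		/* i.e. ceil(link_bw_in_kbps / 64): one MTP time slot in 64 carries the MTPH */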
++ }
++
++ /* add all the overheads */
++ dp_overhead = link_mst_overhead;
++
++ return dp_overhead;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+index 7292690383ae1f..3b6d8494f9d5da 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+@@ -59,9 +59,9 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: Bw requested by the stream
+ *
+- * return: allocated bw else return 0
++ * return: true if allocated successfully
+ */
+-int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
++bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw);
+
+ /*
+ * Handle the USB4 BW Allocation related functionality here:
+@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+ */
+ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
+
++/*
++ * Obtain all the DP overheads in dp tunneling for the dpia link
++ *
++ * @link: pointer to the dc_link struct instance
++ *
++ * return: DP overheads in DP tunneling
++ */
++int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
++
+ #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+index b7abba55bc2fdf..9bde0c8bf914a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+@@ -73,7 +73,8 @@ void dp_disable_link_phy(struct dc_link *link,
+ {
+ struct dc *dc = link->ctx->dc;
+
+- if (!link->wa_flags.dp_keep_receiver_powered)
++ if (!link->wa_flags.dp_keep_receiver_powered &&
++ !link->skip_implict_edp_power_control)
+ dpcd_write_rx_power_ctrl(link, false);
+
+ dc->hwss.disable_link_output(link, link_res, signal);
+@@ -142,32 +143,25 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
++ if (link_enc->funcs->fec_set_ready == NULL)
++ return DC_NOT_SUPPORTED;
+
+- if (!dp_should_enable_fec(link))
+- return status;
+-
+- if (link_enc->funcs->fec_set_ready &&
+- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+- if (ready) {
+- fec_config = 1;
+- status = core_link_write_dpcd(link,
+- DP_FEC_CONFIGURATION,
+- &fec_config,
+- sizeof(fec_config));
+- if (status == DC_OK) {
+- link_enc->funcs->fec_set_ready(link_enc, true);
+- link->fec_state = dc_link_fec_ready;
+- } else {
+- link_enc->funcs->fec_set_ready(link_enc, false);
+- link->fec_state = dc_link_fec_not_ready;
+- dm_error("dpcd write failed to set fec_ready");
+- }
+- } else if (link->fec_state == dc_link_fec_ready) {
++ if (ready && dp_should_enable_fec(link)) {
++ fec_config = 1;
++
++ status = core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
++ &fec_config, sizeof(fec_config));
++
++ if (status == DC_OK) {
++ link_enc->funcs->fec_set_ready(link_enc, true);
++ link->fec_state = dc_link_fec_ready;
++ }
++ } else {
++ if (link->fec_state == dc_link_fec_ready) {
+ fec_config = 0;
+- status = core_link_write_dpcd(link,
+- DP_FEC_CONFIGURATION,
+- &fec_config,
+- sizeof(fec_config));
++ core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
++ &fec_config, sizeof(fec_config));
++
+ link_enc->funcs->fec_set_ready(link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ }
+@@ -182,14 +176,12 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+-
+- if (!dp_should_enable_fec(link))
++ if (link_enc->funcs->fec_set_enable == NULL)
+ return;
+
+- if (link_enc->funcs->fec_set_enable &&
+- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+- if (link->fec_state == dc_link_fec_ready && enable) {
+- /* Accord to DP spec, FEC enable sequence can first
++ if (enable && dp_should_enable_fec(link)) {
++ if (link->fec_state == dc_link_fec_ready) {
++ /* According to DP spec, FEC enable sequence can first
+ * be transmitted anytime after 1000 LL codes have
+ * been transmitted on the link after link training
+ * completion. Using 1 lane RBR should have the maximum
+@@ -199,7 +191,9 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
+ udelay(7);
+ link_enc->funcs->fec_set_enable(link_enc, true);
+ link->fec_state = dc_link_fec_enabled;
+- } else if (link->fec_state == dc_link_fec_enabled && !enable) {
++ }
++ } else {
++ if (link->fec_state == dc_link_fec_enabled) {
+ link_enc->funcs->fec_set_enable(link_enc, false);
+ link->fec_state = dc_link_fec_ready;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+index 90339c2dfd8487..9d1adfc09fb2aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+@@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status(
+ {
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_status lane_status;
++ union lane_align_status_updated dpcd_lane_status_updated;
+ uint8_t dpcd_buf[6] = {0};
+ uint32_t lane;
+
+@@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status(
+ * check lanes status
+ */
+ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
++ dpcd_lane_status_updated.raw = dpcd_buf[4];
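++		/* dpcd_buf[4] is LANE_ALIGN_STATUS_UPDATED (DPCD 0x204) */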
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+- !lane_status.bits.SYMBOL_LOCKED_0) {
++ !lane_status.bits.SYMBOL_LOCKED_0 ||
++ !dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+@@ -807,7 +810,7 @@ void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
++ union dpcd_training_lane *dpcd_lane_settings)
+ {
+ uint32_t lane;
+
+@@ -911,10 +914,10 @@ static enum dc_status configure_lttpr_mode_non_transparent(
+ /* Driver does not need to train the first hop. Skip DPCD read and clear
+ * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */
+- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && repeater_cnt > 0 && repeater_cnt < MAX_REPEATER_CNT)
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+
+- for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
++ for (repeater_id = repeater_cnt; repeater_id > 0 && repeater_id < MAX_REPEATER_CNT; repeater_id--) {
+ aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ core_link_read_dpcd(
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+index 7d027bac82551d..851bd17317a0c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+@@ -111,7 +111,7 @@ void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
++ union dpcd_training_lane *dpcd_lane_settings);
+
+ enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+index fd8f6f19814617..68096d12f52fd6 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+@@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
+ lt_settings->cr_pattern_time = 16000;
+
+ /* Fixed VS/PE specific: Toggle link rate */
+- apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
++ apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0));
+ target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
+ toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
+
+@@ -271,7 +271,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
++ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+@@ -617,7 +617,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
++ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+index 5c9a30211c109f..fc50931c2aecbb 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
+ uint32_t extended_size;
+ /* size of the remaining partitioned address space */
+ uint32_t size_left_to_read;
+- enum dc_status status;
++ enum dc_status status = DC_ERROR_UNEXPECTED;
+ /* size of the next partition to be read from */
+ uint32_t partition_size;
+ uint32_t data_index = 0;
+@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
+ {
+ uint32_t partition_size;
+ uint32_t data_index = 0;
+- enum dc_status status;
++ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ while (size) {
+ partition_size = dpcd_get_next_partition_size(address, size);
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+index 98e715aa6d8e34..13104d000b9e09 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+@@ -33,6 +33,7 @@
+ #include "link_dp_capability.h"
+ #include "dm_helpers.h"
+ #include "dal_asic_id.h"
++#include "link_dp_phy.h"
+ #include "dce/dmub_psr.h"
+ #include "dc/dc_dmub_srv.h"
+ #include "dce/dmub_replay.h"
+@@ -167,7 +168,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+- link->backlight_settings.backlight_millinits = backlight_millinits;
+
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+@@ -280,9 +280,9 @@ bool set_default_brightness_aux(struct dc_link *link)
+ if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
+ if (!read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+- // if < 5 nits or > 5000, it might be wrong readback
+- if (default_backlight < 5000 || default_backlight > 5000000)
+- default_backlight = 150000; //
++		// if < 1 nit or > 5000 nits (values are in millinits), it might be a wrong readback
++ if (default_backlight < 1000 || default_backlight > 5000000)
++ default_backlight = 150000;
+
+ return edp_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+@@ -290,14 +290,23 @@ bool set_default_brightness_aux(struct dc_link *link)
+ return false;
+ }
+
+-bool set_cached_brightness_aux(struct dc_link *link)
++bool edp_is_ilr_optimization_enabled(struct dc_link *link)
+ {
+- if (link->backlight_settings.backlight_millinits)
+- return edp_set_backlight_level_nits(link, true,
+- link->backlight_settings.backlight_millinits, 0);
+- else
+- return set_default_brightness_aux(link);
+- return false;
++ if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)
++ return false;
++ return true;
++}
++
++enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link)
++{
++ enum dc_link_rate link_rate = link->reported_link_cap.link_rate;
++
++ for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) {
++ if (link_rate < link->dpcd_caps.edp_supported_link_rates[i])
++ link_rate = link->dpcd_caps.edp_supported_link_rates[i];
++ }
++
++ return link_rate;
+ }
+
+ bool edp_is_ilr_optimization_required(struct dc_link *link,
+@@ -311,8 +320,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
+
+ ASSERT(link || crtc_timing); // invalid input
+
+- if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+- !link->panel_config.ilr.optimize_edp_link_rate)
++ if (!edp_is_ilr_optimization_enabled(link))
+ return false;
+
+
+@@ -362,6 +370,34 @@ void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+ link->dc->hwss.edp_backlight_control(link, true);
+ }
+
++void edp_set_panel_power(struct dc_link *link, bool powerOn)
++{
++ if (powerOn) {
++ // 1. panel VDD on
++ if (!link->dc->config.edp_no_power_sequencing)
++ link->dc->hwss.edp_power_control(link, true);
++ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
++
++ // 2. panel BL on
++ if (link->dc->hwss.edp_backlight_control)
++ link->dc->hwss.edp_backlight_control(link, true);
++
++ // 3. Rx power on
++ dpcd_write_rx_power_ctrl(link, true);
++ } else {
++ // 3. Rx power off
++ dpcd_write_rx_power_ctrl(link, false);
++
++ // 2. panel BL off
++ if (link->dc->hwss.edp_backlight_control)
++ link->dc->hwss.edp_backlight_control(link, false);
++
++ // 1. panel VDD off
++ if (!link->dc->config.edp_no_power_sequencing)
++ link->dc->hwss.edp_power_control(link, false);
++ }
++}
++
+ bool edp_wait_for_t12(struct dc_link *link)
+ {
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+@@ -846,7 +882,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+
+ /* Set power optimization flag */
+ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
+- if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
++ if (replay != NULL && link->replay_settings.replay_feature_enabled &&
++ replay->funcs->replay_set_power_opt) {
+ replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
+ link->replay_settings.replay_power_opt_active = *power_opts;
+ }
+@@ -884,8 +921,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
+ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+ {
+ /* To-do: Setup Replay */
+- struct dc *dc = link->ctx->dc;
+- struct dmub_replay *replay = dc->res_pool->replay;
++ struct dc *dc;
++ struct dmub_replay *replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+@@ -901,6 +938,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
+ if (!link)
+ return false;
+
++ dc = link->ctx->dc;
++
++ replay = dc->res_pool->replay;
++
+ if (!replay)
+ return false;
+
+@@ -929,8 +970,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+- if (replay)
+- link->replay_settings.replay_feature_enabled =
++ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+ if (link->replay_settings.replay_feature_enabled) {
+
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+index 0a5bbda8c739c4..a034288ad75d4a 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+@@ -30,7 +30,6 @@
+ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
+ bool set_default_brightness_aux(struct dc_link *link);
+-bool set_cached_brightness_aux(struct dc_link *link);
+ void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
+ int edp_get_backlight_level(const struct dc_link *link);
+ bool edp_get_backlight_level_nits(struct dc_link *link,
+@@ -64,9 +63,12 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+ bool edp_wait_for_t12(struct dc_link *link);
+ bool edp_is_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
++bool edp_is_ilr_optimization_enabled(struct dc_link *link);
++enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link);
+ bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
+ void edp_add_delay_for_T9(struct dc_link *link);
+ bool edp_receiver_ready_T9(struct dc_link *link);
+ bool edp_receiver_ready_T7(struct dc_link *link);
+ bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
++void edp_set_panel_power(struct dc_link *link, bool powerOn);
+ #endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index 2d995c87fbb986..d3c4a9a577eeab 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -186,6 +186,7 @@ struct dmub_srv_region_params {
+ uint32_t vbios_size;
+ const uint8_t *fw_inst_const;
+ const uint8_t *fw_bss_data;
++ bool is_mailbox_in_inbox;
+ };
+
+ /**
+@@ -205,20 +206,25 @@ struct dmub_srv_region_params {
+ */
+ struct dmub_srv_region_info {
+ uint32_t fb_size;
++ uint32_t inbox_size;
+ uint8_t num_regions;
+ struct dmub_region regions[DMUB_WINDOW_TOTAL];
+ };
+
+ /**
+- * struct dmub_srv_fb_params - parameters used for driver fb setup
++ * struct dmub_srv_memory_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+- * @cpu_addr: base cpu address for the framebuffer
+- * @gpu_addr: base gpu virtual address for the framebuffer
++ * @cpu_fb_addr: base cpu address for the framebuffer
++ * @cpu_inbox_addr: base cpu address for the gart
++ * @gpu_fb_addr: base gpu virtual address for the framebuffer
++ * @gpu_inbox_addr: base gpu virtual address for the gart
+ */
+-struct dmub_srv_fb_params {
++struct dmub_srv_memory_params {
+ const struct dmub_srv_region_info *region_info;
+- void *cpu_addr;
+- uint64_t gpu_addr;
++ void *cpu_fb_addr;
++ void *cpu_inbox_addr;
++ uint64_t gpu_fb_addr;
++ uint64_t gpu_inbox_addr;
+ };
+
+ /**
+@@ -546,8 +552,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+- const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++ const struct dmub_srv_memory_params *params,
+ struct dmub_srv_fb_info *out);
+
+ /**
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index 7afa78b918b58f..d58cb7f63a4b12 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -3301,6 +3301,16 @@ struct dmub_cmd_abm_set_pipe_data {
+ * TODO: Remove.
+ */
+ uint8_t ramping_boundary;
++
++ /**
++ * PwrSeq HW Instance.
++ */
++ uint8_t pwrseq_inst;
++
++ /**
++ * Explicit padding to 4 byte boundary.
++ */
++ uint8_t pad[3];
+ };
+
+ /**
+@@ -3715,7 +3725,7 @@ enum dmub_cmd_panel_cntl_type {
+ * struct dmub_cmd_panel_cntl_data - Panel control data.
+ */
+ struct dmub_cmd_panel_cntl_data {
+- uint32_t inst; /**< panel instance */
++ uint32_t pwrseq_inst; /**< pwrseq instance */
+ uint32_t current_backlight; /* in/out */
+ uint32_t bl_pwm_cntl; /* in/out */
+ uint32_t bl_pwm_period_cntl; /* in/out */
+@@ -3742,7 +3752,7 @@ struct dmub_cmd_lvtma_control_data {
+ uint8_t uc_pwr_action; /**< LVTMA_ACTION */
+ uint8_t bypass_panel_control_wait;
+ uint8_t reserved_0[2]; /**< For future use */
+- uint8_t panel_inst; /**< LVTMA control instance */
++ uint8_t pwrseq_inst; /**< LVTMA control instance */
+ uint8_t reserved_1[3]; /**< For future use */
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 93624ffe4eb824..6c45e216c709c2 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -386,7 +386,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
+-
++ uint32_t previous_top = 0;
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+@@ -411,8 +411,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ bios->base = dmub_align(stack->top, 256);
+ bios->top = bios->base + params->vbios_size;
+
+- mail->base = dmub_align(bios->top, 256);
+- mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ if (params->is_mailbox_in_inbox) {
++ mail->base = 0;
++ mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ previous_top = bios->top;
++ } else {
++ mail->base = dmub_align(bios->top, 256);
++ mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ previous_top = mail->top;
++ }
+
+ fw_info = dmub_get_fw_meta_info(params);
+
+@@ -431,7 +438,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ dmub->fw_version = fw_info->fw_version;
+ }
+
+- trace_buff->base = dmub_align(mail->top, 256);
++ trace_buff->base = dmub_align(previous_top, 256);
+ trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+
+ fw_state->base = dmub_align(trace_buff->top, 256);
+@@ -442,11 +449,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
+
++ if (params->is_mailbox_in_inbox)
++ out->inbox_size = dmub_align(mail->top, 4096);
++
+ return DMUB_STATUS_OK;
+ }
+
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+- const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++ const struct dmub_srv_memory_params *params,
+ struct dmub_srv_fb_info *out)
+ {
+ uint8_t *cpu_base;
+@@ -461,8 +471,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ return DMUB_STATUS_INVALID;
+
+- cpu_base = (uint8_t *)params->cpu_addr;
+- gpu_base = params->gpu_addr;
++ cpu_base = (uint8_t *)params->cpu_fb_addr;
++ gpu_base = params->gpu_fb_addr;
+
+ for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ const struct dmub_region *reg =
+@@ -470,6 +480,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+
+ out->fb[i].cpu_addr = cpu_base + reg->base;
+ out->fb[i].gpu_addr = gpu_base + reg->base;
++
++ if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
++ out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
++ out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
++ }
++
+ out->fb[i].size = reg->top - reg->base;
+ }
+
+@@ -658,9 +674,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+- dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
++ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
++
++ if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ } else {
++ dmub->inbox1_rb.rptr = rptr;
++ dmub->inbox1_rb.wrpt = wptr;
++ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ }
+ }
+
+ return DMUB_STATUS_OK;
+@@ -694,6 +717,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
++ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
++ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ }
++
+ if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ return DMUB_STATUS_OK;
+
+@@ -969,6 +997,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
+ ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ if (ack)
+ return DMUB_STATUS_OK;
++ udelay(1);
+ }
+ return DMUB_STATUS_TIMEOUT;
+ }
+diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+index 914f28e9f22426..aee5170f5fb231 100644
+--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
++++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+@@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
+ #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
+ #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+
++/* Remove once drm_dp_helper.h is updated upstream */
++#ifndef DP_TOTAL_LTTPR_CNT
++#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */
++#endif
++
+ #endif /* __DAL_DPCD_DEFS_H__ */
+diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+index bc96d021136080..813463ffe15c52 100644
+--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
++++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+@@ -417,6 +417,8 @@ struct integrated_info {
+ /* V2.1 */
+ struct edp_info edp1_info;
+ struct edp_info edp2_info;
++ uint32_t gpuclk_ss_percentage;
++ uint32_t gpuclk_ss_type;
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index ef3a674090211c..803586f4267af8 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -133,7 +133,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
+
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+- stream->timing.h_total), 1000000);
++ stream->timing.h_total) + 500000, 1000000);
+
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total) {
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+index 1ddb4f5eac8e53..cee5e9e64ae711 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+@@ -432,18 +432,18 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ goto out;
+ }
+
+- if (status == MOD_HDCP_STATUS_SUCCESS)
+- mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+- &input->bstatus_read, &status,
+- hdcp, "bstatus_read");
+- if (status == MOD_HDCP_STATUS_SUCCESS)
+- mod_hdcp_execute_and_set(check_link_integrity_dp,
+- &input->link_integrity_check, &status,
+- hdcp, "link_integrity_check");
+- if (status == MOD_HDCP_STATUS_SUCCESS)
+- mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+- &input->reauth_request_check, &status,
+- hdcp, "reauth_request_check");
++ mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
++ &input->bstatus_read, &status,
++ hdcp, "bstatus_read");
++
++ mod_hdcp_execute_and_set(check_link_integrity_dp,
++ &input->link_integrity_check, &status,
++ hdcp, "link_integrity_check");
++
++ mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
++ &input->reauth_request_check, &status,
++ hdcp, "reauth_request_check");
++
+ out:
+ return status;
+ }
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index f7b5583ee609a5..1b2df97226a3f2 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,7 +156,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
++ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++ msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
++ return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ if (is_dp_hdcp(hdcp)) {
++ int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++ sizeof(hdcp_dpcd_addrs[0]);
++ if (msg_id >= num_dpcd_addrs)
++ return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+@@ -171,6 +180,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ data_offset += cur_size;
+ }
+ } else {
++ int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++ sizeof(hdcp_i2c_offsets[0]);
++ if (msg_id >= num_i2c_offsets)
++ return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ success = hdcp->config.ddc.funcs.read_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+@@ -215,7 +229,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
++ if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++ msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
++ return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ if (is_dp_hdcp(hdcp)) {
++ int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++ sizeof(hdcp_dpcd_addrs[0]);
++ if (msg_id >= num_dpcd_addrs)
++ return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.write_dpcd(
+@@ -231,6 +254,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ data_offset += cur_size;
+ }
+ } else {
++ int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++ sizeof(hdcp_i2c_offsets[0]);
++ if (msg_id >= num_i2c_offsets)
++ return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ memmove(&hdcp->buf[1], buf, buf_len);
+ success = hdcp->config.ddc.funcs.write_i2c(
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index ee67a35c2a8edd..ff930a71e496a9 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
+
+ if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
+diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+index 4220fd8fdd60ca..54cd86060f4d67 100644
+--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
++++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
+ unsigned int length);
+
+ void mod_stats_update_flip(struct mod_stats *mod_stats,
+- unsigned long timestamp_in_ns);
++ unsigned long long timestamp_in_ns);
+
+ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
+- unsigned long timestamp_in_ns);
++ unsigned long long timestamp_in_ns);
+
+ void mod_stats_update_freesync(struct mod_stats *mod_stats,
+ unsigned int v_total_min,
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 73a2b37fbbd759..2b3d5183818aca 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
+ ((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) ||
+ (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07)))
+ isPSRSUSupported = false;
++ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
++ isPSRSUSupported = false;
+ else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
+ isPSRSUSupported = true;
+ }
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index 67d7b7ee8a2a02..a9880fc5319550 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -240,7 +240,6 @@ enum DC_FEATURE_MASK {
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
+ DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
+ DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+- DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
+ };
+
+ enum DC_DEBUG_MASK {
+@@ -251,7 +250,6 @@ enum DC_DEBUG_MASK {
+ DC_DISABLE_PSR = 0x10,
+ DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+ DC_DISABLE_MPO = 0x40,
+- DC_DISABLE_REPLAY = 0x50,
+ DC_ENABLE_DPIA_TRACE = 0x80,
+ };
+
+@@ -297,6 +295,7 @@ struct amd_ip_funcs {
+ int (*hw_init)(void *handle);
+ int (*hw_fini)(void *handle);
+ void (*late_fini)(void *handle);
++ int (*prepare_suspend)(void *handle);
+ int (*suspend)(void *handle);
+ int (*resume)(void *handle);
+ bool (*is_idle)(void *handle);
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+index c92c4b83253f81..4bff1ef8a9a640 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+@@ -6369,6 +6369,8 @@
+ #define regTCP_INVALIDATE_BASE_IDX 1
+ #define regTCP_STATUS 0x19a1
+ #define regTCP_STATUS_BASE_IDX 1
++#define regTCP_CNTL 0x19a2
++#define regTCP_CNTL_BASE_IDX 1
+ #define regTCP_CNTL2 0x19a3
+ #define regTCP_CNTL2_BASE_IDX 1
+ #define regTCP_DEBUG_INDEX 0x19a5
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index fa7d6ced786f19..ccc79bdd4f5adf 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -702,7 +702,7 @@ struct atom_gpio_pin_lut_v2_1
+ {
+ struct atom_common_table_header table_header;
+ /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
+- struct atom_gpio_pin_assignment gpio_pin[8];
++ struct atom_gpio_pin_assignment gpio_pin[];
+ };
+
+
+@@ -1006,7 +1006,7 @@ struct display_object_info_table_v1_4
+ uint16_t supporteddevices;
+ uint8_t number_of_path;
+ uint8_t reserved;
+- struct atom_display_object_path_v2 display_path[8]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
++ struct atom_display_object_path_v2 display_path[]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
+ };
+
+ struct display_object_info_table_v1_5 {
+@@ -1016,7 +1016,7 @@ struct display_object_info_table_v1_5 {
+ uint8_t reserved;
+ // the real number of this included in the structure is calculated by using the
+ // (whole structure size - the header size- number_of_path)/size of atom_display_object_path
+- struct atom_display_object_path_v3 display_path[8];
++ struct atom_display_object_path_v3 display_path[];
+ };
+
+ /*
+@@ -1625,6 +1625,49 @@ struct atom_integrated_system_info_v2_2
+ uint32_t reserved4[189];
+ };
+
++struct uma_carveout_option {
++ char optionName[29]; //max length of string is 28chars + '\0'. Current design is for "minimum", "Medium", "High". This makes entire struct size 64bits
++ uint8_t memoryCarvedGb; //memory carved out with setting
++ uint8_t memoryRemainingGb; //memory remaining on system
++ union {
++ struct _flags {
++ uint8_t Auto : 1;
++ uint8_t Custom : 1;
++ uint8_t Reserved : 6;
++ } flags;
++ uint8_t all8;
++ } uma_carveout_option_flags;
++};
++
++struct atom_integrated_system_info_v2_3 {
++ struct atom_common_table_header table_header;
++ uint32_t vbios_misc; // enum of atom_system_vbiosmisc_def
++ uint32_t gpucapinfo; // enum of atom_system_gpucapinf_def
++ uint32_t system_config;
++ uint32_t cpucapinfo;
++ uint16_t gpuclk_ss_percentage; // unit of 0.001%, 1000 mean 1%
++ uint16_t gpuclk_ss_type;
++ uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
++ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
++ uint8_t umachannelnumber; // number of memory channels
++ uint8_t htc_hyst_limit;
++ uint8_t htc_tmp_limit;
++ uint8_t reserved1; // dp_ss_control
++ uint8_t gpu_package_id;
++ struct edp_info_table edp1_info;
++ struct edp_info_table edp2_info;
++ uint32_t reserved2[8];
++ struct atom_external_display_connection_info extdispconninfo;
++ uint8_t UMACarveoutVersion;
++ uint8_t UMACarveoutIndexMax;
++ uint8_t UMACarveoutTypeDefault;
++ uint8_t UMACarveoutIndexDefault;
++ uint8_t UMACarveoutType; //Auto or Custom
++ uint8_t UMACarveoutIndex;
++ struct uma_carveout_option UMASizeControlOption[20];
++ uint8_t reserved3[110];
++};
++
+ // system_config
+ enum atom_system_vbiosmisc_def{
+ INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01,
+@@ -3508,7 +3551,7 @@ struct atom_gpio_voltage_object_v4
+ uint8_t phase_delay_us; // phase delay in unit of micro second
+ uint8_t reserved;
+ uint32_t gpio_mask_val; // GPIO Mask value
+- struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
++ struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
+ };
+
+ struct atom_svid2_voltage_object_v4
+diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+index b1db2b19018742..e07e93167a82c2 100644
+--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
++++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+@@ -571,7 +571,8 @@ struct SET_SHADER_DEBUGGER {
+ struct {
+ uint32_t single_memop : 1; /* SQ_DEBUG.single_memop */
+ uint32_t single_alu_op : 1; /* SQ_DEBUG.single_alu_op */
+- uint32_t reserved : 30;
++ uint32_t reserved : 29;
++ uint32_t process_ctx_flush : 1;
+ };
+ uint32_t u32all;
+ } flags;
+diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
+index 0b6a057e0a4c48..5aac8d545bdc6d 100644
+--- a/drivers/gpu/drm/amd/include/pptable.h
++++ b/drivers/gpu/drm/amd/include/pptable.h
+@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+ typedef struct _ATOM_PPLIB_STATE
+ {
+ UCHAR ucNonClockStateIndex;
+- UCHAR ucClockStateIndices[1]; // variable-sized
++ UCHAR ucClockStateIndices[]; // variable-sized
+ } ATOM_PPLIB_STATE;
+
+
+@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+- UCHAR clockInfoIndex[1];
++ UCHAR clockInfoIndex[];
+ } ATOM_PPLIB_STATE_V2;
+
+ typedef struct _StateArray{
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 8bb2da13826f16..babb73147adfb3 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -734,7 +734,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+- if (count > 127)
++ if (count > 127 || count == 0)
+ return -EINVAL;
+
+ if (*buf == 's')
+@@ -754,7 +754,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ else
+ return -EINVAL;
+
+- memcpy(buf_cpy, buf, count+1);
++ memcpy(buf_cpy, buf, count);
++ buf_cpy[count] = 0;
+
+ tmp_str = buf_cpy;
+
+@@ -771,6 +772,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ return -EINVAL;
+ parameter_size++;
+
++ if (!tmp_str)
++ break;
++
+ while (isspace(*tmp_str))
+ tmp_str++;
+ }
+@@ -1467,9 +1471,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+ return -EINVAL;
+ }
+
+-static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
+- enum amd_pp_sensors sensor,
+- void *query)
++static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
++ enum amd_pp_sensors sensor,
++ void *query)
+ {
+ int r, size = sizeof(uint32_t);
+
+@@ -2391,6 +2395,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
+ {
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ int err, ret;
++ u32 pwm_mode;
+ int value;
+
+ if (amdgpu_in_reset(adev))
+@@ -2402,13 +2407,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
+ if (err)
+ return err;
+
++ if (value == 0)
++ pwm_mode = AMD_FAN_CTRL_NONE;
++ else if (value == 1)
++ pwm_mode = AMD_FAN_CTRL_MANUAL;
++ else if (value == 2)
++ pwm_mode = AMD_FAN_CTRL_AUTO;
++ else
++ return -EINVAL;
++
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return ret;
+ }
+
+- ret = amdgpu_dpm_set_fan_control_mode(adev, value);
++ ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+@@ -2773,8 +2787,8 @@ static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
+ return sysfs_emit(buf, "vddnb\n");
+ }
+
+-static unsigned int amdgpu_hwmon_get_power(struct device *dev,
+- enum amd_pp_sensors sensor)
++static int amdgpu_hwmon_get_power(struct device *dev,
++ enum amd_pp_sensors sensor)
+ {
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ unsigned int uw;
+@@ -2795,7 +2809,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+ {
+- unsigned int val;
++ int val;
+
+ val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
+ if (val < 0)
+@@ -2808,7 +2822,7 @@ static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+ {
+- unsigned int val;
++ int val;
+
+ val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
+ if (val < 0)
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+index 5d28c951a31972..c8586cb7d0fec5 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ if (table[i].ulSupportedSCLK != 0) {
++ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
++ continue;
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ table[i].usVoltageID;
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+@@ -2735,10 +2737,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev)
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+- if (ps == NULL) {
+- kfree(adev->pm.dpm.ps);
++ if (ps == NULL)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.ps[i].ps_priv = ps;
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+index 81fb4e5dd804bd..60377747bab4fc 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+@@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ dep_table);
+- if (ret) {
+- amdgpu_free_extended_power_table(adev);
++ if (ret)
+ return ret;
+- }
+ }
+ if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+@@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ dep_table);
+- if (ret) {
+- amdgpu_free_extended_power_table(adev);
++ if (ret)
+ return ret;
+- }
+ }
+ if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+@@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ dep_table);
+- if (ret) {
+- amdgpu_free_extended_power_table(adev);
++ if (ret)
+ return ret;
+- }
+ }
+ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+@@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ dep_table);
+- if (ret) {
+- amdgpu_free_extended_power_table(adev);
++ if (ret)
+ return ret;
+- }
+ }
+ if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+ ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+@@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ kcalloc(psl->ucNumEntries,
+ sizeof(struct amdgpu_phase_shedding_limits_entry),
+ GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
+ return -ENOMEM;
+- }
+
+ entry = &psl->entries[0];
+ for (i = 0; i < psl->ucNumEntries; i++) {
+@@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ ATOM_PPLIB_CAC_Leakage_Record *entry;
+ u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
+ return -ENOMEM;
+- }
+ entry = &cac_table->entries[0];
+ for (i = 0; i < cac_table->ucNumEntries; i++) {
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+@@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+@@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+@@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+@@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ le16_to_cpu(ext_hdr->usPPMTableOffset));
+ adev->pm.dpm.dyn_state.ppm_table =
+ kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.ppm_table) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.ppm_table)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+ adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+ le16_to_cpu(ppm->usCpuCoreNumber);
+@@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
+ return -ENOMEM;
+- }
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+@@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ ATOM_PowerTune_Table *pt;
+ adev->pm.dpm.dyn_state.cac_tdp_table =
+ kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.cac_tdp_table)
+ return -ENOMEM;
+- }
+ if (rev > 0) {
+ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ (mode_info->atom_context->bios + data_offset +
+@@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+ ret = amdgpu_parse_clk_voltage_dep_table(
+ &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
+ dep_table);
+- if (ret) {
+- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
++ if (ret)
+ return ret;
+- }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index 02e69ccff3bac4..99dde52a429013 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int si_set_temperature_range(struct amdgpu_device *adev)
++{
++ int ret;
++
++ ret = si_thermal_enable_alert(adev, false);
++ if (ret)
++ return ret;
++ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
++ if (ret)
++ return ret;
++ ret = si_thermal_enable_alert(adev, true);
++ if (ret)
++ return ret;
++
++ return ret;
++}
++
+ static void si_dpm_disable(struct amdgpu_device *adev)
+ {
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+@@ -7379,10 +7396,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
+ kcalloc(4,
+ sizeof(struct amdgpu_clock_voltage_dependency_entry),
+ GFP_KERNEL);
+- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+- amdgpu_free_extended_power_table(adev);
++ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
+ return -ENOMEM;
+- }
++
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+@@ -7609,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+
+ static int si_dpm_late_init(void *handle)
+ {
++ int ret;
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++
++ if (!adev->pm.dpm_enabled)
++ return 0;
++
++ ret = si_set_temperature_range(adev);
++ if (ret)
++ return ret;
++#if 0 //TODO ?
++ si_dpm_powergate_uvd(adev, true);
++#endif
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+index 9e4f8a4104a346..86f95a291d65f6 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -99,7 +99,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct amdgpu_dpm_thermal *range =
+ &adev->pm.dpm.thermal;
+- uint32_t gpu_temperature, size;
++ uint32_t gpu_temperature, size = sizeof(gpu_temperature);
+ int ret;
+
+ /*
+@@ -927,7 +927,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ enum PP_SMC_POWER_PROFILE type, bool en)
+ {
+ struct pp_hwmgr *hwmgr = handle;
+- long workload;
++ long workload[1];
+ uint32_t index;
+
+ if (!hwmgr || !hwmgr->pm_en)
+@@ -945,12 +945,12 @@ static int pp_dpm_switch_power_profile(void *handle,
+ hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
+ index = fls(hwmgr->workload_mask);
+ index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
+- workload = hwmgr->workload_setting[index];
++ workload[0] = hwmgr->workload_setting[index];
+ } else {
+ hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
+ index = fls(hwmgr->workload_mask);
+ index = index <= Workload_Policy_Max ? index - 1 : 0;
+- workload = hwmgr->workload_setting[index];
++ workload[0] = hwmgr->workload_setting[index];
+ }
+
+ if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+@@ -960,7 +960,7 @@ static int pp_dpm_switch_power_profile(void *handle,
+ }
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+- hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
++ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index 1d829402cd2e23..18f00038d8441c 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ {
+ int result;
+ unsigned int i;
+- unsigned int table_entries;
+ struct pp_power_state *state;
+- int size;
++ int size, table_entries;
+
+ if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ return 0;
+@@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ return 0;
+
+- hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
++ table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+
+- hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
++ size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ sizeof(struct pp_power_state);
+
+- if (table_entries == 0 || size == 0) {
++ if (table_entries <= 0 || size == 0) {
+ pr_warn("Please check whether power state management is supported on this asic\n");
++ hwmgr->num_ps = 0;
++ hwmgr->ps_size = 0;
+ return 0;
+ }
++ hwmgr->num_ps = table_entries;
++ hwmgr->ps_size = size;
+
+ hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
+ if (hwmgr->ps == NULL)
+@@ -269,7 +272,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
+ struct pp_power_state *new_ps)
+ {
+ uint32_t index;
+- long workload;
++ long workload[1];
+
+ if (hwmgr->not_vf) {
+ if (!skip_display_settings)
+@@ -294,10 +297,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ index = fls(hwmgr->workload_mask);
+ index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
+- workload = hwmgr->workload_setting[index];
++ workload[0] = hwmgr->workload_setting[index];
+
+- if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
+- hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
++ if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
++ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index f503e61faa6008..cc3b62f7339417 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
+ j++;
+ } else if ((table->mc_reg_address[i].uc_pre_reg_data &
+ LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+- table->mc_reg_table_entry[num_ranges].mc_data[i] =
+- table->mc_reg_table_entry[num_ranges].mc_data[i-1];
++ if (i)
++ table->mc_reg_table_entry[num_ranges].mc_data[i] =
++ table->mc_reg_table_entry[num_ranges].mc_data[i-1];
+ }
+ }
+ num_ranges++;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+index 7a31cfa5e7fb4d..9fcad69a9f3446 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
+ typedef struct _ATOM_Tonga_State_Array {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_State_Array;
+
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_MCLK_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_SCLK_Dependency_Table;
+
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Polaris_SCLK_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_PCIE_Record {
+@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
+ typedef struct _ATOM_Tonga_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_PCIE_Table;
+
+ typedef struct _ATOM_Polaris10_PCIE_Record {
+@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
+ typedef struct _ATOM_Polaris10_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Polaris10_PCIE_Table;
+
+
+@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
+ typedef struct _ATOM_Tonga_MM_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_MM_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_Voltage_Lookup_Table;
+
+ typedef struct _ATOM_Tonga_Fan_Table {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+index f2a55c1413f597..17882f8dfdd34f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
+ struct pp_hwmgr *hwmgr,
+ ATOM_Tonga_PPM_Table *atom_ppm_table)
+ {
+- struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
++ struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ struct phm_ppt_v1_information *pp_table_information =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+index 5794b64507bf94..56a22575258064 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+@@ -1185,6 +1185,8 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
+ fw_info = smu_atom_get_data_table(hwmgr->adev,
+ GetIndexIntoMasterTable(DATA, FirmwareInfo),
+ &size, &frev, &crev);
++ PP_ASSERT_WITH_CODE(fw_info != NULL,
++ "Missing firmware info!", return -EINVAL);
+
+ if ((fw_info->ucTableFormatRevision == 1)
+ && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4)))
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index 02ba68d7c6546b..f62381b189ade9 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+
+ switch (type) {
+ case PP_SCLK:
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
++ if (ret)
++ return ret;
+
+ /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
+ if (now == data->gfx_max_freq_limit/100)
+@@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ i == 2 ? "*" : "");
+ break;
+ case PP_MCLK:
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
++ if (ret)
++ return ret;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -1550,7 +1554,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ }
+
+ if (input[0] == 0) {
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++ if (ret)
++ return ret;
++
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+@@ -1558,7 +1565,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++ if (ret)
++ return ret;
++
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+@@ -1573,10 +1583,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+-
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
++ if (ret)
++ return ret;
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
++
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
++ if (ret)
++ return ret;
++
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 5a2371484a58c5..53849fd3615f68 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+
+ data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+- data->pcie_dpm_key_disabled =
+- !amdgpu_device_pcie_dynamic_switching_supported() ||
+- !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
++ data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ /* need to set voltage control types before EVV patching */
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+@@ -2959,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
+
+ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ {
++ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu7_hwmgr *data;
+ int result = 0;
+
+@@ -2976,6 +2975,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ result = smu7_get_evv_voltages(hwmgr);
+ if (result) {
+ pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
++ kfree(hwmgr->backend);
++ hwmgr->backend = NULL;
+ return -EINVAL;
+ }
+ } else {
+@@ -2993,38 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ /* Initalize Dynamic State Adjustment Rule Settings */
+ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+- if (0 == result) {
+- struct amdgpu_device *adev = hwmgr->adev;
++ if (result)
++ goto fail;
+
+- data->is_tlu_enabled = false;
++ data->is_tlu_enabled = false;
+
+- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
++ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ SMU7_MAX_HARDWARE_POWERLEVELS;
+- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
++ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
++ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+- data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+- data->pcie_spc_cap = 20;
+- else
+- data->pcie_spc_cap = 16;
+- data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+-
+- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+- hwmgr->platform_descriptor.clockStep.engineClock = 500;
+- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+- smu7_thermal_parameter_init(hwmgr);
+- } else {
+- /* Ignore return value in here, we are cleaning up a mess. */
+- smu7_hwmgr_backend_fini(hwmgr);
+- }
++ data->pcie_gen_cap = adev->pm.pcie_gen_mask;
++ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
++ data->pcie_spc_cap = 20;
++ else
++ data->pcie_spc_cap = 16;
++ data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
++
++ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
++ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
++ hwmgr->platform_descriptor.clockStep.engineClock = 500;
++ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
++ smu7_thermal_parameter_init(hwmgr);
+
+ result = smu7_update_edc_leakage_table(hwmgr);
+ if (result)
+- return result;
++ goto fail;
+
+ return 0;
++fail:
++ smu7_hwmgr_backend_fini(hwmgr);
++ return result;
+ }
+
+ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
+@@ -3314,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ const struct pp_power_state *current_ps)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+- struct smu7_power_state *smu7_ps =
+- cast_phw_smu7_power_state(&request_ps->hardware);
++ struct smu7_power_state *smu7_ps;
+ uint32_t sclk;
+ uint32_t mclk;
+ struct PP_Clocks minimum_clocks = {0};
+@@ -3332,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ uint32_t latency;
+ bool latency_allowed = false;
+
++ smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
++ if (!smu7_ps)
++ return -EINVAL;
++
+ data->battery_state = (PP_StateUILabel_Battery ==
+ request_ps->classification.ui_label);
+ data->mclk_ignore_signal = false;
+@@ -3997,6 +4000,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ uint32_t sclk, mclk, activity_percent;
+ uint32_t offset, val_vid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
++ struct amdgpu_device *adev = hwmgr->adev;
+
+ /* size must be at least 4 bytes for all sensors */
+ if (*size < 4)
+@@ -4040,7 +4044,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ *size = 4;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+- return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
++ if ((adev->asic_type != CHIP_HAWAII) &&
++ (adev->asic_type != CHIP_BONAIRE) &&
++ (adev->asic_type != CHIP_FIJI) &&
++ (adev->asic_type != CHIP_TONGA))
++ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
++ else
++ return -EOPNOTSUPP;
++ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
++ if ((adev->asic_type != CHIP_HAWAII) &&
++ (adev->asic_type != CHIP_BONAIRE) &&
++ (adev->asic_type != CHIP_FIJI) &&
++ (adev->asic_type != CHIP_TONGA))
++ return -EOPNOTSUPP;
++ else
++ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
+ (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
+@@ -5623,7 +5641,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
+ mode = input[size];
+ switch (mode) {
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+- if (size < 8 && size != 0)
++ if (size != 8 && size != 0)
+ return -EINVAL;
+ /* If only CUSTOM is passed in, use the saved values. Check
+ * that we actually have a CUSTOM profile by ensuring that
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index b015a601b385ae..7e119742087325 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ unsigned long clock = 0;
+ uint32_t level;
++ int ret;
+
+ if (NULL == table || table->count <= 0)
+ return -EINVAL;
+@@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ data->uvd_dpm.soft_min_clk = 0;
+ data->uvd_dpm.hard_min_clk = 0;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++ if (ret)
++ return ret;
+
+ if (level < table->count)
+ clock = table->entries[level].vclk;
+@@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ unsigned long clock = 0;
+ uint32_t level;
++ int ret;
+
+ if (NULL == table || table->count <= 0)
+ return -EINVAL;
+@@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ data->vce_dpm.soft_min_clk = 0;
+ data->vce_dpm.hard_min_clk = 0;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++ if (ret)
++ return ret;
+
+ if (level < table->count)
+ clock = table->entries[level].ecclk;
+@@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+ unsigned long clock = 0;
+ uint32_t level;
++ int ret;
+
+ if (NULL == table || table->count <= 0)
+ return -EINVAL;
+@@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ data->acp_dpm.soft_min_clk = 0;
+ data->acp_dpm.hard_min_clk = 0;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++ if (ret)
++ return ret;
+
+ if (level < table->count)
+ clock = table->entries[level].acpclk;
+@@ -1065,16 +1074,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *prequest_ps,
+ const struct pp_power_state *pcurrent_ps)
+ {
+- struct smu8_power_state *smu8_ps =
+- cast_smu8_power_state(&prequest_ps->hardware);
+-
+- const struct smu8_power_state *smu8_current_ps =
+- cast_const_smu8_power_state(&pcurrent_ps->hardware);
+-
++ struct smu8_power_state *smu8_ps;
++ const struct smu8_power_state *smu8_current_ps;
+ struct smu8_hwmgr *data = hwmgr->backend;
+ struct PP_Clocks clocks = {0, 0, 0, 0};
+ bool force_high;
+
++ smu8_ps = cast_smu8_power_state(&prequest_ps->hardware);
++ smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware);
++
++ if (!smu8_ps || !smu8_current_ps)
++ return -EINVAL;
++
+ smu8_ps->need_dfs_bypass = true;
+
+ data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index 6d6bc6a380b365..6c87b3d4ab362f 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -354,13 +354,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ struct vega10_hwmgr *data = hwmgr->backend;
+- int i;
+ uint32_t sub_vendor_id, hw_revision;
+ uint32_t top32, bottom32;
+ struct amdgpu_device *adev = hwmgr->adev;
++ int ret, i;
+
+ vega10_initialize_power_tune_defaults(hwmgr);
+
+@@ -485,9 +485,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ if (data->registry_data.vr0hot_enabled)
+ data->smu_features[GNLD_VR0HOT].supported = true;
+
+- smum_send_msg_to_smc(hwmgr,
++ ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetSmuVersion,
+ &hwmgr->smu_version);
++ if (ret)
++ return ret;
++
+ /* ACG firmware has major version 5 */
+ if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
+ data->smu_features[GNLD_ACG].supported = true;
+@@ -505,10 +508,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ data->smu_features[GNLD_PCC_LIMIT].supported = true;
+
+ /* Get the SN to turn into a Unique ID */
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++ if (ret)
++ return ret;
++
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++ if (ret)
++ return ret;
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++ return 0;
+ }
+
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+@@ -882,7 +891,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+
+ vega10_set_features_platform_caps(hwmgr);
+
+- vega10_init_dpm_defaults(hwmgr);
++ result = vega10_init_dpm_defaults(hwmgr);
++ if (result)
++ return result;
+
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+ /* Get leakage voltage based on leakage ID. */
+@@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+ {
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t agc_btc_response;
++ int ret;
+
+ if (data->smu_features[GNLD_ACG].supported) {
+ if (0 == vega10_enable_smc_features(hwmgr, true,
+ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
+ data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++ if (ret)
++ return ret;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++ if (ret)
++ agc_btc_response = 0;
+
+ if (1 == agc_btc_response) {
+ if (1 == data->acg_loop_state)
+@@ -2571,8 +2587,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ }
+ }
+
+- pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
++ result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_SVID2, &voltage_table);
++ PP_ASSERT_WITH_CODE(!result,
++ "Failed to get voltage table!",
++ return result);
+ pp_table->MaxVidStep = voltage_table.max_vid_step;
+
+ pp_table->GfxDpmVoltageMode =
+@@ -3259,8 +3278,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ const struct pp_power_state *current_ps)
+ {
+ struct amdgpu_device *adev = hwmgr->adev;
+- struct vega10_power_state *vega10_ps =
+- cast_phw_vega10_power_state(&request_ps->hardware);
++ struct vega10_power_state *vega10_ps;
+ uint32_t sclk;
+ uint32_t mclk;
+ struct PP_Clocks minimum_clocks = {0};
+@@ -3278,6 +3296,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+ uint32_t latency;
+
++ vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware);
++ if (!vega10_ps)
++ return -EINVAL;
++
+ data->battery_state = (PP_StateUILabel_Battery ==
+ request_ps->classification.ui_label);
+
+@@ -3415,13 +3437,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
+ const struct vega10_power_state *vega10_ps =
+ cast_const_phw_vega10_power_state(states->pnew_state);
+ struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+- uint32_t sclk = vega10_ps->performance_levels
+- [vega10_ps->performance_level_count - 1].gfx_clock;
+ struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+- uint32_t mclk = vega10_ps->performance_levels
+- [vega10_ps->performance_level_count - 1].mem_clock;
++ uint32_t sclk, mclk;
+ uint32_t i;
+
++ if (vega10_ps == NULL)
++ return -EINVAL;
++ sclk = vega10_ps->performance_levels
++ [vega10_ps->performance_level_count - 1].gfx_clock;
++ mclk = vega10_ps->performance_levels
++ [vega10_ps->performance_level_count - 1].mem_clock;
++
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+@@ -3728,6 +3754,9 @@ static int vega10_generate_dpm_level_enable_mask(
+ cast_const_phw_vega10_power_state(states->pnew_state);
+ int i;
+
++ if (vega10_ps == NULL)
++ return -EINVAL;
++
+ PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
+ "Attempt to Trim DPM States Failed!",
+ return -1);
+@@ -3900,11 +3929,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
+ uint32_t *query)
+ {
+ uint32_t value;
++ int ret;
+
+ if (!query)
+ return -EINVAL;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++ if (ret)
++ return ret;
+
+ /* SMC returns actual watts; keep consistent with legacy ASICs, low 8 bits as 8 fractional bits */
+ *query = value << 8;
+@@ -4800,14 +4832,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ PPTable_t *pptable = &(data->smc_state_table.pp_table);
+
+- int i, now, size = 0, count = 0;
++ int i, ret, now, size = 0, count = 0;
+
+ switch (type) {
+ case PP_SCLK:
+ if (data->registry_data.sclk_dpm_key_disabled)
+ break;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++ if (ret)
++ break;
+
+ if (hwmgr->pp_one_vf &&
+ (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+@@ -4823,7 +4857,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ if (data->registry_data.mclk_dpm_key_disabled)
+ break;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++ if (ret)
++ break;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4834,7 +4870,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ if (data->registry_data.socclk_dpm_key_disabled)
+ break;
+
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++ if (ret)
++ break;
+
+ for (i = 0; i < soc_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4845,8 +4883,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ if (data->registry_data.dcefclk_dpm_key_disabled)
+ break;
+
+- smum_send_msg_to_smc_with_parameter(hwmgr,
++ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
++ if (ret)
++ break;
+
+ for (i = 0; i < dcef_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4995,6 +5035,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
+
+ vega10_psa = cast_const_phw_vega10_power_state(pstate1);
+ vega10_psb = cast_const_phw_vega10_power_state(pstate2);
++ if (vega10_psa == NULL || vega10_psb == NULL)
++ return -EINVAL;
+
+ /* If the two states don't even have the same number of performance levels
+ * they cannot be the same state.
+@@ -5128,6 +5170,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+ return -EINVAL;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return -EINVAL;
+
+ vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].gfx_clock =
+@@ -5179,6 +5223,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+ return -EINVAL;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return -EINVAL;
+
+ vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].mem_clock =
+@@ -5420,6 +5466,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+ return;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return;
++
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5442,6 +5491,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+
+ ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return;
++
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5632,6 +5684,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
+ return -EINVAL;
+
+ vega10_ps = cast_const_phw_vega10_power_state(state);
++ if (vega10_ps == NULL)
++ return -EINVAL;
+
+ i = index > vega10_ps->performance_level_count - 1 ?
+ vega10_ps->performance_level_count - 1 : index;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index 460067933de2ef..069c0f5205e004 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -293,12 +293,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t top32, bottom32;
+- int i;
++ int i, ret;
+
+ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ FEATURE_DPM_PREFETCHER_BIT;
+@@ -364,10 +364,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ }
+
+ /* Get the SN to turn into a Unique ID */
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++ if (ret)
++ return ret;
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++ if (ret)
++ return ret;
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++
++ return 0;
+ }
+
+ static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+@@ -410,7 +416,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+
+ vega12_set_features_platform_caps(hwmgr);
+
+- vega12_init_dpm_defaults(hwmgr);
++ result = vega12_init_dpm_defaults(hwmgr);
++ if (result) {
++ pr_err("%s failed\n", __func__);
++ return result;
++ }
+
+ /* Parse pptable data read from VBIOS */
+ vega12_set_private_data_based_on_pptable(hwmgr);
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 3b33af30eb0fbc..9fdb9990d18829 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -328,12 +328,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+ return 0;
+ }
+
+-static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t top32, bottom32;
+- int i;
++ int i, ret;
+
+ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
+ FEATURE_DPM_PREFETCHER_BIT;
+@@ -404,10 +404,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ }
+
+ /* Get the SN to turn into a Unique ID */
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++ if (ret)
++ return ret;
++
++ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++ if (ret)
++ return ret;
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++
++ return 0;
+ }
+
+ static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+@@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data;
+ struct amdgpu_device *adev = hwmgr->adev;
++ int result;
+
+ data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+@@ -452,8 +460,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+
+ vega20_set_features_platform_caps(hwmgr);
+
+- vega20_init_dpm_defaults(hwmgr);
+-
++ result = vega20_init_dpm_defaults(hwmgr);
++ if (result) {
++ pr_err("%s failed\n", __func__);
++ return result;
++ }
+ /* Parse pptable data read from VBIOS */
+ vega20_set_private_data_based_on_pptable(hwmgr);
+
+@@ -4091,9 +4102,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+- if (size == 0 && !data->is_custom_profile_set)
++
++ if (size != 10 && size != 0)
+ return -EINVAL;
+- if (size < 10 && size != 0)
++
++ if (size == 0 && !data->is_custom_profile_set)
+ return -EINVAL;
+
+ result = vega20_get_activity_monitor_coeff(hwmgr,
+@@ -4155,6 +4168,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ break;
++ default:
++ return -EINVAL;
+ }
+
+ result = vega20_set_activity_monitor_coeff(hwmgr,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+index a70d7389664904..f9c0f117725dd1 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+@@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ uint64_t *features_enabled)
+ {
+ uint32_t enabled_features;
++ int ret;
+
+ if (features_enabled == NULL)
+ return -EINVAL;
+
+- smum_send_msg_to_smc(hwmgr,
++ ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeatures,
+ &enabled_features);
++ if (ret)
++ return ret;
++
+ *features_enabled = enabled_features;
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index f005a90c35af43..4d17b6958397ed 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -24,6 +24,7 @@
+
+ #include <linux/firmware.h>
+ #include <linux/pci.h>
++#include <linux/power_supply.h>
+ #include <linux/reboot.h>
+
+ #include "amdgpu.h"
+@@ -741,16 +742,8 @@ static int smu_late_init(void *handle)
+ * handle the switch automatically. Driver involvement
+ * is unnecessary.
+ */
+- if (!smu->dc_controlled_by_gpio) {
+- ret = smu_set_power_source(smu,
+- adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+- SMU_POWER_SOURCE_DC);
+- if (ret) {
+- dev_err(adev->dev, "Failed to switch to %s mode!\n",
+- adev->pm.ac_power ? "AC" : "DC");
+- return ret;
+- }
+- }
++ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
++ smu_set_ac_dc(smu);
+
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
+@@ -1232,7 +1225,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ {
+ struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
+- uint32_t pcie_gen = 0, pcie_width = 0;
++ uint8_t pcie_gen = 0, pcie_width = 0;
+ uint64_t features_supported;
+ int ret = 0;
+
+@@ -1848,12 +1841,13 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
+ }
+
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+- enum amd_dpm_forced_level level,
+- bool skip_display_settings)
++ enum amd_dpm_forced_level level,
++ bool skip_display_settings,
++ bool init)
+ {
+ int ret = 0;
+ int index = 0;
+- long workload;
++ long workload[1];
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!skip_display_settings) {
+@@ -1893,10 +1887,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+- workload = smu->workload_setting[index];
++ workload[0] = smu->workload_setting[index];
+
+- if (smu->power_profile_mode != workload)
+- smu_bump_power_profile_mode(smu, &workload, 0);
++ if (init || smu->power_profile_mode != workload[0])
++ smu_bump_power_profile_mode(smu, workload, 0);
+ }
+
+ return ret;
+@@ -1916,11 +1910,13 @@ static int smu_handle_task(struct smu_context *smu,
+ ret = smu_pre_display_config_changed(smu);
+ if (ret)
+ return ret;
+- ret = smu_adjust_power_state_dynamic(smu, level, false);
++ ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ break;
+ case AMD_PP_TASK_COMPLETE_INIT:
++ ret = smu_adjust_power_state_dynamic(smu, level, true, true);
++ break;
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+- ret = smu_adjust_power_state_dynamic(smu, level, true);
++ ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ break;
+ default:
+ break;
+@@ -1946,7 +1942,7 @@ static int smu_switch_power_profile(void *handle,
+ {
+ struct smu_context *smu = handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+- long workload;
++ long workload[1];
+ uint32_t index;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+@@ -1959,17 +1955,17 @@ static int smu_switch_power_profile(void *handle,
+ smu->workload_mask &= ~(1 << smu->workload_prority[type]);
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+- workload = smu->workload_setting[index];
++ workload[0] = smu->workload_setting[index];
+ } else {
+ smu->workload_mask |= (1 << smu->workload_prority[type]);
+ index = fls(smu->workload_mask);
+ index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+- workload = smu->workload_setting[index];
++ workload[0] = smu->workload_setting[index];
+ }
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+- smu_bump_power_profile_mode(smu, &workload, 0);
++ smu_bump_power_profile_mode(smu, workload, 0);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 5a52098bcf1664..72ed836328966c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -844,7 +844,7 @@ struct pptable_funcs {
+ * &pcie_gen_cap: Maximum allowed PCIe generation.
+ * &pcie_width_cap: Maximum allowed PCIe width.
+ */
+- int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
++ int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
+
+ /**
+ * @i2c_init: Initialize i2c.
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 355c156d871aff..cc02f979e9e984 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -296,8 +296,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ uint32_t pptable_id);
+
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap);
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap);
+
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+index 704a2b577a0e2f..4c58c2cd26d886 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+@@ -2356,8 +2356,8 @@ static uint16_t arcturus_get_current_pcie_link_speed(struct smu_context *smu)
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+- if ((esm_ctrl >> 15) & 0x1FFFF)
+- return (uint16_t)(((esm_ctrl >> 8) & 0x3F) + 128);
++ if ((esm_ctrl >> 15) & 0x1)
++ return (uint16_t)(((esm_ctrl >> 8) & 0x7F) + 128);
+
+ return smu_v11_0_get_current_pcie_link_speed(smu);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 18487ae10bcff4..b1b23233635a64 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -1222,19 +1222,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
+ value);
+ }
+
+-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
++static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+ {
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ DpmDescriptor_t *dpm_desc = NULL;
+- uint32_t clk_index = 0;
++ int clk_index = 0;
+
+ clk_index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_CLK,
+ clk_type);
++ if (clk_index < 0)
++ return clk_index;
++
+ dpm_desc = &pptable->DpmDescriptor[clk_index];
+
+ /* 0 - Fine grained DPM, 1 - Discrete DPM */
+- return dpm_desc->SnapToDiscrete == 0;
++ return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
+ }
+
+ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
+@@ -1290,7 +1293,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
+ if (ret)
+ return ret;
+
+- if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++ ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++ if (ret < 0)
++ return ret;
++
++ if (!ret) {
+ for (i = 0; i < count; i++) {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type, i, &value);
+@@ -1499,7 +1506,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
+ if (ret)
+ return size;
+
+- if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++ ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++ if (ret < 0)
++ return ret;
++
++ if (!ret) {
+ for (i = 0; i < count; i++) {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (ret)
+@@ -1668,7 +1679,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
+ case SMU_UCLK:
+ case SMU_FCLK:
+ /* There are only 2 levels for fine-grained DPM */
+- if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
++ ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
++ if (ret < 0)
++ return ret;
++
++ if (ret) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+@@ -2376,8 +2391,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
+ }
+
+ static int navi10_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index da2860da60188e..a7f4f82d23b4b9 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2085,14 +2085,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ #define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ uint8_t *table_member1, *table_member2;
+- uint32_t min_gen_speed, max_gen_speed;
+- uint32_t min_lane_width, max_lane_width;
++ uint8_t min_gen_speed, max_gen_speed;
++ uint8_t min_lane_width, max_lane_width;
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+@@ -2108,7 +2108,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ min_lane_width = min_lane_width > max_lane_width ?
+ max_lane_width : min_lane_width;
+
+- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ pcie_table->pcie_gen[0] = max_gen_speed;
+ pcie_table->pcie_lane[0] = max_lane_width;
+ } else {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index aa4a5498a12f73..123c19bb622808 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -1441,10 +1441,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ schedule_work(&smu->interrupt_work);
++ adev->pm.ac_power = true;
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ schedule_work(&smu->interrupt_work);
++ adev->pm.ac_power = false;
+ break;
+ case 0x7:
+ /*
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 201cec59984281..f46cda88948312 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -1009,6 +1009,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
+ }
+ }
+ if (min) {
++ ret = vangogh_get_profiling_clk_mask(smu,
++ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
++ NULL,
++ NULL,
++ &mclk_mask,
++ &fclk_mask,
++ &soc_mask);
++ if (ret)
++ goto failed;
++
++ vclk_mask = dclk_mask = 0;
++
+ switch (clk_type) {
+ case SMU_UCLK:
+ case SMU_MCLK:
+@@ -2481,6 +2493,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
+ start, &residency);
++ if (ret)
++ return ret;
+
+ if (!start)
+ adev->gfx.gfx_off_residency = residency;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index cc3169400c9b08..ded8952d984907 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -257,8 +257,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
+ }
+
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+- if (!smu_table->ecc_table)
++ if (!smu_table->ecc_table) {
++ kfree(smu_table->metrics_table);
++ kfree(smu_table->gpu_metrics_table);
+ return -ENOMEM;
++ }
+
+ return 0;
+ }
+@@ -1717,8 +1720,8 @@ static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+- if ((esm_ctrl >> 15) & 0x1FFFF)
+- return (((esm_ctrl >> 8) & 0x3F) + 128);
++ if ((esm_ctrl >> 15) & 0x1)
++ return (((esm_ctrl >> 8) & 0x7F) + 128);
+
+ return smu_v13_0_get_current_pcie_link_speed(smu);
+ }
+@@ -1928,7 +1931,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+-
++ if (index < 0)
++ return -EINVAL;
+ mutex_lock(&smu->message_lock);
+ if (smu_version >= 0x00441400) {
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 0232adb95df3a8..c0adfa46ac7896 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -79,8 +79,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
+ #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+ #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+ #define smnPCIE_LC_SPEED_CNTL 0x11140290
+-#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
+-#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
++#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
++#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
+
+ static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+
+@@ -1377,10 +1377,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ smu_v13_0_ack_ac_dc_interrupt(smu);
++ adev->pm.ac_power = true;
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ smu_v13_0_ack_ac_dc_interrupt(smu);
++ adev->pm.ac_power = false;
+ break;
+ case 0x7:
+ /*
+@@ -2420,8 +2422,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ }
+
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+@@ -2430,7 +2432,10 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (!num_of_levels)
++ return 0;
++
++ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 3903a47669e437..4022dd44ebb2b3 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -352,12 +352,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
+- powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+- smu_baco->maco_support = true;
++ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++ smu_baco->maco_support = true;
++ }
+
+ /*
+ * We are in the transition to a new OD mechanism.
+@@ -2163,38 +2163,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ }
+ }
+
+- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+- (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+- ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+- ret = smu_cmn_update_table(smu,
+- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+- WORKLOAD_PPLIB_COMPUTE_BIT,
+- (void *)(&activity_monitor_external),
+- false);
+- if (ret) {
+- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+- return ret;
+- }
+-
+- ret = smu_cmn_update_table(smu,
+- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+- WORKLOAD_PPLIB_CUSTOM_BIT,
+- (void *)(&activity_monitor_external),
+- true);
+- if (ret) {
+- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+- return ret;
+- }
+-
+- workload_type = smu_cmn_to_asic_specific_index(smu,
+- CMN2ASIC_MAPPING_WORKLOAD,
+- PP_SMC_POWER_PROFILE_CUSTOM);
+- } else {
+- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+- workload_type = smu_cmn_to_asic_specific_index(smu,
++ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
+- }
+
+ if (workload_type < 0)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 626591f54bc497..1fd4702dc63936 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -226,8 +226,20 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+- if (!en && !adev->in_s0ix)
++ if (!en && !adev->in_s0ix) {
++ if (adev->in_s4) {
++ /* Add a GFX reset as a workaround just before sending the
++ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
++ * an invalid state.
++ */
++ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
++ SMU_RESET_MODE_2, NULL);
++ if (ret)
++ return ret;
++ }
++
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+index de80e191a92c49..44c5f8585f1ee7 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+@@ -1941,8 +1941,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+- if ((esm_ctrl >> 15) & 0x1FFFF)
+- return (((esm_ctrl >> 8) & 0x3F) + 128);
++ if ((esm_ctrl >> 15) & 0x1)
++ return (((esm_ctrl >> 8) & 0x7F) + 128);
+
+ speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+@@ -1968,8 +1968,10 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
+
+ metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+- if (ret)
++ if (ret) {
++ kfree(metrics);
+ return ret;
++ }
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+@@ -2037,6 +2039,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
+ return sizeof(struct gpu_metrics_v1_3);
+ }
+
++static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
++{
++ struct amdgpu_device *adev = smu->adev;
++ int i;
++
++ for (i = 0; i < 16; i++)
++ pci_write_config_dword(adev->pdev, i * 4,
++ adev->pdev->saved_config_space[i]);
++ pci_restore_msi_state(adev->pdev);
++}
++
+ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+ {
+ int ret = 0, index;
+@@ -2045,6 +2058,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
++ if (index < 0)
++ return index;
+
+ mutex_lock(&smu->message_lock);
+
+@@ -2058,6 +2073,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+
++ /* Certain platforms have switches which assign virtual BAR values to
++ * devices. The OS uses the virtual BAR values and the device behind the
++ * switch is assigned another BAR value. When the device's config space
++ * registers are queried, the switch returns the virtual BAR values. When
++ * a mode-2 reset is performed, the switch is unaware of it and will
++ * continue to return the same virtual values to the OS. This affects the
++ * pci_restore_config_space() API, as it doesn't write the saved value if
++ * the current value read from config space is the same as what is
++ * saved. As a workaround, make sure the config space is always
++ * restored.
++ */
++ if (!(adev->flags & AMD_IS_APU))
++ smu_v13_0_6_restore_pci_config(smu);
++
+ dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ do {
+ ret = smu_cmn_wait_for_response(smu);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 94ef5b4d116d7c..51ae41cb43ea0e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -341,12 +341,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
+ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
+- powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+- if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+- smu_baco->maco_support = true;
++ if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++ && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
++ smu_baco->maco_support = true;
++ }
+
+ #if 0
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+index 2c661f28410eda..b645c5998230b0 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+@@ -5,6 +5,7 @@
+ *
+ */
+ #include <linux/clk.h>
++#include <linux/of.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/spinlock.h>
+
+@@ -610,12 +611,34 @@ get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc)
+ return NULL;
+ }
+
++static int komeda_attach_bridge(struct device *dev,
++ struct komeda_pipeline *pipe,
++ struct drm_encoder *encoder)
++{
++ struct drm_bridge *bridge;
++ int err;
++
++ bridge = devm_drm_of_get_bridge(dev, pipe->of_node,
++ KOMEDA_OF_PORT_OUTPUT, 0);
++ if (IS_ERR(bridge))
++ return dev_err_probe(dev, PTR_ERR(bridge), "remote bridge not found for pipe: %s\n",
++ of_node_full_name(pipe->of_node));
++
++ err = drm_bridge_attach(encoder, bridge, NULL, 0);
++ if (err)
++ dev_err(dev, "bridge_attach() failed for pipe: %s\n",
++ of_node_full_name(pipe->of_node));
++
++ return err;
++}
++
+ static int komeda_crtc_add(struct komeda_kms_dev *kms,
+ struct komeda_crtc *kcrtc)
+ {
+ struct drm_crtc *crtc = &kcrtc->base;
+ struct drm_device *base = &kms->base;
+- struct drm_bridge *bridge;
++ struct komeda_pipeline *pipe = kcrtc->master;
++ struct drm_encoder *encoder = &kcrtc->encoder;
+ int err;
+
+ err = drm_crtc_init_with_planes(base, crtc,
+@@ -626,27 +649,25 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
+
+ drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs);
+
+- crtc->port = kcrtc->master->of_output_port;
++ crtc->port = pipe->of_output_port;
+
+ /* Construct an encoder for each pipeline and attach it to the remote
+ * bridge
+ */
+ kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc);
+- err = drm_simple_encoder_init(base, &kcrtc->encoder,
+- DRM_MODE_ENCODER_TMDS);
++ err = drm_simple_encoder_init(base, encoder, DRM_MODE_ENCODER_TMDS);
+ if (err)
+ return err;
+
+- bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node,
+- KOMEDA_OF_PORT_OUTPUT, 0);
+- if (IS_ERR(bridge))
+- return PTR_ERR(bridge);
+-
+- err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0);
++ if (pipe->of_output_links[0]) {
++ err = komeda_attach_bridge(base->dev, pipe, encoder);
++ if (err)
++ return err;
++ }
+
+ drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
+
+- return err;
++ return 0;
+ }
+
+ int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+index 9299026701f348..1a5fa7df284dec 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+@@ -160,6 +160,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
+ struct drm_plane *plane;
+ struct list_head zorder_list;
+ int order = 0, err;
++ u32 slave_zpos = 0;
+
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
+ crtc->base.id, crtc->name);
+@@ -199,10 +200,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
+ plane_st->zpos, plane_st->normalized_zpos);
+
+ /* calculate max slave zorder */
+- if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
++ if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
++ slave_zpos = plane_st->normalized_zpos;
++ if (to_kplane_st(plane_st)->layer_split)
++ slave_zpos++;
+ kcrtc_st->max_slave_zorder =
+- max(plane_st->normalized_zpos,
+- kcrtc_st->max_slave_zorder);
++ max(slave_zpos, kcrtc_st->max_slave_zorder);
++ }
+ }
+
+ crtc_st->zpos_changed = true;
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 4618687a8f4d64..f4e76b46ca327a 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
+ u32 avail_scalers;
+
+ pipe_st = komeda_pipeline_get_state(c->pipeline, state);
+- if (!pipe_st)
++ if (IS_ERR_OR_NULL(pipe_st))
+ return NULL;
+
+ avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
+@@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ return 0;
+ }
+
+-static void
++static int
+ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+ {
+@@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
++ if (PTR_ERR(c_st) == -EDEADLK)
++ return -EDEADLK;
+ WARN_ON(IS_ERR(c_st));
+ }
++
++ return 0;
+ }
+
+ /* release unclaimed pipeline resource */
+@@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+- komeda_pipeline_unbound_components(pipe, st);
++ return komeda_pipeline_unbound_components(pipe, st);
+
+- return 0;
+ }
+
+ /* Since standalone disabled components must be disabled separately and in the
+diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
+index 626709bec6f5ff..2577f0cef8fcda 100644
+--- a/drivers/gpu/drm/arm/malidp_mw.c
++++ b/drivers/gpu/drm/arm/malidp_mw.c
+@@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(connector->state);
+- __drm_atomic_helper_connector_reset(connector, &mw_state->base);
++ connector->state = NULL;
++
++ if (mw_state)
++ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+ }
+
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+index d207b03f8357c7..78122b35a0cbb3 100644
+--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+@@ -358,11 +358,18 @@ static void aspeed_gfx_remove(struct platform_device *pdev)
+ sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
+ drm_dev_unregister(drm);
+ aspeed_gfx_unload(drm);
++ drm_atomic_helper_shutdown(drm);
++}
++
++static void aspeed_gfx_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ }
+
+ static struct platform_driver aspeed_gfx_platform_driver = {
+ .probe = aspeed_gfx_probe,
+ .remove_new = aspeed_gfx_remove,
++ .shutdown = aspeed_gfx_shutdown,
+ .driver = {
+ .name = "aspeed_gfx",
+ .of_match_table = aspeed_gfx_match,
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index fdd9a493aa9c08..c6f226b6f08136 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
+ {
+ struct ast_device *ast = to_ast_device(dev);
+ u8 video_on_off = on;
++ u32 i = 0;
+
+ // Video On/Off
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
+@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
+ ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
+ // wait 1 ms
+ mdelay(1);
++ if (++i > 200)
++ break;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 848a9f1403e896..f7053f2972bb92 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -172,6 +172,17 @@ to_ast_sil164_connector(struct drm_connector *connector)
+ return container_of(connector, struct ast_sil164_connector, base);
+ }
+
++struct ast_bmc_connector {
++ struct drm_connector base;
++ struct drm_connector *physical_connector;
++};
++
++static inline struct ast_bmc_connector *
++to_ast_bmc_connector(struct drm_connector *connector)
++{
++ return container_of(connector, struct ast_bmc_connector, base);
++}
++
+ /*
+ * Device
+ */
+@@ -216,7 +227,7 @@ struct ast_device {
+ } astdp;
+ struct {
+ struct drm_encoder encoder;
+- struct drm_connector connector;
++ struct ast_bmc_connector bmc_connector;
+ } bmc;
+ } output;
+
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 32f04ec6c386fa..3de0f457fff6ab 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1767,6 +1767,30 @@ static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+ };
+
++static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
++ struct drm_modeset_acquire_ctx *ctx,
++ bool force)
++{
++ struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
++ struct drm_connector *physical_connector = bmc_connector->physical_connector;
++
++ /*
++ * Most user-space compositors cannot handle more than one connected
++ * connector per CRTC. Hence, we only mark the BMC as connected if the
++ * physical connector is disconnected. If the physical connector's status
++ * is connected or unknown, the BMC remains disconnected. This has no
++ * effect on the output of the BMC.
++ *
++ * FIXME: Remove this logic once user-space compositors can handle more
++ * than one connector per CRTC. The BMC should always be connected.
++ */
++
++ if (physical_connector && physical_connector->status == connector_status_disconnected)
++ return connector_status_connected;
++
++ return connector_status_disconnected;
++}
++
+ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
+ {
+ return drm_add_modes_noedid(connector, 4096, 4096);
+@@ -1774,6 +1798,7 @@ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
+
+ static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
+ .get_modes = ast_bmc_connector_helper_get_modes,
++ .detect_ctx = ast_bmc_connector_helper_detect_ctx,
+ };
+
+ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
+@@ -1784,12 +1809,33 @@ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ };
+
+-static int ast_bmc_output_init(struct ast_device *ast)
++static int ast_bmc_connector_init(struct drm_device *dev,
++ struct ast_bmc_connector *bmc_connector,
++ struct drm_connector *physical_connector)
++{
++ struct drm_connector *connector = &bmc_connector->base;
++ int ret;
++
++ ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
++ DRM_MODE_CONNECTOR_VIRTUAL);
++ if (ret)
++ return ret;
++
++ drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
++
++ bmc_connector->physical_connector = physical_connector;
++
++ return 0;
++}
++
++static int ast_bmc_output_init(struct ast_device *ast,
++ struct drm_connector *physical_connector)
+ {
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.bmc.encoder;
+- struct drm_connector *connector = &ast->output.bmc.connector;
++ struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
++ struct drm_connector *connector = &bmc_connector->base;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder,
+@@ -1799,13 +1845,10 @@ static int ast_bmc_output_init(struct ast_device *ast)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+- ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
+- DRM_MODE_CONNECTOR_VIRTUAL);
++ ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
+ if (ret)
+ return ret;
+
+- drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
+-
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+@@ -1864,6 +1907,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
+ int ast_mode_config_init(struct ast_device *ast)
+ {
+ struct drm_device *dev = &ast->base;
++ struct drm_connector *physical_connector = NULL;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+@@ -1904,23 +1948,27 @@ int ast_mode_config_init(struct ast_device *ast)
+ ret = ast_vga_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.vga.vga_connector.base;
+ }
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
+ ret = ast_sil164_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.sil164.sil164_connector.base;
+ }
+ if (ast->tx_chip_types & AST_TX_DP501_BIT) {
+ ret = ast_dp501_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.dp501.connector;
+ }
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
+ ret = ast_astdp_output_init(ast);
+ if (ret)
+ return ret;
++ physical_connector = &ast->output.astdp.connector;
+ }
+- ret = ast_bmc_output_init(ast);
++ ret = ast_bmc_output_init(ast, physical_connector);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index 44a660a4bdbfc4..3e6a4e2044c0eb 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -181,6 +181,7 @@ config DRM_NWL_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ select MFD_SYSCON
+ select MULTIPLEXER
+@@ -227,6 +228,7 @@ config DRM_SAMSUNG_DSIM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ The Samsung MIPI DSIM bridge controller driver.
+@@ -311,6 +313,7 @@ config DRM_TOSHIBA_TC358768
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
++ select VIDEOMODE_HELPERS
+ help
+ Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index 2611afd2c1c136..ef2b6ce544d0a8 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -1291,17 +1291,6 @@ static int adv7511_probe(struct i2c_client *i2c)
+
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
+- if (i2c->irq) {
+- init_waitqueue_head(&adv7511->wq);
+-
+- ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+- adv7511_irq_handler,
+- IRQF_ONESHOT, dev_name(dev),
+- adv7511);
+- if (ret)
+- goto err_unregister_cec;
+- }
+-
+ adv7511_power_off(adv7511);
+
+ i2c_set_clientdata(i2c, adv7511);
+@@ -1325,6 +1314,17 @@ static int adv7511_probe(struct i2c_client *i2c)
+
+ adv7511_audio_init(dev, adv7511);
+
++ if (i2c->irq) {
++ init_waitqueue_head(&adv7511->wq);
++
++ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
++ adv7511_irq_handler,
++ IRQF_ONESHOT, dev_name(dev),
++ adv7511);
++ if (ret)
++ goto err_unregister_audio;
++ }
++
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535) {
+ ret = adv7533_attach_dsi(adv7511);
+ if (ret)
+diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+index 6a4f20fccf8417..7b0bc9704eacb1 100644
+--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+@@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ u32 status_reg;
+ u8 *buffer = msg->buffer;
+ unsigned int i;
+- int num_transferred = 0;
+ int ret;
+
+ /* Buffer size of AUX CH is 16 bytes */
+@@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ reg = buffer[i];
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 4 * i);
+- num_transferred++;
+ }
+ }
+
+@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 4 * i);
+ buffer[i] = (unsigned char)reg;
+- num_transferred++;
+ }
+ }
+
+@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+
+- return num_transferred > 0 ? num_transferred : -EBUSY;
++ return msg->size;
+
+ aux_error:
+ /* if aux err happen, reset aux */
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 51abe42c639e54..c1191ef5e8e679 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -1298,10 +1298,32 @@ static void anx7625_config(struct anx7625_data *ctx)
+ XTAL_FRQ_SEL, XTAL_FRQ_27M);
+ }
+
++static int anx7625_hpd_timer_config(struct anx7625_data *ctx)
++{
++ int ret;
++
++ /* Set irq detect window to 2ms */
++ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
++ HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
++ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
++ HPD_DET_TIMER_BIT8_15,
++ (HPD_TIME >> 8) & 0xFF);
++ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
++ HPD_DET_TIMER_BIT16_23,
++ (HPD_TIME >> 16) & 0xFF);
++
++ return ret;
++}
++
++static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
++{
++ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2);
++}
++
+ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+ {
+ struct device *dev = ctx->dev;
+- int ret;
++ int ret, val;
+
+ /* Reset main ocm */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
+@@ -1315,6 +1337,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+ DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
++
++ /*
++ * Make sure the HPD GPIO is already configured after OCM release before
++ * setting the HPD detect window register. Here we poll the status register
++ * for at most 40ms, then configure the HPD irq detect window register.
++ */
++ readx_poll_timeout(anx7625_read_hpd_gpio_config_status,
++ ctx, val,
++ ((val & HPD_SOURCE) || (val < 0)),
++ 2000, 2000 * 20);
++
++ /* Set HPD irq detect window to 2ms */
++ anx7625_hpd_timer_config(ctx);
+ }
+
+ static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
+@@ -1437,20 +1472,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
+
+ static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
+ {
+- int ret;
+-
+- /* Set irq detect window to 2ms */
+- ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+- HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+- HPD_DET_TIMER_BIT8_15,
+- (HPD_TIME >> 8) & 0xFF);
+- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+- HPD_DET_TIMER_BIT16_23,
+- (HPD_TIME >> 16) & 0xFF);
+- if (ret < 0)
+- return ret;
+-
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
+ }
+
+@@ -1741,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ int ret = 0;
+
++ mutex_lock(&ctx->aux_lock);
+ pm_runtime_get_sync(dev);
+ msg->reply = 0;
+ switch (request) {
+@@ -1757,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
+ msg->size, msg->buffer);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
++ mutex_unlock(&ctx->aux_lock);
+
+ return ret;
+ }
+@@ -2053,10 +2076,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
+ };
+
+ host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+- if (!host) {
+- DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
+- return -EPROBE_DEFER;
+- }
++ if (!host)
++ return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+@@ -2453,18 +2474,27 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
+ ctx->connector = NULL;
+ anx7625_dp_stop(ctx);
+
+- pm_runtime_put_sync(dev);
++ mutex_lock(&ctx->aux_lock);
++ pm_runtime_put_sync_suspend(dev);
++ mutex_unlock(&ctx->aux_lock);
+ }
+
++static void
++anx7625_audio_update_connector_status(struct anx7625_data *ctx,
++ enum drm_connector_status status);
++
+ static enum drm_connector_status
+ anx7625_bridge_detect(struct drm_bridge *bridge)
+ {
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
++ enum drm_connector_status status;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
+
+- return anx7625_sink_detect(ctx);
++ status = anx7625_sink_detect(ctx);
++ anx7625_audio_update_connector_status(ctx, status);
++ return status;
+ }
+
+ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
+@@ -2647,6 +2677,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+
+ mutex_init(&platform->lock);
+ mutex_init(&platform->hdcp_wq_lock);
++ mutex_init(&platform->aux_lock);
+
+ INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
+ platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
+index 5af819611ebce8..39ed35d3383633 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
+@@ -259,6 +259,10 @@
+ #define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in 0: no RX in */
+ #define AP_DISABLE_PD BIT(6)
+ #define AP_DISABLE_DISPLAY BIT(7)
++
++#define GPIO_CTRL_2 0x49
++#define HPD_SOURCE BIT(6)
++
+ /***************************************************************/
+ /* Register definition of device address 0x84 */
+ #define MIPI_PHY_CONTROL_3 0x03
+@@ -471,6 +475,8 @@ struct anx7625_data {
+ struct workqueue_struct *hdcp_workqueue;
+ /* Lock for hdcp work queue */
+ struct mutex hdcp_wq_lock;
++ /* Lock for aux transfer and disable */
++ struct mutex aux_lock;
+ char edid_block;
+ struct display_timing dt;
+ u8 display_timing_valid;
+diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
+index ec35215a20034c..cced81633ddcda 100644
+--- a/drivers/gpu/drm/bridge/cadence/Kconfig
++++ b/drivers/gpu/drm/bridge/cadence/Kconfig
+@@ -4,6 +4,7 @@ config DRM_CDNS_DSI
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ depends on OF
+ help
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index 6af565ac307ae3..858f5b6508491f 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2057,6 +2057,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+ mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+
+ mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
++ if (!mhdp_state->current_mode)
++ return;
++
+ drm_mode_set_name(mhdp_state->current_mode);
+
+ dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+index 946212a9559814..5e3b8edcf79487 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
+
+ static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
+ {
+- int ret, tries = 3;
++ int ret = -EINVAL;
++ int tries = 3;
+ u32 i;
+
+ for (i = 0; i < tries; i++) {
+diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
+index d205e755e524ae..5e295f86f2a73f 100644
+--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
++++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
+@@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn)
+
+ host = of_find_mipi_dsi_host_by_node(host_node);
+ of_node_put(host_node);
+- if (!host) {
+- dev_err(dev, "failed to find dsi host\n");
+- return -EPROBE_DEFER;
+- }
++ if (!host)
++ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 2f300f5ca051cc..4ad527fe04f27e 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -1306,9 +1306,15 @@ static void it6505_video_reset(struct it6505 *it6505)
+ it6505_link_reset_step_train(it6505);
+ it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE);
+ it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00);
+- it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
++
++ it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, TX_FIFO_RESET);
++ it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, 0x00);
++
+ it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO);
+ it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00);
++
++ it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
++ usleep_range(1000, 2000);
+ it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00);
+ }
+
+@@ -2240,14 +2246,15 @@ static void it6505_link_training_work(struct work_struct *work)
+ ret = it6505_link_start_auto_train(it6505);
+ DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d",
+ ret ? "pass" : "failed", it6505->auto_train_retry);
+- it6505->auto_train_retry--;
+
+ if (ret) {
++ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+ it6505_link_train_ok(it6505);
+- return;
++ } else {
++ it6505->auto_train_retry--;
++ it6505_dump(it6505);
+ }
+
+- it6505_dump(it6505);
+ }
+
+ static void it6505_plugged_status_to_codec(struct it6505 *it6505)
+@@ -2468,31 +2475,53 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505)
+ schedule_work(&it6505->link_works);
+ }
+
+-static void it6505_irq_video_fifo_error(struct it6505 *it6505)
++static bool it6505_test_bit(unsigned int bit, const unsigned int *addr)
+ {
+- struct device *dev = it6505->dev;
+-
+- DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt");
+- it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+- flush_work(&it6505->link_works);
+- it6505_stop_hdcp(it6505);
+- it6505_video_reset(it6505);
++ return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE));
+ }
+
+-static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505)
++static void it6505_irq_video_handler(struct it6505 *it6505, const int *int_status)
+ {
+ struct device *dev = it6505->dev;
++ int reg_0d, reg_int03;
+
+- DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt");
+- it6505->auto_train_retry = AUTO_TRAIN_RETRY;
+- flush_work(&it6505->link_works);
+- it6505_stop_hdcp(it6505);
+- it6505_video_reset(it6505);
+-}
++ /*
++ * When the video SCDT changes while the video is not stable,
++ * or on a video FIFO error, a video reset is needed
++ */
+
+-static bool it6505_test_bit(unsigned int bit, const unsigned int *addr)
+-{
+- return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE));
++ if ((!it6505_get_video_status(it6505) &&
++ (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status))) ||
++ (it6505_test_bit(BIT_INT_IO_FIFO_OVERFLOW,
++ (unsigned int *)int_status)) ||
++ (it6505_test_bit(BIT_INT_VID_FIFO_ERROR,
++ (unsigned int *)int_status))) {
++ it6505->auto_train_retry = AUTO_TRAIN_RETRY;
++ flush_work(&it6505->link_works);
++ it6505_stop_hdcp(it6505);
++ it6505_video_reset(it6505);
++
++ usleep_range(10000, 11000);
++
++ /*
++ * Clear the FIFO error IRQ to prevent a FIFO error -> reset loop;
++ * HW will trigger the SCDT change IRQ again once the video is stable
++ */
++
++ reg_int03 = it6505_read(it6505, INT_STATUS_03);
++ reg_0d = it6505_read(it6505, REG_SYSTEM_STS);
++
++ reg_int03 &= (BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW));
++ it6505_write(it6505, INT_STATUS_03, reg_int03);
++
++ DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", reg_int03);
++ DRM_DEV_DEBUG_DRIVER(dev, "reg0D = 0x%02x", reg_0d);
++
++ return;
++ }
++
++ if (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status))
++ it6505_irq_scdt(it6505);
+ }
+
+ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
+@@ -2505,15 +2534,12 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
+ } irq_vec[] = {
+ { BIT_INT_HPD, it6505_irq_hpd },
+ { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq },
+- { BIT_INT_SCDT, it6505_irq_scdt },
+ { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail },
+ { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done },
+ { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail },
+ { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check },
+ { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error },
+ { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail },
+- { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error },
+- { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow },
+ };
+ int int_status[3], i;
+
+@@ -2543,6 +2569,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
+ if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
+ irq_vec[i].handler(it6505);
+ }
++ it6505_irq_video_handler(it6505, (unsigned int *)int_status);
+ }
+
+ pm_runtime_put_sync(dev);
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 466641c77fe911..8f5846b76d5943 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
+ mutex_lock(&ctx->lock);
+ ret = it66121_preamble_ddc(ctx);
+ if (ret) {
+- edid = ERR_PTR(ret);
++ edid = NULL;
+ goto out_unlock;
+ }
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret) {
+- edid = ERR_PTR(ret);
++ edid = NULL;
+ goto out_unlock;
+ }
+
+@@ -1447,10 +1447,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ struct it66121_ctx *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+-
+- memcpy(buf, ctx->connector->eld,
+- min(sizeof(ctx->connector->eld), len));
+-
++ if (!ctx->connector) {
++ /* Pass an empty ELD if the connector is not available */
++ dev_dbg(dev, "No connector present, passing empty ELD data");
++ memset(buf, 0, len);
++ } else {
++ memcpy(buf, ctx->connector->eld,
++ min(sizeof(ctx->connector->eld), len));
++ }
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 4eaea67fb71c20..5e43a40a5d5221 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -45,7 +45,6 @@ struct lt8912 {
+
+ u8 data_lanes;
+ bool is_power_on;
+- bool is_attached;
+ };
+
+ static int lt8912_write_init_config(struct lt8912 *lt)
+@@ -412,50 +411,31 @@ static const struct drm_connector_funcs lt8912_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ };
+
+-static enum drm_mode_status
+-lt8912_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
+-{
+- if (mode->clock > 150000)
+- return MODE_CLOCK_HIGH;
+-
+- if (mode->hdisplay > 1920)
+- return MODE_BAD_HVALUE;
+-
+- if (mode->vdisplay > 1080)
+- return MODE_BAD_VVALUE;
+-
+- return MODE_OK;
+-}
+-
+ static int lt8912_connector_get_modes(struct drm_connector *connector)
+ {
+- struct edid *edid;
+- int ret = -1;
+- int num = 0;
++ const struct drm_edid *drm_edid;
+ struct lt8912 *lt = connector_to_lt8912(connector);
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
++ int ret, num;
+
+- edid = drm_bridge_get_edid(lt->hdmi_port, connector);
+- if (edid) {
+- drm_connector_update_edid_property(connector, edid);
+- num = drm_add_edid_modes(connector, edid);
+- } else {
+- return ret;
+- }
++ drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
++ drm_edid_connector_update(connector, drm_edid);
++ if (!drm_edid)
++ return 0;
++
++ num = drm_edid_connector_add_modes(connector);
+
+ ret = drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+- if (ret)
+- num = ret;
++ if (ret < 0)
++ num = 0;
+
+- kfree(edid);
++ drm_edid_free(drm_edid);
+ return num;
+ }
+
+ static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = {
+ .get_modes = lt8912_connector_get_modes,
+- .mode_valid = lt8912_connector_mode_valid,
+ };
+
+ static void lt8912_bridge_mode_set(struct drm_bridge *bridge,
+@@ -486,10 +466,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ };
+
+ host = of_find_mipi_dsi_host_by_node(lt->host_node);
+- if (!host) {
+- dev_err(dev, "failed to find dsi host\n");
+- return -EPROBE_DEFER;
+- }
++ if (!host)
++ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+@@ -559,6 +537,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+ int ret;
+
++ ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
++ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
++ if (ret < 0) {
++ dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
++ return ret;
++ }
++
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ ret = lt8912_bridge_connector_init(bridge);
+ if (ret) {
+@@ -575,8 +560,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ if (ret)
+ goto error;
+
+- lt->is_attached = true;
+-
+ return 0;
+
+ error:
+@@ -588,15 +571,27 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
+ {
+ struct lt8912 *lt = bridge_to_lt8912(bridge);
+
+- if (lt->is_attached) {
+- lt8912_hard_power_off(lt);
++ lt8912_hard_power_off(lt);
+
+- if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
+- drm_bridge_hpd_disable(lt->hdmi_port);
++ if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
++ drm_bridge_hpd_disable(lt->hdmi_port);
++}
+
+- drm_connector_unregister(&lt->connector);
+- drm_connector_cleanup(&lt->connector);
+- }
++static enum drm_mode_status
++lt8912_bridge_mode_valid(struct drm_bridge *bridge,
++ const struct drm_display_info *info,
++ const struct drm_display_mode *mode)
++{
++ if (mode->clock > 150000)
++ return MODE_CLOCK_HIGH;
++
++ if (mode->hdisplay > 1920)
++ return MODE_BAD_HVALUE;
++
++ if (mode->vdisplay > 1080)
++ return MODE_BAD_VVALUE;
++
++ return MODE_OK;
+ }
+
+ static enum drm_connector_status
+@@ -629,6 +624,7 @@ static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge,
+ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
+ .attach = lt8912_bridge_attach,
+ .detach = lt8912_bridge_detach,
++ .mode_valid = lt8912_bridge_mode_valid,
+ .mode_set = lt8912_bridge_mode_set,
+ .enable = lt8912_bridge_enable,
+ .detect = lt8912_bridge_detect,
+@@ -750,7 +746,6 @@ static void lt8912_remove(struct i2c_client *client)
+ {
+ struct lt8912 *lt = i2c_get_clientdata(client);
+
+- lt8912_bridge_detach(&lt->bridge);
+ drm_bridge_remove(&lt->bridge);
+ lt8912_free_i2c(lt);
+ lt8912_put_dt(lt);
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index 9663601ce09818..89bdd938757e11 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -760,10 +760,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+- if (!host) {
+- dev_err(lt9611->dev, "failed to find dsi host\n");
+- return ERR_PTR(-EPROBE_DEFER);
+- }
++ if (!host)
++ return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index 22c84d29c2bc58..c41ffd0bc04941 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -265,10 +265,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+- if (!host) {
+- dev_err(dev, "failed to find dsi host\n");
+- return ERR_PTR(-EPROBE_DEFER);
+- }
++ if (!host)
++ return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"));
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+@@ -929,9 +927,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
+ init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
+- ret = devm_request_threaded_irq(dev, client->irq, NULL,
+- lt9611uxc_irq_thread_handler,
+- IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
++ ret = request_threaded_irq(client->irq, NULL,
++ lt9611uxc_irq_thread_handler,
++ IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_disable_regulators;
+@@ -967,6 +965,8 @@ static int lt9611uxc_probe(struct i2c_client *client)
+ return lt9611uxc_audio_init(dev, lt9611uxc);
+
+ err_remove_bridge:
++ free_irq(client->irq, lt9611uxc);
++ cancel_work_sync(&lt9611uxc->work);
+ drm_bridge_remove(&lt9611uxc->bridge);
+
+ err_disable_regulators:
+@@ -983,7 +983,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
+ {
+ struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+
+- disable_irq(client->irq);
++ free_irq(client->irq, lt9611uxc);
+ cancel_work_sync(&lt9611uxc->work);
+ lt9611uxc_audio_exit(lt9611uxc);
+ drm_bridge_remove(&lt9611uxc->bridge);
+diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
+index d81920227a8aeb..7c0076e499533a 100644
+--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
++++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
+@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
+ int ret;
+
+ ret = i2c_master_send(ptn_bridge->client, &addr, 1);
+- if (ret <= 0) {
++ if (ret < 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(ptn_bridge->client, buf, len);
+- if (ret <= 0) {
++ if (ret < 0) {
+ DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
+ return ret;
+ }
+@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
+ buf[1] = val;
+
+ ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
+- if (ret <= 0) {
++ if (ret < 0) {
+ DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
+index 9316384b44745b..a1dd2ead8dcc4d 100644
+--- a/drivers/gpu/drm/bridge/panel.c
++++ b/drivers/gpu/drm/bridge/panel.c
+@@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
+
+ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
+ {
+- struct drm_bridge **bridge = res;
++ struct drm_bridge *bridge = *(struct drm_bridge **)res;
+
+- drm_panel_bridge_remove(*bridge);
++ if (!bridge)
++ return;
++
++ drm_bridge_remove(bridge);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
+index 8161b1a1a4b12f..14d4dcf239da83 100644
+--- a/drivers/gpu/drm/bridge/parade-ps8640.c
++++ b/drivers/gpu/drm/bridge/parade-ps8640.c
+@@ -107,6 +107,7 @@ struct ps8640 {
+ struct device_link *link;
+ bool pre_enabled;
+ bool need_post_hpd_delay;
++ struct mutex aux_lock;
+ };
+
+ static const struct regmap_config ps8640_regmap_config[] = {
+@@ -210,7 +211,7 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+- unsigned int len = msg->size;
++ size_t len = msg->size;
+ unsigned int data;
+ unsigned int base;
+ int ret;
+@@ -330,11 +331,12 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
+ return ret;
+ }
+
+- buf[i] = data;
++ if (i < msg->size)
++ buf[i] = data;
+ }
+ }
+
+- return len;
++ return min(len, msg->size);
+ }
+
+ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+@@ -344,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ int ret;
+
++ mutex_lock(&ps_bridge->aux_lock);
+ pm_runtime_get_sync(dev);
++ ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
++ if (ret) {
++ pm_runtime_put_sync_suspend(dev);
++ goto exit;
++ }
+ ret = ps8640_aux_transfer_msg(aux, msg);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
++exit:
++ mutex_unlock(&ps_bridge->aux_lock);
++
+ return ret;
+ }
+
+@@ -469,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
+ ps_bridge->pre_enabled = false;
+
+ ps8640_bridge_vdo_control(ps_bridge, DISABLE);
++
++ /*
++ * The bridge seems to expect everything to be power cycled during the
++ * disable process, so grab a lock here to make sure
++ * ps8640_aux_transfer() is not holding a runtime PM reference and
++ * preventing the bridge from suspending.
++ */
++ mutex_lock(&ps_bridge->aux_lock);
++
+ pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
++
++ mutex_unlock(&ps_bridge->aux_lock);
+ }
+
+ static int ps8640_bridge_attach(struct drm_bridge *bridge,
+@@ -618,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
+ if (!ps_bridge)
+ return -ENOMEM;
+
++ mutex_init(&ps_bridge->aux_lock);
++
+ ps_bridge->supplies[0].supply = "vdd12";
+ ps_bridge->supplies[1].supply = "vdd33";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
+index cf777bdb25d2a4..f24666b4819384 100644
+--- a/drivers/gpu/drm/bridge/samsung-dsim.c
++++ b/drivers/gpu/drm/bridge/samsung-dsim.c
+@@ -385,7 +385,7 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+- [PHYCTRL_ULPS_EXIT] = 0,
++ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
+@@ -413,6 +413,7 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
++ .has_broken_fifoctrl_emptyhdr = 1,
+ };
+
+ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+@@ -429,6 +430,7 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
++ .has_broken_fifoctrl_emptyhdr = 1,
+ };
+
+ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
+@@ -939,10 +941,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
+ reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+ reg &= ~DSIM_STOP_STATE_CNT_MASK;
+ reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
+-
+- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+- reg |= DSIM_FORCE_STOP_STATE;
+-
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+
+ reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+@@ -1010,8 +1008,20 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
+ do {
+ u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
+
+- if (reg & DSIM_SFR_HEADER_EMPTY)
+- return 0;
++ if (!dsi->driver_data->has_broken_fifoctrl_emptyhdr) {
++ if (reg & DSIM_SFR_HEADER_EMPTY)
++ return 0;
++ } else {
++ if (!(reg & DSIM_SFR_HEADER_FULL)) {
++ /*
++ * Wait a little bit, so the pending data can
++ * actually leave the FIFO to avoid overflow.
++ */
++ if (!cond_resched())
++ usleep_range(950, 1050);
++ return 0;
++ }
++ }
+
+ if (!cond_resched())
+ usleep_range(950, 1050);
+@@ -1387,18 +1397,6 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
+ disable_irq(dsi->irq);
+ }
+
+-static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
+-{
+- u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+-
+- if (enable)
+- reg |= DSIM_FORCE_STOP_STATE;
+- else
+- reg &= ~DSIM_FORCE_STOP_STATE;
+-
+- samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+-}
+-
+ static int samsung_dsim_init(struct samsung_dsim *dsi)
+ {
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
+@@ -1448,9 +1446,6 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
+ ret = samsung_dsim_init(dsi);
+ if (ret)
+ return;
+-
+- samsung_dsim_set_display_mode(dsi);
+- samsung_dsim_set_display_enable(dsi, true);
+ }
+ }
+
+@@ -1459,12 +1454,8 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
+ {
+ struct samsung_dsim *dsi = bridge_to_dsi(bridge);
+
+- if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
+- samsung_dsim_set_display_mode(dsi);
+- samsung_dsim_set_display_enable(dsi, true);
+- } else {
+- samsung_dsim_set_stop_state(dsi, false);
+- }
++ samsung_dsim_set_display_mode(dsi);
++ samsung_dsim_set_display_enable(dsi, true);
+
+ dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
+ }
+@@ -1477,9 +1468,6 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return;
+
+- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+- samsung_dsim_set_stop_state(dsi, true);
+-
+ dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
+ }
+
+@@ -1781,8 +1769,6 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
+ if (ret)
+ return ret;
+
+- samsung_dsim_set_stop_state(dsi, false);
+-
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
+index 2bdc5b439bebd5..4560ae9cbce150 100644
+--- a/drivers/gpu/drm/bridge/sii902x.c
++++ b/drivers/gpu/drm/bridge/sii902x.c
+@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
+ return ret;
+ }
+
++ ret = sii902x_audio_codec_init(sii902x, dev);
++ if (ret)
++ return ret;
++
++ i2c_set_clientdata(sii902x->i2c, sii902x);
++
++ sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
++ 1, 0, I2C_MUX_GATE,
++ sii902x_i2c_bypass_select,
++ sii902x_i2c_bypass_deselect);
++ if (!sii902x->i2cmux) {
++ ret = -ENOMEM;
++ goto err_unreg_audio;
++ }
++
++ sii902x->i2cmux->priv = sii902x;
++ ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
++ if (ret)
++ goto err_unreg_audio;
++
+ sii902x->bridge.funcs = &sii902x_bridge_funcs;
+ sii902x->bridge.of_node = dev->of_node;
+ sii902x->bridge.timings = &default_sii902x_timings;
+@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
+
+ drm_bridge_add(&sii902x->bridge);
+
+- sii902x_audio_codec_init(sii902x, dev);
+-
+- i2c_set_clientdata(sii902x->i2c, sii902x);
++ return 0;
+
+- sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+- 1, 0, I2C_MUX_GATE,
+- sii902x_i2c_bypass_select,
+- sii902x_i2c_bypass_deselect);
+- if (!sii902x->i2cmux)
+- return -ENOMEM;
++err_unreg_audio:
++ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
++ platform_device_unregister(sii902x->audio.pdev);
+
+- sii902x->i2cmux->priv = sii902x;
+- return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
++ return ret;
+ }
+
+ static int sii902x_probe(struct i2c_client *client)
+@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
+ }
+
+ static void sii902x_remove(struct i2c_client *client)
+-
+ {
+ struct sii902x *sii902x = i2c_get_clientdata(client);
+
+- i2c_mux_del_adapters(sii902x->i2cmux);
+ drm_bridge_remove(&sii902x->bridge);
++ i2c_mux_del_adapters(sii902x->i2cmux);
++
++ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
++ platform_device_unregister(sii902x->audio.pdev);
+ }
+
+ static const struct of_device_id sii902x_dt_ids[] = {
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index b45bffab7c8174..7fd4a5fe03edf6 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -2034,7 +2034,7 @@ static irqreturn_t tc_irq_handler(int irq, void *arg)
+ dev_err(tc->dev, "syserr %x\n", stat);
+ }
+
+- if (tc->hpd_pin >= 0 && tc->bridge.dev) {
++ if (tc->hpd_pin >= 0 && tc->bridge.dev && tc->aux.drm_dev) {
+ /*
+ * H is triggered when the GPIO goes high.
+ *
+@@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client)
+ } else {
+ if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
+ dev_err(dev, "failed to parse HPD number\n");
+- return ret;
++ return -EINVAL;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index 819a4b6ec2a07f..c72d5fbbb0ec40 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -9,12 +9,14 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/kernel.h>
++#include <linux/math64.h>
+ #include <linux/media-bus-format.h>
+ #include <linux/minmax.h>
+ #include <linux/module.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
++#include <linux/units.h>
+
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+@@ -156,6 +158,7 @@ struct tc358768_priv {
+ u32 frs; /* PLL Frequency range for HSCK (post divider) */
+
+ u32 dsiclk; /* pll_clk / 2 */
++ u32 pclk; /* incoming pclk rate */
+ };
+
+ static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
+@@ -216,6 +219,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
++
++ if (priv->error)
++ return;
++
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+@@ -375,6 +382,7 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
+ priv->prd = best_prd;
+ priv->frs = frs;
+ priv->dsiclk = best_pll / 2;
++ priv->pclk = mode->clock * 1000;
+
+ return 0;
+ }
+@@ -600,7 +608,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+- dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
++ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+@@ -623,15 +631,36 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
+ return tc358768_clear_error(priv);
+ }
+
+-#define TC358768_PRECISION 1000
+-static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
++static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
++{
++ return DIV_ROUND_UP(ns * 1000, period_ps);
++}
++
++static u32 tc358768_ps_to_ns(u32 ps)
+ {
+- return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
++ return ps / 1000;
+ }
+
+-static u32 tc358768_to_ns(u32 nsk)
++static u32 tc358768_dpi_to_ns(u32 val, u32 pclk)
+ {
+- return (nsk / TC358768_PRECISION);
++ return (u32)div_u64((u64)val * NANO, pclk);
++}
++
++/* Convert value in DPI pixel clock units to DSI byte count */
++static u32 tc358768_dpi_to_dsi_bytes(struct tc358768_priv *priv, u32 val)
++{
++ u64 m = (u64)val * priv->dsiclk / 4 * priv->dsi_lanes;
++ u64 n = priv->pclk;
++
++ return (u32)div_u64(m + n - 1, n);
++}
++
++static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
++{
++ u64 m = (u64)val * NANO;
++ u64 n = priv->dsiclk / 4 * priv->dsi_lanes;
++
++ return (u32)div_u64(m, n);
+ }
+
+ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+@@ -642,13 +671,23 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ u32 val, val2, lptxcnt, hact, data_type;
+ s32 raw_val;
+ const struct drm_display_mode *mode;
+- u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
+- u32 dsiclk, dsibclk, video_start;
+- const u32 internal_delay = 40;
++ u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
++ u32 dsiclk, hsbyteclk;
+ int ret, i;
++ struct videomode vm;
++ struct device *dev = priv->dev;
++ /* In pixelclock units */
++ u32 dpi_htot, dpi_data_start;
++ /* In byte units */
++ u32 dsi_dpi_htot, dsi_dpi_data_start;
++ u32 dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp;
++ const u32 dsi_hss = 4; /* HSS is a short packet (4 bytes) */
++ /* In hsbyteclk units */
++ u32 dsi_vsdly;
++ const u32 internal_dly = 40;
+
+ if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+- dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
++ dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ }
+
+@@ -656,7 +695,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+- dev_err(priv->dev, "Software reset failed: %d\n", ret);
++ dev_err(dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+@@ -664,53 +703,194 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+- dev_err(priv->dev, "PLL setup failed: %d\n", ret);
++ dev_err(dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
++ drm_display_mode_to_videomode(mode, &vm);
++
+ dsiclk = priv->dsiclk;
+- dsibclk = dsiclk / 4;
++ hsbyteclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+- hact = mode->hdisplay * 3;
+- video_start = (mode->htotal - mode->hsync_start) * 3;
++ hact = vm.hactive * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+- hact = mode->hdisplay * 3;
+- video_start = (mode->htotal - mode->hsync_start) * 3;
++ hact = vm.hactive * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+- hact = mode->hdisplay * 18 / 8;
+- video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
++ hact = vm.hactive * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+- hact = mode->hdisplay * 2;
+- video_start = (mode->htotal - mode->hsync_start) * 2;
++ hact = vm.hactive * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+- dev_err(priv->dev, "Invalid data format (%u)\n",
++ dev_err(dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
++ /*
++ * There are three important things to make TC358768 work correctly,
++ * which are not trivial to manage:
++ *
++ * 1. Keep the DPI line-time and the DSI line-time as close to each
++ * other as possible.
++ * 2. TC358768 goes to LP mode after each line's active area. The DSI
++ * HFP period has to be long enough for entering and exiting LP mode.
++ * But it is not clear how to calculate this.
++ * 3. VSDly (video start delay) has to be long enough to ensure that the
++ * DSI TX does not start transmitting until we have started receiving
++ * pixel data from the DPI input. It is not clear how to calculate
++ * this either.
++ */
++
++ dpi_htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch;
++ dpi_data_start = vm.hsync_len + vm.hback_porch;
++
++ dev_dbg(dev, "dpi horiz timing (pclk): %u + %u + %u + %u = %u\n",
++ vm.hsync_len, vm.hback_porch, vm.hactive, vm.hfront_porch,
++ dpi_htot);
++
++ dev_dbg(dev, "dpi horiz timing (ns): %u + %u + %u + %u = %u\n",
++ tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
++ tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
++ tc358768_dpi_to_ns(vm.hactive, vm.pixelclock),
++ tc358768_dpi_to_ns(vm.hfront_porch, vm.pixelclock),
++ tc358768_dpi_to_ns(dpi_htot, vm.pixelclock));
++
++ dev_dbg(dev, "dpi data start (ns): %u + %u = %u\n",
++ tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
++ tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
++ tc358768_dpi_to_ns(dpi_data_start, vm.pixelclock));
++
++ dsi_dpi_htot = tc358768_dpi_to_dsi_bytes(priv, dpi_htot);
++ dsi_dpi_data_start = tc358768_dpi_to_dsi_bytes(priv, dpi_data_start);
++
++ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
++ dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, vm.hsync_len);
++ dsi_hbp = tc358768_dpi_to_dsi_bytes(priv, vm.hback_porch);
++ } else {
++ /* HBP is included in HSW in event mode */
++ dsi_hbp = 0;
++ dsi_hsw = tc358768_dpi_to_dsi_bytes(priv,
++ vm.hsync_len +
++ vm.hback_porch);
++
++ /*
++ * The pixel packet includes the actual pixel data, and:
++ * DSI packet header = 4 bytes
++ * DCS code = 1 byte
++ * DSI packet footer = 2 bytes
++ */
++ dsi_hact = hact + 4 + 1 + 2;
++
++ dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
++
++ /*
++ * Here we should check if HFP is long enough for entering LP
++ * and exiting LP, but it's not clear how to calculate that.
++ * Instead, this is a naive algorithm that just adjusts the HFP
++ * and HSW so that HFP is (at least) roughly 2/3 of the total
++ * blanking time.
++ */
++ if (dsi_hfp < (dsi_hfp + dsi_hsw + dsi_hss) * 2 / 3) {
++ u32 old_hfp = dsi_hfp;
++ u32 old_hsw = dsi_hsw;
++ u32 tot = dsi_hfp + dsi_hsw + dsi_hss;
++
++ dsi_hsw = tot / 3;
++
++ /*
++ * Seems like sometimes HSW has to be divisible by num-lanes, but
++ * not always...
++ */
++ dsi_hsw = roundup(dsi_hsw, priv->dsi_lanes);
++
++ dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
++
++ dev_dbg(dev,
++ "hfp too short, adjusting dsi hfp and dsi hsw from %u, %u to %u, %u\n",
++ old_hfp, old_hsw, dsi_hfp, dsi_hsw);
++ }
++
++ dev_dbg(dev,
++ "dsi horiz timing (bytes): %u, %u + %u + %u + %u = %u\n",
++ dsi_hss, dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp,
++ dsi_hss + dsi_hsw + dsi_hbp + dsi_hact + dsi_hfp);
++
++ dev_dbg(dev, "dsi horiz timing (ns): %u + %u + %u + %u + %u = %u\n",
++ tc358768_dsi_bytes_to_ns(priv, dsi_hss),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hact),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hfp),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hss + dsi_hsw +
++ dsi_hbp + dsi_hact + dsi_hfp));
++ }
++
++ /* VSDly calculation */
++
++ /* Start with the HW internal delay */
++ dsi_vsdly = internal_dly;
++
++ /* Convert to byte units as the other variables are in byte units */
++ dsi_vsdly *= priv->dsi_lanes;
++
++ /* Do we need more delay, in addition to the internal? */
++ if (dsi_dpi_data_start > dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp) {
++ dsi_vsdly = dsi_dpi_data_start - dsi_hss - dsi_hsw - dsi_hbp;
++ dsi_vsdly = roundup(dsi_vsdly, priv->dsi_lanes);
++ }
++
++ dev_dbg(dev, "dsi data start (bytes) %u + %u + %u + %u = %u\n",
++ dsi_vsdly, dsi_hss, dsi_hsw, dsi_hbp,
++ dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp);
++
++ dev_dbg(dev, "dsi data start (ns) %u + %u + %u + %u = %u\n",
++ tc358768_dsi_bytes_to_ns(priv, dsi_vsdly),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hss),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
++ tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
++ tc358768_dsi_bytes_to_ns(priv, dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp));
++
++ /* Convert back to hsbyteclk */
++ dsi_vsdly /= priv->dsi_lanes;
++
++ /*
++ * The docs say that there is an internal delay of 40 cycles.
++ * However, we get underflows if we follow that rule. If we
++ * instead ignore the internal delay, things work. So either
++ * the docs are wrong or the calculations are wrong.
++ *
++ * As a temporary fix, add the internal delay here, to counter
++ * the subtraction when writing the register.
++ */
++ dsi_vsdly += internal_dly;
++
++ /* Clamp to the register max */
++ if (dsi_vsdly - internal_dly > 0x3ff) {
++ dev_warn(dev, "VSDly too high, underflows likely\n");
++ dsi_vsdly = 0x3ff + internal_dly;
++ }
++
+ /* VSDly[9:0] */
+- video_start = max(video_start, internal_delay + 1) - internal_delay;
+- tc358768_write(priv, TC358768_VSDLY, video_start);
++ tc358768_write(priv, TC358768_VSDLY, dsi_vsdly - internal_dly);
+
+ tc358768_write(priv, TC358768_DATAFMT, val);
+ tc358768_write(priv, TC358768_DSITX_DT, data_type);
+@@ -722,67 +902,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+- dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+- dsibclk);
+- dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+- ui_nsk = dsiclk_nsk / 2;
+- dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+- dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+- dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
++ hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
++ dsiclk_ps = (u32)div_u64(PICO, dsiclk);
++ ui_ps = dsiclk_ps / 2;
++ dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
++ ui_ps, hsbyteclk_ps);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+- val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+- dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
++ val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "LINEINITCNT: %u\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+- val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
+ lptxcnt = val;
+- dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
++ dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+- val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
++ val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
+ /* TCLK_PREPARE + TCLK_ZERO > 300ns */
+- val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
+- dsibclk_nsk) - 2;
++ val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
++ hsbyteclk_ps) - 2;
++ dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
+ val |= val2 << 8;
+- dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
+- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
+ val = clamp(raw_val, 0, 127);
+- dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
++ dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+- val = 50 + tc358768_to_ns(4 * ui_nsk);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
++ val = 50 + tc358768_ps_to_ns(4 * ui_ps);
++ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
++ dev_dbg(dev, "THS_PREPARECNT %u\n", val);
+ /* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
+- raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
++ raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
+ val2 = clamp(raw_val, 0, 127);
++ dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
+ val |= val2 << 8;
+- dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+- val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
++ val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
+ val = val / (lptxcnt + 1) - 1;
+- dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
++ dev_dbg(dev, "TWAKEUP: %u\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+- val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+- dsibclk_nsk) - 3;
+- dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
++ val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
++ hsbyteclk_ps) - 3;
++ dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
+- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
+- dsibclk_nsk) - 4;
++ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
++ hsbyteclk_ps) - 4;
+ val = clamp(raw_val, 0, 15);
+- dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
++ dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+@@ -790,16 +970,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+- if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+- tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
++ tc358768_write(priv, TC358768_TXOPTIONCNTRL,
++ (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+- val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+- val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
+- val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+- dsibclk_nsk) - 2;
++ val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
++ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
++ dev_dbg(dev, "TXTAGOCNT: %u\n", val);
++ val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
++ hsbyteclk_ps) - 2;
++ dev_dbg(dev, "RXTASURECNT: %u\n", val2);
+ val = val << 16 | val2;
+- dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+@@ -810,58 +991,44 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+ tc358768_write(priv, TC358768_DSI_EVENT, 0);
+
+ /* vact */
+- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+
+ /* vsw */
+- tc358768_write(priv, TC358768_DSI_VSW,
+- mode->vsync_end - mode->vsync_start);
++ tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
++
+ /* vbp */
+- tc358768_write(priv, TC358768_DSI_VBPR,
+- mode->vtotal - mode->vsync_end);
+-
+- /* hsw * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
+- tc358768_write(priv, TC358768_DSI_HSW, val);
+-
+- /* hbp * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->htotal - mode->hsync_end) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
+- tc358768_write(priv, TC358768_DSI_HBPR, val);
++ tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
+ } else {
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vact */
+- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
++ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+- mode->vtotal - mode->vsync_start);
++ vm.vsync_len + vm.vback_porch);
++
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
++ }
+
+- /* (hsw + hbp) * byteclk * ndl / pclk */
+- val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+- mode->clock * 1000);
+- tc358768_write(priv, TC358768_DSI_HSW, val);
++ /* hsw (bytes) */
++ tc358768_write(priv, TC358768_DSI_HSW, dsi_hsw);
+
+- /* hbp (not used in event mode) */
+- tc358768_write(priv, TC358768_DSI_HBPR, 0);
+- }
++ /* hbp (bytes) */
++ tc358768_write(priv, TC358768_DSI_HBPR, dsi_hbp);
+
+ /* hact (bytes) */
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+ /* VSYNC polarity */
+- if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+- tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
++ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
++ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
++
+ /* HSYNC polarity */
+- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+- tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
++ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
++ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+@@ -891,7 +1058,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+- dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
++ dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
+index 90a89d70d83287..c737670631929a 100644
+--- a/drivers/gpu/drm/bridge/tc358775.c
++++ b/drivers/gpu/drm/bridge/tc358775.c
+@@ -454,10 +454,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
+ dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
+ connector->display_info.bus_formats[0],
+ tc->bpc);
+- /*
+- * Default hardware register settings of tc358775 configured
+- * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
+- */
+ if (connector->display_info.bus_formats[0] ==
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
+ /* VESA-24 */
+@@ -468,14 +464,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
+ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
+ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
+- } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
+- d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+- d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
+- d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
+- d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+- d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
+- d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+- d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
++ } else {
++ /* JEIDA-18 and JEIDA-24 */
++ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
++ d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
++ d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
++ d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
++ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
++ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
++ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
+ }
+
+ d2l_write(tc->i2c, VFUEN, VFUEN_EN);
+@@ -610,10 +607,8 @@ static int tc_attach_host(struct tc_data *tc)
+ };
+
+ host = of_find_mipi_dsi_host_by_node(tc->host_node);
+- if (!host) {
+- dev_err(dev, "failed to find dsi host\n");
+- return -EPROBE_DEFER;
+- }
++ if (!host)
++ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
+index b65632ec7e7daa..3f933ba2946820 100644
+--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
++++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
+@@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
+ .channel = 0,
+ .node = NULL,
+ };
++ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
+- if (!host) {
+- DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+- return -EPROBE_DEFER;
+- }
++ if (!host)
++ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+
+ dlpc->dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dlpc->dsi)) {
+@@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
+ dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
+ dlpc->dsi->lanes = dlpc->dsi_lanes;
+
+- return devm_mipi_dsi_attach(dev, dlpc->dsi);
++ ret = devm_mipi_dsi_attach(dev, dlpc->dsi);
++ if (ret)
++ DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
++
++ return ret;
+ }
+
+ static int dlpc3433_probe(struct i2c_client *client)
+@@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client)
+ drm_bridge_add(&dlpc->bridge);
+
+ ret = dlpc_host_attach(dlpc);
+- if (ret) {
+- DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
++ if (ret)
+ goto err_remove_bridge;
+- }
+
+ return 0;
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 061e8bd5915de8..8a23116346a8a5 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -478,7 +478,6 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
+ dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ /* On failure, disable PLL again and exit. */
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+- regulator_disable(ctx->vcc);
+ return;
+ }
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 84148a79414b7f..3309c01fa7153c 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ u32 request_val = AUX_CMD_REQ(msg->request);
+ u8 *buf = msg->buffer;
+ unsigned int len = msg->size;
++ unsigned int short_len;
+ unsigned int val;
+ int ret;
+ u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];
+@@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ }
+
+ if (val & AUX_IRQ_STATUS_AUX_SHORT) {
+- ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
++ ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len);
++ len = min(len, short_len);
+ if (ret)
+ goto exit;
+ } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {
+diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
+index e0e015243a602d..b588fea12502d6 100644
+--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
++++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
+@@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int __exit tpd12s015_remove(struct platform_device *pdev)
++static int tpd12s015_remove(struct platform_device *pdev)
+ {
+ struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+ static struct platform_driver tpd12s015_driver = {
+ .probe = tpd12s015_probe,
+- .remove = __exit_p(tpd12s015_remove),
++ .remove = tpd12s015_remove,
+ .driver = {
+ .name = "tpd12s015",
+ .of_match_table = tpd12s015_of_match,
+diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
+index e6503f1c5927bb..17ab38304885cd 100644
+--- a/drivers/gpu/drm/ci/build.yml
++++ b/drivers/gpu/drm/ci/build.yml
+@@ -1,6 +1,7 @@
+ .build:
+ extends:
+ - .build-rules
++ - .container+build-rules
+ stage: build
+ artifacts:
+ paths:
+diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
+index 2c4df53f5dfe3a..36949243674800 100644
+--- a/drivers/gpu/drm/ci/gitlab-ci.yml
++++ b/drivers/gpu/drm/ci/gitlab-ci.yml
+@@ -1,6 +1,6 @@
+ variables:
+ DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
+- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 0dc961645c4f0241f8512cb0ec3ad59635842072
++ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha edfbf74df1d4d6ce54ffe24566108be0e1a98c3d
+
+ UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
+ TARGET_BRANCH: drm-next
+@@ -24,7 +24,9 @@ variables:
+ PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
+ # per-job artifact storage on MinIO
+ JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
+-
++ # default kernel for rootfs before injecting the current kernel tree
++ KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/v6.4.12-for-mesa-ci-f6b4ad45f48d
++ LAVA_TAGS: subset-1-gfx
+ LAVA_JOB_PRIORITY: 30
+
+ default:
+@@ -86,6 +88,17 @@ include:
+ - '/.gitlab-ci/container/gitlab-ci.yml'
+ - '/.gitlab-ci/test/gitlab-ci.yml'
+ - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
++ - '/src/microsoft/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/drivers/zink/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/drivers/crocus/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/drivers/softpipe/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/drivers/llvmpipe/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/drivers/virgl/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/drivers/nouveau/ci/gitlab-ci-inc.yml'
++ - '/src/gallium/frontends/lavapipe/ci/gitlab-ci-inc.yml'
++ - '/src/intel/ci/gitlab-ci-inc.yml'
++ - '/src/freedreno/ci/gitlab-ci-inc.yml'
++ - '/src/amd/ci/gitlab-ci-inc.yml'
+ - drivers/gpu/drm/ci/image-tags.yml
+ - drivers/gpu/drm/ci/container.yml
+ - drivers/gpu/drm/ci/static-checks.yml
+@@ -154,6 +167,11 @@ stages:
+ # Run automatically once all dependency jobs have passed
+ - when: on_success
+
++# When to automatically run the CI for container jobs
++.container+build-rules:
++ rules:
++ - !reference [.no_scheduled_pipelines-rules, rules]
++ - when: manual
+
+ .ci-deqp-artifacts:
+ artifacts:
+diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
+index f051b6c547c531..157d987149f072 100644
+--- a/drivers/gpu/drm/ci/image-tags.yml
++++ b/drivers/gpu/drm/ci/image-tags.yml
+@@ -1,5 +1,5 @@
+ variables:
+- CONTAINER_TAG: "2023-08-10-mesa-uprev"
++ CONTAINER_TAG: "2023-10-11-mesa-uprev"
+ DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
+
+diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
+index 0c4456b21b0fc6..379f26ea87cc00 100755
+--- a/drivers/gpu/drm/ci/lava-submit.sh
++++ b/drivers/gpu/drm/ci/lava-submit.sh
+@@ -22,7 +22,7 @@ cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
+
+ # Prepare env vars for upload.
+ section_start variables "Variables passed through:"
+-KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
++KERNEL_IMAGE_BASE="https://${BASE_SYSTEM_HOST_PATH}" \
+ artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
+ section_end variables
+
+diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
+index 6473cddaa7a962..6f81dc10865b5e 100644
+--- a/drivers/gpu/drm/ci/test.yml
++++ b/drivers/gpu/drm/ci/test.yml
+@@ -86,7 +86,7 @@ msm:sc7180:
+ extends:
+ - .lava-igt:arm64
+ stage: msm
+- parallel: 2
++ parallel: 4
+ variables:
+ DRIVER_NAME: msm
+ DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
+@@ -104,7 +104,10 @@ msm:apq8016:
+ DRIVER_NAME: msm
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb
+ GPU_VERSION: apq8016
+- BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
++ # disabling unused clocks conflicts with the MDSS runtime PM trying to
++ # disable those clocks and causes boot to fail.
++ # Reproducer: DRM_MSM=y, DRM_I2C_ADV7511=m
++ BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
+ RUNNER_TAG: google-freedreno-db410c
+ script:
+ - ./install/bare-metal/fastboot.sh
+@@ -155,7 +158,7 @@ rockchip:rk3399:
+ extends:
+ - .lava-igt:arm64
+ stage: rockchip
+- parallel: 3
++ parallel: 2
+ variables:
+ DRIVER_NAME: rockchip
+ DEVICE_TYPE: rk3399-gru-kevin
+@@ -178,7 +181,7 @@ rockchip:rk3399:
+ i915:apl:
+ extends:
+ - .i915
+- parallel: 12
++ parallel: 3
+ variables:
+ DEVICE_TYPE: asus-C523NA-A20057-coral
+ GPU_VERSION: apl
+@@ -187,7 +190,7 @@ i915:apl:
+ i915:glk:
+ extends:
+ - .i915
+- parallel: 5
++ parallel: 2
+ variables:
+ DEVICE_TYPE: hp-x360-12b-ca0010nr-n4020-octopus
+ GPU_VERSION: glk
+@@ -196,7 +199,7 @@ i915:glk:
+ i915:amly:
+ extends:
+ - .i915
+- parallel: 8
++ parallel: 2
+ variables:
+ DEVICE_TYPE: asus-C433TA-AJ0005-rammus
+ GPU_VERSION: amly
+@@ -205,7 +208,7 @@ i915:amly:
+ i915:kbl:
+ extends:
+ - .i915
+- parallel: 5
++ parallel: 3
+ variables:
+ DEVICE_TYPE: hp-x360-14-G1-sona
+ GPU_VERSION: kbl
+@@ -214,7 +217,7 @@ i915:kbl:
+ i915:whl:
+ extends:
+ - .i915
+- parallel: 8
++ parallel: 2
+ variables:
+ DEVICE_TYPE: dell-latitude-5400-8665U-sarien
+ GPU_VERSION: whl
+@@ -223,7 +226,7 @@ i915:whl:
+ i915:cml:
+ extends:
+ - .i915
+- parallel: 6
++ parallel: 2
+ variables:
+ DEVICE_TYPE: asus-C436FA-Flip-hatch
+ GPU_VERSION: cml
+@@ -232,11 +235,11 @@ i915:cml:
+ i915:tgl:
+ extends:
+ - .i915
+- parallel: 6
++ parallel: 5
+ variables:
+- DEVICE_TYPE: asus-cx9400-volteer
++ DEVICE_TYPE: acer-cp514-2h-1130g7-volteer
+ GPU_VERSION: tgl
+- RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
++ RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
+
+ .amdgpu:
+ extends:
+@@ -251,6 +254,7 @@ i915:tgl:
+ amdgpu:stoney:
+ extends:
+ - .amdgpu
++ parallel: 2
+ variables:
+ DEVICE_TYPE: hp-11A-G6-EE-grunt
+ GPU_VERSION: stoney
+@@ -269,6 +273,7 @@ amdgpu:stoney:
+ mediatek:mt8173:
+ extends:
+ - .mediatek
++ parallel: 4
+ variables:
+ DEVICE_TYPE: mt8173-elm-hana
+ GPU_VERSION: mt8173
+@@ -280,6 +285,7 @@ mediatek:mt8173:
+ mediatek:mt8183:
+ extends:
+ - .mediatek
++ parallel: 3
+ variables:
+ DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
+ GPU_VERSION: mt8183
+@@ -289,6 +295,7 @@ mediatek:mt8183:
+ .mediatek:mt8192:
+ extends:
+ - .mediatek
++ parallel: 3
+ variables:
+ DEVICE_TYPE: mt8192-asurada-spherion-r0
+ GPU_VERSION: mt8192
+@@ -307,6 +314,7 @@ mediatek:mt8183:
+ meson:g12b:
+ extends:
+ - .meson
++ parallel: 3
+ variables:
+ DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
+ GPU_VERSION: g12b
+diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
+index e6a78fd32380a1..851f0baf94600c 100644
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -532,6 +532,15 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+
+ mutex_lock(&aux->hw_mutex);
+
++ /*
++ * If the device attached to the aux bus is powered down then there's
++ * no reason to attempt a transfer. Error out immediately.
++ */
++ if (aux->powered_down) {
++ ret = -EBUSY;
++ goto unlock;
++ }
++
+ /*
+ * The specification doesn't give any recommendation on how often to
+ * retry native transactions. We used to retry 7 times like for
+@@ -599,6 +608,29 @@ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
+ }
+ EXPORT_SYMBOL(drm_dp_dpcd_probe);
+
++/**
++ * drm_dp_dpcd_set_powered() - Set whether the DP device is powered
++ * @aux: DisplayPort AUX channel; for convenience it's OK to pass NULL here
++ * and the function will be a no-op.
++ * @powered: true if powered; false if not
++ *
++ * If the endpoint device on the DP AUX bus is known to be powered down
++ * then this function can be called to make future transfers fail immediately
++ * instead of needing to time out.
++ *
++ * If this function is never called then a device defaults to being powered.
++ */
++void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered)
++{
++ if (!aux)
++ return;
++
++ mutex_lock(&aux->hw_mutex);
++ aux->powered_down = !powered;
++ mutex_unlock(&aux->hw_mutex);
++}
++EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
++
+ /**
+ * drm_dp_dpcd_read() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+@@ -1855,6 +1887,9 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ struct drm_dp_aux_msg msg;
+ int err = 0;
+
++ if (aux->powered_down)
++ return -EBUSY;
++
+ dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
+
+ memset(&msg, 0, sizeof(msg));
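
Editorial note (not part of the patch): a minimal sketch of how a panel driver might use the new drm_dp_dpcd_set_powered() helper above. The example_panel struct, its fields, and all example_* names are assumptions for illustration only.

    /* Sketch: bracket the panel power cycle so stray DPCD transfers
     * fail fast with -EBUSY instead of timing out on a dead AUX channel. */
    struct example_panel {
        struct drm_panel panel;
        struct drm_dp_aux *aux;
        struct regulator *supply;
    };

    #define to_example_panel(p) container_of(p, struct example_panel, panel)

    static int example_panel_unprepare(struct drm_panel *panel)
    {
        struct example_panel *p = to_example_panel(panel);

        drm_dp_dpcd_set_powered(p->aux, false); /* AUX transfers now return -EBUSY */
        regulator_disable(p->supply);

        return 0;
    }

    static int example_panel_prepare(struct drm_panel *panel)
    {
        struct example_panel *p = to_example_panel(panel);
        int ret;

        ret = regulator_enable(p->supply);
        if (ret)
            return ret;

        drm_dp_dpcd_set_powered(p->aux, true); /* transfers allowed again */

        return 0;
    }
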
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 8c929ef72c72c7..6ead31701e79ea 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -2923,7 +2923,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+
+ /* FIXME: Actually do some real error handling here */
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+- if (ret <= 0) {
++ if (ret < 0) {
+ drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
+ goto out;
+ }
+@@ -2975,7 +2975,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ mutex_unlock(&mgr->lock);
+
+ out:
+- if (ret <= 0)
++ if (ret < 0)
+ mstb->link_address_sent = false;
+ kfree(txmsg);
+ return ret < 0 ? ret : changed;
+@@ -4024,6 +4024,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
++ bool handle_csn;
+
+ drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+@@ -4032,6 +4033,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
++
++ mutex_lock(&mgr->probe_lock);
++ handle_csn = mgr->mst_primary->link_address_sent;
++ mutex_unlock(&mgr->probe_lock);
++
++ if (!handle_csn) {
++			drm_dbg_kms(mgr->dev, "Got CSN before finishing topology probing; skipping it.\n");
++ kfree(up_req);
++ goto out;
++ }
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
+@@ -4690,13 +4701,12 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
+
+ /**
+ * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
+- * @clock: dot clock for the mode
+- * @bpp: bpp for the mode.
+- * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
++ * @clock: dot clock
++ * @bpp: bpp as .4 binary fixed point
+ *
+ * This uses the formula in the spec to calculate the PBN value for a mode.
+ */
+-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
++int drm_dp_calc_pbn_mode(int clock, int bpp)
+ {
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+@@ -4707,18 +4717,9 @@ int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps *= 8 convert to bytes
+- *
+- * If the bpp is in units of 1/16, further divide by 16. Put this
+- * factor in the numerator rather than the denominator to avoid
+- * integer overflow
+ */
+-
+- if (dsc)
+- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+- 8 * 54 * 1000 * 1000);
+-
+- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
+- 8 * 54 * 1000 * 1000);
++ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
++ 1000 * 8 * 54 * 1000);
+ }
+ EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+
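
Editorial worked check (not part of the patch): the reworked drm_dp_calc_pbn_mode() takes bpp in .4 binary fixed point, so callers pass bpp << 4 for whole-bit values; the pre-shifted constant keeps the arithmetic identical to the old DSC branch.

    /*
     * Since 64 * 1006 >> 4 == 4024 and (bpp << 4) * 4024 == bpp * 64 * 1006,
     * the result matches the old non-DSC formula exactly.
     *
     * Example: 1080p60, dot clock 148500 kHz, 24 bpp:
     *
     *   DIV_ROUND_UP(148500 * (24 << 4) * 4024, 1000 * 8 * 54 * 1000)
     *     = DIV_ROUND_UP(229464576000, 432000000)
     *     = 532 PBN
     */
    int pbn = drm_dp_calc_pbn_mode(148500, 24 << 4); /* -> 532 */
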
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 60794fcde1d50f..554d4468aa7c08 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -2012,7 +2012,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
+ return ret;
+
+ drm_atomic_helper_async_commit(dev, state);
+- drm_atomic_helper_cleanup_planes(dev, state);
++ drm_atomic_helper_unprepare_planes(dev, state);
+
+ return 0;
+ }
+@@ -2072,7 +2072,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
+ return 0;
+
+ err:
+- drm_atomic_helper_cleanup_planes(dev, state);
++ drm_atomic_helper_unprepare_planes(dev, state);
+ return ret;
+ }
+ EXPORT_SYMBOL(drm_atomic_helper_commit);
+@@ -2650,6 +2650,39 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ }
+ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
+
++/**
++ * drm_atomic_helper_unprepare_planes - release plane resources on aborts
++ * @dev: DRM device
++ * @state: atomic state object with old state structures
++ *
++ * This function cleans up plane state, specifically framebuffers, from the
++ * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
++ * when aborting an atomic commit. For cleaning up after a successful commit
++ * use drm_atomic_helper_cleanup_planes().
++ */
++void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
++ struct drm_atomic_state *state)
++{
++ struct drm_plane *plane;
++ struct drm_plane_state *new_plane_state;
++ int i;
++
++ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
++ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
++
++ if (funcs->end_fb_access)
++ funcs->end_fb_access(plane, new_plane_state);
++ }
++
++ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
++ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
++
++ if (funcs->cleanup_fb)
++ funcs->cleanup_fb(plane, new_plane_state);
++ }
++}
++EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
++
+ static bool plane_crtc_active(const struct drm_plane_state *state)
+ {
+ return state->crtc && state->crtc->state->active;
+@@ -2784,6 +2817,17 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
+
+ funcs->atomic_flush(crtc, old_state);
+ }
++
++ /*
++ * Signal end of framebuffer access here before hw_done. After hw_done,
++ * a later commit might have already released the plane state.
++ */
++ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
++ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
++
++ if (funcs->end_fb_access)
++ funcs->end_fb_access(plane, old_plane_state);
++ }
+ }
+ EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
+
+@@ -2911,40 +2955,22 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
+ * configuration. Hence the old configuration must be preserved in @old_state to
+ * be able to call this function.
+ *
+- * This function must also be called on the new state when the atomic update
+- * fails at any point after calling drm_atomic_helper_prepare_planes().
++ * This function may not be called on the new state when the atomic update
++ * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
++ * drm_atomic_helper_unprepare_planes() in this case.
+ */
+ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+ {
+ struct drm_plane *plane;
+- struct drm_plane_state *old_plane_state, *new_plane_state;
++ struct drm_plane_state *old_plane_state;
+ int i;
+
+- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
++ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+- if (funcs->end_fb_access)
+- funcs->end_fb_access(plane, new_plane_state);
+- }
+-
+- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+- const struct drm_plane_helper_funcs *funcs;
+- struct drm_plane_state *plane_state;
+-
+- /*
+- * This might be called before swapping when commit is aborted,
+- * in which case we have to cleanup the new state.
+- */
+- if (old_plane_state == plane->state)
+- plane_state = new_plane_state;
+- else
+- plane_state = old_plane_state;
+-
+- funcs = plane->helper_private;
+-
+ if (funcs->cleanup_fb)
+- funcs->cleanup_fb(plane, plane_state);
++ funcs->cleanup_fb(plane, old_plane_state);
+ }
+ }
+ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
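
Editorial note (not part of the patch): a minimal sketch of the pairing the new helper establishes in a driver commit path. prepare_planes is undone by unprepare_planes on an aborted commit, while cleanup_planes remains the post-commit cleanup for the old state.

    static int example_commit(struct drm_device *dev,
                              struct drm_atomic_state *state)
    {
        int ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
            return ret;

        ret = drm_atomic_helper_swap_state(state, true);
        if (ret)
            goto err_unprepare;

        /* ...program hardware, signal hw_done, etc... */

        return 0;

    err_unprepare:
        /* Aborted before swap: release the *new* state's framebuffers. */
        drm_atomic_helper_unprepare_planes(dev, state);
        return ret;
    }
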
+diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
+index 98d3b10c08ae19..ab03b08433f8f3 100644
+--- a/drivers/gpu/drm/drm_atomic_uapi.c
++++ b/drivers/gpu/drm/drm_atomic_uapi.c
+@@ -585,7 +585,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
+ &state->fb_damage_clips,
+ val,
+ -1,
+- sizeof(struct drm_rect),
++ sizeof(struct drm_mode_rect),
+ &replaced);
+ return ret;
+ } else if (property == plane->scaling_filter_property) {
+diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
+index cf92a9ae8034c9..6899b3dc1f12a5 100644
+--- a/drivers/gpu/drm/drm_auth.c
++++ b/drivers/gpu/drm/drm_auth.c
+@@ -235,7 +235,8 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+ static int
+ drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
+ {
+- if (file_priv->pid == task_pid(current) && file_priv->was_master)
++ if (file_priv->was_master &&
++ rcu_access_pointer(file_priv->pid) == task_tgid(current))
+ return 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
+index 39e68e45bb124b..62d8a291c49c7a 100644
+--- a/drivers/gpu/drm/drm_bridge.c
++++ b/drivers/gpu/drm/drm_bridge.c
+@@ -27,8 +27,9 @@
+ #include <linux/mutex.h>
+
+ #include <drm/drm_atomic_state_helper.h>
+-#include <drm/drm_debugfs.h>
+ #include <drm/drm_bridge.h>
++#include <drm/drm_debugfs.h>
++#include <drm/drm_edid.h>
+ #include <drm/drm_encoder.h>
+ #include <drm/drm_file.h>
+ #include <drm/drm_of.h>
+@@ -686,11 +687,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ */
+ list_for_each_entry_from(next, &encoder->bridge_chain,
+ chain_node) {
+- if (next->pre_enable_prev_first) {
++ if (!next->pre_enable_prev_first) {
+ next = list_prev_entry(next, chain_node);
+ limit = next;
+ break;
+ }
++
++ if (list_is_last(&next->chain_node,
++ &encoder->bridge_chain)) {
++ limit = next;
++ break;
++ }
+ }
+
+ /* Call these bridges in reverse order */
+@@ -773,7 +780,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ /* Found first bridge that does NOT
+ * request prev to be enabled first
+ */
+- limit = list_prev_entry(next, chain_node);
++ limit = next;
+ break;
+ }
+ }
+@@ -1206,6 +1213,47 @@ int drm_bridge_get_modes(struct drm_bridge *bridge,
+ }
+ EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+
++/**
++ * drm_bridge_edid_read - read the EDID data of the connected display
++ * @bridge: bridge control structure
++ * @connector: the connector to read EDID for
++ *
++ * If the bridge supports output EDID retrieval, as reported by the
++ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
++ * the EDID and return it. Otherwise return NULL.
++ *
++ * If &drm_bridge_funcs.edid_read is not set, fall back to using
++ * drm_bridge_get_edid() and wrapping it in struct drm_edid.
++ *
++ * RETURNS:
++ * The retrieved EDID on success, or NULL otherwise.
++ */
++const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
++ struct drm_connector *connector)
++{
++ if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
++ return NULL;
++
++ /* Transitional: Fall back to ->get_edid. */
++ if (!bridge->funcs->edid_read) {
++ const struct drm_edid *drm_edid;
++ struct edid *edid;
++
++ edid = drm_bridge_get_edid(bridge, connector);
++ if (!edid)
++ return NULL;
++
++ drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
++
++ kfree(edid);
++
++ return drm_edid;
++ }
++
++ return bridge->funcs->edid_read(bridge, connector);
++}
++EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
++
+ /**
+ * drm_bridge_get_edid - get the EDID data of the connected display
+ * @bridge: bridge control structure
+@@ -1215,6 +1263,8 @@ EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
+ * get the EDID and return it. Otherwise return NULL.
+ *
++ * Deprecated. Prefer using drm_bridge_edid_read().
++ *
+ * RETURNS:
+ * The retrieved EDID on success, or NULL otherwise.
+ */
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index e6f5ba5f4bafd0..d9cbf4e3327cdb 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
+ u64 start, u64 end,
+ unsigned int order)
+ {
++ u64 req_size = mm->chunk_size << order;
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *buddy;
+ LIST_HEAD(dfs);
+@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
+ if (drm_buddy_block_is_allocated(block))
+ continue;
+
++ if (block_start < start || block_end > end) {
++ u64 adjusted_start = max(block_start, start);
++ u64 adjusted_end = min(block_end, end);
++
++ if (round_down(adjusted_end + 1, req_size) <=
++ round_up(adjusted_start, req_size))
++ continue;
++ }
++
+ if (contains(start, end, block_start, block_end) &&
+ order == drm_buddy_block_order(block)) {
+ /*
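
Editorial worked example (not part of the patch) of the partial-overlap test added above, assuming a 4K chunk_size:

    /*
     * chunk_size = 4K, order = 2, so req_size = 16K. A block spanning
     * [0K, 32K) clipped by start = 20K gives:
     *
     *   adjusted_start = 20K, adjusted_end = 32K - 1
     *   round_down(adjusted_end + 1, 16K) = 32K
     *   round_up(adjusted_start, 16K)     = 32K
     *
     * 32K <= 32K, so no 16K-aligned slot fits inside the clipped range
     * and the block is skipped instead of being split pointlessly.
     */
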
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 871e4e2129d6da..51df7244de7185 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ unsigned int total_modes_count = 0;
+ struct drm_client_offset *offsets;
+ unsigned int connector_count = 0;
++ /* points to modes protected by mode_config.mutex */
+ struct drm_display_mode **modes;
+ struct drm_crtc **crtcs;
+ int i, ret = 0;
+@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+ drm_client_pick_crtcs(client, connectors, connector_count,
+ crtcs, modes, 0, width, height);
+ }
+- mutex_unlock(&dev->mode_config.mutex);
+
+ drm_client_modeset_release(client);
+
+@@ -869,12 +869,18 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
+
+ kfree(modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev, mode);
++ if (!modeset->mode) {
++ ret = -ENOMEM;
++ break;
++ }
++
+ drm_connector_get(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
+ modeset->x = offset->x;
+ modeset->y = offset->y;
+ }
+ }
++ mutex_unlock(&dev->mode_config.mutex);
+
+ mutex_unlock(&client->modeset_mutex);
+ out:
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index c44d5bcf12847b..309aad5f0c808d 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -2925,7 +2925,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ else
+- drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
++ drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n",
+ connector->base.id, connector->name);
+ }
+
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index df9bf3c9206e71..65f9f66933bba2 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ struct drm_mode_set set;
+ uint32_t __user *set_connectors_ptr;
+ struct drm_modeset_acquire_ctx ctx;
+- int ret;
+- int i;
++ int ret, i, num_connectors = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EOPNOTSUPP;
+@@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ connector->name);
+
+ connector_set[i] = connector;
++ num_connectors++;
+ }
+ }
+
+@@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+- set.num_connectors = crtc_req->count_connectors;
++ set.num_connectors = num_connectors;
+ set.fb = fb;
+
+ if (drm_drv_uses_atomic_modeset(dev))
+@@ -892,7 +892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ drm_framebuffer_put(fb);
+
+ if (connector_set) {
+- for (i = 0; i < crtc_req->count_connectors; i++) {
++ for (i = 0; i < num_connectors; i++) {
+ if (connector_set[i])
+ drm_connector_put(connector_set[i]);
+ }
+@@ -904,6 +904,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ connector_set = NULL;
+ fb = NULL;
+ mode = NULL;
++ num_connectors = 0;
+
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
+
+diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
+index d8b2955e88fd0a..afb02aae707b4f 100644
+--- a/drivers/gpu/drm/drm_damage_helper.c
++++ b/drivers/gpu/drm/drm_damage_helper.c
+@@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
+
+- if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
++ if (!iter->clips || state->ignore_damage_clips ||
++ !drm_rect_equals(&state->src, &old_state->src)) {
+ iter->clips = NULL;
+ iter->num_clips = 0;
+ iter->full_update = true;
+diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
+index 2de43ff3ce0a43..41b0682c638ef0 100644
+--- a/drivers/gpu/drm/drm_debugfs.c
++++ b/drivers/gpu/drm/drm_debugfs.c
+@@ -92,15 +92,17 @@ static int drm_clients_info(struct seq_file *m, void *data)
+ */
+ mutex_lock(&dev->filelist_mutex);
+ list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
+- struct task_struct *task;
+ bool is_current_master = drm_is_current_master(priv);
++ struct task_struct *task;
++ struct pid *pid;
+
+- rcu_read_lock(); /* locks pid_task()->comm */
+- task = pid_task(priv->pid, PIDTYPE_TGID);
++ rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */
++ pid = rcu_dereference(priv->pid);
++ task = pid_task(pid, PIDTYPE_TGID);
+ uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
+ task ? task->comm : "<unknown>",
+- pid_vnr(priv->pid),
++ pid_vnr(pid),
+ priv->minor->index,
+ is_current_master ? 'y' : 'n',
+ priv->authenticated ? 'y' : 'n',
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 3eda026ffac6a9..d453d710ef0c10 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -34,6 +34,7 @@
+ #include <linux/pseudo_fs.h>
+ #include <linux/slab.h>
+ #include <linux/srcu.h>
++#include <linux/xarray.h>
+
+ #include <drm/drm_accel.h>
+ #include <drm/drm_cache.h>
+@@ -54,8 +55,7 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
+ MODULE_DESCRIPTION("DRM shared core routines");
+ MODULE_LICENSE("GPL and additional rights");
+
+-static DEFINE_SPINLOCK(drm_minor_lock);
+-static struct idr drm_minors_idr;
++DEFINE_XARRAY_ALLOC(drm_minors_xa);
+
+ /*
+ * If the drm core fails to init for whatever reason,
+@@ -83,6 +83,18 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu);
+ * registered and unregistered dynamically according to device-state.
+ */
+
++static struct xarray *drm_minor_get_xa(enum drm_minor_type type)
++{
++ if (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER)
++ return &drm_minors_xa;
++#if IS_ENABLED(CONFIG_DRM_ACCEL)
++ else if (type == DRM_MINOR_ACCEL)
++ return &accel_minors_xa;
++#endif
++ else
++ return ERR_PTR(-EOPNOTSUPP);
++}
++
+ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ enum drm_minor_type type)
+ {
+@@ -101,25 +113,31 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ static void drm_minor_alloc_release(struct drm_device *dev, void *data)
+ {
+ struct drm_minor *minor = data;
+- unsigned long flags;
+
+ WARN_ON(dev != minor->dev);
+
+ put_device(minor->kdev);
+
+- if (minor->type == DRM_MINOR_ACCEL) {
+- accel_minor_remove(minor->index);
+- } else {
+- spin_lock_irqsave(&drm_minor_lock, flags);
+- idr_remove(&drm_minors_idr, minor->index);
+- spin_unlock_irqrestore(&drm_minor_lock, flags);
+- }
++ xa_erase(drm_minor_get_xa(minor->type), minor->index);
+ }
+
++/*
++ * DRM used to support 64 devices; for backwards compatibility we need to maintain the
++ * minor allocation scheme where minors 0-63 are primary nodes, 64-127 are control nodes,
++ * and 128-191 are render nodes.
++ * After reaching the limit, minors are allocated dynamically - first-come, first-served.
++ * Accel nodes use a distinct major, so their minors are allocated in a continuous 0-MAX
++ * range.
++ */
++#define DRM_MINOR_LIMIT(t) ({ \
++ typeof(t) _t = (t); \
++ _t == DRM_MINOR_ACCEL ? XA_LIMIT(0, ACCEL_MAX_MINORS) : XA_LIMIT(64 * _t, 64 * _t + 63); \
++})
++#define DRM_EXTENDED_MINOR_LIMIT XA_LIMIT(192, (1 << MINORBITS) - 1)
++
+ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
+ {
+ struct drm_minor *minor;
+- unsigned long flags;
+ int r;
+
+ minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
+@@ -129,25 +147,14 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
+ minor->type = type;
+ minor->dev = dev;
+
+- idr_preload(GFP_KERNEL);
+- if (type == DRM_MINOR_ACCEL) {
+- r = accel_minor_alloc();
+- } else {
+- spin_lock_irqsave(&drm_minor_lock, flags);
+- r = idr_alloc(&drm_minors_idr,
+- NULL,
+- 64 * type,
+- 64 * (type + 1),
+- GFP_NOWAIT);
+- spin_unlock_irqrestore(&drm_minor_lock, flags);
+- }
+- idr_preload_end();
+-
++ r = xa_alloc(drm_minor_get_xa(type), &minor->index,
++ NULL, DRM_MINOR_LIMIT(type), GFP_KERNEL);
++ if (r == -EBUSY && (type == DRM_MINOR_PRIMARY || type == DRM_MINOR_RENDER))
++ r = xa_alloc(&drm_minors_xa, &minor->index,
++ NULL, DRM_EXTENDED_MINOR_LIMIT, GFP_KERNEL);
+ if (r < 0)
+ return r;
+
+- minor->index = r;
+-
+ r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
+ if (r)
+ return r;
+@@ -163,7 +170,7 @@ static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
+ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
+ {
+ struct drm_minor *minor;
+- unsigned long flags;
++ void *entry;
+ int ret;
+
+ DRM_DEBUG("\n");
+@@ -187,13 +194,12 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
+ goto err_debugfs;
+
+ /* replace NULL with @minor so lookups will succeed from now on */
+- if (minor->type == DRM_MINOR_ACCEL) {
+- accel_minor_replace(minor, minor->index);
+- } else {
+- spin_lock_irqsave(&drm_minor_lock, flags);
+- idr_replace(&drm_minors_idr, minor, minor->index);
+- spin_unlock_irqrestore(&drm_minor_lock, flags);
++ entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
++ if (xa_is_err(entry)) {
++ ret = xa_err(entry);
++ goto err_debugfs;
+ }
++ WARN_ON(entry);
+
+ DRM_DEBUG("new minor registered %d\n", minor->index);
+ return 0;
+@@ -206,20 +212,13 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
+ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
+ {
+ struct drm_minor *minor;
+- unsigned long flags;
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor || !device_is_registered(minor->kdev))
+ return;
+
+ /* replace @minor with NULL so lookups will fail from now on */
+- if (minor->type == DRM_MINOR_ACCEL) {
+- accel_minor_replace(NULL, minor->index);
+- } else {
+- spin_lock_irqsave(&drm_minor_lock, flags);
+- idr_replace(&drm_minors_idr, NULL, minor->index);
+- spin_unlock_irqrestore(&drm_minor_lock, flags);
+- }
++ xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);
+
+ device_del(minor->kdev);
+ dev_set_drvdata(minor->kdev, NULL); /* safety belt */
+@@ -235,16 +234,15 @@ static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type typ
+ * minor->dev pointer will stay valid! However, the device may get unplugged and
+ * unregistered while you hold the minor.
+ */
+-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
++struct drm_minor *drm_minor_acquire(struct xarray *minor_xa, unsigned int minor_id)
+ {
+ struct drm_minor *minor;
+- unsigned long flags;
+
+- spin_lock_irqsave(&drm_minor_lock, flags);
+- minor = idr_find(&drm_minors_idr, minor_id);
++ xa_lock(minor_xa);
++ minor = xa_load(minor_xa, minor_id);
+ if (minor)
+ drm_dev_get(minor->dev);
+- spin_unlock_irqrestore(&drm_minor_lock, flags);
++ xa_unlock(minor_xa);
+
+ if (!minor) {
+ return ERR_PTR(-ENODEV);
+@@ -940,8 +938,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
+ goto err_minors;
+ }
+
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
+- drm_modeset_register_all(dev);
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++ ret = drm_modeset_register_all(dev);
++ if (ret)
++ goto err_unload;
++ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver->name, driver->major, driver->minor,
+@@ -951,6 +952,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
+
+ goto out_unlock;
+
++err_unload:
++ if (dev->driver->unload)
++ dev->driver->unload(dev);
+ err_minors:
+ remove_compat_control_link(dev);
+ drm_minor_unregister(dev, DRM_MINOR_ACCEL);
+@@ -1032,7 +1036,7 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
+
+ DRM_DEBUG("\n");
+
+- minor = drm_minor_acquire(iminor(inode));
++ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+
+@@ -1067,7 +1071,7 @@ static void drm_core_exit(void)
+ unregister_chrdev(DRM_MAJOR, "drm");
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+- idr_destroy(&drm_minors_idr);
++ WARN_ON(!xa_empty(&drm_minors_xa));
+ drm_connector_ida_destroy();
+ }
+
+@@ -1076,7 +1080,6 @@ static int __init drm_core_init(void)
+ int ret;
+
+ drm_connector_ida_init();
+- idr_init(&drm_minors_idr);
+ drm_memcpy_init_early();
+
+ ret = drm_sysfs_init();
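
Editorial summary (not part of the patch) of the ranges the DRM_MINOR_LIMIT() macro above encodes, assuming the usual enum values DRM_MINOR_PRIMARY = 0, DRM_MINOR_CONTROL = 1, DRM_MINOR_RENDER = 2:

    /*
     * DRM_MINOR_PRIMARY -> XA_LIMIT(0, 63)
     * DRM_MINOR_CONTROL -> XA_LIMIT(64, 127)   (kept for compatibility)
     * DRM_MINOR_RENDER  -> XA_LIMIT(128, 191)
     * DRM_MINOR_ACCEL   -> XA_LIMIT(0, ACCEL_MAX_MINORS) in accel_minors_xa
     *
     * Once a legacy range is exhausted, primary/render allocations retry
     * with DRM_EXTENDED_MINOR_LIMIT, i.e. XA_LIMIT(192, (1 << MINORBITS) - 1),
     * first come, first served in the extended space.
     */
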
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 4b71040ae5be5c..ee3fab115c4b5b 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -2308,7 +2308,8 @@ int drm_edid_override_connector_update(struct drm_connector *connector)
+
+ override = drm_edid_override_get(connector);
+ if (override) {
+- num_modes = drm_edid_connector_update(connector, override);
++ if (drm_edid_connector_update(connector, override) == 0)
++ num_modes = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(override);
+
+@@ -3499,11 +3500,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+- /* Some EDIDs have bogus h/vtotal values */
+- if (mode->hsync_end > mode->htotal)
+- mode->htotal = mode->hsync_end + 1;
+- if (mode->vsync_end > mode->vtotal)
+- mode->vtotal = mode->vsync_end + 1;
++ /* Some EDIDs have bogus h/vsync_end values */
++ if (mode->hsync_end > mode->htotal) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
++ connector->base.id, connector->name,
++ mode->hsync_end, mode->htotal);
++ mode->hsync_end = mode->htotal;
++ }
++ if (mode->vsync_end > mode->vtotal) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
++ connector->base.id, connector->name,
++ mode->vsync_end, mode->vtotal);
++ mode->vsync_end = mode->vtotal;
++ }
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+@@ -7312,7 +7321,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
+ static bool displayid_is_tiled_block(const struct displayid_iter *iter,
+ const struct displayid_block *block)
+ {
+- return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
++ return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
+ block->tag == DATA_BLOCK_TILED_DISPLAY) ||
+ (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index d612133e2cf7ec..618b045230336e 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
++ if (!drm_leak_fbdev_smem)
++ info->flags |= FBINFO_HIDE_SMEM_START;
++
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret)
+ goto err_release;
+@@ -628,6 +631,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
+ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
+ u32 width, u32 height)
+ {
++ /*
++ * This function may be invoked by panic() to flush the frame
++ * buffer, where all CPUs except the panic CPU are stopped.
++ * During the following schedule_work(), the panic CPU needs
++ * the worker_pool lock, which might be held by a stopped CPU,
++ * causing schedule_work() and panic() to block. Return early on
++ * oops_in_progress to prevent this blocking.
++ */
++ # disabling unused clocks conflicts with the MDSS runtime PM trying to
++ # disable those clocks and causes boot to fail.
++
+ drm_fb_helper_add_damage_clip(helper, x, y, width, height);
+
+ schedule_work(&helper->damage_work);
+@@ -1860,9 +1874,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
+ info = fb_helper->info;
+ info->var.pixclock = 0;
+
+- if (!drm_leak_fbdev_smem)
+- info->flags |= FBINFO_HIDE_SMEM_START;
+-
+ /* Need to drop locks to avoid recursive deadlock in
+ * register_framebuffer. This is ok because the only thing left to do is
+ * register the fbdev emulation instance in kernel_fb_helper_list. */
+diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
+index 6c9427bb4053ba..13cd754af311d1 100644
+--- a/drivers/gpu/drm/drm_fbdev_dma.c
++++ b/drivers/gpu/drm/drm_fbdev_dma.c
+@@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
++ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
++ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
++ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
++ }
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
+index d647d89764cb98..b4659cd6285ab6 100644
+--- a/drivers/gpu/drm/drm_fbdev_generic.c
++++ b/drivers/gpu/drm/drm_fbdev_generic.c
+@@ -113,7 +113,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ /* screen */
+ info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ info->screen_buffer = screen_buffer;
+- info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
+ info->fix.smem_len = screen_size;
+
+ /* deferred I/O */
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index 883d83bc0e3d5f..48af0e2960a226 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -160,7 +160,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
+
+ /* Get a unique identifier for fdinfo: */
+ file->client_id = atomic64_inc_return(&ident);
+- file->pid = get_pid(task_tgid(current));
++ rcu_assign_pointer(file->pid, get_pid(task_tgid(current)));
+ file->minor = minor;
+
+ /* for compatibility root is always authenticated */
+@@ -200,7 +200,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
+ drm_syncobj_release(file);
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ drm_gem_release(dev, file);
+- put_pid(file->pid);
++ put_pid(rcu_access_pointer(file->pid));
+ kfree(file);
+
+ return ERR_PTR(ret);
+@@ -291,7 +291,7 @@ void drm_file_free(struct drm_file *file)
+
+ WARN_ON(!list_empty(&file->event_list));
+
+- put_pid(file->pid);
++ put_pid(rcu_access_pointer(file->pid));
+ kfree(file);
+ }
+
+@@ -413,7 +413,7 @@ int drm_open(struct inode *inode, struct file *filp)
+ int retcode;
+ int need_setup = 0;
+
+- minor = drm_minor_acquire(iminor(inode));
++ minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
+
+@@ -505,6 +505,38 @@ int drm_release(struct inode *inode, struct file *filp)
+ }
+ EXPORT_SYMBOL(drm_release);
+
++void drm_file_update_pid(struct drm_file *filp)
++{
++ struct drm_device *dev;
++ struct pid *pid, *old;
++
++ /*
++ * Master nodes need to keep the original ownership in order for
++ * drm_master_check_perm to keep working correctly. (See comment in
++ * drm_auth.c.)
++ */
++ if (filp->was_master)
++ return;
++
++ pid = task_tgid(current);
++
++ /*
++ * Quick unlocked check since the model is a single handover followed by
++ * exclusive repeated use.
++ */
++ if (pid == rcu_access_pointer(filp->pid))
++ return;
++
++ dev = filp->minor->dev;
++ mutex_lock(&dev->filelist_mutex);
++ get_pid(pid);
++ old = rcu_replace_pointer(filp->pid, pid, 1);
++ mutex_unlock(&dev->filelist_mutex);
++
++ synchronize_rcu();
++ put_pid(old);
++}
++
+ /**
+ * drm_release_noglobal - release method for DRM file
+ * @inode: device inode
+@@ -924,7 +956,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
+ {
+ struct drm_gem_object *obj;
+ struct drm_memory_stats status = {};
+- enum drm_gem_object_status supported_status;
++ enum drm_gem_object_status supported_status = 0;
+ int id;
+
+ spin_lock(&file->table_lock);
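
Editorial note (not part of the patch) on the locking model behind drm_file_update_pid() above: writers serialize on filelist_mutex, the lone unlocked read is safe because ownership changes are a single handover, and dereferencing readers take the RCU read lock. A sketch of the reader side, mirroring the drm_debugfs hunk earlier in this patch:

    static void example_show_owner(struct drm_file *file_priv, struct seq_file *m)
    {
        struct task_struct *task;
        struct pid *pid;

        rcu_read_lock(); /* protects file_priv->pid and pid_task()->comm */
        pid = rcu_dereference(file_priv->pid);
        task = pid_task(pid, PIDTYPE_TGID);
        seq_printf(m, "%s %d\n",
                   task ? task->comm : "<unknown>", pid_vnr(pid));
        rcu_read_unlock();
    }
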
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index aff3746dedfb48..1955eaeba0ab7c 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -570,7 +570,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+- int ret;
++ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index e435f986cd135b..1ff0678be7c75b 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -610,6 +610,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
+ return ret;
+ }
+
++ if (is_cow_mapping(vma->vm_flags))
++ return -EINVAL;
++
+ dma_resv_lock(shmem->base.resv, NULL);
+ ret = drm_gem_shmem_get_pages(shmem);
+ dma_resv_unlock(shmem->base.resv);
+diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
+index ba12acd551390b..0ef5fc2a61f194 100644
+--- a/drivers/gpu/drm/drm_internal.h
++++ b/drivers/gpu/drm/drm_internal.h
+@@ -77,10 +77,6 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
+
+-/* drm_drv.c */
+-struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+-void drm_minor_release(struct drm_minor *minor);
+-
+ /* drm_managed.c */
+ void drm_managed_release(struct drm_device *dev);
+ void drmm_add_final_kfree(struct drm_device *dev, void *container);
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index f03ffbacfe9b48..77590b0f38fa38 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -776,6 +776,9 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
+ struct drm_device *dev = file_priv->minor->dev;
+ int retcode;
+
++ /* Update drm_file owner if fd was passed along. */
++ drm_file_update_pid(file_priv);
++
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index 150fe155506809..94375c6a542564 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -510,8 +510,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ /* Handle leased objects, if any */
+ idr_init(&leases);
+ if (object_count != 0) {
+- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+- array_size(object_count, sizeof(__u32)));
++ object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
++ object_count, sizeof(__u32));
+ if (IS_ERR(object_ids)) {
+ ret = PTR_ERR(object_ids);
+ idr_destroy(&leases);
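
Editorial note (not part of the patch): memdup_array_user() folds the multiplication-overflow check into the copy itself, so the open-coded array_size() pattern can be dropped.

    /*
     * Before/after sketch:
     *
     *     buf = memdup_user(ptr, array_size(n, sizeof(*buf)));
     *
     * becomes
     *
     *     buf = memdup_array_user(ptr, n, sizeof(*buf));
     *
     * which returns ERR_PTR(-EOVERFLOW) if n * sizeof(*buf) would
     * overflow, instead of relying on array_size() saturating to SIZE_MAX.
     */
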
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index 14201f73aab134..52a93149363b46 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -347,7 +347,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
+ {
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+- mipi_dsi_detach(dsi);
++ if (dsi->attached)
++ mipi_dsi_detach(dsi);
+ mipi_dsi_device_unregister(dsi);
+
+ return 0;
+@@ -370,11 +371,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
+ int mipi_dsi_attach(struct mipi_dsi_device *dsi)
+ {
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
++ int ret;
+
+ if (!ops || !ops->attach)
+ return -ENOSYS;
+
+- return ops->attach(dsi->host, dsi);
++ ret = ops->attach(dsi->host, dsi);
++ if (ret)
++ return ret;
++
++ dsi->attached = true;
++
++ return 0;
+ }
+ EXPORT_SYMBOL(mipi_dsi_attach);
+
+@@ -386,9 +394,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
+ {
+ const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+
++ if (WARN_ON(!dsi->attached))
++ return -EINVAL;
++
+ if (!ops || !ops->detach)
+ return -ENOSYS;
+
++ dsi->attached = false;
++
+ return ops->detach(dsi->host, dsi);
+ }
+ EXPORT_SYMBOL(mipi_dsi_detach);
+@@ -641,7 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
++int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+ {
+ /* Note: Needs updating for non-default PPS or algorithm */
+ u8 tx[2] = { enable << 0, 0 };
+@@ -666,8 +679,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+- const struct drm_dsc_picture_parameter_set *pps)
++int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
++ const struct drm_dsc_picture_parameter_set *pps)
+ {
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
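
Editorial note (not part of the patch): a minimal sketch of the attach/detach balance the new dsi->attached flag enforces; example_setup() is a hypothetical stand-in for device-specific initialisation.

    static int example_dsi_probe(struct mipi_dsi_device *dsi)
    {
        int ret;

        ret = mipi_dsi_attach(dsi); /* sets dsi->attached on success */
        if (ret)
            return ret;

        ret = example_setup(dsi); /* hypothetical */
        if (ret) {
            mipi_dsi_detach(dsi); /* clears dsi->attached */
            return ret;
        }

        return 0;
    }

    /*
     * If probe fails before mipi_dsi_attach(), the host's remove path
     * above no longer calls mipi_dsi_detach() on the never-attached device.
     */
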
+diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
+index f858dfedf2cfcf..2c582020cb4237 100644
+--- a/drivers/gpu/drm/drm_modeset_helper.c
++++ b/drivers/gpu/drm/drm_modeset_helper.c
+@@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
+
+ if (!dev)
+ return 0;
++ /*
++ * Don't disable polling if it was never initialized
++ */
++ if (dev->mode_config.poll_enabled)
++ drm_kms_helper_poll_disable(dev);
+
+- drm_kms_helper_poll_disable(dev);
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+ state = drm_atomic_helper_suspend(dev);
+ if (IS_ERR(state)) {
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+- drm_kms_helper_poll_enable(dev);
++ /*
++ * Don't enable polling if it was never initialized
++ */
++ if (dev->mode_config.poll_enabled)
++ drm_kms_helper_poll_enable(dev);
++
+ return PTR_ERR(state);
+ }
+
+@@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
+ dev->mode_config.suspend_state = NULL;
+
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+- drm_kms_helper_poll_enable(dev);
++ /*
++ * Don't enable polling if it is not initialized
++ */
++ if (dev->mode_config.poll_enabled)
++ drm_kms_helper_poll_enable(dev);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
+index e814020bbcd3b3..cfbe020de54e01 100644
+--- a/drivers/gpu/drm/drm_panel.c
++++ b/drivers/gpu/drm/drm_panel.c
+@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
+ * The modes probed from the panel are automatically added to the connector
+ * that the panel is attached to.
+ *
+- * Return: The number of modes available from the panel on success or a
+- * negative error code on failure.
++ * Return: The number of modes available from the panel on success, or 0 on
++ * failure (no modes).
+ */
+ int drm_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+ {
+ if (!panel)
+- return -EINVAL;
++ return 0;
+
+- if (panel->funcs && panel->funcs->get_modes)
+- return panel->funcs->get_modes(panel, connector);
++ if (panel->funcs && panel->funcs->get_modes) {
++ int num;
+
+- return -EOPNOTSUPP;
++ num = panel->funcs->get_modes(panel, connector);
++ if (num > 0)
++ return num;
++ }
++
++ return 0;
+ }
+ EXPORT_SYMBOL(drm_panel_get_modes);
+
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index d5c15292ae9378..5b2506c65e9520 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+ };
+
++static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
++ .width = 1080,
++ .height = 1920,
++ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
+ .width = 1200,
+ .height = 1920,
+@@ -196,6 +202,24 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
++ }, { /* AYA NEO KUN */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
++ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
++ },
++ .driver_data = (void *)&lcd1600x2560_rightside_up,
++ }, { /* AYN Loki Max */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Max"),
++ },
++ .driver_data = (void *)&lcd1080x1920_leftside_up,
++ }, { /* AYN Loki Zero */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Zero"),
++ },
++ .driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* Chuwi HiBook (CWI514) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+@@ -279,6 +303,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
++ }, { /* GPD Win Mini */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01")
++ },
++ .driver_data = (void *)&lcd1080x1920_rightside_up,
+ }, { /* I.T.Works TW891 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+@@ -336,6 +366,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
++ }, { /* Lenovo Legion Go 8APU1 */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
++ },
++ .driver_data = (void *)&lcd1600x2560_leftside_up,
+ }, { /* Lenovo Yoga Book X90F / X90L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+@@ -390,6 +426,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
++ }, { /* OrangePi Neo */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
++ },
++ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Samsung GalaxyBook 10.6 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+@@ -403,6 +445,13 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
++ }, { /* Valve Steam Deck */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
++ },
++ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* VIOS LTH17 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 24e7998d17313e..311e179904a2ab 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -678,6 +678,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ !file_priv->universal_planes)
+ continue;
+
++ /*
++ * If we're running on a virtualized driver then,
++ * unless userspace advertizes support for the
++ * virtualized cursor plane, disable cursor planes
++ * because they'll be broken due to missing cursor
++ * hotspot info.
++			 * unless userspace advertises support for the
++ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
++ drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
++ file_priv->atomic &&
++ !file_priv->supports_virtualized_cursor_plane)
++ continue;
++
+ if (drm_lease_held(file_priv, plane->base.id)) {
+ if (count < plane_resp->count_planes &&
+ put_user(plane->base.id, plane_ptr + count))
+@@ -1387,6 +1400,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ out:
+ if (fb)
+ drm_framebuffer_put(fb);
++ fb = NULL;
+ if (plane->old_fb)
+ drm_framebuffer_put(plane->old_fb);
+ plane->old_fb = NULL;
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+index 63b709a67471b9..03bd3c7bd0dc2c 100644
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+ }
+ EXPORT_SYMBOL(drm_gem_dmabuf_release);
+
+-/*
++/**
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: drm_device to import into
+ * @file_priv: drm file-private structure
+@@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+-static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+- struct drm_file *file_priv, int prime_fd,
+- uint32_t *handle)
++int drm_gem_prime_fd_to_handle(struct drm_device *dev,
++ struct drm_file *file_priv, int prime_fd,
++ uint32_t *handle)
+ {
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+@@ -360,6 +360,7 @@ static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ dma_buf_put(dma_buf);
+ return ret;
+ }
++EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+@@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ return dmabuf;
+ }
+
+-/*
++/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+@@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ * The actual exporting from GEM object to a dma-buf is done through the
+ * &drm_gem_object_funcs.export callback.
+ */
+-static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+- struct drm_file *file_priv, uint32_t handle,
+- uint32_t flags,
+- int *prime_fd)
++int drm_gem_prime_handle_to_fd(struct drm_device *dev,
++ struct drm_file *file_priv, uint32_t handle,
++ uint32_t flags,
++ int *prime_fd)
+ {
+ struct drm_gem_object *obj;
+ int ret = 0;
+@@ -506,6 +507,7 @@ static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+
+ return ret;
+ }
++EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+@@ -580,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
+ {
+ struct drm_gem_object *obj = dma_buf->priv;
+
+- if (!obj->funcs->get_sg_table)
++ /*
++ * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
++ * that implement their own ->map_dma_buf() do not.
++ */
++ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
++ !obj->funcs->get_sg_table)
+ return -ENOSYS;
+
+ return drm_gem_pin(obj);
+@@ -818,7 +825,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+ if (max_segment == 0)
+ max_segment = UINT_MAX;
+ err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+- nr_pages << PAGE_SHIFT,
++ (unsigned long)nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (err) {
+ kfree(sg);
+@@ -864,9 +871,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+- * This is the implementation of the &drm_gem_object_funcs.export functions
+- * for GEM drivers using the PRIME helpers. It is used as the default for
+- * drivers that do not set their own.
++ * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
++ * using the PRIME helpers. It is used as the default in
++ * drm_gem_prime_handle_to_fd().
+ */
+ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
+ int flags)
+@@ -962,9 +969,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
+ * @dev: drm_device to import into
+ * @dma_buf: dma-buf object to import
+ *
+- * This is the implementation of the gem_prime_import functions for GEM
+- * drivers using the PRIME helpers. It is the default for drivers that do
+- * not set their own &drm_driver.gem_prime_import.
++ * This is the implementation of the gem_prime_import functions for GEM drivers
++ * using the PRIME helpers. Drivers can use this as their
++ * &drm_driver.gem_prime_import implementation. It is used as the default
++ * implementation in drm_gem_prime_fd_to_handle().
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
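
Editorial sketch (not part of the patch) of why the PRIME fd/handle helpers are exported again: a driver can reference them directly from its struct drm_driver rather than relying on the core defaults. The example_driver name is an assumption.

    static const struct drm_driver example_driver = {
        .driver_features    = DRIVER_GEM | DRIVER_RENDER,
        /* Explicitly wire the re-exported PRIME helpers. */
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        /* ...fops, name, etc. elided... */
    };
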
+diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
+index 5b93c11895bb1e..aab76334083e8a 100644
+--- a/drivers/gpu/drm/drm_print.c
++++ b/drivers/gpu/drm/drm_print.c
+@@ -100,8 +100,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
+ copy = iterator->remain;
+
+ /* Copy out the bit of the string that we need */
+- memcpy(iterator->data,
+- str + (iterator->start - iterator->offset), copy);
++ if (iterator->data)
++ memcpy(iterator->data,
++ str + (iterator->start - iterator->offset), copy);
+
+ iterator->offset = iterator->start + copy;
+ iterator->remain -= copy;
+@@ -110,7 +111,8 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str)
+
+ len = min_t(ssize_t, strlen(str), iterator->remain);
+
+- memcpy(iterator->data + pos, str, len);
++ if (iterator->data)
++ memcpy(iterator->data + pos, str, len);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+@@ -140,8 +142,9 @@ void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
+ if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
+ ssize_t pos = iterator->offset - iterator->start;
+
+- snprintf(((char *) iterator->data) + pos,
+- iterator->remain, "%pV", vaf);
++ if (iterator->data)
++ snprintf(((char *) iterator->data) + pos,
++ iterator->remain, "%pV", vaf);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 3f479483d7d80f..c90afb5d089871 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev)
+ * Drivers can call this helper from their device resume implementation. It is
+ * not an error to call this even when output polling isn't enabled.
+ *
++ * If device polling was never initialized before, this call will trigger a
++ * warning and return.
++ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
+ */
+ void drm_kms_helper_poll_enable(struct drm_device *dev)
+ {
+- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
+- dev->mode_config.poll_running)
++ if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) ||
++ !drm_kms_helper_poll || dev->mode_config.poll_running)
+ return;
+
+ if (drm_kms_helper_enable_hpd(dev) ||
+@@ -419,6 +422,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
+
+ count = connector_funcs->get_modes(connector);
+
++ /* The .get_modes() callback should not return negative values. */
++ if (count < 0) {
++ drm_err(connector->dev, ".get_modes() returned %pe\n",
++ ERR_PTR(count));
++ count = 0;
++ }
++
+ /*
+ * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+ * override/firmware EDID.
+@@ -619,8 +629,12 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 0);
+ }
+
+- /* Re-enable polling in case the global poll config changed. */
+- drm_kms_helper_poll_enable(dev);
++ /*
++ * Re-enable polling in case the global poll config changed but polling
++ * is still initialized.
++ */
++ if (dev->mode_config.poll_enabled)
++ drm_kms_helper_poll_enable(dev);
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+@@ -871,12 +885,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
+ *
++ * If, however, the polling was never initialized, this call will trigger a
++ * warning and return.
++ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
+ */
+ void drm_kms_helper_poll_disable(struct drm_device *dev)
+ {
++ if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled))
++ return;
++
+ if (dev->mode_config.poll_running)
+ drm_kms_helper_disable_hpd(dev);
+
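A rough model of the ordering contract these hunks enforce: enable and disable become warn-and-return no-ops when polling was never initialized, and stay safe to pair otherwise. The names below (struct dev_poll, poll_enable, poll_disable) are hypothetical stand-ins, not the DRM helpers:

#include <stdbool.h>
#include <stdio.h>

struct dev_poll {
	bool initialized;  /* analogue of mode_config.poll_enabled */
	bool running;      /* analogue of mode_config.poll_running */
};

static void poll_enable(struct dev_poll *p)
{
	if (!p->initialized) {      /* drm_WARN_ON_ONCE in the patch */
		fprintf(stderr, "WARN: poll never initialized\n");
		return;
	}
	if (p->running)
		return;
	p->running = true;
}

static void poll_disable(struct dev_poll *p)
{
	if (!p->initialized) {      /* drm_WARN_ON in the patch */
		fprintf(stderr, "WARN: poll never initialized\n");
		return;
	}
	p->running = false;
}

int main(void)
{
	struct dev_poll p = { .initialized = false };

	poll_disable(&p);           /* warns instead of touching state */
	p.initialized = true;
	poll_enable(&p);
	poll_disable(&p);
	printf("running=%d\n", p.running);
	return 0;
}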
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index f7003d1ec5ef1e..7b4ed5ca0a9bd2 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1034,7 +1034,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ uint64_t *points;
+ uint32_t signaled_count, i;
+
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ lockdep_assert_none_held_once();
+
+ points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+@@ -1069,7 +1070,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ dma_fence_put(fence);
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ continue;
+ } else {
+ timeout = -EINVAL;
+@@ -1102,7 +1104,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ * fallthrough and try a 0 timeout wait!
+ */
+
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ for (i = 0; i < count; ++i)
+ drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
+ }
+@@ -1377,10 +1380,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+
+ /* This happens inside the syncobj lock */
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
++ if (!fence)
++ return;
++
+ ret = dma_fence_chain_find_seqno(&fence, entry->point);
+- if (ret != 0 || !fence) {
++ if (ret != 0) {
++ /* The given seqno has not been submitted yet. */
+ dma_fence_put(fence);
+ return;
++ } else if (!fence) {
++ /* If dma_fence_chain_find_seqno returns 0 but sets the fence
++ * to NULL, it implies that the given seqno is signaled and a
++ * later seqno has already been submitted. Assign a stub fence
++ * so that the eventfd still gets signaled below.
++ */
++ fence = dma_fence_get_stub();
+ }
+
+ list_del_init(&entry->node);
+@@ -1407,6 +1421,7 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ struct drm_syncobj *syncobj;
+ struct eventfd_ctx *ev_fd_ctx;
+ struct syncobj_eventfd_entry *entry;
++ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+@@ -1422,13 +1437,15 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ return -ENOENT;
+
+ ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+- if (IS_ERR(ev_fd_ctx))
+- return PTR_ERR(ev_fd_ctx);
++ if (IS_ERR(ev_fd_ctx)) {
++ ret = PTR_ERR(ev_fd_ctx);
++ goto err_fdget;
++ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+- eventfd_ctx_put(ev_fd_ctx);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_kzalloc;
+ }
+ entry->syncobj = syncobj;
+ entry->ev_fd_ctx = ev_fd_ctx;
+@@ -1439,6 +1456,12 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ drm_syncobj_put(syncobj);
+
+ return 0;
++
++err_kzalloc:
++ eventfd_ctx_put(ev_fd_ctx);
++err_fdget:
++ drm_syncobj_put(syncobj);
++ return ret;
+ }
+
+ int
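The ioctl rework above replaces inline cleanup with the kernel's usual goto-unwind idiom: each failure jumps to a label that releases exactly the resources acquired so far, in reverse order, which is what closes the syncobj reference leak on the error paths. A toy sketch of the idiom (acquire/release are invented helpers; unlike the real ioctl, the success path here releases everything too, just to keep the demo self-contained):

#include <stdio.h>
#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(sizeof(struct res));
}

static void release(struct res *r, const char *what)
{
	printf("release %s\n", what);
	free(r);
}

static int do_ioctl(int fail_step)
{
	struct res *syncobj, *ev_fd, *entry;
	int ret = 0;

	syncobj = acquire("syncobj");
	if (!syncobj)
		return -1;

	ev_fd = (fail_step == 1) ? NULL : acquire("eventfd ctx");
	if (!ev_fd) {
		ret = -1;
		goto err_fdget;     /* drop only what we already hold */
	}

	entry = (fail_step == 2) ? NULL : acquire("entry");
	if (!entry) {
		ret = -1;
		goto err_kzalloc;
	}

	release(entry, "entry");
	release(ev_fd, "eventfd ctx");
	release(syncobj, "syncobj");
	return 0;

err_kzalloc:
	release(ev_fd, "eventfd ctx");
err_fdget:
	release(syncobj, "syncobj"); /* previously leaked on these paths */
	return ret;
}

int main(void)
{
	do_ioctl(2);
	return 0;
}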
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+index a8d3fa81e4ec5d..f9bc837e22bddc 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+@@ -494,7 +494,7 @@ static const struct drm_driver etnaviv_drm_driver = {
+ .desc = "etnaviv DRM",
+ .date = "20151214",
+ .major = 1,
+- .minor = 3,
++ .minor = 4,
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index b5f73502e3dd42..69fccbcd92c622 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -356,9 +356,11 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+
+ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+ {
+- if (op & ETNA_PREP_READ)
++ op &= ETNA_PREP_READ | ETNA_PREP_WRITE;
++
++ if (op == ETNA_PREP_READ)
+ return DMA_FROM_DEVICE;
+- else if (op & ETNA_PREP_WRITE)
++ else if (op == ETNA_PREP_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_BIDIRECTIONAL;
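Before this fix, an op of ETNA_PREP_READ | ETNA_PREP_WRITE fell into the first branch and was treated as read-only. Masking first and then comparing for equality makes the combined case map to bidirectional. A standalone sketch with hypothetical flag values:

#include <stdio.h>

#define PREP_READ  0x1
#define PREP_WRITE 0x2

enum dir { FROM_DEVICE, TO_DEVICE, BIDIRECTIONAL };

static enum dir op_to_dir(unsigned int op)
{
	op &= PREP_READ | PREP_WRITE;

	if (op == PREP_READ)          /* read only */
		return FROM_DEVICE;
	else if (op == PREP_WRITE)    /* write only */
		return TO_DEVICE;
	else                          /* both (or neither) */
		return BIDIRECTIONAL;
}

int main(void)
{
	/* before the fix, READ|WRITE wrongly mapped to FROM_DEVICE */
	printf("%d %d %d\n",
	       op_to_dir(PREP_READ),
	       op_to_dir(PREP_WRITE),
	       op_to_dir(PREP_READ | PREP_WRITE));
	return 0;
}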
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index 9276756e1397d3..371e1f2733f6fb 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -632,8 +632,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+ /* Disable TX clock gating on affected core revisions. */
+ if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+ etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+- etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
+- etnaviv_is_model_rev(gpu, GC2000, 0x6203))
++ etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
++ etnaviv_is_model_rev(gpu, GC7000, 0x6203))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
+ /* Disable SE and RA clock gating on affected core revisions. */
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+index 67201242438bed..8665f2658d51b3 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+@@ -265,6 +265,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
+ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+ {
+ struct etnaviv_chip_identity *ident = &gpu->identity;
++ const u32 product_id = ident->product_id;
++ const u32 customer_id = ident->customer_id;
++ const u32 eco_id = ident->eco_id;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
+@@ -278,6 +281,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+ etnaviv_chip_identities[i].eco_id == ~0U)) {
+ memcpy(ident, &etnaviv_chip_identities[i],
+ sizeof(*ident));
++
++ /* Restore some id values as ~0U aka 'don't care' might have been used. */
++ ident->product_id = product_id;
++ ident->customer_id = customer_id;
++ ident->eco_id = eco_id;
++
+ return true;
+ }
+ }
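The hwdb entries may use ~0U as a wildcard, so copying a matched template verbatim could overwrite the IDs actually read from hardware; the fix saves them before the memcpy and restores them afterwards. A minimal illustration (struct ident and the template values are made up):

#include <stdio.h>
#include <string.h>

struct ident { unsigned int product, customer, eco; };

/* table entry with 'don't care' wildcards for customer and eco */
static const struct ident template = { 0x7000, ~0u, ~0u };

static void fill_from_table(struct ident *id)
{
	unsigned int product = id->product;
	unsigned int customer = id->customer;
	unsigned int eco = id->eco;

	memcpy(id, &template, sizeof(*id));

	/* restore the values probed from hardware */
	id->product = product;
	id->customer = customer;
	id->eco = eco;
}

int main(void)
{
	struct ident id = { 0x7000, 0x33, 0x4 };

	fill_from_table(&id);
	printf("%x %x %x\n", id.product, id.customer, id.eco);
	return 0;
}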
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+index 345fec6cb1a4c1..97e406d9ac06f4 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+@@ -38,9 +38,6 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ u32 dma_addr;
+ int change;
+
+- /* block scheduler */
+- drm_sched_stop(&gpu->sched, sched_job);
+-
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+@@ -63,6 +60,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ goto out_no_timeout;
+ }
+
++ /* block scheduler */
++ drm_sched_stop(&gpu->sched, sched_job);
++
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
+
+@@ -76,8 +76,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+
+ out_no_timeout:
+- /* restart scheduler after GPU is usable again */
+- drm_sched_start(&gpu->sched, true);
++ list_add(&sched_job->list, &sched_job->sched->pending_list);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index 4d986077738b9b..bce027552474a6 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb)
+ {
+- struct exynos_drm_plane plane = ctx->planes[win];
++ struct exynos_drm_plane *plane = &ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+- to_exynos_plane_state(plane.base.state);
++ to_exynos_plane_state(plane->base.state);
+ unsigned int alpha = state->base.alpha;
+ unsigned int pixel_alpha;
+ unsigned long val;
+diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
+index 3404ec1367fb92..71ee824c4140bd 100644
+--- a/drivers/gpu/drm/exynos/exynos_dp.c
++++ b/drivers/gpu/drm/exynos/exynos_dp.c
+@@ -288,7 +288,6 @@ struct platform_driver dp_driver = {
+ .remove = exynos_dp_remove,
+ .driver = {
+ .name = "exynos-dp",
+- .owner = THIS_MODULE,
+ .pm = pm_ptr(&exynos_dp_pm_ops),
+ .of_match_table = exynos_dp_match,
+ },
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
+index a971590b813230..e2c7373f20c6b7 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
+@@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ return 0;
+
+ if (!priv->mapping) {
+- void *mapping;
++ void *mapping = NULL;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ mapping = iommu_get_domain_for_dev(priv->dma_dev);
+- else
+- mapping = ERR_PTR(-ENODEV);
+
+- if (IS_ERR(mapping))
+- return PTR_ERR(mapping);
++ if (!mapping)
++ return -ENODEV;
+ priv->mapping = mapping;
+ }
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+index 8399256cb5c9d7..5380fb6c55ae1e 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+@@ -300,6 +300,7 @@ static int exynos_drm_bind(struct device *dev)
+ drm_mode_config_cleanup(drm);
+ exynos_drm_cleanup_dma(drm);
+ kfree(private);
++ dev_set_drvdata(dev, NULL);
+ err_free_drm:
+ drm_dev_put(drm);
+
+@@ -313,6 +314,7 @@ static void exynos_drm_unbind(struct device *dev)
+ drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
++ drm_atomic_helper_shutdown(drm);
+
+ component_unbind_all(drm->dev, drm);
+ drm_mode_config_cleanup(drm);
+@@ -350,9 +352,18 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static void exynos_drm_platform_shutdown(struct platform_device *pdev)
++{
++ struct drm_device *drm = platform_get_drvdata(pdev);
++
++ if (drm)
++ drm_atomic_helper_shutdown(drm);
++}
++
+ static struct platform_driver exynos_drm_platform_driver = {
+ .probe = exynos_drm_platform_probe,
+ .remove = exynos_drm_platform_remove,
++ .shutdown = exynos_drm_platform_shutdown,
+ .driver = {
+ .name = "exynos-drm",
+ .pm = &exynos_drm_pm_ops,
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index 8dde7b1e9b35d9..5bdc246f5fad09 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -661,9 +661,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb, int width)
+ {
+- struct exynos_drm_plane plane = ctx->planes[win];
++ struct exynos_drm_plane *plane = &ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+- to_exynos_plane_state(plane.base.state);
++ to_exynos_plane_state(plane->base.state);
+ uint32_t pixel_format = fb->format->format;
+ unsigned int alpha = state->base.alpha;
+ u32 val = WINCONx_ENWIN;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+index 34cdabc30b4f5e..1456abd5b9dde1 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+@@ -1173,7 +1173,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
+ struct exynos_drm_ipp *ipp = &ctx->ipp;
+
+ ctx->drm_dev = drm_dev;
+- ctx->drm_dev = drm_dev;
++ ipp->drm_dev = drm_dev;
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
+
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
+@@ -1342,7 +1342,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
+ for (i = 0; i < ctx->num_clocks; i++) {
+ ret = clk_prepare_enable(ctx->clocks[i]);
+ if (ret) {
+- while (--i > 0)
++ while (--i >= 0)
+ clk_disable_unprepare(ctx->clocks[i]);
+ return ret;
+ }
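The loop-bound change above fixes a classic unwind off-by-one: with `while (--i > 0)` the first clock (index 0) was never disabled when a later clk_prepare_enable() failed. A self-contained demonstration:

#include <stdio.h>

#define NCLK 3

int main(void)
{
	int enabled[NCLK] = { 0 };
	int i;

	for (i = 0; i < NCLK; i++) {
		if (i == 2)            /* pretend clock 2 fails to enable */
			break;
		enabled[i] = 1;
	}

	/* unwind everything already enabled, including index 0;
	 * `--i > 0` would have left enabled[0] set */
	while (--i >= 0)
		enabled[i] = 0;

	for (i = 0; i < NCLK; i++)
		printf("clk%d enabled=%d\n", i, enabled[i]);
	return 0;
}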
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index f5e1adfcaa514e..e17f9c5c9c90e6 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
+ struct vidi_context *ctx = ctx_from_connector(connector);
+ struct edid *edid;
+ int edid_len;
++ int count;
+
+ /*
+ * the edid data comes from user side and it would be set
+@@ -316,19 +317,23 @@ static int vidi_get_modes(struct drm_connector *connector)
+ */
+ if (!ctx->raw_edid) {
+ DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
+- return -EFAULT;
++ return 0;
+ }
+
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
+- return -ENOMEM;
++ return 0;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+
+- return drm_add_edid_modes(connector, edid);
++ count = drm_add_edid_modes(connector, edid);
++
++ kfree(edid);
++
++ return count;
+ }
+
+ static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index f3aaa4ea3e6820..906133331a4424 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ int ret;
+
+ if (!hdata->ddc_adpt)
+- return -ENODEV;
++ goto no_edid;
+
+ edid = drm_get_edid(connector, hdata->ddc_adpt);
+ if (!edid)
+- return -ENODEV;
++ goto no_edid;
+
+ hdata->dvi_mode = !connector->display_info.is_hdmi;
+ DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ kfree(edid);
+
+ return ret;
++
++no_edid:
++ return drm_add_modes_noedid(connector, 640, 480);
+ }
+
+ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
+@@ -1861,6 +1864,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
+ return ret;
+
+ crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
++ if (IS_ERR(crtc))
++ return PTR_ERR(crtc);
+ crtc->pipe_clk = &hdata->phy_clk;
+
+ ret = hdmi_create_connector(encoder);
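Taken together, these exynos changes enforce the .get_modes() contract that the earlier drm_probe_helper hunk polices: the callback returns a non-negative mode count, degrading to 0 or to a no-EDID fallback instead of an errno, while the probe helper defensively clamps any stray negative value. A toy model (the functions below are stand-ins, not the DRM API):

#include <stdio.h>

static int add_modes_noedid(void) { return 1; /* e.g. 640x480 */ }

static int get_modes(int have_edid)
{
	if (!have_edid)
		return add_modes_noedid();  /* never a negative errno */
	return 4;                           /* modes parsed from EDID */
}

static int probe_get_modes(int have_edid)
{
	int count = get_modes(have_edid);

	if (count < 0) {            /* defensive check in the helper */
		fprintf(stderr, ".get_modes() returned %d\n", count);
		count = 0;
	}
	return count;
}

int main(void)
{
	printf("%d %d\n", probe_get_modes(1), probe_get_modes(0));
	return 0;
}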
+diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
+index 4f302cd5e1a6ca..58fed80c7392a0 100644
+--- a/drivers/gpu/drm/gma500/Makefile
++++ b/drivers/gpu/drm/gma500/Makefile
+@@ -34,7 +34,6 @@ gma500_gfx-y += \
+ psb_intel_lvds.o \
+ psb_intel_modes.o \
+ psb_intel_sdvo.o \
+- psb_lid.o \
+ psb_irq.o
+
+ gma500_gfx-$(CONFIG_ACPI) += opregion.o
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+index f08a6803dc1849..3adc2c9ab72da0 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+@@ -311,6 +311,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ if (!mode)
++ return 0;
++
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
+index dcfcd7b89d4a1d..6dece8f0e380f7 100644
+--- a/drivers/gpu/drm/gma500/psb_device.c
++++ b/drivers/gpu/drm/gma500/psb_device.c
+@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
+ }
+
+ psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
+- /* This must occur after the backlight is properly initialised */
+- psb_lid_timer_init(dev_priv);
++
+ return 0;
+ }
+
+@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
+
+ static void psb_chip_teardown(struct drm_device *dev)
+ {
+- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+- psb_lid_timer_takedown(dev_priv);
+ gma_intel_teardown_gmbus(dev);
+ }
+
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+index f7f709df99b498..bb1cd45c085cd6 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -170,7 +170,6 @@
+
+ #define PSB_NUM_VBLANKS 2
+ #define PSB_WATCHDOG_DELAY (HZ * 2)
+-#define PSB_LID_DELAY (HZ / 10)
+
+ #define PSB_MAX_BRIGHTNESS 100
+
+@@ -424,6 +423,7 @@ struct drm_psb_private {
+ uint32_t pipestat[PSB_NUM_PIPE];
+
+ spinlock_t irqmask_lock;
++ bool irq_enabled;
+
+ /* Power */
+ bool pm_initialized;
+@@ -498,11 +498,7 @@ struct drm_psb_private {
+ /* Hotplug handling */
+ struct work_struct hotplug_work;
+
+- /* LID-Switch */
+- spinlock_t lid_lock;
+- struct timer_list lid_timer;
+ struct psb_intel_opregion opregion;
+- u32 lid_last_state;
+
+ /* Watchdog */
+ uint32_t apm_reg;
+@@ -598,10 +594,6 @@ struct psb_ops {
+ int i2c_bus; /* I2C bus identifier for Moorestown */
+ };
+
+-/* psb_lid.c */
+-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+-
+ /* modesetting */
+ extern void psb_modeset_init(struct drm_device *dev);
+ extern void psb_modeset_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+index 8486de230ec91b..8d1be94a443b24 100644
+--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+@@ -504,6 +504,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ if (!mode)
++ return 0;
++
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index 343c51250207d8..7bbb79b0497d8d 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -327,6 +327,8 @@ int gma_irq_install(struct drm_device *dev)
+
+ gma_irq_postinstall(dev);
+
++ dev_priv->irq_enabled = true;
++
+ return 0;
+ }
+
+@@ -337,6 +339,9 @@ void gma_irq_uninstall(struct drm_device *dev)
+ unsigned long irqflags;
+ unsigned int i;
+
++ if (!dev_priv->irq_enabled)
++ return;
++
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (dev_priv->ops->hotplug_enable)
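The new irq_enabled flag makes uninstall safe to call even when install never ran, so teardown paths cannot touch interrupt state that was never set up. Sketched minimally with hypothetical helpers:

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled;

static void irq_install(void)
{
	irq_enabled = true;
	printf("installed\n");
}

static void irq_uninstall(void)
{
	if (!irq_enabled)     /* tolerate uninstall without install */
		return;
	irq_enabled = false;
	printf("uninstalled\n");
}

int main(void)
{
	irq_uninstall();      /* no-op instead of touching hardware */
	irq_install();
	irq_uninstall();
	return 0;
}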
+diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
+deleted file mode 100644
+index 58a7fe39263601..00000000000000
+--- a/drivers/gpu/drm/gma500/psb_lid.c
++++ /dev/null
+@@ -1,80 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/**************************************************************************
+- * Copyright (c) 2007, Intel Corporation.
+- *
+- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+- **************************************************************************/
+-
+-#include <linux/spinlock.h>
+-
+-#include "psb_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_reg.h"
+-
+-static void psb_lid_timer_func(struct timer_list *t)
+-{
+- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
+- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
+- struct timer_list *lid_timer = &dev_priv->lid_timer;
+- unsigned long irq_flags;
+- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
+- u32 pp_status;
+-
+- if (readl(lid_state) == dev_priv->lid_last_state)
+- goto lid_timer_schedule;
+-
+- if ((readl(lid_state)) & 0x01) {
+- /*lid state is open*/
+- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+- do {
+- pp_status = REG_READ(PP_STATUS);
+- } while ((pp_status & PP_ON) == 0 &&
+- (pp_status & PP_SEQUENCE_MASK) != 0);
+-
+- if (REG_READ(PP_STATUS) & PP_ON) {
+- /*FIXME: should be backlight level before*/
+- psb_intel_lvds_set_brightness(dev, 100);
+- } else {
+- DRM_DEBUG("LVDS panel never powered up");
+- return;
+- }
+- } else {
+- psb_intel_lvds_set_brightness(dev, 0);
+-
+- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+- do {
+- pp_status = REG_READ(PP_STATUS);
+- } while ((pp_status & PP_ON) == 0);
+- }
+- dev_priv->lid_last_state = readl(lid_state);
+-
+-lid_timer_schedule:
+- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+- if (!timer_pending(lid_timer)) {
+- lid_timer->expires = jiffies + PSB_LID_DELAY;
+- add_timer(lid_timer);
+- }
+- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+-}
+-
+-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+-{
+- struct timer_list *lid_timer = &dev_priv->lid_timer;
+- unsigned long irq_flags;
+-
+- spin_lock_init(&dev_priv->lid_lock);
+- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+-
+- timer_setup(lid_timer, psb_lid_timer_func, 0);
+-
+- lid_timer->expires = jiffies + PSB_LID_DELAY;
+-
+- add_timer(lid_timer);
+- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+-}
+-
+-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+-{
+- del_timer_sync(&dev_priv->lid_timer);
+-}
+-
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index 79f65eff6bb2a3..23400313d8a64c 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -104,6 +104,7 @@ gt-y += \
+ gt/intel_ggtt_fencing.o \
+ gt/intel_gt.o \
+ gt/intel_gt_buffer_pool.o \
++ gt/intel_gt_ccs_mode.o \
+ gt/intel_gt_clock_utils.o \
+ gt/intel_gt_debugfs.o \
+ gt/intel_gt_engines_debugfs.o \
+diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
+index 4c7187f7913ea5..e8ee0a08947e8f 100644
+--- a/drivers/gpu/drm/i915/display/g4x_dp.c
++++ b/drivers/gpu/drm/i915/display/g4x_dp.c
+@@ -141,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
+
+ intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ TRANS_DP_ENH_FRAMING,
+- drm_dp_enhanced_frame_cap(intel_dp->dpcd) ?
++ pipe_config->enhanced_framing ?
+ TRANS_DP_ENH_FRAMING : 0);
+ } else {
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
+@@ -153,7 +153,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
++ if (pipe_config->enhanced_framing)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (IS_CHERRYVIEW(dev_priv))
+@@ -351,6 +351,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
+ u32 trans_dp = intel_de_read(dev_priv,
+ TRANS_DP_CTL(crtc->pipe));
+
++ if (trans_dp & TRANS_DP_ENH_FRAMING)
++ pipe_config->enhanced_framing = true;
++
+ if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+@@ -361,6 +364,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ } else {
++ if (tmp & DP_ENHANCED_FRAMING)
++ pipe_config->enhanced_framing = true;
++
+ if (tmp & DP_SYNC_HS_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index ad6488e9c2b2b8..5b8efe8e735a9b 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1440,6 +1440,13 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct drm_i915_private *i915 = to_i915(connector->dev);
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++
+ /* FIXME: DSC? */
+ return intel_dsi_mode_valid(connector, mode);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
+index 7cf51dd8c05670..aaddd8c0cfa0ee 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic.c
++++ b/drivers/gpu/drm/i915/display/intel_atomic.c
+@@ -259,6 +259,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
+ drm_property_blob_get(crtc_state->post_csc_lut);
+
+ crtc_state->update_pipe = false;
++ crtc_state->update_m_n = false;
+ crtc_state->disable_lp_wm = false;
+ crtc_state->disable_cxsr = false;
+ crtc_state->update_wm_pre = false;
+diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
+index 3d9c9b4f27f802..1cf1674897e9f2 100644
+--- a/drivers/gpu/drm/i915/display/intel_audio.c
++++ b/drivers/gpu/drm/i915/display/intel_audio.c
+@@ -75,19 +75,6 @@ struct intel_audio_funcs {
+ struct intel_crtc_state *crtc_state);
+ };
+
+-/* DP N/M table */
+-#define LC_810M 810000
+-#define LC_540M 540000
+-#define LC_270M 270000
+-#define LC_162M 162000
+-
+-struct dp_aud_n_m {
+- int sample_rate;
+- int clock;
+- u16 m;
+- u16 n;
+-};
+-
+ struct hdmi_aud_ncts {
+ int sample_rate;
+ int clock;
+@@ -95,60 +82,6 @@ struct hdmi_aud_ncts {
+ int cts;
+ };
+
+-/* Values according to DP 1.4 Table 2-104 */
+-static const struct dp_aud_n_m dp_aud_n_m[] = {
+- { 32000, LC_162M, 1024, 10125 },
+- { 44100, LC_162M, 784, 5625 },
+- { 48000, LC_162M, 512, 3375 },
+- { 64000, LC_162M, 2048, 10125 },
+- { 88200, LC_162M, 1568, 5625 },
+- { 96000, LC_162M, 1024, 3375 },
+- { 128000, LC_162M, 4096, 10125 },
+- { 176400, LC_162M, 3136, 5625 },
+- { 192000, LC_162M, 2048, 3375 },
+- { 32000, LC_270M, 1024, 16875 },
+- { 44100, LC_270M, 784, 9375 },
+- { 48000, LC_270M, 512, 5625 },
+- { 64000, LC_270M, 2048, 16875 },
+- { 88200, LC_270M, 1568, 9375 },
+- { 96000, LC_270M, 1024, 5625 },
+- { 128000, LC_270M, 4096, 16875 },
+- { 176400, LC_270M, 3136, 9375 },
+- { 192000, LC_270M, 2048, 5625 },
+- { 32000, LC_540M, 1024, 33750 },
+- { 44100, LC_540M, 784, 18750 },
+- { 48000, LC_540M, 512, 11250 },
+- { 64000, LC_540M, 2048, 33750 },
+- { 88200, LC_540M, 1568, 18750 },
+- { 96000, LC_540M, 1024, 11250 },
+- { 128000, LC_540M, 4096, 33750 },
+- { 176400, LC_540M, 3136, 18750 },
+- { 192000, LC_540M, 2048, 11250 },
+- { 32000, LC_810M, 1024, 50625 },
+- { 44100, LC_810M, 784, 28125 },
+- { 48000, LC_810M, 512, 16875 },
+- { 64000, LC_810M, 2048, 50625 },
+- { 88200, LC_810M, 1568, 28125 },
+- { 96000, LC_810M, 1024, 16875 },
+- { 128000, LC_810M, 4096, 50625 },
+- { 176400, LC_810M, 3136, 28125 },
+- { 192000, LC_810M, 2048, 16875 },
+-};
+-
+-static const struct dp_aud_n_m *
+-audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
+-{
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+- if (rate == dp_aud_n_m[i].sample_rate &&
+- crtc_state->port_clock == dp_aud_n_m[i].clock)
+- return &dp_aud_n_m[i];
+- }
+-
+- return NULL;
+-}
+-
+ static const struct {
+ int clock;
+ u32 config;
+@@ -386,47 +319,17 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+ {
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+- struct i915_audio_component *acomp = i915->display.audio.component;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+- enum port port = encoder->port;
+- const struct dp_aud_n_m *nm;
+- int rate;
+- u32 tmp;
+-
+- rate = acomp ? acomp->aud_sample_rate[port] : 0;
+- nm = audio_config_dp_get_n_m(crtc_state, rate);
+- if (nm)
+- drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m,
+- nm->n);
+- else
+- drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n");
+-
+- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
+- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+- tmp |= AUD_CONFIG_N_VALUE_INDEX;
+-
+- if (nm) {
+- tmp &= ~AUD_CONFIG_N_MASK;
+- tmp |= AUD_CONFIG_N(nm->n);
+- tmp |= AUD_CONFIG_N_PROG_ENABLE;
+- }
+-
+- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
+-
+- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+- tmp &= ~AUD_CONFIG_M_MASK;
+- tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+- tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+- if (nm) {
+- tmp |= nm->m;
+- tmp |= AUD_M_CTS_M_VALUE_INDEX;
+- tmp |= AUD_M_CTS_M_PROG_ENABLE;
+- }
++ /* Enable time stamps. Let HW calculate Maud/Naud values */
++ intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
++ AUD_CONFIG_N_VALUE_INDEX |
++ AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
++ AUD_CONFIG_UPPER_N_MASK |
++ AUD_CONFIG_LOWER_N_MASK |
++ AUD_CONFIG_N_PROG_ENABLE,
++ AUD_CONFIG_N_VALUE_INDEX);
+
+- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ }
+
+ static void
+@@ -1348,17 +1251,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
+ static void i915_audio_component_init(struct drm_i915_private *i915)
+ {
+ u32 aud_freq, aud_freq_init;
+- int ret;
+-
+- ret = component_add_typed(i915->drm.dev,
+- &i915_audio_component_bind_ops,
+- I915_COMPONENT_AUDIO);
+- if (ret < 0) {
+- drm_err(&i915->drm,
+- "failed to add audio component (%d)\n", ret);
+- /* continue with reduced functionality */
+- return;
+- }
+
+ if (DISPLAY_VER(i915) >= 9) {
+ aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
+@@ -1381,6 +1273,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
+
+ /* init with current cdclk */
+ intel_audio_cdclk_change_post(i915);
++}
++
++static void i915_audio_component_register(struct drm_i915_private *i915)
++{
++ int ret;
++
++ ret = component_add_typed(i915->drm.dev,
++ &i915_audio_component_bind_ops,
++ I915_COMPONENT_AUDIO);
++ if (ret < 0) {
++ drm_err(&i915->drm,
++ "failed to add audio component (%d)\n", ret);
++ /* continue with reduced functionality */
++ return;
++ }
+
+ i915->display.audio.component_registered = true;
+ }
+@@ -1413,6 +1320,12 @@ void intel_audio_init(struct drm_i915_private *i915)
+ i915_audio_component_init(i915);
+ }
+
++void intel_audio_register(struct drm_i915_private *i915)
++{
++ if (!i915->display.audio.lpe.platdev)
++ i915_audio_component_register(i915);
++}
++
+ /**
+ * intel_audio_deinit() - deinitialize the audio driver
+ * @i915: the i915 drm device private data
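The replacement code in hsw_dp_audio_config_update() programs the audio N configuration with a single read-modify-write: clear every N-related field, then set only the automatic-index bit so hardware derives Maud/Naud itself. A sketch of that rmw pattern over a fake register (the bit positions below are invented, not the real HSW_AUD_CFG layout):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg;  /* stand-in for an MMIO register */

static uint32_t rmw(uint32_t *r, uint32_t clear, uint32_t set)
{
	uint32_t old = *r;

	*r = (old & ~clear) | set;
	return old;
}

/* hypothetical field layout for illustration only */
#define N_VALUE_INDEX  (1u << 29)
#define N_PROG_ENABLE  (1u << 28)
#define N_MASK         (0xfffu << 4)

int main(void)
{
	reg = N_PROG_ENABLE | (42u << 4);  /* leftover manual N value */

	/* clear every N-related field, then select automatic N */
	rmw(&reg, N_VALUE_INDEX | N_PROG_ENABLE | N_MASK, N_VALUE_INDEX);
	printf("reg=%#x\n", reg);
	return 0;
}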
+diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
+index 07d034a981e90e..9779343a371068 100644
+--- a/drivers/gpu/drm/i915/display/intel_audio.h
++++ b/drivers/gpu/drm/i915/display/intel_audio.h
+@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
+ void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+ void intel_audio_init(struct drm_i915_private *dev_priv);
++void intel_audio_register(struct drm_i915_private *i915);
+ void intel_audio_deinit(struct drm_i915_private *dev_priv);
+ void intel_audio_sdp_split_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
+index 2e8f17c0452223..ff9b9918b0a134 100644
+--- a/drivers/gpu/drm/i915/display/intel_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_backlight.c
+@@ -274,7 +274,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state,
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
++ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
+ }
+
+ static void
+@@ -427,7 +427,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
+ intel_backlight_set_pwm_level(old_conn_state, level);
+
+ panel->backlight.pwm_state.enabled = false;
+- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
++ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
+ }
+
+ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
+@@ -749,7 +749,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ panel->backlight.pwm_state.enabled = true;
+- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
++ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
+ }
+
+ static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index f735b035436c02..27d1c49b46ec48 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -1035,22 +1035,11 @@ parse_lfp_backlight(struct drm_i915_private *i915,
+ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ panel->vbt.backlight.controller = 0;
+ if (i915->display.vbt.version >= 191) {
+- size_t exp_size;
++ const struct lfp_backlight_control_method *method;
+
+- if (i915->display.vbt.version >= 236)
+- exp_size = sizeof(struct bdb_lfp_backlight_data);
+- else if (i915->display.vbt.version >= 234)
+- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+- else
+- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+-
+- if (get_blocksize(backlight_data) >= exp_size) {
+- const struct lfp_backlight_control_method *method;
+-
+- method = &backlight_data->backlight_control[panel_type];
+- panel->vbt.backlight.type = method->type;
+- panel->vbt.backlight.controller = method->controller;
+- }
++ method = &backlight_data->backlight_control[panel_type];
++ panel->vbt.backlight.type = method->type;
++ panel->vbt.backlight.controller = method->controller;
+ }
+
+ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+@@ -1945,16 +1934,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+-static void fixup_mipi_sequences(struct drm_i915_private *i915,
+- struct intel_panel *panel)
++static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ u8 *init_otp;
+ int len;
+
+- /* Limit this to VLV for now. */
+- if (!IS_VALLEYVIEW(i915))
+- return;
+-
+ /* Limit this to v1 vid-mode sequences */
+ if (panel->vbt.dsi.config->is_cmd_mode ||
+ panel->vbt.dsi.seq_version != 1)
+@@ -1990,6 +1975,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+ }
+
++/*
++ * Some machines (eg. Lenovo 82TQ) appear to have broken
++ * VBT sequences:
++ * - INIT_OTP is not present at all
++ * - what should be in INIT_OTP is in DISPLAY_ON
++ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
++ * (along with the actual backlight stuff)
++ *
++ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
++ *
++ * TODO: Do we need to limit this to specific machines,
++ * or examine the contents of the sequences to
++ * avoid false positives?
++ */
++static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
++ struct intel_panel *panel)
++{
++ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
++ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
++ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
++
++ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
++ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
++ }
++}
++
++static void fixup_mipi_sequences(struct drm_i915_private *i915,
++ struct intel_panel *panel)
++{
++ if (DISPLAY_VER(i915) >= 11)
++ icl_fixup_mipi_sequences(i915, panel);
++ else if (IS_VALLEYVIEW(i915))
++ vlv_fixup_mipi_sequences(i915, panel);
++}
++
+ static void
+ parse_mipi_sequence(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+@@ -3313,6 +3333,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
+ {
+ const struct child_device_config *child = &devdata->child;
+
++ if (!devdata)
++ return false;
++
+ if (!intel_bios_encoder_supports_dp(devdata) ||
+ !intel_bios_encoder_supports_hdmi(devdata))
+ return false;
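For the broken VBTs described in the comment above, the fix is literally a pointer swap between the two sequence slots. A compact illustration (the sequence payload strings are placeholders):

#include <stdio.h>

enum { SEQ_INIT_OTP, SEQ_DISPLAY_ON, SEQ_MAX };

static void swap_ptr(const char **a, const char **b)
{
	const char *t = *a; *a = *b; *b = t;
}

int main(void)
{
	/* broken firmware: INIT_OTP missing, its payload in DISPLAY_ON */
	const char *seq[SEQ_MAX] = { NULL, "init-otp payload" };

	if (!seq[SEQ_INIT_OTP] && seq[SEQ_DISPLAY_ON]) {
		printf("Broken VBT: swapping INIT_OTP and DISPLAY_ON\n");
		swap_ptr(&seq[SEQ_INIT_OTP], &seq[SEQ_DISPLAY_ON]);
	}
	printf("INIT_OTP=%s DISPLAY_ON=%s\n",
	       seq[SEQ_INIT_OTP],
	       seq[SEQ_DISPLAY_ON] ? seq[SEQ_DISPLAY_ON] : "(none)");
	return 0;
}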
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 2fb030b1ff1de3..fc3a6eb1de7414 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2453,7 +2453,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+- enum pipe pipe = new_cdclk_state->pipe;
++ struct intel_cdclk_config cdclk_config;
++ enum pipe pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+@@ -2462,12 +2463,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_pre_notify(state);
+
+- if (pipe == INVALID_PIPE ||
+- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++ if (new_cdclk_state->disable_pipes) {
++ cdclk_config = new_cdclk_state->actual;
++ pipe = INVALID_PIPE;
++ } else {
++ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
++ cdclk_config = new_cdclk_state->actual;
++ pipe = new_cdclk_state->pipe;
++ } else {
++ cdclk_config = old_cdclk_state->actual;
++ pipe = INVALID_PIPE;
++ }
+
+- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
++ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
++ old_cdclk_state->actual.voltage_level);
+ }
++
++ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++
++ intel_set_cdclk(i915, &cdclk_config, pipe);
+ }
+
+ /**
+@@ -2485,7 +2499,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+- enum pipe pipe = new_cdclk_state->pipe;
++ enum pipe pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+@@ -2494,12 +2508,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_post_notify(state);
+
+- if (pipe != INVALID_PIPE &&
+- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++ if (!new_cdclk_state->disable_pipes &&
++ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
++ pipe = new_cdclk_state->pipe;
++ else
++ pipe = INVALID_PIPE;
+
+- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+- }
++ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++
++ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ }
+
+ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+@@ -2688,6 +2705,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
+ for_each_pipe(dev_priv, pipe)
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+
++ /*
++ * Avoid glk_force_audio_cdclk() causing excessive screen
++ * blinking when multiple pipes are active by making sure
++ * CDCLK frequency is always high enough for audio. With a
++ * single active pipe we can always change CDCLK frequency
++ * by changing the cd2x divider (see glk_cdclk_table[]) and
++ * thus a full modeset won't be needed then.
++ */
++ if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
++ !is_power_of_2(cdclk_state->active_pipes))
++ min_cdclk = max(2 * 96000, min_cdclk);
++
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+@@ -2934,6 +2963,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
+ return NULL;
+
+ cdclk_state->pipe = INVALID_PIPE;
++ cdclk_state->disable_pipes = false;
+
+ return &cdclk_state->base;
+ }
+@@ -3112,6 +3142,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+ if (ret)
+ return ret;
+
++ new_cdclk_state->disable_pipes = true;
++
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset required for cdclk change\n");
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
+index 48fd7d39e0cd9c..71bc032bfef16e 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
+@@ -51,6 +51,9 @@ struct intel_cdclk_state {
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
++
++ /* update cdclk with pipes disabled */
++ bool disable_pipes;
+ };
+
+ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
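The disable_pipes flag feeds the pre/post plane-update split visible in the intel_cdclk.c hunks above: frequency increases are applied before planes are reprogrammed, decreases afterwards, so the clock is never too slow for the active configuration. A schematic sketch (frequencies are arbitrary examples, not real cdclk values):

#include <stdio.h>

struct cdclk { int freq; };

static void set_cdclk(int freq) { printf("cdclk -> %d kHz\n", freq); }

static void pre_plane_update(struct cdclk *old, struct cdclk *new)
{
	/* increases must land before planes start using the new rate */
	if (new->freq >= old->freq)
		set_cdclk(new->freq);
}

static void post_plane_update(struct cdclk *old, struct cdclk *new)
{
	/* decreases wait until nothing depends on the old rate */
	if (new->freq < old->freq)
		set_cdclk(new->freq);
}

int main(void)
{
	struct cdclk old = { 307200 }, new = { 652800 };

	pre_plane_update(&old, &new);
	/* ... plane/pipe reprogramming happens here ... */
	post_plane_update(&old, &new);
	return 0;
}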
+diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
+index 8090747586877e..4352f901776152 100644
+--- a/drivers/gpu/drm/i915/display/intel_crt.c
++++ b/drivers/gpu/drm/i915/display/intel_crt.c
+@@ -348,8 +348,13 @@ intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
++ enum drm_mode_status status;
+ int max_clock;
+
++ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+@@ -451,6 +456,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
+ /* FDI must always be 2.7 GHz */
+ pipe_config->port_clock = 135000 * 2;
+
++ pipe_config->enhanced_framing = true;
++
+ adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+
+ return 0;
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
+index 182c6dd64f47cf..cfbfbfed3f5e66 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -468,9 +468,56 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+ return vblank_start;
+ }
+
++static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
++ struct intel_crtc *crtc,
++ int *min, int *max, int *vblank_start)
++{
++ const struct intel_crtc_state *old_crtc_state =
++ intel_atomic_get_old_crtc_state(state, crtc);
++ const struct intel_crtc_state *new_crtc_state =
++ intel_atomic_get_new_crtc_state(state, crtc);
++ const struct intel_crtc_state *crtc_state;
++ const struct drm_display_mode *adjusted_mode;
++
++ /*
++ * During fastsets/etc. the transcoder is still
++ * running with the old timings at this point.
++ *
++ * TODO: maybe just use the active timings here?
++ */
++ if (intel_crtc_needs_modeset(new_crtc_state))
++ crtc_state = new_crtc_state;
++ else
++ crtc_state = old_crtc_state;
++
++ adjusted_mode = &crtc_state->hw.adjusted_mode;
++
++ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
++ if (intel_vrr_is_push_sent(crtc_state))
++ *vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
++ else
++ *vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
++ } else {
++ *vblank_start = intel_mode_vblank_start(adjusted_mode);
++ }
++
++ /* FIXME needs to be calibrated sensibly */
++ *min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
++ VBLANK_EVASION_TIME_US);
++ *max = *vblank_start - 1;
++
++ /*
++ * M/N is double buffered on the transcoder's undelayed vblank,
++ * so with seamless M/N we must evade both vblanks.
++ */
++ if (new_crtc_state->update_m_n)
++ *min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
++}
++
+ /**
+ * intel_pipe_update_start() - start update of a set of display registers
+- * @new_crtc_state: the new crtc state
++ * @state: the atomic state
++ * @crtc: the crtc
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically regarding vblank. If the next vblank will happens within
+@@ -480,11 +527,12 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays.
+ */
+-void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
++void intel_pipe_update_start(struct intel_atomic_state *state,
++ struct intel_crtc *crtc)
+ {
+- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+- const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
++ struct intel_crtc_state *new_crtc_state =
++ intel_atomic_get_new_crtc_state(state, crtc);
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+@@ -500,27 +548,7 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
+ if (intel_crtc_needs_vblank_work(new_crtc_state))
+ intel_crtc_vblank_work_init(new_crtc_state);
+
+- if (new_crtc_state->vrr.enable) {
+- if (intel_vrr_is_push_sent(new_crtc_state))
+- vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
+- else
+- vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+- } else {
+- vblank_start = intel_mode_vblank_start(adjusted_mode);
+- }
+-
+- /* FIXME needs to be calibrated sensibly */
+- min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+- VBLANK_EVASION_TIME_US);
+- max = vblank_start - 1;
+-
+- /*
+- * M/N is double buffered on the transcoder's undelayed vblank,
+- * so with seamless M/N we must evade both vblanks.
+- */
+- if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+- min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+-
++ intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
+ if (min <= 0 || max <= 0)
+ goto irq_disable;
+
+@@ -631,15 +659,18 @@ static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+
+ /**
+ * intel_pipe_update_end() - end update of a set of display registers
+- * @new_crtc_state: the new crtc state
++ * @state: the atomic state
++ * @crtc: the crtc
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank.
+ */
+-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
++void intel_pipe_update_end(struct intel_atomic_state *state,
++ struct intel_crtc *crtc)
+ {
+- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
++ struct intel_crtc_state *new_crtc_state =
++ intel_atomic_get_new_crtc_state(state, crtc);
+ enum pipe pipe = crtc->pipe;
+ int scanline_end = intel_get_crtc_scanline(crtc);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
+@@ -697,15 +728,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+ */
+ intel_vrr_send_push(new_crtc_state);
+
+- /*
+- * Seamless M/N update may need to update frame timings.
+- *
+- * FIXME Should be synchronized with the start of vblank somehow...
+- */
+- if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+- intel_crtc_update_active_timings(new_crtc_state,
+- new_crtc_state->vrr.enable);
+-
+ local_irq_enable();
+
+ if (intel_vgpu_active(dev_priv))
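intel_crtc_vblank_evade_scanlines() computes the window of scanlines during which register writes must not start: from vblank_start minus the evasion time (converted to scanlines) up to the line before vblank, widened further when a double-buffered M/N update must also evade the undelayed vblank. A back-of-the-envelope sketch with invented timings:

#include <stdio.h>

/* hypothetical mode timings, illustrative only */
#define VBLANK_START   1084  /* scanline where vblank begins */
#define VDISPLAY       1080
#define EVASION_US      100
#define LINE_TIME_US     15  /* rough time per scanline */

int main(void)
{
	/* round the evasion time up to whole scanlines */
	int evasion_lines = (EVASION_US + LINE_TIME_US - 1) / LINE_TIME_US;
	int min = VBLANK_START - evasion_lines;
	int max = VBLANK_START - 1;

	/* a double-buffered M/N update must also evade the undelayed
	 * vblank, widening the window by vblank_start - vdisplay */
	int min_m_n = min - (VBLANK_START - VDISPLAY);

	printf("evade scanlines [%d, %d] (with M/N: [%d, %d])\n",
	       min, max, min_m_n, max);
	return 0;
}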
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
+index 51a4c8df9e6574..22d7993d1f0ba9 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.h
++++ b/drivers/gpu/drm/i915/display/intel_crtc.h
+@@ -36,8 +36,10 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
+ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
+-void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
+-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
++void intel_pipe_update_start(struct intel_atomic_state *state,
++ struct intel_crtc *crtc);
++void intel_pipe_update_end(struct intel_atomic_state *state,
++ struct intel_crtc *crtc);
+ void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
+ struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+ struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+index 8d4640d0fd346b..66fe880af8f3f0 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+@@ -258,6 +258,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
++ drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
++ str_enabled_disabled(pipe_config->fec_enable),
++ str_enabled_disabled(pipe_config->enhanced_framing));
+ }
+
+ drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
+diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
+index b342fad180ca5b..61df6cd3f37788 100644
+--- a/drivers/gpu/drm/i915/display/intel_cursor.c
++++ b/drivers/gpu/drm/i915/display/intel_cursor.c
+@@ -23,6 +23,8 @@
+ #include "intel_psr.h"
+ #include "skl_watermark.h"
+
++#include "gem/i915_gem_object.h"
++
+ /* Cursor formats */
+ static const u32 intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+@@ -32,12 +34,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+ {
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+- const struct drm_framebuffer *fb = plane_state->hw.fb;
+- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
+ if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
+- base = sg_dma_address(obj->mm.pages->sgl);
++ base = plane_state->phys_dma_addr;
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+index 80e4ec6ee4031b..048e581fda16cb 100644
+--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+@@ -2420,7 +2420,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
+
+ val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
+- if (is_hdmi_frl(crtc_state->port_clock))
++ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
++ is_hdmi_frl(crtc_state->port_clock))
+ val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ else
+ val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 84bbf854337aa7..b347f906234945 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3432,7 +3432,7 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
+ } else {
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
+- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
++ if (crtc_state->enhanced_framing)
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+@@ -3489,7 +3489,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
+ } else {
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
+- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
++ if (crtc_state->enhanced_framing)
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+@@ -3724,17 +3724,14 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
+ intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder,
+ &pipe_config->dp_m2_n2);
+
+- if (DISPLAY_VER(dev_priv) >= 11) {
+- i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config);
++ pipe_config->enhanced_framing =
++ intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
++ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+
++ if (DISPLAY_VER(dev_priv) >= 11)
+ pipe_config->fec_enable =
+- intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+-
+- drm_dbg_kms(&dev_priv->drm,
+- "[ENCODER:%d:%s] Fec status: %u\n",
+- encoder->base.base.id, encoder->base.name,
+- pipe_config->fec_enable);
+- }
++ intel_de_read(dev_priv,
++ dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
+
+ if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ pipe_config->infoframes.enable |=
+@@ -3747,6 +3744,9 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
+ if (!HAS_DP20(dev_priv)) {
+ /* FDI */
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
++ pipe_config->enhanced_framing =
++ intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
++ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ break;
+ }
+ fallthrough; /* 128b/132b */
+@@ -4111,7 +4111,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ const struct intel_crtc_state *crtc_state2)
+ {
++ /*
++ * FIXME the modeset sequence is currently wrong and
++ * can't deal with bigjoiner + port sync at the same time.
++ */
+ return crtc_state1->hw.active && crtc_state2->hw.active &&
++ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
+ crtc_state1->output_types == crtc_state2->output_types &&
+ crtc_state1->output_format == crtc_state2->output_format &&
+ crtc_state1->lane_count == crtc_state2->lane_count &&
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 763ab569d8f324..1a59fca40252cd 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5215,7 +5215,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+
+ if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+- if (!fastset || !pipe_config->seamless_m_n)
++ if (!fastset || !pipe_config->update_m_n)
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+ } else {
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+@@ -5255,6 +5255,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
+ PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
+ PIPE_CONF_CHECK_BOOL(has_infoframe);
++ PIPE_CONF_CHECK_BOOL(enhanced_framing);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
+
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
+@@ -5352,7 +5353,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
+
+- if (!fastset || !pipe_config->seamless_m_n) {
++ if (!fastset || !pipe_config->update_m_n) {
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+ }
+@@ -5447,6 +5448,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
+
+ crtc_state->uapi.mode_changed = true;
+ crtc_state->update_pipe = false;
++ crtc_state->update_m_n = false;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+@@ -5564,13 +5566,14 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
+ {
+ struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
+
+- if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
++ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
+ drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
++ else
++ new_crtc_state->uapi.mode_changed = false;
+
+- return;
+- }
++ if (intel_crtc_needs_modeset(new_crtc_state))
++ new_crtc_state->update_m_n = false;
+
+- new_crtc_state->uapi.mode_changed = false;
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ new_crtc_state->update_pipe = true;
+ }
+@@ -5976,6 +5979,17 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
+ return -EINVAL;
+ }
+
++ /*
++ * FIXME: Bigjoiner+async flip is busted currently.
++ * Remove this check once the issues are fixed.
++ */
++ if (new_crtc_state->bigjoiner_pipes) {
++ drm_dbg_kms(&i915->drm,
++ "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
++ crtc->base.base.id, crtc->base.name);
++ return -EINVAL;
++ }
++
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+@@ -6285,6 +6299,7 @@ int intel_atomic_check(struct drm_device *dev,
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
++ new_crtc_state->update_m_n = false;
+ }
+ }
+
+@@ -6297,6 +6312,7 @@ int intel_atomic_check(struct drm_device *dev,
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
++ new_crtc_state->update_m_n = false;
+ }
+ }
+
+@@ -6304,6 +6320,7 @@ int intel_atomic_check(struct drm_device *dev,
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
++ new_crtc_state->update_m_n = false;
+ }
+ }
+ }
+@@ -6482,7 +6499,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_set_linetime_wm(new_crtc_state);
+
+- if (new_crtc_state->seamless_m_n)
++ if (new_crtc_state->update_m_n)
+ intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+ &new_crtc_state->dp_m_n);
+ }
+@@ -6521,6 +6538,8 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+ {
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
++ const struct intel_crtc_state *old_crtc_state =
++ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+@@ -6532,6 +6551,9 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ !intel_crtc_needs_modeset(new_crtc_state))
+ skl_detach_scalers(new_crtc_state);
++
++ if (vrr_enabling(old_crtc_state, new_crtc_state))
++ intel_vrr_enable(new_crtc_state);
+ }
+
+ static void intel_enable_crtc(struct intel_atomic_state *state,
+@@ -6572,12 +6594,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ intel_dpt_configure(crtc);
+ }
+
+- if (vrr_enabling(old_crtc_state, new_crtc_state)) {
+- intel_vrr_enable(new_crtc_state);
+- intel_crtc_update_active_timings(new_crtc_state,
+- new_crtc_state->vrr.enable);
+- }
+-
+ if (!modeset) {
+ if (new_crtc_state->preload_luts &&
+ intel_crtc_needs_color_update(new_crtc_state))
+@@ -6604,7 +6620,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ intel_crtc_planes_update_noarm(state, crtc);
+
+ /* Perform vblank evasion around commit operation */
+- intel_pipe_update_start(new_crtc_state);
++ intel_pipe_update_start(state, crtc);
+
+ commit_pipe_pre_planes(state, crtc);
+
+@@ -6612,7 +6628,16 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+
+ commit_pipe_post_planes(state, crtc);
+
+- intel_pipe_update_end(new_crtc_state);
++ intel_pipe_update_end(state, crtc);
++
++ /*
++ * VRR/Seamless M/N update may need to update frame timings.
++ *
++ * FIXME Should be synchronized with the start of vblank somehow...
++ */
++ if (vrr_enabling(old_crtc_state, new_crtc_state) || new_crtc_state->update_m_n)
++ intel_crtc_update_active_timings(new_crtc_state,
++ new_crtc_state->vrr.enable);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+@@ -6658,10 +6683,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
++ intel_pre_plane_update(state, crtc);
++
+ if (!old_crtc_state->hw.active)
+ continue;
+
+- intel_pre_plane_update(state, crtc);
+ intel_crtc_disable_planes(state, crtc);
+ }
+
+@@ -7279,7 +7305,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_color_cleanup_commit(new_crtc_state);
+
+- drm_atomic_helper_cleanup_planes(dev, &state->base);
++ drm_atomic_helper_unprepare_planes(dev, &state->base);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ return ret;
+ }
+@@ -7660,6 +7686,16 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
+ mode->vtotal > vtotal_max)
+ return MODE_V_ILLEGAL;
+
++ return MODE_OK;
++}
++
++enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
++ const struct drm_display_mode *mode)
++{
++ /*
++ * Additional transcoder timing limits,
++ * excluding BXT/GLK DSI transcoders.
++ */
+ if (DISPLAY_VER(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
+index 49ac8473b988b3..13b0904d42e3df 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.h
++++ b/drivers/gpu/drm/i915/display/intel_display.h
+@@ -405,6 +405,9 @@ enum drm_mode_status
+ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode,
+ bool bigjoiner);
++enum drm_mode_status
++intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
++ const struct drm_display_mode *mode);
+ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+ bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+ bool is_trans_port_sync_master(const struct intel_crtc_state *state);
+diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
+index 215e682bd8b7a4..5fd07c18177661 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_device.h
++++ b/drivers/gpu/drm/i915/display/intel_display_device.h
+@@ -46,6 +46,7 @@ struct drm_printer;
+ #define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
+ #define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
+ #define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
++#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
+ #define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
+ #define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
+ #define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)
+diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
+index 8f144d4d3c3983..26514f931af7a6 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
++++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
+@@ -386,6 +386,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
+
+ intel_audio_init(i915);
+
++ intel_audio_register(i915);
++
+ intel_display_debugfs_register(i915);
+
+ /*
+diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
+index 916009894d89c7..1e099908772191 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
+@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+
+- return intel_port_to_phy(i915, dig_port->base.port);
++ /*
++ * FIXME should we care about the (VBT defined) dig_port->aux_ch
++ * relationship or should this be purely defined by the hardware layout?
++ * Currently if the port doesn't appear in the VBT, or if it's declared
++ * as HDMI-only and routed to a combo PHY, the encoder either won't be
++ * present at all or it will not have an aux_ch assigned.
++ */
++ return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
+ }
+
+ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+
+ intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+- if (DISPLAY_VER(dev_priv) < 12)
++ /* FIXME this is a mess */
++ if (phy != PHY_NONE)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ 0, ICL_LANE_ENABLE_AUX);
+
+@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
++ /* FIXME this is a mess */
++ if (phy != PHY_NONE)
++ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
++ ICL_LANE_ENABLE_AUX, 0);
+
+ intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
+index 99bdb833591ce1..7862e7cefe0278 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
++++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
+@@ -411,7 +411,7 @@ TRACE_EVENT(intel_fbc_activate,
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __assign_str(dev, __dev_name_kms(plane));
+- __assign_str(name, plane->base.name)
++ __assign_str(name, plane->base.name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+@@ -438,7 +438,7 @@ TRACE_EVENT(intel_fbc_deactivate,
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __assign_str(dev, __dev_name_kms(plane));
+- __assign_str(name, plane->base.name)
++ __assign_str(name, plane->base.name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+@@ -465,7 +465,7 @@ TRACE_EVENT(intel_fbc_nuke,
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __assign_str(dev, __dev_name_kms(plane));
+- __assign_str(name, plane->base.name)
++ __assign_str(name, plane->base.name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
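
[Annotation] The three intel_display_trace.h hunks above add a semicolon that __assign_str() invocations were missing; the omission appears to have compiled only because that macro's definition historically carried its own trailing semicolon. A toy userspace sketch (not the kernel macro) of the do { } while (0) convention that makes such omissions a hard compile error:

/* Toy sketch: a statement-like macro wrapped in do-while(0) cannot be
 * used without a terminating ';', because the C grammar requires one
 * after "do statement while (expr)". Names here are invented. */
#include <stdio.h>
#include <string.h>

#define ASSIGN_STR(dst, src) \
    do { \
        strncpy((dst), (src), sizeof(dst) - 1); \
        (dst)[sizeof(dst) - 1] = '\0'; \
    } while (0)

int main(void)
{
    char name[16];

    ASSIGN_STR(name, "primary A"); /* omit the ';' and this fails to parse */
    printf("%s\n", name);
    return 0;
}
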
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 731f2ec04d5cda..1c23b186aff20c 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -701,6 +701,7 @@ struct intel_plane_state {
+ #define PLANE_HAS_FENCE BIT(0)
+
+ struct intel_fb_view view;
++ u32 phys_dma_addr; /* for cursor_needs_physical */
+
+ /* Plane pxp decryption state */
+ bool decrypt;
+@@ -1083,6 +1084,7 @@ struct intel_crtc_state {
+
+ unsigned fb_bits; /* framebuffers to flip */
+ bool update_pipe; /* can a fast modeset be performed? */
++ bool update_m_n; /* update M/N seamlessly during fastset? */
+ bool disable_cxsr;
+ bool update_wm_pre, update_wm_post; /* watermarks are updated */
+ bool fifo_changed; /* FIFO split is changed */
+@@ -1195,7 +1197,6 @@ struct intel_crtc_state {
+ /* m2_n2 for eDP downclock */
+ struct intel_link_m_n dp_m2_n2;
+ bool has_drrs;
+- bool seamless_m_n;
+
+ /* PSR is supported but might not be enabled due the lack of enabled planes */
+ bool has_psr;
+@@ -1362,6 +1363,8 @@ struct intel_crtc_state {
+ u16 linetime;
+ u16 ips_linetime;
+
++ bool enhanced_framing;
++
+ /* Forward Error correction State */
+ bool fec_enable;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
+index 5f479f3828bbee..8751973b5730fe 100644
+--- a/drivers/gpu/drm/i915/display/intel_dmc.c
++++ b/drivers/gpu/drm/i915/display/intel_dmc.c
+@@ -389,7 +389,7 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
+ enum intel_dmc_id dmc_id;
+
+ /* TODO: check if the following applies to all D13+ platforms. */
+- if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
++ if (!IS_TIGERLAKE(i915))
+ return;
+
+ for_each_dmc_id(dmc_id) {
+@@ -493,6 +493,45 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+ }
+
++static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
++ enum intel_dmc_id dmc_id, i915_reg_t reg)
++{
++ u32 offset = i915_mmio_reg_offset(reg);
++ u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
++ u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
++
++ return offset >= start && offset < end;
++}
++
++static bool disable_dmc_evt(struct drm_i915_private *i915,
++ enum intel_dmc_id dmc_id,
++ i915_reg_t reg, u32 data)
++{
++ if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
++ return false;
++
++ /* keep all pipe DMC events disabled by default */
++ if (dmc_id != DMC_FW_MAIN)
++ return true;
++
++ return false;
++}
++
++static u32 dmc_mmiodata(struct drm_i915_private *i915,
++ struct intel_dmc *dmc,
++ enum intel_dmc_id dmc_id, int i)
++{
++ if (disable_dmc_evt(i915, dmc_id,
++ dmc->dmc_info[dmc_id].mmioaddr[i],
++ dmc->dmc_info[dmc_id].mmiodata[i]))
++ return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
++ DMC_EVT_CTL_TYPE_EDGE_0_1) |
++ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
++ DMC_EVT_CTL_EVENT_ID_FALSE);
++ else
++ return dmc->dmc_info[dmc_id].mmiodata[i];
++}
++
+ /**
+ * intel_dmc_load_program() - write the firmware from memory to register.
+ * @i915: i915 drm device.
+@@ -532,7 +571,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
+ for_each_dmc_id(dmc_id) {
+ for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
+ intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
+- dmc->dmc_info[dmc_id].mmiodata[i]);
++ dmc_mmiodata(i915, dmc, dmc_id, i));
+ }
+ }
+
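
[Annotation] The intel_dmc.c hunks above classify a firmware MMIO write by whether its offset lands in the event-control register block, and substitute a "disabled" value for non-main firmware entries. A minimal standalone sketch of that pattern, with invented offsets and values:

/* Half-open range check [start, end) plus value substitution, mirroring
 * is_dmc_evt_ctl_reg()/dmc_mmiodata() above. All constants hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVT_CTL_BASE  0x8f34u      /* hypothetical block base */
#define EVT_CTL_COUNT 8u           /* hypothetical handler count */
#define EVT_DISABLED  0x00000000u  /* hypothetical "event off" value */

static bool is_evt_ctl_reg(uint32_t offset)
{
    uint32_t start = EVT_CTL_BASE;
    uint32_t end = EVT_CTL_BASE + EVT_CTL_COUNT * 4; /* one past the last */

    return offset >= start && offset < end;
}

static uint32_t mmiodata(uint32_t offset, uint32_t fw_value, bool main_fw)
{
    /* Keep non-main firmware event handlers disabled by default. */
    if (is_evt_ctl_reg(offset) && !main_fw)
        return EVT_DISABLED;
    return fw_value;
}

int main(void)
{
    printf("0x%08x\n", mmiodata(0x8f38, 0xdeadbeef, false)); /* disabled */
    printf("0x%08x\n", mmiodata(0x9000, 0xdeadbeef, false)); /* untouched */
    return 0;
}
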
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index e0e4cb52928461..c8b6d0f79c9b41 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -393,6 +393,10 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+ struct intel_encoder *encoder = &intel_dig_port->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
++ /* eDP MSO is not compatible with joiner */
++ if (intel_dp->mso_link_count)
++ return false;
++
+ return DISPLAY_VER(dev_priv) >= 12 ||
+ (DISPLAY_VER(dev_priv) == 11 &&
+ encoder->port != PORT_A);
+@@ -430,7 +434,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_is_c10phy(i915, phy))
+- return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
++ return 810000;
+
+ return 2000000;
+ }
+@@ -1127,6 +1131,10 @@ intel_dp_mode_valid(struct drm_connector *_connector,
+ enum drm_mode_status status;
+ bool dsc = false, bigjoiner = false;
+
++ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+@@ -1306,13 +1314,14 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
+ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+ {
++ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+- /* On TGL, FEC is supported on all Pipes */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return true;
+
+- if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
++ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
++ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
+ return true;
+
+ return false;
+@@ -2143,8 +2152,12 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
+ intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
+ int pixel_clock;
+
+- if (has_seamless_m_n(connector))
+- pipe_config->seamless_m_n = true;
++ /*
++ * FIXME all joined pipes share the same transcoder.
++ * Need to account for that when updating M/N live.
++ */
++ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
++ pipe_config->update_m_n = true;
+
+ if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
+ if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
+@@ -2308,6 +2321,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
+
++ pipe_config->enhanced_framing =
++ drm_dp_enhanced_frame_cap(intel_dp->dpcd);
++
+ if (pipe_config->dsc.compression_enable)
+ output_bpp = pipe_config->dsc.compressed_bpp;
+ else
+@@ -3980,7 +3996,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ intel_dp->train_set, crtc_state->lane_count);
+
+ drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+- link_status[DP_DPCD_REV]);
++ intel_dp->dpcd[DP_DPCD_REV]);
+ }
+
+ static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+@@ -4358,6 +4374,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
+ !intel_dp_mst_is_master_trans(crtc_state))
+ continue;
+
++ intel_dp->link_trained = false;
++
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+ intel_dp_start_link_train(intel_dp, crtc_state);
+@@ -5517,8 +5535,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ * (eg. Acer Chromebook C710), so we'll check it only if multiple
+ * ports are attempting to use the same AUX CH, according to VBT.
+ */
+- if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
+- !intel_digital_port_connected(encoder)) {
++ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
+ /*
+ * If this fails, presume the DPCD answer came
+ * from some other port using the same AUX CH.
+@@ -5526,10 +5543,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ * FIXME maybe cleaner to check this before the
+ * DPCD read? Would need sort out the VDD handling...
+ */
+- drm_info(&dev_priv->drm,
+- "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
+- encoder->base.base.id, encoder->base.name);
+- goto out_vdd_off;
++ if (!intel_digital_port_connected(encoder)) {
++ drm_info(&dev_priv->drm,
++ "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
++ encoder->base.base.id, encoder->base.name);
++ goto out_vdd_off;
++ }
++
++ /*
++ * Unfortunately even the HPD based detection fails on
++ * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
++ * back to checking for a VGA branch device. Only do this
++ * on known affected platforms to minimize false positives.
++ */
++ if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
++ (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
++ DP_DWN_STRM_PORT_TYPE_ANALOG) {
++ drm_info(&dev_priv->drm,
++ "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
++ encoder->base.base.id, encoder->base.name);
++ goto out_vdd_off;
++ }
+ }
+
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index a263773f4d68a4..eb5559e1a20024 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -114,10 +114,24 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
+ return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
+ }
+
+-static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
++static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
++{
++ return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
++ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
++ DP_PHY_REPEATER_MODE_TRANSPARENT;
++}
++
++/*
++ * Read the LTTPR common capabilities and switch the LTTPR PHYs to
++ * non-transparent mode if this is supported. Preserve the
++ * transparent/non-transparent mode on an active link.
++ *
++ * Return the number of detected LTTPRs in non-transparent mode or 0 if the
++ * LTTPRs are in transparent mode or the detection failed.
++ */
++static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+ {
+ int lttpr_count;
+- int i;
+
+ if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
+ return 0;
+@@ -131,6 +145,19 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
+ if (lttpr_count == 0)
+ return 0;
+
++ /*
++ * Don't change the mode on an active link, to prevent a loss of link
++ * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
++ * resetting its internal state when the mode is changed from
++ * non-transparent to transparent.
++ */
++ if (intel_dp->link_trained) {
++ if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
++ goto out_reset_lttpr_count;
++
++ return lttpr_count;
++ }
++
+ /*
+ * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
+ * non-transparent mode and the disable->enable non-transparent mode
+@@ -151,11 +178,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
+ "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
+
+ intel_dp_set_lttpr_transparent_mode(intel_dp, true);
+- intel_dp_reset_lttpr_count(intel_dp);
+
+- return 0;
++ goto out_reset_lttpr_count;
+ }
+
++ return lttpr_count;
++
++out_reset_lttpr_count:
++ intel_dp_reset_lttpr_count(intel_dp);
++
++ return 0;
++}
++
++static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
++{
++ int lttpr_count;
++ int i;
++
++ lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
++
+ for (i = 0; i < lttpr_count; i++)
+ intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
+
+@@ -650,19 +691,30 @@ intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 link_bw, u8 rate_select)
+ {
+- u8 link_config[2];
++ u8 lane_count = crtc_state->lane_count;
++
++ if (crtc_state->enhanced_framing)
++ lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+- /* Write the link configuration data */
+- link_config[0] = link_bw;
+- link_config[1] = crtc_state->lane_count;
+- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+-
+- /* eDP 1.4 rate select method. */
+- if (!link_bw)
+- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+- &rate_select, 1);
++ if (link_bw) {
++ /* DP and eDP v1.3 and earlier link bw set method. */
++ u8 link_config[] = { link_bw, lane_count };
++
++ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
++ ARRAY_SIZE(link_config));
++ } else {
++ /*
++ * eDP v1.4 and later link rate set method.
++ *
++ * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
++ * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
++ *
++ * eDP v1.5 sinks allow choosing either, and the last choice
++ * shall be active.
++ */
++ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
++ drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
++ }
+ }
+
+ /*
+@@ -1342,10 +1394,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
+ {
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool passed;
+-
+ /*
+- * TODO: Reiniting LTTPRs here won't be needed once proper connector
+- * HW state readout is added.
++ * Reinit the LTTPRs here to ensure that they are switched to
++ * non-transparent mode. During an earlier LTTPR detection this
++ * could've been prevented by an active link.
+ */
+ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+
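
[Annotation] The intel_dp_update_link_bw_set() hunk above splits DPCD programming into two paths: legacy link-BW mode writes DP_LINK_BW_SET plus lane count together, while the eDP v1.4+ rate-table mode avoids touching DP_LINK_BW_SET entirely, since v1.4x sinks ignore DP_LINK_RATE_SET when it is set. A sketch against a fake in-memory DPCD (the array stands in for real AUX writes; register offsets follow the DP spec):

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_BW_SET     0x100
#define DP_LANE_COUNT_SET  0x101
#define DP_LINK_RATE_SET   0x115
#define DP_LANE_COUNT_ENHANCED_FRAME_EN 0x80

static uint8_t dpcd[0x200]; /* fake sink register file */

static void update_link_bw_set(uint8_t link_bw, uint8_t rate_select,
                               uint8_t lane_count, int enhanced_framing)
{
    if (enhanced_framing)
        lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

    if (link_bw) {
        /* DP and eDP v1.3 and earlier: program BW and lanes together. */
        dpcd[DP_LINK_BW_SET] = link_bw;
        dpcd[DP_LANE_COUNT_SET] = lane_count;
    } else {
        /* eDP v1.4+: leave DP_LINK_BW_SET untouched (zero) so the sink
         * honours the rate-table index in DP_LINK_RATE_SET. */
        dpcd[DP_LANE_COUNT_SET] = lane_count;
        dpcd[DP_LINK_RATE_SET] = rate_select;
    }
}

int main(void)
{
    update_link_bw_set(0 /* use rate table */, 3, 4, 1);
    printf("bw=%02x lanes=%02x rate=%02x\n",
           dpcd[DP_LINK_BW_SET], dpcd[DP_LANE_COUNT_SET],
           dpcd[DP_LINK_RATE_SET]);
    return 0;
}
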
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index e3f176a093d2f3..d2f8f20722d92c 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -109,8 +109,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+ continue;
+
+ crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+- dsc ? bpp << 4 : bpp,
+- dsc);
++ bpp << 4);
+
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port,
+@@ -921,6 +920,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ return 0;
+ }
+
++ *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (*status != MODE_OK)
++ return 0;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ *status = MODE_NO_DBLESCAN;
+ return 0;
+@@ -937,7 +940,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ return ret;
+
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+- drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
++ drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
+ *status = MODE_CLOCK_HIGH;
+ return 0;
+ }
+@@ -955,9 +958,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ bigjoiner = true;
+ max_dotclk *= 2;
++
++ /* TODO: add support for bigjoiner */
++ *status = MODE_CLOCK_HIGH;
++ return 0;
+ }
+
+- if (DISPLAY_VER(dev_priv) >= 10 &&
++ if (HAS_DSC_MST(dev_priv) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ /*
+ * TBD pass the connector BPC,
+@@ -988,11 +995,15 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ * Big joiner configuration needs DSC for TGL which is not true for
+ * XE_LPD where uncompressed joiner is supported.
+ */
+- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+- return MODE_CLOCK_HIGH;
++ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
++ *status = MODE_CLOCK_HIGH;
++ return 0;
++ }
+
+- if (mode_rate > max_rate && !dsc)
+- return MODE_CLOCK_HIGH;
++ if (mode_rate > max_rate && !dsc) {
++ *status = MODE_CLOCK_HIGH;
++ return 0;
++ }
+
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ return 0;
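
[Annotation] The intel_dp_mst hunks above convert bare `return MODE_CLOCK_HIGH;` statements into `*status = MODE_CLOCK_HIGH; return 0;`. The function's int return is an errno channel, so letting a positive mode-status enum escape through it mixes two value domains. A minimal sketch of the two-channel convention (types and the enum value are simplified stand-ins):

#include <errno.h>
#include <stdio.h>

enum mode_status { MODE_OK = 0, MODE_CLOCK_HIGH = 29 }; /* illustrative value */

struct mode { int clock; };

static int mode_valid_ctx(const struct mode *mode, int max_clock,
                          enum mode_status *status)
{
    if (!mode)
        return -EINVAL;            /* real failure: errno domain */

    if (mode->clock > max_clock) {
        *status = MODE_CLOCK_HIGH; /* verdict: status domain */
        return 0;
    }

    *status = MODE_OK;
    return 0;
}

int main(void)
{
    enum mode_status status;
    struct mode m = { .clock = 600000 };

    if (mode_valid_ctx(&m, 540000, &status) == 0)
        printf("verdict=%d\n", status); /* clock too high */
    return 0;
}
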
+diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+index 6d68b36292d361..247e7d675e2b93 100644
+--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+@@ -1556,7 +1556,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ }
+
+ static int
+-skl_ddi_calculate_wrpll(int clock /* in Hz */,
++skl_ddi_calculate_wrpll(int clock,
+ int ref_clock,
+ struct skl_wrpll_params *wrpll_params)
+ {
+@@ -1581,7 +1581,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ };
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+- u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
++ u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+@@ -1713,7 +1713,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
++ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
+ i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
+@@ -2462,7 +2462,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
+ static bool
+ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
+ {
+- return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
++ return ((IS_ELKHARTLAKE(i915) &&
+ IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
+ IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
+ i915->display.dpll.ref_clks.nssc == 38400;
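
[Annotation] The skl_ddi_calculate_wrpll() hunks above move the kHz-to-Hz conversion inside the function and widen to u64 before multiplying: the AFE clock is 5x the pixel clock in Hz, so a kHz port clock gets multiplied by 5000, which no longer fits in 32 bits for high dotclocks. A standalone demonstration (unsigned arithmetic is used so the wraparound is well-defined and printable; with a signed int, as in the original expression, the overflow is undefined behaviour):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t clock_khz = 1188000; /* an 8K-class dotclock, in kHz */

    uint32_t wrapped = clock_khz * 1000u * 5u;         /* 32-bit product wraps */
    uint64_t correct = (uint64_t)clock_khz * 1000 * 5; /* widen, then multiply */

    printf("wrapped=%" PRIu32 " correct=%" PRIu64 "\n", wrapped, correct);
    return 0;
}
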
+diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
+index b386894c3a6db2..d1cfa966d48dac 100644
+--- a/drivers/gpu/drm/i915/display/intel_dvo.c
++++ b/drivers/gpu/drm/i915/display/intel_dvo.c
+@@ -217,11 +217,17 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
+ struct drm_display_mode *mode)
+ {
+ struct intel_connector *connector = to_intel_connector(_connector);
++ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(connector, mode);
+ int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ int target_clock = mode->clock;
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
+index 446bbf7986b6f7..689b7c16d30072 100644
+--- a/drivers/gpu/drm/i915/display/intel_fb.c
++++ b/drivers/gpu/drm/i915/display/intel_fb.c
+@@ -1370,7 +1370,8 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int stride_tiles;
+
+- if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
++ if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
++ src_stride_tiles < dst_stride_tiles)
+ stride_tiles = src_stride_tiles;
+ else
+ stride_tiles = dst_stride_tiles;
+@@ -1497,8 +1498,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
+
+ size += remap_info->size;
+ } else {
+- unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
+- remap_info->width);
++ unsigned int dst_stride;
++
++ /*
++ * The hardware automagically calculates the CCS AUX surface
++ * stride from the main surface stride so can't really remap a
++ * smaller subset (unless we'd remap in whole AUX page units).
++ */
++ if (intel_fb_needs_pot_stride_remap(fb) &&
++ intel_fb_is_ccs_modifier(fb->base.modifier))
++ dst_stride = remap_info->src_stride;
++ else
++ dst_stride = remap_info->width;
++
++ dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
+
+ assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
+ color_plane_info->mapping_stride = dst_stride *
+diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
+index fffd568070d414..a131656757f2b6 100644
+--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
++++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
+@@ -254,6 +254,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
++
++ /*
++ * Pre-populate the dma address before we enter the vblank
++ * evade critical section as i915_gem_object_get_dma_address()
++ * will trigger might_sleep() even if it won't actually sleep,
++ * which is the case when the fb has already been pinned.
++ */
++ if (phys_cursor)
++ plane_state->phys_dma_addr =
++ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
+index a42549fa96918e..cb99839afcd03a 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
++++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
+@@ -1005,7 +1005,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
+ hdcp->value = value;
+ if (update_property) {
+ drm_connector_get(&connector->base);
+- queue_work(i915->unordered_wq, &hdcp->prop_work);
++ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
++ drm_connector_put(&connector->base);
+ }
+ }
+
+@@ -2480,7 +2481,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
+- queue_work(i915->unordered_wq, &hdcp->prop_work);
++ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
++ drm_connector_put(&connector->base);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+@@ -2497,7 +2499,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+- queue_work(i915->unordered_wq, &hdcp->prop_work);
++ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
++ drm_connector_put(&connector->base);
++
+ }
+ }
+
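
[Annotation] The three intel_hdcp.c hunks above enforce the same reference-count discipline: a connector reference is taken on behalf of the queued work item, and when queue_work() reports the work was already pending, the extra reference must be dropped immediately or it leaks. A userspace sketch of the pattern; the refcount and flag stand in for drm_connector_get/put() and queue_work():

#include <stdbool.h>
#include <stdio.h>

struct connector {
    int refcount;
    bool work_pending;
};

static void get(struct connector *c) { c->refcount++; }
static void put(struct connector *c) { c->refcount--; }

/* Mimics queue_work(): returns false if the item was already queued. */
static bool queue(struct connector *c)
{
    if (c->work_pending)
        return false;
    c->work_pending = true;
    return true;
}

static void schedule_prop_work(struct connector *c)
{
    get(c);      /* reference owned by the work item */
    if (!queue(c))
        put(c);  /* already queued: balance the reference */
}

int main(void)
{
    struct connector c = { .refcount = 1 };

    schedule_prop_work(&c); /* queues; work now holds one reference */
    schedule_prop_work(&c); /* already pending; no extra reference kept */
    printf("refcount=%d (expect 2)\n", c.refcount);
    return 0;
}
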
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+index 8023c85c7fa0ea..74059384892af8 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
++++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+@@ -249,7 +249,7 @@
+ #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+- PIPE_HDCP2_STREAM_STATUS(pipe))
++ PIPE_HDCP2_STREAM_STATUS(port))
+
+ #define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+ #define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index 94a7e1537f4278..bc975918e0eb45 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -1986,6 +1986,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
+ bool ycbcr_420_only;
+ enum intel_output_format sink_format;
+
++ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
+index 3ace56979b70e0..dcb07d9a739d95 100644
+--- a/drivers/gpu/drm/i915/display/intel_lvds.c
++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
+@@ -389,11 +389,16 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
+ struct drm_display_mode *mode)
+ {
+ struct intel_connector *connector = to_intel_connector(_connector);
++ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(connector, mode);
+ int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
+ enum drm_mode_status status;
+
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 97d5eef10130df..5cf3db7058b98c 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -674,7 +674,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
+
+ val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
+
+- val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
++ if (DISPLAY_VER(dev_priv) < 20)
++ val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
++
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+@@ -1398,9 +1400,21 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ * can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+- EDP_PSR_DEBUG_MASK_HPD |
+- EDP_PSR_DEBUG_MASK_LPSP |
+- EDP_PSR_DEBUG_MASK_MAX_SLEEP;
++ EDP_PSR_DEBUG_MASK_HPD;
++
++ /*
++ * For some unknown reason on HSW non-ULT (or at least on
++ * Dell Latitude E6540) external displays start to flicker
++ * when PSR is enabled on the eDP. SR/PC6 residency is much
++ * higher than should be possible with an external display.
++ * As a workaround leave LPSP unmasked to prevent PSR entry
++ * when external displays are active.
++ */
++ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
++ mask |= EDP_PSR_DEBUG_MASK_LPSP;
++
++ if (DISPLAY_VER(dev_priv) < 20)
++ mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+ /*
+ * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 7d25a64698e2f5..18ae41d5f4f988 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -1212,7 +1212,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_tv_format format;
+ u32 format_map;
+
+- format_map = 1 << conn_state->tv.mode;
++ format_map = 1 << conn_state->tv.legacy_mode;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+@@ -1906,13 +1906,19 @@ static enum drm_mode_status
+ intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(connector);
+- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
++ int max_dotclk = i915->max_dotclk_freq;
++ enum drm_mode_status status;
+ int clock = mode->clock;
+
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+@@ -2289,7 +2295,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ * Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+- format_map = 1 << conn_state->tv.mode;
++ format_map = 1 << conn_state->tv.legacy_mode;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+@@ -2354,7 +2360,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
+ int i;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+- if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
++ if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
+ *val = i;
+
+ return 0;
+@@ -2410,7 +2416,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
+ struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
+
+ if (property == intel_sdvo_connector->tv_format) {
+- state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
++ state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
+
+ if (state->crtc) {
+ struct drm_crtc_state *crtc_state =
+@@ -3065,7 +3071,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ drm_property_add_enum(intel_sdvo_connector->tv_format, i,
+ tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+- intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
++ intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 3ebf41859043e9..cdf2455440beaf 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -58,7 +58,7 @@ struct intel_tc_port {
+ struct delayed_work link_reset_work;
+ int link_refcount;
+ bool legacy_port:1;
+- char port_name[8];
++ const char *port_name;
+ enum tc_port_mode mode;
+ enum tc_port_mode init_mode;
+ enum phy_fia phy_fia;
+@@ -1841,8 +1841,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+ else
+ tc->phy_ops = &icl_tc_phy_ops;
+
+- snprintf(tc->port_name, sizeof(tc->port_name),
+- "%c/TC#%d", port_name(port), tc_port + 1);
++ tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
++ tc_port + 1);
++ if (!tc->port_name) {
++ kfree(tc);
++ return -ENOMEM;
++ }
+
+ mutex_init(&tc->lock);
+ /* TODO: Combine the two works */
+@@ -1863,6 +1867,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
+ {
+ intel_tc_port_suspend(dig_port);
+
++ kfree(dig_port->tc->port_name);
+ kfree(dig_port->tc);
+ dig_port->tc = NULL;
+ }
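
[Annotation] The intel_tc.c hunks above replace a fixed 8-byte port_name[] with a kasprintf()-allocated string, which sizes the name exactly (and silences format-truncation concerns for large values), at the cost of a NULL check on allocation and a kfree() on the cleanup path. A sketch using the glibc asprintf() analogue:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

struct tc_port {
    char *port_name; /* was: char port_name[8]; */
};

static struct tc_port *tc_port_init(char port, int tc_port_num)
{
    struct tc_port *tc = calloc(1, sizeof(*tc));

    if (!tc)
        return NULL;

    if (asprintf(&tc->port_name, "%c/TC#%d", port, tc_port_num + 1) < 0) {
        free(tc); /* mirrors the kfree(tc) on kasprintf() failure above */
        return NULL;
    }
    return tc;
}

static void tc_port_cleanup(struct tc_port *tc)
{
    free(tc->port_name); /* mirrors the kfree(tc->port_name) added above */
    free(tc);
}

int main(void)
{
    struct tc_port *tc = tc_port_init('F', 11);

    if (tc) {
        printf("%s\n", tc->port_name);
        tc_port_cleanup(tc);
    }
    return 0;
}
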
+diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
+index 36b479b46b6004..042ed966807edb 100644
+--- a/drivers/gpu/drm/i915/display/intel_tv.c
++++ b/drivers/gpu/drm/i915/display/intel_tv.c
+@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
+
+ static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
+ {
+- int format = conn_state->tv.mode;
++ int format = conn_state->tv.legacy_mode;
+
+ return &tv_modes[format];
+ }
+@@ -958,8 +958,14 @@ static enum drm_mode_status
+ intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
++ int max_dotclk = i915->max_dotclk_freq;
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+@@ -1704,7 +1710,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
+ break;
+ }
+
+- connector->state->tv.mode = i;
++ connector->state->tv.legacy_mode = i;
+ }
+
+ static int
+@@ -1859,7 +1865,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
+ old_state = drm_atomic_get_old_connector_state(state, connector);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
+
+- if (old_state->tv.mode != new_state->tv.mode ||
++ if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
+ old_state->tv.margins.left != new_state->tv.margins.left ||
+ old_state->tv.margins.right != new_state->tv.margins.right ||
+ old_state->tv.margins.top != new_state->tv.margins.top ||
+@@ -1896,7 +1902,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
+ conn_state->tv.margins.right = 46;
+ conn_state->tv.margins.bottom = 37;
+
+- conn_state->tv.mode = 0;
++ conn_state->tv.legacy_mode = 0;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+@@ -1910,7 +1916,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
+
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.legacy_tv_mode_property,
+- conn_state->tv.mode);
++ conn_state->tv.legacy_mode);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_left_margin_property,
+ conn_state->tv.margins.left);
+diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+index a9f44abfc9fc28..b50cd0dcabda90 100644
+--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
++++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+@@ -897,11 +897,6 @@ struct lfp_brightness_level {
+ u16 reserved;
+ } __packed;
+
+-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+- offsetof(struct bdb_lfp_backlight_data, brightness_level)
+-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+- offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+-
+ struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct lfp_backlight_data_entry data[16];
+diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
+index 88e4759b538b64..b844bdd16de99a 100644
+--- a/drivers/gpu/drm/i915/display/intel_vrr.c
++++ b/drivers/gpu/drm/i915/display/intel_vrr.c
+@@ -111,6 +111,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ if (!intel_vrr_is_capable(connector))
+ return;
+
++ /*
++ * FIXME all joined pipes share the same transcoder.
++ * Need to account for that during VRR toggle/push/etc.
++ */
++ if (crtc_state->bigjoiner_pipes)
++ return;
++
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index 1e7c97243fcf55..8a934bada6245d 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -504,7 +504,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ {
+ struct drm_plane *plane = NULL;
+ struct intel_plane *intel_plane;
+- struct intel_plane_state *plane_state = NULL;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
+@@ -536,6 +535,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+
+ /* walkthrough scaler_users bits and start assigning scalers */
+ for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
++ struct intel_plane_state *plane_state = NULL;
+ int *scaler_id;
+ const char *name;
+ int idx, ret;
+diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+index ffc15d278a39d3..d557ecd4e1ebe1 100644
+--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
++++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+@@ -20,6 +20,7 @@
+ #include "skl_scaler.h"
+ #include "skl_universal_plane.h"
+ #include "skl_watermark.h"
++#include "gt/intel_gt.h"
+ #include "pxp/intel_pxp.h"
+
+ static const u32 skl_plane_formats[] = {
+@@ -2169,8 +2170,8 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+ {
+ /* Wa_14017240301 */
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++ if (IS_GFX_GT_IP_STEP(to_gt(i915), IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(to_gt(i915), IP_VER(12, 71), STEP_A0, STEP_B0))
+ return false;
+
+ /* Wa_22011186057 */
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index a96e7d028c5c61..d778b88413b777 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1540,9 +1540,25 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_dsi_encoder_destroy,
+ };
+
++static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct drm_i915_private *i915 = to_i915(connector->dev);
++
++ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++ }
++
++ return intel_dsi_mode_valid(connector, mode);
++}
++
+ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+- .mode_valid = intel_dsi_mode_valid,
++ .mode_valid = vlv_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+ };
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 9a9ff84c90d7e6..e38f06a6e56ebc 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+ if (idx >= pc->num_user_engines)
+ return -EINVAL;
+
++ idx = array_index_nospec(idx, pc->num_user_engines);
+ pe = &pc->user_engines[idx];
+
+ /* Only render engine supports RPCS configuration. */
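
[Annotation] The i915_gem_context.c hunk above inserts array_index_nospec() between the bounds check and the array access: after the architectural check, the index is clamped branchlessly so a mispredicted speculative path cannot use an attacker-chosen index to steer the following load (Spectre v1). A userspace approximation of the clamp; this is illustrative only, the kernel helper uses arch-specific sequences:

#include <stddef.h>
#include <stdio.h>

static size_t index_nospec(size_t index, size_t size)
{
    /* mask is all-ones when index < size, all-zeroes otherwise. */
    size_t mask = (size_t)0 - (size_t)(index < size);

    return index & mask;
}

int main(void)
{
    int engines[4] = { 10, 11, 12, 13 };
    size_t idx = 2;

    if (idx >= 4)
        return 1;                /* architectural bounds check */
    idx = index_nospec(idx, 4);  /* speculation-safe clamp */
    printf("%d\n", engines[idx]);
    return 0;
}
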
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
+index d24c0ce8805c70..19156ba4b9ef40 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
+@@ -405,8 +405,8 @@ static int ext_set_pat(struct i915_user_extension __user *base, void *data)
+ BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
+ offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));
+
+- /* Limiting the extension only to Meteor Lake */
+- if (!IS_METEORLAKE(i915))
++ /* Limiting the extension only to Xe_LPG and beyond */
++ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
+ return -ENODEV;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 310654542b42c1..a59c17ec7fa366 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+ return i915_error_to_vmf_fault(err);
+ }
+
++static void set_address_limits(struct vm_area_struct *area,
++ struct i915_vma *vma,
++ unsigned long obj_offset,
++ unsigned long *start_vaddr,
++ unsigned long *end_vaddr)
++{
++ unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
++ long start, end; /* memory boundaries */
++
++ /*
++ * Let's move into the ">> PAGE_SHIFT"
++ * domain to be sure not to lose bits
++ */
++ vm_start = area->vm_start >> PAGE_SHIFT;
++ vm_end = area->vm_end >> PAGE_SHIFT;
++ vma_size = vma->size >> PAGE_SHIFT;
++
++ /*
++ * Calculate the memory boundaries by considering the offset
++ * provided by the user during memory mapping and the offset
++ * provided for the partial mapping.
++ */
++ start = vm_start;
++ start -= obj_offset;
++ start += vma->gtt_view.partial.offset;
++ end = start + vma_size;
++
++ start = max_t(long, start, vm_start);
++ end = min_t(long, end, vm_end);
++
++ /* Let's move back into the "<< PAGE_SHIFT" domain */
++ *start_vaddr = (unsigned long)start << PAGE_SHIFT;
++ *end_vaddr = (unsigned long)end << PAGE_SHIFT;
++}
++
+ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+ {
+ #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
+@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ bool write = area->vm_flags & VM_WRITE;
+ struct i915_gem_ww_ctx ww;
++ unsigned long obj_offset;
++ unsigned long start, end; /* memory boundaries */
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ pgoff_t page_offset;
++ unsigned long pfn;
+ int srcu;
+ int ret;
+
+- /* We don't use vmf->pgoff since that has the fake offset */
++ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
++ page_offset += obj_offset;
+
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+ if (ret)
+ goto err_unpin;
+
++ set_address_limits(area, vma, obj_offset, &start, &end);
++
++ pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
++ pfn += (start - area->vm_start) >> PAGE_SHIFT;
++ pfn += obj_offset - vma->gtt_view.partial.offset;
++
+ /* Finally, remap it using the new GTT offset */
+- ret = remap_io_mapping(area,
+- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
+- (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
+- min_t(u64, vma->size, area->vm_end - area->vm_start),
+- &ggtt->iomap);
++ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
+ if (ret)
+ goto err_fence;
+
+@@ -1088,6 +1129,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
+ mmo = mmap_offset_attach(obj, mmap_type, NULL);
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
++
++ vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
+ }
+
+ /*
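
[Annotation] The i915_gem_mman.c hunks above compute the remappable window for a partial GTT mapping instead of always remapping from vm_start: everything is moved into page units, the window is derived from the user's mmap offset and the partial-view offset, and the result is clamped to the VMA so the remap never walks outside it. A standalone sketch of the arithmetic, with made-up numbers:

#include <stdio.h>

#define PAGE_SHIFT 12

static void set_address_limits(unsigned long vm_start, unsigned long vm_end,
                               unsigned long obj_size_bytes,
                               long obj_offset_pages,     /* user mmap offset */
                               long partial_offset_pages, /* partial view start */
                               unsigned long *start_vaddr,
                               unsigned long *end_vaddr)
{
    /* Work in ">> PAGE_SHIFT" units so the arithmetic cannot lose bits. */
    long vs = (long)(vm_start >> PAGE_SHIFT);
    long ve = (long)(vm_end >> PAGE_SHIFT);
    long size = (long)(obj_size_bytes >> PAGE_SHIFT);
    long start, end;

    start = vs - obj_offset_pages + partial_offset_pages;
    end = start + size;

    /* Clamp to the user's VMA. */
    if (start < vs) start = vs;
    if (end > ve) end = ve;

    /* Back into byte addresses. */
    *start_vaddr = (unsigned long)start << PAGE_SHIFT;
    *end_vaddr = (unsigned long)end << PAGE_SHIFT;
}

int main(void)
{
    unsigned long s, e;

    /* 16-page VMA, 8-page partial view 4 pages into the object. */
    set_address_limits(0x7f0000000000ul, 0x7f0000010000ul,
                       8ul << PAGE_SHIFT, 4, 4, &s, &e);
    printf("start=%#lx end=%#lx\n", s, e);
    return 0;
}
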
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index f607b87890ddd6..c096fcdb2f1ed4 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -285,7 +285,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
+ static inline bool
+ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+ {
+- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
++ /* TODO: make DPT shrinkable when it has no bound vmas */
++ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
++ !obj->is_dpt;
+ }
+
+ static inline bool
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+index 9227f8146a583f..6dc097a2ac07b3 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+@@ -1136,7 +1136,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
+ GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
+ }
+
+- if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
++ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
+ intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+
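
[Annotation] The one-line i915_gem_ttm.c fix above corrects a bitwise-vs-logical operator bug: the intent is "a wakeref is held AND the autosuspend option is non-zero", but '&' ANDs the opaque wakeref cookie against the config value, so the test fires or not depending on which bits the two values happen to share. A minimal reproduction (constants invented for the demonstration):

#include <stdbool.h>
#include <stdio.h>

#define AUTOSUSPEND_MS 250 /* stands in for CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND */

int main(void)
{
    unsigned long wakeref = 0x500; /* opaque cookie; 0x500 & 250 == 0 */

    bool buggy = wakeref & AUTOSUSPEND_MS;       /* false: no shared bits */
    bool fixed = wakeref && AUTOSUSPEND_MS != 0; /* true: both non-zero */

    printf("buggy=%d fixed=%d\n", buggy, fixed);
    return 0;
}
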
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+index 1d3ebdf4069b5d..c08b67593565c5 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+@@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+ {
+ GEM_WARN_ON(obj->userptr.page_ref);
+
++ if (!obj->userptr.notifier.mm)
++ return;
++
+ mmu_interval_notifier_remove(&obj->userptr.notifier);
+ obj->userptr.notifier.mm = NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+index 7ad36198aab2a4..cddf8c16e9a726 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+@@ -4,9 +4,9 @@
+ */
+
+ #include "gen8_engine_cs.h"
+-#include "i915_drv.h"
+ #include "intel_engine_regs.h"
+ #include "intel_gpu_commands.h"
++#include "intel_gt.h"
+ #include "intel_lrc.h"
+ #include "intel_ring.h"
+
+@@ -226,8 +226,8 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+ static int mtl_dummy_pipe_control(struct i915_request *rq)
+ {
+ /* Wa_14016712196 */
+- if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
++ if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
++ IS_DG2(rq->i915)) {
+ u32 *cs;
+
+ /* dummy PIPE_CONTROL + depth flush */
+@@ -808,6 +808,7 @@ u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
+ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+ {
+ struct drm_i915_private *i915 = rq->i915;
++ struct intel_gt *gt = rq->engine->gt;
+ u32 flags = (PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TLB_INVALIDATE |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+@@ -818,8 +819,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ /* Wa_14016712196 */
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
+ /* dummy PIPE_CONTROL + depth flush */
+ cs = gen12_emit_pipe_control(cs, 0,
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
+diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+index ecc990ec1b9526..f2973cd1a8aaef 100644
+--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+@@ -258,8 +258,13 @@ static void signal_irq_work(struct irq_work *work)
+ i915_request_put(rq);
+ }
+
++ /* Lazy irq enabling after HW submission */
+ if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ intel_breadcrumbs_arm_irq(b);
++
++ /* And confirm that we still want irqs enabled before we yield */
++ if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
++ intel_breadcrumbs_disarm_irq(b);
+ }
+
+ struct intel_breadcrumbs *
+@@ -310,13 +315,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+ return;
+
+ /* Kick the work once more to drain the signalers, and disarm the irq */
+- irq_work_sync(&b->irq_work);
+- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
+- local_irq_disable();
+- signal_irq_work(&b->irq_work);
+- local_irq_enable();
+- cond_resched();
+- }
++ irq_work_queue(&b->irq_work);
+ }
+
+ void intel_breadcrumbs_free(struct kref *kref)
+@@ -399,7 +398,7 @@ static void insert_breadcrumb(struct i915_request *rq)
+ * the request as it may have completed and raised the interrupt as
+ * we were attaching it into the lists.
+ */
+- if (!b->irq_armed || __i915_request_is_complete(rq))
++ if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
+ irq_work_queue(&b->irq_work);
+ }
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index e85d70a62123f9..d9bb352b8baab7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -912,6 +912,29 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
+ info->engine_mask &= ~BIT(GSC0);
+ }
+
++ /*
++ * Do not create the command streamer for CCS slices beyond the first.
++ * All the workload submitted to the first engine will be shared among
++ * all the slices.
++ *
++ * Once the user will be allowed to customize the CCS mode, then this
++ * check needs to be removed.
++ */
++ if (IS_DG2(gt->i915)) {
++ u8 first_ccs = __ffs(CCS_MASK(gt));
++
++ /*
++ * Store the number of active cslices before
++ * changing the CCS engine configuration
++ */
++ gt->ccs.cslices = CCS_MASK(gt);
++
++ /* Mask off all the CCS engine */
++ info->engine_mask &= ~GENMASK(CCS3, CCS0);
++ /* Put back in the first CCS engine */
++ info->engine_mask |= BIT(_CCS(first_ccs));
++ }
++
+ return info->engine_mask;
+ }
+
+@@ -1616,9 +1639,7 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ * Wa_22011802037: Prior to doing a reset, ensure CS is
+ * stopped, set ring stop bit and prefetch disable bit to halt CS
+ */
+- if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+- (GRAPHICS_VER(engine->i915) >= 11 &&
+- GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
++ if (intel_engine_reset_needs_wa_22011802037(engine->gt))
+ intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
+ _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+index b538b5c04948f6..5a3a5b29d15077 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+@@ -21,7 +21,7 @@ static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
+ {
+ struct drm_i915_private *i915 = engine->i915;
+
+- if (IS_METEORLAKE(i915) && engine->id == GSC0) {
++ if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
+ intel_uncore_write(engine->gt->uncore,
+ RC_PSMI_CTRL_GSCCS,
+ _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
+@@ -278,9 +278,6 @@ static int __engine_park(struct intel_wakeref *wf)
+ intel_engine_park_heartbeat(engine);
+ intel_breadcrumbs_park(engine->breadcrumbs);
+
+- /* Must be reset upon idling, or we may miss the busy wakeup. */
+- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
+-
+ if (engine->park)
+ engine->park(engine);
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
+index dcedff41a825fc..d304e0a948f0d1 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
+@@ -42,12 +42,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
+ (struct llist_head *)&engine->i915->uabi_engines);
+ }
+
+-static const u8 uabi_classes[] = {
++#define I915_NO_UABI_CLASS ((u16)(-1))
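++/* u16 rather than u8 so this sentinel cannot collide with a valid class id */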
++
++static const u16 uabi_classes[] = {
+ [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
+ [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
+ [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
+ [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
+ [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
++ [OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
+ };
+
+ static int engine_cmp(void *priv, const struct list_head *A,
+@@ -202,6 +205,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
+
+ void intel_engines_driver_register(struct drm_i915_private *i915)
+ {
++ u16 name_instance, other_instance = 0;
+ struct legacy_ring ring = {};
+ struct list_head *it, *next;
+ struct rb_node **p, *prev;
+@@ -219,27 +223,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
+ if (intel_gt_has_unrecoverable_error(engine->gt))
+ continue; /* ignore incomplete engines */
+
+- /*
+- * We don't want to expose the GSC engine to the users, but we
+- * still rename it so it is easier to identify in the debug logs
+- */
+- if (engine->id == GSC0) {
+- engine_rename(engine, "gsc", 0);
+- continue;
+- }
+-
+ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
+ engine->uabi_class = uabi_classes[engine->class];
++ if (engine->uabi_class == I915_NO_UABI_CLASS) {
++ name_instance = other_instance++;
++ } else {
++ GEM_BUG_ON(engine->uabi_class >=
++ ARRAY_SIZE(i915->engine_uabi_class_count));
++ name_instance =
++ i915->engine_uabi_class_count[engine->uabi_class]++;
++ }
++ engine->uabi_instance = name_instance;
+
+- GEM_BUG_ON(engine->uabi_class >=
+- ARRAY_SIZE(i915->engine_uabi_class_count));
+- engine->uabi_instance =
+- i915->engine_uabi_class_count[engine->uabi_class]++;
+-
+- /* Replace the internal name with the final user facing name */
++ /*
++		 * Replace the internal name with the final user- and log-facing
++		 * name.
++ */
+ engine_rename(engine,
+ intel_engine_class_repr(engine->class),
+- engine->uabi_instance);
++ name_instance);
++
++ if (engine->uabi_class == I915_NO_UABI_CLASS)
++ continue;
+
+ rb_link_node(&engine->uabi_node, prev, p);
+ rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index 3292524469d509..2065be5a196bf6 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -3001,9 +3001,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
+ * Wa_22011802037: In addition to stopping the cs, we need
+ * to wait for any pending mi force wakeups
+ */
+- if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+- (GRAPHICS_VER(engine->i915) >= 11 &&
+- GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
++ if (intel_engine_reset_needs_wa_22011802037(engine->gt))
+ intel_engine_wait_for_pending_mi_fw(engine);
+
+ engine->execlists.reset_ccid = active_ccid(engine);
+@@ -3274,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
+ {
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
++
++ /* Reset upon idling, or we may delay the busy wakeup. */
++ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
+ }
+
+ static void add_to_engine(struct i915_request *rq)
+@@ -3314,11 +3315,7 @@ static void remove_from_engine(struct i915_request *rq)
+
+ static bool can_preempt(struct intel_engine_cs *engine)
+ {
+- if (GRAPHICS_VER(engine->i915) > 8)
+- return true;
+-
+- /* GPGPU on bdw requires extra w/a; not implemented */
+- return engine->class != RENDER_CLASS;
++ return GRAPHICS_VER(engine->i915) > 8;
+ }
+
+ static void kick_execlists(const struct i915_request *rq, int prio)
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index da21f2786b5d7d..b20d8fe8aa95d5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -190,6 +190,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+ spin_unlock_irq(&uncore->lock);
+ }
+
++static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
++{
++ /*
++ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
++ * will be dropped. For WC mappings in general we have 64 byte burst
++ * writes when the WC buffer is flushed, so we can't use it, but have to
++ * resort to an uncached mapping. The WC issue is easily caught by the
++ * readback check when writing GTT PTE entries.
++ */
++ if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
++ return true;
++
++ return false;
++}
++
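++/*
++ * needs_wc_ggtt_mapping() is consulted both by gen8_ggtt_invalidate()
++ * below (to decide whether the WC-buffer flush write is required) and by
++ * ggtt_probe_common() (to choose ioremap_wc() vs ioremap()), keeping the
++ * two decisions in sync.
++ */
++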
+ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ {
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+@@ -197,8 +212,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
++ *
++ * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
+ */
+- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
++ if (needs_wc_ggtt_mapping(ggtt->vm.i915))
++ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
++ GFX_FLSH_CNTL_EN);
+ }
+
+ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+@@ -902,17 +921,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+ GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+
+- /*
+- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+- * will be dropped. For WC mappings in general we have 64 byte burst
+- * writes when the WC buffer is flushed, so we can't use it, but have to
+- * resort to an uncached mapping. The WC issue is easily caught by the
+- * readback check when writing GTT PTE entries.
+- */
+- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+- ggtt->gsm = ioremap(phys_addr, size);
+- else
++ if (needs_wc_ggtt_mapping(i915))
+ ggtt->gsm = ioremap_wc(phys_addr, size);
++ else
++ ggtt->gsm = ioremap(phys_addr, size);
++
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index 40371b8a9bbbd7..93bc1cc1ee7e64 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
+ return;
+
+ GEM_BUG_ON(fence->vma != vma);
++ i915_active_wait(&fence->active);
+ GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index 449f0b7fc84343..95631e8f39e7b6 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -967,8 +967,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
+
+ err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+- intel_gt_release_all(i915);
+-
+ return ret;
+ }
+
+@@ -987,15 +985,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
+ return 0;
+ }
+
+-void intel_gt_release_all(struct drm_i915_private *i915)
+-{
+- struct intel_gt *gt;
+- unsigned int id;
+-
+- for_each_gt(gt, i915, id)
+- i915->gt[id] = NULL;
+-}
+-
+ void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+ {
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
+index 6c34547b58b59f..6e63b46682f76b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt.h
+@@ -14,6 +14,37 @@
+ struct drm_i915_private;
+ struct drm_printer;
+
++/*
++ * Check that the GT is a graphics GT and has an IP version within the
++ * specified range (inclusive).
++ */
++#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
++ BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
++ BUILD_BUG_ON_ZERO((until) < (from)) + \
++ ((gt)->type != GT_MEDIA && \
++ GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
++ GRAPHICS_VER_FULL((gt)->i915) <= (until)))
++
++/*
++ * Check that the GT is a graphics GT with a specific IP version and has
++ * a stepping in the range [from, until). The lower stepping bound is
++ * inclusive, the upper bound is exclusive. The most common use-case of this
++ * macro is for checking bounds for workarounds, which usually have a stepping
++ * ("from") at which the hardware issue is first present and another stepping
++ * ("until") at which a hardware fix is present and the software workaround is
++ * no longer necessary. E.g.,
++ *
++ * IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
++ * IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
++ *
++ * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
++ * stepping bound for the specified IP version.
++ */
++#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
++ BUILD_BUG_ON_ZERO((until) <= (from)) + \
++ (IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
++ IS_GRAPHICS_STEP((gt)->i915, (from), (until))))
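++
++/*
++ * For illustration only: a check that previously needed two
++ * IS_MTL_GRAPHICS_STEP() subplatform tests can now be expressed purely in
++ * terms of the GT's graphics IP version, e.g.
++ *
++ *	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ *	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
++ *		... apply the workaround ...
++ */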
++
+ #define GT_TRACE(gt, fmt, ...) do { \
+ const struct intel_gt *gt__ __maybe_unused = (gt); \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+new file mode 100644
+index 00000000000000..3c62a44e9106ce
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+@@ -0,0 +1,39 @@
++// SPDX-License-Identifier: MIT
++/*
++ * Copyright © 2024 Intel Corporation
++ */
++
++#include "i915_drv.h"
++#include "intel_gt.h"
++#include "intel_gt_ccs_mode.h"
++#include "intel_gt_regs.h"
++
++unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
++{
++ int cslice;
++ u32 mode = 0;
++ int first_ccs = __ffs(CCS_MASK(gt));
++
++ if (!IS_DG2(gt->i915))
++ return 0;
++
++ /* Build the value for the fixed CCS load balancing */
++ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
++ if (gt->ccs.cslices & BIT(cslice))
++ /*
++			 * If the cslice is present, assign it
++			 * to the first CCS engine...
++ */
++			mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
++		else
++ /*
++ * ... otherwise, mark the cslice as
++			 * unavailable so that no CCS dispatches to it
++ */
++ mode |= XEHP_CCS_MODE_CSLICE(cslice,
++ XEHP_CCS_MODE_CSLICE_MASK);
++ }
++
++ return mode;
++}
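++
++/*
++ * Worked example (illustrative): with gt->ccs.cslices == 0b0101 and
++ * first_ccs == 0, cslices 0 and 2 are routed to CCS0 (field value 0)
++ * while the fused-off cslices 1 and 3 get the all-ones "no dispatch"
++ * value:
++ *
++ *	mode = XEHP_CCS_MODE_CSLICE(1, XEHP_CCS_MODE_CSLICE_MASK) |
++ *	       XEHP_CCS_MODE_CSLICE(3, XEHP_CCS_MODE_CSLICE_MASK)
++ *	     = (0x7 << 3) | (0x7 << 9) = 0xe38
++ */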
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
+new file mode 100644
+index 00000000000000..55547f2ff426a4
+--- /dev/null
++++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: MIT */
++/*
++ * Copyright © 2024 Intel Corporation
++ */
++
++#ifndef __INTEL_GT_CCS_MODE_H__
++#define __INTEL_GT_CCS_MODE_H__
++
++struct intel_gt;
++
++unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt);
++
++#endif /* __INTEL_GT_CCS_MODE_H__ */
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+index 2c0f1f3e28ff89..c6dec485aefbec 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+@@ -3,8 +3,7 @@
+ * Copyright © 2022 Intel Corporation
+ */
+
+-#include "i915_drv.h"
+-
++#include "intel_gt.h"
+ #include "intel_gt_mcr.h"
+ #include "intel_gt_print.h"
+ #include "intel_gt_regs.h"
+@@ -166,8 +165,8 @@ void intel_gt_mcr_init(struct intel_gt *gt)
+ gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
+ /* Wa_14016747170 */
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
+ fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
+ intel_uncore_read(gt->uncore,
+ MTL_GT_ACTIVITY_FACTOR));
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+index 2cdfb2f713d026..64acab146b52f8 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+@@ -1468,8 +1468,14 @@
+ #define ECOBITS_PPGTT_CACHE4B (0 << 8)
+
+ #define GEN12_RCU_MODE _MMIO(0x14800)
++#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
+ #define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+
++#define XEHP_CCS_MODE _MMIO(0x14804)
++#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
++#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
++#define XEHP_CCS_MODE_CSLICE(cslice, ccs)	((ccs) << ((cslice) * XEHP_CCS_MODE_CSLICE_WIDTH))
++
+ #define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
+ #define CHV_FGT_DISABLE_SS0 (1 << 10)
+ #define CHV_FGT_DISABLE_SS1 (1 << 11)
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+index def7dd0eb6f196..cfdd2ad5e9549c 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+@@ -207,6 +207,14 @@ struct intel_gt {
+ [MAX_ENGINE_INSTANCE + 1];
+ enum intel_submission_method submission_method;
+
++ struct {
++ /*
++		 * Mask of the non-fused CCS slices
++		 * to be used for load balancing
++ */
++ intel_engine_mask_t cslices;
++ } ccs;
++
+ /*
+ * Default address space (either GGTT or ppGTT depending on arch).
+ *
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index c378cc7c953c47..b99efa348ad1ec 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -1316,29 +1316,6 @@ gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
+ return cs;
+ }
+
+-/*
+- * On DG2 during context restore of a preempted context in GPGPU mode,
+- * RCS restore hang is detected. This is extremely timing dependent.
+- * To address this below sw wabb is implemented for DG2 A steppings.
+- */
+-static u32 *
+-dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
+-{
+- *cs++ = MI_LOAD_REGISTER_IMM(1);
+- *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG(ce->engine->mmio_base));
+- *cs++ = 0x21;
+-
+- *cs++ = MI_LOAD_REGISTER_REG;
+- *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+- *cs++ = i915_mmio_reg_offset(XEHP_CULLBIT1);
+-
+- *cs++ = MI_LOAD_REGISTER_REG;
+- *cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
+- *cs++ = i915_mmio_reg_offset(XEHP_CULLBIT2);
+-
+- return cs;
+-}
+-
+ /*
+ * The bspec's tuning guide asks us to program a vertical watermark value of
+ * 0x3FF. However this register is not saved/restored properly by the
+@@ -1363,21 +1340,15 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
+ cs = gen12_emit_cmd_buf_wa(ce, cs);
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+- /* Wa_22011450934:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_A0, STEP_B0) ||
+- IS_DG2_GRAPHICS_STEP(ce->engine->i915, G11, STEP_A0, STEP_B0))
+- cs = dg2_emit_rcs_hang_wabb(ce, cs);
+-
+ /* Wa_16013000631:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
+- IS_DG2_G11(ce->engine->i915))
++ if (IS_DG2_G11(ce->engine->i915))
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+
+ cs = gen12_emit_aux_table_inv(ce->engine, cs);
+
+ /* Wa_16014892111 */
+- if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(ce->engine->i915, P, STEP_A0, STEP_B0) ||
++ if (IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
+ IS_DG2(ce->engine->i915))
+ cs = dg2_emit_draw_watermark_setting(cs);
+
+@@ -1391,8 +1362,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
+ cs = gen12_emit_restore_scratch(ce, cs);
+
+ /* Wa_16013000631:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
+- IS_DG2_G11(ce->engine->i915))
++ if (IS_DG2_G11(ce->engine->i915))
+ if (ce->engine->class == COMPUTE_CLASS)
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
+diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
+index 2c014407225ccb..07269ff3be136d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -404,18 +404,6 @@ static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+ };
+
+-static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
+- /* Wa_14011441408: Set Go to Memory for MOCS#0 */
+- MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+- /* UC - Coherent; GO:Memory */
+- MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+- /* UC - Non-Coherent; GO:Memory */
+- MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+-
+- /* WB - LC */
+- MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+-};
+-
+ static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
+ /* Error */
+ MOCS_ENTRY(0, 0, L3_3_WB),
+@@ -507,7 +495,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ memset(table, 0, sizeof(struct drm_i915_mocs_table));
+
+ table->unused_entries_index = I915_MOCS_PTE;
+- if (IS_METEORLAKE(i915)) {
++ if (IS_GFX_GT_IP_RANGE(&i915->gt0, IP_VER(12, 70), IP_VER(12, 71))) {
+ table->size = ARRAY_SIZE(mtl_mocs_table);
+ table->table = mtl_mocs_table;
+ table->n_entries = MTL_NUM_MOCS_ENTRIES;
+@@ -521,13 +509,8 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ table->wb_index = 2;
+ table->unused_entries_index = 2;
+ } else if (IS_DG2(i915)) {
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+- table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
+- table->table = dg2_mocs_table_g10_ax;
+- } else {
+- table->size = ARRAY_SIZE(dg2_mocs_table);
+- table->table = dg2_mocs_table;
+- }
++ table->size = ARRAY_SIZE(dg2_mocs_table);
++ table->table = dg2_mocs_table;
+ table->uc_index = 1;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 3;
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 58bb1c55294c93..9e113e9473260a 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -118,14 +118,12 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ GEN6_RC_CTL_EI_MODE(1);
+
+ /*
+- * Wa_16011777198 and BSpec 52698 - Render powergating must be off.
++ * BSpec 52698 - Render powergating must be off.
+ * FIXME BSpec is outdated, disabling powergating for MTL is just
+ * temporary wa and should be removed after fixing real cause
+ * of forcewake timeouts.
+ */
+- if (IS_METEORLAKE(gt->i915) ||
+- IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+- IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
++ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ pg_enable =
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+@@ -584,19 +582,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
+
+ static void rc6_res_reg_init(struct intel_rc6 *rc6)
+ {
+- memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
++ i915_reg_t res_reg[INTEL_RC6_RES_MAX] = {
++ [0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG,
++ };
+
+ switch (rc6_to_gt(rc6)->type) {
+ case GT_MEDIA:
+- rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
++ res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
+ break;
+ default:
+- rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
+- rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
+- rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
+- rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
++ res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
++ res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
++ res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
++ res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
+ break;
+ }
++
++ memcpy(rc6->res_reg, res_reg, sizeof(res_reg));
+ }
+
+ void intel_rc6_init(struct intel_rc6 *rc6)
+diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
+index cc6bd21a3e51f1..13fb8e5042c584 100644
+--- a/drivers/gpu/drm/i915/gt/intel_reset.c
++++ b/drivers/gpu/drm/i915/gt/intel_reset.c
+@@ -705,7 +705,7 @@ static int __reset_guc(struct intel_gt *gt)
+
+ static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+ {
+- if (!IS_METEORLAKE(gt->i915) || !HAS_ENGINE(gt, GSC0))
++ if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0))
+ return false;
+
+ if (!__HAS_ENGINE(engine_mask, GSC0))
+@@ -1297,7 +1297,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
+ if (msg)
+ drm_notice(&engine->i915->drm,
+ "Resetting %s for %s\n", engine->name, msg);
+- atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
++ i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
+
+ ret = intel_gt_reset_engine(engine);
+ if (ret) {
+@@ -1632,6 +1632,24 @@ void __intel_fini_wedge(struct intel_wedge_me *w)
+ w->gt = NULL;
+ }
+
++/*
++ * Wa_22011802037 requires that we (or the GuC) ensure that no command
++ * streamers are executing MI_FORCE_WAKE while an engine reset is initiated.
++ */
++bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt)
++{
++ if (GRAPHICS_VER(gt->i915) < 11)
++ return false;
++
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0))
++ return true;
++
++ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
++ return false;
++
++ return true;
++}
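++
++/*
++ * Equivalently (for illustration): the workaround is needed on graphics
++ * version 11 and later, up to but not including IP version 12.70, plus
++ * the 12.70 A0 stepping itself:
++ *
++ *	GRAPHICS_VER(gt->i915) >= 11 &&
++ *	(GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ||
++ *	 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0))
++ */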
++
+ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ #include "selftest_reset.c"
+ #include "selftest_hangcheck.c"
+diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
+index 25c975b6e8fc01..f615b30b81c594 100644
+--- a/drivers/gpu/drm/i915/gt/intel_reset.h
++++ b/drivers/gpu/drm/i915/gt/intel_reset.h
+@@ -78,4 +78,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
+ bool intel_has_gpu_reset(const struct intel_gt *gt);
+ bool intel_has_reset_engine(const struct intel_gt *gt);
+
++bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt);
++
+ #endif /* I915_RESET_H */
+diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
+index 092542f53aad9c..4feef874e6d695 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rps.c
++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
+@@ -1161,7 +1161,7 @@ void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *c
+ {
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+- if (IS_METEORLAKE(i915))
++ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+ return mtl_get_freq_caps(rps, caps);
+ else
+ return __gen6_rps_get_freq_caps(rps, caps);
+diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+index 3ae0dbd39eaa3c..8fbb0686c5348d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -10,6 +10,7 @@
+ #include "intel_engine_regs.h"
+ #include "intel_gpu_commands.h"
+ #include "intel_gt.h"
++#include "intel_gt_ccs_mode.h"
+ #include "intel_gt_mcr.h"
+ #include "intel_gt_regs.h"
+ #include "intel_ring.h"
+@@ -50,7 +51,8 @@
+ * registers belonging to BCS, VCS or VECS should be implemented in
+ * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
+  * engine's MMIO range but that are part of the common RCS/CCS reset domain
+- * should be implemented in general_render_compute_wa_init().
++ * should be implemented in general_render_compute_wa_init(). Settings
++ * related to CCS load balancing should be added in ccs_engine_wa_mode().
+ *
+ * - GT workarounds: the list of these WAs is applied whenever these registers
+ * revert to their default values: on GPU reset, suspend/resume [1]_, etc.
+@@ -764,39 +766,15 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+ {
+ dg2_ctx_gt_tuning_init(engine, wal);
+
+- /* Wa_16011186671:dg2_g11 */
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+- wa_mcr_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
+- wa_mcr_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
+- }
+-
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+- /* Wa_14010469329:dg2_g10 */
+- wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
+- XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
+-
+- /*
+- * Wa_22010465075:dg2_g10
+- * Wa_22010613112:dg2_g10
+- * Wa_14010698770:dg2_g10
+- */
+- wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
+- GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+- }
+-
+ /* Wa_16013271637:dg2 */
+ wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+ /* Wa_14014947963:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
+- IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+- wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
++ wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
+
+ /* Wa_18018764978:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
+- IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+- wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
++ wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+
+ /* Wa_15010599737:dg2 */
+ wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+@@ -805,27 +783,32 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+ }
+
+-static void mtl_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+- struct i915_wa_list *wal)
++static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
++ struct i915_wa_list *wal)
+ {
+- struct drm_i915_private *i915 = engine->i915;
++ struct intel_gt *gt = engine->gt;
+
+ dg2_ctx_gt_tuning_init(engine, wal);
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
++ /*
++ * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
++ * gen12_emit_indirect_ctx_rcs() rather than here on some early
++ * steppings.
++ */
++ if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
+ wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
+ }
+
+-static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
+- struct i915_wa_list *wal)
++static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
++ struct i915_wa_list *wal)
+ {
+- struct drm_i915_private *i915 = engine->i915;
++ struct intel_gt *gt = engine->gt;
+
+- mtl_ctx_gt_tuning_init(engine, wal);
++ xelpg_ctx_gt_tuning_init(engine, wal);
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
+ /* Wa_14014947963 */
+ wa_masked_field_set(wal, VF_PREEMPTION,
+ PREEMPTION_VERTEX_COUNT, 0x4000);
+@@ -931,8 +914,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+ if (engine->class != RENDER_CLASS)
+ goto done;
+
+- if (IS_METEORLAKE(i915))
+- mtl_ctx_workarounds_init(engine, wal);
++ if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
++ xelpg_ctx_workarounds_init(engine, wal);
+ else if (IS_PONTEVECCHIO(i915))
+ ; /* noop; none at this time */
+ else if (IS_DG2(i915))
+@@ -1606,31 +1589,11 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ static void
+ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ {
+- struct intel_engine_cs *engine;
+- int id;
+-
+ xehp_init_mcr(gt, wal);
+
+ /* Wa_14011060649:dg2 */
+ wa_14011060649(gt, wal);
+
+- /*
+- * Although there are per-engine instances of these registers,
+- * they technically exist outside the engine itself and are not
+- * impacted by engine resets. Furthermore, they're part of the
+- * GuC blacklist so trying to treat them as engine workarounds
+- * will result in GuC initialization failure and a wedged GPU.
+- */
+- for_each_engine(engine, gt, id) {
+- if (engine->class != VIDEO_DECODE_CLASS)
+- continue;
+-
+- /* Wa_16010515920:dg2_g10 */
+- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+- wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
+- ALNUNIT_CLKGATE_DIS);
+- }
+-
+ if (IS_DG2_G10(gt->i915)) {
+ /* Wa_22010523718:dg2 */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+@@ -1641,65 +1604,6 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ DSS_ROUTER_CLKGATE_DIS);
+ }
+
+- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
+- IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
+- /* Wa_14012362059:dg2 */
+- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+- }
+-
+- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
+- /* Wa_14010948348:dg2_g10 */
+- wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
+-
+- /* Wa_14011037102:dg2_g10 */
+- wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);
+-
+- /* Wa_14011371254:dg2_g10 */
+- wa_mcr_write_or(wal, XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);
+-
+- /* Wa_14011431319:dg2_g10 */
+- wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
+- GAMTLBVDBOX7_CLKGATE_DIS |
+- GAMTLBVDBOX6_CLKGATE_DIS |
+- GAMTLBVDBOX5_CLKGATE_DIS |
+- GAMTLBVDBOX4_CLKGATE_DIS |
+- GAMTLBVDBOX3_CLKGATE_DIS |
+- GAMTLBVDBOX2_CLKGATE_DIS |
+- GAMTLBVDBOX1_CLKGATE_DIS |
+- GAMTLBVDBOX0_CLKGATE_DIS |
+- GAMTLBKCR_CLKGATE_DIS |
+- GAMTLBGUC_CLKGATE_DIS |
+- GAMTLBBLT_CLKGATE_DIS);
+- wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
+- GAMTLBGFXA1_CLKGATE_DIS |
+- GAMTLBCOMPA0_CLKGATE_DIS |
+- GAMTLBCOMPA1_CLKGATE_DIS |
+- GAMTLBCOMPB0_CLKGATE_DIS |
+- GAMTLBCOMPB1_CLKGATE_DIS |
+- GAMTLBCOMPC0_CLKGATE_DIS |
+- GAMTLBCOMPC1_CLKGATE_DIS |
+- GAMTLBCOMPD0_CLKGATE_DIS |
+- GAMTLBCOMPD1_CLKGATE_DIS |
+- GAMTLBMERT_CLKGATE_DIS |
+- GAMTLBVEBOX3_CLKGATE_DIS |
+- GAMTLBVEBOX2_CLKGATE_DIS |
+- GAMTLBVEBOX1_CLKGATE_DIS |
+- GAMTLBVEBOX0_CLKGATE_DIS);
+-
+- /* Wa_14010569222:dg2_g10 */
+- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+- GAMEDIA_CLKGATE_DIS);
+-
+- /* Wa_14011028019:dg2_g10 */
+- wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+-
+- /* Wa_14010680813:dg2_g10 */
+- wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL,
+- CONTROL_BLOCK_CLKGATE_DIS |
+- EGRESS_BLOCK_CLKGATE_DIS |
+- TAG_BLOCK_CLKGATE_DIS);
+- }
+-
+ /* Wa_14014830051:dg2 */
+ wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+@@ -1741,14 +1645,15 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ static void
+ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ {
+- /* Wa_14018778641 / Wa_18018781329 */
++ /* Wa_14018575942 / Wa_18018781329 */
++ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_22016670082 */
+ wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
+
+- if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
+ /* Wa_14014830051 */
+ wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+@@ -1791,10 +1696,8 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ */
+ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
+ {
+- if (IS_METEORLAKE(gt->i915)) {
+- if (gt->type != GT_MEDIA)
+- wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+-
++ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
++ wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ }
+
+@@ -1826,7 +1729,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
+ return;
+ }
+
+- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
++ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
+ xelpg_gt_workarounds_init(gt, wal);
+ else if (IS_PONTEVECCHIO(i915))
+ pvc_gt_workarounds_init(gt, wal);
+@@ -2242,29 +2145,10 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+- /*
+- * Wa_1507100340:dg2_g10
+- *
+- * This covers 4 registers which are next to one another :
+- * - PS_INVOCATION_COUNT
+- * - PS_INVOCATION_COUNT_UDW
+- * - PS_DEPTH_COUNT
+- * - PS_DEPTH_COUNT_UDW
+- */
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+- whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+- RING_FORCE_TO_NONPRIV_ACCESS_RD |
+- RING_FORCE_TO_NONPRIV_RANGE_4);
+-
+ /* Required by recommended tuning setting (not a workaround) */
+ whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
+
+ break;
+- case COMPUTE_CLASS:
+- /* Wa_16011157294:dg2_g10 */
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+- whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
+- break;
+ default:
+ break;
+ }
+@@ -2294,7 +2178,7 @@ static void pvc_whitelist_build(struct intel_engine_cs *engine)
+ blacklist_trtt(engine);
+ }
+
+-static void mtl_whitelist_build(struct intel_engine_cs *engine)
++static void xelpg_whitelist_build(struct intel_engine_cs *engine)
+ {
+ struct i915_wa_list *w = &engine->whitelist;
+
+@@ -2316,8 +2200,10 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+
+ wa_init_start(w, engine->gt, "whitelist", engine->name);
+
+- if (IS_METEORLAKE(i915))
+- mtl_whitelist_build(engine);
++ if (engine->gt->type == GT_MEDIA)
++ ; /* none yet */
++ else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
++ xelpg_whitelist_build(engine);
+ else if (IS_PONTEVECCHIO(i915))
+ pvc_whitelist_build(engine);
+ else if (IS_DG2(i915))
+@@ -2415,62 +2301,35 @@ engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ }
+ }
+
+-static bool needs_wa_1308578152(struct intel_engine_cs *engine)
+-{
+- return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
+- GEN_DSS_PER_GSLICE;
+-}
+-
+ static void
+ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ {
+ struct drm_i915_private *i915 = engine->i915;
++ struct intel_gt *gt = engine->gt;
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
+ /* Wa_22014600077 */
+ wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
+ ENABLE_EU_COUNT_FOR_TDL_FLUSH);
+ }
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+- IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+- IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
++ IS_DG2(i915)) {
+ /* Wa_1509727124 */
+ wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
+ SC_DISABLE_POWER_OPTIMIZATION_EBB);
+ }
+
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+- IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
+- IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_DG2(i915)) {
+ /* Wa_22012856258 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+ }
+
+- if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+- /* Wa_14013392000:dg2_g11 */
+- wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+- }
+-
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
+- IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+- /* Wa_14012419201:dg2 */
+- wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4,
+- GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
+- }
+-
+- /* Wa_1308578152:dg2_g10 when first gslice is fused off */
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
+- needs_wa_1308578152(engine)) {
+- wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
+- GEN12_REPLAY_MODE_GRANULARITY);
+- }
+-
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+- IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
++ if (IS_DG2(i915)) {
+ /*
+ * Wa_22010960976:dg2
+ * Wa_14013347512:dg2
+@@ -2479,34 +2338,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
+ }
+
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+- /*
+- * Wa_1608949956:dg2_g10
+- * Wa_14010198302:dg2_g10
+- */
+- wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
+- MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
+- }
+-
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
+- /* Wa_22010430635:dg2 */
+- wa_mcr_masked_en(wal,
+- GEN9_ROW_CHICKEN4,
+- GEN12_DISABLE_GRF_CLEAR);
+-
+- /* Wa_14013202645:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+- IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
+- wa_mcr_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
+-
+- /* Wa_22012532006:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+- wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+- DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
+-
+- if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
+- IS_DG2_G10(i915)) {
++ if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
+ /* Wa_22014600077:dg2 */
+ wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
+@@ -2514,6 +2346,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ true);
+ }
+
++ if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
++ IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
++ /*
++ * Wa_1606700617:tgl,dg1,adl-p
++ * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
++ * Wa_14010826681:tgl,dg1,rkl,adl-p
++ * Wa_18019627453:dg2
++ */
++ wa_masked_en(wal,
++ GEN9_CS_DEBUG_MODE1,
++ FF_DOP_CLOCK_GATE_DISABLE);
++ }
++
+ if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
+@@ -2527,19 +2372,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ */
+ wa_write_or(wal, GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+- }
+
+- if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
+- IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+- /*
+- * Wa_1606700617:tgl,dg1,adl-p
+- * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
+- * Wa_14010826681:tgl,dg1,rkl,adl-p
+- * Wa_18019627453:dg2
+- */
+- wa_masked_en(wal,
+- GEN9_CS_DEBUG_MODE1,
+- FF_DOP_CLOCK_GATE_DISABLE);
++ /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
++ wa_mcr_masked_en(wal,
++ GEN10_SAMPLER_MODE,
++ ENABLE_SMALLPL);
+ }
+
+ if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
+@@ -2566,14 +2403,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+ }
+
+- if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
+- IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
+- /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
+- wa_mcr_masked_en(wal,
+- GEN10_SAMPLER_MODE,
+- ENABLE_SMALLPL);
+- }
+-
+ if (GRAPHICS_VER(i915) == 11) {
+ /* This is not an Wa. Enable for better image quality */
+ wa_masked_en(wal,
+@@ -2975,10 +2804,12 @@ ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ * function invoked by __intel_engine_init_ctx_wa().
+ */
+ static void
+-add_render_compute_tuning_settings(struct drm_i915_private *i915,
++add_render_compute_tuning_settings(struct intel_gt *gt,
+ struct i915_wa_list *wal)
+ {
+- if (IS_METEORLAKE(i915) || IS_DG2(i915))
++ struct drm_i915_private *i915 = gt->i915;
++
++ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
+ wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
+
+ /*
+@@ -2994,6 +2825,30 @@ add_render_compute_tuning_settings(struct drm_i915_private *i915,
+ wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
+ }
+
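++/*
++ * The CCS-mode registers written below live in the shared render/compute
++ * reset domain, so engine_init_workarounds() applies this only for the
++ * engine flagged I915_ENGINE_FIRST_RENDER_COMPUTE.
++ */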
++static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
++{
++ struct intel_gt *gt = engine->gt;
++ u32 mode;
++
++ if (!IS_DG2(gt->i915))
++ return;
++
++ /*
++	 * Wa_14019159160: this workaround, along with others, makes
++	 * load balancing among the CCS slices impractical, so the
++	 * architectural decision was made to disable automatic CCS
++	 * load balancing entirely.
++ */
++ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
++
++ /*
++	 * After disabling automatic load balancing we need to assign all
++	 * slices to a single CCS. We will call this CCS mode 1.
++ */
++ mode = intel_gt_apply_ccs_mode(gt);
++ wa_masked_en(wal, XEHP_CCS_MODE, mode);
++}
++
+ /*
+ * The workarounds in this function apply to shared registers in
+ * the general render reset domain that aren't tied to a
+@@ -3007,8 +2862,9 @@ static void
+ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ {
+ struct drm_i915_private *i915 = engine->i915;
++ struct intel_gt *gt = engine->gt;
+
+- add_render_compute_tuning_settings(i915, wal);
++ add_render_compute_tuning_settings(gt, wal);
+
+ if (GRAPHICS_VER(i915) >= 11) {
+ /* This is not a Wa (although referred to as
+@@ -3029,13 +2885,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
+ }
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
++ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
+ /* Wa_14017856879 */
+ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
+ /*
+ * Wa_14017066071
+ * Wa_14017654203
+@@ -3043,37 +2900,47 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
+ MTL_DISABLE_SAMPLER_SC_OOO);
+
+- if (IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
+ /* Wa_22015279794 */
+ wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
+ DISABLE_PREFETCH_INTO_IC);
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+- IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+- IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
++ IS_DG2(i915)) {
+ /* Wa_22013037850 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
++
++ /* Wa_18017747507 */
++ wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
+ }
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
++ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
+ IS_PONTEVECCHIO(i915) ||
+ IS_DG2(i915)) {
+ /* Wa_22014226127 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+ }
+
+- if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+- IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+- IS_DG2(i915)) {
+- /* Wa_18017747507 */
+- wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
++ if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
++ /* Wa_14015227452:dg2,pvc */
++ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
++
++ /* Wa_16015675438:dg2,pvc */
++ wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
++ }
++
++ if (IS_DG2(i915)) {
++ /*
++ * Wa_16011620976:dg2_g11
++ * Wa_22015475538:dg2
++ */
++ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+ }
+
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+- IS_DG2_G11(i915)) {
++ if (IS_DG2_G11(i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+@@ -3085,18 +2952,18 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ /* Wa_22013059131:dg2 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+- }
+
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ /*
+- * Wa_14010918519:dg2_g10
++ * Wa_22012654132
+ *
+- * LSC_CHICKEN_BIT_0 always reads back as 0 is this stepping,
+- * so ignoring verification.
++ * Note that register 0xE420 is write-only and cannot be read
++ * back for verification on DG2 (due to Wa_14012342262), so
++ * we need to explicitly skip the readback.
+ */
+- wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
+- FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
+- 0, false);
++ wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
++ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
++ 0 /* write-only, so skip validation */,
++ true);
+ }
+
+ if (IS_XEHPSDV(i915)) {
+@@ -3114,35 +2981,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+ }
+-
+- if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
+- /* Wa_14015227452:dg2,pvc */
+- wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+-
+- /* Wa_16015675438:dg2,pvc */
+- wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
+- }
+-
+- if (IS_DG2(i915)) {
+- /*
+- * Wa_16011620976:dg2_g11
+- * Wa_22015475538:dg2
+- */
+- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+- }
+-
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) || IS_DG2_G11(i915))
+- /*
+- * Wa_22012654132
+- *
+- * Note that register 0xE420 is write-only and cannot be read
+- * back for verification on DG2 (due to Wa_14012342262), so
+- * we need to explicitly skip the readback.
+- */
+- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
+- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+- 0 /* write-only, so skip validation */,
+- true);
+ }
+
+ static void
+@@ -3158,8 +2996,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
+ * to a single RCS/CCS engine's workaround list since
+ * they're reset as part of the general render domain reset.
+ */
+- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
++ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
+ general_render_compute_wa_init(engine, wal);
++ ccs_engine_wa_mode(engine, wal);
++ }
+
+ if (engine->class == COMPUTE_CLASS)
+ ccs_engine_wa_init(engine, wal);
+diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
+index 3def5ca72decfd..0fb07f073baa61 100644
+--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
++++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
+@@ -719,11 +719,9 @@ static int threaded_migrate(struct intel_migrate *migrate,
+ if (IS_ERR_OR_NULL(tsk))
+ continue;
+
+- status = kthread_stop(tsk);
++ status = kthread_stop_put(tsk);
+ if (status && !err)
+ err = status;
+-
+- put_task_struct(tsk);
+ }
+
+ kfree(thread);
+diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+index 58012edd4eb0ec..4f4f53c42a9c56 100644
+--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
++++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+@@ -29,9 +29,9 @@
+ */
+
+ #define GUC_KLV_LEN_MIN 1u
+-#define GUC_KLV_0_KEY (0xffff << 16)
+-#define GUC_KLV_0_LEN (0xffff << 0)
+-#define GUC_KLV_n_VALUE (0xffffffff << 0)
++#define GUC_KLV_0_KEY (0xffffu << 16)
++#define GUC_KLV_0_LEN (0xffffu << 0)
++#define GUC_KLV_n_VALUE (0xffffffffu << 0)
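++
++/*
++ * The 'u' suffixes matter: without them, 0xffff << 16 is a signed-int
++ * shift into the sign bit, so a GUC_KLV_0_KEY used in a u64 expression
++ * would sign-extend to 0xffffffffffff0000 rather than the intended
++ * 0xffff0000.
++ */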
+
+ /**
+ * DOC: GuC Self Config KLVs
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+index 0d3b22a7436595..e251e061d1adb7 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+@@ -304,7 +304,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
+ {
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+
+- if (!intel_uc_fw_is_loadable(&gsc->fw))
++ if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
+ return;
+
+ if (intel_gsc_uc_fw_init_done(gsc))
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+index 569b5fe94c416f..861d0c58388cfc 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+@@ -272,18 +272,14 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ flags |= GUC_WA_POLLCS;
+
+- /* Wa_16011759253:dg2_g10:a0 */
+- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+- flags |= GUC_WA_GAM_CREDITS;
+-
+ /* Wa_14014475959 */
+- if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
++ if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
+ IS_DG2(gt->i915))
+ flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+
+ /*
+- * Wa_14012197797:dg2_g10:a0,dg2_g11:a0
+- * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
++ * Wa_14012197797
++ * Wa_22011391025
+ *
+ * The same WA bit is used for both and 22011391025 is applicable to
+ * all DG2.
+@@ -292,22 +288,14 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+ flags |= GUC_WA_DUAL_QUEUE;
+
+ /* Wa_22011802037: graphics version 11/12 */
+- if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+- (GRAPHICS_VER(gt->i915) >= 11 &&
+- GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70)))
++ if (intel_engine_reset_needs_wa_22011802037(gt))
+ flags |= GUC_WA_PRE_PARSER;
+
+- /* Wa_16011777198:dg2 */
+- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+- IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+- flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
+-
+ /*
+- * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
+- * Wa_22012727685:dg2_g11[a0..)
++ * Wa_22012727170
++ * Wa_22012727685
+ */
+- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+- IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
++ if (IS_DG2_G11(gt->i915))
+ flags |= GUC_WA_CONTEXT_ISOLATION;
+
+ /* Wa_16015675438 */
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index dc7b40e06e38af..236dfff81fea43 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1690,9 +1690,7 @@ static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
+ * Wa_22011802037: In addition to stopping the cs, we need
+ * to wait for any pending mi force wakeups
+ */
+- if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+- (GRAPHICS_VER(engine->i915) >= 11 &&
+- GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70))) {
++ if (intel_engine_reset_needs_wa_22011802037(engine->gt)) {
+ intel_engine_stop_cs(engine);
+ intel_engine_wait_for_pending_mi_fw(engine);
+ }
+@@ -2697,9 +2695,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+- wq_desc_offset = i915_ggtt_offset(ce->state) +
++ wq_desc_offset = (u64)i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+- wq_base_offset = i915_ggtt_offset(ce->state) +
++ wq_base_offset = (u64)i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ info->wq_desc_lo = lower_32_bits(wq_desc_offset);
+ info->wq_desc_hi = upper_32_bits(wq_desc_offset);
+@@ -4299,7 +4297,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
+
+ /* Wa_14014475959:dg2 */
+ if (engine->class == COMPUTE_CLASS)
+- if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
++ if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
+ IS_DG2(engine->i915))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+
+@@ -4774,7 +4772,8 @@ static void capture_error_state(struct intel_guc *guc,
+ if (match) {
+ intel_engine_set_hung_context(e, ce);
+ engine_mask |= e->mask;
+- atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
++ i915_increase_reset_engine_count(&i915->gpu_error,
++ e);
+ }
+ }
+
+@@ -4786,7 +4785,7 @@ static void capture_error_state(struct intel_guc *guc,
+ } else {
+ intel_engine_set_hung_context(ce->engine, ce);
+ engine_mask = ce->engine->mask;
+- atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
++ i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
+ }
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+index 9a431726c8d5b1..ac7b3aad2222e8 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
+ return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
+ }
+
++static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
++{
++ return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
++}
++
+ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
+ {
+ return uc_fw->user_overridden;
+diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
+index a9f7fa9b90bdad..d30f8814d9b106 100644
+--- a/drivers/gpu/drm/i915/gvt/handlers.c
++++ b/drivers/gpu/drm/i915/gvt/handlers.c
+@@ -2850,8 +2850,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ for (i = start; i < end; i += 4) {
+ p = intel_gvt_find_mmio_info(gvt, i);
+ if (p) {
+- WARN(1, "dup mmio definition offset %x\n",
+- info->offset);
++ WARN(1, "dup mmio definition offset %x\n", i);
+
+ /* We return -EEXIST here to make GVT-g load fail.
+ * So duplicated MMIO can be found as soon as
+diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
+index 68eca023bbc68b..80301472ac9881 100644
+--- a/drivers/gpu/drm/i915/gvt/interrupt.c
++++ b/drivers/gpu/drm/i915/gvt/interrupt.c
+@@ -405,7 +405,7 @@ static void init_irq_map(struct intel_gvt_irq *irq)
+ #define MSI_CAP_DATA(offset) (offset + 8)
+ #define MSI_CAP_EN 0x1
+
+-static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
++static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
+ {
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+@@ -417,10 +417,10 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+
+ /* Do not generate MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+- return 0;
++ return;
+
+ if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
+- return -EINVAL;
++ return;
+
+ trace_inject_msi(vgpu->id, addr, data);
+
+@@ -434,10 +434,9 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+ * returned and don't inject interrupt into guest.
+ */
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+- return -ESRCH;
+- if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
+- return -EFAULT;
+- return 0;
++ return;
++ if (vgpu->msi_trigger)
++ eventfd_signal(vgpu->msi_trigger, 1);
+ }
+
+ static void propagate_event(struct intel_gvt_irq *irq,
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 4de44cf1026dce..7a90a2e32c9f1b 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -144,7 +144,7 @@ static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
+ {
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+
+- if (IS_METEORLAKE(i915)) {
++ if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) {
+ switch (obj->pat_index) {
+ case 0: return " WB";
+ case 1: return " WT";
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index ec4d26b3c17cc1..8dc5f85b7747b4 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -777,7 +777,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+- goto out_tiles_cleanup;
++ goto out_runtime_pm_put;
+
+ ret = i915_driver_hw_probe(i915);
+ if (ret < 0)
+@@ -837,8 +837,6 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ i915_ggtt_driver_late_release(i915);
+ out_cleanup_mmio:
+ i915_driver_mmio_release(i915);
+-out_tiles_cleanup:
+- intel_gt_release_all(i915);
+ out_runtime_pm_put:
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 7a8ce7239bc9e3..e0e0493d6c1f0d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -658,10 +658,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
+ #define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
+ (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
+
+-#define IS_MTL_GRAPHICS_STEP(__i915, variant, since, until) \
+- (IS_SUBPLATFORM(__i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_##variant) && \
+- IS_GRAPHICS_STEP(__i915, since, until))
+-
+ #define IS_MTL_DISPLAY_STEP(__i915, since, until) \
+ (IS_METEORLAKE(__i915) && \
+ IS_DISPLAY_STEP(__i915, since, until))
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
+index 9f5971f5e98014..48f6c00402c47a 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.h
++++ b/drivers/gpu/drm/i915/i915_gpu_error.h
+@@ -16,6 +16,7 @@
+
+ #include "display/intel_display_device.h"
+ #include "gt/intel_engine.h"
++#include "gt/intel_engine_types.h"
+ #include "gt/intel_gt_types.h"
+ #include "gt/uc/intel_uc_fw.h"
+
+@@ -232,7 +233,7 @@ struct i915_gpu_error {
+ atomic_t reset_count;
+
+ /** Number of times an engine has been reset */
+- atomic_t reset_engine_count[I915_NUM_ENGINES];
++ atomic_t reset_engine_count[MAX_ENGINE_CLASS];
+ };
+
+ struct drm_i915_error_state_buf {
+@@ -255,7 +256,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
+ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+ {
+- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
++ return atomic_read(&error->reset_engine_count[engine->class]);
++}
++
++static inline void
++i915_increase_reset_engine_count(struct i915_gpu_error *error,
++ const struct intel_engine_cs *engine)
++{
++ atomic_inc(&error->reset_engine_count[engine->class]);
+ }
+
+ #define CORE_DUMP_FLAG_NONE 0x0
+diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
+index 975da8e7f2a9f8..c0662a022f59c1 100644
+--- a/drivers/gpu/drm/i915/i915_hwmon.c
++++ b/drivers/gpu/drm/i915/i915_hwmon.c
+@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
+ struct intel_uncore *uncore = ddat->uncore;
+ intel_wakeref_t wakeref;
+
+- mutex_lock(&hwmon->hwmon_lock);
++ with_intel_runtime_pm(uncore->rpm, wakeref) {
++ mutex_lock(&hwmon->hwmon_lock);
+
+- with_intel_runtime_pm(uncore->rpm, wakeref)
+ intel_uncore_rmw(uncore, reg, clear, set);
+
+- mutex_unlock(&hwmon->hwmon_lock);
++ mutex_unlock(&hwmon->hwmon_lock);
++ }
+ }
+
+ /*
+@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
+ else
+ rgaddr = hwmon->rg.energy_status_all;
+
+- mutex_lock(&hwmon->hwmon_lock);
++ with_intel_runtime_pm(uncore->rpm, wakeref) {
++ mutex_lock(&hwmon->hwmon_lock);
+
+- with_intel_runtime_pm(uncore->rpm, wakeref)
+ reg_val = intel_uncore_read(uncore, rgaddr);
+
+- if (reg_val >= ei->reg_val_prev)
+- ei->accum_energy += reg_val - ei->reg_val_prev;
+- else
+- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+- ei->reg_val_prev = reg_val;
++ if (reg_val >= ei->reg_val_prev)
++ ei->accum_energy += reg_val - ei->reg_val_prev;
++ else
++ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
++ ei->reg_val_prev = reg_val;
+
+- *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+- hwmon->scl_shift_energy);
+- mutex_unlock(&hwmon->hwmon_lock);
++ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
++ hwmon->scl_shift_energy);
++ mutex_unlock(&hwmon->hwmon_lock);
++ }
+ }
+
+ static ssize_t
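The hunk above also shows the wraparound-safe accumulation: a free-running 32-bit energy counter is folded into a 64-bit total even when it wraps past UINT_MAX. A minimal userspace sketch of the same logic (illustrative names, not part of the patch):

    #include <limits.h>
    #include <stdint.h>

    struct energy_acc {
        uint64_t accum; /* running 64-bit total */
        uint32_t prev;  /* last raw 32-bit register value */
    };

    static void energy_update(struct energy_acc *ei, uint32_t reg_val)
    {
        if (reg_val >= ei->prev)
            ei->accum += reg_val - ei->prev;
        else /* the hardware counter wrapped past UINT_MAX */
            ei->accum += (uint64_t)UINT_MAX - ei->prev + reg_val;
        ei->prev = reg_val;
    }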
+@@ -175,7 +177,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
+ * tau4 = (4 | x) << y
+ * but add 2 when doing the final right shift to account for units
+ */
+- tau4 = ((1 << x_w) | x) << y;
++ tau4 = (u64)((1 << x_w) | x) << y;
+ /* val in hwmon interface units (millisec) */
+ out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+@@ -211,7 +213,7 @@ hwm_power1_max_interval_store(struct device *dev,
+ r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
+ x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
+ y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
+- tau4 = ((1 << x_w) | x) << y;
++ tau4 = (u64)((1 << x_w) | x) << y;
+ max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ if (val > max_win)
+@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
+
+ /* Block waiting for GuC reset to complete when needed */
+ for (;;) {
++ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ mutex_lock(&hwmon->hwmon_lock);
+
+ prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
+@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
+ }
+
+ mutex_unlock(&hwmon->hwmon_lock);
++ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+
+ schedule();
+ }
+ finish_wait(&ddat->waitq, &wait);
+ if (ret)
+- goto unlock;
+-
+- wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
++ goto exit;
+
+ /* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
+ if (val == PL1_DISABLE) {
+@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
+ intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
+ exit:
+- intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+-unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
++ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+ return ret;
+ }
+
+@@ -792,7 +793,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
+ if (!IS_DGFX(i915))
+ return;
+
+- hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
++ hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return;
+
+@@ -818,14 +819,12 @@ void i915_hwmon_register(struct drm_i915_private *i915)
+ hwm_get_preregistration_info(i915);
+
+ /* hwmon_dev points to device hwmon<i> */
+- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
+- ddat,
+- &hwm_chip_info,
+- hwm_groups);
+- if (IS_ERR(hwmon_dev)) {
+- i915->hwmon = NULL;
+- return;
+- }
++ hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
++ ddat,
++ &hwm_chip_info,
++ hwm_groups);
++ if (IS_ERR(hwmon_dev))
++ goto err;
+
+ ddat->hwmon_dev = hwmon_dev;
+
+@@ -838,16 +837,36 @@ void i915_hwmon_register(struct drm_i915_private *i915)
+ if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
+ continue;
+
+- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
+- ddat_gt,
+- &hwm_gt_chip_info,
+- NULL);
++ hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
++ ddat_gt,
++ &hwm_gt_chip_info,
++ NULL);
+ if (!IS_ERR(hwmon_dev))
+ ddat_gt->hwmon_dev = hwmon_dev;
+ }
++ return;
++err:
++ i915_hwmon_unregister(i915);
+ }
+
+ void i915_hwmon_unregister(struct drm_i915_private *i915)
+ {
+- fetch_and_zero(&i915->hwmon);
++ struct i915_hwmon *hwmon = i915->hwmon;
++ struct intel_gt *gt;
++ int i;
++
++ if (!hwmon)
++ return;
++
++ for_each_gt(gt, i915, i)
++ if (hwmon->ddat_gt[i].hwmon_dev)
++ hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
++
++ if (hwmon->ddat.hwmon_dev)
++ hwmon_device_unregister(hwmon->ddat.hwmon_dev);
++
++ mutex_destroy(&hwmon->hwmon_lock);
++
++ kfree(i915->hwmon);
++ i915->hwmon = NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 59e1e21df27104..0808b54d3c5185 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -785,10 +785,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
+ * The reason field includes flags identifying what
+ * triggered this specific report (mostly timer
+ * triggered or e.g. due to a context switch).
+- *
+- * In MMIO triggered reports, some platforms do not set the
+- * reason bit in this field and it is valid to have a reason
+- * field of zero.
+ */
+ reason = oa_report_reason(stream, report);
+ ctx_id = oa_context_id(stream, report32);
+@@ -800,8 +796,41 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
+ *
+ * Note: that we don't clear the valid_ctx_bit so userspace can
+ * understand that the ID has been squashed by the kernel.
++ *
++ * Update:
++ *
++ * On XEHP platforms the behavior of context id valid bit has
++ * changed compared to prior platforms. To describe this, we
++ * define a few terms:
++ *
++ * context-switch-report: This is a report with the reason type
++ * being context-switch. It is generated when a context switches
++ * out.
++ *
++ * context-valid-bit: A bit that is set in the report ID field
++ * to indicate that a valid context has been loaded.
++ *
++ * gpu-idle: A condition characterized by a
++ * context-switch-report with context-valid-bit set to 0.
++ *
++	 * On prior platforms, the context-valid-bit is set to 0 only
++	 * when the GPU goes idle; in all other reports it is set to 1.
++ *
++ * On XEHP platforms, context-valid-bit is set to 1 in a context
++ * switch report if a new context switched in. For all other
++ * reports it is set to 0.
++ *
++ * This change in behavior causes an issue with MMIO triggered
++ * reports. MMIO triggered reports have the markers in the
++ * context ID field and the context-valid-bit is 0. The logic
++ * below to squash the context ID would render the report
++ * useless since the user will not be able to find it in the OA
++ * buffer. Since MMIO triggered reports exist only on XEHP,
++ * we should avoid squashing these for XEHP platforms.
+ */
+- if (oa_report_ctx_invalid(stream, report)) {
++
++ if (oa_report_ctx_invalid(stream, report) &&
++ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
+ ctx_id = INVALID_CTX_ID;
+ oa_context_id_squash(stream, report32);
+ }
+@@ -2752,26 +2781,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
+ return 0;
+ }
+
+-static int
+-gen12_configure_all_contexts(struct i915_perf_stream *stream,
+- const struct i915_oa_config *oa_config,
+- struct i915_active *active)
+-{
+- struct flex regs[] = {
+- {
+- GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
+- CTX_R_PWR_CLK_STATE,
+- },
+- };
+-
+- if (stream->engine->class != RENDER_CLASS)
+- return 0;
+-
+- return oa_configure_all_contexts(stream,
+- regs, ARRAY_SIZE(regs),
+- active);
+-}
+-
+ static int
+ lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+@@ -2878,7 +2887,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
+ {
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_uncore *uncore = stream->uncore;
+- struct i915_oa_config *oa_config = stream->oa_config;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 sqcnt1;
+@@ -2922,15 +2930,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
+
+ intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
+
+- /*
+- * Update all contexts prior writing the mux configurations as we need
+- * to make sure all slices/subslices are ON before writing to NOA
+- * registers.
+- */
+- ret = gen12_configure_all_contexts(stream, oa_config, active);
+- if (ret)
+- return ret;
+-
+ /*
+ * For Gen12, performance counters are context
+ * saved/restored. Only enable it for the context that
+@@ -2985,9 +2984,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+ _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ }
+
+- /* Reset all contexts' slices/subslices configurations. */
+- gen12_configure_all_contexts(stream, NULL, NULL);
+-
+ /* disable the context save/restore or OAR counters */
+ if (stream->ctx)
+ gen12_configure_oar_context(stream, NULL);
+@@ -3226,11 +3222,10 @@ get_sseu_config(struct intel_sseu *out_sseu,
+ */
+ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
+ {
+- /*
+- * Wa_18013179988:dg2
+- * Wa_14015846243:mtl
+- */
+- if (IS_DG2(i915) || IS_METEORLAKE(i915)) {
++ struct intel_gt *gt = to_gt(i915);
++
++ /* Wa_18013179988 */
++ if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+@@ -4286,11 +4281,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ u32 known_open_flags;
+ int ret;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK |
+@@ -4538,7 +4530,7 @@ static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+
+ static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+ {
+- if (IS_METEORLAKE(perf->i915))
++ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
+ return reg_in_range_table(addr, mtl_oa_mux_regs);
+ else
+ return reg_in_range_table(addr, gen12_oa_mux_regs);
+@@ -4666,11 +4658,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct i915_oa_reg *regs;
+ int err, id;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ if (!perf->metrics_kobj) {
+ drm_dbg(&perf->i915->drm,
+@@ -4832,11 +4821,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct i915_oa_config *oa_config;
+ int ret;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
+index 8a9aad523eec2c..1d4cc91c0e40d5 100644
+--- a/drivers/gpu/drm/i915/i915_sw_fence.c
++++ b/drivers/gpu/drm/i915/i915_sw_fence.c
+@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
+ debug_object_init(fence, &i915_sw_fence_debug_descr);
+ }
+
+-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
+ {
+ debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+ }
+@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+ debug_object_destroy(fence, &i915_sw_fence_debug_descr);
+ }
+
+-static inline void debug_fence_free(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
+ {
+ debug_object_free(fence, &i915_sw_fence_debug_descr);
+ smp_wmb(); /* flush the change in state before reallocation */
+@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
+ {
+ }
+
+-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
+ {
+ }
+
+@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+ {
+ }
+
+-static inline void debug_fence_free(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
+ {
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index 6f180ee138531d..46e4a45e3c72ae 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -33,6 +33,7 @@
+ #include "gt/intel_engine.h"
+ #include "gt/intel_engine_heartbeat.h"
+ #include "gt/intel_gt.h"
++#include "gt/intel_gt_pm.h"
+ #include "gt/intel_gt_requests.h"
+ #include "gt/intel_tlb.h"
+
+@@ -102,12 +103,34 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
+
+ static int __i915_vma_active(struct i915_active *ref)
+ {
+- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
++ struct i915_vma *vma = active_to_vma(ref);
++
++ if (!i915_vma_tryget(vma))
++ return -ENOENT;
++
++ /*
++ * Exclude global GTT VMA from holding a GT wakeref
++ * while active, otherwise GPU never goes idle.
++ */
++ if (!i915_vma_is_ggtt(vma))
++ intel_gt_pm_get(vma->vm->gt);
++
++ return 0;
+ }
+
+ static void __i915_vma_retire(struct i915_active *ref)
+ {
+- i915_vma_put(active_to_vma(ref));
++ struct i915_vma *vma = active_to_vma(ref);
++
++ if (!i915_vma_is_ggtt(vma)) {
++ /*
++ * Since we can be called from atomic contexts,
++ * use an async variant of intel_gt_pm_put().
++ */
++ intel_gt_pm_put_async(vma->vm->gt);
++ }
++
++ i915_vma_put(vma);
+ }
+
+ static struct i915_vma *
+@@ -1403,7 +1426,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ struct i915_vma_work *work = NULL;
+ struct dma_fence *moving = NULL;
+ struct i915_vma_resource *vma_res = NULL;
+- intel_wakeref_t wakeref = 0;
++ intel_wakeref_t wakeref;
+ unsigned int bound;
+ int err;
+
+@@ -1423,8 +1446,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ if (err)
+ return err;
+
+- if (flags & PIN_GLOBAL)
+- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
++ /*
++ * In case of a global GTT, we must hold a runtime-pm wakeref
++ * while global PTEs are updated. In other cases, we hold
++ * the rpm reference while the VMA is active. Since runtime
++ * resume may require allocations, which are forbidden inside
++ * vm->mutex, get the first rpm wakeref outside of the mutex.
++ */
++ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
+ if (flags & vma->vm->bind_async_flags) {
+ /* lock VM */
+@@ -1560,8 +1589,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ if (work)
+ dma_fence_work_commit_imm(&work->base);
+ err_rpm:
+- if (wakeref)
+- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
++ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+
+ if (moving)
+ dma_fence_put(moving);
+diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
+index 81a4d32734e946..c66eb6abd4a2ee 100644
+--- a/drivers/gpu/drm/i915/intel_clock_gating.c
++++ b/drivers/gpu/drm/i915/intel_clock_gating.c
+@@ -396,14 +396,6 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
+ /* Wa_22010954014:dg2 */
+ intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+ SGSI_SIDECLK_DIS);
+-
+- /*
+- * Wa_14010733611:dg2_g10
+- * Wa_22010146351:dg2_g10
+- */
+- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
+- intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
+- SGR_DIS | SGGI_DIS);
+ }
+
+ static void pvc_init_clock_gating(struct drm_i915_private *i915)
+diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
+index 4ddc6d902752af..7d41874a49c589 100644
+--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
++++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
+@@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
+ }
+
+ for_each_engine(engine, gt, id)
+- t->reset_engine[id] =
+- i915_reset_engine_count(&i915->gpu_error, engine);
++ t->reset_engine[i][id] =
++ i915_reset_engine_count(&i915->gpu_error,
++ engine);
+ }
+
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+@@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
+
+ for_each_gt(gt, i915, i) {
+ for_each_engine(engine, gt, id) {
+- if (t->reset_engine[id] ==
++ if (t->reset_engine[i][id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+- t->reset_engine[id]);
++ t->reset_engine[i][id]);
+ return -EIO;
+ }
+ }
+diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
+index 36ed42736c5216..83e3ad430922fe 100644
+--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
++++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
+@@ -7,6 +7,7 @@
+ #ifndef IGT_LIVE_TEST_H
+ #define IGT_LIVE_TEST_H
+
++#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
+ #include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
+
+ struct drm_i915_private;
+@@ -17,7 +18,7 @@ struct igt_live_test {
+ const char *name;
+
+ unsigned int reset_global;
+- unsigned int reset_engine[I915_NUM_ENGINES];
++ unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+index 0fa0b590830b66..c62df2557dc65e 100644
+--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
++++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
+ int ret;
+
+ if (!mode)
+- return -EINVAL;
++ return 0;
+
+ ret = of_get_drm_display_mode(np, &imxpd->mode,
+ &imxpd->bus_flags,
+ OF_USE_NATIVE_MODE);
+ if (ret) {
+ drm_mode_destroy(connector->dev, mode);
+- return ret;
++ return 0;
+ }
+
+ drm_mode_copy(mode, &imxpd->mode);
+diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+index 22b65f4a0e3034..4beb3b4bd6942c 100644
+--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
++++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+@@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers =
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+ };
+
+-static void imx_lcdc_release(struct drm_device *drm)
+-{
+- struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);
+-
+- drm_kms_helper_poll_fini(drm);
+- kfree(lcdc);
+-}
+-
+ DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);
+
+ static struct drm_driver imx_lcdc_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &imx_lcdc_drm_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
+- .release = imx_lcdc_release,
+ .name = "imx-lcdc",
+ .desc = "i.MX LCDC driver",
+ .date = "20200716",
+diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
+index fbc43f243c54d2..6d000504e1a4ee 100644
+--- a/drivers/gpu/drm/lima/lima_bcast.c
++++ b/drivers/gpu/drm/lima/lima_bcast.c
+@@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip)
+
+ }
+
++int lima_bcast_mask_irq(struct lima_ip *ip)
++{
++ bcast_write(LIMA_BCAST_BROADCAST_MASK, 0);
++ bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0);
++ return 0;
++}
++
++int lima_bcast_reset(struct lima_ip *ip)
++{
++ return lima_bcast_hw_init(ip);
++}
++
+ int lima_bcast_init(struct lima_ip *ip)
+ {
+ int i;
+diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
+index 465ee587bceb2f..cd08841e47879c 100644
+--- a/drivers/gpu/drm/lima/lima_bcast.h
++++ b/drivers/gpu/drm/lima/lima_bcast.h
+@@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip);
+
+ void lima_bcast_enable(struct lima_device *dev, int num_pp);
+
++int lima_bcast_mask_irq(struct lima_ip *ip);
++int lima_bcast_reset(struct lima_ip *ip);
++
+ #endif
+diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
+index 10fd9154cc4653..8c9b656eeb59d2 100644
+--- a/drivers/gpu/drm/lima/lima_drv.c
++++ b/drivers/gpu/drm/lima/lima_drv.c
+@@ -486,3 +486,4 @@ module_platform_driver(lima_platform_driver);
+ MODULE_AUTHOR("Lima Project Developers");
+ MODULE_DESCRIPTION("Lima DRM Driver");
+ MODULE_LICENSE("GPL v2");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
+diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
+index 4f9736e5f929be..7ea244d876ca63 100644
+--- a/drivers/gpu/drm/lima/lima_gem.c
++++ b/drivers/gpu/drm/lima/lima_gem.c
+@@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+ } else {
+ bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+ if (!bo->base.sgt) {
+- sg_free_table(&sgt);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_out0;
+ }
+ }
+
+ ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+- if (ret) {
+- sg_free_table(&sgt);
+- kfree(bo->base.sgt);
+- bo->base.sgt = NULL;
+- return ret;
+- }
++ if (ret)
++ goto err_out1;
+
+ *bo->base.sgt = sgt;
+
+ if (vm) {
+ ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+ if (ret)
+- return ret;
++ goto err_out2;
+ }
+
+ bo->heap_size = new_size;
+ return 0;
++
++err_out2:
++ dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
++err_out1:
++ kfree(bo->base.sgt);
++ bo->base.sgt = NULL;
++err_out0:
++ sg_free_table(&sgt);
++ return ret;
+ }
+
+ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
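The reworked lima_heap_alloc error path follows the kernel's goto-unwind convention: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A self-contained sketch of the pattern (hypothetical resources, not the driver's types):

    #include <stdlib.h>

    struct res { void *a, *b; };

    static int setup(struct res *r)
    {
        int ret;

        r->a = malloc(64);
        if (!r->a) {
            ret = -1;
            goto err_out0;
        }
        r->b = malloc(64);
        if (!r->b) {
            ret = -1;
            goto err_out1;
        }
        return 0;

    err_out1:
        free(r->a);
        r->a = NULL;
    err_out0:
        return ret;
    }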
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index 8dd501b7a3d0d8..82071835ec9ed2 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -166,6 +166,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+ gp_write(LIMA_GP_CMD, cmd);
+ }
+
++static int lima_gp_bus_stop_poll(struct lima_ip *ip)
++{
++ return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
++}
++
+ static int lima_gp_hard_reset_poll(struct lima_ip *ip)
+ {
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
+@@ -179,6 +184,13 @@ static int lima_gp_hard_reset(struct lima_ip *ip)
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ gp_write(LIMA_GP_INT_MASK, 0);
++
++ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
++ ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
++ if (ret) {
++ dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
++ return ret;
++ }
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
+ ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
+ if (ret) {
+@@ -212,6 +224,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+ lima_sched_pipe_task_done(pipe);
+ }
+
++static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
++{
++ struct lima_ip *ip = pipe->processor[0];
++
++ gp_write(LIMA_GP_INT_MASK, 0);
++}
++
+ static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+ {
+ struct lima_ip *ip = pipe->processor[0];
+@@ -317,7 +336,9 @@ int lima_gp_init(struct lima_ip *ip)
+
+ void lima_gp_fini(struct lima_ip *ip)
+ {
++ struct lima_device *dev = ip->dev;
+
++ devm_free_irq(dev->dev, ip->irq, ip);
+ }
+
+ int lima_gp_pipe_init(struct lima_device *dev)
+@@ -344,6 +365,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
+ pipe->task_error = lima_gp_task_error;
+ pipe->task_mmu_error = lima_gp_task_mmu_error;
+ pipe->task_recover = lima_gp_task_recover;
++ pipe->task_mask_irq = lima_gp_task_mask_irq;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
+index a1ae6c252dc2b5..8ca7047adbaca9 100644
+--- a/drivers/gpu/drm/lima/lima_mmu.c
++++ b/drivers/gpu/drm/lima/lima_mmu.c
+@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
+
+ void lima_mmu_fini(struct lima_ip *ip)
+ {
++ struct lima_device *dev = ip->dev;
++
++ if (ip->id == lima_ip_ppmmu_bcast)
++ return;
+
++ devm_free_irq(dev->dev, ip->irq, ip);
+ }
+
+ void lima_mmu_flush_tlb(struct lima_ip *ip)
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index a5c95bed08c09c..d34c9e8840f454 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -266,7 +266,9 @@ int lima_pp_init(struct lima_ip *ip)
+
+ void lima_pp_fini(struct lima_ip *ip)
+ {
++ struct lima_device *dev = ip->dev;
+
++ devm_free_irq(dev->dev, ip->irq, ip);
+ }
+
+ int lima_pp_bcast_resume(struct lima_ip *ip)
+@@ -299,7 +301,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
+
+ void lima_pp_bcast_fini(struct lima_ip *ip)
+ {
++ struct lima_device *dev = ip->dev;
+
++ devm_free_irq(dev->dev, ip->irq, ip);
+ }
+
+ static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+@@ -408,6 +412,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+
+ lima_pp_hard_reset(ip);
+ }
++
++ if (pipe->bcast_processor)
++ lima_bcast_reset(pipe->bcast_processor);
+ }
+
+ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+@@ -416,6 +423,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+ lima_sched_pipe_task_done(pipe);
+ }
+
++static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
++{
++ int i;
++
++ for (i = 0; i < pipe->num_processor; i++) {
++ struct lima_ip *ip = pipe->processor[i];
++
++ pp_write(LIMA_PP_INT_MASK, 0);
++ }
++
++ if (pipe->bcast_processor)
++ lima_bcast_mask_irq(pipe->bcast_processor);
++}
++
+ static struct kmem_cache *lima_pp_task_slab;
+ static int lima_pp_task_slab_refcnt;
+
+@@ -447,6 +468,7 @@ int lima_pp_pipe_init(struct lima_device *dev)
+ pipe->task_fini = lima_pp_task_fini;
+ pipe->task_error = lima_pp_task_error;
+ pipe->task_mmu_error = lima_pp_task_mmu_error;
++ pipe->task_mask_irq = lima_pp_task_mask_irq;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
+index ffd91a5ee29901..1114bffe38c837 100644
+--- a/drivers/gpu/drm/lima/lima_sched.c
++++ b/drivers/gpu/drm/lima/lima_sched.c
+@@ -402,6 +402,13 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_device *ldev = pipe->ldev;
+
++ /*
++ * The task might still finish while this timeout handler runs.
++ * To prevent a race condition on its completion, mask all irqs
++ * on the running core until the next hard reset completes.
++ */
++ pipe->task_mask_irq(pipe);
++
+ if (!pipe->error)
+ DRM_ERROR("lima job timeout\n");
+
+diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
+index 6a11764d87b389..edf205be436998 100644
+--- a/drivers/gpu/drm/lima/lima_sched.h
++++ b/drivers/gpu/drm/lima/lima_sched.h
+@@ -80,6 +80,7 @@ struct lima_sched_pipe {
+ void (*task_error)(struct lima_sched_pipe *pipe);
+ void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ int (*task_recover)(struct lima_sched_pipe *pipe);
++ void (*task_mask_irq)(struct lima_sched_pipe *pipe);
+
+ struct work_struct recover_work;
+ };
+diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c
+index 04c15b4697e218..2609a2256da4bf 100644
+--- a/drivers/gpu/drm/loongson/lsdc_pixpll.c
++++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c
+@@ -120,12 +120,14 @@ static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this)
+ struct lsdc_pixpll_parms *pparms;
+
+ this->mmio = ioremap(this->reg_base, this->reg_size);
+- if (IS_ERR_OR_NULL(this->mmio))
++ if (!this->mmio)
+ return -ENOMEM;
+
+ pparms = kzalloc(sizeof(*pparms), GFP_KERNEL);
+- if (IS_ERR_OR_NULL(pparms))
++ if (!pparms) {
++ iounmap(this->mmio);
+ return -ENOMEM;
++ }
+
+ pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+index e525a6b9e5b0bc..22f768d923d5ab 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+@@ -103,7 +103,7 @@ void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt)
+ mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs,
+ DISP_REG_MERGE_CTRL);
+
+- if (priv->async_clk)
++ if (!cmdq_pkt && priv->async_clk)
+ reset_control_reset(priv->reset_ctl);
+ }
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index 2bffe424546667..6f15069da8b020 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -38,6 +38,7 @@
+ #define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n))
+ #define OVL_PITCH_MSB_2ND_SUBBUF BIT(16)
+ #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
++#define OVL_CONST_BLEND BIT(28)
+ #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
+ #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
+ #define DISP_REG_OVL_ADDR_MT2701 0x0040
+@@ -71,6 +72,8 @@
+ #define OVL_CON_VIRT_FLIP BIT(9)
+ #define OVL_CON_HORZ_FLIP BIT(10)
+
++#define OVL_COLOR_ALPHA GENMASK(31, 24)
++
+ static const u32 mt8173_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+@@ -273,7 +276,13 @@ void mtk_ovl_config(struct device *dev, unsigned int w,
+ if (w != 0 && h != 0)
+ mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_ROI_SIZE);
+- mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR);
++
++ /*
++ * The background color must be opaque black (ARGB),
++ * otherwise the alpha blending will have no effect
++ */
++ mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg,
++ ovl->regs, DISP_REG_OVL_ROI_BGCLR);
+
+ mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
+ mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
+@@ -407,6 +416,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ unsigned int fmt = pending->format;
+ unsigned int offset = (pending->y << 16) | pending->x;
+ unsigned int src_size = (pending->height << 16) | pending->width;
++ unsigned int ignore_pixel_alpha = 0;
+ unsigned int con;
+ bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
+ union overlay_pitch {
+@@ -428,6 +438,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+ if (state->base.fb && state->base.fb->format->has_alpha)
+ con |= OVL_CON_AEN | OVL_CON_ALPHA;
+
++	/* CONST_BLD must be enabled for XRGB formats even though the alpha
++	 * channel can be ignored; otherwise OVL still reads the alpha value
++	 * from memory. For RGB888-related formats the result is the same
++	 * either way, so !has_alpha is used as the condition.
++	 */
++ if (state->base.fb && !state->base.fb->format->has_alpha)
++ ignore_pixel_alpha = OVL_CONST_BLEND;
++
+ if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ con |= OVL_CON_VIRT_FLIP;
+ addr += (pending->height - 1) * pending->pitch;
+@@ -443,8 +461,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
+
+ mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_CON(idx));
+- mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb, &ovl->cmdq_reg, ovl->regs,
+- DISP_REG_OVL_PITCH(idx));
++ mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
++ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_SRC_SIZE(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+index 6bf6367853fbae..036028b8f5248d 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+@@ -111,7 +111,7 @@ void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx,
+ merge = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MERGE0 + idx];
+ ethdr = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0];
+
+- if (!pending->enable) {
++ if (!pending->enable || !pending->width || !pending->height) {
+ mtk_merge_stop_cmdq(merge, cmdq_pkt);
+ mtk_mdp_rdma_stop(rdma_l, cmdq_pkt);
+ mtk_mdp_rdma_stop(rdma_r, cmdq_pkt);
+@@ -436,8 +436,10 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
+ }
+
+ comp_pdev = of_find_device_by_node(node);
+- if (!comp_pdev)
++ if (!comp_pdev) {
++ of_node_put(node);
+ return -EPROBE_DEFER;
++ }
+
+ priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 2cb47f6637568b..48a4defbc66cc8 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -2027,21 +2027,20 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+ return ret;
+ }
+
+-static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+- struct drm_connector *connector)
++static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge,
++ struct drm_connector *connector)
+ {
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ bool enabled = mtk_dp->enabled;
+- struct edid *new_edid = NULL;
++ const struct drm_edid *drm_edid;
+ struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+- struct cea_sad *sads;
+
+ if (!enabled) {
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
+ }
+
+- new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
++ drm_edid = drm_edid_read_ddc(connector, &mtk_dp->aux.ddc);
+
+ /*
+ * Parse capability here to let atomic_get_input_bus_fmts and
+@@ -2049,12 +2048,32 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ */
+ if (mtk_dp_parse_capabilities(mtk_dp)) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+- new_edid = NULL;
++ drm_edid_free(drm_edid);
++ drm_edid = NULL;
+ }
+
+- if (new_edid) {
+- audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
+- audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
++ if (drm_edid) {
++ /*
++ * FIXME: get rid of drm_edid_raw()
++ */
++ const struct edid *edid = drm_edid_raw(drm_edid);
++ struct cea_sad *sads;
++ int ret;
++
++ ret = drm_edid_to_sad(edid, &sads);
++ /* Ignore any errors */
++ if (ret < 0)
++ ret = 0;
++ if (ret)
++ kfree(sads);
++ audio_caps->sad_count = ret;
++
++ /*
++ * FIXME: This should use connector->display_info.has_audio from
++ * a path that has read the EDID and called
++ * drm_edid_connector_update().
++ */
++ audio_caps->detect_monitor = drm_detect_monitor_audio(edid);
+ }
+
+ if (!enabled) {
+@@ -2062,7 +2081,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
+ }
+
+- return new_edid;
++ return drm_edid;
+ }
+
+ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+@@ -2076,7 +2095,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
+ !mtk_dp->train_info.cable_plugged_in) {
+- ret = -EAGAIN;
++ ret = -EIO;
+ goto err;
+ }
+
+@@ -2414,7 +2433,7 @@ static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
+ .atomic_enable = mtk_dp_bridge_atomic_enable,
+ .atomic_disable = mtk_dp_bridge_atomic_disable,
+ .mode_valid = mtk_dp_bridge_mode_valid,
+- .get_edid = mtk_dp_get_edid,
++ .edid_read = mtk_dp_edid_read,
+ .detect = mtk_dp_bdg_detect,
+ };
+
+@@ -2780,3 +2799,4 @@ MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>");
+ MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@mediatek.com>");
+ MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: phy_mtk_dp");
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
+index 2f931e4e2b6009..bc073a6b367e5b 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
+@@ -957,20 +957,6 @@ static const struct mtk_dpi_conf mt8186_conf = {
+ .csc_enable_bit = CSC_ENABLE,
+ };
+
+-static const struct mtk_dpi_conf mt8188_dpintf_conf = {
+- .cal_factor = mt8195_dpintf_calculate_factor,
+- .max_clock_khz = 600000,
+- .output_fmts = mt8195_output_fmts,
+- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+- .pixels_per_iter = 4,
+- .input_2pixel = false,
+- .dimension_mask = DPINTF_HPW_MASK,
+- .hvsize_mask = DPINTF_HSIZE_MASK,
+- .channel_swap_shift = DPINTF_CH_SWAP,
+- .yuv422_en_bit = DPINTF_YUV422_EN,
+- .csc_enable_bit = DPINTF_CSC_ENABLE,
+-};
+-
+ static const struct mtk_dpi_conf mt8192_conf = {
+ .cal_factor = mt8183_calculate_factor,
+ .reg_h_fre_con = 0xe0,
+@@ -1094,7 +1080,7 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
+ { .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf },
+ { .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf },
+ { .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf },
+- { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8188_dpintf_conf },
++ { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8195_dpintf_conf },
+ { .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
+ { .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
+ { /* sentinel */ },
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index b6fa4ad2f94dc0..659112da47b692 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -67,6 +67,8 @@ struct mtk_drm_crtc {
+ /* lock for display hardware access */
+ struct mutex hw_lock;
+ bool config_updating;
++ /* lock for config_updating to cmd buffer */
++ spinlock_t config_lock;
+ };
+
+ struct mtk_crtc_state {
+@@ -93,20 +95,27 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ unsigned long flags;
+
+- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+- drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+- drm_crtc_vblank_put(crtc);
+- mtk_crtc->event = NULL;
+- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ if (mtk_crtc->event) {
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
++ drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
++ drm_crtc_vblank_put(crtc);
++ mtk_crtc->event = NULL;
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++ }
+ }
+
+ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+ {
++ unsigned long flags;
++
+ drm_crtc_handle_vblank(&mtk_crtc->base);
++
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
+ mtk_drm_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ }
+
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+@@ -289,12 +298,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+ struct mtk_crtc_state *state;
+ unsigned int i;
++ unsigned long flags;
+
+ if (data->sta < 0)
+ return;
+
+ state = to_mtk_crtc_state(mtk_crtc->base.state);
+
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
++ if (mtk_crtc->config_updating) {
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++ goto ddp_cmdq_cb_out;
++ }
++
+ state->pending_config = false;
+
+ if (mtk_crtc->pending_planes) {
+@@ -321,6 +337,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ mtk_crtc->pending_async_planes = false;
+ }
+
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
++ddp_cmdq_cb_out:
++
+ mtk_crtc->cmdq_vblank_cnt = 0;
+ wake_up(&mtk_crtc->cb_blocking_queue);
+ }
+@@ -408,6 +428,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+ unsigned int local_layer;
+
+ plane_state = to_mtk_plane_state(plane->state);
++
++ /* should not enable layer before crtc enabled */
++ plane_state->pending.enable = false;
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ if (comp)
+ mtk_ddp_comp_layer_config(comp, local_layer,
+@@ -427,6 +450,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+ {
+ struct drm_device *drm = mtk_crtc->base.dev;
+ struct drm_crtc *crtc = &mtk_crtc->base;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+@@ -458,10 +482,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+ pm_runtime_put(drm->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+- spin_lock_irq(&crtc->dev->event_lock);
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+- spin_unlock_irq(&crtc->dev->event_lock);
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
+
+@@ -550,9 +574,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
+ unsigned int pending_planes = 0, pending_async_planes = 0;
+ int i;
++ unsigned long flags;
+
+ mutex_lock(&mtk_crtc->hw_lock);
++
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = true;
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
+ if (needs_vblank)
+ mtk_crtc->pending_needs_vblank = true;
+
+@@ -606,7 +635,10 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
+ mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+ }
+ #endif
++ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = false;
++ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
++
+ mutex_unlock(&mtk_crtc->hw_lock);
+ }
+
+@@ -744,6 +776,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ crtc);
+ struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
++ unsigned long flags;
+
+ if (mtk_crtc->event && mtk_crtc_state->base.event)
+ DRM_ERROR("new event while there is still a pending event\n");
+@@ -751,7 +784,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ if (mtk_crtc_state->base.event) {
+ mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
++
++ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ mtk_crtc->event = mtk_crtc_state->base.event;
++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
++
+ mtk_crtc_state->base.event = NULL;
+ }
+ }
+@@ -877,7 +914,14 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
+
+ struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc)
+ {
+- struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
++ struct mtk_drm_crtc *mtk_crtc = NULL;
++
++ if (!crtc)
++ return NULL;
++
++ mtk_crtc = to_mtk_crtc(crtc);
++ if (!mtk_crtc)
++ return NULL;
+
+ return mtk_crtc->dma_dev;
+ }
+@@ -997,6 +1041,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+ mutex_init(&mtk_crtc->hw_lock);
++ spin_lock_init(&mtk_crtc->config_lock);
+
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ i = priv->mbox_index++;
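The new config_lock closes a race between the CRTC configuration path and the IRQ-time vblank/CMDQ callbacks: every reader and writer of config_updating now holds the same lock, so a callback can never observe a half-applied configuration. A pthread analogue of the invariant (illustrative only; the kernel code uses spin_lock_irqsave because the readers run in interrupt context):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool config_updating;

    static void update_begin(void)
    {
        pthread_mutex_lock(&config_lock);
        config_updating = true;
        pthread_mutex_unlock(&config_lock);
    }

    static bool callback_should_skip(void)
    {
        bool skip;

        pthread_mutex_lock(&config_lock);
        skip = config_updating;   /* read under the same lock as the writer */
        pthread_mutex_unlock(&config_lock);
        return skip;
    }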
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+index 771f4e1733539c..66ccde966e3c10 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+@@ -553,7 +553,7 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+ int ret;
+ #endif
+
+- if (comp_id < 0 || comp_id >= DDP_COMPONENT_DRM_ID_MAX)
++ if (comp_id >= DDP_COMPONENT_DRM_ID_MAX)
+ return -EINVAL;
+
+ type = mtk_ddp_matches[comp_id].type;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 93552d76b6e778..ffe016d6cbcfe0 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -288,6 +288,7 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
+ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
+ .main_path = mt8188_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
++ .mmsys_dev_num = 1,
+ };
+
+ static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+@@ -420,6 +421,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ struct mtk_drm_private *private = drm->dev_private;
+ struct mtk_drm_private *priv_n;
+ struct device *dma_dev = NULL;
++ struct drm_crtc *crtc;
+ int ret, i, j;
+
+ if (drm_firmware_drivers_only())
+@@ -494,7 +496,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ }
+
+ /* Use OVL device for all DMA memory allocations */
+- dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0));
++ crtc = drm_crtc_from_index(drm, 0);
++ if (crtc)
++ dma_dev = mtk_drm_crtc_dma_dev_get(crtc);
+ if (!dma_dev) {
+ ret = -ENODEV;
+ dev_err(drm->dev, "Need at least one OVL device\n");
+@@ -715,6 +719,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8192-disp-ovl",
+ .data = (void *)MTK_DISP_OVL },
++ { .compatible = "mediatek,mt8195-disp-ovl",
++ .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8183-disp-ovl-2l",
+ .data = (void *)MTK_DISP_OVL_2L },
+ { .compatible = "mediatek,mt8192-disp-ovl-2l",
+@@ -922,6 +928,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
+ of_node_put(private->comp_node[i]);
+ }
+
++static void mtk_drm_shutdown(struct platform_device *pdev)
++{
++ struct mtk_drm_private *private = platform_get_drvdata(pdev);
++
++ drm_atomic_helper_shutdown(private->drm);
++}
++
+ static int mtk_drm_sys_prepare(struct device *dev)
+ {
+ struct mtk_drm_private *private = dev_get_drvdata(dev);
+@@ -953,6 +966,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
+ static struct platform_driver mtk_drm_platform_driver = {
+ .probe = mtk_drm_probe,
+ .remove_new = mtk_drm_remove,
++ .shutdown = mtk_drm_shutdown,
+ .driver = {
+ .name = "mediatek-drm",
+ .pm = &mtk_drm_pm_ops,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 0e0a41b2f57f05..1bf229615b0188 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -38,6 +38,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
+
+ size = round_up(size, PAGE_SIZE);
+
++ if (size == 0)
++ return ERR_PTR(-EINVAL);
++
+ mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
+ if (!mtk_gem_obj)
+ return ERR_PTR(-ENOMEM);
+@@ -121,7 +124,14 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ int ret;
+
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+- args->size = args->pitch * args->height;
++
++	/*
++	 * Multiplying two 32-bit values in one expression, e.g.
++	 * args->size = args->pitch * args->height, can overflow before
++	 * the result is widened; widen first, then multiply.
++	 */
++ args->size = args->pitch;
++ args->size *= args->height;
+
+ mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+ if (IS_ERR(mtk_gem))
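The dumb-buffer size fix is the widen-then-multiply idiom: assigning pitch into the 64-bit size before multiplying keeps the product from wrapping in 32-bit arithmetic. As a sketch (hypothetical helper, same arithmetic):

    #include <stdint.h>

    static uint64_t dumb_size(uint32_t pitch, uint32_t height)
    {
        uint64_t size = pitch;  /* widen first ... */

        size *= height;         /* ... then multiply in 64 bits */
        return size;
    }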
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+index db2f70ae060d6f..f10d4cc6c2234f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+@@ -141,6 +141,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ dma_addr_t addr;
+ dma_addr_t hdr_addr = 0;
+ unsigned int hdr_pitch = 0;
++ int offset;
+
+ gem = fb->obj[0];
+ mtk_gem = to_mtk_gem_obj(gem);
+@@ -150,8 +151,15 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ modifier = fb->modifier;
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+- addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+- addr += (new_state->src.y1 >> 16) * pitch;
++		/*
++		 * Mixing dma_addr_t with narrower multiplicands, e.g.
++		 * addr += (new_state->src.x1 >> 16) * fb->format->cpp[0],
++		 * can overflow in the 32-bit product; add a precomputed offset.
++		 */
++ offset = (new_state->src.x1 >> 16) * fb->format->cpp[0];
++ addr += offset;
++ offset = (new_state->src.y1 >> 16) * pitch;
++ addr += offset;
+ } else {
+ int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH)
+ / AFBC_DATA_BLOCK_WIDTH;
+@@ -159,21 +167,34 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+ / AFBC_DATA_BLOCK_HEIGHT;
+ int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH;
+ int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT;
+- int hdr_size;
++ int hdr_size, hdr_offset;
+
+ hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE;
+ pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH *
+ AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0];
+
+ hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT);
++ hdr_offset = hdr_pitch * y_offset_in_blocks +
++ AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
++
++		/*
++		 * As above, hdr_pitch * y_offset_in_blocks is a 32-bit
++		 * product that could wrap before the dma_addr_t add, so
++		 * the header offset is computed separately first.
++		 */
++ hdr_addr = addr + hdr_offset;
+
+- hdr_addr = addr + hdr_pitch * y_offset_in_blocks +
+- AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
+ /* The data plane is offset by 1 additional block. */
+- addr = addr + hdr_size +
+- pitch * y_offset_in_blocks +
+- AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
+- fb->format->cpp[0] * (x_offset_in_blocks + 1);
++ offset = pitch * y_offset_in_blocks +
++ AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
++ fb->format->cpp[0] * (x_offset_in_blocks + 1);
++
++		/*
++		 * Likewise, pitch * y_offset_in_blocks is evaluated in
++		 * 32 bits; keeping it in a separate int offset avoids the
++		 * unintentional overflow before the dma_addr_t add.
++		 */
++ addr = addr + hdr_size + offset;
+ }
+
+ mtk_plane_state->pending.enable = true;
+@@ -206,9 +227,11 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+- swap(plane->state->fb, new_state->fb);
++ plane->state->dst.x1 = new_state->dst.x1;
++ plane->state->dst.y1 = new_state->dst.y1;
+
+ mtk_plane_update_new_state(new_state, new_plane_state);
++ swap(plane->state->fb, new_state->fb);
+ wmb(); /* Make sure the above parameters are set before update */
+ new_plane_state->pending.async_dirty = true;
+ mtk_drm_crtc_async_update(new_state->crtc, plane, state);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index d8bfc2cce54dc6..0d96264ec5c6da 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -71,8 +71,8 @@
+ #define DSI_PS_WC 0x3fff
+ #define DSI_PS_SEL (3 << 16)
+ #define PACKED_PS_16BIT_RGB565 (0 << 16)
+-#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
+-#define PACKED_PS_18BIT_RGB666 (2 << 16)
++#define PACKED_PS_18BIT_RGB666 (1 << 16)
++#define LOOSELY_PS_24BIT_RGB666 (2 << 16)
+ #define PACKED_PS_24BIT_RGB888 (3 << 16)
+
+ #define DSI_VSA_NL 0x20
+@@ -367,10 +367,10 @@ static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
+ ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+- ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
++ ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+- ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
++ ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
+@@ -407,7 +407,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
+ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ tmp_reg |= HSTX_CKLP_EN;
+
+- if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ tmp_reg |= DIS_EOT;
+
+ writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+@@ -424,7 +424,7 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
+ dsi_tmp_buf_bpp = 3;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+- tmp_reg = LOOSELY_PS_18BIT_RGB666;
++ tmp_reg = LOOSELY_PS_24BIT_RGB666;
+ dsi_tmp_buf_bpp = 3;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+@@ -484,7 +484,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ timing->da_hs_zero + timing->da_hs_exit + 3;
+
+ delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+- delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
++ delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;
+
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
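
The two EOT-packet tests above were inverted: DIS_EOT must be set exactly
when MIPI_DSI_MODE_NO_EOT_PACKET is requested, and the 2-byte EOT overhead
belongs in the porch budget only when the packet is actually sent. A toy
check of the corrected flag logic (the bit positions here are assumptions
for the sketch, not the controller's register layout):

    #include <stdio.h>

    #define MIPI_DSI_MODE_NO_EOT_PACKET (1u << 0)  /* assumed bit */
    #define DIS_EOT                     (1u << 1)  /* assumed bit */

    int main(void)
    {
        unsigned long mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET;
        unsigned int old_reg = 0, new_reg = 0;

        /* old test: disabled EOT when the packet was wanted */
        if (!(mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
            old_reg |= DIS_EOT;

        /* fixed test: disable EOT only when asked to */
        if (mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
            new_reg |= DIS_EOT;

        printf("old DIS_EOT=%u, fixed DIS_EOT=%u\n",
               !!(old_reg & DIS_EOT), !!(new_reg & DIS_EOT));
        return 0;
    }
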
+diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
+index db7ac666ec5e11..0cf8c889941565 100644
+--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
++++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
+@@ -50,7 +50,6 @@
+
+ #define MIXER_INX_MODE_BYPASS 0
+ #define MIXER_INX_MODE_EVEN_EXTEND 1
+-#define DEFAULT_9BIT_ALPHA 0x100
+ #define MIXER_ALPHA_AEN BIT(8)
+ #define MIXER_ALPHA 0xff
+ #define ETHDR_CLK_NUM 13
+@@ -154,13 +153,19 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
+ unsigned int offset = (pending->x & 1) << 31 | pending->y << 16 | pending->x;
+ unsigned int align_width = ALIGN_DOWN(pending->width, 2);
+ unsigned int alpha_con = 0;
++ bool replace_src_a = false;
+
+ dev_dbg(dev, "%s+ idx:%d", __func__, idx);
+
+ if (idx >= 4)
+ return;
+
+- if (!pending->enable) {
++ if (!pending->enable || !pending->width || !pending->height) {
++ /*
++ * Instead of disabling the layer via MIX_SRC_CON directly,
++ * set its size to 0: a hardware mixer mode switch would
++ * otherwise cause a visible screen shift.
++ */
+ mtk_ddp_write(cmdq_pkt, 0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(idx));
+ return;
+ }
+@@ -168,8 +173,16 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
+ if (state->base.fb && state->base.fb->format->has_alpha)
+ alpha_con = MIXER_ALPHA_AEN | MIXER_ALPHA;
+
+- mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, alpha_con ? false : true,
+- DEFAULT_9BIT_ALPHA,
++ if (state->base.fb && !state->base.fb->format->has_alpha) {
++ /*
++ * The mixer has no CONST_BLD mode; replacing the source
++ * alpha with a constant yields an equivalent output.
++ */
++ replace_src_a = true;
++ }
++
++ mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, replace_src_a,
++ MIXER_ALPHA,
+ pending->x & 1 ? MIXER_INX_MODE_EVEN_EXTEND :
+ MIXER_INX_MODE_BYPASS, align_width / 2 - 1, cmdq_pkt);
+
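
The mtk_ethdr change drops the fixed 9-bit default alpha and instead
replaces the source alpha with a constant 0xff for formats that carry no
alpha channel. A quick check that this reproduces the opaque result under
the usual blend equation (8-bit math, purely illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int src = 200, dst = 50, a = 0xff;

        /* dst' = (src*a + dst*(255-a)) / 255 collapses to src when a == 0xff */
        unsigned int out = (src * a + dst * (0xff - a)) / 0xff;
        printf("blended = %u (equals src for a = 0xff)\n", out);
        return 0;
    }
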
+diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+index c3adaeefd551a2..c7233d0ac210f1 100644
+--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
++++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+@@ -246,8 +246,7 @@ int mtk_mdp_rdma_clk_enable(struct device *dev)
+ {
+ struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
+
+- clk_prepare_enable(rdma->clk);
+- return 0;
++ return clk_prepare_enable(rdma->clk);
+ }
+
+ void mtk_mdp_rdma_clk_disable(struct device *dev)
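
mtk_mdp_rdma_clk_enable() used to discard the status of clk_prepare_enable()
and report unconditional success; it now simply returns it. A stubbed
illustration of the difference (the stub stands in for the real clock call):

    #include <stdio.h>

    /* stand-in for clk_prepare_enable(): 0 on success, -errno on failure */
    static int clk_prepare_enable_stub(int fail) { return fail ? -5 : 0; }

    static int clk_enable_old(int fail) { clk_prepare_enable_stub(fail); return 0; }
    static int clk_enable_new(int fail) { return clk_prepare_enable_stub(fail); }

    int main(void)
    {
        printf("old on failure: %d\n", clk_enable_old(1));  /* hides the error */
        printf("new on failure: %d\n", clk_enable_new(1));  /* reports -5 */
        return 0;
    }
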
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index cb674966e9aca7..095f634ff7c799 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -250,29 +250,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+- if (ret) {
+- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+- goto free_drm;
+- }
++ if (ret)
++ goto free_canvas_osd1;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+- if (ret) {
+- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+- goto free_drm;
+- }
++ if (ret)
++ goto free_canvas_vd1_0;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+- if (ret) {
+- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+- goto free_drm;
+- }
++ if (ret)
++ goto free_canvas_vd1_1;
+
+ priv->vsync_irq = platform_get_irq(pdev, 0);
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+- goto free_drm;
++ goto free_canvas_vd1_2;
+
+ /* Assign limits per soc revision/package */
+ for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
+@@ -288,11 +279,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ */
+ ret = drm_aperture_remove_framebuffers(&meson_driver);
+ if (ret)
+- goto free_drm;
++ goto free_canvas_vd1_2;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+- goto free_drm;
++ goto free_canvas_vd1_2;
+ drm->mode_config.max_width = 3840;
+ drm->mode_config.max_height = 2160;
+ drm->mode_config.funcs = &meson_mode_config_funcs;
+@@ -307,7 +298,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ if (priv->afbcd.ops) {
+ ret = priv->afbcd.ops->init(priv);
+ if (ret)
+- goto free_drm;
++ goto free_canvas_vd1_2;
+ }
+
+ /* Encoder Initialization */
+@@ -371,6 +362,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ exit_afbcd:
+ if (priv->afbcd.ops)
+ priv->afbcd.ops->exit(priv);
++free_canvas_vd1_2:
++ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
++free_canvas_vd1_1:
++ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
++free_canvas_vd1_0:
++ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
++free_canvas_osd1:
++ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ free_drm:
+ drm_dev_put(drm);
+
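
The meson_drv rework above replaces repeated inline cleanup with the usual
reverse-order goto ladder, and makes every later failure path (vblank init,
aperture removal, mode config, afbcd init) unwind the canvases as well.
A self-contained sketch of the idiom with illustrative names:

    #include <stdio.h>

    static int  acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int bind(void)
    {
        int ret;

        if ((ret = acquire("osd1")))  goto out;
        if ((ret = acquire("vd1_0"))) goto free_osd1;
        if ((ret = acquire("vd1_1"))) goto free_vd1_0;
        if ((ret = acquire("vd1_2"))) goto free_vd1_1;
        return 0;

    free_vd1_1:
        release("vd1_1");
    free_vd1_0:
        release("vd1_0");
    free_osd1:
        release("osd1");
    out:
        return ret;
    }

    int main(void) { return bind(); }
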
+diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
+index 5a9538bc0e26f8..5565f7777529f8 100644
+--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
+@@ -106,6 +106,8 @@
+ #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */
+ #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */
+ #define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
++#define PHY_CNTL1_INIT 0x03900000
++#define PHY_INVERT BIT(17)
+ #define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
+ #define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
+ #define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
+@@ -130,6 +132,8 @@ struct meson_dw_hdmi_data {
+ unsigned int addr);
+ void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
++ u32 cntl0_init;
++ u32 cntl1_init;
+ };
+
+ struct meson_dw_hdmi {
+@@ -384,26 +388,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
+ dw_hdmi_bus_fmt_is_420(hdmi))
+ mode_is_420 = true;
+
+- /* Enable clocks */
+- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+-
+- /* Bring HDMITX MEM output of power down */
+- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+-
+- /* Bring out of reset */
+- dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+-
+- /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
+- dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
+- 0x3, 0x3);
+-
+- /* Enable cec_clk and hdcp22_tmdsclk_en */
+- dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
+- 0x3 << 4, 0x3 << 4);
+-
+- /* Enable normal output to PHY */
+- dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+-
+ /* TMDS pattern setup */
+ if (mode->clock > 340000 && !mode_is_420) {
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+@@ -425,20 +409,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
+ /* Setup PHY parameters */
+ meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
+
+- /* Setup PHY */
+- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+- 0xffff << 16, 0x0390 << 16);
+-
+- /* BIT_INVERT */
+- if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+- BIT(17), 0);
+- else
+- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
+- BIT(17), BIT(17));
+-
+ /* Disable clock, fifo, fifo_wr */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
+
+@@ -492,7 +462,9 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
+
+ DRM_DEBUG_DRIVER("\n");
+
+- regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
++ /* Fallback to init mode */
++ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, dw_hdmi->data->cntl1_init);
++ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, dw_hdmi->data->cntl0_init);
+ }
+
+ static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
+@@ -610,11 +582,22 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
+ .fast_io = true,
+ };
+
+-static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
++static const struct meson_dw_hdmi_data meson_dw_hdmi_gxbb_data = {
+ .top_read = dw_hdmi_top_read,
+ .top_write = dw_hdmi_top_write,
+ .dwc_read = dw_hdmi_dwc_read,
+ .dwc_write = dw_hdmi_dwc_write,
++ .cntl0_init = 0x0,
++ .cntl1_init = PHY_CNTL1_INIT | PHY_INVERT,
++};
++
++static const struct meson_dw_hdmi_data meson_dw_hdmi_gxl_data = {
++ .top_read = dw_hdmi_top_read,
++ .top_write = dw_hdmi_top_write,
++ .dwc_read = dw_hdmi_dwc_read,
++ .dwc_write = dw_hdmi_dwc_write,
++ .cntl0_init = 0x0,
++ .cntl1_init = PHY_CNTL1_INIT,
+ };
+
+ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+@@ -622,6 +605,8 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+ .top_write = dw_hdmi_g12a_top_write,
+ .dwc_read = dw_hdmi_g12a_dwc_read,
+ .dwc_write = dw_hdmi_g12a_dwc_write,
++ .cntl0_init = 0x000b4242, /* Bandgap */
++ .cntl1_init = PHY_CNTL1_INIT,
+ };
+
+ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+@@ -656,6 +641,13 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
+
++ /* Enable normal output to PHY */
++ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
++
++ /* Setup PHY */
++ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, meson_dw_hdmi->data->cntl1_init);
++ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, meson_dw_hdmi->data->cntl0_init);
++
+ /* Enable HDMI-TX Interrupt */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
+@@ -865,11 +857,11 @@ static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
+
+ static const struct of_device_id meson_dw_hdmi_of_table[] = {
+ { .compatible = "amlogic,meson-gxbb-dw-hdmi",
+- .data = &meson_dw_hdmi_gx_data },
++ .data = &meson_dw_hdmi_gxbb_data },
+ { .compatible = "amlogic,meson-gxl-dw-hdmi",
+- .data = &meson_dw_hdmi_gx_data },
++ .data = &meson_dw_hdmi_gxl_data },
+ { .compatible = "amlogic,meson-gxm-dw-hdmi",
+- .data = &meson_dw_hdmi_gx_data },
++ .data = &meson_dw_hdmi_gxl_data },
+ { .compatible = "amlogic,meson-g12a-dw-hdmi",
+ .data = &meson_dw_hdmi_g12a_data },
+ { }
+diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+index e5fe4e994f43b1..72abe2057ec31e 100644
+--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
++++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+@@ -95,6 +95,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
+ return ret;
+ }
+
++ clk_disable_unprepare(mipi_dsi->px_clk);
+ ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000);
+
+ if (ret) {
+@@ -103,6 +104,12 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
+ return ret;
+ }
+
++ ret = clk_prepare_enable(mipi_dsi->px_clk);
++ if (ret) {
++ dev_err(mipi_dsi->dev, "Failed to enable DSI Pixel clock (ret %d)\n", ret);
++ return ret;
++ }
++
+ switch (mipi_dsi->dsi_device->format) {
+ case MIPI_DSI_FMT_RGB888:
+ dpi_data_format = DPI_COLOR_24BIT;
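
The meson MIPI-DSI PHY init above now gates px_clk before calling
clk_set_rate() and re-enables it afterwards, checking the enable result.
A stubbed sketch of the sequence (the stubs only log; the rate is
illustrative):

    #include <stdio.h>

    static void clk_disable_unprepare_stub(const char *c) { printf("gate   %s\n", c); }
    static int  clk_set_rate_stub(const char *c, long hz)
    {
        printf("rate   %s = %ld Hz\n", c, hz);
        return 0;
    }
    static int  clk_prepare_enable_stub(const char *c)
    {
        printf("ungate %s\n", c);
        return 0;
    }

    int main(void)
    {
        clk_disable_unprepare_stub("px_clk");
        if (clk_set_rate_stub("px_clk", 148500000))
            return 1;
        return clk_prepare_enable_stub("px_clk");
    }
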
+diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+index 3f73b211fa8e3e..3407450435e205 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
+ if (priv->encoders[MESON_ENC_CVBS]) {
+ meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
+ drm_bridge_remove(&meson_encoder_cvbs->bridge);
+- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
+ }
+ }
+diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
+index 3f93c70488cad1..311b91630fbe53 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
+@@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
+ if (priv->encoders[MESON_ENC_DSI]) {
+ meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
+ drm_bridge_remove(&meson_encoder_dsi->bridge);
+- drm_bridge_remove(meson_encoder_dsi->next_bridge);
+ }
+ }
+diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+index 25ea765586908f..c4686568c9ca5d 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+@@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
+ if (priv->encoders[MESON_ENC_HDMI]) {
+ meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
+ drm_bridge_remove(&meson_encoder_hdmi->bridge);
+- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
+ }
+ }
+diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
+index 815dfe30492b6c..b43ac61201f312 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -534,6 +534,7 @@ int meson_plane_create(struct meson_drm *priv)
+ struct meson_plane *meson_plane;
+ struct drm_plane *plane;
+ const uint64_t *format_modifiers = format_modifiers_default;
++ int ret;
+
+ meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
+ GFP_KERNEL);
+@@ -548,12 +549,16 @@ int meson_plane_create(struct meson_drm *priv)
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ format_modifiers = format_modifiers_afbc_g12a;
+
+- drm_universal_plane_init(priv->drm, plane, 0xFF,
+- &meson_plane_funcs,
+- supported_drm_formats,
+- ARRAY_SIZE(supported_drm_formats),
+- format_modifiers,
+- DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++ ret = drm_universal_plane_init(priv->drm, plane, 0xFF,
++ &meson_plane_funcs,
++ supported_drm_formats,
++ ARRAY_SIZE(supported_drm_formats),
++ format_modifiers,
++ DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++ if (ret) {
++ devm_kfree(priv->drm->dev, meson_plane);
++ return ret;
++ }
+
+ drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+
+diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
+index 2a82119eb58ed8..2a942dc6a6dc23 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.c
++++ b/drivers/gpu/drm/meson/meson_vclk.c
+@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ i, params[i].phy_freq,
+- FREQ_1000_1001(params[i].phy_freq/10)*10);
++ FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+ /* Match strict frequency */
+ if (phy_freq == params[i].phy_freq &&
+ vclk_freq == params[i].vclk_freq)
+ return MODE_OK;
+ /* Match 1000/1001 variant */
+- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
++ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
+ vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ return MODE_OK;
+ }
+@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+
+ for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+ if ((phy_freq == params[freq].phy_freq ||
+- phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
++ phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+ (vclk_freq == params[freq].vclk_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (vclk_freq != params[freq].vclk_freq)
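
The meson_vclk fix rounds the 1000/1001 variant of the PHY frequency to a
multiple of 1000 rather than 10, so the comparison lands on a rate the
clock tree can actually produce. Assuming the usual NTSC-style macro,
FREQ_1000_1001(f) = f * 1000 / 1001, the two roundings differ like this:

    #include <stdio.h>

    /* assumed macro shape, mirroring the driver's NTSC 1000/1001 pull-down */
    #define FREQ_1000_1001(f) (((f) * 1000) / 1001)

    int main(void)
    {
        unsigned long long phy = 2970000;   /* kHz; hypothetical 59.94 Hz rate */

        printf("coarse (/10*10):     %llu\n", FREQ_1000_1001(phy / 10) * 10);
        printf("fixed  (/1000*1000): %llu\n", FREQ_1000_1001(phy / 1000) * 1000);
        return 0;
    }

This prints 2967030 versus 2967000 for the assumed input, which is why the
coarser rounding could fail to match.
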
+diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
+index abddf37f0ea119..2fb18b782b0536 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
+@@ -10,6 +10,7 @@
+ #include <linux/pci.h>
+
+ #include <drm/drm_aperture.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_file.h>
+@@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev)
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(dev);
++ drm_atomic_helper_shutdown(dev);
++}
++
++static void mgag200_pci_shutdown(struct pci_dev *pdev)
++{
++ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ }
+
+ static struct pci_driver mgag200_pci_driver = {
+@@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = {
+ .id_table = mgag200_pciidlist,
+ .probe = mgag200_pci_probe,
+ .remove = mgag200_pci_remove,
++ .shutdown = mgag200_pci_shutdown,
+ };
+
+ drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
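
mgag200 gains a PCI .shutdown hook and also quiesces the pipeline on
remove, so planes and CRTCs are disabled on reboot/kexec as well as on
unbind. A stubbed sketch of routing both callbacks through one helper
(all names here are stand-ins, not the DRM API):

    #include <stdio.h>

    struct pci_driver_stub {
        void (*remove)(void);
        void (*shutdown)(void);
    };

    static void pipeline_shutdown(void) { puts("planes/CRTCs disabled"); }
    static void drv_remove(void)   { puts("unregister device"); pipeline_shutdown(); }
    static void drv_shutdown(void) { pipeline_shutdown(); }

    int main(void)
    {
        struct pci_driver_stub drv = { drv_remove, drv_shutdown };
        drv.shutdown();   /* reboot/kexec path */
        drv.remove();     /* driver-unbind path */
        return 0;
    }
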
+diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
+index 57c7edcab6029a..765e49fd891112 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -392,6 +392,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ .destroy = drm_plane_cleanup, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
++void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
++void mgag200_crtc_set_gamma(struct mga_device *mdev,
++ const struct drm_format_info *format,
++ struct drm_color_lut *lut);
++
+ enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
+index bce267e0f7de3c..8d4538b7104776 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
+@@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+
+ mgag200_g200er_reset_tagfifo(mdev);
+
++ if (crtc_state->gamma_lut)
++ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++ else
++ mgag200_crtc_set_gamma_linear(mdev, format);
++
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+index ac957f42abe182..56e6f986bff311 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+@@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+
+ mgag200_g200ev_set_hiprilvl(mdev);
+
++ if (crtc_state->gamma_lut)
++ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++ else
++ mgag200_crtc_set_gamma_linear(mdev, format);
++
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
+index bd6e573c9a1a31..ff2b3c6622e7aa 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
++++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
+@@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+
+ mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
+
++ if (crtc_state->gamma_lut)
++ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
++ else
++ mgag200_crtc_set_gamma_linear(mdev, format);
++
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
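
The three G200 variants above now program the gamma LUT from the CRTC
state, falling back to a linear ramp when userspace supplied none. A
trivial sketch of what that linear fallback amounts to (an 8-bit identity
ramp; the real entry width depends on the framebuffer format):

    #include <stdio.h>

    int main(void)
    {
        unsigned char lut[256];

        for (int i = 0; i < 256; i++)
            lut[i] = (unsigned char)i;  /* identity: output == input */

        printf("lut[0]=%u lut[128]=%u lut[255]=%u\n", lut[0], lut[128], lut[255]);
        return 0;
    }
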
+diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
+index 0c48bdf3e7f800..f5c5d06d0d4bb7 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
++++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
+@@ -31,6 +31,8 @@
+ #include <linux/i2c.h>
+ #include <linux/pci.h>
+
++#include <drm/drm_managed.h>
++
+ #include "mgag200_drv.h"
+
+ static int mga_i2c_read_gpio(struct mga_device *mdev)
+@@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data)
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+ }
+
+-static void mgag200_i2c_release(void *res)
++static void mgag200_i2c_release(struct drm_device *dev, void *res)
+ {
+ struct mga_i2c_chan *i2c = res;
+
+@@ -115,7 +117,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+- i2c->bit.timeout = 2;
++ i2c->bit.timeout = usecs_to_jiffies(2200);
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
+@@ -126,5 +128,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
+ if (ret)
+ return ret;
+
+- return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
++ return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c);
+ }
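
Two fixes land in mgag200_i2c.c: the bit-bang timeout is now expressed in
time units instead of raw jiffies, and the release action becomes
DRM-managed so it runs while the drm_device is still alive. A sketch of
the timeout conversion under an assumed HZ (the kernel helper rounds up
to whole jiffies in the same way):

    #include <stdio.h>

    #define HZ 250  /* assumption: one common CONFIG_HZ value */

    static unsigned long usecs_to_jiffies_sketch(unsigned long us)
    {
        return (us * HZ + 999999UL) / 1000000UL;  /* round up */
    }

    int main(void)
    {
        /* the old hard-coded "2" meant 2 jiffies: 8 ms at HZ=250 but
         * 20 ms at HZ=100; encoding 2200 us makes it HZ-independent */
        printf("2200 us -> %lu jiffies at HZ=%d\n",
               usecs_to_jiffies_sketch(2200), HZ);
        return 0;
    }
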
+diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
+index af3ce5a6a636ac..0f0d59938c3a07 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
+@@ -28,8 +28,8 @@
+ * This file contains setup code for the CRTC.
+ */
+
+-static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+- const struct drm_format_info *format)
++void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
++ const struct drm_format_info *format)
+ {
+ int i;
+
+@@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
+ }
+ }
+
+-static void mgag200_crtc_set_gamma(struct mga_device *mdev,
+- const struct drm_format_info *format,
+- struct drm_color_lut *lut)
++void mgag200_crtc_set_gamma(struct mga_device *mdev,
++ const struct drm_format_info *format,
++ struct drm_color_lut *lut)
+ {
+ int i;
+
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index e5916c10679679..8c2758a18a19cf 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+
+ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ {
++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
++ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ struct drm_gem_object *obj;
+ uint32_t *ptr, dwords;
+@@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
+ }
+ }
+
++ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
+ a5xx_flush(gpu, ring, true);
+ a5xx_preempt_trigger(gpu);
+
+@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+- /* Enable local preemption for finegrain preemption */
++ /*
++ * Disable local preemption by default because it requires
++ * user-space to be aware of it and provide additional handling
++ * to restore rendering state or do various flushes on switch.
++ */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+- OUT_RING(ring, 0x1);
++ OUT_RING(ring, 0x0);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+@@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
++ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+index c7187bcc5e9082..9c0d701fe4b85b 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+@@ -34,8 +34,10 @@ struct a5xx_gpu {
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
++ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
++ spinlock_t preempt_start_lock;
+ struct timer_list preempt_timer;
+
+ struct drm_gem_object *shadow_bo;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+index f58dd564d122ba..0469fea5501083 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+@@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ /* Return the highest priority ringbuffer with something in it */
+ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+ {
++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
++ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ int i;
+
+@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
++ if (!empty && ring == a5xx_gpu->cur_ring)
++ empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ if (!empty)
+@@ -97,12 +101,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+ if (gpu->nr_rings == 1)
+ return;
+
++ /*
++ * Serialize preemption start so the decision is always made
++ * on the latest state; otherwise we can get stuck on a lower
++ * priority or an empty ring.
++ */
++ spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
++
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+- return;
++ goto out;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+@@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+- return;
++ goto out;
+ }
+
++ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
++
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+@@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
++ return;
++
++out:
++ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
+ }
+
+ void a5xx_preempt_irq(struct msm_gpu *gpu)
+@@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
++
++ /*
++ * Try to trigger preemption again in case there was a submit
++ * or retire during the ring switch.
++ */
++ a5xx_preempt_trigger(gpu);
+ }
+
+ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+@@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
++ a5xx_gpu->preempt[i]->data = 0;
++ a5xx_gpu->preempt[i]->info = 0;
+ a5xx_gpu->preempt[i]->wptr = 0;
+ a5xx_gpu->preempt[i]->rptr = 0;
+ a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+@@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
+ }
+ }
+
++ spin_lock_init(&a5xx_gpu->preempt_start_lock);
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
+ }
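
The a5xx preemption rework tracks a per-ring last_seqno so a fully retired
current ring counts as empty, and serializes the preemption-start decision
with a new spinlock. The NONE -> START gate itself is a compare-and-swap;
a C11 sketch of that transition (state values assumed for the sketch):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { PREEMPT_NONE, PREEMPT_START };

    static _Atomic int preempt_state = PREEMPT_NONE;

    /* returns 1 if we moved old -> next, 0 if someone else got there first */
    static int try_preempt_state(int old, int next)
    {
        return atomic_compare_exchange_strong(&preempt_state, &old, next);
    }

    int main(void)
    {
        printf("first caller wins:   %d\n", try_preempt_state(PREEMPT_NONE, PREEMPT_START));
        printf("second caller loses: %d\n", try_preempt_state(PREEMPT_NONE, PREEMPT_START));
        return 0;
    }
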
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index d4e85e24002fb7..3664c1476a83ad 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -2237,7 +2237,7 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ speedbin);
+- return UINT_MAX;
++ supp_hw = BIT(0); /* Default */
+ }
+
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+@@ -2343,7 +2343,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+
+ ret = a6xx_set_supported_hw(&pdev->dev, config->info);
+ if (ret) {
+- a6xx_destroy(&(a6xx_gpu->base.base));
++ a6xx_llc_slices_destroy(a6xx_gpu);
++ kfree(a6xx_gpu);
+ return ERR_PTR(ret);
+ }
+
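
a6xx_set_supported_hw() no longer aborts GPU init on an unknown speed bin:
it warns and falls back to the default fuse mask, and the probe-error path
now frees only what was actually allocated. A sketch of the fallback shape
(bin numbers and masks are illustrative, not real fuse values):

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static unsigned int supported_hw_for(unsigned int speedbin)
    {
        switch (speedbin) {
        case 0:   return BIT(0);
        case 129: return BIT(1);
        default:
            fprintf(stderr, "missing support for speed-bin %u, using default\n",
                    speedbin);
            return BIT(0);  /* was: bail out, leaving the GPU unusable */
        }
    }

    int main(void)
    {
        printf("mask = %#x\n", supported_hw_for(42));
        return 0;
    }
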
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index 575e7c56219ff1..b7b527e21dac8a 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -331,7 +331,7 @@ static const struct adreno_info gpulist[] = {
+ ),
+ }, {
+ .machine = "qcom,sm6375",
+- .chip_ids = ADRENO_CHIP_IDS(0x06010900),
++ .chip_ids = ADRENO_CHIP_IDS(0x06010901),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+@@ -462,7 +462,7 @@ static const struct adreno_info gpulist[] = {
+ { 190, 1 },
+ ),
+ }, {
+- .chip_ids = ADRENO_CHIP_IDS(0x06080000),
++ .chip_ids = ADRENO_CHIP_IDS(0x06080001),
+ .family = ADRENO_6XX_GEN2,
+ .revn = 680,
+ .fw = {
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 8090dde0328082..a2df8bd7aa940f 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -99,7 +99,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ * was a bad idea, and is only provided for backwards
+ * compatibility for older targets.
+ */
+- return -ENODEV;
++ return -ENOENT;
+ }
+
+ if (IS_ERR(fw)) {
+@@ -468,7 +468,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
+ ret = request_firmware_direct(&fw, fwname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
+- newname);
++ fwname);
+ adreno_gpu->fwloc = FW_LOCATION_LEGACY;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+@@ -1071,6 +1071,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ adreno_gpu->chip_id = config->chip_id;
+
+ gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
++ gpu->pdev = pdev;
+
+ /* Only handle the core clock when GMU is not in use (or is absent). */
+ if (adreno_has_gmu_wrapper(adreno_gpu) ||
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 99acaf917e4304..f0c3804f425879 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_0,
++ .sblk = &sm8150_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_1,
++ .sblk = &sm8150_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+@@ -93,7 +93,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_2,
++ .sblk = &sm8150_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+@@ -101,7 +101,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_3,
++ .sblk = &sm8150_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index f3de21025ca734..47de71e71e3108 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -76,7 +76,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_0,
++ .sblk = &sm8150_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+@@ -84,7 +84,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_1,
++ .sblk = &sm8150_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+@@ -92,7 +92,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_2,
++ .sblk = &sm8150_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+@@ -100,7 +100,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_3,
++ .sblk = &sm8150_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+@@ -377,6 +377,7 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
++ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+index 5f9b437b82a689..ee781037ada93e 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+@@ -32,7 +32,7 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
++ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ },
+ };
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+index d030c08636b4c3..69d3f7e5e095b0 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+@@ -25,7 +25,7 @@ static const struct dpu_mdp_cfg sc7180_mdp = {
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
++ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ },
+ };
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+index f8d16f9bf528d8..428bcbcfbf1925 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+@@ -31,6 +31,7 @@ static const struct dpu_mdp_cfg sm8350_mdp = {
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
++ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+ };
+@@ -304,6 +305,21 @@ static const struct dpu_dsc_cfg sm8350_dsc[] = {
+ },
+ };
+
++static const struct dpu_wb_cfg sm8350_wb[] = {
++ {
++ .name = "wb_2", .id = WB_2,
++ .base = 0x65000, .len = 0x2c8,
++ .features = WB_SM8250_MASK,
++ .format_list = wb2_formats,
++ .num_formats = ARRAY_SIZE(wb2_formats),
++ .clk_ctrl = DPU_CLK_CTRL_WB2,
++ .xin_id = 6,
++ .vbif_idx = VBIF_RT,
++ .maxlinewidth = 4096,
++ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
++ },
++};
++
+ static const struct dpu_intf_cfg sm8350_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+@@ -401,6 +417,8 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
+ .dsc = sm8350_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8350_merge_3d),
+ .merge_3d = sm8350_merge_3d,
++ .wb_count = ARRAY_SIZE(sm8350_wb),
++ .wb = sm8350_wb,
+ .intf_count = ARRAY_SIZE(sm8350_intf),
+ .intf = sm8350_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+index 3b5061c4402a67..9195cb996f444b 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+@@ -25,7 +25,7 @@ static const struct dpu_mdp_cfg sc7280_mdp = {
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
++ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ },
+ };
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+index 58f5e25679b153..ff9adb8000acdb 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+@@ -419,6 +419,7 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
++ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc8180x_qos_linear),
+ .entries = sc8180x_qos_linear
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+index 1b12178dfbcab7..72a1726371cae2 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+@@ -32,6 +32,7 @@ static const struct dpu_mdp_cfg sm8450_mdp = {
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
++ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+ };
+@@ -76,7 +77,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_0,
++ .sblk = &sm8450_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+@@ -84,7 +85,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_1,
++ .sblk = &sm8450_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+@@ -92,7 +93,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_2,
++ .sblk = &sm8450_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+@@ -100,7 +101,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_3,
++ .sblk = &sm8450_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+@@ -326,6 +327,21 @@ static const struct dpu_dsc_cfg sm8450_dsc[] = {
+ },
+ };
+
++static const struct dpu_wb_cfg sm8450_wb[] = {
++ {
++ .name = "wb_2", .id = WB_2,
++ .base = 0x65000, .len = 0x2c8,
++ .features = WB_SM8250_MASK,
++ .format_list = wb2_formats,
++ .num_formats = ARRAY_SIZE(wb2_formats),
++ .clk_ctrl = DPU_CLK_CTRL_WB2,
++ .xin_id = 6,
++ .vbif_idx = VBIF_RT,
++ .maxlinewidth = 4096,
++ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
++ },
++};
++
+ static const struct dpu_intf_cfg sm8450_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+@@ -423,6 +439,8 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
+ .dsc = sm8450_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8450_merge_3d),
+ .merge_3d = sm8450_merge_3d,
++ .wb_count = ARRAY_SIZE(sm8450_wb),
++ .wb = sm8450_wb,
+ .intf_count = ARRAY_SIZE(sm8450_intf),
+ .intf = sm8450_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+index b5b6e7031fb9e9..ba06312cbb163e 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+@@ -53,7 +53,7 @@ u32 dpu_core_irq_read(
+ int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+- void (*irq_cb)(void *arg, int irq_idx),
++ void (*irq_cb)(void *arg),
+ void *irq_arg);
+
+ /**
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+index ef871239adb2a3..68fae048a9a837 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, entry,
+ (u32 *)&perf->enable_bw_release);
+- debugfs_create_u32("threshold_low", 0600, entry,
++ debugfs_create_u32("threshold_low", 0400, entry,
+ (u32 *)&perf->perf_cfg->max_bw_low);
+- debugfs_create_u32("threshold_high", 0600, entry,
++ debugfs_create_u32("threshold_high", 0400, entry,
+ (u32 *)&perf->perf_cfg->max_bw_high);
+- debugfs_create_u32("min_core_ib", 0600, entry,
++ debugfs_create_u32("min_core_ib", 0400, entry,
+ (u32 *)&perf->perf_cfg->min_core_ib);
+- debugfs_create_u32("min_llcc_ib", 0600, entry,
++ debugfs_create_u32("min_llcc_ib", 0400, entry,
+ (u32 *)&perf->perf_cfg->min_llcc_ib);
+- debugfs_create_u32("min_dram_ib", 0600, entry,
++ debugfs_create_u32("min_dram_ib", 0400, entry,
+ (u32 *)&perf->perf_cfg->min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, entry,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 8ce7586e2ddf74..e238e4e8116caf 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+@@ -125,7 +125,7 @@ static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
+ continue;
+
+ /* Calculate MISR over 1 frame */
+- m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
++ m->hw_lm->ops.setup_misr(m->hw_lm);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index d34e684a417890..6262ec5e40204c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2,7 +2,7 @@
+ /*
+ * Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+@@ -39,6 +39,9 @@
+ #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
++#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
++ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
++
+ /*
+ * Two to anticipate panels that can do cmd/vid dynamic switching
+ * plan is to create all possible physical encoder types, and switch between
+@@ -121,6 +124,8 @@ enum dpu_enc_rc_states {
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enabled: True if the encoder is active, protected by enc_lock
++ * @commit_done_timedout: True if there has been a timeout on commit after
++ * enabling the encoder.
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode. Optimization
+@@ -169,6 +174,7 @@ struct dpu_encoder_virt {
+ spinlock_t enc_spinlock;
+
+ bool enabled;
++ bool commit_done_timedout;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+@@ -223,6 +229,13 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
+ return dpu_enc->wide_bus_en;
+ }
+
++bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
++{
++ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
++
++ return dpu_enc->dsc ? true : false;
++}
++
+ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
+ {
+ struct dpu_encoder_virt *dpu_enc;
+@@ -255,7 +268,7 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
+ if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
+ continue;
+
+- phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1);
++ phys->hw_intf->ops.setup_misr(phys->hw_intf);
+ }
+ }
+
+@@ -347,8 +360,8 @@ static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ u32 irq_idx, struct dpu_encoder_wait_info *info);
+
+ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+- int irq,
+- void (*func)(void *arg, int irq_idx),
++ int irq_idx,
++ void (*func)(void *arg),
+ struct dpu_encoder_wait_info *wait_info)
+ {
+ u32 irq_status;
+@@ -362,54 +375,54 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+- DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
++ DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
+ DRMID(phys_enc->parent), func,
+- irq);
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return -EWOULDBLOCK;
+ }
+
+- if (irq < 0) {
++ if (irq_idx < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
+ DRMID(phys_enc->parent), func);
+ return 0;
+ }
+
+- DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
++ DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+- irq, phys_enc->hw_pp->idx - PINGPONG_0,
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+- irq,
++ irq_idx,
+ wait_info);
+
+ if (ret <= 0) {
+- irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
++ irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
+ if (irq_status) {
+ unsigned long flags;
+
+- DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
++ DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
+ DRMID(phys_enc->parent), func,
+- irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+- func(phys_enc, irq);
++ func(phys_enc);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+- DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
++ DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
+ DRMID(phys_enc->parent), func,
+- irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+- func, irq,
++ func, irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+@@ -1106,8 +1119,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+
+ cstate->num_mixers = num_lm;
+
+- dpu_enc->connector = conn_state->connector;
+-
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+@@ -1200,6 +1211,11 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
+ dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
++
++ dpu_enc->commit_done_timedout = false;
++
++ dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
++
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+@@ -1255,7 +1271,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+- dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
++ dpu_encoder_wait_for_tx_complete(drm_enc);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+@@ -1662,8 +1678,7 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+ phys = dpu_enc->phys_encs[i];
+
+ ctl = phys->hw_ctl;
+- if (ctl->ops.clear_pending_flush)
+- ctl->ops.clear_pending_flush(ctl);
++ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+@@ -1845,7 +1860,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+ dsc_common_mode = 0;
+ pic_width = dsc->pic_width;
+
+- dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
++ dsc_common_mode = DSC_MODE_SPLIT_PANEL;
++ if (dpu_encoder_use_dsc_merge(enc_master->parent))
++ dsc_common_mode |= DSC_MODE_MULTIPLEX;
+ if (enc_master->intf_mode == INTF_MODE_VIDEO)
+ dsc_common_mode |= DSC_MODE_VIDEO;
+
+@@ -2060,7 +2077,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ }
+
+ /* reset the merge 3D HW block */
+- if (phys_enc->hw_pp->merge_3d) {
++ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ BLEND_3D_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+@@ -2082,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
+- if (phys_enc->hw_pp->merge_3d)
++ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ if (ctl->ops.reset_intf_cfg)
+@@ -2161,6 +2178,7 @@ static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+ }
+
+ static int dpu_encoder_virt_add_phys_encs(
++ struct drm_device *dev,
+ struct msm_display_info *disp_info,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+@@ -2182,7 +2200,7 @@ static int dpu_encoder_virt_add_phys_encs(
+
+
+ if (disp_info->intf_type == INTF_WB) {
+- enc = dpu_encoder_phys_wb_init(params);
++ enc = dpu_encoder_phys_wb_init(dev, params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
+@@ -2193,7 +2211,7 @@ static int dpu_encoder_virt_add_phys_encs(
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ } else if (disp_info->is_cmd_mode) {
+- enc = dpu_encoder_phys_cmd_init(params);
++ enc = dpu_encoder_phys_cmd_init(dev, params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+@@ -2204,7 +2222,7 @@ static int dpu_encoder_virt_add_phys_encs(
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ } else {
+- enc = dpu_encoder_phys_vid_init(params);
++ enc = dpu_encoder_phys_vid_init(dev, params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+@@ -2293,7 +2311,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ break;
+ }
+
+- ret = dpu_encoder_virt_add_phys_encs(disp_info,
++ ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
+ dpu_enc, &phys_params);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+@@ -2327,7 +2345,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+ return;
+ }
+
+- DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
++ DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+@@ -2405,10 +2423,18 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ return ERR_PTR(ret);
+ }
+
+-int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+- enum msm_event_wait event)
++/**
++ * dpu_encoder_wait_for_commit_done() - Wait for encoder to flush pending state
++ * @drm_enc: encoder pointer
++ *
++ * Wait for the hardware to have flushed the current pending changes at
++ * a vblank or CTL_START. Physical encoders will map this differently depending
++ * on the type: vid mode -> vsync_irq, cmd mode -> CTL_START.
++ *
++ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
++ */
++int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+ {
+- int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+@@ -2422,26 +2448,51 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+- switch (event) {
+- case MSM_ENC_COMMIT_DONE:
+- fn_wait = phys->ops.wait_for_commit_done;
+- break;
+- case MSM_ENC_TX_COMPLETE:
+- fn_wait = phys->ops.wait_for_tx_complete;
+- break;
+- case MSM_ENC_VBLANK:
+- fn_wait = phys->ops.wait_for_vblank;
+- break;
+- default:
+- DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+- event);
+- return -EINVAL;
++ if (phys->ops.wait_for_commit_done) {
++ DPU_ATRACE_BEGIN("wait_for_commit_done");
++ ret = phys->ops.wait_for_commit_done(phys);
++ DPU_ATRACE_END("wait_for_commit_done");
++ if (ret == -ETIMEDOUT && !dpu_enc->commit_done_timedout) {
++ dpu_enc->commit_done_timedout = true;
++ msm_disp_snapshot_state(drm_enc->dev);
++ }
++ if (ret)
++ return ret;
+ }
++ }
++
++ return ret;
++}
++
++/**
++ * dpu_encoder_wait_for_tx_complete() - Wait for encoder to transfer pixels to panel
++ * @drm_enc: encoder pointer
++ *
++ * Wait for the hardware to transfer all the pixels to the panel. Physical
++ * encoders will map this differently depending on the type: vid mode -> vsync_irq,
++ * cmd mode -> pp_done.
++ *
++ * Return: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
++ */
++int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
++{
++ struct dpu_encoder_virt *dpu_enc = NULL;
++ int i, ret = 0;
++
++ if (!drm_enc) {
++ DPU_ERROR("invalid encoder\n");
++ return -EINVAL;
++ }
++ dpu_enc = to_dpu_encoder_virt(drm_enc);
++ DPU_DEBUG_ENC(dpu_enc, "\n");
++
++ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
++ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+- if (fn_wait) {
+- DPU_ATRACE_BEGIN("wait_for_completion_event");
+- ret = fn_wait(phys);
+- DPU_ATRACE_END("wait_for_completion_event");
++ if (phys->ops.wait_for_tx_complete) {
++ DPU_ATRACE_BEGIN("wait_for_tx_complete");
++ ret = phys->ops.wait_for_tx_complete(phys);
++ DPU_ATRACE_END("wait_for_tx_complete");
+ if (ret)
+ return ret;
+ }
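
dpu_encoder_wait_for_event() and its event enum give way to two explicit
entry points, one per wait semantic, each dispatching straight to the
matching per-phys callback. A stubbed sketch of the shape of that split
(types and messages are stand-ins for the driver's):

    #include <stdio.h>

    struct phys_stub {
        int (*wait_for_commit_done)(void);
        int (*wait_for_tx_complete)(void);
    };

    static int commit_done(void) { puts("wait: CTL_START / vsync"); return 0; }
    static int tx_complete(void) { puts("wait: pp_done / vsync");   return 0; }

    static int wait_for_commit_done(struct phys_stub *p)
    {
        return p->wait_for_commit_done ? p->wait_for_commit_done() : 0;
    }

    static int wait_for_tx_complete(struct phys_stub *p)
    {
        return p->wait_for_tx_complete ? p->wait_for_tx_complete() : 0;
    }

    int main(void)
    {
        struct phys_stub p = { commit_done, tx_complete };
        return wait_for_commit_done(&p) || wait_for_tx_complete(&p);
    }
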
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+index 4c05fd5e9ed18d..0c928d1876e4ae 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+@@ -93,25 +93,9 @@ void dpu_encoder_kickoff(struct drm_encoder *encoder);
+ */
+ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
+
+-/**
+- * dpu_encoder_wait_for_event - Waits for encoder events
+- * @encoder: encoder pointer
+- * @event: event to wait for
+- * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+- * frames to hardware at a vblank or ctl_start
+- * Encoders will map this differently depending on the
+- * panel type.
+- * vid mode -> vsync_irq
+- * cmd mode -> ctl_start
+- * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+- * the panel. Encoders will map this differently
+- * depending on the panel type.
+- * vid mode -> vsync_irq
+- * cmd mode -> pp_done
+- * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+- */
+-int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+- enum msm_event_wait event);
++int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
++
++int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder);
+
+ /*
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+@@ -158,6 +142,13 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+
+ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
+
++/**
++ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
++ * for the encoder.
++ * @drm_enc: Pointer to previously created drm encoder structure
++ */
++bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
++
+ /**
+ * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+ * in virtual encoder that can collect CRC values
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+index d48558ede488d5..57a3598f2a303c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+@@ -106,7 +106,6 @@ struct dpu_encoder_phys_ops {
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+- int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+@@ -281,22 +280,24 @@ struct dpu_encoder_wait_info {
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
++struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
+ struct dpu_enc_phys_init_params *p);
+
+ /**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
++ * @dev: Corresponding device for devres management
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
++struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
+ struct dpu_enc_phys_init_params *p);
+
+ /**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
++ * @dev: Corresponding device for devres management
+ * @init: Pointer to init info structure with initialization params
+ */
+-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
++struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
+ struct dpu_enc_phys_init_params *p);
+
+ /**
+@@ -365,7 +366,7 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ */
+ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ int irq,
+- void (*func)(void *arg, int irq_idx),
++ void (*func)(void *arg),
+ struct dpu_encoder_wait_info *wait_info);
+
+ /**
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+index df88358e7037bf..83a804ebf8d7ef 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+@@ -13,6 +13,8 @@
+ #include "dpu_trace.h"
+ #include "disp/msm_disp_snapshot.h"
+
++#include <drm/drm_managed.h>
++
+ #define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+@@ -76,7 +78,7 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
+ }
+
+-static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
++static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+@@ -103,7 +105,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+ DPU_ATRACE_END("pp_done_irq");
+ }
+
+-static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
++static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+@@ -126,7 +128,7 @@ static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
+ DPU_ATRACE_END("rd_ptr_irq");
+ }
+
+-static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
++static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+
+@@ -139,7 +141,7 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+ DPU_ATRACE_END("ctl_start_irq");
+ }
+
+-static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
++static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+
+@@ -449,9 +451,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+- return;
+-
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+ }
+@@ -567,14 +566,6 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ }
+
+-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+-{
+- struct dpu_encoder_phys_cmd *cmd_enc =
+- to_dpu_encoder_phys_cmd(phys_enc);
+-
+- kfree(cmd_enc);
+-}
+-
+ static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+ {
+@@ -690,33 +681,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+ }
+
+-static int dpu_encoder_phys_cmd_wait_for_vblank(
+- struct dpu_encoder_phys *phys_enc)
+-{
+- int rc = 0;
+- struct dpu_encoder_phys_cmd *cmd_enc;
+- struct dpu_encoder_wait_info wait_info;
+-
+- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+-
+- /* only required for master controller */
+- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+- return rc;
+-
+- wait_info.wq = &cmd_enc->pending_vblank_wq;
+- wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+- wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+-
+- atomic_inc(&cmd_enc->pending_vblank_cnt);
+-
+- rc = dpu_encoder_helper_wait_for_irq(phys_enc,
+- phys_enc->irq[INTR_IDX_RDPTR],
+- dpu_encoder_phys_cmd_te_rd_ptr_irq,
+- &wait_info);
+-
+- return rc;
+-}
+-
+ static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+ {
+@@ -740,12 +704,10 @@ static void dpu_encoder_phys_cmd_init_ops(
+ ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+- ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+- ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+@@ -755,7 +717,7 @@ static void dpu_encoder_phys_cmd_init_ops(
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+ }
+
+-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
++struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
+ struct dpu_enc_phys_init_params *p)
+ {
+ struct dpu_encoder_phys *phys_enc = NULL;
+@@ -763,7 +725,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+
+ DPU_DEBUG("intf\n");
+
+- cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
++ cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ DPU_ERROR("failed to allocate\n");
+ return ERR_PTR(-ENOMEM);
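
In this hunk kzalloc() becomes drmm_kzalloc(), tying the allocation's lifetime to the drm_device so the kfree-only ->destroy hooks removed elsewhere in this patch become unnecessary. The following is a rough userspace analogue of that ownership model — a sketch of the pattern only, not the DRM managed-resource API; all names are invented.

/* Allocations are registered against an owning "device" and freed in
 * bulk when the device is released, so per-object destroy callbacks
 * disappear. Fixed-size table keeps the sketch simple. */
#include <stdlib.h>
#include <string.h>

struct managed_dev {
	void *allocs[16];
	int nr_allocs;
};

static void *devm_zalloc(struct managed_dev *dev, size_t size)
{
	void *p;

	if (dev->nr_allocs >= 16)
		return NULL;
	p = calloc(1, size);
	if (p)
		dev->allocs[dev->nr_allocs++] = p;
	return p;
}

static void managed_dev_release(struct managed_dev *dev)
{
	/* one teardown point replaces N destroy hooks */
	while (dev->nr_allocs > 0)
		free(dev->allocs[--dev->nr_allocs]);
}

int main(void)
{
	struct managed_dev dev = { 0 };
	char *enc = devm_zalloc(&dev, 64);

	if (enc)
		strcpy(enc, "cmd encoder state");
	managed_dev_release(&dev);	/* frees enc too */
	return 0;
}
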
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index c2189e58de6af2..daaf0e60475380 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -11,6 +11,8 @@
+ #include "dpu_trace.h"
+ #include "disp/msm_disp_snapshot.h"
+
++#include <drm/drm_managed.h>
++
+ #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
+@@ -100,6 +102,7 @@ static void drm_mode_to_intf_timing_params(
+ }
+
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
++ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+
+ /*
+ * for DP, divide the horizonal parameters by 2 when
+@@ -257,12 +260,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
++ mode.hskew >>= 1;
+
+ DPU_DEBUG_VIDENC(phys_enc,
+- "split_role %d, halve horizontal %d %d %d %d\n",
++ "split_role %d, halve horizontal %d %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+- mode.hsync_start, mode.hsync_end);
++ mode.hsync_start, mode.hsync_end,
++ mode.hskew);
+ }
+
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
+@@ -297,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
+ programmable_fetch_config(phys_enc, &timing_params);
+ }
+
+-static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
++static void dpu_encoder_phys_vid_vblank_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+@@ -334,7 +339,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+ DPU_ATRACE_END("vblank_irq");
+ }
+
+-static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
++static void dpu_encoder_phys_vid_underrun_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+
+@@ -438,13 +443,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+ }
+
+-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+-{
+- DPU_DEBUG_VIDENC(phys_enc, "\n");
+- kfree(phys_enc);
+-}
+-
+-static int dpu_encoder_phys_vid_wait_for_vblank(
++static int dpu_encoder_phys_vid_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+ {
+ struct dpu_encoder_wait_info wait_info;
+@@ -558,7 +557,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+ * scanout buffer) don't latch properly..
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
++ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+@@ -578,7 +577,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
++ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+@@ -681,11 +680,9 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+ ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+- ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
+- ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+- ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
++ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+@@ -694,7 +691,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+ ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
+ }
+
+-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
++struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
+ struct dpu_enc_phys_init_params *p)
+ {
+ struct dpu_encoder_phys *phys_enc = NULL;
+@@ -704,7 +701,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ return ERR_PTR(-EINVAL);
+ }
+
+- phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
++ phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
+ DPU_ERROR("failed to create encoder due to memory allocation error\n");
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+index 78037a697633b6..0a45c546b03f2b 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+@@ -8,6 +8,7 @@
+ #include <linux/debugfs.h>
+
+ #include <drm/drm_framebuffer.h>
++#include <drm/drm_managed.h>
+
+ #include "dpu_encoder_phys.h"
+ #include "dpu_formats.h"
+@@ -345,7 +346,11 @@ static void dpu_encoder_phys_wb_setup(
+
+ }
+
+-static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
++/**
++ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
++ * @arg: Pointer to writeback encoder
++ */
++static void dpu_encoder_phys_wb_done_irq(void *arg)
+ {
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+@@ -371,16 +376,6 @@ static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ }
+
+-/**
+- * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+- * @arg: Pointer to writeback encoder
+- * @irq_idx: interrupt index
+- */
+-static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+-{
+- _dpu_encoder_phys_wb_frame_done_helper(arg);
+-}
+-
+ /**
+ * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys: Pointer to physical encoder
+@@ -534,8 +529,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+ }
+
+ /* reset h/w before final flush */
+- if (phys_enc->hw_ctl->ops.clear_pending_flush)
+- phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
++ phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+
+ /*
+ * New CTL reset sequence from 5.0 MDP onwards.
+@@ -553,20 +547,6 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ }
+
+-/**
+- * dpu_encoder_phys_wb_destroy - destroy writeback encoder
+- * @phys_enc: Pointer to physical encoder
+- */
+-static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
+-{
+- if (!phys_enc)
+- return;
+-
+- DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+-
+- kfree(phys_enc);
+-}
+-
+ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+ {
+@@ -662,7 +642,6 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+ ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_wb_enable;
+ ops->disable = dpu_encoder_phys_wb_disable;
+- ops->destroy = dpu_encoder_phys_wb_destroy;
+ ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
+ ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
+@@ -678,9 +657,10 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+
+ /**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
++ * @dev: Corresponding device for devres management
+ * @p: Pointer to init info structure with initialization params
+ */
+-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
++struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
+ struct dpu_enc_phys_init_params *p)
+ {
+ struct dpu_encoder_phys *phys_enc = NULL;
+@@ -693,7 +673,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ return ERR_PTR(-EINVAL);
+ }
+
+- wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
++ wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL);
+ if (!wb_enc) {
+ DPU_ERROR("failed to allocate wb phys_enc enc\n");
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 713dfc0797181d..77d09f961d8669 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -250,14 +250,17 @@ static const uint32_t wb2_formats[] = {
+ * SSPP sub blocks config
+ *************************************************************/
+
++#define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min))
++
+ /* SSPP common configuration */
+-#define _VIG_SBLK(sdma_pri, qseed_ver) \
++#define _VIG_SBLK(sdma_pri, qseed_ver, scaler_ver) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .scaler_blk = {.name = "scaler", \
+ .id = qseed_ver, \
++ .version = scaler_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .id = DPU_SSPP_CSC_10BIT, \
+@@ -269,13 +272,14 @@ static const uint32_t wb2_formats[] = {
+ .rotation_cfg = NULL, \
+ }
+
+-#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
++#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, scaler_ver, rot_cfg) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .scaler_blk = {.name = "scaler", \
+ .id = qseed_ver, \
++ .version = scaler_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .id = DPU_SSPP_CSC_10BIT, \
+@@ -299,13 +303,17 @@ static const uint32_t wb2_formats[] = {
+ }
+
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+
+ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ .rot_maxheight = 1088,
+@@ -314,13 +322,30 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ };
+
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
++
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_0 =
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_1 =
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_2 =
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_3 =
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
+
+ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
+ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
+@@ -328,34 +353,60 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
+ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
+
+ static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+- _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+
+ static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
+- _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
++ _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0),
++ &dpu_rot_sc7280_cfg_v2);
+
+ static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
+- _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+
+ static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
+- _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
++ _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE,
++ SSPP_SCALER_VER(2, 4));
+
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
++
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
+
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
+- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
+- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
+- _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
+- _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
+ static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+index 6c9634209e9fc7..3f82d84bd1c907 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+@@ -269,7 +269,8 @@ enum {
+ /**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+- * @version: qseed block revision
++ * @version: qseed block revision, on QSEED3+ platforms this is the value of
++ * scaler_blk.base + QSEED3_HW_VERSION registers.
+ */
+ struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+index 1c242298ff2ee0..dca87ea78e251c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+@@ -81,7 +81,8 @@ struct dpu_hw_ctl_ops {
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+- * No effect on hardware
++ * No effect on hardware.
++ * Required to be implemented.
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+index e3c50439f80a13..c8d7929ce52323 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+@@ -197,8 +197,18 @@ static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
+ },
+ };
+
+-#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
+-#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
++#define DPU_IRQ_MASK(irq_idx) (BIT(DPU_IRQ_BIT(irq_idx)))
++
++static inline bool dpu_core_irq_is_valid(int irq_idx)
++{
++ return irq_idx >= 0 && irq_idx < DPU_NUM_IRQS;
++}
++
++static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
++ int irq_idx)
++{
++ return &intr->irq_tbl[irq_idx];
++}
+
+ /**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+@@ -207,17 +217,22 @@ static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
+ */
+ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
+ {
+- VERB("irq_idx=%d\n", irq_idx);
++ struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+
+- if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
+- DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
++ VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+
+- atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
++ if (!irq_entry->cb) {
++ DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
++ return;
++ }
++
++ atomic_inc(&irq_entry->count);
+
+ /*
+ * Perform registered function callback
+ */
+- dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
++ irq_entry->cb(irq_entry->arg);
+ }
+
+ irqreturn_t dpu_core_irq(struct msm_kms *kms)
+@@ -291,8 +306,9 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+ if (!intr)
+ return -EINVAL;
+
+- if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+- pr_err("invalid IRQ index: [%d]\n", irq_idx);
++ if (!dpu_core_irq_is_valid(irq_idx)) {
++ pr_err("invalid IRQ=[%d, %d]\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return -EINVAL;
+ }
+
+@@ -328,7 +344,8 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+- pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
++ pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
+
+ return 0;
+@@ -344,8 +361,9 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+ if (!intr)
+ return -EINVAL;
+
+- if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+- pr_err("invalid IRQ index: [%d]\n", irq_idx);
++ if (!dpu_core_irq_is_valid(irq_idx)) {
++ pr_err("invalid IRQ=[%d, %d]\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return -EINVAL;
+ }
+
+@@ -377,7 +395,8 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+- pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
++ pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
+
+ return 0;
+@@ -429,14 +448,8 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
+ if (!intr)
+ return 0;
+
+- if (irq_idx < 0) {
+- DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+- __builtin_return_address(0), irq_idx);
+- return 0;
+- }
+-
+- if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+- pr_err("invalid IRQ index: [%d]\n", irq_idx);
++ if (!dpu_core_irq_is_valid(irq_idx)) {
++ pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return 0;
+ }
+
+@@ -462,13 +475,12 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+ {
+ struct dpu_hw_intr *intr;
+- int nirq = MDP_INTR_MAX * 32;
+ unsigned int i;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+- intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
++ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+@@ -479,8 +491,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+
+ intr->hw.blk_addr = addr + m->mdp[0].base;
+
+- intr->total_irqs = nirq;
+-
+ intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
+ BIT(MDP_SSPP_TOP0_INTR2) |
+ BIT(MDP_SSPP_TOP0_HIST_INTR);
+@@ -507,42 +517,47 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+ }
+
+ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+- void (*irq_cb)(void *arg, int irq_idx),
++ void (*irq_cb)(void *arg),
+ void *irq_arg)
+ {
++ struct dpu_hw_intr_entry *irq_entry;
+ unsigned long irq_flags;
+ int ret;
+
+ if (!irq_cb) {
+- DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
++ DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return -EINVAL;
+ }
+
+- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
++ if (!dpu_core_irq_is_valid(irq_idx)) {
++ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ return -EINVAL;
+ }
+
+- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
++ VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+- if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
++ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
++ if (unlikely(WARN_ON(irq_entry->cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+- dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+- dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
++ irq_entry->arg = irq_arg;
++ irq_entry->cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+- irq_idx);
++		DPU_ERROR("Failed to enable IRQ=[%d, %d]\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ trace_dpu_irq_register_success(irq_idx);
+@@ -552,26 +567,30 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+
+ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
+ {
++ struct dpu_hw_intr_entry *irq_entry;
+ unsigned long irq_flags;
+ int ret;
+
+- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
++ if (!dpu_core_irq_is_valid(irq_idx)) {
++ DPU_ERROR("invalid IRQ=[%d, %d]\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return -EINVAL;
+ }
+
+- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
++ VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+- DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+- irq_idx, ret);
++ DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
++ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);
+
+- dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+- dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
++ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
++ irq_entry->cb = NULL;
++ irq_entry->arg = NULL;
+
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+@@ -584,18 +603,21 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
+ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+ {
+ struct dpu_kms *dpu_kms = s->private;
++ struct dpu_hw_intr_entry *irq_entry;
+ unsigned long irq_flags;
+ int i, irq_count;
+ void *cb;
+
+- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
++ for (i = 0; i < DPU_NUM_IRQS; i++) {
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+- irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+- cb = dpu_kms->hw_intr->irq_tbl[i].cb;
++ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
++ irq_count = atomic_read(&irq_entry->count);
++ cb = irq_entry->cb;
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (irq_count || cb)
+- seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
++ seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
++ DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
+ }
+
+ return 0;
+@@ -614,6 +636,7 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ void dpu_core_irq_preinstall(struct msm_kms *kms)
+ {
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
++ struct dpu_hw_intr_entry *irq_entry;
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+@@ -621,22 +644,28 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+- atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
++ for (i = 0; i < DPU_NUM_IRQS; i++) {
++ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
++ atomic_set(&irq_entry->count, 0);
++ }
+ }
+
+ void dpu_core_irq_uninstall(struct msm_kms *kms)
+ {
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
++ struct dpu_hw_intr_entry *irq_entry;
+ int i;
+
+ if (!dpu_kms->hw_intr)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+- if (dpu_kms->hw_intr->irq_tbl[i].cb)
+- DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
++ for (i = 0; i < DPU_NUM_IRQS; i++) {
++ irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
++ if (irq_entry->cb)
++ DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
++ DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
++ }
+
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+index dab761e548636f..9df5d6e737a116 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+@@ -37,6 +37,16 @@ enum dpu_hw_intr_reg {
+ #define MDP_INTFn_INTR(intf) (MDP_INTF0_INTR + (intf - INTF_0))
+
+ #define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
++#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
++#define DPU_IRQ_BIT(irq_idx) (irq_idx % 32)
++
++#define DPU_NUM_IRQS (MDP_INTR_MAX * 32)
++
++struct dpu_hw_intr_entry {
++ void (*cb)(void *arg);
++ void *arg;
++ atomic_t count;
++};
+
+ /**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+@@ -44,7 +54,6 @@ enum dpu_hw_intr_reg {
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+- * @total_irqs: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ * @irq_cb_tbl: array of IRQ callbacks
+ */
+@@ -52,16 +61,11 @@ struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ u32 cache_irq_mask[MDP_INTR_MAX];
+ u32 *save_irq_status;
+- u32 total_irqs;
+ spinlock_t irq_lock;
+ unsigned long irq_mask;
+ const struct dpu_intr_reg *intr_set;
+
+- struct {
+- void (*cb)(void *arg, int irq_idx);
+- void *arg;
+- atomic_t count;
+- } irq_tbl[];
++ struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS];
+ };
+
+ /**
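
The interrupt rework above trades a runtime-sized flexible array and two-argument (arg, irq_idx) callbacks for a fixed DPU_NUM_IRQS table and single-argument callbacks, with DPU_IRQ_REG()/DPU_IRQ_BIT() recovering the [register, bit] pair for logging. A compilable sketch of that bookkeeping, with MDP_INTR_MAX stubbed and the driver's spinlock omitted for brevity:

#include <stdio.h>

#define MDP_INTR_MAX		3	/* stand-in for the real enum count */
#define DPU_IRQ_IDX(reg, off)	((reg) * 32 + (off))
#define DPU_IRQ_REG(idx)	((idx) / 32)
#define DPU_IRQ_BIT(idx)	((idx) % 32)
#define DPU_NUM_IRQS		(MDP_INTR_MAX * 32)

struct irq_entry {
	void (*cb)(void *arg);	/* no irq_idx parameter any more */
	void *arg;
};

static struct irq_entry irq_tbl[DPU_NUM_IRQS];

static int irq_is_valid(int idx)
{
	return idx >= 0 && idx < DPU_NUM_IRQS;
}

static void dispatch(int idx)
{
	if (!irq_is_valid(idx) || !irq_tbl[idx].cb) {
		fprintf(stderr, "no registered cb, IRQ=[%d, %d]\n",
			DPU_IRQ_REG(idx), DPU_IRQ_BIT(idx));
		return;
	}
	irq_tbl[idx].cb(irq_tbl[idx].arg);
}

static void vblank_cb(void *arg)
{
	printf("vblank on %s\n", (const char *)arg);
}

int main(void)
{
	int idx = DPU_IRQ_IDX(1, 12);	/* register 1, bit 12 */

	irq_tbl[idx].cb = vblank_cb;
	irq_tbl[idx].arg = "intf0";
	dispatch(idx);
	dispatch(DPU_IRQ_IDX(2, 5));	/* unregistered: logs the error */
	return 0;
}
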
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+index 8ec6505d9e7860..9cdd2d8bf79ba1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+@@ -161,13 +161,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+- /*
+- * DATA_HCTL_EN controls data timing which can be different from
+- * video timing. It is recommended to enable it for all cases, except
+- * if compression is enabled in 1 pixel per clock mode
+- */
+ if (p->wide_bus_en)
+- intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
++ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
+
+ data_width = p->width;
+
+@@ -227,6 +222,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+ if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
++ /*
++ * DATA_HCTL_EN controls data timing which can be different from
++ * video timing. It is recommended to enable it for all cases, except
++ * if compression is enabled in 1 pixel per clock mode
++ */
++ if (!(p->compression_en && !p->wide_bus_en))
++ intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN;
++
+ DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+ DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
+@@ -318,9 +321,9 @@ static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+ }
+
+-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count)
++static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf)
+ {
+- dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count);
++ dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1);
+ }
+
+ static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
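
The relocated DATA_HCTL_EN logic above enables the bit in every case except compression at one pixel per clock, encoded as !(compression_en && !wide_bus_en). A short truth-table demo of that predicate:

#include <stdbool.h>
#include <stdio.h>

static bool data_hctl_en(bool compression_en, bool wide_bus_en)
{
	return !(compression_en && !wide_bus_en);
}

int main(void)
{
	/* only compression=1, widebus=0 leaves the bit clear */
	for (int c = 0; c <= 1; c++)
		for (int w = 0; w <= 1; w++)
			printf("compression=%d widebus=%d -> DATA_HCTL_EN=%d\n",
			       c, w, data_hctl_en(c, w));
	return 0;
}
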
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+index 77f80531782b59..192f4e67b1732f 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+@@ -33,6 +33,7 @@ struct dpu_hw_intf_timing_params {
+ u32 hsync_skew;
+
+ bool wide_bus_en;
++ bool compression_en;
+ };
+
+ struct dpu_hw_intf_prog_fetch {
+@@ -94,7 +95,7 @@ struct dpu_hw_intf_ops {
+
+ void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ const enum dpu_pingpong pp);
+- void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count);
++ void (*setup_misr)(struct dpu_hw_intf *intf);
+ int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
+
+ // Tearcheck on INTF since DPU 5.0.0
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+index d1c3bd8379ea94..a590c1f7465fbd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+@@ -81,9 +81,9 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ }
+ }
+
+-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
++static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
+ {
+- dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count);
++ dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
+ }
+
+ static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+index 36992d046a533b..98b77cda65472d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+@@ -1,5 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+@@ -57,7 +58,7 @@ struct dpu_hw_lm_ops {
+ /**
+ * setup_misr: Enable/disable MISR
+ */
+- void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count);
++ void (*setup_misr)(struct dpu_hw_mixer *ctx);
+
+ /**
+ * collect_misr: Read MISR signature
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+index 9d2273fd2fed58..6eee9f68ab4c7d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+@@ -481,9 +481,11 @@ void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
+ cfg->danger_safe_en ? QOS_QOS_CTRL_DANGER_SAFE_EN : 0);
+ }
+
++/*
++ * note: Aside from encoders, input_sel should be set to 0x0 by default
++ */
+ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+- u32 misr_ctrl_offset,
+- bool enable, u32 frame_count)
++ u32 misr_ctrl_offset, u8 input_sel)
+ {
+ u32 config = 0;
+
+@@ -492,15 +494,9 @@ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ /* Clear old MISR value (in case it's read before a new value is calculated)*/
+ wmb();
+
+- if (enable) {
+- config = (frame_count & MISR_FRAME_COUNT_MASK) |
+- MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK;
+-
+- DPU_REG_WRITE(c, misr_ctrl_offset, config);
+- } else {
+- DPU_REG_WRITE(c, misr_ctrl_offset, 0);
+- }
+-
++ config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
++ ((input_sel & 0xF) << 24);
++ DPU_REG_WRITE(c, misr_ctrl_offset, config);
+ }
+
+ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+index 1f6079f4707109..0aed54d7f6c942 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+@@ -13,7 +13,7 @@
+ #include "dpu_hw_catalog.h"
+
+ #define REG_MASK(n) ((BIT(n)) - 1)
+-#define MISR_FRAME_COUNT_MASK 0xFF
++#define MISR_FRAME_COUNT 0x1
+ #define MISR_CTRL_ENABLE BIT(8)
+ #define MISR_CTRL_STATUS BIT(9)
+ #define MISR_CTRL_STATUS_CLEAR BIT(10)
+@@ -358,9 +358,7 @@ void _dpu_hw_setup_qos_lut(struct dpu_hw_blk_reg_map *c, u32 offset,
+ const struct dpu_hw_qos_cfg *cfg);
+
+ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+- u32 misr_ctrl_offset,
+- bool enable,
+- u32 frame_count);
++ u32 misr_ctrl_offset, u8 input_sel);
+
+ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
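
After this change dpu_hw_setup_misr() always programs an enabled, free-running MISR with a frame count of 1 and a caller-chosen input_sel in bits 27:24. Below is a sketch of the control-word composition; MISR_CTRL_FREE_RUN_MASK's bit position is not visible in this hunk, so BIT(31) is an assumption here.

#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define MISR_FRAME_COUNT	0x1
#define MISR_CTRL_ENABLE	BIT(8)
#define MISR_CTRL_FREE_RUN_MASK	BIT(31)	/* assumed position */

static uint32_t misr_ctrl_word(uint8_t input_sel)
{
	return MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
	       ((uint32_t)(input_sel & 0xF) << 24);
}

int main(void)
{
	/* the intf path passes 0x1, layer mixers pass 0x0, as above */
	assert(misr_ctrl_word(0x1) == (0x1 | BIT(8) | BIT(31) | (1u << 24)));
	assert(misr_ctrl_word(0x0) == (0x1 | BIT(8) | BIT(31)));
	return 0;
}
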
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+index ebc41640038220..0aa598b355e9ec 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+@@ -86,6 +86,9 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+ dst_format |= BIT(14); /* DST_ALPHA_X */
+ }
+
++ if (DPU_FORMAT_IS_YUV(fmt))
++ dst_format |= BIT(15);
++
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index aa6ba2cf4b8406..6ba289e04b3b22 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -490,7 +490,7 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+- ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
++ ret = dpu_encoder_wait_for_commit_done(encoder);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+index b6f53ca6e96285..8cb3cf842c52c0 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+@@ -31,26 +31,17 @@
+ * @fmt: Pointer to format string
+ */
+ #define DPU_DEBUG(fmt, ...) \
+- do { \
+- if (drm_debug_enabled(DRM_UT_KMS)) \
+- DRM_DEBUG(fmt, ##__VA_ARGS__); \
+- else \
+- pr_debug(fmt, ##__VA_ARGS__); \
+- } while (0)
++ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
+ /**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+ #define DPU_DEBUG_DRIVER(fmt, ...) \
+- do { \
+- if (drm_debug_enabled(DRM_UT_DRIVER)) \
+- DRM_ERROR(fmt, ##__VA_ARGS__); \
+- else \
+- pr_debug(fmt, ##__VA_ARGS__); \
+- } while (0)
++ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
+ #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
++#define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
+
+ /**
+ * ktime_compare_safe - compare two ktime structures
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index 0be195f9149c56..637f50a8d42e62 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -679,6 +679,9 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
++ if (pstate->aspace)
++ msm_framebuffer_cleanup(new_state->fb, pstate->aspace,
++ pstate->needs_dirtyfb);
+ return ret;
+ }
+
+@@ -792,6 +795,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
+ plane);
+ int ret = 0, min_scale;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
++ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
++ u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+@@ -860,14 +865,20 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
+
+ max_linewidth = pdpu->catalog->caps->max_linewidth;
+
+- if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
++ drm_rect_rotate(&pipe_cfg->src_rect,
++ new_plane_state->fb->width, new_plane_state->fb->height,
++ new_plane_state->rotation);
++
++ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
++ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
+ /*
+ * In parallel multirect case only the half of the usual width
+ * is supported for tiled formats. If we are here, we know that
+ * full width is more than max_linewidth, thus each rect is
+ * wider than allowed.
+ */
+- if (DPU_FORMAT_IS_UBWC(fmt)) {
++ if (DPU_FORMAT_IS_UBWC(fmt) &&
++ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
+@@ -907,6 +918,14 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
+ r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ }
+
++ drm_rect_rotate_inv(&pipe_cfg->src_rect,
++ new_plane_state->fb->width, new_plane_state->fb->height,
++ new_plane_state->rotation);
++ if (r_pipe->sspp)
++ drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
++ new_plane_state->fb->width, new_plane_state->fb->height,
++ new_plane_state->rotation);
++
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
+ if (ret)
+ return ret;
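
The atomic check above rotates the source rectangle into frame coordinates before comparing against max_linewidth, because a plane wider than one SSPP can scan out must be split across two pipes in parallel multirect mode. A simplified sketch of that split, with an illustrative rect type standing in for drm_rect:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static int rect_width(const struct rect *r) { return r->x2 - r->x1; }

/* fills l/r on success; -1 if even the halves exceed the limit */
static int split_wide_src(const struct rect *src, int max_linewidth,
			  struct rect *l, struct rect *r)
{
	int half = rect_width(src) / 2;

	*l = *src;
	l->x2 = src->x1 + half;
	*r = *src;
	r->x1 = l->x2;	/* right rect starts where the left one ends */
	if (rect_width(l) > max_linewidth || rect_width(r) > max_linewidth)
		return -1;
	return 0;
}

int main(void)
{
	struct rect src = { 0, 0, 5120, 1440 }, l, r;

	if (split_wide_src(&src, 2560, &l, &r))
		return 1;
	printf("left: [%d..%d)  right: [%d..%d)\n", l.x1, l.x2, r.x1, r.x2);
	return 0;
}
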
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+index 169f9de4a12a73..3100957225a70f 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+@@ -269,6 +269,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ {
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
++ unsigned long flags;
+
+ DBG("%s", mdp4_crtc->name);
+
+@@ -281,6 +282,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+
++ if (crtc->state->event && !crtc->state->active) {
++ WARN_ON(mdp4_crtc->event);
++ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
++ drm_crtc_send_vblank_event(crtc, crtc->state->event);
++ crtc->state->event = NULL;
++ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
++ }
++
+ mdp4_crtc->enabled = false;
+ }
+
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+index 56a3063545ec46..12d07e93a4c47e 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+@@ -356,7 +356,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+- plane ? plane->name : NULL);
++ plane ? plane->name : "(null)");
+
+ total += inuse;
+ }
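
The mdp5_smp.c one-liner above replaces a NULL %s argument with an explicit "(null)" placeholder: printing NULL through %s is undefined behaviour that only some libc implementations paper over. A minimal demonstration of the guard:

#include <stdio.h>

static const char *name_or_null(int have_plane)
{
	return have_plane ? "plane-a" : NULL;
}

int main(void)
{
	const char *n = name_or_null(0);

	/* printf("%s\n", n); -- undefined behaviour when n == NULL */
	printf("%s\n", n ? n : "(null)");	/* always well-defined */
	return 0;
}
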
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
+index 8e3b677f35e64f..559809a5cbcfb1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.c
++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
+@@ -35,6 +35,7 @@ struct dp_aux_private {
+ bool no_send_stop;
+ bool initted;
+ bool is_edp;
++ bool enable_xfers;
+ u32 offset;
+ u32 segment;
+
+@@ -297,6 +298,17 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ goto exit;
+ }
+
++ /*
++ * If we're using DP and an external display isn't connected then the
++ * transfer won't succeed. Return right away. If we don't do this we
++ * can end up with long timeouts if someone tries to access the DP AUX
++ * character device when no DP device is connected.
++ */
++ if (!aux->is_edp && !aux->enable_xfers) {
++ ret = -ENXIO;
++ goto exit;
++ }
++
+ /*
+ * For eDP it's important to give a reasonably long wait here for HPD
+ * to be asserted. This is because the panel driver may have _just_
+@@ -428,6 +440,14 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
+ return IRQ_HANDLED;
+ }
+
++void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
++{
++ struct dp_aux_private *aux;
++
++ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
++ aux->enable_xfers = enabled;
++}
++
+ void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+ {
+ struct dp_aux_private *aux;
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
+index 511305da4f66df..f3052cb43306bc 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.h
++++ b/drivers/gpu/drm/msm/dp/dp_aux.h
+@@ -12,6 +12,7 @@
+ int dp_aux_register(struct drm_dp_aux *dp_aux);
+ void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
++void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
+ void dp_aux_init(struct drm_dp_aux *dp_aux);
+ void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+ void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
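
dp_aux_enable_xfers() above lets the hotplug path gate AUX traffic so that, with no external DP sink attached, transfers fail immediately with -ENXIO instead of waiting out long timeouts on the AUX character device. A sketch of that gate with illustrative names (aux, aux_transfer):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct aux {
	bool is_edp;		/* eDP panels are always "connected" */
	bool enable_xfers;	/* toggled by hotplug, as in the patch */
};

static int aux_transfer(struct aux *aux, const void *buf, int len)
{
	(void)buf;
	if (!aux->is_edp && !aux->enable_xfers)
		return -ENXIO;	/* fail fast: nothing to talk to */
	return len;		/* pretend the transfer succeeded */
}

int main(void)
{
	struct aux aux = { .is_edp = false, .enable_xfers = false };

	printf("before plug: %d\n", aux_transfer(&aux, "x", 1));
	aux.enable_xfers = true;	/* the plug handler's job */
	printf("after plug:  %d\n", aux_transfer(&aux, "x", 1));
	return 0;
}
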
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index 77a8d9366ed7b0..7472dfd631b837 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+ tbd = dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->dp_mode.bpp);
+
+- if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+- pr_debug("BIT_DEPTH not set. Configure default\n");
+- tbd = DP_TEST_BIT_DEPTH_8;
+- }
+-
+ config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+
+ /* Num of Lanes */
+@@ -1024,14 +1019,14 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+ if (ret)
+ return ret;
+
+- if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
++ if (voltage_swing_level >= DP_TRAIN_LEVEL_MAX) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "max. voltage swing level reached %d\n",
+ voltage_swing_level);
+ max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+ }
+
+- if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
++ if (pre_emphasis_level >= DP_TRAIN_LEVEL_MAX) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "max. pre-emphasis level reached %d\n",
+ pre_emphasis_level);
+@@ -1122,7 +1117,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+ }
+
+ if (ctrl->link->phy_params.v_level >=
+- DP_TRAIN_VOLTAGE_SWING_MAX) {
++ DP_TRAIN_LEVEL_MAX) {
+ DRM_ERROR_RATELIMITED("max v_level reached\n");
+ return -EAGAIN;
+ }
+@@ -1258,6 +1253,8 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+ link_info.rate = ctrl->link->link_params.rate;
+ link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
+
++ dp_link_reset_phy_params_vx_px(ctrl->link);
++
+ dp_aux_link_configure(ctrl->aux, &link_info);
+
+ if (drm_dp_max_downspread(dpcd))
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 76f13954015b18..ed77c957eceba3 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -171,6 +171,11 @@ static const struct msm_dp_desc sm8350_dp_descs[] = {
+ {}
+ };
+
++static const struct msm_dp_desc sm8650_dp_descs[] = {
++ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
++ {}
++};
++
+ static const struct of_device_id dp_dt_match[] = {
+ { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
+ { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
+@@ -181,6 +186,7 @@ static const struct of_device_id dp_dt_match[] = {
+ { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs },
+ { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
+ { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs },
++ { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs },
+ {}
+ };
+
+@@ -580,6 +586,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ u32 state;
+ int ret;
+
++ dp_aux_enable_xfers(dp->aux, true);
++
+ mutex_lock(&dp->event_mutex);
+
+ state = dp->hpd_state;
+@@ -636,6 +644,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ {
+ u32 state;
+
++ dp_aux_enable_xfers(dp->aux, false);
++
+ mutex_lock(&dp->event_mutex);
+
+ state = dp->hpd_state;
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index 6375daaeb98e1c..a198af7b2d4499 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -7,6 +7,7 @@
+
+ #include <drm/drm_print.h>
+
++#include "dp_reg.h"
+ #include "dp_link.h"
+ #include "dp_panel.h"
+
+@@ -1114,7 +1115,7 @@ int dp_link_process_request(struct dp_link *dp_link)
+
+ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ {
+- u32 cc;
++ u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+@@ -1128,10 +1129,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ * Unless a video pattern CTS test is ongoing, use RGB_VESA
+ * Only RGB_VESA and RGB_CEA supported for now
+ */
+- if (dp_link_is_video_pattern_requested(link))
+- cc = link->dp_link.test_video.test_dyn_range;
+- else
+- cc = DP_TEST_DYNAMIC_RANGE_VESA;
++ if (dp_link_is_video_pattern_requested(link)) {
++ if (link->dp_link.test_video.test_dyn_range &
++ DP_TEST_DYNAMIC_RANGE_CEA)
++ cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
++ }
+
+ return cc;
+ }
+@@ -1139,6 +1141,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+ {
+ int i;
++ u8 max_p_level;
+ int v_max = 0, p_max = 0;
+ struct dp_link_private *link;
+
+@@ -1170,30 +1173,29 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+ * Adjust the voltage swing and pre-emphasis level combination to within
+ * the allowable range.
+ */
+- if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
++ if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
+ drm_dbg_dp(link->drm_dev,
+ "Requested vSwingLevel=%d, change to %d\n",
+ dp_link->phy_params.v_level,
+- DP_TRAIN_VOLTAGE_SWING_MAX);
+- dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
++ DP_TRAIN_LEVEL_MAX);
++ dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
+ }
+
+- if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
++ if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
+ drm_dbg_dp(link->drm_dev,
+ "Requested preEmphasisLevel=%d, change to %d\n",
+ dp_link->phy_params.p_level,
+- DP_TRAIN_PRE_EMPHASIS_MAX);
+- dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
++ DP_TRAIN_LEVEL_MAX);
++ dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
+ }
+
+- if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+- && (dp_link->phy_params.v_level ==
+- DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
++ max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
++ if (dp_link->phy_params.p_level > max_p_level) {
+ drm_dbg_dp(link->drm_dev,
+ "Requested preEmphasisLevel=%d, change to %d\n",
+ dp_link->phy_params.p_level,
+- DP_TRAIN_PRE_EMPHASIS_LVL_1);
+- dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
++ max_p_level);
++ dp_link->phy_params.p_level = max_p_level;
+ }
+
+ drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
+@@ -1211,6 +1213,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
+ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+ {
+ u32 tbd;
++ struct dp_link_private *link;
++
++ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ /*
+ * Few simplistic rules and assumptions made here:
+@@ -1228,12 +1233,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+ tbd = DP_TEST_BIT_DEPTH_10;
+ break;
+ default:
+- tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
++ drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
++ bpp);
++ tbd = DP_TEST_BIT_DEPTH_8;
+ break;
+ }
+
+- if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+- tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
++ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+
+ return tbd;
+ }
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
+index 9dd4dd92653046..79c3a02b8dacd7 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.h
++++ b/drivers/gpu/drm/msm/dp/dp_link.h
+@@ -19,19 +19,7 @@ struct dp_link_info {
+ unsigned long capabilities;
+ };
+
+-enum dp_link_voltage_level {
+- DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0,
+- DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1,
+- DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2,
+- DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2,
+-};
+-
+-enum dp_link_preemaphasis_level {
+- DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0,
+- DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1,
+- DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2,
+- DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2,
+-};
++#define DP_TRAIN_LEVEL_MAX 3
+
+ struct dp_link_test_video {
+ u32 test_video_pattern;
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index 42d52510ffd4ab..d26589eb8b218b 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -136,22 +136,22 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
+ {
+- struct dp_link_info *link_info;
++ const struct dp_link_info *link_info;
+ const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+- u32 bpp = 0, data_rate_khz = 0;
++ u32 bpp, data_rate_khz;
+
+- bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
++ bpp = min(mode_edid_bpp, max_supported_bpp);
+
+ link_info = &dp_panel->link_info;
+ data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+- while (bpp > min_supported_bpp) {
++ do {
+ if (mode_pclk_khz * bpp <= data_rate_khz)
+- break;
++ return bpp;
+ bpp -= 6;
+- }
++ } while (bpp > min_supported_bpp);
+
+- return bpp;
++ return min_supported_bpp;
+ }
+
+ static int dp_panel_update_modes(struct drm_connector *connector,
+@@ -289,26 +289,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
+
+ static u8 dp_panel_get_edid_checksum(struct edid *edid)
+ {
+- struct edid *last_block;
+- u8 *raw_edid;
+- bool is_edid_corrupt = false;
+-
+- if (!edid) {
+- DRM_ERROR("invalid edid input\n");
+- return 0;
+- }
+-
+- raw_edid = (u8 *)edid;
+- raw_edid += (edid->extensions * EDID_LENGTH);
+- last_block = (struct edid *)raw_edid;
+-
+- /* block type extension */
+- drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+- if (!is_edid_corrupt)
+- return last_block->checksum;
++ edid += edid->extensions;
+
+- DRM_ERROR("Invalid block, no checksum\n");
+- return 0;
++ return edid->checksum;
+ }
+
+ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+@@ -461,8 +444,9 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+ drm_mode->clock);
+ drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
+
+- dp_panel->dp_mode.bpp = max_t(u32, 18,
+- min_t(u32, dp_panel->dp_mode.bpp, 30));
++ dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp,
++ dp_panel->dp_mode.drm_mode.clock);
++
+ drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
+ dp_panel->dp_mode.bpp);
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
+index ea85a691e72b5c..78785ed4b40c49 100644
+--- a/drivers/gpu/drm/msm/dp/dp_reg.h
++++ b/drivers/gpu/drm/msm/dp/dp_reg.h
+@@ -143,6 +143,9 @@
+ #define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
+ #define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+
++#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
++#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
++
+ #define REG_DP_VALID_BOUNDARY (0x00000030)
+ #define REG_DP_VALID_BOUNDARY_2 (0x00000034)
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index baab79ab6e745e..32f965bacdc309 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -126,6 +126,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+
++ msm_dsi_tx_buf_free(msm_dsi->host);
+ priv->dsi[msm_dsi->id] = NULL;
+ }
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
+index bd3763a5d72340..3b46617a59f202 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.h
++++ b/drivers/gpu/drm/msm/dsi/dsi.h
+@@ -125,6 +125,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+ void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+ void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+ void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
+ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 3d6fb708dc223e..77b805eacb1b18 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -147,6 +147,7 @@ struct msm_dsi_host {
+
+ /* DSI 6G TX buffer*/
+ struct drm_gem_object *tx_gem_obj;
++ struct msm_gem_address_space *aspace;
+
+ /* DSI v2 TX buffer */
+ void *tx_buf;
+@@ -365,8 +366,8 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
+ {
+ int ret;
+
+- DBG("Set clk rates: pclk=%d, byteclk=%lu",
+- msm_host->mode->clock, msm_host->byte_clk_rate);
++ DBG("Set clk rates: pclk=%lu, byteclk=%lu",
++ msm_host->pixel_clk_rate, msm_host->byte_clk_rate);
+
+ ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
+ msm_host->byte_clk_rate);
+@@ -439,9 +440,9 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
+ {
+ int ret;
+
+- DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
+- msm_host->mode->clock, msm_host->byte_clk_rate,
+- msm_host->esc_clk_rate, msm_host->src_clk_rate);
++ DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
++ msm_host->pixel_clk_rate, msm_host->byte_clk_rate,
++ msm_host->esc_clk_rate, msm_host->src_clk_rate);
+
+ ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ if (ret) {
+@@ -831,6 +832,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ u32 slice_per_intf, total_bytes_per_intf;
+ u32 pkt_per_line;
+ u32 eol_byte_num;
++ u32 bytes_per_pkt;
+
+ /* first calculate dsc parameters and then program
+ * compress mode registers
+@@ -838,6 +840,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
+
+ total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
++ bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
+
+ eol_byte_num = total_bytes_per_intf % 3;
+
+@@ -875,6 +878,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+ } else {
++ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt);
+ dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
+ }
+ }
+@@ -1111,8 +1115,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+ uint64_t iova;
+ u8 *data;
+
++ msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
++
+ data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+- priv->kms->aspace,
++ msm_host->aspace,
+ &msm_host->tx_gem_obj, &iova);
+
+ if (IS_ERR(data)) {
+@@ -1141,10 +1147,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+ return 0;
+ }
+
+-static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
++void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+ {
++ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_device *dev = msm_host->dev;
+- struct msm_drm_private *priv;
+
+ /*
+ * This is possible if we're tearing down before we've had a chance to
+@@ -1155,11 +1161,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+ if (!dev)
+ return;
+
+- priv = dev->dev_private;
+ if (msm_host->tx_gem_obj) {
+- msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+- drm_gem_object_put(msm_host->tx_gem_obj);
++ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
++ msm_gem_address_space_put(msm_host->aspace);
+ msm_host->tx_gem_obj = NULL;
++ msm_host->aspace = NULL;
+ }
+
+ if (msm_host->tx_buf)
+@@ -1945,7 +1951,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+- dsi_tx_buf_free(msm_host);
+ if (msm_host->workqueue) {
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index 05621e5e7d6343..e49ebd9f6326f3 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -516,7 +516,9 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+ struct device *dev = &phy->pdev->dev;
+ int ret;
+
+- pm_runtime_get_sync(dev);
++ ret = pm_runtime_resume_and_get(dev);
++ if (ret)
++ return ret;
+
+ ret = clk_prepare_enable(phy->ahb_clk);
+ if (ret) {
+@@ -689,6 +691,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ "Unable to get ahb clk\n");
+
++ ret = devm_pm_runtime_enable(&pdev->dev);
++ if (ret)
++ return ret;
++
+ /* PLL init will call into clk_register which requires
+ * register access, so we need to enable power and ahb clock.
+ */
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 3b1ed02f644d28..f72ce6a3c456d5 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+- } else {
++ } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+@@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
++ } else {
++ /* 4.2, 4.3 */
++ if (pll_freq <= 1000000000ULL)
++ config->pll_clock_inverters = 0xa0;
++ else if (pll_freq <= 2500000000ULL)
++ config->pll_clock_inverters = 0x20;
++ else if (pll_freq <= 3500000000ULL)
++ config->pll_clock_inverters = 0x00;
++ else
++ config->pll_clock_inverters = 0x40;
+ }
+
+ config->decimal_div_start = dec;
+@@ -918,7 +928,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x45;
+- vreg_ctrl_1 = 0x45;
++ vreg_ctrl_1 = 0x41;
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x00;
+ } else {
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 02fd6c7d0bb7b9..48e1a8c6942c9f 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -74,18 +74,6 @@ enum msm_dsi_controller {
+ #define MSM_GPU_MAX_RINGS 4
+ #define MAX_H_TILES_PER_DISPLAY 2
+
+-/**
+- * enum msm_event_wait - type of HW events to wait for
+- * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
+- * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
+- * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
+- */
+-enum msm_event_wait {
+- MSM_ENC_COMMIT_DONE = 0,
+- MSM_ENC_TX_COMPLETE,
+- MSM_ENC_VBLANK,
+-};
+-
+ /**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
+index 5f68e31a3e4e1c..0915f3b68752e3 100644
+--- a/drivers/gpu/drm/msm/msm_gem_prime.c
++++ b/drivers/gpu/drm/msm/msm_gem_prime.c
+@@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ {
+ void *vaddr;
+
+- vaddr = msm_gem_get_vaddr(obj);
++ vaddr = msm_gem_get_vaddr_locked(obj);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ iosys_map_set_vaddr(map, vaddr);
+@@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+
+ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+ {
+- msm_gem_put_vaddr(obj);
++ msm_gem_put_vaddr_locked(obj);
+ }
+
+ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+index f38296ad87434e..0641f5bb8649ac 100644
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -76,7 +76,7 @@ static bool
+ wait_for_idle(struct drm_gem_object *obj)
+ {
+ enum dma_resv_usage usage = dma_resv_usage_rw(true);
+- return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
++ return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
+ }
+
+ static bool
+diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
+index 7f64c66673002f..5a7541597d0ce8 100644
+--- a/drivers/gpu/drm/msm/msm_gpu.c
++++ b/drivers/gpu/drm/msm/msm_gpu.c
+@@ -749,12 +749,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned long flags;
+
+- pm_runtime_get_sync(&gpu->pdev->dev);
++ WARN_ON(!mutex_is_locked(&gpu->lock));
+
+- mutex_lock(&gpu->lock);
++ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ msm_gpu_hw_init(gpu);
+
++ submit->seqno = submit->hw_fence->seqno;
++
+ update_sw_cntrs(gpu);
+
+ /*
+@@ -779,11 +781,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ gpu->funcs->submit(gpu, submit);
+ gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+
+- hangcheck_timer_reset(gpu);
+-
+- mutex_unlock(&gpu->lock);
+-
+ pm_runtime_put(&gpu->pdev->dev);
++ hangcheck_timer_reset(gpu);
+ }
+
+ /*
+@@ -928,7 +927,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ if (IS_ERR(gpu->gpu_cx))
+ gpu->gpu_cx = NULL;
+
+- gpu->pdev = pdev;
+ platform_set_drvdata(pdev, &gpu->adreno_smmu);
+
+ msm_devfreq_init(gpu);
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
+index 5cc8d358cc9759..d5512037c38bcd 100644
+--- a/drivers/gpu/drm/msm/msm_iommu.c
++++ b/drivers/gpu/drm/msm/msm_iommu.c
+@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
+ struct msm_mmu base;
+ struct msm_mmu *parent;
+ struct io_pgtable_ops *pgtbl_ops;
++ const struct iommu_flush_ops *tlb;
++ struct device *iommu_dev;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
+ phys_addr_t ttbr;
+ u32 asid;
+@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
+
+ static void msm_iommu_tlb_flush_all(void *cookie)
+ {
++ struct msm_iommu_pagetable *pagetable = cookie;
++ struct adreno_smmu_priv *adreno_smmu;
++
++ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
++ return;
++
++ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
++
++ pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
++
++ pm_runtime_put_autosuspend(pagetable->iommu_dev);
+ }
+
+ static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+ {
++ struct msm_iommu_pagetable *pagetable = cookie;
++ struct adreno_smmu_priv *adreno_smmu;
++
++ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
++ return;
++
++ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
++
++ pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
++
++ pm_runtime_put_autosuspend(pagetable->iommu_dev);
+ }
+
+ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ {
+ }
+
+-static const struct iommu_flush_ops null_tlb_ops = {
++static const struct iommu_flush_ops tlb_ops = {
+ .tlb_flush_all = msm_iommu_tlb_flush_all,
+ .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+ .tlb_add_page = msm_iommu_tlb_add_page,
+@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+
+ /* The incoming cfg will have the TTBR1 quirk enabled */
+ ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+- ttbr0_cfg.tlb = &null_tlb_ops;
++ ttbr0_cfg.tlb = &tlb_ops;
+
+ pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+- &ttbr0_cfg, iommu->domain);
++ &ttbr0_cfg, pagetable);
+
+ if (!pagetable->pgtbl_ops) {
+ kfree(pagetable);
+@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+
+ /* Needed later for TLB flush */
+ pagetable->parent = parent;
++ pagetable->tlb = ttbr1_cfg->tlb;
++ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
+ pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
+ pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+
+diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
+index 348c66b1468341..83b49d489cb1c5 100644
+--- a/drivers/gpu/drm/msm/msm_mdss.c
++++ b/drivers/gpu/drm/msm/msm_mdss.c
+@@ -28,6 +28,8 @@
+
+ #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+
++#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
++
+ struct msm_mdss {
+ struct device *dev;
+
+@@ -40,8 +42,9 @@ struct msm_mdss {
+ struct irq_domain *domain;
+ } irq_controller;
+ const struct msm_mdss_data *mdss_data;
+- struct icc_path *path[2];
+- u32 num_paths;
++ struct icc_path *mdp_path[2];
++ u32 num_mdp_paths;
++ struct icc_path *reg_bus_path;
+ };
+
+ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+@@ -49,38 +52,34 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+ {
+ struct icc_path *path0;
+ struct icc_path *path1;
++ struct icc_path *reg_bus_path;
+
+- path0 = of_icc_get(dev, "mdp0-mem");
++ path0 = devm_of_icc_get(dev, "mdp0-mem");
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+- msm_mdss->path[0] = path0;
+- msm_mdss->num_paths = 1;
++ msm_mdss->mdp_path[0] = path0;
++ msm_mdss->num_mdp_paths = 1;
+
+- path1 = of_icc_get(dev, "mdp1-mem");
++ path1 = devm_of_icc_get(dev, "mdp1-mem");
+ if (!IS_ERR_OR_NULL(path1)) {
+- msm_mdss->path[1] = path1;
+- msm_mdss->num_paths++;
++ msm_mdss->mdp_path[1] = path1;
++ msm_mdss->num_mdp_paths++;
+ }
+
+- return 0;
+-}
+-
+-static void msm_mdss_put_icc_path(void *data)
+-{
+- struct msm_mdss *msm_mdss = data;
+- int i;
++ reg_bus_path = of_icc_get(dev, "cpu-cfg");
++ if (!IS_ERR_OR_NULL(reg_bus_path))
++ msm_mdss->reg_bus_path = reg_bus_path;
+
+- for (i = 0; i < msm_mdss->num_paths; i++)
+- icc_put(msm_mdss->path[i]);
++ return 0;
+ }
+
+ static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+ {
+ int i;
+
+- for (i = 0; i < msm_mdss->num_paths; i++)
+- icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
++ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
++ icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(bw));
+ }
+
+ static void msm_mdss_irq(struct irq_desc *desc)
+@@ -245,6 +244,13 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
+ */
+ msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+
++ if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
++ icc_set_bw(msm_mdss->reg_bus_path, 0,
++ msm_mdss->mdss_data->reg_bus_bw);
++ else
++ icc_set_bw(msm_mdss->reg_bus_path, 0,
++ DEFAULT_REG_BW);
++
+ ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
+ if (ret) {
+ dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
+@@ -298,6 +304,9 @@ static int msm_mdss_disable(struct msm_mdss *msm_mdss)
+ clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+ msm_mdss_icc_request_bw(msm_mdss, 0);
+
++ if (msm_mdss->reg_bus_path)
++ icc_set_bw(msm_mdss->reg_bus_path, 0, 0);
++
+ return 0;
+ }
+
+@@ -384,6 +393,8 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
+ if (!msm_mdss)
+ return ERR_PTR(-ENOMEM);
+
++ msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
++
+ msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
+ if (IS_ERR(msm_mdss->mmio))
+ return ERR_CAST(msm_mdss->mmio);
+@@ -391,9 +402,6 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
+ dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+
+ ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
+- if (ret)
+- return ERR_PTR(ret);
+- ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
+@@ -477,8 +485,6 @@ static int mdss_probe(struct platform_device *pdev)
+ if (IS_ERR(mdss))
+ return PTR_ERR(mdss);
+
+- mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+-
+ platform_set_drvdata(pdev, mdss);
+
+ /*
+@@ -512,18 +518,21 @@ static const struct msm_mdss_data msm8998_data = {
+ .ubwc_enc_version = UBWC_1_0,
+ .ubwc_dec_version = UBWC_1_0,
+ .highest_bank_bit = 2,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data qcm2290_data = {
+ /* no UBWC */
+ .highest_bank_bit = 0x2,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sc7180_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .ubwc_static = 0x1e,
+- .highest_bank_bit = 0x3,
++ .highest_bank_bit = 0x1,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sc7280_data = {
+@@ -533,6 +542,7 @@ static const struct msm_mdss_data sc7280_data = {
+ .ubwc_static = 1,
+ .highest_bank_bit = 1,
+ .macrotile_mode = 1,
++ .reg_bus_bw = 74000,
+ };
+
+ static const struct msm_mdss_data sc8180x_data = {
+@@ -540,6 +550,7 @@ static const struct msm_mdss_data sc8180x_data = {
+ .ubwc_dec_version = UBWC_3_0,
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sc8280xp_data = {
+@@ -549,12 +560,14 @@ static const struct msm_mdss_data sc8280xp_data = {
+ .ubwc_static = 1,
+ .highest_bank_bit = 2,
+ .macrotile_mode = 1,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sdm845_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 2,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sm6350_data = {
+@@ -563,12 +576,14 @@ static const struct msm_mdss_data sm6350_data = {
+ .ubwc_swizzle = 6,
+ .ubwc_static = 0x1e,
+ .highest_bank_bit = 1,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sm8150_data = {
+ .ubwc_enc_version = UBWC_3_0,
+ .ubwc_dec_version = UBWC_3_0,
+ .highest_bank_bit = 2,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sm6115_data = {
+@@ -577,6 +592,7 @@ static const struct msm_mdss_data sm6115_data = {
+ .ubwc_swizzle = 7,
+ .ubwc_static = 0x11f,
+ .highest_bank_bit = 0x1,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sm6125_data = {
+@@ -584,6 +600,7 @@ static const struct msm_mdss_data sm6125_data = {
+ .ubwc_dec_version = UBWC_3_0,
+ .ubwc_swizzle = 1,
+ .highest_bank_bit = 1,
++ .reg_bus_bw = 76800,
+ };
+
+ static const struct msm_mdss_data sm8250_data = {
+@@ -594,6 +611,18 @@ static const struct msm_mdss_data sm8250_data = {
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
++ .reg_bus_bw = 76800,
++};
++
++static const struct msm_mdss_data sm8350_data = {
++ .ubwc_enc_version = UBWC_4_0,
++ .ubwc_dec_version = UBWC_4_0,
++ .ubwc_swizzle = 6,
++ .ubwc_static = 1,
++ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
++ .highest_bank_bit = 3,
++ .macrotile_mode = 1,
++ .reg_bus_bw = 74000,
+ };
+
+ static const struct msm_mdss_data sm8550_data = {
+@@ -604,6 +633,7 @@ static const struct msm_mdss_data sm8550_data = {
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
++ .reg_bus_bw = 57000,
+ };
+ static const struct of_device_id mdss_dt_match[] = {
+ { .compatible = "qcom,mdss" },
+@@ -620,8 +650,8 @@ static const struct of_device_id mdss_dt_match[] = {
+ { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
+ { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
+ { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
+- { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
+- { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
++ { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
++ { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
+ { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
+ {}
+ };
+diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
+index 02bbab42adbc0e..3afef4b1786d28 100644
+--- a/drivers/gpu/drm/msm/msm_mdss.h
++++ b/drivers/gpu/drm/msm/msm_mdss.h
+@@ -14,6 +14,7 @@ struct msm_mdss_data {
+ u32 ubwc_static;
+ u32 highest_bank_bit;
+ u32 macrotile_mode;
++ u32 reg_bus_bw;
+ };
+
+ #define UBWC_1_0 0x10000000
+diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
+index 40c0bc35a44cee..7f5e0a961bba72 100644
+--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
++++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
+@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
+
+ msm_fence_init(submit->hw_fence, fctx);
+
+- submit->seqno = submit->hw_fence->seqno;
+-
+ mutex_lock(&priv->lru.lock);
+
+ for (i = 0; i < submit->nr_bos; i++) {
+@@ -34,8 +32,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
+
+ mutex_unlock(&priv->lru.lock);
+
++ /* TODO move submit path over to using a per-ring lock.. */
++ mutex_lock(&gpu->lock);
++
+ msm_gpu_submit(gpu, submit);
+
++ mutex_unlock(&gpu->lock);
++
+ return dma_fence_get(submit->hw_fence);
+ }
+
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
+index 18de2f17e2491c..6494e827075690 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
++++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
+@@ -340,6 +340,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
+ if (ret)
+ return ret;
+
++ if (pm_runtime_suspended(dev))
++ return 0;
++
+ return lcdif_rpm_suspend(dev);
+ }
+
+@@ -347,7 +350,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
+ {
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+- lcdif_rpm_resume(dev);
++ if (!pm_runtime_suspended(dev))
++ lcdif_rpm_resume(dev);
+
+ return drm_mode_config_helper_resume(drm);
+ }
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+index a34917b048f96f..8c7fff19c97bb0 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+@@ -1157,7 +1157,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ chan = drm->channel;
+ if (!chan)
+ return -ENODEV;
+- cli = (void *)chan->user.client;
++ cli = chan->cli;
+ push = chan->chan.push;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+index 670c9739e5e18c..2033214c4b7848 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+@@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(encoder->dev, tv_mode);
++ if (!mode)
++ continue;
+
+ mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ mode->htotal / 1000 *
+@@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ if (modes[i].hdisplay == output_mode->hdisplay &&
+ modes[i].vdisplay == output_mode->vdisplay) {
+ mode = drm_mode_duplicate(encoder->dev, output_mode);
++ if (!mode)
++ continue;
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ } else {
+@@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ modes[i].vdisplay, 60, false,
+ (output_mode->flags &
+ DRM_MODE_FLAG_INTERLACE), false);
++ if (!mode)
++ continue;
+ }
+
+ /* CVT modes are sometimes unsuitable... */
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 4e7c9c353c5112..de8041c94de5d8 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -966,8 +966,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
+ const int clock = crtc_state->adjusted_mode.clock;
+
+ asyh->or.bpc = connector->display_info.bpc;
+- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
+- false);
++ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3 << 4);
+ }
+
+ mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
+@@ -2310,7 +2309,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+
+ err_cleanup:
+ if (ret)
+- drm_atomic_helper_cleanup_planes(dev, state);
++ drm_atomic_helper_unprepare_planes(dev, state);
+ done:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+index 0d9fc741a71932..932c9fd0b2d89c 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+@@ -11,6 +11,7 @@ struct nvkm_client {
+ u32 debug;
+
+ struct rb_root objroot;
++ spinlock_t obj_lock;
+
+ void *data;
+ int (*event)(u64 token, void *argv, u32 argc);
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+index 82b267c111470a..460459af272d6f 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+@@ -14,7 +14,7 @@ struct nvkm_event {
+ int index_nr;
+
+ spinlock_t refs_lock;
+- spinlock_t list_lock;
++ rwlock_t list_lock;
+ int *refs;
+
+ struct list_head ntfy;
+@@ -38,7 +38,7 @@ nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
+ int types_nr, int index_nr, struct nvkm_event *event)
+ {
+ spin_lock_init(&event->refs_lock);
+- spin_lock_init(&event->list_lock);
++ rwlock_init(&event->list_lock);
+ return __nvkm_event_init(func, subdev, types_nr, index_nr, event);
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
+index 2edd7bb13faea5..2e177ebab30398 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
++++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
+@@ -204,6 +204,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvif_device *device = &drm->client.device;
++ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
+ struct nvkm_gr *gr = nvxx_gr(device);
+ struct drm_nouveau_getparam *getparam = data;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+@@ -268,6 +269,17 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+ getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
+ break;
+ }
++ case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
++ getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
++ break;
++ case NOUVEAU_GETPARAM_VRAM_USED: {
++ struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
++ getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
++ break;
++ }
++ case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
++ getparam->value = 1;
++ break;
+ default:
+ NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
+@@ -339,7 +351,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
+ list_add(&chan->head, &abi16->channels);
+
+ /* create channel object and initialise dma and fence management */
+- ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
++ ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle, &chan->chan);
+ if (ret)
+ goto done;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 189903b65edc99..48cf593383b341 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -23,6 +23,7 @@
+ */
+
+ #include "nouveau_drv.h"
++#include "nouveau_bios.h"
+ #include "nouveau_reg.h"
+ #include "dispnv04/hw.h"
+ #include "nouveau_encoder.h"
+@@ -1675,7 +1676,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
+ */
+ if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+ if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
++ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
+ return false;
+ }
+ }
+@@ -1761,26 +1762,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+ #ifdef __powerpc__
+ /* Apple iMac G4 NV17 */
+ if (of_machine_is_compatible("PowerMac4,5")) {
+- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
+- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
++ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
++ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
+ return;
+ }
+ #endif
+
+ /* Make up some sane defaults */
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
+- bios->legacy.i2c_indices.crt, 1, 1);
++ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
+
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
+ bios->legacy.i2c_indices.tv,
+- all_heads, 0);
++ all_heads, DCB_OUTPUT_A);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
+ bios->legacy.i2c_indices.panel,
+- all_heads, 1);
++ all_heads, DCB_OUTPUT_B);
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 0f3bd187ede67d..5d398a422459e1 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -234,28 +234,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+ }
+
+ nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+- if (!nouveau_cli_uvmm(cli) || internal) {
+- /* for BO noVM allocs, don't assign kinds */
+- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+- kfree(nvbo);
+- return ERR_PTR(-EINVAL);
+- }
+
+- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+- } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+- nvbo->comp = (tile_flags & 0x00030000) >> 16;
+- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+- kfree(nvbo);
+- return ERR_PTR(-EINVAL);
+- }
+- } else {
+- nvbo->zeta = (tile_flags & 0x00000007);
++ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
++ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
++ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
++ kfree(nvbo);
++ return ERR_PTR(-EINVAL);
++ }
++
++ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
++ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
++ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
++ nvbo->comp = (tile_flags & 0x00030000) >> 16;
++ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
++ kfree(nvbo);
++ return ERR_PTR(-EINVAL);
+ }
+- nvbo->mode = tile_mode;
++ } else {
++ nvbo->zeta = (tile_flags & 0x00000007);
++ }
++ nvbo->mode = tile_mode;
+
++ if (!nouveau_cli_uvmm(cli) || internal) {
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+@@ -297,12 +297,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+ }
+ nvbo->page = vmm->page[pi].shift;
+ } else {
+- /* reject other tile flags when in VM mode. */
+- if (tile_mode)
+- return ERR_PTR(-EINVAL);
+- if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
+- return ERR_PTR(-EINVAL);
+-
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+@@ -318,8 +312,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
+
+- if (pi < 0)
+- pi = i;
++ /* pick the last one as it will be smallest. */
++ pi = i;
++
+ /* Stop once the buffer is larger than the current page size. */
+ if (*size >= 1ULL << vmm->page[i].shift)
+ break;
+@@ -848,7 +843,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
+ {
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_channel *chan = drm->ttm.chan;
+- struct nouveau_cli *cli = (void *)chan->user.client;
++ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_fence *fence;
+ int ret;
+
+@@ -1254,6 +1249,8 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
+ drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+ bdev->dev_mapping);
+ nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
++ nvbo->bo.resource->bus.offset = 0;
++ nvbo->bo.resource->bus.addr = NULL;
+ goto retry;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
+index 7c97b288680760..cee36b1efd3917 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
++++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
+@@ -52,7 +52,7 @@ static int
+ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
+ {
+ struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
+- struct nouveau_cli *cli = (void *)chan->user.client;
++ struct nouveau_cli *cli = chan->cli;
+
+ NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
+
+@@ -66,7 +66,7 @@ int
+ nouveau_channel_idle(struct nouveau_channel *chan)
+ {
+ if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
+- struct nouveau_cli *cli = (void *)chan->user.client;
++ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+@@ -142,10 +142,11 @@ nouveau_channel_wait(struct nvif_push *push, u32 size)
+ }
+
+ static int
+-nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
++nouveau_channel_prep(struct nouveau_cli *cli,
+ u32 size, struct nouveau_channel **pchan)
+ {
+- struct nouveau_cli *cli = (void *)device->object.client;
++ struct nouveau_drm *drm = cli->drm;
++ struct nvif_device *device = &cli->device;
+ struct nv_dma_v0 args = {};
+ struct nouveau_channel *chan;
+ u32 target;
+@@ -155,6 +156,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+ if (!chan)
+ return -ENOMEM;
+
++ chan->cli = cli;
+ chan->device = device;
+ chan->drm = drm;
+ chan->vmm = nouveau_cli_vmm(cli);
+@@ -254,7 +256,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+ }
+
+ static int
+-nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
++nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
+ struct nouveau_channel **pchan)
+ {
+ const struct nvif_mclass hosts[] = {
+@@ -279,7 +281,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
+ struct nvif_chan_v0 chan;
+ char name[TASK_COMM_LEN+16];
+ } args;
+- struct nouveau_cli *cli = (void *)device->object.client;
++ struct nvif_device *device = &cli->device;
+ struct nouveau_channel *chan;
+ const u64 plength = 0x10000;
+ const u64 ioffset = plength;
+@@ -298,7 +300,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
+ size = ioffset + ilength;
+
+ /* allocate dma push buffer */
+- ret = nouveau_channel_prep(drm, device, size, &chan);
++ ret = nouveau_channel_prep(cli, size, &chan);
+ *pchan = chan;
+ if (ret)
+ return ret;
+@@ -493,13 +495,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
+ }
+
+ int
+-nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
++nouveau_channel_new(struct nouveau_cli *cli,
+ bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
+ {
+- struct nouveau_cli *cli = (void *)device->object.client;
+ int ret;
+
+- ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
++ ret = nouveau_channel_ctor(cli, priv, runm, pchan);
+ if (ret) {
+ NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
+ return ret;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
+index 5de2ef4e98c2bb..260febd634ee21 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
++++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
+@@ -12,6 +12,7 @@ struct nouveau_channel {
+ struct nvif_push *push;
+ } chan;
+
++ struct nouveau_cli *cli;
+ struct nvif_device *device;
+ struct nouveau_drm *drm;
+ struct nouveau_vmm *vmm;
+@@ -62,7 +63,7 @@ struct nouveau_channel {
+ int nouveau_channels_init(struct nouveau_drm *);
+ void nouveau_channels_fini(struct nouveau_drm *);
+
+-int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv, u64 runm,
++int nouveau_channel_new(struct nouveau_cli *, bool priv, u64 runm,
+ u32 vram, u32 gart, struct nouveau_channel **);
+ void nouveau_channel_del(struct nouveau_channel **);
+ int nouveau_channel_idle(struct nouveau_channel *);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 79ea30aac31fb8..22a125243d81f7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -983,6 +983,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(dev, nv_connector->native_mode);
++ if (!mode)
++ return 0;
++
+ drm_mode_probed_add(connector, mode);
+ ret = 1;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+index 12feecf71e752d..097bd3af0719e0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
+ if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ goto done;
+
+- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
++ dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
+ if (!dpage)
+ goto done;
+
+@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+ dma_addr_t *dma_addrs;
+ struct nouveau_fence *fence;
+
+- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
++ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
++ dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
+
+ migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+ npages);
+@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ nouveau_dmem_fence_done(&fence);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+- kfree(src_pfns);
+- kfree(dst_pfns);
++ kvfree(src_pfns);
++ kvfree(dst_pfns);
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+- kfree(dma_addrs);
++ kvfree(dma_addrs);
+ }
+
+ void
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+index 6a4980b2d4d4e1..bf2ae67b03d944 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -108,12 +108,15 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
+ u8 *dpcd = nv_encoder->dp.dpcd;
+ int ret = NOUVEAU_DP_NONE, hpd;
+
+- /* If we've already read the DPCD on an eDP device, we don't need to
+- * reread it as it won't change
++ /* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we
++ * haven't probed them once before.
+ */
+- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+- dpcd[DP_DPCD_REV] != 0)
+- return NOUVEAU_DP_SST;
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++ if (connector->status == connector_status_connected)
++ return NOUVEAU_DP_SST;
++ else if (connector->status == connector_status_disconnected)
++ return NOUVEAU_DP_NONE;
++ }
+
+ mutex_lock(&nv_encoder->dp.hpd_irq_lock);
+ if (mstm) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 4396f501b16a3f..ac15a662e06042 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -343,7 +343,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
+ return;
+ }
+
+- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
++ ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+ if (ret)
+ NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+ }
+@@ -371,7 +371,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
+ return;
+ }
+
+- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
++ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
+ if (ret) {
+ NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
+ nouveau_accel_gr_fini(drm);
+@@ -708,10 +708,11 @@ nouveau_drm_device_fini(struct drm_device *dev)
+ }
+ mutex_unlock(&drm->clients_lock);
+
+- nouveau_sched_fini(drm);
+-
+ nouveau_cli_fini(&drm->client);
+ nouveau_cli_fini(&drm->master);
++
++ nouveau_sched_fini(drm);
++
+ nvif_parent_dtor(&drm->parent);
+ mutex_destroy(&drm->clients_lock);
+ kfree(drm);
+@@ -1133,7 +1134,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
+ }
+
+ get_task_comm(tmpname, current);
+- snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
++ rcu_read_lock();
++ snprintf(name, sizeof(name), "%s[%d]",
++ tmpname, pid_nr(rcu_dereference(fpriv->pid)));
++ rcu_read_unlock();
+
+ if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
+ ret = -ENOMEM;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index ca762ea5541361..93f08f9479d89b 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
+ void
+ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+ {
++ cancel_work_sync(&fctx->uevent_work);
+ nouveau_fence_context_kill(fctx, 0);
+ nvif_event_dtor(&fctx->event);
+ fctx->dead = 1;
+@@ -145,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
+ return drop;
+ }
+
+-static int
+-nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
++static void
++nouveau_fence_uevent_work(struct work_struct *work)
+ {
+- struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
++ struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
++ uevent_work);
+ unsigned long flags;
+- int ret = NVIF_EVENT_KEEP;
++ int drop = 0;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ if (!list_empty(&fctx->pending)) {
+@@ -160,11 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
+ fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (nouveau_fence_update(chan, fctx))
+- ret = NVIF_EVENT_DROP;
++ drop = 1;
+ }
++ if (drop)
++ nvif_event_block(&fctx->event);
++
+ spin_unlock_irqrestore(&fctx->lock, flags);
++}
+
+- return ret;
++static int
++nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
++{
++ struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
++ schedule_work(&fctx->uevent_work);
++ return NVIF_EVENT_KEEP;
+ }
+
+ void
+@@ -178,6 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
+ } args;
+ int ret;
+
++ INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
+ INIT_LIST_HEAD(&fctx->flip);
+ INIT_LIST_HEAD(&fctx->pending);
+ spin_lock_init(&fctx->lock);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
+index 64d33ae7f35610..8bc065acfe3587 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
+@@ -44,6 +44,7 @@ struct nouveau_fence_chan {
+ u32 context;
+ char name[32];
+
++ struct work_struct uevent_work;
+ struct nvif_event event;
+ int notify_ref, dead, killed;
+ };
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index a0d303e5ce3d8d..7b69e6df57486a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -758,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
+ return -ENOMEM;
+
+ if (unlikely(nouveau_cli_uvmm(cli)))
+- return -ENOSYS;
++ return nouveau_abi16_put(abi16, -ENOSYS);
+
+ list_for_each_entry(temp, &abi16->channels, head) {
+ if (temp->chan->chid == req->channel) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
+index 1b2ff0c40fc1c9..6c599a9f49ee40 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
+@@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
+ if (ret) {
+- nouveau_bo_ref(NULL, &nvbo);
++ drm_gem_object_release(&nvbo->bo.base);
++ kfree(nvbo);
+ obj = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index 186351ecf72fd7..ec9f307370fa8a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
+ if (ret)
+ return ret;
+
+- buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
++ buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
+ if (!buffer->fault)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+index aae780e4a4aa37..3d41e590d4712d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+@@ -804,15 +804,15 @@ op_remap(struct drm_gpuva_op_remap *r,
+ struct drm_gpuva_op_unmap *u = r->unmap;
+ struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ u64 addr = uvma->va.va.addr;
+- u64 range = uvma->va.va.range;
++ u64 end = uvma->va.va.addr + uvma->va.va.range;
+
+ if (r->prev)
+ addr = r->prev->va.addr + r->prev->va.range;
+
+ if (r->next)
+- range = r->next->va.addr - addr;
++ end = r->next->va.addr;
+
+- op_unmap_range(u, addr, range);
++ op_unmap_range(u, addr, end - addr);
+ }
+
+ static int
+@@ -1320,6 +1320,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
++ struct nouveau_bo *nvbo;
+
+ if (unlikely(!obj))
+ continue;
+@@ -1330,8 +1331,9 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+ if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
+ continue;
+
+- ret = nouveau_bo_validate(nouveau_gem_object(obj),
+- true, false);
++ nvbo = nouveau_gem_object(obj);
++ nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
++ ret = nouveau_bo_validate(nvbo, true, false);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
+index a6602c01267156..3dda885df5b223 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
+@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+ } else {
+ ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+ mem->mem.size, &tmp);
++ if (ret)
++ goto done;
++
+ vma->addr = tmp.addr;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
+index 5b71a5a5cd85ce..cdbc75e3d1f669 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fence.c
++++ b/drivers/gpu/drm/nouveau/nv04_fence.c
+@@ -39,7 +39,7 @@ struct nv04_fence_priv {
+ static int
+ nv04_fence_emit(struct nouveau_fence *fence)
+ {
+- struct nvif_push *push = fence->channel->chan.push;
++ struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
+ int ret = PUSH_WAIT(push, 2);
+ if (ret == 0) {
+ PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
+index ebdeb8eb9e7741..c55662937ab22c 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
+@@ -180,6 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
+ client->device = device;
+ client->debug = nvkm_dbgopt(dbg, "CLIENT");
+ client->objroot = RB_ROOT;
++ spin_lock_init(&client->obj_lock);
+ client->event = event;
+ INIT_LIST_HEAD(&client->umem);
+ spin_lock_init(&client->lock);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c
+index a6c877135598f7..61fed7792e415c 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/event.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c
+@@ -81,17 +81,17 @@ nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy)
+ static void
+ nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy)
+ {
+- spin_lock_irq(&ntfy->event->list_lock);
++ write_lock_irq(&ntfy->event->list_lock);
+ list_del_init(&ntfy->head);
+- spin_unlock_irq(&ntfy->event->list_lock);
++ write_unlock_irq(&ntfy->event->list_lock);
+ }
+
+ static void
+ nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy)
+ {
+- spin_lock_irq(&ntfy->event->list_lock);
++ write_lock_irq(&ntfy->event->list_lock);
+ list_add_tail(&ntfy->head, &ntfy->event->ntfy);
+- spin_unlock_irq(&ntfy->event->list_lock);
++ write_unlock_irq(&ntfy->event->list_lock);
+ }
+
+ static void
+@@ -176,7 +176,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
+ return;
+
+ nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id);
+- spin_lock_irqsave(&event->list_lock, flags);
++ read_lock_irqsave(&event->list_lock, flags);
+
+ list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) {
+ if (ntfy->id == id && ntfy->bits & bits) {
+@@ -185,7 +185,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
+ }
+ }
+
+- spin_unlock_irqrestore(&event->list_lock, flags);
++ read_unlock_irqrestore(&event->list_lock, flags);
+ }
+
+ void
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+index 91fb494d400935..afc8d4e3e16f47 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+@@ -187,7 +187,8 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
+ break;
+ case NVKM_FIRMWARE_IMG_DMA:
+ nvkm_memory_unref(&memory);
+- dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
++ dma_free_noncoherent(fw->device->dev, sg_dma_len(&fw->mem.sgl),
++ fw->img, fw->phys, DMA_TO_DEVICE);
+ break;
+ default:
+ WARN_ON(1);
+@@ -212,10 +213,12 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
+ break;
+ case NVKM_FIRMWARE_IMG_DMA: {
+ dma_addr_t addr;
+-
+ len = ALIGN(fw->len, PAGE_SIZE);
+
+- fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL);
++ fw->img = dma_alloc_noncoherent(fw->device->dev,
++ len, &addr,
++ DMA_TO_DEVICE,
++ GFP_KERNEL);
+ if (fw->img) {
+ memcpy(fw->img, src, fw->len);
+ fw->phys = addr;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
+index 7c554c14e8841d..aea3ba72027abf 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
+@@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
+ const struct nvkm_object_func *func)
+ {
+ struct nvkm_object *object;
++ unsigned long flags;
+
+ if (handle) {
++ spin_lock_irqsave(&client->obj_lock, flags);
+ struct rb_node *node = client->objroot.rb_node;
+ while (node) {
+ object = rb_entry(node, typeof(*object), node);
+@@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
+ else
+ if (handle > object->object)
+ node = node->rb_right;
+- else
++ else {
++ spin_unlock_irqrestore(&client->obj_lock, flags);
+ goto done;
++ }
+ }
++ spin_unlock_irqrestore(&client->obj_lock, flags);
+ return ERR_PTR(-ENOENT);
+ } else {
+ object = &client->object;
+@@ -57,30 +62,39 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
+ void
+ nvkm_object_remove(struct nvkm_object *object)
+ {
++ unsigned long flags;
++
++ spin_lock_irqsave(&object->client->obj_lock, flags);
+ if (!RB_EMPTY_NODE(&object->node))
+ rb_erase(&object->node, &object->client->objroot);
++ spin_unlock_irqrestore(&object->client->obj_lock, flags);
+ }
+
+ bool
+ nvkm_object_insert(struct nvkm_object *object)
+ {
+- struct rb_node **ptr = &object->client->objroot.rb_node;
++ struct rb_node **ptr;
+ struct rb_node *parent = NULL;
++ unsigned long flags;
+
++ spin_lock_irqsave(&object->client->obj_lock, flags);
++ ptr = &object->client->objroot.rb_node;
+ while (*ptr) {
+ struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
+ parent = *ptr;
+- if (object->object < this->object)
++ if (object->object < this->object) {
+ ptr = &parent->rb_left;
+- else
+- if (object->object > this->object)
++ } else if (object->object > this->object) {
+ ptr = &parent->rb_right;
+- else
++ } else {
++ spin_unlock_irqrestore(&object->client->obj_lock, flags);
+ return false;
++ }
+ }
+
+ rb_link_node(&object->node, parent, ptr);
+ rb_insert_color(&object->node, &object->client->objroot);
++ spin_unlock_irqrestore(&object->client->obj_lock, flags);
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+index 80a480b1217468..a1c8545f1249a1 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
++++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+@@ -89,6 +89,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
+ nvkm_falcon_fw_dtor_sigs(fw);
+ }
+
++	/* After the last write to the img, sync the DMA mappings. */
++ dma_sync_single_for_device(fw->fw.device->dev,
++ fw->fw.phys,
++ sg_dma_len(&fw->fw.mem.sgl),
++ DMA_TO_DEVICE);
++
+ FLCNFW_DBG(fw, "resetting");
+ fw->func->reset(fw);
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+index 19188683c8fca9..8c2bf1c16f2a95 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
+ return (void *)fw;
+ }
+
++static void
++shadow_fw_release(void *fw)
++{
++ release_firmware(fw);
++}
++
+ static const struct nvbios_source
+ shadow_fw = {
+ .name = "firmware",
+ .init = shadow_fw_init,
+- .fini = (void(*)(void *))release_firmware,
++ .fini = shadow_fw_release,
+ .read = shadow_fw_read,
+ .rw = false,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+index 4bf486b5710136..cb05f7f48a98bb 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
+ return ERR_PTR(-EINVAL);
+ }
+
++static void of_fini(void *p)
++{
++ kfree(p);
++}
++
+ const struct nvbios_source
+ nvbios_of = {
+ .name = "OpenFirmware",
+ .init = of_init,
+- .fini = (void(*)(void *))kfree,
++ .fini = of_fini,
+ .read = of_read,
+ .size = of_size,
+ .rw = false,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+index 50f0c1914f58e8..4c3f7439657987 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+@@ -46,6 +46,8 @@ u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *,
+ u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+ struct nvkm_device *, int, int *);
+
++int gp100_ram_init(struct nvkm_ram *);
++
+ /* RAM type-specific MR calculation routines */
+ int nvkm_sddr2_calc(struct nvkm_ram *);
+ int nvkm_sddr3_calc(struct nvkm_ram *);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+index 378f6fb7099077..8987a21e81d174 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+@@ -27,7 +27,7 @@
+ #include <subdev/bios/init.h>
+ #include <subdev/bios/rammap.h>
+
+-static int
++int
+ gp100_ram_init(struct nvkm_ram *ram)
+ {
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
+index 8550f5e473474b..b6b6ee59019d70 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
+@@ -5,6 +5,7 @@
+
+ static const struct nvkm_ram_func
+ gp102_ram = {
++ .init = gp100_ram_init,
+ };
+
+ int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+index 4b2d7465d22f75..f4989f0526ecb8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+@@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
+ void __iomem *map = NULL;
+
+ /* Already mapped? */
+- if (refcount_inc_not_zero(&iobj->maps))
++ if (refcount_inc_not_zero(&iobj->maps)) {
++		/* read barrier matches the wmb on refcount set */
++ smp_rmb();
+ return iobj->map;
++ }
+
+ /* Take the lock, and re-check that another thread hasn't
+ * already mapped the object in the meantime.
+@@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
+ iobj->base.memory.ptrs = &nv50_instobj_fast;
+ else
+ iobj->base.memory.ptrs = &nv50_instobj_slow;
++ /* barrier to ensure the ptrs are written before refcount is set */
++ smp_wmb();
+ refcount_set(&iobj->maps, 1);
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+index 6cb5eefa45e9aa..5a08458fe1b7f5 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
+
+ type |= 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+- type |= 0x00000004; /* HUB_ONLY */
++ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
+
+ mutex_lock(&vmm->mmu->mutex);
+
+diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
+index b715301ec79f66..6c49270cb290a4 100644
+--- a/drivers/gpu/drm/omapdrm/Kconfig
++++ b/drivers/gpu/drm/omapdrm/Kconfig
+@@ -4,7 +4,7 @@ config DRM_OMAP
+ depends on DRM && OF
+ depends on ARCH_OMAP2PLUS
+ select DRM_KMS_HELPER
+- select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
++ select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
+ select VIDEOMODE_HELPERS
+ select HDMI
+ default n
+diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
+index afeeb773755252..21996b713d1c3a 100644
+--- a/drivers/gpu/drm/omapdrm/omap_drv.c
++++ b/drivers/gpu/drm/omapdrm/omap_drv.c
+@@ -69,7 +69,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+ struct drm_device *dev = old_state->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+- bool fence_cookie = dma_fence_begin_signalling();
+
+ dispc_runtime_get(priv->dispc);
+
+@@ -92,6 +91,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ omap_atomic_wait_for_completion(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
++
++ drm_atomic_helper_commit_hw_done(old_state);
+ } else {
+ /*
+ * OMAP3 DSS seems to have issues with the work-around above,
+@@ -101,11 +102,9 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+- }
+-
+- drm_atomic_helper_commit_hw_done(old_state);
+
+- dma_fence_end_signalling(fence_cookie);
++ drm_atomic_helper_commit_hw_done(old_state);
++ }
+
+ /*
+ * Wait for completion of the page flips to ensure that old buffers
+@@ -696,6 +695,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
+ soc = soc_device_match(omapdrm_soc_devices);
+ priv->omaprev = soc ? (uintptr_t)soc->data : 0;
+ priv->wq = alloc_ordered_workqueue("omapdrm", 0);
++ if (!priv->wq) {
++ ret = -ENOMEM;
++ goto err_alloc_workqueue;
++ }
+
+ mutex_init(&priv->list_lock);
+ INIT_LIST_HEAD(&priv->obj_list);
+@@ -754,6 +757,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
+ drm_mode_config_cleanup(ddev);
+ omap_gem_deinit(ddev);
+ destroy_workqueue(priv->wq);
++err_alloc_workqueue:
+ omap_disconnect_pipelines(ddev);
+ drm_dev_put(ddev);
+ return ret;
+diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
+index 6b08b137af1ad8..523be34682caf1 100644
+--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
++++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
+@@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work)
+ omap_gem_roll(bo, fbi->var.yoffset * npages);
+ }
+
++FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
++ drm_fb_helper_damage_range,
++ drm_fb_helper_damage_area)
++
+ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+ {
+@@ -78,11 +82,9 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+
+ static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+- struct drm_fb_helper *helper = info->par;
+- struct drm_framebuffer *fb = helper->fb;
+- struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
++ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+- return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
++ return fb_deferred_io_mmap(info, vma);
+ }
+
+ static void omap_fbdev_fb_destroy(struct fb_info *info)
+@@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
+
+ DBG();
+
++ fb_deferred_io_cleanup(info);
+ drm_fb_helper_fini(helper);
+
+ omap_gem_unpin(bo);
+@@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
+ kfree(fbdev);
+ }
+
++/*
++ * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
++ * because we use write-combine.
++ */
+ static const struct fb_ops omap_fb_ops = {
+ .owner = THIS_MODULE,
+- __FB_DEFAULT_DMAMEM_OPS_RDWR,
++ __FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = omap_fbdev_pan_display,
+- __FB_DEFAULT_DMAMEM_OPS_DRAW,
++ __FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
+ .fb_ioctl = drm_fb_helper_ioctl,
+ .fb_mmap = omap_fbdev_fb_mmap,
+ .fb_destroy = omap_fbdev_fb_destroy,
+@@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
+ fbi->fix.smem_start = dma_addr;
+ fbi->fix.smem_len = bo->size;
+
++ /* deferred I/O */
++ helper->fbdefio.delay = HZ / 20;
++ helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
++
++ fbi->fbdefio = &helper->fbdefio;
++ ret = fb_deferred_io_init(fbi);
++ if (ret)
++ goto fail;
++
+ /* if we have DMM, then we can use it for scrolling by just
+ * shuffling pages around in DMM rather than doing sw blit.
+ */
+@@ -238,8 +254,20 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
+ return ret;
+ }
+
++static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
++{
++ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
++ return 0;
++
++ if (helper->fb->funcs->dirty)
++ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
++
++ return 0;
++}
++
+ static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ .fb_probe = omap_fbdev_create,
++ .fb_dirty = omap_fbdev_dirty,
+ };
+
+ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
+index 869e535faefa38..3a2f4a9f1d4665 100644
+--- a/drivers/gpu/drm/panel/Kconfig
++++ b/drivers/gpu/drm/panel/Kconfig
+@@ -184,7 +184,7 @@ config DRM_PANEL_ILITEK_IL9322
+
+ config DRM_PANEL_ILITEK_ILI9341
+ tristate "Ilitek ILI9341 240x320 QVGA panels"
+- depends on OF && SPI
++ depends on SPI
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ depends on BACKLIGHT_CLASS_DEVICE
+diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
+index abb0788843c60c..503ecea72c5eac 100644
+--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
++++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
+@@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
+ connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
+
+ mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
++ if (!mode)
++ return -ENOMEM;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+index c9087f474cbc5a..e6328991c87e93 100644
+--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
++++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+@@ -1847,7 +1847,11 @@ static int boe_panel_prepare(struct drm_panel *panel)
+ usleep_range(10000, 11000);
+
+ if (boe->desc->lp11_before_reset) {
+- mipi_dsi_dcs_nop(boe->dsi);
++ ret = mipi_dsi_dcs_nop(boe->dsi);
++ if (ret < 0) {
++ dev_err(&boe->dsi->dev, "Failed to send NOP: %d\n", ret);
++ goto poweroff;
++ }
+ usleep_range(1000, 2000);
+ }
+ gpiod_set_value(boe->enable_gpio, 1);
+@@ -1868,13 +1872,13 @@ static int boe_panel_prepare(struct drm_panel *panel)
+ return 0;
+
+ poweroff:
++ gpiod_set_value(boe->enable_gpio, 0);
+ regulator_disable(boe->avee);
+ poweroffavdd:
+ regulator_disable(boe->avdd);
+ poweroff1v8:
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+- gpiod_set_value(boe->enable_gpio, 0);
+
+ return ret;
+ }
+@@ -2049,6 +2053,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
++ .lp11_before_reset = true,
+ };
+
+ static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+@@ -2103,14 +2108,15 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = starry_qfh032011_53g_init_cmd,
++ .lp11_before_reset = true,
+ };
+
+ static const struct drm_display_mode starry_himax83102_j02_default_mode = {
+- .clock = 161600,
++ .clock = 162680,
+ .hdisplay = 1200,
+- .hsync_start = 1200 + 40,
+- .hsync_end = 1200 + 40 + 20,
+- .htotal = 1200 + 40 + 20 + 40,
++ .hsync_start = 1200 + 60,
++ .hsync_end = 1200 + 60 + 20,
++ .htotal = 1200 + 60 + 20 + 40,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 116,
+ .vsync_end = 1920 + 116 + 8,
+@@ -2237,6 +2243,8 @@ static int boe_panel_add(struct boe_panel *boe)
+
+ gpiod_set_value(boe->enable_gpio, 0);
+
++ boe->base.prepare_prev_first = true;
++
+ drm_panel_init(&boe->base, dev, &boe_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 95c8472d878a99..94fe2f3836a9a3 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -203,6 +203,9 @@ struct edp_panel_entry {
+
+ /** @name: Name of this panel (for printing to logs). */
+ const char *name;
++
++	/** @override_edid_mode: Override the mode obtained from EDID. */
++ const struct drm_display_mode *override_edid_mode;
+ };
+
+ struct panel_edp {
+@@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
+ return num;
+ }
+
++static int panel_edp_override_edid_mode(struct panel_edp *panel,
++ struct drm_connector *connector,
++ const struct drm_display_mode *override_mode)
++{
++ struct drm_display_mode *mode;
++
++ mode = drm_mode_duplicate(connector->dev, override_mode);
++ if (!mode) {
++ dev_err(panel->base.dev, "failed to add additional mode\n");
++ return 0;
++ }
++
++ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
++ drm_mode_set_name(mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++}
++
+ static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+ {
+@@ -376,6 +397,7 @@ static int panel_edp_suspend(struct device *dev)
+ {
+ struct panel_edp *p = dev_get_drvdata(dev);
+
++ drm_dp_dpcd_set_powered(p->aux, false);
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get_boottime();
+@@ -392,8 +414,7 @@ static int panel_edp_unprepare(struct drm_panel *panel)
+ if (!p->prepared)
+ return 0;
+
+- pm_runtime_mark_last_busy(panel->dev);
+- ret = pm_runtime_put_autosuspend(panel->dev);
++ ret = pm_runtime_put_sync_suspend(panel->dev);
+ if (ret < 0)
+ return ret;
+ p->prepared = false;
+@@ -433,6 +454,7 @@ static int panel_edp_prepare_once(struct panel_edp *p)
+ }
+
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
++ drm_dp_dpcd_set_powered(p->aux, true);
+
+ delay = p->desc->delay.hpd_reliable;
+ if (p->no_hpd)
+@@ -469,6 +491,7 @@ static int panel_edp_prepare_once(struct panel_edp *p)
+ return 0;
+
+ error:
++ drm_dp_dpcd_set_powered(p->aux, false);
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get_boottime();
+@@ -568,6 +591,9 @@ static int panel_edp_get_modes(struct drm_panel *panel,
+ {
+ struct panel_edp *p = to_panel_edp(panel);
+ int num = 0;
++ bool has_override_edid_mode = p->detected_panel &&
++ p->detected_panel != ERR_PTR(-EINVAL) &&
++ p->detected_panel->override_edid_mode;
+
+ /* probe EDID if a DDC bus is available */
+ if (p->ddc) {
+@@ -575,9 +601,18 @@ static int panel_edp_get_modes(struct drm_panel *panel,
+
+ if (!p->edid)
+ p->edid = drm_get_edid(connector, p->ddc);
+-
+- if (p->edid)
+- num += drm_add_edid_modes(connector, p->edid);
++ if (p->edid) {
++ if (has_override_edid_mode) {
++ /*
++ * override_edid_mode is specified. Use
++				 * override_edid_mode instead of the mode from EDID.
++ */
++ num += panel_edp_override_edid_mode(p, connector,
++ p->detected_panel->override_edid_mode);
++ } else {
++ num += drm_add_edid_modes(connector, p->edid);
++ }
++ }
+
+ pm_runtime_mark_last_busy(panel->dev);
+ pm_runtime_put_autosuspend(panel->dev);
+@@ -973,6 +1008,8 @@ static const struct panel_desc auo_b116xak01 = {
+ },
+ .delay = {
+ .hpd_absent = 200,
++ .unprepare = 500,
++ .enable = 50,
+ },
+ };
+
+@@ -1828,6 +1865,15 @@ static const struct panel_delay delay_200_500_e200 = {
+ .delay = _delay \
+ }
+
++#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
++{ \
++ .name = _name, \
++ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
++ product_id), \
++ .delay = _delay, \
++ .override_edid_mode = _mode \
++}
++
+ /*
+ * This table is used to figure out power sequencing delays for panels that
+ * are detected by EDID. Entries here may point to entries in the
+@@ -1840,7 +1886,8 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
++ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+@@ -1848,8 +1895,10 @@ static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
++ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
++ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+index e7be15b681021e..6de11723234644 100644
+--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
++++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+@@ -104,6 +104,8 @@ static int kd35t133_unprepare(struct drm_panel *panel)
+ return ret;
+ }
+
++ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vdd);
+
+diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
+index c73243d85de718..631420d28be4c9 100644
+--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
++++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
+@@ -234,8 +234,7 @@ static int hx8394_enable(struct drm_panel *panel)
+
+ sleep_in:
+ /* This will probably fail, but let's try orderly power off anyway. */
+- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+- if (!ret)
++ if (!mipi_dsi_dcs_enter_sleep_mode(dsi))
+ msleep(50);
+
+ return ret;
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+index 3574681891e816..b933380b7eb783 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+@@ -22,8 +22,9 @@
+ #include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/gpio/consumer.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+-#include <linux/of.h>
++#include <linux/property.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/spi/spi.h>
+
+@@ -421,7 +422,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)
+
+ ili9341_dpi_init(ili);
+
+- return ret;
++ return 0;
+ }
+
+ static int ili9341_dpi_enable(struct drm_panel *panel)
+@@ -691,7 +692,7 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
+ * Every new incarnation of this display must have a unique
+ * data entry for the system in this driver.
+ */
+- ili->conf = of_device_get_match_data(dev);
++ ili->conf = device_get_match_data(dev);
+ if (!ili->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+@@ -714,18 +715,18 @@ static int ili9341_probe(struct spi_device *spi)
+
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+- dev_err(dev, "Failed to get gpio 'reset'\n");
++ return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");
+
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc))
+- dev_err(dev, "Failed to get gpio 'dc'\n");
++ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
+
+ if (!strcmp(id->name, "sf-tc240t-9370-t"))
+ return ili9341_dpi_probe(spi, dc, reset);
+ else if (!strcmp(id->name, "yx240qv29"))
+ return ili9341_dbi_probe(spi, dc, reset);
+
+- return -1;
++ return -ENODEV;
+ }
+
+ static void ili9341_remove(struct spi_device *spi)
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+index 7838947a1bf3c9..bb201f848ae97a 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+@@ -883,10 +883,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
+ msleep(5);
+
+ /* And reset it */
+- gpiod_set_value(ctx->reset, 1);
++ gpiod_set_value_cansleep(ctx->reset, 1);
+ msleep(20);
+
+- gpiod_set_value(ctx->reset, 0);
++ gpiod_set_value_cansleep(ctx->reset, 0);
+ msleep(20);
+
+ for (i = 0; i < ctx->desc->init_length; i++) {
+@@ -941,7 +941,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
+
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ regulator_disable(ctx->power);
+- gpiod_set_value(ctx->reset, 1);
++ gpiod_set_value_cansleep(ctx->reset, 1);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+index ad98dd9322b4a3..227937afe2572e 100644
+--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
++++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+@@ -261,6 +261,8 @@ static int panel_nv3051d_unprepare(struct drm_panel *panel)
+
+ usleep_range(10000, 15000);
+
++ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
+ regulator_disable(ctx->vdd);
+
+ return 0;
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+index 412ca84d058110..4be5013330ec27 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+@@ -565,10 +565,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ }
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+- if (!dsi_r_host) {
+- dev_err(dev, "Cannot get secondary DSI host\n");
+- return -EPROBE_DEFER;
+- }
++ if (!dsi_r_host)
++ return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
+
+ nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
+ if (!nt->dsi[1]) {
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+index 9632b9e95b7159..aab8937595a2a0 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+@@ -935,8 +935,7 @@ static int j606f_boe_init_sequence(struct panel_info *pinfo)
+
+ static const struct drm_display_mode elish_boe_modes[] = {
+ {
+- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
+- .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 104 / 1000,
++ .clock = (1600 + 60 + 8 + 60) * (2560 + 26 + 4 + 168) * 120 / 1000,
+ .hdisplay = 1600,
+ .hsync_start = 1600 + 60,
+ .hsync_end = 1600 + 60 + 8,
+@@ -950,8 +949,7 @@ static const struct drm_display_mode elish_boe_modes[] = {
+
+ static const struct drm_display_mode elish_csot_modes[] = {
+ {
+- /* There is only one 120 Hz timing, but it doesn't work perfectly, 104 Hz preferred */
+- .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 104 / 1000,
++ .clock = (1600 + 200 + 40 + 52) * (2560 + 26 + 4 + 168) * 120 / 1000,
+ .hdisplay = 1600,
+ .hsync_start = 1600 + 200,
+ .hsync_end = 1600 + 200 + 40,
+@@ -1266,9 +1264,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
+ return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
+
+ pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
+- if (!pinfo->dsi[1]) {
++ if (IS_ERR(pinfo->dsi[1])) {
+ dev_err(dev, "cannot get secondary DSI device\n");
+- return -ENODEV;
++ return PTR_ERR(pinfo->dsi[1]);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+index 5703f4712d96e5..9c336c71562b93 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
++++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+@@ -72,6 +72,7 @@ static int atana33xc20_suspend(struct device *dev)
+ if (p->el3_was_on)
+ atana33xc20_wait(p->el_on3_off_time, 150);
+
++ drm_dp_dpcd_set_powered(p->aux, false);
+ ret = regulator_disable(p->supply);
+ if (ret)
+ return ret;
+@@ -93,6 +94,7 @@ static int atana33xc20_resume(struct device *dev)
+ ret = regulator_enable(p->supply);
+ if (ret)
+ return ret;
++ drm_dp_dpcd_set_powered(p->aux, true);
+ p->powered_on_time = ktime_get_boottime();
+
+ if (p->no_hpd) {
+@@ -107,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
+ if (hpd_asserted < 0)
+ ret = hpd_asserted;
+
+- if (ret)
++ if (ret) {
+ dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
+-
+- return ret;
+- }
+-
+- if (p->aux->wait_hpd_asserted) {
++ goto error;
++ }
++ } else if (p->aux->wait_hpd_asserted) {
+ ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
+
+- if (ret)
++ if (ret) {
+ dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
+-
+- return ret;
++ goto error;
++ }
+ }
+
+ /*
+@@ -131,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
+ * right times.
+ */
+ return 0;
++
++error:
++ drm_dp_dpcd_set_powered(p->aux, false);
++ regulator_disable(p->supply);
++
++ return ret;
+ }
+
+ static int atana33xc20_disable(struct drm_panel *panel)
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+index ea5a857793827a..f23d8832a1ad05 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
+ .off_func = s6d7aa0_lsl080al02_off,
+ .drm_mode = &s6d7aa0_lsl080al02_mode,
+ .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
+- .bus_flags = DRM_BUS_FLAG_DE_HIGH,
++ .bus_flags = 0,
+
+ .has_backlight = false,
+ .use_passwd3 = false,
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index dd7928d9570f72..11ade6bac592f7 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2326,13 +2326,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
+ static const struct display_timing innolux_g101ice_l01_timing = {
+ .pixelclock = { 60400000, 71100000, 74700000 },
+ .hactive = { 1280, 1280, 1280 },
+- .hfront_porch = { 41, 80, 100 },
+- .hback_porch = { 40, 79, 99 },
+- .hsync_len = { 1, 1, 1 },
++ .hfront_porch = { 30, 60, 70 },
++ .hback_porch = { 30, 60, 70 },
++ .hsync_len = { 22, 40, 60 },
+ .vactive = { 800, 800, 800 },
+- .vfront_porch = { 5, 11, 14 },
+- .vback_porch = { 4, 11, 14 },
+- .vsync_len = { 1, 1, 1 },
++ .vfront_porch = { 3, 8, 14 },
++ .vback_porch = { 3, 8, 14 },
++ .vsync_len = { 4, 7, 12 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+
+@@ -2349,6 +2349,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+@@ -2406,6 +2407,9 @@ static const struct panel_desc innolux_g121x1_l03 = {
+ .unprepare = 200,
+ .disable = 400,
+ },
++ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
++ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+ static const struct display_timing innolux_g156hce_l01_timings = {
+@@ -2519,6 +2523,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
+ .vfront_porch = { 3, 5, 10 },
+ .vback_porch = { 2, 5, 10 },
+ .vsync_len = { 5, 5, 5 },
++ .flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+
+ static const struct panel_desc koe_tx26d202vm0bwa = {
+@@ -3781,6 +3786,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ };
+
+ static const struct panel_desc tianma_tm070jvhg33 = {
+@@ -3793,6 +3799,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ };
+
+ static const struct display_timing tianma_tm070rvhg71_timing = {
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+index 0459965e1b4f7b..036ac403ed2138 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+@@ -288,7 +288,7 @@ static void st7701_init_sequence(struct st7701 *st7701)
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
+ DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
+- DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200)));
++ DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200)));
+
+ /* T2D = 0.2us * T2D[3:0] */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+index 6a394563953501..7bb723d445ade4 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+@@ -506,29 +506,30 @@ static int st7703_prepare(struct drm_panel *panel)
+ return 0;
+
+ dev_dbg(ctx->dev, "Resetting the panel\n");
+- ret = regulator_enable(ctx->vcc);
++ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
++ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+- dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ return ret;
+ }
+- ret = regulator_enable(ctx->iovcc);
++
++ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+- goto disable_vcc;
++ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++ regulator_disable(ctx->iovcc);
++ return ret;
+ }
+
+- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+- usleep_range(20, 40);
++ /* Give power supplies time to stabilize before deasserting reset. */
++ usleep_range(10000, 20000);
++
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+- msleep(20);
++ usleep_range(15000, 20000);
+
+ ctx->prepared = true;
+
+ return 0;
+-
+-disable_vcc:
+- regulator_disable(ctx->vcc);
+- return ret;
+ }
+
+ static const u32 mantix_bus_formats[] = {
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+index 88e80fe98112da..28bfc48a912729 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
+ static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
+ .clock = 6000,
+ .hdisplay = 240,
+- .hsync_start = 240 + 28,
+- .hsync_end = 240 + 28 + 10,
+- .htotal = 240 + 28 + 10 + 10,
++ .hsync_start = 240 + 38,
++ .hsync_end = 240 + 38 + 10,
++ .htotal = 240 + 38 + 10 + 10,
+ .vdisplay = 280,
+- .vsync_start = 280 + 8,
+- .vsync_end = 280 + 8 + 4,
+- .vtotal = 280 + 8 + 4 + 4,
+- .width_mm = 43,
+- .height_mm = 37,
++ .vsync_start = 280 + 48,
++ .vsync_end = 280 + 48 + 4,
++ .vtotal = 280 + 48 + 4 + 4,
++ .width_mm = 37,
++ .height_mm = 43,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ };
+
+@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+- of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
++ ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
++ if (ret)
++ return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
+
+ drm_panel_add(&ctx->panel);
+
+diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+index 845304435e2356..f6a212e542cb93 100644
+--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
++++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+@@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
+ connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
+
+ mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
++ if (!mode)
++ return -ENOMEM;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+index c2806e4fd553b1..6e946e5a036eeb 100644
+--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
++++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+@@ -261,8 +261,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+ struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+- mipi_dsi_device_unregister(ctx->dsi);
+-
+ drm_panel_remove(&ctx->panel);
+ }
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index a2ab99698ca80a..ddcc8259061bbd 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -731,3 +731,4 @@ module_platform_driver(panfrost_driver);
+ MODULE_AUTHOR("Panfrost Project Developers");
+ MODULE_DESCRIPTION("Panfrost DRM Driver");
+ MODULE_LICENSE("GPL v2");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+index d28b99732ddeb6..c067ff550692ad 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+@@ -71,7 +71,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
+- gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
++
++ /* Only enable the interrupts we care about */
++ gpu_write(pfdev, GPU_INT_MASK,
++ GPU_IRQ_MASK_ERROR |
++ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
++ GPU_IRQ_CLEAN_CACHES_COMPLETED);
+
+ return 0;
+ }
+@@ -321,28 +326,38 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+ pfdev->features.shader_present, pfdev->features.l2_present);
+ }
+
++static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
++{
++ u64 core_mask;
++
++ if (pfdev->features.l2_present == 1)
++ return U64_MAX;
++
++ /*
++ * Only support one core group now.
++ * ~(l2_present - 1) unsets all bits in l2_present except
++ * the bottom bit. (l2_present - 2) has all the bits in
++ * the first core group set. AND them together to generate
++ * a mask of cores in the first core group.
++ */
++ core_mask = ~(pfdev->features.l2_present - 1) &
++ (pfdev->features.l2_present - 2);
++ dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
++ hweight64(core_mask),
++ hweight64(pfdev->features.shader_present));
++
++ return core_mask;
++}
++
+ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+ {
+ int ret;
+ u32 val;
+- u64 core_mask = U64_MAX;
++ u64 core_mask;
+
+ panfrost_gpu_init_quirks(pfdev);
++ core_mask = panfrost_get_core_mask(pfdev);
+
+- if (pfdev->features.l2_present != 1) {
+- /*
+- * Only support one core group now.
+- * ~(l2_present - 1) unsets all bits in l2_present except
+- * the bottom bit. (l2_present - 2) has all the bits in
+- * the first core group set. AND them together to generate
+- * a mask of cores in the first core group.
+- */
+- core_mask = ~(pfdev->features.l2_present - 1) &
+- (pfdev->features.l2_present - 2);
+- dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+- hweight64(core_mask),
+- hweight64(pfdev->features.shader_present));
+- }
+ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+ val, val == (pfdev->features.l2_present & core_mask),
+@@ -367,9 +382,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+
+ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
+ {
+- gpu_write(pfdev, TILER_PWROFF_LO, 0);
+- gpu_write(pfdev, SHADER_PWROFF_LO, 0);
+- gpu_write(pfdev, L2_PWROFF_LO, 0);
++ int ret;
++ u32 val;
++
++ gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
++ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
++ val, !val, 1, 2000);
++ if (ret)
++ dev_err(pfdev->dev, "shader power transition timeout");
++
++ gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
++ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
++ val, !val, 1, 2000);
++ if (ret)
++ dev_err(pfdev->dev, "tiler power transition timeout");
++
++ gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
++ ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
++ val, !val, 0, 2000);
++ if (ret)
++ dev_err(pfdev->dev, "l2 power transition timeout");
+ }
+
+ int panfrost_gpu_init(struct panfrost_device *pfdev)
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index c0123d09f699c7..83fa384f6a24ce 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -500,11 +500,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ mapping_set_unevictable(mapping);
+
+ for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
++		/* This can happen if the last fault only partially filled this
++ * section of the pages array before failing. In that case
++ * we skip already filled pages.
++ */
++ if (pages[i])
++ continue;
++
+ pages[i] = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(pages[i])) {
+ ret = PTR_ERR(pages[i]);
+ pages[i] = NULL;
+- goto err_pages;
++ goto err_unlock;
+ }
+ }
+
+@@ -512,7 +519,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+ NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+ if (ret)
+- goto err_pages;
++ goto err_unlock;
+
+ ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret)
+@@ -534,8 +541,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+
+ err_map:
+ sg_free_table(sgt);
+-err_pages:
+- drm_gem_shmem_put_pages(&bo->base);
+ err_unlock:
+ dma_resv_unlock(obj->resv);
+ err_bo:
+diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
+index ba3b5b5f0cdfe4..02e6b74d501669 100644
+--- a/drivers/gpu/drm/pl111/pl111_drv.c
++++ b/drivers/gpu/drm/pl111/pl111_drv.c
+@@ -323,12 +323,18 @@ static void pl111_amba_remove(struct amba_device *amba_dev)
+ struct pl111_drm_dev_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
++ drm_atomic_helper_shutdown(drm);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
+ drm_dev_put(drm);
+ of_reserved_mem_device_release(dev);
+ }
+
++static void pl111_amba_shutdown(struct amba_device *amba_dev)
++{
++ drm_atomic_helper_shutdown(amba_get_drvdata(amba_dev));
++}
++
+ /*
+ * This early variant lacks the 565 and 444 pixel formats.
+ */
+@@ -431,6 +437,7 @@ static struct amba_driver pl111_amba_driver __maybe_unused = {
+ },
+ .probe = pl111_amba_probe,
+ .remove = pl111_amba_remove,
++ .shutdown = pl111_amba_shutdown,
+ .id_table = pl111_id_table,
+ };
+
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 6492a70e3c396a..8ee614be9adf36 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -236,6 +236,9 @@ static int qxl_add_mode(struct drm_connector *connector,
+ return 0;
+
+ mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
++ if (!mode)
++ return 0;
++
+ if (preferred)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->hdisplay = width;
+@@ -1229,6 +1232,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
+ if (!qdev->monitors_config_bo)
+ return 0;
+
++ kfree(qdev->dumb_heads);
++ qdev->dumb_heads = NULL;
++
+ qdev->monitors_config = NULL;
+ qdev->ram_header->monitors_config = 0;
+
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index b30ede1cf62d32..91930e84a9cd26 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -283,7 +283,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = {
+ };
+
+ static struct drm_driver qxl_driver = {
+- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
++ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
+
+ .dumb_create = qxl_mode_dumb_create,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 4f06356d9ce2e1..f0ae087be914ee 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4821,14 +4821,15 @@ int evergreen_irq_process(struct radeon_device *rdev)
+ break;
+ case 44: /* hdmi */
+ afmt_idx = src_data;
+- if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
+- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+-
+ if (afmt_idx > 5) {
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ src_id, src_data);
+ break;
+ }
++
++ if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
++ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
+ afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
+diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
+index 0de79f3a7e3ffc..820c2c3641d388 100644
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -395,7 +395,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+- unsigned long offset;
++ u64 offset;
+ int r;
+
+ mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+@@ -433,14 +433,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ return r;
+ }
+
+- offset = track->cb_color_bo_offset[id] << 8;
++ offset = (u64)track->cb_color_bo_offset[id] << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, id, offset, surf.base_align);
+ return -EINVAL;
+ }
+
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+ /* old ddx are broken they allocate bo with w*h*bpp but
+ * program slice with ALIGN(h, 8), catch this and patch
+@@ -448,14 +448,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ */
+ if (!surf.mode) {
+ uint32_t *ib = p->ib.ptr;
+- unsigned long tmp, nby, bsize, size, min = 0;
++ u64 tmp, nby, bsize, size, min = 0;
+
+ /* find the height the ddx wants */
+ if (surf.nby > 8) {
+ min = surf.nby - 8;
+ }
+ bsize = radeon_bo_size(track->cb_color_bo[id]);
+- tmp = track->cb_color_bo_offset[id] << 8;
++ tmp = (u64)track->cb_color_bo_offset[id] << 8;
+ for (nby = surf.nby; nby > min; nby--) {
+ size = nby * surf.nbx * surf.bpe * surf.nsamples;
+ if ((tmp + size * mslice) <= bsize) {
+@@ -467,7 +467,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ slice = ((nby * surf.nbx) / 64) - 1;
+ if (!evergreen_surface_check(p, &surf, "cb")) {
+ /* check if this one works */
+- tmp += surf.layer_size * mslice;
++ tmp += (u64)surf.layer_size * mslice;
+ if (tmp <= bsize) {
+ ib[track->cb_color_slice_idx[id]] = slice;
+ goto old_ddx_ok;
+@@ -476,9 +476,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
+ }
+ }
+ dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+- "offset %d, max layer %d, bo size %ld, slice %d)\n",
++ "offset %llu, max layer %d, bo size %ld, slice %d)\n",
+ __func__, __LINE__, id, surf.layer_size,
+- track->cb_color_bo_offset[id] << 8, mslice,
++ (u64)track->cb_color_bo_offset[id] << 8, mslice,
+ radeon_bo_size(track->cb_color_bo[id]), slice);
+ dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+ __func__, __LINE__, surf.nbx, surf.nby,
+@@ -562,7 +562,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+- unsigned long offset;
++ u64 offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+@@ -608,18 +608,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+ return r;
+ }
+
+- offset = track->db_s_read_offset << 8;
++ offset = (u64)track->db_s_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_read_bo)) {
+ dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+- "offset %ld, max layer %d, bo size %ld)\n",
++ "offset %llu, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+- (unsigned long)track->db_s_read_offset << 8, mslice,
++ (u64)track->db_s_read_offset << 8, mslice,
+ radeon_bo_size(track->db_s_read_bo));
+ dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+@@ -627,18 +627,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+ return -EINVAL;
+ }
+
+- offset = track->db_s_write_offset << 8;
++ offset = (u64)track->db_s_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_write_bo)) {
+ dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+- "offset %ld, max layer %d, bo size %ld)\n",
++ "offset %llu, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+- (unsigned long)track->db_s_write_offset << 8, mslice,
++ (u64)track->db_s_write_offset << 8, mslice,
+ radeon_bo_size(track->db_s_write_bo));
+ return -EINVAL;
+ }
+@@ -659,7 +659,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+- unsigned long offset;
++ u64 offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+@@ -706,34 +706,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+ return r;
+ }
+
+- offset = track->db_z_read_offset << 8;
++ offset = (u64)track->db_z_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_z_read_bo)) {
+ dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
+- "offset %ld, max layer %d, bo size %ld)\n",
++ "offset %llu, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+- (unsigned long)track->db_z_read_offset << 8, mslice,
++ (u64)track->db_z_read_offset << 8, mslice,
+ radeon_bo_size(track->db_z_read_bo));
+ return -EINVAL;
+ }
+
+- offset = track->db_z_write_offset << 8;
++ offset = (u64)track->db_z_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+- dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
++ dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+- offset += surf.layer_size * mslice;
++ offset += (u64)surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_z_write_bo)) {
+ dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
+- "offset %ld, max layer %d, bo size %ld)\n",
++ "offset %llu, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+- (unsigned long)track->db_z_write_offset << 8, mslice,
++ (u64)track->db_z_write_offset << 8, mslice,
+ radeon_bo_size(track->db_z_write_bo));
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 927e5f42e97d01..3e48cbb522a1ca 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -813,7 +813,7 @@ int ni_init_microcode(struct radeon_device *rdev)
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
+- rdev->mc_fw->size, fw_name);
++ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
+index 4c2eec49dadc93..ce8832916704f9 100644
+--- a/drivers/gpu/drm/radeon/pptable.h
++++ b/drivers/gpu/drm/radeon/pptable.h
+@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ typedef struct _ATOM_PPLIB_STATE_V2
+ {
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
++ //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
+ UCHAR ucNumDPMLevels;
+
+ //a index to the array of nonClockInfos
+@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+- UCHAR clockInfoIndex[1];
++ UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
+ } ATOM_PPLIB_STATE_V2;
+
+ typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+- ATOM_PPLIB_STATE_V2 states[1];
++ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
+ }StateArray;
+
+
+@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+- UCHAR clockInfo[1];
++ UCHAR clockInfo[] __counted_by(ucNumEntries);
+ }ClockInfoArray;
+
+ typedef struct _NonClockInfoArray{
+@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
++ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
+ }NonClockInfoArray;
+
+ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index affa9e0309b274..b63b6b4e9b2818 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -1015,45 +1015,65 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
+
+ DRM_DEBUG_KMS("\n");
+
+- if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+- (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+- (rdev->family == CHIP_RS200)) {
++ switch (rdev->family) {
++ case CHIP_R100:
++ case CHIP_RV100:
++ case CHIP_RV200:
++ case CHIP_RS100:
++ case CHIP_RS200:
+ DRM_INFO("Loading R100 Microcode\n");
+ fw_name = FIRMWARE_R100;
+- } else if ((rdev->family == CHIP_R200) ||
+- (rdev->family == CHIP_RV250) ||
+- (rdev->family == CHIP_RV280) ||
+- (rdev->family == CHIP_RS300)) {
++ break;
++
++ case CHIP_R200:
++ case CHIP_RV250:
++ case CHIP_RV280:
++ case CHIP_RS300:
+ DRM_INFO("Loading R200 Microcode\n");
+ fw_name = FIRMWARE_R200;
+- } else if ((rdev->family == CHIP_R300) ||
+- (rdev->family == CHIP_R350) ||
+- (rdev->family == CHIP_RV350) ||
+- (rdev->family == CHIP_RV380) ||
+- (rdev->family == CHIP_RS400) ||
+- (rdev->family == CHIP_RS480)) {
++ break;
++
++ case CHIP_R300:
++ case CHIP_R350:
++ case CHIP_RV350:
++ case CHIP_RV380:
++ case CHIP_RS400:
++ case CHIP_RS480:
+ DRM_INFO("Loading R300 Microcode\n");
+ fw_name = FIRMWARE_R300;
+- } else if ((rdev->family == CHIP_R420) ||
+- (rdev->family == CHIP_R423) ||
+- (rdev->family == CHIP_RV410)) {
++ break;
++
++ case CHIP_R420:
++ case CHIP_R423:
++ case CHIP_RV410:
+ DRM_INFO("Loading R400 Microcode\n");
+ fw_name = FIRMWARE_R420;
+- } else if ((rdev->family == CHIP_RS690) ||
+- (rdev->family == CHIP_RS740)) {
++ break;
++
++ case CHIP_RS690:
++ case CHIP_RS740:
+ DRM_INFO("Loading RS690/RS740 Microcode\n");
+ fw_name = FIRMWARE_RS690;
+- } else if (rdev->family == CHIP_RS600) {
++ break;
++
++ case CHIP_RS600:
+ DRM_INFO("Loading RS600 Microcode\n");
+ fw_name = FIRMWARE_RS600;
+- } else if ((rdev->family == CHIP_RV515) ||
+- (rdev->family == CHIP_R520) ||
+- (rdev->family == CHIP_RV530) ||
+- (rdev->family == CHIP_R580) ||
+- (rdev->family == CHIP_RV560) ||
+- (rdev->family == CHIP_RV570)) {
++ break;
++
++ case CHIP_RV515:
++ case CHIP_R520:
++ case CHIP_RV530:
++ case CHIP_R580:
++ case CHIP_RV560:
++ case CHIP_RV570:
+ DRM_INFO("Loading R500 Microcode\n");
+ fw_name = FIRMWARE_R520;
++ break;
++
++ default:
++ DRM_ERROR("Unsupported Radeon family %u\n", rdev->family);
++ return -EINVAL;
+ }
+
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+@@ -2321,7 +2341,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ switch (prim_walk) {
+ case 1:
+ for (i = 0; i < track->num_arrays; i++) {
+- size = track->arrays[i].esize * track->max_indx * 4;
++ size = track->arrays[i].esize * track->max_indx * 4UL;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+@@ -2340,7 +2360,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ break;
+ case 2:
+ for (i = 0; i < track->num_arrays; i++) {
+- size = track->arrays[i].esize * (nverts - 1) * 4;
++ size = track->arrays[i].esize * (nverts - 1) * 4UL;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
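
Note: the 4UL suffix above serves the same purpose as the (u64) casts earlier; without it the final multiply is done in 32-bit int arithmetic and can wrap before the size check. Sketch with hypothetical values (the promotion helps on LP64 targets, where unsigned long is 64-bit):

#include <stdint.h>

void demo(void)
{
	uint32_t esize = 1, max_indx = 0x40000000;

	uint64_t ok  = esize * max_indx * 4UL; /* 0x100000000 on LP64 */
	uint32_t bad = esize * max_indx * 4;   /* wraps to 0 */
	(void)ok; (void)bad;
}
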
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 638f861af80fa1..6cf54a747749d3 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -1275,7 +1275,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 4;
+- track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
++ track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+@@ -1302,7 +1302,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+- track->htile_offset = radeon_get_ib_value(p, idx) << 8;
++ track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 8afb03bbce2984..426a49851e3497 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -132,7 +132,6 @@ extern int radeon_cik_support;
+ /* RADEON_IB_POOL_SIZE must be a power of 2 */
+ #define RADEON_IB_POOL_SIZE 16
+ #define RADEON_DEBUGFS_MAX_COMPONENTS 32
+-#define RADEONFB_CONN_LIMIT 4
+ #define RADEON_BIOS_NUM_SCRATCH 8
+
+ /* internal ring indices */
+@@ -2215,10 +2214,6 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+-int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+-int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
+ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 85c4bb186203c3..53c7273eb6a5cf 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -922,8 +922,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+ for (i = 0; i < max_device; i++) {
+- ATOM_CONNECTOR_INFO_I2C ci =
+- supported_devices->info.asConnInfo[i];
++ ATOM_CONNECTOR_INFO_I2C ci;
++
++ if (frev > 1)
++ ci = supported_devices->info_2d1.asConnInfo[i];
++ else
++ ci = supported_devices->info.asConnInfo[i];
+
+ bios_connectors[i].valid = false;
+
+@@ -1712,26 +1716,29 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+- int edid_size =
+- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+- edid = kmalloc(edid_size, GFP_KERNEL);
++ int edid_size;
++
++ if (fake_edid_record->ucFakeEDIDLength == 128)
++ edid_size = fake_edid_record->ucFakeEDIDLength;
++ else
++ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
++ edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0],
++ edid_size, GFP_KERNEL);
+ if (edid) {
+- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+- fake_edid_record->ucFakeEDIDLength);
+-
+ if (drm_edid_is_valid(edid)) {
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+- } else
++ } else {
+ kfree(edid);
++ }
+ }
++ record += struct_size(fake_edid_record,
++ ucFakeEDIDString,
++ edid_size);
++ } else {
++ /* empty fake edid record must be 3 bytes long */
++ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+ }
+- record += fake_edid_record->ucFakeEDIDLength ?
+- struct_size(fake_edid_record,
+- ucFakeEDIDString,
+- fake_edid_record->ucFakeEDIDLength) :
+- /* empty fake edid record must be 3 bytes long */
+- sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
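
Note: besides switching kmalloc()+memcpy() to kmemdup(), the hunk above encodes a size rule for the fake-EDID record: a ucFakeEDIDLength of exactly 128 is taken as a byte count (one EDID block), any other value as a count of 128-byte blocks. A one-line restatement (inferred from the hunk, not from ATOM documentation):

static int fake_edid_size(unsigned char len)
{
	return len == 128 ? len : len * 128;
}
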
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index d2f02c3dfce297..b84b58926106a4 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1119,6 +1119,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
+ else {
+ /* only 800x600 is supported right now on pre-avivo chips */
+ tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
++ if (!tv_mode)
++ return 0;
+ tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, tv_mode);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 901e75ec70ff41..5f1d24d3120c4a 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -683,15 +683,20 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc;
+
+- radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
++ radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL);
+ if (radeon_crtc == NULL)
+ return;
+
++ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
++ if (!radeon_crtc->flip_queue) {
++ kfree(radeon_crtc);
++ return;
++ }
++
+ drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+ radeon_crtc->crtc_id = index;
+- radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
+ rdev->mode_info.crtcs[index] = radeon_crtc;
+
+ if (rdev->family >= CHIP_BONAIRE) {
+@@ -704,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
+
+-#if 0
+- radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+- radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+- radeon_crtc->mode_set.num_connectors = 0;
+-#endif
+-
+ if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+ radeon_atombios_init_crtc(dev, radeon_crtc);
+ else
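
Note: allocating the flip workqueue before drm_crtc_init() is what makes the new error path safe; after registration the embedded CRTC is reachable from DRM core and could not simply be kfree()d. A kernel-style sketch of the ordering (function name hypothetical):

static struct radeon_crtc *crtc_alloc(struct drm_device *dev)
{
	struct radeon_crtc *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
	if (!c->flip_queue) {
		kfree(c);   /* nothing registered yet, a plain free is fine */
		return NULL;
	}

	return c;           /* only now safe to pass to drm_crtc_init() */
}
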
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index fa531493b11134..7bf08164140ef7 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -555,8 +555,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+- DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
+- DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 9cb6401fe97ed3..bb908f125269dc 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -42,7 +42,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *clone_encoder;
+- uint32_t index_mask = 0;
++ uint32_t index_mask = drm_encoder_mask(encoder);
+ int count;
+
+ /* DIG routing gets problematic */
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index 358d19242f4ba2..27225d1fe8d2e7 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -311,22 +311,6 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ return 0;
+ }
+
+-int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp)
+-{
+- /* TODO: implement */
+- DRM_ERROR("unimplemented %s\n", __func__);
+- return -EOPNOTSUPP;
+-}
+-
+-int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *filp)
+-{
+- /* TODO: implement */
+- DRM_ERROR("unimplemented %s\n", __func__);
+- return -EOPNOTSUPP;
+-}
+-
+ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+ {
+@@ -657,7 +641,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+ if (r)
+ goto error_unlock;
+
+- if (bo_va->it.start)
++ if (bo_va->it.start && bo_va->bo)
+ r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
+
+ error_unlock:
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 987cabbf1318e9..c38b4d5d6a14f5 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+ r = radeon_bo_create(rdev, pd_size, align, true,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ NULL, &vm->page_directory);
+- if (r)
++ if (r) {
++ kfree(vm->page_tables);
++ vm->page_tables = NULL;
+ return r;
+-
++ }
+ r = radeon_vm_clear_bo(rdev, vm->page_directory);
+ if (r) {
+ radeon_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
++ kfree(vm->page_tables);
++ vm->page_tables = NULL;
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index a91012447b56ed..85e9cba49cecb2 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev)
+ for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+ ring = &rdev->ring[i];
+ r = radeon_ring_lock(rdev, ring, 2);
++ if (r) {
++ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
++ return r;
++ }
+
+ /* clear the compute context state */
+ radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
+index f74f381af05fdf..f7f1ddc6cdd810 100644
+--- a/drivers/gpu/drm/radeon/sumo_dpm.c
++++ b/drivers/gpu/drm/radeon/sumo_dpm.c
+@@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+- if (!rdev->pm.power_state[i].clock_info)
++ if (!rdev->pm.power_state[i].clock_info) {
++ kfree(rdev->pm.dpm.ps);
+ return -EINVAL;
++ }
+ ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+@@ -1619,6 +1621,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ if (table[i].ulSupportedSCLK != 0) {
++ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
++ continue;
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ table[i].usVoltageID;
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
+index 08ea1c864cb238..ef1cc7bad20a76 100644
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+- if (!rdev->pm.power_state[i].clock_info)
++ if (!rdev->pm.power_state[i].clock_info) {
++ kfree(rdev->pm.dpm.ps);
+ return -EINVAL;
++ }
+ ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index a29fbafce39366..3793863c210ebd 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1177,6 +1177,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ struct cdn_dp_device *dp;
+ struct extcon_dev *extcon;
+ struct phy *phy;
++ int ret;
+ int i;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+@@ -1217,9 +1218,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
+ mutex_init(&dp->lock);
+ dev_set_drvdata(dev, dp);
+
+- cdn_dp_audio_codec_init(dp, dev);
++ ret = cdn_dp_audio_codec_init(dp, dev);
++ if (ret)
++ return ret;
++
++ ret = component_add(dev, &cdn_dp_component_ops);
++ if (ret)
++ goto err_audio_deinit;
+
+- return component_add(dev, &cdn_dp_component_ops);
++ return 0;
++
++err_audio_deinit:
++ platform_device_unregister(dp->audio_pdev);
++ return ret;
+ }
+
+ static void cdn_dp_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+index 341550199111f9..89bc86d620146c 100644
+--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+@@ -435,6 +435,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
+ HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
+ RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
+ RK3328_HDMI_HPD_IOE));
++
++ dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
+ }
+
+ static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
+diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
+index 6e5b922a121e24..345253e033c538 100644
+--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
++++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
+@@ -412,7 +412,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
+
+- value = mode->hsync_start - mode->hdisplay;
++ value = mode->htotal - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
+
+@@ -427,7 +427,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
+ value = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
+
+- value = mode->vsync_start - mode->vdisplay;
++ value = mode->vtotal - mode->vsync_start;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
+
+ value = mode->vsync_end - mode->vsync_start;
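
Note: the swapped operands fix which blanking interval gets programmed; the new expressions give sync width plus back porch, which is what the EXT_HDELAY/EXT_VDELAY registers appear to expect here, rather than the front porch. Worked example with the standard 1080p60 mode line (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200):

/* old: hsync_start - hdisplay = 2008 - 1920 =  88  (front porch)       */
/* new: htotal - hsync_start   = 2200 - 2008 = 192  (sync + back porch) */
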
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index b8f8b45ebf5940..93ed841f5dceae 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -40,7 +40,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
+
+ ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+ prot);
+- if (ret < rk_obj->base.size) {
++ if (ret < (ssize_t)rk_obj->base.size) {
+ DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
+ ret, rk_obj->base.size);
+ ret = -ENOMEM;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 14320bc73e5bfc..ee72e8c6ad69bd 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -247,14 +247,22 @@ static inline void vop_cfg_done(struct vop *vop)
+ VOP_REG_SET(vop, common, cfg_done, 1);
+ }
+
+-static bool has_rb_swapped(uint32_t format)
++static bool has_rb_swapped(uint32_t version, uint32_t format)
+ {
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+- case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR565:
+ return true;
++	/*
++	 * The full framework (3.x) only needs rb swapped for RGB888; the
++	 * little framework (2.x) only for BGR888. Checking against 3.x
++	 * means an unknown VOP version also rb swaps only BGR888.
++	 */
++ case DRM_FORMAT_RGB888:
++ return VOP_MAJOR(version) == 3;
++ case DRM_FORMAT_BGR888:
++ return VOP_MAJOR(version) != 3;
+ default:
+ return false;
+ }
+@@ -373,8 +381,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
+ if (info->is_yuv)
+ is_yuv = true;
+
+- if (dst_w > 3840) {
+- DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
++ if (dst_w > 4096) {
++ DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n");
+ return;
+ }
+
+@@ -1013,7 +1021,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
+ VOP_WIN_SET(vop, win, dsp_info, dsp_info);
+ VOP_WIN_SET(vop, win, dsp_st, dsp_st);
+
+- rb_swap = has_rb_swapped(fb->format->format);
++ rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
+ VOP_WIN_SET(vop, win, rb_swap, rb_swap);
+
+ /*
+@@ -1558,6 +1566,10 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
+ VOP_AFBC_SET(vop, enable, s->enable_afbc);
+ vop_cfg_done(vop);
+
++ /* Ack the DMA transfer of the previous frame (RK3066). */
++ if (VOP_HAS_REG(vop, common, dma_stop))
++ VOP_REG_SET(vop, common, dma_stop, 0);
++
+ spin_unlock(&vop->reg_lock);
+
+ /*
+@@ -1614,7 +1626,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+- rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
++ rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
++ sizeof(*rockchip_state), GFP_KERNEL);
+ if (!rockchip_state)
+ return NULL;
+
+@@ -1639,7 +1652,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
+ if (crtc->state)
+ vop_crtc_destroy_state(crtc, crtc->state);
+
+- __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++ if (crtc_state)
++ __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
++ else
++ __drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+
+ #ifdef CONFIG_DRM_ANALOGIX_DP
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+index 5f56e0597df84a..c5c716a69171a8 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+@@ -122,6 +122,7 @@ struct vop_common {
+ struct vop_reg lut_buffer_index;
+ struct vop_reg gate_en;
+ struct vop_reg mmu_en;
++ struct vop_reg dma_stop;
+ struct vop_reg out_mode;
+ struct vop_reg standby;
+ };
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 583df4d22f7e90..d1de12e850e746 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -609,6 +609,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
+ const struct drm_format_info *info;
+ u16 hor_scl_mode, ver_scl_mode;
+ u16 hscl_filter_mode, vscl_filter_mode;
++ uint16_t cbcr_src_w = src_w;
++ uint16_t cbcr_src_h = src_h;
+ u8 gt2 = 0;
+ u8 gt4 = 0;
+ u32 val;
+@@ -666,27 +668,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
+ vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);
+
+ if (info->is_yuv) {
+- src_w /= info->hsub;
+- src_h /= info->vsub;
++ cbcr_src_w /= info->hsub;
++ cbcr_src_h /= info->vsub;
+
+ gt4 = 0;
+ gt2 = 0;
+
+- if (src_h >= (4 * dst_h)) {
++ if (cbcr_src_h >= (4 * dst_h)) {
+ gt4 = 1;
+- src_h >>= 2;
+- } else if (src_h >= (2 * dst_h)) {
++ cbcr_src_h >>= 2;
++ } else if (cbcr_src_h >= (2 * dst_h)) {
+ gt2 = 1;
+- src_h >>= 1;
++ cbcr_src_h >>= 1;
+ }
+
+- hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+- ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
++ hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
++ ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+
+- val = vop2_scale_factor(src_w, dst_w);
++ val = vop2_scale_factor(cbcr_src_w, dst_w);
+ vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);
+
+- val = vop2_scale_factor(src_h, dst_h);
++ val = vop2_scale_factor(cbcr_src_h, dst_h);
+ vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);
+
+ vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
+@@ -1258,6 +1260,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
+ vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270);
+ vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90);
+ } else {
++ if (vop2_cluster_window(win)) {
++ vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
++ vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
++ }
++
+ vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
+ }
+
+@@ -1925,7 +1932,7 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX,
+ (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1));
+ else
+- port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8);
++ port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8);
+
+ layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL);
+
+@@ -2079,30 +2086,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
+ .atomic_disable = vop2_crtc_atomic_disable,
+ };
+
+-static void vop2_crtc_reset(struct drm_crtc *crtc)
+-{
+- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+-
+- if (crtc->state) {
+- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+- kfree(vcstate);
+- }
+-
+- vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
+- if (!vcstate)
+- return;
+-
+- crtc->state = &vcstate->base;
+- crtc->state->crtc = crtc;
+-}
+-
+ static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
+ {
+- struct rockchip_crtc_state *vcstate, *old_vcstate;
++ struct rockchip_crtc_state *vcstate;
+
+- old_vcstate = to_rockchip_crtc_state(crtc->state);
++ if (WARN_ON(!crtc->state))
++ return NULL;
+
+- vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL);
++ vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
++ sizeof(*vcstate), GFP_KERNEL);
+ if (!vcstate)
+ return NULL;
+
+@@ -2120,6 +2112,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
+ kfree(vcstate);
+ }
+
++static void vop2_crtc_reset(struct drm_crtc *crtc)
++{
++ struct rockchip_crtc_state *vcstate =
++ kzalloc(sizeof(*vcstate), GFP_KERNEL);
++
++ if (crtc->state)
++ vop2_crtc_destroy_state(crtc, crtc->state);
++
++ if (vcstate)
++ __drm_atomic_helper_crtc_reset(crtc, &vcstate->base);
++ else
++ __drm_atomic_helper_crtc_reset(crtc, NULL);
++}
++
+ static const struct drm_crtc_funcs vop2_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
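
Note: vop2_crtc_reset() now follows the same shape as the vop1 version earlier in this series: destroy any previous state through the driver hook, then reset to the fresh allocation, or to NULL if kzalloc() failed, rather than dereferencing a failed allocation. Condensed kernel-style sketch (type and hook names hypothetical):

struct my_crtc_state {
	struct drm_crtc_state base;
	/* driver-private fields */
};

static void my_crtc_reset(struct drm_crtc *crtc)
{
	struct my_crtc_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (crtc->state)
		my_crtc_destroy_state(crtc, crtc->state);

	/* passing NULL is valid; the helper simply leaves no state */
	__drm_atomic_helper_crtc_reset(crtc, s ? &s->base : NULL);
}
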
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 5828593877923f..1b6e0b210aa530 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -577,8 +577,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ ret = -EINVAL;
+ goto err_put_port;
+ } else if (ret) {
+- DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
+- ret = -EPROBE_DEFER;
++ dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
+ goto err_put_port;
+ }
+ if (lvds->panel)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+index 7b28050067769d..f7d0edd762b36c 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
++++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+@@ -435,6 +435,7 @@ static const struct vop_output rk3066_output = {
+ };
+
+ static const struct vop_common rk3066_common = {
++ .dma_stop = VOP_REG(RK3066_SYS_CTRL0, 0x1, 0),
+ .standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
+ .out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
+@@ -483,6 +484,7 @@ static const struct vop_data rk3066_vop = {
+ .output = &rk3066_output,
+ .win = rk3066_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3066_vop_win_data),
++ .feature = VOP_FEATURE_INTERNAL_RGB,
+ .max_output = { 1920, 1080 },
+ };
+
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index a42763e1429dc1..d3462be7493037 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -111,8 +111,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ {
+ WARN_ON(!num_sched_list || !sched_list);
+
++ spin_lock(&entity->rq_lock);
+ entity->sched_list = sched_list;
+ entity->num_sched_list = num_sched_list;
++ spin_unlock(&entity->rq_lock);
+ }
+ EXPORT_SYMBOL(drm_sched_entity_modify_sched);
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index 5a80b228d18cae..deec6acdcf6462 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -267,7 +267,7 @@ static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
+
+ pwm_init_state(ssd130x->pwm, &pwmstate);
+ pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
+- pwm_apply_state(ssd130x->pwm, &pwmstate);
++ pwm_apply_might_sleep(ssd130x->pwm, &pwmstate);
+
+ /* Enable the PWM */
+ pwm_enable(ssd130x->pwm);
+@@ -553,14 +553,45 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
+ struct ssd130x_plane_state *ssd130x_state)
+ {
+- struct drm_rect fullscreen = {
+- .x1 = 0,
+- .x2 = ssd130x->width,
+- .y1 = 0,
+- .y2 = ssd130x->height,
+- };
+-
+- ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen);
++ unsigned int page_height = ssd130x->device_info->page_height;
++ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
++ u8 *data_array = ssd130x_state->data_array;
++ unsigned int width = ssd130x->width;
++ int ret, i;
++
++ if (!ssd130x->page_address_mode) {
++ memset(data_array, 0, width * pages);
++
++ /* Set address range for horizontal addressing mode */
++ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset, width);
++ if (ret < 0)
++ return;
++
++ ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset, pages);
++ if (ret < 0)
++ return;
++
++ /* Write out update in one go if we aren't using page addressing mode */
++ ssd130x_write_data(ssd130x, data_array, width * pages);
++ } else {
++ /*
++ * In page addressing mode, the start address needs to be reset,
++ * and each page then needs to be written out separately.
++ */
++ memset(data_array, 0, width);
++
++ for (i = 0; i < pages; i++) {
++ ret = ssd130x_set_page_pos(ssd130x,
++ ssd130x->page_offset + i,
++ ssd130x->col_offset);
++ if (ret < 0)
++ return;
++
++ ret = ssd130x_write_data(ssd130x, data_array, width);
++ if (ret < 0)
++ return;
++ }
++ }
+ }
+
+ static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
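
Note: the rewritten clear path sizes its transfers from the page geometry instead of reusing the rectangle-update helper. For a typical SSD1306-class panel (assumption: 128x64 with a page height of 8 rows, one bit per pixel) that works out to:

/* pages = DIV_ROUND_UP(64, 8) = 8                                    */
/* horizontal mode: one memset and one write of 128 * 8 = 1024 bytes  */
/* page mode: 8 writes of 128 bytes, repositioning before each        */
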
+diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
+index c68c831136c9b0..e1232f74dfa537 100644
+--- a/drivers/gpu/drm/stm/drv.c
++++ b/drivers/gpu/drm/stm/drv.c
+@@ -25,6 +25,7 @@
+ #include <drm/drm_module.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/drm_vblank.h>
++#include <drm/drm_managed.h>
+
+ #include "ltdc.h"
+
+@@ -75,7 +76,7 @@ static int drv_load(struct drm_device *ddev)
+
+ DRM_DEBUG("%s\n", __func__);
+
+- ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL);
++ ldev = drmm_kzalloc(ddev, sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+@@ -114,6 +115,7 @@ static void drv_unload(struct drm_device *ddev)
+ DRM_DEBUG("%s\n", __func__);
+
+ drm_kms_helper_poll_fini(ddev);
++ drm_atomic_helper_shutdown(ddev);
+ ltdc_unload(ddev);
+ }
+
+@@ -202,12 +204,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+- goto err_put;
++ goto err_unload;
+
+ drm_fbdev_dma_setup(ddev, 16);
+
+ return 0;
+
++err_unload:
++ drv_unload(ddev);
+ err_put:
+ drm_dev_put(ddev);
+
+@@ -225,6 +229,11 @@ static void stm_drm_platform_remove(struct platform_device *pdev)
+ drm_dev_put(ddev);
+ }
+
++static void stm_drm_platform_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id drv_dt_ids[] = {
+ { .compatible = "st,stm32-ltdc"},
+ { /* end node */ },
+@@ -234,6 +243,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
+ static struct platform_driver stm_drm_platform_driver = {
+ .probe = stm_drm_platform_probe,
+ .remove_new = stm_drm_platform_remove,
++ .shutdown = stm_drm_platform_shutdown,
+ .driver = {
+ .name = "stm32-display",
+ .of_match_table = drv_dt_ids,
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 5576fdae496233..0832b749b66e7f 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -36,6 +36,7 @@
+ #include <drm/drm_probe_helper.h>
+ #include <drm/drm_simple_kms_helper.h>
+ #include <drm/drm_vblank.h>
++#include <drm/drm_managed.h>
+
+ #include <video/videomode.h>
+
+@@ -1199,7 +1200,6 @@ static void ltdc_crtc_atomic_print_state(struct drm_printer *p,
+ }
+
+ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
+- .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+@@ -1212,7 +1212,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
+ };
+
+ static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
+- .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+@@ -1514,6 +1513,9 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
+ /* Disable layer */
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, 0);
+
++ /* Reset the layer transparency to hide any related background color */
++ regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, 0x00);
++
+ /* Commit shadow registers = update plane at next vblank */
+ if (ldev->caps.plane_reg_shadow)
+ regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
+@@ -1545,7 +1547,6 @@ static void ltdc_plane_atomic_print_state(struct drm_printer *p,
+ static const struct drm_plane_funcs ltdc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+- .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+@@ -1572,7 +1573,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
+ const u64 *modifiers = ltdc_format_modifiers;
+ u32 lofs = index * LAY_OFS;
+ u32 val;
+- int ret;
+
+ /* Allocate the biggest size according to supported color formats */
+ formats = devm_kzalloc(dev, (ldev->caps.pix_fmt_nb +
+@@ -1580,6 +1580,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
+ ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
+ sizeof(*formats), GFP_KERNEL);
++ if (!formats)
++ return NULL;
+
+ for (i = 0; i < ldev->caps.pix_fmt_nb; i++) {
+ drm_fmt = ldev->caps.pix_fmt_drm[i];
+@@ -1613,14 +1615,10 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
+ }
+ }
+
+- plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
+- if (!plane)
+- return NULL;
+-
+- ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
+- &ltdc_plane_funcs, formats, nb_fmt,
+- modifiers, type, NULL);
+- if (ret < 0)
++ plane = drmm_universal_plane_alloc(ddev, struct drm_plane, dev,
++ possible_crtcs, &ltdc_plane_funcs, formats,
++ nb_fmt, modifiers, type, NULL);
++ if (IS_ERR(plane))
+ return NULL;
+
+ if (ldev->caps.ycbcr_input) {
+@@ -1643,15 +1641,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
+ return plane;
+ }
+
+-static void ltdc_plane_destroy_all(struct drm_device *ddev)
+-{
+- struct drm_plane *plane, *plane_temp;
+-
+- list_for_each_entry_safe(plane, plane_temp,
+- &ddev->mode_config.plane_list, head)
+- drm_plane_cleanup(plane);
+-}
+-
+ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
+ {
+ struct ltdc_device *ldev = ddev->dev_private;
+@@ -1677,14 +1666,14 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
+
+ /* Init CRTC according to its hardware features */
+ if (ldev->caps.crc)
+- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+- &ltdc_crtc_with_crc_support_funcs, NULL);
++ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
++ &ltdc_crtc_with_crc_support_funcs, NULL);
+ else
+- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+- &ltdc_crtc_funcs, NULL);
++ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
++ &ltdc_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("Can not initialize CRTC\n");
+- goto cleanup;
++ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &ltdc_crtc_helper_funcs);
+@@ -1698,9 +1687,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
+ for (i = 1; i < ldev->caps.nb_layers; i++) {
+ overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
+ if (!overlay) {
+- ret = -ENOMEM;
+ DRM_ERROR("Can not create overlay plane %d\n", i);
+- goto cleanup;
++ return -ENOMEM;
+ }
+ if (ldev->caps.dynamic_zorder)
+ drm_plane_create_zpos_property(overlay, i, 0, ldev->caps.nb_layers - 1);
+@@ -1713,10 +1701,6 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
+ }
+
+ return 0;
+-
+-cleanup:
+- ltdc_plane_destroy_all(ddev);
+- return ret;
+ }
+
+ static void ltdc_encoder_disable(struct drm_encoder *encoder)
+@@ -1776,23 +1760,19 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
+ struct drm_encoder *encoder;
+ int ret;
+
+- encoder = devm_kzalloc(ddev->dev, sizeof(*encoder), GFP_KERNEL);
+- if (!encoder)
+- return -ENOMEM;
++ encoder = drmm_simple_encoder_alloc(ddev, struct drm_encoder, dev,
++ DRM_MODE_ENCODER_DPI);
++ if (IS_ERR(encoder))
++ return PTR_ERR(encoder);
+
+ encoder->possible_crtcs = CRTC_MASK;
+ encoder->possible_clones = 0; /* No cloning support */
+
+- drm_simple_encoder_init(ddev, encoder, DRM_MODE_ENCODER_DPI);
+-
+ drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
+- if (ret) {
+- if (ret != -EPROBE_DEFER)
+- drm_encoder_cleanup(encoder);
++ if (ret)
+ return ret;
+- }
+
+ DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
+
+@@ -1962,8 +1942,7 @@ int ltdc_load(struct drm_device *ddev)
+ goto err;
+
+ if (panel) {
+- bridge = drm_panel_bridge_add_typed(panel,
+- DRM_MODE_CONNECTOR_DPI);
++ bridge = drmm_panel_bridge_add(ddev, panel);
+ if (IS_ERR(bridge)) {
+ DRM_ERROR("panel-bridge endpoint %d\n", i);
+ ret = PTR_ERR(bridge);
+@@ -2045,7 +2024,7 @@ int ltdc_load(struct drm_device *ddev)
+ }
+ }
+
+- crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
++ crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc) {
+ DRM_ERROR("Failed to allocate crtc\n");
+ ret = -ENOMEM;
+@@ -2072,9 +2051,6 @@ int ltdc_load(struct drm_device *ddev)
+
+ return 0;
+ err:
+- for (i = 0; i < nb_endpoints; i++)
+- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
+-
+ clk_disable_unprepare(ldev->pixel_clk);
+
+ return ret;
+@@ -2082,16 +2058,8 @@ int ltdc_load(struct drm_device *ddev)
+
+ void ltdc_unload(struct drm_device *ddev)
+ {
+- struct device *dev = ddev->dev;
+- int nb_endpoints, i;
+-
+ DRM_DEBUG_DRIVER("\n");
+
+- nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
+-
+- for (i = 0; i < nb_endpoints; i++)
+- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
+-
+ pm_runtime_disable(ddev->dev);
+ }
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+index 152375f3de2e29..bae69d69676546 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+@@ -82,7 +82,8 @@ static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder,
+ return 0;
+ }
+
+-static void sun4i_hdmi_disable(struct drm_encoder *encoder)
++static void sun4i_hdmi_disable(struct drm_encoder *encoder,
++ struct drm_atomic_state *state)
+ {
+ struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+ u32 val;
+@@ -96,37 +97,17 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
+ clk_disable_unprepare(hdmi->tmds_clk);
+ }
+
+-static void sun4i_hdmi_enable(struct drm_encoder *encoder)
++static void sun4i_hdmi_enable(struct drm_encoder *encoder,
++ struct drm_atomic_state *state)
+ {
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+ struct drm_display_info *display = &hdmi->connector.display_info;
++ unsigned int x, y;
+ u32 val = 0;
+
+ DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
+
+- clk_prepare_enable(hdmi->tmds_clk);
+-
+- sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
+- val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
+- val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
+- writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0));
+-
+- val = SUN4I_HDMI_VID_CTRL_ENABLE;
+- if (display->is_hdmi)
+- val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE;
+-
+- writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+-}
+-
+-static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
+- struct drm_display_mode *mode,
+- struct drm_display_mode *adjusted_mode)
+-{
+- struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+- unsigned int x, y;
+- u32 val;
+-
+ clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
+ clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
+
+@@ -178,6 +159,19 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
+ val |= SUN4I_HDMI_VID_TIMING_POL_VSYNC;
+
+ writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
++
++ clk_prepare_enable(hdmi->tmds_clk);
++
++ sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
++ val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
++ val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
++ writel(val, hdmi->base + SUN4I_HDMI_PKT_CTRL_REG(0));
++
++ val = SUN4I_HDMI_VID_CTRL_ENABLE;
++ if (display->is_hdmi)
++ val |= SUN4I_HDMI_VID_CTRL_HDMI_MODE;
++
++ writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+ }
+
+ static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+@@ -201,9 +195,8 @@ static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+
+ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
+ .atomic_check = sun4i_hdmi_atomic_check,
+- .disable = sun4i_hdmi_disable,
+- .enable = sun4i_hdmi_enable,
+- .mode_set = sun4i_hdmi_mode_set,
++ .atomic_disable = sun4i_hdmi_disable,
++ .atomic_enable = sun4i_hdmi_enable,
+ .mode_valid = sun4i_hdmi_mode_valid,
+ };
+
+diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
+index ef02d530f78d75..ae12d001a04bfb 100644
+--- a/drivers/gpu/drm/tegra/dpaux.c
++++ b/drivers/gpu/drm/tegra/dpaux.c
+@@ -522,7 +522,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
+ dpaux->irq, err);
+- return err;
++ goto err_pm_disable;
+ }
+
+ disable_irq(dpaux->irq);
+@@ -542,7 +542,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ */
+ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
+ if (err < 0)
+- return err;
++ goto err_pm_disable;
+
+ #ifdef CONFIG_GENERIC_PINCONF
+ dpaux->desc.name = dev_name(&pdev->dev);
+@@ -555,7 +555,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+ if (IS_ERR(dpaux->pinctrl)) {
+ dev_err(&pdev->dev, "failed to register pincontrol\n");
+- return PTR_ERR(dpaux->pinctrl);
++ err = PTR_ERR(dpaux->pinctrl);
++ goto err_pm_disable;
+ }
+ #endif
+ /* enable and clear all interrupts */
+@@ -571,10 +572,15 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
+ err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+- return err;
++ goto err_pm_disable;
+ }
+
+ return 0;
++
++err_pm_disable:
++ pm_runtime_put_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ return err;
+ }
+
+ static void tegra_dpaux_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index ff36171c8fb700..373bcd79257e0c 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -1242,9 +1242,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
+
+ drm_mode_config_reset(drm);
+
+- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+- if (err < 0)
+- goto hub;
++ /*
++ * Only take over from a potential firmware framebuffer if any CRTCs
++ * have been registered. This must not be a fatal error because there
++ * are other accelerators that are exposed via this driver.
++ *
++ * Another case where this happens is on Tegra234 where the display
++ * hardware is no longer part of the host1x complex, so this driver
++ * will not expose any modesetting features.
++ */
++ if (drm->mode_config.num_crtc > 0) {
++ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
++ if (err < 0)
++ goto hub;
++ } else {
++ /*
++ * Indicate to userspace that this doesn't expose any display
++ * capabilities.
++ */
++ drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
++ }
+
+ err = drm_dev_register(drm, 0);
+ if (err < 0)
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index a9870c82837499..839dbad9bc483d 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1543,9 +1543,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+ if (np) {
+ struct platform_device *gangster = of_find_device_by_node(np);
++ of_node_put(np);
++ if (!gangster)
++ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+- of_node_put(np);
+
+ if (!dsi->slave) {
+ put_device(&gangster->dev);
+@@ -1593,44 +1595,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
+
+ if (!pdev->dev.pm_domain) {
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+- if (IS_ERR(dsi->rst))
+- return PTR_ERR(dsi->rst);
++ if (IS_ERR(dsi->rst)) {
++ err = PTR_ERR(dsi->rst);
++ goto remove;
++ }
+ }
+
+ dsi->clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(dsi->clk))
+- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
+- "cannot get DSI clock\n");
++ if (IS_ERR(dsi->clk)) {
++ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
++ "cannot get DSI clock\n");
++ goto remove;
++ }
+
+ dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+- if (IS_ERR(dsi->clk_lp))
+- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
+- "cannot get low-power clock\n");
++ if (IS_ERR(dsi->clk_lp)) {
++ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
++ "cannot get low-power clock\n");
++ goto remove;
++ }
+
+ dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+- if (IS_ERR(dsi->clk_parent))
+- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
+- "cannot get parent clock\n");
++ if (IS_ERR(dsi->clk_parent)) {
++ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
++ "cannot get parent clock\n");
++ goto remove;
++ }
+
+ dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+- if (IS_ERR(dsi->vdd))
+- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
+- "cannot get VDD supply\n");
++ if (IS_ERR(dsi->vdd)) {
++ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
++ "cannot get VDD supply\n");
++ goto remove;
++ }
+
+ err = tegra_dsi_setup_clocks(dsi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot setup clocks\n");
+- return err;
++ goto remove;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+- if (IS_ERR(dsi->regs))
+- return PTR_ERR(dsi->regs);
++ if (IS_ERR(dsi->regs)) {
++ err = PTR_ERR(dsi->regs);
++ goto remove;
++ }
+
+ dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
+- if (IS_ERR(dsi->mipi))
+- return PTR_ERR(dsi->mipi);
++ if (IS_ERR(dsi->mipi)) {
++ err = PTR_ERR(dsi->mipi);
++ goto remove;
++ }
+
+ dsi->host.ops = &tegra_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+@@ -1658,9 +1674,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
+ return 0;
+
+ unregister:
++ pm_runtime_disable(&pdev->dev);
+ mipi_dsi_host_unregister(&dsi->host);
+ mipi_free:
+ tegra_mipi_free(dsi->mipi);
++remove:
++ tegra_output_remove(&dsi->output);
+ return err;
+ }
+
+diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
+index a719af1dc9a573..46170753699dc0 100644
+--- a/drivers/gpu/drm/tegra/fb.c
++++ b/drivers/gpu/drm/tegra/fb.c
+@@ -159,6 +159,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+
+ if (gem->size < size) {
+ err = -EINVAL;
++ drm_gem_object_put(gem);
+ goto unreference;
+ }
+
+diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
+index a4023163493dca..a825fbbc01af6b 100644
+--- a/drivers/gpu/drm/tegra/gem.c
++++ b/drivers/gpu/drm/tegra/gem.c
+@@ -177,7 +177,7 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map)
+ static void *tegra_bo_mmap(struct host1x_bo *bo)
+ {
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+- struct iosys_map map;
++ struct iosys_map map = { 0 };
+ int ret;
+
+ if (obj->vaddr) {
+diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
+index 80c760986d9e98..58c2ba94e7dd65 100644
+--- a/drivers/gpu/drm/tegra/hdmi.c
++++ b/drivers/gpu/drm/tegra/hdmi.c
+@@ -1854,12 +1854,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
+ return err;
+
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(hdmi->regs))
+- return PTR_ERR(hdmi->regs);
++ if (IS_ERR(hdmi->regs)) {
++ err = PTR_ERR(hdmi->regs);
++ goto remove;
++ }
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+- return err;
++ goto remove;
+
+ hdmi->irq = err;
+
+@@ -1868,18 +1870,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hdmi->irq, err);
+- return err;
++ goto remove;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+- return err;
++ goto remove;
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+- return err;
++ goto remove;
+
+ INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.ops = &hdmi_client_ops;
+@@ -1889,10 +1891,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+- return err;
++ goto remove;
+ }
+
+ return 0;
++
++remove:
++ tegra_output_remove(&hdmi->output);
++ return err;
+ }
+
+ static void tegra_hdmi_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
+index dc2dcb5ca1c894..d7d2389ac2f5ab 100644
+--- a/drivers/gpu/drm/tegra/output.c
++++ b/drivers/gpu/drm/tegra/output.c
+@@ -142,8 +142,10 @@ int tegra_output_probe(struct tegra_output *output)
+ GPIOD_IN,
+ "HDMI hotplug detect");
+ if (IS_ERR(output->hpd_gpio)) {
+- if (PTR_ERR(output->hpd_gpio) != -ENOENT)
+- return PTR_ERR(output->hpd_gpio);
++ if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
++ err = PTR_ERR(output->hpd_gpio);
++ goto put_i2c;
++ }
+
+ output->hpd_gpio = NULL;
+ }
+@@ -152,7 +154,7 @@ int tegra_output_probe(struct tegra_output *output)
+ err = gpiod_to_irq(output->hpd_gpio);
+ if (err < 0) {
+ dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
+- return err;
++ goto put_i2c;
+ }
+
+ output->hpd_irq = err;
+@@ -165,7 +167,7 @@ int tegra_output_probe(struct tegra_output *output)
+ if (err < 0) {
+ dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+ output->hpd_irq, err);
+- return err;
++ goto put_i2c;
+ }
+
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+@@ -179,6 +181,12 @@ int tegra_output_probe(struct tegra_output *output)
+ }
+
+ return 0;
++
++put_i2c:
++ if (output->ddc)
++ i2c_put_adapter(output->ddc);
++
++ return err;
+ }
+
+ void tegra_output_remove(struct tegra_output *output)
+diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
+index 79566c9ea8ff2b..d6424abd3c45d3 100644
+--- a/drivers/gpu/drm/tegra/rgb.c
++++ b/drivers/gpu/drm/tegra/rgb.c
+@@ -215,26 +215,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
+ rgb->clk = devm_clk_get(dc->dev, NULL);
+ if (IS_ERR(rgb->clk)) {
+ dev_err(dc->dev, "failed to get clock\n");
+- return PTR_ERR(rgb->clk);
++ err = PTR_ERR(rgb->clk);
++ goto remove;
+ }
+
+ rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+ if (IS_ERR(rgb->clk_parent)) {
+ dev_err(dc->dev, "failed to get parent clock\n");
+- return PTR_ERR(rgb->clk_parent);
++ err = PTR_ERR(rgb->clk_parent);
++ goto remove;
+ }
+
+ err = clk_set_parent(rgb->clk, rgb->clk_parent);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+- return err;
++ goto remove;
+ }
+
+ rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
+ if (IS_ERR(rgb->pll_d_out0)) {
+ err = PTR_ERR(rgb->pll_d_out0);
+ dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
+- return err;
++ goto remove;
+ }
+
+ if (dc->soc->has_pll_d2_out0) {
+@@ -242,13 +244,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
+ if (IS_ERR(rgb->pll_d2_out0)) {
+ err = PTR_ERR(rgb->pll_d2_out0);
+ dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
+- return err;
++ goto put_pll;
+ }
+ }
+
+ dc->rgb = &rgb->output;
+
+ return 0;
++
++put_pll:
++ clk_put(rgb->pll_d_out0);
++remove:
++ tegra_output_remove(&rgb->output);
++ return err;
+ }
+
+ void tegra_dc_rgb_remove(struct tegra_dc *dc)
+diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+index 545beea33e8c70..e3c818dfc0e6d2 100644
+--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+@@ -42,13 +42,13 @@ static const struct drm_dp_mst_calc_pbn_mode_test drm_dp_mst_calc_pbn_mode_cases
+ .clock = 332880,
+ .bpp = 24,
+ .dsc = true,
+- .expected = 50
++ .expected = 1191
+ },
+ {
+ .clock = 324540,
+ .bpp = 24,
+ .dsc = true,
+- .expected = 49
++ .expected = 1161
+ },
+ };
+
+@@ -56,7 +56,7 @@ static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
+ {
+ const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value;
+
+- KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc),
++ KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp << 4),
+ params->expected);
+ }
+
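
Note: the expected values change because drm_dp_calc_pbn_mode() now takes bpp in 16.4 fixed point, so callers pass bpp << 4; the same encoding lets fractional DSC rates be expressed. Quick arithmetic:

#include <stdint.h>

void demo(void)
{
	uint32_t bpp_x16     = 24 << 4;      /* 24  bpp -> 384 */
	uint32_t dsc_bpp_x16 = (8 << 4) | 8; /* 8.5 bpp -> 136 */
	(void)bpp_x16; (void)dsc_bpp_x16;
}
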
+diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
+index 5e5e466f35d10c..1baa4ace12e150 100644
+--- a/drivers/gpu/drm/tidss/tidss_crtc.c
++++ b/drivers/gpu/drm/tidss/tidss_crtc.c
+@@ -169,13 +169,13 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct tidss_device *tidss = to_tidss(ddev);
+ unsigned long flags;
+
+- dev_dbg(ddev->dev,
+- "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+- crtc->name, drm_atomic_crtc_needs_modeset(crtc->state),
+- crtc->state->enable, crtc->state->event);
++ dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
++ __func__, crtc->name, crtc->state->active ? "" : "not ",
++ drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
++ crtc->state->event);
+
+ /* There is nothing to do if CRTC is not going to be enabled. */
+- if (!crtc->state->enable)
++ if (!crtc->state->active)
+ return;
+
+ /*
+@@ -269,6 +269,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+
+ reinit_completion(&tcrtc->framedone_completion);
+
++ /*
++ * If a layer is left enabled when the videoport is disabled, and the
++ * vid pipeline that was used for the layer is taken into use on
++ * another videoport, the DSS will report sync lost issues. Disable all
++ * the layers here as a work-around.
++ */
++ for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
++ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
++ false);
++
+ dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+
+ if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
+index 9d9dee7abaefdd..98efbaf3b0c23f 100644
+--- a/drivers/gpu/drm/tidss/tidss_dispc.c
++++ b/drivers/gpu/drm/tidss/tidss_dispc.c
+@@ -2702,18 +2702,69 @@ static void dispc_init_errata(struct dispc_device *dispc)
+ }
+ }
+
+-static void dispc_softreset(struct dispc_device *dispc)
++static int dispc_softreset(struct dispc_device *dispc)
+ {
+ u32 val;
+ int ret = 0;
+
++ /* K2G display controller does not support soft reset */
++ if (dispc->feat->subrev == DISPC_K2G)
++ return 0;
++
+ /* Soft reset */
+ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ /* Wait for reset to complete */
+ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
+ val, val & 1, 100, 5000);
++ if (ret) {
++ dev_err(dispc->dev, "failed to reset dispc\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int dispc_init_hw(struct dispc_device *dispc)
++{
++ struct device *dev = dispc->dev;
++ int ret;
++
++ ret = pm_runtime_set_active(dev);
++ if (ret) {
++ dev_err(dev, "Failed to set DSS PM to active\n");
++ return ret;
++ }
++
++ ret = clk_prepare_enable(dispc->fclk);
++ if (ret) {
++ dev_err(dev, "Failed to enable DSS fclk\n");
++ goto err_runtime_suspend;
++ }
++
++ ret = dispc_softreset(dispc);
+ if (ret)
+- dev_warn(dispc->dev, "failed to reset dispc\n");
++ goto err_clk_disable;
++
++ clk_disable_unprepare(dispc->fclk);
++ ret = pm_runtime_set_suspended(dev);
++ if (ret) {
++ dev_err(dev, "Failed to set DSS PM to suspended\n");
++ return ret;
++ }
++
++ return 0;
++
++err_clk_disable:
++ clk_disable_unprepare(dispc->fclk);
++
++err_runtime_suspend:
++ ret = pm_runtime_set_suspended(dev);
++ if (ret) {
++ dev_err(dev, "Failed to set DSS PM to suspended\n");
++ return ret;
++ }
++
++ return ret;
+ }
+
+ int dispc_init(struct tidss_device *tidss)
+@@ -2777,10 +2828,6 @@ int dispc_init(struct tidss_device *tidss)
+ return r;
+ }
+
+- /* K2G display controller does not support soft reset */
+- if (feat->subrev != DISPC_K2G)
+- dispc_softreset(dispc);
+-
+ for (i = 0; i < dispc->feat->num_vps; i++) {
+ u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+ u32 *gamma_table;
+@@ -2829,6 +2876,10 @@ int dispc_init(struct tidss_device *tidss)
+ of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+ &dispc->memory_bandwidth_limit);
+
++ r = dispc_init_hw(dispc);
++ if (r)
++ return r;
++
+ tidss->dispc = dispc;
+
+ return 0;
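Note: the soft reset now happens once at init time inside dispc_init_hw(), which brackets the register access in an explicit power window: mark the device active for runtime PM, enable fclk, reset, then return the device to the suspended state. A minimal sketch of the same bracketing pattern (names hypothetical):

    /* Sketch: one-shot hardware touch outside a runtime-PM resume path. */
    static int example_init_hw(struct device *dev, struct clk *fclk)
    {
        int ret;

        ret = pm_runtime_set_active(dev);       /* open the power window */
        if (ret)
            return ret;

        ret = clk_prepare_enable(fclk);
        if (!ret) {
            /* registers may be poked here: the clock is known to run */
            clk_disable_unprepare(fclk);
        }

        /* hand the (idle) device back to runtime PM either way */
        return ret ?: pm_runtime_set_suspended(dev);
    }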
+diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
+index c979ad1af23660..d096d8d2bc8f84 100644
+--- a/drivers/gpu/drm/tidss/tidss_kms.c
++++ b/drivers/gpu/drm/tidss/tidss_kms.c
+@@ -4,8 +4,6 @@
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+-#include <linux/dma-fence.h>
+-
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_bridge.h>
+@@ -25,7 +23,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+ struct drm_device *ddev = old_state->dev;
+ struct tidss_device *tidss = to_tidss(ddev);
+- bool fence_cookie = dma_fence_begin_signalling();
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+@@ -36,7 +33,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+ drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+- dma_fence_end_signalling(fence_cookie);
+ drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+ drm_atomic_helper_cleanup_planes(ddev, old_state);
+diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
+index e1c0ef0c3894c8..68fed531f6a7f8 100644
+--- a/drivers/gpu/drm/tidss/tidss_plane.c
++++ b/drivers/gpu/drm/tidss/tidss_plane.c
+@@ -213,7 +213,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+
+ drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+
+- drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
++ drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0,
+ num_planes - 1);
+
+ ret = drm_plane_create_color_properties(&tplane->plane,
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index fe56beea3e93f1..2f6eaac7f659b4 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -138,7 +138,7 @@ static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq)
+ if (ret)
+ return ret;
+
+- priv->irq_enabled = false;
++ priv->irq_enabled = true;
+
+ return 0;
+ }
+@@ -175,6 +175,7 @@ static void tilcdc_fini(struct drm_device *dev)
+ drm_dev_unregister(dev);
+
+ drm_kms_helper_poll_fini(dev);
++ drm_atomic_helper_shutdown(dev);
+ tilcdc_irq_uninstall(dev);
+ drm_mode_config_cleanup(dev);
+
+@@ -389,6 +390,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
+
+ init_failed:
+ tilcdc_fini(ddev);
++ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+ }
+@@ -537,7 +539,8 @@ static void tilcdc_unbind(struct device *dev)
+ if (!ddev->dev_private)
+ return;
+
+- tilcdc_fini(dev_get_drvdata(dev));
++ tilcdc_fini(ddev);
++ dev_set_drvdata(dev, NULL);
+ }
+
+ static const struct component_master_ops tilcdc_comp_ops = {
+@@ -582,6 +585,11 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static void tilcdc_pdev_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id tilcdc_of_match[] = {
+ { .compatible = "ti,am33xx-tilcdc", },
+ { .compatible = "ti,da850-tilcdc", },
+@@ -592,6 +600,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+ static struct platform_driver tilcdc_platform_driver = {
+ .probe = tilcdc_pdev_probe,
+ .remove = tilcdc_pdev_remove,
++ .shutdown = tilcdc_pdev_shutdown,
+ .driver = {
+ .name = "tilcdc",
+ .pm = pm_sleep_ptr(&tilcdc_pm_ops),
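Note: two independent fixes here. Setting priv->irq_enabled = true after a successful request_irq() corrects an inverted flag that would otherwise make the teardown path believe no IRQ was ever installed. The new .shutdown hook follows a pattern repeated below for tve200 and vboxvideo: quiesce the display pipeline on reboot/kexec. A minimal sketch, with the NULL check motivated by the error path above that clears drvdata (the check is an illustrative assumption, not part of the patch):

    static void my_pdev_shutdown(struct platform_device *pdev)
    {
        struct drm_device *drm = platform_get_drvdata(pdev);

        if (drm)    /* probe may have failed and cleared drvdata */
            drm_atomic_helper_shutdown(drm);
    }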
+diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+index b1b423b68cdf16..19eaff22e6ae04 100644
+--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
++++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
+
+ if (params->pools_init_expected) {
+ for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+- for (int j = 0; j <= MAX_ORDER; ++j) {
++ for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
+ pt = pool->caching[i].orders[j];
+ KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
+ KUNIT_EXPECT_EQ(test, pt.caching, i);
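Note: `j <= MAX_ORDER` and `j < NR_PAGE_ORDERS` walk the same range; NR_PAGE_ORDERS is defined in <linux/mmzone.h> as

    #define NR_PAGE_ORDERS (MAX_ORDER + 1)

so the treewide rename (applied to the TTM pool arrays below as well) turns the inclusive bound into a conventional count, letting array sizes and loop bounds use the same expression.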
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index e58b7e2498166a..b3e5185835c37e 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -764,7 +764,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+- * -ENOMEM: Could not allocate memory for the buffer object, either due to
++ * -ENOSPC: Could not allocate space for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+@@ -824,7 +824,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ goto error;
+ }
+
+- ret = -ENOMEM;
++ ret = -ENOSPC;
+ if (!type_found) {
+ pr_err(TTM_PFX "No compatible memory type found\n");
+ ret = -EINVAL;
+@@ -910,6 +910,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ return -EINVAL;
+
+ ret = ttm_bo_move_buffer(bo, placement, ctx);
++ /* For backward compatibility with userspace */
++ if (ret == -ENOSPC)
++ return -ENOMEM;
+ if (ret)
+ return ret;
+
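Note: ttm_bo_mem_space() now reports -ENOSPC when the requested placement is merely full or fragmented, reserving -ENOMEM for real allocation failures; ttm_bo_validate() folds -ENOSPC back to -ENOMEM so the userspace-visible ABI is unchanged. A hypothetical in-kernel caller can now tell the two cases apart:

    ret = ttm_bo_mem_space(bo, &placement, &new_mem, &ctx);
    if (ret == -ENOSPC) {
        /* placement exhausted or fragmented: retry in a fallback domain */
    } else if (ret == -ENOMEM) {
        /* genuine out-of-memory: propagate the error */
    }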
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index fd9fd3d15101c8..0b3f4267130c45 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -294,7 +294,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ enum ttm_caching caching;
+
+ man = ttm_manager_type(bo->bdev, res->mem_type);
+- caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
++ if (man->use_tt) {
++ caching = bo->ttm->caching;
++ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
++ tmp = pgprot_decrypted(tmp);
++ } else {
++ caching = res->bus.caching;
++ }
+
+ return ttm_prot_from_caching(caching, tmp);
+ }
+@@ -337,6 +343,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ .no_wait_gpu = false
+ };
+ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_resource_manager *man =
++ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ pgprot_t prot;
+ int ret;
+
+@@ -346,7 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ if (ret)
+ return ret;
+
+- if (num_pages == 1 && ttm->caching == ttm_cached) {
++ if (num_pages == 1 && ttm->caching == ttm_cached &&
++ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
+ /*
+ * We're mapping a single page, and the desired
+ * page protection is consistent with the bo.
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index cddb9151d20f44..37c08fac7e7d01 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
+
+ static atomic_long_t allocated_pages;
+
+-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
+-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
++static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
++static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
+
+-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
+-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
++static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
++static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
+
+ static spinlock_t shrinker_lock;
+ static struct list_head shrinker_list;
+@@ -287,17 +287,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+ enum ttm_caching caching,
+ unsigned int order)
+ {
+- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
++ if (pool->use_dma_alloc)
+ return &pool->caching[caching].orders[order];
+
+ #ifdef CONFIG_X86
+ switch (caching) {
+ case ttm_write_combined:
++ if (pool->nid != NUMA_NO_NODE)
++ return &pool->caching[caching].orders[order];
++
+ if (pool->use_dma32)
+ return &global_dma32_write_combined[order];
+
+ return &global_write_combined[order];
+ case ttm_uncached:
++ if (pool->nid != NUMA_NO_NODE)
++ return &pool->caching[caching].orders[order];
++
+ if (pool->use_dma32)
+ return &global_dma32_uncached[order];
+
+@@ -384,7 +390,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ enum ttm_caching caching,
+ pgoff_t start_page, pgoff_t end_page)
+ {
+- struct page **pages = tt->pages;
++ struct page **pages = &tt->pages[start_page];
+ unsigned int order;
+ pgoff_t i, nr;
+
+@@ -563,11 +569,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ pool->use_dma_alloc = use_dma_alloc;
+ pool->use_dma32 = use_dma32;
+
+- if (use_dma_alloc || nid != NUMA_NO_NODE) {
+- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+- for (j = 0; j <= MAX_ORDER; ++j)
+- ttm_pool_type_init(&pool->caching[i].orders[j],
+- pool, i, j);
++ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
++ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
++ struct ttm_pool_type *pt;
++
++ /* Initialize only pool types which are actually used */
++ pt = ttm_pool_select_type(pool, i, j);
++ if (pt != &pool->caching[i].orders[j])
++ continue;
++
++ ttm_pool_type_init(pt, pool, i, j);
++ }
+ }
+ }
+ EXPORT_SYMBOL(ttm_pool_init);
+@@ -584,10 +596,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
+ {
+ unsigned int i, j;
+
+- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
+- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+- for (j = 0; j <= MAX_ORDER; ++j)
+- ttm_pool_type_fini(&pool->caching[i].orders[j]);
++ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
++ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
++ struct ttm_pool_type *pt;
++
++ pt = ttm_pool_select_type(pool, i, j);
++ if (pt != &pool->caching[i].orders[j])
++ continue;
++
++ ttm_pool_type_fini(pt);
++ }
+ }
+
+ /* We removed the pool types from the LRU, but we need to also make sure
+@@ -641,7 +659,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
+ unsigned int i;
+
+ seq_puts(m, "\t ");
+- for (i = 0; i <= MAX_ORDER; ++i)
++ for (i = 0; i < NR_PAGE_ORDERS; ++i)
+ seq_printf(m, " ---%2u---", i);
+ seq_puts(m, "\n");
+ }
+@@ -652,7 +670,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
+ {
+ unsigned int i;
+
+- for (i = 0; i <= MAX_ORDER; ++i)
++ for (i = 0; i < NR_PAGE_ORDERS; ++i)
+ seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
+ seq_puts(m, "\n");
+ }
+@@ -761,7 +779,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
+ spin_lock_init(&shrinker_lock);
+ INIT_LIST_HEAD(&shrinker_list);
+
+- for (i = 0; i <= MAX_ORDER; ++i) {
++ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
+ ttm_pool_type_init(&global_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+@@ -794,7 +812,7 @@ void ttm_pool_mgr_fini(void)
+ {
+ unsigned int i;
+
+- for (i = 0; i <= MAX_ORDER; ++i) {
++ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
+ ttm_pool_type_fini(&global_write_combined[i]);
+ ttm_pool_type_fini(&global_uncached[i]);
+
+diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
+index e0a77671edd6c1..bf9601351fa354 100644
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -31,11 +31,14 @@
+
+ #define pr_fmt(fmt) "[TTM] " fmt
+
++#include <linux/cc_platform.h>
+ #include <linux/sched.h>
+ #include <linux/shmem_fs.h>
+ #include <linux/file.h>
+ #include <linux/module.h>
+ #include <drm/drm_cache.h>
++#include <drm/drm_device.h>
++#include <drm/drm_util.h>
+ #include <drm/ttm/ttm_bo.h>
+ #include <drm/ttm/ttm_tt.h>
+
+@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
+ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
+ {
+ struct ttm_device *bdev = bo->bdev;
++ struct drm_device *ddev = bo->base.dev;
+ uint32_t page_flags = 0;
+
+ dma_resv_assert_held(bo->base.resv);
+@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
+ pr_err("Illegal buffer object type\n");
+ return -EINVAL;
+ }
++ /*
++ * When using dma_alloc_coherent with memory encryption the
++ * mapped TT pages need to be decrypted or otherwise the drivers
++ * will end up sending encrypted mem to the gpu.
++ */
++ if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
++ page_flags |= TTM_TT_FLAG_DECRYPTED;
++ drm_info_once(ddev, "TT memory decryption enabled.");
++ }
+
+ bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
+ if (unlikely(bo->ttm == NULL))
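Note: with SEV-style guest memory encryption, buffers obtained through dma_alloc_coherent() are shared (decrypted) toward the host, so CPU mappings of those TT pages must drop the encryption bit too; that is what the new TTM_TT_FLAG_DECRYPTED plumbs into ttm_io_prot() in the ttm_bo_util.c hunk above. The essential protection-bit handling, reduced to a sketch:

    pgprot_t tmp = PAGE_KERNEL;

    if (ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
        tmp = pgprot_decrypted(tmp);    /* clear the encryption bit (e.g. the SEV C-bit) */

    return ttm_prot_from_caching(caching, tmp);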
+diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
+index 0bb56d06353666..acce210e255470 100644
+--- a/drivers/gpu/drm/tve200/tve200_drv.c
++++ b/drivers/gpu/drm/tve200/tve200_drv.c
+@@ -242,6 +242,7 @@ static void tve200_remove(struct platform_device *pdev)
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
++ drm_atomic_helper_shutdown(drm);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
+ drm_mode_config_cleanup(drm);
+@@ -249,6 +250,11 @@ static void tve200_remove(struct platform_device *pdev)
+ drm_dev_put(drm);
+ }
+
++static void tve200_shutdown(struct platform_device *pdev)
++{
++ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
++}
++
+ static const struct of_device_id tve200_of_match[] = {
+ {
+ .compatible = "faraday,tve200",
+@@ -263,6 +269,7 @@ static struct platform_driver tve200_driver = {
+ },
+ .probe = tve200_probe,
+ .remove_new = tve200_remove,
++ .shutdown = tve200_shutdown,
+ };
+ drm_module_platform_driver(tve200_driver);
+
+diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
+index 40876bcdd79a47..5a1539914ce89d 100644
+--- a/drivers/gpu/drm/udl/udl_modeset.c
++++ b/drivers/gpu/drm/udl/udl_modeset.c
+@@ -512,8 +512,7 @@ struct drm_connector *udl_connector_init(struct drm_device *dev)
+
+ drm_connector_helper_add(connector, &udl_connector_helper_funcs);
+
+- connector->polled = DRM_CONNECTOR_POLL_HPD |
+- DRM_CONNECTOR_POLL_CONNECT |
++ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return connector;
+diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
+index e1be7368b87dfc..73b9c92dc0fc5b 100644
+--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
++++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
+@@ -103,6 +103,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
+ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
+ {
+ struct v3d_perfmon *perfmon = elem;
++ struct v3d_dev *v3d = (struct v3d_dev *)data;
++
++ /* If the active perfmon is being destroyed, stop it first */
++ if (perfmon == v3d->active_perfmon)
++ v3d_perfmon_stop(v3d, perfmon, false);
+
+ v3d_perfmon_put(perfmon);
+
+@@ -111,8 +116,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
+
+ void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
+ {
++ struct v3d_dev *v3d = v3d_priv->v3d;
++
+ mutex_lock(&v3d_priv->perfmon.lock);
+- idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
++ idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
+ idr_destroy(&v3d_priv->perfmon.idr);
+ mutex_unlock(&v3d_priv->perfmon.lock);
+ mutex_destroy(&v3d_priv->perfmon.lock);
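Note: idr_for_each() hands its third argument through to the callback untouched, so threading the device pointer in lets each entry be checked against the active perfmon and stopped before its final reference is dropped. The same shape is applied to vc4 below. Generic form of the pattern (names hypothetical):

    static int destroy_cb(int id, void *elem, void *data)
    {
        struct my_dev *dev = data;  /* context passed via idr_for_each() */

        /* stop 'elem' if it is still live on 'dev', then release it */
        return 0;
    }

    idr_for_each(&idr, destroy_cb, dev);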
+diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+index 4fee15c97c3410..cd9e66a06596a7 100644
+--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
++++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+@@ -12,6 +12,7 @@
+ #include <linux/vt_kern.h>
+
+ #include <drm/drm_aperture.h>
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
+ #include <drm/drm_file.h>
+@@ -97,11 +98,19 @@ static void vbox_pci_remove(struct pci_dev *pdev)
+ struct vbox_private *vbox = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(&vbox->ddev);
++ drm_atomic_helper_shutdown(&vbox->ddev);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(vbox);
+ vbox_hw_fini(vbox);
+ }
+
++static void vbox_pci_shutdown(struct pci_dev *pdev)
++{
++ struct vbox_private *vbox = pci_get_drvdata(pdev);
++
++ drm_atomic_helper_shutdown(&vbox->ddev);
++}
++
+ static int vbox_pm_suspend(struct device *dev)
+ {
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+@@ -165,6 +174,7 @@ static struct pci_driver vbox_pci_driver = {
+ .id_table = pciidlist,
+ .probe = vbox_pci_probe,
+ .remove = vbox_pci_remove,
++ .shutdown = vbox_pci_shutdown,
+ .driver.pm = pm_sleep_ptr(&vbox_pm_ops),
+ };
+
+@@ -172,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops);
+
+ static const struct drm_driver driver = {
+ .driver_features =
+- DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
++ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
+
+ .fops = &vbox_fops,
+ .name = DRIVER_NAME,
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
+index 5d12d7beef0eb3..ade3309ae042f1 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
+@@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ struct vc4_crtc *vc4_crtc;
+ int ret;
+
+- dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
++ dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
+
+ vc4_crtc = &dummy_crtc->crtc;
+diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+index 6e11fcc9ef45e0..e70d7c3076acf1 100644
+--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
++++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+@@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ struct drm_encoder *enc;
+ int ret;
+
+- dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
++ dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ dummy_output->encoder.type = vc4_encoder_type;
+
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 25c9c71256d355..c6e986f71a26f8 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -458,6 +458,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+ {
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ enum drm_connector_status status = connector_status_disconnected;
++ int ret;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+@@ -470,7 +471,12 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+ * the lock for now.
+ */
+
+- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
++ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
++ if (ret) {
++ drm_err_once(connector->dev, "Failed to retain HDMI power domain: %d\n",
++ ret);
++ return connector_status_unknown;
++ }
+
+ if (vc4_hdmi->hpd_gpio) {
+ if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
+@@ -508,7 +514,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
+ edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ if (!edid)
+- return -ENODEV;
++ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+@@ -2729,6 +2735,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
+ index = 1;
+
+ addr = of_get_address(dev->of_node, index, NULL, NULL);
++ if (!addr)
++ return -EINVAL;
+
+ vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
+ vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
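Note: three distinct fixes in this file. The detect path now surfaces a failed pm_runtime_resume_and_get() as connector_status_unknown instead of WARNing and probing an unpowered device. The get_modes() change matters because, per the connector helper contract, .get_modes returns a count of probed modes rather than an errno, roughly:

    static int my_get_modes(struct drm_connector *connector)
    {
        /* contract: return how many modes were added; 0 on none/failure */
        return drm_add_edid_modes(connector, edid);
    }

And the of_get_address() NULL check guards the audio setup against a missing register resource before it is dereferenced with be32_to_cpup().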
+diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
+index c4ac2c94623815..c00a5cc2316d20 100644
+--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
++++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
+@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
+ static int vc4_perfmon_idr_del(int id, void *elem, void *data)
+ {
+ struct vc4_perfmon *perfmon = elem;
++ struct vc4_dev *vc4 = (struct vc4_dev *)data;
++
++ /* If the active perfmon is being destroyed, stop it first */
++ if (perfmon == vc4->active_perfmon)
++ vc4_perfmon_stop(vc4, perfmon, false);
+
+ vc4_perfmon_put(perfmon);
+
+@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
+ return;
+
+ mutex_lock(&vc4file->perfmon.lock);
+- idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
++ idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
+ idr_destroy(&vc4file->perfmon.idr);
+ mutex_unlock(&vc4file->perfmon.lock);
+ mutex_destroy(&vc4file->perfmon.lock);
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 00e713faecd5ac..5948e34f7f813b 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -1505,9 +1505,6 @@ static int vc4_prepare_fb(struct drm_plane *plane,
+
+ drm_gem_plane_helper_prepare_fb(plane, state);
+
+- if (plane->state->fb == state->fb)
+- return 0;
+-
+ return vc4_bo_inc_usecnt(bo);
+ }
+
+@@ -1516,7 +1513,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
+ {
+ struct vc4_bo *bo;
+
+- if (plane->state->fb == state->fb || !state->fb)
++ if (!state->fb)
+ return;
+
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
+index 644b8ee51009bf..c5716fd0aed380 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
+@@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
+ goto err_free;
+ }
+
++ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
+ ret = virtio_gpu_init(vdev, dev);
+ if (ret)
+ goto err_free;
+@@ -177,7 +178,7 @@ static const struct drm_driver driver = {
+ * out via drm_device::driver_features:
+ */
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+- DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
++ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
+ .open = virtio_gpu_driver_open,
+ .postclose = virtio_gpu_driver_postclose,
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
+index a2e045f3a0004a..a1ef657eba0774 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
++++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
+@@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
+ {
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
++ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
++ plane);
+ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+@@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
+ return 0;
+
++ /*
++ * Ignore damage clips if the framebuffer attached to the plane's state
++ * has changed since the last plane update (page-flip). In this case, a
++ * full plane update should happen because uploads are done per-buffer.
++ */
++ if (old_plane_state->fb != new_plane_state->fb)
++ new_plane_state->ignore_damage_clips = true;
++
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
+index 5c514946bbad97..d530c058f53e2c 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
++++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
+@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
+ static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
+ struct dma_fence *in_fence)
+ {
+- u32 context = submit->fence_ctx + submit->ring_idx;
++ u64 context = submit->fence_ctx + submit->ring_idx;
+
+ if (dma_fence_match_context(in_fence, context))
+ return 0;
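Note: dma_fence contexts come from a 64-bit allocator, so narrowing the sum to u32 could truncate and make dma_fence_match_context() miss (or falsely match) the submit's own ring. Sketch of why the width matters:

    u64 base = dma_fence_context_alloc(ring_count); /* monotonically increasing u64 */
    u64 ctx  = base + ring_idx;     /* a u32 here silently drops the high half */

    if (dma_fence_match_context(in_fence, ctx))
        /* the in-fence belongs to this ring: no cross-ring wait needed */;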
+diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
+index 3c99fb8b54e2df..e7441b227b3cea 100644
+--- a/drivers/gpu/drm/vkms/vkms_composer.c
++++ b/drivers/gpu/drm/vkms/vkms_composer.c
+@@ -123,6 +123,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
+ enum lut_channel channel)
+ {
+ s64 lut_index = get_lut_index(lut, channel_value);
++ u16 *floor_lut_value, *ceil_lut_value;
++ u16 floor_channel_value, ceil_channel_value;
+
+ /*
+ * This checks if `struct drm_color_lut` has any gap added by the compiler
+@@ -130,11 +132,15 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
+ */
+ static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
+
+- u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+- u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
++ floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
++ if (drm_fixp2int(lut_index) == (lut->lut_length - 1))
++ /* We're at the end of the LUT array, use same value for ceil and floor */
++ ceil_lut_value = floor_lut_value;
++ else
++ ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+
+- u16 floor_channel_value = floor_lut_value[channel];
+- u16 ceil_channel_value = ceil_lut_value[channel];
++ floor_channel_value = floor_lut_value[channel];
++ ceil_channel_value = ceil_lut_value[channel];
+
+ return lerp_u16(floor_channel_value, ceil_channel_value,
+ lut_index & DRM_FIXED_DECIMAL_MASK);
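Note: the new branch prevents an out-of-bounds read at the top of the LUT. Working through a 256-entry LUT: a channel value whose fixed-point lut_index lands at 255 plus any nonzero fraction makes drm_fixp2int_ceil() return 256, one past the array, so

    drm_fixp2int(lut_index)         /* -> 255, last valid entry        */
    drm_fixp2int_ceil(lut_index)    /* -> 256, out-of-bounds read      */

Reusing the floor entry is the right clamp, since the lerp weight is only that residual fraction. Hoisting the declarations above the static_assert() plausibly also avoids declaration-after-statement warnings.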
+diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
+index faddae3d6ac2e0..6f1ac940cbae70 100644
+--- a/drivers/gpu/drm/vmwgfx/Kconfig
++++ b/drivers/gpu/drm/vmwgfx/Kconfig
+@@ -2,7 +2,7 @@
+ config DRM_VMWGFX
+ tristate "DRM driver for VMware Virtual GPU"
+ depends on DRM && PCI && MMU
+- depends on X86 || ARM64
++ depends on (X86 && HYPERVISOR_GUEST) || ARM64
+ select DRM_TTM
+ select DRM_TTM_HELPER
+ select MAPPING_DIRTY_HELPERS
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+index c52c7bf1485b1f..890a66a2361f4e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+@@ -27,6 +27,8 @@
+ **************************************************************************/
+
+ #include "vmwgfx_drv.h"
++
++#include "vmwgfx_bo.h"
+ #include <linux/highmem.h>
+
+ /*
+@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
+ return 0;
+ }
+
++static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++ struct vmw_private *vmw =
++ container_of(bo->tbo.bdev, struct vmw_private, bdev);
++ void *ptr = NULL;
++ int ret;
++
++ if (bo->tbo.base.import_attach) {
++ ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
++ if (ret) {
++ drm_dbg_driver(&vmw->drm,
++ "Wasn't able to map external bo!\n");
++ goto out;
++ }
++ ptr = map->vaddr;
++ } else {
++ ptr = vmw_bo_map_and_cache(bo);
++ }
++
++out:
++ return ptr;
++}
++
++static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++ if (bo->tbo.base.import_attach)
++ dma_buf_vunmap(bo->tbo.base.dma_buf, map);
++ else
++ vmw_bo_unmap(bo);
++}
++
++static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
++ u32 dst_stride, struct vmw_bo *src,
++ u32 src_offset, u32 src_stride,
++ u32 width_in_bytes, u32 height,
++ struct vmw_diff_cpy *diff)
++{
++ struct vmw_private *vmw =
++ container_of(dst->tbo.bdev, struct vmw_private, bdev);
++ size_t dst_size = dst->tbo.resource->size;
++ size_t src_size = src->tbo.resource->size;
++ struct iosys_map dst_map = {0};
++ struct iosys_map src_map = {0};
++ int ret, i;
++ int x_in_bytes;
++ u8 *vsrc;
++ u8 *vdst;
++
++ vsrc = map_external(src, &src_map);
++ if (!vsrc) {
++ drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ vdst = map_external(dst, &dst_map);
++ if (!vdst) {
++ drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ vsrc += src_offset;
++ vdst += dst_offset;
++ if (src_stride == dst_stride) {
++ dst_size -= dst_offset;
++ src_size -= src_offset;
++ memcpy(vdst, vsrc,
++ min(dst_stride * height, min(dst_size, src_size)));
++ } else {
++ WARN_ON(dst_stride < width_in_bytes);
++ for (i = 0; i < height; ++i) {
++ memcpy(vdst, vsrc, width_in_bytes);
++ vsrc += src_stride;
++ vdst += dst_stride;
++ }
++ }
++
++ x_in_bytes = (dst_offset % dst_stride);
++ diff->rect.x1 = x_in_bytes / diff->cpp;
++ diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
++ diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
++ diff->rect.y2 = diff->rect.y1 + height;
++
++ ret = 0;
++out:
++ unmap_external(src, &src_map);
++ unmap_external(dst, &dst_map);
++
++ return ret;
++}
++
+ /**
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
+ *
+- * @dst: Destination buffer object.
++ * @vmw_dst: Destination buffer object.
+ * @dst_offset: Destination offset of blit start in bytes.
+ * @dst_stride: Destination stride in bytes.
+- * @src: Source buffer object.
++ * @vmw_src: Source buffer object.
+ * @src_offset: Source offset of blit start in bytes.
+ * @src_stride: Source stride in bytes.
+ * @w: Width of blit.
+@@ -444,20 +538,29 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
+ * Neither of the buffer objects may be placed in PCI memory
+ * (Fixed memory in TTM terminology) when using this function.
+ */
+-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
++int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
+ u32 dst_offset, u32 dst_stride,
+- struct ttm_buffer_object *src,
++ struct vmw_bo *vmw_src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff)
+ {
++ struct ttm_buffer_object *src = &vmw_src->tbo;
++ struct ttm_buffer_object *dst = &vmw_dst->tbo;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ u32 j, initial_line = dst_offset / dst_stride;
+- struct vmw_bo_blit_line_data d;
++ struct vmw_bo_blit_line_data d = {0};
+ int ret = 0;
++ struct page **dst_pages = NULL;
++ struct page **src_pages = NULL;
++ bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++ bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++
++ if (WARN_ON(dst == src))
++ return -EINVAL;
+
+ /* Buffer objects need to be either pinned or reserved: */
+ if (!(dst->pin_count))
+@@ -477,12 +580,40 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+ return ret;
+ }
+
++ if (src_external || dst_external)
++ return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
++ vmw_src, src_offset, src_stride,
++ w, h, diff);
++
++ if (!src->ttm->pages && src->ttm->sg) {
++ src_pages = kvmalloc_array(src->ttm->num_pages,
++ sizeof(struct page *), GFP_KERNEL);
++ if (!src_pages)
++ return -ENOMEM;
++ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
++ src->ttm->num_pages);
++ if (ret)
++ goto out;
++ }
++ if (!dst->ttm->pages && dst->ttm->sg) {
++ dst_pages = kvmalloc_array(dst->ttm->num_pages,
++ sizeof(struct page *), GFP_KERNEL);
++ if (!dst_pages) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
++ dst->ttm->num_pages);
++ if (ret)
++ goto out;
++ }
++
+ d.mapped_dst = 0;
+ d.mapped_src = 0;
+ d.dst_addr = NULL;
+ d.src_addr = NULL;
+- d.dst_pages = dst->ttm->pages;
+- d.src_pages = src->ttm->pages;
++ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
++ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
+ d.dst_num_pages = PFN_UP(dst->resource->size);
+ d.src_num_pages = PFN_UP(src->resource->size);
+ d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
+@@ -504,6 +635,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+ kunmap_atomic(d.src_addr);
+ if (d.dst_addr)
+ kunmap_atomic(d.dst_addr);
++ if (src_pages)
++ kvfree(src_pages);
++ if (dst_pages)
++ kvfree(dst_pages);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index 2bfac3aad7b7d6..fdc34283eeb97f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -204,6 +204,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ buf->places[0].lpfn = PFN_UP(bo->resource->size);
++ buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
+
+ /* For some reason we didn't end up at the start of vram */
+@@ -330,6 +331,8 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
+ void *virtual;
+ int ret;
+
++ atomic_inc(&vbo->map_count);
++
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+@@ -352,11 +355,17 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
+ */
+ void vmw_bo_unmap(struct vmw_bo *vbo)
+ {
++ int map_count;
++
+ if (vbo->map.bo == NULL)
+ return;
+
+- ttm_bo_kunmap(&vbo->map);
+- vbo->map.bo = NULL;
++ map_count = atomic_dec_return(&vbo->map_count);
++
++ if (!map_count) {
++ ttm_bo_kunmap(&vbo->map);
++ vbo->map.bo = NULL;
++ }
+ }
+
+
+@@ -377,7 +386,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
+ {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = params->bo_type != ttm_bo_type_kernel,
+- .no_wait_gpu = false
++ .no_wait_gpu = false,
++ .resv = params->resv,
+ };
+ struct ttm_device *bdev = &dev_priv->bdev;
+ struct drm_device *vdev = &dev_priv->drm;
+@@ -388,14 +398,15 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
+ BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ vmw_bo->tbo.priority = 3;
+ vmw_bo->res_tree = RB_ROOT;
++ atomic_set(&vmw_bo->map_count, 0);
+
+ params->size = ALIGN(params->size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
+
+ vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
+- &vmw_bo->placement, 0, &ctx, NULL,
+- NULL, destroy);
++ &vmw_bo->placement, 0, &ctx,
++ params->sg, params->resv, destroy);
+ if (unlikely(ret))
+ return ret;
+
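Note: vmw_bo_map_and_cache()/vmw_bo_unmap() now form a counted pair, so a cached kernel mapping survives until the last user is done; the cursor-plane code in vmwgfx_kms.c below depends on this. Expected balance, assuming a freshly created BO:

    void *p = vmw_bo_map_and_cache(vbo);    /* count 0 -> 1, creates the kmap    */
    void *q = vmw_bo_map_and_cache(vbo);    /* count 1 -> 2, returns cached ptr  */
    vmw_bo_unmap(vbo);                      /* count 2 -> 1, mapping kept        */
    vmw_bo_unmap(vbo);                      /* count 1 -> 0, ttm_bo_kunmap() runs */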
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+index 0d496dc9c6af7a..156ea612fc2a48 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -55,6 +55,8 @@ struct vmw_bo_params {
+ enum ttm_bo_type bo_type;
+ size_t size;
+ bool pin;
++ struct dma_resv *resv;
++ struct sg_table *sg;
+ };
+
+ /**
+@@ -66,6 +68,8 @@ struct vmw_bo_params {
+ * @map: Kmap object for semi-persistent mappings
+ * @res_tree: RB tree of resources using this buffer object as a backing MOB
+ * @res_prios: Eviction priority counts for attached resources
++ * @map_count: The number of currently active maps. Will differ from the
++ * cpu_writers because it includes kernel maps.
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+@@ -84,6 +88,7 @@ struct vmw_bo {
+ struct rb_root res_tree;
+ u32 res_prios[TTM_MAX_BO_PRIORITY];
+
++ atomic_t map_count;
+ atomic_t cpu_writers;
+ /* Not ref-counted. Protected by binding_mutex */
+ struct vmw_resource *dx_query_ctx;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 8b24ecf60e3ec5..bea576434e475c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+ [vmw_dma_map_populate] = "Caching DMA mappings.",
+ [vmw_dma_map_bind] = "Giving up DMA mappings early."};
+
+- /* TTM currently doesn't fully support SEV encryption. */
+- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+- return -EINVAL;
+-
+- if (vmw_force_coherent)
++ /*
++ * When running with SEV we always want dma mappings, because
++ * otherwise ttm tt pool pages will bounce through swiotlb running
++ * out of available space.
++ */
++ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_restrict_iommu)
+ dev_priv->map_mode = vmw_dma_map_bind;
+@@ -955,13 +956,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ vmw_read(dev_priv,
+ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+
+- /*
+- * Workaround for low memory 2D VMs to compensate for the
+- * allocation taken by fbdev
+- */
+- if (!(dev_priv->capabilities & SVGA_CAP_3D))
+- mem_size *= 3;
+-
+ dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ dev_priv->max_primary_mem =
+ vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
+@@ -1444,12 +1438,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
+ root, "system_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
+ root, "vram_ttm");
+- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+- root, "gmr_ttm");
+- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+- root, "mob_ttm");
+- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+- root, "system_mob_ttm");
++ if (vmw->has_gmr)
++ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
++ root, "gmr_ttm");
++ if (vmw->has_mob) {
++ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
++ root, "mob_ttm");
++ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
++ root, "system_mob_ttm");
++ }
+ }
+
+ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+@@ -1611,7 +1608,7 @@ static const struct file_operations vmwgfx_driver_fops = {
+
+ static const struct drm_driver driver = {
+ .driver_features =
+- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
++ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
+ .ioctls = vmw_ioctls,
+ .num_ioctls = ARRAY_SIZE(vmw_ioctls),
+ .master_set = vmw_master_set,
+@@ -1624,6 +1621,7 @@ static const struct drm_driver driver = {
+
+ .prime_fd_to_handle = vmw_prime_fd_to_handle,
+ .prime_handle_to_fd = vmw_prime_handle_to_fd,
++ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
+
+ .fops = &vmwgfx_driver_fops,
+ .name = VMWGFX_DRIVER_NAME,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 3cd5090dedfc5b..ac3d7ff3f5bb9f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1067,9 +1067,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ unsigned width, unsigned height, unsigned pitch,
+ unsigned bpp, unsigned depth);
+-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+- uint32_t pitch,
+- uint32_t height);
+ int vmw_kms_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+@@ -1131,6 +1128,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
++struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
++ struct dma_buf_attachment *attach,
++ struct sg_table *table);
+
+ /*
+ * MemoryOBject management - vmwgfx_mob.c
+@@ -1355,9 +1355,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+
+ void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
+
+-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
++int vmw_bo_cpu_blit(struct vmw_bo *dst,
+ u32 dst_offset, u32 dst_stride,
+- struct ttm_buffer_object *src,
++ struct vmw_bo *src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 36987ef3fc3006..5fef0b31c11798 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ vmw_res_type(ctx) == vmw_res_dx_context) {
+ for (i = 0; i < cotable_max; ++i) {
+ res = vmw_context_cotable(ctx, i);
+- if (IS_ERR(res))
++ if (IS_ERR_OR_NULL(res))
+ continue;
+
+ ret = vmw_execbuf_res_val_add(sw_context, res,
+@@ -1266,6 +1266,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+ return -EINVAL;
+
+ cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
++ if (IS_ERR_OR_NULL(cotable_res))
++ return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
+ ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
+
+ return ret;
+@@ -2484,6 +2486,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+ return ret;
+
+ res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+ if (unlikely(ret != 0))
+ return ret;
+@@ -2569,8 +2573,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+
+ so_type = vmw_so_cmd_to_type(header->id);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
+- if (IS_ERR(res))
+- return PTR_ERR(res);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ cmd = container_of(header, typeof(*cmd), header);
+ ret = vmw_cotable_notify(res, cmd->defined_id);
+
+@@ -2689,6 +2693,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+ return -EINVAL;
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ ret = vmw_cotable_notify(res, cmd->body.shaderId);
+ if (ret)
+ return ret;
+@@ -3010,6 +3016,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+ }
+
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
++ if (IS_ERR_OR_NULL(res))
++ return res ? PTR_ERR(res) : -EINVAL;
+ ret = vmw_cotable_notify(res, cmd->body.soid);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 2a0cda32470314..588d50ababf604 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -32,7 +32,6 @@
+ #define VMW_FENCE_WRAP (1 << 31)
+
+ struct vmw_fence_manager {
+- int num_fence_objects;
+ struct vmw_private *dev_priv;
+ spinlock_t lock;
+ struct list_head fence_list;
+@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
+ {
+ struct vmw_fence_obj *fence =
+ container_of(f, struct vmw_fence_obj, base);
+-
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+- spin_lock(&fman->lock);
+- list_del_init(&fence->head);
+- --fman->num_fence_objects;
+- spin_unlock(&fman->lock);
++ if (!list_empty(&fence->head)) {
++ spin_lock(&fman->lock);
++ list_del_init(&fence->head);
++ spin_unlock(&fman->lock);
++ }
+ fence->destroy(fence);
+ }
+
+@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
+ .release = vmw_fence_obj_destroy,
+ };
+
+-
+ /*
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+ goto out_unlock;
+ }
+ list_add_tail(&fence->head, &fman->fence_list);
+- ++fman->num_fence_objects;
+
+ out_unlock:
+ spin_unlock(&fman->lock);
+@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ u32 passed_seqno)
+ {
+ u32 goal_seqno;
+- struct vmw_fence_obj *fence;
++ struct vmw_fence_obj *fence, *next_fence;
+
+ if (likely(!fman->seqno_valid))
+ return false;
+@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+ return false;
+
+ fman->seqno_valid = false;
+- list_for_each_entry(fence, &fman->fence_list, head) {
++ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+ if (!list_empty(&fence->seq_passed_actions)) {
+ fman->seqno_valid = true;
+ vmw_fence_goal_write(fman->dev_priv,
+@@ -991,7 +988,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
+ }
+
+ event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+- event->event.base.length = sizeof(*event);
++ event->event.base.length = sizeof(event->event);
+ event->event.user_data = user_data;
+
+ ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
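Note: a DRM event's base.length must describe exactly the bytes userspace will read from the event FIFO. Here the containing structure (per the vmwgfx source) wraps the UAPI event in kernel-side bookkeeping, roughly:

    struct vmw_event_fence_pending {
        struct drm_pending_event base;      /* kernel-internal linkage only   */
        struct drm_vmw_event_fence event;   /* the payload userspace dequeues */
    };

so sizeof(*event) over-reported the length by the size of the wrapper, while sizeof(event->event) matches the payload actually copied out.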
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+index 8b1eb0061610c7..d6bcaf078b1f40 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -149,6 +149,38 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ return ret;
+ }
+
++struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
++ struct dma_buf_attachment *attach,
++ struct sg_table *table)
++{
++ int ret;
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ struct drm_gem_object *gem = NULL;
++ struct vmw_bo *vbo;
++ struct vmw_bo_params params = {
++ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
++ .busy_domain = VMW_BO_DOMAIN_SYS,
++ .bo_type = ttm_bo_type_sg,
++ .size = attach->dmabuf->size,
++ .pin = false,
++ .resv = attach->dmabuf->resv,
++ .sg = table,
++
++ };
++
++ dma_resv_lock(params.resv, NULL);
++
++ ret = vmw_bo_create(dev_priv, &params, &vbo);
++ if (ret != 0)
++ goto out_no_bo;
++
++ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
++
++ gem = &vbo->tbo.base;
++out_no_bo:
++ dma_resv_unlock(params.resv);
++ return gem;
++}
+
+ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+@@ -244,6 +276,7 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct task_struct *task;
+ struct drm_gem_object *gobj;
++ struct pid *pid;
+ int id;
+
+ /*
+@@ -253,8 +286,9 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+- task = pid_task(file->pid, PIDTYPE_TGID);
+- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
++ pid = rcu_dereference(file->pid);
++ task = pid_task(pid, PIDTYPE_TGID);
++ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+index ceb4d3d3b965aa..a0b47c9b33f552 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+@@ -64,8 +64,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
+ ttm_resource_init(bo, place, *res);
+
+ id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+- if (id < 0)
++ if (id < 0) {
++ ttm_resource_fini(man, *res);
++ kfree(*res);
+ return id;
++ }
+
+ spin_lock(&gman->lock);
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 818b7f109f5380..11f7c0e5420e04 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -35,6 +35,7 @@
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_rect.h>
+ #include <drm/drm_sysfs.h>
++#include <drm/drm_edid.h>
+
+ void vmw_du_cleanup(struct vmw_display_unit *du)
+ {
+@@ -184,13 +185,12 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
+ */
+ static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
+ {
+- bool is_iomem;
+ if (vps->surf) {
+ if (vps->surf_mapped)
+ return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
+ return vps->surf->snooper.image;
+ } else if (vps->bo)
+- return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
++ return vmw_bo_map_and_cache(vps->bo);
+ return NULL;
+ }
+
+@@ -216,7 +216,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
+ new_image = vmw_du_cursor_plane_acquire_image(new_vps);
+
+ changed = false;
+- if (old_image && new_image)
++ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) != 0;
+
+ return changed;
+@@ -272,6 +272,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
+ u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
++ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+@@ -313,7 +314,15 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
+ if (ret != 0)
+ goto teardown;
+
+- vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
++ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
++ if (ret != 0) {
++ ttm_bo_unreserve(&vps->cursor.bo->tbo);
++ goto teardown;
++ }
++
++ dma_fence_wait(&fence->base, false);
++ dma_fence_put(&fence->base);
++
+ ttm_bo_unreserve(&vps->cursor.bo->tbo);
+ return 0;
+
+@@ -643,22 +652,12 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ {
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+- bool is_iomem;
+
+ if (vps->surf_mapped) {
+ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
+ vps->surf_mapped = false;
+ }
+
+- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
+- const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
+-
+- if (likely(ret == 0)) {
+- ttm_bo_kunmap(&vps->bo->map);
+- ttm_bo_unreserve(&vps->bo->tbo);
+- }
+- }
+-
+ vmw_du_cursor_plane_unmap_cm(vps);
+ vmw_du_put_cursor_mob(vcp, vps);
+
+@@ -694,6 +693,10 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
+ int ret = 0;
+
+ if (vps->surf) {
++ if (vps->surf_mapped) {
++ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
++ vps->surf_mapped = false;
++ }
+ vmw_surface_unreference(&vps->surf);
+ vps->surf = NULL;
+ }
+@@ -924,6 +927,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
+ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+ {
++ struct vmw_private *vmw = vmw_priv(crtc->dev);
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
+@@ -931,9 +935,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+ bool has_primary = new_state->plane_mask &
+ drm_plane_mask(crtc->primary);
+
+- /* We always want to have an active plane with an active CRTC */
+- if (has_primary != new_state->enable)
+- return -EINVAL;
++ /*
++ * This is fine in general, but broken userspace might expect
++ * some actual rendering so give a clue as why it's blank.
++ */
++ if (new_state->enable && !has_primary)
++ drm_dbg_driver(&vmw->drm,
++ "CRTC without a primary plane will be blank.\n");
+
+
+ if (new_state->connector_mask != connector_mask &&
+@@ -1651,6 +1659,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ DRM_ERROR("Surface size cannot exceed %dx%d\n",
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
++ ret = -EINVAL;
+ goto err_out;
+ }
+
+@@ -2143,13 +2152,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ return 0;
+ }
+
++static
+ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+- uint32_t pitch,
+- uint32_t height)
++ u64 pitch,
++ u64 height)
+ {
+- return ((u64) pitch * (u64) height) < (u64)
+- ((dev_priv->active_display_unit == vmw_du_screen_target) ?
+- dev_priv->max_primary_mem : dev_priv->vram_size);
++ return (pitch * height) < (u64)dev_priv->vram_size;
+ }
+
+ /**
+@@ -2272,107 +2280,6 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
+ connector_status_connected : connector_status_disconnected);
+ }
+
+-static struct drm_display_mode vmw_kms_connector_builtin[] = {
+- /* 640x480@60Hz */
+- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+- 752, 800, 0, 480, 489, 492, 525, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* 800x600@60Hz */
+- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+- 968, 1056, 0, 600, 601, 605, 628, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1024x768@60Hz */
+- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+- 1184, 1344, 0, 768, 771, 777, 806, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* 1152x864@75Hz */
+- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+- 1344, 1600, 0, 864, 865, 868, 900, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1280x720@60Hz */
+- { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
+- 1472, 1664, 0, 720, 723, 728, 748, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1280x768@60Hz */
+- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+- 1472, 1664, 0, 768, 771, 778, 798, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1280x800@60Hz */
+- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+- 1480, 1680, 0, 800, 803, 809, 831, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* 1280x960@60Hz */
+- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+- 1488, 1800, 0, 960, 961, 964, 1000, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1280x1024@60Hz */
+- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1360x768@60Hz */
+- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+- 1536, 1792, 0, 768, 771, 777, 795, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1440x1050@60Hz */
+- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1440x900@60Hz */
+- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+- 1672, 1904, 0, 900, 903, 909, 934, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1600x1200@60Hz */
+- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1680x1050@60Hz */
+- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1792x1344@60Hz */
+- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1853x1392@60Hz */
+- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1920x1080@60Hz */
+- { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
+- 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1920x1200@60Hz */
+- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 1920x1440@60Hz */
+- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 2560x1440@60Hz */
+- { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
+- 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* 2560x1600@60Hz */
+- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+- /* 2880x1800@60Hz */
+- { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
+- 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* 3840x2160@60Hz */
+- { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
+- 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* 3840x2400@60Hz */
+- { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
+- 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
+- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+- /* Terminate */
+- { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+-};
+-
+ /**
+ * vmw_guess_mode_timing - Provide fake timings for a
+ * 60Hz vrefresh mode.
+@@ -2394,88 +2301,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode)
+ }
+
+
+-int vmw_du_connector_fill_modes(struct drm_connector *connector,
+- uint32_t max_width, uint32_t max_height)
+-{
+- struct vmw_display_unit *du = vmw_connector_to_du(connector);
+- struct drm_device *dev = connector->dev;
+- struct vmw_private *dev_priv = vmw_priv(dev);
+- struct drm_display_mode *mode = NULL;
+- struct drm_display_mode *bmode;
+- struct drm_display_mode prefmode = { DRM_MODE("preferred",
+- DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+- };
+- int i;
+- u32 assumed_bpp = 4;
+-
+- if (dev_priv->assume_16bpp)
+- assumed_bpp = 2;
+-
+- max_width = min(max_width, dev_priv->texture_max_width);
+- max_height = min(max_height, dev_priv->texture_max_height);
+-
+- /*
+- * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
+- * HEIGHT registers.
+- */
+- if (dev_priv->active_display_unit == vmw_du_screen_target) {
+- max_width = min(max_width, dev_priv->stdu_max_width);
+- max_height = min(max_height, dev_priv->stdu_max_height);
+- }
+-
+- /* Add preferred mode */
+- mode = drm_mode_duplicate(dev, &prefmode);
+- if (!mode)
+- return 0;
+- mode->hdisplay = du->pref_width;
+- mode->vdisplay = du->pref_height;
+- vmw_guess_mode_timing(mode);
+- drm_mode_set_name(mode);
+-
+- if (vmw_kms_validate_mode_vram(dev_priv,
+- mode->hdisplay * assumed_bpp,
+- mode->vdisplay)) {
+- drm_mode_probed_add(connector, mode);
+- } else {
+- drm_mode_destroy(dev, mode);
+- mode = NULL;
+- }
+-
+- if (du->pref_mode) {
+- list_del_init(&du->pref_mode->head);
+- drm_mode_destroy(dev, du->pref_mode);
+- }
+-
+- /* mode might be null here, this is intended */
+- du->pref_mode = mode;
+-
+- for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+- bmode = &vmw_kms_connector_builtin[i];
+- if (bmode->hdisplay > max_width ||
+- bmode->vdisplay > max_height)
+- continue;
+-
+- if (!vmw_kms_validate_mode_vram(dev_priv,
+- bmode->hdisplay * assumed_bpp,
+- bmode->vdisplay))
+- continue;
+-
+- mode = drm_mode_duplicate(dev, bmode);
+- if (!mode)
+- return 0;
+-
+- drm_mode_probed_add(connector, mode);
+- }
+-
+- drm_connector_list_update(connector);
+- /* Move the prefered mode first, help apps pick the right mode. */
+- drm_mode_sort(&connector->modes);
+-
+- return 1;
+-}
+-
+ /**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+@@ -3016,3 +2841,84 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
+ }
++
++/**
++ * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
++ *
++ * @connector: the drm connector, part of a DU container
++ * @mode: drm mode to check
++ *
++ * Returns MODE_OK on success, or a drm_mode_status error code.
++ */
++enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ enum drm_mode_status ret;
++ struct drm_device *dev = connector->dev;
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ u32 assumed_cpp = 4;
++
++ if (dev_priv->assume_16bpp)
++ assumed_cpp = 2;
++
++ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
++ dev_priv->texture_max_height);
++ if (ret != MODE_OK)
++ return ret;
++
++ if (!vmw_kms_validate_mode_vram(dev_priv,
++ mode->hdisplay * assumed_cpp,
++ mode->vdisplay))
++ return MODE_MEM;
++
++ return MODE_OK;
++}
++
++/**
++ * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
++ *
++ * @connector: the drm connector, part of a DU container
++ *
++ * Returns the number of added modes.
++ */
++int vmw_connector_get_modes(struct drm_connector *connector)
++{
++ struct vmw_display_unit *du = vmw_connector_to_du(connector);
++ struct drm_device *dev = connector->dev;
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ struct drm_display_mode *mode = NULL;
++ struct drm_display_mode prefmode = { DRM_MODE("preferred",
++ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
++ };
++ u32 max_width;
++ u32 max_height;
++ u32 num_modes;
++
++ /* Add preferred mode */
++ mode = drm_mode_duplicate(dev, &prefmode);
++ if (!mode)
++ return 0;
++
++ mode->hdisplay = du->pref_width;
++ mode->vdisplay = du->pref_height;
++ vmw_guess_mode_timing(mode);
++ drm_mode_set_name(mode);
++
++ drm_mode_probed_add(connector, mode);
++ drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
++
++ /* Probe connector for all modes not exceeding our geom limits */
++ max_width = dev_priv->texture_max_width;
++ max_height = dev_priv->texture_max_height;
++
++ if (dev_priv->active_display_unit == vmw_du_screen_target) {
++ max_width = min(dev_priv->stdu_max_width, max_width);
++ max_height = min(dev_priv->stdu_max_height, max_height);
++ }
++
++ num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
++
++ return num_modes;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index db81e635dc061f..19a843da87b789 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
+
+
+ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
+- DRM_FORMAT_XRGB1555,
+- DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
++ DRM_FORMAT_RGB565,
++ DRM_FORMAT_XRGB1555,
+ };
+
+ static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
+@@ -378,7 +378,6 @@ struct vmw_display_unit {
+ unsigned pref_width;
+ unsigned pref_height;
+ bool pref_active;
+- struct drm_display_mode *pref_mode;
+
+ /*
+ * Gui positioning
+@@ -428,8 +427,6 @@ void vmw_du_connector_save(struct drm_connector *connector);
+ void vmw_du_connector_restore(struct drm_connector *connector);
+ enum drm_connector_status
+ vmw_du_connector_detect(struct drm_connector *connector, bool force);
+-int vmw_du_connector_fill_modes(struct drm_connector *connector,
+- uint32_t max_width, uint32_t max_height);
+ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+@@ -438,6 +435,9 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty);
++enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode);
++int vmw_connector_get_modes(struct drm_connector *connector);
+
+ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index a82fa97003705f..c4db4aecca6c35 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -304,7 +304,7 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .detect = vmw_du_connector_detect,
+- .fill_modes = vmw_du_connector_fill_modes,
++ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vmw_ldu_connector_destroy,
+ .reset = vmw_du_connector_reset,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+@@ -313,6 +313,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
+
+ static const struct
+ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
++ .get_modes = vmw_connector_get_modes,
++ .mode_valid = vmw_connector_mode_valid
+ };
+
+ static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+@@ -449,7 +451,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+ ldu->base.pref_active = (unit == 0);
+ ldu->base.pref_width = dev_priv->initial_width;
+ ldu->base.pref_height = dev_priv->initial_height;
+- ldu->base.pref_mode = NULL;
+
+ /*
+ * Remove this after enabling atomic because property values can
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index c45b4724e4141d..e20f64b67b2669 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+ {
+ struct vmw_escape_video_flush *flush;
+ size_t fifo_size;
+- bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
++ bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
+ int i, num_items;
+ SVGAGuestPtr ptr;
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+index 2d72a5ee7c0c71..c99cad44499157 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
+ int fd, u32 *handle)
+ {
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
+
+- return ttm_prime_fd_to_handle(tfile, fd, handle);
++ if (ret)
++ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
++
++ return ret;
+ }
+
+ int vmw_prime_handle_to_fd(struct drm_device *dev,
+@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
+ int *prime_fd)
+ {
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
++ int ret;
++
++ if (handle > VMWGFX_NUM_MOB)
++ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
++ else
++ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
++
++ return ret;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index 556a403b7eb56a..30c3ad27b6629a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -347,7 +347,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
+ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .detect = vmw_du_connector_detect,
+- .fill_modes = vmw_du_connector_fill_modes,
++ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vmw_sou_connector_destroy,
+ .reset = vmw_du_connector_reset,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+@@ -357,6 +357,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
+
+ static const struct
+ drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
++ .get_modes = vmw_connector_get_modes,
++ .mode_valid = vmw_connector_mode_valid
+ };
+
+
+@@ -826,7 +828,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+ sou->base.pref_active = (unit == 0);
+ sou->base.pref_width = dev_priv->initial_width;
+ sou->base.pref_height = dev_priv->initial_height;
+- sou->base.pref_mode = NULL;
+
+ /*
+ * Remove this after enabling atomic because property values can
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index ba0c0e12cfe9d0..b22ae25db4e17c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -41,7 +41,14 @@
+ #define vmw_connector_to_stdu(x) \
+ container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+-
++/*
++ * Some renderers such as llvmpipe will align the width and height of their
++ * buffers to match their tile size. We need to keep this in mind when exposing
++ * modes to userspace so that this possible over-allocation will not exceed
++ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
++ * tile size of current renderers.
++ */
++#define GPU_TILE_SIZE 64
+
+ enum stdu_content_type {
+ SAME_AS_DISPLAY = 0,
+@@ -490,7 +497,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
+ container_of(dirty->unit, typeof(*stdu), base);
+ s32 width, height;
+ s32 src_pitch, dst_pitch;
+- struct ttm_buffer_object *src_bo, *dst_bo;
++ struct vmw_bo *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
+
+@@ -505,11 +512,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
+
+ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
+ src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+- src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
++ src_bo = stdu->display_srf->res.guest_memory_bo;
+ src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
+
+ dst_pitch = ddirty->pitch;
+- dst_bo = &ddirty->buf->tbo;
++ dst_bo = ddirty->buf;
+ dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
+
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
+@@ -825,12 +832,72 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+ vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+ }
+
++static enum drm_mode_status
++vmw_stdu_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ enum drm_mode_status ret;
++ struct drm_device *dev = connector->dev;
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
++ /* Align width and height to account for GPU tile over-alignment */
++ u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
++ ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
++ assumed_cpp;
++ required_mem = ALIGN(required_mem, PAGE_SIZE);
++
++ ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
++ dev_priv->stdu_max_height);
++ if (ret != MODE_OK)
++ return ret;
++
++ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
++ dev_priv->texture_max_height);
++ if (ret != MODE_OK)
++ return ret;
++
++ if (required_mem > dev_priv->max_primary_mem)
++ return MODE_MEM;
++
++ if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
++ return MODE_MEM;
++
++ if (required_mem > dev_priv->max_mob_size)
++ return MODE_MEM;
++
++ return MODE_OK;
++}
+
++/*
++ * Trigger a modeset if the X,Y position of the Screen Target changes.
++ * This is needed when multi-mon is cycled. The original Screen Target will have
++ * the same mode but its relative X,Y position in the topology will change.
++ */
++static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
++ struct drm_atomic_state *state)
++{
++ struct drm_connector_state *conn_state;
++ struct vmw_screen_target_display_unit *du;
++ struct drm_crtc_state *new_crtc_state;
++
++ conn_state = drm_atomic_get_connector_state(state, conn);
++ du = vmw_connector_to_stdu(conn);
++
++ if (!conn_state->crtc)
++ return 0;
++
++ new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
++ if (du->base.gui_x != du->base.set_gui_x ||
++ du->base.gui_y != du->base.set_gui_y)
++ new_crtc_state->mode_changed = true;
++
++ return 0;
++}
+
+ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ .dpms = vmw_du_connector_dpms,
+ .detect = vmw_du_connector_detect,
+- .fill_modes = vmw_du_connector_fill_modes,
++ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vmw_stdu_connector_destroy,
+ .reset = vmw_du_connector_reset,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+@@ -840,6 +907,9 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+
+ static const struct
+ drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
++ .get_modes = vmw_connector_get_modes,
++ .mode_valid = vmw_stdu_connector_mode_valid,
++ .atomic_check = vmw_stdu_connector_atomic_check,
+ };
+
+
+@@ -1066,7 +1136,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+ struct vmw_stdu_update_gb_image *cmd_img = cmd;
+ struct vmw_stdu_update *cmd_update;
+- struct ttm_buffer_object *src_bo, *dst_bo;
++ struct vmw_bo *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ s32 src_pitch, dst_pitch;
+ s32 width, height;
+@@ -1080,11 +1150,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+
+ diff.cpp = stdu->cpp;
+
+- dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
++ dst_bo = stdu->display_srf->res.guest_memory_bo;
+ dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+ dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+- src_bo = &vfbbo->buffer->tbo;
++ src_bo = vfbbo->buffer;
+ src_pitch = update->vfb->base.pitches[0];
+ src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+ stdu->cpp;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 3829be282ff00f..17463aeeef28f2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ sizeof(metadata->mip_levels));
+ metadata->num_sizes = num_sizes;
+ metadata->sizes =
+- memdup_user((struct drm_vmw_size __user *)(unsigned long)
++ memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+- sizeof(*metadata->sizes) * metadata->num_sizes);
++ metadata->num_sizes, sizeof(*metadata->sizes));
+ if (IS_ERR(metadata->sizes)) {
+ ret = PTR_ERR(metadata->sizes);
+ goto out_no_sizes;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+index af8562c95cc35b..fcb87d83760ef6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+@@ -220,13 +220,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+ switch (dev_priv->map_mode) {
+ case vmw_dma_map_bind:
+ case vmw_dma_map_populate:
+- vsgt->sgt = &vmw_tt->sgt;
+- ret = sg_alloc_table_from_pages_segment(
+- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+- if (ret)
+- goto out_sg_alloc_fail;
++ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
++ vsgt->sgt = vmw_tt->dma_ttm.sg;
++ } else {
++ vsgt->sgt = &vmw_tt->sgt;
++ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
++ vsgt->pages, vsgt->num_pages, 0,
++ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
++ dma_get_max_seg_size(dev_priv->drm.dev),
++ GFP_KERNEL);
++ if (ret)
++ goto out_sg_alloc_fail;
++ }
+
+ ret = vmw_ttm_map_for_dma(vmw_tt);
+ if (unlikely(ret != 0))
+@@ -241,8 +246,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+ return 0;
+
+ out_map_fail:
+- sg_free_table(vmw_tt->vsgt.sgt);
+- vmw_tt->vsgt.sgt = NULL;
++ drm_warn(&dev_priv->drm, "VSG table map failed!");
++ sg_free_table(vsgt->sgt);
++ vsgt->sgt = NULL;
+ out_sg_alloc_fail:
+ return ret;
+ }
+@@ -388,15 +394,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+ static int vmw_ttm_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+ {
+- int ret;
++ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+- /* TODO: maybe completely drop this ? */
+ if (ttm_tt_is_populated(ttm))
+ return 0;
+
+- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
++ if (external && ttm->sg)
++ return drm_prime_sg_to_dma_addr_array(ttm->sg,
++ ttm->dma_address,
++ ttm->num_pages);
+
+- return ret;
++ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ }
+
+ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
+@@ -404,6 +412,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
+ {
+ struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+ dma_ttm);
++ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++
++ if (external)
++ return;
+
+ vmw_ttm_unbind(bdev, ttm);
+
+@@ -422,6 +434,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+ {
+ struct vmw_ttm_tt *vmw_be;
+ int ret;
++ bool external = bo->type == ttm_bo_type_sg;
+
+ vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
+ if (!vmw_be)
+@@ -430,7 +443,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+ vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
+ vmw_be->mob = NULL;
+
+- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
++ if (external)
++ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
++
++ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
+ ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
+ else
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+index 88eb33acd5f0dd..f5781939de9c35 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+@@ -256,12 +256,12 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_dp;
+
++ drm_bridge_add(dpsub->bridge);
++
+ if (dpsub->dma_enabled) {
+ ret = zynqmp_dpsub_drm_init(dpsub);
+ if (ret)
+ goto err_disp;
+- } else {
+- drm_bridge_add(dpsub->bridge);
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
+@@ -269,6 +269,7 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ return 0;
+
+ err_disp:
++ drm_bridge_remove(dpsub->bridge);
+ zynqmp_disp_remove(dpsub);
+ err_dp:
+ zynqmp_dp_remove(dpsub);
+@@ -288,9 +289,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
+
+ if (dpsub->drm)
+ zynqmp_dpsub_drm_cleanup(dpsub);
+- else
+- drm_bridge_remove(dpsub->bridge);
+
++ drm_bridge_remove(dpsub->bridge);
+ zynqmp_disp_remove(dpsub);
+ zynqmp_dp_remove(dpsub);
+
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
+index a7f8611be6f420..44d4a510ad7d68 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
+@@ -434,23 +434,28 @@ static int zynqmp_dpsub_kms_init(struct zynqmp_dpsub *dpsub)
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dpsub->dev, "failed to attach bridge to encoder\n");
+- return ret;
++ goto err_encoder;
+ }
+
+ /* Create the connector for the chain of bridges. */
+ connector = drm_bridge_connector_init(&dpsub->drm->dev, encoder);
+ if (IS_ERR(connector)) {
+ dev_err(dpsub->dev, "failed to created connector\n");
+- return PTR_ERR(connector);
++ ret = PTR_ERR(connector);
++ goto err_encoder;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ dev_err(dpsub->dev, "failed to attach connector to encoder\n");
+- return ret;
++ goto err_encoder;
+ }
+
+ return 0;
++
++err_encoder:
++ drm_encoder_cleanup(encoder);
++ return ret;
+ }
+
+ static void zynqmp_dpsub_drm_release(struct drm_device *drm, void *res)
+@@ -530,5 +535,6 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub)
+
+ drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
++ drm_encoder_cleanup(&dpsub->drm->encoder);
+ drm_kms_helper_poll_fini(drm);
+ }
+diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
+index 84d042796d2e66..3937889fa912d4 100644
+--- a/drivers/gpu/host1x/bus.c
++++ b/drivers/gpu/host1x/bus.c
+@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
+ return 0;
+ }
+
+-static int host1x_dma_configure(struct device *dev)
+-{
+- return of_dma_configure(dev, dev->of_node, true);
+-}
+-
+ static const struct dev_pm_ops host1x_device_pm_ops = {
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+@@ -369,7 +364,6 @@ struct bus_type host1x_bus_type = {
+ .name = "host1x",
+ .match = host1x_device_match,
+ .uevent = host1x_device_uevent,
+- .dma_configure = host1x_dma_configure,
+ .pm = &host1x_device_pm_ops,
+ };
+
+@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
+ device->dev.bus = &host1x_bus_type;
+ device->dev.parent = host1x->dev;
+
+- of_dma_configure(&device->dev, host1x->dev->of_node, true);
+-
+ device->dev.dma_parms = &device->dma_parms;
+ dma_set_max_seg_size(&device->dev, UINT_MAX);
+
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index a3f336edd991b9..955c971c528d42 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ if (err < 0)
+ return 0;
+
+- cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
++ cdl->len = err / 4;
++ cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
+ if (!cdl->devs)
+ return -ENOMEM;
+- cdl->len = err / 4;
+
+ for (i = 0; i < cdl->len; i++) {
+ ctx = &cdl->devs[i];
+diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
+index 9ec949a438ef67..52ef6be9d44996 100644
+--- a/drivers/greybus/interface.c
++++ b/drivers/greybus/interface.c
+@@ -694,6 +694,7 @@ static void gb_interface_release(struct device *dev)
+
+ trace_gb_interface_release(intf);
+
++ cancel_work_sync(&intf->mode_switch_work);
+ kfree(intf);
+ }
+
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 790aa908e2a788..9e2cde55b465ce 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -1300,6 +1300,15 @@ config HID_ALPS
+ Say Y here if you have a Alps touchpads over i2c-hid or usbhid
+ and want support for its special functionalities.
+
++config HID_MCP2200
++ tristate "Microchip MCP2200 HID USB-to-GPIO bridge"
++ depends on USB_HID && GPIOLIB
++ help
++	  Provides GPIO functionality over USB-HID through the MCP2200 device.
++
++ To compile this driver as a module, choose M here: the module
++ will be called hid-mcp2200.ko.
++
+ config HID_MCP2221
+ tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
+ depends on USB_HID && I2C
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index 8a06d0f840bcbe..082a728eac6004 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -79,6 +79,7 @@ obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
+ obj-$(CONFIG_HID_MACALLY) += hid-macally.o
+ obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+ obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
++obj-$(CONFIG_HID_MCP2200) += hid-mcp2200.o
+ obj-$(CONFIG_HID_MCP2221) += hid-mcp2221.o
+ obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
+ obj-$(CONFIG_HID_MEGAWORLD_FF) += hid-megaworld.o
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index bdb578e0899f55..3438d392920fad 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -236,9 +236,9 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ cl_data->in_data = in_data;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+- in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
+- &cl_data->sensor_dma_addr[i],
+- GFP_KERNEL);
++ in_data->sensor_virt_addr[i] = dmam_alloc_coherent(dev, sizeof(int) * 8,
++ &cl_data->sensor_dma_addr[i],
++ GFP_KERNEL);
+ if (!in_data->sensor_virt_addr[i]) {
+ rc = -ENOMEM;
+ goto cleanup;
+@@ -288,12 +288,22 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ mp2_ops->start(privdata, info);
+ cl_data->sensor_sts[i] = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
++
++ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
++ cl_data->is_any_sensor_enabled = true;
++ }
++
++ if (!cl_data->is_any_sensor_enabled ||
++ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
++ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
++ cl_data->is_any_sensor_enabled);
++ rc = -EOPNOTSUPP;
++ goto cleanup;
+ }
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ cl_data->cur_hid_dev = i;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+- cl_data->is_any_sensor_enabled = true;
+ rc = amdtp_hid_probe(i, cl_data);
+ if (rc)
+ goto cleanup;
+@@ -305,12 +315,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ cl_data->sensor_sts[i]);
+ }
+
+- if (!cl_data->is_any_sensor_enabled ||
+- (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
+- dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
+- rc = -EOPNOTSUPP;
+- goto cleanup;
+- }
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ return 0;
+
+@@ -327,7 +331,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
+ {
+ struct amdtp_cl_data *cl_data = privdata->cl_data;
+- struct amd_input_data *in_data = cl_data->in_data;
+ int i, status;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+@@ -347,12 +350,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amdtp_hid_remove(cl_data);
+
+- for (i = 0; i < cl_data->num_hid_devices; i++) {
+- if (in_data->sensor_virt_addr[i]) {
+- dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
+- in_data->sensor_virt_addr[i],
+- cl_data->sensor_dma_addr[i]);
+- }
+- }
+ return 0;
+ }
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+index 705b5233706845..81f3024b7b1b51 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+@@ -171,11 +171,13 @@ int amdtp_hid_probe(u32 cur_hid_dev, struct amdtp_cl_data *cli_data)
+ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
+ {
+ int i;
++ struct amdtp_hid_data *hid_data;
+
+ for (i = 0; i < cli_data->num_hid_devices; ++i) {
+ if (cli_data->hid_sensor_hubs[i]) {
+- kfree(cli_data->hid_sensor_hubs[i]->driver_data);
++ hid_data = cli_data->hid_sensor_hubs[i]->driver_data;
+ hid_destroy_device(cli_data->hid_sensor_hubs[i]);
++ kfree(hid_data);
+ cli_data->hid_sensor_hubs[i] = NULL;
+ }
+ }
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index 2530fa98b568be..ce449da08e9ba8 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -35,6 +35,8 @@ static int sensor_mask_override = -1;
+ module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+ MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+
++static bool intr_disable = true;
++
+ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
+ {
+ union cmd_response cmd_resp;
+@@ -55,7 +57,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
+
+ cmd_base.ul = 0;
+ cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+- cmd_base.cmd_v2.intr_disable = 1;
++ cmd_base.cmd_v2.intr_disable = intr_disable;
+ cmd_base.cmd_v2.period = info.period;
+ cmd_base.cmd_v2.sensor_id = info.sensor_idx;
+ cmd_base.cmd_v2.length = 16;
+@@ -73,7 +75,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
+
+ cmd_base.ul = 0;
+ cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+- cmd_base.cmd_v2.intr_disable = 1;
++ cmd_base.cmd_v2.intr_disable = intr_disable;
+ cmd_base.cmd_v2.period = 0;
+ cmd_base.cmd_v2.sensor_id = sensor_idx;
+ cmd_base.cmd_v2.length = 16;
+@@ -87,7 +89,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
+ union sfh_cmd_base cmd_base;
+
+ cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+- cmd_base.cmd_v2.intr_disable = 1;
++ cmd_base.cmd_v2.intr_disable = intr_disable;
+ cmd_base.cmd_v2.period = 0;
+ cmd_base.cmd_v2.sensor_id = 0;
+
+@@ -292,6 +294,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+ return 0;
+ }
+
++static int mp2_disable_intr(const struct dmi_system_id *id)
++{
++ intr_disable = false;
++ return 0;
++}
++
++static const struct dmi_system_id dmi_sfh_table[] = {
++ {
++ /*
++ * https://bugzilla.kernel.org/show_bug.cgi?id=218104
++ */
++ .callback = mp2_disable_intr,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"),
++ },
++ },
++ {}
++};
++
+ static const struct dmi_system_id dmi_nodevs[] = {
+ {
+ /*
+@@ -315,6 +337,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
+ if (dmi_first_match(dmi_nodevs))
+ return -ENODEV;
+
++ dmi_check_system(dmi_sfh_table);
++
+ privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
+ if (!privdata)
+ return -ENOMEM;
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+index 70add75fc50660..05e400a4a83e40 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+@@ -90,10 +90,10 @@ enum mem_use_type {
+ struct hpd_status {
+ union {
+ struct {
+- u32 human_presence_report : 4;
+- u32 human_presence_actual : 4;
+- u32 probablity : 8;
+ u32 object_distance : 16;
++ u32 probablity : 8;
++ u32 human_presence_actual : 4;
++ u32 human_presence_report : 4;
+ } shpd;
+ u32 val;
+ };
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index e9c6413af24a07..862ca8d0723262 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -210,6 +210,11 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
+ struct amd_mp2_sensor_info info;
+ int i, status;
+
++ if (!cl_data->is_any_sensor_enabled) {
++ amd_sfh_clear_intr(mp2);
++ return;
++ }
++
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
+ info.sensor_idx = cl_data->sensor_idx[i];
+@@ -235,6 +240,11 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ int i, status;
+
++ if (!cl_data->is_any_sensor_enabled) {
++ amd_sfh_clear_intr(mp2);
++ return;
++ }
++
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] != HPD_IDX &&
+ cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
+index d9ef45fcaeab13..7903c8638e8173 100644
+--- a/drivers/hid/bpf/hid_bpf_dispatch.c
++++ b/drivers/hid/bpf/hid_bpf_dispatch.c
+@@ -241,6 +241,39 @@ int hid_bpf_reconnect(struct hid_device *hdev)
+ return 0;
+ }
+
++static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
++ __u32 flags)
++{
++ int fd, err, prog_type;
++
++ prog_type = hid_bpf_get_prog_attach_type(prog);
++ if (prog_type < 0)
++ return prog_type;
++
++ if (prog_type >= HID_BPF_PROG_TYPE_MAX)
++ return -EINVAL;
++
++ if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
++ err = hid_bpf_allocate_event_data(hdev);
++ if (err)
++ return err;
++ }
++
++ fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
++ if (fd < 0)
++ return fd;
++
++ if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
++ err = hid_bpf_reconnect(hdev);
++ if (err) {
++ close_fd(fd);
++ return err;
++ }
++ }
++
++ return fd;
++}
++
+ /**
+ * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
+ *
+@@ -257,18 +290,13 @@ noinline int
+ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
+ {
+ struct hid_device *hdev;
++ struct bpf_prog *prog;
+ struct device *dev;
+- int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
++ int err, fd;
+
+ if (!hid_bpf_ops)
+ return -EINVAL;
+
+- if (prog_type < 0)
+- return prog_type;
+-
+- if (prog_type >= HID_BPF_PROG_TYPE_MAX)
+- return -EINVAL;
+-
+ if ((flags & ~HID_BPF_FLAG_MASK))
+ return -EINVAL;
+
+@@ -278,25 +306,29 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
+
+ hdev = to_hid_device(dev);
+
+- if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
+- err = hid_bpf_allocate_event_data(hdev);
+- if (err)
+- return err;
++ /*
++ * take a ref on the prog itself, it will be released
++	 * on errors or when it is detached
++ */
++ prog = bpf_prog_get(prog_fd);
++ if (IS_ERR(prog)) {
++ err = PTR_ERR(prog);
++ goto out_dev_put;
+ }
+
+- fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
+- if (fd < 0)
+- return fd;
+-
+- if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
+- err = hid_bpf_reconnect(hdev);
+- if (err) {
+- close_fd(fd);
+- return err;
+- }
++ fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
++ if (fd < 0) {
++ err = fd;
++ goto out_prog_put;
+ }
+
+ return fd;
++
++ out_prog_put:
++ bpf_prog_put(prog);
++ out_dev_put:
++ put_device(dev);
++ return err;
+ }
+
+ /**
+@@ -323,8 +355,10 @@ hid_bpf_allocate_context(unsigned int hid_id)
+ hdev = to_hid_device(dev);
+
+ ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
+- if (!ctx_kern)
++ if (!ctx_kern) {
++ put_device(dev);
+ return NULL;
++ }
+
+ ctx_kern->ctx.hid = hdev;
+
+@@ -341,10 +375,15 @@ noinline void
+ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
+ {
+ struct hid_bpf_ctx_kern *ctx_kern;
++ struct hid_device *hid;
+
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
++ hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */
+
+ kfree(ctx_kern);
++
++ /* get_device() is called by bus_find_device() */
++ put_device(&hid->dev);
+ }
+
+ /**
+diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h
+index 63dfc8605cd21e..fbe0639d09f260 100644
+--- a/drivers/hid/bpf/hid_bpf_dispatch.h
++++ b/drivers/hid/bpf/hid_bpf_dispatch.h
+@@ -12,9 +12,9 @@ struct hid_bpf_ctx_kern {
+
+ int hid_bpf_preload_skel(void);
+ void hid_bpf_free_links_and_skel(void);
+-int hid_bpf_get_prog_attach_type(int prog_fd);
++int hid_bpf_get_prog_attach_type(struct bpf_prog *prog);
+ int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
+- __u32 flags);
++ struct bpf_prog *prog, __u32 flags);
+ void __hid_bpf_destroy_device(struct hid_device *hdev);
+ int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
+ struct hid_bpf_ctx_kern *ctx_kern);
+diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
+index eca34b7372f951..aa8e1c79cdf551 100644
+--- a/drivers/hid/bpf/hid_bpf_jmp_table.c
++++ b/drivers/hid/bpf/hid_bpf_jmp_table.c
+@@ -196,6 +196,7 @@ static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
+ static void hid_bpf_release_progs(struct work_struct *work)
+ {
+ int i, j, n, map_fd = -1;
++ bool hdev_destroyed;
+
+ if (!jmp_table.map)
+ return;
+@@ -220,6 +221,12 @@ static void hid_bpf_release_progs(struct work_struct *work)
+ if (entry->hdev) {
+ hdev = entry->hdev;
+ type = entry->type;
++ /*
++ * hdev is still valid, even if we are called after hid_destroy_device():
++ * when hid_bpf_attach() gets called, it takes a ref on the dev through
++ * bus_find_device()
++ */
++ hdev_destroyed = hdev->bpf.destroyed;
+
+ hid_bpf_populate_hdev(hdev, type);
+
+@@ -232,12 +239,19 @@ static void hid_bpf_release_progs(struct work_struct *work)
+ if (test_bit(next->idx, jmp_table.enabled))
+ continue;
+
+- if (next->hdev == hdev && next->type == type)
++ if (next->hdev == hdev && next->type == type) {
++ /*
++ * clear the hdev reference and decrement the device ref
++ * that was taken during bus_find_device() while calling
++ * hid_bpf_attach()
++ */
+ next->hdev = NULL;
++ put_device(&hdev->dev);
++ }
+ }
+
+- /* if type was rdesc fixup, reconnect device */
+- if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP)
++ /* if type was rdesc fixup and the device is not gone, reconnect device */
++ if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && !hdev_destroyed)
+ hid_bpf_reconnect(hdev);
+ }
+ }
+@@ -333,15 +347,10 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
+ return err;
+ }
+
+-int hid_bpf_get_prog_attach_type(int prog_fd)
++int hid_bpf_get_prog_attach_type(struct bpf_prog *prog)
+ {
+- struct bpf_prog *prog = NULL;
+- int i;
+ int prog_type = HID_BPF_PROG_TYPE_UNDEF;
+-
+- prog = bpf_prog_get(prog_fd);
+- if (IS_ERR(prog))
+- return PTR_ERR(prog);
++ int i;
+
+ for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
+ if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
+@@ -350,8 +359,6 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
+ }
+ }
+
+- bpf_prog_put(prog);
+-
+ return prog_type;
+ }
+
+@@ -388,19 +395,13 @@ static const struct bpf_link_ops hid_bpf_link_lops = {
+ /* called from syscall */
+ noinline int
+ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
+- int prog_fd, __u32 flags)
++ int prog_fd, struct bpf_prog *prog, __u32 flags)
+ {
+ struct bpf_link_primer link_primer;
+ struct hid_bpf_link *link;
+- struct bpf_prog *prog = NULL;
+ struct hid_bpf_prog_entry *prog_entry;
+ int cnt, err = -EINVAL, prog_table_idx = -1;
+
+- /* take a ref on the prog itself */
+- prog = bpf_prog_get(prog_fd);
+- if (IS_ERR(prog))
+- return PTR_ERR(prog);
+-
+ mutex_lock(&hid_bpf_attach_lock);
+
+ link = kzalloc(sizeof(*link), GFP_USER);
+@@ -467,7 +468,6 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
+ err_unlock:
+ mutex_unlock(&hid_bpf_attach_lock);
+
+- bpf_prog_put(prog);
+ kfree(link);
+
+ return err;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 3ca45975c686ee..d9e9829b22001a 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -345,6 +345,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
+ { "AONE" },
+ { "GANSS" },
+ { "Hailuck" },
++ { "Jamesdonkey" },
++ { "A3R" },
+ };
+
+ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index fd61dba882338e..84625e817ce950 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -335,36 +335,20 @@ static int asus_raw_event(struct hid_device *hdev,
+ if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ return asus_e1239t_event(drvdata, data, size);
+
+- if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
++ /*
++ * Skip these report ID, the device emits a continuous stream associated
++ * with the AURA mode it is in which looks like an 'echo'.
++ */
++ if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2)
++ return -1;
++ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ /*
+- * Skip these report ID, the device emits a continuous stream associated
+- * with the AURA mode it is in which looks like an 'echo'.
++ * G713 and G733 send these codes on some keypresses, depending on
++ * the key pressed it can trigger a shutdown event if not caught.
+ */
+- if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
+- report->id == FEATURE_KBD_LED_REPORT_ID2) {
++ if (data[0] == 0x02 && data[1] == 0x30) {
+ return -1;
+- /* Additional report filtering */
+- } else if (report->id == FEATURE_KBD_REPORT_ID) {
+- /*
+- * G14 and G15 send these codes on some keypresses with no
+- * discernable reason for doing so. We'll filter them out to avoid
+- * unmapped warning messages later.
+- */
+- if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
+- data[1] == 0x8a || data[1] == 0x9e) {
+- return -1;
+- }
+- }
+- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+- /*
+- * G713 and G733 send these codes on some keypresses, depending on
+- * the key pressed it can trigger a shutdown event if not caught.
+- */
+- if(data[0] == 0x02 && data[1] == 0x30) {
+- return -1;
+- }
+ }
+-
+ }
+
+ if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
+@@ -381,7 +365,7 @@ static int asus_raw_event(struct hid_device *hdev,
+ return 0;
+ }
+
+-static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size)
++static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size)
+ {
+ unsigned char *dmabuf;
+ int ret;
+@@ -404,7 +388,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
+
+ static int asus_kbd_init(struct hid_device *hdev)
+ {
+- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
++ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
+ 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
+ int ret;
+
+@@ -418,7 +402,7 @@ static int asus_kbd_init(struct hid_device *hdev)
+ static int asus_kbd_get_functions(struct hid_device *hdev,
+ unsigned char *kbd_func)
+ {
+- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
++ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
+ u8 *readbuf;
+ int ret;
+
+@@ -449,7 +433,7 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
+
+ static int rog_nkey_led_init(struct hid_device *hdev)
+ {
+- u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
++ const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
+ u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
+ 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
+ u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
+@@ -897,7 +881,10 @@ static int asus_input_mapping(struct hid_device *hdev,
+ case 0xb3: asus_map_key_clear(KEY_PROG3); break; /* Fn+Left next aura */
+ case 0x6a: asus_map_key_clear(KEY_F13); break; /* Screenpad toggle */
+ case 0x4b: asus_map_key_clear(KEY_F14); break; /* Arrows/Pg-Up/Dn toggle */
+-
++ case 0xa5: asus_map_key_clear(KEY_F15); break; /* ROG Ally left back */
++ case 0xa6: asus_map_key_clear(KEY_F16); break; /* ROG Ally QAM button */
++ case 0xa7: asus_map_key_clear(KEY_F17); break; /* ROG Ally ROG long-press */
++ case 0xa8: asus_map_key_clear(KEY_F18); break; /* ROG Ally ROG long-press-release */
+
+ default:
+ /* ASUS lazily declares 256 usages, ignore the rest,
+@@ -1000,6 +987,24 @@ static int asus_start_multitouch(struct hid_device *hdev)
+ return 0;
+ }
+
++static int __maybe_unused asus_resume(struct hid_device *hdev) {
++ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
++ int ret = 0;
++
++ if (drvdata->kbd_backlight) {
++ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4,
++ drvdata->kbd_backlight->cdev.brightness };
++ ret = asus_kbd_set_report(hdev, buf, sizeof(buf));
++ if (ret < 0) {
++ hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret);
++ goto asus_resume_err;
++ }
++ }
++
++asus_resume_err:
++ return ret;
++}
++
+ static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+ {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+@@ -1232,6 +1237,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ rdesc[205] = 0x01;
+ }
+
++ /* match many more n-key devices */
++ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) {
++ for (int i = 0; i < *rsize - 15; i++) {
++ /* offset to the count from 0x5a report part always 14 */
++ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
++ rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
++ hid_info(hdev, "Fixing up Asus N-Key report descriptor\n");
++ rdesc[i + 15] = 0x01;
++ break;
++ }
++ }
++ }
++
+ return rdesc;
+ }
+
+@@ -1258,6 +1276,15 @@ static const struct hid_device_id asus_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
++ USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR),
++ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
++ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
++ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
++ USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
++ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
+ QUIRK_ROG_CLAYMORE_II_KEYBOARD },
+@@ -1294,10 +1321,11 @@ static struct hid_driver asus_driver = {
+ .input_configured = asus_input_configured,
+ #ifdef CONFIG_PM
+ .reset_resume = asus_reset_resume,
++ .resume = asus_resume,
+ #endif
+ .event = asus_event,
+ .raw_event = asus_raw_event
+ };
+ module_hid_driver(asus_driver);
+
+-MODULE_LICENSE("GPL");
+\ No newline at end of file
++MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 8992e3c1e7698e..85ddeb13a3fae8 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -702,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
+ * Free a device structure, all reports, and all fields.
+ */
+
+-static void hid_device_release(struct device *dev)
++void hiddev_free(struct kref *ref)
+ {
+- struct hid_device *hid = to_hid_device(dev);
++ struct hid_device *hid = container_of(ref, struct hid_device, ref);
+
+ hid_close_report(hid);
+ kfree(hid->dev_rdesc);
+ kfree(hid);
+ }
+
++static void hid_device_release(struct device *dev)
++{
++ struct hid_device *hid = to_hid_device(dev);
++
++ kref_put(&hid->ref, hiddev_free);
++}
++
+ /*
+ * Fetch a report description item from the data stream. We support long
+ * items, though they are not used yet.
+@@ -1441,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report,
+ hid_warn(hid,
+ "%s() called with too large value %d (n: %d)! (%s)\n",
+ __func__, value, n, current->comm);
+- WARN_ON(1);
+ value &= m;
+ }
+ }
+@@ -2846,6 +2852,7 @@ struct hid_device *hid_allocate_device(void)
+ spin_lock_init(&hdev->debug_list_lock);
+ sema_init(&hdev->driver_input_lock, 1);
+ mutex_init(&hdev->ll_open_lock);
++ kref_init(&hdev->ref);
+
+ hid_bpf_device_init(hdev);
+
+diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
+index cb8bd8aae15b51..0fa785f52707ac 100644
+--- a/drivers/hid/hid-cougar.c
++++ b/drivers/hid/hid-cougar.c
+@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
+ static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++ if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
+ hid_info(hdev,
+ "usage count exceeds max: fixing up report descriptor\n");
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 54c33a24f8442c..20a0d1315d90fa 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1151,8 +1151,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+- INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+-
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1168,7 +1166,11 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cp2112_gpio_irq_mask(d);
+- cancel_delayed_work_sync(&dev->gpio_poll_worker);
++
++ if (!dev->irq_mask) {
++ dev->gpio_poll = false;
++ cancel_delayed_work_sync(&dev->gpio_poll_worker);
++ }
+ }
+
+ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+@@ -1307,6 +1309,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
++ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
++
+ ret = gpiochip_add_data(&dev->gc, dev);
+ if (ret < 0) {
+ hid_err(hdev, "error registering gpio chip\n");
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index e7ef1ea107c9e6..5302bfd527d86a 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -974,6 +974,8 @@ static const char *keys[KEY_MAX + 1] = {
+ [KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable",
+ [KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
+ [KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
++ [KEY_ACCESSIBILITY] = "Accessibility",
++ [KEY_DO_NOT_DISTURB] = "DoNotDisturb",
+ [KEY_DICTATE] = "Dictate",
+ [KEY_MICMUTE] = "MicrophoneMute",
+ [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+@@ -1135,6 +1137,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+ list->hdev = (struct hid_device *) inode->i_private;
++ kref_get(&list->hdev->ref);
+ file->private_data = list;
+ mutex_init(&list->read_mutex);
+
+@@ -1227,6 +1230,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
+ list_del(&list->node);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+ kfifo_free(&list->hid_debug_fifo);
++
++ kref_put(&list->hdev->ref, hiddev_free);
+ kfree(list);
+
+ return 0;
+diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
+index 558eb08c19ef9d..281b3a7187cec2 100644
+--- a/drivers/hid/hid-glorious.c
++++ b/drivers/hid/hid-glorious.c
+@@ -21,6 +21,10 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
+ * Glorious Model O and O- specify the const flag in the consumer input
+ * report descriptor, which leads to inputs being ignored. Fix this
+ * by patching the descriptor.
++ *
++ * Glorious Model I incorrectly specifies the Usage Minimum for its
++ * keyboard HID report, causing keycodes to be misinterpreted.
++ * Fix this by setting Usage Minimum to 0 in that report.
+ */
+ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+@@ -32,6 +36,10 @@ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ rdesc[85] = rdesc[113] = rdesc[141] = \
+ HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
+ }
++ if (*rsize == 156 && rdesc[41] == 1) {
++ hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n");
++ rdesc[41] = 0;
++ }
+ return rdesc;
+ }
+
+@@ -44,6 +52,8 @@ static void glorious_update_name(struct hid_device *hdev)
+ model = "Model O"; break;
+ case USB_DEVICE_ID_GLORIOUS_MODEL_D:
+ model = "Model D"; break;
++ case USB_DEVICE_ID_GLORIOUS_MODEL_I:
++ model = "Model I"; break;
+ }
+
+ snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
+@@ -66,10 +76,12 @@ static int glorious_probe(struct hid_device *hdev,
+ }
+
+ static const struct hid_device_id glorious_devices[] = {
+- { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
++ { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
+ USB_DEVICE_ID_GLORIOUS_MODEL_O) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
++ { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
+ USB_DEVICE_ID_GLORIOUS_MODEL_D) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW,
++ USB_DEVICE_ID_GLORIOUS_MODEL_I) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, glorious_devices);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e4d2dfd5d2536e..d4f6066dbbc596 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -208,6 +208,9 @@
+ #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
+ #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
+ #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3 0x1a30
++#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR 0x18c6
++#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY 0x1abe
++#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c
+ #define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b
+ #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
+
+@@ -298,6 +301,9 @@
+
+ #define USB_VENDOR_ID_CIDC 0x1677
+
++#define I2C_VENDOR_ID_CIRQUE 0x0488
++#define I2C_PRODUCT_ID_CIRQUE_1063 0x1063
++
+ #define USB_VENDOR_ID_CJTOUCH 0x24b8
+ #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
+ #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
+@@ -366,6 +372,7 @@
+
+ #define USB_VENDOR_ID_DELL 0x413c
+ #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
++#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
+
+ #define USB_VENDOR_ID_DELORME 0x1163
+ #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
+@@ -410,22 +417,9 @@
+ #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
+ #define USB_DEVICE_ID_HP_X2 0x074d
+ #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+-#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
+-#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
+-#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
+-#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+-#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
+-#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
+-#define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
+ #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
+-#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
+-#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
+-#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
+-#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100 0x29F5
+-#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
+-#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
+-#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02
++#define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81
+
+ #define USB_VENDOR_ID_ELECOM 0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084 0x0061
+@@ -510,11 +504,10 @@
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+
+-#define USB_VENDOR_ID_GLORIOUS 0x258a
+-#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
+-#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
+-
+ #define I2C_VENDOR_ID_GOODIX 0x27c6
++#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0
++#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
++#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
+ #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
+
+ #define USB_VENDOR_ID_GOODTOUCH 0x1aad
+@@ -743,6 +736,10 @@
+
+ #define USB_VENDOR_ID_LABTEC 0x1020
+ #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
++#define USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE 0x8888
++
++#define USB_VENDOR_ID_LAVIEW 0x22D4
++#define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503
+
+ #define USB_VENDOR_ID_LCPOWER 0x1241
+ #define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
+@@ -798,6 +795,7 @@
+ #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+ #define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
++#define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae
+ #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
+@@ -818,6 +816,7 @@
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+ #define USB_DEVICE_ID_LOGITECH_T651 0xb00c
+ #define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
++#define USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD 0xbb00
+ #define USB_DEVICE_ID_LOGITECH_C007 0xc007
+ #define USB_DEVICE_ID_LOGITECH_C077 0xc077
+ #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
+@@ -868,7 +867,6 @@
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
+-#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
+ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
+ #define USB_DEVICE_ID_SPACETRAVELLER 0xc623
+ #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
+@@ -916,6 +914,7 @@
+ #define USB_DEVICE_ID_PICK16F1454 0x0042
+ #define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7
+ #define USB_DEVICE_ID_LUXAFOR 0xf372
++#define USB_DEVICE_ID_MCP2200 0x00df
+ #define USB_DEVICE_ID_MCP2221 0x00dd
+
+ #define USB_VENDOR_ID_MICROSOFT 0x045e
+@@ -1034,6 +1033,8 @@
+ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
+ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057
+ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES 0x430c
++#define USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES 0x431e
+
+ #define USB_VENDOR_ID_PANASONIC 0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
+@@ -1159,6 +1160,10 @@
+ #define USB_VENDOR_ID_SIGMATEL 0x066F
+ #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+
++#define USB_VENDOR_ID_SINOWEALTH 0x258a
++#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
++#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
++
+ #define USB_VENDOR_ID_SIS_TOUCH 0x0457
+ #define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
+ #define USB_DEVICE_ID_SIS817_TOUCH 0x0817
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index c8b20d44b14724..fda9dce3da9980 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -373,10 +373,6 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
+ HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
+- HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+@@ -387,30 +383,13 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ HID_BATTERY_QUIRK_AVOID_QUERY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
+ HID_BATTERY_QUIRK_AVOID_QUERY },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
+- HID_BATTERY_QUIRK_IGNORE },
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
+- HID_BATTERY_QUIRK_IGNORE },
++ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
++ HID_BATTERY_QUIRK_AVOID_QUERY },
++ /*
++ * Elan I2C-HID touchscreens all seem to report a non-present battery,
++ * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices.
++ */
++ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
+ {}
+ };
+
+@@ -831,9 +810,18 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ break;
+ }
+
++ if ((usage->hid & 0xf0) == 0x90) { /* SystemControl */
++ switch (usage->hid & 0xf) {
++ case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
++ default: goto ignore;
++ }
++ break;
++ }
++
+ if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */
+ switch (usage->hid & 0xf) {
+ case 0x9: map_key_clear(KEY_MICMUTE); break;
++ case 0xa: map_key_clear(KEY_ACCESSIBILITY); break;
+ default: goto ignore;
+ }
+ break;
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 44763c0da44411..f86c1ea83a0378 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -51,8 +51,13 @@ struct lenovo_drvdata {
+ int select_right;
+ int sensitivity;
+ int press_speed;
+- u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
++ /* 0: Up
++ * 1: Down (undecided)
++ * 2: Scrolling
++ */
++ u8 middlebutton_state;
+ bool fn_lock;
++ bool middleclick_workaround_cptkbd;
+ };
+
+ #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+@@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+ int ret;
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
++ /*
++ * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
++ * regular keys
++ */
++ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
++ if (ret)
++ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++
++ /* Switch middle button to native mode */
++ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
++ if (ret)
++ hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
++
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ if (ret)
+ hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+@@ -603,6 +621,36 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
+ return count;
+ }
+
++static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct hid_device *hdev = to_hid_device(dev);
++ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
++
++ return snprintf(buf, PAGE_SIZE, "%u\n",
++ cptkbd_data->middleclick_workaround_cptkbd);
++}
++
++static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
++{
++ struct hid_device *hdev = to_hid_device(dev);
++ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
++ int value;
++
++ if (kstrtoint(buf, 10, &value))
++ return -EINVAL;
++ if (value < 0 || value > 1)
++ return -EINVAL;
++
++ cptkbd_data->middleclick_workaround_cptkbd = !!value;
++
++ return count;
++}
++
+
+ static struct device_attribute dev_attr_fn_lock =
+ __ATTR(fn_lock, S_IWUSR | S_IRUGO,
+@@ -614,10 +662,16 @@ static struct device_attribute dev_attr_sensitivity_cptkbd =
+ attr_sensitivity_show_cptkbd,
+ attr_sensitivity_store_cptkbd);
+
++static struct device_attribute dev_attr_middleclick_workaround_cptkbd =
++ __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO,
++ attr_middleclick_workaround_show_cptkbd,
++ attr_middleclick_workaround_store_cptkbd);
++
+
+ static struct attribute *lenovo_attributes_cptkbd[] = {
+ &dev_attr_fn_lock.attr,
+ &dev_attr_sensitivity_cptkbd.attr,
++ &dev_attr_middleclick_workaround_cptkbd.attr,
+ NULL
+ };
+
+@@ -668,31 +722,33 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ {
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
+- /* "wheel" scroll events */
+- if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+- usage->code == REL_HWHEEL)) {
+- /* Scroll events disable middle-click event */
+- cptkbd_data->middlebutton_state = 2;
+- return 0;
+- }
++ if (cptkbd_data->middleclick_workaround_cptkbd) {
++ /* "wheel" scroll events */
++ if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
++ usage->code == REL_HWHEEL)) {
++ /* Scroll events disable middle-click event */
++ cptkbd_data->middlebutton_state = 2;
++ return 0;
++ }
+
+- /* Middle click events */
+- if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+- if (value == 1) {
+- cptkbd_data->middlebutton_state = 1;
+- } else if (value == 0) {
+- if (cptkbd_data->middlebutton_state == 1) {
+- /* No scrolling inbetween, send middle-click */
+- input_event(field->hidinput->input,
+- EV_KEY, BTN_MIDDLE, 1);
+- input_sync(field->hidinput->input);
+- input_event(field->hidinput->input,
+- EV_KEY, BTN_MIDDLE, 0);
+- input_sync(field->hidinput->input);
++ /* Middle click events */
++ if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
++ if (value == 1) {
++ cptkbd_data->middlebutton_state = 1;
++ } else if (value == 0) {
++ if (cptkbd_data->middlebutton_state == 1) {
++ /* No scrolling inbetween, send middle-click */
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 1);
++ input_sync(field->hidinput->input);
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 0);
++ input_sync(field->hidinput->input);
++ }
++ cptkbd_data->middlebutton_state = 0;
+ }
+- cptkbd_data->middlebutton_state = 0;
++ return 1;
+ }
+- return 1;
+ }
+
+ if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
+@@ -1126,26 +1182,11 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
+ }
+ hid_set_drvdata(hdev, cptkbd_data);
+
+- /*
+- * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+- * regular keys (Compact only)
+- */
+- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
+- hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
+- ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+- if (ret)
+- hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+- }
+-
+- /* Switch middle button to native mode */
+- ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+- if (ret)
+- hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
+-
+ /* Set keyboard settings to known state */
+ cptkbd_data->middlebutton_state = 0;
+ cptkbd_data->fn_lock = true;
+ cptkbd_data->sensitivity = 0x05;
++ cptkbd_data->middleclick_workaround_cptkbd = true;
+ lenovo_features_set_cptkbd(hdev);
+
+ ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
+@@ -1264,6 +1305,24 @@ static int lenovo_probe(struct hid_device *hdev,
+ return ret;
+ }
+
++#ifdef CONFIG_PM
++static int lenovo_reset_resume(struct hid_device *hdev)
++{
++ switch (hdev->product) {
++ case USB_DEVICE_ID_LENOVO_CUSBKBD:
++ case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
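++ /* The cptkbd settings appear to be handled on the keyboard's mouse interface, hence the type check. */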
++ if (hdev->type == HID_TYPE_USBMOUSE)
++ lenovo_features_set_cptkbd(hdev);
++
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++#endif
++
+ static void lenovo_remove_tpkbd(struct hid_device *hdev)
+ {
+ struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+@@ -1380,6 +1439,9 @@ static struct hid_driver lenovo_driver = {
+ .raw_event = lenovo_raw_event,
+ .event = lenovo_event,
+ .report_fixup = lenovo_report_fixup,
++#ifdef CONFIG_PM
++ .reset_resume = lenovo_reset_resume,
++#endif
+ };
+ module_hid_driver(lenovo_driver);
+
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 8afe3be683ba25..37958edec55f5f 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
+ }
+ break;
+ case REPORT_TYPE_MOUSE:
+- workitem->reports_supported |= STD_MOUSE | HIDPP;
+- if (djrcv_dev->type == recvr_type_mouse_only)
+- workitem->reports_supported |= MULTIMEDIA;
++ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
+ break;
+ }
+ }
+@@ -1286,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ */
+ msleep(50);
+
+- if (retval)
++ if (retval) {
++ kfree(dj_report);
+ return retval;
++ }
+ }
+
+ /*
+@@ -1695,12 +1695,11 @@ static int logi_dj_raw_event(struct hid_device *hdev,
+ }
+ /*
+ * Mouse-only receivers send unnumbered mouse data. The 27 MHz
+- * receiver uses 6 byte packets, the nano receiver 8 bytes,
+- * the lightspeed receiver (Pro X Superlight) 13 bytes.
++ * receiver uses 6 byte packets, the nano receiver 8 bytes.
+ */
+ if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
+- size <= 13){
+- u8 mouse_report[14];
++ size <= 8) {
++ u8 mouse_report[9];
+
+ /* Prepend report id */
+ mouse_report[0] = REPORT_TYPE_MOUSE;
+@@ -1984,10 +1983,6 @@ static const struct hid_device_id logi_dj_receivers[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
+ .driver_data = recvr_type_gaming_hidpp},
+- { /* Logitech lightspeed receiver (0xc547) */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
+- .driver_data = recvr_type_gaming_hidpp},
+
+ { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index a209d51bd2476b..4519ee377aa767 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1835,15 +1835,14 @@ static int hidpp_battery_get_property(struct power_supply *psy,
+ /* -------------------------------------------------------------------------- */
+ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+-static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
++static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
+ {
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+- &hidpp->wireless_feature_index,
+- &feature_type);
++ feature_index, &feature_type);
+
+ return ret;
+ }
+@@ -4249,6 +4248,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
+ }
+ }
+
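++ /* On HID++ 2.0 devices, look up and cache the wireless device status feature index. */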
++ if (hidpp->protocol_major >= 2) {
++ u8 feature_index;
++
++ if (!hidpp_get_wireless_feature_index(hidpp, &feature_index))
++ hidpp->wireless_feature_index = feature_index;
++ }
++
+ if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
+ name = hidpp_get_device_name(hidpp);
+ if (name) {
+@@ -4394,7 +4400,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ bool connected;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct hidpp_ff_private_data data;
+- bool will_restart = false;
+
+ /* report_fixup needs drvdata to be set before we call hid_parse */
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4445,10 +4450,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ return ret;
+ }
+
+- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
+- hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+- will_restart = true;
+-
+ INIT_WORK(&hidpp->work, delayed_work_cb);
+ mutex_init(&hidpp->send_mutex);
+ init_waitqueue_head(&hidpp->wait);
+@@ -4460,10 +4461,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hdev->name);
+
+ /*
+- * Plain USB connections need to actually call start and open
+- * on the transport driver to allow incoming data.
++ * First call hid_hw_start(hdev, 0) to allow IO without connecting any
++ * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's
++ * name and serial number and storing these in hdev->name and hdev->uniq,
++ * before the hid-input and hidraw drivers expose these to userspace.
+ */
+- ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
++ ret = hid_hw_start(hdev, 0);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto hid_hw_start_fail;
+@@ -4496,15 +4499,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ hidpp_overwrite_name(hdev);
+ }
+
+- if (connected && hidpp->protocol_major >= 2) {
+- ret = hidpp_set_wireless_feature_index(hidpp);
+- if (ret == -ENOENT)
+- hidpp->wireless_feature_index = 0;
+- else if (ret)
+- goto hid_hw_init_fail;
+- ret = 0;
+- }
+-
+ if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+ ret = wtp_get_config(hidpp);
+ if (ret)
+@@ -4518,21 +4512,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ schedule_work(&hidpp->work);
+ flush_work(&hidpp->work);
+
+- if (will_restart) {
+- /* Reset the HID node state */
+- hid_device_io_stop(hdev);
+- hid_hw_close(hdev);
+- hid_hw_stop(hdev);
+-
+- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
+- connect_mask &= ~HID_CONNECT_HIDINPUT;
++ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
++ connect_mask &= ~HID_CONNECT_HIDINPUT;
+
+- /* Now export the actual inputs and hidraw nodes to the world */
+- ret = hid_hw_start(hdev, connect_mask);
+- if (ret) {
+- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+- goto hid_hw_start_fail;
+- }
++ /* Now export the actual inputs and hidraw nodes to the world */
++ ret = hid_connect(hdev, connect_mask);
++ if (ret) {
++ hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret);
++ goto hid_hw_init_fail;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4543,6 +4530,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ ret);
+ }
+
++ /*
++ * This relies on logi_dj_ll_close() being a no-op so that DJ connection
++ * events will still be received.
++ */
++ hid_hw_close(hdev);
+ return ret;
+
+ hid_hw_init_fail:
+@@ -4658,6 +4650,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
+ { /* Logitech G Pro X Superlight Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
++ { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
+
+ { /* G935 Gaming Headset */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
+diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
+new file mode 100644
+index 00000000000000..bf57f7f6caa084
+--- /dev/null
++++ b/drivers/hid/hid-mcp2200.c
+@@ -0,0 +1,392 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * MCP2200 - Microchip USB to GPIO bridge
++ *
++ * Copyright (c) 2023, Johannes Roith <johannes@gnu-linux.rocks>
++ *
++ * Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/22228A.pdf
++ * App Note for HID: https://ww1.microchip.com/downloads/en/DeviceDoc/93066A.pdf
++ */
++#include <linux/completion.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/gpio/driver.h>
++#include <linux/hid.h>
++#include <linux/hidraw.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include "hid-ids.h"
++
++/* Command codes in a raw output report */
++#define SET_CLEAR_OUTPUTS 0x08
++#define CONFIGURE 0x10
++#define READ_EE 0x20
++#define WRITE_EE 0x40
++#define READ_ALL 0x80
++
++/* MCP GPIO direction encoding */
++enum MCP_IO_DIR {
++ MCP2200_DIR_OUT = 0x00,
++ MCP2200_DIR_IN = 0x01,
++};
++
++/* Alternative pin assignments */
++#define TXLED 2
++#define RXLED 3
++#define USBCFG 6
++#define SSPND 7
++#define MCP_NGPIO 8
++
++/* CMD to set or clear a GPIO output */
++struct mcp_set_clear_outputs {
++ u8 cmd;
++ u8 dummys1[10];
++ u8 set_bmap;
++ u8 clear_bmap;
++ u8 dummys2[3];
++} __packed;
++
++/* CMD to configure the IOs */
++struct mcp_configure {
++ u8 cmd;
++ u8 dummys1[3];
++ u8 io_bmap;
++ u8 config_alt_pins;
++ u8 io_default_val_bmap;
++ u8 config_alt_options;
++ u8 baud_h;
++ u8 baud_l;
++ u8 dummys2[6];
++} __packed;
++
++/* CMD to read all parameters */
++struct mcp_read_all {
++ u8 cmd;
++ u8 dummys[15];
++} __packed;
++
++/* Response to the read all cmd */
++struct mcp_read_all_resp {
++ u8 cmd;
++ u8 eep_addr;
++ u8 dummy;
++ u8 eep_val;
++ u8 io_bmap;
++ u8 config_alt_pins;
++ u8 io_default_val_bmap;
++ u8 config_alt_options;
++ u8 baud_h;
++ u8 baud_l;
++ u8 io_port_val_bmap;
++ u8 dummys[5];
++} __packed;
++
++struct mcp2200 {
++ struct hid_device *hdev;
++ struct mutex lock;
++ struct completion wait_in_report;
++ u8 gpio_dir;
++ u8 gpio_val;
++ u8 gpio_inval;
++ u8 baud_h;
++ u8 baud_l;
++ u8 config_alt_pins;
++ u8 gpio_reset_val;
++ u8 config_alt_options;
++ int status;
++ struct gpio_chip gc;
++ u8 hid_report[16];
++};
++
++/* this executes the READ_ALL cmd */
++static int mcp_cmd_read_all(struct mcp2200 *mcp)
++{
++ struct mcp_read_all *read_all;
++ int len, t;
++
++ reinit_completion(&mcp->wait_in_report);
++
++ mutex_lock(&mcp->lock);
++
++ read_all = (struct mcp_read_all *) mcp->hid_report;
++ read_all->cmd = READ_ALL;
++ len = hid_hw_output_report(mcp->hdev, (u8 *) read_all,
++ sizeof(struct mcp_read_all));
++
++ mutex_unlock(&mcp->lock);
++
++ if (len != sizeof(struct mcp_read_all))
++ return -EINVAL;
++
++ t = wait_for_completion_timeout(&mcp->wait_in_report,
++ msecs_to_jiffies(4000));
++ if (!t)
++ return -ETIMEDOUT;
++
++ /* return status, negative value if wrong response was received */
++ return mcp->status;
++}
++
++static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
++ unsigned long *bits)
++{
++ struct mcp2200 *mcp = gpiochip_get_data(gc);
++ u8 value;
++ int status;
++ struct mcp_set_clear_outputs *cmd;
++
++ mutex_lock(&mcp->lock);
++ cmd = (struct mcp_set_clear_outputs *) mcp->hid_report;
++
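++ /* Merge the requested bits into the cached output state. */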
++ value = mcp->gpio_val & ~*mask;
++ value |= (*mask & *bits);
++
++ cmd->cmd = SET_CLEAR_OUTPUTS;
++ cmd->set_bmap = value;
++ cmd->clear_bmap = ~(value);
++
++ status = hid_hw_output_report(mcp->hdev, (u8 *) cmd,
++ sizeof(struct mcp_set_clear_outputs));
++
++ if (status == sizeof(struct mcp_set_clear_outputs))
++ mcp->gpio_val = value;
++
++ mutex_unlock(&mcp->lock);
++}
++
++static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
++{
++ unsigned long mask = 1 << gpio_nr;
++ unsigned long bmap_value = value << gpio_nr;
++
++ mcp_set_multiple(gc, &mask, &bmap_value);
++}
++
++static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
++ unsigned long *bits)
++{
++ u32 val;
++ struct mcp2200 *mcp = gpiochip_get_data(gc);
++ int status;
++
++ status = mcp_cmd_read_all(mcp);
++ if (status)
++ return status;
++
++ val = mcp->gpio_inval;
++ *bits = (val & *mask);
++ return 0;
++}
++
++static int mcp_get(struct gpio_chip *gc, unsigned int gpio_nr)
++{
++ unsigned long mask = 0, bits = 0;
++
++ mask = (1 << gpio_nr);
++ mcp_get_multiple(gc, &mask, &bits);
++ return bits > 0;
++}
++
++static int mcp_get_direction(struct gpio_chip *gc, unsigned int gpio_nr)
++{
++ struct mcp2200 *mcp = gpiochip_get_data(gc);
++
++ return (mcp->gpio_dir & (MCP2200_DIR_IN << gpio_nr))
++ ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
++}
++
++static int mcp_set_direction(struct gpio_chip *gc, unsigned int gpio_nr,
++ enum MCP_IO_DIR io_direction)
++{
++ struct mcp2200 *mcp = gpiochip_get_data(gc);
++ struct mcp_configure *conf;
++ int status;
++ /* after the configure cmd we will need to set the outputs again */
++ unsigned long mask = ~(mcp->gpio_dir); /* only set outputs */
++ unsigned long bits = mcp->gpio_val;
++ /* Offsets of alternative pins in config_alt_pins, 0 is not used */
++ u8 alt_pin_conf[8] = {SSPND, USBCFG, 0, 0, 0, 0, RXLED, TXLED};
++ u8 config_alt_pins = mcp->config_alt_pins;
++
++ /* Read in the reset baudrate first, we need it later */
++ status = mcp_cmd_read_all(mcp);
++ if (status != 0)
++ return status;
++
++ mutex_lock(&mcp->lock);
++ conf = (struct mcp_configure *) mcp->hid_report;
++
++ /* configure will reset the chip! */
++ conf->cmd = CONFIGURE;
++ conf->io_bmap = (mcp->gpio_dir & ~(1 << gpio_nr))
++ | (io_direction << gpio_nr);
++ /* Don't overwrite the reset parameters */
++ conf->baud_h = mcp->baud_h;
++ conf->baud_l = mcp->baud_l;
++ conf->config_alt_options = mcp->config_alt_options;
++ conf->io_default_val_bmap = mcp->gpio_reset_val;
++ /* Adjust alt. func if necessary */
++ if (alt_pin_conf[gpio_nr])
++ config_alt_pins &= ~(1 << alt_pin_conf[gpio_nr]);
++ conf->config_alt_pins = config_alt_pins;
++
++ status = hid_hw_output_report(mcp->hdev, (u8 *) conf,
++ sizeof(struct mcp_set_clear_outputs));
++
++ if (status == sizeof(struct mcp_set_clear_outputs)) {
++ mcp->gpio_dir = conf->io_bmap;
++ mcp->config_alt_pins = config_alt_pins;
++ } else {
++ mutex_unlock(&mcp->lock);
++ return -EIO;
++ }
++
++ mutex_unlock(&mcp->lock);
++
++ /* Configure CMD will clear all IOs -> rewrite them */
++ mcp_set_multiple(gc, &mask, &bits);
++ return 0;
++}
++
++static int mcp_direction_input(struct gpio_chip *gc, unsigned int gpio_nr)
++{
++ return mcp_set_direction(gc, gpio_nr, MCP2200_DIR_IN);
++}
++
++static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
++ int value)
++{
++ int ret;
++ unsigned long mask, bmap_value;
++
++ mask = 1 << gpio_nr;
++ bmap_value = value << gpio_nr;
++
++ ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
++ if (!ret)
++ mcp_set_multiple(gc, &mask, &bmap_value);
++ return ret;
++}
++
++static const struct gpio_chip template_chip = {
++ .label = "mcp2200",
++ .owner = THIS_MODULE,
++ .get_direction = mcp_get_direction,
++ .direction_input = mcp_direction_input,
++ .direction_output = mcp_direction_output,
++ .set = mcp_set,
++ .set_multiple = mcp_set_multiple,
++ .get = mcp_get,
++ .get_multiple = mcp_get_multiple,
++ .base = -1,
++ .ngpio = MCP_NGPIO,
++ .can_sleep = true,
++};
++
++/*
++ * MCP2200 uses the interrupt endpoint for input reports. This function
++ * is called by the HID layer when it receives an input report from the
++ * MCP2200, which is actually a response to the previously sent command.
++ */
++static int mcp2200_raw_event(struct hid_device *hdev, struct hid_report *report,
++ u8 *data, int size)
++{
++ struct mcp2200 *mcp = hid_get_drvdata(hdev);
++ struct mcp_read_all_resp *all_resp;
++
++ switch (data[0]) {
++ case READ_ALL:
++ all_resp = (struct mcp_read_all_resp *) data;
++ mcp->status = 0;
++ mcp->gpio_inval = all_resp->io_port_val_bmap;
++ mcp->baud_h = all_resp->baud_h;
++ mcp->baud_l = all_resp->baud_l;
++ mcp->gpio_reset_val = all_resp->io_default_val_bmap;
++ mcp->config_alt_pins = all_resp->config_alt_pins;
++ mcp->config_alt_options = all_resp->config_alt_options;
++ break;
++ default:
++ mcp->status = -EIO;
++ break;
++ }
++
++ complete(&mcp->wait_in_report);
++ return 0;
++}
++
++static int mcp2200_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++ int ret;
++ struct mcp2200 *mcp;
++
++ mcp = devm_kzalloc(&hdev->dev, sizeof(*mcp), GFP_KERNEL);
++ if (!mcp)
++ return -ENOMEM;
++
++ ret = hid_parse(hdev);
++ if (ret) {
++ hid_err(hdev, "can't parse reports\n");
++ return ret;
++ }
++
++ ret = hid_hw_start(hdev, 0);
++ if (ret) {
++ hid_err(hdev, "can't start hardware\n");
++ return ret;
++ }
++
++ hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8,
++ hdev->version & 0xff, hdev->name, hdev->phys);
++
++ ret = hid_hw_open(hdev);
++ if (ret) {
++ hid_err(hdev, "can't open device\n");
++ hid_hw_stop(hdev);
++ return ret;
++ }
++
++ mutex_init(&mcp->lock);
++ init_completion(&mcp->wait_in_report);
++ hid_set_drvdata(hdev, mcp);
++ mcp->hdev = hdev;
++
++ mcp->gc = template_chip;
++ mcp->gc.parent = &hdev->dev;
++
++ ret = devm_gpiochip_add_data(&hdev->dev, &mcp->gc, mcp);
++ if (ret < 0) {
++ hid_err(hdev, "Unable to register gpiochip\n");
++ hid_hw_close(hdev);
++ hid_hw_stop(hdev);
++ return ret;
++ }
++
++ return 0;
++}
++
++static void mcp2200_remove(struct hid_device *hdev)
++{
++ hid_hw_close(hdev);
++ hid_hw_stop(hdev);
++}
++
++static const struct hid_device_id mcp2200_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_MCP2200) },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, mcp2200_devices);
++
++static struct hid_driver mcp2200_driver = {
++ .name = "mcp2200",
++ .id_table = mcp2200_devices,
++ .probe = mcp2200_probe,
++ .remove = mcp2200_remove,
++ .raw_event = mcp2200_raw_event,
++};
++
++/* Register with HID core */
++module_hid_driver(mcp2200_driver);
++
++MODULE_AUTHOR("Johannes Roith <johannes@gnu-linux.rocks>");
++MODULE_DESCRIPTION("MCP2200 Microchip HID USB to GPIO bridge");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index 72883e0ce75758..c5bfca8ac5e6e8 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -922,9 +922,11 @@ static void mcp2221_hid_unregister(void *ptr)
+ /* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
+ static void mcp2221_remove(struct hid_device *hdev)
+ {
++#if IS_REACHABLE(CONFIG_IIO)
+ struct mcp2221 *mcp = hid_get_drvdata(hdev);
+
+ cancel_delayed_work_sync(&mcp->init_work);
++#endif
+ }
+
+ #if IS_REACHABLE(CONFIG_IIO)
+@@ -1142,6 +1144,8 @@ static int mcp2221_probe(struct hid_device *hdev,
+ if (ret)
+ return ret;
+
++ hid_device_io_start(hdev);
++
+ /* Set I2C bus clock diviser */
+ if (i2c_clk_freq > 400)
+ i2c_clk_freq = 400;
+@@ -1157,12 +1161,12 @@ static int mcp2221_probe(struct hid_device *hdev,
+ snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
+ "MCP2221 usb-i2c bridge");
+
++ i2c_set_adapdata(&mcp->adapter, mcp);
+ ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter);
+ if (ret) {
+ hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret);
+ return ret;
+ }
+- i2c_set_adapdata(&mcp->adapter, mcp);
+
+ #if IS_REACHABLE(CONFIG_GPIOLIB)
+ /* Setup GPIO chip */
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 8db4ae05febc8f..e7199ae2e3d918 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1442,6 +1442,31 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
+ return 0;
+ }
+
++static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++ unsigned int *size)
++{
++ if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
++ (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
++ hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
++ hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
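++ /* 0x15 and 0x25 are the one-byte HID item tags for Logical Minimum and Logical Maximum. */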
++ if (rdesc[607] == 0x15) {
++ rdesc[607] = 0x25;
++ dev_info(&hdev->dev,
++ "GT7868Q report descriptor fixup is applied.\n");
++ } else {
++ dev_info(&hdev->dev,
++ "unexpected byte in report descriptor, the touchpad firmware may not be suitable for this fixup (got: %x)\n",
++ rdesc[607]);
++ }
++ }
++
++ return rdesc;
++}
++
+ static void mt_report(struct hid_device *hid, struct hid_report *report)
+ {
+ struct mt_device *td = hid_get_drvdata(hid);
+@@ -2038,6 +2063,17 @@ static const struct hid_device_id mt_devices[] = {
+ MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
+ USB_DEVICE_ID_GAMETEL_MT_MODE) },
+
++ /* Goodix GT7868Q devices */
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++ I2C_DEVICE_ID_GOODIX_01E8) },
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++ I2C_DEVICE_ID_GOODIX_01E9) },
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++ I2C_DEVICE_ID_GOODIX_01E0) },
++
+ /* GoodTouch panels */
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
+@@ -2048,6 +2084,11 @@ static const struct hid_device_id mt_devices[] = {
+ MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
+ USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+
++ /* HONOR GLO-GXXX panel */
++ { .driver_data = MT_CLS_VTL,
++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++ 0x347d, 0x7853) },
++
+ /* Ilitek dual touch panel */
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
+@@ -2079,6 +2120,18 @@ static const struct hid_device_id mt_devices[] = {
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X12_TAB) },
+
++ /* Lenovo X12 TAB Gen 2 */
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_LENOVO,
++ USB_DEVICE_ID_LENOVO_X12_TAB2) },
++
++ /* Logitech devices */
++ { .driver_data = MT_CLS_NSMU,
++ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_LOGITECH,
++ USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
++
+ /* MosArt panels */
+ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
+@@ -2148,6 +2201,10 @@ static const struct hid_device_id mt_devices[] = {
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
+
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
++
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+@@ -2258,6 +2315,7 @@ static struct hid_driver mt_driver = {
+ .feature_mapping = mt_feature_mapping,
+ .usage_table = mt_grabbed_usages,
+ .event = mt_event,
++ .report_fixup = mt_report_fixup,
+ .report = mt_report,
+ #ifdef CONFIG_PM
+ .suspend = mt_suspend,
+diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
+index 10468f727e5bb0..4850e915a57d4d 100644
+--- a/drivers/hid/hid-nintendo.c
++++ b/drivers/hid/hid-nintendo.c
+@@ -325,28 +325,28 @@ struct joycon_imu_cal {
+ * All the controller's button values are stored in a u32.
+ * They can be accessed with bitwise ANDs.
+ */
+-static const u32 JC_BTN_Y = BIT(0);
+-static const u32 JC_BTN_X = BIT(1);
+-static const u32 JC_BTN_B = BIT(2);
+-static const u32 JC_BTN_A = BIT(3);
+-static const u32 JC_BTN_SR_R = BIT(4);
+-static const u32 JC_BTN_SL_R = BIT(5);
+-static const u32 JC_BTN_R = BIT(6);
+-static const u32 JC_BTN_ZR = BIT(7);
+-static const u32 JC_BTN_MINUS = BIT(8);
+-static const u32 JC_BTN_PLUS = BIT(9);
+-static const u32 JC_BTN_RSTICK = BIT(10);
+-static const u32 JC_BTN_LSTICK = BIT(11);
+-static const u32 JC_BTN_HOME = BIT(12);
+-static const u32 JC_BTN_CAP = BIT(13); /* capture button */
+-static const u32 JC_BTN_DOWN = BIT(16);
+-static const u32 JC_BTN_UP = BIT(17);
+-static const u32 JC_BTN_RIGHT = BIT(18);
+-static const u32 JC_BTN_LEFT = BIT(19);
+-static const u32 JC_BTN_SR_L = BIT(20);
+-static const u32 JC_BTN_SL_L = BIT(21);
+-static const u32 JC_BTN_L = BIT(22);
+-static const u32 JC_BTN_ZL = BIT(23);
++#define JC_BTN_Y BIT(0)
++#define JC_BTN_X BIT(1)
++#define JC_BTN_B BIT(2)
++#define JC_BTN_A BIT(3)
++#define JC_BTN_SR_R BIT(4)
++#define JC_BTN_SL_R BIT(5)
++#define JC_BTN_R BIT(6)
++#define JC_BTN_ZR BIT(7)
++#define JC_BTN_MINUS BIT(8)
++#define JC_BTN_PLUS BIT(9)
++#define JC_BTN_RSTICK BIT(10)
++#define JC_BTN_LSTICK BIT(11)
++#define JC_BTN_HOME BIT(12)
++#define JC_BTN_CAP BIT(13) /* capture button */
++#define JC_BTN_DOWN BIT(16)
++#define JC_BTN_UP BIT(17)
++#define JC_BTN_RIGHT BIT(18)
++#define JC_BTN_LEFT BIT(19)
++#define JC_BTN_SR_L BIT(20)
++#define JC_BTN_SL_L BIT(21)
++#define JC_BTN_L BIT(22)
++#define JC_BTN_ZL BIT(23)
+
+ enum joycon_msg_type {
+ JOYCON_MSG_TYPE_NONE,
+@@ -896,14 +896,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
+ */
+ static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
+ {
+- int i;
++ int i, divz = 0;
+
+ for (i = 0; i < 3; i++) {
+ ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
+ ctlr->accel_cal.offset[i];
+ ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
+ ctlr->gyro_cal.offset[i];
++
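++ /* A zero divisor means broken calibration data; substitute 1 to avoid dividing by zero. */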
++ if (ctlr->imu_cal_accel_divisor[i] == 0) {
++ ctlr->imu_cal_accel_divisor[i] = 1;
++ divz++;
++ }
++
++ if (ctlr->imu_cal_gyro_divisor[i] == 0) {
++ ctlr->imu_cal_gyro_divisor[i] = 1;
++ divz++;
++ }
+ }
++
++ if (divz)
++ hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
+ }
+
+ static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
+@@ -1132,16 +1145,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
+ JC_IMU_SAMPLES_PER_DELTA_AVG) {
+ ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
+ ctlr->imu_delta_samples_count;
+- /* don't ever want divide by zero shenanigans */
+- if (ctlr->imu_avg_delta_ms == 0) {
+- ctlr->imu_avg_delta_ms = 1;
+- hid_warn(ctlr->hdev,
+- "calculated avg imu delta of 0\n");
+- }
+ ctlr->imu_delta_samples_count = 0;
+ ctlr->imu_delta_samples_sum = 0;
+ }
+
++ /* don't ever want divide by zero shenanigans */
++ if (ctlr->imu_avg_delta_ms == 0) {
++ ctlr->imu_avg_delta_ms = 1;
++ hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
++ }
++
+ /* useful for debugging IMU sample rate */
+ hid_dbg(ctlr->hdev,
+ "imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",
+diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
+index c463e54decbce1..97dfa3694ff047 100644
+--- a/drivers/hid/hid-nvidia-shield.c
++++ b/drivers/hid/hid-nvidia-shield.c
+@@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create(
+ return haptics;
+
+ input_set_capability(haptics, EV_FF, FF_RUMBLE);
+- input_ff_create_memless(haptics, NULL, play_effect);
++ ret = input_ff_create_memless(haptics, NULL, play_effect);
++ if (ret)
++ goto err;
+
+ ret = input_register_device(haptics);
+ if (ret)
+@@ -800,6 +802,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
+
+ led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
+ "thunderstrike%d:blue:led", ts->id);
++ if (!led->name)
++ return -ENOMEM;
+ led->max_brightness = 1;
+ led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
+ led->brightness_get = &thunderstrike_led_get_brightness;
+@@ -831,6 +835,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
+ shield_dev->battery_dev.desc.name =
+ devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
+ "thunderstrike_%d", ts->id);
++ if (!shield_dev->battery_dev.desc.name)
++ return -ENOMEM;
+
+ shield_dev->battery_dev.psy = power_supply_register(
+ &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index 3d414ae194acbd..25cfd964dc25d9 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -38,8 +38,10 @@
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+
+ #define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
++#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)
+
+ #define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
++#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */
+
+ struct plt_drv_data {
+ unsigned long device_type;
+@@ -137,6 +139,21 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
+
+ drv_data->last_volume_key_ts = cur_ts;
+ }
++ if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
++ unsigned long prev_ts, cur_ts;
++
++ /* Usages are filtered in plantronics_usages. */
++
++ if (!value) /* Handle key presses only. */
++ return 0;
++
++ prev_ts = drv_data->last_volume_key_ts;
++ cur_ts = jiffies;
++ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
++ return 1; /* Ignore the followed opposite volume key. */
++
++ drv_data->last_volume_key_ts = cur_ts;
++ }
+
+ return 0;
+ }
+@@ -210,6 +227,12 @@ static const struct hid_device_id plantronics_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
++ .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
++ .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ { }
+ };
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 3983b4f282f8f8..e0bbf0c6345d68 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
+@@ -66,6 +67,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+@@ -119,6 +121,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 2eba152e8b9053..26e93a331a5107 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -632,7 +632,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
+ }
+ INIT_LIST_HEAD(&hdev->inputs);
+
+- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | HID_CONNECT_DRIVER);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+diff --git a/drivers/hid/hid-uclogic-core-test.c b/drivers/hid/hid-uclogic-core-test.c
+index 2bb916226a3897..cb274cde3ad23a 100644
+--- a/drivers/hid/hid-uclogic-core-test.c
++++ b/drivers/hid/hid-uclogic-core-test.c
+@@ -56,6 +56,11 @@ static struct uclogic_raw_event_hook_test test_events[] = {
+ },
+ };
+
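++/* No-op work handler so the test's INIT_WORK'ed event-hook filters can be flushed safely. */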
++static void fake_work(struct work_struct *work)
++{
++}
++
+ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+ {
+ struct uclogic_params p = {0, };
+@@ -77,6 +82,8 @@ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter->event);
+ memcpy(filter->event, &hook_events[n].event[0], filter->size);
+
++ INIT_WORK(&filter->work, fake_work);
++
+ list_add_tail(&filter->list, &p.event_hooks->list);
+ }
+
+diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c
+index 678f50cbb160b8..a30121419a292e 100644
+--- a/drivers/hid/hid-uclogic-params-test.c
++++ b/drivers/hid/hid-uclogic-params-test.c
+@@ -174,12 +174,26 @@ static void hid_test_uclogic_parse_ugee_v2_desc(struct kunit *test)
+ KUNIT_EXPECT_EQ(test, params->frame_type, frame_type);
+ }
+
++struct fake_device {
++ unsigned long quirks;
++};
++
+ static void hid_test_uclogic_params_cleanup_event_hooks(struct kunit *test)
+ {
+ int res, n;
++ struct hid_device *hdev;
++ struct fake_device *fake_dev;
+ struct uclogic_params p = {0, };
+
+- res = uclogic_params_ugee_v2_init_event_hooks(NULL, &p);
++ hdev = kunit_kzalloc(test, sizeof(struct hid_device), GFP_KERNEL);
++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdev);
++
++ fake_dev = kunit_kzalloc(test, sizeof(struct fake_device), GFP_KERNEL);
++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_dev);
++
++ hid_set_drvdata(hdev, fake_dev);
++
++ res = uclogic_params_ugee_v2_init_event_hooks(hdev, &p);
+ KUNIT_ASSERT_EQ(test, res, 0);
+
+ /* Check that the function can be called repeatedly */
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 13c8dd8cd35060..2bc762d31ac70d 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -357,8 +357,11 @@ static int hidraw_release(struct inode * inode, struct file * file)
+ down_write(&minors_rwsem);
+
+ spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+- for (int i = list->tail; i < list->head; i++)
+- kfree(list->buffer[i].value);
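++ /* Drain the ring buffer; indices wrap at HIDRAW_BUFFER_SIZE, a power of two. */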
++ while (list->tail != list->head) {
++ kfree(list->buffer[list->tail].value);
++ list->buffer[list->tail].value = NULL;
++ list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
++ }
+ list_del(&list->node);
+ spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
+ kfree(list);
+diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
+index ac918a9ea8d344..1b49243adb16a5 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
++++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
+@@ -40,6 +40,11 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+ * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+ */
+ { "CHPN0001" },
++ /*
++ * The IDEA5002 ACPI device causes high interrupt usage and spurious
++ * wakeups from suspend.
++ */
++ { "IDEA5002" },
+ { }
+ };
+
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 2735cd585af0df..045db6f0fb4c4f 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -44,12 +44,12 @@
+ #include "i2c-hid.h"
+
+ /* quirks to control the device */
+-#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+-#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
+-#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
+-#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
+-#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
+-#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
++#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(0)
++#define I2C_HID_QUIRK_BOGUS_IRQ BIT(1)
++#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(2)
++#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3)
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
++#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
+
+ /* Command opcodes */
+ #define I2C_HID_OPCODE_RESET 0x01
+@@ -64,7 +64,6 @@
+ /* flags */
+ #define I2C_HID_STARTED 0
+ #define I2C_HID_RESET_PENDING 1
+-#define I2C_HID_READ_PENDING 2
+
+ #define I2C_HID_PWR_ON 0x00
+ #define I2C_HID_PWR_SLEEP 0x01
+@@ -120,8 +119,6 @@ static const struct i2c_hid_quirks {
+ __u16 idProduct;
+ __u32 quirks;
+ } i2c_hid_quirks[] = {
+- { USB_VENDOR_ID_WEIDA, HID_ANY_ID,
+- I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
+@@ -134,6 +131,8 @@ static const struct i2c_hid_quirks {
+ I2C_HID_QUIRK_RESET_ON_RESUME },
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ I2C_HID_QUIRK_BAD_INPUT_SIZE },
++ { I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
++ I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
+ /*
+ * Sending the wakeup after reset actually break ELAN touchscreen controller
+ */
+@@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
+ msgs[n].len = recv_len;
+ msgs[n].buf = recv_buf;
+ n++;
+-
+- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
+ }
+
+ ret = i2c_transfer(client->adapter, msgs, n);
+
+- if (recv_len)
+- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
+-
+ if (ret != n)
+ return ret < 0 ? ret : -EIO;
+
+@@ -395,8 +389,7 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
+ * The call will get a return value (EREMOTEIO) but device will be
+ * triggered and activated. After that, it goes like a normal device.
+ */
+- if (power_state == I2C_HID_PWR_ON &&
+- ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
++ if (power_state == I2C_HID_PWR_ON) {
+ ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);
+
+ /* Device was already activated */
+@@ -566,9 +559,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
+ {
+ struct i2c_hid *ihid = dev_id;
+
+- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
+- return IRQ_HANDLED;
+-
+ i2c_hid_get_input(ihid);
+
+ return IRQ_HANDLED;
+@@ -958,7 +948,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
+ return ret;
+
+ /* Save some power */
+- i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
++ if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
++ i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+
+ disable_irq(client->irq);
+
+diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+index 31abab57ad443e..78ce140ce94943 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
++++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+@@ -31,6 +31,7 @@ struct i2c_hid_of_elan {
+ struct regulator *vcc33;
+ struct regulator *vccio;
+ struct gpio_desc *reset_gpio;
++ bool no_reset_on_power_off;
+ const struct elan_i2c_hid_chip_data *chip_data;
+ };
+
+@@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
+ container_of(ops, struct i2c_hid_of_elan, ops);
+ int ret;
+
++ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++
+ if (ihid_elan->vcc33) {
+ ret = regulator_enable(ihid_elan->vcc33);
+ if (ret)
+- return ret;
++ goto err_deassert_reset;
+ }
+
+ ret = regulator_enable(ihid_elan->vccio);
+- if (ret) {
+- regulator_disable(ihid_elan->vcc33);
+- return ret;
+- }
++ if (ret)
++ goto err_disable_vcc33;
+
+ if (ihid_elan->chip_data->post_power_delay_ms)
+ msleep(ihid_elan->chip_data->post_power_delay_ms);
+@@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
+ msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
+
+ return 0;
++
++err_disable_vcc33:
++ if (ihid_elan->vcc33)
++ regulator_disable(ihid_elan->vcc33);
++err_deassert_reset:
++ if (ihid_elan->no_reset_on_power_off)
++ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
++
++ return ret;
+ }
+
+ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+@@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+ struct i2c_hid_of_elan *ihid_elan =
+ container_of(ops, struct i2c_hid_of_elan, ops);
+
+- gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++ /*
++ * Do not assert reset when the hardware allows for it to remain
++ * deasserted regardless of the state of the (shared) power supply to
++ * avoid wasting power when the supply is left on.
++ */
++ if (!ihid_elan->no_reset_on_power_off)
++ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++
+ if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
+ msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
+
+@@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+ static int i2c_hid_of_elan_probe(struct i2c_client *client)
+ {
+ struct i2c_hid_of_elan *ihid_elan;
++ int ret;
+
+ ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
+ if (!ihid_elan)
+@@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
+ if (IS_ERR(ihid_elan->reset_gpio))
+ return PTR_ERR(ihid_elan->reset_gpio);
+
++ ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node,
++ "no-reset-on-power-off");
++
+ ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
+- if (IS_ERR(ihid_elan->vccio))
+- return PTR_ERR(ihid_elan->vccio);
++ if (IS_ERR(ihid_elan->vccio)) {
++ ret = PTR_ERR(ihid_elan->vccio);
++ goto err_deassert_reset;
++ }
+
+ ihid_elan->chip_data = device_get_match_data(&client->dev);
+
+ if (ihid_elan->chip_data->main_supply_name) {
+ ihid_elan->vcc33 = devm_regulator_get(&client->dev,
+ ihid_elan->chip_data->main_supply_name);
+- if (IS_ERR(ihid_elan->vcc33))
+- return PTR_ERR(ihid_elan->vcc33);
++ if (IS_ERR(ihid_elan->vcc33)) {
++ ret = PTR_ERR(ihid_elan->vcc33);
++ goto err_deassert_reset;
++ }
+ }
+
+- return i2c_hid_core_probe(client, &ihid_elan->ops,
+- ihid_elan->chip_data->hid_descriptor_address, 0);
++ ret = i2c_hid_core_probe(client, &ihid_elan->ops,
++ ihid_elan->chip_data->hid_descriptor_address, 0);
++ if (ret)
++ goto err_deassert_reset;
++
++ return 0;
++
++err_deassert_reset:
++ if (ihid_elan->no_reset_on_power_off)
++ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
++
++ return ret;
+ }
+
+ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
+diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
+index c4e1fa0273c84c..8be4d576da7733 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-of.c
++++ b/drivers/hid/i2c-hid/i2c-hid-of.c
+@@ -87,6 +87,7 @@ static int i2c_hid_of_probe(struct i2c_client *client)
+ if (!ihid_of)
+ return -ENOMEM;
+
++ ihid_of->client = client;
+ ihid_of->ops.power_up = i2c_hid_of_power_up;
+ ihid_of->ops.power_down = i2c_hid_of_power_down;
+
+diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
+index a49c6affd7c4c4..dd5fc60874ba1d 100644
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ if (!dev)
+ return NULL;
+
++ dev->devc = &pdev->dev;
+ ishtp_device_init(dev);
+
+ init_waitqueue_head(&dev->wait_hw_ready);
+@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ }
+
+ dev->ops = &ish_hw_ops;
+- dev->devc = &pdev->dev;
+ dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
+ return dev;
+ }
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 710fda5f19e1c9..916d427163ca23 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -216,6 +216,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ /* request and enable interrupt */
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
++ if (ret < 0) {
++ dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
++ return ret;
++ }
++
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ irq_flag = IRQF_SHARED;
+
+diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+index 16aa030af8453c..983be15fedcc92 100644
+--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
++++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+@@ -635,7 +635,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ const struct shim_fw_info fw_info)
+ {
+- int rv;
++ int rv = 0;
+ void *dma_buf;
+ dma_addr_t dma_buf_phy;
+ u32 fragment_offset, fragment_size, payload_max_size;
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 3f704b8072e8a0..7659c98d942920 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2080,7 +2080,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
+ return 0;
+ }
+
+-static int wacom_register_inputs(struct wacom *wacom)
++static int wacom_setup_inputs(struct wacom *wacom)
+ {
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+@@ -2099,10 +2099,6 @@ static int wacom_register_inputs(struct wacom *wacom)
+ input_free_device(pen_input_dev);
+ wacom_wac->pen_input = NULL;
+ pen_input_dev = NULL;
+- } else {
+- error = input_register_device(pen_input_dev);
+- if (error)
+- goto fail;
+ }
+
+ error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
+@@ -2111,10 +2107,6 @@ static int wacom_register_inputs(struct wacom *wacom)
+ input_free_device(touch_input_dev);
+ wacom_wac->touch_input = NULL;
+ touch_input_dev = NULL;
+- } else {
+- error = input_register_device(touch_input_dev);
+- if (error)
+- goto fail;
+ }
+
+ error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
+@@ -2123,7 +2115,34 @@ static int wacom_register_inputs(struct wacom *wacom)
+ input_free_device(pad_input_dev);
+ wacom_wac->pad_input = NULL;
+ pad_input_dev = NULL;
+- } else {
++ }
++
++ return 0;
++}
++
++static int wacom_register_inputs(struct wacom *wacom)
++{
++ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
++ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
++ int error = 0;
++
++ pen_input_dev = wacom_wac->pen_input;
++ touch_input_dev = wacom_wac->touch_input;
++ pad_input_dev = wacom_wac->pad_input;
++
++ if (pen_input_dev) {
++ error = input_register_device(pen_input_dev);
++ if (error)
++ goto fail;
++ }
++
++ if (touch_input_dev) {
++ error = input_register_device(touch_input_dev);
++ if (error)
++ goto fail;
++ }
++
++ if (pad_input_dev) {
+ error = input_register_device(pad_input_dev);
+ if (error)
+ goto fail;
+@@ -2376,6 +2395,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
+ if (error)
+ goto fail;
+
++ error = wacom_setup_inputs(wacom);
++ if (error)
++ goto fail;
++
++ if (features->type == HID_GENERIC)
++ connect_mask |= HID_CONNECT_DRIVER;
++
++ /* Regular HID work starts now */
++ error = hid_hw_start(hdev, connect_mask);
++ if (error) {
++ hid_err(hdev, "hw start failed\n");
++ goto fail;
++ }
++
+ error = wacom_register_inputs(wacom);
+ if (error)
+ goto fail;
+@@ -2390,16 +2423,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
+ goto fail;
+ }
+
+- if (features->type == HID_GENERIC)
+- connect_mask |= HID_CONNECT_DRIVER;
+-
+- /* Regular HID work starts now */
+- error = hid_hw_start(hdev, connect_mask);
+- if (error) {
+- hid_err(hdev, "hw start failed\n");
+- goto fail;
+- }
+-
+ if (!wireless) {
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(wacom);
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 471db78dbbf02d..18b5cd0234d213 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -714,13 +714,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
+ case 0x8e2: /* IntuosHT2 pen */
+ case 0x022:
+ case 0x200: /* Pro Pen 3 */
+- case 0x04200: /* Pro Pen 3 */
+ case 0x10842: /* MobileStudio Pro Pro Pen slim */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
+- case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
++ case 0x8842: /* Intuos Pro and Cintiq Pro 3D Pen */
+ tool_type = BTN_TOOL_PEN;
+ break;
+
+@@ -1925,12 +1924,14 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ int fmax = field->logical_maximum;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int resolution_code = code;
+- int resolution = hidinput_calc_abs_res(field, resolution_code);
++ int resolution;
+
+ if (equivalent_usage == HID_DG_TWIST) {
+ resolution_code = ABS_RZ;
+ }
+
++ resolution = hidinput_calc_abs_res(field, resolution_code);
++
+ if (equivalent_usage == HID_GD_X) {
+ fmin += features->offset_left;
+ fmax -= features->offset_right;
+@@ -2367,6 +2368,9 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0);
+ features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3;
+ break;
++ case WACOM_HID_WD_SEQUENCENUMBER:
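++		/* -1 marks that no sequence number has been seen yet */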
++ wacom_wac->hid_data.sequence_number = -1;
++ break;
+ }
+ }
+
+@@ -2491,9 +2495,15 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
+ wacom_wac->hid_data.barrelswitch3 = value;
+ return;
+ case WACOM_HID_WD_SEQUENCENUMBER:
+- if (wacom_wac->hid_data.sequence_number != value)
+- hid_warn(hdev, "Dropped %hu packets", (unsigned short)(value - wacom_wac->hid_data.sequence_number));
++ if (wacom_wac->hid_data.sequence_number != value &&
++ wacom_wac->hid_data.sequence_number >= 0) {
++ int sequence_size = field->logical_maximum - field->logical_minimum + 1;
++ int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size;
++ hid_warn(hdev, "Dropped %d packets", drop_count);
++ }
+ wacom_wac->hid_data.sequence_number = value + 1;
++ if (wacom_wac->hid_data.sequence_number > field->logical_maximum)
++ wacom_wac->hid_data.sequence_number = field->logical_minimum;
+ return;
+ }
+
+@@ -2574,7 +2584,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
+ wacom_wac->hid_data.tipswitch);
+ input_report_key(input, wacom_wac->tool[0], sense);
+ if (wacom_wac->serial[0]) {
+- input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
++ /*
++ * xf86-input-wacom does not accept a serial number
++ * of '0'. Report the low 32 bits if possible, but
++ * if they are zero, report the upper ones instead.
++ */
++ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
++ __u32 serial_hi = wacom_wac->serial[0] >> 32;
++ input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
+ input_report_abs(input, ABS_MISC, sense ? id : 0);
+ }
+
+@@ -2649,8 +2666,8 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ {
+ struct hid_data *hid_data = &wacom_wac->hid_data;
+ bool mt = wacom_wac->features.touch_max > 1;
+- bool prox = hid_data->tipswitch &&
+- report_touch_events(wacom_wac);
++ bool touch_down = hid_data->tipswitch && hid_data->confidence;
++ bool prox = touch_down && report_touch_events(wacom_wac);
+
+ if (touch_is_muted(wacom_wac)) {
+ if (!wacom_wac->shared->touch_down)
+@@ -2700,24 +2717,6 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ }
+ }
+
+-static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
+-{
+- struct input_mt *mt = dev->mt;
+- struct input_mt_slot *s;
+-
+- if (!mt)
+- return false;
+-
+- for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
+- if (s->key == key &&
+- input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
+- return true;
+- }
+- }
+-
+- return false;
+-}
+-
+ static void wacom_wac_finger_event(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage, __s32 value)
+ {
+@@ -2768,14 +2767,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ }
+
+ if (usage->usage_index + 1 == field->report_count) {
+- if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
+- bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
+- wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
+-
+- if (wacom_wac->hid_data.confidence || touch_removed) {
+- wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+- }
+- }
++ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
++ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ }
+ }
+
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 57e185f18d53da..61073fe81ead22 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -324,7 +324,7 @@ struct hid_data {
+ int bat_connected;
+ int ps_connected;
+ bool pad_input_event_flag;
+- unsigned short sequence_number;
++ int sequence_number;
+ ktime_t time_delayed;
+ };
+
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index ba37a5efbf8201..ab2edff018eb68 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -153,8 +153,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev)
+ }
+
+ cnt = of_hte_req_count(hte.pdev);
+- if (cnt < 0)
++ if (cnt < 0) {
++ ret = cnt;
+ goto free_irq;
++ }
+
+ dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
+
+diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
+index 00242107d62e02..862c47b191afe8 100644
+--- a/drivers/hv/Kconfig
++++ b/drivers/hv/Kconfig
+@@ -16,6 +16,7 @@ config HYPERV
+ config HYPERV_VTL_MODE
+ bool "Enable Linux to boot in VTL context"
+ depends on X86_64 && HYPERV
++ depends on SMP
+ default n
+ help
+ Virtual Secure Mode (VSM) is a set of hypervisor capabilities and
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 56f7e06c673e42..47e1bd8de9fcf0 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
+ hv_ringbuffer_cleanup(&channel->inbound);
+
+ if (channel->ringbuffer_page) {
+- __free_pages(channel->ringbuffer_page,
++		/* In a CoCo VM, leak the memory if it didn't get re-encrypted */
++ if (!channel->ringbuffer_gpadlhandle.decrypted)
++ __free_pages(channel->ringbuffer_page,
+ get_order(channel->ringbuffer_pagecount
+ << PAGE_SHIFT));
+ channel->ringbuffer_page = NULL;
+@@ -472,9 +474,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+ (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
+
+ ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
+- if (ret)
++ if (ret) {
++ gpadl->decrypted = false;
+ return ret;
++ }
+
++ /*
++ * Set the "decrypted" flag to true for the set_memory_decrypted()
++ * success case. In the failure case, the encryption state of the
++ * memory is unknown. Leave "decrypted" as true to ensure the
++ * memory will be leaked instead of going back on the free list.
++ */
++ gpadl->decrypted = true;
+ ret = set_memory_decrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+ if (ret) {
+@@ -563,9 +574,15 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+
+ kfree(msginfo);
+
+- if (ret)
+- set_memory_encrypted((unsigned long)kbuffer,
+- PFN_UP(size));
++ if (ret) {
++ /*
++ * If set_memory_encrypted() fails, the decrypted flag is
++ * left as true so the memory is leaked instead of being
++ * put back on the free list.
++ */
++ if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
++ gpadl->decrypted = false;
++ }
+
+ return ret;
+ }
+@@ -886,6 +903,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpad
+ if (ret)
+ pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+
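++	/*
++	 * A nonzero ret means re-encryption failed; keep the buffer
++	 * flagged decrypted so it is leaked rather than freed.
++	 */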
++ gpadl->decrypted = ret;
++
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 3cabeeabb1cacf..f001ae880e1dbe 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -237,8 +237,17 @@ int vmbus_connect(void)
+ vmbus_connection.monitor_pages[0], 1);
+ ret |= set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[1], 1);
+- if (ret)
++ if (ret) {
++ /*
++ * If set_memory_decrypted() fails, the encryption state
++ * of the memory is unknown. So leak the memory instead
++ * of risking returning decrypted memory to the free list.
++ * For simplicity, always handle both pages the same.
++ */
++ vmbus_connection.monitor_pages[0] = NULL;
++ vmbus_connection.monitor_pages[1] = NULL;
+ goto cleanup;
++ }
+
+ /*
+ * Set_memory_decrypted() will change the memory contents if
+@@ -337,13 +346,19 @@ void vmbus_disconnect(void)
+ vmbus_connection.int_page = NULL;
+ }
+
+- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
+- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
++ if (vmbus_connection.monitor_pages[0]) {
++ if (!set_memory_encrypted(
++ (unsigned long)vmbus_connection.monitor_pages[0], 1))
++ hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
++ vmbus_connection.monitor_pages[0] = NULL;
++ }
+
+- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+- vmbus_connection.monitor_pages[0] = NULL;
+- vmbus_connection.monitor_pages[1] = NULL;
++ if (vmbus_connection.monitor_pages[1]) {
++ if (!set_memory_encrypted(
++ (unsigned long)vmbus_connection.monitor_pages[1], 1))
++ hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
++ vmbus_connection.monitor_pages[1] = NULL;
++ }
+ }
+
+ /*
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index edbb38f6956b94..756aebf324735f 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1962,6 +1962,7 @@ void vmbus_device_unregister(struct hv_device *device_obj)
+ */
+ device_unregister(&device_obj->device);
+ }
++EXPORT_SYMBOL_GPL(vmbus_device_unregister);
+
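++		/*
++		 * Read into a scratch variable: reg must still hold the
++		 * fan count for nct6775_select_fan_div() below.
++		 */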
+ #ifdef CONFIG_ACPI
+ /*
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index ec38c88921589e..a4c361b6619c16 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -174,6 +174,7 @@ config SENSORS_ADM9240
+ tristate "Analog Devices ADM9240 and compatibles"
+ depends on I2C
+ select HWMON_VID
++ select REGMAP_I2C
+ help
+ If you say yes here you get support for Analog Devices ADM9240,
+ Dallas DS1780, National Semiconductor LM81 sensor chips.
+@@ -235,6 +236,7 @@ config SENSORS_ADT7462
+ config SENSORS_ADT7470
+ tristate "Analog Devices ADT7470"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Analog Devices
+ ADT7470 temperature monitoring chips.
+@@ -1200,6 +1202,7 @@ config SENSORS_MAX31790
+ config SENSORS_MC34VR500
+ tristate "NXP MC34VR500 hardware monitoring driver"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ If you say yes here you get support for the temperature and input
+ voltage sensors of the NXP MC34VR500.
+@@ -2137,6 +2140,7 @@ config SENSORS_TMP464
+ config SENSORS_TMP513
+ tristate "Texas Instruments TMP513 and compatibles"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments TMP512,
+ and TMP513 temperature and power supply sensor chips.
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index fa28d447f0dfbd..b772c076a5aed7 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -31,6 +31,7 @@
+ #define POWER_METER_CAN_NOTIFY (1 << 3)
+ #define POWER_METER_IS_BATTERY (1 << 8)
+ #define UNKNOWN_HYSTERESIS 0xFFFFFFFF
++#define UNKNOWN_POWER 0xFFFFFFFF
+
+ #define METER_NOTIFY_CONFIG 0x80
+ #define METER_NOTIFY_TRIP 0x81
+@@ -348,6 +349,9 @@ static ssize_t show_power(struct device *dev,
+ update_meter(resource);
+ mutex_unlock(&resource->lock);
+
++ if (resource->power == UNKNOWN_POWER)
++ return -ENODATA;
++
+ return sprintf(buf, "%llu\n", resource->power * 1000);
+ }
+
+diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
+index 46e3c8c5076573..73fd967998472c 100644
+--- a/drivers/hwmon/adc128d818.c
++++ b/drivers/hwmon/adc128d818.c
+@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev,
+
+ mutex_lock(&data->update_lock);
+ /* 10 mV LSB on limit registers */
+- regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
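++	/* Clamp before dividing so out-of-range input saturates cleanly */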
++ regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
+ data->in[index][nr] = regval << 4;
+ reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
+ i2c_smbus_write_byte_data(data->client, reg, regval);
+@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev,
+ return err;
+
+ mutex_lock(&data->update_lock);
+- regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++ regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ data->temp[index] = regval << 1;
+ i2c_smbus_write_byte_data(data->client,
+ index == 1 ? ADC128_REG_TEMP_MAX
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index 03acadc3a6cb42..14b2547adae8d3 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -1862,7 +1862,7 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
+ data->pwm[CONTROL][index] &= ~0xE0;
+ data->pwm[CONTROL][index] |= (7 << 5);
+
+- i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
++ i2c_smbus_write_byte_data(client, PWM_REG(index),
+ data->pwm[INPUT][index]);
+
+ i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index 2a7a4b6b00942b..9b02b304c2f5d4 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = {
+
+ MODULE_DEVICE_TABLE(i2c, amc6821_id);
+
++static const struct of_device_id __maybe_unused amc6821_of_match[] = {
++ {
++ .compatible = "ti,amc6821",
++ .data = (void *)amc6821,
++ },
++ { }
++};
++
++MODULE_DEVICE_TABLE(of, amc6821_of_match);
++
+ static struct i2c_driver amc6821_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "amc6821",
++ .of_match_table = of_match_ptr(amc6821_of_match),
+ },
+ .probe = amc6821_probe,
+ .id_table = amc6821_id,
+diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
+index 997df4b405098f..b2ae2176f11fe0 100644
+--- a/drivers/hwmon/aspeed-pwm-tacho.c
++++ b/drivers/hwmon/aspeed-pwm-tacho.c
+@@ -193,6 +193,8 @@ struct aspeed_pwm_tacho_data {
+ u8 fan_tach_ch_source[16];
+ struct aspeed_cooling_device *cdev[8];
+ const struct attribute_group *groups[3];
++ /* protects access to shared ASPEED_PTCR_RESULT */
++ struct mutex tach_lock;
+ };
+
+ enum type { TYPEM, TYPEN, TYPEO };
+@@ -527,6 +529,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+ u8 fan_tach_ch_source, type, mode, both;
+ int ret;
+
++ mutex_lock(&priv->tach_lock);
++
+ regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
+ regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
+
+@@ -544,6 +548,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+ ASPEED_RPM_STATUS_SLEEP_USEC,
+ usec);
+
++ mutex_unlock(&priv->tach_lock);
++
+ /* return -ETIMEDOUT if we didn't get an answer. */
+ if (ret)
+ return ret;
+@@ -903,6 +909,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
++ mutex_init(&priv->tach_lock);
+ priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
+ &aspeed_pwm_tacho_regmap_config);
+ if (IS_ERR(priv->regmap))
+diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
+index 51f9c2db403e75..f20b864c1bb201 100644
+--- a/drivers/hwmon/asus-ec-sensors.c
++++ b/drivers/hwmon/asus-ec-sensors.c
+@@ -402,7 +402,7 @@ static const struct ec_board_info board_info_strix_b550_i_gaming = {
+
+ static const struct ec_board_info board_info_strix_x570_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
++ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
+index 5fd136baf1cd31..19b9bf3d75ef94 100644
+--- a/drivers/hwmon/axi-fan-control.c
++++ b/drivers/hwmon/axi-fan-control.c
+@@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
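++	/*
++	 * Register the hwmon device before requesting the IRQ, so the
++	 * interrupt handler never runs against an unregistered ctl->hdev.
++	 */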
++ ret = axi_fan_control_init(ctl, pdev->dev.of_node);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to initialize device\n");
++ return ret;
++ }
++
++ ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
++ name,
++ ctl,
++ &axi_chip_info,
++ axi_fan_control_groups);
++
++ if (IS_ERR(ctl->hdev))
++ return PTR_ERR(ctl->hdev);
++
+ ctl->irq = platform_get_irq(pdev, 0);
+ if (ctl->irq < 0)
+ return ctl->irq;
+@@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- ret = axi_fan_control_init(ctl, pdev->dev.of_node);
+- if (ret) {
+- dev_err(&pdev->dev, "Failed to initialize device\n");
+- return ret;
+- }
+-
+- ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
+- name,
+- ctl,
+- &axi_chip_info,
+- axi_fan_control_groups);
+-
+- return PTR_ERR_OR_ZERO(ctl->hdev);
++ return 0;
+ }
+
+ static struct platform_driver axi_fan_control_driver = {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index eba94f68585a8f..b8fc8d1ef20dfc 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -41,8 +41,8 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
+ #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
++#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
++#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+@@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev,
+ }
+
+ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
+- int attr_no)
++ int index)
+ {
+ int i;
+ static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+@@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
+ };
+
+ for (i = 0; i < tdata->attr_size; i++) {
++ /*
++		 * We map the attr number to the core id of the CPU.
++		 * The attr number is always core id + 2; the package
++		 * temperature will always show up as temp1_*, if available.
++ */
++ int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
++
+ snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
+ "temp%d_%s", attr_no, suffixes[i]);
+ sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
+ tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
+ tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
+ tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
+- tdata->sd_attrs[i].index = attr_no;
++ tdata->sd_attrs[i].index = index;
+ tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
+ }
+ tdata->attr_group.attrs = tdata->attrs;
+@@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
+ struct platform_data *pdata = platform_get_drvdata(pdev);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u32 eax, edx;
+- int err, index, attr_no;
++ int err, index;
+
+ if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
+ return 0;
+
+ /*
+- * Find attr number for sysfs:
+- * We map the attr number to core id of the CPU
+- * The attr number is always core id + 2
+- * The Pkgtemp will always show up as temp1_*, if available
++ * Get the index of tdata in pdata->core_data[]
++ * tdata for package: pdata->core_data[1]
++ * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
+ */
+ if (pkg_flag) {
+- attr_no = PKG_SYSFS_ATTR_NO;
++ index = PKG_SYSFS_ATTR_NO;
+ } else {
+- index = ida_alloc(&pdata->ida, GFP_KERNEL);
++ index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
+ if (index < 0)
+ return index;
+- pdata->cpu_map[index] = topology_core_id(cpu);
+- attr_no = index + BASE_SYSFS_ATTR_NO;
+- }
+
+- if (attr_no > MAX_CORE_DATA - 1) {
+- err = -ERANGE;
+- goto ida_free;
++ pdata->cpu_map[index] = topology_core_id(cpu);
++ index += BASE_SYSFS_ATTR_NO;
+ }
+
+ tdata = init_temp_data(cpu, pkg_flag);
+@@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
+ if (get_ttarget(tdata, &pdev->dev) >= 0)
+ tdata->attr_size++;
+
+- pdata->core_data[attr_no] = tdata;
++ pdata->core_data[index] = tdata;
+
+ /* Create sysfs interfaces */
+- err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
++ err = create_core_attrs(tdata, pdata->hwmon_dev, index);
+ if (err)
+ goto exit_free;
+
+ return 0;
+ exit_free:
+- pdata->core_data[attr_no] = NULL;
++ pdata->core_data[index] = NULL;
+ kfree(tdata);
+ ida_free:
+ if (!pkg_flag)
+- ida_free(&pdata->ida, index);
++ ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
+ return err;
+ }
+
+diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
+index 463ab4296ede5c..280b90646a8735 100644
+--- a/drivers/hwmon/corsair-cpro.c
++++ b/drivers/hwmon/corsair-cpro.c
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/spinlock.h>
+ #include <linux/types.h>
+
+ #define USB_VENDOR_ID_CORSAIR 0x1b1c
+@@ -77,8 +78,11 @@
+ struct ccp_device {
+ struct hid_device *hdev;
+ struct device *hwmon_dev;
++ /* For reinitializing the completion below */
++ spinlock_t wait_input_report_lock;
+ struct completion wait_input_report;
+ struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
++ u8 *cmd_buffer;
+ u8 *buffer;
+ int target[6];
+ DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
+@@ -111,15 +115,23 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
+ unsigned long t;
+ int ret;
+
+- memset(ccp->buffer, 0x00, OUT_BUFFER_SIZE);
+- ccp->buffer[0] = command;
+- ccp->buffer[1] = byte1;
+- ccp->buffer[2] = byte2;
+- ccp->buffer[3] = byte3;
+-
++ memset(ccp->cmd_buffer, 0x00, OUT_BUFFER_SIZE);
++ ccp->cmd_buffer[0] = command;
++ ccp->cmd_buffer[1] = byte1;
++ ccp->cmd_buffer[2] = byte2;
++ ccp->cmd_buffer[3] = byte3;
++
++ /*
++ * Disable raw event parsing for a moment to safely reinitialize the
++ * completion. Reinit is done because hidraw could have triggered
++ * the raw event parsing and marked the ccp->wait_input_report
++ * completion as done.
++ */
++ spin_lock_bh(&ccp->wait_input_report_lock);
+ reinit_completion(&ccp->wait_input_report);
++ spin_unlock_bh(&ccp->wait_input_report_lock);
+
+- ret = hid_hw_output_report(ccp->hdev, ccp->buffer, OUT_BUFFER_SIZE);
++ ret = hid_hw_output_report(ccp->hdev, ccp->cmd_buffer, OUT_BUFFER_SIZE);
+ if (ret < 0)
+ return ret;
+
+@@ -135,11 +147,12 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
+ struct ccp_device *ccp = hid_get_drvdata(hdev);
+
+ /* only copy buffer when requested */
+- if (completion_done(&ccp->wait_input_report))
+- return 0;
+-
+- memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
+- complete(&ccp->wait_input_report);
++ spin_lock(&ccp->wait_input_report_lock);
++ if (!completion_done(&ccp->wait_input_report)) {
++ memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
++ complete_all(&ccp->wait_input_report);
++ }
++ spin_unlock(&ccp->wait_input_report_lock);
+
+ return 0;
+ }
+@@ -492,7 +505,11 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (!ccp)
+ return -ENOMEM;
+
+- ccp->buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
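++	/* Keep outgoing commands and incoming reports in separate buffers */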
++ ccp->cmd_buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
++ if (!ccp->cmd_buffer)
++ return -ENOMEM;
++
++ ccp->buffer = devm_kmalloc(&hdev->dev, IN_BUFFER_SIZE, GFP_KERNEL);
+ if (!ccp->buffer)
+ return -ENOMEM;
+
+@@ -510,7 +527,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+
+ ccp->hdev = hdev;
+ hid_set_drvdata(hdev, ccp);
++
+ mutex_init(&ccp->mutex);
++ spin_lock_init(&ccp->wait_input_report_lock);
+ init_completion(&ccp->wait_input_report);
+
+ hid_device_io_start(hdev);
+diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
+index 904890598c116b..f8f22b8a67cdfb 100644
+--- a/drivers/hwmon/corsair-psu.c
++++ b/drivers/hwmon/corsair-psu.c
+@@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = {
+ { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */
+- { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */
+- { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */
++ { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */
++ { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */
+ { HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */
+- { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */
++ { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */
++ { HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */
+ { },
+ };
+ MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
+@@ -899,7 +900,23 @@ static struct hid_driver corsairpsu_driver = {
+ .reset_resume = corsairpsu_resume,
+ #endif
+ };
+-module_hid_driver(corsairpsu_driver);
++
++static int __init corsair_init(void)
++{
++ return hid_register_driver(&corsairpsu_driver);
++}
++
++static void __exit corsair_exit(void)
++{
++ hid_unregister_driver(&corsairpsu_driver);
++}
++
++/*
++ * With module_init() the driver would load before the HID bus when
++ * built-in, so use late_initcall() instead.
++ */
++late_initcall(corsair_init);
++module_exit(corsair_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Wilken Gottwalt <wilken.gottwalt@posteo.net>");
+diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
+index 17ae62f88bbf44..dfa1d6926deacc 100644
+--- a/drivers/hwmon/hp-wmi-sensors.c
++++ b/drivers/hwmon/hp-wmi-sensors.c
+@@ -17,6 +17,8 @@
+ * Available: https://github.com/linuxhw/ACPI
+ * [4] P. Rohár, "bmfdec - Decompile binary MOF file (BMF) from WMI buffer",
+ * 2017. [Online]. Available: https://github.com/pali/bmfdec
++ * [5] Microsoft Corporation, "Driver-Defined WMI Data Items", 2017. [Online].
++ * Available: https://learn.microsoft.com/en-us/windows-hardware/drivers/kernel/driver-defined-wmi-data-items
+ */
+
+ #include <linux/acpi.h>
+@@ -24,6 +26,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/jiffies.h>
+ #include <linux/mutex.h>
++#include <linux/nls.h>
+ #include <linux/units.h>
+ #include <linux/wmi.h>
+
+@@ -395,6 +398,50 @@ struct hp_wmi_sensors {
+ struct mutex lock; /* Lock polling WMI and driver state changes. */
+ };
+
++static bool is_raw_wmi_string(const u8 *pointer, u32 length)
++{
++ const u16 *ptr;
++ u16 len;
++
++ /* WMI strings are length-prefixed UTF-16 [5]. */
++ if (length <= sizeof(*ptr))
++ return false;
++
++ length -= sizeof(*ptr);
++ ptr = (const u16 *)pointer;
++ len = *ptr;
++
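++	/* The length prefix counts bytes and must be even for UTF-16 data */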
++ return len <= length && !(len & 1);
++}
++
++static char *convert_raw_wmi_string(const u8 *buf)
++{
++ const wchar_t *src;
++ unsigned int cps;
++ unsigned int len;
++ char *dst;
++ int i;
++
++ src = (const wchar_t *)buf;
++
++ /* Count UTF-16 code points. Exclude trailing null padding. */
++ cps = *src / sizeof(*src);
++ while (cps && !src[cps])
++ cps--;
++
++ /* Each code point becomes up to 3 UTF-8 characters. */
++ len = min(cps * 3, HP_WMI_MAX_STR_SIZE - 1);
++
++ dst = kmalloc((len + 1) * sizeof(*dst), GFP_KERNEL);
++ if (!dst)
++ return NULL;
++
++ i = utf16s_to_utf8s(++src, cps, UTF16_LITTLE_ENDIAN, dst, len);
++ dst[i] = '\0';
++
++ return dst;
++}
++
+ /* hp_wmi_strdup - devm_kstrdup, but length-limited */
+ static char *hp_wmi_strdup(struct device *dev, const char *src)
+ {
+@@ -412,6 +459,23 @@ static char *hp_wmi_strdup(struct device *dev, const char *src)
+ return dst;
+ }
+
++/* hp_wmi_wstrdup - hp_wmi_strdup, but for a raw WMI string */
++static char *hp_wmi_wstrdup(struct device *dev, const u8 *buf)
++{
++ char *src;
++ char *dst;
++
++ src = convert_raw_wmi_string(buf);
++ if (!src)
++ return NULL;
++
++ dst = hp_wmi_strdup(dev, strim(src)); /* Note: Copy is trimmed. */
++
++ kfree(src);
++
++ return dst;
++}
++
+ /*
+ * hp_wmi_get_wobj - poll WMI for a WMI object instance
+ * @guid: WMI object GUID
+@@ -462,8 +526,14 @@ static int check_wobj(const union acpi_object *wobj,
+ for (prop = 0; prop <= last_prop; prop++) {
+ type = elements[prop].type;
+ valid_type = property_map[prop];
+- if (type != valid_type)
++ if (type != valid_type) {
++ if (type == ACPI_TYPE_BUFFER &&
++ valid_type == ACPI_TYPE_STRING &&
++ is_raw_wmi_string(elements[prop].buffer.pointer,
++ elements[prop].buffer.length))
++ continue;
+ return -EINVAL;
++ }
+ }
+
+ return 0;
+@@ -480,7 +550,9 @@ static int extract_acpi_value(struct device *dev,
+ break;
+
+ case ACPI_TYPE_STRING:
+- *out_string = hp_wmi_strdup(dev, strim(element->string.pointer));
++ *out_string = element->type == ACPI_TYPE_BUFFER ?
++ hp_wmi_wstrdup(dev, element->buffer.pointer) :
++ hp_wmi_strdup(dev, strim(element->string.pointer));
+ if (!*out_string)
+ return -ENOMEM;
+ break;
+@@ -861,7 +933,9 @@ update_numeric_sensor_from_wobj(struct device *dev,
+ {
+ const union acpi_object *elements;
+ const union acpi_object *element;
+- const char *string;
++ const char *new_string;
++ char *trimmed;
++ char *string;
+ bool is_new;
+ int offset;
+ u8 size;
+@@ -885,11 +959,21 @@ update_numeric_sensor_from_wobj(struct device *dev,
+ offset = is_new ? size - 1 : -2;
+
+ element = &elements[HP_WMI_PROPERTY_CURRENT_STATE + offset];
+- string = strim(element->string.pointer);
+-
+- if (strcmp(string, nsensor->current_state)) {
+- devm_kfree(dev, nsensor->current_state);
+- nsensor->current_state = hp_wmi_strdup(dev, string);
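++	/* Raw WMI strings arrive as buffers and need UTF-16 to UTF-8 conversion */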
++ string = element->type == ACPI_TYPE_BUFFER ?
++ convert_raw_wmi_string(element->buffer.pointer) :
++ element->string.pointer;
++
++ if (string) {
++ trimmed = strim(string);
++ if (strcmp(trimmed, nsensor->current_state)) {
++ new_string = hp_wmi_strdup(dev, trimmed);
++ if (new_string) {
++ devm_kfree(dev, nsensor->current_state);
++ nsensor->current_state = new_string;
++ }
++ }
++ if (element->type == ACPI_TYPE_BUFFER)
++ kfree(string);
+ }
+
+ /* Old variant: -2 (not -1) because it lacks the Size property. */
+@@ -996,11 +1080,15 @@ static int check_event_wobj(const union acpi_object *wobj)
+ HP_WMI_EVENT_PROPERTY_STATUS);
+ }
+
+-static int populate_event_from_wobj(struct hp_wmi_event *event,
++static int populate_event_from_wobj(struct device *dev,
++ struct hp_wmi_event *event,
+ union acpi_object *wobj)
+ {
+ int prop = HP_WMI_EVENT_PROPERTY_NAME;
+ union acpi_object *element;
++ acpi_object_type type;
++ char *string;
++ u32 value;
+ int err;
+
+ err = check_event_wobj(wobj);
+@@ -1009,20 +1097,24 @@ static int populate_event_from_wobj(struct hp_wmi_event *event,
+
+ element = wobj->package.elements;
+
+- /* Extracted strings are NOT device-managed copies. */
+-
+ for (; prop <= HP_WMI_EVENT_PROPERTY_CATEGORY; prop++, element++) {
++ type = hp_wmi_event_property_map[prop];
++
++ err = extract_acpi_value(dev, element, type, &value, &string);
++ if (err)
++ return err;
++
+ switch (prop) {
+ case HP_WMI_EVENT_PROPERTY_NAME:
+- event->name = strim(element->string.pointer);
++ event->name = string;
+ break;
+
+ case HP_WMI_EVENT_PROPERTY_DESCRIPTION:
+- event->description = strim(element->string.pointer);
++ event->description = string;
+ break;
+
+ case HP_WMI_EVENT_PROPERTY_CATEGORY:
+- event->category = element->integer.value;
++ event->category = value;
+ break;
+
+ default:
+@@ -1511,8 +1603,8 @@ static void hp_wmi_notify(u32 value, void *context)
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct hp_wmi_sensors *state = context;
+ struct device *dev = &state->wdev->dev;
++ struct hp_wmi_event event = {};
+ struct hp_wmi_info *fan_info;
+- struct hp_wmi_event event;
+ union acpi_object *wobj;
+ acpi_status err;
+ int event_type;
+@@ -1545,8 +1637,10 @@ static void hp_wmi_notify(u32 value, void *context)
+ goto out_unlock;
+
+ wobj = out.pointer;
++ if (!wobj)
++ goto out_unlock;
+
+- err = populate_event_from_wobj(&event, wobj);
++ err = populate_event_from_wobj(dev, &event, wobj);
+ if (err) {
+ dev_warn(dev, "Bad event data (ACPI type %d)\n", wobj->type);
+ goto out_free_wobj;
+@@ -1577,6 +1671,9 @@ static void hp_wmi_notify(u32 value, void *context)
+ out_free_wobj:
+ kfree(wobj);
+
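++	/* Release the devm-allocated event strings now that the event is handled */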
++ devm_kfree(dev, event.name);
++ devm_kfree(dev, event.description);
++
+ out_unlock:
+ mutex_unlock(&state->lock);
+ }
+diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
+index 6500ca548f9c73..96397ae6ff18fc 100644
+--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
++++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
+@@ -358,7 +358,7 @@ static const struct m10bmc_sdata n6000bmc_temp_tbl[] = {
+ { 0x4f0, 0x4f4, 0x4f8, 0x52c, 0x0, 500, "Board Top Near FPGA Temperature" },
+ { 0x4fc, 0x500, 0x504, 0x52c, 0x0, 500, "Board Bottom Near CVL Temperature" },
+ { 0x508, 0x50c, 0x510, 0x52c, 0x0, 500, "Board Top East Near VRs Temperature" },
+- { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "Columbiaville Die Temperature" },
++ { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "CVL Die Temperature" },
+ { 0x520, 0x524, 0x528, 0x52c, 0x0, 500, "Board Rear Side Temperature" },
+ { 0x530, 0x534, 0x538, 0x52c, 0x0, 500, "Board Front Side Temperature" },
+ { 0x53c, 0x540, 0x544, 0x0, 0x0, 500, "QSFP1 Case Temperature" },
+@@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
+ };
+
+ static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
+- { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
++ { 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
+ };
+
+ static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index bae0becfa24be9..c906731c6c2d3e 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -153,8 +153,9 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+
+ static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
+ {
+- amd_smn_read(amd_pci_dev_to_node_id(pdev),
+- ZEN_REPORTED_TEMP_CTRL_BASE, regval);
++ if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
++ ZEN_REPORTED_TEMP_CTRL_BASE, regval))
++ *regval = 0;
+ }
+
+ static long get_raw_temp(struct k10temp_data *data)
+@@ -205,6 +206,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
+ {
+ struct k10temp_data *data = dev_get_drvdata(dev);
++ int ret = -EOPNOTSUPP;
+ u32 regval;
+
+ switch (attr) {
+@@ -221,13 +223,17 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ *val = 0;
+ break;
+ case 2 ... 13: /* Tccd{1-12} */
+- amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+- ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
+- &regval);
++ ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
++ ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
++ &regval);
++
++ if (ret)
++ return ret;
++
+ *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
+ break;
+ default:
+- return -EOPNOTSUPP;
++ return ret;
+ }
+ break;
+ case hwmon_temp_max:
+@@ -243,7 +249,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ - ((regval >> 24) & 0xf)) * 500 + 52000;
+ break;
+ default:
+- return -EOPNOTSUPP;
++ return ret;
+ }
+ return 0;
+ }
+@@ -381,8 +387,20 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ int i;
+
+ for (i = 0; i < limit; i++) {
+- amd_smn_read(amd_pci_dev_to_node_id(pdev),
+- ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
++ /*
++ * Ignore inaccessible CCDs.
++ *
++ * Some systems will return a register value of 0, and the TEMP_VALID
++ * bit check below will naturally fail.
++ *
++		 * Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for
++		 * the register value, which would incorrectly pass the TEMP_VALID
++		 * bit check.
++ */
++ if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
++ ZEN_CCD_TEMP(data->ccd_offset, i), &regval))
++ continue;
++
+ if (regval & ZEN_CCD_TEMP_VALID)
+ data->show_temp |= BIT(TCCD_BIT(i));
+ }
+@@ -527,6 +545,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
+ { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ {}
+ };
+diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
+index 67b9d7636ee427..37e8e9679aeb6b 100644
+--- a/drivers/hwmon/lm95234.c
++++ b/drivers/hwmon/lm95234.c
+@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
+ if (ret < 0)
+ return ret;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
++ 1000);
+
+ mutex_lock(&data->update_lock);
+ data->tcrit2[index] = val;
+@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
+ if (ret < 0)
+ return ret;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+
+ mutex_lock(&data->update_lock);
+ data->tcrit1[index] = val;
+@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
+ if (ret < 0)
+ return ret;
+
+- val = DIV_ROUND_CLOSEST(val, 1000);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
+ val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
+
+ mutex_lock(&data->update_lock);
+@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
+ return ret;
+
+ /* Accuracy is 1/2 degrees C */
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
+
+ mutex_lock(&data->update_lock);
+ data->toffset[index] = val;
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 589bcd07ce7f71..b8548105cd67af 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -875,8 +875,14 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
+ }
+
+ ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
+- if (!ret)
++ if (!ret) {
++ if (!val) {
++ fwnode_handle_put(child);
++ return dev_err_probe(&st->client->dev, -EINVAL,
++ "shunt resistor value cannot be zero\n");
++ }
+ st->r_sense_uohm[addr] = val;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index aa38c45adc09e2..0ccb5eb596fc40 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -79,7 +79,7 @@ static const bool max16065_have_current[] = {
+ };
+
+ struct max16065_data {
+- enum chips type;
++ enum chips chip;
+ struct i2c_client *client;
+ const struct attribute_group *groups[4];
+ struct mutex update_lock;
+@@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range)
+ return limit * range / 256;
+ }
+
+-static inline int MV_TO_LIMIT(int mv, int range)
++static inline int MV_TO_LIMIT(unsigned long mv, int range)
+ {
+- return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
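++	/* Clamp first so the multiplication by 256 cannot overflow */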
++ mv = clamp_val(mv, 0, ULONG_MAX / 256);
++ return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range);
+ }
+
+ static inline int ADC_TO_CURR(int adc, int gain)
+@@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ MAX16065_CURR_SENSE);
+ }
+
+- for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++)
++ for (i = 0; i < 2; i++)
+ data->fault[i]
+ = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
+
++ /*
++ * MAX16067 and MAX16068 have separate undervoltage and
++ * overvoltage alarm bits. Squash them together.
++ */
++ if (data->chip == max16067 || data->chip == max16068)
++ data->fault[0] |= data->fault[1];
++
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+@@ -493,8 +501,6 @@ static const struct attribute_group max16065_max_group = {
+ .is_visible = max16065_secondary_is_visible,
+ };
+
+-static const struct i2c_device_id max16065_id[];
+-
+ static int max16065_probe(struct i2c_client *client)
+ {
+ struct i2c_adapter *adapter = client->adapter;
+@@ -505,7 +511,7 @@ static int max16065_probe(struct i2c_client *client)
+ bool have_secondary; /* true if chip has secondary limits */
+ bool secondary_is_max = false; /* secondary limits reflect max */
+ int groups = 0;
+- const struct i2c_device_id *id = i2c_match_id(max16065_id, client);
++ enum chips chip = (uintptr_t)i2c_get_match_data(client);
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_READ_WORD_DATA))
+@@ -515,12 +521,13 @@ static int max16065_probe(struct i2c_client *client)
+ if (unlikely(!data))
+ return -ENOMEM;
+
++ data->chip = chip;
+ data->client = client;
+ mutex_init(&data->update_lock);
+
+- data->num_adc = max16065_num_adc[id->driver_data];
+- data->have_current = max16065_have_current[id->driver_data];
+- have_secondary = max16065_have_secondary[id->driver_data];
++ data->num_adc = max16065_num_adc[chip];
++ data->have_current = max16065_have_current[chip];
++ have_secondary = max16065_have_secondary[chip];
+
+ if (have_secondary) {
+ val = i2c_smbus_read_byte_data(client, MAX16065_SW_ENABLE);
+diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
+index 7d10dd434f2e11..a338dd4e990d51 100644
+--- a/drivers/hwmon/max6697.c
++++ b/drivers/hwmon/max6697.c
+@@ -311,6 +311,7 @@ static ssize_t temp_store(struct device *dev,
+ return ret;
+
+ mutex_lock(&data->update_lock);
++ temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */
+ temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
+ temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
+ data->temp[nr][index] = temp;
+@@ -428,14 +429,14 @@ static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20);
+ static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21);
+ static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23);
+
+-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14);
++static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15);
+ static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
+ static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9);
+ static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10);
+ static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11);
+ static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12);
+ static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13);
+-static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 15);
++static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14);
+
+ static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1);
+ static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index b5b81bd83bb157..8da7aa1614d7d1 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1614,17 +1614,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev)
+ data->fan_div[i]);
+
+ if (data->has_fan_min & BIT(i)) {
+- err = nct6775_read_value(data, data->REG_FAN_MIN[i], &reg);
++ u16 tmp;
++
++ err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp);
+ if (err)
+ goto out;
+- data->fan_min[i] = reg;
++ data->fan_min[i] = tmp;
+ }
+
+ if (data->REG_FAN_PULSES[i]) {
+- err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &reg);
++ u16 tmp;
++
++ err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp);
+ if (err)
+ goto out;
+- data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03;
++ data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ }
+
+ err = nct6775_select_fan_div(dev, data, i, reg);
+@@ -2258,7 +2262,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
+ if (err < 0)
+ return err;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+
+ mutex_lock(&data->update_lock);
+ data->temp_offset[nr] = val;
+@@ -2549,6 +2553,13 @@ store_pwm(struct device *dev, struct device_attribute *attr, const char *buf,
+ int err;
+ u16 reg;
+
++ /*
++	 * The fan control mode must be set to manual before the fan speed
++	 * can be adjusted; otherwise the write is rejected with -EBUSY.
++ */
++ if (index == 0 && data->pwm_enable[nr] > manual)
++ return -EBUSY;
++
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+@@ -3501,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
+ const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
+ int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
++ int num_reg_temp_config;
+ struct device *hwmon_dev;
+ struct sensor_template_group tsi_temp_tg;
+
+@@ -3583,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+@@ -3658,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+@@ -3735,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6775_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6775_REG_TEMP_HYST;
+ reg_temp_config = NCT6775_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6775_REG_TEMP_CRIT;
+
+@@ -3810,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6775_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6775_REG_TEMP_HYST;
+ reg_temp_config = NCT6776_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6776_REG_TEMP_CRIT;
+
+@@ -3889,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6779_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6779_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6779_REG_TEMP_CRIT;
+
+@@ -4023,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6779_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6779_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6779_REG_TEMP_CRIT;
+
+@@ -4112,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6798_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6798_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6798_REG_TEMP_CRIT;
+
+@@ -4193,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ = reg_temp_crit[src - 1];
+ if (reg_temp_crit_l && reg_temp_crit_l[i])
+ data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
+- data->reg_temp_config[src - 1] = reg_temp_config[i];
++ if (i < num_reg_temp_config)
++ data->reg_temp_config[src - 1] = reg_temp_config[i];
+ data->temp_src[src - 1] = src;
+ continue;
+ }
+@@ -4206,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ data->reg_temp[0][s] = reg_temp[i];
+ data->reg_temp[1][s] = reg_temp_over[i];
+ data->reg_temp[2][s] = reg_temp_hyst[i];
+- data->reg_temp_config[s] = reg_temp_config[i];
++ if (i < num_reg_temp_config)
++ data->reg_temp_config[s] = reg_temp_config[i];
+ if (reg_temp_crit_h && reg_temp_crit_h[i])
+ data->reg_temp[3][s] = reg_temp_crit_h[i];
+ else if (reg_temp_crit[src - 1])
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index 81bf03dad6bbc5..706a662dd077d1 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -1269,6 +1269,7 @@ static const char * const asus_msi_boards[] = {
+ "EX-B760M-V5 D4",
+ "EX-H510M-V3",
+ "EX-H610M-V3 D4",
++ "G15CF",
+ "PRIME A620M-A",
+ "PRIME B560-PLUS",
+ "PRIME B560-PLUS AC-HES",
+diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
+index ef75b63f5894e5..b5352900463fb9 100644
+--- a/drivers/hwmon/ntc_thermistor.c
++++ b/drivers/hwmon/ntc_thermistor.c
+@@ -62,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
+ [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
+ [NTC_LAST] = { },
+ };
++MODULE_DEVICE_TABLE(platform, ntc_thermistor_id);
+
+ /*
+ * A compensation table should be sorted by the values of .ohm
+diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c
+index 428c77b5fce5a2..7caf387eb1449f 100644
+--- a/drivers/hwmon/nzxt-kraken2.c
++++ b/drivers/hwmon/nzxt-kraken2.c
+@@ -161,13 +161,13 @@ static int kraken2_probe(struct hid_device *hdev,
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hid hw start failed with %d\n", ret);
+- goto fail_and_stop;
++ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hid hw open failed with %d\n", ret);
+- goto fail_and_close;
++ goto fail_and_stop;
+ }
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "kraken2",
+diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
+index a4adc8bd531ff1..534a6072036c99 100644
+--- a/drivers/hwmon/pc87360.c
++++ b/drivers/hwmon/pc87360.c
+@@ -323,7 +323,11 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
+ }
+
+ /* Voltages */
+- for (i = 0; i < data->innr; i++) {
++ /*
++ * The min() below does not have any practical meaning and is
++ * only needed to silence a warning observed with gcc 12+.
++ */
++ for (i = 0; i < min(data->innr, ARRAY_SIZE(data->in)); i++) {
+ data->in_status[i] = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ /* Clear bits */
+diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
+index 26ba506331007e..b9bb469e2d8feb 100644
+--- a/drivers/hwmon/pmbus/mp2975.c
++++ b/drivers/hwmon/pmbus/mp2975.c
+@@ -297,6 +297,11 @@ static int mp2973_read_word_data(struct i2c_client *client, int page,
+ int ret;
+
+ switch (reg) {
++ case PMBUS_STATUS_WORD:
++ /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
++ ret = pmbus_read_word_data(client, page, phase, reg);
++ ret ^= PB_STATUS_POWER_GOOD_N;
++ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+@@ -380,11 +385,6 @@ static int mp2975_read_word_data(struct i2c_client *client, int page,
+ int ret;
+
+ switch (reg) {
+- case PMBUS_STATUS_WORD:
+- /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
+- ret = pmbus_read_word_data(client, page, phase, reg);
+- ret ^= PB_STATUS_POWER_GOOD_N;
+- break;
+ case PMBUS_OT_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
+index b0832a4c690d7f..76c2b364c3fe40 100644
+--- a/drivers/hwmon/pmbus/pmbus.h
++++ b/drivers/hwmon/pmbus/pmbus.h
+@@ -409,6 +409,12 @@ enum pmbus_sensor_classes {
+ enum pmbus_data_format { linear = 0, ieee754, direct, vid };
+ enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
+
++/* PMBus revision identifiers */
++#define PMBUS_REV_10 0x00 /* PMBus revision 1.0 */
++#define PMBUS_REV_11 0x11 /* PMBus revision 1.1 */
++#define PMBUS_REV_12 0x22 /* PMBus revision 1.2 */
++#define PMBUS_REV_13 0x33 /* PMBus revision 1.3 */
++
+ struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ u8 phases[PMBUS_PAGES]; /* Number of phases per page */
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 1363d9f89181d2..728c07c42651ce 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -85,6 +85,8 @@ struct pmbus_data {
+
+ u32 flags; /* from platform data */
+
++ u8 revision; /* The PMBus revision the device is compliant with */
++
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
+
+@@ -1095,9 +1097,14 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
+
+ regval = status & mask;
+ if (regval) {
+- ret = _pmbus_write_byte_data(client, page, reg, regval);
+- if (ret)
+- goto unlock;
++ if (data->revision >= PMBUS_REV_12) {
++ ret = _pmbus_write_byte_data(client, page, reg, regval);
++ if (ret)
++ goto unlock;
++ } else {
++ pmbus_clear_fault_page(client, page);
++ }
++
+ }
+ if (s1 && s2) {
+ s64 v1, v2;
+@@ -2640,6 +2647,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ }
+
++ ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
++ if (ret >= 0)
++ data->revision = ret;
++
+ if (data->info->pages)
+ pmbus_clear_faults(client);
+ else
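+
+The pmbus_core hunks above work as a pair: pmbus_init_common() caches the
+device's PMBUS_REVISION byte once, and pmbus_get_boolean() then chooses a
+fault-clearing mechanism the device actually supports. Clearing individual
+status bits by writing them back was only introduced with PMBus revision
+1.2; older devices offer just CLEAR_FAULTS for the whole page. Condensed,
+using the same names as the hunks:
+
+        ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
+        if (ret >= 0)
+                data->revision = ret;   /* e.g. 0x22 == PMBUS_REV_12 */
+
+        /* later, when a latched status bit must be cleared: */
+        if (data->revision >= PMBUS_REV_12)
+                ret = _pmbus_write_byte_data(client, page, reg, regval);
+        else
+                pmbus_clear_fault_page(client, page);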
+diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
+index 8d9d422450e5cf..d817c719b90bd5 100644
+--- a/drivers/hwmon/pmbus/ucd9000.c
++++ b/drivers/hwmon/pmbus/ucd9000.c
+@@ -80,11 +80,11 @@ struct ucd9000_debugfs_entry {
+ * It has been observed that the UCD90320 randomly fails register access when
+ * doing another access right on the back of a register write. To mitigate this
+ * make sure that there is a minimum delay between a write access and the
+- * following access. The 250us is based on experimental data. At a delay of
+- * 200us the issue seems to go away. Add a bit of extra margin to allow for
++ * following access. The 500us is based on experimental data. At a delay of
++ * 350us the issue seems to go away. Add a bit of extra margin to allow for
+ * system to system differences.
+ */
+-#define UCD90320_WAIT_DELAY_US 250
++#define UCD90320_WAIT_DELAY_US 500
+
+ static inline void ucd90320_wait(const struct ucd9000_data *data)
+ {
+diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
+index 6e4516c2ab894f..b67bc9e833c01e 100644
+--- a/drivers/hwmon/pwm-fan.c
++++ b/drivers/hwmon/pwm-fan.c
+@@ -151,7 +151,7 @@ static int pwm_fan_power_on(struct pwm_fan_ctx *ctx)
+ }
+
+ state->enabled = true;
+- ret = pwm_apply_state(ctx->pwm, state);
++ ret = pwm_apply_might_sleep(ctx->pwm, state);
+ if (ret) {
+ dev_err(ctx->dev, "failed to enable PWM\n");
+ goto disable_regulator;
+@@ -181,7 +181,7 @@ static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
+
+ state->enabled = false;
+ state->duty_cycle = 0;
+- ret = pwm_apply_state(ctx->pwm, state);
++ ret = pwm_apply_might_sleep(ctx->pwm, state);
+ if (ret) {
+ dev_err(ctx->dev, "failed to disable PWM\n");
+ return ret;
+@@ -207,7 +207,7 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+
+ period = state->period;
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+- ret = pwm_apply_state(ctx->pwm, state);
++ ret = pwm_apply_might_sleep(ctx->pwm, state);
+ if (ret)
+ return ret;
+ ret = pwm_fan_power_on(ctx);
+@@ -278,7 +278,7 @@ static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val)
+ state,
+ &enable_regulator);
+
+- pwm_apply_state(ctx->pwm, state);
++ pwm_apply_might_sleep(ctx->pwm, state);
+ pwm_fan_switch_power(ctx, enable_regulator);
+ pwm_fan_update_state(ctx, 0);
+ }
+diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
+index 1bbda3b05532e5..bf408e35e2c329 100644
+--- a/drivers/hwmon/sch5627.c
++++ b/drivers/hwmon/sch5627.c
+@@ -6,6 +6,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/bits.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+@@ -32,6 +33,10 @@
+ #define SCH5627_REG_PRIMARY_ID 0x3f
+ #define SCH5627_REG_CTRL 0x40
+
++#define SCH5627_CTRL_START BIT(0)
++#define SCH5627_CTRL_LOCK BIT(1)
++#define SCH5627_CTRL_VBAT BIT(4)
++
+ #define SCH5627_NO_TEMPS 8
+ #define SCH5627_NO_FANS 4
+ #define SCH5627_NO_IN 5
+@@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data)
+
+ /* Trigger a Vbat voltage measurement every 5 minutes */
+ if (time_after(jiffies, data->last_battery + 300 * HZ)) {
+- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10);
++ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
++ data->control | SCH5627_CTRL_VBAT);
+ data->last_battery = jiffies;
+ }
+
+@@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg)
+ static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+ {
++ const struct sch5627_data *data = drvdata;
++
++ /* Once the lock bit is set, the virtual registers become read-only
++ * until the next power cycle.
++ */
++ if (data->control & SCH5627_CTRL_LOCK)
++ return 0444;
++
+ if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
+ return 0644;
+
+@@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev)
+ return val;
+
+ data->control = val;
+- if (!(data->control & 0x01)) {
++ if (!(data->control & SCH5627_CTRL_START)) {
+ pr_err("hardware monitoring not enabled\n");
+ return -ENODEV;
+ }
+ /* Trigger a Vbat voltage measurement, so that we get a valid reading
+ the first time we read Vbat */
+- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
+- data->control | 0x10);
++ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT);
+ data->last_battery = jiffies;
+
+ /*
+diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
+index de3a0886c2f726..ac1f7258071551 100644
+--- a/drivers/hwmon/sch56xx-common.c
++++ b/drivers/hwmon/sch56xx-common.c
+@@ -7,10 +7,8 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/module.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+-#include <linux/dmi.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/acpi.h>
+@@ -21,10 +19,7 @@
+ #include <linux/slab.h>
+ #include "sch56xx-common.h"
+
+-static bool ignore_dmi;
+-module_param(ignore_dmi, bool, 0);
+-MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
+-
++/* Insmod parameters */
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name)
+ return PTR_ERR_OR_ZERO(sch56xx_pdev);
+ }
+
+-static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
+- },
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
+- },
+- },
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
+- },
+- },
+- { }
+-};
+-
+-/* For autoloading only */
+-static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- },
+- },
+- { }
+-};
+-MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
+-
+ static int __init sch56xx_init(void)
+ {
+- const char *name = NULL;
+ int address;
++ const char *name = NULL;
+
+- if (!ignore_dmi) {
+- if (!dmi_check_system(sch56xx_dmi_table))
+- return -ENODEV;
+-
+- if (!dmi_check_system(sch56xx_dmi_override_table)) {
+- /*
+- * Some machines like the Esprimo P720 and Esprimo C700 have
+- * onboard devices named " Antiope"/" Theseus" instead of
+- * "Antiope"/"Theseus", so we need to check for both.
+- */
+- if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
+- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
+- return -ENODEV;
+- }
+- }
+-
+- /*
+- * Some devices like the Esprimo C700 have both onboard devices,
+- * so we still have to check manually
+- */
+ address = sch56xx_find(0x4e, &name);
+ if (address < 0)
+ address = sch56xx_find(0x2e, &name);
+diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
+index 1f96e94967ee8d..439dd3dba5fc81 100644
+--- a/drivers/hwmon/shtc1.c
++++ b/drivers/hwmon/shtc1.c
+@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
+
+ if (np) {
+ data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
+- data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
++ data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
+ } else {
+ if (client->dev.platform_data)
+ data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index fe960c0a624f77..7d7d70afde6552 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -895,7 +895,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
+ if (err < 0)
+ return err;
+
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
+
+ mutex_lock(&data->update_lock);
+ data->target_temp[nr] = val;
+@@ -920,7 +920,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
+ return err;
+
+ /* Limit the temp to 0C - 15C */
+- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
++ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
+
+ mutex_lock(&data->update_lock);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
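+
+The order swap in the two w83627ehf hunks above is an overflow fix: with
+the old DIV_ROUND_CLOSEST(val, 1000) running first, an extreme value near
+LONG_MIN overflows inside the macro (it biases by half the divisor before
+dividing) before clamp_val() ever sees it. Clamping the raw millidegree
+input first keeps every intermediate in range:
+
+        /* old: clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127) can
+         * overflow for extreme val; new ordering cannot:
+         */
+        val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000); /* 0..127 */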
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index ada694ba9f958c..f279dd010b73e0 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -302,6 +302,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ }
+ EXPORT_SYMBOL_GPL(__hwspin_unlock);
+
++/**
++ * hwspin_lock_bust() - bust a specific hwspinlock
++ * @hwlock: a previously-acquired hwspinlock which we want to bust
++ * @id: identifier of the remote lock holder, if applicable
++ *
++ * This function will bust a hwspinlock that was previously acquired as
++ * long as the current owner of the lock matches the id given by the caller.
++ *
++ * Context: Process context.
++ *
++ * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or
++ * the bust operation fails, and -EOPNOTSUPP if the bust operation is not
++ * defined for the hwspinlock.
++ */
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++ if (WARN_ON(!hwlock))
++ return -EINVAL;
++
++ if (!hwlock->bank->ops->bust) {
++ pr_err("bust operation not defined\n");
++ return -EOPNOTSUPP;
++ }
++
++ return hwlock->bank->ops->bust(hwlock, id);
++}
++EXPORT_SYMBOL_GPL(hwspin_lock_bust);
++
+ /**
+ * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+ * @bank: the hwspinlock device bank
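+
+hwspin_lock_bust() above is a recovery hook: when a remote core dies while
+holding a hardware spinlock, the local side can forcibly release it, but
+only if the platform's bust op can verify the owner's identity. A
+hypothetical caller, assuming the dead remote's numeric ID is known:
+
+        /* remote processor crashed: reclaim the lock it may still hold */
+        ret = hwspin_lock_bust(hwlock, dead_rproc_id);
+        if (ret == -EOPNOTSUPP)
+                dev_warn(dev, "platform cannot bust hwspinlocks\n");
+        else if (ret)
+                dev_err(dev, "failed to bust lock %d: %d\n",
+                        hwspin_lock_get_id(hwlock), ret);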
+diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
+index 29892767bb7a0a..f298fc0ee5adbc 100644
+--- a/drivers/hwspinlock/hwspinlock_internal.h
++++ b/drivers/hwspinlock/hwspinlock_internal.h
+@@ -21,6 +21,8 @@ struct hwspinlock_device;
+ * @trylock: make a single attempt to take the lock. returns 0 on
+ * failure and true on success. may _not_ sleep.
+ * @unlock: release the lock. always succeed. may _not_ sleep.
++ * @bust: optional, platform-specific bust handler, called by hwspinlock
++ * core to bust a specific lock.
+ * @relax: optional, platform-specific relax handler, called by hwspinlock
+ * core while spinning on a lock, between two successive
+ * invocations of @trylock. may _not_ sleep.
+@@ -28,6 +30,7 @@ struct hwspinlock_device;
+ struct hwspinlock_ops {
+ int (*trylock)(struct hwspinlock *lock);
+ void (*unlock)(struct hwspinlock *lock);
++ int (*bust)(struct hwspinlock *lock, unsigned int id);
+ void (*relax)(struct hwspinlock *lock);
+ };
+
+diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
+index 9fabe00a40d6a0..4b80026db1ab61 100644
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -441,8 +441,26 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
+ }
+ }
+
++/*
++ * Helper function to call source_ops(csdev)->disable and also disable the
++ * helpers.
++ *
++ * There is an imbalance between coresight_enable_path() and
++ * coresight_disable_path(). Enabling also enables the source's helpers as part
++ * of the path, but disabling always skips the first item in the path (which is
++ * the source), so sources and their helpers don't get disabled as part of that
++ * function and we need the extra step here.
++ */
++void coresight_disable_source(struct coresight_device *csdev, void *data)
++{
++ if (source_ops(csdev)->disable)
++ source_ops(csdev)->disable(csdev, data);
++ coresight_disable_helpers(csdev);
++}
++EXPORT_SYMBOL_GPL(coresight_disable_source);
++
+ /**
+- * coresight_disable_source - Drop the reference count by 1 and disable
++ * coresight_disable_source_sysfs - Drop the reference count by 1 and disable
+ * the device if there are no users left.
+ *
+ * @csdev: The coresight device to disable
+@@ -451,17 +469,15 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
+ *
+ * Returns true if the device has been disabled.
+ */
+-bool coresight_disable_source(struct coresight_device *csdev, void *data)
++static bool coresight_disable_source_sysfs(struct coresight_device *csdev,
++ void *data)
+ {
+ if (atomic_dec_return(&csdev->refcnt) == 0) {
+- if (source_ops(csdev)->disable)
+- source_ops(csdev)->disable(csdev, data);
+- coresight_disable_helpers(csdev);
++ coresight_disable_source(csdev, data);
+ csdev->enable = false;
+ }
+ return !csdev->enable;
+ }
+-EXPORT_SYMBOL_GPL(coresight_disable_source);
+
+ /*
+ * coresight_disable_path_from : Disable components in the given path beyond
+@@ -1202,7 +1218,7 @@ void coresight_disable(struct coresight_device *csdev)
+ if (ret)
+ goto out;
+
+- if (!csdev->enable || !coresight_disable_source(csdev, NULL))
++ if (!csdev->enable || !coresight_disable_source_sysfs(csdev, NULL))
+ goto out;
+
+ switch (csdev->subtype.source_subtype) {
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 5ca6278baff4fa..58b32b399fac26 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -493,7 +493,7 @@ static void etm_event_start(struct perf_event *event, int flags)
+ goto fail_end_stop;
+
+ /* Finally enable the tracer */
+- if (coresight_enable_source(csdev, CS_MODE_PERF, event))
++ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
+ goto fail_disable_path;
+
+ /*
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 77b0271ce6eb98..840e4cccf8c4ba 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -1160,6 +1160,7 @@ static void etm4_init_arch_data(void *info)
+ struct etm4_init_arg *init_arg = info;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
++ struct device *dev = init_arg->dev;
+ int i;
+
+ drvdata = dev_get_drvdata(init_arg->dev);
+@@ -1173,6 +1174,10 @@ static void etm4_init_arch_data(void *info)
+ if (!etm4_init_csdev_access(drvdata, csa))
+ return;
+
++ if (!csa->io_mem ||
++ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
++ drvdata->skip_power_up = true;
++
+ /* Detect the support for OS Lock before we actually use it */
+ etm_detect_os_lock(drvdata, csa);
+
+@@ -1199,6 +1204,8 @@ static void etm4_init_arch_data(void *info)
+ drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
+ /* QSUPP, bits[16:15] Q element support field */
+ drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
++ if (drvdata->q_support)
++ drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
+ /* TSSIZE, bits[28:24] Global timestamp size field */
+ drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
+
+@@ -1689,16 +1696,14 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
+ state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
+ state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
+- state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
++ if (drvdata->q_filt)
++ state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+
+ state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
+ state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
+ state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
+ if (drvdata->nr_pe_cmp)
+ state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
+- state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
+- state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
+- state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
+@@ -1715,7 +1720,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
+ }
+
+- for (i = 0; i < drvdata->nr_resource * 2; i++)
++ /* Resource selector pair 0 is reserved */
++ for (i = 2; i < drvdata->nr_resource * 2; i++)
+ state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+@@ -1800,8 +1806,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ {
+ int i;
+ struct etmv4_save_state *state = drvdata->save_state;
+- struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+- struct csdev_access *csa = &tmp_csa;
++ struct csdev_access *csa = &drvdata->csdev->access;
++
++ if (WARN_ON(!drvdata->csdev))
++ return;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
+@@ -1820,16 +1828,14 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
+- etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
++ if (drvdata->q_filt)
++ etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+
+ etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
+ etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
+ if (drvdata->nr_pe_cmp)
+ etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
+- etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
+- etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
+- etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
+@@ -1846,7 +1852,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
+ }
+
+- for (i = 0; i < drvdata->nr_resource * 2; i++)
++ /* Resource selector pair 0 is reserved */
++ for (i = 2; i < drvdata->nr_resource * 2; i++)
+ etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+@@ -1998,11 +2005,6 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
+ if (!drvdata->arch)
+ return -EINVAL;
+
+- /* TRCPDCR is not accessible with system instructions. */
+- if (!desc.access.io_mem ||
+- fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+- drvdata->skip_power_up = true;
+-
+ major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
+ minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
+
+@@ -2175,6 +2177,9 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
+ ret = etm4_probe(&pdev->dev);
+
+ pm_runtime_put(&pdev->dev);
++ if (ret)
++ pm_runtime_disable(&pdev->dev);
++
+ return ret;
+ }
+
+@@ -2224,7 +2229,7 @@ static void clear_etmdrvdata(void *info)
+ per_cpu(delayed_probe, cpu) = NULL;
+ }
+
+-static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
++static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
+ {
+ bool had_delayed_probe;
+ /*
+@@ -2253,7 +2258,7 @@ static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+ }
+ }
+
+-static void __exit etm4_remove_amba(struct amba_device *adev)
++static void etm4_remove_amba(struct amba_device *adev)
+ {
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+@@ -2261,7 +2266,7 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
+ etm4_remove_dev(drvdata);
+ }
+
+-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
++static int etm4_remove_platform_dev(struct platform_device *pdev)
+ {
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 20e2e4cb761462..6b6760e49ed357 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -43,9 +43,6 @@
+ #define TRCVIIECTLR 0x084
+ #define TRCVISSCTLR 0x088
+ #define TRCVIPCSSCTLR 0x08C
+-#define TRCVDCTLR 0x0A0
+-#define TRCVDSACCTLR 0x0A4
+-#define TRCVDARCCTLR 0x0A8
+ /* Derived resources registers */
+ #define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */
+ #define TRCSEQRSTEVR 0x118
+@@ -90,9 +87,6 @@
+ /* Address Comparator registers n = 0-15 */
+ #define TRCACVRn(n) (0x400 + (n * 8))
+ #define TRCACATRn(n) (0x480 + (n * 8))
+-/* Data Value Comparator Value registers, n = 0-7 */
+-#define TRCDVCVRn(n) (0x500 + (n * 16))
+-#define TRCDVCMRn(n) (0x580 + (n * 16))
+ /* ContextID/Virtual ContextID comparators, n = 0-7 */
+ #define TRCCIDCVRn(n) (0x600 + (n * 8))
+ #define TRCVMIDCVRn(n) (0x640 + (n * 8))
+@@ -141,6 +135,7 @@
+ #define TRCIDR0_TRCCCI BIT(7)
+ #define TRCIDR0_RETSTACK BIT(9)
+ #define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10)
++#define TRCIDR0_QFILT BIT(14)
+ #define TRCIDR0_QSUPP_MASK GENMASK(16, 15)
+ #define TRCIDR0_TSSIZE_MASK GENMASK(28, 24)
+
+@@ -272,9 +267,6 @@
+ /* List of registers accessible via System instructions */
+ #define ETM4x_ONLY_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPROCSELR) \
+- CASE_##op((val), TRCVDCTLR) \
+- CASE_##op((val), TRCVDSACCTLR) \
+- CASE_##op((val), TRCVDARCCTLR) \
+ CASE_##op((val), TRCOSLAR)
+
+ #define ETM_COMMON_SYSREG_LIST(op, val) \
+@@ -422,22 +414,6 @@
+ CASE_##op((val), TRCACATRn(13)) \
+ CASE_##op((val), TRCACATRn(14)) \
+ CASE_##op((val), TRCACATRn(15)) \
+- CASE_##op((val), TRCDVCVRn(0)) \
+- CASE_##op((val), TRCDVCVRn(1)) \
+- CASE_##op((val), TRCDVCVRn(2)) \
+- CASE_##op((val), TRCDVCVRn(3)) \
+- CASE_##op((val), TRCDVCVRn(4)) \
+- CASE_##op((val), TRCDVCVRn(5)) \
+- CASE_##op((val), TRCDVCVRn(6)) \
+- CASE_##op((val), TRCDVCVRn(7)) \
+- CASE_##op((val), TRCDVCMRn(0)) \
+- CASE_##op((val), TRCDVCMRn(1)) \
+- CASE_##op((val), TRCDVCMRn(2)) \
+- CASE_##op((val), TRCDVCMRn(3)) \
+- CASE_##op((val), TRCDVCMRn(4)) \
+- CASE_##op((val), TRCDVCMRn(5)) \
+- CASE_##op((val), TRCDVCMRn(6)) \
+- CASE_##op((val), TRCDVCMRn(7)) \
+ CASE_##op((val), TRCCIDCVRn(0)) \
+ CASE_##op((val), TRCCIDCVRn(1)) \
+ CASE_##op((val), TRCCIDCVRn(2)) \
+@@ -907,9 +883,6 @@ struct etmv4_save_state {
+ u32 trcviiectlr;
+ u32 trcvissctlr;
+ u32 trcvipcssctlr;
+- u32 trcvdctlr;
+- u32 trcvdsacctlr;
+- u32 trcvdarcctlr;
+
+ u32 trcseqevr[ETM_MAX_SEQ_STATES];
+ u32 trcseqrstevr;
+@@ -982,6 +955,7 @@ struct etmv4_save_state {
+ * @os_unlock: True if access to management registers is allowed.
+ * @instrp0: Tracing of load and store instructions
+ * as P0 elements is supported.
++ * @q_filt: Q element filtering support, if Q elements are supported.
+ * @trcbb: Indicates if the trace unit supports branch broadcast tracing.
+ * @trccond: If the trace unit supports conditional
+ * instruction tracing.
+@@ -1036,7 +1010,7 @@ struct etmv4_drvdata {
+ u8 ctxid_size;
+ u8 vmid_size;
+ u8 ccsize;
+- u8 ccitmin;
++ u16 ccitmin;
+ u8 s_ex_level;
+ u8 ns_ex_level;
+ u8 q_support;
+@@ -1045,6 +1019,7 @@ struct etmv4_drvdata {
+ bool boot_enable;
+ bool os_unlock;
+ bool instrp0;
++ bool q_filt;
+ bool trcbb;
+ bool trccond;
+ bool retstack;
+diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
+index 9d550f5697fa82..57a009552cc5c0 100644
+--- a/drivers/hwtracing/coresight/coresight-platform.c
++++ b/drivers/hwtracing/coresight/coresight-platform.c
+@@ -297,8 +297,10 @@ static int of_get_coresight_platform_data(struct device *dev,
+ continue;
+
+ ret = of_coresight_parse_endpoint(dev, ep, pdata);
+- if (ret)
++ if (ret) {
++ of_node_put(ep);
+ return ret;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
+index 767076e0797011..30c051055e54b3 100644
+--- a/drivers/hwtracing/coresight/coresight-priv.h
++++ b/drivers/hwtracing/coresight/coresight-priv.h
+@@ -233,6 +233,6 @@ void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
+ struct coresight_device *coresight_get_percpu_sink(int cpu);
+ int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
+-bool coresight_disable_source(struct coresight_device *csdev, void *data);
++void coresight_disable_source(struct coresight_device *csdev, void *data);
+
+ #endif
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 8311e1028ddb03..f3312fbcdc0f82 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -255,6 +255,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
+ {
+ tmc_free_table_pages(sg_table);
+ tmc_free_data_pages(sg_table);
++ kfree(sg_table);
+ }
+ EXPORT_SYMBOL_GPL(tmc_free_sg_table);
+
+@@ -336,7 +337,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ rc = tmc_alloc_table_pages(sg_table);
+ if (rc) {
+ tmc_free_sg_table(sg_table);
+- kfree(sg_table);
+ return ERR_PTR(rc);
+ }
+
+diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
+index e9a32a97fbee69..6e32d31a95fe08 100644
+--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
++++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
+@@ -99,7 +99,7 @@ static int smb_open(struct inode *inode, struct file *file)
+ struct smb_drv_data, miscdev);
+ int ret = 0;
+
+- mutex_lock(&drvdata->mutex);
++ spin_lock(&drvdata->spinlock);
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+@@ -115,7 +115,7 @@ static int smb_open(struct inode *inode, struct file *file)
+
+ drvdata->reading = true;
+ out:
+- mutex_unlock(&drvdata->mutex);
++ spin_unlock(&drvdata->spinlock);
+
+ return ret;
+ }
+@@ -132,10 +132,8 @@ static ssize_t smb_read(struct file *file, char __user *data, size_t len,
+ if (!len)
+ return 0;
+
+- mutex_lock(&drvdata->mutex);
+-
+ if (!sdb->data_size)
+- goto out;
++ return 0;
+
+ to_copy = min(sdb->data_size, len);
+
+@@ -145,20 +143,15 @@ static ssize_t smb_read(struct file *file, char __user *data, size_t len,
+
+ if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
+ dev_dbg(dev, "Failed to copy data to user\n");
+- to_copy = -EFAULT;
+- goto out;
++ return -EFAULT;
+ }
+
+ *ppos += to_copy;
+-
+ smb_update_read_ptr(drvdata, to_copy);
+-
+- dev_dbg(dev, "%zu bytes copied\n", to_copy);
+-out:
+ if (!sdb->data_size)
+ smb_reset_buffer(drvdata);
+- mutex_unlock(&drvdata->mutex);
+
++ dev_dbg(dev, "%zu bytes copied\n", to_copy);
+ return to_copy;
+ }
+
+@@ -167,9 +160,9 @@ static int smb_release(struct inode *inode, struct file *file)
+ struct smb_drv_data *drvdata = container_of(file->private_data,
+ struct smb_drv_data, miscdev);
+
+- mutex_lock(&drvdata->mutex);
++ spin_lock(&drvdata->spinlock);
+ drvdata->reading = false;
+- mutex_unlock(&drvdata->mutex);
++ spin_unlock(&drvdata->spinlock);
+
+ return 0;
+ }
+@@ -262,7 +255,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret = 0;
+
+- mutex_lock(&drvdata->mutex);
++ spin_lock(&drvdata->spinlock);
+
+ /* Do nothing, the trace data is reading by other interface now */
+ if (drvdata->reading) {
+@@ -294,7 +287,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
+
+ dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
+ out:
+- mutex_unlock(&drvdata->mutex);
++ spin_unlock(&drvdata->spinlock);
+
+ return ret;
+ }
+@@ -304,7 +297,7 @@ static int smb_disable(struct coresight_device *csdev)
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret = 0;
+
+- mutex_lock(&drvdata->mutex);
++ spin_lock(&drvdata->spinlock);
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+@@ -327,7 +320,7 @@ static int smb_disable(struct coresight_device *csdev)
+
+ dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
+ out:
+- mutex_unlock(&drvdata->mutex);
++ spin_unlock(&drvdata->spinlock);
+
+ return ret;
+ }
+@@ -408,7 +401,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
+ if (!buf)
+ return 0;
+
+- mutex_lock(&drvdata->mutex);
++ spin_lock(&drvdata->spinlock);
+
+ /* Don't do anything if another tracer is using this sink. */
+ if (atomic_read(&csdev->refcnt) != 1)
+@@ -432,7 +425,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
+ if (!buf->snapshot && lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ out:
+- mutex_unlock(&drvdata->mutex);
++ spin_unlock(&drvdata->spinlock);
+
+ return data_size;
+ }
+@@ -484,7 +477,6 @@ static int smb_init_data_buffer(struct platform_device *pdev,
+ static void smb_init_hw(struct smb_drv_data *drvdata)
+ {
+ smb_disable_hw(drvdata);
+- smb_reset_buffer(drvdata);
+
+ writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
+ writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
+@@ -590,37 +582,33 @@ static int smb_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- mutex_init(&drvdata->mutex);
++ ret = smb_config_inport(dev, true);
++ if (ret)
++ return ret;
++
++ smb_reset_buffer(drvdata);
++ platform_set_drvdata(pdev, drvdata);
++ spin_lock_init(&drvdata->spinlock);
+ drvdata->pid = -1;
+
+ ret = smb_register_sink(pdev, drvdata);
+ if (ret) {
++ smb_config_inport(&pdev->dev, false);
+ dev_err(dev, "Failed to register SMB sink\n");
+ return ret;
+ }
+
+- ret = smb_config_inport(dev, true);
+- if (ret) {
+- smb_unregister_sink(drvdata);
+- return ret;
+- }
+-
+- platform_set_drvdata(pdev, drvdata);
+-
+ return 0;
+ }
+
+ static int smb_remove(struct platform_device *pdev)
+ {
+ struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
+- int ret;
+-
+- ret = smb_config_inport(&pdev->dev, false);
+- if (ret)
+- return ret;
+
+ smb_unregister_sink(drvdata);
+
++ smb_config_inport(&pdev->dev, false);
++
+ return 0;
+ }
+
+diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
+index d2e14e8d2c8a8c..82a44c14a8829c 100644
+--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
++++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
+@@ -8,7 +8,7 @@
+ #define _ULTRASOC_SMB_H
+
+ #include <linux/miscdevice.h>
+-#include <linux/mutex.h>
++#include <linux/spinlock.h>
+
+ /* Offset of SMB global registers */
+ #define SMB_GLB_CFG_REG 0x00
+@@ -105,7 +105,7 @@ struct smb_data_buffer {
+ * @csdev: Component vitals needed by the framework.
+ * @sdb: Data buffer for SMB.
+ * @miscdev: Specifics to handle "/dev/xyz.smb" entry.
+- * @mutex: Control data access to one at a time.
++ * @spinlock: Control data access to one at a time.
+ * @reading: Synchronise user space access to SMB buffer.
+ * @pid: Process ID of the process being monitored by the
+ * session that is using this component.
+@@ -116,7 +116,7 @@ struct smb_drv_data {
+ struct coresight_device *csdev;
+ struct smb_data_buffer sdb;
+ struct miscdevice miscdev;
+- struct mutex mutex;
++ spinlock_t spinlock;
+ bool reading;
+ pid_t pid;
+ enum cs_mode mode;
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 147d338c191e77..8dad239aba2cea 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -289,6 +289,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Meteor Lake-S CPU */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
++ {
++ /* Meteor Lake-S */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ {
+ /* Raptor Lake-S */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+@@ -299,6 +309,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Granite Rapids */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
++ {
++ /* Granite Rapids SOC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
++ {
++ /* Sapphire Rapids SOC */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
++ {
++ /* Lunar Lake */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ {
+ /* Alder Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 49ea1b0f748903..24a1f7797aeb6a 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -342,9 +342,9 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+ return ret;
+
+ hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
+- ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
+- NULL, hisi_ptt_isr, 0,
+- DRV_NAME, hisi_ptt);
++ ret = devm_request_irq(&pdev->dev, hisi_ptt->trace_irq, hisi_ptt_isr,
++ IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
++ hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to request irq %d, ret = %d\n",
+ hisi_ptt->trace_irq, ret);
+@@ -995,13 +995,16 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
+ int ret;
+ u32 val;
+
++ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
++ return -ENOENT;
++
+ if (event->cpu < 0) {
+ dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+- if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+- return -ENOENT;
++ if (event->attach_state & PERF_ATTACH_TASK)
++ return -EOPNOTSUPP;
+
+ ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
+ if (ret < 0)
+@@ -1178,6 +1181,10 @@ static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
+ hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
+ }
+
++static void hisi_ptt_pmu_read(struct perf_event *event)
++{
++}
++
+ static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
+ {
+ cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
+@@ -1221,6 +1228,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
+ .stop = hisi_ptt_pmu_stop,
+ .add = hisi_ptt_pmu_add,
+ .del = hisi_ptt_pmu_del,
++ .read = hisi_ptt_pmu_read,
+ };
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
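+
+Two of the hisi_ptt changes above guard against perf core conventions
+rather than hardware: event_init must reject foreign event types with
+-ENOENT before any other check (any other errno stops perf from offering
+the event to the remaining PMUs), and pmu::read must exist even if empty,
+since perf core can call it unconditionally and a NULL pointer would oops.
+Sketched with hypothetical names:
+
+        static void my_pmu_read(struct perf_event *event)
+        {
+                /* nothing to do: counts are folded in at ->stop() time */
+        }
+
+        static int my_pmu_event_init(struct perf_event *event)
+        {
+                /* type check first, so foreign events keep probing PMUs */
+                if (event->attr.type != my_pmu.type)
+                        return -ENOENT;
+                return 0;       /* per-event validation would follow here */
+        }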
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index 534fbefc7f6aab..20895d39156236 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -868,8 +868,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ return -ENOMEM;
+
+ stm->major = register_chrdev(0, stm_data->name, &stm_fops);
+- if (stm->major < 0)
+- goto err_free;
++ if (stm->major < 0) {
++ err = stm->major;
++ vfree(stm);
++ return err;
++ }
+
+ device_initialize(&stm->dev);
+ stm->dev.devt = MKDEV(stm->major, 0);
+@@ -913,10 +916,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ err_device:
+ unregister_chrdev(stm->major, stm_data->name);
+
+- /* matches device_initialize() above */
++ /* calls stm_device_release() */
+ put_device(&stm->dev);
+-err_free:
+- vfree(stm);
+
+ return err;
+ }
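+
+The stm error-path rework above encodes a driver-core lifetime rule:
+before device_initialize() the allocation is plain memory and vfree() is
+correct, but once device_initialize() has run, the embedded kobject owns
+the object and only put_device() may free it (here by way of
+stm_device_release()). In outline, following the hunks:
+
+        stm = vzalloc(size);
+        stm->major = register_chrdev(0, stm_data->name, &stm_fops);
+        if (stm->major < 0) {
+                err = stm->major;
+                vfree(stm);     /* no device yet: plain free */
+                return err;
+        }
+        device_initialize(&stm->dev);
+        /* from here on, every error path unwinds with put_device(),
+         * which ends in stm_device_release() freeing the allocation
+         */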
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 6644eebedaf3b7..97d27e01a6ee27 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -158,6 +158,7 @@ config I2C_I801
+ Alder Lake (PCH)
+ Raptor Lake (PCH)
+ Meteor Lake (SOC and PCH)
++ Birch Stream (SOC)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index af56fe2c75c09c..9be9fdb07f3dca 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -90,10 +90,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o
+ obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
+ obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
+ obj-$(CONFIG_I2C_OWL) += i2c-owl.o
+-i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o
+-obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
+-i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o
+-obj-$(CONFIG_I2C_APPLE) += i2c-apple.o
++obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o
++obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o
+ obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
+ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
+ obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 28e2a5fc45282d..83e6714901b289 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
+
+ static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
+
++/* precondition: bus.lock has been acquired. */
++static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
++{
++ bus->master_state = ASPEED_I2C_MASTER_STOP;
++ writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++}
++
+ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
+ {
+ unsigned long time_left, flags;
+@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
+ command);
+
+ reinit_completion(&bus->cmd_complete);
+- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++ aspeed_i2c_do_stop(bus);
+ spin_unlock_irqrestore(&bus->lock, flags);
+
+ time_left = wait_for_completion_timeout(
+@@ -249,18 +256,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ if (!slave)
+ return 0;
+
+- command = readl(bus->base + ASPEED_I2C_CMD_REG);
++ /*
++ * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
++ * transfers with low enough latency between the nak/stop phase of the current
++ * command and the start/address phase of the following command that the
++ * interrupts are coalesced by the time we process them.
++ */
++ if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
++ irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
++ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
++ }
++
++ if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
++ bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
++ irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
++ bus->slave_state = ASPEED_I2C_SLAVE_STOP;
++ }
++
++ /* Propagate any stop conditions to the slave implementation. */
++ if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
++ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
++ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
++ }
+
+- /* Slave was requested, restart state machine. */
++ /*
++ * Now that we've dealt with any potentially coalesced stop conditions,
++ * address any start conditions.
++ */
+ if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
+ irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
+ bus->slave_state = ASPEED_I2C_SLAVE_START;
+ }
+
+- /* Slave is not currently active, irq was for someone else. */
++ /*
++ * If the slave has been stopped and not started then slave interrupt
++ * handling is complete.
++ */
+ if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ return irq_handled;
+
++ command = readl(bus->base + ASPEED_I2C_CMD_REG);
+ dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
+ irq_status, command);
+
+@@ -279,17 +314,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
+ }
+
+- /* Slave was asked to stop. */
+- if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
+- irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
+- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+- }
+- if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
+- bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
+- irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
+- bus->slave_state = ASPEED_I2C_SLAVE_STOP;
+- }
+-
+ switch (bus->slave_state) {
+ case ASPEED_I2C_SLAVE_READ_REQUESTED:
+ if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
+@@ -324,8 +348,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ break;
+ case ASPEED_I2C_SLAVE_STOP:
+- i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
++ /* Stop event handling is done early. Unreachable. */
+ break;
+ case ASPEED_I2C_SLAVE_START:
+ /* Slave was just started. Waiting for the next event. */;
+@@ -374,13 +397,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
+ writel(command, bus->base + ASPEED_I2C_CMD_REG);
+ }
+
+-/* precondition: bus.lock has been acquired. */
+-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+-{
+- bus->master_state = ASPEED_I2C_MASTER_STOP;
+- writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+-}
+-
+ /* precondition: bus.lock has been acquired. */
+ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
+ {
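+
+The aspeed reordering above is the entire fix: a master can issue the stop
+of one transfer and the start of the next faster than the ISR runs, so
+both conditions land in a single coalesced irq_status. The old code
+restarted the state machine for the new transfer before the previous
+transfer's stop had been delivered, corrupting the slave state. Any
+handler facing coalesced events needs this shape (hypothetical names):
+
+        /* close out the previous transfer first... */
+        if (irq_status & INTR_STOP)
+                finish_transfer(bus);           /* emits I2C_SLAVE_STOP */
+
+        /* ...then, possibly in the same invocation, start the next one */
+        if (irq_status & INTR_SLAVE_MATCH)
+                begin_transfer(bus);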
+diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
+index d6eeea5166c04f..131a67d9d4a689 100644
+--- a/drivers/i2c/busses/i2c-at91-slave.c
++++ b/drivers/i2c/busses/i2c-at91-slave.c
+@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
+
+ static u32 at91_twi_func(struct i2c_adapter *adapter)
+ {
+- return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
+- | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
++ return I2C_FUNC_SLAVE;
+ }
+
+ static const struct i2c_algorithm at91_twi_algorithm_slave = {
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 51aab662050b1f..e905734c26a049 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+
+-static void bcm_iproc_i2c_check_slave_status(
+- struct bcm_iproc_i2c_dev *iproc_i2c)
++static bool bcm_iproc_i2c_check_slave_status
++ (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
+ {
+ u32 val;
++ bool recover = false;
+
+- val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+- /* status is valid only when START_BUSY is cleared after it was set */
+- if (val & BIT(S_CMD_START_BUSY_SHIFT))
+- return;
++ /* check slave transmit status only if slave is transmitting */
++ if (!iproc_i2c->slave_rx_only) {
++ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
++ /* status is valid only when START_BUSY is cleared */
++ if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
++ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
++ if (val == S_CMD_STATUS_TIMEOUT ||
++ val == S_CMD_STATUS_MASTER_ABORT) {
++ dev_warn(iproc_i2c->device,
++ (val == S_CMD_STATUS_TIMEOUT) ?
++ "slave random stretch time timeout\n" :
++ "Master aborted read transaction\n");
++ recover = true;
++ }
++ }
++ }
++
++ /* RX_EVENT is not valid when START_BUSY is set */
++ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
++ (status & BIT(IS_S_START_BUSY_SHIFT))) {
++ dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
++ recover = true;
++ }
+
+- val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+- if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
+- dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
+- "slave random stretch time timeout\n" :
+- "Master aborted read transaction\n");
++ if (recover) {
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ }
++
++ return recover;
+ }
+
+ static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 val;
+ u8 value;
+
+- /*
+- * Slave events in case of master-write, master-write-read and,
+- * master-read
+- *
+- * Master-write : only IS_S_RX_EVENT_SHIFT event
+- * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+- * events
+- * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+- * events or only IS_S_RD_EVENT_SHIFT
+- *
+- * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+- * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+- * full. This can happen if Master issues write requests of more than
+- * 64 bytes.
+- */
+- if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+- status & BIT(IS_S_RD_EVENT_SHIFT) ||
+- status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+- /* disable slave interrupts */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val &= ~iproc_i2c->slave_int_mask;
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+-
+- if (status & BIT(IS_S_RD_EVENT_SHIFT))
+- /* Master-write-read request */
+- iproc_i2c->slave_rx_only = false;
+- else
+- /* Master-write request only */
+- iproc_i2c->slave_rx_only = true;
+-
+- /* schedule tasklet to read data later */
+- tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+-
+- /*
+- * clear only IS_S_RX_EVENT_SHIFT and
+- * IS_S_RX_FIFO_FULL_SHIFT interrupt.
+- */
+- val = BIT(IS_S_RX_EVENT_SHIFT);
+- if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
+- val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+- }
+
+ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+ iproc_i2c->tx_underrun++;
+@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ * less than PKT_LENGTH bytes were output on the SMBUS
+ */
+ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+- iproc_i2c->slave_int_mask);
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ /* End of SMBUS for Master Read */
+ val = BIT(S_TX_WR_STATUS_SHIFT);
+@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ BIT(IS_S_START_BUSY_SHIFT));
+ }
+
+- /* check slave transmit status only if slave is transmitting */
+- if (!iproc_i2c->slave_rx_only)
+- bcm_iproc_i2c_check_slave_status(iproc_i2c);
++ /* if the controller has been reset, immediately return from the ISR */
++ if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
++ return true;
++
++ /*
++ * Slave events in case of master-write, master-write-read, and
++ * master-read
++ *
++ * Master-write : only IS_S_RX_EVENT_SHIFT event
++ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events
++ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events or only IS_S_RD_EVENT_SHIFT
++ *
++ * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
++ * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
++ * full. This can happen if Master issues write requests of more than
++ * 64 bytes.
++ */
++ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++ status & BIT(IS_S_RD_EVENT_SHIFT) ||
++ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++ /* disable slave interrupts */
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~iproc_i2c->slave_int_mask;
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++ if (status & BIT(IS_S_RD_EVENT_SHIFT))
++ /* Master-write-read request */
++ iproc_i2c->slave_rx_only = false;
++ else
++ /* Master-write request only */
++ iproc_i2c->slave_rx_only = true;
++
++ /* schedule tasklet to read data later */
++ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++ /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
++ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
++ val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
++ }
++ }
+
+ return true;
+ }
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index de3f58b60dce5d..6f7d753a8197ce 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -633,6 +633,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
+
+ if (hold_clear) {
+ ctrl_reg &= ~CDNS_I2C_CR_HOLD;
++ ctrl_reg &= ~CDNS_I2C_CR_CLR_FIFO;
+ /*
+ * In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size
+ * register reaches '0'. This is an IP bug which causes transfer size
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index affcfb243f0f52..58562700c85ee4 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -63,7 +63,7 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- *val = readl_relaxed(dev->base + reg);
++ *val = readl(dev->base + reg);
+
+ return 0;
+ }
+@@ -72,7 +72,7 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- writel_relaxed(val, dev->base + reg);
++ writel(val, dev->base + reg);
+
+ return 0;
+ }
+@@ -81,7 +81,7 @@ static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- *val = swab32(readl_relaxed(dev->base + reg));
++ *val = swab32(readl(dev->base + reg));
+
+ return 0;
+ }
+@@ -90,7 +90,7 @@ static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- writel_relaxed(swab32(val), dev->base + reg);
++ writel(swab32(val), dev->base + reg);
+
+ return 0;
+ }
+@@ -99,8 +99,8 @@ static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- *val = readw_relaxed(dev->base + reg) |
+- (readw_relaxed(dev->base + reg + 2) << 16);
++ *val = readw(dev->base + reg) |
++ (readw(dev->base + reg + 2) << 16);
+
+ return 0;
+ }
+@@ -109,8 +109,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- writew_relaxed(val, dev->base + reg);
+- writew_relaxed(val >> 16, dev->base + reg + 2);
++ writew(val, dev->base + reg);
++ writew(val >> 16, dev->base + reg + 2);
+
+ return 0;
+ }
+@@ -441,6 +441,7 @@ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
+
+ void __i2c_dw_disable(struct dw_i2c_dev *dev)
+ {
++ struct i2c_timings *t = &dev->timings;
+ unsigned int raw_intr_stats;
+ unsigned int enable;
+ int timeout = 100;
+@@ -453,6 +454,19 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
+
+ abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
+ if (abort_needed) {
++ if (!(enable & DW_IC_ENABLE_ENABLE)) {
++ regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
++ /*
++ * Wait 10 times the signaling period of the highest I2C
++ * transfer supported by the driver (for 400 kHz this is
++ * 25us) to ensure the I2C ENABLE bit is already set
++ * as described in the DesignWare I2C databook.
++ */
++ fsleep(DIV_ROUND_CLOSEST_ULL(10 * MICRO, t->bus_freq_hz));
++ /* Set ENABLE bit before setting ABORT */
++ enable |= DW_IC_ENABLE_ENABLE;
++ }
++
+ regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
+ ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
+ !(enable & DW_IC_ENABLE_ABORT), 10,
+diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
+index a7f6f3eafad7dd..99d8c6bbc0320d 100644
+--- a/drivers/i2c/busses/i2c-designware-core.h
++++ b/drivers/i2c/busses/i2c-designware-core.h
+@@ -109,6 +109,7 @@
+ DW_IC_INTR_RX_UNDER | \
+ DW_IC_INTR_RD_REQ)
+
++#define DW_IC_ENABLE_ENABLE BIT(0)
+ #define DW_IC_ENABLE_ABORT BIT(1)
+
+ #define DW_IC_STATUS_ACTIVITY BIT(0)
+@@ -318,7 +319,7 @@ struct dw_i2c_dev {
+ #define AMD_UCSI_INTR_EN 0xd
+
+ #define TXGBE_TX_FIFO_DEPTH 4
+-#define TXGBE_RX_FIFO_DEPTH 0
++#define TXGBE_RX_FIFO_DEPTH 1
+
+ struct i2c_dw_semaphore_callbacks {
+ int (*probe)(struct dw_i2c_dev *dev);
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index ca1035e010c722..579c668cb78a6d 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -253,6 +253,34 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
+ }
+
++/*
++ * This function waits for the controller to be idle before disabling I2C.
++ * When the controller is not in the IDLE state, the MST_ACTIVITY bit
++ * (IC_STATUS[5]) is set.
++ *
++ * Values:
++ * 0x1 (ACTIVE): Controller not idle
++ * 0x0 (IDLE): Controller is idle
++ *
++ * The function is called after completing the current transfer.
++ *
++ * Returns:
++ * False when the controller is in the IDLE state.
++ * True when the controller is in the ACTIVE state.
++ */
++static bool i2c_dw_is_controller_active(struct dw_i2c_dev *dev)
++{
++ u32 status;
++
++ regmap_read(dev->map, DW_IC_STATUS, &status);
++ if (!(status & DW_IC_STATUS_MASTER_ACTIVITY))
++ return false;
++
++ return regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
++ !(status & DW_IC_STATUS_MASTER_ACTIVITY),
++ 1100, 20000) != 0;
++}
++
+ static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
+ {
+ u32 val;
+@@ -519,10 +547,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+
+ /*
+ * Because we don't know the buffer length in the
+- * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+- * the transaction here.
++ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
++ * transaction here. Also disable the TX_EMPTY IRQ
++ * while waiting for the data length byte to avoid a
++ * flood of bogus interrupts.
+ */
+- if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
++ if (flags & I2C_M_RECV_LEN) {
++ dev->status |= STATUS_WRITE_IN_PROGRESS;
++ intr_mask &= ~DW_IC_INTR_TX_EMPTY;
++ break;
++ } else if (buf_len > 0) {
+ /* more bytes to be written */
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ break;
+@@ -558,6 +592,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+ msgs[dev->msg_read_idx].len = len;
+ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
++ /*
++ * Received buffer length, re-enable TX_EMPTY interrupt
++ * to resume the SMBUS transaction.
++ */
++ regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
++ DW_IC_INTR_TX_EMPTY);
++
+ return len;
+ }
+
+@@ -681,6 +722,16 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+ goto done;
+ }
+
++ /*
++ * This happens rarely (~1:500) and is hard to reproduce. Debug trace
++ * showed that IC_STATUS had a value of 0x23 when STOP_DET occurred;
++ * disabling IC_ENABLE.ENABLE immediately at that point can result in
++ * IC_RAW_INTR_STAT.MASTER_ON_HOLD holding SCL low. Check if the
++ * controller is still ACTIVE before disabling I2C.
++ */
++ if (i2c_dw_is_controller_active(dev))
++ dev_err(dev->dev, "controller active\n");
++
+ /*
+ * We must disable the adapter before returning and signaling the end
+ * of the current transfer. Otherwise the hardware might continue
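+
+i2c_dw_is_controller_active() above is a bounded poll of
+IC_STATUS.MST_ACTIVITY: one cheap read for the fast path, then
+regmap_read_poll_timeout() until the bit clears or 20 ms elapse, returning
+true only if the controller never went idle. The same polling idiom in
+isolation (map and bit names assumed):
+
+        u32 status;
+        int ret;
+
+        /* sample roughly every 1.1 ms, give up after 20 ms */
+        ret = regmap_read_poll_timeout(map, IC_STATUS, status,
+                                       !(status & MST_ACTIVITY),
+                                       1100, 20000);
+        if (ret)        /* -ETIMEDOUT: still ACTIVE, report it */
+                dev_err(dev, "controller active\n");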
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index 2e079cf20bb5b4..78e2c47e3d7da7 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
+
+ void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
+ {
+- dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
++ dev->functionality = I2C_FUNC_SLAVE;
+
+ dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
+ DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 1d855258a45dc3..2b8bcd121ffa5d 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -79,6 +79,7 @@
+ * Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
+ * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
+ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
++ * Birch Stream (SOC) 0x5796 32 hard yes yes yes
+ *
+ * Features supported by this driver:
+ * Software PEC no
+@@ -231,6 +232,7 @@
+ #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
++#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
+ #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+ #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
+@@ -498,11 +500,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
+ /* Set block buffer mode */
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+
+- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
+-
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ outb_p(len, SMBHSTDAT0(priv));
++ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
+ for (i = 0; i < len; i++)
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
+ }
+@@ -520,6 +521,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
+ }
+
+ data->block[0] = len;
++ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
+ for (i = 0; i < len; i++)
+ data->block[i + 1] = inb_p(SMBBLKDAT(priv));
+ }
+@@ -679,15 +681,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ return result ? priv->status : -ETIMEDOUT;
+ }
+
+- for (i = 1; i <= len; i++) {
+- if (i == len && read_write == I2C_SMBUS_READ)
+- smbcmd |= SMBHSTCNT_LAST_BYTE;
+- outb_p(smbcmd, SMBHSTCNT(priv));
+-
+- if (i == 1)
+- outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
+- SMBHSTCNT(priv));
++ if (len == 1 && read_write == I2C_SMBUS_READ)
++ smbcmd |= SMBHSTCNT_LAST_BYTE;
++ outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+
++ for (i = 1; i <= len; i++) {
+ status = i801_wait_byte_done(priv);
+ if (status)
+ return status;
+@@ -710,9 +708,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ data->block[0] = len;
+ }
+
+- /* Retrieve/store value in SMBBLKDAT */
+- if (read_write == I2C_SMBUS_READ)
++ if (read_write == I2C_SMBUS_READ) {
+ data->block[i] = inb_p(SMBBLKDAT(priv));
++ if (i == len - 1)
++ outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
++ }
++
+ if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
+
+@@ -1044,13 +1045,14 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { 0, }
+ };
+
+ MODULE_DEVICE_TABLE(pci, i801_ids);
+
+ #if defined CONFIG_X86 && defined CONFIG_DMI
+-static unsigned char apanel_addr;
++static unsigned char apanel_addr __ro_after_init;
+
+ /* Scan the system ROM for the signature "FJKEYINF" */
+ static __init const void __iomem *bios_signature(const void __iomem *bios)
+@@ -1415,7 +1417,6 @@ static void i801_add_mux(struct i801_priv *priv)
+ lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
+ mux_config->gpios[i], "mux", 0);
+ gpiod_add_lookup_table(lookup);
+- priv->lookup = lookup;
+
+ /*
+ * Register the mux device, we use PLATFORM_DEVID_NONE here
+@@ -1429,7 +1430,10 @@ static void i801_add_mux(struct i801_priv *priv)
+ sizeof(struct i2c_mux_gpio_platform_data));
+ if (IS_ERR(priv->mux_pdev)) {
+ gpiod_remove_lookup_table(lookup);
++ devm_kfree(dev, lookup);
+ dev_err(dev, "Failed to register i2c-mux-gpio device\n");
++ } else {
++ priv->lookup = lookup;
+ }
+ }
+
+@@ -1750,8 +1754,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ i801_add_tco(priv);
+
++ /*
++ * adapter.name is used by platform code to find the main I801 adapter
++	 * to instantiate i2c_clients; do not change it.
++ */
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+- "SMBus I801 adapter at %04lx", priv->smba);
++ "SMBus %s adapter at %04lx",
++ (priv->features & FEATURE_IDF) ? "I801 IDF" : "I801",
++ priv->smba);
++
+ err = i2c_add_adapter(&priv->adapter);
+ if (err) {
+ platform_device_unregister(priv->tco_pdev);
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 1775a79aeba2af..0951bfdc89cfa5 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
+ ctl &= ~I2CR_MTX;
+ imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
++
++ /* flag the last byte as processed */
++ i2c_imx_slave_event(i2c_imx,
++ I2C_SLAVE_READ_PROCESSED, &value);
++
+ i2c_imx_slave_finish_op(i2c_imx);
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
+index 1dc1ceaa44439f..8c16c469557475 100644
+--- a/drivers/i2c/busses/i2c-isch.c
++++ b/drivers/i2c/busses/i2c-isch.c
+@@ -99,8 +99,7 @@ static int sch_transaction(void)
+ if (retries > MAX_RETRIES) {
+ dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
+ result = -ETIMEDOUT;
+- }
+- if (temp & 0x04) {
++ } else if (temp & 0x04) {
+ result = -EIO;
+ dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
+ "locked until next hard reset. (sorry!)\n");
+diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
+index 041a76f71a49cc..350ccfbe86340e 100644
+--- a/drivers/i2c/busses/i2c-ocores.c
++++ b/drivers/i2c/busses/i2c-ocores.c
+@@ -442,8 +442,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
+ oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
+
+ /* Init the device */
+- oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
+ oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
++ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
+
+ return 0;
+ }
+@@ -771,8 +771,8 @@ static int ocores_i2c_resume(struct device *dev)
+ return ocores_init(dev, i2c);
+ }
+
+-static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm,
+- ocores_i2c_suspend, ocores_i2c_resume);
++static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm,
++ ocores_i2c_suspend, ocores_i2c_resume);
+
+ static struct platform_driver ocores_i2c_driver = {
+ .probe = ocores_i2c_probe,
+diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
+index 7d54a9f34c74b5..bd8becbdeeb28f 100644
+--- a/drivers/i2c/busses/i2c-pasemi-core.c
++++ b/drivers/i2c/busses/i2c-pasemi-core.c
+@@ -369,6 +369,7 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe);
+
+ irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
+ {
+@@ -378,3 +379,8 @@ irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
+ complete(&smbus->irq_completion);
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(pasemi_irq_handler);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
++MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index a12525b3186bcf..f448505d546827 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -15,7 +15,6 @@
+ #include <linux/ioport.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+-#include <linux/timer.h>
+ #include <linux/completion.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
+ int ret; /* Return value */
+ int mode; /* Interface mode */
+ struct completion complete; /* I/O completion */
+- struct timer_list timer; /* Timeout */
+ u8 * buf; /* Data buffer */
+ int len; /* Length of data buffer */
+ int order; /* RX Bytes to order via TX */
+@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
+ return (timeout <= 0);
+ }
+
+-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
+-{
+- struct timer_list *timer = &alg_data->mif.timer;
+- unsigned long expires = msecs_to_jiffies(alg_data->timeout);
+-
+- if (expires <= 1)
+- expires = 2;
+-
+- del_timer_sync(timer);
+-
+- dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
+- jiffies, expires);
+-
+- timer->expires = jiffies + expires;
+-
+- add_timer(timer);
+-}
+-
+ /**
+ * i2c_pnx_start - start a device
+ * @slave_addr: slave address
+@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ I2C_REG_CTL(alg_data));
+
+- del_timer_sync(&alg_data->mif.timer);
+-
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Waking up xfer routine.\n",
+ __func__);
+@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ I2C_REG_CTL(alg_data));
+
+- /* Stop timer. */
+- del_timer_sync(&alg_data->mif.timer);
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Waking up xfer routine after zero-xfer.\n",
+ __func__);
+@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
+ mcntrl_drmie | mcntrl_daie);
+ iowrite32(ctl, I2C_REG_CTL(alg_data));
+
+- /* Kill timer. */
+- del_timer_sync(&alg_data->mif.timer);
+ complete(&alg_data->mif.complete);
+ }
+ }
+@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ mcntrl_drmie);
+ iowrite32(ctl, I2C_REG_CTL(alg_data));
+
+- /* Stop timer, to prevent timeout. */
+- del_timer_sync(&alg_data->mif.timer);
+ complete(&alg_data->mif.complete);
+ } else if (stat & mstatus_nai) {
+ /* Slave did not acknowledge, generate a STOP */
+@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ /* Our return value. */
+ alg_data->mif.ret = -EIO;
+
+- /* Stop timer, to prevent timeout. */
+- del_timer_sync(&alg_data->mif.timer);
+ complete(&alg_data->mif.complete);
+ } else {
+ /*
+@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
+-static void i2c_pnx_timeout(struct timer_list *t)
++static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
+ {
+- struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
+ u32 ctl;
+
+ dev_err(&alg_data->adapter.dev,
+@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
+ iowrite32(ctl, I2C_REG_CTL(alg_data));
+ wait_reset(alg_data);
+ alg_data->mif.ret = -EIO;
+- complete(&alg_data->mif.complete);
+ }
+
+ static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
+@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ struct i2c_msg *pmsg;
+ int rc = 0, completed = 0, i;
+ struct i2c_pnx_algo_data *alg_data = adap->algo_data;
++ unsigned long time_left;
+ u32 stat;
+
+ dev_dbg(&alg_data->adapter.dev,
+@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
+ __func__, alg_data->mif.mode, alg_data->mif.len);
+
+- i2c_pnx_arm_timer(alg_data);
+
+ /* initialize the completion var */
+ init_completion(&alg_data->mif.complete);
+@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ break;
+
+ /* Wait for completion */
+- wait_for_completion(&alg_data->mif.complete);
++ time_left = wait_for_completion_timeout(&alg_data->mif.complete,
++ alg_data->timeout);
++ if (time_left == 0)
++ i2c_pnx_timeout(alg_data);
+
+ if (!(rc = alg_data->mif.ret))
+ completed++;
+@@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ alg_data->adapter.algo_data = alg_data;
+ alg_data->adapter.nr = pdev->id;
+
+- alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
++ alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
++ if (alg_data->timeout <= 1)
++ alg_data->timeout = 2;
++
+ #ifdef CONFIG_OF
+ alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
+ if (pdev->dev.of_node) {
+@@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ if (IS_ERR(alg_data->clk))
+ return PTR_ERR(alg_data->clk);
+
+- timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
+-
+ snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
+ "%s", pdev->name);
+
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 29be05af826b05..3bd406470940fb 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -264,6 +264,9 @@ struct pxa_i2c {
+ u32 hs_mask;
+
+ struct i2c_bus_recovery_info recovery;
++ struct pinctrl *pinctrl;
++ struct pinctrl_state *pinctrl_default;
++ struct pinctrl_state *pinctrl_recovery;
+ };
+
+ #define _IBMR(i2c) ((i2c)->reg_ibmr)
+@@ -1300,12 +1303,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
+ */
+ gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
+ gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
++
++ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
+ }
+
+ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ {
+ struct pxa_i2c *i2c = adap->algo_data;
+- struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ u32 isr;
+
+ /*
+@@ -1319,7 +1323,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ i2c_pxa_do_reset(i2c);
+ }
+
+- WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
++ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
+
+ dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
+ readl(_IBMR(i2c)), readl(_ISR(i2c)));
+@@ -1341,20 +1345,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
+ if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
+ return 0;
+
+- bri->pinctrl = devm_pinctrl_get(dev);
+- if (PTR_ERR(bri->pinctrl) == -ENODEV) {
+- bri->pinctrl = NULL;
++ i2c->pinctrl = devm_pinctrl_get(dev);
++ if (PTR_ERR(i2c->pinctrl) == -ENODEV)
++ i2c->pinctrl = NULL;
++ if (IS_ERR(i2c->pinctrl))
++ return PTR_ERR(i2c->pinctrl);
++
++ if (!i2c->pinctrl)
++ return 0;
++
++ i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
++ PINCTRL_STATE_DEFAULT);
++ i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
++
++ if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
++ dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
++ PTR_ERR(i2c->pinctrl_default),
++ PTR_ERR(i2c->pinctrl_recovery));
++ return 0;
++ }
++
++ /*
++ * Claiming GPIOs can influence the pinmux state, and may glitch the
++ * I2C bus. Do this carefully.
++ */
++ bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
++ if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
++ return -EPROBE_DEFER;
++ if (IS_ERR(bri->scl_gpiod)) {
++ dev_info(dev, "missing scl gpio recovery information: %pe\n",
++ bri->scl_gpiod);
++ return 0;
++ }
++
++ /*
++ * We have SCL. Pull SCL low and wait a bit so that SDA glitches
++ * have no effect.
++ */
++ gpiod_direction_output(bri->scl_gpiod, 0);
++ udelay(10);
++ bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
++
++ /* Wait a bit in case of a SDA glitch, and then release SCL. */
++ udelay(10);
++ gpiod_direction_output(bri->scl_gpiod, 1);
++
++ if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
++ return -EPROBE_DEFER;
++
++ if (IS_ERR(bri->sda_gpiod)) {
++ dev_info(dev, "missing sda gpio recovery information: %pe\n",
++ bri->sda_gpiod);
+ return 0;
+ }
+- if (IS_ERR(bri->pinctrl))
+- return PTR_ERR(bri->pinctrl);
+
+ bri->prepare_recovery = i2c_pxa_prepare_recovery;
+ bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
++ bri->recover_bus = i2c_generic_scl_recovery;
+
+ i2c->adap.bus_recovery_info = bri;
+
+- return 0;
++ /*
++ * Claiming GPIOs can change the pinmux state, which confuses the
++ * pinctrl since pinctrl's idea of the current setting is unaffected
++ * by the pinmux change caused by claiming the GPIO. Work around that
++ * by switching pinctrl to the GPIO state here. We do it this way to
++ * avoid glitching the I2C bus.
++ */
++ pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
++
++ return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
+ }
+
+ static int i2c_pxa_probe(struct platform_device *dev)
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 229353e96e0954..350f7827fbacaa 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -613,20 +613,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
+
+ peripheral.addr = msgs[i].addr;
+
++ ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
++ &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
++ if (ret)
++ goto err;
++
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+ &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
+ if (ret)
+ goto err;
+- }
+
+- ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+- &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
+- if (ret)
+- goto err;
+-
+- if (msgs[i].flags & I2C_M_RD)
+ dma_async_issue_pending(gi2c->rx_c);
++ }
++
+ dma_async_issue_pending(gi2c->tx_c);
+
+ timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+@@ -819,15 +819,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ init_completion(&gi2c->done);
+ spin_lock_init(&gi2c->lock);
+ platform_set_drvdata(pdev, gi2c);
+- ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, 0,
++ ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN,
+ dev_name(dev), gi2c);
+ if (ret) {
+ dev_err(dev, "Request_irq failed:%d: err:%d\n",
+ gi2c->irq, ret);
+ return ret;
+ }
+- /* Disable the interrupt so that the system can enter low-power mode */
+- disable_irq(gi2c->irq);
+ i2c_set_adapdata(&gi2c->adap, gi2c);
+ gi2c->adap.dev.parent = dev;
+ gi2c->adap.dev.of_node = dev->of_node;
+@@ -857,6 +855,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ ret = geni_se_resources_on(&gi2c->se);
+ if (ret) {
+ dev_err(dev, "Error turning on resources %d\n", ret);
++ clk_disable_unprepare(gi2c->core_clk);
+ return ret;
+ }
+ proto = geni_se_read_proto(&gi2c->se);
+@@ -876,8 +875,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
+ /* FIFO is disabled, so we can only use GPI DMA */
+ gi2c->gpi_mode = true;
+ ret = setup_gpi_dma(gi2c);
+- if (ret)
++ if (ret) {
++ geni_se_resources_off(&gi2c->se);
++ clk_disable_unprepare(gi2c->core_clk);
+ return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
++ }
+
+ dev_dbg(dev, "Using GPI DMA mode for I2C\n");
+ } else {
+@@ -890,6 +892,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
+
+ if (!tx_depth) {
+ dev_err(dev, "Invalid TX FIFO depth\n");
++ geni_se_resources_off(&gi2c->se);
++ clk_disable_unprepare(gi2c->core_clk);
+ return -EINVAL;
+ }
+
+@@ -981,12 +985,17 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
+ return ret;
+
+ ret = clk_prepare_enable(gi2c->core_clk);
+- if (ret)
++ if (ret) {
++ geni_icc_disable(&gi2c->se);
+ return ret;
++ }
+
+ ret = geni_se_resources_on(&gi2c->se);
+- if (ret)
++ if (ret) {
++ clk_disable_unprepare(gi2c->core_clk);
++ geni_icc_disable(&gi2c->se);
+ return ret;
++ }
+
+ enable_irq(gi2c->irq);
+ gi2c->suspended = 0;
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index a32a93f9a60d03..84fdd3f5cc8445 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -114,6 +114,7 @@ enum rcar_i2c_type {
+ I2C_RCAR_GEN1,
+ I2C_RCAR_GEN2,
+ I2C_RCAR_GEN3,
++ I2C_RCAR_GEN4,
+ };
+
+ struct rcar_i2c_priv {
+@@ -223,6 +224,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
+
+ }
+
++static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv)
++{
++ rcar_i2c_write(priv, ICSIER, 0);
++ rcar_i2c_write(priv, ICSSR, 0);
++ rcar_i2c_write(priv, ICSCR, SDBS);
++ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
++}
++
+ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
+ {
+ int ret;
+@@ -386,8 +395,8 @@ static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
+ dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
+ sg_dma_len(&priv->sg), priv->dma_direction);
+
+- /* Gen3 can only do one RXDMA per transfer and we just completed it */
+- if (priv->devtype == I2C_RCAR_GEN3 &&
++ /* Gen3+ can only do one RXDMA per transfer and we just completed it */
++ if (priv->devtype >= I2C_RCAR_GEN3 &&
+ priv->dma_direction == DMA_FROM_DEVICE)
+ priv->flags |= ID_P_NO_RXDMA;
+
+@@ -815,6 +824,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+ {
+ int ret;
+
++ /* Don't reset if a slave instance is currently running */
++ if (priv->slave)
++ return -EISCONN;
++
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
+@@ -841,14 +854,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+ if (ret < 0)
+ goto out;
+
+- /* Gen3 needs a reset before allowing RXDMA once */
+- if (priv->devtype == I2C_RCAR_GEN3) {
+- priv->flags |= ID_P_NO_RXDMA;
+- if (!IS_ERR(priv->rstc)) {
+- ret = rcar_i2c_do_reset(priv);
+- if (ret == 0)
+- priv->flags &= ~ID_P_NO_RXDMA;
+- }
++ /* Gen3+ needs a reset. That also allows RXDMA once */
++ if (priv->devtype >= I2C_RCAR_GEN3) {
++ ret = rcar_i2c_do_reset(priv);
++ if (ret)
++ goto out;
++ priv->flags &= ~ID_P_NO_RXDMA;
+ }
+
+ rcar_i2c_init(priv);
+@@ -975,11 +986,8 @@ static int rcar_unreg_slave(struct i2c_client *slave)
+
+ /* ensure no irq is running before clearing ptr */
+ disable_irq(priv->irq);
+- rcar_i2c_write(priv, ICSIER, 0);
+- rcar_i2c_write(priv, ICSSR, 0);
++ rcar_i2c_reset_slave(priv);
+ enable_irq(priv->irq);
+- rcar_i2c_write(priv, ICSCR, SDBS);
+- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+
+ priv->slave = NULL;
+
+@@ -1032,7 +1040,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
+ { .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
+ { .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
+ { .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
+- { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN3 },
++ { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN4 },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
+@@ -1092,22 +1100,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ goto out_pm_disable;
+ }
+
+- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
++ /* Bring hardware to known state */
++ rcar_i2c_init(priv);
++ rcar_i2c_reset_slave(priv);
+
+ if (priv->devtype < I2C_RCAR_GEN3) {
+ irqflags |= IRQF_NO_THREAD;
+ irqhandler = rcar_i2c_gen2_irq;
+ }
+
+- if (priv->devtype == I2C_RCAR_GEN3) {
+- priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+- if (!IS_ERR(priv->rstc)) {
+- ret = reset_control_status(priv->rstc);
+- if (ret < 0)
+- priv->rstc = ERR_PTR(-ENOTSUPP);
+- }
+- }
+-
+ /* Stay always active when multi-master to keep arbitration working */
+ if (of_property_read_bool(dev->of_node, "multi-master"))
+ priv->flags |= ID_P_PM_BLOCKED;
+@@ -1117,6 +1118,22 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ if (of_property_read_bool(dev->of_node, "smbus"))
+ priv->flags |= ID_P_HOST_NOTIFY;
+
++ /* R-Car Gen3+ needs a reset before every transfer */
++ if (priv->devtype >= I2C_RCAR_GEN3) {
++ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
++ if (IS_ERR(priv->rstc)) {
++ ret = PTR_ERR(priv->rstc);
++ goto out_pm_put;
++ }
++
++ ret = reset_control_status(priv->rstc);
++ if (ret < 0)
++ goto out_pm_put;
++
++ /* hard reset disturbs HostNotify local target, so disable it */
++ priv->flags &= ~ID_P_HOST_NOTIFY;
++ }
++
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto out_pm_put;
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index f0ee8871d5ae1b..e43ff483c56ece 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -313,7 +313,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
+ * frequency with only 62 clock ticks max (31 high, 31 low).
+ * Aim for a duty of 60% LOW, 40% HIGH.
+ */
+- total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
++ total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
+
+ for (cks = 0; cks < 7; cks++) {
+ /*
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index a044ca0c35a193..086fdf262e7b60 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
+ * @clk: function clk for rk3399 or function & Bus clks for others
+ * @pclk: Bus clk for rk3399
+ * @clk_rate_nb: i2c clk rate change notify
++ * @irq: irq number
+ * @t: I2C known timing information
+ * @lock: spinlock for the i2c bus
+ * @wait: the waitqueue to wait for i2c transfer
+@@ -200,6 +201,7 @@ struct rk3x_i2c {
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_rate_nb;
++ int irq;
+
+ /* Settings */
+ struct i2c_timings t;
+@@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
+
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+- rk3x_i2c_start(i2c);
+-
+ if (!polling) {
++ rk3x_i2c_start(i2c);
++
+ timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+ msecs_to_jiffies(WAIT_TIMEOUT));
+ } else {
++ disable_irq(i2c->irq);
++ rk3x_i2c_start(i2c);
++
+ timeout = rk3x_i2c_wait_xfer_poll(i2c);
++
++ enable_irq(i2c->irq);
+ }
+
+ spin_lock_irqsave(&i2c->lock, flags);
+@@ -1288,8 +1295,12 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- /* 27+i: write mask, 11+i: value */
+- value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
++ /* rv1126 i2c2 uses non-sequential write mask 20, value 4 */
++ if (i2c->soc_data == &rv1126_soc_data && bus_nr == 2)
++ value = BIT(20) | BIT(4);
++ else
++ /* 27+i: write mask, 11+i: value */
++ value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
+
+ ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
+ if (ret != 0) {
+@@ -1310,6 +1321,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ i2c->irq = irq;
++
+ platform_set_drvdata(pdev, i2c);
+
+ if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 127eb3805facb5..c324cb3c97e2bd 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -216,8 +216,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
+ int tries;
+
+ for (tries = 50; tries; --tries) {
+- if (readl(i2c->regs + S3C2410_IICCON)
+- & S3C2410_IICCON_IRQPEND) {
++ unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
++
++ if (!(tmp & S3C2410_IICCON_ACKEN)) {
++ /*
++ * Wait a bit for the bus to stabilize,
++ * delay estimated experimentally.
++ */
++ usleep_range(100, 200);
++ return true;
++ }
++ if (tmp & S3C2410_IICCON_IRQPEND) {
+ if (!(readl(i2c->regs + S3C2410_IICSTAT)
+ & S3C2410_IICSTAT_LASTBIT))
+ return true;
+@@ -270,16 +279,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
+
+ stat |= S3C2410_IICSTAT_START;
+ writel(stat, i2c->regs + S3C2410_IICSTAT);
+-
+- if (i2c->quirks & QUIRK_POLL) {
+- while ((i2c->msg_num != 0) && is_ack(i2c)) {
+- i2c_s3c_irq_nextbyte(i2c, stat);
+- stat = readl(i2c->regs + S3C2410_IICSTAT);
+-
+- if (stat & S3C2410_IICSTAT_ARBITR)
+- dev_err(i2c->dev, "deal with arbitration loss\n");
+- }
+- }
+ }
+
+ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
+@@ -686,7 +685,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ struct i2c_msg *msgs, int num)
+ {
+- unsigned long timeout;
++ unsigned long timeout = 0;
+ int ret;
+
+ ret = s3c24xx_i2c_set_master(i2c);
+@@ -706,16 +705,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
+ s3c24xx_i2c_message_start(i2c, msgs);
+
+ if (i2c->quirks & QUIRK_POLL) {
+- ret = i2c->msg_idx;
++ while ((i2c->msg_num != 0) && is_ack(i2c)) {
++ unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
+
+- if (ret != num)
+- dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
++ i2c_s3c_irq_nextbyte(i2c, stat);
+
+- goto out;
++ stat = readl(i2c->regs + S3C2410_IICSTAT);
++ if (stat & S3C2410_IICSTAT_ARBITR)
++ dev_err(i2c->dev, "deal with arbitration loss\n");
++ }
++ } else {
++ timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+ }
+
+- timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
+-
+ ret = i2c->msg_idx;
+
+ /*
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index 0d3c9a041b5611..b4f10ff31102b6 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -357,6 +357,7 @@ struct stm32f7_i2c_dev {
+ u32 dnf_dt;
+ u32 dnf;
+ struct stm32f7_i2c_alert *alert;
++ bool atomic;
+ };
+
+ /*
+@@ -915,7 +916,8 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
+
+ /* Configure DMA or enable RX/TX interrupt */
+ i2c_dev->use_dma = false;
+- if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) {
++ if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN
++ && !i2c_dev->atomic) {
+ ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
+ msg->flags & I2C_M_RD,
+ f7_msg->count, f7_msg->buf,
+@@ -939,6 +941,9 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
+ cr1 |= STM32F7_I2C_CR1_TXDMAEN;
+ }
+
++ if (i2c_dev->atomic)
++ cr1 &= ~STM32F7_I2C_ALL_IRQ_MASK; /* Disable all interrupts */
++
+ /* Configure Start/Repeated Start */
+ cr2 |= STM32F7_I2C_CR2_START;
+
+@@ -1673,7 +1678,22 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
++static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev)
++{
++ ktime_t timeout = ktime_add_ms(ktime_get(), i2c_dev->adap.timeout);
++
++ while (ktime_compare(ktime_get(), timeout) < 0) {
++ udelay(5);
++ stm32f7_i2c_isr_event(0, i2c_dev);
++
++ if (completion_done(&i2c_dev->complete))
++ return 1;
++ }
++
++ return 0;
++}
++
++static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+ {
+ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+@@ -1697,8 +1717,12 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+
+ stm32f7_i2c_xfer_msg(i2c_dev, msgs);
+
+- time_left = wait_for_completion_timeout(&i2c_dev->complete,
+- i2c_dev->adap.timeout);
++ if (!i2c_dev->atomic)
++ time_left = wait_for_completion_timeout(&i2c_dev->complete,
++ i2c_dev->adap.timeout);
++ else
++ time_left = stm32f7_i2c_wait_polling(i2c_dev);
++
+ ret = f7_msg->result;
+ if (ret) {
+ if (i2c_dev->use_dma)
+@@ -1730,6 +1754,24 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+ return (ret < 0) ? ret : num;
+ }
+
++static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg msgs[], int num)
++{
++ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
++
++ i2c_dev->atomic = false;
++ return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
++}
++
++static int stm32f7_i2c_xfer_atomic(struct i2c_adapter *i2c_adap,
++ struct i2c_msg msgs[], int num)
++{
++ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
++
++ i2c_dev->atomic = true;
++ return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
++}
++
+ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+@@ -2098,6 +2140,7 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
+
+ static const struct i2c_algorithm stm32f7_i2c_algo = {
+ .master_xfer = stm32f7_i2c_xfer,
++ .master_xfer_atomic = stm32f7_i2c_xfer_atomic,
+ .smbus_xfer = stm32f7_i2c_smbus_xfer,
+ .functionality = stm32f7_i2c_func,
+ .reg_slave = stm32f7_i2c_reg_slave,
+@@ -2351,7 +2394,7 @@ static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
+ struct stm32f7_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev))
+- clk_disable_unprepare(i2c_dev->clk);
++ clk_disable(i2c_dev->clk);
+
+ return 0;
+ }
+@@ -2362,9 +2405,9 @@ static int __maybe_unused stm32f7_i2c_runtime_resume(struct device *dev)
+ int ret;
+
+ if (!stm32f7_i2c_is_slave_registered(i2c_dev)) {
+- ret = clk_prepare_enable(i2c_dev->clk);
++ ret = clk_enable(i2c_dev->clk);
+ if (ret) {
+- dev_err(dev, "failed to prepare_enable clock\n");
++ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
+ }
+diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+index fa6020dced595d..85e035e7a1d75e 100644
+--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
++++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (clk_freq == 0) {
++ dev_err(dev, "clock-frequency is set to 0 in DT\n");
++ return -EINVAL;
++ }
++
+ if (of_get_child_count(np) > 1) {
+ dev_err(dev, "P2WI only supports one slave device\n");
+ return -EINVAL;
+diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
+index bbea521b05dda7..9bb69a8ab6582e 100644
+--- a/drivers/i2c/busses/i2c-synquacer.c
++++ b/drivers/i2c/busses/i2c-synquacer.c
+@@ -138,7 +138,6 @@ struct synquacer_i2c {
+ int irq;
+ struct device *dev;
+ void __iomem *base;
+- struct clk *pclk;
+ u32 pclkrate;
+ u32 speed_khz;
+ u32 timeout_ms;
+@@ -535,6 +534,7 @@ static const struct i2c_adapter synquacer_i2c_ops = {
+ static int synquacer_i2c_probe(struct platform_device *pdev)
+ {
+ struct synquacer_i2c *i2c;
++ struct clk *pclk;
+ u32 bus_speed;
+ int ret;
+
+@@ -550,17 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
+ device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
+ &i2c->pclkrate);
+
+- i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
+- if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
+- if (!IS_ERR_OR_NULL(i2c->pclk)) {
+- dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
++ pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
++ if (IS_ERR(pclk))
++ return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
++ "failed to get and enable clock\n");
+
+- ret = clk_prepare_enable(i2c->pclk);
+- if (ret)
+- return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n");
+- i2c->pclkrate = clk_get_rate(i2c->pclk);
+- }
++ if (pclk)
++ i2c->pclkrate = clk_get_rate(pclk);
+
+ if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
+ i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
+@@ -615,8 +611,6 @@ static void synquacer_i2c_remove(struct platform_device *pdev)
+ struct synquacer_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adapter);
+- if (!IS_ERR(i2c->pclk))
+- clk_disable_unprepare(i2c->pclk);
+ };
+
+ static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = {
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 920d5a8cbf4c75..91be04b534fe61 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -1804,9 +1804,9 @@ static int tegra_i2c_probe(struct platform_device *pdev)
+ * domain.
+ *
+ * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't
+- * be used for atomic transfers.
++	 * be used for atomic transfers. An ACPI device is not IRQ-safe either.
+ */
+- if (!IS_VI(i2c_dev))
++ if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev))
+ pm_runtime_irq_safe(i2c_dev->dev);
+
+ pm_runtime_enable(i2c_dev->dev);
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 71391b590adaeb..1d68177241a6b3 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -772,14 +772,17 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
+ goto out;
+ }
+
+- xiic_fill_tx_fifo(i2c);
+-
+- /* current message sent and there is space in the fifo */
+- if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
++ if (xiic_tx_space(i2c)) {
++ xiic_fill_tx_fifo(i2c);
++ } else {
++ /* current message fully written */
+ dev_dbg(i2c->adap.dev.parent,
+ "%s end of message sent, nmsgs: %d\n",
+ __func__, i2c->nmsgs);
+- if (i2c->nmsgs > 1) {
++			/* Don't move on to the next message until the TX FIFO empties,
++ * to ensure that a NAK is not missed.
++ */
++ if (i2c->nmsgs > 1 && (pend & XIIC_INTR_TX_EMPTY_MASK)) {
+ i2c->nmsgs--;
+ i2c->tx_msg++;
+ xfer_more = 1;
+@@ -790,11 +793,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
+ "%s Got TX IRQ but no more to do...\n",
+ __func__);
+ }
+- } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
+- /* current frame is sent and is last,
+- * make sure to disable tx half
+- */
+- xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
++ }
+ }
+
+ if (pend & XIIC_INTR_BNB_MASK) {
+@@ -844,23 +843,11 @@ static int xiic_bus_busy(struct xiic_i2c *i2c)
+ return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
+ }
+
+-static int xiic_busy(struct xiic_i2c *i2c)
++static int xiic_wait_not_busy(struct xiic_i2c *i2c)
+ {
+ int tries = 3;
+ int err;
+
+- if (i2c->tx_msg || i2c->rx_msg)
+- return -EBUSY;
+-
+- /* In single master mode bus can only be busy, when in use by this
+- * driver. If the register indicates bus being busy for some reason we
+- * should ignore it, since bus will never be released and i2c will be
+- * stuck forever.
+- */
+- if (i2c->singlemaster) {
+- return 0;
+- }
+-
+ /* for instance if previous transfer was terminated due to TX error
+	 * it might be that the bus is on its way to become available
+ * give it at most 3 ms to wake
+@@ -1104,9 +1091,35 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
+
+ mutex_lock(&i2c->lock);
+
+- ret = xiic_busy(i2c);
+- if (ret)
++ if (i2c->tx_msg || i2c->rx_msg) {
++ dev_err(i2c->adap.dev.parent,
++ "cannot start a transfer while busy\n");
++ ret = -EBUSY;
+ goto out;
++ }
++
++	/* In single-master mode the bus can only be busy when in use by this
++	 * driver. If the register indicates that the bus is busy for some
++	 * reason, we should ignore it, since the bus will never be released
++	 * and i2c would be stuck forever.
++ */
++ if (!i2c->singlemaster) {
++ ret = xiic_wait_not_busy(i2c);
++ if (ret) {
++ /* If the bus is stuck in a busy state, such as due to spurious low
++ * pulses on the bus causing a false start condition to be detected,
++			 * then try to recover by re-initializing the controller, and check
++			 * again whether the bus is still busy.
++ */
++ dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n");
++ ret = xiic_reinit(i2c);
++ if (ret)
++ goto out;
++ ret = xiic_wait_not_busy(i2c);
++ if (ret)
++ goto out;
++ }
++ }
+
+ i2c->tx_msg = msgs;
+ i2c->rx_msg = NULL;
+@@ -1164,10 +1177,8 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ return err;
+
+ err = xiic_start_xfer(i2c, msgs, num);
+- if (err < 0) {
+- dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
++ if (err < 0)
+ goto out;
+- }
+
+ err = wait_for_completion_timeout(&i2c->completion, XIIC_XFER_TIMEOUT);
+ mutex_lock(&i2c->lock);
+@@ -1326,8 +1337,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
+ return 0;
+
+ err_pm_disable:
+- pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
++ pm_runtime_set_suspended(&pdev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index d6037a32866905..14ae0cfc325efb 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -445,6 +445,11 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
+ return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
+ }
+
++static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev)
++{
++ return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev));
++}
++
+ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+ {
+@@ -471,11 +476,17 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ break;
+
+ client = i2c_acpi_find_client_by_adev(adev);
+- if (!client)
+- break;
++ if (client) {
++ i2c_unregister_device(client);
++ put_device(&client->dev);
++ }
++
++ adapter = i2c_acpi_find_adapter_by_adev(adev);
++ if (adapter) {
++ acpi_unbind_one(&adapter->dev);
++ put_device(&adapter->dev);
++ }
+
+- i2c_unregister_device(client);
+- put_device(&client->dev);
+ break;
+ }
+
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 60746652fd5255..943f0021d6a2c1 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -16,6 +16,7 @@
+ #include <linux/acpi.h>
+ #include <linux/clk/clk-conf.h>
+ #include <linux/completion.h>
++#include <linux/debugfs.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+@@ -67,6 +68,8 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
+ static DEFINE_STATIC_KEY_FALSE(i2c_trace_msg_key);
+ static bool is_registered;
+
++static struct dentry *i2c_debugfs_root;
++
+ int i2c_transfer_trace_reg(void)
+ {
+ static_branch_inc(&i2c_trace_msg_key);
+@@ -912,6 +915,27 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ return 0;
+ }
+
++/*
++ * Serialize device instantiation in case a device can be instantiated both
++ * explicitly and by auto-detection.
++ */
++static int i2c_lock_addr(struct i2c_adapter *adap, unsigned short addr,
++ unsigned short flags)
++{
++ if (!(flags & I2C_CLIENT_TEN) &&
++ test_and_set_bit(addr, adap->addrs_in_instantiation))
++ return -EBUSY;
++
++ return 0;
++}
++
++static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
++ unsigned short flags)
++{
++ if (!(flags & I2C_CLIENT_TEN))
++ clear_bit(addr, adap->addrs_in_instantiation);
++}
++
+ /**
+ * i2c_new_client_device - instantiate an i2c device
+ * @adap: the adapter managing the device
+@@ -931,8 +955,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ struct i2c_client *
+ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+ {
+- struct i2c_client *client;
+- int status;
++ struct i2c_client *client;
++ bool need_put = false;
++ int status;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+@@ -958,6 +983,10 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ goto out_err_silent;
+ }
+
++ status = i2c_lock_addr(adap, client->addr, client->flags);
++ if (status)
++ goto out_err_silent;
++
+ /* Check for address business */
+ status = i2c_check_addr_busy(adap, i2c_encode_flags_to_addr(client));
+ if (status)
+@@ -970,7 +999,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ client->dev.fwnode = info->fwnode;
+
+ device_enable_async_suspend(&client->dev);
+- i2c_dev_set_name(adap, client, info);
+
+ if (info->swnode) {
+ status = device_add_software_node(&client->dev, info->swnode);
+@@ -982,6 +1010,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ }
+ }
+
++ i2c_dev_set_name(adap, client, info);
+ status = device_register(&client->dev);
+ if (status)
+ goto out_remove_swnode;
+@@ -989,18 +1018,25 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
+ client->name, dev_name(&client->dev));
+
++ i2c_unlock_addr(adap, client->addr, client->flags);
++
+ return client;
+
+ out_remove_swnode:
+ device_remove_software_node(&client->dev);
++ need_put = true;
+ out_err_put_of_node:
+ of_node_put(info->of_node);
+ out_err:
+ dev_err(&adap->dev,
+ "Failed to register i2c client %s at 0x%02x (%d)\n",
+ client->name, client->addr, status);
++ i2c_unlock_addr(adap, client->addr, client->flags);
+ out_err_silent:
+- kfree(client);
++ if (need_put)
++ put_device(&client->dev);
++ else
++ kfree(client);
+ return ERR_PTR(status);
+ }
+ EXPORT_SYMBOL_GPL(i2c_new_client_device);
+@@ -1059,6 +1095,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode);
+
+ static const struct i2c_device_id dummy_id[] = {
+ { "dummy", 0 },
++ { "smbus_host_notify", 0 },
+ { },
+ };
+
+@@ -1517,6 +1554,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+ goto out_list;
+ }
+
++ adap->debugfs = debugfs_create_dir(dev_name(&adap->dev), i2c_debugfs_root);
++
+ res = i2c_setup_smbus_alert(adap);
+ if (res)
+ goto out_reg;
+@@ -1556,6 +1595,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+ return 0;
+
+ out_reg:
++ debugfs_remove_recursive(adap->debugfs);
+ init_completion(&adap->dev_released);
+ device_unregister(&adap->dev);
+ wait_for_completion(&adap->dev_released);
+@@ -1757,6 +1797,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
+
+ i2c_host_notify_irq_teardown(adap);
+
++ debugfs_remove_recursive(adap->debugfs);
++
+ /* wait until all references to the device are gone
+ *
+ * FIXME: This is old code and should ideally be replaced by an
+@@ -2054,6 +2096,8 @@ static int __init i2c_init(void)
+
+ is_registered = true;
+
++ i2c_debugfs_root = debugfs_create_dir("i2c", NULL);
++
+ #ifdef CONFIG_I2C_COMPAT
+ i2c_adapter_compat_class = class_compat_register("i2c-adapter");
+ if (!i2c_adapter_compat_class) {
+@@ -2092,6 +2136,7 @@ static void __exit i2c_exit(void)
+ #ifdef CONFIG_I2C_COMPAT
+ class_compat_unregister(i2c_adapter_compat_class);
+ #endif
++ debugfs_remove_recursive(i2c_debugfs_root);
+ bus_unregister(&i2c_bus_type);
+ tracepoint_synchronize_unregister();
+ }
+@@ -2182,13 +2227,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ * Returns negative errno, else the number of messages executed.
+ *
+ * Adapter lock must be held when calling this function. No debug logging
+- * takes place. adap->algo->master_xfer existence isn't checked.
++ * takes place.
+ */
+ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ {
+ unsigned long orig_jiffies;
+ int ret, try;
+
++ if (!adap->algo->master_xfer) {
++ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (WARN_ON(!msgs || num < 1))
+ return -EINVAL;
+
+@@ -2255,11 +2305,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ {
+ int ret;
+
+- if (!adap->algo->master_xfer) {
+- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+- return -EOPNOTSUPP;
+- }
+-
+ /* REVISIT the fault reporting model here is weak:
+ *
+ * - When we get an error after receiving N bytes from a slave,
+diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
+index 1247e6e6e97517..36587f38dff3d2 100644
+--- a/drivers/i2c/i2c-core.h
++++ b/drivers/i2c/i2c-core.h
+@@ -3,6 +3,7 @@
+ * i2c-core.h - interfaces internal to the I2C framework
+ */
+
++#include <linux/kconfig.h>
+ #include <linux/rwsem.h>
+
+ struct i2c_devinfo {
+@@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ */
+ static inline bool i2c_in_atomic_xfer_mode(void)
+ {
+- return system_state > SYSTEM_RUNNING && irqs_disabled();
++ return system_state > SYSTEM_RUNNING &&
++ (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled());
+ }
+
+ static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index a01b59e3599b53..7d337380a05d99 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+- rdwr_pa = memdup_user(rdwr_arg.msgs,
+- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
++ rdwr_pa = memdup_array_user(rdwr_arg.msgs,
++ rdwr_arg.nmsgs, sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
+
+diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
+index a49642bbae4b70..23a11e4e925672 100644
+--- a/drivers/i2c/i2c-slave-testunit.c
++++ b/drivers/i2c/i2c-slave-testunit.c
+@@ -118,9 +118,19 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ queue_delayed_work(system_long_wq, &tu->worker,
+ msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
+ }
+- fallthrough;
++
++ /*
++		 * Reset reg_idx so that the work does not get queued again on a
++ * STOP after a following read message. But do not clear TU regs
++ * here because we still need them in the workqueue!
++ */
++ tu->reg_idx = 0;
++ break;
+
+ case I2C_SLAVE_WRITE_REQUESTED:
++ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
++ return -EBUSY;
++
+ memset(tu->regs, 0, TU_NUM_REGS);
+ tu->reg_idx = 0;
+ break;
+diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
+index 138c3f5e0093a5..6520e097439120 100644
+--- a/drivers/i2c/i2c-smbus.c
++++ b/drivers/i2c/i2c-smbus.c
+@@ -34,6 +34,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct alert_data *data = addrp;
+ struct i2c_driver *driver;
++ int ret;
+
+ if (!client || client->addr != data->addr)
+ return 0;
+@@ -47,16 +48,47 @@ static int smbus_do_alert(struct device *dev, void *addrp)
+ device_lock(dev);
+ if (client->dev.driver) {
+ driver = to_i2c_driver(client->dev.driver);
+- if (driver->alert)
++ if (driver->alert) {
++ /* Stop iterating after we find the device */
+ driver->alert(client, data->type, data->data);
+- else
++ ret = -EBUSY;
++ } else {
+ dev_warn(&client->dev, "no driver alert()!\n");
+- } else
++ ret = -EOPNOTSUPP;
++ }
++ } else {
+ dev_dbg(&client->dev, "alert with no driver\n");
++ ret = -ENODEV;
++ }
++ device_unlock(dev);
++
++ return ret;
++}
++
++/* Same as above, but call back all drivers with alert handler */
++
++static int smbus_do_alert_force(struct device *dev, void *addrp)
++{
++ struct i2c_client *client = i2c_verify_client(dev);
++ struct alert_data *data = addrp;
++ struct i2c_driver *driver;
++
++ if (!client || (client->flags & I2C_CLIENT_TEN))
++ return 0;
++
++ /*
++ * Drivers should either disable alerts, or provide at least
++ * a minimal handler. Lock so the driver won't change.
++ */
++ device_lock(dev);
++ if (client->dev.driver) {
++ driver = to_i2c_driver(client->dev.driver);
++ if (driver->alert)
++ driver->alert(client, data->type, data->data);
++ }
+ device_unlock(dev);
+
+- /* Stop iterating after we find the device */
+- return -EBUSY;
++ return 0;
+ }
+
+ /*
+@@ -67,6 +99,7 @@ static irqreturn_t smbus_alert(int irq, void *d)
+ {
+ struct i2c_smbus_alert *alert = d;
+ struct i2c_client *ara;
++ unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */
+
+ ara = alert->ara;
+
+@@ -94,8 +127,25 @@ static irqreturn_t smbus_alert(int irq, void *d)
+ data.addr, data.data);
+
+ /* Notify driver for the device which issued the alert */
+- device_for_each_child(&ara->adapter->dev, &data,
+- smbus_do_alert);
++ status = device_for_each_child(&ara->adapter->dev, &data,
++ smbus_do_alert);
++ /*
++ * If we read the same address more than once, and the alert
++ * was not handled by a driver, it won't do any good to repeat
++ * the loop because it will never terminate. Try again, this
++ * time calling the alert handlers of all devices connected to
++ * the bus, and abort the loop afterwards. If this helps, we
++ * are all set. If it doesn't, there is nothing else we can do,
++ * so we might as well abort the loop.
++ * Note: This assumes that a driver with alert handler handles
++ * the alert properly and clears it if necessary.
++ */
++ if (data.addr == prev_addr && status != -EBUSY) {
++ device_for_each_child(&ara->adapter->dev, &data,
++ smbus_do_alert_force);
++ break;
++ }
++ prev_addr = data.addr;
+ }
+
+ return IRQ_HANDLED;
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 87283e4a46076e..0e9ff5500a7771 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1525,9 +1525,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+- if (ret)
++ if (ret) {
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
++ put_device(&desc->dev->dev);
++ }
+ }
+ }
+
+diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
+index 9332ae5f641903..235235613c1b99 100644
+--- a/drivers/i3c/master/dw-i3c-master.c
++++ b/drivers/i3c/master/dw-i3c-master.c
+@@ -1163,8 +1163,10 @@ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
+ global = reg == 0xffffffff;
+ reg &= ~BIT(idx);
+ } else {
+- global = reg == 0;
++ bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);
++
+ reg |= BIT(idx);
++ global = (reg == 0xffffffff) && hj_rejected;
+ }
+ writel(reg, master->regs + IBI_SIR_REQ_REJECT);
+
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 49551db71bc96b..d8426847c2837b 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -76,7 +76,8 @@
+ #define PRESCL_CTRL0 0x14
+ #define PRESCL_CTRL0_I2C(x) ((x) << 16)
+ #define PRESCL_CTRL0_I3C(x) (x)
+-#define PRESCL_CTRL0_MAX GENMASK(9, 0)
++#define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0)
++#define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0)
+
+ #define PRESCL_CTRL1 0x18
+ #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
+@@ -191,7 +192,7 @@
+ #define SLV_STATUS1_HJ_DIS BIT(18)
+ #define SLV_STATUS1_MR_DIS BIT(17)
+ #define SLV_STATUS1_PROT_ERR BIT(16)
+-#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
++#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
+ #define SLV_STATUS1_HAS_DA BIT(8)
+ #define SLV_STATUS1_DDR_RX_FULL BIT(7)
+ #define SLV_STATUS1_DDR_TX_FULL BIT(6)
+@@ -1233,7 +1234,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+ return -EINVAL;
+
+ pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
+- if (pres > PRESCL_CTRL0_MAX)
++ if (pres > PRESCL_CTRL0_I3C_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
+@@ -1246,7 +1247,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+ max_i2cfreq = bus->scl_rate.i2c;
+
+ pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
+- if (pres > PRESCL_CTRL0_MAX)
++ if (pres > PRESCL_CTRL0_I2C_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
+@@ -1623,13 +1624,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ /* Device ID0 is reserved to describe this master. */
+ master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ master->free_rr_slots = GENMASK(master->maxdevs, 1);
++ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
++ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ val = readl(master->regs + CONF_STATUS1);
+ master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
+@@ -1665,6 +1666,7 @@ static void cdns_i3c_master_remove(struct platform_device *pdev)
+ {
+ struct cdns_i3c_master *master = platform_get_drvdata(pdev);
+
++ cancel_work_sync(&master->hj_work);
+ i3c_master_unregister(&master->base);
+
+ clk_disable_unprepare(master->sysclk);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+index 97bb49ff5b53bd..47b9b4d4ed3fc0 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
+ return -EOPNOTSUPP;
+ }
+
+- /* use a bitmap for faster free slot search */
+- hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+- if (!hci->DAT_data)
+- return -ENOMEM;
+-
+- /* clear them */
+- for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+- dat_w0_write(dat_idx, 0);
+- dat_w1_write(dat_idx, 0);
++ if (!hci->DAT_data) {
++ /* use a bitmap for faster free slot search */
++ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
++ if (!hci->DAT_data)
++ return -ENOMEM;
++
++ /* clear them */
++ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
++ dat_w0_write(dat_idx, 0);
++ dat_w1_write(dat_idx, 0);
++ }
+ }
+
+ return 0;
+@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+ {
+ unsigned int dat_idx;
++ int ret;
+
++ if (!hci->DAT_data) {
++ ret = hci_dat_v1_init(hci);
++ if (ret)
++ return ret;
++ }
+ dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ if (dat_idx >= hci->DAT_entries)
+ return -ENOENT;
+@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+- __clear_bit(dat_idx, hci->DAT_data);
++ if (hci->DAT_data)
++ __clear_bit(dat_idx, hci->DAT_data);
+ }
+
+ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 2990ac9eaade77..edc3a69bfe31fa 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -291,7 +291,10 @@ static int hci_dma_init(struct i3c_hci *hci)
+
+ rh->ibi_chunk_sz = dma_get_cache_alignment();
+ rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
+- BUG_ON(rh->ibi_chunk_sz > 256);
++ if (rh->ibi_chunk_sz > 256) {
++ ret = -EINVAL;
++ goto err_out;
++ }
+
+ ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
+ ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
+@@ -345,6 +348,8 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
+
+ for (i = 0; i < n; i++) {
+ xfer = xfer_list + i;
++ if (!xfer->data)
++ continue;
+ dma_unmap_single(&hci->master.dev,
+ xfer->data_dma, xfer->data_len,
+ xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+@@ -450,10 +455,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ /*
+ * We're deep in it if ever this condition is ever met.
+ * Hardware might still be writing to memory, etc.
+- * Better suspend the world than risking silent corruption.
+ */
+ dev_crit(&hci->master.dev, "unable to abort the ring\n");
+- BUG();
++ WARN_ON(1);
+ }
+
+ for (i = 0; i < n; i++) {
+@@ -734,7 +738,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+ unsigned int i;
+ bool handled = false;
+
+- for (i = 0; mask && i < 8; i++) {
++ for (i = 0; mask && i < rings->total; i++) {
+ struct hci_rh_data *rh;
+ u32 status;
+
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 8f8295acdadb3a..f344f8733f8324 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -93,6 +93,7 @@
+ #define SVC_I3C_MINTMASKED 0x098
+ #define SVC_I3C_MERRWARN 0x09C
+ #define SVC_I3C_MERRWARN_NACK BIT(2)
++#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
+ #define SVC_I3C_MDMACTRL 0x0A0
+ #define SVC_I3C_MDATACTRL 0x0AC
+ #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+@@ -133,7 +134,8 @@ struct svc_i3c_cmd {
+ u8 *in;
+ const void *out;
+ unsigned int len;
+- unsigned int read_len;
++ unsigned int actual_len;
++ struct i3c_priv_xfer *xfer;
+ bool continued;
+ };
+
+@@ -175,6 +177,7 @@ struct svc_i3c_regs_save {
+ * @ibi.slots: Available IBI slots
+ * @ibi.tbq_slot: To be queued IBI slot
+ * @ibi.lock: IBI lock
++ * @lock: Transfer lock; protects against races between the IBI work thread and transfer callbacks from the master
+ */
+ struct svc_i3c_master {
+ struct i3c_master_controller base;
+@@ -203,6 +206,7 @@ struct svc_i3c_master {
+ /* Prevent races within IBI handlers */
+ spinlock_t lock;
+ } ibi;
++ struct mutex lock;
+ };
+
+ /**
+@@ -225,6 +229,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
+ if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
++
++ /* Ignore timeout error */
++ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
++ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
++ mstatus, merrwarn);
++ return false;
++ }
++
+ dev_err(master->dev,
+ "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+@@ -331,6 +343,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ struct i3c_ibi_slot *slot;
+ unsigned int count;
+ u32 mdatactrl;
++ int ret, val;
+ u8 *buf;
+
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+@@ -340,6 +353,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ slot->len = 0;
+ buf = slot->data;
+
++ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
++ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
++ if (ret) {
++ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
++ return ret;
++ }
++
+ while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
+ slot->len < SVC_I3C_FIFO_SIZE) {
+ mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+@@ -384,6 +404,20 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ u32 status, val;
+ int ret;
+
++ mutex_lock(&master->lock);
++ /*
++ * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
++ * readl_relaxed_poll_timeout() to return immediately. Consequently,
++ * ibitype will be 0 since it was last updated only after the 8th SCL
++ * cycle, leading to missed client IBI handlers.
++ *
++ * A typical scenario is when IBIWON occurs and bus arbitration is lost
++ * at svc_i3c_master_priv_xfers().
++ *
++ * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
++ */
++ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -394,12 +428,10 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for IBIWON\n");
++ svc_i3c_master_emit_stop(master);
+ goto reenable_ibis;
+ }
+
+- /* Clear the interrupt status */
+- writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+-
+ status = readl(master->regs + SVC_I3C_MSTATUS);
+ ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
+ ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
+@@ -460,12 +492,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+
+ reenable_ibis:
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
++ mutex_unlock(&master->lock);
+ }
+
+ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+ {
+ struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+- u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
++ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
+
+ if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ return IRQ_NONE;
+@@ -1002,26 +1035,78 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
+ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ bool rnw, unsigned int xfer_type, u8 addr,
+ u8 *in, const u8 *out, unsigned int xfer_len,
+- unsigned int *read_len, bool continued)
++ unsigned int *actual_len, bool continued)
+ {
++ int retry = 2;
+ u32 reg;
+ int ret;
+
+- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+- xfer_type |
+- SVC_I3C_MCTRL_IBIRESP_NACK |
+- SVC_I3C_MCTRL_DIR(rnw) |
+- SVC_I3C_MCTRL_ADDR(addr) |
+- SVC_I3C_MCTRL_RDTERM(*read_len),
+- master->regs + SVC_I3C_MCTRL);
++	/* Clear the SVC_I3C_MINT_IBIWON write-1-to-clear bit */
++ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
+- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
++
++ while (retry--) {
++ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
++ xfer_type |
++ SVC_I3C_MCTRL_IBIRESP_NACK |
++ SVC_I3C_MCTRL_DIR(rnw) |
++ SVC_I3C_MCTRL_ADDR(addr) |
++ SVC_I3C_MCTRL_RDTERM(*actual_len),
++ master->regs + SVC_I3C_MCTRL);
++
++ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+- if (ret)
+- goto emit_stop;
++ if (ret)
++ goto emit_stop;
+
+- if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
+- ret = -ENXIO;
++ if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
++ /*
++ * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
++ * If the Controller chooses to start an I3C Message with an I3C Dynamic
++ * Address, then special provisions shall be made because that same I3C
++ * Target may be initiating an IBI or a Controller Role Request. So, one of
++ * three things may happen: (skip 1, 2)
++ *
++ * 3. The Addresses match and the RnW bits also match, and so neither
++ * Controller nor Target will ACK since both are expecting the other side to
++ * provide ACK. As a result, each side might think it had "won" arbitration,
++ * but neither side would continue, as each would subsequently see that the
++ * other did not provide ACK.
++ * ...
++ * For either value of RnW: Due to the NACK, the Controller shall defer the
++ * Private Write or Private Read, and should typically transmit the Target
++ * Address again after a Repeated START (i.e., the next one or any one prior
++ * to a STOP in the Frame). Since the Address Header following a Repeated
++ * START is not arbitrated, the Controller will always win (see Section
++ * 5.1.2.2.4).
++ */
++ if (retry && addr != 0x7e) {
++ writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
++ } else {
++ ret = -ENXIO;
++ *actual_len = 0;
++ goto emit_stop;
++ }
++ } else {
++ break;
++ }
++ }
++
++ /*
++ * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
++ * with I3C Target Address.
++ *
++ * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
++ * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
++ * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
++ * a Hot-Join Request has been made.
++ *
++	 * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens,
++	 * return failure and yield to the event handlers above.
++ */
++ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
++ ret = -EAGAIN;
++ *actual_len = 0;
+ goto emit_stop;
+ }
+
+@@ -1033,7 +1118,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ goto emit_stop;
+
+ if (rnw)
+- *read_len = ret;
++ *actual_len = ret;
+
+ ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
+@@ -1115,8 +1200,12 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
+
+ ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
+ cmd->addr, cmd->in, cmd->out,
+- cmd->len, &cmd->read_len,
++ cmd->len, &cmd->actual_len,
+ cmd->continued);
++	/* cmd->xfer is NULL for I2C and CCC transfers */
++ if (cmd->xfer)
++ cmd->xfer->actual_len = cmd->actual_len;
++
+ if (ret)
+ break;
+ }
+@@ -1201,12 +1290,14 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ cmd->in = NULL;
+ cmd->out = buf;
+ cmd->len = xfer_len;
+- cmd->read_len = 0;
++ cmd->actual_len = 0;
+ cmd->continued = false;
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ kfree(buf);
+@@ -1219,7 +1310,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+ {
+ unsigned int xfer_len = ccc->dests[0].payload.len;
+- unsigned int read_len = ccc->rnw ? xfer_len : 0;
++ unsigned int actual_len = ccc->rnw ? xfer_len : 0;
+ struct svc_i3c_xfer *xfer;
+ struct svc_i3c_cmd *cmd;
+ int ret;
+@@ -1237,7 +1328,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ cmd->in = NULL;
+ cmd->out = &ccc->id;
+ cmd->len = 1;
+- cmd->read_len = 0;
++ cmd->actual_len = 0;
+ cmd->continued = true;
+
+ /* Directed message */
+@@ -1247,15 +1338,17 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
+ cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
+ cmd->len = xfer_len;
+- cmd->read_len = read_len;
++ cmd->actual_len = actual_len;
+ cmd->continued = false;
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+- if (cmd->read_len != xfer_len)
+- ccc->dests[0].payload.len = cmd->read_len;
++ if (cmd->actual_len != xfer_len)
++ ccc->dests[0].payload.len = cmd->actual_len;
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1300,18 +1393,21 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ for (i = 0; i < nxfers; i++) {
+ struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+
++ cmd->xfer = &xfers[i];
+ cmd->addr = master->addrs[data->index];
+ cmd->rnw = xfers[i].rnw;
+ cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
+ cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
+ cmd->len = xfers[i].len;
+- cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
++ cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1) < nxfers;
+ }
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1343,13 +1439,15 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ cmd->in = cmd->rnw ? xfers[i].buf : NULL;
+ cmd->out = cmd->rnw ? NULL : xfers[i].buf;
+ cmd->len = xfers[i].len;
+- cmd->read_len = cmd->rnw ? xfers[i].len : 0;
++ cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
+ cmd->continued = (i + 1 < nxfers);
+ }
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1540,6 +1638,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+
+ INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
++ mutex_init(&master->lock);
++
+ ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ if (ret)
+@@ -1597,6 +1697,7 @@ static void svc_i3c_master_remove(struct platform_device *pdev)
+ {
+ struct svc_i3c_master *master = platform_get_drvdata(pdev);
+
++ cancel_work_sync(&master->hj_work);
+ i3c_master_unregister(&master->base);
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index ea5a6a14c5537a..45500d2d5b4bb5 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -131,11 +131,12 @@ static unsigned int mwait_substates __initdata;
+ #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
+
+ static __always_inline int __intel_idle(struct cpuidle_device *dev,
+- struct cpuidle_driver *drv, int index)
++ struct cpuidle_driver *drv,
++ int index, bool irqoff)
+ {
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+- unsigned long ecx = 1; /* break on interrupt flag */
++ unsigned long ecx = 1*irqoff; /* break on interrupt flag */
+
+ mwait_idle_with_hints(eax, ecx);
+
+@@ -159,19 +160,13 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
+ static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+ {
+- return __intel_idle(dev, drv, index);
++ return __intel_idle(dev, drv, index, true);
+ }
+
+ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+ {
+- int ret;
+-
+- raw_local_irq_enable();
+- ret = __intel_idle(dev, drv, index);
+- raw_local_irq_disable();
+-
+- return ret;
++ return __intel_idle(dev, drv, index, false);
+ }
+
+ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+@@ -184,7 +179,7 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+ if (smt_active)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+- ret = __intel_idle(dev, drv, index);
++ ret = __intel_idle(dev, drv, index, true);
+
+ if (smt_active)
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+@@ -196,7 +191,7 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+ {
+ fpu_idle_fpregs();
+- return __intel_idle(dev, drv, index);
++ return __intel_idle(dev, drv, index, true);
+ }
+
+ /**
+diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
+index 52eb46ef84c1b2..9c351ffc7bed6d 100644
+--- a/drivers/iio/Kconfig
++++ b/drivers/iio/Kconfig
+@@ -71,6 +71,15 @@ config IIO_TRIGGERED_EVENT
+ help
+ Provides helper functions for setting up triggered events.
+
++config IIO_BACKEND
++ tristate
++ help
++ Framework to handle complex IIO aggregate devices. The typical
++ architecture that can make use of this framework is to have one
++ device as the frontend device which can be "linked" against one or
++ multiple backend devices. The framework then makes it easy to get
++ and control such backend devices.
++
+ source "drivers/iio/accel/Kconfig"
+ source "drivers/iio/adc/Kconfig"
+ source "drivers/iio/addac/Kconfig"
+diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
+index 9622347a1c1bef..0ba0e1521ba4f0 100644
+--- a/drivers/iio/Makefile
++++ b/drivers/iio/Makefile
+@@ -13,6 +13,7 @@ obj-$(CONFIG_IIO_GTS_HELPER) += industrialio-gts-helper.o
+ obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o
+ obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o
+ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
++obj-$(CONFIG_IIO_BACKEND) += industrialio-backend.o
+
+ obj-y += accel/
+ obj-y += adc/
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index b6b45d359f287f..df60986d98369e 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -219,10 +219,12 @@ config BMA400
+
+ config BMA400_I2C
+ tristate
++ select REGMAP_I2C
+ depends on BMA400
+
+ config BMA400_SPI
+ tristate
++ select REGMAP_SPI
+ depends on BMA400
+
+ config BMC150_ACCEL
+@@ -323,6 +325,8 @@ config DMARD10
+ config FXLS8962AF
+ tristate
+ depends on I2C || !I2C # cannot be built-in for modular I2C
++ select IIO_BUFFER
++ select IIO_KFIFO_BUF
+
+ config FXLS8962AF_I2C
+ tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
+@@ -411,6 +415,8 @@ config IIO_ST_ACCEL_SPI_3AXIS
+
+ config IIO_KX022A
+ tristate
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+
+ config IIO_KX022A_SPI
+ tristate "Kionix KX022A tri-axis digital accelerometer SPI interface"
+diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
+index 90b7ae6d42b770..484fe2e9fb1742 100644
+--- a/drivers/iio/accel/adxl367.c
++++ b/drivers/iio/accel/adxl367.c
+@@ -1429,9 +1429,11 @@ static int adxl367_verify_devid(struct adxl367_state *st)
+ unsigned int val;
+ int ret;
+
+- ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val,
+- val == ADXL367_DEVID_AD, 1000, 10000);
++ ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val);
+ if (ret)
++ return dev_err_probe(st->dev, ret, "Failed to read dev id\n");
++
++ if (val != ADXL367_DEVID_AD)
+ return dev_err_probe(st->dev, -ENODEV,
+ "Invalid dev id 0x%02X, expected 0x%02X\n",
+ val, ADXL367_DEVID_AD);
+@@ -1510,6 +1512,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
+ if (ret)
+ return ret;
+
++ fsleep(15000);
++
+ ret = adxl367_verify_devid(st);
+ if (ret)
+ return ret;
+diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
+index b595fe94f3a321..62c74bdc0d77bf 100644
+--- a/drivers/iio/accel/adxl367_i2c.c
++++ b/drivers/iio/accel/adxl367_i2c.c
+@@ -11,7 +11,7 @@
+
+ #include "adxl367.h"
+
+-#define ADXL367_I2C_FIFO_DATA 0x42
++#define ADXL367_I2C_FIFO_DATA 0x18
+
+ struct adxl367_i2c_state {
+ struct regmap *regmap;
+diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
+index 4ea3c6718ed49d..971fc60efef01b 100644
+--- a/drivers/iio/accel/kionix-kx022a.c
++++ b/drivers/iio/accel/kionix-kx022a.c
+@@ -273,17 +273,17 @@ static const unsigned int kx022a_odrs[] = {
+ * (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
+ * => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed
+ * in low-power mode(?) )
+- * => +/-2G => 4 / 2^16 * 9,80665 * 10^6 (to scale to micro)
+- * => +/-2G - 598.550415
+- * +/-4G - 1197.10083
+- * +/-8G - 2394.20166
+- * +/-16G - 4788.40332
++ * => +/-2G => 4 / 2^16 * 9,80665
++ * => +/-2G - 0.000598550415
++ * +/-4G - 0.00119710083
++ * +/-8G - 0.00239420166
++ * +/-16G - 0.00478840332
+ */
+ static const int kx022a_scale_table[][2] = {
+- { 598, 550415 },
+- { 1197, 100830 },
+- { 2394, 201660 },
+- { 4788, 403320 },
++ { 0, 598550 },
++ { 0, 1197101 },
++ { 0, 2394202 },
++ { 0, 4788403 },
+ };
+
+ static int kx022a_read_avail(struct iio_dev *indio_dev,
+@@ -302,7 +302,7 @@ static int kx022a_read_avail(struct iio_dev *indio_dev,
+ *vals = (const int *)kx022a_scale_table;
+ *length = ARRAY_SIZE(kx022a_scale_table) *
+ ARRAY_SIZE(kx022a_scale_table[0]);
+- *type = IIO_VAL_INT_PLUS_MICRO;
++ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+@@ -366,6 +366,20 @@ static int kx022a_turn_on_unlock(struct kx022a_data *data)
+ return ret;
+ }
+
++static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
++ struct iio_chan_spec const *chan,
++ long mask)
++{
++ switch (mask) {
++ case IIO_CHAN_INFO_SCALE:
++ return IIO_VAL_INT_PLUS_NANO;
++ case IIO_CHAN_INFO_SAMP_FREQ:
++ return IIO_VAL_INT_PLUS_MICRO;
++ default:
++ return -EINVAL;
++ }
++}
++
+ static int kx022a_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+@@ -510,7 +524,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
+
+ kx022a_reg2scale(regval, val, val2);
+
+- return IIO_VAL_INT_PLUS_MICRO;
++ return IIO_VAL_INT_PLUS_NANO;
+ }
+
+ return -EINVAL;
+@@ -712,6 +726,7 @@ static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
+ static const struct iio_info kx022a_info = {
+ .read_raw = &kx022a_read_raw,
+ .write_raw = &kx022a_write_raw,
++ .write_raw_get_fmt = &kx022a_write_raw_get_fmt,
+ .read_avail = &kx022a_read_avail,
+
+ .validate_trigger = iio_validate_own_trigger,
+diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
+index 75d142bc14b4f2..49e30b87732f59 100644
+--- a/drivers/iio/accel/mxc4005.c
++++ b/drivers/iio/accel/mxc4005.c
+@@ -5,6 +5,7 @@
+ * Copyright (c) 2014, Intel Corporation.
+ */
+
++#include <linux/delay.h>
+ #include <linux/module.h>
+ #include <linux/i2c.h>
+ #include <linux/iio/iio.h>
+@@ -27,11 +28,16 @@
+ #define MXC4005_REG_ZOUT_UPPER 0x07
+ #define MXC4005_REG_ZOUT_LOWER 0x08
+
++#define MXC4005_REG_INT_MASK0 0x0A
++
+ #define MXC4005_REG_INT_MASK1 0x0B
+ #define MXC4005_REG_INT_MASK1_BIT_DRDYE 0x01
+
++#define MXC4005_REG_INT_CLR0 0x00
++
+ #define MXC4005_REG_INT_CLR1 0x01
+ #define MXC4005_REG_INT_CLR1_BIT_DRDYC 0x01
++#define MXC4005_REG_INT_CLR1_SW_RST 0x10
+
+ #define MXC4005_REG_CONTROL 0x0D
+ #define MXC4005_REG_CONTROL_MASK_FSR GENMASK(6, 5)
+@@ -39,6 +45,9 @@
+
+ #define MXC4005_REG_DEVICE_ID 0x0E
+
++/* The datasheet does not specify a reset time; this is a conservative guess */
++#define MXC4005_RESET_TIME_US 2000
++
+ enum mxc4005_axis {
+ AXIS_X,
+ AXIS_Y,
+@@ -62,6 +71,8 @@ struct mxc4005_data {
+ s64 timestamp __aligned(8);
+ } scan;
+ bool trigger_enabled;
++ unsigned int control;
++ unsigned int int_mask1;
+ };
+
+ /*
+@@ -113,7 +124,9 @@ static bool mxc4005_is_readable_reg(struct device *dev, unsigned int reg)
+ static bool mxc4005_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
++ case MXC4005_REG_INT_CLR0:
+ case MXC4005_REG_INT_CLR1:
++ case MXC4005_REG_INT_MASK0:
+ case MXC4005_REG_INT_MASK1:
+ case MXC4005_REG_CONTROL:
+ return true;
+@@ -330,23 +343,20 @@ static int mxc4005_set_trigger_state(struct iio_trigger *trig,
+ {
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mxc4005_data *data = iio_priv(indio_dev);
++ unsigned int val;
+ int ret;
+
+ mutex_lock(&data->mutex);
+- if (state) {
+- ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
+- MXC4005_REG_INT_MASK1_BIT_DRDYE);
+- } else {
+- ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
+- ~MXC4005_REG_INT_MASK1_BIT_DRDYE);
+- }
+
++ val = state ? MXC4005_REG_INT_MASK1_BIT_DRDYE : 0;
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, val);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ dev_err(data->dev, "failed to update reg_int_mask1");
+ return ret;
+ }
+
++ data->int_mask1 = val;
+ data->trigger_enabled = state;
+ mutex_unlock(&data->mutex);
+
+@@ -382,6 +392,21 @@ static int mxc4005_chip_init(struct mxc4005_data *data)
+
+ dev_dbg(data->dev, "MXC4005 chip id %02x\n", reg);
+
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
++ MXC4005_REG_INT_CLR1_SW_RST);
++ if (ret < 0)
++ return dev_err_probe(data->dev, ret, "resetting chip\n");
++
++ fsleep(MXC4005_RESET_TIME_US);
++
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
++ if (ret < 0)
++ return dev_err_probe(data->dev, ret, "writing INT_MASK0\n");
++
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, 0);
++ if (ret < 0)
++ return dev_err_probe(data->dev, ret, "writing INT_MASK1\n");
++
+ return 0;
+ }
+
+@@ -469,6 +494,58 @@ static int mxc4005_probe(struct i2c_client *client)
+ return devm_iio_device_register(&client->dev, indio_dev);
+ }
+
++static int mxc4005_suspend(struct device *dev)
++{
++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
++ struct mxc4005_data *data = iio_priv(indio_dev);
++ int ret;
++
++ /* Save control to restore it on resume */
++ ret = regmap_read(data->regmap, MXC4005_REG_CONTROL, &data->control);
++ if (ret < 0)
++ dev_err(data->dev, "failed to read reg_control\n");
++
++ return ret;
++}
++
++static int mxc4005_resume(struct device *dev)
++{
++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
++ struct mxc4005_data *data = iio_priv(indio_dev);
++ int ret;
++
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
++ MXC4005_REG_INT_CLR1_SW_RST);
++ if (ret) {
++ dev_err(data->dev, "failed to reset chip: %d\n", ret);
++ return ret;
++ }
++
++ fsleep(MXC4005_RESET_TIME_US);
++
++ ret = regmap_write(data->regmap, MXC4005_REG_CONTROL, data->control);
++ if (ret) {
++ dev_err(data->dev, "failed to restore control register\n");
++ return ret;
++ }
++
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
++ if (ret) {
++ dev_err(data->dev, "failed to restore interrupt 0 mask\n");
++ return ret;
++ }
++
++ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, data->int_mask1);
++ if (ret) {
++ dev_err(data->dev, "failed to restore interrupt 1 mask\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static DEFINE_SIMPLE_DEV_PM_OPS(mxc4005_pm_ops, mxc4005_suspend, mxc4005_resume);
++
+ static const struct acpi_device_id mxc4005_acpi_match[] = {
+ {"MXC4005", 0},
+ {"MXC6655", 0},
+@@ -476,6 +553,13 @@ static const struct acpi_device_id mxc4005_acpi_match[] = {
+ };
+ MODULE_DEVICE_TABLE(acpi, mxc4005_acpi_match);
+
++static const struct of_device_id mxc4005_of_match[] = {
++ { .compatible = "memsic,mxc4005", },
++ { .compatible = "memsic,mxc6655", },
++ { },
++};
++MODULE_DEVICE_TABLE(of, mxc4005_of_match);
++
+ static const struct i2c_device_id mxc4005_id[] = {
+ {"mxc4005", 0},
+ {"mxc6655", 0},
+@@ -487,6 +571,8 @@ static struct i2c_driver mxc4005_driver = {
+ .driver = {
+ .name = MXC4005_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
++ .of_match_table = mxc4005_of_match,
++ .pm = pm_sleep_ptr(&mxc4005_pm_ops),
+ },
+ .probe = mxc4005_probe,
+ .id_table = mxc4005_id,
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index 517b3db114b8ee..e46817cb5581cf 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -275,7 +275,7 @@ config AD799X
+ config AD9467
+ tristate "Analog Devices AD9467 High Speed ADC driver"
+ depends on SPI
+- depends on ADI_AXI_ADC
++ select IIO_BACKEND
+ help
+ Say yes here to build support for Analog Devices:
+ * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
+@@ -292,8 +292,8 @@ config ADI_AXI_ADC
+ select IIO_BUFFER
+ select IIO_BUFFER_HW_CONSUMER
+ select IIO_BUFFER_DMAENGINE
+- depends on HAS_IOMEM
+- depends on OF
++ select REGMAP_MMIO
++ select IIO_BACKEND
+ help
+ Say yes here to build support for Analog Devices Generic
+ AXI ADC IP core. The IP core is used for interfacing with
+@@ -1286,6 +1286,8 @@ config TI_ADS8344
+ config TI_ADS8688
+ tristate "Texas Instruments ADS8688"
+ depends on SPI
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS8684 and
+ and ADS8688 ADC chips
+@@ -1296,6 +1298,8 @@ config TI_ADS8688
+ config TI_ADS124S08
+ tristate "Texas Instruments ADS124S08"
+ depends on SPI
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS124S08
+ and ADS124S06 ADC chips
+@@ -1330,6 +1334,7 @@ config TI_AM335X_ADC
+ config TI_LMP92064
+ tristate "Texas Instruments LMP92064 ADC driver"
+ depends on SPI
++ select REGMAP_SPI
+ help
+ Say yes here to build support for the LMP92064 Precision Current and Voltage
+ sensor.
+diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
+index 5a5dd5e87ffc44..e650ebd167b038 100644
+--- a/drivers/iio/adc/ad4130.c
++++ b/drivers/iio/adc/ad4130.c
+@@ -1826,7 +1826,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
+ {
+ struct device *dev = &st->spi->dev;
+ struct device_node *of_node = dev_of_node(dev);
+- struct clk_init_data init;
++ struct clk_init_data init = {};
+ const char *clk_name;
+ struct clk *clk;
+ int ret;
+@@ -1900,10 +1900,14 @@ static int ad4130_setup(struct iio_dev *indio_dev)
+ return ret;
+
+ /*
+- * Configure all GPIOs for output. If configured, the interrupt function
+- * of P2 takes priority over the GPIO out function.
++ * Configure unused GPIOs for output. If configured, the interrupt
++ * function of P2 takes priority over the GPIO out function.
+ */
+- val = AD4130_IO_CONTROL_GPIO_CTRL_MASK;
++ val = 0;
++ for (i = 0; i < AD4130_MAX_GPIOS; i++)
++ if (st->pins_fn[i + AD4130_AIN2_P1] == AD4130_PIN_FN_NONE)
++ val |= FIELD_PREP(AD4130_IO_CONTROL_GPIO_CTRL_MASK, BIT(i));
++
+ val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel);
+
+ ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val);
+diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
+index 8e252cde735b99..76002b91c86a4a 100644
+--- a/drivers/iio/adc/ad7091r-base.c
++++ b/drivers/iio/adc/ad7091r-base.c
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/bitops.h>
++#include <linux/bitfield.h>
+ #include <linux/iio/events.h>
+ #include <linux/iio/iio.h>
+ #include <linux/interrupt.h>
+@@ -28,6 +29,7 @@
+ #define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff)
+
+ /* AD7091R_REG_CONF */
++#define AD7091R_REG_CONF_ALERT_EN BIT(4)
+ #define AD7091R_REG_CONF_AUTO BIT(8)
+ #define AD7091R_REG_CONF_CMD BIT(10)
+
+@@ -49,6 +51,27 @@ struct ad7091r_state {
+ struct mutex lock; /*lock to prevent concurent reads */
+ };
+
++const struct iio_event_spec ad7091r_events[] = {
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_RISING,
++ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
++ BIT(IIO_EV_INFO_ENABLE),
++ },
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_FALLING,
++ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
++ BIT(IIO_EV_INFO_ENABLE),
++ },
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_EITHER,
++ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
++ },
++};
++EXPORT_SYMBOL_NS_GPL(ad7091r_events, IIO_AD7091R);
++
+ static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+ {
+ int ret, conf;
+@@ -168,14 +191,148 @@ static int ad7091r_read_raw(struct iio_dev *iio_dev,
+ return ret;
+ }
+
++static int ad7091r_read_event_config(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++ int val, ret;
++
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ &val);
++ if (ret)
++ return ret;
++ return val != AD7091R_HIGH_LIMIT;
++ case IIO_EV_DIR_FALLING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ &val);
++ if (ret)
++ return ret;
++ return val != AD7091R_LOW_LIMIT;
++ default:
++ return -EINVAL;
++ }
++}
++
++static int ad7091r_write_event_config(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir, int state)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++
++ if (state) {
++ return regmap_set_bits(st->map, AD7091R_REG_CONF,
++ AD7091R_REG_CONF_ALERT_EN);
++ } else {
++ /*
++ * Set thresholds either to 0 or to 2^12 - 1 as appropriate to
++ * prevent alerts and thus disable event generation.
++ */
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ AD7091R_HIGH_LIMIT);
++ case IIO_EV_DIR_FALLING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ AD7091R_LOW_LIMIT);
++ default:
++ return -EINVAL;
++ }
++ }
++}
++
++static int ad7091r_read_event_value(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ enum iio_event_info info, int *val, int *val2)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++ int ret;
++
++ switch (info) {
++ case IIO_EV_INFO_VALUE:
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ val);
++ if (ret)
++ return ret;
++ return IIO_VAL_INT;
++ case IIO_EV_DIR_FALLING:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ val);
++ if (ret)
++ return ret;
++ return IIO_VAL_INT;
++ default:
++ return -EINVAL;
++ }
++ case IIO_EV_INFO_HYSTERESIS:
++ ret = regmap_read(st->map,
++ AD7091R_REG_CH_HYSTERESIS(chan->channel),
++ val);
++ if (ret)
++ return ret;
++ return IIO_VAL_INT;
++ default:
++ return -EINVAL;
++ }
++}
++
++static int ad7091r_write_event_value(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ enum iio_event_info info, int val, int val2)
++{
++ struct ad7091r_state *st = iio_priv(indio_dev);
++
++ switch (info) {
++ case IIO_EV_INFO_VALUE:
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
++ val);
++ case IIO_EV_DIR_FALLING:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_LOW_LIMIT(chan->channel),
++ val);
++ default:
++ return -EINVAL;
++ }
++ case IIO_EV_INFO_HYSTERESIS:
++ return regmap_write(st->map,
++ AD7091R_REG_CH_HYSTERESIS(chan->channel),
++ val);
++ default:
++ return -EINVAL;
++ }
++}
++
+ static const struct iio_info ad7091r_info = {
+ .read_raw = ad7091r_read_raw,
++ .read_event_config = &ad7091r_read_event_config,
++ .write_event_config = &ad7091r_write_event_config,
++ .read_event_value = &ad7091r_read_event_value,
++ .write_event_value = &ad7091r_write_event_value,
+ };
+
+ static irqreturn_t ad7091r_event_handler(int irq, void *private)
+ {
+- struct ad7091r_state *st = (struct ad7091r_state *) private;
+- struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
++ struct iio_dev *iio_dev = private;
++ struct ad7091r_state *st = iio_priv(iio_dev);
+ unsigned int i, read_val;
+ int ret;
+ s64 timestamp = iio_get_time_ns(iio_dev);
+@@ -232,9 +389,14 @@ int ad7091r_probe(struct device *dev, const char *name,
+ iio_dev->channels = chip_info->channels;
+
+ if (irq) {
++ ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
++ AD7091R_REG_CONF_ALERT_EN, BIT(4));
++ if (ret)
++ return ret;
++
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ ad7091r_event_handler,
+- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
++ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
+ if (ret)
+ return ret;
+ }
+@@ -243,7 +405,14 @@ int ad7091r_probe(struct device *dev, const char *name,
+ if (IS_ERR(st->vref)) {
+ if (PTR_ERR(st->vref) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
++
+ st->vref = NULL;
++ /* Enable internal vref */
++ ret = regmap_set_bits(st->map, AD7091R_REG_CONF,
++ AD7091R_REG_CONF_INT_VREF);
++ if (ret)
++ return dev_err_probe(st->dev, ret,
++				     "Error enabling internal reference\n");
+ } else {
+ ret = regulator_enable(st->vref);
+ if (ret)
+diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
+index 509748aef9b196..b9e1c8bf3440a4 100644
+--- a/drivers/iio/adc/ad7091r-base.h
++++ b/drivers/iio/adc/ad7091r-base.h
+@@ -8,6 +8,12 @@
+ #ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+ #define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+
++#define AD7091R_REG_CONF_INT_VREF BIT(0)
++
++/* AD7091R_REG_CH_LIMIT */
++#define AD7091R_HIGH_LIMIT 0xFFF
++#define AD7091R_LOW_LIMIT 0x0
++
+ struct device;
+ struct ad7091r_state;
+
+@@ -17,6 +23,8 @@ struct ad7091r_chip_info {
+ unsigned int vref_mV;
+ };
+
++extern const struct iio_event_spec ad7091r_events[3];
++
+ extern const struct regmap_config ad7091r_regmap_config;
+
+ int ad7091r_probe(struct device *dev, const char *name,
+diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
+index 2f048527b7b786..dae98c95ebb87b 100644
+--- a/drivers/iio/adc/ad7091r5.c
++++ b/drivers/iio/adc/ad7091r5.c
+@@ -12,26 +12,6 @@
+
+ #include "ad7091r-base.h"
+
+-static const struct iio_event_spec ad7091r5_events[] = {
+- {
+- .type = IIO_EV_TYPE_THRESH,
+- .dir = IIO_EV_DIR_RISING,
+- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+- BIT(IIO_EV_INFO_ENABLE),
+- },
+- {
+- .type = IIO_EV_TYPE_THRESH,
+- .dir = IIO_EV_DIR_FALLING,
+- .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+- BIT(IIO_EV_INFO_ENABLE),
+- },
+- {
+- .type = IIO_EV_TYPE_THRESH,
+- .dir = IIO_EV_DIR_EITHER,
+- .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+- },
+-};
+-
+ #define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+@@ -44,10 +24,10 @@ static const struct iio_event_spec ad7091r5_events[] = {
+ .scan_type.realbits = bits, \
+ }
+ static const struct iio_chan_spec ad7091r5_channels_irq[] = {
+- AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+- AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+- AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+- AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
++ AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++ AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++ AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
++ AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ };
+
+ static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index b9b206fcd748f5..d2fe0269b6d3af 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -14,7 +14,8 @@
+ #include <linux/kernel.h>
+ #include <linux/kfifo.h>
+ #include <linux/module.h>
+-#include <linux/of.h>
++#include <linux/mod_devicetable.h>
++#include <linux/property.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/spi/spi.h>
+
+@@ -146,15 +147,18 @@ struct ad7124_chip_info {
+ struct ad7124_channel_config {
+ bool live;
+ unsigned int cfg_slot;
+- enum ad7124_ref_sel refsel;
+- bool bipolar;
+- bool buf_positive;
+- bool buf_negative;
+- unsigned int vref_mv;
+- unsigned int pga_bits;
+- unsigned int odr;
+- unsigned int odr_sel_bits;
+- unsigned int filter_type;
++ /* Following fields are used to compare equality. */
++ struct_group(config_props,
++ enum ad7124_ref_sel refsel;
++ bool bipolar;
++ bool buf_positive;
++ bool buf_negative;
++ unsigned int vref_mv;
++ unsigned int pga_bits;
++ unsigned int odr;
++ unsigned int odr_sel_bits;
++ unsigned int filter_type;
++ );
+ };
+
+ struct ad7124_channel {
+@@ -333,11 +337,12 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
+ ptrdiff_t cmp_size;
+ int i;
+
+- cmp_size = (u8 *)&cfg->live - (u8 *)cfg;
++ cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
+ for (i = 0; i < st->num_channels; i++) {
+ cfg_aux = &st->channels[i].cfg;
+
+- if (cfg_aux->live && !memcmp(cfg, cfg_aux, cmp_size))
++ if (cfg_aux->live &&
++ !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
+ return cfg_aux;
+ }
+
+@@ -761,6 +766,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
+ if (ret < 0)
+ return ret;
+
++ fsleep(200);
+ timeout = 100;
+ do {
+ ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
+@@ -807,22 +813,19 @@ static int ad7124_check_chip_id(struct ad7124_state *st)
+ return 0;
+ }
+
+-static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+- struct device_node *np)
++static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
++ struct device *dev)
+ {
+ struct ad7124_state *st = iio_priv(indio_dev);
+ struct ad7124_channel_config *cfg;
+ struct ad7124_channel *channels;
+- struct device_node *child;
+ struct iio_chan_spec *chan;
+ unsigned int ain[2], channel = 0, tmp;
+ int ret;
+
+- st->num_channels = of_get_available_child_count(np);
+- if (!st->num_channels) {
+- dev_err(indio_dev->dev.parent, "no channel children\n");
+- return -ENODEV;
+- }
++ st->num_channels = device_get_child_node_count(dev);
++ if (!st->num_channels)
++ return dev_err_probe(dev, -ENODEV, "no channel children\n");
+
+ chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels,
+ sizeof(*chan), GFP_KERNEL);
+@@ -838,39 +841,37 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+ indio_dev->num_channels = st->num_channels;
+ st->channels = channels;
+
+- for_each_available_child_of_node(np, child) {
+- cfg = &st->channels[channel].cfg;
+-
+- ret = of_property_read_u32(child, "reg", &channel);
++ device_for_each_child_node_scoped(dev, child) {
++ ret = fwnode_property_read_u32(child, "reg", &channel);
+ if (ret)
+- goto err;
++ return ret;
+
+- if (channel >= indio_dev->num_channels) {
+- dev_err(indio_dev->dev.parent,
++ if (channel >= indio_dev->num_channels)
++ return dev_err_probe(dev, -EINVAL,
+ "Channel index >= number of channels\n");
+- ret = -EINVAL;
+- goto err;
+- }
+
+- ret = of_property_read_u32_array(child, "diff-channels",
+- ain, 2);
++ ret = fwnode_property_read_u32_array(child, "diff-channels",
++ ain, 2);
+ if (ret)
+- goto err;
++ return ret;
+
+ st->channels[channel].nr = channel;
+ st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) |
+ AD7124_CHANNEL_AINM(ain[1]);
+
+- cfg->bipolar = of_property_read_bool(child, "bipolar");
++ cfg = &st->channels[channel].cfg;
++ cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
+
+- ret = of_property_read_u32(child, "adi,reference-select", &tmp);
++ ret = fwnode_property_read_u32(child, "adi,reference-select", &tmp);
+ if (ret)
+ cfg->refsel = AD7124_INT_REF;
+ else
+ cfg->refsel = tmp;
+
+- cfg->buf_positive = of_property_read_bool(child, "adi,buffered-positive");
+- cfg->buf_negative = of_property_read_bool(child, "adi,buffered-negative");
++ cfg->buf_positive =
++ fwnode_property_read_bool(child, "adi,buffered-positive");
++ cfg->buf_negative =
++ fwnode_property_read_bool(child, "adi,buffered-negative");
+
+ chan[channel] = ad7124_channel_template;
+ chan[channel].address = channel;
+@@ -880,10 +881,6 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+ }
+
+ return 0;
+-err:
+- of_node_put(child);
+-
+- return ret;
+ }
+
+ static int ad7124_setup(struct ad7124_state *st)
+@@ -943,9 +940,7 @@ static int ad7124_probe(struct spi_device *spi)
+ struct iio_dev *indio_dev;
+ int i, ret;
+
+- info = of_device_get_match_data(&spi->dev);
+- if (!info)
+- info = (void *)spi_get_device_id(spi)->driver_data;
++ info = spi_get_device_match_data(spi);
+ if (!info)
+ return -ENODEV;
+
+@@ -965,7 +960,7 @@ static int ad7124_probe(struct spi_device *spi)
+ if (ret < 0)
+ return ret;
+
+- ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node);
++ ret = ad7124_parse_channel_config(indio_dev, &spi->dev);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 468c2656d2be7b..98648c679a55c1 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
+ ret = ad7266_read_single(st, val, chan->address);
+ iio_device_release_direct_mode(indio_dev);
+
++ if (ret < 0)
++ return ret;
+ *val = (*val >> 2) & 0xfff;
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val,
+diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
+index 1928d9ae5bcffd..4d755ffc3f4148 100644
+--- a/drivers/iio/adc/ad7606.c
++++ b/drivers/iio/adc/ad7606.c
+@@ -49,7 +49,7 @@ static const unsigned int ad7616_oversampling_avail[8] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ };
+
+-static int ad7606_reset(struct ad7606_state *st)
++int ad7606_reset(struct ad7606_state *st)
+ {
+ if (st->gpio_reset) {
+ gpiod_set_value(st->gpio_reset, 1);
+@@ -60,6 +60,7 @@ static int ad7606_reset(struct ad7606_state *st)
+
+ return -ENODEV;
+ }
++EXPORT_SYMBOL_NS_GPL(ad7606_reset, IIO_AD7606);
+
+ static int ad7606_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+@@ -88,31 +89,6 @@ static int ad7606_read_samples(struct ad7606_state *st)
+ {
+ unsigned int num = st->chip_info->num_channels - 1;
+ u16 *data = st->data;
+- int ret;
+-
+- /*
+- * The frstdata signal is set to high while and after reading the sample
+- * of the first channel and low for all other channels. This can be used
+- * to check that the incoming data is correctly aligned. During normal
+- * operation the data should never become unaligned, but some glitch or
+- * electrostatic discharge might cause an extra read or clock cycle.
+- * Monitoring the frstdata signal allows to recover from such failure
+- * situations.
+- */
+-
+- if (st->gpio_frstdata) {
+- ret = st->bops->read_block(st->dev, 1, data);
+- if (ret)
+- return ret;
+-
+- if (!gpiod_get_value(st->gpio_frstdata)) {
+- ad7606_reset(st);
+- return -EIO;
+- }
+-
+- data++;
+- num--;
+- }
+
+ return st->bops->read_block(st->dev, num, data);
+ }
+@@ -239,9 +215,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
+ struct ad7606_state *st = iio_priv(indio_dev);
+ DECLARE_BITMAP(values, 3);
+
+- values[0] = val;
++ values[0] = val & GENMASK(2, 0);
+
+- gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
++ gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
+ st->gpio_os->info, values);
+
+ /* AD7616 requires a reset to update value */
+@@ -446,7 +422,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
+ return PTR_ERR(st->gpio_range);
+
+ st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
+- GPIOD_OUT_HIGH);
++ GPIOD_OUT_LOW);
+ if (IS_ERR(st->gpio_standby))
+ return PTR_ERR(st->gpio_standby);
+
+@@ -689,7 +665,7 @@ static int ad7606_suspend(struct device *dev)
+
+ if (st->gpio_standby) {
+ gpiod_set_value(st->gpio_range, 1);
+- gpiod_set_value(st->gpio_standby, 0);
++ gpiod_set_value(st->gpio_standby, 1);
+ }
+
+ return 0;
+diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
+index 0c6a88cc469585..6649e84d25de64 100644
+--- a/drivers/iio/adc/ad7606.h
++++ b/drivers/iio/adc/ad7606.h
+@@ -151,6 +151,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+ const char *name, unsigned int id,
+ const struct ad7606_bus_ops *bops);
+
++int ad7606_reset(struct ad7606_state *st);
++
+ enum ad7606_supported_device_ids {
+ ID_AD7605_4,
+ ID_AD7606_8,
+diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
+index d8408052262e4d..6bc587b20f05da 100644
+--- a/drivers/iio/adc/ad7606_par.c
++++ b/drivers/iio/adc/ad7606_par.c
+@@ -7,6 +7,7 @@
+
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/platform_device.h>
+ #include <linux/types.h>
+ #include <linux/err.h>
+@@ -21,8 +22,29 @@ static int ad7606_par16_read_block(struct device *dev,
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+- insw((unsigned long)st->base_address, buf, count);
+
++ /*
++ * On the parallel interface, the frstdata signal is set to high while
++ * and after reading the sample of the first channel and low for all
++ * other channels. This can be used to check that the incoming data is
++ * correctly aligned. During normal operation the data should never
++ * become unaligned, but some glitch or electrostatic discharge might
++ * cause an extra read or clock cycle. Monitoring the frstdata signal
++ * allows to recover from such failure situations.
++ */
++ int num = count;
++ u16 *_buf = buf;
++
++ if (st->gpio_frstdata) {
++ insw((unsigned long)st->base_address, _buf, 1);
++ if (!gpiod_get_value(st->gpio_frstdata)) {
++ ad7606_reset(st);
++ return -EIO;
++ }
++ _buf++;
++ num--;
++ }
++ insw((unsigned long)st->base_address, _buf, num);
+ return 0;
+ }
+
+@@ -35,8 +57,28 @@ static int ad7606_par8_read_block(struct device *dev,
+ {
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7606_state *st = iio_priv(indio_dev);
+-
+- insb((unsigned long)st->base_address, buf, count * 2);
++ /*
++ * On the parallel interface, the frstdata signal is set to high while
++ * and after reading the sample of the first channel and low for all
++ * other channels. This can be used to check that the incoming data is
++ * correctly aligned. During normal operation the data should never
++ * become unaligned, but some glitch or electrostatic discharge might
++ * cause an extra read or clock cycle. Monitoring the frstdata signal
++ * allows to recover from such failure situations.
++ */
++ int num = count;
++ u16 *_buf = buf;
++
++ if (st->gpio_frstdata) {
++ insb((unsigned long)st->base_address, _buf, 2);
++ if (!gpiod_get_value(st->gpio_frstdata)) {
++ ad7606_reset(st);
++ return -EIO;
++ }
++ _buf++;
++ num--;
++ }
++ insb((unsigned long)st->base_address, _buf, num * 2);
+
+ return 0;
+ }
+diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
+index 263a778bcf2539..287a0591533b6a 100644
+--- a/drivers/iio/adc/ad7606_spi.c
++++ b/drivers/iio/adc/ad7606_spi.c
+@@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
+ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
+ {
+ struct ad7606_state *st = iio_priv(indio_dev);
+- unsigned long os[3] = {1};
++ DECLARE_BITMAP(os, 3);
+
++ bitmap_fill(os, 3);
+ /*
+ * Software mode is enabled when all three oversampling
+ * pins are set to high. If oversampling gpios are defined
+@@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
+ * otherwise, they must be hardwired to VDD
+ */
+ if (st->gpio_os) {
+- gpiod_set_array_value(ARRAY_SIZE(os),
++ gpiod_set_array_value(st->gpio_os->ndescs,
+ st->gpio_os->desc, st->gpio_os->info, os);
+ }
+ /* OS of 128 and 256 are available only in software mode */
+diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
+index 39eccc28debe4c..4c08f8a04f9620 100644
+--- a/drivers/iio/adc/ad9467.c
++++ b/drivers/iio/adc/ad9467.c
+@@ -4,8 +4,9 @@
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+-
++#include <linux/cleanup.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/device.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -16,13 +17,12 @@
+ #include <linux/of.h>
+
+
++#include <linux/iio/backend.h>
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+
+ #include <linux/clk.h>
+
+-#include <linux/iio/adc/adi-axi-adc.h>
+-
+ /*
+ * ADI High-Speed ADC common spi interface registers
+ * See Application-Note AN-877:
+@@ -100,28 +100,29 @@
+ #define AD9467_DEF_OUTPUT_MODE 0x08
+ #define AD9467_REG_VREF_MASK 0x0F
+
+-enum {
+- ID_AD9265,
+- ID_AD9434,
+- ID_AD9467,
+-};
+-
+ struct ad9467_chip_info {
+- struct adi_axi_adc_chip_info axi_adc_info;
+- unsigned int default_output_mode;
+- unsigned int vref_mask;
++ const char *name;
++ unsigned int id;
++ const struct iio_chan_spec *channels;
++ unsigned int num_channels;
++ const unsigned int (*scale_table)[2];
++ int num_scales;
++ unsigned long max_rate;
++ unsigned int default_output_mode;
++ unsigned int vref_mask;
+ };
+
+-#define to_ad9467_chip_info(_info) \
+- container_of(_info, struct ad9467_chip_info, axi_adc_info)
+-
+ struct ad9467_state {
++ const struct ad9467_chip_info *info;
++ struct iio_backend *back;
+ struct spi_device *spi;
+ struct clk *clk;
+ unsigned int output_mode;
++ unsigned int (*scales)[2];
+
+ struct gpio_desc *pwrdown_gpio;
+- struct gpio_desc *reset_gpio;
++	/* ensure a consistent state is obtained across multiple related accesses */
++ struct mutex lock;
+ };
+
+ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+@@ -154,18 +155,20 @@ static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
+ return spi_write(spi, buf, ARRAY_SIZE(buf));
+ }
+
+-static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
++static int ad9467_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+ {
+- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ struct ad9467_state *st = iio_priv(indio_dev);
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ if (readval == NULL) {
++ guard(mutex)(&st->lock);
+ ret = ad9467_spi_write(spi, reg, writeval);
+- ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+- AN877_ADC_TRANSFER_SYNC);
+- return ret;
++ if (ret)
++ return ret;
++ return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
++ AN877_ADC_TRANSFER_SYNC);
+ }
+
+ ret = ad9467_spi_read(spi, reg);
+@@ -192,10 +195,10 @@ static const unsigned int ad9467_scale_table[][2] = {
+ {2300, 8}, {2400, 9}, {2500, 10},
+ };
+
+-static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
++static void __ad9467_get_scale(struct ad9467_state *st, int index,
+ unsigned int *val, unsigned int *val2)
+ {
+- const struct adi_axi_adc_chip_info *info = conv->chip_info;
++ const struct ad9467_chip_info *info = st->info;
+ const struct iio_chan_spec *chan = &info->channels[0];
+ unsigned int tmp;
+
+@@ -212,6 +215,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ .channel = _chan, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
++ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = _sign, \
+@@ -221,62 +225,60 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ }
+
+ static const struct iio_chan_spec ad9434_channels[] = {
+- AD9467_CHAN(0, 0, 12, 'S'),
++ AD9467_CHAN(0, 0, 12, 's'),
+ };
+
+ static const struct iio_chan_spec ad9467_channels[] = {
+- AD9467_CHAN(0, 0, 16, 'S'),
++ AD9467_CHAN(0, 0, 16, 's'),
+ };
+
+-static const struct ad9467_chip_info ad9467_chip_tbl[] = {
+- [ID_AD9265] = {
+- .axi_adc_info = {
+- .id = CHIPID_AD9265,
+- .max_rate = 125000000UL,
+- .scale_table = ad9265_scale_table,
+- .num_scales = ARRAY_SIZE(ad9265_scale_table),
+- .channels = ad9467_channels,
+- .num_channels = ARRAY_SIZE(ad9467_channels),
+- },
+- .default_output_mode = AD9265_DEF_OUTPUT_MODE,
+- .vref_mask = AD9265_REG_VREF_MASK,
+- },
+- [ID_AD9434] = {
+- .axi_adc_info = {
+- .id = CHIPID_AD9434,
+- .max_rate = 500000000UL,
+- .scale_table = ad9434_scale_table,
+- .num_scales = ARRAY_SIZE(ad9434_scale_table),
+- .channels = ad9434_channels,
+- .num_channels = ARRAY_SIZE(ad9434_channels),
+- },
+- .default_output_mode = AD9434_DEF_OUTPUT_MODE,
+- .vref_mask = AD9434_REG_VREF_MASK,
+- },
+- [ID_AD9467] = {
+- .axi_adc_info = {
+- .id = CHIPID_AD9467,
+- .max_rate = 250000000UL,
+- .scale_table = ad9467_scale_table,
+- .num_scales = ARRAY_SIZE(ad9467_scale_table),
+- .channels = ad9467_channels,
+- .num_channels = ARRAY_SIZE(ad9467_channels),
+- },
+- .default_output_mode = AD9467_DEF_OUTPUT_MODE,
+- .vref_mask = AD9467_REG_VREF_MASK,
+- },
++static const struct ad9467_chip_info ad9467_chip_tbl = {
++ .name = "ad9467",
++ .id = CHIPID_AD9467,
++ .max_rate = 250000000UL,
++ .scale_table = ad9467_scale_table,
++ .num_scales = ARRAY_SIZE(ad9467_scale_table),
++ .channels = ad9467_channels,
++ .num_channels = ARRAY_SIZE(ad9467_channels),
++ .default_output_mode = AD9467_DEF_OUTPUT_MODE,
++ .vref_mask = AD9467_REG_VREF_MASK,
++};
++
++static const struct ad9467_chip_info ad9434_chip_tbl = {
++ .name = "ad9434",
++ .id = CHIPID_AD9434,
++ .max_rate = 500000000UL,
++ .scale_table = ad9434_scale_table,
++ .num_scales = ARRAY_SIZE(ad9434_scale_table),
++ .channels = ad9434_channels,
++ .num_channels = ARRAY_SIZE(ad9434_channels),
++ .default_output_mode = AD9434_DEF_OUTPUT_MODE,
++ .vref_mask = AD9434_REG_VREF_MASK,
+ };
+
+-static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
++static const struct ad9467_chip_info ad9265_chip_tbl = {
++ .name = "ad9265",
++ .id = CHIPID_AD9265,
++ .max_rate = 125000000UL,
++ .scale_table = ad9265_scale_table,
++ .num_scales = ARRAY_SIZE(ad9265_scale_table),
++ .channels = ad9467_channels,
++ .num_channels = ARRAY_SIZE(ad9467_channels),
++ .default_output_mode = AD9265_DEF_OUTPUT_MODE,
++ .vref_mask = AD9265_REG_VREF_MASK,
++};
++
++static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
+ {
+- const struct adi_axi_adc_chip_info *info = conv->chip_info;
+- const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
+- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ const struct ad9467_chip_info *info = st->info;
+ unsigned int i, vref_val;
++ int ret;
+
+- vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
++ ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
++ if (ret < 0)
++ return ret;
+
+- vref_val &= info1->vref_mask;
++ vref_val = ret & info->vref_mask;
+
+ for (i = 0; i < info->num_scales; i++) {
+ if (vref_val == info->scale_table[i][1])
+@@ -286,45 +288,48 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+ if (i == info->num_scales)
+ return -ERANGE;
+
+- __ad9467_get_scale(conv, i, val, val2);
++ __ad9467_get_scale(st, i, val, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+-static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
++static int ad9467_set_scale(struct ad9467_state *st, int val, int val2)
+ {
+- const struct adi_axi_adc_chip_info *info = conv->chip_info;
+- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ const struct ad9467_chip_info *info = st->info;
+ unsigned int scale_val[2];
+ unsigned int i;
++ int ret;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < info->num_scales; i++) {
+- __ad9467_get_scale(conv, i, &scale_val[0], &scale_val[1]);
++ __ad9467_get_scale(st, i, &scale_val[0], &scale_val[1]);
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+- ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+- info->scale_table[i][1]);
+- ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+- AN877_ADC_TRANSFER_SYNC);
+- return 0;
++ guard(mutex)(&st->lock);
++ ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
++ info->scale_table[i][1]);
++ if (ret < 0)
++ return ret;
++
++ return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
++ AN877_ADC_TRANSFER_SYNC);
+ }
+
+ return -EINVAL;
+ }
+
+-static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
++static int ad9467_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+ {
+- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ struct ad9467_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+- return ad9467_get_scale(conv, val, val2);
++ return ad9467_get_scale(st, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(st->clk);
+
+@@ -334,17 +339,17 @@ static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
+ }
+ }
+
+-static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
++static int ad9467_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+ {
+- const struct adi_axi_adc_chip_info *info = conv->chip_info;
+- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ struct ad9467_state *st = iio_priv(indio_dev);
++ const struct ad9467_chip_info *info = st->info;
+ long r_clk;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+- return ad9467_set_scale(conv, val, val2);
++ return ad9467_set_scale(st, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ r_clk = clk_round_rate(st->clk, val);
+ if (r_clk < 0 || r_clk > info->max_rate) {
+@@ -359,6 +364,53 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+ }
+ }
+
++static int ad9467_read_avail(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan,
++ const int **vals, int *type, int *length,
++ long mask)
++{
++ struct ad9467_state *st = iio_priv(indio_dev);
++ const struct ad9467_chip_info *info = st->info;
++
++ switch (mask) {
++ case IIO_CHAN_INFO_SCALE:
++ *vals = (const int *)st->scales;
++ *type = IIO_VAL_INT_PLUS_MICRO;
++ /* Values are stored in a 2D matrix */
++ *length = info->num_scales * 2;
++ return IIO_AVAIL_LIST;
++ default:
++ return -EINVAL;
++ }
++}
++
++static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
++ const unsigned long *scan_mask)
++{
++ struct ad9467_state *st = iio_priv(indio_dev);
++ unsigned int c;
++ int ret;
++
++ for (c = 0; c < st->info->num_channels; c++) {
++ if (test_bit(c, scan_mask))
++ ret = iio_backend_chan_enable(st->back, c);
++ else
++ ret = iio_backend_chan_disable(st->back, c);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static const struct iio_info ad9467_info = {
++ .read_raw = ad9467_read_raw,
++ .write_raw = ad9467_write_raw,
++ .update_scan_mode = ad9467_update_scan_mode,
++ .debugfs_reg_access = ad9467_reg_access,
++ .read_avail = ad9467_read_avail,
++};
++
+ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+ {
+ int ret;
+@@ -371,34 +423,122 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+ AN877_ADC_TRANSFER_SYNC);
+ }
+
+-static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
++static int ad9467_scale_fill(struct ad9467_state *st)
+ {
+- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
++ const struct ad9467_chip_info *info = st->info;
++ unsigned int i, val1, val2;
+
+- return ad9467_outputmode_set(st->spi, st->output_mode);
++ st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
++ sizeof(*st->scales), GFP_KERNEL);
++ if (!st->scales)
++ return -ENOMEM;
++
++ for (i = 0; i < info->num_scales; i++) {
++ __ad9467_get_scale(st, i, &val1, &val2);
++ st->scales[i][0] = val1;
++ st->scales[i][1] = val2;
++ }
++
++ return 0;
++}
++
++static int ad9467_setup(struct ad9467_state *st)
++{
++ struct iio_backend_data_fmt data = {
++ .sign_extend = true,
++ .enable = true,
++ };
++ unsigned int c, mode;
++ int ret;
++
++ mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
++ ret = ad9467_outputmode_set(st->spi, mode);
++ if (ret)
++ return ret;
++
++ for (c = 0; c < st->info->num_channels; c++) {
++ ret = iio_backend_data_format_set(st->back, c, &data);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int ad9467_reset(struct device *dev)
++{
++ struct gpio_desc *gpio;
++
++ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
++ if (IS_ERR_OR_NULL(gpio))
++ return PTR_ERR_OR_ZERO(gpio);
++
++ fsleep(1);
++ gpiod_set_value_cansleep(gpio, 0);
++ fsleep(10 * USEC_PER_MSEC);
++
++ return 0;
++}
++
++static int ad9467_iio_backend_get(struct ad9467_state *st)
++{
++ struct device *dev = &st->spi->dev;
++ struct device_node *__back;
++
++ st->back = devm_iio_backend_get(dev, NULL);
++ if (!IS_ERR(st->back))
++ return 0;
++	/* If not found, don't error out as we might have a legacy DT property */
++ if (PTR_ERR(st->back) != -ENOENT)
++ return PTR_ERR(st->back);
++
++	/*
++	 * If we don't get the backend using the normal APIs, use the legacy
++	 * 'adi,adc-dev' property. We get all nodes with that property and
++	 * look for the one pointing at us, then directly look up that fwnode
++	 * in the backend list of registered devices. This is done so we don't
++	 * make io-backends mandatory, which would break DT ABI.
++	 */
++ for_each_node_with_property(__back, "adi,adc-dev") {
++ struct device_node *__me;
++
++ __me = of_parse_phandle(__back, "adi,adc-dev", 0);
++ if (!__me)
++ continue;
++
++ if (!device_match_of_node(dev, __me)) {
++ of_node_put(__me);
++ continue;
++ }
++
++ of_node_put(__me);
++ st->back = __devm_iio_backend_get_from_fwnode_lookup(dev,
++ of_fwnode_handle(__back));
++ of_node_put(__back);
++ return PTR_ERR_OR_ZERO(st->back);
++ }
++
++ return -ENODEV;
+ }
+
+ static int ad9467_probe(struct spi_device *spi)
+ {
+- const struct ad9467_chip_info *info;
+- struct adi_axi_adc_conv *conv;
++ struct iio_dev *indio_dev;
+ struct ad9467_state *st;
+ unsigned int id;
+ int ret;
+
+- info = of_device_get_match_data(&spi->dev);
+- if (!info)
+- info = (void *)spi_get_device_id(spi)->driver_data;
+- if (!info)
+- return -ENODEV;
+-
+- conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
+- if (IS_ERR(conv))
+- return PTR_ERR(conv);
++ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
++ if (!indio_dev)
++ return -ENOMEM;
+
+- st = adi_axi_adc_conv_priv(conv);
++ st = iio_priv(indio_dev);
+ st->spi = spi;
+
++ st->info = spi_get_device_match_data(spi);
++ if (!st->info)
++ return -ENODEV;
++
+ st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
+ if (IS_ERR(st->clk))
+ return PTR_ERR(st->clk);
+@@ -408,51 +548,57 @@ static int ad9467_probe(struct spi_device *spi)
+ if (IS_ERR(st->pwrdown_gpio))
+ return PTR_ERR(st->pwrdown_gpio);
+
+- st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+- GPIOD_OUT_LOW);
+- if (IS_ERR(st->reset_gpio))
+- return PTR_ERR(st->reset_gpio);
+-
+- if (st->reset_gpio) {
+- udelay(1);
+- ret = gpiod_direction_output(st->reset_gpio, 1);
+- if (ret)
+- return ret;
+- mdelay(10);
+- }
++ ret = ad9467_reset(&spi->dev);
++ if (ret)
++ return ret;
+
+- conv->chip_info = &info->axi_adc_info;
++ ret = ad9467_scale_fill(st);
++ if (ret)
++ return ret;
+
+ id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+- if (id != conv->chip_info->id) {
++ if (id != st->info->id) {
+ dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
+- id, conv->chip_info->id);
++ id, st->info->id);
+ return -ENODEV;
+ }
+
+- conv->reg_access = ad9467_reg_access;
+- conv->write_raw = ad9467_write_raw;
+- conv->read_raw = ad9467_read_raw;
+- conv->preenable_setup = ad9467_preenable_setup;
++ indio_dev->name = st->info->name;
++ indio_dev->channels = st->info->channels;
++ indio_dev->num_channels = st->info->num_channels;
++ indio_dev->info = &ad9467_info;
+
+- st->output_mode = info->default_output_mode |
+- AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
++ ret = ad9467_iio_backend_get(st);
++ if (ret)
++ return ret;
+
+- return 0;
++ ret = devm_iio_backend_request_buffer(&spi->dev, st->back, indio_dev);
++ if (ret)
++ return ret;
++
++ ret = devm_iio_backend_enable(&spi->dev, st->back);
++ if (ret)
++ return ret;
++
++ ret = ad9467_setup(st);
++ if (ret)
++ return ret;
++
++ return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+
+ static const struct of_device_id ad9467_of_match[] = {
+- { .compatible = "adi,ad9265", .data = &ad9467_chip_tbl[ID_AD9265], },
+- { .compatible = "adi,ad9434", .data = &ad9467_chip_tbl[ID_AD9434], },
+- { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
++ { .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, },
++ { .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, },
++ { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, ad9467_of_match);
+
+ static const struct spi_device_id ad9467_ids[] = {
+- { "ad9265", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9265] },
+- { "ad9434", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9434] },
+- { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9467] },
++ { "ad9265", (kernel_ulong_t)&ad9265_chip_tbl },
++ { "ad9434", (kernel_ulong_t)&ad9434_chip_tbl },
++ { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl },
+ {}
+ };
+ MODULE_DEVICE_TABLE(spi, ad9467_ids);
+@@ -470,4 +616,4 @@ module_spi_driver(ad9467_driver);
+ MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+ MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
+ MODULE_LICENSE("GPL v2");
+-MODULE_IMPORT_NS(IIO_ADI_AXI);
++MODULE_IMPORT_NS(IIO_BACKEND);
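
The hunk above moves ad9467 from the old adi-axi-adc "conv" bridge to the IIO backend framework: the driver now owns its own iio_dev, the per-chip array collapses into standalone ad9467_chip_info structs, SPI reads propagate errors instead of discarding them, and ad9467_set_scale() serialises its two register writes with a scoped guard(mutex). The lookup it keeps throughout is masking the VREF register and scanning the chip's scale table. A minimal standalone sketch of that lookup follows; the mask and table values are illustrative, not datasheet numbers:

/* Standalone model of the VREF -> scale lookup done by ad9467_get_scale()
 * above. In the driver the table and mask live in struct ad9467_chip_info. */
#include <stdio.h>

#define VREF_MASK 0x1f /* illustrative stand-in for info->vref_mask */

/* Each row is { scale value, raw VREF register code for that scale }. */
static const unsigned int scale_table[][2] = {
	{ 2000, 0x00 },
	{ 2100, 0x06 },
	{ 2200, 0x10 },
};

static int get_scale_index(unsigned int vref_reg)
{
	unsigned int vref_val = vref_reg & VREF_MASK;
	unsigned int i;

	for (i = 0; i < sizeof(scale_table) / sizeof(scale_table[0]); i++) {
		if (vref_val == scale_table[i][1])
			return (int)i;
	}
	return -1; /* the driver returns -ERANGE here */
}

int main(void)
{
	/* A register read of 0xd0 masks down to 0x10 -> index 2. */
	printf("index = %d\n", get_scale_index(0xd0));
	return 0;
}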
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
+index aff0532a974aa0..e3b21588294168 100644
+--- a/drivers/iio/adc/adi-axi-adc.c
++++ b/drivers/iio/adc/adi-axi-adc.c
+@@ -8,21 +8,22 @@
+
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
++#include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/property.h>
++#include <linux/regmap.h>
+ #include <linux/slab.h>
+
+-#include <linux/iio/iio.h>
+-#include <linux/iio/sysfs.h>
+-#include <linux/iio/buffer.h>
+-#include <linux/iio/buffer-dmaengine.h>
+-
+ #include <linux/fpga/adi-axi-common.h>
+-#include <linux/iio/adc/adi-axi-adc.h>
++
++#include <linux/iio/backend.h>
++#include <linux/iio/buffer-dmaengine.h>
++#include <linux/iio/buffer.h>
++#include <linux/iio/iio.h>
+
+ /*
+ * Register definitions:
+@@ -43,6 +44,7 @@
+ #define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
+ #define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
+ #define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
++#define ADI_AXI_REG_CHAN_CTRL_FMT_MASK GENMASK(6, 4)
+ #define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
+ #define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
+ #define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
+@@ -54,394 +56,175 @@
+ ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
+ ADI_AXI_REG_CHAN_CTRL_ENABLE)
+
+-struct adi_axi_adc_core_info {
+- unsigned int version;
+-};
+-
+ struct adi_axi_adc_state {
+- struct mutex lock;
+-
+- struct adi_axi_adc_client *client;
+- void __iomem *regs;
+-};
+-
+-struct adi_axi_adc_client {
+- struct list_head entry;
+- struct adi_axi_adc_conv conv;
+- struct adi_axi_adc_state *state;
++ struct regmap *regmap;
+ struct device *dev;
+- const struct adi_axi_adc_core_info *info;
+ };
+
+-static LIST_HEAD(registered_clients);
+-static DEFINE_MUTEX(registered_clients_lock);
+-
+-static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
++static int axi_adc_enable(struct iio_backend *back)
+ {
+- return container_of(conv, struct adi_axi_adc_client, conv);
+-}
++ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
++ int ret;
+
+-void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
+-{
+- struct adi_axi_adc_client *cl = conv_to_client(conv);
++ ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
++ ADI_AXI_REG_RSTN_MMCM_RSTN);
++ if (ret)
++ return ret;
+
+- return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client),
+- IIO_DMA_MINALIGN);
++ fsleep(10000);
++ return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
++ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+ }
+-EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI);
+
+-static void adi_axi_adc_write(struct adi_axi_adc_state *st,
+- unsigned int reg,
+- unsigned int val)
++static void axi_adc_disable(struct iio_backend *back)
+ {
+- iowrite32(val, st->regs + reg);
+-}
++ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+-static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
+- unsigned int reg)
+-{
+- return ioread32(st->regs + reg);
++ regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
+ }
+
+-static int adi_axi_adc_config_dma_buffer(struct device *dev,
+- struct iio_dev *indio_dev)
++static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
++ const struct iio_backend_data_fmt *data)
+ {
+- const char *dma_name;
+-
+- if (!device_property_present(dev, "dmas"))
+- return 0;
+-
+- if (device_property_read_string(dev, "dma-names", &dma_name))
+- dma_name = "rx";
+-
+- return devm_iio_dmaengine_buffer_setup(indio_dev->dev.parent,
+- indio_dev, dma_name);
++ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
++ u32 val;
++
++ if (!data->enable)
++ return regmap_clear_bits(st->regmap,
++ ADI_AXI_REG_CHAN_CTRL(chan),
++ ADI_AXI_REG_CHAN_CTRL_FMT_EN);
++
++ val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
++ if (data->sign_extend)
++ val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
++ if (data->type == IIO_BACKEND_OFFSET_BINARY)
++ val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);
++
++ return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
++ ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
+ }
+
+-static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
+- struct iio_chan_spec const *chan,
+- int *val, int *val2, long mask)
++static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
+ {
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
++ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+- if (!conv->read_raw)
+- return -EOPNOTSUPP;
+-
+- return conv->read_raw(conv, chan, val, val2, mask);
++ return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
++ ADI_AXI_REG_CHAN_CTRL_ENABLE);
+ }
+
+-static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
+- struct iio_chan_spec const *chan,
+- int val, int val2, long mask)
++static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
+ {
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+-
+- if (!conv->write_raw)
+- return -EOPNOTSUPP;
++ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+- return conv->write_raw(conv, chan, val, val2, mask);
++ return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
++ ADI_AXI_REG_CHAN_CTRL_ENABLE);
+ }
+
+-static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
+- const unsigned long *scan_mask)
++static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
++ struct iio_dev *indio_dev)
+ {
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+- unsigned int i, ctrl;
+-
+- for (i = 0; i < conv->chip_info->num_channels; i++) {
+- ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));
++ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
++ struct iio_buffer *buffer;
++ const char *dma_name;
++ int ret;
+
+- if (test_bit(i, scan_mask))
+- ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
+- else
+- ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;
++ if (device_property_read_string(st->dev, "dma-names", &dma_name))
++ dma_name = "rx";
+
+- adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
++ buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
++ if (IS_ERR(buffer)) {
++ dev_err(st->dev, "Could not get DMA buffer, %ld\n",
++ PTR_ERR(buffer));
++ return ERR_CAST(buffer);
+ }
+
+- return 0;
+-}
+-
+-static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
+- size_t sizeof_priv)
+-{
+- struct adi_axi_adc_client *cl;
+- size_t alloc_size;
+-
+- alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_DMA_MINALIGN);
+- if (sizeof_priv)
+- alloc_size += ALIGN(sizeof_priv, IIO_DMA_MINALIGN);
+-
+- cl = kzalloc(alloc_size, GFP_KERNEL);
+- if (!cl)
+- return ERR_PTR(-ENOMEM);
+-
+- mutex_lock(&registered_clients_lock);
+-
+- cl->dev = get_device(dev);
+-
+- list_add_tail(&cl->entry, &registered_clients);
+-
+- mutex_unlock(&registered_clients_lock);
+-
+- return &cl->conv;
+-}
+-
+-static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
+-{
+- struct adi_axi_adc_client *cl = conv_to_client(conv);
+-
+- mutex_lock(&registered_clients_lock);
+-
+- list_del(&cl->entry);
+- put_device(cl->dev);
+-
+- mutex_unlock(&registered_clients_lock);
+-
+- kfree(cl);
+-}
+-
+-static void devm_adi_axi_adc_conv_release(void *conv)
+-{
+- adi_axi_adc_conv_unregister(conv);
+-}
+-
+-struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+- size_t sizeof_priv)
+-{
+- struct adi_axi_adc_conv *conv;
+- int ret;
+-
+- conv = adi_axi_adc_conv_register(dev, sizeof_priv);
+- if (IS_ERR(conv))
+- return conv;
+-
+- ret = devm_add_action_or_reset(dev, devm_adi_axi_adc_conv_release,
+- conv);
++ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
++ ret = iio_device_attach_buffer(indio_dev, buffer);
+ if (ret)
+ return ERR_PTR(ret);
+
+- return conv;
++ return buffer;
+ }
+-EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);
+
+-static ssize_t in_voltage_scale_available_show(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
++static void axi_adc_free_buffer(struct iio_backend *back,
++ struct iio_buffer *buffer)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+- size_t len = 0;
+- int i;
+-
+- for (i = 0; i < conv->chip_info->num_scales; i++) {
+- const unsigned int *s = conv->chip_info->scale_table[i];
+-
+- len += scnprintf(buf + len, PAGE_SIZE - len,
+- "%u.%06u ", s[0], s[1]);
+- }
+- buf[len - 1] = '\n';
+-
+- return len;
++ iio_dmaengine_buffer_free(buffer);
+ }
+
+-static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
+-
+-enum {
+- ADI_AXI_ATTR_SCALE_AVAIL,
++static const struct regmap_config axi_adc_regmap_config = {
++ .val_bits = 32,
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .max_register = 0x0800,
+ };
+
+-#define ADI_AXI_ATTR(_en_, _file_) \
+- [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
+-
+-static struct attribute *adi_axi_adc_attributes[] = {
+- ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
+- NULL
++static const struct iio_backend_ops adi_axi_adc_generic = {
++ .enable = axi_adc_enable,
++ .disable = axi_adc_disable,
++ .data_format_set = axi_adc_data_format_set,
++ .chan_enable = axi_adc_chan_enable,
++ .chan_disable = axi_adc_chan_disable,
++ .request_buffer = axi_adc_request_buffer,
++ .free_buffer = axi_adc_free_buffer,
+ };
+
+-static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
+- struct attribute *attr, int n)
+-{
+- struct device *dev = kobj_to_dev(kobj);
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+- struct adi_axi_adc_state *st = iio_priv(indio_dev);
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+-
+- switch (n) {
+- case ADI_AXI_ATTR_SCALE_AVAIL:
+- if (!conv->chip_info->num_scales)
+- return 0;
+- return attr->mode;
+- default:
+- return attr->mode;
+- }
+-}
+-
+-static const struct attribute_group adi_axi_adc_attribute_group = {
+- .attrs = adi_axi_adc_attributes,
+- .is_visible = axi_adc_attr_is_visible,
+-};
+-
+-static const struct iio_info adi_axi_adc_info = {
+- .read_raw = &adi_axi_adc_read_raw,
+- .write_raw = &adi_axi_adc_write_raw,
+- .attrs = &adi_axi_adc_attribute_group,
+- .update_scan_mode = &adi_axi_adc_update_scan_mode,
+-};
+-
+-static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
+- .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+-};
+-
+-static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+-{
+- const struct adi_axi_adc_core_info *info;
+- struct adi_axi_adc_client *cl;
+- struct device_node *cln;
+-
+- info = of_device_get_match_data(dev);
+- if (!info)
+- return ERR_PTR(-ENODEV);
+-
+- cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
+- if (!cln) {
+- dev_err(dev, "No 'adi,adc-dev' node defined\n");
+- return ERR_PTR(-ENODEV);
+- }
+-
+- mutex_lock(&registered_clients_lock);
+-
+- list_for_each_entry(cl, &registered_clients, entry) {
+- if (!cl->dev)
+- continue;
+-
+- if (cl->dev->of_node != cln)
+- continue;
+-
+- if (!try_module_get(cl->dev->driver->owner)) {
+- mutex_unlock(&registered_clients_lock);
+- of_node_put(cln);
+- return ERR_PTR(-ENODEV);
+- }
+-
+- get_device(cl->dev);
+- cl->info = info;
+- mutex_unlock(&registered_clients_lock);
+- of_node_put(cln);
+- return cl;
+- }
+-
+- mutex_unlock(&registered_clients_lock);
+- of_node_put(cln);
+-
+- return ERR_PTR(-EPROBE_DEFER);
+-}
+-
+-static int adi_axi_adc_setup_channels(struct device *dev,
+- struct adi_axi_adc_state *st)
+-{
+- struct adi_axi_adc_conv *conv = &st->client->conv;
+- int i, ret;
+-
+- if (conv->preenable_setup) {
+- ret = conv->preenable_setup(conv);
+- if (ret)
+- return ret;
+- }
+-
+- for (i = 0; i < conv->chip_info->num_channels; i++) {
+- adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
+- ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+- }
+-
+- return 0;
+-}
+-
+-static void axi_adc_reset(struct adi_axi_adc_state *st)
+-{
+- adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
+- mdelay(10);
+- adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
+- mdelay(10);
+- adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
+- ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+-}
+-
+-static void adi_axi_adc_cleanup(void *data)
+-{
+- struct adi_axi_adc_client *cl = data;
+-
+- put_device(cl->dev);
+- module_put(cl->dev->driver->owner);
+-}
+-
+ static int adi_axi_adc_probe(struct platform_device *pdev)
+ {
+- struct adi_axi_adc_conv *conv;
+- struct iio_dev *indio_dev;
+- struct adi_axi_adc_client *cl;
++ const unsigned int *expected_ver;
+ struct adi_axi_adc_state *st;
++ void __iomem *base;
+ unsigned int ver;
++ struct clk *clk;
+ int ret;
+
+- cl = adi_axi_adc_attach_client(&pdev->dev);
+- if (IS_ERR(cl))
+- return PTR_ERR(cl);
+-
+- ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
+- if (ret)
+- return ret;
+-
+- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+- if (indio_dev == NULL)
++ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
++ if (!st)
+ return -ENOMEM;
+
+- st = iio_priv(indio_dev);
+- st->client = cl;
+- cl->state = st;
+- mutex_init(&st->lock);
++ base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ st->dev = &pdev->dev;
++ st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
++ &axi_adc_regmap_config);
++ if (IS_ERR(st->regmap))
++ return PTR_ERR(st->regmap);
+
+- st->regs = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(st->regs))
+- return PTR_ERR(st->regs);
++ expected_ver = device_get_match_data(&pdev->dev);
++ if (!expected_ver)
++ return -ENODEV;
+
+- conv = &st->client->conv;
++ clk = devm_clk_get_enabled(&pdev->dev, NULL);
++ if (IS_ERR(clk))
++ return PTR_ERR(clk);
+
+- axi_adc_reset(st);
++	/*
++	 * Force-disable the core; it is up to the frontend to enable it. We
++	 * can still read and write registers while it is disabled.
++	 */
++ ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
++ if (ret)
++ return ret;
+
+- ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);
++ ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
++ if (ret)
++ return ret;
+
+- if (cl->info->version > ver) {
++ if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
+ dev_err(&pdev->dev,
+- "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+- ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
+- ADI_AXI_PCORE_VER_MINOR(cl->info->version),
+- ADI_AXI_PCORE_VER_PATCH(cl->info->version),
++ "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
++ ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
++ ADI_AXI_PCORE_VER_MINOR(*expected_ver),
++ ADI_AXI_PCORE_VER_PATCH(*expected_ver),
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+ return -ENODEV;
+ }
+
+- indio_dev->info = &adi_axi_adc_info;
+- indio_dev->name = "adi-axi-adc";
+- indio_dev->modes = INDIO_DIRECT_MODE;
+- indio_dev->num_channels = conv->chip_info->num_channels;
+- indio_dev->channels = conv->chip_info->channels;
+-
+- ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
+- if (ret)
+- return ret;
+-
+- ret = adi_axi_adc_setup_channels(&pdev->dev, st);
+- if (ret)
+- return ret;
+-
+- ret = devm_iio_device_register(&pdev->dev, indio_dev);
++ ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
+ if (ret)
+ return ret;
+
+@@ -453,6 +236,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
+ return 0;
+ }
+
++static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');
++
+ /* Match table for of_platform binding */
+ static const struct of_device_id adi_axi_adc_of_match[] = {
+ { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
+@@ -472,3 +257,5 @@ module_platform_driver(adi_axi_adc_driver);
+ MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+ MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
+ MODULE_LICENSE("GPL v2");
++MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
++MODULE_IMPORT_NS(IIO_BACKEND);
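
The rewrite above turns adi-axi-adc from a pseudo-device that registered "client" converters into a plain IIO backend: the hand-rolled MMIO accessors become a regmap, the client list and attribute plumbing disappear, and frontends drive it through iio_backend_ops. The data-format path shows the FIELD_PREP()-plus-update_bits() idiom it now relies on. Below is a self-contained userspace model of that idiom; the macros are simplified stand-ins for the kernel's <linux/bitfield.h> and regmap helpers, not the real definitions:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define FMT_SIGNEXT	BIT(6)
#define FMT_TYPE	BIT(5)
#define FMT_EN		BIT(4)
#define FMT_MASK	GENMASK(6, 4)

/* regmap_update_bits() analogue: clear the field, then OR in the new value */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffffu; /* pretend-current CHAN_CTRL contents */
	uint32_t val = FIELD_PREP(FMT_EN, 1) | FIELD_PREP(FMT_SIGNEXT, 1);

	/* Only bits 6..4 change; all other bits are preserved. */
	printf("0x%08x\n", (unsigned)update_bits(reg, FMT_MASK, val));
	return 0;
}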
+diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
+index dce9ec91e4a778..512d7b95b08e6f 100644
+--- a/drivers/iio/adc/imx93_adc.c
++++ b/drivers/iio/adc/imx93_adc.c
+@@ -93,6 +93,10 @@ static const struct iio_chan_spec imx93_adc_iio_channels[] = {
+ IMX93_ADC_CHAN(1),
+ IMX93_ADC_CHAN(2),
+ IMX93_ADC_CHAN(3),
++ IMX93_ADC_CHAN(4),
++ IMX93_ADC_CHAN(5),
++ IMX93_ADC_CHAN(6),
++ IMX93_ADC_CHAN(7),
+ };
+
+ static void imx93_adc_power_down(struct imx93_adc *adc)
+diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
+index 320e3e7e3d4d4a..57cfabe80c8267 100644
+--- a/drivers/iio/adc/meson_saradc.c
++++ b/drivers/iio/adc/meson_saradc.c
+@@ -1239,6 +1239,20 @@ static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
+ .cmv_select = 1,
+ };
+
++static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
++ .has_bl30_integration = true,
++ .clock_rate = 1200000,
++ .bandgap_reg = MESON_SAR_ADC_REG11,
++ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
++ .resolution = 12,
++ .disable_ring_counter = 1,
++ .has_reg11 = true,
++ .vref_volatge = 1,
++ .has_vref_select = true,
++ .vref_select = VREF_VDDA,
++ .cmv_select = 1,
++};
++
+ static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
+ .has_bl30_integration = false,
+ .clock_rate = 1200000,
+@@ -1283,7 +1297,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
+ };
+
+ static const struct meson_sar_adc_data meson_sar_adc_axg_data = {
+- .param = &meson_sar_adc_gxl_param,
++ .param = &meson_sar_adc_axg_param,
+ .name = "meson-axg-saradc",
+ };
+
+diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
+index dd94667a623bd9..1c0042fbbb5481 100644
+--- a/drivers/iio/adc/rockchip_saradc.c
++++ b/drivers/iio/adc/rockchip_saradc.c
+@@ -52,7 +52,7 @@
+ #define SARADC2_START BIT(4)
+ #define SARADC2_SINGLE_MODE BIT(5)
+
+-#define SARADC2_CONV_CHANNELS GENMASK(15, 0)
++#define SARADC2_CONV_CHANNELS GENMASK(3, 0)
+
+ struct rockchip_saradc;
+
+@@ -102,12 +102,12 @@ static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn)
+ writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC);
+ writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC);
+ val = FIELD_PREP(SARADC2_EN_END_INT, 1);
+- val |= val << 16;
++ val |= SARADC2_EN_END_INT << 16;
+ writel_relaxed(val, info->regs + SARADC2_END_INT_EN);
+ val = FIELD_PREP(SARADC2_START, 1) |
+ FIELD_PREP(SARADC2_SINGLE_MODE, 1) |
+ FIELD_PREP(SARADC2_CONV_CHANNELS, chn);
+- val |= val << 16;
++ val |= (SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16;
+ writel(val, info->regs + SARADC2_CONV_CON);
+ }
+
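
The rockchip_saradc fix above narrows SARADC2_CONV_CHANNELS to a 4-bit field and, more importantly, derives the upper write-enable half of each register from the field masks rather than from the value being written. These registers appear to follow the common Rockchip scheme where bits 31..16 gate writes to bits 15..0, so the old `val |= val << 16` silently failed to update any field whose value bits were zero (selecting channel 0, for instance). A small demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define SARADC2_START		BIT(4)
#define SARADC2_SINGLE_MODE	BIT(5)
#define SARADC2_CONV_CHANNELS	GENMASK(3, 0)

int main(void)
{
	unsigned int chn = 0; /* selecting channel 0 */
	uint32_t val = SARADC2_START | SARADC2_SINGLE_MODE |
		       (chn & SARADC2_CONV_CHANNELS);

	/* Old (buggy): mask derived from the value itself. With chn == 0 the
	 * channel-field enable bits stay 0, so the field is never written. */
	uint32_t buggy = val | (val << 16);

	/* Fixed: mask derived from the field definitions, independent of chn. */
	uint32_t fixed = val |
		((SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16);

	printf("buggy = 0x%08x, fixed = 0x%08x\n",
	       (unsigned)buggy, (unsigned)fixed);
	return 0;
}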
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 2f082006550fd8..bbd5bdd732f01b 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -708,6 +708,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ struct stm32_adc_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id;
++
+ struct resource *res;
+ u32 max_rate;
+ int ret;
+@@ -720,8 +722,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, &priv->common);
+
+- priv->cfg = (const struct stm32_adc_priv_cfg *)
+- of_match_device(dev->driver->of_match_table, dev)->data;
++ of_id = of_match_device(dev->driver->of_match_table, dev);
++ if (!of_id)
++ return -ENODEV;
++
++ priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data;
+ priv->nb_adc_max = priv->cfg->num_adcs;
+ spin_lock_init(&priv->common.lock);
+
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index f7613efb870d58..0b3e487440a665 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -2234,6 +2234,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
+ if (vin[0] != val || vin[1] >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+ vin[0], vin[1]);
++ ret = -EINVAL;
+ goto err;
+ }
+ } else if (ret != -EINVAL) {
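
The stm32-adc change above is the classic missing-error-code bug: the invalid-channel branch jumped to the cleanup label while `ret` still held 0 from the preceding successful call, so probe could report success on bad devicetree data. A toy reproduction of the pattern, with made-up names and -22 standing in for -EINVAL:

#include <stdio.h>

static int parse_channel(int vin0, int vin1, int max_channels)
{
	int ret = 0; /* last helper call "succeeded" */

	if (vin0 < 0 || vin1 >= max_channels) {
		/* Without this assignment the function would return 0 and
		 * the caller would treat the invalid channel as success. */
		ret = -22; /* -EINVAL */
		goto err;
	}
	return 0;
err:
	/* ... unwind partially initialised state here ... */
	return ret;
}

int main(void)
{
	printf("%d\n", parse_channel(0, 99, 16));
	return 0;
}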
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index 8db7a01cb5fbf3..5f879598699545 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -670,8 +670,10 @@ static int tiadc_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, indio_dev);
+
+ err = tiadc_request_dma(pdev, adc_dev);
+- if (err && err == -EPROBE_DEFER)
++ if (err && err != -ENODEV) {
++ dev_err_probe(&pdev->dev, err, "DMA request failed\n");
+ goto err_dma;
++ }
+
+ return 0;
+
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index f0b71a1220e02e..f52abf759260f2 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
+
+ /* Run calibration of PS & PL as part of the sequence */
+ scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
+- for (i = 0; i < indio_dev->num_channels; i++)
+- scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
++ for (i = 0; i < indio_dev->num_channels; i++) {
++ const struct iio_chan_spec *chan = &indio_dev->channels[i];
++
++ if (chan->scan_index < AMS_CTRL_SEQ_BASE)
++ scan_mask |= BIT_ULL(chan->scan_index);
++ }
+
+ if (ams->ps_base) {
+ /* put sysmon in a soft reset to change the sequence */
+diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
+index b54fe01734b0d7..55eb16b32f6c9a 100644
+--- a/drivers/iio/amplifiers/Kconfig
++++ b/drivers/iio/amplifiers/Kconfig
+@@ -27,6 +27,7 @@ config AD8366
+ config ADA4250
+ tristate "Analog Devices ADA4250 Instrumentation Amplifier"
+ depends on SPI
++ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices ADA4250
+ SPI Amplifier's support. The driver provides direct access via
+diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+index 5f85ba38e6f6e7..db5dbd60cf6759 100644
+--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
++++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+@@ -159,7 +159,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
+  * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
+ * release it.
+ */
+-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
++struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel)
+ {
+ struct dmaengine_buffer *dmaengine_buffer;
+@@ -180,7 +180,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+
+ ret = dma_get_slave_caps(chan, &caps);
+ if (ret < 0)
+- goto err_free;
++ goto err_release;
+
+ /* Needs to be aligned to the maximum of the minimums */
+ if (caps.src_addr_widths)
+@@ -206,10 +206,13 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+
+ return &dmaengine_buffer->queue.buffer;
+
++err_release:
++ dma_release_channel(chan);
+ err_free:
+ kfree(dmaengine_buffer);
+ return ERR_PTR(ret);
+ }
++EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
+
+ /**
+ * iio_dmaengine_buffer_free() - Free dmaengine buffer
+@@ -217,7 +220,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+ *
+ * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
+ */
+-static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
++void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+ {
+ struct dmaengine_buffer *dmaengine_buffer =
+ iio_buffer_to_dmaengine_buffer(buffer);
+@@ -227,6 +230,7 @@ static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+
+ iio_buffer_put(buffer);
+ }
++EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
+
+ static void __devm_iio_dmaengine_buffer_free(void *buffer)
+ {
+@@ -288,7 +292,7 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
+
+ return iio_device_attach_buffer(indio_dev, buffer);
+ }
+-EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_setup);
++EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+
+ MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+ MODULE_DESCRIPTION("DMA buffer for the IIO framework");
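
Besides exporting the alloc/free helpers in the IIO_DMAENGINE_BUFFER symbol namespace so the new AXI backend can call them, the hunk above adds an err_release label: when dma_get_slave_caps() fails, the previously requested DMA channel must be released before the wrapper is freed. The idiom is the usual layered goto unwind, sketched here portably with malloc/free standing in for the DMA calls:

#include <stdio.h>
#include <stdlib.h>

struct buffer { void *mem; void *chan; };

static struct buffer *buffer_alloc(int fail_caps)
{
	struct buffer *b = malloc(sizeof(*b));
	if (!b)
		return NULL;

	b->chan = malloc(16); /* stands in for dma_request_chan() */
	if (!b->chan)
		goto err_free;

	if (fail_caps) /* stands in for dma_get_slave_caps() failing */
		goto err_release;

	return b;

err_release:
	free(b->chan); /* before the fix, this release was skipped */
err_free:
	free(b);
	return NULL;
}

int main(void)
{
	struct buffer *b = buffer_alloc(1);
	printf("alloc %s\n", b ? "succeeded" : "failed cleanly");
	free(b);
	return 0;
}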
+diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
+index c7671b1f5eadac..c06515987e7a7c 100644
+--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
++++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
+@@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer;
+ int ret;
+
++ /*
++ * iio_triggered_buffer_cleanup() assumes that the buffer allocated here
++ * is assigned to indio_dev->buffer but this is only the case if this
++ * function is the first caller to iio_device_attach_buffer(). If
++ * indio_dev->buffer is already set then we can't proceed otherwise the
++ * cleanup function will try to free a buffer that was not allocated here.
++ */
++ if (indio_dev->buffer)
++ return -EADDRINUSE;
++
+ buffer = iio_kfifo_allocate();
+ if (!buffer) {
+ ret = -ENOMEM;
+diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
+index 4edc5d21cb9fa6..f959252a4fe665 100644
+--- a/drivers/iio/chemical/bme680.h
++++ b/drivers/iio/chemical/bme680.h
+@@ -54,7 +54,9 @@
+ #define BME680_NB_CONV_MASK GENMASK(3, 0)
+
+ #define BME680_REG_MEAS_STAT_0 0x1D
++#define BME680_NEW_DATA_BIT BIT(7)
+ #define BME680_GAS_MEAS_BIT BIT(6)
++#define BME680_MEAS_BIT BIT(5)
+
+ /* Calibration Parameters */
+ #define BME680_T2_LSB_REG 0x8A
+diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
+index ef5e0e46fd3447..a6bf689833dad7 100644
+--- a/drivers/iio/chemical/bme680_core.c
++++ b/drivers/iio/chemical/bme680_core.c
+@@ -10,6 +10,8 @@
+ */
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
++#include <linux/cleanup.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/log2.h>
+@@ -38,7 +40,7 @@ struct bme680_calib {
+ s8 par_h3;
+ s8 par_h4;
+ s8 par_h5;
+- s8 par_h6;
++ u8 par_h6;
+ s8 par_h7;
+ s8 par_gh1;
+ s16 par_gh2;
+@@ -51,6 +53,7 @@ struct bme680_calib {
+ struct bme680_data {
+ struct regmap *regmap;
+ struct bme680_calib bme680;
++	struct mutex lock; /* Serialize multiple R/W ops to the device. */
+ u8 oversampling_temp;
+ u8 oversampling_press;
+ u8 oversampling_humid;
+@@ -342,10 +345,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
+ if (!calib->par_t2)
+ bme680_read_calib(data, calib);
+
+- var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
++ var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
+ var2 = (var1 * calib->par_t2) >> 11;
+ var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
+- var3 = (var3 * (calib->par_t3 << 4)) >> 14;
++ var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
+ data->t_fine = var2 + var3;
+ calc_temp = (data->t_fine * 5 + 128) >> 8;
+
+@@ -368,9 +371,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
+ var1 = (data->t_fine >> 1) - 64000;
+ var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
+ var2 = var2 + (var1 * calib->par_p5 << 1);
+- var2 = (var2 >> 2) + (calib->par_p4 << 16);
++ var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
+ var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
+- (calib->par_p3 << 5)) >> 3) +
++ ((s32)calib->par_p3 << 5)) >> 3) +
+ ((calib->par_p2 * var1) >> 1);
+ var1 = var1 >> 18;
+ var1 = ((32768 + var1) * calib->par_p1) >> 15;
+@@ -388,7 +391,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
+ var3 = ((press_comp >> 8) * (press_comp >> 8) *
+ (press_comp >> 8) * calib->par_p10) >> 17;
+
+- press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
++ press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
+
+ return press_comp;
+ }
+@@ -414,7 +417,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
+ (((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
+ >> 6) / 100) + (1 << 14))) >> 10;
+ var3 = var1 * var2;
+- var4 = calib->par_h6 << 7;
++ var4 = (s32)calib->par_h6 << 7;
+ var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
+ var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
+ var6 = (var4 * var5) >> 1;
+@@ -532,6 +535,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
+ return ilog2(val) + 1;
+ }
+
++/*
++ * Taken from Bosch BME680 API:
++ * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
++ */
++static int bme680_wait_for_eoc(struct bme680_data *data)
++{
++ struct device *dev = regmap_get_device(data->regmap);
++ unsigned int check;
++ int ret;
++ /*
++ * (Sum of oversampling ratios * time per oversampling) +
++ * TPH measurement + gas measurement + wait transition from forced mode
++ * + heater duration
++ */
++ int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
++ data->oversampling_humid) * 1936) + (477 * 4) +
++ (477 * 5) + 1000 + (data->heater_dur * 1000);
++
++ usleep_range(wait_eoc_us, wait_eoc_us + 100);
++
++ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
++ if (ret) {
++ dev_err(dev, "failed to read measurement status register.\n");
++ return ret;
++ }
++ if (check & BME680_MEAS_BIT) {
++ dev_err(dev, "Device measurement cycle incomplete.\n");
++ return -EBUSY;
++ }
++ if (!(check & BME680_NEW_DATA_BIT)) {
++ dev_err(dev, "No new data available from the device.\n");
++ return -ENODATA;
++ }
++
++ return 0;
++}
++
+ static int bme680_chip_config(struct bme680_data *data)
+ {
+ struct device *dev = regmap_get_device(data->regmap);
+@@ -622,6 +662,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
+ if (ret < 0)
+ return ret;
+
++ ret = bme680_wait_for_eoc(data);
++ if (ret)
++ return ret;
++
+ ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ &tmp, 3);
+ if (ret < 0) {
+@@ -678,7 +722,7 @@ static int bme680_read_press(struct bme680_data *data,
+ }
+
+ *val = bme680_compensate_press(data, adc_press);
+- *val2 = 100;
++ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+@@ -738,6 +782,10 @@ static int bme680_read_gas(struct bme680_data *data,
+ if (ret < 0)
+ return ret;
+
++ ret = bme680_wait_for_eoc(data);
++ if (ret)
++ return ret;
++
+ ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ if (check & BME680_GAS_MEAS_BIT) {
+ dev_err(dev, "gas measurement incomplete\n");
+@@ -781,6 +829,8 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
+ {
+ struct bme680_data *data = iio_priv(indio_dev);
+
++ guard(mutex)(&data->lock);
++
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+@@ -825,6 +875,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
+ {
+ struct bme680_data *data = iio_priv(indio_dev);
+
++ guard(mutex)(&data->lock);
++
+ if (val2 != 0)
+ return -EINVAL;
+
+@@ -921,6 +973,7 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
+ name = bme680_match_acpi_device(dev);
+
+ data = iio_priv(indio_dev);
++ mutex_init(&data->lock);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+ indio_dev->name = name;
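
The bme680 hunk above does three things: it widens par_h6 to u8 and adds (s32) casts so the compensation arithmetic cannot overflow after integer promotion, it serialises read_raw/write_raw with a mutex, and before reading results it waits for end of conversion using the delay formula from Bosch's reference API plus the NEW_DATA/MEAS status bits. The wait is plain arithmetic and easy to check standalone; the oversampling ratios and heater duration below are just example inputs:

#include <stdio.h>

static int wait_eoc_us(int osrs_t, int osrs_p, int osrs_h, int heater_dur_ms)
{
	/* (sum of oversampling ratios * time per oversampling) +
	 * TPH measurement + gas measurement + forced-mode transition +
	 * heater duration, all in microseconds */
	return (osrs_t + osrs_p + osrs_h) * 1936 + (477 * 4) +
	       (477 * 5) + 1000 + heater_dur_ms * 1000;
}

int main(void)
{
	/* e.g. 2x/4x/1x oversampling with a 150 ms heater pulse */
	printf("%d us\n", wait_eoc_us(2, 4, 1, 150));
	return 0;
}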
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index ad8910e6ad59df..abb09fefc792c5 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -32,7 +32,7 @@ static ssize_t _hid_sensor_set_report_latency(struct device *dev,
+ latency = integer * 1000 + fract / 1000;
+ ret = hid_sensor_set_report_latency(attrb, latency);
+ if (ret < 0)
+- return len;
++ return ret;
+
+ attrb->latency_ms = hid_sensor_get_report_latency(attrb);
+
+diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+index 03823ee57f5980..7b19c94ef87d9c 100644
+--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
++++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+@@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
+ int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo)
+ {
++ uint32_t mult;
++
+ /* when FIFO is on, prevent odr change if one is already pending */
+ if (fifo && ts->new_mult != 0)
+ return -EAGAIN;
+
+- ts->new_mult = period / ts->chip.clock_period;
++ mult = period / ts->chip.clock_period;
++ if (mult != ts->mult)
++ ts->new_mult = mult;
+
+ return 0;
+ }
+@@ -101,6 +105,9 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
+
+ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
+ {
++ const int64_t period_min = ts->min_period * ts->mult;
++ const int64_t period_max = ts->max_period * ts->mult;
++ int64_t add_max, sub_max;
+ int64_t delta, jitter;
+ int64_t adjust;
+
+@@ -108,11 +115,13 @@ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
+ delta = ts->it.lo - ts->timestamp;
+
+ /* adjust timestamp while respecting jitter */
++ add_max = period_max - (int64_t)ts->period;
++ sub_max = period_min - (int64_t)ts->period;
+ jitter = INV_SENSORS_TIMESTAMP_JITTER((int64_t)ts->period, ts->chip.jitter);
+ if (delta > jitter)
+- adjust = jitter;
++ adjust = add_max;
+ else if (delta < -jitter)
+- adjust = -jitter;
++ adjust = sub_max;
+ else
+ adjust = 0;
+
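
The timestamp fix above changes two things: inv_sensors_timestamp_update_odr() only latches a new multiplier when it actually differs from the current one, and the alignment step clamps the correction by how far the period may legally stretch or shrink (min/max period scaled by the multiplier) instead of by the jitter window itself. The clamping logic in isolation, with made-up nanosecond figures:

#include <stdio.h>
#include <stdint.h>

static int64_t align_adjust(int64_t delta, int64_t jitter, int64_t period,
			    int64_t period_min, int64_t period_max)
{
	int64_t add_max = period_max - period; /* >= 0 */
	int64_t sub_max = period_min - period; /* <= 0 */

	if (delta > jitter)
		return add_max;
	if (delta < -jitter)
		return sub_max;
	return 0;
}

int main(void)
{
	/* 10 ms nominal period, +/-2% period bounds, 5% jitter window */
	int64_t period = 10000000, jitter = 500000;

	printf("%lld\n", (long long)align_adjust(1200000, jitter, period,
						 9800000, 10200000));
	return 0;
}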
+diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+index 6633b35a94e69a..9c9bc77003c7ff 100644
+--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
++++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+@@ -15,8 +15,8 @@
+ /* Conversion times in us */
+ static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
+ 13000, 7000 };
+-static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
+- 5000, 8000 };
++static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
++ 3000, 8000 };
+ static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
+ 4100, 8220, 16440 };
+
+diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
+index 93b8be183de6b4..b8ff547bc4dada 100644
+--- a/drivers/iio/dac/Kconfig
++++ b/drivers/iio/dac/Kconfig
+@@ -9,6 +9,8 @@ menu "Digital to analog converters"
+ config AD3552R
+ tristate "Analog Devices AD3552R DAC driver"
+ depends on SPI_MASTER
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD3552R
+ Digital to Analog Converter.
+@@ -214,6 +216,8 @@ config AD5764
+ config AD5766
+ tristate "Analog Devices AD5766/AD5767 DAC driver"
+ depends on SPI_MASTER
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD5766, AD5767
+ Digital to Analog Converter.
+@@ -224,6 +228,7 @@ config AD5766
+ config AD5770R
+ tristate "Analog Devices AD5770R IDAC driver"
+ depends on SPI_MASTER
++ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices AD5770R Digital to
+ Analog Converter.
+@@ -315,6 +320,7 @@ config LPC18XX_DAC
+ config LTC1660
+ tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver"
+ depends on SPI
++ select REGMAP_SPI
+ help
+ Say yes here to build support for Linear Technology
+ LTC1660 and LTC1665 Digital to Analog Converters.
+@@ -424,6 +430,7 @@ config STM32_DAC
+
+ config STM32_DAC_CORE
+ tristate
++ select REGMAP_MMIO
+
+ config TI_DAC082S085
+ tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver"
+diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
+index 076bc9ecfb4994..4763402dbcd66d 100644
+--- a/drivers/iio/dac/ad5592r-base.c
++++ b/drivers/iio/dac/ad5592r-base.c
+@@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ s64 tmp = *val * (3767897513LL / 25LL);
+ *val = div_s64_rem(tmp, 1000000000LL, val2);
+
+- return IIO_VAL_INT_PLUS_MICRO;
++ return IIO_VAL_INT_PLUS_NANO;
+ }
+
+ mutex_lock(&st->lock);
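
The ad5592r one-liner above fixes a units mismatch: div_s64_rem() with a 10^9 divisor leaves the remainder in nanounits, so returning IIO_VAL_INT_PLUS_MICRO made userspace scale the fractional part 1000x too large. The arithmetic, reproduced standalone with an illustrative raw reading:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	int64_t raw = 1; /* illustrative raw reading */
	int64_t tmp = raw * (3767897513LL / 25LL);
	int64_t integer = tmp / 1000000000LL;
	int64_t frac_nano = tmp % 1000000000LL;

	/* Correct reading: 0.150715900 when the remainder is declared as
	 * nano; declaring it micro would have yielded 150.7159 instead. */
	printf("%" PRId64 ".%09" PRId64 "\n", integer, frac_nano);
	return 0;
}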
+diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
+index 9e85dfa585081c..71de6cc4a1584f 100644
+--- a/drivers/iio/frequency/Kconfig
++++ b/drivers/iio/frequency/Kconfig
+@@ -53,6 +53,7 @@ config ADF4371
+ config ADF4377
+ tristate "Analog Devices ADF4377 Microwave Wideband Synthesizer"
+ depends on SPI && COMMON_CLK
++ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices ADF4377 Microwave
+ Wideband Synthesizer.
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 85e289700c3c56..4abf80f75ef5d9 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -33,7 +33,6 @@ enum {
+
+ struct adf4350_state {
+ struct spi_device *spi;
+- struct regulator *reg;
+ struct gpio_desc *lock_detect_gpiod;
+ struct adf4350_platform_data *pdata;
+ struct clk *clk;
+@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
+ return pdata;
+ }
+
++static void adf4350_power_down(void *data)
++{
++ struct iio_dev *indio_dev = data;
++ struct adf4350_state *st = iio_priv(indio_dev);
++
++ st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
++ adf4350_sync_config(st);
++}
++
+ static int adf4350_probe(struct spi_device *spi)
+ {
+ struct adf4350_platform_data *pdata;
+@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
+ }
+
+ if (!pdata->clkin) {
+- clk = devm_clk_get(&spi->dev, "clkin");
++ clk = devm_clk_get_enabled(&spi->dev, "clkin");
+ if (IS_ERR(clk))
+- return -EPROBE_DEFER;
+-
+- ret = clk_prepare_enable(clk);
+- if (ret < 0)
+- return ret;
++ return PTR_ERR(clk);
+ }
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+- if (indio_dev == NULL) {
+- ret = -ENOMEM;
+- goto error_disable_clk;
+- }
++ if (indio_dev == NULL)
++ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+- st->reg = devm_regulator_get(&spi->dev, "vcc");
+- if (!IS_ERR(st->reg)) {
+- ret = regulator_enable(st->reg);
+- if (ret)
+- goto error_disable_clk;
+- }
++ ret = devm_regulator_get_enable(&spi->dev, "vcc");
++ if (ret)
++ return ret;
+
+- spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+ st->pdata = pdata;
+
+@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
+
+ st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
+ GPIOD_IN);
+- if (IS_ERR(st->lock_detect_gpiod)) {
+- ret = PTR_ERR(st->lock_detect_gpiod);
+- goto error_disable_reg;
+- }
++ if (IS_ERR(st->lock_detect_gpiod))
++ return PTR_ERR(st->lock_detect_gpiod);
+
+ if (pdata->power_up_frequency) {
+ ret = adf4350_set_freq(st, pdata->power_up_frequency);
+ if (ret)
+- goto error_disable_reg;
++ return ret;
+ }
+
+- ret = iio_device_register(indio_dev);
++ ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
+ if (ret)
+- goto error_disable_reg;
+-
+- return 0;
+-
+-error_disable_reg:
+- if (!IS_ERR(st->reg))
+- regulator_disable(st->reg);
+-error_disable_clk:
+- clk_disable_unprepare(clk);
+-
+- return ret;
+-}
+-
+-static void adf4350_remove(struct spi_device *spi)
+-{
+- struct iio_dev *indio_dev = spi_get_drvdata(spi);
+- struct adf4350_state *st = iio_priv(indio_dev);
+- struct regulator *reg = st->reg;
+-
+- st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
+- adf4350_sync_config(st);
+-
+- iio_device_unregister(indio_dev);
+-
+- clk_disable_unprepare(st->clk);
++ return dev_err_probe(&spi->dev, ret,
++ "Failed to add action to managed power down\n");
+
+- if (!IS_ERR(reg))
+- regulator_disable(reg);
++ return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+
+ static const struct of_device_id adf4350_of_match[] = {
+@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
+ .of_match_table = adf4350_of_match,
+ },
+ .probe = adf4350_probe,
+- .remove = adf4350_remove,
+ .id_table = adf4350_id,
+ };
+ module_spi_driver(adf4350_driver);
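
The adf4350 conversion above removes the hand-rolled unwind and the .remove() callback in favour of fully managed resources: devm_clk_get_enabled(), devm_regulator_get_enable(), and a devm_add_action_or_reset() hook that powers the PLL down when the device detaches. The mechanism is simply a per-device stack of cleanup callbacks run in reverse order; a compact userspace model of it (sizes and the -12/-ENOMEM value are illustrative):

#include <stdio.h>

typedef void (*action_fn)(void *);

struct action { action_fn fn; void *data; };

static struct action actions[8];
static int nr_actions;

static int add_action_or_reset(action_fn fn, void *data)
{
	if (nr_actions >= 8) {
		fn(data); /* "_or_reset": run immediately on failure */
		return -12; /* -ENOMEM */
	}
	actions[nr_actions].fn = fn;
	actions[nr_actions].data = data;
	nr_actions++;
	return 0;
}

static void device_teardown(void)
{
	/* run registered actions in reverse order, like devm teardown */
	while (nr_actions--)
		actions[nr_actions].fn(actions[nr_actions].data);
}

static void power_down(void *data)
{
	printf("power down %s\n", (const char *)data);
}

int main(void)
{
	add_action_or_reset(power_down, "adf4350");
	device_teardown(); /* what detaching the device would trigger */
	return 0;
}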
+diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
+index b4defb82f37e30..3f46032c927527 100644
+--- a/drivers/iio/frequency/adrf6780.c
++++ b/drivers/iio/frequency/adrf6780.c
+@@ -9,7 +9,6 @@
+ #include <linux/bits.h>
+ #include <linux/clk.h>
+ #include <linux/clkdev.h>
+-#include <linux/clk-provider.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/iio/iio.h>
+diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
+index 17275a53ca2cd2..b24e1e27f2da68 100644
+--- a/drivers/iio/imu/adis16475.c
++++ b/drivers/iio/imu/adis16475.c
+@@ -63,8 +63,8 @@
+ #define ADIS16475_MAX_SCAN_DATA 20
+ /* SPI max speed in burst mode */
+ #define ADIS16475_BURST_MAX_SPEED 1000000
+-#define ADIS16475_LSB_DEC_MASK BIT(0)
+-#define ADIS16475_LSB_FIR_MASK BIT(1)
++#define ADIS16475_LSB_DEC_MASK 0
++#define ADIS16475_LSB_FIR_MASK 1
+
+ enum {
+ ADIS16475_SYNC_DIRECT = 1,
+@@ -1127,6 +1127,7 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
+ struct device *dev = &st->adis.spi->dev;
+ const struct adis16475_sync *sync;
+ u32 sync_mode;
++ u16 val;
+
+ /* default to internal clk */
+ st->clk_freq = st->info->int_clk * 1000;
+@@ -1188,8 +1189,9 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
+ * I'm keeping this for simplicity and avoiding extra variables
+ * in chip_info.
+ */
++ val = ADIS16475_SYNC_MODE(sync->sync_mode);
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+- ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
++ ADIS16475_SYNC_MODE_MASK, val);
+ if (ret)
+ return ret;
+
+@@ -1244,50 +1246,6 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
+ return 0;
+ }
+
+-static const struct of_device_id adis16475_of_match[] = {
+- { .compatible = "adi,adis16470",
+- .data = &adis16475_chip_info[ADIS16470] },
+- { .compatible = "adi,adis16475-1",
+- .data = &adis16475_chip_info[ADIS16475_1] },
+- { .compatible = "adi,adis16475-2",
+- .data = &adis16475_chip_info[ADIS16475_2] },
+- { .compatible = "adi,adis16475-3",
+- .data = &adis16475_chip_info[ADIS16475_3] },
+- { .compatible = "adi,adis16477-1",
+- .data = &adis16475_chip_info[ADIS16477_1] },
+- { .compatible = "adi,adis16477-2",
+- .data = &adis16475_chip_info[ADIS16477_2] },
+- { .compatible = "adi,adis16477-3",
+- .data = &adis16475_chip_info[ADIS16477_3] },
+- { .compatible = "adi,adis16465-1",
+- .data = &adis16475_chip_info[ADIS16465_1] },
+- { .compatible = "adi,adis16465-2",
+- .data = &adis16475_chip_info[ADIS16465_2] },
+- { .compatible = "adi,adis16465-3",
+- .data = &adis16475_chip_info[ADIS16465_3] },
+- { .compatible = "adi,adis16467-1",
+- .data = &adis16475_chip_info[ADIS16467_1] },
+- { .compatible = "adi,adis16467-2",
+- .data = &adis16475_chip_info[ADIS16467_2] },
+- { .compatible = "adi,adis16467-3",
+- .data = &adis16475_chip_info[ADIS16467_3] },
+- { .compatible = "adi,adis16500",
+- .data = &adis16475_chip_info[ADIS16500] },
+- { .compatible = "adi,adis16505-1",
+- .data = &adis16475_chip_info[ADIS16505_1] },
+- { .compatible = "adi,adis16505-2",
+- .data = &adis16475_chip_info[ADIS16505_2] },
+- { .compatible = "adi,adis16505-3",
+- .data = &adis16475_chip_info[ADIS16505_3] },
+- { .compatible = "adi,adis16507-1",
+- .data = &adis16475_chip_info[ADIS16507_1] },
+- { .compatible = "adi,adis16507-2",
+- .data = &adis16475_chip_info[ADIS16507_2] },
+- { .compatible = "adi,adis16507-3",
+- .data = &adis16475_chip_info[ADIS16507_3] },
+- { },
+-};
+-MODULE_DEVICE_TABLE(of, adis16475_of_match);
+
+ static int adis16475_probe(struct spi_device *spi)
+ {
+@@ -1301,7 +1259,7 @@ static int adis16475_probe(struct spi_device *spi)
+
+ st = iio_priv(indio_dev);
+
+- st->info = device_get_match_data(&spi->dev);
++ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -EINVAL;
+
+@@ -1341,12 +1299,83 @@ static int adis16475_probe(struct spi_device *spi)
+ return 0;
+ }
+
++static const struct of_device_id adis16475_of_match[] = {
++ { .compatible = "adi,adis16470",
++ .data = &adis16475_chip_info[ADIS16470] },
++ { .compatible = "adi,adis16475-1",
++ .data = &adis16475_chip_info[ADIS16475_1] },
++ { .compatible = "adi,adis16475-2",
++ .data = &adis16475_chip_info[ADIS16475_2] },
++ { .compatible = "adi,adis16475-3",
++ .data = &adis16475_chip_info[ADIS16475_3] },
++ { .compatible = "adi,adis16477-1",
++ .data = &adis16475_chip_info[ADIS16477_1] },
++ { .compatible = "adi,adis16477-2",
++ .data = &adis16475_chip_info[ADIS16477_2] },
++ { .compatible = "adi,adis16477-3",
++ .data = &adis16475_chip_info[ADIS16477_3] },
++ { .compatible = "adi,adis16465-1",
++ .data = &adis16475_chip_info[ADIS16465_1] },
++ { .compatible = "adi,adis16465-2",
++ .data = &adis16475_chip_info[ADIS16465_2] },
++ { .compatible = "adi,adis16465-3",
++ .data = &adis16475_chip_info[ADIS16465_3] },
++ { .compatible = "adi,adis16467-1",
++ .data = &adis16475_chip_info[ADIS16467_1] },
++ { .compatible = "adi,adis16467-2",
++ .data = &adis16475_chip_info[ADIS16467_2] },
++ { .compatible = "adi,adis16467-3",
++ .data = &adis16475_chip_info[ADIS16467_3] },
++ { .compatible = "adi,adis16500",
++ .data = &adis16475_chip_info[ADIS16500] },
++ { .compatible = "adi,adis16505-1",
++ .data = &adis16475_chip_info[ADIS16505_1] },
++ { .compatible = "adi,adis16505-2",
++ .data = &adis16475_chip_info[ADIS16505_2] },
++ { .compatible = "adi,adis16505-3",
++ .data = &adis16475_chip_info[ADIS16505_3] },
++ { .compatible = "adi,adis16507-1",
++ .data = &adis16475_chip_info[ADIS16507_1] },
++ { .compatible = "adi,adis16507-2",
++ .data = &adis16475_chip_info[ADIS16507_2] },
++ { .compatible = "adi,adis16507-3",
++ .data = &adis16475_chip_info[ADIS16507_3] },
++ { },
++};
++MODULE_DEVICE_TABLE(of, adis16475_of_match);
++
++static const struct spi_device_id adis16475_ids[] = {
++ { "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] },
++ { "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] },
++ { "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] },
++ { "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] },
++ { "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] },
++ { "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] },
++ { "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] },
++ { "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] },
++ { "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] },
++ { "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] },
++ { "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] },
++ { "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] },
++ { "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] },
++ { "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] },
++ { "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] },
++ { "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] },
++ { "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] },
++ { "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] },
++ { "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] },
++ { "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] },
++ { }
++};
++MODULE_DEVICE_TABLE(spi, adis16475_ids);
++
+ static struct spi_driver adis16475_driver = {
+ .driver = {
+ .name = "adis16475",
+ .of_match_table = adis16475_of_match,
+ },
+ .probe = adis16475_probe,
++ .id_table = adis16475_ids,
+ };
+ module_spi_driver(adis16475_driver);
+
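The hunk above pairs the relocated OF table with a new spi_device_id table and switches probe() to spi_get_device_match_data(), which tries firmware (OF/ACPI) match data first and falls back to the SPI id table, so ACPI boards and id-based instantiation both resolve a chip_info. The sketch below is a minimal userspace model of that lookup order; all names and register values in it are hypothetical stand-ins, not the kernel API.

/*
 * Userspace-only model of the spi_get_device_match_data() lookup order:
 * firmware (OF/ACPI) match data first, then the legacy spi_device_id
 * table. All names and values below are hypothetical stand-ins.
 * Build: gcc -Wall match_data_demo.c
 */
#include <stdio.h>
#include <string.h>

struct chip_info { const char *name; int whoami; };

static const struct chip_info chips[] = {
	{ "adis16470", 0x4040 },
	{ "adis16500", 0x4074 },
};

struct of_entry  { const char *compatible; const struct chip_info *data; };
struct spi_entry { const char *name;       const struct chip_info *data; };

static const struct of_entry of_table[] = {
	{ "adi,adis16470", &chips[0] },
	{ "adi,adis16500", &chips[1] },
	{ NULL, NULL },
};

static const struct spi_entry spi_table[] = {
	{ "adis16470", &chips[0] },
	{ "adis16500", &chips[1] },
	{ NULL, NULL },
};

/* Firmware data wins; the id table is the fallback for non-DT boards. */
static const struct chip_info *get_match_data(const char *compat,
					      const char *modalias)
{
	const struct of_entry *of;
	const struct spi_entry *id;

	for (of = of_table; compat && of->compatible; of++)
		if (!strcmp(of->compatible, compat))
			return of->data;
	for (id = spi_table; modalias && id->name; id++)
		if (!strcmp(id->name, modalias))
			return id->data;
	return NULL;
}

int main(void)
{
	/* no firmware node: falls back to the spi id table */
	const struct chip_info *info = get_match_data(NULL, "adis16500");

	printf("matched %s (whoami 0x%x)\n", info->name, info->whoami);
	return 0;
}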
+diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
+index 83e53acfbe8801..c7f5866a177d90 100644
+--- a/drivers/iio/imu/bno055/Kconfig
++++ b/drivers/iio/imu/bno055/Kconfig
+@@ -8,6 +8,7 @@ config BOSCH_BNO055
+ config BOSCH_BNO055_SERIAL
+ tristate "Bosch BNO055 attached via UART"
+ depends on SERIAL_DEV_BUS
++ select REGMAP
+ select BOSCH_BNO055
+ help
+ Enable this to support Bosch BNO055 IMUs attached via UART.
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+index b1e4fde27d2560..72e95413810277 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+@@ -129,10 +129,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ /* update data FIFO write */
+ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+ ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+- if (ret)
+- goto out_unlock;
+-
+- ret = inv_icm42600_buffer_update_watermark(st);
+
+ out_unlock:
+ mutex_unlock(&st->lock);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+index 3bf946e56e1dfd..f1629f77d6063f 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+@@ -129,10 +129,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ /* update data FIFO write */
+ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+ ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+- if (ret)
+- goto out_unlock;
+-
+- ret = inv_icm42600_buffer_update_watermark(st);
+
+ out_unlock:
+ mutex_unlock(&st->lock);
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 29f906c884bd8b..a9a5fb266ef138 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -749,13 +749,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
+ chan->channel2, val);
+ mutex_unlock(&st->lock);
+- return IIO_VAL_INT;
++ return ret;
+ case IIO_ACCEL:
+ mutex_lock(&st->lock);
+ ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
+ chan->channel2, val);
+ mutex_unlock(&st->lock);
+- return IIO_VAL_INT;
++ return ret;
+
+ default:
+ return -EINVAL;
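The two-line fix above matters because inv_mpu6050_sensor_show() returns either IIO_VAL_INT or a negative errno; hardcoding IIO_VAL_INT made bus errors look like valid readings. A minimal model of the propagation, with hypothetical names:

/*
 * Minimal sketch (hypothetical names) of why the hardcoded return was
 * wrong: sensor_show() may fail with a negative errno, and returning
 * IIO_VAL_INT unconditionally would report a stale *val as valid data.
 * Build: gcc -Wall retprop_demo.c
 */
#include <errno.h>
#include <stdio.h>

#define IIO_VAL_INT 1

static int sensor_show(int fail, int *val)
{
	if (fail)
		return -EIO;	/* e.g. a bus error */
	*val = 42;
	return IIO_VAL_INT;	/* success: tells the caller how *val is encoded */
}

static int read_raw(int fail, int *val)
{
	int ret = sensor_show(fail, val);

	return ret;		/* propagate; do not return IIO_VAL_INT blindly */
}

int main(void)
{
	int val = -1;

	printf("ok path:   %d (val=%d)\n", read_raw(0, &val), val);
	printf("fail path: %d\n", read_raw(1, &val));
	return 0;
}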
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+index 66d4ba088e70ff..d4f9b5d8d28d6d 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -109,6 +109,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+ /* compute and process only all complete datum */
+ nb = fifo_count / bytes_per_datum;
+ fifo_count = nb * bytes_per_datum;
++ if (nb == 0)
++ goto end_session;
+ /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
+ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
+ inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp);
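The added nb == 0 check covers the case where the FIFO holds less than one complete datum: the integer division yields zero records, and the timestamping code must not be fed a zero sample count. A standalone model of the guard (hypothetical names):

/*
 * Sketch of the complete-datum check added above: if the FIFO holds
 * less than one full record, bail out instead of processing zero
 * samples. Hypothetical userspace model. Build: gcc -Wall fifo_demo.c
 */
#include <stdio.h>

static void process_fifo(unsigned int fifo_count, unsigned int bytes_per_datum)
{
	unsigned int nb = fifo_count / bytes_per_datum;

	if (nb == 0) {		/* partial datum only: nothing to consume */
		printf("fifo_count=%u: skipped\n", fifo_count);
		return;
	}
	printf("fifo_count=%u: processing %u records (%u bytes)\n",
	       fifo_count, nb, nb * bytes_per_datum);
}

int main(void)
{
	process_fifo(5, 12);	/* partial record */
	process_fifo(30, 12);	/* two complete records, 6 bytes left over */
	return 0;
}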
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+index 676704f9151fcb..e6e6e94452a328 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+@@ -111,6 +111,7 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
+ if (enable) {
+ /* reset timestamping */
+ inv_sensors_timestamp_reset(&st->timestamp);
++ inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0);
+ /* reset FIFO */
+ d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
+ ret = regmap_write(st->map, st->reg->user_ctrl, d);
+@@ -184,6 +185,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
+ if (result)
+ goto error_power_off;
+ } else {
++ st->chip_config.gyro_fifo_enable = 0;
++ st->chip_config.accl_fifo_enable = 0;
++ st->chip_config.temp_fifo_enable = 0;
++ st->chip_config.magn_fifo_enable = 0;
+ result = inv_mpu6050_prepare_fifo(st, false);
+ if (result)
+ goto error_power_off;
+diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
+new file mode 100644
+index 00000000000000..2fea2bbbe47fd0
+--- /dev/null
++++ b/drivers/iio/industrialio-backend.c
+@@ -0,0 +1,418 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Framework to handle complex IIO aggregate devices.
++ *
++ * The typical architecture is to have one device as the frontend device which
++ * can be "linked" against one or multiple backend devices. All the IIO and
++ * userspace interface is expected to be registers/managed by the frontend
++ * device which will callback into the backends when needed (to get/set some
++ * configuration that it does not directly control).
++ *
++ * -------------------------------------------------------
++ * ------------------ | ------------ ------------ ------- FPGA|
++ * | ADC |------------------------| | ADC CORE |---------| DMA CORE |------| RAM | |
++ * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------| |------| | |
++ * | |------------------------| ------------ ------------ ------- |
++ * ------------------ -------------------------------------------------------
++ *
++ * The framework interface is pretty simple:
++ * - Backends should register themselves with devm_iio_backend_register()
++ * - Frontend devices should get backends with devm_iio_backend_get()
++ *
++ * Also to note that the primary target for this framework are converters like
++ * ADC/DACs, so iio_backend_ops will have some operations typical of converter
++ * devices. On top of that, this is "generic" for all of IIO, which means any
++ * kind of device can make use of the framework. That said, if the
++ * iio_backend_ops struct begins to grow out of control, we can always refactor
++ * things so that industrialio-backend.c is only left with the really generic
++ * stuff. Then, we can build on top of it depending on the needs.
++ *
++ * Copyright (C) 2023-2024 Analog Devices Inc.
++ */
++#define dev_fmt(fmt) "iio-backend: " fmt
++
++#include <linux/cleanup.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/property.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include <linux/iio/backend.h>
++
++struct iio_backend {
++ struct list_head entry;
++ const struct iio_backend_ops *ops;
++ struct device *dev;
++ struct module *owner;
++ void *priv;
++};
++
++/*
++ * Helper struct for requesting buffers. This ensures that we have all data
++ * that we need to free the buffer in a device managed action.
++ */
++struct iio_backend_buffer_pair {
++ struct iio_backend *back;
++ struct iio_buffer *buffer;
++};
++
++static LIST_HEAD(iio_back_list);
++static DEFINE_MUTEX(iio_back_lock);
++
++/*
++ * Helper macros to call backend ops. They make sure the op is supported.
++ */
++#define iio_backend_check_op(back, op) ({ \
++ struct iio_backend *____back = back; \
++ int ____ret = 0; \
++ \
++ if (!____back->ops->op) \
++ ____ret = -EOPNOTSUPP; \
++ \
++ ____ret; \
++})
++
++#define iio_backend_op_call(back, op, args...) ({ \
++ struct iio_backend *__back = back; \
++ int __ret; \
++ \
++ __ret = iio_backend_check_op(__back, op); \
++ if (!__ret) \
++ __ret = __back->ops->op(__back, ##args); \
++ \
++ __ret; \
++})
++
++#define iio_backend_ptr_op_call(back, op, args...) ({ \
++ struct iio_backend *__back = back; \
++ void *ptr_err; \
++ int __ret; \
++ \
++ __ret = iio_backend_check_op(__back, op); \
++ if (__ret) \
++ ptr_err = ERR_PTR(__ret); \
++ else \
++ ptr_err = __back->ops->op(__back, ##args); \
++ \
++ ptr_err; \
++})
++
++#define iio_backend_void_op_call(back, op, args...) { \
++ struct iio_backend *__back = back; \
++ int __ret; \
++ \
++ __ret = iio_backend_check_op(__back, op); \
++ if (!__ret) \
++ __back->ops->op(__back, ##args); \
++}
++
++/**
++ * iio_backend_chan_enable - Enable a backend channel
++ * @back: Backend device
++ * @chan: Channel number
++ *
++ * RETURNS:
++ * 0 on success, negative error number on failure.
++ */
++int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
++{
++ return iio_backend_op_call(back, chan_enable, chan);
++}
++EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);
++
++/**
++ * iio_backend_chan_disable - Disable a backend channel
++ * @back: Backend device
++ * @chan: Channel number
++ *
++ * RETURNS:
++ * 0 on success, negative error number on failure.
++ */
++int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
++{
++ return iio_backend_op_call(back, chan_disable, chan);
++}
++EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);
++
++static void __iio_backend_disable(void *back)
++{
++ iio_backend_void_op_call(back, disable);
++}
++
++/**
++ * devm_iio_backend_enable - Device managed backend enable
++ * @dev: Consumer device for the backend
++ * @back: Backend device
++ *
++ * RETURNS:
++ * 0 on success, negative error number on failure.
++ */
++int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
++{
++ int ret;
++
++ ret = iio_backend_op_call(back, enable);
++ if (ret)
++ return ret;
++
++ return devm_add_action_or_reset(dev, __iio_backend_disable, back);
++}
++EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);
++
++/**
++ * iio_backend_data_format_set - Configure the channel data format
++ * @back: Backend device
++ * @chan: Channel number
++ * @data: Data format
++ *
++ * Properly configure a channel with respect to the expected data format. A
++ * @struct iio_backend_data_fmt must be passed with the settings.
++ *
++ * RETURNS:
++ * 0 on success, negative error number on failure.
++ */
++int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
++ const struct iio_backend_data_fmt *data)
++{
++ if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
++ return -EINVAL;
++
++ return iio_backend_op_call(back, data_format_set, chan, data);
++}
++EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);
++
++static void iio_backend_free_buffer(void *arg)
++{
++ struct iio_backend_buffer_pair *pair = arg;
++
++ iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
++}
++
++/**
++ * devm_iio_backend_request_buffer - Device managed buffer request
++ * @dev: Consumer device for the backend
++ * @back: Backend device
++ * @indio_dev: IIO device
++ *
++ * Request an IIO buffer from the backend. The type of the buffer (typically
++ * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
++ * normally, the backend dictates what kind of buffering we can get.
++ *
++ * The backend .free_buffer() hook is automatically called on @dev detach.
++ *
++ * RETURNS:
++ * 0 on success, negative error number on failure.
++ */
++int devm_iio_backend_request_buffer(struct device *dev,
++ struct iio_backend *back,
++ struct iio_dev *indio_dev)
++{
++ struct iio_backend_buffer_pair *pair;
++ struct iio_buffer *buffer;
++
++ pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
++ if (!pair)
++ return -ENOMEM;
++
++ buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
++ if (IS_ERR(buffer))
++ return PTR_ERR(buffer);
++
++	/* a weak reference should be all we need */
++ pair->back = back;
++ pair->buffer = buffer;
++
++ return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
++}
++EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);
++
++static void iio_backend_release(void *arg)
++{
++ struct iio_backend *back = arg;
++
++ module_put(back->owner);
++}
++
++static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
++{
++ struct device_link *link;
++ int ret;
++
++ /*
++ * Make sure the provider cannot be unloaded before the consumer module.
++	 * Note that device_links would still guarantee that nothing is
++	 * accessible (and breaks), but this makes it explicit that the
++	 * consumer module must also be unloaded.
++ */
++ if (!try_module_get(back->owner))
++ return dev_err_probe(dev, -ENODEV,
++ "Cannot get module reference\n");
++
++ ret = devm_add_action_or_reset(dev, iio_backend_release, back);
++ if (ret)
++ return ret;
++
++ link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
++ if (!link)
++ return dev_err_probe(dev, -EINVAL,
++ "Could not link to supplier(%s)\n",
++ dev_name(back->dev));
++
++ dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));
++
++ return 0;
++}
++
++/**
++ * devm_iio_backend_get - Device managed backend device get
++ * @dev: Consumer device for the backend
++ * @name: Backend name
++ *
++ * Gets the backend associated with @dev.
++ *
++ * RETURNS:
++ * A backend pointer, negative error pointer otherwise.
++ */
++struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
++{
++ struct fwnode_handle *fwnode;
++ struct iio_backend *back;
++ unsigned int index;
++ int ret;
++
++ if (name) {
++ ret = device_property_match_string(dev, "io-backend-names",
++ name);
++ if (ret < 0)
++ return ERR_PTR(ret);
++ index = ret;
++ } else {
++ index = 0;
++ }
++
++ fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
++ if (IS_ERR(fwnode)) {
++ dev_err_probe(dev, PTR_ERR(fwnode),
++ "Cannot get Firmware reference\n");
++ return ERR_CAST(fwnode);
++ }
++
++ guard(mutex)(&iio_back_lock);
++ list_for_each_entry(back, &iio_back_list, entry) {
++ if (!device_match_fwnode(back->dev, fwnode))
++ continue;
++
++ fwnode_handle_put(fwnode);
++ ret = __devm_iio_backend_get(dev, back);
++ if (ret)
++ return ERR_PTR(ret);
++
++ return back;
++ }
++
++ fwnode_handle_put(fwnode);
++ return ERR_PTR(-EPROBE_DEFER);
++}
++EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
++
++/**
++ * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
++ * @dev: Consumer device for the backend
++ * @fwnode: Firmware node of the backend device
++ *
++ * Search the backend list for a device matching @fwnode.
++ * This API should not be used; it is only present to prevent the first
++ * user of this framework from breaking its DT ABI.
++ *
++ * RETURNS:
++ * A backend pointer, negative error pointer otherwise.
++ */
++struct iio_backend *
++__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
++ struct fwnode_handle *fwnode)
++{
++ struct iio_backend *back;
++ int ret;
++
++ guard(mutex)(&iio_back_lock);
++ list_for_each_entry(back, &iio_back_list, entry) {
++ if (!device_match_fwnode(back->dev, fwnode))
++ continue;
++
++ ret = __devm_iio_backend_get(dev, back);
++ if (ret)
++ return ERR_PTR(ret);
++
++ return back;
++ }
++
++ return ERR_PTR(-EPROBE_DEFER);
++}
++EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);
++
++/**
++ * iio_backend_get_priv - Get driver private data
++ * @back: Backend device
++ */
++void *iio_backend_get_priv(const struct iio_backend *back)
++{
++ return back->priv;
++}
++EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);
++
++static void iio_backend_unregister(void *arg)
++{
++ struct iio_backend *back = arg;
++
++ guard(mutex)(&iio_back_lock);
++ list_del(&back->entry);
++}
++
++/**
++ * devm_iio_backend_register - Device managed backend device register
++ * @dev: Backend device being registered
++ * @ops: Backend ops
++ * @priv: Device private data
++ *
++ * @ops is mandatory. Not providing it results in -EINVAL.
++ *
++ * RETURNS:
++ * 0 on success, negative error number on failure.
++ */
++int devm_iio_backend_register(struct device *dev,
++ const struct iio_backend_ops *ops, void *priv)
++{
++ struct iio_backend *back;
++
++ if (!ops)
++ return dev_err_probe(dev, -EINVAL, "No backend ops given\n");
++
++ /*
++	 * Through device_links, we guarantee that a frontend device cannot be
++	 * bound (or even exist) if the backend driver is not around. Hence, we
++	 * can tie the backend object's lifetime to the device being passed,
++	 * since removing it will tear the frontend/consumer down.
++ */
++ back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
++ if (!back)
++ return -ENOMEM;
++
++ back->ops = ops;
++ back->owner = dev->driver->owner;
++ back->dev = dev;
++ back->priv = priv;
++ scoped_guard(mutex, &iio_back_lock)
++ list_add(&back->entry, &iio_back_list);
++
++ return devm_add_action_or_reset(dev, iio_backend_unregister, back);
++}
++EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);
++
++MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
++MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
++MODULE_LICENSE("GPL");
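The load-bearing piece of this new file is the iio_backend_op_call() macro family: every op in iio_backend_ops is optional, and a missing hook degrades to -EOPNOTSUPP rather than a NULL dereference. The userspace sketch below models that pattern with hypothetical names; like the kernel version, it relies on the GCC/Clang statement-expression extension:

/*
 * Userspace model of the iio_backend_op_call() pattern: optional ops in
 * a vtable, with a statement-expression macro that returns -EOPNOTSUPP
 * when a backend does not implement a hook. Hypothetical names.
 * Build: gcc -Wall opcall_demo.c
 */
#include <errno.h>
#include <stdio.h>

struct backend;

struct backend_ops {
	int (*chan_enable)(struct backend *back, unsigned int chan);
	int (*chan_disable)(struct backend *back, unsigned int chan);
};

struct backend { const struct backend_ops *ops; };

#define backend_op_call(back, op, args...) ({			\
	struct backend *__back = (back);			\
	int __ret;						\
								\
	if (!__back->ops->op)					\
		__ret = -EOPNOTSUPP;				\
	else							\
		__ret = __back->ops->op(__back, ##args);	\
	__ret;							\
})

static int demo_chan_enable(struct backend *back, unsigned int chan)
{
	printf("channel %u enabled\n", chan);
	return 0;
}

static const struct backend_ops demo_ops = {
	.chan_enable = demo_chan_enable,
	/* .chan_disable intentionally left unimplemented */
};

int main(void)
{
	struct backend back = { .ops = &demo_ops };

	printf("enable:  %d\n", backend_op_call(&back, chan_enable, 0));
	printf("disable: %d\n", backend_op_call(&back, chan_disable, 0));
	return 0;
}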
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index d752e9c0499b96..121bde49ccb7d4 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -752,9 +752,11 @@ static ssize_t iio_read_channel_info(struct device *dev,
+ INDIO_MAX_RAW_ELEMENTS,
+ vals, &val_len,
+ this_attr->address);
+- else
++ else if (indio_dev->info->read_raw)
+ ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
+ &vals[0], &vals[1], this_attr->address);
++ else
++ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+@@ -836,6 +838,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
+ int length;
+ int type;
+
++ if (!indio_dev->info->read_avail)
++ return -EINVAL;
++
+ ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+ &vals, &type, &length,
+ this_attr->address);
+@@ -1577,10 +1582,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
+ ret = iio_device_register_sysfs_group(indio_dev,
+ &iio_dev_opaque->chan_attr_group);
+ if (ret)
+- goto error_clear_attrs;
++ goto error_free_chan_attrs;
+
+ return 0;
+
++error_free_chan_attrs:
++ kfree(iio_dev_opaque->chan_attr_group.attrs);
++ iio_dev_opaque->chan_attr_group.attrs = NULL;
+ error_clear_attrs:
+ iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
+
+@@ -1646,8 +1654,10 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
+ return NULL;
+
+ indio_dev = &iio_dev_opaque->indio_dev;
+- indio_dev->priv = (char *)iio_dev_opaque +
+- ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
++
++ if (sizeof_priv)
++ indio_dev->priv = (char *)iio_dev_opaque +
++ ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);
+
+ indio_dev->dev.parent = parent;
+ indio_dev->dev.type = &iio_device_type;
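The new error_free_chan_attrs label above restores strict reverse-order unwinding: the attrs array allocated last is freed first, then the earlier attribute list. A minimal standalone illustration of the goto-unwind idiom (hypothetical names; the success-path frees are demo-only, a real driver keeps the objects registered):

/*
 * Sketch of reverse-order error unwinding with goto labels: each label
 * frees exactly what was allocated before the failure point, newest
 * first. Build: gcc -Wall unwind_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

static int register_sysfs(int fail_late)
{
	char *attr_list, *attrs;

	attr_list = malloc(32);		/* allocated first */
	if (!attr_list)
		return -1;

	attrs = malloc(64);		/* allocated second */
	if (!attrs)
		goto error_clear_list;

	if (fail_late)			/* e.g. group registration fails */
		goto error_free_attrs;

	free(attrs);			/* demo-only cleanup */
	free(attr_list);
	return 0;

error_free_attrs:			/* undo the newest allocation first */
	free(attrs);
error_clear_list:
	free(attr_list);
	return -1;
}

int main(void)
{
	printf("ok: %d, late failure: %d\n",
	       register_sysfs(0), register_sysfs(1));
	return 0;
}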
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index 19f7a91157ee4d..f67e4afa5f94b3 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -285,6 +285,9 @@ static ssize_t iio_ev_state_store(struct device *dev,
+ if (ret < 0)
+ return ret;
+
++ if (!indio_dev->info->write_event_config)
++ return -EINVAL;
++
+ ret = indio_dev->info->write_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), val);
+@@ -300,6 +303,9 @@ static ssize_t iio_ev_state_show(struct device *dev,
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int val;
+
++ if (!indio_dev->info->read_event_config)
++ return -EINVAL;
++
+ val = indio_dev->info->read_event_config(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr));
+@@ -318,6 +324,9 @@ static ssize_t iio_ev_value_show(struct device *dev,
+ int val, val2, val_arr[2];
+ int ret;
+
++ if (!indio_dev->info->read_event_value)
++ return -EINVAL;
++
+ ret = indio_dev->info->read_event_value(indio_dev,
+ this_attr->c, iio_ev_attr_type(this_attr),
+ iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
+diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
+index 7653261d2dc2bf..59d7615c0f565c 100644
+--- a/drivers/iio/industrialio-gts-helper.c
++++ b/drivers/iio/industrialio-gts-helper.c
+@@ -34,24 +34,11 @@
+ static int iio_gts_get_gain(const u64 max, const u64 scale)
+ {
+ u64 full = max;
+- int tmp = 1;
+
+ if (scale > full || !scale)
+ return -EINVAL;
+
+- if (U64_MAX - full < scale) {
+- /* Risk of overflow */
+- if (full - scale < scale)
+- return 1;
+-
+- full -= scale;
+- tmp++;
+- }
+-
+- while (full > scale * (u64)tmp)
+- tmp++;
+-
+- return tmp;
++ return div64_u64(full, scale);
+ }
+
+ /**
+@@ -375,17 +362,20 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts)
+ for (i = gts->num_itime - 1; i >= 0; i--) {
+ int new = gts->itime_table[i].time_us;
+
+- if (times[idx] < new) {
++ if (idx == 0 || times[idx - 1] < new) {
+ times[idx++] = new;
+ continue;
+ }
+
+- for (j = 0; j <= idx; j++) {
++ for (j = 0; j < idx; j++) {
++ if (times[j] == new)
++ break;
+ if (times[j] > new) {
+ memmove(&times[j + 1], &times[j],
+ (idx - j) * sizeof(int));
+ times[j] = new;
+ idx++;
++ break;
+ }
+ }
+ }
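The rewritten loop above builds the available-times table as a sorted, duplicate-free array: equal values are skipped, and the memmove insertion now breaks once the element is placed instead of walking on. A runnable model of the corrected insertion:

/*
 * Standalone model of the corrected insertion loop above: build a
 * sorted, duplicate-free table with memmove-based insertion, breaking
 * out once the element is placed. Build: gcc -Wall sorted_insert_demo.c
 */
#include <stdio.h>
#include <string.h>

static int build_sorted_unique(const int *in, int n, int *out)
{
	int idx = 0, i, j;

	for (i = 0; i < n; i++) {
		int new = in[i];

		if (idx == 0 || out[idx - 1] < new) {	/* append */
			out[idx++] = new;
			continue;
		}
		for (j = 0; j < idx; j++) {
			if (out[j] == new)		/* duplicate: skip */
				break;
			if (out[j] > new) {		/* insert before j */
				memmove(&out[j + 1], &out[j],
					(idx - j) * sizeof(int));
				out[j] = new;
				idx++;
				break;
			}
		}
	}
	return idx;
}

int main(void)
{
	const int times[] = { 400, 200, 400, 50, 100, 50 };
	int out[6], n, i;

	n = build_sorted_unique(times, 6, out);
	for (i = 0; i < n; i++)
		printf("%d ", out[i]);
	printf("\n");	/* expect: 50 100 200 400 */
	return 0;
}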
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index 18f83158f637f1..b5fed8a000ea98 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -322,7 +322,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ * this is the case if the IIO device and the trigger device share the
+ * same parent device.
+ */
+- if (iio_validate_own_trigger(pf->indio_dev, trig))
++ if (!iio_validate_own_trigger(pf->indio_dev, trig))
+ trig->attached_own_device = true;
+
+ return ret;
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 7a1f6713318a36..80e1c45485c9b0 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -562,6 +562,7 @@ EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
+ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum info)
+ {
++ const struct iio_info *iio_info = chan->indio_dev->info;
+ int unused;
+ int vals[INDIO_MAX_RAW_ELEMENTS];
+ int ret;
+@@ -573,15 +574,18 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
+ if (!iio_channel_has_info(chan->channel, info))
+ return -EINVAL;
+
+- if (chan->indio_dev->info->read_raw_multi) {
+- ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
+- chan->channel, INDIO_MAX_RAW_ELEMENTS,
+- vals, &val_len, info);
++ if (iio_info->read_raw_multi) {
++ ret = iio_info->read_raw_multi(chan->indio_dev,
++ chan->channel,
++ INDIO_MAX_RAW_ELEMENTS,
++ vals, &val_len, info);
+ *val = vals[0];
+ *val2 = vals[1];
++ } else if (iio_info->read_raw) {
++ ret = iio_info->read_raw(chan->indio_dev,
++ chan->channel, val, val2, info);
+ } else {
+- ret = chan->indio_dev->info->read_raw(chan->indio_dev,
+- chan->channel, val, val2, info);
++ return -EINVAL;
+ }
+
+ return ret;
+@@ -676,17 +680,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ if (scale_val2 < 0)
+- *processed = -raw64 * scale_val;
++ *processed = -raw64 * scale_val * scale;
+ else
+- *processed = raw64 * scale_val;
++ *processed = raw64 * scale_val * scale;
+ *processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 1000000LL);
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ if (scale_val2 < 0)
+- *processed = -raw64 * scale_val;
++ *processed = -raw64 * scale_val * scale;
+ else
+- *processed = raw64 * scale_val;
++ *processed = raw64 * scale_val * scale;
+ *processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 1000000000LL);
+ break;
+@@ -801,11 +805,15 @@ static int iio_channel_read_avail(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum info)
+ {
++ const struct iio_info *iio_info = chan->indio_dev->info;
++
+ if (!iio_channel_has_available(chan->channel, info))
+ return -EINVAL;
+
+- return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+- vals, type, length, info);
++ if (iio_info->read_avail)
++ return iio_info->read_avail(chan->indio_dev, chan->channel,
++ vals, type, length, info);
++ return -EINVAL;
+ }
+
+ int iio_read_avail_channel_attribute(struct iio_channel *chan,
+@@ -995,8 +1003,12 @@ EXPORT_SYMBOL_GPL(iio_get_channel_type);
+ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum info)
+ {
+- return chan->indio_dev->info->write_raw(chan->indio_dev,
+- chan->channel, val, val2, info);
++ const struct iio_info *iio_info = chan->indio_dev->info;
++
++ if (iio_info->write_raw)
++ return iio_info->write_raw(chan->indio_dev,
++ chan->channel, val, val2, info);
++ return -EINVAL;
+ }
+
+ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
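The scale fix in iio_convert_raw_to_processed_unlocked() is easy to verify by hand: for IIO_VAL_INT_PLUS_MICRO the channel scale is scale_val + scale_val2/10^6, and the caller's unit factor must multiply both parts, so processed = raw * scale_val * scale + raw * scale_val2 * scale / 10^6. A small arithmetic check of the before/after behaviour:

/*
 * Arithmetic check of the IIO_VAL_INT_PLUS_MICRO fix above: the
 * caller's unit factor 'scale' must multiply the integer part of the
 * channel scale too, not only the micro part.
 * Build: gcc -Wall scale_demo.c
 */
#include <stdio.h>
#include <stdint.h>

static int64_t processed(int64_t raw, int scale_val, int scale_val2,
			 int scale)
{
	int64_t p = raw * scale_val * scale;		/* fixed: '* scale' */

	p += raw * (int64_t)scale_val2 * scale / 1000000;
	return p;
}

int main(void)
{
	/* channel scale 1.25 (val=1, val2=250000), converting to
	 * milli-units: raw 100 * 1.25 * 1000 should give 125000 */
	int64_t buggy = 100 * 1 + 100LL * 250000 * 1000 / 1000000;

	printf("buggy: %lld, fixed: %lld\n", (long long)buggy,
	       (long long)processed(100, 1, 250000, 1000));
	return 0;
}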
+diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
+index 45edba797e4c7e..ddbd1838650ec8 100644
+--- a/drivers/iio/light/Kconfig
++++ b/drivers/iio/light/Kconfig
+@@ -294,6 +294,8 @@ config ROHM_BU27008
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_GTS_HELPER
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ Enable support for the ROHM BU27008 color sensor.
+ The ROHM BU27008 is a sensor with 5 photodiodes (red, green,
+diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
+index eb1aedad7edcc9..3c8b9aab5da7cf 100644
+--- a/drivers/iio/light/hid-sensor-als.c
++++ b/drivers/iio/light/hid-sensor-als.c
+@@ -226,6 +226,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
+ case HID_USAGE_SENSOR_TIME_TIMESTAMP:
+ als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
+ *(s64 *)raw_data);
++ ret = 0;
+ break;
+ default:
+ break;
+diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
+index cb41e5ee8ec10b..dc529cbe3805e2 100644
+--- a/drivers/iio/light/opt3001.c
++++ b/drivers/iio/light/opt3001.c
+@@ -138,6 +138,10 @@ static const struct opt3001_scale opt3001_scales[] = {
+ .val = 20966,
+ .val2 = 400000,
+ },
++ {
++ .val = 41932,
++ .val2 = 800000,
++ },
+ {
+ .val = 83865,
+ .val2 = 600000,
+diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
+index 043f233d9bdb06..433f58e1dd66c5 100644
+--- a/drivers/iio/light/veml6030.c
++++ b/drivers/iio/light/veml6030.c
+@@ -99,9 +99,8 @@ static const char * const period_values[] = {
+ static ssize_t in_illuminance_period_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ struct veml6030_data *data = iio_priv(dev_to_iio_dev(dev));
+ int ret, reg, x;
+- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+- struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+@@ -780,7 +779,7 @@ static int veml6030_hw_init(struct iio_dev *indio_dev)
+
+ /* Cache currently active measurement parameters */
+ data->cur_gain = 3;
+- data->cur_resolution = 4608;
++ data->cur_resolution = 5376;
+ data->cur_integration_time = 3;
+
+ return ret;
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index eb706d0bf70bc0..3a98d6bae1b203 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -204,7 +204,6 @@ static long ak09912_raw_to_gauss(u16 data)
+
+ /* Compatible Asahi Kasei Compass parts */
+ enum asahi_compass_chipset {
+- AKXXXX = 0,
+ AK8975,
+ AK8963,
+ AK09911,
+@@ -248,7 +247,7 @@ struct ak_def {
+ };
+
+ static const struct ak_def ak_def_array[] = {
+- {
++ [AK8975] = {
+ .type = AK8975,
+ .raw_to_gauss = ak8975_raw_to_gauss,
+ .range = 4096,
+@@ -273,7 +272,7 @@ static const struct ak_def ak_def_array[] = {
+ AK8975_REG_HYL,
+ AK8975_REG_HZL},
+ },
+- {
++ [AK8963] = {
+ .type = AK8963,
+ .raw_to_gauss = ak8963_09911_raw_to_gauss,
+ .range = 8190,
+@@ -298,7 +297,7 @@ static const struct ak_def ak_def_array[] = {
+ AK8975_REG_HYL,
+ AK8975_REG_HZL},
+ },
+- {
++ [AK09911] = {
+ .type = AK09911,
+ .raw_to_gauss = ak8963_09911_raw_to_gauss,
+ .range = 8192,
+@@ -323,7 +322,7 @@ static const struct ak_def ak_def_array[] = {
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
+ },
+- {
++ [AK09912] = {
+ .type = AK09912,
+ .raw_to_gauss = ak09912_raw_to_gauss,
+ .range = 32752,
+@@ -348,7 +347,7 @@ static const struct ak_def ak_def_array[] = {
+ AK09912_REG_HYL,
+ AK09912_REG_HZL},
+ },
+- {
++ [AK09916] = {
+ .type = AK09916,
+ .raw_to_gauss = ak09912_raw_to_gauss,
+ .range = 32752,
+@@ -693,22 +692,8 @@ static int ak8975_start_read_axis(struct ak8975_data *data,
+ if (ret < 0)
+ return ret;
+
+- /* This will be executed only for non-interrupt based waiting case */
+- if (ret & data->def->ctrl_masks[ST1_DRDY]) {
+- ret = i2c_smbus_read_byte_data(client,
+- data->def->ctrl_regs[ST2]);
+- if (ret < 0) {
+- dev_err(&client->dev, "Error in reading ST2\n");
+- return ret;
+- }
+- if (ret & (data->def->ctrl_masks[ST2_DERR] |
+- data->def->ctrl_masks[ST2_HOFL])) {
+- dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
+- return -EINVAL;
+- }
+- }
+-
+- return 0;
++ /* Return with zero if the data is ready. */
++ return !data->def->ctrl_regs[ST1_DRDY];
+ }
+
+ /* Retrieve raw flux value for one of the x, y, or z axis. */
+@@ -735,6 +720,20 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+ if (ret < 0)
+ goto exit;
+
++	/* Read out ST2 to release the lock on measurement data. */
++ ret = i2c_smbus_read_byte_data(client, data->def->ctrl_regs[ST2]);
++ if (ret < 0) {
++ dev_err(&client->dev, "Error in reading ST2\n");
++ goto exit;
++ }
++
++ if (ret & (data->def->ctrl_masks[ST2_DERR] |
++ data->def->ctrl_masks[ST2_HOFL])) {
++ dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
++ ret = -EINVAL;
++ goto exit;
++ }
++
+ mutex_unlock(&data->lock);
+
+ pm_runtime_mark_last_busy(&data->client->dev);
+@@ -813,13 +812,13 @@ static const struct iio_info ak8975_info = {
+ };
+
+ static const struct acpi_device_id ak_acpi_match[] = {
+- {"AK8975", AK8975},
+- {"AK8963", AK8963},
+- {"INVN6500", AK8963},
+- {"AK009911", AK09911},
+- {"AK09911", AK09911},
+- {"AKM9911", AK09911},
+- {"AK09912", AK09912},
++ {"AK8975", (kernel_ulong_t)&ak_def_array[AK8975] },
++ {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] },
++ {"INVN6500", (kernel_ulong_t)&ak_def_array[AK8963] },
++ {"AK009911", (kernel_ulong_t)&ak_def_array[AK09911] },
++ {"AK09911", (kernel_ulong_t)&ak_def_array[AK09911] },
++ {"AKM9911", (kernel_ulong_t)&ak_def_array[AK09911] },
++ {"AK09912", (kernel_ulong_t)&ak_def_array[AK09912] },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
+@@ -883,10 +882,7 @@ static int ak8975_probe(struct i2c_client *client)
+ struct iio_dev *indio_dev;
+ struct gpio_desc *eoc_gpiod;
+ struct gpio_desc *reset_gpiod;
+- const void *match;
+- unsigned int i;
+ int err;
+- enum asahi_compass_chipset chipset;
+ const char *name = NULL;
+
+ /*
+@@ -928,27 +924,15 @@ static int ak8975_probe(struct i2c_client *client)
+ return err;
+
+ /* id will be NULL when enumerated via ACPI */
+- match = device_get_match_data(&client->dev);
+- if (match) {
+- chipset = (uintptr_t)match;
+- name = dev_name(&client->dev);
+- } else if (id) {
+- chipset = (enum asahi_compass_chipset)(id->driver_data);
+- name = id->name;
+- } else
+- return -ENOSYS;
+-
+- for (i = 0; i < ARRAY_SIZE(ak_def_array); i++)
+- if (ak_def_array[i].type == chipset)
+- break;
+-
+- if (i == ARRAY_SIZE(ak_def_array)) {
+- dev_err(&client->dev, "AKM device type unsupported: %d\n",
+- chipset);
++ data->def = i2c_get_match_data(client);
++ if (!data->def)
+ return -ENODEV;
+- }
+
+- data->def = &ak_def_array[i];
++ /* If enumerated via firmware node, fix the ABI */
++ if (dev_fwnode(&client->dev))
++ name = dev_name(&client->dev);
++ else
++ name = id->name;
+
+ /* Fetch the regulators */
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+@@ -1077,28 +1061,27 @@ static DEFINE_RUNTIME_DEV_PM_OPS(ak8975_dev_pm_ops, ak8975_runtime_suspend,
+ ak8975_runtime_resume, NULL);
+
+ static const struct i2c_device_id ak8975_id[] = {
+- {"ak8975", AK8975},
+- {"ak8963", AK8963},
+- {"AK8963", AK8963},
+- {"ak09911", AK09911},
+- {"ak09912", AK09912},
+- {"ak09916", AK09916},
++ {"ak8975", (kernel_ulong_t)&ak_def_array[AK8975] },
++ {"ak8963", (kernel_ulong_t)&ak_def_array[AK8963] },
++ {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] },
++ {"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] },
++ {"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] },
++ {"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] },
+ {}
+ };
+
+ MODULE_DEVICE_TABLE(i2c, ak8975_id);
+
+ static const struct of_device_id ak8975_of_match[] = {
+- { .compatible = "asahi-kasei,ak8975", },
+- { .compatible = "ak8975", },
+- { .compatible = "asahi-kasei,ak8963", },
+- { .compatible = "ak8963", },
+- { .compatible = "asahi-kasei,ak09911", },
+- { .compatible = "ak09911", },
+- { .compatible = "asahi-kasei,ak09912", },
+- { .compatible = "ak09912", },
+- { .compatible = "asahi-kasei,ak09916", },
+- { .compatible = "ak09916", },
++ { .compatible = "asahi-kasei,ak8975", .data = &ak_def_array[AK8975] },
++ { .compatible = "ak8975", .data = &ak_def_array[AK8975] },
++ { .compatible = "asahi-kasei,ak8963", .data = &ak_def_array[AK8963] },
++ { .compatible = "ak8963", .data = &ak_def_array[AK8963] },
++ { .compatible = "asahi-kasei,ak09911", .data = &ak_def_array[AK09911] },
++ { .compatible = "ak09911", .data = &ak_def_array[AK09911] },
++ { .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] },
++ { .compatible = "ak09912", .data = &ak_def_array[AK09912] },
++ { .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, ak8975_of_match);
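Two related cleanups land in this ak8975 hunk: ak_def_array becomes enum-indexed via designated initializers, and every match table carries a pointer straight into it, which is what lets i2c_get_match_data() replace the old search loop. A short model of the designated-initializer half (hypothetical entries):

/*
 * Sketch of the enum-indexed table pattern adopted above: designated
 * initializers keep each entry glued to its enum value even if the
 * array is reordered. Build: gcc -Wall enumtable_demo.c
 */
#include <stdio.h>

enum chipset { AK8975, AK8963, AK09911 };

struct ak_def { enum chipset type; int range; };

static const struct ak_def ak_def_array[] = {
	[AK8963]  = { .type = AK8963,  .range = 8190 },
	[AK8975]  = { .type = AK8975,  .range = 4096 },	/* order is free */
	[AK09911] = { .type = AK09911, .range = 8192 },
};

int main(void)
{
	/* match tables can now point straight at an entry, no search loop */
	const struct ak_def *def = &ak_def_array[AK8963];

	printf("type=%d range=%d\n", def->type, def->range);
	return 0;
}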
+diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
+index 69938204456f8b..42b70cd42b3935 100644
+--- a/drivers/iio/magnetometer/rm3100-core.c
++++ b/drivers/iio/magnetometer/rm3100-core.c
+@@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
+ struct rm3100_data *data;
+ unsigned int tmp;
+ int ret;
++ int samp_rate_index;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+@@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
+ ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
+ if (ret < 0)
+ return ret;
++
++ samp_rate_index = tmp - RM3100_TMRC_OFFSET;
++ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) {
++ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
++ return -EINVAL;
++ }
+ /* Initializing max wait time, which is double conversion time. */
+- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
+- * 2;
++ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
+
+ /* Cycle count values may not be what we want. */
+ if ((tmp - RM3100_TMRC_OFFSET) == 0)
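The rm3100 check added above illustrates a general rule: a value read back from hardware is untrusted input, so tmp - RM3100_TMRC_OFFSET must be range-checked before it indexes rm3100_samp_rates. A standalone model with made-up register values:

/*
 * Model of the bounds check added above: a value read back from
 * hardware must be validated before it is used as a table index.
 * Hypothetical names and numbers. Build: gcc -Wall bounds_demo.c
 */
#include <stdio.h>

#define TMRC_OFFSET	0x92
#define SAMP_NUM	3

static const int samp_rates[SAMP_NUM] = { 600, 300, 150 }; /* ms */

static int conversion_time(unsigned int reg)
{
	int idx = (int)reg - TMRC_OFFSET;

	if (idx < 0 || idx >= SAMP_NUM)
		return -1;		/* reject corrupt register contents */
	return samp_rates[idx] * 2;	/* double the conversion time */
}

int main(void)
{
	printf("reg=0x93 -> %d\n", conversion_time(0x93));	/* 600 */
	printf("reg=0xff -> %d\n", conversion_time(0xff));	/* -1  */
	return 0;
}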
+diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
+index c5e5c4ad681e64..e8c4ca142d21d6 100644
+--- a/drivers/iio/magnetometer/tmag5273.c
++++ b/drivers/iio/magnetometer/tmag5273.c
+@@ -356,7 +356,7 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+- *val = -266314;
++ *val = -16005;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index a2ef1373a274e2..84f6b333c91958 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -51,7 +51,6 @@
+ */
+ enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+-
+ enum bmp380_odr {
+ BMP380_ODR_200HZ,
+ BMP380_ODR_100HZ,
+@@ -180,18 +179,19 @@ static int bmp280_read_calib(struct bmp280_data *data)
+ struct bmp280_calib *calib = &data->calib.bmp280;
+ int ret;
+
+-
+ /* Read temperature and pressure calibration values. */
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+- data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
++ data->bmp280_cal_buf,
++ sizeof(data->bmp280_cal_buf));
+ if (ret < 0) {
+ dev_err(data->dev,
+- "failed to read temperature and pressure calibration parameters\n");
++ "failed to read calibration parameters\n");
+ return ret;
+ }
+
+- /* Toss the temperature and pressure calibration data into the entropy pool */
+- add_device_randomness(data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
++ /* Toss calibration data into the entropy pool */
++ add_device_randomness(data->bmp280_cal_buf,
++ sizeof(data->bmp280_cal_buf));
+
+ /* Parse temperature calibration values. */
+ calib->T1 = le16_to_cpu(data->bmp280_cal_buf[T1]);
+@@ -222,7 +222,7 @@ static int bme280_read_calib(struct bmp280_data *data)
+ /* Load shared calibration params with bmp280 first */
+ ret = bmp280_read_calib(data);
+ if (ret < 0) {
+- dev_err(dev, "failed to read common bmp280 calibration parameters\n");
++ dev_err(dev, "failed to read calibration parameters\n");
+ return ret;
+ }
+
+@@ -234,14 +234,14 @@ static int bme280_read_calib(struct bmp280_data *data)
+ * Humidity data is only available on BME280.
+ */
+
+- ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
++ ret = regmap_read(data->regmap, BME280_REG_COMP_H1, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H1 comp value\n");
+ return ret;
+ }
+ calib->H1 = tmp;
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H2,
+ &data->le16, sizeof(data->le16));
+ if (ret < 0) {
+ dev_err(dev, "failed to read H2 comp value\n");
+@@ -249,14 +249,14 @@ static int bme280_read_calib(struct bmp280_data *data)
+ }
+ calib->H2 = sign_extend32(le16_to_cpu(data->le16), 15);
+
+- ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
++ ret = regmap_read(data->regmap, BME280_REG_COMP_H3, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H3 comp value\n");
+ return ret;
+ }
+ calib->H3 = tmp;
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H4,
+ &data->be16, sizeof(data->be16));
+ if (ret < 0) {
+ dev_err(dev, "failed to read H4 comp value\n");
+@@ -265,15 +265,15 @@ static int bme280_read_calib(struct bmp280_data *data)
+ calib->H4 = sign_extend32(((be16_to_cpu(data->be16) >> 4) & 0xff0) |
+ (be16_to_cpu(data->be16) & 0xf), 11);
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_COMP_H5,
+ &data->le16, sizeof(data->le16));
+ if (ret < 0) {
+ dev_err(dev, "failed to read H5 comp value\n");
+ return ret;
+ }
+- calib->H5 = sign_extend32(FIELD_GET(BMP280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
++ calib->H5 = sign_extend32(FIELD_GET(BME280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
+
+- ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
++ ret = regmap_read(data->regmap, BME280_REG_COMP_H6, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H6 comp value\n");
+ return ret;
+@@ -282,13 +282,14 @@ static int bme280_read_calib(struct bmp280_data *data)
+
+ return 0;
+ }
++
+ /*
+ * Returns humidity in percent, resolution is 0.01 percent. Output value of
+ * "47445" represents 47445/1024 = 46.333 %RH.
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
+-static u32 bmp280_compensate_humidity(struct bmp280_data *data,
++static u32 bme280_compensate_humidity(struct bmp280_data *data,
+ s32 adc_humidity)
+ {
+ struct bmp280_calib *calib = &data->calib.bmp280;
+@@ -304,7 +305,7 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ var = clamp_val(var, 0, 419430400);
+
+ return var >> 12;
+-};
++}
+
+ /*
+ * Returns temperature in DegC, resolution is 0.01 DegC. Output value of
+@@ -428,7 +429,7 @@ static int bmp280_read_press(struct bmp280_data *data,
+ return IIO_VAL_FRACTIONAL;
+ }
+
+-static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
++static int bme280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ {
+ u32 comp_humidity;
+ s32 adc_humidity;
+@@ -439,7 +440,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ if (ret < 0)
+ return ret;
+
+- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
++ ret = regmap_bulk_read(data->regmap, BME280_REG_HUMIDITY_MSB,
+ &data->be16, sizeof(data->be16));
+ if (ret < 0) {
+ dev_err(data->dev, "failed to read humidity\n");
+@@ -452,7 +453,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ dev_err(data->dev, "reading humidity skipped\n");
+ return -EIO;
+ }
+- comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
++ comp_humidity = bme280_compensate_humidity(data, adc_humidity);
+
+ *val = comp_humidity * 1000 / 1024;
+
+@@ -536,8 +537,8 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
+ return ret;
+ }
+
+-static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+- int val)
++static int bme280_write_oversampling_ratio_humid(struct bmp280_data *data,
++ int val)
+ {
+ const int *avail = data->chip_info->oversampling_humid_avail;
+ const int n = data->chip_info->num_oversampling_humid_avail;
+@@ -562,7 +563,7 @@ static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+ }
+
+ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+- int val)
++ int val)
+ {
+ const int *avail = data->chip_info->oversampling_temp_avail;
+ const int n = data->chip_info->num_oversampling_temp_avail;
+@@ -587,7 +588,7 @@ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+ }
+
+ static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
+- int val)
++ int val)
+ {
+ const int *avail = data->chip_info->oversampling_press_avail;
+ const int n = data->chip_info->num_oversampling_press_avail;
+@@ -680,7 +681,7 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
+ mutex_lock(&data->lock);
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+- ret = bmp280_write_oversampling_ratio_humid(data, val);
++ ret = bme280_write_oversampling_ratio_humid(data, val);
+ break;
+ case IIO_PRESSURE:
+ ret = bmp280_write_oversampling_ratio_press(data, val);
+@@ -771,13 +772,12 @@ static int bmp280_chip_config(struct bmp280_data *data)
+ int ret;
+
+ ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+- BMP280_OSRS_TEMP_MASK |
+- BMP280_OSRS_PRESS_MASK |
+- BMP280_MODE_MASK,
+- osrs | BMP280_MODE_NORMAL);
++ BMP280_OSRS_TEMP_MASK |
++ BMP280_OSRS_PRESS_MASK |
++ BMP280_MODE_MASK,
++ osrs | BMP280_MODE_NORMAL);
+ if (ret < 0) {
+- dev_err(data->dev,
+- "failed to write ctrl_meas register\n");
++ dev_err(data->dev, "failed to write ctrl_meas register\n");
+ return ret;
+ }
+
+@@ -785,8 +785,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
+ BMP280_FILTER_MASK,
+ BMP280_FILTER_4X);
+ if (ret < 0) {
+- dev_err(data->dev,
+- "failed to write config register\n");
++ dev_err(data->dev, "failed to write config register\n");
+ return ret;
+ }
+
+@@ -794,10 +793,12 @@ static int bmp280_chip_config(struct bmp280_data *data)
+ }
+
+ static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
++static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID };
+
+ const struct bmp280_chip_info bmp280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+- .chip_id = BMP280_CHIP_ID,
++ .chip_id = bmp280_chip_ids,
++ .num_chip_id = ARRAY_SIZE(bmp280_chip_ids),
+ .regmap_config = &bmp280_regmap_config,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+@@ -830,26 +831,28 @@ EXPORT_SYMBOL_NS(bmp280_chip_info, IIO_BMP280);
+
+ static int bme280_chip_config(struct bmp280_data *data)
+ {
+- u8 osrs = FIELD_PREP(BMP280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
++ u8 osrs = FIELD_PREP(BME280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
+ int ret;
+
+ /*
+ * Oversampling of humidity must be set before oversampling of
+ * temperature/pressure is set to become effective.
+ */
+- ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+- BMP280_OSRS_HUMIDITY_MASK, osrs);
+-
++ ret = regmap_update_bits(data->regmap, BME280_REG_CTRL_HUMIDITY,
++ BME280_OSRS_HUMIDITY_MASK, osrs);
+ if (ret < 0)
+ return ret;
+
+ return bmp280_chip_config(data);
+ }
+
++static const u8 bme280_chip_ids[] = { BME280_CHIP_ID };
++
+ const struct bmp280_chip_info bme280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+- .chip_id = BME280_CHIP_ID,
+- .regmap_config = &bmp280_regmap_config,
++ .chip_id = bme280_chip_ids,
++ .num_chip_id = ARRAY_SIZE(bme280_chip_ids),
++ .regmap_config = &bme280_regmap_config,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 3,
+@@ -864,12 +867,12 @@ const struct bmp280_chip_info bme280_chip_info = {
+
+ .oversampling_humid_avail = bmp280_oversampling_avail,
+ .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+- .oversampling_humid_default = BMP280_OSRS_HUMIDITY_16X - 1,
++ .oversampling_humid_default = BME280_OSRS_HUMIDITY_16X - 1,
+
+ .chip_config = bme280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+- .read_humid = bmp280_read_humid,
++ .read_humid = bme280_read_humid,
+ .read_calib = bme280_read_calib,
+ };
+ EXPORT_SYMBOL_NS(bme280_chip_info, IIO_BMP280);
+@@ -920,8 +923,8 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
+ }
+
+ /*
+- * Returns temperature in Celsius dregrees, resolution is 0.01º C. Output value of
+- * "5123" equals 51.2º C. t_fine carries fine temperature as global value.
++ * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value
++ * of "5123" equals 51.2º C. t_fine carries fine temperature as global value.
+ *
+ * Taken from datasheet, Section Appendix 9, "Compensation formula" and repo
+ * https://github.com/BoschSensortec/BMP3-Sensor-API.
+@@ -1063,7 +1066,8 @@ static int bmp380_read_calib(struct bmp280_data *data)
+
+ /* Read temperature and pressure calibration data */
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_CALIB_TEMP_START,
+- data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
++ data->bmp380_cal_buf,
++ sizeof(data->bmp380_cal_buf));
+ if (ret) {
+ dev_err(data->dev,
+ "failed to read temperature calibration parameters\n");
+@@ -1071,7 +1075,8 @@ static int bmp380_read_calib(struct bmp280_data *data)
+ }
+
+ /* Toss the temperature calibration data into the entropy pool */
+- add_device_randomness(data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
++ add_device_randomness(data->bmp380_cal_buf,
++ sizeof(data->bmp380_cal_buf));
+
+ /* Parse calibration values */
+ calib->T1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T1]);
+@@ -1153,7 +1158,8 @@ static int bmp380_chip_config(struct bmp280_data *data)
+
+ /* Configure output data rate */
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_ODR,
+- BMP380_ODRS_MASK, data->sampling_freq, &aux);
++ BMP380_ODRS_MASK, data->sampling_freq,
++ &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write ODR selection register\n");
+ return ret;
+@@ -1172,12 +1178,13 @@ static int bmp380_chip_config(struct bmp280_data *data)
+
+ if (change) {
+ /*
+- * The configurations errors are detected on the fly during a measurement
+- * cycle. If the sampling frequency is too low, it's faster to reset
+- * the measurement loop than wait until the next measurement is due.
++ * The configurations errors are detected on the fly during a
++ * measurement cycle. If the sampling frequency is too low, it's
++ * faster to reset the measurement loop than wait until the next
++ * measurement is due.
+ *
+- * Resets sensor measurement loop toggling between sleep and normal
+- * operating modes.
++ * Resets sensor measurement loop toggling between sleep and
++ * normal operating modes.
+ */
+ ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_MODE_MASK,
+@@ -1195,22 +1202,22 @@ static int bmp380_chip_config(struct bmp280_data *data)
+ return ret;
+ }
+ /*
+- * Waits for measurement before checking configuration error flag.
+- * Selected longest measure time indicated in section 3.9.1
+- * in the datasheet.
++		 * Waits for the measurement before checking the configuration
++		 * error flag. Uses the longest measurement time, calculated
++		 * from the formula in datasheet section 3.9.2 with an offset
++		 * of ~+15%, as is also seen in table 3.9.1.
+ */
+- msleep(80);
++ msleep(150);
+
+ /* Check config error flag */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
+ if (ret) {
+- dev_err(data->dev,
+- "failed to read error register\n");
++ dev_err(data->dev, "failed to read error register\n");
+ return ret;
+ }
+ if (tmp & BMP380_ERR_CONF_MASK) {
+ dev_warn(data->dev,
+- "sensor flagged configuration as incompatible\n");
++ "sensor flagged configuration as incompatible\n");
+ return -EINVAL;
+ }
+ }
+@@ -1220,10 +1227,12 @@ static int bmp380_chip_config(struct bmp280_data *data)
+
+ static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
+ static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
++static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID };
+
+ const struct bmp280_chip_info bmp380_chip_info = {
+ .id_reg = BMP380_REG_ID,
+- .chip_id = BMP380_CHIP_ID,
++ .chip_id = bmp380_chip_ids,
++ .num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
+ .regmap_config = &bmp380_regmap_config,
+ .start_up_time = 2000,
+ .channels = bmp380_channels,
+@@ -1308,9 +1317,11 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
+ }
+
+ /* Start NVM operation sequence */
+- ret = regmap_write(data->regmap, BMP580_REG_CMD, BMP580_CMD_NVM_OP_SEQ_0);
++ ret = regmap_write(data->regmap, BMP580_REG_CMD,
++ BMP580_CMD_NVM_OP_SEQ_0);
+ if (ret) {
+- dev_err(data->dev, "failed to send nvm operation's first sequence\n");
++ dev_err(data->dev,
++ "failed to send nvm operation's first sequence\n");
+ return ret;
+ }
+ if (is_write) {
+@@ -1318,7 +1329,8 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
+ ret = regmap_write(data->regmap, BMP580_REG_CMD,
+ BMP580_CMD_NVM_WRITE_SEQ_1);
+ if (ret) {
+- dev_err(data->dev, "failed to send nvm write sequence\n");
++ dev_err(data->dev,
++ "failed to send nvm write sequence\n");
+ return ret;
+ }
+ /* Datasheet says on 4.8.1.2 it takes approximately 10ms */
+@@ -1329,7 +1341,8 @@ static int bmp580_nvm_operation(struct bmp280_data *data, bool is_write)
+ ret = regmap_write(data->regmap, BMP580_REG_CMD,
+ BMP580_CMD_NVM_READ_SEQ_1);
+ if (ret) {
+- dev_err(data->dev, "failed to send nvm read sequence\n");
++ dev_err(data->dev,
++ "failed to send nvm read sequence\n");
+ return ret;
+ }
+ /* Datasheet says on 4.8.1.1 it takes approximately 200us */
+@@ -1385,12 +1398,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
+
+ /*
+ * Temperature is returned in Celsius degrees in fractional
+- * form down 2^16. We reescale by x1000 to return milli Celsius
+- * to respect IIO ABI.
++ * form down 2^16. We rescale by x1000 to return millidegrees
++ * Celsius to respect IIO ABI.
+ */
+- *val = raw_temp * 1000;
+- *val2 = 16;
+- return IIO_VAL_FRACTIONAL_LOG2;
++ raw_temp = sign_extend32(raw_temp, 23);
++ *val = ((s64)raw_temp * 1000) / (1 << 16);
++ return IIO_VAL_INT;
+ }
+
+ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
+@@ -1412,7 +1425,7 @@ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
+ }
+ /*
+ * Pressure is returned in Pascals in fractional form down 2^16.
+- * We reescale /1000 to convert to kilopascal to respect IIO ABI.
++ * We rescale /1000 to convert to kilopascal to respect IIO ABI.
+ */
+ *val = raw_press;
+ *val2 = 64000; /* 2^6 * 1000 */
+@@ -1492,8 +1505,8 @@ static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
+ if (ret)
+ goto exit;
+
+- ret = regmap_bulk_read(data->regmap, BMP580_REG_NVM_DATA_LSB, &data->le16,
+- sizeof(data->le16));
++ ret = regmap_bulk_read(data->regmap, BMP580_REG_NVM_DATA_LSB,
++ &data->le16, sizeof(data->le16));
+ if (ret) {
+ dev_err(data->dev, "error reading nvm data regs\n");
+ goto exit;
+@@ -1537,7 +1550,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
+ while (bytes >= sizeof(*buf)) {
+ addr = bmp580_nvmem_addrs[offset / sizeof(*buf)];
+
+- ret = regmap_write(data->regmap, BMP580_REG_NVM_ADDR, BMP580_NVM_PROG_EN |
++ ret = regmap_write(data->regmap, BMP580_REG_NVM_ADDR,
++ BMP580_NVM_PROG_EN |
+ FIELD_PREP(BMP580_NVM_ROW_ADDR_MASK, addr));
+ if (ret) {
+ dev_err(data->dev, "error writing nvm address\n");
+@@ -1545,8 +1559,8 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
+ }
+ data->le16 = cpu_to_le16(*buf++);
+
+- ret = regmap_bulk_write(data->regmap, BMP580_REG_NVM_DATA_LSB, &data->le16,
+- sizeof(data->le16));
++ ret = regmap_bulk_write(data->regmap, BMP580_REG_NVM_DATA_LSB,
++ &data->le16, sizeof(data->le16));
+ if (ret) {
+ dev_err(data->dev, "error writing LSB NVM data regs\n");
+ goto exit;
+@@ -1653,7 +1667,8 @@ static int bmp580_chip_config(struct bmp280_data *data)
+ BMP580_OSR_PRESS_EN;
+
+ ret = regmap_update_bits_check(data->regmap, BMP580_REG_OSR_CONFIG,
+- BMP580_OSR_TEMP_MASK | BMP580_OSR_PRESS_MASK |
++ BMP580_OSR_TEMP_MASK |
++ BMP580_OSR_PRESS_MASK |
+ BMP580_OSR_PRESS_EN,
+ reg_val, &aux);
+ if (ret) {
+@@ -1704,7 +1719,8 @@ static int bmp580_chip_config(struct bmp280_data *data)
+ */
+ ret = regmap_read(data->regmap, BMP580_REG_EFF_OSR, &tmp);
+ if (ret) {
+- dev_err(data->dev, "error reading effective OSR register\n");
++ dev_err(data->dev,
++ "error reading effective OSR register\n");
+ return ret;
+ }
+ if (!(tmp & BMP580_EFF_OSR_VALID_ODR)) {
+@@ -1720,10 +1736,12 @@ static int bmp580_chip_config(struct bmp280_data *data)
+ }
+
+ static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 };
++static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT };
+
+ const struct bmp280_chip_info bmp580_chip_info = {
+ .id_reg = BMP580_REG_CHIP_ID,
+- .chip_id = BMP580_CHIP_ID,
++ .chip_id = bmp580_chip_ids,
++ .num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
+ .regmap_config = &bmp580_regmap_config,
+ .start_up_time = 2000,
+ .channels = bmp380_channels,
+@@ -1837,7 +1855,8 @@ static int bmp180_read_calib(struct bmp280_data *data)
+ }
+
+ /* Toss the calibration data into the entropy pool */
+- add_device_randomness(data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
++ add_device_randomness(data->bmp180_cal_buf,
++ sizeof(data->bmp180_cal_buf));
+
+ calib->AC1 = be16_to_cpu(data->bmp180_cal_buf[AC1]);
+ calib->AC2 = be16_to_cpu(data->bmp180_cal_buf[AC2]);
+@@ -1952,8 +1971,7 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
+ return p + ((x1 + x2 + 3791) >> 4);
+ }
+
+-static int bmp180_read_press(struct bmp280_data *data,
+- int *val, int *val2)
++static int bmp180_read_press(struct bmp280_data *data, int *val, int *val2)
+ {
+ u32 comp_press;
+ s32 adc_press;
+@@ -1983,10 +2001,12 @@ static int bmp180_chip_config(struct bmp280_data *data)
+
+ static const int bmp180_oversampling_temp_avail[] = { 1 };
+ static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
++static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID };
+
+ const struct bmp280_chip_info bmp180_chip_info = {
+ .id_reg = BMP280_REG_ID,
+- .chip_id = BMP180_CHIP_ID,
++ .chip_id = bmp180_chip_ids,
++ .num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
+ .regmap_config = &bmp180_regmap_config,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+@@ -2077,6 +2097,7 @@ int bmp280_common_probe(struct device *dev,
+ struct bmp280_data *data;
+ struct gpio_desc *gpiod;
+ unsigned int chip_id;
++ unsigned int i;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+@@ -2142,12 +2163,17 @@ int bmp280_common_probe(struct device *dev,
+ ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id);
+ if (ret < 0)
+ return ret;
+- if (chip_id != data->chip_info->chip_id) {
+- dev_err(dev, "bad chip id: expected %x got %x\n",
+- data->chip_info->chip_id, chip_id);
+- return -EINVAL;
++
++ for (i = 0; i < data->chip_info->num_chip_id; i++) {
++ if (chip_id == data->chip_info->chip_id[i]) {
++ dev_info(dev, "0x%x is a known chip id for %s\n", chip_id, name);
++ break;
++ }
+ }
+
++ if (i == data->chip_info->num_chip_id)
++ dev_warn(dev, "bad chip id: 0x%x is not a known chip id\n", chip_id);
++
+ if (data->chip_info->preinit) {
+ ret = data->chip_info->preinit(data);
+ if (ret)
+@@ -2222,6 +2248,7 @@ static int bmp280_runtime_resume(struct device *dev)
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
+ if (ret)
+ return ret;
++
+ usleep_range(data->start_up_time, data->start_up_time + 100);
+ return data->chip_info->chip_config(data);
+ }
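The bmp280-core.c hunks above turn the single expected chip ID into a per-variant table (chip_id/num_chip_id) and make probe warn rather than fail on an unknown ID, which covers parts such as the BMP580 that can report an alternate ID. A minimal sketch of the matching loop, with illustrative names only:

    /* Sketch of the ID-table match in bmp280_common_probe(); names are illustrative. */
    struct chip_info {
            const unsigned char *chip_id;   /* table of acceptable IDs */
            int num_chip_id;                /* entries in the table */
    };

    static int chip_id_known(const struct chip_info *info, unsigned int id)
    {
            int i;

            for (i = 0; i < info->num_chip_id; i++)
                    if (id == info->chip_id[i])
                            return 1;       /* known variant */
            return 0;                       /* caller warns but keeps probing */
    }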
+diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
+index 3ee56720428c5d..d27d68edd90656 100644
+--- a/drivers/iio/pressure/bmp280-regmap.c
++++ b/drivers/iio/pressure/bmp280-regmap.c
+@@ -41,11 +41,23 @@ const struct regmap_config bmp180_regmap_config = {
+ };
+ EXPORT_SYMBOL_NS(bmp180_regmap_config, IIO_BMP280);
+
++static bool bme280_is_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case BMP280_REG_CONFIG:
++ case BME280_REG_CTRL_HUMIDITY:
++ case BMP280_REG_CTRL_MEAS:
++ case BMP280_REG_RESET:
++ return true;
++ default:
++ return false;
++ }
++}
++
+ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+- case BMP280_REG_CTRL_HUMIDITY:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+@@ -57,8 +69,6 @@ static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+- case BMP280_REG_HUMIDITY_LSB:
+- case BMP280_REG_HUMIDITY_MSB:
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+@@ -72,6 +82,23 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+ }
+ }
+
++static bool bme280_is_volatile_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case BME280_REG_HUMIDITY_LSB:
++ case BME280_REG_HUMIDITY_MSB:
++ case BMP280_REG_TEMP_XLSB:
++ case BMP280_REG_TEMP_LSB:
++ case BMP280_REG_TEMP_MSB:
++ case BMP280_REG_PRESS_XLSB:
++ case BMP280_REG_PRESS_LSB:
++ case BMP280_REG_PRESS_MSB:
++ case BMP280_REG_STATUS:
++ return true;
++ default:
++ return false;
++ }
++}
+ static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+@@ -167,7 +194,7 @@ const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+- .max_register = BMP280_REG_HUMIDITY_LSB,
++ .max_register = BMP280_REG_TEMP_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+@@ -175,6 +202,18 @@ const struct regmap_config bmp280_regmap_config = {
+ };
+ EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
+
++const struct regmap_config bme280_regmap_config = {
++ .reg_bits = 8,
++ .val_bits = 8,
++
++ .max_register = BME280_REG_HUMIDITY_LSB,
++ .cache_type = REGCACHE_RBTREE,
++
++ .writeable_reg = bme280_is_writeable_reg,
++ .volatile_reg = bme280_is_volatile_reg,
++};
++EXPORT_SYMBOL_NS(bme280_regmap_config, IIO_BMP280);
++
+ const struct regmap_config bmp380_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
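Splitting bme280_regmap_config out of the BMP280 one means each variant only advertises registers it really has: the humidity window and BME280_REG_CTRL_HUMIDITY move into the BME280-specific writeable/volatile predicates, and BMP280's max_register shrinks to BMP280_REG_TEMP_XLSB. The general shape of such a per-variant config, sketched with placeholder register numbers:

    /* Sketch of a per-variant regmap_config; register values are placeholders. */
    static bool my_is_volatile(struct device *dev, unsigned int reg)
    {
            return reg >= 0xF3 && reg <= 0xFE;      /* live data/status window */
    }

    static const struct regmap_config my_regmap_config = {
            .reg_bits     = 8,
            .val_bits     = 8,
            .max_register = 0xFE,           /* highest register this variant has */
            .cache_type   = REGCACHE_RBTREE,
            .volatile_reg = my_is_volatile, /* never answered from the cache */
    };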
+diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
+index 1dff9bb7c4e906..47122da8e716df 100644
+--- a/drivers/iio/pressure/bmp280-spi.c
++++ b/drivers/iio/pressure/bmp280-spi.c
+@@ -12,7 +12,7 @@
+ #include "bmp280.h"
+
+ static int bmp280_regmap_spi_write(void *context, const void *data,
+- size_t count)
++ size_t count)
+ {
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+@@ -29,7 +29,7 @@ static int bmp280_regmap_spi_write(void *context, const void *data,
+ }
+
+ static int bmp280_regmap_spi_read(void *context, const void *reg,
+- size_t reg_size, void *val, size_t val_size)
++ size_t reg_size, void *val, size_t val_size)
+ {
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+@@ -83,7 +83,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
+ { .compatible = "bosch,bmp180", .data = &bmp180_chip_info },
+ { .compatible = "bosch,bmp181", .data = &bmp180_chip_info },
+ { .compatible = "bosch,bmp280", .data = &bmp280_chip_info },
+- { .compatible = "bosch,bme280", .data = &bmp280_chip_info },
++ { .compatible = "bosch,bme280", .data = &bme280_chip_info },
+ { .compatible = "bosch,bmp380", .data = &bmp380_chip_info },
+ { .compatible = "bosch,bmp580", .data = &bmp580_chip_info },
+ { },
+@@ -91,10 +91,11 @@ static const struct of_device_id bmp280_of_spi_match[] = {
+ MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
+
+ static const struct spi_device_id bmp280_spi_id[] = {
++ { "bmp085", (kernel_ulong_t)&bmp180_chip_info },
+ { "bmp180", (kernel_ulong_t)&bmp180_chip_info },
+ { "bmp181", (kernel_ulong_t)&bmp180_chip_info },
+ { "bmp280", (kernel_ulong_t)&bmp280_chip_info },
+- { "bme280", (kernel_ulong_t)&bmp280_chip_info },
++ { "bme280", (kernel_ulong_t)&bme280_chip_info },
+ { "bmp380", (kernel_ulong_t)&bmp380_chip_info },
+ { "bmp580", (kernel_ulong_t)&bmp580_chip_info },
+ { }
+diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
+index 5c0563ce757251..b60e551b7d3185 100644
+--- a/drivers/iio/pressure/bmp280.h
++++ b/drivers/iio/pressure/bmp280.h
+@@ -192,8 +192,6 @@
+ #define BMP380_PRESS_SKIPPED 0x800000
+
+ /* BMP280 specific registers */
+-#define BMP280_REG_HUMIDITY_LSB 0xFE
+-#define BMP280_REG_HUMIDITY_MSB 0xFD
+ #define BMP280_REG_TEMP_XLSB 0xFC
+ #define BMP280_REG_TEMP_LSB 0xFB
+ #define BMP280_REG_TEMP_MSB 0xFA
+@@ -207,15 +205,6 @@
+ #define BMP280_REG_CONFIG 0xF5
+ #define BMP280_REG_CTRL_MEAS 0xF4
+ #define BMP280_REG_STATUS 0xF3
+-#define BMP280_REG_CTRL_HUMIDITY 0xF2
+-
+-/* Due to non linear mapping, and data sizes we can't do a bulk read */
+-#define BMP280_REG_COMP_H1 0xA1
+-#define BMP280_REG_COMP_H2 0xE1
+-#define BMP280_REG_COMP_H3 0xE3
+-#define BMP280_REG_COMP_H4 0xE4
+-#define BMP280_REG_COMP_H5 0xE5
+-#define BMP280_REG_COMP_H6 0xE7
+
+ #define BMP280_REG_COMP_TEMP_START 0x88
+ #define BMP280_COMP_TEMP_REG_COUNT 6
+@@ -223,8 +212,6 @@
+ #define BMP280_REG_COMP_PRESS_START 0x8E
+ #define BMP280_COMP_PRESS_REG_COUNT 18
+
+-#define BMP280_COMP_H5_MASK GENMASK(15, 4)
+-
+ #define BMP280_CONTIGUOUS_CALIB_REGS (BMP280_COMP_TEMP_REG_COUNT + \
+ BMP280_COMP_PRESS_REG_COUNT)
+
+@@ -235,14 +222,6 @@
+ #define BMP280_FILTER_8X 3
+ #define BMP280_FILTER_16X 4
+
+-#define BMP280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
+-#define BMP280_OSRS_HUMIDITY_SKIP 0
+-#define BMP280_OSRS_HUMIDITY_1X 1
+-#define BMP280_OSRS_HUMIDITY_2X 2
+-#define BMP280_OSRS_HUMIDITY_4X 3
+-#define BMP280_OSRS_HUMIDITY_8X 4
+-#define BMP280_OSRS_HUMIDITY_16X 5
+-
+ #define BMP280_OSRS_TEMP_MASK GENMASK(7, 5)
+ #define BMP280_OSRS_TEMP_SKIP 0
+ #define BMP280_OSRS_TEMP_1X 1
+@@ -264,6 +243,30 @@
+ #define BMP280_MODE_FORCED 1
+ #define BMP280_MODE_NORMAL 3
+
++/* BME280 specific registers */
++#define BME280_REG_HUMIDITY_LSB 0xFE
++#define BME280_REG_HUMIDITY_MSB 0xFD
++
++#define BME280_REG_CTRL_HUMIDITY 0xF2
++
++/* Due to non linear mapping, and data sizes we can't do a bulk read */
++#define BME280_REG_COMP_H1 0xA1
++#define BME280_REG_COMP_H2 0xE1
++#define BME280_REG_COMP_H3 0xE3
++#define BME280_REG_COMP_H4 0xE4
++#define BME280_REG_COMP_H5 0xE5
++#define BME280_REG_COMP_H6 0xE7
++
++#define BME280_COMP_H5_MASK GENMASK(15, 4)
++
++#define BME280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
++#define BME280_OSRS_HUMIDITY_SKIP 0
++#define BME280_OSRS_HUMIDITY_1X 1
++#define BME280_OSRS_HUMIDITY_2X 2
++#define BME280_OSRS_HUMIDITY_4X 3
++#define BME280_OSRS_HUMIDITY_8X 4
++#define BME280_OSRS_HUMIDITY_16X 5
++
+ /* BMP180 specific registers */
+ #define BMP180_REG_OUT_XLSB 0xF8
+ #define BMP180_REG_OUT_LSB 0xF7
+@@ -410,7 +413,7 @@ struct bmp280_data {
+ __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
+ __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
+ u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
+- /* Miscellaneous, endianess-aware data buffers */
++ /* Miscellaneous, endianness-aware data buffers */
+ __le16 le16;
+ __be16 be16;
+ } __aligned(IIO_DMA_MINALIGN);
+@@ -418,7 +421,8 @@ struct bmp280_data {
+
+ struct bmp280_chip_info {
+ unsigned int id_reg;
+- const unsigned int chip_id;
++ const u8 *chip_id;
++ int num_chip_id;
+
+ const struct regmap_config *regmap_config;
+
+@@ -464,6 +468,7 @@ extern const struct bmp280_chip_info bmp580_chip_info;
+ /* Regmap configurations */
+ extern const struct regmap_config bmp180_regmap_config;
+ extern const struct regmap_config bmp280_regmap_config;
++extern const struct regmap_config bme280_regmap_config;
+ extern const struct regmap_config bmp380_regmap_config;
+ extern const struct regmap_config bmp580_regmap_config;
+
+diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
+index 1ff091b2f764d4..d0a516d56da476 100644
+--- a/drivers/iio/pressure/dps310.c
++++ b/drivers/iio/pressure/dps310.c
+@@ -730,7 +730,7 @@ static int dps310_read_pressure(struct dps310_data *data, int *val, int *val2,
+ }
+ }
+
+-static int dps310_calculate_temp(struct dps310_data *data)
++static int dps310_calculate_temp(struct dps310_data *data, int *val)
+ {
+ s64 c0;
+ s64 t;
+@@ -746,7 +746,9 @@ static int dps310_calculate_temp(struct dps310_data *data)
+ t = c0 + ((s64)data->temp_raw * (s64)data->c1);
+
+ /* Convert to milliCelsius and scale the temperature */
+- return (int)div_s64(t * 1000LL, kt);
++ *val = (int)div_s64(t * 1000LL, kt);
++
++ return 0;
+ }
+
+ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
+@@ -768,11 +770,10 @@ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
+ if (rc)
+ return rc;
+
+- rc = dps310_calculate_temp(data);
+- if (rc < 0)
++ rc = dps310_calculate_temp(data, val);
++ if (rc)
+ return rc;
+
+- *val = rc;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
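The dps310 change addresses a classic signed-return hazard: dps310_calculate_temp() used to encode the computed milliCelsius value in its return, so a legitimately negative temperature was indistinguishable from -errno and got bounced as an error. The fixed shape separates status from value; a small runnable illustration (names are ours, not the driver's):

    #include <stdio.h>

    /* Status and value travel separately, as in the fixed dps310_calculate_temp(). */
    static int calc_temp(int raw, int coeff, int *val_mc)
    {
            *val_mc = raw * coeff;  /* may legitimately be negative */
            return 0;               /* 0 on success, negative errno on failure */
    }

    int main(void)
    {
            int mc;

            if (calc_temp(-12, 1000, &mc) == 0)
                    printf("%d mC\n", mc);  /* -12000: valid data, not an error */
            return 0;
    }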
+diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c
+index 30fb2de368210f..e3f0de020a40c9 100644
+--- a/drivers/iio/pressure/mprls0025pa.c
++++ b/drivers/iio/pressure/mprls0025pa.c
+@@ -323,6 +323,7 @@ static int mpr_probe(struct i2c_client *client)
+ struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
+ s64 scale, offset;
++ u32 func;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
+ return dev_err_probe(dev, -EOPNOTSUPP,
+@@ -362,10 +363,11 @@ static int mpr_probe(struct i2c_client *client)
+ return dev_err_probe(dev, ret,
+ "honeywell,pmax-pascal could not be read\n");
+ ret = device_property_read_u32(dev,
+- "honeywell,transfer-function", &data->function);
++ "honeywell,transfer-function", &func);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,transfer-function could not be read\n");
++ data->function = func - 1;
+ if (data->function > MPR_FUNCTION_C)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,transfer-function %d invalid\n",
+diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
+index 2ca3b0bc5eba10..931eaea046b328 100644
+--- a/drivers/iio/proximity/Kconfig
++++ b/drivers/iio/proximity/Kconfig
+@@ -72,6 +72,8 @@ config LIDAR_LITE_V2
+ config MB1232
+ tristate "MaxSonar I2CXL family ultrasonic sensors"
+ depends on I2C
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y to build a driver for the ultrasonic sensors I2CXL of
+ MaxBotix which have an i2c interface. It can be used to measure
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 7acc0f936dad38..0b88203720b059 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -794,7 +794,6 @@ static struct ib_gid_table *alloc_gid_table(int sz)
+ static void release_gid_table(struct ib_device *device,
+ struct ib_gid_table *table)
+ {
+- bool leak = false;
+ int i;
+
+ if (!table)
+@@ -803,15 +802,12 @@ static void release_gid_table(struct ib_device *device,
+ for (i = 0; i < table->sz; i++) {
+ if (is_gid_entry_free(table->data_vec[i]))
+ continue;
+- if (kref_read(&table->data_vec[i]->kref) > 1) {
+- dev_err(&device->dev,
+- "GID entry ref leak for index %d ref=%u\n", i,
+- kref_read(&table->data_vec[i]->kref));
+- leak = true;
+- }
++
++ WARN_ONCE(true,
++ "GID entry ref leak for dev %s index %d ref=%u\n",
++ dev_name(&device->dev), i,
++ kref_read(&table->data_vec[i]->kref));
+ }
+- if (leak)
+- return;
+
+ mutex_destroy(&table->lock);
+ kfree(table->data_vec);
+@@ -1644,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device)
+
+ rdma_for_each_port (device, p) {
+ err = ib_cache_update(device, p, true, true, true);
+- if (err)
++ if (err) {
++ gid_table_cleanup_one(device);
+ return err;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index ff58058aeadca7..07fb8d3c037f00 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
+ MODULE_DESCRIPTION("InfiniBand CM");
+ MODULE_LICENSE("Dual BSD/GPL");
+
++#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
+ static const char * const ibcm_rej_reason_strs[] = {
+ [IB_CM_REJ_NO_QP] = "no QP",
+ [IB_CM_REJ_NO_EEC] = "no EEC",
+@@ -1025,13 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
+ }
+ }
+
++static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
++ enum ib_cm_state old_state)
++{
++ struct cm_id_private *cm_id_priv;
++
++ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
++ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
++ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
++}
++
+ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ {
+ struct cm_id_private *cm_id_priv;
++ enum ib_cm_state old_state;
+ struct cm_work *work;
++ int ret;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irq(&cm_id_priv->lock);
++ old_state = cm_id->state;
+ retest:
+ switch (cm_id->state) {
+ case IB_CM_LISTEN:
+@@ -1135,7 +1149,14 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+
+ xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
+ cm_deref_id(cm_id_priv);
+- wait_for_completion(&cm_id_priv->comp);
++ do {
++ ret = wait_for_completion_timeout(&cm_id_priv->comp,
++ msecs_to_jiffies(
++ CM_DESTROY_ID_WAIT_TIMEOUT));
++ if (!ret) /* timeout happened */
++ cm_destroy_id_wait_timeout(cm_id, old_state);
++ } while (!ret);
++
+ while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
+ cm_free_work(work);
+
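cm_destroy_id() previously blocked in an unbounded wait_for_completion(); the hunk above wraps it in wait_for_completion_timeout() inside a loop, so the wait is still indefinite but a stuck cm_id is reported every CM_DESTROY_ID_WAIT_TIMEOUT (10 s) along with its state transition and refcount. The reusable pattern, sketched (report_stuck() is a hypothetical diagnostic hook):

    /* "Wait forever, but complain periodically" - as in cm_destroy_id() above. */
    do {
            ret = wait_for_completion_timeout(&priv->comp,
                                              msecs_to_jiffies(10000));
            if (!ret)                       /* timed out: log state, keep waiting */
                    report_stuck(priv);
    } while (!ret);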
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 1e2cd7c8716e81..64ace0b968f07f 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -715,8 +715,10 @@ cma_validate_port(struct ib_device *device, u32 port,
+ rcu_read_lock();
+ ndev = rcu_dereference(sgid_attr->ndev);
+ if (!net_eq(dev_net(ndev), dev_addr->net) ||
+- ndev->ifindex != bound_if_index)
++ ndev->ifindex != bound_if_index) {
++ rdma_put_gid_attr(sgid_attr);
+ sgid_attr = ERR_PTR(-ENODEV);
++ }
+ rcu_read_unlock();
+ goto out;
+ }
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index a666847bd7143e..56dd030045a206 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
+ * empty slots at the beginning.
+ */
+ pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
+- rdma_end_port(device) + 1),
++ size_add(rdma_end_port(device), 1)),
+ GFP_KERNEL);
+ if (!pdata_rcu)
+ return -ENOMEM;
+@@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
+ {
+ int ret;
+
+- down_write(&clients_rwsem);
++ lockdep_assert_held(&clients_rwsem);
+ /*
+ * The add/remove callbacks must be called in FIFO/LIFO order. To
+ * achieve this we assign client_ids so they are sorted in
+@@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
+ client->client_id = highest_client_id;
+ ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
+ if (ret)
+- goto out;
++ return ret;
+
+ highest_client_id++;
+ xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
+-
+-out:
+- up_write(&clients_rwsem);
+- return ret;
++ return 0;
+ }
+
+ static void remove_client_id(struct ib_client *client)
+@@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
+ {
+ struct ib_device *device;
+ unsigned long index;
++ bool need_unreg = false;
+ int ret;
+
+ refcount_set(&client->uses, 1);
+ init_completion(&client->uses_zero);
++
++ /*
++ * The devices_rwsem is held in write mode to ensure that a racing
++ * ib_register_device() sees a consistent view of clients and devices.
++ */
++ down_write(&devices_rwsem);
++ down_write(&clients_rwsem);
+ ret = assign_client_id(client);
+ if (ret)
+- return ret;
++ goto out;
+
+- down_read(&devices_rwsem);
++ need_unreg = true;
+ xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
+ ret = add_client_context(device, client);
+- if (ret) {
+- up_read(&devices_rwsem);
+- ib_unregister_client(client);
+- return ret;
+- }
++ if (ret)
++ goto out;
+ }
+- up_read(&devices_rwsem);
+- return 0;
++ ret = 0;
++out:
++ up_write(&clients_rwsem);
++ up_write(&devices_rwsem);
++ if (need_unreg && ret)
++ ib_unregister_client(client);
++ return ret;
+ }
+ EXPORT_SYMBOL(ib_register_client);
+
+@@ -2139,6 +2146,9 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ unsigned long flags;
+ int ret;
+
++ if (!rdma_is_port_valid(ib_dev, port))
++ return -EINVAL;
++
+ /*
+ * Drivers wish to call this before ib_register_driver, so we have to
+ * setup the port data early.
+@@ -2147,9 +2157,6 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ if (ret)
+ return ret;
+
+- if (!rdma_is_port_valid(ib_dev, port))
+- return -EINVAL;
+-
+ pdata = &ib_dev->port_data[port];
+ spin_lock_irqsave(&pdata->netdev_lock, flags);
+ old_ndev = rcu_dereference_protected(
+@@ -2159,17 +2166,12 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
+ return 0;
+ }
+
+- if (old_ndev)
+- netdev_tracker_free(ndev, &pdata->netdev_tracker);
+- if (ndev)
+- netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
+ rcu_assign_pointer(pdata->netdev, ndev);
++ netdev_put(old_ndev, &pdata->netdev_tracker);
++ netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
+ spin_unlock_irqrestore(&pdata->netdev_lock, flags);
+
+ add_ndev_hash(pdata);
+- if (old_ndev)
+- __dev_put(old_ndev);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(ib_device_set_netdev);
+@@ -2228,8 +2230,7 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ spin_lock(&pdata->netdev_lock);
+ res = rcu_dereference_protected(
+ pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
+- if (res)
+- dev_hold(res);
++ dev_hold(res);
+ spin_unlock(&pdata->netdev_lock);
+ }
+
+@@ -2304,9 +2305,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
+
+ if (filter(ib_dev, port, idev, filter_cookie))
+ cb(ib_dev, port, idev, cookie);
+-
+- if (idev)
+- dev_put(idev);
++ dev_put(idev);
+ }
+ }
+
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index 2b47073c61a65e..3e4941754b48d0 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -369,8 +369,10 @@ EXPORT_SYMBOL(iw_cm_disconnect);
+ *
+ * Clean up all resources associated with the connection and release
+ * the initial reference taken by iw_create_cm_id.
++ *
++ * Returns true if and only if the last cm_id_priv reference has been dropped.
+ */
+-static void destroy_cm_id(struct iw_cm_id *cm_id)
++static bool destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+ struct iwcm_id_private *cm_id_priv;
+ struct ib_qp *qp;
+@@ -440,7 +442,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
+ iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+ }
+
+- (void)iwcm_deref_id(cm_id_priv);
++ return iwcm_deref_id(cm_id_priv);
+ }
+
+ /*
+@@ -451,7 +453,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
+ */
+ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+- destroy_cm_id(cm_id);
++ if (!destroy_cm_id(cm_id))
++ flush_workqueue(iwcm_wq);
+ }
+ EXPORT_SYMBOL(iw_destroy_cm_id);
+
+@@ -1035,7 +1038,7 @@ static void cm_work_handler(struct work_struct *_work)
+ if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+ ret = process_event(cm_id_priv, &levent);
+ if (ret)
+- destroy_cm_id(&cm_id_priv->id);
++ WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ } else
+ pr_debug("dropping event %d\n", levent.event);
+ if (iwcm_deref_id(cm_id_priv))
+@@ -1188,7 +1191,7 @@ static int __init iw_cm_init(void)
+ if (ret)
+ return ret;
+
+- iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
++ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
+ if (!iwcm_wq)
+ goto err_alloc;
+
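destroy_cm_id() now reports whether it dropped the final reference; when it did not, queued work items still hold references, so iw_destroy_cm_id() flushes iwcm_wq to guarantee full teardown before returning. Marking the queue WQ_MEM_RECLAIM lets that flush make progress even under memory pressure. In outline (drop_refs() stands in for iwcm_deref_id()):

    /* Sketch of the teardown contract introduced above. */
    void destroy_id(struct id *id)
    {
            if (!drop_refs(id))             /* workers still hold references */
                    flush_workqueue(wq);    /* wait for them to drain */
    }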
+diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
+index c77d7d2559a11d..66c7e1e6600dcd 100644
+--- a/drivers/infiniband/core/lag.c
++++ b/drivers/infiniband/core/lag.c
+@@ -93,8 +93,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
+ slave = netdev_get_xmit_slave(master, skb,
+ !!(device->lag_flags &
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
+- if (slave)
+- dev_hold(slave);
++ dev_hold(slave);
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return slave;
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 674344eb8e2f48..58befbaaf0ad54 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -2616,14 +2616,16 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
+
+ static void timeout_sends(struct work_struct *work)
+ {
++ struct ib_mad_send_wr_private *mad_send_wr, *n;
+ struct ib_mad_agent_private *mad_agent_priv;
+- struct ib_mad_send_wr_private *mad_send_wr;
+ struct ib_mad_send_wc mad_send_wc;
++ struct list_head local_list;
+ unsigned long flags, delay;
+
+ mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+ timed_work.work);
+ mad_send_wc.vendor_err = 0;
++ INIT_LIST_HEAD(&local_list);
+
+ spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ while (!list_empty(&mad_agent_priv->wait_list)) {
+@@ -2641,13 +2643,16 @@ static void timeout_sends(struct work_struct *work)
+ break;
+ }
+
+- list_del(&mad_send_wr->agent_list);
++ list_del_init(&mad_send_wr->agent_list);
+ if (mad_send_wr->status == IB_WC_SUCCESS &&
+ !retry_send(mad_send_wr))
+ continue;
+
+- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
++ list_add_tail(&mad_send_wr->agent_list, &local_list);
++ }
++ spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+
++ list_for_each_entry_safe(mad_send_wr, n, &local_list, agent_list) {
+ if (mad_send_wr->status == IB_WC_SUCCESS)
+ mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
+ else
+@@ -2655,11 +2660,8 @@ static void timeout_sends(struct work_struct *work)
+ mad_send_wc.send_buf = &mad_send_wr->send_buf;
+ mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+ &mad_send_wc);
+-
+ deref_mad_agent(mad_agent_priv);
+- spin_lock_irqsave(&mad_agent_priv->lock, flags);
+ }
+- spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+ }
+
+ /*
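timeout_sends() used to drop and re-take the agent spinlock around every send_handler() callback; the rework above detaches all expired entries onto a stack-local list in one locked pass and invokes the callbacks lock-free afterwards. The underlying pattern, sketched with generic names (expired() and complete_wr() are illustrative):

    /* Sketch: collect under the lock, call back outside it. */
    LIST_HEAD(local);

    spin_lock_irqsave(&priv->lock, flags);
    list_for_each_entry_safe(wr, n, &priv->wait_list, list)
            if (expired(wr))
                    list_move_tail(&wr->list, &local);
    spin_unlock_irqrestore(&priv->lock, flags);

    list_for_each_entry_safe(wr, n, &local, list)
            complete_wr(wr);                /* may sleep; lock not held */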
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index 01a499a8b88dbd..438ed35881752d 100644
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
+ return 0;
+ }
+
+-static const char *type2str(enum rdma_restrack_type type)
+-{
+- static const char * const names[RDMA_RESTRACK_MAX] = {
+- [RDMA_RESTRACK_PD] = "PD",
+- [RDMA_RESTRACK_CQ] = "CQ",
+- [RDMA_RESTRACK_QP] = "QP",
+- [RDMA_RESTRACK_CM_ID] = "CM_ID",
+- [RDMA_RESTRACK_MR] = "MR",
+- [RDMA_RESTRACK_CTX] = "CTX",
+- [RDMA_RESTRACK_COUNTER] = "COUNTER",
+- [RDMA_RESTRACK_SRQ] = "SRQ",
+- };
+-
+- return names[type];
+-};
+-
+ /**
+ * rdma_restrack_clean() - clean resource tracking
+ * @dev: IB device
+@@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
+ void rdma_restrack_clean(struct ib_device *dev)
+ {
+ struct rdma_restrack_root *rt = dev->res;
+- struct rdma_restrack_entry *e;
+- char buf[TASK_COMM_LEN];
+- bool found = false;
+- const char *owner;
+ int i;
+
+ for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
+ struct xarray *xa = &dev->res[i].xa;
+
+- if (!xa_empty(xa)) {
+- unsigned long index;
+-
+- if (!found) {
+- pr_err("restrack: %s", CUT_HERE);
+- dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
+- }
+- xa_for_each(xa, index, e) {
+- if (rdma_is_kernel_res(e)) {
+- owner = e->kern_name;
+- } else {
+- /*
+- * There is no need to call get_task_struct here,
+- * because we can be here only if there are more
+- * get_task_struct() call than put_task_struct().
+- */
+- get_task_comm(buf, e->task);
+- owner = buf;
+- }
+-
+- pr_err("restrack: %s %s object allocated by %s is not freed\n",
+- rdma_is_kernel_res(e) ? "Kernel" :
+- "User",
+- type2str(e->type), owner);
+- }
+- found = true;
+- }
++ WARN_ON(!xa_empty(xa));
+ xa_destroy(xa);
+ }
+- if (found)
+- pr_err("restrack: %s", CUT_HERE);
+-
+ kfree(rt);
+ }
+
+diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
+index e958c43dd28fdf..d5131b3ba8ab04 100644
+--- a/drivers/infiniband/core/roce_gid_mgmt.c
++++ b/drivers/infiniband/core/roce_gid_mgmt.c
+@@ -601,8 +601,7 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
+
+ rcu_read_lock();
+ master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
+- if (master_ndev)
+- dev_hold(master_ndev);
++ dev_hold(master_ndev);
+ rcu_read_unlock();
+
+ if (master_ndev) {
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 59179cfc20ef95..8175dde60b0a84 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -2159,7 +2159,9 @@ static int ib_sa_add_one(struct ib_device *device)
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
+
+- sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
++ sa_dev = kzalloc(struct_size(sa_dev, port,
++ size_add(size_sub(e, s), 1)),
++ GFP_KERNEL);
+ if (!sa_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index ee59d739156899..ec5efdc1666013 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
+ * Two extra attribue elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+@@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
+ * Two extra attribue elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
++ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
+ GFP_KERNEL);
+ if (!data)
+ goto err_free_stats;
+@@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port,
+ int ret;
+
+ gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
+- attr->gid_tbl_len * 2),
++ size_mul(attr->gid_tbl_len, 2)),
+ GFP_KERNEL);
+ if (!gid_attr_group)
+ return -ENOMEM;
+@@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
+ int ret;
+
+ p = kvzalloc(struct_size(p, attrs_list,
+- attr->gid_tbl_len + attr->pkey_tbl_len),
+- GFP_KERNEL);
++ size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
++ GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ p->ibdev = device;
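These sysfs.c hunks, like the sa_query.c one above and the user_mad.c one below, route allocation arithmetic through size_add()/size_sub()/size_mul() from <linux/overflow.h>. Those helpers saturate at SIZE_MAX instead of wrapping, so an oversized count makes the allocation fail cleanly rather than silently produce an undersized buffer:

    /* Sketch: saturating size math feeding struct_size(). */
    data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
                   GFP_KERNEL);             /* SIZE_MAX on overflow => kzalloc fails */
    if (!data)
            return -ENOMEM;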
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index f9ab671c8eda55..07c571c7b69992 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -96,12 +96,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ return page_size;
+ }
+
+- /* rdma_for_each_block() has a bug if the page size is smaller than the
+- * page size used to build the umem. For now prevent smaller page sizes
+- * from being returned.
+- */
+- pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+-
+ /* The best result is the smallest page size that results in the minimum
+ * number of required pages. Compute the largest page size that could
+ * work based on VA address bits that don't change.
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 7e5c33aad1619d..2ed749f50a29ff 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
+ MODULE_LICENSE("Dual BSD/GPL");
+
++#define MAX_UMAD_RECV_LIST_SIZE 200000
++
+ enum {
+ IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
+ IB_UMAD_MAX_AGENTS = 32,
+@@ -113,6 +115,7 @@ struct ib_umad_file {
+ struct mutex mutex;
+ struct ib_umad_port *port;
+ struct list_head recv_list;
++ atomic_t recv_list_size;
+ struct list_head send_list;
+ struct list_head port_list;
+ spinlock_t send_lock;
+@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+ return file->agents_dead ? NULL : file->agent[id];
+ }
+
+-static int queue_packet(struct ib_umad_file *file,
+- struct ib_mad_agent *agent,
+- struct ib_umad_packet *packet)
++static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
++ struct ib_umad_packet *packet, bool is_recv_mad)
+ {
+ int ret = 1;
+
+ mutex_lock(&file->mutex);
+
++ if (is_recv_mad &&
++ atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
++ goto unlock;
++
+ for (packet->mad.hdr.id = 0;
+ packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+ packet->mad.hdr.id++)
+ if (agent == __get_agent(file, packet->mad.hdr.id)) {
+ list_add_tail(&packet->list, &file->recv_list);
++ atomic_inc(&file->recv_list_size);
+ wake_up_interruptible(&file->recv_wait);
+ ret = 0;
+ break;
+ }
+-
++unlock:
+ mutex_unlock(&file->mutex);
+
+ return ret;
+@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
+ if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
+ packet->length = IB_MGMT_MAD_HDR;
+ packet->mad.hdr.status = ETIMEDOUT;
+- if (!queue_packet(file, agent, packet))
++ if (!queue_packet(file, agent, packet, false))
+ return;
+ }
+ kfree(packet);
+@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
+ rdma_destroy_ah_attr(&ah_attr);
+ }
+
+- if (queue_packet(file, agent, packet))
++ if (queue_packet(file, agent, packet, true))
+ goto err2;
+ return;
+
+@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+
+ packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ list_del(&packet->list);
++ atomic_dec(&file->recv_list_size);
+
+ mutex_unlock(&file->mutex);
+
+@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ /* Requeue packet */
+ mutex_lock(&file->mutex);
+ list_add(&packet->list, &file->recv_list);
++ atomic_inc(&file->recv_list_size);
+ mutex_unlock(&file->mutex);
+ } else {
+ if (packet->recv_wc)
+@@ -1378,7 +1387,9 @@ static int ib_umad_add_one(struct ib_device *device)
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
+
+- umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
++ umad_dev = kzalloc(struct_size(umad_dev, ports,
++ size_add(size_sub(e, s), 1)),
++ GFP_KERNEL);
+ if (!umad_dev)
+ return -ENOMEM;
+
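user_mad.c now tracks the per-file receive queue length in an atomic_t and refuses to enqueue received MADs past MAX_UMAD_RECV_LIST_SIZE, so a peer flooding an unread file descriptor can no longer grow kernel memory without bound (locally generated timeout packets are exempt via the is_recv_mad flag). The bounded-queue pattern in brief, with abbreviated names:

    /* Sketch: producer side of a bounded list; the counter pairs with list ops. */
    if (is_recv && atomic_read(&f->recv_list_size) > MAX_LIST)
            return 1;                       /* drop: back-pressure on the wire */
    list_add_tail(&pkt->list, &f->recv_list);
    atomic_inc(&f->recv_list_size);

    /* consumer mirrors it: */
    list_del(&pkt->list);
    atomic_dec(&f->recv_list_size);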
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 41ff5595c86062..186ed3c22ec9e3 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1968,7 +1968,7 @@ int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
+ int rc;
+ u32 netdev_speed;
+ struct net_device *netdev;
+- struct ethtool_link_ksettings lksettings;
++ struct ethtool_link_ksettings lksettings = {};
+
+ if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
+ return -EINVAL;
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index 9fd9849ebdd142..5b481d8539eee3 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -106,8 +106,6 @@ struct bnxt_re_gsi_context {
+ struct bnxt_re_sqp_entries *sqp_tbl;
+ };
+
+-#define BNXT_RE_MIN_MSIX 2
+-#define BNXT_RE_MAX_MSIX 9
+ #define BNXT_RE_AEQ_IDX 0
+ #define BNXT_RE_NQ_IDX 1
+ #define BNXT_RE_GEN_P5_MAX_VF 64
+@@ -166,7 +164,7 @@ struct bnxt_re_dev {
+ struct bnxt_qplib_rcfw rcfw;
+
+ /* NQ */
+- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
++ struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
+
+ /* Device Resources */
+ struct bnxt_qplib_dev_attr dev_attr;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index faa88d12ee8681..b4d3e7dfc939f6 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1184,7 +1184,8 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
+ }
+
+ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+- struct ib_qp_init_attr *init_attr)
++ struct ib_qp_init_attr *init_attr,
++ struct bnxt_re_ucontext *uctx)
+ {
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+@@ -1213,7 +1214,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty.
+ */
+- entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
++ entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->q_full_delta = 0;
+ rq->sg_info.pgsize = PAGE_SIZE;
+@@ -1243,7 +1244,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+
+ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+- struct ib_udata *udata)
++ struct bnxt_re_ucontext *uctx)
+ {
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+@@ -1272,7 +1273,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ /* Allocate 128 + 1 more than what's provided */
+ diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+- entries = roundup_pow_of_two(entries + diff + 1);
++ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ sq->q_full_delta = diff + 1;
+ /*
+@@ -1288,7 +1289,8 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ }
+
+ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+- struct ib_qp_init_attr *init_attr)
++ struct ib_qp_init_attr *init_attr,
++ struct bnxt_re_ucontext *uctx)
+ {
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+@@ -1300,7 +1302,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ dev_attr = &rdev->dev_attr;
+
+ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+- entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
++ entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
+ qplqp->sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+@@ -1338,6 +1340,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_udata *udata)
+ {
+ struct bnxt_qplib_dev_attr *dev_attr;
++ struct bnxt_re_ucontext *uctx;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+@@ -1347,6 +1350,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
++ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ /* Setup misc params */
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+ qplqp->pd = &pd->qplib_pd;
+@@ -1388,18 +1392,18 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ }
+
+ /* Setup RQ/SRQ */
+- rc = bnxt_re_init_rq_attr(qp, init_attr);
++ rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
+
+ /* Setup SQ */
+- rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
++ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+- bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
++ bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
+
+ if (udata) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+@@ -1715,6 +1719,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ {
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_nq *nq = NULL;
++ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ struct bnxt_re_pd *pd;
+@@ -1739,13 +1744,14 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ goto exit;
+ }
+
++ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty
+ */
+- entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
++ entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+ srq->qplib_srq.max_wqe = entries;
+@@ -1809,7 +1815,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+- break;
++ return -EINVAL;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+@@ -1824,13 +1830,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to Build and send response back to udata */
+- break;
++ return 0;
+ default:
+ ibdev_err(&rdev->ibdev,
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+- return 0;
+ }
+
+ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+@@ -2103,6 +2108,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
+ }
+ if (qp_attr_mask & IB_QP_CAP) {
++ struct bnxt_re_ucontext *uctx =
++ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
++
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
+@@ -2119,7 +2127,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ "Create QP failed - max exceeded");
+ return -EINVAL;
+ }
+- entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
++ entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
+ qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+@@ -2132,7 +2140,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ qp->qplib_qp.sq.q_full_delta -= 1;
+ qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
+ if (qp->qplib_qp.rq.max_wqe) {
+- entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
++ entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
+ qp->qplib_qp.rq.max_wqe =
+ min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+@@ -2459,7 +2467,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
+- wqe->send.imm_data = wr->ex.imm_data;
++ wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
+@@ -2489,7 +2497,7 @@ static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
+- wqe->rdma.imm_data = wr->ex.imm_data;
++ wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
+ break;
+ case IB_WR_RDMA_READ:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
+@@ -2920,9 +2928,11 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+ {
++ struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
++ struct bnxt_re_ucontext *uctx =
++ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+- struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
+ int rc, entries;
+ int cqe = attr->cqe;
+ struct bnxt_qplib_nq *nq = NULL;
+@@ -2941,7 +2951,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ cq->rdev = rdev;
+ cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
+
+- entries = roundup_pow_of_two(cqe + 1);
++ entries = bnxt_re_init_depth(cqe + 1, uctx);
+ if (entries > dev_attr->max_cq_wqes + 1)
+ entries = dev_attr->max_cq_wqes + 1;
+
+@@ -2949,8 +2959,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
+ if (udata) {
+ struct bnxt_re_cq_req req;
+- struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
+- udata, struct bnxt_re_ucontext, ib_uctx);
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ rc = -EFAULT;
+ goto fail;
+@@ -3072,12 +3080,11 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+ return -EINVAL;
+ }
+
+- entries = roundup_pow_of_two(cqe + 1);
++ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
++ entries = bnxt_re_init_depth(cqe + 1, uctx);
+ if (entries > dev_attr->max_cq_wqes + 1)
+ entries = dev_attr->max_cq_wqes + 1;
+
+- uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
+- ib_uctx);
+ /* uverbs consumer */
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ rc = -EFAULT;
+@@ -3538,7 +3545,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
+ wc->byte_len = orig_cqe->length;
+ wc->qp = &gsi_qp->ib_qp;
+
+- wc->ex.imm_data = orig_cqe->immdata;
++ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata));
+ wc->src_qp = orig_cqe->src_qp;
+ memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
+@@ -3683,7 +3690,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
+ (unsigned long)(cqe->qp_handle),
+ struct bnxt_re_qp, qplib_qp);
+ wc->qp = &qp->ib_qp;
+- wc->ex.imm_data = cqe->immdata;
++ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata));
+ wc->src_qp = cqe->src_qp;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->port_num = 1;
+@@ -4108,6 +4115,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_user_mmap_entry *entry;
+ struct bnxt_re_uctx_resp resp = {};
++ struct bnxt_re_uctx_req ureq = {};
+ u32 chip_met_rev_num = 0;
+ int rc;
+
+@@ -4157,6 +4165,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
+ if (rdev->pacing.dbr_pacing)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
+
++ if (udata->inlen >= sizeof(ureq)) {
++ rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
++ if (rc)
++ goto cfail;
++ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
++ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
++ uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
++ }
++ }
++
+ rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
+ if (rc) {
+ ibdev_err(ibdev, "Failed to copy user context");
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+index 84715b7e7a4e4f..98baea98fc1761 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+@@ -140,6 +140,7 @@ struct bnxt_re_ucontext {
+ void *shpg;
+ spinlock_t sh_lock; /* protect shpg */
+ struct rdma_user_mmap_entry *shpage_mmap;
++ u64 cmask;
+ };
+
+ enum bnxt_re_mmap_flag {
+@@ -167,6 +168,12 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
+ return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
+ }
+
++static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
++{
++ return uctx ? ((uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
++ ent : roundup_pow_of_two(ent)) : ent;
++}
++
+ int bnxt_re_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *ib_attr,
+ struct ib_udata *udata);
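bnxt_re_init_depth() centralizes ring sizing for the bnxt_re hunks above: depths are still rounded up to a power of two for legacy user libraries, unless the user context negotiated BNXT_RE_UCNTX_CMASK_POW2_DISABLED during alloc_ucontext, in which case the requested depth is honoured exactly. An equivalent, flatter reading of the helper:

    /* Same logic as bnxt_re_init_depth(), written without nested ternaries. */
    static inline u32 init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
    {
            if (!uctx || (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED))
                    return ent;                     /* exact depth honoured */
            return roundup_pow_of_two(ent);         /* legacy ABI expects pow2 */
    }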
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index c9066aade4125b..039801d93ed8aa 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -71,7 +71,7 @@ static char version[] =
+ BNXT_RE_DESC "\n";
+
+ MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
+-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
++MODULE_DESCRIPTION(BNXT_RE_DESC);
+ MODULE_LICENSE("Dual BSD/GPL");
+
+ /* globals */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index abbabea7f5fa38..3b28878f62062f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -237,18 +237,15 @@ static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ struct nq_base *nqe, **nq_ptr;
+ int budget = nq->budget;
+- u32 sw_cons, raw_cons;
+ uintptr_t q_handle;
+ u16 type;
+
+ spin_lock_bh(&hwq->lock);
+ /* Service the NQ until empty */
+- raw_cons = hwq->cons;
+ while (budget--) {
+- sw_cons = HWQ_CMP(raw_cons, hwq);
+ nq_ptr = (struct nq_base **)hwq->pbl_ptr;
+- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
+- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
++ nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
++ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
+ break;
+
+ /*
+@@ -276,7 +273,8 @@ static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
+ default:
+ break;
+ }
+- raw_cons++;
++ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
++ 1, &nq->nq_db.dbinfo.flags);
+ }
+ spin_unlock_bh(&hwq->lock);
+ }
+@@ -302,18 +300,16 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ struct bnxt_qplib_hwq *hwq = &nq->hwq;
+ struct bnxt_qplib_cq *cq;
+ int budget = nq->budget;
+- u32 sw_cons, raw_cons;
+ struct nq_base *nqe;
+ uintptr_t q_handle;
++ u32 hw_polled = 0;
+ u16 type;
+
+ spin_lock_bh(&hwq->lock);
+ /* Service the NQ until empty */
+- raw_cons = hwq->cons;
+ while (budget--) {
+- sw_cons = HWQ_CMP(raw_cons, hwq);
+- nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
+- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
++ nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
++ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
+ break;
+
+ /*
+@@ -372,12 +368,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
+ "nqe with type = 0x%x not handled\n", type);
+ break;
+ }
+- raw_cons++;
++ hw_polled++;
++ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
++ 1, &nq->nq_db.dbinfo.flags);
+ }
+- if (hwq->cons != raw_cons) {
+- hwq->cons = raw_cons;
++ if (hw_polled)
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
+- }
+ spin_unlock_bh(&hwq->lock);
+ }
+
+@@ -505,6 +501,7 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
+ pdev = nq->pdev;
+ nq_db = &nq->nq_db;
+
++ nq_db->dbinfo.flags = 0;
+ nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
+ nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
+ if (!nq_db->reg.bar_base) {
+@@ -649,7 +646,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ rc = -ENOMEM;
+ goto fail;
+ }
+-
++ srq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_SRQ,
+ sizeof(req));
+@@ -703,13 +700,9 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+ {
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+- u32 sw_prod, sw_cons, count = 0;
+-
+- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+- sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
++ u32 count;
+
+- count = sw_prod > sw_cons ? sw_prod - sw_cons :
+- srq_hwq->max_elements - sw_cons + sw_prod;
++ count = __bnxt_qplib_get_avail(srq_hwq);
+ if (count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+@@ -748,7 +741,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+- srq->threshold = le16_to_cpu(sb->srq_limit);
++ if (!rc)
++ srq->threshold = le16_to_cpu(sb->srq_limit);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+
+@@ -761,7 +755,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe;
+ struct sq_sge *hw_sge;
+- u32 sw_prod, sw_cons, count = 0;
++ u32 count = 0;
+ int i, next;
+
+ spin_lock(&srq_hwq->lock);
+@@ -775,8 +769,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
+- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+- srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
++ srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
+ memset(srqe, 0, srq->wqe_size);
+ /* Calculate wqe_size16 and data_len */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+@@ -792,17 +785,10 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ srqe->wr_id[0] = cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+
+- srq_hwq->prod++;
++ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+
+ spin_lock(&srq_hwq->lock);
+- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+- /* retaining srq_hwq->cons for this logic
+- * actually the lock is only required to
+- * read srq_hwq->cons.
+- */
+- sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+- count = sw_prod > sw_cons ? sw_prod - sw_cons :
+- srq_hwq->max_elements - sw_cons + sw_prod;
++ count = __bnxt_qplib_get_avail(srq_hwq);
+ spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+@@ -849,6 +835,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ u32 tbl_indx;
+ int rc;
+
++ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP1,
+ sizeof(req));
+@@ -885,6 +872,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+
+ /* RQ */
+ if (rq->max_wqe) {
++ rq->dbinfo.flags = 0;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+@@ -992,6 +980,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ u32 tbl_indx;
+ u16 nsge;
+
++ if (res->dattr)
++ qp->dev_cap_flags = res->dattr->dev_cap_flags;
++
++ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP,
+ sizeof(req));
+@@ -1006,6 +998,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
++
++ if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
++ psn_sz = sizeof(struct sq_msn_search);
++ qp->msn = 0;
++ }
+ }
+
+ hwq_attr.res = res;
+@@ -1013,7 +1010,15 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ hwq_attr.stride = sizeof(struct sq_sge);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.aux_stride = psn_sz;
+- hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
++ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
++ : 0;
++ /* Update msn tbl size */
++ if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
++ hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++ qp->msn_tbl_sz = hwq_attr.aux_depth;
++ qp->msn = 0;
++ }
++
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
+ if (rc)
+@@ -1040,6 +1045,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+
+ /* RQ */
+ if (!qp->srq) {
++ rq->dbinfo.flags = 0;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+@@ -1454,12 +1460,15 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
+ {
+ struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
++ u32 peek_flags, peek_cons;
+ struct cq_base *hw_cqe;
+ int i;
+
++ peek_flags = cq->dbinfo.flags;
++ peek_cons = cq_hwq->cons;
+ for (i = 0; i < cq_hwq->max_elements; i++) {
+- hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
+- if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
++ hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
++ if (!CQE_CMP_VALID(hw_cqe, peek_flags))
+ continue;
+ /*
+ * The valid test of the entry must be done first before
+@@ -1489,6 +1498,8 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
+ default:
+ break;
+ }
++ bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
++ 1, &peek_flags);
+ }
+ }
+
+@@ -1590,6 +1601,27 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ return NULL;
+ }
+
++/* Fil the MSN table into the next psn row */
++static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
++ struct bnxt_qplib_swqe *wqe,
++ struct bnxt_qplib_swq *swq)
++{
++ struct sq_msn_search *msns;
++ u32 start_psn, next_psn;
++ u16 start_idx;
++
++ msns = (struct sq_msn_search *)swq->psn_search;
++ msns->start_idx_next_psn_start_psn = 0;
++
++ start_psn = swq->start_psn;
++ next_psn = swq->next_psn;
++ start_idx = swq->slot_idx;
++ msns->start_idx_next_psn_start_psn |=
++ bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
++ qp->msn++;
++ qp->msn %= qp->msn_tbl_sz;
++}
++
+ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+@@ -1601,6 +1633,12 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+
+ if (!swq->psn_search)
+ return;
++ /* Handle MSN differently on cap flags */
++ if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
++ bnxt_qplib_fill_msn_search(qp, wqe, swq);
++ return;
++ }
++ psns = (struct sq_psn_search *)swq->psn_search;
+ psns = swq->psn_search;
+ psns_ext = swq->psn_ext;
+
+@@ -1709,8 +1747,8 @@ static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
+ return slot;
+ }
+
+-static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
+- struct bnxt_qplib_swq *swq)
++static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
++ struct bnxt_qplib_swq *swq, bool hw_retx)
+ {
+ struct bnxt_qplib_hwq *hwq;
+ u32 pg_num, pg_indx;
+@@ -1721,6 +1759,11 @@ static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
+ if (!hwq->pad_pg)
+ return;
+ tail = swq->slot_idx / sq->dbinfo.max_slot;
++ if (hw_retx) {
++ /* For HW retx use qp msn index */
++ tail = qp->msn;
++ tail %= qp->msn_tbl_sz;
++ }
+ pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
+ pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
+ buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
+@@ -1745,6 +1788,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swq *swq;
+ bool sch_handler = false;
+ u16 wqe_sz, qdf = 0;
++ bool msn_update;
+ void *base_hdr;
+ void *ext_hdr;
+ __le32 temp32;
+@@ -1772,7 +1816,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ }
+
+ swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
+- bnxt_qplib_pull_psn_buff(sq, swq);
++ bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
+
+ idx = 0;
+ swq->slot_idx = hwq->prod;
+@@ -1804,6 +1848,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ &idx);
+ if (data_len < 0)
+ goto queue_err;
++ /* Make sure we update MSN table only for wired wqes */
++ msn_update = true;
+ /* Specifics */
+ switch (wqe->type) {
+ case BNXT_QPLIB_SWQE_TYPE_SEND:
+@@ -1844,6 +1890,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ SQ_SEND_DST_QP_MASK);
+ ext_sqe->avid = cpu_to_le32(wqe->send.avid &
+ SQ_SEND_AVID_MASK);
++ msn_update = false;
+ } else {
+ sqe->length = cpu_to_le32(data_len);
+ if (qp->mtu)
+@@ -1901,7 +1948,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
+-
++ msn_update = false;
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
+@@ -1933,6 +1980,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ PTU_PTE_VALID);
+ ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
+ ext_sqe->va = cpu_to_le64(wqe->frmr.va);
++ msn_update = false;
+
+ break;
+ }
+@@ -1950,6 +1998,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ sqe->l_key = cpu_to_le32(wqe->bind.r_key);
+ ext_sqe->va = cpu_to_le64(wqe->bind.va);
+ ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
++ msn_update = false;
+ break;
+ }
+ default:
+@@ -1957,11 +2006,13 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ rc = -EINVAL;
+ goto done;
+ }
+- swq->next_psn = sq->psn & BTH_PSN_MASK;
+- bnxt_qplib_fill_psn_search(qp, wqe, swq);
++ if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
++ swq->next_psn = sq->psn & BTH_PSN_MASK;
++ bnxt_qplib_fill_psn_search(qp, wqe, swq);
++ }
+ queue_err:
+ bnxt_qplib_swq_mod_start(sq, wqe_idx);
+- bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
++ bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
+ qp->wqe_cnt++;
+ done:
+ if (sch_handler) {
+@@ -2049,7 +2100,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
+ queue_err:
+ bnxt_qplib_swq_mod_start(rq, wqe_idx);
+- bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
++ bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
+ done:
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+@@ -2086,6 +2137,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ return -EINVAL;
+ }
+
++ cq->dbinfo.flags = 0;
+ hwq_attr.res = res;
+ hwq_attr.depth = cq->max_wqe;
+ hwq_attr.stride = sizeof(struct cq_base);
+@@ -2101,7 +2153,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+
+ req.dpi = cpu_to_le32(cq->dpi->dpi);
+ req.cq_handle = cpu_to_le64(cq->cq_handle);
+- req.cq_size = cpu_to_le32(cq->hwq.max_elements);
++ req.cq_size = cpu_to_le32(cq->max_wqe);
+ pbl = &cq->hwq.pbl[PBL_LVL_0];
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
+ CMDQ_CREATE_CQ_PG_SIZE_SFT);
+@@ -2144,6 +2196,8 @@ void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+ {
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
++ /* Reset only the cons bit in the flags */
++ cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
+ }
+
+ int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+@@ -2240,7 +2294,8 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
+ cqe++;
+ (*budget)--;
+ skip_compl:
+- bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
++ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
++ sq->swq[last].slots, &sq->dbinfo.flags);
+ sq->swq_last = sq->swq[last].next_idx;
+ }
+ *pcqe = cqe;
+@@ -2287,7 +2342,8 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
+ cqe->wr_id = rq->swq[last].wr_id;
+ cqe++;
+ (*budget)--;
+- bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
++ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
++ rq->swq[last].slots, &rq->dbinfo.flags);
+ rq->swq_last = rq->swq[last].next_idx;
+ }
+ *pcqe = cqe;
+@@ -2316,7 +2372,7 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
+ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+ u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
+ {
+- u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
++ u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct cq_req *peek_req_hwcqe;
+ struct bnxt_qplib_qp *peek_qp;
+@@ -2347,16 +2403,14 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+ }
+ if (sq->condition) {
+ /* Peek at the completions */
+- peek_raw_cq_cons = cq->hwq.cons;
++ peek_flags = cq->dbinfo.flags;
+ peek_sw_cq_cons = cq_cons;
+ i = cq->hwq.max_elements;
+ while (i--) {
+- peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
+ peek_sw_cq_cons, NULL);
+ /* If the next hwcqe is VALID */
+- if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
+- cq->hwq.max_elements)) {
++ if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+@@ -2399,8 +2453,9 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+ rc = -EINVAL;
+ goto out;
+ }
+- peek_sw_cq_cons++;
+- peek_raw_cq_cons++;
++ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
++ &peek_sw_cq_cons,
++ 1, &peek_flags);
+ }
+ dev_err(&cq->hwq.pdev->dev,
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
+@@ -2487,7 +2542,8 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ }
+ }
+ skip:
+- bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
++ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
++ swq->slots, &sq->dbinfo.flags);
+ sq->swq_last = swq->next_idx;
+ if (sq->single)
+ break;
+@@ -2514,7 +2570,8 @@ static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+- srq->hwq.cons++; /* Support for SRQE counter */
++ bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
++ srq->dbinfo.max_slot, &srq->dbinfo.flags);
+ spin_unlock(&srq->hwq.lock);
+ }
+
+@@ -2583,7 +2640,8 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
+ cqe->wr_id = swq->wr_id;
+ cqe++;
+ (*budget)--;
+- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
++ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
++ swq->slots, &rq->dbinfo.flags);
+ rq->swq_last = swq->next_idx;
+ *pcqe = cqe;
+
+@@ -2669,7 +2727,8 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
+ cqe->wr_id = swq->wr_id;
+ cqe++;
+ (*budget)--;
+- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
++ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
++ swq->slots, &rq->dbinfo.flags);
+ rq->swq_last = swq->next_idx;
+ *pcqe = cqe;
+
+@@ -2686,14 +2745,11 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
+ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+ {
+ struct cq_base *hw_cqe;
+- u32 sw_cons, raw_cons;
+ bool rc = true;
+
+- raw_cons = cq->hwq.cons;
+- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+- hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
++ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
+ /* Check for Valid bit. If the CQE is valid, return false */
+- rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
++ rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
+ return rc;
+ }
+
+@@ -2775,7 +2831,8 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
+ cqe->wr_id = swq->wr_id;
+ cqe++;
+ (*budget)--;
+- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
++ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
++ swq->slots, &rq->dbinfo.flags);
+ rq->swq_last = swq->next_idx;
+ *pcqe = cqe;
+
+@@ -2848,7 +2905,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
+ cqe++;
+ (*budget)--;
+ }
+- bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
++ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
++ sq->swq[swq_last].slots, &sq->dbinfo.flags);
+ sq->swq_last = sq->swq[swq_last].next_idx;
+ }
+ *pcqe = cqe;
+@@ -2933,19 +2991,17 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ int num_cqes, struct bnxt_qplib_qp **lib_qp)
+ {
+ struct cq_base *hw_cqe;
+- u32 sw_cons, raw_cons;
+ int budget, rc = 0;
++ u32 hw_polled = 0;
+ u8 type;
+
+- raw_cons = cq->hwq.cons;
+ budget = num_cqes;
+
+ while (budget) {
+- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+- hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
++ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
+
+ /* Check for Valid bit */
+- if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
++ if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
+ break;
+
+ /*
+@@ -2960,7 +3016,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ rc = bnxt_qplib_cq_process_req(cq,
+ (struct cq_req *)hw_cqe,
+ &cqe, &budget,
+- sw_cons, lib_qp);
++ cq->hwq.cons, lib_qp);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ rc = bnxt_qplib_cq_process_res_rc(cq,
+@@ -3006,12 +3062,13 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ dev_err(&cq->hwq.pdev->dev,
+ "process_cqe error rc = 0x%x\n", rc);
+ }
+- raw_cons++;
++ hw_polled++;
++ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
++ 1, &cq->dbinfo.flags);
++
+ }
+- if (cq->hwq.cons != raw_cons) {
+- cq->hwq.cons = raw_cons;
++ if (hw_polled)
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
+- }
+ exit:
+ return num_cqes - budget;
+ }
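
The hunks above move bnxt_re's completion-queue validity test from a raw consumer counter to an epoch bit kept in dbinfo->flags: the consumer flips the bit each time it wraps the ring, and a CQE is new when its hardware toggle bit is the inverse of that epoch parity. A standalone userspace sketch of the same scheme (all names below are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define DEPTH 4
#define EPOCH_CONS_MASK 0x1u

struct demo_cqe { uint8_t toggle; int payload; };

/* mirrors CQE_CMP_VALID(hdr, pass): an entry is new when its toggle
 * bit is the inverse of the consumer's epoch parity */
static int cqe_valid(const struct demo_cqe *e, uint32_t flags)
{
	return !!e->toggle == !(flags & EPOCH_CONS_MASK);
}

/* mirrors bnxt_qplib_hwq_incr_cons(): flip the epoch on wrap */
static void incr_cons(uint32_t max, uint32_t *cons, uint32_t cnt, uint32_t *flags)
{
	*cons += cnt;
	if (*cons >= max) {
		*cons %= max;
		*flags ^= EPOCH_CONS_MASK;
	}
}

int main(void)
{
	struct demo_cqe ring[DEPTH] = {0};
	uint32_t cons = 0, flags = 0;
	int i;

	/* producer fills one full pass with toggle=1, i.e. "new" for epoch 0 */
	for (i = 0; i < DEPTH; i++)
		ring[i] = (struct demo_cqe){ .toggle = 1, .payload = i };

	while (cqe_valid(&ring[cons], flags)) {
		printf("consumed %d (cons=%u flags=%u)\n",
		       ring[cons].payload, cons, (unsigned)flags);
		incr_cons(DEPTH, &cons, 1, &flags);
	}
	/* after the wrap, flags flipped to 1, so the same toggle=1 entries
	 * now read as stale until the producer flips them to 0 */
	return 0;
}
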
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 404b851091ca26..a6f38d8f12efe2 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -164,7 +164,7 @@ struct bnxt_qplib_swqe {
+ /* Send, with imm, inval key */
+ struct {
+ union {
+- __be32 imm_data;
++ u32 imm_data;
+ u32 inv_key;
+ };
+ u32 q_key;
+@@ -182,7 +182,7 @@ struct bnxt_qplib_swqe {
+ /* RDMA write, with imm, read */
+ struct {
+ union {
+- __be32 imm_data;
++ u32 imm_data;
+ u32 inv_key;
+ };
+ u64 remote_va;
+@@ -338,6 +338,9 @@ struct bnxt_qplib_qp {
+ dma_addr_t rq_hdr_buf_map;
+ struct list_head sq_flush;
+ struct list_head rq_flush;
++ u32 msn;
++ u32 msn_tbl_sz;
++ u16 dev_cap_flags;
+ };
+
+ #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
+@@ -348,9 +351,21 @@ struct bnxt_qplib_qp {
+ #define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
+
+ #define ROCE_CQE_CMP_V 0
+-#define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
++#define CQE_CMP_VALID(hdr, pass) \
+ (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
+- !((raw_cons) & (cp_bit)))
++ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
++
++static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
++{
++ int cons, prod, avail;
++
++ cons = hwq->cons;
++ prod = hwq->prod;
++ avail = cons - prod;
++ if (cons <= prod)
++ avail += hwq->depth;
++ return avail;
++}
+
+ static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
+ u8 slots)
+@@ -374,7 +389,7 @@ struct bnxt_qplib_cqe {
+ u16 cfa_meta;
+ u64 wr_id;
+ union {
+- __be32 immdata;
++ __le32 immdata;
+ u32 invrkey;
+ };
+ u64 qp_handle;
+@@ -443,9 +458,9 @@ struct bnxt_qplib_cq {
+ #define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
+ #define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)
+
+-#define NQE_CMP_VALID(hdr, raw_cons, cp_bit) \
++#define NQE_CMP_VALID(hdr, pass) \
+ (!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
+- !((raw_cons) & (cp_bit)))
++ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+
+ #define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
+
+@@ -614,4 +629,15 @@ static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
+
+ return size;
+ }
++
++/* MSN table update inline */
++static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
++{
++ return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
++ SQ_MSN_SEARCH_START_IDX_MASK) |
++ (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
++ SQ_MSN_SEARCH_NEXT_PSN_MASK) |
++ (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
++ SQ_MSN_SEARCH_START_PSN_MASK));
++}
+ #endif /* __BNXT_QPLIB_FP_H__ */
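
bnxt_re_update_msn_tbl() above packs three fields into one little-endian 64-bit MSN search entry: start_psn in bits 0..23, next_psn in bits 24..47, start_idx in bits 48..63. A userspace sketch of the same pack/unpack, reusing the mask and shift values from the hunk (the sample field values are made up):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define START_PSN_MASK 0xffffffULL
#define START_PSN_SFT  0
#define NEXT_PSN_MASK  0xffffff000000ULL
#define NEXT_PSN_SFT   24
#define START_IDX_MASK 0xffff000000000000ULL
#define START_IDX_SFT  48

static uint64_t pack_msn(uint32_t st_idx, uint32_t npsn, uint32_t start_psn)
{
	return (((uint64_t)st_idx << START_IDX_SFT) & START_IDX_MASK) |
	       (((uint64_t)npsn << NEXT_PSN_SFT) & NEXT_PSN_MASK) |
	       (((uint64_t)start_psn << START_PSN_SFT) & START_PSN_MASK);
}

int main(void)
{
	uint64_t v = pack_msn(0x12, 0x000200, 0x0001ff);

	printf("packed    = 0x%016" PRIx64 "\n", v);
	printf("start_idx = 0x%" PRIx64 "\n", (v & START_IDX_MASK) >> START_IDX_SFT);
	printf("next_psn  = 0x%" PRIx64 "\n", (v & NEXT_PSN_MASK) >> NEXT_PSN_SFT);
	printf("start_psn = 0x%" PRIx64 "\n", (v & START_PSN_MASK) >> START_PSN_SFT);
	return 0;
}
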
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index e47b4ca64d33ef..5680fe8b890ad1 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -734,17 +734,15 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
+ struct bnxt_qplib_hwq *hwq = &creq->hwq;
+ struct creq_base *creqe;
+- u32 sw_cons, raw_cons;
+ unsigned long flags;
+ u32 num_wakeup = 0;
++ u32 hw_polled = 0;
+
+ /* Service the CREQ until budget is over */
+ spin_lock_irqsave(&hwq->lock, flags);
+- raw_cons = hwq->cons;
+ while (budget > 0) {
+- sw_cons = HWQ_CMP(raw_cons, hwq);
+- creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
+- if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
++ creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
++ if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
+ break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+@@ -775,15 +773,15 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
+ type);
+ break;
+ }
+- raw_cons++;
+ budget--;
++ hw_polled++;
++ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
++ 1, &creq->creq_db.dbinfo.flags);
+ }
+
+- if (hwq->cons != raw_cons) {
+- hwq->cons = raw_cons;
++ if (hw_polled)
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
+ rcfw->res->cctx, true);
+- }
+ spin_unlock_irqrestore(&hwq->lock, flags);
+ if (num_wakeup)
+ wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
+@@ -907,6 +905,8 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
+ req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
+
+ skip_ctx_setup:
++ if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
++ req.flags |= cpu_to_le16(CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED);
+ req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+@@ -1113,6 +1113,7 @@ static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
+ pdev = rcfw->pdev;
+ creq_db = &rcfw->creq.creq_db;
+
++ creq_db->dbinfo.flags = 0;
+ creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
+ creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
+ if (!creq_db->reg.bar_id)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+index 7b31bee3e00054..45996e60a0d03e 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -141,9 +141,9 @@ struct bnxt_qplib_crsbe {
+ /* Allocate 1 per QP for async error notification for now */
+ #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
+ #define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
+-#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
++#define CREQ_CMP_VALID(hdr, pass) \
+ (!!((hdr)->v & CREQ_BASE_V) == \
+- !((raw_cons) & (cp_bit)))
++ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+ #define CREQ_ENTRY_POLL_BUDGET 0x100
+
+ /* HWQ */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 157db6b7e11937..ae2bde34e785b7 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -343,7 +343,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ hwq->cons = 0;
+ hwq->pdev = pdev;
+ hwq->depth = hwq_attr->depth;
+- hwq->max_elements = depth;
++ hwq->max_elements = hwq->depth;
+ hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
+ /* For direct access to the elements */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index 5949f004f78561..534db462216ac9 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -186,6 +186,14 @@ struct bnxt_qplib_db_info {
+ struct bnxt_qplib_hwq *hwq;
+ u32 xid;
+ u32 max_slot;
++ u32 flags;
++};
++
++enum bnxt_qplib_db_info_flags_mask {
++ BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
++ BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
++ BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL,
++ BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL,
+ };
+
+ /* Tables */
+@@ -396,24 +404,34 @@ void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
+
+ int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+
+-static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_hwq *hwq, u32 cnt)
++static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
++ struct bnxt_qplib_hwq *hwq, u32 cnt)
+ {
+- hwq->prod = (hwq->prod + cnt) % hwq->depth;
++ /* move prod and update toggle/epoch if wrap around */
++ hwq->prod += cnt;
++ if (hwq->prod >= hwq->depth) {
++ hwq->prod %= hwq->depth;
++ dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
++ }
+ }
+
+-static inline void bnxt_qplib_hwq_incr_cons(struct bnxt_qplib_hwq *hwq,
+- u32 cnt)
++static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
++ u32 *dbinfo_flags)
+ {
+- hwq->cons = (hwq->cons + cnt) % hwq->depth;
++ /* move cons and update toggle/epoch if wrap around */
++ *cons += cnt;
++ if (*cons >= max_elements) {
++ *cons %= max_elements;
++ *dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
++ }
+ }
+
+ static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
+ bool arm)
+ {
+- u32 key;
++ u32 key = 0;
+
+- key = info->hwq->cons & (info->hwq->max_elements - 1);
+- key |= (CMPL_DOORBELL_IDX_VALID |
++ key |= info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
+ (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
+ if (!arm)
+ key |= CMPL_DOORBELL_MASK;
+@@ -427,8 +445,7 @@ static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
+
+ key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
+ key <<= 32;
+- key |= (info->hwq->cons & (info->hwq->max_elements - 1)) &
+- DBC_DBC_INDEX_MASK;
++ key |= (info->hwq->cons & DBC_DBC_INDEX_MASK);
+ writeq(key, info->db);
+ }
+
+@@ -483,6 +500,15 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+ CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+ }
+
++static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
++{
++ return dev_cap_flags &
++ (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
++ CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
++}
++
++#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
++
+ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
+ {
+ return cctx->modes.dbr_pacing;
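
qplib_res.h now keeps two independent epoch bits in the same flags word, one for the producer and one for the consumer, each flipped on its own wrap; bnxt_qplib_resize_cq_complete() earlier in this patch clears only the consumer bit. A minimal model of that bookkeeping (illustrative names):

#include <stdio.h>
#include <stdint.h>

#define EPOCH_CONS_SHIFT 0
#define EPOCH_PROD_SHIFT 1

/* same shape as bnxt_qplib_hwq_incr_prod()/_incr_cons() above */
static void incr(uint32_t depth, uint32_t *idx, uint32_t cnt,
		 uint32_t *flags, unsigned int shift)
{
	*idx += cnt;
	if (*idx >= depth) {
		*idx %= depth;
		*flags ^= 1u << shift;
	}
}

int main(void)
{
	uint32_t prod = 0, cons = 0, flags = 0;
	int i;

	for (i = 0; i < 10; i++)
		incr(8, &prod, 1, &flags, EPOCH_PROD_SHIFT); /* wraps once */
	for (i = 0; i < 3; i++)
		incr(8, &cons, 1, &flags, EPOCH_CONS_SHIFT); /* no wrap */

	printf("prod=%u cons=%u flags=0x%x\n", prod, cons, (unsigned)flags);

	/* a CQ resize keeps the prod epoch but clears only the cons bit,
	 * as bnxt_qplib_resize_cq_complete() does */
	flags &= ~(1u << EPOCH_CONS_SHIFT);
	printf("after resize: flags=0x%x\n", (unsigned)flags);
	return 0;
}
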
+diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+index 4a10303e039254..2909608f4b5de4 100644
+--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
++++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+@@ -555,7 +555,12 @@ struct cmdq_modify_qp {
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+- u8 reserved8;
++ u8 qp_type;
++ #define CMDQ_MODIFY_QP_QP_TYPE_RC 0x2UL
++ #define CMDQ_MODIFY_QP_QP_TYPE_UD 0x4UL
++ #define CMDQ_MODIFY_QP_QP_TYPE_RAW_ETHERTYPE 0x6UL
++ #define CMDQ_MODIFY_QP_QP_TYPE_GSI 0x7UL
++ #define CMDQ_MODIFY_QP_QP_TYPE_LAST CMDQ_MODIFY_QP_QP_TYPE_GSI
+ __le64 resp_addr;
+ __le32 modify_mask;
+ #define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL
+@@ -611,14 +616,12 @@ struct cmdq_modify_qp {
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6)
+ #define CMDQ_MODIFY_QP_NETWORK_TYPE_LAST CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
+ u8 access;
+- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK \
+- 0xffUL
+- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT \
+- 0
+- #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
+- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
+- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
+- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
++ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL
++ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0
++ #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
++ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
++ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
++ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
+ __le16 pkey;
+ __le32 qkey;
+ __le32 dgid[4];
+@@ -673,6 +676,13 @@ struct cmdq_modify_qp {
+ #define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13
+ __le64 irrq_addr;
+ __le64 orrq_addr;
++ __le32 ext_modify_mask;
++ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX 0x1UL
++ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_SCHQ_ID_VALID 0x2UL
++ __le32 ext_stats_ctx_id;
++ __le16 schq_id;
++ __le16 unused_0;
++ __le32 reserved32;
+ };
+
+ /* creq_modify_qp_resp (size:128b/16B) */
+@@ -3017,6 +3027,17 @@ struct sq_psn_search_ext {
+ __le32 reserved32;
+ };
+
++/* sq_msn_search (size:64b/8B) */
++struct sq_msn_search {
++ __le64 start_idx_next_psn_start_psn;
++ #define SQ_MSN_SEARCH_START_PSN_MASK 0xffffffUL
++ #define SQ_MSN_SEARCH_START_PSN_SFT 0
++ #define SQ_MSN_SEARCH_NEXT_PSN_MASK 0xffffff000000ULL
++ #define SQ_MSN_SEARCH_NEXT_PSN_SFT 24
++ #define SQ_MSN_SEARCH_START_IDX_MASK 0xffff000000000000ULL
++ #define SQ_MSN_SEARCH_START_IDX_SFT 48
++};
++
+ /* sq_send (size:1024b/128B) */
+ struct sq_send {
+ u8 wqe_type;
+@@ -3705,13 +3726,35 @@ struct cq_base {
+ #define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
++ #define CQ_BASE_CQE_TYPE_REQ_V3 (0x8UL << 1)
++ #define CQ_BASE_CQE_TYPE_RES_RC_V3 (0x9UL << 1)
++ #define CQ_BASE_CQE_TYPE_RES_UD_V3 (0xaUL << 1)
++ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1)
++ #define CQ_BASE_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1)
+ #define CQ_BASE_CQE_TYPE_NO_OP (0xdUL << 1)
+ #define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1)
+ #define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1)
+ #define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
+ u8 status;
++ #define CQ_BASE_STATUS_OK 0x0UL
++ #define CQ_BASE_STATUS_BAD_RESPONSE_ERR 0x1UL
++ #define CQ_BASE_STATUS_LOCAL_LENGTH_ERR 0x2UL
++ #define CQ_BASE_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
++ #define CQ_BASE_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
++ #define CQ_BASE_STATUS_LOCAL_PROTECTION_ERR 0x5UL
++ #define CQ_BASE_STATUS_LOCAL_ACCESS_ERROR 0x6UL
++ #define CQ_BASE_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL
++ #define CQ_BASE_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL
++ #define CQ_BASE_STATUS_REMOTE_ACCESS_ERR 0x9UL
++ #define CQ_BASE_STATUS_REMOTE_OPERATION_ERR 0xaUL
++ #define CQ_BASE_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL
++ #define CQ_BASE_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL
++ #define CQ_BASE_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
++ #define CQ_BASE_STATUS_HW_FLUSH_ERR 0xeUL
++ #define CQ_BASE_STATUS_OVERFLOW_ERR 0xfUL
++ #define CQ_BASE_STATUS_LAST CQ_BASE_STATUS_OVERFLOW_ERR
+ __le16 reserved16;
+- __le32 reserved32;
++ __le32 opaque;
+ };
+
+ /* cq_req (size:256b/32B) */
+@@ -4326,6 +4369,8 @@ struct cq_cutoff {
+ #define CQ_CUTOFF_CQE_TYPE_SFT 1
+ #define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1)
+ #define CQ_CUTOFF_CQE_TYPE_LAST CQ_CUTOFF_CQE_TYPE_CUT_OFF
++ #define CQ_CUTOFF_RESIZE_TOGGLE_MASK 0x60UL
++ #define CQ_CUTOFF_RESIZE_TOGGLE_SFT 5
+ u8 status;
+ #define CQ_CUTOFF_STATUS_OK 0x0UL
+ #define CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK
+@@ -4377,6 +4422,8 @@ struct nq_srq_event {
+ #define NQ_SRQ_EVENT_TYPE_SFT 0
+ #define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL
+ #define NQ_SRQ_EVENT_TYPE_LAST NQ_SRQ_EVENT_TYPE_SRQ_EVENT
++ #define NQ_SRQ_EVENT_TOGGLE_MASK 0xc0UL
++ #define NQ_SRQ_EVENT_TOGGLE_SFT 6
+ u8 event;
+ #define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL
+ #define NQ_SRQ_EVENT_EVENT_LAST NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 040ba2224f9ff6..b3757c6a0457a1 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+ int ret;
+
+ ep = lookup_atid(t, atid);
++ if (!ep)
++ return -EINVAL;
+
+ pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
+ be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+@@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+ int ret = 0;
+
+ ep = lookup_atid(t, atid);
++ if (!ep)
++ return -EINVAL;
++
+ la = (struct sockaddr_in *)&ep->com.local_addr;
+ ra = (struct sockaddr_in *)&ep->com.remote_addr;
+ la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
+index 16a24a05fc2a60..bafd210dd43e86 100644
+--- a/drivers/infiniband/hw/efa/efa_com.c
++++ b/drivers/infiniband/hw/efa/efa_com.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+ /*
+- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
++ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+ #include "efa_com.h"
+@@ -406,8 +406,8 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
+ return comp_ctx;
+ }
+
+-static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
+- struct efa_admin_acq_entry *cqe)
++static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
++ struct efa_admin_acq_entry *cqe)
+ {
+ struct efa_comp_ctx *comp_ctx;
+ u16 cmd_id;
+@@ -416,11 +416,11 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
+ EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
+
+ comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
+- if (!comp_ctx) {
++ if (comp_ctx->status != EFA_CMD_SUBMITTED) {
+ ibdev_err(aq->efa_dev,
+- "comp_ctx is NULL. Changing the admin queue running state\n");
+- clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
+- return;
++ "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
++ cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
++ return -EINVAL;
+ }
+
+ comp_ctx->status = EFA_CMD_COMPLETED;
+@@ -428,14 +428,17 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
+
+ if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
+ complete(&comp_ctx->wait_event);
++
++ return 0;
+ }
+
+ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ {
+ struct efa_admin_acq_entry *cqe;
+ u16 queue_size_mask;
+- u16 comp_num = 0;
++ u16 comp_cmds = 0;
+ u8 phase;
++ int err;
+ u16 ci;
+
+ queue_size_mask = aq->depth - 1;
+@@ -453,10 +456,12 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ * phase bit was validated
+ */
+ dma_rmb();
+- efa_com_handle_single_admin_completion(aq, cqe);
++ err = efa_com_handle_single_admin_completion(aq, cqe);
++ if (!err)
++ comp_cmds++;
+
++ aq->cq.cc++;
+ ci++;
+- comp_num++;
+ if (ci == aq->depth) {
+ ci = 0;
+ phase = !phase;
+@@ -465,10 +470,9 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
+ cqe = &aq->cq.entries[ci];
+ }
+
+- aq->cq.cc += comp_num;
+ aq->cq.phase = phase;
+- aq->sq.cc += comp_num;
+- atomic64_add(comp_num, &aq->stats.completed_cmd);
++ aq->sq.cc += comp_cmds;
++ atomic64_add(comp_cmds, &aq->stats.completed_cmd);
+ }
+
+ static int efa_com_comp_status_to_errno(u8 comp_status)
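
The efa_com.c change splits "CQEs consumed" from "commands completed": the CQ consumer index advances for every entry seen, but submission-queue credits are returned only for completions that matched an outstanding command. A toy model of that accounting (all names are invented):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct demo_aq { uint16_t sq_pc, sq_cc, cq_cc; };

static bool handle_one(struct demo_aq *aq, bool matched)
{
	aq->cq_cc++;     /* always consume the CQE */
	return matched;  /* only matched ones free an SQ slot */
}

int main(void)
{
	struct demo_aq aq = { .sq_pc = 3 };
	bool matched[3] = { true, false, true }; /* one bogus command id */
	uint16_t comp_cmds = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (handle_one(&aq, matched[i]))
			comp_cmds++;
	aq.sq_cc += comp_cmds;

	printf("cq_cc=%u sq_cc=%u (one stale CQE returned no credit)\n",
	       aq.cq_cc, aq.sq_cc);
	return 0;
}
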
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index c317947563fbc6..b010c4209ea381 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -1540,11 +1540,31 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ return ret;
+ }
+
++static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
++{
++ switch (qp->attrs.state) {
++ case ERDMA_QP_STATE_IDLE:
++ return IB_QPS_INIT;
++ case ERDMA_QP_STATE_RTR:
++ return IB_QPS_RTR;
++ case ERDMA_QP_STATE_RTS:
++ return IB_QPS_RTS;
++ case ERDMA_QP_STATE_CLOSING:
++ return IB_QPS_ERR;
++ case ERDMA_QP_STATE_TERMINATE:
++ return IB_QPS_ERR;
++ case ERDMA_QP_STATE_ERROR:
++ return IB_QPS_ERR;
++ default:
++ return IB_QPS_ERR;
++ }
++}
++
+ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+ {
+- struct erdma_qp *qp;
+ struct erdma_dev *dev;
++ struct erdma_qp *qp;
+
+ if (ibqp && qp_attr && qp_init_attr) {
+ qp = to_eqp(ibqp);
+@@ -1571,6 +1591,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+
+ qp_init_attr->cap = qp_attr->cap;
+
++ qp_attr->qp_state = query_qp_state(qp);
++ qp_attr->cur_qp_state = query_qp_state(qp);
++
+ return 0;
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 0814291a04120f..9b542f7c6c115a 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -13185,15 +13185,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ {
+ u64 reg;
+ u16 idx = src / BITS_PER_REGISTER;
++ unsigned long flags;
+
+- spin_lock(&dd->irq_src_lock);
++ spin_lock_irqsave(&dd->irq_src_lock, flags);
+ reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ if (set)
+ reg |= bits;
+ else
+ reg &= ~bits;
+ write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+- spin_unlock(&dd->irq_src_lock);
++ spin_unlock_irqrestore(&dd->irq_src_lock, flags);
+ }
+
+ /**
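
read_mod_write() above switches to spin_lock_irqsave() because the CSR update is a read-modify-write that can also be reached from interrupt context. A userspace model of why the interrupt state must be saved and restored around the critical section (this only mimics the kernel primitives, it is not equivalent to them):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t csr;               /* stands in for CCE_INT_MASK */
static bool irqs_enabled = true;

static unsigned long local_irq_save_demo(void)
{
	unsigned long f = irqs_enabled;
	irqs_enabled = false;      /* nothing can preempt the RMW now */
	return f;
}

static void local_irq_restore_demo(unsigned long f)
{
	irqs_enabled = f;          /* restore whatever state we entered with */
}

static void read_mod_write_demo(uint64_t bits, bool set)
{
	unsigned long flags = local_irq_save_demo();
	uint64_t reg = csr;        /* read */
	reg = set ? (reg | bits) : (reg & ~bits);
	csr = reg;                 /* write back atomically w.r.t. "irqs" */
	local_irq_restore_demo(flags);
}

int main(void)
{
	read_mod_write_demo(0x5, true);
	read_mod_write_demo(0x1, false);
	printf("csr=0x%llx irqs=%d\n", (unsigned long long)csr, irqs_enabled);
	return 0;
}
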
+diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
+index 7741a1d69097c6..2b5d264f41e51b 100644
+--- a/drivers/infiniband/hw/hfi1/efivar.c
++++ b/drivers/infiniband/hw/hfi1/efivar.c
+@@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data)
+ {
+ char prefix_name[64];
+- char name[64];
++ char name[128];
+ int result;
+
+ /* create a common prefix */
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 08732e1ac96627..c132a9c073bffd 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -3,6 +3,7 @@
+ * Copyright(c) 2015 - 2019 Intel Corporation.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/pci.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -210,12 +211,6 @@ static u32 extract_speed(u16 linkstat)
+ return speed;
+ }
+
+-/* return the PCIe link speed from the given link status */
+-static u32 extract_width(u16 linkstat)
+-{
+- return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+-}
+-
+ /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+ static void update_lbus_info(struct hfi1_devdata *dd)
+ {
+@@ -228,7 +223,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
+ return;
+ }
+
+- dd->lbus_width = extract_width(linkstat);
++ dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
+ dd->lbus_speed = extract_speed(linkstat);
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
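
The pcie.c hunk replaces a hand-rolled shift-and-mask helper with FIELD_GET(), which derives the shift from the mask itself so the two cannot drift apart. A simplified userspace approximation of the idea (the real macro lives in <linux/bitfield.h> and does more checking; the mask value below is copied from PCI_EXP_LNKSTA_NLW for illustration):

#include <stdio.h>
#include <stdint.h>

/* divide by the mask's lowest set bit instead of shifting by a
 * separately maintained constant */
#define DEMO_FIELD_GET(mask, reg) \
	(((reg) & (mask)) / ((mask) & ~((mask) - 1)))

#define PCI_EXP_LNKSTA_NLW_DEMO 0x03f0  /* negotiated link width, bits 4..9 */

int main(void)
{
	uint16_t linkstat = 0x0082;  /* width field = 8 -> x8 link */

	printf("x%u\n", (unsigned)DEMO_FIELD_GET(PCI_EXP_LNKSTA_NLW_DEMO, linkstat));
	return 0;
}
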
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index dfea53e0fdeb8a..5eb309ead70768 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
+ "Unable to allocate credit return DMA range for NUMA %d\n",
+ i);
+ ret = -ENOMEM;
+- goto done;
++ goto free_cr_base;
+ }
+ }
+ set_dev_node(&dd->pcidev->dev, dd->node);
+@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
+ ret = 0;
+ done:
+ return ret;
++
++free_cr_base:
++ free_credit_return(dd);
++ goto done;
+ }
+
+ void free_credit_return(struct hfi1_devdata *dd)
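
The pio.c fix adds an unwind label so a mid-loop allocation failure frees what was already allocated instead of leaking it and then returning through the normal path. A self-contained sketch of the same goto pattern (structure only; names invented):

#include <stdio.h>
#include <stdlib.h>

#define N 4

static void *bufs[N];

static void free_all(void)
{
	int i;

	for (i = 0; i < N; i++) {
		free(bufs[i]);
		bufs[i] = NULL;
	}
}

static int init_all(int fail_at)
{
	int ret = 0;
	int i;

	for (i = 0; i < N; i++) {
		bufs[i] = (i == fail_at) ? NULL : malloc(64);
		if (!bufs[i]) {
			ret = -12;          /* -ENOMEM */
			goto free_partial;  /* the bug was "goto done" here */
		}
	}
done:
	return ret;

free_partial:
	free_all();  /* unwind the partial allocations first */
	goto done;
}

int main(void)
{
	printf("ret=%d\n", init_all(2));
	return 0;
}
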
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 26c62162759bab..969c5c3ab859e7 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ {
+ int rval = 0;
+
+- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
++ if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ rval = _extend_sdma_tx_descs(dd, tx);
+ if (rval) {
+ __sdma_txclean(dd, tx);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
+index e77fcc74f15c49..3df032ddda1891 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -33,7 +33,9 @@
+ #include <linux/pci.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_device.h"
++#include "hns_roce_hw_v2.h"
+
+ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+ {
+@@ -57,6 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+ int ret = 0;
++ u32 max_sl;
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
+ return -EOPNOTSUPP;
+@@ -70,9 +73,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.flowlabel = grh->flow_label;
+ ah->av.udp_sport = get_ah_udp_sport(ah_attr);
+- ah->av.sl = rdma_ah_get_sl(ah_attr);
+ ah->av.tclass = get_tclass(grh);
+
++ ah->av.sl = rdma_ah_get_sl(ah_attr);
++ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++ if (unlikely(ah->av.sl > max_sl)) {
++ ibdev_err_ratelimited(&hr_dev->ib_dev,
++ "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
++ ah->av.sl, max_sl);
++ return -EINVAL;
++ }
++
+ memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 736dc2f993b403..ff177466de9b49 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -151,7 +151,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ return ret;
+ }
+
+- ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
++ ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ if (ret) {
+ ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
+ goto err_put;
+@@ -164,7 +164,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ return 0;
+
+ err_xa:
+- xa_erase(&cq_table->array, hr_cq->cqn);
++ xa_erase_irq(&cq_table->array, hr_cq->cqn);
+ err_put:
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+
+@@ -183,7 +183,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
+ hr_cq->cqn);
+
+- xa_erase(&cq_table->array, hr_cq->cqn);
++ xa_erase_irq(&cq_table->array, hr_cq->cqn);
+
+ /* Waiting interrupt process procedure carried out */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+@@ -472,13 +472,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+ struct ib_event event;
+ struct ib_cq *ibcq;
+
+- hr_cq = xa_load(&hr_dev->cq_table.array,
+- cqn & (hr_dev->caps.num_cqs - 1));
+- if (!hr_cq) {
+- dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+- return;
+- }
+-
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+@@ -487,7 +480,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+ return;
+ }
+
+- refcount_inc(&hr_cq->refcount);
++ xa_lock(&hr_dev->cq_table.array);
++ hr_cq = xa_load(&hr_dev->cq_table.array,
++ cqn & (hr_dev->caps.num_cqs - 1));
++ if (hr_cq)
++ refcount_inc(&hr_cq->refcount);
++ xa_unlock(&hr_dev->cq_table.array);
++ if (!hr_cq) {
++ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
++ return;
++ }
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
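
The hns_roce_cq.c change closes a lookup/refcount race: the xarray lock is now held across both xa_load() and refcount_inc(), so a concurrent destroy cannot free the CQ between the two steps. A pthread model of the pattern, with a mutex standing in for xa_lock (illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cq { int refcount; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_cq *table_entry;      /* stands in for the xarray slot */

static struct demo_cq *lookup_and_get(void)
{
	struct demo_cq *cq;

	pthread_mutex_lock(&table_lock);     /* xa_lock() */
	cq = table_entry;                    /* xa_load() */
	if (cq)
		cq->refcount++;              /* refcount_inc() under the lock */
	pthread_mutex_unlock(&table_lock);   /* xa_unlock() */
	return cq;
}

static void destroy(void)
{
	pthread_mutex_lock(&table_lock);
	table_entry = NULL;                  /* xa_erase_irq() */
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct demo_cq *cq;

	table_entry = calloc(1, sizeof(*table_entry));
	cq = lookup_and_get();
	destroy();
	printf("%s (ref=%d)\n",
	       cq ? "got reference before erase" : "stale",
	       cq ? cq->refcount : 0);
	free(cq);
	return 0;
}
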
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 7f0d0288beb1e0..cd593d651e4caf 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -82,6 +82,7 @@
+ #define MR_TYPE_DMA 0x03
+
+ #define HNS_ROCE_FRMR_MAX_PA 512
++#define HNS_ROCE_FRMR_ALIGN_SIZE 128
+
+ #define PKEY_ID 0xffff
+ #define NODE_DESC_SIZE 64
+@@ -90,6 +91,8 @@
+ /* Configure to HW for PAGE_SIZE larger than 4KB */
+ #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
+
++#define ATOMIC_WR_LEN 8
++
+ #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
+ #define SRQ_DB_REG 0x230
+
+@@ -181,6 +184,9 @@ enum {
+ #define HNS_HW_PAGE_SHIFT 12
+ #define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
+
++#define HNS_HW_MAX_PAGE_SHIFT 27
++#define HNS_HW_MAX_PAGE_SIZE (1 << HNS_HW_MAX_PAGE_SHIFT)
++
+ struct hns_roce_uar {
+ u64 pfn;
+ unsigned long index;
+@@ -581,6 +587,13 @@ struct hns_roce_work {
+ u32 queue_num;
+ };
+
++enum hns_roce_cong_type {
++ CONG_TYPE_DCQCN,
++ CONG_TYPE_LDCP,
++ CONG_TYPE_HC3,
++ CONG_TYPE_DIP,
++};
++
+ struct hns_roce_qp {
+ struct ib_qp ibqp;
+ struct hns_roce_wq rq;
+@@ -624,6 +637,7 @@ struct hns_roce_qp {
+ struct list_head sq_node; /* all send qps are on a list */
+ struct hns_user_mmap_entry *dwqe_mmap_entry;
+ u32 config;
++ enum hns_roce_cong_type cong_type;
+ };
+
+ struct hns_roce_ib_iboe {
+@@ -695,13 +709,6 @@ struct hns_roce_eq_table {
+ struct hns_roce_eq *eq;
+ };
+
+-enum cong_type {
+- CONG_TYPE_DCQCN,
+- CONG_TYPE_LDCP,
+- CONG_TYPE_HC3,
+- CONG_TYPE_DIP,
+-};
+-
+ struct hns_roce_caps {
+ u64 fw_ver;
+ u8 num_ports;
+@@ -831,7 +838,7 @@ struct hns_roce_caps {
+ u16 default_aeq_period;
+ u16 default_aeq_arm_st;
+ u16 default_ceq_arm_st;
+- enum cong_type cong_type;
++ enum hns_roce_cong_type cong_type;
+ };
+
+ enum hns_roce_device_state {
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index c4ac06a3386969..7ebf80504fd125 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -1098,9 +1098,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
+ * @bt_level: base address table level
+ * @unit: ba entries per bt page
+ */
+-static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
++static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
+ {
+- u32 step;
++ u64 step;
+ int max;
+ int i;
+
+@@ -1136,7 +1136,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+ {
+ struct hns_roce_buf_region *r;
+ int total = 0;
+- int step;
++ u64 step;
+ int i;
+
+ for (i = 0; i < region_cnt; i++) {
+@@ -1167,7 +1167,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ int ret = 0;
+ int max_ofs;
+ int level;
+- u32 step;
++ u64 step;
+ int end;
+
+ if (hopnum <= 1)
+@@ -1191,10 +1191,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+
+ /* config L1 bt to last bt and link them to corresponding parent */
+ for (level = 1; level < hopnum; level++) {
+- cur = hem_list_search_item(&mid_bt[level], offset);
+- if (cur) {
+- hem_ptrs[level] = cur;
+- continue;
++ if (!hem_list_is_bottom_bt(hopnum, level)) {
++ cur = hem_list_search_item(&mid_bt[level], offset);
++ if (cur) {
++ hem_ptrs[level] = cur;
++ continue;
++ }
+ }
+
+ step = hem_list_calc_ba_range(hopnum, level, unit);
+@@ -1204,7 +1206,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ }
+
+ start_aligned = (distance / step) * step + r->offset;
+- end = min_t(int, start_aligned + step - 1, max_ofs);
++ end = min_t(u64, start_aligned + step - 1, max_ofs);
+ cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
+ true);
+ if (!cur) {
+@@ -1293,7 +1295,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ struct hns_roce_hem_item *hem, *temp_hem;
+ int total = 0;
+ int offset;
+- int step;
++ u64 step;
+
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step < 1)
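
hem.c widens the base-address-range step to u64 because the step is a product of per-level entry counts and can exceed 32 bits at two hops. A quick demonstration of the truncation the old u32 return type allowed (the unit value is made up):

#include <stdio.h>
#include <stdint.h>

/* step grows multiplicatively with the remaining hop levels */
static uint64_t calc_step_u64(int hopnum, int bt_level, uint64_t unit)
{
	uint64_t step = 1;
	int i;

	for (i = bt_level; i < hopnum; i++)
		step *= unit;
	return step;
}

int main(void)
{
	uint64_t unit = 1 << 17;  /* ba entries per bt page, illustrative */
	uint64_t step = calc_step_u64(2, 0, unit);

	printf("step   = %llu\n", (unsigned long long)step);   /* 2^34 */
	printf("as u32 = %u (silently truncated)\n", (uint32_t)step);
	return 0;
}
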
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
+index 7d23d3c51da46b..fea6d7d508b605 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -61,16 +61,16 @@ enum {
+ (sizeof(struct scatterlist) + sizeof(void *)))
+
+ #define check_whether_bt_num_3(type, hop_num) \
+- (type < HEM_TYPE_MTT && hop_num == 2)
++ ((type) < HEM_TYPE_MTT && (hop_num) == 2)
+
+ #define check_whether_bt_num_2(type, hop_num) \
+- ((type < HEM_TYPE_MTT && hop_num == 1) || \
+- (type >= HEM_TYPE_MTT && hop_num == 2))
++ (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
++ ((type) >= HEM_TYPE_MTT && (hop_num) == 2))
+
+ #define check_whether_bt_num_1(type, hop_num) \
+- ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+- (type >= HEM_TYPE_MTT && hop_num == 1) || \
+- (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
++ (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
++ ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
++ ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
+
+ struct hns_roce_hem_chunk {
+ struct list_head list;
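
The hem.h hunk is pure macro hygiene: each argument gains parentheses so callers can pass expressions safely. A small example of the failure mode the fix prevents (HEM_TYPE_MTT's value here is arbitrary):

#include <stdio.h>

#define HEM_TYPE_MTT 8

#define CHECK_BAD(type, hop_num)  (type < HEM_TYPE_MTT && hop_num == 2)
#define CHECK_GOOD(type, hop_num) ((type) < HEM_TYPE_MTT && (hop_num) == 2)

int main(void)
{
	int x = 16, y = 100;

	/* bad expands so < binds to y alone: (x | (y < 8)) && (2 == 2),
	 * which is wrongly true */
	printf("bad : %d\n", CHECK_BAD(x | y, 2));
	/* good evaluates ((x | y) < 8), which is correctly false */
	printf("good: %d\n", CHECK_GOOD(x | y, 2));
	return 0;
}
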
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index d82daff2d9bd5c..8066750afab908 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+
+- if (len > qp->max_inline_data || len > mtu) {
++ if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ len, qp->max_inline_data, mtu);
+@@ -595,11 +595,16 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
++ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
++ if (msg_len != ATOMIC_WR_LEN)
++ return -EINVAL;
+ set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
+- else if (wr->opcode != IB_WR_REG_MR)
++ } else if (wr->opcode != IB_WR_REG_MR) {
+ ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
+ &curr_idx, valid_num_sge);
++ if (ret)
++ return ret;
++ }
+
+ /*
+ * The pipeline can sequentially post all valid WQEs into WQ buffer,
+@@ -1648,8 +1653,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
+
+ for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
+ bd_idx = i / CNT_PER_DESC;
+- if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
+- bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
++ if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC &&
++ !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT)))
+ break;
+
+ cnt_data = (__le64 *)&desc[bd_idx].data[0];
+@@ -2088,7 +2093,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
+ caps->gid_table_len[0] = caps->gmv_bt_num *
+ (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
+
+- caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
++ caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
+ caps->gmv_entry_sz);
+ } else {
+ u32 func_num = max_t(u32, 1, hr_dev->func_num);
+@@ -2443,14 +2448,16 @@ static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
+ static struct hns_roce_link_table *
+ alloc_link_table_buf(struct hns_roce_dev *hr_dev)
+ {
++ u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_link_table *link_tbl;
+ u32 pg_shift, size, min_size;
+
+ link_tbl = &priv->ext_llm;
+ pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
+- size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
+- min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
++ size = hr_dev->caps.num_qps * hr_dev->func_num *
++ HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
++ min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) << pg_shift;
+
+ /* Alloc data table */
+ size = max(size, min_size);
+@@ -2693,6 +2700,10 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
+ return 0;
+
+ create_failed_qp:
++ for (i--; i >= 0; i--) {
++ hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL);
++ kfree(free_mr->rsv_qp[i]);
++ }
+ hns_roce_destroy_cq(cq, NULL);
+ kfree(cq);
+
+@@ -2921,6 +2932,9 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+
+ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+ {
++ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
++ free_mr_exit(hr_dev);
++
+ hns_roce_function_clear(hr_dev);
+
+ if (!hr_dev->is_vf)
+@@ -3694,8 +3708,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ wc->status == IB_WC_WR_FLUSH_ERR))
+ return;
+
+- ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
++ ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
++ cqe_status);
++ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ cq->cqe_size, false);
+ wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
+
+@@ -4379,12 +4394,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
+ hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
+
+- context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
+- qpc_mask->rq_nxt_blk_addr = 0;
+-
+- hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
+- upper_32_bits(to_hr_hw_page_addr(mtts[1])));
+- hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
++ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
++ context->rq_nxt_blk_addr =
++ cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
++ qpc_mask->rq_nxt_blk_addr = 0;
++ hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
++ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
++ hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
++ }
+
+ return 0;
+ }
+@@ -4724,9 +4741,15 @@ static int check_cong_type(struct ib_qp *ibqp,
+ struct hns_roce_congestion_algorithm *cong_alg)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
++ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
++
++ if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
++ hr_qp->cong_type = CONG_TYPE_DCQCN;
++ else
++ hr_qp->cong_type = hr_dev->caps.cong_type;
+
+ /* different congestion types match different configurations */
+- switch (hr_dev->caps.cong_type) {
++ switch (hr_qp->cong_type) {
+ case CONG_TYPE_DCQCN:
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+@@ -4752,10 +4775,15 @@ static int check_cong_type(struct ib_qp *ibqp,
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ default:
+- ibdev_err(&hr_dev->ib_dev,
+- "error type(%u) for congestion selection.\n",
+- hr_dev->caps.cong_type);
+- return -EINVAL;
++ ibdev_warn(&hr_dev->ib_dev,
++ "invalid type(%u) for congestion selection.\n",
++ hr_qp->cong_type);
++ hr_qp->cong_type = CONG_TYPE_DCQCN;
++ cong_alg->alg_sel = CONG_DCQCN;
++ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
++ cong_alg->dip_vld = DIP_INVALID;
++ cong_alg->wnd_mode_sel = WND_LIMIT;
++ break;
+ }
+
+ return 0;
+@@ -4769,6 +4797,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ struct hns_roce_congestion_algorithm cong_field;
+ struct ib_device *ibdev = ibqp->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
++ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ u32 dip_idx = 0;
+ int ret;
+
+@@ -4781,7 +4810,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ return ret;
+
+ hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
+- hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
++ hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
+ hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
+ hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
+ hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
+@@ -4821,22 +4850,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ const struct ib_gid_attr *gid_attr = NULL;
++ u8 sl = rdma_ah_get_sl(&attr->ah_attr);
+ int is_roce_protocol;
+ u16 vlan_id = 0xffff;
+ bool is_udp = false;
++ u32 max_sl;
+ u8 ib_port;
+ u8 hr_port;
+ int ret;
+
++ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
++ if (unlikely(sl > max_sl)) {
++ ibdev_err_ratelimited(ibdev,
++ "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
++ sl, max_sl);
++ return -EINVAL;
++ }
++
+ /*
+ * If free_mr_en of qp is set, it means that this qp comes from
+ * free mr. This qp will perform the loopback operation.
+ * In the loopback scenario, only sl needs to be set.
+ */
+ if (hr_qp->free_mr_en) {
+- hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
++ hr_reg_write(context, QPC_SL, sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++ hr_qp->sl = sl;
+ return 0;
+ }
+
+@@ -4903,14 +4942,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+- if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
+- ibdev_err(ibdev,
+- "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
+- hr_qp->sl, MAX_SERVICE_LEVEL);
+- return -EINVAL;
+- }
+-
++ hr_qp->sl = sl;
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+
+@@ -5623,7 +5655,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+
+ /* Resizing SRQs is not supported yet */
+ if (srq_attr_mask & IB_SRQ_MAX_WR)
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+ if (srq_attr->srq_limit > srq->wqe_cnt)
+@@ -5804,7 +5836,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+- ibdev_warn(ibdev, "send queue drained.\n");
++ ibdev_dbg(ibdev, "send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
+@@ -5819,10 +5851,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
+ irq_work->queue_num, irq_work->sub_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+- ibdev_warn(ibdev, "SRQ limit reach.\n");
++ ibdev_dbg(ibdev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+- ibdev_warn(ibdev, "SRQ last wqe reach.\n");
++ ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ ibdev_err(ibdev, "SRQ catas error.\n");
+@@ -6038,6 +6070,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
+ struct pci_dev *pdev = hr_dev->pci_dev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops = ae_dev->ops;
++ enum hnae3_reset_type reset_type;
+ irqreturn_t int_work = IRQ_NONE;
+ u32 int_en;
+
+@@ -6049,10 +6082,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
+ 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
+
++ reset_type = hr_dev->is_vf ?
++ HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET;
++
+ /* Set reset level for reset_event() */
+ if (ops->set_default_reset_request)
+- ops->set_default_reset_request(ae_dev,
+- HNAE3_FUNC_RESET);
++ ops->set_default_reset_request(ae_dev, reset_type);
+ if (ops->reset_event)
+ ops->reset_event(pdev, NULL);
+
+@@ -6122,7 +6157,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
+ res_type == ECC_RESOURCE_SCCC)
+ return le64_to_cpu(*data);
+
+- return le64_to_cpu(*data) << PAGE_SHIFT;
++ return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT;
+ }
+
+ static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
+@@ -6236,9 +6271,16 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
+ }
+
+-static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
++static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
++{
++ hns_roce_mtr_destroy(hr_dev, &eq->mtr);
++}
++
++static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev,
++ struct hns_roce_eq *eq)
+ {
+ struct device *dev = hr_dev->dev;
++ int eqn = eq->eqn;
+ int ret;
+ u8 cmd;
+
+@@ -6249,12 +6291,9 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
+
+ ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
+ if (ret)
+- dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
+-}
++ dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+
+-static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+-{
+- hns_roce_mtr_destroy(hr_dev, &eq->mtr);
++ free_eq_buf(hr_dev, eq);
+ }
+
+ static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+@@ -6560,7 +6599,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+
+ err_create_eq_fail:
+ for (i -= 1; i >= 0; i--)
+- free_eq_buf(hr_dev, &eq_table->eq[i]);
++ hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
+ kfree(eq_table->eq);
+
+ return ret;
+@@ -6580,11 +6619,8 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+ __hns_roce_free_irq(hr_dev);
+ destroy_workqueue(hr_dev->irq_workq);
+
+- for (i = 0; i < eq_num; i++) {
+- hns_roce_v2_destroy_eqc(hr_dev, i);
+-
+- free_eq_buf(hr_dev, &eq_table->eq[i]);
+- }
++ for (i = 0; i < eq_num; i++)
++ hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eq);
+ }
+@@ -6749,9 +6785,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
+ hns_roce_handle_device_err(hr_dev);
+
+- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+- free_mr_exit(hr_dev);
+-
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index d9d546cdef525e..c8c49110a3378d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -37,6 +37,7 @@
+ #include <rdma/ib_smi.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_common.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
+@@ -547,17 +548,12 @@ static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
+ struct ib_device *device, u32 port_num)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+- u32 port = port_num - 1;
+
+- if (port > hr_dev->caps.num_ports) {
++ if (port_num > hr_dev->caps.num_ports) {
+ ibdev_err(device, "invalid port num.\n");
+ return NULL;
+ }
+
+- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+- hr_dev->is_vf)
+- return NULL;
+-
+ return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
+ ARRAY_SIZE(hns_roce_port_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+@@ -577,10 +573,6 @@ static int hns_roce_get_hw_stats(struct ib_device *device,
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+- hr_dev->is_vf)
+- return -EOPNOTSUPP;
+-
+ ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
+ &num_counters);
+ if (ret) {
+@@ -634,8 +626,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
+ .query_pkey = hns_roce_query_pkey,
+ .query_port = hns_roce_query_port,
+ .reg_user_mr = hns_roce_reg_user_mr,
+- .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
+- .get_hw_stats = hns_roce_get_hw_stats,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
+@@ -644,6 +634,11 @@ static const struct ib_device_ops hns_roce_dev_ops = {
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
+ };
+
++static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
++ .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
++ .get_hw_stats = hns_roce_get_hw_stats,
++};
++
+ static const struct ib_device_ops hns_roce_dev_mr_ops = {
+ .rereg_user_mr = hns_roce_rereg_user_mr,
+ };
+@@ -720,6 +715,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+ ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
+
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
++ !hr_dev->is_vf)
++ ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);
++
+ ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 14376490ac226a..980261969b0c0a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -421,18 +421,23 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+- int ret = 0;
++ int ret, sg_num = 0;
++
++ if (!IS_ALIGNED(*sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
++ ibmr->page_size < HNS_HW_PAGE_SIZE ||
++ ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
++ return sg_num;
+
+ mr->npages = 0;
+ mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!mr->page_list)
+- return ret;
++ return sg_num;
+
+- ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+- if (ret < 1) {
++ sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
++ if (sg_num < 1) {
+ ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
+- mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
++ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
+ goto err_page_list;
+ }
+
+@@ -443,17 +448,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ if (ret) {
+ ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+- ret = 0;
++ sg_num = 0;
+ } else {
+ mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
+- ret = mr->npages;
+ }
+
+ err_page_list:
+ kvfree(mr->page_list);
+ mr->page_list = NULL;
+
+- return ret;
++ return sg_num;
+ }
+
+ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
+index 783e71852c503a..bd1fe89ca205e9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
+@@ -150,7 +150,7 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+ int ret;
+
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+ if (ret)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index cdc1c6de43a174..04063cfacae5fc 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -531,13 +531,15 @@ static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
+ {
+ unsigned int inline_sge;
+
+- inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
++ if (!max_inline_data)
++ return 0;
+
+ /*
+ * if max_inline_data less than
+ * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE,
+ * In addition to ud's mode, no need to extend sge.
+ */
++ inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
+ if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
+ inline_sge = 0;
+
+@@ -1064,7 +1066,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ {
+ struct hns_roce_ib_create_qp_resp resp = {};
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+- struct hns_roce_ib_create_qp ucmd;
++ struct hns_roce_ib_create_qp ucmd = {};
+ int ret;
+
+ mutex_init(&hr_qp->mutex);
+@@ -1377,19 +1379,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
+- spin_lock_irq(&send_cq->lock);
++ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
+- spin_lock_irq(&recv_cq->lock);
++ spin_lock(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else if (send_cq == recv_cq) {
+- spin_lock_irq(&send_cq->lock);
++ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+- spin_lock_irq(&send_cq->lock);
++ spin_lock(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+- spin_lock_irq(&recv_cq->lock);
++ spin_lock(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+ }
+@@ -1409,13 +1411,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ spin_unlock(&recv_cq->lock);
+ } else if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
+- spin_unlock_irq(&send_cq->lock);
++ spin_unlock(&send_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_unlock(&recv_cq->lock);
+- spin_unlock_irq(&send_cq->lock);
++ spin_unlock(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+- spin_unlock_irq(&recv_cq->lock);
++ spin_unlock(&recv_cq->lock);
+ }
+ }
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 8dae98f827eb2d..727f926500712c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -122,7 +122,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ return ret;
+ }
+
+- ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
++ ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ if (ret) {
+ ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+ goto err_put;
+@@ -135,7 +135,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ return 0;
+
+ err_xa:
+- xa_erase(&srq_table->xa, srq->srqn);
++ xa_erase_irq(&srq_table->xa, srq->srqn);
+ err_put:
+ hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
+
+@@ -153,7 +153,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
+ ret, srq->srqn);
+
+- xa_erase(&srq_table->xa, srq->srqn);
++ xa_erase_irq(&srq_table->xa, srq->srqn);
+
+ if (refcount_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+@@ -296,7 +296,7 @@ static int set_srq_basic_param(struct hns_roce_srq *srq,
+
+ max_sge = proc_srq_sge(hr_dev, srq, !!udata);
+ if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
+- attr->max_sge > max_sge) {
++ attr->max_sge > max_sge || !attr->max_sge) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid SRQ attr, depth = %u, sge = %u.\n",
+ attr->max_wr, attr->max_sge);
+diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
+index d06e45d2c23fdd..9052e8932dc187 100644
+--- a/drivers/infiniband/hw/irdma/defs.h
++++ b/drivers/infiniband/hw/irdma/defs.h
+@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
+ #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+ #define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+ #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
++#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
+ #define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+ #define IRDMA_AE_RESET_SENT 0x0601
+ #define IRDMA_AE_TERMINATE_SENT 0x0602
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 7cbdd5433dba52..1745f40b075fd1 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -321,7 +321,11 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ break;
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ if (iwqp->iwdev->vsi.tc_change_pending) {
+- atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
++ if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
++ wake_up(&iwqp->iwdev->suspend_wq);
++ }
++ if (iwqp->suspend_pending) {
++ iwqp->suspend_pending = false;
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ break;
+@@ -383,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
++ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ default:
+@@ -566,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
+ free_irq(msix_vec->irq, dev_id);
++ if (rf == dev_id) {
++ tasklet_kill(&rf->dpc_tasklet);
++ } else {
++ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
++
++ tasklet_kill(&iwceq->dpc_tasklet);
++ }
+ }
+
+ /**
+@@ -581,9 +593,6 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
+ struct irdma_cqp *cqp = &rf->cqp;
+ int status = 0;
+
+- if (rf->cqp_cmpl_wq)
+- destroy_workqueue(rf->cqp_cmpl_wq);
+-
+ status = irdma_sc_cqp_destroy(dev->cqp);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
+@@ -748,6 +757,9 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf)
+ struct irdma_ccq *ccq = &rf->ccq;
+ int status = 0;
+
++ if (rf->cqp_cmpl_wq)
++ destroy_workqueue(rf->cqp_cmpl_wq);
++
+ if (!rf->reset)
+ status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
+ if (status)
+@@ -1180,7 +1192,6 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ int status;
+ struct irdma_ceq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+- u64 scratch;
+ u32 ceq_size;
+
+ info.ceq_id = ceq_id;
+@@ -1201,14 +1212,13 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ info.vsi = vsi;
+- scratch = (uintptr_t)&rf->cqp.sc_cqp;
+ status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ if (!status) {
+ if (dev->ceq_valid)
+ status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_CREATE);
+ else
+- status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
++ status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
+ }
+
+ if (status) {
+diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
+index 514453777e07da..be1030d1adfaf7 100644
+--- a/drivers/infiniband/hw/irdma/main.c
++++ b/drivers/infiniband/hw/irdma/main.c
+@@ -48,7 +48,7 @@ static void irdma_prep_tc_change(struct irdma_device *iwdev)
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+- IRDMA_EVENT_TIMEOUT);
++ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+ }
+
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index 82fc5f5b002c04..cbf0db72e1088a 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -78,7 +78,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
+
+ #define MAX_DPC_ITERATIONS 128
+
+-#define IRDMA_EVENT_TIMEOUT 50000
++#define IRDMA_EVENT_TIMEOUT_MS 5000
+ #define IRDMA_VCHNL_EVENT_TIMEOUT 100000
+ #define IRDMA_RST_TIMEOUT_HZ 4
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 3eb7a7a3a975dc..38cecb28d322e4 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -719,7 +719,6 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+- ukinfo->qp_id = iwqp->ibqp.qp_num;
+
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+@@ -839,7 +838,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+
+ if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
+ init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
++ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
++ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
++ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+@@ -942,7 +943,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
+ iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
+
+ init_info.pd = &iwpd->sc_pd;
+- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
++ init_info.qp_uk_init_info.qp_id = qp_num;
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1))
+ init_info.qp_uk_init_info.first_sq_wq = 1;
+ iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+@@ -1157,6 +1158,21 @@ static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
+ return prio;
+ }
+
++static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
++{
++ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
++ !iwqp->suspend_pending,
++ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
++ iwqp->suspend_pending = false;
++ ibdev_warn(&iwqp->iwdev->ibdev,
++ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
++ iwqp->ibqp.qp_num, iwqp->last_aeq);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
+ /**
+ * irdma_modify_qp_roce - modify qp request
+ * @ibqp: qp's pointer for modify
+@@ -1331,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ird=%d\n",
+- attr->max_rd_atomic,
++ attr->max_dest_rd_atomic,
+ dev->hw_attrs.max_hw_ird);
+ return -EINVAL;
+ }
+@@ -1420,17 +1436,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ issue_modify_qp = 1;
++ iwqp->suspend_pending = true;
+ break;
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
+- spin_unlock_irqrestore(&iwqp->lock, flags);
+- info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+- irdma_hw_modify_qp(iwdev, iwqp, &info, true);
+- spin_lock_irqsave(&iwqp->lock, flags);
+- }
+-
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata && udata->inlen) {
+@@ -1467,6 +1477,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
++ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
++ ret = irdma_wait_for_suspend(iwqp);
++ if (ret)
++ return ret;
++ }
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+@@ -2170,9 +2185,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ info.cq_base_pa = iwcq->kmem.pa;
+ }
+
+- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+- (u32)IRDMA_MAX_CQ_READ_THRESH);
++ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
++ (u32)IRDMA_MAX_CQ_READ_THRESH);
+
+ if (irdma_sc_cq_init(cq, &info)) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
+@@ -2889,7 +2903,7 @@ static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+ iwmr->type = reg_type;
+
+ pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
+- iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
++ iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
+
+ iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
+ if (unlikely(!iwmr->page_size)) {
+@@ -2921,6 +2935,11 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+ int err;
+ u8 lvl;
+
++ /* iWarp: Catch page not starting on OS page boundary */
++ if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
++ ib_umem_offset(iwmr->region))
++ return -EINVAL;
++
+ total = req.sq_pages + req.rq_pages + 1;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 5d7b983f47a24f..20297a14c9a61d 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -196,6 +196,7 @@ struct irdma_qp {
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
++ u8 suspend_pending : 1;
+ u8 rsvd : 1;
+ u8 iwarp_state;
+ u16 term_sq_flush_code;
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 7be4c3adb4e2bb..85717482a616e7 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -358,8 +358,8 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ sizeof(struct gdma_create_dma_region_resp));
+
+ create_req->length = umem->length;
+- create_req->offset_in_page = umem->address & (page_sz - 1);
+- create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
++ create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
++ create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
+ create_req->page_count = num_pages_total;
+
+ ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
+@@ -460,13 +460,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+ PAGE_SHIFT;
+ prot = pgprot_writecombine(vma->vm_page_prot);
+
+- ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
++ ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
+ NULL);
+ if (ret)
+ ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
+ else
+- ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
+- pfn, gc->db_page_size, ret);
++ ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
++ pfn, PAGE_SIZE, ret);
+
+ return ret;
+ }
+diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
+index 351207c60eb65d..af79b6e3a5818a 100644
+--- a/drivers/infiniband/hw/mana/mr.c
++++ b/drivers/infiniband/hw/mana/mr.c
+@@ -118,6 +118,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
+ start, iova, length, access_flags);
+
++ access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (access_flags & ~VALID_MR_FLAGS)
+ return ERR_PTR(-EINVAL);
+
+diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
+index 111fa88a3be44f..9a439569ffcf3b 100644
+--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
++++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
+@@ -829,7 +829,7 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
+
+ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
+ {
+- char alias_wq_name[15];
++ char alias_wq_name[22];
+ int ret = 0;
+ int i, j;
+ union ib_gid gid;
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index a37cfac5e23f96..dc9cf45d2d3209 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -2158,7 +2158,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ struct mlx4_ib_demux_ctx *ctx,
+ int port)
+ {
+- char name[12];
++ char name[21];
+ int ret = 0;
+ int i;
+
+diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
+index f87531318feb80..a78a067e3ce7f3 100644
+--- a/drivers/infiniband/hw/mlx5/cong.c
++++ b/drivers/infiniband/hw/mlx5/cong.c
+@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
+ dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
+
+ for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
++ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
++ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
++ if (!MLX5_CAP_GEN(mdev, roce) ||
++ !MLX5_CAP_ROCE(mdev, roce_cc_general))
++ continue;
++
+ dbg_cc_params->params[i].offset = i;
+ dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 8ba53edf23119f..6e19974ecf6e71 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+- UVERBS_ACCESS_WRITE,
++ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index 8102ef113b7e08..64dae68c43e628 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
++ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
++ !mlx5_core_mp_enabled(mdev)) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 555629b798b956..296af7a5c2794d 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -24,6 +24,7 @@
+ #include <linux/mlx5/vport.h>
+ #include <linux/mlx5/fs.h>
+ #include <linux/mlx5/eswitch.h>
++#include <linux/mlx5/driver.h>
+ #include <linux/list.h>
+ #include <rdma/ib_smi.h>
+ #include <rdma/ib_umem_odp.h>
+@@ -443,7 +444,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+- case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
++ case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+@@ -538,7 +539,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
+ if (!ndev)
+ goto out;
+
+- if (dev->lag_active) {
++ if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {
+ rcu_read_lock();
+ upper = netdev_master_upper_dev_get_rcu(ndev);
+ if (upper) {
+@@ -3175,6 +3176,13 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
++ mlx5_core_mp_event_replay(ibdev->mdev,
++ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
++ NULL);
++ mlx5_core_mp_event_replay(mpi->mdev,
++ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
++ NULL);
++
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
+
+ spin_lock(&port->mp.mpi_lock);
+@@ -3226,6 +3234,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+ {
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
++ u64 key;
+ int err;
+
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+@@ -3254,6 +3263,14 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+
+ mlx5_ib_init_cong_debugfs(ibdev, port_num);
+
++ key = mpi->mdev->priv.adev_idx;
++ mlx5_core_mp_event_replay(mpi->mdev,
++ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
++ &key);
++ mlx5_core_mp_event_replay(ibdev->mdev,
++ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
++ &key);
++
+ return true;
+
+ unbind:
+@@ -3715,10 +3732,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
+ return 0;
+-err:
+- mlx5r_macsec_dealloc_gids(dev);
+ err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
++err:
++ mlx5r_macsec_dealloc_gids(dev);
+ return err;
+ }
+
+@@ -4071,10 +4088,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+ return ret;
+
+ ret = mlx5_mkey_cache_init(dev);
+- if (ret) {
++ if (ret)
+ mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+- mlx5r_umr_resource_cleanup(dev);
+- }
+ return ret;
+ }
+
+diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
+index 96ffbbaf0a73d1..5a22be14d958f2 100644
+--- a/drivers/infiniband/hw/mlx5/mem.c
++++ b/drivers/infiniband/hw/mlx5/mem.c
+@@ -30,6 +30,7 @@
+ * SOFTWARE.
+ */
+
++#include <linux/io.h>
+ #include <rdma/ib_umem_odp.h>
+ #include "mlx5_ib.h"
+ #include <linux/jiffies.h>
+@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ __be32 mmio_wqe[16] = {};
+ unsigned long flags;
+ unsigned int idx;
+- int i;
+
+ if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ * we hit doorbell
+ */
+ wmb();
+- for (i = 0; i < 8; i++)
+- mlx5_write64(&mmio_wqe[i * 2],
+- bf->bfreg->map + bf->offset + i * 8);
+- io_stop_wc();
++ __iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
++ sizeof(mmio_wqe) / 8);
+
+ bf->offset ^= bf->buf_size;
+
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 16713baf0d0601..43a963e205eb40 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -115,6 +115,19 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
+ page_offset_quantized)
+
++static inline unsigned long
++mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf)
++{
++ /*
++ * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able
++ * to hold any sgl after a move operation. Ideally the mkc page size
++ * could be changed at runtime to be optimal, but right now the driver
++ * cannot do that.
++ */
++ return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE,
++ umem_dmabuf->umem.iova);
++}
++
+ enum {
+ MLX5_IB_MMAP_OFFSET_START = 9,
+ MLX5_IB_MMAP_OFFSET_END = 255,
+@@ -643,7 +656,7 @@ struct mlx5_ib_mkey {
+ unsigned int ndescs;
+ struct wait_queue_head wait;
+ refcount_t usecount;
+- /* User Mkey must hold either a rb_key or a cache_ent. */
++ /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
+ struct mlx5r_cache_rb_key rb_key;
+ struct mlx5_cache_ent *cache_ent;
+ };
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 8a3762d9ff58c1..9e465cf99733ee 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -48,6 +48,7 @@ enum {
+ MAX_PENDING_REG_MR = 8,
+ };
+
++#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4
+ #define MLX5_UMR_ALIGN 2048
+
+ static void
+@@ -308,6 +309,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2,
+ (ent->rb_key.access_mode >> 2) & 0x7);
++ MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
+
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_mkc_octo_size(ent->rb_key.access_mode,
+@@ -697,10 +699,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
+ new = &((*new)->rb_left);
+ if (cmp < 0)
+ new = &((*new)->rb_right);
+- if (cmp == 0) {
+- mutex_unlock(&cache->rb_lock);
++ if (cmp == 0)
+ return -EEXIST;
+- }
+ }
+
+ /* Add new node and rebalance tree. */
+@@ -716,6 +716,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
+ {
+ struct rb_node *node = dev->cache.rb_root.rb_node;
+ struct mlx5_cache_ent *cur, *smallest = NULL;
++ u64 ndescs_limit;
+ int cmp;
+
+ /*
+@@ -734,10 +735,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
+ return cur;
+ }
+
++ /*
++ * Limit the usage of mkeys larger than twice the required size while
++ * also allowing the usage of smallest cache entry for small MRs.
++ */
++ ndescs_limit = max_t(u64, rb_key.ndescs * 2,
++ MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS);
++
+ return (smallest &&
+ smallest->rb_key.access_mode == rb_key.access_mode &&
+ smallest->rb_key.access_flags == rb_key.access_flags &&
+- smallest->rb_key.ats == rb_key.ats) ?
++ smallest->rb_key.ats == rb_key.ats &&
++ smallest->rb_key.ndescs <= ndescs_limit) ?
+ smallest :
+ NULL;
+ }
+@@ -987,7 +996,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
+ mlx5_mkey_cache_debugfs_init(dev);
+ mutex_lock(&cache->rb_lock);
+ for (i = 0; i <= mkey_cache_max_order(dev); i++) {
+- rb_key.ndescs = 1 << (i + 2);
++ rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i;
+ ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
+@@ -1026,11 +1035,13 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
+ return;
+
+ mutex_lock(&dev->cache.rb_lock);
++ cancel_delayed_work(&dev->cache.remove_ent_dwork);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ xa_lock_irq(&ent->mkeys);
+ ent->disabled = true;
+ xa_unlock_irq(&ent->mkeys);
++ cancel_delayed_work(&ent->dwork);
+ }
+ mutex_unlock(&dev->cache.rb_lock);
+
+@@ -1592,7 +1603,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+- IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
++ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
++ IB_ACCESS_REMOTE_ATOMIC))
+ return false;
+ return mlx5r_umr_can_reconfig(dev, current_access_flags,
+ target_access_flags);
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 4a04cbc5b78a4a..3a4605fda6d57e 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -705,10 +705,8 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ return err;
+ }
+
+- page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
+- log_page_size, 0,
+- umem_dmabuf->umem.iova);
+- if (unlikely(page_size < PAGE_SIZE)) {
++ page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf);
++ if (!page_size) {
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ err = -EINVAL;
+ } else {
+@@ -735,24 +733,31 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ * >0: Number of pages mapped
+ */
+ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+- u32 *bytes_mapped, u32 flags)
++ u32 *bytes_mapped, u32 flags, bool permissive_fault)
+ {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+- if (unlikely(io_virt < mr->ibmr.iova))
++ if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
+ return -EFAULT;
+
+ if (mr->umem->is_dmabuf)
+ return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
+
+ if (!odp->is_implicit_odp) {
++ u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
+ u64 user_va;
+
+- if (check_add_overflow(io_virt - mr->ibmr.iova,
+- (u64)odp->umem.address, &user_va))
++ if (check_add_overflow(offset, (u64)odp->umem.address,
++ &user_va))
+ return -EFAULT;
+- if (unlikely(user_va >= ib_umem_end(odp) ||
+- ib_umem_end(odp) - user_va < bcnt))
++
++ if (permissive_fault) {
++ if (user_va < ib_umem_start(odp))
++ user_va = ib_umem_start(odp);
++ if ((user_va + bcnt) > ib_umem_end(odp))
++ bcnt = ib_umem_end(odp) - user_va;
++ } else if (unlikely(user_va >= ib_umem_end(odp) ||
++ ib_umem_end(odp) - user_va < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
+ flags);
+@@ -859,7 +864,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ case MLX5_MKEY_MR:
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+- ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
++ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
+ if (ret < 0)
+ goto end;
+
+@@ -1712,7 +1717,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+ for (i = 0; i < work->num_sge; ++i) {
+ ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+- work->pf_flags);
++ work->pf_flags, false);
+ if (ret <= 0)
+ continue;
+ mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+@@ -1763,7 +1768,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+- &bytes_mapped, pf_flags);
++ &bytes_mapped, pf_flags, false);
+ if (ret < 0) {
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ return ret;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 78b96bfb4e6ac9..2340baaba8e670 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4045,6 +4045,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
+ return tx_affinity;
+ }
+
++static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
++ struct mlx5_core_dev *mdev)
++{
++ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
++ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
++ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
++ void *rqc;
++
++ if (!qp->rq.wqe_cnt)
++ return 0;
++
++ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
++ MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
++
++ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
++ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
++
++ MLX5_SET64(modify_rq_in, in, modify_bitmask,
++ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
++ MLX5_SET(rqc, rqc, counter_set_id, set_id);
++
++ return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
++}
++
+ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ struct rdma_counter *counter)
+ {
+@@ -4060,6 +4084,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ else
+ set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+
++ if (mqp->type == IB_QPT_RAW_PACKET)
++ return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
++
+ base = &mqp->trans_qp.base;
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index a056ea835da549..84be0c3d569959 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ int err;
+ struct mlx5_srq_attr in = {};
+ __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
++ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
++ sizeof(struct mlx5_wqe_data_seg);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC &&
+ init_attr->srq_type != IB_SRQT_TM)
+ return -EOPNOTSUPP;
+
+- /* Sanity check SRQ size before proceeding */
+- if (init_attr->attr.max_wr >= max_srq_wqes) {
+- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
+- init_attr->attr.max_wr,
+- max_srq_wqes);
++ /* Sanity check SRQ and sge size before proceeding */
++ if (init_attr->attr.max_wr >= max_srq_wqes ||
++ init_attr->attr.max_sge > max_sge_sz) {
++ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
++ init_attr->attr.max_wr, max_srq_wqes,
++ init_attr->attr.max_sge, max_sge_sz);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
+index df1d1b0a3ef72b..9947feb7fb8a0b 100644
+--- a/drivers/infiniband/hw/mlx5/wr.c
++++ b/drivers/infiniband/hw/mlx5/wr.c
+@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ */
+ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ left);
+- memcpy(eseg->inline_hdr.start, pdata, copysz);
++ memcpy(eseg->inline_hdr.data, pdata, copysz);
+ stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ sizeof(eseg->inline_hdr.start) + copysz, 16);
+ *size += stride / 16;
+diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
+index f330ce895d8849..8fe0cef7e2be62 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
+@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
+
+ int mthca_SYS_EN(struct mthca_dev *dev)
+ {
+- u64 out;
++ u64 out = 0;
+ int ret;
+
+ ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
+@@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+ int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ u16 *hash)
+ {
+- u64 imm;
++ u64 imm = 0;
+ int err;
+
+ err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index b54bc8865daecb..1ab268b7709689 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
+ struct mthca_init_hca_param *init_hca,
+ u64 icm_size)
+ {
+- u64 aux_pages;
++ u64 aux_pages = 0;
+ int err;
+
+ err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 7887a6786ed43d..f118ce0a9a617b 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
+ /* RQ - read access only (0) */
+ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
+ ureq.rq_len, true, 0, alloc_and_init);
+- if (rc)
++ if (rc) {
++ ib_umem_release(qp->usq.umem);
++ qp->usq.umem = NULL;
++ if (rdma_protocol_roce(&dev->ibdev, 1)) {
++ qedr_free_pbl(dev, &qp->usq.pbl_info,
++ qp->usq.pbl_tbl);
++ } else {
++ kfree(qp->usq.pbl_tbl);
++ }
+ return rc;
++ }
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
+index ed7d4b02f45a63..11155e0fb8395c 100644
+--- a/drivers/infiniband/hw/qib/qib_fs.c
++++ b/drivers/infiniband/hw/qib/qib_fs.c
+@@ -439,6 +439,7 @@ static int remove_device_files(struct super_block *sb,
+ return PTR_ERR(dir);
+ }
+ simple_recursive_removal(dir, NULL);
++ dput(dir);
+ return 0;
+ }
+
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 54c723a6eddace..6f9ec8db014c79 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
+
+ if (rxe->tfm)
+ crypto_free_shash(rxe->tfm);
++
++ mutex_destroy(&rxe->usdev_lock);
+ }
+
+ /* initialize rxe device parameters */
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index d0bdc2d8adc824..acd2172bf092bd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -131,12 +131,12 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+ {
+ int must_sched;
+
+- skb_queue_tail(&qp->resp_pkts, skb);
+-
+- must_sched = skb_queue_len(&qp->resp_pkts) > 1;
++ must_sched = skb_queue_len(&qp->resp_pkts) > 0;
+ if (must_sched != 0)
+ rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+
++ skb_queue_tail(&qp->resp_pkts, skb);
++
+ if (must_sched)
+ rxe_sched_task(&qp->comp.task);
+ else
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index cd59666158b18a..e5827064ab1e2a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -366,18 +366,10 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ rxe_get(pkt->qp);
+ atomic_inc(&pkt->qp->skb_out);
+
+- if (skb->protocol == htons(ETH_P_IP)) {
++ if (skb->protocol == htons(ETH_P_IP))
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+- } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ else
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+- } else {
+- rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n",
+- skb->protocol);
+- atomic_dec(&pkt->qp->skb_out);
+- rxe_put(pkt->qp);
+- kfree_skb(skb);
+- return -EINVAL;
+- }
+
+ if (unlikely(net_xmit_eval(err))) {
+ rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index d8c41fd626a948..7a36080d2baef5 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -424,7 +424,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ int paylen;
+ int solicited;
+ u32 qp_num;
+- int ack_req;
++ int ack_req = 0;
+
+ /* length from start of bth to end of icrc */
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+@@ -445,8 +445,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
+ qp->attr.dest_qp_num;
+
+- ack_req = ((pkt->mask & RXE_END_MASK) ||
+- (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
++ if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
++ ack_req = ((pkt->mask & RXE_END_MASK) ||
++ (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ if (ack_req)
+ qp->req.noack_pkts = 0;
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index da470a925efc7b..c02aa27fe5d817 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -354,6 +354,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
+ * receive buffer later. For rmda operations additional
+ * length checks are performed in check_rkey.
+ */
++ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
++ unsigned int payload = payload_size(pkt);
++ unsigned int recv_buffer_len = 0;
++ int i;
++
++ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
++ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
++ if (payload + 40 > recv_buffer_len) {
++ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
++ return RESPST_ERR_LENGTH;
++ }
++ }
++
+ if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
+ (qp_type(qp) == IB_QPT_UC))) {
+ unsigned int mtu = qp->mtu;
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 48f86839d36a8e..9f46b9f74825ff 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+ int i;
+
+ for (i = 0; i < ibwr->num_sge; i++, sge++) {
+- memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
++ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
+ p += sge->length;
+ }
+ }
+@@ -888,6 +888,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
+ {
+ int err = 0;
+ unsigned long flags;
++ int good = 0;
+
+ spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ while (ibwr) {
+@@ -895,12 +896,15 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
+ if (err) {
+ *bad_wr = ibwr;
+ break;
++ } else {
++ good++;
+ }
+ ibwr = ibwr->next;
+ }
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+
+- if (!err)
++ /* kickoff processing of any posted wqes */
++ if (good)
+ rxe_sched_task(&qp->req.task);
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 5b3154503bf498..319d4288edddea 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -531,21 +531,18 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
+ }
+- spin_unlock_irq(&priv->lock);
+
+ multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
+- &rec, comp_mask, GFP_KERNEL,
++ &rec, comp_mask, GFP_ATOMIC,
+ ipoib_mcast_join_complete, mcast);
+- spin_lock_irq(&priv->lock);
+ if (IS_ERR(multicast)) {
+ ret = PTR_ERR(multicast);
+ ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+- spin_unlock_irq(&priv->lock);
+ complete(&mcast->done);
+- spin_lock_irq(&priv->lock);
++ return ret;
+ }
+ return 0;
+ }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 4bd161e86f8dde..562df2b3ef1876 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+
+ ppriv = ipoib_priv(pdev);
+
+- snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+- ppriv->dev->name, pkey);
++ /* If you increase IFNAMSIZ, update snprintf below
++ * to allow longer names.
++ */
++ BUILD_BUG_ON(IFNAMSIZ != 16);
++ snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
++ pkey);
+
+ ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (IS_ERR(ndev)) {
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index dee8c97ff0568b..d967d553245961 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -317,12 +317,10 @@ struct iser_device {
+ *
+ * @mr: memory region
+ * @sig_mr: signature memory region
+- * @mr_valid: is mr valid indicator
+ */
+ struct iser_reg_resources {
+ struct ib_mr *mr;
+ struct ib_mr *sig_mr;
+- u8 mr_valid:1;
+ };
+
+ /**
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 39ea73f690168c..f5f090dc4f1eb4 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -581,7 +581,10 @@ static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+ return -EINVAL;
+ }
+
+- desc->rsc.mr_valid = 0;
++ if (desc->sig_protected)
++ desc->rsc.sig_mr->need_inval = false;
++ else
++ desc->rsc.mr->need_inval = false;
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index 29ae2c6a250a30..6efcb79c8efe3f 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -264,7 +264,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+
+ iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
+
+- if (rsc->mr_valid)
++ if (rsc->sig_mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+@@ -288,7 +288,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+- rsc->mr_valid = 1;
++ rsc->sig_mr->need_inval = true;
+
+ sig_reg->sge.lkey = mr->lkey;
+ sig_reg->rkey = mr->rkey;
+@@ -313,7 +313,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ struct ib_reg_wr *wr = &tx_desc->reg_wr;
+ int n;
+
+- if (rsc->mr_valid)
++ if (rsc->mr->need_inval)
+ iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+@@ -336,7 +336,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+- rsc->mr_valid = 1;
++ rsc->mr->need_inval = true;
+
+ reg->sge.lkey = mr->lkey;
+ reg->rkey = mr->rkey;
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 95b8eebf7e045f..6801b70dc9e0ec 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -129,7 +129,6 @@ iser_create_fastreg_desc(struct iser_device *device,
+ goto err_alloc_mr_integrity;
+ }
+ }
+- desc->rsc.mr_valid = 0;
+
+ return desc;
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+index d3c436ead69461..4aa80c9388f058 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
+
+ /* distinguish "mi" and "min-latency" with length */
+ len = strnlen(buf, NAME_MAX);
+- if (buf[len - 1] == '\n')
++ if (len && buf[len - 1] == '\n')
+ len--;
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index b6ee801fd0ffbf..82aa47efb8078d 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -384,7 +384,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+- if (WARN_ON(!req->in_use))
++ if (!req->in_use)
+ return;
+ if (WARN_ON(!req->con))
+ return;
+@@ -626,6 +626,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
++ clt_path->s.hb_missed_cnt = 0;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (imm_type == RTRS_IO_RSP_IMM ||
+@@ -643,7 +644,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+- clt_path->s.hb_missed_cnt = 0;
+ clt_path->s.hb_cur_latency =
+ ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
+@@ -670,6 +670,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ /*
+ * Key invalidations from server side
+ */
++ clt_path->s.hb_missed_cnt = 0;
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+@@ -1694,7 +1695,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ clt_path->s.dev_ref++;
+ max_send_wr = min_t(int, wr_limit,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+- clt_path->queue_depth * 3 + 1);
++ clt_path->queue_depth * 4 + 1);
+ max_recv_wr = min_t(int, wr_limit,
+ clt_path->queue_depth * 3 + 1);
+ max_send_sge = 2;
+@@ -2341,12 +2342,16 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ if (err)
+ goto destroy;
+ }
++
++ /*
++ * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds.
++ */
++ cid = clt_path->s.con_num - 1;
++
+ err = alloc_path_reqs(clt_path);
+ if (err)
+ goto destroy;
+
+- rtrs_start_hb(&clt_path->s);
+-
+ return 0;
+
+ destroy:
+@@ -2620,6 +2625,7 @@ static int init_path(struct rtrs_clt_path *clt_path)
+ goto out;
+ }
+ rtrs_clt_path_up(clt_path);
++ rtrs_start_hb(&clt_path->s);
+ out:
+ mutex_unlock(&clt_path->init_mutex);
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 75e56604e46286..758a3d9c2844d1 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -65,8 +65,9 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ {
+ enum rtrs_srv_state old_state;
+ bool changed = false;
++ unsigned long flags;
+
+- spin_lock_irq(&srv_path->state_lock);
++ spin_lock_irqsave(&srv_path->state_lock, flags);
+ old_state = srv_path->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+@@ -87,7 +88,7 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ }
+ if (changed)
+ srv_path->state = new_state;
+- spin_unlock_irq(&srv_path->state_lock);
++ spin_unlock_irqrestore(&srv_path->state_lock, flags);
+
+ return changed;
+ }
+@@ -550,7 +551,10 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &srv_path->mrs[i];
+- rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
++ if (always_invalidate)
++ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+@@ -709,20 +713,23 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+ WARN_ON(wc->opcode != IB_WC_SEND);
+ }
+
+-static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
++static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+ {
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+- int up;
++ int up, ret = 0;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+- ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
++ ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+- srv_path->established = true;
++ if (!ret)
++ srv_path->established = true;
++
++ return ret;
+ }
+
+ static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
+@@ -851,7 +858,12 @@ static int process_info_req(struct rtrs_srv_con *con,
+ goto iu_free;
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+- rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++ err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++ if (!err) {
++ rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
++ goto iu_free;
++ }
++
+ rtrs_srv_start_hb(srv_path);
+
+ /*
+@@ -860,7 +872,11 @@ static int process_info_req(struct rtrs_srv_con *con,
+ * all connections are successfully established. Thus, simply notify
+ * listener with a proper event if we are the first path.
+ */
+- rtrs_srv_path_up(srv_path);
++ err = rtrs_srv_path_up(srv_path);
++ if (err) {
++ rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
++ goto iu_free;
++ }
+
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
+@@ -915,12 +931,11 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (err)
+ goto close;
+
+-out:
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
+ return;
+ close:
++ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
+ close_path(srv_path);
+- goto out;
+ }
+
+ static int post_recv_info_req(struct rtrs_srv_con *con)
+@@ -971,6 +986,16 @@ static int post_recv_path(struct rtrs_srv_path *srv_path)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = srv->queue_depth;
++ if (srv_path->state != RTRS_SRV_CONNECTING) {
++ rtrs_err(s, "Path state invalid. state %s\n",
++ rtrs_srv_state_str(srv_path->state));
++ return -EIO;
++ }
++
++ if (!srv_path->s.con[cid]) {
++ rtrs_err(s, "Conn not set for %d\n", cid);
++ return -EIO;
++ }
+
+ err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
+ if (err) {
+@@ -1213,6 +1238,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ */
+ if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
+ return;
++ srv_path->s.hb_missed_cnt = 0;
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (err) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
+@@ -1516,7 +1542,6 @@ static void rtrs_srv_close_work(struct work_struct *work)
+
+ srv_path = container_of(work, typeof(*srv_path), close_work);
+
+- rtrs_srv_destroy_path_files(srv_path);
+ rtrs_srv_stop_hb(srv_path);
+
+ for (i = 0; i < srv_path->s.con_num; i++) {
+@@ -1536,6 +1561,8 @@ static void rtrs_srv_close_work(struct work_struct *work)
+ /* Wait for all completion */
+ wait_for_completion(&srv_path->complete_done);
+
++ rtrs_srv_destroy_path_files(srv_path);
++
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_path_down(srv_path);
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 3696f367ff5151..d80edfffd2e495 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -255,7 +255,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
+ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
+ {
+- struct ib_qp_init_attr init_attr = {NULL};
++ struct ib_qp_init_attr init_attr = {};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index c12005eab14c19..45547bf281e312 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
+ MODULE_PARM_DESC(srpt_srq_size,
+ "Shared receive queue (SRQ) size.");
+
++static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
++{
++ return kstrtou64(buffer, 16, (u64 *)kp->arg);
++}
+ static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
+ {
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
+ }
+-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+- 0444);
++module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
++ &srpt_service_guid, 0444);
+ MODULE_PARM_DESC(srpt_service_guid,
+ "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
+
+@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
+ /**
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+- * @ch: SRPT RDMA channel.
++ * @ptr: SRPT RDMA channel.
+ */
+-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
++static void srpt_qp_event(struct ib_event *event, void *ptr)
+ {
++ struct srpt_rdma_ch *ch = ptr;
++
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+@@ -1807,8 +1813,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ ch->cq_size = ch->rq_size + sq_size;
+
+ qp_init->qp_context = (void *)ch;
+- qp_init->event_handler
+- = (void(*)(struct ib_event *, void*))srpt_qp_event;
++ qp_init->event_handler = srpt_qp_event;
+ qp_init->send_cq = ch->cq;
+ qp_init->recv_cq = ch->cq;
+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+@@ -3204,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
+
+ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+ srpt_event_handler);
+- ib_register_event_handler(&sdev->event_handler);
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+@@ -3227,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
+ }
+ }
+
++ ib_register_event_handler(&sdev->event_handler);
+ spin_lock(&srpt_dev_lock);
+ list_add_tail(&sdev->list, &srpt_dev_list);
+ spin_unlock(&srpt_dev_lock);
+@@ -3237,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
+
+ err_port:
+ srpt_unregister_mad_agent(sdev, i);
+- ib_unregister_event_handler(&sdev->event_handler);
+ err_cm:
+ if (sdev->cm_id)
+ ib_destroy_cm_id(sdev->cm_id);
+diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
+index 16231fe080b006..609a5f01761bd3 100644
+--- a/drivers/input/ff-core.c
++++ b/drivers/input/ff-core.c
+@@ -9,8 +9,10 @@
+ /* #define DEBUG */
+
+ #include <linux/input.h>
++#include <linux/limits.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/overflow.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+
+@@ -315,9 +317,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
+ return -EINVAL;
+ }
+
+- ff_dev_size = sizeof(struct ff_device) +
+- max_effects * sizeof(struct file *);
+- if (ff_dev_size < max_effects) /* overflow */
++ ff_dev_size = struct_size(ff, effect_owners, max_effects);
++ if (ff_dev_size == SIZE_MAX) /* overflow */
+ return -EINVAL;
+
+ ff = kzalloc(ff_dev_size, GFP_KERNEL);
+diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
+index 14b53dac1253bf..6b04a674f832a0 100644
+--- a/drivers/input/input-mt.c
++++ b/drivers/input/input-mt.c
+@@ -46,6 +46,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
+ return 0;
+ if (mt)
+ return mt->num_slots != num_slots ? -EINVAL : 0;
++	/* Arbitrary limit to avoid overly large memory allocations. */
++ if (num_slots > 1024)
++ return -EINVAL;
+
+ mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
+ if (!mt)
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 8c5fdb0f858ab5..9bb1d3de723ee1 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -1378,19 +1378,19 @@ static int input_print_modalias_bits(char *buf, int size,
+ char name, const unsigned long *bm,
+ unsigned int min_bit, unsigned int max_bit)
+ {
+- int len = 0, i;
++ int bit = min_bit;
++ int len = 0;
+
+ len += snprintf(buf, max(size, 0), "%c", name);
+- for (i = min_bit; i < max_bit; i++)
+- if (bm[BIT_WORD(i)] & BIT_MASK(i))
+- len += snprintf(buf + len, max(size - len, 0), "%X,", i);
++ for_each_set_bit_from(bit, bm, max_bit)
++ len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
+ return len;
+ }
+
+-static int input_print_modalias(char *buf, int size, const struct input_dev *id,
+- int add_cr)
++static int input_print_modalias_parts(char *buf, int size, int full_len,
++ const struct input_dev *id)
+ {
+- int len;
++ int len, klen, remainder, space;
+
+ len = snprintf(buf, max(size, 0),
+ "input:b%04Xv%04Xp%04Xe%04X-",
+@@ -1399,8 +1399,48 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id,
+
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'e', id->evbit, 0, EV_MAX);
+- len += input_print_modalias_bits(buf + len, size - len,
++
++ /*
++ * Calculate the remaining space in the buffer making sure we
++	 * have room for the terminating 0.
++ */
++ space = max(size - (len + 1), 0);
++
++ klen = input_print_modalias_bits(buf + len, size - len,
+ 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
++ len += klen;
++
++ /*
++ * If we have more data than we can fit in the buffer, check
++ * if we can trim key data to fit in the rest. We will indicate
++	 * that key data is incomplete by adding a "+" sign at the end, like
++	 * this: "k1,2,3,45,+,".
++ *
++	 * Note that the shortest key info (if present) is "k+," so we
++ * can only try to trim if key data is longer than that.
++ */
++ if (full_len && size < full_len + 1 && klen > 3) {
++ remainder = full_len - len;
++ /*
++ * We can only trim if we have space for the remainder
++ * and also for at least "k+," which is 3 more characters.
++ */
++ if (remainder <= space - 3) {
++ /*
++ * We are guaranteed to have 'k' in the buffer, so
++ * we need at least 3 additional bytes for storing
++ * "+," in addition to the remainder.
++ */
++ for (int i = size - 1 - remainder - 3; i >= 0; i--) {
++ if (buf[i] == 'k' || buf[i] == ',') {
++ strcpy(buf + i + 1, "+,");
++ len = i + 3; /* Not counting '\0' */
++ break;
++ }
++ }
++ }
++ }
++
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'r', id->relbit, 0, REL_MAX);
+ len += input_print_modalias_bits(buf + len, size - len,
+@@ -1416,12 +1456,25 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id,
+ len += input_print_modalias_bits(buf + len, size - len,
+ 'w', id->swbit, 0, SW_MAX);
+
+- if (add_cr)
+- len += snprintf(buf + len, max(size - len, 0), "\n");
+-
+ return len;
+ }
+
++static int input_print_modalias(char *buf, int size, const struct input_dev *id)
++{
++ int full_len;
++
++ /*
++ * Printing is done in 2 passes: first one figures out total length
++ * needed for the modalias string, second one will try to trim key
++ * data in case when buffer is too small for the entire modalias.
++ * If the buffer is too small regardless, it will fill as much as it
++ * can (without trimming key data) into the buffer and leave it to
++ * the caller to figure out what to do with the result.
++ */
++ full_len = input_print_modalias_parts(NULL, 0, 0, id);
++ return input_print_modalias_parts(buf, size, full_len, id);
++}
++
+ static ssize_t input_dev_show_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+@@ -1429,7 +1482,9 @@ static ssize_t input_dev_show_modalias(struct device *dev,
+ struct input_dev *id = to_input_dev(dev);
+ ssize_t len;
+
+- len = input_print_modalias(buf, PAGE_SIZE, id, 1);
++ len = input_print_modalias(buf, PAGE_SIZE, id);
++ if (len < PAGE_SIZE - 2)
++ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
+ return min_t(int, len, PAGE_SIZE);
+ }
+@@ -1641,6 +1696,23 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
+ return 0;
+ }
+
++/*
++ * This is a pretty gross hack. When building uevent data the driver core
++ * may try adding more environment variables to kobj_uevent_env without
++ * telling us, so we have no idea how much of the buffer we can use to
++ * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
++ * reduce amount of memory we will use for the modalias environment variable.
++ *
++ * The potential additions are:
++ *
++ * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
++ * HOME=/ (6 bytes)
++ * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
++ *
++ * 68 bytes total. Allow extra buffer - 96 bytes
++ */
++#define UEVENT_ENV_EXTRA_LEN 96
++
+ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
+ const struct input_dev *dev)
+ {
+@@ -1650,9 +1722,11 @@ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
+ return -ENOMEM;
+
+ len = input_print_modalias(&env->buf[env->buflen - 1],
+- sizeof(env->buf) - env->buflen,
+- dev, 0);
+- if (len >= (sizeof(env->buf) - env->buflen))
++ (int)sizeof(env->buf) - env->buflen -
++ UEVENT_ENV_EXTRA_LEN,
++ dev);
++ if (len >= ((int)sizeof(env->buf) - env->buflen -
++ UEVENT_ENV_EXTRA_LEN))
+ return -ENOMEM;
+
+ env->buflen += len;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index f5c21565bb3cec..1cb47488375be4 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -130,7 +130,12 @@ static const struct xpad_device {
+ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
++ { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */
++ { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
+ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
++ { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
++ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
++ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
+ { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+@@ -202,6 +207,8 @@ static const struct xpad_device {
+ { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
+ { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
++ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
++ { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
+ { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+@@ -210,6 +217,7 @@ static const struct xpad_device {
+ { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
++ { 0x0db0, 0x1901, "Micro Star International Xbox360 Controller for Windows", 0, XTYPE_XBOX360 },
+ { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+@@ -286,6 +294,7 @@ static const struct xpad_device {
+ { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
++ { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
+ { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+@@ -293,6 +302,7 @@ static const struct xpad_device {
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++ { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
+ { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -359,6 +369,8 @@ static const struct xpad_device {
+ { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
++ { 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
++ { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+ { 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+@@ -461,6 +473,7 @@ static const struct usb_device_id xpad_table[] = {
+ { USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
+ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
+ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
++ XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
+ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
+ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
+@@ -472,7 +485,9 @@ static const struct usb_device_id xpad_table[] = {
+ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
+ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz Gamepad */
++ XPAD_XBOXONE_VENDOR(0x0b05), /* ASUS controllers */
+ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
++ XPAD_XBOX360_VENDOR(0x0db0), /* Micro Star International X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f Xbox 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f Xbox One controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori controllers */
+@@ -490,6 +505,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x15e4), /* Numark Xbox 360 controllers */
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech Xbox 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
++ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
+ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
+@@ -498,6 +514,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
+ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
+ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
++ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
+ XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
+diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
+index 61e8e43e9c2bbd..48981327440193 100644
+--- a/drivers/input/keyboard/adp5588-keys.c
++++ b/drivers/input/keyboard/adp5588-keys.c
+@@ -627,7 +627,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad)
+
+ for (i = 0; i < KEYP_MAX_EVENT; i++) {
+ ret = adp5588_read(client, KEY_EVENTA);
+- if (ret)
++ if (ret < 0)
+ return ret;
+ }
+
+diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
+index 8996e00cd63a82..922d3ab998f3a5 100644
+--- a/drivers/input/keyboard/adp5589-keys.c
++++ b/drivers/input/keyboard/adp5589-keys.c
+@@ -391,10 +391,17 @@ static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
+ struct adp5589_kpad *kpad = gpiochip_get_data(chip);
+ unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
+ unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
++ int val;
+
+- return !!(adp5589_read(kpad->client,
+- kpad->var->reg(ADP5589_GPI_STATUS_A) + bank) &
+- bit);
++ mutex_lock(&kpad->gpio_lock);
++ if (kpad->dir[bank] & bit)
++ val = kpad->dat_out[bank];
++ else
++ val = adp5589_read(kpad->client,
++ kpad->var->reg(ADP5589_GPI_STATUS_A) + bank);
++ mutex_unlock(&kpad->gpio_lock);
++
++ return !!(val & bit);
+ }
+
+ static void adp5589_gpio_set_value(struct gpio_chip *chip,
+@@ -936,10 +943,9 @@ static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid)
+
+ static void adp5589_clear_config(void *data)
+ {
+- struct i2c_client *client = data;
+- struct adp5589_kpad *kpad = i2c_get_clientdata(client);
++ struct adp5589_kpad *kpad = data;
+
+- adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
++ adp5589_write(kpad->client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
+ }
+
+ static int adp5589_probe(struct i2c_client *client)
+@@ -983,7 +989,7 @@ static int adp5589_probe(struct i2c_client *client)
+ }
+
+ error = devm_add_action_or_reset(&client->dev, adp5589_clear_config,
+- client);
++ kpad);
+ if (error)
+ return error;
+
+@@ -1010,8 +1016,6 @@ static int adp5589_probe(struct i2c_client *client)
+ if (error)
+ return error;
+
+- i2c_set_clientdata(client, kpad);
+-
+ dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
+ return 0;
+ }
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index c92e544c792df8..c229bd6b3f7f2f 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -765,6 +765,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
+ ps2dev->serio->phys);
+ }
+
++#ifdef CONFIG_X86
++static bool atkbd_is_portable_device(void)
++{
++ static const char * const chassis_types[] = {
++ "8", /* Portable */
++ "9", /* Laptop */
++ "10", /* Notebook */
++ "14", /* Sub-Notebook */
++ "31", /* Convertible */
++ "32", /* Detachable */
++ };
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
++ if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
++ return true;
++
++ return false;
++}
++
++/*
++ * On many modern laptops ATKBD_CMD_GETID may cause problems; on these laptops
++ * the controller is always in translated mode. In this mode mice/touchpads will
++ * not work. So in this case simply assume a keyboard is connected to avoid
++ * confusing some laptop keyboards.
++ *
++ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard
++ * 0xab83 id is ok in translated mode; only atkbd_select_set() checks atkbd->id
++ * and in translated mode that is a no-op.
++ */
++static bool atkbd_skip_getid(struct atkbd *atkbd)
++{
++ return atkbd->translated && atkbd_is_portable_device();
++}
++#else
++static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
++#endif
++
+ /*
+ * atkbd_probe() probes for an AT keyboard on a serio port.
+ */
+@@ -786,6 +824,11 @@ static int atkbd_probe(struct atkbd *atkbd)
+ "keyboard reset failed on %s\n",
+ ps2dev->serio->phys);
+
++ if (atkbd_skip_getid(atkbd)) {
++ atkbd->id = 0xab83;
++ return 0;
++ }
++
+ /*
+ * Then we check the keyboard ID. We should get 0xab83 under normal conditions.
+ * Some keyboards report different values, but the first byte is always 0xab or
+@@ -797,9 +840,9 @@ static int atkbd_probe(struct atkbd *atkbd)
+ if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+
+ /*
+- * If the get ID command failed, we check if we can at least set the LEDs on
+- * the keyboard. This should work on every keyboard out there. It also turns
+- * the LEDs off, which we want anyway.
++ * If the get ID command failed, we check if we can at least set
++ * the LEDs on the keyboard. This should work on every keyboard out there.
++ * It also turns the LEDs off, which we want anyway.
+ */
+ param[0] = 0;
+ if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
+index ba00ecfbd343bc..b41fd1240f4312 100644
+--- a/drivers/input/keyboard/gpio_keys_polled.c
++++ b/drivers/input/keyboard/gpio_keys_polled.c
+@@ -315,12 +315,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
+
+ error = devm_gpio_request_one(dev, button->gpio,
+ flags, button->desc ? : DRV_NAME);
+- if (error) {
+- dev_err(dev,
+- "unable to claim gpio %u, err=%d\n",
+- button->gpio, error);
+- return error;
+- }
++ if (error)
++ return dev_err_probe(dev, error,
++ "unable to claim gpio %u\n",
++ button->gpio);
+
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod) {
+diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c
+index 7b509bce2b332f..1d71dd79ffd289 100644
+--- a/drivers/input/keyboard/ipaq-micro-keys.c
++++ b/drivers/input/keyboard/ipaq-micro-keys.c
+@@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
+ keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
+ keys->input->keycodesize * keys->input->keycodemax,
+ GFP_KERNEL);
++ if (!keys->codes)
++ return -ENOMEM;
++
+ keys->input->keycode = keys->codes;
+
+ __set_bit(EV_KEY, keys->input->evbit);
+diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
+index 6953097db4456f..cd2f4216daf865 100644
+--- a/drivers/input/keyboard/qt1050.c
++++ b/drivers/input/keyboard/qt1050.c
+@@ -226,7 +226,12 @@ static bool qt1050_identify(struct qt1050_priv *ts)
+ int err;
+
+ /* Read Chip ID */
+- regmap_read(ts->regmap, QT1050_CHIP_ID, &val);
++ err = regmap_read(ts->regmap, QT1050_CHIP_ID, &val);
++ if (err) {
++ dev_err(&ts->client->dev, "Failed to read chip ID: %d\n", err);
++ return false;
++ }
++
+ if (val != QT1050_CHIP_ID_VER) {
+ dev_err(&ts->client->dev, "ID %d not supported\n", val);
+ return false;
+diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c
+index ce82548916bbc6..c1fa75c0f970ad 100644
+--- a/drivers/input/misc/da7280.c
++++ b/drivers/input/misc/da7280.c
+@@ -352,7 +352,7 @@ static int da7280_haptic_set_pwm(struct da7280_haptic *haptics, bool enabled)
+ state.duty_cycle = period_mag_multi;
+ }
+
+- error = pwm_apply_state(haptics->pwm_dev, &state);
++ error = pwm_apply_might_sleep(haptics->pwm_dev, &state);
+ if (error)
+ dev_err(haptics->dev, "Failed to apply pwm state: %d\n", error);
+
+@@ -1175,7 +1175,7 @@ static int da7280_probe(struct i2c_client *client)
+ /* Sync up PWM state and ensure it is off. */
+ pwm_init_state(haptics->pwm_dev, &state);
+ state.enabled = false;
+- error = pwm_apply_state(haptics->pwm_dev, &state);
++ error = pwm_apply_might_sleep(haptics->pwm_dev, &state);
+ if (error) {
+ dev_err(dev, "Failed to apply PWM state: %d\n", error);
+ return error;
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index b2f1292e27ef7d..180d90e46061e7 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -42,8 +42,8 @@ struct ims_pcu_backlight {
+ #define IMS_PCU_PART_NUMBER_LEN 15
+ #define IMS_PCU_SERIAL_NUMBER_LEN 8
+ #define IMS_PCU_DOM_LEN 8
+-#define IMS_PCU_FW_VERSION_LEN (9 + 1)
+-#define IMS_PCU_BL_VERSION_LEN (9 + 1)
++#define IMS_PCU_FW_VERSION_LEN 16
++#define IMS_PCU_BL_VERSION_LEN 16
+ #define IMS_PCU_BL_RESET_REASON_LEN (2 + 1)
+
+ #define IMS_PCU_PCU_B_DEVICE_ID 5
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index 36aeeae7761101..9ca5a743f19feb 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -620,6 +620,118 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
+ },
+ },
+ },
++ {
++ .prod_num = IQS7222_PROD_NUM_D,
++ .fw_major = 1,
++ .fw_minor = 2,
++ .touch_link = 1770,
++ .allow_offset = 9,
++ .event_offset = 10,
++ .comms_offset = 11,
++ .reg_grps = {
++ [IQS7222_REG_GRP_STAT] = {
++ .base = IQS7222_SYS_STATUS,
++ .num_row = 1,
++ .num_col = 7,
++ },
++ [IQS7222_REG_GRP_CYCLE] = {
++ .base = 0x8000,
++ .num_row = 7,
++ .num_col = 2,
++ },
++ [IQS7222_REG_GRP_GLBL] = {
++ .base = 0x8700,
++ .num_row = 1,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_BTN] = {
++ .base = 0x9000,
++ .num_row = 14,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_CHAN] = {
++ .base = 0xA000,
++ .num_row = 14,
++ .num_col = 4,
++ },
++ [IQS7222_REG_GRP_FILT] = {
++ .base = 0xAE00,
++ .num_row = 1,
++ .num_col = 2,
++ },
++ [IQS7222_REG_GRP_TPAD] = {
++ .base = 0xB000,
++ .num_row = 1,
++ .num_col = 24,
++ },
++ [IQS7222_REG_GRP_GPIO] = {
++ .base = 0xC000,
++ .num_row = 3,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_SYS] = {
++ .base = IQS7222_SYS_SETUP,
++ .num_row = 1,
++ .num_col = 12,
++ },
++ },
++ },
++ {
++ .prod_num = IQS7222_PROD_NUM_D,
++ .fw_major = 1,
++ .fw_minor = 1,
++ .touch_link = 1774,
++ .allow_offset = 9,
++ .event_offset = 10,
++ .comms_offset = 11,
++ .reg_grps = {
++ [IQS7222_REG_GRP_STAT] = {
++ .base = IQS7222_SYS_STATUS,
++ .num_row = 1,
++ .num_col = 7,
++ },
++ [IQS7222_REG_GRP_CYCLE] = {
++ .base = 0x8000,
++ .num_row = 7,
++ .num_col = 2,
++ },
++ [IQS7222_REG_GRP_GLBL] = {
++ .base = 0x8700,
++ .num_row = 1,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_BTN] = {
++ .base = 0x9000,
++ .num_row = 14,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_CHAN] = {
++ .base = 0xA000,
++ .num_row = 14,
++ .num_col = 4,
++ },
++ [IQS7222_REG_GRP_FILT] = {
++ .base = 0xAE00,
++ .num_row = 1,
++ .num_col = 2,
++ },
++ [IQS7222_REG_GRP_TPAD] = {
++ .base = 0xB000,
++ .num_row = 1,
++ .num_col = 24,
++ },
++ [IQS7222_REG_GRP_GPIO] = {
++ .base = 0xC000,
++ .num_row = 3,
++ .num_col = 3,
++ },
++ [IQS7222_REG_GRP_SYS] = {
++ .base = IQS7222_SYS_SETUP,
++ .num_row = 1,
++ .num_col = 12,
++ },
++ },
++ },
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 0,
+diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
+index 5c288fe7accf1f..79f478d3a9b370 100644
+--- a/drivers/input/misc/pm8xxx-vibrator.c
++++ b/drivers/input/misc/pm8xxx-vibrator.c
+@@ -13,7 +13,8 @@
+
+ #define VIB_MAX_LEVEL_mV (3100)
+ #define VIB_MIN_LEVEL_mV (1200)
+-#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
++#define VIB_PER_STEP_mV (100)
++#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV + VIB_PER_STEP_mV)
+
+ #define MAX_FF_SPEED 0xff
+
+@@ -117,10 +118,10 @@ static void pm8xxx_work_handler(struct work_struct *work)
+ vib->active = true;
+ vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) +
+ VIB_MIN_LEVEL_mV;
+- vib->level /= 100;
++ vib->level /= VIB_PER_STEP_mV;
+ } else {
+ vib->active = false;
+- vib->level = VIB_MIN_LEVEL_mV / 100;
++ vib->level = VIB_MIN_LEVEL_mV / VIB_PER_STEP_mV;
+ }
+
+ pm8xxx_vib_set(vib, vib->active);
+diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
+index 1e731d8397c6f5..5b9aedf4362f49 100644
+--- a/drivers/input/misc/pwm-beeper.c
++++ b/drivers/input/misc/pwm-beeper.c
+@@ -39,7 +39,7 @@ static int pwm_beeper_on(struct pwm_beeper *beeper, unsigned long period)
+ state.period = period;
+ pwm_set_relative_duty_cycle(&state, 50, 100);
+
+- error = pwm_apply_state(beeper->pwm, &state);
++ error = pwm_apply_might_sleep(beeper->pwm, &state);
+ if (error)
+ return error;
+
+@@ -138,7 +138,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ /* Sync up PWM state and ensure it is off. */
+ pwm_init_state(beeper->pwm, &state);
+ state.enabled = false;
+- error = pwm_apply_state(beeper->pwm, &state);
++ error = pwm_apply_might_sleep(beeper->pwm, &state);
+ if (error) {
+ dev_err(dev, "failed to apply initial PWM state: %d\n",
+ error);
+diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
+index acac79c488aa15..3e5ed685ed8f50 100644
+--- a/drivers/input/misc/pwm-vibra.c
++++ b/drivers/input/misc/pwm-vibra.c
+@@ -56,7 +56,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
+ pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff);
+ state.enabled = true;
+
+- err = pwm_apply_state(vibrator->pwm, &state);
++ err = pwm_apply_might_sleep(vibrator->pwm, &state);
+ if (err) {
+ dev_err(pdev, "failed to apply pwm state: %d\n", err);
+ return err;
+@@ -67,7 +67,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
+ state.duty_cycle = vibrator->direction_duty_cycle;
+ state.enabled = true;
+
+- err = pwm_apply_state(vibrator->pwm_dir, &state);
++ err = pwm_apply_might_sleep(vibrator->pwm_dir, &state);
+ if (err) {
+ dev_err(pdev, "failed to apply dir-pwm state: %d\n", err);
+ pwm_disable(vibrator->pwm);
+@@ -160,7 +160,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
+ /* Sync up PWM state and ensure it is off. */
+ pwm_init_state(vibrator->pwm, &state);
+ state.enabled = false;
+- err = pwm_apply_state(vibrator->pwm, &state);
++ err = pwm_apply_might_sleep(vibrator->pwm, &state);
+ if (err) {
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
+ err);
+@@ -174,7 +174,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
+ /* Sync up PWM state and ensure it is off. */
+ pwm_init_state(vibrator->pwm_dir, &state);
+ state.enabled = false;
+- err = pwm_apply_state(vibrator->pwm_dir, &state);
++ err = pwm_apply_might_sleep(vibrator->pwm_dir, &state);
+ if (err) {
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
+ err);
+diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
+index e79f5497948b8c..9116f4248fd099 100644
+--- a/drivers/input/misc/soc_button_array.c
++++ b/drivers/input/misc/soc_button_array.c
+@@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
+ info->name = "power";
+ info->event_code = KEY_POWER;
+ info->wakeup = true;
++ } else if (upage == 0x01 && usage == 0xc6) {
++ info->name = "airplane mode switch";
++ info->event_type = EV_SW;
++ info->event_code = SW_RFKILL_ALL;
++ info->active_low = false;
+ } else if (upage == 0x01 && usage == 0xca) {
+ info->name = "rotation lock switch";
+ info->event_type = EV_SW;
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index d98212d55108c7..2c973f15cab7da 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ return -EINVAL;
+ }
+
++ /*
++ * Limit number of contacts to a reasonable value (100). This
++ * ensures that we need less than 2 pages for struct input_mt
++	 * (we are not using in-kernel slot assignment, so we are not going to
++ * allocate memory for the "red" table), and we should have no
++ * trouble getting this much memory.
++ */
++ if (code == ABS_MT_SLOT && max > 99) {
++ printk(KERN_DEBUG
++ "%s: unreasonably large number of slots requested: %d\n",
++ UINPUT_NAME, max);
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
+index 05851bc32541f7..40b01cabaa3375 100644
+--- a/drivers/input/mouse/cyapa.c
++++ b/drivers/input/mouse/cyapa.c
+@@ -1356,10 +1356,16 @@ static int cyapa_suspend(struct device *dev)
+ u8 power_mode;
+ int error;
+
+- error = mutex_lock_interruptible(&cyapa->state_sync_lock);
++ error = mutex_lock_interruptible(&cyapa->input->mutex);
+ if (error)
+ return error;
+
++ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
++ if (error) {
++ mutex_unlock(&cyapa->input->mutex);
++ return error;
++ }
++
+ /*
+ * Runtime PM is enable only when device is in operational mode and
+ * users in use, so need check it before disable it to
+@@ -1394,6 +1400,8 @@ static int cyapa_suspend(struct device *dev)
+ cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
+
+ mutex_unlock(&cyapa->state_sync_lock);
++ mutex_unlock(&cyapa->input->mutex);
++
+ return 0;
+ }
+
+@@ -1403,6 +1411,7 @@ static int cyapa_resume(struct device *dev)
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+ int error;
+
++ mutex_lock(&cyapa->input->mutex);
+ mutex_lock(&cyapa->state_sync_lock);
+
+ if (device_may_wakeup(dev) && cyapa->irq_wake) {
+@@ -1421,6 +1430,7 @@ static int cyapa_resume(struct device *dev)
+ enable_irq(client->irq);
+
+ mutex_unlock(&cyapa->state_sync_lock);
++ mutex_unlock(&cyapa->input->mutex);
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 148a601396f92c..dc80e407fb8603 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1356,6 +1356,8 @@ static int elan_suspend(struct device *dev)
+ }
+
+ err:
++ if (ret)
++ enable_irq(client->irq);
+ mutex_unlock(&data->sysfs_mutex);
+ return ret;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 4e38229404b4b0..b4723ea395eb9f 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1476,16 +1476,47 @@ static void elantech_disconnect(struct psmouse *psmouse)
+ psmouse->private = NULL;
+ }
+
++/*
++ * Some hw_version 4 models fail to properly activate absolute mode on
++ * resume without going through a disable/enable cycle.
++ */
++static const struct dmi_system_id elantech_needs_reenable[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ /* Lenovo N24 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
++ },
++ },
++#endif
++ { }
++};
++
+ /*
+ * Put the touchpad back into absolute mode when reconnecting
+ */
+ static int elantech_reconnect(struct psmouse *psmouse)
+ {
++ int err;
++
+ psmouse_reset(psmouse);
+
+ if (elantech_detect(psmouse, 0))
+ return -1;
+
++ if (dmi_check_system(elantech_needs_reenable)) {
++ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
++ if (err)
++ psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
++ psmouse->ps2dev.serio->phys, err);
++
++ err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
++ if (err)
++ psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
++ psmouse->ps2dev.serio->phys, err);
++ }
++
+ if (elantech_set_absolute_mode(psmouse)) {
+ psmouse_err(psmouse,
+ "failed to put touchpad back into absolute mode.\n");
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 22d16d80efb938..cff3393f0dd000 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -183,11 +183,13 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN009b", /* T580 */
+ "LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
+ "LEN040f", /* P1 Gen 3 */
++ "LEN0411", /* L14 Gen 1 */
+ "LEN200f", /* T450s */
+ "LEN2044", /* L470 */
+ "LEN2054", /* E480 */
+ "LEN2055", /* E580 */
+ "LEN2068", /* T14 Gen 1 */
++ "SYN3015", /* HP EliteBook 840 G2 */
+ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
+ "SYN323d", /* HP Spectre X360 13-w013dx */
+diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
+index f2e093b0b9982d..1b45b1d3077de7 100644
+--- a/drivers/input/rmi4/rmi_bus.c
++++ b/drivers/input/rmi4/rmi_bus.c
+@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
+
+ device_del(&fn->dev);
+ of_node_put(fn->dev.of_node);
+- put_device(&fn->dev);
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ irq_dispose_mapping(fn->irq[i]);
+
++ put_device(&fn->dev);
+ }
+
+ /**
+diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
+index 258d5fe3d395c4..ef9ea295f9e035 100644
+--- a/drivers/input/rmi4/rmi_driver.c
++++ b/drivers/input/rmi4/rmi_driver.c
+@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev)
+
+ rmi_disable_irq(rmi_dev, false);
+
+- irq_domain_remove(data->irqdomain);
+- data->irqdomain = NULL;
+-
+ rmi_f34_remove_sysfs(rmi_dev);
+ rmi_free_function_list(rmi_dev);
+
++ irq_domain_remove(data->irqdomain);
++ data->irqdomain = NULL;
++
+ return 0;
+ }
+
+@@ -1196,7 +1196,11 @@ static int rmi_driver_probe(struct device *dev)
+ }
+ rmi_driver_set_input_params(rmi_dev, data->input);
+ data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
+- "%s/input0", dev_name(dev));
++ "%s/input0", dev_name(dev));
++ if (!data->input->phys) {
++ retval = -ENOMEM;
++ goto err;
++ }
+ }
+
+ retval = rmi_init_functions(data);
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 9c39553d30fa27..34d1f07ea4c304 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -76,13 +76,14 @@ static inline void i8042_write_command(int val)
+ #define SERIO_QUIRK_PROBE_DEFER BIT(5)
+ #define SERIO_QUIRK_RESET_ALWAYS BIT(6)
+ #define SERIO_QUIRK_RESET_NEVER BIT(7)
+-#define SERIO_QUIRK_DIECT BIT(8)
++#define SERIO_QUIRK_DIRECT BIT(8)
+ #define SERIO_QUIRK_DUMBKBD BIT(9)
+ #define SERIO_QUIRK_NOLOOP BIT(10)
+ #define SERIO_QUIRK_NOTIMEOUT BIT(11)
+ #define SERIO_QUIRK_KBDRESET BIT(12)
+ #define SERIO_QUIRK_DRITEK BIT(13)
+ #define SERIO_QUIRK_NOPNP BIT(14)
++#define SERIO_QUIRK_FORCENORESTORE BIT(15)
+
+ /* Quirk table for different mainboards. Options similar or identical to i8042
+ * module parameters.
+@@ -360,6 +361,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_DRITEK)
+ },
++ {
++ /* Acer TravelMate P459-G2-M */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
++ },
+ {
+ /* Amoi M636/A737 */
+ .matches = {
+@@ -618,6 +627,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
++ {
++ /* Fujitsu Lifebook E756 */
++ /* https://bugzilla.suse.com/show_bug.cgi?id=1229056 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E756"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
++ },
+ {
+ /* Fujitsu Lifebook E5411 */
+ .matches = {
+@@ -626,6 +644,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
++ {
++ /* Fujitsu Lifebook U728 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
++ },
+ {
+ /* Gigabyte M912 */
+ .matches = {
+@@ -1094,6 +1120,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOLOOP)
+ },
++ /*
++ * Some TongFang barebones have touchpad and/or keyboard issues after
++ * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of
++ * them have an external PS/2 port so this can safely be set for all of
++ * them.
++ * TongFang barebones come with board_vendor and/or system_vendor set to
++ * a different value for each individual reseller. The only somewhat
++ * universal way to identify them is by board_name.
++ */
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++ },
+ /*
+ * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+ * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
+@@ -1133,18 +1196,10 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+- /*
+- * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+- * the keyboard very laggy for ~5 seconds after boot and
+- * sometimes also after resume.
+- * However both are required for the keyboard to not fail
+- * completely sometimes after boot or resume.
+- */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
+ },
+- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+@@ -1200,6 +1255,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
+ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
++ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
+@@ -1310,6 +1371,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
++ {
++ /*
++		 * The Ayaneo Kun is a handheld device where some of the buttons
++ * are handled by an AT keyboard. The keyboard is usually
++ * detected as raw, but sometimes, usually after a cold boot,
++ * it is detected as translated. Make sure that the keyboard
++ * is always in raw mode.
++ */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
++ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_DIRECT)
++ },
+ { }
+ };
+
+@@ -1633,7 +1708,7 @@ static void __init i8042_check_quirks(void)
+ if (quirks & SERIO_QUIRK_RESET_NEVER)
+ i8042_reset = I8042_RESET_NEVER;
+ }
+- if (quirks & SERIO_QUIRK_DIECT)
++ if (quirks & SERIO_QUIRK_DIRECT)
+ i8042_direct = true;
+ if (quirks & SERIO_QUIRK_DUMBKBD)
+ i8042_dumbkbd = true;
+@@ -1649,6 +1724,8 @@ static void __init i8042_check_quirks(void)
+ if (quirks & SERIO_QUIRK_NOPNP)
+ i8042_nopnp = true;
+ #endif
++ if (quirks & SERIO_QUIRK_FORCENORESTORE)
++ i8042_forcenorestore = true;
+ }
+ #else
+ static inline void i8042_check_quirks(void) {}
+@@ -1682,7 +1759,7 @@ static int __init i8042_platform_init(void)
+
+ i8042_check_quirks();
+
+- pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
++ pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ i8042_nokbd ? " nokbd" : "",
+ i8042_noaux ? " noaux" : "",
+ i8042_nomux ? " nomux" : "",
+@@ -1702,10 +1779,11 @@ static int __init i8042_platform_init(void)
+ "",
+ #endif
+ #ifdef CONFIG_PNP
+- i8042_nopnp ? " nopnp" : "");
++ i8042_nopnp ? " nopnp" : "",
+ #else
+- "");
++ "",
+ #endif
++ i8042_forcenorestore ? " forcenorestore" : "");
+
+ retval = i8042_pnp_init();
+ if (retval)
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 6dac7c1853a541..29340f8095bb25 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -115,6 +115,10 @@ module_param_named(nopnp, i8042_nopnp, bool, 0);
+ MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings");
+ #endif
+
++static bool i8042_forcenorestore;
++module_param_named(forcenorestore, i8042_forcenorestore, bool, 0);
++MODULE_PARM_DESC(forcenorestore, "Force no restore on s3 resume, copying s2idle behaviour");
++
+ #define DEBUG
+ #ifdef DEBUG
+ static bool i8042_debug;
+@@ -1232,7 +1236,7 @@ static int i8042_pm_suspend(struct device *dev)
+ {
+ int i;
+
+- if (pm_suspend_via_firmware())
++ if (!i8042_forcenorestore && pm_suspend_via_firmware())
+ i8042_controller_reset(true);
+
+ /* Set up serio interrupts for system wakeup. */
+@@ -1248,7 +1252,7 @@ static int i8042_pm_suspend(struct device *dev)
+
+ static int i8042_pm_resume_noirq(struct device *dev)
+ {
+- if (!pm_resume_via_firmware())
++ if (i8042_forcenorestore || !pm_resume_via_firmware())
+ i8042_interrupt(0, NULL);
+
+ return 0;
+@@ -1271,7 +1275,7 @@ static int i8042_pm_resume(struct device *dev)
+ * not restore the controller state to whatever it had been at boot
+ * time, so we do not need to do anything.
+ */
+- if (!pm_suspend_via_firmware())
++ if (i8042_forcenorestore || !pm_suspend_via_firmware())
+ return 0;
+
+ /*
+diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
+index faea40dd66d018..8b8c43b3c27f29 100644
+--- a/drivers/input/touchscreen/ads7846.c
++++ b/drivers/input/touchscreen/ads7846.c
+@@ -808,7 +808,7 @@ static void ads7846_read_state(struct ads7846 *ts)
+ m = &ts->msg[msg_idx];
+ error = spi_sync(ts->spi, m);
+ if (error) {
+- dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
++ dev_err_ratelimited(&ts->spi->dev, "spi_sync --> %d\n", error);
+ packet->ignore = true;
+ return;
+ }
+@@ -1114,6 +1114,16 @@ static const struct of_device_id ads7846_dt_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(of, ads7846_dt_ids);
+
++static const struct spi_device_id ads7846_spi_ids[] = {
++ { "tsc2046", 7846 },
++ { "ads7843", 7843 },
++ { "ads7845", 7845 },
++ { "ads7846", 7846 },
++ { "ads7873", 7873 },
++ { },
++};
++MODULE_DEVICE_TABLE(spi, ads7846_spi_ids);
++
+ static const struct ads7846_platform_data *ads7846_get_props(struct device *dev)
+ {
+ struct ads7846_platform_data *pdata;
+@@ -1392,10 +1402,10 @@ static struct spi_driver ads7846_driver = {
+ },
+ .probe = ads7846_probe,
+ .remove = ads7846_remove,
++ .id_table = ads7846_spi_ids,
+ };
+
+ module_spi_driver(ads7846_driver);
+
+ MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("spi:ads7846");
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index af32fbe57b6303..b068ff8afbc9ad 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+ }
+ }
+
+- if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
++ /* Some devices with gpio_int_idx 0 list a third unused GPIO */
++ if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ gpio_mapping = acpi_goodix_int_first_gpios;
+ } else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index ad6828e4f2e2df..6a77babcf7228e 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
+ if (!error && data[0] == 2) {
+ error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
+ ILI251X_DATA_SIZE2);
+- if (error >= 0 && error != ILI251X_DATA_SIZE2)
+- error = -EIO;
++ if (error >= 0)
++ error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
+ }
+
+ return error;
+@@ -597,7 +597,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ * once, copy them all into this buffer at the right locations, and then
+ * do all operations on this linear buffer.
+ */
+- fw_buf = kzalloc(SZ_64K, GFP_KERNEL);
++ fw_buf = kvmalloc(SZ_64K, GFP_KERNEL);
+ if (!fw_buf)
+ return -ENOMEM;
+
+@@ -627,7 +627,7 @@ static int ili251x_firmware_to_buffer(const struct firmware *fw,
+ return 0;
+
+ err_big:
+- kfree(fw_buf);
++ kvfree(fw_buf);
+ return error;
+ }
+
+@@ -870,7 +870,7 @@ static ssize_t ili210x_firmware_update_store(struct device *dev,
+ ili210x_hardware_reset(priv->reset_gpio);
+ dev_dbg(dev, "Firmware update ended, error=%i\n", error);
+ enable_irq(client->irq);
+- kfree(fwbuf);
++ kvfree(fwbuf);
+ return error;
+ }
+
+diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c
+index 2f872e95fbbade..e719f5da68bf50 100644
+--- a/drivers/input/touchscreen/ilitek_ts_i2c.c
++++ b/drivers/input/touchscreen/ilitek_ts_i2c.c
+@@ -37,6 +37,8 @@
+ #define ILITEK_TP_CMD_GET_MCU_VER 0x61
+ #define ILITEK_TP_CMD_GET_IC_MODE 0xC0
+
++#define ILITEK_TP_I2C_REPORT_ID 0x48
++
+ #define REPORT_COUNT_ADDRESS 61
+ #define ILITEK_SUPPORT_MAX_POINT 40
+
+@@ -160,15 +162,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
+ error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64);
+ if (error) {
+ dev_err(dev, "get touch info failed, err:%d\n", error);
+- goto err_sync_frame;
++ return error;
++ }
++
++ if (buf[0] != ILITEK_TP_I2C_REPORT_ID) {
++ dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]);
++ return -EINVAL;
+ }
+
+ report_max_point = buf[REPORT_COUNT_ADDRESS];
+ if (report_max_point > ts->max_tp) {
+ dev_err(dev, "FW report max point:%d > panel info. max:%d\n",
+ report_max_point, ts->max_tp);
+- error = -EINVAL;
+- goto err_sync_frame;
++ return -EINVAL;
+ }
+
+ count = DIV_ROUND_UP(report_max_point, packet_max_point);
+@@ -178,7 +184,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
+ if (error) {
+ dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n",
+ count, error);
+- goto err_sync_frame;
++ return error;
+ }
+ }
+
+@@ -203,10 +209,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts)
+ ilitek_touch_down(ts, id, x, y);
+ }
+
+-err_sync_frame:
+ input_mt_sync_frame(input);
+ input_sync(input);
+- return error;
++
++ return 0;
+ }
+
+ /* APIs of cmds for ILITEK Touch IC */
+diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
+index 07111ca2445561..55ecebe9814456 100644
+--- a/drivers/input/touchscreen/imagis.c
++++ b/drivers/input/touchscreen/imagis.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+
++#include <linux/bitfield.h>
+ #include <linux/bits.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+@@ -23,12 +24,9 @@
+ #define IST3038C_I2C_RETRY_COUNT 3
+ #define IST3038C_MAX_FINGER_NUM 10
+ #define IST3038C_X_MASK GENMASK(23, 12)
+-#define IST3038C_X_SHIFT 12
+ #define IST3038C_Y_MASK GENMASK(11, 0)
+ #define IST3038C_AREA_MASK GENMASK(27, 24)
+-#define IST3038C_AREA_SHIFT 24
+ #define IST3038C_FINGER_COUNT_MASK GENMASK(15, 12)
+-#define IST3038C_FINGER_COUNT_SHIFT 12
+ #define IST3038C_FINGER_STATUS_MASK GENMASK(9, 0)
+
+ struct imagis_ts {
+@@ -92,8 +90,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+ goto out;
+ }
+
+- finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
+- IST3038C_FINGER_COUNT_SHIFT;
++ finger_count = FIELD_GET(IST3038C_FINGER_COUNT_MASK, intr_message);
+ if (finger_count > IST3038C_MAX_FINGER_NUM) {
+ dev_err(&ts->client->dev,
+ "finger count %d is more than maximum supported\n",
+@@ -101,7 +98,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+ goto out;
+ }
+
+- finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
++ finger_pressed = FIELD_GET(IST3038C_FINGER_STATUS_MASK, intr_message);
+
+ for (i = 0; i < finger_count; i++) {
+ error = imagis_i2c_read_reg(ts,
+@@ -118,12 +115,11 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
+ finger_pressed & BIT(i));
+ touchscreen_report_pos(ts->input_dev, &ts->prop,
+- (finger_status & IST3038C_X_MASK) >>
+- IST3038C_X_SHIFT,
+- finger_status & IST3038C_Y_MASK, 1);
++ FIELD_GET(IST3038C_X_MASK, finger_status),
++ FIELD_GET(IST3038C_Y_MASK, finger_status),
++ true);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+- (finger_status & IST3038C_AREA_MASK) >>
+- IST3038C_AREA_SHIFT);
++ FIELD_GET(IST3038C_AREA_MASK, finger_status));
+ }
+
+ input_mt_sync_frame(ts->input_dev);
+@@ -210,7 +206,7 @@ static int imagis_init_input_dev(struct imagis_ts *ts)
+
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
++ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 16, 0, 0);
+
+ touchscreen_parse_properties(input_dev, true, &ts->prop);
+ if (!ts->prop.max_x || !ts->prop.max_y) {
+diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
+index 62f562ad50263c..050fa9ca4ec94d 100644
+--- a/drivers/input/touchscreen/silead.c
++++ b/drivers/input/touchscreen/silead.c
+@@ -71,7 +71,6 @@ struct silead_ts_data {
+ struct regulator_bulk_data regulators[2];
+ char fw_name[64];
+ struct touchscreen_properties prop;
+- u32 max_fingers;
+ u32 chip_id;
+ struct input_mt_pos pos[SILEAD_MAX_FINGERS];
+ int slots[SILEAD_MAX_FINGERS];
+@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
+ touchscreen_parse_properties(data->input, true, &data->prop);
+ silead_apply_efi_fw_min_max(data);
+
+- input_mt_init_slots(data->input, data->max_fingers,
++ input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
+
+@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
+ return;
+ }
+
+- if (buf[0] > data->max_fingers) {
++ if (buf[0] > SILEAD_MAX_FINGERS) {
+ dev_warn(dev, "More touches reported then supported %d > %d\n",
+- buf[0], data->max_fingers);
+- buf[0] = data->max_fingers;
++ buf[0], SILEAD_MAX_FINGERS);
++ buf[0] = SILEAD_MAX_FINGERS;
+ }
+
+ if (silead_ts_handle_pen_data(data, buf))
+@@ -315,7 +314,6 @@ static void silead_ts_read_data(struct i2c_client *client)
+
+ static int silead_ts_init(struct i2c_client *client)
+ {
+- struct silead_ts_data *data = i2c_get_clientdata(client);
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
+- data->max_fingers);
++ SILEAD_MAX_FINGERS);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
+ const char *str;
+ int error;
+
+- error = device_property_read_u32(dev, "silead,max-fingers",
+- &data->max_fingers);
+- if (error) {
+- dev_dbg(dev, "Max fingers read error %d\n", error);
+- data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
+- }
+-
+ error = device_property_read_string(dev, "firmware-name", &str);
+ if (!error)
+ snprintf(data->fw_name, sizeof(data->fw_name),
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index dfab160ca52934..68edb07d4443e9 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+
+ path->num_nodes = num_nodes;
+
++ mutex_lock(&icc_bw_lock);
++
+ for (i = num_nodes - 1; i >= 0; i--) {
+ node->provider->users++;
+ hlist_add_head(&path->reqs[i].req_node, &node->req_list);
+@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+ node = node->reverse;
+ }
+
++ mutex_unlock(&icc_bw_lock);
++
+ return path;
+ }
+
+@@ -395,6 +399,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+ }
+ mutex_unlock(&icc_lock);
+
++ if (!node)
++ return ERR_PTR(-EINVAL);
++
+ if (IS_ERR(node))
+ return ERR_CAST(node);
+
+@@ -789,12 +796,16 @@ void icc_put(struct icc_path *path)
+ pr_err("%s: error (%d)\n", __func__, ret);
+
+ mutex_lock(&icc_lock);
++ mutex_lock(&icc_bw_lock);
++
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ hlist_del(&path->reqs[i].req_node);
+ if (!WARN_ON(!node->provider->users))
+ node->provider->users--;
+ }
++
++ mutex_unlock(&icc_bw_lock);
+ mutex_unlock(&icc_lock);
+
+ kfree_const(path->name);
+diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
+index d787f2ea36d97b..a91df709cfb2f3 100644
+--- a/drivers/interconnect/icc-clk.c
++++ b/drivers/interconnect/icc-clk.c
+@@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
+ onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL);
+ if (!onecell)
+ return ERR_PTR(-ENOMEM);
++ onecell->num_nodes = 2 * num_clocks;
+
+ qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL);
+ if (!qp)
+@@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev,
+ onecell->nodes[j++] = node;
+ }
+
+- onecell->num_nodes = j;
+-
+ ret = icc_provider_register(provider);
+ if (ret)
+ goto err;
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index 2c16917ba1fdae..e76356f91125f5 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -497,7 +497,7 @@ int qnoc_probe(struct platform_device *pdev)
+
+ ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
+ if (ret)
+- return ret;
++ goto err_disable_unprepare_clk;
+
+ provider = &qp->provider;
+ provider->dev = dev;
+@@ -512,13 +512,15 @@ int qnoc_probe(struct platform_device *pdev)
+ /* If this fails, bus accesses will crash the platform! */
+ ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
+ if (ret)
+- return ret;
++ goto err_disable_unprepare_clk;
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
++ clk_bulk_disable_unprepare(qp->num_intf_clks,
++ qp->intf_clks);
+ ret = PTR_ERR(node);
+ goto err_remove_nodes;
+ }
+@@ -534,8 +536,11 @@ int qnoc_probe(struct platform_device *pdev)
+ if (qnodes[i]->qos.ap_owned &&
+ qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
+ ret = qcom_icc_qos_set(node);
+- if (ret)
+- return ret;
++ if (ret) {
++ clk_bulk_disable_unprepare(qp->num_intf_clks,
++ qp->intf_clks);
++ goto err_remove_nodes;
++ }
+ }
+
+ data->nodes[i] = node;
+@@ -563,6 +568,7 @@ int qnoc_probe(struct platform_device *pdev)
+ icc_provider_deregister(provider);
+ err_remove_nodes:
+ icc_nodes_remove(provider);
++err_disable_unprepare_clk:
+ clk_disable_unprepare(qp->bus_clk);
+
+ return ret;
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index dc321bb86d0be9..e97478bbc28253 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
++#include <linux/args.h>
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/interconnect-provider.h>
+@@ -78,7 +79,7 @@ enum {
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
++ .num_links = COUNT_ARGS(__VA_ARGS__), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index 5bc4b7516608b4..69960a357a682e 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -161,9 +161,9 @@ static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+- .qos.qos_port = 2,
++ .qos.qos_port = 6,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+- .mas_rpm_id = 164,
++ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links,
+diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
+index bf800dd7d4ba1c..a7392eb73d4a99 100644
+--- a/drivers/interconnect/qcom/qdu1000.c
++++ b/drivers/interconnect/qcom/qdu1000.c
+@@ -769,6 +769,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
+index d94ab9b39f3dbb..af2be15438403e 100644
+--- a/drivers/interconnect/qcom/sc7180.c
++++ b/drivers/interconnect/qcom/sc7180.c
+@@ -1238,6 +1238,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
+index 6592839b4d94b3..a626dbc719995f 100644
+--- a/drivers/interconnect/qcom/sc7280.c
++++ b/drivers/interconnect/qcom/sc7280.c
+@@ -1285,6 +1285,7 @@ static struct qcom_icc_node srvc_snoc = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
+index 0fb4898dabcfef..a741badaa966e0 100644
+--- a/drivers/interconnect/qcom/sc8180x.c
++++ b/drivers/interconnect/qcom/sc8180x.c
+@@ -1345,6 +1345,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &slv_ebi }
+ };
+@@ -1371,6 +1372,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
+
+ static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
++ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &slv_qns_cdsp_mem_noc }
+ };
+diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
+index b82c5493cbb566..0270f6c64481a9 100644
+--- a/drivers/interconnect/qcom/sc8280xp.c
++++ b/drivers/interconnect/qcom/sc8280xp.c
+@@ -1712,6 +1712,7 @@ static struct qcom_icc_node srvc_snoc = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+ };
+diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
+index 540a2108b77c1c..907e1ff4ff8179 100644
+--- a/drivers/interconnect/qcom/sdm670.c
++++ b/drivers/interconnect/qcom/sdm670.c
+@@ -1047,6 +1047,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index b9243c0aa626cc..855802be93fea1 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -1265,6 +1265,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
+index 49aed492e9b807..f41d7e19ba269c 100644
+--- a/drivers/interconnect/qcom/sm6350.c
++++ b/drivers/interconnect/qcom/sm6350.c
+@@ -1164,6 +1164,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
+index c7c9cf7f746b05..edfe824cad3533 100644
+--- a/drivers/interconnect/qcom/sm8150.c
++++ b/drivers/interconnect/qcom/sm8150.c
+@@ -1282,6 +1282,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
+index d4a4ecef11f010..2a2f56b9937337 100644
+--- a/drivers/interconnect/qcom/sm8250.c
++++ b/drivers/interconnect/qcom/sm8250.c
+@@ -1397,6 +1397,7 @@ static struct qcom_icc_node qup2_core_slave = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+@@ -1994,6 +1995,7 @@ static struct platform_driver qnoc_driver = {
+ .driver = {
+ .name = "qnoc-sm8250",
+ .of_match_table = qnoc_of_match,
++ .sync_state = icc_sync_state,
+ },
+ };
+ module_platform_driver(qnoc_driver);
+diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
+index bdf75839e6d177..562322d4fc3c4a 100644
+--- a/drivers/interconnect/qcom/sm8350.c
++++ b/drivers/interconnect/qcom/sm8350.c
+@@ -1356,6 +1356,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {
+
+ static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
++ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
+index a10c8b6549ee6a..16b2dfd794b409 100644
+--- a/drivers/interconnect/qcom/sm8550.c
++++ b/drivers/interconnect/qcom/sm8550.c
+@@ -2223,6 +2223,7 @@ static struct platform_driver qnoc_driver = {
+ .driver = {
+ .name = "qnoc-sm8550",
+ .of_match_table = qnoc_of_match,
++ .sync_state = icc_sync_state,
+ },
+ };
+
+diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
+index 2b12b583ef4b1e..d57c5adf932e36 100644
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -191,7 +191,7 @@ source "drivers/iommu/iommufd/Kconfig"
+ config IRQ_REMAP
+ bool "Support for Interrupt Remapping"
+ depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
+- select DMAR_TABLE
++ select DMAR_TABLE if INTEL_IOMMU
+ help
+ Supports Interrupt remapping for IO-APIC and MSI devices.
+ To use x2apic mode in the CPU's which support x2APIC enhancements or
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 45efb7e5d72546..ef3fae113dd643 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1692,8 +1692,17 @@ static void __init free_pci_segments(void)
+ }
+ }
+
++static void __init free_sysfs(struct amd_iommu *iommu)
++{
++ if (iommu->iommu.dev) {
++ iommu_device_unregister(&iommu->iommu);
++ iommu_device_sysfs_remove(&iommu->iommu);
++ }
++}
++
+ static void __init free_iommu_one(struct amd_iommu *iommu)
+ {
++ free_sysfs(iommu);
+ free_cwwb_sem(iommu);
+ free_command_buffer(iommu);
+ free_event_buffer(iommu);
+@@ -2084,6 +2093,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
+ /* Prevent binding other PCI device drivers to IOMMU devices */
+ iommu->dev->match_driver = false;
+
++ /* ACPI _PRT won't have an IRQ for IOMMU */
++ iommu->dev->irq_managed = 1;
++
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+ &iommu->cap);
+
+diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
+index e9ef2e0a62f670..cbf0c46015125a 100644
+--- a/drivers/iommu/amd/io_pgtable_v2.c
++++ b/drivers/iommu/amd/io_pgtable_v2.c
+@@ -50,7 +50,7 @@ static inline u64 set_pgtable_attr(u64 *page)
+ u64 prot;
+
+ prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
+- prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
++ prot |= IOMMU_PAGE_ACCESS;
+
+ return (iommu_virt_to_phys(page) | prot);
+ }
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index bd0a596f9863a3..68b81f9c2f4b17 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -3193,7 +3193,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
+ smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
+
+ /* Add callback to free MSIs on teardown */
+- devm_add_action(dev, arm_smmu_free_msis, dev);
++ devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
+ }
+
+ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index 7f52ac67495fd1..d4915893601979 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -243,6 +243,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+
+ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
+ { .compatible = "qcom,adreno" },
++ { .compatible = "qcom,adreno-gmu" },
+ { .compatible = "qcom,mdp4" },
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+@@ -251,6 +252,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
+ { .compatible = "qcom,sc7280-mss-pil" },
+ { .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sc8280xp-mdss" },
++ { .compatible = "qcom,sdm670-mdss" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sdm845-mss-pil" },
+ { .compatible = "qcom,sm6350-mdss" },
+@@ -276,6 +278,20 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ u32 smr;
+ int i;
+
++ /*
++ * MSM8998 LPASS SMMU reports 13 context banks, but accessing
++ * the last context bank crashes the system.
++ */
++ if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
++ smmu->num_context_banks == 13) {
++ smmu->num_context_banks = 12;
++ } else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) {
++ if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */
++ smmu->num_context_banks = 7;
++ else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */
++ smmu->num_context_banks = 13;
++ }
++
+ /*
+ * Some platforms support more than the Arm SMMU architected maximum of
+ * 128 stream matching groups. For unknown reasons, the additional
+@@ -332,6 +348,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ return 0;
+ }
+
++static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
++{
++ /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
++ smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;
++
++ /* TZ protects several last context banks, hide them from Linux */
++ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") &&
++ smmu->num_context_banks == 5)
++ smmu->num_context_banks = 2;
++
++ return 0;
++}
++
+ static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+ {
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+@@ -422,6 +451,7 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
+
+ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
+ .init_context = qcom_adreno_smmu_init_context,
++ .cfg_probe = qcom_adreno_smmuv2_cfg_probe,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+ .write_sctlr = qcom_adreno_smmu_write_sctlr,
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 4b1a88f514c9c0..2da969fc899004 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -29,6 +29,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/swiotlb.h>
+ #include <linux/vmalloc.h>
++#include <trace/events/swiotlb.h>
+
+ #include "dma-iommu.h"
+
+@@ -1052,6 +1053,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ return DMA_MAPPING_ERROR;
+ }
+
++ trace_swiotlb_bounced(dev, phys, size);
++
+ aligned_size = iova_align(iovad, size);
+ phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+ iova_mask(iovad), dir, attrs);
+@@ -1599,6 +1602,14 @@ static size_t iommu_dma_opt_mapping_size(void)
+ return iova_rcache_range();
+ }
+
++static size_t iommu_dma_max_mapping_size(struct device *dev)
++{
++ if (dev_is_untrusted(dev))
++ return swiotlb_max_mapping_size(dev);
++
++ return SIZE_MAX;
++}
++
+ static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
+ .alloc = iommu_dma_alloc,
+@@ -1621,6 +1632,7 @@ static const struct dma_map_ops iommu_dma_ops = {
+ .unmap_resource = iommu_dma_unmap_resource,
+ .get_merge_boundary = iommu_dma_get_merge_boundary,
+ .opt_mapping_size = iommu_dma_opt_mapping_size,
++ .max_mapping_size = iommu_dma_max_mapping_size,
+ };
+
+ /*
+diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
+index 7af3b8a4f2a005..29d26a4371327c 100644
+--- a/drivers/iommu/intel/Makefile
++++ b/drivers/iommu/intel/Makefile
+@@ -5,5 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
+ obj-$(CONFIG_DMAR_PERF) += perf.o
+ obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
+ obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
++ifdef CONFIG_INTEL_IOMMU
+ obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
++endif
+ obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index a3414afe11b07e..7a38e18b18196b 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1202,9 +1202,7 @@ static void free_iommu(struct intel_iommu *iommu)
+ */
+ static inline void reclaim_free_desc(struct q_inval *qi)
+ {
+- while (qi->desc_status[qi->free_tail] == QI_DONE ||
+- qi->desc_status[qi->free_tail] == QI_ABORT) {
+- qi->desc_status[qi->free_tail] = QI_FREE;
++ while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
+ qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
+ qi->free_cnt++;
+ }
+@@ -1422,7 +1420,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ */
+ writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
+
+- while (qi->desc_status[wait_index] != QI_DONE) {
++ while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
+ /*
+ * We will leave the interrupts disabled, to prevent interrupt
+ * context to queue another cmd while a cmd is already submitted
+@@ -1439,8 +1437,16 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ raw_spin_lock(&qi->q_lock);
+ }
+
+- for (i = 0; i < count; i++)
+- qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
++ /*
++ * The reclaim code can free descriptors from multiple submissions
++ * starting from the tail of the queue. When count == 0, the
++ * status of the standalone wait descriptor at the tail of the queue
++ * must be set to QI_FREE to allow the reclaim code to proceed.
++ * It is also possible that descriptors from one of the previous
++ * submissions has to be reclaimed by a subsequent submission.
++ */
++ for (i = 0; i <= count; i++)
++ qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE;
+
+ reclaim_free_desc(qi);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+@@ -1522,6 +1528,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ {
+ struct qi_desc desc;
+
++ /*
++ * VT-d spec, section 4.3:
++ *
++ * Software is recommended to not submit any Device-TLB invalidation
++ * requests while address remapping hardware is disabled.
++ */
++ if (!(iommu->gcmd & DMA_GCMD_TE))
++ return;
++
+ if (mask) {
+ addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+@@ -1587,6 +1602,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
++ /*
++ * VT-d spec, section 4.3:
++ *
++ * Software is recommended to not submit any Device-TLB invalidation
++ * requests while address remapping hardware is disabled.
++ */
++ if (!(iommu->gcmd & DMA_GCMD_TE))
++ return;
++
+ desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(pfsid);
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 3685ba90ec88e8..3a7c647d3affa8 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1692,10 +1692,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
+ * entry for first-level or pass-through translation modes should
+ * be programmed with a domain id different from those used for
+ * second-level or nested translation. We reserve a domain id for
+- * this purpose.
++ * this purpose. This domain id is also used for identity domain
++ * in legacy mode.
+ */
+- if (sm_supported(iommu))
+- set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
++ set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
+
+ return 0;
+ }
+@@ -2204,6 +2204,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ attr |= DMA_FL_PTE_DIRTY;
+ }
+
++ domain->has_mappings = true;
++
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
+
+ while (nr_pages > 0) {
+@@ -2409,7 +2411,7 @@ static int __init si_domain_init(int hw)
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ ret = iommu_domain_identity_map(si_domain,
+ mm_to_dma_pfn_start(start_pfn),
+- mm_to_dma_pfn_end(end_pfn));
++ mm_to_dma_pfn_end(end_pfn-1));
+ if (ret)
+ return ret;
+ }
+@@ -2487,7 +2489,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
+ return ret;
+ }
+
+- iommu_enable_pci_caps(info);
++ if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
++ iommu_enable_pci_caps(info);
+
+ return 0;
+ }
+@@ -3922,8 +3925,10 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
+ */
+ static void domain_context_clear(struct device_domain_info *info)
+ {
+- if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
++ if (!dev_is_pci(info->dev)) {
++ domain_context_clear_one(info, info->bus, info->devfn);
+ return;
++ }
+
+ pci_for_each_dma_alias(to_pci_dev(info->dev),
+ &domain_context_clear_one_cb, info);
+@@ -4308,7 +4313,8 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+ return true;
+
+ spin_lock_irqsave(&dmar_domain->lock, flags);
+- if (!domain_support_force_snooping(dmar_domain)) {
++ if (!domain_support_force_snooping(dmar_domain) ||
++ (!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ return false;
+ }
+@@ -4928,7 +4934,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+ ver = (dev->device >> 8) & 0xff;
+ if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+ ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+- ver != 0x9a && ver != 0xa7)
++ ver != 0x9a && ver != 0xa7 && ver != 0x7d)
+ return;
+
+ if (risky_device(dev))
+diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
+index 7dac94f62b4ec6..e6a3e70656166a 100644
+--- a/drivers/iommu/intel/iommu.h
++++ b/drivers/iommu/intel/iommu.h
+@@ -592,6 +592,9 @@ struct dmar_domain {
+ * otherwise, goes through the second
+ * level.
+ */
++ u8 has_mappings:1; /* Has mappings configured through
++ * iommu_map() interface.
++ */
+
+ spinlock_t lock; /* Protect device tracking lists */
+ struct list_head devices; /* all devices' list */
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index 8f92b92f3d2aba..8faa93cffac45d 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -428,6 +428,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
+ if (!info || !info->ats_enabled)
+ return;
+
++ if (pci_dev_is_disconnected(to_pci_dev(dev)))
++ return;
++
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ pfsid = info->pfsid;
+diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
+index cf43e798eca499..44083d01852dbf 100644
+--- a/drivers/iommu/intel/perfmon.c
++++ b/drivers/iommu/intel/perfmon.c
+@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
+ iommu_pmu_set_filter(domain, event->attr.config1,
+ IOMMU_PMU_FILTER_DOMAIN, idx,
+ event->attr.config1);
+- iommu_pmu_set_filter(pasid, event->attr.config1,
++ iommu_pmu_set_filter(pasid, event->attr.config2,
+ IOMMU_PMU_FILTER_PASID, idx,
+ event->attr.config1);
+ iommu_pmu_set_filter(ats, event->attr.config2,
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index 50a481c895b867..6010b93c514c5c 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -67,7 +67,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
+ struct page *pages;
+ int irq, ret;
+
+- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
++ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ if (!pages) {
+ pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
+ iommu->name);
+@@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
+ rcu_read_unlock();
+ }
+
++static void intel_flush_svm_all(struct intel_svm *svm)
++{
++ struct device_domain_info *info;
++ struct intel_svm_dev *sdev;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(sdev, &svm->devs, list) {
++ info = dev_iommu_priv_get(sdev->dev);
++
++ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
++ if (info->ats_enabled) {
++ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
++ svm->pasid, sdev->qdep,
++ 0, 64 - VTD_PAGE_SHIFT);
++ quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
++ svm->pasid, sdev->qdep);
++ }
++ }
++ rcu_read_unlock();
++}
++
+ /* Pages have been freed at this point */
+ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+@@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+ {
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
++ if (start == 0 && end == -1UL) {
++ intel_flush_svm_all(svm);
++ return;
++ }
++
+ intel_flush_svm_range(svm, start,
+ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+ }
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index 75f244a3e12df6..06ffc683b28fee 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -552,9 +552,8 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ paddr >= (1ULL << data->iop.cfg.oas)))
+ return -ERANGE;
+
+- /* If no access, then nothing to do */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+- return 0;
++ return -EINVAL;
+
+ while (pgcount--) {
+ ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 72dcdd468cf30d..934dc97f5df9ed 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -480,9 +480,8 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ if (WARN_ON(iaext || paddr >> cfg->oas))
+ return -ERANGE;
+
+- /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+- return 0;
++ return -EINVAL;
+
+ prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
+diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
+index 74b1ef2b96bee1..10811e0b773d3b 100644
+--- a/drivers/iommu/io-pgtable-dart.c
++++ b/drivers/iommu/io-pgtable-dart.c
+@@ -250,9 +250,8 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ if (WARN_ON(paddr >> cfg->oas))
+ return -ERANGE;
+
+- /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+- return 0;
++ return -EINVAL;
+
+ tbl = dart_get_table(data, iova);
+
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index c146378c7d032c..3f1029c0825e95 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -479,11 +479,12 @@ static void iommu_deinit_device(struct device *dev)
+ dev_iommu_free(dev);
+ }
+
++DEFINE_MUTEX(iommu_probe_device_lock);
++
+ static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+ {
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_group *group;
+- static DEFINE_MUTEX(iommu_probe_device_lock);
+ struct group_device *gdev;
+ int ret;
+
+@@ -496,17 +497,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ * probably be able to use device_lock() here to minimise the scope,
+ * but for now enforcing a simple global ordering is fine.
+ */
+- mutex_lock(&iommu_probe_device_lock);
++ lockdep_assert_held(&iommu_probe_device_lock);
+
+ /* Device is probed already if in a group */
+- if (dev->iommu_group) {
+- ret = 0;
+- goto out_unlock;
+- }
++ if (dev->iommu_group)
++ return 0;
+
+ ret = iommu_init_device(dev, ops);
+ if (ret)
+- goto out_unlock;
++ return ret;
+
+ group = dev->iommu_group;
+ gdev = iommu_group_alloc_device(group, dev);
+@@ -542,7 +541,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ list_add_tail(&group->entry, group_list);
+ }
+ mutex_unlock(&group->mutex);
+- mutex_unlock(&iommu_probe_device_lock);
+
+ if (dev_is_pci(dev))
+ iommu_dma_set_pci_32bit_workaround(dev);
+@@ -556,8 +554,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ iommu_deinit_device(dev);
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+-out_unlock:
+- mutex_unlock(&iommu_probe_device_lock);
+
+ return ret;
+ }
+@@ -567,7 +563,9 @@ int iommu_probe_device(struct device *dev)
+ const struct iommu_ops *ops;
+ int ret;
+
++ mutex_lock(&iommu_probe_device_lock);
+ ret = __iommu_probe_device(dev, NULL);
++ mutex_unlock(&iommu_probe_device_lock);
+ if (ret)
+ return ret;
+
+@@ -1783,7 +1781,9 @@ static int probe_iommu_group(struct device *dev, void *data)
+ struct list_head *group_list = data;
+ int ret;
+
++ mutex_lock(&iommu_probe_device_lock);
+ ret = __iommu_probe_device(dev, group_list);
++ mutex_unlock(&iommu_probe_device_lock);
+ if (ret == -ENODEV)
+ ret = 0;
+
+@@ -3369,15 +3369,26 @@ EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+ static int __iommu_set_group_pasid(struct iommu_domain *domain,
+ struct iommu_group *group, ioasid_t pasid)
+ {
+- struct group_device *device;
+- int ret = 0;
++ struct group_device *device, *last_gdev;
++ int ret;
+
+ for_each_group_device(group, device) {
+ ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
+ if (ret)
+- break;
++ goto err_revert;
+ }
+
++ return 0;
++
++err_revert:
++ last_gdev = device;
++ for_each_group_device(group, device) {
++ const struct iommu_ops *ops = dev_iommu_ops(device->dev);
++
++ if (device == last_gdev)
++ break;
++ ops->remove_dev_pasid(device->dev, pasid);
++ }
+ return ret;
+ }
+
+@@ -3423,10 +3434,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
+ }
+
+ ret = __iommu_set_group_pasid(domain, group, pasid);
+- if (ret) {
+- __iommu_remove_group_pasid(group, pasid);
++ if (ret)
+ xa_erase(&group->pasid_array, pasid);
+- }
+ out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 3a598182b76191..e76b2293999481 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -111,6 +111,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
+ unsigned long page_offset = uptr % PAGE_SIZE;
+ struct interval_tree_double_span_iter used_span;
+ struct interval_tree_span_iter allowed_span;
++ unsigned long max_alignment = PAGE_SIZE;
+ unsigned long iova_alignment;
+
+ lockdep_assert_held(&iopt->iova_rwsem);
+@@ -130,6 +131,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
+ roundup_pow_of_two(length),
+ 1UL << __ffs64(uptr));
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ max_alignment = HPAGE_SIZE;
++#endif
++ /* Protect against ALIGN() overflow */
++ if (iova_alignment >= max_alignment)
++ iova_alignment = max_alignment;
++
+ if (iova_alignment < iopt->iova_alignment)
+ return -EINVAL;
+
+@@ -221,6 +229,18 @@ static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
+ return 0;
+ }
+
++static struct iopt_area *iopt_area_alloc(void)
++{
++ struct iopt_area *area;
++
++ area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++ if (!area)
++ return NULL;
++ RB_CLEAR_NODE(&area->node.rb);
++ RB_CLEAR_NODE(&area->pages_node.rb);
++ return area;
++}
++
+ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
+ struct list_head *pages_list,
+ unsigned long length, unsigned long *dst_iova,
+@@ -231,7 +251,7 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
+ int rc = 0;
+
+ list_for_each_entry(elm, pages_list, next) {
+- elm->area = kzalloc(sizeof(*elm->area), GFP_KERNEL_ACCOUNT);
++ elm->area = iopt_area_alloc();
+ if (!elm->area)
+ return -ENOMEM;
+ }
+@@ -1005,11 +1025,11 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ iopt_area_start_byte(area, new_start) & (alignment - 1))
+ return -EINVAL;
+
+- lhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++ lhs = iopt_area_alloc();
+ if (!lhs)
+ return -ENOMEM;
+
+- rhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
++ rhs = iopt_area_alloc();
+ if (!rhs) {
+ rc = -ENOMEM;
+ goto err_free_lhs;
+@@ -1048,6 +1068,16 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ if (WARN_ON(rc))
+ goto err_remove_lhs;
+
++ /*
++ * If the original area has filled a domain, domains_itree has to be
++ * updated.
++ */
++ if (area->storage_domain) {
++ interval_tree_remove(&area->pages_node, &pages->domains_itree);
++ interval_tree_insert(&lhs->pages_node, &pages->domains_itree);
++ interval_tree_insert(&rhs->pages_node, &pages->domains_itree);
++ }
++
+ lhs->storage_domain = area->storage_domain;
+ lhs->pages = area->pages;
+ rhs->storage_domain = area->storage_domain;
+@@ -1136,20 +1166,23 @@ int iopt_disable_large_pages(struct io_pagetable *iopt)
+
+ int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
+ {
++ u32 new_id;
+ int rc;
+
+ down_write(&iopt->domains_rwsem);
+ down_write(&iopt->iova_rwsem);
+- rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
+- xa_limit_16b, GFP_KERNEL_ACCOUNT);
++ rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
++ GFP_KERNEL_ACCOUNT);
++
+ if (rc)
+ goto out_unlock;
+
+ rc = iopt_calculate_iova_alignment(iopt);
+ if (rc) {
+- xa_erase(&iopt->access_list, access->iopt_access_list_id);
++ xa_erase(&iopt->access_list, new_id);
+ goto out_unlock;
+ }
++ access->iopt_access_list_id = new_id;
+
+ out_unlock:
+ up_write(&iopt->iova_rwsem);
+diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
+index d5624577f79f1b..0407e2b758ef43 100644
+--- a/drivers/iommu/iommufd/ioas.c
++++ b/drivers/iommu/iommufd/ioas.c
+@@ -213,6 +213,10 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
+ if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
+ return -EOVERFLOW;
+
++ if (!(cmd->flags &
++ (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
++ return -EINVAL;
++
+ ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
+ if (IS_ERR(ioas))
+ return PTR_ERR(ioas);
+@@ -253,6 +257,10 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
+ cmd->dst_iova >= ULONG_MAX)
+ return -EOVERFLOW;
+
++ if (!(cmd->flags &
++ (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
++ return -EINVAL;
++
+ src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
+ if (IS_ERR(src_ioas))
+ return PTR_ERR(src_ioas);
+diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
+index 8d9aa297c117e4..528f356238b343 100644
+--- a/drivers/iommu/iommufd/pages.c
++++ b/drivers/iommu/iommufd/pages.c
+@@ -1507,6 +1507,8 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
+ area, domain, iopt_area_index(area),
+ iopt_area_last_index(area));
+
++ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
++ WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
+ interval_tree_remove(&area->pages_node, &pages->domains_itree);
+ iopt_area_unfill_domain(area, pages, area->storage_domain);
+ area->storage_domain = NULL;
+diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
+index 56506d5753f15c..00b794d74e03be 100644
+--- a/drivers/iommu/iommufd/selftest.c
++++ b/drivers/iommu/iommufd/selftest.c
+@@ -44,8 +44,8 @@ enum {
+ * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
+ * value. This has a much smaller randomization space and syzkaller can hit it.
+ */
+-static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+- u64 *iova)
++static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
++ u64 *iova)
+ {
+ struct syz_layout {
+ __u32 nth_area;
+@@ -69,6 +69,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+ return 0;
+ }
+
++static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
++ u64 *iova)
++{
++ unsigned long ret;
++
++ mutex_lock(&access->ioas_lock);
++ if (!access->ioas) {
++ mutex_unlock(&access->ioas_lock);
++ return 0;
++ }
++ ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
++ mutex_unlock(&access->ioas_lock);
++ return ret;
++}
++
+ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
+ unsigned int ioas_id, u64 *iova, u32 *flags)
+ {
+@@ -81,7 +96,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
+ ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
+ if (IS_ERR(ioas))
+ return;
+- *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
++ *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
+ iommufd_put_object(&ioas->obj);
+ }
+
+@@ -852,7 +867,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
+ }
+
+ if (flags & MOCK_FLAGS_ACCESS_SYZ)
+- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
++ iova = iommufd_test_syz_conv_iova(staccess->access,
+ &cmd->access_pages.iova);
+
+ npages = (ALIGN(iova + length, PAGE_SIZE) -
+@@ -954,8 +969,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
+ }
+
+ if (flags & MOCK_FLAGS_ACCESS_SYZ)
+- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
+- &cmd->access_rw.iova);
++ iova = iommufd_test_syz_conv_iova(staccess->access,
++ &cmd->access_rw.iova);
+
+ rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
+ if (rc)
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 83314b9d8f38bf..ee59647c20501e 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void)
+ if (disable_irq_remap)
+ return -ENOSYS;
+
+- if (intel_irq_remap_ops.prepare() == 0)
++ if (IS_ENABLED(CONFIG_INTEL_IOMMU) &&
++ intel_irq_remap_ops.prepare() == 0)
+ remap_ops = &intel_irq_remap_ops;
+ else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
+ amd_iommu_irq_ops.prepare() == 0)
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index fab6c347ce578e..de698463e94ad9 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -1773,6 +1773,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
+ { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
+ {}
+ };
++MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
+
+ static struct platform_driver mtk_iommu_driver = {
+ .probe = mtk_iommu_probe,
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index 8a0a5e5d049f4a..f1754efcfe74e6 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
+ { .compatible = "mediatek,mt2701-m4u", },
+ {}
+ };
++MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
+
+ static const struct component_master_ops mtk_iommu_v1_com_ops = {
+ .bind = mtk_iommu_v1_bind,
+diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
+index 157b286e36bf3a..42cffb0ee5e289 100644
+--- a/drivers/iommu/of_iommu.c
++++ b/drivers/iommu/of_iommu.c
+@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ const u32 *id)
+ {
+ const struct iommu_ops *ops = NULL;
+- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
++ struct iommu_fwspec *fwspec;
+ int err = NO_IOMMU;
+
+ if (!master_np)
+ return NULL;
+
++ /* Serialise to make dev->iommu stable under our potential fwspec */
++ mutex_lock(&iommu_probe_device_lock);
++ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec) {
+- if (fwspec->ops)
++ if (fwspec->ops) {
++ mutex_unlock(&iommu_probe_device_lock);
+ return fwspec->ops;
+-
++ }
+ /* In the deferred case, start again from scratch */
+ iommu_fwspec_free(dev);
+ }
+@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ fwspec = dev_iommu_fwspec_get(dev);
+ ops = fwspec->ops;
+ }
++ mutex_unlock(&iommu_probe_device_lock);
++
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * probe for dev, replay it to get things in order.
+@@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
+ if (start == phys->start && end == phys->end)
+ return IOMMU_RESV_DIRECT;
+
+- dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
++ dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
+ &start, &end);
+ return IOMMU_RESV_RESERVED;
+ }
+@@ -254,7 +260,14 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
+ phys_addr_t iova;
+ size_t length;
+
++ if (of_dma_is_coherent(dev->of_node))
++ prot |= IOMMU_CACHE;
++
+ maps = of_translate_dma_region(np, maps, &iova, &length);
++ if (length == 0) {
++ dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
++ continue;
++ }
+ type = iommu_resv_region_get_type(dev, &phys, iova, length);
+
+ region = iommu_alloc_resv_region(iova, length, prot, type,
+diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
+index 2fa9afebd4f5f0..c8e79a2d8b4c69 100644
+--- a/drivers/iommu/sprd-iommu.c
++++ b/drivers/iommu/sprd-iommu.c
+@@ -236,8 +236,8 @@ static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
+
+ pgt_size = sprd_iommu_pgt_size(&dom->domain);
+ dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
+- dom->sdev = NULL;
+ sprd_iommu_hw_en(dom->sdev, false);
++ dom->sdev = NULL;
+ }
+
+ static void sprd_iommu_domain_free(struct iommu_domain *domain)
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index 74c5cb93e90027..94bd7f25f6f26b 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -449,6 +449,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
++ iommu_write(iommu, IOMMU_BYPASS_REG, 0);
+ iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index f7149d0f3d45ca..e7b736800dd023 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -557,7 +557,7 @@ config IRQ_LOONGARCH_CPU
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
++ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+ select LOONGSON_HTVEC
+ select LOONGSON_LIOINTC
+ select LOONGSON_EIOINTC
+diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
+index 9c8b1349ee17b8..a1430ab60a8a3f 100644
+--- a/drivers/irqchip/irq-alpine-msi.c
++++ b/drivers/irqchip/irq-alpine-msi.c
+@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+ return 0;
+
+ err_sgi:
+- irq_domain_free_irqs_parent(domain, virq, i - 1);
++ irq_domain_free_irqs_parent(domain, virq, i);
+ alpine_msix_free_sgi(priv, sgi, nr_irqs);
+ return err;
+ }
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index a55528469278c7..91a42e2d7a1319 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -566,6 +566,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
+ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+ {
++ /* IRQs 0 and 1 cannot be mapped, they are handled internally */
++ if (hw <= 1)
++ return -EINVAL;
++
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ if (!is_percpu_irq(hw))
+ writel(hw, per_cpu_int_base +
+diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
+index 5559c943f03f97..2b0b3175cea068 100644
+--- a/drivers/irqchip/irq-brcmstb-l2.c
++++ b/drivers/irqchip/irq-brcmstb-l2.c
+@@ -2,7 +2,7 @@
+ /*
+ * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
+ *
+- * Copyright (C) 2014-2017 Broadcom
++ * Copyright (C) 2014-2024 Broadcom
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
+ generic_handle_domain_irq(b->domain, irq);
+ } while (status);
+ out:
++ /* Don't ack parent before all device writes are done */
++ wmb();
++
+ chained_irq_exit(chip, desc);
+ }
+
+diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
+index f2ff4387870d69..d83c2c85962c37 100644
+--- a/drivers/irqchip/irq-gic-v2m.c
++++ b/drivers/irqchip/irq-gic-v2m.c
+@@ -438,12 +438,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
+
+ ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
+ &res, 0);
+- if (ret) {
+- of_node_put(child);
++ if (ret)
+ break;
+- }
+ }
+
++ if (ret && child)
++ of_node_put(child);
+ if (!ret)
+ ret = gicv2m_allocate_domains(parent);
+ if (ret)
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index a8c89df1a99786..b1e60c13c1e1e7 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+ return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+ }
+
++static bool rdists_support_shareable(void)
++{
++ return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
++}
++
+ static u16 get_its_list(struct its_vm *vm)
+ {
+ struct its_node *its;
+@@ -781,6 +786,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+ {
++ struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
+ unsigned long vpt_addr, vconf_addr;
+ u64 target;
+ bool alloc;
+@@ -790,9 +796,14 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+
+ if (!desc->its_vmapp_cmd.valid) {
++ alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+ if (is_v4_1(its)) {
+- alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
+ its_encode_alloc(cmd, alloc);
++ /*
++ * Unmapping a VPE is self-synchronizing on GICv4.1,
++ * no need to issue a VSYNC.
++ */
++ vpe = NULL;
+ }
+
+ goto out;
+@@ -805,13 +816,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ its_encode_vpt_addr(cmd, vpt_addr);
+ its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
++ alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
++
+ if (!is_v4_1(its))
+ goto out;
+
+ vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
+
+- alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+-
+ its_encode_alloc(cmd, alloc);
+
+ /*
+@@ -827,7 +838,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ out:
+ its_fixup_cmd(cmd);
+
+- return valid_vpe(its, desc->its_vmapp_cmd.vpe);
++ return vpe;
+ }
+
+ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
+@@ -1835,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+ {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+- int ret = 0;
+
+ if (!info->map)
+ return -EINVAL;
+
+- raw_spin_lock(&its_dev->event_map.vlpi_lock);
+-
+ if (!its_dev->event_map.vm) {
+ struct its_vlpi_map *maps;
+
+ maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
+ GFP_ATOMIC);
+- if (!maps) {
+- ret = -ENOMEM;
+- goto out;
+- }
++ if (!maps)
++ return -ENOMEM;
+
+ its_dev->event_map.vm = info->map->vm;
+ its_dev->event_map.vlpi_maps = maps;
+ } else if (its_dev->event_map.vm != info->map->vm) {
+- ret = -EINVAL;
+- goto out;
++ return -EINVAL;
+ }
+
+ /* Get our private copy of the mapping information */
+@@ -1888,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+ its_dev->event_map.nr_vlpis++;
+ }
+
+-out:
+- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+- return ret;
++ return 0;
+ }
+
+ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+ {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_vlpi_map *map;
+- int ret = 0;
+-
+- raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ map = get_vlpi_map(d);
+
+- if (!its_dev->event_map.vm || !map) {
+- ret = -EINVAL;
+- goto out;
+- }
++ if (!its_dev->event_map.vm || !map)
++ return -EINVAL;
+
+ /* Copy our mapping information to the incoming request */
+ *info->map = *map;
+
+-out:
+- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+- return ret;
++ return 0;
+ }
+
+ static int its_vlpi_unmap(struct irq_data *d)
+ {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+- int ret = 0;
+
+- raw_spin_lock(&its_dev->event_map.vlpi_lock);
+-
+- if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+- ret = -EINVAL;
+- goto out;
+- }
++ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
++ return -EINVAL;
+
+ /* Drop the virtual mapping */
+ its_send_discard(its_dev, event);
+@@ -1951,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d)
+ kfree(its_dev->event_map.vlpi_maps);
+ }
+
+-out:
+- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+- return ret;
++ return 0;
+ }
+
+ static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+@@ -1981,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+ if (!is_v4(its_dev->its))
+ return -EINVAL;
+
++ guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
++
+ /* Unmap request? */
+ if (!info)
+ return its_vlpi_unmap(d);
+@@ -2379,12 +2370,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+ break;
+ }
+
++ if (!shr)
++ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
++
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+
+- if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
+- tmp &= ~GITS_BASER_SHAREABILITY_MASK;
+-
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+@@ -2394,10 +2385,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+ * non-cacheable as well.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+- if (!shr) {
++ if (!shr)
+ cache = GITS_BASER_nC;
+- gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+- }
++
+ goto retry_baser;
+ }
+
+@@ -2609,6 +2599,11 @@ static int its_alloc_tables(struct its_node *its)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
+
++ if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
++ cache = GITS_BASER_nC;
++ shr = 0;
++ }
++
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+@@ -2706,10 +2701,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
+ break;
+ }
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
+- val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+- FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+- val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+- FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
++ if (rdists_support_shareable()) {
++ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
++ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
++ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
++ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
++ }
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+
+ return val;
+@@ -2932,8 +2929,10 @@ static int allocate_vpe_l1_table(void)
+ WARN_ON(!IS_ALIGNED(pa, psz));
+
+ val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
+- val |= GICR_VPROPBASER_RaWb;
+- val |= GICR_VPROPBASER_InnerShareable;
++ if (rdists_support_shareable()) {
++ val |= GICR_VPROPBASER_RaWb;
++ val |= GICR_VPROPBASER_InnerShareable;
++ }
+ val |= GICR_VPROPBASER_4_1_Z;
+ val |= GICR_VPROPBASER_4_1_VALID;
+
+@@ -3122,7 +3121,7 @@ static void its_cpu_init_lpis(void)
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
+
+- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
++ if (!rdists_support_shareable())
+ tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
+
+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+@@ -3149,7 +3148,7 @@ static void its_cpu_init_lpis(void)
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+
+- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
++ if (!rdists_support_shareable())
+ tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
+
+ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+@@ -3168,6 +3167,7 @@ static void its_cpu_init_lpis(void)
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
++out:
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+@@ -3203,7 +3203,6 @@ static void its_cpu_init_lpis(void)
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+-out:
+ gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
+ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+@@ -3813,8 +3812,16 @@ static int its_vpe_set_affinity(struct irq_data *d,
+ bool force)
+ {
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+- int from, cpu = cpumask_first(mask_val);
++ struct cpumask common, *table_mask;
+ unsigned long flags;
++ int from, cpu;
++
++ /*
++ * Check if we're racing against a VPE being destroyed, for
++ * which we don't want to allow a VMOVP.
++ */
++ if (!atomic_read(&vpe->vmapp_count))
++ return -EINVAL;
+
+ /*
+ * Changing affinity is mega expensive, so let's be as lazy as
+@@ -3830,19 +3837,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
+ * taken on any vLPI handling path that evaluates vpe->col_idx.
+ */
+ from = vpe_to_cpuid_lock(vpe, &flags);
+- if (from == cpu)
+- goto out;
+-
+- vpe->col_idx = cpu;
++ table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
+
+ /*
+- * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
+- * is sharing its VPE table with the current one.
++ * If we are offered another CPU in the same GICv4.1 ITS
++ * affinity, pick this one. Otherwise, any CPU will do.
+ */
+- if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
+- cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
++ if (table_mask && cpumask_and(&common, mask_val, table_mask))
++ cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
++ else
++ cpu = cpumask_first(mask_val);
++
++ if (from == cpu)
+ goto out;
+
++ vpe->col_idx = cpu;
++
+ its_send_vmovp(vpe);
+ its_vpe_db_proxy_move(vpe, from, cpu);
+
+@@ -3876,14 +3886,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
+ val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
+ GENMASK_ULL(51, 12);
+ val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+- val |= GICR_VPROPBASER_RaWb;
+- val |= GICR_VPROPBASER_InnerShareable;
++ if (rdists_support_shareable()) {
++ val |= GICR_VPROPBASER_RaWb;
++ val |= GICR_VPROPBASER_InnerShareable;
++ }
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ val = virt_to_phys(page_address(vpe->vpt_page)) &
+ GENMASK_ULL(51, 16);
+- val |= GICR_VPENDBASER_RaWaWb;
+- val |= GICR_VPENDBASER_InnerShareable;
++ if (rdists_support_shareable()) {
++ val |= GICR_VPENDBASER_RaWaWb;
++ val |= GICR_VPENDBASER_InnerShareable;
++ }
+ /*
+ * There is no good way of finding out if the pending table is
+ * empty as we can race against the doorbell interrupt very
+@@ -4449,9 +4463,8 @@ static int its_vpe_init(struct its_vpe *vpe)
+ raw_spin_lock_init(&vpe->vpe_lock);
+ vpe->vpe_id = vpe_id;
+ vpe->vpt_page = vpt_page;
+- if (gic_rdists->has_rvpeid)
+- atomic_set(&vpe->vmapp_count, 0);
+- else
++ atomic_set(&vpe->vmapp_count, 0);
++ if (!gic_rdists->has_rvpeid)
+ vpe->vpe_proxy_event = -1;
+
+ return 0;
+@@ -4500,8 +4513,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ struct page *vprop_page;
+ int base, nr_ids, i, err = 0;
+
+- BUG_ON(!vm);
+-
+ bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
+ if (!bitmap)
+ return -ENOMEM;
+@@ -4540,13 +4551,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
+ }
+
+- if (err) {
+- if (i > 0)
+- its_vpe_irq_domain_free(domain, virq, i);
+-
+- its_lpi_free(bitmap, base, nr_ids);
+- its_free_prop_table(vprop_page);
+- }
++ if (err)
++ its_vpe_irq_domain_free(domain, virq, i);
+
+ return err;
+ }
+@@ -5074,6 +5080,8 @@ static int __init its_probe_one(struct its_node *its)
+ u32 ctlr;
+ int err;
+
++ its_enable_quirks(its);
++
+ if (is_v4(its)) {
+ if (!(its->typer & GITS_TYPER_VMOVP)) {
+ err = its_compute_its_list_map(its);
+@@ -5425,7 +5433,6 @@ static int __init its_of_probe(struct device_node *node)
+ if (!its)
+ return -ENOMEM;
+
+- its_enable_quirks(its);
+ err = its_probe_one(its);
+ if (err) {
+ its_node_destroy(its);
+diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
+index bd954331453992..7df53b4532b439 100644
+--- a/drivers/irqchip/irq-imx-irqsteer.c
++++ b/drivers/irqchip/irq-imx-irqsteer.c
+@@ -36,6 +36,7 @@ struct irqsteer_data {
+ int channel;
+ struct irq_domain *domain;
+ u32 *saved_reg;
++ struct device *dev;
+ };
+
+ static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
+@@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+
++static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
++{
++ struct irqsteer_data *data = d->chip_data;
++
++ pm_runtime_get_sync(data->dev);
++}
++
++static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
++{
++ struct irqsteer_data *data = d->chip_data;
++
++ pm_runtime_put_autosuspend(data->dev);
++}
++
+ static const struct irq_chip imx_irqsteer_irq_chip = {
+- .name = "irqsteer",
+- .irq_mask = imx_irqsteer_irq_mask,
+- .irq_unmask = imx_irqsteer_irq_unmask,
++ .name = "irqsteer",
++ .irq_mask = imx_irqsteer_irq_mask,
++ .irq_unmask = imx_irqsteer_irq_unmask,
++ .irq_bus_lock = imx_irqsteer_irq_bus_lock,
++ .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock,
+ };
+
+ static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
+@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
++ data->dev = &pdev->dev;
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
+index 9d8f2c40604310..b35903a06902f7 100644
+--- a/drivers/irqchip/irq-loongarch-cpu.c
++++ b/drivers/irqchip/irq-loongarch-cpu.c
+@@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle;
+
+ static u32 lpic_gsi_to_irq(u32 gsi)
+ {
++ int irq = 0;
++
+ /* Only pch irqdomain transferring is required for LoongArch. */
+ if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
+- return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
++ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+
+- return 0;
++ return (irq > 0) ? irq : 0;
+ }
+
+ static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index 1623cd77917523..08e95fad5b12e3 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -15,6 +15,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/kernel.h>
+ #include <linux/syscore_ops.h>
++#include <asm/numa.h>
+
+ #define EIOINTC_REG_NODEMAP 0x14a0
+ #define EIOINTC_REG_IPMAP 0x14c0
+@@ -241,7 +242,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ int ret;
+ unsigned int i, type;
+ unsigned long hwirq = 0;
+- struct eiointc *priv = domain->host_data;
++ struct eiointc_priv *priv = domain->host_data;
+
+ ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ if (ret)
+@@ -349,7 +350,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ int node;
+
+ if (cpu_has_flatmode)
+- node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
++ node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ else
+ node = eiointc_priv[nr_pics - 1]->node;
+
+@@ -441,7 +442,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ goto out_free_handle;
+
+ if (cpu_has_flatmode)
+- node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
++ node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ else
+ node = acpi_eiointc->node;
+ acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
+diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
+index e4b33aed1c97b3..7c4fe7ab4b830e 100644
+--- a/drivers/irqchip/irq-loongson-liointc.c
++++ b/drivers/irqchip/irq-loongson-liointc.c
+@@ -28,7 +28,7 @@
+
+ #define LIOINTC_INTC_CHIP_START 0x20
+
+-#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
++#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
+ #define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
+ #define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
+ #define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
+@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ goto out_free_priv;
+
+ for (i = 0; i < LIOINTC_NUM_CORES; i++)
+- priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
++ priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
+
+ for (i = 0; i < LIOINTC_NUM_PARENT; i++)
+ priv->handler[i].parent_int_map = parent_int_map[i];
+diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
+index 6e1e1f011bb292..dd4d699170f4ec 100644
+--- a/drivers/irqchip/irq-loongson-pch-msi.c
++++ b/drivers/irqchip/irq-loongson-pch-msi.c
+@@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+
+ err_hwirq:
+ pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+- irq_domain_free_irqs_parent(domain, virq, i - 1);
++ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+ }
+diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
+index 5101a3fb11df5b..244a8d489cac68 100644
+--- a/drivers/irqchip/irq-mbigen.c
++++ b/drivers/irqchip/irq-mbigen.c
+@@ -64,6 +64,20 @@ struct mbigen_device {
+ void __iomem *base;
+ };
+
++static inline unsigned int get_mbigen_node_offset(unsigned int nid)
++{
++ unsigned int offset = nid * MBIGEN_NODE_OFFSET;
++
++ /*
++	 * To avoid touching the clear registers unexpectedly, skip over
++	 * them when accessing mbigen nodes numbered 10 and above.
++ */
++ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
++ offset += MBIGEN_NODE_OFFSET;
++
++ return offset;
++}
++
+ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
+ {
+ unsigned int nid, pin;
+@@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
+ nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
+ pin = hwirq % IRQS_PER_MBIGEN_NODE;
+
+- return pin * 4 + nid * MBIGEN_NODE_OFFSET
+- + REG_MBIGEN_VEC_OFFSET;
++ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
+ }
+
+ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
+@@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
+ *mask = 1 << (irq_ofst % 32);
+ ofst = irq_ofst / 32 * 4;
+
+- *addr = ofst + nid * MBIGEN_NODE_OFFSET
+- + REG_MBIGEN_TYPE_OFFSET;
++ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
+ }
+
+ static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
+@@ -235,22 +247,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
+ static int mbigen_of_create_domain(struct platform_device *pdev,
+ struct mbigen_device *mgn_chip)
+ {
+- struct device *parent;
+ struct platform_device *child;
+ struct irq_domain *domain;
+ struct device_node *np;
+ u32 num_pins;
+ int ret = 0;
+
+- parent = bus_get_dev_root(&platform_bus_type);
+- if (!parent)
+- return -ENODEV;
+-
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ continue;
+
+- child = of_platform_device_create(np, NULL, parent);
++ child = of_platform_device_create(np, NULL, NULL);
+ if (!child) {
+ ret = -ENOMEM;
+ break;
+@@ -273,7 +280,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
+ }
+ }
+
+- put_device(parent);
+ if (ret)
+ of_node_put(np);
+
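
The get_mbigen_node_offset() helper introduced above linearises node offsets around the hole occupied by the clear registers. A worked example, assuming MBIGEN_NODE_OFFSET == 0x1000 and REG_MBIGEN_CLEAR_OFFSET == 0xa000 (the mainline values, which make the threshold nid >= 10):

/*
 *   nid = 9  -> offset = 9  * 0x1000           = 0x9000  (below the hole)
 *   nid = 10 -> offset = 10 * 0x1000 + 0x1000  = 0xb000  (steps over 0xa000)
 *   nid = 11 -> offset = 11 * 0x1000 + 0x1000  = 0xc000
 */
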
+diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
+index f88df39f41291e..471e04eaf3230f 100644
+--- a/drivers/irqchip/irq-meson-gpio.c
++++ b/drivers/irqchip/irq-meson-gpio.c
+@@ -173,7 +173,7 @@ struct meson_gpio_irq_controller {
+ void __iomem *base;
+ u32 channel_irqs[MAX_NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ };
+
+ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+@@ -182,14 +182,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ unsigned long flags;
+ u32 tmp;
+
+- spin_lock_irqsave(&ctl->lock, flags);
++ raw_spin_lock_irqsave(&ctl->lock, flags);
+
+ tmp = readl_relaxed(ctl->base + reg);
+ tmp &= ~mask;
+ tmp |= val;
+ writel_relaxed(tmp, ctl->base + reg);
+
+- spin_unlock_irqrestore(&ctl->lock, flags);
++ raw_spin_unlock_irqrestore(&ctl->lock, flags);
+ }
+
+ static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+@@ -239,12 +239,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ unsigned long flags;
+ unsigned int idx;
+
+- spin_lock_irqsave(&ctl->lock, flags);
++ raw_spin_lock_irqsave(&ctl->lock, flags);
+
+ /* Find a free channel */
+ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
+ if (idx >= ctl->params->nr_channels) {
+- spin_unlock_irqrestore(&ctl->lock, flags);
++ raw_spin_unlock_irqrestore(&ctl->lock, flags);
+ pr_err("No channel available\n");
+ return -ENOSPC;
+ }
+@@ -252,7 +252,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ /* Mark the channel as used */
+ set_bit(idx, ctl->channel_map);
+
+- spin_unlock_irqrestore(&ctl->lock, flags);
++ raw_spin_unlock_irqrestore(&ctl->lock, flags);
+
+ /*
+ * Setup the mux of the channel to route the signal of the pad
+@@ -562,7 +562,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
+ if (!ctl)
+ return -ENOMEM;
+
+- spin_lock_init(&ctl->lock);
++ raw_spin_lock_init(&ctl->lock);
+
+ ctl->base = of_iomap(node, 0);
+ if (!ctl->base) {
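
The meson-gpio hunks above convert the controller lock from spinlock_t to raw_spinlock_t: these helpers run from irqchip callbacks in hard-interrupt context, where a plain spinlock_t is unusable on PREEMPT_RT kernels (it becomes a sleeping lock there). A minimal, self-contained sketch of the resulting pattern — the names are illustrative, not the driver's:

#include <linux/io.h>
#include <linux/spinlock.h>

struct ctl {
	void __iomem *base;
	raw_spinlock_t lock;		/* still a true spinning lock on PREEMPT_RT */
};

static void ctl_update_bits(struct ctl *c, unsigned int reg, u32 mask, u32 val)
{
	unsigned long flags;
	u32 tmp;

	raw_spin_lock_irqsave(&c->lock, flags);
	tmp = readl_relaxed(c->base + reg);	/* read-modify-write under the lock */
	tmp = (tmp & ~mask) | val;
	writel_relaxed(tmp, c->base + reg);
	raw_spin_unlock_irqrestore(&c->lock, flags);
}
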
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index 96f4e322ed6b72..ea4b921e5e1588 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -28,8 +28,7 @@
+ #define ISCR 0x10
+ #define IITSR 0x14
+ #define TSCR 0x20
+-#define TITSR0 0x24
+-#define TITSR1 0x28
++#define TITSR(n) (0x24 + (n) * 4)
+ #define TITSR0_MAX_INT 16
+ #define TITSEL_WIDTH 0x2
+ #define TSSR(n) (0x30 + ((n) * 4))
+@@ -67,28 +66,43 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
+ return data->domain->host_data;
+ }
+
+-static void rzg2l_irq_eoi(struct irq_data *d)
++static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
+ {
+- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
++ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
+ u32 bit = BIT(hw_irq);
+- u32 reg;
++ u32 iitsr, iscr;
+
+- reg = readl_relaxed(priv->base + ISCR);
+- if (reg & bit)
+- writel_relaxed(reg & ~bit, priv->base + ISCR);
++ iscr = readl_relaxed(priv->base + ISCR);
++ iitsr = readl_relaxed(priv->base + IITSR);
++
++ /*
++ * ISCR can only be cleared if the type is falling-edge, rising-edge or
++ * falling/rising-edge.
++ */
++ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
++ writel_relaxed(iscr & ~bit, priv->base + ISCR);
++ /*
++		 * Enforce that the posted write is flushed so that the interrupt
++		 * that was just handled is not raised again.
++ */
++ readl_relaxed(priv->base + ISCR);
++ }
+ }
+
+-static void rzg2l_tint_eoi(struct irq_data *d)
++static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
+ {
+- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
+- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+- u32 bit = BIT(hw_irq);
++ u32 bit = BIT(hwirq - IRQC_TINT_START);
+ u32 reg;
+
+ reg = readl_relaxed(priv->base + TSCR);
+- if (reg & bit)
++ if (reg & bit) {
+ writel_relaxed(reg & ~bit, priv->base + TSCR);
++ /*
++		 * Enforce that the posted write is flushed so that the interrupt
++		 * that was just handled is not raised again.
++ */
++ readl_relaxed(priv->base + TSCR);
++ }
+ }
+
+ static void rzg2l_irqc_eoi(struct irq_data *d)
+@@ -98,9 +112,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
+
+ raw_spin_lock(&priv->lock);
+ if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
+- rzg2l_irq_eoi(d);
++ rzg2l_clear_irq_int(priv, hw_irq);
+ else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
+- rzg2l_tint_eoi(d);
++ rzg2l_clear_tint_int(priv, hw_irq);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_eoi_parent(d);
+ }
+@@ -118,7 +132,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+- reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
++ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ }
+@@ -130,7 +144,6 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+ unsigned int hw_irq = irqd_to_hwirq(d);
+
+ if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+- unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ u32 offset = hw_irq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(offset);
+@@ -139,7 +152,7 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+
+ raw_spin_lock(&priv->lock);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+- reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
++ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ }
+@@ -148,8 +161,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
+
+ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
++ unsigned int hwirq = irqd_to_hwirq(d);
++ u32 iitseln = hwirq - IRQC_IRQ_START;
++ bool clear_irq_int = false;
+ u16 sense, tmp;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+@@ -159,14 +174,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+
+ case IRQ_TYPE_EDGE_FALLING:
+ sense = IITSR_IITSEL_EDGE_FALLING;
++ clear_irq_int = true;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ sense = IITSR_IITSEL_EDGE_RISING;
++ clear_irq_int = true;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ sense = IITSR_IITSEL_EDGE_BOTH;
++ clear_irq_int = true;
+ break;
+
+ default:
+@@ -175,22 +193,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
+
+ raw_spin_lock(&priv->lock);
+ tmp = readl_relaxed(priv->base + IITSR);
+- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
+- tmp |= IITSR_IITSEL(hw_irq, sense);
++ tmp &= ~IITSR_IITSEL_MASK(iitseln);
++ tmp |= IITSR_IITSEL(iitseln, sense);
++ if (clear_irq_int)
++ rzg2l_clear_irq_int(priv, hwirq);
+ writel_relaxed(tmp, priv->base + IITSR);
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
+ }
+
++static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
++ u32 reg, u32 tssr_offset, u8 tssr_index)
++{
++ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
++ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
++
++ /* Clear the relevant byte in reg */
++ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
++ /* Set TINT and leave TIEN clear */
++ reg |= tint << TSSEL_SHIFT(tssr_offset);
++ writel_relaxed(reg, priv->base + TSSR(tssr_index));
++
++ return reg | tien;
++}
++
+ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ {
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 titseln = hwirq - IRQC_TINT_START;
+- u32 offset;
+- u8 sense;
+- u32 reg;
++ u32 tssr_offset = TSSR_OFFSET(titseln);
++ u8 tssr_index = TSSR_INDEX(titseln);
++ u8 index, sense;
++ u32 reg, tssr;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+@@ -205,17 +241,21 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
+ return -EINVAL;
+ }
+
+- offset = TITSR0;
++ index = 0;
+ if (titseln >= TITSR0_MAX_INT) {
+ titseln -= TITSR0_MAX_INT;
+- offset = TITSR1;
++ index = 1;
+ }
+
+ raw_spin_lock(&priv->lock);
+- reg = readl_relaxed(priv->base + offset);
++ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
++ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
++ reg = readl_relaxed(priv->base + TITSR(index));
+ reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
+ reg |= sense << (titseln * TITSEL_WIDTH);
+- writel_relaxed(reg, priv->base + offset);
++ writel_relaxed(reg, priv->base + TITSR(index));
++ rzg2l_clear_tint_int(priv, hwirq);
++ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+
+ return 0;
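
Taken together, the rzg2l_tint_set_edge() hunks above quiesce the interrupt while its sense is reprogrammed. The ordering, as read from the new code (all under priv->lock):

/*
 * 1. TSSR:  clear TIEN while keeping the TINT source selected
 * 2. TITSR: program the new edge sense
 * 3. TSCR:  clear any detection latched with the old sense
 * 4. TSSR:  write back, restoring TIEN if it was previously set
 */
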
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index e8d01b14ccdde7..627beae9649a21 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -17,17 +17,19 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/smp.h>
++#include <linux/soc/andes/irq.h>
+
+ static struct irq_domain *intc_domain;
++static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
++static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
++static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
+
+ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+ {
+ unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+
+- if (unlikely(cause >= BITS_PER_LONG))
+- panic("unexpected interrupt cause");
+-
+- generic_handle_domain_irq(intc_domain, cause);
++ if (generic_handle_domain_irq(intc_domain, cause))
++ pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
+ }
+
+ /*
+@@ -47,6 +49,31 @@ static void riscv_intc_irq_unmask(struct irq_data *d)
+ csr_set(CSR_IE, BIT(d->hwirq));
+ }
+
++static void andes_intc_irq_mask(struct irq_data *d)
++{
++ /*
++	 * Andes-specific S-mode local interrupt causes (hwirq)
++	 * are defined as (256 + n) and controlled by the n-th
++	 * bit of SLIE.
++ */
++ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
++
++ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
++ csr_clear(CSR_IE, mask);
++ else
++ csr_clear(ANDES_CSR_SLIE, mask);
++}
++
++static void andes_intc_irq_unmask(struct irq_data *d)
++{
++ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
++
++ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
++ csr_set(CSR_IE, mask);
++ else
++ csr_set(ANDES_CSR_SLIE, mask);
++}
++
+ static void riscv_intc_irq_eoi(struct irq_data *d)
+ {
+ /*
+@@ -70,12 +97,21 @@ static struct irq_chip riscv_intc_chip = {
+ .irq_eoi = riscv_intc_irq_eoi,
+ };
+
++static struct irq_chip andes_intc_chip = {
++ .name = "RISC-V INTC",
++ .irq_mask = andes_intc_irq_mask,
++ .irq_unmask = andes_intc_irq_unmask,
++ .irq_eoi = riscv_intc_irq_eoi,
++};
++
+ static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+ {
++ struct irq_chip *chip = d->host_data;
++
+ irq_set_percpu_devid(irq);
+- irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+- handle_percpu_devid_irq, NULL, NULL);
++ irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
++ NULL, NULL);
+
+ return 0;
+ }
+@@ -93,6 +129,14 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
+ if (ret)
+ return ret;
+
++ /*
++	 * Only allow hwirqs for which we have a corresponding standard
++	 * or custom interrupt enable register.
++ */
++ if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
++ (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
++ return -EINVAL;
++
+ for (i = 0; i < nr_irqs; i++) {
+ ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+@@ -113,12 +157,12 @@ static struct fwnode_handle *riscv_intc_hwnode(void)
+ return intc_domain->fwnode;
+ }
+
+-static int __init riscv_intc_init_common(struct fwnode_handle *fn)
++static int __init riscv_intc_init_common(struct fwnode_handle *fn,
++ struct irq_chip *chip)
+ {
+ int rc;
+
+- intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
+- &riscv_intc_domain_ops, NULL);
++ intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
+ if (!intc_domain) {
+ pr_err("unable to add IRQ domain\n");
+ return -ENXIO;
+@@ -132,7 +176,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+
+ riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
+
+- pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
++ pr_info("%d local interrupts mapped\n", riscv_intc_nr_irqs);
++ if (riscv_intc_custom_nr_irqs) {
++ pr_info("%d custom local interrupts mapped\n",
++ riscv_intc_custom_nr_irqs);
++ }
+
+ return 0;
+ }
+@@ -140,8 +188,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+ static int __init riscv_intc_init(struct device_node *node,
+ struct device_node *parent)
+ {
+- int rc;
++ struct irq_chip *chip = &riscv_intc_chip;
+ unsigned long hartid;
++ int rc;
+
+ rc = riscv_of_parent_hartid(node, &hartid);
+ if (rc < 0) {
+@@ -166,18 +215,26 @@ static int __init riscv_intc_init(struct device_node *node,
+ return 0;
+ }
+
+- return riscv_intc_init_common(of_node_to_fwnode(node));
++ if (of_device_is_compatible(node, "andestech,cpu-intc")) {
++ riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
++ riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
++ chip = &andes_intc_chip;
++ }
++
++ return riscv_intc_init_common(of_node_to_fwnode(node), chip);
+ }
+
+ IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
++IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
+
+ #ifdef CONFIG_ACPI
+
+ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+ const unsigned long end)
+ {
+- struct fwnode_handle *fn;
+ struct acpi_madt_rintc *rintc;
++ struct fwnode_handle *fn;
++ int rc;
+
+ rintc = (struct acpi_madt_rintc *)header;
+
+@@ -196,7 +253,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+ return -ENOMEM;
+ }
+
+- return riscv_intc_init_common(fn);
++ rc = riscv_intc_init_common(fn, &riscv_intc_chip);
++ if (rc)
++ irq_domain_free_fwnode(fn);
++
++ return rc;
+ }
+
+ IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
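
Per the comment in andes_intc_irq_mask() above, Andes custom causes are numbered 256 + n. With BITS_PER_LONG == 64 (rv64), the modulo in BIT(d->hwirq % BITS_PER_LONG) therefore selects bit n of the SLIE CSR. Worked examples, assuming ANDES_SLI_CAUSE_BASE is 256 as that comment states:

/*
 *   hwirq = 5       -> 5   % 64 = bit 5 of CSR_IE          (standard cause)
 *   hwirq = 256 + 9 -> 265 % 64 = bit 9 of ANDES_CSR_SLIE  (custom cause)
 */
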
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index e1484905b7bdbc..57289966915492 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -120,16 +120,6 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
+ }
+ }
+
+-static void plic_irq_enable(struct irq_data *d)
+-{
+- plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+-}
+-
+-static void plic_irq_disable(struct irq_data *d)
+-{
+- plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+-}
+-
+ static void plic_irq_unmask(struct irq_data *d)
+ {
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+@@ -144,11 +134,28 @@ static void plic_irq_mask(struct irq_data *d)
+ writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+ }
+
++static void plic_irq_enable(struct irq_data *d)
++{
++ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
++ plic_irq_unmask(d);
++}
++
++static void plic_irq_disable(struct irq_data *d)
++{
++ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
++}
++
+ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ if (unlikely(irqd_irq_disabled(d))) {
++ plic_toggle(handler, d->hwirq, 1);
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ plic_toggle(handler, d->hwirq, 0);
++ } else {
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ }
+ }
+
+ #ifdef CONFIG_SMP
+@@ -532,17 +539,18 @@ static int __init __plic_init(struct device_node *node,
+ }
+
+ /*
+- * We can have multiple PLIC instances so setup cpuhp state only
+- * when context handler for current/boot CPU is present.
++	 * We can have multiple PLIC instances, so set up the cpuhp state
++	 * and register syscore operations only when the context handler
++	 * for the current/boot CPU is present.
+ */
+ handler = this_cpu_ptr(&plic_handlers);
+ if (handler->present && !plic_cpuhp_setup_done) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
++ register_syscore_ops(&plic_irq_syscore_ops);
+ plic_cpuhp_setup_done = true;
+ }
+- register_syscore_ops(&plic_irq_syscore_ops);
+
+ pr_info("%pOFP: mapped %d interrupts with %d handlers for"
+ " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
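
The plic_irq_eoi() change above appears to work around the PLIC's completion rule: a completion write for an interrupt source that is not enabled for the claiming context is silently ignored, so a source masked before EOI would otherwise never be completed and would stay blocked. Schematically:

/*
 *   enabled at EOI:   write hwirq to CLAIM           (normal completion)
 *   disabled at EOI:  enable -> write hwirq to CLAIM -> disable again
 */
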
+diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
+index 238d3d34494969..7e08714d507f47 100644
+--- a/drivers/irqchip/irq-xilinx-intc.c
++++ b/drivers/irqchip/irq-xilinx-intc.c
+@@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
+ irqc->intr_mask = 0;
+ }
+
+- if (irqc->intr_mask >> irqc->nr_irq)
++ if ((u64)irqc->intr_mask >> irqc->nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
+index 2e5cb9dde3ec50..44383cec1f47ad 100644
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -1900,7 +1900,7 @@ hfcmulti_dtmf(struct hfc_multi *hc)
+ static void
+ hfcmulti_tx(struct hfc_multi *hc, int ch)
+ {
+- int i, ii, temp, len = 0;
++ int i, ii, temp, tmp_len, len = 0;
+ int Zspace, z1, z2; /* must be int for calculation */
+ int Fspace, f1, f2;
+ u_char *d;
+@@ -2121,14 +2121,15 @@ hfcmulti_tx(struct hfc_multi *hc, int ch)
+ HFC_wait_nodebug(hc);
+ }
+
++ tmp_len = (*sp)->len;
+ dev_kfree_skb(*sp);
+ /* check for next frame */
+ if (bch && get_next_bframe(bch)) {
+- len = (*sp)->len;
++ len = tmp_len;
+ goto next_frame;
+ }
+ if (dch && get_next_dframe(dch)) {
+- len = (*sp)->len;
++ len = tmp_len;
+ goto next_frame;
+ }
+
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index 2776ca5fc33f39..b215b28cad7b76 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -401,23 +401,23 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ }
+
+ static int data_sock_setsockopt(struct socket *sock, int level, int optname,
+- sockptr_t optval, unsigned int len)
++ sockptr_t optval, unsigned int optlen)
+ {
+ struct sock *sk = sock->sk;
+ int err = 0, opt = 0;
+
+ if (*debug & DEBUG_SOCKET)
+ printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
+- level, optname, len);
++ level, optname, optlen);
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case MISDN_TIME_STAMP:
+- if (copy_from_sockptr(&opt, optval, sizeof(int))) {
+- err = -EFAULT;
++ err = copy_safe_from_sockptr(&opt, sizeof(opt),
++ optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ _pms(sk)->cmask |= MISDN_TIME_STAMP;
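
The mISDN hunk switches to copy_safe_from_sockptr(), which validates the user-supplied optlen before copying. Its behaviour is roughly the sketch below (the suffixed name marks this as an illustration; see include/linux/sockptr.h for the real helper):

/* Sketch only: reject short options instead of over-reading user memory. */
static inline int copy_safe_from_sockptr_sketch(void *dst, size_t ksize,
						sockptr_t optval,
						unsigned int optlen)
{
	if (optlen < ksize)
		return -EINVAL;
	return copy_from_sockptr(dst, optval, ksize);
}
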
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index b92208eccdea9a..3132439f99e031 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -110,6 +110,7 @@ config LEDS_AW200XX
+ config LEDS_AW2013
+ tristate "LED support for Awinic AW2013"
+ depends on LEDS_CLASS && I2C && OF
++ select REGMAP_I2C
+ help
+ This option enables support for the AW2013 3-channel
+ LED driver.
+diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
+index 1af6c589834348..fdf0812774ceee 100644
+--- a/drivers/leds/flash/leds-mt6360.c
++++ b/drivers/leds/flash/leds-mt6360.c
+@@ -633,14 +633,17 @@ static int mt6360_init_isnk_properties(struct mt6360_led *led,
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg > MT6360_LED_ISNK3 ||
+- priv->leds_active & BIT(reg))
++ priv->leds_active & BIT(reg)) {
++ fwnode_handle_put(child);
+ return -EINVAL;
++ }
+
+ ret = fwnode_property_read_u32(child, "color", &color);
+ if (ret) {
+ dev_err(priv->dev,
+ "led %d, no color specified\n",
+ led->led_no);
++ fwnode_handle_put(child);
+ return ret;
+ }
+
+diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
+index a73d3ea5c97a33..17391aefeb941f 100644
+--- a/drivers/leds/flash/leds-qcom-flash.c
++++ b/drivers/leds/flash/leds-qcom-flash.c
+@@ -505,6 +505,7 @@ qcom_flash_v4l2_init(struct device *dev, struct qcom_flash_led *led, struct fwno
+ struct qcom_flash_data *flash_data = led->flash_data;
+ struct v4l2_flash_config v4l2_cfg = { 0 };
+ struct led_flash_setting *intensity = &v4l2_cfg.intensity;
++ struct v4l2_flash *v4l2_flash;
+
+ if (!(led->flash.led_cdev.flags & LED_DEV_CAP_FLASH))
+ return 0;
+@@ -523,9 +524,12 @@ qcom_flash_v4l2_init(struct device *dev, struct qcom_flash_led *led, struct fwno
+ LED_FAULT_OVER_TEMPERATURE |
+ LED_FAULT_TIMEOUT;
+
+- flash_data->v4l2_flash[flash_data->leds_count] =
+- v4l2_flash_init(dev, fwnode, &led->flash, &qcom_v4l2_flash_ops, &v4l2_cfg);
+- return PTR_ERR_OR_ZERO(flash_data->v4l2_flash);
++ v4l2_flash = v4l2_flash_init(dev, fwnode, &led->flash, &qcom_v4l2_flash_ops, &v4l2_cfg);
++ if (IS_ERR(v4l2_flash))
++ return PTR_ERR(v4l2_flash);
++
++ flash_data->v4l2_flash[flash_data->leds_count] = v4l2_flash;
++ return 0;
+ }
+ # else
+ static int
+diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
+index d3f50dca513660..991b3db63ebe7e 100644
+--- a/drivers/leds/flash/leds-sgm3140.c
++++ b/drivers/leds/flash/leds-sgm3140.c
+@@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
++ gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ } else {
++ del_timer_sync(&priv->powerdown_timer);
++ gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 974b84f6bd6af7..c66d1bead0a4a3 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
+ }
+ static DEVICE_ATTR_RO(max_brightness);
+
+-static ssize_t color_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- const char *color_text = "invalid";
+- struct led_classdev *led_cdev = dev_get_drvdata(dev);
+-
+- if (led_cdev->color < LED_COLOR_ID_MAX)
+- color_text = led_colors[led_cdev->color];
+-
+- return sysfs_emit(buf, "%s\n", color_text);
+-}
+-static DEVICE_ATTR_RO(color);
+-
+ #ifdef CONFIG_LEDS_TRIGGERS
+ static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
+ static struct bin_attribute *led_trigger_bin_attrs[] = {
+@@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
+ static struct attribute *led_class_attrs[] = {
+ &dev_attr_brightness.attr,
+ &dev_attr_max_brightness.attr,
+- &dev_attr_color.attr,
+ NULL,
+ };
+
+@@ -272,7 +258,6 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
+
+ led_dev = class_find_device_by_of_node(&leds_class, led_node);
+ of_node_put(led_node);
+- put_device(led_dev);
+
+ return led_module_get(led_dev);
+ }
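
The put_device() dropped from of_led_get() above released, too early, the reference that class_find_device_by_of_node() takes on the classdev; the device could then go away while the caller still used it. In reference-count terms, as far as can be read from this hunk:

/*
 *   class_find_device_by_of_node()  -> +1 on led_dev
 *   led_module_get(led_dev)         -> caller keeps using that reference
 *   put_device(led_dev) here        -> premature -1 (potential use-after-free)
 */
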
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index 6a5e1f41f9a452..72fd2fe8f6fe83 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -179,9 +179,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+
+ cancel_work_sync(&led_cdev->set_brightness_work);
+ led_stop_software_blink(led_cdev);
++ device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
+ if (led_cdev->trigger->deactivate)
+ led_cdev->trigger->deactivate(led_cdev);
+- device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
+ led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
+@@ -194,11 +194,24 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
+ spin_unlock(&trig->leddev_list_lock);
+ led_cdev->trigger = trig;
+
++ /*
++ * Some activate() calls use led_trigger_event() to initialize
++ * the brightness of the LED for which the trigger is being set.
++ * Ensure the led_cdev is visible on trig->led_cdevs for this.
++ */
++ synchronize_rcu();
++
++ /*
++ * If "set brightness to 0" is pending in workqueue,
++ * we don't want that to be reordered after ->activate()
++ */
++ flush_work(&led_cdev->set_brightness_work);
++
++ ret = 0;
+ if (trig->activate)
+ ret = trig->activate(led_cdev);
+ else
+- ret = 0;
+-
++ led_set_brightness(led_cdev, trig->brightness);
+ if (ret)
+ goto err_activate;
+
+@@ -269,19 +282,6 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
+ }
+ EXPORT_SYMBOL_GPL(led_trigger_set_default);
+
+-void led_trigger_rename_static(const char *name, struct led_trigger *trig)
+-{
+- /* new name must be on a temporary string to prevent races */
+- BUG_ON(name == trig->name);
+-
+- down_write(&triggers_list_lock);
+- /* this assumes that trig->name was originaly allocated to
+- * non constant storage */
+- strcpy((char *)trig->name, name);
+- up_write(&triggers_list_lock);
+-}
+-EXPORT_SYMBOL_GPL(led_trigger_rename_static);
+-
+ /* LED Trigger Interface */
+
+ int led_trigger_register(struct led_trigger *trig)
+@@ -386,6 +386,8 @@ void led_trigger_event(struct led_trigger *trig,
+ if (!trig)
+ return;
+
++ trig->brightness = brightness;
++
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
+ led_set_brightness(led_cdev, brightness);
+diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
+index 0216afed3b6e72..decfca447d8a7a 100644
+--- a/drivers/leds/leds-an30259a.c
++++ b/drivers/leds/leds-an30259a.c
+@@ -283,7 +283,10 @@ static int an30259a_probe(struct i2c_client *client)
+ if (err < 0)
+ return err;
+
+- mutex_init(&chip->mutex);
++ err = devm_mutex_init(&client->dev, &chip->mutex);
++ if (err)
++ return err;
++
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+
+@@ -317,17 +320,9 @@ static int an30259a_probe(struct i2c_client *client)
+ return 0;
+
+ exit:
+- mutex_destroy(&chip->mutex);
+ return err;
+ }
+
+-static void an30259a_remove(struct i2c_client *client)
+-{
+- struct an30259a *chip = i2c_get_clientdata(client);
+-
+- mutex_destroy(&chip->mutex);
+-}
+-
+ static const struct of_device_id an30259a_match_table[] = {
+ { .compatible = "panasonic,an30259a", },
+ { /* sentinel */ },
+@@ -347,7 +342,6 @@ static struct i2c_driver an30259a_driver = {
+ .of_match_table = an30259a_match_table,
+ },
+ .probe = an30259a_probe,
+- .remove = an30259a_remove,
+ .id_table = an30259a_id,
+ };
+
+diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
+index 691a743cc9b0fd..5142efea2339d7 100644
+--- a/drivers/leds/leds-aw200xx.c
++++ b/drivers/leds/leds-aw200xx.c
+@@ -74,6 +74,10 @@
+ #define AW200XX_LED2REG(x, columns) \
+ ((x) + (((x) / (columns)) * (AW200XX_DSIZE_COLUMNS_MAX - (columns))))
+
++/* DIM current configuration register on page 1 */
++#define AW200XX_REG_DIM_PAGE1(x, columns) \
++ AW200XX_REG(AW200XX_PAGE1, AW200XX_LED2REG(x, columns))
++
+ /*
+ * DIM current configuration register (page 4).
+ * The even address for current DIM configuration.
+@@ -153,7 +157,8 @@ static ssize_t dim_store(struct device *dev, struct device_attribute *devattr,
+
+ if (dim >= 0) {
+ ret = regmap_write(chip->regmap,
+- AW200XX_REG_DIM(led->num, columns), dim);
++ AW200XX_REG_DIM_PAGE1(led->num, columns),
++ dim);
+ if (ret)
+ goto out_unlock;
+ }
+diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
+index 91f44b23cb113d..17235a5e576aef 100644
+--- a/drivers/leds/leds-aw2013.c
++++ b/drivers/leds/leds-aw2013.c
+@@ -405,6 +405,7 @@ static int aw2013_probe(struct i2c_client *client)
+ chip->regulators);
+
+ error:
++ mutex_unlock(&chip->mutex);
+ mutex_destroy(&chip->mutex);
+ return ret;
+ }
+diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c
+index 3fda712d2f8095..c1181a35d0f762 100644
+--- a/drivers/leds/leds-bd2606mvv.c
++++ b/drivers/leds/leds-bd2606mvv.c
+@@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = {
+
+ static int bd2606mvv_probe(struct i2c_client *client)
+ {
+- struct fwnode_handle *np, *child;
+ struct device *dev = &client->dev;
+ struct bd2606mvv_priv *priv;
+ struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 };
+ int active_pairs[BD2606_MAX_LEDS / 2] = { 0 };
+ int err, reg;
+- int i;
++ int i, j;
+
+- np = dev_fwnode(dev);
+- if (!np)
++ if (!dev_fwnode(dev))
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client)
+
+ i2c_set_clientdata(client, priv);
+
+- fwnode_for_each_available_child_node(np, child) {
++ device_for_each_child_node_scoped(dev, child) {
+ struct bd2606mvv_led *led;
+
+ err = fwnode_property_read_u32(child, "reg", &reg);
+- if (err) {
+- fwnode_handle_put(child);
++ if (err)
+ return err;
+- }
+- if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) {
+- fwnode_handle_put(child);
++
++ if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg])
+ return -EINVAL;
+- }
++
+ led = &priv->leds[reg];
+- led_fwnodes[reg] = child;
++ led_fwnodes[reg] = fwnode_handle_get(child);
+ active_pairs[reg / 2]++;
+ led->priv = priv;
+ led->led_no = reg;
+@@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client)
+ &priv->leds[i].ldev,
+ &init_data);
+ if (err < 0) {
+- fwnode_handle_put(child);
++ for (j = i; j < BD2606_MAX_LEDS; j++)
++ fwnode_handle_put(led_fwnodes[j]);
+ return dev_err_probe(dev, err,
+ "couldn't register LED %s\n",
+ priv->leds[i].ldev.name);
+diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
+index 78215dff14997c..11c7bb69573e8c 100644
+--- a/drivers/leds/leds-pca995x.c
++++ b/drivers/leds/leds-pca995x.c
+@@ -19,10 +19,6 @@
+ #define PCA995X_MODE1 0x00
+ #define PCA995X_MODE2 0x01
+ #define PCA995X_LEDOUT0 0x02
+-#define PCA9955B_PWM0 0x08
+-#define PCA9952_PWM0 0x0A
+-#define PCA9952_IREFALL 0x43
+-#define PCA9955B_IREFALL 0x45
+
+ /* Auto-increment disabled. Normal mode */
+ #define PCA995X_MODE1_CFG 0x00
+@@ -34,17 +30,38 @@
+ #define PCA995X_LDRX_MASK 0x3
+ #define PCA995X_LDRX_BITS 2
+
+-#define PCA995X_MAX_OUTPUTS 16
++#define PCA995X_MAX_OUTPUTS 24
+ #define PCA995X_OUTPUTS_PER_REG 4
+
+ #define PCA995X_IREFALL_FULL_CFG 0xFF
+ #define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
+
+-#define PCA995X_TYPE_NON_B 0
+-#define PCA995X_TYPE_B 1
+-
+ #define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+
++struct pca995x_chipdef {
++ unsigned int num_leds;
++ u8 pwm_base;
++ u8 irefall;
++};
++
++static const struct pca995x_chipdef pca9952_chipdef = {
++ .num_leds = 16,
++ .pwm_base = 0x0a,
++ .irefall = 0x43,
++};
++
++static const struct pca995x_chipdef pca9955b_chipdef = {
++ .num_leds = 16,
++ .pwm_base = 0x08,
++ .irefall = 0x45,
++};
++
++static const struct pca995x_chipdef pca9956b_chipdef = {
++ .num_leds = 24,
++ .pwm_base = 0x0a,
++ .irefall = 0x40,
++};
++
+ struct pca995x_led {
+ unsigned int led_no;
+ struct led_classdev ldev;
+@@ -54,7 +71,7 @@ struct pca995x_led {
+ struct pca995x_chip {
+ struct regmap *regmap;
+ struct pca995x_led leds[PCA995X_MAX_OUTPUTS];
+- int btype;
++ const struct pca995x_chipdef *chipdef;
+ };
+
+ static int pca995x_brightness_set(struct led_classdev *led_cdev,
+@@ -62,10 +79,11 @@ static int pca995x_brightness_set(struct led_classdev *led_cdev,
+ {
+ struct pca995x_led *led = ldev_to_led(led_cdev);
+ struct pca995x_chip *chip = led->chip;
++ const struct pca995x_chipdef *chipdef = chip->chipdef;
+ u8 ledout_addr, pwmout_addr;
+ int shift, ret;
+
+- pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no;
++ pwmout_addr = chipdef->pwm_base + led->led_no;
+ ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG);
+ shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
+
+@@ -102,43 +120,38 @@ static const struct regmap_config pca995x_regmap = {
+ static int pca995x_probe(struct i2c_client *client)
+ {
+ struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 };
+- struct fwnode_handle *np, *child;
+ struct device *dev = &client->dev;
++ const struct pca995x_chipdef *chipdef;
+ struct pca995x_chip *chip;
+ struct pca995x_led *led;
+- int i, btype, reg, ret;
++ int i, j, reg, ret;
+
+- btype = (unsigned long)device_get_match_data(&client->dev);
++ chipdef = device_get_match_data(&client->dev);
+
+- np = dev_fwnode(dev);
+- if (!np)
++ if (!dev_fwnode(dev))
+ return -ENODEV;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+- chip->btype = btype;
++ chip->chipdef = chipdef;
+ chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
+
+ i2c_set_clientdata(client, chip);
+
+- fwnode_for_each_available_child_node(np, child) {
++ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+- if (ret) {
+- fwnode_handle_put(child);
++ if (ret)
+ return ret;
+- }
+
+- if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
+- fwnode_handle_put(child);
++ if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg])
+ return -EINVAL;
+- }
+
+ led = &chip->leds[reg];
+- led_fwnodes[reg] = child;
++ led_fwnodes[reg] = fwnode_handle_get(child);
+ led->chip = chip;
+ led->led_no = reg;
+ led->ldev.brightness_set_blocking = pca995x_brightness_set;
+@@ -157,7 +170,8 @@ static int pca995x_probe(struct i2c_client *client)
+ &chip->leds[i].ldev,
+ &init_data);
+ if (ret < 0) {
+- fwnode_handle_put(child);
++ for (j = i; j < PCA995X_MAX_OUTPUTS; j++)
++ fwnode_handle_put(led_fwnodes[j]);
+ return dev_err_probe(dev, ret,
+ "Could not register LED %s\n",
+ chip->leds[i].ldev.name);
+@@ -170,21 +184,21 @@ static int pca995x_probe(struct i2c_client *client)
+ return ret;
+
+ /* IREF Output current value for all LEDn outputs */
+- return regmap_write(chip->regmap,
+- btype ? PCA9955B_IREFALL : PCA9952_IREFALL,
+- PCA995X_IREFALL_HALF_CFG);
++ return regmap_write(chip->regmap, chipdef->irefall, PCA995X_IREFALL_HALF_CFG);
+ }
+
+ static const struct i2c_device_id pca995x_id[] = {
+- { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B },
+- { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B },
++ { "pca9952", .driver_data = (kernel_ulong_t)&pca9952_chipdef },
++ { "pca9955b", .driver_data = (kernel_ulong_t)&pca9955b_chipdef },
++ { "pca9956b", .driver_data = (kernel_ulong_t)&pca9956b_chipdef },
+ {}
+ };
+ MODULE_DEVICE_TABLE(i2c, pca995x_id);
+
+ static const struct of_device_id pca995x_of_match[] = {
+- { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B },
+- { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B },
++ { .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
++	{ .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef },
++ { .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, pca995x_of_match);
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index 419b710984ab62..e1b414b4035347 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -53,8 +53,14 @@ static int led_pwm_set(struct led_classdev *led_cdev,
+ duty = led_dat->pwmstate.period - duty;
+
+ led_dat->pwmstate.duty_cycle = duty;
+- led_dat->pwmstate.enabled = duty > 0;
+- return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
++ /*
++ * Disabling a PWM doesn't guarantee that it emits the inactive level.
++	 * So keep it on; the PWM should only be disabled for suspend, because
++	 * otherwise it refuses to suspend. The possible downside is that the
++ * LED might stay (or even go) on.
++ */
++ led_dat->pwmstate.enabled = !(led_cdev->flags & LED_SUSPENDED);
++ return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate);
+ }
+
+ __attribute__((nonnull))
+diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
+index 9d91f21842f2b5..afe9bff7c7c163 100644
+--- a/drivers/leds/leds-spi-byte.c
++++ b/drivers/leds/leds-spi-byte.c
+@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi)
+ dev_err(dev, "Device must have exactly one LED sub-node.");
+ return -EINVAL;
+ }
+- child = of_get_next_available_child(dev_of_node(dev), NULL);
+
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+@@ -107,11 +106,13 @@ static int spi_byte_probe(struct spi_device *spi)
+ led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
+ led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
+
++ child = of_get_next_available_child(dev_of_node(dev), NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "on")) {
+ led->ldev.brightness = led->ldev.max_brightness;
+ } else if (strcmp(state, "off")) {
++ of_node_put(child);
+ /* all other cases except "off" */
+ dev_err(dev, "default-state can only be 'on' or 'off'");
+ return -EINVAL;
+@@ -122,9 +123,12 @@ static int spi_byte_probe(struct spi_device *spi)
+
+ ret = devm_led_classdev_register(&spi->dev, &led->ldev);
+ if (ret) {
++ of_node_put(child);
+ mutex_destroy(&led->mutex);
+ return ret;
+ }
++
++ of_node_put(child);
+ spi_set_drvdata(spi, led);
+
+ return 0;
+diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
+index fcaa34706b6caa..2ef9fc7371bd1f 100644
+--- a/drivers/leds/leds-ss4200.c
++++ b/drivers/leds/leds-ss4200.c
+@@ -356,8 +356,10 @@ static int ich7_lpc_probe(struct pci_dev *dev,
+
+ nas_gpio_pci_dev = dev;
+ status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
+- if (status)
++ if (status) {
++ status = pcibios_err_to_errno(status);
+ goto out;
++ }
+ g_pm_io_base &= 0x00000ff80;
+
+ status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
+@@ -369,8 +371,9 @@ static int ich7_lpc_probe(struct pci_dev *dev,
+ }
+
+ status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
+- if (0 > status) {
++ if (status) {
+ dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
++ status = pcibios_err_to_errno(status);
+ goto out;
+ }
+ dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
+diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
+index b8a95a917cfa49..b13a547e72c49d 100644
+--- a/drivers/leds/leds-turris-omnia.c
++++ b/drivers/leds/leds-turris-omnia.c
+@@ -2,7 +2,7 @@
+ /*
+ * CZ.NIC's Turris Omnia LEDs driver
+ *
+- * 2020 by Marek Behún <kabel@kernel.org>
++ * 2020, 2023 by Marek Behún <kabel@kernel.org>
+ */
+
+ #include <linux/i2c.h>
+@@ -41,6 +41,37 @@ struct omnia_leds {
+ struct omnia_led leds[];
+ };
+
++static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
++{
++ u8 buf[2] = { cmd, val };
++
++ return i2c_master_send(client, buf, sizeof(buf));
++}
++
++static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
++{
++ struct i2c_msg msgs[2];
++ u8 reply;
++ int ret;
++
++ msgs[0].addr = client->addr;
++ msgs[0].flags = 0;
++ msgs[0].len = 1;
++ msgs[0].buf = &cmd;
++ msgs[1].addr = client->addr;
++ msgs[1].flags = I2C_M_RD;
++ msgs[1].len = 1;
++ msgs[1].buf = &reply;
++
++ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++ if (likely(ret == ARRAY_SIZE(msgs)))
++ return reply;
++ else if (ret < 0)
++ return ret;
++ else
++ return -EIO;
++}
++
+ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ enum led_brightness brightness)
+ {
+@@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
+ if (buf[2] || buf[3] || buf[4])
+ state |= CMD_LED_STATE_ON;
+
+- ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
++ ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
+ if (ret >= 0 && (state & CMD_LED_STATE_ON))
+ ret = i2c_master_send(leds->client, buf, 5);
+
+@@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
+
+ /* put the LED into software mode */
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+- CMD_LED_MODE_LED(led->reg) |
+- CMD_LED_MODE_USER);
++ ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
++ CMD_LED_MODE_LED(led->reg) |
++ CMD_LED_MODE_USER);
+ if (ret < 0) {
+ dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
+ ret);
+@@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
+ }
+
+ /* disable the LED */
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
+- CMD_LED_STATE_LED(led->reg));
++ ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
++ CMD_LED_STATE_LED(led->reg));
+ if (ret < 0) {
+ dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
+ return ret;
+@@ -158,7 +189,7 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+- ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
++ ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
+
+ if (ret < 0)
+ return ret;
+@@ -179,8 +210,7 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
+ if (brightness > 100)
+ return -EINVAL;
+
+- ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
+- (u8)brightness);
++ ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
+
+ return ret < 0 ? ret : count;
+ }
+@@ -237,8 +267,8 @@ static void omnia_leds_remove(struct i2c_client *client)
+ u8 buf[5];
+
+ /* put all LEDs into default (HW triggered) mode */
+- i2c_smbus_write_byte_data(client, CMD_LED_MODE,
+- CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
++ omnia_cmd_write_u8(client, CMD_LED_MODE,
++ CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
+
+ /* set all LEDs color to [255, 255, 255] */
+ buf[0] = CMD_LED_COLOR;
+diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
+index 46cd062b8b24c8..e1a81e0109e8a5 100644
+--- a/drivers/leds/rgb/leds-pwm-multicolor.c
++++ b/drivers/leds/rgb/leds-pwm-multicolor.c
+@@ -51,8 +51,8 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
+
+ priv->leds[i].state.duty_cycle = duty;
+ priv->leds[i].state.enabled = duty > 0;
+- ret = pwm_apply_state(priv->leds[i].pwm,
+- &priv->leds[i].state);
++ ret = pwm_apply_might_sleep(priv->leds[i].pwm,
++ &priv->leds[i].state);
+ if (ret)
+ break;
+ }
+diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
+index 8af4f9bb9cde89..05848a2fecff66 100644
+--- a/drivers/leds/trigger/ledtrig-cpu.c
++++ b/drivers/leds/trigger/ledtrig-cpu.c
+@@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
+
+ static int __init ledtrig_cpu_init(void)
+ {
+- int cpu;
++ unsigned int cpu;
+ int ret;
+
+ /* Supports up to 9999 cpu cores */
+@@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void)
+ if (cpu >= 8)
+ continue;
+
+- snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
++ snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
+
+ led_trigger_register_simple(trig->name, &trig->_trig);
+ }
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 58f3352539e8e5..79719fc8a08fb4 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -221,8 +221,16 @@ static ssize_t device_name_show(struct device *dev,
+ static int set_device_name(struct led_netdev_data *trigger_data,
+ const char *name, size_t size)
+ {
++ if (size >= IFNAMSIZ)
++ return -EINVAL;
++
+ cancel_delayed_work_sync(&trigger_data->work);
+
++ /*
++ * Take RTNL lock before trigger_data lock to prevent potential
++ * deadlock with netdev notifier registration.
++ */
++ rtnl_lock();
+ mutex_lock(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+@@ -242,16 +250,14 @@ static int set_device_name(struct led_netdev_data *trigger_data,
+ trigger_data->carrier_link_up = false;
+ trigger_data->link_speed = SPEED_UNKNOWN;
+ trigger_data->duplex = DUPLEX_UNKNOWN;
+- if (trigger_data->net_dev != NULL) {
+- rtnl_lock();
++ if (trigger_data->net_dev)
+ get_device_state(trigger_data);
+- rtnl_unlock();
+- }
+
+ trigger_data->last_activity = 0;
+
+ set_baseline_state(trigger_data);
+ mutex_unlock(&trigger_data->lock);
++ rtnl_unlock();
+
+ return 0;
+ }
+@@ -263,9 +269,6 @@ static ssize_t device_name_store(struct device *dev,
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ int ret;
+
+- if (size >= IFNAMSIZ)
+- return -EINVAL;
+-
+ ret = set_device_name(trigger_data, buf, size);
+
+ if (ret < 0)
+@@ -459,12 +462,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
+ trigger_data->duplex = DUPLEX_UNKNOWN;
+ switch (evt) {
+ case NETDEV_CHANGENAME:
+- get_device_state(trigger_data);
+- fallthrough;
+ case NETDEV_REGISTER:
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
++ if (evt == NETDEV_CHANGENAME)
++ get_device_state(trigger_data);
+ break;
+ case NETDEV_UNREGISTER:
+ dev_put(trigger_data->net_dev);
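
The set_device_name() hunks above fix a lock-order inversion: netdev notifier callbacks run with the RTNL held and then take trigger_data->lock, so the sysfs store path must acquire the two locks in the same order. Schematically:

/*
 *   notifier path:        rtnl_lock  ->  trigger_data->lock
 *   sysfs path (before):  trigger_data->lock  ->  rtnl_lock   (ABBA)
 *   sysfs path (after):   rtnl_lock  ->  trigger_data->lock
 */
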
+diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
+index 64abf2e91608a5..5a6b21bfeb9af4 100644
+--- a/drivers/leds/trigger/ledtrig-panic.c
++++ b/drivers/leds/trigger/ledtrig-panic.c
+@@ -64,10 +64,13 @@ static long led_panic_blink(int state)
+
+ static int __init ledtrig_panic_init(void)
+ {
++ led_trigger_register_simple("panic", &trigger);
++ if (!trigger)
++ return -ENOMEM;
++
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &led_trigger_panic_nb);
+
+- led_trigger_register_simple("panic", &trigger);
+ panic_blink = led_panic_blink;
+ return 0;
+ }
+diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
+index b4688d1d9d2b24..1d213c999d40a5 100644
+--- a/drivers/leds/trigger/ledtrig-timer.c
++++ b/drivers/leds/trigger/ledtrig-timer.c
+@@ -110,11 +110,6 @@ static int timer_trig_activate(struct led_classdev *led_cdev)
+ led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
+ }
+
+- /*
+- * If "set brightness to 0" is pending in workqueue, we don't
+- * want that to be reordered after blink_set()
+- */
+- flush_work(&led_cdev->set_brightness_work);
+ led_blink_set(led_cdev, &led_cdev->blink_delay_on,
+ &led_cdev->blink_delay_off);
+
+diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
+index 8ae0d2d284aff7..3e69a7bde92840 100644
+--- a/drivers/leds/trigger/ledtrig-tty.c
++++ b/drivers/leds/trigger/ledtrig-tty.c
+@@ -168,6 +168,10 @@ static void ledtrig_tty_deactivate(struct led_classdev *led_cdev)
+
+ cancel_delayed_work_sync(&trigger_data->dwork);
+
++ kfree(trigger_data->ttyname);
++ tty_kref_put(trigger_data->tty);
++ trigger_data->tty = NULL;
++
+ kfree(trigger_data);
+ }
+
+diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
+index 3c1b29476ce24a..5c001105cdd9e2 100644
+--- a/drivers/macintosh/therm_windtunnel.c
++++ b/drivers/macintosh/therm_windtunnel.c
+@@ -551,7 +551,7 @@ g4fan_exit( void )
+ platform_driver_unregister( &therm_of_driver );
+
+ if( x.of_dev )
+- of_device_unregister( x.of_dev );
++ of_platform_device_destroy(&x.of_dev->dev, NULL);
+ }
+
+ module_init(g4fan_init);
+diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
+index db9270da5b8e9c..b6ddf1d47cb4e2 100644
+--- a/drivers/macintosh/via-macii.c
++++ b/drivers/macintosh/via-macii.c
+@@ -140,24 +140,19 @@ static int macii_probe(void)
+ /* Initialize the driver */
+ static int macii_init(void)
+ {
+- unsigned long flags;
+ int err;
+
+- local_irq_save(flags);
+-
+ err = macii_init_via();
+ if (err)
+- goto out;
++ return err;
+
+ err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
+ macii_interrupt);
+ if (err)
+- goto out;
++ return err;
+
+ macii_state = idle;
+-out:
+- local_irq_restore(flags);
+- return err;
++ return 0;
+ }
+
+ /* initialize the hardware */
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index c6d4957c4da83e..0ec21dcdbde723 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -553,7 +553,8 @@ static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
+ priv = chan->con_priv;
+
+ if (!IS_PROTOCOL_DOORBELL(priv)) {
+- writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
++ for (i = 0; i < priv->windows; i++)
++ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + i].int_clr);
+
+ if (chan->cl) {
+ mbox_chan_txdone(chan, 0);
+diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
+index fbfd0202047c37..ea12fb8d24015c 100644
+--- a/drivers/mailbox/bcm2835-mailbox.c
++++ b/drivers/mailbox/bcm2835-mailbox.c
+@@ -145,7 +145,8 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
+ spin_lock_init(&mbox->lock);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+- bcm2835_mbox_irq, 0, dev_name(dev), mbox);
++ bcm2835_mbox_irq, IRQF_NO_SUSPEND, dev_name(dev),
++ mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ ret);
+diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
+index 8ffad059e8984e..4d966cb2ed0367 100644
+--- a/drivers/mailbox/rockchip-mailbox.c
++++ b/drivers/mailbox/rockchip-mailbox.c
+@@ -159,7 +159,7 @@ static const struct of_device_id rockchip_mbox_of_match[] = {
+ { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
+ { },
+ };
+-MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match);
++MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
+
+ static int rockchip_mbox_probe(struct platform_device *pdev)
+ {
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 0cac5bead84fa3..d4eec090098097 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+ return 0;
+
+ out:
++ put_device(&dev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 656b6b71c76823..1ae37e693de045 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ return 0;
+
+ err:
+- put_device(&mdev->dev);
++ mcb_free_dev(mdev);
+
+ return ret;
+ }
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 2a8b081bce7dd8..3ff87cb4dc4948 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -660,6 +660,7 @@ config DM_ZONED
+
+ config DM_AUDIT
+ bool "DM audit events"
++ depends on BLK_DEV_DM
+ depends on AUDIT
+ help
+ Generate audit events for device-mapper.
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 5a79bb3c272f1b..83eb7f27db3d41 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -265,6 +265,7 @@ struct bcache_device {
+ #define BCACHE_DEV_WB_RUNNING 3
+ #define BCACHE_DEV_RATE_DW_RUNNING 4
+ int nr_stripes;
++#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
+ unsigned int stripe_size;
+ atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index 2bba4d6aaaa28c..463eb13bd0b2a7 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b)
+ int __bch_count_data(struct btree_keys *b)
+ {
+ unsigned int ret = 0;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct bkey *k;
+
+ if (b->ops->is_extents)
+@@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+ {
+ va_list args;
+ struct bkey *k, *p = NULL;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ const char *err;
+
+ for_each_key(b, k, &iter) {
+@@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ struct bset *i = bset_tree_last(b)->data;
+ struct bkey *m, *prev = NULL;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;
+
+@@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ else
+ preceding_key(k, &preceding_key_p);
+
+- m = bch_btree_iter_init(b, &iter, preceding_key_p);
++ m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+
+- if (b->ops->insert_fixup(b, k, &iter, replace_key))
++ if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+ return status;
+
+ status = BTREE_INSERT_STATUS_INSERT;
+@@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ btree_iter_cmp));
+ }
+
+-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+- struct btree_iter *iter,
+- struct bkey *search,
+- struct bset_tree *start)
++static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
++ struct btree_iter_stack *iter,
++ struct bkey *search,
++ struct bset_tree *start)
+ {
+ struct bkey *ret = NULL;
+
+- iter->size = ARRAY_SIZE(iter->data);
+- iter->used = 0;
++ iter->iter.size = ARRAY_SIZE(iter->stack_data);
++ iter->iter.used = 0;
+
+ #ifdef CONFIG_BCACHE_DEBUG
+- iter->b = b;
++ iter->iter.b = b;
+ #endif
+
+ for (; start <= bset_tree_last(b); start++) {
+ ret = bch_bset_search(b, start, search);
+- bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
++ bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+ }
+
+ return ret;
+ }
+
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+- struct btree_iter *iter,
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++ struct btree_iter_stack *iter,
+ struct bkey *search)
+ {
+- return __bch_btree_iter_init(b, iter, search, b->set);
++ return __bch_btree_iter_stack_init(b, iter, search, b->set);
+ }
+
+ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+@@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ struct bset_sort_state *state)
+ {
+ size_t order = b->page_order, keys = 0;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ int oldsize = bch_count_data(b);
+
+- __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
++ __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+
+ if (start) {
+ unsigned int i;
+@@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ order = get_order(__set_bytes(b->set->data, keys));
+ }
+
+- __btree_sort(b, &iter, start, order, false, state);
++ __btree_sort(b, &iter.iter, start, order, false, state);
+
+ EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
+ }
+@@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ struct bset_sort_state *state)
+ {
+ uint64_t start_time = local_clock();
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+
+- bch_btree_iter_init(b, &iter, NULL);
++ bch_btree_iter_stack_init(b, &iter, NULL);
+
+- btree_mergesort(b, new->set->data, &iter, false, true);
++ btree_mergesort(b, new->set->data, &iter.iter, false, true);
+
+ bch_time_stats_update(&state->time, start_time);
+
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index d795c84246b018..011f6062c4c04f 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -321,7 +321,14 @@ struct btree_iter {
+ #endif
+ struct btree_iter_set {
+ struct bkey *k, *end;
+- } data[MAX_BSETS];
++ } data[];
++};
++
++/* Fixed-size btree_iter that can be allocated on the stack */
++
++struct btree_iter_stack {
++ struct btree_iter iter;
++ struct btree_iter_set stack_data[MAX_BSETS];
+ };
+
+ typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
+@@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ struct bkey *end);
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+- struct btree_iter *iter,
+- struct bkey *search);
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++ struct btree_iter_stack *iter,
++ struct bkey *search);
+
+ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ const struct bkey *search);
+@@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ return search ? __bch_bset_search(b, t, search) : t->data->start;
+ }
+
+-#define for_each_key_filter(b, k, iter, filter) \
+- for (bch_btree_iter_init((b), (iter), NULL); \
+- ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
++#define for_each_key_filter(b, k, stack_iter, filter) \
++ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
++ ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
++ filter));)
+
+-#define for_each_key(b, k, iter) \
+- for (bch_btree_iter_init((b), (iter), NULL); \
+- ((k) = bch_btree_iter_next(iter));)
++#define for_each_key(b, k, stack_iter) \
++ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
++ ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+
+ /* Sorting */
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index fd121a61f17cca..30d6973de258b8 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -995,6 +995,9 @@ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
+ *
+ * The btree node will have either a read or a write lock held, depending on
+ * level and op->lock.
++ *
++ * Note: Only an error code or a btree pointer will be returned; it is
++ * unnecessary for callers to check for a NULL pointer.
+ */
+ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
+ struct bkey *k, int level, bool write,
+@@ -1106,6 +1109,10 @@ static void btree_node_free(struct btree *b)
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
++/*
++ * Only an error code or a btree pointer will be returned; it is unnecessary
++ * for callers to check for a NULL pointer.
++ */
+ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ int level, bool wait,
+ struct btree *parent)
+@@ -1297,7 +1304,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+ uint8_t stale = 0;
+ unsigned int keys = 0, good_keys = 0;
+ struct bkey *k;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct bset_tree *t;
+
+ gc->nodes++;
+@@ -1363,7 +1370,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ memset(new_nodes, 0, sizeof(new_nodes));
+ closure_init_stack(&cl);
+
+- while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
++ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+ keys += r[nodes++].keys;
+
+ blocks = btree_default_blocks(b->c) * 2 / 3;
+@@ -1510,7 +1517,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ bch_keylist_free(&keylist);
+
+ for (i = 0; i < nodes; i++)
+- if (!IS_ERR(new_nodes[i])) {
++ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
+@@ -1527,6 +1534,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ return 0;
+
+ n = btree_node_alloc_replacement(replace, NULL);
++ if (IS_ERR(n))
++ return 0;
+
+ /* recheck reserve after allocating replacement node */
+ if (btree_check_reserve(b, NULL)) {
+@@ -1556,7 +1565,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ static unsigned int btree_gc_count_keys(struct btree *b)
+ {
+ struct bkey *k;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ unsigned int ret = 0;
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+@@ -1597,17 +1606,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ int ret = 0;
+ bool should_rewrite;
+ struct bkey *k;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct gc_merge_info r[GC_MERGE_NODES];
+ struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
+
+- bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
++ bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+
+ for (i = r; i < r + ARRAY_SIZE(r); i++)
+ i->b = ERR_PTR(-EINTR);
+
+ while (1) {
+- k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
++ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++ bch_ptr_bad);
+ if (k) {
+ r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ true, b);
+@@ -1897,7 +1907,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ {
+ int ret = 0;
+ struct bkey *k, *p = NULL;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+
+ for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ bch_initial_mark_key(b->c, b->level, k);
+@@ -1905,10 +1915,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ bch_initial_mark_key(b->c, b->level + 1, &b->key);
+
+ if (b->level) {
+- bch_btree_iter_init(&b->keys, &iter, NULL);
++ bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+
+ do {
+- k = bch_btree_iter_next_filter(&iter, &b->keys,
++ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad);
+ if (k) {
+ btree_node_prefetch(b, k);
+@@ -1936,7 +1946,7 @@ static int bch_btree_check_thread(void *arg)
+ struct btree_check_info *info = arg;
+ struct btree_check_state *check_state = info->state;
+ struct cache_set *c = check_state->c;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct bkey *k, *p;
+ int cur_idx, prev_idx, skip_nr;
+
+@@ -1945,8 +1955,8 @@ static int bch_btree_check_thread(void *arg)
+ ret = 0;
+
+ /* root node keys are checked before the thread is created */
+- bch_btree_iter_init(&c->root->keys, &iter, NULL);
+- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ BUG_ON(!k);
+
+ p = k;
+@@ -1964,7 +1974,7 @@ static int bch_btree_check_thread(void *arg)
+ skip_nr = cur_idx - prev_idx;
+
+ while (skip_nr) {
+- k = bch_btree_iter_next_filter(&iter,
++ k = bch_btree_iter_next_filter(&iter.iter,
+ &c->root->keys,
+ bch_ptr_bad);
+ if (k)
+@@ -2037,7 +2047,7 @@ int bch_btree_check(struct cache_set *c)
+ int ret = 0;
+ int i;
+ struct bkey *k = NULL;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct btree_check_state check_state;
+
+ /* check and mark root node keys */
+@@ -2533,11 +2543,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+
+ if (b->level) {
+ struct bkey *k;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+
+- bch_btree_iter_init(&b->keys, &iter, from);
++ bch_btree_iter_stack_init(&b->keys, &iter, from);
+
+- while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
++ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad))) {
+ ret = bcache_btree(map_nodes_recurse, k, b,
+ op, from, fn, flags);
+@@ -2566,11 +2576,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ {
+ int ret = MAP_CONTINUE;
+ struct bkey *k;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+
+- bch_btree_iter_init(&b->keys, &iter, from);
++ bch_btree_iter_stack_init(&b->keys, &iter, from);
+
+- while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
++ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++ bch_ptr_bad))) {
+ ret = !b->level
+ ? fn(op, b, k)
+ : bcache_btree(map_keys_recurse, k,
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 0ae2b367629307..fa0c699515b7c9 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
+
+ if (!d->stripe_size)
+ d->stripe_size = 1 << 31;
++ else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
++ d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
+
+ n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
+ if (!n || n > max_stripes) {
+@@ -1911,8 +1913,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ INIT_LIST_HEAD(&c->btree_cache_freed);
+ INIT_LIST_HEAD(&c->data_buckets);
+
+- iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
+- sizeof(struct btree_iter_set);
++ iter_size = sizeof(struct btree_iter) +
++ ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
++ sizeof(struct btree_iter_set);
+
+ c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+ if (!c->devices)
+@@ -2015,7 +2018,7 @@ static int run_cache_set(struct cache_set *c)
+ c->root = bch_btree_node_get(c, NULL, k,
+ j->btree_level,
+ true, NULL);
+- if (IS_ERR_OR_NULL(c->root))
++ if (IS_ERR(c->root))
+ goto err;
+
+ list_del_init(&c->root->list);
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 0e2c1880f60b29..b3a34f3ac081c9 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
+ unsigned int bytes = 0;
+ struct bkey *k;
+ struct btree *b;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+
+ goto lock_root;
+
+@@ -1103,7 +1103,7 @@ SHOW(__bch_cache)
+ sum += INITIAL_PRIO - cached[i];
+
+ if (n)
+- do_div(sum, n);
++ sum = div64_u64(sum, n);
+
+ for (i = 0; i < ARRAY_SIZE(q); i++)
+ q[i] = INITIAL_PRIO - cached[n * (i + 1) /
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 24c049067f61ae..39b498020d935b 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg)
+ struct dirty_init_thrd_info *info = arg;
+ struct bch_dirty_init_state *state = info->state;
+ struct cache_set *c = state->c;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct bkey *k, *p;
+ int cur_idx, prev_idx, skip_nr;
+
+ k = p = NULL;
+- cur_idx = prev_idx = 0;
++ prev_idx = 0;
+
+- bch_btree_iter_init(&c->root->keys, &iter, NULL);
+- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ BUG_ON(!k);
+
+ p = k;
+@@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
+ skip_nr = cur_idx - prev_idx;
+
+ while (skip_nr) {
+- k = bch_btree_iter_next_filter(&iter,
++ k = bch_btree_iter_next_filter(&iter.iter,
+ &c->root->keys,
+ bch_ptr_bad);
+ if (k)
+@@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
+ void bch_sectors_dirty_init(struct bcache_device *d)
+ {
+ int i;
++ struct btree *b = NULL;
+ struct bkey *k = NULL;
+- struct btree_iter iter;
++ struct btree_iter_stack iter;
+ struct sectors_dirty_init op;
+ struct cache_set *c = d->c;
+ struct bch_dirty_init_state state;
+
++retry_lock:
++ b = c->root;
++ rw_lock(0, b, b->level);
++ if (b != c->root) {
++ rw_unlock(0, b);
++ goto retry_lock;
++ }
++
+ /* Just count root keys if no leaf node */
+- rw_lock(0, c->root, c->root->level);
+ if (c->root->level == 0) {
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+
+ for_each_key_filter(&c->root->keys,
+- k, &iter, bch_ptr_invalid)
++ k, &iter, bch_ptr_invalid) {
++ if (KEY_INODE(k) != op.inode)
++ continue;
+ sectors_dirty_init_fn(&op.op, c->root, k);
++ }
+
+- rw_unlock(0, c->root);
++ rw_unlock(0, b);
+ return;
+ }
+
+@@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
+ if (atomic_read(&state.enough))
+ break;
+
++ atomic_inc(&state.started);
+ state.infos[i].state = &state;
+ state.infos[i].thread =
+ kthread_run(bch_dirty_init_thread, &state.infos[i],
+ "bch_dirtcnt[%d]", i);
+ if (IS_ERR(state.infos[i].thread)) {
+ pr_err("fails to run thread bch_dirty_init[%d]\n", i);
++ atomic_dec(&state.started);
+ for (--i; i >= 0; i--)
+ kthread_stop(state.infos[i].thread);
+ goto out;
+ }
+- atomic_inc(&state.started);
+ }
+
+ out:
+ /* Must wait for all threads to stop. */
+ wait_event(state.wait, atomic_read(&state.started) == 0);
+- rw_unlock(0, c->root);
++ rw_unlock(0, b);
+ }
+
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index bc309e41d074ae..f1345781c861a8 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -254,7 +254,7 @@ enum evict_result {
+
+ typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
+
+-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
++static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
+ {
+ unsigned long tested = 0;
+ struct list_head *h = lru->cursor;
+@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
+
+ h = h->next;
+
+- cond_resched();
++ if (!no_sleep)
++ cond_resched();
+ }
+
+ return NULL;
+@@ -382,7 +383,10 @@ struct dm_buffer {
+ */
+
+ struct buffer_tree {
+- struct rw_semaphore lock;
++ union {
++ struct rw_semaphore lock;
++ rwlock_t spinlock;
++ } u;
+ struct rb_root root;
+ } ____cacheline_aligned_in_smp;
+
+@@ -393,9 +397,12 @@ struct dm_buffer_cache {
+ * on the locks.
+ */
+ unsigned int num_locks;
++ bool no_sleep;
+ struct buffer_tree trees[];
+ };
+
++static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
++
+ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+ {
+ return dm_hash_locks_index(block, num_locks);
+@@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+
+ static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ /*
+@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
+
+ static void __lh_lock(struct lock_history *lh, unsigned int index)
+ {
+- if (lh->write)
+- down_write(&lh->cache->trees[index].lock);
+- else
+- down_read(&lh->cache->trees[index].lock);
++ if (lh->write) {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ write_lock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ down_write(&lh->cache->trees[index].u.lock);
++ } else {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ read_lock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ down_read(&lh->cache->trees[index].u.lock);
++ }
+ }
+
+ static void __lh_unlock(struct lock_history *lh, unsigned int index)
+ {
+- if (lh->write)
+- up_write(&lh->cache->trees[index].lock);
+- else
+- up_read(&lh->cache->trees[index].lock);
++ if (lh->write) {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ write_unlock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ up_write(&lh->cache->trees[index].u.lock);
++ } else {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ read_unlock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ up_read(&lh->cache->trees[index].u.lock);
++ }
+ }
+
+ /*
+@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
+ return le_to_buffer(le);
+ }
+
+-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
++static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
+ {
+ unsigned int i;
+
+ bc->num_locks = num_locks;
++ bc->no_sleep = no_sleep;
+
+ for (i = 0; i < bc->num_locks; i++) {
+- init_rwsem(&bc->trees[i].lock);
++ if (no_sleep)
++ rwlock_init(&bc->trees[i].u.spinlock);
++ else
++ init_rwsem(&bc->trees[i].u.lock);
+ bc->trees[i].root = RB_ROOT;
+ }
+
+@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
+ struct lru_entry *le;
+ struct dm_buffer *b;
+
+- le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
++ le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
+ if (!le)
+ return NULL;
+
+@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
+ struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
+
+ while (true) {
+- le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
++ le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
+ if (!le)
+ break;
+
+@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
+ {
+ unsigned int i;
+
++ BUG_ON(bc->no_sleep);
+ for (i = 0; i < bc->num_locks; i++) {
+- down_write(&bc->trees[i].lock);
++ down_write(&bc->trees[i].u.lock);
+ __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
+- up_write(&bc->trees[i].lock);
++ up_write(&bc->trees[i].u.lock);
+ }
+ }
+
+@@ -979,8 +1017,6 @@ struct dm_bufio_client {
+ struct dm_buffer_cache cache; /* must be last member */
+ };
+
+-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+-
+ /*----------------------------------------------------------------*/
+
+ #define dm_bufio_in_request() (!!current->bio_list)
+@@ -1279,7 +1315,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
+ io_req.mem.ptr.vma = (char *)b->data + offset;
+ }
+
+- r = dm_io(&io_req, 1, &region, NULL);
++ r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r))
+ b->end_io(b, errno_to_blk_status(r));
+ }
+@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
+ if (need_submit)
+ submit_io(b, REQ_OP_READ, read_endio);
+
+- wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
++ if (nf != NF_GET) /* we already tested this condition above */
++ wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+
+ if (b->read_error) {
+ int error = blk_status_to_errno(b->read_error);
+@@ -2130,7 +2167,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
+ if (WARN_ON_ONCE(dm_bufio_in_request()))
+ return -EINVAL;
+
+- return dm_io(&io_req, 1, &io_reg, NULL);
++ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
+
+@@ -2154,7 +2191,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
+ if (WARN_ON_ONCE(dm_bufio_in_request()))
+ return -EINVAL; /* discards are optional */
+
+- return dm_io(&io_req, 1, &io_reg, NULL);
++ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
+
+@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ r = -ENOMEM;
+ goto bad_client;
+ }
+- cache_init(&c->cache, num_locks);
++ cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
+
+ c->bdev = bdev;
+ c->block_size = block_size;
+diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
+index c43d55672bce03..47c1fa7aad8b5b 100644
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -465,11 +465,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd)
+
+ /*---------------------------------------------------------------------------*/
+
+-static size_t bitmap_size(unsigned long nr_bits)
+-{
+- return BITS_TO_LONGS(nr_bits) * sizeof(long);
+-}
+-
+ static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
+ unsigned long nr_regions)
+ {
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 095b9b49aa8250..e6757a30dccad1 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -22,6 +22,8 @@
+ #include "dm-ima.h"
+
+ #define DM_RESERVED_MAX_IOS 1024
++#define DM_MAX_TARGETS 1048576
++#define DM_MAX_TARGET_PARAMS 1024
+
+ struct dm_io;
+
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 5315fd261c23b7..aa6bb5b4704ba6 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -53,15 +53,17 @@
+ struct convert_context {
+ struct completion restart;
+ struct bio *bio_in;
+- struct bio *bio_out;
+ struct bvec_iter iter_in;
++ struct bio *bio_out;
+ struct bvec_iter iter_out;
+- u64 cc_sector;
+ atomic_t cc_pending;
++ u64 cc_sector;
+ union {
+ struct skcipher_request *req;
+ struct aead_request *req_aead;
+ } r;
++ bool aead_recheck;
++ bool aead_failed;
+
+ };
+
+@@ -73,10 +75,8 @@ struct dm_crypt_io {
+ struct bio *base_bio;
+ u8 *integrity_metadata;
+ bool integrity_metadata_from_pool:1;
+- bool in_tasklet:1;
+
+ struct work_struct work;
+- struct tasklet_struct tasklet;
+
+ struct convert_context ctx;
+
+@@ -84,6 +84,8 @@ struct dm_crypt_io {
+ blk_status_t error;
+ sector_t sector;
+
++ struct bvec_iter saved_bi_iter;
++
+ struct rb_node rb_node;
+ } CRYPTO_MINALIGN_ATTR;
+
+@@ -1378,10 +1380,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
+ if (r == -EBADMSG) {
+ sector_t s = le64_to_cpu(*sector);
+
+- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+- ctx->bio_in->bi_bdev, s);
+- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+- ctx->bio_in, s, 0);
++ ctx->aead_failed = true;
++ if (ctx->aead_recheck) {
++ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
++ ctx->bio_in->bi_bdev, s);
++ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
++ ctx->bio_in, s, 0);
++ }
+ }
+
+ if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+@@ -1679,7 +1684,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
+ unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+ unsigned int remaining_size;
+- unsigned int order = MAX_ORDER - 1;
++ unsigned int order = MAX_ORDER;
+
+ retry:
+ if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
+@@ -1699,11 +1704,17 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
+ order = min(order, remaining_order);
+
+ while (order > 0) {
++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
++ (1 << order) > dm_crypt_pages_per_client))
++ goto decrease_order;
+ pages = alloc_pages(gfp_mask
+ | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+ order);
+- if (likely(pages != NULL))
++ if (likely(pages != NULL)) {
++ percpu_counter_add(&cc->n_allocated_pages, 1 << order);
+ goto have_pages;
++ }
++decrease_order:
+ order--;
+ }
+
+@@ -1741,10 +1752,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
+
+ if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
+ bio_for_each_folio_all(fi, clone) {
+- if (folio_test_large(fi.folio))
++ if (folio_test_large(fi.folio)) {
++ percpu_counter_sub(&cc->n_allocated_pages,
++ 1 << folio_order(fi.folio));
+ folio_put(fi.folio);
+- else
++ } else {
+ mempool_free(&fi.folio->page, &cc->page_pool);
++ }
+ }
+ }
+ }
+@@ -1756,10 +1770,11 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ io->base_bio = bio;
+ io->sector = sector;
+ io->error = 0;
++ io->ctx.aead_recheck = false;
++ io->ctx.aead_failed = false;
+ io->ctx.r.req = NULL;
+ io->integrity_metadata = NULL;
+ io->integrity_metadata_from_pool = false;
+- io->in_tasklet = false;
+ atomic_set(&io->io_pending, 0);
+ }
+
+@@ -1768,12 +1783,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
+ atomic_inc(&io->io_pending);
+ }
+
+-static void kcryptd_io_bio_endio(struct work_struct *work)
+-{
+- struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+-
+- bio_endio(io->base_bio);
+-}
++static void kcryptd_queue_read(struct dm_crypt_io *io);
+
+ /*
+ * One of the bios was finished. Check for completion of
+@@ -1788,6 +1798,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ if (!atomic_dec_and_test(&io->io_pending))
+ return;
+
++ if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
++ cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
++ io->ctx.aead_recheck = true;
++ io->ctx.aead_failed = false;
++ io->error = 0;
++ kcryptd_queue_read(io);
++ return;
++ }
++
+ if (io->ctx.r.req)
+ crypt_free_req(cc, io->ctx.r.req, base_bio);
+
+@@ -1798,20 +1817,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+
+ base_bio->bi_status = error;
+
+- /*
+- * If we are running this function from our tasklet,
+- * we can't call bio_endio() here, because it will call
+- * clone_endio() from dm.c, which in turn will
+- * free the current struct dm_crypt_io structure with
+- * our tasklet. In this case we need to delay bio_endio()
+- * execution to after the tasklet is done and dequeued.
+- */
+- if (io->in_tasklet) {
+- INIT_WORK(&io->work, kcryptd_io_bio_endio);
+- queue_work(cc->io_queue, &io->work);
+- return;
+- }
+-
+ bio_endio(base_bio);
+ }
+
+@@ -1837,15 +1842,19 @@ static void crypt_endio(struct bio *clone)
+ struct dm_crypt_io *io = clone->bi_private;
+ struct crypt_config *cc = io->cc;
+ unsigned int rw = bio_data_dir(clone);
+- blk_status_t error;
++ blk_status_t error = clone->bi_status;
++
++ if (io->ctx.aead_recheck && !error) {
++ kcryptd_queue_crypt(io);
++ return;
++ }
+
+ /*
+ * free the processed pages
+ */
+- if (rw == WRITE)
++ if (rw == WRITE || io->ctx.aead_recheck)
+ crypt_free_buffer_pages(cc, clone);
+
+- error = clone->bi_status;
+ bio_put(clone);
+
+ if (rw == READ && !error) {
+@@ -1866,6 +1875,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+ struct crypt_config *cc = io->cc;
+ struct bio *clone;
+
++ if (io->ctx.aead_recheck) {
++ if (!(gfp & __GFP_DIRECT_RECLAIM))
++ return 1;
++ crypt_inc_pending(io);
++ clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
++ if (unlikely(!clone)) {
++ crypt_dec_pending(io);
++ return 1;
++ }
++ clone->bi_iter.bi_sector = cc->start + io->sector;
++ crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
++ io->saved_bi_iter = clone->bi_iter;
++ dm_submit_bio_remap(io->base_bio, clone);
++ return 0;
++ }
++
+ /*
+ * We need the original biovec array in order to decrypt the whole bio
+ * data *afterwards* -- thanks to immutable biovecs we don't need to
+@@ -2092,6 +2117,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ io->ctx.bio_out = clone;
+ io->ctx.iter_out = clone->bi_iter;
+
++ if (crypt_integrity_aead(cc)) {
++ bio_copy_data(clone, io->base_bio);
++ io->ctx.bio_in = clone;
++ io->ctx.iter_in = clone->bi_iter;
++ }
++
+ sector += bio_sectors(clone);
+
+ crypt_inc_pending(io);
+@@ -2128,6 +2159,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+
+ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
+ {
++ if (io->ctx.aead_recheck) {
++ if (!io->error) {
++ io->ctx.bio_in->bi_iter = io->saved_bi_iter;
++ bio_copy_data(io->base_bio, io->ctx.bio_in);
++ }
++ crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
++ bio_put(io->ctx.bio_in);
++ }
+ crypt_dec_pending(io);
+ }
+
+@@ -2157,11 +2196,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
+
+ crypt_inc_pending(io);
+
+- crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+- io->sector);
++ if (io->ctx.aead_recheck) {
++ io->ctx.cc_sector = io->sector + cc->iv_offset;
++ r = crypt_convert(cc, &io->ctx,
++ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++ } else {
++ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
++ io->sector);
+
+- r = crypt_convert(cc, &io->ctx,
+- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++ r = crypt_convert(cc, &io->ctx,
++ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++ }
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+@@ -2203,10 +2248,13 @@ static void kcryptd_async_done(void *data, int error)
+ if (error == -EBADMSG) {
+ sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
+
+- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+- ctx->bio_in->bi_bdev, s);
+- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+- ctx->bio_in, s, 0);
++ ctx->aead_failed = true;
++ if (ctx->aead_recheck) {
++ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
++ ctx->bio_in->bi_bdev, s);
++ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
++ ctx->bio_in, s, 0);
++ }
+ io->error = BLK_STS_PROTECTION;
+ } else if (error < 0)
+ io->error = BLK_STS_IOERR;
+@@ -2243,11 +2291,6 @@ static void kcryptd_crypt(struct work_struct *work)
+ kcryptd_crypt_write_convert(io);
+ }
+
+-static void kcryptd_crypt_tasklet(unsigned long work)
+-{
+- kcryptd_crypt((struct work_struct *)work);
+-}
+-
+ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+ {
+ struct crypt_config *cc = io->cc;
+@@ -2259,15 +2302,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+ * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+ * it is being executed with irqs disabled.
+ */
+- if (in_hardirq() || irqs_disabled()) {
+- io->in_tasklet = true;
+- tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
+- tasklet_schedule(&io->tasklet);
++ if (!(in_hardirq() || irqs_disabled())) {
++ kcryptd_crypt(&io->work);
+ return;
+ }
+-
+- kcryptd_crypt(&io->work);
+- return;
+ }
+
+ INIT_WORK(&io->work, kcryptd_crypt);
+@@ -3142,7 +3180,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
+ sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
+ if (!strcasecmp(sval, "aead")) {
+ set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
+- } else if (strcasecmp(sval, "none")) {
++ } else if (strcasecmp(sval, "none")) {
+ ti->error = "Unknown integrity profile";
+ return -EINVAL;
+ }
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 7433525e598560..3726fae3006e3d 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -31,7 +31,7 @@ struct delay_c {
+ struct workqueue_struct *kdelayd_wq;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+- atomic_t may_delay;
++ bool may_delay;
+
+ struct delay_class read;
+ struct delay_class write;
+@@ -192,7 +192,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+ INIT_LIST_HEAD(&dc->delayed_bios);
+ mutex_init(&dc->timer_lock);
+- atomic_set(&dc->may_delay, 1);
++ dc->may_delay = true;
+ dc->argc = argc;
+
+ ret = delay_class_ctr(ti, &dc->read, argv);
+@@ -247,7 +247,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ struct dm_delay_info *delayed;
+ unsigned long expires = 0;
+
+- if (!c->delay || !atomic_read(&dc->may_delay))
++ if (!c->delay)
+ return DM_MAPIO_REMAPPED;
+
+ delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
+@@ -256,6 +256,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
+ delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
+
+ mutex_lock(&delayed_bios_lock);
++ if (unlikely(!dc->may_delay)) {
++ mutex_unlock(&delayed_bios_lock);
++ return DM_MAPIO_REMAPPED;
++ }
+ c->ops++;
+ list_add_tail(&delayed->list, &dc->delayed_bios);
+ mutex_unlock(&delayed_bios_lock);
+@@ -269,7 +273,10 @@ static void delay_presuspend(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- atomic_set(&dc->may_delay, 0);
++ mutex_lock(&delayed_bios_lock);
++ dc->may_delay = false;
++ mutex_unlock(&delayed_bios_lock);
++
+ del_timer_sync(&dc->delay_timer);
+ flush_bios(flush_delayed_bios(dc, 1));
+ }
+@@ -278,7 +285,7 @@ static void delay_resume(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- atomic_set(&dc->may_delay, 1);
++ dc->may_delay = true;
+ }
+
+ static int delay_map(struct dm_target *ti, struct bio *bio)
+diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
+index 2a71bcdba92d14..b37bbe76250034 100644
+--- a/drivers/md/dm-init.c
++++ b/drivers/md/dm-init.c
+@@ -212,8 +212,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
+ strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
+ /* minor */
+ if (strlen(field[2])) {
+- if (kstrtoull(field[2], 0, &dev->dmi.dev))
++ if (kstrtoull(field[2], 0, &dev->dmi.dev) ||
++ dev->dmi.dev >= (1 << MINORBITS))
+ return ERR_PTR(-EINVAL);
++ dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev);
+ dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
+ }
+ /* flags */
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 97a8d5fc9ebb65..a36dd749c688e1 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -278,6 +278,8 @@ struct dm_integrity_c {
+
+ atomic64_t number_of_mismatches;
+
++ mempool_t recheck_pool;
++
+ struct notifier_block reboot_notifier;
+ };
+
+@@ -563,7 +565,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
+ }
+ }
+
+- r = dm_io(&io_req, 1, &io_loc, NULL);
++ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r))
+ return r;
+
+@@ -1081,7 +1083,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+ io_loc.sector = ic->start + SB_SECTORS + sector;
+ io_loc.count = n_sectors;
+
+- r = dm_io(&io_req, 1, &io_loc, NULL);
++ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
+ "reading journal" : "writing journal", r);
+@@ -1198,7 +1200,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
+ io_loc.sector = target;
+ io_loc.count = n_sectors;
+
+- r = dm_io(&io_req, 1, &io_loc, NULL);
++ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r)) {
+ WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
+ fn(-1UL, data);
+@@ -1527,7 +1529,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
+ fr.io_reg.count = 0,
+ fr.ic = ic;
+ init_completion(&fr.comp);
+- r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
++ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
+ BUG_ON(r);
+ }
+
+@@ -1699,6 +1701,85 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
+ get_random_bytes(result, ic->tag_size);
+ }
+
++static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
++{
++ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
++ struct dm_integrity_c *ic = dio->ic;
++ struct bvec_iter iter;
++ struct bio_vec bv;
++ sector_t sector, logical_sector, area, offset;
++ struct page *page;
++
++ get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
++ dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
++ &dio->metadata_offset);
++ sector = get_data_sector(ic, area, offset);
++ logical_sector = dio->range.logical_sector;
++
++ page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
++
++ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++ unsigned pos = 0;
++
++ do {
++ sector_t alignment;
++ char *mem;
++ char *buffer = page_to_virt(page);
++ int r;
++ struct dm_io_request io_req;
++ struct dm_io_region io_loc;
++ io_req.bi_opf = REQ_OP_READ;
++ io_req.mem.type = DM_IO_KMEM;
++ io_req.mem.ptr.addr = buffer;
++ io_req.notify.fn = NULL;
++ io_req.client = ic->io;
++ io_loc.bdev = ic->dev->bdev;
++ io_loc.sector = sector;
++ io_loc.count = ic->sectors_per_block;
++
++ /* Align the bio to logical block size */
++ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
++ alignment &= -alignment;
++ io_loc.sector = round_down(io_loc.sector, alignment);
++ io_loc.count += sector - io_loc.sector;
++ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
++ io_loc.count = round_up(io_loc.count, alignment);
++
++ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
++ if (unlikely(r)) {
++ dio->bi_status = errno_to_blk_status(r);
++ goto free_ret;
++ }
++
++ integrity_sector_checksum(ic, logical_sector, buffer, checksum);
++ r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
++ &dio->metadata_offset, ic->tag_size, TAG_CMP);
++ if (r) {
++ if (r > 0) {
++ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
++ bio->bi_bdev, logical_sector);
++ atomic64_inc(&ic->number_of_mismatches);
++ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
++ bio, logical_sector, 0);
++ r = -EILSEQ;
++ }
++ dio->bi_status = errno_to_blk_status(r);
++ goto free_ret;
++ }
++
++ mem = bvec_kmap_local(&bv);
++ memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
++ kunmap_local(mem);
++
++ pos += ic->sectors_per_block << SECTOR_SHIFT;
++ sector += ic->sectors_per_block;
++ logical_sector += ic->sectors_per_block;
++ } while (pos < bv.bv_len);
++ }
++free_ret:
++ mempool_free(page, &ic->recheck_pool);
++}
++
+ static void integrity_metadata(struct work_struct *w)
+ {
+ struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+@@ -1765,11 +1846,12 @@ static void integrity_metadata(struct work_struct *w)
+ sectors_to_process = dio->range.n_sectors;
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++ struct bio_vec bv_copy = bv;
+ unsigned int pos;
+ char *mem, *checksums_ptr;
+
+ again:
+- mem = bvec_kmap_local(&bv);
++ mem = bvec_kmap_local(&bv_copy);
+ pos = 0;
+ checksums_ptr = checksums;
+ do {
+@@ -1778,34 +1860,27 @@ static void integrity_metadata(struct work_struct *w)
+ sectors_to_process -= ic->sectors_per_block;
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+- } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
++ } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
+ kunmap_local(mem);
+
+ r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+ checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
+ if (unlikely(r)) {
+- if (r > 0) {
+- sector_t s;
+-
+- s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
+- DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+- bio->bi_bdev, s);
+- r = -EILSEQ;
+- atomic64_inc(&ic->number_of_mismatches);
+- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+- bio, s, 0);
+- }
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
++ if (r > 0) {
++ integrity_recheck(dio, checksums_onstack);
++ goto skip_io;
++ }
+ goto error;
+ }
+
+ if (!sectors_to_process)
+ break;
+
+- if (unlikely(pos < bv.bv_len)) {
+- bv.bv_offset += pos;
+- bv.bv_len -= pos;
++ if (unlikely(pos < bv_copy.bv_len)) {
++ bv_copy.bv_offset += pos;
++ bv_copy.bv_len -= pos;
+ goto again;
+ }
+ }
+@@ -2108,6 +2183,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ unsigned int journal_section, journal_entry;
+ unsigned int journal_read_pos;
++ sector_t recalc_sector;
+ struct completion read_comp;
+ bool discard_retried = false;
+ bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+@@ -2248,6 +2324,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ goto lock_retry;
+ }
+ }
++ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ if (unlikely(journal_read_pos != NOT_FOUND)) {
+@@ -2302,7 +2379,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ if (need_sync_io) {
+ wait_for_completion_io(&read_comp);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
++ dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
+ goto skip_check;
+ if (ic->mode == 'B') {
+ if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
+@@ -2749,7 +2826,7 @@ static void integrity_recalc(struct work_struct *w)
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = n_sectors;
+
+- r = dm_io(&io_req, 1, &io_loc, NULL);
++ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+@@ -4156,7 +4233,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
+ log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
+ } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
+- if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
++ if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ r = -EINVAL;
+ ti->error = "Invalid bitmap_flush_interval argument";
+ goto bad;
+@@ -4270,6 +4347,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ goto bad;
+ }
+
++ r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
++ if (r) {
++ ti->error = "Cannot allocate mempool";
++ goto bad;
++ }
++
+ ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
+ WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
+ if (!ic->metadata_wq) {
+@@ -4618,6 +4701,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ kvfree(ic->bbs);
+ if (ic->bufio)
+ dm_bufio_client_destroy(ic->bufio);
++ mempool_exit(&ic->recheck_pool);
+ mempool_exit(&ic->journal_io_mempool);
+ if (ic->io)
+ dm_io_client_destroy(ic->io);
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index f053ce2458147c..7409490259d1d4 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -305,7 +305,7 @@ static void km_dp_init(struct dpages *dp, void *data)
+ */
+ static void do_region(const blk_opf_t opf, unsigned int region,
+ struct dm_io_region *where, struct dpages *dp,
+- struct io *io)
++ struct io *io, unsigned short ioprio)
+ {
+ struct bio *bio;
+ struct page *page;
+@@ -354,6 +354,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
+ &io->client->bios);
+ bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
+ bio->bi_end_io = endio;
++ bio->bi_ioprio = ioprio;
+ store_io_and_region_in_bio(bio, io, region);
+
+ if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
+@@ -383,7 +384,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
+
+ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
+ struct dm_io_region *where, struct dpages *dp,
+- struct io *io, int sync)
++ struct io *io, int sync, unsigned short ioprio)
+ {
+ int i;
+ struct dpages old_pages = *dp;
+@@ -400,7 +401,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
+ for (i = 0; i < num_regions; i++) {
+ *dp = old_pages;
+ if (where[i].count || (opf & REQ_PREFLUSH))
+- do_region(opf, i, where + i, dp, io);
++ do_region(opf, i, where + i, dp, io, ioprio);
+ }
+
+ /*
+@@ -425,7 +426,7 @@ static void sync_io_complete(unsigned long error, void *context)
+
+ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
+- unsigned long *error_bits)
++ unsigned long *error_bits, unsigned short ioprio)
+ {
+ struct io *io;
+ struct sync_io sio;
+@@ -447,7 +448,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
+- dispatch_io(opf, num_regions, where, dp, io, 1);
++ dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
+
+ wait_for_completion_io(&sio.wait);
+
+@@ -459,7 +460,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+
+ static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ struct dm_io_region *where, blk_opf_t opf,
+- struct dpages *dp, io_notify_fn fn, void *context)
++ struct dpages *dp, io_notify_fn fn, void *context,
++ unsigned short ioprio)
+ {
+ struct io *io;
+
+@@ -479,7 +481,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
+- dispatch_io(opf, num_regions, where, dp, io, 0);
++ dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
+ return 0;
+ }
+
+@@ -521,7 +523,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ }
+
+ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+- struct dm_io_region *where, unsigned long *sync_error_bits)
++ struct dm_io_region *where, unsigned long *sync_error_bits,
++ unsigned short ioprio)
+ {
+ int r;
+ struct dpages dp;
+@@ -532,11 +535,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+
+ if (!io_req->notify.fn)
+ return sync_io(io_req->client, num_regions, where,
+- io_req->bi_opf, &dp, sync_error_bits);
++ io_req->bi_opf, &dp, sync_error_bits, ioprio);
+
+ return async_io(io_req->client, num_regions, where,
+ io_req->bi_opf, &dp, io_req->notify.fn,
+- io_req->notify.context);
++ io_req->notify.context, ioprio);
+ }
+ EXPORT_SYMBOL(dm_io);
+
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 21ebb6c39394b6..5bb76aab7755ff 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1181,8 +1181,26 @@ static int do_resume(struct dm_ioctl *param)
+ suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+ if (param->flags & DM_NOFLUSH_FLAG)
+ suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+- if (!dm_suspended_md(md))
+- dm_suspend(md, suspend_flags);
++ if (!dm_suspended_md(md)) {
++ r = dm_suspend(md, suspend_flags);
++ if (r) {
++ down_write(&_hash_lock);
++ hc = dm_get_mdptr(md);
++ if (hc && !hc->new_map) {
++ hc->new_map = new_map;
++ new_map = NULL;
++ } else {
++ r = -ENXIO;
++ }
++ up_write(&_hash_lock);
++ if (new_map) {
++ dm_sync_table(md);
++ dm_table_destroy(new_map);
++ }
++ dm_put(md);
++ return r;
++ }
++ }
+
+ old_size = dm_get_size(md);
+ old_map = dm_swap_table(md, new_map);
+@@ -1941,7 +1959,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
+ minimum_data_size - sizeof(param_kernel->version)))
+ return -EFAULT;
+
+- if (param_kernel->data_size < minimum_data_size) {
++ if (unlikely(param_kernel->data_size < minimum_data_size) ||
++ unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
+ DMERR("Invalid data size in the ioctl structure: %u",
+ param_kernel->data_size);
+ return -EINVAL;
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index d01807c50f20b9..79c65c9ad5fa81 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -578,9 +578,9 @@ static int run_io_job(struct kcopyd_job *job)
+ io_job_start(job->kc->throttle);
+
+ if (job->op == REQ_OP_READ)
+- r = dm_io(&io_req, 1, &job->source, NULL);
++ r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
+ else
+- r = dm_io(&io_req, job->num_dests, job->dests, NULL);
++ r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
+
+ return r;
+ }
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
+index f9f84236dfcd75..f7f9c2100937ba 100644
+--- a/drivers/md/dm-log.c
++++ b/drivers/md/dm-log.c
+@@ -300,7 +300,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
+ {
+ lc->io_req.bi_opf = op;
+
+- return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
++ return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
+ }
+
+ static int flush_header(struct log_c *lc)
+@@ -313,7 +313,7 @@ static int flush_header(struct log_c *lc)
+
+ lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+- return dm_io(&lc->io_req, 1, &null_location, NULL);
++ return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
+ }
+
+ static int read_header(struct log_c *log)
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 5f9991765f270a..385e24f55ec002 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3322,14 +3322,14 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
+ struct mddev *mddev = &rs->md;
+
+ /*
+- * If we're reshaping to add disk(s)), ti->len and
++ * If we're reshaping to add disk(s), ti->len and
+ * mddev->array_sectors will differ during the process
+ * (ti->len > mddev->array_sectors), so we have to requeue
+ * bios with addresses > mddev->array_sectors here or
+ * there will occur accesses past EOD of the component
+ * data images thus erroring the raid set.
+ */
+- if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
++ if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
+ return DM_MAPIO_REQUEUE;
+
+ md_handle_request(mddev, bio);
+@@ -4042,7 +4042,9 @@ static void raid_resume(struct dm_target *ti)
+ * Take this opportunity to check whether any failed
+ * devices are reachable again.
+ */
++ mddev_lock_nointr(mddev);
+ attempt_restore_of_faulty_devices(rs);
++ mddev_unlock(mddev);
+ }
+
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index ddcb2bc4a6179a..9511dae5b556a9 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -278,7 +278,7 @@ static int mirror_flush(struct dm_target *ti)
+ }
+
+ error_bits = -1;
+- dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
++ dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
+ if (unlikely(error_bits != 0)) {
+ for (i = 0; i < ms->nr_mirrors; i++)
+ if (test_bit(i, &error_bits))
+@@ -554,7 +554,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
+
+ map_region(&io, m, bio);
+ bio_set_m(bio, m);
+- BUG_ON(dm_io(&io_req, 1, &io, NULL));
++ BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
+ }
+
+ static inline int region_in_sync(struct mirror_set *ms, region_t region,
+@@ -681,7 +681,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
+ */
+ bio_set_m(bio, get_default_mirror(ms));
+
+- BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
++ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
+ }
+
+ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index f7e9a3632eb3d9..499f8cc8a39fbf 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
++ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
++ dm_device_name(md));
+ dm_put_live_table(md, srcu_idx);
+- return BLK_STS_RESOURCE;
++ return BLK_STS_IOERR;
+ }
+ ti = dm_table_find_target(map, 0);
+ dm_put_live_table(md, srcu_idx);
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 15649921f2a9b1..568d10842b1f46 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -223,7 +223,7 @@ static void do_metadata(struct work_struct *work)
+ {
+ struct mdata_req *req = container_of(work, struct mdata_req, work);
+
+- req->result = dm_io(req->io_req, 1, req->where, NULL);
++ req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
+ }
+
+ /*
+@@ -247,7 +247,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
+ struct mdata_req req;
+
+ if (!metadata)
+- return dm_io(&io_req, 1, &where, NULL);
++ return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
+
+ req.where = &where;
+ req.io_req = &io_req;
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index bf7a574499a34d..0ace06d1bee384 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -684,8 +684,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
+ for (i = 0; i < size; i++) {
+ slot = et->table + i;
+
+- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
++ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
+ kmem_cache_free(mem, ex);
++ cond_resched();
++ }
+ }
+
+ kvfree(et->table);
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 37b48f63ae6a5e..fd84e06670e8d7 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
+ int dm_table_create(struct dm_table **result, blk_mode_t mode,
+ unsigned int num_targets, struct mapped_device *md)
+ {
+- struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
++ struct dm_table *t;
++
++ if (num_targets > DM_MAX_TARGETS)
++ return -EOVERFLOW;
++
++ t = kzalloc(sizeof(*t), GFP_KERNEL);
+
+ if (!t)
+ return -ENOMEM;
+@@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
+
+ if (!num_targets) {
+ kfree(t);
+- return -ENOMEM;
++ return -EOVERFLOW;
+ }
+
+ if (alloc_targets(t, num_targets)) {
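
The dm-table hunk above rejects oversized target counts before sizing any allocation, so a huge num_targets can no longer overflow the multiplication inside alloc_targets(). The same guard pattern in a standalone sketch; MAX_TARGETS and struct target are made-up stand-ins for DM_MAX_TARGETS and the real target type:

    /* Bounded-count allocation: validate the element count against a hard
     * limit first, so count * sizeof(elem) can never wrap around.
     */
    #include <errno.h>
    #include <stdlib.h>

    #define MAX_TARGETS 1024	/* stand-in for DM_MAX_TARGETS */

    struct target { void *private; };

    static int alloc_target_array(struct target **out, size_t num)
    {
        if (num == 0 || num > MAX_TARGETS)
            return -EOVERFLOW;

        *out = calloc(num, sizeof(**out));	/* calloc re-checks overflow */
        return *out ? 0 : -ENOMEM;
    }
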
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 3ef9f018da60ce..b475200d8586a6 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
+ */
+ static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+ {
+- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
++ return (struct dm_verity_fec_io *)
++ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
+ }
+
+ /*
+@@ -185,7 +186,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+ {
+ if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
+ data, 1 << v->data_dev_block_bits,
+- verity_io_real_digest(v, io))))
++ verity_io_real_digest(v, io), true)))
+ return 0;
+
+ return memcmp(verity_io_real_digest(v, io), want_digest,
+@@ -386,7 +387,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ /* Always re-validate the corrected block against the expected hash */
+ r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
+ 1 << v->data_dev_block_bits,
+- verity_io_real_digest(v, io));
++ verity_io_real_digest(v, io), true);
+ if (unlikely(r < 0))
+ return r;
+
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 26adcfea030229..3636387ce565ae 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+ * Wrapper for crypto_ahash_init, which handles verity salting.
+ */
+ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
+- struct crypto_wait *wait)
++ struct crypto_wait *wait, bool may_sleep)
+ {
+ int r;
+
+ ahash_request_set_tfm(req, v->tfm);
+- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+- CRYPTO_TFM_REQ_MAY_BACKLOG,
+- crypto_req_done, (void *)wait);
++ ahash_request_set_callback(req,
++ may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
++ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
+
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
+
+ if (unlikely(r < 0)) {
+- DMERR("crypto_ahash_init failed: %d", r);
++ if (r != -ENOMEM)
++ DMERR("crypto_ahash_init failed: %d", r);
+ return r;
+ }
+
+@@ -179,12 +180,12 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
+ }
+
+ int verity_hash(struct dm_verity *v, struct ahash_request *req,
+- const u8 *data, size_t len, u8 *digest)
++ const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ {
+ int r;
+ struct crypto_wait wait;
+
+- r = verity_hash_init(v, req, &wait);
++ r = verity_hash_init(v, req, &wait, may_sleep);
+ if (unlikely(r < 0))
+ goto out;
+
+@@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+
+ r = verity_hash(v, verity_io_hash_req(v, io),
+ data, 1 << v->hash_dev_block_bits,
+- verity_io_real_digest(v, io));
++ verity_io_real_digest(v, io), !io->in_tasklet);
+ if (unlikely(r < 0))
+ goto release_ret_r;
+
+@@ -481,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ return 0;
+ }
+
++static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
++ u8 *data, size_t len)
++{
++ memcpy(data, io->recheck_buffer, len);
++ io->recheck_buffer += len;
++
++ return 0;
++}
++
++static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
++ struct bvec_iter start, sector_t cur_block)
++{
++ struct page *page;
++ void *buffer;
++ int r;
++ struct dm_io_request io_req;
++ struct dm_io_region io_loc;
++
++ page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
++ buffer = page_to_virt(page);
++
++ io_req.bi_opf = REQ_OP_READ;
++ io_req.mem.type = DM_IO_KMEM;
++ io_req.mem.ptr.addr = buffer;
++ io_req.notify.fn = NULL;
++ io_req.client = v->io;
++ io_loc.bdev = v->data_dev->bdev;
++ io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
++ io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
++ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
++ if (unlikely(r))
++ goto free_ret;
++
++ r = verity_hash(v, verity_io_hash_req(v, io), buffer,
++ 1 << v->data_dev_block_bits,
++ verity_io_real_digest(v, io), true);
++ if (unlikely(r))
++ goto free_ret;
++
++ if (memcmp(verity_io_real_digest(v, io),
++ verity_io_want_digest(v, io), v->digest_size)) {
++ r = -EIO;
++ goto free_ret;
++ }
++
++ io->recheck_buffer = buffer;
++ r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
++ if (unlikely(r))
++ goto free_ret;
++
++ r = 0;
++free_ret:
++ mempool_free(page, &v->recheck_pool);
++
++ return r;
++}
++
+ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+ {
+@@ -507,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ {
+ bool is_zero;
+ struct dm_verity *v = io->v;
+-#if defined(CONFIG_DM_VERITY_FEC)
+ struct bvec_iter start;
+-#endif
+ struct bvec_iter iter_copy;
+ struct bvec_iter *iter;
+ struct crypto_wait wait;
+@@ -556,14 +612,11 @@ static int verity_verify_io(struct dm_verity_io *io)
+ continue;
+ }
+
+- r = verity_hash_init(v, req, &wait);
++ r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ if (unlikely(r < 0))
+ return r;
+
+-#if defined(CONFIG_DM_VERITY_FEC)
+- if (verity_fec_is_enabled(v))
+- start = *iter;
+-#endif
++ start = *iter;
+ r = verity_for_io_block(v, io, iter, &wait);
+ if (unlikely(r < 0))
+ return r;
+@@ -585,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io)
+ * tasklet since it may sleep, so fallback to work-queue.
+ */
+ return -EAGAIN;
++ } else if (verity_recheck(v, io, start, cur_block) == 0) {
++ if (v->validated_blocks)
++ set_bit(cur_block, v->validated_blocks);
++ continue;
+ #if defined(CONFIG_DM_VERITY_FEC)
+ } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block, NULL, &start) == 0) {
+@@ -641,44 +698,23 @@ static void verity_work(struct work_struct *w)
+
+ io->in_tasklet = false;
+
+- verity_fec_init_io(io);
+ verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
+ }
+
+-static void verity_tasklet(unsigned long data)
+-{
+- struct dm_verity_io *io = (struct dm_verity_io *)data;
+- int err;
+-
+- io->in_tasklet = true;
+- err = verity_verify_io(io);
+- if (err == -EAGAIN) {
+- /* fallback to retrying with work-queue */
+- INIT_WORK(&io->work, verity_work);
+- queue_work(io->v->verify_wq, &io->work);
+- return;
+- }
+-
+- verity_finish_io(io, errno_to_blk_status(err));
+-}
+-
+ static void verity_end_io(struct bio *bio)
+ {
+ struct dm_verity_io *io = bio->bi_private;
+
+ if (bio->bi_status &&
+- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
++ (!verity_fec_is_enabled(io->v) ||
++ verity_is_system_shutting_down() ||
++ (bio->bi_opf & REQ_RAHEAD))) {
+ verity_finish_io(io, bio->bi_status);
+ return;
+ }
+
+- if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
+- tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
+- tasklet_schedule(&io->tasklet);
+- } else {
+- INIT_WORK(&io->work, verity_work);
+- queue_work(io->v->verify_wq, &io->work);
+- }
++ INIT_WORK(&io->work, verity_work);
++ queue_work(io->v->verify_wq, &io->work);
+ }
+
+ /*
+@@ -791,6 +827,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ bio->bi_private = io;
+ io->iter = bio->bi_iter;
+
++ verity_fec_init_io(io);
++
+ verity_submit_prefetch(v, io);
+
+ submit_bio_noacct(bio);
+@@ -959,6 +997,10 @@ static void verity_dtr(struct dm_target *ti)
+ if (v->verify_wq)
+ destroy_workqueue(v->verify_wq);
+
++ mempool_exit(&v->recheck_pool);
++ if (v->io)
++ dm_io_client_destroy(v->io);
++
+ if (v->bufio)
+ dm_bufio_client_destroy(v->bufio);
+
+@@ -1033,7 +1075,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
+ goto out;
+
+ r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
+- v->zero_digest);
++ v->zero_digest, true);
+
+ out:
+ kfree(req);
+@@ -1397,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ }
+ v->hash_blocks = hash_position;
+
++ r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
++ if (unlikely(r)) {
++ ti->error = "Cannot allocate mempool";
++ goto bad;
++ }
++
++ v->io = dm_io_client_create();
++ if (IS_ERR(v->io)) {
++ r = PTR_ERR(v->io);
++ v->io = NULL;
++ ti->error = "Cannot allocate dm io";
++ goto bad;
++ }
++
+ v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
+ 1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
+ dm_bufio_alloc_callback, NULL,
+@@ -1455,14 +1511,6 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ return r;
+ }
+
+-/*
+- * Check whether a DM target is a verity target.
+- */
+-bool dm_is_verity_target(struct dm_target *ti)
+-{
+- return ti->type->module == THIS_MODULE;
+-}
+-
+ /*
+ * Get the verity mode (error behavior) of a verity target.
+ *
+@@ -1516,6 +1564,14 @@ static struct target_type verity_target = {
+ };
+ module_dm(verity);
+
++/*
++ * Check whether a DM target is a verity target.
++ */
++bool dm_is_verity_target(struct dm_target *ti)
++{
++ return ti->type == &verity_target;
++}
++
+ MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
+ MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
+ MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 2f555b4203679a..db93a91169d5e6 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -11,6 +11,7 @@
+ #ifndef DM_VERITY_H
+ #define DM_VERITY_H
+
++#include <linux/dm-io.h>
+ #include <linux/dm-bufio.h>
+ #include <linux/device-mapper.h>
+ #include <linux/interrupt.h>
+@@ -68,6 +69,9 @@ struct dm_verity {
+ unsigned long *validated_blocks; /* bitset blocks validated */
+
+ char *signature_key_desc; /* signature keyring reference */
++
++ struct dm_io_client *io;
++ mempool_t recheck_pool;
+ };
+
+ struct dm_verity_io {
+@@ -76,14 +80,15 @@ struct dm_verity_io {
+ /* original value of bio->bi_end_io */
+ bio_end_io_t *orig_bi_end_io;
+
++ struct bvec_iter iter;
++
+ sector_t block;
+ unsigned int n_blocks;
+ bool in_tasklet;
+
+- struct bvec_iter iter;
+-
+ struct work_struct work;
+- struct tasklet_struct tasklet;
++
++ char *recheck_buffer;
+
+ /*
+ * Three variably-size fields follow this struct:
+@@ -115,12 +120,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
+ return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
+ }
+
+-static inline u8 *verity_io_digest_end(struct dm_verity *v,
+- struct dm_verity_io *io)
+-{
+- return verity_io_want_digest(v, io) + v->digest_size;
+-}
+-
+ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter *iter,
+ int (*process)(struct dm_verity *v,
+@@ -128,7 +127,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len));
+
+ extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
+- const u8 *data, size_t len, u8 *digest);
++ const u8 *data, size_t len, u8 *digest, bool may_sleep);
+
+ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+ sector_t block, u8 *digest, bool *is_zero);
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 074cb785eafc19..6a4279bfb1e778 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
+ req.notify.context = &endio;
+
+ /* writing via async dm-io (implied by notify.fn above) won't return an error */
+- (void) dm_io(&req, 1, &region, NULL);
++ (void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ i = j;
+ }
+
+@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
+ req.notify.fn = NULL;
+ req.notify.context = NULL;
+
+- r = dm_io(&req, 1, &region, NULL);
++ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r))
+ writecache_error(wc, r, "error writing superblock");
+ }
+@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+
+- r = dm_io(&req, 1, &region, NULL);
++ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r))
+ writecache_error(wc, r, "error flushing metadata: %d", r);
+ }
+@@ -990,7 +990,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+
+- return dm_io(&req, 1, &region, NULL);
++ return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
+ }
+
+ static void writecache_resume(struct dm_target *ti)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 64a1f306c96c11..5dd0a42463a2b8 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1817,10 +1817,15 @@ static void dm_submit_bio(struct bio *bio)
+ struct dm_table *map;
+
+ map = dm_get_live_table(md, &srcu_idx);
++ if (unlikely(!map)) {
++ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
++ dm_device_name(md));
++ bio_io_error(bio);
++ goto out;
++ }
+
+- /* If suspended, or map not yet available, queue this IO for later */
+- if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+- unlikely(!map)) {
++ /* If suspended, queue this IO for later */
++ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
+ if (bio->bi_opf & REQ_NOWAIT)
+ bio_wouldblock_error(bio);
+ else if (bio->bi_opf & REQ_RAHEAD)
+@@ -2531,7 +2536,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
+ break;
+
+ if (signal_pending_state(task_state, current)) {
+- r = -EINTR;
++ r = -ERESTARTSYS;
+ break;
+ }
+
+@@ -2556,7 +2561,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
+ break;
+
+ if (signal_pending_state(task_state, current)) {
+- r = -EINTR;
++ r = -ERESTARTSYS;
+ break;
+ }
+
+@@ -2918,6 +2923,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
+
+ static void __dm_internal_resume(struct mapped_device *md)
+ {
++ int r;
++ struct dm_table *map;
++
+ BUG_ON(!md->internal_suspend_count);
+
+ if (--md->internal_suspend_count)
+@@ -2926,12 +2934,23 @@ static void __dm_internal_resume(struct mapped_device *md)
+ if (dm_suspended_md(md))
+ goto done; /* resume from nested suspend */
+
+- /*
+- * NOTE: existing callers don't need to call dm_table_resume_targets
+- * (which may fail -- so best to avoid it for now by passing NULL map)
+- */
+- (void) __dm_resume(md, NULL);
+-
++ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
++ r = __dm_resume(md, map);
++ if (r) {
++ /*
++ * If a preresume method of some target failed, we are in a
++ * tricky situation. We can't return an error to the caller. We
++ * can't fake success because then the "resume" and
++ * "postsuspend" methods would not be paired correctly, and it
++ * would break various targets; for example, it would cause list
++ * corruption in the "origin" target.
++ *
++ * So, we fake normal suspend here, to make sure that the
++ * "resume" and "postsuspend" methods will be paired correctly.
++ */
++ DMERR("Preresume method failed: %d", r);
++ set_bit(DMF_SUSPENDED, &md->flags);
++ }
+ done:
+ clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
+ smp_mb__after_atomic();
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 6f9ff14971f982..be65472d8f8b35 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -227,6 +227,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
+ struct block_device *bdev;
+ struct mddev *mddev = bitmap->mddev;
+ struct bitmap_storage *store = &bitmap->storage;
++ unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
++ PAGE_SHIFT;
+ loff_t sboff, offset = mddev->bitmap_info.offset;
+ sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
+ unsigned int size = PAGE_SIZE;
+@@ -234,7 +236,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
+ sector_t doff;
+
+ bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
+- if (pg_index == store->file_pages - 1) {
++ /* we compare lengths (numbers of pages), not page offsets. */
++ if ((pg_index - store->sb_index) == store->file_pages - 1) {
+ unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
+
+ if (last_page_size == 0)
+@@ -268,11 +271,9 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
+ if (size == 0)
+ /* bitmap runs into data */
+ return -EINVAL;
+- } else {
+- /* DATA METADATA BITMAP - no problems */
+ }
+
+- md_super_write(mddev, rdev, sboff + ps, (int) size, page);
++ md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
+ return 0;
+ }
+
+@@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
+ struct page *page = store->filemap[pg_index];
+
+ if (mddev_is_clustered(bitmap->mddev)) {
+- pg_index += bitmap->cluster_slot *
+- DIV_ROUND_UP(store->bytes, PAGE_SIZE);
++ /* go to node bitmap area starting point */
++ pg_index += store->sb_index;
+ }
+
+ if (store->file)
+@@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
+ unsigned long index = file_page_index(store, chunk);
+ unsigned long node_offset = 0;
+
++ index += store->sb_index;
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
+
+@@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
+ unsigned long index = file_page_index(store, chunk);
+ unsigned long node_offset = 0;
+
++ index += store->sb_index;
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
+
+@@ -1424,7 +1427,7 @@ __acquires(bitmap->lock)
+ sector_t chunk = offset >> bitmap->chunkshift;
+ unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
+ unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
+- sector_t csize;
++ sector_t csize = ((sector_t)1) << bitmap->chunkshift;
+ int err;
+
+ if (page >= bitmap->pages) {
+@@ -1433,6 +1436,7 @@ __acquires(bitmap->lock)
+ * End-of-device while looking for a whole page or
+ * user set a huge number to sysfs bitmap_set_bits.
+ */
++ *blocks = csize - (offset & (csize - 1));
+ return NULL;
+ }
+ err = md_bitmap_checkpage(bitmap, page, create, 0);
+@@ -1441,8 +1445,7 @@ __acquires(bitmap->lock)
+ bitmap->bp[page].map == NULL)
+ csize = ((sector_t)1) << (bitmap->chunkshift +
+ PAGE_COUNTER_SHIFT);
+- else
+- csize = ((sector_t)1) << bitmap->chunkshift;
++
+ *blocks = csize - (offset & (csize - 1));
+
+ if (err < 0)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index a104a025084dc7..d1f6770c5cc094 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -449,14 +449,13 @@ void mddev_suspend(struct mddev *mddev)
+ set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ percpu_ref_kill(&mddev->active_io);
+
+- if (mddev->pers->prepare_suspend)
++ if (mddev->pers && mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+
+ wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
+ clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
+
+- del_timer_sync(&mddev->safemode_timer);
+ /* restrict memory reclaim I/O during raid array is suspend */
+ mddev->noio_flag = memalloc_noio_save();
+ }
+@@ -493,10 +492,9 @@ static void md_end_flush(struct bio *bio)
+
+ rdev_dec_pending(rdev, mddev);
+
+- if (atomic_dec_and_test(&mddev->flush_pending)) {
++ if (atomic_dec_and_test(&mddev->flush_pending))
+ /* The pre-request flush has finished */
+ queue_work(md_wq, &mddev->flush_work);
+- }
+ }
+
+ static void md_submit_flush_data(struct work_struct *ws);
+@@ -513,12 +511,8 @@ static void submit_flushes(struct work_struct *ws)
+ rdev_for_each_rcu(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+- /* Take two references, one is dropped
+- * when request finishes, one after
+- * we reclaim rcu_read_lock
+- */
+ struct bio *bi;
+- atomic_inc(&rdev->nr_pending);
++
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ bi = bio_alloc_bioset(rdev->bdev, 0,
+@@ -529,7 +523,6 @@ static void submit_flushes(struct work_struct *ws)
+ atomic_inc(&mddev->flush_pending);
+ submit_bio(bi);
+ rcu_read_lock();
+- rdev_dec_pending(rdev, mddev);
+ }
+ rcu_read_unlock();
+ if (atomic_dec_and_test(&mddev->flush_pending))
+@@ -558,8 +551,20 @@ static void md_submit_flush_data(struct work_struct *ws)
+ bio_endio(bio);
+ } else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+- md_handle_request(mddev, bio);
++
++ /*
++ * make_request() will never return an error here: it only
++ * returns an error from raid5_make_request() used by dm-raid.
++ * Since dm always splits data and flush operations into two
++ * separate bios, the io size of a flush submitted by dm is
++ * always 0, so make_request() will not fail here.
++ */
++ if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio)))
++ bio_io_error(bio);
+ }
++
++ /* Pairs with the percpu_ref_get() in md_flush_request() */
++ percpu_ref_put(&mddev->active_io);
+ }
+
+ /*
+@@ -582,6 +587,18 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
+ /* new request after previous flush is completed */
+ if (ktime_after(req_start, mddev->prev_flush_start)) {
+ WARN_ON(mddev->flush_bio);
++ /*
++ * Grab a reference to make sure mddev_suspend() will wait for
++ * this flush to be done.
++ *
++ * md_flush_request() is called under md_handle_request() and
++ * 'active_io' is already grabbed, hence percpu_ref_is_zero()
++ * won't pass; percpu_ref_tryget_live() can't be used because
++ * percpu_ref_kill() can be called by mddev_suspend()
++ * concurrently.
++ */
++ WARN_ON(percpu_ref_is_zero(&mddev->active_io));
++ percpu_ref_get(&mddev->active_io);
+ mddev->flush_bio = bio;
+ bio = NULL;
+ }
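
The md_flush_request() hunk above takes an extra 'active_io' reference for a deferred flush, and the md_submit_flush_data() hunk drops it, so mddev_suspend()'s wait for percpu_ref_is_zero() cannot complete while the flush is still queued. A reduced sketch of that get/put pairing with a plain atomic counter (percpu_ref has no userspace equivalent; the busy-wait stands in for the kernel's sleeping wait):

    /* Reference pairing: get before deferring work, put when the deferred
     * work finishes, and let the suspender wait for the count to drain.
     */
    #include <stdatomic.h>

    static atomic_int active_io = 1;	/* 1 = baseline "alive" reference */

    static void start_flush(void)	/* ~ md_flush_request() */
    {
        atomic_fetch_add(&active_io, 1);	/* keep suspend waiting */
        /* ... queue the flush; flush_done() runs from that context ... */
    }

    static void flush_done(void)	/* ~ end of md_submit_flush_data() */
    {
        atomic_fetch_sub(&active_io, 1);	/* pairs with the get above */
    }

    static void suspend_wait(void)	/* ~ mddev_suspend() */
    {
        atomic_fetch_sub(&active_io, 1);	/* drop the baseline ref */
        while (atomic_load(&active_io) > 0)
            ;	/* the kernel sleeps on sb_wait instead of spinning */
    }
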
+@@ -930,9 +947,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
+ return;
+
+ bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
+- 1,
+- REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
+- GFP_NOIO, &mddev->sync_set);
++ 1,
++ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META
++ | REQ_PREFLUSH | REQ_FUA,
++ GFP_NOIO, &mddev->sync_set);
+
+ atomic_inc(&rdev->nr_pending);
+
+@@ -1112,6 +1130,7 @@ struct super_type {
+ struct md_rdev *refdev,
+ int minor_version);
+ int (*validate_super)(struct mddev *mddev,
++ struct md_rdev *freshest,
+ struct md_rdev *rdev);
+ void (*sync_super)(struct mddev *mddev,
+ struct md_rdev *rdev);
+@@ -1249,8 +1268,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+
+ /*
+ * validate_super for 0.90.0
++ * note: we are not using "freshest" for the 0.9 superblock
+ */
+-static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
++static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
+ {
+ mdp_disk_t *desc;
+ mdp_super_t *sb = page_address(rdev->sb_page);
+@@ -1762,7 +1782,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ return ret;
+ }
+
+-static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
++static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
+ {
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ __u64 ev1 = le64_to_cpu(sb->events);
+@@ -1858,13 +1878,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ }
+ } else if (mddev->pers == NULL) {
+ /* Insist on a good event counter while assembling, except for
+- * spares (which don't need an event count) */
+- ++ev1;
++ * spares (which don't need an event count).
++ * Similar to mdadm, we allow event counter difference of 1
++ * from the freshest device.
++ */
+ if (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
+- if (ev1 < mddev->events)
++ if (ev1 + 1 < mddev->events)
+ return -EINVAL;
+ } else if (mddev->bitmap) {
+ /* If adding to array with a bitmap, then we can accept an
+@@ -1885,8 +1907,38 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
+ role = MD_DISK_ROLE_SPARE;
+ rdev->desc_nr = -1;
+- } else
++ } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
++ /*
++ * If we are assembling, and our event counter is smaller than the
++ * highest event counter, we cannot trust our superblock about the role.
++ * It could happen that our rdev was marked as Faulty, and all other
++ * superblocks were updated with +1 event counter.
++ * Then, before the next superblock update, which typically happens when
++ * remove_and_add_spares() removes the device from the array, there was
++ * a crash or reboot.
++ * If we allow current rdev without consulting the freshest superblock,
++ * we could cause data corruption.
++ * Note that in this case our event counter is smaller by 1 than the
++ * highest, otherwise, this rdev would not be allowed into array;
++ * both kernel and mdadm allow event counter difference of 1.
++ */
++ struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
++ u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
++
++ if (rdev->desc_nr >= freshest_max_dev) {
++ /* this is unexpected, better not proceed */
++ pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
++ mdname(mddev), rdev->bdev, rdev->desc_nr,
++ freshest->bdev, freshest_max_dev);
++ return -EUCLEAN;
++ }
++
++ role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
++ pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
++ mdname(mddev), rdev->bdev, role, role, freshest->bdev);
++ } else {
+ role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
++ }
+ switch(role) {
+ case MD_DISK_ROLE_SPARE: /* spare */
+ break;
+@@ -2436,6 +2488,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
+ fail:
+ pr_warn("md: failed to register dev-%s for %s\n",
+ b, mdname(mddev));
++ mddev_destroy_serial_pool(mddev, rdev, false);
+ return err;
+ }
+
+@@ -2794,7 +2847,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
+ * and should be added immediately.
+ */
+ super_types[mddev->major_version].
+- validate_super(mddev, rdev);
++ validate_super(mddev, NULL/*freshest*/, rdev);
+ if (add_journal)
+ mddev_suspend(mddev);
+ err = mddev->pers->hot_add_disk(mddev, rdev);
+@@ -3732,7 +3785,7 @@ static int analyze_sbs(struct mddev *mddev)
+ }
+
+ super_types[mddev->major_version].
+- validate_super(mddev, freshest);
++ validate_super(mddev, NULL/*freshest*/, freshest);
+
+ i = 0;
+ rdev_for_each_safe(rdev, tmp, mddev) {
+@@ -3747,7 +3800,7 @@ static int analyze_sbs(struct mddev *mddev)
+ }
+ if (rdev != freshest) {
+ if (super_types[mddev->major_version].
+- validate_super(mddev, rdev)) {
++ validate_super(mddev, freshest, rdev)) {
+ pr_warn("md: kicking non-fresh %pg from array!\n",
+ rdev->bdev);
+ md_kick_rdev_from_array(rdev);
+@@ -6186,7 +6239,15 @@ static void md_clean(struct mddev *mddev)
+ mddev->persistent = 0;
+ mddev->level = LEVEL_NONE;
+ mddev->clevel[0] = 0;
+- mddev->flags = 0;
++ /*
++ * Don't clear MD_CLOSING, or mddev can be opened again.
++ * 'hold_active != 0' means mddev is still in the creation
++ * process and will be used later.
++ */
++ if (mddev->hold_active)
++ mddev->flags = 0;
++ else
++ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+ mddev->sb_flags = 0;
+ mddev->ro = MD_RDWR;
+ mddev->metadata_type[0] = 0;
+@@ -6316,6 +6377,9 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ int err = 0;
+ int did_freeze = 0;
+
++ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++ return -EBUSY;
++
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+@@ -6330,8 +6394,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ */
+ md_wakeup_thread_directly(mddev->sync_thread);
+
+- if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+- return -EBUSY;
+ mddev_unlock(mddev);
+ wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+ &mddev->recovery));
+@@ -6344,29 +6406,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ pr_warn("md: %s still in use.\n",mdname(mddev));
+- if (did_freeze) {
+- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+- md_wakeup_thread(mddev->thread);
+- }
+ err = -EBUSY;
+ goto out;
+ }
++
+ if (mddev->pers) {
+ __md_stop_writes(mddev);
+
+- err = -ENXIO;
+- if (mddev->ro == MD_RDONLY)
++ if (mddev->ro == MD_RDONLY) {
++ err = -ENXIO;
+ goto out;
++ }
++
+ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
++ }
++
++out:
++ if ((mddev->pers && !err) || did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+- err = 0;
+ }
+-out:
++
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+ }
+@@ -6797,7 +6860,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
+ rdev->saved_raid_disk = rdev->raid_disk;
+ } else
+ super_types[mddev->major_version].
+- validate_super(mddev, rdev);
++ validate_super(mddev, NULL/*freshest*/, rdev);
+ if ((info->state & (1<<MD_DISK_SYNC)) &&
+ rdev->raid_disk != info->raid_disk) {
+ /* This was a hot-add request, but events doesn't
+@@ -7558,7 +7621,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+ struct mddev *mddev = NULL;
+- bool did_set_md_closing = false;
+
+ if (!md_ioctl_valid(cmd))
+ return -ENOTTY;
+@@ -7590,11 +7652,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
+
+ mddev = bdev->bd_disk->private_data;
+
+- if (!mddev) {
+- BUG();
+- goto out;
+- }
+-
+ /* Some actions do not require the mutex */
+ switch (cmd) {
+ case GET_ARRAY_INFO:
+@@ -7621,12 +7678,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
+
+ }
+
+- if (cmd == HOT_REMOVE_DISK)
+- /* need to ensure recovery thread has run */
+- wait_event_interruptible_timeout(mddev->sb_wait,
+- !test_bit(MD_RECOVERY_NEEDED,
+- &mddev->recovery),
+- msecs_to_jiffies(5000));
+ if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
+ /* Need to flush page cache, and ensure no-one else opens
+ * and writes
+@@ -7642,7 +7693,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
+ err = -EBUSY;
+ goto out;
+ }
+- did_set_md_closing = true;
+ mutex_unlock(&mddev->open_mutex);
+ sync_blockdev(bdev);
+ }
+@@ -7776,7 +7826,7 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
+ mddev->hold_active = 0;
+ mddev_unlock(mddev);
+ out:
+- if(did_set_md_closing)
++ if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
+ clear_bit(MD_CLOSING, &mddev->flags);
+ return err;
+ }
+@@ -8669,7 +8719,8 @@ static void md_end_clone_io(struct bio *bio)
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
+- orig_bio->bi_status = bio->bi_status;
++ if (bio->bi_status && !orig_bio->bi_status)
++ orig_bio->bi_status = bio->bi_status;
+
+ if (md_io_clone->start_time)
+ bio_end_io_acct(orig_bio, md_io_clone->start_time);
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 04698fd03e6069..d48c4fafc77989 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -277,7 +277,7 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
+ {
+ struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+- kfree(smm);
++ kvfree(smm);
+ }
+
+ static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+@@ -772,7 +772,7 @@ struct dm_space_map *dm_sm_metadata_init(void)
+ {
+ struct sm_metadata *smm;
+
+- smm = kmalloc(sizeof(*smm), GFP_KERNEL);
++ smm = kvmalloc(sizeof(*smm), GFP_KERNEL);
+ if (!smm)
+ return ERR_PTR(-ENOMEM);
+
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 2aabac773fe72a..cc02e7ec72c08c 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1473,7 +1473,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ for (j = 0; j < i; j++)
+ if (r1_bio->bios[j])
+ rdev_dec_pending(conf->mirrors[j].rdev, mddev);
+- free_r1bio(r1_bio);
++ mempool_free(r1_bio, &conf->r1bio_pool);
+ allow_barrier(conf, bio->bi_iter.bi_sector);
+
+ if (bio->bi_opf & REQ_NOWAIT) {
+@@ -1983,12 +1983,12 @@ static void end_sync_write(struct bio *bio)
+ }
+
+ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
+- int sectors, struct page *page, int rw)
++ int sectors, struct page *page, blk_opf_t rw)
+ {
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ /* success */
+ return 1;
+- if (rw == WRITE) {
++ if (rw == REQ_OP_WRITE) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+@@ -2105,7 +2105,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+ rdev = conf->mirrors[d].rdev;
+ if (r1_sync_page_io(rdev, sect, s,
+ pages[idx],
+- WRITE) == 0) {
++ REQ_OP_WRITE) == 0) {
+ r1_bio->bios[d]->bi_end_io = NULL;
+ rdev_dec_pending(rdev, mddev);
+ }
+@@ -2120,7 +2120,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+ rdev = conf->mirrors[d].rdev;
+ if (r1_sync_page_io(rdev, sect, s,
+ pages[idx],
+- READ) != 0)
++ REQ_OP_READ) != 0)
+ atomic_add(s, &rdev->corrected_errors);
+ }
+ sectors -= s;
+@@ -2332,7 +2332,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ r1_sync_page_io(rdev, sect, s,
+- conf->tmppage, WRITE);
++ conf->tmppage, REQ_OP_WRITE);
+ rdev_dec_pending(rdev, mddev);
+ } else
+ rcu_read_unlock();
+@@ -2349,7 +2349,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ if (r1_sync_page_io(rdev, sect, s,
+- conf->tmppage, READ)) {
++ conf->tmppage, REQ_OP_READ)) {
+ atomic_add(s, &rdev->corrected_errors);
+ pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
+ mdname(mddev), s,
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index 518b7cfa78b9d4..889bba60d6ff71 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -327,8 +327,9 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+ void r5c_check_stripe_cache_usage(struct r5conf *conf)
+ {
+ int total_cached;
++ struct r5l_log *log = READ_ONCE(conf->log);
+
+- if (!r5c_is_writeback(conf->log))
++ if (!r5c_is_writeback(log))
+ return;
+
+ total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+@@ -344,7 +345,7 @@ void r5c_check_stripe_cache_usage(struct r5conf *conf)
+ */
+ if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+ atomic_read(&conf->empty_inactive_list_nr) > 0)
+- r5l_wake_reclaim(conf->log, 0);
++ r5l_wake_reclaim(log, 0);
+ }
+
+ /*
+@@ -353,7 +354,9 @@ void r5c_check_stripe_cache_usage(struct r5conf *conf)
+ */
+ void r5c_check_cached_full_stripe(struct r5conf *conf)
+ {
+- if (!r5c_is_writeback(conf->log))
++ struct r5l_log *log = READ_ONCE(conf->log);
++
++ if (!r5c_is_writeback(log))
+ return;
+
+ /*
+@@ -363,7 +366,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
+ if (atomic_read(&conf->r5c_cached_full_stripes) >=
+ min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
+ conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
+- r5l_wake_reclaim(conf->log, 0);
++ r5l_wake_reclaim(log, 0);
+ }
+
+ /*
+@@ -396,7 +399,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
+ */
+ static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+ {
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+
+ if (!r5c_is_writeback(log))
+ return 0;
+@@ -449,7 +452,7 @@ static inline void r5c_update_log_state(struct r5l_log *log)
+ void r5c_make_stripe_write_out(struct stripe_head *sh)
+ {
+ struct r5conf *conf = sh->raid_conf;
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+
+ BUG_ON(!r5c_is_writeback(log));
+
+@@ -491,7 +494,7 @@ static void r5c_handle_parity_cached(struct stripe_head *sh)
+ */
+ static void r5c_finish_cache_stripe(struct stripe_head *sh)
+ {
+- struct r5l_log *log = sh->raid_conf->log;
++ struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
+
+ if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+ BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+@@ -692,7 +695,7 @@ static void r5c_disable_writeback_async(struct work_struct *work)
+
+ /* wait superblock change before suspend */
+ wait_event(mddev->sb_wait,
+- conf->log == NULL ||
++ !READ_ONCE(conf->log) ||
+ (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
+ (locked = mddev_trylock(mddev))));
+ if (locked) {
+@@ -1151,7 +1154,7 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
+ static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+ {
+ struct stripe_head *sh;
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+ sector_t new_cp;
+ unsigned long flags;
+
+@@ -1159,12 +1162,12 @@ static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+ return log->next_checkpoint;
+
+ spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+- if (list_empty(&conf->log->stripe_in_journal_list)) {
++ if (list_empty(&log->stripe_in_journal_list)) {
+ /* all stripes flushed */
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+ return log->next_checkpoint;
+ }
+- sh = list_first_entry(&conf->log->stripe_in_journal_list,
++ sh = list_first_entry(&log->stripe_in_journal_list,
+ struct stripe_head, r5c);
+ new_cp = sh->log_start;
+ spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+@@ -1399,7 +1402,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
+ struct stripe_head *sh, *next;
+
+ lockdep_assert_held(&conf->device_lock);
+- if (!conf->log)
++ if (!READ_ONCE(conf->log))
+ return;
+
+ count = 0;
+@@ -1420,7 +1423,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
+
+ static void r5c_do_reclaim(struct r5conf *conf)
+ {
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+ struct stripe_head *sh;
+ int count = 0;
+ unsigned long flags;
+@@ -1549,7 +1552,7 @@ static void r5l_reclaim_thread(struct md_thread *thread)
+ {
+ struct mddev *mddev = thread->mddev;
+ struct r5conf *conf = mddev->private;
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+
+ if (!log)
+ return;
+@@ -1591,7 +1594,7 @@ void r5l_quiesce(struct r5l_log *log, int quiesce)
+
+ bool r5l_log_disk_error(struct r5conf *conf)
+ {
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+
+ /* don't allow write if journal disk is missing */
+ if (!log)
+@@ -2635,7 +2638,7 @@ int r5c_try_caching_write(struct r5conf *conf,
+ struct stripe_head_state *s,
+ int disks)
+ {
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+ int i;
+ struct r5dev *dev;
+ int to_cache = 0;
+@@ -2802,7 +2805,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s)
+ {
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+ int i;
+ int do_wakeup = 0;
+ sector_t tree_index;
+@@ -2941,7 +2944,7 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
+ /* check whether this big stripe is in write back cache. */
+ bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
+ {
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+ sector_t tree_index;
+ void *slot;
+
+@@ -3049,14 +3052,14 @@ int r5l_start(struct r5l_log *log)
+ void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ struct r5conf *conf = mddev->private;
+- struct r5l_log *log = conf->log;
++ struct r5l_log *log = READ_ONCE(conf->log);
+
+ if (!log)
+ return;
+
+ if ((raid5_calc_degraded(conf) > 0 ||
+ test_bit(Journal, &rdev->flags)) &&
+- conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
++ log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ schedule_work(&log->disable_writeback_work);
+ }
+
+@@ -3145,7 +3148,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
+ spin_lock_init(&log->stripe_in_journal_lock);
+ atomic_set(&log->stripe_in_journal_count, 0);
+
+- conf->log = log;
++ WRITE_ONCE(conf->log, log);
+
+ set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+ return 0;
+@@ -3173,7 +3176,7 @@ void r5l_exit_log(struct r5conf *conf)
+ * 'reconfig_mutex' is held by caller, set 'conf->log' to NULL to
+ * ensure disable_writeback_work wakes up and exits.
+ */
+- conf->log = NULL;
++ WRITE_ONCE(conf->log, NULL);
+ wake_up(&conf->mddev->sb_wait);
+ flush_work(&log->disable_writeback_work);
+
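
Every conf->log access in raid5-cache.c above becomes a single READ_ONCE() into a local variable, so a concurrent r5l_exit_log() clearing the pointer can no longer slip in between a NULL check and a later refetch; object lifetime is still covered separately by flush_work(). The load-once idiom in a standalone sketch, with simplified volatile-based stand-ins for the kernel's READ_ONCE/WRITE_ONCE macros:

    /* Load-once idiom: snapshot a shared pointer so the NULL check and
     * every subsequent use see the same value, never a refetch.
     */
    #include <stddef.h>

    #define READ_ONCE(x)	 (*(volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct r5l_log { int journal_mode; };
    struct r5conf { struct r5l_log *log; };

    static int journal_mode(struct r5conf *conf)
    {
        struct r5l_log *log = READ_ONCE(conf->log);	/* one racy load */

        if (!log)
            return -1;
        return log->journal_mode;	/* uses the snapshot, not conf->log */
    }

    static void exit_log(struct r5conf *conf)
    {
        WRITE_ONCE(conf->log, NULL);	/* publish teardown to readers */
    }
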
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 284cd71bcc685b..2c7f11e5766735 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -36,7 +36,6 @@
+ */
+
+ #include <linux/blkdev.h>
+-#include <linux/delay.h>
+ #include <linux/kthread.h>
+ #include <linux/raid/pq.h>
+ #include <linux/async_tx.h>
+@@ -2420,7 +2419,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
+ atomic_inc(&conf->active_stripes);
+
+ raid5_release_stripe(sh);
+- conf->max_nr_stripes++;
++ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
+ return 1;
+ }
+
+@@ -2717,7 +2716,7 @@ static int drop_one_stripe(struct r5conf *conf)
+ shrink_buffers(sh);
+ free_stripe(conf->slab_cache, sh);
+ atomic_dec(&conf->active_stripes);
+- conf->max_nr_stripes--;
++ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
+ return 1;
+ }
+
+@@ -5892,11 +5891,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
+ int dd_idx;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+- if (dd_idx == sh->pd_idx)
++ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ min_sector = min(min_sector, sh->dev[dd_idx].sector);
+- max_sector = min(max_sector, sh->dev[dd_idx].sector);
++ max_sector = max(max_sector, sh->dev[dd_idx].sector);
+ }
+
+ spin_lock_irq(&conf->device_lock);
+@@ -6327,7 +6326,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
+ safepos = conf->reshape_safe;
+ sector_div(safepos, data_disks);
+ if (mddev->reshape_backwards) {
+- BUG_ON(writepos < reshape_sectors);
++ if (WARN_ON(writepos < reshape_sectors))
++ return MaxSector;
++
+ writepos -= reshape_sectors;
+ readpos += reshape_sectors;
+ safepos += reshape_sectors;
+@@ -6345,14 +6346,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
+ * to set 'stripe_addr' which is where we will write to.
+ */
+ if (mddev->reshape_backwards) {
+- BUG_ON(conf->reshape_progress == 0);
++ if (WARN_ON(conf->reshape_progress == 0))
++ return MaxSector;
++
+ stripe_addr = writepos;
+- BUG_ON((mddev->dev_sectors &
+- ~((sector_t)reshape_sectors - 1))
+- - reshape_sectors - stripe_addr
+- != sector_nr);
++ if (WARN_ON((mddev->dev_sectors &
++ ~((sector_t)reshape_sectors - 1)) -
++ reshape_sectors - stripe_addr != sector_nr))
++ return MaxSector;
+ } else {
+- BUG_ON(writepos != sector_nr + reshape_sectors);
++ if (WARN_ON(writepos != sector_nr + reshape_sectors))
++ return MaxSector;
++
+ stripe_addr = sector_nr;
+ }
+
+@@ -6807,6 +6812,9 @@ static void raid5d(struct md_thread *thread)
+ int batch_size, released;
+ unsigned int offset;
+
++ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++ break;
++
+ released = release_stripe_list(conf, conf->temp_inactive_list);
+ if (released)
+ clear_bit(R5_DID_ALLOC, &conf->cache_state);
+@@ -6843,18 +6851,7 @@ static void raid5d(struct md_thread *thread)
+ spin_unlock_irq(&conf->device_lock);
+ md_check_recovery(mddev);
+ spin_lock_irq(&conf->device_lock);
+-
+- /*
+- * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+- * seeing md_check_recovery() is needed to clear
+- * the flag when using mdmon.
+- */
+- continue;
+ }
+-
+- wait_event_lock_irq(mddev->sb_wait,
+- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+- conf->device_lock);
+ }
+ pr_debug("%d stripes handled\n", handled);
+
+@@ -6901,7 +6898,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ if (size <= 16 || size > 32768)
+ return -EINVAL;
+
+- conf->min_nr_stripes = size;
++ WRITE_ONCE(conf->min_nr_stripes, size);
+ mutex_lock(&conf->cache_size_mutex);
+ while (size < conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+@@ -6913,7 +6910,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ mutex_lock(&conf->cache_size_mutex);
+ while (size > conf->max_nr_stripes)
+ if (!grow_one_stripe(conf, GFP_KERNEL)) {
+- conf->min_nr_stripes = conf->max_nr_stripes;
++ WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
+ result = -ENOMEM;
+ break;
+ }
+@@ -7478,11 +7475,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+ {
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
++ int max_stripes = READ_ONCE(conf->max_nr_stripes);
++ int min_stripes = READ_ONCE(conf->min_nr_stripes);
+
+- if (conf->max_nr_stripes < conf->min_nr_stripes)
++ if (max_stripes < min_stripes)
+ /* unlikely, but not impossible */
+ return 0;
+- return conf->max_nr_stripes - conf->min_nr_stripes;
++ return max_stripes - min_stripes;
+ }
+
+ static struct r5conf *setup_conf(struct mddev *mddev)
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 09ca83c2332997..3b67e922f9815e 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -490,6 +490,15 @@ int cec_thread_func(void *_adap)
+ goto unlock;
+ }
+
++ if (adap->transmit_in_progress &&
++ adap->transmit_in_progress_aborted) {
++ if (adap->transmitting)
++ cec_data_cancel(adap->transmitting,
++ CEC_TX_STATUS_ABORTED, 0);
++ adap->transmit_in_progress = false;
++ adap->transmit_in_progress_aborted = false;
++ goto unlock;
++ }
+ if (adap->transmit_in_progress && timeout) {
+ /*
+ * If we timeout, then log that. Normally this does
+@@ -744,6 +753,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ {
+ struct cec_data *data;
+ bool is_raw = msg_is_raw(msg);
++ int err;
+
+ if (adap->devnode.unregistered)
+ return -ENODEV;
+@@ -908,11 +918,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ * Release the lock and wait, retake the lock afterwards.
+ */
+ mutex_unlock(&adap->lock);
+- wait_for_completion_killable(&data->c);
+- if (!data->completed)
+- cancel_delayed_work_sync(&data->work);
++ err = wait_for_completion_killable(&data->c);
++ cancel_delayed_work_sync(&data->work);
+ mutex_lock(&adap->lock);
+
++ if (err)
++ adap->transmit_in_progress_aborted = true;
++
+ /* Cancel the transmit if it was interrupted */
+ if (!data->completed) {
+ if (data->msg.tx_status & CEC_TX_STATUS_OK)
+@@ -1124,20 +1136,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
+ if (valid_la && min_len) {
+ /* These messages have special length requirements */
+ switch (cmd) {
+- case CEC_MSG_TIMER_STATUS:
+- if (msg->msg[2] & 0x10) {
+- switch (msg->msg[2] & 0xf) {
+- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+- if (msg->len < 5)
+- valid_la = false;
+- break;
+- }
+- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+- if (msg->len < 5)
+- valid_la = false;
+- }
+- break;
+ case CEC_MSG_RECORD_ON:
+ switch (msg->msg[2]) {
+ case CEC_OP_RECORD_SRC_OWN:
+@@ -1562,9 +1560,12 @@ static int cec_config_thread_func(void *arg)
+ */
+ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+ {
+- if (WARN_ON(adap->is_configuring || adap->is_configured))
++ if (WARN_ON(adap->is_claiming_log_addrs ||
++ adap->is_configuring || adap->is_configured))
+ return;
+
++ adap->is_claiming_log_addrs = true;
++
+ init_completion(&adap->config_completion);
+
+ /* Ready to kick off the thread */
+@@ -1579,6 +1580,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+ wait_for_completion(&adap->config_completion);
+ mutex_lock(&adap->lock);
+ }
++ adap->is_claiming_log_addrs = false;
+ }
+
+ /*
+diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
+index 67dc79ef170506..3ef91534430444 100644
+--- a/drivers/media/cec/core/cec-api.c
++++ b/drivers/media/cec/core/cec-api.c
+@@ -178,7 +178,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
+ CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+ CEC_LOG_ADDRS_FL_CDC_ONLY;
+ mutex_lock(&adap->lock);
+- if (!adap->is_configuring &&
++ if (!adap->is_claiming_log_addrs && !adap->is_configuring &&
+ (!log_addrs.num_log_addrs || !adap->is_configured) &&
+ !cec_is_busy(adap, fh)) {
+ err = __cec_s_log_addrs(adap, &log_addrs, block);
+@@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp)
+ list_del_init(&data->xfer_list);
+ }
+ mutex_unlock(&adap->lock);
++
++ mutex_lock(&fh->lock);
+ while (!list_empty(&fh->msgs)) {
+ struct cec_msg_entry *entry =
+ list_first_entry(&fh->msgs, struct cec_msg_entry, list);
+@@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp)
+ kfree(entry);
+ }
+ }
++ mutex_unlock(&fh->lock);
+ kfree(fh);
+
+ cec_put_device(devnode);
+diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
+index 26d2bc7783944e..a51e98ab4958dd 100644
+--- a/drivers/media/cec/platform/Makefile
++++ b/drivers/media/cec/platform/Makefile
+@@ -6,7 +6,7 @@
+ # Please keep it in alphabetic order
+ obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
+ obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+-obj-$(CONFIG_CEC_MESON_AO) += meson/
++obj-y += meson/
+ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
+ obj-$(CONFIG_CEC_SECO) += seco/
+ obj-$(CONFIG_CEC_STI) += sti/
+diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+index a366566f22c3b7..642c48e8c1f584 100644
+--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
++++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+@@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+ {
+ unsigned pat;
+ unsigned plane;
++ int ret = 0;
+
+ tpg->max_line_width = max_w;
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
+@@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+
+ tpg->lines[pat][plane] =
+ vzalloc(array3_size(max_w, 2, pixelsz));
+- if (!tpg->lines[pat][plane])
+- return -ENOMEM;
++ if (!tpg->lines[pat][plane]) {
++ ret = -ENOMEM;
++ goto free_lines;
++ }
+ if (plane == 0)
+ continue;
+ tpg->downsampled_lines[pat][plane] =
+ vzalloc(array3_size(max_w, 2, pixelsz));
+- if (!tpg->downsampled_lines[pat][plane])
+- return -ENOMEM;
++ if (!tpg->downsampled_lines[pat][plane]) {
++ ret = -ENOMEM;
++ goto free_lines;
++ }
+ }
+ }
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+@@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+
+ tpg->contrast_line[plane] =
+ vzalloc(array_size(pixelsz, max_w));
+- if (!tpg->contrast_line[plane])
+- return -ENOMEM;
++ if (!tpg->contrast_line[plane]) {
++ ret = -ENOMEM;
++ goto free_contrast_line;
++ }
+ tpg->black_line[plane] =
+ vzalloc(array_size(pixelsz, max_w));
+- if (!tpg->black_line[plane])
+- return -ENOMEM;
++ if (!tpg->black_line[plane]) {
++ ret = -ENOMEM;
++ goto free_contrast_line;
++ }
+ tpg->random_line[plane] =
+ vzalloc(array3_size(max_w, 2, pixelsz));
+- if (!tpg->random_line[plane])
+- return -ENOMEM;
++ if (!tpg->random_line[plane]) {
++ ret = -ENOMEM;
++ goto free_contrast_line;
++ }
+ }
+ return 0;
++
++free_contrast_line:
++ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
++ vfree(tpg->contrast_line[plane]);
++ vfree(tpg->black_line[plane]);
++ vfree(tpg->random_line[plane]);
++ tpg->contrast_line[plane] = NULL;
++ tpg->black_line[plane] = NULL;
++ tpg->random_line[plane] = NULL;
++ }
++free_lines:
++ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
++ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
++ vfree(tpg->lines[pat][plane]);
++ tpg->lines[pat][plane] = NULL;
++ if (plane == 0)
++ continue;
++ vfree(tpg->downsampled_lines[pat][plane]);
++ tpg->downsampled_lines[pat][plane] = NULL;
++ }
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(tpg_alloc);
+
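
tpg_alloc() previously returned -ENOMEM on the first failed vzalloc() and leaked everything allocated before it; the rewrite funnels all failures through labelled cleanup blocks that free in reverse order. The same single-exit unwind pattern in a standalone sketch:

    /* Goto-based unwind: each fallible step jumps to a label that frees
     * everything acquired before it, newest first.
     */
    #include <errno.h>
    #include <stdlib.h>

    struct bufs { void *a, *b, *c; };

    static int bufs_alloc(struct bufs *p, size_t n)
    {
        p->a = calloc(n, 1);
        if (!p->a)
            return -ENOMEM;

        p->b = calloc(n, 1);
        if (!p->b)
            goto free_a;

        p->c = calloc(n, 1);
        if (!p->c)
            goto free_b;

        return 0;

    free_b:
        free(p->b);
        p->b = NULL;
    free_a:
        free(p->a);
        p->a = NULL;
        return -ENOMEM;
    }
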
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index cf6727d9c81f36..29bfc2bf796b65 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -302,6 +302,10 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
+ p->mem_priv = NULL;
+ p->dbuf = NULL;
+ p->dbuf_mapped = 0;
++ p->bytesused = 0;
++ p->length = 0;
++ p->m.fd = 0;
++ p->data_offset = 0;
+ }
+
+ /*
+@@ -1296,10 +1300,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+- vb->planes[plane].bytesused = 0;
+- vb->planes[plane].length = 0;
+- vb->planes[plane].m.fd = 0;
+- vb->planes[plane].data_offset = 0;
+
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(attach_dmabuf,
+@@ -2648,9 +2648,14 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
+ return -EBUSY;
+
+ /*
+- * Start with count 1, driver can increase it in queue_setup()
++ * Start with q->min_buffers_needed + 1; the driver can increase
++ * it in queue_setup().
++ *
++ * 'min_buffers_needed' buffers need to be queued up before you
++ * can start streaming, plus 1 for userspace (or in this case,
++ * kernelspace) processing.
+ */
+- count = 1;
++ count = max(2, q->min_buffers_needed + 1);
+
+ dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+ (read) ? "read" : "write", count, q->fileio_read_once,
+diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+index 28f3fdfe23a298..6975a71d740f6d 100644
+--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
++++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+@@ -487,9 +487,15 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
+ struct iosys_map *map)
+ {
+- struct vb2_dma_sg_buf *buf = dbuf->priv;
++ struct vb2_dma_sg_buf *buf;
++ void *vaddr;
++
++ buf = dbuf->priv;
++ vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
++ if (!vaddr)
++ return -EINVAL;
+
+- iosys_map_set_vaddr(map, buf->vaddr);
++ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+ }
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 9293b058ab9974..93d3378a0df4b8 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2168,7 +2168,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd,
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+- tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp));
++ tvp = memdup_array_user(compat_ptr(tvps->props),
++ tvps->num, sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+@@ -2199,7 +2200,8 @@ static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd,
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+- tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp));
++ tvp = memdup_array_user(compat_ptr(tvps->props),
++ tvps->num, sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+@@ -2379,7 +2381,8 @@ static int dvb_get_property(struct dvb_frontend *fe, struct file *file,
+ if (!tvps->num || tvps->num > DTV_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
++ tvp = memdup_array_user((void __user *)tvps->props,
++ tvps->num, sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+@@ -2457,7 +2460,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+- tvp = memdup_user((void __user *)tvps->props, tvps->num * sizeof(*tvp));
++ tvp = memdup_array_user((void __user *)tvps->props,
++ tvps->num, sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
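All four ioctl paths switch from memdup_user(ptr, n * size), where the multiplication happens unchecked in the caller, to memdup_array_user(), which checks the multiply for overflow before allocating; tvps->num is already bounded by DTV_IOCTL_MAX_MSGS here, so this is hardening rather than a live overflow. A sketch of the checked multiply the helper relies on, with plain malloc/memcpy standing in for the user-copy routines:

#include <stdlib.h>
#include <string.h>

static void *dup_array(const void *src, size_t n, size_t size)
{
	size_t bytes;
	void *dst;

	/* GCC/Clang builtin: detects wrap-around in n * size */
	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;	/* reject instead of truncating */

	dst = malloc(bytes);
	if (dst)
		memcpy(dst, src, bytes);
	return dst;
}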
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 305bb21d843c8b..b43695bc51e754 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -104,6 +104,8 @@ static int dvb_device_open(struct inode *inode, struct file *file)
+ err = file->f_op->open(inode, file);
+ up_read(&minor_rwsem);
+ mutex_unlock(&dvbdev_mutex);
++ if (err)
++ dvb_device_put(dvbdev);
+ return err;
+ }
+ fail:
+@@ -488,6 +490,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+ if (!dvbdevfops) {
+ kfree(dvbdev);
++ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
+ return -ENOMEM;
+ }
+@@ -496,6 +499,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ if (!new_node) {
+ kfree(dvbdevfops);
+ kfree(dvbdev);
++ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
+ return -ENOMEM;
+ }
+@@ -529,6 +533,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ }
+ list_del(&dvbdev->list_head);
+ kfree(dvbdev);
++ *pdvbdev = NULL;
+ up_write(&minor_rwsem);
+ mutex_unlock(&dvbdev_register_lock);
+ return -EINVAL;
+@@ -551,6 +556,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ dvb_media_device_free(dvbdev);
+ list_del(&dvbdev->list_head);
+ kfree(dvbdev);
++ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
+ return ret;
+ }
+@@ -569,6 +575,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ dvb_media_device_free(dvbdev);
+ list_del(&dvbdev->list_head);
+ kfree(dvbdev);
++ *pdvbdev = NULL;
+ mutex_unlock(&dvbdev_register_lock);
+ return PTR_ERR(clsdev);
+ }
+@@ -949,7 +956,7 @@ int dvb_usercopy(struct file *file,
+ int (*func)(struct file *file,
+ unsigned int cmd, void *arg))
+ {
+- char sbuf[128];
++ char sbuf[128] = {};
+ void *mbuf = NULL;
+ void *parg = NULL;
+ int err = -EINVAL;
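The dvbdev changes are two hardening fixes: every error path in dvb_register_device() now clears the caller's *pdvbdev so nothing can dereference the freed device, and dvb_usercopy()'s on-stack scratch buffer is zero-initialised so an ioctl handler that fills it only partially cannot leak stale stack bytes back to user space. A sketch of the first pattern:

#include <stdlib.h>

struct dev { int id; };

static int register_dev(struct dev **pdev)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d) {
		*pdev = NULL;	/* never leave a stale pointer behind */
		return -12;	/* -ENOMEM */
	}
	*pdev = d;
	return 0;
}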
+diff --git a/drivers/media/dvb-frontends/as102_fe_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
+index 297f9520ebf9d8..8a4e392c889653 100644
+--- a/drivers/media/dvb-frontends/as102_fe_types.h
++++ b/drivers/media/dvb-frontends/as102_fe_types.h
+@@ -174,6 +174,6 @@ struct as10x_register_addr {
+ uint32_t addr;
+ /* register mode access */
+ uint8_t mode;
+-};
++} __packed;
+
+ #endif
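Marking the as10x register struct __packed matters because it is exchanged with firmware byte-for-byte: without the attribute the compiler pads the u8 that follows the u32, and the on-wire size changes. Compiling this sketch shows the difference (typically 8 bytes versus 5):

#include <stdint.h>
#include <stdio.h>

struct reg_addr {		/* padded: sizeof() is typically 8 */
	uint32_t addr;
	uint8_t mode;
};

struct reg_addr_packed {	/* packed: sizeof() is exactly 5 */
	uint32_t addr;
	uint8_t mode;
} __attribute__((packed));

int main(void)
{
	printf("%zu vs %zu\n", sizeof(struct reg_addr),
	       sizeof(struct reg_addr_packed));
	return 0;
}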
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 2638875924153a..231b45632ad5a3 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -2176,6 +2176,11 @@ static int lgdt3306a_probe(struct i2c_client *client)
+ struct dvb_frontend *fe;
+ int ret;
+
++ if (!client->dev.platform_data) {
++ dev_err(&client->dev, "platform data is mandatory\n");
++ return -EINVAL;
++ }
++
+ config = kmemdup(client->dev.platform_data,
+ sizeof(struct lgdt3306a_config), GFP_KERNEL);
+ if (config == NULL) {
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index cf037b61b226bc..affaa36d314779 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1894,7 +1894,7 @@ static int m88ds3103_probe(struct i2c_client *client)
+ /* get frontend address */
+ ret = regmap_read(dev->regmap, 0x29, &utmp);
+ if (ret)
+- goto err_kfree;
++ goto err_del_adapters;
+ dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1;
+ dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr);
+
+@@ -1902,11 +1902,14 @@ static int m88ds3103_probe(struct i2c_client *client)
+ dev->dt_addr);
+ if (IS_ERR(dev->dt_client)) {
+ ret = PTR_ERR(dev->dt_client);
+- goto err_kfree;
++ goto err_del_adapters;
+ }
+ }
+
+ return 0;
++
++err_del_adapters:
++ i2c_mux_del_adapters(dev->muxc);
+ err_kfree:
+ kfree(dev);
+ err:
+diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
+index 4ebbcf05cc09ef..91e9c378397c81 100644
+--- a/drivers/media/dvb-frontends/mxl5xx.c
++++ b/drivers/media/dvb-frontends/mxl5xx.c
+@@ -1381,57 +1381,57 @@ static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id,
+ u32 nco_count_min = 0;
+ u32 clk_type = 0;
+
+- struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700010, 8, 1}, {0x90700010, 9, 1},
+ {0x90700010, 10, 1}, {0x90700010, 11, 1},
+ {0x90700010, 12, 1}, {0x90700010, 13, 1},
+ {0x90700010, 14, 1}, {0x90700010, 15, 1} };
+- struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700010, 16, 1}, {0x90700010, 17, 1},
+ {0x90700010, 18, 1}, {0x90700010, 19, 1},
+ {0x90700010, 20, 1}, {0x90700010, 21, 1},
+ {0x90700010, 22, 1}, {0x90700010, 23, 1} };
+- struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700014, 0, 1}, {0x90700014, 1, 1},
+ {0x90700014, 2, 1}, {0x90700014, 3, 1},
+ {0x90700014, 4, 1}, {0x90700014, 5, 1},
+ {0x90700014, 6, 1}, {0x90700014, 7, 1} };
+- struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700018, 0, 3}, {0x90700018, 4, 3},
+ {0x90700018, 8, 3}, {0x90700018, 12, 3},
+ {0x90700018, 16, 3}, {0x90700018, 20, 3},
+ {0x90700018, 24, 3}, {0x90700018, 28, 3} };
+- struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
+ {0x9070000C, 16, 1}, {0x9070000C, 17, 1},
+ {0x9070000C, 18, 1}, {0x9070000C, 19, 1},
+ {0x9070000C, 20, 1}, {0x9070000C, 21, 1},
+ {0x9070000C, 22, 1}, {0x9070000C, 23, 1} };
+- struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700010, 0, 1}, {0x90700010, 1, 1},
+ {0x90700010, 2, 1}, {0x90700010, 3, 1},
+ {0x90700010, 4, 1}, {0x90700010, 5, 1},
+ {0x90700010, 6, 1}, {0x90700010, 7, 1} };
+- struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
+ {0x9070000C, 0, 1}, {0x9070000C, 1, 1},
+ {0x9070000C, 2, 1}, {0x9070000C, 3, 1},
+ {0x9070000C, 4, 1}, {0x9070000C, 5, 1},
+ {0x9070000C, 6, 1}, {0x9070000C, 7, 1} };
+- struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
+ {0x9070000C, 24, 1}, {0x9070000C, 25, 1},
+ {0x9070000C, 26, 1}, {0x9070000C, 27, 1},
+ {0x9070000C, 28, 1}, {0x9070000C, 29, 1},
+ {0x9070000C, 30, 1}, {0x9070000C, 31, 1} };
+- struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700014, 8, 1}, {0x90700014, 9, 1},
+ {0x90700014, 10, 1}, {0x90700014, 11, 1},
+ {0x90700014, 12, 1}, {0x90700014, 13, 1},
+ {0x90700014, 14, 1}, {0x90700014, 15, 1} };
+- struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
+ {0x907001D4, 0, 1}, {0x907001D4, 1, 1},
+ {0x907001D4, 2, 1}, {0x907001D4, 3, 1},
+ {0x907001D4, 4, 1}, {0x907001D4, 5, 1},
+ {0x907001D4, 6, 1}, {0x907001D4, 7, 1} };
+- struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
++ static const struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700044, 16, 80}, {0x90700044, 16, 81},
+ {0x90700044, 16, 82}, {0x90700044, 16, 83},
+ {0x90700044, 16, 84}, {0x90700044, 16, 85},
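The mxl5xx tables above are pure data, but declared as ordinary locals they were rebuilt on the stack at every config_ts() call; ten such 8-entry tables of three u32s each come to nearly a kilobyte of kernel stack. `static const` moves them into .rodata, initialised once at build time. In miniature:

#include <stdint.h>

struct reg_field { uint32_t reg; uint32_t lsb; uint32_t width; };

uint32_t lookup_reg(unsigned int demod)
{
	/* Lives in .rodata, not in the caller's stack frame. */
	static const struct reg_field sync_polarity[8] = {
		{0x90700010,  8, 1}, {0x90700010,  9, 1},
		{0x90700010, 10, 1}, {0x90700010, 11, 1},
		{0x90700010, 12, 1}, {0x90700010, 13, 1},
		{0x90700010, 14, 1}, {0x90700010, 15, 1},
	};

	return demod < 8 ? sync_polarity[demod].reg : 0;
}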
+diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
+index 35c969fd2cb5e6..4e59734ec53e2d 100644
+--- a/drivers/media/dvb-frontends/rtl2830.c
++++ b/drivers/media/dvb-frontends/rtl2830.c
+@@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on
+ index, pid, onoff);
+
+ /* skip invalid PIDs (0x2000) */
+- if (pid > 0x1fff || index > 32)
++ if (pid > 0x1fff || index >= 32)
+ return 0;
+
+ if (onoff)
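The rtl2830 and rtl2832 PID filters have 32 slots, valid indices 0 through 31, so the old `index > 32` test let index 32 through and wrote one element past the table; `>=` is the correct bound, as in:

#include <stdint.h>

#define NUM_SLOTS 32
static uint16_t pid_filter[NUM_SLOTS];

static int set_pid(unsigned int index, uint16_t pid)
{
	/* 'index > NUM_SLOTS' would wrongly accept index 32 */
	if (index >= NUM_SLOTS)
		return -1;
	pid_filter[index] = pid;
	return 0;
}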
+diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
+index 601cf45c39358c..e6a7877a985413 100644
+--- a/drivers/media/dvb-frontends/rtl2832.c
++++ b/drivers/media/dvb-frontends/rtl2832.c
+@@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
+ index, pid, onoff, dev->slave_ts);
+
+ /* skip invalid PIDs (0x2000) */
+- if (pid > 0x1fff || index > 32)
++ if (pid > 0x1fff || index >= 32)
+ return 0;
+
+ if (onoff)
+diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
+index 48326434488c4f..72540ef4e5f889 100644
+--- a/drivers/media/dvb-frontends/stv0367.c
++++ b/drivers/media/dvb-frontends/stv0367.c
+@@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
+ }
+ };
+
+-static
+-int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
++static noinline_for_stack
++int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
+ {
+- u8 buf[MAX_XFER_SIZE];
++ u8 buf[3] = { MSB(reg), LSB(reg), data };
+ struct i2c_msg msg = {
+ .addr = state->config->demod_address,
+ .flags = 0,
+ .buf = buf,
+- .len = len + 2
++ .len = 3,
+ };
+ int ret;
+
+- if (2 + len > sizeof(buf)) {
+- printk(KERN_WARNING
+- "%s: i2c wr reg=%04x: len=%d is too big!\n",
+- KBUILD_MODNAME, reg, len);
+- return -EINVAL;
+- }
+-
+-
+- buf[0] = MSB(reg);
+- buf[1] = LSB(reg);
+- memcpy(buf + 2, data, len);
+-
+ if (i2cdebug)
+ printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
+- state->config->demod_address, reg, buf[2]);
++ state->config->demod_address, reg, data);
+
+ ret = i2c_transfer(state->i2c, &msg, 1);
+ if (ret != 1)
+ printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
+- __func__, state->config->demod_address, reg, buf[2]);
++ __func__, state->config->demod_address, reg, data);
+
+ return (ret != 1) ? -EREMOTEIO : 0;
+ }
+
+-static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
+-{
+- u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+-
+- return stv0367_writeregs(state, reg, &tmp, 1);
+-}
+-
+-static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
++static noinline_for_stack
++u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
+ {
+ u8 b0[] = { 0, 0 };
+ u8 b1[] = { 0 };
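Folding stv0367_writereg() into a fixed three-byte buffer removes the oversized MAX_XFER_SIZE scratch array, and noinline_for_stack keeps the register helpers out of their callers' stack frames: when the compiler inlines several such helpers into one function, their buffers all accumulate in a single frame. In the kernel the annotation expands to plain noinline; roughly:

static __attribute__((noinline)) int write_reg(unsigned short reg,
					       unsigned char data)
{
	/* buf stays in this small frame, never in the caller's */
	unsigned char buf[3] = { reg >> 8, reg & 0xff, data };

	/* i2c_transfer() would consume buf here */
	return buf[0] ^ buf[1] ^ buf[2];	/* keep buf live */
}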
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index 5d5e4e9e4422e6..3e725cdcc66bdb 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ struct tda10048_config *config = &state->config;
+ int i;
+ u32 if_freq_khz;
++ u64 sample_freq;
+
+ dprintk(1, "%s(bw = %d)\n", __func__, bw);
+
+@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
+
+ /* Calculate the sample frequency */
+- state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
+- state->sample_freq /= (state->pll_nfactor + 1);
+- state->sample_freq /= (state->pll_pfactor + 4);
++ sample_freq = state->xtal_hz;
++ sample_freq *= state->pll_mfactor + 45;
++ do_div(sample_freq, state->pll_nfactor + 1);
++ do_div(sample_freq, state->pll_pfactor + 4);
++ state->sample_freq = sample_freq;
+ dprintk(1, "- sample_freq = %d\n", state->sample_freq);
+
+ /* Update the I/F */
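The tda10048 fix widens the sample-frequency arithmetic: state->xtal_hz times (pll_mfactor + 45) can exceed 32 bits, so the old u32 math silently truncated. The product is now accumulated in a u64 and reduced with do_div(), the kernel's 64-by-32 division helper for targets without native 64-bit division. In plain C the shape is:

#include <stdint.h>

static uint32_t sample_freq(uint32_t xtal_hz, uint32_t m,
			    uint32_t n, uint32_t p)
{
	/* widen before multiplying: the product may exceed 2^32 */
	uint64_t f = (uint64_t)xtal_hz * (m + 45);

	f /= n + 1;	/* do_div(f, n + 1) in kernel code */
	f /= p + 4;
	return (uint32_t)f;
}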
+diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
+index a3483448794338..fd928787207edf 100644
+--- a/drivers/media/dvb-frontends/tda18271c2dd.c
++++ b/drivers/media/dvb-frontends/tda18271c2dd.c
+@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
+
+ OscFreq = (u64) freq * (u64) Div;
+ OscFreq *= (u64) 16384;
+- do_div(OscFreq, (u64)16000000);
++ do_div(OscFreq, 16000000);
+ MainDiv = OscFreq;
+
+ state->m_Regs[MPD] = PostDiv & 0x77;
+@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
+ OscFreq = (u64)freq * (u64)Div;
+ /* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+ OscFreq *= (u64)16384;
+- do_div(OscFreq, (u64)16000000);
++ do_div(OscFreq, 16000000);
+ CalDiv = OscFreq;
+
+ state->m_Regs[CPD] = PostDiv;
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 74ff833ff48cab..53b443be5a59ee 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -99,6 +99,7 @@ config VIDEO_IMX214
+
+ config VIDEO_IMX219
+ tristate "Sony IMX219 sensor support"
++ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX219 camera.
+diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
+index a4e39871e8f75c..375284a59fd1ce 100644
+--- a/drivers/media/i2c/ar0521.c
++++ b/drivers/media/i2c/ar0521.c
+@@ -847,7 +847,8 @@ static int ar0521_power_off(struct device *dev)
+ clk_disable_unprepare(sensor->extclk);
+
+ if (sensor->reset_gpio)
+- gpiod_set_value(sensor->reset_gpio, 1); /* assert RESET signal */
++ /* assert RESET signal */
++ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+
+ for (i = ARRAY_SIZE(ar0521_supply_names) - 1; i >= 0; i--) {
+ if (sensor->supplies[i])
+@@ -881,7 +882,7 @@ static int ar0521_power_on(struct device *dev)
+
+ if (sensor->reset_gpio)
+ /* deassert RESET signal */
+- gpiod_set_value(sensor->reset_gpio, 0);
++ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ usleep_range(4500, 5000); /* min 45000 clocks */
+
+ for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) {
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 49e0d9a0953028..6f8fbd82e21c8f 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3097,7 +3097,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+ try_fmt->code = sensor->internal_csi_format->code;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+- if (ssd != sensor->pixel_array)
++ if (ssd == sensor->pixel_array)
+ continue;
+
+ try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
+diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
+index 5838fcda92fd49..0b1a64958d714e 100644
+--- a/drivers/media/i2c/ccs/ccs-quirk.h
++++ b/drivers/media/i2c/ccs/ccs-quirk.h
+@@ -32,12 +32,10 @@ struct ccs_sensor;
+ * @reg: Pointer to the register to access
+ * @value: Register value, set by the caller on write, or
+ * by the quirk on read
+- *
+- * @flags: Quirk flags
+- *
+ * @return: 0 on success, -ENOIOCTLCMD if no register
+ * access may be done by the caller (default read
+ * value is zero), else negative error code on error
++ * @flags: Quirk flags
+ */
+ struct ccs_quirk {
+ int (*limits)(struct ccs_sensor *sensor);
+diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
+index d6fc843f9368e5..0d6f0f8506f76f 100644
+--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
++++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
+@@ -1460,7 +1460,7 @@ static int et8ek8_probe(struct i2c_client *client)
+ return ret;
+ }
+
+-static void __exit et8ek8_remove(struct i2c_client *client)
++static void et8ek8_remove(struct i2c_client *client)
+ {
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+@@ -1502,7 +1502,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
+ .of_match_table = et8ek8_of_table,
+ },
+ .probe = et8ek8_probe,
+- .remove = __exit_p(et8ek8_remove),
++ .remove = et8ek8_remove,
+ .id_table = et8ek8_id_table,
+ };
+
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index ec53abe2e84e53..a9a8cd148f4fcf 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -21,40 +21,56 @@
+ #include <linux/module.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regulator/consumer.h>
++
++#include <media/v4l2-cci.h>
+ #include <media/v4l2-ctrls.h>
+ #include <media/v4l2-device.h>
+ #include <media/v4l2-event.h>
+ #include <media/v4l2-fwnode.h>
+ #include <media/v4l2-mediabus.h>
+-#include <asm/unaligned.h>
+
+-#define IMX219_REG_VALUE_08BIT 1
+-#define IMX219_REG_VALUE_16BIT 2
++/* Chip ID */
++#define IMX219_REG_CHIP_ID CCI_REG16(0x0000)
++#define IMX219_CHIP_ID 0x0219
+
+-#define IMX219_REG_MODE_SELECT 0x0100
++#define IMX219_REG_MODE_SELECT CCI_REG8(0x0100)
+ #define IMX219_MODE_STANDBY 0x00
+ #define IMX219_MODE_STREAMING 0x01
+
+-/* Chip ID */
+-#define IMX219_REG_CHIP_ID 0x0000
+-#define IMX219_CHIP_ID 0x0219
++#define IMX219_REG_CSI_LANE_MODE CCI_REG8(0x0114)
++#define IMX219_CSI_2_LANE_MODE 0x01
++#define IMX219_CSI_4_LANE_MODE 0x03
+
+-/* External clock frequency is 24.0M */
+-#define IMX219_XCLK_FREQ 24000000
++#define IMX219_REG_DPHY_CTRL CCI_REG8(0x0128)
++#define IMX219_DPHY_CTRL_TIMING_AUTO 0
++#define IMX219_DPHY_CTRL_TIMING_MANUAL 1
+
+-/* Pixel rate is fixed for all the modes */
+-#define IMX219_PIXEL_RATE 182400000
+-#define IMX219_PIXEL_RATE_4LANE 280800000
++#define IMX219_REG_EXCK_FREQ CCI_REG16(0x012a)
++#define IMX219_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
+
+-#define IMX219_DEFAULT_LINK_FREQ 456000000
+-#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
++/* Analog gain control */
++#define IMX219_REG_ANALOG_GAIN CCI_REG8(0x0157)
++#define IMX219_ANA_GAIN_MIN 0
++#define IMX219_ANA_GAIN_MAX 232
++#define IMX219_ANA_GAIN_STEP 1
++#define IMX219_ANA_GAIN_DEFAULT 0x0
+
+-#define IMX219_REG_CSI_LANE_MODE 0x0114
+-#define IMX219_CSI_2_LANE_MODE 0x01
+-#define IMX219_CSI_4_LANE_MODE 0x03
++/* Digital gain control */
++#define IMX219_REG_DIGITAL_GAIN CCI_REG16(0x0158)
++#define IMX219_DGTL_GAIN_MIN 0x0100
++#define IMX219_DGTL_GAIN_MAX 0x0fff
++#define IMX219_DGTL_GAIN_DEFAULT 0x0100
++#define IMX219_DGTL_GAIN_STEP 1
++
++/* Exposure control */
++#define IMX219_REG_EXPOSURE CCI_REG16(0x015a)
++#define IMX219_EXPOSURE_MIN 4
++#define IMX219_EXPOSURE_STEP 1
++#define IMX219_EXPOSURE_DEFAULT 0x640
++#define IMX219_EXPOSURE_MAX 65535
+
+ /* V_TIMING internal */
+-#define IMX219_REG_VTS 0x0160
++#define IMX219_REG_VTS CCI_REG16(0x0160)
+ #define IMX219_VTS_15FPS 0x0dc6
+ #define IMX219_VTS_30FPS_1080P 0x06e3
+ #define IMX219_VTS_30FPS_BINNED 0x06e3
+@@ -72,37 +88,37 @@
+ /* HBLANK control - read only */
+ #define IMX219_PPL_DEFAULT 3448
+
+-/* Exposure control */
+-#define IMX219_REG_EXPOSURE 0x015a
+-#define IMX219_EXPOSURE_MIN 4
+-#define IMX219_EXPOSURE_STEP 1
+-#define IMX219_EXPOSURE_DEFAULT 0x640
+-#define IMX219_EXPOSURE_MAX 65535
+-
+-/* Analog gain control */
+-#define IMX219_REG_ANALOG_GAIN 0x0157
+-#define IMX219_ANA_GAIN_MIN 0
+-#define IMX219_ANA_GAIN_MAX 232
+-#define IMX219_ANA_GAIN_STEP 1
+-#define IMX219_ANA_GAIN_DEFAULT 0x0
+-
+-/* Digital gain control */
+-#define IMX219_REG_DIGITAL_GAIN 0x0158
+-#define IMX219_DGTL_GAIN_MIN 0x0100
+-#define IMX219_DGTL_GAIN_MAX 0x0fff
+-#define IMX219_DGTL_GAIN_DEFAULT 0x0100
+-#define IMX219_DGTL_GAIN_STEP 1
+-
+-#define IMX219_REG_ORIENTATION 0x0172
++#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162)
++#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164)
++#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166)
++#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168)
++#define IMX219_REG_Y_ADD_END_A CCI_REG16(0x016a)
++#define IMX219_REG_X_OUTPUT_SIZE CCI_REG16(0x016c)
++#define IMX219_REG_Y_OUTPUT_SIZE CCI_REG16(0x016e)
++#define IMX219_REG_X_ODD_INC_A CCI_REG8(0x0170)
++#define IMX219_REG_Y_ODD_INC_A CCI_REG8(0x0171)
++#define IMX219_REG_ORIENTATION CCI_REG8(0x0172)
+
+ /* Binning Mode */
+-#define IMX219_REG_BINNING_MODE 0x0174
++#define IMX219_REG_BINNING_MODE CCI_REG16(0x0174)
+ #define IMX219_BINNING_NONE 0x0000
+ #define IMX219_BINNING_2X2 0x0101
+ #define IMX219_BINNING_2X2_ANALOG 0x0303
+
++#define IMX219_REG_CSI_DATA_FORMAT_A CCI_REG16(0x018c)
++
++/* PLL Settings */
++#define IMX219_REG_VTPXCK_DIV CCI_REG8(0x0301)
++#define IMX219_REG_VTSYCK_DIV CCI_REG8(0x0303)
++#define IMX219_REG_PREPLLCK_VT_DIV CCI_REG8(0x0304)
++#define IMX219_REG_PREPLLCK_OP_DIV CCI_REG8(0x0305)
++#define IMX219_REG_PLL_VT_MPY CCI_REG16(0x0306)
++#define IMX219_REG_OPPXCK_DIV CCI_REG8(0x0309)
++#define IMX219_REG_OPSYCK_DIV CCI_REG8(0x030b)
++#define IMX219_REG_PLL_OP_MPY CCI_REG16(0x030c)
++
+ /* Test Pattern Control */
+-#define IMX219_REG_TEST_PATTERN 0x0600
++#define IMX219_REG_TEST_PATTERN CCI_REG16(0x0600)
+ #define IMX219_TEST_PATTERN_DISABLE 0
+ #define IMX219_TEST_PATTERN_SOLID_COLOR 1
+ #define IMX219_TEST_PATTERN_COLOR_BARS 2
+@@ -110,10 +126,10 @@
+ #define IMX219_TEST_PATTERN_PN9 4
+
+ /* Test pattern colour components */
+-#define IMX219_REG_TESTP_RED 0x0602
+-#define IMX219_REG_TESTP_GREENR 0x0604
+-#define IMX219_REG_TESTP_BLUE 0x0606
+-#define IMX219_REG_TESTP_GREENB 0x0608
++#define IMX219_REG_TESTP_RED CCI_REG16(0x0602)
++#define IMX219_REG_TESTP_GREENR CCI_REG16(0x0604)
++#define IMX219_REG_TESTP_BLUE CCI_REG16(0x0606)
++#define IMX219_REG_TESTP_GREENB CCI_REG16(0x0608)
+ #define IMX219_TESTP_COLOUR_MIN 0
+ #define IMX219_TESTP_COLOUR_MAX 0x03ff
+ #define IMX219_TESTP_COLOUR_STEP 1
+@@ -122,6 +138,19 @@
+ #define IMX219_TESTP_BLUE_DEFAULT 0
+ #define IMX219_TESTP_GREENB_DEFAULT 0
+
++#define IMX219_REG_TP_WINDOW_WIDTH CCI_REG16(0x0624)
++#define IMX219_REG_TP_WINDOW_HEIGHT CCI_REG16(0x0626)
++
++/* External clock frequency is 24.0M */
++#define IMX219_XCLK_FREQ 24000000
++
++/* Pixel rate is fixed for all the modes */
++#define IMX219_PIXEL_RATE 182400000
++#define IMX219_PIXEL_RATE_4LANE 280800000
++
++#define IMX219_DEFAULT_LINK_FREQ 456000000
++#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
++
+ /* IMX219 native and active pixel array size. */
+ #define IMX219_NATIVE_WIDTH 3296U
+ #define IMX219_NATIVE_HEIGHT 2480U
+@@ -130,14 +159,9 @@
+ #define IMX219_PIXEL_ARRAY_WIDTH 3280U
+ #define IMX219_PIXEL_ARRAY_HEIGHT 2464U
+
+-struct imx219_reg {
+- u16 address;
+- u8 val;
+-};
+-
+ struct imx219_reg_list {
+ unsigned int num_of_regs;
+- const struct imx219_reg *regs;
++ const struct cci_reg_sequence *regs;
+ };
+
+ /* Mode : resolution and related config&values */
+@@ -160,53 +184,48 @@ struct imx219_mode {
+ bool binning;
+ };
+
+-static const struct imx219_reg imx219_common_regs[] = {
+- {0x0100, 0x00}, /* Mode Select */
++static const struct cci_reg_sequence imx219_common_regs[] = {
++ { IMX219_REG_MODE_SELECT, 0x00 }, /* Mode Select */
+
+ /* To Access Addresses 3000-5fff, send the following commands */
+- {0x30eb, 0x0c},
+- {0x30eb, 0x05},
+- {0x300a, 0xff},
+- {0x300b, 0xff},
+- {0x30eb, 0x05},
+- {0x30eb, 0x09},
++ { CCI_REG8(0x30eb), 0x05 },
++ { CCI_REG8(0x30eb), 0x0c },
++ { CCI_REG8(0x300a), 0xff },
++ { CCI_REG8(0x300b), 0xff },
++ { CCI_REG8(0x30eb), 0x05 },
++ { CCI_REG8(0x30eb), 0x09 },
+
+ /* PLL Clock Table */
+- {0x0301, 0x05}, /* VTPXCK_DIV */
+- {0x0303, 0x01}, /* VTSYSCK_DIV */
+- {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */
+- {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
+- {0x0306, 0x00}, /* PLL_VT_MPY */
+- {0x0307, 0x39},
+- {0x030b, 0x01}, /* OP_SYS_CLK_DIV */
+- {0x030c, 0x00}, /* PLL_OP_MPY */
+- {0x030d, 0x72},
++ { IMX219_REG_VTPXCK_DIV, 5 },
++ { IMX219_REG_VTSYCK_DIV, 1 },
++ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
++ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
++ { IMX219_REG_PLL_VT_MPY, 57 },
++ { IMX219_REG_OPSYCK_DIV, 1 },
++ { IMX219_REG_PLL_OP_MPY, 114 },
+
+ /* Undocumented registers */
+- {0x455e, 0x00},
+- {0x471e, 0x4b},
+- {0x4767, 0x0f},
+- {0x4750, 0x14},
+- {0x4540, 0x00},
+- {0x47b4, 0x14},
+- {0x4713, 0x30},
+- {0x478b, 0x10},
+- {0x478f, 0x10},
+- {0x4793, 0x10},
+- {0x4797, 0x0e},
+- {0x479b, 0x0e},
++ { CCI_REG8(0x455e), 0x00 },
++ { CCI_REG8(0x471e), 0x4b },
++ { CCI_REG8(0x4767), 0x0f },
++ { CCI_REG8(0x4750), 0x14 },
++ { CCI_REG8(0x4540), 0x00 },
++ { CCI_REG8(0x47b4), 0x14 },
++ { CCI_REG8(0x4713), 0x30 },
++ { CCI_REG8(0x478b), 0x10 },
++ { CCI_REG8(0x478f), 0x10 },
++ { CCI_REG8(0x4793), 0x10 },
++ { CCI_REG8(0x4797), 0x0e },
++ { CCI_REG8(0x479b), 0x0e },
+
+ /* Frame Bank Register Group "A" */
+- {0x0162, 0x0d}, /* Line_Length_A */
+- {0x0163, 0x78},
+- {0x0170, 0x01}, /* X_ODD_INC_A */
+- {0x0171, 0x01}, /* Y_ODD_INC_A */
++ { IMX219_REG_LINE_LENGTH_A, 3448 },
++ { IMX219_REG_X_ODD_INC_A, 1 },
++ { IMX219_REG_Y_ODD_INC_A, 1 },
+
+ /* Output setup registers */
+- {0x0114, 0x01}, /* CSI 2-Lane Mode */
+- {0x0128, 0x00}, /* DPHY Auto Mode */
+- {0x012a, 0x18}, /* EXCK_Freq */
+- {0x012b, 0x00},
++ { IMX219_REG_DPHY_CTRL, IMX219_DPHY_CTRL_TIMING_AUTO },
++ { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
+ };
+
+ /*
+@@ -214,92 +233,58 @@ static const struct imx219_reg imx219_common_regs[] = {
+ * driver.
+ * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
+ */
+-static const struct imx219_reg mode_3280x2464_regs[] = {
+- {0x0164, 0x00},
+- {0x0165, 0x00},
+- {0x0166, 0x0c},
+- {0x0167, 0xcf},
+- {0x0168, 0x00},
+- {0x0169, 0x00},
+- {0x016a, 0x09},
+- {0x016b, 0x9f},
+- {0x016c, 0x0c},
+- {0x016d, 0xd0},
+- {0x016e, 0x09},
+- {0x016f, 0xa0},
+- {0x0624, 0x0c},
+- {0x0625, 0xd0},
+- {0x0626, 0x09},
+- {0x0627, 0xa0},
++static const struct cci_reg_sequence mode_3280x2464_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 0 },
++ { IMX219_REG_X_ADD_END_A, 3279 },
++ { IMX219_REG_Y_ADD_STA_A, 0 },
++ { IMX219_REG_Y_ADD_END_A, 2463 },
++ { IMX219_REG_X_OUTPUT_SIZE, 3280 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 2464 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 3280 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 2464 },
+ };
+
+-static const struct imx219_reg mode_1920_1080_regs[] = {
+- {0x0164, 0x02},
+- {0x0165, 0xa8},
+- {0x0166, 0x0a},
+- {0x0167, 0x27},
+- {0x0168, 0x02},
+- {0x0169, 0xb4},
+- {0x016a, 0x06},
+- {0x016b, 0xeb},
+- {0x016c, 0x07},
+- {0x016d, 0x80},
+- {0x016e, 0x04},
+- {0x016f, 0x38},
+- {0x0624, 0x07},
+- {0x0625, 0x80},
+- {0x0626, 0x04},
+- {0x0627, 0x38},
++static const struct cci_reg_sequence mode_1920_1080_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 680 },
++ { IMX219_REG_X_ADD_END_A, 2599 },
++ { IMX219_REG_Y_ADD_STA_A, 692 },
++ { IMX219_REG_Y_ADD_END_A, 1771 },
++ { IMX219_REG_X_OUTPUT_SIZE, 1920 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 1080 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 1920 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 1080 },
+ };
+
+-static const struct imx219_reg mode_1640_1232_regs[] = {
+- {0x0164, 0x00},
+- {0x0165, 0x00},
+- {0x0166, 0x0c},
+- {0x0167, 0xcf},
+- {0x0168, 0x00},
+- {0x0169, 0x00},
+- {0x016a, 0x09},
+- {0x016b, 0x9f},
+- {0x016c, 0x06},
+- {0x016d, 0x68},
+- {0x016e, 0x04},
+- {0x016f, 0xd0},
+- {0x0624, 0x06},
+- {0x0625, 0x68},
+- {0x0626, 0x04},
+- {0x0627, 0xd0},
++static const struct cci_reg_sequence mode_1640_1232_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 0 },
++ { IMX219_REG_X_ADD_END_A, 3279 },
++ { IMX219_REG_Y_ADD_STA_A, 0 },
++ { IMX219_REG_Y_ADD_END_A, 2463 },
++ { IMX219_REG_X_OUTPUT_SIZE, 1640 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 1232 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 1640 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 1232 },
+ };
+
+-static const struct imx219_reg mode_640_480_regs[] = {
+- {0x0164, 0x03},
+- {0x0165, 0xe8},
+- {0x0166, 0x08},
+- {0x0167, 0xe7},
+- {0x0168, 0x02},
+- {0x0169, 0xf0},
+- {0x016a, 0x06},
+- {0x016b, 0xaf},
+- {0x016c, 0x02},
+- {0x016d, 0x80},
+- {0x016e, 0x01},
+- {0x016f, 0xe0},
+- {0x0624, 0x06},
+- {0x0625, 0x68},
+- {0x0626, 0x04},
+- {0x0627, 0xd0},
++static const struct cci_reg_sequence mode_640_480_regs[] = {
++ { IMX219_REG_X_ADD_STA_A, 1000 },
++ { IMX219_REG_X_ADD_END_A, 2279 },
++ { IMX219_REG_Y_ADD_STA_A, 752 },
++ { IMX219_REG_Y_ADD_END_A, 1711 },
++ { IMX219_REG_X_OUTPUT_SIZE, 640 },
++ { IMX219_REG_Y_OUTPUT_SIZE, 480 },
++ { IMX219_REG_TP_WINDOW_WIDTH, 1640 },
++ { IMX219_REG_TP_WINDOW_HEIGHT, 1232 },
+ };
+
+-static const struct imx219_reg raw8_framefmt_regs[] = {
+- {0x018c, 0x08},
+- {0x018d, 0x08},
+- {0x0309, 0x08},
++static const struct cci_reg_sequence raw8_framefmt_regs[] = {
++ { IMX219_REG_CSI_DATA_FORMAT_A, 0x0808 },
++ { IMX219_REG_OPPXCK_DIV, 8 },
+ };
+
+-static const struct imx219_reg raw10_framefmt_regs[] = {
+- {0x018c, 0x0a},
+- {0x018d, 0x0a},
+- {0x0309, 0x0a},
++static const struct cci_reg_sequence raw10_framefmt_regs[] = {
++ { IMX219_REG_CSI_DATA_FORMAT_A, 0x0a0a },
++ { IMX219_REG_OPPXCK_DIV, 10 },
+ };
+
+ static const s64 imx219_link_freq_menu[] = {
+@@ -460,6 +445,7 @@ struct imx219 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
++ struct regmap *regmap;
+ struct clk *xclk; /* system clock to IMX219 */
+ u32 xclk_freq;
+
+@@ -491,78 +477,6 @@ static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
+ return container_of(_sd, struct imx219, sd);
+ }
+
+-/* Read registers up to 2 at a time */
+-static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- struct i2c_msg msgs[2];
+- u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+- u8 data_buf[4] = { 0, };
+- int ret;
+-
+- if (len > 4)
+- return -EINVAL;
+-
+- /* Write register address */
+- msgs[0].addr = client->addr;
+- msgs[0].flags = 0;
+- msgs[0].len = ARRAY_SIZE(addr_buf);
+- msgs[0].buf = addr_buf;
+-
+- /* Read data from register */
+- msgs[1].addr = client->addr;
+- msgs[1].flags = I2C_M_RD;
+- msgs[1].len = len;
+- msgs[1].buf = &data_buf[4 - len];
+-
+- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+- if (ret != ARRAY_SIZE(msgs))
+- return -EIO;
+-
+- *val = get_unaligned_be32(data_buf);
+-
+- return 0;
+-}
+-
+-/* Write registers up to 2 at a time */
+-static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- u8 buf[6];
+-
+- if (len > 4)
+- return -EINVAL;
+-
+- put_unaligned_be16(reg, buf);
+- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+- if (i2c_master_send(client, buf, len + 2) != len + 2)
+- return -EIO;
+-
+- return 0;
+-}
+-
+-/* Write a list of registers */
+-static int imx219_write_regs(struct imx219 *imx219,
+- const struct imx219_reg *regs, u32 len)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- unsigned int i;
+- int ret;
+-
+- for (i = 0; i < len; i++) {
+- ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
+- if (ret) {
+- dev_err_ratelimited(&client->dev,
+- "Failed to write reg 0x%4.4x. error = %d\n",
+- regs[i].address, ret);
+-
+- return ret;
+- }
+- }
+-
+- return 0;
+-}
+-
+ /* Get bayer order based on flip setting. */
+ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
+ {
+@@ -586,7 +500,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
+ struct imx219 *imx219 =
+ container_of(ctrl->handler, struct imx219, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+- int ret;
++ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+@@ -610,48 +524,45 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+- ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
+- IMX219_REG_VALUE_08BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_ANALOG_GAIN,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_EXPOSURE:
+- ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+- ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+- ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
+- IMX219_REG_VALUE_16BIT,
+- imx219_test_pattern_val[ctrl->val]);
++ cci_write(imx219->regmap, IMX219_REG_TEST_PATTERN,
++ imx219_test_pattern_val[ctrl->val], &ret);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+- ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
+- imx219->hflip->val |
+- imx219->vflip->val << 1);
++ cci_write(imx219->regmap, IMX219_REG_ORIENTATION,
++ imx219->hflip->val | imx219->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+- ret = imx219_write_reg(imx219, IMX219_REG_VTS,
+- IMX219_REG_VALUE_16BIT,
+- imx219->mode->height + ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_VTS,
++ imx219->mode->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_RED:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENR:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENR,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_BLUE:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_BLUE,
++ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENB:
+- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
+- IMX219_REG_VALUE_16BIT, ctrl->val);
++ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENB,
++ ctrl->val, &ret);
+ break;
+ default:
+ dev_info(&client->dev,
+@@ -802,15 +713,15 @@ static int imx219_set_framefmt(struct imx219 *imx219,
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+- return imx219_write_regs(imx219, raw8_framefmt_regs,
+- ARRAY_SIZE(raw8_framefmt_regs));
++ return cci_multi_reg_write(imx219->regmap, raw8_framefmt_regs,
++ ARRAY_SIZE(raw8_framefmt_regs), NULL);
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+- return imx219_write_regs(imx219, raw10_framefmt_regs,
+- ARRAY_SIZE(raw10_framefmt_regs));
++ return cci_multi_reg_write(imx219->regmap, raw10_framefmt_regs,
++ ARRAY_SIZE(raw10_framefmt_regs), NULL);
+ }
+
+ return -EINVAL;
+@@ -819,28 +730,24 @@ static int imx219_set_framefmt(struct imx219 *imx219,
+ static int imx219_set_binning(struct imx219 *imx219,
+ const struct v4l2_mbus_framefmt *format)
+ {
+- if (!imx219->mode->binning) {
+- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+- IMX219_REG_VALUE_16BIT,
+- IMX219_BINNING_NONE);
+- }
++ if (!imx219->mode->binning)
++ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
++ IMX219_BINNING_NONE, NULL);
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+- IMX219_REG_VALUE_16BIT,
+- IMX219_BINNING_2X2_ANALOG);
++ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
++ IMX219_BINNING_2X2_ANALOG, NULL);
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
+- IMX219_REG_VALUE_16BIT,
+- IMX219_BINNING_2X2);
++ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
++ IMX219_BINNING_2X2, NULL);
+ }
+
+ return -EINVAL;
+@@ -879,9 +786,9 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
+
+ static int imx219_configure_lanes(struct imx219 *imx219)
+ {
+- return imx219_write_reg(imx219, IMX219_REG_CSI_LANE_MODE,
+- IMX219_REG_VALUE_08BIT, (imx219->lanes == 2) ?
+- IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE);
++ return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
++ imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
++ IMX219_CSI_4_LANE_MODE, NULL);
+ };
+
+ static int imx219_start_streaming(struct imx219 *imx219,
+@@ -897,7 +804,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
+ return ret;
+
+ /* Send all registers that are common to all modes */
+- ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
++ ret = cci_multi_reg_write(imx219->regmap, imx219_common_regs,
++ ARRAY_SIZE(imx219_common_regs), NULL);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
+ goto err_rpm_put;
+@@ -912,7 +820,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
+
+ /* Apply default values of current mode */
+ reg_list = &imx219->mode->reg_list;
+- ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
++ ret = cci_multi_reg_write(imx219->regmap, reg_list->regs,
++ reg_list->num_of_regs, NULL);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ goto err_rpm_put;
+@@ -939,8 +848,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
+ goto err_rpm_put;
+
+ /* set stream on register */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STREAMING, NULL);
+ if (ret)
+ goto err_rpm_put;
+
+@@ -961,8 +870,8 @@ static void imx219_stop_streaming(struct imx219 *imx219)
+ int ret;
+
+ /* set stream off register */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+
+@@ -1101,10 +1010,9 @@ static int imx219_identify_module(struct imx219 *imx219)
+ {
+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ int ret;
+- u32 val;
++ u64 val;
+
+- ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
+- IMX219_REG_VALUE_16BIT, &val);
++ ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX219_CHIP_ID);
+@@ -1112,7 +1020,7 @@ static int imx219_identify_module(struct imx219 *imx219)
+ }
+
+ if (val != IMX219_CHIP_ID) {
+- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
++ dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ IMX219_CHIP_ID, val);
+ return -EIO;
+ }
+@@ -1336,6 +1244,13 @@ static int imx219_probe(struct i2c_client *client)
+ if (imx219_check_hwcfg(dev, imx219))
+ return -EINVAL;
+
++ imx219->regmap = devm_cci_regmap_init_i2c(client, 16);
++ if (IS_ERR(imx219->regmap)) {
++ ret = PTR_ERR(imx219->regmap);
++ dev_err(dev, "failed to initialize CCI: %d\n", ret);
++ return ret;
++ }
++
+ /* Get system clock (xclk) */
+ imx219->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(imx219->xclk)) {
+@@ -1379,17 +1294,19 @@ static int imx219_probe(struct i2c_client *client)
+ * streaming is started, so upon power up switch the modes to:
+ * streaming -> standby
+ */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STREAMING, NULL);
+ if (ret < 0)
+ goto error_power_off;
++
+ usleep_range(100, 110);
+
+ /* put sensor back to standby mode */
+- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
++ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
++ IMX219_MODE_STANDBY, NULL);
+ if (ret < 0)
+ goto error_power_off;
++
+ usleep_range(100, 110);
+
+ ret = imx219_init_controls(imx219);
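The bulk of the imx219 diff converts hand-rolled i2c_transfer() helpers to the v4l2-cci register API: each register constant now encodes its own width (CCI_REG8/CCI_REG16), the register tables become struct cci_reg_sequence, and cci_write()/cci_read() take an optional int *err that latches the first failure so a whole sequence needs only one check. A simplified stand-in for that error-latching convention (not the v4l2-cci implementation itself):

static int cci_write_sketch(unsigned int reg, unsigned long val, int *err)
{
	int ret = 0;

	(void)reg; (void)val;		/* unused in this sketch */
	if (err && *err)
		return *err;		/* earlier call failed: skip */
	/* ... perform the bus write here, setting ret on failure ... */
	if (err)
		*err = ret;		/* latch the first failure */
	return ret;
}

static int apply_gains(int again, int dgain)
{
	int ret = 0;

	cci_write_sketch(0x0157, again, &ret);
	cci_write_sketch(0x0158, dgain, &ret);
	return ret;			/* first error, or 0 */
}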
+diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
+index 29098612813cb9..6dacd38ae947a1 100644
+--- a/drivers/media/i2c/imx290.c
++++ b/drivers/media/i2c/imx290.c
+@@ -41,18 +41,18 @@
+ #define IMX290_WINMODE_720P (1 << 4)
+ #define IMX290_WINMODE_CROP (4 << 4)
+ #define IMX290_FR_FDG_SEL CCI_REG8(0x3009)
+-#define IMX290_BLKLEVEL CCI_REG16(0x300a)
++#define IMX290_BLKLEVEL CCI_REG16_LE(0x300a)
+ #define IMX290_GAIN CCI_REG8(0x3014)
+-#define IMX290_VMAX CCI_REG24(0x3018)
++#define IMX290_VMAX CCI_REG24_LE(0x3018)
+ #define IMX290_VMAX_MAX 0x3ffff
+-#define IMX290_HMAX CCI_REG16(0x301c)
++#define IMX290_HMAX CCI_REG16_LE(0x301c)
+ #define IMX290_HMAX_MAX 0xffff
+-#define IMX290_SHS1 CCI_REG24(0x3020)
++#define IMX290_SHS1 CCI_REG24_LE(0x3020)
+ #define IMX290_WINWV_OB CCI_REG8(0x303a)
+-#define IMX290_WINPV CCI_REG16(0x303c)
+-#define IMX290_WINWV CCI_REG16(0x303e)
+-#define IMX290_WINPH CCI_REG16(0x3040)
+-#define IMX290_WINWH CCI_REG16(0x3042)
++#define IMX290_WINPV CCI_REG16_LE(0x303c)
++#define IMX290_WINWV CCI_REG16_LE(0x303e)
++#define IMX290_WINPH CCI_REG16_LE(0x3040)
++#define IMX290_WINWH CCI_REG16_LE(0x3042)
+ #define IMX290_OUT_CTRL CCI_REG8(0x3046)
+ #define IMX290_ODBIT_10BIT (0 << 0)
+ #define IMX290_ODBIT_12BIT (1 << 0)
+@@ -78,28 +78,28 @@
+ #define IMX290_ADBIT2 CCI_REG8(0x317c)
+ #define IMX290_ADBIT2_10BIT 0x12
+ #define IMX290_ADBIT2_12BIT 0x00
+-#define IMX290_CHIP_ID CCI_REG16(0x319a)
++#define IMX290_CHIP_ID CCI_REG16_LE(0x319a)
+ #define IMX290_ADBIT3 CCI_REG8(0x31ec)
+ #define IMX290_ADBIT3_10BIT 0x37
+ #define IMX290_ADBIT3_12BIT 0x0e
+ #define IMX290_REPETITION CCI_REG8(0x3405)
+ #define IMX290_PHY_LANE_NUM CCI_REG8(0x3407)
+ #define IMX290_OPB_SIZE_V CCI_REG8(0x3414)
+-#define IMX290_Y_OUT_SIZE CCI_REG16(0x3418)
+-#define IMX290_CSI_DT_FMT CCI_REG16(0x3441)
++#define IMX290_Y_OUT_SIZE CCI_REG16_LE(0x3418)
++#define IMX290_CSI_DT_FMT CCI_REG16_LE(0x3441)
+ #define IMX290_CSI_DT_FMT_RAW10 0x0a0a
+ #define IMX290_CSI_DT_FMT_RAW12 0x0c0c
+ #define IMX290_CSI_LANE_MODE CCI_REG8(0x3443)
+-#define IMX290_EXTCK_FREQ CCI_REG16(0x3444)
+-#define IMX290_TCLKPOST CCI_REG16(0x3446)
+-#define IMX290_THSZERO CCI_REG16(0x3448)
+-#define IMX290_THSPREPARE CCI_REG16(0x344a)
+-#define IMX290_TCLKTRAIL CCI_REG16(0x344c)
+-#define IMX290_THSTRAIL CCI_REG16(0x344e)
+-#define IMX290_TCLKZERO CCI_REG16(0x3450)
+-#define IMX290_TCLKPREPARE CCI_REG16(0x3452)
+-#define IMX290_TLPX CCI_REG16(0x3454)
+-#define IMX290_X_OUT_SIZE CCI_REG16(0x3472)
++#define IMX290_EXTCK_FREQ CCI_REG16_LE(0x3444)
++#define IMX290_TCLKPOST CCI_REG16_LE(0x3446)
++#define IMX290_THSZERO CCI_REG16_LE(0x3448)
++#define IMX290_THSPREPARE CCI_REG16_LE(0x344a)
++#define IMX290_TCLKTRAIL CCI_REG16_LE(0x344c)
++#define IMX290_THSTRAIL CCI_REG16_LE(0x344e)
++#define IMX290_TCLKZERO CCI_REG16_LE(0x3450)
++#define IMX290_TCLKPREPARE CCI_REG16_LE(0x3452)
++#define IMX290_TLPX CCI_REG16_LE(0x3454)
++#define IMX290_X_OUT_SIZE CCI_REG16_LE(0x3472)
+ #define IMX290_INCKSEL7 CCI_REG8(0x3480)
+
+ #define IMX290_PGCTRL_REGEN BIT(0)
+@@ -150,10 +150,10 @@
+
+ #define IMX290_PIXEL_ARRAY_WIDTH 1945
+ #define IMX290_PIXEL_ARRAY_HEIGHT 1097
+-#define IMX920_PIXEL_ARRAY_MARGIN_LEFT 12
+-#define IMX920_PIXEL_ARRAY_MARGIN_RIGHT 13
+-#define IMX920_PIXEL_ARRAY_MARGIN_TOP 8
+-#define IMX920_PIXEL_ARRAY_MARGIN_BOTTOM 9
++#define IMX290_PIXEL_ARRAY_MARGIN_LEFT 12
++#define IMX290_PIXEL_ARRAY_MARGIN_RIGHT 13
++#define IMX290_PIXEL_ARRAY_MARGIN_TOP 8
++#define IMX290_PIXEL_ARRAY_MARGIN_BOTTOM 9
+ #define IMX290_PIXEL_ARRAY_RECORDING_WIDTH 1920
+ #define IMX290_PIXEL_ARRAY_RECORDING_HEIGHT 1080
+
+@@ -1161,10 +1161,10 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
+ * The sensor moves the readout by 1 pixel based on flips to
+ * keep the Bayer order the same.
+ */
+- sel->r.top = IMX920_PIXEL_ARRAY_MARGIN_TOP
++ sel->r.top = IMX290_PIXEL_ARRAY_MARGIN_TOP
+ + (IMX290_PIXEL_ARRAY_RECORDING_HEIGHT - format->height) / 2
+ + imx290->vflip->val;
+- sel->r.left = IMX920_PIXEL_ARRAY_MARGIN_LEFT
++ sel->r.left = IMX290_PIXEL_ARRAY_MARGIN_LEFT
+ + (IMX290_PIXEL_ARRAY_RECORDING_WIDTH - format->width) / 2
+ + imx290->hflip->val;
+ sel->r.width = format->width;
+@@ -1183,8 +1183,8 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+- sel->r.top = IMX920_PIXEL_ARRAY_MARGIN_TOP;
+- sel->r.left = IMX920_PIXEL_ARRAY_MARGIN_LEFT;
++ sel->r.top = IMX290_PIXEL_ARRAY_MARGIN_TOP;
++ sel->r.left = IMX290_PIXEL_ARRAY_MARGIN_LEFT;
+ sel->r.width = IMX290_PIXEL_ARRAY_RECORDING_WIDTH;
+ sel->r.height = IMX290_PIXEL_ARRAY_RECORDING_HEIGHT;
+
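Two imx290 fixes ride together here: the misspelled IMX920_* margin macros are renamed IMX290_*, and the multi-byte register definitions gain _LE variants because this sensor latches its 16- and 24-bit registers little-endian, while the plain CCI width macros assume big-endian order. Byte order is the whole difference:

#include <stdint.h>

/* The same 16-bit value serialised both ways; a sensor that expects
 * little-endian misreads the big-endian ordering. */
void put_be16(uint8_t *buf, uint16_t v)
{
	buf[0] = v >> 8;   buf[1] = v & 0xff;
}

void put_le16(uint8_t *buf, uint16_t v)
{
	buf[0] = v & 0xff; buf[1] = v >> 8;
}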
+diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
+index 482a0b7f040a54..cb3f4fc66a1740 100644
+--- a/drivers/media/i2c/imx335.c
++++ b/drivers/media/i2c/imx335.c
+@@ -75,6 +75,12 @@ struct imx335_reg_list {
+ const struct imx335_reg *regs;
+ };
+
++static const char * const imx335_supply_name[] = {
++ "avdd", /* Analog (2.9V) supply */
++ "ovdd", /* Digital I/O (1.8V) supply */
++ "dvdd", /* Digital Core (1.2V) supply */
++};
++
+ /**
+ * struct imx335_mode - imx335 sensor mode structure
+ * @width: Frame width
+@@ -108,6 +114,7 @@ struct imx335_mode {
+ * @sd: V4L2 sub-device
+ * @pad: Media pad. Only one pad supported
+ * @reset_gpio: Sensor reset gpio
++ * @supplies: Regulator supplies to handle power control
+ * @inclk: Sensor input clock
+ * @ctrl_handler: V4L2 control handler
+ * @link_freq_ctrl: Pointer to link frequency control
+@@ -127,6 +134,8 @@ struct imx335 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct gpio_desc *reset_gpio;
++ struct regulator_bulk_data supplies[ARRAY_SIZE(imx335_supply_name)];
++
+ struct clk *inclk;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *link_freq_ctrl;
+@@ -783,13 +792,24 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
+
+ /* Request optional reset pin */
+ imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset",
+- GPIOD_OUT_LOW);
++ GPIOD_OUT_HIGH);
+ if (IS_ERR(imx335->reset_gpio)) {
+ dev_err(imx335->dev, "failed to get reset gpio %ld",
+ PTR_ERR(imx335->reset_gpio));
+ return PTR_ERR(imx335->reset_gpio);
+ }
+
++ for (i = 0; i < ARRAY_SIZE(imx335_supply_name); i++)
++ imx335->supplies[i].supply = imx335_supply_name[i];
++
++ ret = devm_regulator_bulk_get(imx335->dev,
++ ARRAY_SIZE(imx335_supply_name),
++ imx335->supplies);
++ if (ret) {
++ dev_err(imx335->dev, "Failed to get regulators\n");
++ return ret;
++ }
++
+ /* Get sensor input clock */
+ imx335->inclk = devm_clk_get(imx335->dev, NULL);
+ if (IS_ERR(imx335->inclk)) {
+@@ -868,7 +888,17 @@ static int imx335_power_on(struct device *dev)
+ struct imx335 *imx335 = to_imx335(sd);
+ int ret;
+
+- gpiod_set_value_cansleep(imx335->reset_gpio, 1);
++ ret = regulator_bulk_enable(ARRAY_SIZE(imx335_supply_name),
++ imx335->supplies);
++ if (ret) {
++ dev_err(dev, "%s: failed to enable regulators\n",
++ __func__);
++ return ret;
++ }
++
++ usleep_range(500, 550); /* Tlow */
++
++ gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+
+ ret = clk_prepare_enable(imx335->inclk);
+ if (ret) {
+@@ -876,12 +906,13 @@ static int imx335_power_on(struct device *dev)
+ goto error_reset;
+ }
+
+- usleep_range(20, 22);
++ usleep_range(20, 22); /* T4 */
+
+ return 0;
+
+ error_reset:
+- gpiod_set_value_cansleep(imx335->reset_gpio, 0);
++ gpiod_set_value_cansleep(imx335->reset_gpio, 1);
++ regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
+
+ return ret;
+ }
+@@ -897,9 +928,9 @@ static int imx335_power_off(struct device *dev)
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx335 *imx335 = to_imx335(sd);
+
+- gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+-
++ gpiod_set_value_cansleep(imx335->reset_gpio, 1);
+ clk_disable_unprepare(imx335->inclk);
++ regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
+
+ return 0;
+ }
+@@ -971,8 +1002,8 @@ static int imx335_init_controls(struct imx335 *imx335)
+ imx335->hblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx335_ctrl_ops,
+ V4L2_CID_HBLANK,
+- IMX335_REG_MIN,
+- IMX335_REG_MAX,
++ mode->hblank,
++ mode->hblank,
+ 1, mode->hblank);
+ if (imx335->hblank_ctrl)
+ imx335->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
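imx335 gains proper supply handling: power-on enables avdd/ovdd/dvdd, waits the datasheet's Tlow, then releases reset, and power-off reverses the order. Note the flipped GPIO values throughout: gpiod speaks in logical assertion levels, so requesting the pin GPIOD_OUT_HIGH holds the sensor in reset from probe until power-on. The sequencing, with stand-ins for the regulator and gpiod calls:

static int enable_supplies(void) { return 0; }	/* avdd, ovdd, dvdd */
static void disable_supplies(void) { }
static void set_reset(int asserted) { (void)asserted; }

static int sensor_power_on(void)
{
	int ret = enable_supplies();

	if (ret)
		return ret;
	/* usleep_range(500, 550): Tlow before releasing reset */
	set_reset(0);		/* deassert: sensor starts running */
	return 0;
}

static void sensor_power_off(void)
{
	set_reset(1);		/* assert reset first */
	disable_supplies();	/* then cut the rails */
}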
+diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
+index 9c79ae8dc84284..059a41b7eefc49 100644
+--- a/drivers/media/i2c/imx355.c
++++ b/drivers/media/i2c/imx355.c
+@@ -1788,10 +1788,6 @@ static int imx355_probe(struct i2c_client *client)
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&imx355->sd);
+- if (ret < 0)
+- goto error_media_entity;
+-
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+@@ -1800,9 +1796,15 @@ static int imx355_probe(struct i2c_client *client)
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&imx355->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&imx355->sd.entity);
+
+ error_handler_free:
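The imx355 reordering fixes a probe race: v4l2_async_register_subdev_sensor() publishes the device to userspace, which may immediately exercise runtime PM, so PM must be enabled first, and the new unwind label disables it again so a late failure leaves the PM core balanced. The same pattern repeats in the ov01a10 and ov13b10 hunks below. The reordered tail, with stand-ins:

static void pm_enable(void) { }
static void pm_disable(void) { }
static int register_subdev(void) { return 0; }

static int probe_tail(void)
{
	int ret;

	pm_enable();		/* PM-ready before we become visible */
	ret = register_subdev();	/* last: publishes the device */
	if (ret) {
		pm_disable();	/* unwind in reverse order */
		return ret;
	}
	return 0;
}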
+diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
+index c7e862ae4040f5..8597f98a8dcf80 100644
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -544,14 +544,13 @@ static int imx412_update_controls(struct imx412 *imx412,
+ */
+ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
+ {
+- u32 lpfr, shutter;
++ u32 lpfr;
+ int ret;
+
+ lpfr = imx412->vblank + imx412->cur_mode->height;
+- shutter = lpfr - exposure;
+
+- dev_dbg(imx412->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u",
+- exposure, gain, shutter, lpfr);
++ dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u",
++ exposure, gain, lpfr);
+
+ ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1);
+ if (ret)
+@@ -561,7 +560,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
+ if (ret)
+ goto error_release_group_hold;
+
+- ret = imx412_write_reg(imx412, IMX412_REG_EXPOSURE_CIT, 2, shutter);
++ ret = imx412_write_reg(imx412, IMX412_REG_EXPOSURE_CIT, 2, exposure);
+ if (ret)
+ goto error_release_group_hold;
+
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index be84ff1e2b1705..fc1cf196ef0151 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -1449,7 +1449,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+
+ i2c_mux_mask |= BIT(id);
+ }
+- of_node_put(node);
+ of_node_put(i2c_mux);
+
+ /* Parse the endpoints */
+@@ -1513,7 +1512,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
+ priv->source_mask |= BIT(ep.port);
+ priv->nsources++;
+ }
+- of_node_put(node);
+
+ of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width);
+ switch (priv->bus_width) {
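The max9286 deletions fix reference-count underflows: the for_each_*_of_node() iterators used by both loops take a reference on each child and drop it when advancing (or on normal exit), so the extra of_node_put(node) after each loop released a reference the function no longer owned. A miniature of that iterator contract, with a simplified refcount:

struct node { int refcount; struct node *next_sibling; struct node *first_child; };

static void node_get(struct node *n) { if (n) n->refcount++; }
static void node_put(struct node *n) { if (n) n->refcount--; }

/* Returns the next child with a fresh reference, dropping prev's. */
static struct node *next_child(struct node *parent, struct node *prev)
{
	struct node *next = prev ? prev->next_sibling : parent->first_child;

	node_get(next);
	node_put(prev);
	return next;
}

static int count_children(struct node *parent)
{
	struct node *c;
	int n = 0;

	for (c = next_child(parent, NULL); c; c = next_child(parent, c))
		n++;
	/* Every reference is already dropped by the iterator; one
	 * more node_put() here would underflow the count. */
	return n;
}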
+diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c
+index 2b9e1b3a3bf4fc..9afe9bf50334a9 100644
+--- a/drivers/media/i2c/ov01a10.c
++++ b/drivers/media/i2c/ov01a10.c
+@@ -907,6 +907,7 @@ static void ov01a10_remove(struct i2c_client *client)
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static int ov01a10_probe(struct i2c_client *client)
+@@ -953,17 +954,26 @@ static int ov01a10_probe(struct i2c_client *client)
+ goto err_media_entity_cleanup;
+ }
+
++ /*
++ * Device is already turned on by i2c-core with ACPI domain PM.
++ * Enable runtime PM and turn off the device.
++ */
++ pm_runtime_set_active(&client->dev);
++ pm_runtime_enable(dev);
++ pm_runtime_idle(dev);
++
+ ret = v4l2_async_register_subdev_sensor(&ov01a10->sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+- goto err_media_entity_cleanup;
++ goto err_pm_disable;
+ }
+
+- pm_runtime_enable(dev);
+- pm_runtime_idle(dev);
+-
+ return 0;
+
++err_pm_disable:
++ pm_runtime_disable(dev);
++ pm_runtime_set_suspended(&client->dev);
++
+ err_media_entity_cleanup:
+ media_entity_cleanup(&ov01a10->sd.entity);
+
+diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
+index dbc642c5995b62..124eaa4f9e2ca8 100644
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -1501,7 +1501,7 @@ static int ov13b10_probe(struct i2c_client *client)
+
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+- ov13b10_power_on(&client->dev);
++ ret = ov13b10_power_on(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to power on\n");
+ return ret;
+@@ -1536,24 +1536,27 @@ static int ov13b10_probe(struct i2c_client *client)
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+- if (ret < 0)
+- goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+-
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ if (full_power)
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov13b->sd.entity);
+
+ error_handler_free:
+@@ -1576,6 +1579,7 @@ static void ov13b10_remove(struct i2c_client *client)
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
+diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
+index 72bab0ff8a36a9..6436879f95c015 100644
+--- a/drivers/media/i2c/ov2680.c
++++ b/drivers/media/i2c/ov2680.c
+@@ -1104,25 +1104,24 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
+ sensor->pixel_rate = sensor->link_freq[0] * 2;
+ do_div(sensor->pixel_rate, 10);
+
+- /* Verify bus cfg */
+- if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1) {
+- ret = dev_err_probe(dev, -EINVAL,
+- "only a 1-lane CSI2 config is supported");
+- goto out_free_bus_cfg;
++ if (!bus_cfg.nr_of_link_frequencies) {
++ dev_warn(dev, "Consider passing 'link-frequencies' in DT\n");
++ goto skip_link_freq_validation;
+ }
+
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.link_frequencies[i] == sensor->link_freq[0])
+ break;
+
+- if (bus_cfg.nr_of_link_frequencies == 0 ||
+- bus_cfg.nr_of_link_frequencies == i) {
++ if (bus_cfg.nr_of_link_frequencies == i) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "supported link freq %lld not found\n",
+ sensor->link_freq[0]);
+ goto out_free_bus_cfg;
+ }
+
++skip_link_freq_validation:
++ ret = 0;
+ out_free_bus_cfg:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return ret;
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 5fe85aa2d2ec42..40532f7bcabea8 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
+ return 0;
+ }
+
++static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
++{
++ const struct ov5640_mode_info *mode = sensor->current_mode;
++
++ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
++ OV5640_MAX_VTS - mode->height, 1, vblank);
++
++ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++}
++
+ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ {
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
+ struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
+- const struct ov5640_timings *timings;
++ const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
+ s32 exposure_val, exposure_max;
+ unsigned int hblank;
+ unsigned int i = 0;
+@@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
+
++ __v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
++
+ return 0;
+ }
+
+@@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
+ __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
+
+- timings = ov5640_timings(sensor, mode);
+ hblank = timings->htot - mode->width;
+ __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
+ hblank, hblank, 1, hblank);
+
+ vblank = timings->vblank_def;
+- __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+- OV5640_MAX_VTS - mode->height, 1, vblank);
+- __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
++ __v4l2_ctrl_vblank_update(sensor, vblank);
+
+ exposure_max = timings->crop.height + vblank - 4;
+ exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
+@@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client)
+ ret = ov5640_sensor_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to power on\n");
+- goto entity_cleanup;
++ goto free_ctrls;
+ }
+
+ pm_runtime_set_active(dev);
+@@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client)
+ err_pm_runtime:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+- v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ ov5640_sensor_suspend(dev);
++free_ctrls:
++ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+ entity_cleanup:
+ media_entity_cleanup(&sensor->sd.entity);
+ mutex_destroy(&sensor->lock);
+diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
+index d5a2a5f823124b..c499e7e93c54b9 100644
+--- a/drivers/media/i2c/ov5675.c
++++ b/drivers/media/i2c/ov5675.c
+@@ -979,12 +979,10 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
+
+ static int ov5675_power_off(struct device *dev)
+ {
+- /* 512 xvclk cycles after the last SCCB transation or MIPI frame end */
+- u32 delay_us = DIV_ROUND_UP(512, OV5675_XVCLK_19_2 / 1000 / 1000);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov5675 *ov5675 = to_ov5675(sd);
+
+- usleep_range(delay_us, delay_us * 2);
++ usleep_range(90, 100);
+
+ clk_disable_unprepare(ov5675->xvclk);
+ gpiod_set_value_cansleep(ov5675->reset_gpio, 1);
+@@ -995,7 +993,6 @@ static int ov5675_power_off(struct device *dev)
+
+ static int ov5675_power_on(struct device *dev)
+ {
+- u32 delay_us = DIV_ROUND_UP(8192, OV5675_XVCLK_19_2 / 1000 / 1000);
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov5675 *ov5675 = to_ov5675(sd);
+ int ret;
+@@ -1021,8 +1018,11 @@ static int ov5675_power_on(struct device *dev)
+
+ gpiod_set_value_cansleep(ov5675->reset_gpio, 0);
+
+- /* 8192 xvclk cycles prior to the first SCCB transation */
+- usleep_range(delay_us, delay_us * 2);
++	/* Worst case quiescence gap is 1.365 milliseconds @ 6MHz XVCLK
++ * Add an additional threshold grace period to ensure reset
++ * completion before initiating our first I2C transaction.
++ */
++ usleep_range(1500, 1600);
+
+ return 0;
+ }
+diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
+index b6244772bc5933..b36fc0fedad481 100644
+--- a/drivers/media/i2c/ov9734.c
++++ b/drivers/media/i2c/ov9734.c
+@@ -939,6 +939,7 @@ static void ov9734_remove(struct i2c_client *client)
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ mutex_destroy(&ov9734->mutex);
+ }
+
+@@ -984,13 +985,6 @@ static int ov9734_probe(struct i2c_client *client)
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
+- if (ret < 0) {
+- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+- ret);
+- goto probe_error_media_entity_cleanup;
+- }
+-
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+@@ -999,9 +993,18 @@ static int ov9734_probe(struct i2c_client *client)
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
++ if (ret < 0) {
++ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
++ ret);
++ goto probe_error_media_entity_cleanup_pm;
++ }
++
+ return 0;
+
+-probe_error_media_entity_cleanup:
++probe_error_media_entity_cleanup_pm:
++ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov9734->sd.entity);
+
+ probe_error_v4l2_ctrl_handler_free:
+diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
+index fa27638edc0723..dab14787116b6d 100644
+--- a/drivers/media/i2c/st-mipid02.c
++++ b/drivers/media/i2c/st-mipid02.c
+@@ -770,6 +770,7 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
+ struct v4l2_subdev_format *format)
+ {
+ struct mipid02_dev *bridge = to_mipid02_dev(sd);
++ struct v4l2_subdev_format source_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+ format->format.code = get_fmt_code(format->format.code);
+@@ -781,8 +782,12 @@ static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
+
+ *fmt = format->format;
+
+- /* Propagate the format change to the source pad */
+- mipid02_set_fmt_source(sd, sd_state, format);
++ /*
++ * Propagate the format change to the source pad, taking
++ * care not to update the format pointer given back to user
++ */
++ source_fmt = *format;
++ mipid02_set_fmt_source(sd, sd_state, &source_fmt);
+ }
+
+ static int mipid02_set_fmt(struct v4l2_subdev *sd,
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 2785935da497b8..558152575d1022 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -2091,9 +2091,6 @@ static int tc358743_probe(struct i2c_client *client)
+ state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+
+ sd->dev = &client->dev;
+- err = v4l2_async_register_subdev(sd);
+- if (err < 0)
+- goto err_hdl;
+
+ mutex_init(&state->confctl_mutex);
+
+@@ -2151,6 +2148,10 @@ static int tc358743_probe(struct i2c_client *client)
+ if (err)
+ goto err_work_queues;
+
++ err = v4l2_async_register_subdev(sd);
++ if (err < 0)
++ goto err_work_queues;
++
+ v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+
+diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
+index 680fbb3a934022..94abd042045dab 100644
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -246,15 +246,14 @@ int __must_check media_devnode_register(struct media_device *mdev,
+ kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
+
+ /* Part 3: Add the media and char device */
++ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ ret = cdev_device_add(&devnode->cdev, &devnode->dev);
+ if (ret < 0) {
++ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ pr_err("%s: cdev_device_add failed\n", __func__);
+ goto cdev_add_error;
+ }
+
+- /* Part 4: Activate this minor. The char device can now be used. */
+- set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+-
+ return 0;
+
+ cdev_add_error:
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 83468d4a440b35..951b79ca125cdd 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -522,14 +522,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
+
+ /*
+ * Move the top entry link cursor to the next link. If all links of the entry
+- * have been visited, pop the entry itself.
++ * have been visited, pop the entry itself. Return true if the entry has been
++ * popped.
+ */
+-static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
++static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ {
+ struct media_pipeline_walk_entry *entry;
+
+ if (WARN_ON(walk->stack.top < 0))
+- return;
++ return false;
+
+ entry = media_pipeline_walk_top(walk);
+
+@@ -539,7 +540,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ walk->stack.top);
+
+ walk->stack.top--;
+- return;
++ return true;
+ }
+
+ entry->links = entry->links->next;
+@@ -547,6 +548,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: moved entry %u to next link\n",
+ walk->stack.top);
++
++ return false;
+ }
+
+ /* Free all memory allocated while walking the pipeline. */
+@@ -592,30 +595,30 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ struct media_pipeline_walk *walk)
+ {
+ struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
+- struct media_pad *pad;
++ struct media_pad *origin;
+ struct media_link *link;
+ struct media_pad *local;
+ struct media_pad *remote;
++ bool last_link;
+ int ret;
+
+- pad = entry->pad;
++ origin = entry->pad;
+ link = list_entry(entry->links, typeof(*link), list);
+- media_pipeline_walk_pop(walk);
++ last_link = media_pipeline_walk_pop(walk);
++
++ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
++ dev_dbg(walk->mdev->dev,
++ "media pipeline: skipping link (not data-link)\n");
++ return 0;
++ }
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: exploring link '%s':%u -> '%s':%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+- /* Skip links that are not enabled. */
+- if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+- dev_dbg(walk->mdev->dev,
+- "media pipeline: skipping link (disabled)\n");
+- return 0;
+- }
+-
+ /* Get the local pad and remote pad. */
+- if (link->source->entity == pad->entity) {
++ if (link->source->entity == origin->entity) {
+ local = link->source;
+ remote = link->sink;
+ } else {
+@@ -627,25 +630,64 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ * Skip links that originate from a different pad than the incoming pad
+ * that is not connected internally in the entity to the incoming pad.
+ */
+- if (pad != local &&
+- !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
++ if (origin != local &&
++ !media_entity_has_pad_interdep(origin->entity, origin->index,
++ local->index)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (no route)\n");
+- return 0;
++ goto done;
+ }
+
+ /*
+- * Add the local and remote pads of the link to the pipeline and push
+- * them to the stack, if they're not already present.
++ * Add the local pad of the link to the pipeline and push it to the
++ * stack, if not already present.
+ */
+ ret = media_pipeline_add_pad(pipe, walk, local);
+ if (ret)
+ return ret;
+
++ /* Similarly, add the remote pad, but only if the link is enabled. */
++ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
++ dev_dbg(walk->mdev->dev,
++ "media pipeline: skipping link (disabled)\n");
++ goto done;
++ }
++
+ ret = media_pipeline_add_pad(pipe, walk, remote);
+ if (ret)
+ return ret;
+
++done:
++ /*
++ * If we're done iterating over links, iterate over pads of the entity.
++ * This is necessary to discover pads that are not connected with any
++ * link. Those are dead ends from a pipeline exploration point of view,
++ * but are still part of the pipeline and need to be added to enable
++ * proper validation.
++ */
++ if (!last_link)
++ return 0;
++
++ dev_dbg(walk->mdev->dev,
++ "media pipeline: adding unconnected pads of '%s'\n",
++ local->entity->name);
++
++ media_entity_for_each_pad(origin->entity, local) {
++ /*
++		 * Skip the origin pad (already handled), pads that have links
++ * (already discovered through iterating over links) and pads
++ * not internally connected.
++ */
++ if (origin == local || !local->num_links ||
++ !media_entity_has_pad_interdep(origin->entity, origin->index,
++ local->index))
++ continue;
++
++ ret = media_pipeline_add_pad(pipe, walk, local);
++ if (ret)
++ return ret;
++ }
++
+ return 0;
+ }
+
+@@ -757,7 +799,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ struct media_pad *pad = ppad->pad;
+ struct media_entity *entity = pad->entity;
+ bool has_enabled_link = false;
+- bool has_link = false;
+ struct media_link *link;
+
+ dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
+@@ -787,7 +828,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ /* Record if the pad has links and enabled links. */
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ has_enabled_link = true;
+- has_link = true;
+
+ /*
+ * Validate the link if it's enabled and has the
+@@ -825,7 +865,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
+ * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
+ * ensure that it has either no link or an enabled link.
+ */
+- if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
++ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) &&
+ !has_enabled_link) {
+ dev_dbg(mdev->dev,
+ "Pad '%s':%u must be connected by an enabled link\n",
+@@ -1025,6 +1065,9 @@ static void __media_entity_remove_link(struct media_entity *entity,
+
+ /* Remove the reverse links for a data link. */
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
++ link->source->num_links--;
++ link->sink->num_links--;
++
+ if (link->source->entity == entity)
+ remote = link->sink->entity;
+ else
+@@ -1079,6 +1122,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ struct media_link *link;
+ struct media_link *backlink;
+
++ if (flags & MEDIA_LNK_FL_LINK_TYPE)
++ return -EINVAL;
++
++ flags |= MEDIA_LNK_FL_DATA_LINK;
++
+ if (WARN_ON(!source || !sink) ||
+ WARN_ON(source_pad >= source->num_pads) ||
+ WARN_ON(sink_pad >= sink->num_pads))
+@@ -1094,7 +1142,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+
+ link->source = &source->pads[source_pad];
+ link->sink = &sink->pads[sink_pad];
+- link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
++ link->flags = flags;
+
+ /* Initialize graph object embedded at the new link */
+ media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
+@@ -1125,6 +1173,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
+ sink->num_links++;
+ source->num_links++;
+
++ link->source->num_links++;
++ link->sink->num_links++;
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(media_create_pad_link);
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index aa708a0e5eac67..49a3dd70ec0f77 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -1536,13 +1536,11 @@ static void buf_cleanup(struct vb2_buffer *vb)
+
+ static int start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+- int ret = 1;
+ int seqnr = 0;
+ struct bttv_buffer *buf;
+ struct bttv *btv = vb2_get_drv_priv(q);
+
+- ret = check_alloc_btres_lock(btv, RESOURCE_VIDEO_STREAM);
+- if (ret == 0) {
++ if (!check_alloc_btres_lock(btv, RESOURCE_VIDEO_STREAM)) {
+ if (btv->field_count)
+ seqnr++;
+ while (!list_empty(&btv->capture)) {
+@@ -1553,7 +1551,7 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
+ vb2_buffer_done(&buf->vbuf.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+- return !ret;
++ return -EBUSY;
+ }
+ if (!vb2_is_streaming(&btv->vbiq)) {
+ init_irqreg(btv);
+@@ -2774,6 +2772,27 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
+ return;
+ wakeup->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ wakeup->vbuf.sequence = btv->field_count >> 1;
++
++ /*
++ * Ugly hack for backwards compatibility.
++ * Some applications expect that the last 4 bytes of
++	 * the VBI data contain the sequence number.
++ *
++ * This makes it possible to associate the VBI data
++ * with the video frame if you use read() to get the
++ * VBI data.
++ */
++ if (vb2_fileio_is_active(wakeup->vbuf.vb2_buf.vb2_queue)) {
++ u32 *vaddr = vb2_plane_vaddr(&wakeup->vbuf.vb2_buf, 0);
++ unsigned long size =
++ vb2_get_plane_payload(&wakeup->vbuf.vb2_buf, 0) / 4;
++
++ if (vaddr && size) {
++ vaddr += size - 1;
++ *vaddr = wakeup->vbuf.sequence;
++ }
++ }
++
+ vb2_buffer_done(&wakeup->vbuf.vb2_buf, state);
+ if (btv->field_count == 0)
+ btor(BT848_INT_VSYNC, BT848_INT_MASK);
+@@ -3474,6 +3493,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
+
+ /* free resources */
+ free_irq(btv->c.pci->irq,btv);
++ del_timer_sync(&btv->timeout);
+ iounmap(btv->bt848_mmio);
+ release_mem_region(pci_resource_start(btv->c.pci,0),
+ pci_resource_len(btv->c.pci,0));
+diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
+index ab213e51ec95f1..e489a3acb4b98a 100644
+--- a/drivers/media/pci/bt8xx/bttv-vbi.c
++++ b/drivers/media/pci/bt8xx/bttv-vbi.c
+@@ -123,14 +123,12 @@ static void buf_cleanup_vbi(struct vb2_buffer *vb)
+
+ static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
+ {
+- int ret;
+ int seqnr = 0;
+ struct bttv_buffer *buf;
+ struct bttv *btv = vb2_get_drv_priv(q);
+
+ btv->framedrop = 0;
+- ret = check_alloc_btres_lock(btv, RESOURCE_VBI);
+- if (ret == 0) {
++ if (!check_alloc_btres_lock(btv, RESOURCE_VBI)) {
+ if (btv->field_count)
+ seqnr++;
+ while (!list_empty(&btv->vcapture)) {
+@@ -141,13 +139,13 @@ static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
+ vb2_buffer_done(&buf->vbuf.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+- return !ret;
++ return -EBUSY;
+ }
+ if (!vb2_is_streaming(&btv->capq)) {
+ init_irqreg(btv);
+ btv->field_count = 0;
+ }
+- return !ret;
++ return 0;
+ }
+
+ static void stop_streaming_vbi(struct vb2_queue *q)
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
+index 74edcc76d12f40..6e1a0614e6d069 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.c
++++ b/drivers/media/pci/cobalt/cobalt-driver.c
+@@ -8,6 +8,7 @@
+ * All rights reserved.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <media/i2c/adv7604.h>
+ #include <media/i2c/adv7842.h>
+@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
+ cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
+ capa, get_link_speed(capa),
+- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ cobalt_info("PCIe link control 0x%04x\n", ctrl);
+ cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
+ stat, get_link_speed(stat),
+- (stat & PCI_EXP_LNKSTA_NLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
+
+ /* Bus */
+ pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
+ cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
+ capa, get_link_speed(capa),
+- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+
+ /* Slot */
+ pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
+@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
+ if (!pci_is_pcie(pci_dev))
+ return 0;
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
+- return (link & PCI_EXP_LNKSTA_NLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
+ }
+
+ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+ if (!pci_is_pcie(pci_dev))
+ return 0;
+ pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
+- return (link & PCI_EXP_LNKCAP_MLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
+ }
+
+ static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 9af2c5596121cb..51d7d720ec48b1 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -1354,6 +1354,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
+ /* register Video device */
+ dev->video_dev = cx23885_vdev_init(dev, dev->pci,
+ &cx23885_video_template, "video");
++ if (!dev->video_dev) {
++ err = -ENOMEM;
++ goto fail_unreg;
++ }
+ dev->video_dev->queue = &dev->vb2_vidq;
+ dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
+@@ -1382,6 +1386,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
+ /* register VBI device */
+ dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
+ &cx23885_vbi_template, "vbi");
++ if (!dev->vbi_dev) {
++ err = -ENOMEM;
++ goto fail_unreg;
++ }
+ dev->vbi_dev->queue = &dev->vb2_vbiq;
+ dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE;
+diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
+index 91733ab9f58c35..363badab7cf07b 100644
+--- a/drivers/media/pci/ddbridge/ddbridge-main.c
++++ b/drivers/media/pci/ddbridge/ddbridge-main.c
+@@ -238,7 +238,7 @@ static int ddb_probe(struct pci_dev *pdev,
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+- return -1;
++ return stat;
+ }
+
+ /****************************************************************************/
+diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
+index e38198e259c03c..bd58adb4c2b456 100644
+--- a/drivers/media/pci/intel/ipu-bridge.c
++++ b/drivers/media/pci/intel/ipu-bridge.c
+@@ -14,6 +14,8 @@
+ #include <media/ipu-bridge.h>
+ #include <media/v4l2-fwnode.h>
+
++#define ADEV_DEV(adev) ACPI_PTR(&((adev)->dev))
++
+ /*
+ * 92335fcf-3203-4472-af93-7b4453ac29da
+ *
+@@ -84,6 +86,7 @@ static const char * const ipu_vcm_types[] = {
+ "lc898212axb",
+ };
+
++#if IS_ENABLED(CONFIG_ACPI)
+ /*
+ * Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
+ * instead of device and driver match to probe IVSC device.
+@@ -97,13 +100,13 @@ static const struct acpi_device_id ivsc_acpi_ids[] = {
+
+ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
+ {
+- acpi_handle handle = acpi_device_handle(adev);
+- struct acpi_device *consumer, *ivsc_adev;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
+ const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
++ struct acpi_device *consumer, *ivsc_adev;
+
++ acpi_handle handle = acpi_device_handle(adev);
+ for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
+ /* camera sensor depends on IVSC in DSDT if exist */
+ for_each_acpi_consumer_dev(ivsc_adev, consumer)
+@@ -115,6 +118,12 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
+
+ return NULL;
+ }
++#else
++static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
++{
++ return NULL;
++}
++#endif
+
+ static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
+ {
+@@ -160,7 +169,7 @@ static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
+ csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
+ if (!csi_dev) {
+ acpi_dev_put(adev);
+- dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
++ dev_err(ADEV_DEV(adev), "Failed to find MEI CSI dev\n");
+ return -ENODEV;
+ }
+
+@@ -179,24 +188,25 @@ static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ acpi_status status;
+ int ret = 0;
+
+- status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
++ status = acpi_evaluate_object(ACPI_PTR(adev->handle),
++ id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = buffer.pointer;
+ if (!obj) {
+- dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
++ dev_err(ADEV_DEV(adev), "Couldn't locate ACPI buffer\n");
+ return -ENODEV;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+- dev_err(&adev->dev, "Not an ACPI buffer\n");
++ dev_err(ADEV_DEV(adev), "Not an ACPI buffer\n");
+ ret = -ENODEV;
+ goto out_free_buff;
+ }
+
+ if (obj->buffer.length > size) {
+- dev_err(&adev->dev, "Given buffer is too small\n");
++ dev_err(ADEV_DEV(adev), "Given buffer is too small\n");
+ ret = -EINVAL;
+ goto out_free_buff;
+ }
+@@ -217,7 +227,7 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
+ case IPU_SENSOR_ROTATION_INVERTED:
+ return 180;
+ default:
+- dev_warn(&adev->dev,
++ dev_warn(ADEV_DEV(adev),
+ "Unknown rotation %d. Assume 0 degree rotation\n",
+ ssdb->degree);
+ return 0;
+@@ -227,12 +237,14 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
+ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
+ {
+ enum v4l2_fwnode_orientation orientation;
+- struct acpi_pld_info *pld;
+- acpi_status status;
++ struct acpi_pld_info *pld = NULL;
++ acpi_status status = AE_ERROR;
+
++#if IS_ENABLED(CONFIG_ACPI)
+ status = acpi_get_physical_device_location(adev->handle, &pld);
++#endif
+ if (ACPI_FAILURE(status)) {
+- dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
++ dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ }
+
+@@ -250,7 +262,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
+ orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ break;
+ default:
+- dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
++ dev_warn(ADEV_DEV(adev), "Unknown _PLD panel val %d\n",
++ pld->panel);
+ orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ break;
+ }
+@@ -269,12 +282,12 @@ int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
+ return ret;
+
+ if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
+- dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
++ dev_warn(ADEV_DEV(adev), "Unknown VCM type %d\n", ssdb.vcmtype);
+ ssdb.vcmtype = 0;
+ }
+
+ if (ssdb.lanes > IPU_MAX_LANES) {
+- dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
++ dev_err(ADEV_DEV(adev), "Number of lanes in SSDB is invalid\n");
+ return -EINVAL;
+ }
+
+@@ -462,8 +475,14 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
+ sensor->ipu_properties);
+
+ if (sensor->csi_dev) {
++ const char *device_hid = "";
++
++#if IS_ENABLED(CONFIG_ACPI)
++ device_hid = acpi_device_hid(sensor->ivsc_adev);
++#endif
++
+ snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
+- acpi_device_hid(sensor->ivsc_adev), sensor->link);
++ device_hid, sensor->link);
+
+ nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
+ sensor->ivsc_properties);
+@@ -628,11 +647,15 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
+ {
+ struct fwnode_handle *fwnode, *primary;
+ struct ipu_sensor *sensor;
+- struct acpi_device *adev;
++ struct acpi_device *adev = NULL;
+ int ret;
+
++#if IS_ENABLED(CONFIG_ACPI)
+ for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+- if (!adev->status.enabled)
++#else
++ while (true) {
++#endif
++ if (!ACPI_PTR(adev->status.enabled))
+ continue;
+
+ if (bridge->n_sensors >= IPU_MAX_PORTS) {
+@@ -668,7 +691,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
+ goto err_free_swnodes;
+ }
+
+- sensor->adev = acpi_dev_get(adev);
++ sensor->adev = ACPI_PTR(acpi_dev_get(adev));
+
+ primary = acpi_fwnode_handle(adev);
+ primary->secondary = fwnode;
+@@ -724,11 +747,16 @@ static int ipu_bridge_ivsc_is_ready(void)
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
++#if IS_ENABLED(CONFIG_ACPI)
+ const struct ipu_sensor_config *cfg =
+ &ipu_supported_sensors[i];
+
+ for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
+- if (!sensor_adev->status.enabled)
++#else
++ while (true) {
++ sensor_adev = NULL;
++#endif
++ if (!ACPI_PTR(sensor_adev->status.enabled))
+ continue;
+
+ adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+index 5dd69a251b6a9c..423842d2a5b2b2 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -1803,11 +1803,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+
+ v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
+
+- /* Register notifier for subdevices we care */
+- r = cio2_parse_firmware(cio2);
+- if (r)
+- goto fail_clean_notifier;
+-
+ r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ CIO2_NAME, cio2);
+ if (r) {
+@@ -1815,6 +1810,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ goto fail_clean_notifier;
+ }
+
++ /* Register notifier for subdevices we care */
++ r = cio2_parse_firmware(cio2);
++ if (r)
++ goto fail_clean_notifier;
++
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
+
+diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
+index 00ba611e0f68dd..685b2ec96071a4 100644
+--- a/drivers/media/pci/intel/ivsc/mei_csi.c
++++ b/drivers/media/pci/intel/ivsc/mei_csi.c
+@@ -72,8 +72,8 @@ enum ivsc_privacy_status {
+ };
+
+ enum csi_pads {
+- CSI_PAD_SOURCE,
+ CSI_PAD_SINK,
++ CSI_PAD_SOURCE,
+ CSI_NUM_PADS
+ };
+
+@@ -124,6 +124,8 @@ struct mei_csi {
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *freq_ctrl;
+ struct v4l2_ctrl *privacy_ctrl;
++ /* lock for v4l2 controls */
++ struct mutex ctrl_lock;
+ unsigned int remote_pad;
+ /* start streaming or not */
+ int streaming;
+@@ -189,7 +191,11 @@ static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len)
+
+ /* command response status */
+ ret = csi->cmd_response.status;
+- if (ret) {
++ if (ret == -1) {
++ /* notify privacy on instead of reporting error */
++ ret = 0;
++ v4l2_ctrl_s_ctrl(csi->privacy_ctrl, 1);
++ } else if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -585,7 +591,7 @@ static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ csi->remote_pad = pad;
+
+ return media_create_pad_link(&subdev->entity, pad,
+- &csi->subdev.entity, 1,
++ &csi->subdev.entity, CSI_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ }
+@@ -609,11 +615,13 @@ static int mei_csi_init_controls(struct mei_csi *csi)
+ u32 max;
+ int ret;
+
++ mutex_init(&csi->ctrl_lock);
++
+ ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2);
+ if (ret)
+ return ret;
+
+- csi->ctrl_handler.lock = &csi->lock;
++ csi->ctrl_handler.lock = &csi->ctrl_lock;
+
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler,
+@@ -772,6 +780,7 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
+
+ err_ctrl_handler:
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
++ mutex_destroy(&csi->ctrl_lock);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+
+@@ -791,6 +800,7 @@ static void mei_csi_remove(struct mei_cl_device *cldev)
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
++ mutex_destroy(&csi->ctrl_lock);
+ v4l2_async_unregister_subdev(&csi->subdev);
+ v4l2_subdev_cleanup(&csi->subdev);
+ media_entity_cleanup(&csi->subdev.entity);
+diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
+index 99b9f55ca82922..f467a00492f4b0 100644
+--- a/drivers/media/pci/ivtv/ivtv-udma.c
++++ b/drivers/media/pci/ivtv/ivtv-udma.c
+@@ -131,6 +131,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
+
+ /* Fill SG List with new values */
+ if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
++ IVTV_DEBUG_WARN("%s: could not allocate bounce buffers for highmem userspace buffers\n",
++ __func__);
+ unpin_user_pages(dma->map, dma->page_count);
+ dma->page_count = 0;
+ return -ENOMEM;
+@@ -139,6 +141,12 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
+ /* Map SG List */
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
++ if (!dma->SG_length) {
++ IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
++ unpin_user_pages(dma->map, dma->page_count);
++ dma->page_count = 0;
++ return -EINVAL;
++ }
+
+ /* Fill SG Array with new values */
+ ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
+diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
+index 582146f8d70d5f..2d9274537725af 100644
+--- a/drivers/media/pci/ivtv/ivtv-yuv.c
++++ b/drivers/media/pci/ivtv/ivtv-yuv.c
+@@ -114,6 +114,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
+ }
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
++ if (!dma->SG_length) {
++ IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
++ unpin_user_pages(dma->map, dma->page_count);
++ dma->page_count = 0;
++ return -EINVAL;
++ }
+
+ /* Fill SG Array with new values */
+ ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
+diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
+index 23c8c094e791b9..9cdd14a3033c98 100644
+--- a/drivers/media/pci/ivtv/ivtvfb.c
++++ b/drivers/media/pci/ivtv/ivtvfb.c
+@@ -281,10 +281,10 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
+ /* Map User DMA */
+ if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
+ mutex_unlock(&itv->udma.lock);
+- IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with pin_user_pages: %d bytes, %d pages returned\n",
+- size_in_bytes, itv->udma.page_count);
++ IVTVFB_WARN("%s, Error in ivtv_udma_setup: %d bytes, %d pages returned\n",
++ __func__, size_in_bytes, itv->udma.page_count);
+
+- /* pin_user_pages must have failed completely */
++ /* pin_user_pages or DMA must have failed completely */
+ return -EIO;
+ }
+
+diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
+index 7481f553f95958..24ec576dc3bff5 100644
+--- a/drivers/media/pci/ngene/ngene-core.c
++++ b/drivers/media/pci/ngene/ngene-core.c
+@@ -1488,7 +1488,9 @@ static int init_channel(struct ngene_channel *chan)
+ }
+
+ if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
+- dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
++ ret = dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
++ if (ret != 0)
++ goto err;
+ set_transfer(chan, 1);
+ chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
+ set_transfer(&chan->dev->channel[2], 1);
+diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
+index 9c6cfef03331d0..a66df6adfaad8c 100644
+--- a/drivers/media/pci/saa7134/saa7134-dvb.c
++++ b/drivers/media/pci/saa7134/saa7134-dvb.c
+@@ -466,7 +466,9 @@ static int philips_europa_tuner_sleep(struct dvb_frontend *fe)
+ /* switch the board to analog mode */
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+- i2c_transfer(&dev->i2c_adap, &analog_msg, 1);
++ if (i2c_transfer(&dev->i2c_adap, &analog_msg, 1) != 1)
++ return -EIO;
++
+ return 0;
+ }
+
+@@ -1018,7 +1020,9 @@ static int md8800_set_voltage2(struct dvb_frontend *fe,
+ else
+ wbuf[1] = rbuf & 0xef;
+ msg[0].len = 2;
+- i2c_transfer(&dev->i2c_adap, msg, 1);
++ if (i2c_transfer(&dev->i2c_adap, msg, 1) != 1)
++ return -EIO;
++
+ return 0;
+ }
+
+diff --git a/drivers/media/pci/solo6x10/solo6x10-offsets.h b/drivers/media/pci/solo6x10/solo6x10-offsets.h
+index f414ee1316f29c..fdbb817e63601c 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-offsets.h
++++ b/drivers/media/pci/solo6x10/solo6x10-offsets.h
+@@ -57,16 +57,16 @@
+ #define SOLO_MP4E_EXT_ADDR(__solo) \
+ (SOLO_EREF_EXT_ADDR(__solo) + SOLO_EREF_EXT_AREA(__solo))
+ #define SOLO_MP4E_EXT_SIZE(__solo) \
+- max((__solo->nr_chans * 0x00080000), \
+- min(((__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo)) - \
+- __SOLO_JPEG_MIN_SIZE(__solo)), 0x00ff0000))
++ clamp(__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo) - \
++ __SOLO_JPEG_MIN_SIZE(__solo), \
++ __solo->nr_chans * 0x00080000, 0x00ff0000)
+
+ #define __SOLO_JPEG_MIN_SIZE(__solo) (__solo->nr_chans * 0x00080000)
+ #define SOLO_JPEG_EXT_ADDR(__solo) \
+ (SOLO_MP4E_EXT_ADDR(__solo) + SOLO_MP4E_EXT_SIZE(__solo))
+ #define SOLO_JPEG_EXT_SIZE(__solo) \
+- max(__SOLO_JPEG_MIN_SIZE(__solo), \
+- min((__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo)), 0x00ff0000))
++ clamp(__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo), \
++ __SOLO_JPEG_MIN_SIZE(__solo), 0x00ff0000)
+
+ #define SOLO_SDRAM_END(__solo) \
+ (SOLO_JPEG_EXT_ADDR(__solo) + SOLO_JPEG_EXT_SIZE(__solo))
+diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
+index e4cf9d63e926df..364ce9e5701827 100644
+--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
++++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
+@@ -757,7 +757,7 @@ static const struct video_device video_dev_template = {
+ /**
+ * vip_irq - interrupt routine
+ * @irq: Number of interrupt ( not used, correct number is assumed )
+- * @vip: local data structure containing all information
++ * @data: local data structure containing all information
+ *
+ * check for both frame interrupts set ( top and bottom ).
+ * check FIFO overflow, but limit number of log messages after open.
+@@ -767,8 +767,9 @@ static const struct video_device video_dev_template = {
+ *
+ * IRQ_HANDLED, interrupt done.
+ */
+-static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
++static irqreturn_t vip_irq(int irq, void *data)
+ {
++ struct sta2x11_vip *vip = data;
+ unsigned int status;
+
+ status = reg_read(vip, DVP_ITS);
+@@ -1053,9 +1054,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
+
+ spin_lock_init(&vip->slock);
+
+- ret = request_irq(pdev->irq,
+- (irq_handler_t) vip_irq,
+- IRQF_SHARED, KBUILD_MODNAME, vip);
++ ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ ret = -ENODEV;
+diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
+index 230b104a7cdf07..a47c5850ef8758 100644
+--- a/drivers/media/pci/ttpci/budget-av.c
++++ b/drivers/media/pci/ttpci/budget-av.c
+@@ -1463,7 +1463,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
+ budget_av->has_saa7113 = 1;
+ err = saa7146_vv_init(dev, &vv_data);
+ if (err != 0) {
+- /* fixme: proper cleanup here */
++ ttpci_budget_deinit(&budget_av->budget);
++ kfree(budget_av);
+ ERR("cannot init vv subsystem\n");
+ return err;
+ }
+@@ -1472,9 +1473,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
+
+ if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
+- /* fixme: proper cleanup here */
+- ERR("cannot register capture v4l2 device\n");
+ saa7146_vv_release(dev);
++ ttpci_budget_deinit(&budget_av->budget);
++ kfree(budget_av);
++ ERR("cannot register capture v4l2 device\n");
+ return err;
+ }
+
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index 133d77d1ea0c30..4f438eaa7d385a 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -195,7 +195,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ struct vdec_t *vdec = inst->priv;
+ int ret = 0;
+
+- vpu_inst_lock(inst);
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
+ vdec->params.display_delay_enable = ctrl->val;
+@@ -207,7 +206,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ ret = -EINVAL;
+ break;
+ }
+- vpu_inst_unlock(inst);
+
+ return ret;
+ }
+diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
+index 4eb57d793a9c0d..16ed4d21519cdb 100644
+--- a/drivers/media/platform/amphion/venc.c
++++ b/drivers/media/platform/amphion/venc.c
+@@ -518,7 +518,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ struct venc_t *venc = inst->priv;
+ int ret = 0;
+
+- vpu_inst_lock(inst);
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ venc->params.profile = ctrl->val;
+@@ -579,7 +578,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+ ret = -EINVAL;
+ break;
+ }
+- vpu_inst_unlock(inst);
+
+ return ret;
+ }
+diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
+index 5a701f64289ef8..0246cf0ac3a8bc 100644
+--- a/drivers/media/platform/amphion/vpu.h
++++ b/drivers/media/platform/amphion/vpu.h
+@@ -154,7 +154,6 @@ struct vpu_core {
+ struct vpu_mbox tx_type;
+ struct vpu_mbox tx_data;
+ struct vpu_mbox rx;
+- unsigned long cmd_seq;
+
+ wait_queue_head_t ack_wq;
+ struct completion cmp;
+@@ -253,6 +252,8 @@ struct vpu_inst {
+
+ struct list_head cmd_q;
+ void *pending;
++ unsigned long cmd_seq;
++ atomic_long_t last_response_cmd;
+
+ struct vpu_inst_ops *ops;
+ const struct vpu_format *formats;
+diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
+index c2337812573ef0..5695f5c1cb3e84 100644
+--- a/drivers/media/platform/amphion/vpu_cmds.c
++++ b/drivers/media/platform/amphion/vpu_cmds.c
+@@ -32,6 +32,7 @@ struct vpu_cmd_t {
+ struct vpu_cmd_request *request;
+ struct vpu_rpc_event *pkt;
+ unsigned long key;
++ atomic_long_t *last_response_cmd;
+ };
+
+ static struct vpu_cmd_request vpu_cmd_requests[] = {
+@@ -115,6 +116,8 @@ static void vpu_free_cmd(struct vpu_cmd_t *cmd)
+ {
+ if (!cmd)
+ return;
++ if (cmd->last_response_cmd)
++ atomic_long_set(cmd->last_response_cmd, cmd->key);
+ vfree(cmd->pkt);
+ vfree(cmd);
+ }
+@@ -172,7 +175,8 @@ static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
+ return -ENOMEM;
+
+ mutex_lock(&core->cmd_lock);
+- cmd->key = core->cmd_seq++;
++ cmd->key = ++inst->cmd_seq;
++ cmd->last_response_cmd = &inst->last_response_cmd;
+ if (key)
+ *key = cmd->key;
+ if (sync)
+@@ -246,26 +250,12 @@ void vpu_clear_request(struct vpu_inst *inst)
+
+ static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
+ {
+- struct vpu_core *core = inst->core;
+- struct vpu_cmd_t *cmd;
+- bool flag = true;
++ unsigned long last_response = atomic_long_read(&inst->last_response_cmd);
+
+- mutex_lock(&core->cmd_lock);
+- cmd = inst->pending;
+- if (cmd && key == cmd->key) {
+- flag = false;
+- goto exit;
+- }
+- list_for_each_entry(cmd, &inst->cmd_q, list) {
+- if (key == cmd->key) {
+- flag = false;
+- break;
+- }
+- }
+-exit:
+- mutex_unlock(&core->cmd_lock);
++ if (key <= last_response && (last_response - key) < (ULONG_MAX >> 1))
++ return true;
+
+- return flag;
++ return false;
+ }
+
+ static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
+diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
+index 1af6fc9460d4db..3a2030d02e45e6 100644
+--- a/drivers/media/platform/amphion/vpu_core.c
++++ b/drivers/media/platform/amphion/vpu_core.c
+@@ -642,7 +642,7 @@ static int vpu_core_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ core->type = core->res->type;
+- core->id = of_alias_get_id(dev->of_node, "vpu_core");
++ core->id = of_alias_get_id(dev->of_node, "vpu-core");
+ if (core->id < 0) {
+ dev_err(dev, "can't get vpu core id\n");
+ return core->id;
+diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
+index 667637eedb5d45..7320852668d647 100644
+--- a/drivers/media/platform/amphion/vpu_defs.h
++++ b/drivers/media/platform/amphion/vpu_defs.h
+@@ -71,6 +71,7 @@ enum {
+ VPU_MSG_ID_TIMESTAMP_INFO,
+ VPU_MSG_ID_FIRMWARE_XCPT,
+ VPU_MSG_ID_PIC_SKIPPED,
++ VPU_MSG_ID_DBG_MSG,
+ };
+
+ enum VPU_ENC_MEMORY_RESOURSE {
+diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
+index af3b336e5dc32d..d12310af9ebce1 100644
+--- a/drivers/media/platform/amphion/vpu_helpers.c
++++ b/drivers/media/platform/amphion/vpu_helpers.c
+@@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id)
+ case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
+ case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
+ case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
++ case VPU_MSG_ID_DBG_MSG: return "debug msg";
+ }
+ return "<unknown>";
+ }
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index f771661980c012..d3425de7bccd31 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -745,6 +745,7 @@ static struct vpu_pair malone_msgs[] = {
+ {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
+ {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
+ {VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
++ {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
+ };
+
+ static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
+diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
+index d0ead051f7d18d..b74a407a19f225 100644
+--- a/drivers/media/platform/amphion/vpu_msgs.c
++++ b/drivers/media/platform/amphion/vpu_msgs.c
+@@ -23,6 +23,7 @@
+ struct vpu_msg_handler {
+ u32 id;
+ void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
++ u32 is_str;
+ };
+
+ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
+ {
+ char *str = (char *)pkt->data;
+
+- if (strlen(str))
++ if (*str)
+ dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
+ else
+ dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
+@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
+ vpu_inst_unlock(inst);
+ }
+
++static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
++{
++ char *str = (char *)pkt->data;
++
++ if (*str)
++ dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
++}
++
++static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
++{
++ if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
++ pkt->hdr.num--;
++ pkt->data[pkt->hdr.num] = 0;
++}
++
+ static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
+ {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
+@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
+ {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
+ {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
+ {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
+- {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
+- {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
++ {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
++ {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
+ {VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
++ {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
+ };
+
+ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
+@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
+ }
+ }
+
+- if (handler && handler->done)
+- handler->done(inst, msg);
++ if (handler) {
++ if (handler->is_str)
++ vpu_terminate_string_msg(msg);
++ if (handler->done)
++ handler->done(inst, msg);
++ }
+
+ vpu_response_cmd(inst, msg_id, 1);
+
+diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
+index 0f6e4c666440ea..d7e0de49b3dcef 100644
+--- a/drivers/media/platform/amphion/vpu_v4l2.c
++++ b/drivers/media/platform/amphion/vpu_v4l2.c
+@@ -716,6 +716,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
+ func = &vpu->decoder;
+
+ atomic_set(&inst->ref_count, 0);
++ atomic_long_set(&inst->last_response_cmd, 0);
+ vpu_inst_get(inst);
+ inst->vpu = vpu;
+ inst->core = vpu_request_core(vpu, inst->type);
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index 0d879d71d81850..2d803cf31e9d1c 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -164,10 +164,6 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+
+ writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
+
+- ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
+- if (ret)
+- goto err_disable_pclk;
+-
+ /* Enable DPHY clk and data lanes. */
+ if (csi2rx->dphy) {
+ reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
+@@ -177,6 +173,13 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ }
+
+ writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
++
++ ret = csi2rx_configure_ext_dphy(csi2rx);
++ if (ret) {
++ dev_err(csi2rx->dev,
++ "Failed to configure external DPHY: %d\n", ret);
++ goto err_disable_pclk;
++ }
+ }
+
+ /*
+@@ -213,14 +216,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+
+ reset_control_deassert(csi2rx->sys_rst);
+
+- if (csi2rx->dphy) {
+- ret = csi2rx_configure_ext_dphy(csi2rx);
+- if (ret) {
+- dev_err(csi2rx->dev,
+- "Failed to configure external DPHY: %d\n", ret);
+- goto err_disable_sysclk;
+- }
+- }
++ ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
++ if (ret)
++ goto err_disable_sysclk;
+
+ clk_disable_unprepare(csi2rx->p_clk);
+
+@@ -234,6 +232,10 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
+ }
+
++ if (csi2rx->dphy) {
++ writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
++ phy_power_off(csi2rx->dphy);
++ }
+ err_disable_pclk:
+ clk_disable_unprepare(csi2rx->p_clk);
+
+@@ -319,7 +321,7 @@ static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+
+ csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
+- s_subdev->fwnode,
++ asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (csi2rx->source_pad < 0) {
+ dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
+@@ -479,8 +481,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ struct v4l2_async_connection);
+ of_node_put(ep);
+- if (IS_ERR(asd))
++ if (IS_ERR(asd)) {
++ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ return PTR_ERR(asd);
++ }
+
+ csi2rx->notifier.ops = &csi2rx_notifier_ops;
+
+@@ -543,6 +547,7 @@ static int csi2rx_probe(struct platform_device *pdev)
+ return 0;
+
+ err_cleanup:
++ v4l2_async_nf_unregister(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ err_free_priv:
+ kfree(csi2rx);
+@@ -553,6 +558,8 @@ static void csi2rx_remove(struct platform_device *pdev)
+ {
+ struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
+
++ v4l2_async_nf_unregister(&csi2rx->notifier);
++ v4l2_async_nf_cleanup(&csi2rx->notifier);
+ v4l2_async_unregister_subdev(&csi2rx->subdev);
+ kfree(csi2rx);
+ }
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 7194f88edc0fb4..c3456c700c07e2 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1021,13 +1021,13 @@ static void mtk_jpeg_dec_device_run(void *priv)
+ if (ret < 0)
+ goto dec_end;
+
+- schedule_delayed_work(&jpeg->job_timeout_work,
+- msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+-
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
+ goto dec_end;
+
++ schedule_delayed_work(&jpeg->job_timeout_work,
++ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
++
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ mtk_jpeg_dec_reset(jpeg->reg_base);
+ mtk_jpeg_dec_set_config(jpeg->reg_base,
+@@ -1403,7 +1403,6 @@ static void mtk_jpeg_remove(struct platform_device *pdev)
+ {
+ struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+
+- cancel_delayed_work_sync(&jpeg->job_timeout_work);
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(jpeg->vdev);
+ v4l2_m2m_release(jpeg->m2m_dev);
+@@ -1750,9 +1749,6 @@ static void mtk_jpegdec_worker(struct work_struct *work)
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+- schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
+- msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+-
+ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx,
+ &jpeg_src_buf->dec_param,
+@@ -1762,6 +1758,9 @@ static void mtk_jpegdec_worker(struct work_struct *work)
+ goto setdst_end;
+ }
+
++ schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
++ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
++
+ spin_lock_irqsave(&comp_jpeg[hw_id]->hw_lock, flags);
+ ctx->total_frame_num++;
+ mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base);
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+index 2bbc48c7402ca5..f8fa3b841ccfb0 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+@@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+ u32 img_stride;
+ u32 mem_stride;
+ u32 i, enc_quality;
++ u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
+
+ value = width << 16 | height;
+ writel(value, base + JPEG_ENC_IMG_SIZE);
+@@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+ writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
+ writel(mem_stride, base + JPEG_ENC_STRIDE);
+
+- enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
+- for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
++ enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
++ for (i = 0; i < nr_enc_quality; i++) {
+ if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
+ enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
+ break;
+diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
+index b065ccd0691404..378a1cba0144fa 100644
+--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
++++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
+@@ -26,7 +26,7 @@ static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg)
+ vpu->inst_addr = msg->vpu_inst_addr;
+ }
+
+-static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len,
++static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len,
+ void *priv)
+ {
+ const struct mdp_ipi_comm_ack *msg = data;
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+index 3177592490bee4..6adac857a4779d 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
+@@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose;
+ u32 out = 0;
+
++ ctx = &path->comps[index];
+ if (CFG_CHECK(MT8183, p_id))
+ out = CFG_COMP(MT8183, ctx->param, outputs[0]);
+
+ compose = path->composes[out];
+- ctx = &path->comps[index];
+ ret = call_op(ctx, config_frame, cmd, compose);
+ if (ret)
+ return ret;
+diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+index 9e744d07a1e8ea..774487fb72a319 100644
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+@@ -79,6 +79,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
+ }
+
+ fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
++ if (!fw)
++ return ERR_PTR(-ENOMEM);
+ fw->type = SCP;
+ fw->ops = &mtk_vcodec_rproc_msg;
+ fw->scp = scp;
+diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+index 5e03b08865599a..42ce58c41e4f4a 100644
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+@@ -29,15 +29,7 @@ static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+ {
+- /*
+- * The handler we receive takes a void * as its first argument. We
+- * cannot change this because it needs to be passed down to the rproc
+- * subsystem when SCP is used. VPU takes a const argument, which is
+- * more constrained, so the conversion below is safe.
+- */
+- ipi_handler_t handler_const = (ipi_handler_t)handler;
+-
+- return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
++ return vpu_ipi_register(fw->pdev, id, handler, name, priv);
+ }
+
+ static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+@@ -58,12 +50,12 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
+
+ dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
+
+- mutex_lock(&dev->dev_mutex);
++ mutex_lock(&dev->dev_ctx_lock);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
+ }
+- mutex_unlock(&dev->dev_mutex);
++ mutex_unlock(&dev->dev_ctx_lock);
+ }
+
+ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
+@@ -73,12 +65,12 @@ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
+
+ dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
+
+- mutex_lock(&dev->dev_mutex);
++ mutex_lock(&dev->dev_ctx_lock);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
+ }
+- mutex_unlock(&dev->dev_mutex);
++ mutex_unlock(&dev->dev_ctx_lock);
+ }
+
+ static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
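Both watchdog handlers above switch from dev_mutex to the new, narrower dev_ctx_lock when walking ctx_list, so the list can be traversed from watchdog/IPI paths without contending on the heavyweight video_device lock. A minimal sketch of the idiom, with hypothetical names:

        #include <linux/list.h>
        #include <linux/mutex.h>

        struct my_dev {
                struct mutex ctx_lock;          /* protects ctx_list only */
                struct list_head ctx_list;
        };

        struct my_ctx {
                struct list_head list;
                int state;
        };

        static void my_abort_all(struct my_dev *dev)
        {
                struct my_ctx *ctx;

                mutex_lock(&dev->ctx_lock);
                list_for_each_entry(ctx, &dev->ctx_list, list)
                        ctx->state = -1;        /* e.g. an ABORT state */
                mutex_unlock(&dev->ctx_lock);
        }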
+diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
+index 908602031fd0e3..9ce34a3b5ee67d 100644
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
+@@ -47,20 +47,32 @@ EXPORT_SYMBOL(mtk_vcodec_write_vdecsys);
+
+ int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
+ {
++ enum mtk_instance_type inst_type = *((unsigned int *)priv);
++ struct platform_device *plat_dev;
+ unsigned long size = mem->size;
+- struct mtk_vcodec_dec_ctx *ctx = priv;
+- struct device *dev = &ctx->dev->plat_dev->dev;
++ int id;
+
+- mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
++ if (inst_type == MTK_INST_ENCODER) {
++ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
++
++ plat_dev = enc_ctx->dev->plat_dev;
++ id = enc_ctx->id;
++ } else {
++ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
++
++ plat_dev = dec_ctx->dev->plat_dev;
++ id = dec_ctx->id;
++ }
++
++ mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL);
+ if (!mem->va) {
+- mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size);
++ mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!",
++ dev_name(&plat_dev->dev), size);
+ return -ENOMEM;
+ }
+
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
+- (unsigned long)mem->dma_addr);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
++ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
++ (unsigned long)mem->dma_addr, size);
+
+ return 0;
+ }
+@@ -68,21 +80,33 @@ EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
+
+ void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
+ {
++ enum mtk_instance_type inst_type = *((unsigned int *)priv);
++ struct platform_device *plat_dev;
+ unsigned long size = mem->size;
+- struct mtk_vcodec_dec_ctx *ctx = priv;
+- struct device *dev = &ctx->dev->plat_dev->dev;
++ int id;
++
++ if (inst_type == MTK_INST_ENCODER) {
++ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
++
++ plat_dev = enc_ctx->dev->plat_dev;
++ id = enc_ctx->id;
++ } else {
++ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
++
++ plat_dev = dec_ctx->dev->plat_dev;
++ id = dec_ctx->id;
++ }
+
+ if (!mem->va) {
+- mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size);
++ mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!",
++ dev_name(&plat_dev->dev), size);
+ return;
+ }
+
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
+- (unsigned long)mem->dma_addr);
+- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
++ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
++ (unsigned long)mem->dma_addr, size);
+
+- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
++ dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr);
+ mem->va = NULL;
+ mem->dma_addr = 0;
+ mem->size = 0;
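mtk_vcodec_mem_alloc()/mem_free() above now accept either an encoder or a decoder context through the same void *priv and tell them apart by reading the instance type via *((unsigned int *)priv). That is only sound if the type tag is the first member of both context structures, so the two layouts alias at offset zero. A minimal sketch of the idiom (hypothetical types):

        #include <linux/types.h>

        enum inst_type { INST_DECODER, INST_ENCODER };

        /* Both layouts must keep the tag as their first member. */
        struct dec_ctx { enum inst_type type; int id; };
        struct enc_ctx { enum inst_type type; int id; };

        static int ctx_id(void *priv)
        {
                /* Valid only because 'type' sits at offset zero in both. */
                if (*(enum inst_type *)priv == INST_ENCODER)
                        return ((struct enc_ctx *)priv)->id;
                return ((struct dec_ctx *)priv)->id;
        }

The cost is an unchecked layout contract: reordering either struct's first field would break the cast silently.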
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+index 0a89ce452ac329..20c0b1acf6186c 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+@@ -268,7 +268,9 @@ static int fops_vcodec_open(struct file *file)
+
+ ctx->dev->vdec_pdata->init_vdec_params(ctx);
+
++ mutex_lock(&dev->dev_ctx_lock);
+ list_add(&ctx->list, &dev->ctx_list);
++ mutex_unlock(&dev->dev_ctx_lock);
+ mtk_vcodec_dbgfs_create(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+@@ -311,7 +313,9 @@ static int fops_vcodec_release(struct file *file)
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ mtk_vcodec_dbgfs_remove(dev, ctx->id);
++ mutex_lock(&dev->dev_ctx_lock);
+ list_del_init(&ctx->list);
++ mutex_unlock(&dev->dev_ctx_lock);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+@@ -378,6 +382,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
+ for (i = 0; i < MTK_VDEC_HW_MAX; i++)
+ mutex_init(&dev->dec_mutex[i]);
+ mutex_init(&dev->dev_mutex);
++ mutex_init(&dev->dev_ctx_lock);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+index 7e36b2c69b7d12..b3a59af7c8ac52 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
++++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+@@ -231,6 +231,7 @@ struct mtk_vcodec_dec_ctx {
+ *
+ * @dec_mutex: decoder hardware lock
+ * @dev_mutex: video_device lock
++ * @dev_ctx_lock: the lock of context list
+ * @decode_workqueue: decode work queue
+ *
+ * @irqlock: protect data access by irq handler and work thread
+@@ -270,6 +271,7 @@ struct mtk_vcodec_dec_dev {
+ /* decoder hardware mutex lock */
+ struct mutex dec_mutex[MTK_VDEC_HW_MAX];
+ struct mutex dev_mutex;
++ struct mutex dev_ctx_lock;
+ struct workqueue_struct *decode_workqueue;
+
+ spinlock_t irqlock;
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+index 2b6a5adbc41994..b0e2e59f61b5d7 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+@@ -1023,18 +1023,26 @@ static void vdec_av1_slice_free_working_buffer(struct vdec_av1_slice_instance *i
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(instance->mv); i++)
+- mtk_vcodec_mem_free(ctx, &instance->mv[i]);
++ if (instance->mv[i].va)
++ mtk_vcodec_mem_free(ctx, &instance->mv[i]);
+
+ for (i = 0; i < ARRAY_SIZE(instance->seg); i++)
+- mtk_vcodec_mem_free(ctx, &instance->seg[i]);
++ if (instance->seg[i].va)
++ mtk_vcodec_mem_free(ctx, &instance->seg[i]);
+
+ for (i = 0; i < ARRAY_SIZE(instance->cdf); i++)
+- mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
++ if (instance->cdf[i].va)
++ mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
++
+
+- mtk_vcodec_mem_free(ctx, &instance->tile);
+- mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
+- mtk_vcodec_mem_free(ctx, &instance->cdf_table);
+- mtk_vcodec_mem_free(ctx, &instance->iq_table);
++ if (instance->tile.va)
++ mtk_vcodec_mem_free(ctx, &instance->tile);
++ if (instance->cdf_temp.va)
++ mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
++ if (instance->cdf_table.va)
++ mtk_vcodec_mem_free(ctx, &instance->cdf_table);
++ if (instance->iq_table.va)
++ mtk_vcodec_mem_free(ctx, &instance->iq_table);
+
+ instance->level = AV1_RES_NONE;
+ }
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+index 5600f1df653d2f..b55fbd6a8a669c 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
+@@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ return vpu_dec_reset(vpu);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
++ if (!fb) {
++ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
++ return -ENOMEM;
++ }
++
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ y_fb_dma = fb->base_y.dma_addr;
++ c_fb_dma = fb->base_c.dma_addr;
+
+ mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ inst->num_nalu, y_fb_dma, c_fb_dma, fb);
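The decoder hunk above stops threading a possibly-NULL fb through container_of() and the fb ? ... : 0 fallbacks, and instead rejects the frame up front. A minimal sketch of the shape of that fix (hypothetical types):

        #include <linux/errno.h>
        #include <linux/types.h>

        /* Hypothetical frame buffer; only the DMA addresses matter here. */
        struct frame_buf {
                struct { dma_addr_t dma_addr; } base_y, base_c;
        };

        static int pick_dma(struct frame_buf *fb, u64 *y, u64 *c)
        {
                if (!fb)
                        return -ENOMEM;         /* fail fast, no 0-address fallback */

                *y = fb->base_y.dma_addr;
                *c = fb->base_c.dma_addr;
                return 0;
        }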
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+index 0e741e0dc8bacd..bb0ad93c6b789f 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+@@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
+ return vpu_dec_reset(vpu);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
++ if (!fb) {
++ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
++ return -ENOMEM;
++ }
++
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ y_fb_dma = fb->base_y.dma_addr;
++ c_fb_dma = fb->base_c.dma_addr;
+ mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
+ inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
+
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+index 06ed47df693bfd..21836dd6ef85a3 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+@@ -869,7 +869,6 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
+ inst->vpu.codec_type = ctx->current_codec;
+ inst->vpu.capture_type = ctx->capture_fourcc;
+
+- ctx->drv_handle = inst;
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
+@@ -898,6 +897,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
+ mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
+ inst, inst->vpu.codec_type);
+
++ ctx->drv_handle = inst;
+ return 0;
+ error_free_inst:
+ kfree(inst);
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+index f64b21c0716967..86b93055f16337 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
+@@ -335,14 +335,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+- dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
++ if (!fb) {
++ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
++ return -ENOMEM;
++ }
+
+- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
++ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
++ y_fb_dma = fb->base_y.dma_addr;
+ if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ c_fb_dma = y_fb_dma +
+ inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ else
+- c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
++ c_fb_dma = fb->base_c.dma_addr;
+
+ inst->vsi->dec.bs_dma = (u64)bs->dma_addr;
+ inst->vsi->dec.bs_sz = bs->size;
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+index 82e57ae983d557..145958206e38a4 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+@@ -77,12 +77,14 @@ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vde
+ struct mtk_vcodec_dec_ctx *ctx;
+ int ret = false;
+
++ mutex_lock(&dec_dev->dev_ctx_lock);
+ list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
+ if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
+ ret = true;
+ break;
+ }
+ }
++ mutex_unlock(&dec_dev->dev_ctx_lock);
+
+ return ret;
+ }
+@@ -231,6 +233,12 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
+ mtk_vdec_debug(vpu->ctx, "vdec_inst=%p", vpu);
+
+ err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
++
++ if (IS_ERR_OR_NULL(vpu->vsi)) {
++ mtk_vdec_err(vpu->ctx, "invalid vdec vsi, status=%d", err);
++ return -EINVAL;
++ }
++
+ mtk_vdec_debug(vpu->ctx, "- ret=%d", err);
+ return err;
+ }
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+index 04948d3eb011a6..eb381fa6e7d14e 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+@@ -866,7 +866,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
+ {
+ struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
+ struct venc_enc_param param;
+- int ret, pm_ret;
++ int ret;
+ int i;
+
+ /* Once state turn into MTK_STATE_ABORT, we need stop_streaming
+@@ -886,18 +886,12 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
+ return 0;
+ }
+
+- ret = pm_runtime_resume_and_get(&ctx->dev->plat_dev->dev);
+- if (ret < 0) {
+- mtk_v4l2_venc_err(ctx, "pm_runtime_resume_and_get fail %d", ret);
+- goto err_start_stream;
+- }
+-
+ mtk_venc_set_param(ctx, &param);
+ ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
+ if (ret) {
+ mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
+ ctx->state = MTK_STATE_ABORT;
+- goto err_set_param;
++ goto err_start_stream;
+ }
+ ctx->param_change = MTK_ENCODE_PARAM_NONE;
+
+@@ -910,18 +904,13 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
+ if (ret) {
+ mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
+ ctx->state = MTK_STATE_ABORT;
+- goto err_set_param;
++ goto err_start_stream;
+ }
+ ctx->state = MTK_STATE_HEADER;
+ }
+
+ return 0;
+
+-err_set_param:
+- pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
+- if (pm_ret < 0)
+- mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", pm_ret);
+-
+ err_start_stream:
+ for (i = 0; i < q->num_buffers; ++i) {
+ struct vb2_buffer *buf = vb2_get_buffer(q, i);
+@@ -1004,10 +993,6 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
+ if (ret)
+ mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret);
+
+- ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
+- if (ret < 0)
+- mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", ret);
+-
+ ctx->state = MTK_STATE_FREE;
+ }
+
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+index 6319f24bc714b5..3cb8a16222220e 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+@@ -177,7 +177,9 @@ static int fops_vcodec_open(struct file *file)
+ mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
+ ctx->id, ctx, ctx->m2m_ctx);
+
++ mutex_lock(&dev->dev_ctx_lock);
+ list_add(&ctx->list, &dev->ctx_list);
++ mutex_unlock(&dev->dev_ctx_lock);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
+@@ -212,7 +214,9 @@ static int fops_vcodec_release(struct file *file)
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
++ mutex_lock(&dev->dev_ctx_lock);
+ list_del_init(&ctx->list);
++ mutex_unlock(&dev->dev_ctx_lock);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+@@ -294,6 +298,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
+
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dev_mutex);
++ mutex_init(&dev->dev_ctx_lock);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+index a042f607ed8d16..0bd85d0fb379ac 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+@@ -178,6 +178,7 @@ struct mtk_vcodec_enc_ctx {
+ *
+ * @enc_mutex: encoder hardware lock.
+ * @dev_mutex: video_device lock
++ * @dev_ctx_lock: the lock of context list
+ * @encode_workqueue: encode work queue
+ *
+ * @enc_irq: h264 encoder irq resource
+@@ -205,6 +206,7 @@ struct mtk_vcodec_enc_dev {
+ /* encoder hardware mutex lock */
+ struct mutex enc_mutex;
+ struct mutex dev_mutex;
++ struct mutex dev_ctx_lock;
+ struct workqueue_struct *encode_workqueue;
+
+ int enc_irq;
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+index 3fce936e61b9f2..1a2b14a3e219c2 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+@@ -58,6 +58,26 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
+ return 0;
+ }
+
++int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
++{
++ int ret;
++
++ ret = pm_runtime_resume_and_get(pm->dev);
++ if (ret)
++ dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret);
++
++ return ret;
++}
++
++void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm)
++{
++ int ret;
++
++ ret = pm_runtime_put(pm->dev);
++ if (ret && ret != -EAGAIN)
++ dev_err(pm->dev, "pm_runtime_put fail %d", ret);
++}
++
+ void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
+ {
+ struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+index e50be0575190a9..2e28f25e36cc42 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+@@ -10,7 +10,8 @@
+ #include "mtk_vcodec_enc_drv.h"
+
+ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
+-
++int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
++void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
+
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+index a68dac72c4e426..f8145998fcaf78 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+@@ -301,11 +301,12 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
+ * other buffers need to be freed by AP.
+ */
+ for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+- if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME)
++ if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME && inst->work_bufs[i].va)
+ mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ }
+
+- mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
++ if (inst->pps_buf.va)
++ mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
+ }
+
+ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+index 1bdaecdd64a795..e83747b8d69ab3 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+@@ -32,9 +32,7 @@ int venc_if_init(struct mtk_vcodec_enc_ctx *ctx, unsigned int fourcc)
+ }
+
+ mtk_venc_lock(ctx);
+- mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->init(ctx);
+- mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ return ret;
+@@ -46,9 +44,7 @@ int venc_if_set_param(struct mtk_vcodec_enc_ctx *ctx,
+ int ret = 0;
+
+ mtk_venc_lock(ctx);
+- mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->set_param(ctx->drv_handle, type, in);
+- mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ return ret;
+@@ -68,15 +64,20 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
+ ctx->dev->curr_ctx = ctx;
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+
++ ret = mtk_vcodec_enc_pw_on(&ctx->dev->pm);
++ if (ret)
++ goto venc_if_encode_pw_on_err;
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
+ bs_buf, result);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
++ mtk_vcodec_enc_pw_off(&ctx->dev->pm);
+
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ ctx->dev->curr_ctx = NULL;
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+
++venc_if_encode_pw_on_err:
+ mtk_venc_unlock(ctx);
+ return ret;
+ }
+@@ -89,9 +90,7 @@ int venc_if_deinit(struct mtk_vcodec_enc_ctx *ctx)
+ return 0;
+
+ mtk_venc_lock(ctx);
+- mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->deinit(ctx->drv_handle);
+- mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ ctx->drv_handle = NULL;
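venc_if_encode() above now brackets only the actual encode with a runtime-PM reference, and the clock toggling disappears from init/set_param/deinit. A minimal sketch of the resume/put pairing, assuming a generic struct device:

        #include <linux/pm_runtime.h>

        /* Hold a runtime-PM reference only across the hardware access. */
        static int do_job(struct device *dev)
        {
                int ret;

                ret = pm_runtime_resume_and_get(dev);   /* drops its ref on failure */
                if (ret)
                        return ret;

                /* ... program the hardware and wait for completion ... */

                pm_runtime_put(dev);                    /* balance the reference */
                return 0;
        }

pm_runtime_resume_and_get() already releases its reference when the resume fails, which is why the error path above returns without a put.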
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+index ae6290d28f8e98..51bb7ee141b9e5 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+@@ -47,12 +47,14 @@ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct ven
+ struct mtk_vcodec_enc_ctx *ctx;
+ int ret = false;
+
++ mutex_lock(&enc_dev->dev_ctx_lock);
+ list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
+ if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
+ ret = true;
+ break;
+ }
+ }
++ mutex_unlock(&enc_dev->dev_ctx_lock);
+
+ return ret;
+ }
+@@ -154,6 +156,11 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
+ return -EINVAL;
+ }
+
++ if (IS_ERR_OR_NULL(vpu->vsi)) {
++ mtk_venc_err(vpu->ctx, "invalid venc vsi");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+index 7243604a82a5bb..724ae7c2ab3ba2 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+@@ -635,7 +635,7 @@ int vpu_load_firmware(struct platform_device *pdev)
+ }
+ EXPORT_SYMBOL_GPL(vpu_load_firmware);
+
+-static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
++static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
+ {
+ struct mtk_vpu *vpu = priv;
+ const struct vpu_run *run = data;
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
+index a56053ff135af7..da05f3e7408108 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.h
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
+@@ -17,7 +17,7 @@
+ * VPU interfaces with other blocks by share memory and interrupt.
+ */
+
+-typedef void (*ipi_handler_t) (const void *data,
++typedef void (*ipi_handler_t) (void *data,
+ unsigned int len,
+ void *priv);
+
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index b7a720198ce57a..2007152cd7a40d 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1,
+ return false;
+ }
+
++static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx)
++{
++ struct vb2_v4l2_buffer *next_dst_buf;
++
++ next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
++ if (!next_dst_buf) {
++ ctx->fh.m2m_ctx->is_draining = true;
++ ctx->fh.m2m_ctx->next_buf_last = true;
++ return;
++ }
++
++ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf);
++}
++
+ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ struct mxc_jpeg_src_buf *jpeg_src_buf)
+ {
+@@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt))
+ jpeg_src_buf->fmt = q_data_cap->fmt;
+- if (q_data_cap->fmt != jpeg_src_buf->fmt ||
++ if (ctx->need_initial_source_change_evt ||
++ q_data_cap->fmt != jpeg_src_buf->fmt ||
+ q_data_cap->w != jpeg_src_buf->w ||
+ q_data_cap->h != jpeg_src_buf->h) {
+ dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n",
+@@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
+ mxc_jpeg_sizeimage(q_data_cap);
+ notify_src_chg(ctx);
+ ctx->source_change = 1;
++ ctx->need_initial_source_change_evt = false;
++ if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
++ mxc_jpeg_set_last_buffer(ctx);
+ }
+
+ return ctx->source_change ? true : false;
+@@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = mxc_jpeg_get_plane_size(q_data, i);
+
++ if (V4L2_TYPE_IS_OUTPUT(q->type))
++ ctx->need_initial_source_change_evt = true;
++
+ return 0;
+ }
+
+@@ -1611,6 +1632,9 @@ static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+ dev_dbg(ctx->mxc_jpeg->dev, "Start streaming ctx=%p", ctx);
+ q_data->sequence = 0;
+
++ if (V4L2_TYPE_IS_CAPTURE(q->type))
++ ctx->need_initial_source_change_evt = false;
++
+ ret = pm_runtime_resume_and_get(ctx->mxc_jpeg->dev);
+ if (ret < 0) {
+ dev_err(ctx->mxc_jpeg->dev, "Failed to power up jpeg\n");
+@@ -1638,8 +1662,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+- if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change)
+- v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
++ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
++ /* if V4L2_DEC_CMD_STOP is sent before the source change triggered,
++ * restore the is_draining flag
++ */
++ if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
++ ctx->fh.m2m_ctx->is_draining = true;
++
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
+ notify_eos(ctx);
+@@ -1916,7 +1945,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
+ return -EINVAL;
+ for (i = 0; i < q_data->fmt->mem_planes; i++) {
+ sizeimage = mxc_jpeg_get_plane_size(q_data, i);
+- if (vb2_plane_size(vb, i) < sizeimage) {
++ if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) {
+ dev_err(dev, "plane %d too small (%lu < %lu)",
+ i, vb2_plane_size(vb, i), sizeimage);
+ return -EINVAL;
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+index d80e94cc9d9924..dc4afeeff5b65b 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+@@ -99,6 +99,7 @@ struct mxc_jpeg_ctx {
+ enum mxc_jpeg_enc_state enc_state;
+ int slot;
+ unsigned int source_change;
++ bool need_initial_source_change_evt;
+ bool header_parsed;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u8 jpeg_quality;
+diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
+index 5f93712bf48540..142ac7b73e14a0 100644
+--- a/drivers/media/platform/nxp/imx-mipi-csis.c
++++ b/drivers/media/platform/nxp/imx-mipi-csis.c
+@@ -1437,24 +1437,18 @@ static int mipi_csis_probe(struct platform_device *pdev)
+ /* Reset PHY and enable the clocks. */
+ mipi_csis_phy_reset(csis);
+
+- ret = mipi_csis_clk_enable(csis);
+- if (ret < 0) {
+- dev_err(csis->dev, "failed to enable clocks: %d\n", ret);
+- return ret;
+- }
+-
+ /* Now that the hardware is initialized, request the interrupt. */
+ ret = devm_request_irq(dev, irq, mipi_csis_irq_handler, 0,
+ dev_name(dev), csis);
+ if (ret) {
+ dev_err(dev, "Interrupt request failed\n");
+- goto err_disable_clock;
++ return ret;
+ }
+
+ /* Initialize and register the subdev. */
+ ret = mipi_csis_subdev_init(csis);
+ if (ret < 0)
+- goto err_disable_clock;
++ return ret;
+
+ platform_set_drvdata(pdev, &csis->sd);
+
+@@ -1488,8 +1482,6 @@ static int mipi_csis_probe(struct platform_device *pdev)
+ v4l2_async_nf_unregister(&csis->notifier);
+ v4l2_async_nf_cleanup(&csis->notifier);
+ v4l2_async_unregister_subdev(&csis->sd);
+-err_disable_clock:
+- mipi_csis_clk_disable(csis);
+
+ return ret;
+ }
+@@ -1504,9 +1496,10 @@ static void mipi_csis_remove(struct platform_device *pdev)
+ v4l2_async_nf_cleanup(&csis->notifier);
+ v4l2_async_unregister_subdev(&csis->sd);
+
++ if (!pm_runtime_enabled(&pdev->dev))
++ mipi_csis_runtime_suspend(&pdev->dev);
++
+ pm_runtime_disable(&pdev->dev);
+- mipi_csis_runtime_suspend(&pdev->dev);
+- mipi_csis_clk_disable(csis);
+ v4l2_subdev_cleanup(&csis->sd);
+ media_entity_cleanup(&csis->sd.entity);
+ pm_runtime_set_suspended(&pdev->dev);
+diff --git a/drivers/media/platform/nxp/imx-pxp.c b/drivers/media/platform/nxp/imx-pxp.c
+index e62dc5c1a4aeae..e4427e6487fba7 100644
+--- a/drivers/media/platform/nxp/imx-pxp.c
++++ b/drivers/media/platform/nxp/imx-pxp.c
+@@ -1805,6 +1805,9 @@ static int pxp_probe(struct platform_device *pdev)
+ return PTR_ERR(mmio);
+ dev->regmap = devm_regmap_init_mmio(&pdev->dev, mmio,
+ &pxp_regmap_config);
++ if (IS_ERR(dev->regmap))
++ return dev_err_probe(&pdev->dev, PTR_ERR(dev->regmap),
++ "Failed to init regmap\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+index 792f031e032ae9..c9a4d091b57074 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+@@ -161,7 +161,6 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar,
+
+ pad = media_pad_remote_pad_first(&xbar->pads[sink_pad]);
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+-
+ if (!sd) {
+ dev_dbg(xbar->isi->dev,
+ "no entity connected to crossbar input %u\n",
+@@ -465,7 +464,8 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
+ }
+
+ for (i = 0; i < xbar->num_sinks; ++i)
+- xbar->pads[i].flags = MEDIA_PAD_FL_SINK;
++ xbar->pads[i].flags = MEDIA_PAD_FL_SINK
++ | MEDIA_PAD_FL_MUST_CONNECT;
+ for (i = 0; i < xbar->num_sources; ++i)
+ xbar->pads[i + xbar->num_sinks].flags = MEDIA_PAD_FL_SOURCE;
+
+diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+index 0f8ac29d038db8..0147cc062e1ae7 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -352,12 +352,21 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ phy_sel = csid->phy.csiphy_id;
+
+ if (enable) {
+- u8 dt_id = vc;
++ /*
++ * DT_ID is a two bit bitfield that is concatenated with
++ * the four least significant bits of the five bit VC
++ * bitfield to generate an internal CID value.
++ *
++ * CSID_RDI_CFG0(vc)
++ * DT_ID : 28:27
++ * VC : 26:22
++ * DT : 21:16
++ *
++ * CID : VC 3:0 << 2 | DT_ID 1:0
++ */
++ u8 dt_id = vc & 0x03;
+
+ if (tg->enabled) {
+- /* Config Test Generator */
+- vc = 0xa;
+-
+ /* configure one DT, infinite frames */
+ val = vc << TPG_VC_CFG0_VC_NUM;
+ val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
+@@ -370,14 +379,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+
+ writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
+
+- val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+- val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
++ val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
++ val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
+
+ val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
+
+- val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
++ val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+ val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
+ val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
+@@ -449,6 +458,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
++ if (vc > 3)
++ val |= 1 << CSI2_RX_CFG1_VC_MODE;
+ val |= 1 << CSI2_RX_CFG1_MISR_EN;
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 04baa80494c667..4dba61b8d3f2a6 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -476,7 +476,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+
+ settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
+
+- val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
++ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+ for (i = 0; i < c->num_data; i++)
+ val |= BIT(c->data[i].pos * 2);
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 02494c89da91c8..168baaa80d4e60 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -7,7 +7,6 @@
+ * Copyright (C) 2020-2021 Linaro Ltd.
+ */
+
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -494,35 +493,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ return 0;
+ }
+
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+- bool done;
+- int timeout = 0;
+-
+- do {
+- spin_lock_irqsave(&vfe->output_lock, flags);
+- done = !output->gen2.active_num;
+- spin_unlock_irqrestore(&vfe->output_lock, flags);
+- usleep_range(10000, 20000);
+-
+- if (timeout++ == 100) {
+- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+- vfe_reset(vfe);
+- output->gen2.active_num = 0;
+- return 0;
+- }
+- } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
++ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+- return 0;
++ vfe_reset(vfe);
+ }
+
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index f70aad2e8c2378..8ddb8016434ae9 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -8,7 +8,6 @@
+ * Copyright (C) 2021 Jonathan Marek
+ */
+
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -328,35 +327,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ return 0;
+ }
+
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+- bool done;
+- int timeout = 0;
+-
+- do {
+- spin_lock_irqsave(&vfe->output_lock, flags);
+- done = !output->gen2.active_num;
+- spin_unlock_irqrestore(&vfe->output_lock, flags);
+- usleep_range(10000, 20000);
+-
+- if (timeout++ == 100) {
+- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+- vfe_reset(vfe);
+- output->gen2.active_num = 0;
+- return 0;
+- }
+- } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
++ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+- return 0;
++ vfe_reset(vfe);
+ }
+
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index 06c95568e5af4e..965500b83d073b 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -535,7 +535,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
+ struct camss_clock *clock = &vfe->clock[i];
+
+ if (!strcmp(clock->name, "vfe0") ||
+- !strcmp(clock->name, "vfe1")) {
++ !strcmp(clock->name, "vfe1") ||
++ !strcmp(clock->name, "vfe_lite")) {
+ u64 min_rate = 0;
+ unsigned long rate;
+
+@@ -611,7 +612,7 @@ int vfe_get(struct vfe_device *vfe)
+ } else {
+ ret = vfe_check_clock_rates(vfe);
+ if (ret < 0)
+- goto error_pm_runtime_get;
++ goto error_pm_domain;
+ }
+ vfe->power_count++;
+
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index 8640db30602680..184a8062580c87 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -557,12 +557,6 @@ static void video_stop_streaming(struct vb2_queue *q)
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+
+- if (entity->use_count > 1) {
+- /* Don't stop if other instances of the pipeline are still running */
+- dev_dbg(video->camss->dev, "Video pipeline still used, don't stop streaming.\n");
+- return;
+- }
+-
+ if (ret) {
+ dev_err(video->camss->dev, "Video pipeline stop failed: %d\n", ret);
+ return;
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index f11dc59135a5ac..0754645d26acba 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1038,8 +1038,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
+ struct v4l2_mbus_config_mipi_csi2 *mipi_csi2;
+ struct v4l2_fwnode_endpoint vep = { { 0 } };
+ unsigned int i;
++ int ret;
+
+- v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++ if (ret)
++ return ret;
+
+ csd->interface.csiphy_id = vep.base.port;
+
+@@ -1538,6 +1541,20 @@ static int camss_icc_get(struct camss *camss)
+ return 0;
+ }
+
++static void camss_genpd_cleanup(struct camss *camss)
++{
++ int i;
++
++ if (camss->genpd_num == 1)
++ return;
++
++ if (camss->genpd_num > camss->vfe_num)
++ device_link_del(camss->genpd_link[camss->genpd_num - 1]);
++
++ for (i = 0; i < camss->genpd_num; i++)
++ dev_pm_domain_detach(camss->genpd[i], true);
++}
++
+ /*
+ * camss_probe - Probe CAMSS platform device
+ * @pdev: Pointer to CAMSS platform device
+@@ -1617,15 +1634,21 @@ static int camss_probe(struct platform_device *pdev)
+
+ ret = camss_icc_get(camss);
+ if (ret < 0)
+- goto err_cleanup;
++ return ret;
++
++ ret = camss_configure_pd(camss);
++ if (ret < 0) {
++ dev_err(dev, "Failed to configure power domains: %d\n", ret);
++ return ret;
++ }
+
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+- goto err_cleanup;
++ goto err_genpd_cleanup;
+
+ ret = dma_set_mask_and_coherent(dev, 0xffffffff);
+ if (ret)
+- goto err_cleanup;
++ goto err_genpd_cleanup;
+
+ camss->media_dev.dev = camss->dev;
+ strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
+@@ -1637,20 +1660,22 @@ static int camss_probe(struct platform_device *pdev)
+ ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+- goto err_cleanup;
++ goto err_genpd_cleanup;
+ }
+
+ v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
+
++ pm_runtime_enable(dev);
++
+ num_subdevs = camss_of_parse_ports(camss);
+ if (num_subdevs < 0) {
+ ret = num_subdevs;
+- goto err_cleanup;
++ goto err_v4l2_device_unregister;
+ }
+
+ ret = camss_register_entities(camss);
+ if (ret < 0)
+- goto err_cleanup;
++ goto err_v4l2_device_unregister;
+
+ if (num_subdevs) {
+ camss->notifier.ops = &camss_subdev_notifier_ops;
+@@ -1678,43 +1703,27 @@ static int camss_probe(struct platform_device *pdev)
+ }
+ }
+
+- ret = camss_configure_pd(camss);
+- if (ret < 0) {
+- dev_err(dev, "Failed to configure power domains: %d\n", ret);
+- return ret;
+- }
+-
+- pm_runtime_enable(dev);
+-
+ return 0;
+
+ err_register_subdevs:
+ camss_unregister_entities(camss);
+-err_cleanup:
++err_v4l2_device_unregister:
+ v4l2_device_unregister(&camss->v4l2_dev);
+ v4l2_async_nf_cleanup(&camss->notifier);
++ pm_runtime_disable(dev);
++err_genpd_cleanup:
++ camss_genpd_cleanup(camss);
+
+ return ret;
+ }
+
+ void camss_delete(struct camss *camss)
+ {
+- int i;
+-
+ v4l2_device_unregister(&camss->v4l2_dev);
+ media_device_unregister(&camss->media_dev);
+ media_device_cleanup(&camss->media_dev);
+
+ pm_runtime_disable(camss->dev);
+-
+- if (camss->genpd_num == 1)
+- return;
+-
+- if (camss->genpd_num > camss->vfe_num)
+- device_link_del(camss->genpd_link[camss->genpd_num - 1]);
+-
+- for (i = 0; i < camss->genpd_num; i++)
+- dev_pm_domain_detach(camss->genpd[i], true);
+ }
+
+ /*
+@@ -1733,6 +1742,8 @@ static void camss_remove(struct platform_device *pdev)
+
+ if (atomic_read(&camss->ref_count) == 0)
+ camss_delete(camss);
++
++ camss_genpd_cleanup(camss);
+ }
+
+ static const struct of_device_id camss_dt_match[] = {
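The probe path above is reordered so that everything acquired is released in reverse order on failure, with the power-domain detach factored into camss_genpd_cleanup() and shared with the remove path. A minimal sketch of the goto-unwind convention the change restores (all names hypothetical):

        /* Hypothetical setup/teardown pairs; the point is the label ordering. */
        struct device;
        int step_a(struct device *dev);
        void undo_a(struct device *dev);
        int step_b(struct device *dev);
        void undo_b(struct device *dev);
        int step_c(struct device *dev);

        static int my_probe(struct device *dev)
        {
                int ret;

                ret = step_a(dev);
                if (ret)
                        return ret;

                ret = step_b(dev);
                if (ret)
                        goto err_undo_a;

                ret = step_c(dev);
                if (ret)
                        goto err_undo_b;

                return 0;

        err_undo_b:
                undo_b(dev);
        err_undo_a:
                undo_a(dev);
                return ret;
        }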
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 054b8e74ba4f51..0fc9414f8f1849 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -424,6 +424,7 @@ static void venus_remove(struct platform_device *pdev)
+ struct device *dev = core->dev;
+ int ret;
+
++ cancel_delayed_work_sync(&core->work);
+ ret = pm_runtime_get_sync(dev);
+ WARN_ON(ret < 0);
+
+diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
+index 7cab685a2ec804..0a041b4db9efc5 100644
+--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
+@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ idx++;
+
+- if (idx > HFI_BUFFER_TYPE_MAX)
++ if (idx >= HFI_BUFFER_TYPE_MAX)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ req_bytes -= sizeof(struct hfi_buffer_requirements);
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 6cf74b2bc5ae38..c43839539d4dda 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
+ struct hfi_plat_caps *caps = core->caps, *cap;
+ unsigned long bit;
+
++ if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
++ return;
++
+ for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+ {
+ const struct hfi_profile_level *pl = data;
+
++ if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
++ return;
++
+ memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ cap->num_pl += num;
+ }
+@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ {
+ const struct hfi_capability *caps = data;
+
++ if (cap->num_caps + num >= MAX_CAP_ENTRIES)
++ return;
++
+ memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ cap->num_caps += num;
+ }
+@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+ {
+ const struct raw_formats *formats = fmts;
+
++ if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
++ return;
++
+ memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ cap->num_fmts += num_fmts;
+ }
+@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ rawfmts[i].buftype = fmt->buffer_type;
+ i++;
+
++ if (i >= MAX_FMT_ENTRIES)
++ return;
++
+ if (pinfo->num_planes > MAX_PLANES)
+ break;
+
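Each fill_* helper above gains a bounds check before its memcpy(), since the element counts arrive from firmware and previously could overflow the fixed-size capability arrays. A minimal sketch of the guard (hypothetical sizes and types):

        #include <linux/string.h>
        #include <linux/types.h>

        #define MAX_ENTRIES 32

        struct cap_table {
                u32 entries[MAX_ENTRIES];
                unsigned int num;
        };

        /* Firmware supplies 'num'; never trust it to fit the fixed array. */
        static void fill_entries(struct cap_table *t, const u32 *data,
                                 unsigned int num)
        {
                if (t->num + num >= MAX_ENTRIES)        /* mirrors the driver's guard */
                        return;                         /* drop rather than overflow */

                memcpy(&t->entries[t->num], data, num * sizeof(*data));
                t->num += num;
        }

Note the >= comparison is one element conservative (an update that would exactly fill the array is also rejected), trading the last slot for a simpler check.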
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 19fc6575a48910..f9437b6412b91c 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+
+ new_wr_idx = wr_idx + dwords;
+ wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
++
++ if (wr_ptr < (u32 *)queue->qmem.kva ||
++ wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
++ return -EINVAL;
++
+ if (new_wr_idx < qsize) {
+ memcpy(wr_ptr, packet, dwords << 2);
+ } else {
+@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ }
+
+ rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
++
++ if (rd_ptr < (u32 *)queue->qmem.kva ||
++ rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
++ return -EINVAL;
++
+ dwords = *rd_ptr >> 2;
+ if (!dwords)
+ return -EINVAL;
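venus_write_queue()/venus_read_queue() above now validate the computed ring pointer against the backing qmem allocation before dereferencing it, so a corrupted index from firmware cannot read or write out of bounds. A minimal sketch of the bounds test (hypothetical helper):

        #include <linux/types.h>

        /* Reject a computed ring pointer that escapes its backing store. */
        static bool word_in_bounds(void *base, size_t size, u32 *p)
        {
                return (void *)p >= base &&
                       (void *)(p + 1) <= base + size;  /* room for one u32 */
        }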
+diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
+index 48c9084bb4dba8..a1b127caa90a76 100644
+--- a/drivers/media/platform/qcom/venus/pm_helpers.c
++++ b/drivers/media/platform/qcom/venus/pm_helpers.c
+@@ -870,7 +870,7 @@ static int vcodec_domains_get(struct venus_core *core)
+ pd = dev_pm_domain_attach_by_name(dev,
+ res->vcodec_pmdomains[i]);
+ if (IS_ERR_OR_NULL(pd))
+- return PTR_ERR(pd) ? : -ENODATA;
++ return pd ? PTR_ERR(pd) : -ENODATA;
+ core->pmdomains[i] = pd;
+ }
+
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index dbf305cec12024..884ee6e9d4bd1a 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -1255,7 +1255,7 @@ static int vdec_stop_output(struct venus_inst *inst)
+ break;
+ case VENUS_DEC_STATE_INIT:
+ case VENUS_DEC_STATE_CAPTURE_SETUP:
+- ret = hfi_session_flush(inst, HFI_FLUSH_INPUT, true);
++ ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
+ break;
+ default:
+ break;
+@@ -1747,6 +1747,7 @@ static int vdec_close(struct file *file)
+
+ vdec_pm_get(inst);
+
++ cancel_work_sync(&inst->delayed_process_work);
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ vdec_ctrl_deinit(inst);
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+index f6326df0b09bef..109cca91f733a0 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+@@ -1914,12 +1914,14 @@ static int rcsi2_probe(struct platform_device *pdev)
+
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret < 0)
+- goto error_async;
++ goto error_pm_runtime;
+
+ dev_info(priv->dev, "%d lanes found\n", priv->lanes);
+
+ return 0;
+
++error_pm_runtime:
++ pm_runtime_disable(&pdev->dev);
+ error_async:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+@@ -1936,6 +1938,7 @@ static void rcsi2_remove(struct platform_device *pdev)
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+ v4l2_async_unregister_subdev(&priv->subdev);
++ v4l2_subdev_cleanup(&priv->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+index 2a77353f10b592..bb4774e2f335ef 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+@@ -742,12 +742,22 @@ static int rvin_setup(struct rvin_dev *vin)
+ */
+ switch (vin->mbus_code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+- /* BT.601/BT.1358 16bit YCbCr422 */
+- vnmc |= VNMC_INF_YUV16;
++ if (vin->is_csi)
++ /* YCbCr422 8-bit */
++ vnmc |= VNMC_INF_YUV8_BT601;
++ else
++ /* BT.601/BT.1358 16bit YCbCr422 */
++ vnmc |= VNMC_INF_YUV16;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+- vnmc |= VNMC_INF_YUV16 | VNMC_YCAL;
++ if (vin->is_csi)
++ /* YCbCr422 8-bit */
++ vnmc |= VNMC_INF_YUV8_BT601;
++ else
++ /* BT.601/BT.1358 16bit YCbCr422 */
++ vnmc |= VNMC_INF_YUV16;
++ vnmc |= VNMC_YCAL;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+index 792336dada447a..997a66318a2931 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+@@ -59,7 +59,7 @@ enum rvin_isp_id {
+
+ #define RVIN_REMOTES_MAX \
+ (((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+- RVIN_CSI_MAX : RVIN_ISP_MAX)
++ (unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
+
+ /**
+ * enum rvin_dma_state - DMA states
+diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+index ad2bd71037abdb..246eec259c5d7c 100644
+--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
++++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+@@ -854,6 +854,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = {
+ { .compatible = "renesas,rzg2l-csi2", },
+ { /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
+
+ static struct platform_driver rzg2l_csi2_pdrv = {
+ .remove_new = rzg2l_csi2_remove,
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+index f22449dd654cb5..c0f1002f4ecf17 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_histo.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+@@ -36,9 +36,8 @@ struct vsp1_histogram_buffer *
+ vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+ {
+ struct vsp1_histogram_buffer *buf = NULL;
+- unsigned long flags;
+
+- spin_lock_irqsave(&histo->irqlock, flags);
++ spin_lock(&histo->irqlock);
+
+ if (list_empty(&histo->irqqueue))
+ goto done;
+@@ -49,7 +48,7 @@ vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+ histo->readout = true;
+
+ done:
+- spin_unlock_irqrestore(&histo->irqlock, flags);
++ spin_unlock(&histo->irqlock);
+ return buf;
+ }
+
+@@ -58,7 +57,6 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ size_t size)
+ {
+ struct vsp1_pipeline *pipe = histo->entity.pipe;
+- unsigned long flags;
+
+ /*
+ * The pipeline pointer is guaranteed to be valid as this function is
+@@ -70,10 +68,10 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+- spin_lock_irqsave(&histo->irqlock, flags);
++ spin_lock(&histo->irqlock);
+ histo->readout = false;
+ wake_up(&histo->wait_queue);
+- spin_unlock_irqrestore(&histo->irqlock, flags);
++ spin_unlock(&histo->irqlock);
+ }
+
+ /* -----------------------------------------------------------------------------
+@@ -124,11 +122,10 @@ static void histo_buffer_queue(struct vb2_buffer *vb)
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+- unsigned long flags;
+
+- spin_lock_irqsave(&histo->irqlock, flags);
++ spin_lock_irq(&histo->irqlock);
+ list_add_tail(&buf->queue, &histo->irqqueue);
+- spin_unlock_irqrestore(&histo->irqlock, flags);
++ spin_unlock_irq(&histo->irqlock);
+ }
+
+ static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
+@@ -140,9 +137,8 @@ static void histo_stop_streaming(struct vb2_queue *vq)
+ {
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+ struct vsp1_histogram_buffer *buffer;
+- unsigned long flags;
+
+- spin_lock_irqsave(&histo->irqlock, flags);
++ spin_lock_irq(&histo->irqlock);
+
+ /* Remove all buffers from the IRQ queue. */
+ list_for_each_entry(buffer, &histo->irqqueue, queue)
+@@ -152,7 +148,7 @@ static void histo_stop_streaming(struct vb2_queue *vq)
+ /* Wait for the buffer being read out (if any) to complete. */
+ wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);
+
+- spin_unlock_irqrestore(&histo->irqlock, flags);
++ spin_unlock_irq(&histo->irqlock);
+ }
+
+ static const struct vb2_ops histo_video_queue_qops = {
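The histogram hunks above pick the cheapest correct spinlock variant per call site: plain spin_lock() where the code is only reached from the frame-end interrupt handler (interrupts already off), and spin_lock_irq() in process context that is known to run with interrupts enabled, dropping the irqsave/irqrestore flag shuffling. A minimal sketch of the split (hypothetical functions):

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(lock);

        /* Hard-IRQ context: interrupts already disabled, plain lock suffices. */
        static void touch_from_irq(void)
        {
                spin_lock(&lock);
                /* ... */
                spin_unlock(&lock);
        }

        /* Process context known to run with IRQs on. */
        static void touch_from_task(void)
        {
                spin_lock_irq(&lock);
                /* ... */
                spin_unlock_irq(&lock);
        }

spin_lock_irqsave() remains the safe default when the calling context is not guaranteed, which is why the code used it before this cleanup.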
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+index f8093ba9539e93..68d05243c3ee55 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+@@ -373,7 +373,7 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+- v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
++ vsp1_wpf_stop(pipe->output);
+
+ return ret;
+ }
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
+index 674b5748d929e2..85ecd53cda4950 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
++++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
+@@ -73,7 +73,7 @@ struct vsp1_partition_window {
+ * @wpf: The WPF partition window configuration
+ */
+ struct vsp1_partition {
+- struct vsp1_partition_window rpf;
++ struct vsp1_partition_window rpf[VSP1_MAX_RPF];
+ struct vsp1_partition_window uds_sink;
+ struct vsp1_partition_window uds_source;
+ struct vsp1_partition_window sru;
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+index 3b17f5fa4067fb..78b6cefc5a019d 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+@@ -43,14 +43,6 @@ static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
+ data);
+ }
+
+-/* -----------------------------------------------------------------------------
+- * V4L2 Subdevice Operations
+- */
+-
+-static const struct v4l2_subdev_ops rpf_ops = {
+- .pad = &vsp1_rwpf_pad_ops,
+-};
+-
+ /* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+@@ -323,8 +315,8 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
+ * 'width' need to be adjusted.
+ */
+ if (pipe->partitions > 1) {
+- crop.width = pipe->partition->rpf.width;
+- crop.left += pipe->partition->rpf.left;
++ crop.width = pipe->partition->rpf[rpf->entity.index].width;
++ crop.left += pipe->partition->rpf[rpf->entity.index].left;
+ }
+
+ if (pipe->interlaced) {
+@@ -379,7 +371,9 @@ static void rpf_partition(struct vsp1_entity *entity,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+ {
+- partition->rpf = *window;
++ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
++
++ partition->rpf[rpf->entity.index] = *window;
+ }
+
+ static const struct vsp1_entity_operations rpf_entity_ops = {
+@@ -411,7 +405,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
+ rpf->entity.index = index;
+
+ sprintf(name, "rpf.%u", index);
+- ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
++ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+index 22a82d218152fd..e0f87c8103ca56 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+@@ -24,7 +24,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ }
+
+ /* -----------------------------------------------------------------------------
+- * V4L2 Subdevice Pad Operations
++ * V4L2 Subdevice Operations
+ */
+
+ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+@@ -243,7 +243,7 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ return ret;
+ }
+
+-const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
++static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+ .enum_frame_size = vsp1_rwpf_enum_frame_size,
+@@ -253,6 +253,10 @@ const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+ .set_selection = vsp1_rwpf_set_selection,
+ };
+
++const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops = {
++ .pad = &vsp1_rwpf_pad_ops,
++};
++
+ /* -----------------------------------------------------------------------------
+ * Controls
+ */
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
+index eac5c04c223934..e0d212c70b2f99 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
++++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
+@@ -79,9 +79,11 @@ static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
+ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
+ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
+
++void vsp1_wpf_stop(struct vsp1_rwpf *wpf);
++
+ int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
+
+-extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
++extern const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops;
+
+ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_state *sd_state);
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+index d0074ca009209c..cab4445eca696e 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+@@ -186,17 +186,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
+ }
+
+ /* -----------------------------------------------------------------------------
+- * V4L2 Subdevice Core Operations
++ * VSP1 Entity Operations
+ */
+
+-static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
++void vsp1_wpf_stop(struct vsp1_rwpf *wpf)
+ {
+- struct vsp1_rwpf *wpf = to_rwpf(subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+
+- if (enable)
+- return 0;
+-
+ /*
+ * Write to registers directly when stopping the stream as there will be
+ * no pipeline run to apply the display list.
+@@ -204,27 +200,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
+ vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
+ VI6_WPF_SRCRPF, 0);
+-
+- return 0;
+ }
+
+-/* -----------------------------------------------------------------------------
+- * V4L2 Subdevice Operations
+- */
+-
+-static const struct v4l2_subdev_video_ops wpf_video_ops = {
+- .s_stream = wpf_s_stream,
+-};
+-
+-static const struct v4l2_subdev_ops wpf_ops = {
+- .video = &wpf_video_ops,
+- .pad = &vsp1_rwpf_pad_ops,
+-};
+-
+-/* -----------------------------------------------------------------------------
+- * VSP1 Entity Operations
+- */
+-
+ static void vsp1_wpf_destroy(struct vsp1_entity *entity)
+ {
+ struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
+@@ -583,7 +560,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
+ wpf->entity.index = index;
+
+ sprintf(name, "wpf.%u", index);
+- ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
++ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
+index f1c532a5802ac2..25f5b5eebf13ff 100644
+--- a/drivers/media/platform/rockchip/rga/rga.c
++++ b/drivers/media/platform/rockchip/rga/rga.c
+@@ -184,25 +184,16 @@ static int rga_setup_ctrls(struct rga_ctx *ctx)
+ static struct rga_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+- .color_swap = RGA_COLOR_RB_SWAP,
++ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+- {
+- .fourcc = V4L2_PIX_FMT_XRGB32,
+- .color_swap = RGA_COLOR_RB_SWAP,
+- .hw_format = RGA_COLOR_FMT_XBGR8888,
+- .depth = 32,
+- .uv_factor = 1,
+- .y_div = 1,
+- .x_div = 1,
+- },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+- .color_swap = RGA_COLOR_ALPHA_SWAP,
++ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+@@ -211,7 +202,7 @@ static struct rga_fmt formats[] = {
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+- .color_swap = RGA_COLOR_ALPHA_SWAP,
++ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+index 8f3cba31976234..c584bb6d319983 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+@@ -723,6 +723,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
+ unsigned int i;
+ u32 status;
+
++ if (!rkisp1->irqs_enabled)
++ return IRQ_NONE;
++
+ status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+ if (!status)
+ return IRQ_NONE;
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+index d30f0ecb1bfd84..e9bc6c155d2fcf 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+@@ -61,6 +61,14 @@ struct dentry;
+ RKISP1_CIF_ISP_EXP_END | \
+ RKISP1_CIF_ISP_HIST_MEASURE_RDY)
+
++/* IRQ lines */
++enum rkisp1_irq_line {
++ RKISP1_IRQ_ISP = 0,
++ RKISP1_IRQ_MI,
++ RKISP1_IRQ_MIPI,
++ RKISP1_NUM_IRQS,
++};
++
+ /* enum for the resizer pads */
+ enum rkisp1_rsz_pad {
+ RKISP1_RSZ_PAD_SINK,
+@@ -441,7 +449,6 @@ struct rkisp1_debug {
+ * struct rkisp1_device - ISP platform device
+ *
+ * @base_addr: base register address
+- * @irq: the irq number
+ * @dev: a pointer to the struct device
+ * @clk_size: number of clocks
+ * @clks: array of clocks
+@@ -459,6 +466,8 @@ struct rkisp1_debug {
+ * @stream_lock: serializes {start/stop}_streaming callbacks between the capture devices.
+ * @debug: debug params to be exposed on debugfs
+ * @info: version-specific ISP information
++ * @irqs: IRQ line numbers
++ * @irqs_enabled: the hardware is enabled and can cause interrupts
+ */
+ struct rkisp1_device {
+ void __iomem *base_addr;
+@@ -479,6 +488,8 @@ struct rkisp1_device {
+ struct mutex stream_lock; /* serialize {start/stop}_streaming cb between capture devices */
+ struct rkisp1_debug debug;
+ const struct rkisp1_info *info;
++ int irqs[RKISP1_NUM_IRQS];
++ bool irqs_enabled;
+ };
+
+ /*
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+index fdff3d0da4e506..1537dccbd2e28f 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+@@ -141,8 +141,20 @@ static void rkisp1_csi_disable(struct rkisp1_csi *csi)
+ struct rkisp1_device *rkisp1 = csi->rkisp1;
+ u32 val;
+
+- /* Mask and clear interrupts. */
++ /* Mask MIPI interrupts. */
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, 0);
++
++ /* Flush posted writes */
++ rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
++
++ /*
++ * Wait until the IRQ handler has ended. The IRQ handler may get called
++ * even after this, but it will return immediately as the MIPI
++ * interrupts have been masked.
++ */
++ synchronize_irq(rkisp1->irqs[RKISP1_IRQ_MIPI]);
++
++ /* Clear MIPI interrupt status */
+ rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
+
+ val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
+@@ -199,6 +211,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ u32 val, status;
+
++ if (!rkisp1->irqs_enabled)
++ return IRQ_NONE;
++
+ status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
+ if (!status)
+ return IRQ_NONE;
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+index c41abd2833f12f..73cf08a740118c 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+@@ -114,6 +114,7 @@
+ struct rkisp1_isr_data {
+ const char *name;
+ irqreturn_t (*isr)(int irq, void *ctx);
++ u32 line_mask;
+ };
+
+ /* ----------------------------------------------------------------------------
+@@ -304,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
+ {
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+
++ rkisp1->irqs_enabled = false;
++ /* Make sure the IRQ handler will see the above */
++ mb();
++
++ /*
++ * Wait until any running IRQ handler has returned. The IRQ handler
++ * may get called even after this (as it's a shared interrupt line)
++ * but the 'irqs_enabled' flag will make the handler return immediately.
++ */
++ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
++ if (rkisp1->irqs[il] == -1)
++ continue;
++
++		/* Skip if the irq line is the same as the previous one */
++ if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
++ synchronize_irq(rkisp1->irqs[il]);
++ }
++
+ clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
+ return pinctrl_pm_select_sleep_state(dev);
+ }
+@@ -320,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
+ if (ret)
+ return ret;
+
++ rkisp1->irqs_enabled = true;
++ /* Make sure the IRQ handler will see the above */
++ mb();
++
+ return 0;
+ }
+
+@@ -442,17 +465,25 @@ static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
+
+ static irqreturn_t rkisp1_isr(int irq, void *ctx)
+ {
++ irqreturn_t ret = IRQ_NONE;
++
+ /*
+ * Call rkisp1_capture_isr() first to handle the frame that
+ * potentially completed using the current frame_sequence number before
+ * it is potentially incremented by rkisp1_isp_isr() in the vertical
+ * sync.
+ */
+- rkisp1_capture_isr(irq, ctx);
+- rkisp1_isp_isr(irq, ctx);
+- rkisp1_csi_isr(irq, ctx);
+
+- return IRQ_HANDLED;
++ if (rkisp1_capture_isr(irq, ctx) == IRQ_HANDLED)
++ ret = IRQ_HANDLED;
++
++ if (rkisp1_isp_isr(irq, ctx) == IRQ_HANDLED)
++ ret = IRQ_HANDLED;
++
++ if (rkisp1_csi_isr(irq, ctx) == IRQ_HANDLED)
++ ret = IRQ_HANDLED;
++
++ return ret;
+ }
+
+ static const char * const px30_isp_clks[] = {
+@@ -463,9 +494,9 @@ static const char * const px30_isp_clks[] = {
+ };
+
+ static const struct rkisp1_isr_data px30_isp_isrs[] = {
+- { "isp", rkisp1_isp_isr },
+- { "mi", rkisp1_capture_isr },
+- { "mipi", rkisp1_csi_isr },
++ { "isp", rkisp1_isp_isr, BIT(RKISP1_IRQ_ISP) },
++ { "mi", rkisp1_capture_isr, BIT(RKISP1_IRQ_MI) },
++ { "mipi", rkisp1_csi_isr, BIT(RKISP1_IRQ_MIPI) },
+ };
+
+ static const struct rkisp1_info px30_isp_info = {
+@@ -484,7 +515,7 @@ static const char * const rk3399_isp_clks[] = {
+ };
+
+ static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
+- { NULL, rkisp1_isr },
++ { NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) | BIT(RKISP1_IRQ_MIPI) },
+ };
+
+ static const struct rkisp1_info rk3399_isp_info = {
+@@ -535,6 +566,9 @@ static int rkisp1_probe(struct platform_device *pdev)
+ if (IS_ERR(rkisp1->base_addr))
+ return PTR_ERR(rkisp1->base_addr);
+
++ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il)
++ rkisp1->irqs[il] = -1;
++
+ for (i = 0; i < info->isr_size; i++) {
+ irq = info->isrs[i].name
+ ? platform_get_irq_byname(pdev, info->isrs[i].name)
+@@ -542,6 +576,11 @@ static int rkisp1_probe(struct platform_device *pdev)
+ if (irq < 0)
+ return irq;
+
++ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
++ if (info->isrs[i].line_mask & BIT(il))
++ rkisp1->irqs[il] = irq;
++ }
++
+ ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
+ dev_driver_string(dev), dev);
+ if (ret) {
+@@ -582,7 +621,7 @@ static int rkisp1_probe(struct platform_device *pdev)
+
+ ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
+ if (ret)
+- goto err_pm_runtime_disable;
++ goto err_media_dev_cleanup;
+
+ ret = media_device_register(&rkisp1->media_dev);
+ if (ret) {
+@@ -617,6 +656,8 @@ static int rkisp1_probe(struct platform_device *pdev)
+ media_device_unregister(&rkisp1->media_dev);
+ err_unreg_v4l2_dev:
+ v4l2_device_unregister(&rkisp1->v4l2_dev);
++err_media_dev_cleanup:
++ media_device_cleanup(&rkisp1->media_dev);
+ err_pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+@@ -637,6 +678,8 @@ static void rkisp1_remove(struct platform_device *pdev)
+ media_device_unregister(&rkisp1->media_dev);
+ v4l2_device_unregister(&rkisp1->v4l2_dev);
+
++ media_device_cleanup(&rkisp1->media_dev);
++
+ pm_runtime_disable(&pdev->dev);
+ }
+
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+index 07fbb77ce2349e..8fc9c1c116f1dc 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+@@ -281,11 +281,25 @@ static void rkisp1_isp_stop(struct rkisp1_isp *isp)
+ * ISP(mi) stop in mi frame end -> Stop ISP(mipi) ->
+ * Stop ISP(isp) ->wait for ISP isp off
+ */
+- /* stop and clear MI and ISP interrupts */
+- rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, 0);
+- rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, ~0);
+
++ /* Mask MI and ISP interrupts */
++ rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, 0);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_IMSC, 0);
++
++ /* Flush posted writes */
++ rkisp1_read(rkisp1, RKISP1_CIF_MI_IMSC);
++
++ /*
++ * Wait until the IRQ handler has ended. The IRQ handler may get called
++ * even after this, but it will return immediately as the MI and ISP
++ * interrupts have been masked.
++ */
++ synchronize_irq(rkisp1->irqs[RKISP1_IRQ_ISP]);
++ if (rkisp1->irqs[RKISP1_IRQ_ISP] != rkisp1->irqs[RKISP1_IRQ_MI])
++ synchronize_irq(rkisp1->irqs[RKISP1_IRQ_MI]);
++
++ /* Clear MI and ISP interrupt status */
++ rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, ~0);
+ rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, ~0);
+
+ /* stop ISP */
+@@ -1013,6 +1027,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ u32 status, isp_err;
+
++ if (!rkisp1->irqs_enabled)
++ return IRQ_NONE;
++
+ status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
+ if (!status)
+ return IRQ_NONE;
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+index c15ae0218118c0..eb0aae56d2c7f8 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+@@ -363,12 +363,8 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
+ {
+ struct rkisp1_resizer *rsz =
+ container_of(sd, struct rkisp1_resizer, sd);
+- struct v4l2_subdev_pad_config dummy_cfg;
+- struct v4l2_subdev_state pad_state = {
+- .pads = &dummy_cfg
+- };
+- u32 pad = code->pad;
+- int ret;
++ unsigned int index = code->index;
++ unsigned int i;
+
+ if (code->pad == RKISP1_RSZ_PAD_SRC) {
+ /* supported mbus codes on the src are the same as in the capture */
+@@ -388,15 +384,29 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
+ return 0;
+ }
+
+- /* supported mbus codes on the sink pad are the same as isp src pad */
+- code->pad = RKISP1_ISP_PAD_SOURCE_VIDEO;
+- ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
+- &pad_state, code);
++ /*
++ * Supported mbus codes on the sink pad are the same as on the ISP
++ * source pad.
++ */
++ for (i = 0; ; i++) {
++ const struct rkisp1_mbus_info *fmt =
++ rkisp1_mbus_info_get_by_index(i);
+
+- /* restore pad */
+- code->pad = pad;
+- code->flags = 0;
+- return ret;
++ if (!fmt)
++ break;
++
++ if (!(fmt->direction & RKISP1_ISP_SD_SRC))
++ continue;
++
++ if (!index) {
++ code->code = fmt->mbus_code;
++ return 0;
++ }
++
++ index--;
++ }
++
++ return -EINVAL;
+ }
+
+ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+index 76634d242b1030..0f5b3845d7b94f 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+@@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+
+ ret = vb2_queue_init(q);
+ if (ret)
+- goto err_vd_rel;
++ return ret;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
+ if (ret)
+- goto err_vd_rel;
++ return ret;
+
+ video_set_drvdata(vfd, vp);
+
+@@ -1171,8 +1171,6 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+ err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+-err_vd_rel:
+- video_device_release(vfd);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+index f62703cebb77c7..4b4c129c09e70f 100644
+--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+@@ -1297,7 +1297,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
+ src_ready = false;
+ if (!src_ready || ctx->dst_queue_cnt == 0)
+- clear_work_bit(ctx);
++ clear_work_bit_irqsave(ctx);
+
+ return 0;
+ }
+diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+index ad13d447d48346..aa1df66d5ac62d 100644
+--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
++++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+@@ -39,6 +39,10 @@ static const struct media_entity_operations sun4i_csi_video_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ };
+
++static const struct media_entity_operations sun4i_csi_subdev_entity_ops = {
++ .link_validate = v4l2_subdev_link_validate,
++};
++
+ static int sun4i_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+@@ -213,6 +217,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
+ v4l2_subdev_init(subdev, &sun4i_csi_subdev_ops);
+ subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
++ subdev->entity.ops = &sun4i_csi_subdev_entity_ops;
+ subdev->owner = THIS_MODULE;
+ snprintf(subdev->name, sizeof(subdev->name), "sun4i-csi-0");
+ v4l2_set_subdevdata(subdev, csi);
+diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+index 47a8c0fb7eb9f2..99c401e653bc4e 100644
+--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
++++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+@@ -8,6 +8,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ select REGMAP_MMIO
++ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.
+diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+index 90ab1d77b6a5e7..f7ff0937828cf1 100644
+--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
++++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+@@ -66,6 +66,7 @@ static void deinterlace_device_run(void *priv)
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned int hstep, vstep;
+ dma_addr_t addr;
++ int i;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+@@ -160,6 +161,26 @@ static void deinterlace_device_run(void *priv)
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+
++ /* neutral filter coefficients */
++ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
++ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
++ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
++ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
++
++ for (i = 0; i < 32; i++) {
++ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
++ DEINTERLACE_IDENTITY_COEF);
++ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
++ DEINTERLACE_IDENTITY_COEF);
++ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
++ DEINTERLACE_IDENTITY_COEF);
++ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
++ DEINTERLACE_IDENTITY_COEF);
++ }
++
++ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
++ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
++
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
+@@ -248,7 +269,6 @@ static irqreturn_t deinterlace_irq(int irq, void *data)
+ static void deinterlace_init(struct deinterlace_dev *dev)
+ {
+ u32 val;
+- int i;
+
+ deinterlace_write(dev, DEINTERLACE_BYPASS,
+ DEINTERLACE_BYPASS_CSC);
+@@ -284,27 +304,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
+ DEINTERLACE_CHROMA_DIFF_TH_MSK,
+- DEINTERLACE_CHROMA_DIFF_TH(5));
+-
+- /* neutral filter coefficients */
+- deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+- DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+- readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+- val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+-
+- for (i = 0; i < 32; i++) {
+- deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+- DEINTERLACE_IDENTITY_COEF);
+- deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+- DEINTERLACE_IDENTITY_COEF);
+- deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+- DEINTERLACE_IDENTITY_COEF);
+- deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+- DEINTERLACE_IDENTITY_COEF);
+- }
+-
+- deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+- DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
++ DEINTERLACE_CHROMA_DIFF_TH(31));
+ }
+
+ static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
+@@ -929,11 +929,18 @@ static int deinterlace_runtime_resume(struct device *device)
+ return ret;
+ }
+
++ ret = reset_control_deassert(dev->rstc);
++ if (ret) {
++ dev_err(dev->dev, "Failed to apply reset\n");
++
++ goto err_exclusive_rate;
++ }
++
+ ret = clk_prepare_enable(dev->bus_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable bus clock\n");
+
+- goto err_exclusive_rate;
++ goto err_rst;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+@@ -950,23 +957,16 @@ static int deinterlace_runtime_resume(struct device *device)
+ goto err_mod_clk;
+ }
+
+- ret = reset_control_deassert(dev->rstc);
+- if (ret) {
+- dev_err(dev->dev, "Failed to apply reset\n");
+-
+- goto err_ram_clk;
+- }
+-
+ deinterlace_init(dev);
+
+ return 0;
+
+-err_ram_clk:
+- clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+ err_bus_clk:
+ clk_disable_unprepare(dev->bus_clk);
++err_rst:
++ reset_control_assert(dev->rstc);
+ err_exclusive_rate:
+ clk_rate_exclusive_put(dev->mod_clk);
+
+@@ -977,11 +977,12 @@ static int deinterlace_runtime_suspend(struct device *device)
+ {
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+
+- reset_control_assert(dev->rstc);
+-
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->bus_clk);
++
++ reset_control_assert(dev->rstc);
++
+ clk_rate_exclusive_put(dev->mod_clk);
+
+ return 0;
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 423fc85d79ee36..1874c976081f8e 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+- ctx->codec_ops->reset(ctx);
++ if (ctx->codec_ops->reset)
++ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ }
+ }
+@@ -903,6 +904,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ vpu->encoder = func;
++ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
++ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ } else {
+ vpu->decoder = func;
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
+index 0224ff68ab3fcf..64d6fb852ae9b0 100644
+--- a/drivers/media/platform/verisilicon/hantro_postproc.c
++++ b/drivers/media/platform/verisilicon/hantro_postproc.c
+@@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+
+ static int down_scale_factor(struct hantro_ctx *ctx)
+ {
+- if (ctx->src_fmt.width == ctx->dst_fmt.width)
++ if (ctx->src_fmt.width <= ctx->dst_fmt.width)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
+index b3ae037a50f612..db145519fc5d38 100644
+--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
++++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
+@@ -785,6 +785,9 @@ const struct v4l2_ioctl_ops hantro_ioctl_ops = {
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+
++ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
++ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
++
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+ };
+diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+index 816ffa905a4bb4..f9752767078355 100644
+--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+@@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = {
+ };
+
+ static const char * const rk3588_vpu981_vpu_clk_names[] = {
+- "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
++ "aclk", "hclk",
+ };
+
+ /* VDPU1/VEPU1 */
+diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
+index c591c0851fa28b..ad49151f5ff094 100644
+--- a/drivers/media/radio/radio-isa.c
++++ b/drivers/media/radio/radio-isa.c
+@@ -36,7 +36,7 @@ static int radio_isa_querycap(struct file *file, void *priv,
+
+ strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver));
+ strscpy(v->card, isa->drv->card, sizeof(v->card));
+- snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name);
++ snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev_name(isa->v4l2_dev.dev));
+ return 0;
+ }
+
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index f1c5c0a6a335cb..e3e6aa87fe081f 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -62,7 +62,7 @@ struct shark_device {
+ #ifdef SHARK_USE_LEDS
+ struct work_struct led_work;
+ struct led_classdev leds[NO_LEDS];
+- char led_names[NO_LEDS][32];
++ char led_names[NO_LEDS][64];
+ atomic_t brightness[NO_LEDS];
+ unsigned long brightness_new;
+ #endif
+diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
+index fe17c7f98e8101..52d82cbe7685f5 100644
+--- a/drivers/media/rc/bpf-lirc.c
++++ b/drivers/media/rc/bpf-lirc.c
+@@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+ if (attr->attach_flags)
+ return -EINVAL;
+
+- rcdev = rc_dev_get_from_fd(attr->target_fd);
++ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
+ if (IS_ERR(rcdev))
+ return PTR_ERR(rcdev);
+
+@@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+- rcdev = rc_dev_get_from_fd(attr->target_fd);
++ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
+ if (IS_ERR(rcdev)) {
+ bpf_prog_put(prog);
+ return PTR_ERR(rcdev);
+@@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
+ if (attr->query.query_flags)
+ return -EINVAL;
+
+- rcdev = rc_dev_get_from_fd(attr->query.target_fd);
++ rcdev = rc_dev_get_from_fd(attr->query.target_fd, false);
+ if (IS_ERR(rcdev))
+ return PTR_ERR(rcdev);
+
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 74546f7e34691e..e5590a708f1c5d 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1148,10 +1148,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
+
+ memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
+
+- if (!mutex_is_locked(&ictx->lock)) {
+- unlock = true;
+- mutex_lock(&ictx->lock);
+- }
++ unlock = mutex_trylock(&ictx->lock);
+
+ retval = send_packet(ictx);
+ if (retval)
+@@ -2427,6 +2424,12 @@ static int imon_probe(struct usb_interface *interface,
+ goto fail;
+ }
+
++ if (first_if->dev.driver != interface->dev.driver) {
++ dev_err(&interface->dev, "inconsistent driver matching\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
+ if (ifnum == 0) {
+ ictx = imon_init_intf0(interface, id);
+ if (!ictx) {
+diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
+index 13e81bf8005df0..39a7e2db63a78b 100644
+--- a/drivers/media/rc/ir-rx51.c
++++ b/drivers/media/rc/ir-rx51.c
+@@ -34,13 +34,13 @@ struct ir_rx51 {
+ static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
+ {
+ ir_rx51->state.enabled = true;
+- pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
++ pwm_apply_might_sleep(ir_rx51->pwm, &ir_rx51->state);
+ }
+
+ static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
+ {
+ ir_rx51->state.enabled = false;
+- pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
++ pwm_apply_might_sleep(ir_rx51->pwm, &ir_rx51->state);
+ }
+
+ static int init_timing_params(struct ir_rx51 *ir_rx51)
+diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
+index 3d8488c39c5615..3311099cbd573b 100644
+--- a/drivers/media/rc/ir-sharp-decoder.c
++++ b/drivers/media/rc/ir-sharp-decoder.c
+@@ -15,7 +15,9 @@
+ #define SHARP_UNIT 40 /* us */
+ #define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
+ #define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
+-#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
++#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
++#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
++#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
+ #define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
+ #define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
+
+@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
+ .header_pulse = 0,
+ .header_space = 0,
+ .bit_pulse = SHARP_BIT_PULSE,
+- .bit_space[0] = SHARP_BIT_0_PERIOD,
+- .bit_space[1] = SHARP_BIT_1_PERIOD,
++ .bit_space[0] = SHARP_BIT_0_SPACE,
++ .bit_space[1] = SHARP_BIT_1_SPACE,
+ .trailer_pulse = SHARP_BIT_PULSE,
+ .trailer_space = SHARP_ECHO_SPACE,
+ .msb_first = 1,
+diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
+index 19680670925949..69e630d85262f6 100644
+--- a/drivers/media/rc/ir_toy.c
++++ b/drivers/media/rc/ir_toy.c
+@@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
+ sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
+ if (err) {
+ dev_err(irtoy->dev, "exit sample mode: %d\n", err);
++ kfree(buf);
+ return err;
+ }
+
+@@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
+ sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+ if (err) {
+ dev_err(irtoy->dev, "enter sample mode: %d\n", err);
++ kfree(buf);
+ return err;
+ }
+
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index 043d23aaa3cbcc..f8901d6fbe9bf1 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
+ if (ret < 0)
+ goto out_kfree_raw;
+
+- count = ret;
++ /* drop trailing space */
++ if (!(ret % 2))
++ count = ret - 1;
++ else
++ count = ret;
+
+ txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
+ if (!txbuf) {
+@@ -810,7 +814,7 @@ void __exit lirc_dev_exit(void)
+ unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
+ }
+
+-struct rc_dev *rc_dev_get_from_fd(int fd)
++struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
+ {
+ struct fd f = fdget(fd);
+ struct lirc_fh *fh;
+@@ -824,6 +828,11 @@ struct rc_dev *rc_dev_get_from_fd(int fd)
+ return ERR_PTR(-EINVAL);
+ }
+
++ if (write && !(f.file->f_mode & FMODE_WRITE)) {
++ fdput(f);
++ return ERR_PTR(-EPERM);
++ }
++
+ fh = f.file->private_data;
+ dev = fh->rc;
+
+diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
+index 7732054c4621e6..4a6fafe7a249e3 100644
+--- a/drivers/media/rc/pwm-ir-tx.c
++++ b/drivers/media/rc/pwm-ir-tx.c
+@@ -67,7 +67,7 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+
+ for (i = 0; i < count; i++) {
+ state.enabled = !(i % 2);
+- pwm_apply_state(pwm, &state);
++ pwm_apply_might_sleep(pwm, &state);
+
+ edge = ktime_add_us(edge, txbuf[i]);
+ delta = ktime_us_delta(edge, ktime_get());
+@@ -76,7 +76,7 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ }
+
+ state.enabled = false;
+- pwm_apply_state(pwm, &state);
++ pwm_apply_might_sleep(pwm, &state);
+
+ return count;
+ }
+diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
+index ef1e95e1af7fcc..7df949fc65e2b6 100644
+--- a/drivers/media/rc/rc-core-priv.h
++++ b/drivers/media/rc/rc-core-priv.h
+@@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
+ void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
+ int lirc_register(struct rc_dev *dev);
+ void lirc_unregister(struct rc_dev *dev);
+-struct rc_dev *rc_dev_get_from_fd(int fd);
++struct rc_dev *rc_dev_get_from_fd(int fd, bool write);
+ #else
+ static inline int lirc_dev_init(void) { return 0; }
+ static inline void lirc_dev_exit(void) {}
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+index b51e6a3b8cbeb5..f99878eff7acea 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
+@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ m->priv = args->priv;
+ m->network_id = args->network_id;
+ m->network_name = kstrdup(args->network_name, GFP_KERNEL);
++ if (!m->network_name)
++ goto free_mux_buf;
++
+ m->timing.current_jiffies = get_jiffies_64();
+
+ if (args->channels)
+ m->channels = args->channels;
+ else
+ if (vidtv_channels_init(m) < 0)
+- goto free_mux_buf;
++ goto free_mux_network_name;
+
+ /* will alloc data for pmt_sections after initializing pat */
+ if (vidtv_channel_si_init(m) < 0)
+@@ -527,6 +530,8 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ vidtv_channel_si_destroy(m);
+ free_channels:
+ vidtv_channels_destroy(m);
++free_mux_network_name:
++ kfree(m->network_name);
+ free_mux_buf:
+ vfree(m->mux_buf);
+ free_mux:
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index ce0b7a6e92dc33..2a51c898c11ebd 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
+
+ desc->service_name_len = service_name_len;
+
+- if (service_name && service_name_len)
++ if (service_name && service_name_len) {
+ desc->service_name = kstrdup(service_name, GFP_KERNEL);
++ if (!desc->service_name)
++ goto free_desc;
++ }
+
+ desc->provider_name_len = provider_name_len;
+
+- if (provider_name && provider_name_len)
++ if (provider_name && provider_name_len) {
+ desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
++ if (!desc->provider_name)
++ goto free_desc_service_name;
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
++
++free_desc_service_name:
++ if (service_name && service_name_len)
++ kfree(desc->service_name);
++free_desc:
++ kfree(desc);
++ return NULL;
+ }
+
+ struct vidtv_psi_desc_registration
+@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
+
+ desc->length = network_name_len;
+
+- if (network_name && network_name_len)
++ if (network_name && network_name_len) {
+ desc->network_name = kstrdup(network_name, GFP_KERNEL);
++ if (!desc->network_name) {
++ kfree(desc);
++ return NULL;
++ }
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
+ iso_language_code = "eng";
+
+ desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
++ if (!desc->iso_language_code)
++ goto free_desc;
+
+- if (event_name && event_name_len)
++ if (event_name && event_name_len) {
+ desc->event_name = kstrdup(event_name, GFP_KERNEL);
++ if (!desc->event_name)
++ goto free_desc_language_code;
++ }
+
+- if (text && text_len)
++ if (text && text_len) {
+ desc->text = kstrdup(text, GFP_KERNEL);
++ if (!desc->text)
++ goto free_desc_event_name;
++ }
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
++
++free_desc_event_name:
++ if (event_name && event_name_len)
++ kfree(desc->event_name);
++free_desc_language_code:
++ kfree(desc->iso_language_code);
++free_desc:
++ kfree(desc);
++ return NULL;
+ }
+
+ struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
+diff --git a/drivers/media/test-drivers/visl/visl-video.c b/drivers/media/test-drivers/visl/visl-video.c
+index 7cac6a6456eb6e..9303a3e118d771 100644
+--- a/drivers/media/test-drivers/visl/visl-video.c
++++ b/drivers/media/test-drivers/visl/visl-video.c
+@@ -525,6 +525,9 @@ const struct v4l2_ioctl_ops visl_ioctl_ops = {
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
++ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
++ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
++
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ };
+diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+index b5b104ee64c99f..c57771119a34b0 100644
+--- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
++++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ rds->ta = alt;
+ rds->ms = true;
+ snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+- freq / 16, ((freq & 0xf) * 10) / 16);
++ (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
+ if (alt)
+ strscpy(rds->radiotext,
+ " The Radio Data System can switch between different Radio Texts ",
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 3a06df35a2d7ce..99325bfed64310 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -106,8 +106,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
+ if (*nplanes != buffers)
+ return -EINVAL;
+ for (p = 0; p < buffers; p++) {
+- if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
+- dev->fmt_cap->data_offset[p])
++ if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
++ dev->fmt_cap->vdownsampling[p] +
++ dev->fmt_cap->data_offset[p])
+ return -EINVAL;
+ }
+ } else {
+@@ -1556,8 +1557,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ return -EINVAL;
+ if (edid->blocks == 0) {
+ dev->edid_blocks = 0;
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++ if (dev->num_outputs) {
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++ }
+ phys_addr = CEC_PHYS_ADDR_INVALID;
+ goto set_phys_addr;
+ }
+@@ -1581,8 +1584,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ display_present |=
+ dev->display_present[i] << j++;
+
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+- v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++ if (dev->num_outputs) {
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
++ v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++ }
+
+ set_phys_addr:
+ /* TODO: a proper hotplug detect cycle should be emulated here */
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+index 184a6df2c29fe2..d05f547a587cd2 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+@@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
+ if (sizes[0] < size)
+ return -EINVAL;
+ for (p = 1; p < planes; p++) {
+- if (sizes[p] < dev->bytesperline_out[p] * h +
+- vfmt->data_offset[p])
++ if (sizes[p] < dev->bytesperline_out[p] * h /
++ vfmt->vdownsampling[p] +
++ vfmt->data_offset[p])
+ return -EINVAL;
+ }
+ } else {
+ for (p = 0; p < planes; p++)
+- sizes[p] = p ? dev->bytesperline_out[p] * h +
+- vfmt->data_offset[p] : size;
++ sizes[p] = p ? dev->bytesperline_out[p] * h /
++ vfmt->vdownsampling[p] +
++ vfmt->data_offset[p] : size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+@@ -127,7 +129,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
+
+ for (p = 0; p < planes; p++) {
+ if (p)
+- size = dev->bytesperline_out[p] * h;
++ size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+ size += vb->planes[p].data_offset;
+
+ if (vb2_get_plane_payload(vb, p) < size) {
+@@ -334,8 +336,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
+ for (p = 0; p < mp->num_planes; p++) {
+ mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
+ mp->plane_fmt[p].sizeimage =
+- mp->plane_fmt[p].bytesperline * mp->height +
+- fmt->data_offset[p];
++ mp->plane_fmt[p].bytesperline * mp->height /
++ fmt->vdownsampling[p] + fmt->data_offset[p];
+ }
+ for (p = fmt->buffers; p < fmt->planes; p++) {
+ unsigned stride = dev->bytesperline_out[p];
+diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h
+index 07aeead0644a31..724952e001cd13 100644
+--- a/drivers/media/tuners/tuner-i2c.h
++++ b/drivers/media/tuners/tuner-i2c.h
+@@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props,
+ } \
+ if (0 == __ret) { \
+ state = kzalloc(sizeof(type), GFP_KERNEL); \
+- if (!state) { \
+- __ret = -ENOMEM; \
++ if (NULL == state) \
+ goto __fail; \
+- } \
+ state->i2c_props.addr = i2caddr; \
+ state->i2c_props.adap = i2cadap; \
+ state->i2c_props.name = devname; \
+diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c
+index 5a967edceca93d..352b8a3679b721 100644
+--- a/drivers/media/tuners/xc2028.c
++++ b/drivers/media/tuners/xc2028.c
+@@ -1361,9 +1361,16 @@ static void load_firmware_cb(const struct firmware *fw,
+ void *context)
+ {
+ struct dvb_frontend *fe = context;
+- struct xc2028_data *priv = fe->tuner_priv;
++ struct xc2028_data *priv;
+ int rc;
+
++ if (!fe) {
++ pr_warn("xc2028: No frontend in %s\n", __func__);
++ return;
++ }
++
++ priv = fe->tuner_priv;
++
+ tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error");
+ if (!fw) {
+ tuner_err("Could not load firmware %s.\n", priv->fname);
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index 57ded9ff3f0435..29bc63021c5aae 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ struct xc4000_priv *priv = fe->tuner_priv;
+
++ mutex_lock(&priv->lock);
+ *freq = priv->freq_hz + priv->freq_offset;
+
+ if (debug) {
+- mutex_lock(&priv->lock);
+ if ((priv->cur_fw.type
+ & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
+ u16 snr = 0;
+@@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ return 0;
+ }
+ }
+- mutex_unlock(&priv->lock);
+ }
++ mutex_unlock(&priv->lock);
+
+ dprintk(1, "%s()\n", __func__);
+
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 790787f0eba840..bcb24d8964981c 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -515,7 +515,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
+
+ alt = fc_usb->uintf->cur_altsetting;
+
+- if (alt->desc.bNumEndpoints < 1)
++ if (alt->desc.bNumEndpoints < 2)
+ return -ENODEV;
+ if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
+ return -ENODEV;
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index 727e6268567f75..f1feccc28bf053 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -1024,6 +1024,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
+ if (!dev->video_mode.isoc_ctl.urb) {
+ dev_err(dev->dev,
+ "cannot alloc memory for usb buffers\n");
++ kfree(dma_q->p_left_data);
+ return -ENOMEM;
+ }
+
+@@ -1033,6 +1034,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
+ dev_err(dev->dev,
+ "cannot allocate memory for usbtransfer\n");
+ kfree(dev->video_mode.isoc_ctl.urb);
++ kfree(dma_q->p_left_data);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 33a2aa8907e653..4eb7dd4599b7e6 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
+- if (msg[0].len < 3 || msg[1].len < 1)
+- return -EOPNOTSUPP;
++ if (msg[0].len < 3 || msg[1].len < 1) {
++ ret = -EOPNOTSUPP;
++ goto unlock;
++ }
+ /* demod access via firmware interface */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+@@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1])) {
+- if (msg[0].len < 3)
+- return -EOPNOTSUPP;
++ if (msg[0].len < 3) {
++ ret = -EOPNOTSUPP;
++ goto unlock;
++ }
+ /* demod access via firmware interface */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+@@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ ret = -EOPNOTSUPP;
+ }
+
++unlock:
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 3af594134a6de9..6ddc2051339396 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2412,7 +2412,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
+
+ adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+
+- return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
++ if (!adap->fe_adap[0].fe) {
++ release_firmware(state->frontend_firmware);
++ return -ENODEV;
++ }
++
++ return 0;
+ }
+
+ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+@@ -2485,8 +2490,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
+ adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+
+- if (adap->fe_adap[0].fe == NULL)
++ if (!adap->fe_adap[0].fe) {
++ release_firmware(state->frontend_firmware);
+ return -ENODEV;
++ }
+
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+ dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
+@@ -2494,7 +2501,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
+ dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
+
+- return fe_slave == NULL ? -ENODEV : 0;
++ if (!fe_slave) {
++ release_firmware(state->frontend_firmware);
++ return -ENODEV;
++ }
++
++ return 0;
+ }
+
+ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index b3bb1805829adb..f31d3835430e74 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -716,6 +716,7 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ {
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct dw2102_state *state;
++ int j;
+
+ if (!d)
+ return -ENODEV;
+@@ -729,11 +730,11 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ return -EAGAIN;
+ }
+
+- switch (num) {
+- case 1:
+- switch (msg[0].addr) {
++ j = 0;
++ while (j < num) {
++ switch (msg[j].addr) {
+ case SU3000_STREAM_CTRL:
+- state->data[0] = msg[0].buf[0] + 0x36;
++ state->data[0] = msg[j].buf[0] + 0x36;
+ state->data[1] = 3;
+ state->data[2] = 0;
+ if (dvb_usb_generic_rw(d, state->data, 3,
+@@ -745,61 +746,86 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ if (dvb_usb_generic_rw(d, state->data, 1,
+ state->data, 2, 0) < 0)
+ err("i2c transfer failed.");
+- msg[0].buf[1] = state->data[0];
+- msg[0].buf[0] = state->data[1];
++ msg[j].buf[1] = state->data[0];
++ msg[j].buf[0] = state->data[1];
+ break;
+ default:
+- if (3 + msg[0].len > sizeof(state->data)) {
+- warn("i2c wr: len=%d is too big!\n",
+- msg[0].len);
++			/* if the current write msg is followed by another
++			 * read msg to/from the same address
++			 */
++ if ((j+1 < num) && (msg[j+1].flags & I2C_M_RD) &&
++ (msg[j].addr == msg[j+1].addr)) {
++ /* join both i2c msgs to one usb read command */
++ if (4 + msg[j].len > sizeof(state->data)) {
++ warn("i2c combined wr/rd: write len=%d is too big!\n",
++ msg[j].len);
++ num = -EOPNOTSUPP;
++ break;
++ }
++ if (1 + msg[j+1].len > sizeof(state->data)) {
++ warn("i2c combined wr/rd: read len=%d is too big!\n",
++ msg[j+1].len);
++ num = -EOPNOTSUPP;
++ break;
++ }
++
++ state->data[0] = 0x09;
++ state->data[1] = msg[j].len;
++ state->data[2] = msg[j+1].len;
++ state->data[3] = msg[j].addr;
++ memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++ if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
++ state->data, msg[j+1].len + 1, 0) < 0)
++ err("i2c transfer failed.");
++
++ memcpy(msg[j+1].buf, &state->data[1], msg[j+1].len);
++ j++;
++ break;
++ }
++
++ if (msg[j].flags & I2C_M_RD) {
++ /* single read */
++ if (4 + msg[j].len > sizeof(state->data)) {
++ warn("i2c rd: len=%d is too big!\n", msg[j].len);
++ num = -EOPNOTSUPP;
++ break;
++ }
++
++ state->data[0] = 0x09;
++ state->data[1] = 0;
++ state->data[2] = msg[j].len;
++ state->data[3] = msg[j].addr;
++ memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++ if (dvb_usb_generic_rw(d, state->data, 4,
++ state->data, msg[j].len + 1, 0) < 0)
++ err("i2c transfer failed.");
++
++ memcpy(msg[j].buf, &state->data[1], msg[j].len);
++ break;
++ }
++
++ /* single write */
++ if (3 + msg[j].len > sizeof(state->data)) {
++ warn("i2c wr: len=%d is too big!\n", msg[j].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
+- /* always i2c write*/
+ state->data[0] = 0x08;
+- state->data[1] = msg[0].addr;
+- state->data[2] = msg[0].len;
++ state->data[1] = msg[j].addr;
++ state->data[2] = msg[j].len;
+
+- memcpy(&state->data[3], msg[0].buf, msg[0].len);
++ memcpy(&state->data[3], msg[j].buf, msg[j].len);
+
+- if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
++ if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
+ state->data, 1, 0) < 0)
+ err("i2c transfer failed.");
++ } // switch
++ j++;
+
+- }
+- break;
+- case 2:
+- /* always i2c read */
+- if (4 + msg[0].len > sizeof(state->data)) {
+- warn("i2c rd: len=%d is too big!\n",
+- msg[0].len);
+- num = -EOPNOTSUPP;
+- break;
+- }
+- if (1 + msg[1].len > sizeof(state->data)) {
+- warn("i2c rd: len=%d is too big!\n",
+- msg[1].len);
+- num = -EOPNOTSUPP;
+- break;
+- }
+-
+- state->data[0] = 0x09;
+- state->data[1] = msg[0].len;
+- state->data[2] = msg[1].len;
+- state->data[3] = msg[0].addr;
+- memcpy(&state->data[4], msg[0].buf, msg[0].len);
+-
+- if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+- state->data, msg[1].len + 1, 0) < 0)
+- err("i2c transfer failed.");
+-
+- memcpy(msg[1].buf, &state->data[1], msg[1].len);
+- break;
+- default:
+- warn("more than 2 i2c messages at a time is not handled yet.");
+- break;
+- }
++ } // while
+ mutex_unlock(&d->data_mutex);
+ mutex_unlock(&d->i2c_mutex);
+ return num;
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 4d037c92af7c58..bae76023cf71d3 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -4094,6 +4094,10 @@ static int em28xx_usb_probe(struct usb_interface *intf,
+ * topology will likely change after the load of the em28xx subdrivers.
+ */
+ #ifdef CONFIG_MEDIA_CONTROLLER
++	/*
++	 * No need to check the return value; the device will still be
++	 * usable without the media controller API.
++	 */
+ retval = media_device_register(dev->media_dev);
+ #endif
+
+diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
+index 0c24e298430487..eb03f98b2ef113 100644
+--- a/drivers/media/usb/go7007/go7007-driver.c
++++ b/drivers/media/usb/go7007/go7007-driver.c
+@@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go)
+ const struct firmware *fw_entry;
+ char fw_name[] = "go7007/go7007fw.bin";
+ void *bounce;
+- int fw_len, rv = 0;
++ int fw_len;
+ u16 intr_val, intr_data;
+
+ if (go->boot_fw == NULL) {
+@@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go)
+ go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
+ (intr_val & ~0x1) != 0x5a5a) {
+ v4l2_err(go, "error transferring firmware\n");
+- rv = -1;
++ kfree(go->boot_fw);
++ go->boot_fw = NULL;
++ return -1;
+ }
+- return rv;
++ return 0;
+ }
+
+ MODULE_FIRMWARE("go7007/go7007fw.bin");
+diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
+index eeb85981e02b67..762c13e49bfa5c 100644
+--- a/drivers/media/usb/go7007/go7007-usb.c
++++ b/drivers/media/usb/go7007/go7007-usb.c
+@@ -1201,7 +1201,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
+ u16 channel;
+
+ /* read channel number from GPIO[1:0] */
+- go7007_read_addr(go, 0x3c81, &channel);
++ if (go7007_read_addr(go, 0x3c81, &channel))
++ goto allocfail;
++
+ channel &= 0x3;
+ go->board_id = GO7007_BOARDID_ADLINK_MPG24;
+ usb->board = board = &board_adlink_mpg24;
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index 46ed95483e2222..5f5fa851ca640d 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -18,6 +18,7 @@
+
+ #include <linux/input.h>
+ #include <linux/sched/signal.h>
++#include <linux/bitops.h>
+
+ #include "gspca.h"
+
+@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
+ sd->params.exposure.expMode = 2;
+ sd->exposure_status = EXPOSURE_NORMAL;
+ }
++ if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
++ return -EINVAL;
+ currentexp = currentexp << sd->params.exposure.gain;
+ sd->params.exposure.gain = 0;
+ /* round down current exposure to nearest value */
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+index 14170a5d72b350..73c95ba2328a41 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
+@@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
+ }
+
+
+-static void pvr2_context_notify(struct pvr2_context *mp)
++static void pvr2_context_notify(void *ptr)
+ {
++ struct pvr2_context *mp = ptr;
++
+ pvr2_context_set_notify(mp,!0);
+ }
+
+@@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
+ pvr2_trace(PVR2_TRACE_CTXT,
+ "pvr2_context %p (initialize)", mp);
+ /* Finish hardware initialization */
+- if (pvr2_hdw_initialize(mp->hdw,
+- (void (*)(void *))pvr2_context_notify,
+- mp)) {
++ if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
+ mp->video_stream.stream =
+ pvr2_hdw_get_video_stream(mp->hdw);
+ /* Trigger interface initialization. By doing this
+@@ -267,8 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp)
+ void pvr2_context_disconnect(struct pvr2_context *mp)
+ {
+ pvr2_hdw_disconnect(mp->hdw);
++ if (!pvr2_context_shutok())
++ pvr2_context_notify(mp);
+ mp->disconnect_flag = !0;
+- pvr2_context_notify(mp);
+ }
+
+
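The pvrusb2 hunks above replace function-pointer casts like (void (*)(void *))pvr2_context_notify with callbacks that natively take void *. Calling a function through an incompatible pointer type is undefined behaviour and can trip kernel control-flow-integrity checking; the portable pattern is to cast the data argument inside the callback, as sketched here (names hypothetical):

/* Generic notification API: callbacks take void * and recover their
 * real context type themselves, so no function-pointer cast is needed. */
struct ctx {
	int pending;
};

static void notify(void *ptr)
{
	struct ctx *c = ptr;	/* cast the data, not the function */

	c->pending = 1;
}

typedef void (*callback_t)(void *);

static void fire(callback_t cb, void *data)
{
	cb(data);
}

/* usage: fire(notify, &my_ctx); */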
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+index 26811efe0fb58b..9a9bae21c61475 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+@@ -88,8 +88,10 @@ static int pvr2_dvb_feed_thread(void *data)
+ return stat;
+ }
+
+-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
++static void pvr2_dvb_notify(void *ptr)
+ {
++ struct pvr2_dvb_adapter *adap = ptr;
++
+ wake_up(&adap->buffer_wait_data);
+ }
+
+@@ -149,7 +151,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
+ }
+
+ pvr2_stream_set_callback(pvr->video_stream.stream,
+- (pvr2_stream_callback) pvr2_dvb_notify, adap);
++ pvr2_dvb_notify, adap);
+
+ ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
+ if (ret < 0) return ret;
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+index c04ab7258d6452..d608b793fa847b 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+@@ -1033,8 +1033,10 @@ static int pvr2_v4l2_open(struct file *file)
+ }
+
+
+-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
++static void pvr2_v4l2_notify(void *ptr)
+ {
++ struct pvr2_v4l2_fh *fhp = ptr;
++
+ wake_up(&fhp->wait_data);
+ }
+
+@@ -1067,7 +1069,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
+
+ hdw = fh->channel.mc_head->hdw;
+ sp = fh->pdi->stream->stream;
+- pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
++ pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh);
+ pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
+ if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
+ return pvr2_ioread_set_enabled(fh->rhp,!0);
+@@ -1198,11 +1200,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
+ dip->minor_type = pvr2_v4l_type_video;
+ nr_ptr = video_nr;
+ caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+- if (!dip->stream) {
+- pr_err(KBUILD_MODNAME
+- ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
+- return;
+- }
+ break;
+ case VFL_TYPE_VBI:
+ dip->config = pvr2_config_vbi;
+diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
+index 3c2627712fe9d5..6f78be3c42d6ab 100644
+--- a/drivers/media/usb/s2255/s2255drv.c
++++ b/drivers/media/usb/s2255/s2255drv.c
+@@ -247,7 +247,7 @@ struct s2255_vc {
+ struct s2255_dev {
+ struct s2255_vc vc[MAX_CHANNELS];
+ struct v4l2_device v4l2_dev;
+- atomic_t num_channels;
++ refcount_t num_channels;
+ int frames;
+ struct mutex lock; /* channels[].vdev.lock */
+ struct mutex cmdlock; /* protects cmdbuf */
+@@ -1550,11 +1550,11 @@ static void s2255_video_device_release(struct video_device *vdev)
+ container_of(vdev, struct s2255_vc, vdev);
+
+ dprintk(dev, 4, "%s, chnls: %d\n", __func__,
+- atomic_read(&dev->num_channels));
++ refcount_read(&dev->num_channels));
+
+ v4l2_ctrl_handler_free(&vc->hdl);
+
+- if (atomic_dec_and_test(&dev->num_channels))
++ if (refcount_dec_and_test(&dev->num_channels))
+ s2255_destroy(dev);
+ return;
+ }
+@@ -1659,7 +1659,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ "failed to register video device!\n");
+ break;
+ }
+- atomic_inc(&dev->num_channels);
++ refcount_inc(&dev->num_channels);
+ v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&vc->vdev));
+
+@@ -1667,11 +1667,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ pr_info("Sensoray 2255 V4L driver Revision: %s\n",
+ S2255_VERSION);
+ /* if no channels registered, return error and probe will fail*/
+- if (atomic_read(&dev->num_channels) == 0) {
++ if (refcount_read(&dev->num_channels) == 0) {
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
+- if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
++ if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
+ pr_warn("s2255: Not all channels available.\n");
+ return 0;
+ }
+@@ -2220,7 +2220,7 @@ static int s2255_probe(struct usb_interface *interface,
+ goto errorFWDATA1;
+ }
+
+- atomic_set(&dev->num_channels, 0);
++ refcount_set(&dev->num_channels, 0);
+ dev->pid = id->idProduct;
+ dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
+ if (!dev->fw_data)
+@@ -2340,12 +2340,12 @@ static void s2255_disconnect(struct usb_interface *interface)
+ {
+ struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
+ int i;
+- int channels = atomic_read(&dev->num_channels);
++ int channels = refcount_read(&dev->num_channels);
+ mutex_lock(&dev->lock);
+ v4l2_device_disconnect(&dev->v4l2_dev);
+ mutex_unlock(&dev->lock);
+ /*see comments in the uvc_driver.c usb disconnect function */
+- atomic_inc(&dev->num_channels);
++ refcount_inc(&dev->num_channels);
+ /* unregister each video device. */
+ for (i = 0; i < channels; i++)
+ video_unregister_device(&dev->vc[i].vdev);
+@@ -2358,7 +2358,7 @@ static void s2255_disconnect(struct usb_interface *interface)
+ dev->vc[i].vidstatus_ready = 1;
+ wake_up(&dev->vc[i].wait_vidstatus);
+ }
+- if (atomic_dec_and_test(&dev->num_channels))
++ if (refcount_dec_and_test(&dev->num_channels))
+ s2255_destroy(dev);
+ dev_info(&interface->dev, "%s\n", __func__);
+ }
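Switching num_channels from atomic_t to refcount_t keeps the same inc/dec-and-test flow but gains saturation semantics: an overflow or a decrement past zero triggers a WARN instead of silently wrapping and destroying the device twice. A kernel-style sketch of the idiom (assumes kernel context; names hypothetical):

#include <linux/refcount.h>

struct obj {
	refcount_t users;
};

static void obj_get(struct obj *o)
{
	refcount_inc(&o->users);	/* WARNs on overflow or inc-from-zero */
}

static void obj_put(struct obj *o, void (*destroy)(struct obj *))
{
	if (refcount_dec_and_test(&o->users))
		destroy(o);		/* runs exactly once */
}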
+diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
+index 4e966f6bf608de..e79c45db60ab56 100644
+--- a/drivers/media/usb/stk1160/stk1160-video.c
++++ b/drivers/media/usb/stk1160/stk1160-video.c
+@@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
+ static inline
+ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ {
+- int linesdone, lineoff, lencopy;
++ int linesdone, lineoff, lencopy, offset;
+ int bytesperline = dev->width * 2;
+ struct stk1160_buffer *buf = dev->isoc_ctl.buf;
+ u8 *dst = buf->mem;
+@@ -107,8 +107,7 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+
+ /*
+ * TODO: These stk1160_dbg are very spammy!
+- * We should 1) check why we are getting them
+- * and 2) add ratelimit.
++ * We should check why we are getting them.
+ *
+ * UPDATE: One of the reasons (the only one?) for getting these
+ * is incorrect standard (mismatch between expected and configured).
+@@ -140,8 +139,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ * Check if we have enough space left in the buffer.
+ * In that case, we force loop exit after copy.
+ */
+- if (lencopy > buf->bytesused - buf->length) {
+- lencopy = buf->bytesused - buf->length;
++ offset = dst - (u8 *)buf->mem;
++ if (offset > buf->length) {
++ dev_warn_ratelimited(dev->dev, "out of bounds offset\n");
++ return;
++ }
++ if (lencopy > buf->length - offset) {
++ lencopy = buf->length - offset;
+ remain = lencopy;
+ }
+
+@@ -151,7 +155,7 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+
+ /* Let the bug hunt begin! sanity checks! */
+ if (lencopy < 0) {
+- stk1160_dbg("copy skipped: negative lencopy\n");
++ printk_ratelimited(KERN_DEBUG "copy skipped: negative lencopy\n");
+ return;
+ }
+
+@@ -183,8 +187,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ * Check if we have enough space left in the buffer.
+ * In that case, we force loop exit after copy.
+ */
+- if (lencopy > buf->bytesused - buf->length) {
+- lencopy = buf->bytesused - buf->length;
++ offset = dst - (u8 *)buf->mem;
++ if (offset > buf->length) {
++ dev_warn_ratelimited(dev->dev, "offset out of bounds\n");
++ return;
++ }
++ if (lencopy > buf->length - offset) {
++ lencopy = buf->length - offset;
+ remain = lencopy;
+ }
+
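Both stk1160 hunks replace the old buf->bytesused - buf->length arithmetic, which could go negative, with a bounds check computed from the actual write offset into the buffer. The same clamp in standalone form (names hypothetical):

#include <stddef.h>
#include <string.h>

/* Clamp a copy against the real end of the destination buffer; a
 * destination already past the end drops the payload entirely. */
static size_t bounded_copy(unsigned char *base, size_t buf_len,
			   unsigned char *dst, const unsigned char *src,
			   size_t len)
{
	size_t offset = (size_t)(dst - base);

	if (offset > buf_len)
		return 0;
	if (len > buf_len - offset)
		len = buf_len - offset;
	memcpy(dst, src, len);
	return len;
}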
+diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
+index 1e30e05953dc64..7495df6b519125 100644
+--- a/drivers/media/usb/usbtv/usbtv-video.c
++++ b/drivers/media/usb/usbtv/usbtv-video.c
+@@ -962,15 +962,8 @@ int usbtv_video_init(struct usbtv *usbtv)
+
+ void usbtv_video_free(struct usbtv *usbtv)
+ {
+- mutex_lock(&usbtv->vb2q_lock);
+- mutex_lock(&usbtv->v4l2_lock);
+-
+- usbtv_stop(usbtv);
+ vb2_video_unregister_device(&usbtv->vdev);
+ v4l2_device_disconnect(&usbtv->v4l2_dev);
+
+- mutex_unlock(&usbtv->v4l2_lock);
+- mutex_unlock(&usbtv->vb2q_lock);
+-
+ v4l2_device_put(&usbtv->v4l2_dev);
+ }
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index e59a463c27618e..07158e9451fed1 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -2029,7 +2029,13 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
+ else
+ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
+ dev->intfnum, info->selector, data, 1);
+- if (!ret)
++
++ if (!ret) {
++ info->flags &= ~(UVC_CTRL_FLAG_GET_CUR |
++ UVC_CTRL_FLAG_SET_CUR |
++ UVC_CTRL_FLAG_AUTO_UPDATE |
++ UVC_CTRL_FLAG_ASYNCHRONOUS);
++
+ info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
+ UVC_CTRL_FLAG_GET_CUR : 0)
+ | (data[0] & UVC_CONTROL_CAP_SET ?
+@@ -2038,6 +2044,7 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
+ UVC_CTRL_FLAG_AUTO_UPDATE : 0)
+ | (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ?
+ UVC_CTRL_FLAG_ASYNCHRONOUS : 0);
++ }
+
+ kfree(data);
+ return ret;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 08fcd2ffa727b2..04e7f58553db1d 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/usb.h>
++#include <linux/usb/quirks.h>
+ #include <linux/usb/uvc.h>
+ #include <linux/videodev2.h>
+ #include <linux/vmalloc.h>
+@@ -686,16 +687,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ goto error;
+ }
+
+- size = nformats * sizeof(*format) + nframes * sizeof(*frame)
++ /*
++ * Allocate memory for the formats, the frames and the intervals,
++ * plus any required padding to guarantee that everything has the
++ * correct alignment.
++ */
++ size = nformats * sizeof(*format);
++ size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame);
++ size = ALIGN(size, __alignof__(*interval))
+ + nintervals * sizeof(*interval);
++
+ format = kzalloc(size, GFP_KERNEL);
+- if (format == NULL) {
++ if (!format) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+- frame = (struct uvc_frame *)&format[nformats];
+- interval = (u32 *)&frame[nframes];
++ frame = (void *)format + nformats * sizeof(*format);
++ frame = PTR_ALIGN(frame, __alignof__(*frame));
++ interval = (void *)frame + nframes * sizeof(*frame);
++ interval = PTR_ALIGN(interval, __alignof__(*interval));
+
+ streaming->formats = format;
+ streaming->nformats = 0;
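The allocation rework in the hunk above packs three arrays (formats, frames, intervals) into one buffer while padding between them so each starts at a correctly aligned address, instead of assuming the structure sizes happen to align. A standalone sketch of the two-array case (types hypothetical):

#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

struct fmt { int id; };
struct frm { long w, h; };

/* One allocation for both tables, with padding so the frame table is
 * aligned for struct frm (the role of ALIGN()/PTR_ALIGN() above). */
static struct fmt *alloc_tables(size_t nfmt, size_t nfrm, struct frm **frames)
{
	size_t size = nfmt * sizeof(struct fmt);
	struct fmt *fmt;

	size = ALIGN_UP(size, _Alignof(struct frm)) + nfrm * sizeof(struct frm);
	fmt = calloc(1, size);
	if (!fmt)
		return NULL;
	*frames = (struct frm *)ALIGN_UP(fmt + nfmt, _Alignof(struct frm));
	return fmt;
}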
+@@ -2232,8 +2243,14 @@ static int uvc_probe(struct usb_interface *intf,
+ goto error;
+ }
+
++ if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME)
++ udev->quirks &= ~USB_QUIRK_RESET_RESUME;
++
++ if (!(dev->quirks & UVC_QUIRK_DISABLE_AUTOSUSPEND))
++ usb_enable_autosuspend(udev);
++
+ uvc_dbg(dev, PROBE, "UVC device initialized\n");
+- usb_enable_autosuspend(udev);
++
+ return 0;
+
+ error:
+@@ -2573,7 +2590,44 @@ static const struct usb_device_id uvc_ids[] = {
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+- .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT) },
++ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT
++ | UVC_QUIRK_INVALID_DEVICE_SOF) },
++ /* Logitech HD Pro Webcam C922 */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x046d,
++ .idProduct = 0x085c,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_INVALID_DEVICE_SOF) },
++ /* Logitech Rally Bar Huddle */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x046d,
++ .idProduct = 0x087c,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
++ /* Logitech Rally Bar */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x046d,
++ .idProduct = 0x089b,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
++ /* Logitech Rally Bar Mini */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x046d,
++ .idProduct = 0x08d3,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
+ /* Chicony CNF7129 (Asus EEE 100HE) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+@@ -2592,6 +2646,15 @@ static const struct usb_device_id uvc_ids[] = {
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
++ /* Chicony Electronics Co., Ltd Integrated Camera */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x04f2,
++ .idProduct = 0xb67c,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
++ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_uvc11 },
+ /* Chicony EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+@@ -2994,6 +3057,24 @@ static const struct usb_device_id uvc_ids[] = {
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
++ /* SunplusIT Inc HD Camera */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x2b7e,
++ .idProduct = 0xb752,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
++ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_uvc11 },
++ /* Insta360 Link */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x2e1a,
++ .idProduct = 0x4c01,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) },
+ /* Lenovo Integrated Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index 28dde08ec6c5d9..91c350b2541267 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -214,13 +214,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
+ * Compute a bandwidth estimation by multiplying the frame
+ * size by the number of video frames per second, divide the
+ * result by the number of USB frames (or micro-frames for
+- * high-speed devices) per second and add the UVC header size
+- * (assumed to be 12 bytes long).
++ * high- and super-speed devices) per second and add the UVC
++ * header size (assumed to be 12 bytes long).
+ */
+ bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp;
+ bandwidth *= 10000000 / interval + 1;
+ bandwidth /= 1000;
+- if (stream->dev->udev->speed == USB_SPEED_HIGH)
++ if (stream->dev->udev->speed >= USB_SPEED_HIGH)
+ bandwidth /= 8;
+ bandwidth += 12;
+
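As a worked instance of the estimate above (stream parameters hypothetical): a 640x480 YUYV frame (bpp = 16) at 30 fps has interval = 333333 in 100 ns units. bandwidth = 640 * 480 / 8 * 16 = 614400; multiplying by 10000000 / 333333 + 1 = 31 gives 19046400; dividing by 1000 gives 19046. On a high- or super-speed device this is spread over 8 micro-frames per millisecond, giving 2380, and adding the 12-byte UVC header yields 2392 bytes per micro-frame.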
+@@ -478,6 +478,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ ktime_t time;
+ u16 host_sof;
+ u16 dev_sof;
++ u32 dev_stc;
+
+ switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
+ case UVC_STREAM_PTS | UVC_STREAM_SCR:
+@@ -526,9 +527,48 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ if (dev_sof == stream->clock.last_sof)
+ return;
+
++ dev_stc = get_unaligned_le32(&data[header_size - 6]);
++
++ /*
++ * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5
++ * standard states that it "must be captured when the first video data
++ * of a video frame is put on the USB bus". This is generally understood
++ * as requiring devices to clear the payload header's SCR bit before
++ * the first packet containing video data.
++ *
++ * Most vendors follow that interpretation, but some (namely SunplusIT
++ * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR
++ * field with 0's,and expect that the driver only processes the SCR if
++ * there is data in the packet.
++ *
++ * Ignore all the hardware timestamp information if we haven't received
++ * any data for this frame yet, the packet contains no data, and both
++ * STC and SOF are zero. This heuristic should be safe with
++ * compliant devices: in the very unlikely case where a UVC 1.1
++ * device sends timing information only before the first packet
++ * containing data, and both STC and SOF happen to be zero for a
++ * particular frame, we would only miss one clock sample out of
++ * many, and the clock recovery algorithm wouldn't suffer from
++ * this condition.
++ */
++ if (buf && buf->bytesused == 0 && len == header_size &&
++ dev_stc == 0 && dev_sof == 0)
++ return;
++
+ stream->clock.last_sof = dev_sof;
+
+ host_sof = usb_get_current_frame_number(stream->dev->udev);
++
++ /*
++ * On some devices, like the Logitech C922, the device SOF does not run
++ * at a stable rate of 1kHz. For those devices use the host SOF instead.
++ * In the tests performed so far, this improves the timestamp precision.
++ * This is probably explained by a small packet handling jitter from the
++ * host, but the exact reason hasn't been fully determined.
++ */
++ if (stream->dev->quirks & UVC_QUIRK_INVALID_DEVICE_SOF)
++ dev_sof = host_sof;
++
+ time = uvc_video_get_time();
+
+ /*
+@@ -564,7 +604,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ spin_lock_irqsave(&stream->clock.lock, flags);
+
+ sample = &stream->clock.samples[stream->clock.head];
+- sample->dev_stc = get_unaligned_le32(&data[header_size - 6]);
++ sample->dev_stc = dev_stc;
+ sample->dev_sof = dev_sof;
+ sample->host_sof = host_sof;
+ sample->host_time = time;
+@@ -709,11 +749,11 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ unsigned long flags;
+ u64 timestamp;
+ u32 delta_stc;
+- u32 y1, y2;
++ u32 y1;
+ u32 x1, x2;
+ u32 mean;
+ u32 sof;
+- u64 y;
++ u64 y, y2;
+
+ if (!uvc_hw_timestamps_param)
+ return;
+@@ -753,7 +793,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ sof = y;
+
+ uvc_dbg(stream->dev, CLOCK,
+- "%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
++ "%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %llu SOF offset %u)\n",
+ stream->dev->name, buf->pts,
+ y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
+ sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+@@ -768,7 +808,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ goto done;
+
+ y1 = NSEC_PER_SEC;
+- y2 = (u32)ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
++ y2 = ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
+
+ /*
+ * Interpolated and host SOF timestamps can wrap around at slightly
+@@ -789,7 +829,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ timestamp = ktime_to_ns(first->host_time) + y - y1;
+
+ uvc_dbg(stream->dev, CLOCK,
+- "%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
++ "%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %llu)\n",
+ stream->dev->name,
+ sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ y, timestamp, vbuf->vb2_buf.timestamp,
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 6fb0a78b1b0097..e5b12717016fa3 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -73,6 +73,9 @@
+ #define UVC_QUIRK_FORCE_Y8 0x00000800
+ #define UVC_QUIRK_FORCE_BPP 0x00001000
+ #define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000
++#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
++#define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000
++#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000
+
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED 0x00000001
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index 091e8cf4114ba2..ac4d987bba2556 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -324,6 +324,9 @@ static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
+ sd->entity.function != MEDIA_ENT_F_FLASH)
+ return 0;
+
++ if (!n->sd)
++ return 0;
++
+ link = media_create_ancillary_link(&n->sd->entity, &sd->entity);
+
+ #endif
+@@ -563,6 +566,7 @@ void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ {
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
++ INIT_LIST_HEAD(&notifier->notifier_entry);
+ notifier->v4l2_dev = v4l2_dev;
+ }
+ EXPORT_SYMBOL(v4l2_async_nf_init);
+@@ -572,6 +576,7 @@ void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ {
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
++ INIT_LIST_HEAD(&notifier->notifier_entry);
+ notifier->sd = sd;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
+@@ -618,16 +623,10 @@ static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+
+ int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+ {
+- int ret;
+-
+ if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
+ return -EINVAL;
+
+- ret = __v4l2_async_nf_register(notifier);
+- if (ret)
+- notifier->v4l2_dev = NULL;
+-
+- return ret;
++ return __v4l2_async_nf_register(notifier);
+ }
+ EXPORT_SYMBOL(v4l2_async_nf_register);
+
+@@ -639,7 +638,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+
+ v4l2_async_nf_unbind_all_subdevs(notifier);
+
+- list_del(&notifier->notifier_entry);
++ list_del_init(&notifier->notifier_entry);
+ }
+
+ void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+@@ -880,7 +879,6 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
+ &asc->notifier->waiting_list);
+
+ v4l2_async_unbind_subdev_one(asc->notifier, asc);
+- list_del(&asc->asc_subdev_entry);
+ }
+ }
+
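The list fixes above follow from one rule: list_del() poisons the entry's pointers, so a second deletion (or deleting an entry that was never added) corrupts memory, whereas list_del_init() leaves the entry as a valid empty list, and the INIT_LIST_HEAD() calls added to the init paths make unregister-before-register a no-op. A kernel-style sketch (assumes kernel context):

#include <linux/list.h>

struct item {
	struct list_head entry;
};

static void item_init(struct item *it)
{
	INIT_LIST_HEAD(&it->entry);	/* unlink before link is now safe */
}

static void item_unlink(struct item *it)
{
	list_del_init(&it->entry);	/* idempotent, unlike list_del() */
}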
+diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
+index bc2dbec019b04c..1ff94affbaf3a3 100644
+--- a/drivers/media/v4l2-core/v4l2-cci.c
++++ b/drivers/media/v4l2-core/v4l2-cci.c
+@@ -18,19 +18,30 @@
+
+ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ {
++ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
++ /*
++ * TODO: Fix smatch. Assign 0 to *val here in order to avoid
++ * failing a smatch check in the caller when the caller proceeds to
++ * read *val without initialising it on its side. *val is set
++ * to a valid value whenever this function returns 0 but smatch
++ * can't figure that out currently.
++ */
++ *val = 0;
++
+ if (err && *err)
+ return *err;
+
+- len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+- reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
++ little_endian = reg & CCI_REG_LE;
++ len = CCI_REG_WIDTH_BYTES(reg);
++ reg = CCI_REG_ADDR(reg);
+
+ ret = regmap_bulk_read(map, reg, buf, len);
+ if (ret) {
+- dev_err(regmap_get_device(map), "Error reading reg 0x%4x: %d\n",
++ dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n",
+ reg, ret);
+ goto out;
+ }
+@@ -40,16 +51,28 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+ *val = buf[0];
+ break;
+ case 2:
+- *val = get_unaligned_be16(buf);
++ if (little_endian)
++ *val = get_unaligned_le16(buf);
++ else
++ *val = get_unaligned_be16(buf);
+ break;
+ case 3:
+- *val = get_unaligned_be24(buf);
++ if (little_endian)
++ *val = get_unaligned_le24(buf);
++ else
++ *val = get_unaligned_be24(buf);
+ break;
+ case 4:
+- *val = get_unaligned_be32(buf);
++ if (little_endian)
++ *val = get_unaligned_le32(buf);
++ else
++ *val = get_unaligned_be32(buf);
+ break;
+ case 8:
+- *val = get_unaligned_be64(buf);
++ if (little_endian)
++ *val = get_unaligned_le64(buf);
++ else
++ *val = get_unaligned_be64(buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+@@ -68,6 +91,7 @@ EXPORT_SYMBOL_GPL(cci_read);
+
+ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ {
++ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+@@ -75,24 +99,37 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+ if (err && *err)
+ return *err;
+
+- len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+- reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
++ little_endian = reg & CCI_REG_LE;
++ len = CCI_REG_WIDTH_BYTES(reg);
++ reg = CCI_REG_ADDR(reg);
+
+ switch (len) {
+ case 1:
+ buf[0] = val;
+ break;
+ case 2:
+- put_unaligned_be16(val, buf);
++ if (little_endian)
++ put_unaligned_le16(val, buf);
++ else
++ put_unaligned_be16(val, buf);
+ break;
+ case 3:
+- put_unaligned_be24(val, buf);
++ if (little_endian)
++ put_unaligned_le24(val, buf);
++ else
++ put_unaligned_be24(val, buf);
+ break;
+ case 4:
+- put_unaligned_be32(val, buf);
++ if (little_endian)
++ put_unaligned_le32(val, buf);
++ else
++ put_unaligned_be32(val, buf);
+ break;
+ case 8:
+- put_unaligned_be64(val, buf);
++ if (little_endian)
++ put_unaligned_le64(val, buf);
++ else
++ put_unaligned_be64(val, buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+@@ -103,7 +140,7 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+
+ ret = regmap_bulk_write(map, reg, buf, len);
+ if (ret)
+- dev_err(regmap_get_device(map), "Error writing reg 0x%4x: %d\n",
++ dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n",
+ reg, ret);
+
+ out:
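With the endianness handling above, a register whose value is little-endian on the wire can be declared with the _LE register macros and read back without manual byte swapping. A sketch under the assumption that the matching CCI_REG16_LE() definition from this series is available (register address hypothetical):

#include <media/v4l2-cci.h>

#define HYP_CHIP_ID	CCI_REG16_LE(0x0016)	/* hypothetical LE register */

static int hyp_read_id(struct regmap *map)
{
	u64 id;
	int ret = 0;

	cci_read(map, HYP_CHIP_ID, &id, &ret);	/* swapped to CPU order */
	return ret ? ret : (int)id;
}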
+diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
+index f8127949268229..77bbf276ae89df 100644
+--- a/drivers/media/v4l2-core/v4l2-dev.c
++++ b/drivers/media/v4l2-core/v4l2-dev.c
+@@ -1034,8 +1034,10 @@ int __video_register_device(struct video_device *vdev,
+ vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
+ vdev->dev.parent = vdev->dev_parent;
+ dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
++ mutex_lock(&videodev_lock);
+ ret = device_register(&vdev->dev);
+ if (ret < 0) {
++ mutex_unlock(&videodev_lock);
+ pr_err("%s: device_register failed\n", __func__);
+ goto cleanup;
+ }
+@@ -1055,6 +1057,7 @@ int __video_register_device(struct video_device *vdev,
+
+ /* Part 6: Activate this minor. The char device can now be used. */
+ set_bit(V4L2_FL_REGISTERED, &vdev->flags);
++ mutex_unlock(&videodev_lock);
+
+ return 0;
+
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 0cc30397fbad5f..8db9ac9c1433f8 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -1084,11 +1084,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev,
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+- if (ret)
++ if (ret) {
++ kfree(entity->name);
++ entity->name = NULL;
+ return ret;
++ }
+ ret = media_device_register_entity(mdev, entity);
+- if (ret)
++ if (ret) {
++ kfree(entity->name);
++ entity->name = NULL;
+ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
+index 31752c06d1f0c8..a32ef739eb4490 100644
+--- a/drivers/media/v4l2-core/v4l2-subdev.c
++++ b/drivers/media/v4l2-core/v4l2-subdev.c
+@@ -359,20 +359,37 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
+ {
+ int ret;
+
+-#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+- if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+- if (enable)
+- led_set_brightness(sd->privacy_led,
+- sd->privacy_led->max_brightness);
+- else
+- led_set_brightness(sd->privacy_led, 0);
+- }
+-#endif
++ /*
++ * The .s_stream() operation must never be called to start or stop an
++ * already started or stopped subdev. Catch offenders but don't return
++ * an error yet to avoid regressions.
++ *
++ * As .s_stream() is mutually exclusive with the .enable_streams() and
++ * .disable_streams() operations, we can use the enabled_streams field
++ * to store the subdev streaming state.
++ */
++ if (WARN_ON(!!sd->enabled_streams == !!enable))
++ return 0;
++
+ ret = sd->ops->video->s_stream(sd, enable);
+
+ if (!enable && ret < 0) {
+ dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
+- return 0;
++ ret = 0;
++ }
++
++ if (!ret) {
++ sd->enabled_streams = enable ? BIT(0) : 0;
++
++#if IS_REACHABLE(CONFIG_LEDS_CLASS)
++ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
++ if (enable)
++ led_set_brightness(sd->privacy_led,
++ sd->privacy_led->max_brightness);
++ else
++ led_set_brightness(sd->privacy_led, 0);
++ }
++#endif
+ }
+
+ return ret;
+@@ -664,6 +681,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
++ sel.stream = crop->stream;
+ sel.target = V4L2_SEL_TGT_CROP;
+
+ rval = v4l2_subdev_call(
+@@ -688,6 +706,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
++ sel.stream = crop->stream;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = crop->rect;
+
+diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
+index 8efdd1f9713950..c82d8d8a16eaf1 100644
+--- a/drivers/memory/Kconfig
++++ b/drivers/memory/Kconfig
+@@ -167,7 +167,7 @@ config FSL_CORENET_CF
+ represents a coherency violation.
+
+ config FSL_IFC
+- bool "Freescale IFC driver" if COMPILE_TEST
++ bool "Freescale IFC driver"
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
+
+diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
+index 9015e8277dc8af..871f3de69102ea 100644
+--- a/drivers/memory/stm32-fmc2-ebi.c
++++ b/drivers/memory/stm32-fmc2-ebi.c
+@@ -181,8 +181,11 @@ static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr;
++ int ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
+
+ if (bcr & FMC2_BCR_MTYP)
+ return 0;
+@@ -195,8 +198,11 @@ static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
++ int ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
+
+ if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+ return 0;
+@@ -209,8 +215,11 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr;
++ int ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
+
+ if (bcr & FMC2_BCR_BURSTEN)
+ return 0;
+@@ -223,8 +232,11 @@ static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr;
++ int ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
+
+ if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW))
+ return 0;
+@@ -237,8 +249,11 @@ static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
++ int ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
+
+ if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+ return 0;
+@@ -251,12 +266,18 @@ static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
++ int ret;
++
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (prop->reg_type == FMC2_REG_BWTR)
+- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
++ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ else
+- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++ if (ret)
++ return ret;
+
+ if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) &&
+ ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN))
+@@ -270,12 +291,19 @@ static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi,
+ int cs)
+ {
+ u32 bcr, bcr1;
++ int ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+- if (cs)
+- regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
+- else
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
++
++ if (cs) {
++ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
++ if (ret)
++ return ret;
++ } else {
+ bcr1 = bcr;
++ }
+
+ if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN)))
+ return 0;
+@@ -307,12 +335,18 @@ static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
+ {
+ u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
+ u32 bcr, btr, clk_period;
++ int ret;
++
++ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
++ if (ret)
++ return ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
+ if (bcr & FMC2_BCR1_CCLKEN || !cs)
+- regmap_read(ebi->regmap, FMC2_BTR1, &btr);
++ ret = regmap_read(ebi->regmap, FMC2_BTR1, &btr);
+ else
+- regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
++ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
++ if (ret)
++ return ret;
+
+ clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
+
+@@ -571,11 +605,16 @@ static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi,
+ if (ret)
+ return ret;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
++
+ if (prop->reg_type == FMC2_REG_BWTR)
+- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
++ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ else
+- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
++ if (ret)
++ return ret;
+
+ if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)
+ val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX);
+@@ -693,11 +732,14 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
+ int cs, u32 setup)
+ {
+ u32 old_val, new_val, pcscntr;
++ int ret;
+
+ if (setup < 1)
+ return 0;
+
+- regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
++ ret = regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
++ if (ret)
++ return ret;
+
+ /* Enable counter for the bank */
+ regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
+@@ -944,17 +986,20 @@ static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs)
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0);
+ }
+
+-static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
++static int stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
+ {
+ unsigned int cs;
++ int ret;
+
+ for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
+- regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
+- regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
++ ret |= regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
++ ret |= regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
++ if (ret)
++ return ret;
+ }
+
+- regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
++ return regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+ }
+
+ static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
+@@ -983,22 +1028,29 @@ static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
+ }
+
+ /* NWAIT signal can not be connected to EBI controller and NAND controller */
+-static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
++static int stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
+ {
++ struct device *dev = ebi->dev;
+ unsigned int cs;
+ u32 bcr;
++ int ret;
+
+ for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
+- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
++ if (ret)
++ return ret;
++
+ if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) &&
+- ebi->bank_assigned & BIT(FMC2_NAND))
+- return true;
++ ebi->bank_assigned & BIT(FMC2_NAND)) {
++ dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
++ return -EINVAL;
++ }
+ }
+
+- return false;
++ return 0;
+ }
+
+ static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi)
+@@ -1085,10 +1137,9 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
+ return -ENODEV;
+ }
+
+- if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) {
+- dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
+- return -EINVAL;
+- }
++ ret = stm32_fmc2_ebi_nwait_used_by_ctrls(ebi);
++ if (ret)
++ return ret;
+
+ stm32_fmc2_ebi_enable(ebi);
+
+@@ -1133,7 +1184,10 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_release;
+
+- stm32_fmc2_ebi_save_setup(ebi);
++ ret = stm32_fmc2_ebi_save_setup(ebi);
++ if (ret)
++ goto err_release;
++
+ platform_set_drvdata(pdev, ebi);
+
+ return 0;
+diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
+index 4007f4e16d74f0..909b15ad207556 100644
+--- a/drivers/memory/tegra/tegra186-emc.c
++++ b/drivers/memory/tegra/tegra186-emc.c
+@@ -35,11 +35,6 @@ struct tegra186_emc {
+ struct icc_provider provider;
+ };
+
+-static inline struct tegra186_emc *to_tegra186_emc(struct icc_provider *provider)
+-{
+- return container_of(provider, struct tegra186_emc, provider);
+-}
+-
+ /*
+ * debugfs interface
+ *
+diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
+index 533f85a4b2bdb7..7633481e547d2c 100644
+--- a/drivers/memory/tegra/tegra186.c
++++ b/drivers/memory/tegra/tegra186.c
+@@ -75,6 +75,9 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ {
+ u32 value, old;
+
++ if (client->regs.sid.security == 0 && client->regs.sid.override == 0)
++ return;
++
+ value = readl(mc->regs + client->regs.sid.security);
+ if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) {
+ /*
+diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
+index 9e5b5dbd9c8dfb..fa40c49b070d07 100644
+--- a/drivers/memory/tegra/tegra234.c
++++ b/drivers/memory/tegra/tegra234.c
+@@ -121,7 +121,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDB,
+- .name = "dla0rdb",
++ .name = "dla1rdb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -407,7 +407,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDB1,
+- .name = "dla0rdb1",
++ .name = "dla1rdb1",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -417,7 +417,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1WRB,
+- .name = "dla0wrb",
++ .name = "dla1wrb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -663,7 +663,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDA,
+- .name = "dla0rda",
++ .name = "dla1rda",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -673,7 +673,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB,
+- .name = "dla0falrdb",
++ .name = "dla1falrdb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -683,7 +683,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1WRA,
+- .name = "dla0wra",
++ .name = "dla1wra",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -693,7 +693,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB,
+- .name = "dla0falwrb",
++ .name = "dla1falwrb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -857,7 +857,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDA1,
+- .name = "dla0rda1",
++ .name = "dla1rda1",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+@@ -986,6 +986,10 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+ msg.rx.data = &bwmgr_resp;
+ msg.rx.size = sizeof(bwmgr_resp);
+
++ if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 &&
++ pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2)
++ msg.flags = TEGRA_BPMP_MESSAGE_RESET;
++
+ ret = tegra_bpmp_transfer(mc->bpmp, &msg);
+ if (ret < 0) {
+ dev_err(mc->dev, "BPMP transfer failed: %d\n", ret);
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 90ce58fd629e5f..68d71b4b55bd35 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -1483,6 +1483,7 @@ config MFD_SYSCON
+
+ config MFD_TI_AM335X_TSCADC
+ tristate "TI ADC / Touch Screen chip support"
++ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP
+ select REGMAP_MMIO
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index c66f07edcd0e62..db1ba39de3b590 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -280,7 +280,5 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
+ obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
+ obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
+
+-rsmu-i2c-objs := rsmu_core.o rsmu_i2c.o
+-rsmu-spi-objs := rsmu_core.o rsmu_spi.o
+-obj-$(CONFIG_MFD_RSMU_I2C) += rsmu-i2c.o
+-obj-$(CONFIG_MFD_RSMU_SPI) += rsmu-spi.o
++obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o
++obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o
+diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
+index 0e52bd2ebd74bd..fb5f988e61f373 100644
+--- a/drivers/mfd/altera-sysmgr.c
++++ b/drivers/mfd/altera-sysmgr.c
+@@ -109,7 +109,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+
+ dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
+ (void *)sysmgr_np);
+- of_node_put(sysmgr_np);
++ if (property)
++ of_node_put(sysmgr_np);
++
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index 02cf4f3e91d767..de5d894ac04af8 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
+ arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
+ arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
+
++ /* Use left headphone speaker for HP vs line-out detection */
++ arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
++
+ return 0;
+ }
+
+diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
+index 7392b3d2e6b965..4be4df9dd8cf1a 100644
+--- a/drivers/mfd/cs42l43-sdw.c
++++ b/drivers/mfd/cs42l43-sdw.c
+@@ -17,13 +17,12 @@
+
+ #include "cs42l43.h"
+
+-enum cs42l43_sdw_ports {
+- CS42L43_DMIC_DEC_ASP_PORT = 1,
+- CS42L43_SPK_TX_PORT,
+- CS42L43_SPDIF_HP_PORT,
+- CS42L43_SPK_RX_PORT,
+- CS42L43_ASP_PORT,
+-};
++#define CS42L43_SDW_PORT(port, chans) { \
++ .num = port, \
++ .max_ch = chans, \
++ .type = SDW_DPN_FULL, \
++ .max_word = 24, \
++}
+
+ static const struct regmap_config cs42l43_sdw_regmap = {
+ .reg_bits = 32,
+@@ -42,65 +41,48 @@ static const struct regmap_config cs42l43_sdw_regmap = {
+ .num_reg_defaults = ARRAY_SIZE(cs42l43_reg_default),
+ };
+
++static const struct sdw_dpn_prop cs42l43_src_port_props[] = {
++ CS42L43_SDW_PORT(1, 4),
++ CS42L43_SDW_PORT(2, 2),
++ CS42L43_SDW_PORT(3, 2),
++ CS42L43_SDW_PORT(4, 2),
++};
++
++static const struct sdw_dpn_prop cs42l43_sink_port_props[] = {
++ CS42L43_SDW_PORT(5, 2),
++ CS42L43_SDW_PORT(6, 2),
++ CS42L43_SDW_PORT(7, 2),
++};
++
+ static int cs42l43_read_prop(struct sdw_slave *sdw)
+ {
+ struct sdw_slave_prop *prop = &sdw->prop;
+ struct device *dev = &sdw->dev;
+- struct sdw_dpn_prop *dpn;
+- unsigned long addr;
+- int nval;
+ int i;
+- u32 bit;
+
+ prop->use_domain_irq = true;
+ prop->paging_support = true;
+ prop->wake_capable = true;
+- prop->source_ports = BIT(CS42L43_DMIC_DEC_ASP_PORT) | BIT(CS42L43_SPK_TX_PORT);
+- prop->sink_ports = BIT(CS42L43_SPDIF_HP_PORT) |
+- BIT(CS42L43_SPK_RX_PORT) | BIT(CS42L43_ASP_PORT);
+ prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;
+ prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY |
+ SDW_SCP_INT1_IMPL_DEF;
+
+- nval = hweight32(prop->source_ports);
+- prop->src_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->src_dpn_prop),
+- GFP_KERNEL);
++ for (i = 0; i < ARRAY_SIZE(cs42l43_src_port_props); i++)
++ prop->source_ports |= BIT(cs42l43_src_port_props[i].num);
++
++ prop->src_dpn_prop = devm_kmemdup(dev, cs42l43_src_port_props,
++ sizeof(cs42l43_src_port_props), GFP_KERNEL);
+ if (!prop->src_dpn_prop)
+ return -ENOMEM;
+
+- i = 0;
+- dpn = prop->src_dpn_prop;
+- addr = prop->source_ports;
+- for_each_set_bit(bit, &addr, 32) {
+- dpn[i].num = bit;
+- dpn[i].max_ch = 2;
+- dpn[i].type = SDW_DPN_FULL;
+- dpn[i].max_word = 24;
+- i++;
+- }
+- /*
+- * All ports are 2 channels max, except the first one,
+- * CS42L43_DMIC_DEC_ASP_PORT.
+- */
+- dpn[CS42L43_DMIC_DEC_ASP_PORT].max_ch = 4;
++ for (i = 0; i < ARRAY_SIZE(cs42l43_sink_port_props); i++)
++ prop->sink_ports |= BIT(cs42l43_sink_port_props[i].num);
+
+- nval = hweight32(prop->sink_ports);
+- prop->sink_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->sink_dpn_prop),
+- GFP_KERNEL);
++ prop->sink_dpn_prop = devm_kmemdup(dev, cs42l43_sink_port_props,
++ sizeof(cs42l43_sink_port_props), GFP_KERNEL);
+ if (!prop->sink_dpn_prop)
+ return -ENOMEM;
+
+- i = 0;
+- dpn = prop->sink_dpn_prop;
+- addr = prop->sink_ports;
+- for_each_set_bit(bit, &addr, 32) {
+- dpn[i].num = bit;
+- dpn[i].max_ch = 2;
+- dpn[i].type = SDW_DPN_FULL;
+- dpn[i].max_word = 24;
+- i++;
+- }
+-
+ return 0;
+ }
+
+diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
+index 7b6d07cbe6fc6f..1cea3f8f467d43 100644
+--- a/drivers/mfd/cs42l43.c
++++ b/drivers/mfd/cs42l43.c
+@@ -84,7 +84,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
+ { CS42L43_DRV_CTRL_5, 0x136C00C0 },
+ { CS42L43_GPIO_CTRL1, 0x00000707 },
+ { CS42L43_GPIO_CTRL2, 0x00000000 },
+- { CS42L43_GPIO_FN_SEL, 0x00000000 },
++ { CS42L43_GPIO_FN_SEL, 0x00000004 },
+ { CS42L43_MCLK_SRC_SEL, 0x00000000 },
+ { CS42L43_SAMPLE_RATE1, 0x00000003 },
+ { CS42L43_SAMPLE_RATE2, 0x00000003 },
+@@ -131,38 +131,38 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
+ { CS42L43_ASP_TX_CH4_CTRL, 0x00170091 },
+ { CS42L43_ASP_TX_CH5_CTRL, 0x001700C1 },
+ { CS42L43_ASP_TX_CH6_CTRL, 0x001700F1 },
+- { CS42L43_ASPTX1_INPUT, 0x00800000 },
+- { CS42L43_ASPTX2_INPUT, 0x00800000 },
+- { CS42L43_ASPTX3_INPUT, 0x00800000 },
+- { CS42L43_ASPTX4_INPUT, 0x00800000 },
+- { CS42L43_ASPTX5_INPUT, 0x00800000 },
+- { CS42L43_ASPTX6_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00800000 },
+- { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00800000 },
+- { CS42L43_ASRC_INT1_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_INT2_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_INT3_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_INT4_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_DEC1_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_DEC2_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_DEC3_INPUT1, 0x00800000 },
+- { CS42L43_ASRC_DEC4_INPUT1, 0x00800000 },
+- { CS42L43_ISRC1INT1_INPUT1, 0x00800000 },
+- { CS42L43_ISRC1INT2_INPUT1, 0x00800000 },
+- { CS42L43_ISRC1DEC1_INPUT1, 0x00800000 },
+- { CS42L43_ISRC1DEC2_INPUT1, 0x00800000 },
+- { CS42L43_ISRC2INT1_INPUT1, 0x00800000 },
+- { CS42L43_ISRC2INT2_INPUT1, 0x00800000 },
+- { CS42L43_ISRC2DEC1_INPUT1, 0x00800000 },
+- { CS42L43_ISRC2DEC2_INPUT1, 0x00800000 },
++ { CS42L43_ASPTX1_INPUT, 0x00000000 },
++ { CS42L43_ASPTX2_INPUT, 0x00000000 },
++ { CS42L43_ASPTX3_INPUT, 0x00000000 },
++ { CS42L43_ASPTX4_INPUT, 0x00000000 },
++ { CS42L43_ASPTX5_INPUT, 0x00000000 },
++ { CS42L43_ASPTX6_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00000000 },
++ { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00000000 },
++ { CS42L43_ASRC_INT1_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_INT2_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_INT3_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_INT4_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_DEC1_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_DEC2_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_DEC3_INPUT1, 0x00000000 },
++ { CS42L43_ASRC_DEC4_INPUT1, 0x00000000 },
++ { CS42L43_ISRC1INT1_INPUT1, 0x00000000 },
++ { CS42L43_ISRC1INT2_INPUT1, 0x00000000 },
++ { CS42L43_ISRC1DEC1_INPUT1, 0x00000000 },
++ { CS42L43_ISRC1DEC2_INPUT1, 0x00000000 },
++ { CS42L43_ISRC2INT1_INPUT1, 0x00000000 },
++ { CS42L43_ISRC2INT2_INPUT1, 0x00000000 },
++ { CS42L43_ISRC2DEC1_INPUT1, 0x00000000 },
++ { CS42L43_ISRC2DEC2_INPUT1, 0x00000000 },
+ { CS42L43_EQ1MIX_INPUT1, 0x00800000 },
+ { CS42L43_EQ1MIX_INPUT2, 0x00800000 },
+ { CS42L43_EQ1MIX_INPUT3, 0x00800000 },
+@@ -171,8 +171,8 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
+ { CS42L43_EQ2MIX_INPUT2, 0x00800000 },
+ { CS42L43_EQ2MIX_INPUT3, 0x00800000 },
+ { CS42L43_EQ2MIX_INPUT4, 0x00800000 },
+- { CS42L43_SPDIF1_INPUT1, 0x00800000 },
+- { CS42L43_SPDIF2_INPUT1, 0x00800000 },
++ { CS42L43_SPDIF1_INPUT1, 0x00000000 },
++ { CS42L43_SPDIF2_INPUT1, 0x00000000 },
+ { CS42L43_AMP1MIX_INPUT1, 0x00800000 },
+ { CS42L43_AMP1MIX_INPUT2, 0x00800000 },
+ { CS42L43_AMP1MIX_INPUT3, 0x00800000 },
+@@ -217,7 +217,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
+ { CS42L43_CTRL_REG, 0x00000006 },
+ { CS42L43_FDIV_FRAC, 0x40000000 },
+ { CS42L43_CAL_RATIO, 0x00000080 },
+- { CS42L43_SPI_CLK_CONFIG1, 0x00000000 },
++ { CS42L43_SPI_CLK_CONFIG1, 0x00000001 },
+ { CS42L43_SPI_CONFIG1, 0x00000000 },
+ { CS42L43_SPI_CONFIG2, 0x00000000 },
+ { CS42L43_SPI_CONFIG3, 0x00000001 },
+diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
+index c7510434380a43..fbbe82c6e75b5c 100644
+--- a/drivers/mfd/dln2.c
++++ b/drivers/mfd/dln2.c
+@@ -826,7 +826,6 @@ static int dln2_probe(struct usb_interface *interface,
+ dln2_stop_rx_urbs(dln2);
+
+ out_free:
+- usb_put_dev(dln2->usb_dev);
+ dln2_free(dln2);
+
+ return ret;
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index 699f44ffff0e4b..ae5759200622c4 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
++ /* LNL-M */
++ { PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 9591b354072ad1..00e7b578bb3e83 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -301,8 +301,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
+
+ snprintf(name, sizeof(name), "%s-div", devname);
+ tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
++ 0, lpss->priv, 1, 15, 16, 15,
+ CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
+- lpss->priv, 1, 15, 16, 15, 0,
+ NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
+index 7fce3ef7ab453d..2a83f540d4c9d8 100644
+--- a/drivers/mfd/intel_soc_pmic_chtwc.c
++++ b/drivers/mfd/intel_soc_pmic_chtwc.c
+@@ -178,7 +178,6 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = {
+ .driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YT3_X90,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ },
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 0ed7c0d7784e1b..2b85509a90fc29 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -146,6 +146,7 @@ static int mfd_add_device(struct device *parent, int id,
+ struct platform_device *pdev;
+ struct device_node *np = NULL;
+ struct mfd_of_node_entry *of_entry, *tmp;
++ bool disabled = false;
+ int ret = -ENOMEM;
+ int platform_id;
+ int r;
+@@ -183,11 +184,10 @@ static int mfd_add_device(struct device *parent, int id,
+ if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, cell->of_compatible)) {
+- /* Ignore 'disabled' devices error free */
++ /* Skip 'disabled' devices */
+ if (!of_device_is_available(np)) {
+- of_node_put(np);
+- ret = 0;
+- goto fail_alias;
++ disabled = true;
++ continue;
+ }
+
+ ret = mfd_match_of_node_to_dev(pdev, np, cell);
+@@ -197,10 +197,17 @@ static int mfd_add_device(struct device *parent, int id,
+ if (ret)
+ goto fail_alias;
+
+- break;
++ goto match;
+ }
+ }
+
++ if (disabled) {
++ /* Ignore 'disabled' devices error free */
++ ret = 0;
++ goto fail_alias;
++ }
++
++match:
+ if (!pdev->dev.of_node)
+ pr_warn("%s: Failed to locate of_node [id: %d]\n",
+ cell->name, platform_id);
+diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
+index 906353735c7820..5ba2b2352749db 100644
+--- a/drivers/mfd/omap-usb-tll.c
++++ b/drivers/mfd/omap-usb-tll.c
+@@ -230,8 +230,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
+ break;
+ }
+
+- tll = devm_kzalloc(dev, sizeof(*tll) + sizeof(tll->ch_clk[nch]),
+- GFP_KERNEL);
++ tll = devm_kzalloc(dev, struct_size(tll, ch_clk, nch), GFP_KERNEL);
+ if (!tll) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
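struct_size() computes sizeof(*tll) plus nch trailing array elements with overflow checking, which is the idiomatic way to size a flexible-array allocation. A kernel-style sketch (assumes kernel context; names hypothetical):

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct chans {
	int nch;
	struct clk *ch_clk[];	/* flexible array member */
};

static struct chans *chans_alloc(struct device *dev, int nch)
{
	struct chans *c;

	/* struct_size() only inspects the type of 'c', so using it
	 * before assignment is the standard idiom. */
	c = devm_kzalloc(dev, struct_size(c, ch_clk, nch), GFP_KERNEL);
	if (c)
		c->nch = nch;
	return c;
}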
+diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
+index 7e2cd79d17ebf8..8e449cff5cec40 100644
+--- a/drivers/mfd/qcom-spmi-pmic.c
++++ b/drivers/mfd/qcom-spmi-pmic.c
+@@ -30,6 +30,8 @@ struct qcom_spmi_dev {
+ struct qcom_spmi_pmic pmic;
+ };
+
++static DEFINE_MUTEX(pmic_spmi_revid_lock);
++
+ #define N_USIDS(n) ((void *)n)
+
+ static const struct of_device_id pmic_spmi_id_table[] = {
+@@ -76,24 +78,21 @@ static const struct of_device_id pmic_spmi_id_table[] = {
+ *
+ * This only supports PMICs with 1 or 2 USIDs.
+ */
+-static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
++static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
+ {
+- struct spmi_device *sdev;
+- struct qcom_spmi_dev *ctx;
+ struct device_node *spmi_bus;
+- struct device_node *other_usid = NULL;
++ struct device_node *child;
+ int function_parent_usid, ret;
+ u32 pmic_addr;
+
+- sdev = to_spmi_device(dev);
+- ctx = dev_get_drvdata(&sdev->dev);
+-
+ /*
+ * Quick return if the function device is already in the base
+ * USID. This will always be hit for PMICs with only 1 USID.
+ */
+- if (sdev->usid % ctx->num_usids == 0)
++ if (sdev->usid % ctx->num_usids == 0) {
++ get_device(&sdev->dev);
+ return sdev;
++ }
+
+ function_parent_usid = sdev->usid;
+
+@@ -105,28 +104,61 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
+ * device for USID 2.
+ */
+ spmi_bus = of_get_parent(sdev->dev.of_node);
+- do {
+- other_usid = of_get_next_child(spmi_bus, other_usid);
+-
+- ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
+- if (ret)
+- return ERR_PTR(ret);
++ sdev = ERR_PTR(-ENODATA);
++ for_each_child_of_node(spmi_bus, child) {
++ ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
++ if (ret) {
++ of_node_put(child);
++ sdev = ERR_PTR(ret);
++ break;
++ }
+
+- sdev = spmi_device_from_of(other_usid);
+ if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
+- if (!sdev)
++ sdev = spmi_device_from_of(child);
++ if (!sdev) {
+ /*
+- * If the base USID for this PMIC hasn't probed yet
+- * but the secondary USID has, then we need to defer
+- * the function driver so that it will attempt to
+- * probe again when the base USID is ready.
++ * If the base USID for this PMIC hasn't been
++ * registered yet then we need to defer.
+ */
+- return ERR_PTR(-EPROBE_DEFER);
+- return sdev;
++ sdev = ERR_PTR(-EPROBE_DEFER);
++ }
++ of_node_put(child);
++ break;
+ }
+- } while (other_usid->sibling);
++ }
+
+- return ERR_PTR(-ENODATA);
++ of_node_put(spmi_bus);
++
++ return sdev;
++}
++
++static int pmic_spmi_get_base_revid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
++{
++ struct qcom_spmi_dev *base_ctx;
++ struct spmi_device *base;
++ int ret = 0;
++
++ base = qcom_pmic_get_base_usid(sdev, ctx);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ /*
++ * Copy revid info from base device if it has probed and is still
++ * bound to its driver.
++ */
++ mutex_lock(&pmic_spmi_revid_lock);
++ base_ctx = spmi_device_get_drvdata(base);
++ if (!base_ctx) {
++ ret = -EPROBE_DEFER;
++ goto out_unlock;
++ }
++ memcpy(&ctx->pmic, &base_ctx->pmic, sizeof(ctx->pmic));
++out_unlock:
++ mutex_unlock(&pmic_spmi_revid_lock);
++
++ put_device(&base->dev);
++
++ return ret;
+ }
+
+ static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
+@@ -204,11 +236,7 @@ const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
+ if (!of_match_device(pmic_spmi_id_table, dev->parent))
+ return ERR_PTR(-EINVAL);
+
+- sdev = qcom_pmic_get_base_usid(dev->parent);
+-
+- if (IS_ERR(sdev))
+- return ERR_CAST(sdev);
+-
++ sdev = to_spmi_device(dev->parent);
+ spmi = dev_get_drvdata(&sdev->dev);
+
+ return &spmi->pmic;
+@@ -243,16 +271,31 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
+ ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
+ if (ret < 0)
+ return ret;
++ } else {
++ ret = pmic_spmi_get_base_revid(sdev, ctx);
++ if (ret)
++ return ret;
+ }
++
++ mutex_lock(&pmic_spmi_revid_lock);
+ spmi_device_set_drvdata(sdev, ctx);
++ mutex_unlock(&pmic_spmi_revid_lock);
+
+ return devm_of_platform_populate(&sdev->dev);
+ }
+
++static void pmic_spmi_remove(struct spmi_device *sdev)
++{
++ mutex_lock(&pmic_spmi_revid_lock);
++ spmi_device_set_drvdata(sdev, NULL);
++ mutex_unlock(&pmic_spmi_revid_lock);
++}
++
+ MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
+
+ static struct spmi_driver pmic_spmi_driver = {
+ .probe = pmic_spmi_probe,
++ .remove = pmic_spmi_remove,
+ .driver = {
+ .name = "pmic-spmi",
+ .of_match_table = pmic_spmi_id_table,
+diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
+index 11a831e92da83a..a577f950c6324d 100644
+--- a/drivers/mfd/rk8xx-core.c
++++ b/drivers/mfd/rk8xx-core.c
+@@ -53,76 +53,68 @@ static const struct resource rk817_charger_resources[] = {
+ };
+
+ static const struct mfd_cell rk805s[] = {
+- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
+- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+- { .name = "rk805-pinctrl", .id = PLATFORM_DEVID_NONE, },
++ { .name = "rk808-clkout", },
++ { .name = "rk808-regulator", },
++ { .name = "rk805-pinctrl", },
+ {
+ .name = "rk808-rtc",
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resources = &rtc_resources[0],
+- .id = PLATFORM_DEVID_NONE,
+ },
+ { .name = "rk805-pwrkey",
+ .num_resources = ARRAY_SIZE(rk805_key_resources),
+ .resources = &rk805_key_resources[0],
+- .id = PLATFORM_DEVID_NONE,
+ },
+ };
+
+ static const struct mfd_cell rk806s[] = {
+- { .name = "rk805-pinctrl", .id = PLATFORM_DEVID_AUTO, },
+- { .name = "rk808-regulator", .id = PLATFORM_DEVID_AUTO, },
++ { .name = "rk805-pinctrl", },
++ { .name = "rk808-regulator", },
+ {
+ .name = "rk805-pwrkey",
+ .resources = rk806_pwrkey_resources,
+ .num_resources = ARRAY_SIZE(rk806_pwrkey_resources),
+- .id = PLATFORM_DEVID_AUTO,
+ },
+ };
+
+ static const struct mfd_cell rk808s[] = {
+- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
+- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
++ { .name = "rk808-clkout", },
++ { .name = "rk808-regulator", },
+ {
+ .name = "rk808-rtc",
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resources = rtc_resources,
+- .id = PLATFORM_DEVID_NONE,
+ },
+ };
+
+ static const struct mfd_cell rk817s[] = {
+- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
+- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
++ { .name = "rk808-clkout", },
++ { .name = "rk808-regulator", },
+ {
+ .name = "rk805-pwrkey",
+ .num_resources = ARRAY_SIZE(rk817_pwrkey_resources),
+ .resources = &rk817_pwrkey_resources[0],
+- .id = PLATFORM_DEVID_NONE,
+ },
+ {
+ .name = "rk808-rtc",
+ .num_resources = ARRAY_SIZE(rk817_rtc_resources),
+ .resources = &rk817_rtc_resources[0],
+- .id = PLATFORM_DEVID_NONE,
+ },
+- { .name = "rk817-codec", .id = PLATFORM_DEVID_NONE, },
++ { .name = "rk817-codec", },
+ {
+ .name = "rk817-charger",
+ .num_resources = ARRAY_SIZE(rk817_charger_resources),
+ .resources = &rk817_charger_resources[0],
+- .id = PLATFORM_DEVID_NONE,
+ },
+ };
+
+ static const struct mfd_cell rk818s[] = {
+- { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
+- { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
++ { .name = "rk808-clkout", },
++ { .name = "rk808-regulator", },
+ {
+ .name = "rk808-rtc",
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resources = rtc_resources,
+- .id = PLATFORM_DEVID_NONE,
+ },
+ };
+
+@@ -680,7 +672,7 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap
+ pre_init_reg[i].addr);
+ }
+
+- ret = devm_mfd_add_devices(dev, 0, cells, nr_cells, NULL, 0,
++ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, nr_cells, NULL, 0,
+ regmap_irq_get_domain(rk808->irq_data));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add MFD devices\n");
+diff --git a/drivers/mfd/rsmu_core.c b/drivers/mfd/rsmu_core.c
+index 29437fd0bd5bf6..fd04a6e5dfa31a 100644
+--- a/drivers/mfd/rsmu_core.c
++++ b/drivers/mfd/rsmu_core.c
+@@ -78,11 +78,13 @@ int rsmu_core_init(struct rsmu_ddata *rsmu)
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(rsmu_core_init);
+
+ void rsmu_core_exit(struct rsmu_ddata *rsmu)
+ {
+ mutex_destroy(&rsmu->lock);
+ }
++EXPORT_SYMBOL_GPL(rsmu_core_exit);
+
+ MODULE_DESCRIPTION("Renesas SMU core driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index 57b29c3251312b..7d0e91164cbaa3 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -105,6 +105,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
+ }
+
+ syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
++ if (!syscon_config.name) {
++ ret = -ENOMEM;
++ goto err_regmap;
++ }
+ syscon_config.reg_stride = reg_io_width;
+ syscon_config.val_bits = reg_io_width * 8;
+ syscon_config.max_register = resource_size(&res) - reg_io_width;
+@@ -234,7 +238,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
+ return ERR_PTR(-ENODEV);
+
+ regmap = syscon_node_to_regmap(syscon_np);
+- of_node_put(syscon_np);
++
++ if (property)
++ of_node_put(syscon_np);
+
+ return regmap;
+ }
+diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
+index 0fb9c5cf213a47..783ee59901e86b 100644
+--- a/drivers/mfd/tps6594-core.c
++++ b/drivers/mfd/tps6594-core.c
+@@ -433,6 +433,9 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
+ tps6594_irq_chip.name = devm_kasprintf(dev, GFP_KERNEL, "%s-%ld-0x%02x",
+ dev->driver->name, tps->chip_id, tps->reg);
+
++ if (!tps6594_irq_chip.name)
++ return -ENOMEM;
++
+ ret = devm_regmap_add_irq_chip(dev, tps->regmap, tps->irq, IRQF_SHARED | IRQF_ONESHOT,
+ 0, &tps6594_irq_chip, &tps->irq_data);
+ if (ret)
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index dbbf7db4ff2f49..c290e849b2ed82 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -581,6 +581,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len)
+ }
+ }
+
++static void at24_probe_temp_sensor(struct i2c_client *client)
++{
++ struct at24_data *at24 = i2c_get_clientdata(client);
++ struct i2c_board_info info = { .type = "jc42" };
++ int ret;
++ u8 val;
++
++ /*
++ * Byte 2 has value 11 for DDR3; earlier versions don't
++ * support the thermal sensor present flag
++ */
++ ret = at24_read(at24, 2, &val, 1);
++ if (ret || val != 11)
++ return;
++
++ /* Byte 32, bit 7 is set if temp sensor is present */
++ ret = at24_read(at24, 32, &val, 1);
++ if (ret || !(val & BIT(7)))
++ return;
++
++ info.addr = 0x18 | (client->addr & 7);
++
++ i2c_new_client_device(client->adapter, &info);
++}
++
+ static int at24_probe(struct i2c_client *client)
+ {
+ struct regmap_config regmap_config = { };
+@@ -756,15 +781,6 @@ static int at24_probe(struct i2c_client *client)
+ }
+ pm_runtime_enable(dev);
+
+- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+- if (IS_ERR(at24->nvmem)) {
+- pm_runtime_disable(dev);
+- if (!pm_runtime_status_suspended(dev))
+- regulator_disable(at24->vcc_reg);
+- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+- "failed to register nvmem\n");
+- }
+-
+ /*
+ * Perform a one-byte test read to verify that the chip is functional,
+ * unless powering on the device is to be avoided during probe (i.e.
+@@ -780,6 +796,19 @@ static int at24_probe(struct i2c_client *client)
+ }
+ }
+
++ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
++ if (IS_ERR(at24->nvmem)) {
++ pm_runtime_disable(dev);
++ if (!pm_runtime_status_suspended(dev))
++ regulator_disable(at24->vcc_reg);
++ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
++ "failed to register nvmem\n");
++ }
++
++ /* If this is an SPD EEPROM, probe for DDR3 thermal sensor */
++ if (cdata == &at24_data_spd)
++ at24_probe_temp_sensor(client);
++
+ pm_runtime_idle(dev);
+
+ if (writable)
+diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+index f1f766b709657b..4eddc5ba1af9c8 100644
+--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
++++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+@@ -42,7 +42,7 @@ static void digsy_mtc_op_finish(void *p)
+ }
+
+ struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
+- .flags = EE_ADDR8,
++ .flags = EE_ADDR8 | EE_SIZE1K,
+ .prepare = digsy_mtc_op_prepare,
+ .finish = digsy_mtc_op_finish,
+ };
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 1c6c62a7f7f553..4df0d7a0cd1184 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -263,7 +263,6 @@ struct fastrpc_channel_ctx {
+ int domain_id;
+ int sesscount;
+ int vmcount;
+- u64 perms;
+ struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
+ struct rpmsg_device *rpdev;
+ struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
+@@ -1239,6 +1238,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ struct fastrpc_phy_page pages[1];
+ char *name;
+ int err;
++ bool scm_done = false;
+ struct {
+ int pgid;
+ u32 namelen;
+@@ -1279,15 +1279,18 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+
+ /* Map if we have any heap VMIDs associated with this ADSP Static Process. */
+ if (fl->cctx->vmcount) {
++ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
++
+ err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
+ (u64)fl->cctx->remote_heap->size,
+- &fl->cctx->perms,
++ &src_perms,
+ fl->cctx->vmperms, fl->cctx->vmcount);
+ if (err) {
+ dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
+ fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
+ goto err_map;
+ }
++ scm_done = true;
+ }
+ }
+
+@@ -1319,10 +1322,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ goto err_invoke;
+
+ kfree(args);
++ kfree(name);
+
+ return 0;
+ err_invoke:
+- if (fl->cctx->vmcount) {
++ if (fl->cctx->vmcount && scm_done) {
+ u64 src_perms = 0;
+ struct qcom_scm_vmperm dst_perms;
+ u32 i;
+@@ -1692,16 +1696,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
+ {
+ struct fastrpc_invoke_args args[2] = { 0 };
+
+- /* Capability filled in userspace */
++ /*
++ * Capability filled in userspace. This carries the information
++ * about remoteproc support, which is fetched from the remoteproc
++ * sysfs node by userspace.
++ */
+ dsp_attr_buf[0] = 0;
++ dsp_attr_buf_len -= 1;
+
+ args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
+ args[0].length = sizeof(dsp_attr_buf_len);
+ args[0].fd = -1;
+ args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
+- args[1].length = dsp_attr_buf_len;
++ args[1].length = dsp_attr_buf_len * sizeof(u32);
+ args[1].fd = -1;
+- fl->pd = USER_PD;
+
+ return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
+ FASTRPC_SCALARS(0, 1, 1), args);
+@@ -1729,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
+ if (!dsp_attributes)
+ return -ENOMEM;
+
+- err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
++ err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
+ if (err == DSP_UNSUPPORTED_API) {
+ dev_info(&cctx->rpdev->dev,
+ "Warning: DSP capabilities not supported on domain: %d\n", domain);
+@@ -1782,7 +1790,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
+ if (err)
+ return err;
+
+- if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
++ if (copy_to_user(argp, &cap, sizeof(cap)))
+ return -EFAULT;
+
+ return 0;
+@@ -1904,7 +1912,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+ &args[0]);
+ if (err) {
+ dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
+- goto err_invoke;
++ fastrpc_buf_free(buf);
++ return err;
+ }
+
+ /* update the buffer to be able to deallocate the memory on the DSP */
+@@ -1915,8 +1924,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+
+ /* Add memory to static PD pool, protection thru hypervisor */
+ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
++ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
++
+ err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
+- &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
++ &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
+ if (err) {
+ dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+ buf->phys, buf->size, err);
+@@ -1940,8 +1951,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+
+ err_assign:
+ fastrpc_req_munmap_impl(fl, buf);
+-err_invoke:
+- fastrpc_buf_free(buf);
+
+ return err;
+ }
+@@ -2191,7 +2200,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
+ int i;
+
+ spin_lock_irqsave(&cctx->lock, flags);
+- for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
++ for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
+ if (cctx->session[i].sid == sess->sid) {
+ cctx->session[i].valid = false;
+ cctx->sesscount--;
+@@ -2290,7 +2299,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+
+ if (vmcount) {
+ data->vmcount = vmcount;
+- data->perms = BIT(QCOM_SCM_VMID_HLOS);
+ for (i = 0; i < data->vmcount; i++) {
+ data->vmperms[i].vmid = vmids[i];
+ data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+index 3882e97e96a70f..15119584473caf 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+@@ -150,6 +150,7 @@ static int lis3lv02d_i2c_probe(struct i2c_client *client)
+ lis3_dev.init = lis3_i2c_init;
+ lis3_dev.read = lis3_i2c_read;
+ lis3_dev.write = lis3_i2c_write;
++ lis3_dev.reg_ctrl = lis3_reg_ctrl;
+ lis3_dev.irq = client->irq;
+ lis3_dev.ac = lis3lv02d_axis_map;
+ lis3_dev.pm_dev = &client->dev;
+@@ -197,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+- if (!lis3->pdata || !lis3->pdata->wakeup_flags)
++ /* Turn on for wakeup if turned off by runtime suspend */
++ if (lis3->pdata && lis3->pdata->wakeup_flags) {
++ if (pm_runtime_suspended(dev))
++ lis3lv02d_poweron(lis3);
++ /* For non-wakeup, turn off if not already turned off by runtime suspend */
++ } else if (!pm_runtime_suspended(dev))
+ lis3lv02d_poweroff(lis3);
++
+ return 0;
+ }
+
+@@ -207,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+- /*
+- * pm_runtime documentation says that devices should always
+- * be powered on at resume. Pm_runtime turns them off after system
+- * wide resume is complete.
+- */
+- if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
+- pm_runtime_suspended(dev))
++ /* Turn back off if turned on for wakeup and runtime suspended */
++ if (lis3->pdata && lis3->pdata->wakeup_flags) {
++ if (pm_runtime_suspended(dev))
++ lis3lv02d_poweroff(lis3);
++ /* For non-wakeup, turn back on if not runtime suspended */
++ } else if (!pm_runtime_suspended(dev))
+ lis3lv02d_poweron(lis3);
+
+ return 0;
+diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
+index 95ef971b5e1cb4..b28701138b4bc2 100644
+--- a/drivers/misc/lkdtm/Makefile
++++ b/drivers/misc/lkdtm/Makefile
+@@ -19,7 +19,7 @@ KASAN_SANITIZE_rodata.o := n
+ KCSAN_SANITIZE_rodata.o := n
+ KCOV_INSTRUMENT_rodata.o := n
+ OBJECT_FILES_NON_STANDARD_rodata.o := y
+-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
++CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI)
+
+ OBJCOPYFLAGS :=
+ OBJCOPYFLAGS_rodata_objcopy.o := \
+diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
+index b93404d6565092..5b861dbff27e9a 100644
+--- a/drivers/misc/lkdtm/perms.c
++++ b/drivers/misc/lkdtm/perms.c
+@@ -61,7 +61,7 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
+ return fdesc;
+ }
+
+-static noinline void execute_location(void *dst, bool write)
++static noinline __nocfi void execute_location(void *dst, bool write)
+ {
+ void (*func)(void);
+ func_desc_t fdesc;
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+index 32af2b14ff3448..34c9be437432a3 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+
+ aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
+ GFP_KERNEL);
+- if (!aux_bus->aux_device_wrapper[1])
+- return -ENOMEM;
++ if (!aux_bus->aux_device_wrapper[1]) {
++ retval = -ENOMEM;
++ goto err_aux_dev_add_0;
++ }
+
+ retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ if (retval < 0)
+@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+
+ err_aux_dev_add_1:
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
++ goto err_aux_dev_add_0;
+
+ err_aux_dev_init_1:
+ ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
+@@ -120,6 +123,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+
+ err_aux_dev_add_0:
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
++ goto err_ret;
+
+ err_aux_dev_init_0:
+ ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
+@@ -127,6 +131,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ err_ida_alloc_0:
+ kfree(aux_bus->aux_device_wrapper[0]);
+
++err_ret:
+ return retval;
+ }
+
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+index 16695cb5e69c79..a2ed477e0370bc 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+@@ -153,7 +153,6 @@ static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off,
+
+ buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
+ }
+- ret = byte;
+ error:
+ release_sys_lock(priv);
+ return ret;
+@@ -197,7 +196,6 @@ static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off,
+ goto error;
+ }
+ }
+- ret = byte;
+ error:
+ release_sys_lock(priv);
+ return ret;
+@@ -258,7 +256,6 @@ static int pci1xxxx_otp_read(void *priv_t, unsigned int off,
+
+ buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET));
+ }
+- ret = byte;
+ error:
+ release_sys_lock(priv);
+ return ret;
+@@ -315,7 +312,6 @@ static int pci1xxxx_otp_write(void *priv_t, unsigned int off,
+ goto error;
+ }
+ }
+- ret = byte;
+ error:
+ release_sys_lock(priv);
+ return ret;
+@@ -368,6 +364,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
+ if (is_eeprom_responsive(priv)) {
+ priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM;
+ priv->nvmem_config_eeprom.name = EEPROM_NAME;
++ priv->nvmem_config_eeprom.id = NVMEM_DEVID_AUTO;
+ priv->nvmem_config_eeprom.dev = &aux_dev->dev;
+ priv->nvmem_config_eeprom.owner = THIS_MODULE;
+ priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read;
+@@ -387,6 +384,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
+
+ priv->nvmem_config_otp.type = NVMEM_TYPE_OTP;
+ priv->nvmem_config_otp.name = OTP_NAME;
++ priv->nvmem_config_otp.id = NVMEM_DEVID_AUTO;
+ priv->nvmem_config_otp.dev = &aux_dev->dev;
+ priv->nvmem_config_otp.owner = THIS_MODULE;
+ priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read;
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 5c19097266fe06..32f2287823184e 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -2011,7 +2011,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
+
+ mei_hdr = mei_msg_hdr_init(cb);
+ if (IS_ERR(mei_hdr)) {
+- rets = -PTR_ERR(mei_hdr);
++ rets = PTR_ERR(mei_hdr);
+ mei_hdr = NULL;
+ goto err;
+ }
+@@ -2032,7 +2032,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
+
+ hbuf_slots = mei_hbuf_empty_slots(dev);
+ if (hbuf_slots < 0) {
+- rets = -EOVERFLOW;
++ buf_len = -EOVERFLOW;
+ goto out;
+ }
+
+diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
+index be52b113aea937..89364bdbb1290f 100644
+--- a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
++++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
+@@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = {
+ *
+ * The function checks if the device is pci device and
+ * Intel VGA adapter, the subcomponent is SW Proxy
+- * and the parent of MEI PCI and the parent of VGA are the same PCH device.
++ * and the VGA is on bus 0, which is reserved for built-in devices,
++ * to reject discrete GFX.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY)
+@@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
+ if (subcomponent != I915_COMPONENT_GSC_PROXY)
+ return 0;
+
+- return component_compare_dev(dev->parent, ((struct device *)data)->parent);
++ /* Only built-in GFX */
++ return (pdev->bus->number == 0);
+ }
+
+ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
+@@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
+ }
+
+ component_match_add_typed(&cldev->dev, &master_match,
+- mei_gsc_proxy_component_match, cldev->dev.parent);
++ mei_gsc_proxy_component_match, NULL);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index bdc65d50b945fc..d3c03d4edbeff3 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -112,6 +112,10 @@
+ #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
+
+ #define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */
++#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */
++#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */
++
++#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+
+ /*
+ * MEI HW Section
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index bb4e9eabda9789..c018534c780f9b 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
+ }
+
+ if (!mei_cl_is_connected(cl)) {
+- cl_err(dev, cl, "is not connected");
++ cl_dbg(dev, cl, "is not connected");
+ rets = -ENODEV;
+ goto out;
+ }
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 676d566f38ddfd..6c4f5e9fe834dc 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -116,9 +116,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+
+- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
++
++ {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+
+ /* required last entry */
+ {0, }
+@@ -396,8 +400,10 @@ static int mei_me_pci_resume(struct device *device)
+ }
+
+ err = mei_restart(dev);
+- if (err)
++ if (err) {
++ free_irq(pdev->irq, dev);
+ return err;
++ }
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
+index 8aea2d070a40c2..d279a4f195e2a3 100644
+--- a/drivers/misc/open-dice.c
++++ b/drivers/misc/open-dice.c
+@@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ *drvdata = (struct open_dice_drvdata){
+- .lock = __MUTEX_INITIALIZER(drvdata->lock),
+ .rmem = rmem,
+ .misc = (struct miscdevice){
+ .parent = dev,
+@@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
+ .mode = 0600,
+ },
+ };
++ mutex_init(&drvdata->lock);
+
+ /* Index overflow check not needed, misc_register() will fail. */
+ snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index ed4d0ef5e5c319..af519088732d9a 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -71,6 +71,7 @@
+ #define PCI_DEVICE_ID_TI_AM654 0xb00c
+ #define PCI_DEVICE_ID_TI_J7200 0xb00f
+ #define PCI_DEVICE_ID_TI_AM64 0xb010
++#define PCI_DEVICE_ID_TI_J721S2 0xb013
+ #define PCI_DEVICE_ID_LS1088A 0x80c0
+ #define PCI_DEVICE_ID_IMX8 0x0808
+
+@@ -81,6 +82,7 @@
+ #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
+ #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
+ #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
++#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
+
+ static DEFINE_IDA(pci_endpoint_test_ida);
+
+@@ -990,6 +992,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
++ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
++ .driver_data = (kernel_ulong_t)&default_data,
++ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+@@ -999,6 +1004,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
++ .driver_data = (kernel_ulong_t)&j721e_data,
++ },
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
+index eb97167c03fb4b..9715798acce3de 100644
+--- a/drivers/misc/pvpanic/pvpanic-mmio.c
++++ b/drivers/misc/pvpanic/pvpanic-mmio.c
+@@ -24,52 +24,9 @@ MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+ MODULE_DESCRIPTION("pvpanic-mmio device driver");
+ MODULE_LICENSE("GPL");
+
+-static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+- return sysfs_emit(buf, "%x\n", pi->capability);
+-}
+-static DEVICE_ATTR_RO(capability);
+-
+-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+- return sysfs_emit(buf, "%x\n", pi->events);
+-}
+-
+-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct pvpanic_instance *pi = dev_get_drvdata(dev);
+- unsigned int tmp;
+- int err;
+-
+- err = kstrtouint(buf, 16, &tmp);
+- if (err)
+- return err;
+-
+- if ((tmp & pi->capability) != tmp)
+- return -EINVAL;
+-
+- pi->events = tmp;
+-
+- return count;
+-}
+-static DEVICE_ATTR_RW(events);
+-
+-static struct attribute *pvpanic_mmio_dev_attrs[] = {
+- &dev_attr_capability.attr,
+- &dev_attr_events.attr,
+- NULL
+-};
+-ATTRIBUTE_GROUPS(pvpanic_mmio_dev);
+-
+ static int pvpanic_mmio_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct pvpanic_instance *pi;
+ struct resource *res;
+ void __iomem *base;
+
+@@ -92,18 +49,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
+- if (!pi)
+- return -ENOMEM;
+-
+- pi->base = base;
+- pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+-
+- /* initialize capability by RDPT */
+- pi->capability &= ioread8(base);
+- pi->events = pi->capability;
+-
+- return devm_pvpanic_probe(dev, pi);
++ return devm_pvpanic_probe(dev, base);
+ }
+
+ static const struct of_device_id pvpanic_mmio_match[] = {
+@@ -123,7 +69,7 @@ static struct platform_driver pvpanic_mmio_driver = {
+ .name = "pvpanic-mmio",
+ .of_match_table = pvpanic_mmio_match,
+ .acpi_match_table = pvpanic_device_ids,
+- .dev_groups = pvpanic_mmio_dev_groups,
++ .dev_groups = pvpanic_dev_groups,
+ },
+ .probe = pvpanic_mmio_probe,
+ };
+diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
+index 07eddb5ea30fa1..2494725dfacfad 100644
+--- a/drivers/misc/pvpanic/pvpanic-pci.c
++++ b/drivers/misc/pvpanic/pvpanic-pci.c
+@@ -22,51 +22,8 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+ MODULE_DESCRIPTION("pvpanic device driver");
+ MODULE_LICENSE("GPL");
+
+-static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+- return sysfs_emit(buf, "%x\n", pi->capability);
+-}
+-static DEVICE_ATTR_RO(capability);
+-
+-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct pvpanic_instance *pi = dev_get_drvdata(dev);
+-
+- return sysfs_emit(buf, "%x\n", pi->events);
+-}
+-
+-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct pvpanic_instance *pi = dev_get_drvdata(dev);
+- unsigned int tmp;
+- int err;
+-
+- err = kstrtouint(buf, 16, &tmp);
+- if (err)
+- return err;
+-
+- if ((tmp & pi->capability) != tmp)
+- return -EINVAL;
+-
+- pi->events = tmp;
+-
+- return count;
+-}
+-static DEVICE_ATTR_RW(events);
+-
+-static struct attribute *pvpanic_pci_dev_attrs[] = {
+- &dev_attr_capability.attr,
+- &dev_attr_events.attr,
+- NULL
+-};
+-ATTRIBUTE_GROUPS(pvpanic_pci_dev);
+-
+ static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+- struct pvpanic_instance *pi;
+ void __iomem *base;
+ int ret;
+
+@@ -78,18 +35,7 @@ static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
+ if (!base)
+ return -ENOMEM;
+
+- pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL);
+- if (!pi)
+- return -ENOMEM;
+-
+- pi->base = base;
+- pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
+-
+- /* initlize capability by RDPT */
+- pi->capability &= ioread8(base);
+- pi->events = pi->capability;
+-
+- return devm_pvpanic_probe(&pdev->dev, pi);
++ return devm_pvpanic_probe(&pdev->dev, base);
+ }
+
+ static const struct pci_device_id pvpanic_pci_id_tbl[] = {
+@@ -102,8 +48,6 @@ static struct pci_driver pvpanic_pci_driver = {
+ .name = "pvpanic-pci",
+ .id_table = pvpanic_pci_id_tbl,
+ .probe = pvpanic_pci_probe,
+- .driver = {
+- .dev_groups = pvpanic_pci_dev_groups,
+- },
++ .dev_groups = pvpanic_dev_groups,
+ };
+ module_pci_driver(pvpanic_pci_driver);
+diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
+index 049a1200634890..305b367e0ce346 100644
+--- a/drivers/misc/pvpanic/pvpanic.c
++++ b/drivers/misc/pvpanic/pvpanic.c
+@@ -7,6 +7,7 @@
+ * Copyright (C) 2021 Oracle.
+ */
+
++#include <linux/device.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/kexec.h>
+@@ -26,6 +27,13 @@ MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
+ MODULE_DESCRIPTION("pvpanic device driver");
+ MODULE_LICENSE("GPL");
+
++struct pvpanic_instance {
++ void __iomem *base;
++ unsigned int capability;
++ unsigned int events;
++ struct list_head list;
++};
++
+ static struct list_head pvpanic_list;
+ static spinlock_t pvpanic_lock;
+
+@@ -81,11 +89,75 @@ static void pvpanic_remove(void *param)
+ spin_unlock(&pvpanic_lock);
+ }
+
+-int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi)
++static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct pvpanic_instance *pi = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%x\n", pi->capability);
++}
++static DEVICE_ATTR_RO(capability);
++
++static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct pvpanic_instance *pi = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%x\n", pi->events);
++}
++
++static ssize_t events_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct pvpanic_instance *pi = dev_get_drvdata(dev);
++ unsigned int tmp;
++ int err;
++
++ err = kstrtouint(buf, 16, &tmp);
++ if (err)
++ return err;
++
++ if ((tmp & pi->capability) != tmp)
++ return -EINVAL;
++
++ pi->events = tmp;
++
++ return count;
++}
++static DEVICE_ATTR_RW(events);
++
++static struct attribute *pvpanic_dev_attrs[] = {
++ &dev_attr_capability.attr,
++ &dev_attr_events.attr,
++ NULL
++};
++
++static const struct attribute_group pvpanic_dev_group = {
++ .attrs = pvpanic_dev_attrs,
++};
++
++const struct attribute_group *pvpanic_dev_groups[] = {
++ &pvpanic_dev_group,
++ NULL
++};
++EXPORT_SYMBOL_GPL(pvpanic_dev_groups);
++
++int devm_pvpanic_probe(struct device *dev, void __iomem *base)
+ {
+- if (!pi || !pi->base)
++ struct pvpanic_instance *pi;
++
++ if (!base)
+ return -EINVAL;
+
++ pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
++ if (!pi)
++ return -ENOMEM;
++
++ pi->base = base;
++ pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
++
++ /* initialize capability by RDPT */
++ pi->capability &= ioread8(base);
++ pi->events = pi->capability;
++
+ spin_lock(&pvpanic_lock);
+ list_add(&pi->list, &pvpanic_list);
+ spin_unlock(&pvpanic_lock);
+diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
+index 49354595175487..46ffb10438adf6 100644
+--- a/drivers/misc/pvpanic/pvpanic.h
++++ b/drivers/misc/pvpanic/pvpanic.h
+@@ -8,13 +8,7 @@
+ #ifndef PVPANIC_H_
+ #define PVPANIC_H_
+
+-struct pvpanic_instance {
+- void __iomem *base;
+- unsigned int capability;
+- unsigned int events;
+- struct list_head list;
+-};
+-
+-int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi);
++int devm_pvpanic_probe(struct device *dev, void __iomem *base);
++extern const struct attribute_group *pvpanic_dev_groups[];
+
+ #endif /* PVPANIC_H_ */
+diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
+index c1a134bd8ba7b7..b878431553abce 100644
+--- a/drivers/misc/ti-st/st_core.c
++++ b/drivers/misc/ti-st/st_core.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+
+ #include <linux/ti_wilink_st.h>
++#include <linux/netdevice.h>
+
+ /*
+ * function pointer pointing to either,
+@@ -429,7 +430,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ case ST_LL_AWAKE_TO_ASLEEP:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ case ST_LL_ASLEEP:
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+@@ -438,7 +439,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+ default:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
+@@ -492,7 +493,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ break;
+ }
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ }
+ /* if wake-up is set in another context- restart sending */
+diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
+index f50d22882476f9..a0ad1f3a69f7e9 100644
+--- a/drivers/misc/vmw_vmci/vmci_datagram.c
++++ b/drivers/misc/vmw_vmci/vmci_datagram.c
+@@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+
+ dg_info->in_dg_host_queue = true;
+ dg_info->entry = dst_entry;
+- memcpy(&dg_info->msg, dg, dg_size);
++ dg_info->msg = *dg;
++ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+@@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+
+ dg_info->in_dg_host_queue = false;
+ dg_info->entry = dst_entry;
+- memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
++ dg_info->msg = *dg;
++ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
+index 5d7ac07623c273..9a41ab65378de0 100644
+--- a/drivers/misc/vmw_vmci/vmci_event.c
++++ b/drivers/misc/vmw_vmci/vmci_event.c
+@@ -9,6 +9,7 @@
+ #include <linux/vmw_vmci_api.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+@@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg)
+ {
+ struct vmci_subscription *cur;
+ struct list_head *subscriber_list;
++ u32 sanitized_event, max_vmci_event;
+
+ rcu_read_lock();
+- subscriber_list = &subscriber_array[event_msg->event_data.event];
++ max_vmci_event = ARRAY_SIZE(subscriber_array);
++ sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
++ subscriber_list = &subscriber_array[sanitized_event];
+ list_for_each_entry_rcu(cur, subscriber_list, node) {
+ cur->callback(cur->id, &event_msg->event_data,
+ cur->callback_data);
+diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
+index 4f8d962bb5b2a3..1300ccab3d21b1 100644
+--- a/drivers/misc/vmw_vmci/vmci_guest.c
++++ b/drivers/misc/vmw_vmci/vmci_guest.c
+@@ -625,7 +625,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ if (!vmci_dev) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for VMCI device\n");
+- return -ENOMEM;
++ error = -ENOMEM;
++ goto err_unmap_mmio_base;
+ }
+
+ vmci_dev->dev = &pdev->dev;
+@@ -642,7 +643,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ if (!vmci_dev->tx_buffer) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for datagram tx buffer\n");
+- return -ENOMEM;
++ error = -ENOMEM;
++ goto err_unmap_mmio_base;
+ }
+
+ vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+@@ -893,6 +895,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ err_free_data_buffers:
+ vmci_free_dg_buffers(vmci_dev);
+
++err_unmap_mmio_base:
++ if (mmio_base != NULL)
++ pci_iounmap(pdev, mmio_base);
++
+ /* The rest are managed resources and will be freed by PCI core */
+ return error;
+ }
+diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
+index 692daa9eff3411..19c9d2cdd277bf 100644
+--- a/drivers/misc/vmw_vmci/vmci_resource.c
++++ b/drivers/misc/vmw_vmci/vmci_resource.c
+@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
+ spin_lock(&vmci_resource_table.lock);
+
+ hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
+- if (vmci_handle_is_equal(r->handle, resource->handle)) {
++ if (vmci_handle_is_equal(r->handle, resource->handle) &&
++ resource->type == r->type) {
+ hlist_del_init_rcu(&r->node);
+ break;
+ }
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 3a8f27c3e310a5..3564a0f63c9c77 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -400,6 +400,10 @@ struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
++ unsigned int flags;
++#define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */
++#define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */
++
+ struct mmc_rpmb_data *rpmb;
+ };
+
+@@ -409,7 +413,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ struct mmc_blk_ioc_data *idata;
+ int err;
+
+- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
++ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+@@ -465,7 +469,7 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+ }
+
+ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+- struct mmc_blk_ioc_data *idata)
++ struct mmc_blk_ioc_data **idatas, int i)
+ {
+ struct mmc_command cmd = {}, sbc = {};
+ struct mmc_data data = {};
+@@ -475,10 +479,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ unsigned int busy_timeout_ms;
+ int err;
+ unsigned int target_part;
++ struct mmc_blk_ioc_data *idata = idatas[i];
++ struct mmc_blk_ioc_data *prev_idata = NULL;
+
+ if (!card || !md || !idata)
+ return -EINVAL;
+
++ if (idata->flags & MMC_BLK_IOC_DROP)
++ return 0;
++
++ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
++ prev_idata = idatas[i - 1];
++
+ /*
+ * The RPMB accesses comes in from the character device, so we
+ * need to target these explicitly. Else we just target the
+@@ -532,7 +544,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ return err;
+ }
+
+- if (idata->rpmb) {
++ if (idata->rpmb || prev_idata) {
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ /*
+ * We don't do any blockcount validation because the max size
+@@ -540,6 +552,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ * 'Reliable Write' bit here.
+ */
+ sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
++ if (prev_idata)
++ sbc.arg = prev_idata->ic.arg;
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ mrq.sbc = &sbc;
+ }
+@@ -557,6 +571,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ mmc_wait_for_req(card->host, &mrq);
+ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
+
++ if (prev_idata) {
++ memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
++ if (sbc.error) {
++ dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
++ __func__, sbc.error);
++ return sbc.error;
++ }
++ }
++
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+@@ -851,9 +874,11 @@ static const struct block_device_operations mmc_bdops = {
+ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ unsigned int part_type)
+ {
++ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
++ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ int ret = 0;
+
+- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
++ if ((part_type & mask) == rpmb) {
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+@@ -868,9 +893,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
+ static int mmc_blk_part_switch_post(struct mmc_card *card,
+ unsigned int part_type)
+ {
++ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
++ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ int ret = 0;
+
+- if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
++ if ((part_type & mask) == rpmb) {
+ mmc_retune_unpause(card->host);
+ if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ ret = mmc_cmdq_enable(card);
+@@ -1032,6 +1059,20 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+ md->reset_done &= ~type;
+ }
+
++static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
++{
++ struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
++ int i;
++
++ for (i = 1; i < mq_rq->ioc_count; i++) {
++ if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
++ mmc_op_multi(idata[i]->ic.opcode)) {
++ idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
++ idata[i]->flags |= MMC_BLK_IOC_SBC;
++ }
++ }
++}
++
+ /*
+ * The non-block commands come back from the block layer after it queued it and
+ * processed it with all other requests and then they get issued in this
+@@ -1059,11 +1100,14 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+ if (ret)
+ break;
+ }
++
++ mmc_blk_check_sbc(mq_rq);
++
+ fallthrough;
+ case MMC_DRV_OP_IOCTL_RPMB:
+ idata = mq_rq->drv_op_data;
+ for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+- ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
++ ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
+ if (ret)
+ break;
+ }
+@@ -1482,6 +1526,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
++ } else if (mq->in_recovery) {
++ blk_mq_requeue_request(req, true);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+@@ -2381,8 +2427,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ }
+ ret = mmc_blk_cqe_issue_flush(mq, req);
+ break;
+- case REQ_OP_READ:
+ case REQ_OP_WRITE:
++ card->written_flag = true;
++ fallthrough;
++ case REQ_OP_READ:
+ if (host->cqe_enabled)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+@@ -3141,4 +3189,3 @@ module_exit(mmc_blk_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+-
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 4edf9057fa79d3..b7754a1b8d9788 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+ return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+ }
+
++static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
++{
++ return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
++}
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 3d3e0ca5261481..a8c17b4cd73792 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+- mmc_wait_for_cmd(host, &cmd, 0);
++ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
++ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+@@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+- err = mmc_wait_for_cmd(host, &cmd, 0);
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
++ if (err)
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
+ mmc_retune_release(host);
+
+ return err;
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 096093f7be0063..cf396e8f34e986 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -119,13 +119,12 @@ void mmc_retune_enable(struct mmc_host *host)
+
+ /*
+ * Pause re-tuning for a small set of operations. The pause begins after the
+- * next command and after first doing re-tuning.
++ * next command.
+ */
+ void mmc_retune_pause(struct mmc_host *host)
+ {
+ if (!host->retune_paused) {
+ host->retune_paused = 1;
+- mmc_retune_needed(host);
+ mmc_retune_hold(host);
+ }
+ }
+@@ -692,6 +691,7 @@ EXPORT_SYMBOL(mmc_remove_host);
+ */
+ void mmc_free_host(struct mmc_host *host)
+ {
++ cancel_delayed_work_sync(&host->detect);
+ mmc_pwrseq_free(host);
+ put_device(&host->class_dev);
+ }
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 4a4bab9aa7263e..7e39017e440fb2 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
+ case 3: /* MMC v3.1 - v3.3 */
+ case 4: /* MMC v4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+- card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
++ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+@@ -1007,10 +1007,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
+ static unsigned ext_csd_bits[] = {
+ EXT_CSD_BUS_WIDTH_8,
+ EXT_CSD_BUS_WIDTH_4,
++ EXT_CSD_BUS_WIDTH_1,
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_8,
+ MMC_BUS_WIDTH_4,
++ MMC_BUS_WIDTH_1,
+ };
+ struct mmc_host *host = card->host;
+ unsigned idx, bus_width = 0;
+@@ -1817,8 +1819,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+
+ if (err)
+ goto free_card;
+-
+- } else if (!mmc_card_hs400es(card)) {
++ } else if (mmc_card_hs400es(card)) {
++ if (host->ops->execute_hs400_tuning) {
++ err = host->ops->execute_hs400_tuning(host, card);
++ if (err)
++ goto free_card;
++ }
++ } else {
+ /* Select the desired bus width optionally */
+ err = mmc_select_bus_width(card);
+ if (err > 0 && mmc_card_hs(card)) {
+@@ -2081,13 +2088,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
+ {
+ int err = 0;
+
++ if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
++ return 0;
++
+ if (_mmc_cache_enabled(host)) {
+ err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+- pr_err("%s: cache flush error %d\n",
+- mmc_hostname(host), err);
++ pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
++ else
++ host->card->written_flag = false;
+ }
+
+ return err;
+diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
+index 0f6a563103fd23..d780880ddd14b4 100644
+--- a/drivers/mmc/core/mmc_test.c
++++ b/drivers/mmc/core/mmc_test.c
+@@ -3104,13 +3104,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+ #ifdef CONFIG_HIGHMEM
+ test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
++ if (!test->highmem) {
++ count = -ENOMEM;
++ goto free_test_buffer;
++ }
+ #endif
+
+-#ifdef CONFIG_HIGHMEM
+- if (test->buffer && test->highmem) {
+-#else
+ if (test->buffer) {
+-#endif
+ mutex_lock(&mmc_test_lock);
+ mmc_test_run(test, testcase);
+ mutex_unlock(&mmc_test_lock);
+@@ -3118,6 +3118,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+
+ #ifdef CONFIG_HIGHMEM
+ __free_pages(test->highmem, BUFFER_ORDER);
++free_test_buffer:
+ #endif
+ kfree(test->buffer);
+ kfree(test);
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 32b64b564fb1fd..92905fc46436dd 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -15,6 +15,19 @@
+
+ #include "card.h"
+
++static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
++ /*
++ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
++ * This has so far only been observed on cards from 11/2019, while new
++ * cards from 2023/05 do not exhibit this behavior.
++ */
++ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
++ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
++ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
++
++ END_FIXUP
++};
++
+ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ #define INAND_CMD38_ARG_EXT_CSD 113
+ #define INAND_CMD38_ARG_ERASE 0x00
+@@ -53,15 +66,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+- /*
+- * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
+- * This has so far only been observed on cards from 11/2019, while new
+- * cards from 2023/05 do not exhibit this behavior.
+- */
+- _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
+- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+- MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+-
+ /*
+ * Some SD cards lockup while using CMD23 multiblock transfers.
+ */
+@@ -110,11 +114,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+- * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
+- * support being used to offload WRITE_ZEROES.
++ * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
++ * WRITE_ZEROES offloading. It also supports caching, but the cache can
++ * only be flushed after a write has occurred.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+- MMC_QUIRK_TRIM_BROKEN),
++ MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
+
+ /*
+ * Kingston EMMC04G-M627 advertises TRIM but it does not seems to
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index c3e554344c99f9..240469a881a272 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -26,6 +26,7 @@
+ #include "host.h"
+ #include "bus.h"
+ #include "mmc_ops.h"
++#include "quirks.h"
+ #include "sd.h"
+ #include "sd_ops.h"
+
+@@ -1475,6 +1476,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ goto free_card;
+ }
+
++ /* Apply quirks prior to card setup */
++ mmc_fixup_device(card, mmc_sd_fixups);
++
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ if (err)
+ goto free_card;
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index 2a2d949a9344ea..8791656e9e20d2 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
+ int mmc_gpio_get_ro(struct mmc_host *host)
+ {
+ struct mmc_gpio *ctx = host->slot.handler_priv;
++ int cansleep;
+
+ if (!ctx || !ctx->ro_gpio)
+ return -ENOSYS;
+
+- return gpiod_get_value_cansleep(ctx->ro_gpio);
++ cansleep = gpiod_cansleep(ctx->ro_gpio);
++ return cansleep ?
++ gpiod_get_value_cansleep(ctx->ro_gpio) :
++ gpiod_get_value(ctx->ro_gpio);
+ }
+ EXPORT_SYMBOL(mmc_gpio_get_ro);
+
+@@ -217,6 +221,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ }
+ EXPORT_SYMBOL(mmc_gpiod_request_cd);
+
++/**
++ * mmc_gpiod_set_cd_config - set config for card-detection GPIO
++ * @host: mmc host
++ * @config: Generic pinconf config (from pinconf_to_config_packed())
++ *
++ * This can be used by mmc host drivers to fixup a card-detection GPIO's config
++ * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor
++ * through mmc_gpiod_request_cd().
++ *
++ * Returns:
++ * 0 on success, or a negative errno value on error.
++ */
++int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
++{
++ struct mmc_gpio *ctx = host->slot.handler_priv;
++
++ return gpiod_set_config(ctx->cd_gpio, config);
++}
++EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
++
+ bool mmc_can_gpio_cd(struct mmc_host *host)
+ {
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 554e67103c1a1e..bc7e2ad3700215 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -1018,14 +1018,15 @@ config MMC_SDHCI_XENON
+
+ config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
++ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM && OF
+ select THERMAL
+ imply TI_SOC_THERMAL
+ select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+- support present in TI's DRA7 SOCs. The controller supports
+- SD/MMC/SDIO devices.
++ support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller
++ supports SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+@@ -1033,14 +1034,15 @@ config MMC_SDHCI_OMAP
+
+ config MMC_SDHCI_AM654
+ tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
++ depends on ARCH_K3 || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM && OF
+ select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
+ select REGMAP_MMIO
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+- support present in TI's AM654 SOCs. The controller supports
+- SD/MMC/SDIO devices.
++ support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller
++ supports SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
+index b3d7d6d8d65485..fe7a4eac9595c0 100644
+--- a/drivers/mmc/host/cqhci-core.c
++++ b/drivers/mmc/host/cqhci-core.c
+@@ -612,7 +612,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ mmc->cqe_on = true;
+ pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
++ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ mmc_hostname(mmc));
+ }
+@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+- pr_debug("%s: cqhci: Failed to clear tasks\n",
+- mmc_hostname(mmc));
++ pr_warn("%s: cqhci: Failed to clear tasks\n",
++ mmc_hostname(mmc));
+
+ return ret;
+ }
+@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
++ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+ }
+@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ /*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+- * layers will need to send a STOP command), so we set the timeout based on a
+- * generous command timeout.
++ * layers will need to send a STOP command); however, failing to halt complicates
++ * the recovery, so set a timeout that would reasonably allow I/O to complete.
+ */
+-#define CQHCI_START_HALT_TIMEOUT 5
++#define CQHCI_START_HALT_TIMEOUT 500
+
+ static void cqhci_recovery_start(struct mmc_host *mmc)
+ {
+@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+- ok = false;
+-
+ /*
+ * The specification contradicts itself, by saying that tasks cannot be
+ * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
+ * be disabled/re-enabled, but not to disable before clearing tasks.
+ * Have a go anyway.
+ */
+- if (!ok) {
+- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+- cqcfg &= ~CQHCI_ENABLE;
+- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+- cqcfg |= CQHCI_ENABLE;
+- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+- /* Be sure that there are no tasks */
+- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+- ok = false;
+- WARN_ON(!ok);
+- }
++ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
++ ok = false;
++
++ /* Disable to make sure tasks really are cleared */
++ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++ cqcfg &= ~CQHCI_ENABLE;
++ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++ cqcfg |= CQHCI_ENABLE;
++ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
++
++ if (!ok)
++ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
+
+ cqhci_recover_mrqs(cq_host);
+
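The first cqhci hunk fixes a classic operator bug: CQHCI_HALT is a non-zero mask, so `cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT` (logical AND) evaluated true whenever any bit of CQHCI_CTL was set, and the "failed to exit halt state" check never actually tested the halt bit. Testing a flag in a register value needs bitwise AND. A self-contained illustration (plain C, values chosen for the example):

#include <stdio.h>

#define CQHCI_HALT 0x00000001u

int main(void)
{
        unsigned int ctl = 0x0100;      /* HALT clear, another bit set */

        printf("logical &&: %d\n", ctl && CQHCI_HALT);    /* 1 -- wrong */
        printf("bitwise &:  %d\n", !!(ctl & CQHCI_HALT)); /* 0 -- right */
        return 0;
}

The recovery rewrite in the same file follows from the longer halt timeout: instead of disabling and re-enabling CQE only when the first halt-and-clear attempt fails, it now always cycles CQHCI_ENABLE so tasks are known to be cleared before requests are recovered.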
+diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
+index ee3b1a4e08485b..8fa6796787f4b3 100644
+--- a/drivers/mmc/host/davinci_mmc.c
++++ b/drivers/mmc/host/davinci_mmc.c
+@@ -1344,7 +1344,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static void __exit davinci_mmcsd_remove(struct platform_device *pdev)
++static void davinci_mmcsd_remove(struct platform_device *pdev)
+ {
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+@@ -1399,7 +1399,7 @@ static struct platform_driver davinci_mmcsd_driver = {
+ .of_match_table = davinci_mmc_dt_ids,
+ },
+ .probe = davinci_mmcsd_probe,
+- .remove_new = __exit_p(davinci_mmcsd_remove),
++ .remove_new = davinci_mmcsd_remove,
+ .id_table = davinci_mmc_devtype,
+ };
+
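The davinci change is a correctness fix rather than a cleanup: a remove callback annotated __exit is discarded when the driver is built in, yet the device can still be unbound through sysfs, and __exit_p() turns the pointer into NULL in that configuration, so resources acquired in probe were never released. Dropping both keeps the callback in regular .text. A sketch of the corrected shape, using hypothetical foo_* names:

/* Sketch: remove stays in normal .text, so a sysfs unbind
 * always has a valid callback to run, even when built in.
 */
static void foo_remove(struct platform_device *pdev)
{
        /* undo everything foo_probe() set up */
}

static struct platform_driver foo_driver = {
        .probe      = foo_probe,
        .remove_new = foo_remove,   /* no __exit / __exit_p() */
        .driver     = { .name = "foo" },
};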
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 829af2c98a4488..2f0bc79ef856a8 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2952,8 +2952,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
+ if (host->use_dma == TRANS_MODE_IDMAC) {
+ mmc->max_segs = host->ring_size;
+ mmc->max_blk_size = 65535;
+- mmc->max_seg_size = 0x1000;
+- mmc->max_req_size = mmc->max_seg_size * host->ring_size;
++ mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
++ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_blk_count = mmc->max_req_size / 512;
+ } else if (host->use_dma == TRANS_MODE_EDMAC) {
+ mmc->max_segs = 64;
+@@ -3294,6 +3294,10 @@ int dw_mci_probe(struct dw_mci *host)
+ host->biu_clk = devm_clk_get(host->dev, "biu");
+ if (IS_ERR(host->biu_clk)) {
+ dev_dbg(host->dev, "biu clock not available\n");
++ ret = PTR_ERR(host->biu_clk);
++ if (ret == -EPROBE_DEFER)
++ return ret;
++
+ } else {
+ ret = clk_prepare_enable(host->biu_clk);
+ if (ret) {
+@@ -3305,6 +3309,10 @@ int dw_mci_probe(struct dw_mci *host)
+ host->ciu_clk = devm_clk_get(host->dev, "ciu");
+ if (IS_ERR(host->ciu_clk)) {
+ dev_dbg(host->dev, "ciu clock not available\n");
++ ret = PTR_ERR(host->ciu_clk);
++ if (ret == -EPROBE_DEFER)
++ goto err_clk_biu;
++
+ host->bus_hz = host->pdata->bus_hz;
+ } else {
+ ret = clk_prepare_enable(host->ciu_clk);
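Both dw_mmc hunks apply one rule for optional resources: a devm_clk_get() failure can mean "this SoC simply has no such clock" (keep the existing fallback behaviour) or "the clock provider has not probed yet" (the probe must be retried later), and only -EPROBE_DEFER identifies the second case. A hedged sketch of the pattern in isolation:

/* Sketch: an optional clock that still honours probe deferral. */
clk = devm_clk_get(dev, "biu");
if (IS_ERR(clk)) {
        if (PTR_ERR(clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider not ready: retry */
        clk = NULL;                     /* genuinely absent: carry on */
}

Newer kernels also provide devm_clk_get_optional(), which returns NULL rather than an error when the clock is not described at all.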
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 9837dab096e640..c7c067b9415a41 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -801,7 +801,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+
+ cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
+ cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
+- cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
+
+ meson_mmc_set_response_bits(cmd, &cmd_cfg);
+
+diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
+index 528ec8166e7c36..1ed9731e77ef59 100644
+--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
++++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
+@@ -269,7 +269,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc)
+ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
+ {
+ struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+- u32 rx_clk_phase;
++ u32 val, rx_clk_phase;
+ int ret;
+
+ meson_mx_sdhc_disable_clks(mmc);
+@@ -290,27 +290,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
+ mmc->actual_clock = clk_get_rate(host->sd_clk);
+
+ /*
+- * according to Amlogic the following latching points are
+- * selected with empirical values, there is no (known) formula
+- * to calculate these.
++ * Phase 90 should work in most cases. For data transmission,
++ * meson_mx_sdhc_execute_tuning() will find an accurate value.
+ */
+- if (mmc->actual_clock > 100000000) {
+- rx_clk_phase = 1;
+- } else if (mmc->actual_clock > 45000000) {
+- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+- rx_clk_phase = 15;
+- else
+- rx_clk_phase = 11;
+- } else if (mmc->actual_clock >= 25000000) {
+- rx_clk_phase = 15;
+- } else if (mmc->actual_clock > 5000000) {
+- rx_clk_phase = 23;
+- } else if (mmc->actual_clock > 1000000) {
+- rx_clk_phase = 55;
+- } else {
+- rx_clk_phase = 1061;
+- }
+-
++ regmap_read(host->regmap, MESON_SDHC_CLKC, &val);
++ rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4;
+ regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
+ MESON_SDHC_CLK2_RX_CLK_PHASE,
+ FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,
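The replacement logic derives the RX latch phase from the active divider instead of Amlogic's empirical table: MESON_SDHC_CLKC_CLK_DIV is the number of input-clock taps per SD clock period, i.e. per 360 degrees, so a quarter of it places the latch point at roughly 90 degrees whatever the actual clock rate. Worked numbers, illustrative only:

/* Example:
 *   clk_div      = 40 taps per SD clock period (360 degrees)
 *   rx_clk_phase = 40 / 4 = 10 taps
 *   10 / 40 * 360 = 90 degrees after the launch edge
 */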
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index cc333ad67cac81..2a99ffb61f8c04 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -15,7 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/bio.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direction.h>
+ #include <linux/crc7.h>
+ #include <linux/crc-itu-t.h>
+ #include <linux/scatterlist.h>
+@@ -119,19 +119,14 @@ struct mmc_spi_host {
+ struct spi_transfer status;
+ struct spi_message readback;
+
+- /* underlying DMA-aware controller, or null */
+- struct device *dma_dev;
+-
+ /* buffer used for commands and for message "overhead" */
+ struct scratch *data;
+- dma_addr_t data_dma;
+
+ /* Specs say to write ones most of the time, even when the card
+ * has no need to read its input data; and many cards won't care.
+ * This is our source of those ones.
+ */
+ void *ones;
+- dma_addr_t ones_dma;
+ };
+
+
+@@ -147,11 +142,8 @@ static inline int mmc_cs_off(struct mmc_spi_host *host)
+ return spi_setup(host->spi);
+ }
+
+-static int
+-mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
++static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned int len)
+ {
+- int status;
+-
+ if (len > sizeof(*host->data)) {
+ WARN_ON(1);
+ return -EIO;
+@@ -159,19 +151,7 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
+
+ host->status.len = len;
+
+- if (host->dma_dev)
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_FROM_DEVICE);
+-
+- status = spi_sync_locked(host->spi, &host->readback);
+-
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_FROM_DEVICE);
+-
+- return status;
++ return spi_sync_locked(host->spi, &host->readback);
+ }
+
+ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
+@@ -506,23 +486,11 @@ mmc_spi_command_send(struct mmc_spi_host *host,
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = t->rx_buf = data->status;
+- t->tx_dma = t->rx_dma = host->data_dma;
+ t->len = cp - data->status;
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+
+- if (host->dma_dev) {
+- host->m.is_dma_mapped = 1;
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+- }
+ status = spi_sync_locked(host->spi, &host->m);
+-
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+ if (status < 0) {
+ dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
+ cmd->error = status;
+@@ -540,9 +508,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
+ * We always provide TX data for data and CRC. The MMC/SD protocol
+ * requires us to write ones; but Linux defaults to writing zeroes;
+ * so we explicitly initialize it to all ones on RX paths.
+- *
+- * We also handle DMA mapping, so the underlying SPI controller does
+- * not need to (re)do it for each message.
+ */
+ static void
+ mmc_spi_setup_data_message(
+@@ -552,11 +517,8 @@ mmc_spi_setup_data_message(
+ {
+ struct spi_transfer *t;
+ struct scratch *scratch = host->data;
+- dma_addr_t dma = host->data_dma;
+
+ spi_message_init(&host->m);
+- if (dma)
+- host->m.is_dma_mapped = 1;
+
+ /* for reads, readblock() skips 0xff bytes before finding
+ * the token; for writes, this transfer issues that token.
+@@ -570,8 +532,6 @@ mmc_spi_setup_data_message(
+ else
+ scratch->data_token = SPI_TOKEN_SINGLE;
+ t->tx_buf = &scratch->data_token;
+- if (dma)
+- t->tx_dma = dma + offsetof(struct scratch, data_token);
+ spi_message_add_tail(t, &host->m);
+ }
+
+@@ -581,7 +541,6 @@ mmc_spi_setup_data_message(
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = host->ones;
+- t->tx_dma = host->ones_dma;
+ /* length and actual buffer info are written later */
+ spi_message_add_tail(t, &host->m);
+
+@@ -591,14 +550,9 @@ mmc_spi_setup_data_message(
+ if (direction == DMA_TO_DEVICE) {
+ /* the actual CRC may get written later */
+ t->tx_buf = &scratch->crc_val;
+- if (dma)
+- t->tx_dma = dma + offsetof(struct scratch, crc_val);
+ } else {
+ t->tx_buf = host->ones;
+- t->tx_dma = host->ones_dma;
+ t->rx_buf = &scratch->crc_val;
+- if (dma)
+- t->rx_dma = dma + offsetof(struct scratch, crc_val);
+ }
+ spi_message_add_tail(t, &host->m);
+
+@@ -621,10 +575,7 @@ mmc_spi_setup_data_message(
+ memset(t, 0, sizeof(*t));
+ t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ t->tx_buf = host->ones;
+- t->tx_dma = host->ones_dma;
+ t->rx_buf = scratch->status;
+- if (dma)
+- t->rx_dma = dma + offsetof(struct scratch, status);
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+ }
+@@ -653,23 +604,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+
+ if (host->mmc->use_spi_crc)
+ scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
+- if (host->dma_dev)
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+
+ status = spi_sync_locked(spi, &host->m);
+-
+ if (status != 0) {
+ dev_dbg(&spi->dev, "write error (%d)\n", status);
+ return status;
+ }
+
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+-
+ /*
+ * Get the transmission data-response reply. It must follow
+ * immediately after the data block we transferred. This reply
+@@ -718,8 +659,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+
+ t->tx_buf += t->len;
+- if (host->dma_dev)
+- t->tx_dma += t->len;
+
+ /* Return when not busy. If we didn't collect that status yet,
+ * we'll need some more I/O.
+@@ -783,30 +722,12 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+ leftover = status << 1;
+
+- if (host->dma_dev) {
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+- dma_sync_single_for_device(host->dma_dev,
+- t->rx_dma, t->len,
+- DMA_FROM_DEVICE);
+- }
+-
+ status = spi_sync_locked(spi, &host->m);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "read error %d\n", status);
+ return status;
+ }
+
+- if (host->dma_dev) {
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+- dma_sync_single_for_cpu(host->dma_dev,
+- t->rx_dma, t->len,
+- DMA_FROM_DEVICE);
+- }
+-
+ if (bitshift) {
+ /* Walk through the data and the crc and do
+ * all the magic to get byte-aligned data.
+@@ -841,8 +762,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ }
+
+ t->rx_buf += t->len;
+- if (host->dma_dev)
+- t->rx_dma += t->len;
+
+ return 0;
+ }
+@@ -857,7 +776,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ struct mmc_data *data, u32 blk_size)
+ {
+ struct spi_device *spi = host->spi;
+- struct device *dma_dev = host->dma_dev;
+ struct spi_transfer *t;
+ enum dma_data_direction direction = mmc_get_dma_dir(data);
+ struct scatterlist *sg;
+@@ -884,31 +802,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ */
+ for_each_sg(data->sg, sg, data->sg_len, n_sg) {
+ int status = 0;
+- dma_addr_t dma_addr = 0;
+ void *kmap_addr;
+ unsigned length = sg->length;
+- enum dma_data_direction dir = direction;
+-
+- /* set up dma mapping for controller drivers that might
+- * use DMA ... though they may fall back to PIO
+- */
+- if (dma_dev) {
+- /* never invalidate whole *shared* pages ... */
+- if ((sg->offset != 0 || length != PAGE_SIZE)
+- && dir == DMA_FROM_DEVICE)
+- dir = DMA_BIDIRECTIONAL;
+-
+- dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
+- PAGE_SIZE, dir);
+- if (dma_mapping_error(dma_dev, dma_addr)) {
+- data->error = -EFAULT;
+- break;
+- }
+- if (direction == DMA_TO_DEVICE)
+- t->tx_dma = dma_addr + sg->offset;
+- else
+- t->rx_dma = dma_addr + sg->offset;
+- }
+
+ /* allow pio too; we don't allow highmem */
+ kmap_addr = kmap(sg_page(sg));
+@@ -941,8 +836,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ if (direction == DMA_FROM_DEVICE)
+ flush_dcache_page(sg_page(sg));
+ kunmap(sg_page(sg));
+- if (dma_dev)
+- dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
+
+ if (status < 0) {
+ data->error = status;
+@@ -977,21 +870,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ scratch->status[0] = SPI_TOKEN_STOP_TRAN;
+
+ host->early_status.tx_buf = host->early_status.rx_buf;
+- host->early_status.tx_dma = host->early_status.rx_dma;
+ host->early_status.len = statlen;
+
+- if (host->dma_dev)
+- dma_sync_single_for_device(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+-
+ tmp = spi_sync_locked(spi, &host->m);
+-
+- if (host->dma_dev)
+- dma_sync_single_for_cpu(host->dma_dev,
+- host->data_dma, sizeof(*scratch),
+- DMA_BIDIRECTIONAL);
+-
+ if (tmp < 0) {
+ if (!data->error)
+ data->error = tmp;
+@@ -1265,52 +1146,6 @@ mmc_spi_detect_irq(int irq, void *mmc)
+ return IRQ_HANDLED;
+ }
+
+-#ifdef CONFIG_HAS_DMA
+-static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+-{
+- struct spi_device *spi = host->spi;
+- struct device *dev;
+-
+- if (!spi->master->dev.parent->dma_mask)
+- return 0;
+-
+- dev = spi->master->dev.parent;
+-
+- host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(dev, host->ones_dma))
+- return -ENOMEM;
+-
+- host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(dev, host->data_dma)) {
+- dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+- DMA_TO_DEVICE);
+- return -ENOMEM;
+- }
+-
+- dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+-
+- host->dma_dev = dev;
+- return 0;
+-}
+-
+-static void mmc_spi_dma_free(struct mmc_spi_host *host)
+-{
+- if (!host->dma_dev)
+- return;
+-
+- dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+- DMA_TO_DEVICE);
+- dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
+- DMA_BIDIRECTIONAL);
+-}
+-#else
+-static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+-static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+-#endif
+-
+ static int mmc_spi_probe(struct spi_device *spi)
+ {
+ void *ones;
+@@ -1402,24 +1237,17 @@ static int mmc_spi_probe(struct spi_device *spi)
+ host->powerup_msecs = 250;
+ }
+
+- /* preallocate dma buffers */
++ /* Preallocate buffers */
+ host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
+ if (!host->data)
+ goto fail_nobuf1;
+
+- status = mmc_spi_dma_alloc(host);
+- if (status)
+- goto fail_dma;
+-
+ /* setup message for status/busy readback */
+ spi_message_init(&host->readback);
+- host->readback.is_dma_mapped = (host->dma_dev != NULL);
+
+ spi_message_add_tail(&host->status, &host->readback);
+ host->status.tx_buf = host->ones;
+- host->status.tx_dma = host->ones_dma;
+ host->status.rx_buf = &host->data->status;
+- host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
+ host->status.cs_change = 1;
+
+ /* register card detect irq */
+@@ -1464,9 +1292,8 @@ static int mmc_spi_probe(struct spi_device *spi)
+ if (!status)
+ has_ro = true;
+
+- dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
++ dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n",
+ dev_name(&mmc->class_dev),
+- host->dma_dev ? "" : ", no DMA",
+ has_ro ? "" : ", no WP",
+ (host->pdata && host->pdata->setpower)
+ ? "" : ", no poweroff",
+@@ -1477,8 +1304,6 @@ static int mmc_spi_probe(struct spi_device *spi)
+ fail_gpiod_request:
+ mmc_remove_host(mmc);
+ fail_glue_init:
+- mmc_spi_dma_free(host);
+-fail_dma:
+ kfree(host->data);
+ fail_nobuf1:
+ mmc_spi_put_pdata(spi);
+@@ -1500,7 +1325,6 @@ static void mmc_spi_remove(struct spi_device *spi)
+
+ mmc_remove_host(mmc);
+
+- mmc_spi_dma_free(host);
+ kfree(host->data);
+ kfree(host->ones);
+
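The large mmc_spi removal works because everything deleted duplicated what the SPI core already does: for a DMA-capable controller, spi_sync() maps and unmaps the transfer buffers itself, and the is_dma_mapped / t->tx_dma / t->rx_dma fields were the legacy escape hatch for drivers that pre-mapped their own. After this patch a transfer carries only CPU pointers. A minimal sketch of the modern convention (tx, rx and len are hypothetical):

/* Sketch: the SPI core takes care of any DMA mapping. */
struct spi_transfer t = {
        .tx_buf = tx,           /* plain kernel virtual addresses */
        .rx_buf = rx,
        .len    = len,
};
struct spi_message m;

spi_message_init(&m);
spi_message_add_tail(&t, &m);
status = spi_sync(spi, &m);     /* core maps/unmaps as needed */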
+diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
+index 35067e1e6cd801..f5da7f9baa52d4 100644
+--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
+@@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ struct scatterlist *sg;
+ int i;
+
++ host->dma_in_progress = true;
++
+ if (!host->variant->dma_lli || data->sg_len == 1 ||
+ idma->use_bounce_buffer) {
+ u32 dma_addr;
+@@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+ return 0;
+ }
+
++static void sdmmc_idma_error(struct mmci_host *host)
++{
++ struct mmc_data *data = host->data;
++ struct sdmmc_idma *idma = host->dma_priv;
++
++ if (!dma_inprogress(host))
++ return;
++
++ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++ host->dma_in_progress = false;
++ data->host_cookie = 0;
++
++ if (!idma->use_bounce_buffer)
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++ mmc_get_dma_dir(data));
++}
++
+ static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
+ {
++ if (!dma_inprogress(host))
++ return;
++
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
++ host->dma_in_progress = false;
+
+ if (!data->host_cookie)
+ sdmmc_idma_unprep_data(host, data, 0);
+@@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
+ .dma_setup = sdmmc_idma_setup,
+ .dma_start = sdmmc_idma_start,
+ .dma_finalize = sdmmc_idma_finalize,
++ .dma_error = sdmmc_idma_error,
+ .set_clkreg = mmci_sdmmc_set_clkreg,
+ .set_pwrreg = mmci_sdmmc_set_pwrreg,
+ .busy_complete = sdmmc_busy_complete,
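The reason for threading dma_in_progress through sdmmc_idma_start/error/finalize is that the error and completion paths can both run for a single request; each new callback returns early unless a transfer is actually in flight and clears the flag before touching the hardware, so teardown happens exactly once whichever path gets there first. The generic shape, with a hypothetical foo_host:

/* Sketch: teardown that is safe to reach from several paths. */
static void foo_dma_teardown(struct foo_host *host)
{
        if (!host->dma_in_progress)     /* other path ran already */
                return;
        host->dma_in_progress = false;
        /* ... stop the DMA engine, unmap the buffers ... */
}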
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 97f7c3d4be6ea9..8b755f16273258 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -1222,7 +1222,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
+ }
+
+ if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
+- if (events & MSDC_INT_CMDTMO ||
++ if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
+ (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
+ /*
+ * should not clear fifo/interrupt as the tune data
+@@ -1315,9 +1315,9 @@ static void msdc_start_command(struct msdc_host *host,
+ static void msdc_cmd_next(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+ {
+- if ((cmd->error &&
+- !(cmd->error == -EILSEQ &&
+- (mmc_op_tuning(cmd->opcode) || host->hs400_tuning))) ||
++ if ((cmd->error && !host->hs400_tuning &&
++ !(cmd->error == -EILSEQ &&
++ mmc_op_tuning(cmd->opcode))) ||
+ (mrq->sbc && mrq->sbc->error))
+ msdc_request_done(host, mrq);
+ else if (cmd == mrq->sbc)
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index 9fb8995b43a1c8..13fa8588e38c10 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -1119,10 +1119,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
+
+ host = slot->host;
+
+- if (slot->vsd)
+- gpiod_set_value(slot->vsd, power_on);
+- if (slot->vio)
+- gpiod_set_value(slot->vio, power_on);
++ if (power_on) {
++ if (slot->vsd) {
++ gpiod_set_value(slot->vsd, power_on);
++ msleep(1);
++ }
++ if (slot->vio) {
++ gpiod_set_value(slot->vio, power_on);
++ msleep(1);
++ }
++ } else {
++ if (slot->vio) {
++ gpiod_set_value(slot->vio, power_on);
++ msleep(50);
++ }
++ if (slot->vsd) {
++ gpiod_set_value(slot->vsd, power_on);
++ msleep(50);
++ }
++ }
+
+ if (slot->pdata->set_power != NULL)
+ slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
+@@ -1259,18 +1274,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+ slot->pdata = &host->pdata->slots[id];
+
+ /* Check for some optional GPIO controls */
+- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
+- id, GPIOD_OUT_LOW);
++ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
++ id, GPIOD_OUT_LOW);
+ if (IS_ERR(slot->vsd))
+ return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
+ "error looking up VSD GPIO\n");
+- slot->vio = gpiod_get_index_optional(host->dev, "vio",
+- id, GPIOD_OUT_LOW);
++ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
++ id, GPIOD_OUT_LOW);
+ if (IS_ERR(slot->vio))
+ return dev_err_probe(host->dev, PTR_ERR(slot->vio),
+ "error looking up VIO GPIO\n");
+- slot->cover = gpiod_get_index_optional(host->dev, "cover",
+- id, GPIOD_IN);
++ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
++ id, GPIOD_IN);
+ if (IS_ERR(slot->cover))
+ return dev_err_probe(host->dev, PTR_ERR(slot->cover),
+ "error looking up cover switch GPIO\n");
+@@ -1384,13 +1399,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
+ if (IS_ERR(host->virt_base))
+ return PTR_ERR(host->virt_base);
+
+- host->slot_switch = gpiod_get_optional(host->dev, "switch",
+- GPIOD_OUT_LOW);
+- if (IS_ERR(host->slot_switch))
+- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+- "error looking up slot switch GPIO\n");
+-
+-
+ INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
+ INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
+
+@@ -1409,6 +1417,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
++ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
++ GPIOD_OUT_LOW);
++ if (IS_ERR(host->slot_switch))
++ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
++ "error looking up slot switch GPIO\n");
++
+ host->id = pdev->id;
+ host->irq = irq;
+ host->phys_base = res->start;
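Two independent fixes land in omap.c. The GPIO lookups move to the devm_ variants so the descriptors are released automatically on probe failure or unbind (the plain gpiod_get_index_optional() calls leaked them), and the slot-switch lookup moves below the host->dev = &pdev->dev assignment visible in the surrounding context, so it no longer runs before that pointer is set. The power path additionally gains explicit rail sequencing, summarised here with the patch's own delays:

/* Power sequencing introduced above:
 *   up:   VSD on,  wait 1 ms,  then VIO on,  wait 1 ms
 *   down: VIO off, wait 50 ms, then VSD off, wait 50 ms
 * The rails are never driven in the wrong order, and each one
 * gets time to settle (power-up) or discharge (power-down).
 */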
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index acf5fc3ad7e415..eb8f427f9770d5 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -10,6 +10,7 @@
+ #include <linux/export.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/pinctrl/pinconf-generic.h>
+ #include <linux/platform_device.h>
+ #include <linux/ioport.h>
+ #include <linux/io.h>
+@@ -80,6 +81,8 @@ struct sdhci_acpi_host {
+ enum {
+ DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0),
+ DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1),
++ DMI_QUIRK_SD_CD_ACTIVE_HIGH = BIT(2),
++ DMI_QUIRK_SD_CD_ENABLE_PULL_UP = BIT(3),
+ };
+
+ static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+@@ -719,7 +722,28 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+
++/* Please keep this list sorted alphabetically */
+ static const struct dmi_system_id sdhci_acpi_quirks[] = {
++ {
++ /*
++ * The Acer Aspire Switch 10 (SW5-012) microSD slot always
++ * reports the card being write-protected even though microSD
++ * cards do not have a write-protect switch at all.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
++ },
++ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++ },
++ {
++		/* Asus T100TA: needs a pull-up on CD, but its DSDT GpioInt has NoPull set */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
++ },
++ .driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP,
++ },
+ {
+ /*
+ * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
+@@ -736,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ },
+ {
+ /*
+- * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+- * reports the card being write-protected even though microSD
+- * cards do not have a write-protect switch at all.
++		 * The Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version)
++		 * has broken WP reporting and an inverted CD signal.
++ * Note this has more or less the same BIOS as the Lenovo Yoga
++ * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike
++		 * the 830/1050 models, which share the same mainboard, this
++		 * model has a different mainboard, and the inverted CD and
++		 * broken WP are unique to this board.
+ */
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
++ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
++ /* Full match so as to NOT match the 830/1050 BIOS */
++ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
+ },
+- .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++ .driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT |
++ DMI_QUIRK_SD_CD_ACTIVE_HIGH),
+ },
+ {
+ /*
+@@ -757,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
++ {
++ /*
++ * The Toshiba WT10-A's microSD slot always reports the card being
++ * write-protected.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"),
++ },
++ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++ },
+ {} /* Terminating entry */
+ };
+
+@@ -866,12 +909,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
+ bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+
++ if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH)
++ host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
++
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ goto err_free;
+ dev_warn(dev, "failed to setup card detect gpio\n");
+ c->use_runtime_pm = false;
++ } else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) {
++ mmc_gpiod_set_cd_config(host->mmc,
++ PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
+ }
+
+ if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
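This hunk is the consumer of the mmc_gpiod_set_cd_config() helper added in slot-gpio.c above: when firmware describes the card-detect GpioInt with NoPull, as on the Asus T100TA, the driver repairs the bias after requesting the CD GPIO. PIN_CONF_PACKED() packs a generic pinconf parameter together with its argument; for PIN_CONFIG_BIAS_PULL_UP the argument is the pull strength in ohms (20000 here, i.e. 20 kOhm). Usage in isolation, mirroring the patch:

err = mmc_gpiod_request_cd(host->mmc, NULL, 0, false, 0);
if (!err && (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP))
        mmc_gpiod_set_cd_config(host->mmc,
                        PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));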
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index c23251bb95f38f..25664cd5e90f4e 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -23,6 +23,7 @@
+ #define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
+ #define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
+ #define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
++#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4)
+
+ #define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
+ #define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
+@@ -325,6 +326,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
++ if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
++ host->mmc_host_ops.card_busy = NULL;
++
+ /* Change the base clock frequency if the DT property exists */
+ if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->base_freq_hz) != 0)
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 668e0aceeebac9..e113b99a3eab59 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++ unsigned long flags;
++
++ spin_lock_irqsave(&host->lock, flags);
++ host->runtime_suspended = true;
++ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Drop the performance vote */
+ dev_pm_opp_set_rate(dev, 0);
+@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++ unsigned long flags;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
+
+ dev_pm_opp_set_rate(dev, msm_host->clk_rate);
+
+- return sdhci_msm_ice_resume(msm_host);
++ ret = sdhci_msm_ice_resume(msm_host);
++ if (ret)
++ return ret;
++
++ spin_lock_irqsave(&host->lock, flags);
++ host->runtime_suspended = false;
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ return ret;
+ }
+
+ static const struct dev_pm_ops sdhci_msm_pm_ops = {
+diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
+index 42d54532cabe6c..8379a0620c8fed 100644
+--- a/drivers/mmc/host/sdhci-of-aspeed.c
++++ b/drivers/mmc/host/sdhci-of-aspeed.c
+@@ -510,6 +510,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
+ { .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
+ { }
+ };
++MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
+
+ static struct platform_driver aspeed_sdhci_driver = {
+ .driver = {
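The one-line aspeed change matters for autoloading: MODULE_DEVICE_TABLE(of, ...) exports the match table in the module's alias metadata, which is what lets udev/modprobe load the driver automatically when a device with a matching compatible string appears. Without it the module only binds if loaded by hand. The generic shape, with hypothetical names:

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);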
+diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
+index 3a3bae6948a899..a0524127ca073d 100644
+--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
+@@ -584,6 +584,17 @@ static int dwcmshc_probe(struct platform_device *pdev)
+ return err;
+ }
+
++static void dwcmshc_disable_card_clk(struct sdhci_host *host)
++{
++ u16 ctrl;
++
++ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++ if (ctrl & SDHCI_CLOCK_CARD_EN) {
++ ctrl &= ~SDHCI_CLOCK_CARD_EN;
++ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
++ }
++}
++
+ static void dwcmshc_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+@@ -591,8 +602,14 @@ static void dwcmshc_remove(struct platform_device *pdev)
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ struct rk35xx_priv *rk_priv = priv->priv;
+
++ pm_runtime_get_sync(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
++
+ sdhci_remove_host(host, 0);
+
++ dwcmshc_disable_card_clk(host);
++
+ clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(priv->bus_clk);
+ if (rk_priv)
+@@ -684,17 +701,6 @@ static void dwcmshc_enable_card_clk(struct sdhci_host *host)
+ }
+ }
+
+-static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+-{
+- u16 ctrl;
+-
+- ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+- if (ctrl & SDHCI_CLOCK_CARD_EN) {
+- ctrl &= ~SDHCI_CLOCK_CARD_EN;
+- sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+- }
+-}
+-
+ static int dwcmshc_runtime_suspend(struct device *dev)
+ {
+ struct sdhci_host *host = dev_get_drvdata(dev);
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index 1e0bc7bace1b08..0a26831b3b67d4 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -1439,6 +1439,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
++ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
++ mmc_retune_needed(host->mmc);
++
+ if (omap_host->con != -EINVAL)
+ sdhci_runtime_suspend_host(host);
+
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 7c14feb5db7702..7039af2680ffd3 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -1325,7 +1325,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+
+ ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
+ if (ret)
+- return ret;
++ goto fail;
+
+ /*
+ * Turn PMOS on [bit 0], set over current detection to 2.4 V
+@@ -1336,7 +1336,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+ else
+ scratch &= ~0x47;
+
+- return pci_write_config_byte(chip->pdev, 0xAE, scratch);
++ ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
++
++fail:
++ return pcibios_err_to_errno(ret);
+ }
+
+ static int jmicron_probe(struct sdhci_pci_chip *chip)
+@@ -2201,7 +2204,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
+
+ ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
+ if (ret)
+- return ret;
++ return pcibios_err_to_errno(ret);
+
+ slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
+ dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
+@@ -2210,7 +2213,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
+
+ ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
+ if (ret)
+- return ret;
++ return pcibios_err_to_errno(ret);
+
+ first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
+
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 109d4b010f9786..77911a57b12cfc 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -25,6 +25,12 @@
+ #define GLI_9750_WT_EN_ON 0x1
+ #define GLI_9750_WT_EN_OFF 0x0
+
++#define PCI_GLI_9750_PM_CTRL 0xFC
++#define PCI_GLI_9750_PM_STATE GENMASK(1, 0)
++
++#define PCI_GLI_9750_CORRERR_MASK 0x214
++#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
++
+ #define SDHCI_GLI_9750_CFG2 0x848
+ #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24)
+ #define GLI_9750_CFG2_L1DLY_VALUE 0x1F
+@@ -149,6 +155,9 @@
+ #define PCI_GLI_9755_PM_CTRL 0xFC
+ #define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
+
++#define PCI_GLI_9755_CORRERR_MASK 0x214
++#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
++
+ #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
+ #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
+
+@@ -536,8 +545,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ static void gl9750_hw_setting(struct sdhci_host *host)
+ {
++ struct sdhci_pci_slot *slot = sdhci_priv(host);
++ struct pci_dev *pdev;
+ u32 value;
+
++ pdev = slot->chip->pdev;
++
+ gl9750_wt_on(host);
+
+ value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
+@@ -547,6 +560,18 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ GLI_9750_CFG2_L1DLY_VALUE);
+ sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
+
++ /* toggle PM state to allow GL9750 to enter ASPM L1.2 */
++ pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
++ value |= PCI_GLI_9750_PM_STATE;
++ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++ value &= ~PCI_GLI_9750_PM_STATE;
++ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++
++ /* mask the replay timer timeout of AER */
++ pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
++ value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++ pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
++
+ gl9750_wt_off(host);
+ }
+
+@@ -756,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ value &= ~PCI_GLI_9755_PM_STATE;
+ pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
+
++ /* mask the replay timer timeout of AER */
++ pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
++ value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++ pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
++
+ gl9755_wt_off(pdev);
+ }
+
+@@ -1159,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
+ }
+
++static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
++ bool enable)
++{
++ struct pci_dev *pdev = slot->chip->pdev;
++ u32 value;
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++ value &= ~GLI_9763E_VHS_REV;
++ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
++
++ if (enable)
++ value &= ~GLI_9763E_CFG_LPSN_DIS;
++ else
++ value |= GLI_9763E_CFG_LPSN_DIS;
++
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++ value &= ~GLI_9763E_VHS_REV;
++ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++}
++
+ static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
+ unsigned int timing)
+ {
+@@ -1267,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
+ if (ret)
+ goto cleanup;
+
++ /* Disable LPM negotiation to avoid entering L1 state. */
++ gl9763e_set_low_power_negotiation(slot, false);
++
+ return 0;
+
+ cleanup:
+@@ -1310,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+ }
+
+ #ifdef CONFIG_PM
+-static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+-{
+- struct pci_dev *pdev = slot->chip->pdev;
+- u32 value;
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+- value &= ~GLI_9763E_VHS_REV;
+- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+-
+- if (enable)
+- value &= ~GLI_9763E_CFG_LPSN_DIS;
+- else
+- value |= GLI_9763E_CFG_LPSN_DIS;
+-
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+- value &= ~GLI_9763E_VHS_REV;
+- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-}
+-
+ static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+ {
+ struct sdhci_pci_slot *slot = chip->slots[0];
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index 7bfee28116af12..058bef1c7e4190 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io
+ return 0;
+ }
+
++static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
++{
++ struct sdhci_pci_chip *chip;
++ struct sdhci_pci_slot *slot = sdhci_priv(host);
++ u32 scratch_32 = 0;
++ u8 scratch_8 = 0;
++
++ chip = slot->chip;
++
++ if (mode == MMC_POWER_OFF) {
++ /* UnLock WP */
++ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
++ scratch_8 &= 0x7f;
++ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
++
++ /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
++ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
++ scratch_32 &= ~(O2_SD_SEL_DLL);
++ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
++
++ /* Lock WP */
++ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
++ scratch_8 |= 0x80;
++ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
++ }
++
++ sdhci_set_power(host, mode, vdd);
++}
++
+ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ struct sdhci_pci_chip *chip;
+@@ -794,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+@@ -805,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_CLKREQ, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x20;
+ pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+@@ -814,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x01;
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+@@ -827,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_INF_MOD, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x08;
+ pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+@@ -835,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+@@ -846,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+@@ -857,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ O2_SD_FUNC_REG0,
+ &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
+
+ /* Check Whether subId is 0x11 or 0x12 */
+@@ -869,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ O2_SD_FUNC_REG4,
+ &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+
+ /* Enable Base Clk setting change */
+ scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
+@@ -892,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+
+ scratch_32 &= ~(0xFF00);
+ scratch_32 |= 0x07E0C800;
+@@ -902,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLKREQ, &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch_32 |= 0x3;
+ pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+
+ scratch_32 &= ~(0x1F3F070E);
+ scratch_32 |= 0x18270106;
+@@ -920,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch_32 &= ~(0xE0);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, scratch_32);
+@@ -932,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+@@ -942,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+@@ -950,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+
+ if ((scratch_32 & 0xff000000) == 0x01000000) {
+ scratch_32 &= 0x0000FFFF;
+@@ -969,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ O2_SD_FUNC_REG4,
+ &scratch_32);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch_32 |= (1 << 22);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4, scratch_32);
+@@ -988,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+@@ -999,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+@@ -1028,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+- return ret;
++ goto read_fail;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ }
+
+ return 0;
++
++read_fail:
++ return pcibios_err_to_errno(ret);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -1051,6 +1083,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_power = sdhci_pci_o2_set_power,
+ };
+
+ const struct sdhci_pci_fixes sdhci_o2 = {
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 6b84ba27e6ab0d..bed57a1c64b522 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -239,15 +239,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
+ div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ sdhci_enable_clk(host, div);
+
++ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
++ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ /* Enable CLK_AUTO when the clock is greater than 400K. */
+ if (clk > 400000) {
+- val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+- mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
+- SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ if (mask != (val & mask)) {
+ val |= mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
++ } else {
++ if (val & mask) {
++ val &= ~mask;
++ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
++ }
+ }
+ }
+
+@@ -416,12 +420,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
+ mmc_request_done(host->mmc, mrq);
+ }
+
++static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
++{
++ struct mmc_host *mmc = host->mmc;
++
++ switch (mode) {
++ case MMC_POWER_OFF:
++ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
++
++ mmc_regulator_disable_vqmmc(mmc);
++ break;
++ case MMC_POWER_ON:
++ mmc_regulator_enable_vqmmc(mmc);
++ break;
++ case MMC_POWER_UP:
++ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
++ break;
++ }
++}
++
+ static struct sdhci_ops sdhci_sprd_ops = {
+ .read_l = sdhci_sprd_readl,
+ .write_l = sdhci_sprd_writel,
+ .write_w = sdhci_sprd_writew,
+ .write_b = sdhci_sprd_writeb,
+ .set_clock = sdhci_sprd_set_clock,
++ .set_power = sdhci_sprd_set_power,
+ .get_max_clock = sdhci_sprd_get_max_clock,
+ .get_min_clock = sdhci_sprd_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+@@ -823,6 +848,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+
++ ret = mmc_regulator_get_supply(host->mmc);
++ if (ret)
++ goto pm_runtime_disable;
++
+ ret = sdhci_setup_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
+index 8cf3a375de659a..cc9d28b75eb911 100644
+--- a/drivers/mmc/host/sdhci-xenon-phy.c
++++ b/drivers/mmc/host/sdhci-xenon-phy.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/ktime.h>
++#include <linux/iopoll.h>
+ #include <linux/of_address.h>
+
+ #include "sdhci-pltfm.h"
+@@ -109,6 +110,8 @@
+ #define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
+ #define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+
++#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
++
+ /*
+ * List offset of PHY registers and some special register values
+ * in eMMC PHY 5.0 or eMMC PHY 5.1
+@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
+ return 0;
+ }
+
++static int xenon_check_stability_internal_clk(struct sdhci_host *host)
++{
++ u32 reg;
++ int err;
++
++ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
++ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
++ if (err)
++ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
++
++ return err;
++}
++
+ /*
+ * eMMC 5.0/5.1 PHY init/re-init.
+ * eMMC PHY init should be executed after:
+@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
+ struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+
++ int ret = xenon_check_stability_internal_clk(host);
++
++ if (ret)
++ return ret;
++
+ reg = sdhci_readl(host, phy_regs->timing_adj);
+ reg |= XENON_PHY_INITIALIZAION;
+ sdhci_writel(host, reg, phy_regs->timing_adj);
+@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
+ /* get the wait time */
+ wait /= clock;
+ wait++;
+- /* wait for host eMMC PHY init completes */
+- udelay(wait);
+
+- reg = sdhci_readl(host, phy_regs->timing_adj);
+- reg &= XENON_PHY_INITIALIZAION;
+- if (reg) {
++ /*
++	 * The AC5X spec says the bit must be polled until zero.
++	 * We see cases in which the timeout can take longer
++ * than the standard calculation on AC5X, which is
++ * expected following the spec comment above.
++ * According to the spec, we must wait as long as
++ * it takes for that bit to toggle on AC5X.
++ * Cap that with 100 delay loops so we won't get
++ * stuck here forever:
++ */
++
++ ret = read_poll_timeout(sdhci_readl, reg,
++ !(reg & XENON_PHY_INITIALIZAION),
++ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
++ false, host, phy_regs->timing_adj);
++ if (ret)
+ dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
+- wait);
+- return -ETIMEDOUT;
+- }
++ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
+
+- return 0;
++ return ret;
+ }
+
+ #define ARMADA_3700_SOC_PAD_1_8V 0x1
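read_poll_timeout() (from <linux/iopoll.h>, newly included above) packages the read/check/sleep loop that the old code approximated with a single udelay() and one check: it keeps re-reading until the condition holds or the timeout expires, and returns 0 or -ETIMEDOUT. The argument order, annotated on a generic example (reg, err, period_us, BUSY_BIT and ioaddr are hypothetical):

/* Sketch: poll a register until BUSY_BIT clears. */
err = read_poll_timeout(readl, reg,             /* accessor, result */
                        !(reg & BUSY_BIT),      /* success condition */
                        period_us,              /* delay between reads */
                        100 * period_us,        /* total timeout */
                        false,                  /* no delay before 1st read */
                        ioaddr);                /* accessor argument(s) */
if (err)
        return err;                             /* -ETIMEDOUT */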
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index ff41aa56564eaa..9796a3cb3ca62c 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2515,26 +2515,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
+
+ static int sdhci_check_ro(struct sdhci_host *host)
+ {
+- unsigned long flags;
++ bool allow_invert = false;
+ int is_readonly;
+
+- spin_lock_irqsave(&host->lock, flags);
+-
+- if (host->flags & SDHCI_DEVICE_DEAD)
++ if (host->flags & SDHCI_DEVICE_DEAD) {
+ is_readonly = 0;
+- else if (host->ops->get_ro)
++ } else if (host->ops->get_ro) {
+ is_readonly = host->ops->get_ro(host);
+- else if (mmc_can_gpio_ro(host->mmc))
++ } else if (mmc_can_gpio_ro(host->mmc)) {
+ is_readonly = mmc_gpio_get_ro(host->mmc);
+- else
++ /* Do not invert twice */
++ allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
++ } else {
+ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ & SDHCI_WRITE_PROTECT);
++ allow_invert = true;
++ }
+
+- spin_unlock_irqrestore(&host->lock, flags);
++ if (is_readonly >= 0 &&
++ allow_invert &&
++ (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
++ is_readonly = !is_readonly;
+
+- /* This quirk needs to be replaced by a callback-function later */
+- return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+- !is_readonly : is_readonly;
++ return is_readonly;
+ }
+
+ #define SAMPLE_COUNT 5
+@@ -3438,12 +3441,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ host->data->error = -EILSEQ;
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
+- } else if ((intmask & SDHCI_INT_DATA_CRC) &&
++ } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
+ SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+ != MMC_BUS_TEST_R) {
+ host->data->error = -EILSEQ;
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
++ if (intmask & SDHCI_INT_TUNING_ERROR) {
++ u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++ ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
++ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++ }
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+ intmask);
+@@ -3978,7 +3987,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+ } else
+ *cmd_error = 0;
+
+- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
++ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
+ *data_error = -EILSEQ;
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ sdhci_err_stats_inc(host, DAT_CRC);
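The sdhci_check_ro() rewrite exists to keep SDHCI_QUIRK_INVERTED_WRITE_PROTECT from inverting a value twice: mmc_gpio_get_ro() already honours MMC_CAP2_RO_ACTIVE_HIGH, and a driver's own ->get_ro() defines its own polarity. Tracking allow_invert per source gives, in table form:

/* value source                     quirk inversion applied?
 * SDHCI_DEVICE_DEAD (forced 0)     no
 * host->ops->get_ro()              no  (driver owns the polarity)
 * RO GPIO, RO_ACTIVE_HIGH set      no  (GPIO core already inverted)
 * RO GPIO, RO_ACTIVE_HIGH clear    yes, when the quirk is set
 * SDHCI_PRESENT_STATE register     yes, when the quirk is set
 */

Dropping host->lock around the reads also matters: with the slot-gpio change earlier in this patch the GPIO path may still reach a sleeping accessor, which must never run under a held spinlock.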
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index f219bdea8f280d..a315cee698094f 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -158,6 +158,7 @@
+ #define SDHCI_INT_BUS_POWER 0x00800000
+ #define SDHCI_INT_AUTO_CMD_ERR 0x01000000
+ #define SDHCI_INT_ADMA_ERROR 0x02000000
++#define SDHCI_INT_TUNING_ERROR 0x04000000
+
+ #define SDHCI_INT_NORMAL_MASK 0x00007FFF
+ #define SDHCI_INT_ERROR_MASK 0xFFFF8000
+@@ -169,7 +170,7 @@
+ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
+ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
+ SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
+- SDHCI_INT_BLK_GAP)
++ SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR)
+ #define SDHCI_INT_ALL_MASK ((unsigned int)-1)
+
+ #define SDHCI_CQE_INT_ERR_MASK ( \
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index c125485ba80e9c..562034af653ebb 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -141,19 +141,26 @@ static const struct timing_data td[] = {
+
+ struct sdhci_am654_data {
+ struct regmap *base;
+- bool legacy_otapdly;
+ int otap_del_sel[ARRAY_SIZE(td)];
+ int itap_del_sel[ARRAY_SIZE(td)];
++ u32 itap_del_ena[ARRAY_SIZE(td)];
+ int clkbuf_sel;
+ int trm_icp;
+ int drv_strength;
+ int strb_sel;
+ u32 flags;
+ u32 quirks;
++ bool dll_enable;
+
+ #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+ };
+
++struct window {
++ u8 start;
++ u8 end;
++ u8 length;
++};
++
+ struct sdhci_am654_driver_data {
+ const struct sdhci_pltfm_data *pdata;
+ u32 flags;
+@@ -233,11 +240,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
+ }
+
+ static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
+- u32 itapdly)
++ u32 itapdly, u32 enable)
+ {
+ /* Set ITAPCHGWIN before writing to ITAPDLY */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 1 << ITAPCHGWIN_SHIFT);
++ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
++ enable << ITAPDLYENA_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
+ itapdly << ITAPDLYSEL_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+@@ -254,8 +263,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
+ mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
+
+- sdhci_am654_write_itapdly(sdhci_am654,
+- sdhci_am654->itap_del_sel[timing]);
++ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
++ sdhci_am654->itap_del_ena[timing]);
+ }
+
+ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+@@ -264,7 +273,6 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ unsigned char timing = host->mmc->ios.timing;
+ u32 otap_del_sel;
+- u32 otap_del_ena;
+ u32 mask, val;
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
+@@ -272,15 +280,10 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ sdhci_set_clock(host, clock);
+
+ /* Setup DLL Output TAP delay */
+- if (sdhci_am654->legacy_otapdly)
+- otap_del_sel = sdhci_am654->otap_del_sel[0];
+- else
+- otap_del_sel = sdhci_am654->otap_del_sel[timing];
+-
+- otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
++ otap_del_sel = sdhci_am654->otap_del_sel[timing];
+
+ mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
+- val = (otap_del_ena << OTAPDLYENA_SHIFT) |
++ val = (0x1 << OTAPDLYENA_SHIFT) |
+ (otap_del_sel << OTAPDLYSEL_SHIFT);
+
+ /* Write to STRBSEL for HS400 speed mode */
+@@ -295,10 +298,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+
+- if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
++ if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) {
+ sdhci_am654_setup_dll(host, clock);
+- else
++ sdhci_am654->dll_enable = true;
++
++ if (timing == MMC_TIMING_MMC_HS400) {
++ sdhci_am654->itap_del_ena[timing] = 0x1;
++ sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1];
++ }
++
++ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
++ sdhci_am654->itap_del_ena[timing]);
++ } else {
+ sdhci_am654_setup_delay_chain(sdhci_am654, timing);
++ sdhci_am654->dll_enable = false;
++ }
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+ sdhci_am654->clkbuf_sel);
+@@ -311,19 +325,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ unsigned char timing = host->mmc->ios.timing;
+ u32 otap_del_sel;
++ u32 itap_del_ena;
++ u32 itap_del_sel;
+ u32 mask, val;
+
+ /* Setup DLL Output TAP delay */
+- if (sdhci_am654->legacy_otapdly)
+- otap_del_sel = sdhci_am654->otap_del_sel[0];
+- else
+- otap_del_sel = sdhci_am654->otap_del_sel[timing];
++ otap_del_sel = sdhci_am654->otap_del_sel[timing];
+
+ mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
+ val = (0x1 << OTAPDLYENA_SHIFT) |
+ (otap_del_sel << OTAPDLYSEL_SHIFT);
+- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+
++ /* Setup Input TAP delay */
++ itap_del_ena = sdhci_am654->itap_del_ena[timing];
++ itap_del_sel = sdhci_am654->itap_del_sel[timing];
++
++ mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK;
++ val |= (itap_del_ena << ITAPDLYENA_SHIFT) |
++ (itap_del_sel << ITAPDLYSEL_SHIFT);
++
++ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
++ 1 << ITAPCHGWIN_SHIFT);
++ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
++ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+ sdhci_am654->clkbuf_sel);
+
+@@ -416,40 +440,105 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+ return 0;
+ }
+
+-#define ITAP_MAX 32
++#define ITAPDLY_LENGTH 32
++#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
++
++static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
++ *fail_window, u8 num_fails, bool circular_buffer)
++{
++ u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
++ u8 first_fail_start = 0, last_fail_end = 0;
++ struct device *dev = mmc_dev(host->mmc);
++ struct window pass_window = {0, 0, 0};
++ int prev_fail_end = -1;
++ u8 i;
++
++ if (!num_fails)
++ return ITAPDLY_LAST_INDEX >> 1;
++
++ if (fail_window->length == ITAPDLY_LENGTH) {
++ dev_err(dev, "No passing ITAPDLY, return 0\n");
++ return 0;
++ }
++
++ first_fail_start = fail_window->start;
++ last_fail_end = fail_window[num_fails - 1].end;
++
++ for (i = 0; i < num_fails; i++) {
++ start_fail = fail_window[i].start;
++ end_fail = fail_window[i].end;
++ pass_length = start_fail - (prev_fail_end + 1);
++
++ if (pass_length > pass_window.length) {
++ pass_window.start = prev_fail_end + 1;
++ pass_window.length = pass_length;
++ }
++ prev_fail_end = end_fail;
++ }
++
++ if (!circular_buffer)
++ pass_length = ITAPDLY_LAST_INDEX - last_fail_end;
++ else
++ pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start;
++
++ if (pass_length > pass_window.length) {
++ pass_window.start = last_fail_end + 1;
++ pass_window.length = pass_length;
++ }
++
++ if (!circular_buffer)
++ itap = pass_window.start + (pass_window.length >> 1);
++ else
++ itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH;
++
++ return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
++}
++
+ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ u32 opcode)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+- int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
+- u32 itap;
++ unsigned char timing = host->mmc->ios.timing;
++ struct window fail_window[ITAPDLY_LENGTH];
++ u8 curr_pass, itap;
++ u8 fail_index = 0;
++ u8 prev_pass = 1;
++
++ memset(fail_window, 0, sizeof(fail_window));
+
+ /* Enable ITAPDLY */
+- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+- 1 << ITAPDLYENA_SHIFT);
++ sdhci_am654->itap_del_ena[timing] = 0x1;
++
++ for (itap = 0; itap < ITAPDLY_LENGTH; itap++) {
++ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
+
+- for (itap = 0; itap < ITAP_MAX; itap++) {
+- sdhci_am654_write_itapdly(sdhci_am654, itap);
++ curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL);
+
+- cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
+- if (cur_val && !prev_val)
+- pass_window = itap;
++ if (!curr_pass && prev_pass)
++ fail_window[fail_index].start = itap;
++
++ if (!curr_pass) {
++ fail_window[fail_index].end = itap;
++ fail_window[fail_index].length++;
++ }
+
+- if (!cur_val)
+- fail_len++;
++ if (curr_pass && !prev_pass)
++ fail_index++;
+
+- prev_val = cur_val;
++ prev_pass = curr_pass;
+ }
+- /*
+- * Having determined the length of the failing window and start of
+- * the passing window calculate the length of the passing window and
+- * set the final value halfway through it considering the range as a
+- * circular buffer
+- */
+- pass_len = ITAP_MAX - fail_len;
+- itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
+- sdhci_am654_write_itapdly(sdhci_am654, itap);
++
++ if (fail_window[fail_index].length != 0)
++ fail_index++;
++
++ itap = sdhci_am654_calculate_itap(host, fail_window, fail_index,
++ sdhci_am654->dll_enable);
++
++ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
++
++ /* Save ITAPDLY */
++ sdhci_am654->itap_del_sel[timing] = itap;
+
+ return 0;
+ }
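
The rewritten tuning loop records every failing window (start, end, length) across the 32 ITAPDLY values, and sdhci_am654_calculate_itap() then picks the midpoint of the largest passing window; when the DLL is enabled the range is treated as circular, so the passing region after the last failure wraps around to join the one before the first. A userspace re-derivation of that selection logic, assuming the same 32-tap range:

        #include <stdio.h>

        #define ITAPDLY_LENGTH     32
        #define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)

        struct window { unsigned char start, end, length; };

        static unsigned int calculate_itap(const struct window *fail, int nfails, int circular)
        {
                struct window pass = { 0, 0, 0 };
                int prev_end = -1, i, len, itap;

                if (!nfails)
                        return ITAPDLY_LAST_INDEX >> 1;  /* all taps pass: take the middle */
                if (fail[0].length == ITAPDLY_LENGTH)
                        return 0;                        /* nothing passes */

                /* Largest gap strictly between failing windows. */
                for (i = 0; i < nfails; i++) {
                        len = fail[i].start - (prev_end + 1);
                        if (len > pass.length) {
                                pass.start = prev_end + 1;
                                pass.length = len;
                        }
                        prev_end = fail[i].end;
                }

                /* Tail gap; with a circular range it joins the head gap. */
                len = ITAPDLY_LAST_INDEX - fail[nfails - 1].end;
                if (circular)
                        len += fail[0].start;
                if (len > pass.length) {
                        pass.start = fail[nfails - 1].end + 1;
                        pass.length = len;
                }

                itap = pass.start + (pass.length >> 1);
                if (circular)
                        itap %= ITAPDLY_LENGTH;
                return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
        }

        int main(void)
        {
                /* Failures at taps 0-4 and 28-31: the pass window is 5..27. */
                struct window fails[] = { { 0, 4, 5 }, { 28, 31, 4 } };

                printf("itap = %u\n", calculate_itap(fails, 2, 1));  /* 5 + 23/2 = 16 */
                return 0;
        }
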
+@@ -577,32 +666,15 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ int i;
+ int ret;
+
+- ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding,
+- &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
+- if (ret) {
+- /*
+- * ti,otap-del-sel-legacy is mandatory, look for old binding
+- * if not found.
+- */
+- ret = device_property_read_u32(dev, "ti,otap-del-sel",
+- &sdhci_am654->otap_del_sel[0]);
+- if (ret) {
+- dev_err(dev, "Couldn't find otap-del-sel\n");
+-
+- return ret;
+- }
+-
+- dev_info(dev, "Using legacy binding ti,otap-del-sel\n");
+- sdhci_am654->legacy_otapdly = true;
+-
+- return 0;
+- }
+-
+- for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
++ for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
+
+ ret = device_property_read_u32(dev, td[i].otap_binding,
+ &sdhci_am654->otap_del_sel[i]);
+ if (ret) {
++ if (i == MMC_TIMING_LEGACY) {
++ dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n");
++ return ret;
++ }
+ dev_dbg(dev, "Couldn't find %s\n",
+ td[i].otap_binding);
+ /*
+@@ -615,9 +687,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ host->mmc->caps2 &= ~td[i].capability;
+ }
+
+- if (td[i].itap_binding)
+- device_property_read_u32(dev, td[i].itap_binding,
+- &sdhci_am654->itap_del_sel[i]);
++ if (td[i].itap_binding) {
++ ret = device_property_read_u32(dev, td[i].itap_binding,
++ &sdhci_am654->itap_del_sel[i]);
++ if (!ret)
++ sdhci_am654->itap_del_ena[i] = 0x1;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index be7f18fd4836ab..c253d176db6918 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
+ else
+ mrq->cmd->error = -ETIMEDOUT;
+
++ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
++ host->mrq = ERR_PTR(-EBUSY);
+ host->cmd = NULL;
+ host->data = NULL;
+
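
Setting host->mrq to ERR_PTR(-EBUSY) in the tmio reset path marks the request slot as "being torn down": a concurrent completion path sees a non-NULL but invalid pointer and backs off. ERR_PTR()/IS_ERR() encode small negative errnos in the last page of the pointer range; a compact, self-contained reproduction of that encoding (mirroring include/linux/err.h):

        #include <stdio.h>

        #define MAX_ERRNO 4095

        static inline void *ERR_PTR(long error) { return (void *)error; }
        static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
        static inline int IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }

        int main(void)
        {
                void *mrq = ERR_PTR(-16 /* EBUSY */);

                if (IS_ERR(mrq))
                        printf("slot busy, err=%ld\n", PTR_ERR(mrq));
                return 0;
        }
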
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 9ec593d52f0fa9..cef0e716ad16fd 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface,
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ } else {
++ retval = -EINVAL;
+ goto error5;
+ }
+ usb_set_intfdata(interface, vub300);
+diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
+index 77d5f1d244899f..860380931b6cde 100644
+--- a/drivers/mmc/host/wmt-sdmmc.c
++++ b/drivers/mmc/host/wmt-sdmmc.c
+@@ -883,7 +883,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
+ {
+ struct mmc_host *mmc;
+ struct wmt_mci_priv *priv;
+- struct resource *res;
+ u32 reg_tmp;
+
+ mmc = platform_get_drvdata(pdev);
+@@ -911,9 +910,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
+ clk_disable_unprepare(priv->clk_sdmmc);
+ clk_put(priv->clk_sdmmc);
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- release_mem_region(res->start, resource_size(res));
+-
+ mmc_free_host(mmc);
+
+ dev_info(&pdev->dev, "WMT MCI device removed\n");
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 11b06fefaa0e29..c10693ba265bae 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
+ extra_size = 0;
+
+ /* Protection Register info */
+- if (extp->NumProtectionFields)
++ if (extp->NumProtectionFields) {
++ struct cfi_intelext_otpinfo *otp =
++ (struct cfi_intelext_otpinfo *)&extp->extra[0];
++
+ extra_size += (extp->NumProtectionFields - 1) *
+- sizeof(struct cfi_intelext_otpinfo);
++ sizeof(struct cfi_intelext_otpinfo);
++
++ if (extp_size >= sizeof(*extp) + extra_size) {
++ int i;
++
++ /* Do some byteswapping if necessary */
++ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
++ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
++ otp->FactGroups = le16_to_cpu(otp->FactGroups);
++ otp->UserGroups = le16_to_cpu(otp->UserGroups);
++ otp++;
++ }
++ }
++ }
+ }
+
+ if (extp->MinorVersion >= '1') {
+diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
+index 36e060386e59df..59e1b3a4406ed1 100644
+--- a/drivers/mtd/devices/powernv_flash.c
++++ b/drivers/mtd/devices/powernv_flash.c
+@@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev,
+ * get them
+ */
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
++ if (!mtd->name)
++ return -ENOMEM;
++
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_WRITEABLE;
+ mtd->size = size;
+diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
+index 28131a127d065e..8297b366a06699 100644
+--- a/drivers/mtd/devices/slram.c
++++ b/drivers/mtd/devices/slram.c
+@@ -296,10 +296,12 @@ static int __init init_slram(void)
+ T("slram: devname = %s\n", devname);
+ if ((!map) || (!(devstart = strsep(&map, ",")))) {
+ E("slram: No devicestart specified.\n");
++ break;
+ }
+ T("slram: devstart = %s\n", devstart);
+ if ((!map) || (!(devlength = strsep(&map, ",")))) {
+ E("slram: No devicelength / -end specified.\n");
++ break;
+ }
+ T("slram: devlength = %s\n", devlength);
+ if (parse_cmdline(devname, devstart, devlength) != 0) {
+diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
+index fc872133928247..9be71b045b259e 100644
+--- a/drivers/mtd/maps/physmap-core.c
++++ b/drivers/mtd/maps/physmap-core.c
+@@ -523,7 +523,7 @@ static int physmap_flash_probe(struct platform_device *dev)
+ if (!info->maps[i].phys)
+ info->maps[i].phys = res->start;
+
+- info->win_order = get_bitmask_order(resource_size(res)) - 1;
++ info->win_order = fls64(resource_size(res)) - 1;
+ info->maps[i].size = BIT(info->win_order +
+ (info->gpios ?
+ info->gpios->ndescs : 0));
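
The physmap fix swaps the 32-bit get_bitmask_order() for fls64(), so resource sizes above 4 GiB are no longer truncated before the order is computed; either way win_order ends up as floor(log2(size)). The same computation in plain C, using a GCC-style builtin:

        #include <stdint.h>
        #include <stdio.h>

        /* floor(log2(size)); mirrors fls64(size) - 1 for size > 0. */
        static int win_order(uint64_t size)
        {
                return 63 - __builtin_clzll(size);
        }

        int main(void)
        {
                printf("%d\n", win_order(0x1000000));  /* 16 MiB -> 24 */
                printf("%d\n", win_order(0x1000001));  /* still 24: rounds down */
                return 0;
        }
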
+diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
+index a7ec947a3ebb1d..53019d313db71d 100644
+--- a/drivers/mtd/maps/vmu-flash.c
++++ b/drivers/mtd/maps/vmu-flash.c
+@@ -719,7 +719,7 @@ static int vmu_can_unload(struct maple_device *mdev)
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mtd = &((card->mtd)[x]);
+- if (mtd->usecount > 0)
++ if (kref_read(&mtd->refcnt))
+ return 0;
+ }
+ return 1;
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index ff18636e088973..5bc32108ca03b0 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -463,7 +463,7 @@ static void blktrans_notify_add(struct mtd_info *mtd)
+ {
+ struct mtd_blktrans_ops *tr;
+
+- if (mtd->type == MTD_ABSENT)
++ if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME)
+ return;
+
+ list_for_each_entry(tr, &blktrans_majors, list)
+@@ -503,7 +503,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+ mutex_lock(&mtd_table_mutex);
+ list_add(&tr->list, &blktrans_majors);
+ mtd_for_each_device(mtd)
+- if (mtd->type != MTD_ABSENT)
++ if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME)
+ tr->add_mtd(tr, mtd);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 9bd661be3ae93d..97ca2a897f1d49 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -552,6 +552,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
+ config.dev = &mtd->dev;
+ config.name = dev_name(&mtd->dev);
+ config.owner = THIS_MODULE;
++ config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
+ config.reg_read = mtd_nvmem_reg_read;
+ config.size = mtd->size;
+ config.word_size = 1;
+@@ -898,6 +899,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ config.name = compatible;
+ config.id = NVMEM_DEVID_AUTO;
+ config.owner = THIS_MODULE;
++ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
+ config.type = NVMEM_TYPE_OTP;
+ config.root_only = true;
+ config.ignore_wp = true;
+@@ -953,8 +955,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+
+ if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
+ size = mtd_otp_size(mtd, true);
+- if (size < 0)
+- return size;
++ if (size < 0) {
++ err = size;
++ goto err;
++ }
+
+ if (size > 0) {
+ nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
+index cbf8ae85e1ae0e..6142573085169a 100644
+--- a/drivers/mtd/nand/raw/Kconfig
++++ b/drivers/mtd/nand/raw/Kconfig
+@@ -234,8 +234,7 @@ config MTD_NAND_FSL_IFC
+ tristate "Freescale IFC NAND controller"
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
+- select FSL_IFC
+- select MEMORY
++ depends on FSL_IFC
+ help
+ Various Freescale chips e.g P1010, include a NAND Flash machine
+ with built-in hardware ECC capabilities.
+diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
+index 5d2ddb037a9a24..2068025d56396a 100644
+--- a/drivers/mtd/nand/raw/diskonchip.c
++++ b/drivers/mtd/nand/raw/diskonchip.c
+@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+ #endif
+ #endif
+- 0xffffffff };
++};
+
+ static struct mtd_info *doclist = NULL;
+
+@@ -1552,7 +1552,7 @@ static int __init init_nanddoc(void)
+ if (ret < 0)
+ return ret;
+ } else {
+- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
++ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
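
The diskonchip change drops the 0xffffffff terminator from doc_locations[] and bounds the probe loop with ARRAY_SIZE() instead, so the array carries no magic value that could ever be probed as an address. The pattern in isolation:

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        static const unsigned long locations[] = { 0xc8000, 0xca000, 0xcc000 };

        int main(void)
        {
                size_t i;

                /* No sentinel needed: the compiler knows the element count. */
                for (i = 0; i < ARRAY_SIZE(locations); i++)
                        printf("probe 0x%lx\n", locations[i]);
                return 0;
        }
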
+diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
+index 20bb1e0cb5ebfd..f0e2318ce088c0 100644
+--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
++++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
+@@ -21,7 +21,7 @@
+
+ #define ERR_BYTE 0xFF /* Value returned for read
+ bytes when read failed */
+-#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
++#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait
+ for IFC NAND Machine */
+
+ struct fsl_ifc_ctrl;
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index cb5d88f42297b2..f0ad2308f6d503 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ ebu_host->cs_num = cs;
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
++ if (!resname) {
++ ret = -ENOMEM;
++ goto err_of_node_put;
++ }
++
+ ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ resname);
+ if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+@@ -649,6 +654,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ }
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
++ if (!resname) {
++ ret = -ENOMEM;
++ goto err_cleanup_dma;
++ }
++
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ if (!res) {
+ ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
+index 488fd452611a66..677fcb03f9bef1 100644
+--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
++++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
+@@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
+ return 0;
+ }
+
+-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
++static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
+ {
++ struct lpc32xx_nand_host *host = data;
+ uint8_t sr;
+
+ /* Clear interrupt flag by reading status */
+@@ -780,7 +781,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
+ goto release_dma_chan;
+ }
+
+- if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
++ if (request_irq(host->irq, &lpc3xxx_nand_irq,
+ IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+ dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+ res = -ENXIO;
+diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
+index b841a81cb12822..93d8c6da555b97 100644
+--- a/drivers/mtd/nand/raw/marvell_nand.c
++++ b/drivers/mtd/nand/raw/marvell_nand.c
+@@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
+ MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
+- MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
+- MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
++ MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+- MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+- MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
+- MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
++ MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30),
++ MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
+- MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
+- MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
+- MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
++ MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30),
++ MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30),
+ };
+
+ /**
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 25e3c1cb605e7f..439e9593c8ed17 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -63,7 +63,7 @@
+ #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
+ ( \
+ (cmd_dir) | \
+- ((ran) << 19) | \
++ (ran) | \
+ ((bch) << 14) | \
+ ((short_mode) << 13) | \
+ (((page_size) & 0x7f) << 6) | \
+@@ -1134,6 +1134,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ init.name = devm_kasprintf(nfc->dev,
+ GFP_KERNEL, "%s#div",
+ dev_name(nfc->dev));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.ops = &clk_divider_ops;
+ nfc_divider_parent_data[0].fw_name = "device";
+ init.parent_data = nfc_divider_parent_data;
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index 29c8bddde67ff6..161a409ca4ed21 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+ return 0;
+ }
+
++static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc)
++{
++ struct mtk_nfc_nand_chip *mtk_chip;
++ struct nand_chip *chip;
++ int ret;
++
++ while (!list_empty(&nfc->chips)) {
++ mtk_chip = list_first_entry(&nfc->chips,
++ struct mtk_nfc_nand_chip, node);
++ chip = &mtk_chip->nand;
++ ret = mtd_device_unregister(nand_to_mtd(chip));
++ WARN_ON(ret);
++ nand_cleanup(chip);
++ list_del(&mtk_chip->node);
++ }
++}
++
+ static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+ {
+ struct device_node *np = dev->of_node;
+- struct device_node *nand_np;
+ int ret;
+
+- for_each_child_of_node(np, nand_np) {
++ for_each_child_of_node_scoped(np, nand_np) {
+ ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+- of_node_put(nand_np);
++ mtk_nfc_nand_chips_cleanup(nfc);
+ return ret;
+ }
+ }
+@@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev)
+ static void mtk_nfc_remove(struct platform_device *pdev)
+ {
+ struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+- struct mtk_nfc_nand_chip *mtk_chip;
+- struct nand_chip *chip;
+- int ret;
+-
+- while (!list_empty(&nfc->chips)) {
+- mtk_chip = list_first_entry(&nfc->chips,
+- struct mtk_nfc_nand_chip, node);
+- chip = &mtk_chip->nand;
+- ret = mtd_device_unregister(nand_to_mtd(chip));
+- WARN_ON(ret);
+- nand_cleanup(chip);
+- list_del(&mtk_chip->node);
+- }
+
++ mtk_nfc_nand_chips_cleanup(nfc);
+ mtk_ecc_release(nfc->ecc);
+ }
+
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index 1fcac403cee60f..7c3e3d70be8b00 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1090,28 +1090,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+ {
+ struct mtd_info *mtd = nand_to_mtd(chip);
++ bool ident_stage = !mtd->writesize;
+
+- /* Make sure the offset is less than the actual page size. */
+- if (offset_in_page > mtd->writesize + mtd->oobsize)
+- return -EINVAL;
++ /* Bypass all checks during NAND identification */
++ if (likely(!ident_stage)) {
++ /* Make sure the offset is less than the actual page size. */
++ if (offset_in_page > mtd->writesize + mtd->oobsize)
++ return -EINVAL;
+
+- /*
+- * On small page NANDs, there's a dedicated command to access the OOB
+- * area, and the column address is relative to the start of the OOB
+-	 * area, not the start of the page. Adjust the address accordingly.
+- */
+- if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+- offset_in_page -= mtd->writesize;
++ /*
++ * On small page NANDs, there's a dedicated command to access the OOB
++ * area, and the column address is relative to the start of the OOB
++	 * area, not the start of the page. Adjust the address accordingly.
++ */
++ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
++ offset_in_page -= mtd->writesize;
+
+- /*
+- * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+- * wide, then it must be divided by 2.
+- */
+- if (chip->options & NAND_BUSWIDTH_16) {
+- if (WARN_ON(offset_in_page % 2))
+- return -EINVAL;
++ /*
++ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
++ * wide, then it must be divided by 2.
++ */
++ if (chip->options & NAND_BUSWIDTH_16) {
++ if (WARN_ON(offset_in_page % 2))
++ return -EINVAL;
+
+- offset_in_page /= 2;
++ offset_in_page /= 2;
++ }
+ }
+
+ addrs[0] = offset_in_page;
+@@ -1120,7 +1124,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2
+ */
+- if (mtd->writesize <= 512)
++ if (!ident_stage && mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+@@ -1208,6 +1212,23 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ return nand_exec_op(chip, &op);
+ }
+
++static void rawnand_cap_cont_reads(struct nand_chip *chip)
++{
++ struct nand_memory_organization *memorg;
++ unsigned int pages_per_lun, first_lun, last_lun;
++
++ memorg = nanddev_get_memorg(&chip->base);
++ pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
++ first_lun = chip->cont_read.first_page / pages_per_lun;
++ last_lun = chip->cont_read.last_page / pages_per_lun;
++
++ /* Prevent sequential cache reads across LUN boundaries */
++ if (first_lun != last_lun)
++ chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
++ else
++ chip->cont_read.pause_page = chip->cont_read.last_page;
++}
++
+ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool check_only)
+@@ -1226,7 +1247,7 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_op_instr cont_instrs[] = {
+- NAND_OP_CMD(page == chip->cont_read.last_page ?
++ NAND_OP_CMD(page == chip->cont_read.pause_page ?
+ NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+@@ -1263,16 +1284,29 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
+ }
+
+ if (page == chip->cont_read.first_page)
+- return nand_exec_op(chip, &start_op);
++ ret = nand_exec_op(chip, &start_op);
+ else
+- return nand_exec_op(chip, &cont_op);
++ ret = nand_exec_op(chip, &cont_op);
++ if (ret)
++ return ret;
++
++ if (!chip->cont_read.ongoing)
++ return 0;
++
++ if (page == chip->cont_read.pause_page &&
++ page != chip->cont_read.last_page) {
++ chip->cont_read.first_page = chip->cont_read.pause_page + 1;
++ rawnand_cap_cont_reads(chip);
++ } else if (page == chip->cont_read.last_page) {
++ chip->cont_read.ongoing = false;
++ }
++
++ return 0;
+ }
+
+ static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
+ {
+- return chip->cont_read.ongoing &&
+- page >= chip->cont_read.first_page &&
+- page <= chip->cont_read.last_page;
++ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
+ }
+
+ /**
+@@ -1389,16 +1423,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int len, bool force_8bit)
+ {
+ struct mtd_info *mtd = nand_to_mtd(chip);
++ bool ident_stage = !mtd->writesize;
+
+ if (len && !buf)
+ return -EINVAL;
+
+- if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+- return -EINVAL;
++ if (!ident_stage) {
++ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++ return -EINVAL;
+
+- /* Small page NANDs do not support column change. */
+- if (mtd->writesize <= 512)
+- return -ENOTSUPP;
++ /* Small page NANDs do not support column change. */
++ if (mtd->writesize <= 512)
++ return -ENOTSUPP;
++ }
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_interface_config *conf =
+@@ -2124,7 +2161,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
+ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only)
+ {
+- if (!len || !buf)
++ if (!len || (!check_only && !buf))
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip)) {
+@@ -3431,21 +3468,48 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
+ u32 readlen, int col)
+ {
+ struct mtd_info *mtd = nand_to_mtd(chip);
++ unsigned int first_page, last_page;
++
++ chip->cont_read.ongoing = false;
+
+ if (!chip->controller->supported_op.cont_read)
+ return;
+
+- if ((col && col + readlen < (3 * mtd->writesize)) ||
+- (!col && readlen < (2 * mtd->writesize))) {
+- chip->cont_read.ongoing = false;
++ /*
++ * Don't bother making any calculations if the length is too small.
++ * Side effect: avoids possible integer underflows below.
++ */
++ if (readlen < (2 * mtd->writesize))
+ return;
+- }
+
+- chip->cont_read.ongoing = true;
+- chip->cont_read.first_page = page;
++ /* Derive the page where continuous read should start (the first full page read) */
++ first_page = page;
+ if (col)
++ first_page++;
++
++ /* Derive the page where continuous read should stop (the last full page read) */
++ last_page = page + ((col + readlen) / mtd->writesize) - 1;
++
++ /* Configure and enable continuous read when suitable */
++ if (first_page < last_page) {
++ chip->cont_read.first_page = first_page;
++ chip->cont_read.last_page = last_page;
++ chip->cont_read.ongoing = true;
++ /* May reset the ongoing flag */
++ rawnand_cap_cont_reads(chip);
++ }
++}
++
++static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
++{
++ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
++ return;
++
++ chip->cont_read.first_page++;
++ if (chip->cont_read.first_page == chip->cont_read.pause_page)
+ chip->cont_read.first_page++;
+- chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
++ if (chip->cont_read.first_page >= chip->cont_read.last_page)
++ chip->cont_read.ongoing = false;
+ }
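
Taken together, rawnand_enable_cont_reads() now derives the first and last *full* pages covered by the request (a leading partial page bumps first_page when col != 0, and last_page comes from how many whole pages col + readlen spans), while rawnand_cap_cont_reads() pauses the sequence at a LUN boundary, since sequential cache reads cannot cross LUNs. A worked sketch of that arithmetic with assumed geometry (4 KiB pages, 65536 pages per LUN; the numbers are illustrative, not from the patch):

        #include <stdio.h>

        #define WRITESIZE     4096u
        #define PAGES_PER_LUN (64u * 1024u)

        int main(void)
        {
                unsigned int page = 65532, col = 0, readlen = 8 * WRITESIZE;
                unsigned int first = page + (col ? 1 : 0);
                unsigned int last = page + (col + readlen) / WRITESIZE - 1;
                unsigned int pause;

                /* Pause at the end of the first LUN if the span crosses into the next. */
                if (first / PAGES_PER_LUN != last / PAGES_PER_LUN)
                        pause = (first / PAGES_PER_LUN) * PAGES_PER_LUN + PAGES_PER_LUN - 1;
                else
                        pause = last;

                printf("first=%u last=%u pause=%u\n", first, last, pause);
                /* first=65532 last=65539 pause=65535: the read resumes from 65536 */
                return 0;
        }
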
+
+ /**
+@@ -3521,7 +3585,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
+ oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
+
+- rawnand_enable_cont_reads(chip, page, readlen, col);
++ if (likely(ops->mode != MTD_OPS_RAW))
++ rawnand_enable_cont_reads(chip, page, readlen, col);
+
+ while (1) {
+ struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
+@@ -3622,6 +3687,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagecache.bitflips);
++
++ rawnand_cont_read_skip_first_page(chip, page);
+ }
+
+ readlen -= bytes;
+@@ -5126,9 +5193,26 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
+ /* The supported_op fields should not be set by individual drivers */
+ WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+
++ /*
++ * Too many devices do not support sequential cached reads with on-die
++ * ECC correction enabled, so in this case refuse to perform the
++ * automation.
++ */
++ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
++ return;
++
+ if (!nand_has_exec_op(chip))
+ return;
+
++ /*
++ * For now, continuous reads can only be used with the core page helpers.
++ * This can be extended later.
++ */
++ if (!(chip->ecc.read_page == nand_read_page_hwecc ||
++ chip->ecc.read_page == nand_read_page_syndrome ||
++ chip->ecc.read_page == nand_read_page_swecc))
++ return;
++
+ rawnand_check_cont_read_support(chip);
+ }
+
+@@ -6205,6 +6289,7 @@ static const struct nand_ops rawnand_ops = {
+ static int nand_scan_tail(struct nand_chip *chip)
+ {
+ struct mtd_info *mtd = nand_to_mtd(chip);
++ struct nand_device *base = &chip->base;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i;
+
+@@ -6349,9 +6434,13 @@ static int nand_scan_tail(struct nand_chip *chip)
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
+
+- /* propagate ecc info to mtd_info */
++ /* Propagate ECC info to the generic NAND and MTD layers */
+ mtd->ecc_strength = ecc->strength;
++ if (!base->ecc.ctx.conf.strength)
++ base->ecc.ctx.conf.strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
++ if (!base->ecc.ctx.conf.step_size)
++ base->ecc.ctx.conf.step_size = ecc->size;
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+@@ -6359,6 +6448,8 @@ static int nand_scan_tail(struct nand_chip *chip)
+ */
+ if (!ecc->steps)
+ ecc->steps = mtd->writesize / ecc->size;
++ if (!base->ecc.ctx.nsteps)
++ base->ecc.ctx.nsteps = ecc->steps;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ WARN(1, "Invalid ECC parameters\n");
+ ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
+index 39076735a3fbb0..9695f07b5eb26c 100644
+--- a/drivers/mtd/nand/raw/nand_hynix.c
++++ b/drivers/mtd/nand/raw/nand_hynix.c
+@@ -402,7 +402,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
+ if (ret)
+ pr_warn("failed to initialize read-retry infrastructure");
+
+- return 0;
++ return ret;
+ }
+
+ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index b079605c84d382..b8cff9240b286c 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ instrs = 3;
+- } else {
++ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
+ return 0;
+ }
+
+@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
+- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
+- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++ if (q_op.cmd_reg == OP_BLOCK_ERASE)
++ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index 5bc90ffa721f0d..2a95dd63b8c203 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -420,13 +420,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+ u32 rate, tc2rw, trwpw, trw2c;
+ u32 temp;
+
+- if (target < 0)
+- return 0;
+-
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -EOPNOTSUPP;
+
++ if (target < 0)
++ return 0;
++
+ if (IS_ERR(nfc->nfc_clk))
+ rate = clk_get_rate(nfc->ahb_clk);
+ else
+diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
+index eb0b9d16e8dae9..a553e3ac8ff41d 100644
+--- a/drivers/mtd/nand/raw/tegra_nand.c
++++ b/drivers/mtd/nand/raw/tegra_nand.c
+@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
++ if (ctrl->irq < 0) {
++ err = ctrl->irq;
++ goto err_put_pm;
++ }
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
+index 31c439a557b184..4597a82de23a45 100644
+--- a/drivers/mtd/nand/spi/esmt.c
++++ b/drivers/mtd/nand/spi/esmt.c
+@@ -104,7 +104,8 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
+
+ static const struct spinand_info esmt_c8_spinand_table[] = {
+ SPINAND_INFO("F50L1G41LB",
+- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
++ 0x7f, 0x7f),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+@@ -113,7 +114,8 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_INFO("F50D1G41LB",
+- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
++ 0x7f, 0x7f),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+@@ -122,7 +124,8 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_INFO("F50D2G41KA",
+- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
++ 0x7f, 0x7f),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
+index 987710e09441ad..6023cba748bb85 100644
+--- a/drivers/mtd/nand/spi/gigadevice.c
++++ b/drivers/mtd/nand/spi/gigadevice.c
+@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ {
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+- &status2);
++ spinand->scratchbuf);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ * report the maximum of 4 in this case
+ */
+ /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
++ status2 = *(spinand->scratchbuf);
+ return ((status & STATUS_ECC_MASK) >> 2) |
+ ((status2 & STATUS_ECC_MASK) >> 4);
+
+@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+ {
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+- &status2);
++ spinand->scratchbuf);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+ * 1 ... 4 bits are flipped (and corrected)
+ */
+ /* bits sorted this way (1...0): ECCSE1, ECCSE0 */
++ status2 = *(spinand->scratchbuf);
+ return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
+
+ case STATUS_ECC_UNCOR_ERROR:
+diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
+index a16b42a8858168..3b55b676ca6b9c 100644
+--- a/drivers/mtd/parsers/redboot.c
++++ b/drivers/mtd/parsers/redboot.c
+@@ -102,7 +102,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
+ offset -= master->erasesize;
+ }
+ } else {
+- offset = directory * master->erasesize;
++ offset = (unsigned long) directory * master->erasesize;
+ while (mtd_block_isbad(master, offset)) {
+ offset += master->erasesize;
+ if (offset == master->size)
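
The redboot fix is a classic promotion bug: `directory` and `master->erasesize` are both 32-bit, so their product is computed in 32 bits and can wrap before being assigned to the wider offset type; casting one operand first forces the multiplication to happen at the wider width (the patch casts to unsigned long, which suffices on the 64-bit kernels this targets). Demonstrated in isolation:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                int directory = 70000;        /* erase-block index */
                uint32_t erasesize = 0x10000; /* 64 KiB */

                uint64_t wrong = directory * erasesize;           /* 32-bit multiply, wraps */
                uint64_t right = (uint64_t)directory * erasesize; /* widened before multiplying */

                printf("wrong=0x%llx right=0x%llx\n",
                       (unsigned long long)wrong, (unsigned long long)right);
                return 0;
        }
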
+diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
+index 5de0378f90dbdc..7dae831ee8b6bf 100644
+--- a/drivers/mtd/tests/Makefile
++++ b/drivers/mtd/tests/Makefile
+@@ -1,19 +1,19 @@
+ # SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
+-obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
++obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o mtd_test.o
++obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o mtd_test.o
+
+-mtd_oobtest-objs := oobtest.o mtd_test.o
+-mtd_pagetest-objs := pagetest.o mtd_test.o
+-mtd_readtest-objs := readtest.o mtd_test.o
+-mtd_speedtest-objs := speedtest.o mtd_test.o
+-mtd_stresstest-objs := stresstest.o mtd_test.o
+-mtd_subpagetest-objs := subpagetest.o mtd_test.o
+-mtd_torturetest-objs := torturetest.o mtd_test.o
+-mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
++mtd_oobtest-objs := oobtest.o
++mtd_pagetest-objs := pagetest.o
++mtd_readtest-objs := readtest.o
++mtd_speedtest-objs := speedtest.o
++mtd_stresstest-objs := stresstest.o
++mtd_subpagetest-objs := subpagetest.o
++mtd_torturetest-objs := torturetest.o
++mtd_nandbiterrs-objs := nandbiterrs.o
+diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
+index c84250beffdc91..f391e0300cdc94 100644
+--- a/drivers/mtd/tests/mtd_test.c
++++ b/drivers/mtd/tests/mtd_test.c
+@@ -25,6 +25,7 @@ int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_erase_eraseblock);
+
+ static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+ {
+@@ -57,6 +58,7 @@ int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_scan_for_bad_eraseblocks);
+
+ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+ unsigned int eb, int ebcnt)
+@@ -75,6 +77,7 @@ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_erase_good_eraseblocks);
+
+ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+ {
+@@ -92,6 +95,7 @@ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+
+ return err;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_read);
+
+ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+ const void *buf)
+@@ -107,3 +111,8 @@ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+
+ return err;
+ }
++EXPORT_SYMBOL_GPL(mtdtest_write);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MTD function test helpers");
++MODULE_AUTHOR("Akinobu Mita");
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 655ff41863e2be..3b71924f492091 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1560,6 +1560,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
++ kfree(scan_eba[i]);
+ goto out_free;
+ }
+
+@@ -1595,7 +1596,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ }
+
+ out_free:
+- for (i = 0; i < num_volumes; i++) {
++ while (--i >= 0) {
+ if (!ubi->volumes[i])
+ continue;
+
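
Switching the eba cleanup loop from `for (i = 0; i < num_volumes; i++)` to `while (--i >= 0)` makes it unwind exactly the iterations that completed: on the error path only entries allocated so far are freed, and the kfree(scan_eba[i]) added above covers the half-finished iteration. The general partial-cleanup pattern, self-contained:

        #include <stdio.h>
        #include <stdlib.h>

        #define N 8

        int main(void)
        {
                char *bufs[N];
                int i;

                for (i = 0; i < N; i++) {
                        bufs[i] = malloc(64);
                        if (!bufs[i])
                                goto out_free;  /* a mid-loop failure leaves i partial */
                }
                printf("all %d allocated\n", N);
                /* success: i == N here, so the unwind below frees everything */

        out_free:
                /* Unwind only what the forward loop actually completed. */
                while (--i >= 0)
                        free(bufs[i]);
                return 0;
        }
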
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 28c8151a0725d5..2cdc29483aee0d 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+- (sizeof(struct ubi_fm_eba) +
+- (ubi->peb_count * sizeof(__be32))) +
+- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
++ ((sizeof(struct ubi_fm_eba) +
++ sizeof(struct ubi_fm_volhdr)) *
++ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
++ (ubi->peb_count * sizeof(__be32));
+ return roundup(size, ubi->leb_size);
+ }
+
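
The old ubi_calc_fm_size() formula reserved a single ubi_fm_eba header plus one peb_count-sized LEB array and multiplied only the volume header by UBI_MAX_VOLUMES; the corrected one reserves a ubi_fm_volhdr *and* a ubi_fm_eba per possible volume, including the UBI_INT_VOL_COUNT internal volumes, while the __be32 LEB entries still total peb_count across all volumes. Plugging in sample numbers shows the difference (the struct sizes below are placeholders, not the real on-flash layouts):

        #include <stdio.h>

        int main(void)
        {
                /* Placeholder sizes and counts, purely illustrative. */
                unsigned long volhdr = 64, eba = 32, be32 = 4;
                unsigned long peb_count = 1024, max_vols = 128, int_vols = 1;

                unsigned long old_sz = (eba + peb_count * be32) + volhdr * max_vols;
                unsigned long new_sz = (eba + volhdr) * (max_vols + int_vols) +
                                       peb_count * be32;

                printf("old=%lu new=%lu\n", old_sz, new_sz);
                return 0;
        }
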
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index f700f0e4f2ec4d..6e5489e233dd2a 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ * The number of supported volumes is limited by the eraseblock size
+ * and by the UBI_MAX_VOLUMES constant.
+ */
++
++ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
++ ubi_err(ubi, "LEB size too small for a volume record");
++ return -EINVAL;
++ }
++
+ ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ ubi->vtbl_slots = UBI_MAX_VOLUMES;
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index e26f98f897c55d..e15939e77122b9 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -48,7 +48,9 @@ obj-$(CONFIG_ARCNET) += arcnet/
+ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
+ obj-$(CONFIG_CAIF) += caif/
+ obj-$(CONFIG_CAN) += can/
+-obj-$(CONFIG_NET_DSA) += dsa/
++ifdef CONFIG_NET_DSA
++obj-y += dsa/
++endif
+ obj-$(CONFIG_ETHERNET) += ethernet/
+ obj-$(CONFIG_FDDI) += fddi/
+ obj-$(CONFIG_HIPPI) += hippi/
+diff --git a/drivers/net/amt.c b/drivers/net/amt.c
+index 2d20be6ffb7e59..ddd087c2c3ed45 100644
+--- a/drivers/net/amt.c
++++ b/drivers/net/amt.c
+@@ -11,7 +11,7 @@
+ #include <linux/net.h>
+ #include <linux/igmp.h>
+ #include <linux/workqueue.h>
+-#include <net/sch_generic.h>
++#include <net/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/ip.h>
+ #include <net/udp.h>
+@@ -80,11 +80,11 @@ static struct mld2_grec mldv2_zero_grec;
+
+ static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
+ {
+- BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
++ BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+
+ return (struct amt_skb_cb *)((void *)skb->cb +
+- sizeof(struct qdisc_skb_cb));
++ sizeof(struct tc_skb_cb));
+ }
+
+ static void __amt_source_gc_work(void)
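
The amt change swaps qdisc_skb_cb for the larger tc_skb_cb in both the BUILD_BUG_ON and the offset arithmetic, since the TC layer actually stores a tc_skb_cb in skb->cb and amt stacks its own control block behind it. BUILD_BUG_ON is a compile-time assertion; the C11 equivalent, with stand-in sizes (48 is the real size of skb->cb, the two struct sizes here are invented for the sketch):

        #include <stddef.h>

        #define SKB_CB_SIZE 48  /* sizeof_field(struct sk_buff, cb) in the kernel */

        struct tc_cb  { char data[28]; };  /* stand-in for struct tc_skb_cb */
        struct amt_cb { char data[16]; };  /* stand-in for struct amt_skb_cb */

        /* Fails to compile if the two control blocks cannot share skb->cb. */
        _Static_assert(sizeof(struct tc_cb) + sizeof(struct amt_cb) <= SKB_CB_SIZE,
                       "amt_cb does not fit behind tc_cb in skb->cb");

        int main(void) { return 0; }
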
+diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
+index 19e996a829c9db..b54275389f8acf 100644
+--- a/drivers/net/arcnet/arcdevice.h
++++ b/drivers/net/arcnet/arcdevice.h
+@@ -186,6 +186,8 @@ do { \
+ #define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
+ #define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
+ but default is 2.5MBit. */
++#define ARC_HAS_LED 4 /* card has software controlled LEDs */
++#define ARC_HAS_ROTARY 8 /* card has rotary encoder */
+
+ /* information needed to define an encapsulation driver */
+ struct ArcProto {
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index c580acb8b1d34e..7b5c8bb02f1194 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -213,12 +213,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
+ lp->backplane = 1;
+
+- /* Get the dev_id from the PLX rotary coder */
+- if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+- dev_id_mask = 0x3;
+- dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+-
+- snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
++ if (ci->flags & ARC_HAS_ROTARY) {
++ /* Get the dev_id from the PLX rotary coder */
++ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
++ dev_id_mask = 0x3;
++ dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
++ snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
++ }
+
+ if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
+ pr_err("IO address %Xh is empty!\n", ioaddr);
+@@ -230,6 +231,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ goto err_free_arcdev;
+ }
+
++ ret = com20020_found(dev, IRQF_SHARED);
++ if (ret)
++ goto err_free_arcdev;
++
+ card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
+ GFP_KERNEL);
+ if (!card) {
+@@ -239,41 +244,39 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ card->index = i;
+ card->pci_priv = priv;
+- card->tx_led.brightness_set = led_tx_set;
+- card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+- GFP_KERNEL, "arc%d-%d-tx",
+- dev->dev_id, i);
+- card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+- "pci:green:tx:%d-%d",
+- dev->dev_id, i);
+-
+- card->tx_led.dev = &dev->dev;
+- card->recon_led.brightness_set = led_recon_set;
+- card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+- GFP_KERNEL, "arc%d-%d-recon",
+- dev->dev_id, i);
+- card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+- "pci:red:recon:%d-%d",
+- dev->dev_id, i);
+- card->recon_led.dev = &dev->dev;
+- card->dev = dev;
+-
+- ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+- if (ret)
+- goto err_free_arcdev;
+
+- ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+- if (ret)
+- goto err_free_arcdev;
+-
+- dev_set_drvdata(&dev->dev, card);
+-
+- ret = com20020_found(dev, IRQF_SHARED);
+- if (ret)
+- goto err_free_arcdev;
+-
+- devm_arcnet_led_init(dev, dev->dev_id, i);
++ if (ci->flags & ARC_HAS_LED) {
++ card->tx_led.brightness_set = led_tx_set;
++ card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
++ GFP_KERNEL, "arc%d-%d-tx",
++ dev->dev_id, i);
++ card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++ "pci:green:tx:%d-%d",
++ dev->dev_id, i);
++
++ card->tx_led.dev = &dev->dev;
++ card->recon_led.brightness_set = led_recon_set;
++ card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
++ GFP_KERNEL, "arc%d-%d-recon",
++ dev->dev_id, i);
++ card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++ "pci:red:recon:%d-%d",
++ dev->dev_id, i);
++ card->recon_led.dev = &dev->dev;
++
++ ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
++ if (ret)
++ goto err_free_arcdev;
++
++ ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
++ if (ret)
++ goto err_free_arcdev;
++
++ dev_set_drvdata(&dev->dev, card);
++ devm_arcnet_led_init(dev, dev->dev_id, i);
++ }
+
++ card->dev = dev;
+ list_add(&card->list, &priv->list_dev);
+ continue;
+
+@@ -329,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ };
+
+ static struct com20020_pci_card_info card_info_sohard = {
+- .name = "PLX-PCI",
++ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+ /* SOHARD needs PCI base addr 4 */
+ .chan_map_tbl = {
+@@ -364,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
+ },
+ },
+ .rotary = 0x0,
+- .flags = ARC_CAN_10MBIT,
++ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+
+ static struct com20020_pci_card_info card_info_eae_ma1 = {
+@@ -396,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
+ },
+ },
+ .rotary = 0x0,
+- .flags = ARC_CAN_10MBIT,
++ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+
+ static struct com20020_pci_card_info card_info_eae_fb2 = {
+@@ -421,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
+ },
+ },
+ .rotary = 0x0,
+- .flags = ARC_CAN_10MBIT,
++ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+
+ static const struct pci_device_id com20020pci_id_table[] = {
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index 683203f87ae2b2..54767154de265c 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -67,6 +67,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ __be16 proto;
+ void *oiph;
+ int err;
++ int nh;
+
+ bareudp = rcu_dereference_sk_user_data(sk);
+ if (!bareudp)
+@@ -82,7 +83,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+
+ if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+ sizeof(ipversion))) {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ ipversion >>= 4;
+@@ -92,7 +93,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ } else if (ipversion == 6 && bareudp->multi_proto_mode) {
+ proto = htons(ETH_P_IPV6);
+ } else {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
+@@ -106,7 +107,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ ipv4_is_multicast(tunnel_hdr->daddr)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ } else {
+@@ -122,7 +123,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ (addr_type & IPV6_ADDR_MULTICAST)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ }
+@@ -134,20 +135,35 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ proto,
+ !net_eq(bareudp->net,
+ dev_net(bareudp->dev)))) {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+ if (!tun_dst) {
+- bareudp->dev->stats.rx_dropped++;
++ DEV_STATS_INC(bareudp->dev, rx_dropped);
+ goto drop;
+ }
+ skb_dst_set(skb, &tun_dst->dst);
+ skb->dev = bareudp->dev;
+- oiph = skb_network_header(skb);
+- skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+
++ /* Save offset of outer header relative to skb->head,
++ * because we are going to reset the network header to the inner header
++ * and might change skb->head.
++ */
++ nh = skb_network_header(skb) - skb->head;
++
++ skb_reset_network_header(skb);
++
++ if (!pskb_inet_may_pull(skb)) {
++ DEV_STATS_INC(bareudp->dev, rx_length_errors);
++ DEV_STATS_INC(bareudp->dev, rx_errors);
++ goto drop;
++ }
++
++ /* Get the outer header. */
++ oiph = skb->head + nh;
++
+ if (!ipv6_mod_enabled() || family == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+ else
+@@ -165,8 +181,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ &((struct ipv6hdr *)oiph)->saddr);
+ }
+ if (err > 1) {
+- ++bareudp->dev->stats.rx_frame_errors;
+- ++bareudp->dev->stats.rx_errors;
++ DEV_STATS_INC(bareudp->dev, rx_frame_errors);
++ DEV_STATS_INC(bareudp->dev, rx_errors);
+ goto drop;
+ }
+ }
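
The bareudp fix stores the outer header's position as an offset from skb->head before calling pskb_inet_may_pull(), because pulling may reallocate the head and leave a previously taken skb_network_header() pointer dangling; the pointer is recomputed from the saved offset afterwards. The same discipline applies to any buffer that can be reallocated, e.g. with realloc():

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int main(void)
        {
                char *buf = malloc(16);
                if (!buf)
                        return 1;
                strcpy(buf, "outer-hdr");

                size_t hdr_off = 0;  /* save an offset, never a pointer */

                char *grown = realloc(buf, 1 << 20);  /* may move the allocation */
                if (!grown) {
                        free(buf);
                        return 1;
                }
                buf = grown;

                char *hdr = buf + hdr_off;  /* recompute after reallocation */
                printf("%s\n", hdr);
                free(buf);
                return 0;
        }
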
+@@ -303,6 +319,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be32 saddr;
+ int err;
+
++ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
++ return -EINVAL;
++
+ if (!sock)
+ return -ESHUTDOWN;
+
+@@ -366,6 +385,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
++ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
++ return -EINVAL;
++
+ if (!sock)
+ return -ESHUTDOWN;
+
+@@ -462,11 +484,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_kfree_skb(skb);
+
+ if (err == -ELOOP)
+- dev->stats.collisions++;
++ DEV_STATS_INC(dev, collisions);
+ else if (err == -ENETUNREACH)
+- dev->stats.tx_carrier_errors++;
++ DEV_STATS_INC(dev, tx_carrier_errors);
+
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ return NETDEV_TX_OK;
+ }
+
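
Two independent fixes share the bareudp.c hunk above. The DEV_STATS_INC() conversions replace plain increments of dev->stats fields, which can lose updates when several receive contexts race, with atomic per-field increments. The oiph change saves the outer header as an offset rather than a pointer, because pskb_inet_may_pull() may reallocate skb->head. A minimal sketch of that save-offset pattern follows; outer_header_after_pull() is a hypothetical name, not part of the patch:

#include <linux/skbuff.h>
#include <net/ip_tunnels.h>

/* Return a pointer to the outer header that stays valid even if
 * pskb_inet_may_pull() reallocates skb->head.
 */
static void *outer_header_after_pull(struct sk_buff *skb)
{
	int nh = skb_network_header(skb) - skb->head; /* save the offset */

	skb_reset_network_header(skb);	/* network header = inner header */

	if (!pskb_inet_may_pull(skb))	/* may move skb->head */
		return NULL;

	return skb->head + nh;		/* recompute from the offset */
}
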
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index dc2c7b97965634..7edf0fd58c3463 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -985,7 +985,8 @@ static int alb_upper_dev_walk(struct net_device *upper,
+ if (netif_is_macvlan(upper) && !strict_match) {
+ tags = bond_verify_device_path(bond->dev, upper, 0);
+ if (IS_ERR_OR_NULL(tags))
+- BUG();
++ return -ENOMEM;
++
+ alb_send_lp_vid(slave, upper->dev_addr,
+ tags[0].vlan_proto, tags[0].vlan_id);
+ kfree(tags);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 51d47eda1c873d..14b4780b73c724 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -427,6 +427,8 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
+ {
+ struct net_device *bond_dev = xs->xso.dev;
++ struct net_device *real_dev;
++ netdevice_tracker tracker;
+ struct bond_ipsec *ipsec;
+ struct bonding *bond;
+ struct slave *slave;
+@@ -438,74 +440,80 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
+ rcu_read_lock();
+ bond = netdev_priv(bond_dev);
+ slave = rcu_dereference(bond->curr_active_slave);
+- if (!slave) {
+- rcu_read_unlock();
+- return -ENODEV;
++ real_dev = slave ? slave->dev : NULL;
++ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
++ rcu_read_unlock();
++ if (!real_dev) {
++ err = -ENODEV;
++ goto out;
+ }
+
+- if (!slave->dev->xfrmdev_ops ||
+- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+- netif_is_bond_master(slave->dev)) {
++ if (!real_dev->xfrmdev_ops ||
++ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
++ netif_is_bond_master(real_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
+- rcu_read_unlock();
+- return -EINVAL;
++ err = -EINVAL;
++ goto out;
+ }
+
+- ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
++ ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec) {
+- rcu_read_unlock();
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto out;
+ }
+- xs->xso.real_dev = slave->dev;
+
+- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
++ xs->xso.real_dev = real_dev;
++ err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ if (!err) {
+ ipsec->xs = xs;
+ INIT_LIST_HEAD(&ipsec->list);
+- spin_lock_bh(&bond->ipsec_lock);
++ mutex_lock(&bond->ipsec_lock);
+ list_add(&ipsec->list, &bond->ipsec_list);
+- spin_unlock_bh(&bond->ipsec_lock);
++ mutex_unlock(&bond->ipsec_lock);
+ } else {
+ kfree(ipsec);
+ }
+- rcu_read_unlock();
++out:
++ netdev_put(real_dev, &tracker);
+ return err;
+ }
+
+ static void bond_ipsec_add_sa_all(struct bonding *bond)
+ {
+ struct net_device *bond_dev = bond->dev;
++ struct net_device *real_dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+- rcu_read_lock();
+- slave = rcu_dereference(bond->curr_active_slave);
+- if (!slave)
+- goto out;
++ slave = rtnl_dereference(bond->curr_active_slave);
++ real_dev = slave ? slave->dev : NULL;
++ if (!real_dev)
++ return;
+
+- if (!slave->dev->xfrmdev_ops ||
+- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+- netif_is_bond_master(slave->dev)) {
+- spin_lock_bh(&bond->ipsec_lock);
++ mutex_lock(&bond->ipsec_lock);
++ if (!real_dev->xfrmdev_ops ||
++ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
++ netif_is_bond_master(real_dev)) {
+ if (!list_empty(&bond->ipsec_list))
+- slave_warn(bond_dev, slave->dev,
++ slave_warn(bond_dev, real_dev,
+ "%s: no slave xdo_dev_state_add\n",
+ __func__);
+- spin_unlock_bh(&bond->ipsec_lock);
+ goto out;
+ }
+
+- spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+- ipsec->xs->xso.real_dev = slave->dev;
+- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+- slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
++ /* Skip if a new state was already added before ipsec_lock was acquired */
++ if (ipsec->xs->xso.real_dev == real_dev)
++ continue;
++
++ ipsec->xs->xso.real_dev = real_dev;
++ if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
++ slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ }
+- spin_unlock_bh(&bond->ipsec_lock);
+ out:
+- rcu_read_unlock();
++ mutex_unlock(&bond->ipsec_lock);
+ }
+
+ /**
+@@ -515,6 +523,8 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
+ static void bond_ipsec_del_sa(struct xfrm_state *xs)
+ {
+ struct net_device *bond_dev = xs->xso.dev;
++ struct net_device *real_dev;
++ netdevice_tracker tracker;
+ struct bond_ipsec *ipsec;
+ struct bonding *bond;
+ struct slave *slave;
+@@ -525,6 +535,9 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
+ rcu_read_lock();
+ bond = netdev_priv(bond_dev);
+ slave = rcu_dereference(bond->curr_active_slave);
++ real_dev = slave ? slave->dev : NULL;
++ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
++ rcu_read_unlock();
+
+ if (!slave)
+ goto out;
+@@ -532,18 +545,19 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
+ if (!xs->xso.real_dev)
+ goto out;
+
+- WARN_ON(xs->xso.real_dev != slave->dev);
++ WARN_ON(xs->xso.real_dev != real_dev);
+
+- if (!slave->dev->xfrmdev_ops ||
+- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+- netif_is_bond_master(slave->dev)) {
+- slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
++ if (!real_dev->xfrmdev_ops ||
++ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
++ netif_is_bond_master(real_dev)) {
++ slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
+ goto out;
+ }
+
+- slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
++ real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+ out:
+- spin_lock_bh(&bond->ipsec_lock);
++ netdev_put(real_dev, &tracker);
++ mutex_lock(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+@@ -551,41 +565,72 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
+ break;
+ }
+ }
+- spin_unlock_bh(&bond->ipsec_lock);
+- rcu_read_unlock();
++ mutex_unlock(&bond->ipsec_lock);
+ }
+
+ static void bond_ipsec_del_sa_all(struct bonding *bond)
+ {
+ struct net_device *bond_dev = bond->dev;
++ struct net_device *real_dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+- rcu_read_lock();
+- slave = rcu_dereference(bond->curr_active_slave);
+- if (!slave) {
+- rcu_read_unlock();
++ slave = rtnl_dereference(bond->curr_active_slave);
++ real_dev = slave ? slave->dev : NULL;
++ if (!real_dev)
+ return;
+- }
+
+- spin_lock_bh(&bond->ipsec_lock);
++ mutex_lock(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (!ipsec->xs->xso.real_dev)
+ continue;
+
+- if (!slave->dev->xfrmdev_ops ||
+- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+- netif_is_bond_master(slave->dev)) {
+- slave_warn(bond_dev, slave->dev,
++ if (!real_dev->xfrmdev_ops ||
++ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
++ netif_is_bond_master(real_dev)) {
++ slave_warn(bond_dev, real_dev,
+ "%s: no slave xdo_dev_state_delete\n",
+ __func__);
+ } else {
+- slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
++ real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
++ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
++ real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+ }
+- ipsec->xs->xso.real_dev = NULL;
+ }
+- spin_unlock_bh(&bond->ipsec_lock);
++ mutex_unlock(&bond->ipsec_lock);
++}
++
++static void bond_ipsec_free_sa(struct xfrm_state *xs)
++{
++ struct net_device *bond_dev = xs->xso.dev;
++ struct net_device *real_dev;
++ netdevice_tracker tracker;
++ struct bonding *bond;
++ struct slave *slave;
++
++ if (!bond_dev)
++ return;
++
++ rcu_read_lock();
++ bond = netdev_priv(bond_dev);
++ slave = rcu_dereference(bond->curr_active_slave);
++ real_dev = slave ? slave->dev : NULL;
++ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
++
++ if (!slave)
++ goto out;
++
++ if (!xs->xso.real_dev)
++ goto out;
++
++ WARN_ON(xs->xso.real_dev != real_dev);
++
++ if (real_dev && real_dev->xfrmdev_ops &&
++ real_dev->xfrmdev_ops->xdo_dev_state_free)
++ real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
++out:
++ netdev_put(real_dev, &tracker);
+ }
+
+ /**
+@@ -599,39 +644,36 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+ struct net_device *real_dev;
+ struct slave *curr_active;
+ struct bonding *bond;
+- int err;
++ bool ok = false;
+
+ bond = netdev_priv(bond_dev);
+ rcu_read_lock();
+ curr_active = rcu_dereference(bond->curr_active_slave);
++ if (!curr_active)
++ goto out;
+ real_dev = curr_active->dev;
+
+- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+- err = false;
++ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ goto out;
+- }
+
+- if (!xs->xso.real_dev) {
+- err = false;
++ if (!xs->xso.real_dev)
+ goto out;
+- }
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+- netif_is_bond_master(real_dev)) {
+- err = false;
++ netif_is_bond_master(real_dev))
+ goto out;
+- }
+
+- err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
++ ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ out:
+ rcu_read_unlock();
+- return err;
++ return ok;
+ }
+
+ static const struct xfrmdev_ops bond_xfrmdev_ops = {
+ .xdo_dev_state_add = bond_ipsec_add_sa,
+ .xdo_dev_state_delete = bond_ipsec_del_sa,
++ .xdo_dev_state_free = bond_ipsec_free_sa,
+ .xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ };
+ #endif /* CONFIG_XFRM_OFFLOAD */
+@@ -1121,13 +1163,10 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
+ return bestslave;
+ }
+
++/* must be called in RCU critical section or with RTNL held */
+ static bool bond_should_notify_peers(struct bonding *bond)
+ {
+- struct slave *slave;
+-
+- rcu_read_lock();
+- slave = rcu_dereference(bond->curr_active_slave);
+- rcu_read_unlock();
++ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
+
+ if (!slave || !bond->send_peer_notif ||
+ bond->send_peer_notif %
+@@ -1500,6 +1539,10 @@ static void bond_compute_features(struct bonding *bond)
+ static void bond_setup_by_slave(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+ {
++ bool was_up = !!(bond_dev->flags & IFF_UP);
++
++ dev_close(bond_dev);
++
+ bond_dev->header_ops = slave_dev->header_ops;
+
+ bond_dev->type = slave_dev->type;
+@@ -1514,6 +1557,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
+ bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ }
++ if (was_up)
++ dev_open(bond_dev, NULL);
+ }
+
+ /* On bonding slaves other than the currently active slave, suppress
+@@ -1805,7 +1850,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
+
+ ASSERT_RTNL();
+
+- if (!bond_xdp_check(bond)) {
++ if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
+ xdp_clear_features_flag(bond_dev);
+ return;
+ }
+@@ -1813,6 +1858,8 @@ void bond_xdp_set_features(struct net_device *bond_dev)
+ bond_for_each_slave(bond, slave, iter)
+ val &= slave->dev->xdp_features;
+
++ val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY;
++
+ xdp_set_features_flag(bond_dev, val);
+ }
+
+@@ -5487,9 +5534,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
+ break;
+
+ default:
+- /* Should never happen. Mode guarded by bond_xdp_check() */
+- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
+- WARN_ON_ONCE(1);
++ if (net_ratelimit())
++ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
++ BOND_MODE(bond));
+ return NULL;
+ }
+
+@@ -5897,7 +5944,7 @@ void bond_setup(struct net_device *bond_dev)
+ /* set up xfrm device ops (only supported in active-backup right now) */
+ bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
+ INIT_LIST_HEAD(&bond->ipsec_list);
+- spin_lock_init(&bond->ipsec_lock);
++ mutex_init(&bond->ipsec_lock);
+ #endif /* CONFIG_XFRM_OFFLOAD */
+
+ /* don't acquire bond device's netif_tx_lock when transmitting */
+@@ -5928,9 +5975,6 @@ void bond_setup(struct net_device *bond_dev)
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
+ bond_dev->features |= BOND_XFRM_FEATURES;
+ #endif /* CONFIG_XFRM_OFFLOAD */
+-
+- if (bond_xdp_check(bond))
+- bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
+ }
+
+ /* Destroy a bonding device.
+@@ -5949,6 +5993,10 @@ static void bond_uninit(struct net_device *bond_dev)
+ __bond_release_one(bond_dev, slave->dev, true, true);
+ netdev_info(bond_dev, "Released all slaves\n");
+
++#ifdef CONFIG_XFRM_OFFLOAD
++ mutex_destroy(&bond->ipsec_lock);
++#endif /* CONFIG_XFRM_OFFLOAD */
++
+ bond_set_slave_arr(bond, NULL, NULL);
+
+ list_del(&bond->bond_list);
+@@ -6479,16 +6527,16 @@ static int __init bonding_init(void)
+ if (res)
+ goto out;
+
++ bond_create_debugfs();
++
+ res = register_pernet_subsys(&bond_net_ops);
+ if (res)
+- goto out;
++ goto err_net_ops;
+
+ res = bond_netlink_init();
+ if (res)
+ goto err_link;
+
+- bond_create_debugfs();
+-
+ for (i = 0; i < max_bonds; i++) {
+ res = bond_create(&init_net, NULL);
+ if (res)
+@@ -6503,10 +6551,11 @@ static int __init bonding_init(void)
+ out:
+ return res;
+ err:
+- bond_destroy_debugfs();
+ bond_netlink_fini();
+ err_link:
+ unregister_pernet_subsys(&bond_net_ops);
++err_net_ops:
++ bond_destroy_debugfs();
+ goto out;
+
+ }
+@@ -6515,11 +6564,11 @@ static void __exit bonding_exit(void)
+ {
+ unregister_netdevice_notifier(&bond_netdev_notifier);
+
+- bond_destroy_debugfs();
+-
+ bond_netlink_fini();
+ unregister_pernet_subsys(&bond_net_ops);
+
++ bond_destroy_debugfs();
++
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ /* Make sure we don't have an imbalance on our netpoll blocking */
+ WARN_ON(atomic_read(&netpoll_block_tx));
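
The bond_main.c IPsec changes above all follow one idiom: the old code did everything under rcu_read_lock(), which forbids sleeping and forced GFP_ATOMIC allocations and a spinlock; the new code takes a tracked reference on the active slave's device inside the RCU section and leaves it immediately, after which GFP_KERNEL and the new ipsec_lock mutex are legal. A condensed sketch; bond_hold_active_dev() is a hypothetical name, and the pattern relies on netdev_hold()/netdev_put() tolerating a NULL device:

#include <net/bonding.h>

/* Pin the active slave's device so the caller may sleep; release with
 * netdev_put(real_dev, tracker) when done.
 */
static struct net_device *bond_hold_active_dev(struct bonding *bond,
					       netdevice_tracker *tracker)
{
	struct net_device *real_dev;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	real_dev = slave ? slave->dev : NULL;
	netdev_hold(real_dev, tracker, GFP_ATOMIC);	/* NULL-tolerant */
	rcu_read_unlock();

	return real_dev;	/* may be NULL */
}
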
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index f3f27f0bd2a6cd..d1208d058eea18 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -920,7 +920,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
+ /* check to see if we are clearing active */
+ if (!slave_dev) {
+ netdev_dbg(bond->dev, "Clearing current active slave\n");
+- RCU_INIT_POINTER(bond->curr_active_slave, NULL);
++ bond_change_active_slave(bond, NULL);
+ bond_select_active_slave(bond);
+ } else {
+ struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
+@@ -1198,9 +1198,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ __be32 target;
+
+ if (newval->string) {
+- if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+- netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+- &target);
++ if (strlen(newval->string) < 1 ||
++ !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
++ netdev_err(bond->dev, "invalid ARP target specified\n");
+ return ret;
+ }
+ if (newval->string[0] == '+')
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+index 7f9334a8af5000..735d5de3caa0e3 100644
+--- a/drivers/net/can/dev/dev.c
++++ b/drivers/net/can/dev/dev.c
+@@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev)
+ struct can_frame *cf;
+ int err;
+
+- BUG_ON(netif_carrier_ok(dev));
++ if (netif_carrier_ok(dev))
++ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
+
+ /* No synchronization needed because the device is bus-off and
+ * no messages can come in or go out.
+@@ -153,11 +154,12 @@ static void can_restart(struct net_device *dev)
+ priv->can_stats.restarts++;
+
+ /* Now restart the device */
+- err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+ netif_carrier_on(dev);
+- if (err)
++ err = priv->do_set_mode(dev, CAN_MODE_START);
++ if (err) {
+ netdev_err(dev, "Error %d during restart", err);
++ netif_carrier_off(dev);
++ }
+ }
+
+ static void can_restart_work(struct work_struct *work)
+diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
+index 036d85ef07f5ba..01aacdcda26066 100644
+--- a/drivers/net/can/dev/netlink.c
++++ b/drivers/net/can/dev/netlink.c
+@@ -65,15 +65,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ if (!data)
+ return 0;
+
+- if (data[IFLA_CAN_BITTIMING]) {
+- struct can_bittiming bt;
+-
+- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+- err = can_validate_bittiming(&bt, extack);
+- if (err)
+- return err;
+- }
+-
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
+@@ -114,6 +105,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ }
+ }
+
++ if (data[IFLA_CAN_BITTIMING]) {
++ struct can_bittiming bt;
++
++ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
++ err = can_validate_bittiming(&bt, extack);
++ if (err)
++ return err;
++ }
++
+ if (is_can_fd) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return -EOPNOTSUPP;
+@@ -195,48 +195,6 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+- if (data[IFLA_CAN_BITTIMING]) {
+- struct can_bittiming bt;
+-
+- /* Do not allow changing bittiming while running */
+- if (dev->flags & IFF_UP)
+- return -EBUSY;
+-
+- /* Calculate bittiming parameters based on
+- * bittiming_const if set, otherwise pass bitrate
+- * directly via do_set_bitrate(). Bail out if neither
+- * is given.
+- */
+- if (!priv->bittiming_const && !priv->do_set_bittiming &&
+- !priv->bitrate_const)
+- return -EOPNOTSUPP;
+-
+- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+- err = can_get_bittiming(dev, &bt,
+- priv->bittiming_const,
+- priv->bitrate_const,
+- priv->bitrate_const_cnt,
+- extack);
+- if (err)
+- return err;
+-
+- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+- NL_SET_ERR_MSG_FMT(extack,
+- "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+- bt.bitrate, priv->bitrate_max);
+- return -EINVAL;
+- }
+-
+- memcpy(&priv->bittiming, &bt, sizeof(bt));
+-
+- if (priv->do_set_bittiming) {
+- /* Finally, set the bit-timing registers */
+- err = priv->do_set_bittiming(dev);
+- if (err)
+- return err;
+- }
+- }
+-
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic;
+@@ -284,6 +242,48 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
+ }
+
++ if (data[IFLA_CAN_BITTIMING]) {
++ struct can_bittiming bt;
++
++ /* Do not allow changing bittiming while running */
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++
++ /* Calculate bittiming parameters based on
++ * bittiming_const if set, otherwise pass bitrate
++ * directly via do_set_bitrate(). Bail out if neither
++ * is given.
++ */
++ if (!priv->bittiming_const && !priv->do_set_bittiming &&
++ !priv->bitrate_const)
++ return -EOPNOTSUPP;
++
++ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
++ err = can_get_bittiming(dev, &bt,
++ priv->bittiming_const,
++ priv->bitrate_const,
++ priv->bitrate_const_cnt,
++ extack);
++ if (err)
++ return err;
++
++ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
++ NL_SET_ERR_MSG_FMT(extack,
++ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
++ bt.bitrate, priv->bitrate_max);
++ return -EINVAL;
++ }
++
++ memcpy(&priv->bittiming, &bt, sizeof(bt));
++
++ if (priv->do_set_bittiming) {
++ /* Finally, set the bit-timing registers */
++ err = priv->do_set_bittiming(dev);
++ if (err)
++ return err;
++ }
++ }
++
+ if (data[IFLA_CAN_RESTART_MS]) {
+ /* Do not allow changing restart delay while running */
+ if (dev->flags & IFF_UP)
+@@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ /* Neither of TDC parameters nor TDC flags are
+ * provided: do calculation
+ */
+- can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
++ can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
+ &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
+index f6d05b3ef59abf..3ebd4f779b9bdf 100644
+--- a/drivers/net/can/dev/skb.c
++++ b/drivers/net/can/dev/skb.c
+@@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- BUG_ON(idx >= priv->echo_skb_max);
++ if (idx >= priv->echo_skb_max) {
++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++ __func__, idx, priv->echo_skb_max);
++ return -EINVAL;
++ }
+
+ /* check flag whether this packet has to be looped back */
+ if (!(dev->flags & IFF_ECHO) ||
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index a57005faa04f51..c490b4ba065ba4 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -1580,23 +1580,15 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
+ return res;
+ }
+
+-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
++static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ {
+ u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ kvaser_pciefd_read_buffer(pcie, 0);
+- /* Reset DMA buffer 0 */
+- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+- }
+
+- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
++ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ kvaser_pciefd_read_buffer(pcie, 1);
+- /* Reset DMA buffer 1 */
+- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+- }
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+@@ -1605,6 +1597,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+ dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
++ return irq;
+ }
+
+ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+@@ -1631,27 +1624,31 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+ {
+ struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
++ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
++ u32 srb_irq = 0;
++ u32 srb_release = 0;
+ int i;
+
+- if (!(board_irq & irq_mask->all))
++ if (!(pci_irq & irq_mask->all))
+ return IRQ_NONE;
+
+- if (board_irq & irq_mask->kcan_rx0)
+- kvaser_pciefd_receive_irq(pcie);
++ if (pci_irq & irq_mask->kcan_rx0)
++ srb_irq = kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+- if (!pcie->can[i]) {
+- dev_err(&pcie->pci->dev,
+- "IRQ mask points to unallocated controller\n");
+- break;
+- }
+-
+- /* Check that mask matches channel (i) IRQ mask */
+- if (board_irq & irq_mask->kcan_tx[i])
++ if (pci_irq & irq_mask->kcan_tx[i])
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
++ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
++
++ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
++ srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
++
++ if (srb_release)
++ iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 16ecc11c7f62af..97666a7595959d 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -418,6 +418,13 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
+
+ static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
+ {
++ if (!cdev->net->irq) {
++ dev_dbg(cdev->dev, "Start hrtimer\n");
++ hrtimer_start(&cdev->hrtimer,
++ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
++ HRTIMER_MODE_REL_PINNED);
++ }
++
+ /* Only interrupt line 0 is used in this driver */
+ m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
+ }
+@@ -425,6 +432,11 @@ static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
+ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
+ {
+ m_can_write(cdev, M_CAN_ILE, 0x0);
++
++ if (!cdev->net->irq) {
++ dev_dbg(cdev->dev, "Stop hrtimer\n");
++ hrtimer_cancel(&cdev->hrtimer);
++ }
+ }
+
+ /* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
+@@ -1417,12 +1429,6 @@ static int m_can_start(struct net_device *dev)
+
+ m_can_enable_all_interrupts(cdev);
+
+- if (!dev->irq) {
+- dev_dbg(cdev->dev, "Start hrtimer\n");
+- hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+- HRTIMER_MODE_REL_PINNED);
+- }
+-
+ return 0;
+ }
+
+@@ -1577,11 +1583,6 @@ static void m_can_stop(struct net_device *dev)
+ {
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+- if (!dev->irq) {
+- dev_dbg(cdev->dev, "Stop hrtimer\n");
+- hrtimer_cancel(&cdev->hrtimer);
+- }
+-
+ /* disable all interrupts */
+ m_can_disable_all_interrupts(cdev);
+
+@@ -1598,11 +1599,7 @@ static int m_can_close(struct net_device *dev)
+
+ netif_stop_queue(dev);
+
+- if (!cdev->is_peripheral)
+- napi_disable(&cdev->napi);
+-
+ m_can_stop(dev);
+- m_can_clk_stop(cdev);
+ free_irq(dev->irq, dev);
+
+ if (cdev->is_peripheral) {
+@@ -1610,10 +1607,13 @@ static int m_can_close(struct net_device *dev)
+ destroy_workqueue(cdev->tx_wq);
+ cdev->tx_wq = NULL;
+ can_rx_offload_disable(&cdev->offload);
++ } else {
++ napi_disable(&cdev->napi);
+ }
+
+ close_candev(dev);
+
++ m_can_clk_stop(cdev);
+ phy_power_off(cdev->transceiver);
+
+ return 0;
+@@ -1841,6 +1841,8 @@ static int m_can_open(struct net_device *dev)
+
+ if (cdev->is_peripheral)
+ can_rx_offload_enable(&cdev->offload);
++ else
++ napi_enable(&cdev->napi);
+
+ /* register interrupt handler */
+ if (cdev->is_peripheral) {
+@@ -1870,21 +1872,23 @@ static int m_can_open(struct net_device *dev)
+ /* start the m_can controller */
+ err = m_can_start(dev);
+ if (err)
+- goto exit_irq_fail;
+-
+- if (!cdev->is_peripheral)
+- napi_enable(&cdev->napi);
++ goto exit_start_fail;
+
+ netif_start_queue(dev);
+
+ return 0;
+
++exit_start_fail:
++ if (cdev->is_peripheral || dev->irq)
++ free_irq(dev->irq, dev);
+ exit_irq_fail:
+ if (cdev->is_peripheral)
+ destroy_workqueue(cdev->tx_wq);
+ out_wq_fail:
+ if (cdev->is_peripheral)
+ can_rx_offload_disable(&cdev->offload);
++ else
++ napi_disable(&cdev->napi);
+ close_candev(dev);
+ exit_disable_clks:
+ m_can_clk_stop(cdev);
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 79c4bab5f7246e..8c56f85e87c1af 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -753,7 +753,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
+ int ret;
+
+ /* Force wakeup interrupt to wake device, but don't execute IST */
+- disable_irq(spi->irq);
++ disable_irq_nosync(spi->irq);
+ mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+
+ /* Wait for oscillator startup timer after wake up */
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index eebf967f4711a8..6fecfe4cd08041 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ // Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+
+ mcp251xfd_chip_interrupts_disable(priv);
+ mcp251xfd_chip_rx_int_disable(priv);
++ mcp251xfd_timestamp_stop(priv);
+ mcp251xfd_chip_sleep(priv);
+ }
+
+@@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
+ if (err)
+ goto out_chip_stop;
+
++ mcp251xfd_timestamp_start(priv);
++
+ err = mcp251xfd_set_bittiming(priv);
+ if (err)
+ goto out_chip_stop;
+@@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
+
+ return 0;
+
+- out_chip_stop:
++out_chip_stop:
+ mcp251xfd_dump(priv);
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+
+@@ -867,18 +870,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
+
+ static struct sk_buff *
+ mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
+- struct can_frame **cf, u32 *timestamp)
++ struct can_frame **cf, u32 *ts_raw)
+ {
+ struct sk_buff *skb;
+ int err;
+
+- err = mcp251xfd_get_timestamp(priv, timestamp);
++ err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
+ if (err)
+ return NULL;
+
+ skb = alloc_can_err_skb(priv->ndev, cf);
+ if (skb)
+- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
++ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
+
+ return skb;
+ }
+@@ -889,7 +892,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
+ struct mcp251xfd_rx_ring *ring;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+- u32 timestamp, rxovif;
++ u32 ts_raw, rxovif;
+ int err, i;
+
+ stats->rx_over_errors++;
+@@ -924,14 +927,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
+ return err;
+ }
+
+- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
++ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
+ if (!skb)
+ return 0;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ if (err)
+ stats->rx_fifo_errors++;
+
+@@ -948,12 +951,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
+ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
+ {
+ struct net_device_stats *stats = &priv->ndev->stats;
+- u32 bdiag1, timestamp;
++ u32 bdiag1, ts_raw;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ int err;
+
+- err = mcp251xfd_get_timestamp(priv, &timestamp);
++ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
+ if (err)
+ return err;
+
+@@ -1035,8 +1038,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
+ if (!cf)
+ return 0;
+
+- mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
+- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
++ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ if (err)
+ stats->rx_fifo_errors++;
+
+@@ -1049,7 +1052,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ enum can_state new_state, rx_state, tx_state;
+- u32 trec, timestamp;
++ u32 trec, ts_raw;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
+@@ -1079,7 +1082,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
++ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+@@ -1110,7 +1113,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+ cf->data[7] = bec.rxerr;
+ }
+
+- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
++ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
+ if (err)
+ stats->rx_fifo_errors++;
+
+@@ -1576,7 +1579,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
+ handled = IRQ_HANDLED;
+ } while (1);
+
+- out_fail:
++out_fail:
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
+ netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
+@@ -1610,19 +1613,29 @@ static int mcp251xfd_open(struct net_device *ndev)
+ if (err)
+ goto out_mcp251xfd_ring_free;
+
++ mcp251xfd_timestamp_init(priv);
++
+ err = mcp251xfd_chip_start(priv);
+ if (err)
+ goto out_transceiver_disable;
+
+- mcp251xfd_timestamp_init(priv);
+ clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ can_rx_offload_enable(&priv->offload);
+
++ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
++ WQ_FREEZABLE | WQ_MEM_RECLAIM,
++ dev_name(&spi->dev));
++ if (!priv->wq) {
++ err = -ENOMEM;
++ goto out_can_rx_offload_disable;
++ }
++ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
++
+ err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&spi->dev), priv);
+ if (err)
+- goto out_can_rx_offload_disable;
++ goto out_destroy_workqueue;
+
+ err = mcp251xfd_chip_interrupts_enable(priv);
+ if (err)
+@@ -1632,20 +1645,21 @@ static int mcp251xfd_open(struct net_device *ndev)
+
+ return 0;
+
+- out_free_irq:
++out_free_irq:
+ free_irq(spi->irq, priv);
+- out_can_rx_offload_disable:
++out_destroy_workqueue:
++ destroy_workqueue(priv->wq);
++out_can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+- mcp251xfd_timestamp_stop(priv);
+- out_transceiver_disable:
++out_transceiver_disable:
+ mcp251xfd_transceiver_disable(priv);
+- out_mcp251xfd_ring_free:
++out_mcp251xfd_ring_free:
+ mcp251xfd_ring_free(priv);
+- out_pm_runtime_put:
++out_pm_runtime_put:
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+- out_close_candev:
++out_close_candev:
+ close_candev(ndev);
+
+ return err;
+@@ -1661,8 +1675,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
+ hrtimer_cancel(&priv->tx_irq_timer);
+ mcp251xfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
++ destroy_workqueue(priv->wq);
+ can_rx_offload_disable(&priv->offload);
+- mcp251xfd_timestamp_stop(priv);
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ mcp251xfd_transceiver_disable(priv);
+ mcp251xfd_ring_free(priv);
+@@ -1808,9 +1822,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ *effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ *effective_speed_hz_fast = xfer[1].effective_speed_hz;
+
+- out_kfree_buf_tx:
++out_kfree_buf_tx:
+ kfree(buf_tx);
+- out_kfree_buf_rx:
++out_kfree_buf_rx:
+ kfree(buf_rx);
+
+ return err;
+@@ -1924,13 +1938,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
+
+ return 0;
+
+- out_unregister_candev:
++out_unregister_candev:
+ unregister_candev(ndev);
+- out_chip_sleep:
++out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+- out_runtime_disable:
++out_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+- out_runtime_put_noidle:
++out_runtime_put_noidle:
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp251xfd_clks_and_vdd_disable(priv);
+
+@@ -2150,9 +2164,9 @@ static int mcp251xfd_probe(struct spi_device *spi)
+
+ return 0;
+
+- out_can_rx_offload_del:
++out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+- out_free_candev:
++out_free_candev:
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+
+ free_candev(ndev);
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+index 004eaf96262bfd..050321345304be 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
+ kfree(buf);
+ }
+
+- out:
++out:
+ mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
+ }
+
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+index 9e8e82cdba4612..61b0d6fa52dd80 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
+@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+- num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
++ /* If the ring parameters have been configured in
++ * CAN-CC mode, but we are in CAN-FD mode now, they
++ * might be too big. Use the default CAN-FD values
++ * in this case.
++ */
++ num_rx = ring->rx_pending;
++ if (num_rx > layout->max_rx)
++ num_rx = layout->default_rx;
++
++ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+index 92b7bc7f14b9eb..65150e76200720 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+@@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context,
+
+ return err;
+ }
+- out:
++out:
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+index bfe4caa0c99d45..83c18035b2a24d 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+@@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+ int i, j;
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
++ rx_ring->last_valid = timecounter_read(&priv->tc);
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->base = *base;
+@@ -289,7 +290,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u16 base = 0, ram_used;
+ u8 fifo_nr = 1;
+- int i;
++ int err = 0, i;
+
+ netdev_reset_queue(priv->ndev);
+
+@@ -385,10 +386,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+ netdev_err(priv->ndev,
+ "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ ram_used, MCP251XFD_RAM_SIZE);
+- return -ENOMEM;
++ err = -ENOMEM;
+ }
+
+- return 0;
++ if (priv->tx_obj_num_coalesce_irq &&
++ priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
++ netdev_err(priv->ndev,
++ "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
++ priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
++ err = -EINVAL;
++ }
++
++ return err;
+ }
+
+ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+@@ -468,11 +477,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
++ const struct ethtool_ringparam ring = {
++ .rx_pending = priv->rx_obj_num,
++ .tx_pending = priv->tx->obj_num,
++ };
++ const struct ethtool_coalesce ec = {
++ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
++ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
++ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
++ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
++ };
+ struct can_ram_layout layout;
+
+- can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+- priv->rx_obj_num = layout.default_rx;
+- tx_ring->obj_num = layout.default_tx;
++ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
++
++ priv->rx_obj_num = layout.cur_rx;
++ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
++
++ tx_ring->obj_num = layout.cur_tx;
++ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ }
+
+ if (fd_mode) {
+@@ -485,6 +508,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
+ }
+
++ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
++ ilog2(tx_ring->obj_num);
+ tx_ring->obj_size = tx_obj_size;
+
+ rem = priv->rx_obj_num;
+@@ -507,6 +532,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+ }
+
+ rx_ring->obj_num = rx_obj_num;
++ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
++ ilog2(rx_obj_num);
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+ }
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+index ced8d9c81f8c6b..fe897f3e4c12a2 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ // Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -16,23 +16,14 @@
+
+ #include "mcp251xfd.h"
+
+-static inline int
+-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+- const struct mcp251xfd_rx_ring *ring,
+- u8 *rx_head, bool *fifo_empty)
++static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
+ {
+- u32 fifo_sta;
+- int err;
+-
+- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+- &fifo_sta);
+- if (err)
+- return err;
+-
+- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+- *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
++ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
++}
+
+- return 0;
++static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
++{
++ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+ }
+
+ static inline int
+@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ }
+
+ static int
+-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+- struct mcp251xfd_rx_ring *ring)
++mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
++ const struct mcp251xfd_rx_ring *ring,
++ u8 *len_p)
+ {
+- u32 new_head;
+- u8 chip_rx_head;
+- bool fifo_empty;
++ const u8 shift = ring->obj_num_shift_to_u8;
++ u8 chip_head, tail, len;
++ u32 fifo_sta;
+ int err;
+
+- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
+- &fifo_empty);
+- if (err || fifo_empty)
++ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
++ &fifo_sta);
++ if (err)
++ return err;
++
++ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
++ *len_p = 0;
++ return 0;
++ }
++
++ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
++ *len_p = ring->obj_num;
++ return 0;
++ }
++
++ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
++
++ err = mcp251xfd_check_rx_tail(priv, ring);
++ if (err)
+ return err;
++ tail = mcp251xfd_get_rx_tail(ring);
+
+- /* chip_rx_head, is the next RX-Object filled by the HW.
+- * The new RX head must be >= the old head.
++ /* First shift to a full u8. The subtraction works on signed
++ * values, which keeps the difference steady around the u8
++ * overflow. The right shift acts on len, which is a u8.
+ */
+- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+- if (new_head <= ring->head)
+- new_head += ring->obj_num;
++ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
++ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
++ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
+
+- ring->head = new_head;
++ len = (chip_head << shift) - (tail << shift);
++ *len_p = len >> shift;
+
+- return mcp251xfd_check_rx_tail(priv, ring);
++ return 0;
+ }
+
+ static void
+@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+
+ if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+-
+- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
+ }
+
+ static int
+@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
++ u64 timestamp;
+ int err;
+
++ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
++ * bits of a FIFOSTA register (here the RX FIFO head index)
++ * might be corrupted, and we might process past the RX FIFO's
++ * head into old CAN frames.
++ *
++ * Compare the timestamp of the currently processed CAN frame
++ * with that of the last valid frame received. Abort with
++ * -EBADMSG if an old CAN frame is detected.
++ */
++ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
++ if (timestamp <= ring->last_valid) {
++ stats->rx_fifo_errors++;
++
++ return -EBADMSG;
++ }
++ ring->last_valid = timestamp;
++
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ return 0;
+ }
+
++ mcp251xfd_skb_set_timestamp(skb, timestamp);
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+@@ -197,52 +225,81 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ return err;
+ }
+
++static int
++mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
++ struct mcp251xfd_rx_ring *ring,
++ u8 len)
++{
++ int offset;
++ int err;
++
++ if (!len)
++ return 0;
++
++ ring->head += len;
++
++ /* Increment the RX FIFO tail pointer 'len' times in a
++ * single SPI message.
++ *
++ * Note:
++ * Calculate offset, so that the SPI transfer ends on
++ * the last message of the uinc_xfer array, which has
++ * "cs_change == 0", to properly deactivate the chip
++ * select.
++ */
++ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
++ err = spi_sync_transfer(priv->spi,
++ ring->uinc_xfer + offset, len);
++ if (err)
++ return err;
++
++ ring->tail += len;
++
++ return 0;
++}
++
+ static int
+ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+ {
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+- u8 rx_tail, len;
++ u8 rx_tail, len, l;
+ int err, i;
+
+- err = mcp251xfd_rx_ring_update(priv, ring);
++ err = mcp251xfd_get_rx_len(priv, ring, &len);
+ if (err)
+ return err;
+
+- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+- int offset;
+-
++ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+- rx_tail, len);
++ rx_tail, l);
+ if (err)
+ return err;
+
+- for (i = 0; i < len; i++) {
++ for (i = 0; i < l; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+- if (err)
++
++ /* -EBADMSG means we're affected by mcp2518fd
++ * erratum DS80000789E 6., i.e. the timestamp
++ * in the RX object is older than that of the
++ * last valid received CAN frame. Don't process
++ * any further and mark processed frames as good.
++ */
++ if (err == -EBADMSG)
++ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
++ else if (err)
+ return err;
+ }
+
+- /* Increment the RX FIFO tail pointer 'len' times in a
+- * single SPI message.
+- *
+- * Note:
+- * Calculate offset, so that the SPI transfer ends on
+- * the last message of the uinc_xfer array, which has
+- * "cs_change == 0", to properly deactivate the chip
+- * select.
+- */
+- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+- err = spi_sync_transfer(priv->spi,
+- ring->uinc_xfer + offset, len);
++ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
+ if (err)
+ return err;
+
+- ring->tail += len;
++ len -= l;
+ }
+
+ return 0;
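
The length computation in mcp251xfd_get_rx_len() above (and its TEF counterpart further down) packs a subtle trick: obj_num is a power of two and shift = 8 - ilog2(obj_num), so scaling head and tail up to the full u8 range lets ordinary u8 wrap-around compute (head - tail) mod obj_num without a branch; the separate FIFO-full check exists because head == tail would otherwise read as empty. A standalone toy model in plain C; fifo_fill_level() is a hypothetical name:

#include <stdio.h>

/* Compute (head - tail) mod obj_num, where obj_num = 256 >> shift,
 * using u8 wrap-around instead of an explicit modulo.
 */
static unsigned char fifo_fill_level(unsigned char head, unsigned char tail,
				     unsigned char shift)
{
	unsigned char len = (head << shift) - (tail << shift);

	return len >> shift;
}

int main(void)
{
	/* obj_num = 16 -> shift = 8 - ilog2(16) = 4; head wrapped past tail */
	printf("%u\n", fifo_fill_level(2, 14, 4)); /* prints 4 = (2 - 14) mod 16 */
	return 0;
}
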
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+index e5bd57b65aafed..f732556d233a7b 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2019, 2020, 2021 Pengutronix,
++// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ // Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+ // Based on:
+@@ -16,6 +16,11 @@
+
+ #include "mcp251xfd.h"
+
++static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
++{
++ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
++}
++
+ static inline int
+ mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+@@ -55,61 +60,44 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+ return 0;
+ }
+
+-static int
+-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+-{
+- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+- u32 tef_sta;
+- int err;
+-
+- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+- if (err)
+- return err;
+-
+- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+- netdev_err(priv->ndev,
+- "Transmit Event FIFO buffer overflow.\n");
+- return -ENOBUFS;
+- }
+-
+- netdev_info(priv->ndev,
+- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
+- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+- "not empty" : "empty",
+- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
+-
+- /* The Sequence Number in the TEF doesn't match our tef_tail. */
+- return -EAGAIN;
+-}
+-
+ static int
+ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
+ {
+ struct net_device_stats *stats = &priv->ndev->stats;
++ u32 seq, tef_tail_masked, tef_tail;
+ struct sk_buff *skb;
+- u32 seq, seq_masked, tef_tail_masked, tef_tail;
+
+- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
++ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
++ * compare 7 bits; this is enough to detect old TEF objects.
++ */
++ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
+ hw_tef_obj->flags);
+-
+- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+- * compare 7 bits, this should be enough to detect
+- * net-yet-completed, i.e. old TEF objects.
+- */
+- seq_masked = seq &
+- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef->tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+- if (seq_masked != tef_tail_masked)
+- return mcp251xfd_handle_tefif_recover(priv, seq);
++
++ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
++ * bits of a FIFOSTA register (here the TX FIFO tail index)
++ * might be corrupted, and we might process past the TEF FIFO's
++ * head into old CAN frames.
++ *
++ * Compare the sequence number of the currently processed CAN
++ * frame with the expected sequence number. Abort with
++ * -EBADMSG if an old CAN frame is detected.
++ */
++ if (seq != tef_tail_masked) {
++ netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
++ seq, tef_tail_masked);
++ stats->tx_fifo_errors++;
++
++ return -EBADMSG;
++ }
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
++ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tef_tail, hw_tef_obj->ts,
+@@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ return 0;
+ }
+
+-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
++static int
++mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
+ {
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+- unsigned int new_head;
+- u8 chip_tx_tail;
++ const u8 shift = tx_ring->obj_num_shift_to_u8;
++ u8 chip_tx_tail, tail, len;
++ u32 fifo_sta;
+ int err;
+
+- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
++ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
++ &fifo_sta);
+ if (err)
+ return err;
+
+- /* chip_tx_tail, is the next TX-Object send by the HW.
+- * The new TEF head must be >= the old head, ...
++ if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
++ *len_p = tx_ring->obj_num;
++ return 0;
++ }
++
++ chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
++
++ err = mcp251xfd_check_tef_tail(priv);
++ if (err)
++ return err;
++ tail = mcp251xfd_get_tef_tail(priv);
++
++ /* First shift to a full u8. The subtraction works on signed
++ * values, which keeps the difference steady around the u8
++ * overflow. The right shift acts on len, which is a u8.
+ */
+- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
+- if (new_head <= priv->tef->head)
+- new_head += tx_ring->obj_num;
++ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
++ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
++ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
+
+- /* ... but it cannot exceed the TX head. */
+- priv->tef->head = min(new_head, tx_ring->head);
++ len = (chip_tx_tail << shift) - (tail << shift);
++ *len_p = len >> shift;
+
+- return mcp251xfd_check_tef_tail(priv);
++ return 0;
+ }
+
+ static inline int
+@@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ u8 tef_tail, len, l;
+ int err, i;
+
+- err = mcp251xfd_tef_ring_update(priv);
++ err = mcp251xfd_get_tef_len(priv, &len);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+- len = mcp251xfd_get_tef_len(priv);
+- l = mcp251xfd_get_tef_linear_len(priv);
++ l = mcp251xfd_get_tef_linear_len(priv, len);
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+@@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ unsigned int frame_len = 0;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
+- /* -EAGAIN means the Sequence Number in the TEF
+- * doesn't match our tef_tail. This can happen if we
+- * read the TEF objects too early. Leave loop let the
+- * interrupt handler call us again.
++ /* -EBADMSG means we're affected by mcp2518fd erratum
++ * DS80000789E 6., i.e. the Sequence Number in the TEF
++ * doesn't match our tef_tail. Don't process any
++ * further and mark processed frames as good.
+ */
+- if (err == -EAGAIN)
++ if (err == -EBADMSG)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+@@ -216,13 +219,15 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+ total_frame_len += frame_len;
+ }
+
+- out_netif_wake_queue:
++out_netif_wake_queue:
+ len = i; /* number of handled good TEFs */
+ if (len) {
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int offset;
+
++ ring->head += len;
++
+ /* Increment the TEF FIFO tail pointer 'len' times in
+ * a single SPI message.
+ *
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+index 712e091869870c..202ca0d24d03b9 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+@@ -2,7 +2,7 @@
+ //
+ // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ //
+-// Copyright (c) 2021 Pengutronix,
++// Copyright (c) 2021, 2023 Pengutronix,
+ // Marc Kleine-Budde <kernel@pengutronix.de>
+ //
+
+@@ -11,20 +11,20 @@
+
+ #include "mcp251xfd.h"
+
+-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
++static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
+ {
+ const struct mcp251xfd_priv *priv;
+- u32 timestamp = 0;
++ u32 ts_raw = 0;
+ int err;
+
+ priv = container_of(cc, struct mcp251xfd_priv, cc);
+- err = mcp251xfd_get_timestamp(priv, &timestamp);
++ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
+ if (err)
+ netdev_err(priv->ndev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+- return timestamp;
++ return ts_raw;
+ }
+
+ static void mcp251xfd_timestamp_work(struct work_struct *work)
+@@ -39,28 +39,21 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
+ MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
+ }
+
+-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
+- struct sk_buff *skb, u32 timestamp)
+-{
+- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+- u64 ns;
+-
+- ns = timecounter_cyc2time(&priv->tc, timestamp);
+- hwtstamps->hwtstamp = ns_to_ktime(ns);
+-}
+-
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
+ {
+ struct cyclecounter *cc = &priv->cc;
+
+- cc->read = mcp251xfd_timestamp_read;
++ cc->read = mcp251xfd_timestamp_raw_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 1;
+ cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
+
+- timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+-
+ INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
++}
++
++void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
++{
++ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+ schedule_delayed_work(&priv->timestamp,
+ MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
+ }
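
The split above separates one-time setup (mcp251xfd_timestamp_init(), which only fills in the cyclecounter and the work item) from per-chip-start anchoring (mcp251xfd_timestamp_start(), which runs timecounter_init()), so the time base is re-anchored on every controller start instead of once at probe. Raw 32-bit counter values stay comparable across wrap-around thanks to the same modular arithmetic the timecounter uses internally; a self-contained toy model, with hypothetical names and an assumed 40 MHz clock (the real rate comes from priv->can.clock.freq):

#include <stdint.h>
#include <stdio.h>

/* Toy timecounter: anchor wall time to a free-running 32-bit counter. */
struct toy_timecounter {
	uint64_t ns_base;	/* wall time at the last (re-)anchor */
	uint32_t cyc_base;	/* raw counter value at the last anchor */
	uint64_t hz;		/* counter frequency */
};

static uint64_t toy_cyc2time(const struct toy_timecounter *tc, uint32_t raw)
{
	uint32_t delta = raw - tc->cyc_base;	/* wraps safely mod 2^32 */

	return tc->ns_base + delta * 1000000000ULL / tc->hz;
}

int main(void)
{
	struct toy_timecounter tc = {
		.ns_base = 0, .cyc_base = 0xfffffff0u, .hz = 40000000,
	};

	/* the counter wrapped past zero; delta is still 0x20 cycles */
	printf("%llu ns\n", (unsigned long long)toy_cyc2time(&tc, 0x10));
	return 0;
}
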
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+index 160528d3cc26b1..b1de8052a45ccb 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ tx_obj->xfer[0].len = len;
+ }
+
++static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
++ struct mcp251xfd_tx_ring *tx_ring,
++ int err)
++{
++ struct net_device *ndev = priv->ndev;
++ struct net_device_stats *stats = &ndev->stats;
++ unsigned int frame_len = 0;
++ u8 tx_head;
++
++ tx_ring->head--;
++ stats->tx_dropped++;
++ tx_head = mcp251xfd_get_tx_head(tx_ring);
++ can_free_echo_skb(ndev, tx_head, &frame_len);
++ netdev_completed_queue(ndev, 1, frame_len);
++ netif_wake_queue(ndev);
++
++ if (net_ratelimit())
++ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
++}
++
++void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
++{
++ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
++ tx_work);
++ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
++ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
++ int err;
++
++ err = spi_sync(priv->spi, &tx_obj->msg);
++ if (err)
++ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
++}
++
+ static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj)
+ {
+@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ return false;
+ }
+
++static bool mcp251xfd_work_busy(struct work_struct *work)
++{
++ return work_busy(work);
++}
++
+ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+ {
+@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+- if (mcp251xfd_tx_busy(priv, tx_ring))
++ if (mcp251xfd_tx_busy(priv, tx_ring) ||
++ mcp251xfd_work_busy(&priv->tx_work))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ err = mcp251xfd_tx_obj_write(priv, tx_obj);
+- if (err)
+- goto out_err;
+-
+- return NETDEV_TX_OK;
+-
+- out_err:
+- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
++ if (err == -EBUSY) {
++ netif_stop_queue(ndev);
++ priv->tx_work_obj = tx_obj;
++ queue_work(priv->wq, &priv->tx_work);
++ } else if (err) {
++ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
++ }
+
+ return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index 24510b3b80203e..dcbbd2b2fae827 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -2,7 +2,7 @@
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+- * Copyright (c) 2019, 2020, 2021 Pengutronix,
++ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+ */
+@@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring {
+
+ /* u8 obj_num equals tx_ring->obj_num */
+ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
++ /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
+
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+@@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring {
+ u8 nr;
+ u8 fifo_nr;
+ u8 obj_num;
++ u8 obj_num_shift_to_u8;
+ u8 obj_size;
+
+ struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
+@@ -552,10 +554,14 @@ struct mcp251xfd_rx_ring {
+ unsigned int head;
+ unsigned int tail;
+
++ /* timestamp of the last valid received CAN frame */
++ u64 last_valid;
++
+ u16 base;
+ u8 nr;
+ u8 fifo_nr;
+ u8 obj_num;
++ u8 obj_num_shift_to_u8;
+ u8 obj_size;
+
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+@@ -633,6 +639,10 @@ struct mcp251xfd_priv {
+ struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+
++ struct workqueue_struct *wq;
++ struct work_struct tx_work;
++ struct mcp251xfd_tx_obj *tx_work_obj;
++
+ DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
+
+ u8 rx_ring_num;
+@@ -805,10 +815,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
+ return data;
+ }
+
+-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
+- u32 *timestamp)
++static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
++ u32 *ts_raw)
++{
++ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
++}
++
++static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
+ {
+- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
++ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
++
++ hwtstamps->hwtstamp = ns_to_ktime(ns);
++}
++
++static inline
++void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
++ struct sk_buff *skb, u32 ts_raw)
++{
++ u64 ns;
++
++ ns = timecounter_cyc2time(&priv->tc, ts_raw);
++ mcp251xfd_skb_set_timestamp(skb, ns);
+ }
+
+ static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
+@@ -857,17 +884,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
+ return priv->tef->tail & (priv->tx->obj_num - 1);
+ }
+
+-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
+-{
+- return priv->tef->head - priv->tef->tail;
+-}
+-
+-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
++static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
+ {
+- u8 len;
+-
+- len = mcp251xfd_get_tef_len(priv);
+-
+ return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
+ }
+
+@@ -910,18 +928,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
+ return ring->tail & (ring->obj_num - 1);
+ }
+
+-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
+-{
+- return ring->head - ring->tail;
+-}
+-
+ static inline u8
+-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
++mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
+ {
+- u8 len;
+-
+- len = mcp251xfd_get_rx_len(ring);
+-
+ return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
+ }
+
+@@ -947,11 +956,11 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
+ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
+ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
+ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
+-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
+- struct sk_buff *skb, u32 timestamp);
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
++void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
+ void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+
++void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
+ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+
+diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
+index 41a0e4261d15e9..03ad10b01867d8 100644
+--- a/drivers/net/can/usb/esd_usb.c
++++ b/drivers/net/can/usb/esd_usb.c
+@@ -3,7 +3,7 @@
+ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
+ *
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+- * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
++ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ */
+
+ #include <linux/can.h>
+@@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
+
+- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+- flags |= ESD_USB_3_BAUDRATE_FLAG_TRS;
+-
+ baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
+ baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
+ baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
+@@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
+ switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
+ case ESD_USB_CANUSB3_PRODUCT_ID:
+ priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
+- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
+ priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
+index 0c7f7505632cd7..5e3a72b7c46919 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
+@@ -2230,6 +2230,7 @@ static int es58x_probe(struct usb_interface *intf,
+
+ for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
+ int ret = es58x_init_netdev(es58x_dev, ch_idx);
++
+ if (ret) {
+ es58x_free_netdevs(es58x_dev);
+ return ret;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
+index c1ba1a4e8857b3..2e183bdeedd725 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
++++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
+@@ -378,13 +378,13 @@ struct es58x_sw_version {
+
+ /**
+ * struct es58x_hw_revision - Hardware revision number.
+- * @letter: Revision letter.
++ * @letter: Revision letter, an alphanumeric character.
+ * @major: Version major number, represented on three digits.
+ * @minor: Version minor number, represented on three digits.
+ *
+ * The hardware revision uses its own format: "axxx/xxx" where 'a' is
+- * a letter and 'x' a digit. It can be retrieved from the product
+- * information string.
++ * an alphanumeric character and 'x' a digit. It can be retrieved from
++ * the product information string.
+ */
+ struct es58x_hw_revision {
+ char letter;
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+index 9fba29e2f57c6c..635edeb8f68cdf 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+@@ -125,14 +125,28 @@ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
+ * firmware version, the bootloader version and the hardware
+ * revision.
+ *
+- * If the function fails, simply emit a log message and continue
+- * because product information is not critical for the driver to
+- * operate.
++ * If the function fails, set the version or revision to an invalid
++ * value and emit an informational message. Continue probing because the
++ * product information is not critical for the driver to operate.
+ */
+ void es58x_parse_product_info(struct es58x_device *es58x_dev)
+ {
++ static const struct es58x_sw_version sw_version_not_set = {
++ .major = -1,
++ .minor = -1,
++ .revision = -1,
++ };
++ static const struct es58x_hw_revision hw_revision_not_set = {
++ .letter = '\0',
++ .major = -1,
++ .minor = -1,
++ };
+ char *prod_info;
+
++ es58x_dev->firmware_version = sw_version_not_set;
++ es58x_dev->bootloader_version = sw_version_not_set;
++ es58x_dev->hardware_revision = hw_revision_not_set;
++
+ prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX);
+ if (!prod_info) {
+ dev_warn(es58x_dev->dev,
+@@ -150,29 +164,36 @@ void es58x_parse_product_info(struct es58x_device *es58x_dev)
+ }
+
+ /**
+- * es58x_sw_version_is_set() - Check if the version is a valid number.
++ * es58x_sw_version_is_valid() - Check if the version is a valid number.
+ * @sw_ver: Version number of either the firmware or the bootloader.
+ *
+- * If &es58x_sw_version.major, &es58x_sw_version.minor and
+- * &es58x_sw_version.revision are all zero, the product string could
+- * not be parsed and the version number is invalid.
++ * If any of the software version sub-numbers do not fit on two
++ * digits, the version is invalid, most probably because the product
++ * string could not be parsed.
++ *
++ * Return: @true if the software version is valid, @false otherwise.
+ */
+-static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver)
++static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver)
+ {
+- return sw_ver->major || sw_ver->minor || sw_ver->revision;
++ return sw_ver->major < 100 && sw_ver->minor < 100 &&
++ sw_ver->revision < 100;
+ }
+
+ /**
+- * es58x_hw_revision_is_set() - Check if the revision is a valid number.
++ * es58x_hw_revision_is_valid() - Check if the revision is a valid number.
+ * @hw_rev: Revision number of the hardware.
+ *
+- * If &es58x_hw_revision.letter is the null character, the product
+- * string could not be parsed and the hardware revision number is
+- * invalid.
++ * If &es58x_hw_revision.letter is not an alphanumeric character or if
++ * any of the hardware revision sub-numbers do not fit on three
++ * digits, the revision is invalid, most probably because the product
++ * string could not be parsed.
++ *
++ * Return: @true if the hardware revision is valid, @false otherwise.
+ */
+-static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev)
++static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev)
+ {
+- return hw_rev->letter != '\0';
++ return isalnum(hw_rev->letter) && hw_rev->major < 1000 &&
++ hw_rev->minor < 1000;
+ }
+
+ /**
+@@ -197,7 +218,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ int ret = 0;
+
+- if (es58x_sw_version_is_set(fw_ver)) {
++ if (es58x_sw_version_is_valid(fw_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ fw_ver->major, fw_ver->minor, fw_ver->revision);
+ ret = devlink_info_version_running_put(req,
+@@ -207,7 +228,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ return ret;
+ }
+
+- if (es58x_sw_version_is_set(bl_ver)) {
++ if (es58x_sw_version_is_valid(bl_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ bl_ver->major, bl_ver->minor, bl_ver->revision);
+ ret = devlink_info_version_running_put(req,
+@@ -217,7 +238,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ return ret;
+ }
+
+- if (es58x_hw_revision_is_set(hw_rev)) {
++ if (es58x_hw_revision_is_valid(hw_rev)) {
+ snprintf(buf, sizeof(buf), "%c%03u/%03u",
+ hw_rev->letter, hw_rev->major, hw_rev->minor);
+ ret = devlink_info_version_fixed_put(req,
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 71ef4db5c09f68..15f28b6fe758ee 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -124,6 +124,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
+
+ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
++ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+ };
+
+@@ -291,7 +292,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ }
+ usb_free_urb(urb);
+
+- return 0;
++ return err;
+ }
+
+ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 4e27dc913cf713..4a2c9a9134d8c8 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -27,6 +27,7 @@
+ #include <linux/phylink.h>
+ #include <linux/etherdevice.h>
+ #include <linux/if_bridge.h>
++#include <linux/if_vlan.h>
+ #include <net/dsa.h>
+
+ #include "b53_regs.h"
+@@ -224,6 +225,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = {
+
+ #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
+
++#define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
++#define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
++
+ static int b53_do_vlan_op(struct b53_device *dev, u8 op)
+ {
+ unsigned int i;
+@@ -2263,17 +2267,25 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
+ bool allow_10_100;
+
+ if (is5325(dev) || is5365(dev))
+- return -EOPNOTSUPP;
++ return 0;
++
++ if (!dsa_is_cpu_port(ds, port))
++ return 0;
+
+- enable_jumbo = (mtu >= JMS_MIN_SIZE);
+- allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
++ enable_jumbo = (mtu > ETH_DATA_LEN);
++ allow_10_100 = !is63xx(dev);
+
+ return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
+ }
+
+ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
+ {
+- return JMS_MAX_SIZE;
++ struct b53_device *dev = ds->priv;
++
++ if (is5325(dev) || is5365(dev))
++ return B53_MAX_MTU_25;
++
++ return B53_MAX_MTU;
+ }
+
+ static const struct dsa_switch_ops b53_switch_ops = {
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index cd1f240c90f396..257df167687506 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -678,8 +678,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ of_remove_property(child, prop);
+
+ phydev = of_phy_find_device(child);
+- if (phydev)
++ if (phydev) {
+ phy_device_remove(phydev);
++ phy_device_free(phydev);
++ }
+ }
+
+ err = mdiobus_register(priv->slave_mii_bus);
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index ee67adeb2cdbfa..8b05a70c0ff3f9 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -6,6 +6,7 @@
+ #include <linux/module.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/regmap.h>
++#include <linux/iopoll.h>
+ #include <linux/mutex.h>
+ #include <linux/mii.h>
+ #include <linux/of.h>
+@@ -839,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip)
+ if (!chip->reset_gpio)
+ return;
+
++ gpiod_set_value_cansleep(chip->reset_gpio, 1);
++
+ if (chip->reset_duration != 0)
+ msleep(chip->reset_duration);
+
+@@ -864,8 +867,34 @@ static int lan9303_disable_processing(struct lan9303 *chip)
+ static int lan9303_check_device(struct lan9303 *chip)
+ {
+ int ret;
++ int err;
+ u32 reg;
+
++	/* In I2C-managed configurations this polling loop will clash with the
++	 * switch's reading of the EEPROM right after reset, and this behaviour
++	 * is not configurable. While lan9303_read() already has quite a long
++	 * retry timeout, it seems not all cases are detected as arbitration
++	 * errors.
++	 *
++	 * According to the datasheet, the EEPROM loader has a 30ms timeout (in
++	 * case of a missing EEPROM).
++	 *
++	 * Loading of the largest supported EEPROM is expected to take at least
++	 * 5.9s.
++ */
++ err = read_poll_timeout(lan9303_read, ret,
++ !ret && reg & LAN9303_HW_CFG_READY,
++ 20000, 6000000, false,
++ chip->regmap, LAN9303_HW_CFG, &reg);
++ if (ret) {
++ dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n",
++ ERR_PTR(ret));
++ return ret;
++ }
++ if (err) {
++ dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg);
++ return err;
++ }
++
+ ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip revision register: %d\n",
+@@ -1048,31 +1077,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
+ return ARRAY_SIZE(lan9303_mib);
+ }
+
+-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
++static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
+ {
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_base;
+
+- if (phy == phy_base)
++ if (port == 0)
+ return lan9303_virt_phy_reg_read(chip, regnum);
+- if (phy > phy_base + 2)
++ if (port > 2)
+ return -ENODEV;
+
+- return chip->ops->phy_read(chip, phy, regnum);
++ return chip->ops->phy_read(chip, phy_base + port, regnum);
+ }
+
+-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
++static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 val)
+ {
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_base;
+
+- if (phy == phy_base)
++ if (port == 0)
+ return lan9303_virt_phy_reg_write(chip, regnum, val);
+- if (phy > phy_base + 2)
++ if (port > 2)
+ return -ENODEV;
+
+- return chip->ops->phy_write(chip, phy, regnum, val);
++ return chip->ops->phy_write(chip, phy_base + port, regnum, val);
+ }
+
+ static int lan9303_port_enable(struct dsa_switch *ds, int port,
+@@ -1100,7 +1129,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
+ lan9303_disable_processing_port(chip, port);
+- lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
++ lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
+ }
+
+ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
+@@ -1355,8 +1384,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
+
+ static int lan9303_register_switch(struct lan9303 *chip)
+ {
+- int base;
+-
+ chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
+ if (!chip->ds)
+ return -ENOMEM;
+@@ -1365,8 +1392,7 @@ static int lan9303_register_switch(struct lan9303 *chip)
+ chip->ds->num_ports = LAN9303_NUM_PORTS;
+ chip->ds->priv = chip;
+ chip->ds->ops = &lan9303_switch_ops;
+- base = chip->phy_addr_base;
+- chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
++ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);
+
+ return dsa_register_switch(chip->ds);
+ }
+diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
+index d8ab2b77d201e0..167a86f39f2771 100644
+--- a/drivers/net/dsa/lan9303_mdio.c
++++ b/drivers/net/dsa/lan9303_mdio.c
+@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+- mutex_lock(&sw_dev->device->bus->mdio_lock);
++ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+- mutex_lock(&sw_dev->device->bus->mdio_lock);
++ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ *val = lan9303_mdio_real_read(sw_dev->device, reg);
+ *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 91aba470fb2fae..28d7ada3ec0677 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+- ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
++ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+- ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
++ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+
+ mutex_unlock(&dev->alu_mutex);
+
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 83b7f2d5c1ea6d..a7e8fcdf25768b 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -174,10 +174,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
+ SPI_AUTO_EDGE_DETECTION, 0);
+
+ /* default configuration */
+- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
++ ksz_write8(dev, REG_SW_LUE_CTRL_1,
++ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
+
+ /* disable interrupts */
+ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+@@ -1114,6 +1112,10 @@ int ksz9477_setup(struct dsa_switch *ds)
+ /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+
++ /* Use collision based back pressure mode. */
++	/* Use collision-based back pressure mode. */
++ SW_BACK_PRESSURE_COLLISION);
++
+ /* Now we can configure default MTU value */
+ ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index cba3dba58bc378..a2ef4b18349c41 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -267,6 +267,7 @@
+ #define REG_SW_MAC_CTRL_1 0x0331
+
+ #define SW_BACK_PRESSURE BIT(5)
++#define SW_BACK_PRESSURE_COLLISION 0
+ #define FAIR_FLOW_CTRL BIT(4)
+ #define NO_EXC_COLLISION_DROP BIT(3)
+ #define SW_JUMBO_PACKET BIT(2)
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 42db7679c36068..1c3f1864999895 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -1973,7 +1973,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+- ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
++ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+@@ -2624,10 +2624,18 @@ static int ksz_connect_tag_protocol(struct dsa_switch *ds,
+ {
+ struct ksz_tagger_data *tagger_data;
+
+- tagger_data = ksz_tagger_data(ds);
+- tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+-
+- return 0;
++ switch (proto) {
++ case DSA_TAG_PROTO_KSZ8795:
++ return 0;
++ case DSA_TAG_PROTO_KSZ9893:
++ case DSA_TAG_PROTO_KSZ9477:
++ case DSA_TAG_PROTO_LAN937X:
++ tagger_data = ksz_tagger_data(ds);
++ tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
++ return 0;
++ default:
++ return -EPROTONOSUPPORT;
++ }
+ }
+
+ static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+@@ -2856,7 +2864,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+ else
+ interface = PHY_INTERFACE_MODE_MII;
+ } else if (val == bitval[P_RMII_SEL]) {
+- interface = PHY_INTERFACE_MODE_RGMII;
++ interface = PHY_INTERFACE_MODE_RMII;
+ } else {
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
+index 4e22a695a64c3a..7ef5fac69657ff 100644
+--- a/drivers/net/dsa/microchip/ksz_ptp.c
++++ b/drivers/net/dsa/microchip/ksz_ptp.c
+@@ -266,7 +266,6 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
+ struct ksz_port *prt;
+ struct dsa_port *dp;
+ bool tag_en = false;
+- int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ prt = &dev->ports[dp->index];
+@@ -277,9 +276,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
+ }
+
+ if (tag_en) {
+- ret = ptp_schedule_worker(ptp_data->clock, 0);
+- if (ret)
+- return ret;
++ ptp_schedule_worker(ptp_data->clock, 0);
+ } else {
+ ptp_cancel_worker_sync(ptp_data->clock);
+ }
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 035a34b50f31bd..53ead0989777ff 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -999,20 +999,217 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
+ mutex_unlock(&priv->reg_mutex);
+ }
+
++/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
++ * of the Open Systems Interconnection basic reference model (OSI/RM) are
++ * described; the medium access control (MAC) and logical link control (LLC)
++ * sublayers. The MAC sublayer is the one facing the physical layer.
++ *
++ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
++ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
++ * of the Bridge, at least two Ports, and higher layer entities with at least a
++ * Spanning Tree Protocol Entity included.
++ *
++ * Each Bridge Port also functions as an end station and shall provide the MAC
++ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
++ * distinct LLC Entity that supports protocol identification, multiplexing, and
++ * demultiplexing, for protocol data unit (PDU) transmission and reception by
++ * one or more higher layer entities.
++ *
++ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
++ * Entity associated with each Bridge Port is modeled as being directly
++ * connected to the attached Local Area Network (LAN).
++ *
++ * On the switch with CPU port architecture, CPU port functions as Management
++ * Port, and the Management Port functionality is provided by software which
++ * functions as an end station. Software is connected to an IEEE 802 LAN that is
++ * wholly contained within the system that incorporates the Bridge. Software
++ * provides access to the LLC Entity associated with each Bridge Port by the
++ * value of the source port field on the special tag on the frame received by
++ * software.
++ *
++ * We call frames that carry control information to determine the active
++ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
++ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
++ * Protocol Data Units (MVRPDUs), and frames from other link constrained
++ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
++ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
++ * forwarded by a Bridge. Permanently configured entries in the filtering
++ * database (FDB) ensure that such frames are discarded by the Forwarding
++ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
++ *
++ * Each of the reserved MAC addresses specified in Table 8-1
++ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
++ * permanently configured in the FDB in C-VLAN components and ERs.
++ *
++ * Each of the reserved MAC addresses specified in Table 8-2
++ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
++ * configured in the FDB in S-VLAN components.
++ *
++ * Each of the reserved MAC addresses specified in Table 8-3
++ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
++ * TPMR components.
++ *
++ * The FDB entries for reserved MAC addresses shall specify filtering for all
++ * Bridge Ports and all VIDs. Management shall not provide the capability to
++ * modify or remove entries for reserved MAC addresses.
++ *
++ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
++ * propagation of PDUs within a Bridged Network, as follows:
++ *
++ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
++ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
++ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
++ * PDUs transmitted using this destination address, or any other addresses
++ * that appear in Table 8-1, Table 8-2, and Table 8-3
++ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
++ * therefore travel no further than those stations that can be reached via a
++ * single individual LAN from the originating station.
++ *
++ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
++ * address that no conformant S-VLAN component, C-VLAN component, or MAC
++ * Bridge can forward; however, this address is relayed by a TPMR component.
++ * PDUs using this destination address, or any of the other addresses that
++ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
++ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
++ * any TPMRs but will propagate no further than the nearest S-VLAN component,
++ * C-VLAN component, or MAC Bridge.
++ *
++ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
++ * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
++ * relayed by TPMR components and S-VLAN components. PDUs using this
++ * destination address, or any of the other addresses that appear in Table 8-1
++ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
++ * will be relayed by TPMR components and S-VLAN components but will propagate
++ * no further than the nearest C-VLAN component or MAC Bridge.
++ *
++ * Because the LLC Entity associated with each Bridge Port is provided via CPU
++ * port, we must not filter these frames but forward them to CPU port.
++ *
++ * In a Bridge, the transmission Port is mainly decided by ingress and egress
++ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
++ * For link-local frames, only CPU port should be designated as destination port
++ * in the FDB, and the other functions of the Forwarding Process must not
++ * interfere with the decision of the transmission Port. We call this process
++ * trapping frames to CPU port.
++ *
++ * Therefore, on the switch with CPU port architecture, link-local frames must
++ * be trapped to CPU port, and certain link-local frames received by a Port of a
++ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
++ * from it.
++ *
++ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
++ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
++ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
++ * doesn't count) of this architecture will either function as a standard MAC
++ * Bridge or a standard VLAN Bridge.
++ *
++ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
++ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
++ * we don't need to relay PDUs using the destination addresses specified on the
++ * Nearest non-TPMR section, and the portion of the Nearest Customer Bridge
++ * section where they must be relayed by TPMR components.
++ *
++ * One option to trap link-local frames to CPU port is to add static FDB entries
++ * with CPU port designated as destination port. However, because
++ * Independent VLAN Learning (IVL) is being used on every VID, each entry only
++ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
++ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
++ * entries. This switch intellectual property can only hold a maximum of 2048
++ * entries. Using this option, there also isn't a mechanism to prevent
++ * link-local frames from being discarded when the spanning tree Port State of
++ * the reception Port is discarding.
++ *
++ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
++ * registers. Whilst this applies to every VID, it doesn't contain all of the
++ * reserved MAC addresses without affecting the remaining Standard Group MAC
++ * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
++ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
++ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
++ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
++ * The latter option provides better but not complete conformance.
++ *
++ * This switch intellectual property also does not provide a mechanism to trap
++ * link-local frames with specific destination addresses to CPU port by Bridge,
++ * to conform to the filtering rules for the distinct Bridge components.
++ *
++ * Therefore, regardless of the type of the Bridge component, link-local frames
++ * with these destination addresses will be trapped to CPU port:
++ *
++ * 01-80-C2-00-00-[00,01,02,03,0E]
++ *
++ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
++ *
++ * Link-local frames with these destination addresses won't be trapped to CPU
++ * port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
++ *
++ * In a Bridge comprising an S-VLAN component:
++ *
++ * Link-local frames with these destination addresses will be trapped to CPU
++ * port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ * 01-80-C2-00-00-00
++ *
++ * Link-local frames with these destination addresses won't be trapped to CPU
++ * port which won't conform to IEEE Std 802.1Q-2022:
++ *
++ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
++ *
++ * To trap link-local frames to CPU port as conformant as this switch
++ * intellectual property can allow, link-local frames are made to be regarded as
++ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
++ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
++ * State function of the Forwarding Process.
++ *
++ * The only remaining interference is the ingress rules. When the reception Port
++ * has no PVID assigned in software, VLAN-untagged frames won't be allowed in.
++ * There doesn't seem to be a mechanism on the switch intellectual property to
++ * have link-local frames bypass this function of the Forwarding Process.
++ */
+ static void
+ mt753x_trap_frames(struct mt7530_priv *priv)
+ {
+- /* Trap BPDUs to the CPU port(s) */
+- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+- MT753X_BPDU_CPU_ONLY);
+-
+- /* Trap 802.1X PAE frames to the CPU port(s) */
+- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
+- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
+-
+- /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
+- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
+- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
++ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
++ * VLAN-untagged.
++ */
++ mt7530_rmw(priv, MT753X_BPC,
++ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
++ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
++ MT753X_BPDU_PORT_FW_MASK,
++ MT753X_PAE_BPDU_FR |
++ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_BPDU_CPU_ONLY);
++
++ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
++ * them VLAN-untagged.
++ */
++ mt7530_rmw(priv, MT753X_RGAC1,
++ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
++ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
++ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
++ MT753X_R02_BPDU_FR |
++ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++ MT753X_R01_BPDU_FR |
++ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_BPDU_CPU_ONLY);
++
++ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
++ * them VLAN-untagged.
++ */
++ mt7530_rmw(priv, MT753X_RGAC2,
++ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
++ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
++ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
++ MT753X_R0E_BPDU_FR |
++ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
++ MT753X_R03_BPDU_FR |
++ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
++ MT753X_BPDU_CPU_ONLY);
+ }
+
+ static int
+@@ -1751,14 +1948,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
+
+ static int mt753x_mirror_port_get(unsigned int id, u32 val)
+ {
+- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
+- MIRROR_PORT(val);
++ return (id == ID_MT7531 || id == ID_MT7988) ?
++ MT7531_MIRROR_PORT_GET(val) :
++ MIRROR_PORT(val);
+ }
+
+ static int mt753x_mirror_port_set(unsigned int id, u32 val)
+ {
+- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
+- MIRROR_PORT(val);
++ return (id == ID_MT7531 || id == ID_MT7988) ?
++ MT7531_MIRROR_PORT_SET(val) :
++ MIRROR_PORT(val);
+ }
+
+ static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
+@@ -2244,11 +2443,11 @@ mt7530_setup(struct dsa_switch *ds)
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+- usleep_range(1000, 1100);
++ usleep_range(5000, 5100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+- usleep_range(1000, 1100);
++ usleep_range(5000, 5100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+@@ -2273,8 +2472,6 @@ mt7530_setup(struct dsa_switch *ds)
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+- mt7530_pll_setup(priv);
+-
+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+@@ -2292,6 +2489,9 @@ mt7530_setup(struct dsa_switch *ds)
+
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
++ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
++ mt7530_pll_setup(priv);
++
+ mt753x_trap_frames(priv);
+
+ /* Enable and reset MIB counters */
+@@ -2321,6 +2521,9 @@ mt7530_setup(struct dsa_switch *ds)
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
++ /* Allow mirroring frames received on the local port (monitor port). */
++ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
++
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ ret = mt7530_setup_vlan0(priv);
+ if (ret)
+@@ -2429,6 +2632,9 @@ mt7531_setup_common(struct dsa_switch *ds)
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
++ /* Allow mirroring frames received on the local port (monitor port). */
++ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
++
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+@@ -2450,11 +2656,11 @@ mt7531_setup(struct dsa_switch *ds)
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+- usleep_range(1000, 1100);
++ usleep_range(5000, 5100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+- usleep_range(1000, 1100);
++ usleep_range(5000, 5100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+@@ -2507,18 +2713,25 @@ mt7531_setup(struct dsa_switch *ds)
+ priv->p5_interface = PHY_INTERFACE_MODE_NA;
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
+- /* Enable PHY core PLL, since phy_device has not yet been created
+- * provided for phy_[read,write]_mmd_indirect is called, we provide
+- * our own mt7531_ind_mmd_phy_[read,write] to complete this
+- * function.
++	/* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
++	 * no phy_device has been created yet to be passed to
++	 * phy_[read,write]_mmd_indirect, we provide our own
++	 * mt7531_ind_mmd_phy_[read,write] to complete this function.
+ */
+ val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4);
+- val |= MT7531_PHY_PLL_BYPASS_MODE;
++ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
+ val &= ~MT7531_PHY_PLL_OFF;
+ mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ CORE_PLL_GROUP4, val);
+
++ /* Disable EEE advertisement on the switch PHYs. */
++ for (i = MT753X_CTRL_PHY_ADDR;
++ i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
++ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
++ 0);
++ }
++
+ mt7531_setup_common(ds);
+
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+@@ -2848,8 +3061,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ /* MT753x MAC works in 1G full duplex mode for all up-clocked
+ * variants.
+ */
+- if (interface == PHY_INTERFACE_MODE_INTERNAL ||
+- interface == PHY_INTERFACE_MODE_TRGMII ||
++ if (interface == PHY_INTERFACE_MODE_TRGMII ||
+ (phy_interface_mode_is_8023z(interface))) {
+ speed = SPEED_1000;
+ duplex = DUPLEX_FULL;
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index 17e42d30fff4bd..0ad52d3cbfebb9 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -32,6 +32,10 @@ enum mt753x_id {
+ #define SYSC_REG_RSTCTRL 0x34
+ #define RESET_MCM BIT(2)
+
++/* Register for ARL global control */
++#define MT753X_AGC 0xc
++#define LOCAL_EN BIT(7)
++
+ /* Registers to mac forward control for unknown frames */
+ #define MT7530_MFC 0x10
+ #define BC_FFP(x) (((x) & 0xff) << 24)
+@@ -65,14 +69,38 @@ enum mt753x_id {
+
+ /* Registers for BPDU and PAE frame control*/
+ #define MT753X_BPC 0x24
+-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
++#define MT753X_PAE_BPDU_FR BIT(25)
++#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
++#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
+ #define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
+ #define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
++#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
++#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
++#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
++
++/* Register for :01 and :02 MAC DA frame control */
++#define MT753X_RGAC1 0x28
++#define MT753X_R02_BPDU_FR BIT(25)
++#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
++#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
++#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
++#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
++#define MT753X_R01_BPDU_FR BIT(9)
++#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
++#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
++#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
+
+ /* Register for :03 and :0E MAC DA frame control */
+ #define MT753X_RGAC2 0x2c
++#define MT753X_R0E_BPDU_FR BIT(25)
++#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
++#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
+ #define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
+ #define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
++#define MT753X_R03_BPDU_FR BIT(9)
++#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
++#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
++#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
+
+ enum mt753x_bpdu_port_fw {
+ MT753X_BPDU_FOLLOW_MFC,
+@@ -253,6 +281,7 @@ enum mt7530_port_mode {
+ enum mt7530_vlan_port_eg_tag {
+ MT7530_VLAN_EG_DISABLED = 0,
+ MT7530_VLAN_EG_CONSISTENT = 1,
++ MT7530_VLAN_EG_UNTAGGED = 4,
+ };
+
+ enum mt7530_vlan_port_attr {
+@@ -605,6 +634,7 @@ enum mt7531_clk_skew {
+ #define RG_SYSPLL_DDSFBK_EN BIT(12)
+ #define RG_SYSPLL_BIAS_EN BIT(11)
+ #define RG_SYSPLL_BIAS_LPF_EN BIT(10)
++#define MT7531_RG_SYSPLL_DMY2 BIT(6)
+ #define MT7531_PHY_PLL_OFF BIT(5)
+ #define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index ab434a77b059a5..3877744193e2a0 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+ {
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+
+- mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+- list);
++ mdio_bus = list_first_entry_or_null(&chip->mdios,
++ struct mv88e6xxx_mdio_bus, list);
+ if (!mdio_bus)
+ return NULL;
+
+@@ -566,15 +566,75 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
+ phy_interface_set_rgmii(supported);
+ }
+
++static void
++mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
++ struct phylink_config *config)
++{
++ unsigned long *supported = config->supported_interfaces;
++ int err;
++ u16 reg;
++
++ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
++ if (err) {
++ dev_err(chip->dev, "p%d: failed to read port status\n", port);
++ return;
++ }
++
++ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
++ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
++ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
++ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
++ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
++ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
++ break;
++
++ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
++ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
++ __set_bit(PHY_INTERFACE_MODE_MII, supported);
++ break;
++
++ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
++ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
++ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
++ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
++ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
++ break;
++
++ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
++ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
++ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
++ break;
++
++ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
++ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
++ break;
++
++ default:
++ dev_err(chip->dev,
++ "p%d: invalid port mode in status register: %04x\n",
++ port, reg);
++ }
++}
++
+ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
++{
++ if (!mv88e6xxx_phy_is_internal(chip, port))
++ mv88e6250_setup_supported_interfaces(chip, port, config);
++
++ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
++}
++
++static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
++ struct phylink_config *config)
+ {
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+- config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
++ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
++ MAC_1000FD;
+ }
+
+ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+@@ -637,6 +697,18 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ }
+ }
+
++static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
++ struct phylink_config *config)
++{
++ unsigned long *supported = config->supported_interfaces;
++
++ /* Translate the default cmode */
++ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
++
++ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
++ MAC_1000FD;
++}
++
+ static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+ {
+@@ -2949,6 +3021,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
+ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ {
+ struct gpio_desc *gpiod = chip->reset;
++ int err;
+
+ /* If there is a GPIO connected to the reset pin, toggle it */
+ if (gpiod) {
+@@ -2957,17 +3030,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ * mid-byte, causing the first EEPROM read after the reset
+ * from the wrong location resulting in the switch booting
+ * to wrong mode and inoperable.
++ * For this reason, switch families with EEPROM support
++ * generally wait for EEPROM loads to complete as their pre-
++ * and post-reset handlers.
+ */
+- if (chip->info->ops->get_eeprom)
+- mv88e6xxx_g2_eeprom_wait(chip);
++ if (chip->info->ops->hardware_reset_pre) {
++ err = chip->info->ops->hardware_reset_pre(chip);
++ if (err)
++ dev_err(chip->dev, "pre-reset error: %d\n", err);
++ }
+
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+ usleep_range(10000, 20000);
+
+- if (chip->info->ops->get_eeprom)
+- mv88e6xxx_g2_eeprom_wait(chip);
++ if (chip->info->ops->hardware_reset_post) {
++ err = chip->info->ops->hardware_reset_post(chip);
++ if (err)
++ dev_err(chip->dev, "post-reset error: %d\n", err);
++ }
+ }
+ }
+
+@@ -3408,7 +3490,8 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->port_set_jumbo_size)
+ ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+- else if (chip->info->ops->set_max_frame_size)
++ else if (chip->info->ops->set_max_frame_size &&
++ dsa_is_cpu_port(ds, port))
+ ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+ mv88e6xxx_reg_unlock(chip);
+
+@@ -3533,7 +3616,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
+ int err;
+
+ if (!chip->info->ops->phy_read_c45)
+- return -EOPNOTSUPP;
++ return 0xffff;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
+@@ -3880,7 +3963,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+- if (chip->info->ops->pcs_ops->pcs_init) {
++ if (chip->info->ops->pcs_ops &&
++ chip->info->ops->pcs_ops->pcs_init) {
+ err = chip->info->ops->pcs_ops->pcs_init(chip, port);
+ if (err)
+ return err;
+@@ -3895,7 +3979,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
+
+- if (chip->info->ops->pcs_ops->pcs_teardown)
++ if (chip->info->ops->pcs_ops &&
++ chip->info->ops->pcs_ops->pcs_teardown)
+ chip->info->ops->pcs_ops->pcs_teardown(chip, port);
+ }
+
+@@ -4192,6 +4277,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4340,7 +4427,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6172_ops = {
+@@ -4382,6 +4469,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4440,7 +4529,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6176_ops = {
+@@ -4482,6 +4571,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4576,6 +4667,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4634,6 +4727,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4690,6 +4785,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4749,6 +4846,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4802,6 +4901,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
+ .watchdog_ops = &mv88e6250_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
++ .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
+ .reset = mv88e6250_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -4849,6 +4950,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4908,13 +5011,15 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e632x_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6321_ops = {
+@@ -4954,13 +5059,15 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e632x_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6341_ops = {
+@@ -5004,6 +5111,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5069,7 +5178,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6351_ops = {
+@@ -5117,7 +5226,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+- .phylink_get_caps = mv88e6185_phylink_get_caps,
++ .phylink_get_caps = mv88e6351_phylink_get_caps,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6352_ops = {
+@@ -5159,6 +5268,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5221,6 +5332,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5283,6 +5396,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5348,6 +5463,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ .watchdog_ops = &mv88e6393x_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
++ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5372,8 +5489,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6020",
+ .num_databases = 64,
+- .num_ports = 4,
++ /* Ports 2-4 are not routed to pins
++ * => usable ports 0, 1, 5, 6
++ */
++ .num_ports = 7,
+ .num_internal_phys = 2,
++ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
+ .max_vid = 4095,
+ .port_base_addr = 0x8,
+ .phy_base_addr = 0x0,
+@@ -5522,7 +5643,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6141",
+- .num_databases = 4096,
++ .num_databases = 256,
+ .num_macs = 2048,
+ .num_ports = 6,
+ .num_internal_phys = 5,
+@@ -5981,7 +6102,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6341",
+- .num_databases = 4096,
++ .num_databases = 256,
+ .num_macs = 2048,
+ .num_internal_phys = 5,
+ .num_ports = 6,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index 44383a03ef2ff9..f48a3c0ac7f968 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -476,6 +476,12 @@ struct mv88e6xxx_ops {
+ int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+
++ /* Additional handlers to run before and after hard reset, to make sure
++ * that the switch and EEPROM are in a good state.
++ */
++ int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
++ int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
++
+ /* Switch Software Reset */
+ int (*reset)(struct mv88e6xxx_chip *chip);
+
+@@ -601,8 +607,8 @@ struct mv88e6xxx_ops {
+ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
+ int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
+ uint8_t *data);
+- int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
+- uint64_t *data);
++ size_t (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
++ uint64_t *data);
+
+ /* SERDES registers for ethtool */
+ int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip, int port);
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 174c773b38c2bd..7ef0f4426ad717 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+
++static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
++{
++ /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
++ int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
++ u16 val;
++ int err;
++
++ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
++ if (err)
++ return err;
++
++ val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
++
++ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
++ if (err)
++ return err;
++
++ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
++}
++
++/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
++static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++ u16 val;
++ int err;
++
++ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
++ if (err < 0) {
++ dev_err(chip->dev, "Error reading status");
++ return err;
++ }
++
++ /* If the switch is still resetting, it may not
++ * respond on the bus, and so MDIO read returns
++ * 0xffff. Differentiate between that, and waiting for
++ * the EEPROM to be done by bit 0 being set.
++ */
++ if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
++ return -EBUSY;
++
++ return 0;
++}
++
++/* As the EEInt (EEPROM done) flag clears on read of the status register, this
++ * function must be called directly after a hard reset or EEPROM ReLoad request,
++ * or the done condition may have been missed
++ */
++int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++ const unsigned long timeout = jiffies + 1 * HZ;
++ int ret;
++
++ /* Wait up to 1 second for the switch to finish reading the
++ * EEPROM.
++ */
++ while (time_before(jiffies, timeout)) {
++ ret = mv88e6xxx_g1_is_eeprom_done(chip);
++ if (ret != -EBUSY)
++ return ret;
++ }
++
++ dev_err(chip->dev, "Timeout waiting for EEPROM done");
++ return -ETIMEDOUT;
++}
++
++int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
++{
++ int ret;
++
++ ret = mv88e6xxx_g1_is_eeprom_done(chip);
++ if (ret != -EBUSY)
++ return ret;
++
++ /* Pre-reset, we don't know the state of the switch - when
++ * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
++ * the switch is actually busy reading the EEPROM, or because
++ * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
++ * status register read already.
++ *
++ * To account for the latter case, trigger another EEPROM reload for
++ * another chance at seeing the done flag.
++ */
++ ret = mv88e6250_g1_eeprom_reload(chip);
++ if (ret)
++ return ret;
++
++ return mv88e6xxx_g1_wait_eeprom_done(chip);
++}
++
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+ * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+ * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 1095261f5b490a..3dbb7a1b8fe118 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
++int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
++int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
+
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+index ce3b3690c3c057..c47f068f56b32a 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+@@ -457,7 +457,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
+- chip->ports[spid].atu_full_violation++;
++ if (spid < ARRAY_SIZE(chip->ports))
++ chip->ports[spid].atu_full_violation++;
+ }
+
+ return IRQ_HANDLED;
+diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+index ba373656bfe147..c31f0e54f1e64c 100644
+--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
++++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+@@ -465,6 +465,7 @@ mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port,
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
++ case PHY_INTERFACE_MODE_USXGMII:
+ return &mpcs->xg_pcs;
+
+ default:
+@@ -873,7 +874,8 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+- if (interface == PHY_INTERFACE_MODE_10GBASER) {
++ if (interface == PHY_INTERFACE_MODE_10GBASER ||
++ interface == PHY_INTERFACE_MODE_USXGMII) {
+ err = mv88e6393x_erratum_5_2(mpcs);
+ if (err)
+ return err;
+@@ -886,12 +888,37 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
+ return mv88e639x_xg_pcs_enable(mpcs);
+ }
+
++static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
++ struct phylink_link_state *state)
++{
++ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
++ u16 status, lp_status;
++ int err;
++
++ if (state->interface != PHY_INTERFACE_MODE_USXGMII)
++ return mv88e639x_xg_pcs_get_state(pcs, state);
++
++ state->link = false;
++
++ err = mv88e639x_read(mpcs, MV88E6390_USXGMII_PHY_STATUS, &status);
++ err = err ? : mv88e639x_read(mpcs, MV88E6390_USXGMII_LP_STATUS, &lp_status);
++ if (err) {
++ dev_err(mpcs->mdio.dev.parent,
++ "can't read USXGMII status: %pe\n", ERR_PTR(err));
++ return;
++ }
++
++ state->link = !!(status & MDIO_USXGMII_LINK);
++ state->an_complete = state->link;
++ phylink_decode_usxgmii_word(state, lp_status);
++}
++
+ static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = {
+ .pcs_enable = mv88e6393x_xg_pcs_enable,
+ .pcs_disable = mv88e6393x_xg_pcs_disable,
+ .pcs_pre_config = mv88e6393x_xg_pcs_pre_config,
+ .pcs_post_config = mv88e6393x_xg_pcs_post_config,
+- .pcs_get_state = mv88e639x_xg_pcs_get_state,
++ .pcs_get_state = mv88e6393x_xg_pcs_get_state,
+ .pcs_config = mv88e639x_xg_pcs_config,
+ };
+
+diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
+index 86deeb347cbc1d..ddadeb9bfdaeed 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.h
++++ b/drivers/net/dsa/mv88e6xxx/port.h
+@@ -25,10 +25,25 @@
+ #define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
+ #define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
+ #define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
+-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
+-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
+-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
+-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
++/* - Modes with PHY suffix use output instead of input clock
++ * - Modes without RMII or RGMII use MII
++ * - Modes without speed do not have a fixed speed specified in the manual
++ * ("DC to x MHz" - variable clock support?)
++ */
++#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
++#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
++#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
++#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
++#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
++#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
++#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
++#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
++#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
++#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
++#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
++#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
++#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
++#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
+ #define MV88E6XXX_PORT_STS_LINK 0x0800
+ #define MV88E6XXX_PORT_STS_DUPLEX 0x0400
+ #define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
+index 3b4b42651fa3d7..01ea53940786d0 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.c
++++ b/drivers/net/dsa/mv88e6xxx/serdes.c
+@@ -177,8 +177,8 @@ static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
+ return val;
+ }
+
+-int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+- uint64_t *data)
++size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++ uint64_t *data)
+ {
+ struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
+ struct mv88e6352_serdes_hw_stat *stat;
+@@ -187,7 +187,7 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+- return err;
++ return 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
+ ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
+@@ -429,8 +429,8 @@ static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
+ return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
+ }
+
+-int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+- uint64_t *data)
++size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++ uint64_t *data)
+ {
+ struct mv88e6390_serdes_hw_stat *stat;
+ int lane;
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
+index aac95cab46e3de..ff5c3ab31e155e 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.h
++++ b/drivers/net/dsa/mv88e6xxx/serdes.h
+@@ -127,13 +127,13 @@ unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
+ int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+ int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+-int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+- uint64_t *data);
++size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++ uint64_t *data);
+ int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+-int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+- uint64_t *data);
++size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
++ uint64_t *data);
+
+ int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 9a3e5ec169726e..b0b4b4af9a1df7 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -528,7 +528,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
++ ocelot_lock_xtr_grp_bh(ocelot, 0);
+ ocelot_drain_cpu_queue(ocelot, 0);
++ ocelot_unlock_xtr_grp_bh(ocelot, 0);
+
+ return 0;
+ }
+@@ -1504,6 +1506,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
+ int port = xmit_work->dp->index;
+ int retries = 10;
+
++ ocelot_lock_inj_grp(ocelot, 0);
++
+ do {
+ if (ocelot_can_inject(ocelot, 0))
+ break;
+@@ -1512,6 +1516,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
+ } while (--retries);
+
+ if (!retries) {
++ ocelot_unlock_inj_grp(ocelot, 0);
+ dev_err(ocelot->dev, "port %d failed to inject skb\n",
+ port);
+ ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+@@ -1521,6 +1526,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
+
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
++ ocelot_unlock_inj_grp(ocelot, 0);
++
+ consume_skb(skb);
+ kfree(xmit_work);
+ }
+@@ -1671,6 +1678,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
+ if (!felix->info->quirk_no_xtr_irq)
+ return false;
+
++ ocelot_lock_xtr_grp(ocelot, grp);
++
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
+ struct sk_buff *skb;
+ unsigned int type;
+@@ -1707,6 +1716,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
+ ocelot_drain_cpu_queue(ocelot, 0);
+ }
+
++ ocelot_unlock_xtr_grp(ocelot, grp);
++
+ return true;
+ }
+
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 3c5509e75a5486..afb5dae4439ce6 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
+ /* Hardware errata - Admin config could not be overwritten if
+ * config is pending, need reset the TAS module
+ */
+- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+- ret = -EBUSY;
+- goto err_reset_tc;
++ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
++ if (val & QSYS_TAG_CONFIG_ENABLE) {
++ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
++ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
++ ret = -EBUSY;
++ goto err_reset_tc;
++ }
+ }
+
+ ocelot_rmw_rix(ocelot,
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index 4ce68e655a632d..17c28fe2d74337 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -949,10 +949,15 @@ qca8k_mdio_register(struct qca8k_priv *priv)
+ struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
+ struct mii_bus *bus;
++ int err;
++
++ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+
+ bus = devm_mdiobus_alloc(ds->dev);
+- if (!bus)
+- return -ENOMEM;
++ if (!bus) {
++ err = -ENOMEM;
++ goto out_put_node;
++ }
+
+ bus->priv = (void *)priv;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+@@ -962,12 +967,12 @@ qca8k_mdio_register(struct qca8k_priv *priv)
+ ds->slave_mii_bus = bus;
+
+ /* Check if the devicetree declare the port:phy mapping */
+- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+- return devm_of_mdiobus_register(priv->dev, bus, mdio);
++ err = devm_of_mdiobus_register(priv->dev, bus, mdio);
++ goto out_put_node;
+ }
+
+ /* If a mapping can't be found the legacy mapping is used,
+@@ -976,7 +981,13 @@ qca8k_mdio_register(struct qca8k_priv *priv)
+ bus->name = "qca8k-legacy slave mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+- return devm_mdiobus_register(priv->dev, bus);
++
++ err = devm_mdiobus_register(priv->dev, bus);
++
++out_put_node:
++ of_node_put(mdio);
++
++ return err;
+ }
+
+ static int
+@@ -2038,12 +2049,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
+ priv->info = of_device_get_match_data(priv->dev);
+
+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+- GPIOD_ASIS);
++ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ if (priv->reset_gpio) {
+- gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ /* The active low duration must be greater than 10 ms
+ * and checkpatch.pl wants 20 ms.
+ */
+diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
+index e8c16e76e34bb9..77a79c24940222 100644
+--- a/drivers/net/dsa/qca/qca8k-leds.c
++++ b/drivers/net/dsa/qca/qca8k-leds.c
+@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
+ init_data.devname_mandatory = true;
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id,
+ port_num);
+- if (!init_data.devicename)
++ if (!init_data.devicename) {
++ fwnode_handle_put(led);
++ fwnode_handle_put(leds);
+ return -ENOMEM;
++ }
+
+ ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
+ if (ret)
+@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
+ kfree(init_data.devicename);
+ }
+
++ fwnode_handle_put(leds);
+ return 0;
+ }
+
+@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+ * the correct port for LED setup.
+ */
+ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(port);
++ fwnode_handle_put(ports);
+ return ret;
++ }
+ }
+
++ fwnode_handle_put(ports);
+ return 0;
+ }
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
+index 7868ef237f6c00..4accfec7c73e64 100644
+--- a/drivers/net/dsa/realtek/rtl8366rb.c
++++ b/drivers/net/dsa/realtek/rtl8366rb.c
+@@ -186,7 +186,12 @@
+ #define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+ #define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
++/* LED trigger event for each group */
+ #define RTL8366RB_LED_CTRL_REG 0x0431
++#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
++ (4 * (led_group))
++#define RTL8366RB_LED_CTRL_MASK(led_group) \
++ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+ #define RTL8366RB_LED_OFF 0x0
+ #define RTL8366RB_LED_DUP_COL 0x1
+ #define RTL8366RB_LED_LINK_ACT 0x2
+@@ -203,6 +208,11 @@
+ #define RTL8366RB_LED_LINK_TX 0xd
+ #define RTL8366RB_LED_MASTER 0xe
+ #define RTL8366RB_LED_FORCE 0xf
++
++/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
++ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
++ * RTL8366RB_LED_FORCE. Otherwise, it is ignored.
++ */
+ #define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+ #define RTL8366RB_LED_1_OFFSET 6
+ #define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+@@ -998,28 +1008,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
+ */
+ if (priv->leds_disabled) {
+ /* Turn everything off */
+- regmap_update_bits(priv->map,
+- RTL8366RB_LED_0_1_CTRL_REG,
+- 0x0FFF, 0);
+- regmap_update_bits(priv->map,
+- RTL8366RB_LED_2_3_CTRL_REG,
+- 0x0FFF, 0);
+ regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+- val = RTL8366RB_LED_OFF;
+- } else {
+- /* TODO: make this configurable per LED */
+- val = RTL8366RB_LED_FORCE;
+- }
+- for (i = 0; i < 4; i++) {
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_LED_CTRL_REG,
+- 0xf << (i * 4),
+- val << (i * 4));
+- if (ret)
+- return ret;
++
++ for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
++ val = RTL8366RB_LED_OFF << RTL8366RB_LED_CTRL_OFFSET(i);
++ ret = regmap_update_bits(priv->map,
++ RTL8366RB_LED_CTRL_REG,
++ RTL8366RB_LED_CTRL_MASK(i),
++ val);
++ if (ret)
++ return ret;
++ }
+ }
+
+ ret = rtl8366_reset_vlan(priv);
+@@ -1134,52 +1136,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ }
+ }
+
+-static void rb8366rb_set_port_led(struct realtek_priv *priv,
+- int port, bool enable)
+-{
+- u16 val = enable ? 0x3f : 0;
+- int ret;
+-
+- if (priv->leds_disabled)
+- return;
+-
+- switch (port) {
+- case 0:
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_LED_0_1_CTRL_REG,
+- 0x3F, val);
+- break;
+- case 1:
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_LED_0_1_CTRL_REG,
+- 0x3F << RTL8366RB_LED_1_OFFSET,
+- val << RTL8366RB_LED_1_OFFSET);
+- break;
+- case 2:
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_LED_2_3_CTRL_REG,
+- 0x3F, val);
+- break;
+- case 3:
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_LED_2_3_CTRL_REG,
+- 0x3F << RTL8366RB_LED_3_OFFSET,
+- val << RTL8366RB_LED_3_OFFSET);
+- break;
+- case 4:
+- ret = regmap_update_bits(priv->map,
+- RTL8366RB_INTERRUPT_CONTROL_REG,
+- RTL8366RB_P4_RGMII_LED,
+- enable ? RTL8366RB_P4_RGMII_LED : 0);
+- break;
+- default:
+- dev_err(priv->dev, "no LED for port %d\n", port);
+- return;
+- }
+- if (ret)
+- dev_err(priv->dev, "error updating LED on port %d\n", port);
+-}
+-
+ static int
+ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+@@ -1193,7 +1149,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ if (ret)
+ return ret;
+
+- rb8366rb_set_port_led(priv, port, true);
+ return 0;
+ }
+
+@@ -1208,8 +1163,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
+ BIT(port));
+ if (ret)
+ return;
+-
+- rb8366rb_set_port_led(priv, port, false);
+ }
+
+ static int
+diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
+index 833e55e4b96129..52ddb4ef259e93 100644
+--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
++++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
+@@ -94,7 +94,7 @@ int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
+ return tmp & 0xffff;
+ }
+
+-int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
++int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
+ {
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 4f09e7438f3b93..a28bf5433ea727 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/iopoll.h>
+ #include <linux/of.h>
+ #include <linux/of_mdio.h>
+ #include <linux/bitops.h>
+@@ -33,10 +34,14 @@
+ #define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
+ #define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
+-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
++#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */
+ #define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+
++/* MII Block subblock */
++#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
++#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
++
+ #define CPU_PORT 6 /* CPU port */
+
+ /* MAC Block registers */
+@@ -195,6 +200,8 @@
+ #define VSC73XX_MII_CMD 0x1
+ #define VSC73XX_MII_DATA 0x2
+
++#define VSC73XX_MII_STAT_BUSY BIT(3)
++
+ /* Arbiter block 5 registers */
+ #define VSC73XX_ARBEMPTY 0x0c
+ #define VSC73XX_ARBDISC 0x0e
+@@ -268,6 +275,10 @@
+ #define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
+ #define IS_739X(a) (IS_7395(a) || IS_7398(a))
+
++#define VSC73XX_POLL_SLEEP_US 1000
++#define VSC73XX_MDIO_POLL_SLEEP_US 5
++#define VSC73XX_POLL_TIMEOUT_US 10000
++
+ struct vsc73xx_counter {
+ u8 counter;
+ const char *name;
+@@ -359,13 +370,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+ break;
+
+ case VSC73XX_BLOCK_MII:
+- case VSC73XX_BLOCK_CAPTURE:
+ case VSC73XX_BLOCK_ARBITER:
+ switch (subblock) {
+ case 0 ... 1:
+ return 1;
+ }
+ break;
++ case VSC73XX_BLOCK_CAPTURE:
++ switch (subblock) {
++ case 0 ... 4:
++ case 6 ... 7:
++ return 1;
++ }
++ break;
+ }
+
+ return 0;
+@@ -483,6 +500,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
+ return 0;
+ }
+
++static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
++{
++ int ret, err;
++ u32 val;
++
++ ret = read_poll_timeout(vsc73xx_read, err,
++ err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
++ VSC73XX_MDIO_POLL_SLEEP_US,
++ VSC73XX_POLL_TIMEOUT_US, false, vsc,
++ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
++ VSC73XX_MII_STAT, &val);
++ if (ret)
++ return ret;
++ return err;
++}
++
+ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+ {
+ struct vsc73xx *vsc = ds->priv;
+@@ -490,12 +523,20 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+ u32 val;
+ int ret;
+
++ ret = vsc73xx_mdio_busy_check(vsc);
++ if (ret)
++ return ret;
++
+ /* Setting bit 26 means "read" */
+ cmd = BIT(26) | (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+- msleep(2);
++
++ ret = vsc73xx_mdio_busy_check(vsc);
++ if (ret)
++ return ret;
++
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ if (ret)
+ return ret;
+@@ -519,6 +560,10 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u32 cmd;
+ int ret;
+
++ ret = vsc73xx_mdio_busy_check(vsc);
++ if (ret)
++ return ret;
++
+ /* It was found through tedious experiments that this router
+ * chip really hates to have it's PHYs reset. They
+ * never recover if that happens: autonegotiation stops
+@@ -530,7 +575,7 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ return 0;
+ }
+
+- cmd = (phy << 21) | (regnum << 16);
++ cmd = (phy << 21) | (regnum << 16) | val;
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+@@ -779,7 +824,7 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ * after a PHY or the CPU port comes up or down.
+ */
+ if (!phydev->link) {
+- int maxloop = 10;
++ int ret, err;
+
+ dev_dbg(vsc->dev, "port %d: went down\n",
+ port);
+@@ -794,19 +839,17 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+- VSC73XX_ARBEMPTY, &val);
+- while (!(val & BIT(port))) {
+- msleep(1);
+- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+- VSC73XX_ARBEMPTY, &val);
+- if (--maxloop == 0) {
+- dev_err(vsc->dev,
+- "timeout waiting for block arbiter\n");
+- /* Continue anyway */
+- break;
+- }
+- }
++ ret = read_poll_timeout(vsc73xx_read, err,
++ err < 0 || (val & BIT(port)),
++ VSC73XX_POLL_SLEEP_US,
++ VSC73XX_POLL_TIMEOUT_US, false,
++ vsc, VSC73XX_BLOCK_ARBITER, 0,
++ VSC73XX_ARBEMPTY, &val);
++ if (ret)
++ dev_err(vsc->dev,
++ "timeout waiting for block arbiter\n");
++ else if (err < 0)
++ dev_err(vsc->dev, "error reading arbiter\n");
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+@@ -1118,6 +1161,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+
+ vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ vsc->chipid);
++ if (!vsc->gc.label)
++ return -ENOMEM;
+ vsc->gc.ngpio = 4;
+ vsc->gc.owner = THIS_MODULE;
+ vsc->gc.parent = vsc->dev;
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index c4b1b0aa438ac5..5560cf683eb736 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -71,6 +71,7 @@ static int dummy_dev_init(struct net_device *dev)
+ if (!dev->lstats)
+ return -ENOMEM;
+
++ netdev_lockdep_set_classes(dev);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
+index d7c274af6d4dae..3c26176316a38b 100644
+--- a/drivers/net/ethernet/adi/adin1110.c
++++ b/drivers/net/ethernet/adi/adin1110.c
+@@ -318,11 +318,11 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+- return ret;
++ return -EINVAL;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+- return ret;
++ return -EINVAL;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+ memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
+diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
+index f1f752a8f7bb4f..6ab615365172e8 100644
+--- a/drivers/net/ethernet/amazon/ena/Makefile
++++ b/drivers/net/ethernet/amazon/ena/Makefile
+@@ -5,4 +5,4 @@
+
+ obj-$(CONFIG_ENA_ETHERNET) += ena.o
+
+-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
++ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 633b321d7fdd97..276f6a8631fb12 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+
+- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+- &sq->dma_addr, GFP_KERNEL);
++ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
+
+ if (!sq->entries) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+
+- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+- &cq->dma_addr, GFP_KERNEL);
++ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
+
+ if (!cq->entries) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+
+ ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+- &aenq->dma_addr, GFP_KERNEL);
++ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
+
+ if (!aenq->entries) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+
+ aenq_caps = 0;
+ aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
+- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
++ aenq_caps |=
++ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
++ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers)) {
+- netdev_err(ena_dev->net_device,
+- "AENQ handlers pointer is NULL\n");
++ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
+ return -EINVAL;
+ }
+
+@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
+ }
+
+ if (unlikely(!admin_queue->comp_ctx)) {
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Completion context is NULL\n");
++ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
+ return NULL;
+ }
+
+ if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Completion context is occupied\n");
++ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
+ return NULL;
+ }
+
+@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
+ /* In case of queue FULL */
+ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
+ if (cnt >= admin_queue->q_depth) {
+- netdev_dbg(admin_queue->ena_dev->net_device,
+- "Admin queue is full.\n");
++ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(-ENOSPC);
+ }
+@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+- admin_queue->comp_ctx =
+- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
++ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!admin_queue->comp_ctx)) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+ return -ENOMEM;
+@@ -320,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+ {
+ size_t size;
+- int dev_node = 0;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+
+@@ -333,23 +324,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+- dev_node = dev_to_node(ena_dev->dmadev);
+- set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+- dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_sq->desc_addr.phys_addr,
++ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+- set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_sq->desc_addr.phys_addr,
+- GFP_KERNEL);
++ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
+ }
+
+ if (!io_sq->desc_addr.virt_addr) {
+- netdev_err(ena_dev->net_device,
+- "Memory allocation failed\n");
++ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+@@ -362,21 +347,16 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+- size = io_sq->bounce_buf_ctrl.buffer_size *
++ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
+- dev_node = dev_to_node(ena_dev->dmadev);
+- set_dev_node(ena_dev->dmadev, ctx->numa_node);
+- io_sq->bounce_buf_ctrl.base_buffer =
+- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+- set_dev_node(ena_dev->dmadev, dev_node);
++ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+- netdev_err(ena_dev->net_device,
+- "Bounce buffer memory allocation failed\n");
++ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+@@ -410,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+ {
+ size_t size;
+- int prev_node = 0;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+
+@@ -422,16 +401,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+- prev_node = dev_to_node(ena_dev->dmadev);
+- set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_cq->cdesc_addr.virt_addr =
+- dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+- set_dev_node(ena_dev->dmadev, prev_node);
++ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ io_cq->cdesc_addr.virt_addr =
+- dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_cq->cdesc_addr.phys_addr,
++ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
+ }
+
+@@ -514,8 +488,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ u8 comp_status)
+ {
+ if (unlikely(comp_status != 0))
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Admin command failed[%u]\n", comp_status);
++ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
++ comp_status);
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+@@ -580,8 +554,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Command was aborted\n");
++ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -589,8 +562,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+ goto err;
+ }
+
+- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+- comp_ctx->status);
++ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
+ err:
+@@ -634,8 +606,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set LLQ configurations: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
+
+ return ret;
+ }
+@@ -658,8 +629,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ llq_default_cfg->llq_header_location;
+ } else {
+ netdev_err(ena_dev->net_device,
+- "Invalid header location control, supported: 0x%x\n",
+- supported_feat);
++ "Invalid header location control, supported: 0x%x\n", supported_feat);
+ return -EINVAL;
+ }
+
+@@ -681,8 +651,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+
+ netdev_err(ena_dev->net_device,
+ "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+- llq_default_cfg->llq_stride_ctrl,
+- supported_feat, llq_info->desc_stride_ctrl);
++ llq_default_cfg->llq_stride_ctrl, supported_feat,
++ llq_info->desc_stride_ctrl);
+ }
+ } else {
+ llq_info->desc_stride_ctrl = 0;
+@@ -704,8 +674,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ llq_info->desc_list_entry_size = 256;
+ } else {
+ netdev_err(ena_dev->net_device,
+- "Invalid entry_size_ctrl, supported: 0x%x\n",
+- supported_feat);
++ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+ return -EINVAL;
+ }
+
+@@ -750,8 +719,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+
+ netdev_err(ena_dev->net_device,
+ "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+- llq_default_cfg->llq_num_decs_before_header,
+- supported_feat, llq_info->descs_num_before_header);
++ llq_default_cfg->llq_num_decs_before_header, supported_feat,
++ llq_info->descs_num_before_header);
+ }
+ /* Check for accelerated queue supported */
+ llq_accel_mode_get = llq_features->accel_mode.u.get;
+@@ -767,8 +736,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+
+ rc = ena_com_set_llq(ena_dev);
+ if (rc)
+- netdev_err(ena_dev->net_device,
+- "Cannot set LLQ configuration: %d\n", rc);
++ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
+
+ return rc;
+ }
+@@ -780,8 +748,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+ int ret;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+- usecs_to_jiffies(
+- admin_queue->completion_timeout));
++ usecs_to_jiffies(admin_queue->completion_timeout));
+
+ /* In case the command wasn't completed find out the root cause.
+ * There might be 2 kinds of errors
+@@ -797,8 +764,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+ if (comp_ctx->status == ENA_CMD_COMPLETED) {
+ netdev_err(admin_queue->ena_dev->net_device,
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+- comp_ctx->cmd_opcode,
+- admin_queue->auto_polling ? "ON" : "OFF");
++ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+ /* Check if fallback to polling is enabled */
+ if (admin_queue->auto_polling)
+ admin_queue->polling = true;
+@@ -867,15 +833,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+ if (unlikely(i == timeout)) {
+ netdev_err(ena_dev->net_device,
+ "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
+- mmio_read->seq_num, offset, read_resp->req_id,
+- read_resp->reg_off);
++ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+- netdev_err(ena_dev->net_device,
+- "Read failure: wrong offset provided\n");
++ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+@@ -934,8 +898,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+- netdev_err(ena_dev->net_device,
+- "Failed to destroy io sq error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+ }
+@@ -949,8 +912,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+- dma_free_coherent(ena_dev->dmadev, size,
+- io_cq->cdesc_addr.virt_addr,
++ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+@@ -959,8 +921,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+- dma_free_coherent(ena_dev->dmadev, size,
+- io_sq->desc_addr.virt_addr,
++ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+@@ -985,8 +946,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+- netdev_err(ena_dev->net_device,
+- "Reg read timeout occurred\n");
++ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+@@ -1026,8 +986,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+- feature_id);
++ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
+ return -EOPNOTSUPP;
+ }
+
+@@ -1064,8 +1023,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device,
+- "Failed to submit get_feature command %d error: %d\n",
+- feature_id, ret);
++ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
+
+ return ret;
+ }
+@@ -1104,13 +1062,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
+
+- if (!ena_com_check_supported_feature_id(ena_dev,
+- ENA_ADMIN_RSS_HASH_FUNCTION))
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+ return -EOPNOTSUPP;
+
+- rss->hash_key =
+- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+- &rss->hash_key_dma_addr, GFP_KERNEL);
++ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
++ &rss->hash_key_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_key))
+ return -ENOMEM;
+@@ -1123,8 +1079,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+- rss->hash_key, rss->hash_key_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
++ rss->hash_key_dma_addr);
+ rss->hash_key = NULL;
+ }
+
+@@ -1132,9 +1088,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
+
+- rss->hash_ctrl =
+- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
++ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
++ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_ctrl))
+ return -ENOMEM;
+@@ -1147,8 +1102,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
++ rss->hash_ctrl_dma_addr);
+ rss->hash_ctrl = NULL;
+ }
+
+@@ -1177,15 +1132,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+- rss->rss_ind_tbl =
+- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
++ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
++ GFP_KERNEL);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1ULL << log_size) * sizeof(u16);
+- rss->host_rss_ind_tbl =
+- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
++ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+@@ -1197,8 +1150,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+- rss->rss_ind_tbl_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
+ rss->rss_ind_tbl = NULL;
+ mem_err1:
+ rss->tbl_log_size = 0;
+@@ -1261,8 +1213,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Memory address set failed\n");
++ netdev_err(ena_dev->net_device, "Memory address set failed\n");
+ return ret;
+ }
+ }
+@@ -1273,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to create IO SQ. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+@@ -1292,8 +1242,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ cmd_completion.llq_descriptors_offset);
+ }
+
+- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
+- io_sq->idx, io_sq->q_depth);
++ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+ }
+@@ -1420,8 +1369,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to create IO CQ. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+@@ -1440,8 +1388,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
+- io_cq->idx, io_cq->q_depth);
++ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+ }
+@@ -1451,8 +1398,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_cq **io_cq)
+ {
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+- netdev_err(ena_dev->net_device,
+- "Invalid queue number %d but the max is %d\n", qid,
++ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+@@ -1492,8 +1438,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+- ena_delay_exponential_backoff_us(exp++,
+- ena_dev->ena_min_poll_delay_us);
++ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ }
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -1519,8 +1464,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+- netdev_err(ena_dev->net_device,
+- "Failed to destroy IO CQ. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -1588,8 +1532,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to config AENQ ret: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+ }
+@@ -1610,8 +1553,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+ netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
+
+ if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
+- width);
++ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
+ return -EINVAL;
+ }
+
+@@ -1633,19 +1575,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
++ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
+- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
++ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+- dev_info(ena_dev->dmadev,
+- "ENA controller version: %d.%d.%d implementation version %d\n",
++ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+@@ -1694,20 +1633,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
+- sq->dma_addr);
++ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
+- cq->dma_addr);
++ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ if (ena_dev->aenq.entries)
+- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
+- aenq->dma_addr);
++ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
+ aenq->entries = NULL;
+ }
+
+@@ -1733,10 +1669,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ spin_lock_init(&mmio_read->lock);
+- mmio_read->read_resp =
+- dma_alloc_coherent(ena_dev->dmadev,
+- sizeof(*mmio_read->read_resp),
+- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
++ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
++ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (unlikely(!mmio_read->read_resp))
+ goto err;
+
+@@ -1767,8 +1701,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
++ mmio_read->read_resp_dma_addr);
+
+ mmio_read->read_resp = NULL;
+ }
+@@ -1800,8 +1734,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+- netdev_err(ena_dev->net_device,
+- "Device isn't ready, abort com init\n");
++ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
+ return -ENODEV;
+ }
+
+@@ -1878,8 +1811,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ int ret;
+
+ if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+- netdev_err(ena_dev->net_device,
+- "Qid (%d) is bigger than max num of queues (%d)\n",
++ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+@@ -1905,8 +1837,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+
+ if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+- io_sq->tx_max_header_size =
+- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
++ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ if (ret)
+@@ -1938,8 +1869,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+- netdev_err(ena_dev->net_device,
+- "Qid (%d) is bigger than max num of queues (%d)\n",
++ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+@@ -1983,8 +1913,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ if (rc)
+ return rc;
+
+- if (get_resp.u.max_queue_ext.version !=
+- ENA_FEATURE_MAX_QUEUE_EXT_VER)
++ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ return -EINVAL;
+
+ memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+@@ -2025,18 +1954,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
+
+ if (!rc)
+- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+- sizeof(get_resp.u.hw_hints));
++ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
+ else if (rc == -EOPNOTSUPP)
+- memset(&get_feat_ctx->hw_hints, 0x0,
+- sizeof(get_feat_ctx->hw_hints));
++ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
+ if (!rc)
+- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+- sizeof(get_resp.u.llq));
++ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+@@ -2084,8 +2010,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+- while ((READ_ONCE(aenq_common->flags) &
+- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
++ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Make sure the phase bit (ownership) is as expected before
+ * reading the rest of the descriptor.
+ */
+@@ -2094,8 +2019,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ timestamp = (u64)aenq_common->timestamp_low |
+ ((u64)aenq_common->timestamp_high << 32);
+
+- netdev_dbg(ena_dev->net_device,
+- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
++ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
+
+ /* Handle specific event*/
+@@ -2124,8 +2048,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+- writel_relaxed((u32)aenq->head,
+- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
++ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ }
+
+ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+@@ -2137,15 +2060,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+- (cap == ENA_MMIO_READ_TIMEOUT))) {
++ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
+ netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+- netdev_err(ena_dev->net_device,
+- "Device isn't ready, can't reset device\n");
++ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
+ return -EINVAL;
+ }
+
+@@ -2168,8 +2089,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+- netdev_err(ena_dev->net_device,
+- "Reset indication didn't turn on\n");
++ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
+ return rc;
+ }
+
+@@ -2177,8 +2097,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+- netdev_err(ena_dev->net_device,
+- "Reset indication didn't turn off\n");
++ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
+ return rc;
+ }
+
+@@ -2215,8 +2134,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to get stats. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -2228,8 +2146,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ int ret;
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+- netdev_err(ena_dev->net_device,
+- "Capability %d isn't supported\n",
++ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
+@@ -2266,8 +2183,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+- ENA_ADMIN_MTU);
++ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return -EOPNOTSUPP;
+ }
+
+@@ -2286,8 +2202,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set mtu %d. error: %d\n", mtu, ret);
++ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
+
+ return ret;
+ }
+@@ -2301,8 +2216,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to get offload capabilities %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
+ return ret;
+ }
+
+@@ -2320,8 +2234,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+- if (!ena_com_check_supported_feature_id(ena_dev,
+- ENA_ADMIN_RSS_HASH_FUNCTION)) {
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return -EOPNOTSUPP;
+@@ -2334,8 +2247,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ return ret;
+
+ if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
+- netdev_err(ena_dev->net_device,
+- "Func hash %d isn't supported by device, abort\n",
++ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return -EOPNOTSUPP;
+ }
+@@ -2365,8 +2277,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to set hash function %d. error: %d\n",
++ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return -EINVAL;
+ }
+@@ -2398,16 +2309,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ return rc;
+
+ if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
+- netdev_err(ena_dev->net_device,
+- "Flow hash function %d isn't supported\n", func);
++ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
+ return -EOPNOTSUPP;
+ }
+
+ if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+ if (key_len != sizeof(hash_key->key)) {
+ netdev_err(ena_dev->net_device,
+- "key len (%u) doesn't equal the supported size (%zu)\n",
+- key_len, sizeof(hash_key->key));
++ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
++ sizeof(hash_key->key));
+ return -EINVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+@@ -2495,8 +2405,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+- if (!ena_com_check_supported_feature_id(ena_dev,
+- ENA_ADMIN_RSS_HASH_INPUT)) {
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return -EOPNOTSUPP;
+@@ -2527,8 +2436,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set hash input. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -2605,8 +2513,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ int rc;
+
+ if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
+- proto);
++ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
+ return -EINVAL;
+ }
+
+@@ -2658,8 +2565,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+- if (!ena_com_check_supported_feature_id(
+- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
+ return -EOPNOTSUPP;
+@@ -2699,8 +2605,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set indirect table. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -2779,9 +2684,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+ {
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+- host_attr->host_info =
+- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+- &host_attr->host_info_dma_addr, GFP_KERNEL);
++ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
++ &host_attr->host_info_dma_addr, GFP_KERNEL);
+ if (unlikely(!host_attr->host_info))
+ return -ENOMEM;
+
+@@ -2827,8 +2731,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+
+ if (host_attr->debug_area_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
+- host_attr->debug_area_virt_addr,
+- host_attr->debug_area_dma_addr);
++ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+ }
+@@ -2877,8 +2780,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set host attributes: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
+
+ return ret;
+ }
+@@ -2896,8 +2798,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
+ u32 *intr_moder_interval)
+ {
+ if (!intr_delay_resolution) {
+- netdev_err(ena_dev->net_device,
+- "Illegal interrupt delay granularity value\n");
++ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
+ return -EFAULT;
+ }
+
+@@ -2935,14 +2836,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+- netdev_dbg(ena_dev->net_device,
+- "Feature %d isn't supported\n",
++ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ netdev_err(ena_dev->net_device,
+- "Failed to get interrupt moderation admin cmd. rc: %d\n",
+- rc);
++ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+@@ -2990,8 +2889,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
+
+ if (unlikely(ena_dev->tx_max_header_size == 0)) {
+- netdev_err(ena_dev->net_device,
+- "The size of the LLQ entry is smaller than needed\n");
++ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index 3d6f0a466a9ed4..933e619b3a3134 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+- desc_phase = (READ_ONCE(cdesc->status) &
+- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
++ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+
+ io_sq->entries_in_tx_burst_left--;
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+- io_sq->qid, io_sq->entries_in_tx_burst_left);
++ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
++ io_sq->entries_in_tx_burst_left);
+ }
+
+ /* Make sure everything was written into the bounce buffer before
+@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ wmb();
+
+ /* The line is completed. Copy it to dev */
+- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
++ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
++ (llq_info->desc_list_entry_size) / 8);
+
+ io_sq->tail++;
+
+@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ header_offset =
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+- if (unlikely((header_offset + header_len) >
+- llq_info->desc_list_entry_size)) {
++ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Trying to write header larger than llq entry can accommodate\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(!bounce_buffer)) {
+- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Bounce buffer is NULL\n");
++ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+ return -EFAULT;
+ }
+
+@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+ if (unlikely(!bounce_buffer)) {
+- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Bounce buffer is NULL\n");
++ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+ return NULL;
+ }
+
+@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+- last = (READ_ONCE(cdesc->status) &
+- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
++ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+@@ -328,9 +323,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ * compare it to the stored version, just create the meta
+ */
+ if (io_sq->disable_meta_caching) {
+- if (unlikely(!ena_tx_ctx->meta_valid))
+- return -EINVAL;
+-
+ *have_meta = true;
+ return ena_com_create_meta(io_sq, ena_meta);
+ }
+@@ -372,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
++ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
++ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ }
+
+ /*****************************************************************************/
+@@ -406,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Header size is too large %d max header: %d\n",
+- header_len, io_sq->tx_max_header_size);
++ "Header size is too large %d max header: %d\n", header_len,
++ io_sq->tx_max_header_size);
+ return -EINVAL;
+ }
+
+- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+- !buffer_to_push)) {
++ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Push header wasn't provided in LLQ mode\n");
+ return -EINVAL;
+@@ -559,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ }
+
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+- nb_hw_desc);
++ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
+
+ if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+- ena_rx_ctx->max_bufs);
++ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
+ return -ENOSPC;
+ }
+
+@@ -589,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ io_sq->next_to_comp += nb_hw_desc;
+
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+- io_sq->qid, io_sq->next_to_comp);
++ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
++ io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
+@@ -627,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ desc->req_id = req_id;
+
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+- __func__, io_sq->qid, req_id);
++ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
++ req_id);
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+index 372b259279eca3..6eba0346465253 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+@@ -145,8 +145,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+ }
+
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Queue: %d num_descs: %d num_entries_needed: %d\n",
+- io_sq->qid, num_descs, num_entries_needed);
++ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
++ num_entries_needed);
+
+ return num_entries_needed > io_sq->entries_in_tx_burst_left;
+ }
+@@ -157,15 +157,14 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+ u16 tail = io_sq->tail;
+
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Write submission queue doorbell for queue: %d tail: %d\n",
+- io_sq->qid, tail);
++ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
+
+ writel(tail, io_sq->db_addr);
+
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Reset available entries in tx burst for queue %d to %d\n",
+- io_sq->qid, max_entries_in_tx_burst);
++ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
++ max_entries_in_tx_burst);
+ io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+ }
+
+@@ -248,8 +247,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+
+ *req_id = READ_ONCE(cdesc->req_id);
+ if (unlikely(*req_id >= io_cq->q_depth)) {
+- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "Invalid req id %d\n", cdesc->req_id);
++ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
++ cdesc->req_id);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index d671df4b76bc71..d901877544445e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -7,6 +7,7 @@
+ #include <linux/pci.h>
+
+ #include "ena_netdev.h"
++#include "ena_xdp.h"
+
+ struct ena_stats {
+ char name[ETH_GSTRING_LEN];
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index f955bde10cf90a..0d201a57d7e29e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -19,8 +19,8 @@
+ #include <net/ip.h>
+
+ #include "ena_netdev.h"
+-#include <linux/bpf_trace.h>
+ #include "ena_pci_id_tbl.h"
++#include "ena_xdp.h"
+
+ MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
+ MODULE_DESCRIPTION(DEVICE_NAME);
+@@ -45,51 +45,6 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
+ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+ static int ena_restore_device(struct ena_adapter *adapter);
+
+-static void ena_init_io_rings(struct ena_adapter *adapter,
+- int first_index, int count);
+-static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
+- int count);
+-static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
+- int count);
+-static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
+-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+- int first_index,
+- int count);
+-static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
+-static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
+-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
+-static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
+-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+-static void ena_napi_disable_in_range(struct ena_adapter *adapter,
+- int first_index, int count);
+-static void ena_napi_enable_in_range(struct ena_adapter *adapter,
+- int first_index, int count);
+-static int ena_up(struct ena_adapter *adapter);
+-static void ena_down(struct ena_adapter *adapter);
+-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+- struct ena_ring *rx_ring);
+-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+- struct ena_ring *rx_ring);
+-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+- struct ena_tx_buffer *tx_info);
+-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+- int first_index, int count);
+-
+-/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+-static void ena_increase_stat(u64 *statp, u64 cnt,
+- struct u64_stats_sync *syncp)
+-{
+- u64_stats_update_begin(syncp);
+- (*statp) += cnt;
+- u64_stats_update_end(syncp);
+-}
+-
+-static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
+-{
+- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
+-}
+-
+ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ struct ena_adapter *adapter = netdev_priv(dev);
+@@ -133,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
+ return ret;
+ }
+
+-static int ena_xmit_common(struct net_device *dev,
+- struct ena_ring *ring,
+- struct ena_tx_buffer *tx_info,
+- struct ena_com_tx_ctx *ena_tx_ctx,
+- u16 next_to_use,
+- u32 bytes)
++int ena_xmit_common(struct ena_adapter *adapter,
++ struct ena_ring *ring,
++ struct ena_tx_buffer *tx_info,
++ struct ena_com_tx_ctx *ena_tx_ctx,
++ u16 next_to_use,
++ u32 bytes)
+ {
+- struct ena_adapter *adapter = netdev_priv(dev);
+ int rc, nb_hw_desc;
+
+ if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
+ ena_tx_ctx))) {
+- netif_dbg(adapter, tx_queued, dev,
++ netif_dbg(adapter, tx_queued, adapter->netdev,
+ "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ ring->qid);
+ ena_ring_tx_doorbell(ring);
+@@ -160,13 +114,11 @@ static int ena_xmit_common(struct net_device *dev,
+ * ena_com_prepare_tx() are fatal and therefore require a device reset.
+ */
+ if (unlikely(rc)) {
+- netif_err(adapter, tx_queued, dev,
++ netif_err(adapter, tx_queued, adapter->netdev,
+ "Failed to prepare tx bufs\n");
+- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+- &ring->syncp);
++ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
+ if (rc != -ENOMEM)
+- ena_reset_device(adapter,
+- ENA_REGS_RESET_DRIVER_INVALID_STATE);
++ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ return rc;
+ }
+
+@@ -184,468 +136,6 @@ static int ena_xmit_common(struct net_device *dev,
+ return 0;
+ }
+
+-/* This is the XDP napi callback. XDP queues use a separate napi callback
+- * than Rx/Tx queues.
+- */
+-static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+-{
+- struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+- u32 xdp_work_done, xdp_budget;
+- struct ena_ring *xdp_ring;
+- int napi_comp_call = 0;
+- int ret;
+-
+- xdp_ring = ena_napi->xdp_ring;
+-
+- xdp_budget = budget;
+-
+- if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
+- test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
+- napi_complete_done(napi, 0);
+- return 0;
+- }
+-
+- xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
+-
+- /* If the device is about to reset or down, avoid unmask
+- * the interrupt and return 0 so NAPI won't reschedule
+- */
+- if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
+- napi_complete_done(napi, 0);
+- ret = 0;
+- } else if (xdp_budget > xdp_work_done) {
+- napi_comp_call = 1;
+- if (napi_complete_done(napi, xdp_work_done))
+- ena_unmask_interrupt(xdp_ring, NULL);
+- ena_update_ring_numa_node(xdp_ring, NULL);
+- ret = xdp_work_done;
+- } else {
+- ret = xdp_budget;
+- }
+-
+- u64_stats_update_begin(&xdp_ring->syncp);
+- xdp_ring->tx_stats.napi_comp += napi_comp_call;
+- xdp_ring->tx_stats.tx_poll++;
+- u64_stats_update_end(&xdp_ring->syncp);
+- xdp_ring->tx_stats.last_napi_jiffies = jiffies;
+-
+- return ret;
+-}
+-
+-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+- struct ena_tx_buffer *tx_info,
+- struct xdp_frame *xdpf,
+- struct ena_com_tx_ctx *ena_tx_ctx)
+-{
+- struct ena_adapter *adapter = xdp_ring->adapter;
+- struct ena_com_buf *ena_buf;
+- int push_len = 0;
+- dma_addr_t dma;
+- void *data;
+- u32 size;
+-
+- tx_info->xdpf = xdpf;
+- data = tx_info->xdpf->data;
+- size = tx_info->xdpf->len;
+-
+- if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+- /* Designate part of the packet for LLQ */
+- push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+-
+- ena_tx_ctx->push_header = data;
+-
+- size -= push_len;
+- data += push_len;
+- }
+-
+- ena_tx_ctx->header_len = push_len;
+-
+- if (size > 0) {
+- dma = dma_map_single(xdp_ring->dev,
+- data,
+- size,
+- DMA_TO_DEVICE);
+- if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+- goto error_report_dma_error;
+-
+- tx_info->map_linear_data = 0;
+-
+- ena_buf = tx_info->bufs;
+- ena_buf->paddr = dma;
+- ena_buf->len = size;
+-
+- ena_tx_ctx->ena_bufs = ena_buf;
+- ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+- }
+-
+- return 0;
+-
+-error_report_dma_error:
+- ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
+- &xdp_ring->syncp);
+- netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+-
+- return -EINVAL;
+-}
+-
+-static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+- struct net_device *dev,
+- struct xdp_frame *xdpf,
+- int flags)
+-{
+- struct ena_com_tx_ctx ena_tx_ctx = {};
+- struct ena_tx_buffer *tx_info;
+- u16 next_to_use, req_id;
+- int rc;
+-
+- next_to_use = xdp_ring->next_to_use;
+- req_id = xdp_ring->free_ids[next_to_use];
+- tx_info = &xdp_ring->tx_buffer_info[req_id];
+- tx_info->num_of_bufs = 0;
+-
+- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
+- if (unlikely(rc))
+- return rc;
+-
+- ena_tx_ctx.req_id = req_id;
+-
+- rc = ena_xmit_common(dev,
+- xdp_ring,
+- tx_info,
+- &ena_tx_ctx,
+- next_to_use,
+- xdpf->len);
+- if (rc)
+- goto error_unmap_dma;
+-
+- /* trigger the dma engine. ena_ring_tx_doorbell()
+- * calls a memory barrier inside it.
+- */
+- if (flags & XDP_XMIT_FLUSH)
+- ena_ring_tx_doorbell(xdp_ring);
+-
+- return rc;
+-
+-error_unmap_dma:
+- ena_unmap_tx_buff(xdp_ring, tx_info);
+- tx_info->xdpf = NULL;
+- return rc;
+-}
+-
+-static int ena_xdp_xmit(struct net_device *dev, int n,
+- struct xdp_frame **frames, u32 flags)
+-{
+- struct ena_adapter *adapter = netdev_priv(dev);
+- struct ena_ring *xdp_ring;
+- int qid, i, nxmit = 0;
+-
+- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+- return -EINVAL;
+-
+- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+- return -ENETDOWN;
+-
+- /* We assume that all rings have the same XDP program */
+- if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+- return -ENXIO;
+-
+- qid = smp_processor_id() % adapter->xdp_num_queues;
+- qid += adapter->xdp_first_ring;
+- xdp_ring = &adapter->tx_ring[qid];
+-
+- /* Other CPU ids might try to send through this queue */
+- spin_lock(&xdp_ring->xdp_tx_lock);
+-
+- for (i = 0; i < n; i++) {
+- if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
+- break;
+- nxmit++;
+- }
+-
+- /* Ring doorbell to make device aware of the packets */
+- if (flags & XDP_XMIT_FLUSH)
+- ena_ring_tx_doorbell(xdp_ring);
+-
+- spin_unlock(&xdp_ring->xdp_tx_lock);
+-
+- /* Return number of packets sent */
+- return nxmit;
+-}
+-
+-static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+-{
+- u32 verdict = ENA_XDP_PASS;
+- struct bpf_prog *xdp_prog;
+- struct ena_ring *xdp_ring;
+- struct xdp_frame *xdpf;
+- u64 *xdp_stat;
+-
+- xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+-
+- if (!xdp_prog)
+- goto out;
+-
+- verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+-
+- switch (verdict) {
+- case XDP_TX:
+- xdpf = xdp_convert_buff_to_frame(xdp);
+- if (unlikely(!xdpf)) {
+- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+- verdict = ENA_XDP_DROP;
+- break;
+- }
+-
+- /* Find xmit queue */
+- xdp_ring = rx_ring->xdp_ring;
+-
+- /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+- spin_lock(&xdp_ring->xdp_tx_lock);
+-
+- if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
+- XDP_XMIT_FLUSH))
+- xdp_return_frame(xdpf);
+-
+- spin_unlock(&xdp_ring->xdp_tx_lock);
+- xdp_stat = &rx_ring->rx_stats.xdp_tx;
+- verdict = ENA_XDP_TX;
+- break;
+- case XDP_REDIRECT:
+- if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+- xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+- verdict = ENA_XDP_REDIRECT;
+- break;
+- }
+- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+- verdict = ENA_XDP_DROP;
+- break;
+- case XDP_ABORTED:
+- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+- verdict = ENA_XDP_DROP;
+- break;
+- case XDP_DROP:
+- xdp_stat = &rx_ring->rx_stats.xdp_drop;
+- verdict = ENA_XDP_DROP;
+- break;
+- case XDP_PASS:
+- xdp_stat = &rx_ring->rx_stats.xdp_pass;
+- verdict = ENA_XDP_PASS;
+- break;
+- default:
+- bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
+- xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+- verdict = ENA_XDP_DROP;
+- }
+-
+- ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
+-out:
+- return verdict;
+-}
+-
+-static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+-{
+- adapter->xdp_first_ring = adapter->num_io_queues;
+- adapter->xdp_num_queues = adapter->num_io_queues;
+-
+- ena_init_io_rings(adapter,
+- adapter->xdp_first_ring,
+- adapter->xdp_num_queues);
+-}
+-
+-static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+-{
+- int rc = 0;
+-
+- rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
+- adapter->xdp_num_queues);
+- if (rc)
+- goto setup_err;
+-
+- rc = ena_create_io_tx_queues_in_range(adapter,
+- adapter->xdp_first_ring,
+- adapter->xdp_num_queues);
+- if (rc)
+- goto create_err;
+-
+- return 0;
+-
+-create_err:
+- ena_free_all_io_tx_resources(adapter);
+-setup_err:
+- return rc;
+-}
+-
+-/* Provides a way for both kernel and bpf-prog to know
+- * more about the RX-queue a given XDP frame arrived on.
+- */
+-static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+-{
+- int rc;
+-
+- rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
+-
+- if (rc) {
+- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+- "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+- rx_ring->qid, rc);
+- goto err;
+- }
+-
+- rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+- NULL);
+-
+- if (rc) {
+- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+- "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+- rx_ring->qid, rc);
+- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+- }
+-
+-err:
+- return rc;
+-}
+-
+-static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+-{
+- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+-}
+-
+-static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+- struct bpf_prog *prog,
+- int first, int count)
+-{
+- struct bpf_prog *old_bpf_prog;
+- struct ena_ring *rx_ring;
+- int i = 0;
+-
+- for (i = first; i < count; i++) {
+- rx_ring = &adapter->rx_ring[i];
+- old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+-
+- if (!old_bpf_prog && prog) {
+- ena_xdp_register_rxq_info(rx_ring);
+- rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+- } else if (old_bpf_prog && !prog) {
+- ena_xdp_unregister_rxq_info(rx_ring);
+- rx_ring->rx_headroom = NET_SKB_PAD;
+- }
+- }
+-}
+-
+-static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+- struct bpf_prog *prog)
+-{
+- struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+-
+- ena_xdp_exchange_program_rx_in_range(adapter,
+- prog,
+- 0,
+- adapter->num_io_queues);
+-
+- if (old_bpf_prog)
+- bpf_prog_put(old_bpf_prog);
+-}
+-
+-static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+-{
+- bool was_up;
+- int rc;
+-
+- was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+-
+- if (was_up)
+- ena_down(adapter);
+-
+- adapter->xdp_first_ring = 0;
+- adapter->xdp_num_queues = 0;
+- ena_xdp_exchange_program(adapter, NULL);
+- if (was_up) {
+- rc = ena_up(adapter);
+- if (rc)
+- return rc;
+- }
+- return 0;
+-}
+-
+-static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+-{
+- struct ena_adapter *adapter = netdev_priv(netdev);
+- struct bpf_prog *prog = bpf->prog;
+- struct bpf_prog *old_bpf_prog;
+- int rc, prev_mtu;
+- bool is_up;
+-
+- is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+- rc = ena_xdp_allowed(adapter);
+- if (rc == ENA_XDP_ALLOWED) {
+- old_bpf_prog = adapter->xdp_bpf_prog;
+- if (prog) {
+- if (!is_up) {
+- ena_init_all_xdp_queues(adapter);
+- } else if (!old_bpf_prog) {
+- ena_down(adapter);
+- ena_init_all_xdp_queues(adapter);
+- }
+- ena_xdp_exchange_program(adapter, prog);
+-
+- if (is_up && !old_bpf_prog) {
+- rc = ena_up(adapter);
+- if (rc)
+- return rc;
+- }
+- xdp_features_set_redirect_target(netdev, false);
+- } else if (old_bpf_prog) {
+- xdp_features_clear_redirect_target(netdev);
+- rc = ena_destroy_and_free_all_xdp_queues(adapter);
+- if (rc)
+- return rc;
+- }
+-
+- prev_mtu = netdev->max_mtu;
+- netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+-
+- if (!old_bpf_prog)
+- netif_info(adapter, drv, adapter->netdev,
+- "XDP program is set, changing the max_mtu from %d to %d",
+- prev_mtu, netdev->max_mtu);
+-
+- } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+- netif_err(adapter, drv, adapter->netdev,
+- "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+- netdev->mtu, ENA_XDP_MAX_MTU);
+- NL_SET_ERR_MSG_MOD(bpf->extack,
+- "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+- return -EINVAL;
+- } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+- netif_err(adapter, drv, adapter->netdev,
+- "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+- adapter->num_io_queues, adapter->max_num_io_queues);
+- NL_SET_ERR_MSG_MOD(bpf->extack,
+- "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
+- return -EINVAL;
+- }
+-
+- return 0;
+-}
+-
+-/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+- * program as well as to query the current xdp program id.
+- */
+-static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+-{
+- switch (bpf->command) {
+- case XDP_SETUP_PROG:
+- return ena_xdp_set(netdev, bpf);
+- default:
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
+ {
+ #ifdef CONFIG_RFS_ACCEL
+@@ -687,8 +177,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
+ u64_stats_init(&ring->syncp);
+ }
+
+-static void ena_init_io_rings(struct ena_adapter *adapter,
+- int first_index, int count)
++void ena_init_io_rings(struct ena_adapter *adapter,
++ int first_index, int count)
+ {
+ struct ena_com_dev *ena_dev;
+ struct ena_ring *txr, *rxr;
+@@ -819,9 +309,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
+ tx_ring->push_buf_intermediate_buf = NULL;
+ }
+
+-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+- int first_index,
+- int count)
++int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
++ int first_index, int count)
+ {
+ int i, rc = 0;
+
+@@ -844,8 +333,8 @@ static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ return rc;
+ }
+
+-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+- int first_index, int count)
++void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
++ int first_index, int count)
+ {
+ int i;
+
+@@ -858,7 +347,7 @@ static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ *
+ * Free all transmit software resources
+ */
+-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
++void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+ {
+ ena_free_all_io_tx_resources_in_range(adapter,
+ 0,
+@@ -993,8 +482,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
+ */
+ page = dev_alloc_page();
+ if (!page) {
+- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+- &rx_ring->syncp);
++ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
+ return ERR_PTR(-ENOSPC);
+ }
+
+@@ -1053,8 +541,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info,
+ unsigned long attrs)
+ {
+- dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
+- DMA_BIDIRECTIONAL, attrs);
++ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
++ attrs);
+ }
+
+ static void ena_free_rx_page(struct ena_ring *rx_ring,
+@@ -1168,8 +656,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
+ ena_free_rx_bufs(adapter, i);
+ }
+
+-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+- struct ena_tx_buffer *tx_info)
++void ena_unmap_tx_buff(struct ena_ring *tx_ring,
++ struct ena_tx_buffer *tx_info)
+ {
+ struct ena_com_buf *ena_buf;
+ u32 cnt;
+@@ -1204,8 +692,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+ {
+ bool print_once = true;
++ bool is_xdp_ring;
+ u32 i;
+
++ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
++
+ for (i = 0; i < tx_ring->ring_size; i++) {
+ struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
+
+@@ -1225,10 +716,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
+
+ ena_unmap_tx_buff(tx_ring, tx_info);
+
+- dev_kfree_skb_any(tx_info->skb);
++ if (is_xdp_ring)
++ xdp_return_frame(tx_info->xdpf);
++ else
++ dev_kfree_skb_any(tx_info->skb);
+ }
+- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+- tx_ring->qid));
++
++ if (!is_xdp_ring)
++ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
++ tx_ring->qid));
+ }
+
+ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
+@@ -1271,8 +767,8 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
+ ena_destroy_all_rx_queues(adapter);
+ }
+
+-static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+- struct ena_tx_buffer *tx_info, bool is_xdp)
++int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
++ struct ena_tx_buffer *tx_info, bool is_xdp)
+ {
+ if (tx_info)
+ netif_err(ring->adapter,
+@@ -1304,17 +800,6 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
+ }
+
+-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
+-{
+- struct ena_tx_buffer *tx_info;
+-
+- tx_info = &xdp_ring->tx_buffer_info[req_id];
+- if (likely(tx_info->xdpf))
+- return 0;
+-
+- return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
+-}
+-
+ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+ {
+ struct netdev_queue *txq;
+@@ -1337,8 +822,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+ &req_id);
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+- handle_invalid_req_id(tx_ring, req_id, NULL,
+- false);
++ handle_invalid_req_id(tx_ring, req_id, NULL, false);
+ break;
+ }
+
+@@ -1492,11 +976,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ if (unlikely(!skb))
+ return NULL;
+
+- /* sync this buffer for CPU use */
+- dma_sync_single_for_cpu(rx_ring->dev,
+- dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+- len,
+- DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
+ dma_sync_single_for_device(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+@@ -1515,17 +994,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+
+ buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
+
+- pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
+-
+ /* If XDP isn't loaded try to reuse part of the RX buffer */
+ reuse_rx_buf_page = !is_xdp_loaded &&
+ ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
+
+- dma_sync_single_for_cpu(rx_ring->dev,
+- pre_reuse_paddr + pkt_offset,
+- len,
+- DMA_FROM_DEVICE);
+-
+ if (!reuse_rx_buf_page)
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+
+@@ -1576,8 +1048,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ DMA_FROM_DEVICE);
+
+ if (!reuse_rx_buf_page)
+- ena_unmap_rx_buff_attrs(rx_ring, rx_info,
+- DMA_ATTR_SKIP_CPU_SYNC);
++ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ page_offset + buf_offset, len, buf_len);
+@@ -1671,20 +1142,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
+ }
+ }
+
+-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
++static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
+ {
+ struct ena_rx_buffer *rx_info;
+ int ret;
+
++ /* XDP multi-buffer packets not supported */
++ if (unlikely(num_descs > 1)) {
++ netdev_err_once(rx_ring->adapter->netdev,
++ "xdp: dropped unsupported multi-buffer packets\n");
++ ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
++ return ENA_XDP_DROP;
++ }
++
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ xdp_prepare_buff(xdp, page_address(rx_info->page),
+ rx_info->buf_offset,
+ rx_ring->ena_bufs[0].len, false);
+- /* If for some reason we received a bigger packet than
+- * we expect, then we simply drop it
+- */
+- if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
+- return ENA_XDP_DROP;
+
+ ret = ena_xdp_execute(rx_ring, xdp);
+
+@@ -1696,6 +1170,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+
+ return ret;
+ }
++
+ /* ena_clean_rx_irq - Cleanup RX irq
+ * @rx_ring: RX ring to clean
+ * @napi: napi handler
+@@ -1719,6 +1194,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ int xdp_flags = 0;
+ int total_len = 0;
+ int xdp_verdict;
++ u8 pkt_offset;
+ int rc = 0;
+ int i;
+
+@@ -1745,15 +1221,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+
+ /* First descriptor might have an offset set by the device */
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+- rx_info->buf_offset += ena_rx_ctx.pkt_offset;
++ pkt_offset = ena_rx_ctx.pkt_offset;
++ rx_info->buf_offset += pkt_offset;
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+ rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
+ ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
+
++ dma_sync_single_for_cpu(rx_ring->dev,
++ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
++ rx_ring->ena_bufs[0].len,
++ DMA_FROM_DEVICE);
++
+ if (ena_xdp_present_ring(rx_ring))
+- xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
++ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
+
+ /* allocate skb and fill it */
+ if (xdp_verdict == ENA_XDP_PASS)
+@@ -1777,7 +1259,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ if (xdp_verdict & ENA_XDP_FORWARDED) {
+ ena_unmap_rx_buff_attrs(rx_ring,
+ &rx_ring->rx_buffer_info[req_id],
+- 0);
++ DMA_ATTR_SKIP_CPU_SYNC);
+ rx_ring->rx_buffer_info[req_id].page = NULL;
+ }
+ }
+@@ -1839,8 +1321,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ adapter = netdev_priv(rx_ring->netdev);
+
+ if (rc == -ENOSPC) {
+- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+- &rx_ring->syncp);
++ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
+ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else {
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+@@ -1881,8 +1362,8 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
+ rx_ring->per_napi_packets = 0;
+ }
+
+-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+- struct ena_ring *rx_ring)
++void ena_unmask_interrupt(struct ena_ring *tx_ring,
++ struct ena_ring *rx_ring)
+ {
+ u32 rx_interval = tx_ring->smoothed_interval;
+ struct ena_eth_io_intr_reg intr_reg;
+@@ -1914,8 +1395,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
+ }
+
+-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+- struct ena_ring *rx_ring)
++void ena_update_ring_numa_node(struct ena_ring *tx_ring,
++ struct ena_ring *rx_ring)
+ {
+ int cpu = get_cpu();
+ int numa_node;
+@@ -1950,67 +1431,6 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ put_cpu();
+ }
+
+-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
+-{
+- u32 total_done = 0;
+- u16 next_to_clean;
+- int tx_pkts = 0;
+- u16 req_id;
+- int rc;
+-
+- if (unlikely(!xdp_ring))
+- return 0;
+- next_to_clean = xdp_ring->next_to_clean;
+-
+- while (tx_pkts < budget) {
+- struct ena_tx_buffer *tx_info;
+- struct xdp_frame *xdpf;
+-
+- rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
+- &req_id);
+- if (rc) {
+- if (unlikely(rc == -EINVAL))
+- handle_invalid_req_id(xdp_ring, req_id, NULL,
+- true);
+- break;
+- }
+-
+- /* validate that the request id points to a valid xdp_frame */
+- rc = validate_xdp_req_id(xdp_ring, req_id);
+- if (rc)
+- break;
+-
+- tx_info = &xdp_ring->tx_buffer_info[req_id];
+- xdpf = tx_info->xdpf;
+-
+- tx_info->xdpf = NULL;
+- tx_info->last_jiffies = 0;
+- ena_unmap_tx_buff(xdp_ring, tx_info);
+-
+- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+- "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
+- xdpf);
+-
+- tx_pkts++;
+- total_done += tx_info->tx_descs;
+-
+- xdp_return_frame(xdpf);
+- xdp_ring->free_ids[next_to_clean] = req_id;
+- next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+- xdp_ring->ring_size);
+- }
+-
+- xdp_ring->next_to_clean = next_to_clean;
+- ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
+- ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
+-
+- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
+- "tx_poll: q %d done. total pkts: %d\n",
+- xdp_ring->qid, tx_pkts);
+-
+- return tx_pkts;
+-}
+-
+ static int ena_io_poll(struct napi_struct *napi, int budget)
+ {
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+@@ -2327,8 +1747,8 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
+ for (i = first_index; i < first_index + count; i++) {
+ netif_napi_del(&adapter->ena_napi[i].napi);
+
+- WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
+- adapter->ena_napi[i].xdp_ring);
++ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
++ adapter->ena_napi[i].rx_ring);
+ }
+ }
+
+@@ -2343,12 +1763,10 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
+ netif_napi_add(adapter->netdev, &napi->napi,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
+
+- if (!ENA_IS_XDP_INDEX(adapter, i)) {
++ if (!ENA_IS_XDP_INDEX(adapter, i))
+ napi->rx_ring = &adapter->rx_ring[i];
+- napi->tx_ring = &adapter->tx_ring[i];
+- } else {
+- napi->xdp_ring = &adapter->tx_ring[i];
+- }
++
++ napi->tx_ring = &adapter->tx_ring[i];
+ napi->qid = i;
+ }
+ }
+@@ -2383,8 +1801,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
+ if (!ena_dev->rss.tbl_log_size) {
+ rc = ena_rss_init_default(adapter);
+ if (rc && (rc != -EOPNOTSUPP)) {
+- netif_err(adapter, ifup, adapter->netdev,
+- "Failed to init RSS rc: %d\n", rc);
++ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
+ return rc;
+ }
+ }
+@@ -2476,8 +1893,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
+ return rc;
+ }
+
+-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+- int first_index, int count)
++int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
++ int first_index, int count)
+ {
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int rc, i;
+@@ -2687,7 +2104,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
+ }
+ }
+
+-static int ena_up(struct ena_adapter *adapter)
++int ena_up(struct ena_adapter *adapter)
+ {
+ int io_queue_count, rc, i;
+
+@@ -2749,7 +2166,7 @@ static int ena_up(struct ena_adapter *adapter)
+ return rc;
+ }
+
+-static void ena_down(struct ena_adapter *adapter)
++void ena_down(struct ena_adapter *adapter)
+ {
+ int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+
+@@ -3180,7 +2597,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ /* set flags and meta data */
+ ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
+
+- rc = ena_xmit_common(dev,
++ rc = ena_xmit_common(adapter,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+@@ -3239,22 +2656,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_OK;
+ }
+
+-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
+- struct net_device *sb_dev)
+-{
+- u16 qid;
+- /* we suspect that this is good for in-kernel network services that
+- * want to loop incoming skb rx to tx in normal user generated traffic,
+- * most probably we will not get to this
+- */
+- if (skb_rx_queue_recorded(skb))
+- qid = skb_get_rx_queue(skb);
+- else
+- qid = netdev_pick_tx(dev, skb, NULL);
+-
+- return qid;
+-}
+-
+ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -3333,8 +2734,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
+ rc = ena_com_set_host_attributes(adapter->ena_dev);
+ if (rc) {
+ if (rc == -EOPNOTSUPP)
+- netif_warn(adapter, drv, adapter->netdev,
+- "Cannot set host attributes\n");
++ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set host attributes\n");
+@@ -3425,7 +2825,6 @@ static const struct net_device_ops ena_netdev_ops = {
+ .ndo_open = ena_open,
+ .ndo_stop = ena_close,
+ .ndo_start_xmit = ena_start_xmit,
+- .ndo_select_queue = ena_select_queue,
+ .ndo_get_stats64 = ena_get_stats64,
+ .ndo_tx_timeout = ena_tx_timeout,
+ .ndo_change_mtu = ena_change_mtu,
+@@ -4000,10 +3399,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ {
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+- int i, budget, rc;
++ int qid, budget, rc;
+ int io_queue_count;
+
+ io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
++
+ /* Make sure the driver doesn't turn the device in other process */
+ smp_rmb();
+
+@@ -4016,27 +3416,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
+ if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+- budget = ENA_MONITORED_TX_QUEUES;
++ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
+
+- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
+- tx_ring = &adapter->tx_ring[i];
+- rx_ring = &adapter->rx_ring[i];
++ qid = adapter->last_monitored_tx_qid;
++
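++	/* Check at most 'budget' queues per run, cycling through the IO
++	 * queues round-robin and resuming after the last queue inspected
++	 */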
++ while (budget) {
++ qid = (qid + 1) % io_queue_count;
++
++ tx_ring = &adapter->tx_ring[qid];
++ rx_ring = &adapter->rx_ring[qid];
+
+ rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
+
+- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
++ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
+ check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
+ if (unlikely(rc))
+ return;
+
+ budget--;
+- if (!budget)
+- break;
+ }
+
+- adapter->last_monitored_tx_qid = i % io_queue_count;
++ adapter->last_monitored_tx_qid = qid;
+ }
+
+ /* trigger napi schedule after 2 consecutive detections */
+@@ -4324,8 +3726,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
+ }
+ }
+
+- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
+- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
++ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
++ 0xFFFFFFFF);
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ dev_err(dev, "Cannot fill hash function\n");
+ goto err_fill_indir;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 33c923e1261a3e..b364febab011ef 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -110,19 +110,6 @@
+
+ #define ENA_MMIO_DISABLE_REG_READ BIT(0)
+
+-/* The max MTU size is configured to be the ethernet frame size without
+- * the overhead of the ethernet header, which can have a VLAN header, and
+- * a frame check sequence (FCS).
+- * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
+- */
+-
+-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+- VLAN_HLEN - XDP_PACKET_HEADROOM - \
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+-
+-#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+- ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+-
+ struct ena_irq {
+ irq_handler_t handler;
+ void *data;
+@@ -138,7 +125,6 @@ struct ena_napi {
+ struct napi_struct napi;
+ struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
+- struct ena_ring *xdp_ring;
+ u32 qid;
+ struct dim dim;
+ };
+@@ -421,47 +407,44 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+
+-enum ena_xdp_errors_t {
+- ENA_XDP_ALLOWED = 0,
+- ENA_XDP_CURRENT_MTU_TOO_LARGE,
+- ENA_XDP_NO_ENOUGH_QUEUES,
+-};
+-
+-enum ENA_XDP_ACTIONS {
+- ENA_XDP_PASS = 0,
+- ENA_XDP_TX = BIT(0),
+- ENA_XDP_REDIRECT = BIT(1),
+- ENA_XDP_DROP = BIT(2)
+-};
+-
+-#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
++int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
++ struct ena_tx_buffer *tx_info, bool is_xdp);
+
+-static inline bool ena_xdp_present(struct ena_adapter *adapter)
++/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
++static inline void ena_increase_stat(u64 *statp, u64 cnt,
++ struct u64_stats_sync *syncp)
+ {
+- return !!adapter->xdp_bpf_prog;
++ u64_stats_update_begin(syncp);
++ (*statp) += cnt;
++ u64_stats_update_end(syncp);
+ }
+
+-static inline bool ena_xdp_present_ring(struct ena_ring *ring)
++static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
+ {
+- return !!ring->xdp_bpf_prog;
+-}
+-
+-static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+- u32 queues)
+-{
+- return 2 * queues <= adapter->max_num_io_queues;
+-}
+-
+-static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+-{
+- enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+-
+- if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+- rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+- else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+- rc = ENA_XDP_NO_ENOUGH_QUEUES;
+-
+- return rc;
++ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
++ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
+ }
+
++int ena_xmit_common(struct ena_adapter *adapter,
++ struct ena_ring *ring,
++ struct ena_tx_buffer *tx_info,
++ struct ena_com_tx_ctx *ena_tx_ctx,
++ u16 next_to_use,
++ u32 bytes);
++void ena_unmap_tx_buff(struct ena_ring *tx_ring,
++ struct ena_tx_buffer *tx_info);
++void ena_init_io_rings(struct ena_adapter *adapter,
++ int first_index, int count);
++int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
++ int first_index, int count);
++int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
++ int first_index, int count);
++void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
++ int first_index, int count);
++void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
++void ena_down(struct ena_adapter *adapter);
++int ena_up(struct ena_adapter *adapter);
++void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring);
++void ena_update_ring_numa_node(struct ena_ring *tx_ring,
++ struct ena_ring *rx_ring);
+ #endif /* !(ENA_H) */
+diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
+new file mode 100644
+index 00000000000000..25de2f511649fb
+--- /dev/null
++++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
+@@ -0,0 +1,466 @@
++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
++/*
++ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
++ */
++
++#include "ena_xdp.h"
++
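++/* Check that the completion's req_id points at a buffer that actually
++ * holds an XDP frame
++ */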
++static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
++{
++ struct ena_tx_buffer *tx_info;
++
++ tx_info = &tx_ring->tx_buffer_info[req_id];
++ if (likely(tx_info->xdpf))
++ return 0;
++
++ return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
++}
++
++static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
++ struct ena_tx_buffer *tx_info,
++ struct xdp_frame *xdpf,
++ struct ena_com_tx_ctx *ena_tx_ctx)
++{
++ struct ena_adapter *adapter = tx_ring->adapter;
++ struct ena_com_buf *ena_buf;
++ int push_len = 0;
++ dma_addr_t dma;
++ void *data;
++ u32 size;
++
++ tx_info->xdpf = xdpf;
++ data = tx_info->xdpf->data;
++ size = tx_info->xdpf->len;
++
++ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
++ /* Designate part of the packet for LLQ */
++ push_len = min_t(u32, size, tx_ring->tx_max_header_size);
++
++ ena_tx_ctx->push_header = data;
++
++ size -= push_len;
++ data += push_len;
++ }
++
++ ena_tx_ctx->header_len = push_len;
++
++ if (size > 0) {
++ dma = dma_map_single(tx_ring->dev,
++ data,
++ size,
++ DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
++ goto error_report_dma_error;
++
++ tx_info->map_linear_data = 0;
++
++ ena_buf = tx_info->bufs;
++ ena_buf->paddr = dma;
++ ena_buf->len = size;
++
++ ena_tx_ctx->ena_bufs = ena_buf;
++ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
++ }
++
++ return 0;
++
++error_report_dma_error:
++ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
++ &tx_ring->syncp);
++ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
++
++ return -EINVAL;
++}
++
++int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
++ struct ena_adapter *adapter,
++ struct xdp_frame *xdpf,
++ int flags)
++{
++ struct ena_com_tx_ctx ena_tx_ctx = {};
++ struct ena_tx_buffer *tx_info;
++ u16 next_to_use, req_id;
++ int rc;
++
++ next_to_use = tx_ring->next_to_use;
++ req_id = tx_ring->free_ids[next_to_use];
++ tx_info = &tx_ring->tx_buffer_info[req_id];
++ tx_info->num_of_bufs = 0;
++
++ rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
++ if (unlikely(rc))
++ goto err;
++
++ ena_tx_ctx.req_id = req_id;
++
++ rc = ena_xmit_common(adapter,
++ tx_ring,
++ tx_info,
++ &ena_tx_ctx,
++ next_to_use,
++ xdpf->len);
++ if (rc)
++ goto error_unmap_dma;
++
++	/* Trigger the DMA engine. ena_ring_tx_doorbell()
++	 * issues a memory barrier internally.
++ */
++ if (flags & XDP_XMIT_FLUSH)
++ ena_ring_tx_doorbell(tx_ring);
++
++ return rc;
++
++error_unmap_dma:
++ ena_unmap_tx_buff(tx_ring, tx_info);
++err:
++ tx_info->xdpf = NULL;
++
++ return rc;
++}
++
++int ena_xdp_xmit(struct net_device *dev, int n,
++ struct xdp_frame **frames, u32 flags)
++{
++ struct ena_adapter *adapter = netdev_priv(dev);
++ struct ena_ring *tx_ring;
++ int qid, i, nxmit = 0;
++
++ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
++ return -EINVAL;
++
++ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
++ return -ENETDOWN;
++
++ /* We assume that all rings have the same XDP program */
++ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
++ return -ENXIO;
++
++ qid = smp_processor_id() % adapter->xdp_num_queues;
++ qid += adapter->xdp_first_ring;
++ tx_ring = &adapter->tx_ring[qid];
++
++	/* Other CPUs might try to send through this queue */
++ spin_lock(&tx_ring->xdp_tx_lock);
++
++ for (i = 0; i < n; i++) {
++ if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
++ break;
++ nxmit++;
++ }
++
++ /* Ring doorbell to make device aware of the packets */
++ if (flags & XDP_XMIT_FLUSH)
++ ena_ring_tx_doorbell(tx_ring);
++
++ spin_unlock(&tx_ring->xdp_tx_lock);
++
++ /* Return number of packets sent */
++ return nxmit;
++}
++
++static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
++{
++ adapter->xdp_first_ring = adapter->num_io_queues;
++ adapter->xdp_num_queues = adapter->num_io_queues;
++
++ ena_init_io_rings(adapter,
++ adapter->xdp_first_ring,
++ adapter->xdp_num_queues);
++}
++
++int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
++{
++ u32 xdp_first_ring = adapter->xdp_first_ring;
++ u32 xdp_num_queues = adapter->xdp_num_queues;
++ int rc = 0;
++
++ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
++ if (rc)
++ goto setup_err;
++
++ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
++ if (rc)
++ goto create_err;
++
++ return 0;
++
++create_err:
++ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
++setup_err:
++ return rc;
++}
++
++/* Provides a way for both kernel and bpf-prog to know
++ * more about the RX-queue a given XDP frame arrived on.
++ */
++static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
++{
++ int rc;
++
++ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
++
++ if (rc) {
++ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
++ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
++ rx_ring->qid, rc);
++ goto err;
++ }
++
++ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
++
++ if (rc) {
++ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
++ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
++ rx_ring->qid, rc);
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++ }
++
++err:
++ return rc;
++}
++
++static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
++{
++ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++}
++
++void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
++ struct bpf_prog *prog,
++ int first, int count)
++{
++ struct bpf_prog *old_bpf_prog;
++ struct ena_ring *rx_ring;
++ int i = 0;
++
++ for (i = first; i < count; i++) {
++ rx_ring = &adapter->rx_ring[i];
++ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
++
++ if (!old_bpf_prog && prog) {
++ ena_xdp_register_rxq_info(rx_ring);
++ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
++ } else if (old_bpf_prog && !prog) {
++ ena_xdp_unregister_rxq_info(rx_ring);
++ rx_ring->rx_headroom = NET_SKB_PAD;
++ }
++ }
++}
++
++static void ena_xdp_exchange_program(struct ena_adapter *adapter,
++ struct bpf_prog *prog)
++{
++ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
++
++ ena_xdp_exchange_program_rx_in_range(adapter,
++ prog,
++ 0,
++ adapter->num_io_queues);
++
++ if (old_bpf_prog)
++ bpf_prog_put(old_bpf_prog);
++}
++
++static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
++{
++ bool was_up;
++ int rc;
++
++ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
++
++ if (was_up)
++ ena_down(adapter);
++
++ adapter->xdp_first_ring = 0;
++ adapter->xdp_num_queues = 0;
++ ena_xdp_exchange_program(adapter, NULL);
++ if (was_up) {
++ rc = ena_up(adapter);
++ if (rc)
++ return rc;
++ }
++ return 0;
++}
++
++static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
++{
++ struct ena_adapter *adapter = netdev_priv(netdev);
++ struct bpf_prog *prog = bpf->prog;
++ struct bpf_prog *old_bpf_prog;
++ int rc, prev_mtu;
++ bool is_up;
++
++ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
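++	/* Attaching a program to a running device requires briefly bringing
++	 * it down so that the dedicated XDP Tx queues can be created
++	 */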
++ rc = ena_xdp_allowed(adapter);
++ if (rc == ENA_XDP_ALLOWED) {
++ old_bpf_prog = adapter->xdp_bpf_prog;
++ if (prog) {
++ if (!is_up) {
++ ena_init_all_xdp_queues(adapter);
++ } else if (!old_bpf_prog) {
++ ena_down(adapter);
++ ena_init_all_xdp_queues(adapter);
++ }
++ ena_xdp_exchange_program(adapter, prog);
++
++ if (is_up && !old_bpf_prog) {
++ rc = ena_up(adapter);
++ if (rc)
++ return rc;
++ }
++ xdp_features_set_redirect_target(netdev, false);
++ } else if (old_bpf_prog) {
++ xdp_features_clear_redirect_target(netdev);
++ rc = ena_destroy_and_free_all_xdp_queues(adapter);
++ if (rc)
++ return rc;
++ }
++
++ prev_mtu = netdev->max_mtu;
++ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
++
++ if (!old_bpf_prog)
++ netif_info(adapter, drv, adapter->netdev,
++ "XDP program is set, changing the max_mtu from %d to %d",
++ prev_mtu, netdev->max_mtu);
++
++ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
++ netif_err(adapter, drv, adapter->netdev,
++ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
++ netdev->mtu, ENA_XDP_MAX_MTU);
++ NL_SET_ERR_MSG_MOD(bpf->extack,
++ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
++ return -EINVAL;
++ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
++ netif_err(adapter, drv, adapter->netdev,
++ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
++ adapter->num_io_queues, adapter->max_num_io_queues);
++ NL_SET_ERR_MSG_MOD(bpf->extack,
++ "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/* This is the main XDP callback; the kernel uses it to set/unset the XDP
++ * program as well as to query the current XDP program id.
++ */
++int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
++{
++ switch (bpf->command) {
++ case XDP_SETUP_PROG:
++ return ena_xdp_set(netdev, bpf);
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
++{
++ u32 total_done = 0;
++ u16 next_to_clean;
++ int tx_pkts = 0;
++ u16 req_id;
++ int rc;
++
++ if (unlikely(!tx_ring))
++ return 0;
++ next_to_clean = tx_ring->next_to_clean;
++
++ while (tx_pkts < budget) {
++ struct ena_tx_buffer *tx_info;
++ struct xdp_frame *xdpf;
++
++ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
++ &req_id);
++ if (rc) {
++ if (unlikely(rc == -EINVAL))
++ handle_invalid_req_id(tx_ring, req_id, NULL, true);
++ break;
++ }
++
++ /* validate that the request id points to a valid xdp_frame */
++ rc = validate_xdp_req_id(tx_ring, req_id);
++ if (rc)
++ break;
++
++ tx_info = &tx_ring->tx_buffer_info[req_id];
++ xdpf = tx_info->xdpf;
++
++ tx_info->xdpf = NULL;
++ tx_info->last_jiffies = 0;
++ ena_unmap_tx_buff(tx_ring, tx_info);
++
++ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
++ "tx_poll: q %d skb %p completed\n", tx_ring->qid,
++ xdpf);
++
++ tx_pkts++;
++ total_done += tx_info->tx_descs;
++
++ xdp_return_frame(xdpf);
++ tx_ring->free_ids[next_to_clean] = req_id;
++ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
++ tx_ring->ring_size);
++ }
++
++ tx_ring->next_to_clean = next_to_clean;
++ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
++ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
++
++ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
++ "tx_poll: q %d done. total pkts: %d\n",
++ tx_ring->qid, tx_pkts);
++
++ return tx_pkts;
++}
++
++/* This is the XDP napi callback. XDP queues use a napi callback separate
++ * from the one used by the Rx/Tx queues.
++ */
++int ena_xdp_io_poll(struct napi_struct *napi, int budget)
++{
++ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
++ u32 xdp_work_done, xdp_budget;
++ struct ena_ring *tx_ring;
++ int napi_comp_call = 0;
++ int ret;
++
++ tx_ring = ena_napi->tx_ring;
++
++ xdp_budget = budget;
++
++ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
++ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
++ napi_complete_done(napi, 0);
++ return 0;
++ }
++
++ xdp_work_done = ena_clean_xdp_irq(tx_ring, xdp_budget);
++
++	/* If the device is about to reset or is down, avoid unmasking
++	 * the interrupt and return 0 so NAPI won't reschedule
++ */
++ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
++ napi_complete_done(napi, 0);
++ ret = 0;
++ } else if (xdp_budget > xdp_work_done) {
++ napi_comp_call = 1;
++ if (napi_complete_done(napi, xdp_work_done))
++ ena_unmask_interrupt(tx_ring, NULL);
++ ena_update_ring_numa_node(tx_ring, NULL);
++ ret = xdp_work_done;
++ } else {
++ ret = xdp_budget;
++ }
++
++ u64_stats_update_begin(&tx_ring->syncp);
++ tx_ring->tx_stats.napi_comp += napi_comp_call;
++ tx_ring->tx_stats.tx_poll++;
++ u64_stats_update_end(&tx_ring->syncp);
++ tx_ring->tx_stats.last_napi_jiffies = jiffies;
++
++ return ret;
++}
+diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h
+new file mode 100644
+index 00000000000000..3fa8e80b18a9e6
+--- /dev/null
++++ b/drivers/net/ethernet/amazon/ena/ena_xdp.h
+@@ -0,0 +1,152 @@
++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
++/*
++ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
++ */
++
++#ifndef ENA_XDP_H
++#define ENA_XDP_H
++
++#include "ena_netdev.h"
++#include <linux/bpf_trace.h>
++
++/* The max MTU size is configured to be the ethernet frame size without
++ * the overhead of the ethernet header, which can have a VLAN header, and
++ * a frame check sequence (FCS).
++ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
++ */
++#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
++ VLAN_HLEN - XDP_PACKET_HEADROOM - \
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++
++#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
++ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
++
++enum ENA_XDP_ACTIONS {
++ ENA_XDP_PASS = 0,
++ ENA_XDP_TX = BIT(0),
++ ENA_XDP_REDIRECT = BIT(1),
++ ENA_XDP_DROP = BIT(2)
++};
++
++#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
++
++int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
++void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
++ struct bpf_prog *prog,
++ int first, int count);
++int ena_xdp_io_poll(struct napi_struct *napi, int budget);
++int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
++ struct ena_adapter *adapter,
++ struct xdp_frame *xdpf,
++ int flags);
++int ena_xdp_xmit(struct net_device *dev, int n,
++ struct xdp_frame **frames, u32 flags);
++int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
++
++enum ena_xdp_errors_t {
++ ENA_XDP_ALLOWED = 0,
++ ENA_XDP_CURRENT_MTU_TOO_LARGE,
++ ENA_XDP_NO_ENOUGH_QUEUES,
++};
++
++static inline bool ena_xdp_present(struct ena_adapter *adapter)
++{
++ return !!adapter->xdp_bpf_prog;
++}
++
++static inline bool ena_xdp_present_ring(struct ena_ring *ring)
++{
++ return !!ring->xdp_bpf_prog;
++}
++
++static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
++ u32 queues)
++{
++ return 2 * queues <= adapter->max_num_io_queues;
++}
++
++static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
++{
++ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
++
++ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
++ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
++ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
++ rc = ENA_XDP_NO_ENOUGH_QUEUES;
++
++ return rc;
++}
++
++static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
++{
++ u32 verdict = ENA_XDP_PASS;
++ struct bpf_prog *xdp_prog;
++ struct ena_ring *xdp_ring;
++ struct xdp_frame *xdpf;
++ u64 *xdp_stat;
++
++ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
++
++ if (!xdp_prog)
++ return verdict;
++
++ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
++
++ switch (verdict) {
++ case XDP_TX:
++ xdpf = xdp_convert_buff_to_frame(xdp);
++ if (unlikely(!xdpf)) {
++ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
++ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
++ verdict = ENA_XDP_DROP;
++ break;
++ }
++
++ /* Find xmit queue */
++ xdp_ring = rx_ring->xdp_ring;
++
++ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
++ spin_lock(&xdp_ring->xdp_tx_lock);
++
++ if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
++ XDP_XMIT_FLUSH))
++ xdp_return_frame(xdpf);
++
++ spin_unlock(&xdp_ring->xdp_tx_lock);
++ xdp_stat = &rx_ring->rx_stats.xdp_tx;
++ verdict = ENA_XDP_TX;
++ break;
++ case XDP_REDIRECT:
++ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
++ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
++ verdict = ENA_XDP_REDIRECT;
++ break;
++ }
++ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
++ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
++ verdict = ENA_XDP_DROP;
++ break;
++ case XDP_ABORTED:
++ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
++ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
++ verdict = ENA_XDP_DROP;
++ break;
++ case XDP_DROP:
++ xdp_stat = &rx_ring->rx_stats.xdp_drop;
++ verdict = ENA_XDP_DROP;
++ break;
++ case XDP_PASS:
++ xdp_stat = &rx_ring->rx_stats.xdp_pass;
++ verdict = ENA_XDP_PASS;
++ break;
++ default:
++ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
++ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
++ verdict = ENA_XDP_DROP;
++ }
++
++ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
++
++ return verdict;
++}
++#endif /* ENA_XDP_H */
+diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
+index 045fe133f6ee99..ea773cfa0af67b 100644
+--- a/drivers/net/ethernet/amd/pds_core/adminq.c
++++ b/drivers/net/ethernet/amd/pds_core/adminq.c
+@@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
+ return nq_work;
+ }
+
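++/* Take a reference on the adminq for the duration of its use; fails if the
++ * driver is stopping, the FW is dead, or the refcount has already dropped
++ * to zero.
++ */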
++static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
++{
++ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
++ pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
++ return false;
++
++ return refcount_inc_not_zero(&pdsc->adminq_refcnt);
++}
++
+ void pdsc_process_adminq(struct pdsc_qcq *qcq)
+ {
+ union pds_core_adminq_comp *comp;
+@@ -75,9 +84,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
+ int aq_work = 0;
+ int credits;
+
+- /* Don't process AdminQ when shutting down */
+- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
+- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
++ /* Don't process AdminQ when it's not up */
++ if (!pdsc_adminq_inc_if_up(pdsc)) {
++ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
+ __func__);
+ return;
+ }
+@@ -124,6 +133,7 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ credits,
+ PDS_CORE_INTR_CRED_REARM);
++ refcount_dec(&pdsc->adminq_refcnt);
+ }
+
+ void pdsc_work_thread(struct work_struct *work)
+@@ -135,18 +145,20 @@ void pdsc_work_thread(struct work_struct *work)
+
+ irqreturn_t pdsc_adminq_isr(int irq, void *data)
+ {
+- struct pdsc_qcq *qcq = data;
+- struct pdsc *pdsc = qcq->pdsc;
++ struct pdsc *pdsc = data;
++ struct pdsc_qcq *qcq;
+
+- /* Don't process AdminQ when shutting down */
+- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
+- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
++ /* Don't process AdminQ when it's not up */
++ if (!pdsc_adminq_inc_if_up(pdsc)) {
++ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
++ qcq = &pdsc->adminqcq;
+ queue_work(pdsc->wq, &qcq->work);
+- pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
++ pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
++ refcount_dec(&pdsc->adminq_refcnt);
+
+ return IRQ_HANDLED;
+ }
+@@ -179,10 +191,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
+
+ /* Check that the FW is running */
+ if (!pdsc_is_fw_running(pdsc)) {
+- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+-
+- dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+- __func__, fw_status);
++ if (pdsc->info_regs) {
++ u8 fw_status =
++ ioread8(&pdsc->info_regs->fw_status);
++
++ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
++ __func__, fw_status);
++ } else {
++ dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
++ __func__);
++ }
+ ret = -ENXIO;
+
+ goto err_out_unlock;
+@@ -230,6 +248,12 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ int err = 0;
+ int index;
+
++ if (!pdsc_adminq_inc_if_up(pdsc)) {
++ dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
++ __func__, cmd->opcode);
++ return -ENXIO;
++ }
++
+ wc.qcq = &pdsc->adminqcq;
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ if (index < 0) {
+@@ -248,10 +272,16 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ break;
+
+ if (!pdsc_is_fw_running(pdsc)) {
+- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+-
+- dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+- __func__, fw_status);
++ if (pdsc->info_regs) {
++ u8 fw_status =
++ ioread8(&pdsc->info_regs->fw_status);
++
++ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
++ __func__, fw_status);
++ } else {
++ dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
++ __func__);
++ }
+ err = -ENXIO;
+ break;
+ }
+@@ -285,6 +315,8 @@ int pdsc_adminq_post(struct pdsc *pdsc,
+ queue_work(pdsc->wq, &pdsc->health_work);
+ }
+
++ refcount_dec(&pdsc->adminq_refcnt);
++
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(pdsc_adminq_post);
+diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
+index 11c23a7f3172d3..fd1a5149c00319 100644
+--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
++++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
+@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
+ if (err < 0) {
+ dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
+ name, ERR_PTR(err));
+- goto err_out;
++ kfree(padev);
++ return ERR_PTR(err);
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
+ name, ERR_PTR(err));
+- goto err_out_uninit;
++ auxiliary_device_uninit(aux_dev);
++ return ERR_PTR(err);
+ }
+
+ return padev;
+-
+-err_out_uninit:
+- auxiliary_device_uninit(aux_dev);
+-err_out:
+- kfree(padev);
+- return ERR_PTR(err);
+ }
+
+ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
+index 36f9b932b9e2aa..eb73c921dc1ed9 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.c
++++ b/drivers/net/ethernet/amd/pds_core/core.c
+@@ -125,7 +125,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+
+ snprintf(name, sizeof(name), "%s-%d-%s",
+ PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
+- index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
++ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
+ if (index < 0)
+ return index;
+ qcq->intx = index;
+@@ -407,10 +407,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
+ int numdescs;
+ int err;
+
+- if (init)
+- err = pdsc_dev_init(pdsc);
+- else
+- err = pdsc_dev_reinit(pdsc);
++ err = pdsc_dev_init(pdsc);
+ if (err)
+ return err;
+
+@@ -452,6 +449,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
+ if (init)
+ pdsc_debugfs_add_viftype(pdsc);
+
++ refcount_set(&pdsc->adminq_refcnt, 1);
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return 0;
+
+@@ -466,6 +464,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
+
+ if (!pdsc->pdev->is_virtfn)
+ pdsc_devcmd_reset(pdsc);
++ if (pdsc->adminqcq.work.func)
++ cancel_work_sync(&pdsc->adminqcq.work);
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+@@ -476,10 +476,9 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+- if (removing) {
+- kfree(pdsc->intr_info);
+- pdsc->intr_info = NULL;
+- }
++ kfree(pdsc->intr_info);
++ pdsc->intr_info = NULL;
++ pdsc->nintrs = 0;
+ }
+
+ if (pdsc->kern_dbpage) {
+@@ -487,6 +486,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
+ pdsc->kern_dbpage = NULL;
+ }
+
++ pci_free_irq_vectors(pdsc->pdev);
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ }
+
+@@ -512,7 +512,25 @@ void pdsc_stop(struct pdsc *pdsc)
+ PDS_CORE_INTR_MASK_SET);
+ }
+
+-static void pdsc_fw_down(struct pdsc *pdsc)
++static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
++{
++ /* The driver initializes the adminq_refcnt to 1 when the adminq is
++ * allocated and ready for use. Other users/requesters will increment
++ * the refcnt while in use. If the refcnt is down to 1 then the adminq
++	 * is not in use and the refcnt can be cleared and the adminq freed. Before
++	 * calling this function the driver will set PDSC_S_FW_DEAD, which
++	 * causes subsequent attempts to use the adminq and increment the
++ * refcnt to fail. This guarantees that this function will eventually
++ * exit.
++ */
++ while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
++ dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
++ __func__);
++ cpu_relax();
++ }
++}
++
++void pdsc_fw_down(struct pdsc *pdsc)
+ {
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+@@ -520,10 +538,15 @@ static void pdsc_fw_down(struct pdsc *pdsc)
+ };
+
+ if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+- dev_err(pdsc->dev, "%s: already happening\n", __func__);
++ dev_warn(pdsc->dev, "%s: already happening\n", __func__);
+ return;
+ }
+
++ if (pdsc->pdev->is_virtfn)
++ return;
++
++ pdsc_adminq_wait_and_dec_once_unused(pdsc);
++
+ /* Notify clients of fw_down */
+ if (pdsc->fw_reporter)
+ devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+@@ -533,7 +556,7 @@ static void pdsc_fw_down(struct pdsc *pdsc)
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+ }
+
+-static void pdsc_fw_up(struct pdsc *pdsc)
++void pdsc_fw_up(struct pdsc *pdsc)
+ {
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+@@ -546,6 +569,11 @@ static void pdsc_fw_up(struct pdsc *pdsc)
+ return;
+ }
+
++ if (pdsc->pdev->is_virtfn) {
++ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
++ return;
++ }
++
+ err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
+ if (err)
+ goto err_out;
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index e545fafc481969..f410f7d132056b 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -15,7 +15,7 @@
+ #define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
+
+ #define PDSC_WATCHDOG_SECS 5
+-#define PDSC_QUEUE_NAME_MAX_SZ 32
++#define PDSC_QUEUE_NAME_MAX_SZ 16
+ #define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+ #define PDSC_TEARDOWN_RECOVERY false
+@@ -184,6 +184,7 @@ struct pdsc {
+ struct mutex devcmd_lock; /* lock for dev_cmd operations */
+ struct mutex config_lock; /* lock for configuration operations */
+ spinlock_t adminq_lock; /* lock for adminq operations */
++ refcount_t adminq_refcnt;
+ struct pds_core_dev_info_regs __iomem *info_regs;
+ struct pds_core_dev_cmd_regs __iomem *cmd_regs;
+ struct pds_core_intr __iomem *intr_ctrl;
+@@ -280,7 +281,6 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+ int pdsc_devcmd_init(struct pdsc *pdsc);
+ int pdsc_devcmd_reset(struct pdsc *pdsc);
+-int pdsc_dev_reinit(struct pdsc *pdsc);
+ int pdsc_dev_init(struct pdsc *pdsc);
+
+ int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+@@ -309,4 +309,8 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data);
+
+ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
++
++void pdsc_fw_down(struct pdsc *pdsc);
++void pdsc_fw_up(struct pdsc *pdsc);
++
+ #endif /* _PDSC_H_ */
+diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
+index 8ec392299b7dcf..4e8579ca1c8c71 100644
+--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
++++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
+@@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity);
+
+ void pdsc_debugfs_add_ident(struct pdsc *pdsc)
+ {
++ /* This file will already exist in the reset flow */
++ if (debugfs_lookup("identity", pdsc->dentry))
++ return;
++
+ debugfs_create_file("identity", 0400, pdsc->dentry,
+ pdsc, &identity_fops);
+ }
+diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
+index f77cd9f5a2fda5..f0e39ab4004503 100644
+--- a/drivers/net/ethernet/amd/pds_core/dev.c
++++ b/drivers/net/ethernet/amd/pds_core/dev.c
+@@ -55,6 +55,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
+
+ bool pdsc_is_fw_running(struct pdsc *pdsc)
+ {
++ if (!pdsc->info_regs)
++ return false;
++
+ pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
+ pdsc->last_fw_time = jiffies;
+ pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
+@@ -175,13 +178,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ {
+ int err;
+
++ if (!pdsc->cmd_regs)
++ return -ENXIO;
++
+ memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
+ pdsc_devcmd_dbell(pdsc);
+ err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
+- memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
+
+ if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
+ queue_work(pdsc->wq, &pdsc->health_work);
++ else
++ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
+
+ return err;
+ }
+@@ -254,10 +261,14 @@ static int pdsc_identify(struct pdsc *pdsc)
+ struct pds_core_drv_identity drv = {};
+ size_t sz;
+ int err;
++ int n;
+
+ drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+- snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+- "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++	/* Catching the return quiets a -Wformat-truncation complaint */
++ n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
++ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++ if (n > sizeof(drv.driver_ver_str))
++ dev_dbg(pdsc->dev, "release name truncated, don't care\n");
+
+ /* Next let's get some info about the device
+ * We use the devcmd_lock at this level in order to
+@@ -298,13 +309,6 @@ static int pdsc_identify(struct pdsc *pdsc)
+ return 0;
+ }
+
+-int pdsc_dev_reinit(struct pdsc *pdsc)
+-{
+- pdsc_init_devinfo(pdsc);
+-
+- return pdsc_identify(pdsc);
+-}
+-
+ int pdsc_dev_init(struct pdsc *pdsc)
+ {
+ unsigned int nintrs;
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index d9607033bbf21f..d8218bb153d9ed 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -104,14 +104,15 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct pds_core_fw_list_info fw_list;
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+- char buf[16];
++ char buf[32];
+ int listlen;
+ int err;
+ int i;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
+- memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
++ if (!err)
++ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err && err != -EIO)
+ return err;
+diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
+index 90a811f3878ae9..fa626719e68d1b 100644
+--- a/drivers/net/ethernet/amd/pds_core/fw.c
++++ b/drivers/net/ethernet/amd/pds_core/fw.c
+@@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+
+ dev_info(pdsc->dev, "Installing firmware\n");
+
++ if (!pdsc->cmd_regs)
++ return -ENXIO;
++
+ dl = priv_to_devlink(pdsc);
+ devlink_flash_update_status_notify(dl, "Preparing to flash",
+ NULL, 0, 0);
+diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
+index 3a45bf474a19a0..eddbf0acdde77f 100644
+--- a/drivers/net/ethernet/amd/pds_core/main.c
++++ b/drivers/net/ethernet/amd/pds_core/main.c
+@@ -37,6 +37,11 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
+ struct pdsc_dev_bar *bars = pdsc->bars;
+ unsigned int i;
+
++ pdsc->info_regs = NULL;
++ pdsc->cmd_regs = NULL;
++ pdsc->intr_status = NULL;
++ pdsc->intr_ctrl = NULL;
++
+ for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (bars[i].vaddr)
+ pci_iounmap(pdsc->pdev, bars[i].vaddr);
+@@ -293,7 +298,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
+ err_out_teardown:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+ err_out_unmap_bars:
+- del_timer_sync(&pdsc->wdtimer);
++ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+ mutex_destroy(&pdsc->config_lock);
+@@ -420,7 +425,7 @@ static void pdsc_remove(struct pci_dev *pdev)
+ */
+ pdsc_sriov_configure(pdev, 0);
+
+- del_timer_sync(&pdsc->wdtimer);
++ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+
+@@ -433,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev)
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+
+- pci_free_irq_vectors(pdev);
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ }
+@@ -445,12 +449,82 @@ static void pdsc_remove(struct pci_dev *pdev)
+ devlink_free(dl);
+ }
+
++static void pdsc_stop_health_thread(struct pdsc *pdsc)
++{
++ if (pdsc->pdev->is_virtfn)
++ return;
++
++ timer_shutdown_sync(&pdsc->wdtimer);
++ if (pdsc->health_work.func)
++ cancel_work_sync(&pdsc->health_work);
++}
++
++static void pdsc_restart_health_thread(struct pdsc *pdsc)
++{
++ if (pdsc->pdev->is_virtfn)
++ return;
++
++ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
++ mod_timer(&pdsc->wdtimer, jiffies + 1);
++}
++
++static void pdsc_reset_prepare(struct pci_dev *pdev)
++{
++ struct pdsc *pdsc = pci_get_drvdata(pdev);
++
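++	/* Quiesce ahead of the FLR: stop the health/watchdog thread, take
++	 * the FW down, then release the BARs and the PCI device
++	 */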
++ pdsc_stop_health_thread(pdsc);
++ pdsc_fw_down(pdsc);
++
++ pdsc_unmap_bars(pdsc);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++}
++
++static void pdsc_reset_done(struct pci_dev *pdev)
++{
++ struct pdsc *pdsc = pci_get_drvdata(pdev);
++ struct device *dev = pdsc->dev;
++ int err;
++
++ err = pci_enable_device(pdev);
++ if (err) {
++ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
++ return;
++ }
++ pci_set_master(pdev);
++
++ if (!pdev->is_virtfn) {
++ pcie_print_link_status(pdsc->pdev);
++
++ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
++ if (err) {
++ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
++ ERR_PTR(err));
++ return;
++ }
++
++ err = pdsc_map_bars(pdsc);
++ if (err)
++ return;
++ }
++
++ pdsc_fw_up(pdsc);
++ pdsc_restart_health_thread(pdsc);
++}
++
++static const struct pci_error_handlers pdsc_err_handler = {
++ /* FLR handling */
++ .reset_prepare = pdsc_reset_prepare,
++ .reset_done = pdsc_reset_done,
++};
++
+ static struct pci_driver pdsc_driver = {
+ .name = PDS_CORE_DRV_NAME,
+ .id_table = pdsc_id_table,
+ .probe = pdsc_probe,
+ .remove = pdsc_remove,
+ .sriov_configure = pdsc_sriov_configure,
++ .err_handler = &pdsc_err_handler,
+ };
+
+ void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 614c0278419bcf..6b73648b377936 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
+ static void xgbe_service_timer(struct timer_list *t)
+ {
+ struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
++ struct xgbe_channel *channel;
++ unsigned int i;
+
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
++
++ if (!pdata->tx_usecs)
++ return;
++
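++	/* Tx coalescing is in use: re-arm each channel's Tx timer so any
++	 * pending descriptors get flushed even without further Tx activity
++	 */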
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
++ if (!channel->tx_ring || channel->tx_timer_active)
++ break;
++ channel->tx_timer_active = 1;
++ mod_timer(&channel->tx_timer,
++ jiffies + usecs_to_jiffies(pdata->tx_usecs));
++ }
+ }
+
+ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 6e83ff59172a36..32fab5e7724626 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
+
+ cmd->base.phy_address = pdata->phy.address;
+
+- cmd->base.autoneg = pdata->phy.autoneg;
+- cmd->base.speed = pdata->phy.speed;
+- cmd->base.duplex = pdata->phy.duplex;
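++	/* Report speed/duplex only while the link is up; otherwise they
++	 * are unknown
++	 */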
++ if (netif_carrier_ok(netdev)) {
++ cmd->base.speed = pdata->phy.speed;
++ cmd->base.duplex = pdata->phy.duplex;
++ } else {
++ cmd->base.speed = SPEED_UNKNOWN;
++ cmd->base.duplex = DUPLEX_UNKNOWN;
++ }
+
++ cmd->base.autoneg = pdata->phy.autoneg;
+ cmd->base.port = PORT_NONE;
+
+ XGBE_LM_COPY(cmd, supported, lks, supported);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 32d2c6fac65266..4a2dc705b52801 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+- xgbe_set_mode(pdata, mode);
++ /* Force the mode change for SFI in Fixed PHY config.
++	 * Fixed PHY configs need the PLL to be enabled while doing a mode set.
++	 * When the SFP module isn't connected during boot, the driver assumes
++ * AN is ON and attempts autonegotiation. However, if the connected
++ * SFP comes up in Fixed PHY config, the link will not come up as
++	 * the PLL isn't enabled while the initial mode set command is issued.
++ * So, force the mode change for SFI in Fixed PHY configuration to
++ * fix link issues.
++ */
++ if (mode == XGBE_MODE_SFI)
++ xgbe_change_mode(pdata, mode);
++ else
++ xgbe_set_mode(pdata, mode);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+index ac4ea93bd8ddad..eaef14ea5dd2e0 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+@@ -265,7 +265,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
+ const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
+ const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
+ char tc_string[8];
+- int tc;
++ unsigned int tc;
+
+ memset(tc_string, 0, sizeof(tc_string));
+ memcpy(p, aq_ethtool_stat_names,
+@@ -274,7 +274,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
+
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ if (cfg->is_qos)
+- snprintf(tc_string, 8, "TC%d ", tc);
++ snprintf(tc_string, 8, "TC%u ", tc);
+
+ for (i = 0; i < cfg->vecs; i++) {
+ for (si = 0; si < rx_stat_cnt; si++) {
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+index 80b44043e6c53f..5acb3e16b5677b 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+@@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+
+ /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
+- * @skb: particular skb to send timestamp with
++ * @shhwtstamps: skb_shared_hwtstamps structure in which to store the timestamp
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+-static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
++static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
+ u64 timestamp)
+ {
+ timestamp -= atomic_read(&aq_ptp->offset_ingress);
+- aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
++ aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+ }
+
+ void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+@@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+ &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+ }
+
+-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
++u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len)
+ {
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+@@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ p, len, &timestamp);
+
+ if (ret > 0)
+- aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
++ aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+
+ return ret;
+ }
+@@ -953,8 +953,6 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+ {
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ unsigned int tx_ring_idx, rx_ring_idx;
+- struct aq_ring_s *hwts;
+- struct aq_ring_s *ring;
+ int err;
+
+ if (!aq_ptp)
+@@ -962,29 +960,23 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+
+ tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+
+- ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+- tx_ring_idx, &aq_nic->aq_nic_cfg);
+- if (!ring) {
+- err = -ENOMEM;
++ err = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
++ tx_ring_idx, &aq_nic->aq_nic_cfg);
++ if (err)
+ goto err_exit;
+- }
+
+ rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+
+- ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+- rx_ring_idx, &aq_nic->aq_nic_cfg);
+- if (!ring) {
+- err = -ENOMEM;
++ err = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
++ rx_ring_idx, &aq_nic->aq_nic_cfg);
++ if (err)
+ goto err_exit_ptp_tx;
+- }
+
+- hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+- aq_nic->aq_nic_cfg.rxds,
+- aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+- if (!hwts) {
+- err = -ENOMEM;
++ err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
++ aq_nic->aq_nic_cfg.rxds,
++ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
++ if (err)
+ goto err_exit_ptp_rx;
+- }
+
+ err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
+ if (err != 0) {
+@@ -1001,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+ return 0;
+
+ err_exit_hwts_rx:
+- aq_ring_free(&aq_ptp->hwts_rx);
++ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
+ err_exit_ptp_rx:
+ aq_ring_free(&aq_ptp->ptp_rx);
+ err_exit_ptp_tx:
+@@ -1019,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+
+ aq_ring_free(&aq_ptp->ptp_tx);
+ aq_ring_free(&aq_ptp->ptp_rx);
+- aq_ring_free(&aq_ptp->hwts_rx);
++ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
+
+ aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+index 28ccb7ca2df9e7..210b723f22072c 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+@@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ /* Return either ring is belong to PTP or not*/
+ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+
+-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
++u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len);
+
+ struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+@@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+ }
+
+ static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+- struct sk_buff *skb, u8 *p,
++ struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len)
+ {
+ return 0;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 4de22eed099a84..f7433abd659159 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -132,8 +132,8 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
+ return 0;
+ }
+
+-static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+- struct aq_nic_s *aq_nic)
++static int aq_ring_alloc(struct aq_ring_s *self,
++ struct aq_nic_s *aq_nic)
+ {
+ int err = 0;
+
+@@ -156,46 +156,29 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+ err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+- self = NULL;
+ }
+
+- return self;
++ return err;
+ }
+
+-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+- struct aq_nic_s *aq_nic,
+- unsigned int idx,
+- struct aq_nic_cfg_s *aq_nic_cfg)
++int aq_ring_tx_alloc(struct aq_ring_s *self,
++ struct aq_nic_s *aq_nic,
++ unsigned int idx,
++ struct aq_nic_cfg_s *aq_nic_cfg)
+ {
+- int err = 0;
+-
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->txds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
+
+- self = aq_ring_alloc(self, aq_nic);
+- if (!self) {
+- err = -ENOMEM;
+- goto err_exit;
+- }
+-
+-err_exit:
+- if (err < 0) {
+- aq_ring_free(self);
+- self = NULL;
+- }
+-
+- return self;
++ return aq_ring_alloc(self, aq_nic);
+ }
+
+-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+- struct aq_nic_s *aq_nic,
+- unsigned int idx,
+- struct aq_nic_cfg_s *aq_nic_cfg)
++int aq_ring_rx_alloc(struct aq_ring_s *self,
++ struct aq_nic_s *aq_nic,
++ unsigned int idx,
++ struct aq_nic_cfg_s *aq_nic_cfg)
+ {
+- int err = 0;
+-
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->rxds;
+@@ -217,22 +200,10 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ self->tail_size = 0;
+ }
+
+- self = aq_ring_alloc(self, aq_nic);
+- if (!self) {
+- err = -ENOMEM;
+- goto err_exit;
+- }
+-
+-err_exit:
+- if (err < 0) {
+- aq_ring_free(self);
+- self = NULL;
+- }
+-
+- return self;
++ return aq_ring_alloc(self, aq_nic);
+ }
+
+-struct aq_ring_s *
++int
+ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, unsigned int size, unsigned int dx_size)
+ {
+@@ -250,10 +221,10 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ GFP_KERNEL);
+ if (!self->dx_ring) {
+ aq_ring_free(self);
+- return NULL;
++ return -ENOMEM;
+ }
+
+- return self;
++ return 0;
+ }
+
+ int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
+@@ -647,7 +618,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ }
+ if (is_ptp_ring)
+ buff->len -=
+- aq_ptp_extract_ts(self->aq_nic, skb,
++ aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+@@ -742,6 +713,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
++ u16 ptp_hwtstamp_len = 0;
++ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+@@ -810,11 +783,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+- if (is_ptp_ring)
+- buff->len -=
+- aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+- aq_buf_vaddr(&buff->rxdata),
+- buff->len);
++ if (is_ptp_ring) {
++ ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
++ aq_buf_vaddr(&buff->rxdata),
++ buff->len);
++ buff->len -= ptp_hwtstamp_len;
++ }
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+@@ -834,6 +808,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ if (IS_ERR(skb) || !skb)
+ continue;
+
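++		/* The timestamp was extracted before the XDP program ran (no
++		 * skb existed yet); attach it now that the skb has been built
++		 */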
++ if (ptp_hwtstamp_len > 0)
++ *skb_hwtstamps(skb) = shhwtstamps;
++
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+@@ -932,11 +909,27 @@ void aq_ring_free(struct aq_ring_s *self)
+ return;
+
+ kfree(self->buff_ring);
++ self->buff_ring = NULL;
+
+- if (self->dx_ring)
++ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size, self->dx_ring,
+ self->dx_ring_pa);
++ self->dx_ring = NULL;
++ }
++}
++
++void aq_ring_hwts_rx_free(struct aq_ring_s *self)
++{
++ if (!self)
++ return;
++
++ if (self->dx_ring) {
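++		/* The hwts ring was allocated with AQ_CFG_RXDS_DEF extra
++		 * bytes, so the free must use the same overall size
++		 */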
++ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
++ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
++ self->dx_ring, self->dx_ring_pa);
++ self->dx_ring = NULL;
++ }
+ }
+
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+index 0a6c34438c1d0e..d627ace850ff54 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+@@ -183,14 +183,14 @@ static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
+ self->sw_head - self->sw_tail - 1);
+ }
+
+-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+- struct aq_nic_s *aq_nic,
+- unsigned int idx,
+- struct aq_nic_cfg_s *aq_nic_cfg);
+-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+- struct aq_nic_s *aq_nic,
+- unsigned int idx,
+- struct aq_nic_cfg_s *aq_nic_cfg);
++int aq_ring_tx_alloc(struct aq_ring_s *self,
++ struct aq_nic_s *aq_nic,
++ unsigned int idx,
++ struct aq_nic_cfg_s *aq_nic_cfg);
++int aq_ring_rx_alloc(struct aq_ring_s *self,
++ struct aq_nic_s *aq_nic,
++ unsigned int idx,
++ struct aq_nic_cfg_s *aq_nic_cfg);
+
+ int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
+ void aq_ring_rx_deinit(struct aq_ring_s *self);
+@@ -207,9 +207,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ int budget);
+ int aq_ring_rx_fill(struct aq_ring_s *self);
+
+-struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+- struct aq_nic_s *aq_nic, unsigned int idx,
+- unsigned int size, unsigned int dx_size);
++int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
++ struct aq_nic_s *aq_nic, unsigned int idx,
++ unsigned int size, unsigned int dx_size);
++void aq_ring_hwts_rx_free(struct aq_ring_s *self);
+ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+index f5db1c44e9b917..9769ab4f9bef01 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+@@ -136,35 +136,32 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
+ i, idx);
+
+- ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
+- idx_ring, aq_nic_cfg);
+- if (!ring) {
+- err = -ENOMEM;
++ ring = &self->ring[i][AQ_VEC_TX_ID];
++ err = aq_ring_tx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
++ if (err)
+ goto err_exit;
+- }
+
+ ++self->tx_rings;
+
+ aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+
+- if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
++ ring = &self->ring[i][AQ_VEC_RX_ID];
++ if (xdp_rxq_info_reg(&ring->xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+- if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
++ if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+- xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+- ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
+- idx_ring, aq_nic_cfg);
+- if (!ring) {
+- xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+- err = -ENOMEM;
++ err = aq_ring_rx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
++ if (err) {
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ goto err_exit;
+ }
+
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+index 43d821fe7a5424..63ba64dbb73107 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
+ u16 next_to_use;
+ u16 next_to_clean;
+ struct napi_struct napi;
+- struct page *rx_page;
+- unsigned int rx_page_offset;
+ };
+
+ /* board specific private data structure */
+ struct atl1c_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+- unsigned int rx_frag_size;
+ struct atl1c_hw hw;
+ struct atl1c_hw_stats hw_stats;
+ struct mii_if_info mii; /* MII interface info */
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 940c5d1ff9cfce..74b78164cf74a8 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ struct net_device *dev)
+ {
+- unsigned int head_size;
+ int mtu = dev->mtu;
+
+ adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+-
+- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+- adapter->rx_frag_size = roundup_pow_of_two(head_size);
+ }
+
+ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+@@ -964,7 +959,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+- int i;
+
+ dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ adapter->ring_header.desc, adapter->ring_header.dma);
+@@ -977,12 +971,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ kfree(adapter->tpd_ring[0].buffer_info);
+ adapter->tpd_ring[0].buffer_info = NULL;
+ }
+- for (i = 0; i < adapter->rx_queue_count; ++i) {
+- if (adapter->rrd_ring[i].rx_page) {
+- put_page(adapter->rrd_ring[i].rx_page);
+- adapter->rrd_ring[i].rx_page = NULL;
+- }
+- }
+ }
+
+ /**
+@@ -1754,48 +1742,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+ skb_checksum_none_assert(skb);
+ }
+
+-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
+- u32 queue, bool napi_mode)
+-{
+- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+- struct sk_buff *skb;
+- struct page *page;
+-
+- if (adapter->rx_frag_size > PAGE_SIZE) {
+- if (likely(napi_mode))
+- return napi_alloc_skb(&rrd_ring->napi,
+- adapter->rx_buffer_len);
+- else
+- return netdev_alloc_skb_ip_align(adapter->netdev,
+- adapter->rx_buffer_len);
+- }
+-
+- page = rrd_ring->rx_page;
+- if (!page) {
+- page = alloc_page(GFP_ATOMIC);
+- if (unlikely(!page))
+- return NULL;
+- rrd_ring->rx_page = page;
+- rrd_ring->rx_page_offset = 0;
+- }
+-
+- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
+- adapter->rx_frag_size);
+- if (likely(skb)) {
+- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+- rrd_ring->rx_page_offset += adapter->rx_frag_size;
+- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+- rrd_ring->rx_page = NULL;
+- else
+- get_page(page);
+- }
+- return skb;
+-}
+-
+ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode)
+ {
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
++ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+@@ -1814,13 +1765,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ while (next_info->flags & ATL1C_BUFFER_FREE) {
+ rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
++		/* When the DMA RX address is set to something like
++		 * 0x....fc0, it is very likely to cause a DMA
++		 * RFD overflow issue.
++		 *
++		 * To work around it, allocate the RX skb with 64
++		 * bytes of extra space, and offset the address
++		 * whenever 0x....fc0 is detected.
++		 */
++ if (likely(napi_mode))
++ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
++ else
++ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev, "alloc rx buffer failed\n");
+ break;
+ }
+
++ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++ skb_reserve(skb, 64);
++
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 5935be190b9e22..5f2a6fcba96708 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
+ err = -1;
+- goto failed;
++ goto free_buffer;
+ }
+
+ return 0;
++free_buffer:
++ kfree(tx_ring->tx_buffer);
++ tx_ring->tx_buffer = NULL;
+ failed:
+ if (adapter->ring_vir_addr != NULL) {
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+index 41a6098eb0c2f6..d9e9ec2e8945d7 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+@@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ int j = 0, i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+- if (j == *rule_cnt)
+- return -EMSGSIZE;
+-
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+@@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
++ if (j == *rule_cnt)
++ return -EMSGSIZE;
++
+ rule_locs[j++] = priv->net_filters[i].fs.location;
+ }
+
+@@ -1306,6 +1306,7 @@ static int bcmasp_probe(struct platform_device *pdev)
+ dev_err(dev, "Cannot create eth interface %d\n", i);
+ bcmasp_remove_intfs(priv);
+ of_node_put(intf_node);
++ ret = -ENOMEM;
+ goto of_put_exit;
+ }
+ list_add_tail(&intf->list, &priv->intfs);
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+index 53e5428812552b..6bf149d6459418 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+@@ -391,7 +391,9 @@ static void umac_reset(struct bcmasp_intf *intf)
+ umac_wl(intf, 0x0, UMC_CMD);
+ umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
+ usleep_range(10, 100);
+- umac_wl(intf, 0x0, UMC_CMD);
++ /* We hold the umac in reset and bring it out of
++ * reset when phy link is up.
++ */
+ }
+
+ static void umac_set_hw_addr(struct bcmasp_intf *intf,
+@@ -411,6 +413,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_CMD);
++ if (reg & UMC_CMD_SW_RESET)
++ return;
+ if (enable)
+ reg |= mask;
+ else
+@@ -429,13 +433,10 @@ static void umac_init(struct bcmasp_intf *intf)
+ umac_wl(intf, 0x800, UMC_FRM_LEN);
+ umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
+ umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
+- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
+ }
+
+-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
++static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
+ {
+- struct bcmasp_intf *intf =
+- container_of(napi, struct bcmasp_intf, tx_napi);
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long read, released = 0;
+@@ -478,10 +479,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+ DESC_RING_COUNT);
+ }
+
+- /* Ensure all descriptors have been written to DRAM for the hardware
+- * to see updated contents.
+- */
+- wmb();
++ return released;
++}
++
++static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
++{
++ struct bcmasp_intf *intf =
++ container_of(napi, struct bcmasp_intf, tx_napi);
++ int released = 0;
++
++ released = bcmasp_tx_reclaim(intf);
+
+ napi_complete(&intf->tx_napi);
+
+@@ -656,6 +663,12 @@ static void bcmasp_adj_link(struct net_device *dev)
+ UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
+ UMC_CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
++ if (reg & UMC_CMD_SW_RESET) {
++ reg &= ~UMC_CMD_SW_RESET;
++ umac_wl(intf, reg, UMC_CMD);
++ udelay(2);
++ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
++ }
+ umac_wl(intf, reg, UMC_CMD);
+
+ intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+@@ -785,6 +798,7 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
+
+ intf->tx_spb_index = 0;
+ intf->tx_spb_clean_index = 0;
++ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
+
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
+
+@@ -895,6 +909,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
+ } while (timeout-- > 0);
+ tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+
++ bcmasp_tx_reclaim(intf);
++
+ umac_enable_set(intf, UMC_CMD_TX_EN, 0);
+
+ phy_stop(dev->phydev);
+@@ -1048,6 +1064,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+ netdev_err(dev, "could not attach to PHY\n");
+ goto err_phy_disable;
+ }
++
++ /* Indicate that the MAC is responsible for PHY PM */
++ phydev->mac_managed_pm = true;
+ } else if (!intf->wolopts) {
+ ret = phy_resume(dev->phydev);
+ if (ret)
+@@ -1058,9 +1077,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+
+ umac_init(intf);
+
+- /* Disable the UniMAC RX/TX */
+- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
+-
+ umac_set_hw_addr(intf, dev->dev_addr);
+
+ intf->old_duplex = -1;
+@@ -1080,9 +1096,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+
+ bcmasp_enable_rx(intf, 1);
+
+- /* Turn on UniMAC TX/RX */
+- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
+-
+ intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
+
+ bcmasp_netif_start(dev);
+@@ -1318,7 +1331,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
+ if (intf->wolopts & WAKE_FILTER)
+ bcmasp_netfilt_suspend(intf);
+
+- /* UniMAC receive needs to be turned on */
++ /* Bring UniMAC out of reset if needed and enable RX */
++ reg = umac_rl(intf, UMC_CMD);
++ if (reg & UMC_CMD_SW_RESET)
++ reg &= ~UMC_CMD_SW_RESET;
++
++ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
++ umac_wl(intf, reg, UMC_CMD);
++
+ umac_enable_set(intf, UMC_CMD_RX_EN, 1);
+
+ if (intf->parent->wol_irq > 0) {
+diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
+index 3e4fb3c3e8342a..1be6d14030bcff 100644
+--- a/drivers/net/ethernet/broadcom/b44.c
++++ b/drivers/net/ethernet/broadcom/b44.c
+@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
+ bp->flags |= B44_FLAG_TX_PAUSE;
+ else
+ bp->flags &= ~B44_FLAG_TX_PAUSE;
+- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+- b44_halt(bp);
+- b44_init_rings(bp);
+- b44_init_hw(bp, B44_FULL_RESET);
+- } else {
+- __b44_set_flow_ctrl(bp, bp->flags);
++ if (netif_running(dev)) {
++ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
++ b44_halt(bp);
++ b44_init_rings(bp);
++ b44_init_hw(bp, B44_FULL_RESET);
++ } else {
++ __b44_set_flow_ctrl(bp, bp->flags);
++ }
+ }
+ spin_unlock_irq(&bp->lock);
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index e2a4e1088b7f49..9580ab83d387ce 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1262,7 +1262,7 @@ enum {
+
+ struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+- struct stats_query_entry query[FP_SB_MAX_E1x+
++ struct stats_query_entry query[FP_SB_MAX_E2 +
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
+ };
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index e9c1e1bb558060..528441b28c4efe 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+
+ phy_fw_ver[0] = '\0';
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+- phy_fw_ver, PHY_FW_VER_LEN);
+- strscpy(buf, bp->fw_ver, buf_len);
+- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+- "bc %d.%d.%d%s%s",
++ phy_fw_ver, sizeof(phy_fw_ver));
++ /* This may become truncated. */
++ scnprintf(buf, buf_len,
++ "%sbc %d.%d.%d%s%s",
++ bp->fw_ver,
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index d8b1824c334d3b..0bc1367fd64924 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+ {
+- if (!pool->page)
+- return;
+-
+ put_page(pool->page);
+
+ pool->page = NULL;
+@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+ {
+ int i;
+
++ if (!fp->page_pool.page)
++ return;
++
+ if (fp->mode == TPA_MODE_DISABLED)
+ return;
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index bda3ccc28eca67..f920976c36f0c6 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
+ }
+
+ memset(version, 0, sizeof(version));
+- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
++ bnx2x_fill_fw_str(bp, version, sizeof(version));
+ strlcat(info->fw_version, version, sizeof(info->fw_version));
+
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+index 02808513ffe45b..ea310057fe3aff 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
+
+ static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+ {
+- str[0] = '\0';
+- (*len)--;
++ if (*len)
++ str[0] = '\0';
+ return 0;
+ }
+
+@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+ u16 ret;
+
+ if (*len < 10) {
+- /* Need more than 10chars for this format */
++ /* Need more than 10 chars for this format */
+ bnx2x_null_format_ver(num, str, len);
+ return -EINVAL;
+ }
+@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
+ {
+ u16 ret;
+
+- if (*len < 10) {
+- /* Need more than 10chars for this format */
++ if (*len < 9) {
++ /* Need more than 9 chars for this format */
+ bnx2x_null_format_ver(num, str, len);
+ return -EINVAL;
+ }
+@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ int status = 0;
+ u8 *ver_p = version;
+ u16 remain_len = len;
+- if (version == NULL || params == NULL)
++ if (version == NULL || params == NULL || len == 0)
+ return -EINVAL;
+ bp = params->bp;
+
+@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+- *len -= 5;
++ *len -= 4;
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 7551aa8068f8f7..58a7bb75506a3e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -656,9 +656,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_OK;
+
+ tx_dma_error:
+- if (BNXT_TX_PTP_IS_SET(lflags))
+- atomic_inc(&bp->ptp_cfg->tx_avail);
+-
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+@@ -680,6 +677,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ tx_free:
+ dev_kfree_skb_any(skb);
+ tx_kick_pending:
++ if (BNXT_TX_PTP_IS_SET(lflags))
++ atomic_inc(&bp->ptp_cfg->tx_avail);
+ if (txr->kick_pending)
+ bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+@@ -1659,7 +1658,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
+ if (!skb) {
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+ } else {
+@@ -1669,7 +1668,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ if (!new_data) {
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+
+@@ -1685,7 +1684,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ if (!skb) {
+ skb_free_frag(data);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+ skb_reserve(skb, bp->rx_offset);
+@@ -1696,7 +1695,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ if (!skb) {
+ /* Page reuse already handled by bnxt_rx_pages(). */
+- cpr->sw_stats.rx.rx_oom_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ return NULL;
+ }
+ }
+@@ -1749,16 +1748,32 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct sk_buff *skb)
+ {
++ skb_mark_for_recycle(skb);
++
+ if (skb->dev != bp->dev) {
+ /* this packet belongs to a vf-rep */
+ bnxt_vf_rep_rx(bp, skb);
+ return;
+ }
+ skb_record_rx_queue(skb, bnapi->index);
+- skb_mark_for_recycle(skb);
+ napi_gro_receive(&bnapi->napi, skb);
+ }
+
++static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
++ struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
++{
++ u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
++
++ if (BNXT_PTP_RX_TS_VALID(flags))
++ goto ts_valid;
++ if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
++ return false;
++
++ts_valid:
++ *cmpl_ts = ts;
++ return true;
++}
++
+ /* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+@@ -1784,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ u32 flags, misc;
++ u32 cmpl_ts;
+ void *data;
+ int rc = 0;
+
+@@ -1897,11 +1913,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+- if (!frag_len) {
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
+- }
++ if (!frag_len)
++ goto oom_next_rx;
+ }
+ xdp_active = true;
+ }
+@@ -1924,9 +1937,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
++ goto oom_next_rx;
+ }
+ } else {
+ u32 payload;
+@@ -1937,29 +1948,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ payload = 0;
+ skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+ payload | len);
+- if (!skb) {
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
+- }
++ if (!skb)
++ goto oom_next_rx;
+ }
+
+ if (agg_bufs) {
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+- if (!skb) {
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
+- }
++ if (!skb)
++ goto oom_next_rx;
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+- cpr->sw_stats.rx.rx_oom_discards += 1;
+- rc = -ENOMEM;
+- goto next_rx;
++ goto oom_next_rx;
+ }
+ }
+ }
+@@ -2006,10 +2009,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ }
+ }
+
+- if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
+- RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
++ if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+- u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+ u64 ns, ts;
+
+ if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
+@@ -2039,6 +2040,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
++
++oom_next_rx:
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
++ rc = -ENOMEM;
++ goto next_rx;
+ }
+
+ /* In netpoll mode, if we are using a combined completion ring, we need to
+@@ -2084,7 +2090,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
+ }
+ rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
+ if (rc && rc != -EBUSY)
+- cpr->sw_stats.rx.rx_netpoll_discards += 1;
++ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
+ return rc;
+ }
+
+@@ -10534,6 +10540,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ /* VF-reps may need to be re-opened after the PF is re-opened */
+ if (BNXT_PF(bp))
+ bnxt_vf_reps_open(bp);
++ if (bp->ptp_cfg)
++ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+ bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ return 0;
+@@ -10583,10 +10591,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto half_open_err;
+ }
++ bnxt_init_napi(bp);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
+ if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
++ bnxt_del_napi(bp);
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto half_open_err;
+ }
+@@ -10605,6 +10615,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
+ void bnxt_half_close_nic(struct bnxt *bp)
+ {
+ bnxt_hwrm_resource_free(bp, false, true);
++ bnxt_del_napi(bp);
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+@@ -10703,10 +10714,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ bnxt_free_mem(bp, irq_re_init);
+ }
+
+-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
++void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ {
+- int rc = 0;
+-
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ /* If we get here, it means firmware reset is in progress
+ * while we are trying to close. We can safely proceed with
+@@ -10721,15 +10730,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+
+ #ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
++ int rc;
++
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+- if (rc)
+- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
++ if (!rc)
++ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
++ else if (rc < 0)
++ netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
+ }
+ #endif
+ __bnxt_close_nic(bp, irq_re_init, link_re_init);
+- return rc;
+ }
+
+ static int bnxt_close(struct net_device *dev)
+@@ -11786,6 +11798,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
+ bnxt_rtnl_unlock_sp(bp);
+ }
+
++static void bnxt_fw_fatal_close(struct bnxt *bp)
++{
++ bnxt_tx_disable(bp);
++ bnxt_disable_napi(bp);
++ bnxt_disable_int_sync(bp);
++ bnxt_free_irq(bp);
++ bnxt_clear_int_mode(bp);
++ pci_disable_device(bp->pdev);
++}
++
+ static void bnxt_fw_reset_close(struct bnxt *bp)
+ {
+ bnxt_ulp_stop(bp);
+@@ -11799,12 +11821,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff)
+ bp->fw_reset_min_dsecs = 0;
+- bnxt_tx_disable(bp);
+- bnxt_disable_napi(bp);
+- bnxt_disable_int_sync(bp);
+- bnxt_free_irq(bp);
+- bnxt_clear_int_mode(bp);
+- pci_disable_device(bp->pdev);
++ bnxt_fw_fatal_close(bp);
+ }
+ __bnxt_close_nic(bp, true, false);
+ bnxt_vf_reps_free(bp);
+@@ -12057,6 +12074,8 @@ static void bnxt_sp_task(struct work_struct *work)
+ bnxt_cfg_ntp_filters(bp);
+ if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_exec_fwd_req(bp);
++ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
++ netdev_info(bp->dev, "Receive PF driver unload event!\n");
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_port_qstats(bp, 0);
+ bnxt_hwrm_port_qstats_ext(bp, 0);
+@@ -12243,6 +12262,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
+
+ bp->fw_cap = 0;
+ rc = bnxt_hwrm_ver_get(bp);
++ /* FW may be unresponsive after FLR. FLR must complete within 100 msec
++ * so wait before continuing with recovery.
++ */
++ if (rc)
++ msleep(100);
+ bnxt_try_map_fw_health_reg(bp);
+ if (rc) {
+ rc = bnxt_try_recover_fw(bp);
+@@ -13036,8 +13060,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+ }
+ }
+ }
+- if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+- netdev_info(bp->dev, "Receive PF driver unload event!\n");
+ }
+
+ #else
+@@ -13897,6 +13919,8 @@ static int bnxt_resume(struct device *device)
+ if (rc)
+ goto resume_exit;
+
++ bnxt_clear_reservations(bp, true);
++
+ if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
+ rc = -ENODEV;
+ goto resume_exit;
+@@ -13939,6 +13963,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
++ bool abort = false;
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
+@@ -13947,16 +13972,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+
+ bnxt_ulp_stop(bp);
+
+- if (state == pci_channel_io_perm_failure) {
++ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
++ netdev_err(bp->dev, "Firmware reset already in progress\n");
++ abort = true;
++ }
++
++ if (abort || state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+- if (state == pci_channel_io_frozen)
++ /* Link is not reliable anymore if state is pci_channel_io_frozen
++ * so we disable bus master to prevent any potential bad DMAs before
++ * freeing kernel memory.
++ */
++ if (state == pci_channel_io_frozen) {
+ set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
++ bnxt_fw_fatal_close(bp);
++ }
+
+ if (netif_running(netdev))
+- bnxt_close(netdev);
++ __bnxt_close_nic(bp, true, true);
+
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+@@ -14042,6 +14078,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
+ }
+
+ reset_exit:
++ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_clear_reservations(bp, true);
+ rtnl_unlock();
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 84cbcfa61bc12f..0116f67593e3a0 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -161,7 +161,7 @@ struct rx_cmp {
+ #define RX_CMP_FLAGS_ERROR (1 << 6)
+ #define RX_CMP_FLAGS_PLACEMENT (7 << 7)
+ #define RX_CMP_FLAGS_RSS_VALID (1 << 10)
+- #define RX_CMP_FLAGS_UNUSED (1 << 11)
++ #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11)
+ #define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPES_MASK 0xf000
+ #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+@@ -188,6 +188,12 @@ struct rx_cmp {
+ __le32 rx_cmp_rss_hash;
+ };
+
++#define BNXT_PTP_RX_TS_VALID(flags) \
++ (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
++
++#define BNXT_ALL_RX_TS_VALID(flags) \
++ !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
++
+ #define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+@@ -2362,7 +2368,7 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
+ int bnxt_half_open_nic(struct bnxt *bp);
+ void bnxt_half_close_nic(struct bnxt *bp);
+ void bnxt_reenable_sriov(struct bnxt *bp);
+-int bnxt_close_nic(struct bnxt *, bool, bool);
++void bnxt_close_nic(struct bnxt *, bool, bool);
+ void bnxt_get_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats);
+ int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 8b3e7697390f7b..9d39f194b260f5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -478,15 +478,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
+ return -ENODEV;
+ }
+ bnxt_ulp_stop(bp);
+- if (netif_running(bp->dev)) {
+- rc = bnxt_close_nic(bp, true, true);
+- if (rc) {
+- NL_SET_ERR_MSG_MOD(extack, "Failed to close");
+- dev_close(bp->dev);
+- rtnl_unlock();
+- break;
+- }
+- }
++ if (netif_running(bp->dev))
++ bnxt_close_nic(bp, true, true);
+ bnxt_vf_reps_free(bp);
+ rc = bnxt_hwrm_func_drv_unrgtr(bp);
+ if (rc) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 547247d98eba21..2e7ddbca9d53b1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -164,9 +164,8 @@ static int bnxt_set_coalesce(struct net_device *dev,
+ reset_coalesce:
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ if (update_stats) {
+- rc = bnxt_close_nic(bp, true, false);
+- if (!rc)
+- rc = bnxt_open_nic(bp, true, false);
++ bnxt_close_nic(bp, true, false);
++ rc = bnxt_open_nic(bp, true, false);
+ } else {
+ rc = bnxt_hwrm_set_coal(bp);
+ }
+@@ -955,12 +954,7 @@ static int bnxt_set_channels(struct net_device *dev,
+ * before PF unload
+ */
+ }
+- rc = bnxt_close_nic(bp, true, false);
+- if (rc) {
+- netdev_err(bp->dev, "Set channel failure rc :%x\n",
+- rc);
+- return rc;
+- }
++ bnxt_close_nic(bp, true, false);
+ }
+
+ if (sh) {
+@@ -3027,7 +3021,7 @@ static void bnxt_get_pkgver(struct net_device *dev)
+
+ if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
+ len = strlen(bp->fw_ver_str);
+- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
++ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
+ "/pkg %s", buf);
+ }
+ }
+@@ -3737,12 +3731,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+ } else {
+ bnxt_ulp_stop(bp);
+- rc = bnxt_close_nic(bp, true, false);
+- if (rc) {
+- etest->flags |= ETH_TEST_FL_FAILED;
+- bnxt_ulp_start(bp, rc);
+- return;
+- }
++ bnxt_close_nic(bp, true, false);
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+index 132442f16fe676..7a4e08b5a8c1b9 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+@@ -678,7 +678,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+- req_type, token->seq_id, rc);
++ req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ rc = __hwrm_to_stderr(rc);
+ exit:
+ if (token)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index f3886710e77873..6e3da3362bd617 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+
+ if (netif_running(bp->dev)) {
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+- rc = bnxt_close_nic(bp, false, false);
+- if (!rc)
+- rc = bnxt_open_nic(bp, false, false);
++ bnxt_close_nic(bp, false, false);
++ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 38d89d80b4a9c7..273c9ba48f09a1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -2075,6 +2075,7 @@ int bnxt_init_tc(struct bnxt *bp)
+ rhashtable_destroy(&tc_info->flow_table);
+ free_tc_info:
+ kfree(tc_info);
++ bp->tc_info = NULL;
+ return rc;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index 6ba2b939863330..7689086371e03c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -213,6 +213,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
+ if (err)
+ return;
+
++ if (edev->ulp_tbl->msix_requested)
++ bnxt_fill_msix_vecs(bp, edev->msix_entries);
++
+ if (aux_priv) {
+ struct auxiliary_device *adev;
+
+@@ -394,12 +397,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
+ if (!edev)
+ goto aux_dev_uninit;
+
++ aux_priv->edev = edev;
++
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+- aux_priv->edev = edev;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index 96f5ca778c67d6..2845796f782c24 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+- struct pci_dev *pdev = bp->pdev;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+@@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+- frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+- frag_len, DMA_TO_DEVICE);
+-
+- if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+- return NULL;
+-
+- dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+-
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
++ frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
++ skb_frag_off(frag);
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+@@ -304,11 +297,6 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+ * redirect is coming from a frame received by the
+ * bnxt_en driver.
+ */
+- rx_buf = &rxr->rx_buf_ring[cons];
+- mapping = rx_buf->mapping - bp->rx_dma_offset;
+- dma_unmap_page_attrs(&pdev->dev, mapping,
+- BNXT_RX_PAGE_SIZE, bp->rx_dir,
+- DMA_ATTR_WEAK_ORDERING);
+
+ /* if we are unable to allocate a new buffer, abort and reuse */
+ if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 24bade875ca6a1..79d096a371ae77 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2,7 +2,7 @@
+ /*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+- * Copyright (c) 2014-2020 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+ */
+
+ #define pr_fmt(fmt) "bcmgenet: " fmt
+@@ -2132,8 +2132,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ * will need to restore software padding of "runt" packets
+ */
++ len_stat |= DMA_TX_APPEND_CRC;
++
+ if (!i) {
+- len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
++ len_stat |= DMA_SOP;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_stat |= DMA_TX_DO_CSUM;
+ }
+@@ -2467,14 +2469,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
+ {
+ u32 reg;
+
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+- if (reg & CMD_SW_RESET)
++ if (reg & CMD_SW_RESET) {
++ spin_unlock_bh(&priv->reg_lock);
+ return;
++ }
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+
+ /* UniMAC stops on a packet boundary, wait for a full-size packet
+ * to be processed
+@@ -2490,8 +2496,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
+ udelay(10);
+
+ /* issue soft reset and disable MAC while updating its registers */
++ spin_lock_bh(&priv->reg_lock);
+ bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ udelay(2);
++ spin_unlock_bh(&priv->reg_lock);
+ }
+
+ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+@@ -3297,7 +3305,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
+ }
+
+ /* Returns a reusable dma control register value */
+-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
++static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
+ {
+ unsigned int i;
+ u32 reg;
+@@ -3322,6 +3330,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
++ if (flush_rx) {
++ reg = bcmgenet_rbuf_ctrl_get(priv);
++ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
++ udelay(10);
++ bcmgenet_rbuf_ctrl_set(priv, reg);
++ udelay(10);
++ }
++
+ return dma_ctrl;
+ }
+
+@@ -3343,7 +3359,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Start the network engine */
++ netif_addr_lock_bh(dev);
+ bcmgenet_set_rx_mode(dev);
++ netif_addr_unlock_bh(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
+@@ -3385,8 +3403,8 @@ static int bcmgenet_open(struct net_device *dev)
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+- /* Disable RX/TX DMA and flush TX queues */
+- dma_ctrl = bcmgenet_dma_disable(priv);
++ /* Disable RX/TX DMA and flush TX and RX queues */
++ dma_ctrl = bcmgenet_dma_disable(priv, true);
+
+ /* Reinitialize TDMA and RDMA and SW housekeeping */
+ ret = bcmgenet_init_dma(priv);
+@@ -3604,16 +3622,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
+ * 3. The number of filters needed exceeds the number filters
+ * supported by the hardware.
+ */
++ spin_lock(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ (nfilter > MAX_MDF_FILTER)) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock(&priv->reg_lock);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ return;
+ } else {
+ reg &= ~CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock(&priv->reg_lock);
+ }
+
+ /* update MDF filter */
+@@ -4015,6 +4036,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ goto err;
+ }
+
++ spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
+
+ /* Set default pause parameters */
+@@ -4257,7 +4279,7 @@ static int bcmgenet_resume(struct device *d)
+ bcmgenet_hfb_create_rxnfc_filter(priv, rule);
+
+ /* Disable RX/TX DMA and flush TX queues */
+- dma_ctrl = bcmgenet_dma_disable(priv);
++ dma_ctrl = bcmgenet_dma_disable(priv, false);
+
+ /* Reinitialize TDMA and RDMA and SW housekeeping */
+ ret = bcmgenet_init_dma(priv);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 1985c0ec4da2ab..28e2c94ef835c9 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+- * Copyright (c) 2014-2020 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+ */
+
+ #ifndef __BCMGENET_H__
+@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
+ /* device context */
+ struct bcmgenet_priv {
+ void __iomem *base;
++ /* reg_lock: lock to serialize access to shared registers */
++ spinlock_t reg_lock;
+ enum bcmgenet_version version;
+ struct net_device *dev;
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index 7a41cad5788f4e..0715ea5bf13ed9 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -2,7 +2,7 @@
+ /*
+ * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
+ *
+- * Copyright (c) 2014-2020 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+ */
+
+ #define pr_fmt(fmt) "bcmgenet_wol: " fmt
+@@ -42,19 +42,15 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+
+- if (dev->phydev) {
++ if (dev->phydev)
+ phy_ethtool_get_wol(dev->phydev, wol);
+- if (wol->supported)
+- return;
+- }
+
+- if (!device_can_wakeup(kdev)) {
+- wol->supported = 0;
+- wol->wolopts = 0;
++	/* The MAC is not wake-up capable; return what the PHY reports */
++ if (!device_can_wakeup(kdev))
+ return;
+- }
+
+- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
++	/* Overlay MAC capabilities with those of the PHY queried earlier */
++ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+ wol->wolopts = priv->wolopts;
+ memset(wol->sopass, 0, sizeof(wol->sopass));
+
+@@ -151,6 +147,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ }
+
+ /* Can't suspend with WoL if MAC is still in reset */
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+@@ -158,6 +155,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ /* disable RX */
+ reg &= ~CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+ mdelay(10);
+
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+@@ -203,6 +201,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ }
+
+ /* Enable CRC forward */
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ priv->crc_fwd_en = 1;
+ reg |= CMD_CRC_FWD;
+@@ -210,6 +209,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ /* Receiver must be enabled for WOL MP detection */
+ reg |= CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+
+ reg = UMAC_IRQ_MPD_R;
+ if (hfb_enable)
+@@ -256,7 +256,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ }
+
+ /* Disable CRC Forward */
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_CRC_FWD;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 97ea76d443abee..e7c659cd39746f 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -2,7 +2,7 @@
+ /*
+ * Broadcom GENET MDIO routines
+ *
+- * Copyright (c) 2014-2017 Broadcom
++ * Copyright (c) 2014-2024 Broadcom
+ */
+
+ #include <linux/acpi.h>
+@@ -75,6 +75,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+@@ -87,6 +88,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
+ reg |= CMD_TX_EN | CMD_RX_EN;
+ }
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+
+ priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmgenet_eee_enable_set(dev,
+@@ -274,6 +276,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ * block for the interface to work, unconditionally clear the
+ * Out-of-band disable since we do not need it.
+ */
++ mutex_lock(&phydev->lock);
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ if (priv->ext_phy) {
+@@ -285,6 +288,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ reg |= RGMII_MODE_EN;
+ }
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
++ mutex_unlock(&phydev->lock);
+
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 14b311196b8f85..f1c8ff5b63acde 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6439,6 +6439,14 @@ static void tg3_dump_state(struct tg3 *tp)
+ int i;
+ u32 *regs;
+
++	/* If it is a PCI error, all registers will be 0xffff;
++	 * don't dump them out, just report the error and return.
++	 */
++ if (tp->pdev->error_state != pci_channel_io_normal) {
++ netdev_err(tp->dev, "PCI channel ERROR!\n");
++ return;
++ }
++
+ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+ if (!regs)
+ return;
+@@ -6845,7 +6853,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ desc_idx, *post_ptr);
+ drop_it_no_recycle:
+ /* Other statistics kept track of by card. */
+- tp->rx_dropped++;
++ tnapi->rx_dropped++;
+ goto next_pkt;
+ }
+
+@@ -7874,8 +7882,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+
+ segs = skb_gso_segment(skb, tp->dev->features &
+ ~(NETIF_F_TSO | NETIF_F_TSO6));
+- if (IS_ERR(segs) || !segs)
++ if (IS_ERR(segs) || !segs) {
++ tnapi->tx_dropped++;
+ goto tg3_tso_bug_end;
++ }
+
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+@@ -8146,7 +8156,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ drop:
+ dev_kfree_skb_any(skb);
+ drop_nofree:
+- tp->tx_dropped++;
++ tnapi->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+@@ -9325,7 +9335,7 @@ static void __tg3_set_rx_mode(struct net_device *);
+ /* tp->lock is held. */
+ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+ {
+- int err;
++ int err, i;
+
+ tg3_stop_fw(tp);
+
+@@ -9346,6 +9356,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+
+ /* And make sure the next sample is new data */
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
++
++ for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ tnapi->rx_dropped = 0;
++ tnapi->tx_dropped = 0;
++ }
+ }
+
+ return err;
+@@ -11170,7 +11187,8 @@ static void tg3_reset_task(struct work_struct *work)
+ rtnl_lock();
+ tg3_full_lock(tp, 0);
+
+- if (tp->pcierr_recovery || !netif_running(tp->dev)) {
++ if (tp->pcierr_recovery || !netif_running(tp->dev) ||
++ tp->pdev->error_state != pci_channel_io_normal) {
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ tg3_full_unlock(tp);
+ rtnl_unlock();
+@@ -11895,6 +11913,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+ {
+ struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
++ unsigned long rx_dropped;
++ unsigned long tx_dropped;
++ int i;
+
+ stats->rx_packets = old_stats->rx_packets +
+ get_stat64(&hw_stats->rx_ucast_packets) +
+@@ -11941,8 +11962,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+ stats->rx_missed_errors = old_stats->rx_missed_errors +
+ get_stat64(&hw_stats->rx_discards);
+
+- stats->rx_dropped = tp->rx_dropped;
+- stats->tx_dropped = tp->tx_dropped;
++ /* Aggregate per-queue counters. The per-queue counters are updated
++ * by a single writer, race-free. The result computed by this loop
++ * might not be 100% accurate (counters can be updated in the middle of
++ * the loop) but the next tg3_get_nstats() will recompute the current
++ * value so it is acceptable.
++ *
++ * Note that these counters wrap around at 4G on 32bit machines.
++ */
++ rx_dropped = (unsigned long)(old_stats->rx_dropped);
++ tx_dropped = (unsigned long)(old_stats->tx_dropped);
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ rx_dropped += tnapi->rx_dropped;
++ tx_dropped += tnapi->tx_dropped;
++ }
++
++ stats->rx_dropped = rx_dropped;
++ stats->tx_dropped = tx_dropped;
+ }
+
+ static int tg3_get_regs_len(struct net_device *dev)
+@@ -18078,7 +18117,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ if (netif_running(dev))
+ dev_close(dev);
+
+- tg3_power_down(tp);
++ if (system_state == SYSTEM_POWER_OFF)
++ tg3_power_down(tp);
+
+ rtnl_unlock();
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 1000c894064f00..8d753f8c5b0657 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -3018,6 +3018,7 @@ struct tg3_napi {
+ u16 *rx_rcb_prod_idx;
+ struct tg3_rx_prodring_set prodring;
+ struct tg3_rx_buffer_desc *rx_rcb;
++ unsigned long rx_dropped;
+
+ u32 tx_prod ____cacheline_aligned;
+ u32 tx_cons;
+@@ -3026,6 +3027,7 @@ struct tg3_napi {
+ u32 prodmbox;
+ struct tg3_tx_buffer_desc *tx_ring;
+ struct tg3_tx_ring_info *tx_buffers;
++ unsigned long tx_dropped;
+
+ dma_addr_t status_mapping;
+ dma_addr_t rx_rcb_mapping;
+@@ -3219,8 +3221,6 @@ struct tg3 {
+
+
+ /* begin "everything else" cacheline(s) section */
+- unsigned long rx_dropped;
+- unsigned long tx_dropped;
+ struct rtnl_link_stats64 net_stats_prev;
+ struct tg3_ethtool_stats estats_prev;
+
+diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
+index a5ebd7110e0736..986f43d277119d 100644
+--- a/drivers/net/ethernet/brocade/bna/bna_types.h
++++ b/drivers/net/ethernet/brocade/bna/bna_types.h
+@@ -416,7 +416,7 @@ struct bna_ib {
+ /* Tx object */
+
+ /* Tx datapath control structure */
+-#define BNA_Q_NAME_SIZE 16
++#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6)
+ struct bna_tcb {
+ /* Fast path */
+ void **sw_qpt;
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index 31191b520b5875..6cf06a93bedfbb 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
+
+ for (i = 0; i < num_txqs; i++) {
+ vector_num = tx_info->tcb[i]->intr_vector;
+- sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
+- tx_id + tx_info->tcb[i]->id);
++ snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
++ bnad->netdev->name,
++ tx_id + tx_info->tcb[i]->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_tx, 0,
+ tx_info->tcb[i]->name,
+@@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
+
+ for (i = 0; i < num_rxps; i++) {
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+- sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
+- bnad->netdev->name,
+- rx_id + rx_info->rx_ctrl[i].ccb->id);
++ snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
++ "%s CQ %d", bnad->netdev->name,
++ rx_id + rx_info->rx_ctrl[i].ccb->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_rx, 0,
+ rx_info->rx_ctrl[i].ccb->name,
+diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+index 7246e13dd559fc..97291bfbeea589 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
++++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ void *kern_buf;
+
+ /* Copy the user space buf */
+- kern_buf = memdup_user(buf, nbytes);
++ kern_buf = memdup_user_nul(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ void *kern_buf;
+
+ /* Copy the user space buf */
+- kern_buf = memdup_user(buf, nbytes);
++ kern_buf = memdup_user_nul(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index b940dcd3ace681..8f61731e4554ba 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp)
+ return ret;
+ }
+
+- if (of_phy_is_fixed_link(np))
+- return mdiobus_register(bp->mii_bus);
+-
+ /* Only create the PHY from the device tree if at least one PHY is
+ * described. Otherwise scan the entire MDIO bus. We do this to support
+ * old device tree that did not follow the best practices and did not
+@@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp)
+
+ static int macb_mii_init(struct macb *bp)
+ {
++ struct device_node *child, *np = bp->pdev->dev.of_node;
+ int err = -ENXIO;
+
++	/* With fixed-link, we don't need to register the MDIO bus,
++	 * unless there is a child node named "mdio" in the device tree.
++	 * In that case, some devices may be attached to the MACB's MDIO bus.
++	 */
++ child = of_get_child_by_name(np, "mdio");
++ if (child)
++ of_node_put(child);
++ else if (of_phy_is_fixed_link(np))
++ return macb_mii_probe(bp->dev);
++
+ /* Enable management port */
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+index 600de587d7a989..e70b9ccca380e7 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
++ pg_info->page,
++ pg_info->page_offset + MIN_SKB_SIZE,
++ len - MIN_SKB_SIZE,
++ LIO_RXBUFFER_SZ);
+ }
+-
+- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+- pg_info->page,
+- pg_info->page_offset + MIN_SKB_SIZE,
+- len - MIN_SKB_SIZE,
+- LIO_RXBUFFER_SZ);
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 786ceae3448875..dd9e68465e6978 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
+ * in the Compressed Filter Tuple.
+ */
+ if (tp->vlan_shift >= 0 && fs->mask.ivlan)
+- ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
++ ntuple |= (u64)(FT_VLAN_VLD_F |
++ fs->val.ivlan) << tp->vlan_shift;
+
+ if (tp->port_shift >= 0 && fs->mask.iport)
+ ntuple |= (u64)fs->val.iport << tp->port_shift;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 98dd78551d89a6..fff1ce835bc0d7 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2684,12 +2684,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+ lb->loopback = 1;
+
+ q = &adap->sge.ethtxq[pi->first_qset];
+- __netif_tx_lock(q->txq, smp_processor_id());
++ __netif_tx_lock_bh(q->txq);
+
+ reclaim_completed_tx(adap, &q->q, -1, true);
+ credits = txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+- __netif_tx_unlock(q->txq);
++ __netif_tx_unlock_bh(q->txq);
+ return -ENOMEM;
+ }
+
+@@ -2724,7 +2724,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+ init_completion(&lb->completion);
+ txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+- __netif_tx_unlock(q->txq);
++ __netif_tx_unlock_bh(q->txq);
+
+ /* wait for the pkt to return */
+ ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+index 7750702900fa60..6f6525983130e7 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+
+ if (tp->snd_una != snd_una) {
+ tp->snd_una = snd_una;
+- tp->rcv_tstamp = tcp_time_stamp(tp);
++ tp->rcv_tstamp = tcp_jiffies32;
+ if (tp->snd_una == tp->snd_nxt &&
+ !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+ csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 37bd38d772e809..cccf0db2fb4e58 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
+ pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+
+ if (port[IFLA_PORT_PROFILE]) {
++ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
++ memcpy(pp, &prev_pp, sizeof(*pp));
++ return -EINVAL;
++ }
+ pp->set |= ENIC_SET_NAME;
+ memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
+ PORT_PROFILE_MAX);
+ }
+
+ if (port[IFLA_PORT_INSTANCE_UUID]) {
++ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
++ memcpy(pp, &prev_pp, sizeof(*pp));
++ return -EINVAL;
++ }
+ pp->set |= ENIC_SET_INSTANCE;
+ memcpy(pp->instance_uuid,
+ nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+ }
+
+ if (port[IFLA_PORT_HOST_UUID]) {
++ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
++ memcpy(pp, &prev_pp, sizeof(*pp));
++ return -EINVAL;
++ }
+ pp->set |= ENIC_SET_HOST;
+ memcpy(pp->host_uuid,
+ nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index a8b9d1a3e4d57e..5af98fba74803e 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -79,8 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+ #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
+
+ #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
+- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
++ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
++ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+ /**
+ * struct gmac_queue_page - page buffer per-page info
+@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+- .max_l3_len = 1542,
+- .val = CONFIG0_MAXLEN_1542,
++ .max_l3_len = 1548,
++ .val = CONFIG0_MAXLEN_1548,
+ },
+ {
+ .max_l3_len = 9212,
+@@ -1108,10 +1108,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
+ {
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
++ unsigned long flags;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+
++ spin_lock_irqsave(&geth->irq_lock, flags);
++
+ mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+
+ if (en)
+@@ -1120,6 +1123,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = en ? val | mask : val & ~mask;
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
++
++ spin_unlock_irqrestore(&geth->irq_lock, flags);
+ }
+
+ static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+@@ -1143,25 +1148,51 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ struct gmac_txdesc *txd;
+ skb_frag_t *skb_frag;
+ dma_addr_t mapping;
+- unsigned short mtu;
+ void *buffer;
+-
+- mtu = ETH_HLEN;
+- mtu += netdev->mtu;
+- if (skb->protocol == htons(ETH_P_8021Q))
+- mtu += VLAN_HLEN;
++ u16 mss;
++ int ret;
+
+ word1 = skb->len;
+ word3 = SOF_BIT;
+
+- if (word1 > mtu) {
++ mss = skb_shinfo(skb)->gso_size;
++ if (mss) {
++ /* This means we are dealing with TCP and skb->len is the
++ * sum total of all the segments. The TSO will deal with
++ * chopping this up for us.
++ */
++ /* The accelerator needs the full frame size here */
++ mss += skb_tcp_all_headers(skb);
++ netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
++ mss, skb->len);
+ word1 |= TSS_MTU_ENABLE_BIT;
+- word3 |= mtu;
++ word3 |= mss;
++ } else if (skb->len >= ETH_FRAME_LEN) {
++ /* Hardware offloaded checksumming isn't working on frames
++ * bigger than 1514 bytes. A hypothesis about this is that the
++ * checksum buffer is only 1518 bytes, so when the frames get
++ * bigger they get truncated, or the last few bytes get
++ * overwritten by the FCS.
++ *
++ * Just use software checksumming and bypass on bigger frames.
++ */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ ret = skb_checksum_help(skb);
++ if (ret)
++ return ret;
++ }
++ word1 |= TSS_BYPASS_BIT;
+ }
+
+- if (skb->ip_summed != CHECKSUM_NONE) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int tcp = 0;
+
++ /* We do not switch off the checksumming on non TCP/UDP
++ * frames: as is shown from tests, the checksumming engine
++ * is smart enough to see that a frame is not actually TCP
++ * or UDP and then just pass it through without any changes
++ * to the frame.
++ */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+@@ -1404,15 +1435,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+ union gmac_rxdesc_3 word3;
+ struct page *page = NULL;
+ unsigned int page_offs;
++ unsigned long flags;
+ unsigned short r, w;
+ union dma_rwptr rw;
+ dma_addr_t mapping;
+ int frag_nr = 0;
+
++ spin_lock_irqsave(&geth->irq_lock, flags);
+ rw.bits32 = readl(ptr_reg);
+ /* Reset interrupt as all packages until here are taken into account */
+ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
++ spin_unlock_irqrestore(&geth->irq_lock, flags);
++
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+@@ -1715,10 +1750,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
+ gmac_update_hw_stats(netdev);
+
+ if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
++ spin_lock(&geth->irq_lock);
+ writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+-
+- spin_lock(&geth->irq_lock);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ ++port->stats.rx_fifo_errors;
+ u64_stats_update_end(&port->ir_stats_syncp);
+@@ -1978,15 +2012,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+ return 0;
+ }
+
+-static netdev_features_t gmac_fix_features(struct net_device *netdev,
+- netdev_features_t features)
+-{
+- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+- features &= ~GMAC_OFFLOAD_FEATURES;
+-
+- return features;
+-}
+-
+ static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+@@ -2212,7 +2237,6 @@ static const struct net_device_ops gmac_351x_ops = {
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+- .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+ };
+
+@@ -2464,11 +2488,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+- /* We can handle jumbo frames up to 10236 bytes so, let's accept
+- * payloads of 10236 bytes minus VLAN and ethernet header
++ /* We can receive jumbo frames up to 10236 bytes but only
++ * transmit 2047 bytes so, let's accept payloads of 2047
++ * bytes minus VLAN and ethernet header
+ */
+ netdev->min_mtu = ETH_MIN_MTU;
+- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
++ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
+diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
+index 9fdf77d5eb3740..24bb989981f233 100644
+--- a/drivers/net/ethernet/cortina/gemini.h
++++ b/drivers/net/ethernet/cortina/gemini.h
+@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
+ #define SOF_BIT 0x80000000
+ #define EOF_BIT 0x40000000
+ #define EOFIE_BIT BIT(29)
+-#define MTU_SIZE_BIT_MASK 0x1fff
++#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
+
+ /* GMAC Tx Descriptor */
+ struct gmac_txdesc {
+@@ -787,7 +787,7 @@ union gmac_config0 {
+ #define CONFIG0_MAXLEN_1536 0
+ #define CONFIG0_MAXLEN_1518 1
+ #define CONFIG0_MAXLEN_1522 2
+-#define CONFIG0_MAXLEN_1542 3
++#define CONFIG0_MAXLEN_1548 3
+ #define CONFIG0_MAXLEN_9k 4 /* 9212 */
+ #define CONFIG0_MAXLEN_10k 5 /* 10236 */
+ #define CONFIG0_MAXLEN_1518__6 6
+diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
+index 6e14c918e3fb71..f188fba021a621 100644
+--- a/drivers/net/ethernet/engleder/tsnep.h
++++ b/drivers/net/ethernet/engleder/tsnep.h
+@@ -143,7 +143,7 @@ struct tsnep_rx {
+
+ struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+- char name[IFNAMSIZ + 9];
++ char name[IFNAMSIZ + 16];
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 8b992dc9bb52b4..4f36b29d66c860 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -668,17 +668,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+
+ static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
+ struct xdp_buff *xdp,
+- struct netdev_queue *tx_nq, struct tsnep_tx *tx)
++ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
++ bool zc)
+ {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ bool xmit;
++ u32 type;
+
+ if (unlikely(!xdpf))
+ return false;
+
++ /* no page pool for zero copy */
++ if (zc)
++ type = TSNEP_TX_TYPE_XDP_NDO;
++ else
++ type = TSNEP_TX_TYPE_XDP_TX;
++
+ __netif_tx_lock(tx_nq, smp_processor_id());
+
+- xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
++ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
+
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ if (xmit)
+@@ -1222,7 +1230,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
++ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+@@ -1272,7 +1280,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
++ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+@@ -1434,7 +1442,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+- length, false);
++ length - ETH_FCS_LEN, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+@@ -1517,7 +1525,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
+ prefetch(entry->xdp->data);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+- xsk_buff_set_size(entry->xdp, length);
++ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
+ xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+
+ /* RX metadata with timestamps is in front of actual data,
+@@ -1711,6 +1719,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
+ allocated--;
+ }
+ }
++
++ /* set need wakeup flag immediately if ring is not filled completely,
++ * first polling would be too late as need wakeup signalisation would
++ * be delayed for an indefinite time
++ */
++ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
++ int desc_available = tsnep_rx_desc_available(rx);
++
++ if (desc_available)
++ xsk_set_rx_need_wakeup(rx->xsk_pool);
++ else
++ xsk_clear_rx_need_wakeup(rx->xsk_pool);
++ }
+ }
+
+ static bool tsnep_pending(struct tsnep_queue *queue)
+@@ -1779,14 +1800,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+- sprintf(queue->name, "%s-txrx-%d", name,
+- queue->rx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
++ name, queue->rx->queue_index);
+ else if (queue->tx)
+- sprintf(queue->name, "%s-tx-%d", name,
+- queue->tx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
++ name, queue->tx->queue_index);
+ else
+- sprintf(queue->name, "%s-rx-%d", name,
+- queue->rx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
++ name, queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 9135b918dd4907..848e41a4b1dbb1 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -572,7 +572,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+ (*processed)++;
+ return true;
+
+- drop:
++drop:
+ /* Clean rxdes0 (which resets own bit) */
+ rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+@@ -656,6 +656,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
+ ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+
++ /* Ensure the descriptor config is visible before setting the tx
++ * pointer.
++ */
++ smp_wmb();
++
+ priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
+
+ return true;
+@@ -809,6 +814,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ dma_wmb();
+ first->txdes0 = cpu_to_le32(f_ctl_stat);
+
++ /* Ensure the descriptor config is visible before setting the tx
++ * pointer.
++ */
++ smp_wmb();
++
+ /* Update next TX pointer */
+ priv->tx_pointer = pointer;
+
+@@ -829,7 +839,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+
+ return NETDEV_TX_OK;
+
+- dma_err:
++dma_err:
+ if (net_ratelimit())
+ netdev_err(netdev, "map tx fragment failed\n");
+
+@@ -851,7 +861,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ * last fragment, so we know ftgmac100_free_tx_packet()
+ * hasn't freed the skb yet.
+ */
+- drop:
++drop:
+ /* Drop the packet */
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_dropped++;
+@@ -1344,7 +1354,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
+ ftgmac100_init_all(priv, true);
+
+ netdev_dbg(netdev, "Reset done !\n");
+- bail:
++bail:
+ if (priv->mii_bus)
+ mutex_unlock(&priv->mii_bus->mdio_lock);
+ if (netdev->phydev)
+@@ -1543,15 +1553,15 @@ static int ftgmac100_open(struct net_device *netdev)
+
+ return 0;
+
+- err_ncsi:
++err_ncsi:
+ napi_disable(&priv->napi);
+ netif_stop_queue(netdev);
+- err_alloc:
++err_alloc:
+ ftgmac100_free_buffers(priv);
+ free_irq(netdev->irq, netdev);
+- err_irq:
++err_irq:
+ netif_napi_del(&priv->napi);
+- err_hw:
++err_hw:
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ ftgmac100_free_rings(priv);
+ return err;
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index 63b3e02fab162e..4968f6f0bdbc25 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -84,7 +84,7 @@
+ FTGMAC100_INT_RPKT_BUF)
+
+ /* All the interrupts we care about */
+-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
++#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
+ FTGMAC100_INT_BAD)
+
+ /*
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index dcbc598b11c6c8..e7bf70ac9a4ca5 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -931,14 +931,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ }
+ }
+
+-static void dpaa_fq_setup(struct dpaa_priv *priv,
+- const struct dpaa_fq_cbs *fq_cbs,
+- struct fman_port *tx_port)
++static int dpaa_fq_setup(struct dpaa_priv *priv,
++ const struct dpaa_fq_cbs *fq_cbs,
++ struct fman_port *tx_port)
+ {
+ int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+- u16 channels[NR_CPUS];
+ struct dpaa_fq *fq;
++ u16 *channels;
++
++ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
++ if (!channels)
++ return -ENOMEM;
+
+ for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
+ channels[num_portals++] = qman_affine_channel(cpu);
+@@ -997,6 +1001,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
+ break;
+ }
+ }
++
++ kfree(channels);
++
++ return 0;
+ }
+
+ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+@@ -2277,12 +2285,12 @@ static netdev_tx_t
+ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ {
+ const int queue_mapping = skb_get_queue_mapping(skb);
+- bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
++ bool nonlinear;
+ int offset = 0;
+ int err = 0;
+
+@@ -2292,6 +2300,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+
+ qm_fd_clear_fd(&fd);
+
++ /* Packet data is always read as 32-bit words, so zero out any part of
++ * the skb which might be sent if we have to pad the packet
++ */
++ if (__skb_put_padto(skb, ETH_ZLEN, false))
++ goto enomem;
++
++ nonlinear = skb_is_nonlinear(skb);
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb
+@@ -3416,7 +3431,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
+ */
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
+
+- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++ if (err)
++ goto free_dpaa_bps;
+
+ /* Create a congestion group for this netdev, with
+ * dynamically-allocated CGR ID.
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index 5bd0b36d1feb58..3f8cd4a7d84576 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+ {
+ const cpumask_t *cpus = qman_affine_cpus();
+- bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
++ bool *needs_revert;
+ int cpu, res;
+
++ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
++ if (!needs_revert)
++ return -ENOMEM;
++
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ needs_revert[cpu] = true;
+ }
+
++ kfree(needs_revert);
++
+ return 0;
+
+ revert_values:
+@@ -498,6 +504,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
++ kfree(needs_revert);
++
+ return res;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 15bab41cee48df..40e88182959519 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+
+ memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+- dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+-
+ return skb;
+ }
+
+@@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
++ bool recycle_rx_buf = false;
+ void *buf_data;
+ u32 xdp_act;
+
+@@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
++ } else {
++ recycle_rx_buf = true;
+ }
+ } else if (fd_format == dpaa2_fd_sg) {
+ WARN_ON(priv->xdp_prog);
+@@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ goto err_build_skb;
+
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
++
++ if (recycle_rx_buf)
++ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+ return;
+
+ err_build_skb:
+@@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ dma_addr_t addr;
+
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
+-
+- /* If there's enough room to align the FD address, do it.
+- * It will help hardware optimize accesses.
+- */
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
++ else
++ return -ENOMEM;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+@@ -2894,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+ static int update_xps(struct dpaa2_eth_priv *priv)
+ {
+ struct net_device *net_dev = priv->net_dev;
+- struct cpumask xps_mask;
+- struct dpaa2_eth_fq *fq;
+ int i, num_queues, netdev_queues;
++ struct dpaa2_eth_fq *fq;
++ cpumask_var_t xps_mask;
+ int err = 0;
+
++ if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
++ return -ENOMEM;
++
+ num_queues = dpaa2_eth_queue_count(priv);
+ netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+
+@@ -2908,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
+ for (i = 0; i < netdev_queues; i++) {
+ fq = &priv->fq[i % num_queues];
+
+- cpumask_clear(&xps_mask);
+- cpumask_set_cpu(fq->target_cpu, &xps_mask);
++ cpumask_clear(xps_mask);
++ cpumask_set_cpu(fq->target_cpu, xps_mask);
+
+- err = netif_set_xps_queue(net_dev, &xps_mask, i);
++ err = netif_set_xps_queue(net_dev, xps_mask, i);
+ if (err) {
+ netdev_warn_once(net_dev, "Error setting XPS queue\n");
+ break;
+ }
+ }
+
++ free_cpumask_var(xps_mask);
+ return err;
+ }
+
+@@ -4967,6 +4973,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ if (err)
+ goto err_dl_port_add;
+
++ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
++
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index bfb6c96c3b2f08..834cba8c3a4163 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+
+ static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
+ {
+- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
++ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
+ * the software annotation area
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+index 4798fb7fe35d14..b6a534a3e0b123 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+@@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
+ err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ filter_block->acl_id, acl_entry_cfg);
+
+- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
++ dma_unmap_single(dev, acl_entry_cfg->key_iova,
++ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (err) {
+ dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+@@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
+ err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ block->acl_id, acl_entry_cfg);
+
+- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
+- DMA_TO_DEVICE);
++ dma_unmap_single(dev, acl_entry_cfg->key_iova,
++ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+ if (err) {
+ dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ kfree(cmd_buff);
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 97d3151076d534..a05a8525caa459 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block dpaa2_switch_port_switchdev_nb;
+-static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
+-
+ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+@@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ goto err_egress_flood;
+
+ err = switchdev_bridge_port_offload(netdev, netdev, NULL,
+- &dpaa2_switch_port_switchdev_nb,
+- &dpaa2_switch_port_switchdev_blocking_nb,
+- false, extack);
++ NULL, NULL, false, extack);
+ if (err)
+ goto err_switchdev_offload;
+
+@@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
+
+ static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
+ {
+- switchdev_bridge_port_unoffload(netdev, NULL,
+- &dpaa2_switch_port_switchdev_nb,
+- &dpaa2_switch_port_switchdev_blocking_nb);
++ switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
+ }
+
+ static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
+@@ -2610,13 +2603,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
+
+ static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
+ {
+- int *count, i;
++ int *count, ret, i;
+
+ for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
++ ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ count = &ethsw->buf_count;
+- *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
++ *count += ret;
+
+- if (unlikely(*count < BUFS_PER_CMD))
++ if (unlikely(ret < BUFS_PER_CMD))
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 35461165de0d2a..c17b9e3385168f 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+
+ if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+ __netif_subqueue_stopped(ndev, tx_ring->index) &&
++ !test_bit(ENETC_TX_DOWN, &priv->flags) &&
+ (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ netif_wake_subqueue(ndev, tx_ring->index);
+ }
+@@ -1380,6 +1381,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ int xdp_tx_bd_cnt, i, k;
+ int xdp_tx_frm_cnt = 0;
+
++ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
++ return -ENETDOWN;
++
+ enetc_lock_mdio();
+
+ tx_ring = priv->xdp_tx_ring[smp_processor_id()];
+@@ -1524,7 +1528,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+- rx_ring->stats.xdp_drops++;
+ }
+
+ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+@@ -1589,6 +1592,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ fallthrough;
+ case XDP_DROP:
+ enetc_xdp_drop(rx_ring, orig_i, i);
++ rx_ring->stats.xdp_drops++;
+ break;
+ case XDP_PASS:
+ rxbd = orig_rxbd;
+@@ -1605,6 +1609,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ break;
+ case XDP_TX:
+ tx_ring = priv->xdp_tx_ring[rx_ring->index];
++ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
++ enetc_xdp_drop(rx_ring, orig_i, i);
++ tx_ring->stats.xdp_tx_drops++;
++ break;
++ }
++
+ xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
+ rx_ring,
+ orig_i, i);
+@@ -2226,18 +2236,24 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+ }
+
+-static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
++static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
+ {
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+- for (i = 0; i < priv->num_tx_rings; i++)
+- enetc_enable_txbdr(hw, priv->tx_ring[i]);
+-
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_enable_rxbdr(hw, priv->rx_ring[i]);
+ }
+
++static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
++{
++ struct enetc_hw *hw = &priv->si->hw;
++ int i;
++
++ for (i = 0; i < priv->num_tx_rings; i++)
++ enetc_enable_txbdr(hw, priv->tx_ring[i]);
++}
++
+ static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+ {
+ int idx = rx_ring->index;
+@@ -2254,18 +2270,24 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+ }
+
+-static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
++static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
+ {
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+- for (i = 0; i < priv->num_tx_rings; i++)
+- enetc_disable_txbdr(hw, priv->tx_ring[i]);
+-
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_disable_rxbdr(hw, priv->rx_ring[i]);
+ }
+
++static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
++{
++ struct enetc_hw *hw = &priv->si->hw;
++ int i;
++
++ for (i = 0; i < priv->num_tx_rings; i++)
++ enetc_disable_txbdr(hw, priv->tx_ring[i]);
++}
++
+ static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+ {
+ int delay = 8, timeout = 100;
+@@ -2305,12 +2327,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
+
+ snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
+ priv->ndev->name, i);
+- err = request_irq(irq, enetc_msix, 0, v->name, v);
++ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
+ if (err) {
+ dev_err(priv->dev, "request_irq() failed!\n");
+ goto irq_err;
+ }
+- disable_irq(irq);
+
+ v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
+ v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+@@ -2464,9 +2485,13 @@ void enetc_start(struct net_device *ndev)
+ enable_irq(irq);
+ }
+
+- enetc_enable_bdrs(priv);
++ enetc_enable_tx_bdrs(priv);
++
++ enetc_enable_rx_bdrs(priv);
+
+ netif_tx_start_all_queues(ndev);
++
++ clear_bit(ENETC_TX_DOWN, &priv->flags);
+ }
+ EXPORT_SYMBOL_GPL(enetc_start);
+
+@@ -2524,9 +2549,15 @@ void enetc_stop(struct net_device *ndev)
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
++ set_bit(ENETC_TX_DOWN, &priv->flags);
++
+ netif_tx_stop_all_queues(ndev);
+
+- enetc_disable_bdrs(priv);
++ enetc_disable_rx_bdrs(priv);
++
++ enetc_wait_bdrs(priv);
++
++ enetc_disable_tx_bdrs(priv);
+
+ for (i = 0; i < priv->bdr_int_num; i++) {
+ int irq = pci_irq_vector(priv->si->pdev,
+@@ -2537,8 +2568,6 @@ void enetc_stop(struct net_device *ndev)
+ napi_disable(&priv->int_vector[i]->napi);
+ }
+
+- enetc_wait_bdrs(priv);
+-
+ enetc_clear_interrupts(priv);
+ }
+ EXPORT_SYMBOL_GPL(enetc_stop);
+@@ -2769,7 +2798,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+- "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
++ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
+index 7439739cd81a23..fcadb0848d2541 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
+@@ -328,6 +328,7 @@ enum enetc_active_offloads {
+
+ enum enetc_flags_bit {
+ ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
++ ENETC_TX_DOWN,
+ };
+
+ /* interrupt coalescing modes */
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index a8fbcada6b01ff..733af928caffc6 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -691,10 +691,19 @@ struct fec_enet_private {
+ /* XDP BPF Program */
+ struct bpf_prog *xdp_prog;
+
++ struct {
++ int pps_enable;
++ u64 ns_sys, ns_phc;
++ u32 at_corr;
++ u8 at_inc_corr;
++ } ptp_saved_state;
++
+ u64 ethtool_stats[];
+ };
+
+ void fec_ptp_init(struct platform_device *pdev, int irq_idx);
++void fec_ptp_restore_state(struct fec_enet_private *fep);
++void fec_ptp_save_state(struct fec_enet_private *fep);
+ void fec_ptp_stop(struct platform_device *pdev);
+ void fec_ptp_start_cyclecounter(struct net_device *ndev);
+ int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 77c8e9cfb44562..e8d9a0eba4d6b5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -283,8 +283,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #define PKT_MINBUF_SIZE 64
+
+ /* FEC receive acceleration */
+-#define FEC_RACC_IPDIS (1 << 1)
+-#define FEC_RACC_PRODIS (1 << 2)
++#define FEC_RACC_IPDIS BIT(1)
++#define FEC_RACC_PRODIS BIT(2)
+ #define FEC_RACC_SHIFT16 BIT(7)
+ #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
+@@ -316,8 +316,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #define FEC_MMFR_TA (2 << 16)
+ #define FEC_MMFR_DATA(v) (v & 0xffff)
+ /* FEC ECR bits definition */
+-#define FEC_ECR_MAGICEN (1 << 2)
+-#define FEC_ECR_SLEEP (1 << 3)
++#define FEC_ECR_RESET BIT(0)
++#define FEC_ECR_ETHEREN BIT(1)
++#define FEC_ECR_MAGICEN BIT(2)
++#define FEC_ECR_SLEEP BIT(3)
++#define FEC_ECR_EN1588 BIT(4)
++#define FEC_ECR_BYTESWP BIT(8)
++/* FEC RCR bits definition */
++#define FEC_RCR_LOOP BIT(0)
++#define FEC_RCR_HALFDPX BIT(1)
++#define FEC_RCR_MII BIT(2)
++#define FEC_RCR_PROMISC BIT(3)
++#define FEC_RCR_BC_REJ BIT(4)
++#define FEC_RCR_FLOWCTL BIT(5)
++#define FEC_RCR_RMII BIT(8)
++#define FEC_RCR_10BASET BIT(9)
++/* TX WMARK bits */
++#define FEC_TXWMRK_STRFWD BIT(8)
+
+ #define FEC_MII_TIMEOUT 30000 /* us */
+
+@@ -1041,7 +1056,10 @@ fec_restart(struct net_device *ndev)
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
+- u32 ecntl = 0x2; /* ETHEREN */
++ u32 ecntl = FEC_ECR_ETHEREN;
++
++ if (fep->bufdesc_ex)
++ fec_ptp_save_state(fep);
+
+ /* Whack a reset. We should wait for this.
+ * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+@@ -1116,18 +1134,18 @@ fec_restart(struct net_device *ndev)
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rcntl |= (1 << 6);
+ else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+- rcntl |= (1 << 8);
++ rcntl |= FEC_RCR_RMII;
+ else
+- rcntl &= ~(1 << 8);
++ rcntl &= ~FEC_RCR_RMII;
+
+ /* 1G, 100M or 10M */
+ if (ndev->phydev) {
+ if (ndev->phydev->speed == SPEED_1000)
+ ecntl |= (1 << 5);
+ else if (ndev->phydev->speed == SPEED_100)
+- rcntl &= ~(1 << 9);
++ rcntl &= ~FEC_RCR_10BASET;
+ else
+- rcntl |= (1 << 9);
++ rcntl |= FEC_RCR_10BASET;
+ }
+ } else {
+ #ifdef FEC_MIIGSK_ENR
+@@ -1186,13 +1204,13 @@ fec_restart(struct net_device *ndev)
+
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+- ecntl |= (1 << 8);
++ ecntl |= FEC_ECR_BYTESWP;
+ /* enable ENET store and forward mode */
+- writel(1 << 8, fep->hwp + FEC_X_WMRK);
++ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+ }
+
+ if (fep->bufdesc_ex)
+- ecntl |= (1 << 4);
++ ecntl |= FEC_ECR_EN1588;
+
+ if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
+ fep->rgmii_txc_dly)
+@@ -1210,8 +1228,10 @@ fec_restart(struct net_device *ndev)
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+ fec_enet_active_rxring(ndev);
+
+- if (fep->bufdesc_ex)
++ if (fep->bufdesc_ex) {
+ fec_ptp_start_cyclecounter(ndev);
++ fec_ptp_restore_state(fep);
++ }
+
+ /* Enable interrupts we wish to service */
+ if (fep->link)
+@@ -1291,7 +1311,7 @@ static void
+ fec_stop(struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+- u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
++ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
+ u32 val;
+
+ /* We cannot expect a graceful transmit stop without link !!! */
+@@ -1302,6 +1322,9 @@ fec_stop(struct net_device *ndev)
+ netdev_err(ndev, "Graceful transmit stop did not complete!\n");
+ }
+
++ if (fep->bufdesc_ex)
++ fec_ptp_save_state(fep);
++
+ /* Whack a reset. We should wait for this.
+ * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
+ * instead of reset MAC itself.
+@@ -1310,7 +1333,7 @@ fec_stop(struct net_device *ndev)
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+- writel(1, fep->hwp + FEC_ECNTRL);
++ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+@@ -1324,11 +1347,19 @@ fec_stop(struct net_device *ndev)
+ /* We have to keep ENET enabled to have MII interrupt stay working */
+ if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+ !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+- writel(2, fep->hwp + FEC_ECNTRL);
++ writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
+-}
+
++ if (fep->bufdesc_ex) {
++ val = readl(fep->hwp + FEC_ECNTRL);
++ val |= FEC_ECR_EN1588;
++ writel(val, fep->hwp + FEC_ECNTRL);
++
++ fec_ptp_start_cyclecounter(ndev);
++ fec_ptp_restore_state(fep);
++ }
++}
+
+ static void
+ fec_timeout(struct net_device *ndev, unsigned int txqueue)
+@@ -2011,6 +2042,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+
+ /* if any of the above changed restart the FEC */
+ if (status_change) {
++ netif_stop_queue(ndev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+@@ -2020,6 +2052,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+ }
+ } else {
+ if (fep->link) {
++ netif_stop_queue(ndev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_stop(ndev);
+@@ -2379,8 +2412,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
+ fep->link = 0;
+ fep->full_duplex = 0;
+
+- phy_dev->mac_managed_pm = true;
+-
+ phy_attached_info(phy_dev);
+
+ return 0;
+@@ -2392,10 +2423,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ bool suppress_preamble = false;
++ struct phy_device *phydev;
+ struct device_node *node;
+ int err = -ENXIO;
+ u32 mii_speed, holdtime;
+ u32 bus_freq;
++ int addr;
+
+ /*
+ * The i.MX28 dual fec interfaces are not equal.
+@@ -2509,6 +2542,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+ goto err_out_free_mdiobus;
+ of_node_put(node);
+
++ /* find all the PHY devices on the bus and set mac_managed_pm to true */
++ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
++ phydev = mdiobus_get_phy(fep->mii_bus, addr);
++ if (phydev)
++ phydev->mac_managed_pm = true;
++ }
++
+ mii_cnt++;
+
+ /* save fec0 mii_bus */
+@@ -3648,29 +3688,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
+ return 0;
+ }
+
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-/**
+- * fec_poll_controller - FEC Poll controller function
+- * @dev: The FEC network adapter
+- *
+- * Polled functionality used by netconsole and others in non interrupt mode
+- *
+- */
+-static void fec_poll_controller(struct net_device *dev)
+-{
+- int i;
+- struct fec_enet_private *fep = netdev_priv(dev);
+-
+- for (i = 0; i < FEC_IRQ_NUM; i++) {
+- if (fep->irq[i] > 0) {
+- disable_irq(fep->irq[i]);
+- fec_enet_interrupt(fep->irq[i], dev);
+- enable_irq(fep->irq[i]);
+- }
+- }
+-}
+-#endif
+-
+ static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+@@ -3710,31 +3727,26 @@ static int fec_set_features(struct net_device *netdev,
+ return 0;
+ }
+
+-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
+-{
+- struct vlan_ethhdr *vhdr;
+- unsigned short vlan_TCI = 0;
+-
+- if (skb->protocol == htons(ETH_P_ALL)) {
+- vhdr = (struct vlan_ethhdr *)(skb->data);
+- vlan_TCI = ntohs(vhdr->h_vlan_TCI);
+- }
+-
+- return vlan_TCI;
+-}
+-
+ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+- u16 vlan_tag;
++ u16 vlan_tag = 0;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return netdev_pick_tx(ndev, skb, NULL);
+
+- vlan_tag = fec_enet_get_raw_vlan_tci(skb);
+- if (!vlan_tag)
++ /* VLAN is present in the payload.*/
++ if (eth_type_vlan(skb->protocol)) {
++ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
++
++ vlan_tag = ntohs(vhdr->h_vlan_TCI);
++ /* VLAN is present in the skb but not yet pushed in the payload.*/
++ } else if (skb_vlan_tag_present(skb)) {
++ vlan_tag = skb->vlan_tci;
++ } else {
+ return vlan_tag;
++ }
+
+ return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+ }
+@@ -3982,9 +3994,6 @@ static const struct net_device_ops fec_netdev_ops = {
+ .ndo_tx_timeout = fec_timeout,
+ .ndo_set_mac_address = fec_set_mac_address,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+- .ndo_poll_controller = fec_poll_controller,
+-#endif
+ .ndo_set_features = fec_set_features,
+ .ndo_bpf = fec_enet_bpf,
+ .ndo_xdp_xmit = fec_enet_xdp_xmit,
+@@ -4135,6 +4144,14 @@ static int fec_enet_init(struct net_device *ndev)
+ return ret;
+ }
+
++static void fec_enet_deinit(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++
++ netif_napi_del(&fep->napi);
++ fec_enet_free_queue(ndev);
++}
++
+ #ifdef CONFIG_OF
+ static int fec_reset_phy(struct platform_device *pdev)
+ {
+@@ -4531,6 +4548,7 @@ fec_probe(struct platform_device *pdev)
+ fec_enet_mii_remove(fep);
+ failed_mii_init:
+ failed_irq:
++ fec_enet_deinit(ndev);
+ failed_init:
+ fec_ptp_stop(pdev);
+ failed_reset:
+@@ -4594,6 +4612,7 @@ fec_drv_remove(struct platform_device *pdev)
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
++ fec_enet_deinit(ndev);
+ free_netdev(ndev);
+ }
+
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 181d9bfbee220f..a4eb6edb850add 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -90,6 +90,30 @@
+ #define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
+ #define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
+
++/**
++ * fec_ptp_read - read raw cycle counter (to be used by time counter)
++ * @cc: the cyclecounter structure
++ *
++ * this function reads the cyclecounter registers and is called by the
++ * cyclecounter structure used to construct a ns counter from the
++ * arbitrary fixed point registers
++ */
++static u64 fec_ptp_read(const struct cyclecounter *cc)
++{
++ struct fec_enet_private *fep =
++ container_of(cc, struct fec_enet_private, cc);
++ u32 tempval;
++
++ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
++ tempval |= FEC_T_CTRL_CAPTURE;
++ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
++
++ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
++ udelay(1);
++
++ return readl(fep->hwp + FEC_ATIME);
++}
++
+ /**
+ * fec_ptp_enable_pps
+ * @fep: the fec_enet_private structure handle
+@@ -104,14 +128,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+ struct timespec64 ts;
+ u64 ns;
+
+- if (fep->pps_enable == enable)
+- return 0;
+-
+- fep->pps_channel = DEFAULT_PPS_CHANNEL;
+- fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+-
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
++ if (fep->pps_enable == enable) {
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++ return 0;
++ }
++
+ if (enable) {
+ /* clear capture or output compare interrupt status if have.
+ */
+@@ -137,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+ * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
+ * to current timer would be next second.
+ */
+- tempval = fep->cc.read(&fep->cc);
++ tempval = fec_ptp_read(&fep->cc);
+ /* Convert the ptp local counter to 1588 timestamp */
+ ns = timecounter_cyc2time(&fep->tc, tempval);
+ ts = ns_to_timespec64(ns);
+@@ -212,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
+ timecounter_read(&fep->tc);
+
+ /* Get the current ptp hardware time counter */
+- temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+- temp_val |= FEC_T_CTRL_CAPTURE;
+- writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+- udelay(1);
+-
+- ptp_hc = readl(fep->hwp + FEC_ATIME);
++ ptp_hc = fec_ptp_read(&fep->cc);
+
+ /* Convert the ptp local counter to 1588 timestamp */
+ curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
+@@ -272,30 +289,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
+ return HRTIMER_NORESTART;
+ }
+
+-/**
+- * fec_ptp_read - read raw cycle counter (to be used by time counter)
+- * @cc: the cyclecounter structure
+- *
+- * this function reads the cyclecounter registers and is called by the
+- * cyclecounter structure used to construct a ns counter from the
+- * arbitrary fixed point registers
+- */
+-static u64 fec_ptp_read(const struct cyclecounter *cc)
+-{
+- struct fec_enet_private *fep =
+- container_of(cc, struct fec_enet_private, cc);
+- u32 tempval;
+-
+- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+- tempval |= FEC_T_CTRL_CAPTURE;
+- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+-
+- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+- udelay(1);
+-
+- return readl(fep->hwp + FEC_ATIME);
+-}
+-
+ /**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+@@ -532,6 +525,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ int ret = 0;
+
+ if (rq->type == PTP_CLK_REQ_PPS) {
++ fep->pps_channel = DEFAULT_PPS_CHANNEL;
++ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
++
+ ret = fec_ptp_enable_pps(fep, on);
+
+ return ret;
+@@ -768,11 +764,64 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+ schedule_delayed_work(&fep->time_keep, HZ);
+ }
+
++void fec_ptp_save_state(struct fec_enet_private *fep)
++{
++ unsigned long flags;
++ u32 atime_inc_corr;
++
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
++
++ fep->ptp_saved_state.pps_enable = fep->pps_enable;
++
++ fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
++ fep->ptp_saved_state.ns_sys = ktime_get_ns();
++
++ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
++ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
++ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
++
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++}
++
++/* Restore PTP functionality after a reset */
++void fec_ptp_restore_state(struct fec_enet_private *fep)
++{
++ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
++ unsigned long flags;
++ u32 counter;
++ u64 ns;
++
++ spin_lock_irqsave(&fep->tmreg_lock, flags);
++
++ /* Reset turned it off, so adjust our status flag */
++ fep->pps_enable = 0;
++
++ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
++ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
++ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
++
++ ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
++ counter = ns & fep->cc.mask;
++ writel(counter, fep->hwp + FEC_ATIME);
++ timecounter_init(&fep->tc, &fep->cc, ns);
++
++ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++
++ /* Restart PPS if needed */
++ if (fep->ptp_saved_state.pps_enable) {
++ /* Re-enable PPS */
++ fec_ptp_enable_pps(fep, 1);
++ }
++}
++
+ void fec_ptp_stop(struct platform_device *pdev)
+ {
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
++ if (fep->pps_enable)
++ fec_ptp_enable_pps(fep, 0);
++
+ cancel_delayed_work_sync(&fep->time_keep);
+ hrtimer_cancel(&fep->perout_timer);
+ if (fep->ptp_clock)
+diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
+index 3b75cc543be93f..76e5181789cb90 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
++++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
+@@ -1074,6 +1074,14 @@ int memac_initialization(struct mac_device *mac_dev,
+ unsigned long capabilities;
+ unsigned long *supported;
+
++ /* The internal connection to the serdes is XGMII, but this isn't
++ * really correct for the phy mode (which is the external connection).
++ * However, this is how all older device trees say that they want
++ * 10GBASE-R (aka XFI), so just convert it for them.
++ */
++ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
++ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
++
+ mac_dev->phylink_ops = &memac_mac_ops;
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+@@ -1140,7 +1148,7 @@ int memac_initialization(struct mac_device *mac_dev,
+ * (and therefore that xfi_pcs cannot be set). If we are defaulting to
+ * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
+ */
+- if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
++ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
+ memac->xfi_pcs = pcs;
+ else
+ memac->sgmii_pcs = pcs;
+@@ -1154,14 +1162,6 @@ int memac_initialization(struct mac_device *mac_dev,
+ goto _return_fm_mac_free;
+ }
+
+- /* The internal connection to the serdes is XGMII, but this isn't
+- * really correct for the phy mode (which is the external connection).
+- * However, this is how all older device trees say that they want
+- * 10GBASE-R (aka XFI), so just convert it for them.
+- */
+- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+- mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+-
+ /* TODO: The following interface modes are supported by (some) hardware
+ * but not by this driver:
+ * - 1000BASE-KX
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 5704b5f57cd0da..5703240474e5b2 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -190,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ priv->rx_cfg.num_queues;
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+- tx_stats_num + rx_stats_num);
++ size_add(tx_stats_num, rx_stats_num));
+ priv->stats_report =
+ dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ &priv->stats_report_bus, GFP_KERNEL);
+@@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll(block, budget);
+- else
++ else if (budget)
+ reschedule |= gve_xdp_poll(block, budget);
+ }
+
++ if (!budget)
++ return 0;
++
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+@@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+ if (block->tx)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+
++ if (!budget)
++ return 0;
++
+ if (block->rx) {
+ work_done = gve_rx_poll_dqo(block, budget);
+ reschedule |= work_done == budget;
+diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
+index e84a066aa1a40a..93ff7c8ec90512 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx.c
++++ b/drivers/net/ethernet/google/gve/gve_rx.c
+@@ -362,7 +362,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
+
+ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info,
+- u16 packet_buffer_size, u16 len,
++ unsigned int truesize, u16 len,
+ struct gve_rx_ctx *ctx)
+ {
+ u32 offset = page_info->page_offset + page_info->pad;
+@@ -395,10 +395,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
+ if (skb != ctx->skb_head) {
+ ctx->skb_head->len += len;
+ ctx->skb_head->data_len += len;
+- ctx->skb_head->truesize += packet_buffer_size;
++ ctx->skb_head->truesize += truesize;
+ }
+ skb_add_rx_frag(skb, num_frags, page_info->page,
+- offset, len, packet_buffer_size);
++ offset, len, truesize);
+
+ return ctx->skb_head;
+ }
+@@ -492,7 +492,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
+
+ memcpy(alloc_page_info.page_address, src, page_info->pad + len);
+ skb = gve_rx_add_frags(napi, &alloc_page_info,
+- rx->packet_buffer_size,
++ PAGE_SIZE,
+ len, ctx);
+
+ u64_stats_update_begin(&rx->statss);
+@@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
+
+ feat = block->napi.dev->features;
+
+- /* If budget is 0, do all the work */
+- if (budget == 0)
+- budget = INT_MAX;
+-
+ if (budget > 0)
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index f281e42a7ef968..3d60ea25711fc8 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -506,11 +506,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
+ skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
+ }
+
+-static void gve_rx_free_skb(struct gve_rx_ring *rx)
++static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
+ {
+ if (!rx->ctx.skb_head)
+ return;
+
++ if (rx->ctx.skb_head == napi->skb)
++ napi->skb = NULL;
+ dev_kfree_skb_any(rx->ctx.skb_head);
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
+@@ -783,7 +785,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+
+ err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ if (err < 0) {
+- gve_rx_free_skb(rx);
++ gve_rx_free_skb(napi, rx);
+ u64_stats_update_begin(&rx->statss);
+ if (err == -ENOMEM)
+ rx->rx_skb_alloc_fail++;
+@@ -826,7 +828,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+
+ /* gve_rx_complete_skb() will consume skb if successful */
+ if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
+- gve_rx_free_skb(rx);
++ gve_rx_free_skb(napi, rx);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 6957a865cff37c..2ae891a62875c7 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -158,15 +158,16 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
+ u32 to_do)
+ {
+ struct gve_tx_buffer_state *info;
+- u32 clean_end = tx->done + to_do;
+ u64 pkts = 0, bytes = 0;
+ size_t space_freed = 0;
+ u32 xsk_complete = 0;
+ u32 idx;
++ int i;
+
+- for (; tx->done < clean_end; tx->done++) {
++ for (i = 0; i < to_do; i++) {
+ idx = tx->done & tx->mask;
+ info = &tx->info[idx];
++ tx->done++;
+
+ if (unlikely(!info->xdp.size))
+ continue;
+@@ -925,10 +926,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ bool repoll;
+ u32 to_do;
+
+- /* If budget is 0, do all the work */
+- if (budget == 0)
+- budget = INT_MAX;
+-
+ /* Find out how much work there is to be done */
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+index 1e19b834a6130e..89b62b8d16e14b 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -501,28 +501,18 @@ static int gve_prep_tso(struct sk_buff *skb)
+ if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
+ return -1;
+
++ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++ return -EINVAL;
++
+ /* Needed because we will modify header. */
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ tcp = tcp_hdr(skb);
+-
+- /* Remove payload length from checksum. */
+ paylen = skb->len - skb_transport_offset(skb);
+-
+- switch (skb_shinfo(skb)->gso_type) {
+- case SKB_GSO_TCPV4:
+- case SKB_GSO_TCPV6:
+- csum_replace_by_diff(&tcp->check,
+- (__force __wsum)htonl(paylen));
+-
+- /* Compute length of segmentation header. */
+- header_len = skb_tcp_all_headers(skb);
+- break;
+- default:
+- return -EINVAL;
+- }
++ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
++ header_len = skb_tcp_all_headers(skb);
+
+ if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
+ return -EINVAL;
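
gve_prep_tso() folds the payload length out of the TCP checksum so each emitted segment carries only the pseudo-header sum, and the rewrite hoists that adjustment out of the gso_type switch after rejecting non-TCP types up front. A hedged sketch of the one's-complement arithmetic behind csum_replace_by_diff(), reduced to bare 16-bit values rather than the kernel's __wsum pipeline:

    #include <stdint.h>
    #include <stdio.h>

    /* Remove 'val' from a stored internet checksum (which holds ~sum). */
    static uint16_t csum16_sub(uint16_t check, uint16_t val)
    {
        uint32_t sum = (uint16_t)~check;  /* back to the raw one's-complement sum */
        sum += (uint16_t)~val;            /* adding ~val == subtracting val */
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint16_t check = csum16_sub(0x1c46, 0x0200); /* illustrative values */
        printf("adjusted checksum: 0x%04x\n", check);
        return 0;
    }
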
+@@ -822,22 +812,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
+ const int header_len = skb_tcp_all_headers(skb);
+ const int gso_size = shinfo->gso_size;
+ int cur_seg_num_bufs;
++ int prev_frag_size;
+ int cur_seg_size;
+ int i;
+
+ cur_seg_size = skb_headlen(skb) - header_len;
++ prev_frag_size = skb_headlen(skb);
+ cur_seg_num_bufs = cur_seg_size > 0;
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ if (cur_seg_size >= gso_size) {
+ cur_seg_size %= gso_size;
+ cur_seg_num_bufs = cur_seg_size > 0;
++
++ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
++ int prev_frag_remain = prev_frag_size %
++ GVE_TX_MAX_BUF_SIZE_DQO;
++
++ /* If the last descriptor of the previous frag
++ * is less than cur_seg_size, the segment will
++ * span two descriptors in the previous frag.
++ * Since max gso size (9728) is less than
++ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
++ * for the segment to span more than two
++ * descriptors.
++ */
++ if (prev_frag_remain &&
++ cur_seg_size > prev_frag_remain)
++ cur_seg_num_bufs++;
++ }
+ }
+
+ if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+ return false;
+
+- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
++ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
++ cur_seg_size += prev_frag_size;
+ }
+
+ return true;
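
The gve_can_send_tso() change accounts for fragments larger than GVE_TX_MAX_BUF_SIZE_DQO being carved into maximum-size descriptors: a TSO segment that begins in the short tail descriptor of the previous fragment consumes one extra buffer. The arithmetic in isolation, with illustrative sizes:

    #include <stdio.h>

    #define MAX_BUF 4096   /* stand-in for GVE_TX_MAX_BUF_SIZE_DQO */

    int main(void)
    {
        int prev_frag_size = 9000;  /* fragment split into 4096 + 4096 + 808 */
        int cur_seg_size   = 1400;  /* segment bytes sitting in the previous frag */

        int tail = prev_frag_size % MAX_BUF;        /* last descriptor: 808 bytes */
        int extra = (tail && cur_seg_size > tail);  /* segment spans 808 + next */
        printf("segment needs %d extra descriptor(s)\n", extra); /* 1 */
        return 0;
    }
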
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index ecf92a5d56bbf2..4b893d162e85d0 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -947,6 +947,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
+ priv->tx_coalesce_timer.function = tx_done;
+
+ priv->map = syscon_node_to_regmap(arg.np);
++ of_node_put(arg.np);
+ if (IS_ERR(priv->map)) {
+ dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+ ret = PTR_ERR(priv->map);
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+index 928d934cb21a5a..616a2768e5048a 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+@@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+ }
+ }
+
++static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
++{
++#define HNS_MAC_LINK_WAIT_TIME 5
++#define HNS_MAC_LINK_WAIT_CNT 40
++
++ u32 link_status = 0;
++ int i;
++
++ if (!mac_ctrl_drv->get_link_status)
++ return link_status;
++
++ for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
++ msleep(HNS_MAC_LINK_WAIT_TIME);
++ mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
++ if (!link_status)
++ break;
++ }
++
++ return link_status;
++}
++
+ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+ {
+ struct mac_driver *mac_ctrl_drv;
+@@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+ &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
++
++ /* for a FIBER port, a fake link up may be reported.
++ * when the link status changes from down to up, we need to do
++ * anti-shake. the anti-shake time is based on tests.
++ * only FIBER ports need to do this.
++ */
++ if (*link_status && !mac_cb->link)
++ *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
+ }
+
+ mac_cb->link = *link_status;
+@@ -904,6 +933,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ syscon = syscon_node_to_regmap(cpld_args.np);
++ of_node_put(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
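
Besides the of_node_put() leak fixes, the hns change above debounces fibre link-up: a down-to-up transition only counts if the status holds across the whole polling window (40 samples at 5 ms in the patch), since any drop inside it marks the link-up as fake. The same debounce pattern as a standalone sketch (read_link() is a stand-in for the hardware query):

    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for the hardware read; vary this to model a flapping link. */
    static int read_link(void) { return 1; }

    /* Report "up" only if the link holds for every sample in the window,
     * mirroring hns_mac_link_anti_shake (40 samples x 5 ms in the patch). */
    static int debounced_link(int samples, int delay_ms)
    {
        int status = 0;
        for (int i = 0; i < samples; i++) {
            usleep(delay_ms * 1000);
            status = read_link();
            if (!status)        /* dropped during the window: fake link-up */
                break;
        }
        return status;
    }

    int main(void)
    {
        printf("link %s\n", debounced_link(40, 5) ? "up" : "down");
        return 0;
    }
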
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 7cf10d1e2b3117..85722afe21770e 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
+ static void fill_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+- int buf_num, enum hns_desc_type type, int mtu)
++ int buf_num, enum hns_desc_type type, int mtu,
++ bool is_gso)
+ {
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+@@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso(
+ return 0;
+ }
+
++static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
++ struct hnae_ring *ring)
++{
++ if (skb_is_gso(*out_skb))
++ return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
++ else
++ return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
++}
++
+ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+@@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ mtu);
+ }
+
++static void fill_desc_v2(struct hnae_ring *ring, void *priv,
++ int size, dma_addr_t dma, int frag_end,
++ int buf_num, enum hns_desc_type type, int mtu,
++ bool is_gso)
++{
++ if (is_gso)
++ fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
++ mtu);
++ else
++ fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
++ mtu);
++}
++
+ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+@@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ int seg_num;
+ dma_addr_t dma;
+ int size, next_to_use;
++ bool is_gso;
+ int i;
+
+ switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+@@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ ring->stats.sw_err_cnt++;
+ goto out_err_tx_ok;
+ }
++ is_gso = skb_is_gso(skb);
+ priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
+- buf_num, DESC_TYPE_SKB, ndev->mtu);
++ buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);
+
+ /* fill the fragments */
+ for (i = 1; i < seg_num; i++) {
+@@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ }
+ priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
+ seg_num - 1 == i ? 1 : 0, buf_num,
+- DESC_TYPE_PAGE, ndev->mtu);
++ DESC_TYPE_PAGE, ndev->mtu, is_gso);
+ }
+
+ /*complete translate all packets*/
+@@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev,
+ netdev_info(netdev, "enet v1 do not support tso!\n");
+ break;
+ default:
+- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+- priv->ops.fill_desc = fill_tso_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+- /* The chip only support 7*4096 */
+- netif_set_tso_max_size(netdev, 7 * 4096);
+- } else {
+- priv->ops.fill_desc = fill_v2_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+- }
+ break;
+ }
+ netdev->features = features;
+@@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ } else {
+ priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
+- if ((netdev->features & NETIF_F_TSO) ||
+- (netdev->features & NETIF_F_TSO6)) {
+- priv->ops.fill_desc = fill_tso_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+- /* This chip only support 7*4096 */
+- netif_set_tso_max_size(netdev, 7 * 4096);
+- } else {
+- priv->ops.fill_desc = fill_v2_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+- }
++ priv->ops.fill_desc = fill_desc_v2;
++ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
++ netif_set_tso_max_size(netdev, 7 * 4096);
+ /* enable tso when init
+ * control tso on/off through TSE bit in bd
+ */
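
The hns_enet rework stops swapping priv->ops.fill_desc and maybe_stop_tx whenever TSO is toggled; a fixed v2 dispatcher now branches per packet on skb_is_gso(), so a feature change can never leave the two pointers in a mismatched state mid-transmit. The pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static void fill_plain(int size) { printf("plain desc, %d bytes\n", size); }
    static void fill_tso(int size)   { printf("tso desc, %d bytes\n", size); }

    /* One stable entry point; the per-packet flag picks the variant, so there
     * is no mutable function pointer to race with a feature toggle. */
    static void fill_desc_v2(int size, bool is_gso)
    {
        if (is_gso)
            fill_tso(size);
        else
            fill_plain(size);
    }

    int main(void)
    {
        fill_desc_v2(1500, false);
        fill_desc_v2(64000, true);
        return 0;
    }
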
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+index ffa9d6573f54bc..3f3ee032f631c4 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+@@ -44,7 +44,8 @@ struct hns_nic_ring_data {
+ struct hns_nic_ops {
+ void (*fill_desc)(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+- int buf_num, enum hns_desc_type type, int mtu);
++ int buf_num, enum hns_desc_type type, int mtu,
++ bool is_gso);
+ int (*maybe_stop_tx)(struct sk_buff **out_skb,
+ int *bnum, struct hnae_ring *ring);
+ void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index aaf1f42624a79b..57787c380fa07f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -890,7 +890,7 @@ struct hnae3_handle {
+ struct hnae3_roce_private_info rinfo;
+ };
+
+- u32 numa_node_mask; /* for multi-chip support */
++ nodemask_t numa_node_mask; /* for multi-chip support */
+
+ enum hnae3_port_base_vlan_state port_base_vlan_state;
+
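
Widening numa_node_mask from u32 to nodemask_t matters on machines with more than 32 NUMA nodes: the u32 silently drops the high nodes, while the nodemask is a full-width bitmap copied with bitmap_copy(). A sketch of the truncation, assuming a 64-bit host:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_NODES 64

    struct nodemask { unsigned long bits[MAX_NODES / (8 * sizeof(unsigned long))]; };

    int main(void)
    {
        struct nodemask mask = { 0 };
        mask.bits[0] |= 1UL << 40;                 /* node 40 present (64-bit long assumed) */

        uint32_t narrow = (uint32_t)mask.bits[0];  /* the old u32 field truncates it */
        printf("u32 copy: %#x (node 40 lost)\n", (unsigned)narrow);
        printf("bitmap copy keeps it: %d\n", (int)((mask.bits[0] >> 40) & 1));
        return 0;
    }
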
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+index f3c9395d8351cb..618f66d9586b39 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+@@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+ true);
+
+- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
++ desc.data[0] = cpu_to_le32(tqp->index);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+index 3b6dbf158b98db..f72dc0cee30e55 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+- if (h->kinfo.dcb_ops->ieee_setapp)
++ if (h->kinfo.dcb_ops->ieee_delapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index b8508533878bea..4f385a18d288e4 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -500,11 +500,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ }
+
+ sprintf(result[j++], "%d", i);
+- sprintf(result[j++], "%s", dim_state_str[dim->state]);
++ sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
++ dim_state_str[dim->state] : "unknown");
+ sprintf(result[j++], "%u", dim->profile_ix);
+- sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
++ sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
++ dim_cqe_mode_str[dim->mode] : "unknown");
+ sprintf(result[j++], "%s",
+- dim_tune_stat_str[dim->tune_state]);
++ dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
++ dim_tune_stat_str[dim->tune_state] : "unknown");
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
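
The debugfs fix bounds-checks hardware-reported state values before indexing the name tables, since a glitched or future firmware revision can hand back an index past the end. The lookup pattern in isolation (table contents illustrative):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char * const dim_state_str[] = { "start", "in_prog", "apply" };

    static const char *dim_state_name(unsigned int state)
    {
        /* Never index past the table, whatever the device reported. */
        return state < ARRAY_SIZE(dim_state_str) ? dim_state_str[state] : "unknown";
    }

    int main(void)
    {
        printf("%s %s\n", dim_state_name(1), dim_state_name(42));
        return 0;
    }
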
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index cf50368441b783..14d086b535a2dc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3539,6 +3539,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+ ret = hns3_alloc_and_attach_buffer(ring, i);
+ if (ret)
+ goto out_buffer_fail;
++
++ if (!(i % HNS3_RESCHED_BD_NUM))
++ cond_resched();
+ }
+
+ return 0;
+@@ -5112,6 +5115,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
+ }
+
+ u64_stats_init(&priv->ring[i].syncp);
++ cond_resched();
+ }
+
+ return 0;
+@@ -5140,7 +5144,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hnae3_handle *h = priv->ae_handle;
+- u8 mac_addr_temp[ETH_ALEN];
++ u8 mac_addr_temp[ETH_ALEN] = {0};
+ int ret = 0;
+
+ if (h->ae_algo->ops->get_mac_addr)
+@@ -5725,6 +5729,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
++ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
++ hns3_nic_net_stop(netdev);
++
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
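
Both cond_resched() insertions above keep very long ring-buffer setup loops from monopolising a CPU and tripping the soft-lockup watchdog; yielding once per HNS3_RESCHED_BD_NUM (1024) iterations keeps the cost negligible. A userspace analogue of the batching, with sched_yield() standing in for cond_resched():

    #include <sched.h>
    #include <stdio.h>

    #define RESCHED_BATCH 1024

    int main(void)
    {
        long work = 0;
        for (int i = 0; i < 1 << 20; i++) {
            work += i;                 /* stand-in for one buffer allocation */
            if (!(i % RESCHED_BATCH))  /* yield periodically, not per item */
                sched_yield();
        }
        printf("done: %ld\n", work);
        return 0;
    }
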
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index acd756b0c7c9a4..d36c4ed16d8dd2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -214,6 +214,8 @@ enum hns3_nic_state {
+ #define HNS3_CQ_MODE_EQE 1U
+ #define HNS3_CQ_MODE_CQE 0U
+
++#define HNS3_RESCHED_BD_NUM 1024
++
+ enum hns3_pkt_l2t_type {
+ HNS3_L2_TYPE_UNICAST,
+ HNS3_L2_TYPE_MULTICAST,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 682239f33082b0..78181eea93c1c1 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = {
+ #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
+ #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
+ #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
++#define HNS3_NIC_LB_TEST_UNEXECUTED 4
++
++static int hns3_get_sset_count(struct net_device *netdev, int stringset);
+
+ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
+ {
+@@ -418,18 +421,26 @@ static void hns3_do_external_lb(struct net_device *ndev,
+ static void hns3_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+ {
++ int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST);
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
+ bool if_running = netif_running(ndev);
++ int i;
++
++ /* initialize the loopback test result, avoid marking an unexecuted
++ * loopback test as PASS.
++ */
++ for (i = 0; i < cnt; i++)
++ data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
+
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+- return;
++ goto failure;
+ }
+
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
+- return;
++ goto failure;
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+@@ -451,6 +462,10 @@ static void hns3_self_test(struct net_device *ndev,
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
++ return;
++
++failure:
++ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ static void hns3_update_limit_promisc_mode(struct net_device *netdev,
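
hns3_self_test() now pre-fills every result slot with an UNEXECUTED sentinel and funnels early exits through a failure label, so a test that never ran can no longer be read back as PASS (0). The shape of that fix, standalone:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TEST_UNEXECUTED 4
    #define FLAG_FAILED     (1u << 0)

    static void self_test(uint64_t *data, int cnt, unsigned *flags, bool resetting)
    {
        for (int i = 0; i < cnt; i++)
            data[i] = TEST_UNEXECUTED;   /* never leave a slot looking like PASS */

        if (resetting) {
            *flags |= FLAG_FAILED;       /* early exit still reports failure */
            return;
        }
        data[0] = 0;                     /* only tests that ran may report 0 */
    }

    int main(void)
    {
        uint64_t data[3]; unsigned flags = 0;
        self_test(data, 3, &flags, true);
        printf("flags=%u data0=%llu\n", flags, (unsigned long long)data[0]);
        return 0;
    }
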
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index c42574e297476b..9650ce594e2fdd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
+ static void hclge_update_fec_stats(struct hclge_dev *hdev);
+ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
++static int hclge_update_port_info(struct hclge_dev *hdev);
+
+ static struct hnae3_ae_algo ae_algo;
+
+@@ -1525,6 +1526,9 @@ static int hclge_configure(struct hclge_dev *hdev)
+ cfg.default_speed, ret);
+ return ret;
+ }
++ hdev->hw.mac.req_speed = hdev->hw.mac.speed;
++ hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
++ hdev->hw.mac.req_duplex = DUPLEX_FULL;
+
+ hclge_parse_link_mode(hdev, cfg.speed_ability);
+
+@@ -1754,7 +1758,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
+
+ nic->pdev = hdev->pdev;
+ nic->ae_algo = &ae_algo;
+- nic->numa_node_mask = hdev->numa_node_mask;
++ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
++ MAX_NUMNODES);
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
+
+ ret = hclge_knic_setup(vport, num_tqps,
+@@ -2446,7 +2451,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+- roce->numa_node_mask = nic->numa_node_mask;
++ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
++ MAX_NUMNODES);
+
+ return 0;
+ }
+@@ -2592,8 +2598,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
++ int ret;
++
++ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+
+- return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
++ if (ret)
++ return ret;
++
++ hdev->hw.mac.req_speed = speed;
++ hdev->hw.mac.req_duplex = duplex;
++
++ return 0;
+ }
+
+ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
+@@ -2891,11 +2906,9 @@ static int hclge_mac_init(struct hclge_dev *hdev)
+ int ret;
+
+ hdev->support_sfp_query = true;
+- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+- hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
+- if (ret)
+- return ret;
++
++ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+ if (hdev->hw.mac.support_autoneg) {
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+@@ -2903,6 +2916,14 @@ static int hclge_mac_init(struct hclge_dev *hdev)
+ return ret;
+ }
+
++ if (!hdev->hw.mac.autoneg) {
++ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
++ hdev->hw.mac.req_duplex,
++ hdev->hw.mac.lane_num);
++ if (ret)
++ return ret;
++ }
++
+ mac->link = 0;
+
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+@@ -3022,9 +3043,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
+
+ static void hclge_update_link_status(struct hclge_dev *hdev)
+ {
+- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+- struct hnae3_client *rclient = hdev->roce_client;
+ struct hnae3_client *client = hdev->nic_client;
+ int state;
+ int ret;
+@@ -3043,10 +3062,20 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+
+ if (state != hdev->hw.mac.link) {
+ hdev->hw.mac.link = state;
++ if (state == HCLGE_LINK_STATUS_UP)
++ hclge_update_port_info(hdev);
++
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+- if (rclient && rclient->ops->link_status_change)
+- rclient->ops->link_status_change(rhandle, state);
++
++ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
++ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
++ struct hnae3_client *rclient = hdev->roce_client;
++
++ if (rclient && rclient->ops->link_status_change)
++ rclient->ops->link_status_change(rhandle,
++ state);
++ }
+
+ hclge_push_link_status(hdev);
+ }
+@@ -3324,9 +3353,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
+ return ret;
+ }
+
+- hdev->hw.mac.autoneg = cmd->base.autoneg;
+- hdev->hw.mac.speed = cmd->base.speed;
+- hdev->hw.mac.duplex = cmd->base.duplex;
++ hdev->hw.mac.req_autoneg = cmd->base.autoneg;
++ hdev->hw.mac.req_speed = cmd->base.speed;
++ hdev->hw.mac.req_duplex = cmd->base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
+
+ return 0;
+@@ -3359,9 +3388,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+- cmd.base.autoneg = hdev->hw.mac.autoneg;
+- cmd.base.speed = hdev->hw.mac.speed;
+- cmd.base.duplex = hdev->hw.mac.duplex;
++ cmd.base.autoneg = hdev->hw.mac.req_autoneg;
++ cmd.base.speed = hdev->hw.mac.req_speed;
++ cmd.base.duplex = hdev->hw.mac.req_duplex;
+ linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
+
+ return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
+@@ -7933,8 +7962,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
+ /* Set the DOWN flag here to disable link updating */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+- /* flush memory to make sure DOWN is seen by service task */
+- smp_mb__before_atomic();
++ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
+ hclge_flush_link_update(hdev);
+ }
+ }
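
The barrier swap is about direction: smp_mb__before_atomic() orders accesses before the atomic op, but here the DOWN bit set by set_bit() must be visible before the flush that follows it, which is what smp_mb__after_atomic() provides. A C11 sketch of the publish side, with a fence standing in for the kernel primitive:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int down_flag;
    static atomic_int link_state;

    static void set_timer_task_disable(void)
    {
        atomic_store_explicit(&down_flag, 1, memory_order_relaxed);
        /* like smp_mb__after_atomic(): the flag store is globally visible
         * before we go on to flush/observe the service task's state */
        atomic_thread_fence(memory_order_seq_cst);
        (void)atomic_load_explicit(&link_state, memory_order_relaxed); /* flush step */
    }

    int main(void)
    {
        set_timer_task_disable();
        printf("down=%d\n", atomic_load(&down_flag));
        return 0;
    }
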
+@@ -9887,6 +9915,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
+ {
+ struct hclge_vport *vport;
++ bool enable = true;
+ int ret;
+ int i;
+
+@@ -9906,8 +9935,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
+ vport->cur_vlan_fltr_en = true;
+ }
+
++ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
++ !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
++ enable = false;
++
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+- HCLGE_FILTER_FE_INGRESS, true, 0);
++ HCLGE_FILTER_FE_INGRESS, enable, 0);
+ }
+
+ static int hclge_init_vlan_type(struct hclge_dev *hdev)
+@@ -10026,8 +10059,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+- mutex_lock(&hdev->vport_lock);
+-
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ if (is_write_tbl && vlan->hd_tbl_status)
+@@ -10042,8 +10073,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ break;
+ }
+ }
+-
+- mutex_unlock(&hdev->vport_lock);
+ }
+
+ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+@@ -10452,11 +10481,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ * handle mailbox. Just record the vlan id, and remove it after
+ * reset finished.
+ */
++ mutex_lock(&hdev->vport_lock);
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
++ mutex_unlock(&hdev->vport_lock);
+ return -EBUSY;
++ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
++ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
++ mutex_unlock(&hdev->vport_lock);
+
+ /* when port base vlan enabled, we use port base vlan as the vlan
+ * filter entry. In this case, we don't update vlan filter table
+@@ -10471,17 +10505,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ }
+
+ if (!ret) {
+- if (!is_kill)
++ if (!is_kill) {
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+- else if (is_kill && vlan_id != 0)
++ } else if (is_kill && vlan_id != 0) {
++ mutex_lock(&hdev->vport_lock);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
++ mutex_unlock(&hdev->vport_lock);
++ }
+ } else if (is_kill) {
+ /* when remove hw vlan filter failed, record the vlan id,
+ * and try to remove it from hw later, to be consistence
+ * with stack
+ */
++ mutex_lock(&hdev->vport_lock);
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
++ mutex_unlock(&hdev->vport_lock);
+ }
+
+ hclge_set_vport_vlan_fltr_change(vport);
+@@ -10521,6 +10560,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
++ mutex_lock(&hdev->vport_lock);
+ /* start from vport 1 for PF is always alive */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+@@ -10531,21 +10571,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ true);
+- if (ret && ret != -EINVAL)
++ if (ret && ret != -EINVAL) {
++ mutex_unlock(&hdev->vport_lock);
+ return;
++ }
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ sync_cnt++;
+- if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
++ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
++ mutex_unlock(&hdev->vport_lock);
+ return;
++ }
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
++ mutex_unlock(&hdev->vport_lock);
+
+ hclge_sync_vlan_fltr_state(hdev);
+ }
+@@ -11205,6 +11250,12 @@ static int hclge_init_client_instance(struct hnae3_client *client,
+ return ret;
+ }
+
++static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
++{
++ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
++ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
++}
++
+ static void hclge_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+ {
+@@ -11213,7 +11264,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
+
+ if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++ while (hclge_uninit_need_wait(hdev))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+@@ -11319,7 +11370,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
+
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+ pci_free_irq_vectors(pdev);
+- pci_release_mem_regions(pdev);
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ }
+
+@@ -11391,8 +11442,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+- up(&hdev->reset_sem);
++ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++ up(&hdev->reset_sem);
+ }
+
+ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+@@ -11591,14 +11642,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ if (ret)
+ goto out;
+
+- ret = hclge_devlink_init(hdev);
+- if (ret)
+- goto err_pci_uninit;
+-
+ /* Firmware command queue initialize */
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ if (ret)
+- goto err_devlink_uninit;
++ goto err_pci_uninit;
+
+ /* Firmware command initialize */
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+@@ -11652,6 +11699,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ goto err_msi_irq_uninit;
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
++ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
+@@ -11725,7 +11773,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+- goto err_mdiobus_unreg;
++ goto err_ptp_uninit;
+
+ INIT_KFIFO(hdev->mac_tnl_log);
+
+@@ -11765,6 +11813,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ dev_warn(&pdev->dev,
+ "failed to wake on lan init, ret = %d\n", ret);
+
++ ret = hclge_devlink_init(hdev);
++ if (ret)
++ goto err_ptp_uninit;
++
+ hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
+
+@@ -11772,9 +11824,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ HCLGE_DRIVER_NAME);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+-
+ return 0;
+
++err_ptp_uninit:
++ hclge_ptp_uninit(hdev);
+ err_mdiobus_unreg:
+ if (hdev->hw.mac.phydev)
+ mdiobus_unregister(hdev->hw.mac.mdio_bus);
+@@ -11784,8 +11837,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ pci_free_irq_vectors(pdev);
+ err_cmd_uninit:
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+-err_devlink_uninit:
+- hclge_devlink_uninit(hdev);
+ err_pci_uninit:
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+ pci_release_regions(pdev);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 7bc2049b723daa..76a5edfe7d2e5e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -263,11 +263,14 @@ struct hclge_mac {
+ u8 media_type; /* port media type, e.g. fibre/copper/backplane */
+ u8 mac_addr[ETH_ALEN];
+ u8 autoneg;
++ u8 req_autoneg;
+ u8 duplex;
++ u8 req_duplex;
+ u8 support_autoneg;
+ u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
+ u32 speed;
++ u32 req_speed;
+ u32 max_speed;
+ u32 speed_ability; /* speed ability supported by current media */
+ u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
+@@ -875,7 +878,7 @@ struct hclge_dev {
+
+ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
+ u16 num_alloc_vport; /* Num vports this driver supports */
+- u32 numa_node_mask;
++ nodemask_t numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_tx_desc; /* desc num of per tx queue */
+ u16 num_rx_desc; /* desc num of per rx queue */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index 04ff9bf121853a..61e155c4d441ef 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+- if (cmd_func)
+- ret = cmd_func(param);
+- else
++ if (!cmd_func) {
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
++ return;
++ }
++ ret = cmd_func(param);
+
+ /* PF driver should not reply IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+@@ -1123,10 +1124,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+- if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
++ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
++ req->mbx_src_vfid > hdev->num_req_vfs)) {
+ dev_warn(&hdev->pdev->dev,
+- "dropped invalid mailbox message, code = %u\n",
+- req->msg.code);
++ "dropped invalid mailbox message, code = %u, vfid = %u\n",
++ req->msg.code, req->mbx_src_vfid);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index 85fb11de43a124..80079657afebe0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
+ if (ret)
+ netdev_err(netdev, "failed to adjust link.\n");
+
++ hdev->hw.mac.req_speed = (u32)speed;
++ hdev->hw.mac.req_duplex = (u8)duplex;
++
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+index 80a2a0073d97aa..507d7ce26d8317 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+@@ -108,7 +108,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u64 ns = nsec;
+ u32 sec_h;
+
+- if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
++ if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ return;
+
+ /* Since the BD does not have enough space for the higher 16 bits of
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+index 8510b88d49820a..f3cd5a376eca90 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
++ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
++ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index a4d68fb216fb92..affdd9d70549ac 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -412,7 +412,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+- nic->numa_node_mask = hdev->numa_node_mask;
++ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
++ MAX_NUMNODES);
+ nic->flags |= HNAE3_SUPPORT_VF;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
+
+@@ -1206,6 +1207,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
++ } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
++ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ }
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1233,20 +1236,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
++ if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
++ return;
++
++ rtnl_lock();
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+- return;
++ break;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+- return;
++ break;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
++ rtnl_unlock();
+ }
+
+ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+@@ -1702,8 +1710,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
+ ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+- up(&hdev->reset_sem);
++ if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
++ up(&hdev->reset_sem);
+ }
+
+ static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+@@ -1974,8 +1982,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
+ }
+
++static void hclgevf_reset_timer(struct timer_list *t)
++{
++ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
++
++ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
++ hclgevf_reset_task_schedule(hdev);
++}
++
+ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ {
++#define HCLGEVF_RESET_DELAY 5
++
+ enum hclgevf_evt_cause event_cause;
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+@@ -1987,7 +2005,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+- hclgevf_reset_task_schedule(hdev);
++ mod_timer(&hdev->reset_timer,
++ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+@@ -2064,8 +2083,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+- roce->numa_node_mask = nic->numa_node_mask;
+-
++ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
++ MAX_NUMNODES);
+ return 0;
+ }
+
+@@ -2162,8 +2181,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
+ } else {
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+- /* flush memory to make sure DOWN is seen by service task */
+- smp_mb__before_atomic();
++ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
+ hclgevf_flush_link_update(hdev);
+ }
+ }
+@@ -2827,10 +2845,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ if (ret)
+ return ret;
+
+- ret = hclgevf_devlink_init(hdev);
+- if (ret)
+- goto err_devlink_init;
+-
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ if (ret)
+ goto err_cmd_queue_init;
+@@ -2923,6 +2937,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+
+ hclgevf_init_rxd_adv_layout(hdev);
+
++ ret = hclgevf_devlink_init(hdev);
++ if (ret)
++ goto err_config;
++
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
+ hdev->last_reset_time = jiffies;
+@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ HCLGEVF_DRIVER_NAME);
+
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
++ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+
+ return 0;
+
+@@ -2941,8 +2960,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ err_cmd_init:
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+ err_cmd_queue_init:
+- hclgevf_devlink_uninit(hdev);
+-err_devlink_init:
+ hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ return ret;
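
The VF-side VLAN handling follows a deferred-retry pattern: a delete that cannot be issued during reset is parked in vlan_del_fail_bmap for a later sweep, and a re-add of a parked id simply clears the bit so the sweep will not delete a VLAN the stack wants back. The bookkeeping with a plain bitmap (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define NVLAN 128
    static unsigned char pending_del[NVLAN / 8];

    static void set_pending(int id)   { pending_del[id / 8] |=  1u << (id % 8); }
    static void clear_pending(int id) { pending_del[id / 8] &= ~(1u << (id % 8)); }
    static bool is_pending(int id)    { return pending_del[id / 8] >> (id % 8) & 1; }

    static int set_vlan(int id, bool kill, bool resetting)
    {
        if (resetting && kill) { set_pending(id); return -1; }  /* park the delete */
        if (!kill && is_pending(id)) clear_pending(id);         /* re-add cancels it */
        return 0;                                               /* issued to hw */
    }

    int main(void)
    {
        set_vlan(10, true, true);    /* parked during reset */
        set_vlan(10, false, false);  /* re-added: pending delete cancelled */
        printf("vlan 10 pending delete: %d\n", is_pending(10));
        return 0;
    }
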
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 81c16b8c8da296..cccef32284616b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -219,6 +219,7 @@ struct hclgevf_dev {
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
++ struct timer_list reset_timer;
+
+ #define HCLGEVF_RESET_REQUESTED 0
+ #define HCLGEVF_RESET_PENDING 1
+@@ -235,7 +236,7 @@ struct hclgevf_dev {
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+- u32 numa_node_mask;
++ nodemask_t numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_tx_desc; /* desc num of per tx queue */
+ u16 num_rx_desc; /* desc num of per rx queue */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+index bbf7b14079de3c..85c2a634c8f96a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ i++;
+ }
+
++ /* ensure additional_info will be seen after received_resp */
++ smp_rmb();
++
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ resp->resp_status = hclgevf_resp_to_errno(resp_status);
+ memcpy(resp->additional_info, req->msg.resp_data,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
++
++ /* ensure additional_info will be seen before setting received_resp */
++ smp_wmb();
++
+ if (match_id) {
+ /* If match_id is not zero, it means PF support match_id.
+ * if the match_id is right, VF get the right response, or
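
The mailbox barriers form a classic store/load pairing: the handler writes additional_info, issues smp_wmb(), then sets received_resp, while the waiter polls received_resp, issues smp_rmb(), then reads additional_info. In C11 terms that is a release store paired with an acquire load, sketched here:

    #include <stdatomic.h>
    #include <stdio.h>

    static int additional_info;          /* payload */
    static atomic_int received_resp;     /* flag */

    static void handle_response(int info)
    {
        additional_info = info;
        /* like smp_wmb() + flag store: payload visible before the flag */
        atomic_store_explicit(&received_resp, 1, memory_order_release);
    }

    static int wait_response(void)
    {
        /* like polling + smp_rmb(): flag read ordered before payload read */
        while (!atomic_load_explicit(&received_resp, memory_order_acquire))
            ;
        return additional_info;
    }

    int main(void)
    {
        handle_response(42);
        printf("resp payload: %d\n", wait_response());
        return 0;
    }
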
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+index 5d4895bb57a17d..b259e95dd53c26 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->nic.kinfo.netdev->name)
++ __string(devname, hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+- __string(devname, &hdev->nic.kinfo.netdev->name)
++ __string(devname, hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
++ __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
+index 409a89d8022087..9ffd479c750881 100644
+--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
+@@ -575,6 +575,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
+ MDIO_SC_RESET_ST;
+ }
+ }
++ of_node_put(reg_args.np);
+ } else {
+ dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
+ mdio_dev->subctrl_vbase = NULL;
+diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
+index 5e27470c6b1ef3..f2d4669c81cf29 100644
+--- a/drivers/net/ethernet/i825xx/sun3_82586.c
++++ b/drivers/net/ethernet/i825xx/sun3_82586.c
+@@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ #ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+- printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
++ printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
+ printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+ #endif
+ sun3_82586_close(dev);
+diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
+index c3236b59e7e934..3578b7d720c068 100644
+--- a/drivers/net/ethernet/ibm/emac/mal.c
++++ b/drivers/net/ethernet/ibm/emac/mal.c
+@@ -578,7 +578,7 @@ static int mal_probe(struct platform_device *ofdev)
+ printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
+ ofdev->dev.of_node);
+ err = -ENODEV;
+- goto fail;
++ goto fail_unmap;
+ #endif
+ }
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index cdf5251e567955..61685c3053ad7e 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2478,6 +2478,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
+
+ tx_buff = &tx_pool->tx_buff[bufidx];
++
++ /* Sanity checks on our free map to make sure it points to an index
++ * that is not being occupied by another skb. If skb memory is
++ * not freed then we see congestion control kick in and halt tx.
++ */
++ if (unlikely(tx_buff->skb)) {
++ dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
++ skb_is_gso(skb) ? "tso_pool" : "tx_pool",
++ queue_num, bufidx);
++ dev_kfree_skb_any(tx_buff->skb);
++ }
++
+ tx_buff->skb = skb;
+ tx_buff->index = bufidx;
+ tx_buff->pool_index = queue_num;
+@@ -4057,6 +4069,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
+ adapter->num_active_tx_scrqs = 0;
+ }
+
++ /* Clean any remaining outstanding SKBs;
++ * we freed the irq, so we won't be hearing
++ * from them.
++ */
++ clean_tx_pools(adapter);
++
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+ if (!adapter->rx_scrq[i])
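
The ibmvnic hunks defend against a corrupted TX free map: before installing a new skb into a slot, the slot is checked for a stale occupant, which is warned about and freed so the pool cannot leak until congestion control halts the queue. The slot-reuse check in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct tx_buff { void *skb; };

    static void claim_slot(struct tx_buff *slot, void *skb)
    {
        if (slot->skb) {                       /* free map pointed at a live slot */
            fprintf(stderr, "untracked skb in slot, freeing\n");
            free(slot->skb);                   /* reclaim instead of leaking */
        }
        slot->skb = skb;
    }

    int main(void)
    {
        struct tx_buff slot = { malloc(16) };  /* stale entry left behind */
        claim_slot(&slot, malloc(16));
        free(slot.skb);
        return 0;
    }
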
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
+index 4542e2bc28e8d2..f9328f2e669f8b 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
+@@ -5,6 +5,7 @@
+ * Shared functions for accessing and configuring the MAC
+ */
+
++#include <linux/bitfield.h>
+ #include "e1000.h"
+
+ static s32 e1000_check_downshift(struct e1000_hw *hw);
+@@ -3260,8 +3261,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ return ret_val;
+
+ phy_info->mdix_mode =
+- (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
+- IGP01E1000_PSSR_MDIX_SHIFT);
++ (e1000_auto_x_mode)FIELD_GET(IGP01E1000_PSSR_MDIX, phy_data);
+
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+@@ -3272,11 +3272,11 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ if (ret_val)
+ return ret_val;
+
+- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
++ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
++ phy_data) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
++ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
++ phy_data) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ /* Get cable length */
+@@ -3326,14 +3326,12 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+- ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+- M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
++ FIELD_GET(M88E1000_PSCR_10BT_EXT_DIST_ENABLE, phy_data) ?
+ e1000_10bt_ext_dist_enable_lower :
+ e1000_10bt_ext_dist_enable_normal;
+
+ phy_info->polarity_correction =
+- ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+- M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
++ FIELD_GET(M88E1000_PSCR_POLARITY_REVERSAL, phy_data) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+@@ -3347,27 +3345,25 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ return ret_val;
+
+ phy_info->mdix_mode =
+- (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
+- M88E1000_PSSR_MDIX_SHIFT);
++ (e1000_auto_x_mode)FIELD_GET(M88E1000_PSSR_MDIX, phy_data);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ phy_info->cable_length =
+- (e1000_cable_length) ((phy_data &
+- M88E1000_PSSR_CABLE_LENGTH) >>
+- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
++ (e1000_cable_length)FIELD_GET(M88E1000_PSSR_CABLE_LENGTH,
++ phy_data);
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
++ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
++ phy_data) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
++ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
++ phy_data) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ }
+
+@@ -3515,7 +3511,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+ if (ret_val)
+ return ret_val;
+ eeprom_size =
+- (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
++ FIELD_GET(EEPROM_SIZE_MASK, eeprom_size);
+ /* 256B eeprom size was not supported in earlier hardware, so we
+ * bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+@@ -4891,8 +4887,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+- cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
++ cable_length = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+@@ -5001,8 +4996,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+- *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+- M88E1000_PSSR_REV_POLARITY_SHIFT) ?
++ *polarity = FIELD_GET(M88E1000_PSSR_REV_POLARITY, phy_data) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp) {
+@@ -5072,8 +5066,8 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
+ if (ret_val)
+ return ret_val;
+
+- hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+- M88E1000_PSSR_DOWNSHIFT_SHIFT;
++ hw->speed_downgraded = FIELD_GET(M88E1000_PSSR_DOWNSHIFT,
++ phy_data);
+ }
+
+ return E1000_SUCCESS;
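
The e1000 conversion replaces hand-rolled (reg & MASK) >> SHIFT pairs with FIELD_GET(), which derives the shift from the mask itself so the two can never drift apart. A simplified stand-in for the include/linux/bitfield.h macro (the real one adds type and constant-mask checking); the register mask below is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift is the index of the mask's lowest set bit; mask must be nonzero. */
    #define FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

    #define PSSR_CABLE_LENGTH 0x0380   /* illustrative mask covering bits 9:7 */

    int main(void)
    {
        uint16_t phy_data = 0x0180;    /* cable length code 3 */
        printf("cable length code: %u\n",
               (unsigned)FIELD_GET(PSSR_CABLE_LENGTH, phy_data));
        return 0;
    }
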
+diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+index be9c695dde1275..c51fb6bf9c4e0c 100644
+--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
++++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+@@ -92,8 +92,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+- E1000_EECD_SIZE_EX_SHIFT);
++ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index 0b1e890dd583bf..969f855a79ee65 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -157,8 +157,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+ fallthrough;
+ default:
+ nvm->type = e1000_nvm_eeprom_spi;
+- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+- E1000_EECD_SIZE_EX_SHIFT);
++ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index a187582d22994c..ba9c19e6994c9d 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
+ * bits to count nanoseconds leaving the rest for fractional nanoseconds.
++ *
++ * Any given INCVALUE also has an associated maximum adjustment value. This
++ * maximum adjustment value is the largest increase (or decrease) which can be
++ * safely applied without overflowing the INCVALUE. Since INCVALUE has
++ * a maximum range of 24 bits, its largest value is 0xFFFFFF.
++ *
++ * To understand where the maximum value comes from, consider the following
++ * equation:
++ *
++ * new_incval = base_incval + (base_incval * adjustment) / 1billion
++ *
++ * To avoid overflow that means:
++ * max_incval = base_incval + (base_incval * max_adj) / 1billion
++ *
++ * Re-arranging:
++ * max_adj = floor(((max_incval - base_incval) * 1billion) / base_incval)
+ */
+ #define INCVALUE_96MHZ 125
+ #define INCVALUE_SHIFT_96MHZ 17
+ #define INCPERIOD_SHIFT_96MHZ 2
+ #define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
++#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */
+
+ #define INCVALUE_25MHZ 40
+ #define INCVALUE_SHIFT_25MHZ 18
+ #define INCPERIOD_25MHZ 1
++#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */
+
+ #define INCVALUE_24MHZ 125
+ #define INCVALUE_SHIFT_24MHZ 14
+ #define INCPERIOD_24MHZ 3
++#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */
+
+ #define INCVALUE_38400KHZ 26
+ #define INCVALUE_SHIFT_38400KHZ 19
+ #define INCPERIOD_38400KHZ 1
++#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */
+
+ /* Another drawback of scaling the incvalue by a large factor is the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
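
With the corrected re-arrangement above, the new MAX_PPB constants can be sanity-checked: for the 96 MHz clock, base_incval is INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ = 125 << 17, and the formula yields about 23,999,938 ppb, which MAX_PPB_96MHZ rounds down to 23,999,900. A quick check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t max_incval  = 0xFFFFFF;      /* 24-bit INCVALUE limit */
        const uint64_t base_incval = 125ULL << 17;  /* 96 MHz base increment */

        uint64_t max_adj = (max_incval - base_incval) * 1000000000ULL / base_incval;
        printf("96 MHz max_adj = %llu ppb\n", (unsigned long long)max_adj);
        /* prints 23999938; MAX_PPB_96MHZ (23999900) stays safely below it */
        return 0;
    }
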
+diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
+index 9835e6a90d56ce..fc0f98ea61332f 100644
+--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
++++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
+@@ -654,8 +654,8 @@ static void e1000_get_drvinfo(struct net_device *netdev,
+ */
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
+- (adapter->eeprom_vers & 0xF000) >> 12,
+- (adapter->eeprom_vers & 0x0FF0) >> 4,
++ FIELD_GET(0xF000, adapter->eeprom_vers),
++ FIELD_GET(0x0FF0, adapter->eeprom_vers),
+ (adapter->eeprom_vers & 0x000F));
+
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+@@ -925,8 +925,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+ }
+
+ if (mac->type >= e1000_pch_lpt)
+- wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
+- E1000_FWSM_WLOCK_MAC_SHIFT;
++ wlock_mac = FIELD_GET(E1000_FWSM_WLOCK_MAC_MASK, er32(FWSM));
+
+ for (i = 0; i < mac->rar_entry_count; i++) {
+ if (mac->type >= e1000_pch_lpt) {
+diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
+index 1fef6bb5a5fbc8..fc8ed38aa09554 100644
+--- a/drivers/net/ethernet/intel/e1000e/hw.h
++++ b/drivers/net/ethernet/intel/e1000e/hw.h
+@@ -108,8 +108,8 @@ struct e1000_hw;
+ #define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8
+ #define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
+ #define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
+-#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
+-#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
++#define E1000_DEV_ID_PCH_ADP_I219_LM19 0x550C
++#define E1000_DEV_ID_PCH_ADP_I219_V19 0x550D
+ #define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E
+ #define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
+ #define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
+@@ -628,6 +628,7 @@ struct e1000_phy_info {
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
++ u32 retry_count;
+
+ enum e1000_media_type media_type;
+
+@@ -644,6 +645,7 @@ struct e1000_phy_info {
+ bool polarity_correction;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
++ bool retry_enabled;
+ };
+
+ struct e1000_nvm_info {
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 39e9fc601bf5a6..ce227b56cf7243 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -222,11 +222,18 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
+ if (hw->mac.type >= e1000_pch_lpt) {
+ /* Only unforce SMBus if ME is not active */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
++ /* Switching PHY interface always returns MDI error
++ * so disable retry mechanism to avoid wasting time
++ */
++ e1000e_disable_phy_retry(hw);
++
+ /* Unforce SMBus mode in PHY */
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
++ e1000e_enable_phy_retry(hw);
++
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+ goto out;
+ }
+
++ /* There is no guarantee that the PHY is accessible at this time
++ * so disable retry mechanism to avoid wasting time
++ */
++ e1000e_disable_phy_retry(hw);
++
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
+ * inaccessible and resetting the PHY is not blocked, toggle the
+ * LANPHYPC Value bit to force the interconnect to PCIe mode.
+@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+ break;
+ }
+
++ e1000e_enable_phy_retry(hw);
++
+ hw->phy.ops.release(hw);
+ if (!ret_val) {
+
+@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+
+ phy->id = e1000_phy_unknown;
+
++ if (hw->mac.type == e1000_pch_mtp) {
++ phy->retry_count = 2;
++ e1000e_enable_phy_retry(hw);
++ }
++
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+@@ -1072,13 +1091,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+
+ lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
+ (1U << (E1000_LTRV_SCALE_FACTOR *
+- ((lat_enc & E1000_LTRV_SCALE_MASK)
+- >> E1000_LTRV_SCALE_SHIFT)));
++ FIELD_GET(E1000_LTRV_SCALE_MASK, lat_enc)));
+
+ max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
+- (1U << (E1000_LTRV_SCALE_FACTOR *
+- ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
+- >> E1000_LTRV_SCALE_SHIFT)));
++ (1U << (E1000_LTRV_SCALE_FACTOR *
++ FIELD_GET(E1000_LTRV_SCALE_MASK, max_ltr_enc)));
+
+ if (lat_enc_d > max_ltr_enc_d)
+ lat_enc = max_ltr_enc;
+@@ -1091,6 +1108,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+ return 0;
+ }
+
++/**
++ * e1000e_force_smbus - Force interfaces to transition to SMBUS mode.
++ * @hw: pointer to the HW structure
++ *
++ * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already
++ * acquired.
++ *
++ * Return: 0 on success, negative errno on failure.
++ **/
++static s32 e1000e_force_smbus(struct e1000_hw *hw)
++{
++ u16 smb_ctrl = 0;
++ u32 ctrl_ext;
++ s32 ret_val;
++
++ /* Switching PHY interface always returns MDI error
++ * so disable retry mechanism to avoid wasting time
++ */
++ e1000e_disable_phy_retry(hw);
++
++ /* Force SMBus mode in the PHY */
++ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl);
++ if (ret_val) {
++ e1000e_enable_phy_retry(hw);
++ return ret_val;
++ }
++
++ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
++ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl);
++
++ e1000e_enable_phy_retry(hw);
++
++ /* Force SMBus mode in the MAC */
++ ctrl_ext = er32(CTRL_EXT);
++ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
++ ew32(CTRL_EXT, ctrl_ext);
++
++ return 0;
++}
++
+ /**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+@@ -1148,17 +1205,13 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+ if (ret_val)
+ goto out;
+
+- /* Force SMBus mode in PHY */
+- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+- if (ret_val)
+- goto release;
+- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+-
+- /* Force SMBus mode in MAC */
+- mac_reg = er32(CTRL_EXT);
+- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+- ew32(CTRL_EXT, mac_reg);
++ if (hw->mac.type != e1000_pch_mtp) {
++ ret_val = e1000e_force_smbus(hw);
++ if (ret_val) {
++ e_dbg("Failed to force SMBUS: %d\n", ret_val);
++ goto release;
++ }
++ }
+
+ /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+@@ -1220,6 +1273,13 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+ }
+
+ release:
++ if (hw->mac.type == e1000_pch_mtp) {
++ ret_val = e1000e_force_smbus(hw);
++ if (ret_val)
++ e_dbg("Failed to force SMBUS over MTL system: %d\n",
++ ret_val);
++ }
++
+ hw->phy.ops.release(hw);
+ out:
+ if (ret_val)
+@@ -1315,6 +1375,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
++ /* Switching PHY interface always returns MDI error
++ * so disable retry mechanism to avoid wasting time
++ */
++ e1000e_disable_phy_retry(hw);
++
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+@@ -1335,6 +1400,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
++ e1000e_enable_phy_retry(hw);
++
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+@@ -2075,8 +2142,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+ {
+ u16 phy_data;
+ u32 strap = er32(STRAP);
+- u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+- E1000_STRAP_SMT_FREQ_SHIFT;
++ u32 freq = FIELD_GET(E1000_STRAP_SMT_FREQ_MASK, strap);
+ s32 ret_val;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+@@ -2562,8 +2628,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+- (u16)((mac_reg & E1000_RAH_AV)
+- >> 16));
++ (u16)((mac_reg & E1000_RAH_AV) >> 16));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+@@ -3205,7 +3270,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
++ sig_byte = FIELD_GET(0xFF00, nvm_dword);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+@@ -3218,7 +3283,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
++ sig_byte = FIELD_GET(0xFF00, nvm_dword);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
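The ich8lan.c changes above factor the force-SMBus sequence into e1000e_force_smbus() and, on Meteor Lake parts (e1000_pch_mtp), run it after the ULP register programming instead of before it, while every other MAC keeps the original ordering. A condensed control-flow sketch of that reordering, with the register programming elided (not compilable driver code):

static s32 enable_ulp_sketch(struct e1000_hw *hw)
{
	s32 ret_val;

	/* ...flow preconditions checked, PHY semaphore acquired... */

	if (hw->mac.type != e1000_pch_mtp) {
		ret_val = e1000e_force_smbus(hw);	/* pre-MTP parts: SMBus first */
		if (ret_val)
			return ret_val;
	}

	/* ...LPLU/ULP configuration register writes... */

	if (hw->mac.type == e1000_pch_mtp) {
		ret_val = e1000e_force_smbus(hw);	/* MTP: SMBus last */
		if (ret_val)
			e_dbg("Failed to force SMBUS over MTL system: %d\n",
			      ret_val);	/* logged only, as in the patch */
	}

	/* ...semaphore released... */
	return 0;
}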
+diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
+index 5df7ad93f3d77c..30515bfb259ea3 100644
+--- a/drivers/net/ethernet/intel/e1000e/mac.c
++++ b/drivers/net/ethernet/intel/e1000e/mac.c
+@@ -52,7 +52,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
++ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index f536c856727cb8..721c098f2bb1b2 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -1788,8 +1788,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
++ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+@@ -1868,8 +1867,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
++ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+@@ -5031,8 +5029,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
++ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
+ }
+ }
+
+@@ -6249,7 +6246,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+ phy_reg |= BM_RCTL_MPE;
+ phy_reg &= ~(BM_RCTL_MO_MASK);
+ if (mac_reg & E1000_RCTL_MO_3)
+- phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
++ phy_reg |= (FIELD_GET(E1000_RCTL_MO_3, mac_reg)
+ << BM_RCTL_MO_SHIFT);
+ if (mac_reg & E1000_RCTL_BAM)
+ phy_reg |= BM_RCTL_BAM;
+@@ -6366,49 +6363,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+ mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, mac_data);
+
+- /* Enable the Dynamic Power Gating in the MAC */
+- mac_data = er32(FEXTNVM7);
+- mac_data |= BIT(22);
+- ew32(FEXTNVM7, mac_data);
+-
+ /* Disable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data |= BIT(2);
+ ew32(DPGFR, mac_data);
+
+- /* Don't wake from dynamic Power Gating with clock request */
+- mac_data = er32(FEXTNVM12);
+- mac_data |= BIT(12);
+- ew32(FEXTNVM12, mac_data);
+-
+- /* Ungate PGCB clock */
+- mac_data = er32(FEXTNVM9);
+- mac_data &= ~BIT(28);
+- ew32(FEXTNVM9, mac_data);
+-
+- /* Enable K1 off to enable mPHY Power Gating */
+- mac_data = er32(FEXTNVM6);
+- mac_data |= BIT(31);
+- ew32(FEXTNVM6, mac_data);
+-
+- /* Enable mPHY power gating for any link and speed */
+- mac_data = er32(FEXTNVM8);
+- mac_data |= BIT(9);
+- ew32(FEXTNVM8, mac_data);
+-
+ /* Enable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, mac_data);
+-
+- /* No MAC DPG gating SLP_S0 in modern standby
+- * Switch the logic of the lanphypc to use PMC counter
+- */
+- mac_data = er32(FEXTNVM5);
+- mac_data |= BIT(7);
+- ew32(FEXTNVM5, mac_data);
+ }
+
++ /* Enable the Dynamic Power Gating in the MAC */
++ mac_data = er32(FEXTNVM7);
++ mac_data |= BIT(22);
++ ew32(FEXTNVM7, mac_data);
++
++ /* Don't wake from dynamic Power Gating with clock request */
++ mac_data = er32(FEXTNVM12);
++ mac_data |= BIT(12);
++ ew32(FEXTNVM12, mac_data);
++
++ /* Ungate PGCB clock */
++ mac_data = er32(FEXTNVM9);
++ mac_data &= ~BIT(28);
++ ew32(FEXTNVM9, mac_data);
++
++ /* Enable K1 off to enable mPHY Power Gating */
++ mac_data = er32(FEXTNVM6);
++ mac_data |= BIT(31);
++ ew32(FEXTNVM6, mac_data);
++
++ /* Enable mPHY power gating for any link and speed */
++ mac_data = er32(FEXTNVM8);
++ mac_data |= BIT(9);
++ ew32(FEXTNVM8, mac_data);
++
++ /* No MAC DPG gating SLP_S0 in modern standby
++ * Switch the logic of the lanphypc to use PMC counter
++ */
++ mac_data = er32(FEXTNVM5);
++ mac_data |= BIT(7);
++ ew32(FEXTNVM5, mac_data);
++
+ /* Disable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(31);
+@@ -6501,33 +6498,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ } else {
+ /* Request driver unconfigure the device from S0ix */
+
+- /* Disable the Dynamic Power Gating in the MAC */
+- mac_data = er32(FEXTNVM7);
+- mac_data &= 0xFFBFFFFF;
+- ew32(FEXTNVM7, mac_data);
+-
+- /* Disable mPHY power gating for any link and speed */
+- mac_data = er32(FEXTNVM8);
+- mac_data &= ~BIT(9);
+- ew32(FEXTNVM8, mac_data);
+-
+- /* Disable K1 off */
+- mac_data = er32(FEXTNVM6);
+- mac_data &= ~BIT(31);
+- ew32(FEXTNVM6, mac_data);
+-
+- /* Disable Ungate PGCB clock */
+- mac_data = er32(FEXTNVM9);
+- mac_data |= BIT(28);
+- ew32(FEXTNVM9, mac_data);
+-
+- /* Cancel not waking from dynamic
+- * Power Gating with clock request
+- */
+- mac_data = er32(FEXTNVM12);
+- mac_data &= ~BIT(12);
+- ew32(FEXTNVM12, mac_data);
+-
+ /* Cancel disable disconnected cable conditioning
+ * for Power Gating
+ */
+@@ -6540,13 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ mac_data &= 0xFFF7FFFF;
+ ew32(CTRL_EXT, mac_data);
+
+- /* Revert the lanphypc logic to use the internal Gbe counter
+- * and not the PMC counter
+- */
+- mac_data = er32(FEXTNVM5);
+- mac_data &= 0xFFFFFF7F;
+- ew32(FEXTNVM5, mac_data);
+-
+ /* Enable the periodic inband message,
+ * Request PCIe clock in K1 page770_17[10:9] =01b
+ */
+@@ -6584,6 +6547,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ mac_data &= ~BIT(31);
+ mac_data |= BIT(0);
+ ew32(FEXTNVM7, mac_data);
++
++ /* Disable the Dynamic Power Gating in the MAC */
++ mac_data = er32(FEXTNVM7);
++ mac_data &= 0xFFBFFFFF;
++ ew32(FEXTNVM7, mac_data);
++
++ /* Disable mPHY power gating for any link and speed */
++ mac_data = er32(FEXTNVM8);
++ mac_data &= ~BIT(9);
++ ew32(FEXTNVM8, mac_data);
++
++ /* Disable K1 off */
++ mac_data = er32(FEXTNVM6);
++ mac_data &= ~BIT(31);
++ ew32(FEXTNVM6, mac_data);
++
++ /* Disable Ungate PGCB clock */
++ mac_data = er32(FEXTNVM9);
++ mac_data |= BIT(28);
++ ew32(FEXTNVM9, mac_data);
++
++ /* Cancel not waking from dynamic
++ * Power Gating with clock request
++ */
++ mac_data = er32(FEXTNVM12);
++ mac_data &= ~BIT(12);
++ ew32(FEXTNVM12, mac_data);
++
++ /* Revert the lanphypc logic to use the internal Gbe counter
++ * and not the PMC counter
++ */
++ mac_data = er32(FEXTNVM5);
++ mac_data &= 0xFFFFFF7F;
++ ew32(FEXTNVM5, mac_data);
+ }
+
+ static int e1000e_pm_freeze(struct device *dev)
+@@ -6674,8 +6671,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ /* enable wakeup by the PHY */
+ retval = e1000_init_phy_wakeup(adapter, wufc);
+- if (retval)
+- return retval;
++ if (retval) {
++ e_err("Failed to enable wakeup\n");
++ goto skip_phy_configurations;
++ }
+ } else {
+ /* enable wakeup by the MAC */
+ ew32(WUFC, wufc);
+@@ -6691,14 +6690,16 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+ if (adapter->hw.phy.type == e1000_phy_igp_3) {
+ e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ } else if (hw->mac.type >= e1000_pch_lpt) {
+- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
++ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
+ /* ULP does not support wake from unicast, multicast
+ * or broadcast.
+ */
+ retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+-
+- if (retval)
+- return retval;
++ if (retval) {
++ e_err("Failed to enable ULP\n");
++ goto skip_phy_configurations;
++ }
++ }
+ }
+
+ /* Ensure that the appropriate bits are set in LPI_CTRL
+@@ -6729,6 +6730,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+ hw->phy.ops.release(hw);
+ }
+
++skip_phy_configurations:
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+@@ -6971,15 +6973,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
+ e1000e_pm_freeze(dev);
+
+ rc = __e1000_shutdown(pdev, false);
+- if (rc) {
+- e1000e_pm_thaw(dev);
+- } else {
++ if (!rc) {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
+
+- return rc;
++ return 0;
+ }
+
+ static __maybe_unused int e1000e_pm_resume(struct device *dev)
+@@ -7899,10 +7899,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM19), board_pch_adp },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
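In netdev.c, __e1000_shutdown() now treats failures from the PHY wake-up and ULP setup paths as non-fatal: it logs an error and jumps to the new skip_phy_configurations label so control of the hardware is still handed back to firmware, and e1000e_pm_suspend() correspondingly returns 0 regardless of the shutdown result so suspend proceeds. A pattern sketch with hypothetical helper names standing in for the driver calls:

retval = setup_phy_wakeup(adapter, wufc);	/* hypothetical helper */
if (retval) {
	e_err("Failed to enable wakeup\n");
	goto skip_phy_configurations;	/* degrade: suspend without PHY wake */
}

/* ...remaining PHY/ULP configuration... */

skip_phy_configurations:
release_hw_control(adapter);	/* hypothetical: always return the NIC to f/w */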
+diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
+index 08c3d477dd6f79..8bf44103fb9102 100644
+--- a/drivers/net/ethernet/intel/e1000e/phy.c
++++ b/drivers/net/ethernet/intel/e1000e/phy.c
+@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
+ return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
+ }
+
++void e1000e_disable_phy_retry(struct e1000_hw *hw)
++{
++ hw->phy.retry_enabled = false;
++}
++
++void e1000e_enable_phy_retry(struct e1000_hw *hw)
++{
++ hw->phy.retry_enabled = true;
++}
++
+ /**
+ * e1000e_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+@@ -118,57 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
+ **/
+ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+ {
++ u32 i, mdic = 0, retry_counter, retry_max;
+ struct e1000_phy_info *phy = &hw->phy;
+- u32 i, mdic = 0;
++ bool success;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
++ retry_max = phy->retry_enabled ? phy->retry_count : 0;
++
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+- (phy->addr << E1000_MDIC_PHY_SHIFT) |
+- (E1000_MDIC_OP_READ));
++ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
++ success = true;
+
+- ew32(MDIC, mdic);
++ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
++ (phy->addr << E1000_MDIC_PHY_SHIFT) |
++ (E1000_MDIC_OP_READ));
+
+- /* Poll the ready bit to see if the MDI read completed
+- * Increasing the time out as testing showed failures with
+- * the lower time out
+- */
+- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+- udelay(50);
+- mdic = er32(MDIC);
+- if (mdic & E1000_MDIC_READY)
+- break;
+- }
+- if (!(mdic & E1000_MDIC_READY)) {
+- e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
+- return -E1000_ERR_PHY;
+- }
+- if (mdic & E1000_MDIC_ERROR) {
+- e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+- return -E1000_ERR_PHY;
+- }
+- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+- e_dbg("MDI Read offset error - requested %d, returned %d\n",
+- offset,
+- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+- return -E1000_ERR_PHY;
+- }
+- *data = (u16)mdic;
++ ew32(MDIC, mdic);
+
+- /* Allow some time after each MDIC transaction to avoid
+- * reading duplicate data in the next MDIC transaction.
+- */
+- if (hw->mac.type == e1000_pch2lan)
+- udelay(100);
++ /* Poll the ready bit to see if the MDI read completed
++ * Increasing the time out as testing showed failures with
++ * the lower time out
++ */
++ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
++ udelay(50);
++ mdic = er32(MDIC);
++ if (mdic & E1000_MDIC_READY)
++ break;
++ }
++ if (!(mdic & E1000_MDIC_READY)) {
++ e_dbg("MDI Read PHY Reg Address %d did not complete\n",
++ offset);
++ success = false;
++ }
++ if (mdic & E1000_MDIC_ERROR) {
++ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
++ success = false;
++ }
++ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
++ e_dbg("MDI Read offset error - requested %d, returned %d\n",
++ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
++ success = false;
++ }
+
+- return 0;
++ /* Allow some time after each MDIC transaction to avoid
++ * reading duplicate data in the next MDIC transaction.
++ */
++ if (hw->mac.type == e1000_pch2lan)
++ udelay(100);
++
++ if (success) {
++ *data = (u16)mdic;
++ return 0;
++ }
++
++ if (retry_counter != retry_max) {
++ e_dbg("Perform retry on PHY transaction...\n");
++ mdelay(10);
++ }
++ }
++
++ return -E1000_ERR_PHY;
+ }
+
+ /**
+@@ -181,57 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+ **/
+ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+ {
++ u32 i, mdic = 0, retry_counter, retry_max;
+ struct e1000_phy_info *phy = &hw->phy;
+- u32 i, mdic = 0;
++ bool success;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
++ retry_max = phy->retry_enabled ? phy->retry_count : 0;
++
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+- mdic = (((u32)data) |
+- (offset << E1000_MDIC_REG_SHIFT) |
+- (phy->addr << E1000_MDIC_PHY_SHIFT) |
+- (E1000_MDIC_OP_WRITE));
++ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
++ success = true;
+
+- ew32(MDIC, mdic);
++ mdic = (((u32)data) |
++ (offset << E1000_MDIC_REG_SHIFT) |
++ (phy->addr << E1000_MDIC_PHY_SHIFT) |
++ (E1000_MDIC_OP_WRITE));
+
+- /* Poll the ready bit to see if the MDI read completed
+- * Increasing the time out as testing showed failures with
+- * the lower time out
+- */
+- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+- udelay(50);
+- mdic = er32(MDIC);
+- if (mdic & E1000_MDIC_READY)
+- break;
+- }
+- if (!(mdic & E1000_MDIC_READY)) {
+- e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
+- return -E1000_ERR_PHY;
+- }
+- if (mdic & E1000_MDIC_ERROR) {
+- e_dbg("MDI Write PHY Red Address %d Error\n", offset);
+- return -E1000_ERR_PHY;
+- }
+- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+- e_dbg("MDI Write offset error - requested %d, returned %d\n",
+- offset,
+- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+- return -E1000_ERR_PHY;
+- }
++ ew32(MDIC, mdic);
+
+- /* Allow some time after each MDIC transaction to avoid
+- * reading duplicate data in the next MDIC transaction.
+- */
+- if (hw->mac.type == e1000_pch2lan)
+- udelay(100);
++ /* Poll the ready bit to see if the MDI read completed
++ * Increasing the time out as testing showed failures with
++ * the lower time out
++ */
++ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
++ udelay(50);
++ mdic = er32(MDIC);
++ if (mdic & E1000_MDIC_READY)
++ break;
++ }
++ if (!(mdic & E1000_MDIC_READY)) {
++ e_dbg("MDI Write PHY Reg Address %d did not complete\n",
++ offset);
++ success = false;
++ }
++ if (mdic & E1000_MDIC_ERROR) {
++ e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
++ success = false;
++ }
++ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
++ e_dbg("MDI Write offset error - requested %d, returned %d\n",
++ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
++ success = false;
++ }
+
+- return 0;
++ /* Allow some time after each MDIC transaction to avoid
++ * reading duplicate data in the next MDIC transaction.
++ */
++ if (hw->mac.type == e1000_pch2lan)
++ udelay(100);
++
++ if (success)
++ return 0;
++
++ if (retry_counter != retry_max) {
++ e_dbg("Perform retry on PHY transaction...\n");
++ mdelay(10);
++ }
++ }
++
++ return -E1000_ERR_PHY;
+ }
+
+ /**
+@@ -1793,8 +1834,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
+ if (ret_val)
+ return ret_val;
+
+- index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
++ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+@@ -3234,8 +3274,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+ if (ret_val)
+ return ret_val;
+
+- length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+- I82577_DSTATUS_CABLE_LENGTH_SHIFT);
++ length = FIELD_GET(I82577_DSTATUS_CABLE_LENGTH, phy_data);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ return -E1000_ERR_PHY;
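phy.c rewrites e1000e_read_phy_reg_mdic() and e1000e_write_phy_reg_mdic() as bounded retry loops: each attempt sets a success flag, any failure falls through to a 10 ms back-off, and extra attempts happen only while phy.retry_enabled is set (otherwise retry_max is 0 and the body runs exactly once, preserving the old behaviour). A generic standalone sketch of the same loop shape, assuming a hypothetical try_mdic_once() helper:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

int try_mdic_once(void);	/* hypothetical single-attempt transaction */

static int mdic_transaction(u32 retry_max)
{
	u32 retry_counter;

	/* attempts = retry_max + 1; retry_max == 0 means no retries at all */
	for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
		if (try_mdic_once() == 0)
			return 0;
		if (retry_counter != retry_max)
			mdelay(10);	/* back-off interval used by the patch */
	}
	return -EIO;
}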
+diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
+index c48777d0952352..049bb325b4b14f 100644
+--- a/drivers/net/ethernet/intel/e1000e/phy.h
++++ b/drivers/net/ethernet/intel/e1000e/phy.h
+@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+ void e1000_power_up_phy_copper(struct e1000_hw *hw);
+ void e1000_power_down_phy_copper(struct e1000_hw *hw);
++void e1000e_disable_phy_retry(struct e1000_hw *hw);
++void e1000e_enable_phy_retry(struct e1000_hw *hw);
+ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
+index 02d871bc112a73..bbcfd529399b0f 100644
+--- a/drivers/net/ethernet/intel/e1000e/ptp.c
++++ b/drivers/net/ethernet/intel/e1000e/ptp.c
+@@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
++ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
++ break;
+ case e1000_pch_lpt:
++ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
++ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
++ else
++ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
++ break;
+ case e1000_pch_spt:
++ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
++ break;
+ case e1000_pch_cnp:
+ case e1000_pch_tgp:
+ case e1000_pch_adp:
+@@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ case e1000_pch_nvp:
+- if ((hw->mac.type < e1000_pch_lpt) ||
+- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+- adapter->ptp_clock_info.max_adj = 24000000 - 1;
+- break;
+- }
+- fallthrough;
++ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
++ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
++ else
++ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
++ break;
+ case e1000_82574:
+ case e1000_82583:
+- adapter->ptp_clock_info.max_adj = 600000000 - 1;
++ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+ break;
+ default:
+ break;
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+index af1b0cde36703a..aed5e0bf6313e9 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2019 Intel Corporation. */
+
++#include <linux/bitfield.h>
+ #include "fm10k_pf.h"
+ #include "fm10k_vf.h"
+
+@@ -1575,8 +1576,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
+ if (func & FM10K_FAULT_FUNC_PF)
+ fault->func = 0;
+ else
+- fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
+- FM10K_FAULT_FUNC_VF_SHIFT);
++ fault->func = 1 + FIELD_GET(FM10K_FAULT_FUNC_VF_MASK, func);
+
+ /* record fault type */
+ fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+index dc8ccd378ec921..7fb1961f292101 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2019 Intel Corporation. */
+
++#include <linux/bitfield.h>
+ #include "fm10k_vf.h"
+
+ /**
+@@ -126,15 +127,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
+ hw->mac.max_queues = i;
+
+ /* fetch default VLAN and ITR scale */
+- hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) &
+- FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
++ hw->mac.default_vid = FIELD_GET(FM10K_TXQCTL_VID_MASK,
++ fm10k_read_reg(hw, FM10K_TXQCTL(0)));
+ /* Read the ITR scale from TDLEN. See the definition of
+ * FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is
+ * used here.
+ */
+- hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) &
+- FM10K_TDLEN_ITR_SCALE_MASK) >>
+- FM10K_TDLEN_ITR_SCALE_SHIFT;
++ hw->mac.itr_scale = FIELD_GET(FM10K_TDLEN_ITR_SCALE_MASK,
++ fm10k_read_reg(hw, FM10K_TDLEN(0)));
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 55bb0b5310d5b4..3e6839ac1f0f1e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -4,47 +4,20 @@
+ #ifndef _I40E_H_
+ #define _I40E_H_
+
+-#include <net/tcp.h>
+-#include <net/udp.h>
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/module.h>
+-#include <linux/pci.h>
+-#include <linux/netdevice.h>
+-#include <linux/ioport.h>
+-#include <linux/iommu.h>
+-#include <linux/slab.h>
+-#include <linux/list.h>
+-#include <linux/hashtable.h>
+-#include <linux/string.h>
+-#include <linux/in.h>
+-#include <linux/ip.h>
+-#include <linux/sctp.h>
+-#include <linux/pkt_sched.h>
+-#include <linux/ipv6.h>
+-#include <net/checksum.h>
+-#include <net/ip6_checksum.h>
+ #include <linux/ethtool.h>
+-#include <linux/if_vlan.h>
+-#include <linux/if_macvlan.h>
+-#include <linux/if_bridge.h>
+-#include <linux/clocksource.h>
+-#include <linux/net_tstamp.h>
++#include <linux/pci.h>
+ #include <linux/ptp_clock_kernel.h>
++#include <linux/types.h>
++#include <linux/avf/virtchnl.h>
++#include <linux/net/intel/i40e_client.h>
+ #include <net/pkt_cls.h>
+-#include <net/pkt_sched.h>
+-#include <net/tc_act/tc_gact.h>
+-#include <net/tc_act/tc_mirred.h>
+ #include <net/udp_tunnel.h>
+-#include <net/xdp_sock.h>
+-#include <linux/bitfield.h>
+-#include "i40e_type.h"
++#include "i40e_dcb.h"
++#include "i40e_debug.h"
++#include "i40e_io.h"
+ #include "i40e_prototype.h"
+-#include <linux/net/intel/i40e_client.h>
+-#include <linux/avf/virtchnl.h>
+-#include "i40e_virtchnl_pf.h"
++#include "i40e_register.h"
+ #include "i40e_txrx.h"
+-#include "i40e_dcb.h"
+
+ /* Useful i40e defaults */
+ #define I40E_MAX_VEB 16
+@@ -108,7 +81,7 @@
+ #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
+
+ /* driver state flags */
+-enum i40e_state_t {
++enum i40e_state {
+ __I40E_TESTING,
+ __I40E_CONFIG_BUSY,
+ __I40E_CONFIG_DONE,
+@@ -156,7 +129,7 @@ enum i40e_state_t {
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
+
+ /* VSI state flags */
+-enum i40e_vsi_state_t {
++enum i40e_vsi_state {
+ __I40E_VSI_DOWN,
+ __I40E_VSI_NEEDS_RESTART,
+ __I40E_VSI_SYNCING_FILTERS,
+@@ -992,6 +965,7 @@ struct i40e_q_vector {
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[I40E_INT_NAME_STR_LEN];
+ bool arm_wb_state;
++ bool in_busy_poll;
+ int irq_num; /* IRQ assigned to this q_vector */
+ } ____cacheline_internodealigned_in_smp;
+
+@@ -1321,4 +1295,15 @@ static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
+ }
+
++/**
++ * i40e_hw_to_pf - get pf pointer from the hardware structure
++ * @hw: pointer to the device HW structure
++ **/
++static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
++{
++ return container_of(hw, struct i40e_pf, hw);
++}
++
++struct device *i40e_hw_to_dev(struct i40e_hw *hw);
++
+ #endif /* _I40E_H_ */
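The new i40e_hw_to_pf() helper recovers the owning i40e_pf from its embedded i40e_hw with container_of(), which is what lets later hunks delete the untyped hw->back pointer from i40e_main.c. A self-contained illustration of the idiom with demo types (not the driver's structs):

#include <linux/container_of.h>

struct demo_hw { int debug_mask; };

struct demo_pf {
	int flags;
	struct demo_hw hw;	/* embedded member, like i40e_pf::hw */
};

static inline struct demo_pf *demo_hw_to_pf(struct demo_hw *hw)
{
	/* walk back from the member address to the enclosing struct */
	return container_of(hw, struct demo_pf, hw);
}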
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+index 100eb77b8dfe6b..9ce6e633cc2f01 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+@@ -1,9 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+-#include "i40e_type.h"
++#include <linux/delay.h>
++#include "i40e_alloc.h"
+ #include "i40e_register.h"
+-#include "i40e_adminq.h"
+ #include "i40e_prototype.h"
+
+ static void i40e_resume_aq(struct i40e_hw *hw);
+@@ -51,7 +51,6 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+ int ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+- i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+@@ -78,7 +77,6 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+ int ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+- i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+@@ -136,7 +134,6 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+- i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+@@ -198,7 +195,6 @@ static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+- i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+index 267f2e0a21ce88..290c23cec2fcaf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+@@ -4,7 +4,8 @@
+ #ifndef _I40E_ADMINQ_H_
+ #define _I40E_ADMINQ_H_
+
+-#include "i40e_osdep.h"
++#include <linux/mutex.h>
++#include "i40e_alloc.h"
+ #include "i40e_adminq_cmd.h"
+
+ #define I40E_ADMINQ_DESC(R, i) \
+@@ -115,10 +116,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+ -EFBIG, /* I40E_AQ_RC_EFBIG */
+ };
+
+- /* aq_rc is invalid if AQ timed out */
+- if (aq_ret == -EIO)
+- return -EAGAIN;
+-
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+index 3357d65a906bf2..c8f35d4de271ad 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+@@ -4,6 +4,9 @@
+ #ifndef _I40E_ADMINQ_CMD_H_
+ #define _I40E_ADMINQ_CMD_H_
+
++#include <linux/bits.h>
++#include <linux/types.h>
++
+ /* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+index a6c9a9e343d114..e0dde326255d69 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+@@ -4,25 +4,25 @@
+ #ifndef _I40E_ALLOC_H_
+ #define _I40E_ALLOC_H_
+
++#include <linux/types.h>
++
+ struct i40e_hw;
+
+-/* Memory allocation types */
+-enum i40e_memory_type {
+- i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+- i40e_mem_asq_buf = 1,
+- i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+- i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+- i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+- i40e_mem_pd = 5, /* Page Descriptor */
+- i40e_mem_bp = 6, /* Backing Page - 4KB */
+- i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+- i40e_mem_reserved
++/* memory allocation tracking */
++struct i40e_dma_mem {
++ void *va;
++ dma_addr_t pa;
++ u32 size;
++};
++
++struct i40e_virt_mem {
++ void *va;
++ u32 size;
+ };
+
+ /* prototype for functions used for dynamic memory allocation */
+ int i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+- enum i40e_memory_type type,
+ u64 size, u32 alignment);
+ int i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
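With this hunk i40e_alloc.h absorbs the i40e_dma_mem/i40e_virt_mem definitions from the soon-to-be-deleted i40e_osdep.h and drops the never-used enum i40e_memory_type parameter, so callers invoke i40e_allocate_dma_mem() directly instead of going through the osdep wrapper macro. A hedged usage fragment against the new signature (num_asq_entries is a stand-in for a caller-supplied count):

struct i40e_dma_mem desc_buf;
int err;

err = i40e_allocate_dma_mem(hw, &desc_buf,
			    num_asq_entries * sizeof(struct i40e_aq_desc),
			    I40E_ADMINQ_DESC_ALIGNMENT);
if (!err) {
	/* use desc_buf.va (CPU address) and desc_buf.pa (DMA address) */
	i40e_free_dma_mem(hw, &desc_buf);
}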
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 639c5a1ca853b7..306758428aefd7 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -6,7 +6,6 @@
+ #include <linux/net/intel/i40e_client.h>
+
+ #include "i40e.h"
+-#include "i40e_prototype.h"
+
+ static LIST_HEAD(i40e_devices);
+ static DEFINE_MUTEX(i40e_device_mutex);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 1b493854f52292..4d7caa11997199 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1,11 +1,15 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+-#include "i40e.h"
+-#include "i40e_type.h"
+-#include "i40e_adminq.h"
+-#include "i40e_prototype.h"
+ #include <linux/avf/virtchnl.h>
++#include <linux/bitfield.h>
++#include <linux/delay.h>
++#include <linux/etherdevice.h>
++#include <linux/pci.h>
++#include "i40e_adminq_cmd.h"
++#include "i40e_devids.h"
++#include "i40e_prototype.h"
++#include "i40e_register.h"
+
+ /**
+ * i40e_set_mac_type - Sets MAC type
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+index f81e744c0fb368..d57dd30b024fa9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+@@ -1,9 +1,11 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2021 Intel Corporation. */
+
++#include <linux/bitfield.h>
+ #include "i40e_adminq.h"
+-#include "i40e_prototype.h"
++#include "i40e_alloc.h"
+ #include "i40e_dcb.h"
++#include "i40e_prototype.h"
+
+ /**
+ * i40e_get_dcbx_status
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+index 195421d863ab1d..077a95dad32cff 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+@@ -2,8 +2,8 @@
+ /* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+ #ifdef CONFIG_I40E_DCB
+-#include "i40e.h"
+ #include <net/dcbnl.h>
++#include "i40e.h"
+
+ #define I40E_DCBNL_STATUS_SUCCESS 0
+ #define I40E_DCBNL_STATUS_ERROR 1
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+index 0e72abd178ae3f..21b3518c409681 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+@@ -1,9 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
++#include <linux/firmware.h>
+ #include "i40e.h"
+
+-#include <linux/firmware.h>
+
+ /**
+ * i40e_ddp_profiles_eq - checks if DDP profiles are the equivalent
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h
+new file mode 100644
+index 00000000000000..27ebc72d8bfe5f
+--- /dev/null
++++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h
+@@ -0,0 +1,47 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright(c) 2023 Intel Corporation. */
++
++#ifndef _I40E_DEBUG_H_
++#define _I40E_DEBUG_H_
++
++#include <linux/dev_printk.h>
++
++/* debug masks - set these bits in hw->debug_mask to control output */
++enum i40e_debug_mask {
++ I40E_DEBUG_INIT = 0x00000001,
++ I40E_DEBUG_RELEASE = 0x00000002,
++
++ I40E_DEBUG_LINK = 0x00000010,
++ I40E_DEBUG_PHY = 0x00000020,
++ I40E_DEBUG_HMC = 0x00000040,
++ I40E_DEBUG_NVM = 0x00000080,
++ I40E_DEBUG_LAN = 0x00000100,
++ I40E_DEBUG_FLOW = 0x00000200,
++ I40E_DEBUG_DCB = 0x00000400,
++ I40E_DEBUG_DIAG = 0x00000800,
++ I40E_DEBUG_FD = 0x00001000,
++ I40E_DEBUG_PACKAGE = 0x00002000,
++ I40E_DEBUG_IWARP = 0x00F00000,
++ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
++ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
++ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
++ I40E_DEBUG_AQ_COMMAND = 0x06000000,
++ I40E_DEBUG_AQ = 0x0F000000,
++
++ I40E_DEBUG_USER = 0xF0000000,
++
++ I40E_DEBUG_ALL = 0xFFFFFFFF
++};
++
++struct i40e_hw;
++struct device *i40e_hw_to_dev(struct i40e_hw *hw);
++
++#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A)
++
++#define i40e_debug(h, m, s, ...) \
++do { \
++ if (((m) & (h)->debug_mask)) \
++ dev_info(i40e_hw_to_dev(hw), s, ##__VA_ARGS__); \
++} while (0)
++
++#endif /* _I40E_DEBUG_H_ */
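The new i40e_debug.h keeps the debug-mask enum and rebuilds hw_dbg()/i40e_debug() on top of i40e_hw_to_dev(), so output flows through dev_dbg()/dev_info() with a proper device prefix instead of the old pr_info() call that formatted bus/device/function by hand (visible in the i40e_osdep.h removal later in this patch). Usage is unchanged for callers; a sketch:

/* emitted via dev_info() only when I40E_DEBUG_NVM is set in hw->debug_mask */
i40e_debug(hw, I40E_DEBUG_NVM, "nvm read failed at offset 0x%x\n", offset);

/* unconditional dev_dbg() against the same struct device */
hw_dbg(hw, "admin queue initialised\n");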
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 1a497cb0771007..999c9708def533 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -5,8 +5,9 @@
+
+ #include <linux/fs.h>
+ #include <linux/debugfs.h>
+-
++#include <linux/if_bridge.h>
+ #include "i40e.h"
++#include "i40e_virtchnl_pf.h"
+
+ static struct dentry *i40e_dbg_root;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+index c3ce5f35211f03..ab20202a3da3ca 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+@@ -4,7 +4,11 @@
+ #ifndef _I40E_DIAG_H_
+ #define _I40E_DIAG_H_
+
+-#include "i40e_type.h"
++#include <linux/types.h>
++#include "i40e_adminq_cmd.h"
++
++/* forward-declare the HW struct for the compiler */
++struct i40e_hw;
+
+ enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0x0,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index bd1321bf7e2681..4e90570ba7803a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -3,9 +3,10 @@
+
+ /* ethtool support for i40e */
+
+-#include "i40e.h"
++#include "i40e_devids.h"
+ #include "i40e_diag.h"
+ #include "i40e_txrx_common.h"
++#include "i40e_virtchnl_pf.h"
+
+ /* ethtool statistics helpers */
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+index 96ee63aca7a10a..1742624ca62edf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+@@ -1,10 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+-#include "i40e.h"
+-#include "i40e_osdep.h"
+-#include "i40e_register.h"
+ #include "i40e_alloc.h"
++#include "i40e_debug.h"
+ #include "i40e_hmc.h"
+ #include "i40e_type.h"
+
+@@ -22,7 +20,6 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+ {
+- enum i40e_memory_type mem_type __attribute__((unused));
+ struct i40e_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+@@ -43,16 +40,13 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw,
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+- if (I40E_SD_TYPE_PAGED == type) {
+- mem_type = i40e_mem_pd;
++ if (type == I40E_SD_TYPE_PAGED)
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+- } else {
+- mem_type = i40e_mem_bp_jumbo;
++ else
+ alloc_len = direct_mode_sz;
+- }
+
+ /* allocate a 4K pd page or 2M backing page */
+- ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
++ ret_code = i40e_allocate_dma_mem(hw, &mem, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+@@ -140,7 +134,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+- ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
++ ret_code = i40e_allocate_dma_mem(hw, page,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+index 9960da07a57327..480e3a883cc7a1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+@@ -4,6 +4,10 @@
+ #ifndef _I40E_HMC_H_
+ #define _I40E_HMC_H_
+
++#include "i40e_alloc.h"
++#include "i40e_io.h"
++#include "i40e_register.h"
++
+ #define I40E_HMC_MAX_BP_COUNT 512
+
+ /* forward-declare the HW struct for the compiler */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_io.h b/drivers/net/ethernet/intel/i40e/i40e_io.h
+new file mode 100644
+index 00000000000000..2a2ed9a1d476b5
+--- /dev/null
++++ b/drivers/net/ethernet/intel/i40e/i40e_io.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright(c) 2023 Intel Corporation. */
++
++#ifndef _I40E_IO_H_
++#define _I40E_IO_H_
++
++/* get readq/writeq support for 32 bit kernels, use the low-first version */
++#include <linux/io-64-nonatomic-lo-hi.h>
++
++#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
++#define rd32(a, reg) readl((a)->hw_addr + (reg))
++
++#define rd64(a, reg) readq((a)->hw_addr + (reg))
++#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
++
++#endif /* _I40E_IO_H_ */
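i40e_io.h now carries the MMIO accessors that previously lived in i40e_osdep.h, keeping the lo-hi readq fallback so rd64() still works on 32-bit kernels. A usage fragment (REG_OFFSET is a placeholder; real offsets come from i40e_register.h):

u32 stat;

wr32(hw, REG_OFFSET, 0);	/* 32-bit MMIO write at hw_addr + offset */
stat = rd32(hw, I40E_GLGEN_STAT);	/* 32-bit MMIO read */
i40e_flush(hw);	/* read-back of I40E_GLGEN_STAT to post prior writes */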
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+index 474365bf064804..beaaf5c309d510 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+@@ -1,13 +1,10 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+-#include "i40e.h"
+-#include "i40e_osdep.h"
+-#include "i40e_register.h"
+-#include "i40e_type.h"
+-#include "i40e_hmc.h"
++#include "i40e_alloc.h"
++#include "i40e_debug.h"
+ #include "i40e_lan_hmc.h"
+-#include "i40e_prototype.h"
++#include "i40e_type.h"
+
+ /* lan specific interface functions */
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+index 9f960404c2b379..305a276953b019 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+@@ -4,6 +4,8 @@
+ #ifndef _I40E_LAN_HMC_H_
+ #define _I40E_LAN_HMC_H_
+
++#include "i40e_hmc.h"
++
+ /* forward-declare the HW struct for the compiler */
+ struct i40e_hw;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index de7fd43dc11c8d..80472aa1deba4e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1,19 +1,22 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2021 Intel Corporation. */
+
+-#include <linux/etherdevice.h>
+-#include <linux/of_net.h>
+-#include <linux/pci.h>
+-#include <linux/bpf.h>
+ #include <generated/utsrelease.h>
+ #include <linux/crash_dump.h>
++#include <linux/if_bridge.h>
++#include <linux/if_macvlan.h>
++#include <linux/module.h>
++#include <net/pkt_cls.h>
++#include <net/xdp_sock_drv.h>
+
+ /* Local includes */
+ #include "i40e.h"
++#include "i40e_devids.h"
+ #include "i40e_diag.h"
++#include "i40e_lan_hmc.h"
++#include "i40e_virtchnl_pf.h"
+ #include "i40e_xsk.h"
+-#include <net/udp_tunnel.h>
+-#include <net/xdp_sock_drv.h>
++
+ /* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+@@ -104,12 +107,18 @@ static struct workqueue_struct *i40e_wq;
+ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ struct net_device *netdev, int delta)
+ {
++ struct netdev_hw_addr_list *ha_list;
+ struct netdev_hw_addr *ha;
+
+ if (!f || !netdev)
+ return;
+
+- netdev_for_each_mc_addr(ha, netdev) {
++ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
++ ha_list = &netdev->uc;
++ else
++ ha_list = &netdev->mc;
++
++ netdev_hw_addr_list_for_each(ha, ha_list) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ ha->refcount += delta;
+ if (ha->refcount <= 0)
+@@ -120,16 +129,27 @@ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ }
+
+ /**
+- * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
++ * i40e_hw_to_dev - get device pointer from the hardware structure
++ * @hw: pointer to the device HW structure
++ **/
++struct device *i40e_hw_to_dev(struct i40e_hw *hw)
++{
++ struct i40e_pf *pf = i40e_hw_to_pf(hw);
++
++ return &pf->pdev->dev;
++}
++
++/**
++ * i40e_allocate_dma_mem - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+-int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+- u64 size, u32 alignment)
++int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
++ u64 size, u32 alignment)
+ {
+- struct i40e_pf *pf = (struct i40e_pf *)hw->back;
++ struct i40e_pf *pf = i40e_hw_to_pf(hw);
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+@@ -141,13 +161,13 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ }
+
+ /**
+- * i40e_free_dma_mem_d - OS specific memory free for shared code
++ * i40e_free_dma_mem - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+-int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
++int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+ {
+- struct i40e_pf *pf = (struct i40e_pf *)hw->back;
++ struct i40e_pf *pf = i40e_hw_to_pf(hw);
+
+ dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+ mem->va = NULL;
+@@ -158,13 +178,13 @@ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+ }
+
+ /**
+- * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
++ * i40e_allocate_virt_mem - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+-int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+- u32 size)
++int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
++ u32 size)
+ {
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+@@ -176,11 +196,11 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ }
+
+ /**
+- * i40e_free_virt_mem_d - OS specific memory free for shared code
++ * i40e_free_virt_mem - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+-int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
++int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+ {
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+@@ -1243,8 +1263,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
+ int bkt;
+ int cnt = 0;
+
+- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+- ++cnt;
++ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
++ if (f->state == I40E_FILTER_NEW ||
++ f->state == I40E_FILTER_ACTIVE)
++ ++cnt;
++ }
+
+ return cnt;
+ }
+@@ -1721,6 +1744,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ struct hlist_node *h;
+ int bkt;
+
++ lockdep_assert_held(&vsi->mac_filter_hash_lock);
+ if (vsi->info.pvid)
+ return i40e_add_filter(vsi, macaddr,
+ le16_to_cpu(vsi->info.pvid));
+@@ -3572,40 +3596,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = 0;
+ bool ok;
+- int ret;
+
+ bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+- if (ring->vsi->type == I40E_VSI_MAIN)
+- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
++ ring->rx_buf_len = vsi->rx_buf_len;
++
++ /* XDP RX-queue info only needed for RX rings exposed to XDP */
++ if (ring->vsi->type != I40E_VSI_MAIN)
++ goto skip;
++
++ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ }
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- ring->rx_buf_len =
+- xsk_pool_get_rx_frame_size(ring->xsk_pool);
+- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
++ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+- if (ret)
+- return ret;
++ if (err)
++ return err;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
+- ring->rx_buf_len = vsi->rx_buf_len;
+- if (ring->vsi->type == I40E_VSI_MAIN) {
+- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+- MEM_TYPE_PAGE_SHARED,
+- NULL);
+- if (ret)
+- return ret;
+- }
++ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
++ MEM_TYPE_PAGE_SHARED,
++ NULL);
++ if (err)
++ return err;
+ }
+
++skip:
+ xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+@@ -3884,6 +3923,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+ q_vector->tx.target_itr >> 1);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+
++ /* Set ITR for software interrupts triggered after exiting
++ * busy-loop polling.
++ */
++ wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
++ I40E_ITR_20K);
++
+ wr32(hw, I40E_PFINT_RATEN(vector - 1),
+ i40e_intrl_usec_to_reg(vsi->int_rate_limit));
+
+@@ -5330,7 +5375,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
+ {
+ int v, ret = 0;
+
+- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
++ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v]) {
+ ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
+ if (ret)
+@@ -13328,6 +13373,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ bool need_reset;
+ int i;
+
++ /* VSI shall be deleted in a moment, block loading new programs */
++ if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
++ return -EINVAL;
++
+ /* Don't allow frames that span over multiple buffers */
+ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
+@@ -13336,14 +13385,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+
+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+-
+ if (need_reset)
+ i40e_prep_for_reset(pf);
+
+- /* VSI shall be deleted in a moment, just return EINVAL */
+- if (test_bit(__I40E_IN_REMOVE, pf->state))
+- return -EINVAL;
+-
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset) {
+@@ -13596,9 +13640,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
+ return err;
+
+ i40e_queue_pair_disable_irq(vsi, queue_pair);
++ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ i40e_queue_pair_clean_rings(vsi, queue_pair);
+ i40e_queue_pair_reset_stats(vsi, queue_pair);
+
+@@ -15623,10 +15667,10 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+ **/
+ static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
+ {
+- struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
++ struct i40e_pf *pf = i40e_hw_to_pf(hw);
+
+- hw->subsystem_device_id = pdev->subsystem_device ?
+- pdev->subsystem_device :
++ hw->subsystem_device_id = pf->pdev->subsystem_device ?
++ pf->pdev->subsystem_device :
+ (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
+ }
+
+@@ -15696,7 +15740,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ set_bit(__I40E_DOWN, pf->state);
+
+ hw = &pf->hw;
+- hw->back = pf;
+
+ pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
+@@ -16194,8 +16237,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ if (val < MAX_FRAME_SIZE_DEFAULT)
+- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+- i, val);
++ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
++ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
+
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+@@ -16320,11 +16363,15 @@ static void i40e_remove(struct pci_dev *pdev)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+- /* Now we can shutdown the PF's VSI, just before we kill
++ /* Now we can shutdown the PF's VSIs, just before we kill
+ * adminq and hmc.
+ */
+- if (pf->vsi[pf->lan_vsi])
+- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
++ for (i = pf->num_alloc_vsi; i--;)
++ if (pf->vsi[i]) {
++ i40e_vsi_close(pf->vsi[i]);
++ i40e_vsi_release(pf->vsi[i]);
++ pf->vsi[i] = NULL;
++ }
+
+ i40e_cloud_filter_exit(pf);
+
+@@ -16475,6 +16522,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+ return;
+
+ i40e_reset_and_rebuild(pf, false, false);
++#ifdef CONFIG_PCI_IOV
++ i40e_restore_all_vfs_msi_state(pdev);
++#endif /* CONFIG_PCI_IOV */
+ }
+
+ /**
+@@ -16728,7 +16778,7 @@ static int __init i40e_init_module(void)
+ * since we need to be able to guarantee forward progress even under
+ * memory pressure.
+ */
+- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
++ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+ if (!i40e_wq) {
+ pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 07a46adeab38e5..e5aec09d58e27e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -1,6 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
++#include <linux/bitfield.h>
++#include <linux/delay.h>
++#include "i40e_alloc.h"
+ #include "i40e_prototype.h"
+
+ /**
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+deleted file mode 100644
+index 2bd4de03dafa2b..00000000000000
+--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
++++ /dev/null
+@@ -1,59 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+-
+-#ifndef _I40E_OSDEP_H_
+-#define _I40E_OSDEP_H_
+-
+-#include <linux/types.h>
+-#include <linux/if_ether.h>
+-#include <linux/if_vlan.h>
+-#include <linux/tcp.h>
+-#include <linux/pci.h>
+-#include <linux/highuid.h>
+-
+-/* get readq/writeq support for 32 bit kernels, use the low-first version */
+-#include <linux/io-64-nonatomic-lo-hi.h>
+-
+-/* File to be the magic between shared code and
+- * actual OS primitives
+- */
+-
+-#define hw_dbg(hw, S, A...) \
+-do { \
+- dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \
+-} while (0)
+-
+-#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+-#define rd32(a, reg) readl((a)->hw_addr + (reg))
+-
+-#define rd64(a, reg) readq((a)->hw_addr + (reg))
+-#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
+-
+-/* memory allocation tracking */
+-struct i40e_dma_mem {
+- void *va;
+- dma_addr_t pa;
+- u32 size;
+-};
+-
+-#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+- i40e_allocate_dma_mem_d(h, m, s, a)
+-#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+-
+-struct i40e_virt_mem {
+- void *va;
+- u32 size;
+-};
+-
+-#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+-#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+-
+-#define i40e_debug(h, m, s, ...) \
+-do { \
+- if (((m) & (h)->debug_mask)) \
+- pr_info("i40e %02x:%02x.%x " s, \
+- (h)->bus.bus_id, (h)->bus.device, \
+- (h)->bus.func, ##__VA_ARGS__); \
+-} while (0)
+-
+-#endif /* _I40E_OSDEP_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+index 3eeee224f1fb29..2001fefa0c52d6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+@@ -4,9 +4,9 @@
+ #ifndef _I40E_PROTOTYPE_H_
+ #define _I40E_PROTOTYPE_H_
+
+-#include "i40e_type.h"
+-#include "i40e_alloc.h"
+ #include <linux/avf/virtchnl.h>
++#include "i40e_debug.h"
++#include "i40e_type.h"
+
+ /* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+index 8a26811140b479..65c714d0bfffd1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+@@ -1,9 +1,10 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+-#include "i40e.h"
+ #include <linux/ptp_classify.h>
+ #include <linux/posix-clock.h>
++#include "i40e.h"
++#include "i40e_devids.h"
+
+ /* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+@@ -34,7 +35,7 @@ enum i40e_ptp_pin {
+ GPIO_4
+ };
+
+-enum i40e_can_set_pins_t {
++enum i40e_can_set_pins {
+ CANT_DO_PINS = -1,
+ CAN_SET_PINS,
+ CAN_DO_PINS
+@@ -192,7 +193,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
+ * return CAN_DO_PINS if pins can be manipulated within a NIC or
+ * return CANT_DO_PINS otherwise.
+ **/
+-static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
++static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf)
+ {
+ if (!i40e_is_ptp_pin_dev(&pf->hw)) {
+ dev_warn(&pf->pdev->dev,
+@@ -1070,7 +1071,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
+ static int i40e_ptp_set_pins(struct i40e_pf *pf,
+ struct i40e_ptp_pins_settings *pins)
+ {
+- enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
++ enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf);
+ int i = 0;
+
+ if (pin_caps == CANT_DO_PINS)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
+index 7339003aa17cd3..d3c82ba3835a78 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
+@@ -202,7 +202,9 @@
+ #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+ #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+ #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
++#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+ #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
++#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
+ #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+ #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+ #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+@@ -328,8 +330,11 @@
+ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+ #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+ #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
++#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+ #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+ #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
++#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
++#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+ #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+ #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index b047c587629b67..c962987d8b51bb 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1,14 +1,13 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+-#include <linux/prefetch.h>
+ #include <linux/bpf_trace.h>
++#include <linux/prefetch.h>
++#include <linux/sctp.h>
+ #include <net/mpls.h>
+ #include <net/xdp.h>
+-#include "i40e.h"
+-#include "i40e_trace.h"
+-#include "i40e_prototype.h"
+ #include "i40e_txrx_common.h"
++#include "i40e_trace.h"
+ #include "i40e_xsk.h"
+
+ #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+@@ -1556,7 +1555,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+ {
+ struct device *dev = rx_ring->dev;
+- int err;
+
+ u64_stats_init(&rx_ring->syncp);
+
+@@ -1577,14 +1575,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+ rx_ring->next_to_process = 0;
+ rx_ring->next_to_use = 0;
+
+- /* XDP RX-queue info only needed for RX rings exposed to XDP */
+- if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
+- if (err < 0)
+- return err;
+- }
+-
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
+ rx_ring->rx_bi =
+@@ -2100,7 +2090,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ struct xdp_buff *xdp)
+ {
+- u32 next = rx_ring->next_to_clean;
++ u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
++ u32 next = rx_ring->next_to_clean, i = 0;
+ struct i40e_rx_buffer *rx_buffer;
+
+ xdp->flags = 0;
+@@ -2113,10 +2104,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ if (!rx_buffer->page)
+ continue;
+
+- if (xdp_res == I40E_XDP_CONSUMED)
+- rx_buffer->pagecnt_bias++;
+- else
++ if (xdp_res != I40E_XDP_CONSUMED)
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
++ else if (i++ <= nr_frags)
++ rx_buffer->pagecnt_bias++;
+
+ /* EOP buffer will be put in i40e_clean_rx_irq() */
+ if (next == rx_ring->next_to_process)
+@@ -2130,20 +2121,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+- * @nr_frags: number of buffers for the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+- struct xdp_buff *xdp,
+- u32 nr_frags)
++ struct xdp_buff *xdp)
+ {
+ unsigned int size = xdp->data_end - xdp->data;
+ struct i40e_rx_buffer *rx_buffer;
++ struct skb_shared_info *sinfo;
+ unsigned int headlen;
+ struct sk_buff *skb;
++ u32 nr_frags = 0;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(xdp->data);
+@@ -2181,6 +2172,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ memcpy(__skb_put(skb, headlen), xdp->data,
+ ALIGN(headlen, sizeof(long)));
+
++ if (unlikely(xdp_buff_has_frags(xdp))) {
++ sinfo = xdp_get_shared_info_from_buff(xdp);
++ nr_frags = sinfo->nr_frags;
++ }
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ /* update all of the pointers */
+ size -= headlen;
+@@ -2200,9 +2195,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+- struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
++ struct skb_shared_info *skinfo = skb_shinfo(skb);
+
+- sinfo = xdp_get_shared_info_from_buff(xdp);
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+@@ -2225,17 +2219,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+- * @nr_frags: number of buffers for the packet
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+- struct xdp_buff *xdp,
+- u32 nr_frags)
++ struct xdp_buff *xdp)
+ {
+ unsigned int metasize = xdp->data - xdp->data_meta;
++ struct skb_shared_info *sinfo;
+ struct sk_buff *skb;
++ u32 nr_frags;
+
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly as xdp->data, otherwise we
+@@ -2244,6 +2238,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ */
+ net_prefetch(xdp->data_meta);
+
++ if (unlikely(xdp_buff_has_frags(xdp))) {
++ sinfo = xdp_get_shared_info_from_buff(xdp);
++ nr_frags = sinfo->nr_frags;
++ }
++
+ /* build an skb around the page buffer */
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
+ if (unlikely(!skb))
+@@ -2256,9 +2255,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ skb_metadata_set(skb, metasize);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+- struct skb_shared_info *sinfo;
+-
+- sinfo = xdp_get_shared_info_from_buff(xdp);
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+@@ -2603,9 +2599,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ total_rx_bytes += size;
+ } else {
+ if (ring_uses_build_skb(rx_ring))
+- skb = i40e_build_skb(rx_ring, xdp, nfrags);
++ skb = i40e_build_skb(rx_ring, xdp);
+ else
+- skb = i40e_construct_skb(rx_ring, xdp, nfrags);
++ skb = i40e_construct_skb(rx_ring, xdp);
+
+ /* drop if we failed to retrieve a buffer */
+ if (!skb) {
+@@ -2647,7 +2643,22 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ return failure ? budget : (int)total_rx_packets;
+ }
+
+-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
++/**
++ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
++ * @itr_idx: interrupt throttling index
++ * @interval: interrupt throttling interval value in usecs
++ * @force_swint: force software interrupt
++ *
++ * The function builds a value for I40E_PFINT_DYN_CTLN register that
++ * is used to update the interrupt throttling interval for the specified
++ * ITR index and optionally enforces a software interrupt. If @itr_idx is
++ * equal to I40E_ITR_NONE then no interval change is applied and only the
++ * @force_swint parameter is taken into account. If neither an interval
++ * change nor an enforced software interrupt is requested, the built value
++ * simply enables the appropriate vector interrupt.
++ **/
++static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
++ bool force_swint)
+ {
+ u32 val;
+
+@@ -2661,23 +2672,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+ * an event in the PBA anyway so we need to rely on the automask
+ * to hold pending events for us until the interrupt is re-enabled
+ *
+- * The itr value is reported in microseconds, and the register
+- * value is recorded in 2 microsecond units. For this reason we
+- * only need to shift by the interval shift - 1 instead of the
+- * full value.
++ * We have to shift the given value as it is reported in microseconds
++ * and the register value is recorded in 2 microsecond units.
+ */
+- itr &= I40E_ITR_MASK;
++ interval >>= 1;
+
++ /* 1. Enable vector interrupt
++ * 2. Update the interval for the specified ITR index
++ * (I40E_ITR_NONE in the register is used to indicate that
++ * no interval update is requested)
++ */
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+- (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+- (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
++ FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
++ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
++
++ /* 3. Enforce software interrupt trigger if requested
++ * (The rate of these software interrupts is limited by ITR2,
++ * which is set to 20K interrupts per second)
++ */
++ if (force_swint)
++ val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
++ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
++ FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
++ I40E_SW_ITR);
+
+ return val;
+ }
+
+-/* a small macro to shorten up some long lines */
+-#define INTREG I40E_PFINT_DYN_CTLN
+-
+ /* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
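
The rewritten i40e_buildreg_itr() composes the register value with FIELD_PREP instead of open-coded shifts. A standalone userspace sketch of the same composition, using the mask layout shown in the i40e_register.h hunk above; field_prep() here is a hand-rolled stand-in for the kernel macro and relies on the GCC/Clang __builtin_ctz builtin:

#include <stdint.h>
#include <stdio.h>

#define INTENA_MASK     0x1u                       /* bit 0: enable interrupt */
#define ITR_INDX_SHIFT  3
#define ITR_INDX_MASK   (0x3u << ITR_INDX_SHIFT)
#define INTERVAL_SHIFT  5
#define INTERVAL_MASK   (0xFFFu << INTERVAL_SHIFT)

/* minimal FIELD_PREP: shift a value into a contiguous bit mask */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t buildreg_itr(uint32_t itr_idx, uint16_t interval_usecs)
{
	uint32_t interval = interval_usecs >> 1;  /* register uses 2-usec units */

	return INTENA_MASK |
	       field_prep(ITR_INDX_MASK, itr_idx) |
	       field_prep(INTERVAL_MASK, interval);
}

int main(void)
{
	/* RX ITR (index 0) with a 50 usec interval -> 0x00000321 */
	printf("0x%08x\n", buildreg_itr(0, 50));
	return 0;
}
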
+@@ -2696,8 +2717,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+ {
++ enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
+ struct i40e_hw *hw = &vsi->back->hw;
+- u32 intval;
++ u16 interval = 0;
++ u32 itr_val;
+
+ /* If we don't have MSIX, then we only need to re-enable icr0 */
+ if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+@@ -2719,8 +2742,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ */
+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ /* Rx ITR needs to be reduced, this is highest priority */
+- intval = i40e_buildreg_itr(I40E_RX_ITR,
+- q_vector->rx.target_itr);
++ itr_idx = I40E_RX_ITR;
++ interval = q_vector->rx.target_itr;
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+@@ -2729,25 +2752,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ /* Tx ITR needs to be reduced, this is second priority
+ * Tx ITR needs to be increased more than Rx, fourth priority
+ */
+- intval = i40e_buildreg_itr(I40E_TX_ITR,
+- q_vector->tx.target_itr);
++ itr_idx = I40E_TX_ITR;
++ interval = q_vector->tx.target_itr;
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ /* Rx ITR needs to be increased, third priority */
+- intval = i40e_buildreg_itr(I40E_RX_ITR,
+- q_vector->rx.target_itr);
++ itr_idx = I40E_RX_ITR;
++ interval = q_vector->rx.target_itr;
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else {
+ /* No ITR update, lowest priority */
+- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ }
+
+- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
+- wr32(hw, INTREG(q_vector->reg_idx), intval);
++ /* Do not update interrupt control register if VSI is down */
++ if (test_bit(__I40E_VSI_DOWN, vsi->state))
++ return;
++
++ /* Update ITR interval if necessary and enforce software interrupt
++ * if we are exiting busy poll.
++ */
++ if (q_vector->in_busy_poll) {
++ itr_val = i40e_buildreg_itr(itr_idx, interval, true);
++ q_vector->in_busy_poll = false;
++ } else {
++ itr_val = i40e_buildreg_itr(itr_idx, interval, false);
++ }
++ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
+ }
+
+ /**
+@@ -2862,6 +2896,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ i40e_update_enable_itr(vsi, q_vector);
++ else
++ q_vector->in_busy_poll = true;
+
+ return min(work_done, budget - 1);
+ }
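
The in_busy_poll flag set above records that napi_complete_done() declined completion because busy polling owns the NAPI context; i40e_update_enable_itr() later consumes the flag and forces a software interrupt on the next re-enable. A small userspace model of that bookkeeping (illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct q_vector { bool in_busy_poll; };

/* called at the end of a poll cycle */
static void poll_done(struct q_vector *q, bool napi_completed)
{
	if (!napi_completed)
		q->in_busy_poll = true;   /* busy poll still in progress */
}

/* called when re-enabling the interrupt; returns whether to force swint */
static bool reenable_irq(struct q_vector *q)
{
	bool force_swint = q->in_busy_poll;

	q->in_busy_poll = false;          /* flag is consumed on exit */
	return force_swint;
}

int main(void)
{
	struct q_vector q = { .in_busy_poll = false };

	poll_done(&q, false);
	printf("force swint: %d\n", reenable_irq(&q));  /* 1 */
	printf("force swint: %d\n", reenable_irq(&q));  /* 0 */
	return 0;
}
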
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+index 900b0d9ede9f51..2b1d50873a4d16 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+@@ -5,6 +5,7 @@
+ #define _I40E_TXRX_H_
+
+ #include <net/xdp.h>
++#include "i40e_type.h"
+
+ /* Interrupt Throttling and Rate Limiting Goodies */
+ #define I40E_DEFAULT_IRQ_WORK 256
+@@ -57,7 +58,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
+ * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
+ * register but instead is a special value meaning "don't update" ITR0/1/2.
+ */
+-enum i40e_dyn_idx_t {
++enum i40e_dyn_idx {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+@@ -67,6 +68,7 @@ enum i40e_dyn_idx_t {
+ /* these are indexes into ITRN registers */
+ #define I40E_RX_ITR I40E_IDX_ITR0
+ #define I40E_TX_ITR I40E_IDX_ITR1
++#define I40E_SW_ITR I40E_IDX_ITR2
+
+ /* Supported RSS offloads */
+ #define I40E_DEFAULT_RSS_HENA ( \
+@@ -305,7 +307,7 @@ struct i40e_rx_queue_stats {
+ u64 page_busy_count;
+ };
+
+-enum i40e_ring_state_t {
++enum i40e_ring_state {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_RING_STATE_NBITS /* must be last */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+index 8c5118c8baafb1..e26807fd212327 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+@@ -4,6 +4,8 @@
+ #ifndef I40E_TXRX_COMMON_
+ #define I40E_TXRX_COMMON_
+
++#include "i40e.h"
++
+ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
+ void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
+index 232131bedc3e79..6e7cb2081ab37e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
+@@ -4,12 +4,9 @@
+ #ifndef _I40E_TYPE_H_
+ #define _I40E_TYPE_H_
+
+-#include "i40e_osdep.h"
+-#include "i40e_register.h"
++#include <uapi/linux/if_ether.h>
+ #include "i40e_adminq.h"
+ #include "i40e_hmc.h"
+-#include "i40e_lan_hmc.h"
+-#include "i40e_devids.h"
+
+ /* I40E_MASK is a macro used on 32 bit registers */
+ #define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
+@@ -43,48 +40,14 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+ #define I40E_QTX_CTL_VM_QUEUE 0x1
+ #define I40E_QTX_CTL_PF_QUEUE 0x2
+
+-/* debug masks - set these bits in hw->debug_mask to control output */
+-enum i40e_debug_mask {
+- I40E_DEBUG_INIT = 0x00000001,
+- I40E_DEBUG_RELEASE = 0x00000002,
+-
+- I40E_DEBUG_LINK = 0x00000010,
+- I40E_DEBUG_PHY = 0x00000020,
+- I40E_DEBUG_HMC = 0x00000040,
+- I40E_DEBUG_NVM = 0x00000080,
+- I40E_DEBUG_LAN = 0x00000100,
+- I40E_DEBUG_FLOW = 0x00000200,
+- I40E_DEBUG_DCB = 0x00000400,
+- I40E_DEBUG_DIAG = 0x00000800,
+- I40E_DEBUG_FD = 0x00001000,
+- I40E_DEBUG_PACKAGE = 0x00002000,
+- I40E_DEBUG_IWARP = 0x00F00000,
+- I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+- I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+- I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+- I40E_DEBUG_AQ_COMMAND = 0x06000000,
+- I40E_DEBUG_AQ = 0x0F000000,
+-
+- I40E_DEBUG_USER = 0xF0000000,
+-
+- I40E_DEBUG_ALL = 0xFFFFFFFF
+-};
+-
+-#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
+- I40E_GLGEN_MSCA_STCODE_SHIFT)
+-#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
+- I40E_GLGEN_MSCA_OPCODE_SHIFT)
+-#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
+- I40E_GLGEN_MSCA_OPCODE_SHIFT)
+-
+-#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+- I40E_GLGEN_MSCA_STCODE_SHIFT)
+-#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+- I40E_GLGEN_MSCA_OPCODE_SHIFT)
+-#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
+- I40E_GLGEN_MSCA_OPCODE_SHIFT)
+-#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
+- I40E_GLGEN_MSCA_OPCODE_SHIFT)
++#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1)
++#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
++#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2)
++
++#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0)
++#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0)
++#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
++#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)
+
+ #define I40E_PHY_COM_REG_PAGE 0x1E
+ #define I40E_PHY_LED_LINK_MODE_MASK 0xF0
+@@ -525,7 +488,6 @@ struct i40e_dcbx_config {
+ /* Port hardware description */
+ struct i40e_hw {
+ u8 __iomem *hw_addr;
+- void *back;
+
+ /* subsystem structs */
+ struct i40e_phy_info phy;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index d3d6415553ed67..d5509bc16d0d57 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2,6 +2,8 @@
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+ #include "i40e.h"
++#include "i40e_lan_hmc.h"
++#include "i40e_virtchnl_pf.h"
+
+ /*********************notification routines***********************/
+
+@@ -152,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+ }
+
++#ifdef CONFIG_PCI_IOV
++void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
++{
++ u16 vf_id;
++ u16 pos;
++
++ /* Continue only if this is a PF */
++ if (!pdev->is_physfn)
++ return;
++
++ if (!pci_num_vf(pdev))
++ return;
++
++ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
++ if (pos) {
++ struct pci_dev *vf_dev = NULL;
++
++ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
++ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
++ if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
++ pci_restore_msi_state(vf_dev);
++ }
++ }
++}
++#endif /* CONFIG_PCI_IOV */
++
+ /**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+@@ -1602,8 +1630,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ {
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+- int i, v;
+ u32 reg;
++ int i;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+@@ -1614,11 +1642,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ return false;
+
+ /* Begin reset on all VFs at once */
+- for (v = 0; v < pf->num_alloc_vfs; v++) {
+- vf = &pf->vf[v];
++ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+- i40e_trigger_vf_reset(&pf->vf[v], flr);
++ i40e_trigger_vf_reset(vf, flr);
+ }
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+@@ -1627,14 +1654,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ * the VFs using a simple iterator that increments once that VF has
+ * finished resetting.
+ */
+- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
++ for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence, beginning with the VF that
+ * failed the previous check.
+ */
+- while (v < pf->num_alloc_vfs) {
+- vf = &pf->vf[v];
++ while (vf < &pf->vf[pf->num_alloc_vfs]) {
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+@@ -1644,7 +1670,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+- v++;
++ ++vf;
+ }
+ }
+
+@@ -1654,39 +1680,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+- if (v < pf->num_alloc_vfs)
++ if (vf < &pf->vf[pf->num_alloc_vfs])
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+- pf->vf[v].vf_id);
++ vf->vf_id);
+ usleep_range(10000, 20000);
+
+ /* Begin disabling all the rings associated with VFs, but do not wait
+ * between each VF.
+ */
+- for (v = 0; v < pf->num_alloc_vfs; v++) {
++ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ /* On initial reset, we don't have any queues to disable */
+- if (pf->vf[v].lan_vsi_idx == 0)
++ if (vf->lan_vsi_idx == 0)
+ continue;
+
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
+- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
++ i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
+ }
+
+ /* Now that we've notified HW to disable all of the VF rings, wait
+ * until they finish.
+ */
+- for (v = 0; v < pf->num_alloc_vfs; v++) {
++ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ /* On initial reset, we don't have any queues to disable */
+- if (pf->vf[v].lan_vsi_idx == 0)
++ if (vf->lan_vsi_idx == 0)
+ continue;
+
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
+- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
++ i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
+ }
+
+ /* Hw may need up to 50ms to finish disabling the RX queues. We
+@@ -1695,12 +1721,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+ mdelay(50);
+
+ /* Finish the reset on each VF */
+- for (v = 0; v < pf->num_alloc_vfs; v++) {
++ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
+- i40e_cleanup_reset_vf(&pf->vf[v]);
++ i40e_cleanup_reset_vf(vf);
+ }
+
+ i40e_flush(hw);
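
The hunks above convert i40e_reset_all_vfs() from index-based loops to pointer-range iteration over the VF array; both forms visit the same elements, and the pointer form drops the repeated re-derivation of &pf->vf[v]. A standalone comparison using a hypothetical struct:

#include <stdio.h>

struct vf { int id; };

int main(void)
{
	struct vf vfs[4] = { {0}, {1}, {2}, {3} };
	int num = 4;

	/* index-based, as before the patch */
	for (int v = 0; v < num; v++)
		printf("idx %d\n", vfs[v].id);

	/* pointer-range, as in the patched i40e_reset_all_vfs() */
	for (struct vf *vf = &vfs[0]; vf < &vfs[num]; ++vf)
		printf("ptr %d\n", vf->id);

	return 0;
}
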
+@@ -2193,8 +2219,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ vfres->vsi_res[0].qset_handle
+ = le16_to_cpu(vsi->info.qs_handle[0]);
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
++ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+ eth_zero_addr(vf->default_lan_addr.addr);
++ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->default_lan_addr.addr);
+@@ -2579,6 +2607,14 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ int aq_ret = 0;
+ int i;
+
++ if (vf->is_disabled_from_host) {
++ aq_ret = -EPERM;
++ dev_info(&pf->pdev->dev,
++ "Admin has disabled VF %d, will not enable queues\n",
++ vf->vf_id);
++ goto error_param;
++ }
++
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = -EINVAL;
+ goto error_param;
+@@ -2814,6 +2850,24 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+ (u8 *)&stats, sizeof(stats));
+ }
+
++/**
++ * i40e_can_vf_change_mac
++ * @vf: pointer to the VF info
++ *
++ * Return true if the VF is allowed to change its MAC filters, false otherwise
++ */
++static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
++{
++ /* If the VF MAC address has been set administratively (via the
++ * ndo_set_vf_mac command), then deny permission to the VF to
++ * add/delete unicast MAC addresses, unless the VF is trusted
++ */
++ if (vf->pf_set_mac && !vf->trusted)
++ return false;
++
++ return true;
++}
++
+ #define I40E_MAX_MACVLAN_PER_HW 3072
+ #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
+ (num_ports))
+@@ -2873,8 +2927,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
+ * The VF may request to set the MAC address filter already
+ * assigned to it so do not return an error in that case.
+ */
+- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+- !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
++ if (!i40e_can_vf_change_mac(vf) &&
++ !is_multicast_ether_addr(addr) &&
+ !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+@@ -3080,19 +3134,30 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ ret = -EINVAL;
+ goto error_param;
+ }
+- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+- was_unimac_deleted = true;
+ }
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ /* delete addresses from the list */
+- for (i = 0; i < al->num_elements; i++)
++ for (i = 0; i < al->num_elements; i++) {
++ const u8 *addr = al->list[i].addr;
++
++ /* Allow to delete VF primary MAC only if it was not set
++ * administratively by PF or if VF is trusted.
++ */
++ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
++ if (i40e_can_vf_change_mac(vf))
++ was_unimac_deleted = true;
++ else
++ continue;
++ }
++
+ if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
+ ret = -EINVAL;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ }
++ }
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+@@ -3519,16 +3584,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+ bool found = false;
+ int bkt;
+
+- if (!tc_filter->action) {
++ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
+ dev_info(&pf->pdev->dev,
+- "VF %d: Currently ADq doesn't support Drop Action\n",
+- vf->vf_id);
++ "VF %d: ADQ doesn't support this action (%d)\n",
++ vf->vf_id, tc_filter->action);
+ goto err;
+ }
+
+ /* action_meta is TC number here to which the filter is applied */
+ if (!tc_filter->action_meta ||
+- tc_filter->action_meta > I40E_MAX_VF_VSI) {
++ tc_filter->action_meta > vf->num_tc) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+ vf->vf_id, tc_filter->action_meta);
+ goto err;
+@@ -3842,7 +3907,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int aq_ret = 0;
+- int i, ret;
++ int i;
+
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ aq_ret = -EINVAL;
+@@ -3866,8 +3931,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+- if (!cfilter)
+- return -ENOMEM;
++ if (!cfilter) {
++ aq_ret = -ENOMEM;
++ goto err_out;
++ }
+
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+@@ -3915,13 +3982,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+
+ /* Adding cloud filter programmed as TC filter */
+ if (tcf.dst_port)
+- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
++ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ else
+- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+- if (ret) {
++ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
++ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+- vf->vf_id, ERR_PTR(ret),
++ vf->vf_id, ERR_PTR(aq_ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err_free;
+ }
+@@ -4704,9 +4771,12 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ struct virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
++ struct i40e_vsi *vsi;
++ unsigned long q_map;
+ struct i40e_vf *vf;
+ int abs_vf_id;
+ int ret = 0;
++ int tmp;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+@@ -4729,17 +4799,38 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
++ vf->is_disabled_from_host = false;
++ /* reset needed to reinit VF resources */
++ i40e_vc_reset_vf(vf, true);
+ i40e_set_vf_link_state(vf, &pfe, ls);
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
++ vf->is_disabled_from_host = false;
++ /* reset needed to reinit VF resources */
++ i40e_vc_reset_vf(vf, true);
+ i40e_set_vf_link_state(vf, &pfe, ls);
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ i40e_set_vf_link_state(vf, &pfe, ls);
++
++ vsi = pf->vsi[vf->lan_vsi_idx];
++ q_map = BIT(vsi->num_queue_pairs) - 1;
++
++ vf->is_disabled_from_host = true;
++
++ /* Try to stop both the Tx and Rx rings even if one of the
++ * calls fails, so that the rings are stopped even in case of
++ * errors. If either call returns an error, the first error
++ * that occurred is returned.
++ */
++ tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
++ ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
++
++ ret = tmp ? tmp : ret;
+ break;
+ default:
+ ret = -EINVAL;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+index 895b8feb2567ce..66f95e2f3146a8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+@@ -4,7 +4,9 @@
+ #ifndef _I40E_VIRTCHNL_PF_H_
+ #define _I40E_VIRTCHNL_PF_H_
+
+-#include "i40e.h"
++#include <linux/avf/virtchnl.h>
++#include <linux/netdevice.h>
++#include "i40e_type.h"
+
+ #define I40E_MAX_VLANID 4095
+
+@@ -98,6 +100,7 @@ struct i40e_vf {
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
++ bool is_disabled_from_host; /* PF ctrl of VF enable/disable */
+ u16 num_vlan;
+
+ /* ADq related variables */
+@@ -135,6 +138,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+
+ void i40e_vc_notify_link_state(struct i40e_pf *pf);
+ void i40e_vc_notify_reset(struct i40e_pf *pf);
++#ifdef CONFIG_PCI_IOV
++void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
++#endif /* CONFIG_PCI_IOV */
+ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 7d991e4d9b896c..65f38a57b3dfe7 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -2,11 +2,7 @@
+ /* Copyright(c) 2018 Intel Corporation. */
+
+ #include <linux/bpf_trace.h>
+-#include <linux/stringify.h>
+ #include <net/xdp_sock_drv.h>
+-#include <net/xdp.h>
+-
+-#include "i40e.h"
+ #include "i40e_txrx_common.h"
+ #include "i40e_xsk.h"
+
+@@ -418,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+- virt_to_page(xdp->data_hard_start), 0, size);
++ virt_to_page(xdp->data_hard_start),
++ XDP_PACKET_HEADROOM, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+@@ -503,7 +500,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
+ xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
+ i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
+ &rx_bytes, xdp_res, &failure);
+- first->flags = 0;
+ next_to_clean = next_to_process;
+ if (failure)
+ break;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+index 821df248f8bee9..ef156fad52f262 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+@@ -4,6 +4,8 @@
+ #ifndef _I40E_XSK_H_
+ #define _I40E_XSK_H_
+
++#include <linux/types.h>
++
+ /* This value should match the pragma in the loop_unrolled_for
+ * macro. Why 4? It is strictly empirical. It seems to be a good
+ * compromise between the advantage of having simultaneous outstanding
+@@ -20,7 +22,9 @@
+ #define loop_unrolled_for for
+ #endif
+
++struct i40e_ring;
+ struct i40e_vsi;
++struct net_device;
+ struct xsk_buff_pool;
+
+ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index e110ba3461857b..431d9d62c8c668 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -298,13 +298,12 @@ struct iavf_adapter {
+ #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
+ #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
+ #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
+-#define IAVF_FLAG_PROMISC_ON BIT(13)
+-#define IAVF_FLAG_ALLMULTI_ON BIT(14)
+ #define IAVF_FLAG_LEGACY_RX BIT(15)
+ #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
+ #define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+ #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
+ #define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
++#define IAVF_FLAG_FDIR_ENABLED BIT(21)
+ /* duplicates for common code */
+ #define IAVF_FLAG_DCB_ENABLED 0
+ /* flags for admin queue service task */
+@@ -325,10 +324,7 @@ struct iavf_adapter {
+ #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+ #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
+ #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
+-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
+-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
+-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
+-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
++#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
+ #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
+ #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
+ #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
+@@ -365,6 +361,12 @@ struct iavf_adapter {
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+
++ /* Lock to prevent possible clobbering of
++ * current_netdev_promisc_flags
++ */
++ spinlock_t current_netdev_promisc_flags_lock;
++ netdev_features_t current_netdev_promisc_flags;
++
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+@@ -551,7 +553,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_del_ether_addrs(struct iavf_adapter *adapter);
+ void iavf_add_vlans(struct iavf_adapter *adapter);
+ void iavf_del_vlans(struct iavf_adapter *adapter);
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
++void iavf_set_promiscuous(struct iavf_adapter *adapter);
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
+ void iavf_request_stats(struct iavf_adapter *adapter);
+ int iavf_request_reset(struct iavf_adapter *adapter);
+ void iavf_get_hena(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
+index 1afd761d805208..f7988cf5efa58d 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
+@@ -1,10 +1,11 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
++#include <linux/avf/virtchnl.h>
++#include <linux/bitfield.h>
+ #include "iavf_type.h"
+ #include "iavf_adminq.h"
+ #include "iavf_prototype.h"
+-#include <linux/avf/virtchnl.h>
+
+ /**
+ * iavf_set_mac_type - Sets MAC type
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index 90397293525f71..1ac97bd606e38f 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -1,11 +1,12 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
++#include <linux/bitfield.h>
++#include <linux/uaccess.h>
++
+ /* ethtool support for iavf */
+ #include "iavf.h"
+
+-#include <linux/uaccess.h>
+-
+ /* ethtool statistics helpers */
+
+ /**
+@@ -829,18 +830,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+- if (ec->rx_coalesce_usecs == 0) {
+- if (ec->use_adaptive_rx_coalesce)
+- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+- } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
+- (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
++ if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+ return -EINVAL;
+- } else if (ec->tx_coalesce_usecs == 0) {
+- if (ec->use_adaptive_tx_coalesce)
+- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+- } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
+- (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
++ } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+ return -EINVAL;
+ }
+@@ -1071,7 +1064,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *rule = NULL;
+ int ret = 0;
+
+- if (!FDIR_FLTR_SUPPORT(adapter))
++ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+@@ -1213,7 +1206,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+ unsigned int cnt = 0;
+ int val = 0;
+
+- if (!FDIR_FLTR_SUPPORT(adapter))
++ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+@@ -1405,7 +1398,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ int count = 50;
+ int err;
+
+- if (!FDIR_FLTR_SUPPORT(adapter))
++ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+@@ -1446,12 +1439,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ iavf_fdir_list_add_fltr(adapter, fltr);
+ adapter->fdir_active_fltr++;
+- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++ if (adapter->link_up) {
++ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
++ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++ } else {
++ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
++ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+-
++ if (adapter->link_up)
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ ret:
+ if (err && fltr)
+ kfree(fltr);
+@@ -1473,7 +1470,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+- if (!FDIR_FLTR_SUPPORT(adapter))
++ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+@@ -1482,6 +1479,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
++ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
++ list_del(&fltr->list);
++ kfree(fltr);
++ adapter->fdir_active_fltr--;
++ fltr = NULL;
+ } else {
+ err = -EBUSY;
+ }
+@@ -1790,7 +1792,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+- if (!FDIR_FLTR_SUPPORT(adapter))
++ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ break;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ cmd->rule_cnt = adapter->fdir_active_fltr;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+index 03e774bd2a5b43..65ddcd81c993e7 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+@@ -3,6 +3,7 @@
+
+ /* flow director ethtool support for iavf */
+
++#include <linux/bitfield.h>
+ #include "iavf.h"
+
+ #define GTPU_PORT 2152
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+index 9eb9f73f6adf3a..d31bd923ba8cbf 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+@@ -6,12 +6,25 @@
+
+ struct iavf_adapter;
+
+-/* State of Flow Director filter */
++/* State of Flow Director filter
++ *
++ * *_REQUEST states mark a filter to be sent to the PF driver to perform an
++ * action (either add or delete the filter). *_PENDING states indicate that
++ * the request was sent to the PF and the driver is waiting for a response.
++ *
++ * Both the DELETE and DISABLE states are used to delete a filter in the PF.
++ * The difference is that after a successful response a filter in DEL_PENDING
++ * state is also deleted from the VF driver, while a filter in DIS_PENDING
++ * state is changed to the INACTIVE state.
++ */
+ enum iavf_fdir_fltr_state_t {
+ IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
+ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
+ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
+ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
++ IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */
++ IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */
++ IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */
+ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
+ };
+
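
The comment block above describes a small state machine; the iavf_clear_fdir_filters() and iavf_restore_fdir_filters() changes later in this patch implement its link-down and link-up transitions. A userspace model of those transitions (a sketch, not driver code):

#include <stdio.h>

enum fltr_state {
	ADD_REQUEST, ADD_PENDING, DEL_REQUEST, DEL_PENDING,
	DIS_REQUEST, DIS_PENDING, INACTIVE, ACTIVE
};

static enum fltr_state on_link_down(enum fltr_state s)
{
	switch (s) {
	case ADD_REQUEST:              return INACTIVE;     /* cancel request */
	case ADD_PENDING: case ACTIVE: return DIS_REQUEST;  /* disable in PF */
	default:                       return s;
	}
}

static enum fltr_state on_link_up(enum fltr_state s)
{
	switch (s) {
	case DIS_REQUEST:                return ACTIVE;       /* cancel request */
	case DIS_PENDING: case INACTIVE: return ADD_REQUEST;  /* re-add in PF */
	default:                         return s;
	}
}

int main(void)
{
	enum fltr_state s = ACTIVE;

	s = on_link_down(s);   /* ACTIVE -> DIS_REQUEST */
	s = on_link_up(s);     /* DIS_REQUEST -> ACTIVE */
	printf("final state: %d\n", s);
	return 0;
}
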
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index b3434dbc90d6fe..ce0b9199952649 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -277,27 +277,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
+ kfree(mem->va);
+ }
+
+-/**
+- * iavf_lock_timeout - try to lock mutex but give up after timeout
+- * @lock: mutex that should be locked
+- * @msecs: timeout in msecs
+- *
+- * Returns 0 on success, negative on failure
+- **/
+-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+-{
+- unsigned int wait, delay = 10;
+-
+- for (wait = 0; wait < msecs; wait += delay) {
+- if (mutex_trylock(lock))
+- return 0;
+-
+- msleep(delay);
+- }
+-
+- return -1;
+-}
+-
+ /**
+ * iavf_schedule_reset - Set the flags and schedule a reset event
+ * @adapter: board private structure
+@@ -1186,6 +1165,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
+ return 0;
+ }
+
++/**
++ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
++ * @adapter: device specific adapter
++ */
++bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
++{
++ return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
++ (IFF_PROMISC | IFF_ALLMULTI);
++}
++
+ /**
+ * iavf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+@@ -1199,19 +1188,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
+ __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+- if (netdev->flags & IFF_PROMISC &&
+- !(adapter->flags & IAVF_FLAG_PROMISC_ON))
+- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
+- else if (!(netdev->flags & IFF_PROMISC) &&
+- adapter->flags & IAVF_FLAG_PROMISC_ON)
+- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
+-
+- if (netdev->flags & IFF_ALLMULTI &&
+- !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
+- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+- else if (!(netdev->flags & IFF_ALLMULTI) &&
+- adapter->flags & IAVF_FLAG_ALLMULTI_ON)
+- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
++ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
++ if (iavf_promiscuous_mode_changed(adapter))
++ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ }
+
+ /**
+@@ -1355,18 +1335,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+ **/
+ static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+ {
+- struct iavf_fdir_fltr *fdir, *fdirtmp;
++ struct iavf_fdir_fltr *fdir;
+
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+- list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+- list) {
++ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+- list_del(&fdir->list);
+- kfree(fdir);
+- adapter->fdir_active_fltr--;
+- } else {
+- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
++ /* Cancel a request, keep filter as inactive */
++ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
++ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
++ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
++ /* Disable filters which are active or have an add
++ * request pending at the PF
++ */
++ fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+@@ -2162,19 +2144,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
+ return 0;
+ }
+
+- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
+- iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+- FLAG_VF_MULTICAST_PROMISC);
+- return 0;
+- }
+-
+- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
+- iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+- return 0;
+- }
+- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
+- (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+- iavf_set_promiscuous(adapter, 0);
++ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
++ iavf_set_promiscuous(adapter);
+ return 0;
+ }
+
+@@ -3603,6 +3574,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ }
+
++/**
++ * iavf_is_tc_config_same - Compare the mqprio TC config with the
++ * TC config already configured on this adapter.
++ * @adapter: board private structure
++ * @mqprio_qopt: TC config received from kernel.
++ *
++ * This function compares the TC config received from the kernel
++ * with the config already configured on the adapter.
++ *
++ * Return: True if the configuration is the same, false otherwise.
++ **/
++static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
++ struct tc_mqprio_qopt *mqprio_qopt)
++{
++ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
++ int i;
++
++ if (adapter->num_tc != mqprio_qopt->num_tc)
++ return false;
++
++ for (i = 0; i < adapter->num_tc; i++) {
++ if (ch[i].count != mqprio_qopt->count[i] ||
++ ch[i].offset != mqprio_qopt->offset[i])
++ return false;
++ }
++ return true;
++}
++
+ /**
+ * __iavf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+@@ -3660,7 +3659,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
+ if (ret)
+ return ret;
+ /* Return if same TC config is requested */
+- if (adapter->num_tc == num_tc)
++ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
+ return 0;
+ adapter->num_tc = num_tc;
+
+@@ -4184,6 +4183,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ }
+ }
+
++/**
++ * iavf_restore_fdir_filters
++ * @adapter: board private structure
++ *
++ * Restore existing FDIR filters when VF netdev comes back up.
++ **/
++static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
++{
++ struct iavf_fdir_fltr *f;
++
++ spin_lock_bh(&adapter->fdir_fltr_lock);
++ list_for_each_entry(f, &adapter->fdir_list_head, list) {
++ if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
++ /* Cancel a request, keep filter as active */
++ f->state = IAVF_FDIR_FLTR_ACTIVE;
++ } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
++ f->state == IAVF_FDIR_FLTR_INACTIVE) {
++ /* Re-add filters which are inactive or have a pending
++ * disable request at the PF
++ */
++ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
++ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++ }
++ }
++ spin_unlock_bh(&adapter->fdir_fltr_lock);
++}
++
+ /**
+ * iavf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+@@ -4251,8 +4277,9 @@ static int iavf_open(struct net_device *netdev)
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+- /* Restore VLAN filters that were removed with IFF_DOWN */
++ /* Restore filters that were removed with IFF_DOWN */
+ iavf_restore_filters(adapter);
++ iavf_restore_fdir_filters(adapter);
+
+ iavf_configure(adapter);
+
+@@ -4389,6 +4416,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
+ return ret;
+ }
+
++/**
++ * iavf_disable_fdir - disable Flow Director and clear existing filters
++ * @adapter: board private structure
++ **/
++static void iavf_disable_fdir(struct iavf_adapter *adapter)
++{
++ struct iavf_fdir_fltr *fdir, *fdirtmp;
++ bool del_filters = false;
++
++ adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
++
++ /* remove all Flow Director filters */
++ spin_lock_bh(&adapter->fdir_fltr_lock);
++ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
++ list) {
++ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
++ fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
++ /* Delete filters not registered in PF */
++ list_del(&fdir->list);
++ kfree(fdir);
++ adapter->fdir_active_fltr--;
++ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
++ fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
++ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
++ /* Filters registered in PF, schedule their deletion */
++ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
++ del_filters = true;
++ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
++ /* A request to delete the filter was already sent to the
++ * PF; change the state to DEL_PENDING so it is deleted
++ * after the PF's response rather than set to INACTIVE
++ */
++ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
++ }
++ }
++ spin_unlock_bh(&adapter->fdir_fltr_lock);
++
++ if (del_filters) {
++ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
++ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
++ }
++}
++
+ #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+@@ -4411,6 +4481,13 @@ static int iavf_set_features(struct net_device *netdev,
+ iavf_set_vlan_offload_features(adapter, netdev->features,
+ features);
+
++ if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
++ if (features & NETIF_F_NTUPLE)
++ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
++ else
++ iavf_disable_fdir(adapter);
++ }
++
+ return 0;
+ }
+
+@@ -4706,6 +4783,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
+ {
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
++ if (!FDIR_FLTR_SUPPORT(adapter))
++ features &= ~NETIF_F_NTUPLE;
++
+ return iavf_fix_netdev_vlan_features(adapter, features);
+ }
+
+@@ -4823,6 +4903,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
++ if (FDIR_FLTR_SUPPORT(adapter)) {
++ netdev->hw_features |= NETIF_F_NTUPLE;
++ netdev->features |= NETIF_F_NTUPLE;
++ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
++ }
++
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Do not turn on offloads when they are requested to be turned off.
+@@ -4846,34 +4932,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
+ return 0;
+ }
+
+-/**
+- * iavf_shutdown - Shutdown the device in preparation for a reboot
+- * @pdev: pci device structure
+- **/
+-static void iavf_shutdown(struct pci_dev *pdev)
+-{
+- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
+- struct net_device *netdev = adapter->netdev;
+-
+- netif_device_detach(netdev);
+-
+- if (netif_running(netdev))
+- iavf_close(netdev);
+-
+- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
+- dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
+- /* Prevent the watchdog from running. */
+- iavf_change_state(adapter, __IAVF_REMOVE);
+- adapter->aq_required = 0;
+- mutex_unlock(&adapter->crit_lock);
+-
+-#ifdef CONFIG_PM
+- pci_save_state(pdev);
+-
+-#endif
+- pci_disable_device(pdev);
+-}
+-
+ /**
+ * iavf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+@@ -4970,6 +5028,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ spin_lock_init(&adapter->cloud_filter_list_lock);
+ spin_lock_init(&adapter->fdir_fltr_lock);
+ spin_lock_init(&adapter->adv_rss_lock);
++ spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+@@ -5086,17 +5145,22 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
+ **/
+ static void iavf_remove(struct pci_dev *pdev)
+ {
+- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
+ struct iavf_adv_rss *rss, *rsstmp;
+ struct iavf_mac_filter *f, *ftmp;
++ struct iavf_adapter *adapter;
+ struct net_device *netdev;
+ struct iavf_hw *hw;
+ int err;
+
+- netdev = adapter->netdev;
++ /* Don't proceed with remove if netdev is already freed */
++ netdev = pci_get_drvdata(pdev);
++ if (!netdev)
++ return;
++
++ adapter = iavf_pdev_to_adapter(pdev);
+ hw = &adapter->hw;
+
+ if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+@@ -5224,11 +5288,25 @@ static void iavf_remove(struct pci_dev *pdev)
+
+ destroy_workqueue(adapter->wq);
+
++ pci_set_drvdata(pdev, NULL);
++
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+ }
+
++/**
++ * iavf_shutdown - Shutdown the device in preparation for a reboot
++ * @pdev: pci device structure
++ **/
++static void iavf_shutdown(struct pci_dev *pdev)
++{
++ iavf_remove(pdev);
++
++ if (system_state == SYSTEM_POWER_OFF)
++ pci_set_power_state(pdev, PCI_D3hot);
++}
++
+ static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+
+ static struct pci_driver iavf_driver = {
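
The iavf_disable_fdir() hunk above walks the filter list with list_for_each_entry_safe(), the iterator variant that lets the loop body free the current node. A minimal userspace sketch of the same invariant, using a toy struct fltr and a hand-rolled singly linked list rather than the kernel's list_head API:

    #include <stdio.h>
    #include <stdlib.h>

    struct fltr {
        int id;
        struct fltr *next;
    };

    int main(void)
    {
        struct fltr *head = NULL;

        /* build a small list: 2 -> 1 -> 0 */
        for (int i = 0; i < 3; i++) {
            struct fltr *f = malloc(sizeof(*f));
            if (!f)
                return 1;
            f->id = i;
            f->next = head;
            head = f;
        }

        /* delete every node while iterating */
        for (struct fltr *f = head, *tmp; f; f = tmp) {
            tmp = f->next;   /* saved before free() invalidates f */
            printf("removing filter %d\n", f->id);
            free(f);
        }
        return 0;
    }

A plain for (f = head; f; f = f->next) loop would read f->next after free(f); the saved tmp pointer is what makes deletion during traversal safe, and it is exactly what the _safe iterator preserves.
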
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 8c5f6096b0022c..f998ecf743c46a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2013 - 2018 Intel Corporation. */
+
++#include <linux/bitfield.h>
+ #include <linux/prefetch.h>
+
+ #include "iavf.h"
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+index 7e6ee32d19b696..10ba36602c0c14 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+@@ -15,7 +15,6 @@
+ */
+ #define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+ #define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
+-#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
+ #define IAVF_ITR_100K 10 /* all values below must be even */
+ #define IAVF_ITR_50K 20
+ #define IAVF_ITR_20K 50
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index f9727e9c3d630f..b95a4f903204b4 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -936,14 +936,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ /**
+ * iavf_set_promiscuous
+ * @adapter: adapter structure
+- * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
++void iavf_set_promiscuous(struct iavf_adapter *adapter)
+ {
++ struct net_device *netdev = adapter->netdev;
+ struct virtchnl_promisc_info vpi;
+- int promisc_all;
++ unsigned int flags;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+@@ -952,36 +952,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+ return;
+ }
+
+- promisc_all = FLAG_VF_UNICAST_PROMISC |
+- FLAG_VF_MULTICAST_PROMISC;
+- if ((flags & promisc_all) == promisc_all) {
+- adapter->flags |= IAVF_FLAG_PROMISC_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
+- dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+- }
++ /* prevent changes to promiscuous flags */
++ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+
+- if (flags & FLAG_VF_MULTICAST_PROMISC) {
+- adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
+- dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+- adapter->netdev->name);
++ /* sanity check to prevent duplicate AQ calls */
++ if (!iavf_promiscuous_mode_changed(adapter)) {
++ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++ dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
++ /* allow changes to promiscuous flags */
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++ return;
+ }
+
+- if (!flags) {
+- if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+- adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+- }
++ /* there are 2 bits, but only 3 states */
++ if (!(netdev->flags & IFF_PROMISC) &&
++ netdev->flags & IFF_ALLMULTI) {
++ /* State 1 - only multicast promiscuous mode enabled
++ * - !IFF_PROMISC && IFF_ALLMULTI
++ */
++ flags = FLAG_VF_MULTICAST_PROMISC;
++ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++ adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
++ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
++ } else if (!(netdev->flags & IFF_PROMISC) &&
++ !(netdev->flags & IFF_ALLMULTI)) {
++ /* State 2 - unicast/multicast promiscuous mode disabled
++ * - !IFF_PROMISC && !IFF_ALLMULTI
++ */
++ flags = 0;
++ adapter->current_netdev_promisc_flags &=
++ ~(IFF_PROMISC | IFF_ALLMULTI);
++ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
++ } else {
++ /* State 3 - unicast/multicast promiscuous mode enabled
++ * - IFF_PROMISC && IFF_ALLMULTI
++ * - IFF_PROMISC && !IFF_ALLMULTI
++ */
++ flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
++ adapter->current_netdev_promisc_flags |= IFF_PROMISC;
++ if (netdev->flags & IFF_ALLMULTI)
++ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
++ else
++ adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
+
+- if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+- adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+- dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+- adapter->netdev->name);
+- }
++ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
++ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
++
++ /* allow changes to promiscuous flags */
++ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
++
+ adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+@@ -1717,8 +1738,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
+ **/
+ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+ {
++ struct virtchnl_fdir_del f = {};
+ struct iavf_fdir_fltr *fdir;
+- struct virtchnl_fdir_del f;
+ bool process_fltr = false;
+ int len;
+
+@@ -1735,11 +1756,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
+ process_fltr = true;
+- memset(&f, 0, len);
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ break;
++ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
++ process_fltr = true;
++ f.vsi_id = fdir->vc_add_msg.vsi_id;
++ f.flow_id = fdir->flow_id;
++ fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
++ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+@@ -1883,6 +1909,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
++/**
++ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
++ * @adapter: private adapter structure
++ *
++ * Called after a reset to re-add all FDIR filters and to delete any that
++ * were pending deletion.
++ */
++static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
++{
++ struct iavf_fdir_fltr *f, *ftmp;
++ bool add_filters = false;
++
++ spin_lock_bh(&adapter->fdir_fltr_lock);
++ list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
++ if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
++ f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
++ f->state == IAVF_FDIR_FLTR_ACTIVE) {
++			/* All filters and requests have been removed in the
++			 * PF; restore them
++ */
++ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
++ add_filters = true;
++ } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
++ f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
++			/* Link is down; leave the filters inactive */
++ f->state = IAVF_FDIR_FLTR_INACTIVE;
++ } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
++ f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
++			/* Delete filters that were pending deletion; the list
++			 * on the PF is already cleared after a reset
++ */
++ list_del(&f->list);
++ kfree(f);
++ adapter->fdir_active_fltr--;
++ }
++ }
++ spin_unlock_bh(&adapter->fdir_fltr_lock);
++
++ if (add_filters)
++ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
++}
++
+ /**
+ * iavf_virtchnl_completion
+ * @adapter: adapter structure
+@@ -2060,7 +2128,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head,
+ list) {
+- if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
++ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
++ fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+@@ -2196,6 +2265,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
++ iavf_activate_fdir_filters(adapter);
++
+ iavf_parse_vf_resource_msg(adapter);
+
+ /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
+@@ -2385,7 +2456,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+- if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
++ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
++ del_fltr->status ==
++ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ list_del(&fdir->list);
+@@ -2397,6 +2470,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
++ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
++ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
++ del_fltr->status ==
++ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
++ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
++ } else {
++ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
++ dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
++ del_fltr->status);
++ iavf_print_fdir_fltr(adapter, fdir);
++ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
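
The reworked iavf_set_promiscuous() above collapses the four IFF_PROMISC/IFF_ALLMULTI combinations into three states under the new current_netdev_promisc_flags_lock. A standalone sketch of just that state mapping; the flag values below are illustrative placeholders, not the kernel's:

    #include <stdio.h>

    #define IFF_ALLMULTI 0x1   /* toy values, not <linux/if.h> */
    #define IFF_PROMISC  0x2
    #define FLAG_VF_UNICAST_PROMISC   0x1
    #define FLAG_VF_MULTICAST_PROMISC 0x2

    /* two bits, but only three behaviours: promisc implies allmulti */
    static unsigned int vf_promisc_flags(unsigned int netdev_flags)
    {
        if (netdev_flags & IFF_PROMISC)
            return FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
        if (netdev_flags & IFF_ALLMULTI)
            return FLAG_VF_MULTICAST_PROMISC;
        return 0;
    }

    int main(void)
    {
        unsigned int cases[] = { 0, IFF_ALLMULTI, IFF_PROMISC,
                                 IFF_PROMISC | IFF_ALLMULTI };
        for (int i = 0; i < 4; i++)
            printf("netdev 0x%x -> vf 0x%x\n",
                   cases[i], vf_promisc_flags(cases[i]));
        return 0;
    }
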
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 5022b036ca4f9e..f943964ec05ae7 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -313,6 +313,7 @@ enum ice_vsi_state {
+ ICE_VSI_UMAC_FLTR_CHANGED,
+ ICE_VSI_MMAC_FLTR_CHANGED,
+ ICE_VSI_PROMISC_CHANGED,
++ ICE_VSI_REBUILD_PENDING,
+ ICE_VSI_STATE_NBITS /* must be last */
+ };
+
+@@ -407,9 +408,9 @@ struct ice_vsi {
+ struct ice_tc_cfg tc_cfg;
+ struct bpf_prog *xdp_prog;
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
+- unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
++ struct mutex xdp_state_lock;
+
+ struct net_device **target_netdevs;
+
+@@ -517,7 +518,7 @@ enum ice_misc_thread_tasks {
+ ICE_MISC_THREAD_NBITS /* must be last */
+ };
+
+-struct ice_switchdev_info {
++struct ice_eswitch {
+ struct ice_vsi *control_vsi;
+ struct ice_vsi *uplink_vsi;
+ struct ice_esw_br_offloads *br_offloads;
+@@ -630,7 +631,7 @@ struct ice_pf {
+ struct ice_link_default_override_tlv link_dflt_override;
+ struct ice_lag *lag; /* Link Aggregation information */
+
+- struct ice_switchdev_info switchdev;
++ struct ice_eswitch eswitch;
+ struct ice_esw_br_port *br_port;
+
+ #define ICE_INVALID_AGG_NODE_ID 0
+@@ -714,6 +715,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
+ ring->flags |= ICE_TX_FLAGS_RING_XDP;
+ }
+
++/**
++ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
++ * @vsi: pointer to VSI
++ * @qid: index of the queue to check for an attached XSK buffer pool
++ *
++ * Return: A pointer to the xsk_buff_pool structure if a buffer pool is
++ * attached and configured for zero-copy, NULL otherwise.
++ */
++static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
++ u16 qid)
++{
++ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++
++ if (!ice_is_xdp_ena_vsi(vsi))
++ return NULL;
++
++ return (pool && pool->dev) ? pool : NULL;
++}
++
+ /**
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Rx ring to use
+@@ -726,10 +746,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid = ring->q_index;
+
+- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+- return NULL;
+-
+- return xsk_get_pool_from_qid(vsi->netdev, qid);
++ return ice_get_xp_from_qid(vsi, qid);
+ }
+
+ /**
+@@ -754,12 +771,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
+ if (!ring)
+ return;
+
+- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+- ring->xsk_pool = NULL;
+- return;
+- }
+-
+- ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++ ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+ }
+
+ /**
+@@ -826,7 +838,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+ */
+ static inline bool ice_is_switchdev_running(struct ice_pf *pf)
+ {
+- return pf->switchdev.is_running;
++ return pf->eswitch.is_running;
+ }
+
+ #define ICE_FD_STAT_CTR_BLOCK_COUNT 256
+@@ -882,9 +894,16 @@ int ice_down(struct ice_vsi *vsi);
+ int ice_down_up(struct ice_vsi *vsi);
+ int ice_vsi_cfg_lan(struct ice_vsi *vsi);
+ struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
++
++enum ice_xdp_cfg {
++ ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
++ ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
++};
++
+ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
+-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
++int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
++ enum ice_xdp_cfg cfg_type);
++int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+ int
+ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 29f7a9852aec6f..72ca2199c95723 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -421,10 +421,10 @@ struct ice_aqc_vsi_props {
+ #define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
+ #define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
+ #define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+-#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH 0x0U
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP 0x1U
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR 0x2U
++#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING 0x3U
+ u8 inner_vlan_reserved2[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+@@ -490,11 +490,11 @@ struct ice_aqc_vsi_props {
+ #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+ #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+ #define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M GENMASK(7, 6)
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U
++#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U
+ u8 q_opt_tc;
+ #define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+ #define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+@@ -592,8 +592,9 @@ struct ice_aqc_recipe_data_elem {
+ struct ice_aqc_recipe_to_profile {
+ __le16 profile_id;
+ u8 rsvd[6];
+- DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
++ __le64 recipe_assoc;
+ };
++static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16);
+
+ /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
+ */
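
The two ice_adminq_cmd.h hunks above replace pre-shifted constants with plain field values plus GENMASK()-style masks; callers then shift the value into place with FIELD_PREP(), as the ice_lib.c hunk further down does for ICE_AQ_VSI_INNER_VLAN_EMODE_M. A userspace sketch with simplified 32-bit stand-ins for the kernel macros from <linux/bits.h> and <linux/bitfield.h>:

    #include <stdio.h>

    /* stand-ins: assume 0 <= l <= h <= 31 */
    #define GENMASK(h, l)       ((~0u << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP(mask, v) (((v) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
        unsigned int mask = GENMASK(7, 6); /* RSS hash field, bits 7:6 */
        unsigned int sym_tplz = 0x1;       /* ..._RSS_HASH_SYM_TPLZ    */

        printf("mask    = 0x%02x\n", mask);                       /* 0xc0 */
        printf("encoded = 0x%02x\n", FIELD_PREP(mask, sym_tplz)); /* 0x40 */
        return 0;
    }

The old 0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S spelling baked the shift into every constant; keeping the value and the mask separate lets FIELD_PREP() (and FIELD_GET() on the read side) do the shifting in one audited place.
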
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 7fa43827a3f06c..9a0682b05c4ff2 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -519,6 +519,25 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
+ return 0;
+ }
+
++/**
++ * ice_get_frame_sz - calculate xdp_buff::frame_sz
++ * @rx_ring: the ring being configured
++ *
++ * Return frame size based on underlying PAGE_SIZE
++ */
++static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
++{
++ unsigned int frame_sz;
++
++#if (PAGE_SIZE >= 8192)
++ frame_sz = rx_ring->rx_buf_len;
++#else
++ frame_sz = ice_rx_pg_size(rx_ring) / 2;
++#endif
++
++ return frame_sz;
++}
++
+ /**
+ * ice_vsi_cfg_rxq - Configure an Rx queue
+ * @ring: the ring being configured
+@@ -534,19 +553,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ ring->rx_buf_len = ring->vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+- /* coverity[check_return] */
+- __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->q_index,
+- ring->q_vector->napi.napi_id,
+- ring->vsi->rx_buf_len);
++ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ }
+
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
+
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+@@ -557,13 +584,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->q_index);
+ } else {
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+- /* coverity[check_return] */
+- __xdp_rxq_info_reg(&ring->xdp_rxq,
+- ring->netdev,
+- ring->q_index,
+- ring->q_vector->napi.napi_id,
+- ring->vsi->rx_buf_len);
++ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
++ }
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+@@ -573,7 +601,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ }
+ }
+
+- xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
++ xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
+ ring->xdp.data = NULL;
+ err = ice_setup_rx_ctx(ring);
+ if (err) {
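
ice_get_frame_sz() above picks xdp_buff::frame_sz at compile time: half a page on 4 KiB-page systems, the ring's configured buffer length on larger pages. A runtime toy model of that decision (deliberately simplified: it ignores the driver's higher-order page handling):

    #include <stdio.h>

    static unsigned int get_frame_sz(unsigned long page_size,
                                     unsigned int rx_buf_len)
    {
        if (page_size >= 8192)
            return rx_buf_len;   /* big pages: use the ring's length */
        return page_size / 2;    /* small pages: half-page split     */
    }

    int main(void)
    {
        printf("4K pages : %u\n", get_frame_sz(4096, 3072));   /* 2048 */
        printf("64K pages: %u\n", get_frame_sz(65536, 3072));  /* 3072 */
        return 0;
    }
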
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index a655d499abfa85..e7f1e53314d768 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -16,12 +16,12 @@
+ * @vf: pointer to VF struct
+ *
+ * This function adds advanced rule that forwards packets with
+- * VF's VSI index to the corresponding switchdev ctrl VSI queue.
++ * VF's VSI index to the corresponding eswitch ctrl VSI queue.
+ */
+ static int
+ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
+ {
+- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_adv_lkup_elem *list;
+ struct ice_hw *hw = &pf->hw;
+@@ -59,7 +59,7 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
+ * @vf: pointer to the VF struct
+ *
+ * Delete the advanced rule that was used to forward packets with the VF's VSI
+- * index to the corresponding switchdev ctrl VSI queue.
++ * index to the corresponding eswitch ctrl VSI queue.
+ */
+ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
+ {
+@@ -70,7 +70,7 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
+ }
+
+ /**
+- * ice_eswitch_setup_env - configure switchdev HW filters
++ * ice_eswitch_setup_env - configure eswitch HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function adds HW filters configuration specific for switchdev
+@@ -78,18 +78,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
+ */
+ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ {
+- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+- struct net_device *uplink_netdev = uplink_vsi->netdev;
+- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
++ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
++ struct net_device *netdev = uplink_vsi->netdev;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ bool rule_added = false;
+
+ ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+
+- netif_addr_lock_bh(uplink_netdev);
+- __dev_uc_unsync(uplink_netdev, NULL);
+- __dev_mc_unsync(uplink_netdev, NULL);
+- netif_addr_unlock_bh(uplink_netdev);
++ netif_addr_lock_bh(netdev);
++ __dev_uc_unsync(netdev, NULL);
++ __dev_mc_unsync(netdev, NULL);
++ netif_addr_unlock_bh(netdev);
+
+ if (ice_vsi_add_vlan_zero(uplink_vsi))
+ goto err_def_rx;
+@@ -132,10 +132,10 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ }
+
+ /**
+- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
++ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
+ * @pf: pointer to PF struct
+ *
+- * In switchdev number of allocated Tx/Rx rings is equal.
++ * In eswitch, the numbers of allocated Tx and Rx rings are equal.
+ *
+ * This function fills the q_vector structures associated with the
+ * representors and moves each ring pair to the port representor netdevs.
+@@ -144,7 +144,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ */
+ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+ {
+- struct ice_vsi *vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *vsi = pf->eswitch.control_vsi;
+ int q_id;
+
+ ice_for_each_txq(vsi, q_id) {
+@@ -189,7 +189,7 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+ /**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+- * @ctrl_vsi: pointer to switchdev control VSI
++ * @ctrl_vsi: pointer to eswitch control VSI
+ */
+ static void
+ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+@@ -223,7 +223,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+ */
+ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+ {
+- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ int max_vsi_num = 0;
+ struct ice_vf *vf;
+ unsigned int bkt;
+@@ -359,7 +359,7 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ }
+
+ /**
+- * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
++ * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
+ * @skb: pointer to send buffer
+ * @off: pointer to offload struct
+ */
+@@ -382,7 +382,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ }
+
+ /**
+- * ice_eswitch_release_env - clear switchdev HW filters
++ * ice_eswitch_release_env - clear eswitch HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function removes HW filters configuration specific for switchdev
+@@ -390,8 +390,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ */
+ static void ice_eswitch_release_env(struct ice_pf *pf)
+ {
+- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
++ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+@@ -407,7 +407,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
+ }
+
+ /**
+- * ice_eswitch_vsi_setup - configure switchdev control VSI
++ * ice_eswitch_vsi_setup - configure eswitch control VSI
+ * @pf: pointer to PF structure
+ * @pi: pointer to port_info structure
+ */
+@@ -486,12 +486,12 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
+ return -EINVAL;
+ }
+
+- pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+- if (!pf->switchdev.control_vsi)
++ pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
++ if (!pf->eswitch.control_vsi)
+ return -ENODEV;
+
+- ctrl_vsi = pf->switchdev.control_vsi;
+- pf->switchdev.uplink_vsi = uplink_vsi;
++ ctrl_vsi = pf->eswitch.control_vsi;
++ pf->eswitch.uplink_vsi = uplink_vsi;
+
+ if (ice_eswitch_setup_env(pf))
+ goto err_vsi;
+@@ -526,12 +526,12 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
+ }
+
+ /**
+- * ice_eswitch_disable_switchdev - disable switchdev resources
++ * ice_eswitch_disable_switchdev - disable eswitch resources
+ * @pf: pointer to PF structure
+ */
+ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
+ {
+- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_br_offloads_deinit(pf);
+@@ -625,7 +625,7 @@ void ice_eswitch_release(struct ice_pf *pf)
+ return;
+
+ ice_eswitch_disable_switchdev(pf);
+- pf->switchdev.is_running = false;
++ pf->eswitch.is_running = false;
+ }
+
+ /**
+@@ -636,14 +636,15 @@ int ice_eswitch_configure(struct ice_pf *pf)
+ {
+ int status;
+
+- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
++ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY ||
++ pf->eswitch.is_running)
+ return 0;
+
+ status = ice_eswitch_enable_switchdev(pf);
+ if (status)
+ return status;
+
+- pf->switchdev.is_running = true;
++ pf->eswitch.is_running = true;
+ return 0;
+ }
+
+@@ -693,7 +694,7 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+ */
+ int ice_eswitch_rebuild(struct ice_pf *pf)
+ {
+- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
++ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ int status;
+
+ ice_eswitch_napi_disable(pf);
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+index 67bfd1f61cdd49..4750198d0c0cad 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+@@ -582,10 +582,13 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb,
+ return NOTIFY_DONE;
+ }
+
+-static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
++void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+ {
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
++ if (!bridge)
++ return;
++
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+ }
+@@ -947,7 +950,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
+ static int
+ ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
+ {
+- struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
++ struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
+ struct ice_esw_br_port *br_port;
+ int err;
+
+@@ -1185,7 +1188,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb,
+ static void
+ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
+ {
+- struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
++ struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;
+
+ ASSERT_RTNL();
+
+@@ -1194,7 +1197,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
+
+ ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
+
+- pf->switchdev.br_offloads = NULL;
++ pf->eswitch.br_offloads = NULL;
+ kfree(br_offloads);
+ }
+
+@@ -1205,14 +1208,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
+
+ ASSERT_RTNL();
+
+- if (pf->switchdev.br_offloads)
++ if (pf->eswitch.br_offloads)
+ return ERR_PTR(-EEXIST);
+
+ br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+
+- pf->switchdev.br_offloads = br_offloads;
++ pf->eswitch.br_offloads = br_offloads;
+ br_offloads->pf = pf;
+
+ return br_offloads;
+@@ -1223,7 +1226,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
+ {
+ struct ice_esw_br_offloads *br_offloads;
+
+- br_offloads = pf->switchdev.br_offloads;
++ br_offloads = pf->eswitch.br_offloads;
+ if (!br_offloads)
+ return;
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+index 85a8fadb2928e6..3920e506119148 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+@@ -116,5 +116,6 @@ void
+ ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
+ int
+ ice_eswitch_br_offloads_init(struct ice_pf *pf);
++void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge);
+
+ #endif /* _ICE_ESWITCH_BR_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index ad4d4702129f0f..39b5f24be7e4fc 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -1757,14 +1757,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
+ linkmode_zero(ks->link_modes.supported);
+ linkmode_zero(ks->link_modes.advertising);
+
+- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
++ for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
+ if (phy_types_low & BIT_ULL(i))
+ ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
+ req_speeds, advert_phy_type_lo,
+ i);
+ }
+
+- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
++ for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
+ if (phy_types_high & BIT_ULL(i))
+ ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
+ req_speeds, advert_phy_type_hi,
+@@ -3429,7 +3429,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ struct ice_pf *pf = vsi->back;
+ int new_rx = 0, new_tx = 0;
+ bool locked = false;
+- u32 curr_combined;
+ int ret = 0;
+
+ /* do not support changing channels in Safe Mode */
+@@ -3451,22 +3450,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ return -EOPNOTSUPP;
+ }
+
+- curr_combined = ice_get_combined_cnt(vsi);
+-
+- /* these checks are for cases where user didn't specify a particular
+- * value on cmd line but we get non-zero value anyway via
+- * get_channels(); look at ethtool.c in ethtool repository (the user
+- * space part), particularly, do_schannels() routine
+- */
+- if (ch->rx_count == vsi->num_rxq - curr_combined)
+- ch->rx_count = 0;
+- if (ch->tx_count == vsi->num_txq - curr_combined)
+- ch->tx_count = 0;
+- if (ch->combined_count == curr_combined)
+- ch->combined_count = 0;
+-
+- if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
+- netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
++ if (ch->rx_count && ch->tx_count) {
++ netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+index 8c6e13f87b7d3f..1839a37139dc16 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+@@ -531,7 +531,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ *
+ * Returns the number of available flow director filters to this VSI
+ */
+-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+ {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ u16 num_guar;
+diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
+index 1b9b844906899e..b384d2a4ab1981 100644
+--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
+@@ -202,6 +202,8 @@ struct ice_fdir_base_pkt {
+ const u8 *tun_pkt;
+ };
+
++struct ice_vsi;
++
+ int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+ int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+ int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+@@ -213,6 +215,7 @@ int
+ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun);
+ int ice_get_fdir_cnt_all(struct ice_hw *hw);
++int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
+ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+ bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+ struct ice_fdir_fltr *
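
The struct ice_vsi; line added above is a forward declaration: the new ice_fdir_num_avail_fltr() prototype only passes a pointer, so ice_fdir.h can stay free of the header that defines the full structure. A single-file illustration of the same trick, with hypothetical names:

    #include <stdio.h>

    struct vsi;                             /* incomplete type          */
    int count_filters(const struct vsi *v); /* pointer use is fine here */

    struct vsi { int num_fltr; };           /* full definition later    */

    int count_filters(const struct vsi *v)
    {
        return v->num_fltr;
    }

    int main(void)
    {
        struct vsi v = { .num_fltr = 3 };
        printf("%d filters\n", count_filters(&v));
        return 0;
    }
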
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 7b1256992dcf6d..4e675c7c199fa1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -536,6 +536,50 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
+ dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
+ }
+
++/**
++ * ice_lag_build_netdev_list - populate the lag struct's netdev list
++ * @lag: local lag struct
++ * @ndlist: pointer to netdev list to populate
++ */
++static void ice_lag_build_netdev_list(struct ice_lag *lag,
++ struct ice_lag_netdev_list *ndlist)
++{
++ struct ice_lag_netdev_list *nl;
++ struct net_device *tmp_nd;
++
++ INIT_LIST_HEAD(&ndlist->node);
++ rcu_read_lock();
++ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
++ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
++ if (!nl)
++ break;
++
++ nl->netdev = tmp_nd;
++ list_add(&nl->node, &ndlist->node);
++ }
++ rcu_read_unlock();
++ lag->netdev_head = &ndlist->node;
++}
++
++/**
++ * ice_lag_destroy_netdev_list - free lag struct's netdev list
++ * @lag: pointer to local lag struct
++ * @ndlist: pointer to lag struct netdev list
++ */
++static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
++ struct ice_lag_netdev_list *ndlist)
++{
++ struct ice_lag_netdev_list *entry, *n;
++
++ rcu_read_lock();
++ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
++ list_del(&entry->node);
++ kfree(entry);
++ }
++ rcu_read_unlock();
++ lag->netdev_head = NULL;
++}
++
+ /**
+ * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
+ * @lag: primary interface LAG struct
+@@ -564,7 +608,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
+ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
+ {
+ struct ice_lag_netdev_list ndlist;
+- struct list_head *tmp, *n;
+ u8 pri_port, act_port;
+ struct ice_lag *lag;
+ struct ice_vsi *vsi;
+@@ -588,38 +631,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
+ pri_port = pf->hw.port_info->lport;
+ act_port = lag->active_port;
+
+- if (lag->upper_netdev) {
+- struct ice_lag_netdev_list *nl;
+- struct net_device *tmp_nd;
+-
+- INIT_LIST_HEAD(&ndlist.node);
+- rcu_read_lock();
+- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+- if (!nl)
+- break;
+-
+- nl->netdev = tmp_nd;
+- list_add(&nl->node, &ndlist.node);
+- }
+- rcu_read_unlock();
+- }
+-
+- lag->netdev_head = &ndlist.node;
++ if (lag->upper_netdev)
++ ice_lag_build_netdev_list(lag, &ndlist);
+
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
+ lag->bonded && lag->primary && pri_port != act_port &&
+ !list_empty(lag->netdev_head))
+ ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
+
+- list_for_each_safe(tmp, n, &ndlist.node) {
+- struct ice_lag_netdev_list *entry;
+-
+- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+- list_del(&entry->node);
+- kfree(entry);
+- }
+- lag->netdev_head = NULL;
++ ice_lag_destroy_netdev_list(lag, &ndlist);
+
+ new_vf_unlock:
+ mutex_unlock(&pf->lag_mutex);
+@@ -646,6 +666,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
+ ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
+ }
+
++/**
++ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
++ * @lag: local lag struct
++ * @src_prt: lport value for source port
++ * @dst_prt: lport value for destination port
++ *
++ * This function is used to move nodes during an out-of-netdev-event situation,
++ * primarily when the driver needs to reconfigure or recreate resources.
++ *
++ * Must be called while holding the lag_mutex to prevent lag events from
++ * being processed while out-of-sync moves are happening. Also, paired
++ * moves, such as those used in a reset flow, should both be made under
++ * the same mutex lock to avoid changes between the start and end of reset.
++ */
++void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
++{
++ struct ice_lag_netdev_list ndlist;
++
++ ice_lag_build_netdev_list(lag, &ndlist);
++ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
++ ice_lag_destroy_netdev_list(lag, &ndlist);
++}
++
+ #define ICE_LAG_SRIOV_CP_RECIPE 10
+ #define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
+
+@@ -1529,18 +1572,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
+ */
+ static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
+ {
+- struct ice_lag_netdev_list *entry;
+ struct ice_netdev_priv *np;
+- struct net_device *netdev;
+ struct ice_pf *pf;
+
+- list_for_each_entry(entry, lag->netdev_head, node) {
+- netdev = entry->netdev;
+- np = netdev_priv(netdev);
+- pf = np->vsi->back;
+-
+- ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+- }
++ np = netdev_priv(lag->netdev);
++ pf = np->vsi->back;
++ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ }
+
+ /**
+@@ -1672,7 +1709,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
+- nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
++ nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
+ if (!nd_list)
+ break;
+
+@@ -1926,6 +1963,8 @@ int ice_init_lag(struct ice_pf *pf)
+ int n, err;
+
+ ice_lag_init_feature_support_flag(pf);
++ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
++ return 0;
+
+ pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
+ if (!pf->lag)
+@@ -1961,14 +2000,14 @@ int ice_init_lag(struct ice_pf *pf)
+ /* associate recipes to profiles */
+ for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
+ err = ice_aq_get_recipe_to_profile(&pf->hw, n,
+- (u8 *)&recipe_bits, NULL);
++ &recipe_bits, NULL);
+ if (err)
+ continue;
+
+ if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
+ recipe_bits |= BIT(lag->pf_recipe);
+ ice_aq_map_recipe_to_profile(&pf->hw, n,
+- (u8 *)&recipe_bits, NULL);
++ recipe_bits, NULL);
+ }
+ }
+
+@@ -2028,7 +2067,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
+ {
+ struct ice_lag_netdev_list ndlist;
+ struct ice_lag *lag, *prim_lag;
+- struct list_head *tmp, *n;
+ u8 act_port, loc_port;
+
+ if (!pf->lag || !pf->lag->bonded)
+@@ -2040,21 +2078,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
+ if (lag->primary) {
+ prim_lag = lag;
+ } else {
+- struct ice_lag_netdev_list *nl;
+- struct net_device *tmp_nd;
+-
+- INIT_LIST_HEAD(&ndlist.node);
+- rcu_read_lock();
+- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+- if (!nl)
+- break;
+-
+- nl->netdev = tmp_nd;
+- list_add(&nl->node, &ndlist.node);
+- }
+- rcu_read_unlock();
+- lag->netdev_head = &ndlist.node;
++ ice_lag_build_netdev_list(lag, &ndlist);
+ prim_lag = ice_lag_find_primary(lag);
+ }
+
+@@ -2084,13 +2108,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
+
+ ice_clear_rdma_cap(pf);
+ lag_rebuild_out:
+- list_for_each_safe(tmp, n, &ndlist.node) {
+- struct ice_lag_netdev_list *entry;
+-
+- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+- list_del(&entry->node);
+- kfree(entry);
+- }
++ ice_lag_destroy_netdev_list(lag, &ndlist);
+ mutex_unlock(&pf->lag_mutex);
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
+index facb6c894b6dd1..7f229876750123 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.h
++++ b/drivers/net/ethernet/intel/ice/ice_lag.h
+@@ -63,4 +63,5 @@ int ice_init_lag(struct ice_pf *pf);
+ void ice_deinit_lag(struct ice_pf *pf);
+ void ice_lag_rebuild(struct ice_pf *pf);
+ bool ice_lag_is_switchdev_running(struct ice_pf *pf);
++void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+ #endif /* _ICE_LAG_H_ */
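
The ice_lag.c hunks above fold three open-coded copies of the bond-member snapshot loop into the paired ice_lag_build_netdev_list()/ice_lag_destroy_netdev_list() helpers, and switch the allocation to GFP_ATOMIC since it happens under rcu_read_lock() (one of the duplicated loops had been using GFP_KERNEL there). A userspace sketch of the paired build/teardown shape, using a toy list in place of the kernel list API:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        const char *name;
        struct node *next;
    };

    static struct node *build_list(const char **names, int n)
    {
        struct node *head = NULL;

        for (int i = 0; i < n; i++) {
            struct node *e = malloc(sizeof(*e));
            if (!e)
                break;   /* a partial list still tears down cleanly */
            e->name = names[i];
            e->next = head;
            head = e;
        }
        return head;
    }

    static void destroy_list(struct node *head)
    {
        while (head) {
            struct node *tmp = head->next;
            free(head);
            head = tmp;
        }
    }

    int main(void)
    {
        const char *ports[] = { "eth0", "eth1" };
        struct node *list = build_list(ports, 2);

        for (struct node *e = list; e; e = e->next)
            printf("bond member: %s\n", e->name);
        destroy_list(list);
        return 0;
    }

Keeping allocation and teardown in one helper pair means every caller builds and frees the snapshot the same way instead of re-implementing the loop.
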
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 73bbf06a76db9d..3a0ef56d3edcac 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
+ if (!vsi->q_vectors)
+ goto err_vectors;
+
+- vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+- if (!vsi->af_xdp_zc_qps)
+- goto err_zc_qps;
+-
+ return 0;
+
+-err_zc_qps:
+- devm_kfree(dev, vsi->q_vectors);
+ err_vectors:
+ devm_kfree(dev, vsi->rxq_map);
+ err_rxq_map:
+@@ -321,8 +315,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
+
+ dev = ice_pf_to_dev(pf);
+
+- bitmap_free(vsi->af_xdp_zc_qps);
+- vsi->af_xdp_zc_qps = NULL;
+ /* free the ring and vector containers */
+ devm_kfree(dev, vsi->q_vectors);
+ vsi->q_vectors = NULL;
+@@ -467,6 +459,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
+
+ ice_vsi_free_stats(vsi);
+ ice_vsi_free_arrays(vsi);
++ mutex_destroy(&vsi->xdp_state_lock);
+ mutex_unlock(&pf->sw_mutex);
+ devm_kfree(dev, vsi);
+ }
+@@ -668,6 +661,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+
++ mutex_init(&vsi->xdp_state_lock);
++
+ unlock_pf:
+ mutex_unlock(&pf->sw_mutex);
+ return vsi;
+@@ -979,7 +974,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
+ */
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+- ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
++ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
++ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+@@ -1186,12 +1182,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
++ hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ break;
+ case ICE_VSI_VF:
+ /* VF VSI will gets a small RSS table which is a VSI LUT type */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
++ hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ break;
+ default:
+ dev_dbg(dev, "Unsupported VSI type %s\n",
+@@ -2384,6 +2380,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
++
++ if (vsi->type == ICE_VSI_PF)
++ max_txqs[i] += vsi->num_xdp_txq;
+ }
+
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
+@@ -2466,7 +2465,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto unroll_vector_base;
+- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
++ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
++ ICE_XDP_CFG_PART);
+ if (ret)
+ goto unroll_vector_base;
+ }
+@@ -2600,13 +2600,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+- /* The Rx rule will only exist to remove if the LLDP FW
+- * engine is currently stopped
+- */
+- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+- ice_cfg_sw_lldp(vsi, false, false);
+-
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+@@ -2617,7 +2610,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
+ /* return value check can be skipped here, it always returns
+ * 0 if reset is in progress
+ */
+- ice_destroy_xdp_rings(vsi);
++ ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
+
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_q_vectors(vsi);
+@@ -2633,10 +2626,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
+- if (vsi->agg_node) {
+- vsi->agg_node->valid = false;
+- vsi->agg_node->agg_id = 0;
+- }
+ }
+
+ /**
+@@ -2957,6 +2946,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ ice_rss_clean(vsi);
+
+ ice_vsi_close(vsi);
++
++ /* The Rx rule will only exist to remove if the LLDP FW
++ * engine is currently stopped
++ */
++ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
++ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
++ ice_cfg_sw_lldp(vsi, false, false);
++
+ ice_vsi_decfg(vsi);
+
+ /* retain SW VSI data structure since it is needed to unregister and
+@@ -3084,27 +3081,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ }
+
+ /**
+- * ice_vsi_realloc_stat_arrays - Frees unused stat structures
++ * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones
+ * @vsi: VSI pointer
+- * @prev_txq: Number of Tx rings before ring reallocation
+- * @prev_rxq: Number of Rx rings before ring reallocation
+ */
+-static void
+-ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
++static int
++ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
+ {
++ u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
++ u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
++ struct ice_ring_stats **tx_ring_stats;
++ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
++ u16 prev_txq = vsi->alloc_txq;
++ u16 prev_rxq = vsi->alloc_rxq;
+ int i;
+
+- if (!prev_txq || !prev_rxq)
+- return;
+- if (vsi->type == ICE_VSI_CHNL)
+- return;
+-
+ vsi_stat = pf->vsi_stats[vsi->idx];
+
+- if (vsi->num_txq < prev_txq) {
+- for (i = vsi->num_txq; i < prev_txq; i++) {
++ if (req_txq < prev_txq) {
++ for (i = req_txq; i < prev_txq; i++) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+@@ -3112,14 +3108,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+ }
+ }
+
+- if (vsi->num_rxq < prev_rxq) {
+- for (i = vsi->num_rxq; i < prev_rxq; i++) {
++ tx_ring_stats = vsi_stat->tx_ring_stats;
++ vsi_stat->tx_ring_stats =
++ krealloc_array(vsi_stat->tx_ring_stats, req_txq,
++ sizeof(*vsi_stat->tx_ring_stats),
++ GFP_KERNEL | __GFP_ZERO);
++ if (!vsi_stat->tx_ring_stats) {
++ vsi_stat->tx_ring_stats = tx_ring_stats;
++ return -ENOMEM;
++ }
++
++ if (req_rxq < prev_rxq) {
++ for (i = req_rxq; i < prev_rxq; i++) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+ }
++
++ rx_ring_stats = vsi_stat->rx_ring_stats;
++ vsi_stat->rx_ring_stats =
++ krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
++ sizeof(*vsi_stat->rx_ring_stats),
++ GFP_KERNEL | __GFP_ZERO);
++ if (!vsi_stat->rx_ring_stats) {
++ vsi_stat->rx_ring_stats = rx_ring_stats;
++ return -ENOMEM;
++ }
++
++ return 0;
+ }
+
+ /**
+@@ -3136,9 +3154,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+ {
+ struct ice_vsi_cfg_params params = {};
+ struct ice_coalesce_stored *coalesce;
+- int ret, prev_txq, prev_rxq;
+- int prev_num_q_vectors = 0;
++ int prev_num_q_vectors;
+ struct ice_pf *pf;
++ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+@@ -3150,43 +3168,47 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
+
+- coalesce = kcalloc(vsi->num_q_vectors,
+- sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+- if (!coalesce)
+- return -ENOMEM;
++ mutex_lock(&vsi->xdp_state_lock);
+
+- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+-
+- prev_txq = vsi->num_txq;
+- prev_rxq = vsi->num_rxq;
++ ret = ice_vsi_realloc_stat_arrays(vsi);
++ if (ret)
++ goto unlock;
+
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
+ if (ret)
+- goto err_vsi_cfg;
++ goto unlock;
++
++ coalesce = kcalloc(vsi->num_q_vectors,
++ sizeof(struct ice_coalesce_stored), GFP_KERNEL);
++ if (!coalesce) {
++ ret = -ENOMEM;
++ goto decfg;
++ }
++
++ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
+
+ ret = ice_vsi_cfg_tc_lan(pf, vsi);
+ if (ret) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
+ ret = -EIO;
+- goto err_vsi_cfg_tc_lan;
++ goto free_coalesce;
+ }
+
+- kfree(coalesce);
+- return ice_schedule_reset(pf, ICE_RESET_PFR);
++ ret = ice_schedule_reset(pf, ICE_RESET_PFR);
++ goto free_coalesce;
+ }
+
+- ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
+-
+ ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
+- kfree(coalesce);
+-
+- return 0;
++ clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
+
+-err_vsi_cfg_tc_lan:
+- ice_vsi_decfg(vsi);
+-err_vsi_cfg:
++free_coalesce:
+ kfree(coalesce);
++decfg:
++ if (ret)
++ ice_vsi_decfg(vsi);
++unlock:
++ mutex_unlock(&vsi->xdp_state_lock);
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7784135160fd20..9f12c9a0fe2968 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -84,7 +84,8 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+
+ bool netif_is_ice(const struct net_device *dev)
+ {
+- return dev && (dev->netdev_ops == &ice_netdev_ops);
++ return dev && (dev->netdev_ops == &ice_netdev_ops ||
++ dev->netdev_ops == &ice_netdev_safe_mode_ops);
+ }
+
+ /**
+@@ -517,25 +518,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+ pf->vf_agg_node[node].num_vsis = 0;
+ }
+
+-/**
+- * ice_clear_sw_switch_recipes - clear switch recipes
+- * @pf: board private structure
+- *
+- * Mark switch recipes as not created in sw structures. There are cases where
+- * rules (especially advanced rules) need to be restored, either re-read from
+- * hardware or added again. For example after the reset. 'recp_created' flag
+- * prevents from doing that and need to be cleared upfront.
+- */
+-static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
+-{
+- struct ice_sw_recipe *recp;
+- u8 i;
+-
+- recp = pf->hw.switch_info->recp_list;
+- for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
+- recp[i].recp_created = false;
+-}
+-
+ /**
+ * ice_prepare_for_reset - prep for reset
+ * @pf: board private structure
+@@ -557,6 +539,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
+ return;
+
++ synchronize_irq(pf->oicr_irq.virq);
++
+ ice_unplug_aux_dev(pf);
+
+ /* Notify VFs of impending reset */
+@@ -570,8 +554,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ mutex_unlock(&pf->vfs.table_lock);
+
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+- if (reset_type != ICE_RESET_PFR)
+- ice_clear_sw_switch_recipes(pf);
++ rtnl_lock();
++ ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
++ rtnl_unlock();
+ }
+
+ /* release ADQ specific HW and SW resources */
+@@ -604,11 +589,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+ memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ }
+ }
++
++ if (vsi->netdev)
++ netif_device_detach(vsi->netdev);
+ skip:
+
+ /* clear SW filtering DB */
+ ice_clear_hw_tbls(hw);
+ /* disable the VSIs and their queues that are not already DOWN */
++ set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
+ ice_pf_dis_all_vsi(pf, false);
+
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+@@ -2146,7 +2135,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
+
+ /* Ensure we have media as we cannot configure a medialess port */
+ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+- return -EPERM;
++ return -ENOMEDIUM;
+
+ ice_print_topo_conflict(vsi);
+
+@@ -2657,10 +2646,12 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
++ * @cfg_type: create from scratch or restore the existing configuration
+ *
+ * Return 0 on success and negative value on error
+ */
+-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
++int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
++ enum ice_xdp_cfg cfg_type)
+ {
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int xdp_rings_rem = vsi->num_xdp_txq;
+@@ -2736,7 +2727,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ * taken into account at the end of ice_vsi_rebuild, where
+ * ice_cfg_vsi_lan is being called
+ */
+- if (ice_is_reset_in_progress(pf->state))
++ if (cfg_type == ICE_XDP_CFG_PART)
+ return 0;
+
+ /* tell the Tx scheduler that right now we have
+@@ -2788,22 +2779,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ /**
+ * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+ * @vsi: VSI to remove XDP rings
++ * @cfg_type: disable XDP permanently or allow it to be restored later
+ *
+ * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+ * resources
+ */
+-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
++int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
+ {
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf = vsi->back;
+ int i, v_idx;
+
+ /* q_vectors are freed in reset path so there's no point in detaching
+- * rings; in case of rebuild being triggered not from reset bits
+- * in pf->state won't be set, so additionally check first q_vector
+- * against NULL
++ * rings
+ */
+- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
++ if (cfg_type == ICE_XDP_CFG_PART)
+ goto free_qmap;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+@@ -2844,7 +2834,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+ if (static_key_enabled(&ice_xdp_locking_key))
+ static_branch_dec(&ice_xdp_locking_key);
+
+- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
++ if (cfg_type == ICE_XDP_CFG_PART)
+ return 0;
+
+ ice_vsi_assign_bpf_prog(vsi, NULL);
+@@ -2924,8 +2914,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+ {
+ unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+- bool if_running = netif_running(vsi->netdev);
+ int ret = 0, xdp_ring_err = 0;
++ bool if_running;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (frame_size > ice_max_xdp_frame_size(vsi)) {
+@@ -2936,13 +2926,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ }
+
+ /* hot swap progs and avoid toggling link */
+- if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
++ if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
++ test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ return 0;
+ }
+
++ if_running = netif_running(vsi->netdev) &&
++ !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
++
+ /* need to stop netdev while setting up the program for Rx rings */
+- if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
++ if (if_running) {
+ ret = ice_down(vsi);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
+@@ -2955,7 +2949,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ if (xdp_ring_err) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ } else {
+- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
++ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
++ ICE_XDP_CFG_FULL);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ }
+@@ -2966,7 +2961,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
+ } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
+- xdp_ring_err = ice_destroy_xdp_rings(vsi);
++ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+@@ -3007,21 +3002,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ {
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
++ int ret;
+
+ if (vsi->type != ICE_VSI_PF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ return -EINVAL;
+ }
+
++ mutex_lock(&vsi->xdp_state_lock);
++
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+- return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
++ ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
++ break;
+ case XDP_SETUP_XSK_POOL:
+- return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
+- xdp->xsk.queue_id);
++ ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
++ break;
+ default:
+- return -EINVAL;
++ ret = -EINVAL;
+ }
++
++ mutex_unlock(&vsi->xdp_state_lock);
++ return ret;
+ }
+
+ /**
+@@ -3956,7 +3958,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
+ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+ {
+ struct ice_pf *pf = vsi->back;
+- int err = 0, timeout = 50;
++ int i, err = 0, timeout = 50;
+
+ if (!new_rx && !new_tx)
+ return -EINVAL;
+@@ -3975,15 +3977,32 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+
+ /* set for the next time the netdev is started */
+ if (!netif_running(vsi->netdev)) {
+- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ if (err)
++ goto rebuild_err;
+ dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
+ goto done;
+ }
+
+ ice_vsi_close(vsi);
+- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++ if (err)
++ goto rebuild_err;
++
++ ice_for_each_traffic_class(i) {
++ if (vsi->tc_cfg.ena_tc & BIT(i))
++ netdev_set_tc_queue(vsi->netdev,
++ vsi->tc_cfg.tc_info[i].netdev_tc,
++ vsi->tc_cfg.tc_info[i].qcount_tx,
++ vsi->tc_cfg.tc_info[i].qoffset);
++ }
+ ice_pf_dcb_recfg(pf, locked);
+ ice_vsi_open(vsi);
++ goto done;
++
++rebuild_err:
++ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
++ err);
+ done:
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+@@ -5346,7 +5365,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
+ */
+ disabled = ice_service_task_stop(pf);
+
+- ice_unplug_aux_dev(pf);
++ ice_deinit_rdma(pf);
+
+ /* Already suspended?, then there is nothing to do */
+ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
+@@ -5426,6 +5445,11 @@ static int __maybe_unused ice_resume(struct device *dev)
+ if (ret)
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+
++ ret = ice_init_rdma(pf);
++ if (ret)
++ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
++ ret);
++
+ clear_bit(ICE_DOWN, pf->state);
+ /* Now perform PF reset and rebuild */
+ reset_type = ICE_RESET_PFR;
+@@ -6557,6 +6581,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
+ {
+ struct rtnl_link_stats64 *net_stats, *stats_prev;
+ struct rtnl_link_stats64 *vsi_stats;
++ struct ice_pf *pf = vsi->back;
+ u64 pkts, bytes;
+ int i;
+
+@@ -6602,21 +6627,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
+ net_stats = &vsi->net_stats;
+ stats_prev = &vsi->net_stats_prev;
+
+- /* clear prev counters after reset */
+- if (vsi_stats->tx_packets < stats_prev->tx_packets ||
+- vsi_stats->rx_packets < stats_prev->rx_packets) {
+- stats_prev->tx_packets = 0;
+- stats_prev->tx_bytes = 0;
+- stats_prev->rx_packets = 0;
+- stats_prev->rx_bytes = 0;
++ /* Update netdev counters, but keep in mind that values could start at
++ * a random value after PF reset. And as we increase the reported stat by
++ * the diff of Cur - Prev, we need to be sure that Prev is valid. If it's not,
++ * let's skip this round.
++ */
++ if (likely(pf->stat_prev_loaded)) {
++ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
++ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
++ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
++ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+ }
+
+- /* update netdev counters */
+- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+-
+ stats_prev->tx_packets = vsi_stats->tx_packets;
+ stats_prev->tx_bytes = vsi_stats->tx_bytes;
+ stats_prev->rx_packets = vsi_stats->rx_packets;
+@@ -7271,6 +7293,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
+ */
+ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ {
++ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool dvm;
+@@ -7423,6 +7446,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+ ice_rebuild_arfs(pf);
+ }
+
++ if (vsi && vsi->netdev)
++ netif_device_attach(vsi->netdev);
++
+ ice_update_pf_netdev_link(pf);
+
+ /* tell the firmware we are up */
+@@ -7786,6 +7812,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ pf_sw = pf->first_sw;
+ /* find the attribute in the netlink message */
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++ if (!br_spec)
++ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+@@ -9173,8 +9201,14 @@ int ice_stop(struct net_device *netdev)
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+- vsi->vsi_num, link_err);
++ if (link_err == -ENOMEDIUM)
++ netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
++ vsi->vsi_num);
++ else
++ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
++ vsi->vsi_num, link_err);
++
++ ice_vsi_close(vsi);
+ return -EIO;
+ }
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
+index f6f52a24806622..2fb43cded572c0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
++++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
+@@ -441,8 +441,7 @@ int
+ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+ {
+- u16 pfa_len, pfa_ptr;
+- u16 next_tlv;
++ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
+ int status;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+@@ -455,11 +454,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
++
++ /* The Preserved Fields Area contains a sequence of Type-Length-Value
++ * structures which define its contents. The PFA length includes all
++ * of the TLVs, plus the initial length word itself, *and* one final
++ * word at the end after all of the TLVs.
++ */
++ if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
++ dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
++ pfa_ptr, pfa_len);
++ return -EINVAL;
++ }
++
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+- while (next_tlv < pfa_ptr + pfa_len) {
++ while (next_tlv < max_tlv) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+@@ -483,10 +494,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ }
+ return -EINVAL;
+ }
+- /* Check next TLV, i.e. current TLV pointer + length + 2 words
+- * (for current TLV's type and length)
+- */
+- next_tlv = next_tlv + tlv_len + 2;
++
++ if (check_add_overflow(next_tlv, 2, &next_tlv) ||
++ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
++ dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
++ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
++ return -EINVAL;
++ }
+ }
+ /* Module does not exist */
+ return -ENOENT;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 81d96a40d5a743..c4270708a76947 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2246,18 +2246,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ static void
+ ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+ {
+- info->n_per_out = N_PER_OUT_E810;
+-
+- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+- info->n_ext_ts = N_EXT_TS_E810;
+-
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810;
++ info->n_per_out = N_PER_OUT_E810T;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
++ } else if (ice_is_e810t(&pf->hw)) {
++ info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
++ info->n_per_out = N_PER_OUT_NO_SMA_E810T;
++ } else {
++ info->n_per_out = N_PER_OUT_E810;
++ info->n_ext_ts = N_EXT_TS_E810;
+ }
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
+index c0533d7b66b996..908bcd07380333 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sched.c
++++ b/drivers/net/ethernet/intel/ice/ice_sched.c
+@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
+ if (!root)
+ return -ENOMEM;
+
+- /* coverity[suspicious_sizeof] */
+ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
+- sizeof(*root), GFP_KERNEL);
++ sizeof(*root->children), GFP_KERNEL);
+ if (!root->children) {
+ devm_kfree(ice_hw_to_dev(hw), root);
+ return -ENOMEM;
+@@ -186,10 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ if (!node)
+ return -ENOMEM;
+ if (hw->max_children[layer]) {
+- /* coverity[suspicious_sizeof] */
+ node->children = devm_kcalloc(ice_hw_to_dev(hw),
+ hw->max_children[layer],
+- sizeof(*node), GFP_KERNEL);
++ sizeof(*node->children), GFP_KERNEL);
+ if (!node->children) {
+ devm_kfree(ice_hw_to_dev(hw), node);
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 2f77b684ff765d..19f730a68fa21c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1829,7 +1829,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
+ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ lkup_type == ICE_SW_LKUP_PROMISC ||
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+- lkup_type == ICE_SW_LKUP_DFLT) {
++ lkup_type == ICE_SW_LKUP_DFLT ||
++ lkup_type == ICE_SW_LKUP_LAST) {
+ sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
+ } else if (lkup_type == ICE_SW_LKUP_VLAN) {
+ if (opc == ice_aqc_opc_alloc_res)
+@@ -2032,12 +2033,12 @@ ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ * ice_aq_map_recipe_to_profile - Map recipe to packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
++ * @r_assoc: Recipe bitmap filled in and needs to be returned as a response
+ * @cd: pointer to command details structure or NULL
+ * Recipe to profile association (0x0291)
+ */
+ int
+-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
++ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
+ struct ice_sq_cd *cd)
+ {
+ struct ice_aqc_recipe_to_profile *cmd;
+@@ -2049,7 +2050,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ /* Set the recipe ID bit in the bitmask to let the device know which
+ * profile we are associating the recipe to
+ */
+- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
++ cmd->recipe_assoc = cpu_to_le64(r_assoc);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ }
+@@ -2058,12 +2059,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ * ice_aq_get_recipe_to_profile - Map recipe to packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
++ * @r_assoc: Recipe bitmap filled in and needs to be returned as a response
+ * @cd: pointer to command details structure or NULL
+ * Associate profile ID with given recipe (0x0293)
+ */
+ int
+-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
++ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
+ struct ice_sq_cd *cd)
+ {
+ struct ice_aqc_recipe_to_profile *cmd;
+@@ -2076,7 +2077,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status)
+- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
++ *r_assoc = le64_to_cpu(cmd->recipe_assoc);
+
+ return status;
+ }
+@@ -2121,6 +2122,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
+ {
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
++ u64 recp_assoc;
+ u16 i;
+
+ for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
+@@ -2128,8 +2130,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
+
+ bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
+ bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
+- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
++ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
+ continue;
++ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
+ bitmap_copy(profile_to_recipe[i], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+@@ -2268,10 +2271,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
+ /* Propagate some data to the recipe database */
+ recps[idx].is_root = !!is_root;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+- recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
+- ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+- recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
+- ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
++ recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl &
++ ICE_AQ_RECIPE_ACT_NEED_PASS_L2);
++ recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl &
++ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2);
+ bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
+ recps[idx].chain_idx = root_bufs.content.result_indx &
+@@ -2773,7 +2776,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
+ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ lkup_type == ICE_SW_LKUP_PROMISC ||
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+- lkup_type == ICE_SW_LKUP_DFLT)
++ lkup_type == ICE_SW_LKUP_DFLT ||
++ lkup_type == ICE_SW_LKUP_LAST)
+ rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ else if (lkup_type == ICE_SW_LKUP_VLAN)
+@@ -3068,7 +3072,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+- return 0;
++ return -EEXIST;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+@@ -3138,7 +3142,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+- if (list_itr->vsi_list_info) {
++ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (test_bit(vsi_handle, map_info->vsi_map)) {
+ *vsi_list_id = map_info->vsi_list_id;
+@@ -5431,22 +5435,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ */
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
++ u64 recp_assoc;
+ u16 j;
+
+ status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
+- (u8 *)r_bitmap, NULL);
++ &recp_assoc, NULL);
+ if (status)
+ goto err_unroll;
+
++ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
+ bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
++ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
+ status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
+- (u8 *)r_bitmap,
+- NULL);
++ recp_assoc, NULL);
+ ice_release_change_lock(hw);
+
+ if (status)
+@@ -6320,8 +6326,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+ if (!itr->vsi_list_info ||
+ !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+ continue;
+- /* Clearing it so that the logic can add it back */
+- clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.vsi_handle = vsi_handle;
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ /* update the src in case it is VSI num */
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
+index db7e501b7e0a48..89ffa1b51b5ad1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.h
++++ b/drivers/net/ethernet/intel/ice/ice_switch.h
+@@ -424,10 +424,10 @@ int ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd);
+ int
+-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
++ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
+ struct ice_sq_cd *cd);
+ int
+-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
++ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
+ struct ice_sq_cd *cd);
+
+ #endif /* _ICE_SWITCH_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 37b54db91df275..c213121aa5010a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
+ * - Tunnel flag (present if tunnel)
+ */
++ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
++ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
+ lkups_cnt++;
+@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
+ /* Always add direction metadata */
+ ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+
++ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
++ ice_rule_add_src_vsi_metadata(&list[i]);
++ i++;
++ }
++
+ rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
+ if (tc_fltr->tunnel_type != TNL_LAST) {
+ i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
+@@ -630,32 +637,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
+ return ice_tc_tun_get_type(dev) != TNL_LAST;
+ }
+
+-static int
+-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
+- struct flow_action_entry *act)
++static bool ice_tc_is_dev_uplink(struct net_device *dev)
++{
++ return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
++}
++
++static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
++ struct ice_tc_flower_fltr *fltr,
++ struct net_device *target_dev)
+ {
+ struct ice_repr *repr;
+
++ fltr->action.fltr_act = ICE_FWD_TO_VSI;
++
++ if (ice_is_port_repr_netdev(filter_dev) &&
++ ice_is_port_repr_netdev(target_dev)) {
++ repr = ice_netdev_to_repr(target_dev);
++
++ fltr->dest_vsi = repr->src_vsi;
++ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
++ } else if (ice_is_port_repr_netdev(filter_dev) &&
++ ice_tc_is_dev_uplink(target_dev)) {
++ repr = ice_netdev_to_repr(filter_dev);
++
++ fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
++ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
++ } else if (ice_tc_is_dev_uplink(filter_dev) &&
++ ice_is_port_repr_netdev(target_dev)) {
++ repr = ice_netdev_to_repr(target_dev);
++
++ fltr->dest_vsi = repr->src_vsi;
++ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
++ } else {
++ NL_SET_ERR_MSG_MOD(fltr->extack,
++ "Unsupported netdevice in switchdev mode");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++ice_tc_setup_drop_action(struct net_device *filter_dev,
++ struct ice_tc_flower_fltr *fltr)
++{
++ fltr->action.fltr_act = ICE_DROP_PACKET;
++
++ if (ice_is_port_repr_netdev(filter_dev)) {
++ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
++ } else if (ice_tc_is_dev_uplink(filter_dev)) {
++ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
++ } else {
++ NL_SET_ERR_MSG_MOD(fltr->extack,
++ "Unsupported netdevice in switchdev mode");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
++ struct ice_tc_flower_fltr *fltr,
++ struct flow_action_entry *act)
++{
++ int err;
++
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+- fltr->action.fltr_act = ICE_DROP_PACKET;
++ err = ice_tc_setup_drop_action(filter_dev, fltr);
++ if (err)
++ return err;
++
+ break;
+
+ case FLOW_ACTION_REDIRECT:
+- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+-
+- if (ice_is_port_repr_netdev(act->dev)) {
+- repr = ice_netdev_to_repr(act->dev);
+-
+- fltr->dest_vsi = repr->src_vsi;
+- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+- } else if (netif_is_ice(act->dev) ||
+- ice_is_tunnel_supported(act->dev)) {
+- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+- } else {
+- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
+- return -EINVAL;
+- }
++ err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
++ if (err)
++ return err;
+
+ break;
+
+@@ -680,7 +738,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ int ret;
+ int i;
+
+- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
++ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+@@ -696,10 +754,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ goto exit;
+ }
+
+- /* egress traffic is always redirect to uplink */
+- if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+- fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
+-
+ rule_info.sw_act.fltr_act = fltr->action.fltr_act;
+ if (fltr->action.fltr_act != ICE_DROP_PACKET)
+ rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
+@@ -713,17 +767,37 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ rule_info.flags_info.act_valid = true;
+
+ if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
++ /* Uplink to VF */
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+- } else {
++ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
++ fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
++ /* VF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
++ /* This is a specific case. The destination VSI index is
++ * overwritten by the source VSI index. This type of filter
++ * should allow the packet to go to the LAN, not to the
++ * VSI passed here. It should set LAN_EN bit only. However,
++ * the VSI must be a valid one. Setting source VSI index
++ * here is safe. Even if the switch result sets both LAN_EN
++ * and LB_EN (which would normally pass the packet to this VSI),
++ * the packet won't be seen on the VSI, because local loopback
++ * is turned off.
++ */
++ rule_info.sw_act.vsi_handle = vsi->idx;
++ } else {
++ /* VF to VF */
++ rule_info.sw_act.flag |= ICE_FLTR_TX;
++ rule_info.sw_act.src = vsi->idx;
++ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = fltr->cookie;
++ rule_info.src_vsi = vsi->idx;
+
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
+@@ -1385,7 +1459,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
++ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
++ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
++ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
++ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
+ return -EOPNOTSUPP;
+ } else {
+@@ -1745,16 +1822,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+
+ /**
+ * ice_parse_tc_flower_actions - Parse the actions for a TC filter
++ * @filter_dev: Pointer to device on which filter is being added
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Parse the actions for a TC filter
+ */
+-static int
+-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+- struct flow_cls_offload *cls_flower,
+- struct ice_tc_flower_fltr *fltr)
++static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
++ struct ice_vsi *vsi,
++ struct flow_cls_offload *cls_flower,
++ struct ice_tc_flower_fltr *fltr)
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_action *flow_action = &rule->action;
+@@ -1769,7 +1847,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+
+ flow_action_for_each(i, act, flow_action) {
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+- err = ice_eswitch_tc_parse_action(fltr, act);
++ err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
+ else
+ err = ice_tc_parse_action(vsi, fltr, act);
+ if (err)
+@@ -1856,7 +1934,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
+ if (err < 0)
+ goto err;
+
+- err = ice_parse_tc_flower_actions(vsi, f, fltr);
++ err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 52d0a126eb6161..429afffa4c3169 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
+ if (rx_ring->vsi->type == ICE_VSI_PF)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+- rx_ring->xdp_prog = NULL;
++ WRITE_ONCE(rx_ring->xdp_prog, NULL);
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
+ if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+
+- if (rx_ring->vsi->type == ICE_VSI_PF &&
+- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
+- goto err;
+ return 0;
+
+ err:
+@@ -526,30 +521,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
+ return -ENOMEM;
+ }
+
+-/**
+- * ice_rx_frame_truesize
+- * @rx_ring: ptr to Rx ring
+- * @size: size
+- *
+- * calculate the truesize with taking into the account PAGE_SIZE of
+- * underlying arch
+- */
+-static unsigned int
+-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
+-{
+- unsigned int truesize;
+-
+-#if (PAGE_SIZE < 8192)
+- truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+-#else
+- truesize = rx_ring->rx_offset ?
+- SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+- SKB_DATA_ALIGN(size);
+-#endif
+- return truesize;
+-}
+-
+ /**
+ * ice_run_xdp - Executes an XDP program on initialized xdp_buff
+ * @rx_ring: Rx ring
+@@ -600,9 +571,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ ret = ICE_XDP_CONSUMED;
+ }
+ exit:
+- rx_buf->act = ret;
+- if (unlikely(xdp_buff_has_frags(xdp)))
+- ice_set_rx_bufs_act(xdp, rx_ring, ret);
++ ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ }
+
+ /**
+@@ -841,16 +810,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+ if (!dev_page_is_reusable(page))
+ return false;
+
+-#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
+ return false;
+-#else
++#if (PAGE_SIZE >= 8192)
+ #define ICE_LAST_OFFSET \
+- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
++ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
+ if (rx_buf->page_offset > ICE_LAST_OFFSET)
+ return false;
+-#endif /* PAGE_SIZE < 8192) */
++#endif /* PAGE_SIZE >= 8192) */
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+@@ -890,14 +858,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+- if (unlikely(xdp_buff_has_frags(xdp)))
+- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
++ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
++ /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
++ * can pop off frags but the driver has to handle it on its own
++ */
++ rx_ring->nr_frags = sinfo->nr_frags;
+
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+@@ -950,12 +921,7 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ struct ice_rx_buf *rx_buf;
+
+ rx_buf = &rx_ring->rx_buf[ntc];
+- rx_buf->pgcnt =
+-#if (PAGE_SIZE < 8192)
+- page_count(rx_buf->page);
+-#else
+- 0;
+-#endif
++ rx_buf->pgcnt = page_count(rx_buf->page);
+ prefetchw(rx_buf->page);
+
+ if (!size)
+@@ -1162,11 +1128,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ bool failure;
+ u32 first;
+
+- /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+-#if (PAGE_SIZE < 8192)
+- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+-#endif
+-
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (xdp_prog) {
+ xdp_ring = rx_ring->xdp_ring;
+@@ -1226,10 +1187,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+-#if (PAGE_SIZE > 4096)
+- /* At larger PAGE_SIZE, frame_sz depend on len size */
+- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
+-#endif
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ break;
+@@ -1249,6 +1206,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
++ rx_ring->nr_frags = 0;
+ continue;
+ construct_skb:
+ if (likely(ice_ring_uses_build_skb(rx_ring)))
+@@ -1264,10 +1222,12 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ ICE_XDP_CONSUMED);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
++ rx_ring->nr_frags = 0;
+ break;
+ }
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
++ rx_ring->nr_frags = 0;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index 166413fc33f48f..407d4c320097f6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -333,6 +333,7 @@ struct ice_rx_ring {
+ struct ice_channel *ch;
+ struct ice_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
++ u32 nr_frags;
+ dma_addr_t dma; /* physical address of ring */
+ u64 cached_phctime;
+ u16 rx_buf_len;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+index 115969ecdf7b97..b0e56675f98b2a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+@@ -12,26 +12,39 @@
+ * act: action to store onto Rx buffers related to XDP buffer parts
+ *
+ * Set action that should be taken before putting Rx buffer from first frag
+- * to one before last. Last one is handled by caller of this function as it
+- * is the EOP frag that is currently being processed. This function is
+- * supposed to be called only when XDP buffer contains frags.
++ * to the last.
+ */
+ static inline void
+ ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+ const unsigned int act)
+ {
+- const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+- u32 first = rx_ring->first_desc;
+- u32 nr_frags = sinfo->nr_frags;
++ u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
++ u32 nr_frags = rx_ring->nr_frags + 1;
++ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ struct ice_rx_buf *buf;
+
+ for (int i = 0; i < nr_frags; i++) {
+- buf = &rx_ring->rx_buf[first];
++ buf = &rx_ring->rx_buf[idx];
+ buf->act = act;
+
+- if (++first == cnt)
+- first = 0;
++ if (++idx == cnt)
++ idx = 0;
++ }
++
++ /* adjust pagecnt_bias on frags freed by XDP prog */
++ if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
++ u32 delta = rx_ring->nr_frags - sinfo_frags;
++
++ while (delta) {
++ if (idx == 0)
++ idx = cnt - 1;
++ else
++ idx--;
++ buf = &rx_ring->rx_buf[idx];
++ buf->pagecnt_bias--;
++ delta--;
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index 24e4f4d897b666..03b9d7d748518c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -827,12 +827,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
+ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ {
+ struct ice_pf *pf = vf->pf;
++ struct ice_lag *lag;
+ struct ice_vsi *vsi;
++ u8 act_prt, pri_prt;
+ struct device *dev;
+ int err = 0;
+ bool rsd;
+
+ dev = ice_pf_to_dev(pf);
++ act_prt = ICE_LAG_INVALID_PORT;
++ pri_prt = pf->hw.port_info->lport;
+
+ if (flags & ICE_VF_RESET_NOTIFY)
+ ice_notify_vf_reset(vf);
+@@ -848,6 +852,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
++ lag = pf->lag;
++ mutex_lock(&pf->lag_mutex);
++ if (lag && lag->bonded && lag->primary) {
++ act_prt = lag->active_port;
++ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
++ lag->upper_netdev)
++ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
++ else
++ act_prt = ICE_LAG_INVALID_PORT;
++ }
++
+ if (ice_is_vf_disabled(vf)) {
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+@@ -932,6 +947,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
+ ice_mbx_clear_malvf(&vf->mbx_info);
+
+ out_unlock:
++ if (lag && lag->bonded && lag->primary &&
++ act_prt != ICE_LAG_INVALID_PORT)
++ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
++ mutex_unlock(&pf->lag_mutex);
++
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+index d7b10dc67f0352..b3e1bdcb80f84d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+@@ -26,29 +26,31 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+- if (ice_is_dvm_ena(&pf->hw)) {
+- vlan_ops = &vsi->outer_vlan_ops;
+-
+- /* setup outer VLAN ops */
+- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
++ /* setup inner VLAN ops */
++ vlan_ops = &vsi->inner_vlan_ops;
+
+- /* setup inner VLAN ops */
+- vlan_ops = &vsi->inner_vlan_ops;
++ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+- } else {
+- vlan_ops = &vsi->inner_vlan_ops;
+
++ /* setup outer VLAN ops */
++ vlan_ops = &vsi->outer_vlan_ops;
++ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
++ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
++ } else {
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
+- vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
+ }
++
++ /* all Rx traffic should be in the domain of the assigned port VLAN,
++ * so prevent disabling Rx VLAN filtering
++ */
++ vlan_ops->dis_rx_filtering = noop_vlan;
++
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ }
+
+@@ -77,6 +79,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi)
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ }
+
++ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
++
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+@@ -141,7 +145,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+ &vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index db97353efd067f..6c6f267dcccc3f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+- VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
+ vf->driver_caps);
+
+- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
++ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+- } else {
+- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+- else
+- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+- }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+@@ -503,7 +496,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
+ vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+
+- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
++ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+@@ -549,27 +542,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+ */
+ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+ {
+- struct ice_pf *pf = vf->pf;
+- struct ice_vsi *vsi;
+-
+- vsi = ice_find_vsi(pf, vsi_id);
+-
+- return (vsi && (vsi->vf == vf));
++ return vsi_id == ICE_VF_VSI_ID;
+ }
+
+ /**
+ * ice_vc_isvalid_q_id
+- * @vf: pointer to the VF info
+- * @vsi_id: VSI ID
++ * @vsi: VSI to check queue ID against
+ * @qid: VSI relative queue ID
+ *
+ * check for the valid queue ID
+ */
+-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
++static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
+ {
+- struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
+ /* allocated Tx and Rx queues should be always equal for VF VSI */
+- return (vsi && (qid < vsi->alloc_txq));
++ return qid < vsi->alloc_txq;
+ }
+
+ /**
+@@ -820,8 +806,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+ int status;
+
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
+- ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
++ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
++ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+@@ -829,11 +815,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+ goto error_param;
+ }
+
+- ctx->info.q_opt_rss = ((lut_type <<
+- ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+- (hash_type &
+- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
++ ctx->info.q_opt_rss =
++ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
++ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+@@ -1271,7 +1255,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+@@ -1293,7 +1277,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+@@ -1398,7 +1382,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+@@ -1424,7 +1408,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
++ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+@@ -1480,7 +1464,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
++ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+@@ -1494,7 +1478,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
++ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+@@ -1600,9 +1584,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
++ struct ice_lag *lag;
+ struct ice_vsi *vsi;
++ u8 act_prt, pri_prt;
+ int i = -1, q_idx;
+
++ lag = pf->lag;
++ mutex_lock(&pf->lag_mutex);
++ act_prt = ICE_LAG_INVALID_PORT;
++ pri_prt = pf->hw.port_info->lport;
++ if (lag && lag->bonded && lag->primary) {
++ act_prt = lag->active_port;
++ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
++ lag->upper_netdev)
++ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
++ else
++ act_prt = ICE_LAG_INVALID_PORT;
++ }
++
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+@@ -1628,7 +1627,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
++ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+@@ -1710,6 +1709,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ }
+ }
+
++ if (lag && lag->bonded && lag->primary &&
++ act_prt != ICE_LAG_INVALID_PORT)
++ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
++ mutex_unlock(&pf->lag_mutex);
++
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+@@ -1724,6 +1728,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ vf->vf_id, i);
+ }
+
++ if (lag && lag->bonded && lag->primary &&
++ act_prt != ICE_LAG_INVALID_PORT)
++ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
++ mutex_unlock(&pf->lag_mutex);
++
+ ice_lag_move_new_vf_nodes(vf);
+
+ /* send the response to the VF */
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+index cd747718de7380..a0d03f350dfc7d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+@@ -19,6 +19,15 @@
+ #define ICE_MAX_MACADDR_PER_VF 18
+ #define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+
++/* VFs only get a single VSI. For ice hardware, the VF does not need to know
++ * its VSI index. However, the virtchnl interface requires a VSI number,
++ * mainly due to legacy hardware.
++ *
++ * Since the VF doesn't need this information, report a static value to the VF
++ * instead of leaking any information about the PF or hardware setup.
++ */
++#define ICE_VF_VSI_ID 1
++
+ struct ice_virtchnl_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+index 7d547fa616fa69..588b77f1a4bf67 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+@@ -13,8 +13,6 @@
+ * - opcodes needed by VF when caps are activated
+ *
+ * Caps that don't use new opcodes (no opcodes should be allowed):
+- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
+- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
+ * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
+ * - VIRTCHNL_VF_OFFLOAD_CRC
+ * - VIRTCHNL_VF_OFFLOAD_RX_POLLING
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index daa6a1e894cfc2..974c71490d97c0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -107,9 +107,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
+ return -EINVAL;
+
+- if (vsi_id != vf->lan_vsi_num)
+- return -EINVAL;
+-
+ if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
+ return -EINVAL;
+
+@@ -554,6 +551,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
++
++ fdir->fdir_fltr_cnt_total = 0;
+ }
+
+ /**
+@@ -1570,6 +1569,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ resp->status = status;
+ resp->flow_id = conf->flow_id;
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
++ vf->fdir.fdir_fltr_cnt_total++;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+@@ -1634,6 +1634,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
+ resp->status = status;
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
++ vf->fdir.fdir_fltr_cnt_total--;
+
+ ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
+ (u8 *)resp, len);
+@@ -1800,6 +1801,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ struct virtchnl_fdir_add *stat = NULL;
+ struct virtchnl_fdir_fltr_conf *conf;
+ enum virtchnl_status_code v_ret;
++ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int is_tun = 0;
+@@ -1808,6 +1810,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
++ vf_vsi = ice_get_vf_vsi(vf);
++
++#define ICE_VF_MAX_FDIR_FILTERS 128
++ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
++ vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
++ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
++ dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
++ vf->vf_id);
++ goto err_exit;
++ }
++
+ ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+index c5bcc8d7481ca6..ac6dcab454b499 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
+ struct ice_vf_fdir {
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
++ u16 fdir_fltr_cnt_total;
+ struct ice_fd_hw_prof **fdir_prof;
+
+ struct idr fdir_rule_idr;
+diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+index 76266e709a392e..3ecab12baea334 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+ return -EINVAL;
+
+ err = ice_fltr_add_vlan(vsi, vlan);
+- if (err && err != -EEXIST) {
++ if (!err)
++ vsi->num_vlan++;
++ else if (err == -EEXIST)
++ err = 0;
++ else
+ dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ vlan->vid, vsi->vsi_num, err);
+- return err;
+- }
+
+- vsi->num_vlan++;
+- return 0;
++ return err;
+ }
+
+ /**
+@@ -131,6 +132,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+ {
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
++ u8 *ivf;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+@@ -143,19 +145,24 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+ if (!ctxt)
+ return -ENOMEM;
+
++ ivf = &ctxt->info.inner_vlan_flags;
++
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+- if (ena)
++ if (ena) {
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+- ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
+- else
++ *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
++ ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH);
++ } else {
+ /* Disable stripping. Leave tag in packet */
+- ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
++ *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
++ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
++ }
+
+ /* Allow all packets untagged/tagged */
+- ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
++ *ivf |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 2a3f0834e13917..9a9b8698881b4c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+ static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+ {
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+- if (ice_is_xdp_ena_vsi(vsi)) {
+- synchronize_rcu();
++ if (ice_is_xdp_ena_vsi(vsi))
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+- }
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+ }
+
+@@ -179,8 +177,13 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
++
++ synchronize_net();
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
++ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
++ ice_qvec_toggle_napi(vsi, q_vector, false);
++
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+@@ -195,13 +198,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ if (err)
+ return err;
+ }
+- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+-
+- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+- if (err)
+- return err;
+
+- ice_qvec_toggle_napi(vsi, q_vector, false);
++ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+@@ -264,11 +262,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+ if (err)
+ goto free_buf;
+
+- clear_bit(ICE_CFG_BUSY, vsi->state);
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
++ clear_bit(ICE_CFG_BUSY, vsi->state);
+ free_buf:
+ kfree(qg_buf);
+ return err;
+@@ -288,7 +286,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
+ if (!pool)
+ return -EINVAL;
+
+- clear_bit(qid, vsi->af_xdp_zc_qps);
+ xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
+
+ return 0;
+@@ -319,8 +316,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ if (err)
+ return err;
+
+- set_bit(qid, vsi->af_xdp_zc_qps);
+-
+ return 0;
+ }
+
+@@ -368,11 +363,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+ {
+ struct ice_rx_ring *rx_ring;
+- unsigned long q;
++ uint i;
++
++ ice_for_each_rxq(vsi, i) {
++ rx_ring = vsi->rx_rings[i];
++ if (!rx_ring->xsk_pool)
++ continue;
+
+- for_each_set_bit(q, vsi->af_xdp_zc_qps,
+- max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+- rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+@@ -399,7 +396,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ goto failure;
+ }
+
+- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
++ if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
++ ice_is_xdp_ena_vsi(vsi);
+
+ if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+@@ -826,7 +824,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+- virt_to_page(xdp->data_hard_start), 0, size);
++ virt_to_page(xdp->data_hard_start),
++ XDP_PACKET_HEADROOM, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+@@ -897,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+
+ if (!first) {
+ first = xdp;
+- xdp_buff_clear_frags_flag(first);
+ } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ break;
+ }
+@@ -1068,6 +1066,10 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+
+ ice_clean_xdp_irq_zc(xdp_ring);
+
++ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
++ !netif_running(xdp_ring->vsi->netdev))
++ return true;
++
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
+
+@@ -1111,7 +1113,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *ring;
+
+- if (test_bit(ICE_VSI_DOWN, vsi->state))
++ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+index 8d6e44ee1895af..64dfc362d1dc49 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -222,8 +222,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+ }
+
+ /* set lan id */
+- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+- E1000_STATUS_FUNC_SHIFT;
++ hw->bus.func = FIELD_GET(E1000_STATUS_FUNC_MASK, rd32(E1000_STATUS));
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = igb_get_phy_id_82575(hw);
+@@ -262,8 +261,8 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+ if (ret_val)
+ goto out;
+
+- data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+- E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
++ data = FIELD_GET(E1000_M88E1112_MAC_CTRL_1_MODE_MASK,
++ data);
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+@@ -330,8 +329,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+ u32 eecd = rd32(E1000_EECD);
+ u16 size;
+
+- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+- E1000_EECD_SIZE_EX_SHIFT);
++ size = FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+@@ -2798,7 +2796,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+ return 0;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
++ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+@@ -2808,10 +2806,8 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+
+ for (i = 1; i < num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+- NVM_ETS_DATA_INDEX_SHIFT);
+- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+- NVM_ETS_DATA_LOC_SHIFT);
++ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
++ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
+
+ if (sensor_location != 0)
+ hw->phy.ops.read_i2c_byte(hw,
+@@ -2859,20 +2855,17 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+ return 0;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
++ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+- low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+- NVM_ETS_LTHRES_DELTA_SHIFT);
++ low_thresh_delta = FIELD_GET(NVM_ETS_LTHRES_DELTA_MASK, ets_cfg);
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+ for (i = 1; i <= num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+- NVM_ETS_DATA_INDEX_SHIFT);
+- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+- NVM_ETS_DATA_LOC_SHIFT);
++ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
++ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
+ therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
+index b9b9d35494d273..503b239868e8e8 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
+@@ -5,9 +5,9 @@
+ * e1000_i211
+ */
+
+-#include <linux/types.h>
++#include <linux/bitfield.h>
+ #include <linux/if_ether.h>
+-
++#include <linux/types.h>
+ #include "e1000_hw.h"
+ #include "e1000_i210.h"
+
+@@ -473,7 +473,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
++ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
+ status = 0;
+ break;
+ }
+@@ -483,8 +483,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+- version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+- >> 13;
++ version = FIELD_GET(E1000_INVM_VER_FIELD_TWO,
++ *next_record);
+ status = 0;
+ break;
+ }
+@@ -493,15 +493,15 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
++ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
+ status = 0;
+ break;
+ }
+ }
+
+ if (!status) {
+- invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+- >> E1000_INVM_MAJOR_SHIFT;
++ invm_ver->invm_major = FIELD_GET(E1000_INVM_MAJOR_MASK,
++ version);
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+@@ -520,7 +520,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+- (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
++ FIELD_GET(E1000_INVM_IMGTYPE_FIELD,
++ *next_record);
+ status = 0;
+ break;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
+index caf91c6f52b4d0..ceaec2cf08a435 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
++++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
+@@ -56,7 +56,7 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+ }
+
+ reg = rd32(E1000_STATUS);
+- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
++ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
+index fa136e6e932855..2dcd64d6dec317 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
+@@ -1,9 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+-#include <linux/if_ether.h>
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+-
++#include <linux/if_ether.h>
+ #include "e1000_mac.h"
+ #include "e1000_nvm.h"
+
+@@ -708,10 +708,10 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+- >> NVM_MAJOR_SHIFT;
+- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+- >> NVM_MINOR_SHIFT;
++ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK,
++ fw_version);
++ fw_vers->eep_minor = FIELD_GET(NVM_MINOR_MASK,
++ fw_version);
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
+@@ -753,15 +753,13 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+- >> NVM_MAJOR_SHIFT;
++ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK, fw_version);
+
+ /* check for old style version format in newer images*/
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+- eeprom_verl = (fw_version & NVM_MINOR_MASK)
+- >> NVM_MINOR_SHIFT;
++ eeprom_verl = FIELD_GET(NVM_MINOR_MASK, fw_version);
+ }
+ /* Convert minor value to hex before assigning to output struct
+ * Val to be converted will not be higher than 99, per tool output
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index a018000f7db92e..bed94e50a66933 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -1,9 +1,9 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+-#include <linux/if_ether.h>
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+-
++#include <linux/if_ether.h>
+ #include "e1000_mac.h"
+ #include "e1000_phy.h"
+
+@@ -1682,8 +1682,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
+ if (ret_val)
+ goto out;
+
+- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
++ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+@@ -1796,8 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+ if (ret_val)
+ goto out;
+
+- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
++ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+@@ -2578,8 +2576,7 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
+ if (ret_val)
+ goto out;
+
+- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
+- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
++ length = FIELD_GET(I82580_DSTATUS_CABLE_LENGTH, phy_data);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ ret_val = -E1000_ERR_PHY;
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 4ee849985e2b8a..92b2be06a6e930 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2434,7 +2434,7 @@ static int igb_get_ts_info(struct net_device *dev,
+ }
+ }
+
+-#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
++#define ETHER_TYPE_FULL_MASK cpu_to_be16(FIELD_MAX(U16_MAX))
+ static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+ {
+@@ -2733,8 +2733,8 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
+ u32 vlapqf;
+
+ vlapqf = rd32(E1000_VLAPQF);
+- vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+- >> VLAN_PRIO_SHIFT;
++ vlan_priority = FIELD_GET(VLAN_PRIO_MASK,
++ ntohs(input->filter.vlan_tci));
+ queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+@@ -2817,7 +2817,7 @@ static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter,
+ u8 vlan_priority;
+ u32 vlapqf;
+
+- vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
++ vlan_priority = FIELD_GET(VLAN_PRIO_MASK, vlan_tci);
+
+ vlapqf = rd32(E1000_VLAPQF);
+ vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 76b34cee1da3c8..49b349fa22542a 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -33,6 +33,7 @@
+ #include <linux/bpf_trace.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/etherdevice.h>
++#include <linux/lockdep.h>
+ #ifdef CONFIG_IGB_DCA
+ #include <linux/dca.h>
+ #endif
+@@ -2939,8 +2940,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ }
+ }
+
++/* This function assumes __netif_tx_lock is held by the caller. */
+ static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+ {
++ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
++
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+@@ -3025,11 +3029,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
+ nxmit++;
+ }
+
+- __netif_tx_unlock(nq);
+-
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ igb_xdp_ring_update_tail(tx_ring);
+
++ __netif_tx_unlock(nq);
++
+ return nxmit;
+ }
+
+@@ -4833,6 +4837,7 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+
+ #if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
++ IGB_2K_TOO_SMALL_WITH_PADDING ||
+ rd32(E1000_RCTL) & E1000_RCTL_SBP)
+ set_ring_uses_large_buffer(rx_ring);
+ #endif
+@@ -6984,45 +6989,42 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+
+ static void igb_tsync_interrupt(struct igb_adapter *adapter)
+ {
++ const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
++ TSINTR_TT0 | TSINTR_TT1 |
++ TSINTR_AUTT0 | TSINTR_AUTT1);
+ struct e1000_hw *hw = &adapter->hw;
+- u32 ack = 0, tsicr = rd32(E1000_TSICR);
++ u32 tsicr = rd32(E1000_TSICR);
+ struct ptp_clock_event event;
+
++ if (hw->mac.type == e1000_82580) {
++ /* 82580 has a hardware bug that requires an explicit
++ * write to clear the TimeSync interrupt cause.
++ */
++ wr32(E1000_TSICR, tsicr & mask);
++ }
++
+ if (tsicr & TSINTR_SYS_WRAP) {
+ event.type = PTP_CLOCK_PPS;
+ if (adapter->ptp_caps.pps)
+ ptp_clock_event(adapter->ptp_clock, &event);
+- ack |= TSINTR_SYS_WRAP;
+ }
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+- ack |= E1000_TSICR_TXTS;
+ }
+
+- if (tsicr & TSINTR_TT0) {
++ if (tsicr & TSINTR_TT0)
+ igb_perout(adapter, 0);
+- ack |= TSINTR_TT0;
+- }
+
+- if (tsicr & TSINTR_TT1) {
++ if (tsicr & TSINTR_TT1)
+ igb_perout(adapter, 1);
+- ack |= TSINTR_TT1;
+- }
+
+- if (tsicr & TSINTR_AUTT0) {
++ if (tsicr & TSINTR_AUTT0)
+ igb_extts(adapter, 0);
+- ack |= TSINTR_AUTT0;
+- }
+
+- if (tsicr & TSINTR_AUTT1) {
++ if (tsicr & TSINTR_AUTT1)
+ igb_extts(adapter, 1);
+- ack |= TSINTR_AUTT1;
+- }
+-
+- /* acknowledge the interrupts */
+- wr32(E1000_TSICR, ack);
+ }
+
+ static irqreturn_t igb_msix_other(int irq, void *data)
+@@ -7296,7 +7298,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+ {
+- int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
++ int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ int i;
+@@ -7556,7 +7558,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
+
+ static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+ {
+- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
++ int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+ int ret;
+
+@@ -8891,12 +8893,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+
+ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ {
++ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+- struct sk_buff *skb = rx_ring->skb;
+- unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
++ struct sk_buff *skb = rx_ring->skb;
++ int cpu = smp_processor_id();
+ unsigned int xdp_xmit = 0;
++ struct netdev_queue *nq;
+ struct xdp_buff xdp;
+ u32 frame_sz = 0;
+ int rx_buf_pgcnt;
+@@ -9024,7 +9028,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
++ nq = txring_txq(tx_ring);
++ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
++ __netif_tx_unlock(nq);
+ }
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+@@ -9665,6 +9672,10 @@ static void igb_io_resume(struct pci_dev *pdev)
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
++ if (!test_bit(__IGB_DOWN, &adapter->state)) {
++ dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
++ return;
++ }
+ if (igb_up(adapter)) {
+ dev_err(&pdev->dev, "igb_up failed after reset\n");
+ return;
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 319c544b9f04ce..f9457055612004 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+
+ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ /* adjust timestamp for the TX latency based on link speed */
+- if (adapter->hw.mac.type == e1000_i210) {
++ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_TX_LATENCY_10;
+@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ ktime_t *timestamp)
+ {
+ struct igb_adapter *adapter = q_vector->adapter;
++ struct e1000_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps ts;
+ __le64 *regval = (__le64 *)va;
+ int adjust = 0;
+@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+- if (adapter->hw.mac.type == e1000_i210) {
++ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
+index a3cd7ac48d4b67..d15282ee5ea8f7 100644
+--- a/drivers/net/ethernet/intel/igbvf/mbx.c
++++ b/drivers/net/ethernet/intel/igbvf/mbx.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2009 - 2018 Intel Corporation. */
+
++#include <linux/bitfield.h>
+ #include "mbx.h"
+
+ /**
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index 7ff2752dd763af..c5012fa36af2fa 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -3,25 +3,25 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/init.h>
+-#include <linux/pci.h>
+-#include <linux/vmalloc.h>
+-#include <linux/pagemap.h>
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+-#include <linux/netdevice.h>
+-#include <linux/tcp.h>
+-#include <linux/ipv6.h>
+-#include <linux/slab.h>
+-#include <net/checksum.h>
+-#include <net/ip6_checksum.h>
+-#include <linux/mii.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
++#include <linux/init.h>
++#include <linux/ipv6.h>
++#include <linux/mii.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/pagemap.h>
++#include <linux/pci.h>
+ #include <linux/prefetch.h>
+ #include <linux/sctp.h>
+-
++#include <linux/slab.h>
++#include <linux/tcp.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <net/checksum.h>
++#include <net/ip6_checksum.h>
+ #include "igbvf.h"
+
+ char igbvf_driver_name[] = "igbvf";
+@@ -273,9 +273,8 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
+ * that case, it fills the header buffer and spills the rest
+ * into the page.
+ */
+- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
+- & E1000_RXDADV_HDRBUFLEN_MASK) >>
+- E1000_RXDADV_HDRBUFLEN_SHIFT;
++ hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info,
++ E1000_RXDADV_HDRBUFLEN_MASK);
+ if (hlen > adapter->rx_ps_hdr_size)
+ hlen = adapter->rx_ps_hdr_size;
+
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index f48f82d5e274b1..85cc163965062e 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -568,6 +568,7 @@ struct igc_nfc_filter {
+ u16 etype;
+ __be16 vlan_etype;
+ u16 vlan_tci;
++ u16 vlan_tci_mask;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+ u8 user_data[8];
+diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
+index b3037016f31d29..a18af5c87cde48 100644
+--- a/drivers/net/ethernet/intel/igc/igc_defines.h
++++ b/drivers/net/ethernet/intel/igc/igc_defines.h
+@@ -402,6 +402,12 @@
+ #define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
+ #define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+
++/* Retry Buffer Control */
++#define IGC_RETX_CTL 0x041C
++#define IGC_RETX_CTL_WATERMARK_MASK 0xF
++#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */
++#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */
++
+ /* Transmit Scheduling Latency */
+ /* Latency between transmission scheduling (LaunchTime) and the time
+ * the packet is transmitted to the network in nanosecond.
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index dd8a9d27a1670c..f7284fa4324a44 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -957,6 +957,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
+ }
+
+ #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
++#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
+ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+ {
+@@ -979,10 +980,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
++ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
++ fsp->flow_type |= FLOW_EXT;
++ fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
++ fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
++ }
++
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
+- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
++ fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+@@ -1217,6 +1224,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
++ rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
+ rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
+ }
+
+@@ -1254,11 +1262,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
+ memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
+ }
+
+- /* When multiple filter options or user data or vlan etype is set, use a
+- * flex filter.
++ /* The i225/i226 has various different filters. Flex filters provide a
++ * way to match up to the first 128 bytes of a packet. Use them for:
++ * a) For specific user data
++ * b) For VLAN EtherType
++ * c) For full TCI match
++ * d) Or in case multiple filter criteria are set
++ *
++ * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
+ */
+ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
+ (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
++ ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
++ rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
+ (rule->filter.match_flags & (rule->filter.match_flags - 1)))
+ rule->flex = true;
+ else
+@@ -1328,6 +1344,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
+ return -EINVAL;
+ }
+
++ /* There are two ways to match the VLAN TCI:
++ * 1. Match on PCP field and use vlan prio filter for it
++ * 2. Match on complete TCI field and use flex filter for it
++ */
++ if ((fsp->flow_type & FLOW_EXT) &&
++ fsp->m_ext.vlan_tci &&
++ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
++ fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
++ netdev_dbg(netdev, "VLAN mask not supported\n");
++ return -EOPNOTSUPP;
++ }
++
++ /* VLAN EtherType can only be matched by full mask. */
++ if ((fsp->flow_type & FLOW_EXT) &&
++ fsp->m_ext.vlan_etype &&
++ fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
++ netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (fsp->location >= IGC_MAX_RXNFC_RULES) {
+ netdev_dbg(netdev, "Invalid location\n");
+ return -EINVAL;
+diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
+index 17546a035ab197..d2562c8e8015e7 100644
+--- a/drivers/net/ethernet/intel/igc/igc_i225.c
++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright (c) 2018 Intel Corporation */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+
+ #include "igc_hw.h"
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 98de34d0ce07e1..da1018d8326220 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1640,10 +1640,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+
+ if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+- /* FIXME: add support for retrieving timestamps from
+- * the other timer registers before skipping the
+- * timestamping request.
+- */
+ unsigned long flags;
+ u32 tstamp_flags;
+
+@@ -5304,25 +5300,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
+
+ static void igc_tsync_interrupt(struct igc_adapter *adapter)
+ {
+- u32 ack, tsauxc, sec, nsec, tsicr;
+ struct igc_hw *hw = &adapter->hw;
++ u32 tsauxc, sec, nsec, tsicr;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+
+ tsicr = rd32(IGC_TSICR);
+- ack = 0;
+
+ if (tsicr & IGC_TSICR_SYS_WRAP) {
+ event.type = PTP_CLOCK_PPS;
+ if (adapter->ptp_caps.pps)
+ ptp_clock_event(adapter->ptp_clock, &event);
+- ack |= IGC_TSICR_SYS_WRAP;
+ }
+
+ if (tsicr & IGC_TSICR_TXTS) {
+ /* retrieve hardware timestamp */
+ igc_ptp_tx_tstamp_event(adapter);
+- ack |= IGC_TSICR_TXTS;
+ }
+
+ if (tsicr & IGC_TSICR_TT0) {
+@@ -5336,7 +5329,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
+ wr32(IGC_TSAUXC, tsauxc);
+ adapter->perout[0].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+- ack |= IGC_TSICR_TT0;
+ }
+
+ if (tsicr & IGC_TSICR_TT1) {
+@@ -5350,7 +5342,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
+ wr32(IGC_TSAUXC, tsauxc);
+ adapter->perout[1].start = ts;
+ spin_unlock(&adapter->tmreg_lock);
+- ack |= IGC_TSICR_TT1;
+ }
+
+ if (tsicr & IGC_TSICR_AUTT0) {
+@@ -5360,7 +5351,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
+ event.index = 0;
+ event.timestamp = sec * NSEC_PER_SEC + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+- ack |= IGC_TSICR_AUTT0;
+ }
+
+ if (tsicr & IGC_TSICR_AUTT1) {
+@@ -5370,11 +5360,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
+ event.index = 1;
+ event.timestamp = sec * NSEC_PER_SEC + nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+- ack |= IGC_TSICR_AUTT1;
+ }
+-
+- /* acknowledge the interrupts */
+- wr32(IGC_TSICR, ack);
+ }
+
+ /**
+@@ -6222,21 +6208,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ size_t n;
+ int i;
+
+- switch (qopt->cmd) {
+- case TAPRIO_CMD_REPLACE:
+- break;
+- case TAPRIO_CMD_DESTROY:
+- return igc_tsn_clear_schedule(adapter);
+- case TAPRIO_CMD_STATS:
+- igc_taprio_stats(adapter->netdev, &qopt->stats);
+- return 0;
+- case TAPRIO_CMD_QUEUE_STATS:
+- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+- return 0;
+- default:
+- return -EOPNOTSUPP;
+- }
+-
+ if (qopt->base_time < 0)
+ return -ERANGE;
+
+@@ -6246,12 +6217,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ if (!validate_schedule(adapter, qopt))
+ return -EINVAL;
+
++ igc_ptp_read(adapter, &now);
++
++ if (igc_tsn_is_taprio_activated_by_user(adapter) &&
++ is_base_time_past(qopt->base_time, &now))
++ adapter->qbv_config_change_errors++;
++
+ adapter->cycle_time = qopt->cycle_time;
+ adapter->base_time = qopt->base_time;
+ adapter->taprio_offload_enable = true;
+
+- igc_ptp_read(adapter, &now);
+-
+ for (n = 0; n < qopt->num_entries; n++) {
+ struct tc_taprio_sched_entry *e = &qopt->entries[n];
+
+@@ -6345,7 +6320,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+- err = igc_save_qbv_schedule(adapter, qopt);
++ switch (qopt->cmd) {
++ case TAPRIO_CMD_REPLACE:
++ err = igc_save_qbv_schedule(adapter, qopt);
++ break;
++ case TAPRIO_CMD_DESTROY:
++ err = igc_tsn_clear_schedule(adapter);
++ break;
++ case TAPRIO_CMD_STATS:
++ igc_taprio_stats(adapter->netdev, &qopt->stats);
++ return 0;
++ case TAPRIO_CMD_QUEUE_STATS:
++ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++
+ if (err)
+ return err;
+
+@@ -6489,7 +6480,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+- int i, drops;
++ int i, nxmit;
+
+ if (unlikely(!netif_carrier_ok(dev)))
+ return -ENETDOWN;
+@@ -6505,16 +6496,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+
+- drops = 0;
++ nxmit = 0;
+ for (i = 0; i < num_frames; i++) {
+ int err;
+ struct xdp_frame *xdpf = frames[i];
+
+ err = igc_xdp_init_tx_descriptor(ring, xdpf);
+- if (err) {
+- xdp_return_frame_rx_napi(xdpf);
+- drops++;
+- }
++ if (err)
++ break;
++ nxmit++;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+@@ -6522,7 +6512,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+
+ __netif_tx_unlock(nq);
+
+- return num_frames - drops;
++ return nxmit;
+ }
+
+ static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
+@@ -7298,6 +7288,7 @@ static void igc_io_resume(struct pci_dev *pdev)
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ if (igc_open(netdev)) {
++ rtnl_unlock();
+ netdev_err(netdev, "igc_open failed after reset\n");
+ return;
+ }
+diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
+index 53b77c969c8579..d0d9e7170154ca 100644
+--- a/drivers/net/ethernet/intel/igc/igc_phy.c
++++ b/drivers/net/ethernet/intel/igc/igc_phy.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright (c) 2018 Intel Corporation */
+
++#include <linux/bitfield.h>
+ #include "igc_phy.h"
+
+ /**
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
+index a9c08321aca901..d68fa7f3d5f078 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
+@@ -49,12 +49,19 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
+ return new_flags;
+ }
+
++static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
++{
++ struct igc_hw *hw = &adapter->hw;
++
++ return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
++}
++
+ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+ {
+ struct igc_hw *hw = &adapter->hw;
+ u16 txoffset;
+
+- if (!is_any_launchtime(adapter))
++ if (!igc_tsn_is_tx_mode_in_tsn(adapter))
+ return;
+
+ switch (adapter->link_speed) {
+@@ -78,6 +85,23 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+ wr32(IGC_GTXOFFSET, txoffset);
+ }
+
++static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
++{
++ struct igc_hw *hw = &adapter->hw;
++ u32 retxctl;
++
++ retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
++ wr32(IGC_RETX_CTL, retxctl);
++}
++
++bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
++{
++ struct igc_hw *hw = &adapter->hw;
++
++ return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
++ adapter->taprio_offload_enable;
++}
++
+ /* Returns the TSN specific registers to their default values after
+ * the adapter is reset.
+ */
+@@ -91,6 +115,9 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+ wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+
++ if (igc_is_device_id_i226(hw))
++ igc_tsn_restore_retx_default(adapter);
++
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+@@ -111,6 +138,25 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+ return 0;
+ }
+
++/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes
++ * to 88 Bytes by setting RETX_CTL register using the recommendation from:
++ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
++ * Item 9: TSN: Packet Transmission Might Cross the Qbv Window
++ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
++ */
++static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
++{
++ struct igc_hw *hw = &adapter->hw;
++ u32 retxctl, watermark;
++
++ retxctl = rd32(IGC_RETX_CTL);
++ watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
++ /* Set QBVFULLTH value using watermark and set QBVFULLEN */
++ retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
++ IGC_RETX_CTL_QBVFULLEN;
++ wr32(IGC_RETX_CTL, retxctl);
++}
++
+ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ {
+ struct igc_hw *hw = &adapter->hw;
+@@ -123,6 +169,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
++ if (igc_is_device_id_i226(hw))
++ igc_tsn_set_retx_qbvfullthreshold(adapter);
++
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txqctl = 0;
+@@ -227,7 +276,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ wr32(IGC_TQAVCC(i), tqavcc);
+
+ wr32(IGC_TQAVHC(i),
+- 0x80000000 + ring->hicredit * 0x7735);
++ 0x80000000 + ring->hicredit * 0x7736);
+ } else {
+ /* Disable any CBS for the queue */
+ txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);
+@@ -262,14 +311,6 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+ s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
+
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+-
+- /* Increase the counter if scheduling into the past while
+- * Gate Control List (GCL) is running.
+- */
+- if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+- (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
+- (adapter->qbv_count > 1))
+- adapter->qbv_config_change_errors++;
+ } else {
+ if (igc_is_device_id_i226(hw)) {
+ ktime_t adjust_time, expires_time;
+@@ -331,15 +372,22 @@ int igc_tsn_reset(struct igc_adapter *adapter)
+ return err;
+ }
+
+-int igc_tsn_offload_apply(struct igc_adapter *adapter)
++static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
+ {
+- struct igc_hw *hw = &adapter->hw;
++ bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
++ IGC_FLAG_TSN_ANY_ENABLED);
+
+- /* Per I225/6 HW Design Section 7.5.2.1, transmit mode
+- * cannot be changed dynamically. Require reset the adapter.
++ return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
++ (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
++}
++
++int igc_tsn_offload_apply(struct igc_adapter *adapter)
++{
++ /* Per I225/6 HW Design Section 7.5.2.1 guideline, if tx mode change
++ * from legacy->tsn or tsn->legacy, then reset adapter is needed.
+ */
+ if (netif_running(adapter->netdev) &&
+- (igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
++ igc_tsn_will_tx_mode_change(adapter)) {
+ schedule_work(&adapter->reset_task);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
+index b53e6af560b738..98ec845a86bf00 100644
+--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
++++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
+@@ -7,5 +7,6 @@
+ int igc_tsn_offload_apply(struct igc_adapter *adapter);
+ int igc_tsn_reset(struct igc_adapter *adapter);
+ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
++bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
+
+ #endif /* _IGC_BASE_H */
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+index 100388968e4dbd..3d56481e16bc97 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+@@ -123,14 +123,14 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+ if (ret_val)
+ return ret_val;
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+
+ /* Check to see if SFP+ module is supported */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ &list_offset,
+ &data_offset);
+ if (ret_val)
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+@@ -213,7 +213,7 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ break;
+
+ default:
+- return IXGBE_ERR_LINK_SETUP;
++ return -EIO;
+ }
+
+ return 0;
+@@ -283,7 +283,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time)
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+@@ -292,7 +292,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+ }
+ }
+@@ -369,7 +369,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+ break;
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ /* Set 802.3x based flow control settings. */
+@@ -438,7 +438,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
++ status = -EIO;
+ hw_dbg(hw, "Autonegotiation did not complete.\n");
+ }
+ }
+@@ -478,7 +478,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ hw_dbg(hw, "Link was indicated but link is down\n");
+- return IXGBE_ERR_LINK_SETUP;
++ return -EIO;
+ }
+
+ return 0;
+@@ -594,7 +594,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+- return IXGBE_ERR_LINK_SETUP;
++ return -EINVAL;
+
+ /* Set KX4/KX support according to speed requested */
+ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+@@ -701,9 +701,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+
+ /* Init PHY and function pointers, perform SFP setup */
+ phy_status = hw->phy.ops.init(hw);
+- if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++ if (phy_status == -EOPNOTSUPP)
+ return phy_status;
+- if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
++ if (phy_status == -ENOENT)
+ goto mac_reset_top;
+
+ hw->phy.ops.reset(hw);
+@@ -727,7 +727,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+ udelay(1);
+ }
+ if (ctrl & IXGBE_CTRL_RST) {
+- status = IXGBE_ERR_RESET_FAILED;
++ status = -EIO;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+@@ -789,7 +789,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+@@ -814,7 +814,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+@@ -845,7 +845,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ u32 vftabyte;
+
+ if (vlan > 4095)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /* Determine 32-bit word position in array */
+ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
+@@ -964,7 +964,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ /*
+@@ -993,7 +993,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ hw_dbg(hw, "EEPROM read did not pass.\n");
+- status = IXGBE_ERR_SFP_NOT_PRESENT;
++ status = -ENOENT;
+ goto out;
+ }
+
+@@ -1003,7 +1003,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+
+ *eeprom_data = (u8)(sfp_data >> 8);
+ } else {
+- status = IXGBE_ERR_PHY;
++ status = -EIO;
+ }
+
+ out:
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+index 58ea959a448225..339e106a5732d1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+@@ -117,7 +117,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
+@@ -144,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+
+ if (ret_val) {
+ hw_dbg(hw, " sfp module setup not complete\n");
+- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
++ return -EIO;
+ }
+ }
+
+@@ -159,7 +159,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
++ return -EIO;
+ }
+
+ /**
+@@ -184,7 +184,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ *locked = true;
+ }
+@@ -219,7 +219,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ locked = true;
+ }
+@@ -400,7 +400,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ break;
+
+ default:
+- return IXGBE_ERR_LINK_SETUP;
++ return -EIO;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+@@ -541,7 +541,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
++ status = -EIO;
+ hw_dbg(hw, "Autoneg did not complete.\n");
+ }
+ }
+@@ -794,7 +794,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+- return IXGBE_ERR_LINK_SETUP;
++ return -EINVAL;
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ if (hw->mac.orig_link_settings_stored)
+@@ -861,8 +861,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+- status =
+- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
++ status = -EIO;
+ hw_dbg(hw, "Autoneg did not complete.\n");
+ }
+ }
+@@ -927,7 +926,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++ if (status == -EOPNOTSUPP)
+ return status;
+
+ /* Setup SFP module if there is one present. */
+@@ -936,7 +935,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+ hw->phy.sfp_setup_needed = false;
+ }
+
+- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++ if (status == -EOPNOTSUPP)
+ return status;
+
+ /* Reset PHY */
+@@ -974,7 +973,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+- status = IXGBE_ERR_RESET_FAILED;
++ status = -EIO;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+@@ -1093,7 +1092,7 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+ udelay(10);
+ }
+
+- return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
++ return -EIO;
+ }
+
+ /**
+@@ -1155,7 +1154,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+- return IXGBE_ERR_FDIR_REINIT_FAILED;
++ return -EIO;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+@@ -1387,7 +1386,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type input\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ /* configure FDIRCMD register */
+@@ -1546,7 +1545,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ break;
+ default:
+ hw_dbg(hw, " Error on vm pool mask\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+@@ -1555,14 +1554,14 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+ break;
+ case IXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type mask\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
+@@ -1583,7 +1582,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ break;
+ default:
+ hw_dbg(hw, " Error on VLAN mask\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
+@@ -1595,7 +1594,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ break;
+ default:
+ hw_dbg(hw, " Error on flexible byte mask\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+@@ -1824,7 +1823,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+
+ return status;
+ }
+@@ -1863,13 +1862,13 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+ * Verifies that installed the firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+- * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+- * if the FW version is not supported.
++ * Return: -EACCES if the FW is not present or if the FW version is
++ * not supported.
+ **/
+ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ {
+- s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
++ s32 status = -EACCES;
+ u16 offset;
+ u16 fw_version = 0;
+
+@@ -1883,7 +1882,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ goto fw_version_err;
+
+ if (fw_offset == 0 || fw_offset == 0xFFFF)
+- return IXGBE_ERR_EEPROM_VERSION;
++ return -EACCES;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
+@@ -1891,7 +1890,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+ goto fw_version_err;
+
+ if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
+- return IXGBE_ERR_EEPROM_VERSION;
++ return -EACCES;
+
+ /* get the firmware version */
+ offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
+@@ -1905,7 +1904,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+
+ fw_version_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+- return IXGBE_ERR_EEPROM_VERSION;
++ return -EACCES;
+ }
+
+ /**
+@@ -2038,7 +2037,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "auto negotiation not completed\n");
+- ret_val = IXGBE_ERR_RESET_FAILED;
++ ret_val = -EIO;
+ goto reset_pipeline_out;
+ }
+
+@@ -2087,7 +2086,7 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+- status = IXGBE_ERR_I2C;
++ status = -EIO;
+ goto release_i2c_access;
+ }
+ }
+@@ -2141,7 +2140,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+- status = IXGBE_ERR_I2C;
++ status = -EIO;
+ goto release_i2c_access;
+ }
+ }
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index 878dd8dff5285c..2e6e0365154a11 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -124,7 +124,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+
+ /*
+@@ -215,7 +215,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+ break;
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X540) {
+@@ -500,7 +500,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+
+ if (pba_num == NULL) {
+ hw_dbg(hw, "PBA string buffer was null\n");
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+@@ -526,7 +526,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+- return IXGBE_ERR_NO_SPACE;
++ return -ENOSPC;
+ }
+
+ /* extract hex string from data and pba_ptr */
+@@ -563,13 +563,13 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg(hw, "NVM PBA number section invalid length\n");
+- return IXGBE_ERR_PBA_SECTION;
++ return -EIO;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+- return IXGBE_ERR_NO_SPACE;
++ return -ENOSPC;
+ }
+
+ /* trim pba length from start of string */
+@@ -684,7 +684,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+- bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
++ bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg);
+ bus->lan_id = bus->func;
+
+ /* check for a port swap */
+@@ -695,8 +695,8 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+- bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+- IXGBE_EE_CTRL_4_INST_ID_SHIFT;
++ bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID,
++ ee_ctrl_4);
+ }
+ }
+
+@@ -805,7 +805,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ if (index > 3)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /* To turn on the LED, set mode to ON. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+@@ -826,7 +826,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ if (index > 3)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /* To turn off the LED, set mode to OFF. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+@@ -870,10 +870,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+ * SPI EEPROM is assumed here. This code would need to
+ * change if a future EEPROM is not SPI.
+ */
+- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+- IXGBE_EEC_SIZE_SHIFT);
++ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
+ eeprom->word_size = BIT(eeprom_size +
+- IXGBE_EEPROM_WORD_SIZE_SHIFT);
++ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ }
+
+ if (eec & IXGBE_EEC_ADDR_SIZE)
+@@ -904,11 +903,8 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+
+ hw->eeprom.ops.init_params(hw);
+
+- if (words == 0)
+- return IXGBE_ERR_INVALID_ARGUMENT;
+-
+- if (offset + words > hw->eeprom.word_size)
+- return IXGBE_ERR_EEPROM;
++ if (words == 0 || (offset + words > hw->eeprom.word_size))
++ return -EINVAL;
+
+ /*
+ * The EEPROM page size cannot be queried from the chip. We do lazy
+@@ -962,7 +958,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ for (i = 0; i < words; i++) {
+@@ -1028,7 +1024,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size)
+- return IXGBE_ERR_EEPROM;
++ return -EINVAL;
+
+ return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+ }
+@@ -1050,11 +1046,8 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+
+ hw->eeprom.ops.init_params(hw);
+
+- if (words == 0)
+- return IXGBE_ERR_INVALID_ARGUMENT;
+-
+- if (offset + words > hw->eeprom.word_size)
+- return IXGBE_ERR_EEPROM;
++ if (words == 0 || (offset + words > hw->eeprom.word_size))
++ return -EINVAL;
+
+ /*
+ * We cannot hold synchronization semaphores for too long
+@@ -1099,7 +1092,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ for (i = 0; i < words; i++) {
+@@ -1142,7 +1135,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size)
+- return IXGBE_ERR_EEPROM;
++ return -EINVAL;
+
+ return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ }
+@@ -1165,11 +1158,8 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+
+ hw->eeprom.ops.init_params(hw);
+
+- if (words == 0)
+- return IXGBE_ERR_INVALID_ARGUMENT;
+-
+- if (offset >= hw->eeprom.word_size)
+- return IXGBE_ERR_EEPROM;
++ if (words == 0 || offset >= hw->eeprom.word_size)
++ return -EINVAL;
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+@@ -1262,11 +1252,8 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+
+ hw->eeprom.ops.init_params(hw);
+
+- if (words == 0)
+- return IXGBE_ERR_INVALID_ARGUMENT;
+-
+- if (offset >= hw->eeprom.word_size)
+- return IXGBE_ERR_EEPROM;
++ if (words == 0 || offset >= hw->eeprom.word_size)
++ return -EINVAL;
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+@@ -1328,7 +1315,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+ }
+ udelay(5);
+ }
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /**
+@@ -1344,7 +1331,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+ u32 i;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
+
+@@ -1366,7 +1353,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+ hw_dbg(hw, "Could not acquire EEPROM grant\n");
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /* Setup EEPROM for Read/Write */
+@@ -1419,7 +1406,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
+ if (swsm & IXGBE_SWSM_SMBI) {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+ }
+
+@@ -1447,7 +1434,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+ if (i >= timeout) {
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
+ ixgbe_release_eeprom_semaphore(hw);
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ return 0;
+@@ -1503,7 +1490,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+ */
+ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+ hw_dbg(hw, "SPI EEPROM Status error\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ return 0;
+@@ -1715,7 +1702,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /* If the pointer seems invalid */
+@@ -1724,7 +1711,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+
+ if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ if (length == 0xFFFF || length == 0)
+@@ -1733,7 +1720,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+ checksum += word;
+ }
+@@ -1786,7 +1773,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+- status = IXGBE_ERR_EEPROM_CHECKSUM;
++ status = -EIO;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+@@ -1845,7 +1832,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ /* setup VMDq pool selection before this RAR gets enabled */
+@@ -1897,7 +1884,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ /*
+@@ -2146,7 +2133,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time)
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+@@ -2155,7 +2142,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+ }
+ }
+@@ -2212,7 +2199,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+ break;
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ /* Set 802.3x based flow control settings. */
+@@ -2269,7 +2256,7 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+ {
+ if ((!(adv_reg)) || (!(lp_reg)))
+- return IXGBE_ERR_FC_NOT_NEGOTIATED;
++ return -EINVAL;
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ /*
+@@ -2321,7 +2308,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
+- return IXGBE_ERR_FC_NOT_NEGOTIATED;
++ return -EIO;
+
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+@@ -2353,12 +2340,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+ */
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
+- return IXGBE_ERR_FC_NOT_NEGOTIATED;
++ return -EIO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
+- return IXGBE_ERR_FC_NOT_NEGOTIATED;
++ return -EIO;
+ }
+ /*
+ * Read the 10g AN autoc and LP ability registers and resolve
+@@ -2407,8 +2394,8 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+ **/
+ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+ {
+- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
++ s32 ret_val = -EIO;
+ bool link_up;
+
+ /*
+@@ -2510,7 +2497,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+ * @hw: pointer to hardware structure
+ *
+ * Disables PCI-Express primary access and verifies there are no pending
+- * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
++ * requests. -EALREADY is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else 0
+ * is returned signifying primary requests disabled.
+ **/
+@@ -2575,7 +2562,7 @@ static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
+ }
+
+ hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+- return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
++ return -EALREADY;
+ }
+
+ /**
+@@ -2600,7 +2587,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_eeprom_semaphore(hw))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ if (!(gssr & (fwmask | swmask))) {
+@@ -2620,7 +2607,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+ ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
+
+ usleep_range(5000, 10000);
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+
+ /**
+@@ -2757,7 +2744,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+ s32 ret_val;
+
+ if (index > 3)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /*
+ * Link must be up to auto-blink the LEDs;
+@@ -2803,7 +2790,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+ s32 ret_val;
+
+ if (index > 3)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+@@ -2963,7 +2950,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+@@ -3014,7 +3001,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ if (vmdq < 32) {
+@@ -3091,7 +3078,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+- first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
++ first_empty_slot = vlvf_bypass ? -ENOSPC : 0;
+
+ /* add VLAN enable bit for comparison */
+ vlan |= IXGBE_VLVF_VIEN;
+@@ -3115,7 +3102,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+ if (!first_empty_slot)
+ hw_dbg(hw, "No space in VLVF.\n");
+
+- return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
++ return first_empty_slot ? : -ENOSPC;
+ }
+
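The "first_empty_slot ? : -ENOSPC" return just above uses the GNU C conditional with an omitted middle operand: "x ? : y" evaluates x once and yields it when non-zero, otherwise y. For readers who have not met the extension, the portable spelling of the same statement is:

    /* Equivalent to "return first_empty_slot ? : -ENOSPC;" except that
     * first_empty_slot is written (and evaluated) twice. */
    return first_empty_slot ? first_empty_slot : -ENOSPC;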
+ /**
+@@ -3135,7 +3122,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ s32 vlvf_index;
+
+ if ((vlan > 4095) || (vind > 63))
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+@@ -3611,7 +3598,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+ *
+ * Communicates with the manageability block. On success return 0
+ * else returns semaphore error when encountering an error acquiring
+- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
++ * semaphore, -EINVAL when incorrect parameters passed or -EIO when
++ * command fails.
+ *
+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+ * by the caller.
+@@ -3624,7 +3612,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ return -EINVAL;
+ }
+
+ /* Set bit 9 of FWSTS clearing FW reset indication */
+@@ -3635,13 +3623,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_EN)) {
+ hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ return -EIO;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if (length % sizeof(u32)) {
+ hw_dbg(hw, "Buffer length failure, not aligned to dword");
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+ }
+
+ dword_len = length >> 2;
+@@ -3666,7 +3654,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ /* Check command successful completion. */
+ if ((timeout && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
+- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ return -EIO;
+
+ return 0;
+ }
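The length checks in ixgbe_hic_unlocked() above pair a modulus test with a shift: a command buffer must be a whole number of 32-bit DWORDs before "length >> 2" can serve as the DWORD count. A standalone illustration, with a hypothetical length value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t length = 24;                 /* message size in bytes */

            if (length % sizeof(uint32_t)) {      /* 25, 26, 27... rejected */
                    fprintf(stderr, "not DWORD aligned\n");
                    return 1;
            }
            printf("%u DWORDs\n", length >> 2);   /* 24 bytes -> 6 DWORDs */
            return 0;
    }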
+@@ -3686,7 +3674,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+ * in these cases.
+ *
+ * Communicates with the manageability block. On success return 0
+- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
++ * else return -EIO or -EINVAL.
+ **/
+ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ u32 length, u32 timeout,
+@@ -3701,7 +3689,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ return -EINVAL;
+ }
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+@@ -3731,7 +3719,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+
+ if (length < round_up(buf_len, 4) + hdr_size) {
+ hw_dbg(hw, "Buffer not large enough for reply message.\n");
+- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ status = -EIO;
+ goto rel_out;
+ }
+
+@@ -3762,8 +3750,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return 0
+- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
++ * else returns -EBUSY when encountering an error acquiring
++ * semaphore or -EIO when command fails.
+ **/
+ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, __always_unused u16 len,
+@@ -3799,7 +3787,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ ret_val = -EIO;
+
+ break;
+ }
+@@ -3897,14 +3885,14 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+ return status;
+
+ if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
+- return IXGBE_NOT_IMPLEMENTED;
++ return -EOPNOTSUPP;
+
+ status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
+ if (status)
+ return status;
+
+ if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
+- return IXGBE_NOT_IMPLEMENTED;
++ return -EOPNOTSUPP;
+
+ return 0;
+ }
+@@ -3927,7 +3915,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+
+ /* Only support thermal sensors attached to physical port 0 */
+ if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+- return IXGBE_NOT_IMPLEMENTED;
++ return -EOPNOTSUPP;
+
+ status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+ if (status)
+@@ -3946,10 +3934,10 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+ if (status)
+ return status;
+
+- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+- IXGBE_ETS_DATA_INDEX_SHIFT);
+- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+- IXGBE_ETS_DATA_LOC_SHIFT);
++ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
++ ets_sensor);
++ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
++ ets_sensor);
+
+ if (sensor_location != 0) {
+ status = hw->phy.ops.read_i2c_byte(hw,
+@@ -3987,14 +3975,13 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+
+ /* Only support thermal sensors attached to physical port 0 */
+ if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+- return IXGBE_NOT_IMPLEMENTED;
++ return -EOPNOTSUPP;
+
+ status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+ if (status)
+ return status;
+
+- low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+- IXGBE_ETS_LTHRES_DELTA_SHIFT);
++ low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg);
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > IXGBE_MAX_SENSORS)
+ num_sensors = IXGBE_MAX_SENSORS;
+@@ -4008,10 +3995,10 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+ ets_offset + 1 + i);
+ continue;
+ }
+- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+- IXGBE_ETS_DATA_INDEX_SHIFT);
+- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+- IXGBE_ETS_DATA_LOC_SHIFT);
++ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
++ ets_sensor);
++ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
++ ets_sensor);
+ therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
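Several hunks above swap open-coded mask-and-shift pairs for FIELD_GET() from <linux/bitfield.h>, which derives the shift amount from the mask's lowest set bit at compile time. A plain C model of what the macro computes; the mask value here is hypothetical, not one of the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_MASK 0x0F00u   /* hypothetical 4-bit field at bits 8..11 */

    /* Runtime model of FIELD_GET(mask, val): mask, then shift right by the
     * position of the lowest set bit (GCC/Clang builtin used for brevity). */
    static uint32_t field_get(uint32_t mask, uint32_t val)
    {
            return (val & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
            uint32_t reg = 0x1A42;

            /* open-coded form: (reg & EXAMPLE_MASK) >> 8, likewise 0xA */
            printf("0x%X\n", field_get(EXAMPLE_MASK, reg));
            return 0;
    }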
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index 0bbad4a5cc2f5e..9f2820a08b72e9 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -3370,7 +3370,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
+ {
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
++ s32 status = -EFAULT;
+ u8 databyte = 0xFF;
+ int i = 0;
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+index 13a6fca31004a8..866024f2b9eeb3 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+@@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+ goto err_out;
+ }
+
+- xs = kzalloc(sizeof(*xs), GFP_KERNEL);
++ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
++ if (unlikely(!algo)) {
++ err = -ENOENT;
++ goto err_out;
++ }
++
++ xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
+ if (unlikely(!xs)) {
+ err = -ENOMEM;
+ goto err_out;
+@@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+ memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
+ xs->xso.dev = adapter->netdev;
+
+- algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+- if (unlikely(!algo)) {
+- err = -ENOENT;
+- goto err_xs;
+- }
+-
+ aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
+- xs->aead = kzalloc(aead_len, GFP_KERNEL);
++ xs->aead = kzalloc(aead_len, GFP_ATOMIC);
+ if (unlikely(!xs->aead)) {
+ err = -ENOMEM;
+ goto err_xs;
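Two things change in the ixgbe_ipsec_vf_add_sa() hunk above: the AEAD algorithm lookup moves ahead of the first allocation, so a failed lookup no longer has to unwind a live xfrm_state, and both allocations switch from GFP_KERNEL to GFP_ATOMIC, presumably because this VF mailbox path can be reached from a context that must not sleep. Reduced to a sketch, with plain returns standing in for the function's goto labels:

    algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
    if (unlikely(!algo))
            return -ENOENT;     /* nothing allocated yet, nothing to free */

    xs = kzalloc(sizeof(*xs), GFP_ATOMIC);  /* GFP_ATOMIC: may not sleep here */
    if (unlikely(!xs))
            return -ENOMEM;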
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index dd03b017dfc518..f245f3df40fcac 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2756,7 +2756,6 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
+ {
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr = adapter->interrupt_event;
+- s32 rc;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+@@ -2790,14 +2789,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
+ }
+
+ /* Check if this is not due to overtemp */
+- if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
++ if (!hw->phy.ops.check_overtemp(hw))
+ return;
+
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+- rc = hw->phy.ops.check_overtemp(hw);
+- if (rc != IXGBE_ERR_OVERTEMP)
++ if (!hw->phy.ops.check_overtemp(hw))
+ return;
+ break;
+ default:
+@@ -2941,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+ {
+- u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
++ u32 mask;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+@@ -5512,7 +5510,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
+ {
+ u32 speed;
+ bool autoneg, link_up = false;
+- int ret = IXGBE_ERR_LINK_SETUP;
++ int ret = -EIO;
+
+ if (hw->mac.ops.check_link)
+ ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
+@@ -5983,13 +5981,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
+ err = hw->mac.ops.init_hw(hw);
+ switch (err) {
+ case 0:
+- case IXGBE_ERR_SFP_NOT_PRESENT:
+- case IXGBE_ERR_SFP_NOT_SUPPORTED:
++ case -ENOENT:
++ case -EOPNOTSUPP:
+ break;
+- case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
++ case -EALREADY:
+ e_dev_err("primary disable timed out\n");
+ break;
+- case IXGBE_ERR_EEPROM_VERSION:
++ case -EACCES:
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated with "
+@@ -7829,10 +7827,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+ adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
+
+ err = hw->phy.ops.identify_sfp(hw);
+- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
++ if (err == -EOPNOTSUPP)
+ goto sfp_out;
+
+- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
++ if (err == -ENOENT) {
+ /* If no cable is present, then we need to reset
+ * the next time we find a good cable. */
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+@@ -7858,7 +7856,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+ else
+ err = hw->mac.ops.setup_sfp(hw);
+
+- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
++ if (err == -EOPNOTSUPP)
+ goto sfp_out;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+@@ -7867,8 +7865,8 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+ sfp_out:
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+- if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
+- (adapter->netdev->reg_state == NETREG_REGISTERED)) {
++ if (err == -EOPNOTSUPP &&
++ adapter->netdev->reg_state == NETREG_REGISTERED) {
+ e_dev_err("failed to initialize because an unsupported "
+ "SFP+ module type was detected.\n");
+ e_dev_err("Reload the driver after installing a "
+@@ -7938,7 +7936,7 @@ static void ixgbe_service_timer(struct timer_list *t)
+ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+ {
+ struct ixgbe_hw *hw = &adapter->hw;
+- u32 status;
++ bool overtemp;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
+ return;
+@@ -7948,11 +7946,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+ if (!hw->phy.ops.handle_lasi)
+ return;
+
+- status = hw->phy.ops.handle_lasi(&adapter->hw);
+- if (status != IXGBE_ERR_OVERTEMP)
+- return;
+-
+- e_crit(drv, "%s\n", ixgbe_overheat_msg);
++ hw->phy.ops.handle_lasi(&adapter->hw, &overtemp);
++ if (overtemp)
++ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ }
+
+ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
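The handle_lasi() call above illustrates the companion interface change: instead of overloading the s32 return with the old IXGBE_ERR_OVERTEMP sentinel, the overtemp observation now travels in a bool out-parameter, leaving the return value free to carry a genuine errno. A minimal sketch of that split in plain C; every name here is hypothetical:

    #include <stdbool.h>

    static int poll_hardware(void)  { return 0; }     /* stub: 0 on success */
    static bool alarm_bit_set(void) { return false; } /* stub observation   */

    static int handle_event(bool *overtemp)
    {
            int status = poll_hardware();

            if (status)
                    return status;             /* a real failure: errno only  */
            *overtemp = alarm_bit_set();       /* the observation, separately */
            return 0;
    }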
+@@ -10528,6 +10524,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ }
+
++/**
++ * ixgbe_irq_disable_single - Disable single IRQ vector
++ * @adapter: adapter structure
++ * @ring: ring index
++ **/
++static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
++{
++ struct ixgbe_hw *hw = &adapter->hw;
++ u64 qmask = BIT_ULL(ring);
++ u32 mask;
++
++ switch (adapter->hw.mac.type) {
++ case ixgbe_mac_82598EB:
++ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
++ break;
++ case ixgbe_mac_82599EB:
++ case ixgbe_mac_X540:
++ case ixgbe_mac_X550:
++ case ixgbe_mac_X550EM_x:
++ case ixgbe_mac_x550em_a:
++ mask = (qmask & 0xFFFFFFFF);
++ if (mask)
++ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
++ mask = (qmask >> 32);
++ if (mask)
++ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
++ break;
++ default:
++ break;
++ }
++ IXGBE_WRITE_FLUSH(&adapter->hw);
++ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
++ synchronize_irq(adapter->msix_entries[ring].vector);
++ else
++ synchronize_irq(adapter->pdev->irq);
++}
++
+ /**
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+@@ -10544,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
++ ixgbe_irq_disable_single(adapter, ring);
++
++ /* Rx/Tx/XDP Tx share the same napi context. */
++ napi_disable(&rx_ring->q_vector->napi);
++
+ ixgbe_disable_txr(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_disable_txr(adapter, xdp_ring);
+@@ -10552,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+ if (xdp_ring)
+ synchronize_rcu();
+
+- /* Rx/Tx/XDP Tx share the same napi context. */
+- napi_disable(&rx_ring->q_vector->napi);
+-
+ ixgbe_clean_tx_ring(tx_ring);
+ if (xdp_ring)
+ ixgbe_clean_tx_ring(xdp_ring);
+@@ -10582,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+- /* Rx/Tx/XDP Tx share the same napi context. */
+- napi_enable(&rx_ring->q_vector->napi);
+-
+ ixgbe_configure_tx_ring(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_configure_tx_ring(adapter, xdp_ring);
+@@ -10593,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+ clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
++
++ /* Rx/Tx/XDP Tx share the same napi context. */
++ napi_enable(&rx_ring->q_vector->napi);
++ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
++ IXGBE_WRITE_FLUSH(&adapter->hw);
+ }
+
+ /**
+@@ -10922,9 +10960,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ err = hw->mac.ops.reset_hw(hw);
+ hw->phy.reset_if_overtemp = false;
+ ixgbe_set_eee_capable(adapter);
+- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
++ if (err == -ENOENT) {
+ err = 0;
+- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
++ } else if (err == -EOPNOTSUPP) {
+ e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported module.\n");
+ goto err_sw_init;
+@@ -11143,7 +11181,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ /* reset the hardware with the new settings */
+ err = hw->mac.ops.start_hw(hw);
+- if (err == IXGBE_ERR_EEPROM_VERSION) {
++ if (err == -EACCES) {
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated "
+@@ -11371,7 +11409,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+ if ((pf_func & 1) == (pdev->devfn & 1)) {
+ unsigned int device_id;
+
+- vf = (req_id & 0x7F) >> 1;
++ vf = FIELD_GET(0x7F, req_id);
+ e_dev_err("VF %d has caused a PCIe error\n", vf);
+ e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
+ "%8.8x\tdw3: %8.8x\n",
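The new ixgbe_irq_disable_single() helper earlier in this file also shows why the queue mask is 64-bit: vectors past 31 live in a second 32-bit register, so BIT_ULL(ring) is split into low and high words before the two _EX register writes. The same hunks reorder teardown so the vector and the shared NAPI context are quiesced before the rings are drained, and re-armed only after the rings are reconfigured. A standalone illustration of the mask split:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t qmask = 1ULL << 40;        /* BIT_ULL(40): vector 40 */
            uint32_t lo = qmask & 0xFFFFFFFF;   /* destined for ..._EX(0) */
            uint32_t hi = qmask >> 32;          /* destined for ..._EX(1) */

            /* prints lo=00000000 hi=00000100 */
            printf("lo=%08" PRIx32 " hi=%08" PRIx32 "\n", lo, hi);
            return 0;
    }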
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+index 5679293e53f7af..fe7ef5773369a4 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+@@ -24,7 +24,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ size = mbx->size;
+
+ if (!mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ return mbx->ops->read(hw, msg, size, mbx_id);
+ }
+@@ -43,10 +43,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (size > mbx->size)
+- return IXGBE_ERR_MBX;
++ return -EINVAL;
+
+ if (!mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ return mbx->ops->write(hw, msg, size, mbx_id);
+ }
+@@ -63,7 +63,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (!mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ return mbx->ops->check_for_msg(hw, mbx_id);
+ }
+@@ -80,7 +80,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (!mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ return mbx->ops->check_for_ack(hw, mbx_id);
+ }
+@@ -97,7 +97,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (!mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ return mbx->ops->check_for_rst(hw, mbx_id);
+ }
+@@ -115,12 +115,12 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ while (mbx->ops->check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ udelay(mbx->usec_delay);
+ }
+
+@@ -140,12 +140,12 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ while (mbx->ops->check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ udelay(mbx->usec_delay);
+ }
+
+@@ -169,7 +169,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ s32 ret_val;
+
+ if (!mbx->ops)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+ if (ret_val)
+@@ -197,7 +197,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops || !mbx->timeout)
+- return IXGBE_ERR_MBX;
++ return -EIO;
+
+ /* send msg */
+ ret_val = mbx->ops->write(hw, msg, size, mbx_id);
+@@ -217,7 +217,7 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+ return 0;
+ }
+
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ }
+
+ /**
+@@ -238,7 +238,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+ return 0;
+ }
+
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ }
+
+ /**
+@@ -259,7 +259,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+ return 0;
+ }
+
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ }
+
+ /**
+@@ -295,7 +295,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+ return 0;
+ }
+
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ }
+
+ /**
+@@ -317,7 +317,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ return 0;
+
+- return IXGBE_ERR_MBX;
++ return -EIO;
+ }
+
+ /**
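ixgbe_poll_for_msg() and ixgbe_poll_for_ack() above share one polling shape: spin on a readiness check, burn down a countdown budget, and map exhaustion to -EIO. A userspace sketch of the same pattern, with usleep() standing in for udelay() and every name hypothetical:

    #include <errno.h>
    #include <unistd.h>

    static int poll_until(int (*pending)(void), int budget, unsigned int delay_us)
    {
            while (pending()) {              /* non-zero means not ready yet */
                    if (--budget <= 0)
                            return -EIO;     /* budget exhausted, as above   */
                    usleep(delay_us);        /* stand-in for udelay()        */
            }
            return 0;                        /* condition met within budget  */
    }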
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+index 8f4316b19278ce..6434c190e7a4cf 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+@@ -7,7 +7,6 @@
+ #include "ixgbe_type.h"
+
+ #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+-#define IXGBE_ERR_MBX -100
+
+ #define IXGBE_VFMAILBOX 0x002FC
+ #define IXGBE_VFMBMEM 0x00200
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+index 689470c1e8ad57..f28140a05f091c 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+@@ -102,7 +102,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ csum = ~csum;
+ do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+@@ -150,7 +150,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ hw_dbg(hw, "I2C byte read combined error.\n");
+ } while (retry < max_retry);
+
+- return IXGBE_ERR_I2C;
++ return -EIO;
+ }
+
+ /**
+@@ -179,7 +179,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ csum = ~csum;
+ do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+@@ -215,7 +215,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ hw_dbg(hw, "I2C byte write combined error.\n");
+ } while (retry < max_retry);
+
+- return IXGBE_ERR_I2C;
++ return -EIO;
+ }
+
+ /**
+@@ -262,8 +262,8 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
+ **/
+ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+ {
++ u32 status = -EFAULT;
+ u32 phy_addr;
+- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+
+ if (!hw->phy.phy_semaphore_mask) {
+ if (hw->bus.lan_id)
+@@ -276,13 +276,12 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+ return 0;
+
+ if (hw->phy.nw_mng_if_sel) {
+- phy_addr = (hw->phy.nw_mng_if_sel &
+- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
++ phy_addr = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
++ hw->phy.nw_mng_if_sel);
+ if (ixgbe_probe_phy(hw, phy_addr))
+ return 0;
+ else
+- return IXGBE_ERR_PHY_ADDR_INVALID;
++ return -EFAULT;
+ }
+
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+@@ -408,8 +407,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+ return status;
+
+ /* Don't reset PHY if it's shut down due to overtemp. */
+- if (!hw->phy.reset_if_overtemp &&
+- (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
++ if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw))
+ return 0;
+
+ /* Blocked by MNG FW so bail */
+@@ -457,7 +455,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+
+ if (ctrl & MDIO_CTRL1_RESET) {
+ hw_dbg(hw, "PHY reset polling failed to complete.\n");
+- return IXGBE_ERR_RESET_FAILED;
++ return -EIO;
+ }
+
+ return 0;
+@@ -500,7 +498,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address command did not complete.\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ /* Address cycle complete, setup and write the read
+@@ -527,7 +525,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY read command didn't complete\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ /* Read operation is complete. Get the data
+@@ -559,7 +557,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+
+ return status;
+@@ -604,7 +602,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ /*
+@@ -632,7 +630,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY write cmd didn't complete\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ return 0;
+@@ -657,7 +655,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+
+ return status;
+@@ -1430,7 +1428,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+
+ if ((phy_data & MDIO_CTRL1_RESET) != 0) {
+ hw_dbg(hw, "PHY reset did not complete.\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ /* Get init offsets */
+@@ -1448,8 +1446,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
+- control = (eword & IXGBE_CONTROL_MASK_NL) >>
+- IXGBE_CONTROL_SHIFT_NL;
++ control = FIELD_GET(IXGBE_CONTROL_MASK_NL, eword);
+ edata = eword & IXGBE_DATA_MASK_NL;
+ switch (control) {
+ case IXGBE_DELAY_NL:
+@@ -1487,12 +1484,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+ hw_dbg(hw, "SOL\n");
+ } else {
+ hw_dbg(hw, "Bad control value\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+ break;
+ default:
+ hw_dbg(hw, "Bad control type\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+ }
+
+@@ -1500,7 +1497,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+
+ err_eeprom:
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ /**
+@@ -1518,10 +1515,10 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+ return ixgbe_identify_qsfp_module_generic(hw);
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ }
+
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ }
+
+ /**
+@@ -1546,7 +1543,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ }
+
+ /* LAN ID is needed for sfp_type determination */
+@@ -1561,7 +1558,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+@@ -1752,7 +1749,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+
+ /* Anything else 82598-based is supported */
+@@ -1776,7 +1773,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ }
+ hw_dbg(hw, "SFP+ module not supported\n");
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+ return 0;
+
+@@ -1786,7 +1783,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ }
+
+ /**
+@@ -1813,7 +1810,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ }
+
+ /* LAN ID is needed for sfp_type determination */
+@@ -1827,7 +1824,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+
+ hw->phy.id = identifier;
+@@ -1895,7 +1892,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+ }
+
+@@ -1955,7 +1952,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ }
+ hw_dbg(hw, "QSFP module not supported\n");
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+ return 0;
+ }
+@@ -1966,7 +1963,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ }
+
+ /**
+@@ -1986,14 +1983,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 sfp_type = hw->phy.sfp_type;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+@@ -2014,11 +2011,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ hw_err(hw, "eeprom read at %d failed\n",
+ IXGBE_PHY_INIT_OFFSET_NL);
+- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
++ return -EIO;
+ }
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
++ return -EIO;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+@@ -2037,7 +2034,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ goto err_phy;
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ hw_dbg(hw, "SFP+ module not supported\n");
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ } else {
+ break;
+ }
+@@ -2050,14 +2047,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+
+ if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ hw_dbg(hw, "No matching SFP+ module found\n");
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+
+ return 0;
+
+ err_phy:
+ hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ /**
+@@ -2152,7 +2149,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+
+ do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ ixgbe_i2c_start(hw);
+
+@@ -2268,7 +2265,7 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ do {
+ ixgbe_i2c_start(hw);
+@@ -2510,7 +2507,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+
+ if (ack == 1) {
+ hw_dbg(hw, "I2C ack was not received.\n");
+- status = IXGBE_ERR_I2C;
++ status = -EIO;
+ }
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+@@ -2582,7 +2579,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+ udelay(IXGBE_I2C_T_LOW);
+ } else {
+ hw_dbg(hw, "I2C data was not set to %X\n", data);
+- return IXGBE_ERR_I2C;
++ return -EIO;
+ }
+
+ return 0;
+@@ -2678,7 +2675,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
+ hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
+- return IXGBE_ERR_I2C;
++ return -EIO;
+ }
+
+ return 0;
+@@ -2748,22 +2745,24 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
++ *
++ * Return true when an overtemp event detected, otherwise false.
+ **/
+-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
++bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+ {
+ u16 phy_data = 0;
++ u32 status;
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+- return 0;
++ return false;
+
+ /* Check that the LASI temp alarm status was triggered */
+- hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+- MDIO_MMD_PMAPMD, &phy_data);
+-
+- if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+- return 0;
++ status = hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
++ MDIO_MMD_PMAPMD, &phy_data);
++ if (status)
++ return false;
+
+- return IXGBE_ERR_OVERTEMP;
++ return !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM);
+ }
+
+ /** ixgbe_set_copper_phy_power - Control power for copper phy
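ixgbe_tn_check_overtemp() above gets the same treatment as handle_lasi: a bool answer replaces the old negative sentinel, and the read_reg status is now checked instead of discarded, so a failed MDIO read reports no alarm rather than acting on stale phy_data. The closing !! is the usual idiom for collapsing a masked bit test to exactly true or false:

    /* A plain bool conversion would also be well-defined, but !! makes the
     * 0-or-1 normalization explicit at a glance. */
    bool alarm = !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM);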
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+index 6544c4539c0de3..ef72729d7c9339 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+@@ -155,7 +155,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
+-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
++bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index ea88ac04ab9ade..d0a6c220a12ac1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -363,8 +363,7 @@ int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+ {
+- int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+- >> IXGBE_VT_MSGINFO_SHIFT;
++ int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
+@@ -971,7 +970,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+ {
+- u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
++ u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
+ u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ u8 tcs = adapter->hw_tcs;
+
+@@ -994,8 +993,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+ {
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+- int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+- IXGBE_VT_MSGINFO_SHIFT;
++ int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
+ int err;
+
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
+@@ -1329,7 +1327,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+ break;
+ default:
+ e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
+- retval = IXGBE_ERR_MBX;
++ retval = -EIO;
+ break;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+index 2b00db92b08f51..c24a72d1e2737a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+@@ -3509,10 +3509,10 @@ struct ixgbe_phy_operations {
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
+ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+- s32 (*check_overtemp)(struct ixgbe_hw *);
++ bool (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct ixgbe_hw *);
+- s32 (*handle_lasi)(struct ixgbe_hw *hw);
++ s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ u8 *value);
+ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+@@ -3665,45 +3665,6 @@ struct ixgbe_info {
+ const u32 *mvals;
+ };
+
+-
+-/* Error Codes */
+-#define IXGBE_ERR_EEPROM -1
+-#define IXGBE_ERR_EEPROM_CHECKSUM -2
+-#define IXGBE_ERR_PHY -3
+-#define IXGBE_ERR_CONFIG -4
+-#define IXGBE_ERR_PARAM -5
+-#define IXGBE_ERR_MAC_TYPE -6
+-#define IXGBE_ERR_UNKNOWN_PHY -7
+-#define IXGBE_ERR_LINK_SETUP -8
+-#define IXGBE_ERR_ADAPTER_STOPPED -9
+-#define IXGBE_ERR_INVALID_MAC_ADDR -10
+-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
+-#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
+-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
+-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
+-#define IXGBE_ERR_RESET_FAILED -15
+-#define IXGBE_ERR_SWFW_SYNC -16
+-#define IXGBE_ERR_PHY_ADDR_INVALID -17
+-#define IXGBE_ERR_I2C -18
+-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+-#define IXGBE_ERR_SFP_NOT_PRESENT -20
+-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+-#define IXGBE_ERR_EEPROM_VERSION -24
+-#define IXGBE_ERR_NO_SPACE -25
+-#define IXGBE_ERR_OVERTEMP -26
+-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+-#define IXGBE_ERR_PBA_SECTION -31
+-#define IXGBE_ERR_INVALID_ARGUMENT -32
+-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+-#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+-#define IXGBE_ERR_FW_RESP_INVALID -39
+-#define IXGBE_ERR_TOKEN_RETRY -40
+-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+-
+ #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
+ #define IXGBE_FUSES0_300MHZ BIT(5)
+ #define IXGBE_FUSES0_REV_MASK (3u << 6)
+@@ -3712,9 +3673,7 @@ struct ixgbe_info {
+ #define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
+ #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+ #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+-#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
+ #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+-#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
+ #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+ #define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
+ #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+@@ -3724,7 +3683,6 @@ struct ixgbe_info {
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
+ #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+ #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+-#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
+
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
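The table deleted above is the point of this whole series of hunks: every driver-private code now has a standard errno stand-in. Gathered from the conversions visible in this patch (a summary, not a contract; a few call sites pick context-specific values, for instance -EINVAL versus -EIO for the same old code):

    /* IXGBE_ERR_PARAM, _INVALID_ARGUMENT,
     * _INVALID_LINK_SETTINGS (most sites)      -> -EINVAL
     * IXGBE_ERR_SWFW_SYNC                      -> -EBUSY
     * IXGBE_ERR_SFP_NOT_PRESENT                -> -ENOENT
     * IXGBE_ERR_SFP_NOT_SUPPORTED,
     * IXGBE_NOT_IMPLEMENTED                    -> -EOPNOTSUPP
     * IXGBE_ERR_PHY_ADDR_INVALID               -> -EFAULT
     * IXGBE_ERR_PRIMARY_REQUESTS_PENDING       -> -EALREADY
     * IXGBE_ERR_EEPROM_VERSION                 -> -EACCES
     * IXGBE_ERR_TOKEN_RETRY                    -> -EAGAIN
     * IXGBE_ERR_NO_SPACE                       -> -ENOSPC
     * most others (EEPROM, PHY, I2C, MBX,
     * RESET_FAILED, CONFIG, FC_NOT_NEGOTIATED) -> -EIO
     */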
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+index d5cfb51ff648d3..57a912e4653fc3 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+@@ -84,7 +84,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_dbg(hw, "semaphore failed with %d", status);
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+
+ ctrl = IXGBE_CTRL_RST;
+@@ -103,7 +103,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+- status = IXGBE_ERR_RESET_FAILED;
++ status = -EIO;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+ msleep(100);
+@@ -187,16 +187,16 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+ {
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+- u32 eec;
+- u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
++ u16 eeprom_size;
++ u32 eec;
++
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
+- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+- IXGBE_EEC_SIZE_SHIFT);
++ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+@@ -220,7 +220,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+
+@@ -243,7 +243,7 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
+
+@@ -264,7 +264,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+
+@@ -287,7 +287,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
+
+@@ -324,7 +324,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ for (i = 0; i < checksum_last_word; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+ checksum += word;
+ }
+@@ -349,7 +349,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+
+ if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /* Skip pointer section if length is invalid. */
+@@ -360,7 +360,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word)) {
+ hw_dbg(hw, "EEPROM read failed\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+ checksum += word;
+ }
+@@ -397,7 +397,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+@@ -418,7 +418,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ */
+ if (read_checksum != checksum) {
+ hw_dbg(hw, "Invalid EEPROM checksum");
+- status = IXGBE_ERR_EEPROM_CHECKSUM;
++ status = -EIO;
+ }
+
+ /* If the user cares, return the calculated checksum */
+@@ -455,7 +455,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+@@ -490,7 +490,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+ s32 status;
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+- if (status == IXGBE_ERR_EEPROM) {
++ if (status == -EIO) {
+ hw_dbg(hw, "Flash update time out\n");
+ return status;
+ }
+@@ -540,7 +540,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+ return 0;
+ udelay(5);
+ }
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /**
+@@ -575,7 +575,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+@@ -599,7 +599,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+ * bits in the SW_FW_SYNC register.
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+ if (swfw_sync & (fwmask | hwmask)) {
+ swfw_sync |= swmask;
+@@ -622,11 +622,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_semaphore(hw);
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+ ixgbe_release_swfw_sync_semaphore(hw);
+
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+
+ /**
+@@ -680,7 +680,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+ if (i == timeout) {
+ hw_dbg(hw,
+ "Software semaphore SMBI between device drivers not granted.\n");
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+@@ -697,7 +697,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+ */
+ hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
+- return IXGBE_ERR_EEPROM;
++ return -EIO;
+ }
+
+ /**
+@@ -768,7 +768,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+ bool link_up;
+
+ if (index > 3)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /* Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+@@ -804,7 +804,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+ u32 ledctl_reg;
+
+ if (index > 3)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index aa4bf6c9a2f7cd..f806fbf25ec7c7 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -206,13 +206,13 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+ }
+ if (retry == IXGBE_CS4227_RETRIES) {
+ hw_err(hw, "CS4227 reset did not complete\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+ if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+ hw_err(hw, "CS4227 EEPROM did not load successfully\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ return 0;
+@@ -350,13 +350,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+ static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+ {
+- return IXGBE_NOT_IMPLEMENTED;
++ return -EOPNOTSUPP;
+ }
+
+ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+ {
+- return IXGBE_NOT_IMPLEMENTED;
++ return -EOPNOTSUPP;
+ }
+
+ /**
+@@ -463,7 +463,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ --retries;
+ } while (retries > 0);
+
+- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ return -EIO;
+ }
+
+ static const struct {
+@@ -511,7 +511,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+- return IXGBE_ERR_PHY_ADDR_INVALID;
++ return -EFAULT;
+
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+@@ -568,7 +568,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+
+ switch (hw->fc.requested_mode) {
+@@ -600,8 +600,10 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
++
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+- return IXGBE_ERR_OVERTEMP;
++ return -EIO;
++
+ return 0;
+ }
+
+@@ -628,16 +630,16 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+ {
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+- u32 eec;
+- u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
++ u16 eeprom_size;
++ u32 eec;
++
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
+- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+- IXGBE_EEC_SIZE_SHIFT);
++ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+@@ -675,7 +677,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+ *ctrl = command;
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "IOSF wait timed out\n");
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ return 0;
+@@ -712,10 +714,10 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
++ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
+ hw_dbg(hw, "Failed to read, error %x\n", error);
+- return IXGBE_ERR_PHY;
++ ret = -EIO;
++ goto out;
+ }
+
+ if (!ret)
+@@ -750,9 +752,9 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+- return IXGBE_ERR_FW_RESP_INVALID;
++ return -EIO;
+
+- return IXGBE_ERR_TOKEN_RETRY;
++ return -EAGAIN;
+ }
+
+ /**
+@@ -778,7 +780,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+- return IXGBE_ERR_FW_RESP_INVALID;
++ return -EIO;
+ }
+
+ /**
+@@ -942,7 +944,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ local_buffer = buf;
+ } else {
+ if (buffer_size < ptr)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+ local_buffer = &buffer[ptr];
+ }
+
+@@ -960,7 +962,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ }
+
+ if (buffer && ((u32)start + (u32)length > buffer_size))
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ for (i = start; length; i++, length--) {
+ if (i == bufsz && !buffer) {
+@@ -1012,7 +1014,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+ local_buffer = eeprom_ptrs;
+ } else {
+ if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+ local_buffer = buffer;
+ }
+
+@@ -1148,7 +1150,7 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+- status = IXGBE_ERR_EEPROM_CHECKSUM;
++ status = -EIO;
+ hw_dbg(hw, "Invalid EEPROM checksum");
+ }
+
+@@ -1203,7 +1205,7 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ hw_dbg(hw, "write ee hostif failed to get semaphore");
+- status = IXGBE_ERR_SWFW_SYNC;
++ status = -EBUSY;
+ }
+
+ return status;
+@@ -1412,10 +1414,9 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
++ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
+ hw_dbg(hw, "Failed to write, error %x\n", error);
+- return IXGBE_ERR_PHY;
++ return -EIO;
+ }
+
+ out:
+@@ -1558,7 +1559,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+
+ /* iXFI is only supported with X552 */
+ if (mac->type != ixgbe_mac_X550EM_x)
+- return IXGBE_ERR_LINK_SETUP;
++ return -EIO;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+@@ -1580,7 +1581,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+- return IXGBE_ERR_LINK_SETUP;
++ return -EINVAL;
+ }
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+@@ -1611,7 +1612,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+ {
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_not_present:
+- return IXGBE_ERR_SFP_NOT_PRESENT;
++ return -ENOENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ *linear = true;
+@@ -1630,7 +1631,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ default:
+- return IXGBE_ERR_SFP_NOT_SUPPORTED;
++ return -EOPNOTSUPP;
+ }
+
+ return 0;
+@@ -1660,7 +1661,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ * there is no reason to configure CS4227 and SFP not present error is
+ * not accepted in the setup MAC link flow.
+ */
+- if (status == IXGBE_ERR_SFP_NOT_PRESENT)
++ if (status == -ENOENT)
+ return 0;
+
+ if (status)
+@@ -1718,62 +1719,12 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ break;
+ default:
+ /* Other link speeds are not supported by internal PHY. */
+- return IXGBE_ERR_LINK_SETUP;
++ return -EINVAL;
+ }
+
+- (void)mac->ops.write_iosf_sb_reg(hw,
+- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+- /* change mode enforcement rules to hybrid */
+- (void)mac->ops.read_iosf_sb_reg(hw,
+- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+- reg_val |= 0x0400;
+-
+- (void)mac->ops.write_iosf_sb_reg(hw,
+- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+- /* manually control the config */
+- (void)mac->ops.read_iosf_sb_reg(hw,
+- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+- reg_val |= 0x20002240;
+-
+- (void)mac->ops.write_iosf_sb_reg(hw,
+- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+- /* move the AN base page values */
+- (void)mac->ops.read_iosf_sb_reg(hw,
+- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+- reg_val |= 0x1;
+-
+- (void)mac->ops.write_iosf_sb_reg(hw,
+- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+- /* set the AN37 over CB mode */
+- (void)mac->ops.read_iosf_sb_reg(hw,
+- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+- reg_val |= 0x20000000;
+-
+- (void)mac->ops.write_iosf_sb_reg(hw,
+- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+- /* restart AN manually */
+- (void)mac->ops.read_iosf_sb_reg(hw,
+- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+-
+- (void)mac->ops.write_iosf_sb_reg(hw,
+- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
++ status = mac->ops.write_iosf_sb_reg(hw,
++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+@@ -1803,7 +1754,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+	/* If no SFP module is present, then return success, since the SFP
+	 * not present error is not accepted in the setup MAC link flow.
+ */
+- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
++ if (ret_val == -ENOENT)
+ return 0;
+
+ if (ret_val)
+@@ -1853,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+	/* If no SFP module is present, then return success, since the SFP
+	 * not present error is not accepted in the setup MAC link flow.
+ */
+- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
++ if (ret_val == -ENOENT)
+ return 0;
+
+ if (ret_val)
+@@ -1863,7 +1814,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
+- return IXGBE_ERR_PHY_ADDR_INVALID;
++ return -EFAULT;
+
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
+@@ -1962,7 +1913,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+ u16 i, autoneg_status;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+
+ status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ link_up_wait_to_complete);
+@@ -2145,9 +2096,9 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ */
+ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+ {
+- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ ixgbe_link_speed speed;
++ s32 status = -EIO;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+@@ -2165,7 +2116,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+ /* Check if auto-negotiation has completed */
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+ if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
++ status = -EIO;
+ goto out;
+ }
+
+@@ -2369,18 +2320,18 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
++ * @is_overtemp: indicates whether an overtemp event was encountered
+ *
+ * Determine if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+- *
+- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+- * failure alarm, else return PHY access status.
+ **/
+-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
++static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
++ bool *is_overtemp)
+ {
+ u32 status;
+ u16 reg;
+
++ *is_overtemp = false;
+ *lsc = false;
+
+ /* Vendor alarm triggered */
+@@ -2412,7 +2363,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, false);
+- return IXGBE_ERR_OVERTEMP;
++ *is_overtemp = true;
++ return -EIO;
+ }
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
+ /* device fault alarm triggered */
+@@ -2426,7 +2378,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+ if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
+ /* power down the PHY in case the PHY FW didn't */
+ ixgbe_set_copper_phy_power(hw, false);
+- return IXGBE_ERR_OVERTEMP;
++ *is_overtemp = true;
++ return -EIO;
+ }
+ }
+
+@@ -2462,12 +2415,12 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+ **/
+ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+ {
++ bool lsc, overtemp;
+ u32 status;
+ u16 reg;
+- bool lsc;
+
+ /* Clear interrupt flags */
+- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
++ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, &overtemp);
+
+ /* Enable link status change alarm */
+
+@@ -2546,21 +2499,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+ /**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
++ * @is_overtemp: indicates whether an overtemp event was encountered
+ *
+ * Handle external Base T PHY interrupt. If high temperature
+ * failure alarm then return error, else if link status change
+ * then setup internal/external PHY link
+- *
+- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+- * failure alarm, else return PHY access status.
+ **/
+-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
++static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
++ bool *is_overtemp)
+ {
+ struct ixgbe_phy_info *phy = &hw->phy;
+ bool lsc;
+ u32 status;
+
+- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
++ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, is_overtemp);
+ if (status)
+ return status;
+
+@@ -2692,7 +2644,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+ u16 speed;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+
+ if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
+@@ -2735,7 +2687,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+ break;
+ default:
+ /* Internal PHY does not support anything else */
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+
+ return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+@@ -2767,7 +2719,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+ u16 phy_data;
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+ /* To turn on the LED, set mode to ON. */
+ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+@@ -2789,7 +2741,7 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+ u16 phy_data;
+
+ if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
+- return IXGBE_ERR_PARAM;
++ return -EINVAL;
+
+	/* To turn off the LED, set mode to OFF. */
+ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
+@@ -2813,8 +2765,9 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return 0
+- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
++ * else returns -EBUSY when encountering an error acquiring the
++ * semaphore, -EIO when the command fails or -EINVAL when incorrect
++ * params are passed.
+ **/
+ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+@@ -2825,7 +2778,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ int i;
+
+ if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
+- return IXGBE_ERR_INVALID_ARGUMENT;
++ return -EINVAL;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+@@ -2850,7 +2803,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status !=
+ FW_CEM_RESP_STATUS_SUCCESS)
+- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
++ return -EIO;
+ return 0;
+ }
+
+@@ -2907,7 +2860,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+@@ -2942,7 +2895,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ switch (hw->device_id) {
+@@ -2986,8 +2939,8 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+ {
+ u32 link_s1, lp_an_page_low, an_cntl_1;
+- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
++ s32 status = -EIO;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+@@ -3013,7 +2966,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+
+ if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ hw_dbg(hw, "Auto-Negotiation did not complete\n");
+- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
++ status = -EIO;
+ goto out;
+ }
+
+@@ -3187,21 +3140,23 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+ /**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
++ *
++ * Return true when an overtemp event is detected, otherwise false.
+ */
+-static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
++static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+ {
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+ if (rc)
+- return rc;
++ return false;
+
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+- return IXGBE_ERR_OVERTEMP;
++ return true;
+ }
+- return 0;
++ return false;
+ }
+
+ /**
+@@ -3222,9 +3177,8 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+ */
+ if (hw->mac.type == ixgbe_mac_x550em_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+- hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
++ hw->phy.mdio.prtad = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
++ hw->phy.nw_mng_if_sel);
+ }
+ }
+
+@@ -3251,8 +3205,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+- ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
++ if (ret_val == -EOPNOTSUPP || ret_val == -EFAULT)
+ return ret_val;
+
+ /* Setup function pointers based on detected hardware */
+@@ -3460,8 +3413,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+
+ /* PHY ops must be identified and initialized prior to reset */
+ status = hw->phy.ops.init(hw);
+- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+- status == IXGBE_ERR_PHY_ADDR_INVALID)
++ if (status == -EOPNOTSUPP || status == -EFAULT)
+ return status;
+
+ /* start the external PHY */
+@@ -3477,7 +3429,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+ hw->phy.sfp_setup_needed = false;
+ }
+
+- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++ if (status == -EOPNOTSUPP)
+ return status;
+
+ /* Reset PHY */
+@@ -3501,7 +3453,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_dbg(hw, "semaphore failed with %d", status);
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+@@ -3519,7 +3471,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+- status = IXGBE_ERR_RESET_FAILED;
++ status = -EIO;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+@@ -3615,7 +3567,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+- return IXGBE_ERR_INVALID_LINK_SETTINGS;
++ return -EINVAL;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+@@ -3672,7 +3624,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+- return IXGBE_ERR_CONFIG;
++ return -EIO;
+ }
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+@@ -3768,7 +3720,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+ return 0;
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+- if (status != IXGBE_ERR_TOKEN_RETRY)
++ if (status != -EAGAIN)
+ return status;
+ msleep(FW_PHY_TOKEN_DELAY);
+ }
+@@ -3812,7 +3764,7 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+@@ -3838,7 +3790,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+- return IXGBE_ERR_SWFW_SYNC;
++ return -EBUSY;
+
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index 1732ec3c3dbdc4..a718207988f2c4 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+- (skb->len < (ETH_HLEN +
+- (ip_hdr(skb)->ihl << 2) +
+- sizeof(struct udphdr)))) {
++
++ if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
++ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+- skb_set_transport_header(skb,
+- ETH_HLEN + (ip_hdr(skb)->ihl << 2));
++ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
+index f5961bdcc48096..c33c31019562f6 100644
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
+ if (ch->dma.irq)
+ free_irq(ch->dma.irq, priv);
+ if (IS_RX(ch->idx)) {
+- int desc;
++ struct ltq_dma_channel *dma = &ch->dma;
+
+- for (desc = 0; desc < LTQ_DESC_NUM; desc++)
++ for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
+ dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ }
+ }
+@@ -482,7 +482,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+ unsigned long flags;
+ u32 byte_offset;
+
+- len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
++ if (skb_put_padto(skb, ETH_ZLEN))
++ return NETDEV_TX_OK;
++ len = skb->len;
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+ netdev_err(dev, "tx ring full\n");
+diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
+index 674913184ebf5b..2ef613a237d862 100644
+--- a/drivers/net/ethernet/marvell/mvmdio.c
++++ b/drivers/net/ethernet/marvell/mvmdio.c
+@@ -23,6 +23,7 @@
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+@@ -58,11 +59,6 @@
+ * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled)
+ */
+ #define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */
+-#define MVMDIO_SMI_POLL_INTERVAL_MIN 45
+-#define MVMDIO_SMI_POLL_INTERVAL_MAX 55
+-
+-#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150
+-#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160
+
+ struct orion_mdio_dev {
+ void __iomem *regs;
+@@ -84,8 +80,6 @@ enum orion_mdio_bus_type {
+
+ struct orion_mdio_ops {
+ int (*is_done)(struct orion_mdio_dev *);
+- unsigned int poll_interval_min;
+- unsigned int poll_interval_max;
+ };
+
+ /* Wait for the SMI unit to be ready for another operation
+@@ -94,34 +88,23 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops,
+ struct mii_bus *bus)
+ {
+ struct orion_mdio_dev *dev = bus->priv;
+- unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT);
+- unsigned long end = jiffies + timeout;
+- int timedout = 0;
++ unsigned long timeout;
++ int done;
+
+- while (1) {
+- if (ops->is_done(dev))
++ if (dev->err_interrupt <= 0) {
++ if (!read_poll_timeout_atomic(ops->is_done, done, done, 2,
++ MVMDIO_SMI_TIMEOUT, false, dev))
++ return 0;
++ } else {
++ /* wait_event_timeout does not guarantee a delay of at
++ * least one whole jiffie, so timeout must be no less
++ * than two.
++ */
++ timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2);
++
++ if (wait_event_timeout(dev->smi_busy_wait,
++ ops->is_done(dev), timeout))
+ return 0;
+- else if (timedout)
+- break;
+-
+- if (dev->err_interrupt <= 0) {
+- usleep_range(ops->poll_interval_min,
+- ops->poll_interval_max);
+-
+- if (time_is_before_jiffies(end))
+- ++timedout;
+- } else {
+- /* wait_event_timeout does not guarantee a delay of at
+- * least one whole jiffie, so timeout must be no less
+- * than two.
+- */
+- if (timeout < 2)
+- timeout = 2;
+- wait_event_timeout(dev->smi_busy_wait,
+- ops->is_done(dev), timeout);
+-
+- ++timedout;
+- }
+ }
+
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+@@ -135,8 +118,6 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+
+ static const struct orion_mdio_ops orion_mdio_smi_ops = {
+ .is_done = orion_mdio_smi_is_done,
+- .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN,
+- .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX,
+ };
+
+ static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
+@@ -194,8 +175,6 @@ static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev)
+
+ static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
+ .is_done = orion_mdio_xsmi_is_done,
+- .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN,
+- .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
+ };
+
+ static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index d483b8c00ec0e2..165f76d1231c19 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+ {
+ if (sset == ETH_SS_STATS) {
++ struct mvneta_port *pp = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+- page_pool_ethtool_stats_get_strings(data);
++ if (!pp->bm_priv) {
++ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
++ page_pool_ethtool_stats_get_strings(data);
++ }
+ }
+ }
+
+@@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+ struct page_pool_stats stats = {};
+ int i;
+
+- for (i = 0; i < rxq_number; i++)
+- page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++ for (i = 0; i < rxq_number; i++) {
++ if (pp->rxqs[i].page_pool)
++ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+ }
+@@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+
+- mvneta_ethtool_pp_stats(pp, data);
++ if (!pp->bm_priv)
++ mvneta_ethtool_pp_stats(pp, data);
+ }
+
+ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+ {
+- if (sset == ETH_SS_STATS)
+- return ARRAY_SIZE(mvneta_statistics) +
+- page_pool_ethtool_stats_get_count();
++ if (sset == ETH_SS_STATS) {
++ int count = ARRAY_SIZE(mvneta_statistics);
++ struct mvneta_port *pp = netdev_priv(dev);
++
++ if (!pp->bm_priv)
++ count += page_pool_ethtool_stats_get_count();
++
++ return count;
++ }
+
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+index e809f91c08fb9d..9e02e4367bec81 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+@@ -1088,7 +1088,7 @@ struct mvpp2 {
+ unsigned int max_port_rxqs;
+
+ /* Workqueue to gather hardware statistics */
+- char queue_name[30];
++ char queue_name[31];
+ struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 21c3f9b015c85d..34051c9abd97df 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+ }
+
++/* Cleanup pool before actual initialization in the OS */
++static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
++{
++ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
++ u32 val;
++ int i;
++
++ /* Drain the BM from all possible residues left by firmware */
++ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
++ mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
++
++ put_cpu();
++
++ /* Stop the BM pool */
++ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
++ val |= MVPP2_BM_STOP_MASK;
++ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
++}
++
+ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+ {
+ enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+ int i, err, poolnum = MVPP2_BM_POOLS_NUM;
+ struct mvpp2_port *port;
+
++ if (priv->percpu_pools)
++ poolnum = mvpp2_get_nrxqs(priv) * 2;
++
++ /* Clean up the pool state in case it contains stale state */
++ for (i = 0; i < poolnum; i++)
++ mvpp2_bm_pool_cleanup(priv, i);
++
+ if (priv->percpu_pools) {
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+ }
+ }
+
+- poolnum = mvpp2_get_nrxqs(priv) * 2;
+ for (i = 0; i < poolnum; i++) {
+ /* the pool in use */
+ int pn = i / (poolnum / 2);
+@@ -928,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+ {
+ struct mvpp2_port *port;
+- int i;
++ int i, j;
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->priv->percpu_pools) {
+- for (i = 0; i < port->nrxqs; i++)
+- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
++ for (j = 0; j < port->nrxqs; j++)
++ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+ port->tx_fc & en);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+@@ -3976,7 +4001,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ }
+ }
+
+- skb = build_skb(data, frag_size);
++ if (frag_size)
++ skb = build_skb(data, frag_size);
++ else
++ skb = slab_build_skb(data);
+ if (!skb) {
+ netdev_warn(port->dev, "skb build failed\n");
+ goto err_drop_frame;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 5b46ca47c8e597..2ee1374db4c06e 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -1076,7 +1076,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev)
+
+ pci_read_config_byte(pdev, (pos + 8), &status);
+ dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
+- return status;
++#define FW_STATUS_READY 1ULL
++ return status == FW_STATUS_READY;
+ }
+ return false;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index e06f77ad6106ba..2539c985f695a7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -808,6 +808,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
++ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
++ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
++ cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
++ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
++
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+@@ -1340,7 +1345,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+- wake_up_interruptible(&lmac->wq_cmd_cmplt);
++ wake_up(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+index 9690ac01f02c8d..7d741e3ba8c514 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+ }
+ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
++static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
+ {
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
++ u64 intr_val;
+
+ tx_hdr = hw_mbase + mbox->tx_start;
+ rx_hdr = hw_mbase + mbox->rx_start;
+@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+
+ spin_unlock(&mdev->mbox_lock);
+
++ /* Check if interrupt pending */
++ intr_val = readq((void __iomem *)mbox->reg_base +
++ (mbox->trigger | (devid << mbox->tr_shift)));
++
++ intr_val |= data;
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+- writeq(1, (void __iomem *)mbox->reg_base +
++ writeq(intr_val, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+ }
++
++void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
++{
++ otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
++}
+ EXPORT_SYMBOL(otx2_mbox_msg_send);
+
++void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
++{
++ otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
++}
++EXPORT_SYMBOL(otx2_mbox_msg_send_up);
++
++bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
++{
++ u64 data;
++
++ data = readq((void __iomem *)mbox->reg_base +
++ (mbox->trigger | (devid << mbox->tr_shift)));
++
++	/* If data is non-zero, wait for ~1ms and return to caller
++ * whether data has changed to zero or not after the wait.
++ */
++ if (!data)
++ return true;
++
++ usleep_range(950, 1000);
++
++ data = readq((void __iomem *)mbox->reg_base +
++ (mbox->trigger | (devid << mbox->tr_shift)));
++
++ return data == 0;
++}
++EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
++
+ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+ {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index 6b5b06c2b4e996..e883c0929b1a9b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -16,6 +16,9 @@
+
+ #define MBOX_SIZE SZ_64K
+
++#define MBOX_DOWN_MSG 1
++#define MBOX_UP_MSG 2
++
+ /* AF/PF: PF initiated, PF/VF VF initiated */
+ #define MBOX_DOWN_RX_START 0
+ #define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs, unsigned long *bmap);
+ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
++void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+ }
+
++bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
++
+ /* Mailbox message types */
+ #define MBOX_MSG_MASK 0xFFFF
+ #define MBOX_MSG_INVALID 0xFFFE
+@@ -1655,7 +1661,7 @@ struct cpt_lf_alloc_req_msg {
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+ u16 eng_grpmsk;
+- int blkaddr;
++ u8 blkaddr;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
+ };
+@@ -1938,7 +1944,7 @@ struct mcs_hw_info {
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+- u8 sa_entries; /* PN table entries = SA entries */
++ u16 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index c43f19dfbd7440..c1775bd01c2b48 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+- reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
++ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+@@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+- reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
++ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+@@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id)
+ return NULL;
+ }
+
++bool is_mcs_bypass(int mcs_id)
++{
++ struct mcs *mcs_dev;
++
++ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
++ if (mcs_dev->mcs_id == mcs_id)
++ return mcs_dev->bypass;
++ }
++ return true;
++}
++
+ void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+ {
+ u64 val = 0;
+@@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs)
+ return err;
+ }
+
+-static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
++static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
+ {
+ u64 val;
+
+@@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
++ mcs->bypass = bypass;
+ }
+
+ static void mcs_global_cfg(struct mcs *mcs)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+index 0f89dcb764654b..f927cc61dfd21f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+@@ -149,6 +149,7 @@ struct mcs {
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
++ bool bypass;
+ };
+
+ struct mcs_ops {
+@@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *
+ int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+ int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+ int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
++bool is_mcs_bypass(int mcs_id);
+
+ /* CN10K-B APIs */
+ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+index f3ab01fc363c8d..f4c6de89002c1d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+@@ -810,14 +810,37 @@
+ offset = 0x9d8ull; \
+ offset; })
+
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
++ u64 offset; \
++ \
++ offset = 0xee80ull; \
++ if (mcs->hw->mcs_blks > 1) \
++ offset = 0xe818ull; \
++ offset += (a) * 0x8ull; \
++ offset; })
++
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \
++ u64 offset; \
++ \
++ offset = 0xa680ull; \
++ if (mcs->hw->mcs_blks > 1) \
++ offset = 0xd018ull; \
++ offset += (a) * 0x8ull; \
++ offset; })
++
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \
++ u64 offset; \
++ \
++ offset = 0xf680ull; \
++ if (mcs->hw->mcs_blks > 1) \
++ offset = 0xe018ull; \
++ offset += (a) * 0x8ull; \
++ offset; })
++
+ #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+index dfd23580e3b8e1..d39d86e694ccf7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+ {
+ struct mcs_intr_info *req;
+- int err, pf;
++ int pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
++ mutex_lock(&rvu->mbox_lock);
++
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+- if (!req)
++ if (!req) {
++ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
++ }
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+- if (err)
+- dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
++ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
++
++ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
++
++ mutex_unlock(&rvu->mbox_lock);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index de9fbd98dfb76c..2c028a81bbc518 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
+ NPC_LT_LB_CUSTOM1 = 0xF,
+ };
+
++/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
++ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
++ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
++ */
+ enum npc_kpu_lc_ltype {
+- NPC_LT_LC_IP = 1,
++ NPC_LT_LC_PTP = 1,
++ NPC_LT_LC_IP,
+ NPC_LT_LC_IP_OPT,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_IP6_EXT,
+@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+- NPC_LT_LC_PTP,
+ NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
+ NPC_LT_LC_CUSTOM0 = 0xE,
+@@ -520,7 +524,7 @@ struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+-};
++} __packed;
+
+ struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+@@ -528,7 +532,7 @@ struct npc_lt_def_ipsec {
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+-};
++} __packed;
+
+ struct npc_lt_def_apad {
+ u8 ltype_mask;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+index af21e2030cff28..76218f1cb45958 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+@@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
++ /* Disable forward pause to driver */
++ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
++ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
++ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
++
+ /* Enable channel mask for all LMACS */
+ if (is_dev_rpm2(rpm))
+ rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
+@@ -501,6 +506,7 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+ rpm_t *rpm = rpmd;
+ u8 num_lmacs;
+ u32 fifo_len;
++ u16 max_lmac;
+
+ lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
+ /* LMACs are divided into two groups and each group
+@@ -508,7 +514,11 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+ * Group0 lmac_id range {0..3}
+ * Group1 lmac_id range {4..7}
+ */
+- fifo_len = rpm->mac_ops->fifo_len / 2;
++ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
++ if (max_lmac > 4)
++ fifo_len = rpm->mac_ops->fifo_len / 2;
++ else
++ fifo_len = rpm->mac_ops->fifo_len;
+
+ if (lmac_id < 4) {
+ num_lmacs = hweight8(lmac_info & 0xF);
+@@ -616,12 +626,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
++ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+- RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
++ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
+ }
+
+ if (tx_pause) {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 22c395c7d040b4..5906f5f8d19041 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -1638,7 +1638,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+- pcifunc, req->sso, block->lf.max);
++ pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+@@ -2114,7 +2114,7 @@ MBOX_MESSAGES
+ }
+ }
+
+-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
++static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
+ {
+ struct rvu *rvu = mwork->rvu;
+ int offset, err, id, devid;
+@@ -2181,6 +2181,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+ }
+ mw->mbox_wrk[devid].num_msgs = 0;
+
++ if (poll)
++ otx2_mbox_wait_for_zero(mbox, devid);
++
+ /* Send mbox responses to VF/PF */
+ otx2_mbox_msg_send(mbox, devid);
+ }
+@@ -2188,15 +2191,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+ static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+ {
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
++ struct rvu *rvu = mwork->rvu;
+
+- __rvu_mbox_handler(mwork, TYPE_AFPF);
++ mutex_lock(&rvu->mbox_lock);
++ __rvu_mbox_handler(mwork, TYPE_AFPF, true);
++ mutex_unlock(&rvu->mbox_lock);
+ }
+
+ static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+ {
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+- __rvu_mbox_handler(mwork, TYPE_AFVF);
++ __rvu_mbox_handler(mwork, TYPE_AFVF, false);
+ }
+
+ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+@@ -2371,6 +2377,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ }
+ }
+
++ mutex_init(&rvu->mbox_lock);
++
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions) {
+ err = -ENOMEM;
+@@ -2520,10 +2528,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ }
+ }
+
+-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
++static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
+ {
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+- int vfs = rvu->vfs;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+@@ -2537,6 +2544,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+
+ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
++{
++ struct rvu *rvu = (struct rvu *)rvu_irq;
++ int vfs = rvu->vfs;
++ u64 intr;
++
++ /* Sync with mbox memory region */
++ rmb();
++
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+@@ -2631,6 +2650,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+ rvu_mac_reset(rvu, pcifunc);
+
++ if (rvu->mcs_blk_cnt)
++ rvu_mcs_flr_handler(rvu, pcifunc);
++
+ mutex_unlock(&rvu->flr_lock);
+ }
+
+@@ -2871,7 +2893,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+- rvu_mbox_intr_handler, 0,
++ rvu_mbox_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index c4d999ef5ab4b2..e81cfcaf9ce4fe 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -290,6 +290,7 @@ struct nix_mark_format {
+
+ /* smq(flush) to tl1 cir/pir info */
+ struct nix_smq_tree_ctx {
++ u16 schq;
+ u64 cir_off;
+ u64 cir_val;
+ u64 pir_off;
+@@ -299,8 +300,6 @@ struct nix_smq_tree_ctx {
+ /* smq flush context */
+ struct nix_smq_flush_ctx {
+ int smq;
+- u16 tl1_schq;
+- u16 tl2_schq;
+ struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
+ };
+
+@@ -345,6 +344,7 @@ struct nix_hw {
+ struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
+ u64 *tx_credits;
++ u8 cc_mcs_cnt;
+ };
+
+ /* RVU block's capabilities or functionality,
+@@ -550,6 +550,8 @@ struct rvu {
+ spinlock_t mcs_intrq_lock;
+ /* CPT interrupt lock */
+ spinlock_t cpt_intr_lock;
++
++ struct mutex mbox_lock; /* Serialize mbox up and down msgs */
+ };
+
+ static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+@@ -904,6 +906,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+ void *rvu_first_cgx_pdata(struct rvu *rvu);
+ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
++int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
+ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+ int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index f2b1edf1bb43c0..19075f217d00c5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+ continue;
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
++ if (iter >= MAX_LMAC_COUNT)
++ continue;
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+@@ -232,7 +234,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+ struct cgx_link_user_info *linfo;
+ struct cgx_link_info_msg *msg;
+ unsigned long pfmap;
+- int err, pfid;
++ int pfid;
+
+ linfo = &event->link_uinfo;
+ pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+@@ -255,16 +257,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+ continue;
+ }
+
++ mutex_lock(&rvu->mbox_lock);
++
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
+- if (!msg)
++ if (!msg) {
++ mutex_unlock(&rvu->mbox_lock);
+ continue;
++ }
++
+ msg->link_info = *linfo;
+- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+- if (err)
+- dev_warn(rvu->dev, "notification to pf %d failed\n",
+- pfid);
++
++ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
++
++ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
++
++ mutex_unlock(&rvu->mbox_lock);
+ } while (pfmap);
+ }
+
+@@ -465,6 +473,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+ }
+
++int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
++{
++ int pf = rvu_get_pf(pcifunc);
++ struct mac_ops *mac_ops;
++ u8 cgx_id, lmac_id;
++ void *cgxd;
++
++ if (!is_cgx_config_permitted(rvu, pcifunc))
++ return LMAC_AF_ERR_PERM_DENIED;
++
++ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
++ cgxd = rvu_cgx_pdata(cgx_id, rvu);
++ mac_ops = get_mac_ops(cgxd);
++
++ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
++}
++
+ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+ {
+ struct mac_ops *mac_ops;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+index f047185f38e0f3..daf4b951e90591 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+@@ -632,7 +632,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ return ret;
+ }
+
+-static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
++static bool validate_and_update_reg_offset(struct rvu *rvu,
++ struct cpt_rd_wr_reg_msg *req,
++ u64 *reg_offset)
+ {
+ u64 offset = req->reg_offset;
+ int blkaddr, num_lfs, lf;
+@@ -663,6 +665,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+ if (lf < 0)
+ return false;
+
++ /* Translate local LF's offset to global CPT LF's offset to
++ * access LFX register.
++ */
++ *reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
++
+ return true;
+ } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Registers that can be accessed from PF */
+@@ -696,6 +703,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+ {
++ u64 offset = req->reg_offset;
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+@@ -707,17 +715,17 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
++ if (!validate_and_update_reg_offset(rvu, req, &offset))
++ return CPT_AF_ERR_ACCESS_DENIED;
++
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+- if (!is_valid_offset(rvu, req))
+- return CPT_AF_ERR_ACCESS_DENIED;
+-
+ if (req->is_write)
+- rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
++ rvu_write64(rvu, blkaddr, offset, req->val);
+ else
+- rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
++ rsp->val = rvu_read64(rvu, blkaddr, offset);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index d30e84803481da..feca86e429df20 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ u16 pcifunc;
+ int ret, lf;
+
+- cmd_buf = memdup_user(buffer, count + 1);
++ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return -ENOMEM;
+
+- cmd_buf[count] = '\0';
+-
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+index 41df5ac23f927f..bffe04e6d0254a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+@@ -642,7 +642,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+- goto err;
++ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+@@ -650,9 +650,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+-err:
+- rvu_nix_health_reporters_destroy(rvu_dl);
+- return -ENOMEM;
+ }
+
+ static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+@@ -1285,7 +1282,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+- goto err;
++ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+@@ -1293,9 +1290,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+ INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+
+ return 0;
+-err:
+- rvu_npa_health_reporters_destroy(rvu_dl);
+- return -ENOMEM;
+ }
+
+ static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 23c2f2ed2fb832..224a025283ca7d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -12,6 +12,7 @@
+ #include "rvu_reg.h"
+ #include "rvu.h"
+ #include "npc.h"
++#include "mcs.h"
+ #include "cgx.h"
+ #include "lmac_common.h"
+ #include "rvu_npc_hash.h"
+@@ -2145,14 +2146,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+ schq = smq;
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++ smq_tree_ctx->schq = schq;
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+- smq_flush_ctx->tl1_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+ smq_tree_ctx->pir_off = 0;
+ smq_tree_ctx->pir_val = 0;
+ parent_off = 0;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+- smq_flush_ctx->tl2_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+ parent_off = NIX_AF_TL2X_PARENT(schq);
+@@ -2187,8 +2187,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+ {
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
++ int tl2, tl2_schq;
+ u64 regoff;
+- int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+@@ -2196,16 +2196,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+
+ /* loop through all TL2s with matching PF_FUNC */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
++ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
+ for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+ /* skip the smq(flush) TL2 */
+- if (tl2 == smq_flush_ctx->tl2_schq)
++ if (tl2 == tl2_schq)
+ continue;
+ /* skip unused TL2s */
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+ continue;
+ /* skip if PF_FUNC doesn't match */
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
+- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
++ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ /* enable/disable XOFF */
+@@ -2247,10 +2248,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+ {
+ struct nix_smq_flush_ctx *smq_flush_ctx;
++ int err, restore_tx_en = 0, i;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+- int err, restore_tx_en = 0;
+- u64 cfg;
++ u16 tl2_tl3_link_schq;
++ u8 link, link_level;
++ u64 cfg, bmap = 0;
+
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+@@ -2274,16 +2277,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+- /* Do SMQ flush and set enqueue xoff */
+- cfg |= BIT_ULL(50) | BIT_ULL(49);
+- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+-
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
++ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
++ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
++ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
++ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
++
++ /* SMQ set enqueue xoff */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++ cfg |= BIT_ULL(50);
++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
++ /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++ cfg = rvu_read64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++ if (!(cfg & BIT_ULL(12)))
++ continue;
++ bmap |= (1 << i);
++ cfg &= ~BIT_ULL(12);
++ rvu_write64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++ }
++
++ /* Do SMQ flush and set enqueue xoff */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++ cfg |= BIT_ULL(50) | BIT_ULL(49);
++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+@@ -2292,6 +2317,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+ nixlf, smq);
+
++ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++ if (!(bmap & (1 << i)))
++ continue;
++ cfg = rvu_read64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++ cfg |= BIT_ULL(12);
++ rvu_write64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++ }
++
+ /* clear XOFF on TL2s */
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+@@ -3516,6 +3552,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
+ return -ERANGE;
+ }
+
++/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
++#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
++/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
++#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
++
+ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ {
+ int idx, nr_field, key_off, field_marker, keyoff_marker;
+@@ -3585,7 +3626,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ field->hdr_offset = 9; /* offset */
+ field->bytesm1 = 0; /* 1 byte */
+ field->ltype_match = NPC_LT_LC_IP;
+- field->ltype_mask = 0xF;
++ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV4:
+ case NIX_FLOW_KEY_TYPE_INNR_IPV4:
+@@ -3612,8 +3653,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ field->bytesm1 = 3; /* DIP, 4 bytes */
+ }
+ }
+-
+- field->ltype_mask = 0xF; /* Match only IPv4 */
++ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
+ keyoff_marker = false;
+ break;
+ case NIX_FLOW_KEY_TYPE_IPV6:
+@@ -3642,7 +3682,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ field->bytesm1 = 15; /* DIP,16 bytes */
+ }
+ }
+- field->ltype_mask = 0xF; /* Match only IPv6 */
++ field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
+ break;
+ case NIX_FLOW_KEY_TYPE_TCP:
+ case NIX_FLOW_KEY_TYPE_UDP:
+@@ -4142,90 +4182,18 @@ static void nix_find_link_frs(struct rvu *rvu,
+ req->minlen = minlen;
+ }
+
+-static int
+-nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
+- u16 pcifunc, u64 tx_credits)
+-{
+- struct rvu_hwinfo *hw = rvu->hw;
+- int pf = rvu_get_pf(pcifunc);
+- u8 cgx_id = 0, lmac_id = 0;
+- unsigned long poll_tmo;
+- bool restore_tx_en = 0;
+- struct nix_hw *nix_hw;
+- u64 cfg, sw_xoff = 0;
+- u32 schq = 0;
+- u32 credits;
+- int rc;
+-
+- nix_hw = get_nix_hw(rvu->hw, blkaddr);
+- if (!nix_hw)
+- return NIX_AF_ERR_INVALID_NIXBLK;
+-
+- if (tx_credits == nix_hw->tx_credits[link])
+- return 0;
+-
+- /* Enable cgx tx if disabled for credits to be back */
+- if (is_pf_cgxmapped(rvu, pf)) {
+- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+- restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+- lmac_id, true);
+- }
+-
+- mutex_lock(&rvu->rsrc_lock);
+- /* Disable new traffic to link */
+- if (hw->cap.nix_shaping) {
+- schq = nix_get_tx_link(rvu, pcifunc);
+- sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
+- rvu_write64(rvu, blkaddr,
+- NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
+- }
+-
+- rc = NIX_AF_ERR_LINK_CREDITS;
+- poll_tmo = jiffies + usecs_to_jiffies(200000);
+- /* Wait for credits to return */
+- do {
+- if (time_after(jiffies, poll_tmo))
+- goto exit;
+- usleep_range(100, 200);
+-
+- cfg = rvu_read64(rvu, blkaddr,
+- NIX_AF_TX_LINKX_NORM_CREDIT(link));
+- credits = (cfg >> 12) & 0xFFFFFULL;
+- } while (credits != nix_hw->tx_credits[link]);
+-
+- cfg &= ~(0xFFFFFULL << 12);
+- cfg |= (tx_credits << 12);
+- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+- rc = 0;
+-
+- nix_hw->tx_credits[link] = tx_credits;
+-
+-exit:
+- /* Enable traffic back */
+- if (hw->cap.nix_shaping && !sw_xoff)
+- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
+-
+- /* Restore state of cgx tx */
+- if (restore_tx_en)
+- rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+-
+- mutex_unlock(&rvu->rsrc_lock);
+- return rc;
+-}
+-
+ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp)
+ {
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+- int blkaddr, schq, link = -1;
+- struct nix_txsch *txsch;
+- u64 cfg, lmac_fifo_len;
++ int blkaddr, link = -1;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
++ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+@@ -4246,25 +4214,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+- /* Check if requester wants to update SMQ's */
+- if (!req->update_smq)
+- goto rx_frscfg;
+-
+- /* Update min/maxlen in each of the SMQ attached to this PF/VF */
+- txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+- mutex_lock(&rvu->rsrc_lock);
+- for (schq = 0; schq < txsch->schq.max; schq++) {
+- if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+- continue;
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+- cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+- if (req->update_minlen)
+- cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+- }
+- mutex_unlock(&rvu->rsrc_lock);
+-
+-rx_frscfg:
+ /* Check if config is for SDP link */
+ if (req->sdp_link) {
+ if (!hw->sdp_links)
+@@ -4287,7 +4236,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+-
+ linkcfg:
+ nix_find_link_frs(rvu, req, pcifunc);
+
+@@ -4297,19 +4245,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+- if (req->sdp_link || pf == 0)
+- return 0;
+-
+- /* Update transmit credits for CGX links */
+- lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
+- if (!lmac_fifo_len) {
+- dev_err(rvu->dev,
+- "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+- __func__, cgx, lmac);
+- return 0;
+- }
+- return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
+- (lmac_fifo_len - req->maxlen) / 16);
++ return 0;
+ }
+
+ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+@@ -4389,6 +4325,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
++ /* Get MCS external bypass status for CN10K-B */
++ if (mcs_get_blkcnt() == 1) {
++ /* Adjust for 2 credits when external bypass is disabled */
++ nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
++ }
++
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+@@ -4412,6 +4354,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
++ cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
+
+ link = iter + slink;
+ nix_hw->tx_credits[link] = tx_credits;
+@@ -4561,18 +4504,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
++ }
+
+- /* Set chan/link to backpressure TL3 instead of TL2 */
+- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
++ /* Set chan/link to backpressure TL3 instead of TL2 */
++ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+- * This sticky mode is known to cause SQ stalls when multiple
+- * SQs are mapped to same SMQ and transmitting pkts at a time.
+- */
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+- cfg &= ~BIT_ULL(15);
+- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+- }
++ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
++ * This sticky mode is known to cause SQ stalls when multiple
++ * SQs are mapped to same SMQ and transmitting pkts at a time.
++ */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
++ cfg &= ~BIT_ULL(15);
++ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+
+ ltdefs = rvu->kpu.lt_def;
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+@@ -4833,7 +4776,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+- return rvu_cgx_start_stop_io(rvu, pcifunc, false);
++ err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
++ if (err)
++ return err;
++
++ rvu_cgx_tx_enable(rvu, pcifunc, true);
++
++ return 0;
+ }
+
+ #define RX_SA_BASE GENMASK_ULL(52, 7)
+@@ -5505,6 +5454,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
++ if (idx == MAX_BANDPROF_PER_PFFUNC)
++ break;
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+@@ -5518,8 +5469,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+- if (idx == MAX_BANDPROF_PER_PFFUNC)
+- break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 16cfc802e348d9..00ef6d201b973a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+- nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
++ if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
++ dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
++ __func__, pf_func);
++ /* Action 0 is drop */
++ return 0;
++ }
++
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+@@ -431,6 +437,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ return;
+ }
+
++ /* AF modifies given action iff PF/VF has requested it */
++ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
++ return;
++
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
+@@ -665,6 +675,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int blkaddr, ucast_idx, index;
+ struct nix_rx_action action = { 0 };
+ u64 relaxed_mask;
++ u8 flow_key_alg;
+
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
+ return;
+@@ -695,6 +706,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+
++ flow_key_alg = action.flow_key_alg;
++
+ /* RX_ACTION set to MCAST for CGX PF's */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+@@ -734,7 +747,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ req.vf = pcifunc;
+ req.index = action.index;
+ req.match_id = action.match_id;
+- req.flow_key_alg = action.flow_key_alg;
++ req.flow_key_alg = flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+ }
+@@ -848,6 +861,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action = { 0 };
+ struct rvu_pfvf *pfvf;
++ u8 flow_key_alg;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+@@ -882,6 +896,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
++ flow_key_alg = action.flow_key_alg;
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+@@ -918,7 +933,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+- req.flow_key_alg = action.flow_key_alg;
++ req.flow_key_alg = flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+ }
+@@ -984,11 +999,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ mutex_unlock(&mcam->lock);
+ }
+
++static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
++ struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
++ int alg_idx)
++
++{
++ struct npc_mcam *mcam = &rvu->hw->mcam;
++ struct rvu_hwinfo *hw = rvu->hw;
++ int bank, op_rss;
++
++ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
++ return;
++
++ op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
++
++ bank = npc_get_bank(mcam, mcam_index);
++ mcam_index &= (mcam->banksize - 1);
++
++ /* If Rx action is MCAST update only RSS algorithm index */
++ if (!op_rss) {
++ *(u64 *)&action = rvu_read64(rvu, blkaddr,
++ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
++
++ action.flow_key_alg = alg_idx;
++ }
++ rvu_write64(rvu, blkaddr,
++ NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
++}
++
+ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index)
+ {
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+- struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
+@@ -1044,15 +1086,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ /* If PF's promiscuous entry is enabled,
+ * Set RSS action for that entry as well
+ */
+- if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+- is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+- bank = npc_get_bank(mcam, index);
+- index &= (mcam->banksize - 1);
++ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
++ alg_idx);
+
+- rvu_write64(rvu, blkaddr,
+- NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+- *(u64 *)&action);
+- }
++ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
++ nixlf, NIXLF_ALLMULTI_ENTRY);
++ /* If PF's allmulti entry is enabled,
++ * Set RSS action for that entry as well
++ */
++ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
++ alg_idx);
+ }
+
+ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+@@ -1626,7 +1669,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+- u16 offset;
++ u32 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+@@ -2463,7 +2506,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ * - when available free entries are less.
+ * Lower priority ones out of available free entries are always
+ * chosen when 'high vs low' question arises.
++ *
++ * For a VF base MCAM match rule is set by its PF. And all the
++ * further MCAM rules installed by VF on its own are
++ * concatenated with the base rule set by its PF. Hence PF entries
++ * should be at lower priority compared to VF entries. Otherwise
++ * base rule is hit always and rules installed by VF will be of
++ * no use. Hence if the request is from PF then allocate low
++ * priority entries.
+ */
++ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
++ goto lprio_alloc;
+
+ /* Get the search range for priority allocation request */
+ if (req->priority) {
+@@ -2472,17 +2525,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ goto alloc;
+ }
+
+- /* For a VF base MCAM match rule is set by its PF. And all the
+- * further MCAM rules installed by VF on its own are
+- * concatenated with the base rule set by its PF. Hence PF entries
+- * should be at lower priority compared to VF entries. Otherwise
+- * base rule is hit always and rules installed by VF will be of
+- * no use. Hence if the request is from PF and NOT a priority
+- * allocation request then allocate low priority entries.
+- */
+- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+- goto lprio_alloc;
+-
+ /* Find out the search range for non-priority allocation request
+ *
+ * Get MCAM free entry count in middle zone.
+@@ -2512,6 +2554,18 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
++ /* Ensure PF requests are always at bottom and if PF requests
++ * for higher/lower priority entry wrt reference entry then
++ * honour that criteria and start search for entries from bottom
++ * and not in mid zone.
++ */
++ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++ req->priority == NPC_MCAM_HIGHER_PRIO)
++ end = req->ref_entry;
++
++ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++ req->priority == NPC_MCAM_LOWER_PRIO)
++ start = req->ref_entry;
+ }
+
+ alloc:
+@@ -2639,18 +2693,17 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ rsp->free_count = 0;
+
+- /* Check if ref_entry is within range */
+- if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+- dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+- __func__, req->ref_entry);
+- return NPC_MCAM_INVALID_REQ;
+- }
++ /* Check if ref_entry is greater than the range
++ * then set it to max value.
++ */
++ if (req->ref_entry > mcam->bmap_entries)
++ req->ref_entry = mcam->bmap_entries;
+
+ /* ref_entry can't be '0' if requested priority is high.
+ * Can't be last entry if requested priority is low.
+ */
+ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+- ((req->ref_entry == (mcam->bmap_entries - 1)) &&
++ ((req->ref_entry == mcam->bmap_entries) &&
+ req->priority == NPC_MCAM_LOWER_PRIO))
+ return NPC_MCAM_INVALID_REQ;
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+index b3150f05329196..d46ac29adb966d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+@@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+- {0x1610, 0x1618}, {0x1700, 0x17B0} } },
+- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
++ {0x1610, 0x1618}, {0x1700, 0x17C8} } },
++ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+index b42e631e52d0fd..18c1c9f361cc62 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+@@ -437,6 +437,7 @@
+
+ #define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+ #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
++#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+
+ /* SSO */
+ #define SSO_AF_CONST (0x1000)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+index 5664f768cb0cd1..64a97a0a10ed6a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_devlink.o qos_sq.o qos.o
+-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
++rvu_nicvf-y := otx2_vf.o
+
+ rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+ rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+
+ ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index a4a258da8dd59a..c1c99d7054f87f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
++ aq->prof.hl_en = 0;
++ aq->prof_mask.hl_en = 1;
++
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 818ce76185b2f3..b3064377510ed9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
+ } else if (lvl == NIX_TXSCH_LVL_TL4) {
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
+ req->reg[0] = NIX_AF_TL4X_PARENT(schq);
+- req->regval[0] = parent << 16;
++ req->regval[0] = (u64)parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = dwrr_val;
+ } else if (lvl == NIX_TXSCH_LVL_TL3) {
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
+ req->reg[0] = NIX_AF_TL3X_PARENT(schq);
+- req->regval[0] = parent << 16;
++ req->regval[0] = (u64)parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = dwrr_val;
+@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
+ req->reg[0] = NIX_AF_TL2X_PARENT(schq);
+- req->regval[0] = parent << 16;
++ req->regval[0] = (u64)parent << 16;
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
+- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
++ req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
+
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
+
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+- req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
++ req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
+
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL1X_CIR(schq);
+@@ -818,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ int qidx, sqe_tail, sqe_head;
+ struct otx2_snd_queue *sq;
+ u64 incr, *ptr, val;
+- int timeout = 1000;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+@@ -827,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
+ continue;
+
+ incr = (u64)qidx << 32;
+- while (timeout) {
+- val = otx2_atomic64_add(incr, ptr);
+- sqe_head = (val >> 20) & 0x3F;
+- sqe_tail = (val >> 28) & 0x3F;
+- if (sqe_head == sqe_tail)
+- break;
+- usleep_range(1, 3);
+- timeout--;
+- }
++ val = otx2_atomic64_add(incr, ptr);
++ sqe_head = (val >> 20) & 0x3F;
++ sqe_tail = (val >> 28) & 0x3F;
++ if (sqe_head != sqe_tail)
++ usleep_range(50, 60);
+ }
+ }
+
+@@ -956,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+- if (err)
++ if (err) {
++ kfree(sq->sg);
++ sq->sg = NULL;
+ return err;
++ }
+ }
+
+ sq->head = 0;
+@@ -973,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ sq->stats.bytes = 0;
+ sq->stats.pkts = 0;
+
+- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
++ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
++ if (err) {
++ kfree(sq->sg);
++ sq->sg = NULL;
++ return err;
++ }
++
++ return 0;
+
+ }
+
+@@ -1587,7 +1592,7 @@ int otx2_detach_resources(struct mbox *mbox)
+ detach->partial = false;
+
+ /* Send detach request to AF */
+- otx2_mbox_msg_send(&mbox->mbox, 0);
++ otx2_sync_mbox_msg(mbox);
+ mutex_unlock(&mbox->lock);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index c04a8ee53a82f1..7e16a341ec588f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -815,7 +815,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
+
+ if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
+ return 0;
+- otx2_mbox_msg_send(&mbox->mbox_up, devid);
++ otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
+ err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
+ if (err)
+ return err;
+@@ -977,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
+ int otx2_txsch_alloc(struct otx2_nic *pfvf);
+ void otx2_txschq_stop(struct otx2_nic *pfvf);
+ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
++void otx2_free_pending_sqe(struct otx2_nic *pfvf);
+ void otx2_sqb_flush(struct otx2_nic *pfvf);
+ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
+@@ -1069,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
+ void otx2_shutdown_tc(struct otx2_nic *nic);
+ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
++
+ /* CGX/RPM DMAC filters support */
+ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index bfddbff7bcdfbf..aa01110f04a339 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+
+ return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_config);
+
+ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+ {
+@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+
+ return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
+
+ static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+ {
+@@ -260,6 +262,7 @@ int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+
+ return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_update);
+
+ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+ {
+@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+
+ return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_stop);
+
+ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+ {
+@@ -321,6 +325,7 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
++EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
+
+ void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+@@ -385,6 +390,7 @@ void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+ }
++EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
+
+ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ {
+@@ -399,9 +405,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ {
+ struct otx2_nic *pfvf = netdev_priv(dev);
++ u8 old_pfc_en;
+ int err;
+
+- /* Save PFC configuration to interface */
++ old_pfc_en = pfvf->pfc_en;
+ pfvf->pfc_en = pfc->pfc_en;
+
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+@@ -411,13 +418,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+- if (err)
++ if (err) {
++ pfvf->pfc_en = old_pfc_en;
+ return err;
++ }
+
+ process_pfc:
+ err = otx2_config_priority_flow_ctrl(pfvf);
+- if (err)
++ if (err) {
++ pfvf->pfc_en = old_pfc_en;
+ return err;
++ }
+
+ /* Request Per channel Bpids */
+ if (pfc->pfc_en)
+@@ -425,6 +436,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
++ if (pfc->pfc_en)
++ otx2_nix_config_bp(pfvf, false);
++
++ otx2_pfc_txschq_stop(pfvf);
++ pfvf->pfc_en = old_pfc_en;
++ otx2_config_priority_flow_ctrl(pfvf);
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+@@ -461,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
+
+ return 0;
+ }
++EXPORT_SYMBOL(otx2_dcbnl_set_ops);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+index 4e1130496573ef..05956bf03c05d5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+@@ -112,6 +112,7 @@ int otx2_register_dl(struct otx2_nic *pfvf)
+ devlink_free(dl);
+ return err;
+ }
++EXPORT_SYMBOL(otx2_register_dl);
+
+ void otx2_unregister_dl(struct otx2_nic *pfvf)
+ {
+@@ -123,3 +124,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
+ ARRAY_SIZE(otx2_dl_params));
+ devlink_free(dl);
+ }
++EXPORT_SYMBOL(otx2_unregister_dl);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index 9efcec549834e8..8b7fc0af91ced2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev,
+ pfvf->hw.tx_queues = channel->tx_count;
+ if (pfvf->xdp_prog)
+ pfvf->hw.xdp_queues = channel->rx_count;
+- pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
+
+ if (if_up)
+ err = dev->netdev_ops->ndo_open(dev);
+@@ -334,9 +333,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return;
+
++ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+- if (!req)
++ if (!req) {
++ mutex_unlock(&pfvf->mbox.lock);
+ return;
++ }
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+@@ -344,6 +346,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
++ mutex_unlock(&pfvf->mbox.lock);
+ }
+
+ static int otx2_set_pauseparam(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 4762dbea64a12b..97a71e9b856372 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ struct ethhdr *eth_hdr;
+ bool new = false;
+ int err = 0;
++ u64 vf_num;
+ u32 ring;
+
+ if (!flow_cfg->max_flows) {
+@@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+- if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
++ /* Number of queues on a VF can be greater or less than
++ * the PF's queue count, so there is no need to check the
++ * queue count if the PF is installing a rule for its VF.
++ * Below is the expected vf_num value based on the ethtool
++ * commands.
++ *
++ * e.g.
++ * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
++ * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
++ * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
++ * vf_num:vf_idx+1
++ */
++ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
++ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
++ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
+@@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+ flow_cfg->nr_flows++;
+ }
+
++ if (flow->is_vf)
++ netdev_info(pfvf->netdev,
++ "Make sure that VF's queue number is within its queue limit\n");
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 6daf4d58c25d63..3f46d5e0fb2ecb 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -292,8 +292,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+ return 0;
+ }
+
+-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+- int first, int mdevs, u64 intr, int type)
++static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
++ int first, int mdevs, u64 intr)
+ {
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+@@ -307,40 +307,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+- if (type == TYPE_PFAF)
+- otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+ /* The hdr->num_msgs is set to zero immediately in the interrupt
+- * handler to ensure that it holds a correct value next time
+- * when the interrupt handler is called.
+- * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
+- * pf>mbox.up_num_msgs holds the data for use in
+- * pfaf_mbox_up_handler.
++ * handler to ensure that it holds a correct value next time
++ * when the interrupt handler is called. pf->mw[i].num_msgs
++ * holds the data for use in otx2_pfvf_mbox_handler and
++ * pf->mw[i].up_num_msgs holds the data for use in
++ * otx2_pfvf_mbox_up_handler.
+ */
+ if (hdr->num_msgs) {
+ mw[i].num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+- if (type == TYPE_PFAF)
+- memset(mbox->hwbase + mbox->rx_start, 0,
+- ALIGN(sizeof(struct mbox_hdr),
+- sizeof(u64)));
+-
+ queue_work(mbox_wq, &mw[i].mbox_wrk);
+ }
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+- if (type == TYPE_PFAF)
+- otx2_sync_mbox_bbuf(mbox, i);
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs) {
+ mw[i].up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+- if (type == TYPE_PFAF)
+- memset(mbox->hwbase + mbox->rx_start, 0,
+- ALIGN(sizeof(struct mbox_hdr),
+- sizeof(u64)));
+-
+ queue_work(mbox_wq, &mw[i].mbox_up_wrk);
+ }
+ }
+@@ -356,8 +342,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
+ /* Msgs are already copied, trigger VF's mbox irq */
+ smp_wmb();
+
++ otx2_mbox_wait_for_zero(pfvf_mbox, devid);
++
+ offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
+- writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
++ writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
+
+ /* Restore VF's mbox bounce buffer region address */
+ src_mdev->mbase = bbuf_base;
+@@ -547,7 +535,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
+ end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
+- __otx2_mbox_reset(mbox, 0);
++ __otx2_mbox_reset(mbox, vf_idx);
+ mdev->msgs_acked++;
+ }
+ }
+@@ -564,17 +552,19 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ if (vfs > 64) {
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+- TYPE_PFVF);
+- vfs -= 64;
++ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
++ if (intr)
++ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++ vfs = 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
++ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
+
+- trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++ if (intr)
++ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
+ return IRQ_HANDLED;
+ }
+@@ -594,8 +584,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
+ if (!pf->mbox_pfvf)
+ return -ENOMEM;
+
+- pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
+- WQ_HIGHPRI | WQ_MEM_RECLAIM);
++ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
++ WQ_UNBOUND | WQ_HIGHPRI |
++ WQ_MEM_RECLAIM, 0);
+ if (!pf->mbox_pfvf_wq)
+ return -ENOMEM;
+
+@@ -818,20 +809,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
+ struct mbox *af_mbox;
+ struct otx2_nic *pf;
+ int offset, id;
++ u16 num_msgs;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++ num_msgs = rsp_hdr->num_msgs;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ pf = af_mbox->pfvf;
+
+- for (id = 0; id < af_mbox->num_msgs; id++) {
++ for (id = 0; id < num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2_process_pfaf_mbox_msg(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+- if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
++ if (mdev->msgs_acked == (num_msgs - 1))
+ __otx2_mbox_reset(mbox, 0);
+ mdev->msgs_acked++;
+ }
+@@ -942,12 +935,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+ int offset, id, devid = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
++ u16 num_msgs;
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++ num_msgs = rsp_hdr->num_msgs;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+- for (id = 0; id < af_mbox->up_num_msgs; id++) {
++ for (id = 0; id < num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+@@ -956,10 +951,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+ otx2_process_mbox_msg_up(pf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+- if (devid) {
++ /* Forward to VF iff VFs are really present */
++ if (devid && pci_num_vf(pf->pdev)) {
+ otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
+ MBOX_DIR_PFVF_UP, devid - 1,
+- af_mbox->up_num_msgs);
++ num_msgs);
+ return;
+ }
+
+@@ -969,16 +965,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
+ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+ {
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+- struct mbox *mbox;
++ struct mbox *mw = &pf->mbox;
++ struct otx2_mbox_dev *mdev;
++ struct otx2_mbox *mbox;
++ struct mbox_hdr *hdr;
++ u64 mbox_data;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+
+- mbox = &pf->mbox;
+
+- trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
++ mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
++
++ if (mbox_data & MBOX_UP_MSG) {
++ mbox_data &= ~MBOX_UP_MSG;
++ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+- otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
++ mbox = &mw->mbox_up;
++ mdev = &mbox->dev[0];
++ otx2_sync_mbox_bbuf(mbox, 0);
++
++ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++ if (hdr->num_msgs)
++ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
++
++ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
++ BIT_ULL(0));
++ }
++
++ if (mbox_data & MBOX_DOWN_MSG) {
++ mbox_data &= ~MBOX_DOWN_MSG;
++ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
++
++ mbox = &mw->mbox;
++ mdev = &mbox->dev[0];
++ otx2_sync_mbox_bbuf(mbox, 0);
++
++ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++ if (hdr->num_msgs)
++ queue_work(pf->mbox_wq, &mw->mbox_wrk);
++
++ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
++ BIT_ULL(0));
++ }
+
+ return IRQ_HANDLED;
+ }
+@@ -1193,31 +1222,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ };
+
+ static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+- "NIX_SND_STATUS_GOOD",
+- "NIX_SND_STATUS_SQ_CTX_FAULT",
+- "NIX_SND_STATUS_SQ_CTX_POISON",
+- "NIX_SND_STATUS_SQB_FAULT",
+- "NIX_SND_STATUS_SQB_POISON",
+- "NIX_SND_STATUS_HDR_ERR",
+- "NIX_SND_STATUS_EXT_ERR",
+- "NIX_SND_STATUS_JUMP_FAULT",
+- "NIX_SND_STATUS_JUMP_POISON",
+- "NIX_SND_STATUS_CRC_ERR",
+- "NIX_SND_STATUS_IMM_ERR",
+- "NIX_SND_STATUS_SG_ERR",
+- "NIX_SND_STATUS_MEM_ERR",
+- "NIX_SND_STATUS_INVALID_SUBDC",
+- "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+- "NIX_SND_STATUS_DATA_FAULT",
+- "NIX_SND_STATUS_DATA_POISON",
+- "NIX_SND_STATUS_NPC_DROP_ACTION",
+- "NIX_SND_STATUS_LOCK_VIOL",
+- "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+- "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+- "NIX_SND_STATUS_NPC_MCAST_ABORT",
+- "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+- "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+- "NIX_SND_STATUS_SEND_STATS_ERR",
++ [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
++ [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
++ [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
++ [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
++ [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
++ [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
++ [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
++ [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
++ [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
++ [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
++ [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
++ [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
++ [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
++ [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
++ [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
++ [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
++ [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
++ [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
++ [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
++ [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
++ [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
++ [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
++ [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
++ [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
++ [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
++ [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
+ };
+
+ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+@@ -1238,14 +1268,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ continue;
+
+ if (val & BIT_ULL(42)) {
+- netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++ netdev_err(pf->netdev,
++ "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ } else {
+ if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
+ if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+- netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
++ netdev_err(pf->netdev,
++ "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
+ }
+
+@@ -1272,7 +1304,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ (val & NIX_SQINT_BITS));
+
+ if (val & BIT_ULL(42)) {
+- netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
++ netdev_err(pf->netdev,
++ "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ goto done;
+ }
+@@ -1282,8 +1315,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+- qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
++ qidx, sq_op_err_dbg,
++ nix_sqoperr_e_str[sq_op_err_code],
++ sq_op_err_code);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+@@ -1300,16 +1336,21 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+- qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
++ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
++ mnq_err_code);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+ chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+- qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
++ netdev_err(pf->netdev,
++ "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
++ qidx, snd_err_dbg,
++ nix_snd_status_e_str[snd_err_code],
++ snd_err_code);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+@@ -1589,6 +1630,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
+ }
++ otx2_free_pending_sqe(pf);
+
+ otx2_free_sq_res(pf);
+
+@@ -1634,6 +1676,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
+ mutex_unlock(&mbox->lock);
+ }
+
++static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
++{
++ int vf;
++
++ /* The AF driver will determine whether to allow the VF netdev or not */
++ if (is_otx2_vf(pfvf->pcifunc))
++ return true;
++
++ /* check if there are any trusted VFs associated with the PF netdev */
++ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
++ if (pfvf->vf_configs[vf].trusted)
++ return true;
++ return false;
++}
++
+ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+ {
+ struct net_device *netdev = pf->netdev;
+@@ -1666,12 +1723,21 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+- req->mode |= NIX_RX_MODE_USE_MCE;
++ if (otx2_promisc_use_mce_list(pf))
++ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+ }
+
++static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
++{
++ int cint;
++
++ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
++ otx2_config_irq_coalescing(pfvf, cint);
++}
++
+ static void otx2_dim_work(struct work_struct *w)
+ {
+ struct dim_cq_moder cur_moder;
+@@ -1687,6 +1753,7 @@ static void otx2_dim_work(struct work_struct *w)
+ CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ NAPI_POLL_WEIGHT : cur_moder.pkts;
++ otx2_set_irq_coalesce(pfvf);
+ dim->state = DIM_START_MEASURE;
+ }
+
+@@ -1703,6 +1770,7 @@ int otx2_open(struct net_device *netdev)
+ /* RQ and SQs are mapped to different CQs,
+ * so find out max CQ IRQs (i.e CINTs) needed.
+ */
++ pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
+ pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
+ pf->hw.tc_tx_queues);
+
+@@ -1857,13 +1925,15 @@ int otx2_open(struct net_device *netdev)
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
++ otx2_tc_apply_ingress_police_rules(pf);
++
+ err = otx2_rxtx_enable(pf, true);
+ /* If a mbox communication error happens at this point then interface
+ * will end up in a state such that it is in down state but hardware
+ * mcam entries are enabled to receive the packets. Hence disable the
+ * packet I/O.
+ */
+- if (err == EIO)
++ if (err == -EIO)
+ goto err_disable_rxtx;
+ else if (err)
+ goto err_tx_stop_queues;
+@@ -1921,6 +1991,8 @@ int otx2_stop(struct net_device *netdev)
+ /* Clear RSS enable flag */
+ rss = &pf->hw.rss_info;
+ rss->enable = false;
++ if (!netif_is_rxfh_configured(netdev))
++ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
+
+ /* Cleanup Queue IRQ */
+ vec = pci_irq_vector(pf->pdev,
+@@ -2598,8 +2670,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+ xdp_features_clear_redirect_target(dev);
+ }
+
+- pf->hw.non_qos_queues += pf->hw.xdp_queues;
+-
+ if (if_up)
+ otx2_open(pf->netdev);
+
+@@ -2662,11 +2732,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+- if (rc)
++ if (rc) {
+ pf->vf_configs[vf].trusted = !enable;
+- else
++ } else {
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
++ otx2_set_rx_mode(netdev);
++ }
++
+ return rc;
+ }
+
+@@ -3040,6 +3113,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
+ struct otx2_vf_config *config;
+ struct cgx_link_info_msg *req;
+ struct mbox_msghdr *msghdr;
++ struct delayed_work *dwork;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+@@ -3048,10 +3122,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
++ if (config->intf_down)
++ return;
++
++ mutex_lock(&pf->mbox.lock);
++
++ dwork = &config->link_event_work;
++
++ if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
++ schedule_delayed_work(dwork, msecs_to_jiffies(100));
++ mutex_unlock(&pf->mbox.lock);
++ return;
++ }
++
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
++ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
+@@ -3060,7 +3148,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+
++ otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
++
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
++
++ mutex_unlock(&pf->mbox.lock);
+ }
+
+ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+index 45a32e4b49d1cb..e3aee6e3621517 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+@@ -139,33 +139,34 @@
+ #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
+
+ /* NIX AF transmit scheduler registers */
+-#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+-#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+-#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+-#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+-#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+-#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+-#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+-#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+-#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+-#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
++#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
++#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
++#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
++#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
++#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
++#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
++#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
++#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
++#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
++#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
++#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
++#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
++#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
++#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
++#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
++#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
++#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
++#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
++#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
++#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
++#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
++#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
++#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
++#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
++#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
++#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
++#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
++#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
+
+ /* LMT LF registers */
+ #define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
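The (u64) casts in the scheduler-register macros above are a correctness fix, not style: with a plain int argument, (a) << 16 is evaluated in 32-bit arithmetic, so for large indices the shift overflows or sign-extends before the result is widened to the 64-bit register offset. A standalone sketch of the pitfall (illustrative C, not driver code):

	#include <stdint.h>

	uint64_t bad(int a)  { return 0x700 | (a) << 16; }            /* shift done in 32 bits  */
	uint64_t good(int a) { return 0x700 | (uint64_t)(a) << 16; }  /* shift done in 64 bits  */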
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+index fa37b9f312cae4..4e5899d8fa2e6e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+@@ -318,23 +318,23 @@ enum nix_snd_status_e {
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+- NIX_SND_STATUS_CRC_ERR = 0x9,
+- NIX_SND_STATUS_IMM_ERR = 0x10,
+- NIX_SND_STATUS_SG_ERR = 0x11,
+- NIX_SND_STATUS_MEM_ERR = 0x12,
+- NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+- NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+- NIX_SND_STATUS_DATA_FAULT = 0x15,
+- NIX_SND_STATUS_DATA_POISON = 0x16,
+- NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+- NIX_SND_STATUS_LOCK_VIOL = 0x18,
+- NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+- NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+- NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+- NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+- NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+- NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+- NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
++ NIX_SND_STATUS_CRC_ERR = 0x10,
++ NIX_SND_STATUS_IMM_ERR = 0x11,
++ NIX_SND_STATUS_SG_ERR = 0x12,
++ NIX_SND_STATUS_MEM_ERR = 0x13,
++ NIX_SND_STATUS_INVALID_SUBDC = 0x14,
++ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
++ NIX_SND_STATUS_DATA_FAULT = 0x16,
++ NIX_SND_STATUS_DATA_POISON = 0x17,
++ NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
++ NIX_SND_STATUS_LOCK_VIOL = 0x21,
++ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
++ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
++ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
++ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
++ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
++ NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
++ NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
+ NIX_SND_STATUS_MAX,
+ };
+
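The nix_snd_status_e renumbering above is likewise a bug fix, not a cosmetic reshuffle: the old table reads like a decimal count transcribed with 0x prefixes (0x9, 0x10, 0x11, ... meaning 9, 10, 11), so from CRC_ERR onward the constants disagreed with the hardware's actual send-status codes (hex 0x10-0x17 and 0x20-0x28). The classic pitfall, as a standalone sketch:

	/* Writing a decimal sequence with 0x prefixes silently changes the values: */
	enum { A = 0x9, B = 0x10, C = 0x11 };  /* intended 9, 10, 11 - actually 9, 16, 17 */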
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index fab9d85bfb3717..46bdbee9d38adf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -45,6 +45,9 @@ struct otx2_tc_flow {
+ bool is_act_police;
+ u32 prio;
+ struct npc_install_flow_req req;
++ u64 rate;
++ u32 burst;
++ bool is_pps;
+ };
+
+ static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+@@ -282,21 +285,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ return err;
+ }
+
+-static int otx2_tc_act_set_police(struct otx2_nic *nic,
+- struct otx2_tc_flow *node,
+- struct flow_cls_offload *f,
+- u64 rate, u32 burst, u32 mark,
+- struct npc_install_flow_req *req, bool pps)
++static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
++ struct otx2_tc_flow *node)
+ {
+- struct netlink_ext_ack *extack = f->common.extack;
+- struct otx2_hw *hw = &nic->hw;
+- int rq_idx, rc;
+-
+- rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+- if (rq_idx >= hw->rx_queues) {
+- NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+- return -EINVAL;
+- }
++ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+
+@@ -306,23 +298,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ return rc;
+ }
+
+- rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
++ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
++ node->burst, node->rate, node->is_pps);
+ if (rc)
+ goto free_leaf;
+
+- rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
++ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+- req->match_id = mark & 0xFFFFULL;
+- req->index = rq_idx;
+- req->op = NIX_RX_ACTIONOP_UCAST;
+- set_bit(rq_idx, &nic->rq_bmap);
+- node->is_act_police = true;
+- node->rq = rq_idx;
+-
+ return 0;
+
+ free_leaf:
+@@ -334,6 +320,39 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ return rc;
+ }
+
++static int otx2_tc_act_set_police(struct otx2_nic *nic,
++ struct otx2_tc_flow *node,
++ struct flow_cls_offload *f,
++ u64 rate, u32 burst, u32 mark,
++ struct npc_install_flow_req *req, bool pps)
++{
++ struct netlink_ext_ack *extack = f->common.extack;
++ struct otx2_hw *hw = &nic->hw;
++ int rq_idx, rc;
++
++ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
++ if (rq_idx >= hw->rx_queues) {
++ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
++ return -EINVAL;
++ }
++
++ req->match_id = mark & 0xFFFFULL;
++ req->index = rq_idx;
++ req->op = NIX_RX_ACTIONOP_UCAST;
++
++ node->is_act_police = true;
++ node->rq = rq_idx;
++ node->burst = burst;
++ node->rate = rate;
++ node->is_pps = pps;
++
++ rc = otx2_tc_act_set_hw_police(nic, node);
++ if (!rc)
++ set_bit(rq_idx, &nic->rq_bmap);
++
++ return rc;
++}
++
+ static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req,
+@@ -569,6 +588,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
++ u32 val;
+
+ flow_rule_match_control(rule, &match);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+@@ -577,12 +597,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ }
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
++ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
+ if (ntohs(flow_spec->etype) == ETH_P_IP) {
+- flow_spec->ip_flag = IPV4_FLAG_MORE;
++ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
+ flow_mask->ip_flag = IPV4_FLAG_MORE;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
+- flow_spec->next_header = IPPROTO_FRAGMENT;
++ flow_spec->next_header = val ?
++ IPPROTO_FRAGMENT : 0;
+ flow_mask->next_header = 0xff;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ } else {
+@@ -986,6 +1008,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ }
+
+ if (flow_node->is_act_police) {
++ __clear_bit(flow_node->rq, &nic->rq_bmap);
++
++ if (nic->flags & OTX2_FLAG_INTF_DOWN)
++ goto free_mcam_flow;
++
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+@@ -1001,11 +1028,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+- __clear_bit(flow_node->rq, &nic->rq_bmap);
+-
+ mutex_unlock(&nic->mbox.lock);
+ }
+
++free_mcam_flow:
+ otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
+ otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
+ kfree_rcu(flow_node, rcu);
+@@ -1025,6 +1051,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -ENOMEM;
+
++ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
++ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
++ return -EINVAL;
++ }
++
+ if (flow_cfg->nr_flows == flow_cfg->max_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Free MCAM entry not available to add the flow");
+@@ -1384,3 +1415,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
+ otx2_destroy_tc_flow_list(nic);
+ }
+ EXPORT_SYMBOL(otx2_shutdown_tc);
++
++static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
++ struct otx2_tc_flow *node)
++{
++ struct npc_install_flow_req *req;
++
++ if (otx2_tc_act_set_hw_police(nic, node))
++ return;
++
++ mutex_lock(&nic->mbox.lock);
++
++ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
++ if (!req)
++ goto err;
++
++ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
++
++ if (otx2_sync_mbox_msg(&nic->mbox))
++ netdev_err(nic->netdev,
++			   "Failed to install MCAM flow entry for ingress rule\n");
++err:
++ mutex_unlock(&nic->mbox.lock);
++}
++
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
++{
++ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
++ struct otx2_tc_flow *node;
++
++	/* If any ingress policer rules exist for the interface, then
++	 * apply those rules. Ingress policer rules depend on bandwidth
++	 * profiles linked to the receive queues. Since no receive queues
++	 * exist when the interface is down, ingress policer rules are stored
++ * and configured in hardware after all receive queues are allocated
++ * in otx2_open.
++ */
++ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
++ if (node->is_act_police)
++ otx2_tc_config_ingress_rule(nic, node);
++ }
++}
++EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
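The tc changes above split policer programming into a software half and a hardware half so rules survive an interface reset: rate, burst, and pps are cached on the otx2_tc_flow node when the rule is created, and otx2_tc_apply_ingress_police_rules() replays them from otx2_open() once receive queues exist again. The cache-and-replay pattern in miniature (hypothetical names, not driver code):

	struct rule { struct list_head list; bool is_police; u64 rate; u32 burst; };

	static void replay_rules(struct list_head *rules)
	{
		struct rule *r;

		list_for_each_entry(r, rules, list)        /* walk the cached software state */
			if (r->is_police)
				program_hw_policer(r);     /* hypothetical HW-facing helper */
	}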
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 53b2a4ef529852..0ca9f2ffd932d1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -510,13 +510,20 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+
+ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+ {
+- struct dim_sample dim_sample;
++ struct dim_sample dim_sample = { 0 };
+ u64 rx_frames, rx_bytes;
++ u64 tx_frames, tx_bytes;
+
+ rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ OTX2_GET_RX_STATS(RX_UCAST);
+ rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+- dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
++ tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
++ tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
++
++ dim_update_sample(pfvf->napi_events,
++ rx_frames + tx_frames,
++ rx_bytes + tx_bytes,
++ &dim_sample);
+ net_dim(&cq_poll->dim, dim_sample);
+ }
+
+@@ -558,16 +565,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return workdone;
+
+- /* Check for adaptive interrupt coalesce */
+- if (workdone != 0 &&
+- ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+- OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+- /* Adjust irq coalese using net_dim */
++	/* Adjust irq coalescing using net_dim */
++ if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
+ otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+- /* Update irq coalescing */
+- for (i = 0; i < pfvf->hw.cint_cnt; i++)
+- otx2_config_irq_coalescing(pfvf, i);
+- }
+
+ if (unlikely(!filled_cnt)) {
+ struct refill_work *work;
+@@ -1171,8 +1171,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+
+ if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ /* Insert vlan tag before giving pkt to tso */
+- if (skb_vlan_tag_present(skb))
++ if (skb_vlan_tag_present(skb)) {
+ skb = __vlan_hwaccel_push_inside(skb);
++ if (!skb)
++ return true;
++ }
+ otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ return true;
+ }
+@@ -1247,9 +1250,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
+
+ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ {
++ int tx_pkts = 0, tx_bytes = 0;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ struct nix_cqe_tx_s *cqe;
++ struct netdev_queue *txq;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+ int qidx;
+@@ -1270,12 +1275,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+ sg = &sq->sg[cqe->comp.sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
++ tx_bytes += skb->len;
++ tx_pkts++;
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+ }
+
++ if (likely(tx_pkts)) {
++ if (qidx >= pfvf->hw.tx_queues)
++ qidx -= pfvf->hw.xdp_queues;
++ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
++ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++ }
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+@@ -1302,6 +1315,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
+ return err;
+ }
+
++void otx2_free_pending_sqe(struct otx2_nic *pfvf)
++{
++ int tx_pkts = 0, tx_bytes = 0;
++ struct sk_buff *skb = NULL;
++ struct otx2_snd_queue *sq;
++ struct netdev_queue *txq;
++ struct sg_list *sg;
++ int sq_idx, sqe;
++
++ for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
++ sq = &pfvf->qset.sq[sq_idx];
++ for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
++ sg = &sq->sg[sqe];
++ skb = (struct sk_buff *)sg->skb;
++ if (skb) {
++ tx_bytes += skb->len;
++ tx_pkts++;
++ otx2_dma_unmap_skb_frags(pfvf, sg);
++ dev_kfree_skb_any(skb);
++ sg->skb = (u64)NULL;
++ }
++ }
++
++ if (!tx_pkts)
++ continue;
++ txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
++ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
++ tx_pkts = 0;
++ tx_bytes = 0;
++ }
++}
++
+ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+ {
+@@ -1361,7 +1406,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush)
+ {
+- unsigned char *hard_start, *data;
++ unsigned char *hard_start;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+@@ -1375,9 +1420,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+
+ xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
+
+- data = (unsigned char *)phys_to_virt(pa);
+- hard_start = page_address(page);
+- xdp_prepare_buff(&xdp, hard_start, data - hard_start,
++ hard_start = (unsigned char *)phys_to_virt(pa);
++ xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
+ cqe->sg.seg_size, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
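The otx2_txrx.c hunks above add byte-queue-limits (BQL) accounting: both the normal completion path and the new otx2_free_pending_sqe() report reaped packets and bytes via netdev_tx_completed_queue(). For the BQL bookkeeping to balance, every completion credit must pair with a netdev_tx_sent_queue() charge on the transmit path; the core pairing looks like:

	/* xmit path: charge the queue before ringing the doorbell */
	netdev_tx_sent_queue(txq, skb->len);

	/* completion path: credit everything reaped from this queue in one call */
	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);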
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 35e06048356f4d..cf0aa16d754070 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ int offset, id;
++ u16 num_msgs;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+- if (af_mbox->num_msgs == 0)
++ num_msgs = rsp_hdr->num_msgs;
++
++ if (num_msgs == 0)
+ return;
++
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+- for (id = 0; id < af_mbox->num_msgs; id++) {
++ for (id = 0; id < num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+ struct mbox *vf_mbox;
+ struct otx2_nic *vf;
+ int offset, id;
++ u16 num_msgs;
+
+ vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ vf = vf_mbox->pfvf;
+@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+ mdev = &mbox->dev[0];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+- if (vf_mbox->up_num_msgs == 0)
++ num_msgs = rsp_hdr->num_msgs;
++
++ if (num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
++ for (id = 0; id < num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_mbox_msg_up(vf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
++ u64 mbox_data;
+
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+
++ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
++
+ /* Read latest mbox data */
+ smp_rmb();
+
+- /* Check for PF => VF response messages */
+- mbox = &vf->mbox.mbox;
+- mdev = &mbox->dev[0];
+- otx2_sync_mbox_bbuf(mbox, 0);
++ if (mbox_data & MBOX_DOWN_MSG) {
++ mbox_data &= ~MBOX_DOWN_MSG;
++ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
++
++ /* Check for PF => VF response messages */
++ mbox = &vf->mbox.mbox;
++ mdev = &mbox->dev[0];
++ otx2_sync_mbox_bbuf(mbox, 0);
+
+- trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
++ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++ if (hdr->num_msgs)
++ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+
+- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+- if (hdr->num_msgs) {
+- vf->mbox.num_msgs = hdr->num_msgs;
+- hdr->num_msgs = 0;
+- memset(mbox->hwbase + mbox->rx_start, 0,
+- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
++ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
++ BIT_ULL(0));
+ }
+- /* Check for PF => VF notification messages */
+- mbox = &vf->mbox.mbox_up;
+- mdev = &mbox->dev[0];
+- otx2_sync_mbox_bbuf(mbox, 0);
+
+- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+- if (hdr->num_msgs) {
+- vf->mbox.up_num_msgs = hdr->num_msgs;
+- hdr->num_msgs = 0;
+- memset(mbox->hwbase + mbox->rx_start, 0,
+- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
++ if (mbox_data & MBOX_UP_MSG) {
++ mbox_data &= ~MBOX_UP_MSG;
++ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
++
++ /* Check for PF => VF notification messages */
++ mbox = &vf->mbox.mbox_up;
++ mdev = &mbox->dev[0];
++ otx2_sync_mbox_bbuf(mbox, 0);
++
++ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
++ if (hdr->num_msgs)
++ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
++
++ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
++ BIT_ULL(0));
+ }
+
+ return IRQ_HANDLED;
+@@ -760,8 +775,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
+ otx2_mcam_flow_del(vf);
+ otx2_shutdown_tc(vf);
+ otx2_shutdown_qos(vf);
+- otx2vf_disable_mbox_intr(vf);
+ otx2_detach_resources(&vf->mbox);
++ otx2vf_disable_mbox_intr(vf);
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+index 1e77bbf5d22a1a..4995a2d54d7d08 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
+ num_regs++;
+
+ otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+-
+ } else if (level == NIX_TXSCH_LVL_TL4) {
+ otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+ } else if (level == NIX_TXSCH_LVL_TL3) {
+@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
+ /* check if node is root */
+ if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
+ cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
+- cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
++ cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
+ mtu_to_dwrr_weight(pfvf,
+ pfvf->tx_max_pktlen);
+ num_regs++;
+@@ -382,6 +381,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
+ otx2_qos_read_txschq_cfg_tl(node, cfg);
+ cnt = cfg->static_node_pos[node->level];
+ cfg->schq_contig_list[node->level][cnt] = node->schq;
++ cfg->schq_index_used[node->level][cnt] = true;
+ cfg->schq_contig[node->level]++;
+ cfg->static_node_pos[node->level]++;
+ otx2_qos_read_txschq_cfg_schq(node, cfg);
+@@ -1406,7 +1406,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
+ otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
+
+ /* delete the txschq nodes allocated for this node */
++ otx2_qos_disable_sq(pfvf, qid);
++ otx2_qos_free_hw_node_schq(pfvf, node);
+ otx2_qos_free_sw_node_schq(pfvf, node);
++ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+
+ /* mark this node as htb inner node */
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
+@@ -1553,6 +1556,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
+ dwrr_del_node = true;
+
+ /* destroy the leaf node */
++ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_destroy_node(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 20afe79f380a24..bdc424123ee6cf 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -676,8 +676,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+- MAC_MCR_RX_FIFO_CLR_DIS;
++ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+@@ -693,7 +692,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phylink_config);
+ u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+
+- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
++ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ }
+
+@@ -802,7 +801,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
+ if (rx_pause)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+- mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
++ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ }
+
+@@ -4757,7 +4756,10 @@ static int mtk_probe(struct platform_device *pdev)
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
++ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
++ if (!err)
++ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
++
+ if (err) {
+ dev_err(&pdev->dev, "Wrong DMA config\n");
+ return -EINVAL;
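Splitting dma_set_mask_and_coherent() above lets streaming DMA use the SoC's 36-bit addressing while coherent allocations stay within 32 bits, for platforms whose coherent pool cannot be mapped above 4 GiB. The general mask-negotiation pattern (a sketch):

	/* Ask for the widest streaming mask the device supports, keep
	 * coherent buffers below 4 GiB, and fail the probe otherwise. */
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (!err)
		err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err)
		return -EIO;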
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index 86f32f48604375..6e222a000bf7eb 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -992,7 +992,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
+ MTK_PPE_KEEPALIVE_DISABLE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
+ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
+- MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
++ MTK_PPE_SCAN_MODE_CHECK_AGE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
+ MTK_PPE_ENTRIES_SHIFT);
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
+@@ -1088,17 +1088,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
+
+ mtk_ppe_cache_enable(ppe, false);
+
+- /* disable offload engine */
+- ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+- ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+-
+ /* disable aging */
+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
+ MTK_PPE_TB_CFG_AGE_UNBIND |
+ MTK_PPE_TB_CFG_AGE_TCP |
+ MTK_PPE_TB_CFG_AGE_UDP |
+- MTK_PPE_TB_CFG_AGE_TCP_FIN;
++ MTK_PPE_TB_CFG_AGE_TCP_FIN |
++ MTK_PPE_TB_CFG_SCAN_MODE;
+ ppe_clear(ppe, MTK_PPE_TB_CFG, val);
+
+- return mtk_ppe_wait_busy(ppe);
++ if (mtk_ppe_wait_busy(ppe))
++ return -ETIMEDOUT;
++
++ /* disable offload engine */
++ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
++ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
++
++ return 0;
+ }
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 31aebeb2e28585..25989c79c92e61 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev)
+ {
+ struct device_node *of_node;
+ struct mtk_star_priv *priv;
++ struct phy_device *phydev;
+ struct net_device *ndev;
+ struct device *dev;
+ void __iomem *base;
+@@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev)
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+
++ phydev = of_phy_find_device(priv->phy_node);
++ if (phydev) {
++ phydev->mac_managed_pm = true;
++ put_device(&phydev->mdio.dev);
++ }
++
+ return devm_register_netdev(dev, ndev);
+ }
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
+index 94376aa2b34c57..85a9ad2b86bfff 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -598,13 +598,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
+ static void
+ mtk_wed_stop(struct mtk_wed_device *dev)
+ {
++ mtk_wed_dma_disable(dev);
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ if (dev->hw->version == 1)
+ return;
+@@ -617,7 +617,6 @@ static void
+ mtk_wed_deinit(struct mtk_wed_device *dev)
+ {
+ mtk_wed_stop(dev);
+- mtk_wed_dma_disable(dev);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+@@ -1703,9 +1702,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ static void
+ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+ {
+- if (!dev->running)
+- return;
+-
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+ }
+@@ -1766,14 +1762,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
+ {
+ struct mtk_wed_flow_block_priv *priv = cb_priv;
+ struct flow_cls_offload *cls = type_data;
+- struct mtk_wed_hw *hw = priv->hw;
++ struct mtk_wed_hw *hw = NULL;
+
+- if (!tc_can_offload(priv->dev))
++ if (!priv || !tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
++ hw = priv->hw;
+ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+ }
+
+@@ -1829,6 +1826,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ kfree(block_cb->cb_priv);
++ block_cb->cb_priv = NULL;
+ }
+ return 0;
+ default:
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+index 071ed3dea860d5..72bcdaed12a949 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -68,6 +68,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
++ if (!wed->wlan.update_wo_rx_stats)
++ return;
++
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+index 47ea69feb3b246..f87ab9b8a5901b 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+index 3bd51a3d665001..ae44ad5f8ce8a1 100644
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+@@ -291,6 +291,9 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+ for (i = 0; i < q->n_desc; i++) {
+ struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
++ if (!entry->buf)
++ continue;
++
+ dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+ DMA_TO_DEVICE);
+ skb_free_frag(entry->buf);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index c22b0ad0c8701d..48dc4ae87af092 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
+ return token;
+ }
+
+-static int cmd_alloc_index(struct mlx5_cmd *cmd)
++static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
+ {
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
+- if (ret < cmd->vars.max_reg_cmds)
++ if (ret < cmd->vars.max_reg_cmds) {
+ clear_bit(ret, &cmd->vars.bitmask);
++ ent->idx = ret;
++ cmd->ent_arr[ent->idx] = ent;
++ }
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
+@@ -964,20 +967,33 @@ static void cmd_work_handler(struct work_struct *work)
+ bool poll_cmd = ent->polling;
+ struct mlx5_cmd_layout *lay;
+ struct mlx5_core_dev *dev;
+- unsigned long cb_timeout;
+- struct semaphore *sem;
++ unsigned long timeout;
+ unsigned long flags;
+ int alloc_ret;
+ int cmd_mode;
+
++ complete(&ent->handling);
++
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+- cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
++ timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+
+- complete(&ent->handling);
+- sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
+- down(sem);
+ if (!ent->page_queue) {
+- alloc_ret = cmd_alloc_index(cmd);
++ if (down_timeout(&cmd->vars.sem, timeout)) {
++ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
++ mlx5_command_str(ent->op), ent->op);
++ if (ent->callback) {
++ ent->callback(-EBUSY, ent->context);
++ mlx5_free_cmd_msg(dev, ent->out);
++ free_msg(dev, ent->in);
++ cmd_ent_put(ent);
++ } else {
++ ent->ret = -EBUSY;
++ complete(&ent->done);
++ }
++ complete(&ent->slotted);
++ return;
++ }
++ alloc_ret = cmd_alloc_index(cmd, ent);
+ if (alloc_ret < 0) {
+ mlx5_core_err_rl(dev, "failed to allocate command entry\n");
+ if (ent->callback) {
+@@ -989,18 +1005,20 @@ static void cmd_work_handler(struct work_struct *work)
+ ent->ret = -EAGAIN;
+ complete(&ent->done);
+ }
+- up(sem);
++ up(&cmd->vars.sem);
+ return;
+ }
+- ent->idx = alloc_ret;
+ } else {
++ down(&cmd->vars.pages_sem);
+ ent->idx = cmd->vars.max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ clear_bit(ent->idx, &cmd->vars.bitmask);
++ cmd->ent_arr[ent->idx] = ent;
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ }
+
+- cmd->ent_arr[ent->idx] = ent;
++ complete(&ent->slotted);
++
+ lay = get_inst(cmd, ent->idx);
+ ent->lay = lay;
+ memset(lay, 0, sizeof(*lay));
+@@ -1019,7 +1037,7 @@ static void cmd_work_handler(struct work_struct *work)
+ ent->ts1 = ktime_get_ns();
+ cmd_mode = cmd->mode;
+
+- if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
++ if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
+ cmd_ent_get(ent);
+ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+
+@@ -1139,6 +1157,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+ ent->ret = -ECANCELED;
+ goto out_err;
+ }
++
++ wait_for_completion(&ent->slotted);
++
+ if (cmd->mode == CMD_MODE_POLLING || ent->polling)
+ wait_for_completion(&ent->done);
+ else if (!wait_for_completion_timeout(&ent->done, timeout))
+@@ -1153,6 +1174,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+ } else if (err == -ECANCELED) {
+ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+ mlx5_command_str(ent->op), ent->op);
++ } else if (err == -EBUSY) {
++ mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
++ mlx5_command_str(ent->op), ent->op);
+ }
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
+@@ -1204,6 +1228,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ ent->polling = force_polling;
+
+ init_completion(&ent->handling);
++ init_completion(&ent->slotted);
+ if (!callback)
+ init_completion(&ent->done);
+
+@@ -1221,7 +1246,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ return 0; /* mlx5_cmd_comp_handler() will put(ent) */
+
+ err = wait_func(dev, ent);
+- if (err == -ETIMEDOUT || err == -ECANCELED)
++ if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+@@ -1607,6 +1632,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ eqe = data;
+
++ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
++ return NOTIFY_DONE;
++
+ mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+
+ return NOTIFY_OK;
+@@ -1919,6 +1947,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ {
+ const char *namep = mlx5_command_str(opcode);
+ struct mlx5_cmd_stats *stats;
++ unsigned long flags;
+
+ if (!err || !(strcmp(namep, "unknown command opcode")))
+ return;
+@@ -1926,7 +1955,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ stats = xa_load(&dev->cmd.stats, opcode);
+ if (!stats)
+ return;
+- spin_lock_irq(&stats->lock);
++ spin_lock_irqsave(&stats->lock, flags);
+ stats->failed++;
+ if (err < 0)
+ stats->last_failed_errno = -err;
+@@ -1935,7 +1964,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ stats->last_failed_mbox_status = status;
+ stats->last_failed_syndrome = syndrome;
+ }
+- spin_unlock_irq(&stats->lock);
++ spin_unlock_irqrestore(&stats->lock, flags);
+ }
+
+ /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
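The cmd.c rework above bounds the wait for a command slot: down() becomes down_timeout(), so a submitter stuck behind a full command queue completes with -EBUSY instead of hanging forever, and wait_func() now waits on the new ent->slotted completion before touching the entry. The primitive involved (a sketch; timeout_ms is a placeholder):

	#include <linux/semaphore.h>

	/* down_timeout() returns 0 on acquisition and -ETIME on expiry;
	 * the handler maps expiry to -EBUSY for the command's owner. */
	if (down_timeout(&cmd->vars.sem, msecs_to_jiffies(timeout_ms)))
		return -EBUSY;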
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index af8460bb257b93..1bccb5633ab4be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -168,6 +168,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ return -EOPNOTSUPP;
+ }
+
++ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
++ !dev->priv.fw_reset) {
++ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
++ return -EOPNOTSUPP;
++ }
++
+ if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
+ NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index ad789349c06e6b..85d3bfa0780c69 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
+
+ while (block_timestamp > tracer->last_timestamp) {
+ /* Check block override if it's not the first block */
+- if (!tracer->last_timestamp) {
++ if (tracer->last_timestamp) {
+ u64 *ts_event;
+ /* To avoid block override be the HW in case of buffer
+ * wraparound, the time stamp of the previous block
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 86f2690c5e0156..20a6bc1a234f4e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -818,6 +818,7 @@ enum {
+ MLX5E_STATE_DESTROYING,
+ MLX5E_STATE_XDP_TX_ENABLED,
+ MLX5E_STATE_XDP_ACTIVE,
++ MLX5E_STATE_CHANNELS_ACTIVE,
+ };
+
+ struct mlx5e_modify_sq_param {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+index be83ad9db82a47..671adbad0a40f6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
++ ft->g = NULL;
+ kvfree(in);
+ return -ENOMEM;
+ }
+@@ -435,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in || !ft->g) {
+ kfree(ft->g);
++ ft->g = NULL;
+ kvfree(in);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+index e097f336e1c4a0..30507b7c2fb179 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+@@ -1062,8 +1062,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+
+- allow_swp =
+- mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
++ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
++ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ mlx5e_build_sq_param_common(mdev, param);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index bb11e644d24f7b..15d97c685ad332 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
+
+ WARN_ON_ONCE(tracker->inuse);
+ tracker->inuse = true;
+- spin_lock(&list->tracker_list_lock);
++ spin_lock_bh(&list->tracker_list_lock);
+ list_add_tail(&tracker->entry, &list->tracker_list_head);
+- spin_unlock(&list->tracker_list_lock);
++ spin_unlock_bh(&list->tracker_list_lock);
+ }
+
+ static void
+@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
+
+ WARN_ON_ONCE(!tracker->inuse);
+ tracker->inuse = false;
+- spin_lock(&list->tracker_list_lock);
++ spin_lock_bh(&list->tracker_list_lock);
+ list_del(&tracker->entry);
+- spin_unlock(&list->tracker_list_lock);
++ spin_unlock_bh(&list->tracker_list_lock);
+ }
+
+ void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
+@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
+
+- spin_lock(&cqe_list->tracker_list_lock);
++ spin_lock_bh(&cqe_list->tracker_list_lock);
+ list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
+ struct sk_buff *skb =
+ mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
+@@ -170,13 +170,15 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
+ pos->inuse = false;
+ list_del(&pos->entry);
+ }
+- spin_unlock(&cqe_list->tracker_list_lock);
++ spin_unlock_bh(&cqe_list->tracker_list_lock);
+ }
+
+ #define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ struct mlx5_cqe64 *cqe,
++ u8 *md_buff,
++ u8 *md_buff_sz,
+ int budget)
+ {
+ struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
+@@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
+ out:
+ napi_consume_skb(skb, budget);
+- mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
++ md_buff[(*md_buff_sz)++] = metadata_id;
+ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+ }
+
+-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
++static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
+ {
+ struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
+- struct mlx5_cqwq *cqwq = &cq->wq;
++ int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
++ u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
++ u8 metadata_buff_sz = 0;
++ struct mlx5_cqwq *cqwq;
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
++ cqwq = &cq->wq;
++
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
+ return false;
+
+@@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ do {
+ mlx5_cqwq_pop(cqwq);
+
+- mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
++ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
++ metadata_buff, &metadata_buff_sz, napi_budget);
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+
+ mlx5_cqwq_update_db_record(cqwq);
+@@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
++ while (metadata_buff_sz > 0)
++ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
++ metadata_buff[--metadata_buff_sz]);
++
+ mlx5e_txqsq_wake(&ptpsq->txqsq);
+
+ return work_done == budget;
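The spin_lock to spin_lock_bh conversions above exist because tracker_list_lock is taken from both process context and NAPI (softirq) completion; if a softirq fires on a CPU whose process context already holds the plain lock, the CPU self-deadlocks. The process-context side of such a shared lock must disable bottom halves:

	spin_lock_bh(&list->tracker_list_lock);
	list_add_tail(&tracker->entry, &list->tracker_list_head);
	spin_unlock_bh(&list->tracker_list_lock);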
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+index 7b700d0f956a88..b171cd8f11e04a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
+ }
+
+ static inline u8
++mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
++{
++ return fifo->data[fifo->mask & fifo->cc];
++}
++
++static inline void
+ mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
+ {
+- return fifo->data[fifo->mask & fifo->cc++];
++ fifo->cc++;
+ }
+
+ static inline void
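Splitting mlx5e_ptp_metadata_fifo_pop() into peek plus pop above fixes a lossy pattern: the old pop read and consumed the metadata id in one expression, so a caller that popped and then failed to transmit lost the id for good. With peek/pop, the datapath consumes only once the operation can no longer fail. In miniature (a hypothetical fifo, not the mlx5 one):

	struct id_fifo { u8 *data; u16 mask, pc, cc; };

	static inline u8   id_fifo_peek(struct id_fifo *f) { return f->data[f->cc & f->mask]; }
	static inline void id_fifo_pop(struct id_fifo *f)  { f->cc++; }

	/* usage: id = id_fifo_peek(f); if (send(id) == 0) id_fifo_pop(f); */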
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+index 244bc15a42abff..d9acc37afe1c86 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+@@ -82,24 +82,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+
+ txq_ix = mlx5e_qid_from_qos(chs, node_qid);
+
+- WARN_ON(node_qid > priv->htb_max_qos_sqs);
+- if (node_qid == priv->htb_max_qos_sqs) {
+- struct mlx5e_sq_stats *stats, **stats_list = NULL;
+-
+- if (priv->htb_max_qos_sqs == 0) {
+- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+- sizeof(*stats_list),
+- GFP_KERNEL);
+- if (!stats_list)
+- return -ENOMEM;
+- }
++ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
++ if (!priv->htb_qos_sq_stats) {
++ struct mlx5e_sq_stats **stats_list;
++
++ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
++ sizeof(*stats_list), GFP_KERNEL);
++ if (!stats_list)
++ return -ENOMEM;
++
++ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
++ }
++
++ if (!priv->htb_qos_sq_stats[node_qid]) {
++ struct mlx5e_sq_stats *stats;
++
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+- if (!stats) {
+- kvfree(stats_list);
++ if (!stats)
+ return -ENOMEM;
+- }
+- if (stats_list)
+- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
++
+ WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ /* Order htb_max_qos_sqs increment after writing the array pointer.
+ * Pairs with smp_load_acquire in en_stats.c.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index e8eea9ffd5eb62..03b119a434bc94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -702,11 +702,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+
+ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ {
+- char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5e_priv *priv = rq->priv;
+ struct mlx5e_err_ctx err_ctx = {};
++ char icosq_str[32] = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+@@ -715,7 +715,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ if (icosq)
+ snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
+ snprintf(err_str, sizeof(err_str),
+- "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
++ "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
+ rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index ff8242f67c5456..51a23345caa180 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -149,7 +149,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
+ return err;
+ }
+
++ mutex_lock(&priv->state_lock);
+ err = mlx5e_safe_reopen_channels(priv);
++ mutex_unlock(&priv->state_lock);
+ if (!err) {
+ to_ctx->status = 1; /* all channels recovered */
+ return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+index f675b1926340f9..f66bbc8464645e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+
+ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+ {
++ mutex_lock(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ kvfree(selq->standby);
+@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
++ mutex_unlock(selq->state_lock);
+ }
+
+ void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+index f63402c480280c..1b418095b79a39 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+@@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ }
+ esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
+ esw_attr->out_count++;
+- /* attr->dests[].rep is resolved when we handle encap */
++ /* attr->dests[].vport is resolved when we handle encap */
+
+ return 0;
+ }
+@@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+
+ out_priv = netdev_priv(out_dev);
+ rpriv = out_priv->ppriv;
+- esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
++ esw_attr->dests[esw_attr->out_count].vport_valid = true;
++ esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+
+ esw_attr->out_count++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+index 4e923a2874aefe..b500cc2c9689d1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
+- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
++ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
+ err = -EOPNOTSUPP;
+ goto err_check;
+ }
+@@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5_flow_spec *spec;
+ int err;
+
++ if (IS_ERR(post_act))
++ return PTR_ERR(post_act);
++
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+@@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
+ struct mlx5e_post_act_handle *handle;
+ int err;
+
++ if (IS_ERR(post_act))
++ return ERR_CAST(post_act);
++
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index fadfa8b50bebeb..8c4e3ecef5901c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -920,6 +920,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+ err_mod_hdr:
++ *attr = *old_attr;
+ kfree(old_attr);
+ err_attr:
+ kvfree(spec);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index b10e40e1a9c141..f1d1e1542e81b2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -1064,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+- esw_attr->dests[out_index].rep = rpriv->rep;
++ esw_attr->dests[out_index].vport_valid = true;
++ esw_attr->dests[out_index].vport = rpriv->rep->vport;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+index d4239e3b3c88ef..11f724ad90dbfb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+@@ -23,6 +23,9 @@ struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
+ struct mlx5e_tir_builder *builder;
+
+ builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
++ if (!builder)
++ return NULL;
++
+ builder->modify = modify;
+
+ return builder;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+index 8bed17d8fe5649..b723ff5e5249cf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+@@ -493,6 +493,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+ dma_addr_t dma_addr = xdptxd->dma_addr;
+ u32 dma_len = xdptxd->len;
+ u16 ds_cnt, inline_hdr_sz;
++ unsigned int frags_size;
+ u8 num_wqebbs = 1;
+ int num_frags = 0;
+ bool inline_ok;
+@@ -503,8 +504,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+
+ inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
+ dma_len >= MLX5E_XDP_MIN_INLINE;
++ frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;
+
+- if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
++ if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
+ stats->err++;
+ return false;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index 36826b58248478..78739fe138ca4d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5_core_dev *mdev)
+ {
+- /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
+- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
++ /* AF_XDP doesn't support frames larger than PAGE_SIZE,
++ * and xsk->chunk_size is limited to 65535 bytes.
++ */
++ if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
+ MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
+ return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+index caa34b9c161e51..33e32584b07f57 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+@@ -102,8 +102,14 @@ static inline void
+ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+ {
+ int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
++ struct udphdr *udphdr;
+
+- udp_hdr(skb)->len = htons(payload_len);
++ if (skb->encapsulation)
++ udphdr = (struct udphdr *)skb_inner_transport_header(skb);
++ else
++ udphdr = udp_hdr(skb);
++
++ udphdr->len = htons(payload_len);
+ }
+
+ struct mlx5e_accel_tx_state {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index 7d4ceb9b9c16fe..015faddabc8e09 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -67,7 +67,6 @@ static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+ return;
+
+ spin_lock_bh(&x->lock);
+- xfrm_state_check_expire(x);
+ if (x->km.state == XFRM_STATE_EXPIRED) {
+ sa_entry->attrs.drop = true;
+ spin_unlock_bh(&x->lock);
+@@ -75,6 +74,13 @@ static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+ mlx5e_accel_ipsec_fs_modify(sa_entry);
+ return;
+ }
++
++ if (x->km.state != XFRM_STATE_VALID) {
++ spin_unlock_bh(&x->lock);
++ return;
++ }
++
++ xfrm_state_check_expire(x);
+ spin_unlock_bh(&x->lock);
+
+ queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
+@@ -121,7 +127,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+
+- sa_entry->esn_state.esn = esn;
++ if (sa_entry->esn_state.esn_msb)
++ sa_entry->esn_state.esn = esn;
++ else
++ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
++ * the first packet sent using a given SA will contain a sequence
++ * number of 1.
++ */
++ sa_entry->esn_state.esn = max_t(u32, esn, 1);
+ sa_entry->esn_state.esn_msb = esn_msb;
+
+ if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
+@@ -329,15 +342,41 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ /* iv len */
+ aes_gcm->icv_len = x->aead->alg_icv_len;
+
++ attrs->dir = x->xso.dir;
++
+ /* esn */
+ if (x->props.flags & XFRM_STATE_ESN) {
+ attrs->replay_esn.trigger = true;
+ attrs->replay_esn.esn = sa_entry->esn_state.esn;
+ attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
+ attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
++ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
++ goto skip_replay_window;
++
++ switch (x->replay_esn->replay_window) {
++ case 32:
++ attrs->replay_esn.replay_window =
++ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
++ break;
++ case 64:
++ attrs->replay_esn.replay_window =
++ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
++ break;
++ case 128:
++ attrs->replay_esn.replay_window =
++ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
++ break;
++ case 256:
++ attrs->replay_esn.replay_window =
++ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
++ break;
++ default:
++ WARN_ON(true);
++ return;
++ }
+ }
+
+- attrs->dir = x->xso.dir;
++skip_replay_window:
+ /* spi */
+ attrs->spi = be32_to_cpu(x->id.spi);
+
+@@ -473,7 +512,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
+ return -EINVAL;
+ }
+
+- if (x->replay_esn && x->replay_esn->replay_window != 32 &&
++ if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
++ x->replay_esn->replay_window != 32 &&
+ x->replay_esn->replay_window != 64 &&
+ x->replay_esn->replay_window != 128 &&
+ x->replay_esn->replay_window != 256) {
+@@ -901,9 +941,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
+ return;
+
+ mlx5e_accel_ipsec_fs_cleanup(ipsec);
+- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
++ if (ipsec->netevent_nb.notifier_call) {
+ unregister_netevent_notifier(&ipsec->netevent_nb);
+- if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
++ ipsec->netevent_nb.notifier_call = NULL;
++ }
++ if (ipsec->aso)
+ mlx5e_ipsec_aso_cleanup(ipsec);
+ destroy_workqueue(ipsec->wq);
+ kfree(ipsec);
+@@ -1012,6 +1054,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
+ }
+ }
+
++ if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
++ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
++ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -1107,14 +1155,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_free = mlx5e_xfrm_free_state,
+ .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+-};
+-
+-static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
+- .xdo_dev_state_add = mlx5e_xfrm_add_state,
+- .xdo_dev_state_delete = mlx5e_xfrm_del_state,
+- .xdo_dev_state_free = mlx5e_xfrm_free_state,
+- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
+- .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+
+ .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
+@@ -1132,11 +1172,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+
+ mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
+
+- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+- netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
+- else
+- netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+-
++ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
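The hunks above make the driver program a receive-side anti-replay window only for inbound SAs and reject any window size the ASO hardware cannot encode. Below is a minimal userspace sketch of that mapping; the enum names mirror the driver's MLX5_IPSEC_ASO_REPLAY_WIN_* constants, but the values are illustrative assumptions, not the hardware encoding.

#include <stdio.h>

/* Stand-ins for MLX5_IPSEC_ASO_REPLAY_WIN_*; values are assumed. */
enum replay_win { WIN_32BIT, WIN_64BIT, WIN_128BIT, WIN_256BIT };

/* Mirrors the switch in mlx5e_ipsec_build_accel_xfrm_attrs: only the
 * four window sizes the hardware understands are accepted. */
static int map_replay_window(unsigned int window, enum replay_win *out)
{
	switch (window) {
	case 32:  *out = WIN_32BIT;  return 0;
	case 64:  *out = WIN_64BIT;  return 0;
	case 128: *out = WIN_128BIT; return 0;
	case 256: *out = WIN_256BIT; return 0;
	default:  return -1; /* unsupported: the driver warns and bails out */
	}
}

int main(void)
{
	enum replay_win w;

	printf("128 -> %s\n", map_replay_window(128, &w) ? "invalid" : "ok");
	printf("48  -> %s\n", map_replay_window(48, &w)  ? "invalid" : "ok");
	return 0;
}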
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index 7dba4221993f05..61288066830d94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -128,63 +128,166 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
+ return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ }
+
+-static int ipsec_status_rule(struct mlx5_core_dev *mdev,
+- struct mlx5e_ipsec_rx *rx,
+- struct mlx5_flow_destination *dest)
++static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
++ struct mlx5e_ipsec_rx *rx)
+ {
+- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
++ mlx5_del_flow_rules(rx->status_drop.rule);
++ mlx5_destroy_flow_group(rx->status_drop.group);
++ mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
++}
++
++static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
++ struct mlx5e_ipsec_rx *rx)
++{
++ mlx5_del_flow_rules(rx->status.rule);
++
++ if (rx != ipsec->rx_esw)
++ return;
++
++#ifdef CONFIG_MLX5_ESWITCH
++ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
++#endif
++}
++
++static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
++ struct mlx5e_ipsec_rx *rx)
++{
++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
++ struct mlx5_flow_table *ft = rx->ft.status;
++ struct mlx5_core_dev *mdev = ipsec->mdev;
++ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+- struct mlx5_modify_hdr *modify_hdr;
+- struct mlx5_flow_handle *fte;
++ struct mlx5_flow_handle *rule;
++ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+- int err;
++ struct mlx5_flow_group *g;
++ u32 *flow_group_in;
++ int err = 0;
+
++ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+- if (!spec)
+- return -ENOMEM;
++ if (!flow_group_in || !spec) {
++ err = -ENOMEM;
++ goto err_out;
++ }
+
+- /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
+- MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+- MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
+- MLX5_SET(copy_action_in, action, src_offset, 0);
+- MLX5_SET(copy_action_in, action, length, 7);
+- MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+- MLX5_SET(copy_action_in, action, dst_offset, 24);
++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
++ g = mlx5_create_flow_group(ft, flow_group_in);
++ if (IS_ERR(g)) {
++ err = PTR_ERR(g);
++ mlx5_core_err(mdev,
++ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
++ goto err_out;
++ }
+
+- modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+- 1, action);
++ flow_counter = mlx5_fc_create(mdev, false);
++ if (IS_ERR(flow_counter)) {
++ err = PTR_ERR(flow_counter);
++ mlx5_core_err(mdev,
++ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
++ goto err_cnt;
++ }
+
+- if (IS_ERR(modify_hdr)) {
+- err = PTR_ERR(modify_hdr);
++ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
++ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
++ dest.counter_id = mlx5_fc_id(flow_counter);
++ if (rx == ipsec->rx_esw)
++ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
++ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
++ if (IS_ERR(rule)) {
++ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+- goto out_spec;
++ "Failed to add ipsec rx status drop rule, err=%d\n", err);
++ goto err_rule;
+ }
+
+- /* create fte */
+- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
++ rx->status_drop.group = g;
++ rx->status_drop.rule = rule;
++ rx->status_drop_cnt = flow_counter;
++
++ kvfree(flow_group_in);
++ kvfree(spec);
++ return 0;
++
++err_rule:
++ mlx5_fc_destroy(mdev, flow_counter);
++err_cnt:
++ mlx5_destroy_flow_group(g);
++err_out:
++ kvfree(flow_group_in);
++ kvfree(spec);
++ return err;
++}
++
++static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
++ struct mlx5e_ipsec_rx *rx,
++ struct mlx5_flow_destination *dest)
++{
++ struct mlx5_flow_act flow_act = {};
++ struct mlx5_flow_handle *rule;
++ struct mlx5_flow_spec *spec;
++ int err;
++
++ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
++ if (!spec)
++ return -ENOMEM;
++
++ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
++ misc_parameters_2.ipsec_syndrome);
++ MLX5_SET(fte_match_param, spec->match_value,
++ misc_parameters_2.ipsec_syndrome, 0);
++ if (rx == ipsec->rx_esw)
++ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
++ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
++ flow_act.flags = FLOW_ACT_NO_APPEND;
++ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+- flow_act.modify_hdr = modify_hdr;
+- fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+- if (IS_ERR(fte)) {
+- err = PTR_ERR(fte);
+- mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+- goto out;
++ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
++ if (IS_ERR(rule)) {
++ err = PTR_ERR(rule);
++ mlx5_core_warn(ipsec->mdev,
++ "Failed to add ipsec rx status pass rule, err=%d\n", err);
++ goto err_rule;
+ }
+
++ rx->status.rule = rule;
+ kvfree(spec);
+- rx->status.rule = fte;
+- rx->status.modify_hdr = modify_hdr;
+ return 0;
+
+-out:
+- mlx5_modify_header_dealloc(mdev, modify_hdr);
+-out_spec:
++err_rule:
+ kvfree(spec);
+ return err;
+ }
+
++static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
++ struct mlx5e_ipsec_rx *rx)
++{
++ ipsec_rx_status_pass_destroy(ipsec, rx);
++ ipsec_rx_status_drop_destroy(ipsec, rx);
++}
++
++static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
++ struct mlx5e_ipsec_rx *rx,
++ struct mlx5_flow_destination *dest)
++{
++ int err;
++
++ err = ipsec_rx_status_drop_create(ipsec, rx);
++ if (err)
++ return err;
++
++ err = ipsec_rx_status_pass_create(ipsec, rx, dest);
++ if (err)
++ goto err_pass_create;
++
++ return 0;
++
++err_pass_create:
++ ipsec_rx_status_drop_destroy(ipsec, rx);
++ return err;
++}
++
+ static int ipsec_miss_create(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_table *ft,
+ struct mlx5e_ipsec_miss *miss,
+@@ -256,12 +359,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ mlx5_destroy_flow_table(rx->ft.sa);
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
+- if (rx == ipsec->rx_esw) {
+- mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
+- } else {
+- mlx5_del_flow_rules(rx->status.rule);
+- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+- }
++ mlx5_ipsec_rx_status_destroy(ipsec, rx);
+ mlx5_destroy_flow_table(rx->ft.status);
+
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
+@@ -351,10 +449,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+- if (rx == ipsec->rx_esw)
+- err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
+- else
+- err = ipsec_status_rule(mdev, rx, dest);
++ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ if (err)
+ goto err_add;
+
+@@ -417,8 +512,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ err_fs_ft:
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
+- mlx5_del_flow_rules(rx->status.rule);
+- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
++ mlx5_ipsec_rx_status_destroy(ipsec, rx);
+ err_add:
+ mlx5_destroy_flow_table(rx->ft.status);
+ err_fs_ft_status:
+@@ -879,13 +973,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+ }
+
+-static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
++static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
+ {
+ /* SPI number */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
+- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+- MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
++ if (encap) {
++ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
++ misc_parameters.inner_esp_spi);
++ MLX5_SET(fte_match_param, spec->match_value,
++ misc_parameters.inner_esp_spi, spi);
++ } else {
++ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
++ misc_parameters.outer_esp_spi);
++ MLX5_SET(fte_match_param, spec->match_value,
++ misc_parameters.outer_esp_spi, spi);
++ }
+ }
+
+ static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
+@@ -1244,8 +1347,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+- setup_fte_spi(spec, attrs->spi);
+- setup_fte_esp(spec);
++ setup_fte_spi(spec, attrs->spi, attrs->encap);
++ if (!attrs->encap)
++ setup_fte_esp(spec);
+ setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
+
+@@ -1348,7 +1452,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+
+ switch (attrs->type) {
+ case XFRM_DEV_OFFLOAD_CRYPTO:
+- setup_fte_spi(spec, attrs->spi);
++ setup_fte_spi(spec, attrs->spi, false);
+ setup_fte_esp(spec);
+ setup_fte_reg_a(spec);
+ break;
+@@ -1729,8 +1833,11 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int err = 0;
+
+- if (esw)
+- down_write(&esw->mode_lock);
++ if (esw) {
++ err = mlx5_esw_lock(esw);
++ if (err)
++ return err;
++ }
+
+ if (mdev->num_block_ipsec) {
+ err = -EBUSY;
+@@ -1741,7 +1848,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+
+ unlock:
+ if (esw)
+- up_write(&esw->mode_lock);
++ mlx5_esw_unlock(esw);
+
+ return err;
+ }
+@@ -1758,7 +1865,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+
+ static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
+ {
+- mdev->num_block_tc++;
++ mdev->num_block_tc--;
+ }
+
+ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
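The refactor above folds the eswitch-only status rules into common code: mlx5_ipsec_rx_status_create() first installs the catch-all drop rule at the table's last flow index, then the pass rule for a zero ipsec_syndrome, and destroys the drop rule again if the second step fails. A compact sketch of that create-with-rollback shape, with hypothetical stand-ins for the two rules:

#include <stdio.h>

static int  create_drop(void)  { puts("drop rule created");   return 0; }
static void destroy_drop(void) { puts("drop rule destroyed"); }
static int  create_pass(void)  { puts("pass rule created");   return -1; /* simulated failure */ }

static int status_create(void)
{
	int err;

	err = create_drop();
	if (err)
		return err;

	err = create_pass();
	if (err)
		goto err_pass_create;

	return 0;

err_pass_create:
	destroy_drop();	/* roll back stage one when stage two fails */
	return err;
}

int main(void)
{
	return status_create() ? 1 : 0;
}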
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+index 3245d1c9d53929..de83567aae7913 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+@@ -5,6 +5,8 @@
+ #include "en.h"
+ #include "ipsec.h"
+ #include "lib/crypto.h"
++#include "fs_core.h"
++#include "eswitch.h"
+
+ enum {
+ MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+@@ -37,7 +39,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+ MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
+ caps |= MLX5_IPSEC_CAP_CRYPTO;
+
+- if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
++ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
++ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
++ (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
++ is_mdev_legacy_mode(mdev)))) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_add_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+@@ -45,9 +50,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+ caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
+- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
++ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
++ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
++ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
++ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
+ caps |= MLX5_IPSEC_CAP_PRIO;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+@@ -94,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+
+ if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+ MLX5_SET(ipsec_aso, aso_ctx, window_sz,
+- attrs->replay_esn.replay_window / 64);
++ attrs->replay_esn.replay_window);
+ MLX5_SET(ipsec_aso, aso_ctx, mode,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION);
+ }
+@@ -558,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+ kfree(aso);
++ ipsec->aso = NULL;
+ }
+
+ static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
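The window_sz hunk above follows from the ipsec.c change: attrs->replay_esn.replay_window now already carries the hardware encoding rather than a size in bits, so the old division by 64 would double-convert. A toy illustration of the two representations; the 0..3 encodings are assumptions for the example only.

#include <assert.h>

/* raw window size in bits -> assumed 2-bit hardware encoding (0..3) */
static unsigned int encode(unsigned int bits)
{
	switch (bits) {
	case 32:  return 0;
	case 64:  return 1;
	case 128: return 2;
	case 256: return 3;
	}
	assert(0);	/* rejected earlier by state validation */
	return 0;
}

int main(void)
{
	/* Old flow: the raw bit count was scaled when programming the ASO
	 * (note 256 / 64 == 4 would not even fit the assumed 2-bit field). */
	assert(64 / 64 == 1);
	/* New flow: the value arrives pre-encoded and is written as-is;
	 * scaling it again would degenerate to 0. */
	assert(encode(64) == 1 && encode(64) / 64 == 0);
	return 0;
}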
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+index 9ee014a8ad24a8..ff59c6adbb9634 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+@@ -99,18 +99,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+ if (!x || !x->xso.offload_handle)
+ goto out_disable;
+
+- if (xo->inner_ipproto) {
+- /* Cannot support tunnel packet over IPsec tunnel mode
+- * because we cannot offload three IP header csum
+- */
+- if (x->props.mode == XFRM_MODE_TUNNEL)
+- goto out_disable;
+-
+- /* Only support UDP or TCP L4 checksum */
+- if (xo->inner_ipproto != IPPROTO_UDP &&
+- xo->inner_ipproto != IPPROTO_TCP)
+- goto out_disable;
+- }
++ /* Only support UDP or TCP L4 checksum */
++ if (xo->inner_ipproto &&
++ xo->inner_ipproto != IPPROTO_UDP &&
++ xo->inner_ipproto != IPPROTO_TCP)
++ goto out_disable;
+
+ return features;
+
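The simplification above drops the special-casing of tunnel-over-tunnel packets and keeps a single rule: if an inner header is present at all, only TCP and UDP inner checksums can be offloaded. A sketch of the resulting gate:

#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

/* inner_ipproto == 0 means no inner header was parsed. */
static bool can_offload(unsigned char inner_ipproto)
{
	return inner_ipproto == 0 ||
	       inner_ipproto == IPPROTO_UDP ||
	       inner_ipproto == IPPROTO_TCP;
}

int main(void)
{
	printf("none: %d  udp: %d  gre: %d\n",
	       can_offload(0),
	       can_offload(IPPROTO_UDP),
	       can_offload(IPPROTO_GRE));	/* gre: 0, falls back to sw csum */
	return 0;
}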
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index d4ebd874311457..cc9bcc42003242 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ }
+
+-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+- struct mlx5e_macsec_sa *sa,
+- bool is_tx, struct net_device *netdev, u32 fs_id)
++static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
++ struct mlx5e_macsec_sa *sa, bool is_tx,
++ struct net_device *netdev, u32 fs_id)
+ {
+ int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+
+ mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
+ fs_id);
+- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+ sa->macsec_rule = NULL;
+ }
+
++static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
++ struct mlx5e_macsec_sa *sa, bool is_tx,
++ struct net_device *netdev, u32 fs_id)
++{
++ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
++ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
++}
++
++static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
++ struct mlx5e_macsec_sa *sa, bool encrypt,
++ bool is_tx, u32 *fs_id)
++{
++ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
++ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
++ struct mlx5_macsec_rule_attrs rule_attrs;
++ union mlx5_macsec_rule *macsec_rule;
++
++ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
++ rule_attrs.sci = sa->sci;
++ rule_attrs.assoc_num = sa->assoc_num;
++ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
++ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
++
++ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
++ if (!macsec_rule)
++ return -ENOMEM;
++
++ sa->macsec_rule = macsec_rule;
++
++ return 0;
++}
++
+ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa,
+ bool encrypt, bool is_tx, u32 *fs_id)
+ {
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
+- struct mlx5_macsec_rule_attrs rule_attrs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_macsec_obj_attrs obj_attrs;
+- union mlx5_macsec_rule *macsec_rule;
+ int err;
+
+ obj_attrs.next_pn = sa->next_pn;
+@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ if (err)
+ return err;
+
+- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+- rule_attrs.sci = sa->sci;
+- rule_attrs.assoc_num = sa->assoc_num;
+- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+-
+- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
+- if (!macsec_rule) {
+- err = -ENOMEM;
+- goto destroy_macsec_object;
++ if (sa->active) {
++ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
++ if (err)
++ goto destroy_macsec_object;
+ }
+
+- sa->macsec_rule = macsec_rule;
+-
+ return 0;
+
+ destroy_macsec_object:
+@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
+ goto destroy_sa;
+
+ macsec_device->tx_sa[assoc_num] = tx_sa;
+- if (!secy->operational ||
+- assoc_num != tx_sc->encoding_sa ||
+- !tx_sa->active)
++ if (!secy->operational)
+ goto out;
+
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+ goto out;
+
+ if (ctx_tx_sa->active) {
+- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
++ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ if (err)
+ goto out;
+ } else {
+@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+ goto out;
+ }
+
+- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
++ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ }
+ out:
+ mutex_unlock(&macsec->lock);
+@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
+ goto out;
+ }
+
+- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+- rx_sc->sc_xarray_element->fs_id);
++ if (rx_sa->active)
++ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
++ rx_sc->sc_xarray_element->fs_id);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[assoc_num] = NULL;
+@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
+ if (!rx_sa || !rx_sa->macsec_rule)
+ continue;
+
+- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+- rx_sc->sc_xarray_element->fs_id);
++ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
++ rx_sc->sc_xarray_element->fs_id);
+ }
+ }
+
+@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
+ continue;
+
+ if (rx_sa->active) {
+- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
+- &rx_sc->sc_xarray_element->fs_id);
++ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
++ &rx_sc->sc_xarray_element->fs_id);
+ if (err)
+ goto out;
+ }
+@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+ if (!tx_sa)
+ continue;
+
+- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
++ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+ continue;
+
+ if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
+- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
++ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ if (err)
+ goto out;
+ }
+@@ -1620,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
+ .mdo_add_secy = mlx5e_macsec_add_secy,
+ .mdo_upd_secy = mlx5e_macsec_upd_secy,
+ .mdo_del_secy = mlx5e_macsec_del_secy,
++ .rx_uses_md_dst = true,
+ };
+
+ bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
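The MACsec rework above splits SA teardown in two: mlx5e_macsec_cleanup_sa_fs() removes only the steering rules, so an SA can be deactivated and reactivated (see the upd_txsa path) without destroying and recreating the hardware object, while mlx5e_macsec_cleanup_sa() does both. A minimal sketch of the layering, with illustrative types:

struct sa {
	int rule_installed;	/* steering rule state */
	int obj_allocated;	/* hardware object state */
};

static void cleanup_sa_fs(struct sa *sa)
{
	sa->rule_installed = 0;	/* rules only: object survives for re-arm */
}

static void cleanup_sa(struct sa *sa)
{
	cleanup_sa_fs(sa);	/* first the rules... */
	sa->obj_allocated = 0;	/* ...then the object itself */
}

int main(void)
{
	struct sa sa = { 1, 1 };

	cleanup_sa_fs(&sa);	/* deactivate: object kept */
	cleanup_sa(&sa);	/* full teardown */
	return sa.rule_installed || sa.obj_allocated;
}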
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index bb7f86c993e557..415fec7763bd26 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -45,6 +45,10 @@ struct arfs_table {
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+ };
+
++enum {
++ MLX5E_ARFS_STATE_ENABLED,
++};
++
+ enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+@@ -59,6 +63,7 @@ struct mlx5e_arfs_tables {
+ spinlock_t arfs_lock;
+ int last_filter_id;
+ struct workqueue_struct *wq;
++ unsigned long state;
+ };
+
+ struct arfs_tuple {
+@@ -169,6 +174,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
+ return err;
+ }
+ }
++ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
++
+ return 0;
+ }
+
+@@ -254,11 +261,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+
+ ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+- in = kvzalloc(inlen, GFP_KERNEL);
+- if (!in || !ft->g) {
+- kfree(ft->g);
+- kvfree(in);
++ if (!ft->g)
+ return -ENOMEM;
++
++ in = kvzalloc(inlen, GFP_KERNEL);
++ if (!in) {
++ err = -ENOMEM;
++ goto err_free_g;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+@@ -278,7 +287,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ break;
+ default:
+ err = -EINVAL;
+- goto out;
++ goto err_free_in;
+ }
+
+ switch (type) {
+@@ -300,7 +309,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ break;
+ default:
+ err = -EINVAL;
+- goto out;
++ goto err_free_in;
+ }
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+@@ -309,7 +318,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+- goto err;
++ goto err_clean_group;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+@@ -318,18 +327,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+- goto err;
++ goto err_clean_group;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+-err:
++err_clean_group:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+-out:
++err_free_in:
+ kvfree(in);
+-
++err_free_g:
++ kfree(ft->g);
++ ft->g = NULL;
+ return err;
+ }
+
+@@ -450,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
+ int i;
+ int j;
+
++ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
++
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
+ hlist_del_init(&rule->hlist);
+@@ -622,17 +635,8 @@ static void arfs_handle_work(struct work_struct *work)
+ struct mlx5_flow_handle *rule;
+
+ arfs = mlx5e_fs_get_arfs(priv->fs);
+- mutex_lock(&priv->state_lock);
+- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+- spin_lock_bh(&arfs->arfs_lock);
+- hlist_del(&arfs_rule->hlist);
+- spin_unlock_bh(&arfs->arfs_lock);
+-
+- mutex_unlock(&priv->state_lock);
+- kfree(arfs_rule);
+- goto out;
+- }
+- mutex_unlock(&priv->state_lock);
++ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
++ return;
+
+ if (!arfs_rule->rule) {
+ rule = arfs_add_rule(priv, arfs_rule);
+@@ -748,6 +752,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&arfs->arfs_lock);
++ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
++ spin_unlock_bh(&arfs->arfs_lock);
++ return -EPERM;
++ }
++
+ arfs_rule = arfs_find_rule(arfs_t, &fk);
+ if (arfs_rule) {
+ if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
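The new MLX5E_ARFS_STATE_ENABLED bit above replaces the old rtnl/state_lock dance: mlx5e_rx_flow_steer() refuses new rules unless the bit is set, and arfs_del_rules() clears it before flushing, so a late arfs_handle_work() simply returns. A userspace sketch of the gate, with a pthread mutex standing in for arfs_lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t arfs_lock = PTHREAD_MUTEX_INITIALIZER;
static bool arfs_enabled;

static int rx_flow_steer(void)
{
	int ret = 0;

	pthread_mutex_lock(&arfs_lock);
	if (!arfs_enabled)
		ret = -1;	/* -EPERM in the driver */
	/* else: look up or queue the rule while still holding the lock */
	pthread_mutex_unlock(&arfs_lock);
	return ret;
}

int main(void)	/* build with -lpthread */
{
	printf("before enable: %d\n", rx_flow_steer());
	arfs_enabled = true;
	printf("after enable:  %d\n", rx_flow_steer());
	return 0;
}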
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index dff02434ff458a..54379297a7489e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
++ int count;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+- "%d.%d.%04d (%.16s)",
+- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+- mdev->board_id);
++ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++ if (count >= sizeof(drvinfo->fw_version))
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev));
++
+ strscpy(drvinfo->bus_info, dev_name(mdev->device),
+ sizeof(drvinfo->bus_info));
+ }
+@@ -131,6 +136,10 @@ void mlx5e_build_ptys2ethtool_map(void)
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
++ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
++ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
+@@ -196,6 +205,12 @@ void mlx5e_build_ptys2ethtool_map(void)
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
++ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+@@ -1218,7 +1233,12 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ if (!an_changes && link_modes == eproto.admin)
+ goto out;
+
+- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
++ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
++ if (err) {
++ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
++ goto out;
++ }
++
+ mlx5_toggle_port_link(mdev);
+
+ out:
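The get_drvinfo fix above handles snprintf() truncation explicitly: the return value is the length the full string would have needed, so a value >= the buffer size means the board id did not fit and the version is reprinted without it. The same pattern in isolation, with made-up version data:

#include <stdio.h>

static void fw_version(char *buf, size_t len, int maj, int min, int sub,
		       const char *board)
{
	int count = snprintf(buf, len, "%d.%d.%04d (%.16s)",
			     maj, min, sub, board);

	if (count >= (int)len)	/* truncated: retry without the board id */
		snprintf(buf, len, "%d.%d.%04d", maj, min, sub);
}

int main(void)
{
	char small[12];

	fw_version(small, sizeof(small), 22, 36, 1010, "MT_0000000222");
	printf("%s\n", small);	/* prints "22.36.1010" */
	return 0;
}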
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index 3eccdadc035781..773624bb2c5d54 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ if (num_tuples <= 0) {
+ netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+ __func__, num_tuples);
+- return num_tuples;
++ return num_tuples < 0 ? num_tuples : -EINVAL;
+ }
+
+ eth_ft = get_flow_table(priv, fs, num_tuples);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index acb40770cf0cf7..a65c407aa60bdf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2668,6 +2668,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
+ {
+ int i;
+
++ ASSERT_RTNL();
+ if (chs->ptp) {
+ mlx5e_ptp_close(chs->ptp);
+ chs->ptp = NULL;
+@@ -2945,17 +2946,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
+ if (mlx5e_is_vport_rep(priv))
+ mlx5e_rep_activate_channels(priv);
+
++ set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
++
+ mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+
+ if (priv->rx_res)
+ mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
+ }
+
++static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
++{
++ WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
++ if (current_work() != &priv->tx_timeout_work)
++ cancel_work_sync(&priv->tx_timeout_work);
++}
++
+ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
+ {
+ if (priv->rx_res)
+ mlx5e_rx_res_channels_deactivate(priv->rx_res);
+
++ clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
++ mlx5e_cancel_tx_timeout_work(priv);
++
+ if (mlx5e_is_vport_rep(priv))
+ mlx5e_rep_deactivate_channels(priv);
+
+@@ -3743,7 +3756,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ mlx5e_fold_sw_stats64(priv, stats);
+ }
+
+- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
++ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+
+ stats->rx_length_errors =
+ PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+@@ -4691,7 +4704,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
+
+ /* Verify if UDP port is being offloaded by HW */
+ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
+- return features;
++ return vxlan_features_check(skb, features);
+
+ #if IS_ENABLED(CONFIG_GENEVE)
+ /* Support Geneve offload for default UDP port */
+@@ -4717,7 +4730,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ features = vlan_features_check(skb, features);
+- features = vxlan_features_check(skb, features);
+
+ /* Validate if the tunneled packet is being offloaded by HW */
+ if (skb->encapsulation &&
+@@ -4734,8 +4746,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
+ struct net_device *netdev = priv->netdev;
+ int i;
+
+- rtnl_lock();
+- mutex_lock(&priv->state_lock);
++ /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
++ * through this flow. However, channel closing flows have to wait for
++ * this work to finish while holding rtnl lock too. So either get the
++	 * lock or find that channels are being closed for another reason and
++ * this work is not relevant anymore.
++ */
++ while (!rtnl_trylock()) {
++ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
++ return;
++ msleep(20);
++ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+@@ -4754,7 +4775,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
+ }
+
+ unlock:
+- mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+ }
+
+@@ -5673,15 +5693,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+ kfree(priv->tx_rates);
+ kfree(priv->txq2sq);
+ destroy_workqueue(priv->wq);
+- mutex_lock(&priv->state_lock);
+ mlx5e_selq_cleanup(&priv->selq);
+- mutex_unlock(&priv->state_lock);
+ free_cpumask_var(priv->scratchpad.cpumask);
+
+ for (i = 0; i < priv->htb_max_qos_sqs; i++)
+ kfree(priv->htb_qos_sq_stats[i]);
+ kvfree(priv->htb_qos_sq_stats);
+
++ if (priv->mqprio_rl) {
++ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
++ mlx5e_mqprio_rl_free(priv->mqprio_rl);
++ }
++
+ memset(priv, 0, sizeof(*priv));
+ }
+
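The tx_timeout_work change above trades the unconditional rtnl_lock() for a trylock loop: channel-close flows cancel this work while themselves holding the rtnl lock, so the worker must either get the lock or notice that channels went inactive and bail out, never block. A userspace sketch with a pthread mutex in place of rtnl:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool channels_active = true;

static void tx_timeout_work(void)
{
	while (pthread_mutex_trylock(&rtnl) != 0) {
		if (!atomic_load(&channels_active))
			return;		/* closer owns the lock and is cancelling us */
		usleep(20 * 1000);	/* msleep(20) in the driver */
	}
	/* ... recover the stuck queues under the lock ... */
	pthread_mutex_unlock(&rtnl);
}

int main(void)	/* build with -lpthread */
{
	tx_timeout_work();	/* lock is free here, so this returns promptly */
	return 0;
}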
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index fd1cce542b680f..751d3ffcd2f6ce 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
++ int count;
+
+ strscpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+- "%d.%d.%04d (%.16s)",
+- fw_rev_maj(mdev), fw_rev_min(mdev),
+- fw_rev_sub(mdev), mdev->board_id);
++ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++ if (count >= sizeof(drvinfo->fw_version))
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev));
+ }
+
+ static const struct counter_desc sw_rep_stats_desc[] = {
+@@ -1499,7 +1503,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
+ rpriv->rep->vport);
+- if (dl_port) {
++ if (!IS_ERR(dl_port)) {
+ SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
+ mlx5e_rep_vnic_reporter_create(priv, dl_port);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 8d9743a5e42c7c..57b0e26696e306 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -2369,11 +2369,15 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
+ if (flush)
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+ free_hd_entry:
+- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
++ if (likely(head_size))
++ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
++ if (unlikely(!cstrides))
++ return;
++
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index c8590483ddc64b..dc9b157a449935 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
+ struct mlx5e_flow_meter_handle *meter;
+ enum mlx5e_post_meter_type type;
+
++ if (IS_ERR(post_act))
++ return PTR_ERR(post_act);
++
+ meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
+ if (IS_ERR(meter)) {
+ mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
+@@ -2009,9 +2012,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
+ list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
+ if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+ continue;
++
++ list_del(&peer_flow->peer_flows);
+ if (refcount_dec_and_test(&peer_flow->refcnt)) {
+ mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
+- list_del(&peer_flow->peer_flows);
+ kfree(peer_flow);
+ }
+ }
+@@ -3145,7 +3149,7 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+- OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
++ OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
+
+ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
+@@ -3156,21 +3160,31 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
+ };
+
+-static unsigned long mask_to_le(unsigned long mask, int size)
++static u32 mask_field_get(void *mask, struct mlx5_fields *f)
+ {
+- __be32 mask_be32;
+- __be16 mask_be16;
+-
+- if (size == 32) {
+- mask_be32 = (__force __be32)(mask);
+- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+- } else if (size == 16) {
+- mask_be32 = (__force __be32)(mask);
+- mask_be16 = *(__be16 *)&mask_be32;
+- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
++ switch (f->field_bsize) {
++ case 32:
++ return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
++ case 16:
++ return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
++ default:
++ return *(u8 *)mask & (u8)f->field_mask;
+ }
++}
+
+- return mask;
++static void mask_field_clear(void *mask, struct mlx5_fields *f)
++{
++ switch (f->field_bsize) {
++ case 32:
++ *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
++ break;
++ case 16:
++ *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
++ break;
++ default:
++ *(u8 *)mask &= ~(u8)f->field_mask;
++ break;
++ }
+ }
+
+ static int offload_pedit_fields(struct mlx5e_priv *priv,
+@@ -3182,11 +3196,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
+ void *headers_c, *headers_v, *action, *vals_p;
+- u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+- unsigned long mask, field_mask;
++ void *s_masks_p, *a_masks_p;
+ int i, first, last, next_z;
+ struct mlx5_fields *f;
++ unsigned long mask;
++ u32 s_mask, a_mask;
+ u8 cmd;
+
+ mod_acts = &parse_attr->mod_hdr_acts;
+@@ -3202,15 +3217,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ bool skip;
+
+ f = &fields[i];
+- /* avoid seeing bits set from previous iterations */
+- s_mask = 0;
+- a_mask = 0;
+-
+ s_masks_p = (void *)set_masks + f->offset;
+ a_masks_p = (void *)add_masks + f->offset;
+
+- s_mask = *s_masks_p & f->field_mask;
+- a_mask = *a_masks_p & f->field_mask;
++ s_mask = mask_field_get(s_masks_p, f);
++ a_mask = mask_field_get(a_masks_p, f);
+
+ if (!s_mask && !a_mask) /* nothing to offload here */
+ continue;
+@@ -3237,22 +3248,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ match_mask, f->field_bsize))
+ skip = true;
+ /* clear to denote we consumed this field */
+- *s_masks_p &= ~f->field_mask;
++ mask_field_clear(s_masks_p, f);
+ } else {
+ cmd = MLX5_ACTION_TYPE_ADD;
+ mask = a_mask;
+ vals_p = (void *)add_vals + f->offset;
+ /* add 0 is no change */
+- if ((*(u32 *)vals_p & f->field_mask) == 0)
++ if (!mask_field_get(vals_p, f))
+ skip = true;
+ /* clear to denote we consumed this field */
+- *a_masks_p &= ~f->field_mask;
++ mask_field_clear(a_masks_p, f);
+ }
+ if (skip)
+ continue;
+
+- mask = mask_to_le(mask, f->field_bsize);
+-
+ first = find_first_bit(&mask, f->field_bsize);
+ next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ last = find_last_bit(&mask, f->field_bsize);
+@@ -3279,10 +3288,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ MLX5_SET(set_action_in, action, field, f->field);
+
+ if (cmd == MLX5_ACTION_TYPE_SET) {
++ unsigned long field_mask = f->field_mask;
+ int start;
+
+- field_mask = mask_to_le(f->field_mask, f->field_bsize);
+-
+ /* if field is bit sized it can start not from first bit */
+ start = find_first_bit(&field_mask, f->field_bsize);
+
+@@ -3732,6 +3740,20 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
+ return err;
+ }
+
++static int
++set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
++{
++ struct mlx5e_post_act *post_act = get_post_action(priv);
++
++ if (IS_ERR(post_act))
++ return PTR_ERR(post_act);
++
++ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
++ attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
++
++ return 0;
++}
++
+ static int
+ alloc_branch_attr(struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_act_branch_ctrl *cond,
+@@ -3755,8 +3777,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
+ break;
+ case FLOW_ACTION_ACCEPT:
+ case FLOW_ACTION_PIPE:
+- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
++ err = set_branch_dest_ft(flow->priv, attr);
++ if (err)
++ goto out_err;
+ break;
+ case FLOW_ACTION_JUMP:
+ if (*jump_count) {
+@@ -3765,8 +3788,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
+ goto out_err;
+ }
+ *jump_count = cond->extval;
+- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+- attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
++ err = set_branch_dest_ft(flow->priv, attr);
++ if (err)
++ goto out_err;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+@@ -5713,8 +5737,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
+
+ esw = priv->mdev->priv.eswitch;
+ attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
+- if (IS_ERR(attr->act_id_restore_rule))
++ if (IS_ERR(attr->act_id_restore_rule)) {
++ err = PTR_ERR(attr->act_id_restore_rule);
+ goto err_rule;
++ }
+
+ return 0;
+
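The mask_field_get()/mask_field_clear() pair above makes the pedit mask accesses width-aware: the masks sit in network byte order, and the old code always loaded 32 bits through a u32 pointer, which misreads 16- and 8-bit fields. The read side in isolation:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mask_field_get(const void *mask, int bsize, uint32_t field_mask)
{
	switch (bsize) {
	case 32:
		return ntohl(*(const uint32_t *)mask) & field_mask;
	case 16:
		return ntohs(*(const uint16_t *)mask) & (uint16_t)field_mask;
	default:
		return *(const uint8_t *)mask & (uint8_t)field_mask;
	}
}

int main(void)
{
	uint16_t sport_mask = htons(0xffff);	/* e.g. a TCP source-port mask */

	printf("0x%x\n", mask_field_get(&sport_mask, 16, 0xffff));
	return 0;
}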
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index d41435c22ce56f..85d6334308e318 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+
+ *hopbyhop = 0;
+ if (skb->encapsulation) {
+- ihs = skb_inner_tcp_all_headers(skb);
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
++ ihs = skb_inner_transport_offset(skb) +
++ sizeof(struct udphdr);
++ else
++ ihs = skb_inner_tcp_all_headers(skb);
+ stats->tso_inner_packets++;
+ stats->tso_inner_bytes += skb->len - ihs;
+ } else {
+@@ -398,10 +402,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+
++ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
++
+ mlx5e_skb_cb_hwtstamp_init(skb);
+- mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
+ metadata_index);
++ /* ensure skb is put on metadata_map before tracking the index */
++ wmb();
++ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ if (!netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
+ netif_tx_stop_queue(sq->txq);
+@@ -495,9 +503,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ err_drop:
+ stats->dropped++;
+ dev_kfree_skb_any(skb);
+- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
+- be32_to_cpu(eseg->flow_table_metadata));
+ mlx5e_tx_flush(sq);
+ }
+
+@@ -637,7 +642,6 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ return;
+
+ err_unmap:
+- mlx5e_dma_unmap_wqe_err(sq, 1);
+ sq->stats->dropped++;
+ dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
+@@ -655,7 +659,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata =
+- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
++ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
+ }
+
+ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
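The ordering fix above splits the FIFO access in two: mlx5e_ptp_metadata_fifo_peek() is used while building the WQE, and the id is only popped in mlx5e_txwqe_complete() once the send is committed, which is why the error path no longer needs to push the id back. A toy FIFO showing the peek/commit split:

#include <stdio.h>

struct fifo { int data[4]; unsigned int head, tail; };

static int  fifo_peek(struct fifo *f) { return f->data[f->head & 3]; }
static void fifo_pop(struct fifo *f)  { f->head++; }

int main(void)
{
	struct fifo freelist = { { 0, 1, 2, 3 }, 0, 4 };
	int id = fifo_peek(&freelist);	/* use id in the descriptor... */

	/* ...had the xmit failed here, there would be nothing to undo... */
	fifo_pop(&freelist);		/* ...commit only on success */
	printf("consumed metadata id %d\n", id);
	return 0;
}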
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index ea0405e0a43fac..40a6cb052a2da3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ {
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
++ int cpu;
+
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
+
++ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
++ cpumask_clear_cpu(cpu, &table->used_cpus);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_affinity_irq_release(dev, irq);
+ }
+@@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
+ {
+ struct mlx5_eq_table *table = dev->priv.eq_table;
++ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
++ struct irq_affinity_desc af_desc = {};
+ struct mlx5_irq *irq;
+
+- irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
+- if (IS_ERR(irq)) {
+- /* In case SF irq pool does not exist, fallback to the PF irqs*/
+- if (PTR_ERR(irq) == -ENOENT)
+- return comp_irq_request_pci(dev, vecidx);
++	/* In case the SF IRQ pool does not exist, fall back to the PF IRQs. */
++ if (!mlx5_irq_pool_is_sf_pool(pool))
++ return comp_irq_request_pci(dev, vecidx);
+
++ af_desc.is_managed = 1;
++ cpumask_copy(&af_desc.mask, cpu_online_mask);
++ cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
++ irq = mlx5_irq_affinity_request(pool, &af_desc);
++ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+- }
++
++ cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
++ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
++ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
++ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
++ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
+ }
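comp_irq_request_sf() above now builds the affinity request by hand: candidates are the online CPUs minus table->used_cpus, and after a successful request the IRQ's affinity mask is merged back in, with comp_irq_release_sf() clearing the CPU again on teardown. The same bookkeeping with a plain bitmask standing in for cpumask_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t online    = 0x0f;	/* CPUs 0-3 online */
	uint64_t used_cpus = 0x03;	/* CPUs 0-1 already serve SF IRQs */

	uint64_t candidates = online & ~used_cpus;	/* cpumask_andnot() */
	uint64_t chosen     = candidates & -candidates;	/* lowest free CPU */

	used_cpus |= chosen;				/* cpumask_or() */
	printf("chosen 0x%llx, used 0x%llx\n",
	       (unsigned long long)chosen, (unsigned long long)used_cpus);

	used_cpus &= ~chosen;	/* release: cpumask_clear_cpu() */
	return 0;
}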
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index 50d2ea32397982..a436ce895e45a6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -6,6 +6,9 @@
+ #include "helper.h"
+ #include "ofld.h"
+
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
++
+ static bool
+ esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ {
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
++ bool created = false;
+ int err = 0;
+
++ if (!vport->ingress.acl) {
++ err = acl_ingress_ofld_setup(esw, vport);
++ if (err)
++ return err;
++ created = true;
++ }
++
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.fg = vport->ingress.offloads.drop_grp;
+ flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+- goto out;
++ goto err_out;
+ }
+
+ vport->ingress.offloads.drop_rule = flow_rule;
+-out:
++
++ return 0;
++err_out:
++ /* Only destroy ingress acl created in this function. */
++ if (created)
++ esw_acl_ingress_ofld_cleanup(esw, vport);
+ return err;
+ }
+
+@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+ }
+ }
+
+-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+- struct mlx5_vport *vport)
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ {
+ int num_ftes = 0;
+ int err;
+
+- if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+- !esw_acl_ingress_prio_tag_enabled(esw, vport))
+- return 0;
+-
+ esw_acl_ingress_allow_rule_destroy(vport);
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+@@ -347,6 +359,15 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+ return err;
+ }
+
++int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
++{
++ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
++ !esw_acl_ingress_prio_tag_enabled(esw, vport))
++ return 0;
++
++ return acl_ingress_ofld_setup(esw, vport);
++}
++
+ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+index 1b9bc32efd6fa9..c5ea1d1d2b035c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+@@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
+ "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ addr, vid, vport_num);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+- "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
++ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ addr, vid, vport_num);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+index 7a01714b378003..22dd30cf8033f9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+@@ -78,9 +78,12 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
+ xa_for_each(&entry->ports, idx, port) {
+ dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dests[i].ft = port->mcast.ft;
++ if (port->vport_num == MLX5_VPORT_UPLINK)
++ dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
+ i++;
+ }
+
++ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
+ ether_addr_copy(dmac_v, entry->key.addr);
+@@ -585,10 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+- if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+- port->vport_num == MLX5_VPORT_UPLINK)
+- rule_spec->flow_context.flow_source =
+- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
++ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+@@ -660,15 +660,11 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+- if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+- port->vport_num == MLX5_VPORT_UPLINK)
+- rule_spec->flow_context.flow_source =
+- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+-
+ if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
+ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ dest.vport.vhca_id = port->esw_owner_vhca_id;
+ }
++ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
+ handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+index 095f31f380fa3a..13b5916b64e224 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+@@ -21,158 +21,6 @@ enum {
+ MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
+ };
+
+-static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx)
+-{
+- mlx5_del_flow_rules(rx->status_drop.rule);
+- mlx5_destroy_flow_group(rx->status_drop.group);
+- mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+-}
+-
+-static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx)
+-{
+- mlx5_del_flow_rules(rx->status.rule);
+- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+-}
+-
+-static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx)
+-{
+- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+- struct mlx5_flow_table *ft = rx->ft.status;
+- struct mlx5_core_dev *mdev = ipsec->mdev;
+- struct mlx5_flow_destination dest = {};
+- struct mlx5_flow_act flow_act = {};
+- struct mlx5_flow_handle *rule;
+- struct mlx5_fc *flow_counter;
+- struct mlx5_flow_spec *spec;
+- struct mlx5_flow_group *g;
+- u32 *flow_group_in;
+- int err = 0;
+-
+- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+- if (!flow_group_in || !spec) {
+- err = -ENOMEM;
+- goto err_out;
+- }
+-
+- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+- g = mlx5_create_flow_group(ft, flow_group_in);
+- if (IS_ERR(g)) {
+- err = PTR_ERR(g);
+- mlx5_core_err(mdev,
+- "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+- goto err_out;
+- }
+-
+- flow_counter = mlx5_fc_create(mdev, false);
+- if (IS_ERR(flow_counter)) {
+- err = PTR_ERR(flow_counter);
+- mlx5_core_err(mdev,
+- "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+- goto err_cnt;
+- }
+-
+- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+- dest.counter_id = mlx5_fc_id(flow_counter);
+- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+- if (IS_ERR(rule)) {
+- err = PTR_ERR(rule);
+- mlx5_core_err(mdev,
+- "Failed to add ipsec rx status drop rule, err=%d\n", err);
+- goto err_rule;
+- }
+-
+- rx->status_drop.group = g;
+- rx->status_drop.rule = rule;
+- rx->status_drop_cnt = flow_counter;
+-
+- kvfree(flow_group_in);
+- kvfree(spec);
+- return 0;
+-
+-err_rule:
+- mlx5_fc_destroy(mdev, flow_counter);
+-err_cnt:
+- mlx5_destroy_flow_group(g);
+-err_out:
+- kvfree(flow_group_in);
+- kvfree(spec);
+- return err;
+-}
+-
+-static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx,
+- struct mlx5_flow_destination *dest)
+-{
+- struct mlx5_flow_act flow_act = {};
+- struct mlx5_flow_handle *rule;
+- struct mlx5_flow_spec *spec;
+- int err;
+-
+- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+- if (!spec)
+- return -ENOMEM;
+-
+- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+- misc_parameters_2.ipsec_syndrome);
+- MLX5_SET(fte_match_param, spec->match_value,
+- misc_parameters_2.ipsec_syndrome, 0);
+- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+- flow_act.flags = FLOW_ACT_NO_APPEND;
+- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+- rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+- if (IS_ERR(rule)) {
+- err = PTR_ERR(rule);
+- mlx5_core_warn(ipsec->mdev,
+- "Failed to add ipsec rx status pass rule, err=%d\n", err);
+- goto err_rule;
+- }
+-
+- rx->status.rule = rule;
+- kvfree(spec);
+- return 0;
+-
+-err_rule:
+- kvfree(spec);
+- return err;
+-}
+-
+-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx)
+-{
+- esw_ipsec_rx_status_pass_destroy(ipsec, rx);
+- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+-}
+-
+-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx,
+- struct mlx5_flow_destination *dest)
+-{
+- int err;
+-
+- err = esw_ipsec_rx_status_drop_create(ipsec, rx);
+- if (err)
+- return err;
+-
+- err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
+- if (err)
+- goto err_pass_create;
+-
+- return 0;
+-
+-err_pass_create:
+- esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+- return err;
+-}
+-
+ void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+index 0c90f7a8b0d32c..ac9c65b89166e6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+@@ -8,11 +8,6 @@ struct mlx5e_ipsec;
+ struct mlx5e_ipsec_sa_entry;
+
+ #ifdef CONFIG_MLX5_ESWITCH
+-void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx);
+-int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx,
+- struct mlx5_flow_destination *dest);
+ void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr);
+ int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+@@ -26,16 +21,6 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr);
+ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+ #else
+-static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx) {}
+-
+-static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+- struct mlx5e_ipsec_rx *rx,
+- struct mlx5_flow_destination *dest)
+-{
+- return -EINVAL;
+-}
+-
+ static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr) {}
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+index 255bc8b749f9a5..8587cd572da536 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+ return -EPERM;
+
+ mutex_lock(&esw->state_lock);
+- if (esw->mode != MLX5_ESWITCH_LEGACY) {
++ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+- if (esw->mode != MLX5_ESWITCH_LEGACY)
++ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
+ return -EOPNOTSUPP;
+
+ *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 1887a24ee414d0..cc0f2be21a265a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -311,6 +311,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
+ return err;
+ }
+
++static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
++{
++ switch (type) {
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_TSAR;
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_VPORT;
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
++ }
++ return false;
++}
++
+ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u32 max_rate, u32 bw_share)
+@@ -322,6 +341,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
+ void *vport_elem;
+ int err;
+
++ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
++ return -EOPNOTSUPP;
++
+ parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+@@ -420,6 +442,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
+ {
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_esw_rate_group *group;
++ __be32 *attr;
+ u32 divider;
+ int err;
+
+@@ -427,6 +450,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
++ MLX5_SET(scheduling_context, tsar_ctx, element_type,
++ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
++
++ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
++ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
++
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ esw->qos.root_tsar_ix);
+ err = mlx5_create_scheduling_element_cmd(esw->dev,
+@@ -525,25 +554,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ return err;
+ }
+
+-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+-{
+- switch (type) {
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_TASR;
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_VPORT;
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+- }
+- return false;
+-}
+-
+ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+ {
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+@@ -554,7 +564,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
+
+- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
++ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
++ !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 8d0b915a31214e..1789800faaeb62 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1463,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ {
+ int err;
+
+- lockdep_assert_held(&esw->mode_lock);
++ devl_assert_locked(priv_to_devlink(esw->dev));
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+ esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
+@@ -1531,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
+ if (toggle_lag)
+ mlx5_lag_disable_change(esw->dev);
+
+- down_write(&esw->mode_lock);
+ if (!mlx5_esw_is_fdb_created(esw)) {
+ ret = mlx5_eswitch_enable_locked(esw, num_vfs);
+ } else {
+@@ -1554,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
+ }
+ }
+
+- up_write(&esw->mode_lock);
+-
+ if (toggle_lag)
+ mlx5_lag_enable_change(esw->dev);
+
+@@ -1569,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
+ return;
+
+ devl_assert_locked(priv_to_devlink(esw->dev));
+- down_write(&esw->mode_lock);
+ /* If driver is unloaded, this function is called twice by remove_one()
+ * and mlx5_unload(). Prevent the second call.
+ */
+ if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
+- goto unlock;
++ return;
+
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+@@ -1603,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
+ esw->esw_funcs.num_vfs = 0;
+ else
+ esw->esw_funcs.num_ec_vfs = 0;
+-
+-unlock:
+- up_write(&esw->mode_lock);
+ }
+
+ /* Free resources for corresponding eswitch mode. It is called by devlink
+@@ -1647,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+
+ devl_assert_locked(priv_to_devlink(esw->dev));
+ mlx5_lag_disable_change(esw->dev);
+- down_write(&esw->mode_lock);
+ mlx5_eswitch_disable_locked(esw);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+- up_write(&esw->mode_lock);
+ mlx5_lag_enable_change(esw->dev);
+ }
+
+@@ -1877,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
+ if (err)
+ goto abort;
+
++ dev->priv.eswitch = esw;
+ err = esw_offloads_init(esw);
+ if (err)
+ goto reps_err;
+@@ -1901,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+ else
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+- if (MLX5_ESWITCH_MANAGER(dev) &&
+- mlx5_esw_vport_match_metadata_supported(esw))
+- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+-
+- dev->priv.eswitch = esw;
+ BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+
+ esw_info(dev,
+@@ -1917,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
+
+ reps_err:
+ mlx5_esw_vports_cleanup(esw);
++ dev->priv.eswitch = NULL;
+ abort:
+ if (esw->work_queue)
+ destroy_workqueue(esw->work_queue);
+@@ -1935,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
+
+ esw_info(esw->dev, "cleanup\n");
+
+- esw->dev->priv.eswitch = NULL;
+ destroy_workqueue(esw->work_queue);
+ WARN_ON(refcount_read(&esw->qos.refcnt));
+ mutex_destroy(&esw->state_lock);
+@@ -1946,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
+ mutex_destroy(&esw->offloads.encap_tbl_lock);
+ mutex_destroy(&esw->offloads.decap_tbl_lock);
+ esw_offloads_cleanup(esw);
++ esw->dev->priv.eswitch = NULL;
+ mlx5_esw_vports_cleanup(esw);
+ debugfs_remove_recursive(esw->debugfs_root);
+ devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
+@@ -2254,8 +2242,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+- if (down_read_trylock(&esw->mode_lock) != 0)
++ if (down_read_trylock(&esw->mode_lock) != 0) {
++ if (esw->eswitch_operation_in_progress) {
++ up_read(&esw->mode_lock);
++ return false;
++ }
+ return true;
++ }
+
+ return false;
+ }
+@@ -2312,7 +2305,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
+ if (down_write_trylock(&esw->mode_lock) == 0)
+ return -EINVAL;
+
+- if (atomic64_read(&esw->user_count) > 0) {
++ if (esw->eswitch_operation_in_progress ||
++ atomic64_read(&esw->user_count) > 0) {
+ up_write(&esw->mode_lock);
+ return -EBUSY;
+ }
+@@ -2320,6 +2314,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
+ return esw->mode;
+ }
+
++int mlx5_esw_lock(struct mlx5_eswitch *esw)
++{
++ down_write(&esw->mode_lock);
++
++ if (esw->eswitch_operation_in_progress) {
++ up_write(&esw->mode_lock);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
+ /**
+ * mlx5_esw_unlock() - Release write lock on esw mode lock
+ * @esw: eswitch device.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 37ab66e7b403f1..9b771b572593b5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -383,6 +383,7 @@ struct mlx5_eswitch {
+ struct xarray paired;
+ struct mlx5_devcom_comp_dev *devcom;
+ u16 enabled_ipsec_vf_count;
++ bool eswitch_operation_in_progress;
+ };
+
+ void esw_offloads_disable(struct mlx5_eswitch *esw);
+@@ -525,7 +526,8 @@ struct mlx5_esw_flow_attr {
+ u8 total_vlan;
+ struct {
+ u32 flags;
+- struct mlx5_eswitch_rep *rep;
++ bool vport_valid;
++ u16 vport;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_termtbl_handle *termtbl;
+@@ -827,6 +829,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev);
+ void mlx5_esw_get(struct mlx5_core_dev *dev);
+ void mlx5_esw_put(struct mlx5_core_dev *dev);
+ int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
++int mlx5_esw_lock(struct mlx5_eswitch *esw);
+ void mlx5_esw_unlock(struct mlx5_eswitch *esw);
+
+ void esw_vport_change_handle_locked(struct mlx5_vport *vport);
+@@ -837,7 +840,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw, int max_slaves);
+ void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw);
+-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
++int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
+
+ bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+ void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
+@@ -929,7 +932,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
+ static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
+
+ static inline int
+-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
++mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
+ {
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index b296ac52a43974..58529d1a98b37b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
+ for (i = from; i < to; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ mlx5_chains_put_table(chains, 0, 1, 0);
+- else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
++ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
+ esw_attr->dests[i].mdev))
+- mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
+- false);
++ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
+ }
+
+ static bool
+@@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+ * this criteria.
+ */
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
+- if (esw_attr->dests[i].rep &&
+- mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
++ if (esw_attr->dests[i].vport_valid &&
++ mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
+ esw_attr->dests[i].mdev)) {
+ result = true;
+ } else {
+@@ -388,7 +387,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
+- esw_attr->dests[j].rep->vport, false);
++ esw_attr->dests[j].vport, false);
+ if (IS_ERR(dest[*i].ft)) {
+ err = PTR_ERR(dest[*i].ft);
+ goto err_indir_tbl_get;
+@@ -432,11 +431,11 @@ static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
+ int attr_idx)
+ {
+ if (esw->offloads.ft_ipsec_tx_pol &&
+- esw_attr->dests[attr_idx].rep &&
+- esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
++ esw_attr->dests[attr_idx].vport_valid &&
++ esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
+ /* To be aligned with software, encryption is needed only for tunnel device */
+ (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
+- esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
++ esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
+ esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
+ return true;
+
+@@ -469,7 +468,7 @@ esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
+ int attr_idx, int dest_idx, bool pkt_reformat)
+ {
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+- dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
++ dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
+@@ -536,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+ }
+
+ static bool
+-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
++esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
+ {
+- bool vf_dest = false, pf_dest = false;
++ bool internal_dest = false, external_dest = false;
+ int i;
+
+ for (i = 0; i < max_dest; i++) {
+- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
++ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
++ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
+ continue;
+
+- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
+- pf_dest = true;
++ /* Uplink dest is external, but considered internal
++ * if there is a reformat, because firmware uses LB+hairpin to support it.
++ */
++ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
++ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
++ external_dest = true;
+ else
+- vf_dest = true;
++ internal_dest = true;
+
+- if (vf_dest && pf_dest)
++ if (internal_dest && external_dest)
+ return true;
+ }
+
+@@ -696,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+
+ /* Header rewrite with combined wire+loopback in FDB is not allowed */
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
+- esw_dests_to_vf_pf_vports(dest, i)) {
++ esw_dests_to_int_external(dest, i)) {
+ esw_warn(esw->dev,
+- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
++ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
+ rule = ERR_PTR(-EINVAL);
+ goto err_esw_get;
+ }
+@@ -984,7 +988,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+- if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
++ if (rep->vport == MLX5_VPORT_UPLINK &&
++ on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
+ dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+@@ -1176,9 +1181,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *flow;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_vport *vport;
++ int err, pfindex;
+ unsigned long i;
+ void *misc;
+- int err;
+
+ if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+@@ -1254,7 +1259,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ flows[vport->index] = flow;
+ }
+ }
+- esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
++
++ pfindex = mlx5_get_dev_index(peer_dev);
++ if (pfindex >= MLX5_MAX_PORTS) {
++ esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
++ pfindex, MLX5_MAX_PORTS);
++ err = -EINVAL;
++ goto add_ec_vf_flow_err;
++ }
++ esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
+
+ kvfree(spec);
+ return 0;
+@@ -2463,6 +2476,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
+ if (err)
+ return err;
+
++ if (MLX5_ESWITCH_MANAGER(esw->dev) &&
++ mlx5_esw_vport_match_metadata_supported(esw))
++ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
++
+ err = devl_params_register(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+@@ -2484,6 +2501,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+ esw_offloads_cleanup_reps(esw);
+ }
+
++static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
++ struct mlx5_eswitch_rep *rep, u8 rep_type)
++{
++ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
++ REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
++ return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
++
++ return 0;
++}
++
+ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+ {
+@@ -2508,13 +2535,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+ int err;
+
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+- if (err)
+- goto err_reps;
+- }
++ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
++ err = __esw_offloads_load_rep(esw, rep, rep_type);
++ if (err)
++ goto err_reps;
++ }
+
+ return 0;
+
+@@ -3259,7 +3284,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
+ }
+
+-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
++int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
+ {
+ struct mlx5_eswitch_rep *rep;
+ unsigned long i;
+@@ -3272,13 +3297,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
+ return 0;
+
+- ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
++ ret = __esw_offloads_load_rep(esw, rep, REP_IB);
+ if (ret)
+ return ret;
+
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
+- mlx5_esw_offloads_rep_load(esw, rep->vport);
++ __esw_offloads_load_rep(esw, rep, REP_IB);
+ }
+
+ return 0;
+@@ -3650,18 +3675,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+ return 0;
+ }
+
+-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
+-{
+- struct net *devl_net, *netdev_net;
+- struct mlx5_eswitch *esw;
+-
+- esw = mlx5_devlink_eswitch_nocheck_get(devlink);
+- netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
+- devl_net = devlink_net(devlink);
+-
+- return net_eq(devl_net, netdev_net);
+-}
+-
+ int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+@@ -3706,13 +3719,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ if (esw_mode_from_devlink(mode, &mlx5_mode))
+ return -EINVAL;
+
+- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
+- NL_SET_ERR_MSG_MOD(extack,
+- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+- return -EPERM;
+- }
+-
+ mlx5_lag_disable_change(esw->dev);
+ err = mlx5_esw_try_lock(esw);
+ if (err < 0) {
+@@ -3732,13 +3738,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ goto unlock;
+ }
+
++ esw->eswitch_operation_in_progress = true;
++ up_write(&esw->mode_lock);
++
+ mlx5_eswitch_disable_locked(esw);
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change mode while devlink traps are active");
+ err = -EOPNOTSUPP;
+- goto unlock;
++ goto skip;
+ }
+ err = esw_offloads_start(esw, extack);
+ } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
+@@ -3748,6 +3757,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ err = -EINVAL;
+ }
+
++skip:
++ down_write(&esw->mode_lock);
++ esw->eswitch_operation_in_progress = false;
+ unlock:
+ mlx5_esw_unlock(esw);
+ enable_lag:
+@@ -3758,16 +3770,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+ {
+ struct mlx5_eswitch *esw;
+- int err;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+- down_read(&esw->mode_lock);
+- err = esw_mode_to_devlink(esw->mode, mode);
+- up_read(&esw->mode_lock);
+- return err;
++ return esw_mode_to_devlink(esw->mode, mode);
+ }
+
+ static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
+@@ -3861,11 +3869,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ if (err)
+ goto out;
+
++ esw->eswitch_operation_in_progress = true;
++ up_write(&esw->mode_lock);
++
+ err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
+- if (err)
+- goto out;
++ if (!err)
++ esw->offloads.inline_mode = mlx5_mode;
+
+- esw->offloads.inline_mode = mlx5_mode;
++ down_write(&esw->mode_lock);
++ esw->eswitch_operation_in_progress = false;
+ up_write(&esw->mode_lock);
+ return 0;
+
+@@ -3877,16 +3889,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+ {
+ struct mlx5_eswitch *esw;
+- int err;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+- down_read(&esw->mode_lock);
+- err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+- up_read(&esw->mode_lock);
+- return err;
++ return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+ }
+
+ bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+@@ -3968,6 +3976,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
+ goto unlock;
+ }
+
++ esw->eswitch_operation_in_progress = true;
++ up_write(&esw->mode_lock);
++
+ esw_destroy_offloads_fdb_tables(esw);
+
+ esw->offloads.encap = encap;
+@@ -3981,6 +3992,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
+ (void)esw_create_offloads_fdb_tables(esw);
+ }
+
++ down_write(&esw->mode_lock);
++ esw->eswitch_operation_in_progress = false;
++
+ unlock:
+ up_write(&esw->mode_lock);
+ return err;
+@@ -3995,9 +4009,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+- down_read(&esw->mode_lock);
+ *encap = esw->offloads.encap;
+- up_read(&esw->mode_lock);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+index edd91025831441..40bdc677f051dc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+@@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+
+ /* hairpin */
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+- if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
+- esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
++ if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid &&
++ esw_attr->dests[i].vport == MLX5_VPORT_UPLINK)
+ return true;
+
+ return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+index a4b92533166182..b29299c49ab3df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
++ MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
++ !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
+
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index a13b9c2bd144bd..e2f7cecce6f1a0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ return err;
+ }
+
++static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
++ struct mlx5_pkt_reformat *p2)
++{
++ return p1->owner == p2->owner &&
++ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
++ p1->id == p2->id :
++ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
++ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
++}
++
+ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ struct mlx5_flow_destination *d2)
+ {
+@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
+ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+- (d1->vport.pkt_reformat->id ==
+- d2->vport.pkt_reformat->id) : true)) ||
++ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
++ d2->vport.pkt_reformat) : true)) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ d1->ft == d2->ft) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+ }
+ trace_mlx5_fs_set_fte(fte, false);
+
++ /* Link newly added rules into the tree. */
+ for (i = 0; i < handle->num_rules; i++) {
+- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
++ if (!handle->rule[i]->node.parent) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+index 58f4c0d0fafa25..70898f0a9866cd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+ do {
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
++ if (pci_channel_offline(dev->pdev)) {
++ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++ return -EACCES;
++ }
+
+ cond_resched();
+ } while (!time_after(jiffies, end));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index b568988e92e3e9..6b17346aa4cef2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -206,6 +206,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
+ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+ {
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++ struct devlink *devlink = priv_to_devlink(dev);
+
+ /* if this is the driver that initiated the fw reset, devlink completed the reload */
+ if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+@@ -217,9 +218,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ else
+ mlx5_load_one(dev, true);
+- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
++ devl_lock(devlink);
++ devlink_remote_reload_actions_performed(devlink, 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
++ devl_unlock(devlink);
+ }
+ }
+
+@@ -325,6 +328,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
+ mlx5_core_err(dev, "Failed to reload FW tracer\n");
+ }
+
++#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
++static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
++{
++ struct pci_dev *bridge = dev->pdev->bus->self;
++ u16 reg16;
++ int err;
++
++ if (!bridge)
++ return -EOPNOTSUPP;
++
++ err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
++ if (err)
++ return err;
++
++ if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) {
++ mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++#endif
++
+ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
+ {
+ struct pci_bus *bridge_bus = dev->pdev->bus;
+@@ -357,6 +383,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
+ return false;
+ }
+
++#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
++ err = mlx5_check_hotplug_interrupt(dev);
++ if (err)
++ return false;
++#endif
++
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return false;
+@@ -650,19 +682,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
++ if (!fw_reset)
++ return;
++
+ MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
+ mlx5_eq_notifier_register(dev, &fw_reset->nb);
+ }
+
+ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
+ {
+- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++
++ if (!fw_reset)
++ return;
++
++ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
+ }
+
+ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
++ if (!fw_reset)
++ return;
++
+ set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+ cancel_work_sync(&fw_reset->fw_live_patch_work);
+ cancel_work_sync(&fw_reset->reset_request_work);
+@@ -680,9 +723,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
+
+ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
+ {
+- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
++ struct mlx5_fw_reset *fw_reset;
+ int err;
+
++ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
++ return 0;
++
++ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ if (!fw_reset)
+ return -ENOMEM;
+ fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
+@@ -718,6 +765,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
++ if (!fw_reset)
++ return;
++
+ devl_params_unregister(priv_to_devlink(dev),
+ mlx5_fw_reset_devlink_params,
+ ARRAY_SIZE(mlx5_fw_reset_devlink_params));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 2fb2598b775efd..d798834c4e755d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -248,6 +248,10 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
+ do {
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
++ if (pci_channel_offline(dev->pdev)) {
++ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++ goto unlock;
++ }
+
+ msleep(20);
+ } while (!time_after(jiffies, end));
+@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
+ mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
+ return -ENODEV;
+ }
++ if (pci_channel_offline(dev->pdev)) {
++ mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
++ return -EACCES;
++ }
+ msleep(100);
+ }
+ return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+index 047d5fed5f89e6..e2230c8f18152f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+@@ -48,6 +48,7 @@ static struct mlx5_irq *
+ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+ {
+ struct irq_affinity_desc auto_desc = {};
++ struct mlx5_irq *irq;
+ u32 irq_index;
+ int err;
+
+@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
+ else
+ cpu_get(pool, cpumask_first(&af_desc->mask));
+ }
+- return mlx5_irq_alloc(pool, irq_index,
+- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+- NULL);
++ irq = mlx5_irq_alloc(pool, irq_index,
++ cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
++ NULL);
++ if (IS_ERR(irq))
++ xa_erase(&pool->irqs, irq_index);
++ return irq;
+ }
+
+ /* Looking for the IRQ with the smallest refcount that fits req_mask.
+@@ -168,45 +172,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
+ }
+-
+-/**
+- * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
+- * @dev: mlx5 device that is requesting the IRQ.
+- * @used_cpus: cpumask of bounded cpus by the device
+- * @vecidx: vector index to request an IRQ for.
+- *
+- * Each IRQ is bounded to at most 1 CPU.
+- * This function is requesting an IRQ according to the default assignment.
+- * The default assignment policy is:
+- * - request the least loaded IRQ which is not bound to any
+- * CPU of the previous IRQs requested.
+- *
+- * On success, this function updates used_cpus mask and returns an irq pointer.
+- * In case of an error, an appropriate error pointer is returned.
+- */
+-struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+- struct cpumask *used_cpus, u16 vecidx)
+-{
+- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+- struct irq_affinity_desc af_desc = {};
+- struct mlx5_irq *irq;
+-
+- if (!mlx5_irq_pool_is_sf_pool(pool))
+- return ERR_PTR(-ENOENT);
+-
+- af_desc.is_managed = 1;
+- cpumask_copy(&af_desc.mask, cpu_online_mask);
+- cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
+- irq = mlx5_irq_affinity_request(pool, &af_desc);
+-
+- if (IS_ERR(irq))
+- return irq;
+-
+- cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
+- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+-
+- return irq;
+-}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index af3fac090b828b..18cf756bad8cc3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
+ return err;
+ }
+
+- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
++ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
+ mlx5_lag_port_sel_destroy(ldev);
++ ldev->buckets = 1;
++ }
+ if (mlx5_lag_has_drop_rule(ldev))
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+@@ -718,6 +720,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+ struct mlx5_core_dev *dev;
+ u8 mode;
+ #endif
++ bool roce_support;
+ int i;
+
+ for (i = 0; i < ldev->ports; i++)
+@@ -744,6 +747,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+ if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
+ return false;
+ #endif
++ roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
++ for (i = 1; i < ldev->ports; i++)
++ if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
++ return false;
++
+ return true;
+ }
+
+@@ -812,7 +820,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
+ if (shared_fdb)
+ for (i = 0; i < ldev->ports; i++)
+ if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
+
+ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+@@ -911,8 +919,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ } else if (roce_lag) {
+ dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+- for (i = 1; i < ldev->ports; i++)
+- mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++ for (i = 1; i < ldev->ports; i++) {
++ if (mlx5_get_roce_state(ldev->pf[i].dev))
++ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++ }
+ } else if (shared_fdb) {
+ int i;
+
+@@ -920,7 +930,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ mlx5_rescan_drivers_locked(dev0);
+
+ for (i = 0; i < ldev->ports; i++) {
+- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ if (err)
+ break;
+ }
+@@ -931,7 +941,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ mlx5_deactivate_lag(ldev);
+ mlx5_lag_add_devices(ldev);
+ for (i = 0; i < ldev->ports; i++)
+- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_core_err(dev0, "Failed to enable lag\n");
+ return;
+ }
+@@ -1502,7 +1512,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ goto unlock;
+
+ for (i = 0; i < ldev->ports; i++) {
+- if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
++ if (ldev->pf[i].netdev == slave) {
+ port = i;
+ break;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+index 4bf15391525c59..6b0413a3987ce0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+@@ -65,12 +65,12 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
+ return err;
+ }
+
+-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
++#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
+ static int enable_mpesw(struct mlx5_lag *ldev)
+ {
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ int err;
++ int i;
+
+ if (ldev->mode != MLX5_LAG_MODE_NONE)
+ return -EINVAL;
+@@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
+
+ dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+- err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+- if (!err)
+- err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+- if (err)
+- goto err_rescan_drivers;
++ for (i = 0; i < ldev->ports; i++) {
++ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
++ if (err)
++ goto err_rescan_drivers;
++ }
+
+ return 0;
+
+@@ -112,8 +112,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
+ mlx5_deactivate_lag(ldev);
+ err_add_devices:
+ mlx5_lag_add_devices(ldev);
+- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
++ for (i = 0; i < ldev->ports; i++)
++ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_mpesw_metadata_cleanup(ldev);
+ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+index 7d9bbb494d95b3..005661248c7e9c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ &dest, 1);
+ if (IS_ERR(lag_definer->rules[idx])) {
+ err = PTR_ERR(lag_definer->rules[idx]);
+- while (i--)
+- while (j--)
++ do {
++ while (j--) {
++ idx = i * ldev->buckets + j;
+ mlx5_del_flow_rules(lag_definer->rules[idx]);
++ }
++ j = ldev->buckets;
++ } while (i--);
+ goto destroy_fg;
+ }
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+index 40c7be12404168..58bd749b5e4de0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+- MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
++ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index aa29f09e835642..0c83ef174275a7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+
+ static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+ {
+- return mlx5_ptp_adjtime(ptp, delta);
++ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
++ struct mlx5_core_dev *mdev;
++
++ mdev = container_of(clock, struct mlx5_core_dev, clock);
++
++ return mlx5_ptp_adjtime_real_time(mdev, delta);
+ }
+
+ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+index 6b774e0c276659..432c98f2626db9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+@@ -24,6 +24,11 @@
+ pci_write_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val))
+ #define VSC_MAX_RETRIES 2048
+
++/* Reading VSC registers can take a relatively long time.
++ * Yield the cpu every 128 register reads.
++ */
++#define VSC_GW_READ_BLOCK_COUNT 128
++
+ enum {
+ VSC_CTRL_OFFSET = 0x4,
+ VSC_COUNTER_OFFSET = 0x8,
+@@ -74,6 +79,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
+ ret = -EBUSY;
+ goto pci_unlock;
+ }
++ if (pci_channel_offline(dev->pdev)) {
++ ret = -EACCES;
++ goto pci_unlock;
++ }
+
+ /* Check if semaphore is already locked */
+ ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
+@@ -269,6 +278,7 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
+ {
+ unsigned int next_read_addr = 0;
+ unsigned int read_addr = 0;
++ unsigned int count = 0;
+
+ while (read_addr < length) {
+ if (mlx5_vsc_gw_read_fast(dev, read_addr, &next_read_addr,
+@@ -276,6 +286,10 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
+ return read_addr;
+
+ read_addr = next_read_addr;
++ if (++count == VSC_GW_READ_BLOCK_COUNT) {
++ cond_resched();
++ count = 0;
++ }
+ }
+ return length;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 15561965d2afa8..96136229b1b070 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -361,6 +361,12 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
+ }
+ EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
+
++void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data)
++{
++ mlx5_blocking_notifier_call_chain(dev, event, data);
++}
++EXPORT_SYMBOL(mlx5_core_mp_event_replay);
++
+ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
+ {
+@@ -1281,6 +1287,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+
+ if (!err)
+ mlx5_function_disable(dev, boot);
++ else
++ mlx5_stop_health_poll(dev, boot);
++
+ return err;
+ }
+
+@@ -1463,6 +1472,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
+ if (err)
+ goto err_register;
+
++ err = mlx5_crdump_enable(dev);
++ if (err)
++ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
++
++ err = mlx5_hwmon_dev_register(dev);
++ if (err)
++ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
++
+ mutex_unlock(&dev->intf_state_mutex);
+ return 0;
+
+@@ -1488,7 +1505,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
+ int err;
+
+ devl_lock(devlink);
++ devl_register(devlink);
+ err = mlx5_init_one_devl_locked(dev);
++ if (err)
++ devl_unregister(devlink);
+ devl_unlock(devlink);
+ return err;
+ }
+@@ -1500,6 +1520,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
+ devl_lock(devlink);
+ mutex_lock(&dev->intf_state_mutex);
+
++ mlx5_hwmon_dev_unregister(dev);
++ mlx5_crdump_disable(dev);
+ mlx5_unregister_device(dev);
+
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+@@ -1517,6 +1539,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
+ mlx5_function_teardown(dev, true);
+ out:
+ mutex_unlock(&dev->intf_state_mutex);
++ devl_unregister(devlink);
+ devl_unlock(devlink);
+ }
+
+@@ -1663,16 +1686,20 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
+ }
+
+ devl_lock(devlink);
++ devl_register(devlink);
++
+ err = mlx5_devlink_params_register(priv_to_devlink(dev));
+- devl_unlock(devlink);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
+ goto query_hca_caps_err;
+ }
+
++ devl_unlock(devlink);
+ return 0;
+
+ query_hca_caps_err:
++ devl_unregister(devlink);
++ devl_unlock(devlink);
+ mlx5_function_disable(dev, true);
+ out:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+@@ -1685,6 +1712,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
+
+ devl_lock(devlink);
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
++ devl_unregister(devlink);
+ devl_unlock(devlink);
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ return;
+@@ -1926,16 +1954,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto err_init_one;
+ }
+
+- err = mlx5_crdump_enable(dev);
+- if (err)
+- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+-
+- err = mlx5_hwmon_dev_register(dev);
+- if (err)
+- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+-
+ pci_save_state(pdev);
+- devlink_register(devlink);
+ return 0;
+
+ err_init_one:
+@@ -1956,16 +1975,9 @@ static void remove_one(struct pci_dev *pdev)
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
+- * devlink notify APIs.
+- * Hence, we must drain them before unregistering the devlink.
+- */
+ mlx5_drain_fw_reset(dev);
+ mlx5_drain_health_wq(dev);
+- devlink_unregister(devlink);
+ mlx5_sriov_disable(pdev, false);
+- mlx5_hwmon_dev_unregister(dev);
+- mlx5_crdump_disable(dev);
+ mlx5_uninit_one(dev);
+ mlx5_pci_close(dev);
+ mlx5_mdev_uninit(dev);
+@@ -2118,7 +2130,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+ /* Panic tear down fw command will stop the PCI bus communication
+ * with the HCA, so the health poll is no longer needed.
+ */
+- mlx5_drain_health_wq(dev);
+ mlx5_stop_health_poll(dev, false);
+
+ ret = mlx5_cmd_fast_teardown_hca(dev);
+@@ -2153,6 +2164,7 @@ static void shutdown(struct pci_dev *pdev)
+
+ mlx5_core_info(dev, "Shutdown was called\n");
+ set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
++ mlx5_drain_health_wq(dev);
+ err = mlx5_try_fast_unload(dev);
+ if (err)
+ mlx5_unload_one(dev, false);
+@@ -2193,6 +2205,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
++ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 653648216730ac..6bac8ad70ba60b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -19,6 +19,7 @@
+ #define MLX5_IRQ_CTRL_SF_MAX 8
+ /* min num of vectors for SFs to be enabled */
+ #define MLX5_IRQ_VEC_COMP_BASE_SF 2
++#define MLX5_IRQ_VEC_COMP_BASE 1
+
+ #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
+ #define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
+@@ -28,7 +29,7 @@
+ struct mlx5_irq {
+ struct atomic_notifier_head nh;
+ cpumask_var_t mask;
+- char name[MLX5_MAX_IRQ_NAME];
++ char name[MLX5_MAX_IRQ_FORMATTED_NAME];
+ struct mlx5_irq_pool *pool;
+ int refcount;
+ struct msi_map map;
+@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
+ return;
+ }
+
++ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+ }
+
+@@ -292,8 +294,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ else
+ irq_sf_set_name(pool, name, i);
+ ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+- "%s@pci:%s", name, pci_name(dev->pdev));
++ snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
++ MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
+ err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
+ &irq->nh);
+ if (err) {
+@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = table->pcif_pool;
+ struct irq_affinity_desc af_desc;
+- int offset = 1;
++ int offset = MLX5_IRQ_VEC_COMP_BASE;
+
+ if (!pool->xa_num_irqs.max)
+ offset = 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+index d3a77a0ab8488b..c4d377f8df3089 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+@@ -7,6 +7,9 @@
+ #include <linux/mlx5/driver.h>
+
+ #define MLX5_MAX_IRQ_NAME (32)
++#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
++#define MLX5_MAX_IRQ_FORMATTED_NAME \
++ (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
+ /* max irq_index is 2047, so four chars */
+ #define MLX5_MAX_IRQ_IDX_CHARS (4)
+ #define MLX5_EQ_REFS_PER_IRQ (2)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index be70d1f23a5da3..749f0fc2c189ad 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -1098,7 +1098,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
+- [MLX5E_400GAUI_8] = 400000,
++ [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+index 8bce730b5c5bef..db2bd3ad63ba36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ {
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+
++ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
++ return -EOPNOTSUPP;
++
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
+@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ void *attr;
+
++ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
++ !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
++ return -EOPNOTSUPP;
++
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index 8fe82f1191bb92..2028acbe85ca2f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -69,24 +69,29 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
+ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
+ {
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
++ struct mlx5_core_dev *mdev = sf_dev->mdev;
++ struct devlink *devlink;
+
+- mlx5_drain_health_wq(sf_dev->mdev);
+- devlink_unregister(devlink);
+- if (mlx5_dev_is_lightweight(sf_dev->mdev))
+- mlx5_uninit_one_light(sf_dev->mdev);
++ devlink = priv_to_devlink(mdev);
++ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
++ mlx5_drain_health_wq(mdev);
++ if (mlx5_dev_is_lightweight(mdev))
++ mlx5_uninit_one_light(mdev);
+ else
+- mlx5_uninit_one(sf_dev->mdev);
+- iounmap(sf_dev->mdev->iseg);
+- mlx5_mdev_uninit(sf_dev->mdev);
++ mlx5_uninit_one(mdev);
++ iounmap(mdev->iseg);
++ mlx5_mdev_uninit(mdev);
+ mlx5_devlink_free(devlink);
+ }
+
+ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+ {
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
++ struct mlx5_core_dev *mdev = sf_dev->mdev;
+
+- mlx5_unload_one(sf_dev->mdev, false);
++ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
++ mlx5_drain_health_wq(mdev);
++ mlx5_unload_one(mdev, false);
+ }
+
+ static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index 5b83da08692d69..90c38cbbde181e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -781,6 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ switch (action_type) {
+ case DR_ACTION_TYP_DROP:
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
++ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ break;
+ case DR_ACTION_TYP_FT:
+ dest_action = action;
+@@ -866,11 +867,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ action->sampler->tx_icm_addr;
+ break;
+ case DR_ACTION_TYP_VPORT:
+- attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+- dest_action = action;
+- attr.final_icm_addr = rx_rule ?
+- action->vport->caps->icm_address_rx :
+- action->vport->caps->icm_address_tx;
++ if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
++ /* can't go to uplink on RX rule - dropping instead */
++ attr.final_icm_addr = nic_dmn->drop_icm_addr;
++ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
++ } else {
++ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
++ dest_action = action;
++ attr.final_icm_addr = rx_rule ?
++ action->vport->caps->icm_address_rx :
++ action->vport->caps->icm_address_tx;
++ }
+ break;
+ case DR_ACTION_TYP_POP_VLAN:
+ if (!rx_rule && !(dmn->ste_ctx->actions_caps &
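
The VPORT case now rewrites an RX rule that targets the uplink vport into a drop, since that destination is invalid on the receive side. A small model of the rewrite; the MLX5_VPORT_UPLINK stand-in and the addresses are made up, while the >> 48 extracts the GVMI field the same way the hunk does:

    #include <stdbool.h>
    #include <stdio.h>

    #define UPLINK_VPORT 0xffffu /* stand-in for MLX5_VPORT_UPLINK */

    struct attrs {
        unsigned long long final_icm_addr;
        unsigned int hit_gvmi;
    };

    /* An RX rule may not forward to the uplink; fall back to drop. */
    static void build_vport_action(struct attrs *attr, bool rx_rule,
                                   unsigned int vport,
                                   unsigned long long drop_addr,
                                   unsigned long long vport_addr)
    {
        if (rx_rule && vport == UPLINK_VPORT) {
            attr->final_icm_addr = drop_addr;
            attr->hit_gvmi = drop_addr >> 48;
        } else {
            attr->final_icm_addr = vport_addr;
            attr->hit_gvmi = 1; /* would come from the vport caps */
        }
    }

    int main(void)
    {
        struct attrs a;

        build_vport_action(&a, true, UPLINK_VPORT,
                           0xdead000000001000ULL, 0x2000);
        printf("icm=%#llx gvmi=%#x\n", a.final_icm_addr, a.hit_gvmi);
        return 0;
    }
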
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+index 042ca034912433..d1db04baa1fa6f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+@@ -7,7 +7,7 @@
+ /* don't try to optimize STE allocation if the stack is too constraining */
+ #define DR_RULE_MAX_STES_OPTIMIZED 0
+ #else
+-#define DR_RULE_MAX_STES_OPTIMIZED 5
++#define DR_RULE_MAX_STES_OPTIMIZED 2
+ #endif
+ #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+index 4e8527a724f504..6fa06ba2d34653 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
+ u32 cqn;
+ u32 pdn;
+ u32 max_send_wr;
+- u32 max_send_sge;
+ struct mlx5_uars_page *uar;
+ u8 isolate_vl_tc:1;
+ };
+@@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
+ return err == CQ_POLL_ERR ? err : npolled;
+ }
+
+-static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
+-{
+- return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
+- sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
+-}
+-
+-/* We calculate for specific RC QP with the required functionality */
+-static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
+-{
+- int update_arg_size;
+- int inl_size = 0;
+- int tot_size;
+- int size;
+-
+- update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
+-
+- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg);
+- inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
+- DR_STE_SIZE, 16);
+-
+- size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
+-
+- size = max(size, update_arg_size);
+-
+- tot_size = max(size, inl_size);
+-
+- return ALIGN(tot_size, MLX5_SEND_WQE_BB);
+-}
+-
+ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ struct dr_qp_init_attr *attr)
+ {
+@@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
+ struct mlx5_wq_param wqp;
+ struct mlx5dr_qp *dr_qp;
+- int wqe_size;
+ int inlen;
+ void *qpc;
+ void *in;
+@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+ if (err)
+ goto err_in;
+ dr_qp->uar = attr->uar;
+- wqe_size = dr_qp_calc_rc_send_wqe(attr);
+- dr_qp->max_inline_data = min(wqe_size -
+- (sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg) +
+- sizeof(struct mlx5_wqe_inline_seg)),
+- (2 * MLX5_SEND_WQE_BB -
+- (sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg) +
+- sizeof(struct mlx5_wqe_inline_seg))));
+
+ return dr_qp;
+
+@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ MLX5_SEND_WQE_DS;
+ }
+
+-static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
+- struct dr_data_seg *data_seg, void *wqe)
+-{
+- int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
+- sizeof(struct mlx5_wqe_raddr_seg) +
+- sizeof(struct mlx5_wqe_inline_seg);
+- struct mlx5_wqe_inline_seg *seg;
+- int left_space;
+- int inl = 0;
+- void *addr;
+- int len;
+- int idx;
+-
+- seg = wqe;
+- wqe += sizeof(*seg);
+- addr = (void *)(unsigned long)(data_seg->addr);
+- len = data_seg->length;
+- inl += len;
+- left_space = MLX5_SEND_WQE_BB - inline_header_size;
+-
+- if (likely(len > left_space)) {
+- memcpy(wqe, addr, left_space);
+- len -= left_space;
+- addr += left_space;
+- idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
+- wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+- }
+-
+- memcpy(wqe, addr, len);
+-
+- if (likely(inl)) {
+- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+- return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
+- MLX5_SEND_WQE_DS);
+- } else {
+- return 0;
+- }
+-}
+-
+ static void
+-dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
+- struct mlx5_wqe_ctrl_seg *wq_ctrl,
++dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+ u64 remote_addr,
+ u32 rkey,
+ struct dr_data_seg *data_seg,
+@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
+ wq_raddr->reserved = 0;
+
+ wq_dseg = (void *)(wq_raddr + 1);
+- /* WQE ctrl segment + WQE remote addr segment */
+- *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
+
+- if (data_seg->send_flags & IB_SEND_INLINE) {
+- *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
+- } else {
+- wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+- wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+- wq_dseg->addr = cpu_to_be64(data_seg->addr);
+- *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
+- }
++ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
++ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
++ wq_dseg->addr = cpu_to_be64(data_seg->addr);
++
++ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
++ sizeof(*wq_dseg) + /* WQE data segment */
++ sizeof(*wq_raddr)) / /* WQE remote addr segment */
++ MLX5_SEND_WQE_DS;
+ }
+
+ static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+ switch (opcode) {
+ case MLX5_OPCODE_RDMA_READ:
+ case MLX5_OPCODE_RDMA_WRITE:
+- dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
++ dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
+ rkey, data_seg, &size);
+ break;
+ case MLX5_OPCODE_FLOW_TBL_ACCESS:
+@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
+ else
+- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
++ send_info->write.send_flags = 0;
+ }
+
+ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+ }
+
+ send_ring->pending_wqe++;
+- if (!send_info->write.lkey)
+- send_info->write.send_flags |= IB_SEND_INLINE;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+ send_info->write.send_flags |= IB_SEND_SIGNALED;
+- else
+- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
+
+ send_ring->pending_wqe++;
+ send_info->read.length = send_info->write.length;
+@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+ send_info->read.lkey = send_ring->sync_mr->mkey;
+
+ if (send_ring->pending_wqe % send_ring->signal_th == 0)
+- send_info->read.send_flags |= IB_SEND_SIGNALED;
++ send_info->read.send_flags = IB_SEND_SIGNALED;
+ else
+- send_info->read.send_flags &= ~IB_SEND_SIGNALED;
++ send_info->read.send_flags = 0;
+ }
+
+ static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
+@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
+ dmn->send_ring->cq->qp = dmn->send_ring->qp;
+
+ dmn->info.max_send_wr = QUEUE_SIZE;
+- init_attr.max_send_sge = 1;
+ dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
+ DR_STE_SIZE);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index 5a31fb47ffa58b..21753f32786850 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ req_list_size = max_list_size;
+ }
+
+- out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
++ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
+ req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+
+ out = kvzalloc(out_sz, GFP_KERNEL);
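
The vport.c one-liner fixes a response buffer sized from the *input* structure; when the output layout is larger, the device's reply overruns the allocation. A toy model of the sizing bug and the fix, with made-up sizes standing in for the real MLX5_ST_SZ_BYTES() values:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins; the real mlx5 layouts differ. */
    #define CMD_IN_SZ     16 /* query_nic_vport_context_in  */
    #define CMD_OUT_SZ    64 /* query_nic_vport_context_out */
    #define MAC_ENTRY_SZ   8 /* mac_address_layout          */

    int main(void)
    {
        size_t req_list_size = 4;

        /* Buggy: sized from the input struct, too small for the reply. */
        size_t bad_sz = CMD_IN_SZ + req_list_size * MAC_ENTRY_SZ;
        /* Fixed: sized from the output struct the device writes. */
        size_t out_sz = CMD_OUT_SZ + req_list_size * MAC_ENTRY_SZ;

        printf("bad=%zu fixed=%zu short by %zu bytes\n",
               bad_sz, out_sz, out_sz - bad_sz);

        void *out = calloc(1, out_sz); /* kvzalloc() analogue */
        if (!out)
            return 1;
        /* ... issue the query and parse the reply ... */
        free(out);
        return 0;
    }
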
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+index bc94e75a7aebd1..e7777700ee18a7 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+@@ -40,6 +40,7 @@
+ */
+ #define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
+ #define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
++#define MLXBF_GIGE_MAX_FILTER_IDX 3
+
+ /* Define for broadcast MAC literal */
+ #define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
+@@ -175,6 +176,13 @@ enum mlxbf_gige_res {
+ int mlxbf_gige_mdio_probe(struct platform_device *pdev,
+ struct mlxbf_gige *priv);
+ void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
++
++void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
++void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
++void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
++ unsigned int index);
++void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
++ unsigned int index);
+ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac);
+ void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index 694de9513b9fc1..57e68bfd3b1a8f 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/rtnetlink.h>
+ #include <linux/skbuff.h>
+
+ #include "mlxbf_gige.h"
+@@ -130,16 +131,19 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ {
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
++ u64 control;
+ u64 int_en;
+ int err;
+
+- err = mlxbf_gige_request_irqs(priv);
+- if (err)
+- return err;
++ /* Perform general init of GigE block */
++ control = readq(priv->base + MLXBF_GIGE_CONTROL);
++ control |= MLXBF_GIGE_CONTROL_PORT_EN;
++ writeq(control, priv->base + MLXBF_GIGE_CONTROL);
++
+ mlxbf_gige_cache_stats(priv);
+ err = mlxbf_gige_clean_port(priv);
+ if (err)
+- goto free_irqs;
++ return err;
+
+ /* Clear driver's valid_polarity to match hardware,
+ * since the above call to clean_port() resets the
+@@ -147,19 +151,27 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ */
+ priv->valid_polarity = 0;
+
+- err = mlxbf_gige_rx_init(priv);
+- if (err)
+- goto free_irqs;
++ phy_start(phydev);
++
+ err = mlxbf_gige_tx_init(priv);
+ if (err)
+- goto rx_deinit;
+-
+- phy_start(phydev);
++ goto phy_deinit;
++ err = mlxbf_gige_rx_init(priv);
++ if (err)
++ goto tx_deinit;
+
+ netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
+ napi_enable(&priv->napi);
+ netif_start_queue(netdev);
+
++ err = mlxbf_gige_request_irqs(priv);
++ if (err)
++ goto napi_deinit;
++
++ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
++ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
++ mlxbf_gige_enable_multicast_rx(priv);
++
+ /* Set bits in INT_EN that we care about */
+ int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
+ MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
+@@ -176,11 +188,17 @@ static int mlxbf_gige_open(struct net_device *netdev)
+
+ return 0;
+
+-rx_deinit:
++napi_deinit:
++ netif_stop_queue(netdev);
++ napi_disable(&priv->napi);
++ netif_napi_del(&priv->napi);
+ mlxbf_gige_rx_deinit(priv);
+
+-free_irqs:
+- mlxbf_gige_free_irqs(priv);
++tx_deinit:
++ mlxbf_gige_tx_deinit(priv);
++
++phy_deinit:
++ phy_stop(phydev);
+ return err;
+ }
+
+@@ -365,7 +383,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ void __iomem *plu_base;
+ void __iomem *base;
+ int addr, phy_irq;
+- u64 control;
++ unsigned int i;
+ int err;
+
+ base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
+@@ -380,11 +398,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ if (IS_ERR(plu_base))
+ return PTR_ERR(plu_base);
+
+- /* Perform general init of GigE block */
+- control = readq(base + MLXBF_GIGE_CONTROL);
+- control |= MLXBF_GIGE_CONTROL_PORT_EN;
+- writeq(control, base + MLXBF_GIGE_CONTROL);
+-
+ netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+ if (!netdev)
+ return -ENOMEM;
+@@ -415,6 +428,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
+ priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
+ priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+
++ for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
++ mlxbf_gige_disable_mac_rx_filter(priv, i);
++ mlxbf_gige_disable_multicast_rx(priv);
++ mlxbf_gige_disable_promisc(priv);
++
+ /* Write initial MAC address to hardware */
+ mlxbf_gige_initial_mac(priv);
+
+@@ -487,8 +505,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
+ {
+ struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+- writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+- mlxbf_gige_clean_port(priv);
++ rtnl_lock();
++ netif_device_detach(priv->netdev);
++
++ if (netif_running(priv->netdev))
++ dev_close(priv->netdev);
++
++ rtnl_unlock();
+ }
+
+ static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+index cd0973229c9bb9..74bd46bab4c050 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+@@ -62,6 +62,8 @@
+ #define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
+ #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
+ #define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
++#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530
++#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1)
+ #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
+ #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
+ #define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+index 0d5a41a2ae0109..eb62620b63c7fc 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+@@ -11,15 +11,31 @@
+ #include "mlxbf_gige.h"
+ #include "mlxbf_gige_regs.h"
+
+-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+- unsigned int index, u64 dmac)
++void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
+ {
+ void __iomem *base = priv->base;
+- u64 control;
++ u64 data;
+
+- /* Write destination MAC to specified MAC RX filter */
+- writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+- (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
++ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++ data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
++ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++}
++
++void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
++{
++ void __iomem *base = priv->base;
++ u64 data;
++
++ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++ data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
++ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
++}
++
++void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
++ unsigned int index)
++{
++ void __iomem *base = priv->base;
++ u64 control;
+
+ /* Enable MAC receive filter mask for specified index */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+ }
+
++void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
++ unsigned int index)
++{
++ void __iomem *base = priv->base;
++ u64 control;
++
++ /* Disable MAC receive filter mask for specified index */
++ control = readq(base + MLXBF_GIGE_CONTROL);
++ control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
++ writeq(control, base + MLXBF_GIGE_CONTROL);
++}
++
++void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
++ unsigned int index, u64 dmac)
++{
++ void __iomem *base = priv->base;
++
++ /* Write destination MAC to specified MAC RX filter */
++ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
++ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
++}
++
+ void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 *dmac)
+ {
+@@ -142,6 +180,9 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+ writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
+ priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+
++ writeq(ilog2(priv->rx_q_entries),
++ priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
++
+ /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+ * indicate readiness to receive interrupts
+ */
+@@ -154,9 +195,6 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+ data |= MLXBF_GIGE_RX_DMA_EN;
+ writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+- writeq(ilog2(priv->rx_q_entries),
+- priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+-
+ return 0;
+
+ free_wqe_and_skb:
+@@ -267,6 +305,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+ priv->stats.rx_truncate_errors++;
+ }
+
++	/* Read the receive consumer index before replenishing, so that this
++	 * routine returns an accurate value even if a packet lands in the
++	 * just-replenished buffer before this routine exits.
++ */
++ rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
++ rx_ci_rem = rx_ci % priv->rx_q_entries;
++
+ /* Let hardware know we've replenished one buffer */
+ rx_pi++;
+
+@@ -279,8 +324,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+ rx_pi_rem = rx_pi % priv->rx_q_entries;
+ if (rx_pi_rem == 0)
+ priv->valid_polarity ^= 1;
+- rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+- rx_ci_rem = rx_ci % priv->rx_q_entries;
+
+ if (skb)
+ netif_receive_skb(skb);
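
The new enable/disable helpers are plain read-modify-write updates of a single filter register, so callers can flip individual bits without clobbering the rest. A sketch of the pattern against an ordinary variable standing in for the ioremap'd register (readq()/writeq() become loads and stores; the bit name mirrors the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define RX_FILTER_EN_MULTICAST (1ull << 1) /* mirrors BIT(1) */

    /* Stand-in for the MMIO register; real code uses readq()/writeq(). */
    static uint64_t rx_filter_general;

    static void enable_multicast_rx(void)
    {
        uint64_t data = rx_filter_general; /* readq() */
        data |= RX_FILTER_EN_MULTICAST;
        rx_filter_general = data;          /* writeq() */
    }

    static void disable_multicast_rx(void)
    {
        uint64_t data = rx_filter_general;
        data &= ~RX_FILTER_EN_MULTICAST;
        rx_filter_general = data;
    }

    int main(void)
    {
        enable_multicast_rx();
        printf("reg=%#llx\n", (unsigned long long)rx_filter_general);
        disable_multicast_rx();
        printf("reg=%#llx\n", (unsigned long long)rx_filter_general);
        return 0;
    }
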
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index 1ccf3b73ed7245..85507d01fd4575 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -835,7 +835,7 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
+
+ static const struct mlxsw_listener mlxsw_emad_rx_listener =
+ MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
+- EMAD, DISCARD);
++ EMAD, FORWARD);
+
+ static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+index faa63ea9b83e1a..1915fa41c62246 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+@@ -95,7 +95,7 @@ struct mlxsw_afa_set {
+ */
+ has_trap:1,
+ has_police:1;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ struct mlxsw_afa_set *next; /* Pointer to the next set. */
+ struct mlxsw_afa_set *prev; /* Pointer to the previous set,
+ * note that set may have multiple
+@@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_afa_fwd_entry_ht_key ht_key;
+ u32 kvdl_index;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ };
+
+ static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
+@@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
+ /* Need to initialize the set to pass by default */
+ mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+ set->ht_key.is_first = is_first;
+- set->ref_count = 1;
++ refcount_set(&set->ref_count, 1);
+ return set;
+ }
+
+@@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
+ static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_set *set)
+ {
+- if (--set->ref_count)
++ if (!refcount_dec_and_test(&set->ref_count))
+ return;
+ if (set->shared)
+ mlxsw_afa_set_unshare(mlxsw_afa, set);
+@@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
+ set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
+ mlxsw_afa_set_ht_params);
+ if (set) {
+- set->ref_count++;
++ refcount_inc(&set->ref_count);
+ mlxsw_afa_set_put(mlxsw_afa, orig_set);
+ } else {
+ set = orig_set;
+@@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
+ if (!fwd_entry)
+ return ERR_PTR(-ENOMEM);
+ fwd_entry->ht_key.local_port = local_port;
+- fwd_entry->ref_count = 1;
++ refcount_set(&fwd_entry->ref_count, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
+ &fwd_entry->ht_node,
+@@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
+ fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
+ mlxsw_afa_fwd_entry_ht_params);
+ if (fwd_entry) {
+- fwd_entry->ref_count++;
++ refcount_inc(&fwd_entry->ref_count);
+ return fwd_entry;
+ }
+ return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
+@@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
+ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_fwd_entry *fwd_entry)
+ {
+- if (--fwd_entry->ref_count)
++ if (!refcount_dec_and_test(&fwd_entry->ref_count))
+ return;
+ mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
+ }
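
These hunks (and the matching ones in the other mlxsw files below) convert open-coded `unsigned int ref_count` fields to `refcount_t`, which saturates rather than wrapping and flags misuse such as incrementing from zero. A simplified userspace model of the three calls the patch relies on; the real kernel type adds saturation and warnings this sketch omits:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { atomic_uint refs; } refcount_t;

    static void refcount_set(refcount_t *r, unsigned int n)
    {
        atomic_store(&r->refs, n);
    }

    static void refcount_inc(refcount_t *r)
    {
        atomic_fetch_add(&r->refs, 1);
    }

    /* True when this put dropped the last reference. */
    static bool refcount_dec_and_test(refcount_t *r)
    {
        return atomic_fetch_sub(&r->refs, 1) == 1;
    }

    int main(void)
    {
        refcount_t r;

        refcount_set(&r, 1);                       /* creator's ref */
        refcount_inc(&r);                          /* second user   */
        printf("%d\n", refcount_dec_and_test(&r)); /* 0: still used */
        printf("%d\n", refcount_dec_and_test(&r)); /* 1: destroy    */
        return 0;
    }

Note how each put becomes `if (!refcount_dec_and_test(...)) return; destroy();` in place of `if (--ref_count) return;`, so an unbalanced put can no longer wrap the counter around and keep a stale object alive.
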
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+index 70f9b5e85a26fc..bf140e7416e194 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+@@ -5,6 +5,7 @@
+ #include <linux/slab.h>
+ #include <linux/list.h>
+ #include <linux/errno.h>
++#include <linux/refcount.h>
+
+ #include "item.h"
+ #include "core_acl_flex_keys.h"
+@@ -105,7 +106,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
+
+ struct mlxsw_afk_key_info {
+ struct list_head list;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ unsigned int blocks_count;
+ int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
+ * is index inside "blocks"
+@@ -282,7 +283,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
+ if (err)
+ goto err_picker;
+ list_add(&key_info->list, &mlxsw_afk->key_info_list);
+- key_info->ref_count = 1;
++ refcount_set(&key_info->ref_count, 1);
+ return key_info;
+
+ err_picker:
+@@ -304,7 +305,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+
+ key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
+ if (key_info) {
+- key_info->ref_count++;
++ refcount_inc(&key_info->ref_count);
+ return key_info;
+ }
+ return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
+@@ -313,7 +314,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
+
+ void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
+ {
+- if (--key_info->ref_count)
++ if (!refcount_dec_and_test(&key_info->ref_count))
+ return;
+ mlxsw_afk_key_info_destroy(key_info);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+index d637c0348fa15e..b71bc23245fe2e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
+ .got_inactive = mlxsw_env_got_inactive,
+ };
+
+-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
++static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+ {
+ char mcam_pl[MLXSW_REG_MCAM_LEN];
+- bool mcia_128b_supported;
++ bool mcia_128b_supported = false;
+ int err;
+
+ mlxsw_reg_mcam_pack(mcam_pl,
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
+- if (err)
+- return err;
+-
+- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+- &mcia_128b_supported);
++ if (!err)
++ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
++ &mcia_128b_supported);
+
+ mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
+-
+- return 0;
+ }
+
+ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
+ if (err)
+ goto err_type_set;
+
+- err = mlxsw_env_max_module_eeprom_len_query(env);
+- if (err)
+- goto err_eeprom_len_query;
+-
++ mlxsw_env_max_module_eeprom_len_query(env);
+ env->line_cards[0]->active = true;
+
+ return 0;
+
+-err_eeprom_len_query:
+ err_type_set:
+ mlxsw_env_module_event_disable(env, 0);
+ err_mlxsw_env_module_event_enable:
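
The core_env.c change makes the EEPROM-length query best-effort: on failure it keeps the conservative 48-byte default instead of failing driver init. A tiny model of the probe-with-fallback shape (error value and names illustrative):

    #include <stdio.h>

    /* Pretend feature query; fails when 'fail' is set. */
    static int query_mcia_128b(int fail, int *supported)
    {
        if (fail)
            return -5; /* -EIO, say */
        *supported = 1;
        return 0;
    }

    static unsigned int max_eeprom_len(int probe_fails)
    {
        int supported = 0; /* safe default if the query errors out */

        (void)query_mcia_128b(probe_fails, &supported);
        return supported ? 128 : 48;
    }

    int main(void)
    {
        printf("query ok:   %u\n", max_eeprom_len(0)); /* 128 */
        printf("query fail: %u\n", max_eeprom_len(1)); /* 48  */
        return 0;
    }
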
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+index 025e0db983feba..b032d5a4b3b84c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+@@ -1484,6 +1484,7 @@ static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ vfree(types_info->data);
+ err_data_alloc:
+ kfree(types_info);
++ linecards->types_info = NULL;
+ return err;
+ }
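
The one-line linecards fix clears `linecards->types_info` on the error path, so later teardown sees NULL instead of a dangling pointer to freed memory. A minimal sketch of the idiom, with the structures simplified:

    #include <stdlib.h>

    struct types_info { void *data; };
    struct linecards { struct types_info *types_info; };

    static int types_init(struct linecards *lc)
    {
        struct types_info *ti = calloc(1, sizeof(*ti));

        if (!ti)
            return -1;
        lc->types_info = ti;

        ti->data = malloc(64);
        if (!ti->data)
            goto err_data_alloc;
        return 0;

    err_data_alloc:
        free(ti);
        lc->types_info = NULL; /* the added line: no dangling pointer */
        return -1;
    }

    static void types_fini(struct linecards *lc)
    {
        if (!lc->types_info) /* safe because init cleared it on failure */
            return;
        free(lc->types_info->data);
        free(lc->types_info);
        lc->types_info = NULL;
    }

    int main(void)
    {
        struct linecards lc = { 0 };

        if (types_init(&lc) == 0)
            types_fini(&lc);
        return 0;
    }
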
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+index 7c59c8a1358405..b01b000bc71c14 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+@@ -9,6 +9,7 @@
+ #include <linux/rhashtable.h>
+ #include <linux/netdevice.h>
+ #include <linux/mutex.h>
++#include <linux/refcount.h>
+ #include <net/net_namespace.h>
+ #include <net/tc_act/tc_vlan.h>
+
+@@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+ struct rhashtable rule_ht;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ unsigned long priv[];
+@@ -99,7 +100,7 @@ static bool
+ mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
+ {
+ /* We hold a reference on ruleset ourselves */
+- return ruleset->ref_count == 2;
++ return refcount_read(&ruleset->ref_count) == 2;
+ }
+
+ int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+@@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ ruleset = kzalloc(alloc_size, GFP_KERNEL);
+ if (!ruleset)
+ return ERR_PTR(-ENOMEM);
+- ruleset->ref_count = 1;
++ refcount_set(&ruleset->ref_count, 1);
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
+ ruleset->ht_key.ops = ops;
+@@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+
+ static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
+ {
+- ruleset->ref_count++;
++ refcount_inc(&ruleset->ref_count);
+ }
+
+ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+ {
+- if (--ruleset->ref_count)
++ if (!refcount_dec_and_test(&ruleset->ref_count))
+ return;
+ mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+index 4b713832fdd559..f5c0a4214c4e56 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+@@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ if (err)
+ return err;
+
+- lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
++ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
++ erp_id);
+ if (IS_ERR(lkey_id))
+ return PTR_ERR(lkey_id);
+ aentry->lkey_id = lkey_id;
+@@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
+ priority, region->tcam_region_info,
+- aentry->enc_key, erp_id,
++ aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+@@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+
+ mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
+ region->tcam_region_info,
+- aentry->enc_key, erp_id,
++ aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+@@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
+ priority, region->tcam_region_info,
+- aentry->enc_key, erp_id,
++ aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+@@ -480,15 +481,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ int err;
+
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values,
+- aentry->ht_key.full_enc_key, mask);
++ aentry->ht_key.enc_key, mask);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+- memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
+- sizeof(aentry->enc_key));
+
+ /* Compute all needed delta information and clear the delta bits
+ * from the encrypted key.
+@@ -497,9 +496,8 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ aentry->delta_info.value =
+- mlxsw_sp_acl_erp_delta_value(delta,
+- aentry->ht_key.full_enc_key);
+- mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
++ mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key);
++ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
+
+ /* Add rule to the list of A-TCAM rules, assuming this
+ * rule is intended to A-TCAM. In case this rule does
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+index e2aced7ab45476..a54eedb69a3f5b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+@@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ memcpy(chunk + pad_bytes, &erp_region_id,
+ sizeof(erp_region_id));
+ memcpy(chunk + key_offset,
+- &aentry->enc_key[chunk_key_offsets[chunk_index]],
++ &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
+ chunk_key_len);
+ chunk += chunk_len;
+ }
+@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+ * is 2^ACL_MAX_BF_LOG
+ */
+ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+- bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
++ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
+ GFP_KERNEL);
+ if (!bf)
+ return ERR_PTR(-ENOMEM);
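
Wrapping the bank-size times bank-count product in size_mul() keeps the struct_size() argument from overflowing on large resource values; an overflowed size would make kzalloc() return a buffer shorter than the refcnt array it is meant to hold. A rough model of what size_mul() guards against, using the GCC/Clang overflow builtin:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Approximation of the kernel's size_mul(): saturate to SIZE_MAX on
     * overflow so the allocation fails instead of being truncated. */
    static size_t size_mul(size_t a, size_t b)
    {
        size_t r;

        if (__builtin_mul_overflow(a, b, &r))
            return SIZE_MAX;
        return r;
    }

    int main(void)
    {
        size_t bank_size = (size_t)1 << 20;
        size_t num_banks = SIZE_MAX / 4; /* absurd, to force overflow */

        size_t n = size_mul(bank_size, num_banks);
        void *p = (n == SIZE_MAX) ? NULL : calloc(n, 1);

        printf("n=%zu p=%p\n", n, p); /* refused, not silently short */
        free(p);
        return 0;
    }
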
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index 4c98950380d536..9eee229303cced 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ unsigned long *p_index)
+ {
+ unsigned int num_rows, entry_size;
++ unsigned long index;
+
+ /* We only allow allocations of entire rows */
+ if (num_erps % erp_core->num_erp_banks != 0)
+@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
+ entry_size = erp_core->erpt_entries_size[region_type];
+ num_rows = num_erps / erp_core->num_erp_banks;
+
+- *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+- if (*p_index == 0)
++ index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
++ if (!index)
+ return -ENOBUFS;
+- *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
++
++ *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+ return 0;
+ }
+@@ -1215,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
+ return err ? false : true;
+ }
+
+-static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
+-{
+- const struct mlxsw_sp_acl_erp_key *key1 = obj1;
+- const struct mlxsw_sp_acl_erp_key *key2 = obj2;
+-
+- /* For hints purposes, two objects are considered equal
+- * in case the masks are the same. Does not matter what
+- * the "ctcam" value is.
+- */
+- return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
+-}
+-
+ static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ void *obj)
+ {
+@@ -1306,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+ static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ .obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_check = mlxsw_sp_acl_erp_delta_check,
+- .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
+ .delta_create = mlxsw_sp_acl_erp_delta_create,
+ .delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ .root_create = mlxsw_sp_acl_erp_root_create,
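
gen_pool_alloc() returns 0 on failure, and the ERP pool is biased by MLXSW_SP_ACL_ERP_GENALLOC_OFFSET so a successful allocation can never be 0; the hunk also switches to a local variable so the caller's output is only written on success. A sketch of that sentinel-plus-offset pattern with a fake allocator:

    #include <stdio.h>

    #define GENALLOC_OFFSET 0x100ul /* pool bias: valid handles >= this */

    /* Fake allocator with gen_pool_alloc() semantics: 0 means failure. */
    static unsigned long fake_pool_alloc(int fail)
    {
        return fail ? 0 : GENALLOC_OFFSET + 0x40;
    }

    static int table_alloc(unsigned long *p_index, int fail)
    {
        unsigned long index = fake_pool_alloc(fail);

        if (!index) /* unambiguous: 0 is never a valid biased handle */
            return -1;

        *p_index = index - GENALLOC_OFFSET; /* write output on success */
        return 0;
    }

    int main(void)
    {
        unsigned long idx = 0;
        int ok = table_alloc(&idx, 0);

        printf("ok=%d idx=%#lx\n", ok, idx);
        printf("fail=%d\n", table_alloc(&idx, 1));
        return 0;
    }
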
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index d50786b0a6ce47..92a406f02eae74 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -9,6 +9,8 @@
+ #include <linux/rhashtable.h>
+ #include <linux/netdevice.h>
+ #include <linux/mutex.h>
++#include <linux/refcount.h>
++#include <linux/idr.h>
+ #include <net/devlink.h>
+ #include <trace/events/mlxsw.h>
+
+@@ -57,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
+ u16 *p_id)
+ {
+- u16 id;
++ int id;
+
+- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
+- if (id < tcam->max_regions) {
+- __set_bit(id, tcam->used_regions);
+- *p_id = id;
+- return 0;
+- }
+- return -ENOBUFS;
++ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
++ GFP_KERNEL);
++ if (id < 0)
++ return id;
++
++ *p_id = id;
++
++ return 0;
+ }
+
+ static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
+ u16 id)
+ {
+- __clear_bit(id, tcam->used_regions);
++ ida_free(&tcam->used_regions, id);
+ }
+
+ static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
+ u16 *p_id)
+ {
+- u16 id;
++ int id;
+
+- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
+- if (id < tcam->max_groups) {
+- __set_bit(id, tcam->used_groups);
+- *p_id = id;
+- return 0;
+- }
+- return -ENOBUFS;
++ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
++ GFP_KERNEL);
++ if (id < 0)
++ return id;
++
++ *p_id = id;
++
++ return 0;
+ }
+
+ static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
+ u16 id)
+ {
+- __clear_bit(id, tcam->used_groups);
++ ida_free(&tcam->used_groups, id);
+ }
+
+ struct mlxsw_sp_acl_tcam_pattern {
+@@ -155,7 +159,7 @@ struct mlxsw_sp_acl_tcam_vregion {
+ struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
+ } rehash;
+ struct mlxsw_sp *mlxsw_sp;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ };
+
+ struct mlxsw_sp_acl_tcam_vchunk;
+@@ -176,7 +180,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
+ unsigned int priority; /* Priority within the vregion and group */
+ struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ };
+
+ struct mlxsw_sp_acl_tcam_entry {
+@@ -681,13 +685,13 @@ static void
+ mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+ {
++ struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->region_fini(mlxsw_sp, region->priv);
+ mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+ mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+- mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
+- region->id);
++ mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
+ kfree(region);
+ }
+
+@@ -714,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+ rehash.dw.work);
+ int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+
++ mutex_lock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
++ mutex_unlock(&vregion->lock);
+ if (credits < 0)
+ /* Rehash gone out of credits so it was interrupted.
+ * Schedule the work as soon as possible to continue.
+@@ -724,6 +730,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+ }
+
++static void
++mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
++{
++ /* The entry markers are relative to the current chunk and therefore
++	 * need to be reset together with the chunk marker.
++ */
++ ctx->current_vchunk = NULL;
++ ctx->start_ventry = NULL;
++ ctx->stop_ventry = NULL;
++}
++
+ static void
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+ {
+@@ -746,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
+ * the current chunk pointer to make sure all chunks
+ * are properly migrated.
+ */
+- vregion->rehash.ctx.current_vchunk = NULL;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
+ }
+
+ static struct mlxsw_sp_acl_tcam_vregion *
+@@ -769,7 +786,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
+ vregion->tcam = tcam;
+ vregion->mlxsw_sp = mlxsw_sp;
+ vregion->vgroup = vgroup;
+- vregion->ref_count = 1;
++ refcount_set(&vregion->ref_count, 1);
+
+ vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
+ if (IS_ERR(vregion->key_info)) {
+@@ -819,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
+
+ if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
++ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
++
+ mutex_lock(&tcam->lock);
+ list_del(&vregion->tlist);
+ mutex_unlock(&tcam->lock);
+- cancel_delayed_work_sync(&vregion->rehash.dw);
++ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
++ ctx->hints_priv)
++ ops->region_rehash_hints_put(ctx->hints_priv);
+ }
+ mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
+ if (vregion->region2)
+@@ -856,7 +877,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
+ */
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+- vregion->ref_count++;
++ refcount_inc(&vregion->ref_count);
+ return vregion;
+ }
+
+@@ -871,7 +892,7 @@ static void
+ mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vregion *vregion)
+ {
+- if (--vregion->ref_count)
++ if (!refcount_dec_and_test(&vregion->ref_count))
+ return;
+ mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
+ }
+@@ -924,7 +945,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
+ INIT_LIST_HEAD(&vchunk->ventry_list);
+ vchunk->priority = priority;
+ vchunk->vgroup = vgroup;
+- vchunk->ref_count = 1;
++ refcount_set(&vchunk->ref_count, 1);
+
+ vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
+ priority, elusage);
+@@ -1008,7 +1029,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
+ if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
+ elusage)))
+ return ERR_PTR(-EINVAL);
+- vchunk->ref_count++;
++ refcount_inc(&vchunk->ref_count);
+ return vchunk;
+ }
+ return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
+@@ -1019,7 +1040,7 @@ static void
+ mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+ {
+- if (--vchunk->ref_count)
++ if (!refcount_dec_and_test(&vchunk->ref_count))
+ return;
+ mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
+ }
+@@ -1153,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_ventry *ventry,
+ bool *activity)
+ {
+- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
+- ventry->entry, activity);
++ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
++ int err;
++
++ mutex_lock(&vregion->lock);
++ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
++ activity);
++ mutex_unlock(&vregion->lock);
++ return err;
+ }
+
+ static int
+@@ -1188,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
+ {
+ struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+
++ WARN_ON(vchunk->chunk2);
++
+ new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
+ if (IS_ERR(new_chunk))
+ return PTR_ERR(new_chunk);
+@@ -1206,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
+ {
+ mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+ vchunk->chunk2 = NULL;
+- ctx->current_vchunk = NULL;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ }
+
+ static int
+@@ -1229,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ return 0;
+ }
+
++ if (list_empty(&vchunk->ventry_list))
++ goto out;
++
+ /* If the migration got interrupted, we have the ventry to start from
+ * stored in context.
+ */
+@@ -1238,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ ventry = list_first_entry(&vchunk->ventry_list,
+ typeof(*ventry), list);
+
++ WARN_ON(ventry->vchunk != vchunk);
++
+ list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+ /* During rollback, once we reach the ventry that failed
+ * to migrate, we are done.
+@@ -1278,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+ }
+ }
+
++out:
+ mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
+ return 0;
+ }
+@@ -1291,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+ int err;
+
++ if (list_empty(&vregion->vchunk_list))
++ return 0;
++
+ /* If the migration got interrupted, we have the vchunk
+ * we are working on stored in context.
+ */
+@@ -1319,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ int err, err2;
+
+ trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
+- mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
++ if (ctx->this_is_rollback)
++ return err;
+ /* In case migration was not successful, we need to swap
+ * so the original region pointer is assigned again
+ * to vregion->region.
+ */
+ swap(vregion->region, vregion->region2);
+- ctx->current_vchunk = NULL;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+ ctx->this_is_rollback = true;
+ err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+ ctx, credits);
+@@ -1339,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+ /* Let the rollback to be continued later on. */
+ }
+ }
+- mutex_unlock(&vregion->lock);
+ trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
+ return err;
+ }
+@@ -1388,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
+
+ ctx->hints_priv = hints_priv;
+ ctx->this_is_rollback = false;
++ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
+
+ return 0;
+
+@@ -1440,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+ err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+ ctx, credits);
+ if (err) {
+- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
++ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
++ return;
+ }
+
+ if (*credits >= 0)
+@@ -1548,22 +1588,16 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ if (max_tcam_regions < max_regions)
+ max_regions = max_tcam_regions;
+
+- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
+- if (!tcam->used_regions) {
+- err = -ENOMEM;
+- goto err_alloc_used_regions;
+- }
++ ida_init(&tcam->used_regions);
+ tcam->max_regions = max_regions;
+
+ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
+- if (!tcam->used_groups) {
+- err = -ENOMEM;
+- goto err_alloc_used_groups;
+- }
++ ida_init(&tcam->used_groups);
+ tcam->max_groups = max_groups;
+ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_GROUP_SIZE);
++ tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
++ MLXSW_REG_PAGT_ACL_MAX_NUM);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+@@ -1572,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ return 0;
+
+ err_tcam_init:
+- bitmap_free(tcam->used_groups);
+-err_alloc_used_groups:
+- bitmap_free(tcam->used_regions);
+-err_alloc_used_regions:
++ ida_destroy(&tcam->used_groups);
++ ida_destroy(&tcam->used_regions);
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+ err_rehash_params_register:
+ mutex_destroy(&tcam->lock);
+@@ -1588,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->fini(mlxsw_sp, tcam->priv);
+- bitmap_free(tcam->used_groups);
+- bitmap_free(tcam->used_regions);
++ ida_destroy(&tcam->used_groups);
++ ida_destroy(&tcam->used_regions);
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+ mutex_destroy(&tcam->lock);
+ }
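
Replacing the used_regions/used_groups bitmaps with IDAs drops the hand-rolled find_first_zero_bit allocation and lets the IDA handle its own locking; the helpers also widen `id` to int because ida_alloc_max() reports errors as negative values. A toy model of the two calls' semantics (the real IDA is a tree-backed allocator, not a fixed array):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_IDS 64

    static unsigned char used[MAX_IDS]; /* toy backing store */

    /* Lowest free ID in [0, max], or a negative errno. */
    static int ida_alloc_max(unsigned char *ida, int max)
    {
        for (int id = 0; id <= max && id < MAX_IDS; id++) {
            if (!ida[id]) {
                ida[id] = 1;
                return id;
            }
        }
        return -ENOSPC;
    }

    static void ida_free(unsigned char *ida, int id)
    {
        ida[id] = 0;
    }

    int main(void)
    {
        int a = ida_alloc_max(used, 15);
        int b = ida_alloc_max(used, 15);

        printf("a=%d b=%d\n", a, b);                    /* 0 1 */
        ida_free(used, a);
        printf("reused=%d\n", ida_alloc_max(used, 15)); /* 0   */
        return 0;
    }
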
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+index 462bf448497d33..010204f73ea46b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+@@ -6,15 +6,16 @@
+
+ #include <linux/list.h>
+ #include <linux/parman.h>
++#include <linux/idr.h>
+
+ #include "reg.h"
+ #include "spectrum.h"
+ #include "core_acl_flex_keys.h"
+
+ struct mlxsw_sp_acl_tcam {
+- unsigned long *used_regions; /* bit array */
++ struct ida used_regions;
+ unsigned int max_regions;
+- unsigned long *used_groups; /* bit array */
++ struct ida used_groups;
+ unsigned int max_groups;
+ unsigned int max_group_size;
+ struct mutex lock; /* guards vregion list */
+@@ -166,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region {
+ };
+
+ struct mlxsw_sp_acl_atcam_entry_ht_key {
+- char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
+- * key.
+- */
++ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus
++ * delta bits.
++ */
+ u8 erp_id;
+ };
+
+@@ -180,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry {
+ struct rhash_head ht_node;
+ struct list_head list; /* Member in entries_list */
+ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+- * minus delta bits.
+- */
+ struct {
+ u16 start;
+ u8 mask;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+index c9f1c79f3f9d07..ba090262e27ef8 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+@@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+ {
++ u16 local_port, local_port_1, first_local_port, last_local_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+- u16 local_port, local_port_1, last_local_port;
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count, current_page = 0;
+ unsigned long cb_priv = 0;
+@@ -1628,6 +1628,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
++ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
+@@ -1645,9 +1646,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ if (local_port != MLXSW_PORT_CPU_PORT) {
+ /* Ingress quotas are not supported for the CPU port */
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+- local_port, 1);
++ local_port - first_local_port,
++ 1);
+ }
+- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
++ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
++ local_port - first_local_port,
++ 1);
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ &bulk_list);
+@@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+ {
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+- u16 local_port, last_local_port;
++ u16 local_port, first_local_port, last_local_port;
+ LIST_HEAD(bulk_list);
+ unsigned int masked_count;
+ u8 current_page = 0;
+@@ -1702,6 +1706,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
++ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
+@@ -1719,9 +1724,12 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ if (local_port != MLXSW_PORT_CPU_PORT) {
+ /* Ingress quotas are not supported for the CPU port */
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+- local_port, 1);
++ local_port - first_local_port,
++ 1);
+ }
+- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
++ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
++ local_port - first_local_port,
++ 1);
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ &bulk_list);
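
The SBSR fix indexes the ingress/egress port masks relative to the first port of the current page; using the absolute port number would address bits past the mask on every page after the first. A quick model of the index arithmetic (the PORTS_IN_PAGE value is a stand-in):

    #include <stdio.h>

    #define PORTS_IN_PAGE 256 /* stand-in for the register's page size */

    int main(void)
    {
        unsigned int current_page = 1;
        unsigned int first_local_port = current_page * PORTS_IN_PAGE;
        unsigned int last_local_port =
            first_local_port + PORTS_IN_PAGE - 1;

        for (unsigned int port = first_local_port;
             port <= first_local_port + 2; port++) {
            /* The fix: a bit index within this page's mask, not the
             * absolute port number (which is >= PORTS_IN_PAGE here). */
            unsigned int bit = port - first_local_port;

            printf("port %u -> mask bit %u (page covers up to %u)\n",
                   port, bit, last_local_port);
        }
        return 0;
    }
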
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index debd2c466f11cb..d15aa6b25a8884 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -501,7 +501,7 @@ struct mlxsw_sp_rt6 {
+
+ struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+- unsigned int ref_count;
++ refcount_t ref_count;
+ enum mlxsw_sp_l3proto proto;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
+ struct mlxsw_sp_prefix_usage prefix_usage;
+@@ -578,7 +578,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
+
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
+- if (lpm_tree->ref_count == 0)
++ if (refcount_read(&lpm_tree->ref_count) == 0)
+ return lpm_tree;
+ }
+ return NULL;
+@@ -654,7 +654,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ sizeof(lpm_tree->prefix_usage));
+ memset(&lpm_tree->prefix_ref_count, 0,
+ sizeof(lpm_tree->prefix_ref_count));
+- lpm_tree->ref_count = 1;
++ refcount_set(&lpm_tree->ref_count, 1);
+ return lpm_tree;
+
+ err_left_struct_set:
+@@ -678,7 +678,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
+- if (lpm_tree->ref_count != 0 &&
++ if (refcount_read(&lpm_tree->ref_count) &&
+ lpm_tree->proto == proto &&
+ mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
+ prefix_usage)) {
+@@ -691,14 +691,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+
+ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
+ {
+- lpm_tree->ref_count++;
++ refcount_inc(&lpm_tree->ref_count);
+ }
+
+ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+ {
+- if (--lpm_tree->ref_count == 0)
+- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
++ if (!refcount_dec_and_test(&lpm_tree->ref_count))
++ return;
++ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ }
+
+ #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
+@@ -11458,6 +11459,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ if (err)
+ goto err_register_netevent_notifier;
+
++ mlxsw_sp->router->netdevice_nb.notifier_call =
++ mlxsw_sp_router_netdevice_event;
++ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
++ &mlxsw_sp->router->netdevice_nb);
++ if (err)
++ goto err_register_netdev_notifier;
++
+ mlxsw_sp->router->nexthop_nb.notifier_call =
+ mlxsw_sp_nexthop_obj_event;
+ err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+@@ -11473,22 +11481,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ if (err)
+ goto err_register_fib_notifier;
+
+- mlxsw_sp->router->netdevice_nb.notifier_call =
+- mlxsw_sp_router_netdevice_event;
+- err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+- &mlxsw_sp->router->netdevice_nb);
+- if (err)
+- goto err_register_netdev_notifier;
+-
+ return 0;
+
+-err_register_netdev_notifier:
+- unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+- &mlxsw_sp->router->fib_nb);
+ err_register_fib_notifier:
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
+ err_register_nexthop_notifier:
++ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
++ &router->netdevice_nb);
++err_register_netdev_notifier:
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ err_register_netevent_notifier:
+ unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
+@@ -11536,11 +11537,11 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+ {
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+
+- unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+- &router->netdevice_nb);
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &router->nexthop_nb);
++ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
++ &router->netdevice_nb);
+ unregister_netevent_notifier(&router->netevent_nb);
+ unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
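
The router hunks move netdevice-notifier registration earlier and rewire the error labels so unwinding always runs in exact reverse of registration, with mlxsw_sp_router_fini() mirroring the same order. A compact sketch of the goto-unwind idiom the fix restores; the steps are placeholders for the notifier calls:

    #include <stdio.h>

    static int reg_a(void)    { puts("reg a");   return 0;  }
    static int reg_b(void)    { puts("reg b");   return 0;  }
    static int reg_c(void)    { puts("reg c");   return -1; } /* fails */
    static void unreg_a(void) { puts("unreg a"); }
    static void unreg_b(void) { puts("unreg b"); }

    static int init(void)
    {
        int err;

        err = reg_a();
        if (err)
            goto err_a;
        err = reg_b();
        if (err)
            goto err_b;
        err = reg_c();
        if (err)
            goto err_c;
        return 0;

        /* Labels undo steps in strict reverse order; the bug class the
         * hunk fixes is labels drifting out of sync with the steps. */
    err_c:
        unreg_b();
    err_b:
        unreg_a();
    err_a:
        return err;
    }

    int main(void)
    {
        printf("init=%d\n", init()); /* prints unreg b, unreg a, -1 */
        return 0;
    }
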
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 6c749c148148d3..6397ff0dc951cd 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct list_head list;
+ struct list_head vlans_list;
+- unsigned int ref_count;
++ refcount_t ref_count;
+ u8 stp_state;
+ unsigned long flags;
+ bool mrouter;
+@@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
+ BR_MCAST_FLOOD;
+ INIT_LIST_HEAD(&bridge_port->vlans_list);
+ list_add(&bridge_port->list, &bridge_device->ports_list);
+- bridge_port->ref_count = 1;
++ refcount_set(&bridge_port->ref_count, 1);
+
+ err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+@@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
+
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
+ if (bridge_port) {
+- bridge_port->ref_count++;
++ refcount_inc(&bridge_port->ref_count);
+ return bridge_port;
+ }
+
+@@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
+ {
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+- if (--bridge_port->ref_count != 0)
++ if (!refcount_dec_and_test(&bridge_port->ref_count))
+ return;
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_bridge_port_destroy(bridge_port);
+diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
+index fecd43754cead1..31f75b4a67fd79 100644
+--- a/drivers/net/ethernet/micrel/ks8851.h
++++ b/drivers/net/ethernet/micrel/ks8851.h
+@@ -350,6 +350,8 @@ union ks8851_tx_hdr {
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
++ * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR).
++ * @queued_len: Space required in hardware TX buffer for queued packets in txq.
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
+@@ -366,7 +368,6 @@ union ks8851_tx_hdr {
+ * @rdfifo: FIFO read callback
+ * @wrfifo: FIFO write callback
+ * @start_xmit: start_xmit() implementation callback
+- * @rx_skb: rx_skb() implementation callback
+ * @flush_tx_work: flush_tx_work() implementation callback
+ *
+ * The @statelock is used to protect information in the structure which may
+@@ -399,6 +400,7 @@ struct ks8851_net {
+ struct work_struct rxctrl_work;
+
+ struct sk_buff_head txq;
++ unsigned int queued_len;
+
+ struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
+@@ -420,8 +422,6 @@ struct ks8851_net {
+ struct sk_buff *txp, bool irq);
+ netdev_tx_t (*start_xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+- void (*rx_skb)(struct ks8851_net *ks,
+- struct sk_buff *skb);
+ void (*flush_tx_work)(struct ks8851_net *ks);
+ };
+
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index cfbc900d4aeb9e..7fa1820db9cce6 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -231,25 +231,16 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ }
+
+-/**
+- * ks8851_rx_skb - receive skbuff
+- * @ks: The device state.
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+- ks->rx_skb(ks, skb);
+-}
+-
+ /**
+ * ks8851_rx_pkts - receive packets from the host
+ * @ks: The device information.
++ * @rxq: Queue of packets received in this function.
+ *
+ * This is called from the IRQ work queue when the system detects that there
+ * are packets in the receive queue. Find out how many packets there are and
+ * read them from the FIFO.
+ */
+-static void ks8851_rx_pkts(struct ks8851_net *ks)
++static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
+ {
+ struct sk_buff *skb;
+ unsigned rxfc;
+@@ -309,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ ks8851_dbg_dumpkkt(ks, rxpkt);
+
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+- ks8851_rx_skb(ks, skb);
++ __skb_queue_tail(rxq, skb);
+
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
+@@ -336,61 +327,50 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ {
+ struct ks8851_net *ks = _ks;
+- unsigned handled = 0;
++ struct sk_buff_head rxq;
+ unsigned long flags;
+ unsigned int status;
++ struct sk_buff *skb;
+
+ ks8851_lock(ks, &flags);
+
+ status = ks8851_rdreg16(ks, KS_ISR);
++ ks8851_wrreg16(ks, KS_ISR, status);
+
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: status 0x%04x\n", __func__, status);
+
+- if (status & IRQ_LCI)
+- handled |= IRQ_LCI;
+-
+ if (status & IRQ_LDI) {
+ u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_WKEVT_MASK;
+ ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+-
+- handled |= IRQ_LDI;
+ }
+
+- if (status & IRQ_RXPSI)
+- handled |= IRQ_RXPSI;
+-
+ if (status & IRQ_TXI) {
+- handled |= IRQ_TXI;
+-
+- /* no lock here, tx queue should have been stopped */
+-
+- /* update our idea of how much tx space is available to the
+- * system */
+- ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
++ unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+
+ netif_dbg(ks, intr, ks->netdev,
+- "%s: txspace %d\n", __func__, ks->tx_space);
+- }
++ "%s: txspace %d\n", __func__, tx_space);
+
+- if (status & IRQ_RXI)
+- handled |= IRQ_RXI;
++ spin_lock_bh(&ks->statelock);
++ ks->tx_space = tx_space;
++ if (netif_queue_stopped(ks->netdev))
++ netif_wake_queue(ks->netdev);
++ spin_unlock_bh(&ks->statelock);
++ }
+
+ if (status & IRQ_SPIBEI) {
+ netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
+- handled |= IRQ_SPIBEI;
+ }
+
+- ks8851_wrreg16(ks, KS_ISR, handled);
+-
+ if (status & IRQ_RXI) {
+ /* the datasheet says to disable the rx interrupt during
+ * packet read-out, however we're masking the interrupt
+ * from the device so do not bother masking just the RX
+ * from the device. */
+
+- ks8851_rx_pkts(ks);
++ __skb_queue_head_init(&rxq);
++ ks8851_rx_pkts(ks, &rxq);
+ }
+
+ /* if something stopped the rx process, probably due to wanting
+@@ -414,8 +394,9 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ if (status & IRQ_LCI)
+ mii_check_link(&ks->mii);
+
+- if (status & IRQ_TXI)
+- netif_wake_queue(ks->netdev);
++ if (status & IRQ_RXI)
++ while ((skb = __skb_dequeue(&rxq)))
++ netif_rx(skb);
+
+ return IRQ_HANDLED;
+ }
+@@ -500,6 +481,8 @@ static int ks8851_net_open(struct net_device *dev)
+ ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
+ ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
+
++ ks->queued_len = 0;
++ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ netif_start_queue(ks->netdev);
+
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+@@ -653,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
+
+ /* schedule work to do the actual set of the data if needed */
+
+- spin_lock(&ks->statelock);
++ spin_lock_bh(&ks->statelock);
+
+ if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
+ memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
+ schedule_work(&ks->rxctrl_work);
+ }
+
+- spin_unlock(&ks->statelock);
++ spin_unlock_bh(&ks->statelock);
+ }
+
+ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
+@@ -1119,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ int ret;
+
+ ks->netdev = netdev;
+- ks->tx_space = 6144;
+
+ ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(ks->gpio);
+diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
+index 7f49042484bdce..96fb0ffcedb902 100644
+--- a/drivers/net/ethernet/micrel/ks8851_par.c
++++ b/drivers/net/ethernet/micrel/ks8851_par.c
+@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
+ iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
+ }
+
+-/**
+- * ks8851_rx_skb_par - receive skbuff
+- * @ks: The device state.
+- * @skb: The skbuff
+- */
+-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
+-{
+- netif_rx(skb);
+-}
+-
+ static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
+ {
+ return ks8851_rdreg16_par(ks, KS_TXQCR);
+@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
+ ks->rdfifo = ks8851_rdfifo_par;
+ ks->wrfifo = ks8851_wrfifo_par;
+ ks->start_xmit = ks8851_start_xmit_par;
+- ks->rx_skb = ks8851_rx_skb_par;
+
+ #define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_RXI | /* RX done */ \
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 70bc7253454f6b..e33a5e7beb39ec 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -287,13 +287,15 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
+ }
+
+ /**
+- * ks8851_rx_skb_spi - receive skbuff
+- * @ks: The device state
+- * @skb: The skbuff
++ * calc_txlen - calculate size of message to send packet
++ * @len: Length of data
++ *
++ * Returns the size of the TXFIFO message needed to send
++ * this packet.
+ */
+-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
++static unsigned int calc_txlen(unsigned int len)
+ {
+- netif_rx(skb);
++ return ALIGN(len + 4, 4);
+ }
+
+ /**
+@@ -305,7 +307,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+ */
+ static void ks8851_tx_work(struct work_struct *work)
+ {
++ unsigned int dequeued_len = 0;
+ struct ks8851_net_spi *kss;
++ unsigned short tx_space;
+ struct ks8851_net *ks;
+ unsigned long flags;
+ struct sk_buff *txb;
+@@ -322,6 +326,8 @@ static void ks8851_tx_work(struct work_struct *work)
+ last = skb_queue_empty(&ks->txq);
+
+ if (txb) {
++ dequeued_len += calc_txlen(txb->len);
++
+ ks8851_wrreg16_spi(ks, KS_RXQCR,
+ ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrfifo_spi(ks, txb, last);
+@@ -332,6 +338,13 @@ static void ks8851_tx_work(struct work_struct *work)
+ }
+ }
+
++ tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
++
++ spin_lock_bh(&ks->statelock);
++ ks->queued_len -= dequeued_len;
++ ks->tx_space = tx_space;
++ spin_unlock_bh(&ks->statelock);
++
+ ks8851_unlock_spi(ks, &flags);
+ }
+
+@@ -346,18 +359,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
+ flush_work(&kss->tx_work);
+ }
+
+-/**
+- * calc_txlen - calculate size of message to send packet
+- * @len: Length of data
+- *
+- * Returns the size of the TXFIFO message needed to send
+- * this packet.
+- */
+-static unsigned int calc_txlen(unsigned int len)
+-{
+- return ALIGN(len + 4, 4);
+-}
+-
+ /**
+ * ks8851_start_xmit_spi - transmit packet using SPI
+ * @skb: The buffer to transmit
+@@ -386,16 +387,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
+
+ spin_lock(&ks->statelock);
+
+- if (needed > ks->tx_space) {
++ if (ks->queued_len + needed > ks->tx_space) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ } else {
+- ks->tx_space -= needed;
++ ks->queued_len += needed;
+ skb_queue_tail(&ks->txq, skb);
+ }
+
+ spin_unlock(&ks->statelock);
+- schedule_work(&kss->tx_work);
++ if (ret == NETDEV_TX_OK)
++ schedule_work(&kss->tx_work);
+
+ return ret;
+ }
+@@ -423,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
+ ks->rdfifo = ks8851_rdfifo_spi;
+ ks->wrfifo = ks8851_wrfifo_spi;
+ ks->start_xmit = ks8851_start_xmit_spi;
+- ks->rx_skb = ks8851_rx_skb_spi;
+ ks->flush_tx_work = ks8851_flush_tx_work_spi;
+
+ #define STD_IRQ (IRQ_LCI | /* Link Change */ \
+diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+index 2db5949b4c7e4e..72b3092d35f712 100644
+--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+@@ -1146,8 +1146,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ if (netdev->phydev)
+ phy_ethtool_get_wol(netdev->phydev, wol);
+
+- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
++ if (wol->supported != adapter->phy_wol_supported)
++ netif_warn(adapter, drv, adapter->netdev,
++ "PHY changed its supported WOL! old=%x, new=%x\n",
++ adapter->phy_wol_supported, wol->supported);
++
++ wol->supported |= MAC_SUPPORTED_WAKES;
+
+ if (adapter->is_pci11x1x)
+ wol->supported |= WAKE_MAGICSECURE;
+@@ -1162,7 +1166,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ {
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
++	/* WAKE_MAGICSECURE is a modifier of, and only valid together with,
++ * WAKE_MAGIC
++ */
++ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
++ return -EINVAL;
++
++ if (netdev->phydev) {
++ struct ethtool_wolinfo phy_wol;
++ int ret;
++
++ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
++
++ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
++ * for PHYs that do not support WAKE_MAGICSECURE
++ */
++ if (wol->wolopts & WAKE_MAGICSECURE &&
++ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
++ phy_wol.wolopts &= ~WAKE_MAGIC;
++
++ ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
++ if (ret && (ret != -EOPNOTSUPP))
++ return ret;
++
++ if (ret == -EOPNOTSUPP)
++ adapter->phy_wolopts = 0;
++ else
++ adapter->phy_wolopts = phy_wol.wolopts;
++ } else {
++ adapter->phy_wolopts = 0;
++ }
++
+ adapter->wolopts = 0;
++ wol->wolopts &= ~adapter->phy_wolopts;
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wolopts |= WAKE_UCAST;
+ if (wol->wolopts & WAKE_MCAST)
+@@ -1183,10 +1219,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ }
+
++ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
+ device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+
+- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+- : -ENETDOWN;
++ return 0;
+ }
+ #endif /* CONFIG_PM */
+
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index c81cdeb4d4e7ee..92010bfe5e4133 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -25,6 +25,8 @@
+ #define PCS_POWER_STATE_DOWN 0x6
+ #define PCS_POWER_STATE_UP 0x4
+
++#define RFE_RD_FIFO_TH_3_DWORDS 0x3
++
+ static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+ {
+ u32 chip_rev;
+@@ -3060,6 +3062,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
+ if (ret)
+ goto close_tx;
+ }
++
++#ifdef CONFIG_PM
++ if (adapter->netdev->phydev) {
++ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++
++ phy_ethtool_get_wol(netdev->phydev, &wol);
++ adapter->phy_wol_supported = wol.supported;
++ adapter->phy_wolopts = wol.wolopts;
++ }
++#endif
++
+ return 0;
+
+ close_tx:
+@@ -3223,6 +3236,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
+ lan743x_pci_cleanup(adapter);
+ }
+
++static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
++{
++ u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
++
++ if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
++ u32 misc_ctl;
++
++ misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
++ misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
++ misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
++ RFE_RD_FIFO_TH_3_DWORDS);
++ lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
++ }
++}
++
+ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ struct pci_dev *pdev)
+ {
+@@ -3238,6 +3266,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ pci11x1x_strap_get_status(adapter);
+ spin_lock_init(&adapter->eth_syslock_spinlock);
+ mutex_init(&adapter->sgmii_rw_lock);
++ pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ } else {
+ adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+@@ -3501,7 +3530,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+
+ /* clear wake settings */
+ pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+- pmtctl |= PMT_CTL_WUPS_MASK_;
++ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
+ pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+@@ -3513,10 +3542,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+
+ pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+
+- if (adapter->wolopts & WAKE_PHY) {
+- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
++ if (adapter->phy_wolopts)
+ pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+- }
++
+ if (adapter->wolopts & WAKE_MAGIC) {
+ wucsr |= MAC_WUCSR_MPEN_;
+ macrx |= MAC_RX_RXEN_;
+@@ -3612,7 +3640,7 @@ static int lan743x_pm_suspend(struct device *dev)
+ lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+
+- if (adapter->wolopts)
++ if (adapter->wolopts || adapter->phy_wolopts)
+ lan743x_pm_set_wol(adapter);
+
+ if (adapter->is_pci11x1x) {
+@@ -3636,6 +3664,7 @@ static int lan743x_pm_resume(struct device *dev)
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
++ u32 data;
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+@@ -3654,6 +3683,30 @@ static int lan743x_pm_resume(struct device *dev)
+ return ret;
+ }
+
++ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
++ netif_dbg(adapter, drv, adapter->netdev,
++ "Wakeup source : 0x%08X\n", ret);
++
++ /* Clear the wol configuration and status bits. Note that
++ * the status bits are "Write One to Clear (W1C)"
++ */
++ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
++ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
++ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
++ lan743x_csr_write(adapter, MAC_WUCSR, data);
++
++ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
++ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
++ lan743x_csr_write(adapter, MAC_WUCSR2, data);
++
++ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
++ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
++ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
++ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
++ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
++ MAC_WK_SRC_WK_FR_SAVED_;
++ lan743x_csr_write(adapter, MAC_WK_SRC, data);
++
+ /* open netdev when netdev is at running state while resume.
+ * For instance, it is true when system wakesup after pm-suspend
+ * However, it is false when system wakes up after suspend GUI menu
+@@ -3662,9 +3715,6 @@ static int lan743x_pm_resume(struct device *dev)
+ lan743x_netdev_open(netdev);
+
+ netif_device_attach(netdev);
+- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+- netif_info(adapter, drv, adapter->netdev,
+- "Wakeup source : 0x%08X\n", ret);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 52609fc13ad950..3b2c6046eb3ad5 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -26,6 +26,7 @@
+ #define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
+ #define ID_REV_CHIP_REV_A0_ (0x00000000)
+ #define ID_REV_CHIP_REV_B0_ (0x00000010)
++#define ID_REV_CHIP_REV_PCI11X1X_B0_ (0x000000B0)
+
+ #define FPGA_REV (0x04)
+ #define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
+@@ -60,6 +61,7 @@
+ #define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
+ #define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
+ #define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
++#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
+ #define PMT_CTL_READY_ BIT(7)
+ #define PMT_CTL_ETH_PHY_RST_ BIT(4)
+ #define PMT_CTL_WOL_EN_ BIT(3)
+@@ -226,12 +228,31 @@
+ #define MAC_WUCSR (0x140)
+ #define MAC_MP_SO_EN_ BIT(21)
+ #define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
++#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
++#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
++#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
++#define MAC_WUCSR_PFDA_FR_ BIT(7)
++#define MAC_WUCSR_WUFR_ BIT(6)
++#define MAC_WUCSR_MPR_ BIT(5)
++#define MAC_WUCSR_BCAST_FR_ BIT(4)
+ #define MAC_WUCSR_PFDA_EN_ BIT(3)
+ #define MAC_WUCSR_WAKE_EN_ BIT(2)
+ #define MAC_WUCSR_MPEN_ BIT(1)
+ #define MAC_WUCSR_BCST_EN_ BIT(0)
+
+ #define MAC_WK_SRC (0x144)
++#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
++#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
++#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
++#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
++#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
++#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
++#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
++#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
++#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
++#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
++#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
++
+ #define MAC_MP_SO_HI (0x148)
+ #define MAC_MP_SO_LO (0x14C)
+
+@@ -294,6 +315,10 @@
+ #define RFE_INDX(index) (0x580 + (index << 2))
+
+ #define MAC_WUCSR2 (0x600)
++#define MAC_WUCSR2_NS_RCD_ BIT(7)
++#define MAC_WUCSR2_ARP_RCD_ BIT(6)
++#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
++#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
+
+ #define SGMII_ACC (0x720)
+ #define SGMII_ACC_SGMII_BZY_ BIT(31)
+@@ -311,6 +336,9 @@
+ #define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
+ #define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+
++#define MISC_CTL_0 (0x920)
++#define MISC_CTL_0_RFE_READ_FIFO_MASK_ GENMASK(6, 4)
++
+ /* Vendor Specific SGMII MMD details */
+ #define SR_VSMMD_PCS_ID1 0x0004
+ #define SR_VSMMD_PCS_ID2 0x0005
+@@ -1006,6 +1034,8 @@ enum lan743x_sgmii_lsd {
+ LINK_2500_SLAVE
+ };
+
++#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
++ WAKE_MAGIC | WAKE_ARP)
+ struct lan743x_adapter {
+ struct net_device *netdev;
+ struct mii_bus *mdiobus;
+@@ -1013,6 +1043,8 @@ struct lan743x_adapter {
+ #ifdef CONFIG_PM
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX];
++ u32 phy_wolopts;
++ u32 phy_wol_supported;
+ #endif
+ struct pci_dev *pdev;
+ struct lan743x_csr csr;
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+index 41fa2523d91d3b..5f2cd9a8cf8fb3 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+
+ /* Now, set PGIDs for each active LAG */
+ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+- struct net_device *bond = lan966x->ports[lag]->bond;
++ struct lan966x_port *port = lan966x->ports[lag];
+ int num_active_ports = 0;
++ struct net_device *bond;
+ unsigned long bond_mask;
+ u8 aggr_idx[16];
+
+- if (!bond || (visited & BIT(lag)))
++ if (!port || !port->bond || (visited & BIT(lag)))
+ continue;
+
++ bond = port->bond;
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+
+ for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ struct lan966x_port *port = lan966x->ports[p];
+
++ if (!port)
++ continue;
++
+ lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ lan966x, ANA_PGID(p));
+ if (port->lag_tx_active)
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 0d6e79af241067..c3f6c10bc23936 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -474,14 +474,14 @@ static int lan966x_port_hwtstamp_set(struct net_device *dev,
+ cfg->source != HWTSTAMP_SOURCE_PHYLIB)
+ return -EOPNOTSUPP;
+
++ if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp)
++ return -EOPNOTSUPP;
++
+ err = lan966x_ptp_setup_traps(port, cfg);
+ if (err)
+ return err;
+
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
+- if (!port->lan966x->ptp)
+- return -EOPNOTSUPP;
+-
+ err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
+ if (err) {
+ lan966x_ptp_del_traps(port);
+@@ -1088,8 +1088,6 @@ static int lan966x_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, lan966x);
+ lan966x->dev = &pdev->dev;
+
+- lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+-
+ if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ ether_addr_copy(lan966x->base_mac, mac_addr);
+ } else {
+@@ -1180,6 +1178,8 @@ static int lan966x_probe(struct platform_device *pdev)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "no ethernet-ports child found\n");
+
++ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
++
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+@@ -1258,6 +1258,8 @@ static int lan966x_probe(struct platform_device *pdev)
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
++ debugfs_remove_recursive(lan966x->debugfs_root);
++
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+index 92108d354051c3..2e83bbb9477e06 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+@@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port)
+ lan966x_taprio_speed_set(port, config->speed);
+
+ /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
+- * port speed for QSGMII ports.
++ * port speed for QSGMII or SGMII ports.
+ */
+- if (phy_interface_num_ports(config->portmode) == 4)
++ if (phy_interface_num_ports(config->portmode) == 4 ||
++ config->portmode == PHY_INTERFACE_MODE_SGMII)
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+
+ lan_wr(config->duplex | mode,
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+index 4af285918ea2a4..75868b3f548ec4 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
+ list) {
+ if ((vid == 0 || mact_entry->vid == vid) &&
+ ether_addr_equal(addr, mact_entry->mac)) {
++ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
++
+ list_del(&mact_entry->list);
+ devm_kfree(sparx5->dev, mact_entry);
+-
+- sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+ }
+ }
+ mutex_unlock(&sparx5->mact_lock);
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+index dc9af480bfea17..8f116982c08a26 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, sparx5);
+ sparx5->pdev = pdev;
+ sparx5->dev = &pdev->dev;
++ spin_lock_init(&sparx5->tx_lock);
+
+ /* Do switch core reset if available */
+ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+index 6f565c0c0c3dcd..316fed5f273552 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+@@ -280,6 +280,7 @@ struct sparx5 {
+ int xtr_irq;
+ /* Frame DMA */
+ int fdma_irq;
++ spinlock_t tx_lock; /* lock for frame transmission */
+ struct sparx5_rx rx;
+ struct sparx5_tx tx;
+ /* PTP */
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+index 6db6ac6a3bbc26..dcf2e342fc14ad 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+@@ -45,8 +45,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+ fwd = (fwd >> 5);
+ info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+
++ /*
++	 * Bits 270-271 are occasionally set unexpectedly by the hardware;
++	 * clear them before extracting the timestamp.
++ */
+ info->timestamp =
+- ((u64)xtr_hdr[2] << 24) |
++ ((u64)(xtr_hdr[2] & GENMASK(5, 0)) << 24) |
+ ((u64)xtr_hdr[3] << 16) |
+ ((u64)xtr_hdr[4] << 8) |
+ ((u64)xtr_hdr[5] << 0);
+@@ -244,10 +248,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ skb_tx_timestamp(skb);
++ spin_lock(&sparx5->tx_lock);
+ if (sparx5->fdma_irq > 0)
+ ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ else
+ ret = sparx5_inject(sparx5, ifh, skb, dev);
++ spin_unlock(&sparx5->tx_lock);
+
+ if (ret == -EBUSY)
+ goto busy;
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+index 3a1b1a1f5a1951..60dd2fd603a855 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+ bool sgmii = false, inband_aneg = false;
+ int err;
+
+- if (port->conf.inband) {
++ if (conf->inband) {
+ if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+ conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
+ if (err)
+ return -EINVAL;
+
+- if (port->conf.inband) {
++ if (conf->inband) {
+ /* Enable/disable 1G counters in ASM */
+ spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+ ASM_PORT_CFG_CSC_STAT_DIS,
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+index 523e0c470894f7..55f255a3c9db69 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
+ u16 l3_proto; /* protocol specified in the template */
+ };
+
++/* SparX-5 VCAP fragment types:
++ * 0 = no fragment, 1 = initial fragment,
++ * 2 = suspicious fragment, 3 = valid follow-up fragment
++ */
++enum { /* key / mask */
++ FRAG_NOT = 0x03, /* 0 / 3 */
++ FRAG_SOME = 0x11, /* 1 / 1 */
++ FRAG_FIRST = 0x13, /* 1 / 3 */
++ FRAG_LATER = 0x33, /* 3 / 3 */
++ FRAG_INVAL = 0xff, /* invalid */
++};
++
++/* Flower fragment flag to VCAP fragment type mapping */
++static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
++ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
++ { FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
++ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
++ { FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
++ /* 0/0 0/1 1/0 1/1 <-- first_frag */
++};
++
+ static int
+ sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
+ {
+@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
+ flow_rule_match_control(st->frule, &mt);
+
+ if (mt.mask->flags) {
+- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+- value = 1; /* initial fragment */
+- mask = 0x3;
+- } else {
+- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+- value = 3; /* follow up fragment */
+- mask = 0x3;
+- } else {
+- value = 0; /* no fragment */
+- mask = 0x3;
+- }
+- }
+- } else {
+- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+- value = 3; /* follow up fragment */
+- mask = 0x3;
+- } else {
+- value = 0; /* no fragment */
+- mask = 0x3;
+- }
++ u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
++ u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
++ u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
++
++ u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
++ u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
++ u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
++
++ /* Lookup verdict based on the 2 + 2 input bits */
++ u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
++
++ if (vdt == FRAG_INVAL) {
++ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
++ "Match on invalid fragment flag combination");
++ return -EINVAL;
+ }
+
++ /* Extract VCAP fragment key and mask from verdict */
++ value = (vdt >> 4) & 0x3;
++ mask = vdt & 0x3;
++
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
+diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+index fe4e166de8a045..66ef14d95bf6f7 100644
+--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
++++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+@@ -1442,18 +1442,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
+ vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
+ rule->cookie, false);
+
+- vcap_free_rule(rule);
+-
+- /* Check that the rule has been freed: tricky to access since this
+- * memory should not be accessible anymore
+- */
+- KUNIT_EXPECT_PTR_NE(test, NULL, rule);
+- ret = list_empty(&rule->keyfields);
+- KUNIT_EXPECT_EQ(test, true, ret);
+- ret = list_empty(&rule->actionfields);
+- KUNIT_EXPECT_EQ(test, true, ret);
++ ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
++ KUNIT_EXPECT_EQ(test, 0, ret);
+
+- vcap_del_rule(&test_vctrl, &test_netdev, id);
++ vcap_free_rule(rule);
+ }
+
+ static void vcap_api_set_rule_counter_test(struct kunit *test)
+diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
+index 090e6b9832431c..901fbffbf718ee 100644
+--- a/drivers/net/ethernet/microsoft/Kconfig
++++ b/drivers/net/ethernet/microsoft/Kconfig
+@@ -17,9 +17,11 @@ if NET_VENDOR_MICROSOFT
+
+ config MICROSOFT_MANA
+ tristate "Microsoft Azure Network Adapter (MANA) support"
+- depends on PCI_MSI && X86_64
++ depends on PCI_MSI
++ depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
+ depends on PCI_HYPERV
+ select AUXILIARY_BUS
++ select PAGE_POOL
+ help
+ This driver supports Microsoft Azure Network Adapter (MANA).
+ So far, the driver is only supported on X86_64.
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index 6367de0c2c2e8f..ae014e21eb6056 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -179,7 +179,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
+ dma_addr_t dma_handle;
+ void *buf;
+
+- if (length < PAGE_SIZE || !is_power_of_2(length))
++ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+ gmi->dev = gc->dev;
+@@ -720,7 +720,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
+ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ struct gdma_mem_info *gmi)
+ {
+- unsigned int num_page = gmi->length / PAGE_SIZE;
++ unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
+ struct gdma_create_dma_region_req *req = NULL;
+ struct gdma_create_dma_region_resp resp = {};
+ struct gdma_context *gc = gd->gdma_context;
+@@ -730,10 +730,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ int err;
+ int i;
+
+- if (length < PAGE_SIZE || !is_power_of_2(length))
++ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
+ return -EINVAL;
+
+- if (offset_in_page(gmi->virt_addr) != 0)
++ if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
+ return -EINVAL;
+
+ hwc = gc->hwc.driver_data;
+@@ -754,7 +754,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
+ req->page_addr_list_len = num_page;
+
+ for (i = 0; i < num_page; i++)
+- req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
++ req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
+
+ err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
+ if (err)
+diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+index 9d1cd3bfcf6620..9d6426d4158e31 100644
+--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+@@ -51,9 +51,33 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
+ return 0;
+ }
+
++static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
++ struct hwc_work_request *req)
++{
++ struct device *dev = hwc_rxq->hwc->dev;
++ struct gdma_sge *sge;
++ int err;
++
++ sge = &req->sge;
++ sge->address = (u64)req->buf_sge_addr;
++ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
++ sge->size = req->buf_len;
++
++ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
++ req->wqe_req.sgl = sge;
++ req->wqe_req.num_sge = 1;
++ req->wqe_req.client_data_unit = 0;
++
++ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
++ if (err)
++ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
++ return err;
++}
++
+ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+- const struct gdma_resp_hdr *resp_msg)
++ struct hwc_work_request *rx_req)
+ {
++ const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
+ struct hwc_caller_ctx *ctx;
+ int err;
+
+@@ -61,6 +85,7 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ hwc->inflight_msg_res.map)) {
+ dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+ resp_msg->response.hwc_msg_id);
++ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+ return;
+ }
+
+@@ -74,30 +99,13 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ memcpy(ctx->output_buf, resp_msg, resp_len);
+ out:
+ ctx->error = err;
+- complete(&ctx->comp_event);
+-}
+-
+-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+- struct hwc_work_request *req)
+-{
+- struct device *dev = hwc_rxq->hwc->dev;
+- struct gdma_sge *sge;
+- int err;
+-
+- sge = &req->sge;
+- sge->address = (u64)req->buf_sge_addr;
+- sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+- sge->size = req->buf_len;
+
+- memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+- req->wqe_req.sgl = sge;
+- req->wqe_req.num_sge = 1;
+- req->wqe_req.client_data_unit = 0;
++ /* Must post rx wqe before complete(), otherwise the next rx may
++	 * hit a no_wqe error.
++ */
++ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+
+- err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+- if (err)
+- dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+- return err;
++ complete(&ctx->comp_event);
+ }
+
+ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
+@@ -234,14 +242,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
+ return;
+ }
+
+- mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
++ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
+
+- /* Do no longer use 'resp', because the buffer is posted to the HW
+- * in the below mana_hwc_post_rx_wqe().
++ /* Can no longer use 'resp', because the buffer is posted to the HW
++ * in mana_hwc_handle_resp() above.
+ */
+ resp = NULL;
+-
+- mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
+ }
+
+ static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
+@@ -360,12 +366,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
+ int err;
+
+ eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
+- if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+- eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
++ if (eq_size < MANA_MIN_QSIZE)
++ eq_size = MANA_MIN_QSIZE;
+
+ cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
+- if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+- cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
++ if (cq_size < MANA_MIN_QSIZE)
++ cq_size = MANA_MIN_QSIZE;
+
+ hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
+ if (!hwc_cq)
+@@ -427,7 +433,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
+
+ dma_buf->num_reqs = q_depth;
+
+- buf_size = PAGE_ALIGN(q_depth * max_msg_size);
++ buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
+
+ gmi = &dma_buf->mem_info;
+ err = mana_gd_alloc_memory(gc, buf_size, gmi);
+@@ -495,8 +501,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
+ else
+ queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
+
+- if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
+- queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
++ if (queue_size < MANA_MIN_QSIZE)
++ queue_size = MANA_MIN_QSIZE;
+
+ hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
+ if (!hwc_wq)
+@@ -847,7 +853,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ }
+
+ if (!wait_for_completion_timeout(&ctx->comp_event,
+- (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
++ (msecs_to_jiffies(hwc->hwc_timeout)))) {
+ dev_err(hwc->dev, "HWC: Request timed out!\n");
+ err = -ETIMEDOUT;
+ goto out;
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index 48ea4aeeea5d4c..89852bbc877c11 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -599,9 +599,13 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
+ else
+ *headroom = XDP_PACKET_HEADROOM;
+
+- *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
++ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+
+- *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
++ /* Using page pool in this case, so alloc_size is PAGE_SIZE */
++ if (*alloc_size < PAGE_SIZE)
++ *alloc_size = PAGE_SIZE;
++
++ *datasize = mtu + ETH_HLEN;
+ }
+
+ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+@@ -1774,7 +1778,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
+ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+ {
+ struct mana_cq *cq = context;
+- u8 arm_bit;
+ int w;
+
+ WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
+@@ -1785,16 +1788,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+ mana_poll_tx_cq(cq);
+
+ w = cq->work_done;
+-
+- if (w < cq->budget &&
+- napi_complete_done(&cq->napi, w)) {
+- arm_bit = SET_ARM_BIT;
+- } else {
+- arm_bit = 0;
++ cq->work_done_since_doorbell += w;
++
++ if (w < cq->budget) {
++ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
++ cq->work_done_since_doorbell = 0;
++ napi_complete_done(&cq->napi, w);
++ } else if (cq->work_done_since_doorbell >
++ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
++ /* MANA hardware requires at least one doorbell ring every 8
++		 * wraparounds of the CQ even if there is no need to arm the CQ.
++ * This driver rings the doorbell as soon as we have exceeded
++ * 4 wraparounds.
++ */
++ mana_gd_ring_cq(gdma_queue, 0);
++ cq->work_done_since_doorbell = 0;
+ }
+
+- mana_gd_ring_cq(gdma_queue, arm_bit);
+-
+ return w;
+ }
+
+@@ -1848,10 +1858,12 @@ static void mana_destroy_txq(struct mana_port_context *apc)
+
+ for (i = 0; i < apc->num_queues; i++) {
+ napi = &apc->tx_qp[i].tx_cq.napi;
+- napi_synchronize(napi);
+- napi_disable(napi);
+- netif_napi_del(napi);
+-
++ if (apc->tx_qp[i].txq.napi_initialized) {
++ napi_synchronize(napi);
++ napi_disable(napi);
++ netif_napi_del(napi);
++ apc->tx_qp[i].txq.napi_initialized = false;
++ }
+ mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
+
+ mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
+@@ -1890,10 +1902,10 @@ static int mana_create_txq(struct mana_port_context *apc,
+ * to prevent overflow.
+ */
+ txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
+- BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
++ BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
+
+ cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
+- cq_size = PAGE_ALIGN(cq_size);
++ cq_size = MANA_PAGE_ALIGN(cq_size);
+
+ gc = gd->gdma_context;
+
+@@ -1907,6 +1919,7 @@ static int mana_create_txq(struct mana_port_context *apc,
+ txq->ndev = net;
+ txq->net_txq = netdev_get_tx_queue(net, i);
+ txq->vp_offset = apc->tx_vp_offset;
++ txq->napi_initialized = false;
+ skb_queue_head_init(&txq->pending_skbs);
+
+ memset(&spec, 0, sizeof(spec));
+@@ -1973,6 +1986,7 @@ static int mana_create_txq(struct mana_port_context *apc,
+
+ netif_napi_add_tx(net, &cq->napi, mana_poll);
+ napi_enable(&cq->napi);
++ txq->napi_initialized = true;
+
+ mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
+ }
+@@ -1984,7 +1998,7 @@ static int mana_create_txq(struct mana_port_context *apc,
+ }
+
+ static void mana_destroy_rxq(struct mana_port_context *apc,
+- struct mana_rxq *rxq, bool validate_state)
++ struct mana_rxq *rxq, bool napi_initialized)
+
+ {
+ struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
+@@ -1999,15 +2013,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
+
+ napi = &rxq->rx_cq.napi;
+
+- if (validate_state)
++ if (napi_initialized) {
+ napi_synchronize(napi);
+
+- napi_disable(napi);
++ napi_disable(napi);
+
++ netif_napi_del(napi);
++ }
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+- netif_napi_del(napi);
+-
+ mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
+
+ mana_deinit_cq(apc, &rxq->rx_cq);
+@@ -2189,8 +2203,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
+ if (err)
+ goto out;
+
+- rq_size = PAGE_ALIGN(rq_size);
+- cq_size = PAGE_ALIGN(cq_size);
++ rq_size = MANA_PAGE_ALIGN(rq_size);
++ cq_size = MANA_PAGE_ALIGN(cq_size);
+
+ /* Create RQ */
+ memset(&spec, 0, sizeof(spec));
+@@ -2752,6 +2766,8 @@ static int add_adev(struct gdma_dev *gd)
+ if (ret)
+ goto init_fail;
+
++ /* madev is owned by the auxiliary device */
++ madev = NULL;
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ goto add_fail;
+diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
+index 5553af9c8085a1..0f1679ebad96ba 100644
+--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
+@@ -6,6 +6,7 @@
+ #include <linux/io.h>
+ #include <linux/mm.h>
+
++#include <net/mana/gdma.h>
+ #include <net/mana/shm_channel.h>
+
+ #define PAGE_FRAME_L48_WIDTH_BYTES 6
+@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+ return err;
+ }
+
+- if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
+- !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
++ if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
++ !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
+ return -EINVAL;
+
+ if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
+@@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* EQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(eq_addr);
++ frame_addr = MANA_PFN(eq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+@@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* CQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(cq_addr);
++ frame_addr = MANA_PFN(cq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+@@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* RQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(rq_addr);
++ frame_addr = MANA_PFN(rq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+@@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
+
+ /* SQ addr: low 48 bits of frame address */
+ shmem = (u64 *)ptr;
+- frame_addr = PHYS_PFN(sq_addr);
++ frame_addr = MANA_PFN(sq_addr);
+ *shmem = frame_addr & PAGE_FRAME_L48_MASK;
+ all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
+ (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 56ccbd4c37fe6d..c2118bde908b11 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1099,6 +1099,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ }
+ EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+
++void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp)
++ __acquires(&ocelot->inj_lock)
++{
++ spin_lock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp);
++
++void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp)
++ __releases(&ocelot->inj_lock)
++{
++ spin_unlock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp);
++
++void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp)
++ __acquires(&ocelot->inj_lock)
++{
++ spin_lock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp);
++
++void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp)
++ __releases(&ocelot->inj_lock)
++{
++ spin_unlock(&ocelot->inj_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp);
++
++void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp)
++ __acquires(&ocelot->xtr_lock)
++{
++ spin_lock_bh(&ocelot->xtr_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh);
++
++void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp)
++ __releases(&ocelot->xtr_lock)
++{
++ spin_unlock_bh(&ocelot->xtr_lock);
++}
++EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh);
++
+ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+ {
+ u64 timestamp, src_port, len;
+@@ -1109,6 +1151,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+ u32 val, *buf;
+ int err;
+
++ lockdep_assert_held(&ocelot->xtr_lock);
++
+ err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
+ if (err)
+ return err;
+@@ -1184,6 +1228,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
+ {
+ u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+
++ lockdep_assert_held(&ocelot->inj_lock);
++
+ if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
+ return false;
+ if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
+@@ -1193,28 +1239,55 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
+ }
+ EXPORT_SYMBOL(ocelot_can_inject);
+
+-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
++/**
++ * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
++ * @ifh: Pointer to Injection Frame Header memory
++ * @ocelot: Switch private data structure
++ * @port: Egress port number
++ * @rew_op: Egress rewriter operation for PTP
++ * @skb: Pointer to socket buffer (packet)
++ *
++ * Populate the Injection Frame Header with basic information for this skb: the
++ * analyzer bypass bit, destination port, VLAN info, egress rewriter info.
++ */
++void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
++ u32 rew_op, struct sk_buff *skb)
+ {
++ struct ocelot_port *ocelot_port = ocelot->ports[port];
++ struct net_device *dev = skb->dev;
++ u64 vlan_tci, tag_type;
++ int qos_class;
++
++ ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci,
++ &tag_type);
++
++ qos_class = netdev_get_num_tc(dev) ?
++ netdev_get_prio_tc_map(dev, skb->priority) : skb->priority;
++
++ memset(ifh, 0, OCELOT_TAG_LEN);
+ ocelot_ifh_set_bypass(ifh, 1);
++ ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+- if (vlan_tag)
+- ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
++ ocelot_ifh_set_qos_class(ifh, qos_class);
++ ocelot_ifh_set_tag_type(ifh, tag_type);
++ ocelot_ifh_set_vlan_tci(ifh, vlan_tci);
+ if (rew_op)
+ ocelot_ifh_set_rew_op(ifh, rew_op);
+ }
+-EXPORT_SYMBOL(ocelot_ifh_port_set);
++EXPORT_SYMBOL(ocelot_ifh_set_basic);
+
+ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ u32 rew_op, struct sk_buff *skb)
+ {
+- u32 ifh[OCELOT_TAG_LEN / 4] = {0};
++ u32 ifh[OCELOT_TAG_LEN / 4];
+ unsigned int i, count, last;
+
++ lockdep_assert_held(&ocelot->inj_lock);
++
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
++ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
+ ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+@@ -1247,6 +1320,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame);
+
+ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
+ {
++ lockdep_assert_held(&ocelot->xtr_lock);
++
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ }
+@@ -2929,6 +3004,8 @@ int ocelot_init(struct ocelot *ocelot)
+ mutex_init(&ocelot->fwd_domain_lock);
+ spin_lock_init(&ocelot->ptp_clock_lock);
+ spin_lock_init(&ocelot->ts_id_lock);
++ spin_lock_init(&ocelot->inj_lock);
++ spin_lock_init(&ocelot->xtr_lock);
+
+ ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
+ if (!ocelot->owq)
+diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
+index 312a468321544d..00326ae8c708b0 100644
+--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
++++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
+@@ -665,8 +665,7 @@ static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
+
+ ifh = skb_push(skb, OCELOT_TAG_LEN);
+ skb_put(skb, ETH_FCS_LEN);
+- memset(ifh, 0, OCELOT_TAG_LEN);
+- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
++ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
+index 5c55197c7327df..c018783757fb2f 100644
+--- a/drivers/net/ethernet/mscc/ocelot_stats.c
++++ b/drivers/net/ethernet/mscc/ocelot_stats.c
+@@ -582,10 +582,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
+- rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255];
+- rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511];
+- rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023];
+- rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
++ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
++ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
++ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
++ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
+ }
+
+ static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
+@@ -610,10 +610,10 @@ static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255];
+- rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255];
+- rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511];
+- rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023];
+- rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526];
++ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511];
++ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023];
++ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526];
++ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX];
+ }
+
+ void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+index 151b4246534839..bc20bd1ef05c77 100644
+--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
++++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+@@ -51,6 +51,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+ struct ocelot *ocelot = arg;
+ int grp = 0, err;
+
++ ocelot_lock_xtr_grp(ocelot, grp);
++
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
+ struct sk_buff *skb;
+
+@@ -69,6 +71,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+ if (err < 0)
+ ocelot_drain_cpu_queue(ocelot, 0);
+
++ ocelot_unlock_xtr_grp(ocelot, grp);
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+index 2967bab7250561..15180538b80a15 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+@@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_
+ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ return;
+
++ /* Both struct tcphdr and struct udphdr start with
++ * __be16 source;
++ * __be16 dest;
++ * so we can use the same code for both.
++ */
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+- mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
+- mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
++ if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) {
++ mangle_action->mangle.val =
++ (__force u32)cpu_to_be32(mangle_action->mangle.val << 16);
++			/* The mask of the mangle action is an inverse mask,
++			 * so clear the dest tp port with 0xFFFF
++			 * instead of using a rotate-left operation.
++ */
++ mangle_action->mangle.mask =
++ (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF);
++ }
++ if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) {
++ mangle_action->mangle.offset = 0;
++ mangle_action->mangle.val =
++ (__force u32)cpu_to_be32(mangle_action->mangle.val);
++ mangle_action->mangle.mask =
++ (__force u32)cpu_to_be32(mangle_action->mangle.mask);
++ }
+ return;
+
+ default:
+@@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_fl_ct_flow_entry *ct_entry;
++ struct flow_action_entry *ct_goto;
+ struct nfp_fl_ct_zone_entry *zt;
++ struct flow_action_entry *act;
+ bool wildcarded = false;
+ struct flow_match_ct ct;
+- struct flow_action_entry *ct_goto;
++ int i;
++
++ flow_action_for_each(i, act, &rule->action) {
++ switch (act->id) {
++ case FLOW_ACTION_REDIRECT:
++ case FLOW_ACTION_REDIRECT_INGRESS:
++ case FLOW_ACTION_MIRRED:
++ case FLOW_ACTION_MIRRED_INGRESS:
++ if (act->dev->rtnl_link_ops &&
++ !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) {
++ NL_SET_ERR_MSG_MOD(extack,
++ "unsupported offload: out port is openvswitch internal port");
++ return -EOPNOTSUPP;
++ }
++ break;
++ default:
++ break;
++ }
++ }
+
+ flow_rule_match_ct(rule, &ct);
+ if (!ct.mask->ct_zone) {
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+index 88d6d992e7d07d..86db8e81414077 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+@@ -338,6 +338,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
+
+ acti_netdevs = kmalloc_array(entry->slave_cnt,
+ sizeof(*acti_netdevs), GFP_KERNEL);
++ if (!acti_netdevs) {
++ schedule_delayed_work(&lag->work,
++ NFP_FL_LAG_DELAY);
++ continue;
++ }
+
+ /* Include sanity check in the loop. It may be that a bond has
+ * changed between processing the last notification and the
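
The lag_conf fix handles kmalloc_array() failure by rescheduling the delayed work instead of dereferencing a NULL array. A sketch of that retry shape, with hypothetical structure and delay names:

static void lag_do_work(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct my_lag *lag = container_of(dw, struct my_lag, work);
	struct net_device **netdevs;

	netdevs = kmalloc_array(lag->slave_cnt, sizeof(*netdevs), GFP_KERNEL);
	if (!netdevs) {
		/* Retry on the next tick rather than crash or give up. */
		schedule_delayed_work(&lag->work, MY_LAG_DELAY);
		return;
	}

	/* ... populate and use netdevs ... */
	kfree(netdevs);
}
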
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+index 060a77f2265d9a..0d7d138d6e0d7e 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -160,6 +160,18 @@ struct nfp_tun_mac_addr_offload {
+ u8 addr[ETH_ALEN];
+ };
+
++/**
++ * struct nfp_neigh_update_work - update neighbour information to nfp
++ * @work: Work queue for writing neigh to the nfp
++ * @n: neighbour entry
++ * @app: Back pointer to app
++ */
++struct nfp_neigh_update_work {
++ struct work_struct work;
++ struct neighbour *n;
++ struct nfp_app *app;
++};
++
+ enum nfp_flower_mac_offload_cmd {
+ NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
+@@ -607,38 +619,30 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
+ nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
+ }
+
+-static int
+-nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+- void *ptr)
++static void
++nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work)
+ {
+- struct nfp_flower_priv *app_priv;
+- struct netevent_redirect *redir;
+- struct neighbour *n;
++ neigh_release(update_work->n);
++ kfree(update_work);
++}
++
++static void nfp_tun_neigh_update(struct work_struct *work)
++{
++ struct nfp_neigh_update_work *update_work;
+ struct nfp_app *app;
++ struct neighbour *n;
+ bool neigh_invalid;
+ int err;
+
+- switch (event) {
+- case NETEVENT_REDIRECT:
+- redir = (struct netevent_redirect *)ptr;
+- n = redir->neigh;
+- break;
+- case NETEVENT_NEIGH_UPDATE:
+- n = (struct neighbour *)ptr;
+- break;
+- default:
+- return NOTIFY_DONE;
+- }
+-
+- neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
+-
+- app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
+- app = app_priv->app;
++ update_work = container_of(work, struct nfp_neigh_update_work, work);
++ app = update_work->app;
++ n = update_work->n;
+
+ if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
+- return NOTIFY_DONE;
++ goto out;
+
+ #if IS_ENABLED(CONFIG_INET)
++ neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
+ if (n->tbl->family == AF_INET6) {
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct flowi6 flow6 = {};
+@@ -655,13 +659,11 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
+ &flow6, NULL);
+ if (IS_ERR(dst))
+- return NOTIFY_DONE;
++ goto out;
+
+ dst_release(dst);
+ }
+ nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
+-#else
+- return NOTIFY_DONE;
+ #endif /* CONFIG_IPV6 */
+ } else {
+ struct flowi4 flow4 = {};
+@@ -678,17 +680,71 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ rt = ip_route_output_key(dev_net(n->dev), &flow4);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+- return NOTIFY_DONE;
++ goto out;
+
+ ip_rt_put(rt);
+ }
+ nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
+ }
+-#else
+- return NOTIFY_DONE;
+ #endif /* CONFIG_INET */
++out:
++ nfp_tun_release_neigh_update_work(update_work);
++}
+
+- return NOTIFY_OK;
++static struct nfp_neigh_update_work *
++nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n)
++{
++ struct nfp_neigh_update_work *update_work;
++
++ update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC);
++ if (!update_work)
++ return NULL;
++
++ INIT_WORK(&update_work->work, nfp_tun_neigh_update);
++ neigh_hold(n);
++ update_work->n = n;
++ update_work->app = app;
++
++ return update_work;
++}
++
++static int
++nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
++ void *ptr)
++{
++ struct nfp_neigh_update_work *update_work;
++ struct nfp_flower_priv *app_priv;
++ struct netevent_redirect *redir;
++ struct neighbour *n;
++ struct nfp_app *app;
++
++ switch (event) {
++ case NETEVENT_REDIRECT:
++ redir = (struct netevent_redirect *)ptr;
++ n = redir->neigh;
++ break;
++ case NETEVENT_NEIGH_UPDATE:
++ n = (struct neighbour *)ptr;
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++#if IS_ENABLED(CONFIG_IPV6)
++ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
++#else
++ if (n->tbl != &arp_tbl)
++#endif
++ return NOTIFY_DONE;
++
++ app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
++ app = app_priv->app;
++ update_work = nfp_tun_alloc_neigh_update_work(app, n);
++ if (!update_work)
++ return NOTIFY_DONE;
++
++ queue_work(system_highpri_wq, &update_work->work);
++
++ return NOTIFY_DONE;
+ }
+
+ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
+@@ -706,6 +762,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+ if (!netdev)
+ goto fail_rcu_unlock;
++ dev_hold(netdev);
+
+ flow.daddr = payload->ipv4_addr;
+ flow.flowi4_proto = IPPROTO_UDP;
+@@ -725,13 +782,16 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
+ ip_rt_put(rt);
+ if (!n)
+ goto fail_rcu_unlock;
++ rcu_read_unlock();
++
+ nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
+ neigh_release(n);
+- rcu_read_unlock();
++ dev_put(netdev);
+ return;
+
+ fail_rcu_unlock:
+ rcu_read_unlock();
++ dev_put(netdev);
+ nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+ }
+
+@@ -749,6 +809,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
+ if (!netdev)
+ goto fail_rcu_unlock;
++ dev_hold(netdev);
+
+ flow.daddr = payload->ipv6_addr;
+ flow.flowi6_proto = IPPROTO_UDP;
+@@ -766,14 +827,16 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
+ dst_release(dst);
+ if (!n)
+ goto fail_rcu_unlock;
++ rcu_read_unlock();
+
+ nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
+ neigh_release(n);
+- rcu_read_unlock();
++ dev_put(netdev);
+ return;
+
+ fail_rcu_unlock:
+ rcu_read_unlock();
++ dev_put(netdev);
+ nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
+ }
+
+@@ -1021,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
+ u16 nfp_mac_idx = 0;
+
+ entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
++ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
+ if (entry->bridge_count ||
+ !nfp_flower_is_supported_bridge(netdev)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
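
The tunnel_conf rework above moves the heavy route lookups out of the atomic neighbour-notifier path into a work item: the item is allocated GFP_ATOMIC, pins the neighbour with neigh_hold() until the handler has run, and is queued on system_highpri_wq. A condensed sketch of the deferral pattern (struct and function names are illustrative):

struct neigh_update_work {
	struct work_struct work;
	struct neighbour *n;
};

static void neigh_update_fn(struct work_struct *work)
{
	struct neigh_update_work *uw =
		container_of(work, struct neigh_update_work, work);

	/* Process context: route lookups and sleeping allocations are OK. */
	neigh_release(uw->n);
	kfree(uw);
}

static int neigh_event(struct notifier_block *nb, unsigned long event,
		       void *ptr)
{
	struct neighbour *n = ptr;
	struct neigh_update_work *uw = kzalloc(sizeof(*uw), GFP_ATOMIC);

	if (!uw)
		return NOTIFY_DONE;

	INIT_WORK(&uw->work, neigh_update_fn);
	neigh_hold(n);		/* keep the entry alive until the work runs */
	uw->n = n;
	queue_work(system_highpri_wq, &uw->work);

	return NOTIFY_DONE;
}
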
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index de0a5d5ded305b..fceb4abea2365d 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -821,14 +821,13 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+
+ snprintf(r_vec->name, sizeof(r_vec->name),
+ "%s-rxtx-%d", nfp_net_name(nn), idx);
+- err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
+- r_vec);
++ err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
++ r_vec->name, r_vec);
+ if (err) {
+ nfp_net_napi_del(&nn->dp, r_vec);
+ nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
+ return err;
+ }
+- disable_irq(r_vec->irq_vector);
+
+ irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
+
+@@ -2588,6 +2587,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
+ case NFP_NFD_VER_NFD3:
+ netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
++ netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ break;
+ case NFP_NFD_VER_NFDK:
+ netdev->netdev_ops = &nfp_nfdk_netdev_ops;
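
The nfp_net_common change relies on IRQF_NO_AUTOEN: request_irq() normally enables the line immediately, so the old request_irq()-then-disable_irq() sequence left a window in which the handler could run against a half-initialised vector. With the flag, the interrupt is registered disabled and enabled explicitly once setup is complete:

/* Register disabled; enable only when everything the handler touches
 * is ready.
 */
err = request_irq(irq, my_handler, IRQF_NO_AUTOEN, name, priv);
if (err)
	return err;

/* ... finish ring/NAPI initialisation ... */

enable_irq(irq);
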
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+index 33b4c28563162e..3f10c5365c80eb 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
+ const u32 barcfg_msix_general =
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
+- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
+ const u32 barcfg_msix_xpb =
+ NFP_PCIE_BAR_PCIE2CPP_MapType(
+ NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
+- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
+ NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
+ NFP_CPP_TARGET_ISLAND_XPB);
+ const u32 barcfg_explicit[4] = {
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+index d6ce113a4210b5..35099ad5eccc8c 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+@@ -215,9 +215,16 @@ static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs)
+
+ static void ionic_clear_pci(struct ionic *ionic)
+ {
++ ionic->idev.dev_info_regs = NULL;
++ ionic->idev.dev_cmd_regs = NULL;
++ ionic->idev.intr_status = NULL;
++ ionic->idev.intr_ctrl = NULL;
++
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+- pci_disable_device(ionic->pdev);
++
++ if (pci_is_enabled(ionic->pdev))
++ pci_disable_device(ionic->pdev);
+ }
+
+ static int ionic_setup_one(struct ionic *ionic)
+@@ -392,6 +399,10 @@ static void ionic_remove(struct pci_dev *pdev)
+ del_timer_sync(&ionic->watchdog_timer);
+
+ if (ionic->lif) {
++ /* prevent adminq cmds if already known as down */
++ if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
++ set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
++
+ ionic_lif_unregister(ionic->lif);
+ ionic_devlink_unregister(ionic);
+ ionic_lif_deinit(ionic->lif);
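
The ionic_clear_pci() change makes teardown idempotent: cached register pointers are cleared first so any late reader sees NULL rather than a stale mapping, and pci_disable_device() is skipped if the function is already disabled, since the path can be reached from both remove and firmware-reset error handling. A sketch with hypothetical field names:

static void clear_pci(struct my_dev *d)
{
	/* Readers are expected to check these for NULL (see the
	 * ionic_dev.c hunks below).
	 */
	d->regs = NULL;

	iounmap(d->bar0);
	pci_release_regions(d->pdev);

	if (pci_is_enabled(d->pdev))
		pci_disable_device(d->pdev);
}
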
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+index c06576f4391614..e242166f0afe7c 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+@@ -165,9 +165,19 @@ void ionic_dev_teardown(struct ionic *ionic)
+ }
+
+ /* Devcmd Interface */
+-bool ionic_is_fw_running(struct ionic_dev *idev)
++static bool __ionic_is_fw_running(struct ionic_dev *idev, u8 *status_ptr)
+ {
+- u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
++ u8 fw_status;
++
++ if (!idev->dev_info_regs) {
++ if (status_ptr)
++ *status_ptr = 0xff;
++ return false;
++ }
++
++ fw_status = ioread8(&idev->dev_info_regs->fw_status);
++ if (status_ptr)
++ *status_ptr = fw_status;
+
+ /* firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+@@ -175,6 +185,11 @@ bool ionic_is_fw_running(struct ionic_dev *idev)
+ return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
+ }
+
++bool ionic_is_fw_running(struct ionic_dev *idev)
++{
++ return __ionic_is_fw_running(idev, NULL);
++}
++
+ int ionic_heartbeat_check(struct ionic *ionic)
+ {
+ unsigned long check_time, last_check_time;
+@@ -199,10 +214,8 @@ int ionic_heartbeat_check(struct ionic *ionic)
+ goto do_check_time;
+ }
+
+- fw_status = ioread8(&idev->dev_info_regs->fw_status);
+-
+ /* If fw_status is not ready don't bother with the generation */
+- if (!ionic_is_fw_running(idev)) {
++ if (!__ionic_is_fw_running(idev, &fw_status)) {
+ fw_status_ready = false;
+ } else {
+ fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
+@@ -306,21 +319,32 @@ int ionic_heartbeat_check(struct ionic *ionic)
+
+ u8 ionic_dev_cmd_status(struct ionic_dev *idev)
+ {
++ if (!idev->dev_cmd_regs)
++ return (u8)PCI_ERROR_RESPONSE;
+ return ioread8(&idev->dev_cmd_regs->comp.comp.status);
+ }
+
+ bool ionic_dev_cmd_done(struct ionic_dev *idev)
+ {
++ if (!idev->dev_cmd_regs)
++ return false;
+ return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
+ }
+
+ void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
+ {
++ if (!idev->dev_cmd_regs)
++ return;
+ memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
+ }
+
+ void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
+ {
++ idev->opcode = cmd->cmd.opcode;
++
++ if (!idev->dev_cmd_regs)
++ return;
++
+ memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
+ iowrite32(0, &idev->dev_cmd_regs->done);
+ iowrite32(1, &idev->dev_cmd_regs->doorbell);
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index aae4131f146a88..23f9d3b8029a96 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -152,6 +152,7 @@ struct ionic_dev {
+ bool fw_hb_ready;
+ bool fw_status_ready;
+ u8 fw_generation;
++ u8 opcode;
+
+ u64 __iomem *db_pages;
+ dma_addr_t phy_db_pages;
+@@ -222,7 +223,7 @@ struct ionic_desc_info {
+ void *cb_arg;
+ };
+
+-#define IONIC_QUEUE_NAME_MAX_SZ 32
++#define IONIC_QUEUE_NAME_MAX_SZ 16
+
+ struct ionic_queue {
+ struct device *dev;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index 3a6b0a9bc24147..35829a2851fa7a 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+ {
+ struct ionic_lif *lif = netdev_priv(netdev);
++ struct ionic_dev *idev;
+ unsigned int offset;
+ unsigned int size;
+
+ regs->version = IONIC_DEV_CMD_REG_VERSION;
+
++ idev = &lif->ionic->idev;
++ if (!idev->dev_info_regs)
++ return;
++
+ offset = 0;
+ size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
+
+ offset += size;
+ size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
+- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
++ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
+ }
+
+ static void ionic_get_link_ext_stats(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+index 5f40324cd243fe..3c209c1a233733 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
+ dl = priv_to_devlink(ionic);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+
++ if (!idev->dev_cmd_regs) {
++ err = -ENXIO;
++ goto err_out;
++ }
++
+ buf_sz = sizeof(idev->dev_cmd_regs->data);
+
+ netdev_dbg(netdev,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 2c3e36b2dd7f24..9d724d228b831c 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -49,24 +49,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif);
+ static void ionic_dim_work(struct work_struct *work)
+ {
+ struct dim *dim = container_of(work, struct dim, work);
++ struct ionic_intr_info *intr;
+ struct dim_cq_moder cur_moder;
+ struct ionic_qcq *qcq;
++ struct ionic_lif *lif;
+ u32 new_coal;
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ qcq = container_of(dim, struct ionic_qcq, dim);
+- new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
++ lif = qcq->q.lif;
++ new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
+ new_coal = new_coal ? new_coal : 1;
+
+- if (qcq->intr.dim_coal_hw != new_coal) {
+- unsigned int qi = qcq->cq.bound_q->index;
+- struct ionic_lif *lif = qcq->q.lif;
+-
+- qcq->intr.dim_coal_hw = new_coal;
++ intr = &qcq->intr;
++ if (intr->dim_coal_hw != new_coal) {
++ intr->dim_coal_hw = new_coal;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+- lif->rxqcqs[qi]->intr.index,
+- qcq->intr.dim_coal_hw);
++ intr->index, intr->dim_coal_hw);
+ }
+
+ dim->state = DIM_START_MEASURE;
+@@ -234,7 +234,7 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
+ name = dev_name(dev);
+
+ snprintf(intr->name, sizeof(intr->name),
+- "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
++ "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
+
+ return devm_request_irq(dev, intr->vector, ionic_isr,
+ 0, intr->name, &qcq->napi);
+@@ -296,10 +296,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
+ if (ret)
+ return ret;
+
+- if (qcq->napi.poll)
+- napi_enable(&qcq->napi);
+-
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
++ napi_enable(&qcq->napi);
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
+ ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+@@ -3238,6 +3236,9 @@ static void ionic_lif_reset(struct ionic_lif *lif)
+ {
+ struct ionic_dev *idev = &lif->ionic->idev;
+
++ if (!ionic_is_fw_running(idev))
++ return;
++
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_lif_reset(idev, lif->index);
+ ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+@@ -3465,9 +3466,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
+
+ napi_enable(&qcq->napi);
+
+- if (qcq->flags & IONIC_QCQ_F_INTR)
++ if (qcq->flags & IONIC_QCQ_F_INTR) {
++ irq_set_affinity_hint(qcq->intr.vector,
++ &qcq->intr.affinity_mask);
+ ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_CLEAR);
++ }
+
+ qcq->flags |= IONIC_QCQ_F_INITED;
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+index 1dc79cecc5cc14..3ca6893d1bf26a 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
+@@ -410,28 +410,37 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ do_msg);
+ }
+
+-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
++static int __ionic_adminq_post_wait(struct ionic_lif *lif,
++ struct ionic_admin_ctx *ctx,
++ const bool do_msg)
+ {
+ int err;
+
++ if (!ionic_is_fw_running(&lif->ionic->idev))
++ return 0;
++
+ err = ionic_adminq_post(lif, ctx);
+
+- return ionic_adminq_wait(lif, ctx, err, true);
++ return ionic_adminq_wait(lif, ctx, err, do_msg);
+ }
+
+-int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
++int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+ {
+- int err;
+-
+- err = ionic_adminq_post(lif, ctx);
++ return __ionic_adminq_post_wait(lif, ctx, true);
++}
+
+- return ionic_adminq_wait(lif, ctx, err, false);
++int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
++{
++ return __ionic_adminq_post_wait(lif, ctx, false);
+ }
+
+ static void ionic_dev_cmd_clean(struct ionic *ionic)
+ {
+ struct ionic_dev *idev = &ionic->idev;
+
++ if (!idev->dev_cmd_regs)
++ return;
++
+ iowrite32(0, &idev->dev_cmd_regs->doorbell);
+ memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
+ }
+@@ -465,7 +474,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ */
+ max_wait = jiffies + (max_seconds * HZ);
+ try_again:
+- opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
++ opcode = idev->opcode;
+ start_time = jiffies;
+ for (fw_up = ionic_is_fw_running(idev);
+ !done && fw_up && time_before(jiffies, max_wait);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 65e20693c549e1..33f4f58ee51c68 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+ p_dma->virt_addr = NULL;
+ }
+ kfree(p_mngr->ilt_shadow);
++ p_mngr->ilt_shadow = NULL;
+ }
+
+ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index c278f8893042b3..8159b4c315b5d8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1206,7 +1206,6 @@ static void qed_slowpath_task(struct work_struct *work)
+ static int qed_slowpath_wq_start(struct qed_dev *cdev)
+ {
+ struct qed_hwfn *hwfn;
+- char name[NAME_SIZE];
+ int i;
+
+ if (IS_VF(cdev))
+@@ -1215,11 +1214,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+
+- snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+- cdev->pdev->bus->number,
+- PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
++ hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
++ 0, 0, cdev->pdev->bus->number,
++ PCI_SLOT(cdev->pdev->devfn),
++ hwfn->abs_pf_id);
+
+- hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ if (!hwfn->slowpath_wq) {
+ DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ return -ENOMEM;
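
The qed change uses the fact that alloc_workqueue() takes a printf-style format string plus varargs, so the bounded snprintf() into a stack buffer (and the silent truncation it risked) can be dropped:

/* alloc_workqueue(fmt, flags, max_active, ...) formats the name itself. */
wq = alloc_workqueue("slowpath-%02x:%02x.%02x", 0, 0,
		     bus_nr, slot_nr, pf_id);
if (!wq)
	return -ENOMEM;
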
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index a5ac21a0ee33ff..cb6b33a228ea20 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct flow_cls_offload *f)
+ {
+ struct qede_arfs_fltr_node *n;
+- int min_hlen, rc = -EINVAL;
+ struct qede_arfs_tuple t;
++ int min_hlen, rc;
+
+ __qede_lock(edev);
+
+@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ }
+
+ /* parse flower attribute and prepare filter */
+- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
++ rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
++ if (rc)
+ goto unlock;
+
+ /* Validate profile mode and number of filters */
+@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ DP_NOTICE(edev,
+ "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+ t.mode, edev->arfs->mode, edev->arfs->filter_count);
++ rc = -EINVAL;
+ goto unlock;
+ }
+
+ /* parse tc actions and get the vf_id */
+- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
++ rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
++ if (rc)
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
+- err = -EINVAL;
++ err = qede_parse_flow_attr(edev, proto, flow->rule, t);
++ if (err)
+ goto err_out;
+- }
+
+ /* Make sure location is valid and filter isn't already set */
+ err = qede_flow_spec_validate(edev, &flow->rule->action, t,
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index 0d57ffcedf0c6b..fc78bc959ded81 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+
+ if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "lBufQ failed\n");
++ kfree(qdev->lrg_buf);
+ return -ENOMEM;
+ }
+ qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+@@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
++ kfree(qdev->lrg_buf);
+ return -ENOMEM;
+ }
+
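
The qla3xxx fix releases qdev->lrg_buf, allocated earlier in the same function, on both later failure paths. The conventional way to avoid this class of leak is goto-based unwinding, where each exit frees exactly what was acquired before it; a generic sketch:

static int alloc_buffer_queues(struct my_adapter *a)
{
	a->bufs = kmalloc_array(a->num_bufs, sizeof(*a->bufs), GFP_KERNEL);
	if (!a->bufs)
		return -ENOMEM;

	a->ring = dma_alloc_coherent(&a->pdev->dev, a->ring_size,
				     &a->ring_dma, GFP_KERNEL);
	if (!a->ring)
		goto err_free_bufs;

	return 0;

err_free_bufs:
	kfree(a->bufs);
	a->bufs = NULL;
	return -ENOMEM;
}
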
+diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
+index 6f2fa2a42770aa..2ac1b1b96e6a41 100644
+--- a/drivers/net/ethernet/qualcomm/qca_debug.c
++++ b/drivers/net/ethernet/qualcomm/qca_debug.c
+@@ -30,6 +30,8 @@
+
+ #define QCASPI_MAX_REGS 0x20
+
++#define QCASPI_RX_MAX_FRAMES 4
++
+ static const u16 qcaspi_spi_regs[] = {
+ SPI_REG_BFR_SIZE,
+ SPI_REG_WRBUF_SPC_AVA,
+@@ -109,10 +111,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
+
+ seq_printf(s, "IRQ : %d\n",
+ qca->spi_dev->irq);
+- seq_printf(s, "INTR REQ : %u\n",
+- qca->intr_req);
+- seq_printf(s, "INTR SVC : %u\n",
+- qca->intr_svc);
++ seq_printf(s, "INTR : %lx\n",
++ qca->intr);
+
+ seq_printf(s, "SPI max speed : %lu\n",
+ (unsigned long)qca->spi_dev->max_speed_hz);
+@@ -252,9 +252,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ {
+ struct qcaspi *qca = netdev_priv(dev);
+
+- ring->rx_max_pending = 4;
++ ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
+ ring->tx_max_pending = TX_RING_MAX_LEN;
+- ring->rx_pending = 4;
++ ring->rx_pending = QCASPI_RX_MAX_FRAMES;
+ ring->tx_pending = qca->txr.count;
+ }
+
+@@ -263,22 +263,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+ {
+- const struct net_device_ops *ops = dev->netdev_ops;
+ struct qcaspi *qca = netdev_priv(dev);
+
+- if ((ring->rx_pending) ||
++ if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
+ (ring->rx_mini_pending) ||
+ (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+- if (netif_running(dev))
+- ops->ndo_stop(dev);
++ if (qca->spi_thread)
++ kthread_park(qca->spi_thread);
+
+ qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
+ qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+
+- if (netif_running(dev))
+- ops->ndo_open(dev);
++ if (qca->spi_thread)
++ kthread_unpark(qca->spi_thread);
+
+ return 0;
+ }
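
qcaspi_set_ringparam() previously bounced the whole interface through ndo_stop()/ndo_open() just to resize the TX ring. Parking the SPI worker thread quiesces it only for as long as the resize takes; the thread itself handles carrier state around kthread_parkme() (see the qca_spi.c hunk below). A sketch of the caller side:

/* Quiesce the worker across the reconfiguration instead of a full
 * interface down/up cycle.
 */
if (priv->spi_thread)
	kthread_park(priv->spi_thread);

priv->txr.count = clamp_t(u32, ring->tx_pending,
			  TX_RING_MIN_LEN, TX_RING_MAX_LEN);

if (priv->spi_thread)
	kthread_unpark(priv->spi_thread);
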
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index bec723028e96c9..b697a9e6face6a 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -48,6 +48,8 @@
+
+ #define MAX_DMA_BURST_LEN 5000
+
++#define SPI_INTR 0
++
+ /* Modules parameters */
+ #define QCASPI_CLK_SPEED_MIN 1000000
+ #define QCASPI_CLK_SPEED_MAX 16000000
+@@ -580,14 +582,26 @@ qcaspi_spi_thread(void *data)
+ netdev_info(qca->net_dev, "SPI thread created\n");
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- if ((qca->intr_req == qca->intr_svc) &&
++ if (kthread_should_park()) {
++ netif_tx_disable(qca->net_dev);
++ netif_carrier_off(qca->net_dev);
++ qcaspi_flush_tx_ring(qca);
++ kthread_parkme();
++ if (qca->sync == QCASPI_SYNC_READY) {
++ netif_carrier_on(qca->net_dev);
++ netif_wake_queue(qca->net_dev);
++ }
++ continue;
++ }
++
++ if (!test_bit(SPI_INTR, &qca->intr) &&
+ !qca->txr.skb[qca->txr.head])
+ schedule();
+
+ set_current_state(TASK_RUNNING);
+
+- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
+- qca->intr_req - qca->intr_svc,
++ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
++ qca->intr,
+ qca->txr.skb[qca->txr.head]);
+
+ qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
+@@ -601,18 +615,23 @@ qcaspi_spi_thread(void *data)
+ msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
+ }
+
+- if (qca->intr_svc != qca->intr_req) {
+- qca->intr_svc = qca->intr_req;
++ if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
+ start_spi_intr_handling(qca, &intr_cause);
+
+ if (intr_cause & SPI_INT_CPU_ON) {
+ qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
+
++ /* Frame decoding in progress */
++ if (qca->frm_handle.state != qca->frm_handle.init)
++ qca->net_dev->stats.rx_dropped++;
++
++ qcafrm_fsm_init_spi(&qca->frm_handle);
++ qca->stats.device_reset++;
++
+ /* not synced. */
+ if (qca->sync != QCASPI_SYNC_READY)
+ continue;
+
+- qca->stats.device_reset++;
+ netif_wake_queue(qca->net_dev);
+ netif_carrier_on(qca->net_dev);
+ }
+@@ -658,7 +677,7 @@ qcaspi_intr_handler(int irq, void *data)
+ {
+ struct qcaspi *qca = data;
+
+- qca->intr_req++;
++ set_bit(SPI_INTR, &qca->intr);
+ if (qca->spi_thread)
+ wake_up_process(qca->spi_thread);
+
+@@ -674,8 +693,7 @@ qcaspi_netdev_open(struct net_device *dev)
+ if (!qca)
+ return -EINVAL;
+
+- qca->intr_req = 1;
+- qca->intr_svc = 0;
++ set_bit(SPI_INTR, &qca->intr);
+ qca->sync = QCASPI_SYNC_UNKNOWN;
+ qcafrm_fsm_init_spi(&qca->frm_handle);
+
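
The qca_spi interrupt rework replaces the intr_req/intr_svc counter pair, which was compared across contexts without synchronisation, with a single bit in an unsigned long: set_bit() in the hard IRQ and test_and_clear_bit() in the thread give the same "work pending" edge atomically. A minimal sketch of the latch:

#define SPI_INTR 0	/* bit index in priv->intr */

static irqreturn_t intr_handler(int irq, void *data)
{
	struct my_priv *priv = data;

	set_bit(SPI_INTR, &priv->intr);
	if (priv->spi_thread)
		wake_up_process(priv->spi_thread);

	return IRQ_HANDLED;
}

/* Thread side: consume the latch atomically. */
if (test_and_clear_bit(SPI_INTR, &priv->intr))
	service_interrupt(priv);
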
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index 3067356106f0b7..58ad910068d4bc 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -93,8 +93,7 @@ struct qcaspi {
+ struct qcafrm_handle frm_handle;
+ struct sk_buff *rx_skb;
+
+- unsigned int intr_req;
+- unsigned int intr_svc;
++ unsigned long intr;
+ u16 reset_count;
+
+ #ifdef CONFIG_DEBUG_FS
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+index 39d24e07f30670..5b69b9268c757f 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -396,7 +396,7 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
+
+ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+ .kind = "rmnet",
+- .maxtype = __IFLA_RMNET_MAX,
++ .maxtype = IFLA_RMNET_MAX,
+ .priv_size = sizeof(struct rmnet_priv),
+ .setup = rmnet_vnd_setup,
+ .validate = rmnet_rtnl_validate,
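
The rmnet fix matters because netlink policy and attribute arrays are indexed 0..maxtype inclusive, and the double-underscore enum value is a sentinel one past the last real attribute; using it as .maxtype over-reads the policy table by one entry. The uapi convention, from include/uapi/linux/if_link.h:

enum {
	IFLA_RMNET_UNSPEC,
	IFLA_RMNET_MUX_ID,
	IFLA_RMNET_FLAGS,
	__IFLA_RMNET_MAX,			/* sentinel, not a valid attribute */
};
#define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1)	/* what .maxtype wants */
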
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 361b90007148b0..b499d8ea6d216f 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -196,6 +196,7 @@ enum rtl_registers {
+ /* No threshold before first PCI xfer */
+ #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
+ #define RX_EARLY_OFF (1 << 11)
++#define RX_PAUSE_SLOT_ON (1 << 11) /* 8125b and later */
+ #define RXCFG_DMA_SHIFT 8
+ /* Unlimited maximum PCI burst. */
+ #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
+@@ -565,7 +566,34 @@ struct rtl8169_counters {
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+- __le16 tx_underun;
++ __le16 tx_underrun;
++ /* new since RTL8125 */
++ __le64 tx_octets;
++ __le64 rx_octets;
++ __le64 rx_multicast64;
++ __le64 tx_unicast64;
++ __le64 tx_broadcast64;
++ __le64 tx_multicast64;
++ __le32 tx_pause_on;
++ __le32 tx_pause_off;
++ __le32 tx_pause_all;
++ __le32 tx_deferred;
++ __le32 tx_late_collision;
++ __le32 tx_all_collision;
++ __le32 tx_aborted32;
++ __le32 align_errors32;
++ __le32 rx_frame_too_long;
++ __le32 rx_runt;
++ __le32 rx_pause_on;
++ __le32 rx_pause_off;
++ __le32 rx_pause_all;
++ __le32 rx_unknown_opcode;
++ __le32 rx_mac_error;
++ __le32 tx_underrun32;
++ __le32 rx_mac_missed;
++ __le32 rx_tcam_dropped;
++ __le32 tdu;
++ __le32 rdu;
+ };
+
+ struct rtl8169_tc_offsets {
+@@ -579,6 +607,7 @@ struct rtl8169_tc_offsets {
+ enum rtl_flag {
+ RTL_FLAG_TASK_ENABLED = 0,
+ RTL_FLAG_TASK_RESET_PENDING,
++ RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
+ RTL_FLAG_TASK_TX_TIMEOUT,
+ RTL_FLAG_MAX
+ };
+@@ -624,6 +653,7 @@ struct rtl8169_private {
+
+ unsigned supports_gmii:1;
+ unsigned aspm_manageable:1;
++ unsigned dash_enabled:1;
+ dma_addr_t counters_phys_addr;
+ struct rtl8169_counters *counters;
+ struct rtl8169_tc_offsets tc_offset;
+@@ -1198,17 +1228,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+ RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
+ }
+
++static void rtl_dash_loop_wait(struct rtl8169_private *tp,
++ const struct rtl_cond *c,
++ unsigned long usecs, int n, bool high)
++{
++ if (!tp->dash_enabled)
++ return;
++ rtl_loop_wait(tp, c, usecs, n, high);
++}
++
++static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
++ const struct rtl_cond *c,
++ unsigned long d, int n)
++{
++ rtl_dash_loop_wait(tp, c, d, n, true);
++}
++
++static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
++ const struct rtl_cond *c,
++ unsigned long d, int n)
++{
++ rtl_dash_loop_wait(tp, c, d, n, false);
++}
++
+ static void rtl8168dp_driver_start(struct rtl8169_private *tp)
+ {
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
+- rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
++ rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ }
+
+ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
+ {
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
++ rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ }
+
+ static void rtl8168_driver_start(struct rtl8169_private *tp)
+@@ -1222,7 +1275,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
+ static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
+ {
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+- rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
++ rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ }
+
+ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
+@@ -1230,7 +1283,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
+ rtl8168ep_stop_cmac(tp);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+- rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
++ rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ }
+
+ static void rtl8168_driver_stop(struct rtl8169_private *tp)
+@@ -1253,14 +1306,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
+ }
+
+-static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
++static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
++{
++ switch (tp->dash_type) {
++ case RTL_DASH_DP:
++ return r8168dp_check_dash(tp);
++ case RTL_DASH_EP:
++ return r8168ep_check_dash(tp);
++ default:
++ return false;
++ }
++}
++
++static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
+ {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+- return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
++ return RTL_DASH_DP;
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+- return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
++ return RTL_DASH_EP;
+ default:
+ return RTL_DASH_NONE;
+ }
+@@ -1453,7 +1518,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+
+ device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+
+- if (tp->dash_type == RTL_DASH_NONE) {
++ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
+@@ -1688,7 +1753,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
+ data[9] = le64_to_cpu(counters->rx_broadcast);
+ data[10] = le32_to_cpu(counters->rx_multicast);
+ data[11] = le16_to_cpu(counters->tx_aborted);
+- data[12] = le16_to_cpu(counters->tx_underun);
++ data[12] = le16_to_cpu(counters->tx_underrun);
+ }
+
+ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+@@ -2292,9 +2357,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
+- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
++ case RTL_GIGA_MAC_VER_61:
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
+ break;
++ case RTL_GIGA_MAC_VER_63:
++ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
++ RX_PAUSE_SLOT_ON);
++ break;
+ default:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ break;
+@@ -2512,7 +2581,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+
+ static void rtl_prepare_power_down(struct rtl8169_private *tp)
+ {
+- if (tp->dash_type != RTL_DASH_NONE)
++ if (tp->dash_enabled)
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+@@ -2582,6 +2651,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode |= AcceptAllPhys;
++ } else if (!(dev->flags & IFF_MULTICAST)) {
++ rx_mode &= ~AcceptMulticast;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
+@@ -4202,17 +4273,18 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+ {
+- unsigned int frags = skb_shinfo(skb)->nr_frags;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned int entry = tp->cur_tx % NUM_TX_DESC;
+ struct TxDesc *txd_first, *txd_last;
+ bool stop_queue, door_bell;
++ unsigned int frags;
+ u32 opts[2];
+
+ if (unlikely(!rtl_tx_slots_avail(tp))) {
+ if (net_ratelimit())
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+- goto err_stop_0;
++ netif_stop_queue(dev);
++ return NETDEV_TX_BUSY;
+ }
+
+ opts[1] = rtl8169_tx_vlan_tag(skb);
+@@ -4229,6 +4301,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+
+ txd_first = tp->TxDescArray + entry;
+
++ frags = skb_shinfo(skb)->nr_frags;
+ if (frags) {
+ if (rtl8169_xmit_frags(tp, skb, opts, entry))
+ goto err_dma_1;
+@@ -4267,11 +4340,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+-
+-err_stop_0:
+- netif_stop_queue(dev);
+- dev->stats.tx_dropped++;
+- return NETDEV_TX_BUSY;
+ }
+
+ static unsigned int rtl_last_frag_len(struct sk_buff *skb)
+@@ -4522,10 +4590,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ }
+
+- if (napi_schedule_prep(&tp->napi)) {
+- rtl_irq_disable(tp);
+- __napi_schedule(&tp->napi);
+- }
++ rtl_irq_disable(tp);
++ napi_schedule(&tp->napi);
+ out:
+ rtl_ack_events(tp, status);
+
+@@ -4567,6 +4633,8 @@ static void rtl_task(struct work_struct *work)
+ reset:
+ rtl_reset_work(tp);
+ netif_wake_queue(tp->dev);
++ } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
++ rtl_reset_work(tp);
+ }
+ out_unlock:
+ rtnl_unlock();
+@@ -4596,7 +4664,11 @@ static void r8169_phylink_handler(struct net_device *ndev)
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+ pm_request_resume(d);
++ netif_wake_queue(tp->dev);
+ } else {
++		/* In a few cases rx is broken after link-down otherwise */
++ if (rtl_is_8125(tp))
++ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
+ pm_runtime_idle(d);
+ }
+
+@@ -4640,10 +4712,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ rtl8169_cleanup(tp);
+ rtl_disable_exit_l1(tp);
+ rtl_prepare_power_down(tp);
++
++ if (tp->dash_type != RTL_DASH_NONE)
++ rtl8168_driver_stop(tp);
+ }
+
+ static void rtl8169_up(struct rtl8169_private *tp)
+ {
++ if (tp->dash_type != RTL_DASH_NONE)
++ rtl8168_driver_start(tp);
++
+ pci_set_master(tp->pci_dev);
+ phy_init_hw(tp->phydev);
+ phy_resume(tp->phydev);
+@@ -4666,7 +4744,7 @@ static int rtl8169_close(struct net_device *dev)
+ rtl8169_down(tp);
+ rtl8169_rx_clear(tp);
+
+- cancel_work_sync(&tp->wk.work);
++ cancel_work(&tp->wk.work);
+
+ free_irq(tp->irq, tp);
+
+@@ -4861,7 +4939,7 @@ static int rtl8169_runtime_idle(struct device *device)
+ {
+ struct rtl8169_private *tp = dev_get_drvdata(device);
+
+- if (tp->dash_type != RTL_DASH_NONE)
++ if (tp->dash_enabled)
+ return -EBUSY;
+
+ if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
+@@ -4887,8 +4965,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ /* Restore original MAC address */
+ rtl_rar_set(tp, tp->dev->perm_addr);
+
+- if (system_state == SYSTEM_POWER_OFF &&
+- tp->dash_type == RTL_DASH_NONE) {
++ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+@@ -4901,6 +4978,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
++ cancel_work_sync(&tp->wk.work);
++
+ unregister_netdev(tp->dev);
+
+ if (tp->dash_type != RTL_DASH_NONE)
+@@ -5021,6 +5100,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
+ struct mii_bus *new_bus;
+ int ret;
+
++	/* On some boards with this chip version the BIOS is buggy and fails
++	 * to reset the PHY page selector. This results in the PHY ID read
++	 * accessing registers on a different page, returning a more or
++	 * less random value. Fix this by resetting the page selector first.
++	 */
++ if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
++ tp->mac_version == RTL_GIGA_MAC_VER_26)
++ r8169_mdio_write(tp, 0x1f, 0);
++
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus)
+ return -ENOMEM;
+@@ -5246,7 +5334,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
+
+- tp->dash_type = rtl_check_dash(tp);
++ tp->dash_type = rtl_get_dash_type(tp);
++ tp->dash_enabled = rtl_dash_is_enabled(tp);
+
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
+
+@@ -5317,7 +5406,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* configure chip for default features */
+ rtl8169_set_features(dev, dev->features);
+
+- if (tp->dash_type == RTL_DASH_NONE) {
++ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+@@ -5357,7 +5446,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ "ok" : "ko");
+
+ if (tp->dash_type != RTL_DASH_NONE) {
+- netdev_info(dev, "DASH enabled\n");
++ netdev_info(dev, "DASH %s\n",
++ tp->dash_enabled ? "enabled" : "disabled");
+ rtl8168_driver_start(tp);
+ }
+
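
Among the r8169 changes, the start_xmit fix is worth spelling out: when the ring is full the driver must stop the queue and return NETDEV_TX_BUSY with the skb untouched so the core requeues it; freeing the skb and counting tx_dropped is only correct for unrecoverable per-packet errors reported as NETDEV_TX_OK. A sketch of the two legal outcomes (helper names hypothetical):

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!tx_slots_available(dev))) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core keeps the skb and retries */
	}

	if (map_and_queue_frame(dev, skb) < 0) {
		dev_kfree_skb_any(skb);	/* we own the skb: free and count */
		dev->stats.tx_dropped++;
	}

	return NETDEV_TX_OK;
}
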
+diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
+index b50f16786c246a..6ab89f4782857d 100644
+--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
++++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
+@@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ rtl8168g_enable_gphy_10m(phydev);
+
++ rtl8168g_disable_aldps(phydev);
+ rtl8125a_config_eee_phy(phydev);
+ }
+
+@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
+ phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
+
+ rtl8125_legacy_force_mode(phydev);
++ rtl8168g_disable_aldps(phydev);
+ rtl8125b_config_eee_phy(phydev);
+ }
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 0ef0b88b714590..c6897e6ea362d9 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -66,16 +66,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+ return -ETIMEDOUT;
+ }
+
+-static int ravb_config(struct net_device *ndev)
++static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
+ {
++ u32 csr_ops = 1U << (opmode & CCC_OPC);
++ u32 ccc_mask = CCC_OPC;
+ int error;
+
+- /* Set config mode */
+- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+- /* Check if the operating mode is changed to the config mode */
+- error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+- if (error)
+- netdev_err(ndev, "failed to switch device to config mode\n");
++ /* If gPTP active in config mode is supported it needs to be configured
++ * along with CSEL and operating mode in the same access. This is a
++ * hardware limitation.
++ */
++ if (opmode & CCC_GAC)
++ ccc_mask |= CCC_GAC | CCC_CSEL;
++
++ /* Set operating mode */
++ ravb_modify(ndev, CCC, ccc_mask, opmode);
++ /* Check if the operating mode is changed to the requested one */
++ error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
++ if (error) {
++ netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
++ opmode & CCC_OPC);
++ }
+
+ return error;
+ }
+@@ -515,6 +526,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+ {
+ struct ravb_private *priv = netdev_priv(ndev);
+
++ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
++ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
++ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
++ } else {
++ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
++ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
++ CXR31_SEL_LINK0);
++ }
++
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+@@ -537,14 +557,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+-
+- if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+- ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+- } else {
+- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+- CXR31_SEL_LINK0);
+- }
+ }
+
+ static void ravb_emac_init_rcar(struct net_device *ndev)
+@@ -672,7 +684,7 @@ static int ravb_dmac_init(struct net_device *ndev)
+ int error;
+
+ /* Set CONFIG mode */
+- error = ravb_config(ndev);
++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
+
+@@ -681,9 +693,7 @@ static int ravb_dmac_init(struct net_device *ndev)
+ return error;
+
+ /* Setting the control will start the AVB-DMAC process. */
+- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
+-
+- return 0;
++ return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
+ }
+
+ static void ravb_get_tx_tstamp(struct net_device *ndev)
+@@ -1045,7 +1055,7 @@ static int ravb_stop_dma(struct net_device *ndev)
+ return error;
+
+ /* Stop AVB-DMAC process */
+- return ravb_config(ndev);
++ return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
+
+ /* E-MAC interrupt handler */
+@@ -1278,25 +1288,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ struct net_device *ndev = napi->dev;
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+- bool gptp = info->gptp || info->ccc_gac;
+- struct ravb_rx_desc *desc;
+ unsigned long flags;
+ int q = napi - priv->napi;
+ int mask = BIT(q);
+ int quota = budget;
+- unsigned int entry;
++ bool unmask;
+
+- if (!gptp) {
+- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+- desc = &priv->gbeth_rx_ring[entry];
+- }
+ /* Processing RX Descriptor Ring */
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+- if (gptp || desc->die_dt != DT_FEMPTY) {
+- if (ravb_rx(ndev, &quota, q))
+- goto out;
+- }
++ unmask = !ravb_rx(ndev, &quota, q);
+
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+@@ -1306,6 +1307,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ netif_wake_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
++ /* Receive error message handling */
++ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
++ if (info->nc_queues)
++ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
++ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
++ ndev->stats.rx_over_errors = priv->rx_over_errors;
++ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
++ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
++
++ if (!unmask)
++ goto out;
++
+ napi_complete(napi);
+
+ /* Re-enable RX/TX interrupts */
+@@ -1319,14 +1332,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+- /* Receive error message handling */
+- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+- if (info->nc_queues)
+- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+- if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+- ndev->stats.rx_over_errors = priv->rx_over_errors;
+- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+- ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+ out:
+ return budget - quota;
+ }
+@@ -1811,19 +1816,20 @@ static int ravb_open(struct net_device *ndev)
+ if (info->gptp)
+ ravb_ptp_init(ndev, priv->pdev);
+
+- netif_tx_start_all_queues(ndev);
+-
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
++ netif_tx_start_all_queues(ndev);
++
+ return 0;
+
+ out_ptp_stop:
+ /* Stop PTP Clock driver */
+ if (info->gptp)
+ ravb_ptp_stop(ndev);
++ ravb_stop_dma(ndev);
+ out_free_irq_mgmta:
+ if (!info->multi_irqs)
+ goto out_free_irq;
+@@ -1874,6 +1880,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ struct net_device *ndev = priv->ndev;
+ int error;
+
++ if (!rtnl_trylock()) {
++ usleep_range(1000, 2000);
++ schedule_work(&priv->work);
++ return;
++ }
++
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+@@ -1907,7 +1919,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+- return;
++ goto out_unlock;
+ }
+ ravb_emac_init(ndev);
+
+@@ -1917,6 +1929,9 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
++
++out_unlock:
++ rtnl_unlock();
+ }
+
+ /* Packet transmit function for Ethernet AVB */
+@@ -1929,7 +1944,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct ravb_tstamp_skb *ts_skb;
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+- u32 dma_addr;
++ dma_addr_t dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 len;
+@@ -2549,21 +2564,25 @@ static int ravb_set_gti(struct net_device *ndev)
+ return 0;
+ }
+
+-static void ravb_set_config_mode(struct net_device *ndev)
++static int ravb_set_config_mode(struct net_device *ndev)
+ {
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
++ int error;
+
+ if (info->gptp) {
+- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
++ if (error)
++ return error;
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else if (info->ccc_gac) {
+- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+- CCC_GAC | CCC_CSEL_HPB);
++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ } else {
+- ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
++ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
++
++ return error;
+ }
+
+ /* Set tx and rx clock internal delay modes */
+@@ -2645,9 +2664,14 @@ static int ravb_probe(struct platform_device *pdev)
+ ndev->features = info->net_features;
+ ndev->hw_features = info->net_hw_features;
+
+- reset_control_deassert(rstc);
++ error = reset_control_deassert(rstc);
++ if (error)
++ goto out_free_netdev;
++
+ pm_runtime_enable(&pdev->dev);
+- pm_runtime_get_sync(&pdev->dev);
++ error = pm_runtime_resume_and_get(&pdev->dev);
++ if (error < 0)
++ goto out_rpm_disable;
+
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+@@ -2778,7 +2802,9 @@ static int ravb_probe(struct platform_device *pdev)
+ ndev->ethtool_ops = &ravb_ethtool_ops;
+
+ /* Set AVB config mode */
+- ravb_set_config_mode(ndev);
++ error = ravb_set_config_mode(ndev);
++ if (error)
++ goto out_disable_gptp_clk;
+
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+@@ -2872,11 +2898,12 @@ static int ravb_probe(struct platform_device *pdev)
+ out_disable_refclk:
+ clk_disable_unprepare(priv->refclk);
+ out_release:
+- free_netdev(ndev);
+-
+ pm_runtime_put(&pdev->dev);
++out_rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(rstc);
++out_free_netdev:
++ free_netdev(ndev);
+ return error;
+ }
+
+@@ -2886,22 +2913,25 @@ static int ravb_remove(struct platform_device *pdev)
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+- /* Stop PTP Clock driver */
+- if (info->ccc_gac)
+- ravb_ptp_stop(ndev);
+-
+- clk_disable_unprepare(priv->gptp_clk);
+- clk_disable_unprepare(priv->refclk);
+-
+- /* Set reset mode */
+- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ unregister_netdev(ndev);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
++
+ ravb_mdio_release(priv);
++
++ /* Stop PTP Clock driver */
++ if (info->ccc_gac)
++ ravb_ptp_stop(ndev);
++
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
++
++ ravb_set_opmode(ndev, CCC_OPC_RESET);
++
++ clk_disable_unprepare(priv->gptp_clk);
++ clk_disable_unprepare(priv->refclk);
++
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
+@@ -2981,8 +3011,11 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ int ret = 0;
+
+ /* If WoL is enabled set reset mode to rearm the WoL logic */
+- if (priv->wol_enabled)
+- ravb_write(ndev, CCC_OPC_RESET, CCC);
++ if (priv->wol_enabled) {
++ ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
++ if (ret)
++ return ret;
++ }
+
+ 	/* All registers have been reset to default values.
+ 	 * Restore all registers which were set up at probe time and
+@@ -2990,7 +3023,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
+ */
+
+ /* Set AVB config mode */
+- ravb_set_config_mode(ndev);
++ ret = ravb_set_config_mode(ndev);
++ if (ret)
++ return ret;
+
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
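
The ravb rework funnels every operating-mode change through one helper that writes CCC.OPC (plus CCC_GAC/CCC_CSEL when gPTP is active, which the hardware requires in the same register access) and then polls CSR.OPS until the device confirms the requested mode, so callers finally see real errors. The essential write-then-poll shape, simplified from the hunk above:

static int set_opmode(struct net_device *ndev, u32 opmode)
{
	u32 csr_ops = 1U << (opmode & CCC_OPC);

	ravb_modify(ndev, CCC, CCC_OPC, opmode);
	/* ravb_wait() polls until CSR_OPS matches or returns -ETIMEDOUT. */
	return ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
}
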
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 0fc0b6bea75305..ae9d8722b76f77 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -1501,8 +1501,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ {
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->tx_queue;
++ netdev_tx_t ret = NETDEV_TX_OK;
+ struct rswitch_ext_desc *desc;
+- int ret = NETDEV_TX_OK;
+ dma_addr_t dma_addr;
+
+ if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
+@@ -1514,10 +1514,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ return ret;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+- if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+- dev_kfree_skb_any(skb);
+- return ret;
+- }
++ if (dma_mapping_error(ndev->dev.parent, dma_addr))
++ goto err_kfree;
+
+ gq->skbs[gq->cur] = skb;
+ desc = &gq->tx_ring[gq->cur];
+@@ -1530,10 +1528,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ struct rswitch_gwca_ts_info *ts_info;
+
+ ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+- if (!ts_info) {
+- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+- return -ENOMEM;
+- }
++ if (!ts_info)
++ goto err_unmap;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ rdev->ts_tag++;
+@@ -1555,6 +1551,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
+
++ return ret;
++
++err_unmap:
++ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
++
++err_kfree:
++ dev_kfree_skb_any(skb);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
+index c672f92d65e976..9319a2675e7b65 100644
+--- a/drivers/net/ethernet/seeq/ether3.c
++++ b/drivers/net/ethernet/seeq/ether3.c
+@@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec)
+ {
+ struct net_device *dev = ecard_get_drvdata(ec);
+
++ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
++ del_timer_sync(&priv(dev)->timer);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+ }
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index d2f35ee15effeb..fac227d372db4c 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -823,8 +823,10 @@ int efx_probe_filters(struct efx_nic *efx)
+ }
+
+ if (!success) {
+- efx_for_each_channel(channel, efx)
++ efx_for_each_channel(channel, efx) {
+ kfree(channel->rps_flow_id);
++ channel->rps_flow_id = NULL;
++ }
+ efx->type->filter_table_remove(efx);
+ rc = -ENOMEM;
+ goto out_unlock;
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index c521ea8f94f2fd..9c74d254214140 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -175,8 +175,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
+ writew(*wp++, a);
+ }
+
+-#define SMC_inw(a, r) _swapw(readw((a) + (r)))
+-#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
++#define SMC_inw(a, r) ioread16be((a) + (r))
++#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r))
+ #define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
+ #define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
+
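The smc91x change swaps the open-coded _swapw()-around-readw() accessors for the big-endian MMIO helpers, which are intended to be functionally equivalent on this ColdFire path while making the bus endianness explicit rather than hidden behind a byte-swap macro:

u16 v = ioread16be(a + r);	/* replaces _swapw(readw(a + r)) */
iowrite16be(v, a + r);		/* replaces writew(_swapw(v), a + r) */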
+diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+index 06c6871f87886c..92d7d5a00b84c7 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -165,9 +165,9 @@ config DWMAC_STARFIVE
+ help
+ Support for ethernet controllers on StarFive RISC-V SoCs
+
+- This selects the StarFive platform specific glue layer support for
+- the stmmac device driver. This driver is used for StarFive JH7110
+- ethernet controller.
++ This selects the StarFive platform specific glue layer support
++ for the stmmac device driver. This driver is used for the
++ StarFive JH7100 and JH7110 ethernet controllers.
+
+ config DWMAC_STI
+ tristate "STi GMAC support"
+@@ -269,7 +269,7 @@ config DWMAC_INTEL
+ config DWMAC_LOONGSON
+ tristate "Loongson PCI DWMAC support"
+ default MACH_LOONGSON64
+- depends on STMMAC_ETH && PCI
++ depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ help
+ This selects the LOONGSON PCI bus support for the stmmac driver,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 1e996c29043dcb..4dbc076f72d65a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -59,28 +59,51 @@
+ #undef FRAME_FILTER_DEBUG
+ /* #define FRAME_FILTER_DEBUG */
+
++struct stmmac_q_tx_stats {
++ u64_stats_t tx_bytes;
++ u64_stats_t tx_set_ic_bit;
++ u64_stats_t tx_tso_frames;
++ u64_stats_t tx_tso_nfrags;
++};
++
++struct stmmac_napi_tx_stats {
++ u64_stats_t tx_packets;
++ u64_stats_t tx_pkt_n;
++ u64_stats_t poll;
++ u64_stats_t tx_clean;
++ u64_stats_t tx_set_ic_bit;
++};
++
+ struct stmmac_txq_stats {
+- u64 tx_bytes;
+- u64 tx_packets;
+- u64 tx_pkt_n;
+- u64 tx_normal_irq_n;
+- u64 napi_poll;
+- u64 tx_clean;
+- u64 tx_set_ic_bit;
+- u64 tx_tso_frames;
+- u64 tx_tso_nfrags;
+- struct u64_stats_sync syncp;
++ /* Updates protected by tx queue lock. */
++ struct u64_stats_sync q_syncp;
++ struct stmmac_q_tx_stats q;
++
++ /* Updates protected by NAPI poll logic. */
++ struct u64_stats_sync napi_syncp;
++ struct stmmac_napi_tx_stats napi;
+ } ____cacheline_aligned_in_smp;
+
++struct stmmac_napi_rx_stats {
++ u64_stats_t rx_bytes;
++ u64_stats_t rx_packets;
++ u64_stats_t rx_pkt_n;
++ u64_stats_t poll;
++};
++
+ struct stmmac_rxq_stats {
+- u64 rx_bytes;
+- u64 rx_packets;
+- u64 rx_pkt_n;
+- u64 rx_normal_irq_n;
+- u64 napi_poll;
+- struct u64_stats_sync syncp;
++ /* Updates protected by NAPI poll logic. */
++ struct u64_stats_sync napi_syncp;
++ struct stmmac_napi_rx_stats napi;
+ } ____cacheline_aligned_in_smp;
+
++/* Updates on each CPU protected by not allowing nested irqs. */
++struct stmmac_pcpu_stats {
++ struct u64_stats_sync syncp;
++ u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
++ u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
++};
++
+ /* Extra statistic and debug information exposed by ethtool */
+ struct stmmac_extra_stats {
+ /* Transmit errors */
+@@ -205,6 +228,7 @@ struct stmmac_extra_stats {
+ /* per queue statistics */
+ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
+ struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
++ struct stmmac_pcpu_stats __percpu *pcpu_stats;
+ unsigned long rx_dropped;
+ unsigned long rx_errors;
+ unsigned long tx_dropped;
+@@ -216,6 +240,7 @@ struct stmmac_safety_stats {
+ unsigned long mac_errors[32];
+ unsigned long mtl_errors[32];
+ unsigned long dma_errors[32];
++ unsigned long dma_dpp_errors[32];
+ };
+
+ /* Number of fields in Safety Stats */
+@@ -525,6 +550,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
+ extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+
+ struct mac_link {
++ u32 caps;
+ u32 speed_mask;
+ u32 speed10;
+ u32 speed100;
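common.h now splits the queue statistics by writer context and moves the IRQ counters into a true per-CPU structure. Because each CPU owns its own stmmac_pcpu_stats slot and nested IRQs are excluded, the hot-path increment needs no shared lock, only the u64_stats seqcount that protects 32-bit readers. This is the update shape used throughout the interrupt-handler hunks below:

struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);

u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->rx_normal_irq_n[chan]);
u64_stats_update_end(&stats->syncp);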
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index 2cd6fce5c9934c..ee3604f58def52 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -35,6 +35,9 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+
++ plat->clk_ref_rate = 125000000;
++ plat->clk_ptp_rate = 125000000;
++
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+@@ -59,26 +62,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ return -ENODEV;
+ }
+
+- if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
+- pr_info("dwmac_loongson_pci: Incompatible OF node\n");
+- return -ENODEV;
+- }
+-
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
++ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
++ sizeof(*plat->mdio_bus_data),
++ GFP_KERNEL);
++ if (!plat->mdio_bus_data)
++ return -ENOMEM;
++
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+-
+- plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+- sizeof(*plat->mdio_bus_data),
+- GFP_KERNEL);
+- if (!plat->mdio_bus_data) {
+- ret = -ENOMEM;
+- goto err_put_node;
+- }
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index d3bf42d0fceb69..ded1bbda5266f8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -21,6 +21,7 @@
+ #define RGMII_IO_MACRO_CONFIG2 0x1C
+ #define RGMII_IO_MACRO_DEBUG1 0x20
+ #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
++#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4
+
+ /* RGMII_IO_MACRO_CONFIG fields */
+ #define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
+@@ -34,6 +35,7 @@
+ #define RGMII_CONFIG_LOOPBACK_EN BIT(2)
+ #define RGMII_CONFIG_PROG_SWAP BIT(1)
+ #define RGMII_CONFIG_DDR_MODE BIT(0)
++#define RGMII_CONFIG_SGMII_CLK_DVDR GENMASK(18, 10)
+
+ /* SDCC_HC_REG_DLL_CONFIG fields */
+ #define SDCC_DLL_CONFIG_DLL_RST BIT(30)
+@@ -78,6 +80,11 @@
+ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
+ #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+
++/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
++#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)
++
++#define SGMII_10M_RX_CLK_DVDR 0x31
++
+ struct ethqos_emac_por {
+ unsigned int offset;
+ unsigned int value;
+@@ -90,7 +97,9 @@ struct ethqos_emac_driver_data {
+ bool has_emac_ge_3;
+ const char *link_clk_name;
+ bool has_integrated_pcs;
++ u32 dma_addr_width;
+ struct dwmac4_addrs dwmac4_addrs;
++ bool needs_sgmii_loopback;
+ };
+
+ struct qcom_ethqos {
+@@ -109,6 +118,7 @@ struct qcom_ethqos {
+ unsigned int num_por;
+ bool rgmii_config_loopback_en;
+ bool has_emac_ge_3;
++ bool needs_sgmii_loopback;
+ };
+
+ static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+@@ -183,8 +193,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
+ }
+
++static void
++qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
++{
++ if (!ethqos->needs_sgmii_loopback ||
++ ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX)
++ return;
++
++ rgmii_updatel(ethqos,
++ SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
++ enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0,
++ EMAC_WRAPPER_SGMII_PHY_CNTRL1);
++}
++
+ static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
+ {
++ qcom_ethqos_set_sgmii_loopback(ethqos, true);
+ rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
+ RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+ }
+@@ -264,11 +288,13 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = {
+
+ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
+ .por = emac_v4_0_0_por,
+- .num_por = ARRAY_SIZE(emac_v3_0_0_por),
++ .num_por = ARRAY_SIZE(emac_v4_0_0_por),
+ .rgmii_config_loopback_en = false,
+ .has_emac_ge_3 = true,
+ .link_clk_name = "phyaux",
+ .has_integrated_pcs = true,
++ .needs_sgmii_loopback = true,
++ .dma_addr_width = 36,
+ .dwmac4_addrs = {
+ .dma_chan = 0x00008100,
+ .dma_chan_offset = 0x1000,
+@@ -598,6 +624,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
+ return 0;
+ }
+
++/* On an interface toggle, the MAC registers get reset.
++ * Configure the MAC block for SGMII on ethernet PHY link up.
++ */
+ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+ {
+ int val;
+@@ -617,6 +646,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+ case SPEED_10:
+ val |= ETHQOS_MAC_CTRL_PORT_SEL;
+ val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
++ rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
++ FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
++ SGMII_10M_RX_CLK_DVDR),
++ RGMII_IO_MACRO_CONFIG);
+ break;
+ }
+
+@@ -634,6 +667,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo
+ {
+ struct qcom_ethqos *ethqos = priv;
+
++ qcom_ethqos_set_sgmii_loopback(ethqos, false);
+ ethqos->speed = speed;
+ ethqos_update_link_clk(ethqos, speed);
+ ethqos_configure(ethqos);
+@@ -769,6 +803,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
+ ethqos->num_por = data->num_por;
+ ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
+ ethqos->has_emac_ge_3 = data->has_emac_ge_3;
++ ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback;
+
+ ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
+ if (IS_ERR(ethqos->link_clk))
+@@ -806,6 +841,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
+ plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
+ if (data->has_integrated_pcs)
+ plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
++ if (data->dma_addr_width)
++ plat_dat->host_dma_width = data->dma_addr_width;
+
+ if (ethqos->serdes_phy) {
+ plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+index 9289bb87c3e3a8..0c713257193de6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+@@ -15,13 +15,20 @@
+
+ #include "stmmac_platform.h"
+
+-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+-#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
++#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
++#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
++#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
++
++#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
++
++struct starfive_dwmac_data {
++ unsigned int gtxclk_dlychain;
++};
+
+ struct starfive_dwmac {
+ struct device *dev;
+ struct clk *clk_tx;
++ const struct starfive_dwmac_data *data;
+ };
+
+ static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+@@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
++ case PHY_INTERFACE_MODE_RGMII_RXID:
++ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
+ break;
+
+@@ -89,6 +98,14 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
+ if (err)
+ return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+
++ if (dwmac->data) {
++ err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN,
++ dwmac->data->gtxclk_dlychain);
++ if (err)
++ return dev_err_probe(dwmac->dev, err,
++ "error selecting gtxclk delay chain\n");
++ }
++
+ return 0;
+ }
+
+@@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
+ if (!dwmac)
+ return -ENOMEM;
+
++ dwmac->data = device_get_match_data(&pdev->dev);
++
+ dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(dwmac->clk_tx))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+@@ -150,8 +169,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
+ return 0;
+ }
+
++static const struct starfive_dwmac_data jh7100_data = {
++ .gtxclk_dlychain = 4,
++};
++
+ static const struct of_device_id starfive_dwmac_match[] = {
+- { .compatible = "starfive,jh7110-dwmac" },
++ { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data },
++ { .compatible = "starfive,jh7110-dwmac" },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+index 465ff1fd478554..63998d65fef8eb 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
+ struct stmmac_extra_stats *x, u32 chan,
+ u32 dir)
+ {
+- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
++ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ int ret = 0;
+ u32 v;
+
+@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
+
+ if (v & EMAC_TX_INT) {
+ ret |= handle_tx;
+- u64_stats_update_begin(&txq_stats->syncp);
+- txq_stats->tx_normal_irq_n++;
+- u64_stats_update_end(&txq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (v & EMAC_TX_DMA_STOP_INT)
+@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
+
+ if (v & EMAC_RX_INT) {
+ ret |= handle_rx;
+- u64_stats_update_begin(&rxq_stats->syncp);
+- rxq_stats->rx_normal_irq_n++;
+- u64_stats_update_end(&rxq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (v & EMAC_RX_BUF_UA_INT)
+@@ -1097,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
++ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_10 | MAC_100 | MAC_1000;
+ /* The loopback bit seems to be re-set when link change
+ * Simply mask it each time
+ * Speed 10/100/1000 are set in BIT(2)/BIT(3)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index 3927609abc4411..8555299443f4ed 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
++ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_10 | MAC_100 | MAC_1000;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+index a6e8d7bd95886f..7667d103cd0ebd 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
+ dev_info(priv->device, "\tDWMAC100\n");
+
+ mac->pcsr = priv->ioaddr;
++ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_10 | MAC_100;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index c6ff1fa0e04d84..a9837985a483d8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/ethtool.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include "stmmac.h"
+ #include "stmmac_pcs.h"
+ #include "dwmac4.h"
+@@ -70,7 +71,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
+
+ static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
+ {
+- priv->phylink_config.mac_capabilities |= MAC_2500FD;
++ if (priv->plat->tx_queues_to_use > 1)
++ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
++ else
++ priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
+ }
+
+ static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
+@@ -92,19 +96,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+ {
+ void __iomem *ioaddr = hw->pcsr;
+- u32 base_register;
+- u32 value;
++ u32 clear_mask = 0;
++ u32 ctrl2, ctrl3;
++ int i;
+
+- base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+- if (queue >= 4)
+- queue -= 4;
++ ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
++ ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
+
+- value = readl(ioaddr + base_register);
++ /* The software must ensure that the same priority
++ * is not mapped to multiple Rx queues
++ */
++ for (i = 0; i < 4; i++)
++ clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
++ GMAC_RXQCTRL_PSRQX_MASK(i));
++
++ ctrl2 &= ~clear_mask;
++ ctrl3 &= ~clear_mask;
++
++ /* First assign new priorities to a queue, then
++ * clear them from the other queues
++ */
++ if (queue < 4) {
++ ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
++ GMAC_RXQCTRL_PSRQX_MASK(queue);
++
++ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
++ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
++ } else {
++ queue -= 4;
+
+- value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
+- value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
++ ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+- writel(value, ioaddr + base_register);
++
++ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
++ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
++ }
+ }
+
+ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+@@ -450,7 +476,7 @@ static int dwmac4_write_vlan_filter(struct net_device *dev,
+ u8 index, u32 data)
+ {
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+- int i, timeout = 10;
++ int ret;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+@@ -466,16 +492,15 @@ static int dwmac4_write_vlan_filter(struct net_device *dev,
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+
+- for (i = 0; i < timeout; i++) {
+- val = readl(ioaddr + GMAC_VLAN_TAG);
+- if (!(val & GMAC_VLAN_TAG_CTRL_OB))
+- return 0;
+- udelay(1);
++ ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
++ !(val & GMAC_VLAN_TAG_CTRL_OB),
++ 1000, 500000);
++ if (ret) {
++ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
++ return -EBUSY;
+ }
+
+- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+-
+- return -EBUSY;
++ return 0;
+ }
+
+ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
+@@ -957,7 +982,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
+ }
+
+ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+- __le16 perfect_match, bool is_double)
++ u16 perfect_match, bool is_double)
+ {
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+@@ -1325,6 +1350,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
++ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->link.duplex = GMAC_CONFIG_DM;
+ mac->link.speed10 = GMAC_CONFIG_PS;
+ mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
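The dwmac4_write_vlan_filter() hunk above replaces a hand-rolled ten-iteration udelay(1) loop with readl_poll_timeout() from <linux/iopoll.h>, which re-reads the register until the condition holds or the time budget expires. A usage sketch matching the new code (1 ms poll interval, 500 ms total):

u32 val;
int ret;

ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
			 !(val & GMAC_VLAN_TAG_CTRL_OB),
			 1000, 500000);	/* sleep_us, timeout_us */
if (ret)		/* -ETIMEDOUT; the driver reports it as -EBUSY */
	return -EBUSY;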
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+index 9470d3fd2dede2..0d185e54eb7e24 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
+ const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
+ u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
+- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
++ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ int ret = 0;
+
+ if (dir == DMA_DIR_RX)
+@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+- u64_stats_update_begin(&rxq_stats->syncp);
+- rxq_stats->rx_normal_irq_n++;
+- u64_stats_update_end(&rxq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+- u64_stats_update_begin(&txq_stats->syncp);
+- txq_stats->tx_normal_irq_n++;
+- u64_stats_update_end(&txq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index e95d35f1e5a0c8..8fd167501fa0ea 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ }
+ }
+
+-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq, u32 num_rxq,
+ bool enable)
+ {
+ u32 value;
+
+- if (!enable) {
+- value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-
+- value &= ~EFPE;
+-
+- writel(value, ioaddr + MAC_FPE_CTRL_STS);
+- return;
++ if (enable) {
++ cfg->fpe_csr = EFPE;
++ value = readl(ioaddr + GMAC_RXQ_CTRL1);
++ value &= ~GMAC_RXQCTRL_FPRQ;
++ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
++ writel(value, ioaddr + GMAC_RXQ_CTRL1);
++ } else {
++ cfg->fpe_csr = 0;
+ }
+-
+- value = readl(ioaddr + GMAC_RXQ_CTRL1);
+- value &= ~GMAC_RXQCTRL_FPRQ;
+- value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+- writel(value, ioaddr + GMAC_RXQ_CTRL1);
+-
+- value = readl(ioaddr + MAC_FPE_CTRL_STS);
+- value |= EFPE;
+- writel(value, ioaddr + MAC_FPE_CTRL_STS);
++ writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+ }
+
+ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+@@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+
+ status = FPE_EVENT_UNKNOWN;
+
++ /* Reads from the MAC_FPE_CTRL_STS register should only be performed
++ * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
++ */
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (value & TRSP) {
+@@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+ return status;
+ }
+
+-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
++void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ enum stmmac_mpacket_type type)
+ {
+- u32 value;
++ u32 value = cfg->fpe_csr;
+
+- value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-
+- if (type == MPACKET_VERIFY) {
+- value &= ~SRSP;
++ if (type == MPACKET_VERIFY)
+ value |= SVER;
+- } else {
+- value &= ~SVER;
++ else if (type == MPACKET_RESPONSE)
+ value |= SRSP;
+- }
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+index 53c138d0ff4808..34e620790eb371 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+@@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq, u32 num_rxq,
+ bool enable);
+ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
++ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type);
+ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+index 7907d62d343759..85e18f9a22f920 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
+ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir)
+ {
+- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
++ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* to schedule NAPI on real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+- u64_stats_update_begin(&rxq_stats->syncp);
+- rxq_stats->rx_normal_irq_n++;
+- u64_stats_update_end(&rxq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+- u64_stats_update_begin(&txq_stats->syncp);
+- txq_stats->tx_normal_irq_n++;
+- u64_stats_update_end(&txq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 7a8f47e7b728bd..17394847476f33 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -259,7 +259,7 @@
+ ((val) << XGMAC_PPS_MINIDX(x))
+ #define XGMAC_PPSCMD_START 0x2
+ #define XGMAC_PPSCMD_STOP 0x5
+-#define XGMAC_PPSEN0 BIT(4)
++#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
+ #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+ #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+ #define XGMAC_TRGTBUSY0 BIT(31)
+@@ -319,6 +319,8 @@
+ #define XGMAC_RXCEIE BIT(4)
+ #define XGMAC_TXCEIE BIT(0)
+ #define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
++#define XGMAC_MTL_DPP_CONTROL 0x000010e0
++#define XGMAC_DPP_DISABLE BIT(0)
+ #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
+ #define XGMAC_TQS GENMASK(25, 16)
+ #define XGMAC_TQS_SHIFT 16
+@@ -401,6 +403,7 @@
+ #define XGMAC_DCEIE BIT(1)
+ #define XGMAC_TCEIE BIT(0)
+ #define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
++#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
+ #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+ #define XGMAC_SPH BIT(24)
+ #define XGMAC_PBLx8 BIT(16)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index f352be269deb54..052566f5b7f361 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+ }
+
+-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
+-{
+- priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
+- MAC_10000FD | MAC_25000FD |
+- MAC_40000FD | MAC_50000FD |
+- MAC_100000FD;
+-}
+-
+ static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+ {
+ u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+@@ -105,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+ {
+ void __iomem *ioaddr = hw->pcsr;
+- u32 value, reg;
++ u32 clear_mask = 0;
++ u32 ctrl2, ctrl3;
++ int i;
+
+- reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+- if (queue >= 4)
++ ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
++ ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
++
++ /* The software must ensure that the same priority
++ * is not mapped to multiple Rx queues
++ */
++ for (i = 0; i < 4; i++)
++ clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
++ XGMAC_PSRQ(i));
++
++ ctrl2 &= ~clear_mask;
++ ctrl3 &= ~clear_mask;
++
++ /* First assign new priorities to a queue, then
++ * clear them from the other queues
++ */
++ if (queue < 4) {
++ ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
++ XGMAC_PSRQ(queue);
++
++ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
++ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
++ } else {
+ queue -= 4;
+
+- value = readl(ioaddr + reg);
+- value &= ~XGMAC_PSRQ(queue);
+- value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
++ ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
++ XGMAC_PSRQ(queue);
+
+- writel(value, ioaddr + reg);
++ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
++ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
++ }
+ }
+
+ static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+@@ -599,7 +615,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ }
+
+ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+- __le16 perfect_match, bool is_double)
++ u16 perfect_match, bool is_double)
+ {
+ void __iomem *ioaddr = hw->pcsr;
+
+@@ -830,6 +846,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+ };
+
++#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
++#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
++
++static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
++ { true, "TDPES0", DPP_TX_ERR },
++ { true, "TDPES1", DPP_TX_ERR },
++ { true, "TDPES2", DPP_TX_ERR },
++ { true, "TDPES3", DPP_TX_ERR },
++ { true, "TDPES4", DPP_TX_ERR },
++ { true, "TDPES5", DPP_TX_ERR },
++ { true, "TDPES6", DPP_TX_ERR },
++ { true, "TDPES7", DPP_TX_ERR },
++ { true, "TDPES8", DPP_TX_ERR },
++ { true, "TDPES9", DPP_TX_ERR },
++ { true, "TDPES10", DPP_TX_ERR },
++ { true, "TDPES11", DPP_TX_ERR },
++ { true, "TDPES12", DPP_TX_ERR },
++ { true, "TDPES13", DPP_TX_ERR },
++ { true, "TDPES14", DPP_TX_ERR },
++ { true, "TDPES15", DPP_TX_ERR },
++ { true, "RDPES0", DPP_RX_ERR },
++ { true, "RDPES1", DPP_RX_ERR },
++ { true, "RDPES2", DPP_RX_ERR },
++ { true, "RDPES3", DPP_RX_ERR },
++ { true, "RDPES4", DPP_RX_ERR },
++ { true, "RDPES5", DPP_RX_ERR },
++ { true, "RDPES6", DPP_RX_ERR },
++ { true, "RDPES7", DPP_RX_ERR },
++ { true, "RDPES8", DPP_RX_ERR },
++ { true, "RDPES9", DPP_RX_ERR },
++ { true, "RDPES10", DPP_RX_ERR },
++ { true, "RDPES11", DPP_RX_ERR },
++ { true, "RDPES12", DPP_RX_ERR },
++ { true, "RDPES13", DPP_RX_ERR },
++ { true, "RDPES14", DPP_RX_ERR },
++ { true, "RDPES15", DPP_RX_ERR },
++};
++
+ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+@@ -841,6 +895,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
++
++ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
++ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
++
++ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
++ dwxgmac3_dma_dpp_errors,
++ STAT_OFF(dma_dpp_errors), stats);
+ }
+
+ static int
+@@ -881,6 +942,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
++ /* 5. Enable Data Path Parity Protection */
++ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
++ /* already enabled by default; explicitly enable it again */
++ value &= ~XGMAC_DPP_DISABLE;
++ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
++
+ return 0;
+ }
+
+@@ -914,7 +981,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ ret |= !corr;
+ }
+
+- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
++ /* DMA_DPP_Interrupt_Status is indicated by the MCSIS bit in
++ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
++ * Parity Errors here.
++ */
++ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+@@ -930,6 +1001,7 @@ static const struct dwxgmac3_error {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
++ { dwxgmac3_dma_dpp_errors },
+ };
+
+ static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+@@ -1178,7 +1250,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+- val |= XGMAC_PPSEN0;
++
++ /* XGMAC Core has 4 PPS outputs at most.
++ *
++ * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
++ * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
++ * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
++ * read-only reserved to 0.
++ * But we always set PPSEN{1,2,3} do not make things worse ;-)
++ *
++ * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
++ * be set, or the PPS outputs stay in Fixed PPS mode by default.
++ */
++ val |= XGMAC_PPSENx(index);
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+@@ -1472,7 +1556,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ return 0;
+ }
+
+-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
++static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq,
+ u32 num_rxq, bool enable)
+ {
+ u32 value;
+@@ -1498,7 +1583,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
+
+ const struct stmmac_ops dwxgmac210_ops = {
+ .core_init = dwxgmac2_core_init,
+- .phylink_get_caps = xgmac_phylink_get_caps,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxgmac2_rx_queue_enable,
+@@ -1560,7 +1644,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+
+ const struct stmmac_ops dwxlgmac2_ops = {
+ .core_init = dwxgmac2_core_init,
+- .phylink_get_caps = xgmac_phylink_get_caps,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxlgmac2_rx_queue_enable,
+@@ -1621,6 +1704,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
++ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_1000FD | MAC_2500FD | MAC_5000FD |
++ MAC_10000FD;
+ mac->link.duplex = 0;
+ mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
+ mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
+@@ -1658,6 +1744,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
++ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_1000FD | MAC_2500FD | MAC_5000FD |
++ MAC_10000FD | MAC_25000FD |
++ MAC_40000FD | MAC_50000FD |
++ MAC_100000FD;
+ mac->link.duplex = 0;
+ mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+index 3cde695fec91bd..dd2ab6185c40e8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
+ struct stmmac_extra_stats *x, u32 chan,
+ u32 dir)
+ {
+- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
++ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ int ret = 0;
+@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & XGMAC_NIS)) {
+ if (likely(intr_status & XGMAC_RI)) {
+- u64_stats_update_begin(&rxq_stats->syncp);
+- rxq_stats->rx_normal_irq_n++;
+- u64_stats_update_end(&rxq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+- u64_stats_update_begin(&txq_stats->syncp);
+- txq_stats->tx_normal_irq_n++;
+- u64_stats_update_end(&txq_stats->syncp);
++ u64_stats_update_begin(&stats->syncp);
++ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
++ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+index b95d3e1378136e..47fb8e1646c2e9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+@@ -386,7 +386,7 @@ struct stmmac_ops {
+ struct stmmac_rss *cfg, u32 num_rxq);
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+- __le16 perfect_match, bool is_double);
++ u16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+@@ -412,9 +412,11 @@ struct stmmac_ops {
+ unsigned int ptp_rate);
+ void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+- void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++ void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq, u32 num_rxq,
+ bool enable);
+ void (*fpe_send_mpacket)(void __iomem *ioaddr,
++ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type);
+ int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ };
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index ea4910ae0921ac..6a7c1d325c464e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -177,8 +177,10 @@
+ #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+ #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
++#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
+ #define MMC_XGMAC_TX_FPE_FRAG 0x208
+ #define MMC_XGMAC_TX_HOLD_REQ 0x20c
++#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+ #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+ {
+ writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+ writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
++ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
++ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index cd7a9768de5f12..85a55f459af018 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -248,6 +248,8 @@ struct stmmac_priv {
+ struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
+ struct stmmac_safety_stats sstats;
+ struct plat_stmmacenet_data *plat;
++ /* Protect est parameters */
++ struct mutex est_lock;
+ struct dma_features dma_cap;
+ struct stmmac_counters mmc;
+ int hw_cap_support;
+@@ -255,6 +257,7 @@ struct stmmac_priv {
+ u32 msg_enable;
+ int wolopts;
+ int wol_irq;
++ bool wol_irq_disabled;
+ int clk_csr;
+ struct timer_list eee_ctrl_timer;
+ int lpi_irq;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 6aa5c0556d2203..521b1b5ffebb4f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -311,8 +311,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+- priv->hw->pcs & STMMAC_PCS_SGMII) {
++ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
++ (priv->hw->pcs & STMMAC_PCS_RGMII ||
++ priv->hw->pcs & STMMAC_PCS_SGMII)) {
+ struct rgmii_adv adv;
+ u32 supported, advertising, lp_advertising;
+
+@@ -397,8 +398,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+- priv->hw->pcs & STMMAC_PCS_SGMII) {
++ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
++ (priv->hw->pcs & STMMAC_PCS_RGMII ||
++ priv->hw->pcs & STMMAC_PCS_SGMII)) {
+ /* Only support ANE */
+ if (cmd->base.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+@@ -537,49 +539,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
+ }
+ }
+
++static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
++{
++ u64 total;
++ int cpu;
++
++ total = 0;
++ for_each_possible_cpu(cpu) {
++ struct stmmac_pcpu_stats *pcpu;
++ unsigned int start;
++ u64 irq_n;
++
++ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
++ do {
++ start = u64_stats_fetch_begin(&pcpu->syncp);
++ irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
++ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
++ total += irq_n;
++ }
++ return total;
++}
++
++static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
++{
++ u64 total;
++ int cpu;
++
++ total = 0;
++ for_each_possible_cpu(cpu) {
++ struct stmmac_pcpu_stats *pcpu;
++ unsigned int start;
++ u64 irq_n;
++
++ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
++ do {
++ start = u64_stats_fetch_begin(&pcpu->syncp);
++ irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
++ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
++ total += irq_n;
++ }
++ return total;
++}
++
+ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
+ {
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int start;
+- int q, stat;
+- u64 *pos;
+- char *p;
++ int q;
+
+- pos = data;
+ for (q = 0; q < tx_cnt; q++) {
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
+- struct stmmac_txq_stats snapshot;
++ u64 pkt_n;
+
+- data = pos;
+ do {
+- start = u64_stats_fetch_begin(&txq_stats->syncp);
+- snapshot = *txq_stats;
+- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
++ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
++ pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
++ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+- p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
+- for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+- *data++ += (*(u64 *)p);
+- p += sizeof(u64);
+- }
++ *data++ = pkt_n;
++ *data++ = stmmac_get_tx_normal_irq_n(priv, q);
+ }
+
+- pos = data;
+ for (q = 0; q < rx_cnt; q++) {
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
+- struct stmmac_rxq_stats snapshot;
++ u64 pkt_n;
+
+- data = pos;
+ do {
+- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+- snapshot = *rxq_stats;
+- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
++ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
++ pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
++ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+- p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
+- for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+- *data++ += (*(u64 *)p);
+- p += sizeof(u64);
+- }
++ *data++ = pkt_n;
++ *data++ = stmmac_get_rx_normal_irq_n(priv, q);
+ }
+ }
+
+@@ -638,39 +670,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
+ pos = j;
+ for (i = 0; i < rx_queues_count; i++) {
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
+- struct stmmac_rxq_stats snapshot;
++ struct stmmac_napi_rx_stats snapshot;
++ u64 n_irq;
+
+ j = pos;
+ do {
+- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+- snapshot = *rxq_stats;
+- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+-
+- data[j++] += snapshot.rx_pkt_n;
+- data[j++] += snapshot.rx_normal_irq_n;
+- normal_irq_n += snapshot.rx_normal_irq_n;
+- napi_poll += snapshot.napi_poll;
++ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
++ snapshot = rxq_stats->napi;
++ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
++
++ data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
++ n_irq = stmmac_get_rx_normal_irq_n(priv, i);
++ data[j++] += n_irq;
++ normal_irq_n += n_irq;
++ napi_poll += u64_stats_read(&snapshot.poll);
+ }
+
+ pos = j;
+ for (i = 0; i < tx_queues_count; i++) {
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
+- struct stmmac_txq_stats snapshot;
++ struct stmmac_napi_tx_stats napi_snapshot;
++ struct stmmac_q_tx_stats q_snapshot;
++ u64 n_irq;
+
+ j = pos;
+ do {
+- start = u64_stats_fetch_begin(&txq_stats->syncp);
+- snapshot = *txq_stats;
+- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+-
+- data[j++] += snapshot.tx_pkt_n;
+- data[j++] += snapshot.tx_normal_irq_n;
+- normal_irq_n += snapshot.tx_normal_irq_n;
+- data[j++] += snapshot.tx_clean;
+- data[j++] += snapshot.tx_set_ic_bit;
+- data[j++] += snapshot.tx_tso_frames;
+- data[j++] += snapshot.tx_tso_nfrags;
+- napi_poll += snapshot.napi_poll;
++ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
++ q_snapshot = txq_stats->q;
++ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
++ do {
++ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
++ napi_snapshot = txq_stats->napi;
++ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
++
++ data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
++ n_irq = stmmac_get_tx_normal_irq_n(priv, i);
++ data[j++] += n_irq;
++ normal_irq_n += n_irq;
++ data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
++ data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
++ u64_stats_read(&napi_snapshot.tx_set_ic_bit);
++ data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
++ data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
++ napi_poll += u64_stats_read(&napi_snapshot.poll);
+ }
+ normal_irq_n += priv->xstats.rx_early_irq;
+ data[j++] = normal_irq_n;
+@@ -825,10 +867,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
+ device_set_wakeup_enable(priv->device, 1);
+- enable_irq_wake(priv->wol_irq);
++ /* Avoid unbalanced enable_irq_wake calls */
++ if (priv->wol_irq_disabled)
++ enable_irq_wake(priv->wol_irq);
++ priv->wol_irq_disabled = false;
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+- disable_irq_wake(priv->wol_irq);
++ /* Avoid unbalanced disable_irq_wake calls */
++ if (!priv->wol_irq_disabled)
++ disable_irq_wake(priv->wol_irq);
++ priv->wol_irq_disabled = true;
+ }
+
+ mutex_lock(&priv->lock);
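Reading the new per-CPU IRQ counters back means summing every possible CPU's slot under the u64_stats seqcount, so 32-bit builds never observe a torn 64-bit value. This is the shape of the stmmac_get_rx_normal_irq_n()/stmmac_get_tx_normal_irq_n() helpers added above:

u64 total = 0;
int cpu;

for_each_possible_cpu(cpu) {
	struct stmmac_pcpu_stats *pcpu =
		per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
	unsigned int start;
	u64 n;

	do {
		start = u64_stats_fetch_begin(&pcpu->syncp);
		n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
	} while (u64_stats_fetch_retry(&pcpu->syncp, start));
	total += n;	/* retry loop guarantees a consistent snapshot */
}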
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 540f6a4ec0b81f..5ef52ef2698fbe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
+ {
+ u32 num_snapshot, ts_status, tsync_int;
+ struct ptp_clock_event event;
++ u32 acr_value, channel;
+ unsigned long flags;
+ u64 ptp_time;
+ int i;
+@@ -237,18 +238,21 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
+ */
+ ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
+
+- if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
++ if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
+ return;
+
+ num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+ GMAC_TIMESTAMP_ATSNS_SHIFT;
+
++ acr_value = readl(priv->ptpaddr + PTP_ACR);
++ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
++
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ get_ptptime(priv->ptpaddr, &ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+ event.type = PTP_CLOCK_EXTTS;
+- event.index = 0;
++ event.index = channel;
+ event.timestamp = ptp_time;
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
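The hwtstamp fix above stops hard-coding EXTTS index 0: with a single auxiliary snapshot enabled, FIELD_GET() pulls the ATSEN enable field out of PTP_ACR and ilog2() converts the set bit into a channel number (for example, ATSEN2 set gives channel 2):

u32 acr_value = readl(priv->ptpaddr + PTP_ACR);
u32 channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));

event.index = channel;	/* previously always 0 */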
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 5801f4d50f9512..d6167a7b19f21b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -964,7 +964,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (is_up && *hs_enable) {
+- stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
++ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
++ MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
+@@ -1197,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
+ return ret;
+ }
+
+-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
+-{
+- /* Half-Duplex can only work with single tx queue */
+- if (priv->plat->tx_queues_to_use > 1)
+- priv->phylink_config.mac_capabilities &=
+- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+- else
+- priv->phylink_config.mac_capabilities |=
+- (MAC_10HD | MAC_100HD | MAC_1000HD);
+-}
+-
+ static int stmmac_phy_setup(struct stmmac_priv *priv)
+ {
+ struct stmmac_mdio_bus_data *mdio_bus_data;
+@@ -1235,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
+ xpcs_get_interfaces(priv->hw->xpcs,
+ priv->phylink_config.supported_interfaces);
+
+- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+- MAC_10FD | MAC_100FD |
+- MAC_1000FD;
+-
+- stmmac_set_half_duplex(priv);
+-
+ /* Get the MAC specific capabilities */
+ stmmac_mac_phylink_get_caps(priv);
+
++ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
++
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+@@ -2441,7 +2427,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+ struct xdp_desc xdp_desc;
+ bool work_done = true;
+ u32 tx_set_ic_bit = 0;
+- unsigned long flags;
+
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+@@ -2514,9 +2499,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+ entry = tx_q->cur_tx;
+ }
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->tx_set_ic_bit += tx_set_ic_bit;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_update_begin(&txq_stats->napi_syncp);
++ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
++ u64_stats_update_end(&txq_stats->napi_syncp);
+
+ if (tx_desc) {
+ stmmac_flush_tx_descriptors(priv, queue);
+@@ -2560,7 +2545,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int entry, xmits = 0, count = 0;
+ u32 tx_packets = 0, tx_errors = 0;
+- unsigned long flags;
+
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+@@ -2716,11 +2700,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ stmmac_tx_timer_arm(priv, queue);
+
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->tx_packets += tx_packets;
+- txq_stats->tx_pkt_n += tx_packets;
+- txq_stats->tx_clean++;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_update_begin(&txq_stats->napi_syncp);
++ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
++ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
++ u64_stats_inc(&txq_stats->napi.tx_clean);
++ u64_stats_update_end(&txq_stats->napi_syncp);
+
+ priv->xstats.tx_errors += tx_errors;
+
+@@ -3548,6 +3532,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
+ /* Request the Wake IRQ in case of another line
+ * is used for WoL
+ */
++ priv->wol_irq_disabled = true;
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ int_name = priv->int_name_wol;
+ sprintf(int_name, "%s:%s", dev->name, "wol");
+@@ -3851,6 +3836,9 @@ static int __stmmac_open(struct net_device *dev,
+ priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+ buf_sz = dma_conf->dma_buf_sz;
++ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
++ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
++ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
+
+ stmmac_reset_queues_param(priv);
+@@ -3923,8 +3911,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+ {
+ set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+- if (priv->fpe_wq)
++ if (priv->fpe_wq) {
+ destroy_workqueue(priv->fpe_wq);
++ priv->fpe_wq = NULL;
++ }
+
+ netdev_info(priv->dev, "FPE workqueue stop");
+ }
+@@ -4129,7 +4119,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
+- unsigned long flags;
+ u32 pay_len, mss;
+ dma_addr_t des;
+ int i;
+@@ -4294,13 +4283,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->tx_bytes += skb->len;
+- txq_stats->tx_tso_frames++;
+- txq_stats->tx_tso_nfrags += nfrags;
++ u64_stats_update_begin(&txq_stats->q_syncp);
++ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
++ u64_stats_inc(&txq_stats->q.tx_tso_frames);
++ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
+ if (set_ic)
+- txq_stats->tx_set_ic_bit++;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
++ u64_stats_update_end(&txq_stats->q_syncp);
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+@@ -4354,6 +4343,28 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_OK;
+ }
+
++/**
++ * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
++ * @skb: socket buffer to check
++ *
++ * Check if a packet has an ethertype that will trigger the IP header checks
++ * and IP/TCP checksum engine of the stmmac core.
++ *
++ * Return: true if the ethertype can trigger the checksum engine, false
++ * otherwise
++ */
++static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
++{
++ int depth = 0;
++ __be16 proto;
++
++ proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
++ &depth);
++
++ return (depth <= ETH_HLEN) &&
++ (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
++}
++
+ /**
+ * stmmac_xmit - Tx entry point of the driver
+ * @skb : the socket buffer
+@@ -4377,7 +4388,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
+- unsigned long flags;
+ dma_addr_t des;
+
+ tx_q = &priv->dma_conf.tx_queue[queue];
+@@ -4415,6 +4425,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
++ /* DWMAC IPs can be synthesized to support tx coe only for a few tx
++ * queues. In that case, checksum offloading for those queues that don't
++ * support tx coe needs to fall back to software checksum calculation.
++ *
++ * Packets that won't trigger the COE e.g. most DSA-tagged packets will
++ * also have to be checksummed in software.
++ */
++ if (csum_insertion &&
++ (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
++ !stmmac_has_ip_ethertype(skb))) {
++ if (unlikely(skb_checksum_help(skb)))
++ goto dma_map_err;
++ csum_insertion = !csum_insertion;
++ }
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+@@ -4533,11 +4557,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->tx_bytes += skb->len;
++ u64_stats_update_begin(&txq_stats->q_syncp);
++ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ if (set_ic)
+- txq_stats->tx_set_ic_bit++;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
++ u64_stats_update_end(&txq_stats->q_syncp);
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+@@ -4801,12 +4825,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ set_ic = false;
+
+ if (set_ic) {
+- unsigned long flags;
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->tx_set_ic_bit++;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_update_begin(&txq_stats->q_syncp);
++ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
++ u64_stats_update_end(&txq_stats->q_syncp);
+ }
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+@@ -4956,7 +4979,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ unsigned int len = xdp->data_end - xdp->data;
+ enum pkt_hash_types hash_type;
+ int coe = priv->hw->rx_csum;
+- unsigned long flags;
+ struct sk_buff *skb;
+ u32 hash;
+
+@@ -4970,7 +4992,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+- if (unlikely(!coe))
++ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -4981,10 +5003,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rxtx_napi, skb);
+
+- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+- rxq_stats->rx_pkt_n++;
+- rxq_stats->rx_bytes += len;
+- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
++ u64_stats_update_begin(&rxq_stats->napi_syncp);
++ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
++ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
++ u64_stats_update_end(&rxq_stats->napi_syncp);
+ }
+
+ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+@@ -5066,7 +5088,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
+ unsigned int desc_size;
+ struct bpf_prog *prog;
+ bool failure = false;
+- unsigned long flags;
+ int xdp_status = 0;
+ int status = 0;
+
+@@ -5221,9 +5242,9 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+- rxq_stats->rx_pkt_n += count;
+- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
++ u64_stats_update_begin(&rxq_stats->napi_syncp);
++ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
++ u64_stats_update_end(&rxq_stats->napi_syncp);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
+@@ -5261,12 +5282,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ unsigned int desc_size;
+ struct sk_buff *skb = NULL;
+ struct stmmac_xdp_buff ctx;
+- unsigned long flags;
+ int xdp_status = 0;
+ int buf_sz;
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
++ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+@@ -5302,10 +5323,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ len = 0;
+ }
+
++read_again:
+ if (count >= limit)
+ break;
+
+-read_again:
+ buf1_len = 0;
+ buf2_len = 0;
+ entry = next_entry;
+@@ -5485,7 +5506,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+- if (unlikely(!coe))
++ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -5513,11 +5534,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+
+ stmmac_rx_refill(priv, queue);
+
+- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+- rxq_stats->rx_packets += rx_packets;
+- rxq_stats->rx_bytes += rx_bytes;
+- rxq_stats->rx_pkt_n += count;
+- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
++ u64_stats_update_begin(&rxq_stats->napi_syncp);
++ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
++ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
++ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
++ u64_stats_update_end(&rxq_stats->napi_syncp);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
+@@ -5532,13 +5553,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
+ struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_rxq_stats *rxq_stats;
+ u32 chan = ch->index;
+- unsigned long flags;
+ int work_done;
+
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+- rxq_stats->napi_poll++;
+- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
++ u64_stats_update_begin(&rxq_stats->napi_syncp);
++ u64_stats_inc(&rxq_stats->napi.poll);
++ u64_stats_update_end(&rxq_stats->napi_syncp);
+
+ work_done = stmmac_rx(priv, budget, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+@@ -5559,13 +5579,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
+ struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_txq_stats *txq_stats;
+ u32 chan = ch->index;
+- unsigned long flags;
+ int work_done;
+
+ txq_stats = &priv->xstats.txq_stats[chan];
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->napi_poll++;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_update_begin(&txq_stats->napi_syncp);
++ u64_stats_inc(&txq_stats->napi.poll);
++ u64_stats_update_end(&txq_stats->napi_syncp);
+
+ work_done = stmmac_tx_clean(priv, budget, chan);
+ work_done = min(work_done, budget);
+@@ -5590,17 +5609,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
+ struct stmmac_rxq_stats *rxq_stats;
+ struct stmmac_txq_stats *txq_stats;
+ u32 chan = ch->index;
+- unsigned long flags;
+
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+- rxq_stats->napi_poll++;
+- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
++ u64_stats_update_begin(&rxq_stats->napi_syncp);
++ u64_stats_inc(&rxq_stats->napi.poll);
++ u64_stats_update_end(&rxq_stats->napi_syncp);
+
+ txq_stats = &priv->xstats.txq_stats[chan];
+- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+- txq_stats->napi_poll++;
+- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
++ u64_stats_update_begin(&txq_stats->napi_syncp);
++ u64_stats_inc(&txq_stats->napi.poll);
++ u64_stats_update_end(&txq_stats->napi_syncp);
+
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = min(tx_done, budget);
+@@ -5802,6 +5820,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+ /* If user has requested FPE enable, quickly response */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++ fpe_cfg,
+ MPACKET_RESPONSE);
+ }
+
+@@ -5919,11 +5938,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- if (unlikely(!dev)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -5939,11 +5953,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- if (unlikely(!dev)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -5965,11 +5974,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+
+- if (unlikely(!data)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -5996,11 +6000,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+
+- if (unlikely(!data)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -6474,7 +6473,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
+ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ {
+ u32 crc, hash = 0;
+- __le16 pmatch = 0;
++ u16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
+
+@@ -6489,7 +6488,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+- pmatch = cpu_to_le16(vid);
++ pmatch = vid;
+ hash = 0;
+ }
+
+@@ -6921,10 +6920,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
+ u64 tx_bytes;
+
+ do {
+- start = u64_stats_fetch_begin(&txq_stats->syncp);
+- tx_packets = txq_stats->tx_packets;
+- tx_bytes = txq_stats->tx_bytes;
+- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
++ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
++ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
++ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
++ do {
++ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
++ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
++ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+@@ -6936,10 +6938,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
+ u64 rx_bytes;
+
+ do {
+- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+- rx_packets = rxq_stats->rx_packets;
+- rx_bytes = rxq_stats->rx_bytes;
+- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
++ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
++ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
++ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
++ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
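
The stats hunks in this file split each queue's counters between a q_syncp (updated from the xmit path) and a napi_syncp (updated from NAPI poll), so every u64_stats_sync has a single writer context and the irqsave variants can be dropped. A sketch of the writer/reader pairing, assuming kernel context:

    #include <linux/u64_stats_sync.h>

    struct example_q_stats {
            struct u64_stats_sync syncp;
            u64_stats_t bytes;
    };

    /* Writer: always runs in one context, so no irqsave flavour needed. */
    static void example_add_bytes(struct example_q_stats *s, unsigned int len)
    {
            u64_stats_update_begin(&s->syncp);
            u64_stats_add(&s->bytes, len);
            u64_stats_update_end(&s->syncp);
    }

    /* Reader: the retry loop yields a consistent snapshot on 32-bit. */
    static u64 example_read_bytes(struct example_q_stats *s)
    {
            unsigned int start;
            u64 bytes;

            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    bytes = u64_stats_read(&s->bytes);
            } while (u64_stats_fetch_retry(&s->syncp, start));

            return bytes;
    }
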
+@@ -7167,6 +7169,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0, i;
++ int max_speed;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+@@ -7180,7 +7183,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i,
+ rx_cnt);
+
+- stmmac_set_half_duplex(priv);
++ stmmac_mac_phylink_get_caps(priv);
++
++ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
++
++ max_speed = priv->plat->max_speed;
++ if (max_speed)
++ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
++
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+@@ -7226,6 +7236,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
++ fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+@@ -7244,6 +7255,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
+ netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++ fpe_cfg,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+@@ -7258,6 +7270,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++ priv->plat->fpe_cfg,
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+@@ -7330,9 +7343,16 @@ int stmmac_dvr_probe(struct device *device,
+ priv->dev = ndev;
+
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+- u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
+- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+- u64_stats_init(&priv->xstats.txq_stats[i].syncp);
++ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
++ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
++ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
++ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
++ }
++
++ priv->xstats.pcpu_stats =
++ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
++ if (!priv->xstats.pcpu_stats)
++ return -ENOMEM;
+
+ stmmac_set_ethtool_ops(ndev);
+ priv->pause = pause;
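
The probe hunk above adds per-CPU counters alongside the per-queue ones. A sketch of how devm_netdev_alloc_pcpu_stats() is typically used, assuming kernel context; the field layout is illustrative, not the driver's actual struct stmmac_pcpu_stats:

    #include <linux/netdevice.h>

    struct example_pcpu_stats {
            struct u64_stats_sync syncp;    /* required by the allocator */
            u64_stats_t rx_packets;
            u64_stats_t rx_bytes;
    };

    struct example_priv {
            struct example_pcpu_stats __percpu *pcpu_stats;
    };

    static int example_alloc_stats(struct device *dev,
                                   struct example_priv *priv)
    {
            /* One instance per CPU, each syncp initialised, lifetime
             * tied to @dev (freed automatically on driver detach).
             */
            priv->pcpu_stats =
                    devm_netdev_alloc_pcpu_stats(dev, struct example_pcpu_stats);
            if (!priv->pcpu_stats)
                    return -ENOMEM;
            return 0;
    }
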
+@@ -7398,6 +7418,9 @@ int stmmac_dvr_probe(struct device *device,
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
++ /* Wait a bit for the reset to take effect */
++ udelay(10);
++
+ /* Init MAC and get the capabilities */
+ ret = stmmac_hw_init(priv);
+ if (ret)
+@@ -7718,6 +7741,7 @@ int stmmac_suspend(struct device *dev)
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
++ priv->plat->fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+index fa9e7e7040b945..0542cfd1817e62 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -591,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev)
+ new_bus->parent = priv->device;
+
+ err = of_mdiobus_register(new_bus, mdio_node);
+- if (err != 0) {
++ if (err == -ENODEV) {
++ err = 0;
++ dev_info(dev, "MDIO bus is disabled\n");
++ goto bus_register_fail;
++ } else if (err) {
+ dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
+ goto bus_register_fail;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 2f0678f15fb7e7..30d5e635190e66 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -276,6 +276,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
+ plat->tx_queues_cfg[queue].use_prio = true;
+ }
+
++ plat->tx_queues_cfg[queue].coe_unsupported =
++ of_property_read_bool(q_node, "snps,coe-unsupported");
++
+ queue++;
+ }
+ if (queue != plat->tx_queues_to_use) {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+index 3d7825cb30bb1d..a04bb2e42c4ee1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+@@ -70,11 +70,11 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+ /* If EST is enabled, disabled it before adjust ptp time. */
+ if (priv->plat->est && priv->plat->est->enable) {
+ est_rst = true;
+- mutex_lock(&priv->plat->est->lock);
++ mutex_lock(&priv->est_lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+- mutex_unlock(&priv->plat->est->lock);
++ mutex_unlock(&priv->est_lock);
+ }
+
+ write_lock_irqsave(&priv->ptp_lock, flags);
+@@ -87,7 +87,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+ ktime_t current_time_ns, basetime;
+ u64 cycle_time;
+
+- mutex_lock(&priv->plat->est->lock);
++ mutex_lock(&priv->est_lock);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time.tv_nsec = priv->plat->est->btr_reserve[0];
+@@ -104,7 +104,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+ priv->plat->est->enable = true;
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+- mutex_unlock(&priv->plat->est->lock);
++ mutex_unlock(&priv->est_lock);
+ if (ret)
+ netdev_err(priv->dev, "failed to configure EST\n");
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index ac41ef4cbd2f02..97ff2dd8f2aecd 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt)
+ {
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
++ s64 port_transmit_rate_kbps;
+ u32 queue = qopt->queue;
+- u32 ptr, speed_div;
+ u32 mode_to_use;
+ u64 value;
++ u32 ptr;
+ int ret;
+
+ /* Queue 0 is not AVB capable */
+@@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ if (!priv->dma_cap.av)
+ return -EOPNOTSUPP;
+
+- /* Port Transmit Rate and Speed Divider */
+- switch (priv->speed) {
+- case SPEED_10000:
+- ptr = 32;
+- speed_div = 10000000;
+- break;
+- case SPEED_5000:
+- ptr = 32;
+- speed_div = 5000000;
+- break;
+- case SPEED_2500:
+- ptr = 8;
+- speed_div = 2500000;
+- break;
+- case SPEED_1000:
+- ptr = 8;
+- speed_div = 1000000;
+- break;
+- case SPEED_100:
+- ptr = 4;
+- speed_div = 100000;
+- break;
+- default:
+- return -EOPNOTSUPP;
++ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
++
++ if (qopt->enable) {
++ /* Port Transmit Rate and Speed Divider */
++ switch (div_s64(port_transmit_rate_kbps, 1000)) {
++ case SPEED_10000:
++ case SPEED_5000:
++ ptr = 32;
++ break;
++ case SPEED_2500:
++ case SPEED_1000:
++ ptr = 8;
++ break;
++ case SPEED_100:
++ ptr = 4;
++ break;
++ default:
++ netdev_err(priv->dev,
++ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
++ port_transmit_rate_kbps);
++ return -EINVAL;
++ }
++ } else {
++ ptr = 0;
+ }
+
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+@@ -395,13 +396,14 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
++ return 0;
+ }
+
+ /* Final adjustments for HW */
+- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
++ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
++ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
+ priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+ value = qopt->hicredit * 1024ll * 8;
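
The tc_setup_cbs() rework relies on an 802.1Q credit-based shaper identity: idleSlope - sendSlope equals portTransmitRate (sendSlope is negative), so the scaling divisor can come from the qdisc parameters rather than the negotiated link speed. A hedged standalone sketch of the arithmetic:

    #include <stdint.h>

    /* portTransmitRate in kbit/s, derived from the CBS parameters */
    static int64_t port_transmit_rate_kbps(int64_t idleslope, int64_t sendslope)
    {
            return idleslope - sendslope;   /* sendslope <= 0 */
    }

    /* Fixed-point slope for the hardware, as in the hunk above:
     * value = slope * 1024 * ptr / portTransmitRate.
     */
    static uint32_t cbs_hw_slope(int64_t slope_kbps, int64_t rate_kbps,
                                 uint32_t ptr)
    {
            return (uint32_t)((slope_kbps * 1024LL * ptr) / rate_kbps);
    }
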
+@@ -982,17 +984,19 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
+ if (!plat->est)
+ return -ENOMEM;
+
+- mutex_init(&priv->plat->est->lock);
++ mutex_init(&priv->est_lock);
+ } else {
++ mutex_lock(&priv->est_lock);
+ memset(plat->est, 0, sizeof(*plat->est));
++ mutex_unlock(&priv->est_lock);
+ }
+
+ size = qopt->num_entries;
+
+- mutex_lock(&priv->plat->est->lock);
++ mutex_lock(&priv->est_lock);
+ priv->plat->est->gcl_size = size;
+ priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
+- mutex_unlock(&priv->plat->est->lock);
++ mutex_unlock(&priv->est_lock);
+
+ for (i = 0; i < size; i++) {
+ s64 delta_ns = qopt->entries[i].interval;
+@@ -1023,7 +1027,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
+ priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ }
+
+- mutex_lock(&priv->plat->est->lock);
++ mutex_lock(&priv->est_lock);
+ /* Adjust for real system time */
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+@@ -1042,7 +1046,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
+ priv->plat->est->ctr[1] = (u32)ctr;
+
+ if (fpe && !priv->dma_cap.fpesel) {
+- mutex_unlock(&priv->plat->est->lock);
++ mutex_unlock(&priv->est_lock);
+ return -EOPNOTSUPP;
+ }
+
+@@ -1053,7 +1057,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
+
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+- mutex_unlock(&priv->plat->est->lock);
++ mutex_unlock(&priv->est_lock);
+ if (ret) {
+ netdev_err(priv->dev, "failed to configure EST\n");
+ goto disable;
+@@ -1070,15 +1074,16 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
+
+ disable:
+ if (priv->plat->est) {
+- mutex_lock(&priv->plat->est->lock);
++ mutex_lock(&priv->est_lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+- mutex_unlock(&priv->plat->est->lock);
++ mutex_unlock(&priv->est_lock);
+ }
+
+ priv->plat->fpe_cfg->enable = false;
+ stmmac_fpe_configure(priv, priv->ioaddr,
++ priv->plat->fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false);
+diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
+index 9bd1df8308d24a..d3a2fbb14140e9 100644
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-static void gem_poll_controller(struct net_device *dev)
+-{
+- struct gem *gp = netdev_priv(dev);
+-
+- disable_irq(gp->pdev->irq);
+- gem_interrupt(gp->pdev->irq, dev);
+- enable_irq(gp->pdev->irq);
+-}
+-#endif
+-
+ static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ struct gem *gp = netdev_priv(dev);
+@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
+ .ndo_change_mtu = gem_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = gem_set_mac_address,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+- .ndo_poll_controller = gem_poll_controller,
+-#endif
+ };
+
+ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 24120605502f9e..d556e705ec000d 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -56,7 +56,7 @@
+ #define AM65_CPSW_MAX_PORTS 8
+
+ #define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
+-#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
++#define AM65_CPSW_MAX_PACKET_SIZE 2024
+
+ #define AM65_CPSW_REG_CTL 0x004
+ #define AM65_CPSW_REG_STAT_PORT_EN 0x014
+@@ -2167,7 +2167,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
+ eth_hw_addr_set(port->ndev, port->slave.mac_addr);
+
+ port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
+- port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
++ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
++ (VLAN_ETH_HLEN + ETH_FCS_LEN);
+ port->ndev->hw_features = NETIF_F_SG |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+@@ -2715,6 +2716,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
+
+ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ {
++ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
++ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int ret = 0, i;
+@@ -2727,6 +2730,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ if (ret)
+ return ret;
+
++ /* The DMA Channels are not guaranteed to be in a clean state.
++ * Reset and disable them to ensure that they are back to the
++ * clean state and ready to be used.
++ */
++ for (i = 0; i < common->tx_ch_num; i++) {
++ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
++ am65_cpsw_nuss_tx_cleanup);
++ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
++ }
++
++ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
++ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
++ am65_cpsw_nuss_rx_cleanup, !!i);
++
++ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
++
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
+index c66618d91c28fe..f89716b1cfb640 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.c
++++ b/drivers/net/ethernet/ti/am65-cpts.c
+@@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
++ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
++ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
++ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
++ mtype_seqid = skb_cb->skb_mtype_seqid;
++
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index ca4d4548f85e30..2ed165dcdbdcf0 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ }
+ }
+
++ phy->mac_managed_pm = true;
++
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
+index 0e4f526b17532e..9061dca97fcbfd 100644
+--- a/drivers/net/ethernet/ti/cpsw_new.c
++++ b/drivers/net/ethernet/ti/cpsw_new.c
+@@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ slave->slave_num);
+ return;
+ }
++
++ phy->mac_managed_pm = true;
++
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 4cf2a52e43783f..3025e9c189702b 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -177,7 +177,7 @@ static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns), iep->base +
+ iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+- writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
++ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+ }
+
+ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+index 6df53ab17fbc50..902a2717785cb9 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+@@ -360,7 +360,7 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
+ {
+ const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
+
+- rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
++ rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
+ rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index 4914d0ef58e9b9..fb120baee55324 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
+ if (!i)
+ fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+- if (rx_chn->irq[i] <= 0) {
+- ret = rx_chn->irq[i];
++ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
++ if (ret <= 0) {
++ if (!ret)
++ ret = -ENXIO;
+ netdev_err(ndev, "Failed to get rx dma irq");
+ goto fail;
+ }
++ rx_chn->irq[i] = ret;
+ }
+
+ return 0;
+@@ -2050,7 +2052,7 @@ static int prueth_probe(struct platform_device *pdev)
+ &prueth->shram);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
+- pruss_put(prueth->pruss);
++ goto put_pruss;
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+@@ -2092,10 +2094,7 @@ static int prueth_probe(struct platform_device *pdev)
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
+- icss_iep_put(prueth->iep0);
+- prueth->iep0 = NULL;
+- prueth->iep1 = NULL;
+- goto free_pool;
++ goto put_iep0;
+ }
+
+ if (prueth->pdata.quirk_10m_link_issue) {
+@@ -2137,7 +2136,12 @@ static int prueth_probe(struct platform_device *pdev)
+
+ prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+
+- emac_phy_connect(prueth->emac[PRUETH_MAC0]);
++ ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
++ if (ret) {
++ dev_err(dev,
++ "can't connect to MII0 PHY, error -%d", ret);
++ goto netdev_unregister;
++ }
+ phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
+ }
+
+@@ -2149,7 +2153,12 @@ static int prueth_probe(struct platform_device *pdev)
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+- emac_phy_connect(prueth->emac[PRUETH_MAC1]);
++ ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
++ if (ret) {
++ dev_err(dev,
++ "can't connect to MII1 PHY, error %d", ret);
++ goto netdev_unregister;
++ }
+ phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
+ }
+
+@@ -2185,6 +2194,12 @@ static int prueth_probe(struct platform_device *pdev)
+ exit_iep:
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
++ icss_iep_put(prueth->iep1);
++
++put_iep0:
++ icss_iep_put(prueth->iep0);
++ prueth->iep0 = NULL;
++ prueth->iep1 = NULL;
+
+ free_pool:
+ gen_pool_free(prueth->sram_pool,
+@@ -2192,6 +2207,8 @@ static int prueth_probe(struct platform_device *pdev)
+
+ put_mem:
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
++
++put_pruss:
+ pruss_put(prueth->pruss);
+
+ put_cores:
+diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
+index 50d7eacfec5827..87e67121477cb5 100644
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
+ struct spider_net_card *card;
+
+ netdev = alloc_etherdev(struct_size(card, darray,
+- tx_descriptors + rx_descriptors));
++ size_add(tx_descriptors, rx_descriptors)));
+ if (!netdev)
+ return NULL;
+
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index 85dc16faca5440..52130df26aee53 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1677,10 +1677,12 @@ int wx_sw_init(struct wx *wx)
+ wx->subsystem_device_id = pdev->subsystem_device;
+ } else {
+ err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
+- if (!err)
+- wx->subsystem_device_id = swab16((u16)ssid);
++ if (err < 0) {
++ wx_err(wx, "read of internal subsystem device id failed\n");
++ return err;
++ }
+
+- return err;
++ wx->subsystem_device_id = swab16((u16)ssid);
+ }
+
+ wx->mac_table = kcalloc(wx->mac.num_rar_entries,
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index e04d4a5eed7ba0..c37500aa063791 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+ }
+
+-static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
+- int rx_buffer_pgcnt)
+-{
+- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+- struct page *page = rx_buffer->page;
+-
+- /* avoid re-using remote and pfmemalloc pages */
+- if (!dev_page_is_reusable(page))
+- return false;
+-
+-#if (PAGE_SIZE < 8192)
+- /* if we are only owner of page we can reuse it */
+- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+- return false;
+-#endif
+-
+- /* If we have drained the page fragment pool we need to update
+- * the pagecnt_bias and page count so that we fully restock the
+- * number of references the driver holds.
+- */
+- if (unlikely(pagecnt_bias == 1)) {
+- page_ref_add(page, USHRT_MAX - 1);
+- rx_buffer->pagecnt_bias = USHRT_MAX;
+- }
+-
+- return true;
+-}
+-
+-/**
+- * wx_reuse_rx_page - page flip buffer and store it back on the ring
+- * @rx_ring: rx descriptor ring to store buffers on
+- * @old_buff: donor buffer to have page reused
+- *
+- * Synchronizes page for reuse by the adapter
+- **/
+-static void wx_reuse_rx_page(struct wx_ring *rx_ring,
+- struct wx_rx_buffer *old_buff)
+-{
+- u16 nta = rx_ring->next_to_alloc;
+- struct wx_rx_buffer *new_buff;
+-
+- new_buff = &rx_ring->rx_buffer_info[nta];
+-
+- /* update, and store next to alloc */
+- nta++;
+- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+-
+- /* transfer page from old buffer to new buffer */
+- new_buff->page = old_buff->page;
+- new_buff->page_dma = old_buff->page_dma;
+- new_buff->page_offset = old_buff->page_offset;
+- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+-}
+-
+ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer)
+ {
+@@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
+ size,
+ DMA_FROM_DEVICE);
+ skip_sync:
+- rx_buffer->pagecnt_bias--;
+-
+ return rx_buffer;
+ }
+
+@@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
+ {
+- if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+- /* hand second half of page back to the ring */
+- wx_reuse_rx_page(rx_ring, rx_buffer);
+- } else {
+- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+- /* the page has been released from the ring */
+- WX_CB(skb)->page_released = true;
+- else
+- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+-
+- __page_frag_cache_drain(rx_buffer->page,
+- rx_buffer->pagecnt_bias);
+- }
++ if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
++ /* the page has been released from the ring */
++ WX_CB(skb)->page_released = true;
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+@@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
+ if (size <= WX_RXBUFFER_256) {
+ memcpy(__skb_put(skb, size), page_addr,
+ ALIGN(size, sizeof(long)));
+- rx_buffer->pagecnt_bias++;
+-
++ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
+ return skb;
+ }
+
++ skb_mark_for_recycle(skb);
++
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
+ WX_CB(skb)->dma = rx_buffer->dma;
+
+@@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ bi->page_dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+- page_ref_add(page, USHRT_MAX - 1);
+- bi->pagecnt_bias = USHRT_MAX;
+
+ return true;
+ }
+@@ -721,7 +654,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+- rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+@@ -1725,6 +1657,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
++ wx->num_q_vectors = 1;
+
+ return 0;
+ }
+@@ -1965,11 +1898,11 @@ void wx_reset_interrupt_capability(struct wx *wx)
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return;
+
+- pci_free_irq_vectors(wx->pdev);
+ if (pdev->msix_enabled) {
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ }
++ pci_free_irq_vectors(wx->pdev);
+ }
+ EXPORT_SYMBOL(wx_reset_interrupt_capability);
+
+@@ -2241,8 +2174,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+
+ /* free resources associated with mapping */
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+- __page_frag_cache_drain(rx_buffer->page,
+- rx_buffer->pagecnt_bias);
+
+ i++;
+ rx_buffer++;
+@@ -2716,12 +2647,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
+ else
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
+
++ netdev->features = features;
++
+ if (changed &
+ (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX))
+ wx_set_rx_mode(netdev);
+
+- return 1;
++ return 0;
+ }
+ EXPORT_SYMBOL(wx_set_features);
+
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index c5cbd177ef6275..c555af9ed51b29 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -759,7 +759,6 @@ struct wx_rx_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ unsigned int page_offset;
+- u16 pagecnt_bias;
+ };
+
+ struct wx_queue_stats {
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index 2b431db6085a61..a4d63d2f3c5bbe 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx)
+
+ /* PCI config space info */
+ err = wx_sw_init(wx);
+- if (err < 0) {
+- wx_err(wx, "read of internal subsystem device id failed\n");
++ if (err < 0)
+ return err;
+- }
+
+ /* mac type, phy type , oem type */
+ ngbe_init_type_code(wx);
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+index 591f5b7b6da658..5007addd119aa5 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+@@ -215,10 +215,14 @@ int ngbe_phy_connect(struct wx *wx)
+ {
+ int ret;
+
++ /* The MAC can only add the Tx delay, and this cannot be modified.
++ * So just disable the Tx delay in the PHY; it does not matter to
++ * the internal phy.
++ */
+ ret = phy_connect_direct(wx->netdev,
+ wx->phydev,
+ ngbe_handle_link_change,
+- PHY_INTERFACE_MODE_RGMII_ID);
++ PHY_INTERFACE_MODE_RGMII_RXID);
+ if (ret) {
+ wx_err(wx, "PHY connect failed.\n");
+ return ret;
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index 5c3aed516ac208..d60c26ba0ba4c9 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -362,10 +362,8 @@ static int txgbe_sw_init(struct wx *wx)
+
+ /* PCI config space info */
+ err = wx_sw_init(wx);
+- if (err < 0) {
+- wx_err(wx, "read of internal subsystem device id failed\n");
++ if (err < 0)
+ return err;
+- }
+
+ txgbe_init_type_code(wx);
+
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 1444b855e7aa36..c10f94d69dad3b 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1443,7 +1443,7 @@ static int temac_probe(struct platform_device *pdev)
+ }
+
+ /* map device registers */
+- lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
++ lp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lp->regs)) {
+ dev_err(&pdev->dev, "could not map TEMAC registers\n");
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 575ff9de8985b7..2facbdfbb319e7 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -159,16 +159,17 @@
+ #define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
+ #define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
+ #define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
+-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
+-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
++#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
++#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
+ #define XAE_ID_OFFSET 0x000004F8 /* Identification register */
+-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
+-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
+-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
+-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
++#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
++#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
++#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
++#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
+ #define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
+ #define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
+-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
++#define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */
++#define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */
+ #define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
+ #define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
+
+@@ -307,7 +308,7 @@
+ */
+ #define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
+
+-/* Bit masks for Axi Ethernet FMI register */
++/* Bit masks for Axi Ethernet FMC register */
+ #define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
+ #define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
+
+@@ -418,6 +419,8 @@ struct axidma_bd {
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
+ * @dma_err_task: Work structure to process Axi DMA errors
++ * @stopping: Set when @dma_err_task shouldn't do anything because we are
++ * about to stop the device.
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @eth_irq: Ethernet core IRQ number
+@@ -480,6 +483,7 @@ struct axienet_local {
+ struct u64_stats_sync tx_stat_sync;
+
+ struct work_struct dma_err_task;
++ bool stopping;
+
+ int tx_irq;
+ int rx_irq;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index b7ec4dafae90cb..62c10eb4f0adf1 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -412,7 +412,7 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
+ */
+ static void axienet_set_multicast_list(struct net_device *ndev)
+ {
+- int i;
++ int i = 0;
+ u32 reg, af0reg, af1reg;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+@@ -430,7 +430,10 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+- i = 0;
++ reg = axienet_ior(lp, XAE_FMI_OFFSET);
++ reg &= ~XAE_FMI_PM_MASK;
++ axienet_iow(lp, XAE_FMI_OFFSET, reg);
++
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
+ break;
+@@ -449,6 +452,7 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
+ axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
++ axienet_iow(lp, XAE_FFE_OFFSET, 1);
+ i++;
+ }
+ } else {
+@@ -456,18 +460,15 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ reg &= ~XAE_FMI_PM_MASK;
+
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+-
+- for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+- reg |= i;
+-
+- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+- axienet_iow(lp, XAE_AF0_OFFSET, 0);
+- axienet_iow(lp, XAE_AF1_OFFSET, 0);
+- }
+-
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ }
++
++ for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
++ reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
++ reg |= i;
++ axienet_iow(lp, XAE_FMI_OFFSET, reg);
++ axienet_iow(lp, XAE_FFE_OFFSET, 0);
++ }
+ }
+
+ /**
+@@ -651,15 +652,15 @@ static int axienet_device_reset(struct net_device *ndev)
+ *
+ * Would either be called after a successful transmit operation, or after
+ * there was an error when setting up the chain.
+- * Returns the number of descriptors handled.
++ * Returns the number of packets handled.
+ */
+ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ int nr_bds, bool force, u32 *sizep, int budget)
+ {
+ struct axidma_bd *cur_p;
+ unsigned int status;
++ int i, packets = 0;
+ dma_addr_t phys;
+- int i;
+
+ for (i = 0; i < nr_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+@@ -678,8 +679,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
++ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ napi_consume_skb(cur_p->skb, budget);
++ packets++;
++ }
+
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+@@ -695,7 +698,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ }
+
+- return i;
++ if (!force) {
++ lp->tx_bd_ci += i;
++ if (lp->tx_bd_ci >= lp->tx_bd_num)
++ lp->tx_bd_ci %= lp->tx_bd_num;
++ }
++
++ return packets;
+ }
+
+ /**
+@@ -746,13 +755,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
+ u32 size = 0;
+ int packets;
+
+- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
++ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
++ &size, budget);
+
+ if (packets) {
+- lp->tx_bd_ci += packets;
+- if (lp->tx_bd_ci >= lp->tx_bd_num)
+- lp->tx_bd_ci %= lp->tx_bd_num;
+-
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+@@ -822,7 +828,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ /* Tx Full Checksum Offload Enabled */
+ cur_p->app0 |= 2;
+- } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
++ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ csum_start_off = skb_transport_offset(skb);
+ csum_index_off = csum_start_off + skb->csum_offset;
+ /* Tx Partial Checksum Offload Enabled */
+@@ -1041,9 +1047,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+ u32 cr = lp->tx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+-
+- napi_schedule(&lp->napi_tx);
++ if (napi_schedule_prep(&lp->napi_tx)) {
++ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
++ __napi_schedule(&lp->napi_tx);
++ }
+ }
+
+ return IRQ_HANDLED;
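
The axienet IRQ changes above (and the matching RX hunk below) follow the standard race-free NAPI kick: only mask device interrupts once napi_schedule_prep() confirms this context owns the schedule. A sketch with hypothetical helpers, assuming kernel context:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct example_ch {
            struct napi_struct napi;
            /* device registers, etc. */
    };

    static void example_mask_irqs(struct example_ch *ch);  /* hypothetical */

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct example_ch *ch = data;

            /* If prep fails, NAPI is already scheduled or disabled, so
             * leaving IRQs unmasked here cannot strand the device.
             */
            if (napi_schedule_prep(&ch->napi)) {
                    example_mask_irqs(ch);
                    __napi_schedule(&ch->napi);
            }
            return IRQ_HANDLED;
    }
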
+@@ -1085,9 +1092,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+-
+- napi_schedule(&lp->napi_rx);
++ if (napi_schedule_prep(&lp->napi_rx)) {
++ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
++ __napi_schedule(&lp->napi_rx);
++ }
+ }
+
+ return IRQ_HANDLED;
+@@ -1161,6 +1169,7 @@ static int axienet_open(struct net_device *ndev)
+ phylink_start(lp->phylink);
+
+ /* Enable worker thread for Axi DMA error handling */
++ lp->stopping = false;
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+
+ napi_enable(&lp->napi_rx);
+@@ -1216,6 +1225,9 @@ static int axienet_stop(struct net_device *ndev)
+
+ dev_dbg(&ndev->dev, "axienet_close()\n");
+
++ WRITE_ONCE(lp->stopping, true);
++ flush_work(&lp->dma_err_task);
++
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
+@@ -1760,6 +1772,10 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ dma_err_task);
+ struct net_device *ndev = lp->ndev;
+
++ /* Don't bother if we are going to stop anyway */
++ if (READ_ONCE(lp->stopping))
++ return;
++
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
+@@ -1826,9 +1842,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+- axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
++ axienet_setoptions(ndev, lp->options);
+ }
+
+ /**
+diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
+index 704e949484d0c1..b9b5554ea8620e 100644
+--- a/drivers/net/fjes/fjes_hw.c
++++ b/drivers/net/fjes/fjes_hw.c
+@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+
+ mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
+ hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
+- if (!(hw->hw_info.req_buf))
+- return -ENOMEM;
++ if (!(hw->hw_info.req_buf)) {
++ result = -ENOMEM;
++ goto free_ep_info;
++ }
+
+ hw->hw_info.req_buf_size = mem_size;
+
+ mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
+ hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
+- if (!(hw->hw_info.res_buf))
+- return -ENOMEM;
++ if (!(hw->hw_info.res_buf)) {
++ result = -ENOMEM;
++ goto free_req_buf;
++ }
+
+ hw->hw_info.res_buf_size = mem_size;
+
+ result = fjes_hw_alloc_shared_status_region(hw);
+ if (result)
+- return result;
++ goto free_res_buf;
+
+ hw->hw_info.buffer_share_bit = 0;
+ hw->hw_info.buffer_unshare_reserve_bit = 0;
+@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+
+ result = fjes_hw_alloc_epbuf(&buf_pair->tx);
+ if (result)
+- return result;
++ goto free_epbuf;
+
+ result = fjes_hw_alloc_epbuf(&buf_pair->rx);
+ if (result)
+- return result;
++ goto free_epbuf;
+
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ fjes_hw_setup_epbuf(&buf_pair->tx, mac,
+@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
+ fjes_hw_init_command_registers(hw, &param);
+
+ return 0;
++
++free_epbuf:
++ for (epidx = 0; epidx < hw->max_epid ; epidx++) {
++ if (epidx == hw->my_epid)
++ continue;
++ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
++ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
++ }
++ fjes_hw_free_shared_status_region(hw);
++free_res_buf:
++ kfree(hw->hw_info.res_buf);
++ hw->hw_info.res_buf = NULL;
++free_req_buf:
++ kfree(hw->hw_info.req_buf);
++ hw->hw_info.req_buf = NULL;
++free_ep_info:
++ kfree(hw->ep_shm_info);
++ hw->ep_shm_info = NULL;
++ return result;
+ }
+
+ static void fjes_hw_cleanup(struct fjes_hw *hw)
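
The fjes fix above converts early returns into a goto-unwind ladder so a mid-setup failure releases everything acquired so far. The same shape in a small standalone C sketch:

    #include <stdlib.h>

    struct hw_bufs { void *req, *res; };

    static int hw_setup(struct hw_bufs *b, size_t req_sz, size_t res_sz)
    {
            b->req = calloc(1, req_sz);
            if (!b->req)
                    return -1;

            b->res = calloc(1, res_sz);
            if (!b->res)
                    goto free_req;

            return 0;

    free_req:                       /* labels free in reverse order */
            free(b->req);
            b->req = NULL;
            return -1;
    }
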
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 78f9d588f7129d..8333a5620deffd 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ struct genevehdr *gnvh = geneve_hdr(skb);
+ struct metadata_dst *tun_dst = NULL;
+ unsigned int len;
+- int err = 0;
++ int nh, err = 0;
+ void *oiph;
+
+ if (ip_tunnel_collect_metadata() || gs->collect_md) {
+@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ skb->pkt_type = PACKET_HOST;
+ }
+
+- oiph = skb_network_header(skb);
++ /* Save offset of outer header relative to skb->head,
++ * because we are going to reset the network header to the inner header
++ * and might change skb->head.
++ */
++ nh = skb_network_header(skb) - skb->head;
++
+ skb_reset_network_header(skb);
+
++ if (!pskb_inet_may_pull(skb)) {
++ DEV_STATS_INC(geneve->dev, rx_length_errors);
++ DEV_STATS_INC(geneve->dev, rx_errors);
++ goto drop;
++ }
++
++ /* Get the outer header. */
++ oiph = skb->head + nh;
++
+ if (geneve_get_sk_family(gs) == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+ #if IS_ENABLED(CONFIG_IPV6)
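
The geneve RX hunk above demonstrates a pointer-invalidation rule: helpers like pskb_inet_may_pull() may reallocate skb->head, so a raw pointer into the buffer must be saved as an offset and rebuilt afterwards. A minimal sketch, assuming kernel context:

    #include <linux/skbuff.h>
    #include <net/ip_tunnels.h>     /* pskb_inet_may_pull() */

    static void *example_outer_header(struct sk_buff *skb)
    {
            int nh = skb_network_header(skb) - skb->head;   /* offset */

            skb_reset_network_header(skb);
            if (!pskb_inet_may_pull(skb))   /* may change skb->head */
                    return NULL;

            return skb->head + nh;          /* pointer rebuilt safely */
    }
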
+@@ -335,6 +349,7 @@ static int geneve_init(struct net_device *dev)
+ gro_cells_destroy(&geneve->gro_cells);
+ return err;
+ }
++ netdev_lockdep_set_classes(dev);
+ return 0;
+ }
+
+@@ -900,6 +915,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct geneve_dev *geneve,
+ const struct ip_tunnel_info *info)
+ {
++ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+ const struct ip_tunnel_key *key = &info->key;
+@@ -911,7 +927,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
+- if (!pskb_inet_may_pull(skb))
++ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ return -EINVAL;
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -984,7 +1000,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ }
+
+ err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+- geneve->cfg.inner_proto_inherit);
++ inner_proto_inherit);
+ if (unlikely(err))
+ return err;
+
+@@ -1000,6 +1016,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct geneve_dev *geneve,
+ const struct ip_tunnel_info *info)
+ {
++ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ const struct ip_tunnel_key *key = &info->key;
+@@ -1009,7 +1026,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ __be16 sport;
+ int err;
+
+- if (!pskb_inet_may_pull(skb))
++ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ return -EINVAL;
+
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -1064,7 +1081,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ ttl = ttl ? : ip6_dst_hoplimit(dst);
+ }
+ err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+- geneve->cfg.inner_proto_inherit);
++ inner_proto_inherit);
+ if (unlikely(err))
+ return err;
+
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index b22596b18ee8c5..9b0b22b65cb257 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -630,7 +630,7 @@ static void __gtp_encap_destroy(struct sock *sk)
+ gtp->sk0 = NULL;
+ else
+ gtp->sk1u = NULL;
+- udp_sk(sk)->encap_type = 0;
++ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ rcu_assign_sk_user_data(sk, NULL);
+ release_sock(sk);
+ sock_put(sk);
+@@ -682,7 +682,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+
+ netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+
+- switch (udp_sk(sk)->encap_type) {
++ switch (READ_ONCE(udp_sk(sk)->encap_type)) {
+ case UDP_ENCAP_GTP0:
+ netdev_dbg(gtp->dev, "received GTP0 packet\n");
+ ret = gtp0_udp_encap_recv(gtp, skb);
+@@ -901,6 +901,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto tx_err;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ skb_reset_inner_headers(skb);
+
+ /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+@@ -1111,11 +1114,12 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ static void gtp_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct gtp_dev *gtp = netdev_priv(dev);
++ struct hlist_node *next;
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++)
+- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
++ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
+
+ list_del_rcu(&gtp->list);
+@@ -1216,7 +1220,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ pr_debug("gtp socket fd=%d not found\n", fd);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ sk = sock->sk;
+@@ -1903,26 +1907,26 @@ static int __init gtp_init(void)
+
+ get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+
+- err = rtnl_link_register(&gtp_link_ops);
++ err = register_pernet_subsys(&gtp_net_ops);
+ if (err < 0)
+ goto error_out;
+
+- err = genl_register_family(&gtp_genl_family);
++ err = rtnl_link_register(&gtp_link_ops);
+ if (err < 0)
+- goto unreg_rtnl_link;
++ goto unreg_pernet_subsys;
+
+- err = register_pernet_subsys(&gtp_net_ops);
++ err = genl_register_family(&gtp_genl_family);
+ if (err < 0)
+- goto unreg_genl_family;
++ goto unreg_rtnl_link;
+
+ pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+ sizeof(struct pdp_ctx));
+ return 0;
+
+-unreg_genl_family:
+- genl_unregister_family(&gtp_genl_family);
+ unreg_rtnl_link:
+ rtnl_link_unregister(&gtp_link_ops);
++unreg_pernet_subsys:
++ unregister_pernet_subsys(&gtp_net_ops);
+ error_out:
+ pr_err("error loading GTP module loaded\n");
+ return err;
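
The gtp_init() reorder above encodes a registration-order rule: pernet state must exist before rtnl_link_register() makes the newlink path reachable, and the error path unwinds in reverse. A sketch with placeholder ops structs, assuming kernel context:

    #include <linux/module.h>
    #include <net/net_namespace.h>
    #include <net/rtnetlink.h>

    static struct pernet_operations example_net_ops;        /* placeholder */
    static struct rtnl_link_ops example_link_ops;           /* placeholder */

    static int __init example_init(void)
    {
            int err;

            err = register_pernet_subsys(&example_net_ops);
            if (err < 0)
                    return err;

            err = rtnl_link_register(&example_link_ops);
            if (err < 0)
                    unregister_pernet_subsys(&example_net_ops);

            return err;
    }
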
+diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
+index ca7bf7f897d36b..c8cbd85adcf995 100644
+--- a/drivers/net/hyperv/Kconfig
++++ b/drivers/net/hyperv/Kconfig
+@@ -3,5 +3,6 @@ config HYPERV_NET
+ tristate "Microsoft Hyper-V virtual network driver"
+ depends on HYPERV
+ select UCS2_STRING
++ select NLS
+ help
+ Select this option to enable the Hyper-V virtual network driver.
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 82e9796c8f5e5f..b2f27e505f76c6 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head)
+ int i;
+
+ kfree(nvdev->extension);
+- vfree(nvdev->recv_buf);
+- vfree(nvdev->send_buf);
++
++ if (!nvdev->recv_buf_gpadl_handle.decrypted)
++ vfree(nvdev->recv_buf);
++ if (!nvdev->send_buf_gpadl_handle.decrypted)
++ vfree(nvdev->send_buf);
+ bitmap_free(nvdev->send_section_map);
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+@@ -708,7 +711,10 @@ void netvsc_device_remove(struct hv_device *device)
+ /* Disable NAPI and disassociate its context from the device. */
+ for (i = 0; i < net_device->num_chn; i++) {
+ /* See also vmbus_reset_channel_cb(). */
+- napi_disable(&net_device->chan_table[i].napi);
++		/* only disable enabled NAPI channels */
++ if (i < ndev->real_num_rx_queues)
++ napi_disable(&net_device->chan_table[i].napi);
++
+ netif_napi_del(&net_device->chan_table[i].napi);
+ }
+
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3ba3c8fb28a5d6..9d2d66a4aafd56 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -42,9 +42,13 @@
+ #define LINKCHANGE_INT (2 * HZ)
+ #define VF_TAKEOVER_INT (HZ / 10)
+
++/* Macros to define the context of vf registration */
++#define VF_REG_IN_PROBE 1
++#define VF_REG_IN_NOTIFIER 2
++
+ static unsigned int ring_size __ro_after_init = 128;
+ module_param(ring_size, uint, 0444);
+-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
++MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
+ unsigned int netvsc_ring_bytes __ro_after_init;
+
+ static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+@@ -2183,7 +2187,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
+ }
+
+ static int netvsc_vf_join(struct net_device *vf_netdev,
+- struct net_device *ndev)
++ struct net_device *ndev, int context)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ int ret;
+@@ -2206,10 +2210,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
+ goto upper_link_failed;
+ }
+
+- /* set slave flag before open to prevent IPv6 addrconf */
+- vf_netdev->flags |= IFF_SLAVE;
+-
+- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
++	/* If this registration is called from probe context, vf_takeover
++ * is taken care of later in probe itself.
++ */
++ if (context == VF_REG_IN_NOTIFIER)
++ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+
+ call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+
+@@ -2315,16 +2320,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+
+ }
+
+- /* Fallback path to check synthetic vf with
+- * help of mac addr
++ /* Fallback path to check synthetic vf with help of mac addr.
++ * Because this function can be called before vf_netdev is
++ * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
++ * from dev_addr, also try to match to its dev_addr.
++ * Note: On Hyper-V and Azure, it's not possible to set a MAC address
++	 * on a VF that matches the MAC of an unrelated NETVSC device.
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+- if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+- netdev_notice(vf_netdev,
+- "falling back to mac addr based matching\n");
++ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
++ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
+ return ndev;
+- }
+ }
+
+ netdev_notice(vf_netdev,
+@@ -2332,7 +2339,20 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+ return NULL;
+ }
+
+-static int netvsc_register_vf(struct net_device *vf_netdev)
++static int netvsc_prepare_bonding(struct net_device *vf_netdev)
++{
++ struct net_device *ndev;
++
++ ndev = get_netvsc_byslot(vf_netdev);
++ if (!ndev)
++ return NOTIFY_DONE;
++
++ /* set slave flag before open to prevent IPv6 addrconf */
++ vf_netdev->flags |= IFF_SLAVE;
++ return NOTIFY_DONE;
++}
++
++static int netvsc_register_vf(struct net_device *vf_netdev, int context)
+ {
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device *netvsc_dev;
+@@ -2372,7 +2392,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
+
+ netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
+
+- if (netvsc_vf_join(vf_netdev, ndev) != 0)
++ if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
+ return NOTIFY_DONE;
+
+ dev_hold(vf_netdev);
+@@ -2470,10 +2490,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
+ return NOTIFY_OK;
+ }
+
++static int check_dev_is_matching_vf(struct net_device *event_ndev)
++{
++ /* Skip NetVSC interfaces */
++ if (event_ndev->netdev_ops == &device_ops)
++ return -ENODEV;
++
++ /* Avoid non-Ethernet type devices */
++ if (event_ndev->type != ARPHRD_ETHER)
++ return -ENODEV;
++
++ /* Avoid Vlan dev with same MAC registering as VF */
++ if (is_vlan_dev(event_ndev))
++ return -ENODEV;
++
++ /* Avoid Bonding master dev with same MAC registering as VF */
++ if (netif_is_bond_master(event_ndev))
++ return -ENODEV;
++
++ return 0;
++}
++
+ static int netvsc_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+ {
+- struct net_device *net = NULL;
++ struct net_device *net = NULL, *vf_netdev;
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device_info *device_info = NULL;
+ struct netvsc_device *nvdev;
+@@ -2531,15 +2572,6 @@ static int netvsc_probe(struct hv_device *dev,
+ goto devinfo_failed;
+ }
+
+- nvdev = rndis_filter_device_add(dev, device_info);
+- if (IS_ERR(nvdev)) {
+- ret = PTR_ERR(nvdev);
+- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+- goto rndis_failed;
+- }
+-
+- eth_hw_addr_set(net, device_info->mac_adr);
+-
+ /* We must get rtnl lock before scheduling nvdev->subchan_work,
+ * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+ * all subchannels to show up, but that may not happen because
+@@ -2547,9 +2579,23 @@ static int netvsc_probe(struct hv_device *dev,
+ * -> ... -> device_add() -> ... -> __device_attach() can't get
+ * the device lock, so all the subchannels can't be processed --
+ * finally netvsc_subchan_work() hangs forever.
++ *
++ * The rtnl lock also needs to be held before rndis_filter_device_add()
++ * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
++	 * VF NIC offering and registering. If the VF NIC finishes register_netdev()
++	 * earlier, it may cause a name-based config failure.
+ */
+ rtnl_lock();
+
++ nvdev = rndis_filter_device_add(dev, device_info);
++ if (IS_ERR(nvdev)) {
++ ret = PTR_ERR(nvdev);
++ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
++ goto rndis_failed;
++ }
++
++ eth_hw_addr_set(net, device_info->mac_adr);
++
+ if (nvdev->num_chn > 1)
+ schedule_work(&nvdev->subchan_work);
+
+@@ -2580,15 +2626,39 @@ static int netvsc_probe(struct hv_device *dev,
+ }
+
+ list_add(&net_device_ctx->list, &netvsc_dev_list);
++
++ /* When the hv_netvsc driver is unloaded and reloaded, the
++	 * NETDEV_REGISTER for the vf device is replayed before probe
++	 * is complete. This is because register_netdevice_notifier() gets
++	 * registered before vmbus_driver_register() so that callback func
++	 * is set before probe and we don't miss events like NETDEV_POST_INIT.
++	 * So, in this section we try to register the matching vf device that
++	 * is present as a netdevice, knowing that its register call is not
++	 * processed in the netvsc_netdev_notifier (as probing is in progress
++	 * and get_netvsc_byslot fails).
++ */
++ for_each_netdev(dev_net(net), vf_netdev) {
++ ret = check_dev_is_matching_vf(vf_netdev);
++ if (ret != 0)
++ continue;
++
++ if (net != get_netvsc_byslot(vf_netdev))
++ continue;
++
++ netvsc_prepare_bonding(vf_netdev);
++ netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
++ __netvsc_vf_setup(net, vf_netdev);
++ break;
++ }
+ rtnl_unlock();
+
+ netvsc_devinfo_put(device_info);
+ return 0;
+
+ register_failed:
+- rtnl_unlock();
+ rndis_filter_device_remove(dev, nvdev);
+ rndis_failed:
++ rtnl_unlock();
+ netvsc_devinfo_put(device_info);
+ devinfo_failed:
+ free_percpu(net_device_ctx->vf_stats);
+@@ -2735,26 +2805,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+ {
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
++ int ret = 0;
+
+- /* Skip our own events */
+- if (event_dev->netdev_ops == &device_ops)
+- return NOTIFY_DONE;
+-
+- /* Avoid non-Ethernet type devices */
+- if (event_dev->type != ARPHRD_ETHER)
+- return NOTIFY_DONE;
+-
+- /* Avoid Vlan dev with same MAC registering as VF */
+- if (is_vlan_dev(event_dev))
+- return NOTIFY_DONE;
+-
+- /* Avoid Bonding master dev with same MAC registering as VF */
+- if (netif_is_bond_master(event_dev))
++ ret = check_dev_is_matching_vf(event_dev);
++ if (ret != 0)
+ return NOTIFY_DONE;
+
+ switch (event) {
++ case NETDEV_POST_INIT:
++ return netvsc_prepare_bonding(event_dev);
+ case NETDEV_REGISTER:
+- return netvsc_register_vf(event_dev);
++ return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
+ case NETDEV_UNREGISTER:
+ return netvsc_unregister_vf(event_dev);
+ case NETDEV_UP:
+@@ -2786,14 +2847,19 @@ static int __init netvsc_drv_init(void)
+ pr_info("Increased ring_size to %u (min allowed)\n",
+ ring_size);
+ }
+- netvsc_ring_bytes = ring_size * PAGE_SIZE;
++ netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
++
++ register_netdevice_notifier(&netvsc_netdev_notifier);
+
+ ret = vmbus_driver_register(&netvsc_drv);
+ if (ret)
+- return ret;
++ goto err_vmbus_reg;
+
+- register_netdevice_notifier(&netvsc_netdev_notifier);
+ return 0;
++
++err_vmbus_reg:
++ unregister_netdevice_notifier(&netvsc_netdev_notifier);
++ return ret;
+ }
+
+ MODULE_LICENSE("GPL");
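+
+The netvsc_drv_init() hunk above registers the netdevice notifier before the
+vmbus driver so early events such as NETDEV_POST_INIT are not missed, and
+tears it down again if driver registration fails. A minimal sketch of the
+notifier-first pattern (hypothetical names):
+
+	static int example_netdev_event(struct notifier_block *nb,
+					unsigned long event, void *ptr)
+	{
+		return NOTIFY_DONE;	/* stub handler for the sketch */
+	}
+
+	static struct notifier_block example_nb = {
+		.notifier_call = example_netdev_event,
+	};
+
+	static int __init example_drv_init(void)
+	{
+		int ret;
+
+		/* notifier first, so no early NETDEV_* events are missed */
+		register_netdevice_notifier(&example_nb);
+
+		ret = example_bus_driver_register();	/* assumed */
+		if (ret)
+			unregister_netdevice_notifier(&example_nb);
+		return ret;
+	}
+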
+diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
+index 95da876c561384..1075e24b11defc 100644
+--- a/drivers/net/ieee802154/Kconfig
++++ b/drivers/net/ieee802154/Kconfig
+@@ -101,6 +101,7 @@ config IEEE802154_CA8210_DEBUGFS
+
+ config IEEE802154_MCR20A
+ tristate "MCR20A transceiver driver"
++ select REGMAP_SPI
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
+ help
+diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
+index 87abe3b46316e4..bab92f19c4f48e 100644
+--- a/drivers/net/ieee802154/mcr20a.c
++++ b/drivers/net/ieee802154/mcr20a.c
+@@ -1303,16 +1303,13 @@ mcr20a_probe(struct spi_device *spi)
+ irq_type = IRQF_TRIGGER_FALLING;
+
+ ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
+- irq_type, dev_name(&spi->dev), lp);
++ irq_type | IRQF_NO_AUTOEN, dev_name(&spi->dev), lp);
+ if (ret) {
+ dev_err(&spi->dev, "could not request_irq for mcr20a\n");
+ ret = -ENODEV;
+ goto free_dev;
+ }
+
+- /* disable_irq by default and wait for starting hardware */
+- disable_irq(spi->irq);
+-
+ ret = ieee802154_register_hw(hw);
+ if (ret) {
+ dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
+diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
+index 4bc05948f772d8..a78c692f2d3c5d 100644
+--- a/drivers/net/ipa/ipa_interrupt.c
++++ b/drivers/net/ipa/ipa_interrupt.c
+@@ -212,7 +212,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+ u32 unit_count;
+ u32 unit;
+
+- unit_count = roundup(ipa->endpoint_count, 32);
++ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
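+
+The one-line ipa_interrupt change above is easy to misread: the loop needs
+the number of 32-endpoint register units, not the endpoint count rounded up
+to a multiple of 32. Worked example (endpoint count assumed for
+illustration):
+
+	/* with ipa->endpoint_count == 20:
+	 *   roundup(20, 32)      == 32   old code iterated 32 "units"
+	 *   DIV_ROUND_UP(20, 32) ==  1   one 32-bit suspend register suffices
+	 */
+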
+diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
+index d7b81a36d673bb..145eb0bd096d60 100644
+--- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
++++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
+@@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
+
+ static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+- [R_LENGTH] = GENMASK(19, 0),
++ [R_LENGTH] = GENMASK(23, 0),
+ };
+
+ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index c0c49f1813673a..fef4eff7753a7a 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ return addr;
+ }
+
+-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
++static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+@@ -439,27 +439,25 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
+- err = ip_local_out(net, skb->sk, skb);
++ err = ip_local_out(net, NULL, skb);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+ err:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ out:
+ return ret;
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++
++static noinline_for_stack int
++ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
+ {
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+- struct net_device *dev = skb->dev;
+- struct net *net = dev_net(dev);
+- struct dst_entry *dst;
+- int err, ret = NET_XMIT_DROP;
+ struct flowi6 fl6 = {
+ .flowi6_oif = dev->ifindex,
+ .daddr = ip6h->daddr,
+@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
++ struct dst_entry *dst;
++ int err;
+
+- dst = ip6_route_output(net, NULL, &fl6);
+- if (dst->error) {
+- ret = dst->error;
++ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
++ err = dst->error;
++ if (err) {
+ dst_release(dst);
+- goto err;
++ return err;
+ }
+ skb_dst_set(skb, dst);
++ return 0;
++}
++
++static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ int err, ret = NET_XMIT_DROP;
++
++ err = ipvlan_route_v6_outbound(dev, skb);
++ if (unlikely(err)) {
++ DEV_STATS_INC(dev, tx_errors);
++ kfree_skb(skb);
++ return err;
++ }
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+- err = ip6_local_out(net, skb->sk, skb);
++ err = ip6_local_out(dev_net(dev), NULL, skb);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+- goto out;
+-err:
+- dev->stats.tx_errors++;
+- kfree_skb(skb);
+-out:
+ return ret;
+ }
+ #else
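+
+The ipvlan hunks above (and the macsec ones further down) replace plain
+dev->stats counter updates with DEV_STATS_INC()/DEV_STATS_READ(). Those
+macros expand, roughly, to atomic_long operations on the core net_device
+stats, so concurrent transmit paths cannot lose increments:
+
+	/* DEV_STATS_INC(dev, tx_errors) is roughly:
+	 *   atomic_long_inc(&dev->stats.__tx_errors);
+	 * DEV_STATS_READ(dev, tx_errors) is roughly:
+	 *   atomic_long_read(&dev->stats.__tx_errors);
+	 * unlike dev->stats.tx_errors++, a non-atomic read-modify-write.
+	 */
+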
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 1b55928e89b8a1..57c79f5f29916b 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
+ s->rx_dropped = rx_errs;
+ s->tx_dropped = tx_drps;
+ }
++ s->tx_errors = DEV_STATS_READ(dev, tx_errors);
+ }
+
+ static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index f6d53e63ef4ecc..f6eab66c266081 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -144,6 +144,7 @@ static int loopback_dev_init(struct net_device *dev)
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+ if (!dev->lstats)
+ return -ENOMEM;
++ netdev_lockdep_set_classes(dev);
+ return 0;
+ }
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index c5cd4551c67ca3..778fb77c5a9372 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -996,10 +996,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ struct metadata_dst *md_dst;
+ struct macsec_rxh_data *rxd;
+ struct macsec_dev *macsec;
++ bool is_macsec_md_dst;
+
+ rcu_read_lock();
+ rxd = macsec_data_rcu(skb->dev);
+ md_dst = skb_metadata_dst(skb);
++ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
+
+ list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ struct sk_buff *nskb;
+@@ -1010,14 +1012,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ * the SecTAG, so we have to deduce which port to deliver to.
+ */
+ if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+- struct macsec_rx_sc *rx_sc = NULL;
++ const struct macsec_ops *ops;
+
+- if (md_dst && md_dst->type == METADATA_MACSEC)
+- rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
++ ops = macsec_get_ops(macsec, NULL);
+
+- if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
++ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
+ continue;
+
++ if (is_macsec_md_dst) {
++ struct macsec_rx_sc *rx_sc;
++
++ /* All drivers that implement MACsec offload
++ * support using skb metadata destinations must
++ * indicate that they do so.
++ */
++ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
++ rx_sc = find_rx_sc(&macsec->secy,
++ md_dst->u.macsec_info.sci);
++ if (!rx_sc)
++ continue;
++ /* device indicated macsec offload occurred */
++ skb->dev = ndev;
++ skb->pkt_type = PACKET_HOST;
++ eth_skb_pkt_type(skb, ndev);
++ ret = RX_HANDLER_ANOTHER;
++ goto out;
++ }
++
++ /* This datapath is insecure because it is unable to
++ * enforce isolation of broadcast/multicast traffic and
++ * unicast traffic with promiscuous mode on the macsec
++ * netdev. Since the core stack has no mechanism to
++ * check that the hardware did indeed receive MACsec
++ * traffic, it is possible that the response handling
++ * done by the MACsec port was to a plaintext packet.
++ * This violates the MACsec protocol standard.
++ */
+ if (ether_addr_equal_64bits(hdr->h_dest,
+ ndev->dev_addr)) {
+ /* exact match, divert skb to this port */
+@@ -1033,14 +1063,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ break;
+
+ nskb->dev = ndev;
+- if (ether_addr_equal_64bits(hdr->h_dest,
+- ndev->broadcast))
+- nskb->pkt_type = PACKET_BROADCAST;
+- else
+- nskb->pkt_type = PACKET_MULTICAST;
++ eth_skb_pkt_type(nskb, ndev);
+
+ __netif_rx(nskb);
+- } else if (rx_sc || ndev->flags & IFF_PROMISC) {
++ } else if (ndev->flags & IFF_PROMISC) {
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ ret = RX_HANDLER_ANOTHER;
+@@ -3657,9 +3683,9 @@ static void macsec_get_stats64(struct net_device *dev,
+
+ dev_fetch_sw_netstats(s, dev->tstats);
+
+- s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+- s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+- s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
++ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
++ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
++ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
+ }
+
+ static int macsec_get_iflink(const struct net_device *dev)
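+
+eth_skb_pkt_type(), used in the macsec hunks above in place of the
+open-coded broadcast test, classifies a received frame by its destination
+MAC. Its behaviour is roughly the following (a sketch, not the header's
+exact source):
+
+	static inline void pkt_type_sketch(struct sk_buff *skb,
+					   const struct net_device *dev)
+	{
+		const struct ethhdr *eth = eth_hdr(skb);
+
+		if (ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))
+			return;		/* PACKET_HOST, the default */
+
+		if (is_multicast_ether_addr_64bits(eth->h_dest)) {
+			if (ether_addr_equal_64bits(eth->h_dest,
+						    dev->broadcast))
+				skb->pkt_type = PACKET_BROADCAST;
+			else
+				skb->pkt_type = PACKET_MULTICAST;
+		} else {
+			skb->pkt_type = PACKET_OTHERHOST;
+		}
+	}
+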
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 02bd201bc7e58e..c8da94af4161a5 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+- if (change & IFF_PROMISC)
++ if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
+ dev_set_promiscuity(lowerdev,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+
+diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
+index 5bf6fdff701cd6..346e6ad36054eb 100644
+--- a/drivers/net/mctp/mctp-serial.c
++++ b/drivers/net/mctp/mctp-serial.c
+@@ -91,8 +91,8 @@ static int next_chunk_len(struct mctp_serial *dev)
+ * will be those non-escaped bytes, and does not include the escaped
+ * byte.
+ */
+- for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) {
+- if (needs_escape(dev->txbuf[dev->txpos + i + 1]))
++ for (i = 1; i + dev->txpos < dev->txlen; i++) {
++ if (needs_escape(dev->txbuf[dev->txpos + i]))
+ break;
+ }
+
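+
+The mctp-serial fix above removes an off-by-one in the chunk-length scan:
+the old code started testing one byte too far into the buffer, so a byte
+needing escape could be counted into a plain chunk and leave the device
+unescaped. Illustration (buffer contents assumed; 0x7d needs escaping):
+
+	/* txbuf = { 0x01, 0x7d, 0x03 }, txpos = 0, txlen = 3
+	 * old: scan began at txbuf[txpos + 2], so the 0x7d at index 1 was
+	 *      included in the chunk and transmitted unescaped
+	 * new: scan begins at txbuf[txpos + 1], the chunk stops before 0x7d
+	 */
+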
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index 3111e16485921d..9c2e71b9c0324e 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -770,6 +770,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ /* rtnl_lock already held
+ * we might sleep in __netpoll_cleanup()
+ */
++ nt->enabled = false;
+ spin_unlock_irqrestore(&target_list_lock, flags);
+
+ __netpoll_cleanup(&nt->np);
+@@ -777,7 +778,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ spin_lock_irqsave(&target_list_lock, flags);
+ netdev_put(nt->np.dev, &nt->np.dev_tracker);
+ nt->np.dev = NULL;
+- nt->enabled = false;
+ stopped = true;
+ netconsole_target_put(nt);
+ goto restart;
+diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
+index f60eb97e3a627e..608953d4f98da9 100644
+--- a/drivers/net/netdevsim/bpf.c
++++ b/drivers/net/netdevsim/bpf.c
+@@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+ {
+ struct nsim_bpf_bound_prog *state;
+
+- if (!prog || !prog->aux->offload)
++ if (!prog || !bpf_prog_is_offloaded(prog->aux))
+ return;
+
+ state = prog->aux->offload->dev_priv;
+@@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+ if (!bpf->prog)
+ return 0;
+
+- if (!bpf->prog->aux->offload) {
++ if (!bpf_prog_is_offloaded(bpf->prog->aux)) {
+ NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+ return -EINVAL;
+ }
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index b4d3b9cde8bd68..92a7a36b93ac0c 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ trap_report_dw.work);
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+- /* For each running port and enabled packet trap, generate a UDP
+- * packet with a random 5-tuple and report it.
+- */
+ if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
++ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
+ return;
+ }
+
++ /* For each running port and enabled packet trap, generate a UDP
++ * packet with a random 5-tuple and report it.
++ */
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
+ if (!netif_running(nsim_dev_port->ns->netdev))
+ continue;
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 2eac92f49631f8..d8ca82addfe1bd 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -369,6 +369,12 @@ static int nsim_init_netdevsim_vf(struct netdevsim *ns)
+ return err;
+ }
+
++static void nsim_exit_netdevsim(struct netdevsim *ns)
++{
++ nsim_udp_tunnels_info_destroy(ns->netdev);
++ mock_phc_destroy(ns->phc);
++}
++
+ struct netdevsim *
+ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
+ {
+@@ -417,8 +423,7 @@ void nsim_destroy(struct netdevsim *ns)
+ }
+ rtnl_unlock();
+ if (nsim_dev_port_is_pf(ns->nsim_dev_port))
+- nsim_udp_tunnels_info_destroy(dev);
+- mock_phc_destroy(ns->phc);
++ nsim_exit_netdevsim(ns);
+ free_netdev(dev);
+ }
+
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index 536bd6564f8b8a..dade51cf599c62 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+- if (__netif_rx(skb) == NET_RX_DROP) {
++ if (netif_rx(skb) == NET_RX_DROP) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ } else {
+diff --git a/drivers/net/pcs/pcs-xpcs-wx.c b/drivers/net/pcs/pcs-xpcs-wx.c
+index 19c75886f070ea..5f5cd3596cb846 100644
+--- a/drivers/net/pcs/pcs-xpcs-wx.c
++++ b/drivers/net/pcs/pcs-xpcs-wx.c
+@@ -109,7 +109,7 @@ static void txgbe_pma_config_1g(struct dw_xpcs *xpcs)
+ txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0);
+ val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3);
+ val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0);
+- txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val);
++ txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL3, val);
+
+ txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20);
+ txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46);
+diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
+index 4dbc21f604f20a..f0f41e86a4fb32 100644
+--- a/drivers/net/pcs/pcs-xpcs.c
++++ b/drivers/net/pcs/pcs-xpcs.c
+@@ -293,7 +293,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
+ dev = MDIO_MMD_VEND2;
+ break;
+ default:
+- return -1;
++ return -EINVAL;
+ }
+
+ ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
+@@ -891,7 +891,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ return ret;
+ break;
+ default:
+- return -1;
++ return -EINVAL;
+ }
+
+ if (compat->pma_config) {
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index 37fb033e1c29e3..ef203b0807e588 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -2104,7 +2104,7 @@ static struct phy_driver at803x_driver[] = {
+ .write_page = at803x_write_page,
+ .get_features = at803x_get_features,
+ .read_status = at803x_read_status,
+- .config_intr = &at803x_config_intr,
++ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+@@ -2134,7 +2134,7 @@ static struct phy_driver at803x_driver[] = {
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+- .config_intr = &at803x_config_intr,
++ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+@@ -2150,7 +2150,7 @@ static struct phy_driver at803x_driver[] = {
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+- .config_intr = &at803x_config_intr,
++ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
+index 9717a1626f3fa1..37a64a37b2ae38 100644
+--- a/drivers/net/phy/bcm84881.c
++++ b/drivers/net/phy/bcm84881.c
+@@ -120,7 +120,7 @@ static int bcm84881_aneg_done(struct phy_device *phydev)
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+- return val;
++ return bmsr;
+
+ return !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+@@ -146,7 +146,7 @@ static int bcm84881_read_status(struct phy_device *phydev)
+
+ bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
+ if (bmsr < 0)
+- return val;
++ return bmsr;
+
+ phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
+ !!(bmsr & BMSR_ANEGCOMPLETE);
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index b7cb71817780ce..29e1cbea6dc0c3 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -380,7 +380,7 @@ static int dp83822_config_init(struct phy_device *phydev)
+ {
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+- int rgmii_delay;
++ int rgmii_delay = 0;
+ s32 rx_int_delay;
+ s32 tx_int_delay;
+ int err = 0;
+@@ -390,30 +390,33 @@ static int dp83822_config_init(struct phy_device *phydev)
+ rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
+ true);
+
+- if (rx_int_delay <= 0)
+- rgmii_delay = 0;
+- else
+- rgmii_delay = DP83822_RX_CLK_SHIFT;
++ /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
++ if (rx_int_delay > 0)
++ rgmii_delay |= DP83822_RX_CLK_SHIFT;
+
+ tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
+ false);
++
++ /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
+ if (tx_int_delay <= 0)
+- rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
+- else
+ rgmii_delay |= DP83822_TX_CLK_SHIFT;
+
+- if (rgmii_delay) {
+- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+- MII_DP83822_RCSR, rgmii_delay);
+- if (err)
+- return err;
+- }
++ err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
++ DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
++ if (err)
++ return err;
++
++ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
++ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+- phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
++ if (err)
++ return err;
+ } else {
+- phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
++ err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
++ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
++
++ if (err)
++ return err;
+ }
+
+ if (dp83822->fx_enabled) {
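+
+The dp83822 rework above folds the conditional set/clear of the two RGMII
+delay bits into a single phy_modify_mmd() call, one read-modify-write that
+also propagates errors. The helper behaves approximately like:
+
+	/* phy_modify_mmd(phydev, devad, reg, mask, set), approximately: */
+	int val = phy_read_mmd(phydev, devad, reg);
+
+	if (val < 0)
+		return val;
+	val = (val & ~mask) | set;	/* clear masked bits, apply new ones */
+	return phy_write_mmd(phydev, devad, reg, val);
+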
+diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
+index fa8c6fdcf30181..5f056d7db83eed 100644
+--- a/drivers/net/phy/dp83869.c
++++ b/drivers/net/phy/dp83869.c
+@@ -645,7 +645,6 @@ static int dp83869_configure_fiber(struct phy_device *phydev,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+- linkmode_set_bit(ADVERTISED_FIBRE, phydev->advertising);
+
+ if (dp83869->mode == DP83869_RGMII_1000_BASE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+@@ -695,7 +694,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
+ phy_ctrl_val = dp83869->mode;
+ if (phydev->interface == PHY_INTERFACE_MODE_MII) {
+ if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
+- dp83869->mode == DP83869_RGMII_100_BASE) {
++ dp83869->mode == DP83869_RGMII_100_BASE ||
++ dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
+ phy_ctrl_val |= DP83869_OP_MODE_MII;
+ } else {
+ phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
+diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
+index 8a20d9889f105b..f4f9412d0cd7e2 100644
+--- a/drivers/net/phy/mediatek-ge-soc.c
++++ b/drivers/net/phy/mediatek-ge-soc.c
+@@ -216,6 +216,9 @@
+ #define MTK_PHY_LED_ON_LINK1000 BIT(0)
+ #define MTK_PHY_LED_ON_LINK100 BIT(1)
+ #define MTK_PHY_LED_ON_LINK10 BIT(2)
++#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
++ MTK_PHY_LED_ON_LINK100 |\
++ MTK_PHY_LED_ON_LINK1000)
+ #define MTK_PHY_LED_ON_LINKDOWN BIT(3)
+ #define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
+ #define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
+@@ -231,6 +234,12 @@
+ #define MTK_PHY_LED_BLINK_100RX BIT(3)
+ #define MTK_PHY_LED_BLINK_10TX BIT(4)
+ #define MTK_PHY_LED_BLINK_10RX BIT(5)
++#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
++ MTK_PHY_LED_BLINK_100RX |\
++ MTK_PHY_LED_BLINK_1000RX)
++#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
++ MTK_PHY_LED_BLINK_100TX |\
++ MTK_PHY_LED_BLINK_1000TX)
+ #define MTK_PHY_LED_BLINK_COLLISION BIT(6)
+ #define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
+ #define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
+@@ -489,7 +498,7 @@ static int tx_r50_fill_result(struct phy_device *phydev, u16 tx_r50_cal_val,
+ u16 reg, val;
+
+ if (phydev->drv->phy_id == MTK_GPHY_ID_MT7988)
+- bias = -2;
++ bias = -1;
+
+ val = clamp_val(bias + tx_r50_cal_val, 0, 63);
+
+@@ -705,6 +714,11 @@ static int tx_vcm_cal_sw(struct phy_device *phydev, u8 rg_txreserve_x)
+ static void mt798x_phy_common_finetune(struct phy_device *phydev)
+ {
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
++ /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
++ __phy_write(phydev, 0x11, 0xc71);
++ __phy_write(phydev, 0x12, 0xc);
++ __phy_write(phydev, 0x10, 0x8fae);
++
+ /* EnabRandUpdTrig = 1 */
+ __phy_write(phydev, 0x11, 0x2f00);
+ __phy_write(phydev, 0x12, 0xe);
+@@ -715,15 +729,56 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x83aa);
+
+- /* TrFreeze = 0 */
++ /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
++ __phy_write(phydev, 0x11, 0x240);
++ __phy_write(phydev, 0x12, 0x0);
++ __phy_write(phydev, 0x10, 0x9680);
++
++ /* TrFreeze = 0 (mt7988 default) */
+ __phy_write(phydev, 0x11, 0x0);
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x9686);
+
++ /* SSTrKp100 = 5 */
++ /* SSTrKf100 = 6 */
++ /* SSTrKp1000Mas = 5 */
++ /* SSTrKf1000Mas = 6 */
+ /* SSTrKp1000Slv = 5 */
++ /* SSTrKf1000Slv = 6 */
+ __phy_write(phydev, 0x11, 0xbaef);
+ __phy_write(phydev, 0x12, 0x2e);
+ __phy_write(phydev, 0x10, 0x968c);
++ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
++}
++
++static void mt7981_phy_finetune(struct phy_device *phydev)
++{
++ u16 val[8] = { 0x01ce, 0x01c1,
++ 0x020f, 0x0202,
++ 0x03d0, 0x03c0,
++ 0x0013, 0x0005 };
++ int i, k;
++
++ /* 100M eye finetune:
++	 * Keep middle level of TX MLT3 shaper as default.
++ * Only change TX MLT3 overshoot level here.
++ */
++ for (k = 0, i = 1; i < 12; i++) {
++ if (i % 3 == 0)
++ continue;
++ phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
++ }
++
++ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
++ /* ResetSyncOffset = 6 */
++ __phy_write(phydev, 0x11, 0x600);
++ __phy_write(phydev, 0x12, 0x0);
++ __phy_write(phydev, 0x10, 0x8fc0);
++
++ /* VgaDecRate = 1 */
++ __phy_write(phydev, 0x11, 0x4c2a);
++ __phy_write(phydev, 0x12, 0x3e);
++ __phy_write(phydev, 0x10, 0x8fa4);
+
+ /* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
+ * MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
+@@ -738,7 +793,7 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
+ __phy_write(phydev, 0x10, 0x8ec0);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+- /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/
++ /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
+ MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
+ BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9));
+@@ -771,48 +826,6 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222);
+ }
+
+-static void mt7981_phy_finetune(struct phy_device *phydev)
+-{
+- u16 val[8] = { 0x01ce, 0x01c1,
+- 0x020f, 0x0202,
+- 0x03d0, 0x03c0,
+- 0x0013, 0x0005 };
+- int i, k;
+-
+- /* 100M eye finetune:
+- * Keep middle level of TX MLT3 shapper as default.
+- * Only change TX MLT3 overshoot level here.
+- */
+- for (k = 0, i = 1; i < 12; i++) {
+- if (i % 3 == 0)
+- continue;
+- phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
+- }
+-
+- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
+- __phy_write(phydev, 0x11, 0xc71);
+- __phy_write(phydev, 0x12, 0xc);
+- __phy_write(phydev, 0x10, 0x8fae);
+-
+- /* ResetSyncOffset = 6 */
+- __phy_write(phydev, 0x11, 0x600);
+- __phy_write(phydev, 0x12, 0x0);
+- __phy_write(phydev, 0x10, 0x8fc0);
+-
+- /* VgaDecRate = 1 */
+- __phy_write(phydev, 0x11, 0x4c2a);
+- __phy_write(phydev, 0x12, 0x3e);
+- __phy_write(phydev, 0x10, 0x8fa4);
+-
+- /* FfeUpdGainForce = 4 */
+- __phy_write(phydev, 0x11, 0x240);
+- __phy_write(phydev, 0x12, 0x0);
+- __phy_write(phydev, 0x10, 0x9680);
+-
+- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+-}
+-
+ static void mt7988_phy_finetune(struct phy_device *phydev)
+ {
+ u16 val[12] = { 0x0187, 0x01cd, 0x01c8, 0x0182,
+@@ -827,17 +840,7 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
+ /* TCT finetune */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
+
+- /* Disable TX power saving */
+- phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+- MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
+-
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+-
+- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */
+- __phy_write(phydev, 0x11, 0x671);
+- __phy_write(phydev, 0x12, 0xc);
+- __phy_write(phydev, 0x10, 0x8fae);
+-
+ /* ResetSyncOffset = 5 */
+ __phy_write(phydev, 0x11, 0x500);
+ __phy_write(phydev, 0x12, 0x0);
+@@ -845,13 +848,27 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
+
+ /* VgaDecRate is 1 at default on mt7988 */
+
+- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
++ /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
++ * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
++ */
++ __phy_write(phydev, 0x11, 0xb90a);
++ __phy_write(phydev, 0x12, 0x6f);
++ __phy_write(phydev, 0x10, 0x8f82);
++
++ /* RemAckCntLimitCtrl = 1 */
++ __phy_write(phydev, 0x11, 0xfbba);
++ __phy_write(phydev, 0x12, 0xc3);
++ __phy_write(phydev, 0x10, 0x87f8);
+
+- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_2A30);
+- /* TxClkOffset = 2 */
+- __phy_modify(phydev, MTK_PHY_ANARG_RG, MTK_PHY_TCLKOFFSET_MASK,
+- FIELD_PREP(MTK_PHY_TCLKOFFSET_MASK, 0x2));
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
++
++ /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
++ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
++ MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
++ BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa));
++
++ /* rg_tr_lpf_cnt_val = 1023 */
++ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPF_CNT_VAL, 0x3ff);
+ }
+
+ static void mt798x_phy_eee(struct phy_device *phydev)
+@@ -884,11 +901,11 @@ static void mt798x_phy_eee(struct phy_device *phydev)
+ MTK_PHY_LPI_SLV_SEND_TX_EN,
+ FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120));
+
+- phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
+- MTK_PHY_LPI_SEND_LOC_TIMER_MASK |
+- MTK_PHY_LPI_TXPCS_LOC_RCV,
+- FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117));
++ /* Keep MTK_PHY_LPI_SEND_LOC_TIMER as 375 */
++ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
++ MTK_PHY_LPI_TXPCS_LOC_RCV);
+
++ /* This also fixes some IoT issues, such as CH340 */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7,
+ MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK,
+ FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) |
+@@ -922,7 +939,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x9690);
+
+- /* REG_EEE_st2TrKf1000 = 3 */
++ /* REG_EEE_st2TrKf1000 = 2 */
+ __phy_write(phydev, 0x11, 0x114f);
+ __phy_write(phydev, 0x12, 0x2);
+ __phy_write(phydev, 0x10, 0x969a);
+@@ -947,7 +964,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x96b8);
+
+- /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */
++ /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
+ __phy_write(phydev, 0x11, 0x1463);
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x96ca);
+@@ -1239,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ if (blink < 0)
+ return -EIO;
+
+- if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
+- MTK_PHY_LED_ON_LINK10)) ||
+- (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
+- MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
+- MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
++ if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
++ MTK_PHY_LED_ON_LINKDOWN)) ||
++ (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
+ set_bit(bit_netdev, &priv->led_state);
+ else
+ clear_bit(bit_netdev, &priv->led_state);
+@@ -1261,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ if (!rules)
+ return 0;
+
+- if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
++ if (on & MTK_PHY_LED_ON_LINK)
+ *rules |= BIT(TRIGGER_NETDEV_LINK);
+
+ if (on & MTK_PHY_LED_ON_LINK10)
+@@ -1279,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ if (on & MTK_PHY_LED_ON_HDX)
+ *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
+
+- if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
++ if (blink & MTK_PHY_LED_BLINK_RX)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+- if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
++ if (blink & MTK_PHY_LED_BLINK_TX)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+@@ -1315,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ on |= MTK_PHY_LED_ON_LINK1000;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX)) {
+- blink |= MTK_PHY_LED_BLINK_10RX |
+- MTK_PHY_LED_BLINK_100RX |
+- MTK_PHY_LED_BLINK_1000RX;
++ blink |= (on & MTK_PHY_LED_ON_LINK) ?
++ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
++ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
++ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
++ MTK_PHY_LED_BLINK_RX;
+ }
+
+ if (rules & BIT(TRIGGER_NETDEV_TX)) {
+- blink |= MTK_PHY_LED_BLINK_10TX |
+- MTK_PHY_LED_BLINK_100TX |
+- MTK_PHY_LED_BLINK_1000TX;
++ blink |= (on & MTK_PHY_LED_ON_LINK) ?
++ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
++ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
++ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
++ MTK_PHY_LED_BLINK_TX;
+ }
+
+ if (blink || on)
+@@ -1336,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_FDX |
+ MTK_PHY_LED_ON_HDX |
+- MTK_PHY_LED_ON_LINK10 |
+- MTK_PHY_LED_ON_LINK100 |
+- MTK_PHY_LED_ON_LINK1000,
++ MTK_PHY_LED_ON_LINK,
+ on);
+
+ if (ret)
+@@ -1459,6 +1476,13 @@ static int mt7988_phy_probe(struct phy_device *phydev)
+ if (err)
+ return err;
+
++ /* Disable TX power saving at probing to:
++ * 1. Meet common mode compliance test criteria
++ * 2. Make sure that TX-VCM calibration works fine
++ */
++ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
++ MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
++
+ return mt798x_phy_calibration(phydev);
+ }
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 927d3d54658ef8..9a0432145645f4 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -120,6 +120,11 @@
+ */
+ #define LAN8814_1PPM_FORMAT 17179
+
++#define PTP_RX_VERSION 0x0248
++#define PTP_TX_VERSION 0x0288
++#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
++#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
++
+ #define PTP_RX_MOD 0x024F
+ #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+ #define PTP_RX_TIMESTAMP_EN 0x024D
+@@ -765,6 +770,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
+ {
+ int ret;
+
++ /* Chip can be powered down by the bootstrap code. */
++ ret = phy_read(phydev, MII_BMCR);
++ if (ret < 0)
++ return ret;
++ if (ret & BMCR_PDOWN) {
++ ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
++ if (ret < 0)
++ return ret;
++ usleep_range(1000, 2000);
++ }
++
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+@@ -1277,6 +1293,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
+ const struct device *dev_walker;
+ int ret;
+
++ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
++
+ dev_walker = &phydev->mdio.dev;
+ do {
+ of_node = dev_walker->of_node;
+@@ -1326,28 +1344,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
+ #define MII_KSZ9131_AUTO_MDIX 0x1C
+ #define MII_KSZ9131_AUTO_MDI_SET BIT(7)
+ #define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
++#define MII_KSZ9131_DIG_AXAN_STS 0x14
++#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
++#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)
+
+ static int ksz9131_mdix_update(struct phy_device *phydev)
+ {
+ int ret;
+
+- ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
+- if (ret < 0)
+- return ret;
+-
+- if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
+- if (ret & MII_KSZ9131_AUTO_MDI_SET)
+- phydev->mdix_ctrl = ETH_TP_MDI;
+- else
+- phydev->mdix_ctrl = ETH_TP_MDI_X;
++ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
++ phydev->mdix = phydev->mdix_ctrl;
+ } else {
+- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+- }
++ ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
++ if (ret < 0)
++ return ret;
+
+- if (ret & MII_KSZ9131_AUTO_MDI_SET)
+- phydev->mdix = ETH_TP_MDI;
+- else
+- phydev->mdix = ETH_TP_MDI_X;
++ if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
++ if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
++ phydev->mdix = ETH_TP_MDI;
++ else
++ phydev->mdix = ETH_TP_MDI_X;
++ } else {
++ phydev->mdix = ETH_TP_MDI_INVALID;
++ }
++ }
+
+ return 0;
+ }
+@@ -1816,7 +1836,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
+ {0x1c, 0x20, 0xeeee},
+ };
+
+-static int ksz9477_config_init(struct phy_device *phydev)
++static int ksz9477_phy_errata(struct phy_device *phydev)
+ {
+ int err;
+ int i;
+@@ -1844,16 +1864,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
+ return err;
+ }
+
++ err = genphy_restart_aneg(phydev);
++ if (err)
++ return err;
++
++ return err;
++}
++
++static int ksz9477_config_init(struct phy_device *phydev)
++{
++ int err;
++
++ /* Only KSZ9897 family of switches needs this fix. */
++ if ((phydev->phy_id & 0xf) == 1) {
++ err = ksz9477_phy_errata(phydev);
++ if (err)
++ return err;
++ }
++
+ /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
+ * in this switch shall be regarded as broken.
+ */
+ if (phydev->dev_flags & MICREL_NO_EEE)
+ phydev->eee_broken_modes = -1;
+
+- err = genphy_restart_aneg(phydev);
+- if (err)
+- return err;
+-
+ return kszphy_config_init(phydev);
+ }
+
+@@ -1962,6 +1996,71 @@ static int kszphy_resume(struct phy_device *phydev)
+ return 0;
+ }
+
++static int ksz9477_resume(struct phy_device *phydev)
++{
++ int ret;
++
++ /* No need to initialize registers if not powered down. */
++ ret = phy_read(phydev, MII_BMCR);
++ if (ret < 0)
++ return ret;
++ if (!(ret & BMCR_PDOWN))
++ return 0;
++
++ genphy_resume(phydev);
++
++ /* After switching from power-down to normal mode, an internal global
++ * reset is automatically generated. Wait a minimum of 1 ms before
++ * read/write access to the PHY registers.
++ */
++ usleep_range(1000, 2000);
++
++ /* Only KSZ9897 family of switches needs this fix. */
++ if ((phydev->phy_id & 0xf) == 1) {
++ ret = ksz9477_phy_errata(phydev);
++ if (ret)
++ return ret;
++ }
++
++ /* Enable PHY Interrupts */
++ if (phy_interrupt_is_valid(phydev)) {
++ phydev->interrupts = PHY_INTERRUPT_ENABLED;
++ if (phydev->drv->config_intr)
++ phydev->drv->config_intr(phydev);
++ }
++
++ return 0;
++}
++
++static int ksz8061_resume(struct phy_device *phydev)
++{
++ int ret;
++
++ /* This function can be called twice when the Ethernet device is on. */
++ ret = phy_read(phydev, MII_BMCR);
++ if (ret < 0)
++ return ret;
++ if (!(ret & BMCR_PDOWN))
++ return 0;
++
++ genphy_resume(phydev);
++ usleep_range(1000, 2000);
++
++ /* Re-program the value after chip is reset. */
++ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
++ if (ret)
++ return ret;
++
++ /* Enable PHY Interrupts */
++ if (phy_interrupt_is_valid(phydev)) {
++ phydev->interrupts = PHY_INTERRUPT_ENABLED;
++ if (phydev->drv->config_intr)
++ phydev->drv->config_intr(phydev);
++ }
++
++ return 0;
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+ const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -2383,6 +2482,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+ struct hwtstamp_config config;
+ int txcfg = 0, rxcfg = 0;
+ int pkt_ts_enable;
++ int tx_mod;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+@@ -2432,9 +2532,14 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+
+- if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
++ tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
++ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
++ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
++ tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
++ } else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+- PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
++ tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
++ }
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ lan8814_config_ts_intr(ptp_priv->phydev, true);
+@@ -2492,7 +2597,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
+ }
+ }
+
+-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
++static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+ {
+ struct ptp_header *ptp_header;
+ u32 type;
+@@ -2502,7 +2607,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+ ptp_header = ptp_parse_header(skb, type);
+ skb_pull_inline(skb, ETH_HLEN);
+
++ if (!ptp_header)
++ return false;
++
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
++ return true;
+ }
+
+ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
+@@ -2514,7 +2623,8 @@ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
+ bool ret = false;
+ u16 skb_sig;
+
+- lan8814_get_sig_rx(skb, &skb_sig);
++ if (!lan8814_get_sig_rx(skb, &skb_sig))
++ return ret;
+
+ /* Iterate over all RX timestamps and match it with the received skbs */
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+@@ -2794,7 +2904,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+ return 0;
+ }
+
+-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
++static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+ {
+ struct ptp_header *ptp_header;
+ u32 type;
+@@ -2802,7 +2912,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+
++ if (!ptp_header)
++ return false;
++
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
++ return true;
+ }
+
+ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
+@@ -2816,7 +2930,8 @@ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
+
+ spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
+- lan8814_get_sig_tx(skb, &skb_sig);
++ if (!lan8814_get_sig_tx(skb, &skb_sig))
++ continue;
+
+ if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
+ continue;
+@@ -2870,7 +2985,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
+
+ spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
+- lan8814_get_sig_rx(skb, &skb_sig);
++ if (!lan8814_get_sig_rx(skb, &skb_sig))
++ continue;
+
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+@@ -3125,6 +3241,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+
++ /* Disable checking for minorVersionPTP field */
++ lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
++ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
++ lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
++ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
++
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+@@ -3313,8 +3435,10 @@ static int lan8814_probe(struct phy_device *phydev)
+ #define LAN8841_ADC_CHANNEL_MASK 198
+ #define LAN8841_PTP_RX_PARSE_L2_ADDR_EN 370
+ #define LAN8841_PTP_RX_PARSE_IP_ADDR_EN 371
++#define LAN8841_PTP_RX_VERSION 374
+ #define LAN8841_PTP_TX_PARSE_L2_ADDR_EN 434
+ #define LAN8841_PTP_TX_PARSE_IP_ADDR_EN 435
++#define LAN8841_PTP_TX_VERSION 438
+ #define LAN8841_PTP_CMD_CTL 256
+ #define LAN8841_PTP_CMD_CTL_PTP_ENABLE BIT(2)
+ #define LAN8841_PTP_CMD_CTL_PTP_DISABLE BIT(1)
+@@ -3358,6 +3482,12 @@ static int lan8841_config_init(struct phy_device *phydev)
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_IP_ADDR_EN, 0);
+
++ /* Disable checking for minorVersionPTP field */
++ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
++ LAN8841_PTP_RX_VERSION, 0xff00);
++ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
++ LAN8841_PTP_TX_VERSION, 0xff00);
++
+ 	/* 100BT Clause 40 improvement errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_1,
+@@ -3414,7 +3544,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, LAN8814_INTS);
+- if (err)
++ if (err < 0)
+ return err;
+
+ /* Enable / disable interrupts. It is OK to enable PTP interrupt
+@@ -3430,6 +3560,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
+ return err;
+
+ err = phy_read(phydev, LAN8814_INTS);
++ if (err < 0)
++ return err;
++
++		/* A positive value here is not an error, it just reflects
++		 * the interrupt status. Therefore make sure to clear the
++		 * value and report no error.
++ */
++ err = 0;
+ }
+
+ return err;
+@@ -3609,12 +3747,8 @@ static int lan8841_ts_info(struct mii_timestamper *mii_ts,
+
+ info->phc_index = ptp_priv->ptp_clock ?
+ ptp_clock_index(ptp_priv->ptp_clock) : -1;
+- if (info->phc_index == -1) {
+- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+- SOF_TIMESTAMPING_RX_SOFTWARE |
+- SOF_TIMESTAMPING_SOFTWARE;
++ if (info->phc_index == -1)
+ return 0;
+- }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+@@ -4580,7 +4714,8 @@ static int lan8841_suspend(struct phy_device *phydev)
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+
+- ptp_cancel_worker_sync(ptp_priv->ptp_clock);
++ if (ptp_priv->ptp_clock)
++ ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+
+ return genphy_suspend(phydev);
+ }
+@@ -4717,10 +4852,11 @@ static struct phy_driver ksphy_driver[] = {
+ /* PHY_BASIC_FEATURES */
+ .probe = kszphy_probe,
+ .config_init = ksz8061_config_init,
++ .soft_reset = genphy_soft_reset,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
+ .suspend = kszphy_suspend,
+- .resume = kszphy_resume,
++ .resume = ksz8061_resume,
+ }, {
+ .phy_id = PHY_ID_KSZ9021,
+ .phy_id_mask = 0x000ffffe,
+@@ -4820,6 +4956,7 @@ static struct phy_driver ksphy_driver[] = {
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &ksz9131_type,
+ .probe = kszphy_probe,
++ .soft_reset = genphy_soft_reset,
+ .config_init = ksz9131_config_init,
+ .config_intr = kszphy_config_intr,
+ .config_aneg = ksz9131_config_aneg,
+@@ -4873,7 +5010,7 @@ static struct phy_driver ksphy_driver[] = {
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
+ .suspend = genphy_suspend,
+- .resume = genphy_resume,
++ .resume = ksz9477_resume,
+ .get_features = ksz9477_get_features,
+ } };
+
+@@ -4897,6 +5034,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+ { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
++ { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
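+
+The PTP_RX_VERSION/PTP_TX_VERSION writes in the lan8814 hunks above relax
+the PHY's PTP version check so frames with any minorVersionPTP are still
+timestamped. With the macros defined at the top of the micrel.c changes:
+
+	/* PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0)
+	 *   == ((0xff & GENMASK(7, 0)) << 8) | (0x0 & GENMASK(7, 0))
+	 *   == 0xff00
+	 * i.e. accept version fields from 0x00 through 0xff; the lan8841
+	 * hunk writes the same 0xff00 value directly.
+	 */
+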
+diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
+index a838b61cd844b9..a35528497a5762 100644
+--- a/drivers/net/phy/microchip_t1.c
++++ b/drivers/net/phy/microchip_t1.c
+@@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ lan87xx_cable_test_report_trans(detect));
+
+- return 0;
++ return phy_init_hw(phydev);
+ }
+
+ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
+diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
+index ea1073adc5a16a..034f5c4d033771 100644
+--- a/drivers/net/phy/mxl-gpy.c
++++ b/drivers/net/phy/mxl-gpy.c
+@@ -107,6 +107,7 @@ struct gpy_priv {
+
+ u8 fw_major;
+ u8 fw_minor;
++ u32 wolopts;
+
+ /* It takes 3 seconds to fully switch out of loopback mode before
+ * it can safely re-enter loopback mode. Record the time when
+@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
+ }
+ #endif
+
++static int gpy_ack_interrupt(struct phy_device *phydev)
++{
++ int ret;
++
++ /* Clear all pending interrupts */
++ ret = phy_read(phydev, PHY_ISTAT);
++ return ret < 0 ? ret : 0;
++}
++
+ static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+ {
+ struct gpy_priv *priv = phydev->priv;
+@@ -262,16 +272,8 @@ static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+
+ static int gpy_config_init(struct phy_device *phydev)
+ {
+- int ret;
+-
+- /* Mask all interrupts */
+- ret = phy_write(phydev, PHY_IMASK, 0);
+- if (ret)
+- return ret;
+-
+- /* Clear all pending interrupts */
+- ret = phy_read(phydev, PHY_ISTAT);
+- return ret < 0 ? ret : 0;
++ /* Nothing to configure. Configuration Requirement Placeholder */
++ return 0;
+ }
+
+ static int gpy_probe(struct phy_device *phydev)
+@@ -619,11 +621,23 @@ static int gpy_read_status(struct phy_device *phydev)
+
+ static int gpy_config_intr(struct phy_device *phydev)
+ {
++ struct gpy_priv *priv = phydev->priv;
+ u16 mask = 0;
++ int ret;
++
++ ret = gpy_ack_interrupt(phydev);
++ if (ret)
++ return ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ mask = PHY_IMASK_MASK;
+
++ if (priv->wolopts & WAKE_MAGIC)
++ mask |= PHY_IMASK_WOL;
++
++ if (priv->wolopts & WAKE_PHY)
++ mask |= PHY_IMASK_LSTC;
++
+ return phy_write(phydev, PHY_IMASK, mask);
+ }
+
+@@ -670,6 +684,7 @@ static int gpy_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+ {
+ struct net_device *attach_dev = phydev->attached_dev;
++ struct gpy_priv *priv = phydev->priv;
+ int ret;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+@@ -717,6 +732,8 @@ static int gpy_set_wol(struct phy_device *phydev,
+ ret = phy_read(phydev, PHY_ISTAT);
+ if (ret < 0)
+ return ret;
++
++ priv->wolopts |= WAKE_MAGIC;
+ } else {
+ /* Disable magic packet matching */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+@@ -724,6 +741,13 @@ static int gpy_set_wol(struct phy_device *phydev,
+ WOL_EN);
+ if (ret < 0)
+ return ret;
++
++ /* Disable the WOL interrupt */
++ ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
++ if (ret < 0)
++ return ret;
++
++ priv->wolopts &= ~WAKE_MAGIC;
+ }
+
+ if (wol->wolopts & WAKE_PHY) {
+@@ -740,9 +764,11 @@ static int gpy_set_wol(struct phy_device *phydev,
+ if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
+ phy_trigger_machine(phydev);
+
++ priv->wolopts |= WAKE_PHY;
+ return 0;
+ }
+
++ priv->wolopts &= ~WAKE_PHY;
+ /* Disable the link state change interrupt */
+ return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+ }
+@@ -750,18 +776,10 @@ static int gpy_set_wol(struct phy_device *phydev,
+ static void gpy_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+ {
+- int ret;
++ struct gpy_priv *priv = phydev->priv;
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+- wol->wolopts = 0;
+-
+- ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
+- if (ret & WOL_EN)
+- wol->wolopts |= WAKE_MAGIC;
+-
+- ret = phy_read(phydev, PHY_IMASK);
+- if (ret & PHY_IMASK_LSTC)
+- wol->wolopts |= WAKE_PHY;
++ wol->wolopts = priv->wolopts;
+ }
+
+ static int gpy_loopback(struct phy_device *phydev, bool enable)
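[Editor's note: the mxl-gpy.c hunks above stop reading WOL state back from PHY registers and instead cache the requested options in the driver's private data, so gpy_get_wol() returns what was last programmed. A minimal userspace sketch of that cache-the-request pattern, reusing the WAKE_* bit values from linux/ethtool.h; the struct and function names here are illustrative, not the driver's:]

    #include <stdint.h>
    #include <stdio.h>

    #define WAKE_PHY   (1u << 0)   /* same bit values as linux/ethtool.h */
    #define WAKE_MAGIC (1u << 5)

    struct gpy_priv { uint32_t wolopts; };   /* cached; never read back from HW */

    void gpy_set_wol_cached(struct gpy_priv *priv, uint32_t wolopts)
    {
        /* ...program the hardware here, then record what was requested... */
        if (wolopts & WAKE_MAGIC)
            priv->wolopts |= WAKE_MAGIC;
        else
            priv->wolopts &= ~WAKE_MAGIC;

        if (wolopts & WAKE_PHY)
            priv->wolopts |= WAKE_PHY;
        else
            priv->wolopts &= ~WAKE_PHY;
    }

    uint32_t gpy_get_wol_cached(const struct gpy_priv *priv)
    {
        return priv->wolopts;   /* no register access needed */
    }

    int main(void)
    {
        struct gpy_priv priv = { 0 };

        gpy_set_wol_cached(&priv, WAKE_MAGIC);
        printf("wolopts = 0x%x\n", (unsigned)gpy_get_wol_cached(&priv));
        return 0;
    }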
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 2ce74593d6e4a1..ec2a3d16b1a2da 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1411,6 +1411,11 @@ int phy_sfp_probe(struct phy_device *phydev,
+ }
+ EXPORT_SYMBOL(phy_sfp_probe);
+
++static bool phy_drv_supports_irq(struct phy_driver *phydrv)
++{
++ return phydrv->config_intr && phydrv->handle_interrupt;
++}
++
+ /**
+ * phy_attach_direct - attach a network device to a given PHY device pointer
+ * @dev: network device to attach
+@@ -1525,6 +1530,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ if (phydev->dev_flags & PHY_F_NO_IRQ)
+ phydev->irq = PHY_POLL;
+
++ if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
++ phydev->irq = PHY_POLL;
++
+ /* Port is set to PORT_TP by default and the actual PHY driver will set
+ * it to different value depending on the PHY configuration. If we have
+ * the generic PHY driver we can't figure it out, thus set the old
+@@ -1548,7 +1556,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ goto error;
+
+ phy_resume(phydev);
+- phy_led_triggers_register(phydev);
++ if (!phydev->is_on_sfp_module)
++ phy_led_triggers_register(phydev);
+
+ /**
+ * If the external phy used by current mac interface is managed by
+@@ -1817,7 +1826,8 @@ void phy_detach(struct phy_device *phydev)
+ }
+ phydev->phylink = NULL;
+
+- phy_led_triggers_unregister(phydev);
++ if (!phydev->is_on_sfp_module)
++ phy_led_triggers_unregister(phydev);
+
+ if (phydev->mdio.dev.driver)
+ module_put(phydev->mdio.dev.driver->owner);
+@@ -2699,8 +2709,8 @@ EXPORT_SYMBOL(genphy_resume);
+ int genphy_loopback(struct phy_device *phydev, bool enable)
+ {
+ if (enable) {
+- u16 val, ctl = BMCR_LOOPBACK;
+- int ret;
++ u16 ctl = BMCR_LOOPBACK;
++ int ret, val;
+
+ ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+
+@@ -2952,7 +2962,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+ if (delay < 0)
+ return delay;
+
+- if (delay && size == 0)
++ if (size == 0)
+ return delay;
+
+ if (delay < delay_values[0] || delay > delay_values[size - 1]) {
+@@ -2985,11 +2995,6 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+ }
+ EXPORT_SYMBOL(phy_get_internal_delay);
+
+-static bool phy_drv_supports_irq(struct phy_driver *phydrv)
+-{
+- return phydrv->config_intr && phydrv->handle_interrupt;
+-}
+-
+ static int phy_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+ {
+@@ -3077,10 +3082,11 @@ static __maybe_unused int phy_led_hw_is_supported(struct led_classdev *led_cdev,
+
+ static void phy_leds_unregister(struct phy_device *phydev)
+ {
+- struct phy_led *phyled;
++ struct phy_led *phyled, *tmp;
+
+- list_for_each_entry(phyled, &phydev->leds, list) {
++ list_for_each_entry_safe(phyled, tmp, &phydev->leds, list) {
+ led_classdev_unregister(&phyled->led_cdev);
++ list_del(&phyled->list);
+ }
+ }
+
+@@ -3159,11 +3165,13 @@ static int of_phy_leds(struct phy_device *phydev)
+ err = of_phy_led(phydev, led);
+ if (err) {
+ of_node_put(led);
++ of_node_put(leds);
+ phy_leds_unregister(phydev);
+ return err;
+ }
+ }
+
++ of_node_put(leds);
+ return 0;
+ }
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 0d7354955d626c..b5f012619e42da 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1631,6 +1631,7 @@ struct phylink *phylink_create(struct phylink_config *config,
+ pl->config = config;
+ if (config->type == PHYLINK_NETDEV) {
+ pl->netdev = to_net_dev(config->dev);
++ netif_carrier_off(pl->netdev);
+ } else if (config->type == PHYLINK_DEV) {
+ pl->dev = config->dev;
+ } else {
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 894172a3e15fe8..2604d9663a5b21 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -421,9 +421,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
+ ERR_PTR(ret));
+ return ret;
+ }
++
++ return genphy_soft_reset(phydev);
+ }
+
+- return genphy_soft_reset(phydev);
++ return 0;
+ }
+
+ static int rtl821x_suspend(struct phy_device *phydev)
+@@ -1081,6 +1083,13 @@ static struct phy_driver realtek_drvs[] = {
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
++ }, {
++ PHY_ID_MATCH_EXACT(0x001cc960),
++ .name = "RTL8366S Gigabit Ethernet",
++ .suspend = genphy_suspend,
++ .resume = genphy_resume,
++ .read_mmd = genphy_read_mmd_unsupported,
++ .write_mmd = genphy_write_mmd_unsupported,
+ },
+ };
+
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index 208a9393c2dfde..274bb090b827c4 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -151,10 +151,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned int br_min, br_nom, br_max;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+
+- phylink_set(modes, Autoneg);
+- phylink_set(modes, Pause);
+- phylink_set(modes, Asym_Pause);
+-
+ /* Decode the bitrate information to MBd */
+ br_min = br_nom = br_max = 0;
+ if (id->base.br_nominal) {
+@@ -339,6 +335,10 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ }
+ }
+
++ phylink_set(modes, Autoneg);
++ phylink_set(modes, Pause);
++ phylink_set(modes, Asym_Pause);
++
+ if (bus->sfp_quirk && bus->sfp_quirk->modes)
+ bus->sfp_quirk->modes(id, modes, interfaces);
+
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 4ecfac22786514..4278a93b055e59 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -452,6 +452,11 @@ static const struct sfp_quirk sfp_quirks[] = {
+ // Rollball protocol to talk to the PHY.
+ SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+
++ // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
++ // NRZ in their EEPROM
++ SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
++ sfp_fixup_ignore_tx_fault),
++
+ SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+
+ // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+@@ -463,6 +468,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
++ // FS 2.5G Base-T
++ SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
++
+ // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ // 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+@@ -474,6 +482,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
+ SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+
++ // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
++ SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
++
+ SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
+ SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
+@@ -2386,8 +2397,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+
+ /* Handle remove event globally, it resets this state machine */
+ if (event == SFP_E_REMOVE) {
+- if (sfp->sm_mod_state > SFP_MOD_PROBE)
+- sfp_sm_mod_remove(sfp);
++ sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
+ return;
+ }
+diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
+index 897b979ec03c81..3b5fcaf0dd36db 100644
+--- a/drivers/net/phy/vitesse.c
++++ b/drivers/net/phy/vitesse.c
+@@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev)
+ return 0;
+ }
+
+-static int vsc73xx_config_aneg(struct phy_device *phydev)
+-{
+- /* The VSC73xx switches does not like to be instructed to
+- * do autonegotiation in any way, it prefers that you just go
+- * with the power-on/reset defaults. Writing some registers will
+- * just make autonegotiation permanently fail.
+- */
+- return 0;
+-}
+-
+ /* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
+@@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc738x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+@@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc738x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+@@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc739x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+@@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc739x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
+index fbaaa8c102a1b1..7d9201ef925ff1 100644
+--- a/drivers/net/ppp/ppp_async.c
++++ b/drivers/net/ppp/ppp_async.c
+@@ -460,6 +460,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ case PPPIOCSMRU:
+ if (get_user(val, p))
+ break;
++ if (val > U16_MAX) {
++ err = -EINVAL;
++ break;
++ }
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+@@ -537,7 +541,7 @@ ppp_async_encode(struct asyncppp *ap)
+ * and 7 (code-reject) must be sent as though no options
+ * had been negotiated.
+ */
+- islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
++ islcp = proto == PPP_LCP && count >= 3 && 1 <= data[2] && data[2] <= 7;
+
+ if (i == 0) {
+ if (islcp)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index a9beacd552cf82..90f1cfbc7c50b3 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -70,6 +70,7 @@
+ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
+
+ #define PPP_PROTO_LEN 2
++#define PPP_LCP_HDRLEN 4
+
+ /*
+ * An instance of /dev/ppp can be associated with either a ppp
+@@ -491,6 +492,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
+ return ret;
+ }
+
++static bool ppp_check_packet(struct sk_buff *skb, size_t count)
++{
++ /* LCP packets must include the LCP header, which is 4 bytes long:
++ * 1-byte code, 1-byte identifier, and 2-byte length.
++ */
++ return get_unaligned_be16(skb->data) != PPP_LCP ||
++ count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
++}
++
+ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+@@ -513,6 +523,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ kfree_skb(skb);
+ goto out;
+ }
++ ret = -EINVAL;
++ if (unlikely(!ppp_check_packet(skb, count))) {
++ kfree_skb(skb);
++ goto out;
++ }
+
+ switch (pf->kind) {
+ case INTERFACE:
+@@ -2254,7 +2269,7 @@ static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
+ if (!pchb)
+ goto out_rcu;
+
+- spin_lock(&pchb->downl);
++ spin_lock_bh(&pchb->downl);
+ if (!pchb->chan) {
+ /* channel got unregistered */
+ kfree_skb(skb);
+@@ -2266,7 +2281,7 @@ static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb)
+ kfree_skb(skb);
+
+ outl:
+- spin_unlock(&pchb->downl);
++ spin_unlock_bh(&pchb->downl);
+ out_rcu:
+ rcu_read_unlock();
+
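[Editor's note: the ppp_generic.c hunks above reject any write that claims the LCP protocol number but is too short to carry a full LCP header. A minimal userspace sketch of the same check, assuming a big-endian protocol field and the kernel's PPP_LCP value (0xc021); the function name is illustrative:]

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PPP_PROTO_LEN  2
    #define PPP_LCP        0xc021   /* LCP protocol number */
    #define PPP_LCP_HDRLEN 4        /* code, identifier, 16-bit length */

    /* Anything claiming to be LCP must be long enough for a full LCP header. */
    bool ppp_frame_ok(const uint8_t *data, size_t count)
    {
        uint16_t proto;

        if (count < PPP_PROTO_LEN)
            return false;
        proto = (uint16_t)((data[0] << 8) | data[1]);   /* big-endian field */
        return proto != PPP_LCP || count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
    }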
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index ebcdffdf4f0e01..52d05ce4a28198 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -453,6 +453,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ case PPPIOCSMRU:
+ if (get_user(val, (int __user *) argp))
+ break;
++ if (val > U16_MAX) {
++ err = -EINVAL;
++ break;
++ }
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+@@ -687,7 +691,7 @@ ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
+
+ /* strip address/control field if present */
+ p = skb->data;
+- if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
++ if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto err;
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index ba8b6bd8233cad..96cca4ee470a4b 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
+ struct sk_buff *skb;
+ int error = 0;
+
+- if (sk->sk_state & PPPOX_BOUND) {
+- error = -EIO;
+- goto end;
+- }
++ if (sk->sk_state & PPPOX_BOUND)
++ return -EIO;
+
+ skb = skb_recv_datagram(sk, flags, &error);
+- if (error < 0)
+- goto end;
++ if (!skb)
++ return error;
+
+- if (skb) {
+- total_len = min_t(size_t, total_len, skb->len);
+- error = skb_copy_datagram_msg(skb, 0, m, total_len);
+- if (error == 0) {
+- consume_skb(skb);
+- return total_len;
+- }
++ total_len = min_t(size_t, total_len, skb->len);
++ error = skb_copy_datagram_msg(skb, 0, m, total_len);
++ if (error == 0) {
++ consume_skb(skb);
++ return total_len;
+ }
+
+ kfree_skb(skb);
+-end:
+ return error;
+ }
+
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index ba93bab948e09f..bf9e801cc61cce 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -643,46 +643,57 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+ int
+ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
+ {
+- struct cstate *cs;
+- unsigned ihl;
+-
++ const struct tcphdr *th;
+ unsigned char index;
++ struct iphdr *iph;
++ struct cstate *cs;
++ unsigned int ihl;
+
+- if(isize < 20) {
+- /* The packet is shorter than a legal IP header */
++ /* The packet is shorter than a legal IP header.
++ * Also make sure isize is positive.
++ */
++ if (isize < (int)sizeof(struct iphdr)) {
++runt:
+ comp->sls_i_runt++;
+- return slhc_toss( comp );
++ return slhc_toss(comp);
+ }
++ iph = (struct iphdr *)icp;
+ /* Peek at the IP header's IHL field to find its length */
+- ihl = icp[0] & 0xf;
+- if(ihl < 20 / 4){
+- /* The IP header length field is too small */
+- comp->sls_i_runt++;
+- return slhc_toss( comp );
+- }
+- index = icp[9];
+- icp[9] = IPPROTO_TCP;
++ ihl = iph->ihl;
++ /* The IP header length field is too small,
++ * or the packet is shorter than the IP header followed
++ * by a minimal TCP header.
++ */
++ if (ihl < 5 || isize < ihl * 4 + sizeof(struct tcphdr))
++ goto runt;
++
++ index = iph->protocol;
++ iph->protocol = IPPROTO_TCP;
+
+ if (ip_fast_csum(icp, ihl)) {
+ /* Bad IP header checksum; discard */
+ comp->sls_i_badcheck++;
+- return slhc_toss( comp );
++ return slhc_toss(comp);
+ }
+- if(index > comp->rslot_limit) {
++ if (index > comp->rslot_limit) {
+ comp->sls_i_error++;
+ return slhc_toss(comp);
+ }
+-
++ th = (struct tcphdr *)(icp + ihl * 4);
++ if (th->doff < sizeof(struct tcphdr) / 4)
++ goto runt;
++ if (isize < ihl * 4 + th->doff * 4)
++ goto runt;
+ /* Update local state */
+ cs = &comp->rstate[comp->recv_current = index];
+ comp->flags &=~ SLF_TOSS;
+- memcpy(&cs->cs_ip,icp,20);
+- memcpy(&cs->cs_tcp,icp + ihl*4,20);
++ memcpy(&cs->cs_ip, iph, sizeof(*iph));
++ memcpy(&cs->cs_tcp, th, sizeof(*th));
+ if (ihl > 5)
+- memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4);
+- if (cs->cs_tcp.doff > 5)
+- memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
+- cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
++ memcpy(cs->cs_ipopt, &iph[1], (ihl - 5) * 4);
++ if (th->doff > 5)
++ memcpy(cs->cs_tcpopt, &th[1], (th->doff - 5) * 4);
++ cs->cs_hsize = ihl*2 + th->doff*2;
+ cs->initialized = true;
+ /* Put headers back on packet
+ * Neither header checksum is recalculated
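[Editor's note: the slhc_remember() rewrite above bounds every header-derived offset against the real packet length before anything is copied, since the IHL and TCP data-offset fields are attacker-controlled. A compact userspace sketch of the same validation over a raw IPv4+TCP buffer; offsets follow the standard header layouts:]

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Bound every header-derived offset against the real packet length:
     * IHL and the TCP data offset are both attacker-controlled fields. */
    bool ip_tcp_headers_fit(const uint8_t *pkt, size_t len)
    {
        size_t ihl, doff;

        if (len < 20)                        /* minimal IPv4 header */
            return false;
        ihl = (size_t)(pkt[0] & 0x0f) * 4;   /* IHL counts 32-bit words */
        if (ihl < 20 || len < ihl + 20)      /* room for a minimal TCP header */
            return false;
        doff = (size_t)(pkt[ihl + 12] >> 4) * 4;   /* TCP data offset */
        return doff >= 20 && len >= ihl + doff;
    }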
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 5c01cc7b9949de..e7212a64a59183 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1177,6 +1177,11 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
+ struct sk_buff *skb;
+ int err, depth;
+
++ if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
++ err = -EINVAL;
++ goto err;
++ }
++
+ if (q->flags & IFF_VNET_HDR)
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 508d9a392ab182..f575f225d41789 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -281,8 +281,10 @@ static int __team_options_register(struct team *team,
+ return 0;
+
+ inst_rollback:
+- for (i--; i >= 0; i--)
++ for (i--; i >= 0; i--) {
+ __team_option_inst_del_option(team, dst_opts[i]);
++ list_del(&dst_opts[i]->list);
++ }
+
+ i = option_count;
+ alloc_rollback:
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index afa5497f7c35c3..e9cd3b810e2c79 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+ tun->tfiles[tun->numqueues - 1]);
+ ntfile = rtnl_dereference(tun->tfiles[index]);
+ ntfile->queue_index = index;
++ ntfile->xdp_rxq.queue_index = index;
+ rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
+ NULL);
+
+@@ -1630,13 +1631,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
+ switch (act) {
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
+- if (err)
++ if (err) {
++ dev_core_stats_rx_dropped_inc(tun->dev);
+ return err;
++ }
++ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
+ break;
+ case XDP_TX:
+ err = tun_xdp_tx(tun->dev, xdp);
+- if (err < 0)
++ if (err < 0) {
++ dev_core_stats_rx_dropped_inc(tun->dev);
+ return err;
++ }
++ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
+ break;
+ case XDP_PASS:
+ break;
+@@ -2125,14 +2132,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ tun_is_little_endian(tun), true,
+ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+- pr_err("unexpected GSO type: "
+- "0x%x, gso_size %d, hdr_len %d\n",
+- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+- tun16_to_cpu(tun, gso.hdr_len));
+- print_hex_dump(KERN_ERR, "tun: ",
+- DUMP_PREFIX_NONE,
+- 16, 1, skb->head,
+- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
++
++ if (net_ratelimit()) {
++ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
++ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
++ tun16_to_cpu(tun, gso.hdr_len));
++ print_hex_dump(KERN_ERR, "tun: ",
++ DUMP_PREFIX_NONE,
++ 16, 1, skb->head,
++ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
++ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+@@ -2450,6 +2459,9 @@ static int tun_xdp_one(struct tun_struct *tun,
+ bool skb_xdp = false;
+ struct page *page;
+
++ if (unlikely(datasize < ETH_HLEN))
++ return -EINVAL;
++
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ if (gso->gso_type) {
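[Editor's note: both the tap_get_user_xdp() and tun_xdp_one() hunks above reject buffers shorter than an Ethernet header before any parsing happens. The equivalent guard, sketched in plain C with ETH_HLEN's usual value of 14:]

    #include <stdbool.h>
    #include <stddef.h>

    #define ETH_HLEN 14   /* dst MAC (6) + src MAC (6) + ethertype (2) */

    /* Reject any frame too short to contain an Ethernet header. */
    bool frame_has_eth_header(const void *data, const void *data_end)
    {
        ptrdiff_t len = (const char *)data_end - (const char *)data;

        return len >= ETH_HLEN;
    }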
+diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
+index a017e9de2119d5..284375f662f1e0 100644
+--- a/drivers/net/usb/aqc111.c
++++ b/drivers/net/usb/aqc111.c
+@@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ u16 pkt_count = 0;
+ u64 desc_hdr = 0;
+ u16 vlan_tag = 0;
+- u32 skb_len = 0;
++ u32 skb_len;
+
+ if (!skb)
+ goto err;
+
+- if (skb->len == 0)
++ skb_len = skb->len;
++ if (skb_len < sizeof(desc_hdr))
+ goto err;
+
+- skb_len = skb->len;
+ /* RX Descriptor Header */
+- skb_trim(skb, skb->len - sizeof(desc_hdr));
++ skb_trim(skb, skb_len - sizeof(desc_hdr));
+ desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
+
+ /* Check these packets */
+@@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ continue;
+ }
+
+- /* Clone SKB */
+- new_skb = skb_clone(skb, GFP_ATOMIC);
++ new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
+
+ if (!new_skb)
+ goto err;
+
+- new_skb->len = pkt_len;
++ skb_put(new_skb, pkt_len);
++ memcpy(new_skb->data, skb->data, pkt_len);
+ skb_pull(new_skb, AQ_RX_HW_PAD);
+- skb_set_tail_pointer(new_skb, new_skb->len);
+
+- new_skb->truesize = SKB_TRUESIZE(new_skb->len);
+ if (aqc111_data->rx_checksum)
+ aqc111_rx_checksum(new_skb, pkt_desc);
+
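[Editor's note: this aqc111.c hunk, like the later ax88179_178a.c, smsc95xx.c and sr9700.c ones, replaces skb_clone() plus manual len/tail/truesize surgery with a fresh, right-sized allocation and a memcpy, which keeps socket memory accounting honest. A userspace analogue of the copy-out pattern; the helper name is made up for illustration:]

    #include <stdlib.h>
    #include <string.h>

    /* Give each extracted packet its own right-sized buffer instead of
     * aliasing into (and pinning) the large receive buffer. */
    unsigned char *extract_packet(const unsigned char *rx_buf,
                                  size_t offset, size_t pkt_len)
    {
        unsigned char *pkt = malloc(pkt_len);

        if (!pkt)
            return NULL;
        memcpy(pkt, rx_buf + offset, pkt_len);   /* independent copy */
        return pkt;                              /* caller frees */
    }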
+diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
+index 3777c7e2e6fc00..e47bb125048d47 100644
+--- a/drivers/net/usb/ax88172a.c
++++ b/drivers/net/usb/ax88172a.c
+@@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
+ u8 buf[ETH_ALEN];
+ struct ax88172a_private *priv;
+
+- usbnet_get_endpoints(dev, intf);
++ ret = usbnet_get_endpoints(dev, intf);
++ if (ret)
++ return ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index aff39bf3161ded..73de34179f3525 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -173,6 +173,7 @@ struct ax88179_data {
+ u8 in_pm;
+ u32 wol_supported;
+ u32 wolopts;
++ u8 disconnecting;
+ };
+
+ struct ax88179_int_data {
+@@ -208,6 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ {
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
++ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ BUG_ON(!dev);
+
+@@ -219,7 +221,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+- if (unlikely(ret < 0))
++ if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
+ netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+ index, ret);
+
+@@ -231,6 +233,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ {
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
++ struct ax88179_data *ax179_data = dev->driver_priv;
+
+ BUG_ON(!dev);
+
+@@ -242,7 +245,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+- if (unlikely(ret < 0))
++ if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
+ netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+ index, ret);
+
+@@ -323,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
+
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+- netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
++ if (!link)
++ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
+ }
+ }
+
+@@ -492,6 +496,20 @@ static int ax88179_resume(struct usb_interface *intf)
+ return usbnet_resume(intf);
+ }
+
++static void ax88179_disconnect(struct usb_interface *intf)
++{
++ struct usbnet *dev = usb_get_intfdata(intf);
++ struct ax88179_data *ax179_data;
++
++ if (!dev)
++ return;
++
++ ax179_data = dev->driver_priv;
++ ax179_data->disconnecting = 1;
++
++ usbnet_disconnect(intf);
++}
++
+ static void
+ ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+ {
+@@ -1256,6 +1274,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
+
+ if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(dev->net, mac);
++ if (!is_local_ether_addr(mac))
++ dev->net->addr_assign_type = NET_ADDR_PERM;
+ } else {
+ netdev_info(dev->net, "invalid MAC address, using random\n");
+ eth_hw_addr_random(dev->net);
+@@ -1437,21 +1457,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+
+- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+ ax88179_rx_checksum(skb, pkt_hdr);
+ return 1;
+ }
+
+- ax_skb = skb_clone(skb, GFP_ATOMIC);
++ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
+ if (!ax_skb)
+ return 0;
+- skb_trim(ax_skb, pkt_len);
++ skb_put(ax_skb, pkt_len);
++ memcpy(ax_skb->data, skb->data + 2, pkt_len);
+
+- /* Skip IP alignment pseudo header */
+- skb_pull(ax_skb, 2);
+-
+- skb->truesize = pkt_len_plus_padd +
+- SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+
+@@ -1526,6 +1541,7 @@ static int ax88179_link_reset(struct usbnet *dev)
+ GMII_PHY_PHYSR, 2, &tmp16);
+
+ if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
++ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
+ return 0;
+ } else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+ mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
+@@ -1563,6 +1579,8 @@ static int ax88179_link_reset(struct usbnet *dev)
+
+ netif_carrier_on(dev->net);
+
++ netdev_info(dev->net, "ax88179 - Link status is: 1\n");
++
+ return 0;
+ }
+
+@@ -1583,11 +1601,11 @@ static int ax88179_reset(struct usbnet *dev)
+
+ *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+- msleep(200);
++ msleep(500);
+
+ *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+- msleep(100);
++ msleep(200);
+
+ /* Ethernet PHY Auto Detach*/
+ ax88179_auto_detach(dev);
+@@ -1659,6 +1677,27 @@ static int ax88179_reset(struct usbnet *dev)
+ return 0;
+ }
+
++static int ax88179_net_reset(struct usbnet *dev)
++{
++ u16 tmp16;
++
++ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
++ 2, &tmp16);
++ if (tmp16) {
++ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
++ 2, 2, &tmp16);
++ if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
++ tmp16 |= AX_MEDIUM_RECEIVE_EN;
++ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
++ 2, 2, &tmp16);
++ }
++ } else {
++ ax88179_reset(dev);
++ }
++
++ return 0;
++}
++
+ static int ax88179_stop(struct usbnet *dev)
+ {
+ u16 tmp16;
+@@ -1678,7 +1717,7 @@ static const struct driver_info ax88179_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1691,7 +1730,7 @@ static const struct driver_info ax88178a_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1704,7 +1743,7 @@ static const struct driver_info cypress_GX3_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1717,7 +1756,7 @@ static const struct driver_info dlink_dub1312_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1730,7 +1769,7 @@ static const struct driver_info sitecom_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1743,7 +1782,7 @@ static const struct driver_info samsung_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1756,7 +1795,7 @@ static const struct driver_info lenovo_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1769,7 +1808,7 @@ static const struct driver_info belkin_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1782,7 +1821,7 @@ static const struct driver_info toshiba_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1795,7 +1834,7 @@ static const struct driver_info mct_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1808,7 +1847,7 @@ static const struct driver_info at_umc2000_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1821,7 +1860,7 @@ static const struct driver_info at_umc200_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1834,7 +1873,7 @@ static const struct driver_info at_umc2000sp_info = {
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+- .reset = ax88179_reset,
++ .reset = ax88179_net_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+@@ -1906,7 +1945,7 @@ static struct usb_driver ax88179_178a_driver = {
+ .suspend = ax88179_suspend,
+ .resume = ax88179_resume,
+ .reset_resume = ax88179_resume,
+- .disconnect = usbnet_disconnect,
++ .disconnect = ax88179_disconnect,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+ };
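[Editor's note: the ax88179 hunks above set a 'disconnecting' flag from the USB disconnect callback so that the expected -ENODEV failures during unplug are not logged as register-access warnings. A minimal sketch of that suppression, using C11 atomics in place of the driver's plain u8 field; names are illustrative:]

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ax_dev { atomic_bool disconnecting; };

    void ax_begin_disconnect(struct ax_dev *dev)
    {
        atomic_store(&dev->disconnecting, true);   /* set before teardown I/O */
    }

    /* -ENODEV is the normal outcome of I/O racing an unplug: stay quiet. */
    void ax_report_io_error(struct ax_dev *dev, int err)
    {
        if (err == -ENODEV && atomic_load(&dev->disconnecting))
            return;
        fprintf(stderr, "register access failed: %d\n", err);
    }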
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 99ec1d4a972db8..8b6d6a1b3c2eca 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ err = dm_read_shared_word(dev, 1, loc, &res);
+ if (err < 0) {
+ netdev_err(dev->net, "MDIO read error: %d\n", err);
+- return err;
++ return 0;
+ }
+
+ netdev_dbg(dev->net,
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 687d70cfc55635..46afb95ffabe3b 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
+ return;
+ }
+
+- if (urb->actual_length <= IPHETH_IP_ALIGN) {
+- dev->net->stats.rx_length_errors++;
+- return;
+- }
++ /* iPhone may periodically send URBs with no payload
++ * on the "bulk in" endpoint. It is safe to ignore them.
++ */
++ if (urb->actual_length == 0)
++ goto rx_submit;
+
+ /* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames,
+ * but rather are control frames. Their purpose is not documented, and
+@@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
+ * URB received from the bulk IN endpoint.
+ */
+ if (unlikely
+- (((char *)urb->transfer_buffer)[0] == 0 &&
++ (urb->actual_length == 4 &&
++ ((char *)urb->transfer_buffer)[0] == 0 &&
+ ((char *)urb->transfer_buffer)[1] == 1))
+ goto rx_submit;
+
+@@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
+ if (retval != 0) {
+ dev_err(&dev->intf->dev, "%s: callback retval: %d\n",
+ __func__, retval);
+- return;
+ }
+
+ rx_submit:
+@@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
+ 0x02, /* index */
+ dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
+ IPHETH_CTRL_TIMEOUT);
+- if (retval < 0) {
++ if (retval <= 0) {
+ dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
++ if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
++ (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
+ netif_carrier_on(dev->net);
+ if (dev->tx_urb->status != -EINPROGRESS)
+ netif_wake_queue(dev->net);
+@@ -475,8 +477,8 @@ static int ipheth_close(struct net_device *net)
+ {
+ struct ipheth_device *dev = netdev_priv(net);
+
+- cancel_delayed_work_sync(&dev->carrier_work);
+ netif_stop_queue(net);
++ cancel_delayed_work_sync(&dev->carrier_work);
+ return 0;
+ }
+
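[Editor's note: the ipheth_carrier_set() hunk above treats a zero-length control transfer as a failure and picks the carrier byte based on how many bytes actually arrived. A sketch of that length-aware check, assuming the driver's IPHETH_CARRIER_ON value of 0x04:]

    #include <stdbool.h>
    #include <stdint.h>

    #define IPHETH_CARRIER_ON 0x04   /* value assumed from the driver */

    /* Pick the carrier byte based on how many bytes actually arrived;
     * zero bytes is treated as failure just like a negative status. */
    bool ipheth_carrier_on(const uint8_t *buf, int retval)
    {
        if (retval <= 0)
            return false;
        if (retval == 1)
            return buf[0] == IPHETH_CARRIER_ON;
        return buf[1] == IPHETH_CARRIER_ON;   /* retval >= 2 */
    }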
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 59cde06aa7f60c..921ae046f86041 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
+
+ lan78xx_rx_urb_submit_all(dev);
+
++ local_bh_disable();
+ napi_schedule(&dev->napi);
++ local_bh_enable();
+ }
+
+ return 0;
+@@ -3035,7 +3037,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+ if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ buf &= ~MAC_CR_GMII_EN_;
+
+- if (dev->chipid == ID_REV_CHIP_ID_7800_) {
++ if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
++ dev->chipid == ID_REV_CHIP_ID_7850_) {
+ ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+ if (!ret && sig != EEPROM_INDICATOR) {
+ /* Implies there is no external eeprom. Set mac speed */
+@@ -3134,7 +3137,8 @@ static int lan78xx_open(struct net_device *net)
+ done:
+ mutex_unlock(&dev->dev_mutex);
+
+- usb_autopm_put_interface(dev->intf);
++ if (ret < 0)
++ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 344af3c5c83668..92c1500fa7c448 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -201,6 +201,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ break;
+ default:
+ /* not ip - do not know what to do */
++ kfree_skb(skbn);
+ goto skip;
+ }
+
+@@ -1289,6 +1290,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
++ {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */
+ {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
+ {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
+ {QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
+@@ -1367,6 +1369,9 @@ static const struct usb_device_id products[] = {
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+@@ -1376,6 +1381,8 @@ static const struct usb_device_id products[] = {
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)}, /* Telit FN912 series */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)}, /* Telit FN912 series */
+ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
+@@ -1430,6 +1437,8 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+ {QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
+ {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
++ {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
++ {QMI_FIXED_INTF(0x2dee, 0x4d22, 5)}, /* MeiG Smart SRM825L */
+
+ /* 4. Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index afb20c0ed688d8..ce19ebd180f126 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -2543,7 +2543,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ }
+ }
+
+- if (list_empty(&tp->rx_done))
++ if (list_empty(&tp->rx_done) || work_done >= budget)
+ goto out1;
+
+ clear_bit(RX_EPROTO, &tp->flags);
+@@ -2559,6 +2559,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ struct urb *urb;
+ u8 *rx_data;
+
++ /* A single USB bulk transfer may contain many packets, so the
++ * total packet count may exceed the budget. Deal with all
++ * packets in the current bulk transfer, and defer handling the
++ * next bulk transfer to the next schedule if the budget is
++ * exhausted.
++ */
++ if (work_done >= budget)
++ break;
++
+ list_del_init(cursor);
+
+ agg = list_entry(cursor, struct rx_agg, list);
+@@ -2578,9 +2587,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ unsigned int pkt_len, rx_frag_head_sz;
+ struct sk_buff *skb;
+
+- /* limit the skb numbers for rx_queue */
+- if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
+- break;
++ WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
+
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ if (pkt_len < ETH_ZLEN)
+@@ -2658,9 +2665,10 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ }
+ }
+
++ /* Splice the remaining list back to rx_done for the next schedule */
+ if (!list_empty(&rx_queue)) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+- list_splice_tail(&rx_queue, &tp->rx_done);
++ list_splice(&rx_queue, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ }
+
+@@ -2959,6 +2967,8 @@ static void rtl8152_nic_reset(struct r8152 *tp)
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
+ break;
+ usleep_range(100, 400);
+@@ -3288,6 +3298,8 @@ static void rtl_disable(struct r8152 *tp)
+ rxdy_gated_en(tp, true);
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
+ break;
+@@ -3295,6 +3307,8 @@ static void rtl_disable(struct r8152 *tp)
+ }
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
+ break;
+ usleep_range(1000, 2000);
+@@ -5129,14 +5143,23 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
+ data = (u8 *)mac;
+ data += __le16_to_cpu(mac->fw_offset);
+
+- generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
+- type);
++ if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length,
++ data, type) < 0) {
++ dev_err(&tp->intf->dev, "Write %s fw fail\n",
++ type ? "PLA" : "USB");
++ return;
++ }
+
+ ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
+ __le16_to_cpu(mac->bp_ba_value));
+
+- generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
+- __le16_to_cpu(mac->bp_num) << 1, mac->bp, type);
++ if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD,
++ ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4),
++ mac->bp, type) < 0) {
++ dev_err(&tp->intf->dev, "Write %s bp fail\n",
++ type ? "PLA" : "USB");
++ return;
++ }
+
+ bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
+ if (bp_en_addr)
+@@ -5458,6 +5481,8 @@ static void wait_oob_link_list_ready(struct r8152 *tp)
+ int i;
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+@@ -5472,6 +5497,8 @@ static void r8156b_wait_loading_flash(struct r8152 *tp)
+ int i;
+
+ for (i = 0; i < 100; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE)
+ break;
+ usleep_range(1000, 2000);
+@@ -5594,6 +5621,8 @@ static int r8153_pre_firmware_1(struct r8152 *tp)
+ for (i = 0; i < 104; i++) {
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
+
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ return -ENODEV;
+ if (!(ocp_data & WTD1_EN))
+ break;
+ usleep_range(1000, 2000);
+@@ -5750,6 +5779,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable)
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ for (i = 0; i < 20; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ return;
+ usleep_range(1000, 2000);
+ if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
+ break;
+@@ -8356,6 +8387,8 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
+ struct r8152 *tp = usb_get_intfdata(intf);
+ struct net_device *netdev;
+
++ rtnl_lock();
++
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
+ return 0;
+
+@@ -8387,20 +8420,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
+ struct sockaddr sa;
+
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
+- return 0;
++ goto exit;
+
+ rtl_set_accessible(tp);
+
+ /* reset the MAC address in case of policy change */
+- if (determine_ethernet_addr(tp, &sa) >= 0) {
+- rtnl_lock();
++ if (determine_ethernet_addr(tp, &sa) >= 0)
+ dev_set_mac_address (tp->netdev, &sa, NULL);
+- rtnl_unlock();
+- }
+
+ netdev = tp->netdev;
+ if (!netif_running(netdev))
+- return 0;
++ goto exit;
+
+ set_bit(WORK_ENABLE, &tp->flags);
+ if (netif_carrier_ok(netdev)) {
+@@ -8419,6 +8449,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
+
++exit:
++ rtnl_unlock();
+ return 0;
+ }
+
+@@ -9993,6 +10025,7 @@ static const struct usb_device_id rtl8152_table[] = {
+ { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
++ { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
+ {}
+ };
+
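[Editor's note: the rx_bottom() hunks above stop dequeuing aggregation buffers once the NAPI budget is spent and splice the untouched remainder back onto rx_done for the next poll. The control flow, sketched over a plain array where each entry is the packet count of one bulk transfer:]

    /* Each queued_pkts[i] is the packet count of one received bulk transfer.
     * Stop before starting a new transfer once the budget is spent; callers
     * resume from the untouched remainder on the next poll. */
    int poll_rx(const int queued_pkts[], int nbufs, int budget)
    {
        int work_done = 0;

        for (int i = 0; i < nbufs; i++) {
            if (work_done >= budget)
                break;               /* leave the rest for next time */
            work_done += queued_pkts[i]; /* finish the current transfer */
        }
        return work_done;
    }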
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 97afd7335d8685..01a3b2417a5401 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
+ {
+ rtl8150_t *dev = netdev_priv(netdev);
+- short lpa, bmcr;
++ short lpa = 0;
++ short bmcr = 0;
+ u32 supported;
+
+ supported = (SUPPORTED_10baseT_Half |
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index a530f20ee25755..8e82184be5e7d9 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
+ static int smsc95xx_reset(struct usbnet *dev)
+ {
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+- u32 read_buf, write_buf, burst_cap;
++ u32 read_buf, burst_cap;
+ int ret = 0, timeout;
+
+ netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
+@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
+ return ret;
+ netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+
++ ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
++ if (ret < 0)
++ return ret;
+ /* Configure GPIO pins as LED outputs */
+- write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+- LED_GPIO_CFG_FDX_LED;
+- ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
++ read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
++ LED_GPIO_CFG_FDX_LED;
++ ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
+ if (ret < 0)
+ return ret;
+
+@@ -1810,9 +1813,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf)
+
+ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
+ {
+- skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
++ u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);
++
++ skb->csum = (__force __wsum)get_unaligned(csum_ptr);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+- skb_trim(skb, skb->len - 2);
++ skb_trim(skb, skb->len - 2); /* remove csum */
+ }
+
+ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+@@ -1870,25 +1875,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ if (dev->net->features & NETIF_F_RXCSUM)
+ smsc95xx_rx_csum_offload(skb);
+ skb_trim(skb, skb->len - 4); /* remove fcs */
+- skb->truesize = size + sizeof(struct sk_buff);
+
+ return 1;
+ }
+
+- ax_skb = skb_clone(skb, GFP_ATOMIC);
++ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (unlikely(!ax_skb)) {
+ netdev_warn(dev->net, "Error allocating skb\n");
+ return 0;
+ }
+
+- ax_skb->len = size;
+- ax_skb->data = packet;
+- skb_set_tail_pointer(ax_skb, size);
++ skb_put(ax_skb, size);
++ memcpy(ax_skb->data, packet, size);
+
+ if (dev->net->features & NETIF_F_RXCSUM)
+ smsc95xx_rx_csum_offload(ax_skb);
+ skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
+- ax_skb->truesize = size + sizeof(struct sk_buff);
+
+ usbnet_skb_return(dev, ax_skb);
+ }
+@@ -2104,6 +2106,11 @@ static const struct usb_device_id products[] = {
+ USB_DEVICE(0x0424, 0x9E08),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
++ {
++ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
++ USB_DEVICE(0x0878, 0x1400),
++ .driver_info = (unsigned long)&smsc95xx_info,
++ },
+ {
+ /* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
+ USB_DEVICE(0x184F, 0x0051),
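[Editor's note: the smsc95xx_rx_csum_offload() hunk above reads the 2-byte checksum trailer with get_unaligned(), since it can land at any byte offset inside the receive buffer. The portable userspace equivalent is a memcpy-based load:]

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* memcpy-based load: the portable spelling of get_unaligned() for a
     * 16-bit checksum trailer that may sit at any byte offset.
     * Caller guarantees len >= 2. */
    uint16_t read_trailing_csum(const uint8_t *pkt, size_t len)
    {
        uint16_t csum;

        memcpy(&csum, pkt + len - 2, sizeof(csum));
        return csum;
    }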
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 3164451e1010cc..cb7d2f798fb436 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+ int rc = 0;
++ int err;
+
+ if (phy_id) {
+ netdev_dbg(netdev, "Only internal phy supported\n");
+@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+ if (loc == MII_BMSR) {
+ u8 value;
+
+- sr_read_reg(dev, SR_NSR, &value);
++ err = sr_read_reg(dev, SR_NSR, &value);
++ if (err < 0)
++ return err;
++
+ if (value & NSR_LINKST)
+ rc = 1;
+ }
+- sr_share_read_word(dev, 1, loc, &res);
++ err = sr_share_read_word(dev, 1, loc, &res);
++ if (err < 0)
++ return err;
++
+ if (rc == 1)
+ res = le16_to_cpu(res) | BMSR_LSTATUS;
+ else
+@@ -421,19 +428,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ skb_pull(skb, 3);
+ skb->len = len;
+ skb_set_tail_pointer(skb, len);
+- skb->truesize = len + sizeof(struct sk_buff);
+ return 2;
+ }
+
+- /* skb_clone is used for address align */
+- sr_skb = skb_clone(skb, GFP_ATOMIC);
++ sr_skb = netdev_alloc_skb_ip_align(dev->net, len);
+ if (!sr_skb)
+ return 0;
+
+- sr_skb->len = len;
+- sr_skb->data = skb->data + 3;
+- skb_set_tail_pointer(sr_skb, len);
+- sr_skb->truesize = len + sizeof(struct sk_buff);
++ skb_put(sr_skb, len);
++ memcpy(sr_skb->data, skb->data + 3, len);
+ usbnet_skb_return(dev, sr_skb);
+
+ skb_pull(skb, len + SR_RX_OVERHEAD);
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
+index f5e19f3ef6cdd2..4de51448218355 100644
+--- a/drivers/net/usb/sr9800.c
++++ b/drivers/net/usb/sr9800.c
+@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+
+ data->eeprom_len = SR9800_EEPROM_LEN;
+
+- usbnet_get_endpoints(dev, intf);
++ ret = usbnet_get_endpoints(dev, intf);
++ if (ret)
++ goto out;
+
+ /* LED Setting Rule :
+ * AABB:CCDD
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 2d14b0d78541a3..60c58dd6d25311 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -61,9 +61,6 @@
+
+ /*-------------------------------------------------------------------------*/
+
+-// randomly generated ethernet address
+-static u8 node_id [ETH_ALEN];
+-
+ /* use ethtool to change the level for any given device */
+ static int msg_level = -1;
+ module_param (msg_level, int, 0);
+@@ -467,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+ void usbnet_defer_kevent (struct usbnet *dev, int work)
+ {
+ set_bit (work, &dev->flags);
+- if (!schedule_work (&dev->kevent))
+- netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
+- else
+- netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
++ if (!usbnet_going_away(dev)) {
++ if (!schedule_work(&dev->kevent))
++ netdev_dbg(dev->net,
++ "kevent %s may have been dropped\n",
++ usbnet_event_names[work]);
++ else
++ netdev_dbg(dev->net,
++ "kevent %s scheduled\n", usbnet_event_names[work]);
++ }
+ }
+ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
+
+@@ -538,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ tasklet_schedule (&dev->bh);
+ break;
+ case 0:
+- __usbnet_queue_skb(&dev->rxq, skb, rx_start);
++ if (!usbnet_going_away(dev))
++ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ }
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+@@ -846,9 +849,18 @@ int usbnet_stop (struct net_device *net)
+
+ /* deferred work (timer, softirq, task) must also stop */
+ dev->flags = 0;
+- del_timer_sync (&dev->delay);
+- tasklet_kill (&dev->bh);
++ del_timer_sync(&dev->delay);
++ tasklet_kill(&dev->bh);
+ cancel_work_sync(&dev->kevent);
++
++ /* We have cyclic dependencies. Those calls are needed
++ * to break a cycle. We cannot fall into the gaps because
++ * we have a flag
++ */
++ tasklet_kill(&dev->bh);
++ del_timer_sync(&dev->delay);
++ cancel_work_sync(&dev->kevent);
++
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
+@@ -1174,7 +1186,8 @@ usbnet_deferred_kevent (struct work_struct *work)
+ status);
+ } else {
+ clear_bit (EVENT_RX_HALT, &dev->flags);
+- tasklet_schedule (&dev->bh);
++ if (!usbnet_going_away(dev))
++ tasklet_schedule(&dev->bh);
+ }
+ }
+
+@@ -1199,7 +1212,8 @@ usbnet_deferred_kevent (struct work_struct *work)
+ usb_autopm_put_interface(dev->intf);
+ fail_lowmem:
+ if (resched)
+- tasklet_schedule (&dev->bh);
++ if (!usbnet_going_away(dev))
++ tasklet_schedule(&dev->bh);
+ }
+ }
+
+@@ -1562,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t)
+ } else if (netif_running (dev->net) &&
+ netif_device_present (dev->net) &&
+ netif_carrier_ok(dev->net) &&
++ !usbnet_going_away(dev) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
+@@ -1609,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf)
+ usb_set_intfdata(intf, NULL);
+ if (!dev)
+ return;
++ usbnet_mark_going_away(dev);
+
+ xdev = interface_to_usbdev (intf);
+
+@@ -1731,7 +1747,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+
+ dev->net = net;
+ strscpy(net->name, "usb%d", sizeof(net->name));
+- eth_hw_addr_set(net, node_id);
+
+ /* rx and tx sides can use different message sizes;
+ * bind() should set rx_urb_size in that case.
+@@ -1805,9 +1820,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ goto out4;
+ }
+
+- /* let userspace know we have a random address */
+- if (ether_addr_equal(net->dev_addr, node_id))
+- net->addr_assign_type = NET_ADDR_RANDOM;
++ /* this flags the device for user space */
++ if (!is_valid_ether_addr(net->dev_addr))
++ eth_hw_addr_random(net);
+
+ if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+ SET_NETDEV_DEVTYPE(net, &wlan_type);
+@@ -2217,7 +2232,6 @@ static int __init usbnet_init(void)
+ BUILD_BUG_ON(
+ sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
+
+- eth_random_addr(node_id);
+ return 0;
+ }
+ module_init(usbnet_init);
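[Editor's note: the usbnet.c hunks above drop the module-wide node_id, which handed every MAC-less device the same random address, in favour of eth_hw_addr_random() per device. A sketch of the eth_random_addr() semantics (unicast, locally administered); rand() stands in for the kernel's RNG and is illustrative only:]

    #include <stdint.h>
    #include <stdlib.h>

    /* eth_random_addr() semantics: random bytes, then force the address to
     * be unicast and locally administered. rand() is a stand-in only. */
    void random_mac(uint8_t mac[6])
    {
        for (int i = 0; i < 6; i++)
            mac[i] = (uint8_t)(rand() & 0xff);
        mac[0] &= 0xfe;   /* clear multicast bit */
        mac[0] |= 0x02;   /* set locally-administered bit */
    }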
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 0deefd1573cf26..7767b6ff5a1559 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev,
+ data[tx_idx + j] += *(u64 *)(base + offset);
+ }
+ } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
+- pp_idx = tx_idx + VETH_TQ_STATS_LEN;
+ }
++ pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
+
+ page_pool_stats:
+ veth_get_page_pool_stats(dev, &data[pp_idx]);
+@@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb_tx_timestamp(skb);
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (!use_napi)
+- dev_lstats_add(dev, length);
++ dev_sw_netstats_tx_add(dev, 1, length);
+ else
+ __veth_xdp_flush(rq);
+ } else {
+@@ -387,14 +387,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ return ret;
+ }
+
+-static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
+-{
+- struct veth_priv *priv = netdev_priv(dev);
+-
+- dev_lstats_read(dev, packets, bytes);
+- return atomic64_read(&priv->dropped);
+-}
+-
+ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
+ {
+ struct veth_priv *priv = netdev_priv(dev);
+@@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev,
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ struct veth_stats rx;
+- u64 packets, bytes;
+
+- tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+- tot->tx_bytes = bytes;
+- tot->tx_packets = packets;
++ tot->tx_dropped = atomic64_read(&priv->dropped);
++ dev_fetch_sw_netstats(tot, dev->tstats);
+
+ veth_stats_rx(&rx, dev);
+ tot->tx_dropped += rx.xdp_tx_err;
+ tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
+- tot->rx_bytes = rx.xdp_bytes;
+- tot->rx_packets = rx.xdp_packets;
++ tot->rx_bytes += rx.xdp_bytes;
++ tot->rx_packets += rx.xdp_packets;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (peer) {
+- veth_stats_tx(peer, &packets, &bytes);
+- tot->rx_bytes += bytes;
+- tot->rx_packets += packets;
++ struct rtnl_link_stats64 tot_peer = {};
++
++ dev_fetch_sw_netstats(&tot_peer, peer->tstats);
++ tot->rx_bytes += tot_peer.tx_bytes;
++ tot->rx_packets += tot_peer.tx_packets;
+
+ veth_stats_rx(&rx, peer);
+ tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+@@ -1208,14 +1200,6 @@ static int veth_enable_xdp(struct net_device *dev)
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
+ return err;
+ }
+-
+- if (!veth_gro_requested(dev)) {
+- /* user-space did not require GRO, but adding XDP
+- * is supposed to get GRO working
+- */
+- dev->features |= NETIF_F_GRO;
+- netdev_features_change(dev);
+- }
+ }
+ }
+
+@@ -1235,18 +1219,9 @@ static void veth_disable_xdp(struct net_device *dev)
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
+
+- if (!netif_running(dev) || !veth_gro_requested(dev)) {
++ if (!netif_running(dev) || !veth_gro_requested(dev))
+ veth_napi_del(dev);
+
+- /* if user-space did not require GRO, since adding XDP
+- * enabled it, clear it now
+- */
+- if (!veth_gro_requested(dev) && netif_running(dev)) {
+- dev->features &= ~NETIF_F_GRO;
+- netdev_features_change(dev);
+- }
+- }
+-
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
+ }
+
+@@ -1478,7 +1453,8 @@ static int veth_alloc_queues(struct net_device *dev)
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
++ priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
++ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ if (!priv->rq)
+ return -ENOMEM;
+
+@@ -1494,30 +1470,18 @@ static void veth_free_queues(struct net_device *dev)
+ {
+ struct veth_priv *priv = netdev_priv(dev);
+
+- kfree(priv->rq);
++ kvfree(priv->rq);
+ }
+
+ static int veth_dev_init(struct net_device *dev)
+ {
+- int err;
+-
+- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+- if (!dev->lstats)
+- return -ENOMEM;
+-
+- err = veth_alloc_queues(dev);
+- if (err) {
+- free_percpu(dev->lstats);
+- return err;
+- }
+-
+- return 0;
++ netdev_lockdep_set_classes(dev);
++ return veth_alloc_queues(dev);
+ }
+
+ static void veth_dev_free(struct net_device *dev)
+ {
+ veth_free_queues(dev);
+- free_percpu(dev->lstats);
+ }
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+@@ -1562,8 +1526,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
+ if (peer_priv->_xdp_prog)
+ features &= ~NETIF_F_GSO_SOFTWARE;
+ }
+- if (priv->_xdp_prog)
+- features |= NETIF_F_GRO;
+
+ return features;
+ }
+@@ -1789,6 +1751,7 @@ static void veth_setup(struct net_device *dev)
+ NETIF_F_HW_VLAN_STAG_RX);
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = veth_dev_free;
++ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->hw_features = VETH_FEATURES;
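The veth hunks above swap the driver's hand-rolled lstats allocation for
core-managed tstats: with dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS the
netdev core allocates and frees dev->tstats itself, leaving the driver to
update and aggregate only. A minimal sketch of that split, with hypothetical
function names (not part of the patch):

    #include <linux/netdevice.h>

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            dev_sw_netstats_tx_add(dev, 1, skb->len); /* lockless per-cpu add */
            consume_skb(skb);
            return NETDEV_TX_OK;
    }

    static void example_get_stats64(struct net_device *dev,
                                    struct rtnl_link_stats64 *tot)
    {
            dev_fetch_sw_netstats(tot, dev->tstats);  /* sum over all CPUs */
    }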
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index d67f742fbd4c56..2da3be3fb9423c 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -81,24 +81,24 @@ struct virtnet_stat_desc {
+
+ struct virtnet_sq_stats {
+ struct u64_stats_sync syncp;
+- u64 packets;
+- u64 bytes;
+- u64 xdp_tx;
+- u64 xdp_tx_drops;
+- u64 kicks;
+- u64 tx_timeouts;
++ u64_stats_t packets;
++ u64_stats_t bytes;
++ u64_stats_t xdp_tx;
++ u64_stats_t xdp_tx_drops;
++ u64_stats_t kicks;
++ u64_stats_t tx_timeouts;
+ };
+
+ struct virtnet_rq_stats {
+ struct u64_stats_sync syncp;
+- u64 packets;
+- u64 bytes;
+- u64 drops;
+- u64 xdp_packets;
+- u64 xdp_tx;
+- u64 xdp_redirects;
+- u64 xdp_drops;
+- u64 kicks;
++ u64_stats_t packets;
++ u64_stats_t bytes;
++ u64_stats_t drops;
++ u64_stats_t xdp_packets;
++ u64_stats_t xdp_tx;
++ u64_stats_t xdp_redirects;
++ u64_stats_t xdp_drops;
++ u64_stats_t kicks;
+ };
+
+ #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
+@@ -334,7 +334,6 @@ struct virtio_net_common_hdr {
+ };
+ };
+
+-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
+ static bool is_xdp_frame(void *ptr)
+@@ -408,6 +407,17 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
+ return p;
+ }
+
++static void virtnet_rq_free_buf(struct virtnet_info *vi,
++ struct receive_queue *rq, void *buf)
++{
++ if (vi->mergeable_rx_bufs)
++ put_page(virt_to_head_page(buf));
++ else if (vi->big_packets)
++ give_pages(rq, buf);
++ else
++ put_page(virt_to_head_page(buf));
++}
++
+ static void enable_delayed_refill(struct virtnet_info *vi)
+ {
+ spin_lock_bh(&vi->refill_lock);
+@@ -634,17 +644,6 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
+ return buf;
+ }
+
+-static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
+-{
+- void *buf;
+-
+- buf = virtqueue_detach_unused_buf(rq->vq);
+- if (buf && rq->do_dma)
+- virtnet_rq_unmap(rq, buf, 0);
+-
+- return buf;
+-}
+-
+ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
+ {
+ struct virtnet_rq_dma *dma;
+@@ -744,6 +743,20 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+ }
+ }
+
++static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
++{
++ struct virtnet_info *vi = vq->vdev->priv;
++ struct receive_queue *rq;
++ int i = vq2rxq(vq);
++
++ rq = &vi->rq[i];
++
++ if (rq->do_dma)
++ virtnet_rq_unmap(rq, buf, 0);
++
++ virtnet_rq_free_buf(vi, rq, buf);
++}
++
+ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+ {
+ unsigned int len;
+@@ -775,8 +788,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+ return;
+
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.bytes += bytes;
+- sq->stats.packets += packets;
++ u64_stats_add(&sq->stats.bytes, bytes);
++ u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+
+@@ -975,11 +988,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+ }
+ out:
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.bytes += bytes;
+- sq->stats.packets += packets;
+- sq->stats.xdp_tx += n;
+- sq->stats.xdp_tx_drops += n - nxmit;
+- sq->stats.kicks += kicks;
++ u64_stats_add(&sq->stats.bytes, bytes);
++ u64_stats_add(&sq->stats.packets, packets);
++ u64_stats_add(&sq->stats.xdp_tx, n);
++ u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
++ u64_stats_add(&sq->stats.kicks, kicks);
+ u64_stats_update_end(&sq->stats.syncp);
+
+ virtnet_xdp_put_sq(vi, sq);
+@@ -1011,14 +1024,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ u32 act;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+- stats->xdp_packets++;
++ u64_stats_inc(&stats->xdp_packets);
+
+ switch (act) {
+ case XDP_PASS:
+ return act;
+
+ case XDP_TX:
+- stats->xdp_tx++;
++ u64_stats_inc(&stats->xdp_tx);
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+@@ -1036,7 +1049,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ return act;
+
+ case XDP_REDIRECT:
+- stats->xdp_redirects++;
++ u64_stats_inc(&stats->xdp_redirects);
+ err = xdp_do_redirect(dev, xdp, xdp_prog);
+ if (err)
+ return XDP_DROP;
+@@ -1177,6 +1190,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
+ if (unlikely(hdr->hdr.gso_type))
+ goto err_xdp;
+
++ /* Partially checksummed packets must be dropped. */
++ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
++ goto err_xdp;
++
+ buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+@@ -1232,9 +1249,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
+ return skb;
+
+ err_xdp:
+- stats->xdp_drops++;
++ u64_stats_inc(&stats->xdp_drops);
+ err:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ put_page(page);
+ xdp_xmit:
+ return NULL;
+@@ -1252,13 +1269,18 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ struct page *page = virt_to_head_page(buf);
+ struct sk_buff *skb;
+
++ /* We passed the address of the virtnet header to virtio-core,
++ * so truncate the padding.
++ */
++ buf -= VIRTNET_RX_PAD + xdp_headroom;
++
+ len -= vi->hdr_len;
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+
+ if (unlikely(len > GOOD_PACKET_LEN)) {
+ pr_debug("%s: rx error: len %u exceeds max size %d\n",
+ dev->name, len, GOOD_PACKET_LEN);
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto err;
+ }
+
+@@ -1282,7 +1304,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
+ return skb;
+
+ err:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ put_page(page);
+ return NULL;
+ }
+@@ -1298,14 +1320,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
+ struct sk_buff *skb =
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+
+- stats->bytes += len - vi->hdr_len;
++ u64_stats_add(&stats->bytes, len - vi->hdr_len);
+ if (unlikely(!skb))
+ goto err;
+
+ return skb;
+
+ err:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ give_pages(rq, page);
+ return NULL;
+ }
+@@ -1323,10 +1345,10 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ dev->name, num_buf);
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ break;
+ }
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+ page = virt_to_head_page(buf);
+ put_page(page);
+ }
+@@ -1432,11 +1454,11 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ dev->name, *num_buf,
+ virtio16_to_cpu(vi->vdev, hdr->num_buffers));
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto err;
+ }
+
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+ page = virt_to_head_page(buf);
+ offset = buf - page_address(page);
+
+@@ -1451,7 +1473,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+ put_page(page);
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto err;
+ }
+
+@@ -1494,6 +1516,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
+ if (unlikely(hdr->hdr.gso_type))
+ return NULL;
+
++ /* Partially checksummed packets must be dropped. */
++ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
++ return NULL;
++
+ /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
+ * with headroom may add a hole in truesize, which
+ * makes their length exceed PAGE_SIZE. So we disabled the
+@@ -1600,8 +1626,8 @@ static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
+ put_page(page);
+ mergeable_buf_free(rq, num_buf, dev, stats);
+
+- stats->xdp_drops++;
+- stats->drops++;
++ u64_stats_inc(&stats->xdp_drops);
++ u64_stats_inc(&stats->drops);
+ return NULL;
+ }
+
+@@ -1625,12 +1651,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
+
+ head_skb = NULL;
+- stats->bytes += len - vi->hdr_len;
++ u64_stats_add(&stats->bytes, len - vi->hdr_len);
+
+ if (unlikely(len > truesize - room)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto err_skb;
+ }
+
+@@ -1662,11 +1688,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ dev->name, num_buf,
+ virtio16_to_cpu(vi->vdev,
+ hdr->num_buffers));
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto err_buf;
+ }
+
+- stats->bytes += len;
++ u64_stats_add(&stats->bytes, len);
+ page = virt_to_head_page(buf);
+
+ truesize = mergeable_ctx_to_truesize(ctx);
+@@ -1676,7 +1702,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ if (unlikely(len > truesize - room)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+- dev->stats.rx_length_errors++;
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto err_skb;
+ }
+
+@@ -1718,7 +1744,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ mergeable_buf_free(rq, num_buf, dev, stats);
+
+ err_buf:
+- stats->drops++;
++ u64_stats_inc(&stats->drops);
+ dev_kfree_skb(head_skb);
+ return NULL;
+ }
+@@ -1760,14 +1786,24 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ struct net_device *dev = vi->dev;
+ struct sk_buff *skb;
+ struct virtio_net_common_hdr *hdr;
++ u8 flags;
+
+ if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+- dev->stats.rx_length_errors++;
+- virtnet_rq_free_unused_buf(rq->vq, buf);
++ DEV_STATS_INC(dev, rx_length_errors);
++ virtnet_rq_free_buf(vi, rq, buf);
+ return;
+ }
+
++ /* 1. Save the flags early, as the XDP program might overwrite them.
++ * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
++ * stay valid after XDP processing.
++ * 2. XDP doesn't work with partially checksummed packets (refer to
++ * virtnet_xdp_set()), so packets marked as
++ * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
++ */
++ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
++
+ if (vi->mergeable_rx_bufs)
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+ stats);
+@@ -1783,7 +1819,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+ virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
+
+- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
++ if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+@@ -1803,7 +1839,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ return;
+
+ frame_err:
+- dev->stats.rx_frame_errors++;
++ DEV_STATS_INC(dev, rx_frame_errors);
+ dev_kfree_skb(skb);
+ }
+
+@@ -1828,8 +1864,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+ if (unlikely(!buf))
+ return -ENOMEM;
+
+- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+- vi->hdr_len + GOOD_PACKET_LEN);
++ buf += VIRTNET_RX_PAD + xdp_headroom;
++
++ virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
+
+ err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+ if (err < 0) {
+@@ -1985,7 +2022,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
+- rq->stats.kicks++;
++ u64_stats_inc(&rq->stats.kicks);
+ u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
+ }
+
+@@ -2065,22 +2102,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct virtnet_rq_stats stats = {};
+ unsigned int len;
++ int packets = 0;
+ void *buf;
+ int i;
+
+ if (!vi->big_packets || vi->mergeable_rx_bufs) {
+ void *ctx;
+
+- while (stats.packets < budget &&
++ while (packets < budget &&
+ (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+- stats.packets++;
++ packets++;
+ }
+ } else {
+- while (stats.packets < budget &&
++ while (packets < budget &&
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+- stats.packets++;
++ packets++;
+ }
+ }
+
+@@ -2093,20 +2131,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
+ }
+ }
+
++ u64_stats_set(&stats.packets, packets);
+ u64_stats_update_begin(&rq->stats.syncp);
+ for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ size_t offset = virtnet_rq_stats_desc[i].offset;
+- u64 *item;
++ u64_stats_t *item, *src;
+
+- item = (u64 *)((u8 *)&rq->stats + offset);
+- *item += *(u64 *)((u8 *)&stats + offset);
++ item = (u64_stats_t *)((u8 *)&rq->stats + offset);
++ src = (u64_stats_t *)((u8 *)&stats + offset);
++ u64_stats_add(item, u64_stats_read(src));
+ }
+ u64_stats_update_end(&rq->stats.syncp);
+
+- return stats.packets;
++ return packets;
+ }
+
+-static void virtnet_poll_cleantx(struct receive_queue *rq)
++static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
+ {
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+@@ -2124,7 +2164,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
+
+ do {
+ virtqueue_disable_cb(sq->vq);
+- free_old_xmit_skbs(sq, true);
++ free_old_xmit_skbs(sq, !!budget);
+ } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+@@ -2143,7 +2183,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ unsigned int received;
+ unsigned int xdp_xmit = 0;
+
+- virtnet_poll_cleantx(rq);
++ virtnet_poll_cleantx(rq, budget);
+
+ received = virtnet_receive(rq, budget, &xdp_xmit);
+
+@@ -2158,7 +2198,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ sq = virtnet_xdp_get_sq(vi);
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.kicks++;
++ u64_stats_inc(&sq->stats.kicks);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ virtnet_xdp_put_sq(vi, sq);
+@@ -2246,7 +2286,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ txq = netdev_get_tx_queue(vi->dev, index);
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+- free_old_xmit_skbs(sq, true);
++ free_old_xmit_skbs(sq, !!budget);
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+@@ -2349,12 +2389,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ /* This should not happen! */
+ if (unlikely(err)) {
+- dev->stats.tx_fifo_errors++;
++ DEV_STATS_INC(dev, tx_fifo_errors);
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+ "Unexpected TXQ (%d) queue failure: %d\n",
+ qnum, err);
+- dev->stats.tx_dropped++;
++ DEV_STATS_INC(dev, tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+@@ -2370,7 +2410,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (kick || netif_xmit_stopped(txq)) {
+ if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.kicks++;
++ u64_stats_inc(&sq->stats.kicks);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
+ }
+@@ -2389,7 +2429,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
+ if (running)
+ napi_disable(&rq->napi);
+
+- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
++ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+@@ -2553,16 +2593,16 @@ static void virtnet_stats(struct net_device *dev,
+
+ do {
+ start = u64_stats_fetch_begin(&sq->stats.syncp);
+- tpackets = sq->stats.packets;
+- tbytes = sq->stats.bytes;
+- terrors = sq->stats.tx_timeouts;
++ tpackets = u64_stats_read(&sq->stats.packets);
++ tbytes = u64_stats_read(&sq->stats.bytes);
++ terrors = u64_stats_read(&sq->stats.tx_timeouts);
+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin(&rq->stats.syncp);
+- rpackets = rq->stats.packets;
+- rbytes = rq->stats.bytes;
+- rdrops = rq->stats.drops;
++ rpackets = u64_stats_read(&rq->stats.packets);
++ rbytes = u64_stats_read(&rq->stats.bytes);
++ rdrops = u64_stats_read(&rq->stats.drops);
+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
+
+ tot->rx_packets += rpackets;
+@@ -2573,10 +2613,10 @@ static void virtnet_stats(struct net_device *dev,
+ tot->tx_errors += terrors;
+ }
+
+- tot->tx_dropped = dev->stats.tx_dropped;
+- tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+- tot->rx_length_errors = dev->stats.rx_length_errors;
+- tot->rx_frame_errors = dev->stats.rx_frame_errors;
++ tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
++ tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
++ tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
++ tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
+ }
+
+ static void virtnet_ack_link_announce(struct virtnet_info *vi)
+@@ -2855,6 +2895,9 @@ static void virtnet_get_ringparam(struct net_device *dev,
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+ }
+
++static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
++ u16 vqn, u32 max_usecs, u32 max_packets);
++
+ static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+@@ -2890,12 +2933,36 @@ static int virtnet_set_ringparam(struct net_device *dev,
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
++
++ /* Upon disabling and re-enabling a transmit virtqueue, the device must
++ * set the coalescing parameters of the virtqueue to those configured
++ * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
++ * did not set any TX coalescing parameters, to 0.
++ */
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
++ vi->intr_coal_tx.max_usecs,
++ vi->intr_coal_tx.max_packets);
++ if (err)
++ return err;
++
++ vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
++ vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
++
++ /* The reason is the same as for the transmit virtqueue reset */
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
++ vi->intr_coal_rx.max_usecs,
++ vi->intr_coal_rx.max_packets);
++ if (err)
++ return err;
++
++ vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
++ vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
+ }
+ }
+
+@@ -3164,17 +3231,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
+ struct virtnet_info *vi = netdev_priv(dev);
+ unsigned int idx = 0, start, i, j;
+ const u8 *stats_base;
++ const u64_stats_t *p;
+ size_t offset;
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct receive_queue *rq = &vi->rq[i];
+
+- stats_base = (u8 *)&rq->stats;
++ stats_base = (const u8 *)&rq->stats;
+ do {
+ start = u64_stats_fetch_begin(&rq->stats.syncp);
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ offset = virtnet_rq_stats_desc[j].offset;
+- data[idx + j] = *(u64 *)(stats_base + offset);
++ p = (const u64_stats_t *)(stats_base + offset);
++ data[idx + j] = u64_stats_read(p);
+ }
+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
+ idx += VIRTNET_RQ_STATS_LEN;
+@@ -3183,12 +3252,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct send_queue *sq = &vi->sq[i];
+
+- stats_base = (u8 *)&sq->stats;
++ stats_base = (const u8 *)&sq->stats;
+ do {
+ start = u64_stats_fetch_begin(&sq->stats.syncp);
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ offset = virtnet_sq_stats_desc[j].offset;
+- data[idx + j] = *(u64 *)(stats_base + offset);
++ p = (const u64_stats_t *)(stats_base + offset);
++ data[idx + j] = u64_stats_read(p);
+ }
+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
+ idx += VIRTNET_SQ_STATS_LEN;
+@@ -3233,6 +3303,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+ {
+ struct scatterlist sgs_tx, sgs_rx;
++ int i;
+
+ vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+@@ -3246,6 +3317,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ /* Save parameters */
+ vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
+ vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
++ vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
++ }
+
+ vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+@@ -3259,6 +3334,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ /* Save parameters */
+ vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
+ vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
++ for (i = 0; i < vi->max_queue_pairs; i++) {
++ vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
++ vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
++ }
+
+ return 0;
+ }
+@@ -3287,27 +3366,23 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
+ {
+ int err;
+
+- if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
+- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+- ec->rx_coalesce_usecs,
+- ec->rx_max_coalesced_frames);
+- if (err)
+- return err;
+- /* Save parameters */
+- vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+- vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+- }
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
++ ec->rx_coalesce_usecs,
++ ec->rx_max_coalesced_frames);
++ if (err)
++ return err;
+
+- if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
+- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+- ec->tx_coalesce_usecs,
+- ec->tx_max_coalesced_frames);
+- if (err)
+- return err;
+- /* Save parameters */
+- vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
+- vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+- }
++ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
++ vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
++
++ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
++ ec->tx_coalesce_usecs,
++ ec->tx_max_coalesced_frames);
++ if (err)
++ return err;
++
++ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
++ vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
+
+ return 0;
+ }
+@@ -3453,7 +3528,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
+ } else {
+ ec->rx_max_coalesced_frames = 1;
+
+- if (vi->sq[0].napi.weight)
++ if (vi->sq[queue].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
+
+@@ -3519,19 +3594,34 @@ static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfu
+ static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+ {
+ struct virtnet_info *vi = netdev_priv(dev);
++ bool update = false;
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
++ if (!vi->has_rss)
++ return -EOPNOTSUPP;
++
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ vi->ctrl->rss.indirection_table[i] = indir[i];
++ update = true;
+ }
+- if (key)
++ if (key) {
++ /* If either _F_HASH_REPORT or _F_RSS is negotiated, the
++ * device provides hash calculation capabilities, that is,
++ * hash_key is configured.
++ */
++ if (!vi->has_rss && !vi->has_rss_hash_report)
++ return -EOPNOTSUPP;
++
+ memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
++ update = true;
++ }
+
+- virtnet_commit_rss_command(vi);
++ if (update)
++ virtnet_commit_rss_command(vi);
+
+ return 0;
+ }
+@@ -3866,7 +3956,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
+
+ u64_stats_update_begin(&sq->stats.syncp);
+- sq->stats.tx_timeouts++;
++ u64_stats_inc(&sq->stats.tx_timeouts);
+ u64_stats_update_end(&sq->stats.syncp);
+
+ netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
+@@ -3993,19 +4083,6 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+ xdp_return_frame(ptr_to_xdp(buf));
+ }
+
+-static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+-{
+- struct virtnet_info *vi = vq->vdev->priv;
+- int i = vq2rxq(vq);
+-
+- if (vi->mergeable_rx_bufs)
+- put_page(virt_to_head_page(buf));
+- else if (vi->big_packets)
+- give_pages(&vi->rq[i], buf);
+- else
+- put_page(virt_to_head_page(buf));
+-}
+-
+ static void free_unused_bufs(struct virtnet_info *vi)
+ {
+ void *buf;
+@@ -4019,10 +4096,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+- struct receive_queue *rq = &vi->rq[i];
++ struct virtqueue *vq = vi->rq[i].vq;
+
+- while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
+- virtnet_rq_free_unused_buf(rq->vq, buf);
++ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
++ virtnet_rq_unmap_free_buf(vq, buf);
+ cond_resched();
+ }
+ }
+@@ -4058,10 +4135,11 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
+ {
+ vq_callback_t **callbacks;
+ struct virtqueue **vqs;
+- int ret = -ENOMEM;
+- int i, total_vqs;
+ const char **names;
++ int ret = -ENOMEM;
++ int total_vqs;
+ bool *ctx;
++ u16 i;
+
+ /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
+ * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
+@@ -4098,8 +4176,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ callbacks[rxq2vq(i)] = skb_recv_done;
+ callbacks[txq2vq(i)] = skb_xmit_done;
+- sprintf(vi->rq[i].name, "input.%d", i);
+- sprintf(vi->sq[i].name, "output.%d", i);
++ sprintf(vi->rq[i].name, "input.%u", i);
++ sprintf(vi->sq[i].name, "output.%u", i);
+ names[rxq2vq(i)] = vi->rq[i].name;
+ names[txq2vq(i)] = vi->sq[i].name;
+ if (ctx)
+@@ -4399,8 +4477,16 @@ static int virtnet_probe(struct virtio_device *vdev)
+ dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ /* (!csum && gso) case will be fixed by register_netdev() */
+ }
+- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+- dev->features |= NETIF_F_RXCSUM;
++
++ /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
++ * need to calculate checksums for partially checksummed packets,
++ * as they're considered valid by the upper layer.
++ * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
++ * receives fully checksummed packets. The device may assist in
++ * validating these packets' checksums, so the driver won't have to.
++ */
++ dev->features |= NETIF_F_RXCSUM;
++
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
+ dev->features |= NETIF_F_GRO_HW;
+@@ -4452,13 +4538,15 @@ static int virtnet_probe(struct virtio_device *vdev)
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+ vi->has_rss_hash_report = true;
+
+- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
++ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
+ vi->has_rss = true;
+
+- if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_indir_table_size =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config,
+ rss_max_indirection_table_length));
++ }
++
++ if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_key_size =
+ virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 0578864792b60e..beebe09eb88ff3 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+ rq->data_ring.base,
+ rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+- rq->data_ring.desc_size = 0;
+ }
++ rq->data_ring.desc_size = 0;
+ }
+ }
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
+index 80ddaff759d47a..a6c787454a1aeb 100644
+--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
+@@ -382,12 +382,12 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
+ page = rbi->page;
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ page_pool_get_dma_addr(page) +
+- rq->page_pool->p.offset, rcd->len,
++ rq->page_pool->p.offset, rbi->len,
+ page_pool_get_dma_dir(rq->page_pool));
+
+- xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
++ xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
+- rcd->len, false);
++ rbi->len, false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index a3408e4e1491bb..27761a884dc627 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -121,22 +121,12 @@ struct net_vrf {
+ int ifindex;
+ };
+
+-struct pcpu_dstats {
+- u64 tx_pkts;
+- u64 tx_bytes;
+- u64 tx_drps;
+- u64 rx_pkts;
+- u64 rx_bytes;
+- u64 rx_drps;
+- struct u64_stats_sync syncp;
+-};
+-
+ static void vrf_rx_stats(struct net_device *dev, int len)
+ {
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+- dstats->rx_pkts++;
++ dstats->rx_packets++;
+ dstats->rx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+ }
+@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
+ do {
+ start = u64_stats_fetch_begin(&dstats->syncp);
+ tbytes = dstats->tx_bytes;
+- tpkts = dstats->tx_pkts;
+- tdrops = dstats->tx_drps;
++ tpkts = dstats->tx_packets;
++ tdrops = dstats->tx_drops;
+ rbytes = dstats->rx_bytes;
+- rpkts = dstats->rx_pkts;
++ rpkts = dstats->rx_packets;
+ } while (u64_stats_fetch_retry(&dstats->syncp, start));
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ vrf_rx_stats(dev, len);
+ else
+- this_cpu_inc(dev->dstats->rx_drps);
++ this_cpu_inc(dev->dstats->rx_drops);
+
+ return NETDEV_TX_OK;
+ }
+@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+- dstats->tx_pkts++;
++ dstats->tx_packets++;
+ dstats->tx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+ } else {
+- this_cpu_inc(dev->dstats->tx_drps);
++ this_cpu_inc(dev->dstats->tx_drops);
+ }
+
+ return ret;
+@@ -638,7 +628,9 @@ static void vrf_finish_direct(struct sk_buff *skb)
+ eth_zero_addr(eth->h_dest);
+ eth->h_proto = skb->protocol;
+
++ rcu_read_lock_bh();
+ dev_queue_xmit_nit(skb, vrf_dev);
++ rcu_read_unlock_bh();
+
+ skb_pull(skb, ETH_HLEN);
+ }
+@@ -1174,22 +1166,15 @@ static void vrf_dev_uninit(struct net_device *dev)
+
+ vrf_rtable_release(dev, vrf);
+ vrf_rt6_release(dev, vrf);
+-
+- free_percpu(dev->dstats);
+- dev->dstats = NULL;
+ }
+
+ static int vrf_dev_init(struct net_device *dev)
+ {
+ struct net_vrf *vrf = netdev_priv(dev);
+
+- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
+- if (!dev->dstats)
+- goto out_nomem;
+-
+ /* create the default dst which points back to us */
+ if (vrf_rtable_create(dev) != 0)
+- goto out_stats;
++ goto out_nomem;
+
+ if (vrf_rt6_create(dev) != 0)
+ goto out_rth;
+@@ -1203,9 +1188,6 @@ static int vrf_dev_init(struct net_device *dev)
+
+ out_rth:
+ vrf_rtable_release(dev, vrf);
+-out_stats:
+- free_percpu(dev->dstats);
+- dev->dstats = NULL;
+ out_nomem:
+ return -ENOMEM;
+ }
+@@ -1704,6 +1686,8 @@ static void vrf_setup(struct net_device *dev)
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = IP6_MAX_MTU;
+ dev->mtu = dev->max_mtu;
++
++ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+ }
+
+ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 5b5597073b004c..c114c91b558bdc 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
+ struct vxlan_fdb *f;
+ u32 ifindex = 0;
+
++ /* Ignore packets from invalid src-address */
++ if (!is_valid_ether_addr(src_mac))
++ return true;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (src_ip->sa.sa_family == AF_INET6 &&
+ (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
+@@ -1670,6 +1674,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ bool raw_proto = false;
+ void *oiph;
+ __be32 vni = 0;
++ int nh;
+
+ /* Need UDP and VXLAN header to be present */
+ if (!pskb_may_pull(skb, VXLAN_HLEN))
+@@ -1758,12 +1763,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+ skb->pkt_type = PACKET_HOST;
+ }
+
+- oiph = skb_network_header(skb);
++ /* Save offset of outer header relative to skb->head,
++ * because we are going to reset the network header to the inner header
++ * and might change skb->head.
++ */
++ nh = skb_network_header(skb) - skb->head;
++
+ skb_reset_network_header(skb);
+
++ if (!pskb_inet_may_pull(skb)) {
++ DEV_STATS_INC(vxlan->dev, rx_length_errors);
++ DEV_STATS_INC(vxlan->dev, rx_errors);
++ vxlan_vnifilter_count(vxlan, vni, vninode,
++ VXLAN_VNI_STATS_RX_ERRORS, 0);
++ goto drop;
++ }
++
++ /* Get the outer header. */
++ oiph = skb->head + nh;
++
+ if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
+- ++vxlan->dev->stats.rx_frame_errors;
+- ++vxlan->dev->stats.rx_errors;
++ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
++ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+@@ -1833,7 +1854,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ goto out;
+
+ if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
+- dev->stats.tx_dropped++;
++ dev_core_stats_tx_dropped_inc(dev);
++ vxlan_vnifilter_count(vxlan, vni, NULL,
++ VXLAN_VNI_STATS_TX_DROPS, 0);
+ goto out;
+ }
+ parp = arp_hdr(skb);
+@@ -1889,7 +1912,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ reply->pkt_type = PACKET_HOST;
+
+ if (netif_rx(reply) == NET_RX_DROP) {
+- dev->stats.rx_dropped++;
++ dev_core_stats_rx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
+@@ -2048,7 +2071,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ goto out;
+
+ if (netif_rx(reply) == NET_RX_DROP) {
+- dev->stats.rx_dropped++;
++ dev_core_stats_rx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
+@@ -2367,7 +2390,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ len);
+ } else {
+ drop:
+- dev->stats.rx_dropped++;
++ dev_core_stats_rx_dropped_inc(dev);
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
+@@ -2399,7 +2422,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
+ daddr->sa.sa_family, dst_port,
+ vxlan->cfg.flags);
+ if (!dst_vxlan) {
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_ERRORS, 0);
+ kfree_skb(skb);
+@@ -2660,7 +2683,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ return;
+
+ drop:
+- dev->stats.tx_dropped++;
++ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
+ dev_kfree_skb(skb);
+ return;
+@@ -2668,11 +2691,11 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ tx_error:
+ rcu_read_unlock();
+ if (err == -ELOOP)
+- dev->stats.collisions++;
++ DEV_STATS_INC(dev, collisions);
+ else if (err == -ENETUNREACH)
+- dev->stats.tx_carrier_errors++;
++ DEV_STATS_INC(dev, tx_carrier_errors);
+ dst_release(ndst);
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
+ kfree_skb(skb);
+ }
+@@ -2705,7 +2728,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
+ return;
+
+ drop:
+- dev->stats.tx_dropped++;
++ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
+ dev_kfree_skb(skb);
+@@ -2743,7 +2766,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
+ return NETDEV_TX_OK;
+
+ drop:
+- dev->stats.tx_dropped++;
++ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
+ dev_kfree_skb(skb);
+@@ -2840,7 +2863,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ !is_multicast_ether_addr(eth->h_dest))
+ vxlan_fdb_miss(vxlan, eth->h_dest);
+
+- dev->stats.tx_dropped++;
++ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
+ kfree_skb(skb);
+@@ -2960,6 +2983,7 @@ static int vxlan_init(struct net_device *dev)
+ if (err)
+ goto err_gro_cells_destroy;
+
++ netdev_lockdep_set_classes(dev);
+ return 0;
+
+ err_gro_cells_destroy:
+@@ -4795,9 +4819,13 @@ static int __init vxlan_init_module(void)
+ if (rc)
+ goto out4;
+
+- vxlan_vnifilter_init();
++ rc = vxlan_vnifilter_init();
++ if (rc)
++ goto out5;
+
+ return 0;
++out5:
++ rtnl_link_unregister(&vxlan_link_ops);
+ out4:
+ unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
+ out3:
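The vxlan hunks above replace non-atomic dev->stats.x++ updates, which are
racy when several CPUs hit the same path, with two helpers: DEV_STATS_INC()
performs an atomic increment on a field of dev->stats, while
dev_core_stats_*_inc() bumps lazily allocated per-cpu core counters, keeping
hot drop paths off a shared cacheline. Side by side, in a hypothetical
fragment (not part of the patch):

    #include <linux/netdevice.h>

    static void example_drop(struct net_device *dev, struct sk_buff *skb,
                             bool is_error)
    {
            if (is_error)
                    DEV_STATS_INC(dev, tx_errors);      /* atomic, shared */
            else
                    dev_core_stats_tx_dropped_inc(dev); /* per-cpu */
            kfree_skb(skb);
    }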
+diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
+index 817fa3075842ee..85b6d0c347e3b4 100644
+--- a/drivers/net/vxlan/vxlan_private.h
++++ b/drivers/net/vxlan/vxlan_private.h
+@@ -202,7 +202,7 @@ int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ int vxlan_vnigroup_init(struct vxlan_dev *vxlan);
+ void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
+
+-void vxlan_vnifilter_init(void);
++int vxlan_vnifilter_init(void);
+ void vxlan_vnifilter_uninit(void);
+ void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
+index 9c59d0bf8c3de0..d2023e7131bd4f 100644
+--- a/drivers/net/vxlan/vxlan_vnifilter.c
++++ b/drivers/net/vxlan/vxlan_vnifilter.c
+@@ -992,19 +992,18 @@ static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
+ return err;
+ }
+
+-void vxlan_vnifilter_init(void)
++static const struct rtnl_msg_handler vxlan_vnifilter_rtnl_msg_handlers[] = {
++ {THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL, vxlan_vnifilter_dump, 0},
++ {THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL, vxlan_vnifilter_process, NULL, 0},
++ {THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL, vxlan_vnifilter_process, NULL, 0},
++};
++
++int vxlan_vnifilter_init(void)
+ {
+- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
+- vxlan_vnifilter_dump, 0);
+- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
+- vxlan_vnifilter_process, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
+- vxlan_vnifilter_process, NULL, 0);
++ return rtnl_register_many(vxlan_vnifilter_rtnl_msg_handlers);
+ }
+
+ void vxlan_vnifilter_uninit(void)
+ {
+- rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
+- rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
+- rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
++ rtnl_unregister_many(vxlan_vnifilter_rtnl_msg_handlers);
+ }
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
+index 0ba714ca5185cd..4b8528206cc8a2 100644
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+ if (bits == 32) {
+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ } else if (bits == 128) {
+- ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+- ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
++ ((u64 *)dst)[0] = get_unaligned_be64(src);
++ ((u64 *)dst)[1] = get_unaligned_be64(src + 8);
+ }
+ }
+
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index 258dcc1039216f..deb9636b0ecf8f 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ */
+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
+- ++dev->stats.tx_dropped;
++ DEV_STATS_INC(dev, tx_dropped);
+ }
+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+@@ -228,7 +228,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ err:
+- ++dev->stats.tx_errors;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index dc09b75a32485c..a6661c9500d403 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
+ if (!allowedips_node)
+ goto no_allowedips;
+ if (!ctx->allowedips_seq)
+- ctx->allowedips_seq = peer->device->peer_allowedips.seq;
+- else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
++ ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
++ else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
+ goto no_allowedips;
+
+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
+@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ if (!peers_nest)
+ goto out;
+ ret = 0;
+- /* If the last cursor was removed via list_del_init in peer_remove, then
++ lockdep_assert_held(&wg->device_update_lock);
++ /* If the last cursor was removed in peer_remove or peer_remove_all, then
+ * we just treat this the same as there being no more peers left. The
+ * reason is that seq_nr should indicate to userspace that this isn't a
+ * coherent dump anyway, so they'll try again.
+ */
+ if (list_empty(&wg->peer_list) ||
+- (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
++ (ctx->next_peer && ctx->next_peer->is_dead)) {
+ nla_nest_cancel(skb, peers_nest);
+ goto out;
+ }
+- lockdep_assert_held(&wg->device_update_lock);
+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
+ if (get_peer(peer, skb, ctx)) {
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index 1ea4f874e367ee..7eb76724b3edb5 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+ */
+ static inline int wg_cpumask_next_online(int *last_cpu)
+ {
+- int cpu = cpumask_next(*last_cpu, cpu_online_mask);
++ int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_online_mask);
+- *last_cpu = cpu;
++ WRITE_ONCE(*last_cpu, cpu);
+ return cpu;
+ }
+
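The wg_cpumask_next_online() change above is a data-race annotation: *last_cpu
is updated while other threads read it locklessly, so both sides go through
READ_ONCE()/WRITE_ONCE(), which keeps the compiler from tearing, fusing, or
re-loading the accesses. The same idiom recurs below for
receiving_counter.counter. In isolation, with hypothetical names (not part of
the patch):

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Best-effort lockless read of a value whose writers hold a lock. */
    static u64 example_peek(const u64 *counter)
    {
            return READ_ONCE(*counter);     /* one non-torn load */
    }

    static void example_store(u64 *counter, u64 val)
    {
            WRITE_ONCE(*counter, val);      /* pairs with READ_ONCE() readers */
    }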
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 0b3f0c84355095..db01ec03bda005 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
+
+ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
+ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
+- keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
++ READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(keypair->receiving.is_valid, false);
+ return false;
+ }
+@@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou
+ for (i = 1; i <= top; ++i)
+ counter->backtrack[(i + index_current) &
+ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
+- counter->counter = their_counter;
++ WRITE_ONCE(counter->counter, their_counter);
+ }
+
+ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
+@@ -416,20 +416,20 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
+ dev->name, skb, peer->internal_id,
+ &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_frame_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_frame_errors);
+ goto packet_processed;
+ dishonest_packet_type:
+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_frame_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_frame_errors);
+ goto packet_processed;
+ dishonest_packet_size:
+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
+ dev->name, peer->internal_id, &peer->endpoint.addr);
+- ++dev->stats.rx_errors;
+- ++dev->stats.rx_length_errors;
++ DEV_STATS_INC(dev, rx_errors);
++ DEV_STATS_INC(dev, rx_length_errors);
+ goto packet_processed;
+ packet_processed:
+ dev_kfree_skb(skb);
+@@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
+ peer->device->dev->name,
+ PACKET_CB(skb)->nonce,
+- keypair->receiving_counter.counter);
++ READ_ONCE(keypair->receiving_counter.counter));
+ goto next;
+ }
+
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 95c853b59e1dae..26e09c30d596ca 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
+ {
+ struct sk_buff *skb;
+
+- if (skb_queue_empty(&peer->staged_packet_queue)) {
++ if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+@@ -333,7 +333,8 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
+ void wg_packet_purge_staged_packets(struct wg_peer *peer)
+ {
+ spin_lock_bh(&peer->staged_packet_queue.lock);
+- peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
++ DEV_STATS_ADD(peer->device->dev, tx_dropped,
++ peer->staged_packet_queue.qlen);
+ __skb_queue_purge(&peer->staged_packet_queue);
+ spin_unlock_bh(&peer->staged_packet_queue.lock);
+ }
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index 19f61225a7085f..5d82edf8a59d5a 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1590,6 +1590,20 @@ static int ar5523_probe(struct usb_interface *intf,
+ struct ar5523 *ar;
+ int error = -ENOMEM;
+
++ static const u8 bulk_ep_addr[] = {
++ AR5523_CMD_TX_PIPE | USB_DIR_OUT,
++ AR5523_DATA_TX_PIPE | USB_DIR_OUT,
++ AR5523_CMD_RX_PIPE | USB_DIR_IN,
++ AR5523_DATA_RX_PIPE | USB_DIR_IN,
++ 0};
++
++ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
++ dev_err(&dev->dev,
++ "Could not find all expected endpoints\n");
++ error = -ENODEV;
++ goto out;
++ }
++
+ /*
+ * Load firmware if the device requires it. This will return
+ * -ENXIO on success and we'll get called back after the usb
+diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
+index f02a308a9ffc5e..34654f710d8a1e 100644
+--- a/drivers/net/wireless/ath/ath.h
++++ b/drivers/net/wireless/ath/ath.h
+@@ -171,8 +171,10 @@ struct ath_common {
+ unsigned int clockrate;
+
+ spinlock_t cc_lock;
+- struct ath_cycle_counters cc_ani;
+- struct ath_cycle_counters cc_survey;
++ struct_group(cc,
++ struct ath_cycle_counters cc_ani;
++ struct ath_cycle_counters cc_survey;
++ );
+
+ struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
+diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
+index e6ea884cafc190..4f385f4a8cef2a 100644
+--- a/drivers/net/wireless/ath/ath10k/Kconfig
++++ b/drivers/net/wireless/ath/ath10k/Kconfig
+@@ -45,6 +45,7 @@ config ATH10K_SNOC
+ depends on ATH10K
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_SMEM
++ depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
+ select QCOM_SCM
+ select QCOM_QMI_HELPERS
+ help
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 6cdb225b7eaccc..81058be3598f15 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -704,6 +704,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .max_spatial_stream = 4,
+ .fw = {
+ .dir = WCN3990_HW_1_0_FW_DIR,
++ .board = WCN3990_HW_1_0_BOARD_DATA_FILE,
++ .board_size = WCN3990_BOARD_DATA_SZ,
++ .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &wcn3990_rx_desc_ops,
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index f9518e1c99039e..fe89bc61e5317d 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1140,7 +1140,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath10k_gstrings_stats,
++ memcpy(data, ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+index 87a3365330ff80..5598cf706daabc 100644
+--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+@@ -438,7 +438,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+ }
+ out:
+ mutex_unlock(&ar->conf_mutex);
+- return count;
++ return ret ?: count;
+ }
+
+ static const struct file_operations fops_peer_debug_trigger = {
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 9643031a4427ad..7ecdd0011cfa48 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -132,6 +132,7 @@ enum qca9377_chip_id_rev {
+ /* WCN3990 1.0 definitions */
+ #define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
+ #define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0"
++#define WCN3990_HW_1_0_BOARD_DATA_FILE "board.bin"
+
+ #define ATH10K_FW_FILE_BASE "firmware"
+ #define ATH10K_FW_API_MAX 6
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 26214c00cd0d7b..2c39bad7ebfb9a 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+
+ static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+ {
+- ath10k_ce_disable_interrupts(ar);
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++ int id;
++
++ for (id = 0; id < CE_COUNT_MAX; id++)
++ disable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+
+ static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+ {
+- ath10k_ce_enable_interrupts(ar);
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++ int id;
++
++ for (id = 0; id < CE_COUNT_MAX; id++)
++ enable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+
+ static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+@@ -1090,6 +1098,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ goto err_free_rri;
+ }
+
++ ath10k_ce_enable_interrupts(ar);
++
+ return 0;
+
+ err_free_rri:
+@@ -1253,8 +1263,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+- ath10k_snoc_per_engine_handler, 0,
+- ce_name[id], ar);
++ ath10k_snoc_per_engine_handler,
++ IRQF_NO_AUTOEN, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to register IRQ handler for CE %d: %d\n",
+diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
+index ec556bb88d6581..ba37e6c7ced08b 100644
+--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
++++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
+@@ -491,4 +491,7 @@ struct host_interest {
+ #define QCA4019_BOARD_DATA_SZ 12064
+ #define QCA4019_BOARD_EXT_DATA_SZ 0
+
++#define WCN3990_BOARD_DATA_SZ 26328
++#define WCN3990_BOARD_EXT_DATA_SZ 0
++
+ #endif /* __TARGADDRS_H__ */
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 6b6aa3c3674487..0ce08e9a0a3d2d 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -851,6 +851,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
++ if (!ev) {
++ kfree(tb);
++ return -EPROTO;
++ }
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 05fa7d4c0e1aba..ee08a4c668f7a0 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -1762,12 +1762,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
+
+ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+ {
+- unsigned long time_left;
++ unsigned long time_left, i;
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+- if (!time_left)
+- return -ETIMEDOUT;
++ if (!time_left) {
++		/* Sometimes the PCI HIF doesn't receive an interrupt
++		 * for the service ready message even though the buffer
++		 * was completed. A PCIe sniffer shows that this is
++		 * because the corresponding CE ring doesn't fire it.
++		 * Work around this by polling the CE rings once.
++		 */
++ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
++
++ for (i = 0; i < CE_COUNT; i++)
++ ath10k_hif_send_complete_check(ar, i, 1);
++
++ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
++ WMI_SERVICE_READY_TIMEOUT_HZ);
++ if (!time_left) {
++ ath10k_warn(ar, "polling timed out\n");
++ return -ETIMEDOUT;
++ }
++
++ ath10k_warn(ar, "service ready completion received, continuing normally\n");
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 1215ebdf173a80..ef11c138bf3079 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -802,8 +802,8 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab)
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+- ath11k_err(ab, "failed to get rproc\n");
+- return -EINVAL;
++ ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
++ return -EPROBE_DEFER;
+ }
+ ab_ahb->tgt_rproc = prproc;
+
+diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
+index 289d47ae92afc5..e66e86bdec20ff 100644
+--- a/drivers/net/wireless/ath/ath11k/ce.c
++++ b/drivers/net/wireless/ath/ath11k/ce.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "dp_rx.h"
+diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
+index c0f6a0ba86df09..bcde2fcf02cf78 100644
+--- a/drivers/net/wireless/ath/ath11k/ce.h
++++ b/drivers/net/wireless/ath/ath11k/ce.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_CE_H
+@@ -145,7 +146,7 @@ struct ath11k_ce_ring {
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+- u32 base_addr_ce_space_unaligned;
++ dma_addr_t base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+@@ -155,7 +156,7 @@ struct ath11k_ce_ring {
+ void *base_addr_owner_space;
+
+ /* CE address space */
+- u32 base_addr_ce_space;
++ dma_addr_t base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
+index 5536e864233124..fbb6e8d8a47692 100644
+--- a/drivers/net/wireless/ath/ath11k/dbring.c
++++ b/drivers/net/wireless/ath/ath11k/dbring.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
+index ef906c687b8cdb..2f93b78a50df0e 100644
+--- a/drivers/net/wireless/ath/ath11k/dbring.h
++++ b/drivers/net/wireless/ath/ath11k/dbring.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_DBRING_H
+diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
+index f5c8a34c8802f0..2b8544355fc1a6 100644
+--- a/drivers/net/wireless/ath/ath11k/debug.c
++++ b/drivers/net/wireless/ath/ath11k/debug.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/vmalloc.h>
+diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
+index 9c52804ef8ac30..cc8934d156977c 100644
+--- a/drivers/net/wireless/ath/ath11k/debug.h
++++ b/drivers/net/wireless/ath/ath11k/debug.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _ATH11K_DEBUG_H_
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
+index 5bb6fd17fdf6f5..8cda73b78ebf41 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/vmalloc.h>
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
+index 3af0169f6cf218..44d15845f39a67 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.h
++++ b/drivers/net/wireless/ath/ath11k/debugfs.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _ATH11K_DEBUGFS_H_
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+index 0207fc4910f342..870e86a31bf896 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/vmalloc.h>
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+index 96219301f05bd4..476689bbd4dad7 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
++++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef DEBUG_HTT_STATS_H
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+index 9cc4ef28e7519b..168879a380cb2d 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/vmalloc.h>
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
+index e6c11b3a40aa93..ace877e19275eb 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.h
++++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _ATH11K_DEBUGFS_STA_H_
+diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
+index d070bcb3fe247f..be0beb6bae8fbb 100644
+--- a/drivers/net/wireless/ath/ath11k/dp.c
++++ b/drivers/net/wireless/ath/ath11k/dp.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <crypto/hash.h>
+diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
+index 15815af453b2a6..2f6dd69d3be276 100644
+--- a/drivers/net/wireless/ath/ath11k/dp.h
++++ b/drivers/net/wireless/ath/ath11k/dp.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_DP_H
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 62bc98852f0f7f..a4d56136f42f7e 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/ieee80211.h>
+@@ -1621,14 +1622,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+ u8 pdev_id;
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
++
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+- return;
++ goto out;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
++
++out:
++ rcu_read_unlock();
+ }
+
+ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+@@ -1873,8 +1880,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ }
+
+-static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
+- enum hal_encrypt_type enctype)
++int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
+ {
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+@@ -2694,7 +2700,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ if (unlikely(push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
+ dev_kfree_skb_any(msdu);
+- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
++ ab->soc_stats.hal_reo_error[ring_id]++;
+ continue;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
+index 623da3bf9dc810..c322e30caa9683 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #ifndef ATH11K_DP_RX_H
+ #define ATH11K_DP_RX_H
+@@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
+ int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
+ int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+
++int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype);
++
+ #endif /* ATH11K_DP_RX_H */
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
+index 0dda76f7a4b50d..7dd1ee58980177 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
+index 68a21ea9b93463..61be2265e09f08 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_DP_TX_H
+diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
+index 0a99aa7ddbf45a..ae5f7e401e21b7 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/dma-mapping.h>
+ #include "hal_tx.h"
+diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
+index 1942d41d6de541..80447f488954a8 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.h
++++ b/drivers/net/wireless/ath/ath11k/hal.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_HAL_H
+diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
+index d895ea878d9f03..b2fd180bd28e6b 100644
+--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
++++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include "core.h"
+
+diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
+index e5ed5efb139e11..363adac84a8700 100644
+--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
++++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "debug.h"
+diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
+index 61bd8416c4fde4..e05411005fc614 100644
+--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
++++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_HAL_RX_H
+diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
+index 659b80d2abd4de..e0952c0629293e 100644
+--- a/drivers/net/wireless/ath/ath11k/hif.h
++++ b/drivers/net/wireless/ath/ath11k/hif.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _HIF_H_
+diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
+index 2c2e425c866595..23054ab29a5eed 100644
+--- a/drivers/net/wireless/ath/ath11k/htc.c
++++ b/drivers/net/wireless/ath/ath11k/htc.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/skbuff.h>
+ #include <linux/ctype.h>
+diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
+index f429b37cfdf759..e9b123a50b5d98 100644
+--- a/drivers/net/wireless/ath/ath11k/htc.h
++++ b/drivers/net/wireless/ath/ath11k/htc.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_HTC_H
+diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
+index d7b5ec6e690490..77d8f9237680b2 100644
+--- a/drivers/net/wireless/ath/ath11k/hw.c
++++ b/drivers/net/wireless/ath/ath11k/hw.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/types.h>
+diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
+index d51a99669dd6ee..1b070747a5dbfe 100644
+--- a/drivers/net/wireless/ath/ath11k/hw.h
++++ b/drivers/net/wireless/ath/ath11k/hw.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_HW_H
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index c071bf5841af60..4247c0f840a482 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -1233,14 +1233,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
+
+ enable_ps = arvif->ps;
+
+- if (!arvif->is_started) {
+- /* mac80211 can update vif powersave state while disconnected.
+- * Firmware doesn't behave nicely and consumes more power than
+- * necessary if PS is disabled on a non-started vdev. Hence
+- * force-enable PS for non-running vdevs.
+- */
+- psmode = WMI_STA_PS_MODE_ENABLED;
+- } else if (enable_ps) {
++ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+@@ -2296,6 +2289,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
++	/* Initialize rx_mcs_160 to 9, which is an invalid value */
++ rx_mcs_160 = 9;
+ if (support_160) {
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+@@ -2307,6 +2302,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ }
+ }
+
++	/* Initialize rx_mcs_80 to 9, which is an invalid value */
++ rx_mcs_80 = 9;
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+@@ -3025,7 +3022,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+
+ rcu_read_unlock();
+
++ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
++ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
++ arvif->vdev_id, bss_conf->bssid);
++ return;
++ }
++
+ peer_arg.is_assoc = true;
++
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+@@ -3048,12 +3052,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+ return;
+ }
+
+- if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+- ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+- arvif->vdev_id, bss_conf->bssid);
+- return;
+- }
+-
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = vif->cfg.aid;
+@@ -4132,6 +4130,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
++ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+@@ -4141,12 +4140,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+- case WLAN_CIPHER_SUITE_CCMP_256:
+- arg.key_cipher = WMI_CIPHER_AES_CCM;
+- break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
++ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ default:
+ ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+@@ -6025,7 +6022,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ {
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ieee80211_tx_info *info;
++ enum hal_encrypt_type enctype;
++ unsigned int mic_len;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+@@ -6049,7 +6049,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
++ if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
++ ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
++
++ enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
++ mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
++ skb_put(skb, mic_len);
+ }
+ }
+
+@@ -8905,7 +8910,7 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ {
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+- struct scan_req_params arg;
++ struct scan_req_params *arg;
+ int ret;
+ u32 scan_time_msec;
+
+@@ -8937,27 +8942,31 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+- memset(&arg, 0, sizeof(arg));
+- ath11k_wmi_start_scan_init(ar, &arg);
+- arg.num_chan = 1;
+- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+- GFP_KERNEL);
+- if (!arg.chan_list) {
++ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
++ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
++ ath11k_wmi_start_scan_init(ar, arg);
++ arg->num_chan = 1;
++ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
++ GFP_KERNEL);
++ if (!arg->chan_list) {
++ ret = -ENOMEM;
++ goto free_arg;
++ }
+
+- arg.vdev_id = arvif->vdev_id;
+- arg.scan_id = ATH11K_SCAN_ID;
+- arg.chan_list[0] = chan->center_freq;
+- arg.dwell_time_active = scan_time_msec;
+- arg.dwell_time_passive = scan_time_msec;
+- arg.max_scan_time = scan_time_msec;
+- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+- arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+- arg.burst_duration = duration;
+-
+- ret = ath11k_start_scan(ar, &arg);
++ arg->vdev_id = arvif->vdev_id;
++ arg->scan_id = ATH11K_SCAN_ID;
++ arg->chan_list[0] = chan->center_freq;
++ arg->dwell_time_active = scan_time_msec;
++ arg->dwell_time_passive = scan_time_msec;
++ arg->max_scan_time = scan_time_msec;
++ arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
++ arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
++ arg->burst_duration = duration;
++
++ ret = ath11k_start_scan(ar, arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+@@ -8983,7 +8992,9 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ ret = 0;
+
+ free_chan_list:
+- kfree(arg.chan_list);
++ kfree(arg->chan_list);
++free_arg:
++ kfree(arg);
+ exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+@@ -9042,6 +9053,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
++	/* Firmware doesn't provide Tx power during CAC, so there is no need
++	 * to fetch the stats.
++	 */
++ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
++ mutex_unlock(&ar->conf_mutex);
++ return -EAGAIN;
++ }
++
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
+index 0231783ad754be..0dfdeed5177b88 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.h
++++ b/drivers/net/wireless/ath/ath11k/mac.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_MAC_H
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index 3ac689f1def403..48ae81efc2696a 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/msi.h>
+@@ -105,7 +105,7 @@ static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+- .buf_len = 0,
++ .buf_len = 8192,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
+ .ch_cfg = ath11k_mhi_channels_qca6390,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
+index 8d9f852da69527..f81fba2644a4c7 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.h
++++ b/drivers/net/wireless/ath/ath11k/mhi.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #ifndef _ATH11K_MHI_H
+ #define _ATH11K_MHI_H
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index a5aa1857ec14be..09e65c5e55c4a9 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -854,10 +854,16 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
+ if (ret)
+ goto err_pci_disable_msi;
+
++ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
++ if (ret) {
++ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
++ goto err_pci_disable_msi;
++ }
++
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+- goto err_pci_disable_msi;
++ goto err_irq_affinity_cleanup;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+@@ -878,12 +884,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
+ goto err_ce_free;
+ }
+
+- ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+- if (ret) {
+- ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+- goto err_free_irq;
+- }
+-
+ /* kernel may allocate a dummy vector before request_irq and
+ * then allocate a real vector when request_irq is called.
+ * So get msi_data here again to avoid spurious interrupt
+@@ -892,20 +892,17 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
+ ret = ath11k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+- goto err_irq_affinity_cleanup;
++ goto err_free_irq;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+- goto err_irq_affinity_cleanup;
++ goto err_free_irq;
+ }
+ ath11k_qmi_fwreset_from_cold_boot(ab);
+ return 0;
+
+-err_irq_affinity_cleanup:
+- ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+-
+ err_free_irq:
+ ath11k_pcic_free_irq(ab);
+
+@@ -918,6 +915,9 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
+ err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
++err_irq_affinity_cleanup:
++ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
++
+ err_pci_disable_msi:
+ ath11k_pci_free_msi(ab_pci);
+
+diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
+index c63083633b3713..803ee9dd7967d6 100644
+--- a/drivers/net/wireless/ath/ath11k/pcic.c
++++ b/drivers/net/wireless/ath/ath11k/pcic.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+@@ -460,8 +460,6 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+ {
+ int i;
+
+- set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+-
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+@@ -471,6 +469,8 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+ }
+ ath11k_pcic_ext_grp_enable(irq_grp);
+ }
++
++ set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ }
+ EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
+
+diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
+index 114aa3a9a3397b..ca719eb3f7f829 100644
+--- a/drivers/net/wireless/ath/ath11k/peer.c
++++ b/drivers/net/wireless/ath/ath11k/peer.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
+index 9bd385d0a38c97..3ad2f3355b14fd 100644
+--- a/drivers/net/wireless/ath/ath11k/peer.h
++++ b/drivers/net/wireless/ath/ath11k/peer.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_PEER_H
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index 41fad03a3025c4..83dc284392de22 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/elf.h>
+@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ struct qmi_txn txn;
+ const u8 *temp = data;
+ void __iomem *bdf_addr = NULL;
+- int ret;
++ int ret = 0;
+ u32 remaining = len;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
+index d477e2be814b10..7e06d100af5759 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.h
++++ b/drivers/net/wireless/ath/ath11k/qmi.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_QMI_H
+diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
+index 7f9fb968dac6d4..c9e8bbc4896f3a 100644
+--- a/drivers/net/wireless/ath/ath11k/reg.c
++++ b/drivers/net/wireless/ath/ath11k/reg.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/rtnetlink.h>
+
+diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
+index 2f284f26378d1f..d873b9cf7fc4f9 100644
+--- a/drivers/net/wireless/ath/ath11k/reg.h
++++ b/drivers/net/wireless/ath/ath11k/reg.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_REG_H
+diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
+index 786d5f36f5e547..2da6da72727892 100644
+--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
++++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #ifndef ATH11K_RX_DESC_H
+ #define ATH11K_RX_DESC_H
+diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
+index 705868198df4bc..ae2abe8ae9920c 100644
+--- a/drivers/net/wireless/ath/ath11k/spectral.c
++++ b/drivers/net/wireless/ath/ath11k/spectral.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/relay.h>
+diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
+index 96bfa16e18e96d..789cff7c64a72d 100644
+--- a/drivers/net/wireless/ath/ath11k/spectral.h
++++ b/drivers/net/wireless/ath/ath11k/spectral.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_SPECTRAL_H
+diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
+index 23ed01bd44f9aa..d39acc03be5b17 100644
+--- a/drivers/net/wireless/ath/ath11k/thermal.c
++++ b/drivers/net/wireless/ath/ath11k/thermal.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/device.h>
+diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
+index 3e39675ef7f577..40c1a9563e0c2a 100644
+--- a/drivers/net/wireless/ath/ath11k/thermal.h
++++ b/drivers/net/wireless/ath/ath11k/thermal.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _ATH11K_THERMAL_
+diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
+index 9535745fe026c2..235ab8ea715fed 100644
+--- a/drivers/net/wireless/ath/ath11k/trace.h
++++ b/drivers/net/wireless/ath/ath11k/trace.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 23ad6825e5be58..2cc13e60f422f7 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/skbuff.h>
+ #include <linux/ctype.h>
+@@ -8337,6 +8337,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+@@ -8354,6 +8356,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ ieee80211_radar_detected(ar->hw);
+
+ exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -8383,15 +8387,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
+ ev->temp, ev->pdev_id);
+
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+- kfree(tb);
+- return;
++ goto exit;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev->temp);
+
++exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -8611,12 +8619,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ return;
+ }
+
++ rcu_read_lock();
++
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+- kfree(tb);
+- return;
++ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
+@@ -8633,6 +8642,8 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
++exit:
++ rcu_read_unlock();
+
+ kfree(tb);
+ }
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
+index 100bb816b59230..fa3b480b9d24fa 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.h
++++ b/drivers/net/wireless/ath/ath11k/wmi.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH11K_WMI_H
+diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h
+index 553ba850d910b5..c85811e3f42b2d 100644
+--- a/drivers/net/wireless/ath/ath11k/wow.h
++++ b/drivers/net/wireless/ath/ath11k/wow.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _WOW_H_
+diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
+index 79af3b6159f1c7..857bc5f9e946a9 100644
+--- a/drivers/net/wireless/ath/ath12k/ce.h
++++ b/drivers/net/wireless/ath/ath12k/ce.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_CE_H
+@@ -119,7 +119,7 @@ struct ath12k_ce_ring {
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+- u32 base_addr_ce_space_unaligned;
++ dma_addr_t base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+@@ -129,7 +129,7 @@ struct ath12k_ce_ring {
+ void *base_addr_owner_space;
+
+ /* CE address space */
+- u32 base_addr_ce_space;
++ dma_addr_t base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
+index d873b573dac669..33f4706af880d1 100644
+--- a/drivers/net/wireless/ath/ath12k/core.h
++++ b/drivers/net/wireless/ath/ath12k/core.h
+@@ -181,6 +181,8 @@ enum ath12k_dev_flags {
+ ATH12K_FLAG_REGISTERED,
+ ATH12K_FLAG_QMI_FAIL,
+ ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
++ ATH12K_FLAG_CE_IRQ_ENABLED,
++ ATH12K_FLAG_EXT_IRQ_ENABLED,
+ };
+
+ enum ath12k_monitor_flags {
+@@ -400,7 +402,7 @@ struct ath12k_sta {
+ };
+
+ #define ATH12K_MIN_5G_FREQ 4150
+-#define ATH12K_MIN_6G_FREQ 5945
++#define ATH12K_MIN_6G_FREQ 5925
+ #define ATH12K_MAX_6G_FREQ 7115
+ #define ATH12K_NUM_CHANS 100
+ #define ATH12K_MAX_5G_CHAN 173
+diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
+index 8fbf868e6f7ec0..788160c84c6868 100644
+--- a/drivers/net/wireless/ath/ath12k/dbring.c
++++ b/drivers/net/wireless/ath/ath12k/dbring.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
+index 67893923e01092..5709e38ff1fb87 100644
+--- a/drivers/net/wireless/ath/ath12k/debug.c
++++ b/drivers/net/wireless/ath/ath12k/debug.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/vmalloc.h>
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index f933896f2a68d7..907655c45a4b9a 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -38,6 +38,7 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
+
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
++ peer->dp_setup_done = false;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+@@ -126,7 +127,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
+ enum hal_ring_type type, int ring_num)
+ {
++ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
+ const u8 *grp_mask;
++ int i;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
+@@ -134,6 +137,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
+ grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
++ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
++ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
++ if (ring_num == map[i].wbm_ring_num) {
++ ring_num = i;
++ break;
++ }
++ }
++
+ grp_mask = &ab->hw_params->ring_mask->tx[0];
+ }
+ break;
+@@ -875,11 +886,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
+ enum dp_monitor_mode monitor_mode;
+ u8 ring_mask;
+
+- while (i < ab->hw_params->max_tx_ring) {
+- if (ab->hw_params->ring_mask->tx[grp_id] &
+- BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
+- ath12k_dp_tx_completion_handler(ab, i);
+- i++;
++ if (ab->hw_params->ring_mask->tx[grp_id]) {
++ i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
++ ath12k_dp_tx_completion_handler(ab, i);
+ }
+
+ if (ab->hw_params->ring_mask->rx_err[grp_id]) {
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index e6e64d437c47aa..70ad035acac755 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -1555,6 +1555,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+
+ msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
++ if (len > (skb->len - struct_size(msg, data, 0))) {
++ ath12k_warn(ab,
++ "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
++ len, skb->len);
++ return -EINVAL;
++ }
++
+ pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ ppdu_id = le32_to_cpu(msg->ppdu_id);
+
+@@ -1583,6 +1590,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ goto exit;
+ }
+
++ if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
++ spin_unlock_bh(&ar->data_lock);
++ ath12k_warn(ab,
++ "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
++ ppdu_info->ppdu_stats.common.num_users,
++ HTT_PPDU_STATS_MAX_USERS);
++ ret = -EINVAL;
++ goto exit;
++ }
++
+ /* back up data rate tlv for all peers */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+@@ -1641,11 +1658,12 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+- ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+
++ rcu_read_lock();
++ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+- return;
++ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+@@ -1661,6 +1679,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+
+ spin_unlock_bh(&ar->data_lock);
++exit:
++ rcu_read_unlock();
+ }
+
+ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+@@ -2356,8 +2376,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ channel_num = meta_data;
+ center_freq = meta_data >> 16;
+
+- if (center_freq >= 5935 && center_freq <= 7105) {
++ if (center_freq >= ATH12K_MIN_6G_FREQ &&
++ center_freq <= ATH12K_MAX_6G_FREQ) {
+ rx_status->band = NL80211_BAND_6GHZ;
++ rx_status->freq = center_freq;
+ } else if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 173) {
+@@ -2375,8 +2397,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ rx_desc, sizeof(*rx_desc));
+ }
+
+- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+- rx_status->band);
++ if (rx_status->band != NL80211_BAND_6GHZ)
++ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
++ rx_status->band);
+
+ ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+ }
+@@ -2647,7 +2670,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ dev_kfree_skb_any(msdu);
+- ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
++ ab->soc_stats.hal_reo_error[ring_id]++;
+ continue;
+ }
+
+@@ -2736,6 +2759,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
+ peer = ath12k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
++ crypto_free_shash(tfm);
+ ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
+ return -ENOENT;
+ }
+@@ -2748,6 +2772,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
+ }
+
+ peer->tfm_mmic = tfm;
++ peer->dp_setup_done = true;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+@@ -2964,7 +2989,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
+ struct hal_srng *srng;
+ dma_addr_t link_paddr, buf_paddr;
+ u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
+- u32 cookie, hal_rx_desc_sz, dest_ring_info0;
++ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
+ int ret;
+ struct ath12k_rx_desc_info *desc_info;
+ u8 dst_ind;
+@@ -3000,7 +3025,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
+
+ buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+- DMA_FROM_DEVICE);
++ DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, buf_paddr))
+ return -ENOMEM;
+
+@@ -3056,13 +3081,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
+ reo_ent_ring->rx_mpdu_info.peer_meta_data =
+ reo_dest_ring->rx_mpdu_info.peer_meta_data;
+
+- /* Firmware expects physical address to be filled in queue_addr_lo in
+- * the MLO scenario and in case of non MLO peer meta data needs to be
+- * filled.
+- * TODO: Need to handle for MLO scenario.
+- */
+- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
+- reo_ent_ring->info0 = le32_encode_bits(dst_ind,
++ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
++ queue_addr_hi = upper_32_bits(rx_tid->paddr);
++ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
++ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
++ le32_encode_bits(dst_ind,
+ HAL_REO_ENTR_RING_INFO0_DEST_IND);
+
+ reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
+@@ -3086,7 +3109,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
+ spin_unlock_bh(&dp->rx_desc_lock);
+ err_unmap_dma:
+ dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+- DMA_FROM_DEVICE);
++ DMA_TO_DEVICE);
+ return ret;
+ }
+
+@@ -3214,6 +3237,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ ret = -ENOENT;
+ goto out_unlock;
+ }
++
++ if (!peer->dp_setup_done) {
++ ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
++ peer->addr, peer_id);
++ ret = -ENOENT;
++ goto out_unlock;
++ }
++
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+@@ -3229,7 +3260,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ goto out_unlock;
+ }
+
+- if (frag_no > __fls(rx_tid->rx_frag_bitmap))
++ if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
+diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
+index 8874c815d7faf8..e025e4d0e7678f 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+@@ -330,8 +330,11 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+
+ fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
++
++ if (skb_cb->paddr_ext_desc)
++ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
++ sizeof(struct hal_tx_msdu_ext_desc),
++ DMA_TO_DEVICE);
+
+ fail_remove_tx_buf:
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
+index e7a150e7158e91..0b5a91ab0df49c 100644
+--- a/drivers/net/wireless/ath/ath12k/hal.c
++++ b/drivers/net/wireless/ath/ath12k/hal.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/dma-mapping.h>
+ #include "hal_tx.h"
+@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+
+ static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+ {
+- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
+- RX_MSDU_END_INFO5_DA_IS_MCBC;
++ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
++ RX_MPDU_START_INFO6_MCAST_BCAST;
+ }
+
+ static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+@@ -889,8 +889,8 @@ static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+
+ static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+ {
+- return __le16_to_cpu(desc->u.wcn7850.msdu_end.info5) &
+- RX_MSDU_END_INFO5_DA_IS_MCBC;
++ return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) &
++ RX_MSDU_END_INFO13_MCAST_BCAST;
+ }
+
+ static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
+index 66035a787c728d..fc47e7e6b498a1 100644
+--- a/drivers/net/wireless/ath/ath12k/hal.h
++++ b/drivers/net/wireless/ath/ath12k/hal.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_HAL_H
+diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
+index ee61a6462fdcf4..4d1b89cdffe122 100644
+--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
++++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "debug.h"
+diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
+index 54490cdb63a1b1..c653ca1f59b22d 100644
+--- a/drivers/net/wireless/ath/ath12k/hif.h
++++ b/drivers/net/wireless/ath/ath12k/hif.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_HIF_H
+@@ -10,17 +10,17 @@
+ #include "core.h"
+
+ struct ath12k_hif_ops {
+- u32 (*read32)(struct ath12k_base *sc, u32 address);
+- void (*write32)(struct ath12k_base *sc, u32 address, u32 data);
+- void (*irq_enable)(struct ath12k_base *sc);
+- void (*irq_disable)(struct ath12k_base *sc);
+- int (*start)(struct ath12k_base *sc);
+- void (*stop)(struct ath12k_base *sc);
+- int (*power_up)(struct ath12k_base *sc);
+- void (*power_down)(struct ath12k_base *sc);
++ u32 (*read32)(struct ath12k_base *ab, u32 address);
++ void (*write32)(struct ath12k_base *ab, u32 address, u32 data);
++ void (*irq_enable)(struct ath12k_base *ab);
++ void (*irq_disable)(struct ath12k_base *ab);
++ int (*start)(struct ath12k_base *ab);
++ void (*stop)(struct ath12k_base *ab);
++ int (*power_up)(struct ath12k_base *ab);
++ void (*power_down)(struct ath12k_base *ab);
+ int (*suspend)(struct ath12k_base *ab);
+ int (*resume)(struct ath12k_base *ab);
+- int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id,
++ int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
+index 5991cc91cd00f0..dafd7c34d74650 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.c
++++ b/drivers/net/wireless/ath/ath12k/hw.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/types.h>
+@@ -540,9 +540,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
+ },
+ .rx_mon_dest = {
+ 0, 0, 0,
+- ATH12K_RX_MON_RING_MASK_0,
+- ATH12K_RX_MON_RING_MASK_1,
+- ATH12K_RX_MON_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+@@ -568,16 +565,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+- ATH12K_TX_MON_RING_MASK_0,
+- ATH12K_TX_MON_RING_MASK_1,
++ 0, 0, 0,
+ },
+ };
+
+ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
++ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+- ATH12K_TX_RING_MASK_4,
+ },
+ .rx_mon_dest = {
+ },
+@@ -942,7 +938,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+
+- .interface_modes = BIT(NL80211_IFTYPE_STATION),
++ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
++ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+
+ .idle_ps = true,
+diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
+index e6c4223c283c30..0b0b2a4f70f28b 100644
+--- a/drivers/net/wireless/ath/ath12k/hw.h
++++ b/drivers/net/wireless/ath/ath12k/hw.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_HW_H
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 88346e66bb753e..4bb30e40372877 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <net/mac80211.h>
+@@ -1614,7 +1614,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ {
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ int i;
+- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
++ u8 ampdu_factor, max_nss;
++ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
++ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
+ u16 v;
+@@ -1679,9 +1681,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
+ * length.
+ */
+- ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
+- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
+- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
++ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
++ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (ampdu_factor) {
+ if (sta->deflink.vht_cap.vht_supported)
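
The hunk above fixes a shift-by-mask bug: the old expression shifted by IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK itself rather than by the field's bit offset, so the extracted A-MPDU factor was effectively always zero. u8_get_bits() derives the shift from the mask. A minimal userspace sketch of the same field-extraction idiom (the mask value and names below are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative mask: bits 3..4 of an 8-bit capability byte. */
#define EXAMPLE_AMPDU_MASK 0x18u

/* Like the kernel's u8_get_bits(): shift right by the mask's
 * lowest set bit, then keep only the field. */
static uint8_t get_bits8(uint8_t val, uint8_t mask)
{
	unsigned int shift = 0;

	while (!((mask >> shift) & 1))	/* find lowest set bit */
		shift++;
	return (uint8_t)((val & mask) >> shift);
}

int main(void)
{
	uint8_t cap = 0x10;		/* field value 2 in bits 3..4 */

	/* Buggy pattern from the old code: shifting by the mask. */
	printf("wrong: %u\n", (unsigned int)((cap & EXAMPLE_AMPDU_MASK) >> EXAMPLE_AMPDU_MASK));
	/* Correct extraction. */
	printf("right: %u\n", (unsigned int)get_bits8(cap, EXAMPLE_AMPDU_MASK));
	return 0;
}
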
+@@ -3355,6 +3356,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
+
+ ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
++ if (peer_arg.peer_nss < 1) {
++ ath12k_warn(ar->ab,
++ "invalid peer NSS %d\n", peer_arg.peer_nss);
++ return -EINVAL;
++ }
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+@@ -5152,7 +5158,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
+ do {
+ if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
+ vdev_stats_id++;
+- if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
++ if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
+ vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
+ break;
+ }
+@@ -6039,13 +6045,28 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+- if (WARN_ON(!arvif->is_up))
+- continue;
++ /* Firmware expects vdev_restart only if vdev is up.
++ * If vdev is down then it expects vdev_stop->vdev_start.
++ */
++ if (arvif->is_up) {
++ ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
++ if (ret) {
++ ath12k_warn(ab, "failed to restart vdev %d: %d\n",
++ arvif->vdev_id, ret);
++ continue;
++ }
++ } else {
++ ret = ath12k_mac_vdev_stop(arvif);
++ if (ret) {
++ ath12k_warn(ab, "failed to stop vdev %d: %d\n",
++ arvif->vdev_id, ret);
++ continue;
++ }
+
+- ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+- if (ret) {
+- ath12k_warn(ab, "failed to restart vdev %d: %d\n",
+- arvif->vdev_id, ret);
++ ret = ath12k_mac_vdev_start(arvif, &vifs[i].new_ctx->def);
++ if (ret)
++ ath12k_warn(ab, "failed to start vdev %d: %d\n",
++ arvif->vdev_id, ret);
+ continue;
+ }
+
+@@ -6196,8 +6217,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ }
+
+ if (ab->hw_params->vdev_start_delay &&
+- (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
++ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
++ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ param.vdev_id = arvif->vdev_id;
+ param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ param.peer_addr = ar->mac_addr;
+@@ -7004,7 +7025,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+- if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
++ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
+ channels = kmemdup(ath12k_6ghz_channels,
+ sizeof(ath12k_6ghz_channels), GFP_KERNEL);
+ if (!channels) {
+diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
+index 7b16b70df4fa89..ec1be11cce7fc7 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.h
++++ b/drivers/net/wireless/ath/ath12k/mac.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_MAC_H
+diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
+index 42f1140baa4fe2..380328fa2822e7 100644
+--- a/drivers/net/wireless/ath/ath12k/mhi.c
++++ b/drivers/net/wireless/ath/ath12k/mhi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/msi.h>
+@@ -370,8 +370,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ ret = ath12k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to get msi for mhi\n");
+- mhi_free_controller(mhi_ctrl);
+- return ret;
++ goto free_controller;
+ }
+
+ mhi_ctrl->iova_start = 0;
+@@ -388,11 +387,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
+ if (ret) {
+ ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+- mhi_free_controller(mhi_ctrl);
+- return ret;
++ goto free_controller;
+ }
+
+ return 0;
++
++free_controller:
++ mhi_free_controller(mhi_ctrl);
++ ab_pci->mhi_ctrl = NULL;
++ return ret;
+ }
+
+ void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
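
ath12k_mhi_register() above now funnels both failure paths through one free_controller label and also clears the stale mhi_ctrl pointer, instead of duplicating mhi_free_controller() calls inline. A self-contained sketch of this single-exit cleanup idiom (the resource type and function names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct ctrl { int id; };

static int step_one(struct ctrl *c) { return c->id ? 0 : -1; }
static int step_two(struct ctrl *c) { (void)c; return 0; }

/* One cleanup label owns the teardown; every failure jumps to it. */
static int register_ctrl(struct ctrl **out, int id)
{
	struct ctrl *c = malloc(sizeof(*c));
	int ret;

	if (!c)
		return -1;
	c->id = id;

	ret = step_one(c);
	if (ret)
		goto free_controller;

	ret = step_two(c);
	if (ret)
		goto free_controller;

	*out = c;
	return 0;

free_controller:
	free(c);
	*out = NULL;	/* don't leave a dangling pointer behind */
	return ret;
}

int main(void)
{
	struct ctrl *c;

	printf("ok=%d\n", register_ctrl(&c, 1));
	printf("fail=%d\n", register_ctrl(&c, 0));
	return 0;
}
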
+diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
+index fae5dfd6e9d70e..041a9602f0e15f 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/module.h>
+@@ -373,6 +373,8 @@ static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
+ {
+ int i;
+
++ clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
++
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+@@ -406,6 +408,10 @@ static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
+ static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
+ {
+ struct ath12k_ce_pipe *ce_pipe = arg;
++ struct ath12k_base *ab = ce_pipe->ab;
++
++ if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
++ return IRQ_HANDLED;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+@@ -424,12 +430,15 @@ static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+ }
+
+-static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc)
++static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
+ {
+ int i;
+
++ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
++ return;
++
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+- struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
++ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath12k_pci_ext_grp_disable(irq_grp);
+
+@@ -483,6 +492,10 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
+ {
+ struct ath12k_ext_irq_grp *irq_grp = arg;
++ struct ath12k_base *ab = irq_grp->ab;
++
++ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
++ return IRQ_HANDLED;
+
+ ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
+
+@@ -626,6 +639,8 @@ static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
+ {
+ int i;
+
++ set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
++
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+@@ -956,6 +971,8 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
+ {
+ int i;
+
++ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
++
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
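
The pci.c hunks above bracket interrupt delivery with a dev_flags bit: the enable paths set it, the disable paths clear it, and both handlers bail out early when it is clear, so a late-firing IRQ cannot touch torn-down state. A userspace sketch of the same gate using C11 atomics in place of the kernel's set_bit/clear_bit/test_bit (all names below are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool irq_enabled;

static void irqs_enable(void)  { atomic_store(&irq_enabled, true); }
static void irqs_disable(void) { atomic_store(&irq_enabled, false); }

/* The handler checks the gate first, mirroring the early
 * "return IRQ_HANDLED" added to the patched handlers. */
static void irq_handler(int irq)
{
	if (!atomic_load(&irq_enabled))
		return;		/* spurious/late IRQ: ignore */
	printf("handled irq %d\n", irq);
}

int main(void)
{
	irqs_enable();
	irq_handler(42);	/* processed */
	irqs_disable();
	irq_handler(42);	/* dropped at the gate */
	return 0;
}
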
+diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
+index 0f24fd9395cd9b..9a17a7dcdd6a6e 100644
+--- a/drivers/net/wireless/ath/ath12k/pci.h
++++ b/drivers/net/wireless/ath/ath12k/pci.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #ifndef ATH12K_PCI_H
+ #define ATH12K_PCI_H
+diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
+index b296dc0e2f6711..7b3500b5c8c20e 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.h
++++ b/drivers/net/wireless/ath/ath12k/peer.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_PEER_H
+@@ -44,6 +44,9 @@ struct ath12k_peer {
+ struct ppdu_user_delayba ppdu_stats_delayba;
+ bool delayba_flag;
+ bool is_authorized;
++
++ /* protected by ab->data_lock */
++ bool dp_setup_done;
+ };
+
+ void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
+diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
+index b2db0436bdde63..c49f585cc39656 100644
+--- a/drivers/net/wireless/ath/ath12k/qmi.c
++++ b/drivers/net/wireless/ath/ath12k/qmi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/elf.h>
+@@ -1977,6 +1977,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
+ goto out;
+ }
+@@ -2040,6 +2041,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+@@ -2114,6 +2116,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+ ret);
+ goto out;
+@@ -2228,6 +2231,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+ ret);
+ goto out;
+@@ -2308,7 +2312,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ const u8 *temp = data;
+- int ret;
++ int ret = 0;
+ u32 remaining = len;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+@@ -2567,6 +2571,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+@@ -2613,6 +2618,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+@@ -2704,6 +2710,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
++ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+ ret);
+ goto out;
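
Each QMI send path above gains a cancel call on the error branch: once a transaction has been armed, a failed send must cancel it, or the transaction stays registered and is leaked. The shape of the pattern, with generic stand-in functions rather than the real QMI API (sketch only):

#include <stdbool.h>
#include <stdio.h>

struct txn { bool armed; };

static void txn_init(struct txn *t)   { t->armed = true; }
static void txn_cancel(struct txn *t) { t->armed = false; }
static int  send_req(bool ok)         { return ok ? 0 : -1; }
static int  txn_wait(struct txn *t)   { t->armed = false; return 0; }

/* Arm, send, wait -- and cancel on the branch where the send
 * fails, so an armed transaction is never leaked. */
static int do_request(bool send_ok)
{
	struct txn t;
	int ret;

	txn_init(&t);

	ret = send_req(send_ok);
	if (ret < 0) {
		txn_cancel(&t);	/* the fix added throughout qmi.c */
		goto out;
	}

	ret = txn_wait(&t);
out:
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_request(true), do_request(false));
	return 0;
}
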
+@@ -2935,6 +2942,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
+ .fn = ath12k_qmi_msg_fw_ready_cb,
+ },
++
++ /* end of list */
++ {},
+ };
+
+ static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
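
The trailing empty initializer added above terminates the handler array: the QMI core walks the table until it reaches a zeroed entry, so a missing sentinel makes it read past the end of the array. A small sketch of sentinel-terminated table iteration (structure and field names are illustrative):

#include <stdio.h>

struct msg_handler {
	int msg_id;
	void (*fn)(void);
};

static void on_ready(void) { puts("ready"); }
static void on_mem(void)   { puts("mem request"); }

static const struct msg_handler handlers[] = {
	{ .msg_id = 1, .fn = on_ready },
	{ .msg_id = 2, .fn = on_mem },
	{ 0 },	/* end of list: iteration stops at the zeroed entry */
};

static void dispatch(int msg_id)
{
	const struct msg_handler *h;

	for (h = handlers; h->fn; h++)	/* sentinel has fn == NULL */
		if (h->msg_id == msg_id)
			h->fn();
}

int main(void)
{
	dispatch(2);
	return 0;
}
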
+diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
+index 15944f5f33ab0e..4c1ba3196a403d 100644
+--- a/drivers/net/wireless/ath/ath12k/qmi.h
++++ b/drivers/net/wireless/ath/ath12k/qmi.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_QMI_H
+diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
+index 6ede91ebc8e169..32bdefeccc2453 100644
+--- a/drivers/net/wireless/ath/ath12k/reg.c
++++ b/drivers/net/wireless/ath/ath12k/reg.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/rtnetlink.h>
+ #include "core.h"
+@@ -103,7 +103,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+- if (!bands[band])
++ if (!(ar->mac.sbands[band].channels && bands[band]))
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+@@ -129,7 +129,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
+ ch = arg->channel;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+- if (!bands[band])
++ if (!(ar->mac.sbands[band].channels && bands[band]))
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
+index 56d009a4723439..5b25b603eb4048 100644
+--- a/drivers/net/wireless/ath/ath12k/reg.h
++++ b/drivers/net/wireless/ath/ath12k/reg.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef ATH12K_REG_H
+diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
+index bfa87cb8d0213d..d2cd11e0e24623 100644
+--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
++++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #ifndef ATH12K_RX_DESC_H
+ #define ATH12K_RX_DESC_H
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index ef0f3cf35cfd1d..9105fdd14c6671 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #include <linux/skbuff.h>
+ #include <linux/ctype.h>
+@@ -1501,6 +1501,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ sizeof(*cmd));
+ cmd->req_type = cpu_to_le32(type);
++ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+@@ -1834,7 +1835,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ if (arg->bw_160)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
+ if (arg->bw_320)
+- cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
++ cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well
+@@ -3876,6 +3877,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
++
++ if (reg_cap.phy_id >= MAX_RADIOS) {
++ ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
++ return -EINVAL;
++ }
++
+ soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ }
+ return 0;
+@@ -5766,8 +5773,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
++ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
++ rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
+ status->band = NL80211_BAND_6GHZ;
++ status->freq = rx_ev.chan_freq;
+ } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+@@ -5788,8 +5797,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
+
+ sband = &ar->mac.sbands[status->band];
+
+- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+- status->band);
++ if (status->band != NL80211_BAND_6GHZ)
++ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
++ status->band);
++
+ status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+@@ -6476,6 +6487,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
++ rcu_read_lock();
++
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+
+ if (!ar) {
+@@ -6493,6 +6506,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ ieee80211_radar_detected(ar->hw);
+
+ exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -6511,11 +6526,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
++ rcu_read_lock();
++
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+- return;
++ goto exit;
+ }
++
++exit:
++ rcu_read_unlock();
+ }
+
+ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
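
Both event handlers above now hold rcu_read_lock() across the pdev lookup and the subsequent use of the returned ar, so the radio object cannot be freed between lookup and use, and the error path unlocks through the shared exit label. As a loose userspace analogue (RCU has no drop-in libc equivalent), a reader lock held across lookup-and-use gives the same lifetime guarantee; the names below are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
static int *radio_by_id[4];
static int radio0 = 7;

static int *lookup(int id)
{
	return (id >= 0 && id < 4) ? radio_by_id[id] : NULL;
}

static void handle_event(int pdev_id)
{
	int *ar;

	pthread_rwlock_rdlock(&tbl_lock);	/* ~ rcu_read_lock() */
	ar = lookup(pdev_id);
	if (!ar) {
		printf("invalid pdev id %d\n", pdev_id);
		goto unlock;			/* still unlock on the error path */
	}
	printf("temperature event for radio %d\n", *ar);
unlock:
	pthread_rwlock_unlock(&tbl_lock);	/* ~ rcu_read_unlock() */
}

int main(void)
{
	radio_by_id[0] = &radio0;
	handle_event(0);	/* found */
	handle_event(3);	/* invalid, still unlocks */
	return 0;
}
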
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
+index c75a6fa1f7e089..a19a2c29f2264a 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.h
++++ b/drivers/net/wireless/ath/ath12k/wmi.h
+@@ -3058,6 +3058,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
+ __le32 tlv_header;
+ /* ref wmi_bss_chan_info_req_type */
+ __le32 req_type;
++ __le32 pdev_id;
+ } __packed;
+
+ struct wmi_ap_ps_peer_cmd {
+@@ -4033,7 +4034,6 @@ struct wmi_vdev_stopped_event {
+ } __packed;
+
+ struct wmi_pdev_bss_chan_info_event {
+- __le32 pdev_id;
+ __le32 freq; /* Units in MHz */
+ __le32 noise_floor; /* units are dBm */
+ /* rx clear - how often the channel was unused */
+@@ -4051,6 +4051,7 @@ struct wmi_pdev_bss_chan_info_event {
+ /*rx_cycle cnt for my bss in 64bits format */
+ __le32 rx_bss_cycle_count_low;
+ __le32 rx_bss_cycle_count_high;
++ __le32 pdev_id;
+ } __packed;
+
+ #define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
+index 988222cea9dfe7..acc84e6711b0e1 100644
+--- a/drivers/net/wireless/ath/ath9k/antenna.c
++++ b/drivers/net/wireless/ath/ath9k/antenna.c
+@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+- antcomb->rssi_lna1) {
++ antcomb->rssi_lna2) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index 9bc57c5a89bfeb..7791f4df6d4840 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1293,7 +1293,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath9k_gstrings_stats,
++ memcpy(data, ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+ }
+
+@@ -1325,11 +1325,11 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ath_softc *sc = hw->priv;
+ int i = 0;
+
+- data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
++ data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
+- data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
++ data[i++] = ((u64)sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
+@@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah)
+
+ sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
+ sc->hw->wiphy->debugfsdir);
+- if (IS_ERR(sc->debug.debugfs_phy))
+- return -ENOMEM;
+
+ #ifdef CONFIG_ATH_DEBUG
+ debugfs_create_file("debug", 0600, sc->debug.debugfs_phy,
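
Two fixes land in debug.c above: memcpy() now takes the string array itself rather than dereferencing it (the dereferenced form points only at the first string and trips fortified memcpy bounds checks), and the per-queue counters are summed with a (u64) cast so the addition happens in 64-bit arithmetic instead of wrapping in 32 bits. A compact illustration of the overflow half (the values are made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t q[4] = { 0xC0000000u, 0xC0000000u, 1, 2 };
	uint64_t wrong, right;

	/* Sum is computed in 32 bits, wraps, then widens. */
	wrong = q[0] + q[1] + q[2] + q[3];
	/* First operand widened: the whole sum is 64-bit. */
	right = (uint64_t)q[0] + q[1] + q[2] + q[3];

	printf("wrong=%" PRIu64 " right=%" PRIu64 "\n", wrong, right);
	return 0;
}
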
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index e5414435b14140..ab728a70ed2796 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -716,8 +716,7 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
+ }
+
+ resubmit:
+- skb_reset_tail_pointer(skb);
+- skb_trim(skb, 0);
++ __skb_set_length(skb, 0);
+
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -754,8 +753,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ case -ESHUTDOWN:
+ goto free_skb;
+ default:
+- skb_reset_tail_pointer(skb);
+- skb_trim(skb, 0);
++ __skb_set_length(skb, 0);
+
+ goto resubmit;
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
+index 237f4ec2cffd7b..6c33e898b30006 100644
+--- a/drivers/net/wireless/ath/ath9k/htc.h
++++ b/drivers/net/wireless/ath/ath9k/htc.h
+@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
+ DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
+ struct timer_list cleanup_timer;
+ spinlock_t tx_lock;
+- bool initialized;
+ };
+
+ struct ath9k_htc_tx_ctl {
+@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
+ unsigned long ps_usecount;
+ bool ps_enabled;
+ bool ps_idle;
++ bool initialized;
+
+ #ifdef CONFIG_MAC80211_LEDS
+ enum led_brightness brightness;
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index c549ff3abcdc4f..7b145282243190 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -423,7 +423,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath9k_htc_gstrings_stats,
++ memcpy(data, ath9k_htc_gstrings_stats,
+ sizeof(ath9k_htc_gstrings_stats));
+ }
+
+@@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
+
+ priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
+ priv->hw->wiphy->debugfsdir);
+- if (IS_ERR(priv->debug.debugfs_phy))
+- return -ENOMEM;
+
+ ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
+
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index dae3d9c7b64082..fc339079ee8c9d 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+
+ htc_handle->drv_priv = priv;
+
++ /* Allow ath9k_wmi_event_tasklet() to operate. */
++ smp_wmb();
++ priv->initialized = true;
++
+ return 0;
+
+ err_init:
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index 672789e3c55d0f..2fdd27885f5437 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -652,9 +652,10 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
+ struct ath9k_htc_tx_event *tx_pend;
+ int i;
+
+- for (i = 0; i < txs->cnt; i++) {
+- WARN_ON(txs->cnt > HTC_MAX_TX_STATUS);
++ if (WARN_ON_ONCE(txs->cnt > HTC_MAX_TX_STATUS))
++ return;
+
++ for (i = 0; i < txs->cnt; i++) {
+ __txs = &txs->txstatus[i];
+
+ skb = ath9k_htc_tx_get_packet(priv, __txs);
+@@ -814,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
+ skb_queue_head_init(&priv->tx.data_vo_queue);
+ skb_queue_head_init(&priv->tx.tx_failed);
+
+- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
+- smp_wmb();
+- priv->tx.initialized = true;
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 1494feedb27dbc..aa271b82875e04 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
+ if (power_mode != ATH9K_PM_AWAKE) {
+ spin_lock(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+- memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+- memset(&common->cc_ani, 0, sizeof(common->cc_ani));
++ memset(&common->cc, 0, sizeof(common->cc));
+ spin_unlock(&common->cc_lock);
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index 1476b42b52a915..805ad31edba2ba 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
+ }
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
++ /* Check if ath9k_htc_probe_device() completed. */
++ if (!data_race(priv->initialized)) {
++ kfree_skb(skb);
++ continue;
++ }
++
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+ wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
+ &wmi->drv_priv->fatal_work);
+ break;
+ case WMI_TXSTATUS_EVENTID:
+- /* Check if ath9k_tx_init() completed. */
+- if (!data_race(priv->tx.initialized))
+- break;
+-
+ spin_lock_bh(&priv->tx.tx_lock);
+ if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
+ spin_unlock_bh(&priv->tx.tx_lock);
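
The htc_drv_init.c and wmi.c hunks above widen the "initialized" publish flag from the TX path to the whole device: the probe path issues a write barrier before setting it, and the event tasklet drops events until it observes the flag, so no handler runs against a half-initialized priv. In C11 terms this is a release store paired with an acquire load (sketch below; the names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct priv {
	int config;		/* state set up before publishing */
	atomic_bool initialized;
};

static struct priv dev;

static void probe_device(void)
{
	dev.config = 123;
	/* Release: everything above becomes visible before the flag
	 * flips, like the smp_wmb() before priv->initialized = true. */
	atomic_store_explicit(&dev.initialized, true, memory_order_release);
}

static void event_tasklet(void)
{
	/* Acquire pairs with the release store above. */
	if (!atomic_load_explicit(&dev.initialized, memory_order_acquire)) {
		puts("dropping event: probe not finished");
		return;
	}
	printf("handling event, config=%d\n", dev.config);
}

int main(void)
{
	event_tasklet();	/* dropped */
	probe_device();
	event_tasklet();	/* handled */
	return 0;
}
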
+diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
+index 6bb9aa2bfe6541..88ef6e023f8266 100644
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref)
+ * carl9170_tx_fill_rateinfo() has filled the rate information
+ * before we get to this point.
+ */
+- memset_after(&txinfo->status, 0, rates);
++ memset(&txinfo->pad, 0, sizeof(txinfo->pad));
++ memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data));
+
+ if (atomic_read(&ar->tx_total_queued))
+ ar->tx_schedule = true;
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index e4eb666c6eea41..a5265997b5767c 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ ar->usb_ep_cmd_is_bulk = true;
+ }
+
++ /* Verify that all expected endpoints are present */
++ if (ar->usb_ep_cmd_is_bulk) {
++ u8 bulk_ep_addr[] = {
++ AR9170_USB_EP_RX | USB_DIR_IN,
++ AR9170_USB_EP_TX | USB_DIR_OUT,
++ AR9170_USB_EP_CMD | USB_DIR_OUT,
++ 0};
++ u8 int_ep_addr[] = {
++ AR9170_USB_EP_IRQ | USB_DIR_IN,
++ 0};
++ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++ !usb_check_int_endpoints(intf, int_ep_addr))
++ err = -ENODEV;
++ } else {
++ u8 bulk_ep_addr[] = {
++ AR9170_USB_EP_RX | USB_DIR_IN,
++ AR9170_USB_EP_TX | USB_DIR_OUT,
++ 0};
++ u8 int_ep_addr[] = {
++ AR9170_USB_EP_IRQ | USB_DIR_IN,
++ AR9170_USB_EP_CMD | USB_DIR_OUT,
++ 0};
++ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++ !usb_check_int_endpoints(intf, int_ep_addr))
++ err = -ENODEV;
++ }
++
++ if (err) {
++ carl9170_free(ar);
++ return err;
++ }
++
+ usb_set_intfdata(intf, ar);
+ SET_IEEE80211_DEV(ar->hw, &intf->dev);
+
+diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
+index 27f4d74a41c808..2788a1b06c17ca 100644
+--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
++++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
+@@ -206,7 +206,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+
+ INIT_LIST_HEAD(&cd->head);
+ cd->freq = freq;
+- cd->detectors = kmalloc_array(dpd->num_radar_types,
++ cd->detectors = kcalloc(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
+ if (cd->detectors == NULL)
+ goto fail;
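
channel_detector_create() above switches from kmalloc_array() to kcalloc(): the detector slots are filled in one by one and the error path frees whatever is in the table, so the slots must start out NULL rather than holding indeterminate values. The userspace equivalent is preferring calloc() over malloc() for pointer tables (sketch, with invented names):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int n = 4;
	/* malloc() leaves the slots indeterminate; freeing a garbage
	 * pointer from a partially-filled table is undefined behaviour. */
	void **bad = malloc(n * sizeof(*bad));
	/* calloc() hands back NULL slots, and free(NULL) is a no-op,
	 * so a partial-failure unwind can free every slot safely. */
	void **good = calloc(n, sizeof(*good));

	for (int i = 0; i < n; i++)
		free(good[i]);	/* all NULL: safe */
	free(good);
	free(bad);		/* never dereference/free bad's slots */
	printf("done\n");
	return 0;
}
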
+diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
+index 67b4bac048e585..c0d8fc0b22fb28 100644
+--- a/drivers/net/wireless/broadcom/b43/b43.h
++++ b/drivers/net/wireless/broadcom/b43/b43.h
+@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
+ return dev->__using_pio_transfers;
+ }
+
++static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
++{
++ if (dev->qos_enabled)
++ ieee80211_wake_queue(dev->wl->hw, queue_prio);
++ else
++ ieee80211_wake_queue(dev->wl->hw, 0);
++}
++
++static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
++{
++ if (dev->qos_enabled)
++ ieee80211_stop_queue(dev->wl->hw, queue_prio);
++ else
++ ieee80211_stop_queue(dev->wl->hw, 0);
++}
++
+ /* Message printing */
+ __printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
+ __printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
+diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
+index 9a7c62bd5e4316..cfaf2f9d67b227 100644
+--- a/drivers/net/wireless/broadcom/b43/dma.c
++++ b/drivers/net/wireless/broadcom/b43/dma.c
+@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ should_inject_overflow(ring)) {
+ /* This TX ring is full. */
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
++ b43_stop_queue(dev, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = true;
+ ring->stopped = true;
+ if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
+@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ } else {
+ /* If the driver queue is running wake the corresponding
+ * mac80211 queue. */
+- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
++ b43_wake_queue(dev, ring->queue_prio);
+ if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
+ b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
+ }
+diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
+index 92ca0b2ca286db..effb6c23f82574 100644
+--- a/drivers/net/wireless/broadcom/b43/main.c
++++ b/drivers/net/wireless/broadcom/b43/main.c
+@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
+
+ start_ieee80211:
+ wl->hw->queues = B43_QOS_QUEUE_NUM;
+- if (!modparam_qos || dev->fw.opensource)
++ if (!modparam_qos || dev->fw.opensource ||
++ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
+ wl->hw->queues = 1;
+
+ err = ieee80211_register_hw(wl->hw);
+@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
+ err = b43_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = true;
+- ieee80211_stop_queue(wl->hw, queue_num);
++ b43_stop_queue(dev, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+ {
+ struct b43_wl *wl = hw_to_b43_wl(hw);
++ u16 skb_queue_mapping;
+
+ if (unlikely(skb->len < 2 + 2 + 6)) {
+ /* Too short, this can't be a valid frame. */
+@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
+ }
+ B43_WARN_ON(skb_shinfo(skb)->nr_frags);
+
+- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
++ skb_queue_mapping = skb_get_queue_mapping(skb);
++ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
++ if (!wl->tx_queue_stopped[skb_queue_mapping])
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+- } else {
+- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
+- }
++ else
++ b43_stop_queue(wl->current_dev, skb_queue_mapping);
+ }
+
+ static void b43_qos_params_upload(struct b43_wldev *dev,
+diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
+index 8c28a9250cd19a..cc19b589fa70de 100644
+--- a/drivers/net/wireless/broadcom/b43/pio.c
++++ b/drivers/net/wireless/broadcom/b43/pio.c
+@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ if (total_len > (q->buffer_size - q->buffer_used)) {
+ /* Not enough memory on the queue. */
+ err = -EBUSY;
+- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
++ b43_stop_queue(dev, skb_get_queue_mapping(skb));
+ q->stopped = true;
+ goto out;
+ }
+@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
+ if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
+ (q->free_packet_slots == 0)) {
+ /* The queue is full. */
+- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
++ b43_stop_queue(dev, skb_get_queue_mapping(skb));
+ q->stopped = true;
+ }
+
+@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
+ list_add(&pack->list, &q->packets_list);
+
+ if (q->stopped) {
+- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
++ b43_wake_queue(dev, q->queue_prio);
+ q->stopped = false;
+ }
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+index ac3a36fa3640ce..a963c242975ac9 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+@@ -7,21 +7,16 @@
+ #include <core.h>
+ #include <bus.h>
+ #include <fwvid.h>
++#include <feature.h>
+
+ #include "vops.h"
+
+-static int brcmf_bca_attach(struct brcmf_pub *drvr)
++static void brcmf_bca_feat_attach(struct brcmf_if *ifp)
+ {
+- pr_err("%s: executing\n", __func__);
+- return 0;
+-}
+-
+-static void brcmf_bca_detach(struct brcmf_pub *drvr)
+-{
+- pr_err("%s: executing\n", __func__);
++ /* SAE support not confirmed so disabling for now */
++ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE);
+ }
+
+ const struct brcmf_fwvid_ops brcmf_bca_ops = {
+- .attach = brcmf_bca_attach,
+- .detach = brcmf_bca_detach,
++ .feat_attach = brcmf_bca_feat_attach,
+ };
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+index 7ea2631b80692d..00794086cc7c97 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+@@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
+ {
+ *data = addr;
+
+- return brcmf_fil_iovar_int_get(ifp, "btc_params", data);
++ return brcmf_fil_iovar_int_query(ifp, "btc_params", data);
+ }
+
+ /**
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 2a90bb24ba77f9..c708ae91c3ce93 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -32,6 +32,7 @@
+ #include "vendor.h"
+ #include "bus.h"
+ #include "common.h"
++#include "fwvid.h"
+
+ #define BRCMF_SCAN_IE_LEN_MAX 2048
+
+@@ -662,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
+ /* interface_create version 3+ */
+ /* get supported version from firmware side */
+ iface_create_ver = 0;
+- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
+- &iface_create_ver);
++ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
++ &iface_create_ver);
+ if (err) {
+ brcmf_err("fail to get supported version, err=%d\n", err);
+ return -EOPNOTSUPP;
+@@ -755,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
+ /* interface_create version 3+ */
+ /* get supported version from firmware side */
+ iface_create_ver = 0;
+- err = brcmf_fil_bsscfg_int_get(ifp, "interface_create",
+- &iface_create_ver);
++ err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
++ &iface_create_ver);
+ if (err) {
+ brcmf_err("fail to get supported version, err=%d\n", err);
+ return -EOPNOTSUPP;
+@@ -1179,8 +1180,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ scan_request = cfg->scan_request;
+ cfg->scan_request = NULL;
+
+- if (timer_pending(&cfg->escan_timeout))
+- del_timer_sync(&cfg->escan_timeout);
++ timer_delete_sync(&cfg->escan_timeout);
+
+ if (fw_abort) {
+ /* Do a scan abort to stop the driver's scan engine */
+@@ -1687,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
+ return reason;
+ }
+
+-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
++int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
+ {
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_pmk_le pmk;
+ int err;
+
++ if (key_len > sizeof(pmk.key)) {
++ bphy_err(drvr, "key must be less than %zu bytes\n",
++ sizeof(pmk.key));
++ return -EINVAL;
++ }
++
+ memset(&pmk, 0, sizeof(pmk));
+
+- /* pass pmk directly */
+- pmk.key_len = cpu_to_le16(pmk_len);
+- pmk.flags = cpu_to_le16(0);
+- memcpy(pmk.key, pmk_data, pmk_len);
++ /* pass key material directly */
++ pmk.key_len = cpu_to_le16(key_len);
++ pmk.flags = cpu_to_le16(flags);
++ memcpy(pmk.key, key, key_len);
+
+- /* store psk in firmware */
++ /* store key material in firmware */
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
+ &pmk, sizeof(pmk));
+ if (err < 0)
+ bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
+- pmk_len);
++ key_len);
+
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
+
+-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
+- u16 pwd_len)
++static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+ {
+- struct brcmf_pub *drvr = ifp->drvr;
+- struct brcmf_wsec_sae_pwd_le sae_pwd;
+- int err;
+-
+- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+- bphy_err(drvr, "sae_password must be less than %d\n",
+- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+- return -EINVAL;
+- }
+-
+- sae_pwd.key_len = cpu_to_le16(pwd_len);
+- memcpy(sae_pwd.key, pwd_data, pwd_len);
+-
+- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+- sizeof(sae_pwd));
+- if (err < 0)
+- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+- pwd_len);
+-
+- return err;
++ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
+ }
+
+ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
+@@ -2114,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
+ if (!sme->crypto.n_akm_suites)
+ return 0;
+
+- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val);
++ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
++ "wpa_auth", &val);
+ if (err) {
+ bphy_err(drvr, "could not get wpa_auth (%d)\n", err);
+ return err;
+@@ -2503,8 +2491,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
+ bphy_err(drvr, "failed to clean up user-space RSNE\n");
+ goto done;
+ }
+- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
+- sme->crypto.sae_pwd_len);
++ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
+ if (!err && sme->crypto.psk)
+ err = brcmf_set_pmk(ifp, sme->crypto.psk,
+ BRCMF_WSEC_MAX_PSK_LEN);
+@@ -2694,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
+ struct brcmf_pub *drvr = cfg->pub;
+- s32 qdbm = 0;
++ s32 qdbm;
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+@@ -3780,8 +3767,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
+ if (req->channels[i] == chan)
+ break;
+ }
+- if (i == req->n_channels)
+- req->channels[req->n_channels++] = chan;
++ if (i == req->n_channels) {
++ req->n_channels++;
++ req->channels[i] = chan;
++ }
+
+ for (i = 0; i < req->n_ssids; i++) {
+ if (req->ssids[i].ssid_len == ssid_len &&
+@@ -4321,6 +4310,9 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
+ int ret;
+
+ pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL);
++ if (!pmk_op)
++ return -ENOMEM;
++
+ pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3);
+
+ if (!pmksa) {
+@@ -4330,9 +4322,16 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
+ /* Single PMK operation */
+ pmk_op->count = cpu_to_le16(1);
+ length += sizeof(struct brcmf_pmksa_v3);
+- memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
+- memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+- pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
++ if (pmksa->bssid)
++ memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
++ if (pmksa->pmkid) {
++ memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
++ pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
++ }
++ if (pmksa->ssid && pmksa->ssid_len) {
++ memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len);
++ pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len;
++ }
+ pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0);
+ }
+
+@@ -5253,8 +5252,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ if (crypto->sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
+- err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
+- crypto->sae_pwd_len);
++ err = brcmf_fwvid_set_sae_password(ifp, crypto);
+ if (err < 0)
+ goto exit;
+ }
+@@ -5361,10 +5359,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
+ msleep(400);
+
+ if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
++ struct cfg80211_crypto_settings crypto = {};
++
+ if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
+ brcmf_set_pmk(ifp, NULL, 0);
+ if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
+- brcmf_set_sae_password(ifp, NULL, 0);
++ brcmf_fwvid_set_sae_password(ifp, &crypto);
+ profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+ }
+
+@@ -7047,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
+ ch.bw = BRCMU_CHAN_BW_20;
+ cfg->d11inf.encchspec(&ch);
+ chaninfo = ch.chspec;
+- err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info",
+- &chaninfo);
++ err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info",
++ &chaninfo);
+ if (!err) {
+ if (chaninfo & WL_CHAN_RADAR)
+ channel->flags |=
+@@ -7082,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
+
+ /* verify support for bw_cap command */
+ val = WLC_BAND_5G;
+- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
++ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
+
+ if (!err) {
+ /* only set 2G bandwidth using bw_cap command */
+@@ -7158,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+ int err;
+
+ band = WLC_BAND_2G;
+- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
++ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[NL80211_BAND_2GHZ] = band;
+ band = WLC_BAND_5G;
+- err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
++ err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
+ if (!err) {
+ bw_cap[NL80211_BAND_5GHZ] = band;
+ return;
+@@ -7171,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+ return;
+ }
+ brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
+- mimo_bwcap = 0;
+ err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
+ if (err)
+ /* assume 20MHz if firmware does not give a clue */
+@@ -7267,7 +7266,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
+ struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
+- u32 nmode = 0;
++ u32 nmode;
+ u32 vhtmode = 0;
+ u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
+ u32 rxchain;
+@@ -8436,6 +8435,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
+ brcmf_btcoex_detach(cfg);
+ wiphy_unregister(cfg->wiphy);
+ wl_deinit_priv(cfg);
++ cancel_work_sync(&cfg->escan_timeout_work);
+ brcmf_free_wiphy(cfg->wiphy);
+ kfree(cfg);
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+index 0e1fa3f0dea2ca..dc3a6a537507d1 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
+ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+ void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+
++int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
++
+ #endif /* BRCMFMAC_CFG80211_H */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+index b75652ba9359f2..bec5748310b9cd 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+@@ -7,21 +7,36 @@
+ #include <core.h>
+ #include <bus.h>
+ #include <fwvid.h>
++#include <fwil.h>
+
+ #include "vops.h"
+
+-static int brcmf_cyw_attach(struct brcmf_pub *drvr)
++static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
++ struct cfg80211_crypto_settings *crypto)
+ {
+- pr_err("%s: executing\n", __func__);
+- return 0;
+-}
++ struct brcmf_pub *drvr = ifp->drvr;
++ struct brcmf_wsec_sae_pwd_le sae_pwd;
++ u16 pwd_len = crypto->sae_pwd_len;
++ int err;
+
+-static void brcmf_cyw_detach(struct brcmf_pub *drvr)
+-{
+- pr_err("%s: executing\n", __func__);
++ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
++ bphy_err(drvr, "sae_password must be less than %d\n",
++ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
++ return -EINVAL;
++ }
++
++ sae_pwd.key_len = cpu_to_le16(pwd_len);
++ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len);
++
++ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
++ sizeof(sae_pwd));
++ if (err < 0)
++ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
++ pwd_len);
++
++ return err;
+ }
+
+ const struct brcmf_fwvid_ops brcmf_cyw_ops = {
+- .attach = brcmf_cyw_attach,
+- .detach = brcmf_cyw_detach,
++ .set_sae_password = brcmf_cyw_set_sae_pwd,
+ };
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+index 86ff174936a9a0..c3a602197662b7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+@@ -82,6 +82,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
++ {
++ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
++ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++ },
++ .driver_data = (void *)&acepc_t8_data,
++ },
+ {
+ /* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
+ .matches = {
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+index 6d10c9efbe93d8..909a34a1ab5035 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+@@ -13,6 +13,7 @@
+ #include "debug.h"
+ #include "fwil.h"
+ #include "fwil_types.h"
++#include "fwvid.h"
+ #include "feature.h"
+ #include "common.h"
+
+@@ -339,6 +340,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+
++ brcmf_fwvid_feat_attach(ifp);
++
+ if (drvr->settings->feature_disable) {
+ brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
+ ifp->drvr->feat_flags,
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+index 72fe8bce6eaf55..6385a7db7f7d77 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+@@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
+
+ s32
+ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+@@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+
+ return err;
+ }
+-
+-
+-s32
+-brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+-{
+- s32 err;
+- __le32 data_le = cpu_to_le32(data);
+-
+- mutex_lock(&ifp->drvr->proto_block);
+- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
+- mutex_unlock(&ifp->drvr->proto_block);
+-
+- return err;
+-}
+-
+-s32
+-brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+-{
+- s32 err;
+- __le32 data_le = cpu_to_le32(*data);
+-
+- mutex_lock(&ifp->drvr->proto_block);
+- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
+- mutex_unlock(&ifp->drvr->proto_block);
+- *data = le32_to_cpu(data_le);
+- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+-
+- return err;
+-}
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
+
+ static u32
+ brcmf_create_iovar(const char *name, const char *data, u32 datalen,
+@@ -239,6 +211,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set);
+
+ s32
+ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
+@@ -270,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
+-
+-s32
+-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+-{
+- __le32 data_le = cpu_to_le32(data);
+-
+- return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+-}
+-
+-s32
+-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+-{
+- __le32 data_le = cpu_to_le32(*data);
+- s32 err;
+-
+- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+- if (err == 0)
+- *data = le32_to_cpu(data_le);
+- return err;
+-}
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
+
+ static u32
+ brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
+@@ -364,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
+
+ s32
+ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+@@ -394,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
+-
+-s32
+-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+-{
+- __le32 data_le = cpu_to_le32(data);
+-
+- return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+- sizeof(data_le));
+-}
+-
+-s32
+-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+-{
+- __le32 data_le = cpu_to_le32(*data);
+- s32 err;
+-
+- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+- sizeof(data_le));
+- if (err == 0)
+- *data = le32_to_cpu(data_le);
+- return err;
+-}
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
+
+ static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
+ char *buf, u32 buflen)
+@@ -465,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
+
+ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
+ void *data, u32 len)
+@@ -494,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
+ mutex_unlock(&drvr->proto_block);
+ return err;
+ }
+-
+-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
+-{
+- __le32 data_le = cpu_to_le32(data);
+-
+- return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
+- sizeof(data_le));
+-}
+-
+-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
+-{
+- __le32 data_le = cpu_to_le32(*data);
+- s32 err;
+-
+- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+- if (err == 0)
+- *data = le32_to_cpu(data_le);
+- return err;
+-}
+-
+-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
+-{
+- return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
+-}
+-
+-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
+-{
+- __le16 data_le = cpu_to_le16(*data);
+- s32 err;
+-
+- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+- if (err == 0)
+- *data = le16_to_cpu(data_le);
+- return err;
+-}
+-
++BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+index bc693157c4b1c8..31e080e4da6697 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+@@ -81,29 +81,142 @@
+
+ s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+-s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+-s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
++static inline
++s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
++{
++ s32 err;
++ __le32 data_le = cpu_to_le32(data);
+
+-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
+- u32 len);
++ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
++ err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le));
++
++ return err;
++}
++static inline
++s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
++{
++ s32 err;
++
++ err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data));
++ if (err == 0)
++ *data = le32_to_cpu(*(__le32 *)data);
++ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
++
++ return err;
++}
++static inline
++s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data)
++{
++ __le32 *data_le = (__le32 *)data;
++
++ *data_le = cpu_to_le32(*data);
++ return brcmf_fil_cmd_int_get(ifp, cmd, data);
++}
++
++s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
++ const void *data, u32 len);
+ s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
+ u32 len);
+-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+-
+-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
+- u32 len);
+-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
+- u32 len);
+-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
++static inline
++s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
++{
++ __le32 data_le = cpu_to_le32(data);
++
++ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
++}
++static inline
++s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ s32 err;
++
++ err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data));
++ if (err == 0)
++ *data = le32_to_cpu(*(__le32 *)data);
++ return err;
++}
++static inline
++s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ __le32 *data_le = (__le32 *)data;
++
++ *data_le = cpu_to_le32(*data);
++ return brcmf_fil_iovar_int_get(ifp, name, data);
++}
++
++
++s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
++ void *data, u32 len);
++s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
++ void *data, u32 len);
++static inline
++s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
++{
++ __le32 data_le = cpu_to_le32(data);
++
++ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
++ sizeof(data_le));
++}
++static inline
++s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ s32 err;
++
++ err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data));
++ if (err == 0)
++ *data = le32_to_cpu(*(__le32 *)data);
++ return err;
++}
++static inline
++s32 brcmf_fil_bsscfg_int_query(struct brcmf_if *ifp, const char *name, u32 *data)
++{
++ __le32 *data_le = (__le32 *)data;
++
++ *data_le = cpu_to_le32(*data);
++ return brcmf_fil_bsscfg_int_get(ifp, name, data);
++}
++
+ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
+ void *data, u32 len);
+ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
+ void *data, u32 len);
+-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
+-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
+-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
+-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
++static inline
++s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id,
++ u32 data)
++{
++ __le32 data_le = cpu_to_le32(data);
++
++ return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
++ sizeof(data_le));
++}
++static inline
++s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id,
++ u32 *data)
++{
++ __le32 data_le = cpu_to_le32(*data);
++ s32 err;
++
++ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
++ if (err == 0)
++ *data = le32_to_cpu(data_le);
++ return err;
++}
++static inline
++s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id,
++ u8 *data)
++{
++ return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
++}
++static inline
++s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id,
++ u16 *data)
++{
++ __le16 data_le = cpu_to_le16(*data);
++ s32 err;
++
++ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
++ if (err == 0)
++ *data = le16_to_cpu(data_le);
++ return err;
++}
+
+ #endif /* _fwil_h_ */
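The fwil.h hunk above turns the old exported *_int_set/*_int_get wrappers into static inline helpers, and the new *_int_query variants reuse the caller's u32 directly as the little-endian wire buffer. As an illustration only (not part of the patch), here is a minimal userspace sketch of that in-place conversion pattern; fake_fw_query() and int_get() are invented stand-ins, and htole32()/le32toh() from glibc's <endian.h> substitute for the kernel's cpu_to_le32()/le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for brcmf_fil_cmd_data_get(): fills buf with little-endian data. */
static int fake_fw_query(void *buf, size_t len)
{
	uint32_t wire = htole32(0x1234);	/* firmware speaks little-endian */

	if (len != sizeof(wire))
		return -1;
	memcpy(buf, &wire, len);
	return 0;
}

/* Mirrors the shape of the new brcmf_fil_cmd_int_get(): no bounce buffer. */
static int int_get(uint32_t *data)
{
	int err = fake_fw_query(data, sizeof(*data));	/* reuse caller's storage */

	if (err == 0)
		*data = le32toh(*data);	/* convert in place on success */
	return err;
}

int main(void)
{
	uint32_t v = 0;

	if (int_get(&v) == 0)
		printf("value = 0x%x\n", v);	/* 0x1234 on any host endianness */
	return 0;
}

Reusing the caller's storage drops the bounce buffer the removed fwil.c functions needed, at the cost of treating *data as scratch until the call returns.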
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+index 611d1a6aabb9e5..b68c46caabe862 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le {
+ struct brcmf_wsec_pmk_le {
+ __le16 key_len;
+ __le16 flags;
+- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
++ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
+ };
+
+ /**
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+index 86eafdb4054198..b427782554b590 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+@@ -89,8 +89,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod,
+ if (fwvid >= BRCMF_FWVENDOR_NUM)
+ return -ERANGE;
+
+- if (WARN_ON(!vmod) || WARN_ON(!vops) ||
+- WARN_ON(!vops->attach) || WARN_ON(!vops->detach))
++ if (WARN_ON(!vmod) || WARN_ON(!vops))
+ return -EINVAL;
+
+ if (WARN_ON(fwvid_list[fwvid].vmod))
+@@ -150,7 +149,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid)
+ }
+ #endif
+
+-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
++int brcmf_fwvid_attach(struct brcmf_pub *drvr)
+ {
+ enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
+ int ret;
+@@ -175,7 +174,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
+ return ret;
+ }
+
+-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
++void brcmf_fwvid_detach(struct brcmf_pub *drvr)
+ {
+ enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
+
+@@ -187,9 +186,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
+
+ mutex_lock(&fwvid_list_lock);
+
+- drvr->vops = NULL;
+- list_del(&drvr->bus_if->list);
+-
++ if (drvr->vops) {
++ drvr->vops = NULL;
++ list_del(&drvr->bus_if->list);
++ }
+ mutex_unlock(&fwvid_list_lock);
+ }
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+index 43df58bb70ad33..dac22534d0334e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+@@ -6,12 +6,14 @@
+ #define FWVID_H_
+
+ #include "firmware.h"
++#include "cfg80211.h"
+
+ struct brcmf_pub;
++struct brcmf_if;
+
+ struct brcmf_fwvid_ops {
+- int (*attach)(struct brcmf_pub *drvr);
+- void (*detach)(struct brcmf_pub *drvr);
++ void (*feat_attach)(struct brcmf_if *ifp);
++ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
+ };
+
+ /* exported functions */
+@@ -20,28 +22,29 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod,
+ int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod);
+
+ /* core driver functions */
+-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr);
+-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr);
++int brcmf_fwvid_attach(struct brcmf_pub *drvr);
++void brcmf_fwvid_detach(struct brcmf_pub *drvr);
+ const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr);
+
+-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr)
++static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp)
+ {
+- int ret;
++ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+- ret = brcmf_fwvid_attach_ops(drvr);
+- if (ret)
+- return ret;
++ if (!vops->feat_attach)
++ return;
+
+- return drvr->vops->attach(drvr);
++ vops->feat_attach(ifp);
+ }
+
+-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr)
++static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp,
++ struct cfg80211_crypto_settings *crypto)
+ {
+- if (!drvr->vops)
+- return;
++ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
++
++ if (!vops || !vops->set_sae_password)
++ return -EOPNOTSUPP;
+
+- drvr->vops->detach(drvr);
+- brcmf_fwvid_detach_ops(drvr);
++ return vops->set_sae_password(ifp, crypto);
+ }
+
+ #endif /* FWVID_H_ */
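The reworked brcmf_fwvid_ops above makes every vendor hook optional: callers NULL-check the ops table and degrade to -EOPNOTSUPP instead of requiring mandatory attach/detach callbacks. A compact sketch of that dispatch shape, with invented names (vendor_ops, vid_set_sae_password) standing in for the brcmfmac types:

#include <errno.h>
#include <stddef.h>

struct vendor_ops {
	void (*feat_attach)(void *ifp);				/* optional */
	int (*set_sae_password)(void *ifp, const void *crypto);	/* optional */
};

static int vid_set_sae_password(const struct vendor_ops *vops, void *ifp,
				const void *crypto)
{
	if (!vops || !vops->set_sae_password)
		return -EOPNOTSUPP;	/* unsupported, not a hard failure */
	return vops->set_sae_password(ifp, crypto);
}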
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index 80220685f5e451..a43af82691401e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -1675,6 +1675,15 @@ struct brcmf_random_seed_footer {
+ #define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de
+ #define BRCMF_RANDOM_SEED_LENGTH 0x100
+
++static noinline_for_stack void
++brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
++{
++ u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
++
++ get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
++ memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
++}
++
+ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ const struct firmware *fw, void *nvram,
+ u32 nvram_len)
+@@ -1717,7 +1726,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ .length = cpu_to_le32(rand_len),
+ .magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
+ };
+- void *randbuf;
+
+ /* Some Apple chips/firmwares expect a buffer of random
+ * data to be present before NVRAM
+@@ -1729,10 +1737,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ sizeof(footer));
+
+ address -= rand_len;
+- randbuf = kzalloc(rand_len, GFP_KERNEL);
+- get_random_bytes(randbuf, rand_len);
+- memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
+- kfree(randbuf);
++ brcmf_pcie_provide_random_bytes(devinfo, address);
+ }
+ } else {
+ brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
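The pcie.c hunk swaps a kzalloc/kfree pair for a fixed 256-byte buffer inside a noinline_for_stack helper, so the seed copy needs no heap allocation while the buffer's stack cost stays confined to the helper's own frame. A rough illustration of the same trick, assuming a GCC/Clang-style noinline attribute as an approximation of the kernel annotation:

#include <string.h>

#define SEED_LEN 0x100

static __attribute__((noinline)) void
provide_random(unsigned char *dst, void (*rng)(void *buf, unsigned int len))
{
	unsigned char buf[SEED_LEN];	/* confined to this helper's frame */

	rng(buf, sizeof(buf));
	memcpy(dst, buf, sizeof(buf));
}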
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+index 5573a47766ad5f..fd593b93ad404c 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+@@ -7,21 +7,17 @@
+ #include <core.h>
+ #include <bus.h>
+ #include <fwvid.h>
++#include <cfg80211.h>
+
+ #include "vops.h"
+
+-static int brcmf_wcc_attach(struct brcmf_pub *drvr)
++static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp,
++ struct cfg80211_crypto_settings *crypto)
+ {
+- pr_debug("%s: executing\n", __func__);
+- return 0;
+-}
+-
+-static void brcmf_wcc_detach(struct brcmf_pub *drvr)
+-{
+- pr_debug("%s: executing\n", __func__);
++ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len,
++ BRCMF_WSEC_PASSPHRASE);
+ }
+
+ const struct brcmf_fwvid_ops brcmf_wcc_ops = {
+- .attach = brcmf_wcc_attach,
+- .detach = brcmf_wcc_detach,
++ .set_sae_password = brcmf_wcc_set_sae_pwd,
+ };
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+index 543e93ec49d228..9ab669487de4de 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+@@ -1086,6 +1086,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
++ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->extra_tx_headroom = brcms_c_get_header_len();
+ hw->queues = N_TX_QUEUES;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+index ccc621b8ed9f2b..4a1fe982a948e4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
+ return sh;
+ }
+
+-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
++static void wlc_phy_timercb_phycal(void *ptr)
+ {
++ struct brcms_phy *pi = ptr;
+ uint delay = 5;
+
+ if (PHY_PERICAL_MPHASE_PENDING(pi)) {
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+index 7717eb85a1db68..47c0e8e429e544 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -2567,7 +2567,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+
+ struct lcnphy_txgains cal_gains, temp_gains;
+ u16 hash;
+- u8 band_idx;
+ int j;
+ u16 ncorr_override[5];
+ u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+@@ -2599,6 +2598,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+ u16 *values_to_save;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+
++ if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec)))
++ return;
++
+ values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
+ if (NULL == values_to_save)
+ return;
+@@ -2662,20 +2664,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+ hash = (target_gains->gm_gain << 8) |
+ (target_gains->pga_gain << 4) | (target_gains->pad_gain);
+
+- band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
+-
+ cal_gains = *target_gains;
+ memset(ncorr_override, 0, sizeof(ncorr_override));
+- for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
+- if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
++ for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) {
++ if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) {
+ cal_gains.gm_gain =
+- tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
++ tbl_iqcal_gainparams_lcnphy[0][j][1];
+ cal_gains.pga_gain =
+- tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
++ tbl_iqcal_gainparams_lcnphy[0][j][2];
+ cal_gains.pad_gain =
+- tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
++ tbl_iqcal_gainparams_lcnphy[0][j][3];
+ memcpy(ncorr_override,
+- &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
++ &tbl_iqcal_gainparams_lcnphy[0][j][3],
+ sizeof(ncorr_override));
+ break;
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+index a0de5db0cd6467..b7238179153653 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
+ }
+
+ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+- void (*fn)(struct brcms_phy *pi),
++ void (*fn)(void *pi),
+ void *arg, const char *name)
+ {
+ return (struct wlapi_timer *)
+- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
+- arg, name);
++ brcms_init_timer(physhim->wl, fn, arg, name);
+ }
+
+ void wlapi_free_timer(struct wlapi_timer *t)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+index dd8774717adee1..27d0934e600ed3 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+
+ /* PHY to WL utility functions */
+ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+- void (*fn)(struct brcms_phy *pi),
++ void (*fn)(void *pi),
+ void *arg, const char *name);
+ void wlapi_free_timer(struct wlapi_timer *t);
+ void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
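The phy_shim change above retypes the timer callback from void (*)(struct brcms_phy *) to void (*)(void *), removing the function-pointer cast at the brcms_init_timer() call site; calling a function through a cast-incompatible pointer type is undefined behaviour and trips control-flow-integrity checking. The shape of the fix, with a hypothetical struct phy:

struct phy {
	int chanspec;
};

/* Matches the type the timer core actually calls through; the cast to the
 * concrete type happens inside the callback, not on the pointer itself. */
static void timercb_phycal(void *ptr)
{
	struct phy *pi = ptr;

	(void)pi;	/* ... run the periodic calibration ... */
}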
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+index 8d5f9dce71d58b..dc9667586de9eb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+@@ -299,3 +299,9 @@ MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
++
++MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
++MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
++MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
++MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
++MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+index b9893b22e41da6..cebd3c91756fe5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+@@ -134,12 +134,10 @@ static const struct iwl_base_params iwl_bz_base_params = {
+ .ht_params = &iwl_gl_a_ht_params
+
+ /*
+- * If the device doesn't support HE, no need to have that many buffers.
+- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+-#define IWL_NUM_RBDS_NON_HE 512
+-#define IWL_NUM_RBDS_BZ_HE 4096
++#define IWL_NUM_RBDS_BZ_EHT (512 * 16)
+
+ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_BZ,
+@@ -154,22 +152,33 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+ };
+
++const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
++ .device_family = IWL_DEVICE_FAMILY_BZ,
++ .base_params = &iwl_bz_base_params,
++ .mq_rx_supported = true,
++ .rf_id = true,
++ .gen2 = true,
++ .umac_prph_offset = 0x300000,
++ .xtal_latency = 12000,
++ .low_latency_xtal = true,
++};
++
+ const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+
+ const struct iwl_cfg iwl_cfg_bz = {
+ .fw_name_mac = "bz",
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+- .num_rbds = IWL_NUM_RBDS_BZ_HE,
++ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
+ };
+
+ const struct iwl_cfg iwl_cfg_gl = {
+ .fw_name_mac = "gl",
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+- .num_rbds = IWL_NUM_RBDS_BZ_HE,
++ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
+ };
+
+
+@@ -181,3 +190,5 @@ MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
++
++MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+index ad283fd22e2a26..604e9cef6baacd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+@@ -127,12 +127,10 @@ static const struct iwl_base_params iwl_sc_base_params = {
+ .ht_params = &iwl_22000_ht_params
+
+ /*
+- * If the device doesn't support HE, no need to have that many buffers.
+- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
+ * A-MPDU, with additional overhead to account for processing time.
+ */
+-#define IWL_NUM_RBDS_NON_HE 512
+-#define IWL_NUM_RBDS_SC_HE 4096
++#define IWL_NUM_RBDS_SC_EHT (512 * 16)
+
+ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_SC,
+@@ -153,8 +151,8 @@ const struct iwl_cfg iwl_cfg_sc = {
+ .fw_name_mac = "sc",
+ .uhb_supported = true,
+ IWL_DEVICE_SC,
+- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
+- .num_rbds = IWL_NUM_RBDS_SC_HE,
++ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
++ .num_rbds = IWL_NUM_RBDS_SC_EHT,
+ };
+
+ MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 60a7b61d59aa33..ca1daec641c4fc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -3,6 +3,7 @@
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2023 Intel Corporation
+ *****************************************************************************/
+
+ #include <linux/kernel.h>
+@@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
+ }
+
+- iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
++ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);
+
+ freed = 0;
+
+@@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+- &reclaimed_skbs);
++ &reclaimed_skbs, false);
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index b26f90e522564f..9943e2d21a8f53 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -618,7 +618,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 2) {
+- ret = PTR_ERR(wifi_pkg);
++ ret = -EINVAL;
+ goto out_free;
+ }
+
+@@ -634,7 +634,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 1) {
+- ret = PTR_ERR(wifi_pkg);
++ ret = -EINVAL;
+ goto out_free;
+ }
+
+@@ -650,7 +650,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+- ret = PTR_ERR(wifi_pkg);
++ ret = -EINVAL;
+ goto out_free;
+ }
+
+@@ -707,7 +707,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 2) {
+- ret = PTR_ERR(wifi_pkg);
++ ret = -EINVAL;
+ goto out_free;
+ }
+
+@@ -723,7 +723,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 1) {
+- ret = PTR_ERR(wifi_pkg);
++ ret = -EINVAL;
+ goto out_free;
+ }
+
+@@ -739,7 +739,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+- ret = PTR_ERR(wifi_pkg);
++ ret = -EINVAL;
+ goto out_free;
+ }
+
+@@ -767,7 +767,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+ * from index 1, so the maximum value allowed here is
+ * ACPI_SAR_PROFILES_NUM - 1.
+ */
+- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
++ if (n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+@@ -867,22 +867,25 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
+ if (entry->type != ACPI_TYPE_INTEGER ||
+- entry->integer.value > num_profiles) {
++ entry->integer.value > num_profiles ||
++ entry->integer.value <
++ rev_data[idx].min_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+- num_profiles = entry->integer.value;
+
+ /*
+- * this also validates >= min_profiles since we
+- * otherwise wouldn't have gotten the data when
+- * looking up in ACPI
++ * Check to see if we received package count
++ * same as max # of profiles
+ */
+ if (wifi_pkg->package.count !=
+ hdr_size + profile_size * num_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
++
++ /* Number of valid profiles */
++ num_profiles = entry->integer.value;
+ }
+ goto read_table;
+ }
+@@ -1088,6 +1091,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+ goto read_table;
+ }
+
++ ret = PTR_ERR(wifi_pkg);
++ goto out_free;
++
+ read_table:
+ fwrt->ppag_ver = tbl_rev;
+ flags = &wifi_pkg->package.elements[1];
+@@ -1265,7 +1271,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
+ if (IS_ERR(data))
+ return;
+
+- /* try to read wtas table revision 1 or revision 0*/
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WPFC_WIFI_DATA_SIZE,
+ &tbl_rev);
+@@ -1275,13 +1280,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
+ if (tbl_rev != 0)
+ goto out_free;
+
+- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
++ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
++ ACPI_WPFC_WIFI_DATA_SIZE - 1);
+
+ for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
+- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
+- return;
++ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
++ goto out_free;
+ tmp.filter_cfg_chains[i] =
+- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
++ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
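The acpi.c fix above replaces PTR_ERR(wifi_pkg) with -EINVAL because those branches run only when IS_ERR(wifi_pkg) is false, so PTR_ERR() would have returned the pointer's address rather than an errno. A small sketch of the contract, using userspace approximations of the kernel's IS_ERR()/PTR_ERR() (the 4095 threshold mirrors MAX_ERRNO):

#include <errno.h>

static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static inline long ptr_err(const void *p)
{
	return (long)p;
}

int check_pkg(void *wifi_pkg, int tbl_rev)
{
	if (is_err(wifi_pkg))
		return (int)ptr_err(wifi_pkg);	/* errno encoded in the pointer */
	if (tbl_rev != 2)
		return -EINVAL;	/* wifi_pkg is valid; PTR_ERR() would be garbage */
	return 0;
}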
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+index c36c62d6414de8..8784e50407be8a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+@@ -56,7 +56,7 @@
+ #define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
+ ACPI_SAR_NUM_CHAINS_REV2 * \
+ ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
+-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
++#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
+
+ /* revision 0 and 1 are identical, except for the semantics in the FW */
+ #define ACPI_GEO_NUM_BANDS_REV0 2
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+index ba538d70985f45..39bee9c00e071a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+@@ -13,6 +13,7 @@
+ #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
+ #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+ #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
++#define IWL_FW_INI_PRESET_DISABLE 0xff
+
+ /**
+ * struct iwl_fw_ini_hcmd
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+index 93078f8cc08c03..af487a2738f826 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+@@ -1123,6 +1123,19 @@ struct iwl_umac_scan_abort {
+ __le32 flags;
+ } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
++/**
++ * enum iwl_umac_scan_abort_status
++ *
++ * @IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS: scan was successfully aborted
++ * @IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS: scan abort is in progress
++ * @IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND: nothing to abort
++ */
++enum iwl_umac_scan_abort_status {
++ IWL_UMAC_SCAN_ABORT_STATUS_SUCCESS = 0,
++ IWL_UMAC_SCAN_ABORT_STATUS_IN_PROGRESS,
++ IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND,
++};
++
+ /**
+ * struct iwl_umac_scan_complete
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 3ab6a68f1e9f94..2a408e1ce06ec1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -2933,8 +2933,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+ struct iwl_fw_dbg_params params = {0};
+ struct iwl_fwrt_dump_data *dump_data =
+ &fwrt->dump.wks[wk_idx].dump_data;
+- u32 policy;
+- u32 time_point;
+ if (!test_bit(wk_idx, &fwrt->dump.active_wks))
+ return;
+
+@@ -2965,13 +2963,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
+
+ iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+
+- policy = le32_to_cpu(dump_data->trig->apply_policy);
+- time_point = le32_to_cpu(dump_data->trig->time_point);
++ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
++ u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
++ u32 time_point = le32_to_cpu(dump_data->trig->time_point);
+
+- if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+- IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+- iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
++ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
++ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
++ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
++ }
+ }
++
+ if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
+ iwl_force_nmi(fwrt->trans);
+
+@@ -3205,7 +3206,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+ {
+ int ret __maybe_unused = 0;
+
+- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
++ if (!iwl_trans_fw_running(fwrt->trans))
+ return;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 3cdbc6ac7ae5d6..0b71a71ca240ba 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -141,7 +141,11 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
+
+ event_cfg.enabled_severities = cpu_to_le32(enabled_severities);
+
+- ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
++ if (fwrt->ops && fwrt->ops->send_hcmd)
++ ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
++ else
++ ret = -EPERM;
++
+ IWL_INFO(fwrt,
+ "sent host event cfg with enabled_severities: %u, ret: %d\n",
+ enabled_severities, ret);
+@@ -226,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ .data = { NULL, },
+ };
+
+- if (fwrt->ops && fwrt->ops->fw_running &&
+- !fwrt->ops->fw_running(fwrt->ops_ctx))
++ if (!iwl_trans_fw_running(fwrt->trans))
+ return -EIO;
+
+ if (count < header_size + 1 || count > 1024 * 4)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+index 650e4bde9c17b9..56ee0ceed78ab7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+@@ -255,21 +255,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
+ struct pnvm_sku_package *package;
+ u8 *image = NULL;
+
+- /* First attempt to get the PNVM from BIOS */
+- package = iwl_uefi_get_pnvm(trans_p, len);
+- if (!IS_ERR_OR_NULL(package)) {
+- if (*len >= sizeof(*package)) {
+- /* we need only the data */
+- *len -= sizeof(*package);
+- image = kmemdup(package->data, *len, GFP_KERNEL);
++ /* Get PNVM from BIOS for non-Intel SKU */
++ if (trans_p->sku_id[2]) {
++ package = iwl_uefi_get_pnvm(trans_p, len);
++ if (!IS_ERR_OR_NULL(package)) {
++ if (*len >= sizeof(*package)) {
++ /* we need only the data */
++ *len -= sizeof(*package);
++ image = kmemdup(package->data,
++ *len, GFP_KERNEL);
++ }
++ /*
++ * free package regardless of whether kmemdup
++ * succeeded
++ */
++ kfree(package);
++ if (image)
++ return image;
+ }
+- /* free package regardless of whether kmemdup succeeded */
+- kfree(package);
+- if (image)
+- return image;
+ }
+
+- /* If it's not available, try from the filesystem */
++ /* If it's not available, or for Intel SKU, try from the filesystem */
+ if (iwl_pnvm_get_from_fs(trans_p, &image, len))
+ return NULL;
+ return image;
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+index 702586945533ee..5812b58c92b06c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+@@ -18,7 +18,6 @@
+ struct iwl_fw_runtime_ops {
+ void (*dump_start)(void *ctx);
+ void (*dump_end)(void *ctx);
+- bool (*fw_running)(void *ctx);
+ int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
+ bool (*d3_debug_enable)(void *ctx);
+ };
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 241a9e3f2a1a71..dd3913617bb0be 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -86,10 +86,7 @@ enum iwl_nvm_type {
+ #define IWL_DEFAULT_MAX_TX_POWER 22
+ #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+ NETIF_F_TSO | NETIF_F_TSO6)
+-#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
+-#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
+- IWL_TX_CSUM_NETIF_FLAGS_BZ | \
+- NETIF_F_RXCSUM)
++#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM)
+
+ /* Antenna presence definitions */
+ #define ANT_NONE 0x0
+@@ -496,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
++extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
+ extern const char iwl9162_name[];
+ extern const char iwl9260_name[];
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index ef5baee6c9c52d..a97ed7cbe4d140 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2018-2023 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+ */
+ #include <linux/firmware.h>
+ #include "iwl-drv.h"
+@@ -103,6 +103,12 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
+ if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
+ return -EINVAL;
+
++ /* we use this as a string, ensure input was NUL terminated */
++ if (strnlen(debug_info->debug_cfg_name,
++ sizeof(debug_info->debug_cfg_name)) ==
++ sizeof(debug_info->debug_cfg_name))
++ return -EINVAL;
++
+ IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
+ debug_info->debug_cfg_name);
+
+@@ -1094,7 +1100,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
+ node_trig = (void *)node_tlv->data;
+ }
+
+- memcpy(node_trig->data + offset, trig->data, trig_data_len);
++ memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
+ node_tlv->length = cpu_to_le32(size);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+index 128059ca77e60a..06fb7d6653905a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+ */
+ #ifndef __iwl_dbg_tlv_h__
+ #define __iwl_dbg_tlv_h__
+@@ -10,7 +10,8 @@
+ #include <fw/file.h>
+ #include <fw/api/dbg-tlv.h>
+
+-#define IWL_DBG_TLV_MAX_PRESET 15
++#define IWL_DBG_TLV_MAX_PRESET 15
++#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
+
+ /**
+ * struct iwl_dbg_tlv_node - debug TLV node
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 3d87d26845e74b..47bea1855e8c8d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
+ kfree(drv->fw.ucode_capa.cmd_versions);
+ kfree(drv->fw.phy_integration_ver);
+ kfree(drv->trans->dbg.pc_data);
++ drv->trans->dbg.pc_data = NULL;
+
+ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
+ iwl_free_fw_img(drv, drv->fw.img + i);
+@@ -1303,10 +1304,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
+ case IWL_UCODE_TLV_CURRENT_PC:
+ if (tlv_len < sizeof(struct iwl_pc_data))
+ goto invalid_tlv_len;
+- drv->trans->dbg.num_pc =
+- tlv_len / sizeof(struct iwl_pc_data);
+ drv->trans->dbg.pc_data =
+ kmemdup(tlv_data, tlv_len, GFP_KERNEL);
++ if (!drv->trans->dbg.pc_data)
++ return -ENOMEM;
++ drv->trans->dbg.num_pc =
++ tlv_len / sizeof(struct iwl_pc_data);
+ break;
+ default:
+ IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
+@@ -1795,6 +1798,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ #endif
+
+ drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
++ if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
++ /* We have a non-default value in the module parameter,
++ * take its value
++ */
++ drv->trans->dbg.domains_bitmap &= 0xffff;
++ if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
++ if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
++ IWL_ERR(trans,
++ "invalid enable_ini module parameter value: max = %d, using 0 instead\n",
++ ENABLE_INI);
++ iwlwifi_mod_params.enable_ini = 0;
++ }
++ drv->trans->dbg.domains_bitmap =
++ BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
++ }
++ }
+
+ ret = iwl_request_firmware(drv, true);
+ if (ret) {
+@@ -1807,8 +1826,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ err_fw:
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove_recursive(drv->dbgfs_drv);
+- iwl_dbg_tlv_free(drv->trans);
+ #endif
++ iwl_dbg_tlv_free(drv->trans);
+ kfree(drv);
+ err:
+ return ERR_PTR(ret);
+@@ -1843,8 +1862,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
+ kfree(drv);
+ }
+
+-#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
+-
+ /* shared module parameters */
+ struct iwl_mod_params iwlwifi_mod_params = {
+ .fw_restart = true,
+@@ -1964,38 +1981,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
+ MODULE_PARM_DESC(uapsd_disable,
+ "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+
+-static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+-{
+- int ret = 0;
+- bool res;
+- __u32 new_enable_ini;
+-
+- /* in case the argument type is a number */
+- ret = kstrtou32(arg, 0, &new_enable_ini);
+- if (!ret) {
+- if (new_enable_ini > ENABLE_INI) {
+- pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
+- return -EINVAL;
+- }
+- goto out;
+- }
+-
+- /* in case the argument type is boolean */
+- ret = kstrtobool(arg, &res);
+- if (ret)
+- return ret;
+- new_enable_ini = (res ? ENABLE_INI : 0);
+-
+-out:
+- iwlwifi_mod_params.enable_ini = new_enable_ini;
+- return 0;
+-}
+-
+-static const struct kernel_param_ops enable_ini_ops = {
+- .set = enable_ini_set
+-};
+-
+-module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
++module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
+ MODULE_PARM_DESC(enable_ini,
+ "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
+ "Debug INI TLV FW debug infrastructure (default: 16)");
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index 31176897b74638..26870fc12df810 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -668,7 +668,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+@@ -792,7 +791,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+@@ -1003,8 +1001,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+ if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iftype_data->eht_cap.has_eht) {
+ iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
+- ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
++ ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
+ ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+@@ -1012,7 +1009,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK);
++ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
++ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK);
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &=
+ ~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP);
+@@ -2040,7 +2038,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
+ nvm->sku_cap_mimo_disabled =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
+- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
++ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
+ nvm->sku_cap_11be_enable = true;
+
+ /* Initialize PHY sku data */
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+index 6dd381ff0f9e70..2a63968b0e55b8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+@@ -348,8 +348,8 @@
+ #define RFIC_REG_RD 0xAD0470
+ #define WFPM_CTRL_REG 0xA03030
+ #define WFPM_OTP_CFG1_ADDR 0x00a03098
+-#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
+-#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
++#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5)
++#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4)
+ #define WFPM_OTP_BZ_BNJ_JACKET_BIT 5
+ #define WFPM_OTP_BZ_BNJ_CDB_BIT 4
+ #define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+index 3b6b0e03037f15..e8f48cb8d2da19 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+@@ -56,6 +56,10 @@
+ * 6) Eventually, the free function will be called.
+ */
+
++/* default preset 0 (start from bit 16)*/
++#define IWL_FW_DBG_DOMAIN_POS 16
++#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS)
++
+ #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
+
+ #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
+@@ -274,7 +278,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
+ #define IWL_MGMT_TID 15
+ #define IWL_FRAME_LIMIT 64
+ #define IWL_MAX_RX_HW_QUEUES 16
+-#define IWL_9000_MAX_RX_HW_QUEUES 6
++#define IWL_9000_MAX_RX_HW_QUEUES 1
+
+ /**
+ * enum iwl_wowlan_status - WoWLAN image/device status
+@@ -584,7 +588,7 @@ struct iwl_trans_ops {
+ int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
+ void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+- struct sk_buff_head *skbs);
++ struct sk_buff_head *skbs, bool is_flush);
+
+ void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
+
+@@ -1269,14 +1273,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ }
+
+ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+- int ssn, struct sk_buff_head *skbs)
++ int ssn, struct sk_buff_head *skbs,
++ bool is_flush)
+ {
+ if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ return;
+ }
+
+- trans->ops->reclaim(trans, queue, ssn, skbs);
++ trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
+ }
+
+ static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
+@@ -1549,8 +1554,8 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
+
+ /* prevent double restarts due to the same erroneous FW */
+ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
+- iwl_op_mode_nic_error(trans->op_mode, sync);
+ trans->state = IWL_TRANS_NO_FW;
++ iwl_op_mode_nic_error(trans->op_mode, sync);
+ }
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+index 243eccc68cb050..f7bec6f3d75847 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+@@ -103,7 +103,7 @@
+ #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
+ #define IWL_MVM_FTM_RESP_NDP_SUPPORT true
+ #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
+-#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5
++#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
+ #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
+ #define IWL_MVM_D3_DEBUG false
+ #define IWL_MVM_USE_TWT true
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index f6488b4bbe68bc..08d1fab7f53c3a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -461,12 +461,10 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
+ struct wowlan_key_rsc_v5_data data = {};
+ int i;
+
+- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
++ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
+ if (!data.rsc)
+ return -ENOMEM;
+
+- memset(data.rsc, 0xff, sizeof(*data.rsc));
+-
+ for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
+ data.rsc->mcast_key_id_map[i] =
+ IWL_MCAST_KEY_MAP_INVALID;
+@@ -597,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+ void *_data)
+ {
+ struct wowlan_key_gtk_type_iter *data = _data;
++ __le32 *cipher = NULL;
++
++ if (key->keyidx == 4 || key->keyidx == 5)
++ cipher = &data->kek_kck_cmd->igtk_cipher;
++ if (key->keyidx == 6 || key->keyidx == 7)
++ cipher = &data->kek_kck_cmd->bigtk_cipher;
+
+ switch (key->cipher) {
+ default:
+@@ -608,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+ return;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
++ if (cipher)
++ *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ return;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
++ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
++ if (cipher)
++ *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (!sta)
+@@ -1842,9 +1849,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
+ memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
++ case WLAN_CIPHER_SUITE_AES_CMAC:
+ BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
+ memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
+ break;
++ default:
++ WARN_ON(1);
+ }
+ }
+
+@@ -2012,6 +2022,16 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
+ if (IS_ERR(key_config))
+ return false;
+ ieee80211_set_key_rx_seq(key_config, 0, &seq);
++
++ if (key_config->keyidx == 4 || key_config->keyidx == 5) {
++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
++ struct iwl_mvm_vif_link_info *mvm_link =
++ mvmvif->link[link_id];
++
++ mvm_link->igtk = key_config;
++ }
++
+ return true;
+ }
+
+@@ -2094,7 +2114,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+
+ out:
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+- WOWLAN_GET_STATUSES, 0) < 10) {
++ WOWLAN_GET_STATUSES,
++ IWL_FW_CMD_VER_UNKNOWN) < 10) {
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
+@@ -2157,7 +2178,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
+ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_igtk_status *data)
+ {
++ int i;
++
+ BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
++ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
+
+ if (!data->key_len)
+ return;
+@@ -2169,7 +2193,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ + WOWLAN_IGTK_MIN_INDEX;
+
+ memcpy(status->igtk.key, data->key, sizeof(data->key));
+- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
++
++ /* mac80211 expects big endian for memcmp() to work, convert */
++ for (i = 0; i < sizeof(data->ipn); i++)
++ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
+ }
+
+ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
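iwl_mvm_convert_igtk() above now mirrors the IPN byte order because, per the new comment, the firmware reports the packet number little-endian while mac80211's replay check memcmp()s big-endian byte arrays. The conversion, isolated (ipn_to_big_endian() is an invented name):

#include <stddef.h>

static void ipn_to_big_endian(unsigned char *dst, const unsigned char *src,
			      size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[len - i - 1];	/* mirror LE bytes into BE order */
}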
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+index cf27f106d4d56a..7057421e513bab 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+@@ -1673,7 +1673,7 @@ static ssize_t _iwl_dbgfs_link_sta_##name##_write(struct file *file, \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+- if (copy_from_user(buf, user_buf, sizeof(buf))) \
++ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return _iwl_dbgfs_link_sta_wrap_write(iwl_dbgfs_##name##_write, \
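The one-line debugfs fix above bounds copy_from_user() by buf_size rather than sizeof(buf): with a short user write, copying the full buffer size overreads the user mapping and can fault. The same pattern in plain C, with memcpy standing in for copy_from_user():

#include <stddef.h>
#include <string.h>

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

long dbgfs_write(const char *user_buf, size_t count)
{
	char buf[32] = {0};			/* zero-filled: NUL-terminated */
	size_t buf_size = min_sz(count, sizeof(buf) - 1);

	memcpy(buf, user_buf, buf_size);	/* was sizeof(buf): an overread */
	/* ... parse buf ... */
	return (long)buf_size;
}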
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+index 233ae81884a0ea..ae0eb585b61eef 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ if (!pasn)
+ return -ENOBUFS;
+
++ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
++
+ pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+
+ switch (pasn->cipher) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+index b49781d1a07a7a..10b9219b3bfd35 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2023 Intel Corporation
+ */
+ #include <net/cfg80211.h>
+ #include <linux/etherdevice.h>
+@@ -302,7 +302,12 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_pasn_sta *sta)
+ {
+ list_del(&sta->list);
+- iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
++
++ if (iwl_mvm_has_mld_api(mvm->fw))
++ iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
++ else
++ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
++
+ iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
+ kfree(sta);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 1d5ee4330f29f3..51f396287dc698 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -92,20 +92,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ {
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+- __le32 *dump_data = mfu_dump_notif->data;
+- int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+- int i;
+
+ if (mfu_dump_notif->index_num == 0)
+ IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ le32_to_cpu(mfu_dump_notif->assert_id));
+-
+- for (i = 0; i < n_words; i++)
+- IWL_DEBUG_INFO(mvm,
+- "MFUART assert dump, dword %u: 0x%08x\n",
+- le16_to_cpu(mfu_dump_notif->index_num) *
+- n_words + i,
+- le32_to_cpu(dump_data[i]));
+ }
+
+ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+index ace82e2c5bd913..4ab55a1fcbf042 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+@@ -53,7 +53,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct iwl_link_config_cmd cmd = {};
+- struct iwl_mvm_phy_ctxt *phyctxt;
+
+ if (WARN_ON_ONCE(!link_info))
+ return -EINVAL;
+@@ -61,7 +60,7 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ mvmvif);
+- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
++ if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
+ return -EINVAL;
+
+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+@@ -77,12 +76,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+ cmd.mac_id = cpu_to_le32(mvmvif->id);
+ cmd.spec_link_id = link_conf->link_id;
+- /* P2P-Device already has a valid PHY context during add */
+- phyctxt = link_info->phy_ctxt;
+- if (phyctxt)
+- cmd.phy_id = cpu_to_le32(phyctxt->id);
+- else
+- cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
++ WARN_ON_ONCE(link_info->phy_ctxt);
++ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+
+ memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
+
+@@ -194,11 +189,14 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ flags_mask |= LINK_FLG_MU_EDCA_CW;
+ }
+
+- if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be)
+- cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing);
+- else
+- /* This flag can be set only if the MAC has eht support */
+- changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
++ if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
++ if (iwlwifi_mod_params.disable_11be ||
++ !link_conf->eht_support)
++ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
++ else
++ cmd.puncture_mask =
++ cpu_to_le16(link_conf->eht_puncturing);
++ }
+
+ cmd.bss_color = link_conf->he_bss_color.color;
+
+@@ -245,7 +243,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int ret;
+
+ if (WARN_ON(!link_info ||
+- link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
++ link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+index 7369a45f7f2bd7..9c97691e603844 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+@@ -286,6 +286,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ mvmvif->time_event_data.id = TE_MAX;
+
++ mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
++ mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
++ mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
++
+ /* No need to allocate data queues to P2P Device MAC and NAN.*/
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return 0;
+@@ -300,10 +304,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ }
+
+- mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+- mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+- mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
+-
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+ mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 5918c1f2b10c38..2d35a8865d00b3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -318,7 +318,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
+ !iwlwifi_mod_params.disable_11ax &&
+ !iwlwifi_mod_params.disable_11be)
+- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
++ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+
+ /* With MLD FW API, it tracks timing by itself,
+ * no need for any timing from the host
+@@ -352,7 +352,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ }
+
+- if (iwl_mvm_has_new_rx_api(mvm))
++	/* We want to use mac80211's reorder buffer for 9000 */
++ if (iwl_mvm_has_new_rx_api(mvm) &&
++ mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000)
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+@@ -598,7 +600,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
+- IWL_FW_CMD_VER_UNKNOWN) == 3)
++ IWL_FW_CMD_VER_UNKNOWN) >= 3)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+@@ -764,20 +766,10 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ sta = NULL;
+
+- /* If there is no sta, and it's not offchannel - send through AP */
++ /* this shouldn't even happen: just drop */
+ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
+- !offchannel) {
+- struct iwl_mvm_vif *mvmvif =
+- iwl_mvm_vif_from_mac80211(info->control.vif);
+- u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id);
+-
+- if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
+- /* mac80211 holds rcu read lock */
+- sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+- if (IS_ERR_OR_NULL(sta))
+- goto drop;
+- }
+- }
++ !offchannel)
++ goto drop;
+
+ if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED &&
+ !ieee80211_is_probe_resp(hdr->frame_control)) {
+@@ -1033,6 +1025,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
++ mvmvif->ap_sta = NULL;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA;
+@@ -1049,6 +1042,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
+ }
+
++static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
++{
++ struct iwl_mvm *mvm = data;
++ struct iwl_mvm_sta *mvm_sta;
++ struct ieee80211_vif *vif;
++ int link_id;
++
++ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
++ vif = mvm_sta->vif;
++
++ if (!sta->valid_links)
++ return;
++
++ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
++ struct iwl_mvm_link_sta *mvm_link_sta;
++
++ mvm_link_sta =
++ rcu_dereference_check(mvm_sta->link[link_id],
++ lockdep_is_held(&mvm->mutex));
++ if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
++ /*
++ * We have a link STA but the link is inactive in
++ * mac80211. This will happen if we failed to
++			 * deactivate the link but mac80211 rolled back the
++ * deactivation of the link.
++ * Delete the stale data to avoid issues later on.
++ */
++ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
++ link_id, false);
++ }
++ }
++}
++
+ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+ {
+ iwl_mvm_stop_device(mvm);
+@@ -1071,6 +1097,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+ */
+ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+
++ /* cleanup stations as links may be gone after restart */
++ ieee80211_iterate_stations_atomic(mvm->hw,
++ iwl_mvm_cleanup_sta_iterator, mvm);
++
+ mvm->p2p_device_vif = NULL;
+
+ iwl_mvm_reset_phy_ctxts(mvm);
+@@ -1511,6 +1541,17 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
+ IWL_STA_MULTICAST);
+ }
+
++void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
++{
++ lockdep_assert_held(&mvm->mutex);
++
++ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
++ return;
++
++ INIT_DELAYED_WORK(&mvmvif->csa_work,
++ iwl_mvm_channel_switch_disconnect_wk);
++}
++
+ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+ {
+@@ -1520,6 +1561,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+
+ mutex_lock(&mvm->mutex);
+
++ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
++
+ mvmvif->mvm = mvm;
+
+ /* the first link always points to the default one */
+@@ -1589,36 +1632,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+- /*
+- * P2P_DEVICE interface does not have a channel context assigned to it,
+- * so a dedicated PHY context is allocated to it and the corresponding
+- * MAC context is bound to it at this stage.
+- */
+- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+-
+- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+- if (!mvmvif->deflink.phy_ctxt) {
+- ret = -ENOSPC;
+- goto out_free_bf;
+- }
+-
+- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+- ret = iwl_mvm_binding_add_vif(mvm, vif);
+- if (ret)
+- goto out_unref_phy;
+-
+- ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+- if (ret)
+- goto out_unbind;
+-
+- /* Save a pointer to p2p device vif, so it can later be used to
+- * update the p2p device MAC when a GO is started/stopped */
++ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_device_vif = vif;
+- }
+
+ iwl_mvm_tcm_add_vif(mvm, vif);
+- INIT_DELAYED_WORK(&mvmvif->csa_work,
+- iwl_mvm_channel_switch_disconnect_wk);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvm->monitor_on = true;
+@@ -1643,11 +1660,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+
+ goto out_unlock;
+
+- out_unbind:
+- iwl_mvm_binding_remove_vif(mvm, vif);
+- out_unref_phy:
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+@@ -1665,6 +1677,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+ {
++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ /*
+ * Flush the ROC worker which will flush the OFFCHANNEL queue.
+@@ -1673,6 +1687,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ */
+ flush_work(&mvm->roc_done_wk);
+ }
++
++ cancel_delayed_work_sync(&mvmvif->csa_work);
+ }
+
+ /* This function is doing the common part of removing the interface for
+@@ -1744,12 +1760,17 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ if (iwl_mvm_mac_remove_interface_common(hw, vif))
+ goto out;
+
++ /* Before the interface removal, mac80211 would cancel the ROC, and the
++ * ROC worker would be scheduled if needed. The worker would be flushed
++ * in iwl_mvm_prepare_mac_removal() and thus at this point there is no
++	 * binding etc., so nothing needs to be done here.
++ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++ if (mvmvif->deflink.phy_ctxt) {
++ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
++ mvmvif->deflink.phy_ctxt = NULL;
++ }
+ mvm->p2p_device_vif = NULL;
+- iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+- iwl_mvm_binding_remove_vif(mvm, vif);
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- mvmvif->deflink.phy_ctxt = NULL;
+ }
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+@@ -3690,6 +3711,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
+ NL80211_TDLS_SETUP);
+ }
+
++ if (ret)
++ return ret;
++
+ for_each_sta_active_link(vif, sta, link_sta, i)
+ link_sta->agg.max_rc_amsdu_len = 1;
+
+@@ -3698,6 +3722,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mvmvif->ap_sta = sta;
+
++ /*
++ * Initialize the rates here already - this really tells
++	 * the firmware only what the supported legacy rates
++	 * may be, since it's initialized already from what the
++ * AP advertised in the beacon/probe response. This will
++ * allow the firmware to send auth/assoc frames with one
++ * of the supported rates already, rather than having to
++ * use a mandatory rate.
++ * If we're the AP, we'll just assume mandatory rates at
++ * this point, but we know nothing about the STA anyway.
++ */
++ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
++
+ return 0;
+ }
+
+@@ -3789,6 +3826,16 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
+
+ mvm_sta->authorized = true;
+
++ /* MFP is set by default before the station is authorized.
++ * Clear it here in case it's not used.
++ */
++ if (!sta->mfp) {
++ int ret = callbacks->update_sta(mvm, vif, sta);
++
++ if (ret)
++ return ret;
++ }
++
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
+ return 0;
+@@ -3878,7 +3925,11 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
+
+ mutex_lock(&mvm->mutex);
+
+- /* this would be a mac80211 bug ... but don't crash */
++ /* this would be a mac80211 bug ... but don't crash, unless we had a
++ * firmware crash while we were activating a link, in which case it is
++	 * legit to have phy_ctxt = NULL. We don't bother suppressing the WARN
++	 * in the recovery flow since we spit tons of error messages anyway.
++ */
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) {
+ mutex_unlock(&mvm->mutex);
+@@ -4531,30 +4582,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
+ return ret;
+ }
+
+-static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm,
+- struct ieee80211_vif *vif,
+- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
++static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+- int ret = 0;
++ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+- /* Unbind the P2P_DEVICE from the current PHY context,
+- * and if the PHY context is not used remove it.
+- */
+- ret = iwl_mvm_binding_remove_vif(mvm, vif);
+- if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
++ ret = iwl_mvm_binding_add_vif(mvm, vif);
++ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ return ret;
+
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-
+- /* Bind the P2P_DEVICE to the current PHY Context */
+- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
+-
+- ret = iwl_mvm_binding_add_vif(mvm, vif);
+- WARN(ret, "Failed binding P2P_DEVICE\n");
+- return ret;
++ /* The station and queue allocation must be done only after the binding
++ * is done, as otherwise the FW might incorrectly configure its state.
++ */
++ return iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+ }
+
+ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+@@ -4565,7 +4606,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ {
+ static const struct iwl_mvm_roc_ops ops = {
+ .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
+- .switch_phy_ctxt = iwl_mvm_roc_switch_binding,
++ .link = iwl_mvm_roc_link,
+ };
+
+ return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+@@ -4581,7 +4622,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct cfg80211_chan_def chandef;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+- bool band_change_removal;
+ int ret, i;
+ u32 lmac_id;
+
+@@ -4610,82 +4650,61 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ /* handle below */
+ break;
+ default:
+- IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
++ IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
++ /* Try using a PHY context that is already in use */
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ phy_ctxt = &mvm->phy_ctxts[i];
+- if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt)
++ if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt)
+ continue;
+
+- if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
+- if (ret)
+- goto out_unlock;
++ if (channel == phy_ctxt->channel) {
++ if (mvmvif->deflink.phy_ctxt)
++ iwl_mvm_phy_ctxt_unref(mvm,
++ mvmvif->deflink.phy_ctxt);
+
++ mvmvif->deflink.phy_ctxt = phy_ctxt;
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+- goto schedule_time_event;
++ goto link_and_start_p2p_roc;
+ }
+ }
+
+- /* Need to update the PHY context only if the ROC channel changed */
+- if (channel == mvmvif->deflink.phy_ctxt->channel)
+- goto schedule_time_event;
+-
+- cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+-
+- /*
+- * Check if the remain-on-channel is on a different band and that
+- * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+- * so, we'll need to release and then re-configure here, since we
+- * must not remove a PHY context that's part of a binding.
++ /* If the currently used PHY context is configured with a matching
++	 * channel, use it
+ */
+- band_change_removal =
+- fw_has_capa(&mvm->fw->ucode_capa,
+- IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+- mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band;
+-
+- if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) {
+- /*
+- * Change the PHY context configuration as it is currently
+- * referenced only by the P2P Device MAC (and we can modify it)
+- */
+- ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt,
+- &chandef, 1, 1);
+- if (ret)
+- goto out_unlock;
++ if (mvmvif->deflink.phy_ctxt) {
++ if (channel == mvmvif->deflink.phy_ctxt->channel)
++ goto link_and_start_p2p_roc;
+ } else {
+- /*
+- * The PHY context is shared with other MACs (or we're trying to
+- * switch bands), so remove the P2P Device from the binding,
+- * allocate an new PHY context and create a new binding.
+- */
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+- 1, 1);
+- if (ret) {
+- IWL_ERR(mvm, "Failed to change PHY context\n");
+- goto out_unlock;
+- }
++ mvmvif->deflink.phy_ctxt = phy_ctxt;
++ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
++ }
+
+- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
+- if (ret)
+- goto out_unlock;
++ /* Configure the PHY context */
++ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+
+- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
++ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
++ 1, 1);
++ if (ret) {
++ IWL_ERR(mvm, "Failed to change PHY context\n");
++ goto out_unlock;
+ }
+
+-schedule_time_event:
+- /* Schedule the time events */
+- ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
++link_and_start_p2p_roc:
++ ret = ops->link(mvm, vif);
++ if (ret)
++ goto out_unlock;
+
++ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+@@ -5560,6 +5579,10 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+ int i;
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
++ /* we can't ask the firmware anything if it is dead */
++ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
++ &mvm->status))
++ return;
+ if (drop) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_flush_tx_path(mvm,
+@@ -5629,7 +5652,8 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ }
+
+ if (drop) {
+- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
++ if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id,
++ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ if (iwl_mvm_has_new_tx_api(mvm))
+@@ -5643,30 +5667,32 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
++ * If the firmware is dead, this can't work...
+ */
+- if (!drop && !iwl_mvm_has_new_tx_api(mvm))
++ if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
++ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
++ &mvm->status))
+ iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
+ }
+
+ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+ {
++ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+- int i;
++ struct iwl_mvm_link_sta *mvm_link_sta;
++ struct ieee80211_link_sta *link_sta;
++ int link_id;
+
+ mutex_lock(&mvm->mutex);
+- for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+- struct iwl_mvm_sta *mvmsta;
+- struct ieee80211_sta *tmp;
+-
+- tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+- lockdep_is_held(&mvm->mutex));
+- if (tmp != sta)
++ for_each_sta_active_link(vif, sta, link_sta, link_id) {
++ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
++ lockdep_is_held(&mvm->mutex));
++ if (!mvm_link_sta)
+ continue;
+
+- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+-
+- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
++ if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id,
++ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ }
+ mutex_unlock(&mvm->mutex);
+@@ -6045,7 +6071,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ .len[0] = sizeof(cmd),
+ .data[1] = data,
+ .len[1] = size,
+- .flags = sync ? 0 : CMD_ASYNC,
++ .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
+ };
+ int ret;
+
+@@ -6070,11 +6096,9 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ if (sync) {
+ lockdep_assert_held(&mvm->mutex);
+ ret = wait_event_timeout(mvm->rx_sync_waitq,
+- READ_ONCE(mvm->queue_sync_state) == 0 ||
+- iwl_mvm_is_radio_killed(mvm),
++ READ_ONCE(mvm->queue_sync_state) == 0,
+ HZ);
+- WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
+- "queue sync: failed to sync, state is 0x%lx\n",
++ WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx\n",
+ mvm->queue_sync_state);
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+index 2c9f2f71b083a5..7c9234929b4f1b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2022 - 2023 Intel Corporation
++ * Copyright (C) 2022 - 2024 Intel Corporation
+ */
+ #include <linux/kernel.h>
+ #include <net/mac80211.h>
+@@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
+ return 0;
+ }
+
+- /* AP group keys are per link and should be on the mcast STA */
++ /* AP group keys are per link and should be on the mcast/bcast STA */
+ if (vif->type == NL80211_IFTYPE_AP &&
+- !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
++ /* IGTK/BIGTK to bcast STA */
++ if (keyconf->keyidx >= 4)
++ return BIT(link_info->bcast_sta.sta_id);
++ /* GTK for data to mcast STA */
+ return BIT(link_info->mcast_sta.sta_id);
++ }
+
+ /* for client mode use the AP STA also for group keys */
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+@@ -57,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ struct ieee80211_key_conf *keyconf)
+ {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
++ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
+ u32 flags = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++ if (!pairwise)
+ flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
+
+ switch (keyconf->cipher) {
+@@ -91,7 +98,14 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mvmvif->ap_sta;
+
+- if (!IS_ERR_OR_NULL(sta) && sta->mfp)
++ /*
++ * If we are installing an iGTK (in AP or STA mode), we need to tell
++ * the firmware this key will en/decrypt MGMT frames.
++ * Same goes if we are installing a pairwise key for an MFP station.
++ * In case we're installing a groupwise key (which is not an iGTK),
++	 * we will not use this key for MGMT frames.
++ */
++ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
+ flags |= IWL_SEC_KEY_FLAG_MFP;
+
+ return flags;
+@@ -127,7 +141,7 @@ static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw,
+ if (sta != data->sta || key->link_id >= 0)
+ return;
+
+- err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd);
++ err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+
+ if (err)
+ data->err = err;
+@@ -145,8 +159,8 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
+ .new_sta_mask = new_sta_mask,
+ };
+
+- ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
+- &data);
++ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
++ &data);
+ return data.err;
+ }
+
+@@ -370,7 +384,7 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
+ if (!sec_key_ver)
+ return;
+
+- ieee80211_iter_keys_rcu(mvm->hw, vif,
+- iwl_mvm_sec_key_remove_ap_iter,
+- (void *)(uintptr_t)link_id);
++ ieee80211_iter_keys(mvm->hw, vif,
++ iwl_mvm_sec_key_remove_ap_iter,
++ (void *)(uintptr_t)link_id);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+index f313a8d771e42e..ad78c69cc6cb7a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+ iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
+- MAC_FILTER_IN_CONTROL_AND_MGMT |
++ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index b719843e94576e..4d9a872818a527 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -13,6 +13,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+
+ mutex_lock(&mvm->mutex);
+
++ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
++
+ mvmvif->mvm = mvm;
+
+ /* Not much to do here. The stack will not allow interface
+@@ -56,51 +58,21 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+- /*
+- * P2P_DEVICE interface does not have a channel context assigned to it,
+- * so a dedicated PHY context is allocated to it and the corresponding
+- * MAC context is bound to it at this stage.
+- */
+- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+- if (!mvmvif->deflink.phy_ctxt) {
+- ret = -ENOSPC;
+- goto out_free_bf;
+- }
+-
+- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+- if (ret)
+- goto out_unref_phy;
+-
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE |
+- LINK_CONTEXT_MODIFY_RATES_INFO,
+- true);
+- if (ret)
+- goto out_remove_link;
+-
+- ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+- if (ret)
+- goto out_remove_link;
++ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
++ if (ret)
++ goto out_free_bf;
+
+- /* Save a pointer to p2p device vif, so it can later be used to
+- * update the p2p device MAC when a GO is started/stopped
+- */
++ /* Save a pointer to p2p device vif, so it can later be used to
++ * update the p2p device MAC when a GO is started/stopped
++ */
++ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_device_vif = vif;
+- } else {
+- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+- if (ret)
+- goto out_free_bf;
+- }
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ goto out_free_bf;
+
+ iwl_mvm_tcm_add_vif(mvm, vif);
+- INIT_DELAYED_WORK(&mvmvif->csa_work,
+- iwl_mvm_channel_switch_disconnect_wk);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvm->monitor_on = true;
+@@ -119,10 +91,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+
+ goto out_unlock;
+
+- out_remove_link:
+- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+- out_unref_phy:
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+ out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+@@ -130,7 +98,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+ out_remove_mac:
+- mvmvif->deflink.phy_ctxt = NULL;
+ mvmvif->link[0] = NULL;
+ iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
+ out_unlock:
+@@ -185,14 +152,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
+
+ iwl_mvm_power_update_mac(mvm);
+
++ /* Before the interface removal, mac80211 would cancel the ROC, and the
++ * ROC worker would be scheduled if needed. The worker would be flushed
++ * in iwl_mvm_prepare_mac_removal() and thus at this point the link is
++	 * not active. So we only need to remove the link.
++ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++ if (mvmvif->deflink.phy_ctxt) {
++ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
++ mvmvif->deflink.phy_ctxt = NULL;
++ }
+ mvm->p2p_device_vif = NULL;
+-
+- /* P2P device uses only one link */
+- iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf);
+- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+- mvmvif->deflink.phy_ctxt = NULL;
++ iwl_mvm_remove_link(mvm, vif, &vif->bss_conf);
+ } else {
+ iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+ }
+@@ -298,17 +269,17 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
+ }
+ }
+
++ mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
++
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
+ mvmvif->link[link_id]->listen_lmac = true;
+ ret = iwl_mvm_esr_mode_active(mvm, vif);
+ if (ret) {
+ IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
+- return ret;
++ goto out;
+ }
+ }
+
+- mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
+-
+ if (switching_chanctx) {
+ /* reactivate if we turned this off during channel switch */
+ if (vif->type == NL80211_IFTYPE_AP)
+@@ -325,13 +296,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
+ * this needs the phy context assigned (and in FW?), and we cannot
+ * do it later because it needs to be initialized as soon as we're
+ * able to TX on the link, i.e. when active.
+- *
+- * Firmware restart isn't quite correct yet for MLO, but we don't
+- * need to do it in that case anyway since it will happen from the
+- * normal station state callback.
+ */
+- if (mvmvif->ap_sta &&
+- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
++ if (mvmvif->ap_sta) {
+ struct ieee80211_link_sta *link_sta;
+
+ rcu_read_lock();
+@@ -464,6 +430,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
+ mvmvif->ap_ibss_active = false;
+ }
+
++ iwl_mvm_link_changed(mvm, vif, link_conf,
++ LINK_CONTEXT_MODIFY_ACTIVE, false);
++
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
+ int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
+
+@@ -475,9 +444,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ iwl_mvm_mld_rm_snif_sta(mvm, vif);
+
+- iwl_mvm_link_changed(mvm, vif, link_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE, false);
+-
+ if (switching_chanctx)
+ return;
+ mvmvif->link[link_id]->phy_ctxt = NULL;
+@@ -653,7 +619,7 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
+ }
+
+ /* Update EHT Puncturing info */
+- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht)
++ if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+ if (link_changes) {
+@@ -968,36 +934,29 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
+ return 0;
+ }
+
+-static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm,
+- struct ieee80211_vif *vif,
+- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
++static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+- int ret = 0;
++ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+- /* Inorder to change the phy_ctx of a link, the link needs to be
+- * inactive. Therefore, first deactivate the link, then change its
+- * phy_ctx, and then activate it again.
+- */
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE, false);
+- if (WARN(ret, "Failed to deactivate link\n"))
++	/* The PHY context ID might have changed, so we need to set it */
++ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
++ if (WARN(ret, "Failed to set PHY context ID\n"))
+ return ret;
+
+- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+-
+- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
++ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
++ LINK_CONTEXT_MODIFY_ACTIVE |
++ LINK_CONTEXT_MODIFY_RATES_INFO,
++ true);
+
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
+- if (WARN(ret, "Failed to deactivate link\n"))
++ if (WARN(ret, "Failed linking P2P_DEVICE\n"))
+ return ret;
+
+- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+- LINK_CONTEXT_MODIFY_ACTIVE, true);
+- WARN(ret, "Failed binding P2P_DEVICE\n");
+- return ret;
++ /* The station and queue allocation must be done only after the linking
++ * is done, as otherwise the FW might incorrectly configure its state.
++ */
++ return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+ }
+
+ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -1006,7 +965,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ {
+ static const struct iwl_mvm_roc_ops ops = {
+ .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta,
+- .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx,
++ .link = iwl_mvm_mld_roc_link,
+ };
+
+ return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+@@ -1089,9 +1048,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
+ }
+ }
+
+- if (err)
+- goto out_err;
+-
+ err = 0;
+ if (new_links == 0) {
+ mvmvif->link[0] = &mvmvif->deflink;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+index 524852cf5cd2db..e87cc1ddb9c2fa 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+@@ -9,7 +9,9 @@
+ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int filter_link_id)
+ {
++ struct ieee80211_link_sta *link_sta;
+ struct iwl_mvm_sta *mvmsta;
++ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ u32 result = 0;
+
+@@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ return 0;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
++ vif = mvmsta->vif;
+
+ /* it's easy when the STA is not an MLD */
+ if (!sta->valid_links)
+ return BIT(mvmsta->deflink.sta_id);
+
+ /* but if it is an MLD, get the mask of all the FW STAs it has ... */
+- for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
+- struct iwl_mvm_link_sta *link_sta;
++ for_each_sta_active_link(vif, sta, link_sta, link_id) {
++ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ /* unless we have a specific link in mind */
+ if (filter_link_id >= 0 && link_id != filter_link_id)
+ continue;
+
+- link_sta =
++ mvm_link_sta =
+ rcu_dereference_check(mvmsta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+- if (!link_sta)
++ if (!mvm_link_sta)
+ continue;
+
+- result |= BIT(link_sta->sta_id);
++ result |= BIT(mvm_link_sta->sta_id);
+ }
+
+ return result;
+@@ -347,7 +350,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
+ return -EINVAL;
+
+ if (flush)
+- iwl_mvm_flush_sta(mvm, int_sta, true);
++ iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
+
+ iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid);
+
+@@ -512,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
+ }
+
+-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+- struct iwl_mvm_sta *mvm_sta,
+- struct iwl_mvm_link_sta *mvm_sta_link,
+- unsigned int link_id,
+- bool is_in_fw)
++void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
++ struct iwl_mvm_sta *mvm_sta,
++ struct iwl_mvm_link_sta *mvm_sta_link,
++ unsigned int link_id,
++ bool is_in_fw)
+ {
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
+ is_in_fw ? ERR_PTR(-EINVAL) : NULL);
+@@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+ {
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
++ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+- for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
+- if (!rcu_access_pointer(sta->link[link_id]) ||
+- mvm_sta->link[link_id])
++ for_each_sta_active_link(vif, sta, link_sta, link_id) {
++ if (WARN_ON(mvm_sta->link[link_id]))
+ continue;
+
+ ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
+@@ -705,8 +708,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+- if (WARN_ON(!link_conf || !mvm_link_sta))
++ if (WARN_ON(!link_conf || !mvm_link_sta)) {
++ ret = -EINVAL;
+ goto err;
++ }
+
+ ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
+ mvm_link_sta);
+@@ -851,10 +856,15 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+
+ int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
+ {
+- int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
++ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
++ if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
++ return 0;
++
++ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
++
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
+ return ret;
+@@ -997,7 +1007,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
+
+ cmd.modify.tid = cpu_to_le32(data->tid);
+
+- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
++ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
++ sizeof(cmd), &cmd);
+ data->sta_mask = new_sta_mask;
+ if (ret)
+ return ret;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index b18c91c5dd5d1c..bace9d01fd5832 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1318,7 +1318,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
+ static inline struct ieee80211_bss_conf *
+ iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
+ {
+- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
++ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
++ "erroneous FW link ID: %d\n", link_id))
+ return NULL;
+
+ if (rcu)
+@@ -1658,7 +1659,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
+ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+ #endif
+ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
+-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
++int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask);
+ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
+
+ /* Utils to extract sta related data */
+@@ -1737,6 +1738,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+
++void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
++
+ /*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+@@ -1942,13 +1945,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
+ *
+ * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta
+ * for Hot Spot 2.0
+- * @switch_phy_ctxt: pointer to the function that switches a vif from one
+- * phy_ctx to another
++ * @link: For a P2P Device interface, pointer to a function that links the
++ * MAC/Link to the PHY context
+ */
+ struct iwl_mvm_roc_ops {
+ int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id);
+- int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+- struct iwl_mvm_phy_ctxt *new_phy_ctxt);
++ int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+ };
+
+ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 5336a4afde4d23..b2cf5aeff7e3cf 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -702,11 +702,6 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
+ mutex_unlock(&mvm->mutex);
+ }
+
+-static bool iwl_mvm_fwrt_fw_running(void *ctx)
+-{
+- return iwl_mvm_firmware_running(ctx);
+-}
+-
+ static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+ {
+ struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+@@ -727,7 +722,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
+ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+ .dump_start = iwl_mvm_fwrt_dump_start,
+ .dump_end = iwl_mvm_fwrt_dump_end,
+- .fw_running = iwl_mvm_fwrt_fw_running,
+ .send_hcmd = iwl_mvm_fwrt_send_hcmd,
+ .d3_debug_enable = iwl_mvm_d3_debug_enable,
+ };
+@@ -1424,6 +1418,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
++ iwl_mvm_pause_tcm(mvm, false);
++
+ iwl_fw_dbg_stop_sync(&mvm->fwrt);
+ iwl_trans_stop_device(mvm->trans);
+ iwl_free_fw_paging(&mvm->fwrt);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+index a5b432bc9e2f82..9c582e23ebbaf8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+@@ -99,17 +99,6 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ active_cnt = 2;
+ }
+
+- /*
+- * If the firmware requested it, then we know that it supports
+- * getting zero for the values to indicate "use one, but pick
+- * which one yourself", which means it can dynamically pick one
+- * that e.g. has better RSSI.
+- */
+- if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
+- idle_cnt = 0;
+- active_cnt = 0;
+- }
+-
+ *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ PHY_RX_CHAIN_VALID_POS);
+ *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+index 2ecd32bed752ff..045c862a8fc4fc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+@@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
+ if (ret)
+ return ERR_PTR(ret);
+
+- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
++ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
++ resp_size)) {
++ iwl_free_resp(&cmd);
+ return ERR_PTR(-EIO);
++ }
+
+ resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
++ iwl_free_resp(&cmd);
++
+ if (!resp)
+ return ERR_PTR(-ENOMEM);
+
+- iwl_free_resp(&cmd);
+ return resp;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+index 1ca375a5cf6b5e..639cecc7a6e608 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+@@ -122,13 +122,8 @@ enum {
+
+ #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+-/*
+- * FIXME - various places in firmware API still use u8,
+- * e.g. LQ command and SCD config command.
+- * This should be 256 instead.
+- */
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
+ #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 8d1e44fd9de73b..8cff24d5f5f405 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
+ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+- struct ieee80211_sta *sta,
+- struct ieee80211_link_sta *link_sta)
++ struct ieee80211_sta *sta)
+ {
+ if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
+ kfree_skb(skb);
+ return;
+ }
+
+- if (sta && sta->valid_links && link_sta) {
+- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+-
+- rx_status->link_valid = 1;
+- rx_status->link_id = link_sta->link_id;
+- }
+-
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ }
+
+@@ -282,6 +274,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ u32 status,
+ struct ieee80211_rx_status *stats)
+ {
++ struct wireless_dev *wdev;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif;
+ u8 keyid;
+@@ -303,9 +296,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return 0;
+
++ if (!sta)
++ return -1;
++
++ mvmsta = iwl_mvm_sta_from_mac80211(sta);
++ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
++
+ /* key mismatch - will also report !MIC_OK but we shouldn't count it */
+ if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
+- return -1;
++ goto report;
+
+ /* good cases */
+ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+@@ -314,13 +313,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ return 0;
+ }
+
+- if (!sta)
+- return -1;
+-
+- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+-
+- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+-
+ /*
+ * both keys will have the same cipher and MIC length, use
+ * whichever one is available
+@@ -329,11 +321,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ if (!key) {
+ key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
+ if (!key)
+- return -1;
++ goto report;
+ }
+
+ if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+- return -1;
++ goto report;
+
+ /* get the real key ID */
+ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+@@ -347,7 +339,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ return -1;
+ key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
+ if (!key)
+- return -1;
++ goto report;
+ }
+
+ /* Report status to mac80211 */
+@@ -355,6 +347,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ ieee80211_key_mic_failure(key);
+ else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+ ieee80211_key_replay(key);
++report:
++ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
++ if (wdev->netdev)
++ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
+
+ return -1;
+ }
+@@ -503,6 +499,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
++
++ if (WARN_ON_ONCE(!mvm_sta->dup_data))
++ return false;
++
+ dup_data = &mvm_sta->dup_data[queue];
+
+ /*
+@@ -628,7 +628,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+ reorder_buf->queue,
+- sta, NULL /* FIXME */);
++ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+@@ -955,6 +955,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
++ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000)
++ return false;
++
+ /*
+ * This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+@@ -2478,6 +2481,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ if (IS_ERR(sta))
+ sta = NULL;
+ link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
++
++ if (sta && sta->valid_links && link_sta) {
++ rx_status->link_valid = 1;
++ rx_status->link_id = link_sta->link_id;
++ }
+ }
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /*
+@@ -2613,9 +2621,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
+ likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
+- likely(!iwl_mvm_mei_filter_scan(mvm, skb)))
+- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
+- link_sta);
++ likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
++ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
++ (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
++ !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
++ rx_status->flag |= RX_FLAG_AMSDU_MORE;
++
++ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
++ }
+ out:
+ rcu_read_unlock();
+ }
+@@ -2717,8 +2730,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ *
+ * We mark it as mac header, for upper layers to know where
+ * all radio tap header ends.
++ *
++	 * Since putting data on the skb doesn't move existing data, and that
++	 * is the only way we add it, data + len is the next place hdr would be put
+ */
+- skb_reset_mac_header(skb);
++ skb_set_mac_header(skb, skb->len);
+
+ /*
+ * Override the nss from the rx_vec since the rate_n_flags has
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 3cbe2c0b8d6bcd..ded06602f6ced3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -48,6 +48,8 @@
+ /* Number of iterations on the channel for mei filtered scan */
+ #define IWL_MEI_SCAN_NUM_ITER 5U
+
++#define WFA_TPC_IE_LEN 9
++
+ struct iwl_mvm_scan_timing_params {
+ u32 suspend_time;
+ u32 max_out_time;
+@@ -296,8 +298,8 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
+
+ max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+- /* we create the 802.11 header and SSID element */
+- max_probe_len -= 24 + 2;
++	/* we create the 802.11 header, SSID element and WFA TPC element */
++ max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
+
+ /* DS parameter set element is added on 2.4GHZ band if required */
+ if (iwl_mvm_rrm_scan_needed(mvm))
+@@ -724,8 +726,6 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
+ return newpos;
+ }
+
+-#define WFA_TPC_IE_LEN 9
+-
+ static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+ {
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+@@ -830,8 +830,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
+ (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
+ (ies->common_ie_len +
+- ies->len[NL80211_BAND_2GHZ] +
+- ies->len[NL80211_BAND_5GHZ] <=
++ ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
++ ies->len[NL80211_BAND_6GHZ] <=
+ iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+ }
+
+@@ -1304,7 +1304,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+- else if (params->ssids && params->ssids[0].ssid_len)
++ else if (params->n_ssids && params->ssids[0].ssid_len)
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+@@ -1406,7 +1406,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+- else if (params->ssids && params->ssids[0].ssid_len)
++ else if (params->n_ssids && params->ssids[0].ssid_len)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+@@ -1718,7 +1718,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
+ break;
+ }
+
+- if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
++ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
++ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
++ "scan: invalid BSSID at index %u, index_b=%u\n",
++ j, idex_b)) {
+ memcpy(&pp->bssid_array[idex_b++],
+ scan_6ghz_params[j].bssid, ETH_ALEN);
+ }
+@@ -2819,7 +2822,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
+ if (ver_handler->version != scan_ver)
+ continue;
+
+- return ver_handler->handler(mvm, vif, params, type, uid);
++ err = ver_handler->handler(mvm, vif, params, type, uid);
++ return err ? : uid;
+ }
+
+ err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+@@ -3114,18 +3118,16 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ params.n_channels = j;
+ }
+
+- if (non_psc_included &&
+- !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+- kfree(params.channels);
+- return -ENOBUFS;
++ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
++ ret = -ENOBUFS;
++ goto out;
+ }
+
+ uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
+-
+- if (non_psc_included)
+- kfree(params.channels);
+- if (uid < 0)
+- return uid;
++ if (uid < 0) {
++ ret = uid;
++ goto out;
++ }
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+@@ -3142,6 +3144,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+
++out:
++ if (non_psc_included)
++ kfree(params.channels);
+ return ret;
+ }
+
+@@ -3217,13 +3222,23 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ mvm->scan_start);
+ }
+
+-static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
++static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait)
+ {
+- struct iwl_umac_scan_abort cmd = {};
++ struct iwl_umac_scan_abort abort_cmd = {};
++ struct iwl_host_cmd cmd = {
++ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
++ .len = { sizeof(abort_cmd), },
++ .data = { &abort_cmd, },
++ .flags = CMD_SEND_IN_RFKILL,
++ };
++
+ int uid, ret;
++ u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND;
+
+ lockdep_assert_held(&mvm->mutex);
+
++ *wait = true;
++
+ /* We should always get a valid index here, because we already
+ * checked that this type of scan was running in the generic
+ * code.
+@@ -3232,16 +3247,28 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+- cmd.uid = cpu_to_le32(uid);
++ abort_cmd.uid = cpu_to_le32(uid);
+
+ IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
+
+- ret = iwl_mvm_send_cmd_pdu(mvm,
+- WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
+- 0, sizeof(cmd), &cmd);
++ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
++
++ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status);
+ if (!ret)
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
++	/* Handle the case where the FW is no longer familiar with the scan
++	 * that is to be stopped. In such a case, it is expected that the scan
++	 * complete notification was already received but not yet processed.
++	 * There is then no need to wait for a scan complete
++	 * notification and the flow should continue as if
++ * the scan was really aborted.
++ */
++ if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) {
++ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
++ *wait = false;
++ }
++
+ return ret;
+ }
+
+@@ -3251,6 +3278,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
+ static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ SCAN_OFFLOAD_COMPLETE, };
+ int ret;
++ bool wait = true;
+
+ lockdep_assert_held(&mvm->mutex);
+
+@@ -3262,7 +3290,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
+ IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+- ret = iwl_mvm_umac_scan_abort(mvm, type);
++ ret = iwl_mvm_umac_scan_abort(mvm, type, &wait);
+ else
+ ret = iwl_mvm_lmac_scan_abort(mvm);
+
+@@ -3270,6 +3298,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
+ IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
++ } else if (!wait) {
++ IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type);
++ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
++ return 0;
+ }
+
+ return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
+@@ -3408,7 +3440,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+ if (!(mvm->scan_status & type))
+ return 0;
+
+- if (iwl_mvm_is_radio_killed(mvm)) {
++ if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+ ret = 0;
+ goto out;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 3b9a343d4f672b..84f4a9576cbda1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2059,7 +2059,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ *status = IWL_MVM_QUEUE_FREE;
+ }
+
+- if (vif->type == NL80211_IFTYPE_STATION) {
++ if (vif->type == NL80211_IFTYPE_STATION &&
++ mvm_link->ap_sta_id == sta_id) {
+ /* if associated - we can't remove the AP STA now */
+ if (vif->cfg.assoc)
+ return true;
+@@ -2097,7 +2098,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ return ret;
+
+ /* flush its queues here since we are freeing mvm_sta */
+- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
++ ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
++ mvm_sta->tfd_queue_msk);
+ if (ret)
+ return ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+@@ -2408,7 +2410,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+
+ lockdep_assert_held(&mvm->mutex);
+
+- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true);
++ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
++ mvmvif->deflink.bcast_sta.tfd_queue_msk);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+@@ -2664,7 +2667,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+
+ lockdep_assert_held(&mvm->mutex);
+
+- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true);
++ iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
++ mvmvif->deflink.mcast_sta.tfd_queue_msk);
+
+ iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
+ &mvmvif->deflink.cab_queue, 0);
+@@ -2815,7 +2819,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+ cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ };
+- u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
++ struct iwl_host_cmd hcmd = {
++ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
++ .flags = CMD_SEND_IN_RFKILL,
++ .len[0] = sizeof(cmd),
++ .data[0] = &cmd,
++ };
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+@@ -2827,7 +2836,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ cmd.alloc.ssn = cpu_to_le16(ssn);
+ cmd.alloc.win_size = cpu_to_le16(buf_size);
+ baid = -EIO;
+- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
++ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
+ cmd.remove_v1.baid = cpu_to_le32(baid);
+ BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+ } else {
+@@ -2836,8 +2845,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ cmd.remove.tid = cpu_to_le32(tid);
+ }
+
+- ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+- &cmd, &baid);
++ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
+ if (ret)
+ return ret;
+
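
The BAID hunk switches from a bare command id to a struct iwl_host_cmd so the CMD_SEND_IN_RFKILL flag can ride along and the teardown still reaches the firmware under rfkill. Below is a rough model of why carrying flags in the descriptor matters; the struct layout, flag value, and function names are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

#define SEND_IN_RFKILL 0x1	/* illustrative flag, not the real value */

struct host_cmd {
	uint32_t id;
	uint32_t flags;
	const void *data;
	size_t len;
};

/* Model of a transport that drops commands while rfkill is asserted
 * unless the command opts in via a flag. */
static int send_cmd(const struct host_cmd *cmd, int rfkill_asserted)
{
	if (rfkill_asserted && !(cmd->flags & SEND_IN_RFKILL))
		return -1;	/* would be dropped by the transport */
	return 0;
}

int main(void)
{
	uint32_t baid_cfg = 0;
	struct host_cmd cmd = {
		.id = 0x10f,		/* placeholder command id */
		.flags = SEND_IN_RFKILL,
		.data = &baid_cfg,
		.len = sizeof(baid_cfg),
	};

	printf("sent under rfkill: %s\n",
	       send_cmd(&cmd, 1) == 0 ? "yes" : "no");
	return 0;
}
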
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+index 7364346a1209fc..95ef60daa62f04 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+@@ -642,6 +642,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
++void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
++ struct iwl_mvm_sta *mvm_sta,
++ struct iwl_mvm_link_sta *mvm_sta_link,
++ unsigned int link_id,
++ bool is_in_fw);
+ int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
+ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 5f0e7144a951ce..158266719ffd75 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -78,9 +78,29 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ */
+
+ if (!WARN_ON(!mvm->p2p_device_vif)) {
+- mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta,
+- true);
++ struct ieee80211_vif *vif = mvm->p2p_device_vif;
++
++ mvmvif = iwl_mvm_vif_from_mac80211(vif);
++ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
++ mvmvif->deflink.bcast_sta.tfd_queue_msk);
++
++ if (mvm->mld_api_is_used) {
++ iwl_mvm_mld_rm_bcast_sta(mvm, vif,
++ &vif->bss_conf);
++
++ iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
++ LINK_CONTEXT_MODIFY_ACTIVE,
++ false);
++ } else {
++ iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
++ iwl_mvm_binding_remove_vif(mvm, vif);
++ }
++
++ /* Do not remove the PHY context, as removing and adding
++ * a PHY context has timing overhead. Leaving it
++ * configured in FW is useful in case the next ROC
++ * uses the same channel.
++ */
+ }
+ }
+
+@@ -93,7 +113,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
+ /* do the same in case of hot spot 2.0 */
+- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
++ iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
++ mvm->aux_sta.tfd_queue_msk);
+
+ if (mvm->mld_api_is_used) {
+ iwl_mvm_mld_rm_aux_sta(mvm);
+@@ -880,8 +901,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ /* End TE, notify mac80211 */
+ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+- ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_p2p_roc_finished(mvm);
++ ieee80211_remain_on_channel_expired(mvm->hw);
+ } else if (le32_to_cpu(notif->start)) {
+ if (WARN_ON(mvmvif->time_event_data.id !=
+ le32_to_cpu(notif->conf_id)))
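
The session-protection hunk swaps two calls so that internal ROC state is finalized before mac80211 is told the ROC expired, because the expired callback may immediately start a new ROC. A toy model of that re-entry hazard, with made-up function names:

#include <stdio.h>

static int roc_active;

/* Model of mac80211 reacting to the "expired" event by immediately
 * requesting a new remain-on-channel. */
static void notify_expired(void)
{
	roc_active = 1;	/* new ROC starts from inside the callback */
}

static void roc_finished(void)
{
	roc_active = 0;
}

int main(void)
{
	/* Wrong order: the new ROC scheduled by the callback is
	 * clobbered by the late cleanup. */
	roc_active = 1;
	notify_expired();
	roc_finished();
	printf("notify-then-finish: roc_active=%d (lost)\n", roc_active);

	/* Fixed order, as in the hunk above: finish first, notify last. */
	roc_active = 1;
	roc_finished();
	notify_expired();
	printf("finish-then-notify: roc_active=%d (kept)\n", roc_active);
	return 0;
}
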
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 898dca3936435c..ce5f2bdde13888 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
++ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+@@ -500,13 +500,24 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ }
+ }
+
++static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
++ const u8 *addr3_override)
++{
++ struct ieee80211_hdr *out_hdr = cmd;
++
++ memcpy(cmd, hdr, hdrlen);
++ if (addr3_override)
++ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
++}
++
+ /*
+ * Allocates and sets the Tx cmd the driver data pointers in the skb
+ */
+ static struct iwl_device_tx_cmd *
+ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info, int hdrlen,
+- struct ieee80211_sta *sta, u8 sta_id)
++ struct ieee80211_sta *sta, u8 sta_id,
++ const u8 *addr3_override)
+ {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_device_tx_cmd *dev_cmd;
+@@ -536,16 +547,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /*
+- * For data packets rate info comes from the fw. Only
+- * set rate/antenna during connection establishment or in case
+- * no station is given.
++ * For data and mgmt packets rate info comes from the fw. Only
++ * set rate/antenna for injected frames with fixed rate, or
++ * when no sta is given.
+ */
+- if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++ if (unlikely(!sta ||
++ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags =
+ iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ hdr->frame_control);
++ } else if (!ieee80211_is_data(hdr->frame_control) ||
++ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++ /* These are important frames */
++ flags |= IWL_TX_FLAGS_HIGH_PRI;
+ }
+
+ if (mvm->trans->trans_cfg->device_family >=
+@@ -560,7 +575,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+- memcpy(cmd->hdr, hdr, hdrlen);
++ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
+
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+@@ -575,7 +590,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+- memcpy(cmd->hdr, hdr, hdrlen);
++ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+@@ -593,7 +608,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+
+ /* Copy MAC header from skb into command buffer */
+- memcpy(tx_cmd->hdr, hdr, hdrlen);
++ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
+
+ out:
+ return dev_cmd;
+@@ -796,7 +811,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
+
+- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
++ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
++ NULL);
+ if (!dev_cmd)
+ return -1;
+
+@@ -1116,7 +1132,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ */
+ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+- struct ieee80211_sta *sta)
++ struct ieee80211_sta *sta,
++ const u8 *addr3_override)
+ {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvmsta;
+@@ -1128,6 +1145,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ bool is_ampdu = false;
+ int hdrlen;
+
++ if (WARN_ON_ONCE(!sta))
++ return -1;
++
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_hdrlen(fc);
+@@ -1135,9 +1155,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+ return -1;
+
+- if (WARN_ON_ONCE(!mvmsta))
+- return -1;
+-
+ if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
+ return -1;
+
+@@ -1148,7 +1165,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ iwl_mvm_probe_resp_set_noa(mvm, skb);
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+- sta, mvmsta->deflink.sta_id);
++ sta, mvmsta->deflink.sta_id,
++ addr3_override);
+ if (!dev_cmd)
+ goto drop;
+
+@@ -1267,42 +1285,79 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+ {
+- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
++ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info info;
+ struct sk_buff_head mpdus_skbs;
++ struct ieee80211_vif *vif;
+ unsigned int payload_len;
+ int ret;
+ struct sk_buff *orig_skb = skb;
++ const u8 *addr3;
+
+- if (WARN_ON_ONCE(!mvmsta))
++ if (WARN_ON_ONCE(!sta))
+ return -1;
+
++ mvmsta = iwl_mvm_sta_from_mac80211(sta);
++
+ if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
+ return -1;
+
+ memcpy(&info, skb->cb, sizeof(info));
+
+ if (!skb_is_gso(skb))
+- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
++ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
+
+ payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ if (payload_len <= skb_shinfo(skb)->gso_size)
+- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
++ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
+
+ __skb_queue_head_init(&mpdus_skbs);
+
++ vif = info.control.vif;
++ if (!vif)
++ return -1;
++
+ ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
+ if (ret)
+ return ret;
+
+ WARN_ON(skb_queue_empty(&mpdus_skbs));
+
++ /*
++ * As described in IEEE Std 802.11-2020, table 9-30 (Address
++ * field contents), A-MSDU address 3 should contain the BSSID
++ * address.
++ * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
++ * in the command header. We need to preserve the original
++ * address 3 in the skb header to correctly create all the
++ * A-MSDU subframe headers from it.
++ */
++ switch (vif->type) {
++ case NL80211_IFTYPE_STATION:
++ addr3 = vif->cfg.ap_addr;
++ break;
++ case NL80211_IFTYPE_AP:
++ addr3 = vif->addr;
++ break;
++ default:
++ addr3 = NULL;
++ break;
++ }
++
+ while (!skb_queue_empty(&mpdus_skbs)) {
++ struct ieee80211_hdr *hdr;
++ bool amsdu;
++
+ skb = __skb_dequeue(&mpdus_skbs);
++ hdr = (void *)skb->data;
++ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
++ (*ieee80211_get_qos_ctl(hdr) &
++ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+- ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
++ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
++ amsdu ? addr3 : NULL);
+ if (ret) {
+ /* Free skbs created as part of TSO logic that have not yet been dequeued */
+ __skb_queue_purge(&mpdus_skbs);
+@@ -1563,12 +1618,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
++ *
++ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
+ */
+ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+ struct iwl_mvm_tx_resp *tx_resp)
+ {
+- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+- tx_resp->frame_count) & 0xfff;
++ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
++ tx_resp->frame_count);
++
++ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
++ return val & 0xFFFF;
++ return val & 0xFFF;
+ }
+
+ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+@@ -1599,7 +1660,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+- iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
++ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+@@ -1951,7 +2012,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
++ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+@@ -2232,7 +2293,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+- cmd.flags |= CMD_WANT_SKB;
++ cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+ sta_id, tids);
+@@ -2293,24 +2354,10 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
+ return ret;
+ }
+
+-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
++int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
+ {
+- u32 sta_id, tfd_queue_msk;
+-
+- if (internal) {
+- struct iwl_mvm_int_sta *int_sta = sta;
+-
+- sta_id = int_sta->sta_id;
+- tfd_queue_msk = int_sta->tfd_queue_msk;
+- } else {
+- struct iwl_mvm_sta *mvm_sta = sta;
+-
+- sta_id = mvm_sta->deflink.sta_id;
+- tfd_queue_msk = mvm_sta->tfd_queue_msk;
+- }
+-
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
+
+- return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk);
++ return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
+ }
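
Per IEEE Std 802.11-2020 table 9-30, address 3 of an A-MSDU carries the BSSID, while the skb must keep its original address 3 so the A-MSDU subframe headers can still be built from it. A compilable sketch of the copy-then-patch approach used by iwl_mvm_copy_hdr(); the struct here is a simplified stand-in for struct ieee80211_hdr.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

struct hdr80211 {
	uint16_t frame_control;
	uint16_t duration;
	uint8_t addr1[ETH_ALEN];
	uint8_t addr2[ETH_ALEN];
	uint8_t addr3[ETH_ALEN];
	uint16_t seq_ctrl;
};

/* Copy the header into the device command and patch address 3 there,
 * so the original header (still needed for the A-MSDU subframes)
 * stays untouched. */
static void copy_hdr(void *cmd, const void *hdr, size_t hdrlen,
		     const uint8_t *addr3_override)
{
	struct hdr80211 *out = cmd;

	memcpy(cmd, hdr, hdrlen);
	if (addr3_override)
		memcpy(out->addr3, addr3_override, ETH_ALEN);
}

int main(void)
{
	const uint8_t bssid[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x2a };
	struct hdr80211 skb_hdr = { 0 }, cmd_hdr;

	copy_hdr(&cmd_hdr, &skb_hdr, sizeof(skb_hdr), bssid);
	printf("skb addr3[5]=%02x, cmd addr3[5]=%02x\n",
	       skb_hdr.addr3[5], cmd_hdr.addr3[5]);
	return 0;
}
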
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index fa4a1454686012..9be41673650eee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -68,7 +68,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ }
+ break;
+ default:
+- IWL_ERR(trans, "WRT: Invalid buffer destination\n");
++ IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
++ le32_to_cpu(fw_mon_cfg->buf_location));
+ }
+ out:
+ if (dbg_flags)
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index bc83d2ba55c676..4a2de79f2e864b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -501,9 +501,38 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
+
+ /* Bz devices */
+- {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+- {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
+- {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
++ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+
+ /* Sc devices */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index 0f6493dab8cbd2..8408e4ddddedd8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -190,17 +190,17 @@ struct iwl_rb_allocator {
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+-static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+- struct iwl_rxq *rxq)
++static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
++ struct iwl_rxq *rxq)
+ {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ __le16 *rb_stts = rxq->rb_stts;
+
+- return READ_ONCE(*rb_stts);
++ return le16_to_cpu(READ_ONCE(*rb_stts));
+ } else {
+ struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+- return READ_ONCE(rb_stts->closed_rb_num);
++ return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
+ }
+ }
+
+@@ -749,7 +749,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+ }
+ }
+
+-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
++void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
+
+ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+ {
+@@ -796,7 +796,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
+ return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
+ }
+
+-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
+ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
+
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
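
The iwl_get_closed_rb_stts() change makes the helper return a CPU-order index with the family-dependent width already applied (12 bits before AX210, 16 bits after), instead of leaving each caller to repeat le16_to_cpu() and the 0x0FFF mask. A minimal model, assuming the endian conversion happens elsewhere:

#include <stdint.h>
#include <stdio.h>

/* Return the closed receive-buffer index at the width the device
 * family actually reports. */
static uint16_t closed_rb_index(uint16_t raw, int is_ax210_or_later)
{
	return is_ax210_or_later ? raw : (raw & 0xFFF);
}

int main(void)
{
	printf("old family: 0x%x\n", closed_rb_index(0xF123, 0));
	printf("new family: 0x%x\n", closed_rb_index(0xF123, 1));
	return 0;
}
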
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 4614acee9f7bac..be9b5a19e2a7cb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1385,7 +1385,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+ * if it is true then one of the handlers took the page.
+ */
+
+- if (reclaim) {
++ if (reclaim && txq) {
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index = iwl_txq_get_cmd_index(txq, index);
+@@ -1510,7 +1510,7 @@ static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
+ spin_lock(&rxq->lock);
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+- r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
++ r = iwl_get_closed_rb_stts(trans, rxq);
+ i = rxq->read;
+
+ /* W/A 9000 device step A0 wrap-around bug */
+@@ -1785,7 +1785,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+ return inta;
+ }
+
+-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
++void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+@@ -1809,7 +1809,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+ isr_stats->rfkill++;
+
+ if (prev != report)
+- iwl_trans_pcie_rf_kill(trans, report);
++ iwl_trans_pcie_rf_kill(trans, report, from_irq);
+ mutex_unlock(&trans_pcie->mutex);
+
+ if (hw_rfkill) {
+@@ -1949,7 +1949,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+- iwl_pcie_handle_rfkill_irq(trans);
++ iwl_pcie_handle_rfkill_irq(trans, true);
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+@@ -2366,7 +2366,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+
+ /* HW RF KILL switch toggled */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
+- iwl_pcie_handle_rfkill_irq(trans);
++ iwl_pcie_handle_rfkill_irq(trans, true);
+
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+ IWL_ERR(trans,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index fa46dad5fd6802..2ecf6db95fb313 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
++ iwl_pcie_synchronize_irqs(trans);
+ iwl_pcie_rx_napi_sync(trans);
+ iwl_txq_gen2_tx_free(trans);
+ iwl_pcie_rx_stop(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 198933f853c557..e9807fcca6ad10 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1082,7 +1082,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
+ report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+ if (prev != report)
+- iwl_trans_pcie_rf_kill(trans, report);
++ iwl_trans_pcie_rf_kill(trans, report, false);
+
+ return hw_rfkill;
+ }
+@@ -1236,7 +1236,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+ trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+ }
+
+-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
++static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+@@ -1263,6 +1263,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
++ if (!from_irq)
++ iwl_pcie_synchronize_irqs(trans);
+ iwl_pcie_rx_napi_sync(trans);
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
+@@ -1452,7 +1454,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+ clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+ }
+ if (hw_rfkill != was_in_rfkill)
+- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
+ }
+
+ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+@@ -1467,12 +1469,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+ mutex_lock(&trans_pcie->mutex);
+ trans_pcie->opmode_down = true;
+ was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+- _iwl_trans_pcie_stop_device(trans);
++ _iwl_trans_pcie_stop_device(trans, false);
+ iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
+ }
+
+-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
+ {
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+@@ -1485,7 +1487,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+ if (trans->trans_cfg->gen2)
+ _iwl_trans_pcie_gen2_stop_device(trans);
+ else
+- _iwl_trans_pcie_stop_device(trans);
++ _iwl_trans_pcie_stop_device(trans, from_irq);
+ }
+ }
+
+@@ -2712,11 +2714,9 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+- u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+- rxq));
++ u32 r = iwl_get_closed_rb_stts(trans, rxq);
+ pos += scnprintf(buf + pos, bufsz - pos,
+- "\tclosed_rb_num: %u\n",
+- r & 0x0FFF);
++ "\tclosed_rb_num: %u\n", r);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tclosed_rb_num: Not Allocated\n");
+@@ -2868,7 +2868,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
+ IWL_WARN(trans, "changing debug rfkill %d->%d\n",
+ trans_pcie->debug_rfkill, new_value);
+ trans_pcie->debug_rfkill = new_value;
+- iwl_pcie_handle_rfkill_irq(trans);
++ iwl_pcie_handle_rfkill_irq(trans, false);
+
+ return count;
+ }
+@@ -3087,9 +3087,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ u32 i, r, j, rb_len = 0;
+
+- spin_lock(&rxq->lock);
++ spin_lock_bh(&rxq->lock);
+
+- r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
++ r = iwl_get_closed_rb_stts(trans, rxq);
+
+ for (i = rxq->read, j = 0;
+ i != r && j < allocated_rb_nums;
+@@ -3111,7 +3111,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+ *data = iwl_fw_error_next_data(*data);
+ }
+
+- spin_unlock(&rxq->lock);
++ spin_unlock_bh(&rxq->lock);
+
+ return rb_len;
+ }
+@@ -3385,9 +3385,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ /* Dump RBs is supported only for pre-9000 devices (1 queue) */
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+ /* RBs */
+- num_rbs =
+- le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+- & 0x0FFF;
++ num_rbs = iwl_get_closed_rb_stts(trans, rxq);
+ num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
+ len += num_rbs * (sizeof(*data) +
+ sizeof(struct iwl_fw_error_dump_rb) +
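
The from_irq flag threaded through the stop paths above exists because synchronizing IRQs waits for in-flight handlers; doing that from inside the rfkill interrupt handler would wait on itself. A toy model of the rule, with invented names:

#include <stdio.h>

static int in_irq_handler;

/* Stand-in for synchronize_irq(): waits for running handlers, so it
 * must never be called from one. */
static int synchronize_irqs(void)
{
	return in_irq_handler ? -1 /* would deadlock */ : 0;
}

static int stop_device(int from_irq)
{
	if (!from_irq && synchronize_irqs())
		return -1;
	/* ... stop DMA, free queues ... */
	return 0;
}

int main(void)
{
	in_irq_handler = 1;
	printf("stop from irq: %d\n", stop_device(1));
	in_irq_handler = 0;
	printf("stop from process context: %d\n", stop_device(0));
	return 0;
}
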
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 340240b8954f6a..0efa304904bd30 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -1575,7 +1575,7 @@ void iwl_txq_progress(struct iwl_txq *txq)
+
+ /* Frees buffers until index _not_ inclusive */
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+- struct sk_buff_head *skbs)
++ struct sk_buff_head *skbs, bool is_flush)
+ {
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ int tfd_num, read_ptr, last_to_free;
+@@ -1588,9 +1588,9 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ return;
+
+ tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+
+ spin_lock_bh(&txq->lock);
++ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+
+ if (!test_bit(txq_id, trans->txqs.queue_used)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+@@ -1650,9 +1650,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
++ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+- skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
++ skb_queue_splice_init(&txq->overflow_q,
++ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+@@ -1672,8 +1674,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ */
+ spin_unlock_bh(&txq->lock);
+
+- while (!skb_queue_empty(&overflow_skbs)) {
+- struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
++ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
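
With is_flush set, iwl_txq_reclaim() now splices the overflow queue straight into the reclaimed list (frames to be freed) instead of re-queueing them for transmit on a device that is being flushed. A small array-based model of that splice decision:

#include <stdio.h>
#include <string.h>

#define QSZ 8

struct q { int buf[QSZ]; int n; };

static void splice(struct q *dst, struct q *src)
{
	memcpy(dst->buf + dst->n, src->buf, src->n * sizeof(int));
	dst->n += src->n;
	src->n = 0;
}

/* On a flush, overflow frames join the reclaimed (to-be-freed) list;
 * otherwise they move to the retransmit path. */
static void reclaim(struct q *reclaimed, struct q *overflow,
		    struct q *retransmit, int is_flush)
{
	splice(is_flush ? reclaimed : retransmit, overflow);
}

int main(void)
{
	struct q reclaimed = { .n = 0 }, retransmit = { .n = 0 };
	struct q overflow = { .buf = { 1, 2, 3 }, .n = 3 };

	reclaim(&reclaimed, &overflow, &retransmit, 1);
	printf("flush: reclaimed=%d retransmit=%d\n",
	       reclaimed.n, retransmit.n);
	return 0;
}
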
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index b7d3808588bfbc..4c09bc1930fa14 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -179,7 +179,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs);
+ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+- struct sk_buff_head *skbs);
++ struct sk_buff_head *skbs, bool is_flush);
+ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
+ bool freeze);
+diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig
+index 6d62ab49aa8d41..c7d02adb3eead3 100644
+--- a/drivers/net/wireless/marvell/libertas/Kconfig
++++ b/drivers/net/wireless/marvell/libertas/Kconfig
+@@ -2,8 +2,6 @@
+ config LIBERTAS
+ tristate "Marvell 8xxx Libertas WLAN driver support"
+ depends on CFG80211
+- select WIRELESS_EXT
+- select WEXT_SPY
+ select LIB80211
+ select FW_LOADER
+ help
+diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
+index 104d2b6dc9af6e..5a525da434c281 100644
+--- a/drivers/net/wireless/marvell/libertas/cmd.c
++++ b/drivers/net/wireless/marvell/libertas/cmd.c
+@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
+ if (!cmdarray[i].cmdbuf) {
+ lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
+ ret = -1;
+- goto done;
++ goto free_cmd_array;
+ }
+ }
+
+@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
+ init_waitqueue_head(&cmdarray[i].cmdwait_q);
+ lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
+ }
+- ret = 0;
++ return 0;
+
++free_cmd_array:
++ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
++ if (cmdarray[i].cmdbuf) {
++ kfree(cmdarray[i].cmdbuf);
++ cmdarray[i].cmdbuf = NULL;
++ }
++ }
++ kfree(priv->cmd_array);
++ priv->cmd_array = NULL;
+ done:
+ return ret;
+ }
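
The lbs_allocate_cmd_buffer() fix adds a proper unwind path: on a mid-loop allocation failure, everything allocated so far is released before returning, rather than leaking half the buffers. The generic shape in plain userspace C, with a fail_at parameter standing in for a kmalloc failure:

#include <stdlib.h>
#include <stdio.h>

#define NBUF 4

static int alloc_all(char **bufs, size_t sz, int fail_at)
{
	int i;

	for (i = 0; i < NBUF; i++) {
		bufs[i] = (i == fail_at) ? NULL : malloc(sz);
		if (!bufs[i])
			goto free_all;
	}
	return 0;

free_all:
	/* Walk back over whatever was allocated before the failure. */
	while (--i >= 0) {
		free(bufs[i]);
		bufs[i] = NULL;
	}
	return -1;
}

int main(void)
{
	char *bufs[NBUF] = { 0 };

	printf("alloc with failure at slot 2: %d\n",
	       alloc_all(bufs, 64, 2));
	return 0;
}
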
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index ba4e29713a8c94..b7ead0cd004508 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -926,6 +926,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
+ return -EOPNOTSUPP;
+ }
+
++ priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type);
++
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ adapter->main_locked = false;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
+@@ -2046,6 +2048,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+
++ memcpy(bss_cfg->mac_addr, priv->curr_addr, ETH_ALEN);
++
+ if (params->beacon_interval)
+ bss_cfg->beacon_period = params->beacon_interval;
+ if (params->dtim_period)
+@@ -4358,11 +4362,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
+- wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+- if (adapter->config_bands & BAND_A)
+- wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+- else
++ wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
++ &mwifiex_band_2ghz,
++ sizeof(mwifiex_band_2ghz),
++ GFP_KERNEL);
++ if (!wiphy->bands[NL80211_BAND_2GHZ]) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ if (adapter->config_bands & BAND_A) {
++ wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
++ &mwifiex_band_5ghz,
++ sizeof(mwifiex_band_5ghz),
++ GFP_KERNEL);
++ if (!wiphy->bands[NL80211_BAND_5GHZ]) {
++ ret = -ENOMEM;
++ goto err;
++ }
++ } else {
+ wiphy->bands[NL80211_BAND_5GHZ] = NULL;
++ }
+
+ if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+@@ -4456,8 +4476,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ if (ret < 0) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: wiphy_register failed: %d\n", __func__, ret);
+- wiphy_free(wiphy);
+- return ret;
++ goto err;
+ }
+
+ if (!adapter->regd) {
+@@ -4499,4 +4518,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+
+ adapter->wiphy = wiphy;
+ return ret;
++
++err:
++ wiphy_free(wiphy);
++
++ return ret;
+ }
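
Using devm_kmemdup() gives every adapter a private copy of the band templates, so one device customizing its channel data no longer mutates a global shared by all adapters. A userspace stand-in for the same idea, with a malloc-based duplicate in place of devm_kmemdup():

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct band { int n_channels; int flags; };

static const struct band band_2ghz_template = { .n_channels = 14 };

/* Duplicate the read-only template for one adapter. */
static struct band *band_dup(const struct band *tmpl)
{
	struct band *b = malloc(sizeof(*b));

	if (b)
		memcpy(b, tmpl, sizeof(*b));
	return b;
}

int main(void)
{
	struct band *a = band_dup(&band_2ghz_template);
	struct band *b = band_dup(&band_2ghz_template);

	a->flags = 1;	/* device A customizes its own copy */
	printf("b->flags=%d (unaffected)\n", b->flags);
	free(a);
	free(b);
	return 0;
}
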
+diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+index f9c9fec7c792a3..d14a0f4c1b6d79 100644
+--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
++++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
+@@ -970,9 +970,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
+ priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
+ mwifiex_dfs_dir);
+
+- if (!priv->dfs_dev_dir)
+- return;
+-
+ MWIFIEX_DFS_ADD_FILE(info);
+ MWIFIEX_DFS_ADD_FILE(debug);
+ MWIFIEX_DFS_ADD_FILE(getlog);
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index 8e6db904e5b2d8..a3be37526697b4 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
+ #define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
+ #define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
+ #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
++#define TLV_TYPE_UAP_MAC_ADDRESS (PROPRIETARY_TLV_BASE_ID + 43)
+ #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
+ #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
+ #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
+@@ -1586,7 +1587,7 @@ struct host_cmd_ds_802_11_scan_rsp {
+
+ struct host_cmd_ds_802_11_scan_ext {
+ u32 reserved;
+- u8 tlv_buffer[1];
++ u8 tlv_buffer[];
+ } __packed;
+
+ struct mwifiex_ie_types_bss_mode {
+diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
+index 091e7ca7937620..e8825f302de8a3 100644
+--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
++++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
+@@ -107,6 +107,7 @@ struct mwifiex_uap_bss_param {
+ u8 qos_info;
+ u8 power_constraint;
+ struct mwifiex_types_wmm_info wmm_info;
++ u8 mac_addr[ETH_ALEN];
+ };
+
+ enum {
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index 7bdec6c622481d..dc6b4cf616bea6 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1290,6 +1290,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i]) {
++ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
++ continue;
++
+ if ((adapter->priv[i]->bss_num == bss_num) &&
+ (adapter->priv[i]->bss_type == bss_type))
+ break;
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index 72904c275461e6..5be817d9854a68 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -2543,8 +2543,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
+ ext_scan_resp = &resp->params.ext_scan;
+
+ tlv = (void *)ext_scan_resp->tlv_buffer;
+- buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
+- - 1);
++ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN);
+
+ while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
+ type = le16_to_cpu(tlv->type);
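
Converting tlv_buffer from u8 [1] to a flexible array member is what removes the "- 1" from the buf_left computation: with the one-byte array, sizeof() included a buffer byte (plus padding), whereas a flexible array contributes nothing to the struct size. A short demonstration:

#include <stdio.h>
#include <stdint.h>

struct scan_ext_old {
	uint32_t reserved;
	uint8_t tlv_buffer[1];	/* old style: padded one-byte array */
};

struct scan_ext_new {
	uint32_t reserved;
	uint8_t tlv_buffer[];	/* flexible array member */
};

int main(void)
{
	/* Typically prints 8 vs 4: the [1] form drags in a buffer byte
	 * and alignment padding, the flexible array covers only the
	 * fixed header. */
	printf("old sizeof=%zu new sizeof=%zu\n",
	       sizeof(struct scan_ext_old), sizeof(struct scan_ext_new));
	return 0;
}
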
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index 774858cfe86f2c..e66ba0d156f85d 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -331,6 +331,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = false,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+@@ -346,6 +347,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+@@ -361,6 +363,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+@@ -376,6 +379,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+ .can_dump_fw = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+@@ -392,6 +396,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
+@@ -408,6 +413,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = true,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+@@ -425,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
+@@ -440,6 +447,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
+ .can_dump_fw = false,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+@@ -456,6 +464,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ .fw_dump_enh = true,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+@@ -471,6 +480,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
++ .fw_ready_extra_delay = false,
+ };
+
+ static struct memory_type_mapping generic_mem_type_map[] = {
+@@ -563,6 +573,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+ card->fw_dump_enh = data->fw_dump_enh;
+ card->can_auto_tdls = data->can_auto_tdls;
+ card->can_ext_scan = data->can_ext_scan;
++ card->fw_ready_extra_delay = data->fw_ready_extra_delay;
+ INIT_WORK(&card->work, mwifiex_sdio_work);
+ }
+
+@@ -766,8 +777,9 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
+ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
+ u32 poll_num)
+ {
++ struct sdio_mmc_card *card = adapter->card;
+ int ret = 0;
+- u16 firmware_stat;
++ u16 firmware_stat = 0;
+ u32 tries;
+
+ for (tries = 0; tries < poll_num; tries++) {
+@@ -783,6 +795,13 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
+ ret = -1;
+ }
+
++ if (card->fw_ready_extra_delay &&
++ firmware_stat == FIRMWARE_READY_SDIO)
++ /* The firmware might report ready before it actually is.
++ * Wait a little longer as a workaround.
++ */
++ msleep(100);
++
+ return ret;
+ }
+
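
The fw_ready_extra_delay quirk handles SD8978 firmware that reports ready slightly before it can actually accept traffic: after a successful status poll, the driver sleeps an extra 100 ms. The shape of that workaround, with a stubbed status read and a placeholder ready value:

#include <stdio.h>
#include <unistd.h>

#define FW_READY 0xfedc	/* placeholder for FIRMWARE_READY_SDIO */

static int read_fw_status(int tries)
{
	return tries >= 2 ? FW_READY : 0;	/* ready on the 3rd poll */
}

static int wait_fw_ready(int extra_delay, int poll_num)
{
	int tries, stat = 0;

	for (tries = 0; tries < poll_num; tries++) {
		stat = read_fw_status(tries);
		if (stat == FW_READY)
			break;
	}
	if (stat != FW_READY)
		return -1;
	if (extra_delay)
		usleep(100 * 1000);	/* mirror of msleep(100) */
	return 0;
}

int main(void)
{
	printf("fw ready: %d\n", wait_fw_ready(1, 10));
	return 0;
}
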
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
+index ae94c172310ff5..a5112cb35cdcda 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
+@@ -258,6 +258,7 @@ struct sdio_mmc_card {
+ bool fw_dump_enh;
+ bool can_auto_tdls;
+ bool can_ext_scan;
++ bool fw_ready_extra_delay;
+
+ struct mwifiex_sdio_mpa_tx mpa_tx;
+ struct mwifiex_sdio_mpa_rx mpa_rx;
+@@ -281,6 +282,7 @@ struct mwifiex_sdio_device {
+ bool fw_dump_enh;
+ bool can_auto_tdls;
+ bool can_ext_scan;
++ bool fw_ready_extra_delay;
+ };
+
+ /*
+diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+index e78a201cd1507d..491e366119096e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+@@ -468,6 +468,7 @@ void mwifiex_config_uap_11d(struct mwifiex_private *priv,
+ static int
+ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+ {
++ struct host_cmd_tlv_mac_addr *mac_tlv;
+ struct host_cmd_tlv_dtim_period *dtim_period;
+ struct host_cmd_tlv_beacon_period *beacon_period;
+ struct host_cmd_tlv_ssid *ssid;
+@@ -487,6 +488,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+ int i;
+ u16 cmd_size = *param_size;
+
++ mac_tlv = (struct host_cmd_tlv_mac_addr *)tlv;
++ mac_tlv->header.type = cpu_to_le16(TLV_TYPE_UAP_MAC_ADDRESS);
++ mac_tlv->header.len = cpu_to_le16(ETH_ALEN);
++ memcpy(mac_tlv->mac_addr, bss_cfg->mac_addr, ETH_ALEN);
++ cmd_size += sizeof(struct host_cmd_tlv_mac_addr);
++ tlv += sizeof(struct host_cmd_tlv_mac_addr);
++
+ if (bss_cfg->ssid.ssid_len) {
+ ssid = (struct host_cmd_tlv_ssid *)tlv;
+ ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
+index 13bcb123d12234..c0ecd769ada764 100644
+--- a/drivers/net/wireless/marvell/mwl8k.c
++++ b/drivers/net/wireless/marvell/mwl8k.c
+@@ -2718,7 +2718,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
+ cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
+ cmd->numaddr = cpu_to_le16(mc_count);
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+- memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
++ memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
+ }
+ }
+
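
The mwl8k one-liner fixes a missing post-increment: every multicast address was being copied into slot 0 of cmd->addr, so only the last one survived. The bug pattern and its fix, reduced to standalone C:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	const char *macs[3] = { "\x02\0\0\0\0\x01",
				"\x02\0\0\0\0\x02",
				"\x02\0\0\0\0\x03" };
	unsigned char addr[3][ETH_ALEN];
	int i = 0, n;

	/* Buggy shape: memcpy(addr[i], ...) with no increment keeps
	 * overwriting slot 0; the fix indexes with addr[i++]. */
	for (n = 0; n < 3; n++)
		memcpy(addr[i++], macs[n], ETH_ALEN);

	printf("addr[2] last byte=%02x\n", addr[2][ETH_ALEN - 1]);
	return 0;
}
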
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index dc8f4e157eb291..cd048659706436 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -330,9 +330,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ if (e->txwi == DMA_DUMMY_DATA)
+ e->txwi = NULL;
+
+- if (e->skb == DMA_DUMMY_DATA)
+- e->skb = NULL;
+-
+ *prev_e = *e;
+ memset(e, 0, sizeof(*e));
+ }
+@@ -779,7 +776,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+
+ static void
+ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+- int len, bool more, u32 info)
++ int len, bool more, u32 info, bool allow_direct)
+ {
+ struct sk_buff *skb = q->rx_head;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+@@ -791,7 +788,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+
+ skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+ } else {
+- mt76_put_page_pool_buf(data, true);
++ mt76_put_page_pool_buf(data, allow_direct);
+ }
+
+ if (more)
+@@ -811,6 +808,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ struct sk_buff *skb;
+ unsigned char *data;
+ bool check_ddone = false;
++ bool allow_direct = !mt76_queue_is_wed_rx(q);
+ bool more;
+
+ if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+@@ -851,7 +849,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ }
+
+ if (q->rx_head) {
+- mt76_add_fragment(dev, q, data, len, more, info);
++ mt76_add_fragment(dev, q, data, len, more, info,
++ allow_direct);
+ continue;
+ }
+
+@@ -880,7 +879,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+ continue;
+
+ free_frag:
+- mt76_put_page_pool_buf(data, true);
++ mt76_put_page_pool_buf(data, allow_direct);
+ }
+
+ mt76_dma_rx_fill(dev, q, true);
+diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
+index 36564930aef121..1de3c734e136a4 100644
+--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
+@@ -67,7 +67,7 @@ static int mt76_get_of_epprom_from_mtd(struct mt76_dev *dev, void *eep, int offs
+ goto out_put_node;
+ }
+
+- offset = be32_to_cpup(list);
++ offset += be32_to_cpup(list);
+ ret = mtd_read(mtd, offset, len, &retlen, eep);
+ put_mtd_device(mtd);
+ if (mtd_is_bitflip(ret))
+@@ -106,7 +106,7 @@ static int mt76_get_of_epprom_from_mtd(struct mt76_dev *dev, void *eep, int offs
+ #endif
+ }
+
+-static int mt76_get_of_epprom_from_nvmem(struct mt76_dev *dev, void *eep, int len)
++static int mt76_get_of_eeprom_from_nvmem(struct mt76_dev *dev, void *eep, int len)
+ {
+ struct device_node *np = dev->dev->of_node;
+ struct nvmem_cell *cell;
+@@ -153,7 +153,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
+ if (!ret)
+ return 0;
+
+- return mt76_get_of_epprom_from_nvmem(dev, eep, len);
++ return mt76_get_of_eeprom_from_nvmem(dev, eep, len);
+ }
+ EXPORT_SYMBOL_GPL(mt76_get_of_eeprom);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index d158320bc15dbe..bf4541e76ba228 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -415,6 +415,9 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
+ struct mt76_dev *dev = phy->dev;
+ struct wiphy *wiphy = hw->wiphy;
+
++ INIT_LIST_HEAD(&phy->tx_list);
++ spin_lock_init(&phy->tx_lock);
++
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
+
+@@ -688,6 +691,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
+ int ret;
+
+ dev_set_drvdata(dev->dev, dev);
++ mt76_wcid_init(&dev->global_wcid);
+ ret = mt76_phy_init(phy, hw);
+ if (ret)
+ return ret;
+@@ -743,6 +747,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(&dev->phy);
+ mt76_tx_status_check(dev, true);
++ mt76_wcid_cleanup(dev, &dev->global_wcid);
+ ieee80211_unregister_hw(hw);
+ }
+ EXPORT_SYMBOL_GPL(mt76_unregister_device);
+@@ -1411,7 +1416,7 @@ mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ wcid->phy_idx = phy->band_idx;
+ rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+
+- mt76_packet_id_init(wcid);
++ mt76_wcid_init(wcid);
+ out:
+ mutex_unlock(&dev->mutex);
+
+@@ -1430,7 +1435,7 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ if (dev->drv->sta_remove)
+ dev->drv->sta_remove(dev, vif, sta);
+
+- mt76_packet_id_flush(dev, wcid);
++ mt76_wcid_cleanup(dev, wcid);
+
+ mt76_wcid_mask_clear(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
+@@ -1486,6 +1491,47 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ }
+ EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
+
++void mt76_wcid_init(struct mt76_wcid *wcid)
++{
++ INIT_LIST_HEAD(&wcid->tx_list);
++ skb_queue_head_init(&wcid->tx_pending);
++
++ INIT_LIST_HEAD(&wcid->list);
++ idr_init(&wcid->pktid);
++}
++EXPORT_SYMBOL_GPL(mt76_wcid_init);
++
++void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
++{
++ struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
++ struct ieee80211_hw *hw;
++ struct sk_buff_head list;
++ struct sk_buff *skb;
++
++ mt76_tx_status_lock(dev, &list);
++ mt76_tx_status_skb_get(dev, wcid, -1, &list);
++ mt76_tx_status_unlock(dev, &list);
++
++ idr_destroy(&wcid->pktid);
++
++ spin_lock_bh(&phy->tx_lock);
++
++ if (!list_empty(&wcid->tx_list))
++ list_del_init(&wcid->tx_list);
++
++ spin_lock(&wcid->tx_pending.lock);
++ skb_queue_splice_tail_init(&wcid->tx_pending, &list);
++ spin_unlock(&wcid->tx_pending.lock);
++
++ spin_unlock_bh(&phy->tx_lock);
++
++ while ((skb = __skb_dequeue(&list)) != NULL) {
++ hw = mt76_tx_status_get_hw(dev, skb);
++ ieee80211_free_txskb(hw, skb);
++ }
++}
++EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
++
+ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+ {
+@@ -1697,11 +1743,16 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+ }
+ EXPORT_SYMBOL_GPL(mt76_init_queue);
+
+-u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
++u16 mt76_calculate_default_rate(struct mt76_phy *phy,
++ struct ieee80211_vif *vif, int rateidx)
+ {
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def :
++ &phy->chandef;
+ int offset = 0;
+
+- if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
++ if (chandef->chan->band != NL80211_BAND_2GHZ)
+ offset = 4;
+
+ /* pick the lowest rate for hidden nodes */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index e8757865a3d068..8b620d4fed4390 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -334,6 +334,9 @@ struct mt76_wcid {
+ u32 tx_info;
+ bool sw_iv;
+
++ struct list_head tx_list;
++ struct sk_buff_head tx_pending;
++
+ struct list_head list;
+ struct idr pktid;
+
+@@ -572,8 +575,7 @@ struct mt76_sdio {
+ struct mt76_worker txrx_worker;
+ struct mt76_worker status_worker;
+ struct mt76_worker net_worker;
+-
+- struct work_struct stat_work;
++ struct mt76_worker stat_worker;
+
+ u8 *xmit_buf;
+ u32 xmit_buf_sz;
+@@ -709,6 +711,7 @@ struct mt76_vif {
+ u8 basic_rates_idx;
+ u8 mcast_rates_idx;
+ u8 beacon_rates_idx;
++ struct ieee80211_chanctx_conf *ctx;
+ };
+
+ struct mt76_phy {
+@@ -719,6 +722,8 @@ struct mt76_phy {
+ unsigned long state;
+ u8 band_idx;
+
++ spinlock_t tx_lock;
++ struct list_head tx_list;
+ struct mt76_queue *q_tx[__MT_TXQ_MAX];
+
+ struct cfg80211_chan_def chandef;
+@@ -1100,7 +1105,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
+ struct mt76_queue *
+ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
+ int ring_base, u32 flags);
+-u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
++u16 mt76_calculate_default_rate(struct mt76_phy *phy,
++ struct ieee80211_vif *vif, int rateidx);
+ static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
+ int n_desc, int ring_base, u32 flags)
+ {
+@@ -1598,22 +1604,7 @@ mt76_token_put(struct mt76_dev *dev, int token)
+ return txwi;
+ }
+
+-static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
+-{
+- INIT_LIST_HEAD(&wcid->list);
+- idr_init(&wcid->pktid);
+-}
+-
+-static inline void
+-mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
+-{
+- struct sk_buff_head list;
+-
+- mt76_tx_status_lock(dev, &list);
+- mt76_tx_status_skb_get(dev, wcid, -1, &list);
+- mt76_tx_status_unlock(dev, &list);
+-
+- idr_destroy(&wcid->pktid);
+-}
++void mt76_wcid_init(struct mt76_wcid *wcid);
++void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
+
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+index 888678732f2906..c223f7c19e6da1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+@@ -9,6 +9,23 @@ struct beacon_bc_data {
+ int count[MT7603_MAX_INTERFACES];
+ };
+
++static void
++mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev)
++{
++ if (dev->beacon_check % 5 != 4)
++ return;
++
++ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++ mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
++ mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
++ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
++
++ mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++ mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
++ mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
++}
++
+ static void
+ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
++ u32 om_idx = mvif->idx;
++ u32 val;
+
+ if (!(mdev->beacon_mask & BIT(mvif->idx)))
+ return;
+@@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ if (!skb)
+ return;
+
+- mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
+- MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++ if (om_idx)
++ om_idx |= 0x10;
++ val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) |
++ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
++ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8);
+
+ spin_lock_bh(&dev->ps_lock);
+- mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
+- FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
+- FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
+- dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
+- FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
+- FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
+
+- if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
++ mt76_wr(dev, MT_DMA_FQCR0, val |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN));
++ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
+ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++ goto out;
++ }
++
++ mt76_wr(dev, MT_DMA_FQCR0, val |
++ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC));
++ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
++ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
++ goto out;
++ }
+
++ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
++ MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
++
++out:
+ spin_unlock_bh(&dev->ps_lock);
+ }
+
+@@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
++ /* Flush all previous CAB queue packets and beacons */
++ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
++
++ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
++ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
++
++ if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0)
++ dev->beacon_check++;
++ else
++ dev->beacon_check = 0;
++ mt7603_mac_stuck_beacon_recovery(dev);
++
+ q = dev->mphy.q_tx[MT_TXQ_BEACON];
+ spin_lock(&q->lock);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+@@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ mt76_queue_kick(dev, q);
+ spin_unlock(&q->lock);
+
+- /* Flush all previous CAB queue packets */
+- mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
+-
+- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
+-
+ mt76_csa_check(mdev);
+ if (mdev->csa_complete)
+- goto out;
++ return;
+
+ q = dev->mphy.q_tx[MT_TXQ_CAB];
+ do {
+@@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ skb_queue_len(&data.q) < 8);
+
+ if (skb_queue_empty(&data.q))
+- goto out;
++ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+@@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
+ MT_WF_ARB_CAB_START_BSSn(0) |
+ (MT_WF_ARB_CAB_START_BSS0n(1) *
+ ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
+-
+-out:
+- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
+- if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
+- dev->beacon_check++;
+ }
+
+ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+index 60a996b63c0c05..915b8349146af8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+@@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
++ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
++ dev->rx_pse_check = 0;
+ mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+index 03ba11a61c90c1..525444953df687 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+@@ -4,6 +4,13 @@
+ #include "mac.h"
+ #include "../dma.h"
+
++static const u8 wmm_queue_map[] = {
++ [IEEE80211_AC_BK] = 0,
++ [IEEE80211_AC_BE] = 1,
++ [IEEE80211_AC_VI] = 2,
++ [IEEE80211_AC_VO] = 3,
++};
++
+ static void
+ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ {
+@@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ struct ieee80211_sta *sta;
+ struct mt7603_sta *msta;
+ struct mt76_wcid *wcid;
++ u8 qid, tid = 0, hwq = 0;
+ void *priv;
+ int idx;
+ u32 val;
+- u8 tid = 0;
+
+ if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
+ goto free;
+@@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ goto free;
+
+ priv = msta = container_of(wcid, struct mt7603_sta, wcid);
+- val = le32_to_cpu(txd[0]);
+- val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+- val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+- txd[0] = cpu_to_le32(val);
+
+ sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
+- if (ieee80211_is_data_qos(hdr->frame_control))
++
++ hwq = wmm_queue_map[IEEE80211_AC_BE];
++ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ tid = *ieee80211_get_qos_ctl(hdr) &
+- IEEE80211_QOS_CTL_TAG1D_MASK;
+- skb_set_queue_mapping(skb, tid_to_ac[tid]);
++ IEEE80211_QOS_CTL_TAG1D_MASK;
++ qid = tid_to_ac[tid];
++ hwq = wmm_queue_map[qid];
++ skb_set_queue_mapping(skb, qid);
++ } else if (ieee80211_is_data(hdr->frame_control)) {
++ skb_set_queue_mapping(skb, IEEE80211_AC_BE);
++ hwq = wmm_queue_map[IEEE80211_AC_BE];
++ } else {
++ skb_pull(skb, MT_TXD_SIZE);
++ if (!ieee80211_is_bufferable_mmpdu(skb))
++ goto free;
++ skb_push(skb, MT_TXD_SIZE);
++ skb_set_queue_mapping(skb, MT_TXQ_PSD);
++ hwq = MT_TX_HW_QUEUE_MGMT;
++ }
++
+ ieee80211_sta_set_buffered(sta, tid, true);
+
++ val = le32_to_cpu(txd[0]);
++ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
++ val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq);
++ txd[0] = cpu_to_le32(val);
++
+ spin_lock_bh(&dev->ps_lock);
+ __skb_queue_tail(&msta->psq, skb);
+ if (skb_queue_len(&msta->psq) >= 64) {
+@@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
+
+ int mt7603_dma_init(struct mt7603_dev *dev)
+ {
+- static const u8 wmm_queue_map[] = {
+- [IEEE80211_AC_BK] = 0,
+- [IEEE80211_AC_BE] = 1,
+- [IEEE80211_AC_VI] = 2,
+- [IEEE80211_AC_VO] = 3,
+- };
+ int ret;
+ int i;
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 99ae080502d800..dc8a77f0a1cc46 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev)
+ MT_CLIENT_RESET_TX_R_E_2_S);
+
+ /* Start PSE client TX abort */
++ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+ mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
+ mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
+ MT_CLIENT_RESET_TX_R_E_1_S, 500);
+@@ -1441,15 +1442,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+
+ mt7603_beacon_set_timer(dev, -1, 0);
+
+- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
+- dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
+- dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
+- dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
+- mt7603_pse_reset(dev);
+-
+- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
+- goto skip_dma_reset;
+-
+ mt7603_mac_stop(dev);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+@@ -1459,28 +1451,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
+
+ mt7603_irq_disable(dev, mask);
+
+- mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+-
+ mt7603_pse_client_reset(dev);
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+
++ mt7603_dma_sched_reset(dev);
++
++ mt76_tx_status_check(&dev->mt76, true);
++
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ mt76_queue_rx_reset(dev, i);
+ }
+
+- mt76_tx_status_check(&dev->mt76, true);
++ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
++ dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY)
++ mt7603_pse_reset(dev);
+
+- mt7603_dma_sched_reset(dev);
++ if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
++ mt7603_mac_dma_start(dev);
+
+- mt7603_mac_dma_start(dev);
++ mt7603_irq_enable(dev, mask);
+
+- mt7603_irq_enable(dev, mask);
++ clear_bit(MT76_RESET, &dev->mphy.state);
++ }
+
+-skip_dma_reset:
+- clear_bit(MT76_RESET, &dev->mphy.state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+@@ -1570,20 +1566,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
+ {
+ u32 addr, val;
+
+- if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
+- return true;
+-
+ if (mt7603_rx_fifo_busy(dev))
+- return false;
++ goto out;
+
+ addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
+ mt76_wr(dev, addr, 3);
+ val = mt76_rr(dev, addr) >> 16;
+
+- if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
+- return true;
++ if (!(val & BIT(0)))
++ return false;
++
++ if (is_mt7628(dev))
++ val &= 0xa000;
++ else
++ val &= 0x8000;
++ if (!val)
++ return false;
+
+- return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
++out:
++ if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
++ (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
++ return false;
++
++ return true;
+ }
+
+ static bool
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+index c213fd2a5216bf..89d738deea62e9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+@@ -70,7 +70,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.vif = mvif;
+- mt76_packet_id_init(&mvif->sta.wcid);
++ mt76_wcid_init(&mvif->sta.wcid);
+
+ eth_broadcast_addr(bc_addr);
+ mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
+@@ -110,7 +110,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
+ mutex_unlock(&dev->mt76.mutex);
+
+- mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
++ mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid);
+ }
+
+ void mt7603_init_edcca(struct mt7603_dev *dev)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+index a39c9a0fcb1cba..524bceb8e95818 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+@@ -469,6 +469,11 @@ enum {
+ #define MT_WF_SEC_BASE 0x21a00
+ #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
+
++#define MT_WF_CFG_OFF_BASE 0x21e00
++#define MT_WF_CFG_OFF(ofs) (MT_WF_CFG_OFF_BASE + (ofs))
++#define MT_WF_CFG_OFF_WOCCR MT_WF_CFG_OFF(0x004)
++#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS BIT(4)
++
+ #define MT_SEC_SCR MT_WF_SEC(0x004)
+ #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+index 18a50ccff106a8..f22a1aa8850521 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+@@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
+ wiphy_name(wiphy));
++ if (!name)
++ return -ENOMEM;
++
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
+ mt7615_hwmon_groups);
+ if (IS_ERR(hwmon))
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 200b1752ca77f1..dab16b5fc38611 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -226,7 +226,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+- mt76_packet_id_init(&mvif->sta.wcid);
++ mt76_wcid_init(&mvif->sta.wcid);
+
+ mt7615_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+@@ -279,7 +279,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+- mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
++ mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid);
+ }
+
+ int mt7615_set_channel(struct mt7615_phy *phy)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 8d745c9730c72c..955974a82180fd 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -2147,7 +2147,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+ };
+
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++ phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+index 0019890fdb7841..fbb1181c58ff3c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+@@ -106,7 +106,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ else
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
+
+- tx_info->skb = DMA_DUMMY_DATA;
++ tx_info->skb = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+index fc547a0031eae2..67cedd2555f973 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+@@ -204,8 +204,8 @@ static int mt7663s_suspend(struct device *dev)
+ mt76_worker_disable(&mdev->mt76.sdio.txrx_worker);
+ mt76_worker_disable(&mdev->mt76.sdio.status_worker);
+ mt76_worker_disable(&mdev->mt76.sdio.net_worker);
++ mt76_worker_disable(&mdev->mt76.sdio.stat_worker);
+
+- cancel_work_sync(&mdev->mt76.sdio.stat_work);
+ clear_bit(MT76_READING_STATS, &mdev->mphy.state);
+
+ mt76_tx_status_check(&mdev->mt76, true);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+index 68ca0844cbbfad..87bfa441a93743 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+@@ -257,6 +257,8 @@ enum tx_mgnt_type {
+ #define MT_TXD7_UDP_TCP_SUM BIT(15)
+ #define MT_TXD7_TX_TIME GENMASK(9, 0)
+
++#define MT_TXD9_WLAN_IDX GENMASK(23, 8)
++
+ #define MT_TX_RATE_STBC BIT(14)
+ #define MT_TX_RATE_NSS GENMASK(13, 10)
+ #define MT_TX_RATE_MODE GENMASK(9, 6)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index ee5177fd6ddea0..87479c6c2b505f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -151,23 +151,6 @@ void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
+ return;
+ }
+
+- /* error path */
+- if (e->skb == DMA_DUMMY_DATA) {
+- struct mt76_connac_txp_common *txp;
+- struct mt76_txwi_cache *t;
+- u16 token;
+-
+- txp = mt76_connac_txwi_to_txp(mdev, e->txwi);
+- if (is_mt76_fw_txp(mdev))
+- token = le16_to_cpu(txp->fw.token);
+- else
+- token = le16_to_cpu(txp->hw.msdu_id[0]) &
+- ~MT_MSDU_ID_VALID;
+-
+- t = mt76_token_put(mdev, token);
+- e->skb = t ? t->skb : NULL;
+- }
+-
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+ }
+@@ -310,7 +293,10 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
+ {
+- u8 nss = 0, mode = 0, band = mphy->chandef.chan->band;
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def : &mphy->chandef;
++ u8 nss = 0, mode = 0, band = chandef->chan->band;
+ int rateidx = 0, mcast_rate;
+
+ if (!vif)
+@@ -343,7 +329,7 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ legacy:
+- rateidx = mt76_calculate_default_rate(mphy, rateidx);
++ rateidx = mt76_calculate_default_rate(mphy, vif, rateidx);
+ mode = rateidx >> 8;
+ rateidx &= GENMASK(7, 0);
+ out:
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 0f0a519f956f81..998cfd73764a9c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -255,7 +255,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ };
+ u16 ntlv;
+
+- ptlv = skb_put(skb, len);
++ ptlv = skb_put_zero(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+@@ -829,7 +829,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ u8 rcpi, u8 sta_state)
+ {
+- struct cfg80211_chan_def *chandef = &mphy->chandef;
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def : &mphy->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_dev *dev = mphy->dev;
+ struct sta_rec_ra_info *ra_info;
+@@ -1345,7 +1347,7 @@ u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ sband = phy->hw->wiphy->bands[band];
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
+
+- if (!eht_cap || !eht_cap->has_eht)
++ if (!eht_cap || !eht_cap->has_eht || !vif->bss_conf.eht_support)
+ return mode;
+
+ switch (band) {
+@@ -1369,7 +1371,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
+ const struct ieee80211_sta_he_cap *
+ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
+ {
+- enum nl80211_band band = phy->chandef.chan->band;
++ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
++ struct cfg80211_chan_def *chandef = mvif->ctx ?
++ &mvif->ctx->def : &phy->chandef;
++ enum nl80211_band band = chandef->chan->band;
+ struct ieee80211_supported_band *sband;
+
+ sband = phy->hw->wiphy->bands[band];
+@@ -1649,7 +1654,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ set_bit(MT76_HW_SCANNING, &phy->state);
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+- req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
++ req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
+
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ req->bss_idx = mvif->idx;
+@@ -1777,7 +1782,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+
+ mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+
+- req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
++ req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
+ req->version = 1;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+
+@@ -2411,7 +2416,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+- gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
++ gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
+ sizeof(*gtk_tlv));
+ gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+@@ -2534,7 +2539,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+ return -ENOMEM;
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+- ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
++ ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
+ ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ ptlv->data_len = pattern->pattern_len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+index dcbb5c605dfe69..8a0e8124b89400 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -288,7 +288,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+- mt76_packet_id_init(&mvif->group_wcid);
++ mt76_wcid_init(&mvif->group_wcid);
+
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
+@@ -346,7 +346,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
+
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
+ rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL);
+- mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid);
++ mt76_wcid_cleanup(&dev->mt76, &mvif->group_wcid);
+ }
+ EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+index 6c3696c8c70022..450f4d221184b0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+@@ -1049,6 +1049,7 @@ static ssize_t
+ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
++ int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+@@ -1057,7 +1058,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ .band_idx = phy->mt76->band_idx,
+ };
+ char buf[100];
+- int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
+ enum mac80211_rx_encoding mode;
+ u32 offs = 0, len = 0;
+
+@@ -1130,8 +1130,8 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ if (ret)
+ goto out;
+
+- mphy->txpower_cur = max(mphy->txpower_cur,
+- max(pwr160, max(pwr80, max(pwr40, pwr20))));
++ pwr = max3(pwr80, pwr40, pwr20);
++ mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr);
+ out:
+ mutex_unlock(&dev->mt76.mutex);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+index f3e56817d36e9d..adc26a222823bb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+@@ -144,7 +144,8 @@ static inline bool
+ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
+ {
+ u8 *eep = dev->mt76.eeprom.data;
+- u8 val = eep[MT_EE_WIFI_CONF + 7];
++ u8 offs = is_mt7981(&dev->mt76) ? 8 : 7;
++ u8 val = eep[MT_EE_WIFI_CONF + offs];
+
+ if (band == NL80211_BAND_2GHZ)
+ return val & MT_EE_WIFI_CONF7_TSSI0_2G;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 35fdf4f98d80ba..5ff260319282c6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
+ wiphy_name(wiphy));
++ if (!name)
++ return -ENOMEM;
+
+ cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
+ if (!IS_ERR(cdev)) {
+@@ -386,6 +388,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
++ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
+
+ hw->max_tx_fragments = 4;
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index b8b0c0fda75227..38d27f87217336 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -809,7 +809,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ else
+ txp->rept_wds_wcid = cpu_to_le16(0x3ff);
+- tx_info->skb = DMA_DUMMY_DATA;
++ tx_info->skb = NULL;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+@@ -1538,12 +1538,14 @@ void mt7915_mac_reset_work(struct work_struct *work)
+ set_bit(MT76_RESET, &phy2->mt76->state);
+ cancel_delayed_work_sync(&phy2->mt76->mac_work);
+ }
++
++ mutex_lock(&dev->mt76.mutex);
++
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
+ napi_disable(&dev->mt76.tx_napi);
+
+- mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 8ebbf186fab238..4fd5fd555191a4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -253,7 +253,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
+ mvif->sta.wcid.phy_idx = ext_phy;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+- mt76_packet_id_init(&mvif->sta.wcid);
++ mt76_wcid_init(&mvif->sta.wcid);
+
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+@@ -314,7 +314,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+- mt76_packet_id_flush(&dev->mt76, &msta->wcid);
++ mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
+ }
+
+ int mt7915_set_channel(struct mt7915_phy *phy)
+@@ -557,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+- MT_WF_RFCR_DROP_CTL_RSV |
+- MT_WF_RFCR_DROP_NDPA);
++ MT_WF_RFCR_DROP_CTL_RSV);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+@@ -646,11 +645,13 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ mt7915_update_bss_color(hw, vif, &info->he_bss_color);
+
+ if (changed & (BSS_CHANGED_BEACON |
+- BSS_CHANGED_BEACON_ENABLED |
+- BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+- BSS_CHANGED_FILS_DISCOVERY))
++ BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
+
++ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
++ BSS_CHANGED_FILS_DISCOVERY))
++ mt7915_mcu_add_inband_discov(dev, vif, changed);
++
+ if (set_bss_info == 0)
+ mt7915_mcu_add_bss_info(phy, vif, false);
+ if (set_sta == 0)
+@@ -1045,8 +1046,9 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+
+ phy->mt76->antenna_mask = tx_ant;
+
+- /* handle a variant of mt7916 which has 3T3R but nss2 on 5 GHz band */
+- if (is_mt7916(&dev->mt76) && band && hweight8(tx_ant) == max_nss)
++ /* handle a variant of mt7916/mt7981 which has 3T3R but nss2 on 5 GHz band */
++ if ((is_mt7916(&dev->mt76) || is_mt7981(&dev->mt76)) &&
++ band && hweight8(tx_ant) == max_nss)
+ phy->mt76->chainmask = (dev->chainmask >> chainshift) << chainshift;
+ else
+ phy->mt76->chainmask = tx_ant << (chainshift * band);
+@@ -1386,7 +1388,7 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
+ if (sset != ETH_SS_STATS)
+ return;
+
+- memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
++ memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
+ data += sizeof(mt7915_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
+ }
+@@ -1559,6 +1561,12 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
++static int
++mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val)
++{
++ return 0;
++}
++
+ static int
+ mt7915_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+@@ -1685,6 +1693,7 @@ const struct ieee80211_ops mt7915_ops = {
+ .sta_set_decap_offload = mt7915_sta_set_decap_offload,
+ .add_twt_setup = mt7915_mac_add_twt_setup,
+ .twt_teardown_request = mt7915_twt_teardown_request,
++ .set_frag_threshold = mt7915_set_frag_threshold,
+ CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
+ #ifdef CONFIG_MAC80211_DEBUGFS
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 50ae7bf3af91c1..5fba103bfd65d5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -422,7 +422,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ .len = cpu_to_le16(sub_len),
+ };
+
+- ptlv = skb_put(skb, sub_len);
++ ptlv = skb_put_zero(skb, sub_len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ le16_add_cpu(sub_ntlv, 1);
+@@ -688,13 +688,17 @@ int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ {
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
++ int ret;
+
++ mt76_worker_disable(&dev->mt76.tx_worker);
+ if (enable && !params->amsdu)
+ msta->wcid.amsdu = false;
++ ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
++ MCU_EXT_CMD(STA_REC_UPDATE),
++ enable, true);
++ mt76_worker_enable(&dev->mt76.tx_worker);
+
+- return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+- MCU_EXT_CMD(STA_REC_UPDATE),
+- enable, true);
++ return ret;
+ }
+
+ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+@@ -1015,13 +1019,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool bfee)
+ {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++ int sts = hweight16(phy->mt76->chainmask);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+- if (!bfee && tx_ant < 2)
++ if (!bfee && sts < 2)
+ return false;
+
+ if (sta->deflink.he_cap.has_he) {
+@@ -1882,10 +1886,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+ }
+
+-static void
+-mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+- struct sk_buff *rskb, struct bss_info_bcn *bcn,
+- u32 changed)
++int
++mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
++ u32 changed)
+ {
+ #define OFFLOAD_TX_MODE_SU BIT(0)
+ #define OFFLOAD_TX_MODE_MU BIT(1)
+@@ -1895,14 +1898,27 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
++ struct bss_info_bcn *bcn;
+ struct bss_info_inband_discovery *discov;
+ struct ieee80211_tx_info *info;
+- struct sk_buff *skb = NULL;
+- struct tlv *tlv;
++ struct sk_buff *rskb, *skb = NULL;
++ struct tlv *tlv, *sub_tlv;
+ bool ext_phy = phy != &dev->phy;
+ u8 *buf, interval;
+ int len;
+
++ if (vif->bss_conf.nontransmitted)
++ return 0;
++
++ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
++ MT7915_MAX_BSS_OFFLOAD_SIZE);
++ if (IS_ERR(rskb))
++ return PTR_ERR(rskb);
++
++ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
++ bcn = (struct bss_info_bcn *)tlv;
++ bcn->enable = true;
++
+ if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ vif->bss_conf.fils_discovery.max_interval) {
+ interval = vif->bss_conf.fils_discovery.max_interval;
+@@ -1913,27 +1929,29 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+- if (!skb)
+- return;
++ if (!skb) {
++ dev_kfree_skb(rskb);
++ return -EINVAL;
++ }
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->band = band;
+-
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
+
+ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+
+- if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) {
++ if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+- return;
++ return -EINVAL;
+ }
+
+- tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
+- len, &bcn->sub_ntlv, &bcn->len);
+- discov = (struct bss_info_inband_discovery *)tlv;
++ sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
++ len, &bcn->sub_ntlv, &bcn->len);
++ discov = (struct bss_info_inband_discovery *)sub_tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+@@ -1941,13 +1959,16 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
+ discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ discov->enable = true;
+
+- buf = (u8 *)tlv + sizeof(*discov);
++ buf = (u8 *)sub_tlv + sizeof(*discov);
+
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, changed);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
++
++ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
++ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+ }
+
+ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -1980,11 +2001,14 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ goto out;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+- if (!skb)
++ if (!skb) {
++ dev_kfree_skb(rskb);
+ return -EINVAL;
++ }
+
+- if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) {
++ if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1997,11 +2021,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ dev_kfree_skb(skb);
+
+- if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+- changed & BSS_CHANGED_FILS_DISCOVERY)
+- mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
+- bcn, changed);
+-
+ out:
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+@@ -2725,10 +2744,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+ if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
+ req.tx_path_num = fls(phy->mt76->antenna_mask);
+
+- if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
++ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
++ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+index b9ea297f382c3a..1592b5d6751a0f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+@@ -495,10 +495,14 @@ enum {
+ SER_RECOVER
+ };
+
+-#define MT7915_MAX_BEACON_SIZE 512
+-#define MT7915_MAX_INBAND_FRAME_SIZE 256
+-#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+- MT7915_MAX_INBAND_FRAME_SIZE + \
++#define MT7915_MAX_BEACON_SIZE 1308
++#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
++ sizeof(struct bss_info_bcn) + \
++ sizeof(struct bss_info_bcn_cntdwn) + \
++ sizeof(struct bss_info_bcn_mbss) + \
++ MT_TXD_SIZE + \
++ sizeof(struct bss_info_bcn_cont))
++#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+ MT7915_BEACON_UPDATE_SIZE)
+
+ #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+@@ -511,12 +515,6 @@ enum {
+ sizeof(struct bss_info_bmc_rate) +\
+ sizeof(struct bss_info_ext_bss))
+
+-#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
+- sizeof(struct bss_info_bcn_cntdwn) + \
+- sizeof(struct bss_info_bcn_mbss) + \
+- sizeof(struct bss_info_bcn_cont) + \
+- sizeof(struct bss_info_inband_discovery))
+-
+ static inline s8
+ mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
+ {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index fc7ace638ce8e9..a306a42777d789 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -490,6 +490,11 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+ return dev->reg.map[i].maps + ofs;
+ }
+
++ return 0;
++}
++
++static u32 __mt7915_reg_remap_addr(struct mt7915_dev *dev, u32 addr)
++{
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+@@ -514,15 +519,30 @@ void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
+ {
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
++ if (addr) {
++ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
++ return;
++ }
++
++ spin_lock_bh(&dev->reg_lock);
++ memcpy_fromio(buf, dev->mt76.mmio.regs +
++ __mt7915_reg_remap_addr(dev, offset), len);
++ spin_unlock_bh(&dev->reg_lock);
+ }
+
+ static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
+ {
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+- u32 addr = __mt7915_reg_addr(dev, offset);
++ u32 addr = __mt7915_reg_addr(dev, offset), val;
+
+- return dev->bus_ops->rr(mdev, addr);
++ if (addr)
++ return dev->bus_ops->rr(mdev, addr);
++
++ spin_lock_bh(&dev->reg_lock);
++ val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
++ spin_unlock_bh(&dev->reg_lock);
++
++ return val;
+ }
+
+ static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+@@ -530,7 +550,14 @@ static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+- dev->bus_ops->wr(mdev, addr, val);
++ if (addr) {
++ dev->bus_ops->wr(mdev, addr, val);
++ return;
++ }
++
++ spin_lock_bh(&dev->reg_lock);
++ dev->bus_ops->wr(mdev, __mt7915_reg_remap_addr(dev, offset), val);
++ spin_unlock_bh(&dev->reg_lock);
+ }
+
+ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+@@ -538,7 +565,14 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+- return dev->bus_ops->rmw(mdev, addr, mask, val);
++ if (addr)
++ return dev->bus_ops->rmw(mdev, addr, mask, val);
++
++ spin_lock_bh(&dev->reg_lock);
++ val = dev->bus_ops->rmw(mdev, __mt7915_reg_remap_addr(dev, offset), mask, val);
++ spin_unlock_bh(&dev->reg_lock);
++
++ return val;
+ }
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+@@ -742,7 +776,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res)
+- return -ENOMEM;
++ return 0;
+
+ wed->wlan.platform_dev = plat_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_AXI;
+@@ -813,6 +847,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
++ spin_lock_init(&dev->reg_lock);
+
+ switch (device_id) {
+ case 0x7915:
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 0456e56f634804..e192211d4b23ee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -287,6 +287,7 @@ struct mt7915_dev {
+
+ struct list_head sta_rc_list;
+ struct list_head twt_list;
++ spinlock_t reg_lock;
+
+ u32 hw_pattern;
+
+@@ -447,6 +448,8 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ bool add);
+ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color);
++int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
++ u32 changed);
+ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int enable, u32 changed);
+ int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index ff63f37f67d9c9..61de6b03fa0beb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s",
+ wiphy_name(wiphy));
++ if (!name)
++ return -ENOMEM;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7921_hwmon_groups);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 21f93745422900..cd4eee3749226d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -663,6 +663,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
+ int i, ret;
+
+ dev_dbg(dev->mt76.dev, "chip reset\n");
++ set_bit(MT76_RESET, &dev->mphy.state);
+ dev->hw_full_reset = true;
+ ieee80211_stop_queues(hw);
+
+@@ -691,6 +692,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
+ }
+
+ dev->hw_full_reset = false;
++ clear_bit(MT76_RESET, &dev->mphy.state);
+ pm->suspended = false;
+ ieee80211_wake_queues(hw);
+ ieee80211_iterate_active_interfaces(hw,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 0844d28b3223da..6dec54431312ad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -318,7 +318,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+- mt76_packet_id_init(&mvif->sta.wcid);
++ mt76_wcid_init(&mvif->sta.wcid);
+
+ mt7921_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+@@ -756,7 +756,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+- true, mvif->ctx);
++ true, mvif->mt76.ctx);
+
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
+@@ -791,7 +791,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ if (!sta->tdls)
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
+ &mvif->sta.wcid, false,
+- mvif->ctx);
++ mvif->mt76.ctx);
+ }
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+@@ -1095,7 +1095,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct inet6_dev *idev)
+ {
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+- struct mt792x_dev *dev = mvif->phy->dev;
++ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct inet6_ifaddr *ifa;
+ struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ struct sk_buff *skb;
+@@ -1208,7 +1208,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ mt792x_mutex_acquire(dev);
+
+ err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
+- true, mvif->ctx);
++ true, mvif->mt76.ctx);
+ if (err)
+ goto out;
+
+@@ -1240,7 +1240,7 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ goto out;
+
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
+- mvif->ctx);
++ mvif->mt76.ctx);
+
+ out:
+ mt792x_mutex_release(dev);
+@@ -1265,7 +1265,7 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_chanctx_conf *ctx = priv;
+
+- if (ctx != mvif->ctx)
++ if (ctx != mvif->mt76.ctx)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+@@ -1298,7 +1298,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
+ jiffies_to_msecs(HZ);
+
+ mt792x_mutex_acquire(dev);
+- mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration,
++ mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
+ MT7921_ROC_REQ_JOIN);
+ mt792x_mutex_release(dev);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index 90c93970acabcb..d1b1b8f767fc89 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -1136,22 +1136,27 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
+ u8 type[2];
+ u8 rsvd[64];
+ } __packed req = {
++ .ver = 1,
+ .idx = idx,
+ .env = env_cap,
+ .acpi_conf = mt792x_acpi_get_flags(&dev->phy),
+ };
+ int ret, valid_cnt = 0;
+- u8 i, *pos;
++ u16 buf_len = 0;
++ u8 *pos;
+
+ if (!clc)
+ return 0;
+
++ buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
+ pos = clc->data;
+- for (i = 0; i < clc->nr_country; i++) {
++ while (buf_len > 16) {
+ struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
+ u16 len = le16_to_cpu(rule->len);
++ u16 offset = len + sizeof(*rule);
+
+- pos += len + sizeof(*rule);
++ pos += offset;
++ buf_len -= offset;
+ if (rule->alpha2[0] != alpha2[0] ||
+ rule->alpha2[1] != alpha2[1])
+ continue;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 3dda84a937175d..49d4f3c4829eab 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -17,6 +17,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
++ { PCI_DEVICE(PCI_VENDOR_ID_ITTIM, 0x7922),
++ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+@@ -385,6 +387,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+
+ mt7921e_unregister_device(dev);
++ set_bit(MT76_REMOVED, &mdev->phy.state);
+ devm_free_irq(&pdev->dev, pdev->irq, dev);
+ mt76_free_device(&dev->mt76);
+ pci_free_irq_vectors(pdev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+index e7a995e7e70a33..031ba9aaa4e2f8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+@@ -48,7 +48,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
+ mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
+
+- tx_info->skb = DMA_DUMMY_DATA;
++ tx_info->skb = NULL;
+
+ return 0;
+ }
+@@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
+ mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+
+- set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
+
+ err = __mt7921_start(&dev->phy);
+ out:
+- clear_bit(MT76_RESET, &dev->mphy.state);
+
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+index dc1beb76df3e16..7591e54d289733 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+@@ -228,7 +228,7 @@ static int mt7921s_suspend(struct device *__dev)
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&mdev->tx_worker);
+ mt76_worker_disable(&mdev->sdio.status_worker);
+- cancel_work_sync(&mdev->sdio.stat_work);
++ mt76_worker_disable(&mdev->sdio.stat_worker);
+ clear_bit(MT76_READING_STATS, &dev->mphy.state);
+ mt76_tx_status_check(mdev, true);
+
+@@ -260,6 +260,7 @@ static int mt7921s_suspend(struct device *__dev)
+ restore_worker:
+ mt76_worker_enable(&mdev->tx_worker);
+ mt76_worker_enable(&mdev->sdio.status_worker);
++ mt76_worker_enable(&mdev->sdio.stat_worker);
+
+ if (!pm->ds_enable)
+ mt76_connac_mcu_set_deep_sleep(mdev, false);
+@@ -292,6 +293,7 @@ static int mt7921s_resume(struct device *__dev)
+ mt76_worker_enable(&mdev->sdio.txrx_worker);
+ mt76_worker_enable(&mdev->sdio.status_worker);
+ mt76_worker_enable(&mdev->sdio.net_worker);
++ mt76_worker_enable(&mdev->sdio.stat_worker);
+
+ /* restore previous ds setting */
+ if (!pm->ds_enable)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+index 8edd0291c12801..1f77cf71ca701a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+@@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&dev->mt76.tx_worker);
+- set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -107,7 +106,7 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
+ mt76_worker_disable(&dev->mt76.sdio.txrx_worker);
+ mt76_worker_disable(&dev->mt76.sdio.status_worker);
+ mt76_worker_disable(&dev->mt76.sdio.net_worker);
+- cancel_work_sync(&dev->mt76.sdio.stat_work);
++ mt76_worker_disable(&dev->mt76.sdio.stat_worker);
+
+ mt7921s_disable_irq(&dev->mt76);
+ mt7921s_wfsys_reset(dev);
+@@ -115,6 +114,7 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
+ mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
+ mt76_worker_enable(&dev->mt76.sdio.status_worker);
+ mt76_worker_enable(&dev->mt76.sdio.net_worker);
++ mt76_worker_enable(&dev->mt76.sdio.stat_worker);
+
+ dev->fw_assert = false;
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+@@ -134,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
+
+ err = __mt7921_start(&dev->phy);
+ out:
+- clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
+index 5d5ab8630041b1..6c347495e1185f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
+@@ -91,7 +91,6 @@ struct mt792x_vif {
+ struct ewma_rssi rssi;
+
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+- struct ieee80211_chanctx_conf *ctx;
+ };
+
+ struct mt792x_phy {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+index 303c0f5c9c662c..c4e3bfcc519e21 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+@@ -66,13 +66,15 @@ mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len)
+ }
+
+ /* MTCL : Country List Table for 6G band */
+-static void
++static int
+ mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
+ {
+- if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0)
+- *version = 1;
+- else
+- *version = 2;
++ int ret;
++
++ *version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
++ ? 1 : 2;
++
++ return ret;
+ }
+
+ /* MTDS : Dynamic SAR Power Table */
+@@ -166,16 +168,16 @@ int mt792x_init_acpi_sar(struct mt792x_dev *dev)
+ if (!asar)
+ return -ENOMEM;
+
+- mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
++ ret = mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
++ if (ret) {
++ devm_kfree(dev->mt76.dev, asar->countrylist);
++ asar->countrylist = NULL;
++ }
+
+- /* MTDS is mandatory. Return error if table is invalid */
+ ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->dyn);
+- devm_kfree(dev->mt76.dev, asar->countrylist);
+- devm_kfree(dev->mt76.dev, asar);
+-
+- return ret;
++ asar->dyn = NULL;
+ }
+
+ /* MTGS is optional */
+@@ -290,7 +292,7 @@ int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default)
+ const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
+ int i;
+
+- if (!phy->acpisar)
++ if (!phy->acpisar || !((struct mt792x_acpi_sar *)phy->acpisar)->dyn)
+ return 0;
+
+ /* When ACPI SAR enabled in HW, we should apply rules for .frp
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+index 46be7f996c7e19..66806ed4f942db 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+@@ -115,7 +115,7 @@ void mt792x_remove_interface(struct ieee80211_hw *hw,
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+- mt76_packet_id_flush(&dev->mt76, &msta->wcid);
++ mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
+ }
+ EXPORT_SYMBOL_GPL(mt792x_remove_interface);
+
+@@ -243,7 +243,7 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+- mvif->ctx = ctx;
++ mvif->mt76.ctx = ctx;
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+@@ -259,7 +259,7 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+- mvif->ctx = NULL;
++ mvif->mt76.ctx = NULL;
+ mutex_unlock(&dev->mt76.mutex);
+ }
+ EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
+@@ -332,6 +332,7 @@ static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "v_tx_bw_40",
+ "v_tx_bw_80",
+ "v_tx_bw_160",
++ "v_tx_bw_320",
+ "v_tx_mcs_0",
+ "v_tx_mcs_1",
+ "v_tx_mcs_2",
+@@ -358,7 +359,7 @@ void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ if (sset != ETH_SS_STATS)
+ return;
+
+- memcpy(data, *mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats));
++ memcpy(data, mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats));
+
+ data += sizeof(mt792x_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+index a3dbd3865b2f5b..be3119aa9afa10 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+@@ -12,6 +12,8 @@ irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
+ {
+ struct mt792x_dev *dev = dev_instance;
+
++ if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
++ return IRQ_NONE;
+ mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+index 4d40ec7ff57f53..630520c21a47f7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+@@ -225,6 +225,11 @@ mt7996_radar_trigger(void *data, u64 val)
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
++ if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
++ dev_err(dev->mt76.dev, "Background radar is not enabled\n");
++ return -EINVAL;
++ }
++
+ return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
+ val, 0, 0);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index 26e03b28935f2f..aee531cab46f64 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -276,7 +276,7 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
+
+ void mt7996_mac_init(struct mt7996_dev *dev)
+ {
+-#define HIF_TXD_V2_1 4
++#define HIF_TXD_V2_1 0x21
+ int i;
+
+ mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
+@@ -557,11 +557,10 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
+
+- if (vif != NL80211_IFTYPE_AP)
++ if (!(vif == NL80211_IFTYPE_AP || vif == NL80211_IFTYPE_STATION))
+ return;
+
+ elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ sts - 1) |
+@@ -569,6 +568,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
+ sts - 1);
+ elem->phy_cap_info[5] |= c;
+
++ if (vif != NL80211_IFTYPE_AP)
++ return;
++
++ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
++
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
+ elem->phy_cap_info[6] |= c;
+@@ -728,43 +732,49 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
+
+ eht_cap_elem->phy_cap_info[0] =
+- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+
++ val = max_t(u8, sts - 1, 3);
+ eht_cap_elem->phy_cap_info[0] |=
+- u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
++ u8_encode_bits(u8_get_bits(val, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[1] =
+- u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
++ u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+- u8_encode_bits(sts - 1,
+- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
+- u8_encode_bits(sts - 1,
+- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
++ u8_encode_bits(val,
++ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] =
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
+- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
+- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
++ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
++
++ if (band == NL80211_BAND_6GHZ) {
++ eht_cap_elem->phy_cap_info[0] |=
++ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
++
++ eht_cap_elem->phy_cap_info[1] |=
++ u8_encode_bits(val,
++ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
++
++ eht_cap_elem->phy_cap_info[2] |=
++ u8_encode_bits(sts - 1,
++ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
++ }
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+- IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+- IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
++ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ u8_encode_bits(min_t(int, sts - 1, 2),
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+- IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
+ u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
+@@ -778,14 +788,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
+ u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
+
+- eht_cap_elem->phy_cap_info[7] =
+- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+-
+ val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
+ #define SET_EHT_MAX_NSS(_bw, _val) do { \
+@@ -796,8 +798,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+
+ SET_EHT_MAX_NSS(80, val);
+ SET_EHT_MAX_NSS(160, val);
+- SET_EHT_MAX_NSS(320, val);
++ if (band == NL80211_BAND_6GHZ)
++ SET_EHT_MAX_NSS(320, val);
+ #undef SET_EHT_MAX_NSS
++
++ if (iftype != NL80211_IFTYPE_AP)
++ return;
++
++ eht_cap_elem->phy_cap_info[3] |=
++ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
++ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
++
++ eht_cap_elem->phy_cap_info[7] =
++ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
++ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
++ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
++ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
++
++ if (band != NL80211_BAND_6GHZ)
++ return;
++
++ eht_cap_elem->phy_cap_info[7] |=
++ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
++ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+ }
+
+ static void
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index ac8759febe485c..73d46ec1181ae8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -433,7 +433,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
++ /* rxv reports bw 320-1 and 320-2 separately */
+ case IEEE80211_STA_RX_BW_320:
++ case IEEE80211_STA_RX_BW_320 + 1:
+ status->bw = RATE_INFO_BW_320;
+ break;
+ default:
+@@ -755,6 +757,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+
+ txwi[2] |= cpu_to_le32(val);
++
++ if (wcid->amsdu)
++ txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
+ }
+
+ static void
+@@ -838,10 +843,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ struct mt76_vif *mvif;
+ u16 tx_count = 15;
+ u32 val;
+- bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+- BSS_CHANGED_BEACON_ENABLED));
+ bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY));
++ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
++ BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
+
+ mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
+ if (mvif) {
+@@ -885,8 +890,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ val |= MT_TXD3_PROTECT_FRAME;
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ val |= MT_TXD3_NO_ACK;
+- if (wcid->amsdu)
+- val |= MT_TXD3_HW_AMSDU;
+
+ txwi[3] = cpu_to_le32(val);
+ txwi[4] = 0;
+@@ -991,11 +994,9 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ }
+
+ txp->fw.token = cpu_to_le16(id);
+- if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
+- txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
+- else
+- txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
+- tx_info->skb = DMA_DUMMY_DATA;
++ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
++
++ tx_info->skb = NULL;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+@@ -1051,7 +1052,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7996_tx_check_aggr(sta, txwi);
+ } else {
+- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
++ wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+@@ -2433,6 +2434,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
+ return 0;
+ }
+
++static bool
++mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
++ struct ieee80211_twt_params *twt_agrt)
++{
++ u16 type = le16_to_cpu(twt_agrt->req_type);
++ u8 exp;
++ int i;
++
++ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
++ for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
++ struct mt7996_twt_flow *f;
++
++ if (!(msta->twt.flowid_mask & BIT(i)))
++ continue;
++
++ f = &msta->twt.flow[i];
++ if (f->duration == twt_agrt->min_twt_dur &&
++ f->mantissa == twt_agrt->mantissa &&
++ f->exp == exp &&
++ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
++ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
++ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
++ return true;
++ }
++
++ return false;
++}
++
+ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt)
+@@ -2444,8 +2473,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
+ enum ieee80211_twt_setup_cmd sta_setup_cmd;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_twt_flow *flow;
+- int flowid, table_id;
+- u8 exp;
++ u8 flowid, table_id, exp;
+
+ if (mt7996_mac_check_twt_req(twt))
+ goto out;
+@@ -2458,9 +2486,19 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
+ if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ goto unlock;
+
++ if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
++ setup_cmd = TWT_SETUP_CMD_DICTATE;
++ twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
++ goto unlock;
++ }
++
++ if (mt7996_mac_twt_param_equal(msta, twt_agrt))
++ goto unlock;
++
+ flowid = ffs(~msta->twt.flowid_mask) - 1;
+- le16p_replace_bits(&twt_agrt->req_type, flowid,
+- IEEE80211_TWT_REQTYPE_FLOWID);
++ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
++ twt_agrt->req_type |= le16_encode_bits(flowid,
++ IEEE80211_TWT_REQTYPE_FLOWID);
+
+ table_id = ffs(~dev->twt.table_mask) - 1;
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
+@@ -2507,10 +2545,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
+ unlock:
+ mutex_unlock(&dev->mt76.mutex);
+ out:
+- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
+- IEEE80211_TWT_REQTYPE_SETUP_CMD);
+- twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
+- (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
++ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
++ twt_agrt->req_type |=
++ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
++ twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
+ }
+
+ void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index c3a479dc3f533c..0e69f0a508616b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -190,7 +190,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
+ mvif->mt76.omac_idx = idx;
+ mvif->phy = phy;
+ mvif->mt76.band_idx = band_idx;
+- mvif->mt76.wmm_idx = band_idx;
++ mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
+
+ ret = mt7996_mcu_add_dev_info(phy, vif, true);
+ if (ret)
+@@ -207,7 +207,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
+ mvif->sta.wcid.phy_idx = band_idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+- mt76_packet_id_init(&mvif->sta.wcid);
++ mt76_wcid_init(&mvif->sta.wcid);
+
+ mt7996_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+@@ -268,7 +268,7 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+- mt76_packet_id_flush(&dev->mt76, &msta->wcid);
++ mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
+ }
+
+ int mt7996_set_channel(struct mt7996_phy *phy)
+@@ -287,6 +287,10 @@ int mt7996_set_channel(struct mt7996_phy *phy)
+ if (ret)
+ goto out;
+
++ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
++ if (ret)
++ goto out;
++
+ ret = mt7996_dfs_init_radar_detector(phy);
+ mt7996_mac_cca_stats_reset(phy);
+
+@@ -414,10 +418,16 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct ieee80211_tx_queue_params *params)
+ {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
++ const u8 mq_to_aci[] = {
++ [IEEE80211_AC_VO] = 3,
++ [IEEE80211_AC_VI] = 2,
++ [IEEE80211_AC_BE] = 0,
++ [IEEE80211_AC_BK] = 1,
++ };
+
++ /* firmware uses access class index */
++ mvif->queue_params[mq_to_aci[queue]] = *params;
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+- queue = mt76_connac_lmac_mapping(queue);
+- mvif->queue_params[queue] = *params;
+
+ return 0;
+ }
+@@ -618,8 +628,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
+ mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
+ }
+
+- if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+- changed & BSS_CHANGED_FILS_DISCOVERY)
++ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
++ BSS_CHANGED_FILS_DISCOVERY))
+ mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+
+ if (changed & BSS_CHANGED_MU_GROUPS)
+@@ -1192,7 +1202,7 @@ void mt7996_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *mt7996_gstrings_stats,
++ memcpy(data, mt7996_gstrings_stats,
+ sizeof(mt7996_gstrings_stats));
+ }
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 4a30db49ef33fa..302171e1035977 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -339,7 +339,10 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
+ if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
+ return;
+
+- if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2)
++ if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
++ return;
++
++ if (r->band_idx == MT_RX_SEL2)
+ mphy = dev->rdd2_phy->mt76;
+ else
+ mphy = dev->mt76.phys[r->band_idx];
+@@ -517,13 +520,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
+ static struct tlv *
+ mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
+ {
+- struct tlv *ptlv, tlv = {
+- .tag = cpu_to_le16(tag),
+- .len = cpu_to_le16(len),
+- };
++ struct tlv *ptlv = skb_put_zero(skb, len);
+
+- ptlv = skb_put(skb, len);
+- memcpy(ptlv, &tlv, sizeof(tlv));
++ ptlv->tag = cpu_to_le16(tag);
++ ptlv->len = cpu_to_le16(len);
+
+ return ptlv;
+ }
+@@ -1005,6 +1005,9 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+ static void
+ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+ {
++ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
++ struct ieee80211_vif *vif = container_of((void *)msta->vif,
++ struct ieee80211_vif, drv_priv);
+ struct ieee80211_eht_mcs_nss_supp *mcs_map;
+ struct ieee80211_eht_cap_elem_fixed *elem;
+ struct sta_rec_eht *eht;
+@@ -1024,8 +1027,17 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+ eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
+ eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
+
+- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+- memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
++ if (vif->type != NL80211_IFTYPE_STATION &&
++ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
++ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
++ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz,
++ sizeof(eht->mcs_map_bw20));
++ return;
++ }
++
+ memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
+ memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
+ memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
+@@ -1173,10 +1185,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+
+ if (bfee)
+ return vif->bss_conf.eht_su_beamformee &&
+- EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
++ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ else
+ return vif->bss_conf.eht_su_beamformer &&
+- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
++ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ }
+
+ if (sta->deflink.he_cap.has_he) {
+@@ -1288,6 +1300,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
+ u8 snd_dim, sts;
+
++ if (!vc)
++ return;
++
+ bf->tx_mode = MT_PHY_TYPE_HE_SU;
+
+ mt7996_mcu_sta_sounding_rate(bf);
+@@ -1397,7 +1412,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mvif->phy;
+- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
++ int tx_ant = hweight16(phy->mt76->chainmask) - 1;
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ const u8 matrix[4][4] = {
+@@ -2016,7 +2031,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
+ }
+
+- buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE;
++ buf = (u8 *)bcn + sizeof(*bcn);
+ mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0,
+ BSS_CHANGED_BEACON);
+
+@@ -2034,26 +2049,22 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ struct bss_bcn_content_tlv *bcn;
++ int len;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+- MT7996_BEACON_UPDATE_SIZE);
++ MT7996_MAX_BSS_OFFLOAD_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+- tlv = mt7996_mcu_add_uni_tlv(rskb,
+- UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn));
+- bcn = (struct bss_bcn_content_tlv *)tlv;
+- bcn->enable = en;
+-
+- if (!en)
+- goto out;
+-
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+- if (!skb)
++ if (!skb) {
++ dev_kfree_skb(rskb);
+ return -EINVAL;
++ }
+
+- if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
++ if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -2061,11 +2072,18 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
++ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
++ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
++ bcn = (struct bss_bcn_content_tlv *)tlv;
++ bcn->enable = en;
++ if (!en)
++ goto out;
++
+ mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ /* TODO: subtag - 11v MBSSID */
+ mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
+- dev_kfree_skb(skb);
+ out:
++ dev_kfree_skb(skb);
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+@@ -2086,9 +2104,13 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ struct sk_buff *rskb, *skb = NULL;
+ struct tlv *tlv;
+ u8 *buf, interval;
++ int len;
++
++ if (vif->bss_conf.nontransmitted)
++ return 0;
+
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+- MT7996_INBAND_FRAME_SIZE);
++ MT7996_MAX_BSS_OFFLOAD_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+@@ -2102,11 +2124,14 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+- if (!skb)
++ if (!skb) {
++ dev_kfree_skb(rskb);
+ return -EINVAL;
++ }
+
+- if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) {
++ if (skb->len > MT7996_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
++ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -2116,7 +2141,8 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ info->band = band;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
+
+- tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov));
++ len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4);
++ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
+
+ discov = (struct bss_inband_discovery_tlv *)tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+@@ -2127,7 +2153,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ discov->enable = true;
+ discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED);
+
+- buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE;
++ buf = (u8 *)tlv + sizeof(*discov);
+
+ mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed);
+
+@@ -2679,7 +2705,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
+
+ e = (struct edca *)tlv;
+ e->set = WMM_PARAM_SET;
+- e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS;
++ e->queue = ac;
+ e->aifs = q->aifs;
+ e->txop = cpu_to_le16(q->txop);
+
+@@ -2960,10 +2986,10 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
+ .channel_band = ch_band[chandef->chan->band],
+ };
+
+- if (tag == UNI_CHANNEL_RX_PATH ||
+- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
++ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
++ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
++ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
+@@ -3076,7 +3102,7 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
+ u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
+ u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
+
+- skb_pull(skb, 64);
++ skb_pull(skb, 48);
+ memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
+ }
+
+@@ -3307,8 +3333,8 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
+
+ tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
+ req_mod_en = (struct bf_mod_en_ctrl *)tlv;
+- req_mod_en->bf_num = 2;
+- req_mod_en->bf_bitmap = GENMASK(0, 0);
++ req_mod_en->bf_num = 3;
++ req_mod_en->bf_bitmap = GENMASK(2, 0);
+ break;
+ }
+ default:
+@@ -3548,7 +3574,9 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ int cmd)
+ {
+ struct {
+- u8 _rsv[4];
++ /* fixed field */
++ u8 bss;
++ u8 _rsv[3];
+
+ __le16 tag;
+ __le16 len;
+@@ -3566,7 +3594,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ u8 exponent;
+ u8 is_ap;
+ u8 agrt_params;
+- u8 __rsv2[135];
++ u8 __rsv2[23];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+@@ -3576,6 +3604,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
+ .flowid = flow->id,
+ .peer_id = cpu_to_le16(flow->wcid),
+ .duration = flow->duration,
++ .bss = mvif->mt76.idx,
+ .bss_idx = mvif->mt76.idx,
+ .start_tsf = cpu_to_le64(flow->tsf),
+ .mantissa = flow->mantissa,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+index 078f828586212a..58504b80eae8b0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+@@ -221,7 +221,7 @@ struct bss_rate_tlv {
+ u8 short_preamble;
+ u8 bc_fixed_rate;
+ u8 mc_fixed_rate;
+- u8 __rsv2[1];
++ u8 __rsv2[9];
+ } __packed;
+
+ struct bss_ra_tlv {
+@@ -270,8 +270,6 @@ struct bss_inband_discovery_tlv {
+ u8 enable;
+ __le16 wcid;
+ __le16 prob_rsp_len;
+-#define MAX_INBAND_FRAME_SIZE 512
+- u8 pkt[MAX_INBAND_FRAME_SIZE];
+ } __packed;
+
+ struct bss_bcn_content_tlv {
+@@ -283,8 +281,6 @@ struct bss_bcn_content_tlv {
+ u8 enable;
+ u8 type;
+ __le16 pkt_len;
+-#define MAX_BEACON_SIZE 512
+- u8 pkt[MAX_BEACON_SIZE];
+ } __packed;
+
+ struct bss_bcn_cntdwn_tlv {
+@@ -591,13 +587,14 @@ enum {
+ sizeof(struct sta_rec_hdr_trans) + \
+ sizeof(struct tlv))
+
++#define MT7996_MAX_BEACON_SIZE 1338
+ #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
+ sizeof(struct bss_bcn_content_tlv) + \
++ 4 + MT_TXD_SIZE + \
+ sizeof(struct bss_bcn_cntdwn_tlv) + \
+ sizeof(struct bss_bcn_mbss_tlv))
+-
+-#define MT7996_INBAND_FRAME_SIZE (sizeof(struct bss_req_hdr) + \
+- sizeof(struct bss_inband_discovery_tlv))
++#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
++ MT7996_BEACON_UPDATE_SIZE)
+
+ enum {
+ UNI_BAND_CONFIG_RADIO_ENABLE,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index 3a591a7b47ae68..e75becadc2e54d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -82,7 +82,6 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+
+- dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
+@@ -97,7 +96,6 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+- dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
+@@ -107,26 +105,10 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
+ return MT_HIF_REMAP_BASE_L2 + offset;
+ }
+
+-static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
+-{
+- /* remap to ori status */
+- if (unlikely(dev->reg_l1_backup)) {
+- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
+- dev->reg_l1_backup = 0;
+- }
+-
+- if (dev->reg_l2_backup) {
+- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
+- dev->reg_l2_backup = 0;
+- }
+-}
+-
+ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
+ {
+ int i;
+
+- mt7996_reg_remap_restore(dev);
+-
+ if (addr < 0x100000)
+ return addr;
+
+@@ -143,6 +125,11 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
+ return dev->reg.map[i].mapped + ofs;
+ }
+
++ return 0;
++}
++
++static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
++{
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+@@ -167,28 +154,60 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
+ {
+ u32 addr = __mt7996_reg_addr(dev, offset);
+
+- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
++ if (addr) {
++ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
++ return;
++ }
++
++ spin_lock_bh(&dev->reg_lock);
++ memcpy_fromio(buf, dev->mt76.mmio.regs +
++ __mt7996_reg_remap_addr(dev, offset), len);
++ spin_unlock_bh(&dev->reg_lock);
+ }
+
+ static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
+ {
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
++ u32 addr = __mt7996_reg_addr(dev, offset), val;
++
++ if (addr)
++ return dev->bus_ops->rr(mdev, addr);
+
+- return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
++ spin_lock_bh(&dev->reg_lock);
++ val = dev->bus_ops->rr(mdev, __mt7996_reg_remap_addr(dev, offset));
++ spin_unlock_bh(&dev->reg_lock);
++
++ return val;
+ }
+
+ static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+ {
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
++ u32 addr = __mt7996_reg_addr(dev, offset);
+
+- dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
++ if (addr) {
++ dev->bus_ops->wr(mdev, addr, val);
++ return;
++ }
++
++ spin_lock_bh(&dev->reg_lock);
++ dev->bus_ops->wr(mdev, __mt7996_reg_remap_addr(dev, offset), val);
++ spin_unlock_bh(&dev->reg_lock);
+ }
+
+ static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+ {
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
++ u32 addr = __mt7996_reg_addr(dev, offset);
++
++ if (addr)
++ return dev->bus_ops->rmw(mdev, addr, mask, val);
++
++ spin_lock_bh(&dev->reg_lock);
++ val = dev->bus_ops->rmw(mdev, __mt7996_reg_remap_addr(dev, offset), mask, val);
++ spin_unlock_bh(&dev->reg_lock);
+
+- return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
++ return val;
+ }
+
+ static int mt7996_mmio_init(struct mt76_dev *mdev,
+@@ -200,6 +219,7 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
+
+ dev = container_of(mdev, struct mt7996_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
++ spin_lock_init(&dev->reg_lock);
+
+ switch (device_id) {
+ case 0x7990:
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+index 7354e5cf8e6748..25bb3656123141 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+@@ -43,6 +43,7 @@
+
+ #define MT7996_MAX_TWT_AGRT 16
+ #define MT7996_MAX_STA_TWT_AGRT 8
++#define MT7996_MIN_TWT_DUR 64
+ #define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
+
+ /* NOTE: used to map mt76_rates. idx may change if firmware expands table */
+@@ -237,12 +238,11 @@ struct mt7996_dev {
+ struct rchan *relay_fwlog;
+
+ struct {
+- u8 table_mask;
++ u16 table_mask;
+ u8 n_agrt;
+ } twt;
+
+- u32 reg_l1_backup;
+- u32 reg_l2_backup;
++ spinlock_t reg_lock;
+
+ u8 wtbl_size_group;
+ };
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+index c5301050ff8b30..67c015896243f0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+@@ -17,11 +17,13 @@ static u32 hif_idx;
+
+ static const struct pci_device_id mt7996_pci_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7990) },
++ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7992) },
+ { },
+ };
+
+ static const struct pci_device_id mt7996_hif_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7991) },
++ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x799a) },
+ { },
+ };
+
+@@ -60,7 +62,9 @@ static void mt7996_put_hif2(struct mt7996_hif *hif)
+ static struct mt7996_hif *mt7996_pci_init_hif2(struct pci_dev *pdev)
+ {
+ hif_idx++;
+- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL))
++
++ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL) &&
++ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x799a, NULL))
+ return NULL;
+
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+@@ -113,7 +117,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
+
+ mt76_pci_disable_aspm(pdev);
+
+- if (id->device == 0x7991)
++ if (id->device == 0x7991 || id->device == 0x799a)
+ return mt7996_pci_hif2_probe(pdev);
+
+ dev = mt7996_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 419723118ded8e..2c761d469c06bd 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -481,25 +481,26 @@ static void mt76s_status_worker(struct mt76_worker *w)
+ if (dev->drv->tx_status_data && ndata_frames > 0 &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
+ !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
+- ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
++ mt76_worker_schedule(&sdio->stat_worker);
+ } while (nframes > 0);
+
+ if (resched)
+ mt76_worker_schedule(&dev->tx_worker);
+ }
+
+-static void mt76s_tx_status_data(struct work_struct *work)
++static void mt76s_tx_status_data(struct mt76_worker *worker)
+ {
+ struct mt76_sdio *sdio;
+ struct mt76_dev *dev;
+ u8 update = 1;
+ u16 count = 0;
+
+- sdio = container_of(work, struct mt76_sdio, stat_work);
++ sdio = container_of(worker, struct mt76_sdio, stat_worker);
+ dev = container_of(sdio, struct mt76_dev, sdio);
+
+ while (true) {
+- if (test_bit(MT76_REMOVED, &dev->phy.state))
++ if (test_bit(MT76_RESET, &dev->phy.state) ||
++ test_bit(MT76_REMOVED, &dev->phy.state))
+ break;
+
+ if (!dev->drv->tx_status_data(dev, &update))
+@@ -508,7 +509,7 @@ static void mt76s_tx_status_data(struct work_struct *work)
+ }
+
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+- ieee80211_queue_work(dev->hw, &sdio->stat_work);
++ mt76_worker_schedule(&sdio->status_worker);
+ else
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+ }
+@@ -600,8 +601,8 @@ void mt76s_deinit(struct mt76_dev *dev)
+ mt76_worker_teardown(&sdio->txrx_worker);
+ mt76_worker_teardown(&sdio->status_worker);
+ mt76_worker_teardown(&sdio->net_worker);
++ mt76_worker_teardown(&sdio->stat_worker);
+
+- cancel_work_sync(&sdio->stat_work);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
+
+ mt76_tx_status_check(dev, true);
+@@ -644,10 +645,14 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
+ if (err)
+ return err;
+
++ err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data,
++ "sdio-sta");
++ if (err)
++ return err;
++
+ sched_set_fifo_low(sdio->status_worker.task);
+ sched_set_fifo_low(sdio->net_worker.task);
+-
+- INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
++ sched_set_fifo_low(sdio->stat_worker.task);
+
+ dev->queue_ops = &sdio_queue_ops;
+ dev->bus = bus_ops;
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 6cc26cc6c5178c..1809b03292c3d9 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -329,40 +329,32 @@ void
+ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb)
+ {
+- struct mt76_dev *dev = phy->dev;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+- struct mt76_queue *q;
+- int qid = skb_get_queue_mapping(skb);
+
+ if (mt76_testmode_enabled(phy)) {
+ ieee80211_free_txskb(phy->hw, skb);
+ return;
+ }
+
+- if (WARN_ON(qid >= MT_TXQ_PSD)) {
+- qid = MT_TXQ_BE;
+- skb_set_queue_mapping(skb, qid);
+- }
+-
+- if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+- !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+- !ieee80211_is_data(hdr->frame_control) &&
+- !ieee80211_is_bufferable_mmpdu(skb)) {
+- qid = MT_TXQ_PSD;
+- }
++ if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
++ skb_set_queue_mapping(skb, MT_TXQ_BE);
+
+ if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
+- q = phy->q_tx[qid];
+
+- spin_lock_bh(&q->lock);
+- __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
+- dev->queue_ops->kick(dev, q);
+- spin_unlock_bh(&q->lock);
++ spin_lock_bh(&wcid->tx_pending.lock);
++ __skb_queue_tail(&wcid->tx_pending, skb);
++ spin_unlock_bh(&wcid->tx_pending.lock);
++
++ spin_lock_bh(&phy->tx_lock);
++ if (list_empty(&wcid->tx_list))
++ list_add_tail(&wcid->tx_list, &phy->tx_list);
++ spin_unlock_bh(&phy->tx_lock);
++
++ mt76_worker_schedule(&phy->dev->tx_worker);
+ }
+ EXPORT_SYMBOL_GPL(mt76_tx);
+
+@@ -593,10 +585,86 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
+ }
+ EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
++static int
++mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
++{
++ struct mt76_dev *dev = phy->dev;
++ struct ieee80211_sta *sta;
++ struct mt76_queue *q;
++ struct sk_buff *skb;
++ int ret = 0;
++
++ spin_lock(&wcid->tx_pending.lock);
++ while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++ int qid = skb_get_queue_mapping(skb);
++
++ if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
++ !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
++ !ieee80211_is_data(hdr->frame_control) &&
++ !ieee80211_is_bufferable_mmpdu(skb))
++ qid = MT_TXQ_PSD;
++
++ q = phy->q_tx[qid];
++ if (mt76_txq_stopped(q)) {
++ ret = -1;
++ break;
++ }
++
++ __skb_unlink(skb, &wcid->tx_pending);
++ spin_unlock(&wcid->tx_pending.lock);
++
++ sta = wcid_to_sta(wcid);
++ spin_lock(&q->lock);
++ __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
++ dev->queue_ops->kick(dev, q);
++ spin_unlock(&q->lock);
++
++ spin_lock(&wcid->tx_pending.lock);
++ }
++ spin_unlock(&wcid->tx_pending.lock);
++
++ return ret;
++}
++
++static void mt76_txq_schedule_pending(struct mt76_phy *phy)
++{
++ if (list_empty(&phy->tx_list))
++ return;
++
++ local_bh_disable();
++ rcu_read_lock();
++
++ spin_lock(&phy->tx_lock);
++ while (!list_empty(&phy->tx_list)) {
++ struct mt76_wcid *wcid = NULL;
++ int ret;
++
++ wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
++ list_del_init(&wcid->tx_list);
++
++ spin_unlock(&phy->tx_lock);
++ ret = mt76_txq_schedule_pending_wcid(phy, wcid);
++ spin_lock(&phy->tx_lock);
++
++ if (ret) {
++ if (list_empty(&wcid->tx_list))
++ list_add_tail(&wcid->tx_list, &phy->tx_list);
++ break;
++ }
++ }
++ spin_unlock(&phy->tx_lock);
++
++ rcu_read_unlock();
++ local_bh_enable();
++}
++
+ void mt76_txq_schedule_all(struct mt76_phy *phy)
+ {
+ int i;
+
++ mt76_txq_schedule_pending(phy);
+ for (i = 0; i <= MT_TXQ_BK; i++)
+ mt76_txq_schedule(phy, i);
+ }
+diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+index b545d93c6e374e..6f3245a43aef17 100644
+--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
++++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+@@ -1615,7 +1615,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
+ cfg80211_unregister_netdevice(vif->ndev);
+ vif->monitor_flag = 0;
+
+- wilc_set_operation_mode(vif, 0, 0, 0);
+ mutex_lock(&wl->vif_mutex);
+ list_del_rcu(&vif->list);
+ wl->vif_num--;
+@@ -1810,15 +1809,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
+ INIT_LIST_HEAD(&wl->rxq_head.list);
+ INIT_LIST_HEAD(&wl->vif_list);
+
++ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
++ wiphy_name(wl->wiphy));
++ if (!wl->hif_workqueue) {
++ ret = -ENOMEM;
++ goto free_cfg;
++ }
+ vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
+ NL80211_IFTYPE_STATION, false);
+ if (IS_ERR(vif)) {
+ ret = PTR_ERR(vif);
+- goto free_cfg;
++ goto free_hq;
+ }
+
+ return 0;
+
++free_hq:
++ destroy_workqueue(wl->hif_workqueue);
++
+ free_cfg:
+ wilc_wlan_cfg_deinit(wl);
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index a28da593848134..25e881ee727cdb 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -374,38 +374,52 @@ static void handle_connect_timeout(struct work_struct *work)
+ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ struct cfg80211_crypto_settings *crypto)
+ {
+- struct wilc_join_bss_param *param;
++ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
++ const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
++ const struct cfg80211_bss_ies *ies;
++ struct wilc_join_bss_param *param;
+ u8 rates_len = 0;
+- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+- const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
++ int ies_len;
++ u64 ies_tsf;
+ int ret;
+- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return NULL;
+
++ rcu_read_lock();
++ ies = rcu_dereference(bss->ies);
++ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
++ if (!ies_data) {
++ rcu_read_unlock();
++ kfree(param);
++ return NULL;
++ }
++ ies_len = ies->len;
++ ies_tsf = ies->tsf;
++ rcu_read_unlock();
++
+ param->beacon_period = cpu_to_le16(bss->beacon_interval);
+ param->cap_info = cpu_to_le16(bss->capability);
+ param->bss_type = WILC_FW_BSS_TYPE_INFRA;
+ param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
+ ether_addr_copy(param->bssid, bss->bssid);
+
+- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
++ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
+ if (ssid_elm) {
+ if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
+ memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
+ }
+
+- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
++ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
+ if (tim_elm && tim_elm[1] >= 2)
+ param->dtim_period = tim_elm[3];
+
+ memset(param->p_suites, 0xFF, 3);
+ memset(param->akm_suites, 0xFF, 3);
+
+- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
++ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
+ if (rates_ie) {
+ rates_len = rates_ie[1];
+ if (rates_len > WILC_MAX_RATES_SUPPORTED)
+@@ -416,7 +430,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+
+ if (rates_len < WILC_MAX_RATES_SUPPORTED) {
+ supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+- ies->data, ies->len);
++ ies_data, ies_len);
+ if (supp_rates_ie) {
+ u8 ext_rates = supp_rates_ie[1];
+
+@@ -431,15 +445,15 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ }
+ }
+
+- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
++ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
+ if (ht_ie)
+ param->ht_capable = true;
+
+- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
++ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ (u8 *)&noa_attr, sizeof(noa_attr));
+ if (ret > 0) {
+- param->tsf_lo = cpu_to_le32(ies->tsf);
++ param->tsf_lo = cpu_to_le32(ies_tsf);
+ param->noa_enabled = 1;
+ param->idx = noa_attr.index;
+ if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
+@@ -459,7 +473,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ }
+ wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WMM,
+- ies->data, ies->len);
++ ies_data, ies_len);
+ if (wmm_ie) {
+ struct ieee80211_wmm_param_ie *ie;
+
+@@ -474,13 +488,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+
+ wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+- ies->data, ies->len);
++ ies_data, ies_len);
+ if (wpa_ie) {
+ param->mode_802_11i = 1;
+ param->rsn_found = true;
+ }
+
+- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
++ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
+ if (rsn_ie) {
+ int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
+ int offset = 8;
+@@ -514,6 +528,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
+ }
+
++ kfree(ies_data);
+ return (void *)param;
+ }
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index e9f59de31b0b95..b714da48eaa172 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -878,8 +878,7 @@ static const struct net_device_ops wilc_netdev_ops = {
+
+ void wilc_netdev_cleanup(struct wilc *wilc)
+ {
+- struct wilc_vif *vif;
+- int srcu_idx, ifc_cnt = 0;
++ struct wilc_vif *vif, *vif_tmp;
+
+ if (!wilc)
+ return;
+@@ -889,32 +888,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
+ wilc->firmware = NULL;
+ }
+
+- srcu_idx = srcu_read_lock(&wilc->srcu);
+- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
++ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
++ mutex_lock(&wilc->vif_mutex);
++ list_del_rcu(&vif->list);
++ wilc->vif_num--;
++ mutex_unlock(&wilc->vif_mutex);
++ synchronize_srcu(&wilc->srcu);
+ if (vif->ndev)
+ unregister_netdev(vif->ndev);
+ }
+- srcu_read_unlock(&wilc->srcu, srcu_idx);
+
+ wilc_wfi_deinit_mon_interface(wilc, false);
+ destroy_workqueue(wilc->hif_workqueue);
+
+- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
+- mutex_lock(&wilc->vif_mutex);
+- if (wilc->vif_num <= 0) {
+- mutex_unlock(&wilc->vif_mutex);
+- break;
+- }
+- vif = wilc_get_wl_to_vif(wilc);
+- if (!IS_ERR(vif))
+- list_del_rcu(&vif->list);
+-
+- wilc->vif_num--;
+- mutex_unlock(&wilc->vif_mutex);
+- synchronize_srcu(&wilc->srcu);
+- ifc_cnt++;
+- }
+-
+ wilc_wlan_cfg_deinit(wilc);
+ wlan_deinit_locks(wilc);
+ wiphy_unregister(wilc->wiphy);
+@@ -977,13 +963,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ goto error;
+ }
+
+- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
+- ndev->name);
+- if (!wl->hif_workqueue) {
+- ret = -ENOMEM;
+- goto unregister_netdev;
+- }
+-
+ ndev->needs_free_netdev = true;
+ vif->iftype = vif_type;
+ vif->idx = wilc_get_available_idx(wl);
+@@ -996,12 +975,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+
+ return vif;
+
+-unregister_netdev:
++error:
+ if (rtnl_locked)
+ cfg80211_unregister_netdevice(ndev);
+ else
+ unregister_netdev(ndev);
+- error:
+ free_netdev(ndev);
+ return ERR_PTR(ret);
+ }
+diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
+index 77b4cdff73c370..4cf8586ed55aeb 100644
+--- a/drivers/net/wireless/microchip/wilc1000/spi.c
++++ b/drivers/net/wireless/microchip/wilc1000/spi.c
+@@ -192,11 +192,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
+ /* assert ENABLE: */
+ gpiod_set_value(gpios->enable, 1);
+ mdelay(5);
+- /* assert RESET: */
+- gpiod_set_value(gpios->reset, 1);
+- } else {
+ /* deassert RESET: */
+ gpiod_set_value(gpios->reset, 0);
++ } else {
++ /* assert RESET: */
++ gpiod_set_value(gpios->reset, 1);
+ /* deassert ENABLE: */
+ gpiod_set_value(gpios->enable, 0);
+ }
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index 58bbf50081e474..9eb115c79c90aa 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev)
+ }
+
+ if (!wilc->vmm_table)
+- wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++ wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index 94ee831b5de353..506d2f31efb5af 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *et_strings, sizeof(et_strings));
++ memcpy(data, et_strings, sizeof(et_strings));
+ }
+
+ static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
+index 76d0a778636a4c..311676c1ece0ac 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
++++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
+@@ -493,9 +493,12 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
+ void *context)
+ {
+ struct usb_device *udev = interface_to_usbdev(usb->ez_usb);
+- struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
++ struct urb *urb;
+ int r;
+
++ urb = usb_alloc_urb(0, GFP_ATOMIC);
++ if (!urb)
++ return -ENOMEM;
+ usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
+ (void *)buffer, buffer_len, complete_fn, context);
+
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index e65cc00fa17c9d..c13ae87f94f498 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -8695,7 +8695,7 @@ static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev)
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4);
+
+- rt2800_bbp_write(rt2x00dev, 158, 141);
++ rt2800_bbp_write(rt2x00dev, 158, 140);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+index 9a9cfd0ce402d4..00b945053e1991 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+@@ -101,6 +101,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
+ rt2x00link_stop_tuner(rt2x00dev);
+ rt2x00queue_stop_queues(rt2x00dev);
+ rt2x00queue_flush_queues(rt2x00dev, true);
++ rt2x00queue_stop_queue(rt2x00dev->bcn);
+
+ /*
+ * Disable radio.
+@@ -1286,6 +1287,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
+ rt2x00dev->intf_ap_count = 0;
+ rt2x00dev->intf_sta_count = 0;
+ rt2x00dev->intf_associated = 0;
++ rt2x00dev->intf_beaconing = 0;
+
+ /* Enable the radio */
+ retval = rt2x00lib_enable_radio(rt2x00dev);
+@@ -1312,6 +1314,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
+ rt2x00dev->intf_ap_count = 0;
+ rt2x00dev->intf_sta_count = 0;
+ rt2x00dev->intf_associated = 0;
++ rt2x00dev->intf_beaconing = 0;
+ }
+
+ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+index 4202c65177839f..75fda72c14ca95 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+@@ -598,6 +598,17 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ */
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ mutex_lock(&intf->beacon_skb_mutex);
++
++ /*
++ * Clear the 'enable_beacon' flag and clear beacon because
++ * the beacon queue has been stopped after hardware reset.
++ */
++ if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags) &&
++ intf->enable_beacon) {
++ intf->enable_beacon = false;
++ rt2x00queue_clear_beacon(rt2x00dev, vif);
++ }
++
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index 4695fb4e2d2dba..af541e52e683b9 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -121,6 +121,15 @@ enum rtl8xxxu_rx_type {
+ RX_TYPE_ERROR = -1
+ };
+
++enum rtl8xxxu_rx_desc_enc {
++ RX_DESC_ENC_NONE = 0,
++ RX_DESC_ENC_WEP40 = 1,
++ RX_DESC_ENC_TKIP_WO_MIC = 2,
++ RX_DESC_ENC_TKIP_MIC = 3,
++ RX_DESC_ENC_AES = 4,
++ RX_DESC_ENC_WEP104 = 5,
++};
++
+ struct rtl8xxxu_rxdesc16 {
+ #ifdef __LITTLE_ENDIAN
+ u32 pktlen:14;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+index 1e1c8fa194cb83..0466b8be5df011 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+@@ -713,9 +713,14 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
+ rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
+ }
+
++#define TX_POWER_INDEX_MAX 0x3F
++#define TX_POWER_INDEX_DEFAULT_CCK 0x22
++#define TX_POWER_INDEX_DEFAULT_HT40 0x27
++
+ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
+ {
+ struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
++ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+@@ -729,6 +734,16 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+
++ for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) {
++ if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
++ priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) {
++ if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
++ priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40;
++ }
++
+ priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
+ priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 5d102a1246a301..6e47dde9389092 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1505,13 +1505,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+ u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+ u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+- u8 val8;
++ u8 val8, base;
+ int group, i;
+
+ group = rtl8xxxu_gen1_channel_to_group(channel);
+
+- cck[0] = priv->cck_tx_power_index_A[group] - 1;
+- cck[1] = priv->cck_tx_power_index_B[group] - 1;
++ cck[0] = priv->cck_tx_power_index_A[group];
++ cck[1] = priv->cck_tx_power_index_B[group];
+
+ if (priv->hi_pa) {
+ if (cck[0] > 0x20)
+@@ -1522,10 +1522,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+
+ ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+ ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+- if (ofdm[0])
+- ofdm[0] -= 1;
+- if (ofdm[1])
+- ofdm[1] -= 1;
+
+ ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
+ ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
+@@ -1614,20 +1610,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+ mcs_a + power_base->reg_0e1c);
++ val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000);
+ for (i = 0; i < 3; i++) {
+- if (i != 2)
+- val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+- else
+- val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
++ base = i != 2 ? 8 : 6;
++ val8 = max_t(int, val8 - base, 0);
+ rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+ }
++
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+ mcs_b + power_base->reg_0868);
++ val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000);
+ for (i = 0; i < 3; i++) {
+- if (i != 2)
+- val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+- else
+- val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
++ base = i != 2 ? 8 : 6;
++ val8 = max_t(int, val8 - base, 0);
+ rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+ }
+ }
+@@ -6324,7 +6319,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+- if (!rx_desc->swdec)
++ if (!rx_desc->swdec &&
++ rx_desc->security != RX_DESC_ENC_NONE)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+@@ -6424,7 +6420,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+- if (!rx_desc->swdec)
++ if (!rx_desc->swdec &&
++ rx_desc->security != RX_DESC_ENC_NONE)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+@@ -7304,6 +7301,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+ if (priv->usb_interrupts)
+ rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+
++ cancel_work_sync(&priv->c2hcmd_work);
+ cancel_delayed_work_sync(&priv->ra_watchdog);
+
+ rtl8xxxu_free_rx_resources(priv);
+@@ -7658,6 +7656,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
++ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+@@ -7959,6 +7958,18 @@ static const struct usb_device_id dev_table[] = {
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
++/* D-Link DWA-131 rev C1 */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3312, 0xff, 0xff, 0xff),
++ .driver_info = (unsigned long)&rtl8192eu_fops},
++/* TP-Link TL-WN8200ND V2 */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0126, 0xff, 0xff, 0xff),
++ .driver_info = (unsigned long)&rtl8192eu_fops},
++/* Mercusys MW300UM */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0100, 0xff, 0xff, 0xff),
++ .driver_info = (unsigned long)&rtl8192eu_fops},
++/* Mercusys MW300UH */
++{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0104, 0xff, 0xff, 0xff),
++ .driver_info = (unsigned long)&rtl8192eu_fops},
+ #endif
+ { }
+ };
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 9886e719739be0..b118df035243c5 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -164,21 +164,29 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
++ value &= PCI_EXP_LNKCTL_ASPMC;
++
+ if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
+- value |= 0x40;
++ value |= PCI_EXP_LNKCTL_CCC;
+
+- pci_write_config_byte(rtlpci->pdev, 0x80, value);
++ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_ASPMC | value,
++ value);
+
+ return false;
+ }
+
+-/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
+-static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
++/* @value is PCI_EXP_LNKCTL_CLKREQ_EN or 0 to enable/disable clk request. */
++static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u16 value)
+ {
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+- pci_write_config_byte(rtlpci->pdev, 0x81, value);
++ value &= PCI_EXP_LNKCTL_CLKREQ_EN;
++
++ pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_CLKREQ_EN,
++ value);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+ udelay(100);
+@@ -192,11 +200,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ /*Retrieve original configuration settings. */
+ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+- u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+- pcibridge_linkctrlreg;
+ u16 aspmlevel = 0;
+ u8 tmp_u1b = 0;
+
+@@ -221,16 +226,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+ /*Set corresponding value. */
+ aspmlevel |= BIT(0) | BIT(1);
+ linkctrl_reg &= ~aspmlevel;
+- pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+- udelay(50);
+-
+- /*4 Disable Pci Bridge ASPM */
+- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+- pcibridge_linkctrlreg);
+-
+- udelay(50);
+ }
+
+ /*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
+@@ -245,9 +242,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+- u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ u16 aspmlevel;
+- u8 u_pcibridge_aspmsetting;
+ u8 u_device_aspmsetting;
+
+ if (!ppsc->support_aspm)
+@@ -259,25 +254,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+ return;
+ }
+
+- /*4 Enable Pci Bridge ASPM */
+-
+- u_pcibridge_aspmsetting =
+- pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+- rtlpci->const_hostpci_aspm_setting;
+-
+- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+- u_pcibridge_aspmsetting &= ~BIT(0);
+-
+- pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
+- u_pcibridge_aspmsetting);
+-
+- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+- "PlatformEnableASPM(): Write reg[%x] = %x\n",
+- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+- u_pcibridge_aspmsetting);
+-
+- udelay(50);
+-
+ /*Get ASPM level (with/without Clock Req) */
+ aspmlevel = rtlpci->const_devicepci_aspm_setting;
+ u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+@@ -291,7 +267,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+- RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
++ RT_RF_OFF_LEVL_CLK_REQ) ?
++ PCI_EXP_LNKCTL_CLKREQ_EN : 0);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ }
+ udelay(100);
+@@ -358,22 +335,6 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+ return tpriv != NULL;
+ }
+
+-static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+-{
+- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+- struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+- u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+- u8 linkctrl_reg;
+- u8 num4bbytes;
+-
+- num4bbytes = (capabilityoffset + 0x10) / 4;
+-
+- /*Read Link Control Register */
+- pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
+-
+- pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+-}
+-
+ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+ {
+@@ -2028,12 +1989,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ PCI_SLOT(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcibridge_funcnum =
+ PCI_FUNC(bridge_pdev->devfn);
+- pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+- pci_pcie_cap(bridge_pdev);
+- pcipriv->ndis_adapter.num4bytes =
+- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+-
+- rtl_pci_get_linkcontrol_field(hw);
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor ==
+ PCI_BRIDGE_VENDOR_AMD) {
+@@ -2050,13 +2005,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
++ "pci_bridge busnumber:devnumber:funcnumber:vendor:amd %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch);
+
+ rtl_pci_parse_configuration(pdev, hw);
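+
+The ASPM rework above drops the hand-rolled config-space byte writes (at offset
+num4bytes << 2, computed from the bridge's PCIe capability header) in favour of
+the kernel's PCIe capability accessors. A minimal sketch of the replacement
+pattern — the helper name is illustrative, not from the patch:
+
+	#include <linux/pci.h>
+
+	/* pcie_capability_clear_and_set_word() performs a read-modify-write
+	 * of a 16-bit register inside the PCIe capability block, so the
+	 * driver no longer locates the Link Control register by hand. */
+	static void sketch_set_clkreq(struct pci_dev *pdev, bool enable)
+	{
+		pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+						   PCI_EXP_LNKCTL_CLKREQ_EN,
+						   enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0);
+	}
+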
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
+index 866861626a0a1b..d6307197dfea06 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
+@@ -236,11 +236,6 @@ struct mp_adapter {
+ u16 pcibridge_vendorid;
+ u16 pcibridge_deviceid;
+
+- u8 num4bytes;
+-
+- u8 pcibridge_pciehdr_offset;
+- u8 pcibridge_linkctrlreg;
+-
+ bool amd_l1_patch;
+ };
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+index 6f61d6a106272a..5a34894a533bee 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+@@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
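+The one-character change above (and its twins in the rtl8192c and rtl8723ae
+dm code below) fixes a copy-paste bug: the Bluetooth-coexistence override for
+the downlink EDCA parameter was being stored in the uplink variable. The
+corrected pattern, for reference:
+
+	/* bt_edca_dl carries the BT-coexistence *downlink* override, so it
+	 * must land in edca_be_dl; the old code wrote it to edca_be_ul. */
+	if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+		edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+		bt_change_edca = true;
+	}
+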
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+index 12d0b3a87af7c1..0fab3a0c7d49dd 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+@@ -16,12 +16,6 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
+ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
+ static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+ static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+@@ -51,7 +45,7 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -74,7 +68,7 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -99,7 +93,7 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+
+
+ original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -127,7 +121,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl88e_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+index 0b6a15c2e5ccde..d92aad60edfe9c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+index 3d29c8dbb2559b..144ee780e1b6ab 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+@@ -17,7 +17,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -40,7 +40,7 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -143,14 +143,6 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ }
+ EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
+
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+-
+ static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+ {
+ rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+index 75afa6253ad027..e64d377dfe9e2d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+@@ -196,7 +196,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+ void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+ void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+index da54e51badd3a7..fa70a7d5539fd1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+@@ -39,7 +39,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+ rfpath, regaddr);
+ }
+
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -110,7 +110,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+@@ -122,7 +122,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+index 7582a162bd112e..c7a0d4c776f0ab 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+@@ -94,7 +94,6 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 offset);
+ u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+-u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset, u32 data);
+ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+index a8d9fe269f3139..0b8cb7e61fd802 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+@@ -32,7 +32,7 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath, regaddr);
+ }
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+@@ -56,7 +56,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+@@ -67,7 +67,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+index d18c092b614263..56b5cd032a9ac8 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+@@ -169,13 +169,6 @@ static const u8 channel_all[59] = {
+ 157, 159, 161, 163, 165
+ };
+
+-static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-
+ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -198,7 +191,7 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ } else {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ }
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+@@ -230,7 +223,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ dbi_direct);
+ else
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+ if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob)
+@@ -317,7 +310,7 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ regaddr, rfpath, bitmask);
+ spin_lock(&rtlpriv->locks.rf_lock);
+ original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ spin_unlock(&rtlpriv->locks.rf_lock);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -343,7 +336,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92d_phy_rf_serial_read(hw,
+ rfpath, regaddr);
+- bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+@@ -899,8 +892,8 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
+ u8 place = chnl;
+
+ if (chnl > 14) {
+- for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
+- if (channel5g[place] == chnl) {
++ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
++ if (channel_all[place] == chnl) {
+ place++;
+ break;
+ }
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+index c09c0c3126658a..d8813a3b444ac5 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+@@ -35,7 +35,7 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
+
+ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+- struct rx_desc_92d *pdesc,
++ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+@@ -49,8 +49,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck_rate;
++ u8 rxmcs;
+
+- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
++ rxmcs = get_rx_desc_rxmcs(pdesc);
++ is_cck_rate = rxmcs <= DESC_RATE11M;
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->packet_beacon = packet_beacon;
+@@ -158,8 +160,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+- if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
+- pdesc->rxmcs <= DESC_RATEMCS15)
++ if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 &&
++ rxmcs <= DESC_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+@@ -365,7 +367,7 @@ static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
+ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+- struct rx_desc_92d *pdesc,
++ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo)
+ {
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+@@ -414,7 +416,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ stats->icv = (u16)get_rx_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_desc_crc32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+- stats->decrypted = !get_rx_desc_swdec(pdesc);
++ stats->decrypted = !get_rx_desc_swdec(pdesc) &&
++ get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE;
+ stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+@@ -427,8 +430,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ rx_status->band = hw->conf.chandef.chan->band;
+ if (get_rx_desc_crc32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+- if (!get_rx_desc_swdec(pdesc))
+- rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (get_rx_desc_bw(pdesc))
+ rx_status->bw = RATE_INFO_BW_40;
+ if (get_rx_desc_rxht(pdesc))
+@@ -442,9 +443,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
+ stats->rx_bufshift);
+- _rtl92de_translate_rx_signal_stuff(hw,
+- skb, stats,
+- (struct rx_desc_92d *)pdesc,
++ _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+index d01578875cd5ff..eb3f768140b5bd 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+@@ -14,6 +14,15 @@
+ #define USB_HWDESC_HEADER_LEN 32
+ #define CRCLENGTH 4
+
++enum rtl92d_rx_desc_enc {
++ RX_DESC_ENC_NONE = 0,
++ RX_DESC_ENC_WEP40 = 1,
++ RX_DESC_ENC_TKIP_WO_MIC = 2,
++ RX_DESC_ENC_TKIP_MIC = 3,
++ RX_DESC_ENC_AES = 4,
++ RX_DESC_ENC_WEP104 = 5,
++};
++
+ /* macros to read/write various fields in RX or TX descriptors */
+
+ static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+@@ -246,6 +255,11 @@ static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+ return le32_get_bits(*__pdesc, GENMASK(19, 16));
+ }
+
++static inline u32 get_rx_desc_enc_type(__le32 *__pdesc)
++{
++ return le32_get_bits(*__pdesc, GENMASK(22, 20));
++}
++
+ static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+ {
+ return le32_get_bits(*__pdesc, GENMASK(25, 24));
+@@ -380,10 +394,17 @@ struct rx_fwinfo_92d {
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
++#ifdef __LITTLE_ENDIAN
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
++#else
++ u8 reserve:4;
++ u8 rxsc:2;
++ u8 sgi_en:1;
++ u8 ex_intf_flag:1;
++#endif
+ } __packed;
+
+ struct tx_desc_92d {
+@@ -488,64 +509,6 @@ struct tx_desc_92d {
+ u32 reserve_pass_pcie_mm_limit[4];
+ } __packed;
+
+-struct rx_desc_92d {
+- u32 length:14;
+- u32 crc32:1;
+- u32 icverror:1;
+- u32 drv_infosize:4;
+- u32 security:3;
+- u32 qos:1;
+- u32 shift:2;
+- u32 phystatus:1;
+- u32 swdec:1;
+- u32 lastseg:1;
+- u32 firstseg:1;
+- u32 eor:1;
+- u32 own:1;
+-
+- u32 macid:5;
+- u32 tid:4;
+- u32 hwrsvd:5;
+- u32 paggr:1;
+- u32 faggr:1;
+- u32 a1_fit:4;
+- u32 a2_fit:4;
+- u32 pam:1;
+- u32 pwr:1;
+- u32 moredata:1;
+- u32 morefrag:1;
+- u32 type:2;
+- u32 mc:1;
+- u32 bc:1;
+-
+- u32 seq:12;
+- u32 frag:4;
+- u32 nextpktlen:14;
+- u32 nextind:1;
+- u32 rsvd:1;
+-
+- u32 rxmcs:6;
+- u32 rxht:1;
+- u32 amsdu:1;
+- u32 splcp:1;
+- u32 bandwidth:1;
+- u32 htc:1;
+- u32 tcpchk_rpt:1;
+- u32 ipcchk_rpt:1;
+- u32 tcpchk_valid:1;
+- u32 hwpcerr:1;
+- u32 hwpcind:1;
+- u32 iv0:16;
+-
+- u32 iv1;
+-
+- u32 tsfl;
+-
+- u32 bufferaddress;
+- u32 bufferaddress64;
+-
+-} __packed;
+-
+ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
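+
+The rtl8192de receive path above stops overlaying a packed bitfield struct
+(rx_desc_92d) on the DMA buffer and instead reads each field out of
+little-endian descriptor dwords — the same concern the new
+__LITTLE_ENDIAN/#else split in rx_fwinfo_92d addresses. A minimal sketch of
+the accessor pattern, assuming only the field position visible in the hunk:
+
+	#include <linux/bits.h>
+	#include <linux/bitfield.h>
+
+	/* Descriptor dwords are little-endian on the wire; le32_get_bits()
+	 * extracts a field correctly on both LE and BE hosts, whereas the
+	 * removed C bitfield layout silently flips with host endianness. */
+	static inline u32 sketch_rx_desc_enc_type(const __le32 *pdesc)
+	{
+		return le32_get_bits(*pdesc, GENMASK(22, 20)); /* dword 0, bits 20-22 */
+	}
+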
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+index cc0bcaf13e96e5..73ef602bfb01a6 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+@@ -16,7 +16,6 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
+ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask);
+ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw);
+ static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+ static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
+@@ -46,7 +45,7 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+@@ -68,7 +67,7 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -92,7 +91,7 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
+ spin_lock(&rtlpriv->locks.rf_lock);
+
+ original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -119,7 +118,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
+
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr);
+- bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = (original_value & (~bitmask)) | (data << bitshift);
+ }
+
+@@ -201,13 +200,6 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
+ pphyreg->rf3wire_offset, data_and_addr);
+ }
+
+-static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-
+ bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
+ {
+ return _rtl92ee_phy_config_mac_with_headerfile(hw);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+index 09591a0b5a8185..d9ef7e1da1db45 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+@@ -14,13 +14,6 @@
+ #include "hw.h"
+ #include "table.h"
+
+-static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
+-{
+- u32 i = ffs(bitmask);
+-
+- return i ? i - 1 : 32;
+-}
+-
+ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -30,7 +23,7 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+ regaddr, bitmask);
+
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+@@ -52,7 +45,7 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+@@ -157,7 +150,7 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+
+ original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
+
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -188,7 +181,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
+ regaddr);
+- bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) | (data << bitshift));
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+index 8ada31380efa48..0ff8e355c23a4b 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+@@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
+ }
+
+ if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
++ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
+ bt_change_edca = true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+index fe9b407dc2affb..71e29b103da5ad 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+@@ -49,7 +49,7 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
+ rfpath, regaddr);
+ }
+
+- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -80,7 +80,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ original_value = rtl8723_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+@@ -89,7 +89,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
+ rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ } else {
+ if (bitmask != RFREG_OFFSET_MASK) {
+- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+index 2b9313cb93dbd2..094cb36153f5a7 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+@@ -41,7 +41,7 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ spin_lock(&rtlpriv->locks.rf_lock);
+
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
+- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock(&rtlpriv->locks.rf_lock);
+@@ -68,7 +68,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = rtl8723_phy_rf_serial_read(hw, path,
+ regaddr);
+- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
++ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index 5323ead30db038..fa1839d8ee55fa 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -29,9 +29,10 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
+ u32 data);
+ static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+ {
+- u32 i = ffs(bitmask);
++ if (WARN_ON_ONCE(!bitmask))
++ return 0;
+
+- return i ? i - 1 : 32;
++ return __ffs(bitmask);
+ }
+ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
+ /*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 2e7e04f9127935..8cbf3fb3885397 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -3080,4 +3080,11 @@ static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+ return ieee80211_find_sta(mac->vif, mac_addr);
+ }
+
++static inline u32 calculate_bit_shift(u32 bitmask)
++{
++ if (WARN_ON_ONCE(!bitmask))
++ return 0;
++
++ return __ffs(bitmask);
++}
+ #endif
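+
+All of the removed per-driver _rtlXX_phy_calculate_bit_shift() copies shared
+the same latent bug, which the new common helper in wifi.h fixes: ffs()
+returns 0 for an empty mask, so the fallback produced a shift count of 32,
+and shifting a 32-bit value by 32 is undefined behaviour. Side by side:
+
+	/* Old per-driver copy: mask == 0 yields 32, an undefined shift. */
+	static u32 old_calculate_bit_shift(u32 bitmask)
+	{
+		u32 i = ffs(bitmask);
+
+		return i ? i - 1 : 32;
+	}
+
+	/* New shared helper: __ffs() is undefined for 0, so a zero mask is
+	 * caught up front and a safe shift of 0 returned with a warning. */
+	static inline u32 calculate_bit_shift(u32 bitmask)
+	{
+		if (WARN_ON_ONCE(!bitmask))
+			return 0;
+
+		return __ffs(bitmask);
+	}
+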
+diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
+index cffad1c012499e..2af2bc613458d4 100644
+--- a/drivers/net/wireless/realtek/rtw88/Kconfig
++++ b/drivers/net/wireless/realtek/rtw88/Kconfig
+@@ -12,6 +12,7 @@ if RTW88
+
+ config RTW88_CORE
+ tristate
++ select WANT_DEV_COREDUMP
+
+ config RTW88_PCI
+ tristate
+diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
+index 86467d2f8888ca..d35f26919806a7 100644
+--- a/drivers/net/wireless/realtek/rtw88/coex.c
++++ b/drivers/net/wireless/realtek/rtw88/coex.c
+@@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 table_case, tdma_case;
+- bool wl_cpt_test = false, bt_cpt_test = false;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
+
+@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+- if (wl_cpt_test) {
+- if (coex_stat->wl_gl_busy) {
+- table_case = 20;
+- tdma_case = 17;
+- } else {
+- table_case = 10;
+- tdma_case = 15;
+- }
+- } else if (bt_cpt_test) {
+- table_case = 26;
+- tdma_case = 26;
+- } else {
+- if (coex_stat->wl_gl_busy &&
+- coex_stat->wl_noisy_level == 0)
+- table_case = 14;
+- else
+- table_case = 10;
++ if (coex_stat->wl_gl_busy &&
++ coex_stat->wl_noisy_level == 0)
++ table_case = 14;
++ else
++ table_case = 10;
+
+- if (coex_stat->wl_gl_busy)
+- tdma_case = 15;
+- else
+- tdma_case = 20;
+- }
++ if (coex_stat->wl_gl_busy)
++ tdma_case = 15;
++ else
++ tdma_case = 20;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+@@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
+ tdma_case = 120;
+ }
+
+- if (wl_cpt_test)
+- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]);
+- else
+- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+-
++ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, false, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+ }
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index f8ba133baff069..35bc37a3c469db 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -1233,9 +1233,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
+ #define rtw_debugfs_add_core(name, mode, fopname, parent) \
+ do { \
+ rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+- if (!debugfs_create_file(#name, mode, \
++ if (IS_ERR(debugfs_create_file(#name, mode, \
+ parent, &rtw_debug_priv_ ##name,\
+- &file_ops_ ##fopname)) \
++ &file_ops_ ##fopname))) \
+ pr_debug("Unable to initialize debugfs:%s\n", \
+ #name); \
+ } while (0)
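+
+The debugfs fix above works because debugfs_create_file() reports failure
+with an ERR_PTR-encoded pointer rather than NULL, so the old !ptr test could
+never fire. Outside the macro, the corrected check looks like this (the file
+name and variables here are hypothetical):
+
+	struct dentry *d;
+
+	/* Failure comes back as ERR_PTR(-E...), never NULL. */
+	d = debugfs_create_file("dm_cap", 0600, parent, &priv, &fops);
+	if (IS_ERR(d))
+		pr_debug("Unable to initialize debugfs:%s\n", "dm_cap");
+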
+diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
+index a1b674e3caaa3c..3596cf99c2ed44 100644
+--- a/drivers/net/wireless/realtek/rtw88/fw.c
++++ b/drivers/net/wireless/realtek/rtw88/fw.c
+@@ -1388,10 +1388,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ val |= BIT_ENSWBCN >> 8;
+ rtw_write8(rtwdev, REG_CR + 1, val);
+
+- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+- bckp[1] = val;
+- val &= ~(BIT_EN_BCNQ_DL >> 16);
+- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
++ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
++ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
++ bckp[1] = val;
++ val &= ~(BIT_EN_BCNQ_DL >> 16);
++ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
++ }
+
+ ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
+ if (ret) {
+@@ -1416,7 +1418,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
+ rsvd_pg_head | BIT_BCN_VALID_V1);
+- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
++ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
++ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
+
+ return ret;
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index 298663b0358085..0c1c1ff31085cf 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -309,6 +309,13 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
+ ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+
++ if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
++ if (chip->id == RTW_CHIP_TYPE_8822C ||
++ chip->id == RTW_CHIP_TYPE_8822B ||
++ chip->id == RTW_CHIP_TYPE_8821C)
++ rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
++ }
++
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
+ rtw_write32(rtwdev, REG_SDIO_HIMR, imr);
+
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index a99b53d4426763..d8d68f16014e34 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -280,9 +280,9 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI)
+- rtwdev->hal.rcr |= BIT_AM | BIT_AB;
++ rtwdev->hal.rcr |= BIT_AM;
+ else
+- rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
++ rtwdev->hal.rcr &= ~(BIT_AM);
+ }
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL)
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 4a33d2e47f33ff..b90ea6c88b15d9 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -1314,20 +1314,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
+ {
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fw_state *fw;
++ int ret = 0;
+
+ fw = &rtwdev->fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+- return -EINVAL;
++ ret = -EINVAL;
+
+ if (chip->wow_fw_name) {
+ fw = &rtwdev->wow_fw;
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+- return -EINVAL;
++ ret = -EINVAL;
+ }
+
+- return 0;
++ return ret;
+ }
+
+ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
+@@ -2027,8 +2028,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
+ rtw_phy_setup_phy_cond(rtwdev, hal->pkg_type);
+
+ rtw_phy_init_tx_power(rtwdev);
+- if (rfe_def->agc_btg_tbl)
+- rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
+ rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
+ rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
+ rtw_phy_tx_power_by_rate_config(hal);
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index 128e75a81bf3cc..37ef80c9091dba 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
+
+ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
+ {
++ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ u8 rf_path;
+
+ rtw_load_table(rtwdev, chip->mac_tbl);
+ rtw_load_table(rtwdev, chip->bb_tbl);
+ rtw_load_table(rtwdev, chip->agc_tbl);
++ if (rfe_def->agc_btg_tbl)
++ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
+ rtw_load_rfk_table(rtwdev);
+
+ for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index adf224618a2a6f..5f3a3a88c3d0d4 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -706,9 +706,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
++ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ if (cck_enable)
+ dm_info->total_fa_cnt += cck_fa_cnt;
+- dm_info->total_fa_cnt = ofdm_fa_cnt;
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
+ dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
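+
+The rtw8821c hunk above is a statement-ordering fix: the total false-alarm
+count was first incremented by the CCK count and then unconditionally
+overwritten with the OFDM count. After the swap:
+
+	/* Initialise the total from the OFDM count first, then add the
+	 * CCK count only when CCK is enabled. */
+	dm_info->total_fa_cnt = ofdm_fa_cnt;
+	if (cck_enable)
+		dm_info->total_fa_cnt += cck_fa_cnt;
+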
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+index 7a5cbdc31ef793..a019f4085e7389 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+@@ -9,24 +9,34 @@
+ #include "usb.h"
+
+ static const struct usb_device_id rtw_8821cu_id_table[] = {
+- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
+- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
+- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
+- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
+ {},
+ };
+ MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+index cd965edc29cea3..3fe5c70ce731be 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+@@ -2611,12 +2611,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+- if (rxsc >= 9 && rxsc <= 12)
++ if (rxsc == 0)
++ bw = rtwdev->hal.current_band_width;
++ else if (rxsc >= 1 && rxsc <= 8)
++ bw = RTW_CHANNEL_WIDTH_20;
++ else if (rxsc >= 9 && rxsc <= 12)
+ bw = RTW_CHANNEL_WIDTH_40;
+- else if (rxsc >= 13)
+- bw = RTW_CHANNEL_WIDTH_80;
+ else
+- bw = RTW_CHANNEL_WIDTH_20;
++ bw = RTW_CHANNEL_WIDTH_80;
+
+ channel = GET_PHY_STAT_P1_CHANNEL(phy_status);
+ rtw_set_rx_freq_band(pkt_stat, channel);
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index 2c1fb2dabd40a4..0cae5746f540fa 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -500,19 +500,40 @@ static u32 rtw_sdio_get_tx_addr(struct rtw_dev *rtwdev, size_t size,
+ static int rtw_sdio_read_port(struct rtw_dev *rtwdev, u8 *buf, size_t count)
+ {
+ struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
++ struct mmc_host *host = rtwsdio->sdio_func->card->host;
+ bool bus_claim = rtw_sdio_bus_claim_needed(rtwsdio);
+ u32 rxaddr = rtwsdio->rx_addr++;
+- int ret;
++ int ret = 0, err;
++ size_t bytes;
+
+ if (bus_claim)
+ sdio_claim_host(rtwsdio->sdio_func);
+
+- ret = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
+- RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr), count);
+- if (ret)
+- rtw_warn(rtwdev,
+- "Failed to read %zu byte(s) from SDIO port 0x%08x",
+- count, rxaddr);
++ while (count > 0) {
++ bytes = min_t(size_t, host->max_req_size, count);
++
++ err = sdio_memcpy_fromio(rtwsdio->sdio_func, buf,
++ RTW_SDIO_ADDR_RX_RX0FF_GEN(rxaddr),
++ bytes);
++ if (err) {
++ rtw_warn(rtwdev,
++ "Failed to read %zu byte(s) from SDIO port 0x%08x: %d",
++ bytes, rxaddr, err);
++
++ /* Signal to the caller that reading did not work and
++ * that the data in the buffer is short/corrupted.
++ */
++ ret = err;
++
++ /* Don't stop here - instead drain the remaining data
++ * from the card's buffer, else the card will return
++ * corrupt data for the next rtw_sdio_read_port() call.
++ */
++ }
++
++ count -= bytes;
++ buf += bytes;
++ }
+
+ if (bus_claim)
+ sdio_release_host(rtwsdio->sdio_func);
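+
+The SDIO change above splits large receives into host-sized chunks and, on a
+failed chunk, keeps reading so the card's buffer is drained instead of
+corrupting the next call. The core of the loop, reduced to a sketch (func and
+rx_addr stand in for the driver's sdio_func and generated FIFO address):
+
+	/* Never request more than the MMC host can take per transfer, and
+	 * remember (but do not abort on) a failed chunk so the remaining
+	 * data is still drained from the card. */
+	while (count > 0) {
+		size_t bytes = min_t(size_t, host->max_req_size, count);
+
+		err = sdio_memcpy_fromio(func, buf, rx_addr, bytes);
+		if (err)
+			ret = err;
+
+		count -= bytes;
+		buf += bytes;
+	}
+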
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index d879d7e3dc81fe..04a64afcbf8a2d 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -33,6 +33,36 @@ static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
+ rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
+ }
+
++static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
++{
++ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
++ struct usb_device *udev = rtwusb->udev;
++ bool reg_on_section = false;
++ u16 t_reg = 0x4e0;
++ u8 t_len = 1;
++ int status;
++
++ /* There are three sections:
++ * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
++ * 2. off (< 0xFE00, excluding "on" section): this section could be
++ * powered off
++ * 3. local (>= 0xFE00): usb specific registers section
++ */
++ if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
++ reg_on_section = true;
++
++ if (!reg_on_section)
++ return;
++
++ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
++ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
++ t_reg, 0, data, t_len, 500);
++
++ if (status != t_len && status != -ENODEV)
++ rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
++ __func__, t_reg, t_len, status);
++}
++
+ static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
+ {
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+@@ -58,6 +88,11 @@ static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
+ rtw_err(rtwdev, "read register 0x%x failed with %d\n",
+ addr, ret);
+
++ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
++ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
++ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
++ rtw_usb_reg_sec(rtwdev, addr, data);
++
+ return le32_to_cpu(*data);
+ }
+
+@@ -102,6 +137,11 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
+ if (ret < 0 && ret != -ENODEV && count++ < 4)
+ rtw_err(rtwdev, "write register 0x%x failed with %d\n",
+ addr, ret);
++
++ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
++ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
++ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
++ rtw_usb_reg_sec(rtwdev, addr, data);
+ }
+
+ static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
+@@ -233,6 +273,8 @@ static void rtw_usb_write_port_tx_complete(struct urb *urb)
+ info = IEEE80211_SKB_CB(skb);
+ tx_data = rtw_usb_get_tx_data(skb);
+
++ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
++
+ /* enqueue to wait for tx report */
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
+@@ -611,8 +653,7 @@ static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb)
+
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+- if (rxcb->rx_urb)
+- usb_kill_urb(rxcb->rx_urb);
++ usb_kill_urb(rxcb->rx_urb);
+ }
+ }
+
+@@ -623,10 +664,8 @@ static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb)
+
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+- if (rxcb->rx_urb) {
+- usb_kill_urb(rxcb->rx_urb);
+- usb_free_urb(rxcb->rx_urb);
+- }
++ usb_kill_urb(rxcb->rx_urb);
++ usb_free_urb(rxcb->rx_urb);
+ }
+ }
+
+@@ -703,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = {
+ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
+ {
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+- int i;
+
+ rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
+ if (!rtwusb->rxwq) {
+@@ -715,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
+
+ INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+
++ return 0;
++}
++
++static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
++{
++ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
++ int i;
++
+ for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
+ struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
+
+ rtw_usb_rx_resubmit(rtwusb, rxcb);
+ }
+-
+- return 0;
+ }
+
+ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
+@@ -858,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ goto err_destroy_rxwq;
+ }
+
++ rtw_usb_setup_rx(rtwdev);
++
+ return 0;
+
+ err_destroy_rxwq:
+diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
+index 4ba8b3df70aeb0..6ab1b6ffbb5072 100644
+--- a/drivers/net/wireless/realtek/rtw89/coex.c
++++ b/drivers/net/wireless/realtek/rtw89/coex.c
+@@ -131,7 +131,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
+ .fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
++ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1800, .max_role_num = 6,
+ },
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
+@@ -159,7 +159,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
+ .fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
++ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1800, .max_role_num = 6,
+ },
+ {RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index 133bf289bacb43..535393eca5641c 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -2548,7 +2548,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ ieee80211_queue_delayed_work(hw, &roc->roc_work,
+- RTW89_ROC_IDLE_TIMEOUT);
++ msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
+ }
+
+ void rtw89_roc_work(struct work_struct *work)
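+
+The one-liner in rtw89_roc_end() matters because ieee80211_queue_delayed_work()
+takes its delay in jiffies; passing RTW89_ROC_IDLE_TIMEOUT raw (presumably
+defined in milliseconds, given the fix) made the idle timeout scale with
+CONFIG_HZ. The conversion is the standard one:
+
+	/* The delay argument is in jiffies, so convert from milliseconds
+	 * explicitly; otherwise the timeout depends on CONFIG_HZ. */
+	ieee80211_queue_delayed_work(hw, &roc->roc_work,
+				     msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
+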
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index 04ce221730f995..ee6ae2a0c79815 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -2230,12 +2230,6 @@ struct rtw89_btc_fbtc_fddt_cell_status {
+ u8 state_phase; /* [0:3] train state, [4:7] train phase */
+ } __packed;
+
+-struct rtw89_btc_fbtc_fddt_cell_status_v5 {
+- s8 wl_tx_pwr;
+- s8 bt_tx_pwr;
+- s8 bt_rx_gain;
+-} __packed;
+-
+ struct rtw89_btc_fbtc_cysta_v3 { /* statistics for cycles */
+ u8 fver;
+ u8 rsvd;
+@@ -2299,9 +2293,9 @@ struct rtw89_btc_fbtc_cysta_v5 { /* statistics for cycles */
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_fddt_info_v5 fddt_trx[BTC_CYCLE_SLOT_MAX];
+- struct rtw89_btc_fbtc_fddt_cell_status_v5 fddt_cells[FDD_TRAIN_WL_DIRECTION]
+- [FDD_TRAIN_WL_RSSI_LEVEL]
+- [FDD_TRAIN_BT_RSSI_LEVEL];
++ struct rtw89_btc_fbtc_fddt_cell_status fddt_cells[FDD_TRAIN_WL_DIRECTION]
++ [FDD_TRAIN_WL_RSSI_LEVEL]
++ [FDD_TRAIN_BT_RSSI_LEVEL];
+ __le32 except_map;
+ } __packed;
+
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index d162e64f606471..94fe921e9ff28f 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -3292,7 +3292,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+ case RX_ENC_HE:
+ seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
+ status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+- he_gi_str[rate->he_gi] : "N/A");
++ he_gi_str[status->he_gi] : "N/A");
+ break;
+ }
+ seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw));
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index df1dc2f43c86be..468cfa43ec0499 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -3912,6 +3912,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ rtw89_core_scan_complete(rtwdev, vif, true);
+ ieee80211_scan_completed(rtwdev->hw, &info);
+ ieee80211_wake_queues(rtwdev->hw);
++ rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
+
+ rtw89_release_pkt_list(rtwdev);
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+@@ -3929,6 +3930,19 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+ rtw89_hw_scan_complete(rtwdev, vif, true);
+ }
+
++static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
++{
++ struct rtw89_vif *rtwvif;
++
++ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
++ /* This variable implies connected or during attempt to connect */
++ if (!is_zero_ether_addr(rtwvif->bssid))
++ return true;
++ }
++
++ return false;
++}
++
+ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable)
+ {
+@@ -3941,8 +3955,7 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ if (!rtwvif)
+ return -EINVAL;
+
+- /* This variable implies connected or during attempt to connect */
+- connected = !is_zero_ether_addr(rtwvif->bssid);
++ connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
+ opt.enable = enable;
+ opt.target_ch_mode = connected;
+ if (enable) {
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
+index fab9f5004a75e3..3c818c4b4653ad 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.c
++++ b/drivers/net/wireless/realtek/rtw89/mac.c
+@@ -3833,11 +3833,9 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ }
+
+ static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
+- struct rtw89_vif *rtwvif)
++ struct rtw89_vif *rtwvif, bool en)
+ {
+ const struct rtw89_port_reg *p = &rtw_port_base;
+- bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ||
+- rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
+@@ -3845,6 +3843,24 @@ static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
+ }
+
++static void rtw89_mac_port_cfg_tx_sw_by_nettype(struct rtw89_dev *rtwdev,
++ struct rtw89_vif *rtwvif)
++{
++ bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ||
++ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
++
++ rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif, en);
++}
++
++void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en)
++{
++ struct rtw89_vif *rtwvif;
++
++ rtw89_for_each_rtwvif(rtwdev, rtwvif)
++ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
++ rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif, en);
++}
++
+ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+ {
+@@ -4137,7 +4153,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
+- rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
++ rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
+@@ -4298,8 +4314,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+
+ switch (reason) {
+ case RTW89_SCAN_LEAVE_CH_NOTIFY:
+- if (rtw89_is_op_chan(rtwdev, band, chan))
++ if (rtw89_is_op_chan(rtwdev, band, chan)) {
++ rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false);
+ ieee80211_stop_queues(rtwdev->hw);
++ }
+ return;
+ case RTW89_SCAN_END_SCAN_NOTIFY:
+ if (rtwvif && rtwvif->scan_req &&
+@@ -4317,6 +4335,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ if (rtw89_is_op_chan(rtwdev, band, chan)) {
+ rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
+ &rtwdev->scan_info.op_chan);
++ rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
+ ieee80211_wake_queues(rtwdev->hw);
+ } else {
+ rtw89_chan_create(&new, chan, chan, band,
+diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
+index 7cf34137c0bcfc..7c57ab2814c767 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac.h
++++ b/drivers/net/wireless/realtek/rtw89/mac.h
+@@ -974,6 +974,7 @@ int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif);
+ void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
++void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en);
+ int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+ void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev);
+ int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw);
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 5e48618706d911..5b9de1f41dc780 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -126,7 +126,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ rtwvif->rtwdev = rtwdev;
+ rtwvif->roc.state = RTW89_ROC_IDLE;
+ rtwvif->offchan = false;
+- list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
++ if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
++ list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
++
+ INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
+ INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work);
+ rtw89_leave_ps_mode(rtwdev);
+@@ -303,7 +305,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
+ u8 sifs;
+
+ slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+- sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
++ sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
+
+ return aifsn * slot_time + sifs;
+ }
+@@ -422,7 +424,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
+ * when disconnected by peer
+ */
+ if (rtwdev->scanning)
+- rtw89_hw_scan_abort(rtwdev, vif);
++ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+ }
+ }
+
+@@ -472,6 +474,9 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
+ return -EOPNOTSUPP;
+ }
+
++ if (rtwdev->scanning)
++ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
++
+ ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
+ rtw89_cam_bssid_changed(rtwdev, rtwvif);
+ rtw89_mac_port_update(rtwdev, rtwvif);
+@@ -968,7 +973,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
+ }
+
+ if (rtwdev->scanning)
+- rtw89_hw_scan_abort(rtwdev, vif);
++ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
+ if (type == IEEE80211_ROC_TYPE_MGMT_TX)
+ roc->state = RTW89_ROC_MGMT;
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 3a4bfc44142b69..98af64444d3ebc 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -958,7 +958,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+- cnt = min(cnt, wd_ring->curr_num);
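++	/* CH12 is the firmware command queue; it does not consume WD pages */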
++ if (txch != RTW89_TXCH_CH12)
++ cnt = min(cnt, wd_ring->curr_num);
+ spin_unlock_bh(&rtwpci->trx_lock);
+
+ return cnt;
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
+index 2f3d1ad3b0f7d0..4259b79b138fb4 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.h
++++ b/drivers/net/wireless/realtek/rtw89/pci.h
+@@ -559,7 +559,7 @@
+ #define RTW89_PCI_TXWD_NUM_MAX 512
+ #define RTW89_PCI_TXWD_PAGE_SIZE 128
+ #define RTW89_PCI_ADDRINFO_MAX 4
+-#define RTW89_PCI_RX_BUF_SIZE 11460
++#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
+
+ #define RTW89_PCI_POLL_BDRAM_RST_CNT 100
+ #define RTW89_PCI_MULTITAG 8
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index 7139146cb3fad2..fac83b718a30cd 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -284,8 +284,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
+ csi_mode = RTW89_RA_RPT_MODE_HT;
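++		/* Cast each u8 mask to u64 before shifting to avoid 32-bit overflow */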
+ ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
+- (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+- (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
++ ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
++ ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ high_rate_masks = rtw89_ra_mask_ht_rates;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ stbc_en = 1;
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+index 259df67836a0e9..a2fa1d339bc21f 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+@@ -20,7 +20,7 @@
+ #define RTW8852B_RF_REL_VERSION 34
+ #define RTW8852B_DPK_VER 0x0d
+ #define RTW8852B_DPK_RF_PATH 2
+-#define RTW8852B_DPK_KIP_REG_NUM 2
++#define RTW8852B_DPK_KIP_REG_NUM 3
+
+ #define _TSSI_DE_MASK GENMASK(21, 12)
+ #define ADDC_T_AVG 100
+diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
+index c1644353053f01..01b17b8f4ff9dc 100644
+--- a/drivers/net/wireless/realtek/rtw89/ser.c
++++ b/drivers/net/wireless/realtek/rtw89/ser.c
+@@ -308,9 +308,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+
+ static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
+ {
+- struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
+- struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++ struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
++ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
++ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
++
++ if (rtwvif != target_rtwvif)
++ return;
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
+diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
+index e2ed4565025dda..d4ee9078a4f48c 100644
+--- a/drivers/net/wireless/realtek/rtw89/util.h
++++ b/drivers/net/wireless/realtek/rtw89/util.h
+@@ -14,6 +14,24 @@
+ #define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
+ list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
+
++/* Before adding rtwvif to the list, check whether it is already present,
++ * because in some cases, such as an SER L2 reset during the WoWLAN flow,
++ * reconfig is called twice and would otherwise add the entry twice.
++ */
++static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev,
++ struct rtw89_vif *new)
++{
++ struct rtw89_vif *rtwvif;
++
++ lockdep_assert_held(&rtwdev->mutex);
++
++ rtw89_for_each_rtwvif(rtwdev, rtwvif)
++ if (rtwvif == new)
++ return true;
++
++ return false;
++}
++
+ /* The result of negative dividend and positive divisor is undefined, but it
+ * should be one case of round-down or round-up. So, make it round-down if the
+ * result is round-up.
+diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
+index 6a5e52a96d183a..caa22226b01bc9 100644
+--- a/drivers/net/wireless/silabs/wfx/data_tx.c
++++ b/drivers/net/wireless/silabs/wfx/data_tx.c
+@@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+
+ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+ {
+- int i;
+- bool finished;
++ bool has_rate0 = false;
++ int i, j;
+
+- /* Firmware is not able to mix rates with different flags */
+- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+- rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+- if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
++ for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
++ if (rates[j].idx == -1)
++ break;
++		/* The device uses the rates in descending order, whatever
++		 * minstrel requests. We have to trade off here. Most important
++		 * is to respect the primary rate requested by minstrel, so we
++		 * drop any entry whose rate is higher than the previous one.
++		 */
++ if (rates[j].idx >= rates[i - 1].idx) {
++ rates[i - 1].count += rates[j].count;
++ rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
++ } else {
++ memcpy(rates + i, rates + j, sizeof(rates[i]));
++ if (rates[i].idx == 0)
++ has_rate0 = true;
++			/* The device applies Short GI only to the first rate */
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+- if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+- rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+- }
+-
+- /* Sort rates and remove duplicates */
+- do {
+- finished = true;
+- for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+- if (rates[i + 1].idx == rates[i].idx &&
+- rates[i].idx != -1) {
+- rates[i].count += rates[i + 1].count;
+- if (rates[i].count > 15)
+- rates[i].count = 15;
+- rates[i + 1].idx = -1;
+- rates[i + 1].count = 0;
+-
+- finished = false;
+- }
+- if (rates[i + 1].idx > rates[i].idx) {
+- swap(rates[i + 1], rates[i]);
+- finished = false;
+- }
++ i++;
+ }
+- } while (!finished);
++ }
+ /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
+- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+- if (rates[i].idx == 0)
+- break;
+- if (rates[i].idx == -1) {
+- rates[i].idx = 0;
+- rates[i].count = 8; /* == hw->max_rate_tries */
+- rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+- break;
+- }
++ if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
++ rates[i].idx = 0;
++ rates[i].count = 8; /* == hw->max_rate_tries */
++ rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
++ i++;
++ }
++ for (; i < IEEE80211_TX_MAX_RATES; i++) {
++ memset(rates + i, 0, sizeof(rates[i]));
++ rates[i].idx = -1;
+ }
+- /* All retries use long GI */
+- for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+- rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ }
+
+ static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
+diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
+index 626dfb4b7a55de..048a552e9da1d8 100644
+--- a/drivers/net/wireless/silabs/wfx/sta.c
++++ b/drivers/net/wireless/silabs/wfx/sta.c
+@@ -354,29 +354,46 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
+ return 0;
+ }
+
+-static void wfx_set_mfp_ap(struct wfx_vif *wvif)
++static int wfx_set_mfp_ap(struct wfx_vif *wvif)
+ {
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0);
+ const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+- const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+- skb->len - ieoffset);
+ const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ const int akm_suite_size = 4 / sizeof(u16);
++ int ret = -EINVAL;
++ const u16 *ptr;
+
+- if (ptr) {
+- ptr += pairwise_cipher_suite_count_offset;
+- if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+- return;
+- ptr += 1 + pairwise_cipher_suite_size * *ptr;
+- if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+- return;
+- ptr += 1 + akm_suite_size * *ptr;
+- if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+- return;
+- wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
++ if (unlikely(!skb))
++ return -ENOMEM;
++
++ ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
++ skb->len - ieoffset);
++ if (!ptr) {
++		/* A missing RSN IE is fine in open networks */
++ ret = 0;
++ goto free_skb;
+ }
++
++ ptr += pairwise_cipher_suite_count_offset;
++ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
++ goto free_skb;
++
++ ptr += 1 + pairwise_cipher_suite_size * *ptr;
++ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
++ goto free_skb;
++
++ ptr += 1 + akm_suite_size * *ptr;
++ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
++ goto free_skb;
++
++ wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
++ ret = 0;
++
++free_skb:
++ dev_kfree_skb(skb);
++ return ret;
+ }
+
+ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+@@ -394,8 +411,7 @@ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel);
+ if (ret > 0)
+ return -EIO;
+- wfx_set_mfp_ap(wvif);
+- return ret;
++ return wfx_set_mfp_ap(wvif);
+ }
+
+ void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
+index 6894b919ff94b5..e16e9ae90d2043 100644
+--- a/drivers/net/wireless/st/cw1200/txrx.c
++++ b/drivers/net/wireless/st/cw1200/txrx.c
+@@ -1166,7 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
+ size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
+
+ tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+- if (tim_ie) {
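++		/* tim_ie[1] is the element length; ignore truncated TIM elements */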
++ if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
+ struct ieee80211_tim_ie *tim =
+ (struct ieee80211_tim_ie *)&tim_ie[2];
+
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index 1f524030b186e5..07be0adc13ec5c 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -3170,7 +3170,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *mac80211_hwsim_gstrings_stats,
++ memcpy(data, mac80211_hwsim_gstrings_stats,
+ sizeof(mac80211_hwsim_gstrings_stats));
+ }
+
+@@ -3795,7 +3795,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
+ }
+
+ nla_for_each_nested(peer, peers, rem) {
+- struct cfg80211_pmsr_result result;
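++		/* Zero-initialise so unparsed fields do not leak stack memory */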
++ struct cfg80211_pmsr_result result = {};
+
+ err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
+ if (err)
+diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
+index ba14d83353a4b2..fb4d95a027fefa 100644
+--- a/drivers/net/wireless/virtual/virt_wifi.c
++++ b/drivers/net/wireless/virtual/virt_wifi.c
+@@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = {
+ /* Assigned at module init. Guaranteed locally-administered and unicast. */
+ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+
++#define VIRT_WIFI_SSID "VirtWifi"
++#define VIRT_WIFI_SSID_LEN 8
++
+ static void virt_wifi_inform_bss(struct wiphy *wiphy)
+ {
+ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+@@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy)
+ u8 ssid[8];
+ } __packed ssid = {
+ .tag = WLAN_EID_SSID,
+- .len = 8,
+- .ssid = "VirtWifi",
++ .len = VIRT_WIFI_SSID_LEN,
++ .ssid = VIRT_WIFI_SSID,
+ };
+
+ informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+@@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv {
+ struct net_device *upperdev;
+ u32 tx_packets;
+ u32 tx_failed;
++ u32 connect_requested_ssid_len;
++ u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN];
+ u8 connect_requested_bss[ETH_ALEN];
+ bool is_up;
+ bool is_connected;
+@@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
+ if (priv->being_deleted || !priv->is_up)
+ return -EBUSY;
+
++ if (!sme->ssid)
++ return -EINVAL;
++
++ priv->connect_requested_ssid_len = sme->ssid_len;
++ memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len);
++
+ could_schedule = schedule_delayed_work(&priv->connect, HZ * 2);
+ if (!could_schedule)
+ return -EBUSY;
+@@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work)
+ container_of(work, struct virt_wifi_netdev_priv, connect.work);
+ u8 *requested_bss = priv->connect_requested_bss;
+ bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
++ bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN &&
++ !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID,
++ priv->connect_requested_ssid_len);
+ u16 status = WLAN_STATUS_SUCCESS;
+
+ if (is_zero_ether_addr(requested_bss))
+ requested_bss = NULL;
+
+- if (!priv->is_up || (requested_bss && !right_addr))
++ if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid)
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ else
+ priv->is_connected = true;
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+index 2fe724d623c061..33c5a46f1b9223 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+@@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+ rc = PTR_ERR(devlink->cd_regions[i]);
+ dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+ /* Delete previously created regions */
+- for ( ; i >= 0; i--)
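++		/* Start from i - 1: region i itself was never created */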
++ for (i--; i >= 0; i--)
+ devlink_region_destroy(devlink->cd_regions[i]);
+ goto region_create_fail;
+ }
+diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
+index 17d46f4d291390..174a9156b3233d 100644
+--- a/drivers/net/wwan/qcom_bam_dmux.c
++++ b/drivers/net/wwan/qcom_bam_dmux.c
+@@ -823,17 +823,17 @@ static int bam_dmux_probe(struct platform_device *pdev)
+ ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
+ IRQF_ONESHOT, NULL, dmux);
+ if (ret)
+- return ret;
++ goto err_disable_pm;
+
+ ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
+ IRQF_ONESHOT, NULL, dmux);
+ if (ret)
+- return ret;
++ goto err_disable_pm;
+
+ ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
+ &dmux->pc_state);
+ if (ret)
+- return ret;
++ goto err_disable_pm;
+
+ /* Check if remote finished initialization before us */
+ if (dmux->pc_state) {
+@@ -844,6 +844,11 @@ static int bam_dmux_probe(struct platform_device *pdev)
+ }
+
+ return 0;
++
++err_disable_pm:
++ pm_runtime_disable(dev);
++ pm_runtime_dont_use_autosuspend(dev);
++ return ret;
+ }
+
+ static int bam_dmux_remove(struct platform_device *pdev)
+diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
+index 9f43f256db1d06..f0a4783baf1f32 100644
+--- a/drivers/net/wwan/t7xx/t7xx_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
+@@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
+ {
+ u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
+
+- return ioread64(hw_info->ap_pdn_base + offset);
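++	/* The _lo_hi variant splits the access into two 32-bit MMIO ops */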
++ return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
+ }
+
+ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
+@@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn
+
+ reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
+ hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
+- iowrite64(address, reg + offset);
++ iowrite64_lo_hi(address, reg + offset);
+ }
+
+ void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
+diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+index cc70360364b7d6..554ba4669cc8d3 100644
+--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+@@ -139,8 +139,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
+ return -ENODEV;
+ }
+
+- gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
+- queue->index * sizeof(u64));
++ gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
++ REG_CLDMA_DL_CURRENT_ADDRL_0 +
++ queue->index * sizeof(u64));
+ if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
+ return 0;
+
+@@ -318,8 +319,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
+ struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
+
+ /* Check current processing TGPD, 64-bit address is in a table by Q index */
+- ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+- queue->index * sizeof(u64));
++ ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
++ queue->index * sizeof(u64));
+ if (req->gpd_addr != ul_curr_addr) {
+ spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+ dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
+diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+index 76da4c15e3de17..f071ec7ff23d50 100644
+--- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
++++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+@@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_
+ for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
+ offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
+ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+- iowrite64(0, reg);
++ iowrite64_lo_hi(0, reg);
+ }
+ }
+
+@@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_
+
+ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
+ value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
+- iowrite64(value, reg);
++ iowrite64_lo_hi(value, reg);
+
+ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
+ iowrite32(cfg->trsl_id, reg);
+
+ reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
+ value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
+- iowrite64(value, reg);
++ iowrite64_lo_hi(value, reg);
+
+ /* Ensure ATR is set */
+- ioread64(reg);
++ ioread64_lo_hi(reg);
+ return 0;
+ }
+
+diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
+index ff96f22648efde..45ddce35f6d2c6 100644
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -95,7 +95,7 @@ static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
+
+ static void xenvif_flush_hash(struct xenvif *vif)
+ {
+- struct xenvif_hash_cache_entry *entry;
++ struct xenvif_hash_cache_entry *entry, *n;
+ unsigned long flags;
+
+ if (xenvif_hash_cache_size == 0)
+@@ -103,8 +103,7 @@ static void xenvif_flush_hash(struct xenvif *vif)
+
+ spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
+- lockdep_is_held(&vif->hash.cache.lock)) {
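++	/* Entries are unlinked under the cache lock, so use the _safe iterator */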
++ list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
+ list_del_rcu(&entry->link);
+ vif->hash.cache.count--;
+ kfree_rcu(entry, rcu);
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index fc3bb63b9ac3e5..acf310e58f7e28 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -668,8 +668,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+ {
+ if (queue->task) {
+- kthread_stop(queue->task);
+- put_task_struct(queue->task);
++ kthread_stop_put(queue->task);
+ queue->task = NULL;
+ }
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 88f760a7cbc354..fab361a250d605 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true;
+ module_param(provides_xdp_headroom, bool, 0644);
+
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+- u8 status);
++ s8 status);
+
+ static void make_tx_response(struct xenvif_queue *queue,
+- struct xen_netif_tx_request *txp,
++ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+- s8 st);
+-static void push_tx_responses(struct xenvif_queue *queue);
++ s8 status);
+
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+
+@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
+ unsigned int extra_count, RING_IDX end)
+ {
+ RING_IDX cons = queue->tx.req_cons;
+- unsigned long flags;
+
+ do {
+- spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
+- push_tx_responses(queue);
+- spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (cons == end)
+ break;
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
+@@ -463,12 +458,20 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ }
+
+ for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+- shinfo->nr_frags++, gop++, nr_slots--) {
++ nr_slots--) {
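++		/* Zero-size slots carry no data: complete them without mapping */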
++ if (unlikely(!txp->size)) {
++ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
++ ++txp;
++ continue;
++ }
++
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp,
+ txp == first ? extra_count : 0, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
++ ++shinfo->nr_frags;
++ ++gop;
+
+ if (txp == first)
+ txp = txfrags;
+@@ -481,20 +484,33 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ shinfo = skb_shinfo(nskb);
+ frags = shinfo->frags;
+
+- for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+- shinfo->nr_frags++, txp++, gop++) {
++ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
++ if (unlikely(!txp->size)) {
++ make_tx_response(queue, txp, 0,
++ XEN_NETIF_RSP_OKAY);
++ continue;
++ }
++
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+ gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags],
+ pending_idx);
++ ++shinfo->nr_frags;
++ ++gop;
++ }
++
++ if (shinfo->nr_frags) {
++ skb_shinfo(skb)->frag_list = nskb;
++ nskb = NULL;
+ }
++ }
+
+- skb_shinfo(skb)->frag_list = nskb;
+- } else if (nskb) {
++ if (nskb) {
+ /* A frag_list skb was allocated but it is no longer needed
+- * because enough slots were converted to copy ops above.
++ * because enough slots were converted to copy ops above or some
++ * were empty.
+ */
+ kfree_skb(nskb);
+ }
+@@ -963,7 +979,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ (ret == 0) ?
+ XEN_NETIF_RSP_OKAY :
+ XEN_NETIF_RSP_ERROR);
+- push_tx_responses(queue);
+ continue;
+ }
+
+@@ -975,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+
+ make_tx_response(queue, &txreq, extra_count,
+ XEN_NETIF_RSP_OKAY);
+- push_tx_responses(queue);
+ continue;
+ }
+
+@@ -1401,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ return work_done;
+ }
+
++static void _make_tx_response(struct xenvif_queue *queue,
++ const struct xen_netif_tx_request *txp,
++ unsigned int extra_count,
++ s8 status)
++{
++ RING_IDX i = queue->tx.rsp_prod_pvt;
++ struct xen_netif_tx_response *resp;
++
++ resp = RING_GET_RESPONSE(&queue->tx, i);
++ resp->id = txp->id;
++ resp->status = status;
++
++ while (extra_count-- != 0)
++ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++
++ queue->tx.rsp_prod_pvt = ++i;
++}
++
++static void push_tx_responses(struct xenvif_queue *queue)
++{
++ int notify;
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
++ if (notify)
++ notify_remote_via_irq(queue->tx_irq);
++}
++
+ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+- u8 status)
++ s8 status)
+ {
+ struct pending_tx_info *pending_tx_info;
+ pending_ring_idx_t index;
+@@ -1412,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+
+ spin_lock_irqsave(&queue->response_lock, flags);
+
+- make_tx_response(queue, &pending_tx_info->req,
+- pending_tx_info->extra_count, status);
++ _make_tx_response(queue, &pending_tx_info->req,
++ pending_tx_info->extra_count, status);
+
+	/* Release the pending index before pushing the Tx response so
+	 * it's available before a new Tx request is pushed by the
+@@ -1427,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+ spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+
+-
+ static void make_tx_response(struct xenvif_queue *queue,
+- struct xen_netif_tx_request *txp,
++ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+- s8 st)
++ s8 status)
+ {
+- RING_IDX i = queue->tx.rsp_prod_pvt;
+- struct xen_netif_tx_response *resp;
+-
+- resp = RING_GET_RESPONSE(&queue->tx, i);
+- resp->id = txp->id;
+- resp->status = st;
+-
+- while (extra_count-- != 0)
+- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
++ unsigned long flags;
+
+- queue->tx.rsp_prod_pvt = ++i;
+-}
++ spin_lock_irqsave(&queue->response_lock, flags);
+
+-static void push_tx_responses(struct xenvif_queue *queue)
+-{
+- int notify;
++ _make_tx_response(queue, txp, extra_count, status);
++ push_tx_responses(queue);
+
+- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+- if (notify)
+- notify_remote_via_irq(queue->tx_irq);
++ spin_unlock_irqrestore(&queue->response_lock, flags);
+ }
+
+ static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index ad29f370034e4f..8d2aee88526c69 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
+ return NULL;
+ }
+ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
++ skb_mark_for_recycle(skb);
+
+ /* Align ip header to a 16 bytes boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index b19c39dcfbd932..e2bc67300a915a 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -1723,6 +1723,11 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ }
+
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
++ if (!dev->poll_mod_count) {
++ nfc_err(dev->dev,
++ "Poll mod list is empty\n");
++ return -EINVAL;
++ }
+
+ /* Do not always start polling from the same modulation */
+ get_random_bytes(&rand_mod, sizeof(rand_mod));
+diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
+index 7eb17f46a8153d..9e1a34e23af26e 100644
+--- a/drivers/nfc/trf7970a.c
++++ b/drivers/nfc/trf7970a.c
+@@ -424,7 +424,8 @@ struct trf7970a {
+ enum trf7970a_state state;
+ struct device *dev;
+ struct spi_device *spi;
+- struct regulator *regulator;
++ struct regulator *vin_regulator;
++ struct regulator *vddio_regulator;
+ struct nfc_digital_dev *ddev;
+ u32 quirks;
+ bool is_initiator;
+@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
+ if (trf->state != TRF7970A_ST_PWR_OFF)
+ return 0;
+
+- ret = regulator_enable(trf->regulator);
++ ret = regulator_enable(trf->vin_regulator);
+ if (ret) {
+ dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
+ return ret;
+@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
+ if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
+ gpiod_set_value_cansleep(trf->en2_gpiod, 0);
+
+- ret = regulator_disable(trf->regulator);
++ ret = regulator_disable(trf->vin_regulator);
+ if (ret)
+ dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
+ ret);
+@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
+ mutex_init(&trf->lock);
+ INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
+
+- trf->regulator = devm_regulator_get(&spi->dev, "vin");
+- if (IS_ERR(trf->regulator)) {
+- ret = PTR_ERR(trf->regulator);
++ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
++ if (IS_ERR(trf->vin_regulator)) {
++ ret = PTR_ERR(trf->vin_regulator);
+ dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+- ret = regulator_enable(trf->regulator);
++ ret = regulator_enable(trf->vin_regulator);
+ if (ret) {
+ dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+- uvolts = regulator_get_voltage(trf->regulator);
++ uvolts = regulator_get_voltage(trf->vin_regulator);
+ if (uvolts > 4000000)
+ trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
+
+- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
+- if (IS_ERR(trf->regulator)) {
+- ret = PTR_ERR(trf->regulator);
++ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
++ if (IS_ERR(trf->vddio_regulator)) {
++ ret = PTR_ERR(trf->vddio_regulator);
+ dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
+- goto err_destroy_lock;
++ goto err_disable_vin_regulator;
+ }
+
+- ret = regulator_enable(trf->regulator);
++ ret = regulator_enable(trf->vddio_regulator);
+ if (ret) {
+ dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
+- goto err_destroy_lock;
++ goto err_disable_vin_regulator;
+ }
+
+- if (regulator_get_voltage(trf->regulator) == 1800000) {
++ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
+ trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
+ dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
+ }
+@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
+ if (!trf->ddev) {
+ dev_err(trf->dev, "Can't allocate NFC digital device\n");
+ ret = -ENOMEM;
+- goto err_disable_regulator;
++ goto err_disable_vddio_regulator;
+ }
+
+ nfc_digital_set_parent_dev(trf->ddev, trf->dev);
+@@ -2137,8 +2138,10 @@ static int trf7970a_probe(struct spi_device *spi)
+ trf7970a_shutdown(trf);
+ err_free_ddev:
+ nfc_digital_free_device(trf->ddev);
+-err_disable_regulator:
+- regulator_disable(trf->regulator);
++err_disable_vddio_regulator:
++ regulator_disable(trf->vddio_regulator);
++err_disable_vin_regulator:
++ regulator_disable(trf->vin_regulator);
+ err_destroy_lock:
+ mutex_destroy(&trf->lock);
+ return ret;
+@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
+ nfc_digital_unregister_device(trf->ddev);
+ nfc_digital_free_device(trf->ddev);
+
+- regulator_disable(trf->regulator);
++ regulator_disable(trf->vddio_regulator);
++ regulator_disable(trf->vin_regulator);
+
+ mutex_destroy(&trf->lock);
+ }
+diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
+index b027be0b0b6ff7..6b89d596ba9afe 100644
+--- a/drivers/nfc/virtual_ncidev.c
++++ b/drivers/nfc/virtual_ncidev.c
+@@ -26,10 +26,14 @@ struct virtual_nci_dev {
+ struct mutex mtx;
+ struct sk_buff *send_buff;
+ struct wait_queue_head wq;
++ bool running;
+ };
+
+ static int virtual_nci_open(struct nci_dev *ndev)
+ {
++ struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
++
++ vdev->running = true;
+ return 0;
+ }
+
+@@ -40,6 +44,7 @@ static int virtual_nci_close(struct nci_dev *ndev)
+ mutex_lock(&vdev->mtx);
+ kfree_skb(vdev->send_buff);
+ vdev->send_buff = NULL;
++ vdev->running = false;
+ mutex_unlock(&vdev->mtx);
+
+ return 0;
+@@ -50,7 +55,7 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+ struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
+
+ mutex_lock(&vdev->mtx);
+- if (vdev->send_buff) {
++ if (vdev->send_buff || !vdev->running) {
+ mutex_unlock(&vdev->mtx);
+ kfree_skb(skb);
+ return -1;
+@@ -120,6 +125,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
+ kfree_skb(skb);
+ return -EFAULT;
+ }
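++	/* Reject frames that contain an embedded NUL byte */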
++ if (strnlen(skb->data, count) != count) {
++ kfree_skb(skb);
++ return -EINVAL;
++ }
+
+ nci_recv_frame(vdev->ndev, skb);
+ return count;
+diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
+index 27dd93deff6e56..d702bee7808266 100644
+--- a/drivers/ntb/core.c
++++ b/drivers/ntb/core.c
+@@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client);
+
+ int ntb_register_device(struct ntb_dev *ntb)
+ {
++ int ret;
++
+ if (!ntb)
+ return -EINVAL;
+ if (!ntb->pdev)
+@@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb)
+ ntb->ctx_ops = NULL;
+ spin_lock_init(&ntb->ctx_lock);
+
+- return device_register(&ntb->dev);
++ ret = device_register(&ntb->dev);
++ if (ret)
++ put_device(&ntb->dev);
++
++ return ret;
+ }
+ EXPORT_SYMBOL(ntb_register_device);
+
+diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+index 9ab836d0d4f12d..079b8cd7978573 100644
+--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+@@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
+ ndev->debugfs_dir =
+ debugfs_create_dir(pci_name(ndev->ntb.pdev),
+ debugfs_dir);
+- if (!ndev->debugfs_dir)
++ if (IS_ERR(ndev->debugfs_dir))
+ ndev->debugfs_info = NULL;
+ else
+ ndev->debugfs_info =
+diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+index d6bbcc7b5b90d6..0a94c634ddc27e 100644
+--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
++++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+@@ -1554,6 +1554,7 @@ static void switchtec_ntb_remove(struct device *dev)
+ switchtec_ntb_deinit_db_msg_irq(sndev);
+ switchtec_ntb_deinit_shared_mw(sndev);
+ switchtec_ntb_deinit_crosslink(sndev);
++ cancel_work_sync(&sndev->check_link_status_work);
+ kfree(sndev);
+ dev_info(dev, "ntb device unregistered\n");
+ }
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index f9e7847a378e77..c84fadfc63c52c 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -807,16 +807,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
+ }
+
+ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
+- struct device *dma_dev, size_t align)
++ struct device *ntb_dev, size_t align)
+ {
+ dma_addr_t dma_addr;
+ void *alloc_addr, *virt_addr;
+ int rc;
+
+- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
+- &dma_addr, GFP_KERNEL);
++ /*
++ * The buffer here is allocated against the NTB device. The reason to
++	 * use a dma_alloc_*() call is to allocate a large, IOVA-contiguous buffer
++ * backing the NTB BAR for the remote host to write to. During receive
++ * processing, the data is being copied out of the receive buffer to
++ * the kernel skbuff. When a DMA device is being used, dma_map_page()
++ * is called on the kvaddr of the receive buffer (from dma_alloc_*())
++	 * and remapped against the DMA device. This looks like a double DMA
++	 * mapping of the buffer, but the first mapping is to the NTB device
++	 * and the second to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary
++ * in order for the later dma_map_page() to not fail.
++ */
++ alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size,
++ &dma_addr, GFP_KERNEL,
++ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!alloc_addr) {
+- dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
++ dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n",
+ mw->alloc_size);
+ return -ENOMEM;
+ }
+@@ -845,7 +858,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
+ return 0;
+
+ err:
+- dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
++ dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
+
+ return rc;
+ }
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index 553f1f46bc664f..72bc1d017a46ee 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
+ "\tOut buffer addr 0x%pK\n", peer->outbuf);
+
+ pos += scnprintf(buf + pos, buf_size - pos,
+- "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
++ "\tOut buff phys addr %pap\n", &peer->out_phys_addr);
+
+ pos += scnprintf(buf + pos, buf_size - pos,
+ "\tOut buffer size %pa\n", &peer->outbuf_size);
+diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
+index 07177eadc56e87..1ea8c27e8874d2 100644
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -1927,12 +1927,16 @@ static int cmp_dpa(const void *a, const void *b)
+ static struct device **scan_labels(struct nd_region *nd_region)
+ {
+ int i, count = 0;
+- struct device *dev, **devs = NULL;
++ struct device *dev, **devs;
+ struct nd_label_ent *label_ent, *e;
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+
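++	/* Reserve room up front for one namespace plus a NULL terminator */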
++ devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
++ if (!devs)
++ return NULL;
++
+ /* "safe" because create_namespace_pmem() might list_move() label_ent */
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+@@ -1951,12 +1955,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
+ goto err;
+ if (i < count)
+ continue;
+- __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
+- if (!__devs)
+- goto err;
+- memcpy(__devs, devs, sizeof(dev) * count);
+- kfree(devs);
+- devs = __devs;
++ if (count) {
++ __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
++ if (!__devs)
++ goto err;
++ memcpy(__devs, devs, sizeof(dev) * count);
++ kfree(devs);
++ devs = __devs;
++ }
+
+ dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
+ if (IS_ERR(dev)) {
+@@ -1983,11 +1989,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
+
+ /* Publish a zero-sized namespace for userspace to configure. */
+ nd_mapping_free_labels(nd_mapping);
+-
+- devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
+- if (!devs)
+- goto err;
+-
+ nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+ if (!nspm)
+ goto err;
+@@ -2026,11 +2027,10 @@ static struct device **scan_labels(struct nd_region *nd_region)
+ return devs;
+
+ err:
+- if (devs) {
+- for (i = 0; devs[i]; i++)
+- namespace_pmem_release(devs[i]);
+- kfree(devs);
+- }
++ for (i = 0; devs[i]; i++)
++ namespace_pmem_release(devs[i]);
++ kfree(devs);
++
+ return NULL;
+ }
+
+diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
+index 1f8c667c6f1ee0..839f10ca56eac9 100644
+--- a/drivers/nvdimm/nd_virtio.c
++++ b/drivers/nvdimm/nd_virtio.c
+@@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
+ unsigned long flags;
+ int err, err1;
+
++ /*
++ * Don't bother to submit the request to the device if the device is
++ * not activated.
++ */
++ if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
++ dev_info(&vdev->dev, "virtio pmem device needs a reset\n");
++ return -EIO;
++ }
++
+ might_sleep();
+ req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data)
+diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
+index 1b9f5b8a6167e6..d3fca0ab629007 100644
+--- a/drivers/nvdimm/of_pmem.c
++++ b/drivers/nvdimm/of_pmem.c
+@@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
+ if (!priv)
+ return -ENOMEM;
+
+- priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
++ priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
++ GFP_KERNEL);
++ if (!priv->bus_desc.provider_name) {
++ kfree(priv);
++ return -ENOMEM;
++ }
++
+ priv->bus_desc.module = THIS_MODULE;
+ priv->bus_desc.of_node = np;
+
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index 0a81f87f6f6c0e..e2f1fb99707fc4 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -939,7 +939,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
+ {
+ unsigned int cpu, lane;
+
+- cpu = get_cpu();
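++	/* Pin to the current CPU without disabling preemption (get_cpu() would) */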
++ migrate_disable();
++ cpu = smp_processor_id();
+ if (nd_region->num_lanes < nr_cpu_ids) {
+ struct nd_percpu_lane *ndl_lock, *ndl_count;
+
+@@ -958,16 +959,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
+ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
+ {
+ if (nd_region->num_lanes < nr_cpu_ids) {
+- unsigned int cpu = get_cpu();
++ unsigned int cpu = smp_processor_id();
+ struct nd_percpu_lane *ndl_lock, *ndl_count;
+
+ ndl_count = per_cpu_ptr(nd_region->lane, cpu);
+ ndl_lock = per_cpu_ptr(nd_region->lane, lane);
+ if (--ndl_count->count == 0)
+ spin_unlock(&ndl_lock->lock);
+- put_cpu();
+ }
+- put_cpu();
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(nd_region_release_lane);
+
+diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
+index 064592a5d546a6..811541ce206bf8 100644
+--- a/drivers/nvme/host/auth.c
++++ b/drivers/nvme/host/auth.c
+@@ -840,6 +840,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
+ }
+
+ fail2:
++ if (chap->status == 0)
++ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 21783aa2ee8e18..82509f3679373a 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -131,7 +131,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
+ /*
+ * Only new queue scan work when admin and IO queues are both alive
+ */
+- if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
++ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
+ queue_work(nvme_wq, &ctrl->scan_work);
+ }
+
+@@ -143,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
+ */
+ int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+ {
+- if (ctrl->state != NVME_CTRL_RESETTING)
++ if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
+ return -EBUSY;
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+ return -EBUSY;
+@@ -156,7 +156,7 @@ static void nvme_failfast_work(struct work_struct *work)
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, failfast_work);
+
+- if (ctrl->state != NVME_CTRL_CONNECTING)
++ if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
+ return;
+
+ set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+@@ -200,7 +200,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+ ret = nvme_reset_ctrl(ctrl);
+ if (!ret) {
+ flush_work(&ctrl->reset_work);
+- if (ctrl->state != NVME_CTRL_LIVE)
++ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
+ ret = -ENETRESET;
+ }
+
+@@ -377,7 +377,7 @@ static inline void nvme_end_req_zoned(struct request *req)
+ le64_to_cpu(nvme_req(req)->result.u64));
+ }
+
+-static inline void nvme_end_req(struct request *req)
++void nvme_end_req(struct request *req)
+ {
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
+
+@@ -499,7 +499,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+- old_state = ctrl->state;
++ old_state = nvme_ctrl_state(ctrl);
+ switch (new_state) {
+ case NVME_CTRL_LIVE:
+ switch (old_state) {
+@@ -567,7 +567,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ }
+
+ if (changed) {
+- ctrl->state = new_state;
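++		/* Pairs with the lockless READ_ONCE() in nvme_ctrl_state() */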
++ WRITE_ONCE(ctrl->state, new_state);
+ wake_up_all(&ctrl->state_wq);
+ }
+
+@@ -575,11 +575,11 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ if (!changed)
+ return false;
+
+- if (ctrl->state == NVME_CTRL_LIVE) {
++ if (new_state == NVME_CTRL_LIVE) {
+ if (old_state == NVME_CTRL_CONNECTING)
+ nvme_stop_failfast_work(ctrl);
+ nvme_kick_requeue_lists(ctrl);
+- } else if (ctrl->state == NVME_CTRL_CONNECTING &&
++ } else if (new_state == NVME_CTRL_CONNECTING &&
+ old_state == NVME_CTRL_RESETTING) {
+ nvme_start_failfast_work(ctrl);
+ }
+@@ -587,27 +587,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ }
+ EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+
+-/*
+- * Returns true for sink states that can't ever transition back to live.
+- */
+-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+-{
+- switch (ctrl->state) {
+- case NVME_CTRL_NEW:
+- case NVME_CTRL_LIVE:
+- case NVME_CTRL_RESETTING:
+- case NVME_CTRL_CONNECTING:
+- return false;
+- case NVME_CTRL_DELETING:
+- case NVME_CTRL_DELETING_NOIO:
+- case NVME_CTRL_DEAD:
+- return true;
+- default:
+- WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+- return true;
+- }
+-}
+-
+ /*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+@@ -617,7 +596,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+ wait_event(ctrl->state_wq,
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+ nvme_state_terminal(ctrl));
+- return ctrl->state == NVME_CTRL_RESETTING;
++ return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
+ }
+ EXPORT_SYMBOL_GPL(nvme_wait_reset);
+
+@@ -653,7 +632,7 @@ static void nvme_free_ns(struct kref *kref)
+ kfree(ns);
+ }
+
+-static inline bool nvme_get_ns(struct nvme_ns *ns)
++bool nvme_get_ns(struct nvme_ns *ns)
+ {
+ return kref_get_unless_zero(&ns->kref);
+ }
+@@ -704,9 +683,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request);
+ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq)
+ {
+- if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+- ctrl->state != NVME_CTRL_DELETING &&
+- ctrl->state != NVME_CTRL_DEAD &&
++ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
++
++ if (state != NVME_CTRL_DELETING_NOIO &&
++ state != NVME_CTRL_DELETING &&
++ state != NVME_CTRL_DEAD &&
+ !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
+ !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
+@@ -736,7 +717,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+	 * command, which is required to set the queue live in the
+	 * appropriate states.
+ */
+- switch (ctrl->state) {
++ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_CONNECTING:
+ if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+@@ -957,6 +938,7 @@ void nvme_cleanup_cmd(struct request *req)
+ clear_bit_unlock(0, &ctrl->discard_page_busy);
+ else
+ kfree(bvec_virt(&req->special_vec));
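++		/* Clear the flag so a retried request cannot reuse the freed payload */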
++ req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
+ }
+ }
+ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+@@ -1331,8 +1313,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+
+ error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+ sizeof(struct nvme_id_ctrl));
+- if (error)
++ if (error) {
+ kfree(*id);
++ *id = NULL;
++ }
+ return error;
+ }
+
+@@ -1461,6 +1445,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ if (error) {
+ dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
+ kfree(*id);
++ *id = NULL;
+ }
+ return error;
+ }
+@@ -1479,7 +1464,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+- return -ENODEV;
++ ret = -ENODEV;
++ goto error;
+ }
+
+ info->anagrpid = id->anagrpid;
+@@ -1497,8 +1483,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
+ }
++
++error:
+ kfree(id);
+- return 0;
++ return ret;
+ }
+
+ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+@@ -1813,16 +1801,18 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
+ return ret;
+ }
+
+-static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
++static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+ struct nvme_ctrl *ctrl = ns->ctrl;
++ int ret;
+
+- if (nvme_init_ms(ns, id))
+- return;
++ ret = nvme_init_ms(ns, id);
++ if (ret)
++ return ret;
+
+ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+- return;
++ return 0;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+@@ -1831,7 +1821,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+ * remap the separate metadata buffer from the block layer.
+ */
+ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+- return;
++ return 0;
+
+ ns->features |= NVME_NS_EXT_LBAS;
+
+@@ -1858,6 +1848,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+ else
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ }
++ return 0;
+ }
+
+ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+@@ -1887,9 +1878,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
+
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+- * yet, so catch this early and don't allow block I/O.
++ * or smaller than a sector size yet, so catch this early and don't
++ * allow block I/O.
+ */
+- if (ns->lba_shift > PAGE_SHIFT) {
++ if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
+ capacity = 0;
+ bs = (1 << 9);
+ }
+@@ -2026,12 +2018,23 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ if (ret)
+ return ret;
+
++ if (id->ncap == 0) {
++ /* namespace not allocated or attached */
++ info->is_removed = true;
++ ret = -ENODEV;
++ goto error;
++ }
++
+ blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
+ ns->lba_shift = id->lbaf[lbaf].ds;
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+
+- nvme_configure_metadata(ns, id);
++ ret = nvme_configure_metadata(ns, id);
++ if (ret < 0) {
++ blk_mq_unfreeze_queue(ns->disk->queue);
++ goto out;
++ }
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+
+@@ -2083,6 +2086,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ set_bit(NVME_NS_READY, &ns->flags);
+ ret = 0;
+ }
++
++error:
+ kfree(id);
+ return ret;
+ }
+@@ -2522,7 +2527,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
+
+ if (ctrl->ps_max_latency_us != latency) {
+ ctrl->ps_max_latency_us = latency;
+- if (ctrl->state == NVME_CTRL_LIVE)
++ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
+ nvme_configure_apst(ctrl);
+ }
+ }
+@@ -3208,7 +3213,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
+
+- switch (ctrl->state) {
++ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_LIVE:
+ break;
+ default:
+@@ -3518,7 +3523,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
+ "Found shared namespace %d, but multipathing not supported.\n",
+ info->nsid);
+ dev_warn_once(ctrl->device,
+- "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
++ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
+ }
+ }
+
+@@ -3537,9 +3542,10 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
+ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ {
+ struct nvme_ns *ns, *ret = NULL;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list) {
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ if (ns->head->ns_id == nsid) {
+ if (!nvme_get_ns(ns))
+ continue;
+@@ -3549,7 +3555,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ if (ns->head->ns_id > nsid)
+ break;
+ }
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ return ret;
+ }
+ EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
+@@ -3563,7 +3569,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+
+ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+ if (tmp->head->ns_id < ns->head->ns_id) {
+- list_add(&ns->list, &tmp->list);
++ list_add_rcu(&ns->list, &tmp->list);
+ return;
+ }
+ }
+@@ -3629,9 +3635,18 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
+ if (nvme_update_ns_info(ns, info))
+ goto out_unlink_ns;
+
+- down_write(&ctrl->namespaces_rwsem);
++ mutex_lock(&ctrl->namespaces_lock);
++ /*
++ * Ensure that no namespaces are added to the ctrl list after the queues
++ * are frozen, thereby avoiding a deadlock between scan and reset.
++ */
++ if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
++ mutex_unlock(&ctrl->namespaces_lock);
++ goto out_unlink_ns;
++ }
+ nvme_ns_add_to_ctrl_list(ns);
+- up_write(&ctrl->namespaces_rwsem);
++ mutex_unlock(&ctrl->namespaces_lock);
++ synchronize_srcu(&ctrl->srcu);
+ nvme_get_ctrl(ctrl);
+
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+@@ -3647,9 +3662,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
+
+ out_cleanup_ns_from_list:
+ nvme_put_ctrl(ctrl);
+- down_write(&ctrl->namespaces_rwsem);
+- list_del_init(&ns->list);
+- up_write(&ctrl->namespaces_rwsem);
++ mutex_lock(&ctrl->namespaces_lock);
++ list_del_rcu(&ns->list);
++ mutex_unlock(&ctrl->namespaces_lock);
++ synchronize_srcu(&ctrl->srcu);
+ out_unlink_ns:
+ mutex_lock(&ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+@@ -3699,9 +3715,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+ del_gendisk(ns->disk);
+
+- down_write(&ns->ctrl->namespaces_rwsem);
+- list_del_init(&ns->list);
+- up_write(&ns->ctrl->namespaces_rwsem);
++ mutex_lock(&ns->ctrl->namespaces_lock);
++ list_del_rcu(&ns->list);
++ mutex_unlock(&ns->ctrl->namespaces_lock);
++ synchronize_srcu(&ns->ctrl->srcu);
+
+ if (last_path)
+ nvme_mpath_shutdown_disk(ns->head);
+@@ -3791,16 +3808,18 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ struct nvme_ns *ns, *next;
+ LIST_HEAD(rm_list);
+
+- down_write(&ctrl->namespaces_rwsem);
++ mutex_lock(&ctrl->namespaces_lock);
+ list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
+- if (ns->head->ns_id > nsid)
+- list_move_tail(&ns->list, &rm_list);
++ if (ns->head->ns_id > nsid) {
++ list_del_rcu(&ns->list);
++ synchronize_srcu(&ctrl->srcu);
++ list_add_tail_rcu(&ns->list, &rm_list);
++ }
+ }
+- up_write(&ctrl->namespaces_rwsem);
++ mutex_unlock(&ctrl->namespaces_lock);
+
+ list_for_each_entry_safe(ns, next, &rm_list, list)
+ nvme_ns_remove(ns);
+-
+ }
+
+ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
+@@ -3894,7 +3913,7 @@ static void nvme_scan_work(struct work_struct *work)
+ int ret;
+
+ /* No tagset on a live ctrl means IO queues could not created */
+- if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
++ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
+ return;
+
+ /*
+@@ -3964,15 +3983,16 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
+ * removing the namespaces' disks; fail all the queues now to avoid
+ * potentially having to clean up the failed sync later.
+ */
+- if (ctrl->state == NVME_CTRL_DEAD)
++ if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
+ nvme_mark_namespaces_dead(ctrl);
+
+ /* this is a no-op when called from the controller reset handler */
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
+
+- down_write(&ctrl->namespaces_rwsem);
+- list_splice_init(&ctrl->namespaces, &ns_list);
+- up_write(&ctrl->namespaces_rwsem);
++ mutex_lock(&ctrl->namespaces_lock);
++ list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
++ mutex_unlock(&ctrl->namespaces_lock);
++ synchronize_srcu(&ctrl->srcu);
+
+ list_for_each_entry_safe(ns, next, &ns_list, list)
+ nvme_ns_remove(ns);
+@@ -4046,7 +4066,7 @@ static void nvme_async_event_work(struct work_struct *work)
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+- if (ctrl->state == NVME_CTRL_LIVE)
++ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
+ }
+
+@@ -4084,6 +4104,8 @@ static void nvme_fw_act_work(struct work_struct *work)
+ struct nvme_ctrl, fw_act_work);
+ unsigned long fw_act_timeout;
+
++ nvme_auth_stop(ctrl);
++
+ if (ctrl->mtfa)
+ fw_act_timeout = jiffies +
+ msecs_to_jiffies(ctrl->mtfa * 100);
+@@ -4139,7 +4161,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ * firmware activation.
+ */
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+- nvme_auth_stop(ctrl);
+ requeue = false;
+ queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
+@@ -4215,7 +4236,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ set->ops = ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+- set->reserved_tags = NVMF_RESERVED_TAGS;
++ /* Reserved for fabric connect and keep alive */
++ set->reserved_tags = 2;
+ set->numa_node = ctrl->numa_node;
+ set->flags = BLK_MQ_F_NO_SCHED;
+ if (ctrl->ops->flags & NVME_F_BLOCKING)
+@@ -4284,7 +4306,8 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
+ else if (ctrl->ops->flags & NVME_F_FABRICS)
+- set->reserved_tags = NVMF_RESERVED_TAGS;
++ /* Reserved for fabric connect */
++ set->reserved_tags = 1;
+ set->numa_node = ctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (ctrl->ops->flags & NVME_F_BLOCKING)
+@@ -4402,6 +4425,7 @@ static void nvme_free_ctrl(struct device *dev)
+
+ nvme_free_cels(ctrl);
+ nvme_mpath_uninit(ctrl);
++ cleanup_srcu_struct(&ctrl->srcu);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
+ __free_page(ctrl->discard_page);
+@@ -4430,13 +4454,18 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ {
+ int ret;
+
+- ctrl->state = NVME_CTRL_NEW;
++ WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+ spin_lock_init(&ctrl->lock);
++ mutex_init(&ctrl->namespaces_lock);
++
++ ret = init_srcu_struct(&ctrl->srcu);
++ if (ret)
++ return ret;
++
+ mutex_init(&ctrl->scan_lock);
+ INIT_LIST_HEAD(&ctrl->namespaces);
+ xa_init(&ctrl->cels);
+- init_rwsem(&ctrl->namespaces_rwsem);
+ ctrl->dev = dev;
+ ctrl->ops = ops;
+ ctrl->quirks = quirks;
+@@ -4515,6 +4544,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ out:
+ if (ctrl->discard_page)
+ __free_page(ctrl->discard_page);
++ cleanup_srcu_struct(&ctrl->srcu);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+@@ -4523,36 +4553,40 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list)
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ blk_mark_disk_dead(ns->disk);
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ }
+ EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
+
+ void nvme_unfreeze(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list)
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ blk_mq_unfreeze_queue(ns->queue);
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
++ clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
+ }
+ EXPORT_SYMBOL_GPL(nvme_unfreeze);
+
+ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list) {
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
+ if (timeout <= 0)
+ break;
+ }
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ return timeout;
+ }
+ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
+@@ -4560,22 +4594,25 @@ EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
+ void nvme_wait_freeze(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list)
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ blk_mq_freeze_queue_wait(ns->queue);
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ }
+ EXPORT_SYMBOL_GPL(nvme_wait_freeze);
+
+ void nvme_start_freeze(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list)
++ set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ blk_freeze_queue_start(ns->queue);
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_freeze);
+
+@@ -4618,11 +4655,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
+ void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list)
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
+ blk_sync_queue(ns->queue);
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ }
+ EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 8175d49f290901..92ba315cfe19e6 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -645,8 +645,10 @@ static const match_table_t opt_tokens = {
+ { NVMF_OPT_TOS, "tos=%d" },
+ { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
++#ifdef CONFIG_NVME_HOST_AUTH
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
++#endif
+ { NVMF_OPT_ERR, NULL }
+ };
+
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index 82e7a27ffbde35..80e15ad3936f37 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -18,13 +18,6 @@
+ /* default is -1: the fail fast mechanism is disabled */
+ #define NVMF_DEF_FAIL_FAST_TMO -1
+
+-/*
+- * Reserved one command for internal usage. This command is used for sending
+- * the connect command, as well as for the keep alive command on the admin
+- * queue once live.
+- */
+-#define NVMF_RESERVED_TAGS 1
+-
+ /*
+ * Define a host as seen by the target. We allocate one at boot, but also
+ * allow the override it when creating controllers. This is both to provide
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index a15b37750d6e93..cdb1e706f855e5 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
+ static DEFINE_IDA(nvme_fc_local_port_cnt);
+ static DEFINE_IDA(nvme_fc_ctrl_cnt);
+
+-static struct workqueue_struct *nvme_fc_wq;
+-
+-static bool nvme_fc_waiting_to_unload;
+-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
+-
+ /*
+ * These items are short-term. They will eventually be moved into
+ * a generic FC class. See comments in module init.
+@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
+ /* remove from transport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&lport->port_list);
+- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
+- complete(&nvme_fc_unload_proceed);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
+@@ -557,7 +550,7 @@ nvme_fc_rport_get(struct nvme_fc_rport *rport)
+ static void
+ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
+ {
+- switch (ctrl->ctrl.state) {
++ switch (nvme_ctrl_state(&ctrl->ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_CONNECTING:
+ /*
+@@ -793,7 +786,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
+ "NVME-FC{%d}: controller connectivity lost. Awaiting "
+ "Reconnect", ctrl->cnum);
+
+- switch (ctrl->ctrl.state) {
++ switch (nvme_ctrl_state(&ctrl->ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ /*
+@@ -2548,24 +2541,17 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ * the controller. Abort any ios on the association and let the
+ * create_association error path resolve things.
+ */
+- enum nvme_ctrl_state state;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&ctrl->lock, flags);
+- state = ctrl->ctrl.state;
+- if (state == NVME_CTRL_CONNECTING) {
+- set_bit(ASSOC_FAILED, &ctrl->flags);
+- spin_unlock_irqrestore(&ctrl->lock, flags);
++ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
++ set_bit(ASSOC_FAILED, &ctrl->flags);
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport error during (re)connect\n",
+ ctrl->cnum);
+ return;
+ }
+- spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ /* Otherwise, only proceed if in LIVE state - e.g. on first error */
+- if (state != NVME_CTRL_LIVE)
++ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ return;
+
+ dev_warn(ctrl->ctrl.device,
+@@ -3179,16 +3165,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ else
+ ret = nvme_fc_recreate_io_queues(ctrl);
+ }
+-
+- spin_lock_irqsave(&ctrl->lock, flags);
+ if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
+ ret = -EIO;
+- if (ret) {
+- spin_unlock_irqrestore(&ctrl->lock, flags);
++ if (ret)
+ goto out_term_aen_ops;
+- }
++
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+- spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ ctrl->ctrl.nr_reconnects = 0;
+
+@@ -3316,7 +3298,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
+ unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
+ bool recon = true;
+
+- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
++ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
+ return;
+
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+@@ -3904,10 +3886,6 @@ static int __init nvme_fc_init_module(void)
+ {
+ int ret;
+
+- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+- if (!nvme_fc_wq)
+- return -ENOMEM;
+-
+ /*
+ * NOTE:
+ * It is expected that in the future the kernel will combine
+@@ -3925,7 +3903,7 @@ static int __init nvme_fc_init_module(void)
+ ret = class_register(&fc_class);
+ if (ret) {
+ pr_err("couldn't register class fc\n");
+- goto out_destroy_wq;
++ return ret;
+ }
+
+ /*
+@@ -3949,8 +3927,6 @@ static int __init nvme_fc_init_module(void)
+ device_destroy(&fc_class, MKDEV(0, 0));
+ out_destroy_class:
+ class_unregister(&fc_class);
+-out_destroy_wq:
+- destroy_workqueue(nvme_fc_wq);
+
+ return ret;
+ }
+@@ -3970,45 +3946,23 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
+ spin_unlock(&rport->lock);
+ }
+
+-static void
+-nvme_fc_cleanup_for_unload(void)
++static void __exit nvme_fc_exit_module(void)
+ {
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+-
+- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+- list_for_each_entry(rport, &lport->endp_list, endp_list) {
+- nvme_fc_delete_controllers(rport);
+- }
+- }
+-}
+-
+-static void __exit nvme_fc_exit_module(void)
+-{
+ unsigned long flags;
+- bool need_cleanup = false;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+- nvme_fc_waiting_to_unload = true;
+- if (!list_empty(&nvme_fc_lport_list)) {
+- need_cleanup = true;
+- nvme_fc_cleanup_for_unload();
+- }
++ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
++ list_for_each_entry(rport, &lport->endp_list, endp_list)
++ nvme_fc_delete_controllers(rport);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+- if (need_cleanup) {
+- pr_info("%s: waiting for ctlr deletes\n", __func__);
+- wait_for_completion(&nvme_fc_unload_proceed);
+- pr_info("%s: ctrl deletes complete\n", __func__);
+- }
++ flush_workqueue(nvme_delete_wq);
+
+ nvmf_unregister_transport(&nvme_fc_transport);
+
+- ida_destroy(&nvme_fc_local_port_cnt);
+- ida_destroy(&nvme_fc_ctrl_cnt);
+-
+ device_destroy(&fc_class, MKDEV(0, 0));
+ class_unregister(&fc_class);
+- destroy_workqueue(nvme_fc_wq);
+ }
+
+ module_init(nvme_fc_init_module);
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 747c879e8982b8..875dee6ecd4081 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -18,15 +18,12 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+ {
+ u32 effects;
+
+- if (capable(CAP_SYS_ADMIN))
+- return true;
+-
+ /*
+ * Do not allow unprivileged passthrough on partitions, as that allows an
+ * escape from the containment of the partition.
+ */
+ if (flags & NVME_IOCTL_PARTITION)
+- return false;
++ goto admin;
+
+ /*
+ * Do not allow unprivileged processes to send vendor specific or fabrics
+@@ -34,7 +31,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+ */
+ if (c->common.opcode >= nvme_cmd_vendor_start ||
+ c->common.opcode == nvme_fabrics_command)
+- return false;
++ goto admin;
+
+ /*
+ * Do not allow unprivileged passthrough of admin commands except
+@@ -53,7 +50,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+ return true;
+ }
+ }
+- return false;
++ goto admin;
+ }
+
+ /*
+@@ -63,7 +60,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+ */
+ effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+ if (!(effects & NVME_CMD_EFFECTS_CSUPP))
+- return false;
++ goto admin;
+
+ /*
+ * Don't allow passthrough for commands that have intrusive (or unknown)
+@@ -72,16 +69,20 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_UUID_SEL |
+ NVME_CMD_EFFECTS_SCOPE_MASK))
+- return false;
++ goto admin;
+
+ /*
+ * Only allow I/O commands that transfer data to the controller or that
+ * change the logical block contents if the file descriptor is open for
+ * writing.
+ */
+- if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
+- return open_for_write;
++ if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
++ !open_for_write)
++ goto admin;
++
+ return true;
++admin:
++ return capable(CAP_SYS_ADMIN);
+ }
+
+ /*
+@@ -510,10 +511,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ req->bio = pdu->bio;
+- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
++ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
+ pdu->nvme_status = -EINTR;
+- else
++ } else {
+ pdu->nvme_status = nvme_req(req)->status;
++ if (!pdu->nvme_status)
++ pdu->nvme_status = blk_status_to_errno(err);
++ }
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+@@ -917,15 +921,15 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
+ bool open_for_write)
+ {
+ struct nvme_ns *ns;
+- int ret;
++ int ret, srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
+ if (list_empty(&ctrl->namespaces)) {
+ ret = -ENOTTY;
+ goto out_unlock;
+ }
+
+- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
++ ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
+ if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
+ dev_warn(ctrl->device,
+ "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
+@@ -935,15 +939,18 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
+
+ dev_warn(ctrl->device,
+ "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
+- kref_get(&ns->kref);
+- up_read(&ctrl->namespaces_rwsem);
++ if (!nvme_get_ns(ns)) {
++ ret = -ENXIO;
++ goto out_unlock;
++ }
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+
+ ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
+ nvme_put_ns(ns);
+ return ret;
+
+ out_unlock:
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ return ret;
+ }
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 0a88d7bdc5e370..37ea0fa421da8b 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -118,7 +118,8 @@ void nvme_failover_req(struct request *req)
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+
+- blk_mq_end_request(req, 0);
++ nvme_req(req)->status = 0;
++ nvme_end_req(req);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+
+@@ -150,16 +151,17 @@ void nvme_mpath_end_request(struct request *rq)
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list) {
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ if (!ns->head->disk)
+ continue;
+ kblockd_schedule_work(&ns->head->requeue_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ disk_uevent(ns->head->disk, KOBJ_CHANGE);
+ }
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ }
+
+ static const char *nvme_ana_state_names[] = {
+@@ -193,13 +195,14 @@ bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+ {
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list) {
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ }
+
+ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+@@ -246,7 +249,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
+ if (nvme_path_is_disabled(ns))
+ continue;
+
+- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
++ if (ns->ctrl->numa_node != NUMA_NO_NODE &&
++ READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ distance = node_distance(node, ns->ctrl->numa_node);
+ else
+ distance = LOCAL_DISTANCE;
+@@ -581,7 +585,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ rc = device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+ if (rc) {
+- clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
++ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+ return;
+ }
+ nvme_add_ns_head_cdev(head);
+@@ -592,7 +596,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ int node, srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+- for_each_node(node)
++ for_each_online_node(node)
+ __nvme_find_path(head, node);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ }
+@@ -677,6 +681,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
+ unsigned *nr_change_groups = data;
+ struct nvme_ns *ns;
++ int srcu_idx;
+
+ dev_dbg(ctrl->device, "ANA group %d: %s.\n",
+ le32_to_cpu(desc->grpid),
+@@ -688,8 +693,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ if (!nr_nsids)
+ return 0;
+
+- down_read(&ctrl->namespaces_rwsem);
+- list_for_each_entry(ns, &ctrl->namespaces, list) {
++ srcu_idx = srcu_read_lock(&ctrl->srcu);
++ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ unsigned nsid;
+ again:
+ nsid = le32_to_cpu(desc->nsids[n]);
+@@ -702,7 +707,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+ if (ns->head->ns_id > nsid)
+ goto again;
+ }
+- up_read(&ctrl->namespaces_rwsem);
++ srcu_read_unlock(&ctrl->srcu, srcu_idx);
+ return 0;
+ }
+
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index f35647c470afad..799f8a2bb0b4f1 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -88,6 +88,11 @@ enum nvme_quirks {
+ */
+ NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),
+
++ /*
++ * Problems seen with concurrent commands
++ */
++ NVME_QUIRK_QDEPTH_ONE = (1 << 6),
++
+ /*
+ * Set MEDIUM priority on SQ creation
+ */
+@@ -156,6 +161,16 @@ enum nvme_quirks {
+ * No temperature thresholds for channels other than 0 (Composite).
+ */
+ NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),
++
++ /*
++ * Disables simple suspend/resume path.
++ */
++ NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
++
++ /*
++ * MSI (but not MSI-X) interrupts are broken and never fire.
++ */
++ NVME_QUIRK_BROKEN_MSI = (1 << 21),
+ };
+
+ /*
+@@ -251,6 +266,7 @@ enum nvme_ctrl_flags {
+ NVME_CTRL_STOPPED = 3,
+ NVME_CTRL_SKIP_ID_CNS_CS = 4,
+ NVME_CTRL_DIRTY_CAPABILITY = 5,
++ NVME_CTRL_FROZEN = 6,
+ };
+
+ struct nvme_ctrl {
+@@ -269,7 +285,8 @@ struct nvme_ctrl {
+ struct blk_mq_tag_set *tagset;
+ struct blk_mq_tag_set *admin_tagset;
+ struct list_head namespaces;
+- struct rw_semaphore namespaces_rwsem;
++ struct mutex namespaces_lock;
++ struct srcu_struct srcu;
+ struct device ctrl_device;
+ struct device *device; /* char device */
+ #ifdef CONFIG_NVME_HWMON
+@@ -386,6 +403,11 @@ struct nvme_ctrl {
+ enum nvme_dctype dctype;
+ };
+
++static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
++{
++ return READ_ONCE(ctrl->state);
++}
++
+ enum nvme_iopolicy {
+ NVME_IOPOLICY_NUMA,
+ NVME_IOPOLICY_RR,
+@@ -469,7 +491,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
+ enum nvme_ns_features {
+ NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
+ NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+- NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */
++ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
+ };
+
+ struct nvme_ns {
+@@ -724,6 +746,28 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
+ }
+
++/*
++ * Returns true for sink states that can't ever transition back to live.
++ */
++static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
++{
++ switch (nvme_ctrl_state(ctrl)) {
++ case NVME_CTRL_NEW:
++ case NVME_CTRL_LIVE:
++ case NVME_CTRL_RESETTING:
++ case NVME_CTRL_CONNECTING:
++ return false;
++ case NVME_CTRL_DELETING:
++ case NVME_CTRL_DELETING_NOIO:
++ case NVME_CTRL_DEAD:
++ return true;
++ default:
++ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
++ return true;
++ }
++}
++
++void nvme_end_req(struct request *req);
+ void nvme_complete_rq(struct request *req);
+ void nvme_complete_batch_req(struct request *req);
+
+@@ -1088,6 +1132,7 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
+ struct nvme_command *cmd, int status);
+ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
+ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
++bool nvme_get_ns(struct nvme_ns *ns);
+ void nvme_put_ns(struct nvme_ns *ns);
+
+ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 3f0c9ee09a12bb..32b5cc76a0223c 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ struct bio_vec bv = req_bvec(req);
+
+ if (!is_pci_p2pdma_page(bv.bv_page)) {
+- if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
++ if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
++ bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ return nvme_setup_prp_simple(dev, req,
+ &cmnd->rw, &bv);
+
+@@ -825,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ struct nvme_command *cmnd)
+ {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
++ struct bio_vec bv = rq_integrity_vec(req);
+
+- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+- rq_dma_dir(req), 0);
++ iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->meta_dma))
+ return BLK_STS_IOERR;
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+@@ -862,7 +863,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+ nvme_start_request(req);
+ return BLK_STS_OK;
+ out_unmap_data:
+- nvme_unmap_data(dev, req);
++ if (blk_rq_nr_phys_segments(req))
++ nvme_unmap_data(dev, req);
+ out_free_cmd:
+ nvme_cleanup_cmd(req);
+ return ret;
+@@ -967,7 +969,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ dma_unmap_page(dev->dev, iod->meta_dma,
+- rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
++ rq_integrity_vec(req).bv_len, rq_dma_dir(req));
+ }
+
+ if (blk_rq_nr_phys_segments(req))
+@@ -1234,7 +1236,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+ bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+- switch (dev->ctrl.state) {
++ switch (nvme_ctrl_state(&dev->ctrl)) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+@@ -1274,7 +1276,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+ dev_warn(dev->ctrl.device,
+ "Does your device have a faulty power saving mode enabled?\n");
+ dev_warn(dev->ctrl.device,
+- "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
++ "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
+ }
+
+ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+@@ -1286,6 +1288,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ struct nvme_command cmd = { };
+ u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
++ if (nvme_state_terminal(&dev->ctrl))
++ goto disable;
++
+ /* If PCI error recovery process is happening, we cannot reset or
+ * the recovery mechanism will surely fail.
+ */
+@@ -1322,7 +1327,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ * cancellation error. All outstanding requests are completed on
+ * shutdown, so we return BLK_EH_DONE.
+ */
+- switch (dev->ctrl.state) {
++ switch (nvme_ctrl_state(&dev->ctrl)) {
+ case NVME_CTRL_CONNECTING:
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+ fallthrough;
+@@ -1388,8 +1393,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
+ return BLK_EH_RESET_TIMER;
+
+ disable:
+- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
++ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
++ if (nvme_state_terminal(&dev->ctrl))
++ nvme_dev_disable(dev, true);
+ return BLK_EH_DONE;
++ }
+
+ nvme_dev_disable(dev, false);
+ if (nvme_try_sched_reset(&dev->ctrl))
+@@ -1594,7 +1602,7 @@ static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+ /*
+ * Controller is in wrong state, fail early.
+ */
+- if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
++ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
+ mutex_unlock(&dev->shutdown_lock);
+ return -ENODEV;
+ }
+@@ -2216,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
+ .priv = dev,
+ };
+ unsigned int irq_queues, poll_queues;
++ unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
+
+ /*
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+@@ -2239,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
+ irq_queues = 1;
+ if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
+ irq_queues += (nr_io_queues - poll_queues);
+- return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
+- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
++ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
++ flags &= ~PCI_IRQ_MSI;
++ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
++ &affd);
+ }
+
+ static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
+@@ -2460,6 +2471,12 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
+
+ static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+ {
++ if (!dev->ctrl.tagset) {
++ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
++ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
++ return;
++ }
++
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
+@@ -2469,6 +2486,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
+ {
+ int result = -ENOMEM;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
++ unsigned int flags = PCI_IRQ_ALL_TYPES;
+
+ if (pci_enable_device_mem(pdev))
+ return result;
+@@ -2485,7 +2503,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
+ * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+ * adjust this later.
+ */
+- result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
++ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
++ flags &= ~PCI_IRQ_MSI;
++ result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
+ if (result < 0)
+ goto disable;
+
+@@ -2506,15 +2526,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
+ else
+ dev->io_sqes = NVME_NVM_IOSQES;
+
+- /*
+- * Temporary fix for the Apple controller found in the MacBook8,1 and
+- * some MacBook7,1 to avoid controller resets and data loss.
+- */
+- if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
++ if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) {
+ dev->q_depth = 2;
+- dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
+- "set queue depth=%u to work around controller resets\n",
+- dev->q_depth);
+ } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
+ (pdev->device == 0xa821 || pdev->device == 0xa822) &&
+ NVME_CAP_MQES(dev->ctrl.cap) == 0) {
+@@ -2574,13 +2587,13 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
+
+ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ {
++ enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ bool dead;
+
+ mutex_lock(&dev->shutdown_lock);
+ dead = nvme_pci_ctrl_is_dead(dev);
+- if (dev->ctrl.state == NVME_CTRL_LIVE ||
+- dev->ctrl.state == NVME_CTRL_RESETTING) {
++ if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
+ if (pci_is_enabled(pdev))
+ nvme_start_freeze(&dev->ctrl);
+ /*
+@@ -2691,7 +2704,7 @@ static void nvme_reset_work(struct work_struct *work)
+ bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
+ int result;
+
+- if (dev->ctrl.state != NVME_CTRL_RESETTING) {
++ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
+ dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+ dev->ctrl.state);
+ result = -ENODEV;
+@@ -2903,8 +2916,38 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
+ dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
+ return NVME_QUIRK_SIMPLE_SUSPEND;
++ } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
++ pdev->device == 0x500f)) {
++ /*
++ * Exclude some Kingston NV1 and A2000 devices from
++ * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
++ * lot of energy with s2idle sleep on some TUXEDO platforms.
++ */
++ if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
++ dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
++ dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
++ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
++ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
++ } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
++ /*
++ * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
++ * because of high power consumption (> 2 Watt) in s2idle
++ * sleep. Only some boards with Intel CPU are affected.
++ */
++ if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++ dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
++ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
++ dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
++ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ }
+
++ /*
++ * NVMe SSD drops off the PCIe bus after the system
++ * idles for 10 hours on a Lenovo N60z board.
++ */
++ if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
++ return NVME_QUIRK_NO_APST;
++
+ return 0;
+ }
+
+@@ -2933,7 +2976,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+ dev->dev = get_device(&pdev->dev);
+
+ quirks |= check_vendor_combination_bug(pdev);
+- if (!noacpi && acpi_storage_d3(&pdev->dev)) {
++ if (!noacpi &&
++ !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
++ acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a bios work around to ask for D3 on
+ * platforms that support kernel managed suspend.
+@@ -3193,7 +3238,7 @@ static int nvme_suspend(struct device *dev)
+ nvme_wait_freeze(ctrl);
+ nvme_sync_queues(ctrl);
+
+- if (ctrl->state != NVME_CTRL_LIVE)
++ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
+ goto unfreeze;
+
+ /*
+@@ -3347,6 +3392,11 @@ static const struct pci_device_id nvme_id_table[] = {
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
++ { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
++ .driver_data = NVME_QUIRK_QDEPTH_ONE },
++ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_BOGUS_NID, },
+@@ -3365,6 +3415,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES|
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++ { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
++ .driver_data = NVME_QUIRK_BROKEN_MSI },
+ { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+@@ -3474,7 +3526,12 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
+- .driver_data = NVME_QUIRK_SINGLE_VECTOR },
++ /*
++ * Fix for the Apple controller found in the MacBook8,1 and
++ * some MacBook7,1 to avoid controller resets and data loss.
++ */
++ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
++ NVME_QUIRK_QDEPTH_ONE },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index 391b1465ebfd5e..803efc97fd1ea5 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
+ if (nvme_is_path_error(nvme_sc))
+ return PR_STS_PATH_FAILED;
+
+- switch (nvme_sc) {
++ switch (nvme_sc & 0x7ff) {
+ case NVME_SC_SUCCESS:
+ return PR_STS_SUCCESS;
+ case NVME_SC_RESERVATION_CONFLICT:
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index a7fea4cbacd753..c04317a966b387 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -984,10 +984,11 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
+
+ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
+ {
++ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
++
+ /* If we are resetting/deleting then do nothing */
+- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+- WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
+- ctrl->ctrl.state == NVME_CTRL_LIVE);
++ if (state != NVME_CTRL_CONNECTING) {
++ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
+ return;
+ }
+
+@@ -1059,8 +1060,10 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
+ * unless we're during creation of a new controller to
+ * avoid races with teardown flow.
+ */
+- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
++ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
++
++ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
++ state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
+ ret = -EINVAL;
+ goto destroy_io;
+@@ -1128,8 +1131,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we started ctrl delete */
+- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
++ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
++
++ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
++ state != NVME_CTRL_DELETING_NOIO);
+ return;
+ }
+
+@@ -1161,7 +1166,7 @@ static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
++ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
+ dev_info(ctrl->ctrl.device,
+ "%s for CQE 0x%p failed with status %s (%d)\n",
+ op, wc->wr_cqe,
+@@ -1944,7 +1949,7 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
+ dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+ rq->tag, nvme_rdma_queue_idx(queue));
+
+- if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
++ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
+ /*
+ * If we are resetting, connecting or deleting we should
+ * complete immediately because we may block controller
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 5b332d9f87fc3a..f1d62d74426f0e 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1993,10 +1993,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+
+ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+ {
++ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
++
+ /* If we are resetting/deleting then do nothing */
+- if (ctrl->state != NVME_CTRL_CONNECTING) {
+- WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
+- ctrl->state == NVME_CTRL_LIVE);
++ if (state != NVME_CTRL_CONNECTING) {
++ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
+ return;
+ }
+
+@@ -2056,8 +2057,10 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
+ * unless we're during creation of a new controller to
+ * avoid races with teardown flow.
+ */
+- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+- ctrl->state != NVME_CTRL_DELETING_NOIO);
++ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
++
++ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
++ state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
+ ret = -EINVAL;
+ goto destroy_io;
+@@ -2124,8 +2127,10 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we started ctrl delete */
+- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+- ctrl->state != NVME_CTRL_DELETING_NOIO);
++ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
++
++ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
++ state != NVME_CTRL_DELETING_NOIO);
+ return;
+ }
+
+@@ -2155,8 +2160,10 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we started ctrl delete */
+- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+- ctrl->state != NVME_CTRL_DELETING_NOIO);
++ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
++
++ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
++ state != NVME_CTRL_DELETING_NOIO);
+ return;
+ }
+
+@@ -2274,7 +2281,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
+ nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
+ opc, nvme_opcode_str(qid, opc, fctype));
+
+- if (ctrl->state != NVME_CTRL_LIVE) {
++ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
+ /*
+ * If we are resetting, connecting or deleting we should
+ * complete immediately because we may block controller
+diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
+index 4dcddcf95279b3..aacc05ec00c2b8 100644
+--- a/drivers/nvme/target/auth.c
++++ b/drivers/nvme/target/auth.c
+@@ -284,9 +284,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+- pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+- __func__, shash_len,
+- crypto_shash_digestsize(shash_tfm));
++ pr_err("%s: hash len mismatch (len %d digest %d)\n",
++ __func__, shash_len,
++ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+@@ -314,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+- goto out_free_response;
++ goto out_free_challenge;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+@@ -325,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+- goto out_free_response;
++ goto out_free_challenge;
+ }
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+@@ -361,14 +361,15 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ goto out;
+ ret = crypto_shash_final(shash, response);
+ out:
++ kfree(shash);
++out_free_challenge:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+- kfree(shash);
+ out_free_response:
+ kfree_sensitive(host_response);
+ out_free_tfm:
+ crypto_free_shash(shash_tfm);
+- return 0;
++ return ret;
+ }
+
+ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+@@ -426,14 +427,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+- goto out_free_response;
++ goto out_free_challenge;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+- goto out_free_response;
++ goto out_free_challenge;
+ }
+ shash->tfm = shash_tfm;
+
+@@ -470,9 +471,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ goto out;
+ ret = crypto_shash_final(shash, response);
+ out:
++ kfree(shash);
++out_free_challenge:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+- kfree(shash);
+ out_free_response:
+ kfree_sensitive(ctrl_response);
+ out_free_tfm:
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 907143870da52a..384cd2b540d0ce 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -17,6 +17,7 @@
+ #endif
+ #include <crypto/hash.h>
+ #include <crypto/kpp.h>
++#include <linux/nospec.h>
+
+ #include "nvmet.h"
+
+@@ -509,6 +510,7 @@ static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
+
+ down_write(&nvmet_ana_sem);
+ oldgrpid = ns->anagrpid;
++ newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
+ nvmet_ana_group_enabled[newgrpid]++;
+ ns->anagrpid = newgrpid;
+ nvmet_ana_group_enabled[oldgrpid]--;
+@@ -536,10 +538,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ if (kstrtobool(page, &enable))
+ return -EINVAL;
+
++ /*
++ * Take the global nvmet_config_sem because the disable routine has a
++ * window where it releases the subsys lock, giving a parallel enable
++ * a chance to execute concurrently and leaving the disable with a
++ * misaccounted ns percpu_ref.
++ */
++ down_write(&nvmet_config_sem);
+ if (enable)
+ ret = nvmet_ns_enable(ns);
+ else
+ nvmet_ns_disable(ns);
++ up_write(&nvmet_config_sem);
+
+ return ret ? ret : count;
+ }
+@@ -614,6 +624,18 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
+ NULL,
+ };
+
++bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
++{
++ struct config_item *ns_item;
++ char name[12];
++
++ snprintf(name, sizeof(name), "%u", nsid);
++ mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
++ ns_item = config_group_find_item(&subsys->namespaces_group, name);
++ mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
++ return ns_item != NULL;
++}
++
+ static void nvmet_ns_release(struct config_item *item)
+ {
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+@@ -1700,6 +1722,7 @@ static struct config_group *nvmet_ana_groups_make_group(
+ grp->grpid = grpid;
+
+ down_write(&nvmet_ana_sem);
++ grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
+ nvmet_ana_group_enabled[grpid]++;
+ up_write(&nvmet_ana_sem);
+
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 3935165048e741..1cf6dfac183615 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -425,10 +425,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+ u16 nvmet_req_find_ns(struct nvmet_req *req)
+ {
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
++ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+
+- req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
++ req->ns = xa_load(&subsys->namespaces, nsid);
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
++ if (nvmet_subsys_nsid_exists(subsys, nsid))
++ return NVME_SC_INTERNAL_PATH_ERROR;
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
+
+@@ -803,6 +806,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
+ percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
+
++ /*
++ * We must reference the ctrl again after waiting for inflight IO
++ * to complete, because an admin connect may have sneaked in after we
++ * stored sq->ctrl locally but before we killed the percpu_ref. The
++ * admin connect allocates and assigns sq->ctrl, which now needs a
++ * final ref put, as this ctrl is going away.
++ */
++ ctrl = sq->ctrl;
++
+ if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+@@ -933,6 +945,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ req->metadata_sg_cnt = 0;
+ req->transfer_len = 0;
+ req->metadata_len = 0;
++ req->cqe->result.u64 = 0;
+ req->cqe->status = 0;
+ req->cqe->sq_head = 0;
+ req->ns = NULL;
+diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
+index 1d9854484e2e83..c103eba96350e8 100644
+--- a/drivers/nvme/target/fabrics-cmd-auth.c
++++ b/drivers/nvme/target/fabrics-cmd-auth.c
+@@ -332,7 +332,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+- req->cqe->result.u64 = 0;
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+@@ -515,8 +514,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+ done:
+- req->cqe->result.u64 = 0;
+-
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
+index 43b5bd8bb6a52d..fa9e8dc9215398 100644
+--- a/drivers/nvme/target/fabrics-cmd.c
++++ b/drivers/nvme/target/fabrics-cmd.c
+@@ -225,9 +225,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ if (status)
+ goto out;
+
+- /* zero out initial completion result, assign values as needed */
+- req->cqe->result.u32 = 0;
+-
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+@@ -244,6 +241,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ goto out;
+ }
+
++ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ le32_to_cpu(c->kato), &ctrl);
+ if (status)
+@@ -303,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ if (status)
+ goto out;
+
+- /* zero out initial completion result, assign values as needed */
+- req->cqe->result.u32 = 0;
+-
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+@@ -313,6 +309,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ goto out;
+ }
+
++ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
++ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid), req);
+ if (!ctrl) {
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 1ab6601fdd5cf9..8a02ed63b15666 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
+ struct nvmet_fc_port_entry *pe;
+ struct kref ref;
+ u32 max_sg_cnt;
++
++ struct work_struct put_work;
+ };
+
+ struct nvmet_fc_port_entry {
+@@ -165,7 +167,7 @@ struct nvmet_fc_tgt_assoc {
+ struct nvmet_fc_hostport *hostport;
+ struct nvmet_fc_ls_iod *rcv_disconn;
+ struct list_head a_list;
+- struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
++ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
+ struct kref ref;
+ struct work_struct del_work;
+ struct rcu_head rcu;
+@@ -248,6 +250,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+ static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+ static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
++static void nvmet_fc_put_tgtport_work(struct work_struct *work)
++{
++ struct nvmet_fc_tgtport *tgtport =
++ container_of(work, struct nvmet_fc_tgtport, put_work);
++
++ nvmet_fc_tgtport_put(tgtport);
++}
+ static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod);
+@@ -359,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+- return;
++ goto out_putwork;
+ }
+
+ list_del(&lsop->lsreq_list);
+@@ -372,7 +381,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+- nvmet_fc_tgtport_put(tgtport);
++out_putwork:
++ queue_work(nvmet_wq, &tgtport->put_work);
+ }
+
+ static int
+@@ -801,14 +811,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ if (!queue)
+ return NULL;
+
+- if (!nvmet_fc_tgt_a_get(assoc))
+- goto out_free_queue;
+-
+ queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+ assoc->tgtport->fc_target_port.port_num,
+ assoc->a_id, qid);
+ if (!queue->work_q)
+- goto out_a_put;
++ goto out_free_queue;
+
+ queue->qid = qid;
+ queue->sqsize = sqsize;
+@@ -830,15 +837,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ goto out_fail_iodlist;
+
+ WARN_ON(assoc->queues[qid]);
+- rcu_assign_pointer(assoc->queues[qid], queue);
++ assoc->queues[qid] = queue;
+
+ return queue;
+
+ out_fail_iodlist:
+ nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+ destroy_workqueue(queue->work_q);
+-out_a_put:
+- nvmet_fc_tgt_a_put(assoc);
+ out_free_queue:
+ kfree(queue);
+ return NULL;
+@@ -851,12 +856,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(ref, struct nvmet_fc_tgt_queue, ref);
+
+- rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
+-
+ nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+- nvmet_fc_tgt_a_put(queue->assoc);
+-
+ destroy_workqueue(queue->work_q);
+
+ kfree_rcu(queue, rcu);
+@@ -968,7 +969,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+- queue = rcu_dereference(assoc->queues[qid]);
++ queue = assoc->queues[qid];
+ if (queue &&
+ (!atomic_read(&queue->connected) ||
+ !nvmet_fc_tgt_q_get(queue)))
+@@ -1077,8 +1078,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ /* new allocation not needed */
+ kfree(newhost);
+ newhost = match;
+- /* no new allocation - release reference */
+- nvmet_fc_tgtport_put(tgtport);
+ } else {
+ newhost->tgtport = tgtport;
+ newhost->hosthandle = hosthandle;
+@@ -1093,13 +1092,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ }
+
+ static void
+-nvmet_fc_delete_assoc(struct work_struct *work)
++nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
++{
++ nvmet_fc_delete_target_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
++}
++
++static void
++nvmet_fc_delete_assoc_work(struct work_struct *work)
+ {
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(work, struct nvmet_fc_tgt_assoc, del_work);
++ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+
+- nvmet_fc_delete_target_assoc(assoc);
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_delete_assoc(assoc);
++ nvmet_fc_tgtport_put(tgtport);
++}
++
++static void
++nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
++{
++ nvmet_fc_tgtport_get(assoc->tgtport);
++ queue_work(nvmet_wq, &assoc->del_work);
+ }
+
+ static struct nvmet_fc_tgt_assoc *
+@@ -1111,6 +1125,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ int idx;
+ bool needrandom = true;
+
++ if (!tgtport->pe)
++ return NULL;
++
+ assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+ if (!assoc)
+ return NULL;
+@@ -1130,7 +1147,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ assoc->a_id = idx;
+ INIT_LIST_HEAD(&assoc->a_list);
+ kref_init(&assoc->ref);
+- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
++ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
+ atomic_set(&assoc->terminating, 0);
+
+ while (needrandom) {
+@@ -1171,13 +1188,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_ls_iod *oldls;
+ unsigned long flags;
++ int i;
++
++ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
++ if (assoc->queues[i])
++ nvmet_fc_delete_target_queue(assoc->queues[i]);
++ }
+
+ /* Send Disconnect now that all i/o has completed */
+ nvmet_fc_xmt_disconnect_assoc(assoc);
+
+ nvmet_fc_free_hostport(assoc->hostport);
+ spin_lock_irqsave(&tgtport->lock, flags);
+- list_del_rcu(&assoc->a_list);
+ oldls = assoc->rcv_disconn;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* if pending Rcv Disconnect Association LS, send rsp now */
+@@ -1207,7 +1229,7 @@ static void
+ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ {
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+- struct nvmet_fc_tgt_queue *queue;
++ unsigned long flags;
+ int i, terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+@@ -1216,29 +1238,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ if (terminating)
+ return;
+
++ spin_lock_irqsave(&tgtport->lock, flags);
++ list_del_rcu(&assoc->a_list);
++ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+- for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+- rcu_read_lock();
+- queue = rcu_dereference(assoc->queues[i]);
+- if (!queue) {
+- rcu_read_unlock();
+- continue;
+- }
++ synchronize_rcu();
+
+- if (!nvmet_fc_tgt_q_get(queue)) {
+- rcu_read_unlock();
+- continue;
+- }
+- rcu_read_unlock();
+- nvmet_fc_delete_target_queue(queue);
+- nvmet_fc_tgt_q_put(queue);
++ /* ensure all in-flight I/Os have been processed */
++ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
++ if (assoc->queues[i])
++ flush_workqueue(assoc->queues[i]->work_q);
+ }
+
+ dev_info(tgtport->dev,
+ "{%d:%d} Association deleted\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+-
+- nvmet_fc_tgt_a_put(assoc);
+ }
+
+ static struct nvmet_fc_tgt_assoc *
+@@ -1414,6 +1428,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ kref_init(&newrec->ref);
+ ida_init(&newrec->assoc_cnt);
+ newrec->max_sg_cnt = template->max_sgl_segments;
++ INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
+
+ ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ if (ret) {
+@@ -1491,9 +1506,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+- if (!queue_work(nvmet_wq, &assoc->del_work))
+- /* already deleting - release local reference */
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
+ }
+ rcu_read_unlock();
+ }
+@@ -1546,9 +1560,8 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ continue;
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+- if (!queue_work(nvmet_wq, &assoc->del_work))
+- /* already deleting - release local reference */
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+@@ -1580,7 +1593,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+- queue = rcu_dereference(assoc->queues[0]);
++ queue = assoc->queues[0];
+ if (queue && queue->nvme_sq.ctrl == ctrl) {
+ if (nvmet_fc_tgt_a_get(assoc))
+ found_ctrl = true;
+@@ -1592,9 +1605,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+ nvmet_fc_tgtport_put(tgtport);
+
+ if (found_ctrl) {
+- if (!queue_work(nvmet_wq, &assoc->del_work))
+- /* already deleting - release local reference */
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
+ return;
+ }
+
+@@ -1624,6 +1636,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+
++ flush_workqueue(nvmet_wq);
++
+ /*
+ * should terminate LS's as well. However, LS's will be generated
+ * at the tail end of association termination, so they likely don't
+@@ -1869,9 +1883,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
+
+- /* release get taken in nvmet_fc_find_target_assoc */
+- nvmet_fc_tgt_a_put(assoc);
+-
+ /*
+ * The rules for LS response says the response cannot
+ * go back until ABTS's have been sent for all outstanding
+@@ -1886,8 +1897,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ assoc->rcv_disconn = iod;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+- nvmet_fc_delete_target_assoc(assoc);
+-
+ if (oldls) {
+ dev_info(tgtport->dev,
+ "{%d:%d} Multiple Disconnect Association LS's "
+@@ -1903,6 +1912,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ }
+
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
++
+ return false;
+ }
+
+@@ -2539,8 +2551,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+
+ fod->req.cmd = &fod->cmdiubuf.sqe;
+ fod->req.cqe = &fod->rspiubuf.cqe;
+- if (tgtport->pe)
+- fod->req.port = tgtport->pe->port;
++ if (!tgtport->pe)
++ goto transport_error;
++ fod->req.port = tgtport->pe->port;
+
+ /* clear any response payload */
+ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+@@ -2901,6 +2914,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
+
+ nvmet_fc_portentry_unbind(pe);
+
++ /* terminate any outstanding associations */
++ __nvmet_fc_free_assocs(pe->tgtport);
++
+ kfree(pe);
+ }
+
+@@ -2932,6 +2948,9 @@ static int __init nvmet_fc_init_module(void)
+
+ static void __exit nvmet_fc_exit_module(void)
+ {
++ /* ensure any shutdown operations, e.g. delete ctrls, have finished */
++ flush_workqueue(nvmet_wq);
++
+ /* sanity check - all lports should be removed */
+ if (!list_empty(&nvmet_fc_target_list))
+ pr_warn("%s: targetport list not empty\n", __func__);
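Review note: the fc.c hunks above replace the old per-queue RCU lookup loop with a single unlink-plus-drain sequence: the association is removed from the RCU-protected list under the target-port lock, a grace period guarantees no reader can still find it, and only then are the per-queue workqueues flushed. A minimal sketch of that ordering, with illustrative names rather than the driver's own:

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct assoc {
		struct list_head a_list;
		struct workqueue_struct *work_q;
	};

	static DEFINE_SPINLOCK(assoc_lock);

	static void assoc_teardown(struct assoc *a)
	{
		unsigned long flags;

		/* 1. Unlink so new RCU readers cannot find the association. */
		spin_lock_irqsave(&assoc_lock, flags);
		list_del_rcu(&a->a_list);
		spin_unlock_irqrestore(&assoc_lock, flags);

		/* 2. Wait out readers that may still hold a reference. */
		synchronize_rcu();

		/* 3. Only now drain the work those readers may have queued. */
		flush_workqueue(a->work_q);
	}
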
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index c65a73433c05f6..e6d4226827b527 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
+ if (!rport->targetport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&rport->lock);
+- list_add_tail(&rport->ls_list, &tls_req->ls_list);
++ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ return ret;
+@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+- list_add_tail(&rport->ls_list, &tls_req->ls_list);
++ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ }
+@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+- list_add_tail(&tport->ls_list, &tls_req->ls_list);
++ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
+ return ret;
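Review note: all three fcloop.c hunks fix the same swapped-argument bug. list_add_tail() takes the new entry first and the list head second, so passing &rport->ls_list first spliced the per-port head onto the request instead of queueing the request on the port. A minimal sketch of the corrected call, with hypothetical names:

	#include <linux/list.h>

	struct ls_req {
		struct list_head ls_list;	/* the entry being queued */
	};

	static LIST_HEAD(port_ls_list);		/* the per-port list head */

	static void queue_ls_req(struct ls_req *req)
	{
		/* list_add_tail(new, head): entry first, head second. */
		list_add_tail(&req->ls_list, &port_ls_list);
	}
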
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 8cfd60f3b5648f..15b00ed7be16a8 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -530,6 +530,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+ struct nvmet_host *host);
+ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ u8 event_info, u8 log_page);
++bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
+
+ #define NVMET_QUEUE_SIZE 1024
+ #define NVMET_NR_QUEUES 128
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 9fe07d7efa96cf..d4a61645d61a59 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+ req->cmd->common.opcode == nvme_admin_identify) {
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+- nvmet_passthru_override_id_ctrl(req);
++ status = nvmet_passthru_override_id_ctrl(req);
+ break;
+ case NVME_ID_CNS_NS:
+- nvmet_passthru_override_id_ns(req);
++ status = nvmet_passthru_override_id_ns(req);
+ break;
+ case NVME_ID_CNS_NS_DESC_LIST:
+- nvmet_passthru_override_id_descs(req);
++ status = nvmet_passthru_override_id_descs(req);
+ break;
+ }
+ } else if (status < 0)
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 4597bca43a6d87..a6d55ebb823824 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -473,12 +473,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+ return 0;
+
+ out_free:
+- while (--i >= 0) {
+- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+-
+- list_del(&rsp->free_list);
+- nvmet_rdma_free_rsp(ndev, rsp);
+- }
++ while (--i >= 0)
++ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
+ kfree(queue->rsps);
+ out:
+ return ret;
+@@ -489,12 +485,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int i, nr_rsps = queue->recv_queue_size * 2;
+
+- for (i = 0; i < nr_rsps; i++) {
+- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+-
+- list_del(&rsp->free_list);
+- nvmet_rdma_free_rsp(ndev, rsp);
+- }
++ for (i = 0; i < nr_rsps; i++)
++ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
+ kfree(queue->rsps);
+ }
+
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 197fc2ecb164dc..bd142aed20f456 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -19,6 +19,7 @@
+ #include "nvmet.h"
+
+ #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
++#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+
+ static int param_store_val(const char *str, int *val, int min, int max)
+ {
+@@ -323,6 +324,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
+ return 0;
+ }
+
++/* If cmd buffers are NULL, no operation is performed */
+ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+ {
+ kfree(cmd->iov);
+@@ -873,6 +875,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ pr_err("bad nvme-tcp pdu length (%d)\n",
+ le32_to_cpu(icreq->hdr.plen));
+ nvmet_tcp_fatal_error(queue);
++ return -EPROTO;
+ }
+
+ if (icreq->pfv != NVME_TCP_PFV_1_0) {
+@@ -900,7 +903,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ icresp->hdr.pdo = 0;
+ icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
+ icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
++ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
+ icresp->cpda = 0;
+ if (queue->hdr_digest)
+ icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+@@ -953,6 +956,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ {
+ struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ struct nvmet_tcp_cmd *cmd;
++ unsigned int exp_data_len;
+
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+@@ -971,12 +975,24 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+ data->ttag, le32_to_cpu(data->data_offset),
+ cmd->rbytes_done);
+ /* FIXME: use path and transport errors */
+- nvmet_req_complete(&cmd->req,
+- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
++ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
++ exp_data_len = le32_to_cpu(data->hdr.plen) -
++ nvmet_tcp_hdgst_len(queue) -
++ nvmet_tcp_ddgst_len(queue) -
++ sizeof(*data);
++
+ cmd->pdu_len = le32_to_cpu(data->data_length);
++ if (unlikely(cmd->pdu_len != exp_data_len ||
++ cmd->pdu_len == 0 ||
++ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
++ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
++ /* FIXME: use proper transport errors */
++ nvmet_tcp_fatal_error(queue);
++ return -EPROTO;
++ }
+ cmd->pdu_recv = 0;
+ nvmet_tcp_build_pdu_iovec(cmd);
+ queue->cmd = cmd;
+@@ -1462,13 +1478,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+- for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+- if (nvmet_tcp_need_data_in(cmd))
+- nvmet_tcp_free_cmd_buffers(cmd);
+- }
+-
+- if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+- nvmet_tcp_free_cmd_buffers(&queue->connect);
++ for (i = 0; i < queue->nr_cmds; i++, cmd++)
++ nvmet_tcp_free_cmd_buffers(cmd);
++ nvmet_tcp_free_cmd_buffers(&queue->connect);
+ }
+
+ static void nvmet_tcp_release_queue_work(struct work_struct *w)
+@@ -1847,8 +1859,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+ }
+
+ queue->nr_cmds = sq->size * 2;
+- if (nvmet_tcp_alloc_cmds(queue))
++ if (nvmet_tcp_alloc_cmds(queue)) {
++ queue->nr_cmds = 0;
+ return NVME_SC_INTERNAL;
++ }
+ return 0;
+ }
+
+@@ -1913,6 +1927,7 @@ static void __exit nvmet_tcp_exit(void)
+ flush_workqueue(nvmet_wq);
+
+ destroy_workqueue(nvmet_tcp_wq);
++ ida_destroy(&nvmet_tcp_queue_ida);
+ }
+
+ module_init(nvmet_tcp_init);
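Review note: the H2CData hunk cross-checks the payload length declared in the PDU against what the on-wire PDU length implies: plen minus the data-PDU header, minus the header digest and data digest (4-byte CRC32C each, when enabled). A sketch of the check using the patch's field names; the digest sizes are stated here only for illustration:

	#include <linux/types.h>

	#define NVMET_TCP_MAXH2CDATA 0x400000	/* matches the new 16M limit */

	static bool h2c_data_len_valid(u32 plen, u32 data_length,
				       u32 hdr_len, u32 hdgst_len, u32 ddgst_len)
	{
		u32 exp_data_len = plen - hdr_len - hdgst_len - ddgst_len;

		/* The declared payload must match the PDU length, be non-zero,
		 * and respect the MAXH2CDATA value advertised in the ICResp. */
		return data_length == exp_data_len &&
		       data_length != 0 &&
		       data_length <= NVMET_TCP_MAXH2CDATA;
	}
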
+diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
+index bff454d46255b4..6ee1f3db81d040 100644
+--- a/drivers/nvme/target/trace.c
++++ b/drivers/nvme/target/trace.c
+@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
+ return ret;
+ }
+
+-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
+ {
+ const char *ret = trace_seq_buffer_ptr(p);
+
+@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+ * If we can know the extra data of the connect command in this stage,
+ * we can update this print statement later.
+ */
+- if (ctrl)
+- trace_seq_printf(p, "%d", ctrl->cntlid);
++ if (ctrl_id)
++ trace_seq_printf(p, "%d", ctrl_id);
+ else
+ trace_seq_printf(p, "_");
+ trace_seq_putc(p, 0);
+diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
+index 6109b3806b12be..7f7ebf9558e505 100644
+--- a/drivers/nvme/target/trace.h
++++ b/drivers/nvme/target/trace.h
+@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
+ nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
+
+-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
+-#define __print_ctrl_name(ctrl) \
+- nvmet_trace_ctrl_name(p, ctrl)
++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
++#define __print_ctrl_id(ctrl_id) \
++ nvmet_trace_ctrl_id(p, ctrl_id)
+
+ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
+ #define __print_disk_name(name) \
+ nvmet_trace_disk_name(p, name)
+
+ #ifndef TRACE_HEADER_MULTI_READ
+-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
++static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
+ {
+- return req->sq->ctrl;
++ /*
++ * The queue and controller pointers are not valid until an association
++ * has been established.
++ */
++ if (!req->sq || !req->sq->ctrl)
++ return 0;
++ return req->sq->ctrl->cntlid;
+ }
+
+ static inline void __assign_req_name(char *name, struct nvmet_req *req)
+@@ -53,8 +59,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
+ return;
+ }
+
+- strncpy(name, req->ns->device_path,
+- min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
++ strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
+ }
+ #endif
+
+@@ -63,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
+ TP_ARGS(req, cmd),
+ TP_STRUCT__entry(
+ __field(struct nvme_command *, cmd)
+- __field(struct nvmet_ctrl *, ctrl)
++ __field(u16, ctrl_id)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(u16, cid)
+@@ -76,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+- __entry->ctrl = nvmet_req_to_ctrl(req);
++ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
+ __assign_req_name(__entry->disk, req);
+ __entry->qid = req->sq->qid;
+ __entry->cid = cmd->common.command_id;
+@@ -85,12 +90,12 @@ TRACE_EVENT(nvmet_req_init,
+ __entry->flags = cmd->common.flags;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+- memcpy(__entry->cdw10, &cmd->common.cdw10,
++ memcpy(__entry->cdw10, &cmd->common.cdws,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
+ "meta=%#llx, cmd=(%s, %s)",
+- __print_ctrl_name(__entry->ctrl),
++ __print_ctrl_id(__entry->ctrl_id),
+ __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->nsid,
+ __entry->flags, __entry->metadata,
+@@ -104,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
+ TP_PROTO(struct nvmet_req *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+- __field(struct nvmet_ctrl *, ctrl)
++ __field(u16, ctrl_id)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(int, cid)
+@@ -112,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+- __entry->ctrl = nvmet_req_to_ctrl(req);
++ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
+ __entry->qid = req->cq->qid;
+ __entry->cid = req->cqe->command_id;
+ __entry->result = le64_to_cpu(req->cqe->result.u64);
+@@ -120,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
+ __assign_req_name(__entry->disk, req);
+ ),
+ TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
+- __print_ctrl_name(__entry->ctrl),
++ __print_ctrl_id(__entry->ctrl_id),
+ __print_disk_name(__entry->disk),
+ __entry->qid, __entry->cid, __entry->result, __entry->status)
+
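Review note: the trace changes replace a stored struct nvmet_ctrl pointer with the plain cntlid. Trace entries are formatted long after the event fires, so dereferencing a controller pointer at print time can race with controller teardown, while a copied u16 is always safe to print. The strncpy() replacement matters for a similar reason: the old call neither NUL-terminated nor zeroed the tail of the fixed-size field. A small sketch of the strscpy_pad() behaviour (DISK_NAME_LEN redefined here to match genhd's 32):

	#include <linux/string.h>

	#define DISK_NAME_LEN 32

	static void assign_req_name(char *dst, const char *device_path)
	{
		/* strscpy_pad() truncates safely, always NUL-terminates, and
		 * zero-fills the remainder of dst, so the trace field never
		 * carries leftover stack bytes. */
		strscpy_pad(dst, device_path, DISK_NAME_LEN);
	}
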
+diff --git a/drivers/nvmem/apple-efuses.c b/drivers/nvmem/apple-efuses.c
+index 9b7c871021043d..d3d49d22338b3b 100644
+--- a/drivers/nvmem/apple-efuses.c
++++ b/drivers/nvmem/apple-efuses.c
+@@ -36,6 +36,7 @@ static int apple_efuses_probe(struct platform_device *pdev)
+ struct resource *res;
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
++ .add_legacy_fixed_of_cells = true,
+ .read_only = true,
+ .reg_read = apple_efuses_read,
+ .stride = sizeof(u32),
+diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
+index 9737104f3b76b3..5cdf339cfbec0e 100644
+--- a/drivers/nvmem/brcm_nvram.c
++++ b/drivers/nvmem/brcm_nvram.c
+@@ -17,9 +17,23 @@
+
+ #define NVRAM_MAGIC "FLSH"
+
++/**
++ * struct brcm_nvram - driver state internal struct
++ *
++ * @dev: NVMEM device pointer
++ * @nvmem_size: Size of the whole space available for NVRAM
++ * @data: NVRAM data copy stored to avoid poking the underlying flash controller
++ * @data_len: NVRAM data size
++ * @padding_byte: Padding value used to fill remaining space
++ * @cells: Array of discovered NVMEM cells
++ * @ncells: Number of elements in cells
++ */
+ struct brcm_nvram {
+ struct device *dev;
+- void __iomem *base;
++ size_t nvmem_size;
++ uint8_t *data;
++ size_t data_len;
++ uint8_t padding_byte;
+ struct nvmem_cell_info *cells;
+ int ncells;
+ };
+@@ -36,10 +50,47 @@ static int brcm_nvram_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+ {
+ struct brcm_nvram *priv = context;
+- u8 *dst = val;
++ size_t to_copy;
++
++ if (offset + bytes > priv->data_len)
++ to_copy = max_t(ssize_t, (ssize_t)priv->data_len - offset, 0);
++ else
++ to_copy = bytes;
++
++ memcpy(val, priv->data + offset, to_copy);
++
++ memset((uint8_t *)val + to_copy, priv->padding_byte, bytes - to_copy);
++
++ return 0;
++}
++
++static int brcm_nvram_copy_data(struct brcm_nvram *priv, struct platform_device *pdev)
++{
++ struct resource *res;
++ void __iomem *base;
++
++ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ priv->nvmem_size = resource_size(res);
++
++ priv->padding_byte = readb(base + priv->nvmem_size - 1);
++ for (priv->data_len = priv->nvmem_size;
++ priv->data_len;
++ priv->data_len--) {
++ if (readb(base + priv->data_len - 1) != priv->padding_byte)
++ break;
++ }
++ WARN(priv->data_len > SZ_128K, "Unexpected (big) NVRAM size: %zu B\n", priv->data_len);
++
++ priv->data = devm_kzalloc(priv->dev, priv->data_len, GFP_KERNEL);
++ if (!priv->data)
++ return -ENOMEM;
++
++ memcpy_fromio(priv->data, base, priv->data_len);
+
+- while (bytes--)
+- *dst++ = readb(priv->base + offset++);
++ bcm47xx_nvram_init_from_iomem(base, priv->data_len);
+
+ return 0;
+ }
+@@ -67,8 +118,13 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
+ size_t len)
+ {
+ struct device *dev = priv->dev;
+- char *var, *value, *eq;
++ char *var, *value;
++ uint8_t tmp;
+ int idx;
++ int err = 0;
++
++ tmp = priv->data[len - 1];
++ priv->data[len - 1] = '\0';
+
+ priv->ncells = 0;
+ for (var = data + sizeof(struct brcm_nvram_header);
+@@ -78,67 +134,68 @@ static int brcm_nvram_add_cells(struct brcm_nvram *priv, uint8_t *data,
+ }
+
+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+- if (!priv->cells)
+- return -ENOMEM;
++ if (!priv->cells) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ for (var = data + sizeof(struct brcm_nvram_header), idx = 0;
+ var < (char *)data + len && *var;
+ var = value + strlen(value) + 1, idx++) {
++ char *eq, *name;
++
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
++ name = devm_kstrdup(dev, var, GFP_KERNEL);
++ *eq = '=';
++ if (!name) {
++ err = -ENOMEM;
++ goto out;
++ }
+ value = eq + 1;
+
+- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+- if (!priv->cells[idx].name)
+- return -ENOMEM;
++ priv->cells[idx].name = name;
+ priv->cells[idx].offset = value - (char *)data;
+ priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+- if (!strcmp(var, "et0macaddr") ||
+- !strcmp(var, "et1macaddr") ||
+- !strcmp(var, "et2macaddr")) {
++ if (!strcmp(name, "et0macaddr") ||
++ !strcmp(name, "et1macaddr") ||
++ !strcmp(name, "et2macaddr")) {
+ priv->cells[idx].raw_len = strlen(value);
+ priv->cells[idx].bytes = ETH_ALEN;
+ priv->cells[idx].read_post_process = brcm_nvram_read_post_process_macaddr;
+ }
+ }
+
+- return 0;
++out:
++ priv->data[len - 1] = tmp;
++ return err;
+ }
+
+ static int brcm_nvram_parse(struct brcm_nvram *priv)
+ {
++ struct brcm_nvram_header *header = (struct brcm_nvram_header *)priv->data;
+ struct device *dev = priv->dev;
+- struct brcm_nvram_header header;
+- uint8_t *data;
+ size_t len;
+ int err;
+
+- memcpy_fromio(&header, priv->base, sizeof(header));
+-
+- if (memcmp(header.magic, NVRAM_MAGIC, 4)) {
++ if (memcmp(header->magic, NVRAM_MAGIC, 4)) {
+ dev_err(dev, "Invalid NVRAM magic\n");
+ return -EINVAL;
+ }
+
+- len = le32_to_cpu(header.len);
+-
+- data = kzalloc(len, GFP_KERNEL);
+- if (!data)
+- return -ENOMEM;
+-
+- memcpy_fromio(data, priv->base, len);
+- data[len - 1] = '\0';
+-
+- err = brcm_nvram_add_cells(priv, data, len);
+- if (err) {
+- dev_err(dev, "Failed to add cells: %d\n", err);
+- return err;
++ len = le32_to_cpu(header->len);
++ if (len > priv->nvmem_size) {
++ dev_err(dev, "NVRAM length (%zd) exceeds mapped size (%zd)\n", len,
++ priv->nvmem_size);
++ return -EINVAL;
+ }
+
+- kfree(data);
++ err = brcm_nvram_add_cells(priv, priv->data, len);
++ if (err)
++ dev_err(dev, "Failed to add cells: %d\n", err);
+
+ return 0;
+ }
+@@ -150,7 +207,6 @@ static int brcm_nvram_probe(struct platform_device *pdev)
+ .reg_read = brcm_nvram_read,
+ };
+ struct device *dev = &pdev->dev;
+- struct resource *res;
+ struct brcm_nvram *priv;
+ int err;
+
+@@ -159,21 +215,19 @@ static int brcm_nvram_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ priv->dev = dev;
+
+- priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+- if (IS_ERR(priv->base))
+- return PTR_ERR(priv->base);
++ err = brcm_nvram_copy_data(priv, pdev);
++ if (err)
++ return err;
+
+ err = brcm_nvram_parse(priv);
+ if (err)
+ return err;
+
+- bcm47xx_nvram_init_from_iomem(priv->base, resource_size(res));
+-
+ config.dev = dev;
+ config.cells = priv->cells;
+ config.ncells = priv->ncells;
+ config.priv = priv;
+- config.size = resource_size(res);
++ config.size = priv->nvmem_size;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+ }
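Review note: brcm_nvram now snapshots the whole NVRAM partition into RAM at probe time, trimming trailing padding by scanning back from the end, and serves all reads from that copy so the flash controller is never touched afterwards. Reads that run past the cached data are filled with the detected padding byte. A sketch of the clamp-and-pad read, equivalent to the hunk above:

	#include <linux/minmax.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static int cached_read(const u8 *data, size_t data_len, u8 pad,
			       unsigned int offset, void *val, size_t bytes)
	{
		size_t to_copy = 0;

		if (offset < data_len)
			to_copy = min(bytes, data_len - offset);

		memcpy(val, data + offset, to_copy);
		/* Anything beyond the cached data reads back as padding. */
		memset((u8 *)val + to_copy, pad, bytes - to_copy);
		return 0;
	}
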
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index eaf6a3fe8ca6d4..f28c005c2bb265 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -374,10 +374,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ if (!config->base_dev)
+ return -EINVAL;
+
+- if (config->type == NVMEM_TYPE_FRAM)
+- bin_attr_nvmem_eeprom_compat.attr.name = "fram";
+-
+ nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
++ if (config->type == NVMEM_TYPE_FRAM)
++ nvmem->eeprom.attr.name = "fram";
+ nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
+ nvmem->eeprom.size = nvmem->size;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -796,6 +795,12 @@ static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
+ if (!layout_np)
+ return NULL;
+
++ /* Fixed layouts don't have a matching driver */
++ if (of_device_is_compatible(layout_np, "fixed-layout")) {
++ of_node_put(layout_np);
++ return NULL;
++ }
++
+ /*
+ * In case the nvmem device was built-in while the layout was built as a
+ * module, we shall manually request the layout driver loading otherwise
+@@ -997,9 +1002,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+ if (rval)
+ goto err_remove_cells;
+
+- rval = nvmem_add_cells_from_legacy_of(nvmem);
+- if (rval)
+- goto err_remove_cells;
++ if (config->add_legacy_fixed_of_cells) {
++ rval = nvmem_add_cells_from_legacy_of(nvmem);
++ if (rval)
++ goto err_remove_cells;
++ }
+
+ rval = nvmem_add_cells_from_fixed_layout(nvmem);
+ if (rval)
+@@ -1246,13 +1253,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
+ EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+ /**
+- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
++ * devm_nvmem_device_get() - Get nvmem device of device from a given id
+ *
+ * @dev: Device that requests the nvmem device.
+ * @id: name id for the requested nvmem device.
+ *
+- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
+- * on success. The nvmem_cell will be freed by the automatically once the
++ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
++ * on success. The nvmem_device will be freed automatically once the
+ * device is freed.
+ */
+ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+@@ -2124,6 +2131,19 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem)
+ }
+ EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
++/**
++ * nvmem_dev_size() - Get the size of a given nvmem device.
++ *
++ * @nvmem: nvmem device.
++ *
++ * Return: size of the nvmem device.
++ */
++size_t nvmem_dev_size(struct nvmem_device *nvmem)
++{
++ return nvmem->size;
++}
++EXPORT_SYMBOL_GPL(nvmem_dev_size);
++
+ static int __init nvmem_init(void)
+ {
+ return bus_register(&nvmem_bus_type);
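Review note: the core change makes legacy fixed OF cell parsing opt-in — nvmem_register() only calls nvmem_add_cells_from_legacy_of() when the new add_legacy_fixed_of_cells flag is set, which is why every provider hunk below flips it on. A minimal provider config sketch (the device name is hypothetical):

	#include <linux/nvmem-provider.h>

	static struct nvmem_config example_config = {
		.name				= "example-otp",
		/* Opt in to parsing fixed cells from child OF nodes. */
		.add_legacy_fixed_of_cells	= true,
		.read_only			= true,
		.stride				= 1,
		.word_size			= 1,
	};
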
+diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
+index c38d9c1c3f4866..517d83e11af2c6 100644
+--- a/drivers/nvmem/imx-ocotp-scu.c
++++ b/drivers/nvmem/imx-ocotp-scu.c
+@@ -220,6 +220,7 @@ static int imx_scu_ocotp_write(void *context, unsigned int offset,
+
+ static struct nvmem_config imx_scu_ocotp_nvmem_config = {
+ .name = "imx-scu-ocotp",
++ .add_legacy_fixed_of_cells = true,
+ .read_only = false,
+ .word_size = 4,
+ .stride = 1,
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index e8b6f194925dfc..f1e202efaa4970 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -615,6 +615,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
+ return PTR_ERR(priv->clk);
+
+ priv->params = of_device_get_match_data(&pdev->dev);
++ imx_ocotp_nvmem_config.add_legacy_fixed_of_cells = true;
+ imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
+ imx_ocotp_nvmem_config.dev = dev;
+ imx_ocotp_nvmem_config.priv = priv;
+diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
+index d6b533497ce1a5..6c2f80e166e28c 100644
+--- a/drivers/nvmem/meson-efuse.c
++++ b/drivers/nvmem/meson-efuse.c
+@@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+ {
+ struct meson_sm_firmware *fw = context;
++ int ret;
+
+- return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
+- bytes, 0, 0, 0);
++ ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
++ bytes, 0, 0, 0);
++
++ return ret < 0 ? ret : 0;
+ }
+
+ static int meson_efuse_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+ {
+ struct meson_sm_firmware *fw = context;
++ int ret;
++
++ ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
++ bytes, 0, 0, 0);
+
+- return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+- bytes, 0, 0, 0);
++ return ret < 0 ? ret : 0;
+ }
+
+ static const struct of_device_id meson_efuse_match[] = {
+@@ -47,7 +53,6 @@ static int meson_efuse_probe(struct platform_device *pdev)
+ struct nvmem_config *econfig;
+ struct clk *clk;
+ unsigned int size;
+- int ret;
+
+ sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
+ if (!sm_np) {
+@@ -60,27 +65,9 @@ static int meson_efuse_probe(struct platform_device *pdev)
+ if (!fw)
+ return -EPROBE_DEFER;
+
+- clk = devm_clk_get(dev, NULL);
+- if (IS_ERR(clk)) {
+- ret = PTR_ERR(clk);
+- if (ret != -EPROBE_DEFER)
+- dev_err(dev, "failed to get efuse gate");
+- return ret;
+- }
+-
+- ret = clk_prepare_enable(clk);
+- if (ret) {
+- dev_err(dev, "failed to enable gate");
+- return ret;
+- }
+-
+- ret = devm_add_action_or_reset(dev,
+- (void(*)(void *))clk_disable_unprepare,
+- clk);
+- if (ret) {
+- dev_err(dev, "failed to add disable callback");
+- return ret;
+- }
++ clk = devm_clk_get_enabled(dev, NULL);
++ if (IS_ERR(clk))
++ return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate");
+
+ if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
+ dev_err(dev, "failed to get max user");
+@@ -93,6 +80,7 @@ static int meson_efuse_probe(struct platform_device *pdev)
+
+ econfig->dev = dev;
+ econfig->name = dev_name(dev);
++ econfig->add_legacy_fixed_of_cells = true;
+ econfig->stride = 1;
+ econfig->word_size = 1;
+ econfig->reg_read = meson_efuse_read;
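Review note: the meson-efuse probe collapses devm_clk_get() + clk_prepare_enable() + devm_add_action_or_reset() into devm_clk_get_enabled(), which acquires the clock, enables it, and registers the disable/unprepare cleanup in one call; dev_err_probe() also folds away the -EPROBE_DEFER special case. A sketch under the same assumptions:

	#include <linux/clk.h>
	#include <linux/device.h>

	static int enable_efuse_gate(struct device *dev)
	{
		struct clk *clk = devm_clk_get_enabled(dev, NULL);

		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "failed to get efuse gate");
		return 0;
	}
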
+diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
+index d6d7aeda31f92b..3ff04d5ca8f854 100644
+--- a/drivers/nvmem/meson-mx-efuse.c
++++ b/drivers/nvmem/meson-mx-efuse.c
+@@ -210,6 +210,7 @@ static int meson_mx_efuse_probe(struct platform_device *pdev)
+ efuse->config.owner = THIS_MODULE;
+ efuse->config.dev = &pdev->dev;
+ efuse->config.priv = efuse;
++ efuse->config.add_legacy_fixed_of_cells = true;
+ efuse->config.stride = drvdata->word_size;
+ efuse->config.word_size = drvdata->word_size;
+ efuse->config.size = SZ_512;
+diff --git a/drivers/nvmem/microchip-otpc.c b/drivers/nvmem/microchip-otpc.c
+index 436e0dc4f33755..7cf81738a3e0a5 100644
+--- a/drivers/nvmem/microchip-otpc.c
++++ b/drivers/nvmem/microchip-otpc.c
+@@ -261,6 +261,7 @@ static int mchp_otpc_probe(struct platform_device *pdev)
+ return ret;
+
+ mchp_nvmem_config.dev = otpc->dev;
++ mchp_nvmem_config.add_legacy_fixed_of_cells = true;
+ mchp_nvmem_config.size = size;
+ mchp_nvmem_config.priv = otpc;
+ nvmem = devm_nvmem_register(&pdev->dev, &mchp_nvmem_config);
+diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
+index b36cd0dcc8c7fa..87c94686cfd216 100644
+--- a/drivers/nvmem/mtk-efuse.c
++++ b/drivers/nvmem/mtk-efuse.c
+@@ -83,6 +83,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
+ return PTR_ERR(priv->base);
+
+ pdata = device_get_match_data(dev);
++ econfig.add_legacy_fixed_of_cells = true;
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.reg_read = mtk_reg_read;
+diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
+index 70f2d4f2efbf16..9aa8f42faa4c93 100644
+--- a/drivers/nvmem/qcom-spmi-sdam.c
++++ b/drivers/nvmem/qcom-spmi-sdam.c
+@@ -142,6 +142,7 @@ static int sdam_probe(struct platform_device *pdev)
+ sdam->sdam_config.name = "spmi_sdam";
+ sdam->sdam_config.id = NVMEM_DEVID_AUTO;
+ sdam->sdam_config.owner = THIS_MODULE;
++ sdam->sdam_config.add_legacy_fixed_of_cells = true;
+ sdam->sdam_config.stride = 1;
+ sdam->sdam_config.word_size = 1;
+ sdam->sdam_config.reg_read = sdam_read;
+diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
+index 14814cba2dd656..6c554040c6e67d 100644
+--- a/drivers/nvmem/qfprom.c
++++ b/drivers/nvmem/qfprom.c
+@@ -357,6 +357,7 @@ static int qfprom_probe(struct platform_device *pdev)
+ {
+ struct nvmem_config econfig = {
+ .name = "qfprom",
++ .add_legacy_fixed_of_cells = true,
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
+index df6a1c594b7812..9ecf3873cbb765 100644
+--- a/drivers/nvmem/rave-sp-eeprom.c
++++ b/drivers/nvmem/rave-sp-eeprom.c
+@@ -328,6 +328,7 @@ static int rave_sp_eeprom_probe(struct platform_device *pdev)
+ of_property_read_string(np, "zii,eeprom-name", &config.name);
+ config.priv = eeprom;
+ config.dev = dev;
++ config.add_legacy_fixed_of_cells = true;
+ config.size = size;
+ config.reg_read = rave_sp_eeprom_reg_read;
+ config.reg_write = rave_sp_eeprom_reg_write;
+diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
+index 752d0bf4445eef..7f907c5a445e78 100644
+--- a/drivers/nvmem/rmem.c
++++ b/drivers/nvmem/rmem.c
+@@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset,
+
+ memunmap(addr);
+
+- return count;
++ if (count < 0)
++ return count;
++
++ return count == bytes ? 0 : -EIO;
+ }
+
+ static int rmem_probe(struct platform_device *pdev)
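Review note: the rmem fix enforces the nvmem ->reg_read() contract — the callback must return 0 on success or a negative errno, never a byte count, since the core treats any non-zero return as failure. A sketch of the corrected convention; backing_copy() is a hypothetical helper standing in for the memremap-and-copy step:

	#include <linux/errno.h>
	#include <linux/types.h>

	static ssize_t backing_copy(void *context, unsigned int offset,
				    void *val, size_t bytes);	/* hypothetical */

	static int example_reg_read(void *context, unsigned int offset,
				    void *val, size_t bytes)
	{
		ssize_t count = backing_copy(context, offset, val, bytes);

		if (count < 0)
			return count;			/* propagate the errno */
		return count == bytes ? 0 : -EIO;	/* short read is an error */
	}
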
+diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
+index 4004c5bece4239..2b40978ddb18cc 100644
+--- a/drivers/nvmem/rockchip-efuse.c
++++ b/drivers/nvmem/rockchip-efuse.c
+@@ -205,6 +205,7 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
+
+ static struct nvmem_config econfig = {
+ .name = "rockchip-efuse",
++ .add_legacy_fixed_of_cells = true,
+ .stride = 1,
+ .word_size = 1,
+ .read_only = true,
+diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
+index cb9aa5428350ab..7107d68a2f8c75 100644
+--- a/drivers/nvmem/rockchip-otp.c
++++ b/drivers/nvmem/rockchip-otp.c
+@@ -255,6 +255,7 @@ static int rockchip_otp_read(void *context, unsigned int offset,
+ static struct nvmem_config otp_config = {
+ .name = "rockchip-otp",
+ .owner = THIS_MODULE,
++ .add_legacy_fixed_of_cells = true,
+ .read_only = true,
+ .stride = 1,
+ .word_size = 1,
+diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
+index 2210da40dfbd7b..bff27011f4ff28 100644
+--- a/drivers/nvmem/sc27xx-efuse.c
++++ b/drivers/nvmem/sc27xx-efuse.c
+@@ -247,6 +247,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
+ econfig.reg_read = sc27xx_efuse_read;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
++ econfig.add_legacy_fixed_of_cells = true;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem config\n");
+diff --git a/drivers/nvmem/sec-qfprom.c b/drivers/nvmem/sec-qfprom.c
+index e48c2dc0c44b39..19799b3fe00aad 100644
+--- a/drivers/nvmem/sec-qfprom.c
++++ b/drivers/nvmem/sec-qfprom.c
+@@ -47,6 +47,7 @@ static int sec_qfprom_probe(struct platform_device *pdev)
+ {
+ struct nvmem_config econfig = {
+ .name = "sec-qfprom",
++ .add_legacy_fixed_of_cells = true,
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
+index 7e6e31db4baaef..bb3105f3291fcf 100644
+--- a/drivers/nvmem/sprd-efuse.c
++++ b/drivers/nvmem/sprd-efuse.c
+@@ -408,6 +408,7 @@ static int sprd_efuse_probe(struct platform_device *pdev)
+ econfig.read_only = false;
+ econfig.name = "sprd-efuse";
+ econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
++ econfig.add_legacy_fixed_of_cells = true;
+ econfig.reg_read = sprd_efuse_read;
+ econfig.reg_write = sprd_efuse_write;
+ econfig.priv = efuse;
+diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
+index 0f84044bd1adef..1541c20709d25b 100644
+--- a/drivers/nvmem/stm32-romem.c
++++ b/drivers/nvmem/stm32-romem.c
+@@ -207,6 +207,7 @@ static int stm32_romem_probe(struct platform_device *pdev)
+ priv->cfg.priv = priv;
+ priv->cfg.owner = THIS_MODULE;
+ priv->cfg.type = NVMEM_TYPE_OTP;
++ priv->cfg.add_legacy_fixed_of_cells = true;
+
+ priv->lower = 0;
+
+diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
+index f3a18aa0a6c732..38f5d9df39cd53 100644
+--- a/drivers/nvmem/sunplus-ocotp.c
++++ b/drivers/nvmem/sunplus-ocotp.c
+@@ -145,6 +145,7 @@ static int sp_ocotp_read(void *priv, unsigned int offset, void *value, size_t by
+
+ static struct nvmem_config sp_ocotp_nvmem_config = {
+ .name = "sp-ocotp",
++ .add_legacy_fixed_of_cells = true,
+ .read_only = true,
+ .word_size = 1,
+ .size = QAC628_OTP_SIZE,
+diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
+index 5d364d85347fc9..ba14a76208ab68 100644
+--- a/drivers/nvmem/sunxi_sid.c
++++ b/drivers/nvmem/sunxi_sid.c
+@@ -153,6 +153,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
+ nvmem_cfg->dev = dev;
+ nvmem_cfg->name = "sunxi-sid";
+ nvmem_cfg->type = NVMEM_TYPE_OTP;
++ nvmem_cfg->add_legacy_fixed_of_cells = true;
+ nvmem_cfg->read_only = true;
+ nvmem_cfg->size = cfg->size;
+ nvmem_cfg->word_size = 1;
+diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
+index c4ae94af4af78e..adabbfdad6fb6d 100644
+--- a/drivers/nvmem/u-boot-env.c
++++ b/drivers/nvmem/u-boot-env.c
+@@ -23,13 +23,10 @@ enum u_boot_env_format {
+
+ struct u_boot_env {
+ struct device *dev;
++ struct nvmem_device *nvmem;
+ enum u_boot_env_format format;
+
+ struct mtd_info *mtd;
+-
+- /* Cells */
+- struct nvmem_cell_info *cells;
+- int ncells;
+ };
+
+ struct u_boot_env_image_single {
+@@ -94,70 +91,71 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i
+ static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+ {
++ struct nvmem_device *nvmem = priv->nvmem;
+ struct device *dev = priv->dev;
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+- int idx;
+-
+- priv->ncells = 0;
+- for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
+- priv->ncells++;
+
+- priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+- if (!priv->cells)
+- return -ENOMEM;
+-
+- for (var = data, idx = 0;
++ for (var = data;
+ var < data + data_len && *var;
+- var = value + strlen(value) + 1, idx++) {
++ var = value + strlen(value) + 1) {
++ struct nvmem_cell_info info = {};
++
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+- if (!priv->cells[idx].name)
++ info.name = devm_kstrdup(dev, var, GFP_KERNEL);
++ if (!info.name)
+ return -ENOMEM;
+- priv->cells[idx].offset = data_offset + value - data;
+- priv->cells[idx].bytes = strlen(value);
+- priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
++ info.offset = data_offset + value - data;
++ info.bytes = strlen(value);
++ info.np = of_get_child_by_name(dev->of_node, info.name);
+ if (!strcmp(var, "ethaddr")) {
+- priv->cells[idx].raw_len = strlen(value);
+- priv->cells[idx].bytes = ETH_ALEN;
+- priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
++ info.raw_len = strlen(value);
++ info.bytes = ETH_ALEN;
++ info.read_post_process = u_boot_env_read_post_process_ethaddr;
+ }
+- }
+
+- if (WARN_ON(idx != priv->ncells))
+- priv->ncells = idx;
++ nvmem_add_one_cell(nvmem, &info);
++ }
+
+ return 0;
+ }
+
+ static int u_boot_env_parse(struct u_boot_env *priv)
+ {
++ struct nvmem_device *nvmem = priv->nvmem;
+ struct device *dev = priv->dev;
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
++ __le32 *crc32_addr;
+ size_t data_offset;
+ size_t data_len;
++ size_t dev_size;
+ uint32_t crc32;
+ uint32_t calc;
+- size_t bytes;
+ uint8_t *buf;
++ int bytes;
+ int err;
+
+- buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
++ dev_size = nvmem_dev_size(nvmem);
++
++ buf = kzalloc(dev_size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+- err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
+- if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
+- dev_err(dev, "Failed to read from mtd: %d\n", err);
++ bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
++ if (bytes < 0) {
++ err = bytes;
++ goto err_kfree;
++ } else if (bytes != dev_size) {
++ err = -EIO;
+ goto err_kfree;
+ }
+
+@@ -178,9 +176,17 @@ static int u_boot_env_parse(struct u_boot_env *priv)
+ data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ break;
+ }
+- crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
+- crc32_data_len = priv->mtd->size - crc32_data_offset;
+- data_len = priv->mtd->size - data_offset;
++
++ if (dev_size < data_offset) {
++ dev_err(dev, "Device too small for u-boot-env\n");
++ err = -EIO;
++ goto err_kfree;
++ }
++
++ crc32_addr = (__le32 *)(buf + crc32_offset);
++ crc32 = le32_to_cpu(*crc32_addr);
++ crc32_data_len = dev_size - crc32_data_offset;
++ data_len = dev_size - data_offset;
+
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+@@ -189,10 +195,8 @@ static int u_boot_env_parse(struct u_boot_env *priv)
+ goto err_kfree;
+ }
+
+- buf[priv->mtd->size - 1] = '\0';
++ buf[dev_size - 1] = '\0';
+ err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
+- if (err)
+- dev_err(dev, "Failed to add cells: %d\n", err);
+
+ err_kfree:
+ kfree(buf);
+@@ -209,7 +213,6 @@ static int u_boot_env_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct u_boot_env *priv;
+- int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+@@ -224,17 +227,15 @@ static int u_boot_env_probe(struct platform_device *pdev)
+ return PTR_ERR(priv->mtd);
+ }
+
+- err = u_boot_env_parse(priv);
+- if (err)
+- return err;
+-
+ config.dev = dev;
+- config.cells = priv->cells;
+- config.ncells = priv->ncells;
+ config.priv = priv;
+ config.size = priv->mtd->size;
+
+- return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
++ priv->nvmem = devm_nvmem_register(dev, &config);
++ if (IS_ERR(priv->nvmem))
++ return PTR_ERR(priv->nvmem);
++
++ return u_boot_env_parse(priv);
+ }
+
+ static const struct of_device_id u_boot_env_of_match_table[] = {
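Review note: u-boot-env now registers the nvmem device first, reads the environment back through nvmem_device_read() instead of mtd_read(), and adds each variable with nvmem_add_one_cell() rather than building a cells array up front — so a malformed environment no longer prevents the device itself from registering. A sketch of the per-cell path, with placeholder parameters:

	#include <linux/nvmem-consumer.h>
	#include <linux/nvmem-provider.h>

	static int env_add_cell(struct nvmem_device *nvmem, const char *name,
				unsigned int offset, size_t bytes)
	{
		struct nvmem_cell_info info = {
			.name	= name,
			.offset	= offset,
			.bytes	= bytes,
		};

		/* Cells are attached to the already-registered device one at
		 * a time instead of through config.cells/config.ncells. */
		return nvmem_add_one_cell(nvmem, &info);
	}
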
+diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
+index 0a1dbb80537ec1..6ad3295d319511 100644
+--- a/drivers/nvmem/uniphier-efuse.c
++++ b/drivers/nvmem/uniphier-efuse.c
+@@ -52,6 +52,7 @@ static int uniphier_efuse_probe(struct platform_device *pdev)
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
++ econfig.add_legacy_fixed_of_cells = true;
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
+index f49bb9a26d0532..7f15aa89a9d091 100644
+--- a/drivers/nvmem/zynqmp_nvmem.c
++++ b/drivers/nvmem/zynqmp_nvmem.c
+@@ -58,6 +58,7 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
+
+ priv->dev = dev;
+ econfig.dev = dev;
++ econfig.add_legacy_fixed_of_cells = true;
+ econfig.reg_read = zynqmp_nvmem_read;
+ econfig.priv = priv;
+
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index e692809ff82279..f323e53816e196 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -8,6 +8,7 @@
+ #include <linux/logic_pio.h>
+ #include <linux/module.h>
+ #include <linux/of_address.h>
++#include <linux/overflow.h>
+ #include <linux/pci.h>
+ #include <linux/pci_regs.h>
+ #include <linux/sizes.h>
+@@ -100,6 +101,32 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
+ return IORESOURCE_MEM;
+ }
+
++static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
++ int ns, int pna)
++{
++ u64 cp, s, da;
++
++ /* Check that flags match */
++ if (*addr != *range)
++ return OF_BAD_ADDR;
++
++ /* Read address values, skipping high cell */
++ cp = of_read_number(range + 1, na - 1);
++ s = of_read_number(range + na + pna, ns);
++ da = of_read_number(addr + 1, na - 1);
++
++ pr_debug("default flags map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
++
++ if (da < cp || da >= (cp + s))
++ return OF_BAD_ADDR;
++ return da - cp;
++}
++
++static int of_bus_default_flags_translate(__be32 *addr, u64 offset, int na)
++{
++ /* Keep "flags" part (high cell) in translated address */
++ return of_bus_default_translate(addr + 1, offset, na - 1);
++}
+
+ #ifdef CONFIG_PCI
+ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
+@@ -374,8 +401,8 @@ static struct of_bus of_busses[] = {
+ .addresses = "reg",
+ .match = of_bus_default_flags_match,
+ .count_cells = of_bus_default_count_cells,
+- .map = of_bus_default_map,
+- .translate = of_bus_default_translate,
++ .map = of_bus_default_flags_map,
++ .translate = of_bus_default_flags_translate,
+ .has_flags = true,
+ .get_flags = of_bus_default_flags_get_flags,
+ },
+@@ -1116,7 +1143,11 @@ static int __of_address_to_resource(struct device_node *dev, int index, int bar_
+ if (of_mmio_is_nonposted(dev))
+ flags |= IORESOURCE_MEM_NONPOSTED;
+
++ if (overflows_type(taddr, r->start))
++ return -EOVERFLOW;
+ r->start = taddr;
++ if (overflows_type(taddr + size - 1, r->end))
++ return -EOVERFLOW;
+ r->end = taddr + size - 1;
+ r->flags = flags;
+ r->name = name ? name : dev->full_name;
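Review note: __of_address_to_resource() now rejects translated addresses that cannot be represented in resource_size_t, which on 32-bit kernels without CONFIG_PHYS_ADDR_T_64BIT is narrower than the u64 the translation produces; overflows_type() from <linux/overflow.h> performs the typed width check. A sketch mirroring the hunk:

	#include <linux/errno.h>
	#include <linux/ioport.h>
	#include <linux/overflow.h>

	static int fill_resource(struct resource *r, u64 taddr, u64 size)
	{
		if (overflows_type(taddr, r->start))
			return -EOVERFLOW;
		r->start = taddr;
		if (overflows_type(taddr + size - 1, r->end))
			return -EOVERFLOW;
		r->end = taddr + size - 1;
		return 0;
	}
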
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 8d93cb6ea9cde4..b0ad8fc06e80e0 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1464,6 +1464,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ out_args->np = new;
+ of_node_put(cur);
+ cur = new;
++ new = NULL;
+ }
+ put:
+ of_node_put(cur);
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index f63250c650cafd..4d57a4e3410546 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -9,6 +9,7 @@
+
+ #define pr_fmt(fmt) "OF: " fmt
+
++#include <linux/device.h>
+ #include <linux/of.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+@@ -98,8 +99,9 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
+ *
+ * Returns the new state of a device based on the notifier used.
+ *
+- * Return: 0 on device going from enabled to disabled, 1 on device
+- * going from disabled to enabled and -1 on no change.
++ * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to
++ * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to
++ * enabled and OF_RECONFIG_NO_CHANGE on no change.
+ */
+ int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
+ {
+@@ -666,6 +668,17 @@ void of_changeset_destroy(struct of_changeset *ocs)
+ {
+ struct of_changeset_entry *ce, *cen;
+
++ /*
++ * When a device is deleted, the device links to/from it are also queued
++ * for deletion. Until these device links are freed, the devices
++ * themselves aren't freed. If the device being deleted is due to an
++ * overlay change, this device might be holding a reference to a device
++ * node that will be freed. So, wait until all already pending device
++ * links are deleted before freeing a device node. This ensures we don't
++ * free any device node that has a non-zero reference count.
++ */
++ device_link_wait_removal();
++
+ list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
+ __of_changeset_entry_destroy(ce);
+ }
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 174900072c18cd..36351ad6115eb1 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -25,6 +25,8 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+
++#include "of_private.h"
++
+ /**
+ * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
+ * @dev: Device node of the device whose interrupt is to be mapped
+@@ -79,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
+ /*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+- * their own parsing of the property).
++ * their own parsing of the property). The PAsemi entry covers a
++ * nonsensical interrupt-map that is better left ignored.
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+@@ -93,9 +96,61 @@ static const char * const of_irq_imap_abusers[] = {
+ "fsl,ls1043a-extirq",
+ "fsl,ls1088a-extirq",
+ "renesas,rza1-irqc",
++ "pasemi,rootbus",
+ NULL,
+ };
+
++const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
++{
++ u32 intsize, addrsize;
++ struct device_node *np;
++
++ /* Get the interrupt parent */
++ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
++ np = of_node_get(of_irq_dflt_pic);
++ else
++ np = of_find_node_by_phandle(be32_to_cpup(imap));
++ imap++;
++
++ /* Check if not found */
++ if (!np) {
++ pr_debug(" -> imap parent not found !\n");
++ return NULL;
++ }
++
++ /* Get #interrupt-cells and #address-cells of new parent */
++ if (of_property_read_u32(np, "#interrupt-cells",
++ &intsize)) {
++ pr_debug(" -> parent lacks #interrupt-cells!\n");
++ of_node_put(np);
++ return NULL;
++ }
++ if (of_property_read_u32(np, "#address-cells",
++ &addrsize))
++ addrsize = 0;
++
++ pr_debug(" -> intsize=%d, addrsize=%d\n",
++ intsize, addrsize);
++
++ /* Check for malformed properties */
++ if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
++ || (len < (addrsize + intsize))) {
++ of_node_put(np);
++ return NULL;
++ }
++
++ pr_debug(" -> imaplen=%d\n", len);
++
++ imap += addrsize + intsize;
++
++ out_irq->np = np;
++ for (int i = 0; i < intsize; i++)
++ out_irq->args[i] = be32_to_cpup(imap - intsize + i);
++ out_irq->args_count = intsize;
++
++ return imap;
++}
++
+ /**
+ * of_irq_parse_raw - Low level interrupt tree parsing
+ * @addr: address specifier (start of "reg" property of the device) in be32 format
+@@ -112,12 +167,12 @@ static const char * const of_irq_imap_abusers[] = {
+ */
+ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ {
+- struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
++ struct device_node *ipar, *tnode, *old = NULL;
+ __be32 initial_match_array[MAX_PHANDLE_ARGS];
+ const __be32 *match_array = initial_match_array;
+- const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+- u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
+- int imaplen, match, i, rc = -EINVAL;
++ const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
++ u32 intsize = 1, addrsize;
++ int i, rc = -EINVAL;
+
+ #ifdef DEBUG
+ of_print_phandle_args("of_irq_parse_raw: ", out_irq);
+@@ -176,6 +231,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+
+ /* Now start the actual "proper" walk of the interrupt tree */
+ while (ipar != NULL) {
++ int imaplen, match;
++ const __be32 *imap, *oldimap, *imask;
++ struct device_node *newpar;
+ /*
+ * Now check if cursor is an interrupt-controller and
+ * if it is then we are done, unless there is an
+@@ -216,7 +274,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+
+ /* Parse interrupt-map */
+ match = 0;
+- while (imaplen > (addrsize + intsize + 1) && !match) {
++ while (imaplen > (addrsize + intsize + 1)) {
+ /* Compare specifiers */
+ match = 1;
+ for (i = 0; i < (addrsize + intsize); i++, imaplen--)
+@@ -224,74 +282,31 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+
+ pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
+
+- /* Get the interrupt parent */
+- if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+- newpar = of_node_get(of_irq_dflt_pic);
+- else
+- newpar = of_find_node_by_phandle(be32_to_cpup(imap));
+- imap++;
+- --imaplen;
+-
+- /* Check if not found */
+- if (newpar == NULL) {
+- pr_debug(" -> imap parent not found !\n");
++ oldimap = imap;
++ imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
++ if (!imap)
+ goto fail;
+- }
+-
+- if (!of_device_is_available(newpar))
+- match = 0;
+-
+- /* Get #interrupt-cells and #address-cells of new
+- * parent
+- */
+- if (of_property_read_u32(newpar, "#interrupt-cells",
+- &newintsize)) {
+- pr_debug(" -> parent lacks #interrupt-cells!\n");
+- goto fail;
+- }
+- if (of_property_read_u32(newpar, "#address-cells",
+- &newaddrsize))
+- newaddrsize = 0;
+-
+- pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
+- newintsize, newaddrsize);
+-
+- /* Check for malformed properties */
+- if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
+- || (imaplen < (newaddrsize + newintsize))) {
+- rc = -EFAULT;
+- goto fail;
+- }
+
+- imap += newaddrsize + newintsize;
+- imaplen -= newaddrsize + newintsize;
++ match &= of_device_is_available(out_irq->np);
++ if (match)
++ break;
+
++ of_node_put(out_irq->np);
++ imaplen -= imap - oldimap;
+ pr_debug(" -> imaplen=%d\n", imaplen);
+ }
+- if (!match) {
+- if (intc) {
+- /*
+- * The PASEMI Nemo is a known offender, so
+- * let's only warn for anyone else.
+- */
+- WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
+- "%pOF interrupt-map failed, using interrupt-controller\n",
+- ipar);
+- return 0;
+- }
+-
++ if (!match)
+ goto fail;
+- }
+
+ /*
+ * Successfully parsed an interrupt-map translation; copy new
+ * interrupt specifier into the out_irq structure
+ */
+- match_array = imap - newaddrsize - newintsize;
+- for (i = 0; i < newintsize; i++)
+- out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
+- out_irq->args_count = intsize = newintsize;
+- addrsize = newaddrsize;
++ match_array = oldimap + 1;
++
++ newpar = out_irq->np;
++ intsize = out_irq->args_count;
++ addrsize = (imap - match_array) - intsize;
+
+ if (ipar == newpar) {
+ pr_debug("%pOF interrupt-map entry to self\n", ipar);
+@@ -300,7 +315,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+
+ skiplevel:
+ /* Iterate again with new parent */
+- out_irq->np = newpar;
+ pr_debug(" -> new parent: %pOF\n", newpar);
+ of_node_put(ipar);
+ ipar = newpar;
+@@ -310,7 +324,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+
+ fail:
+ of_node_put(ipar);
+- of_node_put(newpar);
+
+ return rc;
+ }
+@@ -331,7 +344,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ struct device_node *p;
+ const __be32 *addr;
+ u32 intsize;
+- int i, res;
++ int i, res, addr_len;
++ __be32 addr_buf[3] = { 0 };
+
+ pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
+
+@@ -340,13 +354,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ return of_irq_parse_oldworld(device, index, out_irq);
+
+ /* Get the reg property (if any) */
+- addr = of_get_property(device, "reg", NULL);
++ addr = of_get_property(device, "reg", &addr_len);
++
++ /* Prevent out-of-bounds read in case of longer interrupt parent address size */
++ if (addr_len > sizeof(addr_buf))
++ addr_len = sizeof(addr_buf);
++ if (addr)
++ memcpy(addr_buf, addr, addr_len);
+
+ /* Try the new-style interrupts-extended first */
+ res = of_parse_phandle_with_args(device, "interrupts-extended",
+ "#interrupt-cells", index, out_irq);
+ if (!res)
+- return of_irq_parse_raw(addr, out_irq);
++ return of_irq_parse_raw(addr_buf, out_irq);
+
+ /* Look for the interrupt parent. */
+ p = of_irq_find_parent(device);
+@@ -376,7 +396,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+
+
+ /* Check if there are any interrupt-map translations to process */
+- res = of_irq_parse_raw(addr, out_irq);
++ res = of_irq_parse_raw(addr_buf, out_irq);
+ out:
+ of_node_put(p);
+ return res;
+@@ -696,8 +716,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id,
+ * @np: device node for @dev
+ * @token: bus type for this domain
+ *
+- * Parse the msi-parent property (both the simple and the complex
+- * versions), and returns the corresponding MSI domain.
++ * Parse the msi-parent property and return the corresponding MSI domain.
+ *
+ * Returns: the MSI domain for this device (or NULL on failure).
+ */
+@@ -705,33 +724,14 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
+ struct device_node *np,
+ enum irq_domain_bus_token token)
+ {
+- struct device_node *msi_np;
++ struct of_phandle_iterator it;
+ struct irq_domain *d;
++ int err;
+
+- /* Check for a single msi-parent property */
+- msi_np = of_parse_phandle(np, "msi-parent", 0);
+- if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
+- d = irq_find_matching_host(msi_np, token);
+- if (!d)
+- of_node_put(msi_np);
+- return d;
+- }
+-
+- if (token == DOMAIN_BUS_PLATFORM_MSI) {
+- /* Check for the complex msi-parent version */
+- struct of_phandle_args args;
+- int index = 0;
+-
+- while (!of_parse_phandle_with_args(np, "msi-parent",
+- "#msi-cells",
+- index, &args)) {
+- d = irq_find_matching_host(args.np, token);
+- if (d)
+- return d;
+-
+- of_node_put(args.np);
+- index++;
+- }
++ of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) {
++ d = irq_find_matching_host(it.node, token);
++ if (d)
++ return d;
+ }
+
+ return NULL;
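Review note: of_msi_get_domain() previously special-cased a bare "msi-parent" phandle versus the #msi-cells form; of_for_each_phandle() already copes with both, so the two paths collapse into one loop. One subtlety: leaving the loop early means the iterator's reference on it.node stays held by the caller. A sketch, assuming that reference behaviour:

	#include <linux/of.h>

	static struct device_node *first_msi_parent(struct device_node *np)
	{
		struct of_phandle_iterator it;
		int err;

		/* Walks "msi-parent" whether it is a bare phandle or a
		 * phandle+args list governed by #msi-cells. */
		of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0)
			return it.node;	/* early exit: caller owns the ref */

		return NULL;
	}
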
+diff --git a/drivers/of/module.c b/drivers/of/module.c
+index 0e8aa974f0f2bb..780fd82a7ecc58 100644
+--- a/drivers/of/module.c
++++ b/drivers/of/module.c
+@@ -16,19 +16,28 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
+ ssize_t csize;
+ ssize_t tsize;
+
++ /*
++ * Prevent a kernel oops in vsnprintf() -- it only allows passing a
++ * NULL ptr when the length is also 0. Also filter out the negative
++ * lengths...
++ */
++ if ((len > 0 && !str) || len < 0)
++ return -EINVAL;
++
+ /* Name & Type */
+ /* %p eats all alphanum characters, so %c must be used here */
+ csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
+ of_node_get_device_type(np));
+ tsize = csize;
++ if (csize >= len)
++ csize = len > 0 ? len - 1 : 0;
+ len -= csize;
+- if (str)
+- str += csize;
++ str += csize;
+
+ of_property_for_each_string(np, "compatible", p, compat) {
+ csize = strlen(compat) + 1;
+ tsize += csize;
+- if (csize > len)
++ if (csize >= len)
+ continue;
+
+ csize = snprintf(str, len, "C%s", compat);
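Review note: of_modalias() relied on snprintf()'s return value to advance its cursor, but snprintf() returns the length the output *would* have had, so on truncation the cursor walked past the end of the buffer. The fix clamps the consumed size to the space actually available before advancing. A sketch of the clamping; the helper itself is illustrative, not from module.c:

	#include <linux/kernel.h>

	static char *append_compat(char *str, ssize_t *len, const char *compat)
	{
		ssize_t csize = snprintf(str, *len, "C%s", compat);

		/* snprintf() reports the untruncated length; clamp it before
		 * the cursor moves so str never leaves the buffer. */
		if (csize >= *len)
			csize = *len > 0 ? *len - 1 : 0;
		*len -= csize;
		return str + csize;
	}
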
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index f38397c7b58241..21f8f5e80917d1 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -158,6 +158,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
+ extern int of_bus_n_addr_cells(struct device_node *np);
+ extern int of_bus_n_size_cells(struct device_node *np);
+
++const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
++ struct of_phandle_args *out_irq);
++
+ struct bus_dma_region;
+ #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
+ int of_dma_get_range(struct device_node *np,
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index cf8dacf3e3b84d..b3f0285e401cad 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -762,7 +762,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
+ /* Walk 3 levels up only if there is 'ports' node. */
+ for (depth = 3; depth && node; depth--) {
+ node = of_get_next_parent(node);
+- if (depth == 2 && !of_node_name_eq(node, "ports"))
++ if (depth == 2 && !of_node_name_eq(node, "ports") &&
++ !of_node_name_eq(node, "in-ports") &&
++ !of_node_name_eq(node, "out-ports"))
+ break;
+ }
+ return node;
+@@ -1062,36 +1064,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ return of_device_get_match_data(dev);
+ }
+
+-static struct device_node *of_get_compat_node(struct device_node *np)
+-{
+- of_node_get(np);
+-
+- while (np) {
+- if (!of_device_is_available(np)) {
+- of_node_put(np);
+- np = NULL;
+- }
+-
+- if (of_property_present(np, "compatible"))
+- break;
+-
+- np = of_get_next_parent(np);
+- }
+-
+- return np;
+-}
+-
+-static struct device_node *of_get_compat_node_parent(struct device_node *np)
+-{
+- struct device_node *parent, *node;
+-
+- parent = of_get_parent(np);
+- node = of_get_compat_node(parent);
+- of_node_put(parent);
+-
+- return node;
+-}
+-
+ static void of_link_to_phandle(struct device_node *con_np,
+ struct device_node *sup_np)
+ {
+@@ -1221,10 +1193,10 @@ static struct device_node *parse_##fname(struct device_node *np, \
+ * @parse_prop.prop_name: Name of property holding a phandle value
+ * @parse_prop.index: For properties holding a list of phandles, this is the
+ * index into the list
++ * @get_con_dev: If the consumer node containing the property is never converted
++ * to a struct device, implement this ops so fw_devlink can use it
++ * to find the true consumer.
+ * @optional: Describes whether a supplier is mandatory or not
+- * @node_not_dev: The consumer node containing the property is never converted
+- * to a struct device. Instead, parse ancestor nodes for the
+- * compatible property to find a node corresponding to a device.
+ *
+ * Returns:
+ * parse_prop() return values are
+@@ -1235,15 +1207,15 @@ static struct device_node *parse_##fname(struct device_node *np, \
+ struct supplier_bindings {
+ struct device_node *(*parse_prop)(struct device_node *np,
+ const char *prop_name, int index);
++ struct device_node *(*get_con_dev)(struct device_node *np);
+ bool optional;
+- bool node_not_dev;
+ };
+
+ DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+ DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
+ DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+ DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+-DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
++DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+ DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+ DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
+@@ -1261,7 +1233,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
+-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
+ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
+ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
+ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
+@@ -1326,6 +1297,17 @@ static struct device_node *parse_interrupts(struct device_node *np,
+ return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
+ }
+
++static struct device_node *parse_remote_endpoint(struct device_node *np,
++ const char *prop_name,
++ int index)
++{
++ /* Return NULL for index > 0 to signify end of remote-endpoints. */
++ if (index > 0 || strcmp(prop_name, "remote-endpoint"))
++ return NULL;
++
++ return of_graph_get_remote_port_parent(np);
++}
++
+ static const struct supplier_bindings of_supplier_bindings[] = {
+ { .parse_prop = parse_clocks, },
+ { .parse_prop = parse_interconnects, },
+@@ -1350,7 +1332,10 @@ static const struct supplier_bindings of_supplier_bindings[] = {
+ { .parse_prop = parse_pinctrl6, },
+ { .parse_prop = parse_pinctrl7, },
+ { .parse_prop = parse_pinctrl8, },
+- { .parse_prop = parse_remote_endpoint, .node_not_dev = true, },
++ {
++ .parse_prop = parse_remote_endpoint,
++ .get_con_dev = of_graph_get_port_parent,
++ },
+ { .parse_prop = parse_pwms, },
+ { .parse_prop = parse_resets, },
+ { .parse_prop = parse_leds, },
+@@ -1400,8 +1385,8 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
+ while ((phandle = s->parse_prop(con_np, prop_name, i))) {
+ struct device_node *con_dev_np;
+
+- con_dev_np = s->node_not_dev
+- ? of_get_compat_node_parent(con_np)
++ con_dev_np = s->get_con_dev
++ ? s->get_con_dev(con_np)
+ : of_node_get(con_np);
+ matched = true;
+ i++;
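The supplier-binding rework above replaces the node_not_dev flag plus a hard-coded of_get_compat_node_parent() with an optional per-binding callback. A reduced sketch of that shape — struct node, get_port_parent() and resolve_consumer() are hypothetical stand-ins, not the kernel types:

#include <stdio.h>

struct node { const char *name; };

static struct node port_parent = { "port-parent" };

static struct node *get_port_parent(struct node *n)
{
	(void)n;			/* a real version would walk up the graph */
	return &port_parent;
}

struct binding {
	struct node *(*get_con_dev)(struct node *n);	/* optional hook */
};

static struct node *resolve_consumer(const struct binding *b, struct node *n)
{
	return b->get_con_dev ? b->get_con_dev(n) : n;	/* identity default */
}

int main(void)
{
	struct node ep = { "endpoint" };
	struct binding plain = { 0 };
	struct binding graph = { .get_con_dev = get_port_parent };

	printf("%s\n", resolve_consumer(&plain, &ep)->name);	/* endpoint */
	printf("%s\n", resolve_consumer(&graph, &ep)->name);	/* port-parent */
	return 0;
}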
+diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
+index d01f92f0f0db7f..554a996b2ef18e 100644
+--- a/drivers/of/unittest-data/tests-phandle.dtsi
++++ b/drivers/of/unittest-data/tests-phandle.dtsi
+@@ -40,6 +40,13 @@ provider4: provider4 {
+ phandle-map-pass-thru = <0x0 0xf0>;
+ };
+
++ provider5: provider5 {
++ #phandle-cells = <2>;
++ phandle-map = <2 7 &provider4 2 3>;
++ phandle-map-mask = <0xff 0xf>;
++ phandle-map-pass-thru = <0x0 0xf0>;
++ };
++
+ consumer-a {
+ phandle-list = <&provider1 1>,
+ <&provider2 2 0>,
+@@ -66,7 +73,8 @@ consumer-b {
+ <&provider4 4 0x100>,
+ <&provider4 0 0x61>,
+ <&provider0>,
+- <&provider4 19 0x20>;
++ <&provider4 19 0x20>,
++ <&provider5 2 7>;
+ phandle-list-bad-phandle = <12345678 0 0>;
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider4 0>;
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index ad2b7879cc675b..4f58345b5c683d 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -50,6 +50,12 @@ static struct unittest_results {
+ failed; \
+ })
+
++#ifdef CONFIG_OF_KOBJ
++#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
++#else
++#define OF_KREF_READ(NODE) 1
++#endif
++
+ /*
+ * Expected message may have a message level other than KERN_INFO.
+ * Print the expected message only if the current loglevel will allow
+@@ -456,6 +462,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
+
+ unittest(passed, "index %i - data error on node %pOF rc=%i\n",
+ i, args.np, rc);
++
++ if (rc == 0)
++ of_node_put(args.np);
+ }
+
+ /* Check for missing list property */
+@@ -545,8 +554,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
+
+ static void __init of_unittest_parse_phandle_with_args_map(void)
+ {
+- struct device_node *np, *p0, *p1, *p2, *p3;
++ struct device_node *np, *p[6] = {};
+ struct of_phandle_args args;
++ unsigned int prefs[6];
+ int i, rc;
+
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
+@@ -555,34 +565,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ return;
+ }
+
+- p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
+- if (!p0) {
+- pr_err("missing testcase data\n");
+- return;
+- }
+-
+- p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
+- if (!p1) {
+- pr_err("missing testcase data\n");
+- return;
+- }
+-
+- p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
+- if (!p2) {
+- pr_err("missing testcase data\n");
+- return;
+- }
+-
+- p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
+- if (!p3) {
+- pr_err("missing testcase data\n");
+- return;
++ p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
++ p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
++ p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
++ p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
++ p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4");
++ p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5");
++ for (i = 0; i < ARRAY_SIZE(p); ++i) {
++ if (!p[i]) {
++ pr_err("missing testcase data\n");
++ return;
++ }
++ prefs[i] = OF_KREF_READ(p[i]);
+ }
+
+ rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
+- unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
++ unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc);
+
+- for (i = 0; i < 8; i++) {
++ for (i = 0; i < 9; i++) {
+ bool passed = true;
+
+ memset(&args, 0, sizeof(args));
+@@ -593,13 +593,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ switch (i) {
+ case 0:
+ passed &= !rc;
+- passed &= (args.np == p1);
++ passed &= (args.np == p[1]);
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 1);
+ break;
+ case 1:
+ passed &= !rc;
+- passed &= (args.np == p3);
++ passed &= (args.np == p[3]);
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == 2);
+ passed &= (args.args[1] == 5);
+@@ -610,28 +610,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ break;
+ case 3:
+ passed &= !rc;
+- passed &= (args.np == p0);
++ passed &= (args.np == p[0]);
+ passed &= (args.args_count == 0);
+ break;
+ case 4:
+ passed &= !rc;
+- passed &= (args.np == p1);
++ passed &= (args.np == p[1]);
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == 3);
+ break;
+ case 5:
+ passed &= !rc;
+- passed &= (args.np == p0);
++ passed &= (args.np == p[0]);
+ passed &= (args.args_count == 0);
+ break;
+ case 6:
+ passed &= !rc;
+- passed &= (args.np == p2);
++ passed &= (args.np == p[2]);
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == 15);
+ passed &= (args.args[1] == 0x20);
+ break;
+ case 7:
++ passed &= !rc;
++ passed &= (args.np == p[3]);
++ passed &= (args.args_count == 3);
++ passed &= (args.args[0] == 2);
++ passed &= (args.args[1] == 5);
++ passed &= (args.args[2] == 3);
++ break;
++ case 8:
+ passed &= (rc == -ENOENT);
+ break;
+ default:
+@@ -640,6 +648,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+
+ unittest(passed, "index %i - data error on node %s rc=%i\n",
+ i, args.np->full_name, rc);
++
++ if (rc == 0)
++ of_node_put(args.np);
+ }
+
+ /* Check for missing list property */
+@@ -686,6 +697,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
+
+ unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
++
++ for (i = 0; i < ARRAY_SIZE(p); ++i) {
++ unittest(prefs[i] == OF_KREF_READ(p[i]),
++ "provider%d: expected:%d got:%d\n",
++ i, prefs[i], OF_KREF_READ(p[i]));
++ of_node_put(p[i]);
++ }
+ }
+
+ static void __init of_unittest_property_string(void)
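The unittest additions snapshot each provider's refcount with OF_KREF_READ() before the lookups and verify the counts return to their starting values once every of_node_put() has run. A userspace sketch of that leak check, with node_get()/node_put() as stand-ins for of_node_get()/of_node_put():

#include <assert.h>
#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

int main(void)
{
	struct node p[3] = { {1}, {1}, {1} };
	int prefs[3], i;

	for (i = 0; i < 3; i++)
		prefs[i] = p[i].refs;		/* snapshot, like OF_KREF_READ() */

	for (i = 0; i < 3; i++) {
		node_get(&p[i]);		/* a lookup takes a reference */
		node_put(&p[i]);		/* ...which the test must drop */
	}

	for (i = 0; i < 3; i++)
		assert(prefs[i] == p[i].refs);	/* balanced: no leak */

	puts("refcounts balanced");
	return 0;
}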
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 919cc53bc02e35..bceb27b1baa18a 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1322,12 +1322,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+- forced = opp_table->rate_clk_single != target_freq;
++ forced = opp_table->rate_clk_single != freq;
+ }
+
+- ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
++ ret = _set_opp(dev, opp_table, opp, &freq, forced);
+
+- if (target_freq)
++ if (freq)
+ dev_pm_opp_put(opp);
+
+ put_opp_table:
+diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
+index 17543c0aa5b681..83f591e40cdfc9 100644
+--- a/drivers/opp/debugfs.c
++++ b/drivers/opp/debugfs.c
+@@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+ {
+ struct icc_path *path = fp->private_data;
++ const char *name = icc_get_name(path);
+ char buf[64];
+- int i;
++ int i = 0;
+
+- i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
++ if (name)
++ i = scnprintf(buf, sizeof(buf), "%.62s\n", name);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+ }
+diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
+index 8f3f13fbbb25a9..a8a696d2e03ab7 100644
+--- a/drivers/opp/ti-opp-supply.c
++++ b/drivers/opp/ti-opp-supply.c
+@@ -400,10 +400,12 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
+ }
+
+ ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators);
+- if (ret < 0)
++ if (ret < 0) {
+ _free_optimized_voltages(dev, &opp_data);
++ return ret;
++ }
+
+- return ret;
++ return 0;
+ }
+
+ static struct platform_driver ti_opp_supply_driver = {
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 6f5e5f0230d399..498bae2e3403c0 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -197,6 +197,14 @@ static struct notifier_block parisc_panic_block = {
+ .priority = INT_MAX,
+ };
+
++/* qemu soft power-off function */
++static int qemu_power_off(struct sys_off_data *data)
++{
++ /* this turns the system off via SeaBIOS */
++ gsc_writel(0, (unsigned long) data->cb_data);
++ pdc_soft_power_button(1);
++ return NOTIFY_DONE;
++}
+
+ static int __init power_init(void)
+ {
+@@ -226,7 +234,13 @@ static int __init power_init(void)
+ soft_power_reg);
+ }
+
+- power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
++ power_task = NULL;
++ if (running_on_qemu && soft_power_reg)
++ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
++ qemu_power_off, (void *)soft_power_reg);
++ if (!running_on_qemu || soft_power_reg)
++ power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
++ KTHREAD_NAME);
+ if (IS_ERR(power_task)) {
+ printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n");
+ pdc_soft_power_button(0);
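The two conditions added to power_init() form a small decision table: on qemu the soft-power register is needed both to register the SYS_OFF handler and to make the polling thread useful, while on real hardware the thread always runs. A tiny sketch that just enumerates the table (the ints are stand-ins for running_on_qemu and soft_power_reg):

#include <stdio.h>

int main(void)
{
	for (int qemu = 0; qemu <= 1; qemu++) {
		for (int reg = 0; reg <= 1; reg++)
			printf("qemu=%d reg=%d -> sys_off handler=%d kthread=%d\n",
			       qemu, reg,
			       qemu && reg,	/* register_sys_off_handler() */
			       !qemu || reg);	/* kthread_run(kpowerswd) */
	}
	return 0;
}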
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index 1f236aaf7867a7..f33b5d1ddfc16f 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2658,6 +2658,8 @@ enum parport_pc_pci_cards {
+ asix_ax99100,
+ quatech_sppxp100,
+ wch_ch382l,
++ brainboxes_uc146,
++ brainboxes_px203,
+ };
+
+
+@@ -2737,6 +2739,8 @@ static struct parport_pc_pci {
+ /* asix_ax99100 */ { 1, { { 0, 1 }, } },
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+ /* wch_ch382l */ { 1, { { 2, -1 }, } },
++ /* brainboxes_uc146 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_px203 */ { 1, { { 0, -1 }, } },
+ };
+
+ static const struct pci_device_id parport_pc_pci_tbl[] = {
+@@ -2833,6 +2837,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+ /* WCH CH382L PCI-E single parallel port card */
+ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
++ /* Brainboxes IX-500/550 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x402a,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++ /* Brainboxes UC-146/UC-157 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++ /* Brainboxes PX-146/PX-257 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x401c,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++ /* Brainboxes PX-203 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4007,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
++ /* Brainboxes PX-475 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x401f,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index 9f5d784cd95d58..3644997a834255 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -65,6 +65,10 @@ enum parport_pc_pci_cards {
+ sunix_5069a,
+ sunix_5079a,
+ sunix_5099a,
++ brainboxes_uc257,
++ brainboxes_is300,
++ brainboxes_uc414,
++ brainboxes_px263,
+ };
+
+ /* each element directly indexed from enum list, above */
+@@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = {
+ /* sunix_5069a */ { 1, { { 1, 2 }, } },
+ /* sunix_5079a */ { 1, { { 1, 2 }, } },
+ /* sunix_5099a */ { 1, { { 1, 2 }, } },
++ /* brainboxes_uc257 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_is300 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_uc414 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_px263 */ { 1, { { 3, -1 }, } },
+ };
+
+ static struct pci_device_id parport_serial_pci_tbl[] = {
+@@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0104, 0, 0, sunix_5099a },
+
++ /* Brainboxes UC-203 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0bc1,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0bc2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++ /* Brainboxes UC-257 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0862,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0863,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++ /* Brainboxes UC-414 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0e61,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 },
++
++ /* Brainboxes UC-475 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0981,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0982,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
++
++ /* Brainboxes IS-300/IS-500 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0da0,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 },
++
++ /* Brainboxes PX-263/PX-295 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x402c,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 },
++
+ { 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
+@@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = {
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
++ [brainboxes_uc257] = {
++ .flags = FL_BASE2,
++ .num_ports = 2,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ },
++ [brainboxes_is300] = {
++ .flags = FL_BASE2,
++ .num_ports = 1,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ },
++ [brainboxes_uc414] = {
++ .flags = FL_BASE2,
++ .num_ports = 4,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ },
++ [brainboxes_px263] = {
++ .flags = FL_BASE2,
++ .num_ports = 4,
++ .base_baud = 921600,
++ .uart_offset = 8,
++ },
+ };
+
+ struct parport_serial_private {
+diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
+index 4e5b972c3e2633..3b7d7e23602af2 100644
+--- a/drivers/parport/procfs.c
++++ b/drivers/parport/procfs.c
+@@ -58,12 +58,12 @@ static int do_active_device(struct ctl_table *table, int write,
+
+ for (dev = port->devices; dev ; dev = dev->next) {
+ if(dev == port->cad) {
+- len += sprintf(buffer, "%s\n", dev->name);
++ len += scnprintf(buffer, sizeof(buffer), "%s\n", dev->name);
+ }
+ }
+
+ if(!len) {
+- len += sprintf(buffer, "%s\n", "none");
++ len += scnprintf(buffer, sizeof(buffer), "%s\n", "none");
+ }
+
+ if (len > *lenp)
+@@ -94,19 +94,19 @@ static int do_autoprobe(struct ctl_table *table, int write,
+ }
+
+ if ((str = info->class_name) != NULL)
+- len += sprintf (buffer + len, "CLASS:%s;\n", str);
++ len += scnprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
+
+ if ((str = info->model) != NULL)
+- len += sprintf (buffer + len, "MODEL:%s;\n", str);
++ len += scnprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
+
+ if ((str = info->mfr) != NULL)
+- len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str);
++ len += scnprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
+
+ if ((str = info->description) != NULL)
+- len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str);
++ len += scnprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
+
+ if ((str = info->cmdset) != NULL)
+- len += sprintf (buffer + len, "COMMAND SET:%s;\n", str);
++ len += scnprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
+
+ if (len > *lenp)
+ len = *lenp;
+@@ -124,7 +124,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
+ void *result, size_t *lenp, loff_t *ppos)
+ {
+ struct parport *port = (struct parport *)table->extra1;
+- char buffer[20];
++ char buffer[64];
+ int len = 0;
+
+ if (*ppos) {
+@@ -135,7 +135,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
+ if (write) /* permissions prevent this anyway */
+ return -EACCES;
+
+- len += sprintf (buffer, "%lu\t%lu\n", port->base, port->base_hi);
++ len += scnprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
+
+ if (len > *lenp)
+ len = *lenp;
+@@ -162,7 +162,7 @@ static int do_hardware_irq(struct ctl_table *table, int write,
+ if (write) /* permissions prevent this anyway */
+ return -EACCES;
+
+- len += sprintf (buffer, "%d\n", port->irq);
++ len += scnprintf (buffer, sizeof(buffer), "%d\n", port->irq);
+
+ if (len > *lenp)
+ len = *lenp;
+@@ -189,7 +189,7 @@ static int do_hardware_dma(struct ctl_table *table, int write,
+ if (write) /* permissions prevent this anyway */
+ return -EACCES;
+
+- len += sprintf (buffer, "%d\n", port->dma);
++ len += scnprintf (buffer, sizeof(buffer), "%d\n", port->dma);
+
+ if (len > *lenp)
+ len = *lenp;
+@@ -220,7 +220,7 @@ static int do_hardware_modes(struct ctl_table *table, int write,
+ #define printmode(x) \
+ do { \
+ if (port->modes & PARPORT_MODE_##x) \
+- len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
++ len += scnprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
+ } while (0)
+ int f = 0;
+ printmode(PCSPP);
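The sprintf()-to-scnprintf() conversion above works because scnprintf() returns the number of characters actually written rather than the number that would have been needed, so the buffer + len / sizeof(buffer) - len accumulation can never run off the end. A userspace model of that contract (the kernel's scnprintf() behaves like this; the demo strings are made up):

#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return n >= (int)size ? (int)size - 1 : n;	/* chars written, not needed */
}

int main(void)
{
	char buffer[16];
	int len = 0;

	len += scnprintf(buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", "PRINTER");
	len += scnprintf(buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", "LJ4");
	printf("%d: %s", len, buffer);	/* second call truncated safely to 0 bytes */
	return 0;
}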
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 9c2137dae429aa..826b5016a10102 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
+ }
+ EXPORT_SYMBOL(pci_bus_add_devices);
+
+-/** pci_walk_bus - walk devices on/under bus, calling callback.
+- * @top bus whose devices should be walked
+- * @cb callback to be called for each device found
+- * @userdata arbitrary pointer to be passed to callback.
+- *
+- * Walk the given bus, including any bridged devices
+- * on buses under this bus. Call the provided callback
+- * on each device found.
+- *
+- * We check the return of @cb each time. If it returns anything
+- * other than 0, we break out.
+- *
+- */
+-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+- void *userdata)
++static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
++ void *userdata, bool locked)
+ {
+ struct pci_dev *dev;
+ struct pci_bus *bus;
+@@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ int retval;
+
+ bus = top;
+- down_read(&pci_bus_sem);
++ if (!locked)
++ down_read(&pci_bus_sem);
+ next = top->devices.next;
+ for (;;) {
+ if (next == &bus->devices) {
+@@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ if (retval)
+ break;
+ }
+- up_read(&pci_bus_sem);
++ if (!locked)
++ up_read(&pci_bus_sem);
++}
++
++/**
++ * pci_walk_bus - walk devices on/under bus, calling callback.
++ * @top: bus whose devices should be walked
++ * @cb: callback to be called for each device found
++ * @userdata: arbitrary pointer to be passed to callback
++ *
++ * Walk the given bus, including any bridged devices
++ * on buses under this bus. Call the provided callback
++ * on each device found.
++ *
++ * We check the return of @cb each time. If it returns anything
++ * other than 0, we break out.
++ */
++void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
++{
++ __pci_walk_bus(top, cb, userdata, false);
+ }
+ EXPORT_SYMBOL_GPL(pci_walk_bus);
+
++void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
++{
++ lockdep_assert_held(&pci_bus_sem);
++
++ __pci_walk_bus(top, cb, userdata, true);
++}
++EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
++
+ struct pci_bus *pci_bus_get(struct pci_bus *bus)
+ {
+ if (bus)
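pci_walk_bus_locked() above follows a common pattern: one static worker takes a locked flag, and the two exported entry points differ only in whether they acquire pci_bus_sem themselves or assert (via lockdep) that the caller already holds it. A pthread-based sketch of the same shape — the names here are illustrative, not the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t bus_sem = PTHREAD_RWLOCK_INITIALIZER;

static void __walk(void (*cb)(int), int n, int locked)
{
	if (!locked)
		pthread_rwlock_rdlock(&bus_sem);
	for (int i = 0; i < n; i++)
		cb(i);				/* visit each "device" */
	if (!locked)
		pthread_rwlock_unlock(&bus_sem);
}

static void walk(void (*cb)(int), int n)	/* like pci_walk_bus() */
{
	__walk(cb, n, 0);
}

static void walk_locked(void (*cb)(int), int n)	/* like pci_walk_bus_locked() */
{
	/* caller must already hold bus_sem; the kernel enforces this
	 * with lockdep_assert_held() */
	__walk(cb, n, 1);
}

static void show(int i) { printf("dev %d\n", i); }

int main(void)
{
	walk(show, 2);				/* takes and drops the lock */
	pthread_rwlock_rdlock(&bus_sem);
	walk_locked(show, 2);			/* runs under the caller's lock */
	pthread_rwlock_unlock(&bus_sem);
	return 0;
}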
+diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
+index b445ffe95e3f04..0e29a76ca53077 100644
+--- a/drivers/pci/controller/dwc/pci-dra7xx.c
++++ b/drivers/pci/controller/dwc/pci-dra7xx.c
+@@ -841,7 +841,8 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
+ dra7xx->mode = mode;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
+- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
++ IRQF_SHARED | IRQF_ONESHOT,
++ "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_gpio;
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index 6319082301d68e..c6bede3469320e 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -375,7 +375,7 @@ static int exynos_pcie_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int __exit exynos_pcie_remove(struct platform_device *pdev)
++static int exynos_pcie_remove(struct platform_device *pdev)
+ {
+ struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+@@ -431,7 +431,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
+
+ static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
+- .remove = __exit_p(exynos_pcie_remove),
++ .remove = exynos_pcie_remove,
+ .driver = {
+ .name = "exynos-pcie",
+ .of_match_table = exynos_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 74703362aeec71..86b09b5d7f2493 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -997,7 +997,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+ ret = phy_power_on(imx6_pcie->phy);
+ if (ret) {
+ dev_err(dev, "waiting for PHY ready timeout!\n");
+- goto err_phy_off;
++ goto err_phy_exit;
+ }
+ }
+
+@@ -1012,8 +1012,9 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+ return 0;
+
+ err_phy_off:
+- if (imx6_pcie->phy)
+- phy_exit(imx6_pcie->phy);
++ phy_power_off(imx6_pcie->phy);
++err_phy_exit:
++ phy_exit(imx6_pcie->phy);
+ err_clk_disable:
+ imx6_pcie_clk_disable(imx6_pcie);
+ err_reg_disable:
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 49aea6ce3e878e..c5475830c835f5 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -34,6 +34,11 @@
+ #define PCIE_DEVICEID_SHIFT 16
+
+ /* Application registers */
++#define PID 0x000
++#define RTL GENMASK(15, 11)
++#define RTL_SHIFT 11
++#define AM6_PCI_PG1_RTL_VER 0x15
++
+ #define CMD_STATUS 0x004
+ #define LTSSM_EN_VAL BIT(0)
+ #define OB_XLAT_EN_VAL BIT(1)
+@@ -104,6 +109,8 @@
+
+ #define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
++#define PCI_DEVICE_ID_TI_AM654X 0xb00c
++
+ struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+@@ -246,8 +253,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
+ .irq_unmask = ks_pcie_msi_unmask,
+ };
+
++/**
++ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
++ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
++ * PCIe host controller driver information.
++ *
++ * Since modification of dbi_cs2 involves different clock domain, read the
++ * status back to ensure the transition is complete.
++ */
++static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
++{
++ u32 val;
++
++ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++ val |= DBI_CS2;
++ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
++
++ do {
++ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++ } while (!(val & DBI_CS2));
++}
++
++/**
++ * ks_pcie_clear_dbi_mode() - Disable DBI mode
++ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
++ * PCIe host controller driver information.
++ *
++ * Since modification of dbi_cs2 involves different clock domain, read the
++ * status back to ensure the transition is complete.
++ */
++static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
++{
++ u32 val;
++
++ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++ val &= ~DBI_CS2;
++ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
++
++ do {
++ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
++ } while (val & DBI_CS2);
++}
++
+ static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
+ {
++ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
++ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
++
++ /* Configure and set up BAR0 */
++ ks_pcie_set_dbi_mode(ks_pcie);
++
++ /* Enable BAR0 */
++ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
++ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
++
++ ks_pcie_clear_dbi_mode(ks_pcie);
++
++ /*
++ * For BAR0, just setting bus address for inbound writes (MSI) should
++ * be sufficient. Use physical address to avoid any conflicts.
++ */
++ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
++
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+ return dw_pcie_allocate_domains(pp);
+ }
+@@ -342,59 +409,22 @@ static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ };
+
+-/**
+- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+- * PCIe host controller driver information.
+- *
+- * Since modification of dbi_cs2 involves different clock domain, read the
+- * status back to ensure the transition is complete.
+- */
+-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+-{
+- u32 val;
+-
+- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+- val |= DBI_CS2;
+- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+-
+- do {
+- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+- } while (!(val & DBI_CS2));
+-}
+-
+-/**
+- * ks_pcie_clear_dbi_mode() - Disable DBI mode
+- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+- * PCIe host controller driver information.
+- *
+- * Since modification of dbi_cs2 involves different clock domain, read the
+- * status back to ensure the transition is complete.
+- */
+-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+-{
+- u32 val;
+-
+- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+- val &= ~DBI_CS2;
+- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+-
+- do {
+- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+- } while (val & DBI_CS2);
+-}
+-
+-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
++static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+ {
+ u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+- u64 start, end;
++ struct resource_entry *entry;
+ struct resource *mem;
++ u64 start, end;
+ int i;
+
+- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
++ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
++ if (!entry)
++ return -ENODEV;
++
++ mem = entry->res;
+ start = mem->start;
+ end = mem->end;
+
+@@ -405,7 +435,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ if (ks_pcie->is_am6)
+- return;
++ return 0;
+
+ val = ilog2(OB_WIN_SIZE);
+ ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+@@ -422,6 +452,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= OB_XLAT_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
++
++ return 0;
+ }
+
+ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+@@ -447,44 +479,10 @@ static struct pci_ops ks_child_pcie_ops = {
+ .write = pci_generic_config_write,
+ };
+
+-/**
+- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+- * @bus: A pointer to the PCI bus structure.
+- *
+- * This sets BAR0 to enable inbound access for MSI_IRQ register
+- */
+-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
+-{
+- struct dw_pcie_rp *pp = bus->sysdata;
+- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+-
+- if (!pci_is_root_bus(bus))
+- return 0;
+-
+- /* Configure and set up BAR0 */
+- ks_pcie_set_dbi_mode(ks_pcie);
+-
+- /* Enable BAR0 */
+- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+-
+- ks_pcie_clear_dbi_mode(ks_pcie);
+-
+- /*
+- * For BAR0, just setting bus address for inbound writes (MSI) should
+- * be sufficient. Use physical address to avoid any conflicts.
+- */
+- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+-
+- return 0;
+-}
+-
+ static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+- .add_bus = ks_pcie_v3_65_add_bus,
+ };
+
+ /**
+@@ -527,7 +525,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
+ static void ks_pcie_quirk(struct pci_dev *dev)
+ {
+ struct pci_bus *bus = dev->bus;
++ struct keystone_pcie *ks_pcie;
++ struct device *bridge_dev;
+ struct pci_dev *bridge;
++ u32 val;
++
+ static const struct pci_device_id rc_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+@@ -539,6 +541,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { 0, },
+ };
++ static const struct pci_device_id am6_pci_devids[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
++ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
++ { 0, },
++ };
+
+ if (pci_is_root_bus(bus))
+ bridge = dev;
+@@ -560,10 +567,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+- dev_info(&dev->dev, "limiting MRRS to 256\n");
++ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
+ pcie_set_readrq(dev, 256);
+ }
+ }
++
++ /*
++ * Memory transactions fail with PCI controller in AM654 PG1.0
++ * when MRRS is set to more than 128 bytes. Force the MRRS to
++ * 128 bytes in all downstream devices.
++ */
++ if (pci_match_id(am6_pci_devids, bridge)) {
++ bridge_dev = pci_get_host_bridge_device(dev);
++ if (!bridge_dev || !bridge_dev->parent)
++ return;
++
++ ks_pcie = dev_get_drvdata(bridge_dev->parent);
++ if (!ks_pcie)
++ return;
++
++ val = ks_pcie_app_readl(ks_pcie, PID);
++ val &= RTL;
++ val >>= RTL_SHIFT;
++ if (val != AM6_PCI_PG1_RTL_VER)
++ return;
++
++ if (pcie_get_readrq(dev) > 128) {
++ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
++ pcie_set_readrq(dev, 128);
++ }
++ }
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+@@ -817,7 +850,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
+ return ret;
+
+ ks_pcie_stop_link(pci);
+- ks_pcie_setup_rc_app_regs(ks_pcie);
++ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
++ if (ret)
++ return ret;
++
+ writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+ pci->dbi_base + PCI_IO_BASE);
+
+@@ -1100,7 +1136,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
+ { },
+ };
+
+-static int __init ks_pcie_probe(struct platform_device *pdev)
++static int ks_pcie_probe(struct platform_device *pdev)
+ {
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+@@ -1218,7 +1254,16 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
+ goto err_link;
+ }
+
++ /* Obtain references to the PHYs */
++ for (i = 0; i < num_lanes; i++)
++ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
++
+ ret = ks_pcie_enable_phy(ks_pcie);
++
++ /* Release references to the PHYs */
++ for (i = 0; i < num_lanes; i++)
++ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
++
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
+@@ -1302,7 +1347,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int __exit ks_pcie_remove(struct platform_device *pdev)
++static int ks_pcie_remove(struct platform_device *pdev)
+ {
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+@@ -1318,9 +1363,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver ks_pcie_driver __refdata = {
++static struct platform_driver ks_pcie_driver = {
+ .probe = ks_pcie_probe,
+- .remove = __exit_p(ks_pcie_remove),
++ .remove = ks_pcie_remove,
+ .driver = {
+ .name = "keystone-pcie",
+ .of_match_table = ks_pcie_of_match,
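ks_pcie_set_dbi_mode()/ks_pcie_clear_dbi_mode() (moved above, otherwise unchanged) use a read-modify-write followed by a read-back poll because the dbi_cs2 bit crosses a clock domain, so the write alone does not guarantee the mode switch has completed. A self-contained sketch of that idiom against a fake register cell — app_readl()/app_writel() stand in for the MMIO accessors, and here the write is visible immediately, so the loop runs once:

#include <stdint.h>
#include <stdio.h>

#define DBI_CS2 (1u << 5)

static volatile uint32_t cmd_status;	/* fake CMD_STATUS MMIO cell */

static uint32_t app_readl(void)        { return cmd_status; }
static void     app_writel(uint32_t v) { cmd_status = v; }

static void set_dbi_mode(void)
{
	uint32_t val = app_readl();

	app_writel(val | DBI_CS2);
	do {
		val = app_readl();
	} while (!(val & DBI_CS2));	/* spin until the other domain shows it */
}

int main(void)
{
	set_dbi_mode();
	printf("CMD_STATUS = %#x\n", (unsigned int)cmd_status);
	return 0;
}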
+diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
+index b8cb77c9c4bd2c..3132b27bc0064c 100644
+--- a/drivers/pci/controller/dwc/pcie-al.c
++++ b/drivers/pci/controller/dwc/pcie-al.c
+@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
+ .write = pci_generic_config_write,
+ };
+
+-static void al_pcie_config_prepare(struct al_pcie *pcie)
++static int al_pcie_config_prepare(struct al_pcie *pcie)
+ {
+ struct al_pcie_target_bus_cfg *target_bus_cfg;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ unsigned int ecam_bus_mask;
++ struct resource_entry *ft;
+ u32 cfg_control_offset;
++ struct resource *bus;
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ u32 cfg_control;
+ u32 reg;
+- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+
++ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
++ if (!ft)
++ return -ENODEV;
++
++ bus = ft->res;
+ target_bus_cfg = &pcie->target_bus_cfg;
+
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
+@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
+ FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+
+ al_pcie_controller_writel(pcie, cfg_control_offset, reg);
++
++ return 0;
+ }
+
+ static int al_pcie_host_init(struct dw_pcie_rp *pp)
+@@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
+ if (rc)
+ return rc;
+
+- al_pcie_config_prepare(pcie);
++ rc = al_pcie_config_prepare(pcie);
++ if (rc)
++ return rc;
+
+ return 0;
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index f9182f8d552f49..f2e5feba552678 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -6,6 +6,7 @@
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
++#include <linux/align.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+
+@@ -162,7 +163,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+- free_win = ep->bar_to_atu[bar];
++ free_win = ep->bar_to_atu[bar] - 1;
+
+ if (free_win >= pci->num_ib_windows) {
+ dev_err(pci->dev, "No free inbound window\n");
+@@ -176,7 +177,11 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ return ret;
+ }
+
+- ep->bar_to_atu[bar] = free_win;
++ /*
++ * Always increment free_win before assignment, since value 0 is used to identify
++ * unallocated mapping.
++ */
++ ep->bar_to_atu[bar] = free_win + 1;
+ set_bit(free_win, ep->ib_window_map);
+
+ return 0;
+@@ -213,7 +218,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+- u32 atu_index = ep->bar_to_atu[bar];
++ u32 atu_index = ep->bar_to_atu[bar] - 1;
++
++ if (!ep->bar_to_atu[bar])
++ return;
+
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
+
+@@ -598,6 +606,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ }
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
++ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+@@ -669,8 +678,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
++ /*
++ * PCIe r6.0, sec 7.8.6.2 require us to support at least one
++ * size in the range from 1 MB to 512 GB. Advertise support
++ * for 1 MB BAR size only.
++ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
++ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ }
+
+ /*
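The bar_to_atu[] fix above stores free_win + 1 because window index 0 is itself a valid allocation, so the value 0 can be reserved to mean "no window assigned" in a zero-initialised array. A standalone sketch of the biased encoding — assign()/lookup() are hypothetical helpers, not the driver's functions:

#include <assert.h>
#include <stdio.h>

#define NO_WIN 0

static unsigned int bar_to_atu[6];	/* zero-initialised: all unallocated */

static void assign(int bar, unsigned int win)
{
	bar_to_atu[bar] = win + 1;	/* bias so window 0 != "unallocated" */
}

static int lookup(int bar, unsigned int *win)
{
	if (bar_to_atu[bar] == NO_WIN)
		return -1;		/* nothing mapped for this BAR */
	*win = bar_to_atu[bar] - 1;	/* undo the bias */
	return 0;
}

int main(void)
{
	unsigned int win;

	assert(lookup(0, &win) < 0);	/* BAR0 starts unallocated */
	assign(0, 0);			/* window 0 is a legal index */
	assert(lookup(0, &win) == 0 && win == 0);
	printf("BAR0 -> window %u\n", win);
	return 0;
}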
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 1c1c7348972b03..2b60d20dfdf59d 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -732,6 +732,53 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+
+ }
+
++static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
++{
++ u32 lnkcap, lwsc, plc;
++ u8 cap;
++
++ if (!num_lanes)
++ return;
++
++ /* Set the number of lanes */
++ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
++ plc &= ~PORT_LINK_FAST_LINK_MODE;
++ plc &= ~PORT_LINK_MODE_MASK;
++
++ /* Set link width speed control register */
++ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
++ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
++ switch (num_lanes) {
++ case 1:
++ plc |= PORT_LINK_MODE_1_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
++ break;
++ case 2:
++ plc |= PORT_LINK_MODE_2_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
++ break;
++ case 4:
++ plc |= PORT_LINK_MODE_4_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
++ break;
++ case 8:
++ plc |= PORT_LINK_MODE_8_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
++ break;
++ default:
++ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
++ return;
++ }
++ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
++ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
++
++ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
++ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
++ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
++ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
++}
++
+ void dw_pcie_iatu_detect(struct dw_pcie *pci)
+ {
+ int max_region, ob, ib;
+@@ -1013,49 +1060,5 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+- if (!pci->num_lanes) {
+- dev_dbg(pci->dev, "Using h/w default number of lanes\n");
+- return;
+- }
+-
+- /* Set the number of lanes */
+- val &= ~PORT_LINK_FAST_LINK_MODE;
+- val &= ~PORT_LINK_MODE_MASK;
+- switch (pci->num_lanes) {
+- case 1:
+- val |= PORT_LINK_MODE_1_LANES;
+- break;
+- case 2:
+- val |= PORT_LINK_MODE_2_LANES;
+- break;
+- case 4:
+- val |= PORT_LINK_MODE_4_LANES;
+- break;
+- case 8:
+- val |= PORT_LINK_MODE_8_LANES;
+- break;
+- default:
+- dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
+- return;
+- }
+- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+-
+- /* Set link width speed control register */
+- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+- switch (pci->num_lanes) {
+- case 1:
+- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+- break;
+- case 2:
+- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+- break;
+- case 4:
+- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+- break;
+- case 8:
+- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+- break;
+- }
+- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
++ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 2fe42c70097fdb..9b1256da096cb6 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -240,7 +240,7 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+- GPIOD_OUT_HIGH);
++ GPIOD_OUT_LOW);
+ if (IS_ERR(rockchip->rst_gpio))
+ return PTR_ERR(rockchip->rst_gpio);
+
+diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
+index d93bc290695028..421697ec7591d6 100644
+--- a/drivers/pci/controller/dwc/pcie-kirin.c
++++ b/drivers/pci/controller/dwc/pcie-kirin.c
+@@ -415,12 +415,12 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
+ if (pcie->gpio_id_reset[i] < 0)
+ continue;
+
+- pcie->num_slots++;
+- if (pcie->num_slots > MAX_PCI_SLOTS) {
++ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
+ dev_err(dev, "Too many PCI slots!\n");
+ ret = -EINVAL;
+ goto put_node;
+ }
++ pcie->num_slots++;
+
+ ret = of_pci_get_devfn(child);
+ if (ret < 0) {
+@@ -741,7 +741,7 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
+ return ret;
+ }
+
+-static int __exit kirin_pcie_remove(struct platform_device *pdev)
++static int kirin_pcie_remove(struct platform_device *pdev)
+ {
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+@@ -818,7 +818,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
+
+ static struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+- .remove = __exit_p(kirin_pcie_remove),
++ .remove = kirin_pcie_remove,
+ .driver = {
+ .name = "kirin-pcie",
+ .of_match_table = kirin_pcie_match,
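The kirin change above is the classic check-before-increment fix: validate that one more slot still fits before bumping the counter, so num_slots can never index past the per-slot arrays. A toy version of the corrected loop (MAX_SLOTS and the loop bound are made-up values):

#include <stdio.h>

#define MAX_SLOTS 3	/* stand-in for MAX_PCI_SLOTS */

int main(void)
{
	int num_slots = 0;

	for (int child = 0; child < 5; child++) {
		if (num_slots + 1 >= MAX_SLOTS) {	/* validate first... */
			fprintf(stderr, "Too many slots!\n");
			break;
		}
		num_slots++;				/* ...then count */
		printf("registered slot %d\n", num_slots);
	}
	return 0;
}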
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 8bd8107690a6c3..66e080c99d5df1 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -123,6 +123,7 @@
+
+ /* ELBI registers */
+ #define ELBI_SYS_STTS 0x08
++#define ELBI_CS2_ENABLE 0xa4
+
+ /* DBI registers */
+ #define DBI_CON_STATUS 0x44
+@@ -263,6 +264,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+ disable_irq(pcie_ep->perst_irq);
+ }
+
++static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
++ u32 reg, size_t size, u32 val)
++{
++ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
++ int ret;
++
++ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
++
++ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
++ if (ret)
++ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
++
++ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
++}
++
+ static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+ {
+ struct dw_pcie *pci = &pcie_ep->pci;
+@@ -503,12 +519,6 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+ {
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+- struct device *dev = pci->dev;
+-
+- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
+- dev_dbg(dev, "Link is already disabled\n");
+- return;
+- }
+
+ qcom_pcie_disable_resources(pcie_ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+@@ -519,6 +529,7 @@ static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
++ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+ };
+
+ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 64420ecc24d1c3..d3ca6d3493130b 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -53,6 +53,7 @@
+ #define PARF_SLV_ADDR_SPACE_SIZE 0x358
+ #define PARF_DEVICE_TYPE 0x1000
+ #define PARF_BDF_TO_SID_TABLE_N 0x2000
++#define PARF_BDF_TO_SID_CFG 0x2c00
+
+ /* ELBI registers */
+ #define ELBI_SYS_CTRL 0x04
+@@ -120,6 +121,9 @@
+ /* PARF_DEVICE_TYPE register fields */
+ #define DEVICE_TYPE_RC 0x4
+
++/* PARF_BDF_TO_SID_CFG fields */
++#define BDF_TO_SID_BYPASS BIT(0)
++
+ /* ELBI_SYS_CTRL register fields */
+ #define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+@@ -985,11 +989,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
++ u32 val;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
++ /* Enable BDF to SID translation by disabling bypass mode (default) */
++ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
++ val &= ~BDF_TO_SID_BYPASS;
++ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
++
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 4bba31502ce1d6..416d6b45d1fe8d 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -9,6 +9,7 @@
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+@@ -346,8 +347,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+ */
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ if (val & PCI_EXP_LNKSTA_LBMS) {
+- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ if (pcie->init_link_width > current_link_width) {
+ dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+@@ -760,8 +760,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+@@ -920,7 +919,7 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ /* Configure Max lane width from DT */
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_MLW;
+- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
++ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+@@ -2273,11 +2272,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
++ else
++ return 0;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
++ ret = -EINVAL;
+ }
+
+ fail:
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index bed3cefdaf198f..4c34909810d8ef 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -49,6 +49,7 @@
+ #include <linux/refcount.h>
+ #include <linux/irqdomain.h>
+ #include <linux/acpi.h>
++#include <linux/sizes.h>
+ #include <asm/mshyperv.h>
+
+ /*
+@@ -465,7 +466,7 @@ struct pci_eject_response {
+ u32 status;
+ } __packed;
+
+-static int pci_ring_size = (4 * PAGE_SIZE);
++static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
+
+ /*
+ * Driver specific state.
+@@ -1136,8 +1137,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
+ PCI_CAPABILITY_LIST) {
+ /* ROM BARs are unimplemented */
+ *val = 0;
+- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
+- PCI_INTERRUPT_PIN) {
++ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
++ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
+ /*
+ * Interrupt Line and Interrupt PIN are hard-wired to zero
+ * because this front-end only supports message-signaled
+diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
+index d45e7b8dc530d6..bc630ab8a28316 100644
+--- a/drivers/pci/controller/pci-loongson.c
++++ b/drivers/pci/controller/pci-loongson.c
+@@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_LPC, system_bus_quirk);
+
++/*
++ * Some Loongson PCIe ports have hardware limitations on their Maximum Read
++ * Request Size. They can't handle anything larger than this. Sane
++ * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for
++ * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly,
++ * so we have to enforce maximum safe MRRS, which is 256 bytes.
++ */
++#ifdef CONFIG_MIPS
++static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
++{
++ struct pci_bus *bus = pdev->bus;
++ struct pci_dev *bridge;
++ static const struct pci_device_id bridge_devids[] = {
++ { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
++ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
++ { 0, },
++ };
++
++ /* look for the matching bridge */
++ while (!pci_is_root_bus(bus)) {
++ bridge = bus->self;
++ bus = bus->parent;
++
++ if (pci_match_id(bridge_devids, bridge)) {
++ if (pcie_get_readrq(pdev) > 256) {
++ pci_info(pdev, "limiting MRRS to 256\n");
++ pcie_set_readrq(pdev, 256);
++ }
++ break;
++ }
++ }
++}
++DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
++#endif
++
+ static void loongson_mrrs_quirk(struct pci_dev *pdev)
+ {
+- /*
+- * Some Loongson PCIe ports have h/w limitations of maximum read
+- * request size. They can't handle anything larger than this. So
+- * force this limit on any devices attached under these ports.
+- */
+ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+
+ bridge->no_inc_mrrs = 1;
+@@ -127,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+
++static void loongson_pci_msi_quirk(struct pci_dev *dev)
++{
++ u16 val, class = dev->class >> 8;
++
++ if (class != PCI_CLASS_BRIDGE_HOST)
++ return;
++
++ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
++ val |= PCI_MSI_FLAGS_ENABLE;
++ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
++
+ static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
+ {
+ struct pci_config_window *cfg;
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index 60810a1fbfb75e..29fe09c99e7d9c 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -264,7 +264,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+ */
+ lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+- lnkcap |= (port->is_x4 ? 4 : 1) << 4;
++ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
+ mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
+index f9dd6622fe1095..e47a77f943b1e1 100644
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -330,7 +330,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
+ readl(base + PCIE_RC_DL_MDIO_ADDR);
+ writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
+
+- err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
++ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ MDIO_WT_DONE(data), 10, 100);
+ return err;
+ }
+diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
+index e0e27645fdf4c8..975b3024fb08cd 100644
+--- a/drivers/pci/controller/pcie-mediatek-gen3.c
++++ b/drivers/pci/controller/pcie-mediatek-gen3.c
+@@ -245,35 +245,60 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
+ resource_size_t cpu_addr,
+ resource_size_t pci_addr,
+ resource_size_t size,
+- unsigned long type, int num)
++ unsigned long type, int *num)
+ {
++ resource_size_t remaining = size;
++ resource_size_t table_size;
++ resource_size_t addr_align;
++ const char *range_type;
+ void __iomem *table;
+ u32 val;
+
+- if (num >= PCIE_MAX_TRANS_TABLES) {
+- dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+- (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+- return -ENODEV;
+- }
++ while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
++ /* Table size needs to be a power of 2 */
++ table_size = BIT(fls(remaining) - 1);
++
++ if (cpu_addr > 0) {
++ addr_align = BIT(ffs(cpu_addr) - 1);
++ table_size = min(table_size, addr_align);
++ }
++
++ /* Minimum size of translate table is 4KiB */
++ if (table_size < 0x1000) {
++ dev_err(pcie->dev, "illegal table size %#llx\n",
++ (unsigned long long)table_size);
++ return -EINVAL;
++ }
+
+- table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
+- num * PCIE_ATR_TLB_SET_OFFSET;
++ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
++ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
++ writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
++ writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
++ writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+
+- writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
+- table);
+- writel_relaxed(upper_32_bits(cpu_addr),
+- table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+- writel_relaxed(lower_32_bits(pci_addr),
+- table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+- writel_relaxed(upper_32_bits(pci_addr),
+- table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
++ if (type == IORESOURCE_IO) {
++ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
++ range_type = "IO";
++ } else {
++ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
++ range_type = "MEM";
++ }
+
+- if (type == IORESOURCE_IO)
+- val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+- else
+- val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
++ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+
+- writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
++ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
++ range_type, *num, (unsigned long long)cpu_addr,
++ (unsigned long long)pci_addr, (unsigned long long)table_size);
++
++ cpu_addr += table_size;
++ pci_addr += table_size;
++ remaining -= table_size;
++ (*num)++;
++ }
++
++ if (remaining)
++ dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
++ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
+
+ return 0;
+ }
+@@ -380,30 +405,20 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
+ resource_size_t cpu_addr;
+ resource_size_t pci_addr;
+ resource_size_t size;
+- const char *range_type;
+
+- if (type == IORESOURCE_IO) {
++ if (type == IORESOURCE_IO)
+ cpu_addr = pci_pio_to_address(res->start);
+- range_type = "IO";
+- } else if (type == IORESOURCE_MEM) {
++ else if (type == IORESOURCE_MEM)
+ cpu_addr = res->start;
+- range_type = "MEM";
+- } else {
++ else
+ continue;
+- }
+
+ pci_addr = res->start - entry->offset;
+ size = resource_size(res);
+ err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
+- type, table_index);
++ type, &table_index);
+ if (err)
+ return err;
+-
+- dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+- range_type, table_index, (unsigned long long)cpu_addr,
+- (unsigned long long)pci_addr, (unsigned long long)size);
+-
+- table_index++;
+ }
+
+ return 0;
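
The rewritten mtk_pcie_set_trans_table() above no longer fails outright when a range exceeds one translation window: it carves the range into power-of-two windows, each further limited by the alignment of the current CPU address. A standalone sketch of just that splitting rule, under the same constraints (illustrative only, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Each window must be a power of two no larger than the alignment
     * of the current CPU address, so the range is consumed greedily. */
    static uint64_t next_window(uint64_t cpu_addr, uint64_t remaining)
    {
            /* Largest power of two <= remaining (fls(remaining) - 1). */
            uint64_t size = 1ULL << (63 - __builtin_clzll(remaining));
            /* cpu_addr & -cpu_addr is its lowest set bit (ffs - 1). */
            uint64_t align = cpu_addr ? (cpu_addr & -cpu_addr) : size;

            return size < align ? size : align;
    }

    int main(void)
    {
            uint64_t addr = 0x60003000, left = 0xa000;

            while (left) {
                    uint64_t sz = next_window(addr, left);

                    printf("window at %#llx size %#llx\n",
                           (unsigned long long)addr, (unsigned long long)sz);
                    addr += sz;
                    left -= sz;
            }
            return 0;
    }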
+diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
+index 66a8f73296fc8b..48372013f26d23 100644
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -617,12 +617,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
+ if (status & MSI_STATUS){
+ unsigned long imsi_status;
+
++ /*
++ * The interrupt status can be cleared even while the
++ * MSI status remains pending. Because this interrupt
++ * is edge-triggered, clear its status before the MSI
++ * is dispatched to the handler of the underlying
++ * device.
++ */
++ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
+ generic_handle_domain_irq(port->inner_domain, bit);
+ }
+- /* Clear MSI interrupt status */
+- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ }
+ }
+
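
The reordering in mtk_pcie_intr_handler() above matters because the MSI summary interrupt is edge-triggered: clearing PCIE_INT_STATUS only after draining PCIE_IMSI_STATUS leaves a window in which a newly arrived MSI raises no fresh edge and is lost. The shape of the ack-before-dispatch pattern, with hypothetical register and helper names standing in for the driver's:

    /* Schematic only: SUMMARY_BIT, INT_STATUS, SRC_STATUS, NUM_SRCS and
     * handle_source() are hypothetical stand-ins, not the driver's names. */
    static void demo_edge_handler(void __iomem *base)
    {
            unsigned long pending;
            unsigned int bit;

            /* Ack the summary edge first: a source that fires after the
             * final poll below then produces a fresh edge instead of
             * being silently dropped. */
            writel(SUMMARY_BIT, base + INT_STATUS);

            while ((pending = readl(base + SRC_STATUS)))
                    for_each_set_bit(bit, &pending, NUM_SRCS)
                            handle_source(bit);
    }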
+diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
+index 88975e40ee2fbf..704ab5d723a959 100644
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -77,7 +77,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ writel(L1IATN, pcie_base + PMCTLR);
+ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ val & L1FAEG, 10, 1000);
+- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
++ if (ret) {
++ dev_warn_ratelimited(pcie_dev,
++ "Timeout waiting for L1 link state, ret=%d\n",
++ ret);
++ }
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index 0af0e965fb57ea..1e3c3192d122cb 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+
+ /* All functions share the same vendor ID with function 0 */
+ if (fn == 0) {
+- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
+-
+- rockchip_pcie_write(rockchip, vid_regs,
++ rockchip_pcie_write(rockchip,
++ hdr->vendorid | hdr->subsys_vendor_id << 16,
+ PCIE_CORE_CONFIG_VENDOR);
+ }
+
+diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
+index 0ef2e622d36e1d..c07d7129f1c7c4 100644
+--- a/drivers/pci/controller/pcie-rockchip.c
++++ b/drivers/pci/controller/pcie-rockchip.c
+@@ -121,7 +121,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+
+ if (rockchip->is_rc) {
+ rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
+- GPIOD_OUT_HIGH);
++ GPIOD_OUT_LOW);
+ if (IS_ERR(rockchip->ep_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
+ "failed to get ep GPIO\n");
+diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
+index 176686bdb15c18..5b82098f32b7c7 100644
+--- a/drivers/pci/controller/pcie-xilinx-nwl.c
++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
+@@ -80,8 +80,8 @@
+ #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
+ #define MSGF_MISC_SR_FATAL_DEV BIT(23)
+ #define MSGF_MISC_SR_LINK_DOWN BIT(24)
+-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
+-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
++#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
++#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
+
+ #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
+ MSGF_MISC_SR_RXMSG_OVER | \
+@@ -96,8 +96,8 @@
+ MSGF_MISC_SR_NON_FATAL_DEV | \
+ MSGF_MISC_SR_FATAL_DEV | \
+ MSGF_MISC_SR_LINK_DOWN | \
+- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
+- MSGF_MSIC_SR_LINK_BWIDTH)
++ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
++ MSGF_MISC_SR_LINK_BWIDTH)
+
+ /* Legacy interrupt status mask bits */
+ #define MSGF_LEG_SR_INTA BIT(0)
+@@ -301,10 +301,10 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
+ if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
+ dev_err(dev, "Fatal Error Detected\n");
+
+- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
++ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
+ dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
+
+- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
++ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
+ dev_info(dev, "Link Bandwidth Management Status bit set\n");
+
+ /* Clear misc interrupt status */
+@@ -373,7 +373,7 @@ static void nwl_mask_leg_irq(struct irq_data *data)
+ u32 mask;
+ u32 val;
+
+- mask = 1 << (data->hwirq - 1);
++ mask = 1 << data->hwirq;
+ raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+ val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+ nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+@@ -387,7 +387,7 @@ static void nwl_unmask_leg_irq(struct irq_data *data)
+ u32 mask;
+ u32 val;
+
+- mask = 1 << (data->hwirq - 1);
++ mask = 1 << data->hwirq;
+ raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+ val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+ nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+@@ -790,6 +790,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
++ platform_set_drvdata(pdev, pcie);
+
+ pcie->dev = dev;
+ pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
+@@ -813,13 +814,13 @@ static int nwl_pcie_probe(struct platform_device *pdev)
+ err = nwl_pcie_bridge_init(pcie);
+ if (err) {
+ dev_err(dev, "HW Initialization failed\n");
+- return err;
++ goto err_clk;
+ }
+
+ err = nwl_pcie_init_irq_domain(pcie);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+- return err;
++ goto err_clk;
+ }
+
+ bridge->sysdata = pcie;
+@@ -829,11 +830,24 @@ static int nwl_pcie_probe(struct platform_device *pdev)
+ err = nwl_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
+- return err;
++ goto err_clk;
+ }
+ }
+
+- return pci_host_probe(bridge);
++ err = pci_host_probe(bridge);
++ if (!err)
++ return 0;
++
++err_clk:
++ clk_disable_unprepare(pcie->clk);
++ return err;
++}
++
++static void nwl_pcie_remove(struct platform_device *pdev)
++{
++ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
++
++ clk_disable_unprepare(pcie->clk);
+ }
+
+ static struct platform_driver nwl_pcie_driver = {
+@@ -843,5 +857,6 @@ static struct platform_driver nwl_pcie_driver = {
+ .of_match_table = nwl_pcie_of_match,
+ },
+ .probe = nwl_pcie_probe,
++ .remove_new = nwl_pcie_remove,
+ };
+ builtin_platform_driver(nwl_pcie_driver);
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index ad56df98b8e63d..6ac0afae0ca18c 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -525,8 +525,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, 0), 0);
+
+- hdr_type = readb(base + PCI_HEADER_TYPE) &
+- PCI_HEADER_TYPE_MASK;
++ hdr_type = readb(base + PCI_HEADER_TYPE);
+
+ functions = (hdr_type & 0x80) ? 8 : 1;
+ for (fn = 0; fn < functions; fn++) {
+@@ -752,7 +751,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+ if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
+ return 0;
+
+- pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
++ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
+ if (!pos)
+diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+index b7b9d3e21f97d3..34e7191f950867 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
++++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
+@@ -209,28 +209,28 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
+ vector + 1);
+ }
+
+-static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+- void *to, size_t size)
++static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
++ struct mhi_ep_buf_info *buf_info)
+ {
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+- size_t offset = get_align_offset(epf_mhi, from);
++ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
+- offset, size);
++ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
++ &tre_buf, offset, buf_info->size);
+ if (ret) {
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+- memcpy_fromio(to, tre_buf, size);
++ memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);
+
+- __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
+- size);
++ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
++ tre_buf, offset, buf_info->size);
+
+ mutex_unlock(&epf_mhi->lock);
+
+@@ -238,27 +238,27 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+ }
+
+ static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
+- void *from, u64 to, size_t size)
++ struct mhi_ep_buf_info *buf_info)
+ {
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+- size_t offset = get_align_offset(epf_mhi, to);
++ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
+- offset, size);
++ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
++ &tre_buf, offset, buf_info->size);
+ if (ret) {
+ mutex_unlock(&epf_mhi->lock);
+ return ret;
+ }
+
+- memcpy_toio(tre_buf, from, size);
++ memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
+
+- __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
+- size);
++ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
++ tre_buf, offset, buf_info->size);
+
+ mutex_unlock(&epf_mhi->lock);
+
+@@ -270,8 +270,8 @@ static void pci_epf_mhi_dma_callback(void *param)
+ complete(param);
+ }
+
+-static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+- void *to, size_t size)
++static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
++ struct mhi_ep_buf_info *buf_info)
+ {
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+@@ -284,13 +284,13 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+ dma_addr_t dst_addr;
+ int ret;
+
+- if (size < SZ_4K)
+- return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
++ if (buf_info->size < SZ_4K)
++ return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+- config.src_addr = from;
++ config.src_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+@@ -298,14 +298,16 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+ goto err_unlock;
+ }
+
+- dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
++ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
++ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+- desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
++ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
++ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+@@ -332,15 +334,15 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+ }
+
+ err_unmap:
+- dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
++ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+ err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+ }
+
+-static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
+- u64 to, size_t size)
++static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
++ struct mhi_ep_buf_info *buf_info)
+ {
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+@@ -353,13 +355,13 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
+ dma_addr_t src_addr;
+ int ret;
+
+- if (size < SZ_4K)
+- return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
++ if (buf_info->size < SZ_4K)
++ return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+- config.dst_addr = to;
++ config.dst_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+@@ -367,14 +369,16 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
+ goto err_unlock;
+ }
+
+- src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
++ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
++ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+- desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
++ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
++ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+@@ -401,7 +405,7 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
+ }
+
+ err_unmap:
+- dma_unmap_single(dma_dev, src_addr, size, DMA_FROM_DEVICE);
++ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+ err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+@@ -532,11 +536,11 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
+ mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
+ mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+ if (info->flags & MHI_EPF_USE_DMA) {
+- mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
+- mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
++ mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
++ mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
+ } else {
+- mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
+- mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
++ mhi_cntrl->read_sync = pci_epf_mhi_iatu_read;
++ mhi_cntrl->write_sync = pci_epf_mhi_iatu_write;
+ }
+
+ /* Register the MHI EP controller */
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index 3f60128560ed0f..3368f483f818df 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -810,8 +810,9 @@ static int epf_ntb_epc_init(struct epf_ntb *ntb)
+ */
+ static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
+ {
+- epf_ntb_db_bar_clear(ntb);
+ epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
++ epf_ntb_db_bar_clear(ntb);
++ epf_ntb_config_sspad_bar_clear(ntb);
+ }
+
+ #define EPF_NTB_R(_name) \
+@@ -1029,8 +1030,10 @@ static int vpci_scan_bus(void *sysdata)
+ struct epf_ntb *ndev = sysdata;
+
+ vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
+- if (vpci_bus)
+- pr_err("create pci bus\n");
++ if (!vpci_bus) {
++ pr_err("create pci bus failed\n");
++ return -EINVAL;
++ }
+
+ pci_bus_add_devices(vpci_bus);
+
+@@ -1278,15 +1281,11 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ ret = ntb_register_device(&ndev->ntb);
+ if (ret) {
+ dev_err(dev, "Failed to register NTB device\n");
+- goto err_register_dev;
++ return ret;
+ }
+
+ dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
+ return 0;
+-
+-err_register_dev:
+- put_device(&ndev->ntb.dev);
+- return -EINVAL;
+ }
+
+ static struct pci_device_id pci_vntb_table[] = {
+@@ -1353,13 +1352,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
+ ret = pci_register_driver(&vntb_pci_driver);
+ if (ret) {
+ dev_err(dev, "failure register vntb pci driver\n");
+- goto err_bar_alloc;
++ goto err_epc_cleanup;
+ }
+
+- vpci_scan_bus(ntb);
++ ret = vpci_scan_bus(ntb);
++ if (ret)
++ goto err_unregister;
+
+ return 0;
+
++err_unregister:
++ pci_unregister_driver(&vntb_pci_driver);
++err_epc_cleanup:
++ epf_ntb_epc_cleanup(ntb);
+ err_bar_alloc:
+ epf_ntb_config_spad_bar_free(ntb);
+
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 5a4a8b0be62625..a7d3a92391a418 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -869,7 +869,6 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+
+ put_dev:
+ put_device(&epc->dev);
+- kfree(epc);
+
+ err_ret:
+ return ERR_PTR(ret);
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 601129772b2d50..5b1f271c6034be 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -512,15 +512,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+- if (pci_is_root_bus(bus))
+- __pci_bus_size_bridges(dev->subordinate, &add_list);
++ __pci_bus_size_bridges(dev->subordinate,
++ &add_list);
+ }
+ }
+ }
+- if (pci_is_root_bus(bus))
+- __pci_bus_assign_resources(bus, &add_list, NULL);
+- else
+- pci_assign_unassigned_bridge_resources(bus->self);
++ __pci_bus_assign_resources(bus, &add_list, NULL);
+ }
+
+ acpiphp_sanitize_bus(bus);
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index 881d420637bf1e..092c9ac0d26d27 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -39,7 +39,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ bool disable_device)
+ {
+ struct pci_dev *pdev = php_slot->pdev;
+- int irq = php_slot->irq;
+ u16 ctrl;
+
+ if (php_slot->irq > 0) {
+@@ -58,7 +57,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ php_slot->wq = NULL;
+ }
+
+- if (disable_device || irq > 0) {
++ if (disable_device) {
+ if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
+ else if (pdev->msi_enabled)
+diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
+index c8be056c248ded..cfd84a899c82d8 100644
+--- a/drivers/pci/msi/irqdomain.c
++++ b/drivers/pci/msi/irqdomain.c
+@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
+
+ return (irq_hw_number_t)desc->msi_index |
+ pci_dev_id(dev) << 11 |
+- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
++ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
+ }
+
+ static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
+index ef1d8857a51ba6..2d117cb74832be 100644
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -348,7 +348,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+ {
+ struct irq_affinity_desc *masks = NULL;
+- struct msi_desc *entry;
++ struct msi_desc *entry, desc;
+ int ret;
+
+ /* Reject multi-MSI early on irq domain enabled architectures */
+@@ -373,6 +373,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ /* All MSIs are unmasked by default; mask them all */
+ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ pci_msi_mask(entry, msi_multi_mask(entry));
++ /*
++ * Copy the MSI descriptor for the error path because
++ * pci_msi_setup_msi_irqs() will free it for the hierarchical
++ * interrupt domain case.
++ */
++ memcpy(&desc, entry, sizeof(desc));
+
+ /* Configure MSI capability structure */
+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+@@ -392,7 +398,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ goto unlock;
+
+ err:
+- pci_msi_unmask(entry, msi_multi_mask(entry));
++ pci_msi_unmask(&desc, msi_multi_mask(&desc));
+ pci_free_msi_irqs(dev);
+ fail:
+ dev->msi_enabled = 0;
+diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
+index c2c7334152bc05..03539e5053720f 100644
+--- a/drivers/pci/of_property.c
++++ b/drivers/pci/of_property.c
+@@ -238,6 +238,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
+ return 0;
+
+ int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
++ if (!int_map)
++ return -ENOMEM;
+ mapp = int_map;
+
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
+index fa7370f9561ac8..b8becc7257cda8 100644
+--- a/drivers/pci/p2pdma.c
++++ b/drivers/pci/p2pdma.c
+@@ -661,7 +661,7 @@ calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
+ p2pdma = rcu_dereference(provider->p2pdma);
+ if (p2pdma)
+ xa_store(&p2pdma->map_types, map_types_idx(client),
+- xa_mk_value(map_type), GFP_KERNEL);
++ xa_mk_value(map_type), GFP_ATOMIC);
+ rcu_read_unlock();
+ return map_type;
+ }
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a05350a4e49cb4..05b7357bd25861 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -911,7 +911,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+ {
+ int acpi_state, d_max;
+
+- if (pdev->no_d3cold)
++ if (pdev->no_d3cold || !pdev->d3cold_allowed)
+ d_max = ACPI_STATE_D3_HOT;
+ else
+ d_max = ACPI_STATE_D3_COLD;
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 51ec9e7e784f0e..9c59bf03d6579f 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev)
+
+ if (drv->remove) {
+ pm_runtime_get_sync(dev);
++ /*
++ * If the driver provides a .runtime_idle() callback and it has
++ * started to run already, it may continue to run in parallel
++ * with the code below, so wait until all of the runtime PM
++ * activity has completed.
++ */
++ pm_runtime_barrier(dev);
+ drv->remove(pci_dev);
+ pm_runtime_put_noidle(dev);
+ }
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index d9eede2dbc0e13..3317b93547167c 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -12,7 +12,7 @@
+ * Modeled after usb's driverfs.c
+ */
+
+-
++#include <linux/bitfield.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/pci.h>
+@@ -230,8 +230,7 @@ static ssize_t current_link_width_show(struct device *dev,
+ if (err)
+ return -EINVAL;
+
+- return sysfs_emit(buf, "%u\n",
+- (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
++ return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
+ }
+ static DEVICE_ATTR_RO(current_link_width);
+
+@@ -530,10 +529,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
+ return -EINVAL;
+
+ pdev->d3cold_allowed = !!val;
+- if (pdev->d3cold_allowed)
+- pci_d3cold_enable(pdev);
+- else
+- pci_d3cold_disable(pdev);
++ pci_bridge_d3_update(pdev);
+
+ pm_runtime_resume(dev);
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 59c01d68c6d5ed..93f2f4dcf6d696 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -732,15 +732,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+ {
+ u16 vsec = 0;
+ u32 header;
++ int ret;
+
+ if (vendor != dev->vendor)
+ return 0;
+
+ while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+- if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+- &header) == PCIBIOS_SUCCESSFUL &&
+- PCI_VNDR_HEADER_ID(header) == cap)
++ ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
++ if (ret != PCIBIOS_SUCCESSFUL)
++ continue;
++
++ if (PCI_VNDR_HEADER_ID(header) == cap)
+ return vsec;
+ }
+
+@@ -1187,6 +1190,11 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ for (;;) {
+ u32 id;
+
++ if (pci_dev_is_disconnected(dev)) {
++ pci_dbg(dev, "disconnected; not waiting\n");
++ return -ENOTTY;
++ }
++
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ if (!PCI_POSSIBLE_ERROR(id))
+ break;
+@@ -1200,7 +1208,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ if (delay > PCI_RESET_WAIT) {
+ if (retrain) {
+ retrain = false;
+- if (pcie_failed_link_retrain(bridge)) {
++ if (pcie_failed_link_retrain(bridge) == 0) {
+ delay = 1;
+ continue;
+ }
+@@ -1288,6 +1296,7 @@ int pci_power_up(struct pci_dev *dev)
+ /**
+ * pci_set_full_power_state - Put a PCI device into D0 and update its state
+ * @dev: PCI device to power up
++ * @locked: whether pci_bus_sem is held
+ *
+ * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
+ * to confirm the state change, restore its BARs if they might be lost and
+@@ -1297,7 +1306,7 @@ int pci_power_up(struct pci_dev *dev)
+ * to D0, it is more efficient to use pci_power_up() directly instead of this
+ * function.
+ */
+-static int pci_set_full_power_state(struct pci_dev *dev)
++static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
+ {
+ u16 pmcsr;
+ int ret;
+@@ -1332,6 +1341,9 @@ static int pci_set_full_power_state(struct pci_dev *dev)
+ pci_restore_bars(dev);
+ }
+
++ if (dev->bus->self)
++ pcie_aspm_pm_state_change(dev->bus->self, locked);
++
+ return 0;
+ }
+
+@@ -1359,10 +1371,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+ }
+
++static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
++{
++ if (!bus)
++ return;
++
++ if (locked)
++ pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
++ else
++ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
++}
++
+ /**
+ * pci_set_low_power_state - Put a PCI device into a low-power state.
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D1, D2, D3hot) to put the device into.
++ * @locked: whether pci_bus_sem is held
+ *
+ * Use the device's PCI_PM_CTRL register to put it into a low-power state.
+ *
+@@ -1373,7 +1397,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
++static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
+ {
+ u16 pmcsr;
+
+@@ -1426,27 +1450,13 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+
++ if (dev->bus->self)
++ pcie_aspm_pm_state_change(dev->bus->self, locked);
++
+ return 0;
+ }
+
+-/**
+- * pci_set_power_state - Set the power state of a PCI device
+- * @dev: PCI device to handle.
+- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+- *
+- * Transition a device to a new power state, using the platform firmware and/or
+- * the device's PCI PM registers.
+- *
+- * RETURN VALUE:
+- * -EINVAL if the requested state is invalid.
+- * -EIO if device does not support PCI PM or its PM capabilities register has a
+- * wrong version, or device doesn't support the requested state.
+- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+- * 0 if device already is in the requested state.
+- * 0 if the transition is to D3 but D3 is not supported.
+- * 0 if device's power state has been successfully changed.
+- */
+-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
++static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
+ {
+ int error;
+
+@@ -1470,7 +1480,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ return 0;
+
+ if (state == PCI_D0)
+- return pci_set_full_power_state(dev);
++ return pci_set_full_power_state(dev, locked);
+
+ /*
+ * This device is quirked not to be put into D3, so don't put it in
+@@ -1484,16 +1494,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ * To put the device in D3cold, put it into D3hot in the native
+ * way, then put it into D3cold using platform ops.
+ */
+- error = pci_set_low_power_state(dev, PCI_D3hot);
++ error = pci_set_low_power_state(dev, PCI_D3hot, locked);
+
+ if (pci_platform_power_transition(dev, PCI_D3cold))
+ return error;
+
+ /* Powering off a bridge may power off the whole hierarchy */
+ if (dev->current_state == PCI_D3cold)
+- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
++ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
+ } else {
+- error = pci_set_low_power_state(dev, state);
++ error = pci_set_low_power_state(dev, state, locked);
+
+ if (pci_platform_power_transition(dev, state))
+ return error;
+@@ -1501,8 +1511,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+
+ return 0;
+ }
++
++/**
++ * pci_set_power_state - Set the power state of a PCI device
++ * @dev: PCI device to handle.
++ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
++ *
++ * Transition a device to a new power state, using the platform firmware and/or
++ * the device's PCI PM registers.
++ *
++ * RETURN VALUE:
++ * -EINVAL if the requested state is invalid.
++ * -EIO if device does not support PCI PM or its PM capabilities register has a
++ * wrong version, or device doesn't support the requested state.
++ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
++ * 0 if device already is in the requested state.
++ * 0 if the transition is to D3 but D3 is not supported.
++ * 0 if device's power state has been successfully changed.
++ */
++int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
++{
++ return __pci_set_power_state(dev, state, false);
++}
+ EXPORT_SYMBOL(pci_set_power_state);
+
++int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
++{
++ lockdep_assert_held(&pci_bus_sem);
++
++ return __pci_set_power_state(dev, state, true);
++}
++EXPORT_SYMBOL(pci_set_power_state_locked);
++
+ #define PCI_EXP_SAVE_REGS 7
+
+ static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
+@@ -2425,29 +2465,36 @@ static void pci_pme_list_scan(struct work_struct *work)
+ if (pdev->pme_poll) {
+ struct pci_dev *bridge = pdev->bus->self;
+ struct device *dev = &pdev->dev;
+- int pm_status;
++ struct device *bdev = bridge ? &bridge->dev : NULL;
++ int bref = 0;
+
+ /*
+- * If bridge is in low power state, the
+- * configuration space of subordinate devices
+- * may be not accessible
++ * If we have a bridge, it should be in an active/D0
++ * state or the configuration space of subordinate
++ * devices may not be accessible or stable over the
++ * course of the call.
+ */
+- if (bridge && bridge->current_state != PCI_D0)
+- continue;
++ if (bdev) {
++ bref = pm_runtime_get_if_active(bdev, true);
++ if (!bref)
++ continue;
++
++ if (bridge->current_state != PCI_D0)
++ goto put_bridge;
++ }
+
+ /*
+- * If the device is in a low power state it
+- * should not be polled either.
++ * The device itself should be suspended but config
++ * space must be accessible, therefore it cannot be in
++ * D3cold.
+ */
+- pm_status = pm_runtime_get_if_active(dev, true);
+- if (!pm_status)
+- continue;
+-
+- if (pdev->current_state != PCI_D3cold)
++ if (pm_runtime_suspended(dev) &&
++ pdev->current_state != PCI_D3cold)
+ pci_pme_wakeup(pdev, NULL);
+
+- if (pm_status > 0)
+- pm_runtime_put(dev);
++put_bridge:
++ if (bref > 0)
++ pm_runtime_put(bdev);
+ } else {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+@@ -2998,6 +3045,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
+ DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
+ },
+ },
++ {
++ /*
++ * Changing the power state of the root port a dGPU is connected to fails
++ * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
++ */
++ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BOARD_NAME, "1972"),
++ DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
++ },
++ },
+ #endif
+ { }
+ };
+@@ -3752,14 +3811,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+- cap &= PCI_REBAR_CAP_SIZES;
++ cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+- bar == 0 && cap == 0x7000)
+- cap = 0x3f000;
++ bar == 0 && cap == 0x700)
++ return 0x3f00;
+
+- return cap >> 4;
++ return cap;
+ }
+ EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
+
+@@ -4944,7 +5003,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
+ * avoid LTSSM race as recommended in Implementation Note at the
+ * end of PCIe r6.0.1 sec 7.5.3.7.
+ */
+- rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
++ rc = pcie_wait_for_link_status(pdev, true, false);
+ if (rc)
+ return rc;
+
+@@ -4958,7 +5017,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
+ }
+
+- return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
++ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
++
++ /*
++ * Clear LBMS after a manual retrain so that the bit can be used
++ * to track link speed or width changes made by hardware itself
++ * in an attempt to correct unreliable link operation.
++ */
++ pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
++ return rc;
+ }
+
+ /**
+@@ -5064,7 +5131,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+ */
+ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
+ {
+- struct pci_dev *child;
++ struct pci_dev *child __free(pci_dev_put) = NULL;
+ int delay;
+
+ if (pci_dev_is_disconnected(dev))
+@@ -5093,8 +5160,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
+ return 0;
+ }
+
+- child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+- bus_list);
++ child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
++ struct pci_dev, bus_list));
+ up_read(&pci_bus_sem);
+
+ /*
+@@ -5659,10 +5726,12 @@ static void pci_bus_lock(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++ pci_dev_lock(bus->self);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
++ else
++ pci_dev_lock(dev);
+ }
+ }
+
+@@ -5674,8 +5743,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
++ pci_dev_unlock(bus->self);
+ }
+
+ /* Return 1 on successful lock, 0 on contention */
+@@ -5683,15 +5754,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
++ if (!pci_dev_trylock(bus->self))
++ return 0;
++
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- if (!pci_dev_trylock(dev))
+- goto unlock;
+ if (dev->subordinate) {
+- if (!pci_bus_trylock(dev->subordinate)) {
+- pci_dev_unlock(dev);
++ if (!pci_bus_trylock(dev->subordinate))
+ goto unlock;
+- }
+- }
++ } else if (!pci_dev_trylock(dev))
++ goto unlock;
+ }
+ return 1;
+
+@@ -5699,8 +5770,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
++ pci_dev_unlock(bus->self);
+ return 0;
+ }
+
+@@ -5732,9 +5805,10 @@ static void pci_slot_lock(struct pci_slot *slot)
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
++ else
++ pci_dev_lock(dev);
+ }
+ }
+
+@@ -5760,14 +5834,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- if (!pci_dev_trylock(dev))
+- goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+- }
++ } else if (!pci_dev_trylock(dev))
++ goto unlock;
+ }
+ return 1;
+
+@@ -5778,7 +5851,8 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+- pci_dev_unlock(dev);
++ else
++ pci_dev_unlock(dev);
+ }
+ return 0;
+ }
+@@ -5809,8 +5883,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_restore(dev);
+- if (dev->subordinate)
++ if (dev->subordinate) {
++ pci_bridge_wait_for_secondary_bus(dev, "bus reset");
+ pci_bus_restore_locked(dev->subordinate);
++ }
+ }
+ }
+
+@@ -5844,8 +5920,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_restore(dev);
+- if (dev->subordinate)
++ if (dev->subordinate) {
++ pci_bridge_wait_for_secondary_bus(dev, "slot reset");
+ pci_bus_restore_locked(dev->subordinate);
++ }
+ }
+ }
+
+@@ -6257,8 +6335,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+
+ next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+
+@@ -6330,7 +6407,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap)
+- return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+
+ return PCIE_LNK_WIDTH_UNKNOWN;
+ }
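
The pci_bridge_wait_for_secondary_bus() hunk above pairs pci_dev_get() with the scope-based cleanup annotation __free(pci_dev_put), so the reference is dropped automatically on every exit path. The kernel macro (<linux/cleanup.h>) is built on the compiler's __attribute__((cleanup)); a small userspace approximation of the same mechanism (names below are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void free_charp(char **p)
    {
            free(*p);  /* free(NULL) is a no-op, so early exits are safe */
    }
    #define __free_heap __attribute__((cleanup(free_charp)))

    int main(void)
    {
            char *buf __free_heap = malloc(32);

            if (!buf)
                    return 1;  /* nothing extra to unwind */
            strcpy(buf, "released on scope exit");
            puts(buf);
            return 0;  /* free_charp(&buf) runs automatically here */
    }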
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 39a8932dc340c6..d5e9010a135a14 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -269,7 +269,7 @@ void pci_bus_put(struct pci_bus *bus);
+
+ /* PCIe speed to Mb/s reduced by encoding overhead */
+ #define PCIE_SPEED2MBS_ENC(speed) \
+- ((speed) == PCIE_SPEED_64_0GT ? 64000*128/130 : \
++ ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \
+ (speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
+ (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
+ (speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
+@@ -363,11 +363,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
+ return 0;
+ }
+
+-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+-{
+- return dev->error_state == pci_channel_io_perm_failure;
+-}
+-
+ /* pci_dev priv_flags */
+ #define PCI_DEV_ADDED 0
+ #define PCI_DPC_RECOVERED 1
+@@ -535,7 +530,7 @@ void pci_acs_init(struct pci_dev *dev);
+ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
+ int pci_dev_specific_enable_acs(struct pci_dev *dev);
+ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
+-bool pcie_failed_link_retrain(struct pci_dev *dev);
++int pcie_failed_link_retrain(struct pci_dev *dev);
+ #else
+ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
+ u16 acs_flags)
+@@ -550,9 +545,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ {
+ return -ENOTTY;
+ }
+-static inline bool pcie_failed_link_retrain(struct pci_dev *dev)
++static inline int pcie_failed_link_retrain(struct pci_dev *dev)
+ {
+- return false;
++ return -ENOTTY;
+ }
+ #endif
+
+@@ -566,10 +561,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
+ #ifdef CONFIG_PCIEASPM
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+ void pcie_aspm_exit_link_state(struct pci_dev *pdev);
++void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+ #else
+ static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
+ static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
++static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
+ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+ #endif
+
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index 9c8fd69ae5ad8e..c9afe43628356c 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -29,6 +29,7 @@
+ #include <linux/kfifo.h>
+ #include <linux/slab.h>
+ #include <acpi/apei.h>
++#include <acpi/ghes.h>
+ #include <ras/ras_event.h>
+
+ #include "../pci.h"
+@@ -739,7 +740,7 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
+ u8 bus = info->id >> 8;
+ u8 devfn = info->id & 0xff;
+
+- pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
++ pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
+@@ -927,7 +928,12 @@ static bool find_source_device(struct pci_dev *parent,
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+
+ if (!e_info->error_dev_num) {
+- pci_info(parent, "can't find device of ID%04x\n", e_info->id);
++ u8 bus = e_info->id >> 8;
++ u8 devfn = e_info->id & 0xff;
++
++ pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
++ pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
++ PCI_FUNC(devfn));
+ return false;
+ }
+ return true;
+@@ -997,6 +1003,15 @@ static void aer_recover_work_func(struct work_struct *work)
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
++ /*
++ * Memory for aer_capability_regs (entry.regs) is allocated from the
++ * ghes_estatus_pool so that it is not overwritten when multiple sections
++ * are present in the error status. Free it here once the data has been
++ * processed.
++ */
++ ghes_estatus_pool_region_free((unsigned long)entry.regs,
++ sizeof(struct aer_capability_regs));
++
+ if (entry.severity == AER_NONFATAL)
+ pcie_do_recovery(pdev, pci_channel_io_normal,
+ aer_root_reset);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 1bf63005926447..0aef6dc055b922 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -689,10 +689,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
+ * in pcie_config_aspm_link().
+ */
+ if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
+- pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+- PCI_EXP_LNKCTL_ASPM_L1, 0);
+- pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+- PCI_EXP_LNKCTL_ASPM_L1, 0);
++ pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_ASPM_L1);
++ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
++ PCI_EXP_LNKCTL_ASPM_L1);
+ }
+
+ val = 0;
+@@ -1001,6 +1001,30 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ up_read(&pci_bus_sem);
+ }
+
++/*
++ * @pdev: the root port or switch downstream port
++ * @locked: whether pci_bus_sem is held
++ */
++void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
++{
++ struct pcie_link_state *link = pdev->link_state;
++
++ if (aspm_disabled || !link)
++ return;
++ /*
++ * Devices changed PM state; recheck whether the latency
++ * still meets all functions' requirements.
++ */
++ if (!locked)
++ down_read(&pci_bus_sem);
++ mutex_lock(&aspm_lock);
++ pcie_update_aspm_capable(link->root);
++ pcie_config_aspm_path(link);
++ mutex_unlock(&aspm_lock);
++ if (!locked)
++ up_read(&pci_bus_sem);
++}
++
+ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+ {
+ struct pcie_link_state *link = pdev->link_state;
+@@ -1059,7 +1083,8 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_disable |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+- link->aspm_disable |= ASPM_STATE_L1;
++ /* L1 PM substates require L1 */
++ link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1_1)
+ link->aspm_disable |= ASPM_STATE_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+@@ -1101,17 +1126,7 @@ int pci_disable_link_state(struct pci_dev *pdev, int state)
+ }
+ EXPORT_SYMBOL(pci_disable_link_state);
+
+-/**
+- * pci_enable_link_state - Clear and set the default device link state so that
+- * the link may be allowed to enter the specified states. Note that if the
+- * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
+- * touch the LNKCTL register. Also note that this does not enable states
+- * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+- *
+- * @pdev: PCI device
+- * @state: Mask of ASPM link states to enable
+- */
+-int pci_enable_link_state(struct pci_dev *pdev, int state)
++static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
+ {
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+
+@@ -1128,7 +1143,8 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
+ return -EPERM;
+ }
+
+- down_read(&pci_bus_sem);
++ if (!locked)
++ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ link->aspm_default = 0;
+ if (state & PCIE_LINK_STATE_L0S)
+@@ -1149,12 +1165,48 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
+ link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ mutex_unlock(&aspm_lock);
+- up_read(&pci_bus_sem);
++ if (!locked)
++ up_read(&pci_bus_sem);
+
+ return 0;
+ }
++
++/**
++ * pci_enable_link_state - Clear and set the default device link state so that
++ * the link may be allowed to enter the specified states. Note that if the
++ * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
++ * touch the LNKCTL register. Also note that this does not enable states
++ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
++ *
++ * @pdev: PCI device
++ * @state: Mask of ASPM link states to enable
++ */
++int pci_enable_link_state(struct pci_dev *pdev, int state)
++{
++ return __pci_enable_link_state(pdev, state, false);
++}
+ EXPORT_SYMBOL(pci_enable_link_state);
+
++/**
++ * pci_enable_link_state_locked - Clear and set the default device link state
++ * so that the link may be allowed to enter the specified states. Note that if
++ * the BIOS didn't grant ASPM control to the OS, this does nothing because we
++ * can't touch the LNKCTL register. Also note that this does not enable states
++ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
++ *
++ * @pdev: PCI device
++ * @state: Mask of ASPM link states to enable
++ *
++ * Context: Caller holds pci_bus_sem read lock.
++ */
++int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
++{
++ lockdep_assert_held_read(&pci_bus_sem);
++
++ return __pci_enable_link_state(pdev, state, true);
++}
++EXPORT_SYMBOL(pci_enable_link_state_locked);
++
+ static int pcie_aspm_set_policy(const char *val,
+ const struct kernel_param *kp)
+ {
+@@ -1247,6 +1299,8 @@ static ssize_t aspm_attr_store_common(struct device *dev,
+ link->aspm_disable &= ~ASPM_STATE_L1;
+ } else {
+ link->aspm_disable |= state;
++ if (state & ASPM_STATE_L1)
++ link->aspm_disable |= ASPM_STATE_L1SS;
+ }
+
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index 3ceed8e3de4167..a5cec2a4e057d1 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -9,6 +9,7 @@
+ #define dev_fmt(fmt) "DPC: " fmt
+
+ #include <linux/aer.h>
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+@@ -202,7 +203,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
+
+ /* Get First Error Pointer */
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
+- first_error = (dpc_status & 0x1f00) >> 8;
++ first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);
+
+ for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
+ if ((status & ~mask) & (1 << i))
+@@ -230,7 +231,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
+
+ for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
+ pci_read_config_dword(pdev,
+- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
++ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
+ pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
+ }
+ clear_status:
+@@ -338,7 +339,7 @@ void pci_dpc_init(struct pci_dev *pdev)
+ /* Quirks may set dpc_rp_log_size if device or firmware is buggy */
+ if (!pdev->dpc_rp_log_size) {
+ pdev->dpc_rp_log_size =
+- (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
++ FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
+ if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
+ pdev->dpc_rp_log_size);
+diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
+index 5f4914d313a174..e86298dbbcff60 100644
+--- a/drivers/pci/pcie/edr.c
++++ b/drivers/pci/pcie/edr.c
+@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
+ int status = 0;
+
+ /*
+- * Behavior when calling unsupported _DSM functions is undefined,
+- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
++ * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
++ * optional. Return success if it's not implemented.
+ */
+- if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
++ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
+ 1ULL << EDR_PORT_DPC_ENABLE_DSM))
+ return 0;
+
+@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
+ argv4.package.count = 1;
+ argv4.package.elements = &req;
+
+- /*
+- * Per Downstream Port Containment Related Enhancements ECN to PCI
+- * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+- * optional. Return success if it's not implemented.
+- */
+- obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
++ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
+ EDR_PORT_DPC_ENABLE_DSM, &argv4);
+ if (!obj)
+ return 0;
+@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+ u16 port;
+
+ /*
+- * Behavior when calling unsupported _DSM functions is undefined,
+- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
++ * If EDR_PORT_LOCATE_DSM is not implemented under the target of
++ * EDR, the target is the port that experienced the containment
++ * event (PCI Firmware r3.3, sec 4.6.13).
+ */
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 1ULL << EDR_PORT_LOCATE_DSM))
+@@ -103,6 +99,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+ return NULL;
+ }
+
++ /*
++ * Bit 31 represents the success/failure of the operation. If bit
++ * 31 is set, the operation failed.
++ */
++ if (obj->integer.value & BIT(31)) {
++ ACPI_FREE(obj);
++ pci_err(pdev, "Locate Port _DSM failed\n");
++ return NULL;
++ }
++
+ /*
+ * Firmware returns DPC port BDF details in following format:
+ * 15:8 = bus
+diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
+index 59c90d04a609af..705893b5f7b09b 100644
+--- a/drivers/pci/pcie/err.c
++++ b/drivers/pci/pcie/err.c
+@@ -13,6 +13,7 @@
+ #define dev_fmt(fmt) "AER: " fmt
+
+ #include <linux/pci.h>
++#include <linux/pm_runtime.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
+ return 0;
+ }
+
++static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
++{
++ pm_runtime_get_sync(&pdev->dev);
++ return 0;
++}
++
++static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
++{
++ pm_runtime_put(&pdev->dev);
++ return 0;
++}
++
+ static int report_frozen_detected(struct pci_dev *dev, void *data)
+ {
+ return report_error_detected(dev, pci_channel_io_frozen, data);
+@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ else
+ bridge = pci_upstream_bridge(dev);
+
++ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
++
+ pci_dbg(bridge, "broadcast error_detected message\n");
+ if (state == pci_channel_io_frozen) {
+ pci_walk_bridge(bridge, report_frozen_detected, &status);
+@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ pcie_clear_device_status(dev);
+ pci_aer_clear_nonfatal_status(dev);
+ }
++
++ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
++
+ pci_info(bridge, "device recovery successful\n");
+ return status;
+
+ failed:
++ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
++
+ pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+
+ /* TODO: Should kernel panic here? */
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 795534589b9850..43159965e09e93 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1652,15 +1652,15 @@ static void pci_set_removable(struct pci_dev *dev)
+ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+ {
+ #ifdef CONFIG_PCI_QUIRKS
+- int pos;
++ int pos, ret;
+ u32 header, tmp;
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+ for (pos = PCI_CFG_SPACE_SIZE;
+ pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+- if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+- || header != tmp)
++ ret = pci_read_config_dword(dev, pos, &tmp);
++ if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
+ return false;
+ }
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index eeec1d6f90238e..54061b65a2b721 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -66,7 +66,7 @@
+ * apply this erratum workaround to any downstream ports as long as they
+ * support Link Active reporting and have the Link Control 2 register.
+ * Restrict the speed to 2.5GT/s then with the Target Link Speed field,
+- * request a retrain and wait 200ms for the data link to go up.
++ * request a retrain and check the result.
+ *
+ * If this turns out successful and we know by the Vendor:Device ID it is
+ * safe to do so, then lift the restriction, letting the devices negotiate
+@@ -74,33 +74,45 @@
+ * firmware may have already arranged and lift it with ports that already
+ * report their data link being up.
+ *
+- * Return TRUE if the link has been successfully retrained, otherwise FALSE.
++ * Otherwise revert the speed to the original setting and request a retrain
++ * again to remove any residual state, ignoring the result as it's supposed
++ * to fail anyway.
++ *
++ * Return 0 if the link has been successfully retrained. Return an error
++ * if retraining was not needed or we attempted a retrain and it failed.
+ */
+-bool pcie_failed_link_retrain(struct pci_dev *dev)
++int pcie_failed_link_retrain(struct pci_dev *dev)
+ {
+ static const struct pci_device_id ids[] = {
+ { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */
+ {}
+ };
+ u16 lnksta, lnkctl2;
++ int ret = -ENOTTY;
+
+ if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
+ !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
+- return false;
++ return ret;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) ==
+ PCI_EXP_LNKSTA_LBMS) {
++ u16 oldlnkctl2 = lnkctl2;
++
+ pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
+
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
+ lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
+
+- if (pcie_retrain_link(dev, false)) {
++ ret = pcie_retrain_link(dev, false);
++ if (ret) {
+ pci_info(dev, "retraining failed\n");
+- return false;
++ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2,
++ oldlnkctl2);
++ pcie_retrain_link(dev, true);
++ return ret;
+ }
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+@@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev)
+ lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS;
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
+
+- if (pcie_retrain_link(dev, false)) {
++ ret = pcie_retrain_link(dev, false);
++ if (ret) {
+ pci_info(dev, "retraining failed\n");
+- return false;
++ return ret;
+ }
+ }
+
+- return true;
++ return ret;
+ }
+
+ static ktime_t fixup_debug_start(struct pci_dev *dev,
+@@ -690,7 +703,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
+ /*
+ * In the AMD NL platform, this device ([1022:7912]) has a class code of
+ * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
+- * claim it.
++ * claim it. The same applies on the VanGogh platform device ([1022:163a]).
+ *
+ * But the dwc3 driver is a more specific driver for this device, and we'd
+ * prefer to use it instead of xhci. To prevent xhci from claiming the
+@@ -698,17 +711,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
+ * defines as "USB device (not host controller)". The dwc3 driver can then
+ * claim it based on its Vendor and Device ID.
+ */
+-static void quirk_amd_nl_class(struct pci_dev *pdev)
++static void quirk_amd_dwc_class(struct pci_dev *pdev)
+ {
+ u32 class = pdev->class;
+
+- /* Use "USB Device (not host controller)" class */
+- pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+- pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+- class, pdev->class);
++ if (class != PCI_CLASS_SERIAL_USB_DEVICE) {
++ /* Use "USB Device (not host controller)" class */
++ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
++ pci_info(pdev,
++ "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
++ class, pdev->class);
++ }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+- quirk_amd_nl_class);
++ quirk_amd_dwc_class);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
++ quirk_amd_dwc_class);
+
+ /*
+ * Synopsys USB 3.x host HAPS platform has a class code of
+@@ -3596,6 +3614,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
+ DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
+ quirk_broken_intx_masking);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2,
++ quirk_broken_intx_masking);
+
+ /*
+ * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
+@@ -3784,6 +3804,19 @@ static void quirk_no_pm_reset(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+
++/*
++ * Spectrum-{1,2,3,4} devices report that a D3hot->D0 transition causes a reset
++ * (i.e., they advertise NoSoftRst-). However, this transition does not have
++ * any effect on the device: It continues to be operational and network ports
++ * remain up. Advertising this support makes it seem as if a PM reset is viable
++ * for these devices. Mark it as unavailable to skip it when testing reset
++ * methods.
++ */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcb84, quirk_no_pm_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf6c, quirk_no_pm_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf70, quirk_no_pm_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset);
++
+ /*
+ * Thunderbolt controllers with broken MSI hotplug signaling:
+ * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
+@@ -4221,6 +4254,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+
++/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias);
++
+ static void quirk_dma_func1_alias(struct pci_dev *dev)
+ {
+ if (PCI_FUNC(dev->devfn) != 1)
+@@ -4553,9 +4590,9 @@ static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
+
+ pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
+ dev_name(&pdev->dev));
+- pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
+- PCI_EXP_DEVCTL_RELAX_EN |
+- PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
++ pcie_capability_clear_word(root_port, PCI_EXP_DEVCTL,
++ PCI_EXP_DEVCTL_RELAX_EN |
++ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
+
+ /*
+@@ -4697,17 +4734,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
+ * But the implementation could block peer-to-peer transactions between them
+ * and provide ACS-like functionality.
+ */
+-static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
++static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+ {
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENOTTY;
+
++ /*
++ * Future Zhaoxin Root Ports and Switch Downstream Ports will
++ * implement ACS capability in accordance with the PCIe Spec.
++ */
+ switch (dev->device) {
+ case 0x0710 ... 0x071e:
+ case 0x0721:
+- case 0x0723 ... 0x0732:
++ case 0x0723 ... 0x0752:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+@@ -5041,6 +5082,8 @@ static const struct pci_dev_acs_enabled {
+ /* QCOM QDF2xxx root ports */
+ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
++ /* QCOM SA8775P root port */
++ { PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs },
+ /* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
+ { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
+ /* Intel PCH root ports */
+@@ -5383,7 +5426,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ */
+ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ {
+- int pos, i = 0;
++ int pos, i = 0, ret;
+ u8 next_cap;
+ u16 reg16, *cap;
+ struct pci_cap_saved_state *state;
+@@ -5429,8 +5472,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+ pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+- if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+- PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
++ ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
++ if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
+ pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+ if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+@@ -5498,6 +5541,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
+
+ pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
+ }
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
+@@ -5507,6 +5551,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+
+ #ifdef CONFIG_PCI_ATS
++static void quirk_no_ats(struct pci_dev *pdev)
++{
++ pci_info(pdev, "disabling ATS\n");
++ pdev->ats_cap = 0;
++}
++
+ /*
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+@@ -5520,14 +5570,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ (pdev->subsystem_device == 0xce19 ||
+ pdev->subsystem_device == 0xcc10 ||
+ pdev->subsystem_device == 0xcc08))
+- goto no_ats;
+- else
+- return;
++ quirk_no_ats(pdev);
++ } else {
++ quirk_no_ats(pdev);
+ }
+-
+-no_ats:
+- pci_info(pdev, "disabling ATS\n");
+- pdev->ats_cap = 0;
+ }
+
+ /* AMD Stoney platform GPU */
+@@ -5550,6 +5596,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
++
++/*
++ * Intel IPU E2000 revisions before C0 implement incorrect endianness
++ * in ATS Invalidate Request message body. Disable ATS for those devices.
++ */
++static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
++{
++ if (pdev->revision < 0x20)
++ quirk_no_ats(pdev);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+
+ /* Freescale PCIe doesn't support MSI in RC mode */
+@@ -6154,7 +6219,7 @@ static void dpc_log_size(struct pci_dev *dev)
+ if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
+ return;
+
+- if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
++ if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
+ pci_info(dev, "Overriding RP PIO Log Size to 4\n");
+ dev->dpc_rp_log_size = 4;
+ }
+@@ -6175,6 +6240,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
+ #endif
+
+ /*
+@@ -6188,3 +6255,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
++
++/*
++ * Devices known to require a longer delay before first config space access
++ * after reset recovery or resume from D3cold:
++ *
++ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
++ */
++static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
++{
++ pdev->d3cold_delay = 1000;
++}
++DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+diff --git a/drivers/pci/search.c b/drivers/pci/search.c
+index b4c138a6ec025d..53840634fbfc2b 100644
+--- a/drivers/pci/search.c
++++ b/drivers/pci/search.c
+@@ -363,6 +363,37 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
+ }
+ EXPORT_SYMBOL(pci_get_class);
+
++/**
++ * pci_get_base_class - search for a PCI device by matching the base class code only
++ * @class: search for a PCI device with this base class code
++ * @from: Previous PCI device found in search, or %NULL for new search.
++ *
++ * Iterates through the list of known PCI devices. If a PCI device is found
++ * with a matching base class code, the reference count to the device is
++ * incremented. See pci_match_one_device() to figure out how this works.
++ * A new search is initiated by passing %NULL as the @from argument.
++ * Otherwise, if @from is not %NULL, the search continues from the next
++ * device on the global list. The reference count for @from is always
++ * decremented if it is not %NULL.
++ *
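++ * A minimal usage sketch (hypothetical caller): walk every device whose
++ * base class is "display controller"; each call drops the reference taken
++ * by the previous one, and the loop ends once no further match is found:
++ *
++ *	struct pci_dev *pdev = NULL;
++ *
++ *	while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev)))
++ *		dev_info(&pdev->dev, "display-class device\n");
++ *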
++ * Returns:
++ * A pointer to a matched PCI device, %NULL otherwise.
++ */
++struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from)
++{
++ struct pci_device_id id = {
++ .vendor = PCI_ANY_ID,
++ .device = PCI_ANY_ID,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .class_mask = 0xFF0000,
++ .class = class << 16,
++ };
++
++ return pci_get_dev_by_id(&id, from);
++}
++EXPORT_SYMBOL(pci_get_base_class);
++
+ /**
+ * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
+ * @ids: A pointer to a null terminated list of struct pci_device_id structures
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index dae490f2564177..5a143ad5fca249 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -820,11 +820,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+- if (size < old_size)
+- size = old_size;
+
+- size = ALIGN(max(size, add_size) + children_add_size, align);
+- return size;
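++	/* Grow for add_size and children, never shrink below old_size, align last */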
++ size = max(size, add_size) + children_add_size;
++ return ALIGN(max(size, old_size), align);
+ }
+
+ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 5b921387eca61a..5a4adf6c04cf89 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1308,13 +1308,6 @@ static void stdev_release(struct device *dev)
+ {
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+- if (stdev->dma_mrpc) {
+- iowrite32(0, &stdev->mmio_mrpc->dma_en);
+- flush_wc_buf(stdev);
+- writeq(0, &stdev->mmio_mrpc->dma_addr);
+- dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+- stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+- }
+ kfree(stdev);
+ }
+
+@@ -1358,7 +1351,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+ return ERR_PTR(-ENOMEM);
+
+ stdev->alive = true;
+- stdev->pdev = pdev;
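++	/* Hold a reference; dropped again in the probe error path and on remove */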
++ stdev->pdev = pci_dev_get(pdev);
+ INIT_LIST_HEAD(&stdev->mrpc_queue);
+ mutex_init(&stdev->mrpc_mutex);
+ stdev->mrpc_busy = 0;
+@@ -1391,6 +1384,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+ return stdev;
+
+ err_put:
++ pci_dev_put(stdev->pdev);
+ put_device(&stdev->dev);
+ return ERR_PTR(rc);
+ }
+@@ -1644,6 +1638,18 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
+ return 0;
+ }
+
++static void switchtec_exit_pci(struct switchtec_dev *stdev)
++{
++ if (stdev->dma_mrpc) {
++ iowrite32(0, &stdev->mmio_mrpc->dma_en);
++ flush_wc_buf(stdev);
++ writeq(0, &stdev->mmio_mrpc->dma_addr);
++ dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
++ stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
++ stdev->dma_mrpc = NULL;
++ }
++}
++
+ static int switchtec_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+@@ -1666,7 +1672,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
+ rc = switchtec_init_isr(stdev);
+ if (rc) {
+ dev_err(&stdev->dev, "failed to init isr.\n");
+- goto err_put;
++ goto err_exit_pci;
+ }
+
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+@@ -1687,6 +1693,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
+
+ err_devadd:
+ stdev_kill(stdev);
++err_exit_pci:
++ switchtec_exit_pci(stdev);
+ err_put:
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ put_device(&stdev->dev);
+@@ -1703,6 +1711,9 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
+ ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ dev_info(&stdev->dev, "unregistered.\n");
+ stdev_kill(stdev);
++ switchtec_exit_pci(stdev);
++ pci_dev_put(stdev->pdev);
++ stdev->pdev = NULL;
+ put_device(&stdev->dev);
+ }
+
+diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
+index 5658745c398f57..b33be1e63c98fb 100644
+--- a/drivers/pcmcia/cs.c
++++ b/drivers/pcmcia/cs.c
+@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
+ dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
+ skt->thread = NULL;
+ complete(&skt->thread_done);
++ put_device(&skt->dev);
+ return 0;
+ }
+ ret = pccard_sysfs_add_socket(&skt->dev);
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index d500e5dbbc3f5e..b4b8363d1de21f 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+ /* by default don't allow DMA */
+ p_dev->dma_mask = 0;
+ p_dev->dev.dma_mask = &p_dev->dma_mask;
+- dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+- if (!dev_name(&p_dev->dev))
+- goto err_free;
+ p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
+ if (!p_dev->devname)
+ goto err_free;
+@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
+
+ pcmcia_device_query(p_dev);
+
+- if (device_register(&p_dev->dev))
+- goto err_unreg;
++ dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
++ if (device_register(&p_dev->dev)) {
++ mutex_lock(&s->ops_mutex);
++ list_del(&p_dev->socket_device_list);
++ s->device_count--;
++ mutex_unlock(&s->ops_mutex);
++ put_device(&p_dev->dev);
++ return NULL;
++ }
+
+ return p_dev;
+
+diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
+index 1365eaa20ff49a..ff169124929cc2 100644
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
+ start = PCIBIOS_MIN_CARDBUS_IO;
+ end = ~0U;
+ } else {
+- unsigned long avail = root->end - root->start;
++ unsigned long avail = resource_size(root);
+ int i;
+ size = BRIDGE_MEM_MAX;
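++		/* resource_size() is end - start + 1; the checks below account for the extra 1 */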
+- if (size > avail/8) {
+- size = (avail+1)/8;
++ if (size > (avail - 1) / 8) {
++ size = avail / 8;
+ /* round size down to next power of 2 */
+ i = 0;
+ while ((size /= 2) != 0)
+diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
+index 19d459a36be55c..818ce4424d34dc 100644
+--- a/drivers/perf/alibaba_uncore_drw_pmu.c
++++ b/drivers/perf/alibaba_uncore_drw_pmu.c
+@@ -408,7 +408,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
+ }
+
+ /* clear common counter intr status */
+- clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
++ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
+ writel(clr_status,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
+ }
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 6b50bc55198462..0b3ce77136456a 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -24,14 +24,6 @@
+ #define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
+ #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
+
+-#define CMN_NODEID_DEVID(reg) ((reg) & 3)
+-#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
+-#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
+-#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
+-#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
+-#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
+-#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
+-
+ #define CMN_CHILD_INFO 0x0080
+ #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
+ #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
+@@ -43,6 +35,9 @@
+ #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
+ #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+
++/* Currently XPs are the node type we can have most of; others top out at 128 */
++#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS
++
+ /* The CFG node has various info besides the discovery tree */
+ #define CMN_CFGM_PERIPH_ID_01 0x0008
+ #define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
+@@ -78,7 +73,8 @@
+ /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
+ #define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
+
+-/* HN-Ps are weird... */
++/* Some types are designed to coexist with another device in the same node */
++#define CMN_CCLA_PMU_EVENT_SEL 0x008
+ #define CMN_HNP_PMU_EVENT_SEL 0x008
+
+ /* DTMs live in the PMU space of XP registers */
+@@ -112,7 +108,9 @@
+
+ #define CMN_DTM_PMEVCNTSR 0x240
+
+-#define CMN_DTM_UNIT_INFO 0x0910
++#define CMN650_DTM_UNIT_INFO 0x0910
++#define CMN_DTM_UNIT_INFO 0x0960
++#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0)
+
+ #define CMN_DTM_NUM_COUNTERS 4
+ /* Want more local counters? Why not replicate the whole DTM! Ugh... */
+@@ -279,16 +277,16 @@ struct arm_cmn_node {
+ u16 id, logid;
+ enum cmn_node_type type;
+
+- int dtm;
+- union {
+- /* DN/HN-F/CXHA */
+- struct {
+- u8 val : 4;
+- u8 count : 4;
+- } occupid[SEL_MAX];
+- /* XP */
+- u8 dtc;
+- };
++ /* XP properties really, but replicated to children for convenience */
++ u8 dtm;
++ s8 dtc;
++ u8 portid_bits:4;
++ u8 deviceid_bits:4;
++ /* DN/HN-F/CXHA */
++ struct {
++ u8 val : 4;
++ u8 count : 4;
++ } occupid[SEL_MAX];
+ union {
+ u8 event[4];
+ __le32 event_sel;
+@@ -359,49 +357,33 @@ struct arm_cmn {
+ static int arm_cmn_hp_state;
+
+ struct arm_cmn_nodeid {
+- u8 x;
+- u8 y;
+ u8 port;
+ u8 dev;
+ };
+
+ static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
+ {
+- return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
++ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1));
+ }
+
+-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
++static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn)
+ {
+ struct arm_cmn_nodeid nid;
+
+- if (cmn->num_xps == 1) {
+- nid.x = 0;
+- nid.y = 0;
+- nid.port = CMN_NODEID_1x1_PID(id);
+- nid.dev = CMN_NODEID_DEVID(id);
+- } else {
+- int bits = arm_cmn_xyidbits(cmn);
+-
+- nid.x = CMN_NODEID_X(id, bits);
+- nid.y = CMN_NODEID_Y(id, bits);
+- if (cmn->ports_used & 0xc) {
+- nid.port = CMN_NODEID_EXT_PID(id);
+- nid.dev = CMN_NODEID_EXT_DEVID(id);
+- } else {
+- nid.port = CMN_NODEID_PID(id);
+- nid.dev = CMN_NODEID_DEVID(id);
+- }
+- }
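++	/* The node ID is, from MSB to LSB: X coord, Y coord, port, device */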
++ nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1);
++ nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1);
+ return nid;
+ }
+
+ static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
+ const struct arm_cmn_node *dn)
+ {
+- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+- int xp_idx = cmn->mesh_x * nid.y + nid.x;
++ int id = dn->id >> (dn->portid_bits + dn->deviceid_bits);
++ int bits = arm_cmn_xyidbits(cmn);
++ int x = id >> bits;
++ int y = id & ((1U << bits) - 1);
+
+- return cmn->xps + xp_idx;
++ return cmn->xps + cmn->mesh_x * y + x;
+ }
+ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ enum cmn_node_type type)
+@@ -487,13 +469,14 @@ static const char *arm_cmn_device_type(u8 type)
+ }
+ }
+
+-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
++static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d)
+ {
+ struct arm_cmn *cmn = s->private;
+ struct arm_cmn_node *dn;
++ u16 id = xp->id | d | (p << xp->deviceid_bits);
+
+ for (dn = cmn->dns; dn->type; dn++) {
+- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
++ int pad = dn->logid < 10;
+
+ if (dn->type == CMN_TYPE_XP)
+ continue;
+@@ -501,10 +484,10 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
+ if (dn->type < CMN_TYPE_HNI)
+ continue;
+
+- if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
++ if (dn->id != id)
+ continue;
+
+- seq_printf(s, " #%-2d |", dn->logid);
++ seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
+ return;
+ }
+ seq_puts(s, " |");
+@@ -517,33 +500,32 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
+
+ seq_puts(s, " X");
+ for (x = 0; x < cmn->mesh_x; x++)
+- seq_printf(s, " %d ", x);
++ seq_printf(s, " %-2d ", x);
+ seq_puts(s, "\nY P D+");
+ y = cmn->mesh_y;
+ while (y--) {
+ int xp_base = cmn->mesh_x * y;
++ struct arm_cmn_node *xp = cmn->xps + xp_base;
+ u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
+
+ for (x = 0; x < cmn->mesh_x; x++)
+ seq_puts(s, "--------+");
+
+- seq_printf(s, "\n%d |", y);
++ seq_printf(s, "\n%-2d |", y);
+ for (x = 0; x < cmn->mesh_x; x++) {
+- struct arm_cmn_node *xp = cmn->xps + xp_base + x;
+-
+ for (p = 0; p < CMN_MAX_PORTS; p++)
+- port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
+- seq_printf(s, " XP #%-2d |", xp_base + x);
++ port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p);
++ seq_printf(s, " XP #%-3d|", xp_base + x);
+ }
+
+ seq_puts(s, "\n |");
+ for (x = 0; x < cmn->mesh_x; x++) {
+- u8 dtc = cmn->xps[xp_base + x].dtc;
++ s8 dtc = xp[x].dtc;
+
+- if (dtc & (dtc - 1))
++ if (dtc < 0)
+ seq_puts(s, " DTC ?? |");
+ else
+- seq_printf(s, " DTC %ld |", __ffs(dtc));
++ seq_printf(s, " DTC %d |", dtc);
+ }
+ seq_puts(s, "\n |");
+ for (x = 0; x < cmn->mesh_x; x++)
+@@ -555,10 +537,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
+ seq_puts(s, arm_cmn_device_type(port[p][x]));
+ seq_puts(s, "\n 0|");
+ for (x = 0; x < cmn->mesh_x; x++)
+- arm_cmn_show_logid(s, x, y, p, 0);
++ arm_cmn_show_logid(s, xp + x, p, 0);
+ seq_puts(s, "\n 1|");
+ for (x = 0; x < cmn->mesh_x; x++)
+- arm_cmn_show_logid(s, x, y, p, 1);
++ arm_cmn_show_logid(s, xp + x, p, 1);
+ }
+ seq_puts(s, "\n-----+");
+ }
+@@ -586,9 +568,8 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
+
+ struct arm_cmn_hw_event {
+ struct arm_cmn_node *dn;
+- u64 dtm_idx[4];
+- unsigned int dtc_idx;
+- u8 dtcs_used;
++ u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)];
++ s8 dtc_idx[CMN_MAX_DTCS];
+ u8 num_dns;
+ u8 dtm_offset;
+ bool wide_sel;
+@@ -598,6 +579,10 @@ struct arm_cmn_hw_event {
+ #define for_each_hw_dn(hw, dn, i) \
+ for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
+
++/* @i is the DTC number, @idx is the counter index on that DTC */
++#define for_each_hw_dtc_idx(hw, i, idx) \
++ for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0)
++
+ static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
+ {
+ BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
+@@ -809,7 +794,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ #define CMN_EVENT_HNF_OCC(_model, _name, _event) \
+ CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
+ #define CMN_EVENT_HNF_CLS(_model, _name, _event) \
+- CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event)
++ CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
+ #define CMN_EVENT_HNF_SNT(_model, _name, _event) \
+ CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)
+
+@@ -1427,12 +1412,11 @@ static void arm_cmn_init_counter(struct perf_event *event)
+ {
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+- unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx);
+ u64 count;
+
+- for (i = 0; hw->dtcs_used & (1U << i); i++) {
+- writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt);
+- cmn->dtc[i].counters[hw->dtc_idx] = event;
++ for_each_hw_dtc_idx(hw, i, idx) {
++ writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx));
++ cmn->dtc[i].counters[idx] = event;
+ }
+
+ count = arm_cmn_read_dtm(cmn, hw, false);
+@@ -1445,11 +1429,9 @@ static void arm_cmn_event_read(struct perf_event *event)
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ u64 delta, new, prev;
+ unsigned long flags;
+- unsigned int i;
+
+- if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) {
+- i = __ffs(hw->dtcs_used);
+- delta = arm_cmn_read_cc(cmn->dtc + i);
++ if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) {
++ delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]);
+ local64_add(delta, &event->count);
+ return;
+ }
+@@ -1459,8 +1441,8 @@ static void arm_cmn_event_read(struct perf_event *event)
+ delta = new - prev;
+
+ local_irq_save(flags);
+- for (i = 0; hw->dtcs_used & (1U << i); i++) {
+- new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx);
++ for_each_hw_dtc_idx(hw, i, idx) {
++ new = arm_cmn_read_counter(cmn->dtc + i, idx);
+ delta += new << 16;
+ }
+ local_irq_restore(flags);
+@@ -1516,7 +1498,7 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
+ int i;
+
+ if (type == CMN_TYPE_DTC) {
+- i = __ffs(hw->dtcs_used);
++ i = hw->dtc_idx[0];
+ writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
+ cmn->dtc[i].cc_active = true;
+ } else if (type == CMN_TYPE_WP) {
+@@ -1547,7 +1529,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
+ int i;
+
+ if (type == CMN_TYPE_DTC) {
+- i = __ffs(hw->dtcs_used);
++ i = hw->dtc_idx[0];
+ cmn->dtc[i].cc_active = false;
+ } else if (type == CMN_TYPE_WP) {
+ int wp_idx = arm_cmn_wp_idx(event);
+@@ -1733,29 +1715,27 @@ static int arm_cmn_event_init(struct perf_event *event)
+ hw->dn = arm_cmn_node(cmn, type);
+ if (!hw->dn)
+ return -EINVAL;
++
++ memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx));
+ for (dn = hw->dn; dn->type == type; dn++) {
+ if (bynodeid && dn->id != nodeid) {
+ hw->dn++;
+ continue;
+ }
+ hw->num_dns++;
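++		/* A negative dtc means an unknown domain: count on every DTC */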
++ if (dn->dtc < 0)
++ memset(hw->dtc_idx, 0, cmn->num_dtcs);
++ else
++ hw->dtc_idx[dn->dtc] = 0;
++
+ if (bynodeid)
+ break;
+ }
+
+ if (!hw->num_dns) {
+- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
+-
+- dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
+- nodeid, nid.x, nid.y, nid.port, nid.dev, type);
++ dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type);
+ return -EINVAL;
+ }
+- /*
+- * Keep assuming non-cycles events count in all DTC domains; turns out
+- * it's hard to make a worthwhile optimisation around this, short of
+- * going all-in with domain-local counter allocation as well.
+- */
+- hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
+
+ return arm_cmn_validate_group(cmn, event);
+ }
+@@ -1781,28 +1761,25 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
+ }
+ memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
+
+- for (i = 0; hw->dtcs_used & (1U << i); i++)
+- cmn->dtc[i].counters[hw->dtc_idx] = NULL;
++ for_each_hw_dtc_idx(hw, j, idx)
++ cmn->dtc[j].counters[idx] = NULL;
+ }
+
+ static int arm_cmn_event_add(struct perf_event *event, int flags)
+ {
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+- struct arm_cmn_dtc *dtc = &cmn->dtc[0];
+ struct arm_cmn_node *dn;
+ enum cmn_node_type type = CMN_EVENT_TYPE(event);
+- unsigned int i, dtc_idx, input_sel;
++ unsigned int input_sel, i = 0;
+
+ if (type == CMN_TYPE_DTC) {
+- i = 0;
+ while (cmn->dtc[i].cycles)
+ if (++i == cmn->num_dtcs)
+ return -ENOSPC;
+
+ cmn->dtc[i].cycles = event;
+- hw->dtc_idx = CMN_DT_NUM_COUNTERS;
+- hw->dtcs_used = 1U << i;
++ hw->dtc_idx[0] = i;
+
+ if (flags & PERF_EF_START)
+ arm_cmn_event_start(event, 0);
+@@ -1810,17 +1787,22 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
+ }
+
+ /* Grab a free global counter first... */
+- dtc_idx = 0;
+- while (dtc->counters[dtc_idx])
+- if (++dtc_idx == CMN_DT_NUM_COUNTERS)
+- return -ENOSPC;
+-
+- hw->dtc_idx = dtc_idx;
++ for_each_hw_dtc_idx(hw, j, idx) {
++ if (j > 0) {
++ idx = hw->dtc_idx[0];
++ } else {
++ idx = 0;
++ while (cmn->dtc[j].counters[idx])
++ if (++idx == CMN_DT_NUM_COUNTERS)
++ return -ENOSPC;
++ }
++ hw->dtc_idx[j] = idx;
++ }
+
+ /* ...then the local counters to feed it. */
+ for_each_hw_dn(hw, dn, i) {
+ struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
+- unsigned int dtm_idx, shift;
++ unsigned int dtm_idx, shift, d = 0;
+ u64 reg;
+
+ dtm_idx = 0;
+@@ -1839,14 +1821,14 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
+
+ tmp = dtm->wp_event[wp_idx ^ 1];
+ if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
+- CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
++ CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp]))
+ goto free_dtms;
+
+ input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
+- dtm->wp_event[wp_idx] = dtc_idx;
++ dtm->wp_event[wp_idx] = hw->dtc_idx[d];
+ writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
+ } else {
+- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
++ struct arm_cmn_nodeid nid = arm_cmn_nid(dn);
+
+ if (cmn->multi_dtm)
+ nid.port %= 2;
+@@ -1863,7 +1845,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
+ dtm->input_sel[dtm_idx] = input_sel;
+ shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
+ dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
+- dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
++ dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift;
+ dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
+ reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
+ writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
+@@ -1891,7 +1873,7 @@ static void arm_cmn_event_del(struct perf_event *event, int flags)
+ arm_cmn_event_stop(event, PERF_EF_UPDATE);
+
+ if (type == CMN_TYPE_DTC)
+- cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL;
++ cmn->dtc[hw->dtc_idx[0]].cycles = NULL;
+ else
+ arm_cmn_event_clear(cmn, event, hw->num_dns);
+ }
+@@ -2072,7 +2054,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ {
+ struct arm_cmn_node *dn, *xp;
+ int dtc_idx = 0;
+- u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
+
+ cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
+ if (!cmn->dtc)
+@@ -2082,23 +2063,28 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+
+ cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
+
++ if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) {
++ /* We do at least know that a DTC's XP must be in that DTC's domain */
++ dn = arm_cmn_node(cmn, CMN_TYPE_DTC);
++ for (int i = 0; i < cmn->num_dtcs; i++)
++ arm_cmn_node_to_xp(cmn, dn + i)->dtc = i;
++ }
++
+ for (dn = cmn->dns; dn->type; dn++) {
+- if (dn->type == CMN_TYPE_XP) {
+- dn->dtc &= dtcs_present;
++ if (dn->type == CMN_TYPE_XP)
+ continue;
+- }
+
+ xp = arm_cmn_node_to_xp(cmn, dn);
++ dn->portid_bits = xp->portid_bits;
++ dn->deviceid_bits = xp->deviceid_bits;
++ dn->dtc = xp->dtc;
+ dn->dtm = xp->dtm;
+ if (cmn->multi_dtm)
+- dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
++ dn->dtm += arm_cmn_nid(dn).port / 2;
+
+ if (dn->type == CMN_TYPE_DTC) {
+- int err;
+- /* We do at least know that a DTC's XP must be in that DTC's domain */
+- if (xp->dtc == 0xf)
+- xp->dtc = 1 << dtc_idx;
+- err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
++ int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
++
+ if (err)
+ return err;
+ }
+@@ -2117,6 +2103,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+ return 0;
+ }
+
++static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
++{
++ int offset = CMN_DTM_UNIT_INFO;
++
++ if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
++ offset = CMN650_DTM_UNIT_INFO;
++
++ return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
++}
++
+ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
+ {
+ int level;
+@@ -2246,26 +2242,35 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ cmn->mesh_x = xp->logid;
+
+ if (cmn->part == PART_CMN600)
+- xp->dtc = 0xf;
++ xp->dtc = -1;
+ else
+- xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
++ xp->dtc = arm_cmn_dtc_domain(cmn, xp_region);
+
+ xp->dtm = dtm - cmn->dtms;
+ arm_cmn_init_dtm(dtm++, xp, 0);
+ /*
+ * Keeping track of connected ports will let us filter out
+- * unnecessary XP events easily. We can also reliably infer the
+- * "extra device ports" configuration for the node ID format
+- * from this, since in that case we will see at least one XP
+- * with port 2 connected, for the HN-D.
++ * unnecessary XP events easily, and also infer the per-XP
++ * part of the node ID format.
+ */
+ for (int p = 0; p < CMN_MAX_PORTS; p++)
+ if (arm_cmn_device_connect_info(cmn, xp, p))
+ xp_ports |= BIT(p);
+
+- if (cmn->multi_dtm && (xp_ports & 0xc))
++ if (cmn->num_xps == 1) {
++ xp->portid_bits = 3;
++ xp->deviceid_bits = 2;
++ } else if (xp_ports > 0x3) {
++ xp->portid_bits = 2;
++ xp->deviceid_bits = 1;
++ } else {
++ xp->portid_bits = 1;
++ xp->deviceid_bits = 2;
++ }
++
++ if (cmn->multi_dtm && (xp_ports > 0x3))
+ arm_cmn_init_dtm(dtm++, xp, 1);
+- if (cmn->multi_dtm && (xp_ports & 0x30))
++ if (cmn->multi_dtm && (xp_ports > 0xf))
+ arm_cmn_init_dtm(dtm++, xp, 2);
+
+ cmn->ports_used |= xp_ports;
+@@ -2289,6 +2294,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
+ continue;
+ }
++ /*
++ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
++ * child count larger than the number of valid child pointers.
++ * A child offset of 0 can only occur on CMN-600; otherwise it
++ * would imply the root node being its own grandchild, which
++ * we can safely dismiss in general.
++ */
++ if (reg == 0 && cmn->part != PART_CMN600) {
++ dev_dbg(cmn->dev, "bogus child pointer?\n");
++ continue;
++ }
+
+ arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
+
+@@ -2309,10 +2325,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ case CMN_TYPE_CXHA:
+ case CMN_TYPE_CCRA:
+ case CMN_TYPE_CCHA:
+- case CMN_TYPE_CCLA:
+ case CMN_TYPE_HNS:
+ dn++;
+ break;
++ case CMN_TYPE_CCLA:
++ dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL;
++ dn++;
++ break;
+ /* Nothing to see here */
+ case CMN_TYPE_MPAM_S:
+ case CMN_TYPE_MPAM_NS:
+@@ -2330,7 +2349,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ case CMN_TYPE_HNP:
+ case CMN_TYPE_CCLA_RNI:
+ dn[1] = dn[0];
+- dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
++ dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL;
+ dn[1].type = arm_cmn_subtype(dn->type);
+ dn += 2;
+ break;
+diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
+index e2b7827c456354..9363c31f31b895 100644
+--- a/drivers/perf/arm_cspmu/arm_cspmu.c
++++ b/drivers/perf/arm_cspmu/arm_cspmu.c
+@@ -635,6 +635,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
+
+ cspmu = to_arm_cspmu(event->pmu);
+
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
+ /*
+ * Following other "uncore" PMUs, we do not support sampling mode or
+ * attach to a task (per-process mode).
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 30cea685957470..b6a677224d6820 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -542,12 +542,16 @@ static int dmc620_pmu_event_init(struct perf_event *event)
+ if (event->cpu < 0)
+ return -EINVAL;
+
++ hwc->idx = -1;
++
++ if (event->group_leader == event)
++ return 0;
++
+ /*
+ * We can't atomically disable all HW counters so only one event allowed,
+ * although software events are acceptable.
+ */
+- if (event->group_leader != event &&
+- !is_software_event(event->group_leader))
++ if (!is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+@@ -556,7 +560,6 @@ static int dmc620_pmu_event_init(struct perf_event *event)
+ return -EINVAL;
+ }
+
+- hwc->idx = -1;
+ return 0;
+ }
+
+diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
+index 8fcaa26f0f8a6f..0e8f54168cb641 100644
+--- a/drivers/perf/arm_pmuv3.c
++++ b/drivers/perf/arm_pmuv3.c
+@@ -169,7 +169,11 @@ armv8pmu_events_sysfs_show(struct device *dev,
+ PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
+
+ static struct attribute *armv8_pmuv3_event_attrs[] = {
+- ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
++ /*
++ * Don't expose the sw_incr event in /sys. It's not usable as writes to
++ * PMSWINC_EL0 will trap as PMUSERENR.{SW,EN}=={0,0} and event rotation
++ * means we don't have a fixed event<->counter relationship regardless.
++ */
+ ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
+ ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
+ ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
+@@ -428,12 +432,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+ #define ARMV8_IDX_TO_COUNTER(x) \
+ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
+
+-static inline u32 armv8pmu_pmcr_read(void)
++static inline u64 armv8pmu_pmcr_read(void)
+ {
+ return read_pmcr();
+ }
+
+-static inline void armv8pmu_pmcr_write(u32 val)
++static inline void armv8pmu_pmcr_write(u64 val)
+ {
+ val &= ARMV8_PMU_PMCR_MASK;
+ isb();
+@@ -957,7 +961,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ static void armv8pmu_reset(void *info)
+ {
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+- u32 pmcr;
++ u64 pmcr;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ armv8pmu_disable_counter(U32_MAX);
+diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
+index d2b0cbf0e0c41e..2bec2e3af0bd62 100644
+--- a/drivers/perf/arm_spe_pmu.c
++++ b/drivers/perf/arm_spe_pmu.c
+@@ -41,7 +41,7 @@
+
+ /*
+ * Cache if the event is allowed to trace Context information.
+- * This allows us to perform the check, i.e, perfmon_capable(),
++ * This allows us to perform the check, i.e., perf_allow_kernel(),
+ * in the context of the event owner, once, during the event_init().
+ */
+ #define SPE_PMU_HW_FLAGS_CX 0x00001
+@@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_C
+
+ static void set_spe_event_has_cx(struct perf_event *event)
+ {
+- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
++ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr))
+ event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
+ }
+
+@@ -767,9 +767,8 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
+
+ set_spe_event_has_cx(event);
+ reg = arm_spe_event_to_pmscr(event);
+- if (!perfmon_capable() &&
+- (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
+- return -EACCES;
++ if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
++ return perf_allow_kernel(&event->attr);
+
+ return 0;
+ }
+diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
+index 365d964b0f6a6d..308c9969642e1f 100644
+--- a/drivers/perf/cxl_pmu.c
++++ b/drivers/perf/cxl_pmu.c
+@@ -59,7 +59,7 @@
+ #define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
+
+ #define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
+-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
++#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
+
+ #define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
+
+@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
+ }
+
+ /*
+- * CPMU specification allows for 8 filters, each with a 16 bit value...
+- * So we need to find 8x16bits to store it in.
+- * As the value used for disable is 0xffff, a separate enable switch
++ * CPMU specification allows for 8 filters, each with a 32 bit value...
++ * So we need to find 8x32bits to store it in.
++ * As the value used for disable is 0xffff_ffff, a separate enable switch
+ * is needed.
+ */
+
+@@ -419,7 +419,7 @@ static struct attribute *cxl_pmu_event_attrs[] = {
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
+- CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
++ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
+ /* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
+ CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
+@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
+ if (cxl_pmu_config1_hdm_filter_en(event))
+ cfg = cxl_pmu_config2_get_hdm_decoder(event);
+ else
+- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
++ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
+ writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
+ }
+
+diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
+index 5cf770a1bc3124..4f6eade5220246 100644
+--- a/drivers/perf/fsl_imx9_ddr_perf.c
++++ b/drivers/perf/fsl_imx9_ddr_perf.c
+@@ -476,12 +476,12 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
+ hwc->idx = counter;
+ hwc->state |= PERF_HES_STOPPED;
+
+- if (flags & PERF_EF_START)
+- ddr_perf_event_start(event, flags);
+-
+ /* read trans, write trans, read beat */
+ ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
+
++ if (flags & PERF_EF_START)
++ ddr_perf_event_start(event, flags);
++
+ return 0;
+ }
+
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 5a00adb2de8c9d..4a902da5c1d495 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -221,7 +221,7 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event)
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 port, trig_len, thr_len, len_mode;
+- u64 reg = HISI_PCIE_INIT_SET;
++ u64 reg = 0;
+
+ /* Config HISI_PCIE_EVENT_CTRL according to event. */
+ reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));
+@@ -337,15 +337,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
+ return false;
+
+ for (num = 0; num < counters; num++) {
++ /*
++ * If we find a related event, then it's a valid group
++ * since we don't need to allocate a new counter for it.
++ */
+ if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
+ break;
+ }
+
++ /*
++ * Otherwise it's a new event but if there's no available counter,
++ * fail the check since we cannot schedule all the events in
++ * the group simultaneously.
++ */
++ if (num == HISI_PCIE_MAX_COUNTERS)
++ return false;
++
+ if (num == counters)
+ event_group[counters++] = sibling;
+ }
+
+- return counters <= HISI_PCIE_MAX_COUNTERS;
++ return true;
+ }
+
+ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+@@ -353,6 +365,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
++ /* Check the type first before going on, otherwise it's not our event */
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
+ event->cpu = pcie_pmu->on_cpu;
+
+ if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
+@@ -360,9 +376,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
+ else
+ hwc->event_base = HISI_PCIE_CNT;
+
+- if (event->attr.type != event->pmu->type)
+- return -ENOENT;
+-
+ /* Sampling is not supported. */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+@@ -445,10 +458,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event)
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
++ u64 orig_cnt, cnt;
++
++ orig_cnt = hisi_pcie_pmu_read_counter(event);
+
+ local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
+ hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
++
++ /*
++	 * The counter may be unwritable if the target event is unsupported.
++	 * Check this by comparing the counts after setting the period. If
++	 * the counts stay unchanged after setting the period, update
++	 * hwc->prev_count to the real counter value; otherwise the final
++	 * counts the user gets may be totally wrong.
++ */
++ cnt = hisi_pcie_pmu_read_counter(event);
++ if (orig_cnt == cnt)
++ local64_set(&hwc->prev_count, cnt);
+ }
+
+ static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
+diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+index d941e746b42483..797cf201996a96 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+@@ -505,8 +505,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
+ ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+- &pa_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
++ &pa_pmu->node);
+ return ret;
+ }
+
+diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+index 6fe534a665eda3..e706ca5676764b 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+@@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
+ ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+- &sllc_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
++ &sllc_pmu->node);
+ return ret;
+ }
+
+diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+index 63da05e5831c1f..481dcc9e8fbf88 100644
+--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
++++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+@@ -287,12 +287,52 @@ static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
+ return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
+ }
+
+-static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
++static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
++{
++ u32 val;
++
++ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
++ return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
++}
++
++static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+ {
+ writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
+ }
+
++static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
++ struct hw_perf_event *hwc, u64 val)
++{
++ hisi_uc_pmu_start_counters(uc_pmu);
++ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
++ hisi_uc_pmu_stop_counters(uc_pmu);
++}
++
++static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
++ struct hw_perf_event *hwc, u64 val)
++{
++ bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
++ bool erratum = uc_pmu->identifier == HISI_PMU_V2;
++
++ /*
++	 * HiSilicon UC PMU v2 suffers from erratum 162700402: the PMU
++	 * counter cannot be set because its clock is gated in power-saving
++	 * mode, which leads to errors or inaccurate counts. The clock can
++	 * be enabled via the PMU global enable control. Both the irq
++	 * handler and pmu_start() call this function to set the period;
++	 * under irq context the PMU is already enabled, so the counter is
++	 * set directly. Otherwise the PMU is disabled, so enable it to
++	 * turn on the counter clock, set the period, and then restore the
++	 * PMU enable state; the counter holds its value without a clock.
++ */
++ if (enable || !erratum)
++ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
++ else
++ hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
++}
++
+ static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc)
+ {
+@@ -383,8 +423,8 @@ static struct attribute *hisi_uc_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
+ HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
+ HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
+- HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
+- HISI_PMU_EVENT_ATTR(cycles, 0x9c),
++ HISI_PMU_EVENT_ATTR(cpu_mru, 0x1c),
++ HISI_PMU_EVENT_ATTR(cycles, 0x95),
+ HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
+ HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
+ HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index e0457d84af6b37..60062eaa342aad 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
+ return false;
+
+ for (num = 0; num < counters; num++) {
++ /*
++ * If we find a related event, then it's a valid group
++ * since we don't need to allocate a new counter for it.
++ */
+ if (hns3_pmu_cmp_event(event_group[num], sibling))
+ break;
+ }
+
++ /*
++ * Otherwise it's a new event but if there's no available counter,
++ * fail the check since we cannot schedule all the events in
++ * the group simultaneously.
++ */
++ if (num == HNS3_PMU_MAX_HW_EVENTS)
++ return false;
++
+ if (num == counters)
+ event_group[counters++] = sibling;
+ }
+
+- return counters <= HNS3_PMU_MAX_HW_EVENTS;
++ return true;
+ }
+
+ static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
+@@ -1515,7 +1527,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
+ return ret;
+ }
+
+- ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
++ ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
+ if (ret) {
+ pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
+ return ret;
+@@ -1556,8 +1568,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
+ ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
+ if (ret) {
+ pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+- &hns3_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++ &hns3_pmu->node);
+ }
+
+ return ret;
+@@ -1568,8 +1580,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
+ struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
+
+ perf_pmu_unregister(&hns3_pmu->pmu);
+- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+- &hns3_pmu->node);
++ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
++ &hns3_pmu->node);
+ }
+
+ static int hns3_pmu_init_dev(struct pci_dev *pdev)
+diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
+index 0dda70e1ef90a1..b4efdddb2ad91f 100644
+--- a/drivers/perf/riscv_pmu.c
++++ b/drivers/perf/riscv_pmu.c
+@@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+- if (!rvpmu->ctr_get_width)
+- /**
+- * If the pmu driver doesn't support counter width, set it to default
+- * maximum allowed by the specification.
+- */
+- cwidth = 63;
+- else {
+- if (hwc->idx == -1)
+- /* Handle init case where idx is not initialized yet */
+- cwidth = rvpmu->ctr_get_width(0);
+- else
+- cwidth = rvpmu->ctr_get_width(hwc->idx);
+- }
++ if (hwc->idx == -1)
++ /* Handle init case where idx is not initialized yet */
++ cwidth = rvpmu->ctr_get_width(0);
++ else
++ cwidth = rvpmu->ctr_get_width(hwc->idx);
+
+ return GENMASK_ULL(cwidth, 0);
+ }
+@@ -321,6 +313,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
+ u64 event_config = 0;
+ uint64_t cmask;
+
++ /* driver does not support branch stack sampling */
++ if (has_branch_stack(event))
++ return -EOPNOTSUPP;
++
+ hwc->flags = 0;
+ mapped_event = rvpmu->event_map(event, &event_config);
+ if (mapped_event < 0) {
+diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
+index 79fdd667922e81..fa0bccf4edf2ea 100644
+--- a/drivers/perf/riscv_pmu_legacy.c
++++ b/drivers/perf/riscv_pmu_legacy.c
+@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
+ return pmu_legacy_ctr_get_idx(event);
+ }
+
++/* cycle & instret are always 64 bit; the SBI spec reports width as one bit less */
++static int pmu_legacy_ctr_get_width(int idx)
++{
++ return 63;
++}
++
+ static u64 pmu_legacy_read_ctr(struct perf_event *event)
+ {
+ struct hw_perf_event *hwc = &event->hw;
+@@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
+ pmu->ctr_stop = NULL;
+ pmu->event_map = pmu_legacy_event_map;
+ pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
+- pmu->ctr_get_width = NULL;
++ pmu->ctr_get_width = pmu_legacy_ctr_get_width;
+ pmu->ctr_clear_idx = NULL;
+ pmu->ctr_read = pmu_legacy_read_ctr;
+ pmu->event_mapped = pmu_legacy_event_mapped;
+ pmu->event_unmapped = pmu_legacy_event_unmapped;
+ pmu->csr_index = pmu_legacy_csr_index;
++ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
++ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
+
+ perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
+ }
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 96c7f670c8f0d1..901da688ea3f8f 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -355,7 +355,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
+ * but not in the user access mode as we want to use the other counters
+ * that support sampling/filtering.
+ */
+- if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
++ if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
+ if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
+ cmask = 1;
+@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
+
+ if (event->hw.idx != -1)
+ csr_write(CSR_SCOUNTEREN,
+- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
++ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
+ }
+
+ static void pmu_sbi_reset_scounteren(void *arg)
+@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
+
+ if (event->hw.idx != -1)
+ csr_write(CSR_SCOUNTEREN,
+- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
++ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
+ }
+
+ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
+@@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+- on_each_cpu_mask(mm_cpumask(event->owner->mm),
+- pmu_sbi_set_scounteren, (void *)event, 1);
++ pmu_sbi_set_scounteren((void *)event);
+ }
+
+ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
+@@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+- on_each_cpu_mask(mm_cpumask(event->owner->mm),
+- pmu_sbi_reset_scounteren, (void *)event, 1);
++ pmu_sbi_reset_scounteren((void *)event);
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
+ if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
+@@ -613,7 +611,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
+ * which may include counters that are not enabled yet.
+ */
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
+- 0, pmu->cmask, 0, 0, 0, 0);
++ 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
+ }
+
+ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
+@@ -689,6 +687,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+
+ /* Firmware counter don't support overflow yet */
+ fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
++ if (fidx == RISCV_MAX_COUNTERS) {
++ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
++ return IRQ_NONE;
++ }
++
+ event = cpu_hw_evt->events[fidx];
+ if (!event) {
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+@@ -728,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+ /* compute hardware counter index */
+ hidx = info->csr - CSR_CYCLE;
+ /* check if the corresponding bit is set in sscountovf */
+- if (!(overflow & (1 << hidx)))
++ if (!(overflow & BIT(hidx)))
+ continue;
+
+ /*
+ * Keep a track of overflowed counters so that they can be started
+ * with updated initial value.
+ */
+- overflowed_ctrs |= 1 << lidx;
++ overflowed_ctrs |= BIT(lidx);
+ hw_evt = &event->hw;
+ riscv_pmu_event_update(event);
+ perf_sample_data_init(&data, 0, hw_evt->last_period);
+diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
+index d1670bbe6d6bcc..e4502958fd62d6 100644
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -87,7 +87,6 @@ source "drivers/phy/motorola/Kconfig"
+ source "drivers/phy/mscc/Kconfig"
+ source "drivers/phy/qualcomm/Kconfig"
+ source "drivers/phy/ralink/Kconfig"
+-source "drivers/phy/realtek/Kconfig"
+ source "drivers/phy/renesas/Kconfig"
+ source "drivers/phy/rockchip/Kconfig"
+ source "drivers/phy/samsung/Kconfig"
+diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
+index 868a220ed0f6df..fb3dc9de611154 100644
+--- a/drivers/phy/Makefile
++++ b/drivers/phy/Makefile
+@@ -26,7 +26,6 @@ obj-y += allwinner/ \
+ mscc/ \
+ qualcomm/ \
+ ralink/ \
+- realtek/ \
+ renesas/ \
+ rockchip/ \
+ samsung/ \
+diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
+index a75c96385c57ac..a23d7f9b7d10f8 100644
+--- a/drivers/phy/cadence/phy-cadence-torrent.c
++++ b/drivers/phy/cadence/phy-cadence-torrent.c
+@@ -1154,6 +1154,9 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == value, 0,
+ POLL_TIMEOUT_US);
++ if (ret)
++ return ret;
++
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
+ ndelay(100);
+
+diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+index e625b32889bfce..0928a526e2ab36 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
++++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- priv->id = of_alias_get_id(np, "mipi_dphy");
++ priv->id = of_alias_get_id(np, "mipi-dphy");
+ if (priv->id < 0) {
+ dev_err(dev, "Failed to get phy node alias id: %d\n",
+ priv->id);
+diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+index b700f52b7b6799..11fcb1867118c3 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
++++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
+ /* Source clock from SoC internal PLL */
+ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
+- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
++ if (imx8_phy->drvdata->variant != IMX8MM) {
++ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
++ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
++ }
+ val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
+ writel(val | ANA_AUX_RX_TERM_GND_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
+diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+index 0b9a59d5b8f023..adc6394626ce83 100644
+--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
++++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+@@ -176,7 +176,7 @@ static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy)
+ imx_phy->comp_dis_tune =
+ phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune);
+
+- if (device_property_read_u32(dev, "fsl,pcs-tx-deemph-3p5db-attenuation-db",
++ if (device_property_read_u32(dev, "fsl,phy-pcs-tx-deemph-3p5db-attenuation-db",
+ &imx_phy->pcs_tx_deemph_3p5db))
+ imx_phy->pcs_tx_deemph_3p5db = PHY_TUNE_DEFAULT;
+ else
+diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+index 24c3371e2bb294..27f221a0f922d2 100644
+--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
+ u16 val;
+
+ fix_idx = 0;
+- for (addr = 0; addr < 512; addr++) {
++ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
+ /*
+ * All PHY register values are defined in full for 3.125Gbps
+ * SERDES speed. The values required for 1.25 Gbps are almost
+@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
+ * comparison to 3.125 Gbps values. These register values are
+ * stored in "gbe_phy_init_fix" array.
+ */
+- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
++ if (!is_1gbps &&
++ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
++ gbe_phy_init_fix[fix_idx].addr == addr) {
+ /* Use new value */
+ val = gbe_phy_init_fix[fix_idx].value;
+- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
+- fix_idx++;
++ fix_idx++;
+ } else {
+ val = gbe_phy_init[addr];
+ }
+diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
+index f021ec5a70e5c3..553725e1269c9d 100644
+--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
++++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
+@@ -100,7 +100,7 @@ static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+ static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+ {
+- return clamp_val(rate, 50000000, 1600000000);
++ return clamp_val(rate, 125000000, 1600000000);
+ }
+
+ static const struct clk_ops mtk_mipi_tx_pll_ops = {
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 52c275fbb2a1c0..a43e20abb10d54 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -24,23 +24,45 @@
+ #define EUSB2_FORCE_VAL_5 0xeD
+ #define V_CLK_19P2M_EN BIT(6)
+
++#define EUSB2_TUNE_USB2_CROSSOVER 0x50
+ #define EUSB2_TUNE_IUSB2 0x51
++#define EUSB2_TUNE_RES_FSDIF 0x52
++#define EUSB2_TUNE_HSDISC 0x53
+ #define EUSB2_TUNE_SQUELCH_U 0x54
++#define EUSB2_TUNE_USB2_SLEW 0x55
++#define EUSB2_TUNE_USB2_EQU 0x56
+ #define EUSB2_TUNE_USB2_PREEM 0x57
+-
+-#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v) \
+- { \
+- .offset = o, \
+- .val = v, \
+- }
+-
+-struct eusb2_repeater_init_tbl {
+- unsigned int offset;
+- unsigned int val;
++#define EUSB2_TUNE_USB2_HS_COMP_CUR 0x58
++#define EUSB2_TUNE_EUSB_SLEW 0x59
++#define EUSB2_TUNE_EUSB_EQU 0x5A
++#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
++
++enum eusb2_reg_layout {
++ TUNE_EUSB_HS_COMP_CUR,
++ TUNE_EUSB_EQU,
++ TUNE_EUSB_SLEW,
++ TUNE_USB2_HS_COMP_CUR,
++ TUNE_USB2_PREEM,
++ TUNE_USB2_EQU,
++ TUNE_USB2_SLEW,
++ TUNE_SQUELCH_U,
++ TUNE_HSDISC,
++ TUNE_RES_FSDIF,
++ TUNE_IUSB2,
++ TUNE_USB2_CROSSOVER,
++ NUM_TUNE_FIELDS,
++
++ FORCE_VAL_5 = NUM_TUNE_FIELDS,
++ FORCE_EN_5,
++
++ EN_CTL1,
++
++ RPTR_STATUS,
++ LAYOUT_SIZE,
+ };
+
+ struct eusb2_repeater_cfg {
+- const struct eusb2_repeater_init_tbl *init_tbl;
++ const u32 *init_tbl;
+ int init_tbl_num;
+ const char * const *vreg_list;
+ int num_vregs;
+@@ -52,7 +74,7 @@ struct eusb2_repeater {
+ struct phy *phy;
+ struct regulator_bulk_data *vregs;
+ const struct eusb2_repeater_cfg *cfg;
+- u16 base;
++ u32 base;
+ enum phy_mode mode;
+ };
+
+@@ -60,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
+ "vdd18", "vdd3",
+ };
+
+-static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
++static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
++ [TUNE_IUSB2] = 0x8,
++ [TUNE_SQUELCH_U] = 0x3,
++ [TUNE_USB2_PREEM] = 0x5,
+ };
+
+ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
+@@ -92,27 +114,41 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
+ static int eusb2_repeater_init(struct phy *phy)
+ {
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+- const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
+- int num = rptr->cfg->init_tbl_num;
++ struct device_node *np = rptr->dev->of_node;
++ struct regmap *regmap = rptr->regmap;
++ const u32 *init_tbl = rptr->cfg->init_tbl;
++ u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
++ u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
++ u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
++ u32 base = rptr->base;
+ u32 val;
+ int ret;
+- int i;
++
++ of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
++ of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
++ of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
+
+ ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
+ if (ret)
+ return ret;
+
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
+- EUSB2_RPTR_EN, EUSB2_RPTR_EN);
++ regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
+
+- for (i = 0; i < num; i++)
+- regmap_update_bits(rptr->regmap,
+- rptr->base + init_tbl[i].offset,
+- init_tbl[i].val, init_tbl[i].val);
++ regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
++ regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
++ regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
++ regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
++ regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
++ regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
++ regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
++ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
++ regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
++
++ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
++ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
++ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
+
+- ret = regmap_read_poll_timeout(rptr->regmap,
+- rptr->base + EUSB2_RPTR_STATUS, val,
+- val & RPTR_OK, 10, 5);
++ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
+ if (ret)
+ dev_err(rptr->dev, "initialization timed-out\n");
+
+@@ -123,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+ {
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
++ struct regmap *regmap = rptr->regmap;
++ u32 base = rptr->base;
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+@@ -131,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ * per eUSB 1.2 Spec. Below implement software workaround until
+ * PHY and controller is fixing seen observation.
+ */
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
++ regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
++ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
+ break;
+ case PHY_MODE_USB_DEVICE:
+ /*
+@@ -143,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ * repeater doesn't clear previous value due to shared
+ * regulators (say host <-> device mode switch).
+ */
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+- F_CLK_19P2M_EN, 0);
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+- V_CLK_19P2M_EN, 0);
++ regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
++ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
+index 5cb7e79b99b3f5..89c9d74e35466c 100644
+--- a/drivers/phy/qualcomm/phy-qcom-m31.c
++++ b/drivers/phy/qualcomm/phy-qcom-m31.c
+@@ -253,7 +253,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
+ return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ "failed to create phy\n");
+
+- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
++ qphy->vreg = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(qphy->vreg))
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
+ "failed to get vreg\n");
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index 5e6fc8103e9d81..54fb5fca1c4226 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -112,6 +112,7 @@ enum qphy_reg_layout {
+ QPHY_COM_BIAS_EN_CLKBUFLR_EN,
+
+ QPHY_DP_PHY_STATUS,
++ QPHY_DP_PHY_VCO_DIV,
+
+ QPHY_TX_TX_POL_INV,
+ QPHY_TX_TX_DRV_LVL,
+@@ -137,6 +138,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
++ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
+@@ -161,6 +163,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
++ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
+@@ -185,6 +188,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
++ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
+@@ -209,6 +213,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
++ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
+@@ -2047,9 +2052,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
+ writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+
+ if (reverse)
+- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
++ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
+ else
+- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
++ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
+
+ return reverse;
+ }
+@@ -2059,6 +2064,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
+ const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
+ u32 phy_vco_div;
+ unsigned long pixel_freq;
++ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+@@ -2081,7 +2087,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
++ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
+
+ clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
+@@ -2328,8 +2334,6 @@ static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
+ writel(0x20, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+
+ return 0;
+-
+- return 0;
+ }
+
+ /*
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
+index 32d8976847557a..e2c22edfe6532e 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
+@@ -134,9 +134,11 @@
+ #define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
+ #define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+
++#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
+ #define QSERDES_V5_DP_PHY_STATUS 0x0dc
+
+ /* Only for QMP V6 PHY - DP PHY registers */
++#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
+ #define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
+ #define QSERDES_V6_DP_PHY_STATUS 0x0e4
+
+diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
+deleted file mode 100644
+index 75ac7e7c31aec6..00000000000000
+--- a/drivers/phy/realtek/Kconfig
++++ /dev/null
+@@ -1,32 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-# Phy drivers for Realtek platforms
+-#
+-
+-if ARCH_REALTEK || COMPILE_TEST
+-
+-config PHY_RTK_RTD_USB2PHY
+- tristate "Realtek RTD USB2 PHY Transceiver Driver"
+- depends on USB_SUPPORT
+- select GENERIC_PHY
+- select USB_PHY
+- select USB_COMMON
+- help
+- Enable this to support Realtek SoC USB2 phy transceiver.
+- The DHC (digital home center) RTD series SoCs used the Synopsys
+- DWC3 USB IP. This driver will do the PHY initialization
+- of the parameters.
+-
+-config PHY_RTK_RTD_USB3PHY
+- tristate "Realtek RTD USB3 PHY Transceiver Driver"
+- depends on USB_SUPPORT
+- select GENERIC_PHY
+- select USB_PHY
+- select USB_COMMON
+- help
+- Enable this to support Realtek SoC USB3 phy transceiver.
+- The DHC (digital home center) RTD series SoCs used the Synopsys
+- DWC3 USB IP. This driver will do the PHY initialization
+- of the parameters.
+-
+-endif # ARCH_REALTEK || COMPILE_TEST
+diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
+deleted file mode 100644
+index ed7b47ff8a2685..00000000000000
+--- a/drivers/phy/realtek/Makefile
++++ /dev/null
+@@ -1,3 +0,0 @@
+-# SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
+-obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
+diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
+deleted file mode 100644
+index aedc78bd37f733..00000000000000
+--- a/drivers/phy/realtek/phy-rtk-usb2.c
++++ /dev/null
+@@ -1,1325 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * phy-rtk-usb2.c RTK usb2.0 PHY driver
+- *
+- * Copyright (C) 2023 Realtek Semiconductor Corporation
+- *
+- */
+-
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/uaccess.h>
+-#include <linux/debugfs.h>
+-#include <linux/nvmem-consumer.h>
+-#include <linux/regmap.h>
+-#include <linux/sys_soc.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/phy/phy.h>
+-#include <linux/usb.h>
+-#include <linux/usb/phy.h>
+-#include <linux/usb/hcd.h>
+-
+-/* GUSB2PHYACCn register */
+-#define PHY_NEW_REG_REQ BIT(25)
+-#define PHY_VSTS_BUSY BIT(23)
+-#define PHY_VCTRL_SHIFT 8
+-#define PHY_REG_DATA_MASK 0xff
+-
+-#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
+-#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
+-
+-#define EFUS_USB_DC_CAL_RATE 2
+-#define EFUS_USB_DC_CAL_MAX 7
+-
+-#define EFUS_USB_DC_DIS_RATE 1
+-#define EFUS_USB_DC_DIS_MAX 7
+-
+-#define MAX_PHY_DATA_SIZE 20
+-#define OFFEST_PHY_READ 0x20
+-
+-#define MAX_USB_PHY_NUM 4
+-#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
+-#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
+-#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
+-
+-#define SET_PAGE_OFFSET 0xf4
+-#define SET_PAGE_0 0x9b
+-#define SET_PAGE_1 0xbb
+-#define SET_PAGE_2 0xdb
+-
+-#define PAGE_START 0xe0
+-#define PAGE0_0XE4 0xe4
+-#define PAGE0_0XE6 0xe6
+-#define PAGE0_0XE7 0xe7
+-#define PAGE1_0XE0 0xe0
+-#define PAGE1_0XE2 0xe2
+-
+-#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
+-#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
+-#define DEFAULT_DC_DRIVING_VALUE (0x8)
+-#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
+-#define HS_CLK_SELECT BIT(6)
+-
+-struct phy_reg {
+- void __iomem *reg_wrap_vstatus;
+- void __iomem *reg_gusb2phyacc0;
+- int vstatus_index;
+-};
+-
+-struct phy_data {
+- u8 addr;
+- u8 data;
+-};
+-
+-struct phy_cfg {
+- int page0_size;
+- struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
+- int page1_size;
+- struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
+- int page2_size;
+- struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
+-
+- int num_phy;
+-
+- bool check_efuse;
+- int check_efuse_version;
+-#define CHECK_EFUSE_V1 1
+-#define CHECK_EFUSE_V2 2
+- int efuse_dc_driving_rate;
+- int efuse_dc_disconnect_rate;
+- int dc_driving_mask;
+- int dc_disconnect_mask;
+- bool usb_dc_disconnect_at_page0;
+- int driving_updated_for_dev_dis;
+-
+- bool do_toggle;
+- bool do_toggle_driving;
+- bool use_default_parameter;
+- bool is_double_sensitivity_mode;
+-};
+-
+-struct phy_parameter {
+- struct phy_reg phy_reg;
+-
+- /* Get from efuse */
+- s8 efuse_usb_dc_cal;
+- s8 efuse_usb_dc_dis;
+-
+- /* Get from dts */
+- bool inverse_hstx_sync_clock;
+- u32 driving_level;
+- s32 driving_level_compensate;
+- s32 disconnection_compensate;
+-};
+-
+-struct rtk_phy {
+- struct usb_phy phy;
+- struct device *dev;
+-
+- struct phy_cfg *phy_cfg;
+- int num_phy;
+- struct phy_parameter *phy_parameter;
+-
+- struct dentry *debug_dir;
+-};
+-
+-/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ,,, 0xF7 to 15 */
+-static inline int page_addr_to_array_index(u8 addr)
+-{
+- return (int)((((addr) - PAGE_START) & 0x7) +
+- ((((addr) - PAGE_START) & 0x10) >> 1));
+-}
+-
+-static inline u8 array_index_to_page_addr(int index)
+-{
+- return ((((index) + PAGE_START) & 0x7) +
+- ((((index) & 0x8) << 1) + PAGE_START));
+-}
+-
+-#define PHY_IO_TIMEOUT_USEC (50000)
+-#define PHY_IO_DELAY_US (100)
+-
+-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+-{
+- int ret;
+- unsigned int val;
+-
+- ret = read_poll_timeout(readl, val, ((val & mask) == result),
+- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+- if (ret) {
+- pr_err("%s can't program USB phy\n", __func__);
+- return -ETIMEDOUT;
+- }
+-
+- return 0;
+-}
+-
+-static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
+-{
+- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+- unsigned int val;
+- int ret = 0;
+-
+- addr -= OFFEST_PHY_READ;
+-
+- /* polling until VBusy == 0 */
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return (char)ret;
+-
+- /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return (char)ret;
+-
+- /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return (char)ret;
+-
+- val = readl(reg_gusb2phyacc0);
+-
+- return (char)(val & PHY_REG_DATA_MASK);
+-}
+-
+-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
+-{
+- unsigned int val;
+- void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
+- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+- int shift_bits = phy_reg->vstatus_index * 8;
+- int ret = 0;
+-
+- /* write data to VStatusOut2 (data output to phy) */
+- writel((u32)data << shift_bits, reg_wrap_vstatus);
+-
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return ret;
+-
+- /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+-
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return ret;
+-
+- /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
+- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+-
+- writel(val, reg_gusb2phyacc0);
+- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+- if (ret)
+- return ret;
+-
+- return 0;
+-}
+-
+-static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
+-{
+- switch (page) {
+- case 0:
+- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
+- case 1:
+- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
+- case 2:
+- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
+- default:
+- pr_err("%s error page=%d\n", __func__, page);
+- }
+-
+- return -EINVAL;
+-}
+-
+-static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
+- struct phy_parameter *phy_parameter, u8 data)
+-{
+- u8 ret;
+- s32 val;
+- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+- int offset = 4;
+-
+- val = (s32)((data >> offset) & dc_disconnect_mask)
+- + phy_parameter->efuse_usb_dc_dis
+- + phy_parameter->disconnection_compensate;
+-
+- if (val > dc_disconnect_mask)
+- val = dc_disconnect_mask;
+- else if (val < 0)
+- val = 0;
+-
+- ret = (data & (~(dc_disconnect_mask << offset))) |
+- (val & dc_disconnect_mask) << offset;
+-
+- return ret;
+-}
+-
+-/* updated disconnect level at page0 */
+-static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, bool update)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_data *phy_data_page;
+- struct phy_data *phy_data;
+- u8 addr, data;
+- int offset = 4;
+- s32 dc_disconnect_mask;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_reg = &phy_parameter->phy_reg;
+-
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- i = page_addr_to_array_index(PAGE0_0XE4);
+- phy_data = phy_data_page + i;
+- if (!phy_data->addr) {
+- phy_data->addr = PAGE0_0XE4;
+- phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+- }
+-
+- addr = phy_data->addr;
+- data = phy_data->data;
+- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+-
+- if (update)
+- data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
+- else
+- data = (data & ~(dc_disconnect_mask << offset)) |
+- (DEFAULT_DC_DISCONNECTION_VALUE << offset);
+-
+- if (rtk_phy_write(phy_reg, addr, data))
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+-}
+-
+-static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
+- struct phy_parameter *phy_parameter, u8 data)
+-{
+- u8 ret;
+- s32 val;
+- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- val = (s32)(data & dc_disconnect_mask)
+- + phy_parameter->efuse_usb_dc_dis
+- + phy_parameter->disconnection_compensate;
+- } else { /* for CHECK_EFUSE_V2 or no efuse */
+- if (phy_parameter->efuse_usb_dc_dis)
+- val = (s32)(phy_parameter->efuse_usb_dc_dis +
+- phy_parameter->disconnection_compensate);
+- else
+- val = (s32)((data & dc_disconnect_mask) +
+- phy_parameter->disconnection_compensate);
+- }
+-
+- if (val > dc_disconnect_mask)
+- val = dc_disconnect_mask;
+- else if (val < 0)
+- val = 0;
+-
+- ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
+-
+- return ret;
+-}
+-
+-/* updated disconnect level at page1 */
+-static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, bool update)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_data *phy_data_page;
+- struct phy_data *phy_data;
+- struct phy_reg *phy_reg;
+- u8 addr, data;
+- s32 dc_disconnect_mask;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_reg = &phy_parameter->phy_reg;
+-
+- /* Set page 1 */
+- phy_data_page = phy_cfg->page1;
+- rtk_phy_set_page(phy_reg, 1);
+-
+- i = page_addr_to_array_index(PAGE1_0XE2);
+- phy_data = phy_data_page + i;
+- if (!phy_data->addr) {
+- phy_data->addr = PAGE1_0XE2;
+- phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
+- }
+-
+- addr = phy_data->addr;
+- data = phy_data->data;
+- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+-
+- if (update)
+- data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
+- else
+- data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
+-
+- if (rtk_phy_write(phy_reg, addr, data))
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+-}
+-
+-static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, bool update)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+-
+- if (phy_cfg->usb_dc_disconnect_at_page0)
+- update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
+- else
+- update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
+-}
+-
+-static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
+- struct phy_parameter *phy_parameter, u8 data)
+-{
+- s32 driving_level_compensate = phy_parameter->driving_level_compensate;
+- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+- s32 val;
+- u8 ret;
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- val = (s32)(data & dc_driving_mask) + driving_level_compensate
+- + phy_parameter->efuse_usb_dc_cal;
+- } else { /* for CHECK_EFUSE_V2 or no efuse */
+- if (phy_parameter->efuse_usb_dc_cal)
+- val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
+- + driving_level_compensate);
+- else
+- val = (s32)(data & dc_driving_mask);
+- }
+-
+- if (val > dc_driving_mask)
+- val = dc_driving_mask;
+- else if (val < 0)
+- val = 0;
+-
+- ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
+-
+- return ret;
+-}
+-
+-static void update_dc_driving_level(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+-
+- phy_reg = &phy_parameter->phy_reg;
+- phy_cfg = rtk_phy->phy_cfg;
+- if (!phy_cfg->page0[4].addr) {
+- rtk_phy_set_page(phy_reg, 0);
+- phy_cfg->page0[4].addr = PAGE0_0XE4;
+- phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+- }
+-
+- if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
+- u32 dc_driving_mask;
+- u8 driving_level;
+- u8 data;
+-
+- data = phy_cfg->page0[4].data;
+- dc_driving_mask = phy_cfg->dc_driving_mask;
+- driving_level = data & dc_driving_mask;
+-
+- dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
+- __func__, driving_level, phy_parameter->driving_level);
+-
+- phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
+- (phy_parameter->driving_level & dc_driving_mask);
+- }
+-
+- phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
+- phy_parameter,
+- phy_cfg->page0[4].data);
+-}
+-
+-static void update_hs_clk_select(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (phy_parameter->inverse_hstx_sync_clock) {
+- if (!phy_cfg->page0[6].addr) {
+- rtk_phy_set_page(phy_reg, 0);
+- phy_cfg->page0[6].addr = PAGE0_0XE6;
+- phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
+- }
+-
+- phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
+- }
+-}
+-
+-static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
+- int index, bool connect)
+-{
+- struct phy_parameter *phy_parameter;
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_data *phy_data_page;
+- u8 addr, data;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (!phy_cfg->do_toggle)
+- goto out;
+-
+- if (phy_cfg->is_double_sensitivity_mode)
+- goto do_toggle_driving;
+-
+- /* Set page 0 */
+- rtk_phy_set_page(phy_reg, 0);
+-
+- addr = PAGE0_0XE7;
+- data = rtk_phy_read(phy_reg, addr);
+-
+- if (connect)
+- rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
+- else
+- rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
+-
+-do_toggle_driving:
+-
+- if (!phy_cfg->do_toggle_driving)
+- goto do_toggle;
+-
+- /* Page 0 addr 0xE4 driving capability */
+-
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- i = page_addr_to_array_index(PAGE0_0XE4);
+- addr = phy_data_page[i].addr;
+- data = phy_data_page[i].data;
+-
+- if (connect) {
+- rtk_phy_write(phy_reg, addr, data);
+- } else {
+- u8 value;
+- s32 tmp;
+- s32 driving_updated =
+- phy_cfg->driving_updated_for_dev_dis;
+- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+-
+- tmp = (s32)(data & dc_driving_mask) + driving_updated;
+-
+- if (tmp > dc_driving_mask)
+- tmp = dc_driving_mask;
+- else if (tmp < 0)
+- tmp = 0;
+-
+- value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
+-
+- rtk_phy_write(phy_reg, addr, value);
+- }
+-
+-do_toggle:
+- /* restore dc disconnect level before toggle */
+- update_dc_disconnect_level(rtk_phy, phy_parameter, false);
+-
+- /* Set page 1 */
+- rtk_phy_set_page(phy_reg, 1);
+-
+- addr = PAGE1_0XE0;
+- data = rtk_phy_read(phy_reg, addr);
+-
+- rtk_phy_write(phy_reg, addr, data &
+- (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+- mdelay(1);
+- rtk_phy_write(phy_reg, addr, data |
+- (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+-
+- /* update dc disconnect level after toggle */
+- update_dc_disconnect_level(rtk_phy, phy_parameter, true);
+-
+-out:
+- return;
+-}
+-
+-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+-{
+- struct phy_parameter *phy_parameter;
+- struct phy_cfg *phy_cfg;
+- struct phy_data *phy_data_page;
+- struct phy_reg *phy_reg;
+- int i;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (phy_cfg->use_default_parameter) {
+- dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
+- __func__, index);
+- goto do_toggle;
+- }
+-
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- for (i = 0; i < phy_cfg->page0_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = phy_data->addr;
+- u8 data = phy_data->data;
+-
+- if (!addr)
+- continue;
+-
+- if (rtk_phy_write(phy_reg, addr, data)) {
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+- return -EINVAL;
+- }
+- }
+-
+- /* Set page 1 */
+- phy_data_page = phy_cfg->page1;
+- rtk_phy_set_page(phy_reg, 1);
+-
+- for (i = 0; i < phy_cfg->page1_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = phy_data->addr;
+- u8 data = phy_data->data;
+-
+- if (!addr)
+- continue;
+-
+- if (rtk_phy_write(phy_reg, addr, data)) {
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+- return -EINVAL;
+- }
+- }
+-
+- if (phy_cfg->page2_size == 0)
+- goto do_toggle;
+-
+- /* Set page 2 */
+- phy_data_page = phy_cfg->page2;
+- rtk_phy_set_page(phy_reg, 2);
+-
+- for (i = 0; i < phy_cfg->page2_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = phy_data->addr;
+- u8 data = phy_data->data;
+-
+- if (!addr)
+- continue;
+-
+- if (rtk_phy_write(phy_reg, addr, data)) {
+- dev_err(rtk_phy->dev,
+- "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
+- __func__, addr, data);
+- return -EINVAL;
+- }
+- }
+-
+-do_toggle:
+- do_rtk_phy_toggle(rtk_phy, index, false);
+-
+- return 0;
+-}
+-
+-static int rtk_phy_init(struct phy *phy)
+-{
+- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+- unsigned long phy_init_time = jiffies;
+- int i, ret = 0;
+-
+- if (!rtk_phy)
+- return -EINVAL;
+-
+- for (i = 0; i < rtk_phy->num_phy; i++)
+- ret = do_rtk_phy_init(rtk_phy, i);
+-
+- dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
+- jiffies_to_msecs(jiffies - phy_init_time));
+- return ret;
+-}
+-
+-static int rtk_phy_exit(struct phy *phy)
+-{
+- return 0;
+-}
+-
+-static const struct phy_ops ops = {
+- .init = rtk_phy_init,
+- .exit = rtk_phy_exit,
+- .owner = THIS_MODULE,
+-};
+-
+-static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port)
+-{
+- int index = port;
+- struct rtk_phy *rtk_phy = NULL;
+-
+- rtk_phy = dev_get_drvdata(usb2_phy->dev);
+-
+- if (index > rtk_phy->num_phy) {
+- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+- __func__, index, rtk_phy->num_phy);
+- return;
+- }
+-
+- do_rtk_phy_toggle(rtk_phy, index, connect);
+-}
+-
+-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
+- u16 portstatus, u16 portchange)
+-{
+- bool connect = false;
+-
+- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
+- __func__, port, (int)portstatus, (int)portchange);
+- if (portstatus & USB_PORT_STAT_CONNECTION)
+- connect = true;
+-
+- if (portchange & USB_PORT_STAT_C_CONNECTION)
+- rtk_phy_toggle(x, connect, port);
+-
+- return 0;
+-}
+-
+-#ifdef CONFIG_DEBUG_FS
+-static struct dentry *create_phy_debug_root(void)
+-{
+- struct dentry *phy_debug_root;
+-
+- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+- if (!phy_debug_root)
+- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+-
+- return phy_debug_root;
+-}
+-
+-static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
+-{
+- struct rtk_phy *rtk_phy = s->private;
+- struct phy_cfg *phy_cfg;
+- int i, index;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+-
+- seq_puts(s, "Property:\n");
+- seq_printf(s, " check_efuse: %s\n",
+- phy_cfg->check_efuse ? "Enable" : "Disable");
+- seq_printf(s, " check_efuse_version: %d\n",
+- phy_cfg->check_efuse_version);
+- seq_printf(s, " efuse_dc_driving_rate: %d\n",
+- phy_cfg->efuse_dc_driving_rate);
+- seq_printf(s, " dc_driving_mask: 0x%x\n",
+- phy_cfg->dc_driving_mask);
+- seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
+- phy_cfg->efuse_dc_disconnect_rate);
+- seq_printf(s, " dc_disconnect_mask: 0x%x\n",
+- phy_cfg->dc_disconnect_mask);
+- seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
+- phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
+- seq_printf(s, " do_toggle: %s\n",
+- phy_cfg->do_toggle ? "Enable" : "Disable");
+- seq_printf(s, " do_toggle_driving: %s\n",
+- phy_cfg->do_toggle_driving ? "Enable" : "Disable");
+- seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
+- phy_cfg->driving_updated_for_dev_dis);
+- seq_printf(s, " use_default_parameter: %s\n",
+- phy_cfg->use_default_parameter ? "Enable" : "Disable");
+- seq_printf(s, " is_double_sensitivity_mode: %s\n",
+- phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- struct phy_parameter *phy_parameter;
+- struct phy_reg *phy_reg;
+- struct phy_data *phy_data_page;
+-
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- seq_printf(s, "PHY %d:\n", index);
+-
+- seq_puts(s, "Page 0:\n");
+- /* Set page 0 */
+- phy_data_page = phy_cfg->page0;
+- rtk_phy_set_page(phy_reg, 0);
+-
+- for (i = 0; i < phy_cfg->page0_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = array_index_to_page_addr(i);
+- u8 data = phy_data->data;
+- u8 value = rtk_phy_read(phy_reg, addr);
+-
+- if (phy_data->addr)
+- seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+- addr, data, value);
+- else
+- seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
+- addr, value);
+- }
+-
+- seq_puts(s, "Page 1:\n");
+- /* Set page 1 */
+- phy_data_page = phy_cfg->page1;
+- rtk_phy_set_page(phy_reg, 1);
+-
+- for (i = 0; i < phy_cfg->page1_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = array_index_to_page_addr(i);
+- u8 data = phy_data->data;
+- u8 value = rtk_phy_read(phy_reg, addr);
+-
+- if (phy_data->addr)
+- seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+- addr, data, value);
+- else
+- seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
+- addr, value);
+- }
+-
+- if (phy_cfg->page2_size == 0)
+- goto out;
+-
+- seq_puts(s, "Page 2:\n");
+- /* Set page 2 */
+- phy_data_page = phy_cfg->page2;
+- rtk_phy_set_page(phy_reg, 2);
+-
+- for (i = 0; i < phy_cfg->page2_size; i++) {
+- struct phy_data *phy_data = phy_data_page + i;
+- u8 addr = array_index_to_page_addr(i);
+- u8 data = phy_data->data;
+- u8 value = rtk_phy_read(phy_reg, addr);
+-
+- if (phy_data->addr)
+- seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+- addr, data, value);
+- else
+- seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
+- addr, value);
+- }
+-
+-out:
+- seq_puts(s, "PHY Property:\n");
+- seq_printf(s, " efuse_usb_dc_cal: %d\n",
+- (int)phy_parameter->efuse_usb_dc_cal);
+- seq_printf(s, " efuse_usb_dc_dis: %d\n",
+- (int)phy_parameter->efuse_usb_dc_dis);
+- seq_printf(s, " inverse_hstx_sync_clock: %s\n",
+- phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
+- seq_printf(s, " driving_level: %d\n",
+- phy_parameter->driving_level);
+- seq_printf(s, " driving_level_compensate: %d\n",
+- phy_parameter->driving_level_compensate);
+- seq_printf(s, " disconnection_compensate: %d\n",
+- phy_parameter->disconnection_compensate);
+- }
+-
+- return 0;
+-}
+-DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
+-
+-static inline void create_debug_files(struct rtk_phy *rtk_phy)
+-{
+- struct dentry *phy_debug_root = NULL;
+-
+- phy_debug_root = create_phy_debug_root();
+- if (!phy_debug_root)
+- return;
+-
+- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
+- phy_debug_root);
+-
+- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+- &rtk_usb2_parameter_fops);
+-
+- return;
+-}
+-
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+-{
+- debugfs_remove_recursive(rtk_phy->debug_dir);
+-}
+-#else
+-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+-#endif /* CONFIG_DEBUG_FS */
+-
+-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, int index)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+- u8 value = 0;
+- struct nvmem_cell *cell;
+- struct soc_device_attribute rtk_soc_groot[] = {
+- { .family = "Realtek Groot",},
+- { /* empty */ } };
+-
+- if (!phy_cfg->check_efuse)
+- goto out;
+-
+- /* Read efuse for usb dc cal */
+- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
+- if (IS_ERR(cell)) {
+- dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
+- __func__, PTR_ERR(cell));
+- } else {
+- unsigned char *buf;
+- size_t buf_size;
+-
+- buf = nvmem_cell_read(cell, &buf_size);
+- if (!IS_ERR(buf)) {
+- value = buf[0] & phy_cfg->dc_driving_mask;
+- kfree(buf);
+- }
+- nvmem_cell_put(cell);
+- }
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- int rate = phy_cfg->efuse_dc_driving_rate;
+-
+- if (value <= EFUS_USB_DC_CAL_MAX)
+- phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
+- else
+- phy_parameter->efuse_usb_dc_cal = -(int8_t)
+- ((EFUS_USB_DC_CAL_MAX & value) * rate);
+-
+- if (soc_device_match(rtk_soc_groot)) {
+- dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
+-
+- /* We don't multiple dc_cal_rate=2 for positive dc cal compensate */
+- if (value <= EFUS_USB_DC_CAL_MAX)
+- phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
+-
+- /* We set max dc cal compensate is 0x8 if otp is 0x7 */
+- if (value == 0x7)
+- phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
+- }
+- } else { /* for CHECK_EFUSE_V2 */
+- phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
+- }
+-
+- /* Read efuse for usb dc disconnect level */
+- value = 0;
+- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
+- if (IS_ERR(cell)) {
+- dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
+- __func__, PTR_ERR(cell));
+- } else {
+- unsigned char *buf;
+- size_t buf_size;
+-
+- buf = nvmem_cell_read(cell, &buf_size);
+- if (!IS_ERR(buf)) {
+- value = buf[0] & phy_cfg->dc_disconnect_mask;
+- kfree(buf);
+- }
+- nvmem_cell_put(cell);
+- }
+-
+- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+- int rate = phy_cfg->efuse_dc_disconnect_rate;
+-
+- if (value <= EFUS_USB_DC_DIS_MAX)
+- phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
+- else
+- phy_parameter->efuse_usb_dc_dis = -(int8_t)
+- ((EFUS_USB_DC_DIS_MAX & value) * rate);
+- } else { /* for CHECK_EFUSE_V2 */
+- phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
+- }
+-
+-out:
+- return 0;
+-}
+-
+-static int parse_phy_data(struct rtk_phy *rtk_phy)
+-{
+- struct device *dev = rtk_phy->dev;
+- struct device_node *np = dev->of_node;
+- struct phy_parameter *phy_parameter;
+- int ret = 0;
+- int index;
+-
+- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+- rtk_phy->num_phy, GFP_KERNEL);
+- if (!rtk_phy->phy_parameter)
+- return -ENOMEM;
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+-
+- phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
+- phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
+- phy_parameter->phy_reg.vstatus_index = index;
+-
+- if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
+- phy_parameter->inverse_hstx_sync_clock = true;
+- else
+- phy_parameter->inverse_hstx_sync_clock = false;
+-
+- if (of_property_read_u32_index(np, "realtek,driving-level",
+- index, &phy_parameter->driving_level))
+- phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
+-
+- if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
+- index, &phy_parameter->driving_level_compensate))
+- phy_parameter->driving_level_compensate = 0;
+-
+- if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
+- index, &phy_parameter->disconnection_compensate))
+- phy_parameter->disconnection_compensate = 0;
+-
+- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+-
+- update_dc_driving_level(rtk_phy, phy_parameter);
+-
+- update_hs_clk_select(rtk_phy, phy_parameter);
+- }
+-
+- return ret;
+-}
+-
+-static int rtk_usb2phy_probe(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy;
+- struct device *dev = &pdev->dev;
+- struct phy *generic_phy;
+- struct phy_provider *phy_provider;
+- const struct phy_cfg *phy_cfg;
+- int ret = 0;
+-
+- phy_cfg = of_device_get_match_data(dev);
+- if (!phy_cfg) {
+- dev_err(dev, "phy config are not assigned!\n");
+- return -EINVAL;
+- }
+-
+- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+- if (!rtk_phy)
+- return -ENOMEM;
+-
+- rtk_phy->dev = &pdev->dev;
+- rtk_phy->phy.dev = rtk_phy->dev;
+- rtk_phy->phy.label = "rtk-usb2phy";
+- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
+-
+- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+-
+- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+-
+- rtk_phy->num_phy = phy_cfg->num_phy;
+-
+- ret = parse_phy_data(rtk_phy);
+- if (ret)
+- goto err;
+-
+- platform_set_drvdata(pdev, rtk_phy);
+-
+- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+- if (IS_ERR(generic_phy))
+- return PTR_ERR(generic_phy);
+-
+- phy_set_drvdata(generic_phy, rtk_phy);
+-
+- phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
+- of_phy_simple_xlate);
+- if (IS_ERR(phy_provider))
+- return PTR_ERR(phy_provider);
+-
+- ret = usb_add_phy_dev(&rtk_phy->phy);
+- if (ret)
+- goto err;
+-
+- create_debug_files(rtk_phy);
+-
+-err:
+- return ret;
+-}
+-
+-static void rtk_usb2phy_remove(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+-
+- remove_debug_files(rtk_phy);
+-
+- usb_remove_phy(&rtk_phy->phy);
+-}
+-
+-static const struct phy_cfg rtd1295_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0x90},
+- [3] = {0xe3, 0x3a},
+- [4] = {0xe4, 0x68},
+- [6] = {0xe6, 0x91},
+- [13] = {0xf5, 0x81},
+- [15] = {0xf7, 0x02}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 1,
+- .check_efuse = false,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1395_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [4] = {0xe4, 0xac},
+- [13] = {0xf5, 0x00},
+- [15] = {0xf7, 0x02}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 1,
+- .check_efuse = false,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1395_phy_cfg_2port = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [4] = {0xe4, 0xac},
+- [13] = {0xf5, 0x00},
+- [15] = {0xf7, 0x02}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 2,
+- .check_efuse = false,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1619_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [4] = {0xe4, 0x68}, },
+- .page1_size = 8,
+- .page1 = { /* default parameter */ },
+- .page2_size = 0,
+- .page2 = { /* no parameter */ },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = false,
+-};
+-
+-static const struct phy_cfg rtd1319_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0x18},
+- [4] = {0xe4, 0x6a},
+- [7] = {0xe7, 0x71},
+- [13] = {0xf5, 0x15},
+- [15] = {0xf7, 0x32}, },
+- .page1_size = 8,
+- .page1 = { [3] = {0xe3, 0x44}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [0] = {0xe0, 0x01}, },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = true,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1312c_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0x14},
+- [4] = {0xe4, 0x67},
+- [5] = {0xe5, 0x55}, },
+- .page1_size = 8,
+- .page1 = { [3] = {0xe3, 0x23},
+- [6] = {0xe6, 0x58}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { /* default parameter */ },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = 1,
+- .dc_driving_mask = 0xf,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = true,
+- .do_toggle = true,
+- .do_toggle_driving = true,
+- .driving_updated_for_dev_dis = 0xf,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1619b_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0xa3},
+- [4] = {0xe4, 0x88},
+- [5] = {0xe5, 0x4f},
+- [6] = {0xe6, 0x02}, },
+- .page1_size = 8,
+- .page1 = { [3] = {0xe3, 0x64}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [7] = {0xe7, 0x45}, },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+- .dc_driving_mask = 0x1f,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = false,
+- .do_toggle = true,
+- .do_toggle_driving = true,
+- .driving_updated_for_dev_dis = 0x8,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1319d_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0xa3},
+- [4] = {0xe4, 0x8e},
+- [5] = {0xe5, 0x4f},
+- [6] = {0xe6, 0x02}, },
+- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+- .page1 = { [14] = {0xf5, 0x1}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [7] = {0xe7, 0x44}, },
+- .check_efuse = true,
+- .num_phy = 1,
+- .check_efuse_version = CHECK_EFUSE_V1,
+- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+- .dc_driving_mask = 0x1f,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = false,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0x8,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct phy_cfg rtd1315e_phy_cfg = {
+- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+- .page0 = { [0] = {0xe0, 0xa3},
+- [4] = {0xe4, 0x8c},
+- [5] = {0xe5, 0x4f},
+- [6] = {0xe6, 0x02}, },
+- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+- .page1 = { [3] = {0xe3, 0x7f},
+- [14] = {0xf5, 0x01}, },
+- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+- .page2 = { [7] = {0xe7, 0x44}, },
+- .num_phy = 1,
+- .check_efuse = true,
+- .check_efuse_version = CHECK_EFUSE_V2,
+- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+- .dc_driving_mask = 0x1f,
+- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+- .dc_disconnect_mask = 0xf,
+- .usb_dc_disconnect_at_page0 = false,
+- .do_toggle = true,
+- .do_toggle_driving = false,
+- .driving_updated_for_dev_dis = 0x8,
+- .use_default_parameter = false,
+- .is_double_sensitivity_mode = true,
+-};
+-
+-static const struct of_device_id usbphy_rtk_dt_match[] = {
+- { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
+- { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
+- { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
+- { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
+- { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
+- { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
+- { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
+- { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
+- { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+-
+-static struct platform_driver rtk_usb2phy_driver = {
+- .probe = rtk_usb2phy_probe,
+- .remove_new = rtk_usb2phy_remove,
+- .driver = {
+- .name = "rtk-usb2phy",
+- .of_match_table = usbphy_rtk_dt_match,
+- },
+-};
+-
+-module_platform_driver(rtk_usb2phy_driver);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform: rtk-usb2phy");
+-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+-MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
+diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
+deleted file mode 100644
+index dfb3122f3f114b..00000000000000
+--- a/drivers/phy/realtek/phy-rtk-usb3.c
++++ /dev/null
+@@ -1,761 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * phy-rtk-usb3.c RTK usb3.0 phy driver
+- *
+- * copyright (c) 2023 realtek semiconductor corporation
+- *
+- */
+-
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/uaccess.h>
+-#include <linux/debugfs.h>
+-#include <linux/nvmem-consumer.h>
+-#include <linux/regmap.h>
+-#include <linux/sys_soc.h>
+-#include <linux/mfd/syscon.h>
+-#include <linux/phy/phy.h>
+-#include <linux/usb.h>
+-#include <linux/usb/hcd.h>
+-#include <linux/usb/phy.h>
+-
+-#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
+-#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
+-#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
+-#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
+-
+-#define MAX_USB_PHY_DATA_SIZE 0x30
+-#define PHY_ADDR_0X09 0x09
+-#define PHY_ADDR_0X0B 0x0b
+-#define PHY_ADDR_0X0D 0x0d
+-#define PHY_ADDR_0X10 0x10
+-#define PHY_ADDR_0X1F 0x1f
+-#define PHY_ADDR_0X20 0x20
+-#define PHY_ADDR_0X21 0x21
+-#define PHY_ADDR_0X30 0x30
+-
+-#define REG_0X09_FORCE_CALIBRATION BIT(9)
+-#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
+-#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
+-#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
+-#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
+-#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
+-
+-#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
+-#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
+-#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
+-#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
+-#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
+-#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
+-
+-#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
+-#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
+-
+-struct phy_reg {
+- void __iomem *reg_mdio_ctl;
+-};
+-
+-struct phy_data {
+- u8 addr;
+- u16 data;
+-};
+-
+-struct phy_cfg {
+- int param_size;
+- struct phy_data param[MAX_USB_PHY_DATA_SIZE];
+-
+- bool check_efuse;
+- bool do_toggle;
+- bool do_toggle_once;
+- bool use_default_parameter;
+- bool check_rx_front_end_offset;
+-};
+-
+-struct phy_parameter {
+- struct phy_reg phy_reg;
+-
+- /* Get from efuse */
+- u8 efuse_usb_u3_tx_lfps_swing_trim;
+-
+- /* Get from dts */
+- u32 amplitude_control_coarse;
+- u32 amplitude_control_fine;
+-};
+-
+-struct rtk_phy {
+- struct usb_phy phy;
+- struct device *dev;
+-
+- struct phy_cfg *phy_cfg;
+- int num_phy;
+- struct phy_parameter *phy_parameter;
+-
+- struct dentry *debug_dir;
+-};
+-
+-#define PHY_IO_TIMEOUT_USEC (50000)
+-#define PHY_IO_DELAY_US (100)
+-
+-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+-{
+- int ret;
+- unsigned int val;
+-
+- ret = read_poll_timeout(readl, val, ((val & mask) == result),
+- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+- if (ret) {
+- pr_err("%s can't program USB phy\n", __func__);
+- return -ETIMEDOUT;
+- }
+-
+- return 0;
+-}
+-
+-static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
+-{
+- return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
+-}
+-
+-static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
+-{
+- unsigned int tmp;
+- u32 value;
+-
+- tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
+-
+- writel(tmp, phy_reg->reg_mdio_ctl);
+-
+- rtk_phy3_wait_vbusy(phy_reg);
+-
+- value = readl(phy_reg->reg_mdio_ctl);
+- value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
+-
+- return (u16)value;
+-}
+-
+-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
+-{
+- unsigned int val;
+-
+- val = USB_MDIO_CTRL_PHY_WRITE |
+- (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
+- (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
+-
+- writel(val, phy_reg->reg_mdio_ctl);
+-
+- rtk_phy3_wait_vbusy(phy_reg);
+-
+- return 0;
+-}
+-
+-static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_parameter *phy_parameter;
+- struct phy_data *phy_data;
+- u8 addr;
+- u16 data;
+- int i;
+-
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (!phy_cfg->do_toggle)
+- return;
+-
+- i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
+- phy_data = phy_cfg->param + i;
+- addr = phy_data->addr;
+- data = phy_data->data;
+-
+- if (!addr && !data) {
+- addr = PHY_ADDR_0X09;
+- data = rtk_phy_read(phy_reg, addr);
+- phy_data->addr = addr;
+- phy_data->data = data;
+- }
+-
+- rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
+- mdelay(1);
+- rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
+-}
+-
+-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+- struct phy_parameter *phy_parameter;
+- int i = 0;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- if (phy_cfg->use_default_parameter)
+- goto do_toggle;
+-
+- for (i = 0; i < phy_cfg->param_size; i++) {
+- struct phy_data *phy_data = phy_cfg->param + i;
+- u8 addr = phy_data->addr;
+- u16 data = phy_data->data;
+-
+- if (!addr && !data)
+- continue;
+-
+- rtk_phy_write(phy_reg, addr, data);
+- }
+-
+-do_toggle:
+- if (phy_cfg->do_toggle_once)
+- phy_cfg->do_toggle = true;
+-
+- do_rtk_usb3_phy_toggle(rtk_phy, index, false);
+-
+- if (phy_cfg->do_toggle_once) {
+- u16 check_value = 0;
+- int count = 10;
+- u16 value_0x0d, value_0x10;
+-
+- /* Enable Debug mode by set 0x0D and 0x10 */
+- value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
+- value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
+-
+- rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
+- value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
+- rtk_phy_write(phy_reg, PHY_ADDR_0X10,
+- (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
+- REG_0X10_DEBUG_MODE_SETTING);
+-
+- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+-
+- while (!(check_value & BIT(15))) {
+- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+- mdelay(1);
+- if (count-- < 0)
+- break;
+- }
+-
+- if (!(check_value & BIT(15)))
+- dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
+- PHY_ADDR_0X30, check_value);
+-
+- /* Disable Debug mode by set 0x0D and 0x10 to default*/
+- rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
+- rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
+-
+- phy_cfg->do_toggle = false;
+- }
+-
+- if (phy_cfg->check_rx_front_end_offset) {
+- u16 rx_offset_code, rx_offset_range;
+- u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
+- u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
+- bool do_update = false;
+-
+- rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
+- if (((rx_offset_code & code_mask) == 0x0) ||
+- ((rx_offset_code & code_mask) == code_mask))
+- do_update = true;
+-
+- rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
+- if (((rx_offset_range & range_mask) == range_mask) && do_update) {
+- dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
+- rx_offset_code, rx_offset_range);
+- do_update = false;
+- }
+-
+- if (do_update) {
+- u16 tmp1, tmp2;
+-
+- tmp1 = rx_offset_range & (~range_mask);
+- tmp2 = rx_offset_range & range_mask;
+- tmp2 += (1 << 2);
+- rx_offset_range = tmp1 | (tmp2 & range_mask);
+- rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
+- goto do_toggle;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static int rtk_phy_init(struct phy *phy)
+-{
+- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+- int ret = 0;
+- int i;
+- unsigned long phy_init_time = jiffies;
+-
+- for (i = 0; i < rtk_phy->num_phy; i++)
+- ret = do_rtk_phy_init(rtk_phy, i);
+-
+- dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
+- jiffies_to_msecs(jiffies - phy_init_time));
+-
+- return ret;
+-}
+-
+-static int rtk_phy_exit(struct phy *phy)
+-{
+- return 0;
+-}
+-
+-static const struct phy_ops ops = {
+- .init = rtk_phy_init,
+- .exit = rtk_phy_exit,
+- .owner = THIS_MODULE,
+-};
+-
+-static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
+-{
+- int index = port;
+- struct rtk_phy *rtk_phy = NULL;
+-
+- rtk_phy = dev_get_drvdata(usb3_phy->dev);
+-
+- if (index > rtk_phy->num_phy) {
+- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+- __func__, index, rtk_phy->num_phy);
+- return;
+- }
+-
+- do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
+-}
+-
+-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
+- u16 portstatus, u16 portchange)
+-{
+- bool connect = false;
+-
+- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
+- __func__, port, (int)portstatus, (int)portchange);
+- if (portstatus & USB_PORT_STAT_CONNECTION)
+- connect = true;
+-
+- if (portchange & USB_PORT_STAT_C_CONNECTION)
+- rtk_phy_toggle(x, connect, port);
+-
+- return 0;
+-}
+-
+-#ifdef CONFIG_DEBUG_FS
+-static struct dentry *create_phy_debug_root(void)
+-{
+- struct dentry *phy_debug_root;
+-
+- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+- if (!phy_debug_root)
+- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+-
+- return phy_debug_root;
+-}
+-
+-static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
+-{
+- struct rtk_phy *rtk_phy = s->private;
+- struct phy_cfg *phy_cfg;
+- int i, index;
+-
+- phy_cfg = rtk_phy->phy_cfg;
+-
+- seq_puts(s, "Property:\n");
+- seq_printf(s, " check_efuse: %s\n",
+- phy_cfg->check_efuse ? "Enable" : "Disable");
+- seq_printf(s, " do_toggle: %s\n",
+- phy_cfg->do_toggle ? "Enable" : "Disable");
+- seq_printf(s, " do_toggle_once: %s\n",
+- phy_cfg->do_toggle_once ? "Enable" : "Disable");
+- seq_printf(s, " use_default_parameter: %s\n",
+- phy_cfg->use_default_parameter ? "Enable" : "Disable");
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- struct phy_reg *phy_reg;
+- struct phy_parameter *phy_parameter;
+-
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+- phy_reg = &phy_parameter->phy_reg;
+-
+- seq_printf(s, "PHY %d:\n", index);
+-
+- for (i = 0; i < phy_cfg->param_size; i++) {
+- struct phy_data *phy_data = phy_cfg->param + i;
+- u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
+- u16 data = phy_data->data;
+-
+- if (!phy_data->addr && !data)
+- seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
+- addr, rtk_phy_read(phy_reg, addr));
+- else
+- seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
+- addr, data, rtk_phy_read(phy_reg, addr));
+- }
+-
+- seq_puts(s, "PHY Property:\n");
+- seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
+- (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
+- seq_printf(s, " amplitude_control_coarse: 0x%x\n",
+- (int)phy_parameter->amplitude_control_coarse);
+- seq_printf(s, " amplitude_control_fine: 0x%x\n",
+- (int)phy_parameter->amplitude_control_fine);
+- }
+-
+- return 0;
+-}
+-DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
+-
+-static inline void create_debug_files(struct rtk_phy *rtk_phy)
+-{
+- struct dentry *phy_debug_root = NULL;
+-
+- phy_debug_root = create_phy_debug_root();
+-
+- if (!phy_debug_root)
+- return;
+-
+- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
+-
+- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+- &rtk_usb3_parameter_fops);
+-
+- return;
+-}
+-
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+-{
+- debugfs_remove_recursive(rtk_phy->debug_dir);
+-}
+-#else
+-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+-#endif /* CONFIG_DEBUG_FS */
+-
+-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter, int index)
+-{
+- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+- u8 value = 0;
+- struct nvmem_cell *cell;
+-
+- if (!phy_cfg->check_efuse)
+- goto out;
+-
+- cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
+- if (IS_ERR(cell)) {
+- dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
+- __func__, PTR_ERR(cell));
+- } else {
+- unsigned char *buf;
+- size_t buf_size;
+-
+- buf = nvmem_cell_read(cell, &buf_size);
+- if (!IS_ERR(buf)) {
+- value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
+- kfree(buf);
+- }
+- nvmem_cell_put(cell);
+- }
+-
+- if (value > 0 && value < 0x8)
+- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
+- else
+- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
+-
+-out:
+- return 0;
+-}
+-
+-static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
+- struct phy_parameter *phy_parameter)
+-{
+- struct phy_cfg *phy_cfg;
+- struct phy_reg *phy_reg;
+-
+- phy_reg = &phy_parameter->phy_reg;
+- phy_cfg = rtk_phy->phy_cfg;
+-
+- if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
+- u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
+- u16 data;
+-
+- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+- } else {
+- data = phy_cfg->param[PHY_ADDR_0X20].data;
+- }
+-
+- data &= (~val_mask);
+- data |= (phy_parameter->amplitude_control_coarse & val_mask);
+-
+- phy_cfg->param[PHY_ADDR_0X20].data = data;
+- }
+-
+- if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
+- u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
+- u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
+- int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
+- u16 data;
+-
+- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+- } else {
+- data = phy_cfg->param[PHY_ADDR_0X20].data;
+- }
+-
+- data &= ~(val_mask << val_shift);
+- data |= ((efuse_val & val_mask) << val_shift);
+-
+- phy_cfg->param[PHY_ADDR_0X20].data = data;
+- }
+-
+- if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
+- u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
+-
+- if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
+- phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
+-
+- phy_cfg->param[PHY_ADDR_0X21].data =
+- phy_parameter->amplitude_control_fine & val_mask;
+- }
+-}
+-
+-static int parse_phy_data(struct rtk_phy *rtk_phy)
+-{
+- struct device *dev = rtk_phy->dev;
+- struct phy_parameter *phy_parameter;
+- int ret = 0;
+- int index;
+-
+- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+- rtk_phy->num_phy, GFP_KERNEL);
+- if (!rtk_phy->phy_parameter)
+- return -ENOMEM;
+-
+- for (index = 0; index < rtk_phy->num_phy; index++) {
+- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+-
+- phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
+-
+- /* Amplitude control address 0x20 bit 0 to bit 7 */
+- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
+- &phy_parameter->amplitude_control_coarse))
+- phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
+-
+- /* Amplitude control address 0x21 bit 0 to bit 16 */
+- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
+- &phy_parameter->amplitude_control_fine))
+- phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
+-
+- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+-
+- update_amplitude_control_value(rtk_phy, phy_parameter);
+- }
+-
+- return ret;
+-}
+-
+-static int rtk_usb3phy_probe(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy;
+- struct device *dev = &pdev->dev;
+- struct phy *generic_phy;
+- struct phy_provider *phy_provider;
+- const struct phy_cfg *phy_cfg;
+- int ret;
+-
+- phy_cfg = of_device_get_match_data(dev);
+- if (!phy_cfg) {
+- dev_err(dev, "phy config are not assigned!\n");
+- return -EINVAL;
+- }
+-
+- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+- if (!rtk_phy)
+- return -ENOMEM;
+-
+- rtk_phy->dev = &pdev->dev;
+- rtk_phy->phy.dev = rtk_phy->dev;
+- rtk_phy->phy.label = "rtk-usb3phy";
+- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
+-
+- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+-
+- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+-
+- rtk_phy->num_phy = 1;
+-
+- ret = parse_phy_data(rtk_phy);
+- if (ret)
+- goto err;
+-
+- platform_set_drvdata(pdev, rtk_phy);
+-
+- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+- if (IS_ERR(generic_phy))
+- return PTR_ERR(generic_phy);
+-
+- phy_set_drvdata(generic_phy, rtk_phy);
+-
+- phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
+- if (IS_ERR(phy_provider))
+- return PTR_ERR(phy_provider);
+-
+- ret = usb_add_phy_dev(&rtk_phy->phy);
+- if (ret)
+- goto err;
+-
+- create_debug_files(rtk_phy);
+-
+-err:
+- return ret;
+-}
+-
+-static void rtk_usb3phy_remove(struct platform_device *pdev)
+-{
+- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+-
+- remove_debug_files(rtk_phy);
+-
+- usb_remove_phy(&rtk_phy->phy);
+-}
+-
+-static const struct phy_cfg rtd1295_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
+- [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
+- [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
+- [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
+- [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
+- [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
+- [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
+- [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
+- [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
+- [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
+- [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
+- [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
+- [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
+- [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
+- [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
+- [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
+- [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
+- [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
+- [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
+- [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
+- [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
+- [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
+- [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
+- [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
+- .check_efuse = false,
+- .do_toggle = true,
+- .do_toggle_once = false,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1619_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [8] = {0x08, 0x3591},
+- [38] = {0x26, 0x840b},
+- [40] = {0x28, 0xf842}, },
+- .check_efuse = false,
+- .do_toggle = true,
+- .do_toggle_once = false,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1319_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [1] = {0x01, 0xac86},
+- [6] = {0x06, 0x0003},
+- [9] = {0x09, 0x924c},
+- [10] = {0x0a, 0xa608},
+- [11] = {0x0b, 0xb905},
+- [14] = {0x0e, 0x2010},
+- [32] = {0x20, 0x705a},
+- [33] = {0x21, 0xf645},
+- [34] = {0x22, 0x0013},
+- [35] = {0x23, 0xcb66},
+- [41] = {0x29, 0xff00}, },
+- .check_efuse = true,
+- .do_toggle = true,
+- .do_toggle_once = false,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1619b_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [1] = {0x01, 0xac8c},
+- [6] = {0x06, 0x0017},
+- [9] = {0x09, 0x724c},
+- [10] = {0x0a, 0xb610},
+- [11] = {0x0b, 0xb90d},
+- [13] = {0x0d, 0xef2a},
+- [15] = {0x0f, 0x9050},
+- [16] = {0x10, 0x000c},
+- [32] = {0x20, 0x70ff},
+- [34] = {0x22, 0x0013},
+- [35] = {0x23, 0xdb66},
+- [38] = {0x26, 0x8609},
+- [41] = {0x29, 0xff13},
+- [42] = {0x2a, 0x3070}, },
+- .check_efuse = true,
+- .do_toggle = false,
+- .do_toggle_once = true,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = false,
+-};
+-
+-static const struct phy_cfg rtd1319d_phy_cfg = {
+- .param_size = MAX_USB_PHY_DATA_SIZE,
+- .param = { [1] = {0x01, 0xac89},
+- [4] = {0x04, 0xf2f5},
+- [6] = {0x06, 0x0017},
+- [9] = {0x09, 0x424c},
+- [10] = {0x0a, 0x9610},
+- [11] = {0x0b, 0x9901},
+- [12] = {0x0c, 0xf000},
+- [13] = {0x0d, 0xef2a},
+- [14] = {0x0e, 0x1000},
+- [15] = {0x0f, 0x9050},
+- [32] = {0x20, 0x7077},
+- [35] = {0x23, 0x0b62},
+- [37] = {0x25, 0x10ec},
+- [42] = {0x2a, 0x3070}, },
+- .check_efuse = true,
+- .do_toggle = false,
+- .do_toggle_once = true,
+- .use_default_parameter = false,
+- .check_rx_front_end_offset = true,
+-};
+-
+-static const struct of_device_id usbphy_rtk_dt_match[] = {
+- { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
+- { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
+- { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
+- { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
+- { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+-
+-static struct platform_driver rtk_usb3phy_driver = {
+- .probe = rtk_usb3phy_probe,
+- .remove_new = rtk_usb3phy_remove,
+- .driver = {
+- .name = "rtk-usb3phy",
+- .of_match_table = usbphy_rtk_dt_match,
+- },
+-};
+-
+-module_platform_driver(rtk_usb3phy_driver);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform: rtk-usb3phy");
+-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+-MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index e53eace7c91e37..6387c0d34c551c 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -673,8 +673,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ channel->irq = platform_get_irq_optional(pdev, 0);
+ channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+- int ret;
+-
+ channel->is_otg_channel = true;
+ channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
+ "renesas,no-otg-pins");
+@@ -738,8 +736,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ ret = PTR_ERR(provider);
+ goto error;
+ } else if (channel->is_otg_channel) {
+- int ret;
+-
+ ret = device_create_file(dev, &dev_attr_role);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+index 5de5e2e97ffa0f..26b157f53f3da0 100644
+--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
++++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
+ };
+
+ struct rockchip_combphy_cfg {
++ unsigned int num_phys;
++ unsigned int phy_ids[3];
+ const struct rockchip_combphy_grfcfg *grfcfg;
+ int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
+ };
+
+ struct rockchip_combphy_priv {
+ u8 type;
++ int id;
+ void __iomem *mmio;
+ int num_clks;
+ struct clk_bulk_data *clks;
+@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
+ struct rockchip_combphy_priv *priv;
+ const struct rockchip_combphy_cfg *phy_cfg;
+ struct resource *res;
+- int ret;
++ int ret, id;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ /* find the phy-id from the io address */
++ priv->id = -ENODEV;
++ for (id = 0; id < phy_cfg->num_phys; id++) {
++ if (res->start == phy_cfg->phy_ids[id]) {
++ priv->id = id;
++ break;
++ }
++ }
++
+ priv->dev = dev;
+ priv->type = PHY_NONE;
+ priv->cfg = phy_cfg;
+@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
+ };
+
+ static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
++ .num_phys = 3,
++ .phy_ids = {
++ 0xfe820000,
++ 0xfe830000,
++ 0xfe840000,
++ },
+ .grfcfg = &rk3568_combphy_grfcfgs,
+ .combphy_cfg = rk3568_combphy_cfg,
+ };
+@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
+- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
++ switch (priv->id) {
++ case 1:
++ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
++ break;
++ case 2:
++ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
++ break;
++ }
+ break;
+ case PHY_TYPE_USB3:
+ /* Set SSC downward spread spectrum */
+@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
+ };
+
+ static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
++ .num_phys = 3,
++ .phy_ids = {
++ 0xfee00000,
++ 0xfee10000,
++ 0xfee20000,
++ },
+ .grfcfg = &rk3588_combphy_grfcfgs,
+ .combphy_cfg = rk3588_combphy_cfg,
+ };
+diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+index 121e5961ce1147..9857ee45b89e0d 100644
+--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
++++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+@@ -40,6 +40,8 @@
+ #define RK3588_BIFURCATION_LANE_0_1 BIT(0)
+ #define RK3588_BIFURCATION_LANE_2_3 BIT(1)
+ #define RK3588_LANE_AGGREGATION BIT(2)
++#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
++#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
+
+ struct rockchip_p3phy_ops;
+
+@@ -132,7 +134,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
+ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
+ {
+ u32 reg = 0;
+- u8 mode = 0;
++ u8 mode = RK3588_LANE_AGGREGATION; /* default */
+ int ret;
+
+ /* Deassert PCIe PMA output clamp mode */
+@@ -140,31 +142,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
+
+ /* Set bifurcation if needed */
+ for (int i = 0; i < priv->num_lanes; i++) {
+- if (!priv->lanes[i])
+- mode |= (BIT(i) << 3);
+-
+ if (priv->lanes[i] > 1)
+- mode |= (BIT(i) >> 1);
+- }
+-
+- if (!mode)
+- reg = RK3588_LANE_AGGREGATION;
+- else {
+- if (mode & (BIT(0) | BIT(1)))
+- reg |= RK3588_BIFURCATION_LANE_0_1;
+-
+- if (mode & (BIT(2) | BIT(3)))
+- reg |= RK3588_BIFURCATION_LANE_2_3;
++ mode &= ~RK3588_LANE_AGGREGATION;
++ if (priv->lanes[i] == 3)
++ mode |= RK3588_BIFURCATION_LANE_0_1;
++ if (priv->lanes[i] == 4)
++ mode |= RK3588_BIFURCATION_LANE_2_3;
+ }
+
+- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
++ reg = mode;
++ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
++ RK3588_PCIE30_PHY_MODE_EN | reg);
+
+ /* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
+ if (!IS_ERR(priv->pipe_grf)) {
+- reg = (mode & (BIT(6) | BIT(7))) >> 6;
++ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
+ if (reg)
+ regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
+- (reg << 16) | reg);
++ RK3588_PCIE1LN_SEL_EN | reg);
+ }
+
+ reset_control_deassert(priv->p30phy);
+diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
+index 0efe74ac9c6afc..637a5fbae6d9a9 100644
+--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
++++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
+@@ -275,7 +275,7 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
+
+ phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
+ if (IS_ERR(phy)) {
+- ret = -PTR_ERR(phy);
++ ret = PTR_ERR(phy);
+ return ret;
+ }
+
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index 142ebe0247cc00..983a6e6173bd21 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ }
+ EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+
++int tegra_xusb_padctl_get_port_number(struct phy *phy)
++{
++ struct tegra_xusb_lane *lane;
++
++ if (!phy)
++ return -ENODEV;
++
++ lane = phy_get_drvdata(phy);
++
++ return lane->index;
++}
++EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
++
+ MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+ MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
+index 555b323f45da1e..bc847d3879f79c 100644
+--- a/drivers/phy/ti/phy-gmii-sel.c
++++ b/drivers/phy/ti/phy-gmii-sel.c
+@@ -64,6 +64,7 @@ struct phy_gmii_sel_priv {
+ u32 num_ports;
+ u32 reg_offset;
+ u32 qsgmii_main_ports;
++ bool no_offset;
+ };
+
+ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
+@@ -402,7 +403,8 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
+ priv->num_ports = size / sizeof(u32);
+ if (!priv->num_ports)
+ return -EINVAL;
+- priv->reg_offset = __be32_to_cpu(*offset);
++ if (!priv->no_offset)
++ priv->reg_offset = __be32_to_cpu(*offset);
+ }
+
+ if_phys = devm_kcalloc(dev, priv->num_ports,
+@@ -471,6 +473,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
+ dev_err(dev, "Failed to get syscon %d\n", ret);
+ return ret;
+ }
++ priv->no_offset = true;
+ }
+
+ ret = phy_gmii_sel_init_ports(priv);
+diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
+index 762d3de8b3c530..6bd3c749233068 100644
+--- a/drivers/phy/ti/phy-omap-usb2.c
++++ b/drivers/phy/ti/phy-omap-usb2.c
+@@ -116,7 +116,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
+ {
+ struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
+
+- if (!phy->comparator)
++ if (!phy->comparator || !phy->comparator->set_vbus)
+ return -ENODEV;
+
+ return phy->comparator->set_vbus(phy->comparator, enabled);
+@@ -126,7 +126,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
+ {
+ struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
+
+- if (!phy->comparator)
++ if (!phy->comparator || !phy->comparator->start_srp)
+ return -ENODEV;
+
+ return phy->comparator->start_srp(phy->comparator);
+diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
+index b4881cb344759d..c23eecc7d1800f 100644
+--- a/drivers/phy/ti/phy-tusb1210.c
++++ b/drivers/phy/ti/phy-tusb1210.c
+@@ -65,7 +65,6 @@ struct tusb1210 {
+ struct delayed_work chg_det_work;
+ struct notifier_block psy_nb;
+ struct power_supply *psy;
+- struct power_supply *charger;
+ #endif
+ };
+
+@@ -231,19 +230,24 @@ static const char * const tusb1210_chargers[] = {
+
+ static bool tusb1210_get_online(struct tusb1210 *tusb)
+ {
++ struct power_supply *charger = NULL;
+ union power_supply_propval val;
+- int i;
++ bool online = false;
++ int i, ret;
+
+- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
+- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
++ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
++ charger = power_supply_get_by_name(tusb1210_chargers[i]);
+
+- if (!tusb->charger)
++ if (!charger)
+ return false;
+
+- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
+- return false;
++ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
++ if (ret == 0)
++ online = val.intval;
++
++ power_supply_put(charger);
+
+- return val.intval;
++ return online;
+ }
+
+ static void tusb1210_chg_det_work(struct work_struct *work)
+@@ -467,9 +471,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
+ cancel_delayed_work_sync(&tusb->chg_det_work);
+ power_supply_unregister(tusb->psy);
+ }
+-
+- if (tusb->charger)
+- power_supply_put(tusb->charger);
+ }
+ #else
+ static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
+diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
+index 2559c6594cea2b..c72b52955a8678 100644
+--- a/drivers/phy/xilinx/phy-zynqmp.c
++++ b/drivers/phy/xilinx/phy-zynqmp.c
+@@ -80,7 +80,8 @@
+
+ /* Reference clock selection parameters */
+ #define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4)
+-#define L0_REF_CLK_SEL_MASK 0x8f
++#define L0_REF_CLK_LCL_SEL BIT(7)
++#define L0_REF_CLK_SEL_MASK 0x9f
+
+ /* Calibration digital logic parameters */
+ #define L3_TM_CALIB_DIG19 0xec4c
+@@ -165,6 +166,24 @@
+ /* Timeout values */
+ #define TIMEOUT_US 1000
+
++/* Lane 0/1/2/3 offset */
++#define DIG_8(n) ((0x4000 * (n)) + 0x1074)
++#define ILL13(n) ((0x4000 * (n)) + 0x1994)
++#define DIG_10(n) ((0x4000 * (n)) + 0x107c)
++#define RST_DLY(n) ((0x4000 * (n)) + 0x19a4)
++#define BYP_15(n) ((0x4000 * (n)) + 0x1038)
++#define BYP_12(n) ((0x4000 * (n)) + 0x102c)
++#define MISC3(n) ((0x4000 * (n)) + 0x19ac)
++#define EQ11(n) ((0x4000 * (n)) + 0x1978)
++
++static u32 save_reg_address[] = {
++ /* Lane 0/1/2/3 Register */
++ DIG_8(0), ILL13(0), DIG_10(0), RST_DLY(0), BYP_15(0), BYP_12(0), MISC3(0), EQ11(0),
++ DIG_8(1), ILL13(1), DIG_10(1), RST_DLY(1), BYP_15(1), BYP_12(1), MISC3(1), EQ11(1),
++ DIG_8(2), ILL13(2), DIG_10(2), RST_DLY(2), BYP_15(2), BYP_12(2), MISC3(2), EQ11(2),
++ DIG_8(3), ILL13(3), DIG_10(3), RST_DLY(3), BYP_15(3), BYP_12(3), MISC3(3), EQ11(3),
++};
++
+ struct xpsgtr_dev;
+
+ /**
+@@ -213,6 +232,7 @@ struct xpsgtr_phy {
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
++ * @saved_regs: registers to be saved/restored during suspend/resume
+ */
+ struct xpsgtr_dev {
+ struct device *dev;
+@@ -225,6 +245,7 @@ struct xpsgtr_dev {
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
++ u32 *saved_regs;
+ };
+
+ /*
+@@ -298,6 +319,32 @@ static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
+ writel((readl(addr) & ~clr) | set, addr);
+ }
+
++/**
++ * xpsgtr_save_lane_regs - Saves registers on suspend
++ * @gtr_dev: pointer to phy controller context structure
++ */
++static void xpsgtr_save_lane_regs(struct xpsgtr_dev *gtr_dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
++ gtr_dev->saved_regs[i] = xpsgtr_read(gtr_dev,
++ save_reg_address[i]);
++}
++
++/**
++ * xpsgtr_restore_lane_regs - Restores registers on resume
++ * @gtr_dev: pointer to phy controller context structure
++ */
++static void xpsgtr_restore_lane_regs(struct xpsgtr_dev *gtr_dev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
++ xpsgtr_write(gtr_dev, save_reg_address[i],
++ gtr_dev->saved_regs[i]);
++}
++
+ /*
+ * Hardware Configuration
+ */
+@@ -349,11 +396,12 @@ static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+ PLL_FREQ_MASK, ssc->pll_ref_clk);
+
+ /* Enable lane clock sharing, if required */
+- if (gtr_phy->refclk != gtr_phy->lane) {
+- /* Lane3 Ref Clock Selection Register */
++ if (gtr_phy->refclk == gtr_phy->lane)
++ xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
++ L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL);
++ else
+ xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
+ L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
+- }
+
+ /* SSC step size [7:0] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
+@@ -573,7 +621,7 @@ static int xpsgtr_phy_init(struct phy *phy)
+ mutex_lock(&gtr_dev->gtr_mutex);
+
+ /* Configure and enable the clock when peripheral phy_init call */
+- if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane]))
++ if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk]))
+ goto out;
+
+ /* Skip initialization if not required. */
+@@ -625,7 +673,7 @@ static int xpsgtr_phy_exit(struct phy *phy)
+ gtr_phy->skip_phy_init = false;
+
+ /* Ensure that disable clock only, which configure for lane */
+- clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]);
++ clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]);
+
+ return 0;
+ }
+@@ -798,6 +846,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
+ phy_type = args->args[1];
+ phy_instance = args->args[2];
+
++ guard(mutex)(&gtr_phy->phy->mutex);
+ ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
+@@ -837,6 +886,8 @@ static int xpsgtr_runtime_suspend(struct device *dev)
+ gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
++ xpsgtr_save_lane_regs(gtr_dev);
++
+ return 0;
+ }
+
+@@ -847,6 +898,8 @@ static int xpsgtr_runtime_resume(struct device *dev)
+ unsigned int i;
+ bool skip_phy_init;
+
++ xpsgtr_restore_lane_regs(gtr_dev);
++
+ icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+@@ -992,6 +1045,12 @@ static int xpsgtr_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ gtr_dev->saved_regs = devm_kmalloc(gtr_dev->dev,
++ sizeof(save_reg_address),
++ GFP_KERNEL);
++ if (!gtr_dev->saved_regs)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+index 80838dc54b3abb..7938741136a2c2 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+@@ -43,7 +43,7 @@
+ #define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
+ #define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
+ #define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
+-#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
++#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
+ #define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
+ #define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
+ #define SCU690 0x690 /* Multi-function Pin Control #24 */
+@@ -2494,38 +2494,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
+ ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
+
+ /* GPIOS7 */
+- ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
++ ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
+ /* GPIOS6 */
+- ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
++ ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
+ /* GPIOS5 */
+- ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
++ ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
+ /* GPIOS4 */
+- ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
++ ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
+ /* GPIOS3*/
+- ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
++ ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
+ /* GPIOS2 */
+- ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
++ ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
+ /* GPIOS1 */
+- ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
++ ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
+ /* GPIOS0 */
+- ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
++ ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
+
+ /* GPIOR7 */
+- ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
++ ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
+ /* GPIOR6 */
+- ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
++ ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
+ /* GPIOR5 */
+- ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
++ ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
+ /* GPIOR4 */
+- ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
++ ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
+ /* GPIOR3*/
+- ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
++ ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
+ /* GPIOR2 */
+- ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
++ ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
+ /* GPIOR1 */
+- ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
++ ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
+ /* GPIOR0 */
+- ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
++ ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
+
+ /* GPIOX7 */
+ ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
+diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
+index f80630a74d34a5..d099a7f25f64c9 100644
+--- a/drivers/pinctrl/bcm/pinctrl-ns.c
++++ b/drivers/pinctrl/bcm/pinctrl-ns.c
+@@ -7,11 +7,11 @@
+ #include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/pinctrl/pinmux.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/slab.h>
+
+ #include "../core.h"
+@@ -208,7 +208,6 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
+ static int ns_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- const struct of_device_id *of_id;
+ struct ns_pinctrl *ns_pinctrl;
+ struct pinctrl_desc *pctldesc;
+ struct pinctrl_pin_desc *pin;
+@@ -225,10 +224,7 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
+
+ ns_pinctrl->dev = dev;
+
+- of_id = of_match_device(ns_pinctrl_of_match_table, dev);
+- if (!of_id)
+- return -EINVAL;
+- ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
++ ns_pinctrl->chipset_flag = (uintptr_t)device_get_match_data(dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cru_gpio_control");
+diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
+index acbd413340e8be..15aed44676271a 100644
+--- a/drivers/pinctrl/berlin/berlin-bg2.c
++++ b/drivers/pinctrl/berlin/berlin-bg2.c
+@@ -8,8 +8,9 @@
+ */
+
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+
+ #include "berlin.h"
+@@ -227,10 +228,7 @@ static const struct of_device_id berlin2_pinctrl_match[] = {
+
+ static int berlin2_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(berlin2_pinctrl_match, &pdev->dev);
+-
+- return berlin_pinctrl_probe(pdev, match->data);
++ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev));
+ }
+
+ static struct platform_driver berlin2_pinctrl_driver = {
+diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
+index c0f5d86d5d01d9..73a1d8c2308866 100644
+--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
++++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
+@@ -8,8 +8,9 @@
+ */
+
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+
+ #include "berlin.h"
+@@ -172,10 +173,7 @@ static const struct of_device_id berlin2cd_pinctrl_match[] = {
+
+ static int berlin2cd_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(berlin2cd_pinctrl_match, &pdev->dev);
+-
+- return berlin_pinctrl_probe(pdev, match->data);
++ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev));
+ }
+
+ static struct platform_driver berlin2cd_pinctrl_driver = {
+diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
+index 20a3216ede07a7..a5dbc8f279e70a 100644
+--- a/drivers/pinctrl/berlin/berlin-bg2q.c
++++ b/drivers/pinctrl/berlin/berlin-bg2q.c
+@@ -8,8 +8,9 @@
+ */
+
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+
+ #include "berlin.h"
+@@ -389,10 +390,7 @@ static const struct of_device_id berlin2q_pinctrl_match[] = {
+
+ static int berlin2q_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(berlin2q_pinctrl_match, &pdev->dev);
+-
+- return berlin_pinctrl_probe(pdev, match->data);
++ return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev));
+ }
+
+ static struct platform_driver berlin2q_pinctrl_driver = {
+diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
+index 3026a3b3da2dd9..9bf0a54f2798a2 100644
+--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
++++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
+@@ -8,8 +8,9 @@
+ */
+
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+
+ #include "berlin.h"
+@@ -449,8 +450,8 @@ static const struct of_device_id berlin4ct_pinctrl_match[] = {
+
+ static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(berlin4ct_pinctrl_match, &pdev->dev);
++ const struct berlin_pinctrl_desc *desc =
++ device_get_match_data(&pdev->dev);
+ struct regmap_config *rmconfig;
+ struct regmap *regmap;
+ struct resource *res;
+@@ -473,7 +474,7 @@ static int berlin4ct_pinctrl_probe(struct platform_device *pdev)
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
++ return berlin_pinctrl_probe_regmap(pdev, desc, regmap);
+ }
+
+ static struct platform_driver berlin4ct_pinctrl_driver = {
+diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c
+index b631c14813a7dc..fc0daec94e1059 100644
+--- a/drivers/pinctrl/berlin/pinctrl-as370.c
++++ b/drivers/pinctrl/berlin/pinctrl-as370.c
+@@ -8,8 +8,9 @@
+ */
+
+ #include <linux/init.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+
+ #include "berlin.h"
+@@ -330,8 +331,8 @@ static const struct of_device_id as370_pinctrl_match[] = {
+
+ static int as370_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(as370_pinctrl_match, &pdev->dev);
++ const struct berlin_pinctrl_desc *desc =
++ device_get_match_data(&pdev->dev);
+ struct regmap_config *rmconfig;
+ struct regmap *regmap;
+ struct resource *res;
+@@ -354,7 +355,7 @@ static int as370_pinctrl_probe(struct platform_device *pdev)
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap);
++ return berlin_pinctrl_probe_regmap(pdev, desc, regmap);
+ }
+
+ static struct platform_driver as370_pinctrl_driver = {
+diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
+index d6318cb57aff2a..e7e827a8877a0e 100644
+--- a/drivers/pinctrl/cirrus/Kconfig
++++ b/drivers/pinctrl/cirrus/Kconfig
+@@ -12,7 +12,8 @@ config PINCTRL_CS42L43
+
+ config PINCTRL_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar pinctrl driver"
+- depends on MFD_LOCHNAGAR
++ # Avoid clash caused by MIPS defining RST, which is used in the driver
++ depends on MFD_LOCHNAGAR && !MIPS
+ select GPIOLIB
+ select PINMUX
+ select PINCONF
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index e9dc9638120a52..88ee086e137636 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1098,8 +1098,8 @@ static struct pinctrl *create_pinctrl(struct device *dev,
+ * an -EPROBE_DEFER later, as that is the worst case.
+ */
+ if (ret == -EPROBE_DEFER) {
+- pinctrl_free(p, false);
+ mutex_unlock(&pinctrl_maps_mutex);
++ pinctrl_free(p, false);
+ return ERR_PTR(ret);
+ }
+ }
+@@ -1253,17 +1253,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
+ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
+ {
+ struct pinctrl_setting *setting, *setting2;
+- struct pinctrl_state *old_state = p->state;
++ struct pinctrl_state *old_state = READ_ONCE(p->state);
+ int ret;
+
+- if (p->state) {
++ if (old_state) {
+ /*
+ * For each pinmux setting in the old state, forget SW's record
+ * of mux owner for that pingroup. Any pingroups which are
+ * still owned by the new state will be re-acquired by the call
+ * to pinmux_enable_setting() in the loop below.
+ */
+- list_for_each_entry(setting, &p->state->settings, node) {
++ list_for_each_entry(setting, &old_state->settings, node) {
+ if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
+ continue;
+ pinmux_disable_setting(setting);
+@@ -2072,6 +2072,14 @@ pinctrl_init_controller(struct pinctrl_desc *pctldesc, struct device *dev,
+ return ERR_PTR(ret);
+ }
+
++static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev, struct pinctrl_desc *pctldesc)
++{
++ pinctrl_free_pindescs(pctldev, pctldesc->pins,
++ pctldesc->npins);
++ mutex_destroy(&pctldev->mutex);
++ kfree(pctldev);
++}
++
+ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
+ {
+ pctldev->p = create_pinctrl(pctldev->dev, pctldev);
+@@ -2116,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
+
+ error = pinctrl_claim_hogs(pctldev);
+ if (error) {
+- dev_err(pctldev->dev, "could not claim hogs: %i\n",
+- error);
+- pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
+- pctldev->desc->npins);
+- mutex_destroy(&pctldev->mutex);
+- kfree(pctldev);
+-
++ dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
+ return error;
+ }
+
+@@ -2158,8 +2160,10 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ return pctldev;
+
+ error = pinctrl_enable(pctldev);
+- if (error)
++ if (error) {
++ pinctrl_uninit_controller(pctldev, pctldesc);
+ return ERR_PTR(error);
++ }
+
+ return pctldev;
+ }
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 6e0a40962f384a..5ee746cb81f591 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
+ for (state = 0; ; state++) {
+ /* Retrieve the pinctrl-* property */
+ propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+- if (!propname)
+- return -ENOMEM;
++ if (!propname) {
++ ret = -ENOMEM;
++ goto err;
++ }
+ prop = of_find_property(np, propname, &size);
+ kfree(propname);
+ if (!prop) {
+ if (state == 0) {
+- of_node_put(np);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto err;
+ }
+ break;
+ }
+diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
+index cf3f4d2e0c168e..a53287aaa653db 100644
+--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
++++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
+@@ -408,8 +408,8 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
+ int ret;
+ u32 val;
+
+- child = of_get_next_child(np, NULL);
+- if (!child) {
++ val = of_get_child_count(np);
++ if (val == 0) {
+ dev_err(&pdev->dev, "no group is defined\n");
+ return -ENOENT;
+ }
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index faa8b7ff5bcf34..0aaeb54a64765d 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -277,33 +277,33 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
+ static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
+
+ static const struct intel_pingroup byt_score_groups[] = {
+- PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
+- PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
+- PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
+- PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
+- PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
+- PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
+- PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
+- PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
+- PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
+- PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
+- PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
+- PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
+- PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
+- PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
+- PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
+- PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+- PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
+- PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
+- PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
+- PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
+- PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+- PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+- PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+- PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+- PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+- PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+- PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
++ PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
++ PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
++ PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
++ PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
++ PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
++ PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
++ PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
++ PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
++ PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
++ PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
++ PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
++ PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
++ PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
++ PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
++ PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
++ PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
++ PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
++ PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
++ PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
++ PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
++ PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
++ PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
++ PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
++ PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
++ PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
++ PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
++ PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
+ };
+
+ static const char * const byt_score_uart_groups[] = {
+@@ -331,12 +331,14 @@ static const char * const byt_score_plt_clk_groups[] = {
+ };
+ static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
+ static const char * const byt_score_gpio_groups[] = {
+- "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
+- "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
+- "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
+- "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
+- "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
+- "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
++ "uart1_grp_gpio", "uart2_grp_gpio", "pwm0_grp_gpio",
++ "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
++ "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
++ "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
++ "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
++ "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
++ "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
++ "plt_clk5_grp_gpio", "smbus_grp_gpio",
+ };
+
+ static const struct intel_function byt_score_functions[] = {
+@@ -455,8 +457,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
+ PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
+ PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
+ PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
+- PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+- PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
++ PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
++ PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ };
+
+ static const char * const byt_sus_usb_groups[] = {
+@@ -468,7 +470,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
+ };
+ static const char * const byt_sus_gpio_groups[] = {
+ "usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
+- "pmu_clk1_grp", "pmu_clk2_grp",
++ "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
+ };
+
+ static const struct intel_function byt_sus_functions[] = {
+@@ -921,13 +923,14 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+ unsigned int num_configs)
+ {
+ struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
+- unsigned int param, arg;
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
+ u32 conf, val, db_pulse, debounce;
++ enum pin_config_param param;
+ unsigned long flags;
+ int i, ret = 0;
++ u32 arg;
+
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+@@ -983,11 +986,18 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+- if (arg)
++ if (arg) {
+ conf |= BYT_DEBOUNCE_EN;
+- else
++ } else {
+ conf &= ~BYT_DEBOUNCE_EN;
+
++ /*
++ * No need to update the pulse value.
++ * Debounce is going to be disabled.
++ */
++ break;
++ }
++
+ switch (arg) {
+ case 375:
+ db_pulse = BYT_DEBOUNCE_PULSE_375US;
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
+index cee512f97b5663..45216b9e852dc1 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.h
++++ b/drivers/pinctrl/intel/pinctrl-intel.h
+@@ -179,6 +179,10 @@ struct intel_community {
+ .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
+ }
+
++#define PIN_GROUP_GPIO(n, p, m) \
++ PIN_GROUP(n, p, m), \
++ PIN_GROUP(n "_gpio", p, 0)
++
+ #define FUNCTION(n, g) \
+ { \
+ .func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
+diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
+index 7ced2b402dce04..812696dfe30263 100644
+--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c
++++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
+@@ -583,6 +583,7 @@ static const struct intel_pinctrl_soc_data mtls_soc_data = {
+ };
+
+ static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
++ { "INTC105E", (kernel_ulong_t)&mtlp_soc_data },
+ { "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ { "INTC1082", (kernel_ulong_t)&mtls_soc_data },
+ { }
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+index a02f7c3269707e..09edcf47effec5 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+@@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range),
+- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range),
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+index dee1b3aefd36ec..bf5788d6810ff0 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+@@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
+- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+index b7921b59eb7b15..54301fbba524af 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+@@ -709,32 +709,35 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw,
+ {
+ int err, rsel_val;
+
+- if (!pullup && arg == MTK_DISABLE)
+- return 0;
+-
+ if (hw->rsel_si_unit) {
+ /* find pin rsel_index from pin_rsel array*/
+ err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val);
+ if (err)
+- goto out;
++ return err;
+ } else {
+- if (arg < MTK_PULL_SET_RSEL_000 ||
+- arg > MTK_PULL_SET_RSEL_111) {
+- err = -EINVAL;
+- goto out;
+- }
++ if (arg < MTK_PULL_SET_RSEL_000 || arg > MTK_PULL_SET_RSEL_111)
++ return -EINVAL;
+
+ rsel_val = arg - MTK_PULL_SET_RSEL_000;
+ }
+
+- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
+- if (err)
+- goto out;
++ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
++}
+
+- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, MTK_ENABLE);
++static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
++ const struct mtk_pin_desc *desc,
++ u32 pullup, u32 arg)
++{
++ u32 enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE;
++ int err;
+
+-out:
+- return err;
++ if (arg != MTK_DISABLE) {
++ err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
++ if (err)
++ return err;
++ }
++
++ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
+ }
+
+ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+@@ -750,22 +753,22 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
+ try_all_type = MTK_PULL_TYPE_MASK;
+
+ if (try_all_type & MTK_PULL_RSEL_TYPE) {
+- err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
++ err = mtk_pinconf_bias_set_pu_pd_rsel(hw, desc, pullup, arg);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PU_PD_TYPE) {
+ err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
+ err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc,
+ pullup, arg);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+@@ -803,9 +806,9 @@ static int mtk_rsel_get_si_unit(struct mtk_pinctrl *hw,
+ return 0;
+ }
+
+-static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw,
+- const struct mtk_pin_desc *desc,
+- u32 *pullup, u32 *enable)
++static int mtk_pinconf_bias_get_pu_pd_rsel(struct mtk_pinctrl *hw,
++ const struct mtk_pin_desc *desc,
++ u32 *pullup, u32 *enable)
+ {
+ int pu, pd, rsel, err;
+
+@@ -939,22 +942,22 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
+ try_all_type = MTK_PULL_TYPE_MASK;
+
+ if (try_all_type & MTK_PULL_RSEL_TYPE) {
+- err = mtk_pinconf_bias_get_rsel(hw, desc, pullup, enable);
++ err = mtk_pinconf_bias_get_pu_pd_rsel(hw, desc, pullup, enable);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PU_PD_TYPE) {
+ err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
+ err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc,
+ pullup, enable);
+ if (!err)
+- return err;
++ return 0;
+ }
+
+ if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
+diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
+index 33d6c3fb79080a..9cd7fe3c3e0df1 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
++++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
+@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+- case PIN_CONFIG_OUTPUT_ENABLE:
++ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
++ if (!ret)
++ err = -EINVAL;
++ break;
++ case PIN_CONFIG_OUTPUT:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
+ if (err)
+ break;
+- /* CONFIG Current direction return value
+- * ------------- ----------------- ----------------------
+- * OUTPUT_ENABLE output 1 (= HW value)
+- * input 0 (= HW value)
+- * INPUT_ENABLE output 0 (= reverse HW value)
+- * input 1 (= reverse HW value)
+- */
+- if (param == PIN_CONFIG_INPUT_ENABLE)
+- ret = !ret;
+
++ if (!ret) {
++ err = -EINVAL;
++ break;
++ }
++
++ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
+@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
++ if (!ret)
++ err = -EINVAL;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (!hw->soc->drive_get)
+@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ break;
+ err = hw->soc->bias_set_combo(hw, desc, 0, arg);
+ break;
+- case PIN_CONFIG_OUTPUT_ENABLE:
+- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+- MTK_DISABLE);
+- /* Keep set direction to consider the case that a GPIO pin
+- * does not have SMT control
+- */
+- if (err != -ENOTSUPP)
+- break;
+-
+- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+- MTK_OUTPUT);
+- break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ /* regard all non-zero value as enable */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
+- if (err)
+- break;
+-
+- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+- MTK_INPUT);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ /* regard all non-zero value as enable */
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
+index 79f5d753d7e1a5..50a87d9618a8e8 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
+@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
+ static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
+ static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
+ static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
+-static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
++static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 };
+
+ /* gen_clk */
+ static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
+@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
+ GROUP(pdm_din2_a, 3),
+ GROUP(pdm_din1_a, 3),
+ GROUP(pdm_din0_a, 3),
+- GROUP(pdm_dclk, 3),
++ GROUP(pdm_dclk_a, 3),
+ GROUP(pwm_c_a, 3),
+ GROUP(pwm_b_a, 3),
+
+@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
+
+ static const char * const pdm_groups[] = {
+ "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
+- "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
++ "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
+ };
+
+ static const char * const gen_clk_groups[] = {
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+index 040e418dbfc1be..162dfc213669a7 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+@@ -12,8 +12,8 @@
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
++#include <linux/property.h>
+
+ #include "pinctrl-mvebu.h"
+
+@@ -404,13 +404,8 @@ static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
+ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
+- const struct of_device_id *match =
+- of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
+
+- if (!match)
+- return -ENODEV;
+-
+- soc->variant = (unsigned) match->data & 0xff;
++ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
+ soc->controls = armada_38x_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
+ soc->gpioranges = armada_38x_mpp_gpio_ranges;
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+index c33f1cbaf661aa..d9c98faa7b0e94 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+@@ -12,8 +12,8 @@
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
++#include <linux/property.h>
+
+ #include "pinctrl-mvebu.h"
+
+@@ -386,13 +386,8 @@ static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
+ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info;
+- const struct of_device_id *match =
+- of_match_device(armada_39x_pinctrl_of_match, &pdev->dev);
+
+- if (!match)
+- return -ENODEV;
+-
+- soc->variant = (unsigned) match->data & 0xff;
++ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
+ soc->controls = armada_39x_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls);
+ soc->gpioranges = armada_39x_mpp_gpio_ranges;
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
+index 89bab536717df6..7becf2781a0b9f 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
+@@ -13,7 +13,6 @@
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
+
+ #include "pinctrl-mvebu.h"
+@@ -106,10 +105,8 @@ static struct pinctrl_gpio_range armada_ap806_mpp_gpio_ranges[] = {
+ static int armada_ap806_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct mvebu_pinctrl_soc_info *soc = &armada_ap806_pinctrl_info;
+- const struct of_device_id *match =
+- of_match_device(armada_ap806_pinctrl_of_match, &pdev->dev);
+
+- if (!match || !pdev->dev.parent)
++ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ soc->variant = 0; /* no variants for Armada AP806 */
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
+index 8ba8f3e9121f04..9a250c491f33d3 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
+@@ -12,9 +12,9 @@
+ #include <linux/io.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+
+ #include "pinctrl-mvebu.h"
+
+@@ -638,8 +638,6 @@ static void mvebu_pinctrl_assign_variant(struct mvebu_mpp_mode *m,
+ static int armada_cp110_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct mvebu_pinctrl_soc_info *soc;
+- const struct of_device_id *match =
+- of_match_device(armada_cp110_pinctrl_of_match, &pdev->dev);
+ int i;
+
+ if (!pdev->dev.parent)
+@@ -650,7 +648,7 @@ static int armada_cp110_pinctrl_probe(struct platform_device *pdev)
+ if (!soc)
+ return -ENOMEM;
+
+- soc->variant = (unsigned long) match->data & 0xff;
++ soc->variant = (unsigned long)device_get_match_data(&pdev->dev) & 0xff;
+ soc->controls = armada_cp110_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_cp110_mpp_controls);
+ soc->modes = armada_cp110_mpp_modes;
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+index 48e2a6c56a83b9..487825bfd125f3 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+@@ -19,8 +19,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
++#include <linux/property.h>
+ #include <linux/bitops.h>
+
+ #include "pinctrl-mvebu.h"
+@@ -568,14 +568,9 @@ static int armada_xp_pinctrl_resume(struct platform_device *pdev)
+ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
+- const struct of_device_id *match =
+- of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
+ int nregs;
+
+- if (!match)
+- return -ENODEV;
+-
+- soc->variant = (unsigned) match->data & 0xff;
++ soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
+
+ switch (soc->variant) {
+ case V_MV78230:
+diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
+index bd74daa9ed6663..dce601d993728c 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
++++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
+@@ -12,9 +12,9 @@
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/pinctrl/pinctrl.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+
+ #include "pinctrl-mvebu.h"
+@@ -765,13 +765,11 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct resource *res, *mpp_res;
+ struct resource fb_res;
+- const struct of_device_id *match =
+- of_match_device(dove_pinctrl_of_match, &pdev->dev);
+ struct mvebu_mpp_ctrl_data *mpp_data;
+ void __iomem *base;
+- int i;
++ int i, ret;
+
+- pdev->dev.platform_data = (void *)match->data;
++ pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
+
+ /*
+ * General MPP Configuration Register is part of pdma registers.
+@@ -785,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
+ clk_prepare_enable(clk);
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res);
+- if (IS_ERR(base))
+- return PTR_ERR(base);
++ if (IS_ERR(base)) {
++ ret = PTR_ERR(base);
++ goto err_probe;
++ }
+
+ mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols,
+ sizeof(*mpp_data), GFP_KERNEL);
+- if (!mpp_data)
+- return -ENOMEM;
++ if (!mpp_data) {
++ ret = -ENOMEM;
++ goto err_probe;
++ }
+
+ dove_pinctrl_info.control_data = mpp_data;
+ for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++)
+@@ -810,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
+ }
+
+ mpp4_base = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(mpp4_base))
+- return PTR_ERR(mpp4_base);
++ if (IS_ERR(mpp4_base)) {
++ ret = PTR_ERR(mpp4_base);
++ goto err_probe;
++ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+@@ -822,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
+ }
+
+ pmu_base = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(pmu_base))
+- return PTR_ERR(pmu_base);
++ if (IS_ERR(pmu_base)) {
++ ret = PTR_ERR(pmu_base);
++ goto err_probe;
++ }
+
+ gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
+ if (IS_ERR(gconfmap)) {
+@@ -833,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
+ adjust_resource(&fb_res,
+ (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
+ gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
+- if (IS_ERR(gc_base))
+- return PTR_ERR(gc_base);
++ if (IS_ERR(gc_base)) {
++ ret = PTR_ERR(gc_base);
++ goto err_probe;
++ }
++
+ gconfmap = devm_regmap_init_mmio(&pdev->dev,
+ gc_base, &gc_regmap_config);
+- if (IS_ERR(gconfmap))
+- return PTR_ERR(gconfmap);
++ if (IS_ERR(gconfmap)) {
++ ret = PTR_ERR(gconfmap);
++ goto err_probe;
++ }
+ }
+
+ /* Warn on any missing DT resource */
+@@ -846,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
+ dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
+
+ return mvebu_pinctrl_probe(pdev);
++err_probe:
++ clk_disable_unprepare(clk);
++ return ret;
+ }
+
+ static struct platform_driver dove_pinctrl_driver = {
+diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+index d45c31f281c856..4789d7442f788e 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
++++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+@@ -11,8 +11,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
++#include <linux/property.h>
+
+ #include "pinctrl-mvebu.h"
+
+@@ -470,10 +470,7 @@ static const struct of_device_id kirkwood_pinctrl_of_match[] = {
+
+ static int kirkwood_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
+-
+- pdev->dev.platform_data = (void *)match->data;
++ pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
+
+ return mvebu_pinctrl_simple_mmio_probe(pdev);
+ }
+diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
+index cc97d270be61b4..2b6ab7f2afc781 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
++++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
+@@ -19,8 +19,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/pinctrl/pinctrl.h>
++#include <linux/property.h>
+
+ #include "pinctrl-mvebu.h"
+
+@@ -218,10 +218,7 @@ static const struct of_device_id orion_pinctrl_of_match[] = {
+
+ static int orion_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(orion_pinctrl_of_match, &pdev->dev);
+-
+- pdev->dev.platform_data = (void*)match->data;
++ pdev->dev.platform_data = (void*)device_get_match_data(&pdev->dev);
+
+ mpp_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mpp_base))
+diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
+index 6b90051af20674..0cfa74365733ca 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
++++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
+@@ -17,6 +17,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+@@ -985,7 +986,6 @@ static const struct of_device_id abx500_gpio_match[] = {
+ static int abx500_gpio_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
+- const struct of_device_id *match;
+ struct abx500_pinctrl *pct;
+ unsigned int id = -1;
+ int ret;
+@@ -1006,12 +1006,7 @@ static int abx500_gpio_probe(struct platform_device *pdev)
+ pct->chip.parent = &pdev->dev;
+ pct->chip.base = -1; /* Dynamic allocation */
+
+- match = of_match_device(abx500_gpio_match, &pdev->dev);
+- if (!match) {
+- dev_err(&pdev->dev, "gpio dt not matching\n");
+- return -ENODEV;
+- }
+- id = (unsigned long)match->data;
++ id = (unsigned long)device_get_match_data(&pdev->dev);
+
+ /* Poke in other ASIC variants here */
+ switch (id) {
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index 86a638077a6973..445c61a4a7e553 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -16,9 +16,11 @@
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
++#include <linux/of.h>
+ #include <linux/of_address.h>
+-#include <linux/of_device.h>
++#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+@@ -1573,8 +1575,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
+ * Then mask the pins that need to be sleeping now when we're
+ * switching to the ALT C function.
+ */
+- for (i = 0; i < g->grp.npins; i++)
+- slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
++ for (i = 0; i < g->grp.npins; i++) {
++ unsigned int bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
++ slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(bit);
++ }
+ nmk_gpio_glitch_slpm_init(slpm);
+ }
+
+@@ -1838,7 +1842,6 @@ static int nmk_pinctrl_resume(struct device *dev)
+
+ static int nmk_pinctrl_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *prcm_np;
+ struct nmk_pinctrl *npct;
+@@ -1849,10 +1852,7 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
+ if (!npct)
+ return -ENOMEM;
+
+- match = of_match_device(nmk_pinctrl_match, &pdev->dev);
+- if (!match)
+- return -ENODEV;
+- version = (unsigned int) match->data;
++ version = (unsigned int)device_get_match_data(&pdev->dev);
+
+ /* Poke in other ASIC variants here */
+ if (version == PINCTRL_NMK_STN8815)
+diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
+index 7daff9f186cd86..f0cad2c501f766 100644
+--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
++++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
+@@ -843,8 +843,8 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
+ if (!np)
+ return -ENODEV;
+
+- if (mem_regions == 0) {
+- dev_err(&pdev->dev, "mem_regions is 0\n");
++ if (mem_regions == 0 || mem_regions >= 10000) {
++ dev_err(&pdev->dev, "mem_regions is invalid: %u\n", mem_regions);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 74241b2ff21e3a..86034c457c0436 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -923,6 +923,15 @@ static int amd_gpio_suspend(struct device *dev)
+
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
++
++ /* mask any interrupts not intended to be a wake source */
++ if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
++ writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
++ gpio_dev->base + pin * 4);
++ pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
++ pin);
++ }
++
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+
+diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
+index 34c5c3e71fb261..cf59089f277639 100644
+--- a/drivers/pinctrl/pinctrl-amd.h
++++ b/drivers/pinctrl/pinctrl-amd.h
+@@ -80,6 +80,11 @@
+ #define FUNCTION_MASK GENMASK(1, 0)
+ #define FUNCTION_INVALID GENMASK(7, 0)
+
++#define WAKE_SOURCE (BIT(WAKE_CNTRL_OFF_S0I3) | \
++ BIT(WAKE_CNTRL_OFF_S3) | \
++ BIT(WAKE_CNTRL_OFF_S4) | \
++ BIT(WAKECNTRL_Z_OFF))
++
+ struct amd_function {
+ const char *name;
+ const char * const groups[NSELECTS];
+diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
+index 3751c7de37aa9f..f861e63f411521 100644
+--- a/drivers/pinctrl/pinctrl-apple-gpio.c
++++ b/drivers/pinctrl/pinctrl-apple-gpio.c
+@@ -474,6 +474,9 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
+ for (i = 0; i < npins; i++) {
+ pins[i].number = i;
+ pins[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PIN%u", i);
++ if (!pins[i].name)
++ return -ENOMEM;
++
+ pins[i].drv_data = pctl;
+ pin_names[i] = pins[i].name;
+ pin_nums[i] = i;
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index 383309e533c3d7..a27c01fcbb47ed 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1068,6 +1068,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
+ }
+ };
+
++/*
++ * This lock class tells lockdep that the parent IRQ and the child IRQs do
++ * not share the same class, so it does not raise false positives.
++ */
++static struct lock_class_key atmel_lock_key;
++static struct lock_class_key atmel_request_key;
++
+ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -1214,6 +1221,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
+ handle_simple_irq);
+ irq_set_chip_data(irq, atmel_pioctrl);
++ irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
+ dev_dbg(dev,
+ "atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
+ i, irq);
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 608f55c5ba5fe6..d7b66928a4e50d 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -12,10 +12,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+-#include <linux/of_address.h>
+-#include <linux/of_device.h>
+-#include <linux/of_irq.h>
++#include <linux/platform_device.h>
+ #include <linux/pm.h>
++#include <linux/property.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/string_helpers.h>
+@@ -1302,8 +1301,8 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+ if (!np)
+ return -ENODEV;
+
+- info->dev = dev;
+- info->ops = of_device_get_match_data(dev);
++ info->dev = &pdev->dev;
++ info->ops = device_get_match_data(&pdev->dev);
+ at91_pinctrl_child_count(info, np);
+
+ /*
+@@ -1410,8 +1409,11 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+
+ /* We will handle a range of GPIO pins */
+ for (i = 0; i < gpio_banks; i++)
+- if (gpio_chips[i])
++ if (gpio_chips[i]) {
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
++ gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0,
++ gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins);
++ }
+
+ dev_info(dev, "initialized AT91 pinctrl driver\n");
+
+@@ -1845,7 +1847,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
+ if (IS_ERR(at91_chip->regbase))
+ return PTR_ERR(at91_chip->regbase);
+
+- at91_chip->ops = of_device_get_match_data(dev);
++ at91_chip->ops = device_get_match_data(dev);
+ at91_chip->pioc_virq = irq;
+
+ at91_chip->clock = devm_clk_get_enabled(dev, NULL);
+diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
+index 58ca6fac7849ab..f2b9db66fdb6a4 100644
+--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
++++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
+@@ -307,6 +307,9 @@ static const char * const cy8c95x0_groups[] = {
+ "gp77",
+ };
+
++static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
++ unsigned int pin, bool input);
++
+ static inline u8 cypress_get_port(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+ {
+ /* Account for GPORT2 which only has 4 bits */
+@@ -711,6 +714,8 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
+ ret = regmap_read(chip->regmap, reg, &reg_val);
+ if (reg_val & bit)
+ arg = 1;
++ if (param == PIN_CONFIG_OUTPUT_ENABLE)
++ arg = !arg;
+
+ *config = pinconf_to_config_packed(param, (u16)arg);
+ out:
+@@ -726,6 +731,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
+ u8 port = cypress_get_port(chip, off);
+ u8 bit = cypress_get_pin_mask(chip, off);
+ unsigned long param = pinconf_to_config_param(config);
++ unsigned long arg = pinconf_to_config_argument(config);
+ unsigned int reg;
+ int ret;
+
+@@ -764,6 +770,12 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
+ case PIN_CONFIG_MODE_PWM:
+ reg = CY8C95X0_PWMSEL;
+ break;
++ case PIN_CONFIG_OUTPUT_ENABLE:
++ ret = cy8c95x0_pinmux_direction(chip, off, !arg);
++ goto out;
++ case PIN_CONFIG_INPUT_ENABLE:
++ ret = cy8c95x0_pinmux_direction(chip, off, arg);
++ goto out;
+ default:
+ ret = -ENOTSUPP;
+ goto out;
+@@ -821,7 +833,7 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
+ gc->get_direction = cy8c95x0_gpio_get_direction;
+ gc->get_multiple = cy8c95x0_gpio_get_multiple;
+ gc->set_multiple = cy8c95x0_gpio_set_multiple;
+- gc->set_config = gpiochip_generic_config,
++ gc->set_config = gpiochip_generic_config;
+ gc->can_sleep = true;
+ gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
+
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index f8ae2e9742217c..8d26c8061c77eb 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -1962,21 +1962,21 @@ static void ocelot_irq_handler(struct irq_desc *desc)
+ unsigned int reg = 0, irq, i;
+ unsigned long irqs;
+
++ chained_irq_enter(parent_chip, desc);
++
+ for (i = 0; i < info->stride; i++) {
+ regmap_read(info->map, id_reg + 4 * i, &reg);
+ if (!reg)
+ continue;
+
+- chained_irq_enter(parent_chip, desc);
+-
+ irqs = reg;
+
+ for_each_set_bit(irq, &irqs,
+ min(32U, info->desc->npins - 32 * i))
+ generic_handle_domain_irq(chip->irq.domain, irq + 32 * i);
+-
+- chained_irq_exit(parent_chip, desc);
+ }
++
++ chained_irq_exit(parent_chip, desc);
+ }
+
+ static int ocelot_gpiochip_register(struct platform_device *pdev,
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 45e416f68e74f6..b5a02335617d77 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+
+ static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
+ {
+- .num = 2,
+- .pin = 12,
+- .reg = 0x24,
+- .bit = 8,
+- .mask = 0x3
+- }, {
++ /* gpio2_b7_sel */
+ .num = 2,
+ .pin = 15,
+ .reg = 0x28,
+ .bit = 0,
+ .mask = 0x7
+ }, {
++ /* gpio2_c7_sel */
+ .num = 2,
+ .pin = 23,
+ .reg = 0x30,
+ .bit = 14,
+ .mask = 0x3
++ }, {
++ /* gpio3_b1_sel */
++ .num = 3,
++ .pin = 9,
++ .reg = 0x44,
++ .bit = 2,
++ .mask = 0x3
++ }, {
++ /* gpio3_b2_sel */
++ .num = 3,
++ .pin = 10,
++ .reg = 0x44,
++ .bit = 4,
++ .mask = 0x3
++ }, {
++ /* gpio3_b3_sel */
++ .num = 3,
++ .pin = 11,
++ .reg = 0x44,
++ .bit = 6,
++ .mask = 0x3
++ }, {
++ /* gpio3_b4_sel */
++ .num = 3,
++ .pin = 12,
++ .reg = 0x44,
++ .bit = 8,
++ .mask = 0x3
++ }, {
++ /* gpio3_b5_sel */
++ .num = 3,
++ .pin = 13,
++ .reg = 0x44,
++ .bit = 10,
++ .mask = 0x3
++ }, {
++ /* gpio3_b6_sel */
++ .num = 3,
++ .pin = 14,
++ .reg = 0x44,
++ .bit = 12,
++ .mask = 0x3
++ }, {
++ /* gpio3_b7_sel */
++ .num = 3,
++ .pin = 15,
++ .reg = 0x44,
++ .bit = 14,
++ .mask = 0x3
+ },
+ };
+
+@@ -870,9 +915,8 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */
+ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */
+ RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */
+- RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */
+- RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* i2c3_sdam1 */
+- RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */
++ RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x314, BIT(16 + 4)), /* i2c3_sdam0 */
++ RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x314, BIT(16 + 4) | BIT(4)), /* i2c3_sdam1 */
+ RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */
+ RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */
+ RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */
+@@ -881,18 +925,6 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */
+ RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */
+ RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */
+- RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */
+- RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */
+- RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */
+- RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */
+- RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */
+- RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */
+- RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */
+- RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */
+- RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */
+- RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */
+- RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */
+- RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */
+ };
+
+ static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
+@@ -2433,6 +2465,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
+ case RK3188:
+ case RK3288:
+ case RK3308:
++ case RK3328:
+ case RK3368:
+ case RK3399:
+ case RK3568:
+@@ -2491,6 +2524,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
+ case RK3188:
+ case RK3288:
+ case RK3308:
++ case RK3328:
+ case RK3368:
+ case RK3399:
+ case RK3568:
+@@ -2704,8 +2738,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+
+ if (ret) {
+ /* revert the already done pin settings */
+- for (cnt--; cnt >= 0; cnt--)
++ for (cnt--; cnt >= 0; cnt--) {
++ bank = pin_to_bank(info, pins[cnt]);
+ rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
++ }
+
+ return ret;
+ }
+@@ -2753,6 +2789,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
+ case RK3188:
+ case RK3288:
+ case RK3308:
++ case RK3328:
+ case RK3368:
+ case RK3399:
+ case RK3568:
+@@ -3765,7 +3802,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
+- IOMUX_WIDTH_3BIT,
++ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_3BIT,
+ 0),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+@@ -3779,7 +3816,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
+ .pin_banks = rk3328_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3328_pin_banks),
+ .label = "RK3328-GPIO",
+- .type = RK3288,
++ .type = RK3328,
+ .grf_mux_offset = 0x0,
+ .iomux_recalced = rk3328_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rk3328_mux_recalced_data),
+diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
+index 4759f336941ef3..849266f8b19131 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.h
++++ b/drivers/pinctrl/pinctrl-rockchip.h
+@@ -193,6 +193,7 @@ enum rockchip_pinctrl_type {
+ RK3188,
+ RK3288,
+ RK3308,
++ RK3328,
+ RK3368,
+ RK3399,
+ RK3568,
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 461a7c02d4a392..6c670203b3ac28 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -349,6 +349,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
+ return -ENOTSUPP;
+ fselector = setting->func;
+ function = pinmux_generic_get_function(pctldev, fselector);
++ if (!function)
++ return -EINVAL;
+ *func = function->data;
+ if (!(*func)) {
+ dev_err(pcs->dev, "%s could not find function%i\n",
+@@ -1327,7 +1329,6 @@ static void pcs_irq_free(struct pcs_device *pcs)
+ static void pcs_free_resources(struct pcs_device *pcs)
+ {
+ pcs_irq_free(pcs);
+- pinctrl_unregister(pcs->pctl);
+
+ #if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
+ if (pcs->missing_nr_pinctrl_cells)
+@@ -1884,7 +1885,7 @@ static int pcs_probe(struct platform_device *pdev)
+ if (ret < 0)
+ goto free;
+
+- ret = pinctrl_register_and_init(&pcs->desc, pcs->dev, pcs, &pcs->pctl);
++ ret = devm_pinctrl_register_and_init(pcs->dev, &pcs->desc, pcs, &pcs->pctl);
+ if (ret) {
+ dev_err(pcs->dev, "could not register single pinctrl driver\n");
+ goto free;
+@@ -1917,8 +1918,11 @@ static int pcs_probe(struct platform_device *pdev)
+
+ dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
+
+- return pinctrl_enable(pcs->pctl);
++ ret = pinctrl_enable(pcs->pctl);
++ if (ret)
++ goto free;
+
++ return 0;
+ free:
+ pcs_free_resources(pcs);
+
+diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
+index cf0383f575d9c9..f4256a918165f4 100644
+--- a/drivers/pinctrl/pinctrl-xway.c
++++ b/drivers/pinctrl/pinctrl-xway.c
+@@ -11,12 +11,12 @@
+ #include <linux/gpio/driver.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+-#include <linux/of_platform.h>
+-#include <linux/of_address.h>
++#include <linux/of.h>
+ #include <linux/ioport.h>
+ #include <linux/io.h>
+ #include <linux/device.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+
+ #include "pinctrl-lantiq.h"
+
+@@ -1451,7 +1451,6 @@ MODULE_DEVICE_TABLE(of, xway_match);
+
+ static int pinmux_xway_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match;
+ const struct pinctrl_xway_soc *xway_soc;
+ int ret, i;
+
+@@ -1460,10 +1459,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
+ if (IS_ERR(xway_info.membase[0]))
+ return PTR_ERR(xway_info.membase[0]);
+
+- match = of_match_device(xway_match, &pdev->dev);
+- if (match)
+- xway_soc = (const struct pinctrl_xway_soc *) match->data;
+- else
++ xway_soc = device_get_match_data(&pdev->dev);
++ if (!xway_soc)
+ xway_soc = &danube_pinctrl;
+
+ /* find out how many pads we have */
+diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
+index 33657cf98fb9d5..edb5984cd35190 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
++++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
+@@ -65,7 +65,7 @@ enum {
+ .intr_detection_width = 2, \
+ }
+
+-#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
++#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+@@ -75,7 +75,7 @@ enum {
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+- .tile = SOUTH, \
++ .tile = _tile, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+@@ -101,7 +101,7 @@ enum {
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+- .tile = SOUTH, \
++ .tile = WEST, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+@@ -1199,13 +1199,13 @@ static const struct msm_pingroup sm7150_groups[] = {
+ [117] = PINGROUP(117, NORTH, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, NORTH, _, _, _, _, _, _, _, _, _),
+ [119] = UFS_RESET(ufs_reset, 0x9f000),
+- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6),
+- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3),
+- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0),
++ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x9a000, 15, 0),
++ [121] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x9a000, 13, 6),
++ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x9a000, 11, 3),
++ [123] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x9a000, 9, 0),
++ [124] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x98000, 14, 6),
++ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x98000, 11, 3),
++ [126] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x98000, 9, 0),
+ };
+
+ static const struct msm_gpio_wakeirq_map sm7150_pdc_map[] = {
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+index deded9c6fd7dba..5817c52cee6bad 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+@@ -1207,7 +1207,6 @@ static const struct of_device_id pmic_gpio_of_match[] = {
+ { .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
+ { .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
+- { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
+ { .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
+ /* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
+ { .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
+diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
+index 93e51abbf519aa..757bbc549b0e26 100644
+--- a/drivers/pinctrl/renesas/core.c
++++ b/drivers/pinctrl/renesas/core.c
+@@ -731,10 +731,12 @@ static int sh_pfc_resume_noirq(struct device *dev)
+ sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
+ return 0;
+ }
++#define pm_psci_sleep_ptr(_ptr) pm_sleep_ptr(_ptr)
+ #else
+ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
+ static int sh_pfc_suspend_noirq(struct device *dev) { return 0; }
+ static int sh_pfc_resume_noirq(struct device *dev) { return 0; }
++#define pm_psci_sleep_ptr(_ptr) PTR_IF(false, (_ptr))
+ #endif /* CONFIG_ARM_PSCI_FW */
+
+ static DEFINE_NOIRQ_DEV_PM_OPS(sh_pfc_pm, sh_pfc_suspend_noirq, sh_pfc_resume_noirq);
+@@ -907,9 +909,11 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
+ sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
+ cfg_reg->reg, rw, cfg_reg->reg_width);
+
+- if (n != cfg_reg->nr_enum_ids)
++ if (n != cfg_reg->nr_enum_ids) {
+ sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
+ cfg_reg->reg, cfg_reg->nr_enum_ids, n);
++ n = cfg_reg->nr_enum_ids;
++ }
+
+ check_enum_ids:
+ sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
+@@ -1415,7 +1419,7 @@ static struct platform_driver sh_pfc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(sh_pfc_of_table),
+- .pm = pm_sleep_ptr(&sh_pfc_pm),
++ .pm = pm_psci_sleep_ptr(&sh_pfc_pm),
+ },
+ };
+
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+index acdea6ac152531..bb843e333c880f 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+@@ -68,20 +68,20 @@
+ #define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
+ #define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
+ #define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
+-#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
+-#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
+-#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
+-#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
++#define GPSR0_6 F_(IRQ0_A, IP0SR0_27_24)
++#define GPSR0_5 F_(IRQ1_A, IP0SR0_23_20)
++#define GPSR0_4 F_(IRQ2_A, IP0SR0_19_16)
++#define GPSR0_3 F_(IRQ3_A, IP0SR0_15_12)
+ #define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
+ #define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
+ #define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
+
+ /* GPSR1 */
+-#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
+-#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
+-#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
+-#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
+-#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
++#define GPSR1_28 F_(HTX3_A, IP3SR1_19_16)
++#define GPSR1_27 F_(HCTS3_N_A, IP3SR1_15_12)
++#define GPSR1_26 F_(HRTS3_N_A, IP3SR1_11_8)
++#define GPSR1_25 F_(HSCK3_A, IP3SR1_7_4)
++#define GPSR1_24 F_(HRX3_A, IP3SR1_3_0)
+ #define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
+ #define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
+ #define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
+@@ -119,14 +119,14 @@
+ #define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
+ #define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
+ #define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
+-#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
+-#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
++#define GPSR2_8 F_(TPU0TO0_A, IP1SR2_3_0)
++#define GPSR2_7 F_(TPU0TO1_A, IP0SR2_31_28)
+ #define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
+-#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
++#define GPSR2_5 F_(FXR_TXENB_N_A, IP0SR2_23_20)
+ #define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
+ #define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
+ #define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
+-#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
++#define GPSR2_1 F_(FXR_TXENA_N_A, IP0SR2_7_4)
+ #define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
+
+ /* GPSR3 */
+@@ -275,13 +275,13 @@
+
+ /* SR0 */
+ /* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_15_12 FM(IRQ3_A) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_19_16 FM(IRQ2_A) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_23_20 FM(IRQ1_A) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_27_24 FM(IRQ0_A) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+@@ -290,72 +290,72 @@
+ #define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1_A) FM(IRQ2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1_A) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1_A) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N_A) FM(CTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N_A) FM(RTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1_A) FM(SCK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* SR1 */
+ /* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_B) FM(TX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_B) FM(RX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_B) FM(RTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_B) FM(CTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_B) FM(SCK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_B) FM(CTS1_N_B) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_B) FM(RTS1_N_B) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_B) FM(SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+ #define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_31_28 F_(0, 0) FM(TCLK2_A) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_3_0 FM(HRX3_A) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_7_4 FM(HSCK3_A) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_11_8 FM(HRTS3_N_A) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_15_12 FM(HCTS3_N_A) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_19_16 FM(HTX3_A) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* SR2 */
+ /* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_7_4 FM(FXR_TXENA_N_A) FM(CANFD1_RX) FM(TPU0TO3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX_A) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX_A) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_23_20 FM(FXR_TXENB_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_31_28 FM(TPU0TO1_A) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+-#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_3_0 FM(TPU0TO0_A) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2_A) F_(0, 0) FM(TCLK3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3_A) FM(PWM1_B) FM(TCLK4_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+@@ -381,8 +381,8 @@
+ #define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+ /* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
+@@ -718,22 +718,22 @@ static const u16 pinmux_data[] = {
+
+ /* IP0SR0 */
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_N_B),
+- PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),
++ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_B),
+
+ PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
+
+ PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
+
+- PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
++ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3_A),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
+
+- PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
++ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2_A),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
+
+- PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
++ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1_A),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
+
+- PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
++ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0_A),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
+
+ PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
+@@ -750,75 +750,75 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
+
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
+- PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1),
+- PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A),
++ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1_A),
++ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_B),
+
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
+- PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1),
+- PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1),
++ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1_A),
++ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1_A),
+
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
+- PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1),
+- PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1),
++ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1_A),
++ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1_A),
+
+ /* IP2SR0 */
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
+- PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N),
+- PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N),
++ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N_A),
++ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N_A),
+
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
+- PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N),
+- PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N),
++ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N_A),
++ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N_A),
+
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
+- PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1),
+- PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1),
++ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1_A),
++ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1_A),
+
+ /* IP0SR1 */
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
+- PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A),
+- PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3),
++ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_B),
++ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
+- PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A),
+- PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3),
++ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_B),
++ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
+- PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A),
+- PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N),
++ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_B),
++ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
+- PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A),
+- PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N),
++ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_B),
++ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
+- PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A),
+- PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3),
++ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_B),
++ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
+
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
+- PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X),
+- PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X),
++ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_B),
++ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_B),
+
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
+- PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X),
+- PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X),
++ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_B),
++ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_B),
+
+ /* IP1SR1 */
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
+- PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X),
+- PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X),
++ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_B),
++ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
+- PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X),
+- PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X),
++ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_B),
++ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
+- PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X),
+- PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X),
++ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_B),
++ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_B),
+
+ PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
+
+@@ -827,15 +827,15 @@ static const u16 pinmux_data[] = {
+
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
+- PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A),
++ PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8),
+
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
+- PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A),
++ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9),
+
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
+- PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
++ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0),
+
+ /* IP2SR1 */
+ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
+@@ -845,99 +845,99 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
+
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
+- PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3),
++ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
+- PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4),
++ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
+- PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A),
++ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
+- PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A),
++ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_B),
+
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
+ PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A),
+
+- PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2),
++ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2_A),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
+
+ /* IP3SR1 */
+- PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3),
++ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
+
+- PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3),
++ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
+- PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A),
++ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_B),
+
+- PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N),
++ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
+- PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A),
++ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_B),
+
+- PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N),
++ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
+
+- PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3),
++ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
+
+ /* IP0SR2 */
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX),
+- PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A),
++ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_B),
+
+- PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N),
++ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N_A),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX),
+- PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A),
++ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
+- PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX),
++ PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX_A),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
+
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
+- PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX),
++ PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX_A),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
+
+ PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
+
+- PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N),
++ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N_A),
+
+ PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
+
+- PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1),
++ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1_A),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX),
+- PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B),
++ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_C),
+
+ /* IP1SR2 */
+- PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0),
++ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0_A),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX),
+- PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A),
++ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
+- PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X),
++ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
+- PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X),
++ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_B),
+
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
+
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
+- PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2),
+- PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A),
++ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2_A),
++ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_C),
+
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
+- PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3),
++ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3_A),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
+- PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A),
++ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_C),
+
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
+- PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
++ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2),
+
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
+ PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
+@@ -979,12 +979,12 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
+- PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X),
++ PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_A),
+
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_N_A),
+- PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),
++ PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_A),
+
+ PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
+
+@@ -1531,15 +1531,14 @@ static const unsigned int canfd4_data_mux[] = {
+ };
+
+ /* - CANFD5 ----------------------------------------------------------------- */
+-static const unsigned int canfd5_data_pins[] = {
+- /* CANFD5_TX, CANFD5_RX */
++static const unsigned int canfd5_data_a_pins[] = {
++ /* CANFD5_TX_A, CANFD5_RX_A */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+ };
+-static const unsigned int canfd5_data_mux[] = {
+- CANFD5_TX_MARK, CANFD5_RX_MARK,
++static const unsigned int canfd5_data_a_mux[] = {
++ CANFD5_TX_A_MARK, CANFD5_RX_A_MARK,
+ };
+
+-/* - CANFD5_B ----------------------------------------------------------------- */
+ static const unsigned int canfd5_data_b_pins[] = {
+ /* CANFD5_TX_B, CANFD5_RX_B */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+@@ -1599,49 +1598,48 @@ static const unsigned int hscif0_ctrl_mux[] = {
+ };
+
+ /* - HSCIF1 ----------------------------------------------------------------- */
+-static const unsigned int hscif1_data_pins[] = {
+- /* HRX1, HTX1 */
++static const unsigned int hscif1_data_a_pins[] = {
++ /* HRX1_A, HTX1_A */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+ };
+-static const unsigned int hscif1_data_mux[] = {
+- HRX1_MARK, HTX1_MARK,
++static const unsigned int hscif1_data_a_mux[] = {
++ HRX1_A_MARK, HTX1_A_MARK,
+ };
+-static const unsigned int hscif1_clk_pins[] = {
+- /* HSCK1 */
++static const unsigned int hscif1_clk_a_pins[] = {
++ /* HSCK1_A */
+ RCAR_GP_PIN(0, 18),
+ };
+-static const unsigned int hscif1_clk_mux[] = {
+- HSCK1_MARK,
++static const unsigned int hscif1_clk_a_mux[] = {
++ HSCK1_A_MARK,
+ };
+-static const unsigned int hscif1_ctrl_pins[] = {
+- /* HRTS1_N, HCTS1_N */
++static const unsigned int hscif1_ctrl_a_pins[] = {
++ /* HRTS1_N_A, HCTS1_N_A */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+ };
+-static const unsigned int hscif1_ctrl_mux[] = {
+- HRTS1_N_MARK, HCTS1_N_MARK,
++static const unsigned int hscif1_ctrl_a_mux[] = {
++ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+ };
+
+-/* - HSCIF1_X---------------------------------------------------------------- */
+-static const unsigned int hscif1_data_x_pins[] = {
+- /* HRX1_X, HTX1_X */
++static const unsigned int hscif1_data_b_pins[] = {
++ /* HRX1_B, HTX1_B */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+ };
+-static const unsigned int hscif1_data_x_mux[] = {
+- HRX1_X_MARK, HTX1_X_MARK,
++static const unsigned int hscif1_data_b_mux[] = {
++ HRX1_B_MARK, HTX1_B_MARK,
+ };
+-static const unsigned int hscif1_clk_x_pins[] = {
+- /* HSCK1_X */
++static const unsigned int hscif1_clk_b_pins[] = {
++ /* HSCK1_B */
+ RCAR_GP_PIN(1, 10),
+ };
+-static const unsigned int hscif1_clk_x_mux[] = {
+- HSCK1_X_MARK,
++static const unsigned int hscif1_clk_b_mux[] = {
++ HSCK1_B_MARK,
+ };
+-static const unsigned int hscif1_ctrl_x_pins[] = {
+- /* HRTS1_N_X, HCTS1_N_X */
++static const unsigned int hscif1_ctrl_b_pins[] = {
++ /* HRTS1_N_B, HCTS1_N_B */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+ };
+-static const unsigned int hscif1_ctrl_x_mux[] = {
+- HRTS1_N_X_MARK, HCTS1_N_X_MARK,
++static const unsigned int hscif1_ctrl_b_mux[] = {
++ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+ };
+
+ /* - HSCIF2 ----------------------------------------------------------------- */
+@@ -1668,49 +1666,48 @@ static const unsigned int hscif2_ctrl_mux[] = {
+ };
+
+ /* - HSCIF3 ----------------------------------------------------------------- */
+-static const unsigned int hscif3_data_pins[] = {
+- /* HRX3, HTX3 */
++static const unsigned int hscif3_data_a_pins[] = {
++ /* HRX3_A, HTX3_A */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
+ };
+-static const unsigned int hscif3_data_mux[] = {
+- HRX3_MARK, HTX3_MARK,
++static const unsigned int hscif3_data_a_mux[] = {
++ HRX3_A_MARK, HTX3_A_MARK,
+ };
+-static const unsigned int hscif3_clk_pins[] = {
+- /* HSCK3 */
++static const unsigned int hscif3_clk_a_pins[] = {
++ /* HSCK3_A */
+ RCAR_GP_PIN(1, 25),
+ };
+-static const unsigned int hscif3_clk_mux[] = {
+- HSCK3_MARK,
++static const unsigned int hscif3_clk_a_mux[] = {
++ HSCK3_A_MARK,
+ };
+-static const unsigned int hscif3_ctrl_pins[] = {
+- /* HRTS3_N, HCTS3_N */
++static const unsigned int hscif3_ctrl_a_pins[] = {
++ /* HRTS3_N_A, HCTS3_N_A */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+ };
+-static const unsigned int hscif3_ctrl_mux[] = {
+- HRTS3_N_MARK, HCTS3_N_MARK,
++static const unsigned int hscif3_ctrl_a_mux[] = {
++ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+ };
+
+-/* - HSCIF3_A ----------------------------------------------------------------- */
+-static const unsigned int hscif3_data_a_pins[] = {
+- /* HRX3_A, HTX3_A */
++static const unsigned int hscif3_data_b_pins[] = {
++ /* HRX3_B, HTX3_B */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
+ };
+-static const unsigned int hscif3_data_a_mux[] = {
+- HRX3_A_MARK, HTX3_A_MARK,
++static const unsigned int hscif3_data_b_mux[] = {
++ HRX3_B_MARK, HTX3_B_MARK,
+ };
+-static const unsigned int hscif3_clk_a_pins[] = {
+- /* HSCK3_A */
++static const unsigned int hscif3_clk_b_pins[] = {
++ /* HSCK3_B */
+ RCAR_GP_PIN(1, 3),
+ };
+-static const unsigned int hscif3_clk_a_mux[] = {
+- HSCK3_A_MARK,
++static const unsigned int hscif3_clk_b_mux[] = {
++ HSCK3_B_MARK,
+ };
+-static const unsigned int hscif3_ctrl_a_pins[] = {
+- /* HRTS3_N_A, HCTS3_N_A */
++static const unsigned int hscif3_ctrl_b_pins[] = {
++ /* HRTS3_N_B, HCTS3_N_B */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+ };
+-static const unsigned int hscif3_ctrl_a_mux[] = {
+- HRTS3_N_A_MARK, HCTS3_N_A_MARK,
++static const unsigned int hscif3_ctrl_b_mux[] = {
++ HRTS3_N_B_MARK, HCTS3_N_B_MARK,
+ };
+
+ /* - I2C0 ------------------------------------------------------------------- */
+@@ -2093,13 +2090,13 @@ static const unsigned int pcie1_clkreq_n_mux[] = {
+ PCIE1_CLKREQ_N_MARK,
+ };
+
+-/* - PWM0_A ------------------------------------------------------------------- */
+-static const unsigned int pwm0_a_pins[] = {
+- /* PWM0_A */
++/* - PWM0 ------------------------------------------------------------------- */
++static const unsigned int pwm0_pins[] = {
++ /* PWM0 */
+ RCAR_GP_PIN(1, 15),
+ };
+-static const unsigned int pwm0_a_mux[] = {
+- PWM0_A_MARK,
++static const unsigned int pwm0_mux[] = {
++ PWM0_MARK,
+ };
+
+ /* - PWM1_A ------------------------------------------------------------------- */
+@@ -2120,13 +2117,13 @@ static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+ };
+
+-/* - PWM2_B ------------------------------------------------------------------- */
+-static const unsigned int pwm2_b_pins[] = {
+- /* PWM2_B */
++/* - PWM2 ------------------------------------------------------------------- */
++static const unsigned int pwm2_pins[] = {
++ /* PWM2 */
+ RCAR_GP_PIN(2, 14),
+ };
+-static const unsigned int pwm2_b_mux[] = {
+- PWM2_B_MARK,
++static const unsigned int pwm2_mux[] = {
++ PWM2_MARK,
+ };
+
+ /* - PWM3_A ------------------------------------------------------------------- */
+@@ -2183,22 +2180,22 @@ static const unsigned int pwm7_mux[] = {
+ PWM7_MARK,
+ };
+
+-/* - PWM8_A ------------------------------------------------------------------- */
+-static const unsigned int pwm8_a_pins[] = {
+- /* PWM8_A */
++/* - PWM8 ------------------------------------------------------------------- */
++static const unsigned int pwm8_pins[] = {
++ /* PWM8 */
+ RCAR_GP_PIN(1, 13),
+ };
+-static const unsigned int pwm8_a_mux[] = {
+- PWM8_A_MARK,
++static const unsigned int pwm8_mux[] = {
++ PWM8_MARK,
+ };
+
+-/* - PWM9_A ------------------------------------------------------------------- */
+-static const unsigned int pwm9_a_pins[] = {
+- /* PWM9_A */
++/* - PWM9 ------------------------------------------------------------------- */
++static const unsigned int pwm9_pins[] = {
++ /* PWM9 */
+ RCAR_GP_PIN(1, 14),
+ };
+-static const unsigned int pwm9_a_mux[] = {
+- PWM9_A_MARK,
++static const unsigned int pwm9_mux[] = {
++ PWM9_MARK,
+ };
+
+ /* - QSPI0 ------------------------------------------------------------------ */
+@@ -2261,75 +2258,51 @@ static const unsigned int scif0_ctrl_mux[] = {
+ };
+
+ /* - SCIF1 ------------------------------------------------------------------ */
+-static const unsigned int scif1_data_pins[] = {
+- /* RX1, TX1 */
++static const unsigned int scif1_data_a_pins[] = {
++ /* RX1_A, TX1_A */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+ };
+-static const unsigned int scif1_data_mux[] = {
+- RX1_MARK, TX1_MARK,
++static const unsigned int scif1_data_a_mux[] = {
++ RX1_A_MARK, TX1_A_MARK,
+ };
+-static const unsigned int scif1_clk_pins[] = {
+- /* SCK1 */
++static const unsigned int scif1_clk_a_pins[] = {
++ /* SCK1_A */
+ RCAR_GP_PIN(0, 18),
+ };
+-static const unsigned int scif1_clk_mux[] = {
+- SCK1_MARK,
++static const unsigned int scif1_clk_a_mux[] = {
++ SCK1_A_MARK,
+ };
+-static const unsigned int scif1_ctrl_pins[] = {
+- /* RTS1_N, CTS1_N */
++static const unsigned int scif1_ctrl_a_pins[] = {
++ /* RTS1_N_A, CTS1_N_A */
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
+ };
+-static const unsigned int scif1_ctrl_mux[] = {
+- RTS1_N_MARK, CTS1_N_MARK,
++static const unsigned int scif1_ctrl_a_mux[] = {
++ RTS1_N_A_MARK, CTS1_N_A_MARK,
+ };
+
+-/* - SCIF1_X ------------------------------------------------------------------ */
+-static const unsigned int scif1_data_x_pins[] = {
+- /* RX1_X, TX1_X */
++static const unsigned int scif1_data_b_pins[] = {
++ /* RX1_B, TX1_B */
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
+ };
+-static const unsigned int scif1_data_x_mux[] = {
+- RX1_X_MARK, TX1_X_MARK,
++static const unsigned int scif1_data_b_mux[] = {
++ RX1_B_MARK, TX1_B_MARK,
+ };
+-static const unsigned int scif1_clk_x_pins[] = {
+- /* SCK1_X */
++static const unsigned int scif1_clk_b_pins[] = {
++ /* SCK1_B */
+ RCAR_GP_PIN(1, 10),
+ };
+-static const unsigned int scif1_clk_x_mux[] = {
+- SCK1_X_MARK,
++static const unsigned int scif1_clk_b_mux[] = {
++ SCK1_B_MARK,
+ };
+-static const unsigned int scif1_ctrl_x_pins[] = {
+- /* RTS1_N_X, CTS1_N_X */
++static const unsigned int scif1_ctrl_b_pins[] = {
++ /* RTS1_N_B, CTS1_N_B */
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
+ };
+-static const unsigned int scif1_ctrl_x_mux[] = {
+- RTS1_N_X_MARK, CTS1_N_X_MARK,
++static const unsigned int scif1_ctrl_b_mux[] = {
++ RTS1_N_B_MARK, CTS1_N_B_MARK,
+ };
+
+ /* - SCIF3 ------------------------------------------------------------------ */
+-static const unsigned int scif3_data_pins[] = {
+- /* RX3, TX3 */
+- RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+-};
+-static const unsigned int scif3_data_mux[] = {
+- RX3_MARK, TX3_MARK,
+-};
+-static const unsigned int scif3_clk_pins[] = {
+- /* SCK3 */
+- RCAR_GP_PIN(1, 4),
+-};
+-static const unsigned int scif3_clk_mux[] = {
+- SCK3_MARK,
+-};
+-static const unsigned int scif3_ctrl_pins[] = {
+- /* RTS3_N, CTS3_N */
+- RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+-};
+-static const unsigned int scif3_ctrl_mux[] = {
+- RTS3_N_MARK, CTS3_N_MARK,
+-};
+-
+-/* - SCIF3_A ------------------------------------------------------------------ */
+ static const unsigned int scif3_data_a_pins[] = {
+ /* RX3_A, TX3_A */
+ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
+@@ -2352,6 +2325,28 @@ static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_A_MARK, CTS3_N_A_MARK,
+ };
+
++static const unsigned int scif3_data_b_pins[] = {
++ /* RX3_B, TX3_B */
++ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
++};
++static const unsigned int scif3_data_b_mux[] = {
++ RX3_B_MARK, TX3_B_MARK,
++};
++static const unsigned int scif3_clk_b_pins[] = {
++ /* SCK3_B */
++ RCAR_GP_PIN(1, 4),
++};
++static const unsigned int scif3_clk_b_mux[] = {
++ SCK3_B_MARK,
++};
++static const unsigned int scif3_ctrl_b_pins[] = {
++ /* RTS3_N_B, CTS3_N_B */
++ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
++};
++static const unsigned int scif3_ctrl_b_mux[] = {
++ RTS3_N_B_MARK, CTS3_N_B_MARK,
++};
++
+ /* - SCIF4 ------------------------------------------------------------------ */
+ static const unsigned int scif4_data_pins[] = {
+ /* RX4, TX4 */
+@@ -2384,6 +2379,14 @@ static const unsigned int scif_clk_mux[] = {
+ SCIF_CLK_MARK,
+ };
+
++static const unsigned int scif_clk2_pins[] = {
++ /* SCIF_CLK2 */
++ RCAR_GP_PIN(8, 11),
++};
++static const unsigned int scif_clk2_mux[] = {
++ SCIF_CLK2_MARK,
++};
++
+ /* - SSI ------------------------------------------------- */
+ static const unsigned int ssi_data_pins[] = {
+ /* SSI_SD */
+@@ -2400,64 +2403,63 @@ static const unsigned int ssi_ctrl_mux[] = {
+ SSI_SCK_MARK, SSI_WS_MARK,
+ };
+
+-/* - TPU ------------------------------------------------------------------- */
+-static const unsigned int tpu_to0_pins[] = {
+- /* TPU0TO0 */
++/* - TPU -------------------------------------------------------------------- */
++static const unsigned int tpu_to0_a_pins[] = {
++ /* TPU0TO0_A */
+ RCAR_GP_PIN(2, 8),
+ };
+-static const unsigned int tpu_to0_mux[] = {
+- TPU0TO0_MARK,
++static const unsigned int tpu_to0_a_mux[] = {
++ TPU0TO0_A_MARK,
+ };
+-static const unsigned int tpu_to1_pins[] = {
+- /* TPU0TO1 */
++static const unsigned int tpu_to1_a_pins[] = {
++ /* TPU0TO1_A */
+ RCAR_GP_PIN(2, 7),
+ };
+-static const unsigned int tpu_to1_mux[] = {
+- TPU0TO1_MARK,
++static const unsigned int tpu_to1_a_mux[] = {
++ TPU0TO1_A_MARK,
+ };
+-static const unsigned int tpu_to2_pins[] = {
+- /* TPU0TO2 */
++static const unsigned int tpu_to2_a_pins[] = {
++ /* TPU0TO2_A */
+ RCAR_GP_PIN(2, 12),
+ };
+-static const unsigned int tpu_to2_mux[] = {
+- TPU0TO2_MARK,
++static const unsigned int tpu_to2_a_mux[] = {
++ TPU0TO2_A_MARK,
+ };
+-static const unsigned int tpu_to3_pins[] = {
+- /* TPU0TO3 */
++static const unsigned int tpu_to3_a_pins[] = {
++ /* TPU0TO3_A */
+ RCAR_GP_PIN(2, 13),
+ };
+-static const unsigned int tpu_to3_mux[] = {
+- TPU0TO3_MARK,
++static const unsigned int tpu_to3_a_mux[] = {
++ TPU0TO3_A_MARK,
+ };
+
+-/* - TPU_A ------------------------------------------------------------------- */
+-static const unsigned int tpu_to0_a_pins[] = {
+- /* TPU0TO0_A */
++static const unsigned int tpu_to0_b_pins[] = {
++ /* TPU0TO0_B */
+ RCAR_GP_PIN(1, 25),
+ };
+-static const unsigned int tpu_to0_a_mux[] = {
+- TPU0TO0_A_MARK,
++static const unsigned int tpu_to0_b_mux[] = {
++ TPU0TO0_B_MARK,
+ };
+-static const unsigned int tpu_to1_a_pins[] = {
+- /* TPU0TO1_A */
++static const unsigned int tpu_to1_b_pins[] = {
++ /* TPU0TO1_B */
+ RCAR_GP_PIN(1, 26),
+ };
+-static const unsigned int tpu_to1_a_mux[] = {
+- TPU0TO1_A_MARK,
++static const unsigned int tpu_to1_b_mux[] = {
++ TPU0TO1_B_MARK,
+ };
+-static const unsigned int tpu_to2_a_pins[] = {
+- /* TPU0TO2_A */
++static const unsigned int tpu_to2_b_pins[] = {
++ /* TPU0TO2_B */
+ RCAR_GP_PIN(2, 0),
+ };
+-static const unsigned int tpu_to2_a_mux[] = {
+- TPU0TO2_A_MARK,
++static const unsigned int tpu_to2_b_mux[] = {
++ TPU0TO2_B_MARK,
+ };
+-static const unsigned int tpu_to3_a_pins[] = {
+- /* TPU0TO3_A */
++static const unsigned int tpu_to3_b_pins[] = {
++ /* TPU0TO3_B */
+ RCAR_GP_PIN(2, 1),
+ };
+-static const unsigned int tpu_to3_a_mux[] = {
+- TPU0TO3_A_MARK,
++static const unsigned int tpu_to3_b_mux[] = {
++ TPU0TO3_B_MARK,
+ };
+
+ /* - TSN0 ------------------------------------------------ */
+@@ -2570,8 +2572,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(canfd2_data),
+ SH_PFC_PIN_GROUP(canfd3_data),
+ SH_PFC_PIN_GROUP(canfd4_data),
+- SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(canfd5_data_a),
++ SH_PFC_PIN_GROUP(canfd5_data_b),
+ SH_PFC_PIN_GROUP(canfd6_data),
+ SH_PFC_PIN_GROUP(canfd7_data),
+ SH_PFC_PIN_GROUP(can_clk),
+@@ -2579,21 +2581,21 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+- SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(hscif1_data_a),
++ SH_PFC_PIN_GROUP(hscif1_clk_a),
++ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
++ SH_PFC_PIN_GROUP(hscif1_data_b),
++ SH_PFC_PIN_GROUP(hscif1_clk_b),
++ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+- SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(hscif3_data_a),
++ SH_PFC_PIN_GROUP(hscif3_clk_a),
++ SH_PFC_PIN_GROUP(hscif3_ctrl_a),
++ SH_PFC_PIN_GROUP(hscif3_data_b),
++ SH_PFC_PIN_GROUP(hscif3_clk_b),
++ SH_PFC_PIN_GROUP(hscif3_ctrl_b),
+
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+@@ -2655,18 +2657,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(pcie0_clkreq_n),
+ SH_PFC_PIN_GROUP(pcie1_clkreq_n),
+
+- SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+- SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(pwm2),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(pwm7),
+- SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(pwm8),
++ SH_PFC_PIN_GROUP(pwm9),
+
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ BUS_DATA_PIN_GROUP(qspi0_data, 2),
+@@ -2678,34 +2680,35 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+- SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(scif1_data_a),
++ SH_PFC_PIN_GROUP(scif1_clk_a),
++ SH_PFC_PIN_GROUP(scif1_ctrl_a),
++ SH_PFC_PIN_GROUP(scif1_data_b),
++ SH_PFC_PIN_GROUP(scif1_clk_b),
++ SH_PFC_PIN_GROUP(scif1_ctrl_b),
++ SH_PFC_PIN_GROUP(scif3_data_a),
++ SH_PFC_PIN_GROUP(scif3_clk_a),
++ SH_PFC_PIN_GROUP(scif3_ctrl_a),
++ SH_PFC_PIN_GROUP(scif3_data_b),
++ SH_PFC_PIN_GROUP(scif3_clk_b),
++ SH_PFC_PIN_GROUP(scif3_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_clk),
+ SH_PFC_PIN_GROUP(scif4_ctrl),
+ SH_PFC_PIN_GROUP(scif_clk),
++ SH_PFC_PIN_GROUP(scif_clk2),
+
+ SH_PFC_PIN_GROUP(ssi_data),
+ SH_PFC_PIN_GROUP(ssi_ctrl),
+
+- SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
+- SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
++ SH_PFC_PIN_GROUP(tpu_to0_a),
++ SH_PFC_PIN_GROUP(tpu_to0_b),
++ SH_PFC_PIN_GROUP(tpu_to1_a),
++ SH_PFC_PIN_GROUP(tpu_to1_b),
++ SH_PFC_PIN_GROUP(tpu_to2_a),
++ SH_PFC_PIN_GROUP(tpu_to2_b),
++ SH_PFC_PIN_GROUP(tpu_to3_a),
++ SH_PFC_PIN_GROUP(tpu_to3_b),
+
+ SH_PFC_PIN_GROUP(tsn0_link),
+ SH_PFC_PIN_GROUP(tsn0_phy_int),
+@@ -2779,8 +2782,7 @@ static const char * const canfd4_groups[] = {
+ };
+
+ static const char * const canfd5_groups[] = {
+- /* suffix might be updated */
+- "canfd5_data",
++ "canfd5_data_a",
+ "canfd5_data_b",
+ };
+
+@@ -2803,13 +2805,12 @@ static const char * const hscif0_groups[] = {
+ };
+
+ static const char * const hscif1_groups[] = {
+- /* suffix might be updated */
+- "hscif1_data",
+- "hscif1_clk",
+- "hscif1_ctrl",
+- "hscif1_data_x",
+- "hscif1_clk_x",
+- "hscif1_ctrl_x",
++ "hscif1_data_a",
++ "hscif1_clk_a",
++ "hscif1_ctrl_a",
++ "hscif1_data_b",
++ "hscif1_clk_b",
++ "hscif1_ctrl_b",
+ };
+
+ static const char * const hscif2_groups[] = {
+@@ -2819,13 +2820,12 @@ static const char * const hscif2_groups[] = {
+ };
+
+ static const char * const hscif3_groups[] = {
+- /* suffix might be updated */
+- "hscif3_data",
+- "hscif3_clk",
+- "hscif3_ctrl",
+ "hscif3_data_a",
+ "hscif3_clk_a",
+ "hscif3_ctrl_a",
++ "hscif3_data_b",
++ "hscif3_clk_b",
++ "hscif3_ctrl_b",
+ };
+
+ static const char * const i2c0_groups[] = {
+@@ -2922,8 +2922,7 @@ static const char * const pcie_groups[] = {
+ };
+
+ static const char * const pwm0_groups[] = {
+- /* suffix might be updated */
+- "pwm0_a",
++ "pwm0",
+ };
+
+ static const char * const pwm1_groups[] = {
+@@ -2932,8 +2931,7 @@ static const char * const pwm1_groups[] = {
+ };
+
+ static const char * const pwm2_groups[] = {
+- /* suffix might be updated */
+- "pwm2_b",
++ "pwm2",
+ };
+
+ static const char * const pwm3_groups[] = {
+@@ -2958,13 +2956,11 @@ static const char * const pwm7_groups[] = {
+ };
+
+ static const char * const pwm8_groups[] = {
+- /* suffix might be updated */
+- "pwm8_a",
++ "pwm8",
+ };
+
+ static const char * const pwm9_groups[] = {
+- /* suffix might be updated */
+- "pwm9_a",
++ "pwm9",
+ };
+
+ static const char * const qspi0_groups[] = {
+@@ -2986,23 +2982,21 @@ static const char * const scif0_groups[] = {
+ };
+
+ static const char * const scif1_groups[] = {
+- /* suffix might be updated */
+- "scif1_data",
+- "scif1_clk",
+- "scif1_ctrl",
+- "scif1_data_x",
+- "scif1_clk_x",
+- "scif1_ctrl_x",
++ "scif1_data_a",
++ "scif1_clk_a",
++ "scif1_ctrl_a",
++ "scif1_data_b",
++ "scif1_clk_b",
++ "scif1_ctrl_b",
+ };
+
+ static const char * const scif3_groups[] = {
+- /* suffix might be updated */
+- "scif3_data",
+- "scif3_clk",
+- "scif3_ctrl",
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
++ "scif3_data_b",
++ "scif3_clk_b",
++ "scif3_ctrl_b",
+ };
+
+ static const char * const scif4_groups[] = {
+@@ -3015,21 +3009,24 @@ static const char * const scif_clk_groups[] = {
+ "scif_clk",
+ };
+
++static const char * const scif_clk2_groups[] = {
++ "scif_clk2",
++};
++
+ static const char * const ssi_groups[] = {
+ "ssi_data",
+ "ssi_ctrl",
+ };
+
+ static const char * const tpu_groups[] = {
+- /* suffix might be updated */
+- "tpu_to0",
+ "tpu_to0_a",
+- "tpu_to1",
++ "tpu_to0_b",
+ "tpu_to1_a",
+- "tpu_to2",
++ "tpu_to1_b",
+ "tpu_to2_a",
+- "tpu_to3",
++ "tpu_to2_b",
+ "tpu_to3_a",
++ "tpu_to3_b",
+ };
+
+ static const char * const tsn0_groups[] = {
+@@ -3102,6 +3099,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif_clk),
++ SH_PFC_FUNCTION(scif_clk2),
+
+ SH_PFC_FUNCTION(ssi),
+
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 37cdfe4b04f9a4..2ea6ef99cc70bf 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -1175,6 +1175,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ u32 port;
+ u8 bit;
+
++ irq_chip_disable_parent(d);
++
+ port = RZG2L_PIN_ID_TO_PORT(hwirq);
+ bit = RZG2L_PIN_ID_TO_PIN(hwirq);
+
+@@ -1189,7 +1191,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
+- irq_chip_disable_parent(d);
+ }
+
+ static void rzg2l_gpio_irq_enable(struct irq_data *d)
+diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+index 530fe340a9a154..561fd0c6b9b0ae 100644
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+@@ -492,7 +492,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+
+ nmaps = 0;
+ ngroups = 0;
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npinmux = of_property_count_u32_elems(child, "pinmux");
+ int npins = of_property_count_u32_elems(child, "pins");
+
+@@ -527,7 +527,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
+ nmaps = 0;
+ ngroups = 0;
+ mutex_lock(&sfp->mutex);
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npins;
+ int i;
+
+diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+index 640f827a9b2ca6..a3fee55479d206 100644
+--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
++++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+@@ -135,7 +135,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
+ int ret;
+
+ ngroups = 0;
+- for_each_child_of_node(np, child)
++ for_each_available_child_of_node(np, child)
+ ngroups += 1;
+ nmaps = 2 * ngroups;
+
+@@ -150,7 +150,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
+ nmaps = 0;
+ ngroups = 0;
+ mutex_lock(&sfp->mutex);
+- for_each_child_of_node(np, child) {
++ for_each_available_child_of_node(np, child) {
+ int npins = of_property_count_u32_elems(child, "pinmux");
+ int *pins;
+ u32 *pinmux;
+@@ -805,12 +805,12 @@ static int jh7110_irq_set_type(struct irq_data *d, unsigned int trigger)
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+- polarity = mask; /* 1: high level */
++ polarity = 0; /* 0: high level */
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+- polarity = 0; /* 0: low level */
++ polarity = mask; /* 1: low level */
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index a73385a431de98..5e91def6078474 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1283,9 +1283,11 @@ static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pi
+ int i;
+
+ /* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */
+- pin_desc = pctl->pins + stm32_pin_nb;
+- if (pin_desc->pin.number == stm32_pin_nb)
+- return pin_desc;
++ if (stm32_pin_nb < pctl->npins) {
++ pin_desc = pctl->pins + stm32_pin_nb;
++ if (pin_desc->pin.number == stm32_pin_nb)
++ return pin_desc;
++ }
+
+ /* Otherwise, loop all array to find the pin with the right number */
+ for (i = 0; i < pctl->npins; i++) {
+@@ -1378,12 +1380,22 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ }
+
+ names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
++ if (!names) {
++ err = -ENOMEM;
++ goto err_clk;
++ }
++
+ for (i = 0; i < npins; i++) {
+ stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
+- if (stm32_pin && stm32_pin->pin.name)
++ if (stm32_pin && stm32_pin->pin.name) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
+- else
++ if (!names[i]) {
++ err = -ENOMEM;
++ goto err_clk;
++ }
++ } else {
+ names[i] = NULL;
++ }
+ }
+
+ bank->gpio_chip.names = (const char * const *)names;
+diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+index c1477f65783933..451801acdc4038 100644
+--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
++++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+@@ -14,7 +14,8 @@
+ #include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+@@ -272,6 +273,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,
+ return r;
+ }
+
++/**
++ * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
++ * @data: IODelay device
++ *
++ * Deinitialize the IODelay device (basically just lock the region back up).
++ */
++static void ti_iodelay_pinconf_deinit_dev(void *data)
++{
++ struct ti_iodelay_device *iod = data;
++ const struct ti_iodelay_reg_data *reg = iod->reg_data;
++
++ /* lock the iodelay region back again */
++ regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
++ reg->global_lock_mask, reg->global_lock_val);
++}
++
+ /**
+ * ti_iodelay_pinconf_init_dev() - Initialize IODelay device
+ * @iod: iodelay device
+@@ -294,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
+ if (r)
+ return r;
+
++ r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev,
++ iod);
++ if (r)
++ return r;
++
+ /* Read up Recalibration sequence done by bootloader */
+ r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val);
+ if (r)
+@@ -352,21 +374,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)
+ return 0;
+ }
+
+-/**
+- * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device
+- * @iod: IODelay device
+- *
+- * Deinitialize the IODelay device (basically just lock the region back up.
+- */
+-static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod)
+-{
+- const struct ti_iodelay_reg_data *reg = iod->reg_data;
+-
+- /* lock the iodelay region back again */
+- regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,
+- reg->global_lock_mask, reg->global_lock_val);
+-}
+-
+ /**
+ * ti_iodelay_get_pingroup() - Find the group mapped by a group selector
+ * @iod: iodelay device
+@@ -821,56 +828,48 @@ MODULE_DEVICE_TABLE(of, ti_iodelay_of_match);
+ static int ti_iodelay_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct device_node *np = of_node_get(dev->of_node);
+- const struct of_device_id *match;
++ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
+ struct resource *res;
+ struct ti_iodelay_device *iod;
+- int ret = 0;
++ int ret;
+
+ if (!np) {
+- ret = -EINVAL;
+ dev_err(dev, "No OF node\n");
+- goto exit_out;
+- }
+-
+- match = of_match_device(ti_iodelay_of_match, dev);
+- if (!match) {
+- ret = -EINVAL;
+- dev_err(dev, "No DATA match\n");
+- goto exit_out;
++ return -EINVAL;
+ }
+
+ iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL);
+- if (!iod) {
+- ret = -ENOMEM;
+- goto exit_out;
+- }
++ if (!iod)
++ return -ENOMEM;
++
+ iod->dev = dev;
+- iod->reg_data = match->data;
++ iod->reg_data = device_get_match_data(dev);
++ if (!iod->reg_data) {
++ dev_err(dev, "No DATA match\n");
++ return -EINVAL;
++ }
+
+ /* So far We can assume there is only 1 bank of registers */
+ iod->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+- if (IS_ERR(iod->reg_base)) {
+- ret = PTR_ERR(iod->reg_base);
+- goto exit_out;
+- }
++ if (IS_ERR(iod->reg_base))
++ return PTR_ERR(iod->reg_base);
++
+ iod->phys_base = res->start;
+
+ iod->regmap = devm_regmap_init_mmio(dev, iod->reg_base,
+ iod->reg_data->regmap_config);
+ if (IS_ERR(iod->regmap)) {
+ dev_err(dev, "Regmap MMIO init failed.\n");
+- ret = PTR_ERR(iod->regmap);
+- goto exit_out;
++ return PTR_ERR(iod->regmap);
+ }
+
+ ret = ti_iodelay_pinconf_init_dev(iod);
+ if (ret)
+- goto exit_out;
++ return ret;
+
+ ret = ti_iodelay_alloc_pins(dev, iod, res->start);
+ if (ret)
+- goto exit_out;
++ return ret;
+
+ iod->desc.pctlops = &ti_iodelay_pinctrl_ops;
+ /* no pinmux ops - we are pinconf */
+@@ -878,47 +877,17 @@ static int ti_iodelay_probe(struct platform_device *pdev)
+ iod->desc.name = dev_name(dev);
+ iod->desc.owner = THIS_MODULE;
+
+- ret = pinctrl_register_and_init(&iod->desc, dev, iod, &iod->pctl);
++ ret = devm_pinctrl_register_and_init(dev, &iod->desc, iod, &iod->pctl);
+ if (ret) {
+ dev_err(dev, "Failed to register pinctrl\n");
+- goto exit_out;
++ return ret;
+ }
+
+- platform_set_drvdata(pdev, iod);
+-
+ return pinctrl_enable(iod->pctl);
+-
+-exit_out:
+- of_node_put(np);
+- return ret;
+-}
+-
+-/**
+- * ti_iodelay_remove() - standard remove
+- * @pdev: platform device
+- *
+- * Return: 0 if all went fine, else appropriate error value.
+- */
+-static int ti_iodelay_remove(struct platform_device *pdev)
+-{
+- struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
+-
+- if (!iod)
+- return 0;
+-
+- if (iod->pctl)
+- pinctrl_unregister(iod->pctl);
+-
+- ti_iodelay_pinconf_deinit_dev(iod);
+-
+- /* Expect other allocations to be freed by devm */
+-
+- return 0;
+ }
+
+ static struct platform_driver ti_iodelay_driver = {
+ .probe = ti_iodelay_probe,
+- .remove = ti_iodelay_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ti_iodelay_of_match,
+diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
+index 5d36fbc75e1bbb..47d19f7e295a7a 100644
+--- a/drivers/platform/chrome/cros_ec.c
++++ b/drivers/platform/chrome/cros_ec.c
+@@ -321,17 +321,8 @@ void cros_ec_unregister(struct cros_ec_device *ec_dev)
+ EXPORT_SYMBOL(cros_ec_unregister);
+
+ #ifdef CONFIG_PM_SLEEP
+-/**
+- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+- * @ec_dev: Device to suspend.
+- *
+- * This can be called by drivers to handle a suspend event.
+- *
+- * Return: 0 on success or negative error code.
+- */
+-int cros_ec_suspend(struct cros_ec_device *ec_dev)
++static void cros_ec_send_suspend_event(struct cros_ec_device *ec_dev)
+ {
+- struct device *dev = ec_dev->dev;
+ int ret;
+ u8 sleep_event;
+
+@@ -343,7 +334,26 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
+ if (ret < 0)
+ dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n",
+ ret);
++}
+
++/**
++ * cros_ec_suspend_prepare() - Handle a suspend prepare operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle the prepare stage of suspend.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev)
++{
++ cros_ec_send_suspend_event(ec_dev);
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_suspend_prepare);
++
++static void cros_ec_disable_irq(struct cros_ec_device *ec_dev)
++{
++ struct device *dev = ec_dev->dev;
+ if (device_may_wakeup(dev))
+ ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
+ else
+@@ -351,7 +361,35 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
+
+ disable_irq(ec_dev->irq);
+ ec_dev->suspended = true;
++}
+
++/**
++ * cros_ec_suspend_late() - Handle a suspend late operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle the late stage of suspend.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend_late(struct cros_ec_device *ec_dev)
++{
++ cros_ec_disable_irq(ec_dev);
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_suspend_late);
++
++/**
++ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
++ * @ec_dev: Device to suspend.
++ *
++ * This can be called by drivers to handle a suspend event.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_suspend(struct cros_ec_device *ec_dev)
++{
++ cros_ec_send_suspend_event(ec_dev);
++ cros_ec_disable_irq(ec_dev);
+ return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_suspend);
+@@ -370,22 +408,11 @@ static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
+ }
+ }
+
+-/**
+- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+- * @ec_dev: Device to resume.
+- *
+- * This can be called by drivers to handle a resume event.
+- *
+- * Return: 0 on success or negative error code.
+- */
+-int cros_ec_resume(struct cros_ec_device *ec_dev)
++static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
+ {
+ int ret;
+ u8 sleep_event;
+
+- ec_dev->suspended = false;
+- enable_irq(ec_dev->irq);
+-
+ sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ?
+ HOST_SLEEP_EVENT_S3_RESUME :
+ HOST_SLEEP_EVENT_S0IX_RESUME;
+@@ -394,17 +421,62 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ if (ret < 0)
+ dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n",
+ ret);
++}
+
+- if (ec_dev->wake_enabled)
+- disable_irq_wake(ec_dev->irq);
++/**
++ * cros_ec_resume_complete() - Handle a resume complete operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle the complete stage of resume.
++ */
++void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
++{
++ cros_ec_send_resume_event(ec_dev);
+
+ /*
+ * Let the mfd devices know about events that occur during
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
++}
++EXPORT_SYMBOL(cros_ec_resume_complete);
+
++static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
++{
++ ec_dev->suspended = false;
++ enable_irq(ec_dev->irq);
+
++ if (ec_dev->wake_enabled)
++ disable_irq_wake(ec_dev->irq);
++}
++
++/**
++ * cros_ec_resume_early() - Handle a resume early operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle the early stage of resume.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_resume_early(struct cros_ec_device *ec_dev)
++{
++ cros_ec_enable_irq(ec_dev);
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_resume_early);
++
++/**
++ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
++ * @ec_dev: Device to resume.
++ *
++ * This can be called by drivers to handle a resume event.
++ *
++ * Return: 0 always.
++ */
++int cros_ec_resume(struct cros_ec_device *ec_dev)
++{
++ cros_ec_resume_early(ec_dev);
++ cros_ec_resume_complete(ec_dev);
+ return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_resume);
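
The split above is behaviour-preserving: cros_ec_suspend() is now just the composition of the two new stages, so transports that keep calling it see no change, while transports that need finer control can send the sleep event while host commands still work and defer the IRQ shutdown to late suspend. A hedged sketch of both call styles (the demo_* wrappers are hypothetical):

/* Option 1: keep the legacy one-shot entry point. */
static int demo_ec_suspend(struct device *dev)
{
	return cros_ec_suspend(dev_get_drvdata(dev));
}

/* Option 2: stage the work across PM phases, as cros_ec_lpc now does. */
static int demo_ec_prepare(struct device *dev)
{
	/* Sleep event first, while the EC can still accept host commands. */
	return cros_ec_suspend_prepare(dev_get_drvdata(dev));
}

static int demo_ec_suspend_late(struct device *dev)
{
	/* Quiesce the interrupt only after ordinary suspend callbacks ran. */
	return cros_ec_suspend_late(dev_get_drvdata(dev));
}
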
+diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
+index bbca0096868ac9..566332f4878920 100644
+--- a/drivers/platform/chrome/cros_ec.h
++++ b/drivers/platform/chrome/cros_ec.h
+@@ -14,7 +14,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev);
+ void cros_ec_unregister(struct cros_ec_device *ec_dev);
+
+ int cros_ec_suspend(struct cros_ec_device *ec_dev);
++int cros_ec_suspend_late(struct cros_ec_device *ec_dev);
++int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev);
+ int cros_ec_resume(struct cros_ec_device *ec_dev);
++int cros_ec_resume_early(struct cros_ec_device *ec_dev);
++void cros_ec_resume_complete(struct cros_ec_device *ec_dev);
+
+ irqreturn_t cros_ec_irq_thread(int irq, void *data);
+
+diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
+index c876120e0ebc93..793c8c4bf35bad 100644
+--- a/drivers/platform/chrome/cros_ec_debugfs.c
++++ b/drivers/platform/chrome/cros_ec_debugfs.c
+@@ -329,6 +329,7 @@ static int ec_read_version_supported(struct cros_ec_dev *ec)
+ if (!msg)
+ return 0;
+
++ msg->version = 1;
+ msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*response);
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index 356572452898de..42e1770887fb0d 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -549,22 +549,36 @@ MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
+ static int cros_ec_lpc_prepare(struct device *dev)
+ {
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+-
+- return cros_ec_suspend(ec_dev);
++ return cros_ec_suspend_prepare(ec_dev);
+ }
+
+ static void cros_ec_lpc_complete(struct device *dev)
+ {
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+- cros_ec_resume(ec_dev);
++ cros_ec_resume_complete(ec_dev);
++}
++
++static int cros_ec_lpc_suspend_late(struct device *dev)
++{
++ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
++
++ return cros_ec_suspend_late(ec_dev);
++}
++
++static int cros_ec_lpc_resume_early(struct device *dev)
++{
++ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
++
++ return cros_ec_resume_early(ec_dev);
+ }
+ #endif
+
+ static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
+ #ifdef CONFIG_PM_SLEEP
+ .prepare = cros_ec_lpc_prepare,
+- .complete = cros_ec_lpc_complete
++ .complete = cros_ec_lpc_complete,
+ #endif
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend_late, cros_ec_lpc_resume_early)
+ };
+
+ static struct platform_driver cros_ec_lpc_driver = {
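
For orientation, the resulting pm_ops spans four distinct phases of the system sleep sequence: .prepare runs before any device suspends, the late-suspend callback runs after all ordinary .suspend callbacks, early resume mirrors it on wake, and .complete runs last. A sketch of the same wiring with hypothetical demo_* callbacks:

/*
 * Phase order during system sleep:
 *   down: .prepare -> .suspend -> .suspend_late -> .suspend_noirq
 *   up:   .resume_noirq -> .resume_early -> .resume -> .complete
 */
static const struct dev_pm_ops demo_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.prepare = demo_prepare,	/* EC sleep event sent here */
	.complete = demo_complete,	/* resume event + events queued during suspend */
#endif
	SET_LATE_SYSTEM_SLEEP_PM_OPS(demo_suspend_late, demo_resume_early)
};
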
+diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c
+index 0d9c79b270ce20..63b6b261b8e58c 100644
+--- a/drivers/platform/chrome/cros_ec_lpc_mec.c
++++ b/drivers/platform/chrome/cros_ec_lpc_mec.c
+@@ -10,13 +10,65 @@
+
+ #include "cros_ec_lpc_mec.h"
+
++#define ACPI_LOCK_DELAY_MS 500
++
+ /*
+ * This mutex must be held while accessing the EMI unit. We can't rely on the
+ * EC mutex because memmap data may be accessed without it being held.
+ */
+ static DEFINE_MUTEX(io_mutex);
++/*
++ * An alternative mutex to be used when the ACPI AML code may also
++ * access memmap data. When set, this mutex is used in preference to
++ * io_mutex.
++ */
++static acpi_handle aml_mutex;
++
+ static u16 mec_emi_base, mec_emi_end;
+
++/**
++ * cros_ec_lpc_mec_lock() - Acquire mutex for EMI
++ *
++ * Return: Negative error code, or zero for success
++ */
++static int cros_ec_lpc_mec_lock(void)
++{
++ bool success;
++
++ if (!aml_mutex) {
++ mutex_lock(&io_mutex);
++ return 0;
++ }
++
++ success = ACPI_SUCCESS(acpi_acquire_mutex(aml_mutex,
++ NULL, ACPI_LOCK_DELAY_MS));
++ if (!success)
++ return -EBUSY;
++
++ return 0;
++}
++
++/**
++ * cros_ec_lpc_mec_unlock() - Release mutex for EMI
++ *
++ * Return: Negative error code, or zero for success
++ */
++static int cros_ec_lpc_mec_unlock(void)
++{
++ bool success;
++
++ if (!aml_mutex) {
++ mutex_unlock(&io_mutex);
++ return 0;
++ }
++
++ success = ACPI_SUCCESS(acpi_release_mutex(aml_mutex, NULL));
++ if (!success)
++ return -EBUSY;
++
++ return 0;
++}
++
+ /**
+ * cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address.
+ *
+@@ -77,6 +129,7 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ int io_addr;
+ u8 sum = 0;
+ enum cros_ec_lpc_mec_emi_access_mode access, new_access;
++ int ret;
+
+ /* Return checksum of 0 if window is not initialized */
+ WARN_ON(mec_emi_base == 0 || mec_emi_end == 0);
+@@ -92,7 +145,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ else
+ access = ACCESS_TYPE_LONG_AUTO_INCREMENT;
+
+- mutex_lock(&io_mutex);
++ ret = cros_ec_lpc_mec_lock();
++ if (ret)
++ return ret;
+
+ /* Initialize I/O at desired address */
+ cros_ec_lpc_mec_emi_write_address(offset, access);
+@@ -134,7 +189,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
+ }
+
+ done:
+- mutex_unlock(&io_mutex);
++ ret = cros_ec_lpc_mec_unlock();
++ if (ret)
++ return ret;
+
+ return sum;
+ }
+@@ -146,3 +203,18 @@ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end)
+ mec_emi_end = end;
+ }
+ EXPORT_SYMBOL(cros_ec_lpc_mec_init);
++
++int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname)
++{
++ int status;
++
++ if (!adev)
++ return -ENOENT;
++
++ status = acpi_get_handle(adev->handle, pathname, &aml_mutex);
++ if (ACPI_FAILURE(status))
++ return -ENOENT;
++
++ return 0;
++}
++EXPORT_SYMBOL(cros_ec_lpc_mec_acpi_mutex);
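
The aml_mutex fallback exists because firmware AML methods may touch the same EMI window; acquiring the very mutex object the DSDT uses makes kernel and firmware accesses mutually exclusive, while machines without one keep the plain io_mutex. A hedged sketch of the probe-time hookup ("CRMX" is an invented node name for illustration; the real path would come from the board's DSDT):

/* Resolve the AML mutex once; EMI I/O then serialises through it
 * via cros_ec_lpc_mec_lock()/cros_ec_lpc_mec_unlock() transparently. */
static void demo_setup_aml_mutex(struct acpi_device *adev)
{
	if (cros_ec_lpc_mec_acpi_mutex(adev, "CRMX"))	/* hypothetical path */
		dev_info(&adev->dev, "no AML mutex, using local io_mutex\n");
}
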
+diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h
+index 9d0521b23e8aed..3f3af37e58a50d 100644
+--- a/drivers/platform/chrome/cros_ec_lpc_mec.h
++++ b/drivers/platform/chrome/cros_ec_lpc_mec.h
+@@ -8,6 +8,8 @@
+ #ifndef __CROS_EC_LPC_MEC_H
+ #define __CROS_EC_LPC_MEC_H
+
++#include <linux/acpi.h>
++
+ enum cros_ec_lpc_mec_emi_access_mode {
+ /* 8-bit access */
+ ACCESS_TYPE_BYTE = 0x0,
+@@ -45,6 +47,15 @@ enum cros_ec_lpc_mec_io_type {
+ */
+ void cros_ec_lpc_mec_init(unsigned int base, unsigned int end);
+
++/**
++ * cros_ec_lpc_mec_acpi_mutex() - Find and set ACPI mutex for MEC
++ *
++ * @adev: Parent ACPI device
++ * @pathname: Name of AML mutex
++ * Return: Negative error code, or zero for success
++ */
++int cros_ec_lpc_mec_acpi_mutex(struct acpi_device *adev, const char *pathname);
++
+ /**
+ * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
+ *
+diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
+index 475a6dd72db6bb..809fabef3b44a2 100644
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -805,9 +805,11 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ if (ret == -ENOPROTOOPT) {
+ dev_dbg(ec_dev->dev,
+ "GET_NEXT_EVENT returned invalid version error.\n");
++ mutex_lock(&ec_dev->lock);
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
++ mutex_unlock(&ec_dev->lock);
+ if (ret < 0 || ver_mask == 0)
+ /*
+ * Do not change the MKBP supported version if we can't
+diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
+index 5b9748e0463bcc..63e38671e95a64 100644
+--- a/drivers/platform/chrome/cros_ec_proto_test.c
++++ b/drivers/platform/chrome/cros_ec_proto_test.c
+@@ -2668,6 +2668,7 @@ static int cros_ec_proto_test_init(struct kunit *test)
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
++ mutex_init(&ec_dev->lock);
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
+index 788246559bbba2..823371037af729 100644
+--- a/drivers/platform/chrome/cros_ec_uart.c
++++ b/drivers/platform/chrome/cros_ec_uart.c
+@@ -264,12 +264,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
+ if (!ec_dev)
+ return -ENOMEM;
+
+- ret = devm_serdev_device_open(dev, serdev);
+- if (ret) {
+- dev_err(dev, "Unable to open UART device");
+- return ret;
+- }
+-
+ serdev_device_set_drvdata(serdev, ec_dev);
+ init_waitqueue_head(&ec_uart->response.wait_queue);
+
+@@ -281,14 +275,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
+ return ret;
+ }
+
+- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+- if (ret < 0) {
+- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+- return ret;
+- }
+-
+- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+-
+ /* Initialize ec_dev for cros_ec */
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->dev = dev;
+@@ -302,6 +288,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
+
+ serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+
++ ret = devm_serdev_device_open(dev, serdev);
++ if (ret) {
++ dev_err(dev, "Unable to open UART device");
++ return ret;
++ }
++
++ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
++ if (ret < 0) {
++ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
++ return ret;
++ }
++
++ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
++
+ return cros_ec_register(ec_dev);
+ }
+
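
The cros_ec_uart reordering matters because devm_serdev_device_open() can start delivering receive callbacks immediately; opening the port before serdev_device_set_client_ops() (and before the state those ops rely on is initialised) is a use-before-init race. The safe bring-up order, sketched with hypothetical demo_* names:

static int demo_serdev_probe(struct serdev_device *serdev)
{
	struct demo_priv *priv;
	int ret;

	priv = devm_kzalloc(&serdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* 1. All driver state and callbacks in place first... */
	serdev_device_set_drvdata(serdev, priv);
	serdev_device_set_client_ops(serdev, &demo_client_ops);

	/* 2. ...only then open the port: receive_buf() may fire right away. */
	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	/* 3. Line settings need an open port. */
	ret = serdev_device_set_baudrate(serdev, 115200);
	if (ret < 0)
		return ret;
	serdev_device_set_flow_control(serdev, false);

	return 0;
}
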
+diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
+index 4ee7bb431b7c02..e278092f889b92 100644
+--- a/drivers/platform/mellanox/mlxbf-bootctl.c
++++ b/drivers/platform/mellanox/mlxbf-bootctl.c
+@@ -20,6 +20,7 @@
+
+ #define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
+ #define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
++#define MLXBF_BOOTCTL_SB_DEV_MASK BIT(4)
+
+ #define MLXBF_SB_KEY_NUM 4
+
+@@ -40,11 +41,18 @@ static struct mlxbf_bootctl_name boot_names[] = {
+ { MLXBF_BOOTCTL_NONE, "none" },
+ };
+
++enum {
++ MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION = 0,
++ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE = 1,
++ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE = 2,
++ MLXBF_BOOTCTL_SB_LIFECYCLE_RMA = 3
++};
++
+ static const char * const mlxbf_bootctl_lifecycle_states[] = {
+- [0] = "Production",
+- [1] = "GA Secured",
+- [2] = "GA Non-Secured",
+- [3] = "RMA",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION] = "Production",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE] = "GA Secured",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE] = "GA Non-Secured",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_RMA] = "RMA",
+ };
+
+ /* Log header format. */
+@@ -247,25 +255,30 @@ static ssize_t second_reset_action_store(struct device *dev,
+ static ssize_t lifecycle_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ int status_bits;
++ int use_dev_key;
++ int test_state;
+ int lc_state;
+
+- lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+- MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+- if (lc_state < 0)
+- return lc_state;
++ status_bits = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
++ MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
++ if (status_bits < 0)
++ return status_bits;
+
+- lc_state &=
+- MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
++ use_dev_key = status_bits & MLXBF_BOOTCTL_SB_DEV_MASK;
++ test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;
++ lc_state = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ /*
+ * If the test bits are set, we specify that the current state may be
+ * due to using the test bits.
+ */
+- if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
+- lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
+-
++ if (test_state) {
+ return sprintf(buf, "%s(test)\n",
+ mlxbf_bootctl_lifecycle_states[lc_state]);
++ } else if (use_dev_key &&
++ (lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
++ return sprintf(buf, "Secured (development)\n");
+ }
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 2d4bbe99959ef4..db7a1d360cd2ce 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1202,6 +1202,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->dev_attr.show = mlxbf_pmc_event_list_show;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ attr = NULL;
+
+@@ -1214,6 +1216,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "enable");
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+@@ -1240,6 +1244,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "counter%d", j);
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+
+@@ -1251,6 +1257,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "event%d", j);
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+@@ -1283,6 +1291,8 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ events[j].evt_name);
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ attr = NULL;
+ i++;
+@@ -1311,6 +1321,8 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
+ pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
+ pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
+ dev, GFP_KERNEL, pmc->block_name[blk_num]);
++ if (!pmc->block[blk_num].block_attr_grp.name)
++ return -ENOMEM;
+ pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;
+
+ return 0;
+@@ -1442,6 +1454,8 @@ static int mlxbf_pmc_probe(struct platform_device *pdev)
+
+ pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
+ dev, "bfperf", pmc, pmc->groups);
++ if (IS_ERR(pmc->hwmon_dev))
++ return PTR_ERR(pmc->hwmon_dev);
+ platform_set_drvdata(pdev, pmc);
+
+ return 0;
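
The mlxbf-pmc hunks above all fix the same omission: devm_kasprintf() returns NULL on allocation failure, and a NULL attribute name would oops later when the group is registered with sysfs. The shape of the fix at each call site (attr, dev, and j as in the driver):

	attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "counter%d", j);
	if (!attr->dev_attr.attr.name)
		return -ENOMEM;	/* NULL means -ENOMEM, not "no name" */
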
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index ab7d7a1235b836..39828eb84e0ba0 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -47,6 +47,9 @@
+ /* Message with data needs at least two words (for header & data). */
+ #define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+
++/* Tx timeout in milliseconds. */
++#define TMFIFO_TX_TIMEOUT 2000
++
+ /* ACPI UID for BlueField-3. */
+ #define TMFIFO_BF3_UID 1
+
+@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
+ * @drop_desc: dummy desc for packet dropping
+ * @cur_len: processed length of the current descriptor
+ * @rem_len: remaining length of the pending packet
++ * @rem_padding: remaining bytes to send as padding
+ * @pkt_len: total length of the pending packet
+ * @next_avail: next avail descriptor id
+ * @num: vring size (number of descriptors)
+ * @align: vring alignment size
+ * @index: vring index
+ * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
++ * @tx_timeout: expiry time of the last Tx packet
+ * @fifo: pointer to the tmfifo structure
+ */
+ struct mlxbf_tmfifo_vring {
+@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
+ struct vring_desc drop_desc;
+ int cur_len;
+ int rem_len;
++ int rem_padding;
+ u32 pkt_len;
+ u16 next_avail;
+ int num;
+ int align;
+ int index;
+ int vdev_id;
++ unsigned long tx_timeout;
+ struct mlxbf_tmfifo *fifo;
+ };
+
+@@ -819,6 +826,50 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ return true;
+ }
+
++static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
++{
++ unsigned long flags;
++
++ /* Only handle Tx timeout for network vdev. */
++ if (vring->vdev_id != VIRTIO_ID_NET)
++ return;
++
++ /* Initialize the timeout or return if not expired. */
++ if (!vring->tx_timeout) {
++ /* Initialize the timeout. */
++ vring->tx_timeout = jiffies +
++ msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
++ return;
++ } else if (time_before(jiffies, vring->tx_timeout)) {
++ /* Return if not timed out yet. */
++ return;
++ }
++
++ /*
++ * Drop the packet after the timeout. The outstanding packet is
++ * released, and the remaining bytes are sent as 0x00 padding to
++ * recover. On the peer (host) side, the padding bytes are either
++ * dropped directly or appended to an existing outstanding packet,
++ * which is then dropped as a corrupted network packet.
++ */
++ vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
++ mlxbf_tmfifo_release_pkt(vring);
++ vring->cur_len = 0;
++ vring->rem_len = 0;
++ vring->fifo->vring[0] = NULL;
++
++ /*
++ * Make sure the loads/stores are ordered before
++ * returning to virtio.
++ */
++ virtio_mb(false);
++
++ /* Notify upper layer. */
++ spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
++ vring_interrupt(0, vring->vq);
++ spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
++}
++
+ /* Rx & Tx processing of a queue. */
+ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ {
+@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ return;
+
+ do {
++retry:
+ /* Get available FIFO space. */
+ if (avail == 0) {
+ if (is_rx)
+@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ break;
+ }
+
++ /* Insert padding for a discarded Tx packet. */
++ if (!is_rx) {
++ vring->tx_timeout = 0;
++ while (vring->rem_padding >= sizeof(u64)) {
++ writeq(0, vring->fifo->tx.data);
++ vring->rem_padding -= sizeof(u64);
++ if (--avail == 0)
++ goto retry;
++ }
++ }
++
+ /* Console output always comes from the Tx buffer. */
+ if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
+ mlxbf_tmfifo_console_tx(fifo, avail);
+@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ /* Handle one descriptor. */
+ more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
+ } while (more);
++
++ /* Check Tx timeout. */
++ if (avail <= 0 && !is_rx)
++ mlxbf_tmfifo_check_tx_timeout(vring);
+ }
+
+ /* Handle Rx or Tx queues. */
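
The recovery above relies on the FIFO being strictly 64-bit-word oriented: rounding the dropped packet's residue up to a word boundary and streaming zero words keeps the stream aligned, and the peer either discards the zeros or folds them into the half-received frame, which is then dropped as corrupt. A standalone sketch of the padding arithmetic, equivalent to the driver's round_up(rem_len, sizeof(u64)):

#include <stdint.h>
#include <stdio.h>

/* Round a residual byte count up to the next 8-byte FIFO word. */
static uint32_t pad_to_fifo_words(uint32_t rem_len)
{
	return (rem_len + 7) & ~UINT32_C(7);
}

int main(void)
{
	/* 13 residual bytes need 16 bytes (two u64 words) of 0x00 padding. */
	printf("%u\n", pad_to_fifo_words(13));	/* prints 16 */
	return 0;
}
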
+diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
+index 75b699676ca6d7..1a7c45aa41bbf0 100644
+--- a/drivers/platform/mellanox/nvsw-sn2201.c
++++ b/drivers/platform/mellanox/nvsw-sn2201.c
+@@ -1198,6 +1198,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
+ static int nvsw_sn2201_probe(struct platform_device *pdev)
+ {
+ struct nvsw_sn2201 *nvsw_sn2201;
++ int ret;
+
+ nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
+ if (!nvsw_sn2201)
+@@ -1205,8 +1206,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
+
+ nvsw_sn2201->dev = &pdev->dev;
+ platform_set_drvdata(pdev, nvsw_sn2201);
+- platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
++ ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
++ if (ret)
++ return ret;
+
+ nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
+ nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
+diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
+index d8c5f9195f85f5..2ac2f31090f96f 100644
+--- a/drivers/platform/mips/cpu_hwmon.c
++++ b/drivers/platform/mips/cpu_hwmon.c
+@@ -139,6 +139,9 @@ static int __init loongson_hwmon_init(void)
+ csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
+ LOONGSON_CSRF_TEMP;
+
++ if (!csr_temp_enable && !loongson_chiptemp[0])
++ return -ENODEV;
++
+ nr_packages = loongson_sysconf.nr_cpus /
+ loongson_sysconf.cores_per_package;
+
+diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
+index 7fc602e01487d4..7e89f547999b2a 100644
+--- a/drivers/platform/surface/aggregator/controller.c
++++ b/drivers/platform/surface/aggregator/controller.c
+@@ -1354,7 +1354,8 @@ void ssam_controller_destroy(struct ssam_controller *ctrl)
+ if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
+ return;
+
+- WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
++ WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED &&
++ ctrl->state != SSAM_CONTROLLER_INITIALIZED);
+
+ /*
+ * Note: New events could still have been received after the previous
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index 1a6373dea109cc..6152be38398c48 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -231,9 +231,12 @@ static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+ size_t n)
+ {
+ struct ssam_controller *ctrl;
++ int ret;
+
+ ctrl = serdev_device_get_drvdata(dev);
+- return ssam_controller_receive_buf(ctrl, buf, n);
++ ret = ssam_controller_receive_buf(ctrl, buf, n);
++
++ return ret < 0 ? 0 : ret;
+ }
+
+ static void ssam_write_wakeup(struct serdev_device *dev)
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index 0fe5be53965252..8c5b5f35d8485b 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -298,7 +298,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
+ NULL,
+ };
+
+-/* Devices for Surface Pro 9 */
++/* Devices for Surface Pro 9 and 10 */
+ static const struct software_node *ssam_node_group_sp9[] = {
+ &ssam_node_root,
+ &ssam_node_hub_kip,
+@@ -337,6 +337,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Pro 9 */
+ { "MSHW0343", (unsigned long)ssam_node_group_sp9 },
+
++ /* Surface Pro 10 */
++ { "MSHW0510", (unsigned long)ssam_node_group_sp9 },
++
+ /* Surface Book 2 */
+ { "MSHW0107", (unsigned long)ssam_node_group_gen5 },
+
+@@ -367,6 +370,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
++ /* Surface Laptop Go 3 */
++ { "MSHW0440", (unsigned long)ssam_node_group_slg1 },
++
+ /* Surface Laptop Studio */
+ { "MSHW0123", (unsigned long)ssam_node_group_sls },
+
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 2a10705433911d..07eea525091b08 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -263,6 +263,7 @@ config ASUS_WMI
+ depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
++ depends on SERIO_I8042 || SERIO_I8042 = n
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+@@ -279,7 +280,6 @@ config ASUS_WMI
+ config ASUS_NB_WMI
+ tristate "Asus Notebook WMI Driver"
+ depends on ASUS_WMI
+- depends on SERIO_I8042 || SERIO_I8042 = n
+ help
+ This is a driver for newer Asus notebooks. It adds extra features
+ like wireless radio and bluetooth control, leds, hotkeys, backlight...
+diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+index 6bbffb081053e5..b4f49720c87f62 100644
+--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
++++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
+@@ -16,12 +16,17 @@
+
+ struct quirk_entry {
+ u32 s2idle_bug_mmio;
++ bool spurious_8042;
+ };
+
+ static struct quirk_entry quirk_s2idle_bug = {
+ .s2idle_bug_mmio = 0xfed80380,
+ };
+
++static struct quirk_entry quirk_spurious_8042 = {
++ .spurious_8042 = true,
++};
++
+ static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "L14 Gen2 AMD",
+@@ -193,6 +198,25 @@ static const struct dmi_system_id fwbug_list[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
+ }
+ },
++ /* https://community.frame.work/t/tracking-framework-amd-ryzen-7040-series-lid-wakeup-behavior-feedback/39128 */
++ {
++ .ident = "Framework Laptop 13 (Phoenix)",
++ .driver_data = &quirk_spurious_8042,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
++ DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
++ }
++ },
++ {
++ .ident = "Framework Laptop 13 (Phoenix)",
++ .driver_data = &quirk_spurious_8042,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
++ DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
++ }
++ },
+ {}
+ };
+
+@@ -235,6 +259,9 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
+ {
+ const struct dmi_system_id *dmi_id;
+
++ if (dev->cpu_id == AMD_CPU_ID_CZN)
++ dev->disable_8042_wakeup = true;
++
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+@@ -242,4 +269,6 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
+ if (dev->quirks->s2idle_bug_mmio)
+ pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
++ if (dev->quirks->spurious_8042)
++ dev->disable_8042_wakeup = true;
+ }
+diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
+index c1e788b67a7483..96caf2221d877b 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.c
++++ b/drivers/platform/x86/amd/pmc/pmc.c
+@@ -87,16 +87,6 @@
+ #define SMU_MSG_LOG_RESET 0x07
+ #define SMU_MSG_LOG_DUMP_DATA 0x08
+ #define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+-/* List of supported CPU ids */
+-#define AMD_CPU_ID_RV 0x15D0
+-#define AMD_CPU_ID_RN 0x1630
+-#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+-#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+-#define AMD_CPU_ID_YC 0x14B5
+-#define AMD_CPU_ID_CB 0x14D8
+-#define AMD_CPU_ID_PS 0x14E8
+-#define AMD_CPU_ID_SP 0x14A4
+-#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+
+ #define PMC_MSG_DELAY_MIN_US 50
+ #define RESPONSE_REGISTER_LOOP_MAX 20000
+@@ -714,19 +704,22 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+ return -EINVAL;
+ }
+
+-static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
++static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev)
+ {
+ struct device *d;
+ int rc;
+
+- if (!pdev->major) {
+- rc = amd_pmc_get_smu_version(pdev);
+- if (rc)
+- return rc;
+- }
++ /* Cezanne platform firmware has a fix in 64.66.0 */
++ if (pdev->cpu_id == AMD_CPU_ID_CZN) {
++ if (!pdev->major) {
++ rc = amd_pmc_get_smu_version(pdev);
++ if (rc)
++ return rc;
++ }
+
+- if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
+- return 0;
++ if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
++ return 0;
++ }
+
+ d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
+ if (!d)
+@@ -885,8 +878,8 @@ static int amd_pmc_suspend_handler(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+
+- if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
+- int rc = amd_pmc_czn_wa_irq1(pdev);
++ if (pdev->disable_8042_wakeup && !disable_workarounds) {
++ int rc = amd_pmc_wa_irq1(pdev);
+
+ if (rc) {
+ dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
+@@ -912,33 +905,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
+ { }
+ };
+
+-static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
+-{
+- int ret;
+-
+- switch (dev->cpu_id) {
+- case AMD_CPU_ID_YC:
+- if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
+- ret = -EINVAL;
+- goto err_dram_size;
+- }
+- break;
+- default:
+- ret = -EINVAL;
+- goto err_dram_size;
+- }
+-
+- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
+- if (ret || !dev->dram_size)
+- goto err_dram_size;
+-
+- return 0;
+-
+-err_dram_size:
+- dev_err(dev->dev, "DRAM size command not supported for this platform\n");
+- return ret;
+-}
+-
+ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
+ {
+ u32 phys_addr_low, phys_addr_hi;
+@@ -957,8 +923,8 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
+ return -EIO;
+
+ /* Get DRAM size */
+- ret = amd_pmc_get_dram_size(dev);
+- if (ret)
++ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
++ if (ret || !dev->dram_size)
+ dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
+
+ /* Get STB DRAM address */
+diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
+index c27bd6a5642f48..b4794f118739f6 100644
+--- a/drivers/platform/x86/amd/pmc/pmc.h
++++ b/drivers/platform/x86/amd/pmc/pmc.h
+@@ -36,9 +36,21 @@ struct amd_pmc_dev {
+ struct mutex lock; /* generic mutex lock */
+ struct dentry *dbgfs_dir;
+ struct quirk_entry *quirks;
++ bool disable_8042_wakeup;
+ };
+
+ void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
+ void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
+
++/* List of supported CPU ids */
++#define AMD_CPU_ID_RV 0x15D0
++#define AMD_CPU_ID_RN 0x1630
++#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
++#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
++#define AMD_CPU_ID_YC 0x14B5
++#define AMD_CPU_ID_CB 0x14D8
++#define AMD_CPU_ID_PS 0x14E8
++#define AMD_CPU_ID_SP 0x14A4
++#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
++
+ #endif /* PMC_H */
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index df1db54d4e183b..af3da303e2b15a 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -501,8 +501,6 @@ static const struct dmi_system_id asus_quirks[] = {
+
+ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+ {
+- int ret;
+-
+ quirks = &quirk_asus_unknown;
+ dmi_check_system(asus_quirks);
+
+@@ -517,15 +515,6 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+
+ if (tablet_mode_sw != -1)
+ quirks->tablet_switch_mode = tablet_mode_sw;
+-
+- if (quirks->i8042_filter) {
+- ret = i8042_install_filter(quirks->i8042_filter);
+- if (ret) {
+- pr_warn("Unable to install key filter\n");
+- return;
+- }
+- pr_info("Using i8042 filter function for receiving events\n");
+- }
+ }
+
+ static const struct key_entry asus_nb_wmi_keymap[] = {
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 19bfd30861aa88..9c6321c2fc3c59 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -4437,6 +4437,12 @@ static int asus_wmi_add(struct platform_device *pdev)
+ goto fail_wmi_handler;
+ }
+
++ if (asus->driver->quirks->i8042_filter) {
++ err = i8042_install_filter(asus->driver->quirks->i8042_filter);
++ if (err)
++ pr_warn("Unable to install key filter - %d\n", err);
++ }
++
+ asus_wmi_battery_init(asus);
+
+ asus_wmi_debugfs_init(asus);
+@@ -4471,6 +4477,8 @@ static int asus_wmi_remove(struct platform_device *device)
+ struct asus_wmi *asus;
+
+ asus = platform_get_drvdata(device);
++ if (asus->driver->quirks->i8042_filter)
++ i8042_remove_filter(asus->driver->quirks->i8042_filter);
+ wmi_remove_notify_handler(asus->driver->event_guid);
+ asus_wmi_backlight_exit(asus);
+ asus_wmi_input_exit(asus);
+diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
+index e61bfaf8b5c48f..6fb538a1386894 100644
+--- a/drivers/platform/x86/dell/dell-smbios-base.c
++++ b/drivers/platform/x86/dell/dell-smbios-base.c
+@@ -11,6 +11,7 @@
+ */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/container_of.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/capability.h>
+@@ -25,11 +26,16 @@ static u32 da_supported_commands;
+ static int da_num_tokens;
+ static struct platform_device *platform_device;
+ static struct calling_interface_token *da_tokens;
+-static struct device_attribute *token_location_attrs;
+-static struct device_attribute *token_value_attrs;
++static struct token_sysfs_data *token_entries;
+ static struct attribute **token_attrs;
+ static DEFINE_MUTEX(smbios_mutex);
+
++struct token_sysfs_data {
++ struct device_attribute location_attr;
++ struct device_attribute value_attr;
++ struct calling_interface_token *token;
++};
++
+ struct smbios_device {
+ struct list_head list;
+ struct device *device;
+@@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+ }
+ }
+
+-static int match_attribute(struct device *dev,
+- struct device_attribute *attr)
+-{
+- int i;
+-
+- for (i = 0; i < da_num_tokens * 2; i++) {
+- if (!token_attrs[i])
+- continue;
+- if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+- return i/2;
+- }
+- dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+- return -EINVAL;
+-}
+-
+ static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- int i;
++ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+- i = match_attribute(dev, attr);
+- if (i > 0)
+- return sysfs_emit(buf, "%08x", da_tokens[i].location);
+- return 0;
++ return sysfs_emit(buf, "%08x", data->token->location);
+ }
+
+ static ssize_t value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- int i;
++ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+- i = match_attribute(dev, attr);
+- if (i > 0)
+- return sysfs_emit(buf, "%08x", da_tokens[i].value);
+- return 0;
++ return sysfs_emit(buf, "%08x", data->token->value);
+ }
+
+ static struct attribute_group smbios_attribute_group = {
+@@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ {
+ char *location_name;
+ char *value_name;
+- size_t size;
+ int ret;
+ int i, j;
+
+- /* (number of tokens + 1 for null terminated */
+- size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+- token_location_attrs = kzalloc(size, GFP_KERNEL);
+- if (!token_location_attrs)
++ token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
++ if (!token_entries)
+ return -ENOMEM;
+- token_value_attrs = kzalloc(size, GFP_KERNEL);
+- if (!token_value_attrs)
+- goto out_allocate_value;
+
+ /* need to store both location and value + terminator */
+- size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+- token_attrs = kzalloc(size, GFP_KERNEL);
++ token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
+ if (!token_attrs)
+ goto out_allocate_attrs;
+
+@@ -496,27 +474,32 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ /* skip empty */
+ if (da_tokens[i].tokenID == 0)
+ continue;
++
++ token_entries[i].token = &da_tokens[i];
++
+ /* add location */
+ location_name = kasprintf(GFP_KERNEL, "%04x_location",
+ da_tokens[i].tokenID);
+ if (location_name == NULL)
+ goto out_unwind_strings;
+- sysfs_attr_init(&token_location_attrs[i].attr);
+- token_location_attrs[i].attr.name = location_name;
+- token_location_attrs[i].attr.mode = 0444;
+- token_location_attrs[i].show = location_show;
+- token_attrs[j++] = &token_location_attrs[i].attr;
++
++ sysfs_attr_init(&token_entries[i].location_attr.attr);
++ token_entries[i].location_attr.attr.name = location_name;
++ token_entries[i].location_attr.attr.mode = 0444;
++ token_entries[i].location_attr.show = location_show;
++ token_attrs[j++] = &token_entries[i].location_attr.attr;
+
+ /* add value */
+ value_name = kasprintf(GFP_KERNEL, "%04x_value",
+ da_tokens[i].tokenID);
+ if (value_name == NULL)
+ goto loop_fail_create_value;
+- sysfs_attr_init(&token_value_attrs[i].attr);
+- token_value_attrs[i].attr.name = value_name;
+- token_value_attrs[i].attr.mode = 0444;
+- token_value_attrs[i].show = value_show;
+- token_attrs[j++] = &token_value_attrs[i].attr;
++
++ sysfs_attr_init(&token_entries[i].value_attr.attr);
++ token_entries[i].value_attr.attr.name = value_name;
++ token_entries[i].value_attr.attr.mode = 0444;
++ token_entries[i].value_attr.show = value_show;
++ token_attrs[j++] = &token_entries[i].value_attr.attr;
+ continue;
+
+ loop_fail_create_value:
+@@ -532,14 +515,12 @@ static int build_tokens_sysfs(struct platform_device *dev)
+
+ out_unwind_strings:
+ while (i--) {
+- kfree(token_location_attrs[i].attr.name);
+- kfree(token_value_attrs[i].attr.name);
++ kfree(token_entries[i].location_attr.attr.name);
++ kfree(token_entries[i].value_attr.attr.name);
+ }
+ kfree(token_attrs);
+ out_allocate_attrs:
+- kfree(token_value_attrs);
+-out_allocate_value:
+- kfree(token_location_attrs);
++ kfree(token_entries);
+
+ return -ENOMEM;
+ }
+@@ -551,12 +532,11 @@ static void free_group(struct platform_device *pdev)
+ sysfs_remove_group(&pdev->dev.kobj,
+ &smbios_attribute_group);
+ for (i = 0; i < da_num_tokens; i++) {
+- kfree(token_location_attrs[i].attr.name);
+- kfree(token_value_attrs[i].attr.name);
++ kfree(token_entries[i].location_attr.attr.name);
++ kfree(token_entries[i].value_attr.attr.name);
+ }
+ kfree(token_attrs);
+- kfree(token_value_attrs);
+- kfree(token_location_attrs);
++ kfree(token_entries);
+ }
+
+ static int __init dell_smbios_init(void)
+@@ -610,7 +590,10 @@ static int __init dell_smbios_init(void)
+ return 0;
+
+ fail_sysfs:
+- free_group(platform_device);
++ if (!wmi)
++ exit_dell_smbios_wmi();
++ if (!smm)
++ exit_dell_smbios_smm();
+
+ fail_create_group:
+ platform_device_del(platform_device);
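
The token_sysfs_data rework is the standard container_of() pattern: embed the device_attribute in a per-token struct, and the show() callback recovers its context with pointer arithmetic instead of the old linear name search over every attribute. A self-contained userspace sketch of the idiom (the types are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_attr { const char *name; };	/* stand-in for device_attribute */

struct token_entry {
	struct demo_attr location_attr;
	struct demo_attr value_attr;
	unsigned int location;
};

/* O(1): the enclosing entry is recovered from the attribute pointer. */
static unsigned int location_show(struct demo_attr *attr)
{
	struct token_entry *e =
		container_of(attr, struct token_entry, location_attr);
	return e->location;
}

int main(void)
{
	struct token_entry e = { .location = 0x1234 };

	printf("%08x\n", location_show(&e.location_attr));	/* 00001234 */
	return 0;
}
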
+diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+index 5798b49ddaba90..6ddca857cc4d1a 100644
+--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
++++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+@@ -592,13 +592,11 @@ static int hp_add_other_attributes(int attr_type)
+ int ret;
+ char *attr_name;
+
+- mutex_lock(&bioscfg_drv.mutex);
+-
+ attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
+- if (!attr_name_kobj) {
+- ret = -ENOMEM;
+- goto err_other_attr_init;
+- }
++ if (!attr_name_kobj)
++ return -ENOMEM;
++
++ mutex_lock(&bioscfg_drv.mutex);
+
+ /* Check if attribute type is supported */
+ switch (attr_type) {
+@@ -615,14 +613,14 @@ static int hp_add_other_attributes(int attr_type)
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ ret = -EINVAL;
+- goto err_other_attr_init;
++ kfree(attr_name_kobj);
++ goto unlock_drv_mutex;
+ }
+
+ ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
+ NULL, "%s", attr_name);
+ if (ret) {
+ pr_err("Error encountered [%d]\n", ret);
+- kobject_put(attr_name_kobj);
+ goto err_other_attr_init;
+ }
+
+@@ -630,25 +628,25 @@ static int hp_add_other_attributes(int attr_type)
+ switch (attr_type) {
+ case HPWMI_SECURE_PLATFORM_TYPE:
+ ret = hp_populate_secure_platform_data(attr_name_kobj);
+- if (ret)
+- goto err_other_attr_init;
+ break;
+
+ case HPWMI_SURE_START_TYPE:
+ ret = hp_populate_sure_start_data(attr_name_kobj);
+- if (ret)
+- goto err_other_attr_init;
+ break;
+
+ default:
+ ret = -EINVAL;
+- goto err_other_attr_init;
+ }
+
++ if (ret)
++ goto err_other_attr_init;
++
+ mutex_unlock(&bioscfg_drv.mutex);
+ return 0;
+
+ err_other_attr_init:
++ kobject_put(attr_name_kobj);
++unlock_drv_mutex:
+ mutex_unlock(&bioscfg_drv.mutex);
+ kfree(obj);
+ return ret;
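
The hp-bioscfg rework also encodes the canonical kobject ownership rule: before kobject_init_and_add() the object is plain memory and kfree() is correct, but once init_and_add() has run, even unsuccessfully, the reference must be dropped with kobject_put() so the ktype's release() does the freeing. A sketch with a hypothetical demo_ktype:

static int demo_add_kobject(struct kobject *parent, const char *name)
{
	struct kobject *kobj;
	int ret;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return -ENOMEM;		/* not initialised: nothing to put */

	ret = kobject_init_and_add(kobj, &demo_ktype, parent, "%s", name);
	if (ret) {
		kobject_put(kobj);	/* never kfree() after init_and_add() */
		return ret;
	}

	return 0;
}
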
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index ac037540acfc60..88eefccb6ed276 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1425,18 +1425,17 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
+ if (WARN_ON(priv->kbd_bl.initialized))
+ return -EEXIST;
+
+- brightness = ideapad_kbd_bl_brightness_get(priv);
+- if (brightness < 0)
+- return brightness;
+-
+- priv->kbd_bl.last_brightness = brightness;
+-
+ if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
+ priv->kbd_bl.led.max_brightness = 2;
+ } else {
+ priv->kbd_bl.led.max_brightness = 1;
+ }
+
++ brightness = ideapad_kbd_bl_brightness_get(priv);
++ if (brightness < 0)
++ return brightness;
++
++ priv->kbd_bl.last_brightness = brightness;
+ priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
+ priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
+ priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
+diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
+index 306f886b52d208..4ff2aa4b484bc5 100644
+--- a/drivers/platform/x86/intel/ifs/core.c
++++ b/drivers/platform/x86/intel/ifs/core.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /* Copyright(c) 2022 Intel Corporation. */
+
++#include <linux/bitfield.h>
+ #include <linux/module.h>
+ #include <linux/kdev_t.h>
+ #include <linux/semaphore.h>
+@@ -94,6 +95,8 @@ static int __init ifs_init(void)
+ for (i = 0; i < IFS_NUMTESTS; i++) {
+ if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit)))
+ continue;
++ ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK,
++ msrval);
+ ret = misc_register(&ifs_devices[i].misc);
+ if (ret)
+ goto err_exit;
+diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
+index 93191855890f2c..6bc63ab7051752 100644
+--- a/drivers/platform/x86/intel/ifs/ifs.h
++++ b/drivers/platform/x86/intel/ifs/ifs.h
+@@ -174,9 +174,17 @@ union ifs_chunks_auth_status {
+ union ifs_scan {
+ u64 data;
+ struct {
+- u32 start :8;
+- u32 stop :8;
+- u32 rsvd :16;
++ union {
++ struct {
++ u8 start;
++ u8 stop;
++ u16 rsvd;
++ } gen0;
++ struct {
++ u16 start;
++ u16 stop;
++ } gen2;
++ };
+ u32 delay :31;
+ u32 sigmce :1;
+ };
+@@ -186,9 +194,17 @@ union ifs_scan {
+ union ifs_status {
+ u64 data;
+ struct {
+- u32 chunk_num :8;
+- u32 chunk_stop_index :8;
+- u32 rsvd1 :16;
++ union {
++ struct {
++ u8 chunk_num;
++ u8 chunk_stop_index;
++ u16 rsvd1;
++ } gen0;
++ struct {
++ u16 chunk_num;
++ u16 chunk_stop_index;
++ } gen2;
++ };
+ u32 error_code :8;
+ u32 rsvd2 :22;
+ u32 control_error :1;
+@@ -229,6 +245,7 @@ struct ifs_test_caps {
+ * @status: it holds simple status pass/fail/untested
+ * @scan_details: opaque scan status code from h/w
+ * @cur_batch: number indicating the currently loaded test file
++ * @generation: IFS test generation enumerated by hardware
+ */
+ struct ifs_data {
+ int loaded_version;
+@@ -238,6 +255,7 @@ struct ifs_data {
+ int status;
+ u64 scan_details;
+ u32 cur_batch;
++ u32 generation;
+ };
+
+ struct ifs_work {
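
The nested anonymous unions let a single 64-bit MSR image be decoded under either hardware generation: gen0 packs 8-bit start/stop chunk indexes plus 16 reserved bits, gen2 widens both indexes to 16 bits, and the delay/sigmce bits above them keep their meaning either way. A standalone sketch of the layout (demo_* type, little-endian assumed, as on x86):

#include <stdint.h>
#include <stdio.h>

/* Simplified copy of union ifs_scan from the patch. */
union demo_ifs_scan {
	uint64_t data;
	struct {
		union {
			struct { uint8_t start; uint8_t stop; uint16_t rsvd; } gen0;
			struct { uint16_t start; uint16_t stop; } gen2;
		};
		uint32_t delay : 31;
		uint32_t sigmce : 1;
	};
};

int main(void)
{
	union demo_ifs_scan s = { .data = 0 };

	s.gen2.start = 2;
	s.gen2.stop = 511;
	/* Same storage, two decodings: gen0.start reads 2, but gen2.stop
	 * falls into gen0.rsvd, so gen0.stop reads 0. */
	printf("gen2: %u..%u  gen0: %u..%u\n",
	       (unsigned int)s.gen2.start, (unsigned int)s.gen2.stop,
	       (unsigned int)s.gen0.start, (unsigned int)s.gen0.stop);
	return 0;
}
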
+diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
+index cefd0d886cfd4d..53d957d4eea4d1 100644
+--- a/drivers/platform/x86/intel/ifs/load.c
++++ b/drivers/platform/x86/intel/ifs/load.c
+@@ -260,6 +260,7 @@ int ifs_load_firmware(struct device *dev)
+ {
+ const struct ifs_test_caps *test = ifs_get_test_caps(dev);
+ struct ifs_data *ifsd = ifs_get_data(dev);
++ unsigned int expected_size;
+ const struct firmware *fw;
+ char scan_path[64];
+ int ret = -EINVAL;
+@@ -274,6 +275,14 @@ int ifs_load_firmware(struct device *dev)
+ goto done;
+ }
+
++ expected_size = ((struct microcode_header_intel *)fw->data)->totalsize;
++ if (fw->size != expected_size) {
++ dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
++ expected_size, fw->size);
++ ret = -EINVAL;
++ goto release;
++ }
++
+ ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
+ if (ret)
+ goto release;
+diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
+index 43c864add778f8..c7a5bf24bef35e 100644
+--- a/drivers/platform/x86/intel/ifs/runtest.c
++++ b/drivers/platform/x86/intel/ifs/runtest.c
+@@ -167,25 +167,35 @@ static int doscan(void *data)
+ */
+ static void ifs_test_core(int cpu, struct device *dev)
+ {
++ union ifs_status status = {};
+ union ifs_scan activate;
+- union ifs_status status;
+ unsigned long timeout;
+ struct ifs_data *ifsd;
++ int to_start, to_stop;
++ int status_chunk;
+ u64 msrvals[2];
+ int retries;
+
+ ifsd = ifs_get_data(dev);
+
+- activate.rsvd = 0;
++ activate.gen0.rsvd = 0;
+ activate.delay = IFS_THREAD_WAIT;
+ activate.sigmce = 0;
+- activate.start = 0;
+- activate.stop = ifsd->valid_chunks - 1;
++ to_start = 0;
++ to_stop = ifsd->valid_chunks - 1;
++
++ if (ifsd->generation) {
++ activate.gen2.start = to_start;
++ activate.gen2.stop = to_stop;
++ } else {
++ activate.gen0.start = to_start;
++ activate.gen0.stop = to_stop;
++ }
+
+ timeout = jiffies + HZ / 2;
+ retries = MAX_IFS_RETRIES;
+
+- while (activate.start <= activate.stop) {
++ while (to_start <= to_stop) {
+ if (time_after(jiffies, timeout)) {
+ status.error_code = IFS_SW_TIMEOUT;
+ break;
+@@ -196,13 +206,14 @@ static void ifs_test_core(int cpu, struct device *dev)
+
+ status.data = msrvals[1];
+
+- trace_ifs_status(cpu, activate, status);
++ trace_ifs_status(cpu, to_start, to_stop, status.data);
+
+ /* Some cases can be retried, give up for others */
+ if (!can_restart(status))
+ break;
+
+- if (status.chunk_num == activate.start) {
++ status_chunk = ifsd->generation ? status.gen2.chunk_num : status.gen0.chunk_num;
++ if (status_chunk == to_start) {
+ /* Check for forward progress */
+ if (--retries == 0) {
+ if (status.error_code == IFS_NO_ERROR)
+@@ -211,7 +222,11 @@ static void ifs_test_core(int cpu, struct device *dev)
+ }
+ } else {
+ retries = MAX_IFS_RETRIES;
+- activate.start = status.chunk_num;
++ if (ifsd->generation)
++ activate.gen2.start = status_chunk;
++ else
++ activate.gen0.start = status_chunk;
++ to_start = status_chunk;
+ }
+ }
+
+diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
+index 5006008e01bea2..606f7678bcb0a1 100644
+--- a/drivers/platform/x86/intel/pmc/adl.c
++++ b/drivers/platform/x86/intel/pmc/adl.c
+@@ -314,16 +314,13 @@ int adl_core_init(struct pmc_dev *pmcdev)
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
+
++ pmcdev->suspend = cnl_suspend;
++ pmcdev->resume = cnl_resume;
++
+ pmc->map = &adl_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+- /* Due to a hardware limitation, the GBE LTR blocks PC10
+- * when a cable is attached. Tell the PMC to ignore it.
+- */
+- dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n");
+- pmc_core_send_ltr_ignore(pmcdev, 3);
+-
+ return 0;
+ }
+diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
+index 420aaa1d7c7697..98b36651201a06 100644
+--- a/drivers/platform/x86/intel/pmc/cnp.c
++++ b/drivers/platform/x86/intel/pmc/cnp.c
+@@ -204,21 +204,35 @@ const struct pmc_reg_map cnp_reg_map = {
+ .etr3_offset = ETR3_OFFSET,
+ };
+
++void cnl_suspend(struct pmc_dev *pmcdev)
++{
++ /*
++ * Due to a hardware limitation, the GBE LTR blocks PC10
++ * when a cable is attached. To unblock PC10 during suspend,
++ * tell the PMC to ignore it.
++ */
++ pmc_core_send_ltr_ignore(pmcdev, 3, 1);
++}
++
++int cnl_resume(struct pmc_dev *pmcdev)
++{
++ pmc_core_send_ltr_ignore(pmcdev, 3, 0);
++
++ return pmc_core_resume_common(pmcdev);
++}
++
+ int cnp_core_init(struct pmc_dev *pmcdev)
+ {
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
+
++ pmcdev->suspend = cnl_suspend;
++ pmcdev->resume = cnl_resume;
++
+ pmc->map = &cnp_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+- /* Due to a hardware limitation, the GBE LTR blocks PC10
+- * when a cable is attached. Tell the PMC to ignore it.
+- */
+- dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n");
+- pmc_core_send_ltr_ignore(pmcdev, 3);
+-
+ return 0;
+ }
+diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
+index 84c175b9721a0b..022afb97d531c9 100644
+--- a/drivers/platform/x86/intel/pmc/core.c
++++ b/drivers/platform/x86/intel/pmc/core.c
+@@ -460,7 +460,7 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
+
+-int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
++int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
+ {
+ struct pmc *pmc;
+ const struct pmc_reg_map *map;
+@@ -472,7 +472,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
+ * is based on the contiguous indexes from ltr_show output.
+ * pmc index and ltr index needs to be calculated from it.
+ */
+- for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index > 0; pmc_index++) {
++ for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
+ pmc = pmcdev->pmcs[pmc_index];
+
+ if (!pmc)
+@@ -498,7 +498,10 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value)
+ mutex_lock(&pmcdev->lock);
+
+ reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
+- reg |= BIT(ltr_index);
++ if (ignore)
++ reg |= BIT(ltr_index);
++ else
++ reg &= ~BIT(ltr_index);
+ pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);
+
+ mutex_unlock(&pmcdev->lock);
+@@ -521,7 +524,7 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ if (err)
+ return err;
+
+- err = pmc_core_send_ltr_ignore(pmcdev, value);
++ err = pmc_core_send_ltr_ignore(pmcdev, value, 1);
+
+ return err == 0 ? count : err;
+ }
+@@ -1279,6 +1282,9 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
+ struct pmc_dev *pmcdev = dev_get_drvdata(dev);
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+
++ if (pmcdev->suspend)
++ pmcdev->suspend(pmcdev);
++
+ /* Check if the suspend will actually use S0ix */
+ if (pm_suspend_via_firmware())
+ return 0;
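
Making pmc_core_send_ltr_ignore() take an ignore argument turns it into a reversible set/clear primitive: the suspend hooks mask LTR index 3 (the GBE LTR) so PC10 can be reached with a cable attached, and the resume hooks unmask it so runtime behaviour is unchanged. The core of it is an ordinary read-modify-write, sketched standalone:

#include <stdint.h>

/* Set or clear one bit of a register image depending on 'ignore',
 * mirroring the reg |= BIT(i) / reg &= ~BIT(i) pair in the patch. */
static uint32_t ltr_ignore_update(uint32_t reg, unsigned int index, int ignore)
{
	if (ignore)
		reg |= UINT32_C(1) << index;
	else
		reg &= ~(UINT32_C(1) << index);
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = ltr_ignore_update(reg, 3, 1);	/* suspend: ignore GBE LTR */
	reg = ltr_ignore_update(reg, 3, 0);	/* resume: honour it again */
	return (int)reg;			/* 0 */
}
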
+diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
+index 0729f593c6a759..b66dacbfb94bf7 100644
+--- a/drivers/platform/x86/intel/pmc/core.h
++++ b/drivers/platform/x86/intel/pmc/core.h
+@@ -363,6 +363,7 @@ struct pmc {
+ * @s0ix_counter: S0ix residency (step adjusted)
+ * @num_lpm_modes: Count of enabled modes
+ * @lpm_en_modes: Array of enabled modes from lowest to highest priority
++ * @suspend: Function to perform platform specific suspend
+ * @resume: Function to perform platform specific resume
+ *
+ * pmc_dev contains info about power management controller device.
+@@ -379,6 +380,7 @@ struct pmc_dev {
+ u64 s0ix_counter;
+ int num_lpm_modes;
+ int lpm_en_modes[LPM_MAX_NUM_MODES];
++ void (*suspend)(struct pmc_dev *pmcdev);
+ int (*resume)(struct pmc_dev *pmcdev);
+
+ bool has_die_c6;
+@@ -486,7 +488,7 @@ extern const struct pmc_bit_map *mtl_ioem_lpm_maps[];
+ extern const struct pmc_reg_map mtl_ioem_reg_map;
+
+ extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
+-extern int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value);
++int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
+
+ int pmc_core_resume_common(struct pmc_dev *pmcdev);
+ int get_primary_reg_base(struct pmc *pmc);
+@@ -500,6 +502,9 @@ int tgl_core_init(struct pmc_dev *pmcdev);
+ int adl_core_init(struct pmc_dev *pmcdev);
+ int mtl_core_init(struct pmc_dev *pmcdev);
+
++void cnl_suspend(struct pmc_dev *pmcdev);
++int cnl_resume(struct pmc_dev *pmcdev);
++
+ #define pmc_for_each_mode(i, mode, pmcdev) \
+ for (i = 0, mode = pmcdev->lpm_en_modes[i]; \
+ i < pmcdev->num_lpm_modes; \
+diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
+index 2204bc666980ed..504e3e273c323b 100644
+--- a/drivers/platform/x86/intel/pmc/mtl.c
++++ b/drivers/platform/x86/intel/pmc/mtl.c
+@@ -979,6 +979,8 @@ static void mtl_d3_fixup(void)
+ static int mtl_resume(struct pmc_dev *pmcdev)
+ {
+ mtl_d3_fixup();
++ pmc_core_send_ltr_ignore(pmcdev, 3, 0);
++
+ return pmc_core_resume_common(pmcdev);
+ }
+
+@@ -989,6 +991,7 @@ int mtl_core_init(struct pmc_dev *pmcdev)
+
+ mtl_d3_fixup();
+
++ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = mtl_resume;
+
+ pmcdev->regmap_list = mtl_pmc_info_list;
+@@ -1002,11 +1005,5 @@ int mtl_core_init(struct pmc_dev *pmcdev)
+ return ret;
+ }
+
+- /* Due to a hardware limitation, the GBE LTR blocks PC10
+- * when a cable is attached. Tell the PMC to ignore it.
+- */
+- dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n");
+- pmc_core_send_ltr_ignore(pmcdev, 3);
+-
+ return 0;
+ }
+diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
+index 2449940102db4f..e88d3d00c85393 100644
+--- a/drivers/platform/x86/intel/pmc/tgl.c
++++ b/drivers/platform/x86/intel/pmc/tgl.c
+@@ -259,16 +259,15 @@ int tgl_core_init(struct pmc_dev *pmcdev)
+ int ret;
+
+ pmc->map = &tgl_reg_map;
++
++ pmcdev->suspend = cnl_suspend;
++ pmcdev->resume = cnl_resume;
++
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ pmc_core_get_tgl_lpm_reqs(pmcdev->pdev);
+- /* Due to a hardware limitation, the GBE LTR blocks PC10
+- * when a cable is attached. Tell the PMC to ignore it.
+- */
+- dev_dbg(&pmcdev->pdev->dev, "ignoring GBE LTR\n");
+- pmc_core_send_ltr_ignore(pmcdev, 3);
+
+ return 0;
+ }
+diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+index 08df9494603c5e..9040a3d39924bb 100644
+--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
++++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+@@ -316,7 +316,9 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
+ cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
+ return NULL;
+
+- pkg_id = topology_physical_package_id(cpu);
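++ /* Logical package ids, unlike physical ids, are dense; validate against the package count */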
++ pkg_id = topology_logical_package_id(cpu);
++ if (pkg_id >= topology_max_packages())
++ return NULL;
+
+ bus_number = isst_cpu_info[cpu].bus_info[bus_no];
+ if (bus_number < 0)
+@@ -719,7 +721,9 @@ static struct miscdevice isst_if_char_driver = {
+ };
+
+ static const struct x86_cpu_id hpm_cpu_ids[] = {
++ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
+ {}
+ };
+diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c
+index fdf55b5d69480e..e4be40f73eebfc 100644
+--- a/drivers/platform/x86/intel/telemetry/core.c
++++ b/drivers/platform/x86/intel/telemetry/core.c
+@@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = {
+ /**
+ * telemetry_update_events() - Update telemetry Configuration
+ * @pss_evtconfig: PSS related config. No change if num_evts = 0.
+- * @pss_evtconfig: IOSS related config. No change if num_evts = 0.
++ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
+ *
+ * This API updates the IOSS & PSS Telemetry configuration. Old config
+ * is overwritten. Call telemetry_reset_events when logging is over
+@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events);
+ /**
+ * telemetry_get_eventconfig() - Returns the pss and ioss events enabled
+ * @pss_evtconfig: Pointer to PSS related configuration.
+- * @pss_evtconfig: Pointer to IOSS related configuration.
++ * @ioss_evtconfig: Pointer to IOSS related configuration.
+ * @pss_len: Number of u32 elements allocated for pss_evtconfig array
+ * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array
+ *
+diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
+index 0a95736d97e4dc..4c42c28bdd3d49 100644
+--- a/drivers/platform/x86/intel/tpmi.c
++++ b/drivers/platform/x86/intel/tpmi.c
+@@ -96,7 +96,7 @@ struct intel_tpmi_pfs_entry {
+ */
+ struct intel_tpmi_pm_feature {
+ struct intel_tpmi_pfs_entry pfs_header;
+- unsigned int vsec_offset;
++ u64 vsec_offset;
+ struct intel_vsec_device *vsec_dev;
+ };
+
+@@ -359,7 +359,7 @@ static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
+ disabled = disabled ? 'Y' : 'N';
+ locked = locked ? 'Y' : 'N';
+ }
+- seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\n",
++ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\n",
+ pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
+ pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
+ pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled);
+@@ -377,7 +377,8 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
+ struct intel_tpmi_pm_feature *pfs = s->private;
+ int count, ret = 0;
+ void __iomem *mem;
+- u32 off, size;
++ u32 size;
++ u64 off;
+ u8 *buffer;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+@@ -393,7 +394,7 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
+ mutex_lock(&tpmi_dev_lock);
+
+ for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
+- seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
++ seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);
+
+ mem = ioremap(off, size);
+ if (!mem) {
+@@ -732,8 +733,11 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
+ * when actual device nodes created outside this
+ * loop via tpmi_create_devices().
+ */
+- if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
+- tpmi_process_info(tpmi_info, pfs);
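++ /* tpmi_process_info() can fail now, so propagate its error instead of ignoring it */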
++ if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
++ ret = tpmi_process_info(tpmi_info, pfs);
++ if (ret)
++ return ret;
++ }
+
+ if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
+ tpmi_set_control_base(auxdev, tpmi_info, pfs);
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index 33ab207493e3e6..33bb58dc3f78c3 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
+ static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
+ static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
+
+-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+- struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
++ struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->domain_id);
+ }
+
+-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+- struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
++ struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->cluster_id);
+ }
+
+-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
++static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+- struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
++ struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
+
+ return sprintf(buf, "%u\n", data->package_id);
+ }
+@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
+ }
+
+ #define store_uncore_min_max(name, min_max) \
+- static ssize_t store_##name(struct device *dev, \
+- struct device_attribute *attr, \
++ static ssize_t store_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return store_min_max_freq_khz(data, buf, count, \
+ min_max); \
+ }
+
+ #define show_uncore_min_max(name, min_max) \
+- static ssize_t show_##name(struct device *dev, \
+- struct device_attribute *attr, char *buf)\
++ static ssize_t show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf)\
+ { \
+- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_min_max_freq_khz(data, buf, min_max); \
+ }
+
+ #define show_uncore_perf_status(name) \
+- static ssize_t show_##name(struct device *dev, \
+- struct device_attribute *attr, char *buf)\
++ static ssize_t show_##name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf)\
+ { \
+- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
++ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
+ \
+ return show_perf_status_freq_khz(data, buf); \
+ }
+@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
+ show_uncore_perf_status(current_freq_khz);
+
+ #define show_uncore_data(member_name) \
+- static ssize_t show_##member_name(struct device *dev, \
+- struct device_attribute *attr, char *buf)\
++ static ssize_t show_##member_name(struct kobject *kobj, \
++ struct kobj_attribute *attr, char *buf)\
+ { \
+ struct uncore_data *data = container_of(attr, struct uncore_data,\
+- member_name##_dev_attr);\
++ member_name##_kobj_attr);\
+ \
+ return sysfs_emit(buf, "%u\n", \
+ data->member_name); \
+@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
+
+ #define init_attribute_rw(_name) \
+ do { \
+- sysfs_attr_init(&data->_name##_dev_attr.attr); \
+- data->_name##_dev_attr.show = show_##_name; \
+- data->_name##_dev_attr.store = store_##_name; \
+- data->_name##_dev_attr.attr.name = #_name; \
+- data->_name##_dev_attr.attr.mode = 0644; \
++ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
++ data->_name##_kobj_attr.show = show_##_name; \
++ data->_name##_kobj_attr.store = store_##_name; \
++ data->_name##_kobj_attr.attr.name = #_name; \
++ data->_name##_kobj_attr.attr.mode = 0644; \
+ } while (0)
+
+ #define init_attribute_ro(_name) \
+ do { \
+- sysfs_attr_init(&data->_name##_dev_attr.attr); \
+- data->_name##_dev_attr.show = show_##_name; \
+- data->_name##_dev_attr.store = NULL; \
+- data->_name##_dev_attr.attr.name = #_name; \
+- data->_name##_dev_attr.attr.mode = 0444; \
++ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
++ data->_name##_kobj_attr.show = show_##_name; \
++ data->_name##_kobj_attr.store = NULL; \
++ data->_name##_kobj_attr.attr.name = #_name; \
++ data->_name##_kobj_attr.attr.mode = 0444; \
+ } while (0)
+
+ #define init_attribute_root_ro(_name) \
+ do { \
+- sysfs_attr_init(&data->_name##_dev_attr.attr); \
+- data->_name##_dev_attr.show = show_##_name; \
+- data->_name##_dev_attr.store = NULL; \
+- data->_name##_dev_attr.attr.name = #_name; \
+- data->_name##_dev_attr.attr.mode = 0400; \
++ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
++ data->_name##_kobj_attr.show = show_##_name; \
++ data->_name##_kobj_attr.store = NULL; \
++ data->_name##_kobj_attr.attr.name = #_name; \
++ data->_name##_kobj_attr.attr.mode = 0400; \
+ } while (0)
+
+ static int create_attr_group(struct uncore_data *data, char *name)
+@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
+
+ if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
+ init_attribute_root_ro(domain_id);
+- data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
+ init_attribute_root_ro(fabric_cluster_id);
+- data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
+ init_attribute_root_ro(package_id);
+- data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
+ }
+
+- data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
+- data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
+- data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
+- data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
++ data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
++ data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
++ data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
+
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
++ data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
+
+ data->uncore_attrs[index] = NULL;
+
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+index 7afb69977c7e8c..0e5bf507e55520 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+@@ -26,14 +26,14 @@
+ * @instance_id: Unique instance id to append to directory name
+ * @name: Sysfs entry name for this instance
+ * @uncore_attr_group: Attribute group storage
+- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
+- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
+- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
+- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
+- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
+- * @domain_id_dev_attr: Storage for device attribute domain_id
+- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
+- * @package_id_dev_attr: Storage for device attribute package_id
++ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
++ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
++ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
++ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
++ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
++ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
++ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
++ * @package_id_kobj_attr: Storage for kobject attribute package_id
+ * @uncore_attrs: Attribute storage for group creation
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+@@ -53,14 +53,14 @@ struct uncore_data {
+ char name[32];
+
+ struct attribute_group uncore_attr_group;
+- struct device_attribute max_freq_khz_dev_attr;
+- struct device_attribute min_freq_khz_dev_attr;
+- struct device_attribute initial_max_freq_khz_dev_attr;
+- struct device_attribute initial_min_freq_khz_dev_attr;
+- struct device_attribute current_freq_khz_dev_attr;
+- struct device_attribute domain_id_dev_attr;
+- struct device_attribute fabric_cluster_id_dev_attr;
+- struct device_attribute package_id_dev_attr;
++ struct kobj_attribute max_freq_khz_kobj_attr;
++ struct kobj_attribute min_freq_khz_kobj_attr;
++ struct kobj_attribute initial_max_freq_khz_kobj_attr;
++ struct kobj_attribute initial_min_freq_khz_kobj_attr;
++ struct kobj_attribute current_freq_khz_kobj_attr;
++ struct kobj_attribute domain_id_kobj_attr;
++ struct kobj_attribute fabric_cluster_id_kobj_attr;
++ struct kobj_attribute package_id_kobj_attr;
+ struct attribute *uncore_attrs[9];
+ };
+
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+index 7d0a67f8b517a7..10502216454824 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+@@ -234,6 +234,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
+ {
+ struct intel_tpmi_plat_info *plat_info;
+ struct tpmi_uncore_struct *tpmi_uncore;
++ bool uncore_sysfs_added = false;
+ int ret, i, pkg = 0;
+ int num_resources;
+
+@@ -359,9 +360,15 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
+ }
+ /* Point to next cluster offset */
+ cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
++ uncore_sysfs_added = true;
+ }
+ }
+
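++ /* No cluster produced a sysfs entry, so there is nothing for this driver to manage */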
++ if (!uncore_sysfs_added) {
++ ret = -ENODEV;
++ goto remove_clusters;
++ }
++
+ auxiliary_set_drvdata(auxdev, tpmi_uncore);
+
+ tpmi_uncore->root_cluster.root_domain = true;
+diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
+index 6fa1735ad7a49a..5d13452bb947a2 100644
+--- a/drivers/platform/x86/intel/vbtn.c
++++ b/drivers/platform/x86/intel/vbtn.c
+@@ -73,10 +73,10 @@ struct intel_vbtn_priv {
+ bool wakeup_mode;
+ };
+
+-static void detect_tablet_mode(struct platform_device *device)
++static void detect_tablet_mode(struct device *dev)
+ {
+- struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+- acpi_handle handle = ACPI_HANDLE(&device->dev);
++ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
++ acpi_handle handle = ACPI_HANDLE(dev);
+ unsigned long long vgbs;
+ acpi_status status;
+ int m;
+@@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device)
+ input_report_switch(priv->switches_dev, SW_TABLET_MODE, m);
+ m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 1 : 0;
+ input_report_switch(priv->switches_dev, SW_DOCK, m);
++
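++ /* Flush both switch reports to userspace as a single event frame */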
++ input_sync(priv->switches_dev);
+ }
+
+ /*
+@@ -134,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
+ priv->switches_dev->id.bustype = BUS_HOST;
+
+ if (priv->has_switches) {
+- detect_tablet_mode(device);
+-
+ ret = input_register_device(priv->switches_dev);
+ if (ret)
+ return ret;
+@@ -314,6 +314,9 @@ static int intel_vbtn_probe(struct platform_device *device)
+ if (ACPI_FAILURE(status))
+ dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
+ }
++ // Check switches after buttons since VBDL may have side effects.
++ if (has_switches)
++ detect_tablet_mode(&device->dev);
+
+ device_init_wakeup(&device->dev, true);
+ /*
+@@ -352,7 +355,13 @@ static void intel_vbtn_pm_complete(struct device *dev)
+
+ static int intel_vbtn_pm_resume(struct device *dev)
+ {
++ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
++
+ intel_vbtn_pm_complete(dev);
++
++ if (priv->has_switches)
++ detect_tablet_mode(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
+index c1f9e4471b28f9..343ab6a82c0177 100644
+--- a/drivers/platform/x86/intel/vsec.c
++++ b/drivers/platform/x86/intel/vsec.c
+@@ -120,6 +120,8 @@ static void intel_vsec_dev_release(struct device *dev)
+ {
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+
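++ /* Drop the device from the lookup xarray before its ids and memory are freed */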
++ xa_erase(&auxdev_array, intel_vsec_dev->id);
++
+ mutex_lock(&vsec_ida_lock);
+ ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
+ mutex_unlock(&vsec_ida_lock);
+@@ -135,19 +137,28 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
+ int ret, id;
+
+- mutex_lock(&vsec_ida_lock);
+- ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+- mutex_unlock(&vsec_ida_lock);
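++ /* Reserve the xarray slot first so intel_vsec_dev_release() can always erase it */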
++ ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
++ PMT_XA_LIMIT, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
++ mutex_lock(&vsec_ida_lock);
++ id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
++ mutex_unlock(&vsec_ida_lock);
++ if (id < 0) {
++ xa_erase(&auxdev_array, intel_vsec_dev->id);
++ kfree(intel_vsec_dev->resource);
++ kfree(intel_vsec_dev);
++ return id;
++ }
++
+ if (!parent)
+ parent = &pdev->dev;
+
+- auxdev->id = ret;
++ auxdev->id = id;
+ auxdev->name = name;
+ auxdev->dev.parent = parent;
+ auxdev->dev.release = intel_vsec_dev_release;
+@@ -169,12 +180,6 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ if (ret < 0)
+ return ret;
+
+- /* Add auxdev to list */
+- ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT,
+- GFP_KERNEL);
+- if (ret)
+- return ret;
+-
+ return 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
+diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
+index 0fd042c171ba07..0a6201b4a0e906 100644
+--- a/drivers/platform/x86/intel/vsec.h
++++ b/drivers/platform/x86/intel/vsec.h
+@@ -45,6 +45,7 @@ struct intel_vsec_device {
+ struct ida *ida;
+ struct intel_vsec_platform_info *info;
+ int num_resources;
++ int id; /* xa */
+ void *priv_data;
+ size_t priv_data_size;
+ };
+diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c
+index e1fbc35504d498..ef2c267ab485cd 100644
+--- a/drivers/platform/x86/lenovo-ymc.c
++++ b/drivers/platform/x86/lenovo-ymc.c
+@@ -78,6 +78,8 @@ static void lenovo_ymc_trigger_ec(struct wmi_device *wdev, struct lenovo_ymc_pri
+ }
+
+ static const struct key_entry lenovo_ymc_keymap[] = {
++ /* Ignore the uninitialized state */
++ { KE_IGNORE, 0x00 },
+ /* Laptop */
+ { KE_SW, 0x01, { .sw = { SW_TABLET_MODE, 0 } } },
+ /* Tablet */
+diff --git a/drivers/platform/x86/lenovo-yogabook.c b/drivers/platform/x86/lenovo-yogabook.c
+index b8d0239192cbf5..fd62bf746ebde4 100644
+--- a/drivers/platform/x86/lenovo-yogabook.c
++++ b/drivers/platform/x86/lenovo-yogabook.c
+@@ -435,7 +435,7 @@ static int yogabook_pdev_set_kbd_backlight(struct yogabook_data *data, u8 level)
+ .enabled = level,
+ };
+
+- pwm_apply_state(data->kbd_bl_pwm, &state);
++ pwm_apply_might_sleep(data->kbd_bl_pwm, &state);
+ gpiod_set_value(data->kbd_bl_led_enable, level ? 1 : 0);
+ return 0;
+ }
+diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
+index ad3c39e9e9f586..78c48a1f9c68a5 100644
+--- a/drivers/platform/x86/lg-laptop.c
++++ b/drivers/platform/x86/lg-laptop.c
+@@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
+ #define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
+ #define WMI_EVENT_GUID WMI_EVENT_GUID0
+
+-#define WMAB_METHOD "\\XINI.WMAB"
+-#define WMBB_METHOD "\\XINI.WMBB"
+ #define SB_GGOV_METHOD "\\_SB.GGOV"
+ #define GOV_TLED 0x2020008
+ #define WM_GET 1
+@@ -74,7 +72,7 @@ static u32 inited;
+
+ static int battery_limit_use_wmbb;
+ static struct led_classdev kbd_backlight;
+-static enum led_brightness get_kbd_backlight_level(void);
++static enum led_brightness get_kbd_backlight_level(struct device *dev);
+
+ static const struct key_entry wmi_keymap[] = {
+ {KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
+@@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
+ * this key both sends an event and
+ * changes backlight level.
+ */
+- {KE_KEY, 0x80, {KEY_RFKILL} },
+ {KE_END, 0}
+ };
+
+@@ -128,11 +125,10 @@ static int ggov(u32 arg0)
+ return res;
+ }
+
+-static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
++static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
+ {
+ union acpi_object args[3];
+ acpi_status status;
+- acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+@@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+ args[2].type = ACPI_TYPE_INTEGER;
+ args[2].integer.value = arg2;
+
+- status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
+- if (ACPI_FAILURE(status)) {
+- pr_err("Cannot get handle");
+- return NULL;
+- }
+-
+ arg.count = 3;
+ arg.pointer = args;
+
+- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
++ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+- acpi_handle_err(handle, "WMAB: call failed.\n");
++ dev_err(dev, "WMAB: call failed.\n");
+ return NULL;
+ }
+
+ return buffer.pointer;
+ }
+
+-static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
++static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
+ {
+ union acpi_object args[3];
+ acpi_status status;
+- acpi_handle handle;
+ struct acpi_object_list arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ u8 buf[32];
+@@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+ args[2].buffer.length = 32;
+ args[2].buffer.pointer = buf;
+
+- status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
+- if (ACPI_FAILURE(status)) {
+- pr_err("Cannot get handle");
+- return NULL;
+- }
+-
+ arg.count = 3;
+ arg.pointer = args;
+
+- status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
++ status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+- acpi_handle_err(handle, "WMAB: call failed.\n");
++ dev_err(dev, "WMBB: call failed.\n");
+ return NULL;
+ }
+
+@@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)
+
+ if (eventcode == 0x10000000) {
+ led_classdev_notify_brightness_hw_changed(
+- &kbd_backlight, get_kbd_backlight_level());
++ &kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
+ } else {
+ key = sparse_keymap_entry_from_scancode(
+ wmi_input_dev, eventcode);
+@@ -272,14 +255,7 @@ static void wmi_input_setup(void)
+
+ static void acpi_notify(struct acpi_device *device, u32 event)
+ {
+- struct key_entry *key;
+-
+ acpi_handle_debug(device->handle, "notify: %d\n", event);
+- if (inited & INIT_SPARSE_KEYMAP) {
+- key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
+- if (key && key->type == KE_KEY)
+- sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+- }
+ }
+
+ static ssize_t fan_mode_store(struct device *dev,
+@@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
+ if (ret)
+ return ret;
+
+- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
++ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,
+
+ m = r->integer.value;
+ kfree(r);
+- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
++ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ kfree(r);
+- r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
++ r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ kfree(r);
+
+ return count;
+@@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
+ unsigned int status;
+ union acpi_object *r;
+
+- r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
++ r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
+ if (ret)
+ return ret;
+
+- r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
++ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+@@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
+ unsigned int status;
+ union acpi_object *r;
+
+- r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
++ r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
+ if (ret)
+ return ret;
+
+- r = lg_wmab(WM_READER_MODE, WM_SET, value);
++ r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+@@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
+ unsigned int status;
+ union acpi_object *r;
+
+- r = lg_wmab(WM_READER_MODE, WM_GET, 0);
++ r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
+ if (ret)
+ return ret;
+
+- r = lg_wmab(WM_FN_LOCK, WM_SET, value);
++ r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+@@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
+ unsigned int status;
+ union acpi_object *r;
+
+- r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
++ r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
+ union acpi_object *r;
+
+ if (battery_limit_use_wmbb)
+- r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
++ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
+ else
+- r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
++ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
+ if (!r)
+ return -EIO;
+
+@@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
+ union acpi_object *r;
+
+ if (battery_limit_use_wmbb) {
+- r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
++ r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
+
+ status = r->buffer.pointer[0x10];
+ } else {
+- r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
++ r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
+ if (!r)
+ return -EIO;
+
+@@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
+ {
+ union acpi_object *r;
+
+- r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
++ r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
+ kfree(r);
+ }
+
+@@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
+ val = 0;
+ if (brightness >= LED_FULL)
+ val = 0x24;
+- r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
++ r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
+ kfree(r);
+ }
+
+-static enum led_brightness get_kbd_backlight_level(void)
++static enum led_brightness get_kbd_backlight_level(struct device *dev)
+ {
+ union acpi_object *r;
+ int val;
+
+- r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
++ r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);
+
+ if (!r)
+ return LED_OFF;
+@@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)
+
+ static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+ {
+- return get_kbd_backlight_level();
++ return get_kbd_backlight_level(cdev->dev->parent);
+ }
+
+ static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
+@@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {
+
+ static int acpi_add(struct acpi_device *device)
+ {
++ struct platform_device_info pdev_info = {
++ .fwnode = acpi_fwnode_handle(device),
++ .name = PLATFORM_NAME,
++ .id = PLATFORM_DEVID_NONE,
++ };
+ int ret;
+ const char *product;
+ int year = 2017;
+@@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
+ if (ret)
+ return ret;
+
+- pf_device = platform_device_register_simple(PLATFORM_NAME,
+- PLATFORM_DEVID_NONE,
+- NULL, 0);
++ pf_device = platform_device_register_full(&pdev_info);
+ if (IS_ERR(pf_device)) {
+ ret = PTR_ERR(pf_device);
+ pf_device = NULL;
+@@ -736,7 +715,7 @@ static int acpi_add(struct acpi_device *device)
+ default:
+ year = 2019;
+ }
+- pr_info("product: %s year: %d\n", product, year);
++ pr_info("product: %s year: %d\n", product ?: "unknown", year);
+
+ if (year >= 2019)
+ battery_limit_use_wmbb = 1;
+@@ -776,7 +755,7 @@ static void acpi_remove(struct acpi_device *device)
+ }
+
+ static const struct acpi_device_id device_ids[] = {
+- {"LGEX0815", 0},
++ {"LGEX0820", 0},
+ {"", 0}
+ };
+ MODULE_DEVICE_TABLE(acpi, device_ids);
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 1cf2471d54ddef..053be5c5e0cad4 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -20,12 +20,29 @@
+ #define P2SBC_HIDE BIT(8)
+
+ #define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
++#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0)
++#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
+
+ static const struct x86_cpu_id p2sb_cpu_ids[] = {
+- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
++ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
+ {}
+ };
+
++/*
++ * Cache BAR0 of P2SB device functions 0 to 7.
++ * TODO: The constant 8 is the number of functions that the PCI specification
++ * defines. The same definition exists tree-wide; unify these definitions and
++ * move the result to include/uapi/linux/pci.h.
++ */
++#define NR_P2SB_RES_CACHE 8
++
++struct p2sb_res_cache {
++ u32 bus_dev_id;
++ struct resource res;
++};
++
++static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
++
+ static int p2sb_get_devfn(unsigned int *devfn)
+ {
+ unsigned int fn = P2SB_DEVFN_DEFAULT;
+@@ -39,8 +56,13 @@ static int p2sb_get_devfn(unsigned int *devfn)
+ return 0;
+ }
+
++static bool p2sb_valid_resource(const struct resource *res)
++{
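++ /* Any flag other than IORESOURCE_UNSET means BAR0 was read successfully */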
++ return res->flags & ~IORESOURCE_UNSET;
++}
++
+ /* Copy resource from the first BAR of the device in question */
+-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
++static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ {
+ struct resource *bar0 = &pdev->resource[0];
+
+@@ -56,49 +78,57 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+-
+- return 0;
+ }
+
+-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+ {
++ struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
+ struct pci_dev *pdev;
+- int ret;
+
+ pdev = pci_scan_single_device(bus, devfn);
+ if (!pdev)
+- return -ENODEV;
++ return;
+
+- ret = p2sb_read_bar0(pdev, mem);
++ p2sb_read_bar0(pdev, &cache->res);
++ cache->bus_dev_id = bus->dev.id;
+
+ pci_stop_and_remove_bus_device(pdev);
+- return ret;
+ }
+
+-/**
+- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+- * @bus: PCI bus to communicate with
+- * @devfn: PCI slot and function to communicate with
+- * @mem: memory resource to be filled in
+- *
+- * The BIOS prevents the P2SB device from being enumerated by the PCI
+- * subsystem, so we need to unhide and hide it back to lookup the BAR.
+- *
+- * if @bus is NULL, the bus 0 in domain 0 will be used.
+- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+- *
+- * Caller must provide a valid pointer to @mem.
+- *
+- * Locking is handled by pci_rescan_remove_lock mutex.
+- *
+- * Return:
+- * 0 on success or appropriate errno value on error.
+- */
+-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
++{
++ /* Scan the P2SB device and cache its BAR0 */
++ p2sb_scan_and_cache_devfn(bus, devfn);
++
++ /* On Goldmont p2sb_bar() also gets called for the SPI controller */
++ if (devfn == P2SB_DEVFN_GOLDMONT)
++ p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
++
++ if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
++ return -ENOENT;
++
++ return 0;
++}
++
++static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
++{
++ static struct pci_bus *p2sb_bus;
++
++ bus = bus ?: p2sb_bus;
++ if (bus)
++ return bus;
++
++ /* Assume P2SB is on bus 0 in domain 0 */
++ p2sb_bus = pci_find_bus(0, 0);
++ return p2sb_bus;
++}
++
++static int p2sb_cache_resources(void)
+ {
+- struct pci_dev *pdev_p2sb;
+ unsigned int devfn_p2sb;
+ u32 value = P2SBC_HIDE;
++ struct pci_bus *bus;
++ u16 class;
+ int ret;
+
+ /* Get devfn for P2SB device itself */
+@@ -106,8 +136,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ if (ret)
+ return ret;
+
+- /* if @bus is NULL, use bus 0 in domain 0 */
+- bus = bus ?: pci_find_bus(0, 0);
++ bus = p2sb_get_bus(NULL);
++ if (!bus)
++ return -ENODEV;
++
++ /*
++ * If a device with the same devfn exists but its class is not
++ * PCI_CLASS_MEMORY_OTHER (as expected for the P2SB), do not touch it.
++ */
++ pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
++ if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
++ return -ENODEV;
+
+ /*
+ * Prevent concurrent PCI bus scan from seeing the P2SB device and
+@@ -115,17 +154,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+ */
+ pci_lock_rescan_remove();
+
+- /* Unhide the P2SB device, if needed */
++ /*
++ * The BIOS prevents the P2SB device from being enumerated by the PCI
++ * subsystem, so we need to unhide and hide it back to lookup the BAR.
++ * Unhide the P2SB device here, if needed.
++ */
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ if (value & P2SBC_HIDE)
+ pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+- pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+- if (devfn)
+- ret = p2sb_scan_and_read(bus, devfn, mem);
+- else
+- ret = p2sb_read_bar0(pdev_p2sb, mem);
+- pci_stop_and_remove_bus_device(pdev_p2sb);
++ ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+
+ /* Hide the P2SB device, if it was hidden */
+ if (value & P2SBC_HIDE)
+@@ -133,12 +171,66 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+
+ pci_unlock_rescan_remove();
+
+- if (ret)
+- return ret;
++ return ret;
++}
+
+- if (mem->flags == 0)
++/**
++ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
++ * @bus: PCI bus to communicate with
++ * @devfn: PCI slot and function to communicate with
++ * @mem: memory resource to be filled in
++ *
++ * If @bus is NULL, bus 0 in domain 0 will be used.
++ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
++ *
++ * Caller must provide a valid pointer to @mem.
++ *
++ * Return:
++ * 0 on success or appropriate errno value on error.
++ */
++int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++{
++ struct p2sb_res_cache *cache;
++ int ret;
++
++ bus = p2sb_get_bus(bus);
++ if (!bus)
++ return -ENODEV;
++
++ if (!devfn) {
++ ret = p2sb_get_devfn(&devfn);
++ if (ret)
++ return ret;
++ }
++
++ cache = &p2sb_resources[PCI_FUNC(devfn)];
++ if (cache->bus_dev_id != bus->dev.id)
+ return -ENODEV;
+
++ if (!p2sb_valid_resource(&cache->res))
++ return -ENOENT;
++
++ memcpy(mem, &cache->res, sizeof(*mem));
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(p2sb_bar);
++
++static int __init p2sb_fs_init(void)
++{
++ return p2sb_cache_resources();
++}
++
++/*
++ * pci_rescan_remove_lock cannot be taken in the sysfs PCI bus rescan path
++ * because of a deadlock. To avoid the deadlock, access P2SB devices with the
++ * lock held at an early step in kernel initialization and cache the required
++ * resources.
++ *
++ * We want to run as early as possible. If the P2SB was assigned a bad BAR,
++ * we'll need to wait on pcibios_assign_resources() to fix it. So, our list of
++ * initcall dependencies looks something like this:
++ *
++ * ...
++ * subsys_initcall (pci_subsys_init)
++ * fs_initcall (pcibios_assign_resources)
++ */
++fs_initcall_sync(p2sb_fs_init);
+diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
+index cf845ee1c7b1f0..ebd81846e2d564 100644
+--- a/drivers/platform/x86/panasonic-laptop.c
++++ b/drivers/platform/x86/panasonic-laptop.c
+@@ -337,7 +337,8 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
+ }
+
+ if (pcc->num_sifr < hkey->package.count) {
+- pr_err("SQTY reports bad SINF length\n");
++ pr_err("SQTY reports bad SINF length SQTY: %lu SINF-pkg-count: %u\n",
++ pcc->num_sifr, hkey->package.count);
+ status = AE_ERROR;
+ goto end;
+ }
+@@ -773,6 +774,24 @@ static DEVICE_ATTR_RW(dc_brightness);
+ static DEVICE_ATTR_RW(current_brightness);
+ static DEVICE_ATTR_RW(cdpower);
+
++static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct acpi_device *acpi = to_acpi_device(dev);
++ struct pcc_acpi *pcc = acpi_driver_data(acpi);
++
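++ /* Expose an attribute only if its SINF index is within this model's reported range */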
++ if (attr == &dev_attr_mute.attr)
++ return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0;
++
++ if (attr == &dev_attr_eco_mode.attr)
++ return (pcc->num_sifr > SINF_ECO_MODE) ? attr->mode : 0;
++
++ if (attr == &dev_attr_current_brightness.attr)
++ return (pcc->num_sifr > SINF_CUR_BRIGHT) ? attr->mode : 0;
++
++ return attr->mode;
++}
++
+ static struct attribute *pcc_sysfs_entries[] = {
+ &dev_attr_numbatt.attr,
+ &dev_attr_lcdtype.attr,
+@@ -787,8 +806,9 @@ static struct attribute *pcc_sysfs_entries[] = {
+ };
+
+ static const struct attribute_group pcc_attr_group = {
+- .name = NULL, /* put in device directory */
+- .attrs = pcc_sysfs_entries,
++ .name = NULL, /* put in device directory */
++ .attrs = pcc_sysfs_entries,
++ .is_visible = pcc_sysfs_is_visible,
+ };
+
+
+@@ -941,12 +961,15 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
+ if (!pcc)
+ return -EINVAL;
+
+- acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
+- acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
++ if (pcc->num_sifr > SINF_MUTE)
++ acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
++ if (pcc->num_sifr > SINF_ECO_MODE)
++ acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key);
+ acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness);
+ acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness);
+- acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
++ if (pcc->num_sifr > SINF_CUR_BRIGHT)
++ acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
+
+ return 0;
+ }
+@@ -963,11 +986,21 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+
+ num_sifr = acpi_pcc_get_sqty(device);
+
+- if (num_sifr < 0 || num_sifr > 255) {
+- pr_err("num_sifr out of range");
++ /*
++ * pcc->sinf is expected to at least have the AC+DC brightness entries.
++ * Accesses to higher SINF entries are checked against num_sifr.
++ */
++ if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) {
++ pr_err("num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1);
+ return -ENODEV;
+ }
+
++ /*
++ * Some DSDTs have an off-by-one bug where the SINF package count is
++ * one higher than the SQTY-reported value; allocate one extra entry.
++ */
++ num_sifr++;
++
+ pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
+ if (!pcc) {
+ pr_err("Couldn't allocate mem for pcc");
+@@ -1020,11 +1053,14 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
+ pcc->sticky_key = 0;
+
+- pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
+- pcc->mute = pcc->sinf[SINF_MUTE];
+ pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
+- pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
++ if (pcc->num_sifr > SINF_MUTE)
++ pcc->mute = pcc->sinf[SINF_MUTE];
++ if (pcc->num_sifr > SINF_ECO_MODE)
++ pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
++ if (pcc->num_sifr > SINF_CUR_BRIGHT)
++ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+
+ /* add sysfs attributes */
+ result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index aee869769843f5..2396decdb3cb3f 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -1021,7 +1021,16 @@ static ssize_t current_value_store(struct kobject *kobj,
+ * Note - this sets the variable and then the password as separate
+ * WMI calls. Function tlmi_save_bios_settings will error if the
+ * password is incorrect.
++ * Workstations require the opcode to be set before changing the
++ * attribute.
+ */
++ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
++ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
++ tlmi_priv.pwd_admin->password);
++ if (ret)
++ goto out;
++ }
++
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
+ new_setting);
+ if (!set_str) {
+@@ -1033,13 +1042,6 @@ static ssize_t current_value_store(struct kobject *kobj,
+ if (ret)
+ goto out;
+
+- if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+- ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+- tlmi_priv.pwd_admin->password);
+- if (ret)
+- goto out;
+- }
+-
+ ret = tlmi_save_bios_settings("");
+ } else { /* old non-opcode based authentication method (deprecated) */
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 41584427dc323b..5b1f08eabd9232 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -3042,10 +3042,9 @@ static void tpacpi_send_radiosw_update(void)
+
+ static void hotkey_exit(void)
+ {
+-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_lock(&hotkey_mutex);
++#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_stop_sync();
+- mutex_unlock(&hotkey_mutex);
+ #endif
+ dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
+ "restoring original HKEY status and mask\n");
+@@ -3055,6 +3054,8 @@ static void hotkey_exit(void)
+ hotkey_mask_set(hotkey_orig_mask)) |
+ hotkey_status_set(false)) != 0)
+ pr_err("failed to restore hot key mask to BIOS defaults\n");
++
++ mutex_unlock(&hotkey_mutex);
+ }
+
+ static void __init hotkey_unmap(const unsigned int scancode)
+@@ -7948,8 +7949,19 @@ static struct ibm_struct volume_driver_data = {
+ * TPACPI_FAN_WR_TPEC is also available and should be used to
+ * command the fan. The X31/X40/X41 seems to have 8 fan levels,
+ * but the ACPI tables just mention level 7.
++ *
++ * TPACPI_FAN_RD_TPEC_NS:
++ * This mode is used for a few ThinkPads (L13 Yoga Gen2, X13 Yoga Gen2 etc.)
++ * that are using non-standard EC locations for reporting fan speeds.
++ * Currently these platforms only provide fan rpm reporting.
++ *
+ */
+
++#define FAN_RPM_CAL_CONST 491520 /* FAN RPM calculation constant for some non-standard ECFW */
++
++#define FAN_NS_CTRL_STATUS BIT(2) /* Bit indicating whether fan control is enabled */
++#define FAN_NS_CTRL BIT(4) /* Bit indicating whether the host or the EC is in control */
++
+ enum { /* Fan control constants */
+ fan_status_offset = 0x2f, /* EC register 0x2f */
+ fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
+@@ -7957,6 +7969,11 @@ enum { /* Fan control constants */
+ fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M)
+ bit 0 selects which fan is active */
+
++ fan_status_offset_ns = 0x93, /* Special status/control offset for non-standard EC Fan1 */
++ fan2_status_offset_ns = 0x96, /* Special status/control offset for non-standard EC Fan2 */
++ fan_rpm_status_ns = 0x95, /* Special offset for Fan1 RPM status for non-standard EC */
++ fan2_rpm_status_ns = 0x98, /* Special offset for Fan2 RPM status for non-standard EC */
++
+ TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
+ TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
+
+@@ -7967,6 +7984,7 @@ enum fan_status_access_mode {
+ TPACPI_FAN_NONE = 0, /* No fan status or control */
+ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
+ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
++ TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) */
+ };
+
+ enum fan_control_access_mode {
+@@ -7994,6 +8012,8 @@ static u8 fan_control_desired_level;
+ static u8 fan_control_resume_level;
+ static int fan_watchdog_maxinterval;
+
++static bool fan_with_ns_addr;
++
+ static struct mutex fan_mutex;
+
+ static void fan_watchdog_fire(struct work_struct *ignored);
+@@ -8123,6 +8143,15 @@ static int fan_get_status(u8 *status)
+ }
+
+ break;
++ case TPACPI_FAN_RD_TPEC_NS:
++ /* Default mode is AUTO which means controlled by EC */
++ if (!acpi_ec_read(fan_status_offset_ns, &s))
++ return -EIO;
++
++ if (status)
++ *status = s;
++
++ break;
+
+ default:
+ return -ENXIO;
+@@ -8139,7 +8168,8 @@ static int fan_get_status_safe(u8 *status)
+ if (mutex_lock_killable(&fan_mutex))
+ return -ERESTARTSYS;
+ rc = fan_get_status(&s);
+- if (!rc)
++ /* The NS EC doesn't have a register with level settings */
++ if (!rc && !fan_with_ns_addr)
+ fan_update_desired_level(s);
+ mutex_unlock(&fan_mutex);
+
+@@ -8166,7 +8196,13 @@ static int fan_get_speed(unsigned int *speed)
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
++ break;
++ case TPACPI_FAN_RD_TPEC_NS:
++ if (!acpi_ec_read(fan_rpm_status_ns, &lo))
++ return -EIO;
+
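++ /* EC reports a divisor (RPM = FAN_RPM_CAL_CONST / value); a zero reading means 0 RPM */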
++ if (speed)
++ *speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
+ break;
+
+ default:
+@@ -8178,7 +8214,7 @@ static int fan_get_speed(unsigned int *speed)
+
+ static int fan2_get_speed(unsigned int *speed)
+ {
+- u8 hi, lo;
++ u8 hi, lo, status;
+ bool rc;
+
+ switch (fan_status_access_mode) {
+@@ -8194,7 +8230,21 @@ static int fan2_get_speed(unsigned int *speed)
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
++ break;
+
++ case TPACPI_FAN_RD_TPEC_NS:
++ rc = !acpi_ec_read(fan2_status_offset_ns, &status);
++ if (rc)
++ return -EIO;
++ if (!(status & FAN_NS_CTRL_STATUS)) {
++ pr_info("secondary fan control not supported\n");
++ return -EIO;
++ }
++ rc = !acpi_ec_read(fan2_rpm_status_ns, &lo);
++ if (rc)
++ return -EIO;
++ if (speed)
++ *speed = lo ? FAN_RPM_CAL_CONST / lo : 0;
+ break;
+
+ default:
+@@ -8697,6 +8747,7 @@ static const struct attribute_group fan_driver_attr_group = {
+ #define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+ #define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+ #define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
++#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
+
+ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
+@@ -8715,6 +8766,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
+ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
+ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
++ TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */
++ TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2 */
+ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
+ };
+
+@@ -8749,18 +8802,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ return -ENODEV;
+ }
+
++ if (quirks & TPACPI_FAN_NS) {
++ pr_info("ECFW with non-standard fan reg control found\n");
++ fan_with_ns_addr = 1;
++ /* Fan ctrl support from host is undefined for now */
++ tp_features.fan_ctrl_status_undef = 1;
++ }
++
+ if (gfan_handle) {
+ /* 570, 600e/x, 770e, 770x */
+ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+ } else {
+ /* all other ThinkPads: note that even old-style
+ * ThinkPad ECs supports the fan control register */
+- if (likely(acpi_ec_read(fan_status_offset,
+- &fan_control_initial_status))) {
++ if (fan_with_ns_addr ||
++ likely(acpi_ec_read(fan_status_offset, &fan_control_initial_status))) {
+ int res;
+ unsigned int speed;
+
+- fan_status_access_mode = TPACPI_FAN_RD_TPEC;
++ fan_status_access_mode = fan_with_ns_addr ?
++ TPACPI_FAN_RD_TPEC_NS : TPACPI_FAN_RD_TPEC;
++
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+ /* Try and probe the 2nd fan */
+@@ -8769,7 +8831,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ if (res >= 0 && speed != FAN_NOT_PRESENT) {
+ /* It responded - so let's assume it's there */
+ tp_features.second_fan = 1;
+- tp_features.second_fan_ctl = 1;
++ /* fan control not currently available for ns ECFW */
++ tp_features.second_fan_ctl = !fan_with_ns_addr;
+ pr_info("secondary fan control detected & enabled\n");
+ } else {
+ /* Fan not auto-detected */
+@@ -8944,6 +9007,7 @@ static int fan_read(struct seq_file *m)
+ str_enabled_disabled(status), status);
+ break;
+
++ case TPACPI_FAN_RD_TPEC_NS:
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ rc = fan_get_status_safe(&status);
+@@ -8958,13 +9022,22 @@ static int fan_read(struct seq_file *m)
+
+ seq_printf(m, "speed:\t\t%d\n", speed);
+
+- if (status & TP_EC_FAN_FULLSPEED)
+- /* Disengaged mode takes precedence */
+- seq_printf(m, "level:\t\tdisengaged\n");
+- else if (status & TP_EC_FAN_AUTO)
+- seq_printf(m, "level:\t\tauto\n");
+- else
+- seq_printf(m, "level:\t\t%d\n", status);
++ if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) {
++ /*
++ * The NS EC has no full-speed bit.
++ * EC auto mode is set by default;
++ * no other level settings are available.
++ */
++ seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? "unknown" : "auto");
++ } else {
++ if (status & TP_EC_FAN_FULLSPEED)
++ /* Disengaged mode takes precedence */
++ seq_printf(m, "level:\t\tdisengaged\n");
++ else if (status & TP_EC_FAN_AUTO)
++ seq_printf(m, "level:\t\tauto\n");
++ else
++ seq_printf(m, "level:\t\t%d\n", status);
++ }
+ break;
+
+ case TPACPI_FAN_NONE:
+@@ -9816,6 +9889,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ * Individual addressing is broken on models that expose the
+ * primary battery as BAT1.
+ */
++ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
+ TPACPI_Q_LNV('J', '7', true), /* B5400 */
+ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
+ TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+@@ -10235,6 +10309,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ return 0;
+ default:
+ /* Unknown function */
++ pr_debug("unknown function 0x%x\n", funcmode);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+@@ -10420,8 +10495,8 @@ static void dytc_profile_refresh(void)
+ return;
+
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+- convert_dytc_to_profile(funcmode, perfmode, &profile);
+- if (profile != dytc_current_profile) {
++ err = convert_dytc_to_profile(funcmode, perfmode, &profile);
++ if (!err && profile != dytc_current_profile) {
+ dytc_current_profile = profile;
+ platform_profile_notify();
+ }
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 291f14ef67024a..26991b2f7ae91e 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -57,6 +57,11 @@ module_param(turn_on_panel_on_resume, int, 0644);
+ MODULE_PARM_DESC(turn_on_panel_on_resume,
+ "Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes");
+
++static int hci_hotkey_quickstart = -1;
++module_param(hci_hotkey_quickstart, int, 0644);
++MODULE_PARM_DESC(hci_hotkey_quickstart,
++ "Call HCI_HOTKEY_EVENT with value 0x5 for quickstart button support (-1 = auto, 0 = no, 1 = yes");
++
+ #define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+
+ /* Scan code for Fn key on TOS1900 models */
+@@ -136,6 +141,7 @@ MODULE_PARM_DESC(turn_on_panel_on_resume,
+ #define HCI_ACCEL_MASK 0x7fff
+ #define HCI_ACCEL_DIRECTION_MASK 0x8000
+ #define HCI_HOTKEY_DISABLE 0x0b
++#define HCI_HOTKEY_ENABLE_QUICKSTART 0x05
+ #define HCI_HOTKEY_ENABLE 0x09
+ #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
+ #define HCI_LCD_BRIGHTNESS_BITS 3
+@@ -2730,10 +2736,15 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
+ return -ENODEV;
+
+ /*
++ * Enable quickstart buttons if supported.
++ *
+ * Enable the "Special Functions" mode only if they are
+ * supported and if they are activated.
+ */
+- if (dev->kbd_function_keys_supported && dev->special_functions)
++ if (hci_hotkey_quickstart)
++ result = hci_write(dev, HCI_HOTKEY_EVENT,
++ HCI_HOTKEY_ENABLE_QUICKSTART);
++ else if (dev->kbd_function_keys_supported && dev->special_functions)
+ result = hci_write(dev, HCI_HOTKEY_EVENT,
+ HCI_HOTKEY_SPECIAL_FUNCTIONS);
+ else
+@@ -3257,7 +3268,14 @@ static const char *find_hci_method(acpi_handle handle)
+ * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
+ * the configured brightness level.
+ */
+-static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
++#define QUIRK_TURN_ON_PANEL_ON_RESUME BIT(0)
++/*
++ * Some Toshibas use "quickstart" keys. On these, HCI_HOTKEY_EVENT must use
++ * the value HCI_HOTKEY_ENABLE_QUICKSTART.
++ */
++#define QUIRK_HCI_HOTKEY_QUICKSTART BIT(1)
++
++static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+ {
+ /* Toshiba Portégé R700 */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3265,6 +3283,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ },
++ .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
+ },
+ {
+ /* Toshiba Satellite/Portégé R830 */
+@@ -3274,6 +3293,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
+ },
++ .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
+ },
+ {
+ /* Toshiba Satellite/Portégé Z830 */
+@@ -3281,7 +3301,9 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
+ },
++ .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
+ },
++ { }
+ };
+
+ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+@@ -3441,9 +3463,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ }
+ #endif
+
+- if (turn_on_panel_on_resume == -1)
+- turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
+-
+ toshiba_wwan_available(dev);
+ if (dev->wwan_supported)
+ toshiba_acpi_setup_wwan_rfkill(dev);
+@@ -3592,10 +3611,27 @@ static struct acpi_driver toshiba_acpi_driver = {
+ .drv.pm = &toshiba_acpi_pm,
+ };
+
++static void __init toshiba_dmi_init(void)
++{
++ const struct dmi_system_id *dmi_id;
++ long quirks = 0;
++
++ dmi_id = dmi_first_match(toshiba_dmi_quirks);
++ if (dmi_id)
++ quirks = (long)dmi_id->driver_data;
++
++ if (turn_on_panel_on_resume == -1)
++ turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++ if (hci_hotkey_quickstart == -1)
++ hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
++}
++
+ static int __init toshiba_acpi_init(void)
+ {
+ int ret;
+
++ toshiba_dmi_init();
+ toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ if (!toshiba_proc_dir) {
+ pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 0c673377269840..30c05a9948319d 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
+ };
+
+ static const struct ts_dmi_data chuwi_hi8_air_data = {
+- .acpi_name = "MSSL1680:00",
++ .acpi_name = "MSSL1680",
+ .properties = chuwi_hi8_air_props,
+ };
+
+@@ -885,6 +885,21 @@ static const struct ts_dmi_data rwc_nanote_p8_data = {
+ .properties = rwc_nanote_p8_props,
+ };
+
++static const struct property_entry rwc_nanote_next_props[] = {
++ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
++ PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1785),
++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1145),
++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-next.fw"),
++ { }
++};
++
++static const struct ts_dmi_data rwc_nanote_next_data = {
++ .acpi_name = "MSSL1680:00",
++ .properties = rwc_nanote_next_props,
++};
++
+ static const struct property_entry schneider_sct101ctm_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+@@ -902,6 +917,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
+ .properties = schneider_sct101ctm_props,
+ };
+
++static const struct property_entry globalspace_solt_ivw116_props[] = {
++ PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
++ PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
++ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++ PROPERTY_ENTRY_BOOL("silead,home-button"),
++ { }
++};
++
++static const struct ts_dmi_data globalspace_solt_ivw116_data = {
++ .acpi_name = "MSSL1680:00",
++ .properties = globalspace_solt_ivw116_props,
++};
++
+ static const struct property_entry techbite_arc_11_6_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+@@ -944,6 +975,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
+ .properties = teclast_tbook11_props,
+ };
+
++static const struct property_entry teclast_x16_plus_props[] = {
++ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
++ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++ PROPERTY_ENTRY_BOOL("silead,home-button"),
++ { }
++};
++
++static const struct ts_dmi_data teclast_x16_plus_data = {
++ .embedded_fw = {
++ .name = "silead/gsl3692-teclast-x16-plus.fw",
++ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
++ .length = 43560,
++ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
++ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
++ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
++ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
++ },
++ .acpi_name = "MSSL1680:00",
++ .properties = teclast_x16_plus_props,
++};
++
+ static const struct property_entry teclast_x3_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+@@ -1196,6 +1253,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.D86JLBNR"),
+ },
+ },
++ {
++ /* Chuwi Vi8 dual-boot (CWI506) */
++ .driver_data = (void *)&chuwi_vi8_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
++ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
++ },
++ },
+ {
+ /* Chuwi Vi8 Plus (CWI519) */
+ .driver_data = (void *)&chuwi_vi8_plus_data,
+@@ -1355,6 +1421,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ },
+ },
++ {
++ /* Jumper EZpad 6s Pro */
++ .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
++ /* Above matches are too generic, add bios match */
++ DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
++ DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
++ },
++ },
+ {
+ /* Jumper EZpad 6 m4 */
+ .driver_data = (void *)&jumper_ezpad_6_m4_data,
+@@ -1586,6 +1663,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
+ },
+ },
++ {
++ /* RWC NANOTE NEXT */
++ .driver_data = (void *)&rwc_nanote_next_data,
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
++ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
++ DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
++ /* Above matches are too generic, add bios-version match */
++ DMI_MATCH(DMI_BIOS_VERSION, "S8A70R100-V005"),
++ },
++ },
+ {
+ /* Schneider SCT101CTM */
+ .driver_data = (void *)&schneider_sct101ctm_data,
+@@ -1594,6 +1682,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ },
+ },
++ {
++ /* GlobalSpace SoLT IVW 11.6" */
++ .driver_data = (void *)&globalspace_solt_ivw116_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
++ DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
++ },
++ },
+ {
+ /* Techbite Arc 11.6 */
+ .driver_data = (void *)&techbite_arc_11_6_data,
+@@ -1612,6 +1709,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+ },
+ },
++ {
++ /* Teclast X16 Plus */
++ .driver_data = (void *)&teclast_x16_plus_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
++ DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
++ },
++ },
+ {
+ /* Teclast X3 Plus */
+ .driver_data = (void *)&teclast_x3_plus_data,
+@@ -1786,7 +1892,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
+ int error;
+
+ if (has_acpi_companion(dev) &&
+- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
++ strstarts(client->name, ts_data->acpi_name)) {
+ error = device_create_managed_software_node(dev, ts_data->properties, NULL);
+ if (error)
+ dev_err(dev, "failed to add properties: %d\n", error);
+diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
+index 4422863f47bbe6..01feb6e6787f2d 100644
+--- a/drivers/platform/x86/wireless-hotkey.c
++++ b/drivers/platform/x86/wireless-hotkey.c
+@@ -19,6 +19,7 @@ MODULE_AUTHOR("Alex Hung");
+ MODULE_ALIAS("acpi*:HPQ6001:*");
+ MODULE_ALIAS("acpi*:WSTADEF:*");
+ MODULE_ALIAS("acpi*:AMDI0051:*");
++MODULE_ALIAS("acpi*:LGEX0815:*");
+
+ struct wl_button {
+ struct input_dev *input_dev;
+@@ -29,6 +30,7 @@ static const struct acpi_device_id wl_ids[] = {
+ {"HPQ6001", 0},
+ {"WSTADEF", 0},
+ {"AMDI0051", 0},
++ {"LGEX0815", 0},
+ {"", 0},
+ };
+
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index a78ddd83cda02f..d75a0ae9cd0c5d 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -911,21 +911,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ }
+ static int wmi_char_open(struct inode *inode, struct file *filp)
+ {
+- const char *driver_name = filp->f_path.dentry->d_iname;
+- struct wmi_block *wblock;
+- struct wmi_block *next;
+-
+- list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+- if (!wblock->dev.dev.driver)
+- continue;
+- if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+- filp->private_data = wblock;
+- break;
+- }
+- }
++ /*
++ * The miscdevice already stores a pointer to itself
++ * inside filp->private_data
++ */
++ struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
+
+- if (!filp->private_data)
+- return -ENODEV;
++ filp->private_data = wblock;
+
+ return nonseekable_open(inode, filp);
+ }
+@@ -1270,8 +1262,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ struct wmi_block *wblock, *next;
+ union acpi_object *obj;
+ acpi_status status;
+- int retval = 0;
+ u32 i, total;
++ int retval;
+
+ status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
+ if (ACPI_FAILURE(status))
+@@ -1282,8 +1274,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ return -ENXIO;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+- retval = -ENXIO;
+- goto out_free_pointer;
++ kfree(obj);
++ return -ENXIO;
+ }
+
+ gblock = (const struct guid_block *)obj->buffer.pointer;
+@@ -1293,13 +1285,18 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ if (debug_dump_wdg)
+ wmi_dump_wdg(&gblock[i]);
+
++ if (!gblock[i].instance_count) {
++ dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
++ continue;
++ }
++
+ if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
+ continue;
+
+ wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
+ if (!wblock) {
+- retval = -ENOMEM;
+- break;
++ dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
++ continue;
+ }
+
+ wblock->acpi_device = device;
+@@ -1338,9 +1335,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ }
+ }
+
+-out_free_pointer:
+- kfree(out.pointer);
+- return retval;
++ kfree(obj);
++
++ return 0;
+ }
+
+ /*
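
The wmi_char_open() rewrite above leans on a misc-device convention: the misc core stores the struct miscdevice pointer in filp->private_data before calling the driver's open method, so when the miscdevice is embedded in a larger object, container_of() recovers that object without walking any global list. In sketch form (hypothetical driver, illustrative only):

        struct foo_dev {
                struct miscdevice char_dev;     /* embedded misc device */
                /* ... driver state ... */
        };

        static int foo_open(struct inode *inode, struct file *filp)
        {
                /* misc core pre-loaded filp->private_data with &foo->char_dev */
                struct foo_dev *foo = container_of(filp->private_data,
                                                   struct foo_dev, char_dev);

                filp->private_data = foo;       /* hand later fops the outer object */

                return nonseekable_open(inode, filp);
        }
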
+diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
+index 2fd6060a31bb0b..a0fa0b6859c9ca 100644
+--- a/drivers/platform/x86/x86-android-tablets/core.c
++++ b/drivers/platform/x86/x86-android-tablets/core.c
+@@ -25,6 +25,8 @@
+ #include "../../../gpio/gpiolib.h"
+ #include "../../../gpio/gpiolib-acpi.h"
+
++static struct platform_device *x86_android_tablet_device;
++
+ static int gpiochip_find_match_label(struct gpio_chip *gc, void *data)
+ {
+ return gc->label && !strcmp(gc->label, data);
+@@ -224,7 +226,7 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
+ return ret;
+ }
+
+-static void x86_android_tablet_cleanup(void)
++static void x86_android_tablet_remove(struct platform_device *pdev)
+ {
+ int i;
+
+@@ -255,7 +257,7 @@ static void x86_android_tablet_cleanup(void)
+ software_node_unregister(bat_swnode);
+ }
+
+-static __init int x86_android_tablet_init(void)
++static __init int x86_android_tablet_probe(struct platform_device *pdev)
+ {
+ const struct x86_dev_info *dev_info;
+ const struct dmi_system_id *id;
+@@ -267,6 +269,8 @@ static __init int x86_android_tablet_init(void)
+ return -ENODEV;
+
+ dev_info = id->driver_data;
++ /* Allow x86_android_tablet_device use before probe() exits */
++ x86_android_tablet_device = pdev;
+
+ /*
+ * The broken DSDTs on these devices often also include broken
+@@ -303,7 +307,7 @@ static __init int x86_android_tablet_init(void)
+ if (dev_info->init) {
+ ret = dev_info->init();
+ if (ret < 0) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return ret;
+ }
+ exit_handler = dev_info->exit;
+@@ -311,7 +315,7 @@ static __init int x86_android_tablet_init(void)
+
+ i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL);
+ if (!i2c_clients) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return -ENOMEM;
+ }
+
+@@ -319,7 +323,7 @@ static __init int x86_android_tablet_init(void)
+ for (i = 0; i < i2c_client_count; i++) {
+ ret = x86_instantiate_i2c_client(dev_info, i);
+ if (ret < 0) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return ret;
+ }
+ }
+@@ -327,7 +331,7 @@ static __init int x86_android_tablet_init(void)
+ /* + 1 to make space for (optional) gpio_keys_button pdev */
+ pdevs = kcalloc(dev_info->pdev_count + 1, sizeof(*pdevs), GFP_KERNEL);
+ if (!pdevs) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return -ENOMEM;
+ }
+
+@@ -335,14 +339,15 @@ static __init int x86_android_tablet_init(void)
+ for (i = 0; i < pdev_count; i++) {
+ pdevs[i] = platform_device_register_full(&dev_info->pdev_info[i]);
+ if (IS_ERR(pdevs[i])) {
+- x86_android_tablet_cleanup();
+- return PTR_ERR(pdevs[i]);
++ ret = PTR_ERR(pdevs[i]);
++ x86_android_tablet_remove(pdev);
++ return ret;
+ }
+ }
+
+ serdevs = kcalloc(dev_info->serdev_count, sizeof(*serdevs), GFP_KERNEL);
+ if (!serdevs) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return -ENOMEM;
+ }
+
+@@ -350,7 +355,7 @@ static __init int x86_android_tablet_init(void)
+ for (i = 0; i < serdev_count; i++) {
+ ret = x86_instantiate_serdev(&dev_info->serdev_info[i], i);
+ if (ret < 0) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return ret;
+ }
+ }
+@@ -361,7 +366,7 @@ static __init int x86_android_tablet_init(void)
+
+ buttons = kcalloc(dev_info->gpio_button_count, sizeof(*buttons), GFP_KERNEL);
+ if (!buttons) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return -ENOMEM;
+ }
+
+@@ -369,7 +374,7 @@ static __init int x86_android_tablet_init(void)
+ ret = x86_android_tablet_get_gpiod(dev_info->gpio_button[i].chip,
+ dev_info->gpio_button[i].pin, &gpiod);
+ if (ret < 0) {
+- x86_android_tablet_cleanup();
++ x86_android_tablet_remove(pdev);
+ return ret;
+ }
+
+@@ -384,8 +389,9 @@ static __init int x86_android_tablet_init(void)
+ PLATFORM_DEVID_AUTO,
+ &pdata, sizeof(pdata));
+ if (IS_ERR(pdevs[pdev_count])) {
+- x86_android_tablet_cleanup();
+- return PTR_ERR(pdevs[pdev_count]);
++ ret = PTR_ERR(pdevs[pdev_count]);
++ x86_android_tablet_remove(pdev);
++ return ret;
+ }
+ pdev_count++;
+ }
+@@ -393,8 +399,29 @@ static __init int x86_android_tablet_init(void)
+ return 0;
+ }
+
++static struct platform_driver x86_android_tablet_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ },
++ .remove_new = x86_android_tablet_remove,
++};
++
++static int __init x86_android_tablet_init(void)
++{
++ x86_android_tablet_device = platform_create_bundle(&x86_android_tablet_driver,
++ x86_android_tablet_probe,
++ NULL, 0, NULL, 0);
++
++ return PTR_ERR_OR_ZERO(x86_android_tablet_device);
++}
+ module_init(x86_android_tablet_init);
+-module_exit(x86_android_tablet_cleanup);
++
++static void __exit x86_android_tablet_exit(void)
++{
++ platform_device_unregister(x86_android_tablet_device);
++ platform_driver_unregister(&x86_android_tablet_driver);
++}
++module_exit(x86_android_tablet_exit);
+
+ MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+ MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
+diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
+index 5d6c12494f082a..0c9d9caf074cb6 100644
+--- a/drivers/platform/x86/x86-android-tablets/dmi.c
++++ b/drivers/platform/x86/x86-android-tablets/dmi.c
+@@ -122,7 +122,6 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
+ /* Lenovo Yoga Tab 3 Pro YT3-X90F */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ .driver_data = (void *)&lenovo_yt3_info,
+diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
+index e79549c6aae174..fe5f68fa7bca76 100644
+--- a/drivers/platform/x86/x86-android-tablets/other.c
++++ b/drivers/platform/x86/x86-android-tablets/other.c
+@@ -66,7 +66,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
+ },
+ };
+
+-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
++static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
+ .dev_id = "i2c-NVT-ts",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
+@@ -75,7 +75,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+ };
+
+ static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
+- &acer_b1_750_goodix_gpios,
++ &acer_b1_750_nvt_ts_gpios,
+ &int3496_reference_gpios,
+ NULL
+ };
+diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+index cfb796d40d9d26..0dd71cd814c52f 100644
+--- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c
++++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+@@ -228,7 +228,7 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
+
+ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
+ { G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
+- { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
++ { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
+ };
+
+ #define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
+diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
+index 1a179d4e011cfe..d2f0233cb6206d 100644
+--- a/drivers/pmdomain/bcm/bcm2835-power.c
++++ b/drivers/pmdomain/bcm/bcm2835-power.c
+@@ -175,7 +175,7 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
+ }
+ writel(PM_PASSWORD | val, base + reg);
+
+- while (readl(base + reg) & ASB_ACK) {
++ while (!!(readl(base + reg) & ASB_ACK) == enable) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
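
The one-line bcm2835-power change above makes the busy-wait direction-aware: the old loop unconditionally waited for ASB_ACK to clear, which is only the right completion condition for one direction, so the loop now spins while the ack bit still matches the requested 'enable' sense, under the same ~1 µs budget. The equivalent bounded poll written with the <linux/iopoll.h> helper would look roughly like this (sketch, not part of the patch):

        u32 val;
        int ret;

        /* exit once the latched ack differs from 'enable'; poll for at most 1 us */
        ret = readl_poll_timeout_atomic(base + reg, val,
                                        !!(val & ASB_ACK) != enable, 0, 1);
        if (ret)
                return ret;     /* -ETIMEDOUT */
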
+diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
+index 90a8b2c0676ff3..419ed15cc10c42 100644
+--- a/drivers/pmdomain/imx/gpc.c
++++ b/drivers/pmdomain/imx/gpc.c
+@@ -498,6 +498,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
+
+ pd_pdev->dev.parent = &pdev->dev;
+ pd_pdev->dev.of_node = np;
++ pd_pdev->dev.fwnode = of_fwnode_handle(np);
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+index c6ac32c1a8c171..31693add7d633f 100644
+--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
++++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+@@ -55,7 +55,7 @@ struct imx8mp_blk_ctrl_domain_data {
+ const char *gpc_name;
+ };
+
+-#define DOMAIN_MAX_CLKS 2
++#define DOMAIN_MAX_CLKS 3
+ #define DOMAIN_MAX_PATHS 3
+
+ struct imx8mp_blk_ctrl_domain {
+@@ -457,8 +457,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
+ },
+ [IMX8MP_HDMIBLK_PD_LCDIF] = {
+ .name = "hdmiblk-lcdif",
+- .clk_names = (const char *[]){ "axi", "apb" },
+- .num_clks = 2,
++ .clk_names = (const char *[]){ "axi", "apb", "fdcc" },
++ .num_clks = 3,
+ .gpc_name = "lcdif",
+ .path_names = (const char *[]){"lcdif-hdmi"},
+ .num_paths = 1,
+@@ -483,8 +483,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
+ },
+ [IMX8MP_HDMIBLK_PD_HDMI_TX] = {
+ .name = "hdmiblk-hdmi-tx",
+- .clk_names = (const char *[]){ "apb", "ref_266m" },
+- .num_clks = 2,
++ .clk_names = (const char *[]){ "apb", "ref_266m", "fdcc" },
++ .num_clks = 3,
+ .gpc_name = "hdmi-tx",
+ },
+ [IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = {
+diff --git a/drivers/pmdomain/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c
+index b9e60d136875ae..660d00d98ecc16 100644
+--- a/drivers/pmdomain/imx/imx93-pd.c
++++ b/drivers/pmdomain/imx/imx93-pd.c
+@@ -20,6 +20,7 @@
+ #define FUNC_STAT_PSW_STAT_MASK BIT(0)
+ #define FUNC_STAT_RST_STAT_MASK BIT(2)
+ #define FUNC_STAT_ISO_STAT_MASK BIT(4)
++#define FUNC_STAT_SSAR_STAT_MASK BIT(8)
+
+ struct imx93_power_domain {
+ struct generic_pm_domain genpd;
+@@ -50,7 +51,7 @@ static int imx93_pd_on(struct generic_pm_domain *genpd)
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+- !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000);
++ !(val & FUNC_STAT_SSAR_STAT_MASK), 1, 10000);
+ if (ret) {
+ dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+@@ -72,7 +73,7 @@ static int imx93_pd_off(struct generic_pm_domain *genpd)
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+- val & FUNC_STAT_PSW_STAT_MASK, 1, 1000);
++ val & FUNC_STAT_PSW_STAT_MASK, 1, 10000);
+ if (ret) {
+ dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+diff --git a/drivers/pmdomain/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c
+index 891c1d925a9dea..368918e562f55b 100644
+--- a/drivers/pmdomain/imx/scu-pd.c
++++ b/drivers/pmdomain/imx/scu-pd.c
+@@ -223,11 +223,6 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
+ { "lvds1-pwm", IMX_SC_R_LVDS_1_PWM_0, 1, false, 0 },
+ { "lvds1-lpi2c", IMX_SC_R_LVDS_1_I2C_0, 2, true, 0 },
+
+- { "mipi1", IMX_SC_R_MIPI_1, 1, 0 },
+- { "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, 0 },
+- { "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, 1 },
+- { "lvds1", IMX_SC_R_LVDS_1, 1, 0 },
+-
+ /* DC SS */
+ { "dc0", IMX_SC_R_DC_0, 1, false, 0 },
+ { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
+index ee962804b83031..edded392950cef 100644
+--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
++++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
+@@ -508,6 +508,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
+ goto err_put_node;
+ }
+
++ /* recursive call to add all subdomains */
++ ret = scpsys_add_subdomain(scpsys, child);
++ if (ret)
++ goto err_put_node;
++
+ ret = pm_genpd_add_subdomain(parent_pd, child_pd);
+ if (ret) {
+ dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
+@@ -517,11 +522,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
+ dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
+ child_pd->name);
+ }
+-
+- /* recursive call to add all subdomains */
+- ret = scpsys_add_subdomain(scpsys, child);
+- if (ret)
+- goto err_put_node;
+ }
+
+ return 0;
+@@ -535,9 +535,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+ {
+ int ret;
+
+- if (scpsys_domain_is_on(pd))
+- scpsys_power_off(&pd->genpd);
+-
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+@@ -547,6 +544,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+ dev_err(pd->scpsys->dev,
+ "failed to remove domain '%s' : %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
++ if (scpsys_domain_is_on(pd))
++ scpsys_power_off(&pd->genpd);
+
+ clk_bulk_put(pd->num_clks, pd->clks);
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
+index a87e336d5e33b9..1bb9f70ab04c85 100644
+--- a/drivers/pmdomain/qcom/rpmhpd.c
++++ b/drivers/pmdomain/qcom/rpmhpd.c
+@@ -40,6 +40,7 @@
+ * @addr: Resource address as looped up using resource name from
+ * cmd-db
+ * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource
++ * @skip_retention_level: Indicate that retention level should not be used for the power domain
+ */
+ struct rpmhpd {
+ struct device *dev;
+@@ -56,6 +57,7 @@ struct rpmhpd {
+ const char *res_name;
+ u32 addr;
+ bool state_synced;
++ bool skip_retention_level;
+ };
+
+ struct rpmhpd_desc {
+@@ -173,6 +175,7 @@ static struct rpmhpd mxc = {
+ .pd = { .name = "mxc", },
+ .peer = &mxc_ao,
+ .res_name = "mxc.lvl",
++ .skip_retention_level = true,
+ };
+
+ static struct rpmhpd mxc_ao = {
+@@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = {
+ .active_only = true,
+ .peer = &mxc,
+ .res_name = "mxc.lvl",
++ .skip_retention_level = true,
+ };
+
+ static struct rpmhpd nsp = {
+@@ -207,7 +211,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+- [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+@@ -616,6 +619,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
++ unsigned int peer_enabled_corner;
+
+ if (pd->state_synced) {
+ to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+@@ -625,9 +629,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+ this_sleep_corner = pd->level_count - 1;
+ }
+
+- if (peer && peer->enabled)
+- to_active_sleep(peer, peer->corner, &peer_active_corner,
++ if (peer && peer->enabled) {
++ peer_enabled_corner = max(peer->corner, peer->enable_corner);
++ to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
+ &peer_sleep_corner);
++ }
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+@@ -747,6 +753,9 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
++ if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION)
++ continue;
++
+ rpmhpd->level[i] = buf[i];
+
+ /* Remember the first corner with non-zero level */
+diff --git a/drivers/pmdomain/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c
+index 39ca84a67daadd..621e411fc9991a 100644
+--- a/drivers/pmdomain/renesas/r8a77980-sysc.c
++++ b/drivers/pmdomain/renesas/r8a77980-sysc.c
+@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+- { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
++ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON,
++ PD_CPU_NOCR },
+ { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
+ { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
+ { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
+diff --git a/drivers/pmdomain/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
+index c2feae3a634caf..b8ceb3c2b81c25 100644
+--- a/drivers/pmdomain/ti/omap_prm.c
++++ b/drivers/pmdomain/ti/omap_prm.c
+@@ -695,6 +695,8 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
+ data = prm->data;
+ name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
+ data->name);
++ if (!name)
++ return -ENOMEM;
+
+ prmd->dev = dev;
+ prmd->prm = prm;
+diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
+index 34645104fe45d3..f520228e1b6ae9 100644
+--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
++++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
+@@ -114,6 +114,18 @@ static const struct of_device_id ti_sci_pm_domain_matches[] = {
+ };
+ MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+
++static bool ti_sci_pm_idx_exists(struct ti_sci_genpd_provider *pd_provider, u32 idx)
++{
++ struct ti_sci_pm_domain *pd;
++
++ list_for_each_entry(pd, &pd_provider->pd_list, node) {
++ if (pd->idx == idx)
++ return true;
++ }
++
++ return false;
++}
++
+ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -149,8 +161,14 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ break;
+
+ if (args.args_count >= 1 && args.np == dev->of_node) {
+- if (args.args[0] > max_id)
++ if (args.args[0] > max_id) {
+ max_id = args.args[0];
++ } else {
++ if (ti_sci_pm_idx_exists(pd_provider, args.args[0])) {
++ index++;
++ continue;
++ }
++ }
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
+index 4f05f610391b00..c02ce0834c2cd5 100644
+--- a/drivers/pnp/pnpacpi/rsparser.c
++++ b/drivers/pnp/pnpacpi/rsparser.c
+@@ -151,13 +151,13 @@ static int vendor_resource_matches(struct pnp_dev *dev,
+ static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
+ struct acpi_resource_vendor_typed *vendor)
+ {
+- if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
+- u64 start, length;
++ struct { u64 start, length; } range;
+
+- memcpy(&start, vendor->byte_data, sizeof(start));
+- memcpy(&length, vendor->byte_data + 8, sizeof(length));
+-
+- pnp_add_mem_resource(dev, start, start + length - 1, 0);
++ if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid,
++ sizeof(range))) {
++ memcpy(&range, vendor->byte_data, sizeof(range));
++ pnp_add_mem_resource(dev, range.start, range.start +
++ range.length - 1, 0);
+ }
+ }
+
+diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
+index 0f2944dc935516..a04713f191a112 100644
+--- a/drivers/power/reset/brcmstb-reboot.c
++++ b/drivers/power/reset/brcmstb-reboot.c
+@@ -62,9 +62,6 @@ static int brcmstb_restart_handler(struct notifier_block *this,
+ return NOTIFY_DONE;
+ }
+
+- while (1)
+- ;
+-
+ return NOTIFY_DONE;
+ }
+
+diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
+index 6ac5c80cfda214..7520b599eb3d17 100644
+--- a/drivers/power/supply/axp20x_battery.c
++++ b/drivers/power/supply/axp20x_battery.c
+@@ -303,11 +303,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
+ val->intval = reg & AXP209_FG_PERCENT;
+ break;
+
+- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
++ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return axp20x_batt->data->get_max_voltage(axp20x_batt,
+ &val->intval);
+
+- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
++ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, &reg);
+ if (ret)
+ return ret;
+@@ -455,10 +455,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
+ struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
++ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return axp20x_set_voltage_min_design(axp20x_batt, val->intval);
+
+- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
++ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+@@ -493,8 +493,8 @@ static enum power_supply_property axp20x_battery_props[] = {
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_HEALTH,
+- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
++ POWER_SUPPLY_PROP_VOLTAGE_MAX,
++ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CAPACITY,
+ };
+
+@@ -502,8 +502,8 @@ static int axp20x_battery_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+ {
+ return psp == POWER_SUPPLY_PROP_STATUS ||
+- psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN ||
+- psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN ||
++ psp == POWER_SUPPLY_PROP_VOLTAGE_MIN ||
++ psp == POWER_SUPPLY_PROP_VOLTAGE_MAX ||
+ psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT ||
+ psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
+ }
+diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
+index b5903193e2f96d..ac05942e4e6ac1 100644
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
+ u8 reg_val;
+ int ret;
+
+- if (cv <= CV_4100MV) {
+- reg_val = CHRG_CCCV_CV_4100MV;
+- cv = CV_4100MV;
+- } else if (cv <= CV_4150MV) {
+- reg_val = CHRG_CCCV_CV_4150MV;
+- cv = CV_4150MV;
+- } else if (cv <= CV_4200MV) {
++ if (cv >= CV_4350MV) {
++ reg_val = CHRG_CCCV_CV_4350MV;
++ cv = CV_4350MV;
++ } else if (cv >= CV_4200MV) {
+ reg_val = CHRG_CCCV_CV_4200MV;
+ cv = CV_4200MV;
++ } else if (cv >= CV_4150MV) {
++ reg_val = CHRG_CCCV_CV_4150MV;
++ cv = CV_4150MV;
+ } else {
+- reg_val = CHRG_CCCV_CV_4350MV;
+- cv = CV_4350MV;
++ reg_val = CHRG_CCCV_CV_4100MV;
++ cv = CV_4100MV;
+ }
+
+ reg_val = reg_val << CHRG_CCCV_CV_BIT_POS;
+@@ -337,8 +337,8 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
+ }
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+- scaled_val = min(val->intval, info->max_cv);
+- scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000);
++ scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000);
++ scaled_val = min(scaled_val, info->max_cv);
+ ret = axp288_charger_set_cv(info, scaled_val);
+ if (ret < 0) {
+ dev_warn(&info->pdev->dev, "set charge voltage failed\n");
+diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
+index 82d3cd5ee2f92f..c8368dae69c712 100644
+--- a/drivers/power/supply/bq256xx_charger.c
++++ b/drivers/power/supply/bq256xx_charger.c
+@@ -1574,13 +1574,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
+ wd_reg_val = i;
+ break;
+ }
+- if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
++ if (i + 1 < BQ256XX_NUM_WD_VAL &&
++ bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+ bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
+ wd_reg_val = i;
+ }
+ ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
+ BQ256XX_WATCHDOG_MASK, wd_reg_val <<
+ BQ256XX_WDT_BIT_SHIFT);
++ if (ret)
++ return ret;
+
+ ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret == -ENOMEM)
+diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
+index 9b5475590518fb..886e0a8e2abd1e 100644
+--- a/drivers/power/supply/bq27xxx_battery_i2c.c
++++ b/drivers/power/supply/bq27xxx_battery_i2c.c
+@@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
+ {
+ struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+
+- free_irq(client->irq, di);
++ if (client->irq)
++ free_irq(client->irq, di);
++
+ bq27xxx_battery_teardown(di);
+
+ mutex_lock(&battery_mutex);
+diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
+index b6c96376776a9a..8008e31c0c0987 100644
+--- a/drivers/power/supply/cros_usbpd-charger.c
++++ b/drivers/power/supply/cros_usbpd-charger.c
+@@ -5,6 +5,7 @@
+ * Copyright (c) 2014 - 2018 Google, Inc
+ */
+
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+@@ -711,16 +712,22 @@ static int cros_usbpd_charger_resume(struct device *dev)
+ static SIMPLE_DEV_PM_OPS(cros_usbpd_charger_pm_ops, NULL,
+ cros_usbpd_charger_resume);
+
++static const struct platform_device_id cros_usbpd_charger_id[] = {
++ { DRV_NAME, 0 },
++ {}
++};
++MODULE_DEVICE_TABLE(platform, cros_usbpd_charger_id);
++
+ static struct platform_driver cros_usbpd_charger_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_usbpd_charger_pm_ops,
+ },
+- .probe = cros_usbpd_charger_probe
++ .probe = cros_usbpd_charger_probe,
++ .id_table = cros_usbpd_charger_id,
+ };
+
+ module_platform_driver(cros_usbpd_charger_driver);
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("ChromeOS EC USBPD charger");
+-MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
+index bb29e9ebd24a8e..99f3ccdc30a6a7 100644
+--- a/drivers/power/supply/cw2015_battery.c
++++ b/drivers/power/supply/cw2015_battery.c
+@@ -491,7 +491,7 @@ static int cw_battery_get_property(struct power_supply *psy,
+
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ if (cw_battery_valid_time_to_empty(cw_bat))
+- val->intval = cw_bat->time_to_empty;
++ val->intval = cw_bat->time_to_empty * 60;
+ else
+ val->intval = 0;
+ break;
+diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
+index 17ac2ab78c4e4c..ab97dd7ca5cb69 100644
+--- a/drivers/power/supply/max17042_battery.c
++++ b/drivers/power/supply/max17042_battery.c
+@@ -853,7 +853,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
+ /* program interrupt thresholds such that we should
+ * get interrupt for every 'off' perc change in the soc
+ */
+- regmap_read(map, MAX17042_RepSOC, &soc);
++ if (chip->pdata->enable_current_sense)
++ regmap_read(map, MAX17042_RepSOC, &soc);
++ else
++ regmap_read(map, MAX17042_VFSOC, &soc);
+ soc >>= 8;
+ soc_tr = (soc + off) << 8;
+ if (off < soc)
+diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
+index 1305cba61edd4b..aca123783efccd 100644
+--- a/drivers/power/supply/mt6360_charger.c
++++ b/drivers/power/supply/mt6360_charger.c
+@@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
+ };
+
+ static const struct regulator_desc mt6360_otg_rdesc = {
+- .of_match = "usb-otg-vbus",
++ .of_match = "usb-otg-vbus-regulator",
+ .name = "usb-otg-vbus",
+ .ops = &mt6360_chg_otg_ops,
+ .owner = THIS_MODULE,
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 0b69fb7bafd85d..416409e2fd6da2 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -29,7 +29,7 @@
+ struct class *power_supply_class;
+ EXPORT_SYMBOL_GPL(power_supply_class);
+
+-ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
++BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
+ EXPORT_SYMBOL_GPL(power_supply_notifier);
+
+ static struct device_type power_supply_dev_type;
+@@ -97,7 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
+ power_supply_update_leds(psy);
+- atomic_notifier_call_chain(&power_supply_notifier,
++ blocking_notifier_call_chain(&power_supply_notifier,
+ PSY_EVENT_PROP_CHANGED, psy);
+ kobject_uevent(&psy->dev.kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+@@ -1262,13 +1262,13 @@ static void power_supply_dev_release(struct device *dev)
+
+ int power_supply_reg_notifier(struct notifier_block *nb)
+ {
+- return atomic_notifier_chain_register(&power_supply_notifier, nb);
++ return blocking_notifier_chain_register(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
+
+ void power_supply_unreg_notifier(struct notifier_block *nb)
+ {
+- atomic_notifier_chain_unregister(&power_supply_notifier, nb);
++ blocking_notifier_chain_unregister(&power_supply_notifier, nb);
+ }
+ EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
+
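
The power_supply_core.c hunks above convert the notifier chain from atomic to blocking. Atomic chains invoke callbacks under rcu_read_lock(), so a listener could not sleep (no I2C/regmap access, no mutexes); a blocking chain runs its callees in process context under an rwsem. A minimal consumer of the chain looks like this (sketch, hypothetical listener):

        static int foo_psy_event(struct notifier_block *nb, unsigned long event,
                                 void *data)
        {
                struct power_supply *psy = data;

                if (event == PSY_EVENT_PROP_CHANGED) {
                        /* sleeping is now allowed here */
                        pr_info("property change on %s\n", psy->desc->name);
                }

                return NOTIFY_OK;
        }

        static struct notifier_block foo_nb = { .notifier_call = foo_psy_event };

        /* probe:  */ power_supply_reg_notifier(&foo_nb);
        /* remove: */ power_supply_unreg_notifier(&foo_nb);
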
+diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
+index c97893d4c25eb1..6465b5e4a3879c 100644
+--- a/drivers/power/supply/power_supply_hwmon.c
++++ b/drivers/power/supply/power_supply_hwmon.c
+@@ -299,7 +299,8 @@ static const struct hwmon_channel_info * const power_supply_hwmon_info[] = {
+ HWMON_T_INPUT |
+ HWMON_T_MAX |
+ HWMON_T_MIN |
+- HWMON_T_MIN_ALARM,
++ HWMON_T_MIN_ALARM |
++ HWMON_T_MAX_ALARM,
+
+ HWMON_T_LABEL |
+ HWMON_T_INPUT |
+diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
+index ec163d1bcd1891..5b3681b9100c1e 100644
+--- a/drivers/power/supply/qcom_battmgr.c
++++ b/drivers/power/supply/qcom_battmgr.c
+@@ -486,7 +486,7 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+@@ -683,7 +683,7 @@ static int qcom_battmgr_ac_get_property(struct power_supply *psy,
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+ if (ret)
+@@ -748,7 +748,7 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy,
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+@@ -867,7 +867,7 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy,
+ int ret;
+
+ if (!battmgr->service_up)
+- return -ENODEV;
++ return -EAGAIN;
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
+@@ -1384,12 +1384,16 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
+ "failed to register wireless charing power supply\n");
+ }
+
+- battmgr->client = devm_pmic_glink_register_client(dev,
+- PMIC_GLINK_OWNER_BATTMGR,
+- qcom_battmgr_callback,
+- qcom_battmgr_pdr_notify,
+- battmgr);
+- return PTR_ERR_OR_ZERO(battmgr->client);
++ battmgr->client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_BATTMGR,
++ qcom_battmgr_callback,
++ qcom_battmgr_pdr_notify,
++ battmgr);
++ if (IS_ERR(battmgr->client))
++ return PTR_ERR(battmgr->client);
++
++ pmic_glink_client_register(battmgr->client);
++
++ return 0;
+ }
+
+ static const struct auxiliary_device_id qcom_battmgr_id_table[] = {
+diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
+index 10f4dd0caca177..22c7c0e7c52286 100644
+--- a/drivers/power/supply/qcom_pmi8998_charger.c
++++ b/drivers/power/supply/qcom_pmi8998_charger.c
+@@ -973,10 +973,14 @@ static int smb2_probe(struct platform_device *pdev)
+ supply_config.of_node = pdev->dev.of_node;
+
+ desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
+ memcpy(desc, &smb2_psy_desc, sizeof(smb2_psy_desc));
+ desc->name =
+ devm_kasprintf(chip->dev, GFP_KERNEL, "%s-charger",
+ (const char *)device_get_match_data(chip->dev));
++ if (!desc->name)
++ return -ENOMEM;
+
+ chip->chg_psy =
+ devm_power_supply_register(chip->dev, desc, &supply_config);
+diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
+index c345a77f9f78c0..e4dbacd50a437d 100644
+--- a/drivers/power/supply/rt9455_charger.c
++++ b/drivers/power/supply/rt9455_charger.c
+@@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = {
+ 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
+ };
+
++#if IS_ENABLED(CONFIG_USB_PHY)
+ /*
+ * When the charger is in boost mode, REG02[7:2] represent boost output
+ * voltage.
+@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = {
+ 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
+ 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
+ };
++#endif
+
+ /* REG07[3:0] (VMREG) in uV */
+ static const int rt9455_vmreg_values[] = {
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 2ff7717530bf85..ae7ee611978ba1 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -24,7 +24,6 @@
+ #include <linux/of.h>
+ #include <linux/pm_qos.h>
+ #include <linux/slab.h>
+-#include <linux/units.h>
+
+ struct dtpm_cpu {
+ struct dtpm dtpm;
+@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ if (pd->table[i].frequency < freq)
+ continue;
+
+- return scale_pd_power_uw(pd_mask, pd->table[i].power *
+- MICROWATT_PER_MILLIWATT);
++ return scale_pd_power_uw(pd_mask, pd->table[i].power);
+ }
+
+ return 0;
+@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ nr_cpus = cpumask_weight(&cpus);
+
+ dtpm->power_min = em->table[0].power;
+- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_min *= nr_cpus;
+
+ dtpm->power_max = em->table[em->nr_perf_states - 1].power;
+- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_max *= nr_cpus;
+
+ return 0;
+@@ -144,6 +140,8 @@ static void pd_release(struct dtpm *dtpm)
+ if (policy) {
+ for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
++
++ cpufreq_cpu_put(policy);
+ }
+
+ kfree(dtpm_cpu);
+@@ -195,12 +193,16 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ return 0;
+
+ pd = em_cpu_get(cpu);
+- if (!pd || em_is_artificial(pd))
+- return -EINVAL;
++ if (!pd || em_is_artificial(pd)) {
++ ret = -EINVAL;
++ goto release_policy;
++ }
+
+ dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
+- if (!dtpm_cpu)
+- return -ENOMEM;
++ if (!dtpm_cpu) {
++ ret = -ENOMEM;
++ goto release_policy;
++ }
+
+ dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
+ dtpm_cpu->cpu = cpu;
+@@ -217,9 +219,10 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ ret = freq_qos_add_request(&policy->constraints,
+ &dtpm_cpu->qos_req, FREQ_QOS_MAX,
+ pd->table[pd->nr_perf_states - 1].frequency);
+- if (ret)
++ if (ret < 0)
+ goto out_dtpm_unregister;
+
++ cpufreq_cpu_put(policy);
+ return 0;
+
+ out_dtpm_unregister:
+@@ -231,6 +234,8 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ per_cpu(dtpm_per_cpu, cpu) = NULL;
+ kfree(dtpm_cpu);
+
++release_policy:
++ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
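
The dtpm_cpu.c hunks above plug a reference leak: cpufreq_cpu_get() returns the policy with its reference count raised, and every exit path — success and each error branch — must balance it with cpufreq_cpu_put(). Canonical shape (sketch; do_setup() is a hypothetical stand-in):

        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        int ret;

        if (!policy)
                return 0;               /* no policy registered for this CPU */

        ret = do_setup(policy);         /* the actual work */

        cpufreq_cpu_put(policy);        /* drop the reference on all paths */
        return ret;
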
+diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
+index 91276761a31d94..612c3b59dd5bef 100644
+--- a/drivers/powercap/dtpm_devfreq.c
++++ b/drivers/powercap/dtpm_devfreq.c
+@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ struct em_perf_domain *pd = em_pd_get(dev);
+
+ dtpm->power_min = pd->table[0].power;
+- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+
+ dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+
+ return 0;
+ }
+@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ unsigned long freq;
+- u64 power;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+-
+- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+- if (power > power_limit)
++ if (pd->table[i].power > power_limit)
+ break;
+ }
+
+@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+
+ dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
+
+- power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
++ power_limit = pd->table[i - 1].power;
+
+ return power_limit;
+ }
+@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ if (pd->table[i].frequency < freq)
+ continue;
+
+- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
++ power = pd->table[i].power;
+ power *= status.busy_time;
+ power >>= 10;
+
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index 40a2cc649c79b4..f1de4111e98d9d 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -5,6 +5,7 @@
+ */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/cleanup.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/list.h>
+@@ -737,7 +738,7 @@ static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim)
+ {
+ struct rapl_primitive_info *rpi = rp->priv->rpi;
+
+- if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi)
++ if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi)
+ return NULL;
+
+ return &rpi[prim];
+@@ -759,6 +760,11 @@ static int rapl_config(struct rapl_package *rp)
+ default:
+ return -EINVAL;
+ }
++
++ /* defaults_msr can be NULL on unsupported platforms */
++ if (!rp->priv->defaults || !rp->priv->rpi)
++ return -ENODEV;
++
+ return 0;
+ }
+
+@@ -892,7 +898,7 @@ static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
+ return -EINVAL;
+
+ if (rd->rpl[pl].locked) {
+- pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
++ pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
+ return -EACCES;
+ }
+
+@@ -1274,6 +1280,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
+
+ X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
+ X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
++ X86_MATCH_VENDOR_FAM(AMD, 0x1A, &rapl_defaults_amd),
+ X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd),
+ {}
+ };
+@@ -1499,7 +1506,7 @@ static int rapl_detect_domains(struct rapl_package *rp)
+ }
+
+ /* called from CPU hotplug notifier, hotplug lock held */
+-void rapl_remove_package(struct rapl_package *rp)
++void rapl_remove_package_cpuslocked(struct rapl_package *rp)
+ {
+ struct rapl_domain *rd, *rd_package = NULL;
+
+@@ -1528,10 +1535,18 @@ void rapl_remove_package(struct rapl_package *rp)
+ list_del(&rp->plist);
+ kfree(rp);
+ }
++EXPORT_SYMBOL_GPL(rapl_remove_package_cpuslocked);
++
++void rapl_remove_package(struct rapl_package *rp)
++{
++ guard(cpus_read_lock)();
++ rapl_remove_package_cpuslocked(rp);
++}
+ EXPORT_SYMBOL_GPL(rapl_remove_package);
+
+ /* caller to ensure CPU hotplug lock is held */
+-struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
++struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
++ bool id_is_cpu)
+ {
+ struct rapl_package *rp;
+ int uid;
+@@ -1549,10 +1564,17 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv,
+
+ return NULL;
+ }
++EXPORT_SYMBOL_GPL(rapl_find_package_domain_cpuslocked);
++
++struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
++{
++ guard(cpus_read_lock)();
++ return rapl_find_package_domain_cpuslocked(id, priv, id_is_cpu);
++}
+ EXPORT_SYMBOL_GPL(rapl_find_package_domain);
+
+ /* called from CPU hotplug notifier, hotplug lock held */
+-struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
++struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+ {
+ struct rapl_package *rp;
+ int ret;
+@@ -1598,6 +1620,13 @@ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id
+ kfree(rp);
+ return ERR_PTR(ret);
+ }
++EXPORT_SYMBOL_GPL(rapl_add_package_cpuslocked);
++
++struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
++{
++ guard(cpus_read_lock)();
++ return rapl_add_package_cpuslocked(id, priv, id_is_cpu);
++}
+ EXPORT_SYMBOL_GPL(rapl_add_package);
+
+ static void power_limit_state_save(void)
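
The intel_rapl_common.c hunks above split each hotplug-path entry point into a *_cpuslocked worker plus a thin wrapper built on guard(cpus_read_lock)() from <linux/cleanup.h>: the guard takes cpus_read_lock() where it is declared and releases it automatically on every return path, removing the unlock bookkeeping. The pattern in isolation (sketch):

        /* worker: caller must already hold the CPU hotplug read lock */
        static int foo_do_cpuslocked(int id)
        {
                lockdep_assert_cpus_held();
                /* ... real work ... */
                return 0;
        }

        int foo_do(int id)
        {
                guard(cpus_read_lock)();        /* dropped automatically on return */
                return foo_do_cpuslocked(id);
        }
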
+diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
+index 250bd41a588c7d..b4b6930cacb0b1 100644
+--- a/drivers/powercap/intel_rapl_msr.c
++++ b/drivers/powercap/intel_rapl_msr.c
+@@ -73,9 +73,9 @@ static int rapl_cpu_online(unsigned int cpu)
+ {
+ struct rapl_package *rp;
+
+- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
++ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
+ if (!rp) {
+- rp = rapl_add_package(cpu, rapl_msr_priv, true);
++ rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true);
+ if (IS_ERR(rp))
+ return PTR_ERR(rp);
+ }
+@@ -88,14 +88,14 @@ static int rapl_cpu_down_prep(unsigned int cpu)
+ struct rapl_package *rp;
+ int lead_cpu;
+
+- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
++ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
+ if (!rp)
+ return 0;
+
+ cpumask_clear_cpu(cpu, &rp->cpumask);
+ lead_cpu = cpumask_first(&rp->cpumask);
+ if (lead_cpu >= nr_cpu_ids)
+- rapl_remove_package(rp);
++ rapl_remove_package_cpuslocked(rp);
+ else if (rp->lead_cpu == cpu)
+ rp->lead_cpu = lead_cpu;
+ return 0;
+diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c
+index 891c90fefd8b72..1c48dba0ba96af 100644
+--- a/drivers/powercap/intel_rapl_tpmi.c
++++ b/drivers/powercap/intel_rapl_tpmi.c
+@@ -15,7 +15,8 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+
+-#define TPMI_RAPL_VERSION 1
++#define TPMI_RAPL_MAJOR_VERSION 0
++#define TPMI_RAPL_MINOR_VERSION 1
+
+ /* 1 header + 10 registers + 5 reserved. 8 bytes for each. */
+ #define TPMI_RAPL_DOMAIN_SIZE 128
+@@ -40,6 +41,7 @@ enum tpmi_rapl_register {
+ TPMI_RAPL_REG_ENERGY_STATUS,
+ TPMI_RAPL_REG_PERF_STATUS,
+ TPMI_RAPL_REG_POWER_INFO,
++ TPMI_RAPL_REG_DOMAIN_INFO,
+ TPMI_RAPL_REG_INTERRUPT,
+ TPMI_RAPL_REG_MAX = 15,
+ };
+@@ -130,6 +132,12 @@ static void trp_release(struct tpmi_rapl_package *trp)
+ mutex_unlock(&tpmi_rapl_lock);
+ }
+
++/*
++ * Bit 0 of TPMI_RAPL_REG_DOMAIN_INFO indicates if the current package is a domain
++ * root or not. Only domain root packages can enumerate System (Psys) Domain.
++ */
++#define TPMI_RAPL_DOMAIN_ROOT BIT(0)
++
+ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
+ {
+ u8 tpmi_domain_version;
+@@ -139,6 +147,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
+ enum rapl_domain_reg_id reg_id;
+ int tpmi_domain_size, tpmi_domain_flags;
+ u64 tpmi_domain_header = readq(trp->base + offset);
++ u64 tpmi_domain_info;
+
+ /* Domain Parent bits are ignored for now */
+ tpmi_domain_version = tpmi_domain_header & 0xff;
+@@ -146,11 +155,21 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
+ tpmi_domain_size = tpmi_domain_header >> 16 & 0xff;
+ tpmi_domain_flags = tpmi_domain_header >> 32 & 0xffff;
+
+- if (tpmi_domain_version != TPMI_RAPL_VERSION) {
+- pr_warn(FW_BUG "Unsupported version:%d\n", tpmi_domain_version);
++ if (tpmi_domain_version == TPMI_VERSION_INVALID) {
++ pr_warn(FW_BUG "Invalid version\n");
++ return -ENODEV;
++ }
++
++ if (TPMI_MAJOR_VERSION(tpmi_domain_version) != TPMI_RAPL_MAJOR_VERSION) {
++ pr_warn(FW_BUG "Unsupported major version:%ld\n",
++ TPMI_MAJOR_VERSION(tpmi_domain_version));
+ return -ENODEV;
+ }
+
++ if (TPMI_MINOR_VERSION(tpmi_domain_version) > TPMI_RAPL_MINOR_VERSION)
++ pr_info("Ignore: Unsupported minor version:%ld\n",
++ TPMI_MINOR_VERSION(tpmi_domain_version));
++
+ /* Domain size: in unit of 128 Bytes */
+ if (tpmi_domain_size != 1) {
+ pr_warn(FW_BUG "Invalid Domain size %d\n", tpmi_domain_size);
+@@ -169,6 +188,13 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
+ domain_type = RAPL_DOMAIN_PACKAGE;
+ break;
+ case TPMI_RAPL_DOMAIN_SYSTEM:
++ if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_DOMAIN_INFO))) {
++ pr_warn(FW_BUG "System domain must support Domain Info register\n");
++ return -ENODEV;
++ }
++ tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO * 8);
++ if (!(tpmi_domain_info & TPMI_RAPL_DOMAIN_ROOT))
++ return 0;
+ domain_type = RAPL_DOMAIN_PLATFORM;
+ break;
+ case TPMI_RAPL_DOMAIN_MEMORY:
+diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
+index 42f93d4c6ee329..53e9c304ae0a7a 100644
+--- a/drivers/pps/clients/pps_parport.c
++++ b/drivers/pps/clients/pps_parport.c
+@@ -148,7 +148,10 @@ static void parport_attach(struct parport *port)
+ return;
+ }
+
+- index = ida_simple_get(&pps_client_index, 0, 0, GFP_KERNEL);
++ index = ida_alloc(&pps_client_index, GFP_KERNEL);
++ if (index < 0)
++ goto err_free_device;
++
+ memset(&pps_client_cb, 0, sizeof(pps_client_cb));
+ pps_client_cb.private = device;
+ pps_client_cb.irq_func = parport_irq;
+@@ -159,7 +162,7 @@ static void parport_attach(struct parport *port)
+ index);
+ if (!device->pardev) {
+ pr_err("couldn't register with %s\n", port->name);
+- goto err_free;
++ goto err_free_ida;
+ }
+
+ if (parport_claim_or_block(device->pardev) < 0) {
+@@ -187,8 +190,9 @@ static void parport_attach(struct parport *port)
+ parport_release(device->pardev);
+ err_unregister_dev:
+ parport_unregister_device(device->pardev);
+-err_free:
+- ida_simple_remove(&pps_client_index, index);
++err_free_ida:
++ ida_free(&pps_client_index, index);
++err_free_device:
+ kfree(device);
+ }
+
+@@ -208,7 +212,7 @@ static void parport_detach(struct parport *port)
+ pps_unregister_source(device->pps);
+ parport_release(pardev);
+ parport_unregister_device(pardev);
+- ida_simple_remove(&pps_client_index, device->index);
++ ida_free(&pps_client_index, device->index);
+ kfree(device);
+ }
+
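
The pps_parport.c hunks above migrate from the deprecated ida_simple_get()/ida_simple_remove() pair to ida_alloc()/ida_free(), and — the substantive fix — actually check the allocation result and unwind through ordered error labels. The skeleton of that unwind (sketch; register_things() is hypothetical):

        static DEFINE_IDA(foo_ida);

        int foo_attach(void)
        {
                int index = ida_alloc(&foo_ida, GFP_KERNEL);

                if (index < 0)
                        return index;   /* -ENOMEM/-ENOSPC, previously ignored */

                if (register_things(index) < 0)
                        goto err_free_ida;

                return 0;

        err_free_ida:
                ida_free(&foo_ida, index);
                return -ENODEV;
        }
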
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 362bf756e6b78b..91cc6ffa0095e0 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -84,7 +84,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ }
+
+ if (info->verify(info, pin, func, chan)) {
+- pr_err("driver cannot use function %u on pin %u\n", func, chan);
++ pr_err("driver cannot use function %u and channel %u on pin %u\n",
++ func, chan, pin);
+ return -EOPNOTSUPP;
+ }
+
+@@ -490,7 +491,8 @@ ssize_t ptp_read(struct posix_clock *pc,
+
+ for (i = 0; i < cnt; i++) {
+ event[i] = queue->buf[queue->head];
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ /* Paired with READ_ONCE() in queue_cnt() */
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 80f74e38c2da4b..9a50bfb56453c5 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -56,10 +56,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ dst->t.sec = seconds;
+ dst->t.nsec = remainder;
+
++ /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
+ if (!queue_free(queue))
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+
+- queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
++ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+ }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 75f58fc468a711..b8d4f61f14be4f 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -76,9 +76,13 @@ struct ptp_vclock {
+ * that a writer might concurrently increment the tail does not
+ * matter, since the queue remains nonempty nonetheless.
+ */
+-static inline int queue_cnt(struct timestamp_event_queue *q)
++static inline int queue_cnt(const struct timestamp_event_queue *q)
+ {
+- int cnt = q->tail - q->head;
++ /*
++ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
++ * ptp_read(), extts_fifo_show().
++ */
++ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
+ return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
+ }
+
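
queue_cnt() is the lock-free reader here: writers update head and tail under queue->lock, but a reader may sample both indices without it, so the READ_ONCE()/WRITE_ONCE() pairs keep the compiler from tearing, caching, or refetching the accesses. A stripped-down sketch of the pairing (ring and ring_cnt are illustrative names):

    #include <linux/compiler.h>

    #define RING_SIZE 128

    struct ring {
        int head;
        int tail;
    };

    /* writer side, called with the producer lock held */
    static void ring_advance_tail(struct ring *r)
    {
        /* Paired with READ_ONCE() in ring_cnt() */
        WRITE_ONCE(r->tail, (r->tail + 1) % RING_SIZE);
    }

    /* reader side, may run without the lock */
    static int ring_cnt(const struct ring *r)
    {
        int cnt = READ_ONCE(r->tail) - READ_ONCE(r->head);

        return cnt < 0 ? RING_SIZE + cnt : cnt;
    }
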
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 6e4d5456a88511..aefc06ae5d0995 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -90,7 +90,8 @@ static ssize_t extts_fifo_show(struct device *dev,
+ qcnt = queue_cnt(queue);
+ if (qcnt) {
+ event = queue->buf[queue->head];
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ /* Paired with READ_ONCE() in queue_cnt() */
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+@@ -293,8 +294,7 @@ static ssize_t max_vclocks_store(struct device *dev,
+ if (max < ptp->n_vclocks)
+ goto out;
+
+- size = sizeof(int) * max;
+- vclock_index = kzalloc(size, GFP_KERNEL);
++ vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
+ if (!vclock_index) {
+ err = -ENOMEM;
+ goto out;
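
The max_vclocks_store() hunk replaces an open-coded kzalloc(sizeof(int) * max, ...) with kcalloc(), which returns NULL if the element-count multiplication would overflow rather than handing back an undersized buffer. The general pattern, as a sketch:

    #include <linux/slab.h>

    static int *alloc_index_table(unsigned int max)
    {
        /*
         * kcalloc(n, size, flags) checks n * size for overflow;
         * kzalloc(n * size, flags) silently wraps on overflow.
         */
        return kcalloc(max, sizeof(int), GFP_KERNEL);
    }
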
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index dc66e3405bf50b..a1a355ba238371 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -176,7 +176,7 @@ of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
+ pwm->args.period = args->args[0];
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+
+- if (args->args_count == 2 && args->args[2] & PWM_POLARITY_INVERTED)
++ if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+
+ return pwm;
+@@ -382,8 +382,8 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ }
+ EXPORT_SYMBOL_GPL(pwm_request_from_chip);
+
+-static void pwm_apply_state_debug(struct pwm_device *pwm,
+- const struct pwm_state *state)
++static void pwm_apply_debug(struct pwm_device *pwm,
++ const struct pwm_state *state)
+ {
+ struct pwm_state *last = &pwm->last;
+ struct pwm_chip *chip = pwm->chip;
+@@ -489,11 +489,11 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
+ }
+
+ /**
+- * pwm_apply_state() - atomically apply a new state to a PWM device
++ * pwm_apply_might_sleep() - atomically apply a new state to a PWM device
+ * @pwm: PWM device
+ * @state: new state to apply
+ */
+-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
++int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state)
+ {
+ struct pwm_chip *chip;
+ int err;
+@@ -501,7 +501,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
+ /*
+ * Some lowlevel driver's implementations of .apply() make use of
+ * mutexes, also with some drivers only returning when the new
+- * configuration is active calling pwm_apply_state() from atomic context
++ * configuration is active, calling pwm_apply_might_sleep() from atomic context
+ * is a bad idea. So make it explicit that calling this function might
+ * sleep.
+ */
+@@ -531,11 +531,11 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
+ * only do this after pwm->state was applied as some
+ * implementations of .get_state depend on this
+ */
+- pwm_apply_state_debug(pwm, state);
++ pwm_apply_debug(pwm, state);
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(pwm_apply_state);
++EXPORT_SYMBOL_GPL(pwm_apply_might_sleep);
+
+ /**
+ * pwm_capture() - capture and report a PWM signal
+@@ -593,7 +593,7 @@ int pwm_adjust_config(struct pwm_device *pwm)
+ state.period = pargs.period;
+ state.polarity = pargs.polarity;
+
+- return pwm_apply_state(pwm, &state);
++ return pwm_apply_might_sleep(pwm, &state);
+ }
+
+ /*
+@@ -616,7 +616,7 @@ int pwm_adjust_config(struct pwm_device *pwm)
+ state.duty_cycle = state.period - state.duty_cycle;
+ }
+
+- return pwm_apply_state(pwm, &state);
++ return pwm_apply_might_sleep(pwm, &state);
+ }
+ EXPORT_SYMBOL_GPL(pwm_adjust_config);
+
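
This hunk carries the tree-wide rename of pwm_apply_state() to pwm_apply_might_sleep(); signature and semantics are unchanged, the new name simply makes the might_sleep() constraint visible at every call site. A sketch of consumer usage under the new name (set_half_duty and the 1 ms period are illustrative):

    #include <linux/pwm.h>

    static int set_half_duty(struct pwm_device *pwm)
    {
        struct pwm_state state;

        pwm_get_state(pwm, &state);
        state.period = 1000000;     /* 1 ms, in ns */
        state.duty_cycle = 500000;  /* 50% */
        state.enabled = true;

        /* May sleep: never call from atomic context. */
        return pwm_apply_might_sleep(pwm, &state);
    }
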
+diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
+index e271d920151e47..5f6612e1dd34dd 100644
+--- a/drivers/pwm/pwm-atmel-hlcdc.c
++++ b/drivers/pwm/pwm-atmel-hlcdc.c
+@@ -187,7 +187,7 @@ static int atmel_hlcdc_pwm_suspend(struct device *dev)
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
+
+ /* Keep the periph clock enabled if the PWM is still running. */
+- if (pwm_is_enabled(&atmel->chip.pwms[0]))
++ if (!pwm_is_enabled(&atmel->chip.pwms[0]))
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
+
+ return 0;
+diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
+index c00dd37c5fbd86..06df8c6127416c 100644
+--- a/drivers/pwm/pwm-atmel-tcb.c
++++ b/drivers/pwm/pwm-atmel-tcb.c
+@@ -82,7 +82,8 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+ tcbpwm->period = 0;
+ tcbpwm->div = 0;
+
+- spin_lock(&tcbpwmc->lock);
++ guard(spinlock)(&tcbpwmc->lock);
++
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
+ /*
+ * Get init config from Timer Counter registers if
+@@ -108,7 +109,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
+
+ cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
+- spin_unlock(&tcbpwmc->lock);
+
+ return 0;
+ }
+@@ -138,7 +138,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
+ if (tcbpwm->duty == 0)
+ polarity = !polarity;
+
+- spin_lock(&tcbpwmc->lock);
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
+
+ /* flush old setting and set the new one */
+@@ -173,8 +172,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
+ ATMEL_TC_SWTRG);
+ tcbpwmc->bkup.enabled = 0;
+ }
+-
+- spin_unlock(&tcbpwmc->lock);
+ }
+
+ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
+@@ -195,7 +192,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
+ if (tcbpwm->duty == 0)
+ polarity = !polarity;
+
+- spin_lock(&tcbpwmc->lock);
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
+
+ /* flush old setting and set the new one */
+@@ -257,7 +253,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG | ATMEL_TC_CLKEN);
+ tcbpwmc->bkup.enabled = 1;
+- spin_unlock(&tcbpwmc->lock);
+ return 0;
+ }
+
+@@ -342,9 +337,12 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+ {
++ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
+ int duty_cycle, period;
+ int ret;
+
++ guard(spinlock)(&tcbpwmc->lock);
++
+ if (!state->enabled) {
+ atmel_tcb_pwm_disable(chip, pwm, state->polarity);
+ return 0;
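
The atmel-tcb conversion drops manual spin_lock()/spin_unlock() pairs in favour of guard(spinlock)() from <linux/cleanup.h>, which releases the lock automatically when the enclosing scope is left, on every return path. A minimal sketch (my_lock and my_op are illustrative):

    #include <linux/cleanup.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);

    static int my_op(bool fail)
    {
        guard(spinlock)(&my_lock);  /* unlocked automatically at scope exit */

        if (fail)
            return -EINVAL;         /* no explicit spin_unlock() needed */

        return 0;
    }

Here atmel_tcb_pwm_apply() takes the guard once, and the enable/disable helpers it calls now run with the lock already held, which is why their own lock/unlock pairs are removed.
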
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index a3faa9a3de7ccf..a7d529bf76adca 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -288,7 +288,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
+ {
+ struct brcmstb_pwm *p = dev_get_drvdata(dev);
+
+- clk_disable(p->clk);
++ clk_disable_unprepare(p->clk);
+
+ return 0;
+ }
+@@ -297,7 +297,7 @@ static int brcmstb_pwm_resume(struct device *dev)
+ {
+ struct brcmstb_pwm *p = dev_get_drvdata(dev);
+
+- clk_enable(p->clk);
++ clk_prepare_enable(p->clk);
+
+ return 0;
+ }
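
The brcmstb suspend/resume fix switches to the combined clock helpers so the prepare and enable refcounts stay balanced across a suspend/resume cycle, and so clocks whose gating happens at the prepare stage are actually shut off while suspended. Sketch of the pairing:

    #include <linux/clk.h>

    static int my_suspend(struct clk *clk)
    {
        clk_disable_unprepare(clk);     /* drops both enable and prepare counts */
        return 0;
    }

    static int my_resume(struct clk *clk)
    {
        return clk_prepare_enable(clk); /* may sleep, and may fail */
    }
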
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 326af85888e7b4..fd1535c47b6dba 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -289,9 +289,9 @@ static int img_pwm_probe(struct platform_device *pdev)
+ return PTR_ERR(imgchip->sys_clk);
+ }
+
+- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
++ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ if (IS_ERR(imgchip->pwm_clk)) {
+- dev_err(&pdev->dev, "failed to get imgchip clock\n");
++ dev_err(&pdev->dev, "failed to get pwm clock\n");
+ return PTR_ERR(imgchip->pwm_clk);
+ }
+
+diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
+index ef1293f2a897ee..7758d274a26cd0 100644
+--- a/drivers/pwm/pwm-jz4740.c
++++ b/drivers/pwm/pwm-jz4740.c
+@@ -60,9 +60,10 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+ snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
+
+ clk = clk_get(chip->dev, name);
+- if (IS_ERR(clk))
+- return dev_err_probe(chip->dev, PTR_ERR(clk),
+- "Failed to get clock\n");
++ if (IS_ERR(clk)) {
++ dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
++ return PTR_ERR(clk);
++ }
+
+ err = clk_prepare_enable(clk);
+ if (err < 0) {
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index b1d1373648a38f..a0467f0b549c25 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
+ unsigned int cpt_num_devs;
+ unsigned int max_pwm_cnt;
+ unsigned int max_prescale;
++ struct sti_cpt_ddata *ddata;
+ };
+
+ struct sti_pwm_chip {
+@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+- struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
++ struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
+ struct device *dev = pc->dev;
+ unsigned int effective_ticks;
+ unsigned long long high, low;
+@@ -394,8 +395,17 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+ {
++ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
++ struct sti_pwm_compat_data *cdata = pc->cdata;
++ struct device *dev = pc->dev;
+ int err;
+
++ if (pwm->hwpwm >= cdata->pwm_num_devs) {
++ dev_err(dev, "device %u is not valid for pwm mode\n",
++ pwm->hwpwm);
++ return -EINVAL;
++ }
++
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+@@ -440,7 +450,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+ while (cpt_int_stat) {
+ devicenum = ffs(cpt_int_stat) - 1;
+
+- ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
++ ddata = &pc->cdata->ddata[devicenum];
+
+ /*
+ * Capture input:
+@@ -561,6 +571,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct sti_pwm_compat_data *cdata;
++ struct pwm_chip *chip;
+ struct sti_pwm_chip *pc;
+ unsigned int i;
+ int irq, ret;
+@@ -568,6 +579,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
++ chip = &pc->chip;
+
+ cdata = devm_kzalloc(dev, sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+@@ -613,70 +625,37 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ return ret;
+
+ if (cdata->pwm_num_devs) {
+- pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
++ pc->pwm_clk = devm_clk_get_prepared(dev, "pwm");
+ if (IS_ERR(pc->pwm_clk)) {
+ dev_err(dev, "failed to get PWM clock\n");
+ return PTR_ERR(pc->pwm_clk);
+ }
+-
+- ret = clk_prepare(pc->pwm_clk);
+- if (ret) {
+- dev_err(dev, "failed to prepare clock\n");
+- return ret;
+- }
+ }
+
+ if (cdata->cpt_num_devs) {
+- pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
++ pc->cpt_clk = devm_clk_get_prepared(dev, "capture");
+ if (IS_ERR(pc->cpt_clk)) {
+ dev_err(dev, "failed to get PWM capture clock\n");
+ return PTR_ERR(pc->cpt_clk);
+ }
+
+- ret = clk_prepare(pc->cpt_clk);
+- if (ret) {
+- dev_err(dev, "failed to prepare clock\n");
+- return ret;
+- }
++ cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
++ if (!cdata->ddata)
++ return -ENOMEM;
+ }
+
+- pc->chip.dev = dev;
+- pc->chip.ops = &sti_pwm_ops;
+- pc->chip.npwm = pc->cdata->pwm_num_devs;
+-
+- ret = pwmchip_add(&pc->chip);
+- if (ret < 0) {
+- clk_unprepare(pc->pwm_clk);
+- clk_unprepare(pc->cpt_clk);
+- return ret;
+- }
++ chip->dev = dev;
++ chip->ops = &sti_pwm_ops;
++ chip->npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs);
+
+ for (i = 0; i < cdata->cpt_num_devs; i++) {
+- struct sti_cpt_ddata *ddata;
+-
+- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+- if (!ddata)
+- return -ENOMEM;
++ struct sti_cpt_ddata *ddata = &cdata->ddata[i];
+
+ init_waitqueue_head(&ddata->wait);
+ mutex_init(&ddata->lock);
+-
+- pwm_set_chip_data(&pc->chip.pwms[i], ddata);
+ }
+
+- platform_set_drvdata(pdev, pc);
+-
+- return 0;
+-}
+-
+-static void sti_pwm_remove(struct platform_device *pdev)
+-{
+- struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
+-
+- pwmchip_remove(&pc->chip);
+-
+- clk_unprepare(pc->pwm_clk);
+- clk_unprepare(pc->cpt_clk);
++ return devm_pwmchip_add(dev, chip);
+ }
+
+ static const struct of_device_id sti_pwm_of_match[] = {
+@@ -691,7 +670,6 @@ static struct platform_driver sti_pwm_driver = {
+ .of_match_table = sti_pwm_of_match,
+ },
+ .probe = sti_pwm_probe,
+- .remove_new = sti_pwm_remove,
+ };
+ module_platform_driver(sti_pwm_driver);
+
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index 3d6be7749e2314..b91a14c895bea8 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -344,6 +344,9 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+
+ prd = div;
+
++ if (!prd)
++ return -EINVAL;
++
+ if (prescaler > MAX_TIM_PSC)
+ return -EINVAL;
+
+@@ -453,8 +456,9 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+
+ enabled = pwm->state.enabled;
+
+- if (enabled && !state->enabled) {
+- stm32_pwm_disable(priv, pwm->hwpwm);
++ if (!state->enabled) {
++ if (enabled)
++ stm32_pwm_disable(priv, pwm->hwpwm);
+ return 0;
+ }
+
+@@ -579,32 +583,23 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
+ priv->have_complementary_output = (ccer != 0);
+ }
+
+-static int stm32_pwm_detect_channels(struct stm32_pwm *priv)
++static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
++ unsigned int *num_enabled)
+ {
+- u32 ccer;
+- int npwm = 0;
++ u32 ccer, ccer_backup;
+
+ /*
+ * If channels enable bits don't exist writing 1 will have no
+ * effect so we can detect and count them.
+ */
++ regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
+ regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
+ regmap_read(priv->regmap, TIM_CCER, &ccer);
+- regmap_clear_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
+-
+- if (ccer & TIM_CCER_CC1E)
+- npwm++;
++ regmap_write(priv->regmap, TIM_CCER, ccer_backup);
+
+- if (ccer & TIM_CCER_CC2E)
+- npwm++;
++ *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);
+
+- if (ccer & TIM_CCER_CC3E)
+- npwm++;
+-
+- if (ccer & TIM_CCER_CC4E)
+- npwm++;
+-
+- return npwm;
++ return hweight32(ccer & TIM_CCER_CCXE);
+ }
+
+ static int stm32_pwm_probe(struct platform_device *pdev)
+@@ -613,6 +608,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ struct device_node *np = dev->of_node;
+ struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct stm32_pwm *priv;
++ unsigned int num_enabled;
++ unsigned int i;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -635,7 +632,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+
+ priv->chip.dev = dev;
+ priv->chip.ops = &stm32pwm_ops;
+- priv->chip.npwm = stm32_pwm_detect_channels(priv);
++ priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
++
++ /* Initialize clock refcount to number of enabled PWM channels. */
++ for (i = 0; i < num_enabled; i++)
++ clk_enable(priv->clk);
+
+ ret = devm_pwmchip_add(dev, &priv->chip);
+ if (ret < 0)
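
The reworked stm32_pwm_detect_channels() writes all CCxE bits, counts how many stick with hweight32(), then restores the saved CCER instead of clearing it, so channels the bootloader left running stay running; probe then seeds the clock refcount with one clk_enable() per already-enabled channel. hweight32() is simply a 32-bit population count, as in this illustrative helper:

    #include <linux/bitops.h>

    /* Count the channel-enable bits set in a capture/compare enable register. */
    static unsigned int count_enabled_channels(u32 ccer, u32 ccxe_mask)
    {
        return hweight32(ccer & ccxe_mask);
    }
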
+diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
+index 8fb84b44185380..65205449ed79ca 100644
+--- a/drivers/pwm/pwm-twl-led.c
++++ b/drivers/pwm/pwm-twl-led.c
+@@ -172,7 +172,7 @@ static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+ * because we might have exited early in the last call to
+- * pwm_apply_state because of !state->enabled and so the two values in
++ * pwm_apply_might_sleep because of !state->enabled and so the two values in
+ * pwm->state might not be configured in hardware.
+ */
+ ret = twl4030_pwmled_config(pwm->chip, pwm,
+diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
+index 6d46db51daaccd..ba1204e18afbb0 100644
+--- a/drivers/pwm/pwm-vt8500.c
++++ b/drivers/pwm/pwm-vt8500.c
+@@ -206,7 +206,7 @@ static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+ * because we might have exited early in the last call to
+- * pwm_apply_state because of !state->enabled and so the two values in
++ * pwm_apply_might_sleep because of !state->enabled and so the two values in
+ * pwm->state might not be configured in hardware.
+ */
+ err = vt8500_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 8d1254761e4dd2..052ccadbdabfe6 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -62,7 +62,7 @@ static ssize_t period_store(struct device *child,
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, &state);
+ state.period = val;
+- ret = pwm_apply_state(pwm, &state);
++ ret = pwm_apply_might_sleep(pwm, &state);
+ mutex_unlock(&export->lock);
+
+ return ret ? : size;
+@@ -97,7 +97,7 @@ static ssize_t duty_cycle_store(struct device *child,
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, &state);
+ state.duty_cycle = val;
+- ret = pwm_apply_state(pwm, &state);
++ ret = pwm_apply_might_sleep(pwm, &state);
+ mutex_unlock(&export->lock);
+
+ return ret ? : size;
+@@ -144,7 +144,7 @@ static ssize_t enable_store(struct device *child,
+ goto unlock;
+ }
+
+- ret = pwm_apply_state(pwm, &state);
++ ret = pwm_apply_might_sleep(pwm, &state);
+
+ unlock:
+ mutex_unlock(&export->lock);
+@@ -194,7 +194,7 @@ static ssize_t polarity_store(struct device *child,
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, &state);
+ state.polarity = polarity;
+- ret = pwm_apply_state(pwm, &state);
++ ret = pwm_apply_might_sleep(pwm, &state);
+ mutex_unlock(&export->lock);
+
+ return ret ? : size;
+@@ -401,7 +401,7 @@ static int pwm_class_apply_state(struct pwm_export *export,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+ {
+- int ret = pwm_apply_state(pwm, state);
++ int ret = pwm_apply_might_sleep(pwm, state);
+
+ /* release lock taken in pwm_class_get_state */
+ mutex_unlock(&export->lock);
+diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
+index 26192d55a6858e..79fbb45297f6bc 100644
+--- a/drivers/regulator/bd71815-regulator.c
++++ b/drivers/regulator/bd71815-regulator.c
+@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
+ * 10: 2.50mV/usec 10mV 4uS
+ * 11: 1.25mV/usec 10mV 8uS
+ */
+-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
++static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
+
+ static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
+index 08d4ee369287e0..dd871ffe979c37 100644
+--- a/drivers/regulator/bd71828-regulator.c
++++ b/drivers/regulator/bd71828-regulator.c
+@@ -206,14 +206,11 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+- .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ /*
+ * LPSR voltage is same as SUSPEND voltage. Allow
+- * setting it so that regulator can be set enabled at
+- * LPSR state
++ * only enabling/disabling the regulator for the LPSR state
+ */
+- .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+- .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
++ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ .reg_inits = buck1_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck1_inits),
+@@ -288,13 +285,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK3_VOLT,
+- .idle_reg = BD71828_REG_BUCK3_VOLT,
+- .suspend_reg = BD71828_REG_BUCK3_VOLT,
+- .lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ .run_mask = BD71828_MASK_BUCK3_VOLT,
+- .idle_mask = BD71828_MASK_BUCK3_VOLT,
+- .suspend_mask = BD71828_MASK_BUCK3_VOLT,
+- .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -329,13 +320,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK4_VOLT,
+- .idle_reg = BD71828_REG_BUCK4_VOLT,
+- .suspend_reg = BD71828_REG_BUCK4_VOLT,
+- .lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ .run_mask = BD71828_MASK_BUCK4_VOLT,
+- .idle_mask = BD71828_MASK_BUCK4_VOLT,
+- .suspend_mask = BD71828_MASK_BUCK4_VOLT,
+- .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -370,13 +355,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK5_VOLT,
+- .idle_reg = BD71828_REG_BUCK5_VOLT,
+- .suspend_reg = BD71828_REG_BUCK5_VOLT,
+- .lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ .run_mask = BD71828_MASK_BUCK5_VOLT,
+- .idle_mask = BD71828_MASK_BUCK5_VOLT,
+- .suspend_mask = BD71828_MASK_BUCK5_VOLT,
+- .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -493,13 +472,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO1_VOLT,
+- .idle_reg = BD71828_REG_LDO1_VOLT,
+- .suspend_reg = BD71828_REG_LDO1_VOLT,
+- .lpsr_reg = BD71828_REG_LDO1_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+- .idle_mask = BD71828_MASK_LDO_VOLT,
+- .suspend_mask = BD71828_MASK_LDO_VOLT,
+- .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -533,13 +506,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO2_VOLT,
+- .idle_reg = BD71828_REG_LDO2_VOLT,
+- .suspend_reg = BD71828_REG_LDO2_VOLT,
+- .lpsr_reg = BD71828_REG_LDO2_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+- .idle_mask = BD71828_MASK_LDO_VOLT,
+- .suspend_mask = BD71828_MASK_LDO_VOLT,
+- .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -573,13 +540,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO3_VOLT,
+- .idle_reg = BD71828_REG_LDO3_VOLT,
+- .suspend_reg = BD71828_REG_LDO3_VOLT,
+- .lpsr_reg = BD71828_REG_LDO3_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+- .idle_mask = BD71828_MASK_LDO_VOLT,
+- .suspend_mask = BD71828_MASK_LDO_VOLT,
+- .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -614,13 +575,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO4_VOLT,
+- .idle_reg = BD71828_REG_LDO4_VOLT,
+- .suspend_reg = BD71828_REG_LDO4_VOLT,
+- .lpsr_reg = BD71828_REG_LDO4_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+- .idle_mask = BD71828_MASK_LDO_VOLT,
+- .suspend_mask = BD71828_MASK_LDO_VOLT,
+- .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -655,13 +610,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO5_VOLT,
+- .idle_reg = BD71828_REG_LDO5_VOLT,
+- .suspend_reg = BD71828_REG_LDO5_VOLT,
+- .lpsr_reg = BD71828_REG_LDO5_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+- .idle_mask = BD71828_MASK_LDO_VOLT,
+- .suspend_mask = BD71828_MASK_LDO_VOLT,
+- .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -720,9 +669,6 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ .suspend_reg = BD71828_REG_LDO7_VOLT,
+ .lpsr_reg = BD71828_REG_LDO7_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+- .idle_mask = BD71828_MASK_LDO_VOLT,
+- .suspend_mask = BD71828_MASK_LDO_VOLT,
+- .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 3137e40fcd3e05..c96bf095695fd8 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1909,19 +1909,24 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ }
+ }
+
+- if (err != -EEXIST)
++ if (err != -EEXIST) {
+ regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+- if (IS_ERR(regulator->debugfs))
+- rdev_dbg(rdev, "Failed to create debugfs directory\n");
++ if (IS_ERR(regulator->debugfs)) {
++ rdev_dbg(rdev, "Failed to create debugfs directory\n");
++ regulator->debugfs = NULL;
++ }
++ }
+
+- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+- &regulator->uA_load);
+- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+- &regulator->voltage[PM_SUSPEND_ON].min_uV);
+- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+- &regulator->voltage[PM_SUSPEND_ON].max_uV);
+- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+- regulator, &constraint_flags_fops);
++ if (regulator->debugfs) {
++ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
++ &regulator->uA_load);
++ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
++ &regulator->voltage[PM_SUSPEND_ON].min_uV);
++ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
++ &regulator->voltage[PM_SUSPEND_ON].max_uV);
++ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
++ regulator, &constraint_flags_fops);
++ }
+
+ /*
+ * Check now if the regulator is an always on regulator - if
+@@ -2918,7 +2923,8 @@ static int _regulator_enable(struct regulator *regulator)
+ /* Fallthrough on positive return values - already enabled */
+ }
+
+- rdev->use_count++;
++ if (regulator->enable_count == 1)
++ rdev->use_count++;
+
+ return 0;
+
+@@ -2993,37 +2999,40 @@ static int _regulator_disable(struct regulator *regulator)
+
+ lockdep_assert_held_once(&rdev->mutex.base);
+
+- if (WARN(rdev->use_count <= 0,
++ if (WARN(regulator->enable_count == 0,
+ "unbalanced disables for %s\n", rdev_get_name(rdev)))
+ return -EIO;
+
+- /* are we the last user and permitted to disable ? */
+- if (rdev->use_count == 1 &&
+- (rdev->constraints && !rdev->constraints->always_on)) {
+-
+- /* we are last user */
+- if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
+- ret = _notifier_call_chain(rdev,
+- REGULATOR_EVENT_PRE_DISABLE,
+- NULL);
+- if (ret & NOTIFY_STOP_MASK)
+- return -EINVAL;
+-
+- ret = _regulator_do_disable(rdev);
+- if (ret < 0) {
+- rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
+- _notifier_call_chain(rdev,
+- REGULATOR_EVENT_ABORT_DISABLE,
++ if (regulator->enable_count == 1) {
++ /* disabling last enable_count from this regulator */
++ /* are we the last user and permitted to disable ? */
++ if (rdev->use_count == 1 &&
++ (rdev->constraints && !rdev->constraints->always_on)) {
++
++ /* we are last user */
++ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
++ ret = _notifier_call_chain(rdev,
++ REGULATOR_EVENT_PRE_DISABLE,
++ NULL);
++ if (ret & NOTIFY_STOP_MASK)
++ return -EINVAL;
++
++ ret = _regulator_do_disable(rdev);
++ if (ret < 0) {
++ rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
++ _notifier_call_chain(rdev,
++ REGULATOR_EVENT_ABORT_DISABLE,
++ NULL);
++ return ret;
++ }
++ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
+- return ret;
+ }
+- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+- NULL);
+- }
+
+- rdev->use_count = 0;
+- } else if (rdev->use_count > 1) {
+- rdev->use_count--;
++ rdev->use_count = 0;
++ } else if (rdev->use_count > 1) {
++ rdev->use_count--;
++ }
+ }
+
+ if (ret == 0)
+@@ -3325,6 +3334,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
+
+ return map ? map : ERR_PTR(-EOPNOTSUPP);
+ }
++EXPORT_SYMBOL_GPL(regulator_get_regmap);
+
+ /**
+ * regulator_get_hardware_vsel_register - get the HW voltage selector register
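
The core change makes rdev->use_count track distinct consumers instead of raw enable calls: a consumer bumps the device count only on its first enable (enable_count 0 -> 1) and releases it only on its last disable, so one consumer nesting enables can no longer pin or prematurely drop the device count for everyone else. From the consumer API side the balancing rule is unchanged (use_supply is an illustrative name):

    #include <linux/regulator/consumer.h>

    static int use_supply(struct regulator *reg)
    {
        int ret;

        ret = regulator_enable(reg);    /* consumer 0 -> 1: device use_count++ */
        if (ret)
            return ret;

        ret = regulator_enable(reg);    /* consumer 1 -> 2: device count untouched */
        if (ret) {
            regulator_disable(reg);
            return ret;
        }

        /* ... */

        regulator_disable(reg);         /* consumer 2 -> 1 */
        regulator_disable(reg);         /* consumer 1 -> 0: device may power down */
        return 0;
    }
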
+diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
+index d4926833655327..6e1ace660b8cf2 100644
+--- a/drivers/regulator/helpers.c
++++ b/drivers/regulator/helpers.c
+@@ -161,6 +161,32 @@ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
+ }
+ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_pickable_regmap);
+
++static int write_separate_vsel_and_range(struct regulator_dev *rdev,
++ unsigned int sel, unsigned int range)
++{
++ bool range_updated;
++ int ret;
++
++ ret = regmap_update_bits_base(rdev->regmap, rdev->desc->vsel_range_reg,
++ rdev->desc->vsel_range_mask,
++ range, &range_updated, false, false);
++ if (ret)
++ return ret;
++
++ /*
++	 * Some PMICs treat the vsel_reg the same as an apply-bit. Force it to
++	 * be written if the range changed, even if the old selector was the
++	 * same as the new one.
++ */
++ if (rdev->desc->range_applied_by_vsel && range_updated)
++ return regmap_write_bits(rdev->regmap,
++ rdev->desc->vsel_reg,
++ rdev->desc->vsel_mask, sel);
++
++ return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
++ rdev->desc->vsel_mask, sel);
++}
++
+ /**
+ * regulator_set_voltage_sel_pickable_regmap - pickable range set_voltage_sel
+ *
+@@ -199,21 +225,12 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
+ range = rdev->desc->linear_range_selectors_bitfield[i];
+ range <<= ffs(rdev->desc->vsel_range_mask) - 1;
+
+- if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
+- ret = regmap_update_bits(rdev->regmap,
+- rdev->desc->vsel_reg,
++ if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg)
++ ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ rdev->desc->vsel_range_mask |
+ rdev->desc->vsel_mask, sel | range);
+- } else {
+- ret = regmap_update_bits(rdev->regmap,
+- rdev->desc->vsel_range_reg,
+- rdev->desc->vsel_range_mask, range);
+- if (ret)
+- return ret;
+-
+- ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+- rdev->desc->vsel_mask, sel);
+- }
++ else
++ ret = write_separate_vsel_and_range(rdev, sel, range);
+
+ if (ret)
+ return ret;
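
write_separate_vsel_and_range() exists for the new range_applied_by_vsel descriptor flag (set later in this patch for tps6287x): on such parts the voltage range lives in a second register and only a write to the selector register latches it, so the selector must be force-written whenever the range changed, even if its value did not. A partial descriptor sketch (register offsets made up; a real descriptor also needs ops, voltage ranges, and so on):

    #include <linux/regulator/driver.h>

    static const struct regulator_desc my_buck = {
        .name                  = "my-buck",
        .vsel_reg              = 0x01,
        .vsel_mask             = 0xff,
        .vsel_range_reg        = 0x02,
        .vsel_range_mask       = 0x01,
        /* selector write latches a pending range change */
        .range_applied_by_vsel = true,
    };
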
+diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
+index fe7ae0f3f46af9..5ab1a0befe12f7 100644
+--- a/drivers/regulator/irq_helpers.c
++++ b/drivers/regulator/irq_helpers.c
+@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
+
+ h->irq = irq;
+ h->desc = *d;
++ h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
++ if (!h->desc.name)
++ return ERR_PTR(-ENOMEM);
+
+ ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
+ rdev_amount);
+diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
+index b56a174cde3df1..4362f332f746ba 100644
+--- a/drivers/regulator/max5970-regulator.c
++++ b/drivers/regulator/max5970-regulator.c
+@@ -28,8 +28,8 @@ struct max5970_regulator {
+ };
+
+ enum max597x_regulator_id {
+- MAX597X_SW0,
+- MAX597X_SW1,
++ MAX597X_sw0,
++ MAX597X_sw1,
+ };
+
+ static int max597x_uvp_ovp_check_mode(struct regulator_dev *rdev, int severity)
+@@ -251,8 +251,8 @@ static int max597x_dt_parse(struct device_node *np,
+ }
+
+ static const struct regulator_desc regulators[] = {
+- MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
+- MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
++ MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
++ MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
+ };
+
+ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
+@@ -265,7 +265,7 @@ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
+ return ret;
+
+ if (*val)
+- return regmap_write(map, reg, *val);
++ return regmap_write(map, reg, 0);
+
+ return 0;
+ }
+diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
+index 65fbd95f1dbb0c..4ca8fbf4b3e2e3 100644
+--- a/drivers/regulator/mt6358-regulator.c
++++ b/drivers/regulator/mt6358-regulator.c
+@@ -688,12 +688,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev)
+ const struct mt6358_regulator_info *mt6358_info;
+ int i, max_regulator, ret;
+
+- if (mt6397->chip_id == MT6366_CHIP_ID) {
+- max_regulator = MT6366_MAX_REGULATOR;
+- mt6358_info = mt6366_regulators;
+- } else {
++ switch (mt6397->chip_id) {
++ case MT6358_CHIP_ID:
+ max_regulator = MT6358_MAX_REGULATOR;
+ mt6358_info = mt6358_regulators;
++ break;
++ case MT6366_CHIP_ID:
++ max_regulator = MT6366_MAX_REGULATOR;
++ mt6358_info = mt6366_regulators;
++ break;
++ default:
++ dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id);
++ return -EINVAL;
+ }
+
+ ret = mt6358_sync_vcn33_setting(&pdev->dev);
+diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
+index ad6587a378d09c..24cc9fc94e900a 100644
+--- a/drivers/regulator/mt6360-regulator.c
++++ b/drivers/regulator/mt6360-regulator.c
+@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
+ }
+ }
+
+-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
+- mreg, mmask, streg, stmask, vranges, \
+- vcnts, offon_delay, irq_tbls) \
++#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \
++ vmask, mreg, mmask, streg, stmask, \
++ vranges, vcnts, offon_delay, irq_tbls) \
+ { \
+ .desc = { \
+ .name = #_name, \
+ .supply_name = #_sname, \
+ .id = MT6360_REGULATOR_##_name, \
+- .of_match = of_match_ptr(#_name), \
++ .of_match = of_match_ptr(match), \
+ .regulators_node = of_match_ptr("regulator"), \
+ .of_map_mode = mt6360_regulator_of_map_mode, \
+ .owner = THIS_MODULE, \
+@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
+ }
+
+ static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
+- MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
++ MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
++ 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ buck_vout_ranges, 256, 0, buck1_irq_tbls),
+- MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
++ MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
++ 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ buck_vout_ranges, 256, 0, buck2_irq_tbls),
+- MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
++ MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
++ 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
+- MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
++ MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
++ 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
+- MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
++ MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
++ 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
+- MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
++ MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
++ 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
+- MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
++ MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
++ 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
+- MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
++ MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
++ 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
+ };
+
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index 1b65e5e4e40ffc..59e71fd0db4390 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -768,7 +768,7 @@ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ name[i] = '\0';
+ tmp = regulator_get(dev, name);
+ if (IS_ERR(tmp)) {
+- ret = -EINVAL;
++ ret = PTR_ERR(tmp);
+ goto error;
+ }
+ (*consumers)[n].consumer = tmp;
+diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
+index 2aff6db748e2c9..226ca4c62673ff 100644
+--- a/drivers/regulator/pwm-regulator.c
++++ b/drivers/regulator/pwm-regulator.c
+@@ -90,7 +90,7 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ pwm_set_relative_duty_cycle(&pstate,
+ drvdata->duty_cycle_table[selector].dutycycle, 100);
+
+- ret = pwm_apply_state(drvdata->pwm, &pstate);
++ ret = pwm_apply_might_sleep(drvdata->pwm, &pstate);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
+ return ret;
+@@ -158,6 +158,9 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
+ pwm_get_state(drvdata->pwm, &pstate);
+
+ voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
++ if (voltage < min(max_uV_duty, min_uV_duty) ||
++ voltage > max(max_uV_duty, min_uV_duty))
++ return -ENOTRECOVERABLE;
+
+ /*
+ * The dutycycle for min_uV might be greater than the one for max_uV.
+@@ -216,7 +219,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
+
+ pwm_set_relative_duty_cycle(&pstate, dutycycle, duty_unit);
+
+- ret = pwm_apply_state(drvdata->pwm, &pstate);
++ ret = pwm_apply_might_sleep(drvdata->pwm, &pstate);
+ if (ret) {
+ dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
+ return ret;
+diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
+index 656fe330d38f0a..063e12c08e75f7 100644
+--- a/drivers/regulator/qcom-refgen-regulator.c
++++ b/drivers/regulator/qcom-refgen-regulator.c
+@@ -140,6 +140,7 @@ static const struct of_device_id qcom_refgen_match_table[] = {
+ { .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
+ { }
+ };
++MODULE_DEVICE_TABLE(of, qcom_refgen_match_table);
+
+ static struct platform_driver qcom_refgen_driver = {
+ .probe = qcom_refgen_probe,
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index d990ba19c50eb4..b2e359ac316932 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1095,7 +1095,7 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
+- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"),
++ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
+index 86d2d80b4b41bc..ae5dcaea84dfbb 100644
+--- a/drivers/regulator/ti-abb-regulator.c
++++ b/drivers/regulator/ti-abb-regulator.c
+@@ -734,9 +734,25 @@ static int ti_abb_probe(struct platform_device *pdev)
+ return PTR_ERR(abb->setup_reg);
+ }
+
+- abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address");
+- if (IS_ERR(abb->int_base))
+- return PTR_ERR(abb->int_base);
++ pname = "int-address";
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
++ if (!res) {
++ dev_err(dev, "Missing '%s' IO resource\n", pname);
++ return -ENODEV;
++ }
++ /*
++ * The MPU interrupt status register (PRM_IRQSTATUS_MPU) is
++ * shared between regulator-abb-{ivahd,dspeve,gpu} driver
++ * instances. Therefore use devm_ioremap() rather than
++ * devm_platform_ioremap_resource_byname() to avoid busy
++ * resource region conflicts.
++ */
++ abb->int_base = devm_ioremap(dev, res->start,
++ resource_size(res));
++ if (!abb->int_base) {
++ dev_err(dev, "Unable to map '%s'\n", pname);
++ return -ENOMEM;
++ }
+
+ /* Map Optional resources */
+ pname = "efuse-address";
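
As the new comment in the hunk explains, PRM_IRQSTATUS_MPU is shared between the abb-ivahd/dspeve/gpu instances, so devm_platform_ioremap_resource_byname() fails for the second probe with a busy-region conflict; plain devm_ioremap() maps the range without claiming it. Sketch of the lookup-and-map pattern (map_shared_reg is an illustrative name):

    #include <linux/io.h>
    #include <linux/platform_device.h>

    static void __iomem *map_shared_reg(struct platform_device *pdev,
                                        const char *pname)
    {
        struct resource *res;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
        if (!res)
            return NULL;

        /* No request_mem_region(): several instances may map this range. */
        return devm_ioremap(&pdev->dev, res->start, resource_size(res));
    }
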
+diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
+index 9b7c3d77789e3d..3c9d79e003e4b9 100644
+--- a/drivers/regulator/tps6287x-regulator.c
++++ b/drivers/regulator/tps6287x-regulator.c
+@@ -115,6 +115,7 @@ static struct regulator_desc tps6287x_reg = {
+ .vsel_mask = 0xFF,
+ .vsel_range_reg = TPS6287X_CTRL2,
+ .vsel_range_mask = TPS6287X_CTRL2_VRANGE,
++ .range_applied_by_vsel = true,
+ .ramp_reg = TPS6287X_CTRL1,
+ .ramp_mask = TPS6287X_CTRL1_VRAMP,
+ .ramp_delay_table = tps6287x_ramp_table,
+diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
+index a06f5f2d79329d..9c2f0dd42613d4 100644
+--- a/drivers/regulator/tps65132-regulator.c
++++ b/drivers/regulator/tps65132-regulator.c
+@@ -267,10 +267,17 @@ static const struct i2c_device_id tps65132_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, tps65132_id);
+
++static const struct of_device_id __maybe_unused tps65132_of_match[] = {
++ { .compatible = "ti,tps65132" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, tps65132_of_match);
++
+ static struct i2c_driver tps65132_i2c_driver = {
+ .driver = {
+ .name = "tps65132",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
++ .of_match_table = of_match_ptr(tps65132_of_match),
+ },
+ .probe = tps65132_probe,
+ .id_table = tps65132_id,
+diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
+index b7f0c87797577e..5fad61785e72f8 100644
+--- a/drivers/regulator/tps6594-regulator.c
++++ b/drivers/regulator/tps6594-regulator.c
+@@ -287,30 +287,30 @@ static struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = {
+ static const struct regulator_desc multi_regs[] = {
+ TPS6594_REGULATOR("BUCK12", "buck12", TPS6594_BUCK_1,
+ REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_VOUT_1(1),
++ TPS6594_REG_BUCKX_VOUT_1(0),
+ TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_CTRL(1),
++ TPS6594_REG_BUCKX_CTRL(0),
+ TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 4, 4000, 0, NULL, 0, 0),
+ TPS6594_REGULATOR("BUCK34", "buck34", TPS6594_BUCK_3,
+ REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_VOUT_1(3),
++ TPS6594_REG_BUCKX_VOUT_1(2),
+ TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_CTRL(3),
++ TPS6594_REG_BUCKX_CTRL(2),
+ TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 4, 0, 0, NULL, 0, 0),
+ TPS6594_REGULATOR("BUCK123", "buck123", TPS6594_BUCK_1,
+ REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_VOUT_1(1),
++ TPS6594_REG_BUCKX_VOUT_1(0),
+ TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_CTRL(1),
++ TPS6594_REG_BUCKX_CTRL(0),
+ TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 4, 4000, 0, NULL, 0, 0),
+ TPS6594_REGULATOR("BUCK1234", "buck1234", TPS6594_BUCK_1,
+ REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_VOUT_1(1),
++ TPS6594_REG_BUCKX_VOUT_1(0),
+ TPS6594_MASK_BUCKS_VSET,
+- TPS6594_REG_BUCKX_CTRL(1),
++ TPS6594_REG_BUCKX_CTRL(0),
+ TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 4, 4000, 0, NULL, 0, 0),
+ };
+diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
+index 97f075ed68c95a..cb1de24b986269 100644
+--- a/drivers/regulator/userspace-consumer.c
++++ b/drivers/regulator/userspace-consumer.c
+@@ -210,6 +210,7 @@ static const struct of_device_id regulator_userspace_consumer_of_match[] = {
+ { .compatible = "regulator-output", },
+ {},
+ };
++MODULE_DEVICE_TABLE(of, regulator_userspace_consumer_of_match);
+
+ static struct platform_driver regulator_userspace_consumer_driver = {
+ .probe = regulator_userspace_consumer_probe,
+diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
+index 086da36abc0b49..4955616517ce9c 100644
+--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
++++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
+@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
+ { .compatible = "qcom,vqmmc-ipq4019-regulator", },
+ {},
+ };
++MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
+
+ static struct platform_driver ipq4019_regulator_driver = {
+ .probe = ipq4019_regulator_probe,
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 8bb293b9f327cc..610a69928dff2a 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -213,7 +213,7 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+- { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
++ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* TCML */
+ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ /* TCMU */
+@@ -669,6 +669,17 @@ static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc
+ return (struct resource_table *)priv->rsc_table;
+ }
+
++static struct resource_table *
++imx_rproc_elf_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
++{
++ struct imx_rproc *priv = rproc->priv;
++
++ if (priv->rsc_table)
++ return (struct resource_table *)priv->rsc_table;
++
++ return rproc_elf_find_loaded_rsc_table(rproc, fw);
++}
++
+ static const struct rproc_ops imx_rproc_ops = {
+ .prepare = imx_rproc_prepare,
+ .attach = imx_rproc_attach,
+@@ -679,7 +690,7 @@ static const struct rproc_ops imx_rproc_ops = {
+ .da_to_va = imx_rproc_da_to_va,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_rproc_parse_fw,
+- .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
++ .find_loaded_rsc_table = imx_rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+@@ -729,31 +740,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
+ struct resource res;
+
+ node = of_parse_phandle(np, "memory-region", a);
++ if (!node)
++ continue;
+ /* Not map vdevbuffer, vdevring region */
+ if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ of_node_put(node);
+ continue;
+ }
+ err = of_address_to_resource(node, 0, &res);
+- of_node_put(node);
+ if (err) {
+ dev_err(dev, "unable to resolve memory region\n");
++ of_node_put(node);
+ return err;
+ }
+
+- if (b >= IMX_RPROC_MEM_MAX)
++ if (b >= IMX_RPROC_MEM_MAX) {
++ of_node_put(node);
+ break;
++ }
+
+ /* Not use resource version, because we might share region */
+ priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
+ if (!priv->mem[b].cpu_addr) {
+ dev_err(dev, "failed to remap %pr\n", &res);
++ of_node_put(node);
+ return -ENOMEM;
+ }
+ priv->mem[b].sys_addr = res.start;
+ priv->mem[b].size = resource_size(&res);
+ if (!strcmp(node->name, "rsc-table"))
+ priv->rsc_table = priv->mem[b].cpu_addr;
++ of_node_put(node);
+ b++;
+ }
+
+@@ -1128,6 +1145,8 @@ static int imx_rproc_probe(struct platform_device *pdev)
+ goto err_put_rproc;
+ }
+
++ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
++
+ ret = imx_rproc_xtr_mbox_init(rproc);
+ if (ret)
+ goto err_put_wkq;
+@@ -1146,8 +1165,6 @@ static int imx_rproc_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_put_scu;
+
+- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+-
+ if (rproc->state != RPROC_DETACHED)
+ rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
+
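
The imx_rproc_addr_init() fix is a reference-count repair: of_parse_phandle() returns a node with its refcount raised (or NULL for a hole in the list), and every exit path, whether an error, an early break, or a normal loop iteration, must drop it with of_node_put(). Reduced to its shape (walk_regions is illustrative):

    #include <linux/of.h>

    static int walk_regions(struct device_node *np, int nph)
    {
        struct device_node *node;
        int i;

        for (i = 0; i < nph; i++) {
            node = of_parse_phandle(np, "memory-region", i);
            if (!node)
                continue;       /* tolerate empty slots */

            /* ... use node; on error, of_node_put(node) before returning ... */

            of_node_put(node);  /* balance the reference each iteration */
        }

        return 0;
    }
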
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index dcc94ee2458d8e..c4c535b011812d 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -126,7 +126,7 @@ static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ {
+ int ret;
+- size_t offset;
++ size_t buf_sz, offset;
+
+ /* read the ipi buf addr from FW itself first */
+ ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+@@ -138,6 +138,14 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ }
+ dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+
++ /* Make sure IPI buffer fits in the L2TCM range assigned to this core */
++ buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
++
++ if (scp->sram_size < buf_sz + offset) {
++ dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
++ return -EOVERFLOW;
++ }
++
+ scp->recv_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset);
+ scp->send_buf = (struct mtk_share_obj __iomem *)
+diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
+index 83d76915a6ad6f..25b66b113b6959 100644
+--- a/drivers/remoteproc/remoteproc_virtio.c
++++ b/drivers/remoteproc/remoteproc_virtio.c
+@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev)
+
+ kfree(vdev);
+
++ of_reserved_mem_device_release(&rvdev->pdev->dev);
++ dma_release_coherent_memory(&rvdev->pdev->dev);
++
+ put_device(&rvdev->pdev->dev);
+ }
+
+@@ -584,9 +587,6 @@ static void rproc_virtio_remove(struct platform_device *pdev)
+ rproc_remove_subdev(rproc, &rvdev->subdev);
+ rproc_remove_rvdev(rvdev);
+
+- of_reserved_mem_device_release(&pdev->dev);
+- dma_release_coherent_memory(&pdev->dev);
+-
+ put_device(&rproc->dev);
+ }
+
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index 9d9b13530f78aa..c786badf08fa67 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -120,7 +120,7 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc,
+ void *va;
+
+ dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
+- va = ioremap_wc(mem->dma, mem->len);
++ va = (__force void *)ioremap_wc(mem->dma, mem->len);
+ if (IS_ERR_OR_NULL(va)) {
+ dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
+ &mem->dma, mem->len);
+@@ -137,7 +137,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+ {
+ dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+- iounmap(mem->va);
++ iounmap((__force __iomem void *)mem->va);
+
+ return 0;
+ }
+@@ -294,7 +294,7 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
+
+ mutex_lock(&rproc->lock);
+
+- if (rproc->state != RPROC_RUNNING)
++ if (rproc->state != RPROC_RUNNING && rproc->state != RPROC_ATTACHED)
+ goto unlock_mutex;
+
+ if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
+@@ -657,7 +657,7 @@ stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+ * entire area by overwriting it with the initial values stored in rproc->clean_table.
+ */
+ *table_sz = RSC_TBL_SIZE;
+- return (struct resource_table *)ddata->rsc_va;
++ return (__force struct resource_table *)ddata->rsc_va;
+ }
+
+ static const struct rproc_ops st_rproc_ops = {
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index ad3415a3851b26..5491b1b17ca368 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -103,12 +103,14 @@ struct k3_r5_soc_data {
+ * @dev: cached device pointer
+ * @mode: Mode to configure the Cluster - Split or LockStep
+ * @cores: list of R5 cores within the cluster
++ * @core_transition: wait queue to sync core state changes
+ * @soc_data: SoC-specific feature data for a R5FSS
+ */
+ struct k3_r5_cluster {
+ struct device *dev;
+ enum cluster_mode mode;
+ struct list_head cores;
++ wait_queue_head_t core_transition;
+ const struct k3_r5_soc_data *soc_data;
+ };
+
+@@ -128,6 +130,7 @@ struct k3_r5_cluster {
+ * @atcm_enable: flag to control ATCM enablement
+ * @btcm_enable: flag to control BTCM enablement
+ * @loczrama: flag to dictate which TCM is at device address 0x0
++ * @released_from_reset: flag to signal when core is out of reset
+ */
+ struct k3_r5_core {
+ struct list_head elem;
+@@ -144,6 +147,7 @@ struct k3_r5_core {
+ u32 atcm_enable;
+ u32 btcm_enable;
+ u32 loczrama;
++ bool released_from_reset;
+ };
+
+ /**
+@@ -190,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
++ /* Do not forward message from a detached core */
++ if (kproc->rproc->state == RPROC_DETACHED)
++ return;
++
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+@@ -225,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
++ /* Do not forward message to a detached core */
++ if (kproc->rproc->state == RPROC_DETACHED)
++ return;
++
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+@@ -395,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+- if (IS_ERR(kproc->mbox)) {
+- ret = -EBUSY;
+- dev_err(dev, "mbox_request_channel failed: %ld\n",
+- PTR_ERR(kproc->mbox));
+- return ret;
+- }
++ if (IS_ERR(kproc->mbox))
++ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
++ "mbox_request_channel failed\n");
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+@@ -542,14 +551,10 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
+- struct k3_r5_core *core;
++ struct k3_r5_core *core0, *core;
+ u32 boot_addr;
+ int ret;
+
+- ret = k3_r5_rproc_request_mbox(rproc);
+- if (ret)
+- return ret;
+-
+ boot_addr = rproc->bootaddr;
+ /* TODO: add boot_addr sanity checking */
+ dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+@@ -558,7 +563,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ core = kproc->core;
+ ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ if (ret)
+- goto put_mbox;
++ return ret;
+
+ /* unhalt/run all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+@@ -568,9 +573,21 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ goto unroll_core_run;
+ }
+ } else {
++ /* do not allow core 1 to start before core 0 */
++ core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
++ elem);
++ if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
++ dev_err(dev, "%s: can not start core 1 before core 0\n",
++ __func__);
++ return -EPERM;
++ }
++
+ ret = k3_r5_core_run(core);
+ if (ret)
+- goto put_mbox;
++ return ret;
++
++ core->released_from_reset = true;
++ wake_up_interruptible(&cluster->core_transition);
+ }
+
+ return 0;
+@@ -580,8 +597,6 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ if (k3_r5_core_halt(core))
+ dev_warn(core->dev, "core halt back failed\n");
+ }
+-put_mbox:
+- mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+@@ -613,7 +628,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ {
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+- struct k3_r5_core *core = kproc->core;
++ struct device *dev = kproc->dev;
++ struct k3_r5_core *core1, *core = kproc->core;
+ int ret;
+
+ /* halt all applicable cores */
+@@ -626,13 +642,21 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ }
+ }
+ } else {
++ /* do not allow core 0 to stop before core 1 */
++ core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
++ elem);
++ if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
++ dev_err(dev, "%s: can not stop core 0 before core 1\n",
++ __func__);
++ ret = -EPERM;
++ goto out;
++ }
++
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+ }
+
+- mbox_free_channel(kproc->mbox);
+-
+ return 0;
+
+ unroll_core_halt:
+@@ -647,42 +671,22 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ /*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+- * The R5F attach callback only needs to request the mailbox, the remote
+- * processor is already booted, so there is no need to issue any TI-SCI
+- * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+- * only in IPC-only mode.
++ * The R5F attach callback is a NOP. The remote processor is already booted, and
++ * all required resources have been acquired during the probe routine, so there is
++ * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
++ * This callback is invoked only in IPC-only mode and exists because
++ * rproc_validate() checks for its existence.
+ */
+-static int k3_r5_rproc_attach(struct rproc *rproc)
+-{
+- struct k3_r5_rproc *kproc = rproc->priv;
+- struct device *dev = kproc->dev;
+- int ret;
+-
+- ret = k3_r5_rproc_request_mbox(rproc);
+- if (ret)
+- return ret;
+-
+- dev_info(dev, "R5F core initialized in IPC-only mode\n");
+- return 0;
+-}
++static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
+
+ /*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+- * The R5F detach callback performs the opposite operation to attach callback
+- * and only needs to release the mailbox, the R5F cores are not stopped and
+- * will be left in booted state in IPC-only mode. This callback is invoked
+- * only in IPC-only mode.
++ * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
++ * left in booted state in IPC-only mode. This callback is invoked only in
++ * IPC-only mode and exists for sanity's sake.
+ */
+-static int k3_r5_rproc_detach(struct rproc *rproc)
+-{
+- struct k3_r5_rproc *kproc = rproc->priv;
+- struct device *dev = kproc->dev;
+-
+- mbox_free_channel(kproc->mbox);
+- dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+- return 0;
+-}
++static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
+
+ /*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+@@ -1140,6 +1144,12 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+ return ret;
+ }
+
++ /*
++ * Skip the waiting mechanism for sequential power-on of cores if the
++ * core has already been booted by another entity.
++ */
++ core->released_from_reset = c_state;
++
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0) {
+@@ -1244,6 +1254,10 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ kproc->rproc = rproc;
+ core->rproc = rproc;
+
++ ret = k3_r5_rproc_request_mbox(rproc);
++ if (ret)
++ return ret;
++
+ ret = k3_r5_rproc_configure_mode(kproc);
+ if (ret < 0)
+ goto err_config;
+@@ -1280,6 +1294,26 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ cluster->mode == CLUSTER_MODE_SINGLECORE)
+ break;
++
++ /*
++ * R5 cores must be powered on sequentially; core0 should be
++ * in a higher power state than core1 in a cluster. So, wait
++ * for the current core to power up before proceeding to the
++ * next core, with a timeout of 2 sec for each core.
++ *
++ * This waiting mechanism is necessary because
++ * rproc_auto_boot_callback() for core1 can be called before
++ * core0 due to thread execution order.
++ */
++ ret = wait_event_interruptible_timeout(cluster->core_transition,
++ core->released_from_reset,
++ msecs_to_jiffies(2000));
++ if (ret <= 0) {
++ dev_err(dev,
++ "Timed out waiting for %s core to power up!\n",
++ rproc->name);
++ goto err_powerup;
++ }
+ }
+
+ return 0;
+@@ -1294,6 +1328,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ }
+ }
+
++err_powerup:
+ rproc_del(rproc);
+ err_add:
+ k3_r5_reserved_mem_exit(kproc);
+@@ -1341,6 +1376,8 @@ static void k3_r5_cluster_rproc_exit(void *data)
+ }
+ }
+
++ mbox_free_channel(kproc->mbox);
++
+ rproc_del(rproc);
+
+ k3_r5_reserved_mem_exit(kproc);
+@@ -1709,6 +1746,7 @@ static int k3_r5_probe(struct platform_device *pdev)
+ cluster->dev = dev;
+ cluster->soc_data = data;
+ INIT_LIST_HEAD(&cluster->cores);
++ init_waitqueue_head(&cluster->core_transition);
+
+ ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ if (ret < 0 && ret != -EINVAL) {
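
The hunks above serialize R5F core power-up: core1's init path blocks on cluster->core_transition until core0 sets released_from_reset, with a 2-second timeout. A standalone userspace sketch of the same handshake, using a pthread condition variable in place of the kernel waitqueue (all names here are illustrative, not from the patch):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t core_transition = PTHREAD_COND_INITIALIZER;
static int released_from_reset;

static void *boot_core0(void *arg)
{
	(void)arg;
	/* ... core 0 power-on sequence would run here ... */
	pthread_mutex_lock(&lock);
	released_from_reset = 1;                  /* core->released_from_reset = true */
	pthread_cond_broadcast(&core_transition); /* wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int wait_for_core0(void)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;                     /* like msecs_to_jiffies(2000) */

	pthread_mutex_lock(&lock);
	while (!released_from_reset && rc == 0)
		rc = pthread_cond_timedwait(&core_transition, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	return rc == ETIMEDOUT ? -1 : 0;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, boot_core0, NULL);
	if (wait_for_core0())
		fprintf(stderr, "Timed out waiting for core to power up!\n");
	else
		puts("core 0 released from reset; core 1 may start");
	pthread_join(t, NULL);
	return 0;
}
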
+diff --git a/drivers/reset/core.c b/drivers/reset/core.c
+index f0a076e94118f3..92cc13ef3e5668 100644
+--- a/drivers/reset/core.c
++++ b/drivers/reset/core.c
+@@ -807,6 +807,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
+ {
+ lockdep_assert_held(&reset_list_mutex);
+
++ if (IS_ERR_OR_NULL(rstc))
++ return;
++
+ kref_put(&rstc->refcnt, __reset_control_release);
+ }
+
+@@ -1017,11 +1020,8 @@ EXPORT_SYMBOL_GPL(reset_control_put);
+ void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+ {
+ mutex_lock(&reset_list_mutex);
+- while (num_rstcs--) {
+- if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
+- continue;
++ while (num_rstcs--)
+ __reset_control_put_internal(rstcs[num_rstcs].rstc);
+- }
+ mutex_unlock(&reset_list_mutex);
+ }
+ EXPORT_SYMBOL_GPL(reset_control_bulk_put);
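
The reset/core.c hunks move the IS_ERR_OR_NULL() guard into __reset_control_put_internal() so bulk callers need no per-element check, following the free(NULL) convention. A minimal standalone analogue with hypothetical handle_put()/handle_bulk_put() helpers:

#include <stdio.h>
#include <stdlib.h>

struct handle { int refcnt; };

static void handle_put(struct handle *h)
{
	if (!h)                  /* tolerate NULL, like IS_ERR_OR_NULL() */
		return;
	if (--h->refcnt == 0)
		free(h);
}

static void handle_bulk_put(int num, struct handle **hs)
{
	while (num--)
		handle_put(hs[num]);  /* no per-element guard needed any more */
}

int main(void)
{
	struct handle *hs[3] = { malloc(sizeof(struct handle)), NULL,
				 malloc(sizeof(struct handle)) };

	hs[0]->refcnt = hs[2]->refcnt = 1;
	handle_bulk_put(3, hs);       /* NULL slot is skipped safely */
	puts("bulk put completed");
	return 0;
}
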
+diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
+index 8d1fce18ded78c..5c3267acd2b1c2 100644
+--- a/drivers/reset/hisilicon/hi6220_reset.c
++++ b/drivers/reset/hisilicon/hi6220_reset.c
+@@ -163,7 +163,7 @@ static int hi6220_reset_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
+- type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
++ type = (uintptr_t)of_device_get_match_data(dev);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
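
The hi6220 hunk replaces a direct void-pointer-to-enum cast with an intermediate uintptr_t, the well-defined way to round-trip a small integer through the match-data pointer, which also avoids clang's -Wvoid-pointer-to-enum-cast. A compilable sketch of the idiom (names invented for illustration):

#include <stdint.h>
#include <stdio.h>

enum reset_ctrl_type { MEDIA_RESET, AO_RESET };

/* in the kernel, this would be the .data member of an of_device_id */
static const void *match_data = (void *)(uintptr_t)AO_RESET;

int main(void)
{
	enum reset_ctrl_type type = (uintptr_t)match_data; /* not (enum ...)(void *) */

	printf("type = %d\n", (int)type);
	return 0;
}
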
+diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
+index 2537ec05eceefd..578fe867080ce0 100644
+--- a/drivers/reset/reset-berlin.c
++++ b/drivers/reset/reset-berlin.c
+@@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
+
+ static int berlin2_reset_probe(struct platform_device *pdev)
+ {
+- struct device_node *parent_np = of_get_parent(pdev->dev.of_node);
++ struct device_node *parent_np;
+ struct berlin_reset_priv *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
++ parent_np = of_get_parent(pdev->dev.of_node);
+ priv->regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+ if (IS_ERR(priv->regmap))
+diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c
+index b62a2fd44e4e42..e77e4cca377dca 100644
+--- a/drivers/reset/reset-k210.c
++++ b/drivers/reset/reset-k210.c
+@@ -90,7 +90,7 @@ static const struct reset_control_ops k210_rst_ops = {
+ static int k210_rst_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+- struct device_node *parent_np = of_get_parent(dev->of_node);
++ struct device_node *parent_np;
+ struct k210_rst *ksr;
+
+ dev_info(dev, "K210 reset controller\n");
+@@ -99,6 +99,7 @@ static int k210_rst_probe(struct platform_device *pdev)
+ if (!ksr)
+ return -ENOMEM;
+
++ parent_np = of_get_parent(dev->of_node);
+ ksr->map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+ if (IS_ERR(ksr->map))
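
Both the berlin and k210 hunks apply one ordering rule: take the counted of_get_parent() reference only after the allocations that can fail, otherwise an early -ENOMEM return leaks the node reference. A generic standalone sketch of the rule, with get_ref()/put_ref() standing in for of_get_parent()/of_node_put():

#include <stdio.h>
#include <stdlib.h>

struct node { int refs; };

static struct node *get_ref(struct node *n) { n->refs++; return n; }
static void put_ref(struct node *n) { n->refs--; }

static int probe(struct node *parent)
{
	struct node *np;
	void *priv = malloc(64);

	if (!priv)
		return -1;       /* nothing to unwind: no reference taken yet */

	np = get_ref(parent);    /* acquire only after fallible allocations */
	/* ... use np ... */
	put_ref(np);
	free(priv);
	return 0;
}

int main(void)
{
	struct node parent = { .refs = 0 };

	probe(&parent);
	printf("refs after probe: %d (balanced)\n", parent.refs);
	return 0;
}
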
+diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
+index 905ac7910c98f3..f1af0f6746150e 100644
+--- a/drivers/rpmsg/virtio_rpmsg_bus.c
++++ b/drivers/rpmsg/virtio_rpmsg_bus.c
+@@ -378,6 +378,7 @@ static void virtio_rpmsg_release_device(struct device *dev)
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
++ kfree(rpdev->driver_override);
+ kfree(vch);
+ }
+
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index d7502433c78aa3..92f46a6312c24a 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -1832,7 +1832,8 @@ config RTC_DRV_MT2712
+
+ config RTC_DRV_MT6397
+ tristate "MediaTek PMIC based RTC"
+- depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
++ depends on MFD_MT6397 || COMPILE_TEST
++ select IRQ_DOMAIN
+ help
+ This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
+ MT6397 PMIC. You should enable MT6397 PMIC MFD before select
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 1b63111cdda2e9..0b23706d9fd3cc 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -274,10 +274,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ return err;
+
+ /* full-function RTCs won't have such missing fields */
+- if (rtc_valid_tm(&alarm->time) == 0) {
+- rtc_add_offset(rtc, &alarm->time);
+- return 0;
+- }
++ err = rtc_valid_tm(&alarm->time);
++ if (!err)
++ goto done;
+
+ /* get the "after" timestamp, to detect wrapped fields */
+ err = rtc_read_time(rtc, &now);
+@@ -379,6 +378,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ if (err && alarm->enabled)
+ dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
+ &alarm->time);
++ else
++ rtc_add_offset(rtc, &alarm->time);
+
+ return err;
+ }
+diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
+index d5caf36c56cdcf..225c859d6da550 100644
+--- a/drivers/rtc/lib_test.c
++++ b/drivers/rtc/lib_test.c
+@@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test)
+
+ days = div_s64(secs, 86400);
+
+- #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
++ #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
+ year, month, mday, yday, days
+
+ KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
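
The lib_test hunk fixes a printf-style format: div_s64() produces a 64-bit value, and %ld truncates it where long is 32 bits. The same pitfall in portable userspace C, using PRId64 rather than the kernel's %lld:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t secs = 86400LL * 20000;  /* roughly 54 years of seconds */
	int64_t days = secs / 86400;

	/* printf("%ld", days) would be undefined where long is 32-bit */
	printf("days = %" PRId64 "\n", days);
	return 0;
}
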
+diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
+index 07ede21cee347c..37df7e80525b4a 100644
+--- a/drivers/rtc/nvmem.c
++++ b/drivers/rtc/nvmem.c
+@@ -21,6 +21,7 @@ int devm_rtc_nvmem_register(struct rtc_device *rtc,
+
+ nvmem_config->dev = dev;
+ nvmem_config->owner = rtc->owner;
++ nvmem_config->add_legacy_fixed_of_cells = true;
+ nvmem = devm_nvmem_register(dev, nvmem_config);
+ if (IS_ERR(nvmem))
+ dev_err(dev, "failed to register nvmem device for RTC\n");
+diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
+index fde2b8054c2ea8..1298962402ff47 100644
+--- a/drivers/rtc/rtc-abx80x.c
++++ b/drivers/rtc/rtc-abx80x.c
+@@ -705,14 +705,18 @@ static int abx80x_nvmem_xfer(struct abx80x_priv *priv, unsigned int offset,
+ if (ret)
+ return ret;
+
+- if (write)
++ if (write) {
+ ret = i2c_smbus_write_i2c_block_data(priv->client, reg,
+ len, val);
+- else
++ if (ret)
++ return ret;
++ } else {
+ ret = i2c_smbus_read_i2c_block_data(priv->client, reg,
+ len, val);
+- if (ret)
+- return ret;
++ if (ret <= 0)
++ return ret ? ret : -EIO;
++ len = ret;
++ }
+
+ offset += len;
+ val += len;
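
The abx80x hunk accounts for i2c_smbus_read_i2c_block_data() returning the number of bytes actually read: zero or negative is an error, and the loop must advance by the returned length rather than the requested one. A standalone sketch of that loop shape, with read_block() as a hypothetical stand-in for the SMBus call:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int read_block(unsigned int off, unsigned char *val, unsigned int len)
{
	static const unsigned char backing[16] = "abx80x-nvram!!!";

	if (off >= sizeof(backing))
		return 0;                        /* nothing transferred */
	if (len > sizeof(backing) - off)
		len = sizeof(backing) - off;     /* short read */
	memcpy(val, backing + off, len);
	return (int)len;                         /* bytes actually read */
}

static int nvmem_read(unsigned int off, unsigned char *val, unsigned int count)
{
	while (count) {
		int ret = read_block(off, val, count > 8 ? 8 : count);

		if (ret <= 0)
			return ret ? ret : -EIO; /* 0 bytes is an error too */
		off += ret;                      /* advance by actual length */
		val += ret;
		count -= ret;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[15] = { 0 };
	int rc = nvmem_read(0, buf, sizeof(buf) - 1);

	printf("rc=%d buf=%s\n", rc, buf);
	return 0;
}
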
+diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
+index f93bee96e36233..993c0878fb6606 100644
+--- a/drivers/rtc/rtc-at91sam9.c
++++ b/drivers/rtc/rtc-at91sam9.c
+@@ -368,6 +368,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
+ return ret;
+
+ rtc->gpbr = syscon_node_to_regmap(args.np);
++ of_node_put(args.np);
+ rtc->gpbr_offset = args.args[0];
+ if (IS_ERR(rtc->gpbr)) {
+ dev_err(&pdev->dev, "failed to retrieve gpbr regmap, aborting.\n");
+diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
+index 3cdc015692ca63..1a65a4e0dc0035 100644
+--- a/drivers/rtc/rtc-brcmstb-waketimer.c
++++ b/drivers/rtc/rtc-brcmstb-waketimer.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright © 2014-2017 Broadcom
++ * Copyright © 2014-2023 Broadcom
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -34,6 +34,7 @@ struct brcmstb_waketmr {
+ u32 rate;
+ unsigned long rtc_alarm;
+ bool alarm_en;
++ bool alarm_expired;
+ };
+
+ #define BRCMSTB_WKTMR_EVENT 0x00
+@@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
+ writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
+ writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
+ (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
++ if (timer->alarm_expired) {
++ timer->alarm_expired = false;
++ /* maintain call balance */
++ enable_irq(timer->alarm_irq);
++ }
+ }
+
+ static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
+@@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
+ return IRQ_HANDLED;
+
+ if (timer->alarm_en) {
+- if (!device_may_wakeup(timer->dev))
++ if (device_may_wakeup(timer->dev)) {
++ disable_irq_nosync(irq);
++ timer->alarm_expired = true;
++ } else {
+ writel_relaxed(WKTMR_ALARM_EVENT,
+ timer->base + BRCMSTB_WKTMR_EVENT);
++ }
+ rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
++ } else {
++ writel_relaxed(WKTMR_ALARM_EVENT,
++ timer->base + BRCMSTB_WKTMR_EVENT);
+ }
+
+ return IRQ_HANDLED;
+@@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev,
+ !brcmstb_waketmr_is_pending(timer))
+ return -EINVAL;
+ timer->alarm_en = true;
+- if (timer->alarm_irq)
++ if (timer->alarm_irq) {
++ if (timer->alarm_expired) {
++ timer->alarm_expired = false;
++ /* maintain call balance */
++ enable_irq(timer->alarm_irq);
++ }
+ enable_irq(timer->alarm_irq);
++ }
+ } else if (!enabled && timer->alarm_en) {
+ if (timer->alarm_irq)
+ disable_irq(timer->alarm_irq);
+@@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev)
+ return brcmstb_waketmr_prepare_suspend(timer);
+ }
+
++static int brcmstb_waketmr_suspend_noirq(struct device *dev)
++{
++ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
++
++ /* Catch any alarms occurring prior to noirq */
++ if (timer->alarm_expired && device_may_wakeup(dev))
++ return -EBUSY;
++
++ return 0;
++}
++
+ static int brcmstb_waketmr_resume(struct device *dev)
+ {
+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+@@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev)
+
+ return ret;
+ }
++#else
++#define brcmstb_waketmr_suspend NULL
++#define brcmstb_waketmr_suspend_noirq NULL
++#define brcmstb_waketmr_resume NULL
+ #endif /* CONFIG_PM_SLEEP */
+
+-static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
+- brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
++static const struct dev_pm_ops brcmstb_waketmr_pm_ops = {
++ .suspend = brcmstb_waketmr_suspend,
++ .suspend_noirq = brcmstb_waketmr_suspend_noirq,
++ .resume = brcmstb_waketmr_resume,
++};
+
+ static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
+ { .compatible = "brcm,brcmstb-waketimer" },
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 228fb2d11c7091..35dca2accbb8df 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -231,7 +231,7 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
+ if (!pm_trace_rtc_valid())
+ return -EIO;
+
+- ret = mc146818_get_time(t);
++ ret = mc146818_get_time(t, 1000);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+ return ret;
+@@ -292,7 +292,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+
+ /* This not only a rtc_op, but also called directly */
+ if (!is_valid_irq(cmos->irq))
+- return -EIO;
++ return -ETIMEDOUT;
+
+ /* Basic alarms only support hour, minute, and seconds fields.
+ * Some also support day and month, for alarms up to a year in
+@@ -307,7 +307,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ *
+ * Use the mc146818_avoid_UIP() function to avoid this.
+ */
+- if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
++ if (!mc146818_avoid_UIP(cmos_read_alarm_callback, 10, &p))
+ return -EIO;
+
+ if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+@@ -556,8 +556,8 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ *
+ * Use mc146818_avoid_UIP() to avoid this.
+ */
+- if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
+- return -EIO;
++ if (!mc146818_avoid_UIP(cmos_set_alarm_callback, 10, &p))
++ return -ETIMEDOUT;
+
+ cmos->alarm_expires = rtc_tm_to_time64(&t->time);
+
+@@ -643,11 +643,10 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
+ size_t count)
+ {
+ unsigned char *buf = val;
+- int retval;
+
+ off += NVRAM_OFFSET;
+ spin_lock_irq(&rtc_lock);
+- for (retval = 0; count; count--, off++, retval++) {
++ for (; count; count--, off++) {
+ if (off < 128)
+ *buf++ = CMOS_READ(off);
+ else if (can_bank2)
+@@ -657,7 +656,7 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
+ }
+ spin_unlock_irq(&rtc_lock);
+
+- return retval;
++ return count ? -EIO : 0;
+ }
+
+ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+@@ -665,7 +664,6 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ {
+ struct cmos_rtc *cmos = priv;
+ unsigned char *buf = val;
+- int retval;
+
+ /* NOTE: on at least PCs and Ataris, the boot firmware uses a
+ * checksum on part of the NVRAM data. That's currently ignored
+@@ -674,7 +672,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ */
+ off += NVRAM_OFFSET;
+ spin_lock_irq(&rtc_lock);
+- for (retval = 0; count; count--, off++, retval++) {
++ for (; count; count--, off++) {
+ /* don't trash RTC registers */
+ if (off == cmos->day_alrm
+ || off == cmos->mon_alrm
+@@ -689,7 +687,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
+ }
+ spin_unlock_irq(&rtc_lock);
+
+- return retval;
++ return count ? -EIO : 0;
+ }
+
+ /*----------------------------------------------------------------*/
+@@ -818,18 +816,24 @@ static void rtc_wake_off(struct device *dev)
+ }
+
+ #ifdef CONFIG_X86
+-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
+ static void use_acpi_alarm_quirks(void)
+ {
+- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (dmi_get_bios_year() < 2015)
++ return;
++ break;
++ case X86_VENDOR_AMD:
++ case X86_VENDOR_HYGON:
++ if (dmi_get_bios_year() < 2021)
++ return;
++ break;
++ default:
+ return;
+-
++ }
+ if (!is_hpet_enabled())
+ return;
+
+- if (dmi_get_bios_year() < 2015)
+- return;
+-
+ use_acpi_alarm = true;
+ }
+ #else
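
The reworked use_acpi_alarm_quirks() above gates the ACPI alarm path by vendor and BIOS year: Intel from 2015, AMD and Hygon from 2021, no one else, and only with the HPET enabled. The decision table as a standalone function (vendor constants invented for illustration):

#include <stdbool.h>
#include <stdio.h>

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_HYGON, VENDOR_OTHER };

static bool want_acpi_alarm(enum vendor v, int bios_year, bool hpet_enabled)
{
	switch (v) {
	case VENDOR_INTEL:
		if (bios_year < 2015)
			return false;
		break;
	case VENDOR_AMD:
	case VENDOR_HYGON:
		if (bios_year < 2021)
			return false;
		break;
	default:
		return false;
	}
	return hpet_enabled;
}

int main(void)
{
	printf("intel/2016: %d\n", want_acpi_alarm(VENDOR_INTEL, 2016, true));
	printf("amd/2019:   %d\n", want_acpi_alarm(VENDOR_AMD, 2019, true));
	printf("hygon/2022: %d\n", want_acpi_alarm(VENDOR_HYGON, 2022, true));
	return 0;
}
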
+diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
+index e50c23ee1646a5..206f96b90f58bc 100644
+--- a/drivers/rtc/rtc-isl1208.c
++++ b/drivers/rtc/rtc-isl1208.c
+@@ -775,14 +775,13 @@ static int isl1208_nvmem_read(void *priv, unsigned int off, void *buf,
+ {
+ struct isl1208_state *isl1208 = priv;
+ struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent);
+- int ret;
+
+ /* nvmem sanitizes offset/count for us, but count==0 is possible */
+ if (!count)
+ return count;
+- ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf,
++
++ return isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf,
+ count);
+- return ret == 0 ? count : ret;
+ }
+
+ static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf,
+@@ -790,15 +789,13 @@ static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf,
+ {
+ struct isl1208_state *isl1208 = priv;
+ struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent);
+- int ret;
+
+ /* nvmem sanitizes off/count for us, but count==0 is possible */
+ if (!count)
+ return count;
+- ret = isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf,
+- count);
+
+- return ret == 0 ? count : ret;
++ return isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf,
++ count);
+ }
+
+ static const struct nvmem_config isl1208_nvmem_config = {
+diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
+index f1c09f1db044c8..651bf3c279c746 100644
+--- a/drivers/rtc/rtc-mc146818-lib.c
++++ b/drivers/rtc/rtc-mc146818-lib.c
+@@ -8,26 +8,31 @@
+ #include <linux/acpi.h>
+ #endif
+
++#define UIP_RECHECK_DELAY 100 /* usec */
++#define UIP_RECHECK_DELAY_MS (USEC_PER_MSEC / UIP_RECHECK_DELAY)
++#define UIP_RECHECK_LOOPS_MS(x) (x / UIP_RECHECK_DELAY_MS)
++
+ /*
+ * Execute a function while the UIP (Update-in-progress) bit of the RTC is
+- * unset.
++ * unset. The timeout is configurable by the caller in ms.
+ *
+ * Warning: callback may be executed more than once.
+ */
+ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++ int timeout,
+ void *param)
+ {
+ int i;
+ unsigned long flags;
+ unsigned char seconds;
+
+- for (i = 0; i < 100; i++) {
++ for (i = 0; UIP_RECHECK_LOOPS_MS(i) < timeout; i++) {
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ /*
+ * Check whether there is an update in progress during which the
+ * readout is unspecified. The maximum update time is ~2ms. Poll
+- * every 100 usec for completion.
++ * for completion.
+ *
+ * Store the second value before checking UIP so a long lasting
+ * NMI which happens to hit after the UIP check cannot make
+@@ -37,7 +42,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+- udelay(100);
++ udelay(UIP_RECHECK_DELAY);
+ continue;
+ }
+
+@@ -56,7 +61,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+- udelay(100);
++ udelay(UIP_RECHECK_DELAY);
+ continue;
+ }
+
+@@ -72,6 +77,10 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
++ if (UIP_RECHECK_LOOPS_MS(i) >= 100)
++ pr_warn("Reading current time from RTC took around %li ms\n",
++ UIP_RECHECK_LOOPS_MS(i));
++
+ return true;
+ }
+ return false;
+@@ -84,7 +93,7 @@ EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
+ */
+ bool mc146818_does_rtc_work(void)
+ {
+- return mc146818_avoid_UIP(NULL, NULL);
++ return mc146818_avoid_UIP(NULL, 1000, NULL);
+ }
+ EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
+
+@@ -130,15 +139,27 @@ static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
+ p->ctrl = CMOS_READ(RTC_CONTROL);
+ }
+
+-int mc146818_get_time(struct rtc_time *time)
++/**
++ * mc146818_get_time - Get the current time from the RTC
++ * @time: pointer to struct rtc_time to store the current time
++ * @timeout: timeout value in ms
++ *
++ * This function reads the current time from the RTC and stores it in the
++ * provided struct rtc_time. The timeout parameter specifies the maximum
++ * time to wait for the RTC to become ready.
++ *
++ * Return: 0 on success, -ETIMEDOUT if the RTC did not become ready within
++ * the specified timeout, or another error code if an error occurred.
++ */
++int mc146818_get_time(struct rtc_time *time, int timeout)
+ {
+ struct mc146818_get_time_callback_param p = {
+ .time = time
+ };
+
+- if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
++ if (!mc146818_avoid_UIP(mc146818_get_time_callback, timeout, &p)) {
+ memset(time, 0, sizeof(*time));
+- return -EIO;
++ return -ETIMEDOUT;
+ }
+
+ if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
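
The new UIP macros encode simple poll arithmetic: one poll every 100 µs means 10 polls per millisecond, so i / UIP_RECHECK_DELAY_MS converts the loop counter back to elapsed milliseconds for comparison against the caller's timeout. A standalone check of those bounds:

#include <stdio.h>

#define USEC_PER_MSEC 1000
#define UIP_RECHECK_DELAY 100 /* usec */
#define UIP_RECHECK_DELAY_MS (USEC_PER_MSEC / UIP_RECHECK_DELAY)
#define UIP_RECHECK_LOOPS_MS(x) ((x) / UIP_RECHECK_DELAY_MS)

int main(void)
{
	int timeout_ms = 1000, i;

	for (i = 0; UIP_RECHECK_LOOPS_MS(i) < timeout_ms; i++)
		; /* each iteration stands for one 100 µs poll */

	printf("polls attempted: %d (= %d ms)\n", i, UIP_RECHECK_LOOPS_MS(i));
	return 0;
}
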
+diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
+index ed4e606be8e58d..c4533c0f538967 100644
+--- a/drivers/rtc/rtc-nct3018y.c
++++ b/drivers/rtc/rtc-nct3018y.c
+@@ -99,6 +99,8 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
+ if (flags < 0)
+ return flags;
+ *alarm_enable = flags & NCT3018Y_BIT_AIE;
++ dev_dbg(&client->dev, "%s:alarm_enable:%x\n", __func__, *alarm_enable);
++
+ }
+
+ if (alarm_flag) {
+@@ -107,11 +109,9 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
+ if (flags < 0)
+ return flags;
+ *alarm_flag = flags & NCT3018Y_BIT_AF;
++ dev_dbg(&client->dev, "%s:alarm_flag:%x\n", __func__, *alarm_flag);
+ }
+
+- dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
+- __func__, *alarm_enable, *alarm_flag);
+-
+ return 0;
+ }
+
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index 06194674d71c57..540042b9eec8f5 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -438,7 +438,7 @@ static int pcf85363_probe(struct i2c_client *client)
+ if (client->irq > 0 || wakeup_source) {
+ regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+ regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
++ PIN_IO_INTAPM, PIN_IO_INTA_OUT);
+ }
+
+ if (client->irq > 0) {
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 215597f73be4f3..0bd880f5475b13 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -8,9 +8,6 @@
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+-#define KMSG_COMPONENT "dasd"
+-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+-
+ #include <linux/kmod.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -674,18 +671,20 @@ static void dasd_profile_start(struct dasd_block *block,
+ * we count each request only once.
+ */
+ device = cqr->startdev;
+- if (device->profile.data) {
+- counter = 1; /* request is not yet queued on the start device */
+- list_for_each(l, &device->ccw_queue)
+- if (++counter >= 31)
+- break;
+- }
++ if (!device->profile.data)
++ return;
++
++ spin_lock(get_ccwdev_lock(device->cdev));
++ counter = 1; /* request is not yet queued on the start device */
++ list_for_each(l, &device->ccw_queue)
++ if (++counter >= 31)
++ break;
++ spin_unlock(get_ccwdev_lock(device->cdev));
++
+ spin_lock(&device->profile.lock);
+- if (device->profile.data) {
+- device->profile.data->dasd_io_nr_req[counter]++;
+- if (rq_data_dir(req) == READ)
+- device->profile.data->dasd_read_nr_req[counter]++;
+- }
++ device->profile.data->dasd_io_nr_req[counter]++;
++ if (rq_data_dir(req) == READ)
++ device->profile.data->dasd_read_nr_req[counter]++;
+ spin_unlock(&device->profile.lock);
+ }
+
+@@ -1600,9 +1599,15 @@ static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
+ if (!sense)
+ return 0;
+
+- return !!(sense[1] & SNS1_NO_REC_FOUND) ||
+- !!(sense[1] & SNS1_FILE_PROTECTED) ||
+- scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
++ if (sense[1] & SNS1_NO_REC_FOUND)
++ return 1;
++
++ if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
++ scsw_is_tm(&irb->scsw) &&
++ !(sense[2] & SNS2_ENV_DATA_PRESENT))
++ return 1;
++
++ return 0;
+ }
+
+ static int dasd_ese_oos_cond(u8 *sense)
+@@ -1623,7 +1628,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct dasd_device *device;
+ unsigned long now;
+ int nrf_suppressed = 0;
+- int fp_suppressed = 0;
++ int it_suppressed = 0;
+ struct request *req;
+ u8 *sense = NULL;
+ int expires;
+@@ -1678,8 +1683,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ */
+ sense = dasd_get_sense(irb);
+ if (sense) {
+- fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+- test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
++ it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++ !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++ test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+@@ -1694,7 +1700,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ return;
+ }
+ }
+- if (!(fp_suppressed || nrf_suppressed))
++ if (!(it_suppressed || nrf_suppressed))
+ device->discipline->dump_sense_dbf(device, irb, "int");
+
+ if (device->features & DASD_FEATURE_ERPLOG)
+@@ -2466,14 +2472,17 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+ rc = 0;
+ list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ /*
+- * In some cases the 'File Protected' or 'Incorrect Length'
+- * error might be expected and error recovery would be
+- * unnecessary in these cases. Check if the according suppress
+- * bit is set.
++ * In some cases certain errors might be expected and
++ * error recovery would be unnecessary in these cases.
++ * Check if the according suppress bit is set.
+ */
+ sense = dasd_get_sense(&cqr->irb);
+- if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+- test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
++ if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++ !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++ test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
++ continue;
++ if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
++ test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
+ continue;
+ if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+ test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+@@ -3404,8 +3413,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+
+ ret = ccw_device_set_online(cdev);
+ if (ret)
+- pr_warn("%s: Setting the DASD online failed with rc=%d\n",
+- dev_name(&cdev->dev), ret);
++ dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
+ }
+
+ /*
+@@ -3492,8 +3500,11 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ {
+ struct dasd_discipline *discipline;
+ struct dasd_device *device;
++ struct device *dev;
+ int rc;
+
++ dev = &cdev->dev;
++
+ /* first online clears initial online feature flag */
+ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
+ device = dasd_create_device(cdev);
+@@ -3506,11 +3517,10 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ /* Try to load the required module. */
+ rc = request_module(DASD_DIAG_MOD);
+ if (rc) {
+- pr_warn("%s Setting the DASD online failed "
+- "because the required module %s "
+- "could not be loaded (rc=%d)\n",
+- dev_name(&cdev->dev), DASD_DIAG_MOD,
+- rc);
++ dev_warn(dev, "Setting the DASD online failed "
++ "because the required module %s "
++ "could not be loaded (rc=%d)\n",
++ DASD_DIAG_MOD, rc);
+ dasd_delete_device(device);
+ return -ENODEV;
+ }
+@@ -3518,8 +3528,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ /* Module init could have failed, so check again here after
+ * request_module(). */
+ if (!dasd_diag_discipline_pointer) {
+- pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
+- dev_name(&cdev->dev));
++ dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
+ dasd_delete_device(device);
+ return -ENODEV;
+ }
+@@ -3529,37 +3538,33 @@ int dasd_generic_set_online(struct ccw_device *cdev,
+ dasd_delete_device(device);
+ return -EINVAL;
+ }
++ device->base_discipline = base_discipline;
+ if (!try_module_get(discipline->owner)) {
+- module_put(base_discipline->owner);
+ dasd_delete_device(device);
+ return -EINVAL;
+ }
+- device->base_discipline = base_discipline;
+ device->discipline = discipline;
+
+ /* check_device will allocate block device if necessary */
+ rc = discipline->check_device(device);
+ if (rc) {
+- pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
+- dev_name(&cdev->dev), discipline->name, rc);
+- module_put(discipline->owner);
+- module_put(base_discipline->owner);
++ dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
++ discipline->name, rc);
+ dasd_delete_device(device);
+ return rc;
+ }
+
+ dasd_set_target_state(device, DASD_STATE_ONLINE);
+ if (device->state <= DASD_STATE_KNOWN) {
+- pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
+- dev_name(&cdev->dev));
++ dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
+ rc = -ENODEV;
+ dasd_set_target_state(device, DASD_STATE_NEW);
+ if (device->block)
+ dasd_free_block(device->block);
+ dasd_delete_device(device);
+- } else
+- pr_debug("dasd_generic device %s found\n",
+- dev_name(&cdev->dev));
++ } else {
++ dev_dbg(dev, "dasd_generic device found\n");
++ }
+
+ wait_event(dasd_init_waitq, _wait_for_device(device));
+
+@@ -3570,10 +3575,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+
+ int dasd_generic_set_offline(struct ccw_device *cdev)
+ {
++ int max_count, open_count, rc;
+ struct dasd_device *device;
+ struct dasd_block *block;
+- int max_count, open_count, rc;
+ unsigned long flags;
++ struct device *dev;
++
++ dev = &cdev->dev;
+
+ rc = 0;
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+@@ -3594,11 +3602,10 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
+ open_count = atomic_read(&device->block->open_count);
+ if (open_count > max_count) {
+ if (open_count > 0)
+- pr_warn("%s: The DASD cannot be set offline with open count %i\n",
+- dev_name(&cdev->dev), open_count);
++ dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
++ open_count);
+ else
+- pr_warn("%s: The DASD cannot be set offline while it is in use\n",
+- dev_name(&cdev->dev));
++ dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
+ rc = -EBUSY;
+ goto out_err;
+ }
+@@ -3958,8 +3965,8 @@ static int dasd_handle_autoquiesce(struct dasd_device *device,
+ if (dasd_eer_enabled(device))
+ dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
+
+- pr_info("%s: The DASD has been put in the quiesce state\n",
+- dev_name(&device->cdev->dev));
++ dev_info(&device->cdev->dev,
++ "The DASD has been put in the quiesce state\n");
+ dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
+
+ if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index 89957bb7244d26..07f886029a358c 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -1406,14 +1406,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+
+ struct dasd_device *device = erp->startdev;
+
+- /*
+- * In some cases the 'File Protected' error might be expected and
+- * log messages shouldn't be written then.
+- * Check if the according suppress bit is set.
+- */
+- if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+- dev_err(&device->cdev->dev,
+- "Accessing the DASD failed because of a hardware error\n");
++ dev_err(&device->cdev->dev,
++ "Accessing the DASD failed because of a hardware error\n");
+
+ return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+
+diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
+index c4e36650c42649..91522dba9fd989 100644
+--- a/drivers/s390/block/dasd_devmap.c
++++ b/drivers/s390/block/dasd_devmap.c
+@@ -2258,13 +2258,19 @@ static ssize_t dasd_copy_pair_store(struct device *dev,
+
+ /* allocate primary devmap if needed */
+ prim_devmap = dasd_find_busid(prim_busid);
+- if (IS_ERR(prim_devmap))
++ if (IS_ERR(prim_devmap)) {
+ prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
++ if (IS_ERR(prim_devmap))
++ return PTR_ERR(prim_devmap);
++ }
+
+ /* allocate secondary devmap if needed */
+ sec_devmap = dasd_find_busid(sec_busid);
+- if (IS_ERR(sec_devmap))
++ if (IS_ERR(sec_devmap)) {
+ sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
++ if (IS_ERR(sec_devmap))
++ return PTR_ERR(sec_devmap);
++ }
+
+ /* setting copy relation is only allowed for offline secondary */
+ if (sec_devmap->device)
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index 2e4e555b37c332..12db1046aad0f3 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -639,7 +639,6 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ }
+
+ static int dasd_diag_pe_handler(struct dasd_device *device,
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index bd89b032968a4b..d9fb7f097b7e53 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -2289,6 +2289,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
++ set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+
+ return cqr;
+ }
+@@ -2570,7 +2571,6 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+
+ return cqr;
+@@ -4146,8 +4146,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev)) {
+- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+- set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ }
+
+@@ -4649,9 +4647,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev)) {
+- set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+- set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
++ set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ }
+
+ return cqr;
+@@ -5820,36 +5817,32 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
+ {
+ u8 *sense = dasd_get_sense(irb);
+
+- if (scsw_is_tm(&irb->scsw)) {
+- /*
+- * In some cases the 'File Protected' or 'Incorrect Length'
+- * error might be expected and log messages shouldn't be written
+- * then. Check if the according suppress bit is set.
+- */
+- if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+- test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+- return;
+- if (scsw_cstat(&irb->scsw) == 0x40 &&
+- test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+- return;
++ /*
++ * In some cases certain errors might be expected and
++ * log messages shouldn't be written then.
++ * Check if the according suppress bit is set.
++ */
++ if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++ !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++ test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
++ return;
+
+- dasd_eckd_dump_sense_tcw(device, req, irb);
+- } else {
+- /*
+- * In some cases the 'Command Reject' or 'No Record Found'
+- * error might be expected and log messages shouldn't be
+- * written then. Check if the according suppress bit is set.
+- */
+- if (sense && sense[0] & SNS0_CMD_REJECT &&
+- test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
+- return;
++ if (sense && sense[0] & SNS0_CMD_REJECT &&
++ test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
++ return;
+
+- if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+- test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+- return;
++ if (sense && sense[1] & SNS1_NO_REC_FOUND &&
++ test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
++ return;
+
++ if (scsw_cstat(&irb->scsw) == 0x40 &&
++ test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
++ return;
++
++ if (scsw_is_tm(&irb->scsw))
++ dasd_eckd_dump_sense_tcw(device, req, irb);
++ else
+ dasd_eckd_dump_sense_ccw(device, req, irb);
+- }
+ }
+
+ static int dasd_eckd_reload_device(struct dasd_device *device)
+@@ -6895,7 +6888,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ }
+
+ static struct ccw_driver dasd_eckd_driver = {
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index 8a4dbe9d774113..fa5e070fd0c1cb 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -225,7 +225,7 @@ struct dasd_ccw_req {
+ * The following flags are used to suppress output of certain errors.
+ */
+ #define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */
+-#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/
++#define DASD_CQR_SUPPRESS_IT 5 /* Suppress 'Invalid Track' error*/
+ #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
+ #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
+
+diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
+index 3a9cc8a4a23022..ade95e91b3c8db 100644
+--- a/drivers/s390/block/scm_blk.c
++++ b/drivers/s390/block/scm_blk.c
+@@ -17,6 +17,7 @@
+ #include <linux/blk-mq.h>
+ #include <linux/slab.h>
+ #include <linux/list.h>
++#include <linux/io.h>
+ #include <asm/eadm.h>
+ #include "scm_blk.h"
+
+@@ -130,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
+
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ msb = &scmrq->aob->msb[i];
+- aidaw = msb->data_addr;
++ aidaw = (u64)phys_to_virt(msb->data_addr);
+
+ if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+ IS_ALIGNED(aidaw, PAGE_SIZE))
+@@ -195,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
+ msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
+ msb->flags |= MSB_FLAG_IDA;
+- msb->data_addr = (u64) aidaw;
++ msb->data_addr = (u64)virt_to_phys(aidaw);
+
+ rq_for_each_segment(bv, req, iter) {
+ WARN_ON(bv.bv_offset);
+ msb->blk_count += bv.bv_len >> 12;
+- aidaw->data_addr = (u64) page_address(bv.bv_page);
++ aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ aidaw++;
+ }
+
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index 8f74db689a0c22..6fa0fb35e5210f 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -1195,7 +1195,8 @@ sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
+ }
+
+ static struct notifier_block sclp_reboot_notifier = {
+- .notifier_call = sclp_reboot_event
++ .notifier_call = sclp_reboot_event,
++ .priority = INT_MIN,
+ };
+
+ static ssize_t con_pages_show(struct device_driver *dev, char *buf)
+@@ -1293,6 +1294,7 @@ sclp_init(void)
+ fail_unregister_reboot_notifier:
+ unregister_reboot_notifier(&sclp_reboot_notifier);
+ fail_init_state_uninitialized:
++ list_del(&sclp_state_change_event.list);
+ sclp_init_state = sclp_init_state_uninitialized;
+ free_page((unsigned long) sclp_read_sccb);
+ free_page((unsigned long) sclp_init_sccb);
+diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c
+index f9e164be7568f2..944e75beb160c6 100644
+--- a/drivers/s390/char/sclp_sd.c
++++ b/drivers/s390/char/sclp_sd.c
+@@ -320,8 +320,14 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di)
+ &esize);
+ if (rc) {
+ /* Cancel running request if interrupted */
+- if (rc == -ERESTARTSYS)
+- sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL);
++ if (rc == -ERESTARTSYS) {
++ if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) {
++ pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n",
++ (size_t)dsize * PAGE_SIZE);
++ data = NULL;
++ asce = 0;
++ }
++ }
+ vfree(data);
+ goto out;
+ }
+diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
+index 218ae604f737ff..33b9c968dbcba6 100644
+--- a/drivers/s390/char/sclp_vt220.c
++++ b/drivers/s390/char/sclp_vt220.c
+@@ -319,7 +319,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
+ buffer = (void *) ((addr_t) sccb + sccb->header.length);
+
+ if (convertlf) {
+- /* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
++ /* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/
+ for (from=0, to=0;
+ (from < count) && (to < sclp_vt220_space_left(request));
+ from++) {
+@@ -328,8 +328,8 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
+ /* Perform conversion */
+ if (c == 0x0a) {
+ if (to + 1 < sclp_vt220_space_left(request)) {
+- ((unsigned char *) buffer)[to++] = c;
+ ((unsigned char *) buffer)[to++] = 0x0d;
++ ((unsigned char *) buffer)[to++] = c;
+ } else
+ break;
+
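
The sclp_vt220 hunk swaps the emitted pair: a terminal expects carriage return before line feed, so 0x0a must become 0x0d 0x0a. The conversion loop as standalone C, keeping the same no-room-for-the-pair bounds check:

#include <stdio.h>

static size_t convert_lf(const char *src, size_t count, char *dst, size_t space)
{
	size_t from, to;

	for (from = 0, to = 0; from < count && to < space; from++) {
		char c = src[from];

		if (c == '\n') {
			if (to + 1 < space) {
				dst[to++] = '\r';  /* 0x0d first ... */
				dst[to++] = c;     /* ... then 0x0a */
			} else {
				break;             /* no room for the pair */
			}
		} else {
			dst[to++] = c;
		}
	}
	return to;
}

int main(void)
{
	char out[32];
	size_t n = convert_lf("one\ntwo\n", 8, out, sizeof(out));

	fwrite(out, 1, n, stdout);
	return 0;
}
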
+diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c
+index 8613fa937237bd..a2e771ebae8ebd 100644
+--- a/drivers/s390/cio/cio_inject.c
++++ b/drivers/s390/cio/cio_inject.c
+@@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf,
+ return -EINVAL;
+ }
+
+- buffer = vmemdup_user(buf, lbuf);
++ buffer = memdup_user_nul(buf, lbuf);
+ if (IS_ERR(buffer))
+ return -ENOMEM;
+
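
cio_inject switches to memdup_user_nul() because the buffer is later parsed with string helpers, which need a terminating NUL that vmemdup_user() does not provide. A userspace analogue with a hypothetical dup_buf_nul() helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_buf_nul(const char *buf, size_t len)
{
	char *p = malloc(len + 1);     /* one extra byte for the terminator */

	if (!p)
		return NULL;
	memcpy(p, buf, len);
	p[len] = '\0';                 /* safe to hand to strtoul/sscanf now */
	return p;
}

int main(void)
{
	const char raw[4] = { '0', 'x', '2', 'a' };  /* not NUL-terminated */
	char *s = dup_buf_nul(raw, sizeof(raw));
	unsigned long v = s ? strtoul(s, NULL, 0) : 0;

	printf("parsed %lu\n", v);
	free(s);
	return 0;
}
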
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 4ca5adce91079b..57e0050dbaa538 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
+
+ spin_lock_irq(cdev->ccwlock);
+ ret = ccw_device_online(cdev);
+- spin_unlock_irq(cdev->ccwlock);
+- if (ret == 0)
+- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+- else {
++ if (ret) {
++ spin_unlock_irq(cdev->ccwlock);
+ CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
+ put_device(&cdev->dev);
+ return ret;
+ }
+- spin_lock_irq(cdev->ccwlock);
++ /* Wait until a final state is reached */
++ while (!dev_fsm_final_state(cdev)) {
++ spin_unlock_irq(cdev->ccwlock);
++ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
++ spin_lock_irq(cdev->ccwlock);
++ }
+ /* Check if online processing was successful */
+ if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ (cdev->private->state != DEV_STATE_W4SENSE)) {
+diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
+index c533d1dadc6bbb..a5dba3829769c7 100644
+--- a/drivers/s390/cio/device_ops.c
++++ b/drivers/s390/cio/device_ops.c
+@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+- if (cdev->private->state == DEV_STATE_VERIFY) {
++ if (cdev->private->state == DEV_STATE_VERIFY ||
++ cdev->private->flags.doverify) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ }
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+- !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+- cdev->private->flags.doverify)
++ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
+ return -EBUSY;
+ ret = cio_set_options (sch, flags);
+ if (ret)
+diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
+index 45f9c0736be4fd..e5f28370a9039d 100644
+--- a/drivers/s390/cio/idset.c
++++ b/drivers/s390/cio/idset.c
+@@ -16,20 +16,21 @@ struct idset {
+ unsigned long bitmap[];
+ };
+
+-static inline unsigned long bitmap_size(int num_ssid, int num_id)
++static inline unsigned long idset_bitmap_size(int num_ssid, int num_id)
+ {
+- return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
++ return bitmap_size(size_mul(num_ssid, num_id));
+ }
+
+ static struct idset *idset_new(int num_ssid, int num_id)
+ {
+ struct idset *set;
+
+- set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
++ set = vmalloc(sizeof(struct idset) +
++ idset_bitmap_size(num_ssid, num_id));
+ if (set) {
+ set->num_ssid = num_ssid;
+ set->num_id = num_id;
+- memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
++ memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id));
+ }
+ return set;
+ }
+@@ -41,7 +42,8 @@ void idset_free(struct idset *set)
+
+ void idset_fill(struct idset *set)
+ {
+- memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
++ memset(set->bitmap, 0xff,
++ idset_bitmap_size(set->num_ssid, set->num_id));
+ }
+
+ static inline void idset_add(struct idset *set, int ssid, int id)
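
The idset rename exists because a generic bitmap_size(nbits) helper (bytes needed for nbits bits, rounded up to whole longs) now lives in the core headers. The arithmetic as standalone C; note that the kernel version additionally guards the num_ssid * num_id multiply with size_mul(), which this plain sketch omits:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define BITMAP_SIZE(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))

int main(void)
{
	unsigned long num_ssid = 4, num_id = 65536;

	printf("bitmap bytes for %lu ids: %zu\n",
	       num_ssid * num_id, BITMAP_SIZE(num_ssid * num_id));
	return 0;
}
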
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 9cde55730b65a7..ebcb535809882f 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+ lgr_info_log();
+ }
+
+-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+- int dstat)
++static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
++ int dstat, int dcc)
+ {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
+
+@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ goto error;
+ if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
+ goto error;
++ if (dcc == 1)
++ return -EAGAIN;
+ if (!(dstat & DEV_STAT_DEV_END))
+ goto error;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+- return;
++ return 0;
+
+ error:
+ DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
++ return -EIO;
+ }
+
+ /* qdio interrupt handler */
+@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ {
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
+- int cstat, dstat;
++ int cstat, dstat, rc, dcc;
+
+ if (!intparm || !irq_ptr) {
+ ccw_device_get_schid(cdev, &schid);
+@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ qdio_irq_check_sense(irq_ptr, irb);
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
++ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
++ rc = 0;
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_INACTIVE:
+- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
++ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
+ break;
+ case QDIO_IRQ_STATE_CLEANUP:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ if (cstat || dstat)
+ qdio_handle_activate_check(irq_ptr, intparm, cstat,
+ dstat);
++ else if (dcc == 1)
++ rc = -EAGAIN;
+ break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
++
++ if (rc == -EAGAIN) {
++ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
++ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
++ if (!rc)
++ return;
++ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
++ DBF_ERROR("rc:%4x", rc);
++ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
++ }
++
+ wake_up(&cdev->private->wait_q);
+ }
+
+diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
+index 86993de253451a..a4c5c6736b3107 100644
+--- a/drivers/s390/cio/trace.h
++++ b/drivers/s390/cio/trace.h
+@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib,
+ __entry->devno = schib->pmcw.dev;
+ __entry->schib = *schib;
+ __entry->pmcw_ena = schib->pmcw.ena;
+- __entry->pmcw_st = schib->pmcw.ena;
++ __entry->pmcw_st = schib->pmcw.st;
+ __entry->pmcw_dnv = schib->pmcw.dnv;
+ __entry->pmcw_dev = schib->pmcw.dev;
+ __entry->pmcw_lpm = schib->pmcw.lpm;
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 339812efe82213..93351452184ab5 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1022,6 +1022,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
+
+ void ap_bus_force_rescan(void)
+ {
++ /* Only trigger AP bus scans after the initial scan is done */
++ if (atomic64_read(&ap_scan_bus_count) <= 0)
++ return;
++
+ /* processing an asynchronous bus rescan */
+ del_timer(&ap_config_timer);
+ queue_work(system_long_wq, &ap_scan_work);
+@@ -1094,7 +1098,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+ */
+ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
+ {
+- int a, i, z;
++ unsigned long a, i, z;
+ char *np, sign;
+
+ /* bits needs to be a multiple of 8 */
+@@ -1865,15 +1869,18 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ }
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+- if (decfg)
++ if (decfg) {
+ AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
+ __func__, ac->id, dom);
+- else if (chkstop)
++ } else if (chkstop) {
+ AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
+ __func__, ac->id, dom);
+- else
++ } else {
++ /* nudge the queue's state machine */
++ ap_queue_init_state(aq);
+ AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
+ __func__, ac->id, dom);
++ }
+ goto put_dev_and_continue;
+ }
+ /* handle state changes on already existing queue device */
+@@ -1895,10 +1902,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ } else if (!chkstop && aq->chkstop) {
+ /* checkstop off */
+ aq->chkstop = false;
+- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+- aq->dev_state = AP_DEV_STATE_OPERATING;
+- aq->sm_state = AP_SM_STATE_RESET_START;
+- }
++ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
++ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
+ __func__, ac->id, dom);
+@@ -1922,10 +1927,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
+ } else if (!decfg && !aq->config) {
+ /* config on this queue device */
+ aq->config = true;
+- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+- aq->dev_state = AP_DEV_STATE_OPERATING;
+- aq->sm_state = AP_SM_STATE_RESET_START;
+- }
++ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
++ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
+ __func__, ac->id, dom);
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index be54b070c0316c..3e34912a605066 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -287,6 +287,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+ void ap_queue_prepare_remove(struct ap_queue *aq);
+ void ap_queue_remove(struct ap_queue *aq);
+ void ap_queue_init_state(struct ap_queue *aq);
++void _ap_queue_init_state(struct ap_queue *aq);
+
+ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions, int ml);
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 1336e632adc4a3..2943b2529d3a03 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -1160,14 +1160,19 @@ void ap_queue_remove(struct ap_queue *aq)
+ spin_unlock_bh(&aq->lock);
+ }
+
+-void ap_queue_init_state(struct ap_queue *aq)
++void _ap_queue_init_state(struct ap_queue *aq)
+ {
+- spin_lock_bh(&aq->lock);
+ aq->dev_state = AP_DEV_STATE_OPERATING;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ aq->last_err_rc = 0;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
++}
++
++void ap_queue_init_state(struct ap_queue *aq)
++{
++ spin_lock_bh(&aq->lock);
++ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ }
+ EXPORT_SYMBOL(ap_queue_init_state);
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index 6cfb6b2340c997..d2ffdf2491da04 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1369,7 +1369,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ if (rc)
+ break;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+- return -EFAULT;
++ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ break;
+ }
+@@ -1404,7 +1404,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ if (rc)
+ break;
+ if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+- return -EFAULT;
++ rc = -EFAULT;
+ memzero_explicit(&kcp, sizeof(kcp));
+ break;
+ }
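
The pkey hunks replace early returns on copy_to_user() failure with rc = -EFAULT so the memzero_explicit() of key material runs on every path. A standalone sketch of the pattern; scrub() is a volatile-store stand-in for memzero_explicit():

#include <stdio.h>
#include <string.h>

#define EFAULT 14

static void scrub(void *p, size_t n)
{
	volatile unsigned char *vp = p;

	while (n--)
		*vp++ = 0;
}

static int copy_out(char *dst, const char *src, size_t n)
{
	if (!dst)
		return -1;          /* simulate a faulting user pointer */
	memcpy(dst, src, n);
	return 0;
}

static int handle_ioctl(char *user_buf)
{
	char key[32] = "secret key material";
	int rc = 0;

	if (copy_out(user_buf, key, sizeof(key)))
		rc = -EFAULT;       /* record the error, no early return */

	scrub(key, sizeof(key));    /* runs on success and failure alike */
	return rc;
}

int main(void)
{
	char buf[32];

	printf("ok path:   rc=%d\n", handle_ioctl(buf));
	printf("fail path: rc=%d\n", handle_ioctl(NULL));
	return 0;
}
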
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 4db538a5519255..d6ea2fd4c2a02b 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -32,7 +32,8 @@
+
+ #define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
+
+-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
++static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
++static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
+ static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+ static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
+@@ -457,6 +458,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
+ __func__, nisc, isc, q->apqn);
+
++ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ status.response_code = AP_RESPONSE_INVALID_GISA;
+ return status;
+ }
+@@ -661,17 +663,23 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+ * device driver.
+ *
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
++ * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
++ * guest's AP configuration that are still in the host's AP
++ * configuration.
+ *
+ * Note: If an APQN referencing a queue device that is not bound to the vfio_ap
+ * driver, its APID will be filtered from the guest's APCB. The matrix
+ * structure precludes filtering an individual APQN, so its APID will be
+- * filtered.
++ * filtered. Consequently, all queues associated with the adapter that
++ * are in the host's AP configuration must be reset. If queues are
++ * subsequently made available again to the guest, they should re-appear
++ * in a reset state.
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+-static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+- struct ap_matrix_mdev *matrix_mdev)
++static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long *apm_filtered)
+ {
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+@@ -681,6 +689,7 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
++ bitmap_clear(apm_filtered, 0, AP_DEVICES);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+@@ -692,8 +701,9 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+- for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+- for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
++ for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
++ AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+@@ -705,8 +715,16 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_status.response_code) {
+- clear_bit_inv(apid,
+- matrix_mdev->shadow_apcb.apm);
++ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
++
++ /*
++ * If the adapter was previously plugged into
++ * the guest, let's let the caller know that
++ * the APID was filtered.
++ */
++ if (test_bit_inv(apid, prev_shadow_apm))
++ set_bit_inv(apid, apm_filtered);
++
+ break;
+ }
+ }
+@@ -808,7 +826,7 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+- vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+@@ -918,6 +936,47 @@ static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ AP_MKQID(apid, apqi));
+ }
+
++static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long apid,
++ struct list_head *qlist)
++{
++ struct vfio_ap_queue *q;
++ unsigned long apqi;
++
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
++ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
++ if (q)
++ list_add_tail(&q->reset_qnode, qlist);
++ }
++}
++
++static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long apid)
++{
++ struct list_head qlist;
++
++ INIT_LIST_HEAD(&qlist);
++ collect_queues_to_reset(matrix_mdev, apid, &qlist);
++ vfio_ap_mdev_reset_qlist(&qlist);
++}
++
++static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long *apm_reset)
++{
++ struct list_head qlist;
++ unsigned long apid;
++
++ if (bitmap_empty(apm_reset, AP_DEVICES))
++ return 0;
++
++ INIT_LIST_HEAD(&qlist);
++
++ for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
++ collect_queues_to_reset(matrix_mdev, apid, &qlist);
++
++ return vfio_ap_mdev_reset_qlist(&qlist);
++}
++
+ /**
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
+@@ -958,7 +1017,7 @@ static ssize_t assign_adapter_store(struct device *dev,
+ {
+ int ret;
+ unsigned long apid;
+- DECLARE_BITMAP(apm_delta, AP_DEVICES);
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -987,12 +1046,11 @@ static ssize_t assign_adapter_store(struct device *dev,
+ }
+
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+- memset(apm_delta, 0, sizeof(apm_delta));
+- set_bit_inv(apid, apm_delta);
+
+- if (vfio_ap_mdev_filter_matrix(apm_delta,
+- matrix_mdev->matrix.aqm, matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+
+ ret = count;
+ done:
+@@ -1023,11 +1081,12 @@ static struct vfio_ap_queue
+ * adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+- * @qtable: table for storing queues associated with unassigned adapter.
++ * @qlist: list for storing queues associated with unassigned adapter that
++ * need to be reset.
+ */
+ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+- struct ap_queue_table *qtable)
++ struct list_head *qlist)
+ {
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+@@ -1035,11 +1094,10 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+- if (q && qtable) {
++ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+- hash_add(qtable->queues, &q->mdev_qnode,
+- q->apqn);
++ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+ }
+@@ -1047,26 +1105,23 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+ {
+- int loop_cursor;
+- struct vfio_ap_queue *q;
+- struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
+
+- hash_init(qtable->queues);
+- vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
++ INIT_LIST_HEAD(&qlist);
++ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+- vfio_ap_mdev_reset_queues(qtable);
++ vfio_ap_mdev_reset_qlist(&qlist);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+- hash_del(&q->mdev_qnode);
++ list_del(&q->reset_qnode);
+ }
+-
+- kfree(qtable);
+ }
+
+ /**
+@@ -1167,7 +1222,7 @@ static ssize_t assign_domain_store(struct device *dev,
+ {
+ int ret;
+ unsigned long apqi;
+- DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -1196,12 +1251,11 @@ static ssize_t assign_domain_store(struct device *dev,
+ }
+
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+- memset(aqm_delta, 0, sizeof(aqm_delta));
+- set_bit_inv(apqi, aqm_delta);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+- matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+
+ ret = count;
+ done:
+@@ -1214,7 +1268,7 @@ static DEVICE_ATTR_WO(assign_domain);
+
+ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+- struct ap_queue_table *qtable)
++ struct list_head *qlist)
+ {
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+@@ -1222,11 +1276,10 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+- if (q && qtable) {
++ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+- hash_add(qtable->queues, &q->mdev_qnode,
+- q->apqn);
++ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+ }
+@@ -1234,26 +1287,23 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+ {
+- int loop_cursor;
+- struct vfio_ap_queue *q;
+- struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
+
+- hash_init(qtable->queues);
+- vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
++ INIT_LIST_HEAD(&qlist);
++ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+- vfio_ap_mdev_reset_queues(qtable);
++ vfio_ap_mdev_reset_qlist(&qlist);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+- hash_del(&q->mdev_qnode);
++ list_del(&q->reset_qnode);
+ }
+-
+- kfree(qtable);
+ }
+
+ /**
+@@ -1608,7 +1658,7 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+ get_update_locks_for_kvm(kvm);
+
+ kvm_arch_crypto_clear_masks(kvm);
+- vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ vfio_ap_mdev_reset_queues(matrix_mdev);
+ kvm_put_kvm(kvm);
+ matrix_mdev->kvm = NULL;
+
+@@ -1744,15 +1794,33 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
+ }
+ }
+
+-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
++static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+ {
+ int ret = 0, loop_cursor;
+ struct vfio_ap_queue *q;
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
++ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
+ vfio_ap_mdev_reset_queue(q);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
++ flush_work(&q->reset_work);
++
++ if (q->reset_status.response_code)
++ ret = -EIO;
++ }
++
++ return ret;
++}
++
++static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
++{
++ int ret = 0;
++ struct vfio_ap_queue *q;
++
++ list_for_each_entry(q, qlist, reset_qnode)
++ vfio_ap_mdev_reset_queue(q);
++
++ list_for_each_entry(q, qlist, reset_qnode) {
+ flush_work(&q->reset_work);
+
+ if (q->reset_status.response_code)
+@@ -1938,7 +2006,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+- ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ break;
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ ret = vfio_ap_get_irq_info(arg);
+@@ -1976,6 +2044,7 @@ static ssize_t status_show(struct device *dev,
+ {
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
++ unsigned long apid, apqi;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+@@ -1983,8 +2052,21 @@ static ssize_t status_show(struct device *dev,
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
++ /* If the queue is assigned to the matrix mediated device, then
++ * determine whether it is passed through to a guest; otherwise,
++ * indicate that it is unassigned.
++ */
+ if (matrix_mdev) {
+- if (matrix_mdev->kvm)
++ apid = AP_QID_CARD(q->apqn);
++ apqi = AP_QID_QUEUE(q->apqn);
++ /*
++ * If the queue is passed through to the guest, then indicate
++ * that it is in use; otherwise, indicate that it is
++ * merely assigned to a matrix mediated device.
++ */
++ if (matrix_mdev->kvm &&
++ test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
++ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+ AP_QUEUE_IN_USE);
+ else
+@@ -2070,6 +2152,7 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ {
+ int ret;
+ struct vfio_ap_queue *q;
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+@@ -2091,15 +2174,28 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+- matrix_mdev->matrix.aqm,
+- matrix_mdev))
++ /*
++ * If we're in the process of handling the adding of adapters or
++ * domains to the host's AP configuration, then let the
++ * vfio_ap device driver's on_scan_complete callback filter the
++ * matrix and update the guest's AP configuration after all of
++ * the new queue devices are probed.
++ */
++ if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
++ !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
++ goto done;
++
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+ }
++
++done:
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+- return 0;
++ return ret;
+
+ err_remove_group:
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+@@ -2116,26 +2212,40 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
++ apid = AP_QID_CARD(q->apqn);
++ apqi = AP_QID_QUEUE(q->apqn);
+
+ if (matrix_mdev) {
+- vfio_ap_unlink_queue_fr_mdev(q);
+-
+- apid = AP_QID_CARD(q->apqn);
+- apqi = AP_QID_QUEUE(q->apqn);
+-
+- /*
+- * If the queue is assigned to the guest's APCB, then remove
+- * the adapter's APID from the APCB and hot it into the guest.
+- */
++ /* If the queue is assigned to the guest's AP configuration */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
++ /*
++ * Since the queues are defined via a matrix of adapters
++ * and domains, it is not possible to hot unplug a
++ * single queue; so, let's unplug the adapter.
++ */
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apid(matrix_mdev, apid);
++ goto done;
+ }
+ }
+
+- vfio_ap_mdev_reset_queue(q);
+- flush_work(&q->reset_work);
++ /*
++ * If the queue is not in the host's AP configuration, then resetting
++ * it will fail with response code 01 (APQN not valid); so, let's make
++ * sure it is in the host's config.
++ */
++ if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
++ test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
++ vfio_ap_mdev_reset_queue(q);
++ flush_work(&q->reset_work);
++ }
++
++done:
++ if (matrix_mdev)
++ vfio_ap_unlink_queue_fr_mdev(q);
++
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+@@ -2443,39 +2553,30 @@ void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+
+ static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+ {
+- bool do_hotplug = false;
+- int filter_domains = 0;
+- int filter_adapters = 0;
+- DECLARE_BITMAP(apm, AP_DEVICES);
+- DECLARE_BITMAP(aqm, AP_DOMAINS);
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
++ bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+- filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+- matrix_mdev->apm_add, AP_DEVICES);
+- filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+- matrix_mdev->aqm_add, AP_DOMAINS);
+-
+- if (filter_adapters && filter_domains)
+- do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+- else if (filter_adapters)
+- do_hotplug |=
+- vfio_ap_mdev_filter_matrix(apm,
+- matrix_mdev->shadow_apcb.aqm,
+- matrix_mdev);
+- else
+- do_hotplug |=
+- vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+- aqm, matrix_mdev);
++ filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
++ matrix_mdev->apm_add, AP_DEVICES);
++ filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
++ matrix_mdev->aqm_add, AP_DOMAINS);
++ filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
++ matrix_mdev->adm_add, AP_DOMAINS);
+
+- if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+- AP_DOMAINS))
++ if (filter_adapters || filter_domains)
++ do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
++
++ if (filter_cdoms)
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
+index 88aff8b81f2fc6..98d37aa27044a6 100644
+--- a/drivers/s390/crypto/vfio_ap_private.h
++++ b/drivers/s390/crypto/vfio_ap_private.h
+@@ -133,6 +133,8 @@ struct ap_matrix_mdev {
+ * @apqn: the APQN of the AP queue device
+ * @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
++ * @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues
++ * that need to be reset
+ * @reset_status: the status from the last reset of the queue
+ * @reset_work: work to wait for queue reset to complete
+ */
+@@ -143,6 +145,7 @@ struct vfio_ap_queue {
+ #define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
++ struct list_head reset_qnode;
+ struct ap_queue_status reset_status;
+ struct work_struct reset_work;
+ };
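The vfio_ap hunks above replace the kzalloc'd ap_queue_table hashtable with an on-stack list threaded through the new reset_qnode member, which removes a fallible GFP_KERNEL allocation from the reset path. A minimal sketch of the collect-then-drain pattern (hypothetical item type):

    #include <linux/list.h>

    struct item {
            struct list_head node;
            /* ... payload ... */
    };

    static void reset_item(struct item *it) { /* ... */ }

    static void reset_all(struct item *pool, size_t n)
    {
            struct item *it, *tmp;
            size_t i;
            LIST_HEAD(qlist);       /* on-stack list head, no allocation */

            for (i = 0; i < n; i++)
                    list_add_tail(&pool[i].node, &qlist);

            /* The _safe variant permits list_del() while iterating. */
            list_for_each_entry_safe(it, tmp, &qlist, node) {
                    reset_item(it);
                    list_del(&it->node);
            }
    }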
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index ce04caa7913fb0..357889cc03f0a1 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -579,6 +579,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+ {
+ if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
+ return NULL;
++ zcrypt_card_get(zc);
+ zcrypt_queue_get(zq);
+ get_device(&zq->queue->ap_dev.device);
+ atomic_add(weight, &zc->load);
+@@ -598,6 +599,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+ atomic_sub(weight, &zq->load);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
++ zcrypt_card_put(zc);
+ module_put(mod);
+ }
+
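The zcrypt hunks pair a new zcrypt_card_get() in zcrypt_pick_queue() with a zcrypt_card_put() in zcrypt_drop_queue(), so the card stays pinned while one of its queues is borrowed; previously only the queue and its device were referenced. The general shape, as a minimal kref sketch with hypothetical structs:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct card { struct kref ref; };
    struct queue { struct card *card; struct kref ref; };

    static void card_release(struct kref *k)
    {
            kfree(container_of(k, struct card, ref));
    }

    static void queue_release(struct kref *k)
    {
            kfree(container_of(k, struct queue, ref));
    }

    static void pick(struct queue *q)
    {
            kref_get(&q->card->ref);        /* pin the parent first */
            kref_get(&q->ref);
    }

    static void drop(struct queue *q)
    {
            struct card *c = q->card;       /* q may be freed below */

            kref_put(&q->ref, queue_release);
            kref_put(&c->ref, card_release);  /* reverse order */
    }

Every reference taken in the acquire path is released, in reverse order, in the matching release path.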
+diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
+index 4902d45e929ce2..c61e6427384c34 100644
+--- a/drivers/s390/net/Kconfig
++++ b/drivers/s390/net/Kconfig
+@@ -103,10 +103,11 @@ config CCWGROUP
+ config ISM
+ tristate "Support for ISM vPCI Adapter"
+ depends on PCI
++ imply SMC
+ default n
+ help
+ Select this option if you want to use the Internal Shared Memory
+- vPCI Adapter.
++ vPCI Adapter. The adapter can be used with the SMC network protocol.
+
+ To compile as a module choose M. The module name is ism.
+ If unsure, choose N.
+diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
+index 6df7f377d2f90c..622a61f8a3b849 100644
+--- a/drivers/s390/net/ism_drv.c
++++ b/drivers/s390/net/ism_drv.c
+@@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = {
+ MODULE_DEVICE_TABLE(pci, ism_device_table);
+
+ static debug_info_t *ism_debug_info;
+-static const struct smcd_ops ism_ops;
+
+ #define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
+ static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
+@@ -289,32 +288,19 @@ static int ism_read_local_gid(struct ism_dev *ism)
+ return ret;
+ }
+
+-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
+- u32 vid)
+-{
+- union ism_query_rgid cmd;
+-
+- memset(&cmd, 0, sizeof(cmd));
+- cmd.request.hdr.cmd = ISM_QUERY_RGID;
+- cmd.request.hdr.len = sizeof(cmd.request);
+-
+- cmd.request.rgid = rgid;
+- cmd.request.vlan_valid = vid_valid;
+- cmd.request.vlan_id = vid;
+-
+- return ism_cmd(ism, &cmd);
+-}
+-
+ static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+ {
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+- dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+- dmb->cpu_addr, dmb->dma_addr);
++ dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
++ DMA_FROM_DEVICE);
++ folio_put(virt_to_folio(dmb->cpu_addr));
+ }
+
+ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+ {
++ struct folio *folio;
+ unsigned long bit;
++ int rc;
+
+ if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
+ return -EINVAL;
+@@ -331,14 +317,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+ test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+ return -EINVAL;
+
+- dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+- &dmb->dma_addr,
+- GFP_KERNEL | __GFP_NOWARN |
+- __GFP_NOMEMALLOC | __GFP_NORETRY);
+- if (!dmb->cpu_addr)
+- clear_bit(dmb->sba_idx, ism->sba_bitmap);
++ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
++ __GFP_NORETRY, get_order(dmb->dmb_len));
++
++ if (!folio) {
++ rc = -ENOMEM;
++ goto out_bit;
++ }
++
++ dmb->cpu_addr = folio_address(folio);
++ dmb->dma_addr = dma_map_page(&ism->pdev->dev,
++ virt_to_page(dmb->cpu_addr), 0,
++ dmb->dmb_len, DMA_FROM_DEVICE);
++ if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
++ rc = -ENOMEM;
++ goto out_free;
++ }
++
++ return 0;
+
+- return dmb->cpu_addr ? 0 : -ENOMEM;
++out_free:
++ kfree(dmb->cpu_addr);
++out_bit:
++ clear_bit(dmb->sba_idx, ism->sba_bitmap);
++ return rc;
+ }
+
+ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
+@@ -429,23 +431,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
+ return ism_cmd(ism, &cmd);
+ }
+
+-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
+- u32 event_code, u64 info)
+-{
+- union ism_sig_ieq cmd;
+-
+- memset(&cmd, 0, sizeof(cmd));
+- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+- cmd.request.hdr.len = sizeof(cmd.request);
+-
+- cmd.request.rgid = rgid;
+- cmd.request.trigger_irq = trigger_irq;
+- cmd.request.event_code = event_code;
+- cmd.request.info = info;
+-
+- return ism_cmd(ism, &cmd);
+-}
+-
+ static unsigned int max_bytes(unsigned int start, unsigned int len,
+ unsigned int boundary)
+ {
+@@ -503,14 +488,6 @@ u8 *ism_get_seid(void)
+ }
+ EXPORT_SYMBOL_GPL(ism_get_seid);
+
+-static u16 ism_get_chid(struct ism_dev *ism)
+-{
+- if (!ism || !ism->pdev)
+- return 0;
+-
+- return to_zpci(ism->pdev)->pchid;
+-}
+-
+ static void ism_handle_event(struct ism_dev *ism)
+ {
+ struct ism_event *entry;
+@@ -569,11 +546,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static u64 ism_get_local_gid(struct ism_dev *ism)
+-{
+- return ism->local_gid;
+-}
+-
+ static int ism_dev_init(struct ism_dev *ism)
+ {
+ struct pci_dev *pdev = ism->pdev;
+@@ -774,6 +746,22 @@ module_exit(ism_exit);
+ /*************************** SMC-D Implementation *****************************/
+
+ #if IS_ENABLED(CONFIG_SMC)
++static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
++ u32 vid)
++{
++ union ism_query_rgid cmd;
++
++ memset(&cmd, 0, sizeof(cmd));
++ cmd.request.hdr.cmd = ISM_QUERY_RGID;
++ cmd.request.hdr.len = sizeof(cmd.request);
++
++ cmd.request.rgid = rgid;
++ cmd.request.vlan_valid = vid_valid;
++ cmd.request.vlan_id = vid;
++
++ return ism_cmd(ism, &cmd);
++}
++
+ static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+ u32 vid)
+ {
+@@ -811,6 +799,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd)
+ return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+ }
+
++static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
++ u32 event_code, u64 info)
++{
++ union ism_sig_ieq cmd;
++
++ memset(&cmd, 0, sizeof(cmd));
++ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
++ cmd.request.hdr.len = sizeof(cmd.request);
++
++ cmd.request.rgid = rgid;
++ cmd.request.trigger_irq = trigger_irq;
++ cmd.request.event_code = event_code;
++ cmd.request.info = info;
++
++ return ism_cmd(ism, &cmd);
++}
++
+ static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+ {
+@@ -830,11 +835,24 @@ static int smcd_supports_v2(void)
+ SYSTEM_EID.type[0] != '0';
+ }
+
++static u64 ism_get_local_gid(struct ism_dev *ism)
++{
++ return ism->local_gid;
++}
++
+ static u64 smcd_get_local_gid(struct smcd_dev *smcd)
+ {
+ return ism_get_local_gid(smcd->priv);
+ }
+
++static u16 ism_get_chid(struct ism_dev *ism)
++{
++ if (!ism || !ism->pdev)
++ return 0;
++
++ return to_zpci(ism->pdev)->pchid;
++}
++
+ static u16 smcd_get_chid(struct smcd_dev *smcd)
+ {
+ return ism_get_chid(smcd->priv);
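The ism_alloc_dmb()/ism_free_dmb() hunks move the DMB from coherent DMA (dma_alloc_coherent()) to a folio allocation plus a streaming dma_map_page(), with the free path mirroring it as dma_unmap_page() plus folio_put(). A minimal sketch of the streaming-mapping shape and its error handling, using plain pages and hypothetical parameters:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static void *alloc_streaming_buf(struct device *dev, size_t len,
                                     dma_addr_t *handle)
    {
            struct page *page;

            page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, get_order(len));
            if (!page)
                    return NULL;

            *handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *handle)) {
                    __free_pages(page, get_order(len));
                    return NULL;
            }
            return page_address(page);
    }

Unlike a coherent buffer, a streaming mapping is owned by the device between map and unmap, so the CPU may only touch it after dma_unmap_page() (or an explicit dma_sync_*() call).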
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index cd783290bde5ec..f0f3b6272d5b8a 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -364,30 +364,33 @@ static int qeth_cq_init(struct qeth_card *card)
+ return rc;
+ }
+
++static void qeth_free_cq(struct qeth_card *card)
++{
++ if (card->qdio.c_q) {
++ qeth_free_qdio_queue(card->qdio.c_q);
++ card->qdio.c_q = NULL;
++ }
++}
++
+ static int qeth_alloc_cq(struct qeth_card *card)
+ {
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ QETH_CARD_TEXT(card, 2, "cqon");
+- card->qdio.c_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.c_q) {
+- dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+- return -ENOMEM;
++ card->qdio.c_q = qeth_alloc_qdio_queue();
++ if (!card->qdio.c_q) {
++ dev_err(&card->gdev->dev,
++ "Failed to create completion queue\n");
++ return -ENOMEM;
++ }
+ }
+ } else {
+ QETH_CARD_TEXT(card, 2, "nocq");
+- card->qdio.c_q = NULL;
++ qeth_free_cq(card);
+ }
+ return 0;
+ }
+
+-static void qeth_free_cq(struct qeth_card *card)
+-{
+- if (card->qdio.c_q) {
+- qeth_free_qdio_queue(card->qdio.c_q);
+- card->qdio.c_q = NULL;
+- }
+-}
+-
+ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+ int delayed)
+ {
+@@ -1179,6 +1182,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
+ }
+ }
+
++/**
++ * qeth_irq() - qeth interrupt handler
++ * @cdev: ccw device
++ * @intparm: expected to be a pointer to the iob
++ * @irb: Interruption Response Block
++ *
++ * In the good path:
++ * corresponding qeth channel is locked with last used iob as active_cmd.
++ * But this function is also called for error interrupts.
++ *
++ * Caller ensures that:
++ * interrupts are disabled and the ccw device lock is held.
++ *
++ */
+ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+ {
+@@ -1220,11 +1237,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
+ }
+
+- qeth_unlock_channel(card, channel);
+-
+ rc = qeth_check_irb_error(card, cdev, irb);
+ if (rc) {
+ /* IO was terminated, free its resources. */
++ qeth_unlock_channel(card, channel);
+ if (iob)
+ qeth_cancel_cmd(iob, rc);
+ return;
+@@ -1268,6 +1284,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ rc = qeth_get_problem(card, cdev, irb);
+ if (rc) {
+ card->read_or_write_problem = 1;
++ qeth_unlock_channel(card, channel);
+ if (iob)
+ qeth_cancel_cmd(iob, rc);
+ qeth_clear_ipacmd_list(card);
+@@ -1276,6 +1293,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ }
+ }
+
++ if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
++ /* channel command hasn't started: retry.
++ * active_cmd is still set to last iob
++ */
++ QETH_CARD_TEXT(card, 2, "irqcc1");
++ rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
++ (addr_t)iob, 0, 0, iob->timeout);
++ if (rc) {
++ QETH_DBF_MESSAGE(2,
++ "ccw retry on %x failed, rc = %i\n",
++ CARD_DEVID(card), rc);
++ QETH_CARD_TEXT_(card, 2, " err%d", rc);
++ qeth_unlock_channel(card, channel);
++ qeth_cancel_cmd(iob, rc);
++ }
++ return;
++ }
++
++ qeth_unlock_channel(card, channel);
++
+ if (iob) {
+ /* sanity check: */
+ if (irb->scsw.cmd.count > iob->length) {
+@@ -2594,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+
+ QETH_CARD_TEXT(card, 2, "allcqdbf");
+
++ /* completion */
++ if (qeth_alloc_cq(card))
++ goto out_err;
++
+ if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
+ QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
+ return 0;
+@@ -2629,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
+ }
+
+- /* completion */
+- if (qeth_alloc_cq(card))
+- goto out_freeoutq;
+-
+ return 0;
+
+ out_freeoutq:
+@@ -2643,6 +2680,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ qeth_free_buffer_pool(card);
+ out_buffer_pool:
+ atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
++ qeth_free_cq(card);
++out_err:
+ return -ENOMEM;
+ }
+
+@@ -2650,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
+ {
+ int i, j;
+
++ qeth_free_cq(card);
++
+ if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+ QETH_QDIO_UNINITIALIZED)
+ return;
+
+- qeth_free_cq(card);
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+ if (card->qdio.in_q->bufs[j].rx_skb) {
+ consume_skb(card->qdio.in_q->bufs[j].rx_skb);
+@@ -3708,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
+
+ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+ {
+- int rc;
+-
+- if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+- rc = -1;
+- goto out;
+- } else {
+- if (card->options.cq == cq) {
+- rc = 0;
+- goto out;
+- }
+-
+- qeth_free_qdio_queues(card);
+- card->options.cq = cq;
+- rc = 0;
+- }
+-out:
+- return rc;
++ if (card->options.cq == QETH_CQ_NOTAVAILABLE)
++ return -1;
+
++ card->options.cq = cq;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_configure_cq);
+
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index b92a32b4b11416..04c64ce0a1ca1a 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
+ if (!recover) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+- continue;
++ } else {
++ /* prepare for recovery */
++ addr->disp_flag = QETH_DISP_ADDR_ADD;
+ }
+- addr->disp_flag = QETH_DISP_ADDR_ADD;
+ }
+
+ mutex_unlock(&card->ip_lock);
+@@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
+ if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ rc = qeth_l3_register_addr_entry(card, addr);
+
+- if (!rc) {
++ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
++ /* keep it in the records */
+ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ } else {
++ /* bad address */
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 695a57d894cdd0..23bce8995a5575 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1285,7 +1285,7 @@ source "drivers/scsi/arm/Kconfig"
+
+ config JAZZ_ESP
+ bool "MIPS JAZZ FAS216 SCSI support"
+- depends on MACH_JAZZ && SCSI
++ depends on MACH_JAZZ && SCSI=y
+ select SCSI_SPI_ATTRS
+ help
+ This is the driver for the onboard SCSI host adapter of MIPS Magnum
+diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
+index cea3a79d538e4b..4fcb73b727aa5d 100644
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -1485,6 +1485,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
+ unsigned char **data)
+ {
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
++ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
+ int c = *count;
+ unsigned char p = *phase;
+ unsigned char *d = *data;
+@@ -1496,7 +1497,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
+ return -1;
+ }
+
+- NCR5380_to_ncmd(hostdata->connected)->phase = p;
++ ncmd->phase = p;
+
+ if (p & SR_IO) {
+ if (hostdata->read_overruns)
+@@ -1608,45 +1609,44 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
+ * request.
+ */
+
+- if (hostdata->flags & FLAG_DMA_FIXUP) {
+- if (p & SR_IO) {
+- /*
+- * The workaround was to transfer fewer bytes than we
+- * intended to with the pseudo-DMA read function, wait for
+- * the chip to latch the last byte, read it, and then disable
+- * pseudo-DMA mode.
+- *
+- * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+- * REQ is deasserted when ACK is asserted, and not reasserted
+- * until ACK goes false. Since the NCR5380 won't lower ACK
+- * until DACK is asserted, which won't happen unless we twiddle
+- * the DMA port or we take the NCR5380 out of DMA mode, we
+- * can guarantee that we won't handshake another extra
+- * byte.
+- */
+-
+- if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+- BASR_DRQ, BASR_DRQ, 0) < 0) {
+- result = -1;
+- shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
+- }
+- if (NCR5380_poll_politely(hostdata, STATUS_REG,
+- SR_REQ, 0, 0) < 0) {
+- result = -1;
+- shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
+- }
+- d[*count - 1] = NCR5380_read(INPUT_DATA_REG);
+- } else {
+- /*
+- * Wait for the last byte to be sent. If REQ is being asserted for
+- * the byte we're interested, we'll ACK it and it will go false.
+- */
+- if (NCR5380_poll_politely2(hostdata,
+- BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
+- BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) {
+- result = -1;
+- shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
++ if ((hostdata->flags & FLAG_DMA_FIXUP) &&
++ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
++ /*
++ * The workaround was to transfer fewer bytes than we
++ * intended to with the pseudo-DMA receive function, wait for
++ * the chip to latch the last byte, read it, and then disable
++ * DMA mode.
++ *
++ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
++ * REQ is deasserted when ACK is asserted, and not reasserted
++ * until ACK goes false. Since the NCR5380 won't lower ACK
++ * until DACK is asserted, which won't happen unless we twiddle
++ * the DMA port or we take the NCR5380 out of DMA mode, we
++ * can guarantee that we won't handshake another extra
++ * byte.
++ *
++ * If sending, wait for the last byte to be sent. If REQ is
++ * being asserted for the byte we're interested in, we'll ACK it
++ * and it will go false.
++ */
++ if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
++ BASR_DRQ, BASR_DRQ, 0)) {
++ if ((p & SR_IO) &&
++ (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
++ if (!NCR5380_poll_politely(hostdata, STATUS_REG,
++ SR_REQ, 0, 0)) {
++ d[c] = NCR5380_read(INPUT_DATA_REG);
++ --ncmd->this_residual;
++ } else {
++ result = -1;
++ scmd_printk(KERN_ERR, hostdata->connected,
++ "PDMA fixup: !REQ timeout\n");
++ }
+ }
++ } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) {
++ result = -1;
++ scmd_printk(KERN_ERR, hostdata->connected,
++ "PDMA fixup: DRQ timeout\n");
+ }
+ }
+
+@@ -1807,8 +1807,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ return;
+ case PHASE_MSGIN:
+ len = 1;
++ tmp = 0xff;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
++ if (tmp == 0xff)
++ break;
+ ncmd->message = tmp;
+
+ switch (tmp) {
+@@ -1996,6 +1999,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ break;
+ case PHASE_STATIN:
+ len = 1;
++ tmp = ncmd->status;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
+ ncmd->status = tmp;
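In the PHASE_MSGIN hunk above, tmp is preloaded with 0xff before the PIO transfer; if it still holds 0xff afterwards, the transfer is treated as having made no progress and the byte is not acted on as a message, presumably because 0xff is not a message byte this handler needs to act on. A minimal sketch of the sentinel idea (transfer_byte() is a hypothetical stand-in for the PIO routine):

    /* Hypothetical bus read: may leave *byte untouched on failure. */
    static int transfer_byte(unsigned char *byte);

    static int read_message_byte(unsigned char *out)
    {
            unsigned char tmp = 0xff;       /* sentinel: nothing received */

            transfer_byte(&tmp);
            if (tmp == 0xff)
                    return -1;              /* transfer made no progress */

            *out = tmp;
            return 0;
    }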
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 73b6ac0c01f549..9b66fa29fb05ca 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -1678,7 +1678,6 @@ struct aac_dev
+ u32 handle_pci_error;
+ bool init_reset;
+ u8 soft_reset_support;
+- u8 use_map_queue;
+ };
+
+ #define aac_adapter_interrupt(dev) \
+@@ -2030,8 +2029,8 @@ struct aac_srb_reply
+ };
+
+ struct aac_srb_unit {
+- struct aac_srb srb;
+ struct aac_srb_reply srb_reply;
++ struct aac_srb srb;
+ };
+
+ /*
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index bd99c5492b7d49..0f64b024430376 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -642,6 +642,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+
+ if (aac_comm_init(dev)<0){
+ kfree(dev->queues);
++ dev->queues = NULL;
+ return NULL;
+ }
+ /*
+@@ -649,6 +650,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ */
+ if (aac_fib_setup(dev) < 0) {
+ kfree(dev->queues);
++ dev->queues = NULL;
+ return NULL;
+ }
+
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 013a9a334972eb..25cee03d7f9737 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
+ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+ {
+ struct fib *fibptr;
+- u32 blk_tag;
+- int i;
+
+- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+- i = blk_mq_unique_tag_to_tag(blk_tag);
+- fibptr = &dev->fibs[i];
++ fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
+ /*
+ * Null out fields that depend on being zero at the start of
+ * each I/O
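With the multiqueue mapping removed elsewhere in this series (host_tagset and map_queues are dropped from linit.c below), a request's ->tag is once again unique across the whole host, so aac_fib_alloc_tag() can index dev->fibs[] with it directly. The deleted helper pair worked roughly as sketched here; these are real blk-mq APIs:

    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>

    /* With multiple hw queues, ->tag is only unique per queue;
     * blk_mq_unique_tag() folds the hw queue index into the upper
     * 16 bits to form a host-wide identifier. */
    static u32 fib_index(struct scsi_cmnd *scmd)
    {
            u32 unique = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

            return blk_mq_unique_tag_to_tag(unique); /* low 16 bits */
    }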
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index c4a36c0be527cd..68f4dbcfff4925 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -19,7 +19,6 @@
+
+ #include <linux/compat.h>
+ #include <linux/blkdev.h>
+-#include <linux/blk-mq-pci.h>
+ #include <linux/completion.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+@@ -505,15 +504,6 @@ static int aac_slave_configure(struct scsi_device *sdev)
+ return 0;
+ }
+
+-static void aac_map_queues(struct Scsi_Host *shost)
+-{
+- struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+-
+- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+- aac->pdev, 0);
+- aac->use_map_queue = true;
+-}
+-
+ /**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+@@ -1498,7 +1488,6 @@ static const struct scsi_host_template aac_driver_template = {
+ .bios_param = aac_biosparm,
+ .shost_groups = aac_host_groups,
+ .slave_configure = aac_slave_configure,
+- .map_queues = aac_map_queues,
+ .change_queue_depth = aac_change_queue_depth,
+ .sdev_groups = aac_dev_groups,
+ .eh_abort_handler = aac_eh_abort,
+@@ -1786,8 +1775,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ shost->max_lun = AAC_MAX_LUN;
+
+ pci_set_drvdata(pdev, shost);
+- shost->nr_hw_queues = aac->max_msix;
+- shost->host_tagset = 1;
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+@@ -1919,7 +1906,6 @@ static void aac_remove_one(struct pci_dev *pdev)
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+ aac_cancel_rescan_worker(aac);
+- aac->use_map_queue = false;
+ scsi_remove_host(shost);
+
+ __aac_shutdown(aac);
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 61949f3741886b..11ef58204e96f1 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
+ #endif
+
+ u16 vector_no;
+- struct scsi_cmnd *scmd;
+- u32 blk_tag;
+- struct Scsi_Host *shost = dev->scsi_host_ptr;
+- struct blk_mq_queue_map *qmap;
+
+ atomic_inc(&q->numpending);
+
+@@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
+ if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+ && dev->sa_firmware)
+ vector_no = aac_get_vector(dev);
+- else {
+- if (!fib->vector_no || !fib->callback_data) {
+- if (shost && dev->use_map_queue) {
+- qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+- vector_no = qmap->mq_map[raw_smp_processor_id()];
+- }
+- /*
+- * We hardcode the vector_no for
+- * reserved commands as a valid shost is
+- * absent during the init
+- */
+- else
+- vector_no = 0;
+- } else {
+- scmd = (struct scsi_cmnd *)fib->callback_data;
+- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+- vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
+- }
+- }
++ else
++ vector_no = fib->vector_no;
+
+ if (native_hba) {
+ if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
+index ed8d9319862a5a..3819d559ebbb42 100644
+--- a/drivers/scsi/arcmsr/arcmsr.h
++++ b/drivers/scsi/arcmsr/arcmsr.h
+@@ -78,9 +78,13 @@ struct device_attribute;
+ #ifndef PCI_DEVICE_ID_ARECA_1203
+ #define PCI_DEVICE_ID_ARECA_1203 0x1203
+ #endif
++#ifndef PCI_DEVICE_ID_ARECA_1883
++#define PCI_DEVICE_ID_ARECA_1883 0x1883
++#endif
+ #ifndef PCI_DEVICE_ID_ARECA_1884
+ #define PCI_DEVICE_ID_ARECA_1884 0x1884
+ #endif
++#define PCI_DEVICE_ID_ARECA_1886_0 0x1886
+ #define PCI_DEVICE_ID_ARECA_1886 0x188A
+ #define ARCMSR_HOURS (1000 * 60 * 60 * 4)
+ #define ARCMSR_MINUTES (1000 * 60 * 60)
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index a66221c3b72f82..01fb1396e1a924 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -214,8 +214,12 @@ static struct pci_device_id arcmsr_device_id_table[] = {
+ .driver_data = ACB_ADAPTER_TYPE_A},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
+ .driver_data = ACB_ADAPTER_TYPE_C},
++ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
++ .driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ .driver_data = ACB_ADAPTER_TYPE_E},
++ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
++ .driver_data = ACB_ADAPTER_TYPE_F},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
+ .driver_data = ACB_ADAPTER_TYPE_F},
+ {0, 0}, /* Terminating entry */
+@@ -4706,9 +4710,11 @@ static const char *arcmsr_info(struct Scsi_Host *host)
+ case PCI_DEVICE_ID_ARECA_1680:
+ case PCI_DEVICE_ID_ARECA_1681:
+ case PCI_DEVICE_ID_ARECA_1880:
++ case PCI_DEVICE_ID_ARECA_1883:
+ case PCI_DEVICE_ID_ARECA_1884:
+ type = "SAS/SATA";
+ break;
++ case PCI_DEVICE_ID_ARECA_1886_0:
+ case PCI_DEVICE_ID_ARECA_1886:
+ type = "NVMe/SAS/SATA";
+ break;
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index e48f14ad6dfd89..06acb5ff609ee7 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -2710,6 +2710,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
++ kfree(phwi_ctxt->be_wrbq);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
+index 7bd2ba1ad4d118..f30fe324e6ecc1 100644
+--- a/drivers/scsi/bfa/bfa.h
++++ b/drivers/scsi/bfa/bfa.h
+@@ -20,7 +20,6 @@
+ struct bfa_s;
+
+ typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+-typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
+
+ /*
+ * Interrupt message handlers
+@@ -437,4 +436,12 @@ struct bfa_cb_pending_q_s {
+ (__qe)->data = (__data); \
+ } while (0)
+
++#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \
++ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
++ (__qe)->hcb_qe.cbfn_status = (__cbfn); \
++ (__qe)->hcb_qe.cbarg = (__cbarg); \
++ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
++ (__qe)->data = (__data); \
++} while (0)
++
+ #endif /* __BFA_H__ */
+diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
+index 6846ca8f7313c3..3438d0b8ba0624 100644
+--- a/drivers/scsi/bfa/bfa_core.c
++++ b/drivers/scsi/bfa/bfa_core.c
+@@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
+ struct list_head *qe;
+ struct list_head *qen;
+ struct bfa_cb_qe_s *hcb_qe;
+- bfa_cb_cbfn_status_t cbfn;
+
+ list_for_each_safe(qe, qen, comp_q) {
+ hcb_qe = (struct bfa_cb_qe_s *) qe;
+ if (hcb_qe->pre_rmv) {
+ /* qe is invalid after return, dequeue before cbfn() */
+ list_del(qe);
+- cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+- cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
++ hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
+ } else
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+ }
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 933a1c3890ff50..5e568d6d7b2610 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -361,14 +361,18 @@ struct bfa_reqq_wait_s {
+ void *cbarg;
+ };
+
+-typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
++typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
++typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
+
+ /*
+ * Generic BFA callback element.
+ */
+ struct bfa_cb_qe_s {
+ struct list_head qe;
+- bfa_cb_cbfn_t cbfn;
++ union {
++ bfa_cb_cbfn_status_t cbfn_status;
++ bfa_cb_cbfn_t cbfn;
++ };
+ bfa_boolean_t once;
+ bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
+ bfa_status_t fw_status; /* to access fw status in comp proc */
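The bfa hunks delete the (bfa_cb_cbfn_t) casts by giving struct bfa_cb_qe_s an anonymous union of the two callback signatures, plus a bfa_pending_q_init_status() initialiser for the status-flavoured one. Calling a function through a pointer of a mismatched type is undefined behaviour and is rejected at runtime by kernel CFI, so each call must go through the correctly typed member. A minimal sketch with hypothetical names:

    #include <linux/types.h>

    typedef void (*cb_bool_t)(void *arg, bool complete);
    typedef void (*cb_status_t)(void *arg, int status);

    struct cb_entry {
            union {                 /* one slot, two typed views */
                    cb_bool_t cbfn;
                    cb_status_t cbfn_status;
            };
            bool has_status;
            void *arg;
            int fw_status;
    };

    static void run_cb(struct cb_entry *e)
    {
            if (e->has_status)
                    e->cbfn_status(e->arg, e->fw_status);
            else
                    e->cbfn(e->arg, true);
    }

In the driver, the existing pre_rmv flag plays the role of has_status.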
+diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
+index 520f9152f3bf2d..54bd11e6d59335 100644
+--- a/drivers/scsi/bfa/bfad_bsg.c
++++ b/drivers/scsi/bfa/bfad_bsg.c
+@@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+- &fcomp, &iocmd->stats);
++ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+@@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
++ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+@@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+ init_completion(&fcomp.comp);
+- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+- &fcomp, &iocmd->stats);
++ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+@@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+ init_completion(&fcomp.comp);
+- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+- &fcomp, NULL);
++ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+@@ -2550,7 +2547,7 @@ bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+ static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port,
+ int lunmask_cfg)
+ {
+- const u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
++ const blist_flags_t scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
+ struct bfad_itnim_s *itnim;
+ struct scsi_device *sdev;
+ unsigned long flags;
+diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
+index 52db147d9979da..f6dd077d47c9a7 100644
+--- a/drivers/scsi/bfa/bfad_debugfs.c
++++ b/drivers/scsi/bfa/bfad_debugfs.c
+@@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ unsigned long flags;
+ void *kern_buf;
+
+- kern_buf = memdup_user(buf, nbytes);
++ kern_buf = memdup_user_nul(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+@@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ unsigned long flags;
+ void *kern_buf;
+
+- kern_buf = memdup_user(buf, nbytes);
++ kern_buf = memdup_user_nul(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 05ddbb9bb7d8aa..451a58e0fd9691 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -429,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct fcoe_ctlr *ctlr;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+- struct sk_buff *tmp_skb;
+
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fcoe_packet_type);
+@@ -441,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ goto err;
+ }
+
+- tmp_skb = skb_share_check(skb, GFP_ATOMIC);
+- if (!tmp_skb)
+- goto err;
+-
+- skb = tmp_skb;
++ skb = skb_share_check(skb, GFP_ATOMIC);
++ if (!skb)
++ return -1;
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+index 2c246e80c1c4d6..d91659811eb3c5 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+@@ -833,7 +833,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+- spin_lock_bh(&tgt->cq_lock);
+ ctx_base_ptr = tgt->ctx_base;
+ tgt->ctx_base = NULL;
+
+@@ -889,7 +888,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ tgt->sq, tgt->sq_dma);
+ tgt->sq = NULL;
+ }
+- spin_unlock_bh(&tgt->cq_lock);
+
+ if (ctx_base_ptr)
+ iounmap(ctx_base_ptr);
+diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
+index c38017b4af9826..e50e93e7fe5a1c 100644
+--- a/drivers/scsi/csiostor/csio_defs.h
++++ b/drivers/scsi/csiostor/csio_defs.h
+@@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list)
+ #define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
+
+ /* State machine */
+-typedef void (*csio_sm_state_t)(void *, uint32_t);
++struct csio_lnode;
++
++/* State machine events */
++enum csio_ln_ev {
++ CSIO_LNE_NONE = (uint32_t)0,
++ CSIO_LNE_LINKUP,
++ CSIO_LNE_FAB_INIT_DONE,
++ CSIO_LNE_LINK_DOWN,
++ CSIO_LNE_DOWN_LINK,
++ CSIO_LNE_LOGO,
++ CSIO_LNE_CLOSE,
++ CSIO_LNE_MAX_EVENT,
++};
++
++typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt);
+
+ struct csio_sm {
+ struct list_head sm_list;
+@@ -83,7 +97,7 @@ struct csio_sm {
+ static inline void
+ csio_set_state(void *smp, void *state)
+ {
+- ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
++ ((struct csio_sm *)smp)->sm_state = state;
+ }
+
+ static inline void
+diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
+index d5ac9389702327..5b3ffefae476d0 100644
+--- a/drivers/scsi/csiostor/csio_lnode.c
++++ b/drivers/scsi/csiostor/csio_lnode.c
+@@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ int
+ csio_is_lnode_ready(struct csio_lnode *ln)
+ {
+- return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
++ return (csio_get_state(ln) == csio_lns_ready);
+ }
+
+ /*****************************************************************************/
+@@ -1366,15 +1366,15 @@ csio_free_fcfinfo(struct kref *kref)
+ void
+ csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+ {
+- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
++ if (csio_get_state(ln) == csio_lns_uninit) {
+ strcpy(str, "UNINIT");
+ return;
+ }
+- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
++ if (csio_get_state(ln) == csio_lns_ready) {
+ strcpy(str, "READY");
+ return;
+ }
+- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
++ if (csio_get_state(ln) == csio_lns_offline) {
+ strcpy(str, "OFFLINE");
+ return;
+ }
+diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
+index 372a67d122d38f..607698a0f06315 100644
+--- a/drivers/scsi/csiostor/csio_lnode.h
++++ b/drivers/scsi/csiostor/csio_lnode.h
+@@ -53,19 +53,6 @@
+ extern int csio_fcoe_rnodes;
+ extern int csio_fdmi_enable;
+
+-/* State machine evets */
+-enum csio_ln_ev {
+- CSIO_LNE_NONE = (uint32_t)0,
+- CSIO_LNE_LINKUP,
+- CSIO_LNE_FAB_INIT_DONE,
+- CSIO_LNE_LINK_DOWN,
+- CSIO_LNE_DOWN_LINK,
+- CSIO_LNE_LOGO,
+- CSIO_LNE_CLOSE,
+- CSIO_LNE_MAX_EVENT,
+-};
+-
+-
+ struct csio_fcf_info {
+ struct list_head list;
+ uint8_t priority;
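The csiostor change is the same kind of CFI cleanup: csio_sm_state_t becomes a properly prototyped pointer taking (struct csio_lnode *, enum csio_ln_ev) instead of (void *, uint32_t), the casts in csio_set_state() and in the state comparisons disappear, and enum csio_ln_ev moves into csio_defs.h so the typedef can name it after a forward declaration of struct csio_lnode. A minimal sketch of a typed state machine along these lines (hypothetical names):

    struct machine;                 /* forward declaration suffices */

    enum ev { EV_NONE, EV_GO, EV_STOP };
    typedef void (*state_fn)(struct machine *m, enum ev e);

    struct machine { state_fn state; };

    static void st_running(struct machine *m, enum ev e);

    static void st_idle(struct machine *m, enum ev e)
    {
            if (e == EV_GO)
                    m->state = st_running;  /* no cast needed */
    }

    static void st_running(struct machine *m, enum ev e)
    {
            if (e == EV_STOP)
                    m->state = st_idle;
    }

    static void post_event(struct machine *m, enum ev e)
    {
            m->state(m, e);         /* prototype matches the target */
    }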
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index a226dc1b65d715..4eb0837298d4d2 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
+ }
+ }
+
+-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+- struct scsi_sense_hdr *sense_hdr)
++static void alua_handle_state_transition(struct scsi_device *sdev)
+ {
+ struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group *pg;
+
++ rcu_read_lock();
++ pg = rcu_dereference(h->pg);
++ if (pg)
++ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
++ rcu_read_unlock();
++ alua_check(sdev, false);
++}
++
++static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
++ struct scsi_sense_hdr *sense_hdr)
++{
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+ /*
+ * LUN Not Accessible - ALUA state transition
+ */
+- rcu_read_lock();
+- pg = rcu_dereference(h->pg);
+- if (pg)
+- pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+- rcu_read_unlock();
+- alua_check(sdev, false);
++ alua_handle_state_transition(sdev);
+ return NEEDS_RETRY;
+ }
+ break;
+ case UNIT_ATTENTION:
++ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
++ /*
++ * LUN Not Accessible - ALUA state transition
++ */
++ alua_handle_state_transition(sdev);
++ return NEEDS_RETRY;
++ }
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
+ /*
+ * Power On, Reset, or Bus Device Reset.
+@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)
+
+ retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &sense_hdr);
+- if (sense_hdr.sense_key == NOT_READY &&
++ if ((sense_hdr.sense_key == NOT_READY ||
++ sense_hdr.sense_key == UNIT_ATTENTION) &&
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
+ return SCSI_DH_RETRY;
+ else if (retval)
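The scsi_dh_alua hunks factor the transition handling into alua_handle_state_transition() and recognise ASC/ASCQ 0x04/0x0a under UNIT_ATTENTION as well as NOT_READY, both in alua_check_sense() and in alua_tur(). The shared condition, as a small sketch (the predicate helper is hypothetical; the driver open-codes the checks):

    #include <linux/types.h>
    #include <scsi/scsi_common.h>
    #include <scsi/scsi_proto.h>

    /* ASC/ASCQ 0x04/0x0a: "logical unit not accessible, asymmetric
     * access state transition"; targets may report it under either
     * sense key. */
    static bool is_alua_transition(const struct scsi_sense_hdr *sshdr)
    {
            return (sshdr->sense_key == NOT_READY ||
                    sshdr->sense_key == UNIT_ATTENTION) &&
                   sshdr->asc == 0x04 && sshdr->ascq == 0x0a;
    }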
+diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
+index 2e83a667901fec..1a7437f4328e87 100644
+--- a/drivers/scsi/elx/libefc/efc_nport.c
++++ b/drivers/scsi/elx/libefc/efc_nport.c
+@@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ spin_lock_irqsave(&efc->lock, flags);
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
+- kref_put(&nport->ref, nport->release);
+ /* Shutdown this NPORT */
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
++ kref_put(&nport->ref, nport->release);
+ break;
+ }
+ }
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 19eee108db0214..5c8d1ba3f8f3c9 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *sel;
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+
+ mutex_lock(&fip->ctlr_mutex);
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = NULL;
+ list_for_each_entry(fcf, &fip->fcfs, list)
+ fcf->flogi_sent = 0;
+
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ sel = fip->sel_fcf;
+
+ if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
+@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ {
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+- unsigned long flags;
+ u16 old_xid;
+ u8 op;
+ u8 mac[ETH_ALEN];
+@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
+ op = FIP_DT_FLOGI;
+ if (fip->mode == FIP_MODE_VN2VN)
+ break;
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ kfree_skb(fip->flogi_req);
+ fip->flogi_req = skb;
+ fip->flogi_req_send = 1;
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ schedule_work(&fip->timer_work);
+ return -EINPROGRESS;
+ case ELS_FDISC:
+@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+ int error;
+
+ mutex_lock(&fip->ctlr_mutex);
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+ fcf = fcoe_ctlr_select(fip);
+ if (!fcf || fcf->flogi_sent) {
+@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ fcoe_ctlr_solicit(fip, NULL);
+ error = fcoe_ctlr_flogi_send_locked(fip);
+ }
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ mutex_unlock(&fip->ctlr_mutex);
+ return error;
+ }
+@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ {
+ struct fcoe_fcf *fcf;
+- unsigned long flags;
+
+- spin_lock_irqsave(&fip->ctlr_lock, flags);
++ spin_lock_bh(&fip->ctlr_lock);
+ fcf = fip->sel_fcf;
+ if (!fcf || !fip->flogi_req_send)
+ goto unlock;
+@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+ } else /* XXX */
+ LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+ unlock:
+- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
++ spin_unlock_bh(&fip->ctlr_lock);
+ }
+
+ /**
+diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
+index c4d9ed0d7d753f..2619a2d4f5f143 100644
+--- a/drivers/scsi/fnic/fnic_debugfs.c
++++ b/drivers/scsi/fnic/fnic_debugfs.c
+@@ -52,9 +52,10 @@ int fnic_debugfs_init(void)
+ fc_trc_flag->fnic_trace = 2;
+ fc_trc_flag->fc_trace = 3;
+ fc_trc_flag->fc_clear = 4;
++ return 0;
+ }
+
+- return 0;
++ return -ENOMEM;
+ }
+
+ /*
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 9472b9743aefbe..e4363b8c6ad268 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1565,12 +1565,12 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
+ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
+ {
+ if (!hisi_hba->hw->soft_reset)
+- return -1;
++ return -ENOENT;
+
+ down(&hisi_hba->sem);
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ up(&hisi_hba->sem);
+- return -1;
++ return -EPERM;
+ }
+
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+@@ -1641,7 +1641,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+- if (slot && task->task_proto & SAS_PROTOCOL_SSP) {
++ if (!slot)
++ goto out;
++
++ if (task->task_proto & SAS_PROTOCOL_SSP) {
+ u16 tag = slot->idx;
+ int rc2;
+
+@@ -1688,7 +1691,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
+ rc = hisi_sas_softreset_ata_disk(device);
+ }
+ }
+- } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) {
++ } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ /* SMP */
+ u32 tag = slot->idx;
+ struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
+@@ -1789,7 +1792,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
+ if (dev_is_sata(device)) {
+ struct ata_link *link = &device->sata_dev.ap->link;
+
+- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
++ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ smp_ata_check_ready_type);
+ } else {
+ msleep(2000);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index bbb64ee6afd7c4..6b97c066e6631a 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -1606,6 +1606,11 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+ }
+
+ phy->port_id = port_id;
++ spin_lock(&phy->lock);
++ /* Delete timer and set phy_attached atomically */
++ del_timer(&phy->timer);
++ phy->phy_attached = 1;
++ spin_unlock(&phy->lock);
+
+ /*
+ * Call pm_runtime_get_noresume() which pairs with
+@@ -1619,11 +1624,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+
+ res = IRQ_HANDLED;
+
+- spin_lock(&phy->lock);
+- /* Delete timer and set phy_attached atomically */
+- del_timer(&phy->timer);
+- phy->phy_attached = 1;
+- spin_unlock(&phy->lock);
+ end:
+ if (phy->reset_completion)
+ complete(phy->reset_completion);
+@@ -2245,7 +2245,15 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+- ts->stat = SAS_PROTO_RESPONSE;
++ if (task->ata_task.use_ncq) {
++ struct domain_device *device = task->dev;
++ struct hisi_sas_device *sas_dev = device->lldd_dev;
++
++ sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR;
++ slot->abort = 1;
++ } else {
++ ts->stat = SAS_PROTO_RESPONSE;
++ }
+ } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+@@ -3478,7 +3486,7 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
+ int i;
+
+- for (i = 0; i < debugfs_axi_reg.count; i++, databuf++)
++ for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
+ *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+ }
+
+@@ -4865,6 +4873,12 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+
++static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
++{
++ debugfs_remove_recursive(hisi_hba->debugfs_dir);
++ hisi_hba->debugfs_dir = NULL;
++}
++
+ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ struct device *dev = hisi_hba->dev;
+@@ -4888,18 +4902,13 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+- debugfs_remove_recursive(hisi_hba->debugfs_dir);
++ debugfs_exit_v3_hw(hisi_hba);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
+ }
+ }
+
+-static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+-{
+- debugfs_remove_recursive(hisi_hba->debugfs_dir);
+-}
+-
+ static int
+ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+@@ -5097,6 +5106,7 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+ {
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
++ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+
+@@ -5105,6 +5115,10 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
++ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
++ scsi_unblock_requests(shost);
++ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
++ up(&hisi_hba->sem);
+ return;
+ }
+
+@@ -5147,7 +5161,7 @@ static int _suspend_v3_hw(struct device *device)
+ }
+
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+- return -1;
++ return -EPERM;
+
+ dev_warn(dev, "entering suspend state\n");
+
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index d7f51b84f3c788..445f4a220df3eb 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -353,12 +353,13 @@ static void scsi_host_dev_release(struct device *dev)
+
+ if (shost->shost_state == SHOST_CREATED) {
+ /*
+- * Free the shost_dev device name here if scsi_host_alloc()
+- * and scsi_host_put() have been called but neither
++ * Free the shost_dev device name and remove the proc host dir
++ * here if scsi_host_{alloc,put}() have been called but neither
+ * scsi_host_add() nor scsi_remove_host() has been called.
+ * This avoids that the memory allocated for the shost_dev
+- * name is leaked.
++ * name as well as the proc dir structure are leaked.
+ */
++ scsi_proc_hostdir_rm(shost->hostt);
+ kfree(dev_name(&shost->shost_dev));
+ }
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index af18d20f30794b..49c57a9c110b5f 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+ struct Scsi_Host *sh;
+
+- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
++ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *));
+ if (sh == NULL) {
+ dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ return -ENOMEM;
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index ce9eb00e2ca04d..c98346e464b484 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -22,7 +22,6 @@
+ #include <linux/bsg-lib.h>
+ #include <asm/firmware.h>
+ #include <asm/irq.h>
+-#include <asm/rtas.h>
+ #include <asm/vio.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -1519,7 +1518,11 @@ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->l_lock, flags);
+- BUG_ON(list_empty(&queue->free));
++ if (list_empty(&queue->free)) {
++ ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
++ spin_unlock_irqrestore(&queue->l_lock, flags);
++ return NULL;
++ }
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue_list);
+@@ -1948,9 +1951,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ if (vhost->using_channels) {
+ scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
++ if (!evt)
++ return SCSI_MLQUEUE_HOST_BUSY;
++
+ evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+- } else
++ } else {
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt)
++ return SCSI_MLQUEUE_HOST_BUSY;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ evt->cmnd = cmnd;
+@@ -2038,6 +2047,11 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
+
+ vhost->aborting_passthru = 1;
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+@@ -2096,6 +2110,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+ goto unlock_out;
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ rc = -ENOMEM;
++ goto unlock_out;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ plogi = &evt->iu.plogi;
+ memset(plogi, 0, sizeof(*plogi));
+@@ -2214,6 +2232,11 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
+ }
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ rc = -ENOMEM;
++ goto out;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.passthru;
+
+@@ -2302,6 +2325,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+ else
+ evt = ibmvfc_get_event(&vhost->crq);
+
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -2505,6 +2533,8 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ struct ibmvfc_tmf *tmf;
+
+ evt = ibmvfc_get_event(queue);
++ if (!evt)
++ return NULL;
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+@@ -2561,6 +2591,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+
+ if (found_evt && vhost->logged_in) {
+ evt = ibmvfc_init_tmf(&queues[i], sdev, type);
++ if (!evt) {
++ spin_unlock(queues[i].q_lock);
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
+ evt->sync_iu = &queues[i].cancel_rsp;
+ ibmvfc_send_event(evt, vhost, default_timeout);
+ list_add_tail(&evt->cancel, &cancelq);
+@@ -2774,6 +2809,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -4032,6 +4071,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+@@ -4139,6 +4184,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+ kref_get(&tgt->kref);
+ tgt->logo_rcvd = 0;
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+@@ -4215,6 +4266,8 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt)
++ return NULL;
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ mad = &evt->iu.implicit_logout;
+@@ -4242,6 +4295,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
++ if (!evt) {
++ vhost->discovery_threads--;
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+@@ -4381,6 +4441,12 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+@@ -4547,6 +4613,14 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
+ vhost->abort_threads++;
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
++ vhost->abort_threads--;
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return;
++ }
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+
+ evt->tgt = tgt;
+@@ -4597,6 +4671,12 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+@@ -4700,6 +4780,12 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ evt->tgt = tgt;
+ ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+@@ -4872,6 +4958,13 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+ {
+ struct ibmvfc_discover_targets *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.discover_targets;
+@@ -4949,8 +5042,15 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ unsigned int num_channels =
+ min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int i;
+
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ memset(setup_buf, 0, sizeof(*setup_buf));
+ if (num_channels == 0)
+ setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+@@ -5012,6 +5112,13 @@ static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+ {
+ struct ibmvfc_channel_enquiry *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_enquiry;
+@@ -5134,6 +5241,12 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+ struct ibmvfc_npiv_login_mad *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+
++ if (!evt) {
++ ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ ibmvfc_gather_partition_info(vhost);
+ ibmvfc_set_login_info(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+@@ -5198,6 +5311,12 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+ struct ibmvfc_event *evt;
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+ mad = &evt->iu.npiv_logout;
+@@ -5804,7 +5923,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+- } while (rtas_busy_delay(rc));
++ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ reg_failed:
+ LEAVE;
+ return rc;
+diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
+index a7b3243b471d5b..7162a5029b37a8 100644
+--- a/drivers/scsi/isci/request.c
++++ b/drivers/scsi/isci/request.c
+@@ -3390,7 +3390,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
+ return SCI_FAILURE;
+ }
+
+- return SCI_SUCCESS;
++ return status;
+ }
+
+ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
+index 945adca5e72fda..05be0810b5e31b 100644
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -265,6 +265,11 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+ if (!fsp->seq_ptr)
+ return -EINVAL;
+
++ if (fsp->state & FC_SRB_ABORT_PENDING) {
++ FC_FCP_DBG(fsp, "abort already pending\n");
++ return -EBUSY;
++ }
++
+ this_cpu_inc(fsp->lp->stats->FcpPktAborts);
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+@@ -1671,7 +1676,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+- fc_fcp_recovery(fsp, FC_ERROR);
++ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+@@ -1690,11 +1695,12 @@ static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
+ fsp->status_code = code;
+ fsp->cdb_status = 0;
+ fsp->io_status = 0;
+- /*
+- * if this fails then we let the scsi command timer fire and
+- * scsi-ml escalate.
+- */
+- fc_fcp_send_abort(fsp);
++ if (!fsp->cmd)
++ /*
++ * Only abort non-scsi commands; otherwise let the
++ * scsi command timer fire and scsi-ml escalate.
++ */
++ fc_fcp_send_abort(fsp);
+ }
+
+ /**
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 9c02c9523c4d4d..ab06e9aeb613e7 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
+ }
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
++ if (!lport->ptp_rdata) {
++ printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
++ lport->port_id);
++ mutex_unlock(&lport->disc.disc_mutex);
++ return;
++ }
+ kref_get(&lport->ptp_rdata->kref);
+ lport->ptp_rdata->ids.port_name = remote_wwpn;
+ lport->ptp_rdata->ids.node_name = remote_wwnn;
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 12e2653846e3f0..70891a1e98a017 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -610,15 +610,15 @@ int sas_ata_init(struct domain_device *found_dev)
+
+ rc = ata_sas_tport_add(ata_host->dev, ap);
+ if (rc)
+- goto destroy_port;
++ goto free_port;
+
+ found_dev->sata_dev.ata_host = ata_host;
+ found_dev->sata_dev.ap = ap;
+
+ return 0;
+
+-destroy_port:
+- kfree(ap);
++free_port:
++ ata_port_free(ap);
+ free_host:
+ ata_host_put(ata_host);
+ return rc;
+diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
+index ff7b63b10aeb37..8afd8ce259e27a 100644
+--- a/drivers/scsi/libsas/sas_discover.c
++++ b/drivers/scsi/libsas/sas_discover.c
+@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
+
+ if (dev_is_sata(dev) && dev->sata_dev.ap) {
+ ata_sas_tport_delete(dev->sata_dev.ap);
+- kfree(dev->sata_dev.ap);
++ ata_port_free(dev->sata_dev.ap);
+ ata_host_put(dev->sata_dev.ata_host);
+ dev->sata_dev.ata_host = NULL;
+ dev->sata_dev.ap = NULL;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index a2204674b6808f..e97f4e01a865a0 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -135,7 +135,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
+
+ static inline void *alloc_smp_req(int size)
+ {
+- u8 *p = kzalloc(size, GFP_KERNEL);
++ u8 *p = kzalloc(ALIGN(size, ARCH_DMA_MINALIGN), GFP_KERNEL);
+ if (p)
+ p[0] = SMP_REQUEST;
+ return p;
+@@ -239,8 +239,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ /* help some expanders that fail to zero sas_address in the 'no
+ * device' case
+ */
+- if (phy->attached_dev_type == SAS_PHY_UNUSED ||
+- phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
++ if (phy->attached_dev_type == SAS_PHY_UNUSED)
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ else
+ memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+@@ -1621,6 +1620,16 @@ int sas_discover_root_expander(struct domain_device *dev)
+
+ /* ---------- Domain revalidation ---------- */
+
++static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp,
++ u8 *sas_addr,
++ enum sas_device_type *type)
++{
++ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE);
++ *type = to_dev_type(&disc_resp->disc);
++ if (*type == SAS_PHY_UNUSED)
++ memset(sas_addr, 0, SAS_ADDR_SIZE);
++}
++
+ static int sas_get_phy_discover(struct domain_device *dev,
+ int phy_id, struct smp_disc_resp *disc_resp)
+ {
+@@ -1674,13 +1683,8 @@ int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
+ return -ENOMEM;
+
+ res = sas_get_phy_discover(dev, phy_id, disc_resp);
+- if (res == 0) {
+- memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
+- SAS_ADDR_SIZE);
+- *type = to_dev_type(&disc_resp->disc);
+- if (*type == 0)
+- memset(sas_addr, 0, SAS_ADDR_SIZE);
+- }
++ if (res == 0)
++ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type);
+ kfree(disc_resp);
+ return res;
+ }
+@@ -1940,6 +1944,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ struct expander_device *ex = &dev->ex_dev;
+ struct ex_phy *phy = &ex->ex_phy[phy_id];
+ enum sas_device_type type = SAS_PHY_UNUSED;
++ struct smp_disc_resp *disc_resp;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ char msg[80] = "";
+ int res;
+@@ -1951,33 +1956,41 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ SAS_ADDR(dev->sas_addr), phy_id, msg);
+
+ memset(sas_addr, 0, SAS_ADDR_SIZE);
+- res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
++ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
++ if (!disc_resp)
++ return -ENOMEM;
++
++ res = sas_get_phy_discover(dev, phy_id, disc_resp);
+ switch (res) {
+ case SMP_RESP_NO_PHY:
+ phy->phy_state = PHY_NOT_PRESENT;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+- return res;
++ goto out_free_resp;
+ case SMP_RESP_PHY_VACANT:
+ phy->phy_state = PHY_VACANT;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+- return res;
++ goto out_free_resp;
+ case SMP_RESP_FUNC_ACC:
+ break;
+ case -ECOMM:
+ break;
+ default:
+- return res;
++ goto out_free_resp;
+ }
+
++ if (res == 0)
++ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type);
++
+ if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
+ phy->phy_state = PHY_EMPTY;
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+ /*
+- * Even though the PHY is empty, for convenience we discover
+- * the PHY to update the PHY info, like negotiated linkrate.
++ * Even though the PHY is empty, for convenience we update
++ * the PHY info, like negotiated linkrate.
+ */
+- sas_ex_phy_discover(dev, phy_id);
+- return res;
++ if (res == 0)
++ sas_set_ex_phy(dev, phy_id, disc_resp);
++ goto out_free_resp;
+ } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
+ dev_type_flutter(type, phy->attached_dev_type)) {
+ struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
+@@ -1989,7 +2002,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ action = ", needs recovery";
+ pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
+ SAS_ADDR(dev->sas_addr), phy_id, action);
+- return res;
++ goto out_free_resp;
+ }
+
+ /* we always have to delete the old device when we went here */
+@@ -1998,7 +2011,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
+
+- return sas_discover_new(dev, phy_id);
++ res = sas_discover_new(dev, phy_id);
++out_free_resp:
++ kfree(disc_resp);
++ return res;
+ }
+
+ /**
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index a6dc7dc07fce37..277e45fed85d63 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -133,6 +133,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
+ func, dev->parent ? "exp-attached" :
+ "direct-attached",
+ SAS_ADDR(dev->sas_addr), err);
++
++ /*
++ * If the device probe failed, the expander phy attached address
++ * needs to be reset so that the phy will not be treated as flutter
++ * in the next revalidation
++ */
++ if (dev->parent && !dev_is_expander(dev->dev_type)) {
++ struct sas_phy *phy = dev->phy;
++ struct domain_device *parent = dev->parent;
++ struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
++
++ memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
++ }
++
+ sas_unregister_dev(dev->port, dev);
+ }
+
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index af15f7a22d2584..9670cb2bf198e4 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -33,6 +33,7 @@
+ struct lpfc_sli2_slim;
+
+ #define ELX_MODEL_NAME_SIZE 80
++#define ELX_FW_NAME_SIZE 84
+
+ #define LPFC_PCI_DEV_LP 0x1
+ #define LPFC_PCI_DEV_OC 0x2
+@@ -1324,7 +1325,6 @@ struct lpfc_hba {
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+ atomic_t num_rsrc_err;
+- atomic_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+@@ -1429,6 +1429,7 @@ struct lpfc_hba {
+ struct timer_list inactive_vmid_poll;
+
+ /* RAS Support */
++ spinlock_t ras_fwlog_lock; /* do not take while holding another lock */
+ struct lpfc_ras_fwlog ras_fwlog;
+
+ uint32_t iocb_cnt;
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index b1c9107d340836..8123062ec2faff 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -1904,6 +1904,11 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
+
+ /* Get transceiver information */
+ rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL);
++ if (!rdp_context) {
++ len = scnprintf(buf, PAGE_SIZE - len,
++ "SPF info NA: alloc failure\n");
++ return len;
++ }
+
+ rc = lpfc_get_sfp_info_wait(phba, rdp_context);
+ if (rc) {
+@@ -5864,9 +5869,9 @@ lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ state = phba->ras_fwlog.state;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
+index 595dca92e8db5a..0166f86c7b71a0 100644
+--- a/drivers/scsi/lpfc/lpfc_bsg.c
++++ b/drivers/scsi/lpfc/lpfc_bsg.c
+@@ -3169,10 +3169,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
+ }
+
+ cmdwqe = &cmdiocbq->wqe;
+- memset(cmdwqe, 0, sizeof(union lpfc_wqe));
++ memset(cmdwqe, 0, sizeof(*cmdwqe));
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rspwqe = &rspiocbq->wqe;
+- memset(rspwqe, 0, sizeof(union lpfc_wqe));
++ memset(rspwqe, 0, sizeof(*rspwqe));
+ }
+
+ INIT_LIST_HEAD(&head);
+@@ -5070,12 +5070,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ /* Current logging state */
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state == ACTIVE)
+ ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+ else
+ ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+ ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+@@ -5132,13 +5132,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
+
+ if (action == LPFC_RASACTION_STOP_LOGGING) {
+ /* Check if already disabled */
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state != ACTIVE) {
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ rc = -ESRCH;
+ goto ras_job_error;
+ }
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ /* Disable logging */
+ lpfc_ras_stop_fwlog(phba);
+@@ -5149,10 +5149,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
+ * FW-logging with new log-level. Return status
+ * "Logging already Running" to caller.
+ **/
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state != INACTIVE)
+ action_status = -EINPROGRESS;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ /* Enable logging */
+ rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+@@ -5268,13 +5268,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+ goto ras_job_error;
+
+ /* Logging to be stopped before reading */
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state == ACTIVE) {
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+@@ -5409,7 +5409,7 @@ lpfc_get_cgnbuf_info(struct bsg_job *job)
+ struct get_cgnbuf_info_req *cgnbuf_req;
+ struct lpfc_cgn_info *cp;
+ uint8_t *cgn_buff;
+- int size, cinfosz;
++ size_t size, cinfosz;
+ int rc = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index baae1f8279e0cb..1775115239860b 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -1671,6 +1671,18 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ }
+
+ out:
++ /* If the caller wanted a synchronous DA_ID completion, signal the
++ * wait obj and clear flag to reset the vport.
++ */
++ if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
++ if (ndlp->da_id_waitq)
++ wake_up(ndlp->da_id_waitq);
++ }
++
++ spin_lock_irq(&ndlp->lock);
++ ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
++ spin_unlock_irq(&ndlp->lock);
++
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+ return;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index ea9b42225e629d..20662b4f339eb3 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -2196,12 +2196,12 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+
+ memset(buffer, 0, size);
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ return -EINVAL;
+ }
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &phba->ras_fwlog.fwlog_buff_list, list) {
+@@ -2252,13 +2252,13 @@ lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
+ int size;
+ int rc = -ENOMEM;
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ rc = -EINVAL;
+ goto out;
+ }
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE,
+ phba->cfg_ras_fwlog_buffsize, &size))
+diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
+index f82615d87c4bbb..f5ae8cc158205c 100644
+--- a/drivers/scsi/lpfc/lpfc_disc.h
++++ b/drivers/scsi/lpfc/lpfc_disc.h
+@@ -90,6 +90,8 @@ enum lpfc_nlp_save_flags {
+ NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
+ /* wait for outstanding LOGO to cmpl */
+ NLP_WAIT_FOR_LOGO = 0x2,
++ /* wait for outstanding DA_ID to finish */
++ NLP_WAIT_FOR_DA_ID = 0x4
+ };
+
+ struct lpfc_nodelist {
+@@ -159,7 +161,12 @@ struct lpfc_nodelist {
+ uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
+ #define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
+ uint32_t nlp_defer_did;
++
++ /* These wait objects are NPIV specific. These IOs must complete
++ * synchronously.
++ */
+ wait_queue_head_t *logo_waitq;
++ wait_queue_head_t *da_id_waitq;
+ };
+
+ struct lpfc_node_rrq {
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 54e47f2682358a..ebe84bb7bb3ddf 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1119,12 +1119,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ vport->port_state, vport->fc_flag,
+ sp->cmn.priority_tagging, kref_read(&ndlp->kref));
+
+- if (sp->cmn.priority_tagging)
+- vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+- LPFC_VMID_TYPE_PRIO);
+ /* reinitialize the VMID datastructure before returning */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_reinit_vmid(vport);
++ if (sp->cmn.priority_tagging)
++ vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
++ LPFC_VMID_TYPE_PRIO);
+
+ /*
+ * Address a timing race with dev_loss. If dev_loss is active on
+@@ -4432,23 +4432,23 @@ lpfc_els_retry_delay(struct timer_list *t)
+ unsigned long flags;
+ struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+
++ /* Hold a node reference for outstanding queued work */
++ if (!lpfc_nlp_get(ndlp))
++ return;
++
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
++ lpfc_nlp_put(ndlp);
+ return;
+ }
+
+- /* We need to hold the node by incrementing the reference
+- * count until the queued work is done
+- */
+- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+- if (evtp->evt_arg1) {
+- evtp->evt = LPFC_EVT_ELS_RETRY;
+- list_add_tail(&evtp->evt_listp, &phba->work_list);
+- lpfc_worker_wake_up(phba);
+- }
++ evtp->evt_arg1 = ndlp;
++ evtp->evt = LPFC_EVT_ELS_RETRY;
++ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+- return;
++
++ lpfc_worker_wake_up(phba);
+ }
+
+ /**
+@@ -5228,9 +5228,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ /* ACC to LOGO completes to NPort <nlp_DID> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0109 ACC to LOGO completes to NPort x%x refcnt %d "
+- "Data: x%x x%x x%x\n",
+- ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
+- ndlp->nlp_state, ndlp->nlp_rpi);
++ "last els x%x Data: x%x x%x x%x\n",
++ ndlp->nlp_DID, kref_read(&ndlp->kref),
++ ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state,
++ ndlp->nlp_rpi);
+
+ /* This clause allows the LOGO ACC to complete and free resources
+ * for the Fabric Domain Controller. It does deliberately skip
+@@ -5242,18 +5243,22 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ goto out;
+
+ if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+- /* If PLOGI is being retried, PLOGI completion will cleanup the
+- * node. The NLP_NPR_2B_DISC flag needs to be retained to make
+- * progress on nodes discovered from last RSCN.
+- */
+- if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
+- (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
+- goto out;
+-
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ lpfc_unreg_rpi(vport, ndlp);
+
++ /* If came from PRLO, then PRLO_ACC is done.
++ * Start rediscovery now.
++ */
++ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) {
++ spin_lock_irq(&ndlp->lock);
++ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
++ spin_unlock_irq(&ndlp->lock);
++ ndlp->nlp_prev_state = ndlp->nlp_state;
++ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
++ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
++ }
+ }
++
+ out:
+ /*
+ * The driver received a LOGO from the rport and has ACK'd it.
+@@ -9644,11 +9649,12 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
+ continue;
+
+- /* On the ELS ring we can have ELS_REQUESTs or
+- * GEN_REQUESTs waiting for a response.
++ /* On the ELS ring we can have ELS_REQUESTs, ELS_RSPs,
++ * or GEN_REQUESTs waiting for a CQE response.
+ */
+ ulp_command = get_job_cmnd(phba, piocb);
+- if (ulp_command == CMD_ELS_REQUEST64_CR) {
++ if (ulp_command == CMD_ELS_REQUEST64_WQE ||
++ ulp_command == CMD_XMIT_ELS_RSP64_WQE) {
+ list_add_tail(&piocb->dlist, &abort_list);
+
+ /* If the link is down when flushing ELS commands
+@@ -11110,6 +11116,14 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ lpfc_nlp_put(ndlp);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
++
++ /* reinitialize the VMID datastructure before returning.
++ * this is specifically for vport
++ */
++ if (lpfc_is_vmid_enabled(phba))
++ lpfc_reinit_vmid(vport);
++ vport->vmid_flag = vport->phba->pport->vmid_flag;
++
+ return;
+ }
+
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 5154eeaee0ec32..0a01575ab06dd6 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -257,7 +257,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
++ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_worker_wake_up(phba);
++ return;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+@@ -275,10 +277,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+-
+ }
+-
+- return;
+ }
+
+ /**
+@@ -5783,7 +5782,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+ return NULL;
+
+ if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
+- ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
++ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 9e59c050103d64..416816d74ea1ca 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -7698,6 +7698,9 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+ "NVME" : " "),
+ (phba->nvmet_support ? "NVMET" : " "));
+
++ /* ras_fwlog state */
++ spin_lock_init(&phba->ras_fwlog_lock);
++
+ /* Initialize the IO buffer list used by driver for SLI3 SCSI */
+ spin_lock_init(&phba->scsi_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+@@ -13051,7 +13054,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+ rc = request_threaded_irq(eqhdl->irq,
+ &lpfc_sli4_hba_intr_handler,
+ &lpfc_sli4_hba_intr_handler_th,
+- IRQF_ONESHOT, name, eqhdl);
++ 0, name, eqhdl);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0486 MSI-X fast-path (%d) "
+@@ -14725,7 +14728,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
+ int
+ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+ {
+- uint8_t file_name[ELX_MODEL_NAME_SIZE];
++ char file_name[ELX_FW_NAME_SIZE] = {0};
+ int ret;
+ const struct firmware *fw;
+
+@@ -14734,7 +14737,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -EPERM;
+
+- snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
++ scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
+
+ if (fw_upgrade == INT_FW_UPGRADE) {
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index 1eb7f7e60bba55..fe174062e49460 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -748,8 +748,10 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ /* Save the ELS cmd */
+ elsiocb->drvrTimeout = cmd;
+
+- lpfc_sli4_resume_rpi(ndlp,
+- lpfc_mbx_cmpl_resume_rpi, elsiocb);
++ if (lpfc_sli4_resume_rpi(ndlp,
++ lpfc_mbx_cmpl_resume_rpi,
++ elsiocb))
++ kfree(elsiocb);
+ goto out;
+ }
+ }
+@@ -2633,8 +2635,26 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ /* flush the target */
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+
+- /* Treat like rcv logo */
+- lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
++ /* Send PRLO_ACC */
++ spin_lock_irq(&ndlp->lock);
++ ndlp->nlp_flag |= NLP_LOGO_ACC;
++ spin_unlock_irq(&ndlp->lock);
++ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
++
++ /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR.
++ * lpfc_cmpl_els_logo_acc is expected to restart discovery.
++ */
++ ndlp->nlp_last_elscmd = ELS_CMD_PRLO;
++ ndlp->nlp_prev_state = ndlp->nlp_state;
++
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY,
++ "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n",
++ ndlp->nlp_DID, ndlp->nlp_flag,
++ ndlp->nlp_last_elscmd,
++ kref_read(&ndlp->kref));
++
++ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
++
+ return ndlp->nlp_state;
+ }
+
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index 96e11a26c297eb..a7479258e80559 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -2614,9 +2614,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ /* No concern about the role change on the nvme remoteport.
+ * The transport will update it.
+ */
+- spin_lock_irq(&vport->phba->hbalock);
++ spin_lock_irq(&ndlp->lock);
+ ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
+- spin_unlock_irq(&vport->phba->hbalock);
++ spin_unlock_irq(&ndlp->lock);
+
+ /* Don't let the host nvme transport keep sending keep-alives
+ * on this remoteport. Vport is unloading, no recovery. The
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index 425328d9c2d80b..d41fea53e41e90 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -1586,7 +1586,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+ wqe = &nvmewqe->wqe;
+
+ /* Initialize WQE */
+- memset(wqe, 0, sizeof(union lpfc_wqe));
++ memset(wqe, 0, sizeof(*wqe));
+
+ ctx_buf->iocbq->cmd_dmabuf = NULL;
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index d26941b131fdb8..cf506556f3b0bd 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -167,11 +167,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ unsigned long new_queue_depth;
+- unsigned long num_rsrc_err, num_cmd_success;
++ unsigned long num_rsrc_err;
+ int i;
+
+ num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+- num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+ /*
+ * The error and success command counters are global per
+@@ -186,20 +185,16 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ shost_for_each_device(sdev, shost) {
+- new_queue_depth =
+- sdev->queue_depth * num_rsrc_err /
+- (num_rsrc_err + num_cmd_success);
+- if (!new_queue_depth)
+- new_queue_depth = sdev->queue_depth - 1;
++ if (num_rsrc_err >= sdev->queue_depth)
++ new_queue_depth = 1;
+ else
+ new_queue_depth = sdev->queue_depth -
+- new_queue_depth;
++ num_rsrc_err;
+ scsi_change_queue_depth(sdev, new_queue_depth);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
+ }
+
+ /**
+@@ -1918,7 +1913,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+-static int
++static uint32_t
+ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datasegcnt,
+ struct lpfc_io_buf *lpfc_cmd)
+@@ -1926,8 +1921,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct sli4_sge_diseed *diseed = NULL;
+ dma_addr_t physaddr;
+- int i = 0, num_sge = 0, status;
+- uint32_t reftag;
++ int i = 0, status;
++ uint32_t reftag, num_sge = 0;
+ uint8_t txop, rxop;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t rc;
+@@ -2099,7 +2094,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+-static int
++static uint32_t
+ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datacnt, int protcnt,
+ struct lpfc_io_buf *lpfc_cmd)
+@@ -2123,8 +2118,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint32_t rc;
+ #endif
+ uint32_t checking = 1;
+- uint32_t dma_offset = 0;
+- int num_sge = 0, j = 2;
++ uint32_t dma_offset = 0, num_sge = 0;
++ int j = 2;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
+
+ sgpe = scsi_prot_sglist(sc);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 4dfadf254a7271..9cd22588c8eb33 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ empty = list_empty(&phba->active_rrq_list);
+ list_add_tail(&rrq->list, &phba->active_rrq_list);
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
++ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (empty)
+ lpfc_worker_wake_up(phba);
+- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return 0;
+ out:
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+@@ -6844,9 +6844,9 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+ {
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ ras_fwlog->state = INACTIVE;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+@@ -6889,9 +6889,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
+ ras_fwlog->lwpd.virt = NULL;
+ }
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ ras_fwlog->state = INACTIVE;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ }
+
+ /**
+@@ -6993,9 +6993,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ goto disable_ras;
+ }
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ ras_fwlog->state = ACTIVE;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+@@ -7027,9 +7027,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+ uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
+ int rc = 0;
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ ras_fwlog->state = INACTIVE;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
+ phba->cfg_ras_fwlog_buffsize);
+@@ -7090,9 +7090,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+ mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
+ mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+
+- spin_lock_irq(&phba->hbalock);
++ spin_lock_irq(&phba->ras_fwlog_lock);
+ ras_fwlog->state = REG_INPROGRESS;
+- spin_unlock_irq(&phba->hbalock);
++ spin_unlock_irq(&phba->ras_fwlog_lock);
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
+
+@@ -7577,7 +7577,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+ struct lpfc_sglq *sglq_entry = NULL;
+ struct lpfc_sglq *sglq_entry_next = NULL;
+ struct lpfc_sglq *sglq_entry_first = NULL;
+- int status, total_cnt;
++ int status = 0, total_cnt;
+ int post_cnt = 0, num_posted = 0, block_cnt = 0;
+ int last_xritag = NO_XRI;
+ LIST_HEAD(prep_sgl_list);
+@@ -11369,18 +11369,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
+ unsigned long iflags;
+ struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
+
++ /* Hold a node reference for outstanding queued work */
++ if (!lpfc_nlp_get(ndlp))
++ return;
++
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
++ lpfc_nlp_put(ndlp);
+ return;
+ }
+
+- /* Incrementing the reference count until the queued work is done. */
+- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+- if (!evtp->evt_arg1) {
+- spin_unlock_irqrestore(&phba->hbalock, iflags);
+- return;
+- }
++ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_RECOVER_PORT;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
+index cf8ba840d0eab6..773e02ae20c374 100644
+--- a/drivers/scsi/lpfc/lpfc_vmid.c
++++ b/drivers/scsi/lpfc/lpfc_vmid.c
+@@ -321,5 +321,6 @@ lpfc_reinit_vmid(struct lpfc_vport *vport)
+ if (!hash_empty(vport->hash_table))
+ hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
+ hash_del(&cur->hnode);
++ vport->vmid_flag = 0;
+ write_unlock(&vport->vmid_lock);
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
+index 6c7559cf1a4b65..256ee797adb306 100644
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -633,6 +633,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ int rc;
++ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+@@ -683,30 +684,54 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ lpfc_free_sysfs_attr(vport);
+ lpfc_debugfs_terminate(vport);
+
+- /* Remove FC host to break driver binding. */
+- fc_remove_host(shost);
+- scsi_remove_host(shost);
+-
+ /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto skip_logo;
+
++ /* Send the DA_ID and Fabric LOGO to cleanup the NPIV fabric entries. */
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ phba->link_state >= LPFC_LINK_UP &&
+ phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ if (vport->cfg_enable_da_id) {
+- /* Send DA_ID and wait for a completion. */
++ /* Send DA_ID and wait for a completion. This is best
++ * effort. If the DA_ID fails, likely the fabric will
++ * "leak" NportIDs but at least the driver issued the
++ * command.
++ */
++ ndlp = lpfc_findnode_did(vport, NameServer_DID);
++ if (!ndlp)
++ goto issue_logo;
++
++ spin_lock_irq(&ndlp->lock);
++ ndlp->da_id_waitq = &waitq;
++ ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
++ spin_unlock_irq(&ndlp->lock);
++
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+- if (rc) {
+- lpfc_printf_log(vport->phba, KERN_WARNING,
+- LOG_VPORT,
+- "1829 CT command failed to "
+- "delete objects on fabric, "
+- "rc %d\n", rc);
++ if (!rc) {
++ wait_event_timeout(waitq,
++ !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
++ msecs_to_jiffies(phba->fc_ratov * 2000));
+ }
++
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
++ "1829 DA_ID issue status %d. "
++ "SFlag x%x NState x%x, NFlag x%x "
++ "Rpi x%x\n",
++ rc, ndlp->save_flags, ndlp->nlp_state,
++ ndlp->nlp_flag, ndlp->nlp_rpi);
++
++ /* Remove the waitq and save_flags. It no
++ * longer matters if the wake happened.
++ */
++ spin_lock_irq(&ndlp->lock);
++ ndlp->da_id_waitq = NULL;
++ ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
++ spin_unlock_irq(&ndlp->lock);
+ }
+
++issue_logo:
+ /*
+ * If the vpi is not registered, then a valid FDISC doesn't
+ * exist and there is no need for a ELS LOGO. Just cleanup
+@@ -730,6 +755,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+
+ skip_logo:
+
++ /* Remove FC host to break driver binding. */
++ fc_remove_host(shost);
++ scsi_remove_host(shost);
++
+ lpfc_cleanup(vport);
+
+ /* Remove scsi host now. The nodes are cleaned up. */
+diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
+index 2e511697fce3e9..2c88ce24d19a7a 100644
+--- a/drivers/scsi/mac_scsi.c
++++ b/drivers/scsi/mac_scsi.c
+@@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup);
+ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
+ * so bus errors are unavoidable.
+ *
+- * If a MOVE.B instruction faults, we assume that zero bytes were transferred
+- * and simply retry. That assumption probably depends on target behaviour but
+- * seems to hold up okay. The NOP provides synchronization: without it the
+- * fault can sometimes occur after the program counter has moved past the
+- * offending instruction. Post-increment addressing can't be used.
++ * If a MOVE.B instruction faults during a receive operation, we assume the
++ * target sent nothing and try again. That assumption probably depends on
++ * target firmware but it seems to hold up okay. If a fault happens during a
++ * send operation, the target may or may not have seen /ACK and got the byte.
++ * It's uncertain so the whole SCSI command gets retried.
++ *
++ * The NOP is needed for synchronization because the fault address in the
++ * exception stack frame may or may not be the instruction that actually
++ * caused the bus error. Post-increment addressing can't be used.
+ */
+
+ #define MOVE_BYTE(operands) \
+@@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup);
+ ".previous \n" \
+ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+-#define MAC_PDMA_DELAY 32
+-
+ static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
+ {
+ unsigned char *addr = start;
+@@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
+ if (n >= 1) {
+ MOVE_BYTE("%0@,%3@");
+ if (result)
+- goto out;
++ return -1;
+ }
+ if (n >= 1 && ((unsigned long)addr & 1)) {
+ MOVE_BYTE("%0@,%3@");
+ if (result)
+- goto out;
++ return -2;
+ }
+ while (n >= 32)
+ MOVE_16_WORDS("%0@+,%3@");
+ while (n >= 2)
+ MOVE_WORD("%0@+,%3@");
+ if (result)
+- return start - addr; /* Negated to indicate uncertain length */
++ return start - addr - 1; /* Negated to indicate uncertain length */
+ if (n == 1)
+ MOVE_BYTE("%0@,%3@");
+-out:
+ return addr - start;
+ }
+
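mac_pdma_send() now folds an uncertainty marker into its return value: a non-negative result is an exact byte count, while a fault during the word moves reports -(count + 1), so even an uncertain count of zero stays distinguishable from a clean zero-byte transfer. A tiny sketch of that encoding (encode_uncertain() and decode() are illustrative helpers, not driver functions):

/* Sketch: encoding "count, but uncertain" as -(count + 1). */
#include <stdio.h>

static int encode_uncertain(int count) { return -count - 1; }

static int decode(int ret, int *count)
{
    if (ret >= 0) {
        *count = ret;
        return 0;   /* exact count */
    }
    *count = -ret - 1;
    return 1;       /* lower bound only; target may have latched one more byte */
}

int main(void)
{
    int count, uncertain;

    uncertain = decode(encode_uncertain(0), &count);
    printf("ret=%d count=%d uncertain=%d\n", encode_uncertain(0), count, uncertain);
    uncertain = decode(7, &count);
    printf("ret=7 count=%d uncertain=%d\n", count, uncertain);
    return 0;
}
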
+@@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value)
+ out_be32(hostdata->io + (CTRL_REG << 4), value);
+ }
+
++static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata)
++{
++ unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */
++ unsigned char basr;
++
++again:
++ basr = NCR5380_read(BUS_AND_STATUS_REG);
++
++ if (!(basr & BASR_PHASE_MATCH))
++ return 1;
++
++ if (basr & BASR_IRQ)
++ return -1;
++
++ if (basr & BASR_DRQ)
++ return 0;
++
++ if (n-- == 0) {
++ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
++ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
++ "%s: DRQ timeout\n", __func__);
++ return -1;
++ }
++
++ NCR5380_poll_politely2(hostdata,
++ BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
++ BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0);
++ goto again;
++}
++
+ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
+ {
+ u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
+ unsigned char *d = dst;
+- int result = 0;
+
+ hostdata->pdma_residual = len;
+
+- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+- BASR_DRQ | BASR_PHASE_MATCH,
+- BASR_DRQ | BASR_PHASE_MATCH, 0)) {
+- int bytes;
++ while (macscsi_wait_for_drq(hostdata) == 0) {
++ int bytes, chunk_bytes;
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
+ CTRL_INTERRUPTS_ENABLE);
+
+- bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
++ chunk_bytes = min(hostdata->pdma_residual, 512);
++ bytes = mac_pdma_recv(s, d, chunk_bytes);
++
++ if (macintosh_config->ident == MAC_MODEL_IIFX)
++ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+
+ if (bytes > 0) {
+ d += bytes;
+@@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ }
+
+ if (hostdata->pdma_residual == 0)
+- goto out;
++ break;
+
+- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+- BUS_AND_STATUS_REG, BASR_ACK,
+- BASR_ACK, 0) < 0)
+- scmd_printk(KERN_DEBUG, hostdata->connected,
+- "%s: !REQ and !ACK\n", __func__);
+- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+- goto out;
++ if (bytes > 0)
++ continue;
+
+- if (bytes == 0)
+- udelay(MAC_PDMA_DELAY);
++ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
++ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
++ "%s: bus error [%d/%d] (%d/%d)\n",
++ __func__, d - dst, len, bytes, chunk_bytes);
+
+- if (bytes >= 0)
++ if (bytes == 0)
+ continue;
+
+- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+- "%s: bus error (%d/%d)\n", __func__, d - dst, len);
+- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- result = -1;
+- goto out;
++ if (macscsi_wait_for_drq(hostdata) <= 0)
++ set_host_byte(hostdata->connected, DID_ERROR);
++ break;
+ }
+
+- scmd_printk(KERN_ERR, hostdata->connected,
+- "%s: phase mismatch or !DRQ\n", __func__);
+- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- result = -1;
+-out:
+- if (macintosh_config->ident == MAC_MODEL_IIFX)
+- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+- return result;
++ return 0;
+ }
+
+ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+@@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ {
+ unsigned char *s = src;
+ u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
+- int result = 0;
+
+ hostdata->pdma_residual = len;
+
+- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+- BASR_DRQ | BASR_PHASE_MATCH,
+- BASR_DRQ | BASR_PHASE_MATCH, 0)) {
+- int bytes;
++ while (macscsi_wait_for_drq(hostdata) == 0) {
++ int bytes, chunk_bytes;
+
+ if (macintosh_config->ident == MAC_MODEL_IIFX)
+ write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
+ CTRL_INTERRUPTS_ENABLE);
+
+- bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
++ chunk_bytes = min(hostdata->pdma_residual, 512);
++ bytes = mac_pdma_send(s, d, chunk_bytes);
++
++ if (macintosh_config->ident == MAC_MODEL_IIFX)
++ write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+
+ if (bytes > 0) {
+ s += bytes;
+ hostdata->pdma_residual -= bytes;
+ }
+
+- if (hostdata->pdma_residual == 0) {
+- if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+- TCR_LAST_BYTE_SENT,
+- TCR_LAST_BYTE_SENT,
+- 0) < 0) {
+- scmd_printk(KERN_ERR, hostdata->connected,
+- "%s: Last Byte Sent timeout\n", __func__);
+- result = -1;
+- }
+- goto out;
+- }
++ if (hostdata->pdma_residual == 0)
++ break;
+
+- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+- BUS_AND_STATUS_REG, BASR_ACK,
+- BASR_ACK, 0) < 0)
+- scmd_printk(KERN_DEBUG, hostdata->connected,
+- "%s: !REQ and !ACK\n", __func__);
+- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+- goto out;
++ if (bytes > 0)
++ continue;
+
+- if (bytes == 0)
+- udelay(MAC_PDMA_DELAY);
++ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
++ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
++ "%s: bus error [%d/%d] (%d/%d)\n",
++ __func__, s - src, len, bytes, chunk_bytes);
+
+- if (bytes >= 0)
++ if (bytes == 0)
+ continue;
+
+- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+- "%s: bus error (%d/%d)\n", __func__, s - src, len);
+- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- result = -1;
+- goto out;
++ if (macscsi_wait_for_drq(hostdata) <= 0)
++ set_host_byte(hostdata->connected, DID_ERROR);
++ break;
+ }
+
+- scmd_printk(KERN_ERR, hostdata->connected,
+- "%s: phase mismatch or !DRQ\n", __func__);
+- NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- result = -1;
+-out:
+- if (macintosh_config->ident == MAC_MODEL_IIFX)
+- write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+- return result;
++ return 0;
+ }
+
+ static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index e1aa667dae662d..3d4f13da1ae873 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
+ * Fusion registers could intermittently return all zeroes.
+ * This behavior is transient in nature and subsequent reads will
+ * return valid value. As a workaround in driver, retry readl for
+- * upto three times until a non-zero value is read.
++ * up to thirty times until a non-zero value is read.
+ */
+ if (instance->adapter_type == AERO_SERIES) {
+ do {
+ ret_val = readl(addr);
+ i++;
+- } while (ret_val == 0 && i < 3);
++ } while (ret_val == 0 && i < 30);
+ return ret_val;
+ } else {
+ return readl(addr);
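
This megaraid_sas change and the mpt3sas _base_readl_ext_retry() hunk further down apply the same workaround: the affected controllers can transiently return 0 from a register read, so the read is retried a bounded number of times and the last value is returned either way. A generic sketch of the pattern (read_reg() is a stand-in for readl(); the two-failure simulation is illustrative):

/* Sketch: bounded retry of a register read that may transiently be 0. */
#include <stdint.h>
#include <stdio.h>

static int attempts;

static uint32_t read_reg(void)
{
    /* Simulate two transient zero reads before a valid value. */
    return ++attempts < 3 ? 0 : 0xdeadbeef;
}

static uint32_t readl_retry(uint32_t (*rd)(void), int max_tries)
{
    uint32_t val = 0;
    int i;

    for (i = 0; i < max_tries; i++) {
        val = rd();
        if (val != 0)
            break;
    }
    return val;    /* may still be 0 if every attempt failed */
}

int main(void)
{
    printf("read 0x%08x after %d attempts\n",
           readl_retry(read_reg, 30), attempts);
    return 0;
}
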
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 08645a99ad6b3b..8e9e278d04495c 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -223,6 +223,22 @@ static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+ return rval;
+ }
+
++ if (mrioc->unrecoverable) {
++ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
++ __func__);
++ return -EFAULT;
++ }
++
++ if (mrioc->reset_in_progress) {
++ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
++ return -EAGAIN;
++ }
++
++ if (mrioc->stop_bsgs) {
++ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
++ return -EAGAIN;
++ }
++
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &pel_enable, sizeof(pel_enable));
+@@ -1329,7 +1345,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
+ if ((mpirep_offset != 0xFF) &&
+ drv_bufs[mpirep_offset].bsg_buf_len) {
+ drv_buf_iter = &drv_bufs[mpirep_offset];
+- drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
++ drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
+ mrioc->reply_sz);
+ bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
+
+@@ -1838,10 +1854,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(persistent_id);
+
++/**
++ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
++ * @dev: pointer to embedded device
++ * @attr: sas_ncq_prio_supported attribute descriptor
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' sdev attribute, only works with SATA devices
++ */
++static ssize_t
++sas_ncq_prio_supported_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct scsi_device *sdev = to_scsi_device(dev);
++
++ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
++}
++static DEVICE_ATTR_RO(sas_ncq_prio_supported);
++
++/**
++ * sas_ncq_prio_enable_show - send prioritized io commands to device
++ * @dev: pointer to embedded device
++ * @attr: sas_ncq_prio_enable attribute descriptor
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read/write' sdev attribute, only works with SATA devices
++ */
++static ssize_t
++sas_ncq_prio_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct scsi_device *sdev = to_scsi_device(dev);
++ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
++
++ if (!sdev_priv_data)
++ return 0;
++
++ return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
++}
++
++static ssize_t
++sas_ncq_prio_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct scsi_device *sdev = to_scsi_device(dev);
++ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
++ bool ncq_prio_enable = 0;
++
++ if (kstrtobool(buf, &ncq_prio_enable))
++ return -EINVAL;
++
++ if (!sas_ata_ncq_prio_supported(sdev))
++ return -EINVAL;
++
++ sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
++
++ return strlen(buf);
++}
++static DEVICE_ATTR_RW(sas_ncq_prio_enable);
++
+ static struct attribute *mpi3mr_dev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_device_handle.attr,
+ &dev_attr_persistent_id.attr,
++ &dev_attr_sas_ncq_prio_supported.attr,
++ &dev_attr_sas_ncq_prio_enable.attr,
+ NULL,
+ };
+
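The new sas_ncq_prio_enable store routine relies on kstrtobool(), which accepts the usual sysfs spellings ("0"/"1", "y"/"n", "t"/"f", "on"/"off", upper or lower case) and rejects everything else. A userspace approximation of that parser for reference; this mirrors the kernel's documented behaviour but is a sketch, not the kernel source:

/* Sketch: kstrtobool()-style parsing of sysfs boolean input. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int parse_bool(const char *s, bool *res)
{
    if (!s)
        return -EINVAL;
    switch (s[0]) {
    case 'y': case 'Y': case 't': case 'T': case '1':
        *res = true;
        return 0;
    case 'n': case 'N': case 'f': case 'F': case '0':
        *res = false;
        return 0;
    case 'o': case 'O':
        if (s[1] == 'n' || s[1] == 'N') {
            *res = true;
            return 0;
        }
        if (s[1] == 'f' || s[1] == 'F') {
            *res = false;
            return 0;
        }
        break;
    }
    return -EINVAL;
}

int main(void)
{
    const char *inputs[] = { "1", "off", "Yes", "maybe" };
    bool v;

    for (int i = 0; i < 4; i++)
        printf("%-6s -> %s\n", inputs[i],
               parse_bool(inputs[i], &v) ? "-EINVAL" : (v ? "true" : "false"));
    return 0;
}
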
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index f039f1d9864776..0d148c39ebcc98 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1892,7 +1892,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+
+ reply_qid = qidx + 1;
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+- if (!mrioc->pdev->revision)
++ if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
++ !mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ op_reply_q->ci = 0;
+ op_reply_q->ephase = 1;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 89ba015c5d7e8d..7f32619234696f 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -1047,8 +1047,9 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+-		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
+-		    tgtdev->host_exposed && tgtdev->starget &&
+-		    tgtdev->starget->hostdata) {
++		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
++		     tgtdev->is_hidden) &&
++		     tgtdev->host_exposed && tgtdev->starget &&
++		     tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_removed = 1;
+ atomic_set(&tgt_priv->block_io, 0);
+@@ -1064,14 +1065,24 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
+ mpi3mr_tgtdev_put(tgtdev);
++		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
++ dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
++ tgtdev->perst_id);
++ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ }
+ }
+
+ tgtdev = NULL;
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+- !tgtdev->is_hidden && !tgtdev->host_exposed)
+- mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
++ !tgtdev->is_hidden) {
++ if (!tgtdev->host_exposed)
++ mpi3mr_report_tgtdev_to_host(mrioc,
++ tgtdev->perst_id);
++ else if (tgtdev->starget)
++ starget_for_each_device(tgtdev->starget,
++ (void *)tgtdev, mpi3mr_update_sdev);
++ }
+ }
+ }
+
+@@ -3436,6 +3447,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
+ scmd->sc_data_direction);
+ priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
+ } else {
++ /*
++ * Some firmware versions byte-swap the REPORT ZONES command
++ * reply from ATA-ZAC devices by directly accessing in the host
++ * buffer. This does not respect the default command DMA
++ * direction and causes IOMMU page faults on some architectures
++ * with an IOMMU enforcing write mappings (e.g. AMD hosts).
++ * Avoid such issue by making the REPORT ZONES buffer mapping
++ * bi-directional.
++ */
++ if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
++ scmd->sc_data_direction = DMA_BIDIRECTIONAL;
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ }
+@@ -5084,7 +5106,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
+ MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
+
+- if (pdev->revision)
++ if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
++ !pdev->revision)
++ mrioc->enable_segqueue = false;
++ else
+ mrioc->enable_segqueue = true;
+
+ init_waitqueue_head(&mrioc->reset_waitq);
+@@ -5413,6 +5438,14 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
+ },
++ {
++ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
++ MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
++ },
++ {
++ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
++ MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
++ },
+ { 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 82b55e95573041..0072bbdb265b8c 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1355,11 +1355,21 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ mr_sas_port->remote_identify.sas_address, hba_port);
+
++	if (mr_sas_node->num_phys >= sizeof(mr_sas_port->phy_mask) * 8)
++ ioc_info(mrioc, "max port count %u could be too high\n",
++ mr_sas_node->num_phys);
++
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ mr_sas_port->remote_identify.sas_address) ||
+ (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
++
++		if (i >= sizeof(mr_sas_port->phy_mask) * 8) {
++ ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
++ i, sizeof(mr_sas_port->phy_mask) * 8);
++ goto out_fail;
++ }
+ list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 61a32bf00747e6..8acf586dc8b2ed 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -223,8 +223,8 @@ _base_readl_ext_retry(const void __iomem *addr)
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+- if (ret_val == 0)
+- continue;
++ if (ret_val != 0)
++ break;
+ }
+
+ return ret_val;
+@@ -2671,6 +2671,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+ _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
+ }
+
++static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd)
++{
++ /*
++ * Some firmware versions byte-swap the REPORT ZONES command reply from
++ * ATA-ZAC devices by directly accessing in the host buffer. This does
++ * not respect the default command DMA direction and causes IOMMU page
++ * faults on some architectures with an IOMMU enforcing write mappings
++ * (e.g. AMD hosts). Avoid such issue by making the report zones buffer
++ * mapping bi-directional.
++ */
++ if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES)
++ cmd->sc_data_direction = DMA_BIDIRECTIONAL;
++
++ return scsi_dma_map(cmd);
++}
++
+ /**
+ * _base_build_sg_scmd - main sg creation routine
+ * pcie_device is unused here!
+@@ -2717,7 +2733,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+
+ sg_scmd = scsi_sglist(scmd);
+- sges_left = scsi_dma_map(scmd);
++ sges_left = _base_scsi_dma_map(scmd);
+ if (sges_left < 0)
+ return -ENOMEM;
+
+@@ -2861,7 +2877,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
+ }
+
+ sg_scmd = scsi_sglist(scmd);
+- sges_left = scsi_dma_map(scmd);
++ sges_left = _base_scsi_dma_map(scmd);
+ if (sges_left < 0)
+ return -ENOMEM;
+
+@@ -7387,7 +7403,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
+ return -EFAULT;
+ }
+
+- issue_diag_reset:
++ return 0;
++
++issue_diag_reset:
+ rc = _base_diag_reset(ioc);
+ return rc;
+ }
+@@ -8484,6 +8502,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pd_handles_sz++;
++ /*
++ * pd_handles_sz should have, at least, the minimal room for
++ * set_bit()/test_bit(), otherwise out-of-memory touch may occur.
++ */
++ ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
++
+ ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->pd_handles) {
+@@ -8501,6 +8525,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pend_os_device_add_sz++;
++
++ /*
++ * pend_os_device_add_sz should have, at least, the minimal room for
++ * set_bit()/test_bit(), otherwise out-of-memory may occur.
++ */
++ ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
++ sizeof(unsigned long));
+ ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ GFP_KERNEL);
+ if (!ioc->pend_os_device_add) {
+@@ -8792,6 +8823,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
+ if (ioc->facts.MaxDevHandle % 8)
+ pd_handles_sz++;
+
++ /*
++ * pd_handles should have, at least, the minimal room for
++ * set_bit()/test_bit(), otherwise out-of-memory touch may
++ * occur.
++ */
++ pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
+ pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
+ GFP_KERNEL);
+ if (!pd_handles) {
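
The ALIGN() additions above exist because set_bit()/test_bit() access the bitmap in unsigned long words: a buffer sized to MaxDevHandle/8 bytes can end mid-word, and a set_bit() near the end then reads and writes past the allocation. Rounding the size up to a multiple of sizeof(unsigned long) closes that hole. A sketch of the arithmetic (ALIGN is spelled out the way the kernel defines it for power-of-two alignments; the handle count is illustrative):

/* Sketch: why byte-exact bitmap sizes must be rounded to word size. */
#include <stdio.h>

#define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned int max_dev_handle = 1025;    /* illustrative value */
    unsigned int sz = max_dev_handle / 8;

    if (max_dev_handle % 8)
        sz++;                              /* 129 bytes of bits */

    /*
     * set_bit(1024, map) touches the unsigned long containing bit 1024,
     * i.e. bytes 128..135 on a 64-bit host, beyond a 129-byte buffer.
     */
    printf("byte-exact size: %u\n", sz);
    printf("word-aligned size: %lu\n",
           (unsigned long)ALIGN(sz, sizeof(unsigned long)));
    return 0;
}
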
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 1be0850ca17aa1..ae21cc064acf51 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -2045,9 +2045,6 @@ void
+ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
+
+-/* NCQ Prio Handling Check */
+-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+-
+ void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+ void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+ void mpt3sas_init_debugfs(void);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index efdb8178db3248..e289f18fc76437 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -4034,7 +4034,7 @@ sas_ncq_prio_supported_show(struct device *dev,
+ {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+- return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
++ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+ }
+ static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+@@ -4069,7 +4069,7 @@ sas_ncq_prio_enable_store(struct device *dev,
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+- if (!scsih_ncq_prio_supp(sdev))
++ if (!sas_ata_ncq_prio_supported(sdev))
+ return -EINVAL;
+
+ sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 605013d3ee83a4..f270b0d829f6ea 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -12590,29 +12590,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+-/**
+- * scsih_ncq_prio_supp - Check for NCQ command priority support
+- * @sdev: scsi device struct
+- *
+- * This is called when a user indicates they would like to enable
+- * ncq command priorities. This works only on SATA devices.
+- */
+-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+-{
+- struct scsi_vpd *vpd;
+- bool ncq_prio_supp = false;
+-
+- rcu_read_lock();
+- vpd = rcu_dereference(sdev->vpd_pg89);
+- if (!vpd || vpd->len < 214)
+- goto out;
+-
+- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
+-out:
+- rcu_read_unlock();
+-
+- return ncq_prio_supp;
+-}
+ /*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
+index ca2e932dd9b701..f684eb5e04898a 100644
+--- a/drivers/scsi/myrb.c
++++ b/drivers/scsi/myrb.c
+@@ -1775,9 +1775,9 @@ static ssize_t raid_state_show(struct device *dev,
+
+ name = myrb_devstate_name(ldev_info->state);
+ if (name)
+- ret = snprintf(buf, 32, "%s\n", name);
++ ret = snprintf(buf, 64, "%s\n", name);
+ else
+- ret = snprintf(buf, 32, "Invalid (%02X)\n",
++ ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ ldev_info->state);
+ } else {
+ struct myrb_pdev_state *pdev_info = sdev->hostdata;
+@@ -1796,9 +1796,9 @@ static ssize_t raid_state_show(struct device *dev,
+ else
+ name = myrb_devstate_name(pdev_info->state);
+ if (name)
+- ret = snprintf(buf, 32, "%s\n", name);
++ ret = snprintf(buf, 64, "%s\n", name);
+ else
+- ret = snprintf(buf, 32, "Invalid (%02X)\n",
++ ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ pdev_info->state);
+ }
+ return ret;
+@@ -1886,11 +1886,11 @@ static ssize_t raid_level_show(struct device *dev,
+
+ name = myrb_raidlevel_name(ldev_info->raid_level);
+ if (!name)
+- return snprintf(buf, 32, "Invalid (%02X)\n",
++ return snprintf(buf, 64, "Invalid (%02X)\n",
+ ldev_info->state);
+- return snprintf(buf, 32, "%s\n", name);
++ return snprintf(buf, 64, "%s\n", name);
+ }
+- return snprintf(buf, 32, "Physical Drive\n");
++ return snprintf(buf, 64, "Physical Drive\n");
+ }
+ static DEVICE_ATTR_RO(raid_level);
+
+@@ -1903,15 +1903,15 @@ static ssize_t rebuild_show(struct device *dev,
+ unsigned char status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+- return snprintf(buf, 32, "physical device - not rebuilding\n");
++ return snprintf(buf, 64, "physical device - not rebuilding\n");
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+
+ if (rbld_buf.ldev_num != sdev->id ||
+ status != MYRB_STATUS_SUCCESS)
+- return snprintf(buf, 32, "not rebuilding\n");
++ return snprintf(buf, 64, "not rebuilding\n");
+
+- return snprintf(buf, 32, "rebuilding block %u of %u\n",
++ return snprintf(buf, 64, "rebuilding block %u of %u\n",
+ rbld_buf.ldev_size - rbld_buf.blocks_left,
+ rbld_buf.ldev_size);
+ }
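
These myrb changes, and the matching myrs ones below, widen the size argument passed to snprintf() from 32 to 64: several of the state strings plus the trailing newline no longer fit in 32 bytes, and snprintf() truncates silently while still returning the length it would have written, so the sysfs show could report more bytes than were actually stored. A small demonstration of that failure mode:

/* Sketch: snprintf() truncation vs. its return value. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[64];
    int ret;

    ret = snprintf(buf, 32, "%s\n", "physical device - not rebuilding");
    printf("returned %d, actually stored %zu bytes: \"%s\"\n",
           ret, strlen(buf), buf);

    ret = snprintf(buf, 64, "%s\n", "physical device - not rebuilding");
    printf("returned %d, actually stored %zu bytes\n", ret, strlen(buf));
    return 0;
}
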
+diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
+index a1eec65a9713f5..e824be9d9bbb94 100644
+--- a/drivers/scsi/myrs.c
++++ b/drivers/scsi/myrs.c
+@@ -947,9 +947,9 @@ static ssize_t raid_state_show(struct device *dev,
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ if (name)
+- ret = snprintf(buf, 32, "%s\n", name);
++ ret = snprintf(buf, 64, "%s\n", name);
+ else
+- ret = snprintf(buf, 32, "Invalid (%02X)\n",
++ ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+ } else {
+ struct myrs_pdev_info *pdev_info;
+@@ -958,9 +958,9 @@ static ssize_t raid_state_show(struct device *dev,
+ pdev_info = sdev->hostdata;
+ name = myrs_devstate_name(pdev_info->dev_state);
+ if (name)
+- ret = snprintf(buf, 32, "%s\n", name);
++ ret = snprintf(buf, 64, "%s\n", name);
+ else
+- ret = snprintf(buf, 32, "Invalid (%02X)\n",
++ ret = snprintf(buf, 64, "Invalid (%02X)\n",
+ pdev_info->dev_state);
+ }
+ return ret;
+@@ -1066,13 +1066,13 @@ static ssize_t raid_level_show(struct device *dev,
+ ldev_info = sdev->hostdata;
+ name = myrs_raid_level_name(ldev_info->raid_level);
+ if (!name)
+- return snprintf(buf, 32, "Invalid (%02X)\n",
++ return snprintf(buf, 64, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+
+ } else
+ name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+
+- return snprintf(buf, 32, "%s\n", name);
++ return snprintf(buf, 64, "%s\n", name);
+ }
+ static DEVICE_ATTR_RO(raid_level);
+
+@@ -1086,7 +1086,7 @@ static ssize_t rebuild_show(struct device *dev,
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+- return snprintf(buf, 32, "physical device - not rebuilding\n");
++ return snprintf(buf, 64, "physical device - not rebuilding\n");
+
+ ldev_info = sdev->hostdata;
+ ldev_num = ldev_info->ldev_num;
+@@ -1098,11 +1098,11 @@ static ssize_t rebuild_show(struct device *dev,
+ return -EIO;
+ }
+ if (ldev_info->rbld_active) {
+- return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
++ return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
+ (size_t)ldev_info->rbld_lba,
+ (size_t)ldev_info->cfg_devsize);
+ } else
+- return snprintf(buf, 32, "not rebuilding\n");
++ return snprintf(buf, 64, "not rebuilding\n");
+ }
+
+ static ssize_t rebuild_store(struct device *dev,
+@@ -1190,7 +1190,7 @@ static ssize_t consistency_check_show(struct device *dev,
+ unsigned short ldev_num;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+- return snprintf(buf, 32, "physical device - not checking\n");
++ return snprintf(buf, 64, "physical device - not checking\n");
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+@@ -1198,11 +1198,11 @@ static ssize_t consistency_check_show(struct device *dev,
+ ldev_num = ldev_info->ldev_num;
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (ldev_info->cc_active)
+- return snprintf(buf, 32, "checking block %zu of %zu\n",
++ return snprintf(buf, 64, "checking block %zu of %zu\n",
+ (size_t)ldev_info->cc_lba,
+ (size_t)ldev_info->cfg_devsize);
+ else
+- return snprintf(buf, 32, "not checking\n");
++ return snprintf(buf, 64, "not checking\n");
+ }
+
+ static ssize_t consistency_check_store(struct device *dev,
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 443a3176c6c0c9..c2f6151cbd2d02 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -88,10 +88,12 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+- if (pm8001_ha->number_of_intr > 1)
++ if (pm8001_ha->number_of_intr > 1) {
+ blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
++ return;
++ }
+
+- return blk_mq_map_queues(qmap);
++ blk_mq_map_queues(qmap);
+ }
+
+ /*
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index a5a31dfa451228..ee2da8e49d4cfb 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -166,7 +166,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ unsigned long flags;
+ pm8001_ha = sas_phy->ha->lldd_ha;
+ phy = &pm8001_ha->phy[phy_id];
+- pm8001_ha->phy[phy_id].enable_completion = &completion;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ /*
+@@ -190,6 +189,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ rates->maximum_linkrate;
+ }
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+@@ -198,6 +198,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ break;
+ case PHY_FUNC_HARD_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+@@ -206,6 +207,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ break;
+ case PHY_FUNC_LINK_RESET:
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
++ pm8001_ha->phy[phy_id].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
+ wait_for_completion(&completion);
+ }
+diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
+index 1619cc33034f21..c5d06d16c490fa 100644
+--- a/drivers/scsi/qedf/qedf.h
++++ b/drivers/scsi/qedf/qedf.h
+@@ -362,6 +362,7 @@ struct qedf_ctx {
+ #define QEDF_IN_RECOVERY 5
+ #define QEDF_DBG_STOP_IO 6
+ #define QEDF_PROBING 8
++#define QEDF_STAG_IN_PROGRESS 9
+ unsigned long flags; /* Miscellaneous state flags */
+ int fipvlan_retries;
+ u8 num_queues;
+diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
+index 451fd236bfd058..96174353e3898f 100644
+--- a/drivers/scsi/qedf/qedf_debugfs.c
++++ b/drivers/scsi/qedf/qedf_debugfs.c
+@@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
+ if (!count || *ppos)
+ return 0;
+
+- kern_buf = memdup_user(buffer, count);
++ kern_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index 10fe3383855c00..031e605b3f4270 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -2331,9 +2331,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+ io_req->fcport = fcport;
+ io_req->cmd_type = QEDF_TASK_MGMT_CMD;
+
+- /* Record which cpu this request is associated with */
+- io_req->cpu = smp_processor_id();
+-
+ /* Set TM flags */
+ io_req->io_req_flags = QEDF_READ;
+ io_req->data_xfer_len = 0;
+@@ -2355,6 +2352,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
++ /* Record which cpu this request is associated with */
++ io_req->cpu = smp_processor_id();
++
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index 91f3f1d7098eb5..14625e6bc88246 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
+ */
+ if (resp == fc_lport_flogi_resp) {
+ qedf->flogi_cnt++;
++ qedf->flogi_pending++;
++
++ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
++ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
++ qedf->flogi_pending = 0;
++ }
++
+ if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ schedule_delayed_work(&qedf->stag_work, 2);
+ return NULL;
+ }
+- qedf->flogi_pending++;
++
+ return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
+ arg, timeout);
+ }
+@@ -911,13 +918,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ struct qedf_ctx *qedf;
+ struct qed_link_output if_link;
+
++ qedf = lport_priv(lport);
++
+ if (lport->vport) {
++ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
+ return;
+ }
+
+- qedf = lport_priv(lport);
+-
+ qedf->flogi_pending = 0;
+ /* For host reset, essentially do a soft link up/down */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+@@ -937,6 +945,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ if (!if_link.link_up) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Physical link is not up.\n");
++ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ return;
+ }
+ /* Flush and wait to make sure link down is processed */
+@@ -949,6 +958,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ "Queue link up work.\n");
+ queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
+ 0);
++ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ }
+
+ /* Reset the host by gracefully logging out and then logging back in */
+@@ -3462,6 +3472,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
+ }
+
+ /* Start the Slowpath-process */
++ memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
+ slowpath_params.int_mode = QED_INT_MODE_MSIX;
+ slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
+ slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
+@@ -3720,6 +3731,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ {
+ struct qedf_ctx *qedf;
+ int rc;
++ int cnt = 0;
+
+ if (!pdev) {
+ QEDF_ERR(NULL, "pdev is NULL.\n");
+@@ -3737,6 +3749,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ return;
+ }
+
++stag_in_prog:
++ if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
++ QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
++ cnt++;
++
++ if (cnt < 5) {
++ msleep(500);
++ goto stag_in_prog;
++ }
++ }
++
+ if (mode != QEDF_MODE_RECOVERY)
+ set_bit(QEDF_UNLOADING, &qedf->flags);
+
+@@ -3996,6 +4019,24 @@ void qedf_stag_change_work(struct work_struct *work)
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, stag_work.work);
+
++ if (!qedf) {
++ QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
++ return;
++ }
++
++ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
++ QEDF_ERR(&qedf->dbg_ctx,
++ "Already is in recovery, hence not calling software context reset.\n");
++ return;
++ }
++
++ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
++ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
++ return;
++ }
++
++ set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
++
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
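
The new QEDF_STAG_IN_PROGRESS bit closes a race between a scheduled soft context reset and driver removal: qedf_stag_change_work() sets the bit before touching the lport, every exit path of qedf_ctx_soft_reset() clears it, and __qedf_remove() polls the bit up to five times with a 500 ms sleep before proceeding with teardown. A compact sketch of that bounded-poll handshake (C11 atomics stand in for set_bit()/test_bit()):

/* Sketch: bounded polling for an in-progress flag before teardown. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool stag_in_progress;

static void stag_work(void)
{
    atomic_store(&stag_in_progress, true);
    usleep(100 * 1000);            /* the actual reset work */
    atomic_store(&stag_in_progress, false);
}

static void remove_adapter(void)
{
    int cnt = 0;

    while (atomic_load(&stag_in_progress) && cnt < 5) {
        fprintf(stderr, "stag in progress, cnt=%d\n", cnt);
        cnt++;
        usleep(500 * 1000);        /* msleep(500) */
    }
    /* proceed with teardown; the wait is best-effort, not a guarantee */
    puts("removing adapter");
}

int main(void)
{
    stag_work();                           /* normal case: flag ends clear */
    remove_adapter();                      /* proceeds immediately */

    atomic_store(&stag_in_progress, true); /* pretend a reset is stuck */
    remove_adapter();                      /* polls 5 times, then proceeds */
    return 0;
}
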
+diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
+index 8deb2001dc2ff9..37eed6a2781640 100644
+--- a/drivers/scsi/qedi/qedi_debugfs.c
++++ b/drivers/scsi/qedi/qedi_debugfs.c
+@@ -120,15 +120,11 @@ static ssize_t
+ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+- size_t cnt = 0;
+-
+- if (*ppos)
+- return 0;
++ char buf[64];
++ int len;
+
+- cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
+- cnt = min_t(int, count, cnt - *ppos);
+- *ppos += cnt;
+- return cnt;
++ len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover);
++ return simple_read_from_buffer(buffer, count, ppos, buf, len);
+ }
+
+ static int
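
The qedi fix above replaces code that sprintf()'d straight into the user-supplied buffer pointer and fumbled *ppos; simple_read_from_buffer() instead formats into a local buffer and copies out only the slice the current offset and count allow, advancing *ppos itself so repeated reads terminate cleanly. A userspace model of its clamping logic (memcpy() stands in for copy_to_user()):

/* Sketch: the offset/count clamping done by simple_read_from_buffer(). */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_from_buffer(char *to, size_t count, long *ppos,
                                const char *from, size_t available)
{
    long pos = *ppos;

    if (pos < 0)
        return -1;                 /* -EINVAL in the kernel */
    if ((size_t)pos >= available || !count)
        return 0;                  /* EOF */
    if (count > available - pos)
        count = available - pos;
    memcpy(to, from + pos, count); /* copy_to_user() in the kernel */
    *ppos = pos + count;
    return count;
}

int main(void)
{
    char kbuf[64], ubuf[8];
    long pos = 0;
    int len = sprintf(kbuf, "do_not_recover=%d\n", 0);
    ssize_t n;

    while ((n = read_from_buffer(ubuf, sizeof(ubuf), &pos, kbuf, len)) > 0)
        printf("read %zd bytes, pos now %ld\n", n, pos);
    return 0;
}
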
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 44449c70a375f3..76eeba435fd046 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2741,7 +2741,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
+ return;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
++ /* Will wait for wind down of adapter */
++ ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
++ "%s pci offline detected (id %06x)\n", __func__,
++ fcport->d_id.b24);
++ qla_pci_set_eeh_busy(fcport->vha);
++ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
++ 0, WAIT_TARGET);
+ return;
+ }
+ }
+@@ -2763,7 +2769,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
+ vha = fcport->vha;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
++ /* Will wait for wind down of adapter */
++ ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
++ "%s pci offline detected (id %06x)\n", __func__,
++ fcport->d_id.b24);
++ qla_pci_set_eeh_busy(vha);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
+ return;
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 19bb64bdd88b19..52dc9604f56746 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -324,7 +324,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ "request_sg_cnt=%x reply_sg_cnt=%x.\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt);
+- rval = -EPERM;
++ rval = -ENOBUFS;
+ goto done;
+ }
+
+@@ -3059,17 +3059,61 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
+ return ret;
+ }
+
+-int
+-qla24xx_bsg_timeout(struct bsg_job *bsg_job)
++static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
+ {
++ bool found = false;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct qla_hw_data *ha = vha->hw;
+- srb_t *sp;
+- int cnt, que;
++ srb_t *sp = NULL;
++ int cnt;
+ unsigned long flags;
+ struct req_que *req;
+
++ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
++ req = qpair->req;
++
++ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
++ sp = req->outstanding_cmds[cnt];
++ if (sp &&
++ (sp->type == SRB_CT_CMD ||
++ sp->type == SRB_ELS_CMD_HST ||
++ sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
++ sp->u.bsg_job == bsg_job) {
++ req->outstanding_cmds[cnt] = NULL;
++ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++ if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
++ ql_log(ql_log_warn, vha, 0x7089,
++ "mbx abort_command failed.\n");
++ bsg_reply->result = -EIO;
++ } else {
++ ql_dbg(ql_dbg_user, vha, 0x708a,
++ "mbx abort_command success.\n");
++ bsg_reply->result = 0;
++ }
++ /* ref: INIT */
++ kref_put(&sp->cmd_kref, qla2x00_sp_release);
++
++ found = true;
++ goto done;
++ }
++ }
++ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
++
++done:
++ return found;
++}
++
++int
++qla24xx_bsg_timeout(struct bsg_job *bsg_job)
++{
++ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
++ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
++ struct qla_hw_data *ha = vha->hw;
++ int i;
++ struct qla_qpair *qpair;
++
+ ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+ __func__, bsg_job);
+
+@@ -3079,48 +3123,22 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
+ qla_pci_set_eeh_busy(vha);
+ }
+
++ if (qla_bsg_found(ha->base_qpair, bsg_job))
++ goto done;
++
+ /* find the bsg job from the active list of commands */
+- spin_lock_irqsave(&ha->hardware_lock, flags);
+- for (que = 0; que < ha->max_req_queues; que++) {
+- req = ha->req_q_map[que];
+- if (!req)
++ for (i = 0; i < ha->max_qpairs; i++) {
++ qpair = vha->hw->queue_pair_map[i];
++ if (!qpair)
+ continue;
+-
+- for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+- sp = req->outstanding_cmds[cnt];
+- if (sp &&
+- (sp->type == SRB_CT_CMD ||
+- sp->type == SRB_ELS_CMD_HST ||
+- sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
+- sp->type == SRB_FXIOCB_BCMD) &&
+- sp->u.bsg_job == bsg_job) {
+- req->outstanding_cmds[cnt] = NULL;
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+-
+- if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
+- ql_log(ql_log_warn, vha, 0x7089,
+- "mbx abort_command failed.\n");
+- bsg_reply->result = -EIO;
+- } else {
+- ql_dbg(ql_dbg_user, vha, 0x708a,
+- "mbx abort_command success.\n");
+- bsg_reply->result = 0;
+- }
+- spin_lock_irqsave(&ha->hardware_lock, flags);
+- goto done;
+-
+- }
+- }
++ if (qla_bsg_found(qpair, bsg_job))
++ goto done;
+ }
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
+ bsg_reply->result = -ENXIO;
+- return 0;
+
+ done:
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+- /* ref: INIT */
+- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ return 0;
+ }
+
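The qla24xx_bsg_timeout() rework factors the search into qla_bsg_found() and walks the base qpair plus each entry of queue_pair_map under that queue's own qp_lock_ptr, instead of scanning every request queue under the one hardware_lock. Note that the outstanding slot is NULLed before the lock is dropped, so no other context can complete or free the srb while it is being aborted. A pthread sketch of that find-and-claim idiom (the queue layout is illustrative):

/* Sketch: find-and-claim an entry under a per-queue lock. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define QDEPTH 4

struct queue {
    pthread_mutex_t lock;
    void *outstanding[QDEPTH];
};

static void *find_and_claim(struct queue *q, void *needle)
{
    void *found = NULL;

    pthread_mutex_lock(&q->lock);
    for (int i = 0; i < QDEPTH; i++) {
        if (q->outstanding[i] == needle) {
            q->outstanding[i] = NULL;    /* claim before unlocking */
            found = needle;
            break;
        }
    }
    pthread_mutex_unlock(&q->lock);
    /* safe to abort/complete 'found' here: nobody else can reach it */
    return found;
}

int main(void)
{
    int job;
    struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER,
                       .outstanding = { NULL, &job, NULL, NULL } };

    printf("claimed: %s\n", find_and_claim(&q, &job) ? "yes" : "no");
    printf("claimed again: %s\n", find_and_claim(&q, &job) ? "yes" : "no");
    return 0;
}
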
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index deb642607deb6f..7cf998e3cc681c 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -82,7 +82,7 @@ typedef union {
+ #include "qla_nvme.h"
+ #define QLA2XXX_DRIVER_NAME "qla2xxx"
+ #define QLA2XXX_APIDEV "ql2xapidev"
+-#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
++#define QLA2XXX_MANUFACTURER "Marvell"
+
+ /*
+ * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
+@@ -3309,9 +3309,20 @@ struct fab_scan_rp {
+ u8 node_name[8];
+ };
+
++enum scan_step {
++ FAB_SCAN_START,
++ FAB_SCAN_GPNFT_FCP,
++ FAB_SCAN_GNNFT_FCP,
++ FAB_SCAN_GPNFT_NVME,
++ FAB_SCAN_GNNFT_NVME,
++};
++
+ struct fab_scan {
+ struct fab_scan_rp *l;
+ u32 size;
++ u32 rscn_gen_start;
++ u32 rscn_gen_end;
++ enum scan_step step;
+ u16 scan_retry;
+ #define MAX_SCAN_RETRIES 5
+ enum scan_flags_t scan_flags;
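
The new enum scan_step makes the fabric scan an explicit four-phase state machine: GPN_FT then GNN_FT for FCP, then the same pair again for NVMe when it is enabled. The old code inferred the phase from the CT opcode and fc4 type. A compact sketch of the progression (next_scan_step() and FAB_SCAN_DONE are illustrative; the driver advances vha->scan.step inside qla_fab_async_scan()):

/* Sketch: the fabric scan as an explicit state machine. */
#include <stdbool.h>
#include <stdio.h>

enum scan_step {
    FAB_SCAN_START,
    FAB_SCAN_GPNFT_FCP,
    FAB_SCAN_GNNFT_FCP,
    FAB_SCAN_GPNFT_NVME,
    FAB_SCAN_GNNFT_NVME,
    FAB_SCAN_DONE,        /* illustrative terminal state */
};

static enum scan_step next_scan_step(enum scan_step cur, bool nvme_enabled)
{
    switch (cur) {
    case FAB_SCAN_START:      return FAB_SCAN_GPNFT_FCP;
    case FAB_SCAN_GPNFT_FCP:  return FAB_SCAN_GNNFT_FCP;
    case FAB_SCAN_GNNFT_FCP:
        return nvme_enabled ? FAB_SCAN_GPNFT_NVME : FAB_SCAN_DONE;
    case FAB_SCAN_GPNFT_NVME: return FAB_SCAN_GNNFT_NVME;
    default:                  return FAB_SCAN_DONE;
    }
}

int main(void)
{
    static const char *names[] = { "START", "GPNFT_FCP", "GNNFT_FCP",
                                   "GPNFT_NVME", "GNNFT_NVME", "DONE" };
    enum scan_step s = FAB_SCAN_START;

    while (s != FAB_SCAN_DONE) {
        s = next_scan_step(s, true);
        printf("-> %s\n", names[s]);
    }
    return 0;
}
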
+@@ -3537,9 +3548,8 @@ enum qla_work_type {
+ QLA_EVT_RELOGIN,
+ QLA_EVT_ASYNC_PRLO,
+ QLA_EVT_ASYNC_PRLO_DONE,
+- QLA_EVT_GPNFT,
+- QLA_EVT_GPNFT_DONE,
+- QLA_EVT_GNNFT_DONE,
++ QLA_EVT_SCAN_CMD,
++ QLA_EVT_SCAN_FINISH,
+ QLA_EVT_GFPNID,
+ QLA_EVT_SP_RETRY,
+ QLA_EVT_IIDMA,
+@@ -5030,6 +5040,7 @@ typedef struct scsi_qla_host {
+
+ /* Counter to detect races between ELS and RSCN events */
+ atomic_t generation_tick;
++ atomic_t rscn_gen;
+ /* Time when global fcport update has been scheduled */
+ int total_fcport_update_gen;
+ /* List of pending LOGOs, protected by tgt_mutex */
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index a7a364760b8002..081af4d420a05f 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+
+- seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
++ seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
+ exch_used, ha->base_qpair->fwres.exch_limit);
+
+ if (ql2xenforce_iocb_limit == 2) {
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index 26e6b3e3af4317..dcde55c8ee5dea 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -1100,7 +1100,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (fcport->edif.enable) {
+- if (pcnt > app_req.num_ports)
++ if (pcnt >= app_req.num_ports)
+ break;
+
+ app_reply->elem[pcnt].rekey_count =
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 09cb9413670a5e..cededfda9d0e31 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -44,7 +44,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
+ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
+
+ extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
++extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *);
+ extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
+ struct els_plogi *els_plogi);
+
+@@ -728,9 +728,9 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+ void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
+ int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
+ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
+-int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
+-void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
+-void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
++int qla_fab_async_scan(scsi_qla_host_t *, srb_t *);
++void qla_fab_scan_start(struct scsi_qla_host *);
++void qla_fab_scan_finish(scsi_qla_host_t *, srb_t *);
+ int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
+ int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
+ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 1cf9d200d56307..d2bddca7045aa1 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -1710,7 +1710,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
+ eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+ alen = scnprintf(
+ eiter->a.orom_version, sizeof(eiter->a.orom_version),
+- "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
++ "%d.%02d", ha->efi_revision[1], ha->efi_revision[0]);
+ alen += FDMI_ATTR_ALIGNMENT(alen);
+ alen += FDMI_ATTR_TYPELEN(eiter);
+ eiter->len = cpu_to_be16(alen);
+@@ -3168,7 +3168,30 @@ static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+ return rc;
+ }
+
+-void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
++static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport)
++{
++ u32 rscn_gen;
++
++ rscn_gen = atomic_read(&vha->rscn_gen);
++ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017,
++ "%s %d %8phC rscn_gen %x start %x end %x current %x\n",
++ __func__, __LINE__, fcport->port_name, fcport->rscn_gen,
++ vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen);
++
++ if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start,
++ vha->scan.rscn_gen_end))
++ /* rscn came in before fabric scan */
++ return true;
++
++ if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen))
++ /* rscn came in after fabric scan */
++ return false;
++
++ /* rare: fcport's scan_needed + rscn_gen must be stale */
++ return true;
++}
++
++void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp)
+ {
+ fc_port_t *fcport;
+ u32 i, rc;
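
qla_ok_to_clear_rscn() above compares an fcport's RSCN generation against the window recorded at scan start and scan end, so only RSCNs the scan actually observed get scan_needed cleared; anything newer stays pending for the next scan. Since the generation counter can wrap, the range test has to be wraparound-aware. One plausible shape for such a check (this val_is_in_range() is a sketch, not necessarily the driver's exact helper):

/* Sketch: wraparound-aware "is val inside [start, end]" for a u32 counter. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool val_is_in_range(uint32_t val, uint32_t start, uint32_t end)
{
    if (start <= end)                       /* normal window */
        return val >= start && val <= end;
    /* window wrapped past UINT32_MAX */
    return val >= start || val <= end;
}

int main(void)
{
    printf("%d\n", val_is_in_range(5, 3, 9));             /* 1 */
    printf("%d\n", val_is_in_range(2, 0xfffffff0u, 4));   /* 1: wrapped */
    printf("%d\n", val_is_in_range(100, 0xfffffff0u, 4)); /* 0 */
    return 0;
}
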
+@@ -3281,10 +3304,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ (fcport->scan_needed &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_NVME_INITIATOR)) {
++ fcport->scan_needed = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ fcport->d_id.b24 = rp->id.b24;
+- fcport->scan_needed = 0;
+ break;
+ }
+
+@@ -3325,7 +3348,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ do_delete = true;
+ }
+
+- fcport->scan_needed = 0;
++ if (qla_ok_to_clear_rscn(vha, fcport))
++ fcport->scan_needed = 0;
++
+ if (((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) ||
+@@ -3355,7 +3380,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
+- fcport->scan_needed = 0;
++
++ if (qla_ok_to_clear_rscn(vha, fcport))
++ fcport->scan_needed = 0;
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+ }
+@@ -3379,14 +3406,11 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ }
+ }
+
+-static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
++static int qla2x00_post_next_scan_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+ {
+ struct qla_work_evt *e;
+
+- if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+- return QLA_PARAMETER_ERROR;
+-
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+@@ -3396,37 +3420,15 @@ static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+ return qla2x00_post_work(vha, e);
+ }
+
+-static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
+- srb_t *sp, int cmd)
+-{
+- struct qla_work_evt *e;
+-
+- if (cmd != QLA_EVT_GPNFT)
+- return QLA_PARAMETER_ERROR;
+-
+- e = qla2x00_alloc_work(vha, cmd);
+- if (!e)
+- return QLA_FUNCTION_FAILED;
+-
+- e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+- e->u.gpnft.sp = sp;
+-
+- return qla2x00_post_work(vha, e);
+-}
+-
+ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
+ struct srb *sp)
+ {
+ struct qla_hw_data *ha = vha->hw;
+ int num_fibre_dev = ha->max_fibre_devices;
+- struct ct_sns_req *ct_req =
+- (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_gpnft_rsp *ct_rsp =
+ (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct ct_sns_gpn_ft_data *d;
+ struct fab_scan_rp *rp;
+- u16 cmd = be16_to_cpu(ct_req->command);
+- u8 fc4_type = sp->gen2;
+ int i, j, k;
+ port_id_t id;
+ u8 found;
+@@ -3445,85 +3447,83 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+- if (fc4_type == FC4_TYPE_FCP_SCSI) {
+- if (cmd == GPN_FT_CMD) {
+- rp = &vha->scan.l[j];
+- rp->id = id;
+- memcpy(rp->port_name, d->port_name, 8);
+- j++;
+- rp->fc4type = FS_FC4TYPE_FCP;
+- } else {
+- for (k = 0; k < num_fibre_dev; k++) {
+- rp = &vha->scan.l[k];
+- if (id.b24 == rp->id.b24) {
+- memcpy(rp->node_name,
+- d->port_name, 8);
+- break;
+- }
++ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2025,
++ "%s %06x %8ph \n",
++ __func__, id.b24, d->port_name);
++
++ switch (vha->scan.step) {
++ case FAB_SCAN_GPNFT_FCP:
++ rp = &vha->scan.l[j];
++ rp->id = id;
++ memcpy(rp->port_name, d->port_name, 8);
++ j++;
++ rp->fc4type = FS_FC4TYPE_FCP;
++ break;
++ case FAB_SCAN_GNNFT_FCP:
++ for (k = 0; k < num_fibre_dev; k++) {
++ rp = &vha->scan.l[k];
++ if (id.b24 == rp->id.b24) {
++ memcpy(rp->node_name,
++ d->port_name, 8);
++ break;
+ }
+ }
+- } else {
+- /* Search if the fibre device supports FC4_TYPE_NVME */
+- if (cmd == GPN_FT_CMD) {
+- found = 0;
+-
+- for (k = 0; k < num_fibre_dev; k++) {
+- rp = &vha->scan.l[k];
+- if (!memcmp(rp->port_name,
+- d->port_name, 8)) {
+- /*
+- * Supports FC-NVMe & FCP
+- */
+- rp->fc4type |= FS_FC4TYPE_NVME;
+- found = 1;
+- break;
+- }
++ break;
++ case FAB_SCAN_GPNFT_NVME:
++ found = 0;
++
++ for (k = 0; k < num_fibre_dev; k++) {
++ rp = &vha->scan.l[k];
++ if (!memcmp(rp->port_name, d->port_name, 8)) {
++ /*
++ * Supports FC-NVMe & FCP
++ */
++ rp->fc4type |= FS_FC4TYPE_NVME;
++ found = 1;
++ break;
+ }
++ }
+
+- /* We found new FC-NVMe only port */
+- if (!found) {
+- for (k = 0; k < num_fibre_dev; k++) {
+- rp = &vha->scan.l[k];
+- if (wwn_to_u64(rp->port_name)) {
+- continue;
+- } else {
+- rp->id = id;
+- memcpy(rp->port_name,
+- d->port_name, 8);
+- rp->fc4type =
+- FS_FC4TYPE_NVME;
+- break;
+- }
+- }
+- }
+- } else {
++ /* We found new FC-NVMe only port */
++ if (!found) {
+ for (k = 0; k < num_fibre_dev; k++) {
+ rp = &vha->scan.l[k];
+- if (id.b24 == rp->id.b24) {
+- memcpy(rp->node_name,
+- d->port_name, 8);
++ if (wwn_to_u64(rp->port_name)) {
++ continue;
++ } else {
++ rp->id = id;
++ memcpy(rp->port_name, d->port_name, 8);
++ rp->fc4type = FS_FC4TYPE_NVME;
+ break;
+ }
+ }
+ }
++ break;
++ case FAB_SCAN_GNNFT_NVME:
++ for (k = 0; k < num_fibre_dev; k++) {
++ rp = &vha->scan.l[k];
++ if (id.b24 == rp->id.b24) {
++ memcpy(rp->node_name, d->port_name, 8);
++ break;
++ }
++ }
++ break;
++ default:
++ break;
+ }
+ }
+ }
+
+-static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
++static void qla_async_scan_sp_done(srb_t *sp, int res)
+ {
+ struct scsi_qla_host *vha = sp->vha;
+- struct ct_sns_req *ct_req =
+- (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+- u16 cmd = be16_to_cpu(ct_req->command);
+- u8 fc4_type = sp->gen2;
+ unsigned long flags;
+ int rc;
+
+ /* gen2 field is holding the fc4type */
+- ql_dbg(ql_dbg_disc, vha, 0xffff,
+- "Async done-%s res %x FC4Type %x\n",
+- sp->name, res, sp->gen2);
++ ql_dbg(ql_dbg_disc, vha, 0x2026,
++ "Async done-%s res %x step %x\n",
++ sp->name, res, vha->scan.step);
+
+ sp->rc = res;
+ if (res) {
+@@ -3547,8 +3547,7 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
+ * sp for GNNFT_DONE work. This will allow all
+ * the resource to get freed up.
+ */
+- rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+- QLA_EVT_GNNFT_DONE);
++ rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
+ if (rc) {
+ /* Cleanup here to prevent memory leak */
+ qla24xx_sp_unmap(vha, sp);
+@@ -3573,28 +3572,30 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
+
+ qla2x00_find_free_fcp_nvme_slot(vha, sp);
+
+- if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
+- cmd == GNN_FT_CMD) {
+- spin_lock_irqsave(&vha->work_lock, flags);
+- vha->scan.scan_flags &= ~SF_SCANNING;
+- spin_unlock_irqrestore(&vha->work_lock, flags);
++ spin_lock_irqsave(&vha->work_lock, flags);
++ vha->scan.scan_flags &= ~SF_SCANNING;
++ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+- sp->rc = res;
+- rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
+- if (rc) {
+- qla24xx_sp_unmap(vha, sp);
+- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+- }
+- return;
+- }
++ switch (vha->scan.step) {
++ case FAB_SCAN_GPNFT_FCP:
++ case FAB_SCAN_GPNFT_NVME:
++ rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD);
++ break;
++ case FAB_SCAN_GNNFT_FCP:
++ if (vha->flags.nvme_enabled)
++ rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD);
++ else
++ rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
+
+- if (cmd == GPN_FT_CMD) {
+- rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+- QLA_EVT_GPNFT_DONE);
+- } else {
+- rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+- QLA_EVT_GNNFT_DONE);
++ break;
++ case FAB_SCAN_GNNFT_NVME:
++ rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
++ break;
++ default:
++ /* should not be here */
++ WARN_ON(1);
++ rc = QLA_FUNCTION_FAILED;
++ break;
+ }
+
+ if (rc) {
+@@ -3605,127 +3606,16 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
+ }
+ }
+
+-/*
+- * Get WWNN list for fc4_type
+- *
+- * It is assumed the same SRB is re-used from GPNFT to avoid
+- * mem free & re-alloc
+- */
+-static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+- u8 fc4_type)
+-{
+- int rval = QLA_FUNCTION_FAILED;
+- struct ct_sns_req *ct_req;
+- struct ct_sns_pkt *ct_sns;
+- unsigned long flags;
+-
+- if (!vha->flags.online) {
+- spin_lock_irqsave(&vha->work_lock, flags);
+- vha->scan.scan_flags &= ~SF_SCANNING;
+- spin_unlock_irqrestore(&vha->work_lock, flags);
+- goto done_free_sp;
+- }
+-
+- if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
+- ql_log(ql_log_warn, vha, 0xffff,
+- "%s: req %p rsp %p are not setup\n",
+- __func__, sp->u.iocb_cmd.u.ctarg.req,
+- sp->u.iocb_cmd.u.ctarg.rsp);
+- spin_lock_irqsave(&vha->work_lock, flags);
+- vha->scan.scan_flags &= ~SF_SCANNING;
+- spin_unlock_irqrestore(&vha->work_lock, flags);
+- WARN_ON(1);
+- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+- goto done_free_sp;
+- }
+-
+- ql_dbg(ql_dbg_disc, vha, 0xfffff,
+- "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
+- __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
+- sp->u.iocb_cmd.u.ctarg.req_size);
+-
+- sp->type = SRB_CT_PTHRU_CMD;
+- sp->name = "gnnft";
+- sp->gen1 = vha->hw->base_qpair->chip_reset;
+- sp->gen2 = fc4_type;
+- qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+- qla2x00_async_gpnft_gnnft_sp_done);
+-
+- memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+- memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
+-
+- ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+- /* CT_IU preamble */
+- ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
+- sp->u.iocb_cmd.u.ctarg.rsp_size);
+-
+- /* GPN_FT req */
+- ct_req->req.gpn_ft.port_type = fc4_type;
+-
+- sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
+- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+-
+- ql_dbg(ql_dbg_disc, vha, 0xffff,
+- "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+- sp->handle, ct_req->req.gpn_ft.port_type);
+-
+- rval = qla2x00_start_sp(sp);
+- if (rval != QLA_SUCCESS) {
+- goto done_free_sp;
+- }
+-
+- return rval;
+-
+-done_free_sp:
+- if (sp->u.iocb_cmd.u.ctarg.req) {
+- dma_free_coherent(&vha->hw->pdev->dev,
+- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+- sp->u.iocb_cmd.u.ctarg.req,
+- sp->u.iocb_cmd.u.ctarg.req_dma);
+- sp->u.iocb_cmd.u.ctarg.req = NULL;
+- }
+- if (sp->u.iocb_cmd.u.ctarg.rsp) {
+- dma_free_coherent(&vha->hw->pdev->dev,
+- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+- sp->u.iocb_cmd.u.ctarg.rsp,
+- sp->u.iocb_cmd.u.ctarg.rsp_dma);
+- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+- }
+- /* ref: INIT */
+- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+-
+- spin_lock_irqsave(&vha->work_lock, flags);
+- vha->scan.scan_flags &= ~SF_SCANNING;
+- if (vha->scan.scan_flags == 0) {
+- ql_dbg(ql_dbg_disc, vha, 0xffff,
+- "%s: schedule\n", __func__);
+- vha->scan.scan_flags |= SF_QUEUED;
+- schedule_delayed_work(&vha->scan.scan_work, 5);
+- }
+- spin_unlock_irqrestore(&vha->work_lock, flags);
+-
+-
+- return rval;
+-} /* GNNFT */
+-
+-void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
+-{
+- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+- "%s enter\n", __func__);
+- qla24xx_async_gnnft(vha, sp, sp->gen2);
+-}
+-
+ /* Get WWPN list for certain fc4_type */
+-int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
++int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp)
+ {
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_pkt *ct_sns;
+- u32 rspsz;
++ u32 rspsz = 0;
+ unsigned long flags;
+
+- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
++ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x200c,
+ "%s enter\n", __func__);
+
+ if (!vha->flags.online)
+@@ -3734,22 +3624,21 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags & SF_SCANNING) {
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
++ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012,
+ "%s: scan active\n", __func__);
+ return rval;
+ }
+ vha->scan.scan_flags |= SF_SCANNING;
++ if (!sp)
++ vha->scan.step = FAB_SCAN_START;
++
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+- if (fc4_type == FC4_TYPE_FCP_SCSI) {
+- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
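++	/*
++	 * Each call advances the fabric scan one step:
++	 * START -> GPN_FT(FCP) -> GNN_FT(FCP) -> GPN_FT(NVME) -> GNN_FT(NVME)
++	 */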
++ switch (vha->scan.step) {
++ case FAB_SCAN_START:
++ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2018,
+ "%s: Performing FCP Scan\n", __func__);
+
+- if (sp) {
+- /* ref: INIT */
+- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+- }
+-
+ /* ref: INIT */
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+@@ -3765,7 +3654,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+- ql_log(ql_log_warn, vha, 0xffff,
++ ql_log(ql_log_warn, vha, 0x201a,
+ "Failed to allocate ct_sns request.\n");
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+@@ -3773,7 +3662,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ qla2x00_rel_sp(sp);
+ return rval;
+ }
+- sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ vha->hw->max_fibre_devices *
+@@ -3785,7 +3673,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+- ql_log(ql_log_warn, vha, 0xffff,
++ ql_log(ql_log_warn, vha, 0x201b,
+ "Failed to allocate ct_sns request.\n");
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+@@ -3805,35 +3693,95 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ "%s scan list size %d\n", __func__, vha->scan.size);
+
+ memset(vha->scan.l, 0, vha->scan.size);
+- } else if (!sp) {
+- ql_dbg(ql_dbg_disc, vha, 0xffff,
+- "NVME scan did not provide SP\n");
++
++ vha->scan.step = FAB_SCAN_GPNFT_FCP;
++ break;
++ case FAB_SCAN_GPNFT_FCP:
++ vha->scan.step = FAB_SCAN_GNNFT_FCP;
++ break;
++ case FAB_SCAN_GNNFT_FCP:
++ vha->scan.step = FAB_SCAN_GPNFT_NVME;
++ break;
++ case FAB_SCAN_GPNFT_NVME:
++ vha->scan.step = FAB_SCAN_GNNFT_NVME;
++ break;
++ case FAB_SCAN_GNNFT_NVME:
++ default:
++ /* should not be here */
++ WARN_ON(1);
++ goto done_free_sp;
++ }
++
++ if (!sp) {
++ ql_dbg(ql_dbg_disc, vha, 0x201c,
++ "scan did not provide SP\n");
+ return rval;
+ }
++ if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
++ ql_log(ql_log_warn, vha, 0x201d,
++ "%s: req %p rsp %p are not setup\n",
++ __func__, sp->u.iocb_cmd.u.ctarg.req,
++ sp->u.iocb_cmd.u.ctarg.rsp);
++ spin_lock_irqsave(&vha->work_lock, flags);
++ vha->scan.scan_flags &= ~SF_SCANNING;
++ spin_unlock_irqrestore(&vha->work_lock, flags);
++ WARN_ON(1);
++ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
++ goto done_free_sp;
++ }
++
++ rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
++ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
++ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+
+ sp->type = SRB_CT_PTHRU_CMD;
+- sp->name = "gpnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+- sp->gen2 = fc4_type;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+- qla2x00_async_gpnft_gnnft_sp_done);
+-
+- rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
+- memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+- memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
++ qla_async_scan_sp_done);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+- /* CT_IU preamble */
+- ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
+
+- /* GPN_FT req */
+- ct_req->req.gpn_ft.port_type = fc4_type;
++ /* CT_IU preamble */
++ switch (vha->scan.step) {
++ case FAB_SCAN_GPNFT_FCP:
++ sp->name = "gpnft";
++ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
++ ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI;
++ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
++ break;
++ case FAB_SCAN_GNNFT_FCP:
++ sp->name = "gnnft";
++ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz);
++ ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI;
++ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
++ break;
++ case FAB_SCAN_GPNFT_NVME:
++ sp->name = "gpnft";
++ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
++ ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME;
++ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
++ break;
++ case FAB_SCAN_GNNFT_NVME:
++ sp->name = "gnnft";
++ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz);
++ ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME;
++ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
++ break;
++ default:
++ /* should not be here */
++ WARN_ON(1);
++ goto done_free_sp;
++ }
+
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+- ql_dbg(ql_dbg_disc, vha, 0xffff,
+- "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+- sp->handle, ct_req->req.gpn_ft.port_type);
++ ql_dbg(ql_dbg_disc, vha, 0x2003,
++	    "%s: step %d, rsp size %d, req size %d hdl %x %s FC4TYPE %x\n",
++ __func__, vha->scan.step, sp->u.iocb_cmd.u.ctarg.rsp_size,
++ sp->u.iocb_cmd.u.ctarg.req_size, sp->handle, sp->name,
++ ct_req->req.gpn_ft.port_type);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+@@ -3864,7 +3812,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ if (vha->scan.scan_flags == 0) {
+- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
++ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2007,
+ "%s: Scan scheduled.\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
+@@ -3875,6 +3823,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
+ return rval;
+ }
+
++void qla_fab_scan_start(struct scsi_qla_host *vha)
++{
++ int rval;
++
++ rval = qla_fab_async_scan(vha, NULL);
++ if (rval)
++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
++}
++
+ void qla_scan_work_fn(struct work_struct *work)
+ {
+ struct fab_scan *s = container_of(to_delayed_work(work),
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index a314cfc5b263f2..eda3bdab934d57 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1193,8 +1193,12 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+ return rval;
+
+ done_free_sp:
+- /* ref: INIT */
+- kref_put(&sp->cmd_kref, qla2x00_sp_release);
++ /*
++	 * use qla24xx_async_gnl_sp_done to purge all pending gnl requests.
++	 * kref_put is called behind the scenes.
++ */
++ sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
++ qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
+ fcport->flags &= ~(FCF_ASYNC_SENT);
+ done:
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE);
+@@ -1838,10 +1842,18 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+ return qla2x00_post_work(vha, e);
+ }
+
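++/* Hand out the next RSCN generation value. Fcports stamped with it can be
++ * compared against vha->rscn_gen later to detect RSCNs that arrive mid-scan.
++ */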
++static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen)
++{
++ *ret_rscn_gen = atomic_inc_return(&vha->rscn_gen);
++ /* memory barrier */
++ wmb();
++}
++
+ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ {
+ fc_port_t *fcport;
+ unsigned long flags;
++ u32 rscn_gen;
+
+ switch (ea->id.b.rsvd_1) {
+ case RSCN_PORT_ADDR:
+@@ -1871,15 +1883,16 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ * Otherwise we're already in the middle of a relogin
+ */
+ fcport->scan_needed = 1;
+- fcport->rscn_gen++;
++ qla_rscn_gen_tick(vha, &fcport->rscn_gen);
+ }
+ } else {
+ fcport->scan_needed = 1;
+- fcport->rscn_gen++;
++ qla_rscn_gen_tick(vha, &fcport->rscn_gen);
+ }
+ }
+ break;
+ case RSCN_AREA_ADDR:
++ qla_rscn_gen_tick(vha, &rscn_gen);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+@@ -1887,11 +1900,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+
+ if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
+ fcport->scan_needed = 1;
+- fcport->rscn_gen++;
++ fcport->rscn_gen = rscn_gen;
+ }
+ }
+ break;
+ case RSCN_DOM_ADDR:
++ qla_rscn_gen_tick(vha, &rscn_gen);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+@@ -1899,19 +1913,20 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+
+ if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
+ fcport->scan_needed = 1;
+- fcport->rscn_gen++;
++ fcport->rscn_gen = rscn_gen;
+ }
+ }
+ break;
+ case RSCN_FAB_ADDR:
+ default:
++ qla_rscn_gen_tick(vha, &rscn_gen);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+ continue;
+
+ fcport->scan_needed = 1;
+- fcport->rscn_gen++;
++ fcport->rscn_gen = rscn_gen;
+ }
+ break;
+ }
+@@ -1920,6 +1935,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
++ vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen);
+ schedule_delayed_work(&vha->scan.scan_work, 5);
+ }
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+@@ -2665,6 +2681,40 @@ qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+ return rval;
+ }
+
++static void qla_enable_fce_trace(scsi_qla_host_t *vha)
++{
++ int rval;
++ struct qla_hw_data *ha = vha->hw;
++
++ if (ha->fce) {
++ ha->flags.fce_enabled = 1;
++ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
++ rval = qla2x00_enable_fce_trace(vha,
++ ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
++
++ if (rval) {
++ ql_log(ql_log_warn, vha, 0x8033,
++ "Unable to reinitialize FCE (%d).\n", rval);
++ ha->flags.fce_enabled = 0;
++ }
++ }
++}
++
++static void qla_enable_eft_trace(scsi_qla_host_t *vha)
++{
++ int rval;
++ struct qla_hw_data *ha = vha->hw;
++
++ if (ha->eft) {
++ memset(ha->eft, 0, EFT_SIZE);
++ rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
++
++ if (rval) {
++ ql_log(ql_log_warn, vha, 0x8034,
++ "Unable to reinitialize EFT (%d).\n", rval);
++ }
++ }
++}
++
+ /*
+ * qla2x00_initialize_adapter
+ * Initialize board.
+@@ -3668,9 +3718,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
+ }
+
+ static void
+-qla2x00_init_fce_trace(scsi_qla_host_t *vha)
++qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ {
+- int rval;
+ dma_addr_t tc_dma;
+ void *tc;
+ struct qla_hw_data *ha = vha->hw;
+@@ -3699,27 +3748,17 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha)
+ return;
+ }
+
+- rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+- ha->fce_mb, &ha->fce_bufs);
+- if (rval) {
+- ql_log(ql_log_warn, vha, 0x00bf,
+- "Unable to initialize FCE (%d).\n", rval);
+- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
+- return;
+- }
+-
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
+ "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
+
+- ha->flags.fce_enabled = 1;
+ ha->fce_dma = tc_dma;
+ ha->fce = tc;
++ ha->fce_bufs = FCE_NUM_BUFFERS;
+ }
+
+ static void
+-qla2x00_init_eft_trace(scsi_qla_host_t *vha)
++qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
+ {
+- int rval;
+ dma_addr_t tc_dma;
+ void *tc;
+ struct qla_hw_data *ha = vha->hw;
+@@ -3744,14 +3783,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+ return;
+ }
+
+- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+- if (rval) {
+- ql_log(ql_log_warn, vha, 0x00c2,
+- "Unable to initialize EFT (%d).\n", rval);
+- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
+- return;
+- }
+-
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+
+@@ -3759,13 +3790,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+ ha->eft = tc;
+ }
+
+-static void
+-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+-{
+- qla2x00_init_fce_trace(vha);
+- qla2x00_init_eft_trace(vha);
+-}
+-
+ void
+ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ {
+@@ -3820,10 +3844,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+
+- qla2x00_init_fce_trace(vha);
++ qla2x00_alloc_fce_trace(vha);
+ if (ha->fce)
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+- qla2x00_init_eft_trace(vha);
++ qla2x00_alloc_eft_trace(vha);
+ if (ha->eft)
+ eft_size = EFT_SIZE;
+ }
+@@ -4253,7 +4277,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ unsigned long flags;
+- uint16_t fw_major_version;
+ int done_once = 0;
+
+ if (IS_P3P_TYPE(ha)) {
+@@ -4320,7 +4343,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ goto failed;
+
+ enable_82xx_npiv:
+- fw_major_version = ha->fw_major_version;
+ if (IS_P3P_TYPE(ha))
+ qla82xx_check_md_needed(vha);
+ else
+@@ -4349,12 +4371,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
+- if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+- qla2x00_alloc_offload_mem(vha);
+-
+ if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_fw_dump(vha);
+
++ qla_enable_fce_trace(vha);
++ qla_enable_eft_trace(vha);
+ } else {
+ goto failed;
+ }
+@@ -6384,10 +6405,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ qlt_do_generation_tick(vha, &discovery_gen);
+
+ if (USE_ASYNC_SCAN(ha)) {
+- rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
+- NULL);
+- if (rval)
+- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
++			/* the fabric scan starts here */
++ vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen);
++ qla_fab_scan_start(vha);
+ } else {
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+@@ -7487,12 +7507,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+ int
+ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ {
+- int rval;
+ uint8_t status = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp, *tvp;
+ struct req_que *req = ha->req_q_map[0];
+ unsigned long flags;
++ fc_port_t *fcport;
+
+ if (vha->flags.online) {
+ qla2x00_abort_isp_cleanup(vha);
+@@ -7561,6 +7581,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
+ return status;
+ }
++
++ /* User may have updated [fcp|nvme] prefer in flash */
++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
++ if (NVME_PRIORITY(ha, fcport))
++ fcport->do_prli_nvme = 1;
++ else
++ fcport->do_prli_nvme = 0;
++ }
++
+ if (!qla2x00_restart_isp(vha)) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+@@ -7581,31 +7610,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+ qla2x00_get_fw_version(vha);
+- if (ha->fce) {
+- ha->flags.fce_enabled = 1;
+- memset(ha->fce, 0,
+- fce_calc_size(ha->fce_bufs));
+- rval = qla2x00_enable_fce_trace(vha,
+- ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+- &ha->fce_bufs);
+- if (rval) {
+- ql_log(ql_log_warn, vha, 0x8033,
+- "Unable to reinitialize FCE "
+- "(%d).\n", rval);
+- ha->flags.fce_enabled = 0;
+- }
+- }
+
+- if (ha->eft) {
+- memset(ha->eft, 0, EFT_SIZE);
+- rval = qla2x00_enable_eft_trace(vha,
+- ha->eft_dma, EFT_NUM_BUFFERS);
+- if (rval) {
+- ql_log(ql_log_warn, vha, 0x8034,
+- "Unable to reinitialize EFT "
+- "(%d).\n", rval);
+- }
+- }
+ } else { /* failed the ISP abort */
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+@@ -7655,6 +7660,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
++ /* User may have updated [fcp|nvme] prefer in flash */
++ list_for_each_entry(fcport, &vp->vp_fcports, list) {
++ if (NVME_PRIORITY(ha, fcport))
++ fcport->do_prli_nvme = 1;
++ else
++ fcport->do_prli_nvme = 0;
++ }
++
+ qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+@@ -8205,15 +8218,21 @@ qla28xx_get_aux_images(
+ struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
+ bool valid_pri_image = false, valid_sec_image = false;
+ bool active_pri_image = false, active_sec_image = false;
++ int rc;
+
+ if (!ha->flt_region_aux_img_status_pri) {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
+ goto check_sec_image;
+ }
+
+- qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
++ rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
+ ha->flt_region_aux_img_status_pri,
+ sizeof(pri_aux_image_status) >> 2);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x01a1,
++		    "Unable to read Primary aux image (%x).\n", rc);
++ goto check_sec_image;
++ }
+ qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
+
+ if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
+@@ -8244,9 +8263,15 @@ qla28xx_get_aux_images(
+ goto check_valid_image;
+ }
+
+- qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
++ rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
+ ha->flt_region_aux_img_status_sec,
+ sizeof(sec_aux_image_status) >> 2);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x01a2,
++		    "Unable to read Secondary aux image (%x).\n", rc);
++ goto check_valid_image;
++ }
++
+ qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
+
+ if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
+@@ -8304,6 +8329,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
+ struct qla27xx_image_status pri_image_status, sec_image_status;
+ bool valid_pri_image = false, valid_sec_image = false;
+ bool active_pri_image = false, active_sec_image = false;
++ int rc;
+
+ if (!ha->flt_region_img_status_pri) {
+ ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
+@@ -8345,8 +8371,14 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
+ goto check_valid_image;
+ }
+
+- qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
++ rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+ ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x01a3,
++		    "Unable to read Secondary image status (%x).\n", rc);
++ goto check_valid_image;
++ }
++
+ qla27xx_print_image(vha, "Secondary image", &sec_image_status);
+
+ if (qla27xx_check_image_status_signature(&sec_image_status)) {
+@@ -8418,11 +8450,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ "FW: Loading firmware from flash (%x).\n", faddr);
+
+ dcode = (uint32_t *)req->ring;
+- qla24xx_read_flash_data(vha, dcode, faddr, 8);
+- if (qla24xx_risc_firmware_invalid(dcode)) {
++ rval = qla24xx_read_flash_data(vha, dcode, faddr, 8);
++ if (rval || qla24xx_risc_firmware_invalid(dcode)) {
+ ql_log(ql_log_fatal, vha, 0x008c,
+- "Unable to verify the integrity of flash firmware "
+- "image.\n");
++ "Unable to verify the integrity of flash firmware image (rval %x).\n", rval);
+ ql_log(ql_log_fatal, vha, 0x008d,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
+@@ -8436,7 +8467,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ for (j = 0; j < segments; j++) {
+ ql_dbg(ql_dbg_init, vha, 0x008d,
+ "-> Loading segment %u...\n", j);
+- qla24xx_read_flash_data(vha, dcode, faddr, 10);
++ rval = qla24xx_read_flash_data(vha, dcode, faddr, 10);
++ if (rval) {
++ ql_log(ql_log_fatal, vha, 0x016a,
++			    "-> Unable to read segment addr + size.\n");
++ return QLA_FUNCTION_FAILED;
++ }
+ risc_addr = be32_to_cpu((__force __be32)dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[3]);
+ if (!*srisc_addr) {
+@@ -8452,7 +8488,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ ql_dbg(ql_dbg_init, vha, 0x008e,
+ "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
+ fragment, risc_addr, faddr, dlen);
+- qla24xx_read_flash_data(vha, dcode, faddr, dlen);
++ rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen);
++ if (rval) {
++ ql_log(ql_log_fatal, vha, 0x016b,
++				    "-> Unable to read fragment (faddr %#x dlen %#lx).\n",
++ faddr, dlen);
++ return QLA_FUNCTION_FAILED;
++ }
+ for (i = 0; i < dlen; i++)
+ dcode[i] = swab32(dcode[i]);
+
+@@ -8481,7 +8523,14 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ fwdt->length = 0;
+
+ dcode = (uint32_t *)req->ring;
+- qla24xx_read_flash_data(vha, dcode, faddr, 7);
++
++ rval = qla24xx_read_flash_data(vha, dcode, faddr, 7);
++ if (rval) {
++ ql_log(ql_log_fatal, vha, 0x016c,
++ "-> Unable to read template size.\n");
++ goto failed;
++ }
++
+ risc_size = be32_to_cpu((__force __be32)dcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0161,
+ "-> fwdt%u template array at %#x (%#x dwords)\n",
+@@ -8507,11 +8556,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ }
+
+ dcode = fwdt->template;
+- qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
++ rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+
+- if (!qla27xx_fwdt_template_valid(dcode)) {
++ if (rval || !qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0165,
+- "-> fwdt%u failed template validate\n", j);
++ "-> fwdt%u failed template validate (rval %x)\n",
++ j, rval);
+ goto failed;
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index a4a56ab0ba7473..ef4b3cc1cd77e1 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -631,3 +631,11 @@ static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
+ }
+ return 0;
+ }
++
++static inline bool val_is_in_range(u32 val, u32 start, u32 end)
++{
++	return val >= start && val <= end;
++}
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index df90169f82440a..0b41e8a0660262 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -2587,6 +2587,33 @@ void
+ qla2x00_sp_release(struct kref *kref)
+ {
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
++ struct scsi_qla_host *vha = sp->vha;
++
++ switch (sp->type) {
++ case SRB_CT_PTHRU_CMD:
++ /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
++ if (sp->u.iocb_cmd.u.ctarg.req &&
++ (!sp->fcport ||
++ sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
++ dma_free_coherent(&vha->hw->pdev->dev,
++ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
++ sp->u.iocb_cmd.u.ctarg.req,
++ sp->u.iocb_cmd.u.ctarg.req_dma);
++ sp->u.iocb_cmd.u.ctarg.req = NULL;
++ }
++ if (sp->u.iocb_cmd.u.ctarg.rsp &&
++ (!sp->fcport ||
++ sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
++ dma_free_coherent(&vha->hw->pdev->dev,
++ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
++ sp->u.iocb_cmd.u.ctarg.rsp,
++ sp->u.iocb_cmd.u.ctarg.rsp_dma);
++ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
++ }
++ break;
++ default:
++ break;
++ }
+
+ sp->free(sp);
+ }
+@@ -2610,7 +2637,8 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp)
+ {
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+- kfree(sp->fcport);
++ if (sp->fcport)
++ qla2x00_free_fcport(sp->fcport);
+
+ if (elsio->u.els_logo.els_logo_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+@@ -2692,7 +2720,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+- kfree(fcport);
++ qla2x00_free_fcport(fcport);
+ ql_log(ql_log_info, vha, 0x70e6,
+ "SRB allocation failed\n");
+ return -ENOMEM;
+@@ -2723,6 +2751,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ if (!elsio->u.els_logo.els_logo_pyld) {
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
++ qla2x00_free_fcport(fcport);
+ return QLA_FUNCTION_FAILED;
+ }
+
+@@ -2747,6 +2776,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ if (rval != QLA_SUCCESS) {
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
++ qla2x00_free_fcport(fcport);
+ return QLA_FUNCTION_FAILED;
+ }
+
+@@ -3012,7 +3042,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
+
+ int
+ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+- fc_port_t *fcport, bool wait)
++ fc_port_t *fcport)
+ {
+ srb_t *sp;
+ struct srb_iocb *elsio = NULL;
+@@ -3027,8 +3057,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ if (!sp) {
+ ql_log(ql_log_info, vha, 0x70e6,
+ "SRB allocation failed\n");
+- fcport->flags &= ~FCF_ASYNC_ACTIVE;
+- return -ENOMEM;
++ goto done;
+ }
+
+ fcport->flags |= FCF_ASYNC_SENT;
+@@ -3037,9 +3066,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ ql_dbg(ql_dbg_io, vha, 0x3073,
+ "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
+
+- if (wait)
+- sp->flags = SRB_WAKEUP_ON_COMP;
+-
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+@@ -3055,7 +3081,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+
+ if (!elsio->u.els_plogi.els_plogi_pyld) {
+ rval = QLA_FUNCTION_FAILED;
+- goto out;
++ goto done_free_sp;
+ }
+
+ resp_ptr = elsio->u.els_plogi.els_resp_pyld =
+@@ -3064,7 +3090,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+
+ if (!elsio->u.els_plogi.els_resp_pyld) {
+ rval = QLA_FUNCTION_FAILED;
+- goto out;
++ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
+@@ -3080,7 +3106,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+
+ if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
+ struct fc_els_flogi *p = ptr;
+-
+ p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
+ }
+
+@@ -3089,10 +3114,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
+ sizeof(*elsio->u.els_plogi.els_plogi_pyld));
+
+- init_completion(&elsio->u.els_plogi.comp);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+- rval = QLA_FUNCTION_FAILED;
++ fcport->flags |= FCF_LOGIN_NEEDED;
++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
++ goto done_free_sp;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
+@@ -3100,21 +3126,15 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ fcport->d_id.b24, vha->d_id.b24);
+ }
+
+- if (wait) {
+- wait_for_completion(&elsio->u.els_plogi.comp);
+-
+- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+- rval = QLA_FUNCTION_FAILED;
+- } else {
+- goto done;
+- }
++ return rval;
+
+-out:
+- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
++done_free_sp:
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ done:
++ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
++ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
+ return rval;
+ }
+
+@@ -3918,7 +3938,7 @@ qla2x00_start_sp(srb_t *sp)
+ return -EAGAIN;
+ }
+
+- pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
++ pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
+ if (!pkt) {
+ rval = -EAGAIN;
+ ql_log(ql_log_warn, vha, 0x700c,
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 21ec32b4fb2809..0cd6f3e1488249 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -194,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ ha->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0xd035,
+- "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
++ "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index b67416951a5f7c..76703f2706b8e3 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -180,7 +180,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+- fcport->logout_on_delete = 0;
++ fcport->logout_on_delete = 1;
+
+ if (!vha->hw->flags.edif_enabled)
+ qla2x00_wait_for_sess_deletion(vha);
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index a8ddf356e66260..8f4cc136a9c9c4 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -49,7 +49,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
+ return 0;
+ }
+
+- if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
++ if (qla_nvme_register_hba(vha))
++ return 0;
++
++ if (!vha->nvme_local_port)
+ return 0;
+
+ if (!(fcport->nvme_prli_service_param &
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index dcae09a37d498f..da8331dbb01ce8 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1836,8 +1836,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ }
+
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+- if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+- sp->done(sp, res);
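++		/*
++		 * Only SCSI commands have an associated block layer request
++		 * to check; other SRB types complete based on ret_cmd alone.
++		 */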
++ switch (sp->type) {
++ case SRB_SCSI_CMD:
++ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
++ sp->done(sp, res);
++ break;
++ default:
++ if (ret_cmd)
++ sp->done(sp, res);
++ break;
++ }
+ } else {
+ sp->done(sp, res);
+ }
+@@ -1866,14 +1874,9 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+- /*
+- * perform lockless completion during driver unload
+- */
+ if (qla2x00_chip_is_down(vha)) {
+ req->outstanding_cmds[cnt] = NULL;
+- spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ sp->done(sp, res);
+- spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ continue;
+ }
+
+@@ -4593,6 +4596,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ ha->init_cb_dma = 0;
+ fail_free_vp_map:
+ kfree(ha->vp_map);
++ ha->vp_map = NULL;
+ fail:
+ ql_log(ql_log_fatal, NULL, 0x0030,
+ "Memory allocation failure.\n");
+@@ -4679,7 +4683,7 @@ static void
+ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
+ {
+ u32 temp;
+- struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
++ struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb;
+ *ret_cnt = FW_DEF_EXCHANGES_CNT;
+
+ if (max_cnt > vha->hw->max_exchg)
+@@ -5553,15 +5557,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
+ qla2x00_async_prlo_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+- case QLA_EVT_GPNFT:
+- qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
+- e->u.gpnft.sp);
+- break;
+- case QLA_EVT_GPNFT_DONE:
+- qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
++ case QLA_EVT_SCAN_CMD:
++ qla_fab_async_scan(vha, e->u.iosb.sp);
+ break;
+- case QLA_EVT_GNNFT_DONE:
+- qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
++ case QLA_EVT_SCAN_FINISH:
++ qla_fab_scan_finish(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GFPNID:
+ qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
+@@ -5574,7 +5574,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
+ break;
+ case QLA_EVT_ELS_PLOGI:
+ qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+- e->u.fcport.fcport, false);
++ e->u.fcport.fcport);
+ break;
+ case QLA_EVT_SA_REPLACE:
+ rc = qla24xx_issue_sa_replace_iocb(vha, e);
+diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
+index c092a6b1ced4fe..6d16546e172926 100644
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -555,6 +555,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ struct qla_flt_location *fltl = (void *)req->ring;
+ uint32_t *dcode = (uint32_t *)req->ring;
+ uint8_t *buf = (void *)req->ring, *bcode, last_image;
++ int rc;
+
+ /*
+ * FLT-location structure resides after the last PCI region.
+@@ -584,14 +585,24 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ pcihdr = 0;
+ do {
+ /* Verify PCI expansion ROM header. */
+- qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++ rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x016d,
++			"Unable to read PCI Expansion ROM Header (%x).\n", rc);
++ return QLA_FUNCTION_FAILED;
++ }
+ bcode = buf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
+ goto end;
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+- qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++ rc = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x0179,
++ "Unable to read PCI Data Structure (%x).\n", rc);
++ return QLA_FUNCTION_FAILED;
++ }
+ bcode = buf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+@@ -606,7 +617,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ } while (!last_image);
+
+ /* Now verify FLT-location structure. */
+- qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
++ rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x017a,
++ "Unable to read FLT (%x).\n", rc);
++ return QLA_FUNCTION_FAILED;
++ }
+ if (memcmp(fltl->sig, "QFLT", 4))
+ goto end;
+
+@@ -2605,13 +2621,18 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
+ uint32_t offset, uint32_t length)
+ {
+ struct qla_hw_data *ha = vha->hw;
++ int rc;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+ /* Go with read. */
+- qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
++ rc = qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
++ if (rc) {
++ ql_log(ql_log_info, vha, 0x01a0,
++		    "Unable to perform optrom read (%x).\n", rc);
++ }
+
+ /* Resume HBA. */
+ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+@@ -3412,7 +3433,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ struct active_regions active_regions = { };
+
+ if (IS_P3P_TYPE(ha))
+- return ret;
++ return QLA_SUCCESS;
+
+ if (!mbuf)
+ return QLA_FUNCTION_FAILED;
+@@ -3432,20 +3453,31 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+
+ do {
+ /* Verify PCI expansion ROM header. */
+- qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++ ret = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
++ if (ret) {
++ ql_log(ql_log_info, vha, 0x017d,
++			    "Unable to read PCI EXP ROM Header (%x).\n", ret);
++ return QLA_FUNCTION_FAILED;
++ }
++
+ bcode = mbuf + (pcihdr % 4);
+ if (memcmp(bcode, "\x55\xaa", 2)) {
+ /* No signature */
+ ql_log(ql_log_fatal, vha, 0x0059,
+ "No matching ROM signature.\n");
+- ret = QLA_FUNCTION_FAILED;
+- break;
++ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+- qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++ ret = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
++ if (ret) {
++ ql_log(ql_log_info, vha, 0x018e,
++ "Unable to read PCI Data Structure (%x).\n", ret);
++ return QLA_FUNCTION_FAILED;
++ }
++
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+@@ -3454,8 +3486,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ ql_log(ql_log_fatal, vha, 0x005a,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
+ ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32);
+- ret = QLA_FUNCTION_FAILED;
+- break;
++ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Read version */
+@@ -3507,20 +3538,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ faddr = ha->flt_region_fw_sec;
+ }
+
+- qla24xx_read_flash_data(vha, dcode, faddr, 8);
+- if (qla24xx_risc_firmware_invalid(dcode)) {
+- ql_log(ql_log_warn, vha, 0x005f,
+- "Unrecognized fw revision at %x.\n",
+- ha->flt_region_fw * 4);
+- ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
++ ret = qla24xx_read_flash_data(vha, dcode, faddr, 8);
++ if (ret) {
++ ql_log(ql_log_info, vha, 0x019e,
++ "Unable to read FW version (%x).\n", ret);
++ return ret;
+ } else {
+- for (i = 0; i < 4; i++)
+- ha->fw_revision[i] =
++ if (qla24xx_risc_firmware_invalid(dcode)) {
++ ql_log(ql_log_warn, vha, 0x005f,
++ "Unrecognized fw revision at %x.\n",
++ ha->flt_region_fw * 4);
++ ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
++ } else {
++ for (i = 0; i < 4; i++)
++ ha->fw_revision[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
+- ql_dbg(ql_dbg_init, vha, 0x0060,
+- "Firmware revision (flash) %u.%u.%u (%x).\n",
+- ha->fw_revision[0], ha->fw_revision[1],
+- ha->fw_revision[2], ha->fw_revision[3]);
++ ql_dbg(ql_dbg_init, vha, 0x0060,
++ "Firmware revision (flash) %u.%u.%u (%x).\n",
++ ha->fw_revision[0], ha->fw_revision[1],
++ ha->fw_revision[2], ha->fw_revision[3]);
++ }
+ }
+
+ /* Check for golden firmware and get version if available */
+@@ -3531,18 +3568,23 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+
+ memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
+ faddr = ha->flt_region_gold_fw;
+- qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
+- if (qla24xx_risc_firmware_invalid(dcode)) {
+- ql_log(ql_log_warn, vha, 0x0056,
+- "Unrecognized golden fw at %#x.\n", faddr);
+- ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
++ ret = qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
++ if (ret) {
++ ql_log(ql_log_info, vha, 0x019f,
++ "Unable to read Gold FW version (%x).\n", ret);
+ return ret;
+- }
+-
+- for (i = 0; i < 4; i++)
+- ha->gold_fw_version[i] =
+- be32_to_cpu((__force __be32)dcode[4+i]);
++ } else {
++ if (qla24xx_risc_firmware_invalid(dcode)) {
++ ql_log(ql_log_warn, vha, 0x0056,
++ "Unrecognized golden fw at %#x.\n", faddr);
++ ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
++ return QLA_FUNCTION_FAILED;
++ }
+
++ for (i = 0; i < 4; i++)
++ ha->gold_fw_version[i] =
++ be32_to_cpu((__force __be32)dcode[4+i]);
++ }
+ return ret;
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 2ef2dbac0db273..d7551b1443e4a7 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1062,6 +1062,16 @@ void qlt_free_session_done(struct work_struct *work)
+ "%s: sess %p logout completed\n", __func__, sess);
+ }
+
++	/* check for any straggling I/O left behind */
++ if (!(sess->flags & FCF_FCP2_DEVICE) &&
++ qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
++ ql_log(ql_log_warn, vha, 0x3027,
++		    "IO did not return. Resetting.\n");
++ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
++ qla2xxx_wake_dpc(vha);
++ qla2x00_wait_for_chip_reset(vha);
++ }
++
+ if (sess->logo_ack_needed) {
+ sess->logo_ack_needed = 0;
+ qla24xx_async_notify_ack(vha, sess,
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 89367c4bf0ef5e..22bdce0bc32792 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -328,21 +328,46 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ return result + 4;
+ }
+
++enum scsi_vpd_parameters {
++ SCSI_VPD_HEADER_SIZE = 4,
++ SCSI_VPD_LIST_SIZE = 36,
++};
++
+ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ {
+- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
++ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
+ int result;
+
+ if (sdev->no_vpd_size)
+ return SCSI_DEFAULT_VPD_LEN;
+
++ /*
++ * Fetch the supported pages VPD and validate that the requested page
++ * number is present.
++ */
++ if (page != 0) {
++ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
++ if (result < SCSI_VPD_HEADER_SIZE)
++ return 0;
++
++ if (result > sizeof(vpd)) {
++ dev_warn_once(&sdev->sdev_gendev,
++ "%s: long VPD page 0 length: %d bytes\n",
++ __func__, result);
++ result = sizeof(vpd);
++ }
++
++ result -= SCSI_VPD_HEADER_SIZE;
++ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
++ return 0;
++ }
+ /*
+ * Fetch the VPD page header to find out how big the page
+ * is. This is done to prevent problems on legacy devices
+ * which can not handle allocation lengths as large as
+ * potentially requested by the caller.
+ */
+- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
++ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
+ if (result < 0)
+ return 0;
+
+@@ -646,6 +671,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
+ sdev->use_10_for_rw = 0;
+
+ sdev->cdl_supported = 1;
++
++ /*
++ * If the device supports CDL, make sure that the current drive
++	 * feature status is consistent with the user-controlled
++ * cdl_enable state.
++ */
++ scsi_cdl_enable(sdev, sdev->cdl_enable);
+ } else {
+ sdev->cdl_supported = 0;
+ }
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index c67cdcdc3ba86d..43eff1107038a6 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+ static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
+ struct scsi_cmnd *);
+
+-void scsi_eh_wakeup(struct Scsi_Host *shost)
++void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
+ {
+ lockdep_assert_held(shost->host_lock);
+
+- if (scsi_host_busy(shost) == shost->host_failed) {
++ if (busy == shost->host_failed) {
+ trace_scsi_eh_wakeup(shost);
+ wake_up_process(shost->ehandler);
+ SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
+@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
+ if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
+ scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
+ shost->host_eh_scheduled++;
+- scsi_eh_wakeup(shost);
++ scsi_eh_wakeup(shost, scsi_host_busy(shost));
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+@@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
+ {
+ struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
+ struct Scsi_Host *shost = scmd->device->host;
++ unsigned int busy = scsi_host_busy(shost);
+ unsigned long flags;
+
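++	/*
++	 * busy was sampled above, before taking host_lock, so that
++	 * scsi_host_busy() never runs with the lock held.
++	 */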
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+- scsi_eh_wakeup(shost);
++ scsi_eh_wakeup(shost, busy);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+@@ -1152,6 +1153,7 @@ static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd,
+
+ scsi_log_send(scmd);
+ scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
++ scmd->flags |= SCMD_LAST;
+
+ /*
+ * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
+@@ -2195,15 +2197,18 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
+ struct scsi_cmnd *scmd, *next;
+
+ list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
++ struct scsi_device *sdev = scmd->device;
++
+ list_del_init(&scmd->eh_entry);
+- if (scsi_device_online(scmd->device) &&
+- !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
+- scsi_eh_should_retry_cmd(scmd)) {
++ if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
++ scsi_cmd_retry_allowed(scmd) &&
++ scsi_eh_should_retry_cmd(scmd)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: flush retry cmd\n",
+ current->comm));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
++ blk_mq_kick_requeue_list(sdev->request_queue);
+ } else {
+ /*
+ * If just we got sense for the device (called
+@@ -2459,6 +2464,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
+ scsi_init_command(dev, scmd);
+
+ scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
++ scmd->flags |= SCMD_LAST;
+ memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+
+ scmd->cmd_len = 0;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index c2f647a7c1b050..97def2619ecf2a 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+ rcu_read_lock();
+ __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+ if (unlikely(scsi_host_in_recovery(shost))) {
++ unsigned int busy = scsi_host_busy(shost);
++
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->host_failed || shost->host_eh_scheduled)
+- scsi_eh_wakeup(shost);
++ scsi_eh_wakeup(shost, busy);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ rcu_read_unlock();
+@@ -541,10 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
+ if (blk_queue_add_random(q))
+ add_disk_randomness(req->q->disk);
+
+- if (!blk_rq_is_passthrough(req)) {
+- WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
+- cmd->flags &= ~SCMD_INITIALIZED;
+- }
++ WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
++ !(cmd->flags & SCMD_INITIALIZED));
++ cmd->flags = 0;
+
+ /*
+ * Calling rcu_barrier() is not necessary here because the
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 3f0dfb97db6bd1..1fbfe1b52c9f1a 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
+ extern enum blk_eh_timer_return scsi_timeout(struct request *req);
+ extern int scsi_error_handler(void *host);
+ extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
+-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
++extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
+ extern void scsi_eh_scmd_add(struct scsi_cmnd *);
+ void scsi_eh_ready_devs(struct Scsi_Host *shost,
+ struct list_head *work_q,
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 44680f65ea1455..ca99be7341d9be 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1619,6 +1619,40 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
+ }
+ EXPORT_SYMBOL(scsi_add_device);
+
++int scsi_resume_device(struct scsi_device *sdev)
++{
++ struct device *dev = &sdev->sdev_gendev;
++ int ret = 0;
++
++ device_lock(dev);
++
++ /*
++ * Bail out if the device or its queue are not running. Otherwise,
++	 * Bail out if the device or its queue is not running. Otherwise,
++	 * the rescan may block waiting for commands to be executed, with us
++	 * holding the device lock. This can result in a potential deadlock
++	 * in the power management core code when system resume is ongoing.
++ if (sdev->sdev_state != SDEV_RUNNING ||
++ blk_queue_pm_only(sdev->request_queue)) {
++ ret = -EWOULDBLOCK;
++ goto unlock;
++ }
++
++ if (dev->driver && try_module_get(dev->driver->owner)) {
++ struct scsi_driver *drv = to_scsi_driver(dev->driver);
++
++ if (drv->resume)
++ ret = drv->resume(dev);
++ module_put(dev->driver->owner);
++ }
++
++unlock:
++ device_unlock(dev);
++
++ return ret;
++}
++EXPORT_SYMBOL(scsi_resume_device);
++
+ int scsi_rescan_device(struct scsi_device *sdev)
+ {
+ struct device *dev = &sdev->sdev_gendev;
+diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
+index d704c484a251c5..7fdd2b61fe855e 100644
+--- a/drivers/scsi/scsi_transport_sas.c
++++ b/drivers/scsi/scsi_transport_sas.c
+@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
+ }
+ EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+
++/**
++ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
++ * @sdev: SCSI device
++ *
++ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
++ * Information). Since this VPD page is implemented only for ATA devices,
++ * this function always returns false for SCSI devices.
++ */
++bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
++{
++ struct scsi_vpd *vpd;
++ bool ncq_prio_supported = false;
++
++ rcu_read_lock();
++ vpd = rcu_dereference(sdev->vpd_pg89);
++ if (vpd && vpd->len >= 214)
++ ncq_prio_supported = (vpd->data[213] >> 4) & 1;
++ rcu_read_unlock();
++
++ return ncq_prio_supported;
++}
++EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
++
+ /*
+ * SAS Phy attributes
+ */
+diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
+index 2442d4d2e3f38f..f668c1c0a98f20 100644
+--- a/drivers/scsi/scsi_transport_spi.c
++++ b/drivers/scsi/scsi_transport_spi.c
+@@ -676,10 +676,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
+ for (r = 0; r < retries; r++) {
+ result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
+ buffer, len, &sshdr);
+- if(result || !scsi_device_online(sdev)) {
++ if (result || !scsi_device_online(sdev)) {
+
+ scsi_device_set_state(sdev, SDEV_QUIESCE);
+- if (scsi_sense_valid(&sshdr)
++ if (result > 0 && scsi_sense_valid(&sshdr)
+ && sshdr.sense_key == ILLEGAL_REQUEST
+ /* INVALID FIELD IN CDB */
+ && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6effa13039f394..2c627deedc1fa2 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1642,24 +1642,21 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
+ return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ }
+
+-static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
++static int sd_sync_cache(struct scsi_disk *sdkp)
+ {
+ int retries, res;
+ struct scsi_device *sdp = sdkp->device;
+ const int timeout = sdp->request_queue->rq_timeout
+ * SD_FLUSH_TIMEOUT_MULTIPLIER;
+- struct scsi_sense_hdr my_sshdr;
++ struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+- /* caller might not be interested in sense, but we need it */
+- .sshdr = sshdr ? : &my_sshdr,
++ .sshdr = &sshdr,
+ };
+
+ if (!scsi_device_online(sdp))
+ return -ENODEV;
+
+- sshdr = exec_args.sshdr;
+-
+ for (retries = 3; retries > 0; --retries) {
+ unsigned char cmd[16] = { 0 };
+
+@@ -1684,15 +1681,25 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
+ return res;
+
+ if (scsi_status_is_check_condition(res) &&
+- scsi_sense_valid(sshdr)) {
+- sd_print_sense_hdr(sdkp, sshdr);
++ scsi_sense_valid(&sshdr)) {
++ sd_print_sense_hdr(sdkp, &sshdr);
+
+ /* we need to evaluate the error return */
+- if (sshdr->asc == 0x3a || /* medium not present */
+- sshdr->asc == 0x20 || /* invalid command */
+- (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
++ if (sshdr.asc == 0x3a || /* medium not present */
++ sshdr.asc == 0x20 || /* invalid command */
++ (sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */
+ /* this is no error here */
+ return 0;
++
++ /*
++ * If a format is in progress or if the drive does not
++ * support sync, there is not much we can do because
++		 * this is called during shutdown or suspend, so just
++		 * return success and let those operations proceed.
++ */
++ if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) ||
++ sshdr.sense_key == ILLEGAL_REQUEST)
++ return 0;
+ }
+
+ switch (host_byte(res)) {
+@@ -3111,7 +3118,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb1);
+
+- if (!vpd || vpd->len < 8) {
++ if (!vpd || vpd->len <= 8) {
+ rcu_read_unlock();
+ return;
+ }
+@@ -3399,6 +3406,31 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+ return true;
+ }
+
++static void sd_read_block_zero(struct scsi_disk *sdkp)
++{
++ struct scsi_device *sdev = sdkp->device;
++ unsigned int buf_len = sdev->sector_size;
++ u8 *buffer, cmd[16] = { };
++
++ buffer = kmalloc(buf_len, GFP_KERNEL);
++ if (!buffer)
++ return;
++
++ if (sdev->use_16_for_rw) {
++ cmd[0] = READ_16;
++ put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
++ put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
++ } else {
++ cmd[0] = READ_10;
++ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
++ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
++ }
++
++ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
++ SD_TIMEOUT, sdkp->max_retries, NULL);
++ kfree(buffer);
++}
++
+ /**
+ * sd_revalidate_disk - called the first time a new disk is seen,
+ * performs disk spin up, read_capacity, etc.
+@@ -3438,7 +3470,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ */
+ if (sdkp->media_present) {
+ sd_read_capacity(sdkp, buffer);
+-
++ /*
++ * Some USB/UAS devices return generic values for mode pages
++ * until the media has been accessed. Trigger a READ operation
++ * to force the device to populate mode pages.
++ */
++ if (sdp->read_before_ms)
++ sd_read_block_zero(sdkp);
+ /*
+ * set the default to rotational. All non-rotational devices
+ * support the block characteristics VPD page, which will
+@@ -3725,7 +3763,7 @@ static int sd_probe(struct device *dev)
+
+ error = device_add_disk(dev, gd, NULL);
+ if (error) {
+- put_device(&sdkp->disk_dev);
++ device_unregister(&sdkp->disk_dev);
+ put_disk(gd);
+ goto out;
+ }
+@@ -3847,7 +3885,7 @@ static void sd_shutdown(struct device *dev)
+
+ if (sdkp->WCE && sdkp->media_present) {
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+- sd_sync_cache(sdkp, NULL);
++ sd_sync_cache(sdkp);
+ }
+
+ if ((system_state != SYSTEM_RESTART &&
+@@ -3868,7 +3906,6 @@ static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
+ static int sd_suspend_common(struct device *dev, bool runtime)
+ {
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+- struct scsi_sense_hdr sshdr;
+ int ret = 0;
+
+ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
+@@ -3877,24 +3914,13 @@ static int sd_suspend_common(struct device *dev, bool runtime)
+ if (sdkp->WCE && sdkp->media_present) {
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+- ret = sd_sync_cache(sdkp, &sshdr);
+-
+- if (ret) {
+- /* ignore OFFLINE device */
+- if (ret == -ENODEV)
+- return 0;
+-
+- if (!scsi_sense_valid(&sshdr) ||
+- sshdr.sense_key != ILLEGAL_REQUEST)
+- return ret;
++ ret = sd_sync_cache(sdkp);
++ /* ignore OFFLINE device */
++ if (ret == -ENODEV)
++ return 0;
+
+- /*
+- * sshdr.sense_key == ILLEGAL_REQUEST means this drive
+- * doesn't support sync. There's not much to do and
+- * suspend shouldn't fail.
+- */
+- ret = 0;
+- }
++ if (ret)
++ return ret;
+ }
+
+ if (sd_do_start_stop(sdkp->device, runtime)) {
+@@ -3925,10 +3951,24 @@ static int sd_suspend_runtime(struct device *dev)
+ return sd_suspend_common(dev, true);
+ }
+
+-static int sd_resume(struct device *dev, bool runtime)
++static int sd_resume(struct device *dev)
+ {
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+- int ret = 0;
++
++ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
++
++ if (opal_unlock_from_suspend(sdkp->opal_dev)) {
++ sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
++ return -EIO;
++ }
++
++ return 0;
++}
++
++static int sd_resume_common(struct device *dev, bool runtime)
++{
++ struct scsi_disk *sdkp = dev_get_drvdata(dev);
++ int ret;
+
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+@@ -3938,13 +3978,10 @@ static int sd_resume(struct device *dev, bool runtime)
+ return 0;
+ }
+
+- if (!sdkp->device->no_start_on_resume) {
+- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+- ret = sd_start_stop_device(sdkp, 1);
+- }
+-
++ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
++ ret = sd_start_stop_device(sdkp, 1);
+ if (!ret) {
+- opal_unlock_from_suspend(sdkp->opal_dev);
++ sd_resume(dev);
+ sdkp->suspended = false;
+ }
+
+@@ -3953,10 +3990,17 @@ static int sd_resume(struct device *dev, bool runtime)
+
+ static int sd_resume_system(struct device *dev)
+ {
+- if (pm_runtime_suspended(dev))
++ if (pm_runtime_suspended(dev)) {
++ struct scsi_disk *sdkp = dev_get_drvdata(dev);
++ struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
++
++ if (sdp && sdp->force_runtime_start_on_system_start)
++ pm_request_resume(dev);
++
+ return 0;
++ }
+
+- return sd_resume(dev, false);
++ return sd_resume_common(dev, false);
+ }
+
+ static int sd_resume_runtime(struct device *dev)
+@@ -3983,7 +4027,7 @@ static int sd_resume_runtime(struct device *dev)
+ "Failed to clear sense data\n");
+ }
+
+- return sd_resume(dev, true);
++ return sd_resume_common(dev, true);
+ }
+
+ static const struct dev_pm_ops sd_pm_ops = {
+@@ -4006,6 +4050,7 @@ static struct scsi_driver sd_template = {
+ .pm = &sd_pm_ops,
+ },
+ .rescan = sd_rescan,
++ .resume = sd_resume,
+ .init_command = sd_init_command,
+ .uninit_command = sd_uninit_command,
+ .done = sd_done,
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 0d8afffd1683b1..e6d8beb8777669 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -285,6 +285,7 @@ sg_open(struct inode *inode, struct file *filp)
+ int dev = iminor(inode);
+ int flags = filp->f_flags;
+ struct request_queue *q;
++ struct scsi_device *device;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ int retval;
+@@ -301,11 +302,12 @@ sg_open(struct inode *inode, struct file *filp)
+
+ /* This driver's module count bumped by fops_get in <linux/fs.h> */
+ /* Prevent the device driver from vanishing while we sleep */
+- retval = scsi_device_get(sdp->device);
++ device = sdp->device;
++ retval = scsi_device_get(device);
+ if (retval)
+ goto sg_put;
+
+- retval = scsi_autopm_get_device(sdp->device);
++ retval = scsi_autopm_get_device(device);
+ if (retval)
+ goto sdp_put;
+
+@@ -313,7 +315,7 @@ sg_open(struct inode *inode, struct file *filp)
+ * check if O_NONBLOCK. Permits SCSI commands to be issued
+ * during error recovery. Tread carefully. */
+ if (!((flags & O_NONBLOCK) ||
+- scsi_block_when_processing_errors(sdp->device))) {
++ scsi_block_when_processing_errors(device))) {
+ retval = -ENXIO;
+ /* we are in error recovery for this device */
+ goto error_out;
+@@ -344,7 +346,7 @@ sg_open(struct inode *inode, struct file *filp)
+
+ if (sdp->open_cnt < 1) { /* no existing opens */
+ sdp->sgdebug = 0;
+- q = sdp->device->request_queue;
++ q = device->request_queue;
+ sdp->sg_tablesize = queue_max_segments(q);
+ }
+ sfp = sg_add_sfp(sdp);
+@@ -370,10 +372,11 @@ sg_open(struct inode *inode, struct file *filp)
+ error_mutex_locked:
+ mutex_unlock(&sdp->open_rel_lock);
+ error_out:
+- scsi_autopm_put_device(sdp->device);
++ scsi_autopm_put_device(device);
+ sdp_put:
+- scsi_device_put(sdp->device);
+- goto sg_put;
++ kref_put(&sdp->d_ref, sg_device_destroy);
++ scsi_device_put(device);
++ return retval;
+ }
+
+ /* Release resources associated with a successful sg_open()
+@@ -2208,6 +2211,7 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ {
+ struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
+ struct sg_device *sdp = sfp->parentdp;
++ struct scsi_device *device = sdp->device;
+ Sg_request *srp;
+ unsigned long iflags;
+
+@@ -2233,8 +2237,8 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ "sg_remove_sfp: sfp=0x%p\n", sfp));
+ kfree(sfp);
+
+- scsi_device_put(sdp->device);
+ kref_put(&sdp->d_ref, sg_device_destroy);
++ scsi_device_put(device);
+ module_put(THIS_MODULE);
+ }
+
+diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
+index 04194018351696..cdedc271857aae 100644
+--- a/drivers/scsi/smartpqi/smartpqi.h
++++ b/drivers/scsi/smartpqi/smartpqi.h
+@@ -1347,7 +1347,6 @@ struct pqi_ctrl_info {
+ bool controller_online;
+ bool block_requests;
+ bool scan_blocked;
+- u8 logical_volume_rescan_needed : 1;
+ u8 inbound_spanning_supported : 1;
+ u8 outbound_spanning_supported : 1;
+ u8 pqi_mode_enabled : 1;
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 9a58df9312fa7e..0af2d366c85f94 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -2093,8 +2093,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+- if (ctrl_info->logical_volume_rescan_needed)
+- existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+@@ -2164,6 +2162,20 @@ static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
+ INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
+ }
+
++static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
++{
++ if (pqi_device_in_remove(device))
++ return false;
++
++ if (device->sdev == NULL)
++ return false;
++
++ if (!scsi_device_online(device->sdev))
++ return false;
++
++ return device->rescan;
++}
++
+ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
+ {
+@@ -2284,9 +2296,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
+- if (device->rescan) {
+- scsi_rescan_device(device->sdev);
++ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
++ if (pqi_volume_rescan_needed(device)) {
+ device->rescan = false;
++ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
++ scsi_rescan_device(device->sdev);
++ } else {
++ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ }
+ }
+ }
+@@ -2308,8 +2324,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ }
+ }
+
+- ctrl_info->logical_volume_rescan_needed = false;
+-
+ }
+
+ static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+@@ -2341,14 +2355,6 @@ static inline void pqi_mask_device(u8 *scsi3addr)
+ scsi3addr[3] |= 0xc0;
+ }
+
+-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
+-{
+- if (pqi_is_logical_device(device))
+- return false;
+-
+- return (device->path_map & (device->path_map - 1)) != 0;
+-}
+-
+ static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+ {
+ return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
+@@ -3245,14 +3251,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
+ int residual_count;
+ int xfer_count;
+ bool device_offline;
+- struct pqi_scsi_dev *device;
+
+ scmd = io_request->scmd;
+ error_info = io_request->error_info;
+ host_byte = DID_OK;
+ sense_data_length = 0;
+ device_offline = false;
+- device = scmd->device->hostdata;
+
+ switch (error_info->service_response) {
+ case PQI_AIO_SERV_RESPONSE_COMPLETE:
+@@ -3277,14 +3281,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
+ break;
+ case PQI_AIO_STATUS_AIO_PATH_DISABLED:
+ pqi_aio_path_disabled(io_request);
+- if (pqi_is_multipath_device(device)) {
+- pqi_device_remove_start(device);
+- host_byte = DID_NO_CONNECT;
+- scsi_status = SAM_STAT_CHECK_CONDITION;
+- } else {
+- scsi_status = SAM_STAT_GOOD;
+- io_request->status = -EAGAIN;
+- }
++ scsi_status = SAM_STAT_GOOD;
++ io_request->status = -EAGAIN;
+ break;
+ case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
+ case PQI_AIO_STATUS_INVALID_DEVICE:
+@@ -3702,6 +3700,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+ return ack_event;
+ }
+
++static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
++{
++ unsigned long flags;
++ struct pqi_scsi_dev *device;
++
++ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
++
++ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
++ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
++ device->rescan = true;
++ }
++
++ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
++}
++
+ static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+ {
+ unsigned long flags;
+@@ -3742,7 +3755,7 @@ static void pqi_event_worker(struct work_struct *work)
+ ack_event = true;
+ rescan_needed = true;
+ if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
+- ctrl_info->logical_volume_rescan_needed = true;
++ pqi_mark_volumes_for_rescan(ctrl_info);
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
+ }
+@@ -5905,7 +5918,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
+ int rc;
+ struct pqi_scsi_dev *device;
+ struct pqi_stream_data *pqi_stream_data;
+- struct pqi_scsi_dev_raid_map_data rmd;
++ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+
+ if (!ctrl_info->enable_stream_detection)
+ return false;
+@@ -6504,8 +6517,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
+ {
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
++ if (!ctrl_info->disable_managed_interrupts)
++ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0);
++ else
++ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ }
+
+ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
+@@ -10142,6 +10158,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1014, 0x0718)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1137, 0x02f8)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1137, 0x02f9)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1137, 0x02fa)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1000)
+@@ -10198,6 +10226,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100a)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x100e)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x100f)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1010)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1011)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1043)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1044)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1045)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_ANY_ID, PCI_ANY_ID)
+diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
+index 1175f2e213b566..dc899277b3a441 100644
+--- a/drivers/scsi/sr.h
++++ b/drivers/scsi/sr.h
+@@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *);
+ int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
+ int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
+ int sr_reset(struct cdrom_device_info *);
+-int sr_select_speed(struct cdrom_device_info *cdi, int speed);
++int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
+ int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
+
+ int sr_is_xa(Scsi_CD *);
+diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
+index 5b0b35e60e61fe..089653018d32c1 100644
+--- a/drivers/scsi/sr_ioctl.c
++++ b/drivers/scsi/sr_ioctl.c
+@@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
+ return 0;
+ }
+
+-int sr_select_speed(struct cdrom_device_info *cdi, int speed)
++int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
+ {
+ Scsi_CD *cd = cdi->handle;
+ struct packet_command cgc;
+
++ /* avoid exceeding the max speed or overflowing integer bounds */
++ speed = clamp(speed, 0, 0xffff / 177);
++
+ if (speed == 0)
+ speed = 0xffff; /* set to max */
+ else
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index 338aa8c429682c..212a402e753587 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -835,6 +835,9 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
+ int backspace, result;
+ struct st_partstat *STps;
+
++ if (STp->ready != ST_READY)
++ return 0;
++
+ /*
+ * If there was a bus reset, block further access
+ * to this device.
+@@ -842,8 +845,6 @@ static int flush_buffer(struct scsi_tape *STp, int seek_next)
+ if (STp->pos_unknown)
+ return (-EIO);
+
+- if (STp->ready != ST_READY)
+- return 0;
+ STps = &(STp->ps[STp->partition]);
+ if (STps->rw == ST_WRITING) /* Writing */
+ return st_flush_write_buffer(STp);
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index a95936b18f695e..7ceb982040a5df 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -330,6 +330,7 @@ enum storvsc_request_type {
+ */
+
+ static int storvsc_ringbuffer_size = (128 * 1024);
++static int aligned_ringbuffer_size;
+ static u32 max_outstanding_req_per_channel;
+ static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
+
+@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
+ new_sc->next_request_id_callback = storvsc_next_request_id;
+
+ ret = vmbus_open(new_sc,
+- storvsc_ringbuffer_size,
+- storvsc_ringbuffer_size,
++ aligned_ringbuffer_size,
++ aligned_ringbuffer_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, new_sc);
+@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
+ dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
+
+ stor_device->port_number = host->host_no;
+- ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
++ ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
+ if (ret)
+ goto err_out1;
+
+@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
+ {
+ int ret;
+
+- ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
++ ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
+ hv_dev_is_fc(hv_dev));
+ return ret;
+ }
+@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
+ * the ring buffer indices) by the max request size (which is
+ * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
+ */
++ aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
+ max_outstanding_req_per_channel =
+- ((storvsc_ringbuffer_size - PAGE_SIZE) /
++ ((aligned_ringbuffer_size - PAGE_SIZE) /
+ ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
+ sizeof(struct vstor_packet) + sizeof(u64),
+ sizeof(u64)));
+diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
+index e4fafc77bd2010..bbf6327b9e5a68 100644
+--- a/drivers/scsi/wd33c93.c
++++ b/drivers/scsi/wd33c93.c
+@@ -831,7 +831,7 @@ wd33c93_intr(struct Scsi_Host *instance)
+ /* construct an IDENTIFY message with correct disconnect bit */
+
+ hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
+- if (scsi_pointer->phase)
++ if (WD33C93_scsi_pointer(cmd)->phase)
+ hostdata->outgoing_msg[0] |= 0x40;
+
+ if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
+diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
+index d43873bb5fe6df..01cbd462198107 100644
+--- a/drivers/slimbus/core.c
++++ b/drivers/slimbus/core.c
+@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
+ if (ret < 0)
+ goto err;
+ } else if (report_present) {
+- ret = ida_simple_get(&ctrl->laddr_ida,
+- 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
++ ret = ida_alloc_max(&ctrl->laddr_ida,
++ SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+
+diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
+index 77aa6d26476cd2..0da5d9d1af0370 100644
+--- a/drivers/slimbus/qcom-ngd-ctrl.c
++++ b/drivers/slimbus/qcom-ngd-ctrl.c
+@@ -1451,7 +1451,11 @@ static void qcom_slim_ngd_up_worker(struct work_struct *work)
+ ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);
+
+ /* Make sure qmi service is up before continuing */
+- wait_for_completion_interruptible(&ctrl->qmi_up);
++ if (!wait_for_completion_interruptible_timeout(&ctrl->qmi_up,
++ msecs_to_jiffies(MSEC_PER_SEC))) {
++ dev_err(ctrl->dev, "QMI wait timeout\n");
++ return;
++ }
+
+ mutex_lock(&ctrl->ssr_lock);
+ qcom_slim_ngd_enable(ctrl, true);
+diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
+index 1d2b27e3ea63f8..b811446e0fa55f 100644
+--- a/drivers/soc/fsl/dpio/dpio-service.c
++++ b/drivers/soc/fsl/dpio/dpio-service.c
+@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ struct qbman_eq_desc *ed;
+ int i, ret;
+
+- ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
++ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index 739e4eee6b75ca..7e9074519ad22d 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -991,7 +991,7 @@ struct qman_portal {
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+- spinlock_t cgr_lock;
++ raw_spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+- spin_lock_init(&portal->cgr_lock);
++ raw_spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+- spin_lock(&p->cgr_lock);
++ /*
++ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
++ */
++ raw_spin_lock_irq(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+- spin_unlock(&p->cgr_lock);
++ raw_spin_unlock_irq(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ return;
+@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+- spin_unlock(&p->cgr_lock);
++ raw_spin_unlock_irq(&p->cgr_lock);
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ }
+
+@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+- spin_lock(&p->cgr_lock);
++ raw_spin_lock_irq(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+ out:
+- spin_unlock(&p->cgr_lock);
++ raw_spin_unlock_irq(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+ }
+@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
+ return -EINVAL;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+- spin_lock_irqsave(&p->cgr_lock, irqflags);
++ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ release_lock:
+- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+ }
+@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+ if (!p)
+ return -EINVAL;
+
+- spin_lock_irqsave(&p->cgr_lock, irqflags);
++ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
+ ret = qm_modify_cgr(cgr, 0, opts);
+- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
++ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+ }
+diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
+index b3c292c9a14eae..8dc73cc1a83b10 100644
+--- a/drivers/soc/fsl/qe/qmc.c
++++ b/drivers/soc/fsl/qe/qmc.c
+@@ -175,7 +175,7 @@ struct qmc_chan {
+ struct list_head list;
+ unsigned int id;
+ struct qmc *qmc;
+- void *__iomem s_param;
++ void __iomem *s_param;
+ enum qmc_mode mode;
+ u64 tx_ts_mask;
+ u64 rx_ts_mask;
+@@ -203,9 +203,9 @@ struct qmc_chan {
+ struct qmc {
+ struct device *dev;
+ struct tsa_serial *tsa_serial;
+- void *__iomem scc_regs;
+- void *__iomem scc_pram;
+- void *__iomem dpram;
++ void __iomem *scc_regs;
++ void __iomem *scc_pram;
++ void __iomem *dpram;
+ u16 scc_pram_offset;
+ cbd_t __iomem *bd_table;
+ dma_addr_t bd_dma_addr;
+@@ -218,37 +218,37 @@ struct qmc {
+ struct qmc_chan *chans[64];
+ };
+
+-static inline void qmc_write16(void *__iomem addr, u16 val)
++static inline void qmc_write16(void __iomem *addr, u16 val)
+ {
+ iowrite16be(val, addr);
+ }
+
+-static inline u16 qmc_read16(void *__iomem addr)
++static inline u16 qmc_read16(void __iomem *addr)
+ {
+ return ioread16be(addr);
+ }
+
+-static inline void qmc_setbits16(void *__iomem addr, u16 set)
++static inline void qmc_setbits16(void __iomem *addr, u16 set)
+ {
+ qmc_write16(addr, qmc_read16(addr) | set);
+ }
+
+-static inline void qmc_clrbits16(void *__iomem addr, u16 clr)
++static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+ {
+ qmc_write16(addr, qmc_read16(addr) & ~clr);
+ }
+
+-static inline void qmc_write32(void *__iomem addr, u32 val)
++static inline void qmc_write32(void __iomem *addr, u32 val)
+ {
+ iowrite32be(val, addr);
+ }
+
+-static inline u32 qmc_read32(void *__iomem addr)
++static inline u32 qmc_read32(void __iomem *addr)
+ {
+ return ioread32be(addr);
+ }
+
+-static inline void qmc_setbits32(void *__iomem addr, u32 set)
++static inline void qmc_setbits32(void __iomem *addr, u32 set)
+ {
+ qmc_write32(addr, qmc_read32(addr) | set);
+ }
+@@ -318,7 +318,7 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+@@ -374,7 +374,7 @@ static void qmc_chan_write_done(struct qmc_chan *chan)
+ void (*complete)(void *context);
+ unsigned long flags;
+ void *context;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ /*
+@@ -425,7 +425,7 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+@@ -488,7 +488,7 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
+ void (*complete)(void *context, size_t size);
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ void *context;
+ u16 datalen;
+ u16 ctrl;
+@@ -663,7 +663,7 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+@@ -685,7 +685,6 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
+ qmc_read16(chan->s_param + QMC_SPE_RBASE));
+
+ chan->rx_pending = 0;
+- chan->is_rx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ }
+@@ -694,7 +693,7 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
+index 3646153117b38d..1bbc9af1e50b86 100644
+--- a/drivers/soc/fsl/qe/tsa.c
++++ b/drivers/soc/fsl/qe/tsa.c
+@@ -98,9 +98,9 @@
+ #define TSA_SIRP 0x10
+
+ struct tsa_entries_area {
+- void *__iomem entries_start;
+- void *__iomem entries_next;
+- void *__iomem last_entry;
++ void __iomem *entries_start;
++ void __iomem *entries_next;
++ void __iomem *last_entry;
+ };
+
+ struct tsa_tdm {
+@@ -117,8 +117,8 @@ struct tsa_tdm {
+
+ struct tsa {
+ struct device *dev;
+- void *__iomem si_regs;
+- void *__iomem si_ram;
++ void __iomem *si_regs;
++ void __iomem *si_ram;
+ resource_size_t si_ram_sz;
+ spinlock_t lock;
+ int tdms; /* TSA_TDMx ORed */
+@@ -135,27 +135,27 @@ static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
+ return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
+ }
+
+-static inline void tsa_write32(void *__iomem addr, u32 val)
++static inline void tsa_write32(void __iomem *addr, u32 val)
+ {
+ iowrite32be(val, addr);
+ }
+
+-static inline void tsa_write8(void *__iomem addr, u32 val)
++static inline void tsa_write8(void __iomem *addr, u8 val)
+ {
+ iowrite8(val, addr);
+ }
+
+-static inline u32 tsa_read32(void *__iomem addr)
++static inline u32 tsa_read32(void __iomem *addr)
+ {
+ return ioread32be(addr);
+ }
+
+-static inline void tsa_clrbits32(void *__iomem addr, u32 clr)
++static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
+ {
+ tsa_write32(addr, tsa_read32(addr) & ~clr);
+ }
+
+-static inline void tsa_clrsetbits32(void *__iomem addr, u32 clr, u32 set)
++static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
+ {
+ tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
+ }
+@@ -313,7 +313,7 @@ static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
+ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+ {
+- void *__iomem addr;
++ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
+diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
+index b0cd071c4719b7..0b2e5690dacfae 100644
+--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
+@@ -14,7 +14,8 @@
+ #define CMDQ_POLL_ENABLE_MASK BIT(0)
+ #define CMDQ_EOC_IRQ_EN BIT(0)
+ #define CMDQ_REG_TYPE 1
+-#define CMDQ_JUMP_RELATIVE 1
++#define CMDQ_JUMP_RELATIVE 0
++#define CMDQ_JUMP_ABSOLUTE 1
+
+ struct cmdq_instruction {
+ union {
+@@ -397,7 +398,7 @@ int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_JUMP;
+- inst.offset = CMDQ_JUMP_RELATIVE;
++ inst.offset = CMDQ_JUMP_ABSOLUTE;
+ inst.value = addr >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ return cmdq_pkt_append_command(pkt, inst);
+diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
+index eb656b33156ba4..f19e74d342aa25 100644
+--- a/drivers/soc/microchip/Kconfig
++++ b/drivers/soc/microchip/Kconfig
+@@ -1,5 +1,5 @@
+ config POLARFIRE_SOC_SYS_CTRL
+- tristate "POLARFIRE_SOC_SYS_CTRL"
++ tristate "Microchip PolarFire SoC (MPFS) system controller support"
+ depends on POLARFIRE_SOC_MAILBOX
+ help
+ This driver adds support for the PolarFire SoC (MPFS) system controller.
+diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
+index 34c40368d5b5ec..ab2418d2fe43a9 100644
+--- a/drivers/soc/qcom/cmd-db.c
++++ b/drivers/soc/qcom/cmd-db.c
+@@ -1,6 +1,10 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
++/*
++ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
+
++#include <linux/bitfield.h>
+ #include <linux/debugfs.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -17,6 +21,8 @@
+ #define MAX_SLV_ID 8
+ #define SLAVE_ID_MASK 0x7
+ #define SLAVE_ID_SHIFT 16
++#define SLAVE_ID(addr) FIELD_GET(GENMASK(19, 16), addr)
++#define VRM_ADDR(addr) FIELD_GET(GENMASK(19, 4), addr)
+
+ /**
+ * struct entry_header: header for each entry in cmddb
+@@ -220,6 +226,30 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len)
+ }
+ EXPORT_SYMBOL(cmd_db_read_aux_data);
+
++/**
++ * cmd_db_match_resource_addr() - Compare if both Resource addresses are same
++ *
++ * @addr1: Resource address to compare
++ * @addr2: Resource address to compare
++ *
++ * Return: true if two addresses refer to the same resource, false otherwise
++ */
++bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
++{
++ /*
++ * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
++ * aligned addresses associated with it. Ignore the offset to check
++ * for VRM requests.
++ */
++ if (addr1 == addr2)
++ return true;
++ else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
++ return true;
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
++
+ /**
+ * cmd_db_read_slave_id - Get the slave ID for a given resource address
+ *
+@@ -324,7 +354,7 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
++ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WC);
+ if (!cmd_db_header) {
+ ret = -ENOMEM;
+ cmd_db_header = NULL;
+diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
+index adf2d523f103c4..59ef8d739e93b6 100644
+--- a/drivers/soc/qcom/icc-bwmon.c
++++ b/drivers/soc/qcom/icc-bwmon.c
+@@ -565,7 +565,7 @@ static void bwmon_start(struct icc_bwmon *bwmon)
+ int window;
+
+ /* No need to check for errors, as this must have succeeded before. */
+- dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0);
++ dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0));
+
+ bwmon_clear_counters(bwmon, true);
+
+@@ -772,11 +772,13 @@ static int bwmon_probe(struct platform_device *pdev)
+ opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
++ dev_pm_opp_put(opp);
+
+ bwmon->min_bw_kbps = 0;
+ opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
++ dev_pm_opp_put(opp);
+
+ bwmon->dev = dev;
+
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index e32a4161a8d025..03d5de759b2567 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -46,7 +46,7 @@
+ #define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
+ #define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
+ #define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+-#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_8 * n)
++#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n)
+
+ #define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
+ #define LLCC_TRP_PCB_ACT 0x21f04
+@@ -610,6 +610,8 @@ static int llcc_update_act_ctrl(u32 sid,
+ ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
+ slice_status, !(slice_status & status),
+ 0, LLCC_STATUS_READ_DELAY);
++ if (ret)
++ return ret;
+
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0)
+ ret = regmap_write(drv_data->bcast_regmap, act_clear_reg,
+@@ -785,15 +787,15 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+- ret = regmap_write(drv_data->bcast_regmap,
+- LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
++ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
++ BIT(config->slice_id), disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ if (drv_data->version < LLCC_VERSION_4_1_0_0) {
+ retain_pc = config->retain_on_pc << config->slice_id;
+- ret = regmap_write(drv_data->bcast_regmap,
+- LLCC_TRP_PCB_ACT, retain_pc);
++ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
++ BIT(config->slice_id), retain_pc);
+ if (ret)
+ return ret;
+ }
+@@ -944,6 +946,9 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ u32 version;
+ struct regmap *regmap;
+
++ if (!IS_ERR(drv_data))
++ return -EBUSY;
++
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ ret = -ENOMEM;
+diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
+index 0034af927b4887..c7cd4daa10b0fd 100644
+--- a/drivers/soc/qcom/pdr_interface.c
++++ b/drivers/soc/qcom/pdr_interface.c
+@@ -76,12 +76,12 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
+ locator_hdl);
+ struct pdr_service *pds;
+
++ mutex_lock(&pdr->lock);
+ /* Create a local client port for QMI communication */
+ pdr->locator_addr.sq_family = AF_QIPCRTR;
+ pdr->locator_addr.sq_node = svc->node;
+ pdr->locator_addr.sq_port = svc->port;
+
+- mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = true;
+ mutex_unlock(&pdr->lock);
+
+@@ -104,10 +104,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = false;
+- mutex_unlock(&pdr->lock);
+
+ pdr->locator_addr.sq_node = 0;
+ pdr->locator_addr.sq_port = 0;
++ mutex_unlock(&pdr->lock);
+ }
+
+ static const struct qmi_ops pdr_locator_ops = {
+@@ -365,12 +365,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ if (ret < 0)
+ return ret;
+
++ mutex_lock(&pdr->lock);
+ ret = qmi_send_request(&pdr->locator_hdl,
+ &pdr->locator_addr,
+ &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ servreg_get_domain_list_req_ei,
+ req);
++ mutex_unlock(&pdr->lock);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+@@ -415,7 +417,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+ if (ret < 0)
+ goto out;
+
+- for (i = domains_read; i < resp->domain_list_len; i++) {
++ for (i = 0; i < resp->domain_list_len; i++) {
+ entry = &resp->domain_list[i];
+
+ if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
+index 61c89ddfc75b85..7e50eeb4cc6f2f 100644
+--- a/drivers/soc/qcom/pmic_glink.c
++++ b/drivers/soc/qcom/pmic_glink.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/soc/qcom/pdr.h>
+ #include <linux/soc/qcom/pmic_glink.h>
++#include <linux/spinlock.h>
+
+ enum {
+ PMIC_GLINK_CLIENT_BATT = 0,
+@@ -39,7 +40,7 @@ struct pmic_glink {
+ unsigned int pdr_state;
+
+ /* serializing clients list updates */
+- struct mutex client_lock;
++ spinlock_t client_lock;
+ struct list_head clients;
+ };
+
+@@ -61,17 +62,18 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res)
+ {
+ struct pmic_glink_client *client = (struct pmic_glink_client *)res;
+ struct pmic_glink *pg = client->pg;
++ unsigned long flags;
+
+- mutex_lock(&pg->client_lock);
++ spin_lock_irqsave(&pg->client_lock, flags);
+ list_del(&client->node);
+- mutex_unlock(&pg->client_lock);
++ spin_unlock_irqrestore(&pg->client_lock, flags);
+ }
+
+-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+- unsigned int id,
+- void (*cb)(const void *, size_t, void *),
+- void (*pdr)(void *, int),
+- void *priv)
++struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
++ unsigned int id,
++ void (*cb)(const void *, size_t, void *),
++ void (*pdr)(void *, int),
++ void *priv)
+ {
+ struct pmic_glink_client *client;
+ struct pmic_glink *pg = dev_get_drvdata(dev->parent);
+@@ -85,16 +87,30 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ client->cb = cb;
+ client->pdr_notify = pdr;
+ client->priv = priv;
+-
+- mutex_lock(&pg->client_lock);
+- list_add(&client->node, &pg->clients);
+- mutex_unlock(&pg->client_lock);
++ INIT_LIST_HEAD(&client->node);
+
+ devres_add(dev, client);
+
+ return client;
+ }
+-EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client);
++EXPORT_SYMBOL_GPL(devm_pmic_glink_client_alloc);
++
++void pmic_glink_client_register(struct pmic_glink_client *client)
++{
++ struct pmic_glink *pg = client->pg;
++ unsigned long flags;
++
++ mutex_lock(&pg->state_lock);
++ spin_lock_irqsave(&pg->client_lock, flags);
++
++ list_add(&client->node, &pg->clients);
++ client->pdr_notify(client->priv, pg->client_state);
++
++ spin_unlock_irqrestore(&pg->client_lock, flags);
++ mutex_unlock(&pg->state_lock);
++
++}
++EXPORT_SYMBOL_GPL(pmic_glink_client_register);
+
+ int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
+ {
+@@ -110,6 +126,7 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ struct pmic_glink_client *client;
+ struct pmic_glink_hdr *hdr;
+ struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
++ unsigned long flags;
+
+ if (len < sizeof(*hdr)) {
+ dev_warn(pg->dev, "ignoring truncated message\n");
+@@ -118,10 +135,12 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+
+ hdr = data;
+
++ spin_lock_irqsave(&pg->client_lock, flags);
+ list_for_each_entry(client, &pg->clients, node) {
+ if (client->id == le32_to_cpu(hdr->owner))
+ client->cb(data, len, client->priv);
+ }
++ spin_unlock_irqrestore(&pg->client_lock, flags);
+
+ return 0;
+ }
+@@ -161,18 +180,21 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
+ {
+ struct pmic_glink_client *client;
+ unsigned int new_state = pg->client_state;
++ unsigned long flags;
+
+ if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
+ if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+ new_state = SERVREG_SERVICE_STATE_UP;
+ } else {
+- if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
++ if (pg->pdr_state == SERVREG_SERVICE_STATE_DOWN || !pg->ept)
+ new_state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ if (new_state != pg->client_state) {
++ spin_lock_irqsave(&pg->client_lock, flags);
+ list_for_each_entry(client, &pg->clients, node)
+ client->pdr_notify(client->priv, new_state);
++ spin_unlock_irqrestore(&pg->client_lock, flags);
+ pg->client_state = new_state;
+ }
+ }
+@@ -259,7 +281,7 @@ static int pmic_glink_probe(struct platform_device *pdev)
+ pg->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&pg->clients);
+- mutex_init(&pg->client_lock);
++ spin_lock_init(&pg->client_lock);
+ mutex_init(&pg->state_lock);
+
+ match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
+@@ -268,10 +290,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
+ else
+ pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
+
++ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
++ if (IS_ERR(pg->pdr)) {
++ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
++ "failed to initialize pdr\n");
++ return ret;
++ }
++
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
+ if (ret)
+- return ret;
++ goto out_release_pdr_handle;
+ }
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
+@@ -284,17 +313,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
+ goto out_release_altmode_aux;
+ }
+
+- pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+- if (IS_ERR(pg->pdr)) {
+- ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
+- goto out_release_aux_devices;
+- }
+-
+ service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
+ if (IS_ERR(service)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
+ "failed adding pdr lookup for charger_pd\n");
+- goto out_release_pdr_handle;
++ goto out_release_aux_devices;
+ }
+
+ mutex_lock(&__pmic_glink_lock);
+@@ -303,8 +326,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
+
+ return 0;
+
+-out_release_pdr_handle:
+- pdr_handle_release(pg->pdr);
+ out_release_aux_devices:
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
+@@ -314,6 +335,8 @@ static int pmic_glink_probe(struct platform_device *pdev)
+ out_release_ucsi_aux:
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
+ pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
++out_release_pdr_handle:
++ pdr_handle_release(pg->pdr);
+
+ return ret;
+ }
+@@ -361,8 +384,17 @@ static struct platform_driver pmic_glink_driver = {
+
+ static int pmic_glink_init(void)
+ {
+- platform_driver_register(&pmic_glink_driver);
+- register_rpmsg_driver(&pmic_glink_rpmsg_driver);
++ int ret;
++
++ ret = platform_driver_register(&pmic_glink_driver);
++ if (ret < 0)
++ return ret;
++
++ ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver);
++ if (ret < 0) {
++ platform_driver_unregister(&pmic_glink_driver);
++ return ret;
++ }
+
+ return 0;
+ };
+diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
+index d05e0d6edf4930..60b0d79efb0f15 100644
+--- a/drivers/soc/qcom/pmic_glink_altmode.c
++++ b/drivers/soc/qcom/pmic_glink_altmode.c
+@@ -285,7 +285,7 @@ static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmod
+
+ svid = mux == 2 ? USB_TYPEC_DP_SID : 0;
+
+- if (!altmode->ports[port].altmode) {
++ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+@@ -328,7 +328,7 @@ static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmod
+ hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]);
+ hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]);
+
+- if (!altmode->ports[port].altmode) {
++ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+@@ -444,6 +444,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+@@ -454,6 +455,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+
+ if (altmode->ports[port].altmode) {
+ dev_err(dev, "multiple connector definition for port %u\n", port);
++ fwnode_handle_put(fwnode);
+ return -EINVAL;
+ }
+
+@@ -465,56 +467,79 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
+ alt_port->bridge.of_node = to_of_node(fwnode);
+ alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
+- alt_port->bridge.type = DRM_MODE_CONNECTOR_USB;
+-
+- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+- if (ret)
+- return ret;
++ alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
+ alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
+ alt_port->dp_alt.active = 1;
+
+ alt_port->typec_mux = fwnode_typec_mux_get(fwnode);
+- if (IS_ERR(alt_port->typec_mux))
++ if (IS_ERR(alt_port->typec_mux)) {
++ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
+ "failed to acquire mode-switch for port: %d\n",
+ port);
++ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux,
+ alt_port->typec_mux);
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(fwnode);
+ return ret;
++ }
+
+ alt_port->typec_retimer = fwnode_typec_retimer_get(fwnode);
+- if (IS_ERR(alt_port->typec_retimer))
++ if (IS_ERR(alt_port->typec_retimer)) {
++ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_retimer),
+ "failed to acquire retimer-switch for port: %d\n",
+ port);
++ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_retimer,
+ alt_port->typec_retimer);
+- if (ret)
++ if (ret) {
++ fwnode_handle_put(fwnode);
+ return ret;
++ }
+
+ alt_port->typec_switch = fwnode_typec_switch_get(fwnode);
+- if (IS_ERR(alt_port->typec_switch))
++ if (IS_ERR(alt_port->typec_switch)) {
++ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch),
+ "failed to acquire orientation-switch for port: %d\n",
+ port);
++ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch,
+ alt_port->typec_switch);
++ if (ret) {
++ fwnode_handle_put(fwnode);
++ return ret;
++ }
++ }
++
++ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
++ alt_port = &altmode->ports[port];
++ if (!alt_port->altmode)
++ continue;
++
++ ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+ if (ret)
+ return ret;
+ }
+
+- altmode->client = devm_pmic_glink_register_client(dev,
+- altmode->owner_id,
+- pmic_glink_altmode_callback,
+- pmic_glink_altmode_pdr_notify,
+- altmode);
+- return PTR_ERR_OR_ZERO(altmode->client);
++ altmode->client = devm_pmic_glink_client_alloc(dev,
++ altmode->owner_id,
++ pmic_glink_altmode_callback,
++ pmic_glink_altmode_pdr_notify,
++ altmode);
++ if (IS_ERR(altmode->client))
++ return PTR_ERR(altmode->client);
++
++ pmic_glink_client_register(altmode->client);
++
++ return 0;
+ }
+
+ static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = {
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index a021dc71807be0..dfc2d4e38fa9b9 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+@@ -557,7 +558,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
+ for (k = 0; k < msg->num_cmds; k++) {
+- if (addr == msg->cmds[k].addr)
++ if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
+ return -EBUSY;
+ }
+ }
+@@ -645,13 +646,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+ {
+ struct tcs_group *tcs;
+ int tcs_id;
+- unsigned long flags;
++
++ might_sleep();
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+- spin_lock_irqsave(&drv->lock, flags);
++ spin_lock_irq(&drv->lock);
+
+ /* Wait forever for a free tcs. It better be there eventually! */
+ wait_event_lock_irq(drv->tcs_wait,
+@@ -669,7 +671,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+ write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+- spin_unlock_irqrestore(&drv->lock, flags);
++ spin_unlock_irq(&drv->lock);
+
+ /*
+ * These two can be done after the lock is released because:
+diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
+index 08e09642d7f551..62dfc7df935412 100644
+--- a/drivers/soc/qcom/rpmh.c
++++ b/drivers/soc/qcom/rpmh.c
+@@ -183,7 +183,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ }
+
+ if (state == RPMH_ACTIVE_ONLY_STATE) {
+- WARN_ON(irqs_disabled());
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ } else {
+ /* Clean up our call by spoofing tx_done */
+diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
+index f9fd6177118cac..577f1f25ab103c 100644
+--- a/drivers/soc/qcom/smd-rpm.c
++++ b/drivers/soc/qcom/smd-rpm.c
+@@ -196,9 +196,6 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+ {
+ struct qcom_smd_rpm *rpm;
+
+- if (!rpdev->dev.of_node)
+- return -EINVAL;
+-
+ rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+@@ -218,18 +215,38 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+ of_platform_depopulate(&rpdev->dev);
+ }
+
+-static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = {
+- { .name = "rpm_requests", },
+- { /* sentinel */ }
++static const struct of_device_id qcom_smd_rpm_of_match[] = {
++ { .compatible = "qcom,rpm-apq8084" },
++ { .compatible = "qcom,rpm-ipq6018" },
++ { .compatible = "qcom,rpm-ipq9574" },
++ { .compatible = "qcom,rpm-msm8226" },
++ { .compatible = "qcom,rpm-msm8909" },
++ { .compatible = "qcom,rpm-msm8916" },
++ { .compatible = "qcom,rpm-msm8936" },
++ { .compatible = "qcom,rpm-msm8953" },
++ { .compatible = "qcom,rpm-msm8974" },
++ { .compatible = "qcom,rpm-msm8976" },
++ { .compatible = "qcom,rpm-msm8994" },
++ { .compatible = "qcom,rpm-msm8996" },
++ { .compatible = "qcom,rpm-msm8998" },
++ { .compatible = "qcom,rpm-sdm660" },
++ { .compatible = "qcom,rpm-sm6115" },
++ { .compatible = "qcom,rpm-sm6125" },
++ { .compatible = "qcom,rpm-sm6375" },
++ { .compatible = "qcom,rpm-qcm2290" },
++ { .compatible = "qcom,rpm-qcs404" },
++ {}
+ };
+-MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table);
++MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+ static struct rpmsg_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+- .id_table = qcom_smd_rpm_id_table,
+- .drv.name = "qcom_smd_rpm",
++ .drv = {
++ .name = "qcom_smd_rpm",
++ .of_match_table = qcom_smd_rpm_of_match,
++ },
+ };
+
+ static int __init qcom_smd_rpm_init(void)
+diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
+index d4a89d2bb43bb4..2e8568d6cde948 100644
+--- a/drivers/soc/qcom/smem.c
++++ b/drivers/soc/qcom/smem.c
+@@ -359,6 +359,32 @@ static struct qcom_smem *__smem;
+ /* Timeout (ms) for the trylock of remote spinlocks */
+ #define HWSPINLOCK_TIMEOUT 1000
+
++/* The qcom hwspinlock id is always plus one from the smem host id */
++#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)
++
++/**
++ * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
++ * @host: remote processor id
++ *
++ * Busts the hwspin_lock for the given smem host id. This helper is intended
++ * for remoteproc drivers that manage remoteprocs with an equivalent smem
++ * driver instance in the remote firmware. Drivers can force a release of the
++ * smem hwspin_lock if the rproc unexpectedly goes into a bad state.
++ *
++ * Context: Process context.
++ *
++ * Returns: 0 on success, otherwise negative errno.
++ */
++int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
++{
++ /* This function is for remote procs, so ignore SMEM_HOST_APPS */
++ if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
++ return -EINVAL;
++
++ return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
++}
++EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
++
+ /**
+ * qcom_smem_is_available() - Check if SMEM is available
+ *
+diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
+index 497cfb720fcb04..880b41a57da019 100644
+--- a/drivers/soc/qcom/socinfo.c
++++ b/drivers/soc/qcom/socinfo.c
+@@ -114,7 +114,7 @@ static const char *const pmic_models[] = {
+ [50] = "PM8350B",
+ [51] = "PMR735A",
+ [52] = "PMR735B",
+- [55] = "PM2250",
++ [55] = "PM4125",
+ [58] = "PM8450",
+ [65] = "PM8010",
+ };
+diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
+index 3aff106fc11a00..9a229329842320 100644
+--- a/drivers/soc/ti/wkup_m3_ipc.c
++++ b/drivers/soc/ti/wkup_m3_ipc.c
+@@ -16,7 +16,6 @@
+ #include <linux/irq.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/omap-mailbox.h>
+ #include <linux/platform_device.h>
+ #include <linux/remoteproc.h>
+ #include <linux/suspend.h>
+@@ -314,7 +313,6 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
+ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ {
+ struct device *dev = m3_ipc->dev;
+- mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+@@ -330,7 +328,7 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ * the RX callback to avoid multiple interrupts being received
+ * by the CM3.
+ */
+- ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
++ ret = mbox_send_message(m3_ipc->mbox, NULL);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+@@ -352,7 +350,6 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+ {
+ struct device *dev = m3_ipc->dev;
+- mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+@@ -361,7 +358,7 @@ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+ return -EIO;
+ }
+
+- ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
++ ret = mbox_send_message(m3_ipc->mbox, NULL);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
+index bab4ad87aa7500..d5099a3386b4fc 100644
+--- a/drivers/soc/versatile/soc-integrator.c
++++ b/drivers/soc/versatile/soc-integrator.c
+@@ -113,6 +113,7 @@ static int __init integrator_soc_init(void)
+ return -ENODEV;
+
+ syscon_regmap = syscon_node_to_regmap(np);
++ of_node_put(np);
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
+diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
+index c6876d232d8fd6..cf91abe07d38d0 100644
+--- a/drivers/soc/versatile/soc-realview.c
++++ b/drivers/soc/versatile/soc-realview.c
+@@ -4,6 +4,7 @@
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
++#include <linux/device.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/slab.h>
+@@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = {
+
+ ATTRIBUTE_GROUPS(realview);
+
++static void realview_soc_socdev_release(void *data)
++{
++ struct soc_device *soc_dev = data;
++
++ soc_device_unregister(soc_dev);
++}
++
+ static int realview_soc_probe(struct platform_device *pdev)
+ {
+ struct regmap *syscon_regmap;
+@@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev)
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
+- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
++ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+@@ -106,10 +114,14 @@ static int realview_soc_probe(struct platform_device *pdev)
+ soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = realview_groups[0];
+ soc_dev = soc_device_register(soc_dev_attr);
+- if (IS_ERR(soc_dev)) {
+- kfree(soc_dev_attr);
++ if (IS_ERR(soc_dev))
+ return -ENODEV;
+- }
++
++ ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release,
++ soc_dev);
++ if (ret)
++ return ret;
++
+ ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET,
+ &realview_coreid);
+ if (ret)
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index 86a048a10a13f2..098a2ecfd5c689 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -3,6 +3,7 @@
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
++ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
+ */
+@@ -19,7 +20,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+
+-static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
++static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);
+
+ static int virq_sgi;
+ static int event_manager_availability = -EACCES;
+@@ -477,7 +478,7 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
+ }
+ }
+ if (!is_callback_found)
+- pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
++ pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
+ payload[1], payload[2]);
+ }
+
+@@ -555,7 +556,6 @@ static void xlnx_disable_percpu_irq(void *data)
+ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ {
+ int ret = 0;
+- int cpu = smp_processor_id();
+ /*
+ * IRQ related structures are used for the following:
+ * for each SGI interrupt ensure its mapped by GIC IRQ domain
+@@ -592,9 +592,9 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
+ sgi_fwspec.param[0] = sgi_num;
+ virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+- per_cpu(cpu_number1, cpu) = cpu;
+ ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
+- &cpu_number1);
++ &dummy_cpu_number);
++
+ WARN_ON(ret);
+ if (ret) {
+ irq_dispose_mapping(virq_sgi);
+@@ -609,16 +609,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev)
+
+ static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
+ {
+- int cpu = smp_processor_id();
+-
+- per_cpu(cpu_number1, cpu) = cpu;
+-
+ cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+ on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+ irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
+- free_percpu_irq(virq_sgi, &cpu_number1);
++ free_percpu_irq(virq_sgi, &dummy_cpu_number);
+ irq_dispose_mapping(virq_sgi);
+ }
+
+diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
+index c2c819701eec11..d7c784d77208ca 100644
+--- a/drivers/soc/xilinx/zynqmp_power.c
++++ b/drivers/soc/xilinx/zynqmp_power.c
+@@ -188,7 +188,9 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
+ u32 pm_api_version;
+ struct mbox_client *client;
+
+- zynqmp_pm_get_api_version(&pm_api_version);
++ ret = zynqmp_pm_get_api_version(&pm_api_version);
++ if (ret)
++ return ret;
+
+ /* Check PM API version number */
+ if (pm_api_version < ZYNQMP_PM_VERSION)
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index 3a99f6dcdfafa2..79173ab540a6bf 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -148,6 +148,19 @@ static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
+ writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
+ }
+
++static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
++{
++ u32 wake_ctrl;
++
++ wake_ctrl = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
++ if (enable)
++ wake_ctrl |= AMD_SDW_WAKE_INTR_MASK;
++ else
++ wake_ctrl &= ~AMD_SDW_WAKE_INTR_MASK;
++
++ writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
++}
++
+ static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
+ int cmd_offset)
+ {
+@@ -927,6 +940,14 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
+ amd_manager->bus.clk_stop_timeout = 200;
+ amd_manager->bus.link_id = amd_manager->instance;
+
++ /*
++ * Due to BIOS compatibility, the two links are exposed within
++ * the scope of a single controller. If this changes, the
++ * controller_id will have to be updated with drv_data
++ * information.
++ */
++ amd_manager->bus.controller_id = 0;
++
+ switch (amd_manager->instance) {
+ case ACP_SDW0:
+ amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
+@@ -1114,6 +1135,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
+ }
+
+ if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
++ amd_sdw_wake_enable(amd_manager, false);
+ return amd_sdw_clock_stop(amd_manager);
+ } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ /*
+@@ -1140,6 +1162,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
+ return 0;
+ }
+ if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
++ amd_sdw_wake_enable(amd_manager, true);
+ return amd_sdw_clock_stop(amd_manager);
+ } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ ret = amd_sdw_clock_stop(amd_manager);
+diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
+index 5f040151a259b8..6dcc7a449346e2 100644
+--- a/drivers/soundwire/amd_manager.h
++++ b/drivers/soundwire/amd_manager.h
+@@ -152,7 +152,7 @@
+ #define AMD_SDW0_EXT_INTR_MASK 0x200000
+ #define AMD_SDW1_EXT_INTR_MASK 4
+ #define AMD_SDW_IRQ_MASK_0TO7 0x77777777
+-#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
++#define AMD_SDW_IRQ_MASK_8TO11 0x000c7777
+ #define AMD_SDW_IRQ_ERROR_MASK 0xff
+ #define AMD_SDW_MAX_FREQ_NUM 1
+ #define AMD_SDW0_MAX_TX_PORTS 3
+@@ -190,6 +190,7 @@
+ #define AMD_SDW_CLK_RESUME_REQ 2
+ #define AMD_SDW_CLK_RESUME_DONE 3
+ #define AMD_SDW_WAKE_STAT_MASK BIT(16)
++#define AMD_SDW_WAKE_INTR_MASK BIT(16)
+
+ static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
+ AMD_SDW_DEFAULT_CLK_FREQ,
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 0e7bc3c40f9dfe..e7553c38be59d6 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -22,6 +22,10 @@ static int sdw_get_id(struct sdw_bus *bus)
+ return rc;
+
+ bus->id = rc;
++
++ if (bus->controller_id == -1)
++ bus->controller_id = rc;
++
+ return 0;
+ }
+
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 0efc1c3bee5f54..e69982dbd449bc 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -891,8 +891,14 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
+ }
+ }
+
+- if (is_slave)
+- return sdw_handle_slave_status(&cdns->bus, status);
++ if (is_slave) {
++ int ret;
++
++ mutex_lock(&cdns->status_update_lock);
++ ret = sdw_handle_slave_status(&cdns->bus, status);
++ mutex_unlock(&cdns->status_update_lock);
++ return ret;
++ }
+
+ return 0;
+ }
+@@ -989,6 +995,31 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
+ }
+ EXPORT_SYMBOL(sdw_cdns_irq);
+
++static void cdns_check_attached_status_dwork(struct work_struct *work)
++{
++ struct sdw_cdns *cdns =
++ container_of(work, struct sdw_cdns, attach_dwork.work);
++ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
++ u32 val;
++ int ret;
++ int i;
++
++ val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
++
++ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
++ status[i] = val & 0x3;
++ if (status[i])
++ dev_dbg(cdns->dev, "Peripheral %d status: %d\n", i, status[i]);
++ val >>= 2;
++ }
++
++ mutex_lock(&cdns->status_update_lock);
++ ret = sdw_handle_slave_status(&cdns->bus, status);
++ mutex_unlock(&cdns->status_update_lock);
++ if (ret < 0)
++ dev_err(cdns->dev, "%s: sdw_handle_slave_status failed: %d\n", __func__, ret);
++}
++
+ /**
+ * cdns_update_slave_status_work - update slave status in a work since we will need to handle
+ * other interrupts eg. CDNS_MCP_INT_RX_WL during the update slave
+@@ -1745,7 +1776,11 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
+ init_completion(&cdns->tx_complete);
+ cdns->bus.port_ops = &cdns_port_ops;
+
++ mutex_init(&cdns->status_update_lock);
++
+ INIT_WORK(&cdns->work, cdns_update_slave_status_work);
++ INIT_DELAYED_WORK(&cdns->attach_dwork, cdns_check_attached_status_dwork);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(sdw_cdns_probe);
+@@ -1880,7 +1915,7 @@ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+
+ /* check if we found a PDI, else find in bi-directional */
+ if (!pdi)
+- pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
++ pdi = cdns_find_pdi(cdns, 0, stream->num_bd, stream->bd,
+ dai_id);
+
+ if (pdi) {
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
+index bc84435e420f5b..e1d7969ba48ae8 100644
+--- a/drivers/soundwire/cadence_master.h
++++ b/drivers/soundwire/cadence_master.h
+@@ -117,6 +117,8 @@ struct sdw_cdns_dai_runtime {
+ * @link_up: Link status
+ * @msg_count: Messages sent on bus
+ * @dai_runtime_array: runtime context for each allocated DAI.
++ * @status_update_lock: protect concurrency between interrupt-based and delayed work
++ * status update
+ */
+ struct sdw_cdns {
+ struct device *dev;
+@@ -148,10 +150,13 @@ struct sdw_cdns {
+ bool interrupt_enabled;
+
+ struct work_struct work;
++ struct delayed_work attach_dwork;
+
+ struct list_head list;
+
+ struct sdw_cdns_dai_runtime **dai_runtime_array;
++
++ struct mutex status_update_lock; /* add mutual exclusion to sdw_handle_slave_status() */
+ };
+
+ #define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
+diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
+index d1553cb7718747..67abd7e52f092a 100644
+--- a/drivers/soundwire/debugfs.c
++++ b/drivers/soundwire/debugfs.c
+@@ -20,7 +20,7 @@ void sdw_bus_debugfs_init(struct sdw_bus *bus)
+ return;
+
+ /* create the debugfs master-N */
+- snprintf(name, sizeof(name), "master-%d-%d", bus->id, bus->link_id);
++ snprintf(name, sizeof(name), "master-%d-%d", bus->controller_id, bus->link_id);
+ bus->debugfs = debugfs_create_dir(name, sdw_debugfs_root);
+ }
+
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 2a1096dab63d3c..91ab97a456fa9f 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -130,6 +130,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ },
+ .driver_data = (void *)intel_rooks_county,
+ },
++ {
++ /* quirk used for NUC15 LAPRC710 skew */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
++ },
++ .driver_data = (void *)intel_rooks_county,
++ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+@@ -141,7 +149,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
+ },
+ .driver_data = (void *)hp_omen_16,
+ },
+diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
+index 511932c55216c7..bb6b1df2d2c201 100644
+--- a/drivers/soundwire/intel.h
++++ b/drivers/soundwire/intel.h
+@@ -91,6 +91,8 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value)
+
+ #define INTEL_MASTER_RESET_ITERATIONS 10
+
++#define SDW_INTEL_DELAYED_ENUMERATION_MS 100
++
+ #define SDW_INTEL_CHECK_OPS(sdw, cb) ((sdw) && (sdw)->link_res && (sdw)->link_res->hw_ops && \
+ (sdw)->link_res->hw_ops->cb)
+ #define SDW_INTEL_OPS(sdw, cb) ((sdw)->link_res->hw_ops->cb)
+diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
+index a9d25ae0b73fec..e320c912891351 100644
+--- a/drivers/soundwire/intel_ace2x.c
++++ b/drivers/soundwire/intel_ace2x.c
+@@ -23,8 +23,9 @@
+ static void intel_shim_vs_init(struct sdw_intel *sdw)
+ {
+ void __iomem *shim_vs = sdw->link_res->shim_vs;
+- u16 act = 0;
++ u16 act;
+
++ act = intel_readw(shim_vs, SDW_SHIM2_INTEL_VS_ACTMCTL);
+ u16p_replace_bits(&act, 0x1, SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS);
+ act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE;
+ act |= SDW_SHIM2_INTEL_VS_ACTMCTL_DODS;
+diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
+index 7f15e3549e539d..bdfff78ac2f810 100644
+--- a/drivers/soundwire/intel_auxdevice.c
++++ b/drivers/soundwire/intel_auxdevice.c
+@@ -234,6 +234,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
+ cdns->instance = sdw->instance;
+ cdns->msg_count = 0;
+
++ /* single controller for all SoundWire links */
++ bus->controller_id = 0;
++
+ bus->link_id = auxdev->id;
+ bus->clk_stop_timeout = 1;
+
+@@ -395,6 +398,7 @@ static void intel_link_remove(struct auxiliary_device *auxdev)
+ */
+ if (!bus->prop.hw_disabled) {
+ sdw_intel_debugfs_exit(sdw);
++ cancel_delayed_work_sync(&cdns->attach_dwork);
+ sdw_cdns_enable_interrupt(cdns, false);
+ }
+ sdw_bus_master_delete(bus);
+diff --git a/drivers/soundwire/intel_bus_common.c b/drivers/soundwire/intel_bus_common.c
+index e5ac3cc7cb79b1..db9cf211671a3e 100644
+--- a/drivers/soundwire/intel_bus_common.c
++++ b/drivers/soundwire/intel_bus_common.c
+@@ -45,21 +45,24 @@ int intel_start_bus(struct sdw_intel *sdw)
+ return ret;
+ }
+
+- ret = sdw_cdns_exit_reset(cdns);
++ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+- dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
++ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+- ret = sdw_cdns_enable_interrupt(cdns, true);
++ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+- dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
++ dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
+ return ret;
+ }
+
+ sdw_cdns_check_self_clearing_bits(cdns, __func__,
+ true, INTEL_MASTER_RESET_ITERATIONS);
+
++ schedule_delayed_work(&cdns->attach_dwork,
++ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
++
+ return 0;
+ }
+
+@@ -136,21 +139,24 @@ int intel_start_bus_after_reset(struct sdw_intel *sdw)
+ return ret;
+ }
+
+- ret = sdw_cdns_exit_reset(cdns);
++ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+- dev_err(dev, "unable to exit bus reset sequence during resume\n");
++ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+- ret = sdw_cdns_enable_interrupt(cdns, true);
++ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+- dev_err(dev, "cannot enable interrupts during resume\n");
++ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ return ret;
+ }
+
+ }
+ sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+
++ schedule_delayed_work(&cdns->attach_dwork,
++ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
++
+ return 0;
+ }
+
+@@ -184,6 +190,9 @@ int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
+
+ sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+
++ schedule_delayed_work(&cdns->attach_dwork,
++ msecs_to_jiffies(SDW_INTEL_DELAYED_ENUMERATION_MS));
++
+ return 0;
+ }
+
+@@ -194,6 +203,8 @@ int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
+ bool wake_enable = false;
+ int ret;
+
++ cancel_delayed_work_sync(&cdns->attach_dwork);
++
+ if (clock_stop) {
+ ret = sdw_cdns_clock_stop(cdns, true);
+ if (ret < 0)
+diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
+index 9b05c9e25ebe48..51abedbbaa6630 100644
+--- a/drivers/soundwire/master.c
++++ b/drivers/soundwire/master.c
+@@ -145,7 +145,7 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
+ md->dev.fwnode = fwnode;
+ md->dev.dma_mask = parent->dma_mask;
+
+- dev_set_name(&md->dev, "sdw-master-%d", bus->id);
++ dev_set_name(&md->dev, "sdw-master-%d-%d", bus->controller_id, bus->link_id);
+
+ ret = device_register(&md->dev);
+ if (ret) {
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index 55be9f4b8d59ab..e3ae4e4e07ac53 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -1612,6 +1612,9 @@ static int qcom_swrm_probe(struct platform_device *pdev)
+ }
+ }
+
++ /* FIXME: is there a DT-defined value to use ? */
++ ctrl->bus.controller_id = -1;
++
+ ret = sdw_bus_master_add(&ctrl->bus, dev, dev->fwnode);
+ if (ret) {
+ dev_err(dev, "Failed to register Soundwire controller (%d)\n",
+diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
+index c1c1a2ac293af4..060c2982e26b00 100644
+--- a/drivers/soundwire/slave.c
++++ b/drivers/soundwire/slave.c
+@@ -39,14 +39,14 @@ int sdw_slave_add(struct sdw_bus *bus,
+ slave->dev.fwnode = fwnode;
+
+ if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
+- /* name shall be sdw:link:mfg:part:class */
+- dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x",
+- bus->link_id, id->mfg_id, id->part_id,
++ /* name shall be sdw:ctrl:link:mfg:part:class */
++ dev_set_name(&slave->dev, "sdw:%01x:%01x:%04x:%04x:%02x",
++ bus->controller_id, bus->link_id, id->mfg_id, id->part_id,
+ id->class_id);
+ } else {
+- /* name shall be sdw:link:mfg:part:class:unique */
+- dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x:%01x",
+- bus->link_id, id->mfg_id, id->part_id,
++ /* name shall be sdw:ctrl:link:mfg:part:class:unique */
++ dev_set_name(&slave->dev, "sdw:%01x:%01x:%04x:%04x:%02x:%01x",
++ bus->controller_id, bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+ }
+
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index d77a8a0d42c8d3..68d54887992d91 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -742,14 +742,15 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
+ * sdw_ml_sync_bank_switch: Multilink register bank switch
+ *
+ * @bus: SDW bus instance
++ * @multi_link: whether this is a multi-link stream with hardware-based sync
+ *
+ * Caller function should free the buffers on error
+ */
+-static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
++static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link)
+ {
+ unsigned long time_left;
+
+- if (!bus->multi_link)
++ if (!multi_link)
+ return 0;
+
+ /* Wait for completion of transfer */
+@@ -847,7 +848,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
+ bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
+
+ /* Check if bank switch was successful */
+- ret = sdw_ml_sync_bank_switch(bus);
++ ret = sdw_ml_sync_bank_switch(bus, multi_link);
+ if (ret < 0) {
+ dev_err(bus->dev,
+ "multi link bank switch failed: %d\n", ret);
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 2c21d5b96fdcef..3ce0fd5df8e9ca 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -1157,6 +1157,7 @@ config SPI_XTENSA_XTFPGA
+ config SPI_ZYNQ_QSPI
+ tristate "Xilinx Zynq QSPI controller"
+ depends on ARCH_ZYNQ || COMPILE_TEST
++ depends on SPI_MEM
+ help
+ This enables support for the Zynq Quad SPI controller
+ in master mode.
+@@ -1164,9 +1165,10 @@ config SPI_ZYNQ_QSPI
+
+ config SPI_ZYNQMP_GQSPI
+ tristate "Xilinx ZynqMP GQSPI controller"
+- depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST
++ depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST
+ help
+ Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
++ This controller only supports SPI memory interface.
+
+ config SPI_AMD
+ tristate "AMD SPI controller"
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 3d1252566134b3..6f9e9d87167758 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+- if (aq->mr != QSPI_MR_SMM) {
+- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
+- aq->mr = QSPI_MR_SMM;
++ if (!(aq->mr & QSPI_MR_SMM)) {
++ aq->mr |= QSPI_MR_SMM;
++ atmel_qspi_write(aq->mr, aq, QSPI_MR);
+ }
+
+ /* Clear pending interrupts */
+@@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi)
+ if (ret < 0)
+ return ret;
+
+- aq->scr = QSPI_SCR_SCBR(scbr);
++ aq->scr &= ~QSPI_SCR_SCBR_MASK;
++ aq->scr |= QSPI_SCR_SCBR(scbr);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+
+ pm_runtime_mark_last_busy(ctrl->dev.parent);
+@@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
+ if (ret < 0)
+ return ret;
+
++ aq->scr &= ~QSPI_SCR_DLYBS_MASK;
+ aq->scr |= QSPI_SCR_DLYBS(cs_setup);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
+
+@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq)
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+- atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
+- aq->mr = QSPI_MR_SMM;
++ aq->mr |= QSPI_MR_SMM;
++ atmel_qspi_write(aq->mr, aq, QSPI_MR);
+
+ /* Enable the QSPI controller */
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+@@ -726,6 +728,7 @@ static void atmel_qspi_remove(struct platform_device *pdev)
+ clk_unprepare(aq->pclk);
+
+ pm_runtime_disable(&pdev->dev);
++ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ }
+
+@@ -756,8 +759,15 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ int ret;
+
+- clk_prepare(aq->pclk);
+- clk_prepare(aq->qspick);
++ ret = clk_prepare(aq->pclk);
++ if (ret)
++ return ret;
++
++ ret = clk_prepare(aq->qspick);
++ if (ret) {
++ clk_unprepare(aq->pclk);
++ return ret;
++ }
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 6aa8adbe4170cd..e073d54873b1f0 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -22,6 +22,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/pm_runtime.h>
++#include <linux/iopoll.h>
+ #include <trace/events/spi.h>
+
+ /* SPI register offsets */
+@@ -279,6 +280,7 @@ struct atmel_spi {
+ bool keep_cs;
+
+ u32 fifo_size;
++ bool last_polarity;
+ u8 native_cs_free;
+ u8 native_cs_for_gpio;
+ };
+@@ -291,6 +293,22 @@ struct atmel_spi_device {
+ #define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
+ #define INVALID_DMA_ADDRESS 0xffffffff
+
++/*
++ * This frequency can be anything supported by the controller, but to avoid
++ * unnecessary delay, the highest possible frequency is chosen.
++ *
++ * This frequency is the highest possible which is not interfering with other
++ * chip select registers (see Note for Serial Clock Bit Rate configuration in
++ * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283)
++ */
++#define DUMMY_MSG_FREQUENCY 0x02
++/*
++ * 8 bits is the minimum data the controller is capable of sending.
++ *
++ * This message can be anything as it should not be treated by any SPI device.
++ */
++#define DUMMY_MSG 0xAA
++
+ /*
+ * Version 2 of the SPI controller has
+ * - CR.LASTXFER
+@@ -304,6 +322,43 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
+ return as->caps.is_spi2;
+ }
+
++/*
++ * Send a dummy message.
++ *
++ * This is sometimes needed when using a CS GPIO to force clock transition when
++ * switching between devices with different polarities.
++ */
++static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select)
++{
++ u32 status;
++ u32 csr;
++
++ /*
++ * Set a clock frequency to allow sending message on SPI bus.
++ * The frequency here can be anything, but is needed for
++ * the controller to send the data.
++ */
++ csr = spi_readl(as, CSR0 + 4 * chip_select);
++ csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr);
++ spi_writel(as, CSR0 + 4 * chip_select, csr);
++
++ /*
++ * Read all data coming from SPI bus, needed to be able to send
++ * the message.
++ */
++ spi_readl(as, RDR);
++ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
++ spi_readl(as, RDR);
++ cpu_relax();
++ }
++
++ spi_writel(as, TDR, DUMMY_MSG);
++
++ readl_poll_timeout_atomic(as->regs + SPI_SR, status,
++ (status & SPI_BIT(TXEMPTY)), 1, 1000);
++}
++
++
+ /*
+ * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
+ * they assume that spi slave device state will not change on deselect, so
+@@ -320,11 +375,17 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
+ * Master on Chip Select 0.") No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
++ *
++ * When changing the clock polarity, the SPI controller waits for the next
++ * transmission to enforce the default clock state. This may be an issue when
++ * using a GPIO as Chip Select: the clock level is applied only when the first
++ * packet is sent, once the CS has already been asserted. The workaround is to
++ * avoid this by sending a first (dummy) message before toggling the CS state.
+ */
+-
+ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+ {
+ struct atmel_spi_device *asd = spi->controller_state;
++ bool new_polarity;
+ int chip_select;
+ u32 mr;
+
+@@ -353,6 +414,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+ }
+
+ mr = spi_readl(as, MR);
++
++ /*
++ * Ensures the clock polarity is valid before we actually
++ * assert the CS to avoid spurious clock edges being
++ * processed by the spi devices.
++ */
++ if (spi_get_csgpiod(spi, 0)) {
++ new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0;
++ if (new_polarity != as->last_polarity) {
++ /*
++ * Need to disable the GPIO before sending the dummy
++ * message because it is already set by the spi core.
++ */
++ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0);
++ atmel_spi_send_dummy(as, spi, chip_select);
++ as->last_polarity = new_polarity;
++ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1);
++ }
++ }
+ } else {
+ u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
+ int i;
+@@ -1336,12 +1416,10 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
+ }
+
+ dma_timeout = msecs_to_jiffies(spi_controller_xfer_timeout(host, xfer));
+- ret_timeout = wait_for_completion_interruptible_timeout(&as->xfer_completion,
+- dma_timeout);
+- if (ret_timeout <= 0) {
+- dev_err(&spi->dev, "spi transfer %s\n",
+- !ret_timeout ? "timeout" : "canceled");
+- as->done_status = ret_timeout < 0 ? ret_timeout : -EIO;
++ ret_timeout = wait_for_completion_timeout(&as->xfer_completion, dma_timeout);
++ if (!ret_timeout) {
++ dev_err(&spi->dev, "spi transfer timeout\n");
++ as->done_status = -EIO;
+ }
+
+ if (as->done_status)
+diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
+index 0258c9a72fdcc6..9faee4fcc049a1 100644
+--- a/drivers/spi/spi-axi-spi-engine.c
++++ b/drivers/spi/spi-axi-spi-engine.c
+@@ -6,6 +6,8 @@
+ */
+
+ #include <linux/clk.h>
++#include <linux/fpga/adi-axi-common.h>
++#include <linux/idr.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+@@ -13,12 +15,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/spi/spi.h>
+
+-#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
+-#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
+-#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
+-
+-#define SPI_ENGINE_REG_VERSION 0x00
+-
+ #define SPI_ENGINE_REG_RESET 0x40
+
+ #define SPI_ENGINE_REG_INT_ENABLE 0x80
+@@ -78,28 +74,42 @@ struct spi_engine_program {
+ uint16_t instructions[];
+ };
+
+-struct spi_engine {
+- struct clk *clk;
+- struct clk *ref_clk;
+-
+- spinlock_t lock;
+-
+- void __iomem *base;
+-
+- struct spi_message *msg;
++/**
++ * struct spi_engine_message_state - SPI engine per-message state
++ */
++struct spi_engine_message_state {
++ /** Instructions for executing this message. */
+ struct spi_engine_program *p;
++ /** Number of elements in cmd_buf array. */
+ unsigned cmd_length;
++ /** Array of commands not yet written to CMD FIFO. */
+ const uint16_t *cmd_buf;
+-
++ /** Next xfer with tx_buf not yet fully written to TX FIFO. */
+ struct spi_transfer *tx_xfer;
++ /** Size of tx_buf in bytes. */
+ unsigned int tx_length;
++ /** Bytes not yet written to TX FIFO. */
+ const uint8_t *tx_buf;
+-
++ /** Next xfer with rx_buf not yet fully read from RX FIFO. */
+ struct spi_transfer *rx_xfer;
++ /** Size of rx_buf in bytes. */
+ unsigned int rx_length;
++ /** Bytes not yet read from the RX FIFO. */
+ uint8_t *rx_buf;
++ /** ID to correlate SYNC interrupts with this message. */
++ u8 sync_id;
++};
++
++struct spi_engine {
++ struct clk *clk;
++ struct clk *ref_clk;
++
++ spinlock_t lock;
++
++ void __iomem *base;
+
+- unsigned int sync_id;
++ struct spi_message *msg;
++ struct ida sync_ida;
+ unsigned int completed_id;
+
+ unsigned int int_enable;
+@@ -258,100 +268,105 @@ static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+
+ static void spi_engine_tx_next(struct spi_engine *spi_engine)
+ {
+- struct spi_transfer *xfer = spi_engine->tx_xfer;
++ struct spi_engine_message_state *st = spi_engine->msg->state;
++ struct spi_transfer *xfer = st->tx_xfer;
+
+ do {
+ spi_engine_xfer_next(spi_engine, &xfer);
+ } while (xfer && !xfer->tx_buf);
+
+- spi_engine->tx_xfer = xfer;
++ st->tx_xfer = xfer;
+ if (xfer) {
+- spi_engine->tx_length = xfer->len;
+- spi_engine->tx_buf = xfer->tx_buf;
++ st->tx_length = xfer->len;
++ st->tx_buf = xfer->tx_buf;
+ } else {
+- spi_engine->tx_buf = NULL;
++ st->tx_buf = NULL;
+ }
+ }
+
+ static void spi_engine_rx_next(struct spi_engine *spi_engine)
+ {
+- struct spi_transfer *xfer = spi_engine->rx_xfer;
++ struct spi_engine_message_state *st = spi_engine->msg->state;
++ struct spi_transfer *xfer = st->rx_xfer;
+
+ do {
+ spi_engine_xfer_next(spi_engine, &xfer);
+ } while (xfer && !xfer->rx_buf);
+
+- spi_engine->rx_xfer = xfer;
++ st->rx_xfer = xfer;
+ if (xfer) {
+- spi_engine->rx_length = xfer->len;
+- spi_engine->rx_buf = xfer->rx_buf;
++ st->rx_length = xfer->len;
++ st->rx_buf = xfer->rx_buf;
+ } else {
+- spi_engine->rx_buf = NULL;
++ st->rx_buf = NULL;
+ }
+ }
+
+ static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+ {
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
++ struct spi_engine_message_state *st = spi_engine->msg->state;
+ unsigned int n, m, i;
+ const uint16_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
+- while (n && spi_engine->cmd_length) {
+- m = min(n, spi_engine->cmd_length);
+- buf = spi_engine->cmd_buf;
++ while (n && st->cmd_length) {
++ m = min(n, st->cmd_length);
++ buf = st->cmd_buf;
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+- spi_engine->cmd_buf += m;
+- spi_engine->cmd_length -= m;
++ st->cmd_buf += m;
++ st->cmd_length -= m;
+ n -= m;
+ }
+
+- return spi_engine->cmd_length != 0;
++ return st->cmd_length != 0;
+ }
+
+ static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+ {
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
++ struct spi_engine_message_state *st = spi_engine->msg->state;
+ unsigned int n, m, i;
+ const uint8_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
+- while (n && spi_engine->tx_length) {
+- m = min(n, spi_engine->tx_length);
+- buf = spi_engine->tx_buf;
++ while (n && st->tx_length) {
++ m = min(n, st->tx_length);
++ buf = st->tx_buf;
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+- spi_engine->tx_buf += m;
+- spi_engine->tx_length -= m;
++ st->tx_buf += m;
++ st->tx_length -= m;
+ n -= m;
+- if (spi_engine->tx_length == 0)
++ if (st->tx_length == 0)
+ spi_engine_tx_next(spi_engine);
+ }
+
+- return spi_engine->tx_length != 0;
++ return st->tx_length != 0;
+ }
+
+ static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+ {
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
++ struct spi_engine_message_state *st = spi_engine->msg->state;
+ unsigned int n, m, i;
+ uint8_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
+- while (n && spi_engine->rx_length) {
+- m = min(n, spi_engine->rx_length);
+- buf = spi_engine->rx_buf;
++ while (n && st->rx_length) {
++ m = min(n, st->rx_length);
++ buf = st->rx_buf;
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+- spi_engine->rx_buf += m;
+- spi_engine->rx_length -= m;
++ st->rx_buf += m;
++ st->rx_length -= m;
+ n -= m;
+- if (spi_engine->rx_length == 0)
++ if (st->rx_length == 0)
+ spi_engine_rx_next(spi_engine);
+ }
+
+- return spi_engine->rx_length != 0;
++ return st->rx_length != 0;
+ }
+
+ static irqreturn_t spi_engine_irq(int irq, void *devid)
+@@ -387,12 +402,16 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
+ disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+ }
+
+- if (pending & SPI_ENGINE_INT_SYNC) {
+- if (spi_engine->msg &&
+- spi_engine->completed_id == spi_engine->sync_id) {
++ if (pending & SPI_ENGINE_INT_SYNC && spi_engine->msg) {
++ struct spi_engine_message_state *st = spi_engine->msg->state;
++
++ if (spi_engine->completed_id == st->sync_id) {
+ struct spi_message *msg = spi_engine->msg;
++ struct spi_engine_message_state *st = msg->state;
+
+- kfree(spi_engine->p);
++ ida_free(&spi_engine->sync_ida, st->sync_id);
++ kfree(st->p);
++ kfree(st);
+ msg->status = 0;
+ msg->actual_length = msg->frame_length;
+ spi_engine->msg = NULL;
+@@ -417,29 +436,46 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
+ {
+ struct spi_engine_program p_dry, *p;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
++ struct spi_engine_message_state *st;
+ unsigned int int_enable = 0;
+ unsigned long flags;
+ size_t size;
++ int ret;
++
++ st = kzalloc(sizeof(*st), GFP_KERNEL);
++ if (!st)
++ return -ENOMEM;
+
+ p_dry.length = 0;
+ spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+
+ size = sizeof(*p->instructions) * (p_dry.length + 1);
+ p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
+- if (!p)
++ if (!p) {
++ kfree(st);
+ return -ENOMEM;
++ }
++
++ ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
++ if (ret < 0) {
++ kfree(p);
++ kfree(st);
++ return ret;
++ }
++
++ st->sync_id = ret;
++
+ spi_engine_compile_message(spi_engine, msg, false, p);
+
+ spin_lock_irqsave(&spi_engine->lock, flags);
+- spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
+- spi_engine_program_add_cmd(p, false,
+- SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
++ spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));
+
++ msg->state = st;
+ spi_engine->msg = msg;
+- spi_engine->p = p;
++ st->p = p;
+
+- spi_engine->cmd_buf = p->instructions;
+- spi_engine->cmd_length = p->length;
++ st->cmd_buf = p->instructions;
++ st->cmd_length = p->length;
+ if (spi_engine_write_cmd_fifo(spi_engine))
+ int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+
+@@ -448,7 +484,7 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
+ int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+
+ spi_engine_rx_next(spi_engine);
+- if (spi_engine->rx_length != 0)
++ if (st->rx_length != 0)
+ int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+
+ int_enable |= SPI_ENGINE_INT_SYNC;
+@@ -473,52 +509,34 @@ static int spi_engine_probe(struct platform_device *pdev)
+ if (irq < 0)
+ return irq;
+
+- spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
+- if (!spi_engine)
+- return -ENOMEM;
+-
+- host = spi_alloc_host(&pdev->dev, 0);
++ host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
+ if (!host)
+ return -ENOMEM;
+
+- spi_controller_set_devdata(host, spi_engine);
++ spi_engine = spi_controller_get_devdata(host);
+
+ spin_lock_init(&spi_engine->lock);
++ ida_init(&spi_engine->sync_ida);
+
+- spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+- if (IS_ERR(spi_engine->clk)) {
+- ret = PTR_ERR(spi_engine->clk);
+- goto err_put_host;
+- }
+-
+- spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
+- if (IS_ERR(spi_engine->ref_clk)) {
+- ret = PTR_ERR(spi_engine->ref_clk);
+- goto err_put_host;
+- }
++ spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
++ if (IS_ERR(spi_engine->clk))
++ return PTR_ERR(spi_engine->clk);
+
+- ret = clk_prepare_enable(spi_engine->clk);
+- if (ret)
+- goto err_put_host;
+-
+- ret = clk_prepare_enable(spi_engine->ref_clk);
+- if (ret)
+- goto err_clk_disable;
++ spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
++ if (IS_ERR(spi_engine->ref_clk))
++ return PTR_ERR(spi_engine->ref_clk);
+
+ spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(spi_engine->base)) {
+- ret = PTR_ERR(spi_engine->base);
+- goto err_ref_clk_disable;
+- }
+-
+- version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+- if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+- dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+- SPI_ENGINE_VERSION_MAJOR(version),
+- SPI_ENGINE_VERSION_MINOR(version),
+- SPI_ENGINE_VERSION_PATCH(version));
+- ret = -ENODEV;
+- goto err_ref_clk_disable;
++ if (IS_ERR(spi_engine->base))
++ return PTR_ERR(spi_engine->base);
++
++ version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
++ if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
++ dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
++ ADI_AXI_PCORE_VER_MAJOR(version),
++ ADI_AXI_PCORE_VER_MINOR(version),
++ ADI_AXI_PCORE_VER_PATCH(version));
++ return -ENODEV;
+ }
+
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
+@@ -527,7 +545,7 @@ static int spi_engine_probe(struct platform_device *pdev)
+
+ ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
+ if (ret)
+- goto err_ref_clk_disable;
++ return ret;
+
+ host->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+@@ -545,18 +563,12 @@ static int spi_engine_probe(struct platform_device *pdev)
+ return 0;
+ err_free_irq:
+ free_irq(irq, host);
+-err_ref_clk_disable:
+- clk_disable_unprepare(spi_engine->ref_clk);
+-err_clk_disable:
+- clk_disable_unprepare(spi_engine->clk);
+-err_put_host:
+- spi_controller_put(host);
+ return ret;
+ }
+
+ static void spi_engine_remove(struct platform_device *pdev)
+ {
+- struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
++ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ int irq = platform_get_irq(pdev, 0);
+
+@@ -564,14 +576,9 @@ static void spi_engine_remove(struct platform_device *pdev)
+
+ free_irq(irq, host);
+
+- spi_controller_put(host);
+-
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+-
+- clk_disable_unprepare(spi_engine->ref_clk);
+- clk_disable_unprepare(spi_engine->clk);
+ }
+
+ static const struct of_device_id spi_engine_match_table[] = {
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index ef08fcac2f6dab..0407b91183caa7 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -19,7 +19,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/spi/spi.h>
+-#include <linux/spi/spi-mem.h>
++#include <linux/mtd/spi-nor.h>
+ #include <linux/sysfs.h>
+ #include <linux/types.h>
+ #include "spi-bcm-qspi.h"
+@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+
+ /* non-aligned and very short transfers are handled by MSPI */
+ if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
+- len < 4)
++ len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
+ mspi_read = true;
+
+ if (!has_bspi(qspi) || mspi_read)
+diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
+index aac41bd05f98f8..ef3a7226db125c 100644
+--- a/drivers/spi/spi-bcm63xx.c
++++ b/drivers/spi/spi-bcm63xx.c
+@@ -466,12 +466,14 @@ static const struct platform_device_id bcm63xx_spi_dev_match[] = {
+ {
+ },
+ };
++MODULE_DEVICE_TABLE(platform, bcm63xx_spi_dev_match);
+
+ static const struct of_device_id bcm63xx_spi_of_match[] = {
+ { .compatible = "brcm,bcm6348-spi", .data = &bcm6348_spi_reg_offsets },
+ { .compatible = "brcm,bcm6358-spi", .data = &bcm6358_spi_reg_offsets },
+ { },
+ };
++MODULE_DEVICE_TABLE(of, bcm63xx_spi_of_match);
+
+ static int bcm63xx_spi_probe(struct platform_device *pdev)
+ {
+@@ -582,13 +584,15 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
+
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+
+- pm_runtime_enable(&pdev->dev);
++ ret = devm_pm_runtime_enable(&pdev->dev);
++ if (ret)
++ goto out_clk_disable;
+
+ /* register and we are done */
+ ret = devm_spi_register_controller(dev, host);
+ if (ret) {
+ dev_err(dev, "spi register failed\n");
+- goto out_pm_disable;
++ goto out_clk_disable;
+ }
+
+ dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
+@@ -596,8 +600,6 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
+
+ return 0;
+
+-out_pm_disable:
+- pm_runtime_disable(&pdev->dev);
+ out_clk_disable:
+ clk_disable_unprepare(clk);
+ out_err:
+diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c
+index 9f64afd8164ea9..4965bc86d7f52a 100644
+--- a/drivers/spi/spi-bcmbca-hsspi.c
++++ b/drivers/spi/spi-bcmbca-hsspi.c
+@@ -546,12 +546,14 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
+ goto out_put_host;
+ }
+
+- pm_runtime_enable(&pdev->dev);
++ ret = devm_pm_runtime_enable(&pdev->dev);
++ if (ret)
++ goto out_put_host;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register sysfs group\n");
+- goto out_pm_disable;
++ goto out_put_host;
+ }
+
+ /* register and we are done */
+@@ -565,8 +567,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
+
+ out_sysgroup_disable:
+ sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+-out_pm_disable:
+- pm_runtime_disable(&pdev->dev);
+ out_put_host:
+ spi_controller_put(host);
+ out_disable_pll_clk:
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index b50db71ac4cccc..08811577d8f8b2 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1825,7 +1825,7 @@ static int cqspi_probe(struct platform_device *pdev)
+ if (ddata->jh7110_clk_init) {
+ ret = cqspi_jh7110_clk_init(pdev, cqspi);
+ if (ret)
+- goto probe_clk_failed;
++ goto probe_reset_failed;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+@@ -1872,6 +1872,8 @@ static int cqspi_probe(struct platform_device *pdev)
+ probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+ probe_reset_failed:
++ if (cqspi->is_jh7110)
++ cqspi_jh7110_disable_clk(pdev, cqspi);
+ clk_disable_unprepare(cqspi->clk);
+ probe_clk_failed:
+ pm_runtime_put_sync(dev);
+@@ -1902,10 +1904,9 @@ static void cqspi_remove(struct platform_device *pdev)
+ static int cqspi_suspend(struct device *dev)
+ {
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+- struct spi_controller *host = dev_get_drvdata(dev);
+ int ret;
+
+- ret = spi_controller_suspend(host);
++ ret = spi_controller_suspend(cqspi->host);
+ cqspi_controller_enable(cqspi, 0);
+
+ clk_disable_unprepare(cqspi->clk);
+@@ -1916,7 +1917,6 @@ static int cqspi_suspend(struct device *dev)
+ static int cqspi_resume(struct device *dev)
+ {
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+- struct spi_controller *host = dev_get_drvdata(dev);
+
+ clk_prepare_enable(cqspi->clk);
+ cqspi_wait_idle(cqspi);
+@@ -1925,7 +1925,7 @@ static int cqspi_resume(struct device *dev)
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+- return spi_controller_resume(host);
++ return spi_controller_resume(cqspi->host);
+ }
+
+ static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index b7e04b03be58e3..ee342501b759f8 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -145,6 +145,9 @@
+ #define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
+ #define CDNS_XSPI_TRD_STATUS 0x0104
+
++#define MODE_NO_OF_BYTES GENMASK(25, 24)
++#define MODEBYTES_COUNT 1
++
+ /* Helper macros for filling command registers */
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+@@ -157,9 +160,10 @@
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+
+-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
++ FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+@@ -173,12 +177,12 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+
+-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ ((op)->data.nbytes >> 16) & 0xffff) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ (op)->dummy.buswidth != 0 ? \
+- (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++ (((dummybytes) * 8) / (op)->dummy.buswidth) : \
+ 0))
+
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ u32 cmd_regs[6];
+ u32 cmd_status;
+ int ret;
++ int dummybytes = op->dummy.nbytes;
+
+ ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ if (ret < 0)
+@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ memset(cmd_regs, 0, sizeof(cmd_regs));
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+- cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
++ if (dummybytes != 0) {
++ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
++ dummybytes--;
++ } else {
++ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
++ }
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+- cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
++ cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
+index 12c940ba074abd..81edf0a3ddf843 100644
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -317,6 +317,15 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
+ xspi->rx_bytes -= nrx;
+
+ while (ntx || nrx) {
++ if (nrx) {
++ u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
++
++ if (xspi->rxbuf)
++ *xspi->rxbuf++ = data;
++
++ nrx--;
++ }
++
+ if (ntx) {
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+@@ -326,14 +335,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
+ ntx--;
+ }
+
+- if (nrx) {
+- u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+-
+- if (xspi->rxbuf)
+- *xspi->rxbuf++ = data;
+-
+- nrx--;
+- }
+ }
+ }
+
+@@ -451,7 +452,6 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
+ udelay(10);
+
+ cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
+- spi_transfer_delay_exec(transfer);
+
+ cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
+ return transfer->len;
+@@ -581,31 +581,19 @@ static int cdns_spi_probe(struct platform_device *pdev)
+ goto remove_ctlr;
+ }
+
+- xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
++ xspi->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(xspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xspi->pclk);
+ goto remove_ctlr;
+ }
+
+- ret = clk_prepare_enable(xspi->pclk);
+- if (ret) {
+- dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+- goto remove_ctlr;
+- }
+-
+ if (!spi_controller_is_target(ctlr)) {
+- xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
++ xspi->ref_clk = devm_clk_get_enabled(&pdev->dev, "ref_clk");
+ if (IS_ERR(xspi->ref_clk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xspi->ref_clk);
+- goto clk_dis_apb;
+- }
+-
+- ret = clk_prepare_enable(xspi->ref_clk);
+- if (ret) {
+- dev_err(&pdev->dev, "Unable to enable device clock.\n");
+- goto clk_dis_apb;
++ goto remove_ctlr;
+ }
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+@@ -677,12 +665,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
+
+ clk_dis_all:
+ if (!spi_controller_is_target(ctlr)) {
+- pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+- clk_disable_unprepare(xspi->ref_clk);
++ pm_runtime_set_suspended(&pdev->dev);
+ }
+-clk_dis_apb:
+- clk_disable_unprepare(xspi->pclk);
+ remove_ctlr:
+ spi_controller_put(ctlr);
+ return ret;
+@@ -703,10 +688,10 @@ static void cdns_spi_remove(struct platform_device *pdev)
+
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+
+- clk_disable_unprepare(xspi->ref_clk);
+- clk_disable_unprepare(xspi->pclk);
+- pm_runtime_set_suspended(&pdev->dev);
+- pm_runtime_disable(&pdev->dev);
++ if (!spi_controller_is_target(ctlr)) {
++ pm_runtime_disable(&pdev->dev);
++ pm_runtime_set_suspended(&pdev->dev);
++ }
+
+ spi_unregister_controller(ctlr);
+ }
+diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
+index f0b630fe16c3c8..b341b6908df06d 100644
+--- a/drivers/spi/spi-coldfire-qspi.c
++++ b/drivers/spi/spi-coldfire-qspi.c
+@@ -441,7 +441,6 @@ static void mcfqspi_remove(struct platform_device *pdev)
+ mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
+
+ mcfqspi_cs_teardown(mcfqspi);
+- clk_disable_unprepare(mcfqspi->clk);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
+index d239fc5a49ccca..3169febd805146 100644
+--- a/drivers/spi/spi-cs42l43.c
++++ b/drivers/spi/spi-cs42l43.c
+@@ -19,7 +19,7 @@
+ #include <linux/units.h>
+
+ #define CS42L43_FIFO_SIZE 16
+-#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
++#define CS42L43_SPI_ROOT_HZ 49152000
+ #define CS42L43_SPI_MAX_LENGTH 65532
+
+ enum cs42l43_spi_cmd {
+@@ -244,7 +244,10 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
+ priv->ctlr->use_gpio_descriptors = true;
+ priv->ctlr->auto_runtime_pm = true;
+
+- devm_pm_runtime_enable(priv->dev);
++ ret = devm_pm_runtime_enable(priv->dev);
++ if (ret)
++ return ret;
++
+ pm_runtime_idle(priv->dev);
+
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 11991eb1263644..13313f07839b6f 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -82,6 +82,10 @@
+ #define TCR_RXMSK BIT(19)
+ #define TCR_TXMSK BIT(18)
+
++struct fsl_lpspi_devtype_data {
++ u8 prescale_max;
++};
++
+ struct lpspi_config {
+ u8 bpw;
+ u8 chip_select;
+@@ -119,10 +123,25 @@ struct fsl_lpspi_data {
+ bool usedma;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
++
++ const struct fsl_lpspi_devtype_data *devtype_data;
++};
++
++/*
++ * ERR051608 fixed or not:
++ * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
++ */
++static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
++ .prescale_max = 1,
++};
++
++static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
++ .prescale_max = 7,
+ };
+
+ static const struct of_device_id fsl_lpspi_dt_ids[] = {
+- { .compatible = "fsl,imx7ulp-spi", },
++ { .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
++ { .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
+@@ -296,10 +315,12 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ {
+ struct lpspi_config config = fsl_lpspi->config;
+- unsigned int perclk_rate, scldiv;
++ unsigned int perclk_rate, scldiv, div;
++ u8 prescale_max;
+ u8 prescale;
+
+ perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
++ prescale_max = fsl_lpspi->devtype_data->prescale_max;
+
+ if (!config.speed_hz) {
+ dev_err(fsl_lpspi->dev,
+@@ -313,8 +334,10 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ return -EINVAL;
+ }
+
+- for (prescale = 0; prescale < 8; prescale++) {
+- scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
++ div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
++
++ for (prescale = 0; prescale <= prescale_max; prescale++) {
++ scldiv = div / (1 << prescale) - 2;
+ if (scldiv < 256) {
+ fsl_lpspi->config.prescale = prescale;
+ break;
+@@ -820,6 +843,7 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
+
+ static int fsl_lpspi_probe(struct platform_device *pdev)
+ {
++ const struct fsl_lpspi_devtype_data *devtype_data;
+ struct fsl_lpspi_data *fsl_lpspi;
+ struct spi_controller *controller;
+ struct resource *res;
+@@ -828,13 +852,17 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ u32 temp;
+ bool is_target;
+
++ devtype_data = of_device_get_match_data(&pdev->dev);
++ if (!devtype_data)
++ return -ENODEV;
++
+ is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
+ if (is_target)
+- controller = spi_alloc_target(&pdev->dev,
+- sizeof(struct fsl_lpspi_data));
++ controller = devm_spi_alloc_target(&pdev->dev,
++ sizeof(struct fsl_lpspi_data));
+ else
+- controller = spi_alloc_host(&pdev->dev,
+- sizeof(struct fsl_lpspi_data));
++ controller = devm_spi_alloc_host(&pdev->dev,
++ sizeof(struct fsl_lpspi_data));
+
+ if (!controller)
+ return -ENOMEM;
+@@ -846,45 +874,46 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ fsl_lpspi->is_target = is_target;
+ fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
+ "fsl,spi-only-use-cs1-sel");
++ fsl_lpspi->devtype_data = devtype_data;
+
+ init_completion(&fsl_lpspi->xfer_done);
+
+ fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(fsl_lpspi->base)) {
+ ret = PTR_ERR(fsl_lpspi->base);
+- goto out_controller_put;
++ return ret;
+ }
+ fsl_lpspi->base_phys = res->start;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+- goto out_controller_put;
++ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
+ dev_name(&pdev->dev), fsl_lpspi);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+- goto out_controller_put;
++ return ret;
+ }
+
+ fsl_lpspi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fsl_lpspi->clk_per)) {
+ ret = PTR_ERR(fsl_lpspi->clk_per);
+- goto out_controller_put;
++ return ret;
+ }
+
+ fsl_lpspi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fsl_lpspi->clk_ipg)) {
+ ret = PTR_ERR(fsl_lpspi->clk_ipg);
+- goto out_controller_put;
++ return ret;
+ }
+
+ /* enable the clock */
+ ret = fsl_lpspi_init_rpm(fsl_lpspi);
+ if (ret)
+- goto out_controller_put;
++ return ret;
+
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+@@ -945,8 +974,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
+ pm_runtime_put_sync(fsl_lpspi->dev);
+ pm_runtime_disable(fsl_lpspi->dev);
+-out_controller_put:
+- spi_controller_put(controller);
+
+ return ret;
+ }
+@@ -959,6 +986,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
+
+ fsl_lpspi_dma_exit(controller);
+
++ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
+ pm_runtime_disable(fsl_lpspi->dev);
+ }
+
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index f4f376a8351b4a..7401ed3b9acd40 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -1110,25 +1110,27 @@ static int spi_geni_probe(struct platform_device *pdev)
+ spin_lock_init(&mas->lock);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
+- pm_runtime_enable(dev);
++ ret = devm_pm_runtime_enable(dev);
++ if (ret)
++ return ret;
+
+ if (device_property_read_bool(&pdev->dev, "spi-slave"))
+ spi->slave = true;
+
+ ret = geni_icc_get(&mas->se, NULL);
+ if (ret)
+- goto spi_geni_probe_runtime_disable;
++ return ret;
+ /* Set the bus quota to a reasonable value for register access */
+ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
+ mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ ret = geni_icc_set_bw(&mas->se);
+ if (ret)
+- goto spi_geni_probe_runtime_disable;
++ return ret;
+
+ ret = spi_geni_init(mas);
+ if (ret)
+- goto spi_geni_probe_runtime_disable;
++ return ret;
+
+ /*
+ * check the mode supported and set_cs for fifo mode only
+@@ -1157,8 +1159,6 @@ static int spi_geni_probe(struct platform_device *pdev)
+ free_irq(mas->irq, spi);
+ spi_geni_release_dma:
+ spi_geni_release_dma_chan(mas);
+-spi_geni_probe_runtime_disable:
+- pm_runtime_disable(dev);
+ return ret;
+ }
+
+@@ -1170,10 +1170,9 @@ static void spi_geni_remove(struct platform_device *pdev)
+ /* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ spi_unregister_master(spi);
+
+- spi_geni_release_dma_chan(mas);
+-
+ free_irq(mas->irq, spi);
+- pm_runtime_disable(&pdev->dev);
++
++ spi_geni_release_dma_chan(mas);
+ }
+
+ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
+diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
+index 35ef5e8e2ffd25..16054695bdb04a 100644
+--- a/drivers/spi/spi-hisi-kunpeng.c
++++ b/drivers/spi/spi-hisi-kunpeng.c
+@@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = {
+ HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
+ HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
+ HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
+- HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
+- HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
+ HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
+ HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
+ HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
+@@ -483,6 +481,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (host->max_speed_hz == 0)
++ return dev_err_probe(dev, -EINVAL, "spi-max-frequency can't be 0\n");
++
+ ret = device_property_read_u16(dev, "num-cs",
+ &host->num_chipselect);
+ if (ret)
+@@ -497,6 +498,7 @@ static int hisi_spi_probe(struct platform_device *pdev)
+ host->transfer_one = hisi_spi_transfer_one;
+ host->handle_err = hisi_spi_handle_err;
+ host->dev.fwnode = dev->fwnode;
++ host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX);
+
+ hisi_spi_hw_init(hs);
+
+diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
+index 9d22018f7985f1..1301d14483d482 100644
+--- a/drivers/spi/spi-hisi-sfc-v3xx.c
++++ b/drivers/spi/spi-hisi-sfc-v3xx.c
+@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
+ {
+ struct hisi_sfc_v3xx_host *host = data;
++ u32 reg;
++
++ reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
++ if (!reg)
++ return IRQ_NONE;
+
+ hisi_sfc_v3xx_disable_int(host);
+
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 498e35c8db2c1d..daa32bde615561 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -2,6 +2,7 @@
+ // Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ // Copyright (C) 2008 Juergen Beisert
+
++#include <linux/bits.h>
+ #include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/delay.h>
+@@ -659,11 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
+ ctrl |= (spi_imx->target_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else {
+- if (spi_imx->count >= 512)
+- ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
+- else
+- ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+- << MX51_ECSPI_CTRL_BL_OFFSET;
++ ctrl |= (spi_imx->bits_per_word - 1)
++ << MX51_ECSPI_CTRL_BL_OFFSET;
+ }
+
+ /* set clock speed */
+@@ -1052,7 +1050,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .fifo_size = 8,
+- .has_dmamode = true,
++ .has_dmamode = false,
+ .dynamic_burst = false,
+ .has_targetmode = false,
+ .devtype = IMX35_CSPI,
+@@ -1872,8 +1870,8 @@ static int spi_imx_probe(struct platform_device *pdev)
+ spi_imx_sdma_exit(spi_imx);
+ out_runtime_pm_put:
+ pm_runtime_dont_use_autosuspend(spi_imx->dev);
+- pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(spi_imx->dev);
++ pm_runtime_set_suspended(&pdev->dev);
+
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ out_put_per:
+diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
+index 57d767a68e7b27..4337ca51d7aa21 100644
+--- a/drivers/spi/spi-intel-pci.c
++++ b/drivers/spi/spi-intel-pci.c
+@@ -76,6 +76,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
++ { PCI_VDEVICE(INTEL, 0x7f24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
+@@ -84,7 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
+- { PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
++ { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
+ { },
+ };
+ MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
+diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
+index 4f76ddf97b10ff..32a0fa4ba50f76 100644
+--- a/drivers/spi/spi-microchip-core-qspi.c
++++ b/drivers/spi/spi-microchip-core-qspi.c
+@@ -283,6 +283,7 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
+ }
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
++ control &= ~CONTROL_CLKRATE_MASK;
+ control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
+index b451cd4860ecba..aa05127c8696cd 100644
+--- a/drivers/spi/spi-microchip-core.c
++++ b/drivers/spi/spi-microchip-core.c
+@@ -21,7 +21,7 @@
+ #include <linux/spi/spi.h>
+
+ #define MAX_LEN (0xffff)
+-#define MAX_CS (8)
++#define MAX_CS (1)
+ #define DEFAULT_FRAMESIZE (8)
+ #define FIFO_DEPTH (32)
+ #define CLK_GEN_MODE1_MAX (255)
+@@ -75,6 +75,7 @@
+
+ #define REG_CONTROL (0x00)
+ #define REG_FRAME_SIZE (0x04)
++#define FRAME_SIZE_MASK GENMASK(5, 0)
+ #define REG_STATUS (0x08)
+ #define REG_INT_CLEAR (0x0c)
+ #define REG_RX_DATA (0x10)
+@@ -89,6 +90,9 @@
+ #define REG_RIS (0x24)
+ #define REG_CONTROL2 (0x28)
+ #define REG_COMMAND (0x2c)
++#define COMMAND_CLRFRAMECNT BIT(4)
++#define COMMAND_TXFIFORST BIT(3)
++#define COMMAND_RXFIFORST BIT(2)
+ #define REG_PKTSIZE (0x30)
+ #define REG_CMD_SIZE (0x34)
+ #define REG_HWSTATUS (0x38)
+@@ -103,6 +107,7 @@ struct mchp_corespi {
+ u8 *rx_buf;
+ u32 clk_gen; /* divider for spi output clock generated by the controller */
+ u32 clk_mode;
++ u32 pending_slave_select;
+ int irq;
+ int tx_len;
+ int rx_len;
+@@ -148,62 +153,59 @@ static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+
+ static void mchp_corespi_enable_ints(struct mchp_corespi *spi)
+ {
+- u32 control, mask = INT_ENABLE_MASK;
+-
+- mchp_corespi_disable(spi);
+-
+- control = mchp_corespi_read(spi, REG_CONTROL);
+-
+- control |= mask;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+- control |= CONTROL_ENABLE;
++ control |= INT_ENABLE_MASK;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+
+ static void mchp_corespi_disable_ints(struct mchp_corespi *spi)
+ {
+- u32 control, mask = INT_ENABLE_MASK;
+-
+- mchp_corespi_disable(spi);
+-
+- control = mchp_corespi_read(spi, REG_CONTROL);
+- control &= ~mask;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+- control |= CONTROL_ENABLE;
++ control &= ~INT_ENABLE_MASK;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+
+ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
+ {
+ u32 control;
+- u16 lenpart;
++ u32 lenpart;
++ u32 frames = mchp_corespi_read(spi, REG_FRAMESUP);
+
+ /*
+- * Disable the SPI controller. Writes to transfer length have
+- * no effect when the controller is enabled.
++	 * Writing to FRAMECNT in REG_CONTROL will reset the frame count;
++	 * taking a shortcut requires an explicit clear instead.
+ */
+- mchp_corespi_disable(spi);
++ if (frames == len) {
++ mchp_corespi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT);
++ return;
++ }
+
+ /*
+ * The lower 16 bits of the frame count are stored in the control reg
+ * for legacy reasons, but the upper 16 written to a different register:
+ * FRAMESUP. While both the upper and lower bits can be *READ* from the
+- * FRAMESUP register, writing to the lower 16 bits is a NOP
++ * FRAMESUP register, writing to the lower 16 bits is (supposedly) a NOP.
++ *
++ * The driver used to disable the controller while modifying the frame
++ * count, and mask off the lower 16 bits of len while writing to
++ * FRAMES_UP. When the driver was changed to disable the controller as
++ * infrequently as possible, it was discovered that the logic of
++ * lenpart = len & 0xffff_0000
++ * write(REG_FRAMESUP, lenpart)
++ * would actually write zeros into the lower 16 bits on an mpfs250t-es,
++ * despite documentation stating these bits were read-only.
++ * Writing len unmasked into FRAMES_UP ensures those bits aren't zeroed
++ * on an mpfs250t-es and will be a NOP for the lower 16 bits on hardware
++ * that matches the documentation.
+ */
+ lenpart = len & 0xffff;
+-
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ control &= ~CONTROL_FRAMECNT_MASK;
+ control |= lenpart << CONTROL_FRAMECNT_SHIFT;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+-
+- lenpart = len & 0xffff0000;
+- mchp_corespi_write(spi, REG_FRAMESUP, lenpart);
+-
+- control |= CONTROL_ENABLE;
+- mchp_corespi_write(spi, REG_CONTROL, control);
++ mchp_corespi_write(spi, REG_FRAMESUP, len);
+ }
+
+ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+@@ -226,17 +228,22 @@ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+
+ static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+ {
++ u32 frame_size = mchp_corespi_read(spi, REG_FRAME_SIZE);
+ u32 control;
+
++ if ((frame_size & FRAME_SIZE_MASK) == bt)
++ return;
++
+ /*
+ * Disable the SPI controller. Writes to the frame size have
+ * no effect when the controller is enabled.
+ */
+- mchp_corespi_disable(spi);
++ control = mchp_corespi_read(spi, REG_CONTROL);
++ control &= ~CONTROL_ENABLE;
++ mchp_corespi_write(spi, REG_CONTROL, control);
+
+ mchp_corespi_write(spi, REG_FRAME_SIZE, bt);
+
+- control = mchp_corespi_read(spi, REG_CONTROL);
+ control |= CONTROL_ENABLE;
+ mchp_corespi_write(spi, REG_CONTROL, control);
+ }
+@@ -244,49 +251,56 @@ static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt)
+ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable)
+ {
+ u32 reg;
+- struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
++ struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
+
+ reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ reg &= ~BIT(spi_get_chipselect(spi, 0));
+ reg |= !disable << spi_get_chipselect(spi, 0);
++ corespi->pending_slave_select = reg;
+
+- mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
++ /*
++ * Only deassert chip select immediately. Writing to some registers
++ * requires the controller to be disabled, which results in the
++ * output pins being tristated and can cause the SCLK and MOSI lines
++ * to transition. Therefore asserting the chip select is deferred
++ * until just before writing to the TX FIFO, to ensure the device
++ * doesn't see any spurious clock transitions whilst CS is enabled.
++ */
++ if (((spi->mode & SPI_CS_HIGH) == 0) == disable)
++ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+ }
+
+ static int mchp_corespi_setup(struct spi_device *spi)
+ {
+- struct mchp_corespi *corespi = spi_master_get_devdata(spi->master);
++ struct mchp_corespi *corespi = spi_controller_get_devdata(spi->controller);
+ u32 reg;
+
+ /*
+- * Active high slaves need to be specifically set to their inactive
++ * Active high targets need to be specifically set to their inactive
+ * states during probe by adding them to the "control group" & thus
+ * driving their select line low.
+ */
+ if (spi->mode & SPI_CS_HIGH) {
+ reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT);
+ reg |= BIT(spi_get_chipselect(spi, 0));
++ corespi->pending_slave_select = reg;
+ mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg);
+ }
+ return 0;
+ }
+
+-static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *spi)
++static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi *spi)
+ {
+ unsigned long clk_hz;
+ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+- control |= CONTROL_MASTER;
++ control &= ~CONTROL_ENABLE;
++ mchp_corespi_write(spi, REG_CONTROL, control);
+
++ control |= CONTROL_MASTER;
+ control &= ~CONTROL_MODE_MASK;
+ control |= MOTOROLA_MODE;
+
+- mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+-
+- /* max. possible spi clock rate is the apb clock rate */
+- clk_hz = clk_get_rate(spi->clk);
+- master->max_speed_hz = clk_hz;
+-
+ /*
+ * The controller must be configured so that it doesn't remove Chip
+ * Select until the entire message has been transferred, even if at
+@@ -295,19 +309,25 @@ static void mchp_corespi_init(struct spi_master *master, struct mchp_corespi *sp
+ * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames
+ * for the 8 bit transfers that this driver uses.
+ */
+- control = mchp_corespi_read(spi, REG_CONTROL);
+ control |= CONTROL_SPS | CONTROL_BIGFIFO;
+
+ mchp_corespi_write(spi, REG_CONTROL, control);
+
++ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
++
++ /* max. possible spi clock rate is the apb clock rate */
++ clk_hz = clk_get_rate(spi->clk);
++ host->max_speed_hz = clk_hz;
++
+ mchp_corespi_enable_ints(spi);
+
+ /*
+ * It is required to enable direct mode, otherwise control over the chip
+ * select is relinquished to the hardware. SSELOUT is enabled too so we
+- * can deal with active high slaves.
++ * can deal with active high targets.
+ */
+- mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT);
++ spi->pending_slave_select = SSELOUT | SSEL_DIRECT;
++ mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
+
+ control = mchp_corespi_read(spi, REG_CONTROL);
+
+@@ -321,8 +341,6 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+ {
+ u32 control;
+
+- mchp_corespi_disable(spi);
+-
+ control = mchp_corespi_read(spi, REG_CONTROL);
+ if (spi->clk_mode)
+ control |= CONTROL_CLKMODE;
+@@ -331,12 +349,12 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi)
+
+ mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen);
+ mchp_corespi_write(spi, REG_CONTROL, control);
+- mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE);
+ }
+
+ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode)
+ {
+- u32 control, mode_val;
++ u32 mode_val;
++ u32 control = mchp_corespi_read(spi, REG_CONTROL);
+
+ switch (mode & SPI_MODE_X_MASK) {
+ case SPI_MODE_0:
+@@ -354,12 +372,13 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int
+ }
+
+ /*
+- * Disable the SPI controller. Writes to the frame size have
++ * Disable the SPI controller. Writes to the frame protocol have
+ * no effect when the controller is enabled.
+ */
+- mchp_corespi_disable(spi);
+
+- control = mchp_corespi_read(spi, REG_CONTROL);
++ control &= ~CONTROL_ENABLE;
++ mchp_corespi_write(spi, REG_CONTROL, control);
++
+ control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT);
+ control |= mode_val;
+
+@@ -371,8 +390,8 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int
+
+ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ {
+- struct spi_master *master = dev_id;
+- struct mchp_corespi *spi = spi_master_get_devdata(master);
++ struct spi_controller *host = dev_id;
++ struct mchp_corespi *spi = spi_controller_get_devdata(host);
+ u32 intfield = mchp_corespi_read(spi, REG_MIS) & 0xf;
+ bool finalise = false;
+
+@@ -380,26 +399,23 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ if (intfield == 0)
+ return IRQ_NONE;
+
+- if (intfield & INT_TXDONE) {
++ if (intfield & INT_TXDONE)
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
+
++ if (intfield & INT_RXRDY) {
++ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
++
+ if (spi->rx_len)
+ mchp_corespi_read_fifo(spi);
+-
+- if (spi->tx_len)
+- mchp_corespi_write_fifo(spi);
+-
+- if (!spi->rx_len)
+- finalise = true;
+ }
+
+- if (intfield & INT_RXRDY)
+- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
++ if (!spi->rx_len && !spi->tx_len)
++ finalise = true;
+
+ if (intfield & INT_RX_CHANNEL_OVERFLOW) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
+ finalise = true;
+- dev_err(&master->dev,
++ dev_err(&host->dev,
+ "%s: RX OVERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ spi->rx_len, spi->tx_len);
+ }
+@@ -407,13 +423,13 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
+ if (intfield & INT_TX_CHANNEL_UNDERRUN) {
+ mchp_corespi_write(spi, REG_INT_CLEAR, INT_TX_CHANNEL_UNDERRUN);
+ finalise = true;
+- dev_err(&master->dev,
++ dev_err(&host->dev,
+ "%s: TX UNDERFLOW: rxlen: %d, txlen: %d\n", __func__,
+ spi->rx_len, spi->tx_len);
+ }
+
+ if (finalise)
+- spi_finalize_current_transfer(master);
++ spi_finalize_current_transfer(host);
+
+ return IRQ_HANDLED;
+ }
+@@ -455,16 +471,16 @@ static int mchp_corespi_calculate_clkgen(struct mchp_corespi *spi,
+ return 0;
+ }
+
+-static int mchp_corespi_transfer_one(struct spi_master *master,
++static int mchp_corespi_transfer_one(struct spi_controller *host,
+ struct spi_device *spi_dev,
+ struct spi_transfer *xfer)
+ {
+- struct mchp_corespi *spi = spi_master_get_devdata(master);
++ struct mchp_corespi *spi = spi_controller_get_devdata(host);
+ int ret;
+
+ ret = mchp_corespi_calculate_clkgen(spi, (unsigned long)xfer->speed_hz);
+ if (ret) {
+- dev_err(&master->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
++ dev_err(&host->dev, "failed to set clk_gen for target %u Hz\n", xfer->speed_hz);
+ return ret;
+ }
+
+@@ -479,16 +495,21 @@ static int mchp_corespi_transfer_one(struct spi_master *master,
+ mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH)
+ ? FIFO_DEPTH : spi->tx_len);
+
+- if (spi->tx_len)
++ mchp_corespi_write(spi, REG_COMMAND, COMMAND_RXFIFORST | COMMAND_TXFIFORST);
++
++ mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
++
++ while (spi->tx_len)
+ mchp_corespi_write_fifo(spi);
++
+ return 1;
+ }
+
+-static int mchp_corespi_prepare_message(struct spi_master *master,
++static int mchp_corespi_prepare_message(struct spi_controller *host,
+ struct spi_message *msg)
+ {
+ struct spi_device *spi_dev = msg->spi;
+- struct mchp_corespi *spi = spi_master_get_devdata(master);
++ struct mchp_corespi *spi = spi_controller_get_devdata(host);
+
+ mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE);
+ mchp_corespi_set_mode(spi, spi_dev->mode);
+@@ -498,32 +519,32 @@ static int mchp_corespi_prepare_message(struct spi_master *master,
+
+ static int mchp_corespi_probe(struct platform_device *pdev)
+ {
+- struct spi_master *master;
++ struct spi_controller *host;
+ struct mchp_corespi *spi;
+ struct resource *res;
+ u32 num_cs;
+ int ret = 0;
+
+- master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi));
+- if (!master)
++ host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi));
++ if (!host)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+- "unable to allocate master for SPI controller\n");
++ "unable to allocate host for SPI controller\n");
+
+- platform_set_drvdata(pdev, master);
++ platform_set_drvdata(pdev, host);
+
+ if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
+ num_cs = MAX_CS;
+
+- master->num_chipselect = num_cs;
+- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+- master->setup = mchp_corespi_setup;
+- master->bits_per_word_mask = SPI_BPW_MASK(8);
+- master->transfer_one = mchp_corespi_transfer_one;
+- master->prepare_message = mchp_corespi_prepare_message;
+- master->set_cs = mchp_corespi_set_cs;
+- master->dev.of_node = pdev->dev.of_node;
++ host->num_chipselect = num_cs;
++ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++ host->setup = mchp_corespi_setup;
++ host->bits_per_word_mask = SPI_BPW_MASK(8);
++ host->transfer_one = mchp_corespi_transfer_one;
++ host->prepare_message = mchp_corespi_prepare_message;
++ host->set_cs = mchp_corespi_set_cs;
++ host->dev.of_node = pdev->dev.of_node;
+
+- spi = spi_master_get_devdata(master);
++ spi = spi_controller_get_devdata(host);
+
+ spi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(spi->regs))
+@@ -534,7 +555,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ return spi->irq;
+
+ ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
+- IRQF_SHARED, dev_name(&pdev->dev), master);
++ IRQF_SHARED, dev_name(&pdev->dev), host);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not request irq\n");
+@@ -549,25 +570,25 @@ static int mchp_corespi_probe(struct platform_device *pdev)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+- mchp_corespi_init(master, spi);
++ mchp_corespi_init(host, spi);
+
+- ret = devm_spi_register_master(&pdev->dev, master);
++ ret = devm_spi_register_controller(&pdev->dev, host);
+ if (ret) {
+ mchp_corespi_disable(spi);
+ clk_disable_unprepare(spi->clk);
+ return dev_err_probe(&pdev->dev, ret,
+- "unable to register master for SPI controller\n");
++ "unable to register host for SPI controller\n");
+ }
+
+- dev_info(&pdev->dev, "Registered SPI controller %d\n", master->bus_num);
++ dev_info(&pdev->dev, "Registered SPI controller %d\n", host->bus_num);
+
+ return 0;
+ }
+
+ static void mchp_corespi_remove(struct platform_device *pdev)
+ {
+- struct spi_master *master = platform_get_drvdata(pdev);
+- struct mchp_corespi *spi = spi_master_get_devdata(master);
++ struct spi_controller *host = platform_get_drvdata(pdev);
++ struct mchp_corespi *spi = spi_controller_get_devdata(host);
+
+ mchp_corespi_disable_ints(spi);
+ clk_disable_unprepare(spi->clk);
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 0757985947dd92..ea8e38bfa17469 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -787,17 +787,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
+ mtk_spi_setup_packet(master);
+
+- cnt = mdata->xfer_len / 4;
+- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+- trans->tx_buf + mdata->num_xfered, cnt);
++ if (trans->tx_buf) {
++ cnt = mdata->xfer_len / 4;
++ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
++ trans->tx_buf + mdata->num_xfered, cnt);
+
+- remainder = mdata->xfer_len % 4;
+- if (remainder > 0) {
+- reg_val = 0;
+- memcpy(&reg_val,
+- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+- remainder);
+- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
++ remainder = mdata->xfer_len % 4;
++ if (remainder > 0) {
++ reg_val = 0;
++ memcpy(&reg_val,
++ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
++ remainder);
++ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
++ }
+ }
+
+ mtk_spi_enable_transfer(master);
+diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
+index fa8c1f740c702d..804b911f2baf1d 100644
+--- a/drivers/spi/spi-mux.c
++++ b/drivers/spi/spi-mux.c
+@@ -156,6 +156,7 @@ static int spi_mux_probe(struct spi_device *spi)
+ /* supported modes are the same as our parent's */
+ ctlr->mode_bits = spi->controller->mode_bits;
+ ctlr->flags = spi->controller->flags;
++ ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask;
+ ctlr->transfer_one_message = spi_mux_transfer_one_message;
+ ctlr->setup = spi_mux_setup;
+ ctlr->num_chipselect = mux_control_states(priv->mux);
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index c964f41dcc428c..bc6c086ddd43f4 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -57,13 +57,6 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/spi-mem.h>
+
+-/*
+- * The driver only uses one single LUT entry, that is updated on
+- * each call of exec_op(). Index 0 is preset at boot with a basic
+- * read operation, so let's use the last entry (31).
+- */
+-#define SEQID_LUT 31
+-
+ /* Registers used by the driver */
+ #define FSPI_MCR0 0x00
+ #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
+@@ -263,9 +256,6 @@
+ #define FSPI_TFDR 0x180
+
+ #define FSPI_LUT_BASE 0x200
+-#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+-#define FSPI_LUT_REG(idx) \
+- (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
+
+ /* register map end */
+
+@@ -341,6 +331,7 @@ struct nxp_fspi_devtype_data {
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
++ unsigned int lut_num;
+ bool little_endian;
+ };
+
+@@ -349,6 +340,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = {
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
++ .lut_num = 32,
+ .little_endian = true, /* little-endian */
+ };
+
+@@ -357,6 +349,7 @@ static struct nxp_fspi_devtype_data imx8mm_data = {
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
++ .lut_num = 32,
+ .little_endian = true, /* little-endian */
+ };
+
+@@ -365,6 +358,7 @@ static struct nxp_fspi_devtype_data imx8qxp_data = {
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
++ .lut_num = 32,
+ .little_endian = true, /* little-endian */
+ };
+
+@@ -373,6 +367,16 @@ static struct nxp_fspi_devtype_data imx8dxl_data = {
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = FSPI_QUIRK_USE_IP_ONLY,
++ .lut_num = 32,
++ .little_endian = true, /* little-endian */
++};
++
++static struct nxp_fspi_devtype_data imx8ulp_data = {
++ .rxfifo = SZ_512, /* (64 * 64 bits) */
++ .txfifo = SZ_1K, /* (128 * 64 bits) */
++ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
++ .quirks = 0,
++ .lut_num = 16,
+ .little_endian = true, /* little-endian */
+ };
+
+@@ -544,6 +548,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
+ void __iomem *base = f->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
++ u32 lut_offset = (f->devtype_data->lut_num - 1) * 4 * 4;
++ u32 target_lut_reg;
+
+ /* cmd */
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+@@ -588,8 +594,10 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
+ fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
+
+ /* fill LUT */
+- for (i = 0; i < ARRAY_SIZE(lutval); i++)
+- fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
++ for (i = 0; i < ARRAY_SIZE(lutval); i++) {
++ target_lut_reg = FSPI_LUT_BASE + lut_offset + i * 4;
++ fspi_writel(f, lutval[i], base + target_lut_reg);
++ }
+
+ dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x], size: 0x%08x\n",
+ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes);
+@@ -759,7 +767,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+ f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ len : NXP_FSPI_MIN_IOMAP;
+
+- f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
++ f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
+ f->memmap_len);
+
+ if (!f->ahb_addr) {
+@@ -805,14 +813,15 @@ static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ if (i < op->data.nbytes) {
+ u32 data = 0;
+ int j;
++ int remaining = op->data.nbytes - i;
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+- for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+- memcpy(&data, buf + i + j, 4);
++ for (j = 0; j < ALIGN(remaining, 4); j += 4) {
++ memcpy(&data, buf + i + j, min_t(int, 4, remaining - j));
+ fspi_writel(f, data, base + FSPI_TFDR + j);
+ }
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+@@ -875,7 +884,7 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
+ void __iomem *base = f->iobase;
+ int seqnum = 0;
+ int err = 0;
+- u32 reg;
++ u32 reg, seqid_lut;
+
+ reg = fspi_readl(f, base + FSPI_IPRXFCR);
+ /* invalid RXFIFO first */
+@@ -891,8 +900,9 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
+ * the LUT at each exec_op() call. And also specify the DATA
+ * length, since it's has not been specified in the LUT.
+ */
++ seqid_lut = f->devtype_data->lut_num - 1;
+ fspi_writel(f, op->data.nbytes |
+- (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
++ (seqid_lut << FSPI_IPCR1_SEQID_SHIFT) |
+ (seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
+ base + FSPI_IPCR1);
+
+@@ -1016,7 +1026,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
+ {
+ void __iomem *base = f->iobase;
+ int ret, i;
+- u32 reg;
++ u32 reg, seqid_lut;
+
+ /* disable and unprepare clock to avoid glitch pass to controller */
+ nxp_fspi_clk_disable_unprep(f);
+@@ -1091,11 +1101,17 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
+ fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+
++	 * The driver only uses a single LUT entry, which is updated on
++ * The driver only uses one single LUT entry, that is updated on
++ * each call of exec_op(). Index 0 is preset at boot with a basic
++ * read operation, so let's use the last entry.
++ */
++ seqid_lut = f->devtype_data->lut_num - 1;
+ /* AHB Read - Set lut sequence ID for all CS. */
+- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
+- fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
++ fspi_writel(f, seqid_lut, base + FSPI_FLSHA1CR2);
++ fspi_writel(f, seqid_lut, base + FSPI_FLSHA2CR2);
++ fspi_writel(f, seqid_lut, base + FSPI_FLSHB1CR2);
++ fspi_writel(f, seqid_lut, base + FSPI_FLSHB2CR2);
+
+ f->selected = -1;
+
+@@ -1290,6 +1306,7 @@ static const struct of_device_id nxp_fspi_dt_ids[] = {
+ { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, },
+ { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
+ { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, },
++ { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index e5cd82eb9e5498..ddf1c684bcc7d8 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -117,7 +117,7 @@ struct omap2_mcspi_regs {
+
+ struct omap2_mcspi {
+ struct completion txdone;
+- struct spi_master *master;
++ struct spi_controller *ctlr;
+ /* Virtual base address of the controller */
+ void __iomem *base;
+ unsigned long phys;
+@@ -125,10 +125,12 @@ struct omap2_mcspi {
+ struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
+ struct omap2_mcspi_regs ctx;
++ struct clk *ref_clk;
+ int fifo_depth;
+- bool slave_aborted;
++ bool target_aborted;
+ unsigned int pin_dir:1;
+ size_t max_xfer_len;
++ u32 ref_clk_hz;
+ };
+
+ struct omap2_mcspi_cs {
+@@ -141,17 +143,17 @@ struct omap2_mcspi_cs {
+ u32 chconf0, chctrl0;
+ };
+
+-static inline void mcspi_write_reg(struct spi_master *master,
++static inline void mcspi_write_reg(struct spi_controller *ctlr,
+ int idx, u32 val)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+
+ writel_relaxed(val, mcspi->base + idx);
+ }
+
+-static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
++static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+
+ return readl_relaxed(mcspi->base + idx);
+ }
+@@ -235,7 +237,7 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+
+ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ u32 l;
+
+ /* The controller handles the inverted chip selects
+@@ -266,24 +268,24 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
+ }
+ }
+
+-static void omap2_mcspi_set_mode(struct spi_master *master)
++static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ u32 l;
+
+ /*
+- * Choose master or slave mode
++ * Choose host or target mode
+ */
+- l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
++ l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
+ l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
+- if (spi_controller_is_slave(master)) {
++ if (spi_controller_is_target(ctlr)) {
+ l |= (OMAP2_MCSPI_MODULCTRL_MS);
+ } else {
+ l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
+ l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ }
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
+
+ ctx->modulctrl = l;
+ }
+@@ -291,14 +293,14 @@ static void omap2_mcspi_set_mode(struct spi_master *master)
+ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ struct spi_transfer *t, int enable)
+ {
+- struct spi_master *master = spi->master;
++ struct spi_controller *ctlr = spi->controller;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ unsigned int wcnt;
+ int max_fifo_depth, bytes_per_word;
+ u32 chconf, xferlevel;
+
+- mcspi = spi_master_get_devdata(master);
++ mcspi = spi_controller_get_devdata(ctlr);
+
+ chconf = mcspi_cached_chconf0(spi);
+ if (enable) {
+@@ -326,7 +328,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ xferlevel |= bytes_per_word - 1;
+ }
+
+- mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = max_fifo_depth;
+
+@@ -364,9 +366,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+ struct completion *x)
+ {
+- if (spi_controller_is_slave(mcspi->master)) {
++ if (spi_controller_is_target(mcspi->ctlr)) {
+ if (wait_for_completion_interruptible(x) ||
+- mcspi->slave_aborted)
++ mcspi->target_aborted)
+ return -EINTR;
+ } else {
+ wait_for_completion(x);
+@@ -378,7 +380,7 @@ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+ static void omap2_mcspi_rx_callback(void *data)
+ {
+ struct spi_device *spi = data;
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ /* We must disable the DMA RX request */
+@@ -390,7 +392,7 @@ static void omap2_mcspi_rx_callback(void *data)
+ static void omap2_mcspi_tx_callback(void *data)
+ {
+ struct spi_device *spi = data;
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ /* We must disable the DMA TX request */
+@@ -407,7 +409,7 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct dma_async_tx_descriptor *tx;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+ mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+@@ -445,13 +447,13 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ struct dma_async_tx_descriptor *tx;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+ mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+ count = xfer->len;
+
+ /*
+ * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
+- * it mentions reducing DMA transfer length by one element in master
++ * it mentions reducing DMA transfer length by one element in host
+ * normal mode.
+ */
+ if (mcspi->fifo_depth == 0)
+@@ -514,7 +516,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ omap2_mcspi_set_dma_req(spi, 1, 1);
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
+- if (ret || mcspi->slave_aborted) {
++ if (ret || mcspi->target_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_rx);
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+ return 0;
+@@ -590,7 +592,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ void __iomem *irqstat_reg;
+ int wait_res;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+ mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ if (cs->word_len <= 8) {
+@@ -617,14 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+- mcspi->slave_aborted = false;
++ mcspi->target_aborted = false;
+ reinit_completion(&mcspi_dma->dma_tx_completion);
+ reinit_completion(&mcspi_dma->dma_rx_completion);
+ reinit_completion(&mcspi->txdone);
+ if (tx) {
+- /* Enable EOW IRQ to know end of tx in slave mode */
+- if (spi_controller_is_slave(spi->master))
+- mcspi_write_reg(spi->master,
++ /* Enable EOW IRQ to know end of tx in target mode */
++ if (spi_controller_is_target(spi->controller))
++ mcspi_write_reg(spi->controller,
+ OMAP2_MCSPI_IRQENABLE,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ omap2_mcspi_tx_dma(spi, xfer, cfg);
+@@ -637,15 +639,15 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ int ret;
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
+- if (ret || mcspi->slave_aborted) {
++ if (ret || mcspi->target_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_tx);
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+ return 0;
+ }
+
+- if (spi_controller_is_slave(mcspi->master)) {
++ if (spi_controller_is_target(mcspi->ctlr)) {
+ ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
+- if (ret || mcspi->slave_aborted)
++ if (ret || mcspi->target_aborted)
+ return 0;
+ }
+
+@@ -656,7 +658,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+ OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
+ dev_err(&spi->dev, "EOW timed out\n");
+
+- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
++ mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ }
+
+@@ -880,12 +882,12 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+ return count - c;
+ }
+
+-static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
++static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
+ {
+ u32 div;
+
+ for (div = 0; div < 15; div++)
+- if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
++ if (speed_hz >= (ref_clk_hz >> div))
+ return div;
+
+ return 15;
+@@ -897,11 +899,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ {
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+- u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
++ u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
+ u8 word_len = spi->bits_per_word;
+ u32 speed_hz = spi->max_speed_hz;
+
+- mcspi = spi_master_get_devdata(spi->master);
++ mcspi = spi_controller_get_devdata(spi->controller);
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+@@ -911,14 +913,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ if (t && t->speed_hz)
+ speed_hz = t->speed_hz;
+
+- speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+- if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+- clkd = omap2_mcspi_calc_divisor(speed_hz);
+- speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
++ ref_clk_hz = mcspi->ref_clk_hz;
++ speed_hz = min_t(u32, speed_hz, ref_clk_hz);
++ if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
++ clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
++ speed_hz = ref_clk_hz >> clkd;
+ clkg = 0;
+ } else {
+- div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+- speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
++ div = (ref_clk_hz + speed_hz - 1) / speed_hz;
++ speed_hz = ref_clk_hz / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+@@ -926,7 +929,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+
+ l = mcspi_cached_chconf0(spi);
+
+- /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
++ /* standard 4-wire host mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+@@ -1017,13 +1020,13 @@ static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
+ return ret;
+ }
+
+-static void omap2_mcspi_release_dma(struct spi_master *master)
++static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_dma *mcspi_dma;
+ int i;
+
+- for (i = 0; i < master->num_chipselect; i++) {
++ for (i = 0; i < ctlr->num_chipselect; i++) {
+ mcspi_dma = &mcspi->dma_channels[i];
+
+ if (mcspi_dma->dma_rx) {
+@@ -1054,7 +1057,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ {
+ bool initial_setup = false;
+ int ret;
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+@@ -1096,24 +1099,24 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
+ struct omap2_mcspi *mcspi = data;
+ u32 irqstat;
+
+- irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
++ irqstat = mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
+ if (!irqstat)
+ return IRQ_NONE;
+
+- /* Disable IRQ and wakeup slave xfer task */
+- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
++ /* Disable IRQ and wakeup target xfer task */
++ mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
+ if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
+ complete(&mcspi->txdone);
+
+ return IRQ_HANDLED;
+ }
+
+-static int omap2_mcspi_slave_abort(struct spi_master *master)
++static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
+
+- mcspi->slave_aborted = true;
++ mcspi->target_aborted = true;
+ complete(&mcspi_dma->dma_rx_completion);
+ complete(&mcspi_dma->dma_tx_completion);
+ complete(&mcspi->txdone);
+@@ -1121,7 +1124,7 @@ static int omap2_mcspi_slave_abort(struct spi_master *master)
+ return 0;
+ }
+
+-static int omap2_mcspi_transfer_one(struct spi_master *master,
++static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+ {
+@@ -1129,7 +1132,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ /* We only enable one channel at a time -- the one whose message is
+	 * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+- * channel" master mode. As a side effect, we need to manage the
++ * channel" host mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+
+@@ -1141,13 +1144,13 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ int status = 0;
+ u32 chconf;
+
+- mcspi = spi_master_get_devdata(master);
++ mcspi = spi_controller_get_devdata(ctlr);
+ mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
+ cs = spi->controller_state;
+ cd = spi->controller_data;
+
+ /*
+- * The slave driver could have changed spi->mode in which case
++ * The target driver could have changed spi->mode in which case
+ * it will be different from cs->mode (the current hardware setup).
+ * If so, set par_override (even though its not a parity issue) so
+ * omap2_mcspi_setup_transfer will be called to configure the hardware
+@@ -1175,7 +1178,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+@@ -1201,8 +1204,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ unsigned count;
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+- master->cur_msg_mapped &&
+- master->can_dma(master, spi, t))
++ ctlr->cur_msg_mapped &&
++ ctlr->can_dma(ctlr, spi, t))
+ omap2_mcspi_set_fifo(spi, t, 1);
+
+ omap2_mcspi_set_enable(spi, 1);
+@@ -1213,8 +1216,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ + OMAP2_MCSPI_TX0);
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+- master->cur_msg_mapped &&
+- master->can_dma(master, spi, t))
++ ctlr->cur_msg_mapped &&
++ ctlr->can_dma(ctlr, spi, t))
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+@@ -1240,7 +1243,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+@@ -1256,10 +1259,10 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
+ return status;
+ }
+
+-static int omap2_mcspi_prepare_message(struct spi_master *master,
++static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+@@ -1283,29 +1286,29 @@ static int omap2_mcspi_prepare_message(struct spi_master *master,
+ return 0;
+ }
+
+-static bool omap2_mcspi_can_dma(struct spi_master *master,
++static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
+ return false;
+
+- if (spi_controller_is_slave(master))
++ if (spi_controller_is_target(ctlr))
+ return true;
+
+- master->dma_rx = mcspi_dma->dma_rx;
+- master->dma_tx = mcspi_dma->dma_tx;
++ ctlr->dma_rx = mcspi_dma->dma_rx;
++ ctlr->dma_tx = mcspi_dma->dma_tx;
+
+ return (xfer->len >= DMA_MIN_BYTES);
+ }
+
+ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+ {
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
+
+@@ -1317,7 +1320,7 @@ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+
+ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+ {
+- struct spi_master *master = mcspi->master;
++ struct spi_controller *ctlr = mcspi->ctlr;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ int ret = 0;
+
+@@ -1325,11 +1328,11 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
+ if (ret < 0)
+ return ret;
+
+- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
+
+- omap2_mcspi_set_mode(master);
++ omap2_mcspi_set_mode(ctlr);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+@@ -1353,8 +1356,8 @@ static int omap_mcspi_runtime_suspend(struct device *dev)
+ */
+ static int omap_mcspi_runtime_resume(struct device *dev)
+ {
+- struct spi_master *master = dev_get_drvdata(dev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = dev_get_drvdata(dev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+ int error;
+@@ -1364,8 +1367,8 @@ static int omap_mcspi_runtime_resume(struct device *dev)
+ dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
+
+ /* McSPI: context restore */
+- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
++ mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
+
+ list_for_each_entry(cs, &ctx->cs, node) {
+ /*
+@@ -1420,7 +1423,7 @@ MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+ static int omap2_mcspi_probe(struct platform_device *pdev)
+ {
+- struct spi_master *master;
++ struct spi_controller *ctlr;
+ const struct omap2_mcspi_platform_config *pdata;
+ struct omap2_mcspi *mcspi;
+ struct resource *r;
+@@ -1430,32 +1433,30 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ const struct of_device_id *match;
+
+ if (of_property_read_bool(node, "spi-slave"))
+- master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
++ ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
+ else
+- master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
+- if (!master)
++ ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
++ if (!ctlr)
+ return -ENOMEM;
+
+ /* the spi->mode bits understood by this driver: */
+- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+- master->setup = omap2_mcspi_setup;
+- master->auto_runtime_pm = true;
+- master->prepare_message = omap2_mcspi_prepare_message;
+- master->can_dma = omap2_mcspi_can_dma;
+- master->transfer_one = omap2_mcspi_transfer_one;
+- master->set_cs = omap2_mcspi_set_cs;
+- master->cleanup = omap2_mcspi_cleanup;
+- master->slave_abort = omap2_mcspi_slave_abort;
+- master->dev.of_node = node;
+- master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+- master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+- master->use_gpio_descriptors = true;
+-
+- platform_set_drvdata(pdev, master);
+-
+- mcspi = spi_master_get_devdata(master);
+- mcspi->master = master;
++ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
++ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
++ ctlr->setup = omap2_mcspi_setup;
++ ctlr->auto_runtime_pm = true;
++ ctlr->prepare_message = omap2_mcspi_prepare_message;
++ ctlr->can_dma = omap2_mcspi_can_dma;
++ ctlr->transfer_one = omap2_mcspi_transfer_one;
++ ctlr->set_cs = omap2_mcspi_set_cs;
++ ctlr->cleanup = omap2_mcspi_cleanup;
++ ctlr->target_abort = omap2_mcspi_target_abort;
++ ctlr->dev.of_node = node;
++ ctlr->use_gpio_descriptors = true;
++
++ platform_set_drvdata(pdev, ctlr);
++
++ mcspi = spi_controller_get_devdata(ctlr);
++ mcspi->ctlr = ctlr;
+
+ match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+ if (match) {
+@@ -1463,24 +1464,24 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ pdata = match->data;
+
+ of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
+- master->num_chipselect = num_cs;
++ ctlr->num_chipselect = num_cs;
+ if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+- master->num_chipselect = pdata->num_cs;
++ ctlr->num_chipselect = pdata->num_cs;
+ mcspi->pin_dir = pdata->pin_dir;
+ }
+ regs_offset = pdata->regs_offset;
+ if (pdata->max_xfer_len) {
+ mcspi->max_xfer_len = pdata->max_xfer_len;
+- master->max_transfer_size = omap2_mcspi_max_xfer_size;
++ ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
+ }
+
+ mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
+ if (IS_ERR(mcspi->base)) {
+ status = PTR_ERR(mcspi->base);
+- goto free_master;
++ goto free_ctlr;
+ }
+ mcspi->phys = r->start + regs_offset;
+ mcspi->base += regs_offset;
+@@ -1489,36 +1490,44 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+
+ INIT_LIST_HEAD(&mcspi->ctx.cs);
+
+- mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
++ mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
+- goto free_master;
++ goto free_ctlr;
+ }
+
+- for (i = 0; i < master->num_chipselect; i++) {
++ for (i = 0; i < ctlr->num_chipselect; i++) {
+ sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
+ sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+
+ status = omap2_mcspi_request_dma(mcspi,
+ &mcspi->dma_channels[i]);
+ if (status == -EPROBE_DEFER)
+- goto free_master;
++ goto free_ctlr;
+ }
+
+ status = platform_get_irq(pdev, 0);
+ if (status < 0)
+- goto free_master;
++ goto free_ctlr;
+ init_completion(&mcspi->txdone);
+ status = devm_request_irq(&pdev->dev, status,
+ omap2_mcspi_irq_handler, 0, pdev->name,
+ mcspi);
+ if (status) {
+ dev_err(&pdev->dev, "Cannot request IRQ");
+- goto free_master;
++ goto free_ctlr;
+ }
+
++ mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
++ if (mcspi->ref_clk)
++ mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
++ else
++ mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
++ ctlr->max_speed_hz = mcspi->ref_clk_hz;
++ ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
++
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+@@ -1527,7 +1536,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ if (status < 0)
+ goto disable_pm;
+
+- status = devm_spi_register_controller(&pdev->dev, master);
++ status = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (status < 0)
+ goto disable_pm;
+
+@@ -1537,18 +1546,18 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+-free_master:
+- omap2_mcspi_release_dma(master);
+- spi_master_put(master);
++free_ctlr:
++ omap2_mcspi_release_dma(ctlr);
++ spi_controller_put(ctlr);
+ return status;
+ }
+
+ static void omap2_mcspi_remove(struct platform_device *pdev)
+ {
+- struct spi_master *master = platform_get_drvdata(pdev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = platform_get_drvdata(pdev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+
+- omap2_mcspi_release_dma(master);
++ omap2_mcspi_release_dma(ctlr);
+
+ pm_runtime_dont_use_autosuspend(mcspi->dev);
+ pm_runtime_put_sync(mcspi->dev);
+@@ -1560,8 +1569,8 @@ MODULE_ALIAS("platform:omap2_mcspi");
+
+ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ {
+- struct spi_master *master = dev_get_drvdata(dev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = dev_get_drvdata(dev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ int error;
+
+ error = pinctrl_pm_select_sleep_state(dev);
+@@ -1569,9 +1578,9 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+ dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+ __func__, error);
+
+- error = spi_master_suspend(master);
++ error = spi_controller_suspend(ctlr);
+ if (error)
+- dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
++ dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_suspend(dev);
+@@ -1579,13 +1588,13 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
+
+ static int __maybe_unused omap2_mcspi_resume(struct device *dev)
+ {
+- struct spi_master *master = dev_get_drvdata(dev);
+- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
++ struct spi_controller *ctlr = dev_get_drvdata(dev);
++ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
+ int error;
+
+- error = spi_master_resume(master);
++ error = spi_controller_resume(ctlr);
+ if (error)
+- dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
++ dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_resume(dev);
+diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
+index 3638e974f5d49d..06bf58b7e5d72b 100644
+--- a/drivers/spi/spi-pci1xxxx.c
++++ b/drivers/spi/spi-pci1xxxx.c
+@@ -275,6 +275,8 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
+ spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
+ sizeof(struct pci1xxxx_spi_internal),
+ GFP_KERNEL);
++ if (!spi_bus->spi_int[iter])
++ return -ENOMEM;
+ spi_sub_ptr = spi_bus->spi_int[iter];
+ spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
+ if (!spi_sub_ptr->spi_host)
+diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
+index 03aab661be9d33..1381563941fe3e 100644
+--- a/drivers/spi/spi-ppc4xx.c
++++ b/drivers/spi/spi-ppc4xx.c
+@@ -26,7 +26,6 @@
+ #include <linux/errno.h>
+ #include <linux/wait.h>
+ #include <linux/of_address.h>
+-#include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+@@ -166,10 +165,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+ int scr;
+ u8 cdm = 0;
+ u32 speed;
+- u8 bits_per_word;
+
+ /* Start with the generic configuration for this device. */
+- bits_per_word = spi->bits_per_word;
+ speed = spi->max_speed_hz;
+
+	/*
+	 * Modify the configuration if the transfer overrides it. Do not allow
+	 * the transfer to overwrite the generic configuration with zeros.
+ * the transfer to overwrite the generic configuration with zeros.
+ */
+ if (t) {
+- if (t->bits_per_word)
+- bits_per_word = t->bits_per_word;
+-
+ if (t->speed_hz)
+ speed = min(t->speed_hz, spi->max_speed_hz);
+ }
+@@ -415,7 +409,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
+ }
+
+ /* Request IRQ */
+- hw->irqnum = irq_of_parse_and_map(np, 0);
++ ret = platform_get_irq(op, 0);
++ if (ret < 0)
++ goto free_host;
++ hw->irqnum = ret;
++
+ ret = request_irq(hw->irqnum, spi_ppc4xx_int,
+ 0, "spi_ppc4xx_of", (void *)hw);
+ if (ret) {
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 5b010094dace50..1f374cf4d6f65c 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -974,14 +974,16 @@ static int rockchip_spi_suspend(struct device *dev)
+ {
+ int ret;
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+- struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ ret = spi_controller_suspend(ctlr);
+ if (ret < 0)
+ return ret;
+
+- clk_disable_unprepare(rs->spiclk);
+- clk_disable_unprepare(rs->apb_pclk);
++ ret = pm_runtime_force_suspend(dev);
++ if (ret < 0) {
++ spi_controller_resume(ctlr);
++ return ret;
++ }
+
+ pinctrl_pm_select_sleep_state(dev);
+
+@@ -992,25 +994,14 @@ static int rockchip_spi_resume(struct device *dev)
+ {
+ int ret;
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+- struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+
+ pinctrl_pm_select_default_state(dev);
+
+- ret = clk_prepare_enable(rs->apb_pclk);
++ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+
+- ret = clk_prepare_enable(rs->spiclk);
+- if (ret < 0)
+- clk_disable_unprepare(rs->apb_pclk);
+-
+- ret = spi_controller_resume(ctlr);
+- if (ret < 0) {
+- clk_disable_unprepare(rs->spiclk);
+- clk_disable_unprepare(rs->apb_pclk);
+- }
+-
+- return 0;
++ return spi_controller_resume(ctlr);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
+diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
+index e11146932828a2..7cce2d2ab9ca61 100644
+--- a/drivers/spi/spi-rpc-if.c
++++ b/drivers/spi/spi-rpc-if.c
+@@ -198,9 +198,16 @@ static int __maybe_unused rpcif_spi_resume(struct device *dev)
+
+ static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
+
++static const struct platform_device_id rpc_if_spi_id_table[] = {
++ { .name = "rpc-if-spi" },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(platform, rpc_if_spi_id_table);
++
+ static struct platform_driver rpcif_spi_driver = {
+ .probe = rpcif_spi_probe,
+ .remove_new = rpcif_spi_remove,
++ .id_table = rpc_if_spi_id_table,
+ .driver = {
+ .name = "rpc-if-spi",
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
+index 0e48ffd499b9f6..f699ce1b402530 100644
+--- a/drivers/spi/spi-s3c64xx.c
++++ b/drivers/spi/spi-s3c64xx.c
+@@ -3,19 +3,20 @@
+ // Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ // Jaswinder Singh <jassi.brar@samsung.com>
+
+-#include <linux/init.h>
+-#include <linux/module.h>
+-#include <linux/interrupt.h>
+-#include <linux/delay.h>
++#include <linux/bitops.h>
++#include <linux/bits.h>
+ #include <linux/clk.h>
++#include <linux/delay.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_data/spi-s3c64xx.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/spi/spi.h>
+-#include <linux/of.h>
+-
+-#include <linux/platform_data/spi-s3c64xx.h>
+
+ #define MAX_SPI_PORTS 12
+ #define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
+@@ -76,6 +77,7 @@
+ #define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
+ #define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+
++#define S3C64XX_SPI_ST_TX_FIFO_LVL_SHIFT 6
+ #define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
+ #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
+ #define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
+@@ -106,9 +108,11 @@
+ #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
+ #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
+ (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
+-#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
+-#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
+- FIFO_LVL_MASK(i))
++#define TX_FIFO_LVL(v, sdd) (((v) & (sdd)->tx_fifomask) >> \
++ __ffs((sdd)->tx_fifomask))
++#define RX_FIFO_LVL(v, sdd) (((v) & (sdd)->rx_fifomask) >> \
++ __ffs((sdd)->rx_fifomask))
++#define FIFO_DEPTH(i) ((FIFO_LVL_MASK(i) >> 1) + 1)
+
+ #define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
+ #define S3C64XX_SPI_TRAILCNT_OFF 19
+@@ -133,6 +137,10 @@ struct s3c64xx_spi_dma_data {
+ * struct s3c64xx_spi_port_config - SPI Controller hardware info
+ * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
+ * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
++ * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
++ * length and position.
++ * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
++ * length and position.
+ * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
+ * @clk_div: Internal clock divider
+ * @quirks: Bitmask of known quirks
+@@ -150,6 +158,8 @@ struct s3c64xx_spi_dma_data {
+ struct s3c64xx_spi_port_config {
+ int fifo_lvl_mask[MAX_SPI_PORTS];
+ int rx_lvl_offset;
++ u32 rx_fifomask;
++ u32 tx_fifomask;
+ int tx_st_done;
+ int quirks;
+ int clk_div;
+@@ -179,6 +189,11 @@ struct s3c64xx_spi_port_config {
+ * @tx_dma: Local transmit DMA data (e.g. chan and direction)
+ * @port_conf: Local SPI port configuration data
+ * @port_id: Port identification number
++ * @fifo_depth: depth of the FIFO.
++ * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
++ * length and position.
++ * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
++ * length and position.
+ */
+ struct s3c64xx_spi_driver_data {
+ void __iomem *regs;
+@@ -198,6 +213,9 @@ struct s3c64xx_spi_driver_data {
+ struct s3c64xx_spi_dma_data tx_dma;
+ const struct s3c64xx_spi_port_config *port_conf;
+ unsigned int port_id;
++ unsigned int fifo_depth;
++ u32 rx_fifomask;
++ u32 tx_fifomask;
+ };
+
+ static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+@@ -221,7 +239,7 @@ static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+- } while (TX_FIFO_LVL(val, sdd) && loops--);
++ } while (TX_FIFO_LVL(val, sdd) && --loops);
+
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+@@ -234,7 +252,7 @@ static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+ readl(regs + S3C64XX_SPI_RX_DATA);
+ else
+ break;
+- } while (loops--);
++ } while (--loops);
+
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+@@ -405,12 +423,10 @@ static bool s3c64xx_spi_can_dma(struct spi_controller *host,
+ {
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
+
+- if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+- return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+- } else {
+- return false;
+- }
++ if (sdd->rx_dma.ch && sdd->tx_dma.ch)
++ return xfer->len >= sdd->fifo_depth;
+
++ return false;
+ }
+
+ static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+@@ -495,9 +511,7 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+ void __iomem *regs = sdd->regs;
+ unsigned long val = 1;
+ u32 status;
+-
+- /* max fifo depth available */
+- u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
++ u32 max_fifo = sdd->fifo_depth;
+
+ if (timeout_ms)
+ val = msecs_to_loops(timeout_ms);
+@@ -604,7 +618,7 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ * For any size less than the fifo size the below code is
+ * executed at least once.
+ */
+- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
++ loops = xfer->len / sdd->fifo_depth;
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+@@ -741,7 +755,7 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
+ struct spi_transfer *xfer)
+ {
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
+- const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
++ const unsigned int fifo_len = sdd->fifo_depth;
+ const void *tx_buf = NULL;
+ void *rx_buf = NULL;
+ int target_len = 0, origin_len = 0;
+@@ -769,10 +783,9 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
+ return status;
+ }
+
+- if (!is_polling(sdd) && (xfer->len > fifo_len) &&
++ if (!is_polling(sdd) && xfer->len >= fifo_len &&
+ sdd->rx_dma.ch && sdd->tx_dma.ch) {
+ use_dma = 1;
+-
+ } else if (xfer->len >= fifo_len) {
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+@@ -1146,6 +1159,23 @@ static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
+ return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
+ }
+
++static void s3c64xx_spi_set_fifomask(struct s3c64xx_spi_driver_data *sdd)
++{
++ const struct s3c64xx_spi_port_config *port_conf = sdd->port_conf;
++
++ if (port_conf->rx_fifomask)
++ sdd->rx_fifomask = port_conf->rx_fifomask;
++ else
++ sdd->rx_fifomask = FIFO_LVL_MASK(sdd) <<
++ port_conf->rx_lvl_offset;
++
++ if (port_conf->tx_fifomask)
++ sdd->tx_fifomask = port_conf->tx_fifomask;
++ else
++ sdd->tx_fifomask = FIFO_LVL_MASK(sdd) <<
++ S3C64XX_SPI_ST_TX_FIFO_LVL_SHIFT;
++}
++
+ static int s3c64xx_spi_probe(struct platform_device *pdev)
+ {
+ struct resource *mem_res;
+@@ -1191,6 +1221,10 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
+ sdd->port_id = pdev->id;
+ }
+
++ sdd->fifo_depth = FIFO_DEPTH(sdd);
++
++ s3c64xx_spi_set_fifomask(sdd);
++
+ sdd->cur_bpw = 8;
+
+ sdd->tx_dma.direction = DMA_MEM_TO_DEV;
+@@ -1280,7 +1314,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Targets attached\n",
+ sdd->port_id, host->num_chipselect);
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
+- mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
++ mem_res, sdd->fifo_depth);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index fb452bc7837270..6f12e4fb2e2e18 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -29,12 +29,15 @@
+
+ #include <asm/unaligned.h>
+
++#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0)
++
+ struct sh_msiof_chipdata {
+ u32 bits_per_word_mask;
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 ctlr_flags;
+ u16 min_div_pow;
++ u32 flags;
+ };
+
+ struct sh_msiof_spi_priv {
+@@ -133,14 +136,14 @@ struct sh_msiof_spi_priv {
+
+ /* SIFCTR */
+ #define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+-#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+-#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+-#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+-#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+-#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+-#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+-#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+-#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
++#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */
++#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */
++#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */
++#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */
++#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */
++#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */
++#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */
++#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */
+ #define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+ #define SIFCTR_TFUA_SHIFT 20
+ #define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+@@ -1072,6 +1075,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
+ .min_div_pow = 1,
+ };
+
++static const struct sh_msiof_chipdata rcar_r8a7795_data = {
++ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
++ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
++ .tx_fifo_size = 64,
++ .rx_fifo_size = 64,
++ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
++ .min_div_pow = 1,
++ .flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
++};
++
+ static const struct of_device_id sh_msiof_match[] __maybe_unused = {
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+@@ -1082,6 +1095,7 @@ static const struct of_device_id sh_msiof_match[] __maybe_unused = {
+ { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
+ { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
++ { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
+ { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
+@@ -1279,6 +1293,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
+ return -ENXIO;
+ }
+
++ if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
++ info->dtdl = 200;
++
+ if (info->mode == MSIOF_SPI_TARGET)
+ ctlr = spi_alloc_target(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index def74ae9b5f641..f37dd7dbb9d27b 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
+
+ static int stm32_qspi_get_mode(u8 buswidth)
+ {
+- if (buswidth == 4)
++ if (buswidth >= 4)
+ return CCR_BUSWIDTH_4;
+
+ return buswidth;
+@@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
+ return -EINVAL;
+
+ mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
+- if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
+- ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
+- gpiod_count(qspi->dev, "cs") == -ENOENT)) {
++ if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
+ dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
+ dev_err(qspi->dev, "configuration not supported\n");
+
+@@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
+ qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+
+ /*
+- * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
+- * are both set in spi->mode and "cs-gpios" properties is found in DT
++ * Dual flash mode is only enabled in case SPI_TX_OCTAL or SPI_RX_OCTAL
++ * is set in spi->mode and the "cs-gpios" property is found in DT
+ */
+- if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
++ if (mode) {
+ qspi->cr_reg |= CR_DFM;
+ dev_dbg(qspi->dev, "Dual flash mode enabled");
+ }
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index ef665f470c5b5e..40680b5fffc9ab 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -898,7 +898,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
+
+ if (!(sr & mask)) {
+- dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
++ dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ sr, ier);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_NONE;
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index 4d6db6182c5ed1..f5cd365c913a87 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1086,6 +1086,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
+ reset_control_deassert(tspi->rst);
+
+ spi_irq = platform_get_irq(pdev, 0);
++ if (spi_irq < 0)
++ return spi_irq;
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 8d6304cb061ec9..5c57c7378ee708 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1128,6 +1128,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ else
+ rx_dev = ctlr->dev.parent;
+
++ ret = -ENOMSG;
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync is done before each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+@@ -1157,6 +1158,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ }
+ }
+ }
++ /* No transfer has been mapped, bail out with success */
++ if (ret)
++ return 0;
+
+ ctlr->cur_rx_dma_dev = rx_dev;
+ ctlr->cur_tx_dma_dev = tx_dev;
+@@ -1624,6 +1628,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+ pm_runtime_put_noidle(ctlr->dev.parent);
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
++
++ msg->status = ret;
++ spi_finalize_current_message(ctlr);
++
+ return ret;
+ }
+ }
+@@ -3323,33 +3331,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
++static inline int __spi_check_suspended(const struct spi_controller *ctlr)
++{
++ return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
++}
++
++static inline void __spi_mark_suspended(struct spi_controller *ctlr)
++{
++ mutex_lock(&ctlr->bus_lock_mutex);
++ ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
++ mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
++static inline void __spi_mark_resumed(struct spi_controller *ctlr)
++{
++ mutex_lock(&ctlr->bus_lock_mutex);
++ ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
++ mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
+ int spi_controller_suspend(struct spi_controller *ctlr)
+ {
+- int ret;
++ int ret = 0;
+
+ /* Basically no-ops for non-queued controllers */
+- if (!ctlr->queued)
+- return 0;
+-
+- ret = spi_stop_queue(ctlr);
+- if (ret)
+- dev_err(&ctlr->dev, "queue stop failed\n");
++ if (ctlr->queued) {
++ ret = spi_stop_queue(ctlr);
++ if (ret)
++ dev_err(&ctlr->dev, "queue stop failed\n");
++ }
+
++ __spi_mark_suspended(ctlr);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_suspend);
+
+ int spi_controller_resume(struct spi_controller *ctlr)
+ {
+- int ret;
++ int ret = 0;
+
+- if (!ctlr->queued)
+- return 0;
+-
+- ret = spi_start_queue(ctlr);
+- if (ret)
+- dev_err(&ctlr->dev, "queue restart failed\n");
++ __spi_mark_resumed(ctlr);
+
++ if (ctlr->queued) {
++ ret = spi_start_queue(ctlr);
++ if (ret)
++ dev_err(&ctlr->dev, "queue restart failed\n");
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_resume);
+@@ -3973,7 +4000,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ return -EINVAL;
+ if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+ xfer->tx_nbits != SPI_NBITS_DUAL &&
+- xfer->tx_nbits != SPI_NBITS_QUAD)
++ xfer->tx_nbits != SPI_NBITS_QUAD &&
++ xfer->tx_nbits != SPI_NBITS_OCTAL)
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+@@ -3988,7 +4016,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ return -EINVAL;
+ if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+ xfer->rx_nbits != SPI_NBITS_DUAL &&
+- xfer->rx_nbits != SPI_NBITS_QUAD)
++ xfer->rx_nbits != SPI_NBITS_QUAD &&
++ xfer->rx_nbits != SPI_NBITS_OCTAL)
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+@@ -4153,8 +4182,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ ctlr->cur_msg = msg;
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (ret)
+- goto out;
+-
++ dev_err(&ctlr->dev, "noqueue transfer failed\n");
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
+@@ -4170,7 +4198,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ spi_idle_runtime_pm(ctlr);
+ }
+
+-out:
+ mutex_unlock(&ctlr->io_mutex);
+ }
+
+@@ -4193,6 +4220,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+ int status;
+ struct spi_controller *ctlr = spi->controller;
+
++ if (__spi_check_suspended(ctlr)) {
++ dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
++ return -ESHUTDOWN;
++ }
++
+ status = __spi_validate(spi, message);
+ if (status != 0)
+ return status;
+@@ -4235,6 +4267,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+ wait_for_completion(&done);
+ status = message->status;
+ }
++ message->complete = NULL;
+ message->context = NULL;
+
+ return status;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index d13dc15cc1919c..16bb4fc3a4ba9f 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -704,7 +704,9 @@ static const struct file_operations spidev_fops = {
+ static struct class *spidev_class;
+
+ static const struct spi_device_id spidev_spi_ids[] = {
++ { .name = "bh2228fv" },
+ { .name = "dh2228fv" },
++ { .name = "jg10309-01" },
+ { .name = "ltc2488" },
+ { .name = "sx1301" },
+ { .name = "bk4" },
+@@ -734,10 +736,12 @@ static int spidev_of_check(struct device *dev)
+ static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
++ { .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+ { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
+ { .compatible = "lwn,bk4", .data = &spidev_of_check },
+ { .compatible = "menlo,m53cpld", .data = &spidev_of_check },
+ { .compatible = "micron,spi-authenta", .data = &spidev_of_check },
++ { .compatible = "rohm,bh2228fv", .data = &spidev_of_check },
+ { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "semtech,sx1301", .data = &spidev_of_check },
+ { .compatible = "silabs,em3581", .data = &spidev_of_check },
+diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
+index 9cbd473487cb0d..6eea83ee779dde 100644
+--- a/drivers/spmi/hisi-spmi-controller.c
++++ b/drivers/spmi/hisi-spmi-controller.c
+@@ -303,7 +303,6 @@ static int spmi_controller_probe(struct platform_device *pdev)
+
+ spin_lock_init(&spmi_controller->lock);
+
+- ctrl->nr = spmi_controller->channel;
+ ctrl->dev.parent = pdev->dev.parent;
+ ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
+
+diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
+index b3c991e1ea40da..1261f381cae6ce 100644
+--- a/drivers/spmi/spmi-mtk-pmif.c
++++ b/drivers/spmi/spmi-mtk-pmif.c
+@@ -50,6 +50,7 @@ struct pmif {
+ struct clk_bulk_data clks[PMIF_MAX_CLKS];
+ size_t nclks;
+ const struct pmif_data *data;
++ raw_spinlock_t lock;
+ };
+
+ static const char * const pmif_clock_names[] = {
+@@ -314,6 +315,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ struct ch_reg *inf_reg;
+ int ret;
+ u32 data, cmd;
++ unsigned long flags;
+
+ /* Check for argument validation. */
+ if (sid & ~0xf) {
+@@ -334,6 +336,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ else
+ return -EINVAL;
+
++ raw_spin_lock_irqsave(&arb->lock, flags);
+ /* Wait for Software Interface FSM state to be IDLE. */
+ inf_reg = &arb->chan;
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+@@ -343,6 +346,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ /* set channel ready if the data has transferred */
+ if (pmif_is_fsm_vldclr(arb))
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
++ raw_spin_unlock_irqrestore(&arb->lock, flags);
+ dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ return ret;
+ }
+@@ -350,6 +354,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ /* Send the command. */
+ cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
+ pmif_writel(arb, cmd, inf_reg->ch_send);
++ raw_spin_unlock_irqrestore(&arb->lock, flags);
+
+ /*
+ * Wait for Software Interface FSM state to be WFVLDCLR,
+@@ -376,7 +381,8 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ struct ch_reg *inf_reg;
+ int ret;
+- u32 data, cmd;
++ u32 data, wdata, cmd;
++ unsigned long flags;
+
+ if (len > 4) {
+ dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
+@@ -394,6 +400,10 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ else
+ return -EINVAL;
+
++ /* Set the write data. */
++ memcpy(&wdata, buf, len);
++
++ raw_spin_lock_irqsave(&arb->lock, flags);
+ /* Wait for Software Interface FSM state to be IDLE. */
+ inf_reg = &arb->chan;
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+@@ -403,17 +413,17 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ /* set channel ready if the data has transferred */
+ if (pmif_is_fsm_vldclr(arb))
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
++ raw_spin_unlock_irqrestore(&arb->lock, flags);
+ dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ return ret;
+ }
+
+- /* Set the write data. */
+- memcpy(&data, buf, len);
+- pmif_writel(arb, data, inf_reg->wdata);
++ pmif_writel(arb, wdata, inf_reg->wdata);
+
+ /* Send the command. */
+ cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
+ pmif_writel(arb, cmd, inf_reg->ch_send);
++ raw_spin_unlock_irqrestore(&arb->lock, flags);
+
+ return 0;
+ }
+@@ -465,7 +475,7 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ for (i = 0; i < arb->nclks; i++)
+ arb->clks[i].id = pmif_clock_names[i];
+
+- err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
++ err = clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
+ goto err_put_ctrl;
+@@ -474,7 +484,7 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err);
+- goto err_put_ctrl;
++ goto err_put_clks;
+ }
+
+ ctrl->cmd = pmif_arb_cmd;
+@@ -488,6 +498,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+ arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
+ arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;
+
++ raw_spin_lock_init(&arb->lock);
++
+ platform_set_drvdata(pdev, ctrl);
+
+ err = spmi_controller_add(ctrl);
+@@ -498,6 +510,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
+
+ err_domain_remove:
+ clk_bulk_disable_unprepare(arb->nclks, arb->clks);
++err_put_clks:
++ clk_bulk_put(arb->nclks, arb->clks);
+ err_put_ctrl:
+ spmi_controller_put(ctrl);
+ return err;
+@@ -509,6 +523,7 @@ static void mtk_spmi_remove(struct platform_device *pdev)
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+
+ clk_bulk_disable_unprepare(arb->nclks, arb->clks);
++ clk_bulk_put(arb->nclks, arb->clks);
+ spmi_controller_remove(ctrl);
+ spmi_controller_put(ctrl);
+ }
+diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
+index ab080cf26c9fff..070a99a4180cc4 100644
+--- a/drivers/ssb/main.c
++++ b/drivers/ssb/main.c
+@@ -341,11 +341,13 @@ static int ssb_bus_match(struct device *dev, struct device_driver *drv)
+
+ static int ssb_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+ {
+- const struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
++ const struct ssb_device *ssb_dev;
+
+ if (!dev)
+ return -ENODEV;
+
++ ssb_dev = dev_to_ssb_dev(dev);
++
+ return add_uevent_var(env,
+ "MODALIAS=ssb:v%04Xid%04Xrev%02X",
+ ssb_dev->id.vendor, ssb_dev->id.coreid,
+@@ -837,7 +839,7 @@ static u32 clkfactor_f6_resolve(u32 v)
+ case SSB_CHIPCO_CLK_F6_7:
+ return 7;
+ }
+- return 0;
++ return 1;
+ }
+
+ /* Calculate the speed the backplane would run at a given set of clockcontrol values */
+diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
+index 8541995008da86..aa6f266b62a141 100644
+--- a/drivers/staging/greybus/arche-apb-ctrl.c
++++ b/drivers/staging/greybus/arche-apb-ctrl.c
+@@ -466,6 +466,7 @@ static const struct of_device_id arche_apb_ctrl_of_match[] = {
+ { .compatible = "usbffff,2", },
+ { },
+ };
++MODULE_DEVICE_TABLE(of, arche_apb_ctrl_of_match);
+
+ static struct platform_driver arche_apb_ctrl_device_driver = {
+ .probe = arche_apb_ctrl_probe,
+diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
+index 891b75327d7f73..b33977ccd52710 100644
+--- a/drivers/staging/greybus/arche-platform.c
++++ b/drivers/staging/greybus/arche-platform.c
+@@ -619,14 +619,7 @@ static const struct of_device_id arche_platform_of_match[] = {
+ { .compatible = "google,arche-platform", },
+ { },
+ };
+-
+-static const struct of_device_id arche_combined_id[] = {
+- /* Use PID/VID of SVC device */
+- { .compatible = "google,arche-platform", },
+- { .compatible = "usbffff,2", },
+- { },
+-};
+-MODULE_DEVICE_TABLE(of, arche_combined_id);
++MODULE_DEVICE_TABLE(of, arche_platform_of_match);
+
+ static struct platform_driver arche_platform_device_driver = {
+ .probe = arche_platform_probe,
+diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
+index 87d36948c61067..9999f84016992a 100644
+--- a/drivers/staging/greybus/light.c
++++ b/drivers/staging/greybus/light.c
+@@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
+ static struct gb_channel *get_channel_from_mode(struct gb_light *light,
+ u32 mode)
+ {
+- struct gb_channel *channel = NULL;
++ struct gb_channel *channel;
+ int i;
+
+ for (i = 0; i < light->channels_count; i++) {
+ channel = &light->channels[i];
+- if (channel && channel->mode == mode)
+- break;
++ if (channel->mode == mode)
++ return channel;
+ }
+- return channel;
++ return NULL;
+ }
+
+ static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
+@@ -147,6 +147,9 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+ channel = get_channel_from_mode(channel->light,
+ GB_CHANNEL_MODE_TORCH);
+
++ if (!channel)
++ return -EINVAL;
++
+ /* For non-flash we need to convert brightness to intensity */
+ intensity = channel->intensity_uA.min +
+ (channel->intensity_uA.step * channel->led->brightness);
+@@ -549,7 +552,10 @@ static int gb_lights_light_v4l2_register(struct gb_light *light)
+ }
+
+ channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
+- WARN_ON(!channel_flash);
++ if (!channel_flash) {
++ dev_err(dev, "failed to get flash channel from mode\n");
++ return -EINVAL;
++ }
+
+ fled = &channel_flash->fled;
+
+diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
+index 285df0e489a625..cfdfe66d74f17c 100644
+--- a/drivers/staging/iio/frequency/ad9834.c
++++ b/drivers/staging/iio/frequency/ad9834.c
+@@ -114,7 +114,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
+
+ clk_freq = clk_get_rate(st->mclk);
+
+- if (fout > (clk_freq / 2))
++ if (!clk_freq || fout > (clk_freq / 2))
+ return -EINVAL;
+
+ regval = ad9834_calc_freqreg(clk_freq, fout);
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 46db6d91542a40..2d0883a6408277 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work)
+ struct ad5933_state, work.work);
+ struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
+ __be16 buf[2];
+- int val[2];
++ u16 val[2];
+ unsigned char status;
+ int ret;
+
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index 06de5823eb8e32..d38c4674cd6d79 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -658,9 +658,6 @@ static int ad2s1210_probe(struct spi_device *spi)
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+- ret = ad2s1210_setup_gpios(st);
+- if (ret < 0)
+- return ret;
+
+ spi_set_drvdata(spi, indio_dev);
+
+@@ -671,6 +668,10 @@ static int ad2s1210_probe(struct spi_device *spi)
+ st->resolution = 12;
+ st->fexcit = AD2S1210_DEF_EXCIT;
+
++ ret = ad2s1210_setup_gpios(st);
++ if (ret < 0)
++ return ret;
++
+ indio_dev->info = &ad2s1210_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad2s1210_channels;
+diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
+index 9fb118e77a1f08..f1d44e4955fc63 100644
+--- a/drivers/staging/ks7010/ks7010_sdio.c
++++ b/drivers/staging/ks7010/ks7010_sdio.c
+@@ -395,9 +395,9 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+ priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
+ priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+
+- spin_lock(&priv->tx_dev.tx_dev_lock);
++ spin_lock_bh(&priv->tx_dev.tx_dev_lock);
+ result = enqueue_txdev(priv, p, size, complete_handler, skb);
+- spin_unlock(&priv->tx_dev.tx_dev_lock);
++ spin_unlock_bh(&priv->tx_dev.tx_dev_lock);
+
+ if (txq_has_space(priv))
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
+diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
+index 4b3fa6d93fe0ab..d8a1d4a58db6a4 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css.c
++++ b/drivers/staging/media/atomisp/pci/sh_css.c
+@@ -4737,6 +4737,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
+ sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (!mycs->yuv_scaler_binary) {
++ mycs->num_yuv_scaler = 0;
+ err = -ENOMEM;
+ return err;
+ }
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
+index 8f08df5c88cc36..569a2f59e5519f 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_frac.h
++++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
+@@ -30,12 +30,24 @@
+ #define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1))
+
+ /* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
+-#define sDIGIT_FITTING(v, a, b) \
+- min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \
+- sISP_VAL_MIN), sISP_VAL_MAX)
+-#define uDIGIT_FITTING(v, a, b) \
+- min((unsigned int)max((unsigned)(((v) >> uSHIFT) \
+- >> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \
+- uISP_VAL_MIN), uISP_VAL_MAX)
++static inline int sDIGIT_FITTING(int v, int a, int b)
++{
++ int fit_shift = sFRACTION_BITS_FITTING(a) - b;
++
++ v >>= sSHIFT;
++ v >>= fit_shift > 0 ? fit_shift : 0;
++
++ return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX);
++}
++
++static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
++{
++ int fit_shift = uFRACTION_BITS_FITTING(a) - b;
++
++ v >>= uSHIFT;
++ v >>= fit_shift > 0 ? fit_shift : 0;
++
++ return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX);
++}
+
+ #endif /* __SH_CSS_FRAC_H */
+diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
+index 1fd39a2fca98ae..95cca281e8a378 100644
+--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
+@@ -803,6 +803,7 @@ static int ipu_csc_scaler_release(struct file *file)
+
+ dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+
++ v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
+index e530767e80a5d1..55cc44a401bc43 100644
+--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
+@@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Initialize subdev media entity */
++ imgu_sd->subdev.entity.ops = &imgu_media_ops;
++ for (i = 0; i < IMGU_NODE_NUM; i++) {
++ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
++ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
++ }
+ r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
+ imgu_sd->subdev_pads);
+ if (r) {
+@@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
+ "failed initialize subdev media entity (%d)\n", r);
+ return r;
+ }
+- imgu_sd->subdev.entity.ops = &imgu_media_ops;
+- for (i = 0; i < IMGU_NODE_NUM; i++) {
+- imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+- }
+
+ /* Initialize subdev */
+ v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
+@@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+ }
+
+ /* Initialize media entities */
++ node->vdev_pad.flags = node->output ?
++ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
++ vdev->entity.ops = NULL;
+ r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
+ if (r) {
+ dev_err(dev, "failed initialize media entity (%d)\n", r);
+ mutex_destroy(&node->lock);
+ return r;
+ }
+- node->vdev_pad.flags = node->output ?
+- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+- vdev->entity.ops = NULL;
+
+ /* Initialize vbq */
+ vbq->type = node->vdev_fmt.type;
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index 84a41792cb4b80..ac398b5a973604 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -461,6 +461,9 @@ static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = {
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
++
++ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
++ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ };
+
+ static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+index fc9297232456f5..16c822637dc6ee 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+@@ -427,11 +427,11 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ unsigned int ctb_addr_x, ctb_addr_y;
+ struct cedrus_buffer *cedrus_buf;
+ dma_addr_t src_buf_addr;
+- dma_addr_t src_buf_end_addr;
+ u32 chroma_log2_weight_denom;
+ u32 num_entry_point_offsets;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
++ size_t slice_bytes;
+ u8 padding;
+ int count;
+ u32 reg;
+@@ -443,6 +443,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+ pred_weight_table = &slice_params->pred_weight_table;
+ num_entry_point_offsets = slice_params->num_entry_point_offsets;
+ cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
++ slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0);
+
+ /*
+ * If entry points offsets are present, we should get them
+@@ -490,7 +491,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
+
+- reg = slice_params->bit_size;
++ reg = slice_bytes * 8;
+ cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+
+ /* Source beginning and end addresses. */
+@@ -504,10 +505,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+
+ cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+
+- src_buf_end_addr = src_buf_addr +
+- DIV_ROUND_UP(slice_params->bit_size, 8);
+-
+- reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
++ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes);
+ cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+
+ /* Coding tree block address */
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+index b696bf884cbd69..32af0e96e762b4 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
+ {
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+
+- reset_control_assert(dev->rstc);
+-
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->ahb_clk);
+
++ reset_control_assert(dev->rstc);
++
+ return 0;
+ }
+
+@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
+ struct cedrus_dev *dev = dev_get_drvdata(device);
+ int ret;
+
++ ret = reset_control_reset(dev->rstc);
++ if (ret) {
++ dev_err(dev->dev, "Failed to apply reset\n");
++
++ return ret;
++ }
++
+ ret = clk_prepare_enable(dev->ahb_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable AHB clock\n");
+
+- return ret;
++ goto err_rst;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
+ goto err_mod_clk;
+ }
+
+- ret = reset_control_reset(dev->rstc);
+- if (ret) {
+- dev_err(dev->dev, "Failed to apply reset\n");
+-
+- goto err_ram_clk;
+- }
+-
+ return 0;
+
+-err_ram_clk:
+- clk_disable_unprepare(dev->ram_clk);
+ err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+ err_ahb_clk:
+ clk_disable_unprepare(dev->ahb_clk);
++err_rst:
++ reset_control_assert(dev->rstc);
+
+ return ret;
+ }
+diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+index 258aa0e37f5544..4c3684dd902ed4 100644
+--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+@@ -937,8 +937,9 @@ static int create_component(struct vchiq_mmal_instance *instance,
+ /* build component create message */
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
+ m.u.component_create.client_component = component->client_component;
+- strncpy(m.u.component_create.name, name,
+- sizeof(m.u.component_create.name));
++ strscpy_pad(m.u.component_create.name, name,
++ sizeof(m.u.component_create.name));
++ m.u.component_create.pid = 0;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_create),
+diff --git a/drivers/staging/vme_user/vme_fake.c b/drivers/staging/vme_user/vme_fake.c
+index 7c53a8a7b79b81..95730d1270af8f 100644
+--- a/drivers/staging/vme_user/vme_fake.c
++++ b/drivers/staging/vme_user/vme_fake.c
+@@ -1064,6 +1064,12 @@ static int __init fake_init(void)
+ struct vme_slave_resource *slave_image;
+ struct vme_lm_resource *lm;
+
++ if (geoid < 0 || geoid >= VME_MAX_SLOTS) {
++ pr_err("VME geographical address must be between 0 and %d (exclusive), but got %d\n",
++ VME_MAX_SLOTS, geoid);
++ return -EINVAL;
++ }
++
+ /* We need a fake parent device */
+ vme_root = root_device_register("vme");
+ if (IS_ERR(vme_root))
+diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
+index 2f5eafd5093402..4566e391d913fc 100644
+--- a/drivers/staging/vme_user/vme_tsi148.c
++++ b/drivers/staging/vme_user/vme_tsi148.c
+@@ -2252,6 +2252,12 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ struct vme_dma_resource *dma_ctrlr;
+ struct vme_lm_resource *lm;
+
++ if (geoid < 0 || geoid >= VME_MAX_SLOTS) {
++ dev_err(&pdev->dev, "VME geographical address must be between 0 and %d (exclusive), but got %d\n",
++ VME_MAX_SLOTS, geoid);
++ return -EINVAL;
++ }
++
+ /* If we want to support more than one of each bridge, we need to
+ * dynamically generate this so we get one per device
+ */
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index d5860c1c1f469e..9a88774836c9db 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3634,6 +3634,8 @@ static int __init target_core_init_configfs(void)
+ {
+ struct configfs_subsystem *subsys = &target_core_fabrics;
+ struct t10_alua_lu_gp *lu_gp;
++ struct cred *kern_cred;
++ const struct cred *old_cred;
+ int ret;
+
+ pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
+@@ -3710,11 +3712,21 @@ static int __init target_core_init_configfs(void)
+ if (ret < 0)
+ goto out;
+
++ /* We use the kernel credentials to access the target directory */
++ kern_cred = prepare_kernel_cred(&init_task);
++ if (!kern_cred) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ old_cred = override_creds(kern_cred);
+ target_init_dbroot();
++ revert_creds(old_cred);
++ put_cred(kern_cred);
+
+ return 0;
+
+ out:
++ target_xcopy_release_pt();
+ configfs_unregister_subsystem(subsys);
+ core_dev_release_virtual_lun0();
+ rd_module_exit();
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index b6523d4b9259e4..86590a7e29f6ae 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
+ struct se_session *se_sess = se_cmd->se_sess;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+- unsigned long flags;
+
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
+@@ -178,10 +177,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
+ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+
+- spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+- list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+- spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(transport_lookup_tmr_lun);
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 4d447520bab87d..4e4cf6c34a775c 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -332,11 +332,13 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+ }
+
+ iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+- if (is_write)
++ if (is_write) {
++ file_start_write(fd);
+ ret = vfs_iter_write(fd, &iter, &pos, 0);
+- else
++ file_end_write(fd);
++ } else {
+ ret = vfs_iter_read(fd, &iter, &pos, 0);
+-
++ }
+ if (is_write) {
+ if (ret < 0 || ret != data_length) {
+ pr_err("%s() write returned %d\n", __func__, ret);
+@@ -467,7 +469,9 @@ fd_execute_write_same(struct se_cmd *cmd)
+ }
+
+ iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
++ file_start_write(fd_dev->fd_file);
+ ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
++ file_end_write(fd_dev->fd_file);
+
+ kfree(bvec);
+ if (ret < 0 || ret != len) {
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 0d4f09693ef46e..da59c1ac2f2e65 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -907,12 +907,15 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+
+ return 0;
+ fail:
+- if (bio)
+- bio_put(bio);
++ if (bio) {
++ bio_uninit(bio);
++ kfree(bio);
++ }
+ while (req->bio) {
+ bio = req->bio;
+ req->bio = bio->bi_next;
+- bio_put(bio);
++ bio_uninit(bio);
++ kfree(bio);
+ }
+ req->biotail = NULL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0686882bcbda35..fb93d74c5d0b2a 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3627,6 +3627,10 @@ int transport_generic_handle_tmr(
+ unsigned long flags;
+ bool aborted = false;
+
++ spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
++ list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
++ spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
++
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ aborted = true;
+diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
+index 64f0e047c23d2a..1892e49a8e6a68 100644
+--- a/drivers/tee/optee/device.c
++++ b/drivers/tee/optee/device.c
+@@ -60,7 +60,16 @@ static void optee_release_device(struct device *dev)
+ kfree(optee_device);
+ }
+
+-static int optee_register_device(const uuid_t *device_uuid)
++static ssize_t need_supplicant_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return 0;
++}
++
++static DEVICE_ATTR_RO(need_supplicant);
++
++static int optee_register_device(const uuid_t *device_uuid, u32 func)
+ {
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+@@ -81,9 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid)
+ if (rc) {
+ pr_err("device registration failed, err: %d\n", rc);
+ put_device(&optee_device->dev);
++ return rc;
+ }
+
+- return rc;
++ if (func == PTA_CMD_GET_DEVICES_SUPP)
++ device_create_file(&optee_device->dev,
++ &dev_attr_need_supplicant);
++
++ return 0;
+ }
+
+ static int __optee_enumerate_devices(u32 func)
+@@ -142,7 +156,7 @@ static int __optee_enumerate_devices(u32 func)
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+- rc = optee_register_device(&device_uuid[idx]);
++ rc = optee_register_device(&device_uuid[idx], func);
+ if (rc)
+ goto out_shm;
+ }
+diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
+index 0828240f27e624..b8ba360e863edf 100644
+--- a/drivers/tee/optee/ffa_abi.c
++++ b/drivers/tee/optee/ffa_abi.c
+@@ -657,7 +657,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ const struct ffa_ops *ops)
+ {
+ const struct ffa_msg_ops *msg_ops = ops->msg_ops;
+- struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
++ struct ffa_send_direct_data data = {
++ .data0 = OPTEE_FFA_GET_API_VERSION,
++ };
+ int rc;
+
+ msg_ops->mode_32bit_set(ffa_dev);
+@@ -674,7 +676,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ return false;
+ }
+
+- data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
++ data = (struct ffa_send_direct_data){
++ .data0 = OPTEE_FFA_GET_OS_VERSION,
++ };
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+@@ -694,7 +698,9 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ u32 *sec_caps,
+ unsigned int *rpc_param_count)
+ {
+- struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
++ struct ffa_send_direct_data data = {
++ .data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
++ };
+ int rc;
+
+ rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
+diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
+index 3acc9288b31056..3b1030fc4fbfe0 100644
+--- a/drivers/thermal/broadcom/bcm2835_thermal.c
++++ b/drivers/thermal/broadcom/bcm2835_thermal.c
+@@ -185,7 +185,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ return err;
+ }
+
+- data->clk = devm_clk_get(&pdev->dev, NULL);
++ data->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(data->clk)) {
+ err = PTR_ERR(data->clk);
+ if (err != -EPROBE_DEFER)
+@@ -193,10 +193,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ return err;
+ }
+
+- err = clk_prepare_enable(data->clk);
+- if (err)
+- return err;
+-
+ rate = clk_get_rate(data->clk);
+ if ((rate < 1920000) || (rate > 5000000))
+ dev_warn(&pdev->dev,
+@@ -211,7 +207,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev,
+ "Failed to register the thermal device: %d\n",
+ err);
+- goto err_clk;
++ return err;
+ }
+
+ /*
+@@ -236,7 +232,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev,
+ "Not able to read trip_temp: %d\n",
+ err);
+- goto err_tz;
++ return err;
+ }
+
+ /* set bandgap reference voltage and enable voltage regulator */
+@@ -269,32 +265,23 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ */
+ err = thermal_add_hwmon_sysfs(tz);
+ if (err)
+- goto err_tz;
++ return err;
+
+ bcm2835_thermal_debugfs(pdev);
+
+ return 0;
+-err_tz:
+- devm_thermal_of_zone_unregister(&pdev->dev, tz);
+-err_clk:
+- clk_disable_unprepare(data->clk);
+-
+- return err;
+ }
+
+-static int bcm2835_thermal_remove(struct platform_device *pdev)
++static void bcm2835_thermal_remove(struct platform_device *pdev)
+ {
+ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(data->debugfsdir);
+- clk_disable_unprepare(data->clk);
+-
+- return 0;
+ }
+
+ static struct platform_driver bcm2835_thermal_driver = {
+ .probe = bcm2835_thermal_probe,
+- .remove = bcm2835_thermal_remove,
++ .remove_new = bcm2835_thermal_remove,
+ .driver = {
+ .name = "bcm2835_thermal",
+ .of_match_table = bcm2835_thermal_of_match_table,
+diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
+index 262e62ab6cf2f2..90b828bcca2436 100644
+--- a/drivers/thermal/devfreq_cooling.c
++++ b/drivers/thermal/devfreq_cooling.c
+@@ -201,7 +201,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
+
+ res = dfc->power_ops->get_real_power(df, power, freq, voltage);
+ if (!res) {
+- state = dfc->capped_state;
++ state = dfc->max_state - dfc->capped_state;
+
+ /* Convert EM power into milli-Watts first */
+ dfc->res_util = dfc->em_pd->table[state].power;
+diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
+index 1b121066521ffc..49cdfaa3a92793 100644
+--- a/drivers/thermal/gov_bang_bang.c
++++ b/drivers/thermal/gov_bang_bang.c
+@@ -13,28 +13,21 @@
+
+ #include "thermal_core.h"
+
+-static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
++static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_index)
+ {
+- struct thermal_trip trip;
++ const struct thermal_trip *trip = &tz->trips[trip_index];
+ struct thermal_instance *instance;
+- int ret;
+-
+- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
+- if (ret) {
+- pr_warn_once("Failed to retrieve trip point %d\n", trip_id);
+- return ret;
+- }
+
+- if (!trip.hysteresis)
++ if (!trip->hysteresis)
+ dev_info_once(&tz->device,
+ "Zero hysteresis value for thermal zone %s\n", tz->type);
+
+ dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
+- trip_id, trip.temperature, tz->temperature,
+- trip.hysteresis);
++ trip_index, trip->temperature, tz->temperature,
++ trip->hysteresis);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+- if (instance->trip != trip_id)
++ if (instance->trip != trip)
+ continue;
+
+ /* in case fan is in initial state, switch the fan off */
+@@ -52,10 +45,10 @@ static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
+ * enable fan when temperature exceeds trip_temp and disable
+ * the fan in case it falls below trip_temp minus hysteresis
+ */
+- if (instance->target == 0 && tz->temperature >= trip.temperature)
++ if (instance->target == 0 && tz->temperature >= trip->temperature)
+ instance->target = 1;
+ else if (instance->target == 1 &&
+- tz->temperature <= trip.temperature - trip.hysteresis)
++ tz->temperature <= trip->temperature - trip->hysteresis)
+ instance->target = 0;
+
+ dev_dbg(&instance->cdev->device, "target=%d\n",
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index 03c2daeb6ee8bc..2abeb8979f5002 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -49,7 +49,7 @@ static long get_target_state(struct thermal_zone_device *tz,
+ /**
+ * fair_share_throttle - throttles devices associated with the given zone
+ * @tz: thermal_zone_device
+- * @trip: trip point index
++ * @trip_index: trip point index
+ *
+ * Throttling Logic: This uses three parameters to calculate the new
+ * throttle state of the cooling devices associated with the given zone.
+@@ -65,8 +65,9 @@ static long get_target_state(struct thermal_zone_device *tz,
+ * (Heavily assumes the trip points are in ascending order)
+ * new_state of cooling device = P3 * P2 * P1
+ */
+-static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
++static int fair_share_throttle(struct thermal_zone_device *tz, int trip_index)
+ {
++ const struct thermal_trip *trip = &tz->trips[trip_index];
+ struct thermal_instance *instance;
+ int total_weight = 0;
+ int total_instance = 0;
+diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
+index 8642f1096b91c8..fc969642f70b72 100644
+--- a/drivers/thermal/gov_power_allocator.c
++++ b/drivers/thermal/gov_power_allocator.c
+@@ -90,12 +90,14 @@ static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
+ u32 sustainable_power = 0;
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
++ const struct thermal_trip *trip_max_desired_temperature =
++ &tz->trips[params->trip_max_desired_temperature];
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+ u32 min_power;
+
+- if (instance->trip != params->trip_max_desired_temperature)
++ if (instance->trip != trip_max_desired_temperature)
+ continue;
+
+ if (!cdev_is_power_actor(cdev))
+@@ -383,12 +385,13 @@ static int allocate_power(struct thermal_zone_device *tz,
+ {
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
++ const struct thermal_trip *trip_max_desired_temperature =
++ &tz->trips[params->trip_max_desired_temperature];
+ u32 *req_power, *max_power, *granted_power, *extra_actor_power;
+ u32 *weighted_req_power;
+ u32 total_req_power, max_allocatable_power, total_weighted_req_power;
+ u32 total_granted_power, power_range;
+ int i, num_actors, total_weight, ret = 0;
+- int trip_max_desired_temperature = params->trip_max_desired_temperature;
+
+ num_actors = 0;
+ total_weight = 0;
+@@ -564,12 +567,14 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
+ {
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
++ const struct thermal_trip *trip_max_desired_temperature =
++ &tz->trips[params->trip_max_desired_temperature];
+ u32 req_power;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+
+- if ((instance->trip != params->trip_max_desired_temperature) ||
++ if ((instance->trip != trip_max_desired_temperature) ||
+ (!cdev_is_power_actor(instance->cdev)))
+ continue;
+
+@@ -710,7 +715,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip_id)
+
+ ret = __thermal_zone_get_trip(tz, params->trip_switch_on, &trip);
+ if (!ret && (tz->temperature < trip.temperature)) {
+- update = (tz->last_temperature >= trip.temperature);
++ update = tz->passive;
+ tz->passive = 0;
+ reset_pid_controller(params);
+ allow_maximum_power(tz, update);
+diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
+index 1050fb4d94c2d4..849dc1ec8d27c8 100644
+--- a/drivers/thermal/gov_step_wise.c
++++ b/drivers/thermal/gov_step_wise.c
+@@ -81,26 +81,24 @@ static void update_passive_instance(struct thermal_zone_device *tz,
+
+ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
+ {
++ const struct thermal_trip *trip = &tz->trips[trip_id];
+ enum thermal_trend trend;
+ struct thermal_instance *instance;
+- struct thermal_trip trip;
+ bool throttle = false;
+ int old_target;
+
+- __thermal_zone_get_trip(tz, trip_id, &trip);
+-
+ trend = get_tz_trend(tz, trip_id);
+
+- if (tz->temperature >= trip.temperature) {
++ if (tz->temperature >= trip->temperature) {
+ throttle = true;
+- trace_thermal_zone_trip(tz, trip_id, trip.type);
++ trace_thermal_zone_trip(tz, trip_id, trip->type);
+ }
+
+ dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
+- trip_id, trip.type, trip.temperature, trend, throttle);
++ trip_id, trip->type, trip->temperature, trend, throttle);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+- if (instance->trip != trip_id)
++ if (instance->trip != trip)
+ continue;
+
+ old_target = instance->target;
+@@ -114,11 +112,11 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id
+ /* Activate a passive thermal instance */
+ if (old_target == THERMAL_NO_TARGET &&
+ instance->target != THERMAL_NO_TARGET)
+- update_passive_instance(tz, trip.type, 1);
++ update_passive_instance(tz, trip->type, 1);
+ /* Deactivate a passive thermal instance */
+ else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET)
+- update_passive_instance(tz, trip.type, -1);
++ update_passive_instance(tz, trip->type, -1);
+
+ instance->initialized = true;
+ mutex_lock(&instance->cdev->lock);
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+index 3ca0a2f5937f26..cdf88cadfc4f19 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+@@ -113,14 +113,14 @@ static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
+ int *temp)
+ {
+ int cpu;
+- int curr_temp;
++ int curr_temp, ret;
+
+ *temp = 0;
+
+ for_each_online_cpu(cpu) {
+- curr_temp = intel_tcc_get_temp(cpu, false);
+- if (curr_temp < 0)
+- return curr_temp;
++ ret = intel_tcc_get_temp(cpu, &curr_temp, false);
++ if (ret < 0)
++ return ret;
+ if (!*temp || curr_temp > *temp)
+ *temp = curr_temp;
+ }
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+index 0d1e980072704e..24eaec5d095c1d 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+@@ -223,19 +223,19 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
+
+ INIT_DELAYED_WORK(&pci_info->work, proc_thermal_threshold_work_fn);
+
+- ret = proc_thermal_add(&pdev->dev, proc_priv);
+- if (ret) {
+- dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n");
+- pci_info->no_legacy = 1;
+- }
+-
+ proc_priv->priv_data = pci_info;
+ pci_info->proc_priv = proc_priv;
+ pci_set_drvdata(pdev, proc_priv);
+
+ ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
+ if (ret)
+- goto err_ret_thermal;
++ return ret;
++
++ ret = proc_thermal_add(&pdev->dev, proc_priv);
++ if (ret) {
++ dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n");
++ pci_info->no_legacy = 1;
++ }
+
+ psv_trip.temperature = get_trip_temp(pci_info);
+
+@@ -245,7 +245,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
+ &tzone_params, 0, 0);
+ if (IS_ERR(pci_info->tzone)) {
+ ret = PTR_ERR(pci_info->tzone);
+- goto err_ret_mmio;
++ goto err_del_legacy;
+ }
+
+ /* request and enable interrupt */
+@@ -276,12 +276,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
+ pci_free_irq_vectors(pdev);
+ err_ret_tzone:
+ thermal_zone_device_unregister(pci_info->tzone);
+-err_ret_mmio:
+- proc_thermal_mmio_remove(pdev, proc_priv);
+-err_ret_thermal:
++err_del_legacy:
+ if (!pci_info->no_legacy)
+ proc_thermal_remove(proc_priv);
+- pci_disable_device(pdev);
++ proc_thermal_mmio_remove(pdev, proc_priv);
+
+ return ret;
+ }
+@@ -303,7 +301,6 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
+ proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
+ if (!pci_info->no_legacy)
+ proc_thermal_remove(proc_priv);
+- pci_disable_device(pdev);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
+index 2f00fc3bf274a3..e964a9375722ad 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
+@@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu)
+ if (topology_physical_package_id(cpu))
+ return 0;
+
+- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
++ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true);
+ if (!rp) {
+- rp = rapl_add_package(cpu, &rapl_mmio_priv, true);
++ rp = rapl_add_package_cpuslocked(cpu, &rapl_mmio_priv, true);
+ if (IS_ERR(rp))
+ return PTR_ERR(rp);
+ }
+@@ -42,14 +42,14 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu)
+ struct rapl_package *rp;
+ int lead_cpu;
+
+- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
++ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true);
+ if (!rp)
+ return 0;
+
+ cpumask_clear_cpu(cpu, &rp->cpumask);
+ lead_cpu = cpumask_first(&rp->cpumask);
+ if (lead_cpu >= nr_cpu_ids)
+- rapl_remove_package(rp);
++ rapl_remove_package_cpuslocked(rp);
+ else if (rp->lead_cpu == cpu)
+ rp->lead_cpu = lead_cpu;
+ return 0;
+diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
+index c69db6c90869c8..1c5a429b2e3e90 100644
+--- a/drivers/thermal/intel/intel_hfi.c
++++ b/drivers/thermal/intel/intel_hfi.c
+@@ -24,6 +24,7 @@
+ #include <linux/bitops.h>
+ #include <linux/cpufeature.h>
+ #include <linux/cpumask.h>
++#include <linux/delay.h>
+ #include <linux/gfp.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -34,7 +35,9 @@
+ #include <linux/processor.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/suspend.h>
+ #include <linux/string.h>
++#include <linux/syscore_ops.h>
+ #include <linux/topology.h>
+ #include <linux/workqueue.h>
+
+@@ -347,6 +350,52 @@ static void init_hfi_instance(struct hfi_instance *hfi_instance)
+ hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+ }
+
++/* Caller must hold hfi_instance_lock. */
++static void hfi_enable(void)
++{
++ u64 msr_val;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++}
++
++static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
++{
++ phys_addr_t hw_table_pa;
++ u64 msr_val;
++
++ hw_table_pa = virt_to_phys(hfi_instance->hw_table);
++ msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
++}
++
++/* Caller must hold hfi_instance_lock. */
++static void hfi_disable(void)
++{
++ u64 msr_val;
++ int i;
++
++ rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
++ wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++
++ /*
++ * Wait for hardware to acknowledge the disabling of HFI. Some
++ * processors may not do it. Wait for ~2ms. This is a reasonable
++ * time for hardware to complete any pending actions on the HFI
++ * memory.
++ */
++ for (i = 0; i < 2000; i++) {
++ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
++ if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
++ break;
++
++ udelay(1);
++ cpu_relax();
++ }
++}
++
+ /**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu: CPU in which the HFI will be enabled
+@@ -364,8 +413,6 @@ void intel_hfi_online(unsigned int cpu)
+ {
+ struct hfi_instance *hfi_instance;
+ struct hfi_cpu_info *info;
+- phys_addr_t hw_table_pa;
+- u64 msr_val;
+ u16 die_id;
+
+ /* Nothing to do if hfi_instances are missing. */
+@@ -403,14 +450,16 @@ void intel_hfi_online(unsigned int cpu)
+ /*
+ * Hardware is programmed with the physical address of the first page
+ * frame of the table. Hence, the allocated memory must be page-aligned.
++ *
++ * Some processors do not forget the initial address of the HFI table
++ * even after having been reprogrammed. Keep using the same pages. Do
++ * not free them.
+ */
+ hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!hfi_instance->hw_table)
+ goto unlock;
+
+- hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+-
+ /*
+ * Allocate memory to keep a local copy of the table that
+ * hardware generates.
+@@ -420,16 +469,6 @@ void intel_hfi_online(unsigned int cpu)
+ if (!hfi_instance->local_table)
+ goto free_hw_table;
+
+- /*
+- * Program the address of the feedback table of this die/package. On
+- * some processors, hardware remembers the old address of the HFI table
+- * even after having been reprogrammed and re-enabled. Thus, do not free
+- * the pages allocated for the table or reprogram the hardware with a
+- * new base address. Namely, program the hardware only once.
+- */
+- msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+-
+ init_hfi_instance(hfi_instance);
+
+ INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+@@ -438,13 +477,8 @@ void intel_hfi_online(unsigned int cpu)
+
+ cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+- /*
+- * Enable the hardware feedback interface and never disable it. See
+- * comment on programming the address of the table.
+- */
+- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+- msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
+
+ unlock:
+ mutex_unlock(&hfi_instance_lock);
+@@ -484,6 +518,10 @@ void intel_hfi_offline(unsigned int cpu)
+
+ mutex_lock(&hfi_instance_lock);
+ cpumask_clear_cpu(cpu, hfi_instance->cpus);
++
++ if (!cpumask_weight(hfi_instance->cpus))
++ hfi_disable();
++
+ mutex_unlock(&hfi_instance_lock);
+ }
+
+@@ -532,6 +570,30 @@ static __init int hfi_parse_features(void)
+ return 0;
+ }
+
++static void hfi_do_enable(void)
++{
++ /* This code runs only on the boot CPU. */
++ struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
++ struct hfi_instance *hfi_instance = info->hfi_instance;
++
++ /* No locking needed. There is no concurrency with CPU online. */
++ hfi_set_hw_table(hfi_instance);
++ hfi_enable();
++}
++
++static int hfi_do_disable(void)
++{
++ /* No locking needed. There is no concurrency with CPU offline. */
++ hfi_disable();
++
++ return 0;
++}
++
++static struct syscore_ops hfi_pm_ops = {
++ .resume = hfi_do_enable,
++ .suspend = hfi_do_disable,
++};
++
+ void __init intel_hfi_init(void)
+ {
+ struct hfi_instance *hfi_instance;
+@@ -563,6 +625,8 @@ void __init intel_hfi_init(void)
+ if (!hfi_updates_wq)
+ goto err_nomem;
+
++ register_syscore_ops(&hfi_pm_ops);
++
+ return;
+
+ err_nomem:
+diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
+index 36243a3972fd7f..5ac5cb60bae67b 100644
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -256,7 +256,7 @@ static int max_idle_set(const char *arg, const struct kernel_param *kp)
+
+ static const struct kernel_param_ops max_idle_ops = {
+ .set = max_idle_set,
+- .get = param_get_int,
++ .get = param_get_byte,
+ };
+
+ module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644);
+diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
+index 2e5c741c41ca03..5e8b7f34b39510 100644
+--- a/drivers/thermal/intel/intel_tcc.c
++++ b/drivers/thermal/intel/intel_tcc.c
+@@ -103,18 +103,19 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);
+ /**
+ * intel_tcc_get_temp() - returns the current temperature
+ * @cpu: cpu that the MSR should be run on, negative value means any cpu.
++ * @temp: pointer to the memory for saving cpu temperature.
+ * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
+ *
+ * Get the current temperature returned by the CPU core/package level
+ * thermal sensor, in degrees C.
+ *
+- * Return: Temperature in degrees C on success, negative error code otherwise.
++ * Return: 0 on success, negative error code otherwise.
+ */
+-int intel_tcc_get_temp(int cpu, bool pkg)
++int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
+ {
+ u32 low, high;
+ u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
+- int tjmax, temp, err;
++ int tjmax, err;
+
+ tjmax = intel_tcc_get_tjmax(cpu);
+ if (tjmax < 0)
+@@ -131,9 +132,8 @@ int intel_tcc_get_temp(int cpu, bool pkg)
+ if (!(low & BIT(31)))
+ return -ENODATA;
+
+- temp = tjmax - ((low >> 16) & 0x7f);
++ *temp = tjmax - ((low >> 16) & 0x7f);
+
+- /* Do not allow negative CPU temperature */
+- return temp >= 0 ? temp : -ENODATA;
++ return 0;
+ }
+ EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC);
+diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+index 11a7f8108bbbfe..61c3d450ee605a 100644
+--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+@@ -108,11 +108,11 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
+ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
+ {
+ struct zone_device *zonedev = thermal_zone_device_priv(tzd);
+- int val;
++ int val, ret;
+
+- val = intel_tcc_get_temp(zonedev->cpu, true);
+- if (val < 0)
+- return val;
++ ret = intel_tcc_get_temp(zonedev->cpu, &val, true);
++ if (ret < 0)
++ return ret;
+
+ *temp = val * 1000;
+ pr_debug("sys_get_curr_temp %d\n", *temp);
+diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
+index 133098dc085471..99ca0c7bc41c79 100644
+--- a/drivers/thermal/loongson2_thermal.c
++++ b/drivers/thermal/loongson2_thermal.c
+@@ -127,7 +127,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
+ if (!IS_ERR(tzd))
+ break;
+
+- if (PTR_ERR(tzd) != ENODEV)
++ if (PTR_ERR(tzd) != -ENODEV)
+ continue;
+
+ return dev_err_probe(dev, PTR_ERR(tzd), "failed to register");
+diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
+index 843214d30bd8b2..9ee2e7283435ac 100644
+--- a/drivers/thermal/mediatek/auxadc_thermal.c
++++ b/drivers/thermal/mediatek/auxadc_thermal.c
+@@ -690,6 +690,9 @@ static const struct mtk_thermal_data mt7986_thermal_data = {
+ .adcpnp = mt7986_adcpnp,
+ .sensor_mux_values = mt7986_mux_values,
+ .version = MTK_THERMAL_V3,
++ .apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1,
++ .apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3),
++ .apmixed_buffer_ctl_set = BIT(0),
+ };
+
+ static bool mtk_thermal_temp_is_valid(int temp)
+@@ -1267,7 +1270,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+
+ mtk_thermal_turn_on_buffer(mt, apmixed_base);
+
+- if (mt->conf->version != MTK_THERMAL_V2)
++ if (mt->conf->version != MTK_THERMAL_V1)
+ mtk_thermal_release_periodic_ts(mt, auxadc_base);
+
+ if (mt->conf->version == MTK_THERMAL_V1)
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index effd9b00a424bb..8d0ccf494ba224 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -679,8 +679,10 @@ static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td
+
+ lvts_td->calib = devm_krealloc(dev, lvts_td->calib,
+ lvts_td->calib_len + len, GFP_KERNEL);
+- if (!lvts_td->calib)
++ if (!lvts_td->calib) {
++ kfree(efuse);
+ return -ENOMEM;
++ }
+
+ memcpy(lvts_td->calib + lvts_td->calib_len, efuse, len);
+
+@@ -698,7 +700,11 @@ static int lvts_golden_temp_init(struct device *dev, u32 *value)
+
+ gt = (*value) >> 24;
+
+- if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
++ /* A zero value for gt means that device has invalid efuse data */
++ if (!gt)
++ return -ENODATA;
++
++ if (gt < LVTS_GOLDEN_TEMP_MAX)
+ golden_temp = gt;
+
+ coeff_b = golden_temp * 500 + LVTS_COEFF_B;
+@@ -1202,6 +1208,8 @@ static int lvts_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ lvts_data = of_device_get_match_data(dev);
++ if (!lvts_data)
++ return -ENODEV;
+
+ lvts_td->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(lvts_td->clk))
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
+index f6edb12ec00419..5225b3621a56c4 100644
+--- a/drivers/thermal/qcom/lmh.c
++++ b/drivers/thermal/qcom/lmh.c
+@@ -95,6 +95,9 @@ static int lmh_probe(struct platform_device *pdev)
+ unsigned int enable_alg;
+ u32 node_id;
+
++ if (!qcom_scm_is_available())
++ return -EPROBE_DEFER;
++
+ lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
+ if (!lmh_data)
+ return -ENOMEM;
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index 98c356acfe9836..ee22672471e81b 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -264,7 +264,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
+ for (i = 0; i < priv->num_sensors; i++) {
+ dev_dbg(priv->dev,
+ "%s: sensor%d - data_point1:%#x data_point2:%#x\n",
+- __func__, i, p1[i], p2[i]);
++ __func__, i, p1[i], p2 ? p2[i] : 0);
+
+ if (!priv->sensor[i].slope)
+ priv->sensor[i].slope = SLOPE_DEFAULT;
+diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
+index ccc2eea7f9f548..404f01cca4dab5 100644
+--- a/drivers/thermal/qoriq_thermal.c
++++ b/drivers/thermal/qoriq_thermal.c
+@@ -57,6 +57,9 @@
+ #define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
+ * Control Register
+ */
++#define NUM_TTRCR_V1 4
++#define NUM_TTRCR_MAX 16
++
+ #define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
+ * Register n
+ */
+@@ -71,6 +74,7 @@ struct qoriq_sensor {
+
+ struct qoriq_tmu_data {
+ int ver;
++ u32 ttrcr[NUM_TTRCR_MAX];
+ struct regmap *regmap;
+ struct clk *clk;
+ struct qoriq_sensor sensor[SITES_MAX];
+@@ -182,17 +186,17 @@ static int qoriq_tmu_calibration(struct device *dev,
+ struct qoriq_tmu_data *data)
+ {
+ int i, val, len;
+- u32 range[4];
+ const u32 *calibration;
+ struct device_node *np = dev->of_node;
+
+ len = of_property_count_u32_elems(np, "fsl,tmu-range");
+- if (len < 0 || len > 4) {
++ if (len < 0 || (data->ver == TMU_VER1 && len > NUM_TTRCR_V1) ||
++ (data->ver > TMU_VER1 && len > NUM_TTRCR_MAX)) {
+ dev_err(dev, "invalid range data.\n");
+ return len;
+ }
+
+- val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
++ val = of_property_read_u32_array(np, "fsl,tmu-range", data->ttrcr, len);
+ if (val != 0) {
+ dev_err(dev, "failed to read range data.\n");
+ return val;
+@@ -200,7 +204,7 @@ static int qoriq_tmu_calibration(struct device *dev,
+
+ /* Init temperature range registers */
+ for (i = 0; i < len; i++)
+- regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
++ regmap_write(data->regmap, REGS_TTRnCR(i), data->ttrcr[i]);
+
+ calibration = of_get_property(np, "fsl,tmu-calibration", &len);
+ if (calibration == NULL || len % 8) {
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 58533ea75cd925..dee3022539cf78 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -37,8 +37,6 @@ static LIST_HEAD(thermal_governor_list);
+ static DEFINE_MUTEX(thermal_list_lock);
+ static DEFINE_MUTEX(thermal_governor_lock);
+
+-static atomic_t in_suspend;
+-
+ static struct thermal_governor *def_governor;
+
+ /*
+@@ -409,7 +407,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
+ {
+ int count;
+
+- if (atomic_read(&in_suspend))
++ if (tz->suspended)
+ return;
+
+ if (WARN_ONCE(!tz->ops->get_temp,
+@@ -606,7 +604,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
+ /**
+ * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
+ * @tz: pointer to struct thermal_zone_device
+- * @trip: indicates which trip point the cooling devices is
++ * @trip_index: indicates which trip point the cooling device is
+ * associated with in this thermal zone.
+ * @cdev: pointer to struct thermal_cooling_device
+ * @upper: the Maximum cooling state for this trip point.
+@@ -626,7 +624,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
+ * Return: 0 on success, the proper error value otherwise.
+ */
+ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+- int trip,
++ int trip_index,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower,
+ unsigned int weight)
+@@ -635,12 +633,15 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ struct thermal_instance *pos;
+ struct thermal_zone_device *pos1;
+ struct thermal_cooling_device *pos2;
++ const struct thermal_trip *trip;
+ bool upper_no_limit;
+ int result;
+
+- if (trip >= tz->num_trips || trip < 0)
++ if (trip_index >= tz->num_trips || trip_index < 0)
+ return -EINVAL;
+
++ trip = &tz->trips[trip_index];
++
+ list_for_each_entry(pos1, &thermal_tz_list, node) {
+ if (pos1 == tz)
+ break;
+@@ -689,7 +690,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (result)
+ goto release_ida;
+
+- sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
++ snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
++ dev->id);
+ sysfs_attr_init(&dev->attr.attr);
+ dev->attr.attr.name = dev->attr_name;
+ dev->attr.attr.mode = 0444;
+@@ -698,7 +700,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (result)
+ goto remove_symbol_link;
+
+- sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
++ snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
++ "cdev%d_weight", dev->id);
+ sysfs_attr_init(&dev->weight_attr.attr);
+ dev->weight_attr.attr.name = dev->weight_attr_name;
+ dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
+@@ -743,7 +746,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
+ * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
+ * thermal zone.
+ * @tz: pointer to a struct thermal_zone_device.
+- * @trip: indicates which trip point the cooling devices is
++ * @trip_index: indicates which trip point the cooling device is
+ * associated with in this thermal zone.
+ * @cdev: pointer to a struct thermal_cooling_device.
+ *
+@@ -754,13 +757,15 @@ EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
+ * Return: 0 on success, the proper error value otherwise.
+ */
+ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
+- int trip,
++ int trip_index,
+ struct thermal_cooling_device *cdev)
+ {
+ struct thermal_instance *pos, *next;
++ const struct thermal_trip *trip;
+
+ mutex_lock(&tz->lock);
+ mutex_lock(&cdev->lock);
++ trip = &tz->trips[trip_index];
+ list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ list_del(&pos->tz_node);
+@@ -1380,7 +1385,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
+ device_del(&tz->device);
+ release_device:
+ put_device(&tz->device);
+- tz = NULL;
+ remove_id:
+ ida_free(&thermal_tz_ida, id);
+ free_tzp:
+@@ -1526,17 +1530,35 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+- atomic_set(&in_suspend, 1);
++ mutex_lock(&thermal_list_lock);
++
++ list_for_each_entry(tz, &thermal_tz_list, node) {
++ mutex_lock(&tz->lock);
++
++ tz->suspended = true;
++
++ mutex_unlock(&tz->lock);
++ }
++
++ mutex_unlock(&thermal_list_lock);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+- atomic_set(&in_suspend, 0);
++ mutex_lock(&thermal_list_lock);
++
+ list_for_each_entry(tz, &thermal_tz_list, node) {
++ mutex_lock(&tz->lock);
++
++ tz->suspended = false;
++
+ thermal_zone_device_init(tz);
+- thermal_zone_device_update(tz,
+- THERMAL_EVENT_UNSPECIFIED);
++ __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
++
++ mutex_unlock(&tz->lock);
+ }
++
++ mutex_unlock(&thermal_list_lock);
+ break;
+ default:
+ break;
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index de884bea28b66c..024e82ebf5920f 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -87,7 +87,7 @@ struct thermal_instance {
+ char name[THERMAL_NAME_LENGTH];
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+- int trip;
++ const struct thermal_trip *trip;
+ bool initialized;
+ unsigned long upper; /* Highest cooling state for this trip point */
+ unsigned long lower; /* Lowest cooling state for this trip point */
+@@ -119,6 +119,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
+ void __thermal_zone_set_trips(struct thermal_zone_device *tz);
+ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip);
++int thermal_zone_trip_id(struct thermal_zone_device *tz,
++ const struct thermal_trip *trip);
+ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+
+ /* sysfs I/F */
+diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
+index 4d66372c962952..c1d0af73c85d6f 100644
+--- a/drivers/thermal/thermal_helpers.c
++++ b/drivers/thermal/thermal_helpers.c
+@@ -42,14 +42,17 @@ int get_tz_trend(struct thermal_zone_device *tz, int trip_index)
+
+ struct thermal_instance *
+ get_thermal_instance(struct thermal_zone_device *tz,
+- struct thermal_cooling_device *cdev, int trip)
++ struct thermal_cooling_device *cdev, int trip_index)
+ {
+ struct thermal_instance *pos = NULL;
+ struct thermal_instance *target_instance = NULL;
++ const struct thermal_trip *trip;
+
+ mutex_lock(&tz->lock);
+ mutex_lock(&cdev->lock);
+
++ trip = &tz->trips[trip_index];
++
+ list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ target_instance = pos;
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 1e0655b63259fe..d8dfcd49695d3d 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -123,7 +123,7 @@ static int thermal_of_populate_trip(struct device_node *np,
+ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *ntrips)
+ {
+ struct thermal_trip *tt;
+- struct device_node *trips, *trip;
++ struct device_node *trips;
+ int ret, count;
+
+ trips = of_get_child_by_name(np, "trips");
+@@ -148,7 +148,7 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n
+ *ntrips = count;
+
+ count = 0;
+- for_each_child_of_node(trips, trip) {
++ for_each_child_of_node_scoped(trips, trip) {
+ ret = thermal_of_populate_trip(trip, &tt[count++]);
+ if (ret)
+ goto out_kfree;
+@@ -182,14 +182,14 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
+ * Search for each thermal zone, a defined sensor
+ * corresponding to the one passed as parameter
+ */
+- for_each_available_child_of_node(np, tz) {
++ for_each_available_child_of_node_scoped(np, child) {
+
+ int count, i;
+
+- count = of_count_phandle_with_args(tz, "thermal-sensors",
++ count = of_count_phandle_with_args(child, "thermal-sensors",
+ "#thermal-sensor-cells");
+ if (count <= 0) {
+- pr_err("%pOFn: missing thermal sensor\n", tz);
++ pr_err("%pOFn: missing thermal sensor\n", child);
+ tz = ERR_PTR(-EINVAL);
+ goto out;
+ }
+@@ -198,18 +198,19 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int
+
+ int ret;
+
+- ret = of_parse_phandle_with_args(tz, "thermal-sensors",
++ ret = of_parse_phandle_with_args(child, "thermal-sensors",
+ "#thermal-sensor-cells",
+ i, &sensor_specs);
+ if (ret < 0) {
+- pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret);
++ pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", child, ret);
+ tz = ERR_PTR(ret);
+ goto out;
+ }
+
+ if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
+ sensor_specs.args[0] : 0)) {
+- pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz);
++ pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child);
++ tz = no_free_ptr(child);
+ goto out;
+ }
+ }
+@@ -225,14 +226,18 @@ static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdel
+ int ret;
+
+ ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
+- if (ret < 0) {
+- pr_err("%pOFn: missing polling-delay-passive property\n", np);
++ if (ret == -EINVAL) {
++ *pdelay = 0;
++ } else if (ret < 0) {
++ pr_err("%pOFn: Couldn't get polling-delay-passive: %d\n", np, ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "polling-delay", delay);
+- if (ret < 0) {
+- pr_err("%pOFn: missing polling-delay property\n", np);
++ if (ret == -EINVAL) {
++ *delay = 0;
++ } else if (ret < 0) {
++ pr_err("%pOFn: Couldn't get polling-delay: %d\n", np, ret);
+ return ret;
+ }
+
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 4e6a97db894e9c..eef40d4f306394 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -943,7 +943,8 @@ trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
+ instance =
+ container_of(attr, struct thermal_instance, attr);
+
+- return sprintf(buf, "%d\n", instance->trip);
++ return sprintf(buf, "%d\n",
++ thermal_zone_trip_id(instance->tz, instance->trip));
+ }
+
+ ssize_t
+diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
+index 024e2e365a26b6..afc9499128c29b 100644
+--- a/drivers/thermal/thermal_trip.c
++++ b/drivers/thermal/thermal_trip.c
+@@ -17,9 +17,6 @@ int for_each_thermal_trip(struct thermal_zone_device *tz,
+
+ lockdep_assert_held(&tz->lock);
+
+- if (!tz->trips)
+- return -ENODATA;
+-
+ for (i = 0; i < tz->num_trips; i++) {
+ ret = cb(&tz->trips[i], data);
+ if (ret)
+@@ -55,6 +52,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ {
+ struct thermal_trip trip;
+ int low = -INT_MAX, high = INT_MAX;
++ bool same_trip = false;
+ int i, ret;
+
+ lockdep_assert_held(&tz->lock);
+@@ -63,6 +61,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+ return;
+
+ for (i = 0; i < tz->num_trips; i++) {
++ bool low_set = false;
+ int trip_low;
+
+ ret = __thermal_zone_get_trip(tz, i , &trip);
+@@ -71,18 +70,31 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+
+ trip_low = trip.temperature - trip.hysteresis;
+
+- if (trip_low < tz->temperature && trip_low > low)
++ if (trip_low < tz->temperature && trip_low > low) {
+ low = trip_low;
++ low_set = true;
++ same_trip = false;
++ }
+
+ if (trip.temperature > tz->temperature &&
+- trip.temperature < high)
++ trip.temperature < high) {
+ high = trip.temperature;
++ same_trip = low_set;
++ }
+ }
+
+ /* No need to change trip points */
+ if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+ return;
+
++ /*
++ * If "high" and "low" are the same, skip the change unless this is the
++ * first time.
++ */
++ if (same_trip && (tz->prev_low_trip != -INT_MAX ||
++ tz->prev_high_trip != INT_MAX))
++ return;
++
+ tz->prev_low_trip = low;
+ tz->prev_high_trip = high;
+
+@@ -160,3 +172,16 @@ int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
+
+ return 0;
+ }
++
++int thermal_zone_trip_id(struct thermal_zone_device *tz,
++ const struct thermal_trip *trip)
++{
++ int i;
++
++ for (i = 0; i < tz->num_trips; i++) {
++ if (&tz->trips[i] == trip)
++ return i;
++ }
++
++ return -ENODATA;
++}
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index c9ddd49138d822..0754fe76edde4c 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port)
+ debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
+ debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
+ debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
+- if (independent_voltage_margins(usb4) ||
+- (supports_time(usb4) && independent_time_margins(usb4)))
++ if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
++ (supports_time(usb4) &&
++ independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
+ debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+ }
+
+@@ -959,7 +960,7 @@ static void margining_port_remove(struct tb_port *port)
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ if (parent)
+- debugfs_remove_recursive(debugfs_lookup("margining", parent));
++ debugfs_lookup_and_remove("margining", parent);
+
+ kfree(port->usb4->margining);
+ port->usb4->margining = NULL;
+diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
+index ec7b5f65804e49..31f3da4e6a08df 100644
+--- a/drivers/thunderbolt/domain.c
++++ b/drivers/thunderbolt/domain.c
+@@ -423,6 +423,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
+ /**
+ * tb_domain_add() - Add domain to the system
+ * @tb: Domain to add
++ * @reset: Issue reset to the host router
+ *
+ * Starts the domain and adds it to the system. Hotplugging devices will
+ * work after this has been returned successfully. In order to remove
+@@ -431,7 +432,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+-int tb_domain_add(struct tb *tb)
++int tb_domain_add(struct tb *tb, bool reset)
+ {
+ int ret;
+
+@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
+
+ /* Start the domain */
+ if (tb->cm_ops->start) {
+- ret = tb->cm_ops->start(tb);
++ ret = tb->cm_ops->start(tb, reset);
+ if (ret)
+ goto err_domain_del;
+ }
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index d8b9c734abd363..623aa81a883371 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
+ return 0;
+ }
+
+-static int icm_start(struct tb *tb)
++static int icm_start(struct tb *tb, bool not_used)
+ {
+ struct icm *icm = tb_priv(tb);
+ int ret;
+diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
+index 633970fbe9b059..63cb4b6afb718a 100644
+--- a/drivers/thunderbolt/lc.c
++++ b/drivers/thunderbolt/lc.c
+@@ -6,6 +6,8 @@
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
++#include <linux/delay.h>
++
+ #include "tb.h"
+
+ /**
+@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
+ return sw->cap_lc + start + phys * size;
+ }
+
++/**
++ * tb_lc_reset_port() - Trigger downstream port reset through LC
++ * @port: Port that is reset
++ *
++ * Triggers downstream port reset through link controller registers.
++ * Returns %0 in case of success negative errno otherwise. Only supports
++ * non-USB4 routers with link controller (that's Thunderbolt 2 and
++ * Thunderbolt 3).
++ */
++int tb_lc_reset_port(struct tb_port *port)
++{
++ struct tb_switch *sw = port->sw;
++ int cap, ret;
++ u32 mode;
++
++ if (sw->generation < 2)
++ return -EINVAL;
++
++ cap = find_port_lc_cap(port);
++ if (cap < 0)
++ return cap;
++
++ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
++ if (ret)
++ return ret;
++
++ mode |= TB_LC_PORT_MODE_DPR;
++
++ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
++ if (ret)
++ return ret;
++
++ fsleep(10000);
++
++ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
++ if (ret)
++ return ret;
++
++ mode &= ~TB_LC_PORT_MODE_DPR;
++
++ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
++}
++
+ static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
+ {
+ bool upstream = tb_is_upstream_port(port);
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 4b7bec74e89fbc..1ec6f9c82aef06 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1221,7 +1221,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
+ str_enabled_disabled(port_ok));
+ }
+
+-static void nhi_reset(struct tb_nhi *nhi)
++static bool nhi_reset(struct tb_nhi *nhi)
+ {
+ ktime_t timeout;
+ u32 val;
+@@ -1229,11 +1229,11 @@ static void nhi_reset(struct tb_nhi *nhi)
+ val = ioread32(nhi->iobase + REG_CAPS);
+ /* Reset only v2 and later routers */
+ if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
+- return;
++ return false;
+
+ if (!host_reset) {
+ dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
+- return;
++ return false;
+ }
+
+ iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
+@@ -1244,12 +1244,14 @@ static void nhi_reset(struct tb_nhi *nhi)
+ val = ioread32(nhi->iobase + REG_RESET);
+ if (!(val & REG_RESET_HRR)) {
+ dev_warn(&nhi->pdev->dev, "host router reset successful\n");
+- return;
++ return true;
+ }
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
++
++ return false;
+ }
+
+ static int nhi_init_msi(struct tb_nhi *nhi)
+@@ -1331,6 +1333,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ struct device *dev = &pdev->dev;
+ struct tb_nhi *nhi;
+ struct tb *tb;
++ bool reset;
+ int res;
+
+ if (!nhi_imr_valid(pdev))
+@@ -1365,7 +1368,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ nhi_check_quirks(nhi);
+ nhi_check_iommu(nhi);
+
+- nhi_reset(nhi);
++ /*
++ * Only USB4 v2 hosts support host reset, so if we already did
++ * that, don't do it again when the domain is initialized.
++ */
++ reset = nhi_reset(nhi) ? false : host_reset;
+
+ res = nhi_init_msi(nhi);
+ if (res)
+@@ -1392,7 +1399,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
+
+- res = tb_domain_add(tb);
++ res = tb_domain_add(tb, reset);
+ if (res) {
+ /*
+ * At this point the RX/TX rings might already have been
+diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
+index ee03fd75a4728f..17f7647e6a7225 100644
+--- a/drivers/thunderbolt/path.c
++++ b/drivers/thunderbolt/path.c
+@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
+ return -ETIMEDOUT;
+ }
+
++/**
++ * tb_path_deactivate_hop() - Deactivate one path in path config space
++ * @port: Lane or protocol adapter
++ * @hop_index: HopID of the path to be cleared
++ *
++ * This deactivates or clears a single path config space entry at
++ * @hop_index. Returns %0 on success and negative errno otherwise.
++ */
++int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
++{
++ return __tb_path_deactivate_hop(port, hop_index, true);
++}
++
+ static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
+ {
+ int i, res;
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 488138a28ae13b..e81de9c30eac9a 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ {
+ struct tb_port *port;
+
++ if (tb_switch_is_icm(sw))
++ return;
++
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_usb3_down(port))
+ continue;
+@@ -40,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ }
+ }
+
++static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
++{
++ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
++ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
++}
++
+ struct tb_quirk {
+ u16 hw_vendor_id;
+ u16 hw_device_id;
+@@ -83,6 +92,14 @@ static const struct tb_quirk tb_quirks[] = {
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
++ /*
++ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
++ * controllers.
++ */
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
++ quirk_block_rpm_in_redrive },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
++ quirk_block_rpm_in_redrive },
+ /*
+ * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ */
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index bd5815f8f23bd6..52cb1a3bb8c786 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -675,6 +675,13 @@ int tb_port_disable(struct tb_port *port)
+ return __tb_port_enable(port, false);
+ }
+
++static int tb_port_reset(struct tb_port *port)
++{
++ if (tb_switch_is_usb4(port->sw))
++ return port->cap_usb4 ? usb4_port_reset(port) : 0;
++ return tb_lc_reset_port(port);
++}
++
+ /*
+ * tb_init_port() - initialize a port
+ *
+@@ -914,6 +921,48 @@ int tb_port_get_link_speed(struct tb_port *port)
+ }
+ }
+
++/**
++ * tb_port_get_link_generation() - Returns link generation
++ * @port: Lane adapter
++ *
++ * Returns link generation as number or negative errno in case of
++ * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
++ * links so for those always returns 2.
++ */
++int tb_port_get_link_generation(struct tb_port *port)
++{
++ int ret;
++
++ ret = tb_port_get_link_speed(port);
++ if (ret < 0)
++ return ret;
++
++ switch (ret) {
++ case 40:
++ return 4;
++ case 20:
++ return 3;
++ default:
++ return 2;
++ }
++}
++
++static const char *width_name(enum tb_link_width width)
++{
++ switch (width) {
++ case TB_LINK_WIDTH_SINGLE:
++ return "symmetric, single lane";
++ case TB_LINK_WIDTH_DUAL:
++ return "symmetric, dual lanes";
++ case TB_LINK_WIDTH_ASYM_TX:
++ return "asymmetric, 3 transmitters, 1 receiver";
++ case TB_LINK_WIDTH_ASYM_RX:
++ return "asymmetric, 3 receivers, 1 transmitter";
++ default:
++ return "unknown";
++ }
++}
++
+ /**
+ * tb_port_get_link_width() - Get current link width
+ * @port: Port to check (USB4 or CIO)
+@@ -939,8 +988,15 @@ int tb_port_get_link_width(struct tb_port *port)
+ LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+ }
+
+-static bool tb_port_is_width_supported(struct tb_port *port,
+- unsigned int width_mask)
++/**
++ * tb_port_width_supported() - Is the given link width supported
++ * @port: Port to check
++ * @width: Widths to check (bitmask)
++ *
++ * Can be called for any lane adapter. Checks if the given @width is
++ * supported by the hardware and returns %true if it is.
++ */
++bool tb_port_width_supported(struct tb_port *port, unsigned int width)
+ {
+ u32 phy, widths;
+ int ret;
+@@ -948,20 +1004,23 @@ static bool tb_port_is_width_supported(struct tb_port *port,
+ if (!port->cap_phy)
+ return false;
+
++ if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
++ if (tb_port_get_link_generation(port) < 4 ||
++ !usb4_port_asym_supported(port))
++ return false;
++ }
++
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+- widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+- LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+-
+- return widths & width_mask;
+-}
+-
+-static bool is_gen4_link(struct tb_port *port)
+-{
+- return tb_port_get_link_speed(port) > 20;
++ /*
++ * The field encoding is the same as &enum tb_link_width (which is
++ * passed to @width).
++ */
++ widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
++ return widths & width;
+ }
+
+ /**
+@@ -991,15 +1050,23 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
+ switch (width) {
+ case TB_LINK_WIDTH_SINGLE:
+ /* Gen 4 link cannot be single */
+- if (is_gen4_link(port))
++ if (tb_port_get_link_generation(port) >= 4)
+ return -EOPNOTSUPP;
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
++
+ case TB_LINK_WIDTH_DUAL:
++ if (tb_port_get_link_generation(port) >= 4)
++ return usb4_port_asym_set_link_width(port, width);
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
++
++ case TB_LINK_WIDTH_ASYM_TX:
++ case TB_LINK_WIDTH_ASYM_RX:
++ return usb4_port_asym_set_link_width(port, width);
++
+ default:
+ return -EINVAL;
+ }
+@@ -1082,7 +1149,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
+ * Only set bonding if the link was not already bonded. This
+ * avoids the lane adapter to re-enter bonding state.
+ */
+- if (width == TB_LINK_WIDTH_SINGLE) {
++ if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
+ ret = tb_port_set_lane_bonding(port, true);
+ if (ret)
+ goto err_lane1;
+@@ -1124,7 +1191,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
+ /**
+ * tb_port_wait_for_link_width() - Wait until link reaches specific width
+ * @port: Port to wait for
+- * @width_mask: Expected link width mask
++ * @width: Expected link width (bitmask)
+ * @timeout_msec: Timeout in ms how long to wait
+ *
+ * Should be used after both ends of the link have been bonded (or
+@@ -1133,14 +1200,15 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
+ * within the given timeout, %0 if it did. Can be passed a mask of
+ * expected widths and succeeds if any of the widths is reached.
+ */
+-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
++int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
+ int timeout_msec)
+ {
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+ int ret;
+
+ /* Gen 4 link does not support single lane */
+- if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
++ if ((width & TB_LINK_WIDTH_SINGLE) &&
++ tb_port_get_link_generation(port) >= 4)
+ return -EOPNOTSUPP;
+
+ do {
+@@ -1153,7 +1221,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
+ */
+ if (ret != -EACCES)
+ return ret;
+- } else if (ret & width_mask) {
++ } else if (ret & width) {
+ return 0;
+ }
+
+@@ -1203,6 +1271,9 @@ int tb_port_update_credits(struct tb_port *port)
+ ret = tb_port_do_update_credits(port);
+ if (ret)
+ return ret;
++
++ if (!port->dual_link_port)
++ return 0;
+ return tb_port_do_update_credits(port->dual_link_port);
+ }
+
+@@ -1485,29 +1556,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
+ regs->__unknown1, regs->__unknown4);
+ }
+
++static int tb_switch_reset_host(struct tb_switch *sw)
++{
++ if (sw->generation > 1) {
++ struct tb_port *port;
++
++ tb_switch_for_each_port(sw, port) {
++ int i, ret;
++
++ /*
++ * For lane adapters we issue downstream port
++ * reset and clear up path config spaces.
++ *
++ * For protocol adapters we disable the path and
++ * clear path config space one by one (from 8 to
++ * Max Input HopID of the adapter).
++ */
++ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
++ ret = tb_port_reset(port);
++ if (ret)
++ return ret;
++ } else if (tb_port_is_usb3_down(port) ||
++ tb_port_is_usb3_up(port)) {
++ tb_usb3_port_enable(port, false);
++ } else if (tb_port_is_dpin(port) ||
++ tb_port_is_dpout(port)) {
++ tb_dp_port_enable(port, false);
++ } else if (tb_port_is_pcie_down(port) ||
++ tb_port_is_pcie_up(port)) {
++ tb_pci_port_enable(port, false);
++ } else {
++ continue;
++ }
++
++ /* Cleanup path config space of protocol adapter */
++ for (i = TB_PATH_MIN_HOPID;
++ i <= port->config.max_in_hop_id; i++) {
++ ret = tb_path_deactivate_hop(port, i);
++ if (ret)
++ return ret;
++ }
++ }
++ } else {
++ struct tb_cfg_result res;
++
++ /* Thunderbolt 1 uses the "reset" config space packet */
++ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
++ TB_CFG_SWITCH, 2, 2);
++ if (res.err)
++ return res.err;
++ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
++ if (res.err > 0)
++ return -EIO;
++ else if (res.err < 0)
++ return res.err;
++ }
++
++ return 0;
++}
++
++static int tb_switch_reset_device(struct tb_switch *sw)
++{
++ return tb_port_reset(tb_switch_downstream_port(sw));
++}
++
++static bool tb_switch_enumerated(struct tb_switch *sw)
++{
++ u32 val;
++ int ret;
++
++ /*
++ * Read directly from the hardware because we use this also
++ * during system sleep where sw->config.enabled is already set
++ * by us.
++ */
++ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
++ if (ret)
++ return false;
++
++ return !!(val & ROUTER_CS_3_V);
++}
++
+ /**
+- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
+- * @sw: Switch to reset
++ * tb_switch_reset() - Perform reset to the router
++ * @sw: Router to reset
+ *
+- * Return: Returns 0 on success or an error code on failure.
++ * Issues reset to the router @sw. Can be used for any router. For host
++ * routers, resets all the downstream ports and cleans up path config
++ * spaces accordingly. For device routers, issues a downstream port reset
++ * through the parent router, so as a side effect there will be an unplug
++ * soon after this is finished.
++ *
++ * If the router is not enumerated, this does nothing.
++ *
++ * Returns %0 on success or negative errno in case of failure.
+ */
+ int tb_switch_reset(struct tb_switch *sw)
+ {
+- struct tb_cfg_result res;
++ int ret;
+
+- if (sw->generation > 1)
++ /*
++ * We cannot access the port config spaces unless the router is
++ * already enumerated. If the router is not enumerated, it is
++ * equivalent to having been reset, so we can skip that here.
++ */
++ if (!tb_switch_enumerated(sw))
+ return 0;
+
+- tb_sw_dbg(sw, "resetting switch\n");
++ tb_sw_dbg(sw, "resetting\n");
++
++ if (tb_route(sw))
++ ret = tb_switch_reset_device(sw);
++ else
++ ret = tb_switch_reset_host(sw);
++
++ if (ret)
++ tb_sw_warn(sw, "failed to reset\n");
+
+- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+- TB_CFG_SWITCH, 2, 2);
+- if (res.err)
+- return res.err;
+- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+- if (res.err > 0)
+- return -EIO;
+- return res.err;
++ return ret;
+ }
+
+ /**
+@@ -2697,6 +2863,38 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
+ return 0;
+ }
+
++/* Must be called after tb_switch_update_link_attributes() */
++static void tb_switch_link_init(struct tb_switch *sw)
++{
++ struct tb_port *up, *down;
++ bool bonded;
++
++ if (!tb_route(sw) || tb_switch_is_icm(sw))
++ return;
++
++ tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
++ tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
++
++ bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
++
++ /*
++ * Gen 4 links come up as bonded so update the port structures
++ * accordingly.
++ */
++ up = tb_upstream_port(sw);
++ down = tb_switch_downstream_port(sw);
++
++ up->bonded = bonded;
++ if (up->dual_link_port)
++ up->dual_link_port->bonded = bonded;
++ tb_port_update_credits(up);
++
++ down->bonded = bonded;
++ if (down->dual_link_port)
++ down->dual_link_port->bonded = bonded;
++ tb_port_update_credits(down);
++}
++
+ /**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+@@ -2705,24 +2903,20 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
+ * switch. If conditions are correct and both switches support the feature,
+ * lanes are bonded. It is safe to call this to any switch.
+ */
+-int tb_switch_lane_bonding_enable(struct tb_switch *sw)
++static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+ {
+ struct tb_port *up, *down;
+- u64 route = tb_route(sw);
+- unsigned int width_mask;
++ unsigned int width;
+ int ret;
+
+- if (!route)
+- return 0;
+-
+ if (!tb_switch_lane_bonding_possible(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+- if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
+- !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
++ if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
++ !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
+ return 0;
+
+ /*
+@@ -2746,21 +2940,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+ }
+
+ /* Any of the widths are all bonded */
+- width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+- TB_LINK_WIDTH_ASYM_RX;
++ width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
++ TB_LINK_WIDTH_ASYM_RX;
+
+- ret = tb_port_wait_for_link_width(down, width_mask, 100);
+- if (ret) {
+- tb_port_warn(down, "timeout enabling lane bonding\n");
+- return ret;
+- }
+-
+- tb_port_update_credits(down);
+- tb_port_update_credits(up);
+- tb_switch_update_link_attributes(sw);
+-
+- tb_sw_dbg(sw, "lane bonding enabled\n");
+- return ret;
++ return tb_port_wait_for_link_width(down, width, 100);
+ }
+
+ /**
+@@ -2770,20 +2953,27 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+ * Disables lane bonding between @sw and parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+-void tb_switch_lane_bonding_disable(struct tb_switch *sw)
++static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
+ {
+ struct tb_port *up, *down;
+ int ret;
+
+- if (!tb_route(sw))
+- return;
+-
+ up = tb_upstream_port(sw);
+ if (!up->bonded)
+- return;
++ return 0;
+
+- down = tb_switch_downstream_port(sw);
++ /*
++ * If the link is Gen 4, there is no way to switch the link to
++ * two single-lane links, so avoid that here. Also don't bother
++ * if the link is not up anymore (sw is unplugged).
++ */
++ ret = tb_port_get_link_generation(up);
++ if (ret < 0)
++ return ret;
++ if (ret >= 4)
++ return -EOPNOTSUPP;
+
++ down = tb_switch_downstream_port(sw);
+ tb_port_lane_bonding_disable(up);
+ tb_port_lane_bonding_disable(down);
+
+@@ -2791,15 +2981,160 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+ * It is fine if we get other errors as the router might have
+ * been unplugged.
+ */
+- ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
+- if (ret == -ETIMEDOUT)
+- tb_sw_warn(sw, "timeout disabling lane bonding\n");
++ return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
++}
++
++/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
++static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
++{
++ struct tb_port *up, *down, *port;
++ enum tb_link_width down_width;
++ int ret;
++
++ up = tb_upstream_port(sw);
++ down = tb_switch_downstream_port(sw);
++
++ if (width == TB_LINK_WIDTH_ASYM_TX) {
++ down_width = TB_LINK_WIDTH_ASYM_RX;
++ port = down;
++ } else {
++ down_width = TB_LINK_WIDTH_ASYM_TX;
++ port = up;
++ }
++
++ ret = tb_port_set_link_width(up, width);
++ if (ret)
++ return ret;
++
++ ret = tb_port_set_link_width(down, down_width);
++ if (ret)
++ return ret;
++
++ /*
++ * Initiate the change in the router where one of its TX lanes is
++ * changing to RX, but do so only if there is an actual change.
++ */
++ if (sw->link_width != width) {
++ ret = usb4_port_asym_start(port);
++ if (ret)
++ return ret;
++
++ ret = tb_port_wait_for_link_width(up, width, 100);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
++static int tb_switch_asym_disable(struct tb_switch *sw)
++{
++ struct tb_port *up, *down;
++ int ret;
++
++ up = tb_upstream_port(sw);
++ down = tb_switch_downstream_port(sw);
++
++ ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
++ if (ret)
++ return ret;
++
++ ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
++ if (ret)
++ return ret;
++
++ /*
++ * Initiate the change in the router that has three TX lanes and
++ * is changing one of its TX lanes to RX, but only if there is a
++ * change in the link width.
++ */
++ if (sw->link_width > TB_LINK_WIDTH_DUAL) {
++ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
++ ret = usb4_port_asym_start(up);
++ else
++ ret = usb4_port_asym_start(down);
++ if (ret)
++ return ret;
++
++ ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++/**
++ * tb_switch_set_link_width() - Configure router link width
++ * @sw: Router to configure
++ * @width: The new link width
++ *
++ * Set the device router link width to @width from the router upstream
++ * port perspective. Also supports asymmetric links if both sides of the
++ * link support it.
++ *
++ * Does nothing for host routers.
++ *
++ * Returns %0 in case of success, negative errno otherwise.
++ */
++int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
++{
++ struct tb_port *up, *down;
++ int ret = 0;
++
++ if (!tb_route(sw))
++ return 0;
++
++ up = tb_upstream_port(sw);
++ down = tb_switch_downstream_port(sw);
++
++ switch (width) {
++ case TB_LINK_WIDTH_SINGLE:
++ ret = tb_switch_lane_bonding_disable(sw);
++ break;
++
++ case TB_LINK_WIDTH_DUAL:
++ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
++ sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
++ ret = tb_switch_asym_disable(sw);
++ if (ret)
++ break;
++ }
++ ret = tb_switch_lane_bonding_enable(sw);
++ break;
++
++ case TB_LINK_WIDTH_ASYM_TX:
++ case TB_LINK_WIDTH_ASYM_RX:
++ ret = tb_switch_asym_enable(sw, width);
++ break;
++ }
++
++ switch (ret) {
++ case 0:
++ break;
++
++ case -ETIMEDOUT:
++ tb_sw_warn(sw, "timeout changing link width\n");
++ return ret;
++
++ case -ENOTCONN:
++ case -EOPNOTSUPP:
++ case -ENODEV:
++ return ret;
++
++ default:
++ tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
++ return ret;
++ }
+
+ tb_port_update_credits(down);
+ tb_port_update_credits(up);
++
+ tb_switch_update_link_attributes(sw);
+
+- tb_sw_dbg(sw, "lane bonding disabled\n");
++ tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
++ return ret;
+ }
+
+ /**
+@@ -2847,22 +3182,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
+ {
+ struct tb_port *up, *down;
+
+- if (sw->is_unplugged)
+- return;
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return;
+
++ /*
++ * Unconfigure downstream port so that wake-on-connect can be
++ * configured after router unplug. No need to unconfigure upstream port
++ * since its router is unplugged.
++ */
+ up = tb_upstream_port(sw);
+- if (tb_switch_is_usb4(up->sw))
+- usb4_port_unconfigure(up);
+- else
+- tb_lc_unconfigure_port(up);
+-
+ down = up->remote;
+ if (tb_switch_is_usb4(down->sw))
+ usb4_port_unconfigure(down);
+ else
+ tb_lc_unconfigure_port(down);
++
++ if (sw->is_unplugged)
++ return;
++
++ up = tb_upstream_port(sw);
++ if (tb_switch_is_usb4(up->sw))
++ usb4_port_unconfigure(up);
++ else
++ tb_lc_unconfigure_port(up);
+ }
+
+ static void tb_switch_credits_init(struct tb_switch *sw)
+@@ -2959,6 +3301,8 @@ int tb_switch_add(struct tb_switch *sw)
+ if (ret)
+ return ret;
+
++ tb_switch_link_init(sw);
++
+ ret = tb_switch_clx_init(sw);
+ if (ret)
+ return ret;
+@@ -3050,6 +3394,7 @@ void tb_switch_remove(struct tb_switch *sw)
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else if (port->xdomain) {
++ port->xdomain->is_unplugged = true;
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ }
+@@ -3106,7 +3451,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ return tb_lc_set_wake(sw, flags);
+ }
+
+-int tb_switch_resume(struct tb_switch *sw)
++static void tb_switch_check_wakes(struct tb_switch *sw)
++{
++ if (device_may_wakeup(&sw->dev)) {
++ if (tb_switch_is_usb4(sw))
++ usb4_switch_check_wakes(sw);
++ }
++}
++
++/**
++ * tb_switch_resume() - Resume a switch after sleep
++ * @sw: Switch to resume
++ * @runtime: Is this resume from runtime suspend or system sleep
++ *
++ * Resumes and re-enumerates the router (and all its children), if still
++ * plugged in after suspend. Does not enumerate a device router whose UID
++ * changed during suspend. If this is a resume from system sleep, notifies
++ * the PM core about the wakes that occurred during suspend. Disables all
++ * wakes, except the USB4 wake of the upstream port for USB4 routers,
++ * which shall always be enabled.
++ */
++int tb_switch_resume(struct tb_switch *sw, bool runtime)
+ {
+ struct tb_port *port;
+ int err;
+@@ -3155,6 +3519,9 @@ int tb_switch_resume(struct tb_switch *sw)
+ if (err)
+ return err;
+
++ if (!runtime)
++ tb_switch_check_wakes(sw);
++
+ /* Disable wakes */
+ tb_switch_set_wake(sw, 0);
+
+@@ -3184,7 +3551,8 @@ int tb_switch_resume(struct tb_switch *sw)
+ */
+ if (tb_port_unlock(port))
+ tb_port_warn(port, "failed to unlock port\n");
+- if (port->remote && tb_switch_resume(port->remote->sw)) {
++ if (port->remote &&
++ tb_switch_resume(port->remote->sw, runtime)) {
+ tb_port_warn(port,
+ "lost during suspend, disconnecting\n");
+ tb_sw_set_unplugged(port->remote->sw);
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index 27bd6ca6f99e41..ea155547e8719d 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -16,8 +16,31 @@
+ #include "tb_regs.h"
+ #include "tunnel.h"
+
+-#define TB_TIMEOUT 100 /* ms */
+-#define MAX_GROUPS 7 /* max Group_ID is 7 */
++#define TB_TIMEOUT 100 /* ms */
++
++/*
++ * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
++ * direction. This is 40G - 10% guard band bandwidth.
++ */
++#define TB_ASYM_MIN (40000 * 90 / 100)
++
++/*
++ * Threshold bandwidth (in Mb/s) that is used to switch the links to
++ * asymmetric and back. This is selected as 45G which means when the
++ * request is higher than this, we switch the link to asymmetric, and
++ * when it is less than this we switch it back. The 45G is selected so
++ * that we still have 27G (of the total 72G) for bulk PCIe traffic when
++ * switching back to symmetric.
++ */
++#define TB_ASYM_THRESHOLD 45000
++
++#define MAX_GROUPS 7 /* max Group_ID is 7 */
++
++static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
++module_param_named(asym_threshold, asym_threshold, uint, 0444);
++MODULE_PARM_DESC(asym_threshold,
++	"threshold (Mb/s) when to switch the Gen 4 link symmetry. 0 disables. (default: "
++ __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
+
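To make the 45G default concrete: a symmetric Gen 4 link carries 2 x 40G, the 10% guard band leaves 72G usable, and 72G - 45G = 27G remains for bulk PCIe traffic, matching the comment above. A quick standalone check of that arithmetic (plain C; the values mirror the constants defined above):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int per_lane = 40000;			/* Gen 4 lane, Mb/s */
	int symmetric = 2 * per_lane;		/* 80000 Mb/s total */
	int usable = symmetric - symmetric / 10;/* 10% guard band */
	int threshold = 45000;			/* TB_ASYM_THRESHOLD */

	assert(usable == 72000);
	printf("left for PCIe: %d Mb/s\n", usable - threshold);	/* 27000 */
	return 0;
}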
+ /**
+ * struct tb_cm - Simple Thunderbolt connection manager
+@@ -255,13 +278,13 @@ static int tb_enable_clx(struct tb_switch *sw)
+ * this in the future to cover the whole topology if it turns
+ * out to be beneficial.
+ */
+- while (sw && sw->config.depth > 1)
++ while (sw && tb_switch_depth(sw) > 1)
+ sw = tb_switch_parent(sw);
+
+ if (!sw)
+ return 0;
+
+- if (sw->config.depth != 1)
++ if (tb_switch_depth(sw) != 1)
+ return 0;
+
+ /*
+@@ -285,14 +308,32 @@ static int tb_enable_clx(struct tb_switch *sw)
+ return ret == -EOPNOTSUPP ? 0 : ret;
+ }
+
+-/* Disables CL states up to the host router */
+-static void tb_disable_clx(struct tb_switch *sw)
++/**
++ * tb_disable_clx() - Disable CL states up to host router
++ * @sw: Router to start
++ *
++ * Disables CL states from @sw up to the host router. Returns true if
++ * any CL state were disabled. This can be used to figure out whether
++ * the link was setup by us or the boot firmware so we don't
++ * accidentally enable them if they were not enabled during discovery.
++ */
++static bool tb_disable_clx(struct tb_switch *sw)
+ {
++ bool disabled = false;
++
+ do {
+- if (tb_switch_clx_disable(sw) < 0)
++ int ret;
++
++ ret = tb_switch_clx_disable(sw);
++ if (ret > 0)
++ disabled = true;
++ else if (ret < 0)
+ tb_sw_warn(sw, "failed to disable CL states\n");
++
+ sw = tb_switch_parent(sw);
+ } while (sw);
++
++ return disabled;
+ }
+
+ static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
+@@ -553,7 +594,7 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
+ struct tb_switch *sw;
+
+ /* Pick the router that is deepest in the topology */
+- if (dst_port->sw->config.depth > src_port->sw->config.depth)
++ if (tb_port_path_direction_downstream(src_port, dst_port))
+ sw = dst_port->sw;
+ else
+ sw = src_port->sw;
+@@ -572,133 +613,294 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
+ return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
+ }
+
+-static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+- struct tb_port *dst_port, int *available_up, int *available_down)
+-{
+- int usb3_consumed_up, usb3_consumed_down, ret;
+- struct tb_cm *tcm = tb_priv(tb);
++/**
++ * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @port: USB4 port the consumed bandwidth is calculated for
++ * @consumed_up: Consumed upstream bandwidth (Mb/s)
++ * @consumed_down: Consumed downstream bandwidth (Mb/s)
++ *
++ * Calculates consumed USB3 and PCIe bandwidth at @port on the path
++ * from @src_port to @dst_port. Does not take into account the tunnel
++ * starting from @src_port and ending at @dst_port.
++ */
++static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
++ struct tb_port *src_port,
++ struct tb_port *dst_port,
++ struct tb_port *port,
++ int *consumed_up,
++ int *consumed_down)
++{
++ int pci_consumed_up, pci_consumed_down;
+ struct tb_tunnel *tunnel;
+- struct tb_port *port;
+
+- tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
+- tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
+- dst_port->port);
++ *consumed_up = *consumed_down = 0;
+
+ tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+ if (tunnel && tunnel->src_port != src_port &&
+ tunnel->dst_port != dst_port) {
+- ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
+- &usb3_consumed_down);
++ int ret;
++
++ ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
++ consumed_down);
+ if (ret)
+ return ret;
+- } else {
+- usb3_consumed_up = 0;
+- usb3_consumed_down = 0;
+ }
+
+- /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
+- *available_up = *available_down = 120000;
++ /*
++ * If there is anything reserved for PCIe bulk traffic take it
++ * into account here too.
++ */
++ if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
++ *consumed_up += pci_consumed_up;
++ *consumed_down += pci_consumed_down;
++ }
+
+- /* Find the minimum available bandwidth over all links */
+- tb_for_each_port_on_path(src_port, dst_port, port) {
+- int link_speed, link_width, up_bw, down_bw;
++ return 0;
++}
+
+- if (!tb_port_is_null(port))
++/**
++ * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @port: USB4 port the consumed bandwidth is calculated for
++ * @consumed_up: Consumed upstream bandwidth (Mb/s)
++ * @consumed_down: Consumed downstream bandwidth (Mb/s)
++ *
++ * Calculates consumed DP bandwidth at @port on the path from @src_port
++ * to @dst_port. Does not take into account the tunnel starting from
++ * @src_port and ending at @dst_port.
++ */
++static int tb_consumed_dp_bandwidth(struct tb *tb,
++ struct tb_port *src_port,
++ struct tb_port *dst_port,
++ struct tb_port *port,
++ int *consumed_up,
++ int *consumed_down)
++{
++ struct tb_cm *tcm = tb_priv(tb);
++ struct tb_tunnel *tunnel;
++ int ret;
++
++ *consumed_up = *consumed_down = 0;
++
++ /*
++ * Find all DP tunnels that cross the port and reduce
++ * their consumed bandwidth from the available.
++ */
++ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
++ int dp_consumed_up, dp_consumed_down;
++
++ if (tb_tunnel_is_invalid(tunnel))
++ continue;
++
++ if (!tb_tunnel_is_dp(tunnel))
++ continue;
++
++ if (!tb_tunnel_port_on_path(tunnel, port))
+ continue;
+
+- if (tb_is_upstream_port(port)) {
+- link_speed = port->sw->link_speed;
++ /*
++ * Ignore the DP tunnel between src_port and dst_port
++ * because it is the same tunnel and we may be
++ * re-calculating estimated bandwidth.
++ */
++ if (tunnel->src_port == src_port &&
++ tunnel->dst_port == dst_port)
++ continue;
++
++ ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
++ &dp_consumed_down);
++ if (ret)
++ return ret;
++
++ *consumed_up += dp_consumed_up;
++ *consumed_down += dp_consumed_down;
++ }
++
++ return 0;
++}
++
++static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
++ struct tb_port *port)
++{
++ bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
++ enum tb_link_width width;
++
++ if (tb_is_upstream_port(port))
++ width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
++ else
++ width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
++
++ return tb_port_width_supported(port, width);
++}
++
++/**
++ * tb_maximum_bandwidth() - Maximum bandwidth over a single link
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @port: USB4 port the total bandwidth is calculated for
++ * @max_up: Maximum upstream bandwidth (Mb/s)
++ * @max_down: Maximum downstream bandwidth (Mb/s)
++ * @include_asym: Include bandwidth if the link is switched from
++ * symmetric to asymmetric
++ *
++ * Returns maximum possible bandwidth in @max_up and @max_down over a
++ * single link at @port. If @include_asym is set then also includes the
++ * additional bandwidth available if the links are transitioned into
++ * asymmetric in the direction from @src_port to @dst_port.
++ */
++static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
++ struct tb_port *dst_port, struct tb_port *port,
++ int *max_up, int *max_down, bool include_asym)
++{
++ bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
++ int link_speed, link_width, up_bw, down_bw;
++
++ /*
++ * Can include asymmetric, only if it is actually supported by
++ * the lane adapter.
++ */
++ if (!tb_asym_supported(src_port, dst_port, port))
++ include_asym = false;
++
++ if (tb_is_upstream_port(port)) {
++ link_speed = port->sw->link_speed;
++ /*
++ * sw->link_width is from upstream perspective so we use
++ * the opposite for downstream of the host router.
++ */
++ if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
++ up_bw = link_speed * 3 * 1000;
++ down_bw = link_speed * 1 * 1000;
++ } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
++ up_bw = link_speed * 1 * 1000;
++ down_bw = link_speed * 3 * 1000;
++ } else if (include_asym) {
+ /*
+- * sw->link_width is from upstream perspective
+- * so we use the opposite for downstream of the
+- * host router.
++ * The link is symmetric at the moment but we
++ * can switch it to asymmetric as needed. Report
++ * this bandwidth as available (even though it
++ * is not yet enabled).
+ */
+- if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
+- up_bw = link_speed * 3 * 1000;
+- down_bw = link_speed * 1 * 1000;
+- } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
++ if (downstream) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else {
+- up_bw = link_speed * port->sw->link_width * 1000;
+- down_bw = up_bw;
++ up_bw = link_speed * 3 * 1000;
++ down_bw = link_speed * 1 * 1000;
+ }
+ } else {
+- link_speed = tb_port_get_link_speed(port);
+- if (link_speed < 0)
+- return link_speed;
+-
+- link_width = tb_port_get_link_width(port);
+- if (link_width < 0)
+- return link_width;
+-
+- if (link_width == TB_LINK_WIDTH_ASYM_TX) {
++ up_bw = link_speed * port->sw->link_width * 1000;
++ down_bw = up_bw;
++ }
++ } else {
++ link_speed = tb_port_get_link_speed(port);
++ if (link_speed < 0)
++ return link_speed;
++
++ link_width = tb_port_get_link_width(port);
++ if (link_width < 0)
++ return link_width;
++
++ if (link_width == TB_LINK_WIDTH_ASYM_TX) {
++ up_bw = link_speed * 1 * 1000;
++ down_bw = link_speed * 3 * 1000;
++ } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
++ up_bw = link_speed * 3 * 1000;
++ down_bw = link_speed * 1 * 1000;
++ } else if (include_asym) {
++ /*
++ * The link is symmetric at the moment but we
++ * can switch it to asymmetric as needed. Report
++ * this bandwidth as available (even though it
++ * is not yet enabled).
++ */
++ if (downstream) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+- } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
++ } else {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+- } else {
+- up_bw = link_speed * link_width * 1000;
+- down_bw = up_bw;
+ }
++ } else {
++ up_bw = link_speed * link_width * 1000;
++ down_bw = up_bw;
+ }
++ }
+
+- /* Leave 10% guard band */
+- up_bw -= up_bw / 10;
+- down_bw -= down_bw / 10;
+-
+- tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
+- down_bw);
++ /* Leave 10% guard band */
++ *max_up = up_bw - up_bw / 10;
++ *max_down = down_bw - down_bw / 10;
+
+- /*
+- * Find all DP tunnels that cross the port and reduce
+- * their consumed bandwidth from the available.
+- */
+- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+- int dp_consumed_up, dp_consumed_down;
++ tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
++ return 0;
++}
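As a worked example of the arithmetic above, take the upstream-adapter branch with sw->link_width == TB_LINK_WIDTH_ASYM_RX on a Gen 4 link (assuming 40 Gb/s per lane, as elsewhere in this patch): one TX lane upstream and three RX lanes downstream give 36G/108G after the guard band. A hedged sketch of just that arithmetic, not kernel code:

#include <assert.h>

/* Mirrors the ASYM_RX branch of tb_maximum_bandwidth() */
static void max_bw_asym_rx(int link_speed, int *max_up, int *max_down)
{
	int up_bw = link_speed * 1 * 1000;	/* one lane upstream */
	int down_bw = link_speed * 3 * 1000;	/* three lanes downstream */

	/* Leave 10% guard band, as in the patch */
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;
}

int main(void)
{
	int up, down;

	max_bw_asym_rx(40, &up, &down);		/* Gen 4: 40 Gb/s per lane */
	assert(up == 36000 && down == 108000);
	return 0;
}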
+
+- if (tb_tunnel_is_invalid(tunnel))
+- continue;
++/**
++ * tb_available_bandwidth() - Available bandwidth for tunneling
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @available_up: Available bandwidth upstream (Mb/s)
++ * @available_down: Available bandwidth downstream (Mb/s)
++ * @include_asym: Include bandwidth if the link is switched from
++ * symmetric to asymmetric
++ *
++ * Calculates maximum available bandwidth for protocol tunneling between
++ * @src_port and @dst_port at the moment. This is the minimum of the
++ * maximum link bandwidth across all links, each reduced by the
++ * bandwidth currently consumed on that link.
++ *
++ * If @include_asym is true then also includes the bandwidth that can be
++ * added when the links are transitioned into asymmetric (but does not
++ * transition the links).
++ */
++static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
++ struct tb_port *dst_port, int *available_up,
++ int *available_down, bool include_asym)
++{
++ struct tb_port *port;
++ int ret;
+
+- if (!tb_tunnel_is_dp(tunnel))
+- continue;
++ /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
++ *available_up = *available_down = 120000;
+
+- if (!tb_tunnel_port_on_path(tunnel, port))
+- continue;
++ /* Find the minimum available bandwidth over all links */
++ tb_for_each_port_on_path(src_port, dst_port, port) {
++ int max_up, max_down, consumed_up, consumed_down;
+
+- /*
+- * Ignore the DP tunnel between src_port and
+- * dst_port because it is the same tunnel and we
+- * may be re-calculating estimated bandwidth.
+- */
+- if (tunnel->src_port == src_port &&
+- tunnel->dst_port == dst_port)
+- continue;
++ if (!tb_port_is_null(port))
++ continue;
+
+- ret = tb_tunnel_consumed_bandwidth(tunnel,
+- &dp_consumed_up,
+- &dp_consumed_down);
+- if (ret)
+- return ret;
++ ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
++ &max_up, &max_down, include_asym);
++ if (ret)
++ return ret;
+
+- up_bw -= dp_consumed_up;
+- down_bw -= dp_consumed_down;
+- }
++ ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
++ port, &consumed_up,
++ &consumed_down);
++ if (ret)
++ return ret;
++ max_up -= consumed_up;
++ max_down -= consumed_down;
+
+- /*
+- * If USB3 is tunneled from the host router down to the
+- * branch leading to port we need to take USB3 consumed
+- * bandwidth into account regardless whether it actually
+- * crosses the port.
+- */
+- up_bw -= usb3_consumed_up;
+- down_bw -= usb3_consumed_down;
++ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
++ &consumed_up, &consumed_down);
++ if (ret)
++ return ret;
++ max_up -= consumed_up;
++ max_down -= consumed_down;
+
+- if (up_bw < *available_up)
+- *available_up = up_bw;
+- if (down_bw < *available_down)
+- *available_down = down_bw;
++ if (max_up < *available_up)
++ *available_up = max_up;
++ if (max_down < *available_down)
++ *available_down = max_down;
+ }
+
+ if (*available_up < 0)
+@@ -736,7 +938,7 @@ static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
+ * That determines the whole USB3 bandwidth for this branch.
+ */
+ ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
+- &available_up, &available_down);
++ &available_up, &available_down, false);
+ if (ret) {
+ tb_warn(tb, "failed to calculate available bandwidth\n");
+ return;
+@@ -794,8 +996,8 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+ return ret;
+ }
+
+- ret = tb_available_bandwidth(tb, down, up, &available_up,
+- &available_down);
++ ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
++ false);
+ if (ret)
+ goto err_reclaim;
+
+@@ -856,6 +1058,225 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
+ return 0;
+ }
+
++/**
++ * tb_configure_asym() - Transition links to asymmetric if needed
++ * @tb: Domain structure
++ * @src_port: Source adapter to start the transition
++ * @dst_port: Destination adapter
++ * @requested_up: Additional bandwidth (Mb/s) required upstream
++ * @requested_down: Additional bandwidth (Mb/s) required downstream
++ *
++ * Transition links between @src_port and @dst_port into asymmetric, with
++ * three lanes in the direction from @src_port towards @dst_port and one lane
++ * in the opposite direction, if the bandwidth requirements
++ * (requested + currently consumed) on that link exceed @asym_threshold.
++ *
++ * Must be called with available >= requested over all links.
++ */
++static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
++ struct tb_port *dst_port, int requested_up,
++ int requested_down)
++{
++ struct tb_switch *sw;
++ bool clx, downstream;
++ struct tb_port *up;
++ int ret = 0;
++
++ if (!asym_threshold)
++ return 0;
++
++ /* Disable CL states before doing any transitions */
++ downstream = tb_port_path_direction_downstream(src_port, dst_port);
++ /* Pick up router deepest in the hierarchy */
++ if (downstream)
++ sw = dst_port->sw;
++ else
++ sw = src_port->sw;
++
++ clx = tb_disable_clx(sw);
++
++ tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
++ int consumed_up, consumed_down;
++ enum tb_link_width width;
++
++ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
++ &consumed_up, &consumed_down);
++ if (ret)
++ break;
++
++ if (downstream) {
++ /*
++			 * Downstream, so make sure the upstream direction
++			 * stays within 36G (40G - 10% guard band), and that
++			 * the requested bandwidth is above the threshold.
++ */
++ if (consumed_up + requested_up >= TB_ASYM_MIN) {
++ ret = -ENOBUFS;
++ break;
++ }
++			/* Does consumed + requested exceed the threshold? */
++ if (consumed_down + requested_down < asym_threshold)
++ continue;
++
++ width = TB_LINK_WIDTH_ASYM_RX;
++ } else {
++ /* Upstream, the opposite of above */
++ if (consumed_down + requested_down >= TB_ASYM_MIN) {
++ ret = -ENOBUFS;
++ break;
++ }
++ if (consumed_up + requested_up < asym_threshold)
++ continue;
++
++ width = TB_LINK_WIDTH_ASYM_TX;
++ }
++
++ if (up->sw->link_width == width)
++ continue;
++
++ if (!tb_port_width_supported(up, width))
++ continue;
++
++ tb_sw_dbg(up->sw, "configuring asymmetric link\n");
++
++ /*
++ * Here requested + consumed > threshold so we need to
++		 * transition the link into asymmetric now.
++ */
++ ret = tb_switch_set_link_width(up->sw, width);
++ if (ret) {
++ tb_sw_warn(up->sw, "failed to set link width\n");
++ break;
++ }
++ }
++
++	/* Re-enable CL states if they were previously enabled */
++ if (clx)
++ tb_enable_clx(sw);
++
++ return ret;
++}
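The downstream branch above reduces to two comparisons: refuse if the opposite (upstream) direction would exceed TB_ASYM_MIN, and transition only once requested plus consumed bandwidth reaches the threshold. A standalone restatement of that predicate (constants inlined; -1 stands in for the -ENOBUFS case):

#include <assert.h>

/*
 * Returns 1 to transition the link to asymmetric, 0 to leave it
 * alone, -1 when the upstream direction would not fit.
 */
static int should_go_asym_downstream(int consumed_up, int requested_up,
				     int consumed_down, int requested_down)
{
	const int asym_min = 40000 * 90 / 100;	/* TB_ASYM_MIN */
	const int threshold = 45000;		/* TB_ASYM_THRESHOLD */

	if (consumed_up + requested_up >= asym_min)
		return -1;
	return consumed_down + requested_down >= threshold;
}

int main(void)
{
	assert(should_go_asym_downstream(0, 0, 40000, 10000) == 1);
	assert(should_go_asym_downstream(0, 0, 20000, 10000) == 0);
	assert(should_go_asym_downstream(30000, 10000, 0, 0) == -1);
	return 0;
}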
++
++/**
++ * tb_configure_sym() - Transition links to symmetric if possible
++ * @tb: Domain structure
++ * @src_port: Source adapter to start the transition
++ * @dst_port: Destination adapter
++ * @requested_up: New lower bandwidth request upstream (Mb/s)
++ * @requested_down: New lower bandwidth request downstream (Mb/s)
++ *
++ * Goes over each link from @src_port to @dst_port and tries to
++ * transition the link to symmetric if the currently consumed bandwidth
++ * allows.
++ */
++static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
++ struct tb_port *dst_port, int requested_up,
++ int requested_down)
++{
++ struct tb_switch *sw;
++ bool clx, downstream;
++ struct tb_port *up;
++ int ret = 0;
++
++ if (!asym_threshold)
++ return 0;
++
++ /* Disable CL states before doing any transitions */
++ downstream = tb_port_path_direction_downstream(src_port, dst_port);
++ /* Pick up router deepest in the hierarchy */
++ if (downstream)
++ sw = dst_port->sw;
++ else
++ sw = src_port->sw;
++
++ clx = tb_disable_clx(sw);
++
++ tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
++ int consumed_up, consumed_down;
++
++ /* Already symmetric */
++ if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
++ continue;
++ /* Unplugged, no need to switch */
++ if (up->sw->is_unplugged)
++ continue;
++
++ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
++ &consumed_up, &consumed_down);
++ if (ret)
++ break;
++
++ if (downstream) {
++ /*
++			 * Downstream, so we want consumed_down < threshold.
++			 * Upstream traffic should be less than 36G (40G -
++			 * 10% guard band) as the link was already configured
++			 * asymmetric.
++ */
++ if (consumed_down + requested_down >= asym_threshold)
++ continue;
++ } else {
++ if (consumed_up + requested_up >= asym_threshold)
++ continue;
++ }
++
++ if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
++ continue;
++
++ tb_sw_dbg(up->sw, "configuring symmetric link\n");
++
++ ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
++ if (ret) {
++ tb_sw_warn(up->sw, "failed to set link width\n");
++ break;
++ }
++ }
++
++	/* Re-enable CL states if they were previously enabled */
++ if (clx)
++ tb_enable_clx(sw);
++
++ return ret;
++}
++
++static void tb_configure_link(struct tb_port *down, struct tb_port *up,
++ struct tb_switch *sw)
++{
++ struct tb *tb = sw->tb;
++
++ /* Link the routers using both links if available */
++ down->remote = up;
++ up->remote = down;
++ if (down->dual_link_port && up->dual_link_port) {
++ down->dual_link_port->remote = up->dual_link_port;
++ up->dual_link_port->remote = down->dual_link_port;
++ }
++
++ /*
++ * Enable lane bonding if the link is currently two single lane
++ * links.
++ */
++ if (sw->link_width < TB_LINK_WIDTH_DUAL)
++ tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
++
++ /*
++	 * If a device router comes up with a symmetric link deeper in
++	 * the hierarchy, we transition the links above it into
++	 * symmetric if bandwidth allows.
++ */
++ if (tb_switch_depth(sw) > 1 &&
++ tb_port_get_link_generation(up) >= 4 &&
++ up->sw->link_width == TB_LINK_WIDTH_DUAL) {
++ struct tb_port *host_port;
++
++ host_port = tb_port_at(tb_route(sw), tb->root_switch);
++ tb_configure_sym(tb, host_port, up, 0, 0);
++ }
++
++ /* Set the link configured */
++ tb_switch_configure_link(sw);
++}
++
+ static void tb_scan_port(struct tb_port *port);
+
+ /*
+@@ -964,19 +1385,9 @@ static void tb_scan_port(struct tb_port *port)
+ goto out_rpm_put;
+ }
+
+- /* Link the switches using both links if available */
+ upstream_port = tb_upstream_port(sw);
+- port->remote = upstream_port;
+- upstream_port->remote = port;
+- if (port->dual_link_port && upstream_port->dual_link_port) {
+- port->dual_link_port->remote = upstream_port->dual_link_port;
+- upstream_port->dual_link_port->remote = port->dual_link_port;
+- }
++ tb_configure_link(port, upstream_port, sw);
+
+- /* Enable lane bonding if supported */
+- tb_switch_lane_bonding_enable(sw);
+- /* Set the link configured */
+- tb_switch_configure_link(sw);
+ /*
+ * CL0s and CL1 are enabled and supported together.
+ * Silently ignore CLx enabling in case CLx is not supported.
+@@ -1040,6 +1451,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+ * deallocated properly.
+ */
+ tb_switch_dealloc_dp_resource(src_port->sw, src_port);
++ /*
++ * If bandwidth on a link is < asym_threshold
++ * transition the link to symmetric.
++ */
++ tb_configure_sym(tb, src_port, dst_port, 0, 0);
+ /* Now we can allow the domain to runtime suspend again */
+ pm_runtime_mark_last_busy(&dst_port->sw->dev);
+ pm_runtime_put_autosuspend(&dst_port->sw->dev);
+@@ -1092,7 +1508,8 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
+ tb_retimer_remove_all(port);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_unconfigure_link(port->remote->sw);
+- tb_switch_lane_bonding_disable(port->remote->sw);
++ tb_switch_set_link_width(port->remote->sw,
++ TB_LINK_WIDTH_SINGLE);
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ if (port->dual_link_port)
+@@ -1196,7 +1613,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+- &estimated_down);
++ &estimated_down, true);
+ if (ret) {
+ tb_port_warn(in,
+ "failed to re-calculate estimated bandwidth\n");
+@@ -1212,7 +1629,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+ tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
+ estimated_up, estimated_down);
+
+- if (in->sw->config.depth < out->sw->config.depth)
++ if (tb_port_path_direction_downstream(in, out))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+@@ -1282,53 +1699,14 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
+ return NULL;
+ }
+
+-static void tb_tunnel_dp(struct tb *tb)
++static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
++ struct tb_port *out)
+ {
+ int available_up, available_down, ret, link_nr;
+ struct tb_cm *tcm = tb_priv(tb);
+- struct tb_port *port, *in, *out;
++ int consumed_up, consumed_down;
+ struct tb_tunnel *tunnel;
+
+- if (!tb_acpi_may_tunnel_dp()) {
+- tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
+- return;
+- }
+-
+- /*
+- * Find pair of inactive DP IN and DP OUT adapters and then
+- * establish a DP tunnel between them.
+- */
+- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+-
+- in = NULL;
+- out = NULL;
+- list_for_each_entry(port, &tcm->dp_resources, list) {
+- if (!tb_port_is_dpin(port))
+- continue;
+-
+- if (tb_port_is_enabled(port)) {
+- tb_port_dbg(port, "DP IN in use\n");
+- continue;
+- }
+-
+- tb_port_dbg(port, "DP IN available\n");
+-
+- out = tb_find_dp_out(tb, port);
+- if (out) {
+- in = port;
+- break;
+- }
+- }
+-
+- if (!in) {
+- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+- return;
+- }
+- if (!out) {
+- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+- return;
+- }
+-
+ /*
+ * This is only applicable to links that are not bonded (so
+ * when Thunderbolt 1 hardware is involved somewhere in the
+@@ -1369,7 +1747,8 @@ static void tb_tunnel_dp(struct tb *tb)
+ goto err_detach_group;
+ }
+
+- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
++ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
++ true);
+ if (ret)
+ goto err_reclaim_usb;
+
+@@ -1388,8 +1767,19 @@ static void tb_tunnel_dp(struct tb *tb)
+ goto err_free;
+ }
+
++	/* If reading the tunnel's consumed bandwidth fails, tear it down */
++ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
++ if (ret)
++ goto err_deactivate;
++
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
++
+ tb_reclaim_usb3_bandwidth(tb, in, out);
++ /*
++ * Transition the links to asymmetric if the consumption exceeds
++ * the threshold.
++ */
++ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+
+ /* Update the domain with the new bandwidth estimation */
+ tb_recalc_estimated_bandwidth(tb);
+@@ -1399,8 +1789,10 @@ static void tb_tunnel_dp(struct tb *tb)
+ * TMU mode to HiFi for CL0s to work.
+ */
+ tb_increase_tmu_accuracy(tunnel);
+- return;
++ return true;
+
++err_deactivate:
++ tb_tunnel_deactivate(tunnel);
+ err_free:
+ tb_tunnel_free(tunnel);
+ err_reclaim_usb:
+@@ -1414,6 +1806,92 @@ static void tb_tunnel_dp(struct tb *tb)
+ pm_runtime_put_autosuspend(&out->sw->dev);
+ pm_runtime_mark_last_busy(&in->sw->dev);
+ pm_runtime_put_autosuspend(&in->sw->dev);
++
++ return false;
++}
++
++static void tb_tunnel_dp(struct tb *tb)
++{
++ struct tb_cm *tcm = tb_priv(tb);
++ struct tb_port *port, *in, *out;
++
++ if (!tb_acpi_may_tunnel_dp()) {
++ tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
++ return;
++ }
++
++ /*
++ * Find pair of inactive DP IN and DP OUT adapters and then
++ * establish a DP tunnel between them.
++ */
++ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
++
++ in = NULL;
++ out = NULL;
++ list_for_each_entry(port, &tcm->dp_resources, list) {
++ if (!tb_port_is_dpin(port))
++ continue;
++
++ if (tb_port_is_enabled(port)) {
++ tb_port_dbg(port, "DP IN in use\n");
++ continue;
++ }
++
++ in = port;
++ tb_port_dbg(in, "DP IN available\n");
++
++ out = tb_find_dp_out(tb, port);
++ if (out)
++ tb_tunnel_one_dp(tb, in, out);
++ else
++ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
++ }
++
++ if (!in)
++ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
++}
++
++static void tb_enter_redrive(struct tb_port *port)
++{
++ struct tb_switch *sw = port->sw;
++
++ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
++ return;
++
++ /*
++ * If we get hot-unplug for the DP IN port of the host router
++ * and the DP resource is not available anymore it means there
++ * is a monitor connected directly to the Type-C port and we are
++ * in "redrive" mode. For this to work we cannot enter RTD3 so
++ * we bump up the runtime PM reference count here.
++ */
++ if (!tb_port_is_dpin(port))
++ return;
++ if (tb_route(sw))
++ return;
++ if (!tb_switch_query_dp_resource(sw, port)) {
++ port->redrive = true;
++ pm_runtime_get(&sw->dev);
++ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
++ }
++}
++
++static void tb_exit_redrive(struct tb_port *port)
++{
++ struct tb_switch *sw = port->sw;
++
++ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
++ return;
++
++ if (!tb_port_is_dpin(port))
++ return;
++ if (tb_route(sw))
++ return;
++ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
++ port->redrive = false;
++ pm_runtime_put(&sw->dev);
++ tb_port_dbg(port, "exit redrive mode\n");
++ }
+ }
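The enter/exit pair above amounts to a guarded reference count: the runtime-PM reference is taken when redrive mode is detected (tracked via port->redrive) and released only if it was taken. A hedged standalone model of that invariant (plain C; the !redrive guard in enter_redrive() is a simplification added for the sketch):

#include <assert.h>
#include <stdbool.h>

static int pm_refcount;	/* models the router's runtime-PM usage count */
static bool redrive;	/* models port->redrive */

static void enter_redrive(bool dp_resource_available)
{
	/* Resource gone => a monitor drives the Type-C port directly */
	if (!dp_resource_available && !redrive) {
		redrive = true;
		pm_refcount++;		/* pm_runtime_get() in the patch */
	}
}

static void exit_redrive(bool dp_resource_available)
{
	if (redrive && dp_resource_available) {
		redrive = false;
		pm_refcount--;		/* pm_runtime_put() in the patch */
	}
}

int main(void)
{
	enter_redrive(false);		/* hot-unplug of host DP IN */
	assert(pm_refcount == 1 && redrive);
	exit_redrive(true);		/* DP resource available again */
	assert(pm_refcount == 0 && !redrive);
	return 0;
}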
+
+ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+@@ -1432,7 +1910,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+ }
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+- tb_deactivate_and_free_tunnel(tunnel);
++ if (tunnel)
++ tb_deactivate_and_free_tunnel(tunnel);
++ else
++ tb_enter_redrive(port);
+ list_del_init(&port->list);
+
+ /*
+@@ -1459,6 +1940,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+ tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
++ tb_exit_redrive(port);
+
+ /* Look for suitable DP IN <-> DP OUT pairs now */
+ tb_tunnel_dp(tb);
+@@ -1701,7 +2183,8 @@ static void tb_handle_hotplug(struct work_struct *work)
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_tmu_disable(port->remote->sw);
+ tb_switch_unconfigure_link(port->remote->sw);
+- tb_switch_lane_bonding_disable(port->remote->sw);
++ tb_switch_set_link_width(port->remote->sw,
++ TB_LINK_WIDTH_SINGLE);
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ if (port->dual_link_port)
+@@ -1836,6 +2319,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
+
+ if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
+ (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
++ /*
++ * If bandwidth on a link is < asym_threshold transition
++ * the link to symmetric.
++ */
++ tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+ /*
+ * If requested bandwidth is less or equal than what is
+ * currently allocated to that tunnel we simply change
+@@ -1861,7 +2349,8 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
+ * are also in the same group but we use the same function here
+ * that we use with the normal bandwidth allocation).
+ */
+- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
++ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
++ true);
+ if (ret)
+ goto reclaim;
+
+@@ -1870,8 +2359,23 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
+
+ if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
+ (*requested_down >= 0 && available_down >= requested_down_corrected)) {
++ /*
++ * If bandwidth on a link is >= asym_threshold
++ * transition the link to asymmetric.
++ */
++ ret = tb_configure_asym(tb, in, out, *requested_up,
++ *requested_down);
++ if (ret) {
++ tb_configure_sym(tb, in, out, 0, 0);
++ return ret;
++ }
++
+ ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
++ if (ret) {
++ tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
++ tb_configure_sym(tb, in, out, 0, 0);
++ }
+ } else {
+ ret = -ENOBUFS;
+ }
+@@ -1937,7 +2441,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+
+ out = tunnel->dst_port;
+
+- if (in->sw->config.depth < out->sw->config.depth) {
++ if (tb_port_path_direction_downstream(in, out)) {
+ requested_up = -1;
+ requested_down = requested_bw;
+ } else {
+@@ -2084,7 +2588,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
+ return 0;
+ }
+
+-static int tb_start(struct tb *tb)
++static int tb_start(struct tb *tb, bool reset)
+ {
+ struct tb_cm *tcm = tb_priv(tb);
+ int ret;
+@@ -2125,12 +2629,24 @@ static int tb_start(struct tb *tb)
+ tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
+ /* Enable TMU if it is off */
+ tb_switch_tmu_enable(tb->root_switch);
+- /* Full scan to discover devices added before the driver was loaded. */
+- tb_scan_switch(tb->root_switch);
+- /* Find out tunnels created by the boot firmware */
+- tb_discover_tunnels(tb);
+- /* Add DP resources from the DP tunnels created by the boot firmware */
+- tb_discover_dp_resources(tb);
++
++ /*
++ * Boot firmware might have created tunnels of its own. Since we
++ * cannot be sure they are usable for us, tear them down and
++ * reset the ports to handle it as new hotplug for USB4 v1
++ * routers (for USB4 v2 and beyond we already do host reset).
++ */
++ if (reset && usb4_switch_version(tb->root_switch) == 1) {
++ tb_switch_reset(tb->root_switch);
++ } else {
++ /* Full scan to discover devices added before the driver was loaded. */
++ tb_scan_switch(tb->root_switch);
++ /* Find out tunnels created by the boot firmware */
++ tb_discover_tunnels(tb);
++ /* Add DP resources from the DP tunnels created by the boot firmware */
++ tb_discover_dp_resources(tb);
++ }
++
+ /*
+ * If the boot firmware did not create USB 3.x tunnels create them
+ * now for the whole topology.
+@@ -2181,7 +2697,8 @@ static void tb_restore_children(struct tb_switch *sw)
+ continue;
+
+ if (port->remote) {
+- tb_switch_lane_bonding_enable(port->remote->sw);
++ tb_switch_set_link_width(port->remote->sw,
++ port->remote->sw->link_width);
+ tb_switch_configure_link(port->remote->sw);
+
+ tb_restore_children(port->remote->sw);
+@@ -2200,10 +2717,14 @@ static int tb_resume_noirq(struct tb *tb)
+
+ tb_dbg(tb, "resuming...\n");
+
+- /* remove any pci devices the firmware might have setup */
+- tb_switch_reset(tb->root_switch);
++ /*
++ * For non-USB4 hosts (Apple systems) remove any PCIe devices
++ * the firmware might have setup.
++ */
++ if (!tb_switch_is_usb4(tb->root_switch))
++ tb_switch_reset(tb->root_switch);
+
+- tb_switch_resume(tb->root_switch);
++ tb_switch_resume(tb->root_switch, false);
+ tb_free_invalid_tunnels(tb);
+ tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
+@@ -2329,7 +2850,7 @@ static int tb_runtime_resume(struct tb *tb)
+ struct tb_tunnel *tunnel, *n;
+
+ mutex_lock(&tb->lock);
+- tb_switch_resume(tb->root_switch);
++ tb_switch_resume(tb->root_switch, true);
+ tb_free_invalid_tunnels(tb);
+ tb_restore_children(tb->root_switch);
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index d2a55ad2fd3e69..920dac8a63e1df 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -23,6 +23,8 @@
+ #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
+ /* Disable CLx if not supported */
+ #define QUIRK_NO_CLX BIT(1)
++/* Need to keep power on while USB4 port is in redrive mode */
++#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
+
+ /**
+ * struct tb_nvm - Structure holding NVM information
+@@ -162,11 +164,6 @@ struct tb_switch_tmu {
+ * switches) you need to have domain lock held.
+ *
+ * In USB4 terminology this structure represents a router.
+- *
+- * Note @link_width is not the same as whether link is bonded or not.
+- * For Gen 4 links the link is also bonded when it is asymmetric. The
+- * correct way to find out whether the link is bonded or not is to look
+- * @bonded field of the upstream port.
+ */
+ struct tb_switch {
+ struct device dev;
+@@ -261,6 +258,7 @@ struct tb_bandwidth_group {
+ * @group_list: The adapter is linked to the group's list of ports through this
+ * @max_bw: Maximum possible bandwidth through this adapter if set to
+ * non-zero.
++ * @redrive: For DP IN, if true the adapter is in redrive mode.
+ *
+ * In USB4 terminology this structure represents an adapter (protocol or
+ * lane adapter).
+@@ -289,6 +287,7 @@ struct tb_port {
+ struct tb_bandwidth_group *group;
+ struct list_head group_list;
+ unsigned int max_bw;
++ bool redrive;
+ };
+
+ /**
+@@ -484,7 +483,7 @@ struct tb_path {
+ */
+ struct tb_cm_ops {
+ int (*driver_ready)(struct tb *tb);
+- int (*start)(struct tb *tb);
++ int (*start)(struct tb *tb, bool reset);
+ void (*stop)(struct tb *tb);
+ int (*suspend_noirq)(struct tb *tb);
+ int (*resume_noirq)(struct tb *tb);
+@@ -731,7 +730,7 @@ int tb_xdomain_init(void);
+ void tb_xdomain_exit(void);
+
+ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
+-int tb_domain_add(struct tb *tb);
++int tb_domain_add(struct tb *tb, bool reset);
+ void tb_domain_remove(struct tb *tb);
+ int tb_domain_suspend_noirq(struct tb *tb);
+ int tb_domain_resume_noirq(struct tb *tb);
+@@ -798,7 +797,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
+ int tb_switch_add(struct tb_switch *sw);
+ void tb_switch_remove(struct tb_switch *sw);
+ void tb_switch_suspend(struct tb_switch *sw, bool runtime);
+-int tb_switch_resume(struct tb_switch *sw);
++int tb_switch_resume(struct tb_switch *sw, bool runtime);
+ int tb_switch_reset(struct tb_switch *sw);
+ int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec);
+@@ -864,6 +863,15 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
+ return tb_port_at(tb_route(sw), tb_switch_parent(sw));
+ }
+
++/**
++ * tb_switch_depth() - Returns depth of the connected router
++ * @sw: Router
++ */
++static inline int tb_switch_depth(const struct tb_switch *sw)
++{
++ return sw->config.depth;
++}
++
+ static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
+ {
+ return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+@@ -956,8 +964,7 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+ return !sw->config.enabled;
+ }
+
+-int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+-void tb_switch_lane_bonding_disable(struct tb_switch *sw);
++int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width);
+ int tb_switch_configure_link(struct tb_switch *sw);
+ void tb_switch_unconfigure_link(struct tb_switch *sw);
+
+@@ -1040,6 +1047,21 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid);
+ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+ struct tb_port *prev);
+
++/**
++ * tb_port_path_direction_downstream() - Checks if path directed downstream
++ * @src: Source adapter
++ * @dst: Destination adapter
++ *
++ * Returns %true only if the specified path from source adapter (@src)
++ * to destination adapter (@dst) is directed downstream.
++ */
++static inline bool
++tb_port_path_direction_downstream(const struct tb_port *src,
++ const struct tb_port *dst)
++{
++ return src->sw->config.depth < dst->sw->config.depth;
++}
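Since this helper replaces several open-coded depth comparisons elsewhere in the patch, a small usage sketch may help; the struct and depth values below are illustrative stand-ins, not kernel types:

#include <assert.h>
#include <stdbool.h>

struct port { int depth; };	/* stands in for src->sw->config.depth */

static bool path_downstream(const struct port *src, const struct port *dst)
{
	return src->depth < dst->depth;
}

int main(void)
{
	struct port host_dp_in = { 0 };		/* host router, depth 0 */
	struct port monitor_out = { 2 };	/* device two hops down */

	/* DP IN at the host, DP OUT below it: the path runs downstream */
	assert(path_downstream(&host_dp_in, &monitor_out));
	return 0;
}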
++
+ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
+ {
+ return tb_port_is_null(port) && port->sw->credit_allocation;
+@@ -1057,12 +1079,29 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
+ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
+ (p) = tb_next_port_on_path((src), (dst), (p)))
+
++/**
++ * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path
++ * @src: Source port
++ * @dst: Destination port
++ * @p: Port used as iterator
++ *
++ * Walks over each upstream lane adapter on path from @src to @dst.
++ */
++#define tb_for_each_upstream_port_on_path(src, dst, p) \
++ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
++ (p) = tb_next_port_on_path((src), (dst), (p))) \
++ if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\
++ continue; \
++ } else
++
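The trailing if/else in this macro is the usual trick for filtering inside a for-each macro: the caller's loop body becomes the else branch, so non-matching ports fall into continue without the macro needing braces of its own. A generic illustration of the same pattern (for_each_even is a made-up example, not a kernel macro):

#include <stdio.h>

/* Same filtering shape as tb_for_each_upstream_port_on_path() */
#define for_each_even(i, n) \
	for ((i) = 0; (i) < (n); (i)++) \
		if ((i) % 2 != 0) { \
			continue; \
		} else

int main(void)
{
	int i;

	for_each_even(i, 6)
		printf("%d\n", i);	/* prints 0, 2 and 4 */
	return 0;
}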
+ int tb_port_get_link_speed(struct tb_port *port);
++int tb_port_get_link_generation(struct tb_port *port);
+ int tb_port_get_link_width(struct tb_port *port);
++bool tb_port_width_supported(struct tb_port *port, unsigned int width);
+ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
+ int tb_port_lane_bonding_enable(struct tb_port *port);
+ void tb_port_lane_bonding_disable(struct tb_port *port);
+-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
++int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
+ int timeout_msec);
+ int tb_port_update_credits(struct tb_port *port);
+
+@@ -1096,6 +1135,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+ void tb_path_free(struct tb_path *path);
+ int tb_path_activate(struct tb_path *path);
+ void tb_path_deactivate(struct tb_path *path);
++int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
+ bool tb_path_is_invalid(struct tb_path *path);
+ bool tb_path_port_on_path(const struct tb_path *path,
+ const struct tb_port *port);
+@@ -1115,6 +1155,7 @@ int tb_drom_read(struct tb_switch *sw);
+ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
+
+ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
++int tb_lc_reset_port(struct tb_port *port);
+ int tb_lc_configure_port(struct tb_port *port);
+ void tb_lc_unconfigure_port(struct tb_port *port);
+ int tb_lc_configure_xdomain(struct tb_port *port);
+@@ -1218,6 +1259,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+ return usb4_switch_version(sw) > 0;
+ }
+
++void usb4_switch_check_wakes(struct tb_switch *sw);
+ int usb4_switch_setup(struct tb_switch *sw);
+ int usb4_switch_configuration_valid(struct tb_switch *sw);
+ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+@@ -1247,6 +1289,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
+
+ int usb4_port_unlock(struct tb_port *port);
+ int usb4_port_hotplug_enable(struct tb_port *port);
++int usb4_port_reset(struct tb_port *port);
+ int usb4_port_configure(struct tb_port *port);
+ void usb4_port_unconfigure(struct tb_port *port);
+ int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
+@@ -1256,6 +1299,11 @@ int usb4_port_router_online(struct tb_port *port);
+ int usb4_port_enumerate_retimers(struct tb_port *port);
+ bool usb4_port_clx_supported(struct tb_port *port);
+ int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
++
++bool usb4_port_asym_supported(struct tb_port *port);
++int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
++int usb4_port_asym_start(struct tb_port *port);
++
+ int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
+ unsigned int ber_level, bool timing, bool right_high,
+ u32 *results);
+diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
+index cf9f2370878a8e..4419e274d2b4c6 100644
+--- a/drivers/thunderbolt/tb_regs.h
++++ b/drivers/thunderbolt/tb_regs.h
+@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
+ #define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
+
+ #define ROUTER_CS_1 0x01
++#define ROUTER_CS_3 0x03
++#define ROUTER_CS_3_V BIT(31)
+ #define ROUTER_CS_4 0x04
+ /* Used with the router cmuv field */
+ #define ROUTER_CS_4_CMUV_V1 0x10
+@@ -203,7 +205,7 @@ struct tb_regs_switch_header {
+ #define ROUTER_CS_5_WOP BIT(1)
+ #define ROUTER_CS_5_WOU BIT(2)
+ #define ROUTER_CS_5_WOD BIT(3)
+-#define ROUTER_CS_5_C3S BIT(23)
++#define ROUTER_CS_5_CNS BIT(23)
+ #define ROUTER_CS_5_PTO BIT(24)
+ #define ROUTER_CS_5_UTO BIT(25)
+ #define ROUTER_CS_5_HCO BIT(26)
+@@ -346,10 +348,14 @@ struct tb_regs_port_header {
+ #define LANE_ADP_CS_1 0x01
+ #define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
+ #define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
+-#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
++#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(5, 4)
+ #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
+ #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
+ #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK GENMASK(7, 6)
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX 0x1
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX 0x2
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL 0x0
+ #define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
+ #define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
+ #define LANE_ADP_CS_1_CL2_ENABLE BIT(12)
+@@ -382,12 +388,16 @@ struct tb_regs_port_header {
+ #define PORT_CS_18_WOCS BIT(16)
+ #define PORT_CS_18_WODS BIT(17)
+ #define PORT_CS_18_WOU4S BIT(18)
++#define PORT_CS_18_CSA BIT(22)
++#define PORT_CS_18_TIP BIT(24)
+ #define PORT_CS_19 0x13
++#define PORT_CS_19_DPR BIT(0)
+ #define PORT_CS_19_PC BIT(3)
+ #define PORT_CS_19_PID BIT(4)
+ #define PORT_CS_19_WOC BIT(16)
+ #define PORT_CS_19_WOD BIT(17)
+ #define PORT_CS_19_WOU4 BIT(18)
++#define PORT_CS_19_START_ASYM BIT(24)
+
+ /* Display Port adapter registers */
+ #define ADP_DP_CS_0 0x00
+@@ -579,6 +589,9 @@ struct tb_regs_hop {
+ #define TB_LC_POWER 0x740
+
+ /* Link controller registers */
++#define TB_LC_PORT_MODE 0x26
++#define TB_LC_PORT_MODE_DPR BIT(0)
++
+ #define TB_LC_CS_42 0x2a
+ #define TB_LC_CS_42_USB_PLUGGED BIT(31)
+
+diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
+index a6810fb368600f..8aec678d80d357 100644
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -21,12 +21,18 @@
+ #define TB_PCI_PATH_DOWN 0
+ #define TB_PCI_PATH_UP 1
+
++#define TB_PCI_PRIORITY 3
++#define TB_PCI_WEIGHT 1
++
+ /* USB3 adapters use always HopID of 8 for both directions */
+ #define TB_USB3_HOPID 8
+
+ #define TB_USB3_PATH_DOWN 0
+ #define TB_USB3_PATH_UP 1
+
++#define TB_USB3_PRIORITY 3
++#define TB_USB3_WEIGHT 2
++
+ /* DP adapters use HopID 8 for AUX and 9 for Video */
+ #define TB_DP_AUX_TX_HOPID 8
+ #define TB_DP_AUX_RX_HOPID 8
+@@ -36,6 +42,12 @@
+ #define TB_DP_AUX_PATH_OUT 1
+ #define TB_DP_AUX_PATH_IN 2
+
++#define TB_DP_VIDEO_PRIORITY 1
++#define TB_DP_VIDEO_WEIGHT 1
++
++#define TB_DP_AUX_PRIORITY 2
++#define TB_DP_AUX_WEIGHT 1
++
+ /* Minimum number of credits needed for PCIe path */
+ #define TB_MIN_PCIE_CREDITS 6U
+ /*
+@@ -46,6 +58,18 @@
+ /* Minimum number of credits for DMA path */
+ #define TB_MIN_DMA_CREDITS 1
+
++#define TB_DMA_PRIORITY 5
++#define TB_DMA_WEIGHT 1
++
++/*
++ * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
++ * according to USB4 v2 Connection Manager guide. This ends up reserving
++ * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
++ * account.
++ */
++#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
++#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
++
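A quick check that these weights yield the figures quoted in the comment above, given TB_PCI_WEIGHT = 1 and TB_USB3_WEIGHT = 2 as defined earlier in this file (standalone sketch, definitions repeated locally):

#include <assert.h>

#define TB_PCI_WEIGHT	1
#define TB_USB3_WEIGHT	2

#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)

int main(void)
{
	/* 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x, as the comment says */
	assert(USB4_V2_PCI_MIN_BANDWIDTH == 1500);
	assert(USB4_V2_USB3_MIN_BANDWIDTH == 3000);
	return 0;
}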
+ static unsigned int dma_credits = TB_DMA_CREDITS;
+ module_param(dma_credits, uint, 0444);
+ MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
+@@ -58,27 +82,6 @@ MODULE_PARM_DESC(bw_alloc_mode,
+
+ static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+
+-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
+- do { \
+- struct tb_tunnel *__tunnel = (tunnel); \
+- level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
+- tb_route(__tunnel->src_port->sw), \
+- __tunnel->src_port->port, \
+- tb_route(__tunnel->dst_port->sw), \
+- __tunnel->dst_port->port, \
+- tb_tunnel_names[__tunnel->type], \
+- ## arg); \
+- } while (0)
+-
+-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+- __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+-#define tb_tunnel_warn(tunnel, fmt, arg...) \
+- __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+-#define tb_tunnel_info(tunnel, fmt, arg...) \
+- __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+-#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+- __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+-
+ static inline unsigned int tb_usable_credits(const struct tb_port *port)
+ {
+ return port->total_credits - port->ctl_credits;
+@@ -156,11 +159,11 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
+
+ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
+ {
++ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
+ int ret;
+
+ 	/* Only supported if both routers are at least USB4 v2 */
+- if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
+- usb4_switch_version(tunnel->dst_port->sw) < 2)
++ if (tb_port_get_link_generation(port) < 4)
+ return 0;
+
+ ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
+@@ -234,8 +237,8 @@ static int tb_pci_init_path(struct tb_path *path)
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+- path->priority = 3;
+- path->weight = 1;
++ path->priority = TB_PCI_PRIORITY;
++ path->weight = TB_PCI_WEIGHT;
+ path->drop_packages = 0;
+
+ tb_path_for_each_hop(path, hop) {
+@@ -376,6 +379,51 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+ return NULL;
+ }
+
++/**
++ * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
++ * @port: Lane 0 adapter
++ * @reserved_up: Upstream bandwidth in Mb/s to reserve
++ * @reserved_down: Downstream bandwidth in Mb/s to reserve
++ *
++ * Can be called for any connected lane 0 adapter to find out how much
++ * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
++ * Returns true if there is something to be reserved and writes the
++ * amount to @reserved_down/@reserved_up. Otherwise returns false and
++ * does not touch the parameters.
++ */
++bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
++ int *reserved_down)
++{
++ if (WARN_ON_ONCE(!port->remote))
++ return false;
++
++ if (!tb_acpi_may_tunnel_pcie())
++ return false;
++
++ if (tb_port_get_link_generation(port) < 4)
++ return false;
++
++ /* Must have PCIe adapters */
++ if (tb_is_upstream_port(port)) {
++ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
++ return false;
++ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
++ return false;
++ } else {
++ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
++ return false;
++ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
++ return false;
++ }
++
++ *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
++ *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
++
++ tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
++ *reserved_down);
++ return true;
++}
++
+ static bool tb_dp_is_usb4(const struct tb_switch *sw)
+ {
+ /* Titan Ridge DP adapters need the same treatment as USB4 */
+@@ -614,8 +662,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+- in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
++ tb_tunnel_dbg(tunnel,
++ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+ /*
+ * If the tunnel bandwidth is limited (max_bw is set) then see
+@@ -624,10 +673,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+ bw = tb_dp_bandwidth(out_rate, out_lanes);
+- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+- out_rate, out_lanes, bw);
++ tb_tunnel_dbg(tunnel,
++ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++ out_rate, out_lanes, bw);
+
+- if (in->sw->config.depth < out->sw->config.depth)
++ if (tb_port_path_direction_downstream(in, out))
+ max_bw = tunnel->max_down;
+ else
+ max_bw = tunnel->max_up;
+@@ -639,13 +689,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+ out_rate, out_lanes, &new_rate,
+ &new_lanes);
+ if (ret) {
+- tb_port_info(out, "not enough bandwidth for DP tunnel\n");
++ tb_tunnel_info(tunnel, "not enough bandwidth\n");
+ return ret;
+ }
+
+ new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+- tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+- new_rate, new_lanes, new_bw);
++ tb_tunnel_dbg(tunnel,
++ "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
++ new_rate, new_lanes, new_bw);
+
+ /*
+ * Set new rate and number of lanes before writing it to
+@@ -662,7 +713,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+ */
+ if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
+ out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
+- tb_port_dbg(out, "disabling LTTPR\n");
++ tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
+ }
+
+ return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
+@@ -712,8 +763,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
+ lanes = min(in_lanes, out_lanes);
+ tmp = tb_dp_bandwidth(rate, lanes);
+
+- tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
+- lanes, tmp);
++ tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
++ rate, lanes, tmp);
+
+ ret = usb4_dp_port_set_nrd(in, rate, lanes);
+ if (ret)
+@@ -728,15 +779,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
+ rate = min(in_rate, out_rate);
+ tmp = tb_dp_bandwidth(rate, lanes);
+
+- tb_port_dbg(in,
+- "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
+- rate, lanes, tmp);
++ tb_tunnel_dbg(tunnel,
++ "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
++ rate, lanes, tmp);
+
+ for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
+ granularity *= 2)
+ ;
+
+- tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
++ tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
+
+ /*
+ * Returns -EINVAL if granularity above is outside of the
+@@ -751,12 +802,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
+ * max_up/down fields. For discovery we just read what the
+ * estimation was set to.
+ */
+- if (in->sw->config.depth < out->sw->config.depth)
++ if (tb_port_path_direction_downstream(in, out))
+ estimated_bw = tunnel->max_down;
+ else
+ estimated_bw = tunnel->max_up;
+
+- tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
++ tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
+
+ ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
+ if (ret)
+@@ -767,7 +818,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
+ if (ret)
+ return ret;
+
+- tb_port_dbg(in, "bandwidth allocation mode enabled\n");
++ tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
+ return 0;
+ }
+
+@@ -788,7 +839,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
+ if (!usb4_dp_port_bandwidth_mode_supported(in))
+ return 0;
+
+- tb_port_dbg(in, "bandwidth allocation mode supported\n");
++ tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
+
+ ret = usb4_dp_port_set_cm_id(in, tb->index);
+ if (ret)
+@@ -805,7 +856,7 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
+ return;
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
+- tb_port_dbg(in, "bandwidth allocation mode disabled\n");
++ tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
+ }
+ }
+
+@@ -921,10 +972,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
+ if (allocated_bw == max_bw)
+ allocated_bw = ret;
+
+- tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
+- allocated_bw);
+-
+- if (in->sw->config.depth < out->sw->config.depth) {
++ if (tb_port_path_direction_downstream(in, out)) {
+ *consumed_up = 0;
+ *consumed_down = allocated_bw;
+ } else {
+@@ -959,7 +1007,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
+ if (allocated_bw == max_bw)
+ allocated_bw = ret;
+
+- if (in->sw->config.depth < out->sw->config.depth) {
++ if (tb_port_path_direction_downstream(in, out)) {
+ *allocated_up = 0;
+ *allocated_down = allocated_bw;
+ } else {
+@@ -987,7 +1035,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ if (ret < 0)
+ return ret;
+
+- if (in->sw->config.depth < out->sw->config.depth) {
++ if (tb_port_path_direction_downstream(in, out)) {
+ tmp = min(*alloc_down, max_bw);
+ ret = usb4_dp_port_allocate_bandwidth(in, tmp);
+ if (ret)
+@@ -1006,9 +1054,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ /* Now we can use BW mode registers to figure out the bandwidth */
+ /* TODO: need to handle discovery too */
+ tunnel->bw_mode = true;
+-
+- tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
+- tmp);
+ return 0;
+ }
+
+@@ -1035,8 +1080,7 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
+ *rate = tb_dp_cap_get_rate(val);
+ *lanes = tb_dp_cap_get_lanes(val);
+
+- tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
+- tb_dp_bandwidth(*rate, *lanes));
++ tb_tunnel_dbg(tunnel, "DPRX read done\n");
+ return 0;
+ }
+ usleep_range(100, 150);
+@@ -1073,9 +1117,6 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
+
+ *rate = tb_dp_cap_get_rate(val);
+ *lanes = tb_dp_cap_get_lanes(val);
+-
+- tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
+- tb_dp_bandwidth(*rate, *lanes));
+ return 0;
+ }
+
+@@ -1092,7 +1133,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ if (ret < 0)
+ return ret;
+
+- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
++ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ *max_up = 0;
+ *max_down = ret;
+ } else {
+@@ -1150,7 +1191,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+ return 0;
+ }
+
+- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
++ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ *consumed_up = 0;
+ *consumed_down = tb_dp_bandwidth(rate, lanes);
+ } else {
+@@ -1180,8 +1221,8 @@ static void tb_dp_init_aux_path(struct tb_path *path)
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+- path->priority = 2;
+- path->weight = 1;
++ path->priority = TB_DP_AUX_PRIORITY;
++ path->weight = TB_DP_AUX_WEIGHT;
+
+ tb_path_for_each_hop(path, hop)
+ tb_dp_init_aux_credits(hop);
+@@ -1224,8 +1265,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_NONE;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+- path->priority = 1;
+- path->weight = 1;
++ path->priority = TB_DP_VIDEO_PRIORITY;
++ path->weight = TB_DP_VIDEO_WEIGHT;
+
+ tb_path_for_each_hop(path, hop) {
+ int ret;
+@@ -1253,8 +1294,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
+ rate = tb_dp_cap_get_rate(dp_cap);
+ lanes = tb_dp_cap_get_lanes(dp_cap);
+
+- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+- rate, lanes, tb_dp_bandwidth(rate, lanes));
++ tb_tunnel_dbg(tunnel,
++ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++ rate, lanes, tb_dp_bandwidth(rate, lanes));
+
+ out = tunnel->dst_port;
+
+@@ -1265,8 +1307,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
+ rate = tb_dp_cap_get_rate(dp_cap);
+ lanes = tb_dp_cap_get_lanes(dp_cap);
+
+- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+- rate, lanes, tb_dp_bandwidth(rate, lanes));
++ tb_tunnel_dbg(tunnel,
++ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++ rate, lanes, tb_dp_bandwidth(rate, lanes));
+
+ if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1))
+@@ -1275,8 +1318,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
+ rate = tb_dp_cap_get_rate(dp_cap);
+ lanes = tb_dp_cap_get_lanes(dp_cap);
+
+- tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+- rate, lanes, tb_dp_bandwidth(rate, lanes));
++ tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
++ rate, lanes, tb_dp_bandwidth(rate, lanes));
+ }
+
+ /**
+@@ -1497,8 +1540,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+- path->priority = 5;
+- path->weight = 1;
++ path->priority = TB_DMA_PRIORITY;
++ path->weight = TB_DMA_WEIGHT;
+ path->clear_fc = true;
+
+ /*
+@@ -1531,8 +1574,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+- path->priority = 5;
+- path->weight = 1;
++ path->priority = TB_DMA_PRIORITY;
++ path->weight = TB_DMA_WEIGHT;
+ path->clear_fc = true;
+
+ tb_path_for_each_hop(path, hop) {
+@@ -1758,14 +1801,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+ static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
+ int *consumed_up, int *consumed_down)
+ {
+- int pcie_enabled = tb_acpi_may_tunnel_pcie();
++ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
++ int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
+
+ /*
+ * PCIe tunneling, if enabled, affects the USB3 bandwidth so
+	 * take that into account here.
+ */
+- *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
+- *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
++ *consumed_up = tunnel->allocated_up *
++ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
++ *consumed_down = tunnel->allocated_down *
++ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
++
++ if (tb_port_get_link_generation(port) >= 4) {
++ *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
++ *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
++ }
++
+ return 0;
+ }
+
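
For reference, the weight-based scaling in tb_usb3_consumed_bandwidth() is plain integer arithmetic: the old literals 3 and 1 become TB_USB3_WEIGHT and TB_PCI_WEIGHT. A minimal standalone sketch (userspace C; the weight values are assumptions read off the literals this hunk replaces):

/* Sketch only: mirrors the scaling above; weights assumed from the
 * literals this patch replaces (TB_USB3_WEIGHT = 3, TB_PCI_WEIGHT = 1).
 */
#include <stdio.h>

#define TB_USB3_WEIGHT 3
#define TB_PCI_WEIGHT  1

static int usb3_consumed(int allocated, int pcie_enabled)
{
	int pcie_weight = pcie_enabled ? TB_PCI_WEIGHT : 0;

	return allocated * (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
}

int main(void)
{
	/* With PCIe tunneling enabled, 3000 Mb/s scales to 4000 Mb/s. */
	printf("%d\n", usb3_consumed(3000, 1));
	return 0;
}
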
+@@ -1871,8 +1923,8 @@ static void tb_usb3_init_path(struct tb_path *path)
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+- path->priority = 3;
+- path->weight = 3;
++ path->priority = TB_USB3_PRIORITY;
++ path->weight = TB_USB3_WEIGHT;
+ path->drop_packages = 0;
+
+ tb_path_for_each_hop(path, hop)
+@@ -2387,3 +2439,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+ tunnel->reclaim_available_bandwidth(tunnel, available_up,
+ available_down);
+ }
++
++const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
++{
++ return tb_tunnel_names[tunnel->type];
++}
+diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
+index bf690f7beeeeba..b4cff5482112d3 100644
+--- a/drivers/thunderbolt/tunnel.h
++++ b/drivers/thunderbolt/tunnel.h
+@@ -80,6 +80,8 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid);
+ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
++bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
++ int *reserved_down);
+ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+ bool alloc_hopid);
+ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+@@ -137,5 +139,27 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+ return tunnel->type == TB_TUNNEL_USB3;
+ }
+
+-#endif
++const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
++
++#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
++ do { \
++ struct tb_tunnel *__tunnel = (tunnel); \
++ level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
++ tb_route(__tunnel->src_port->sw), \
++ __tunnel->src_port->port, \
++ tb_route(__tunnel->dst_port->sw), \
++ __tunnel->dst_port->port, \
++ tb_tunnel_type_name(__tunnel), \
++ ## arg); \
++ } while (0)
+
++#define tb_tunnel_WARN(tunnel, fmt, arg...) \
++ __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
++#define tb_tunnel_warn(tunnel, fmt, arg...) \
++ __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
++#define tb_tunnel_info(tunnel, fmt, arg...) \
++ __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
++#define tb_tunnel_dbg(tunnel, fmt, arg...) \
++ __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
++
++#endif
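
The __TB_TUNNEL_PRINT() wrapper above prefixes every message with the tunnel's route/port endpoints and type string, so callers no longer have to pick one port to log against. A hypothetical call site (the function and message are illustrative, not taken from the patch):

/* Illustrative only: how the new tunnel-scoped loggers are used. */
static void example_log(struct tb_tunnel *tunnel)
{
	/* Emits "<src route>:<port> <-> <dst route>:<port> (<type>): ..." */
	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", 1000);
	tb_tunnel_warn(tunnel, "bandwidth allocation failed\n");
}
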
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 05ddb224c46491..8db9bd32f47384 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
+ tx_dwords, rx_data, rx_dwords);
+ }
+
+-static void usb4_switch_check_wakes(struct tb_switch *sw)
++/**
++ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
++ * @sw: Router whose wakes to check
++ *
++ * Checks for wakes that occurred during suspend and notifies the PM core about them.
++ */
++void usb4_switch_check_wakes(struct tb_switch *sw)
+ {
+ bool wakeup_usb4 = false;
+ struct usb4_port *usb4;
+@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
+ bool wakeup = false;
+ u32 val;
+
+- if (!device_may_wakeup(&sw->dev))
+- return;
+-
+ if (tb_route(sw)) {
+ if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+ return;
+@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
+ u32 val = 0;
+ int ret;
+
+- usb4_switch_check_wakes(sw);
+-
+ if (!tb_route(sw))
+ return 0;
+
+@@ -290,7 +291,7 @@ int usb4_switch_setup(struct tb_switch *sw)
+ }
+
+ /* TBT3 supported by the CM */
+- val |= ROUTER_CS_5_C3S;
++ val &= ~ROUTER_CS_5_CNS;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ }
+@@ -1113,6 +1114,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+ }
+
++/**
++ * usb4_port_reset() - Issue downstream port reset
++ * @port: USB4 port to reset
++ *
++ * Issues downstream port reset to @port.
++ */
++int usb4_port_reset(struct tb_port *port)
++{
++ int ret;
++ u32 val;
++
++ if (!port->cap_usb4)
++ return -EINVAL;
++
++ ret = tb_port_read(port, &val, TB_CFG_PORT,
++ port->cap_usb4 + PORT_CS_19, 1);
++ if (ret)
++ return ret;
++
++ val |= PORT_CS_19_DPR;
++
++ ret = tb_port_write(port, &val, TB_CFG_PORT,
++ port->cap_usb4 + PORT_CS_19, 1);
++ if (ret)
++ return ret;
++
++ fsleep(10000);
++
++ ret = tb_port_read(port, &val, TB_CFG_PORT,
++ port->cap_usb4 + PORT_CS_19, 1);
++ if (ret)
++ return ret;
++
++ val &= ~PORT_CS_19_DPR;
++
++ return tb_port_write(port, &val, TB_CFG_PORT,
++ port->cap_usb4 + PORT_CS_19, 1);
++}
++
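
usb4_port_reset() above is a set-delay-clear pulse of the PORT_CS_19_DPR bit, with a fresh read of the register between the two writes. The same read-modify-write pulse condensed into a sketch (pulse_bit() is a hypothetical helper, not a kernel API; it reuses only the accessors already used in the function above):

/* Hypothetical helper illustrating the DPR pulse pattern. */
static int pulse_bit(struct tb_port *port, int reg, u32 bit)
{
	u32 val;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT, reg, 1);
	if (ret)
		return ret;
	val |= bit;
	ret = tb_port_write(port, &val, TB_CFG_PORT, reg, 1);
	if (ret)
		return ret;

	fsleep(10000);	/* hold the reset for 10 ms, as above */

	ret = tb_port_read(port, &val, TB_CFG_PORT, reg, 1);
	if (ret)
		return ret;
	val &= ~bit;
	return tb_port_write(port, &val, TB_CFG_PORT, reg, 1);
}
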
+ static int usb4_port_set_configured(struct tb_port *port, bool configured)
+ {
+ int ret;
+@@ -1454,6 +1494,112 @@ bool usb4_port_clx_supported(struct tb_port *port)
+ return !!(val & PORT_CS_18_CPS);
+ }
+
++/**
++ * usb4_port_asym_supported() - If the port supports asymmetric link
++ * @port: USB4 port
++ *
++ * Checks if the port and the cable support asymmetric link and returns
++ * %true in that case.
++ */
++bool usb4_port_asym_supported(struct tb_port *port)
++{
++ u32 val;
++
++ if (!port->cap_usb4)
++ return false;
++
++ if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
++ return false;
++
++ return !!(val & PORT_CS_18_CSA);
++}
++
++/**
++ * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
++ * @port: USB4 port
++ * @width: Asymmetric width to configure
++ *
++ * Sets USB4 port link width to @width. Can be called for widths where
++ * usb4_port_asym_width_supported() returned %true.
++ */
++int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
++{
++ u32 val;
++ int ret;
++
++ if (!port->cap_phy)
++ return -EINVAL;
++
++ ret = tb_port_read(port, &val, TB_CFG_PORT,
++ port->cap_phy + LANE_ADP_CS_1, 1);
++ if (ret)
++ return ret;
++
++ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
++ switch (width) {
++ case TB_LINK_WIDTH_DUAL:
++ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
++ break;
++ case TB_LINK_WIDTH_ASYM_TX:
++ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
++ break;
++ case TB_LINK_WIDTH_ASYM_RX:
++ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return tb_port_write(port, &val, TB_CFG_PORT,
++ port->cap_phy + LANE_ADP_CS_1, 1);
++}
++
++/**
++ * usb4_port_asym_start() - Start symmetry change and wait for completion
++ * @port: USB4 port
++ *
++ * Start symmetry change of the link to asymmetric or symmetric
++ * (according to what was previously set in tb_port_set_link_width()).
++ * Wait for completion of the change.
++ *
++ * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
++ * a negative errno in case of a failure.
++ */
++int usb4_port_asym_start(struct tb_port *port)
++{
++ int ret;
++ u32 val;
++
++ ret = tb_port_read(port, &val, TB_CFG_PORT,
++ port->cap_usb4 + PORT_CS_19, 1);
++ if (ret)
++ return ret;
++
++ val &= ~PORT_CS_19_START_ASYM;
++ val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);
++
++ ret = tb_port_write(port, &val, TB_CFG_PORT,
++ port->cap_usb4 + PORT_CS_19, 1);
++ if (ret)
++ return ret;
++
++ /*
++ * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
++ * port started the symmetry transition.
++ */
++ ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
++ PORT_CS_19_START_ASYM, 0, 1000);
++ if (ret)
++ return ret;
++
++	/* Then wait for the transition to be completed */
++ return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
++ PORT_CS_18_TIP, 0, 5000);
++}
++
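
Together, the two helpers above form a two-step handshake: program the target width into LANE_ADP_CS_1, then set PORT_CS_19_START_ASYM and poll until the router clears it and PORT_CS_18_TIP drops. A hypothetical caller (not part of the patch; error handling trimmed):

/* Hypothetical caller sketching the asymmetric-width flow. */
static int example_make_asym_tx(struct tb_port *port)
{
	int ret;

	if (!usb4_port_asym_supported(port))
		return -EOPNOTSUPP;

	ret = usb4_port_asym_set_link_width(port, TB_LINK_WIDTH_ASYM_TX);
	if (ret)
		return ret;

	/* Triggers the transition and waits for it to complete. */
	return usb4_port_asym_start(port);
}
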
+ /**
+  * usb4_port_margining_caps() - Read USB4 port margining capabilities
+ * @port: USB4 port
+@@ -2234,13 +2380,13 @@ int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
+ goto err_request;
+
+ /*
+- * Always keep 1000 Mb/s to make sure xHCI has at least some
++ * Always keep 900 Mb/s to make sure xHCI has at least some
+ * bandwidth available for isochronous traffic.
+ */
+- if (consumed_up < 1000)
+- consumed_up = 1000;
+- if (consumed_down < 1000)
+- consumed_down = 1000;
++ if (consumed_up < 900)
++ consumed_up = 900;
++ if (consumed_down < 900)
++ consumed_down = 900;
+
+ ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
+ consumed_down);
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 98764e740c0783..34c01874f45beb 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -377,18 +377,21 @@ void xen_console_resume(void)
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ static void xencons_disconnect_backend(struct xencons_info *info)
+ {
+- if (info->irq > 0)
+- unbind_from_irqhandler(info->irq, NULL);
+- info->irq = 0;
++ if (info->hvc != NULL)
++ hvc_remove(info->hvc);
++ info->hvc = NULL;
++ if (info->irq > 0) {
++ evtchn_put(info->evtchn);
++ info->irq = 0;
++ info->evtchn = 0;
++ }
++ /* evtchn_put() will also close it so this is only an error path */
+ if (info->evtchn > 0)
+ xenbus_free_evtchn(info->xbdev, info->evtchn);
+ info->evtchn = 0;
+ if (info->gntref > 0)
+ gnttab_free_grant_references(info->gntref);
+ info->gntref = 0;
+- if (info->hvc != NULL)
+- hvc_remove(info->hvc);
+- info->hvc = NULL;
+ }
+
+ static void xencons_free(struct xencons_info *info)
+@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
+ if (ret)
+ return ret;
+ info->evtchn = evtchn;
+- irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
++ irq = bind_evtchn_to_irq_lateeoi(evtchn);
+ if (irq < 0)
+ return irq;
+ info->irq = irq;
+@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
+ if (dev->state == XenbusStateClosed)
+ break;
+ fallthrough; /* Missed the backend's CLOSING state */
+- case XenbusStateClosing:
++ case XenbusStateClosing: {
++		struct xencons_info *info = dev_get_drvdata(&dev->dev);
++
++ /*
++ * Don't tear down the evtchn and grant ref before the other
++ * end has disconnected, but do stop userspace from trying
++ * to use the device before we allow the backend to close.
++ */
++ if (info->hvc) {
++ hvc_remove(info->hvc);
++ info->hvc = NULL;
++ }
++
+ xenbus_frontend_closed(dev);
+ break;
+ }
++ }
+ }
+
+ static const struct xenbus_device_id xencons_ids[] = {
+@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
+ ops = &dom0_hvc_ops;
+ r = xen_initial_domain_console_init();
+ if (r < 0)
+- return r;
++ goto register_fe;
+ info = vtermno_to_xencons(HVC_COOKIE);
+ } else {
+ ops = &domU_hvc_ops;
+@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
+ else
+ r = xen_pv_console_init();
+ if (r < 0)
+- return r;
++ goto register_fe;
+
+ info = vtermno_to_xencons(HVC_COOKIE);
+ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
+ list_del(&info->list);
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->irq)
+- unbind_from_irqhandler(info->irq, NULL);
++ evtchn_put(info->evtchn);
+ kfree(info);
+ return r;
+ }
+
+ r = 0;
++ register_fe:
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ r = xenbus_register_frontend(&xencons_driver);
+ #endif
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 1f3aba607cd51d..8559ba1361c645 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -244,16 +244,18 @@ enum gsm_encoding {
+
+ enum gsm_mux_state {
+ GSM_SEARCH,
+- GSM_START,
+- GSM_ADDRESS,
+- GSM_CONTROL,
+- GSM_LEN,
+- GSM_DATA,
+- GSM_FCS,
+- GSM_OVERRUN,
+- GSM_LEN0,
+- GSM_LEN1,
+- GSM_SSOF,
++ GSM0_ADDRESS,
++ GSM0_CONTROL,
++ GSM0_LEN0,
++ GSM0_LEN1,
++ GSM0_DATA,
++ GSM0_FCS,
++ GSM0_SSOF,
++ GSM1_START,
++ GSM1_ADDRESS,
++ GSM1_CONTROL,
++ GSM1_DATA,
++ GSM1_OVERRUN,
+ };
+
+ /*
+@@ -2846,6 +2848,30 @@ static void gsm_queue(struct gsm_mux *gsm)
+ return;
+ }
+
++/**
++ * gsm0_receive_state_check_and_fix - check and correct receive state
++ * @gsm: gsm data for this ldisc instance
++ *
++ * Ensures that the current receive state is valid for basic option mode.
++ */
++
++static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm)
++{
++ switch (gsm->state) {
++ case GSM_SEARCH:
++ case GSM0_ADDRESS:
++ case GSM0_CONTROL:
++ case GSM0_LEN0:
++ case GSM0_LEN1:
++ case GSM0_DATA:
++ case GSM0_FCS:
++ case GSM0_SSOF:
++ break;
++ default:
++ gsm->state = GSM_SEARCH;
++ break;
++ }
++}
+
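
The guard above is a whitelist reset: any state value outside the basic-option set collapses back to GSM_SEARCH before the next byte is handled, so a state left over from switching framing modes can never reach an unhandled case. The pattern in miniature, as a standalone sketch with a made-up two-state machine:

/* Standalone illustration of the whitelist-reset pattern. */
#include <stdio.h>

enum demo_state { DEMO_SEARCH, DEMO_DATA };

static enum demo_state check_and_fix(enum demo_state s)
{
	switch (s) {
	case DEMO_SEARCH:
	case DEMO_DATA:
		return s;		/* known state: keep it */
	default:
		return DEMO_SEARCH;	/* anything else: resynchronise */
	}
}

int main(void)
{
	printf("%d\n", check_and_fix((enum demo_state)42)); /* prints 0 */
	return 0;
}
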
+ /**
+ * gsm0_receive - perform processing for non-transparency
+@@ -2859,26 +2885,27 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ {
+ unsigned int len;
+
++ gsm0_receive_state_check_and_fix(gsm);
+ switch (gsm->state) {
+ case GSM_SEARCH: /* SOF marker */
+ if (c == GSM0_SOF) {
+- gsm->state = GSM_ADDRESS;
++ gsm->state = GSM0_ADDRESS;
+ gsm->address = 0;
+ gsm->len = 0;
+ gsm->fcs = INIT_FCS;
+ }
+ break;
+- case GSM_ADDRESS: /* Address EA */
++ case GSM0_ADDRESS: /* Address EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+- gsm->state = GSM_CONTROL;
++ gsm->state = GSM0_CONTROL;
+ break;
+- case GSM_CONTROL: /* Control Byte */
++ case GSM0_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+- gsm->state = GSM_LEN0;
++ gsm->state = GSM0_LEN0;
+ break;
+- case GSM_LEN0: /* Length EA */
++ case GSM0_LEN0: /* Length EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->len, c)) {
+ if (gsm->len > gsm->mru) {
+@@ -2888,14 +2915,14 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ }
+ gsm->count = 0;
+ if (!gsm->len)
+- gsm->state = GSM_FCS;
++ gsm->state = GSM0_FCS;
+ else
+- gsm->state = GSM_DATA;
++ gsm->state = GSM0_DATA;
+ break;
+ }
+- gsm->state = GSM_LEN1;
++ gsm->state = GSM0_LEN1;
+ break;
+- case GSM_LEN1:
++ case GSM0_LEN1:
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ len = c;
+ gsm->len |= len << 7;
+@@ -2906,26 +2933,29 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ }
+ gsm->count = 0;
+ if (!gsm->len)
+- gsm->state = GSM_FCS;
++ gsm->state = GSM0_FCS;
+ else
+- gsm->state = GSM_DATA;
++ gsm->state = GSM0_DATA;
+ break;
+- case GSM_DATA: /* Data */
++ case GSM0_DATA: /* Data */
+ gsm->buf[gsm->count++] = c;
+- if (gsm->count == gsm->len) {
++ if (gsm->count >= MAX_MRU) {
++ gsm->bad_size++;
++ gsm->state = GSM_SEARCH;
++ } else if (gsm->count >= gsm->len) {
+ /* Calculate final FCS for UI frames over all data */
+ if ((gsm->control & ~PF) != UIH) {
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ gsm->count);
+ }
+- gsm->state = GSM_FCS;
++ gsm->state = GSM0_FCS;
+ }
+ break;
+- case GSM_FCS: /* FCS follows the packet */
++ case GSM0_FCS: /* FCS follows the packet */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+- gsm->state = GSM_SSOF;
++ gsm->state = GSM0_SSOF;
+ break;
+- case GSM_SSOF:
++ case GSM0_SSOF:
+ gsm->state = GSM_SEARCH;
+ if (c == GSM0_SOF)
+ gsm_queue(gsm);
+@@ -2938,6 +2968,29 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+ }
+ }
+
++/**
++ * gsm1_receive_state_check_and_fix - check and correct receive state
++ * @gsm: gsm data for this ldisc instance
++ *
++ * Ensures that the current receive state is valid for advanced option mode.
++ */
++
++static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm)
++{
++ switch (gsm->state) {
++ case GSM_SEARCH:
++ case GSM1_START:
++ case GSM1_ADDRESS:
++ case GSM1_CONTROL:
++ case GSM1_DATA:
++ case GSM1_OVERRUN:
++ break;
++ default:
++ gsm->state = GSM_SEARCH;
++ break;
++ }
++}
++
+ /**
+ * gsm1_receive - perform processing for non-transparency
+ * @gsm: gsm data for this ldisc instance
+@@ -2948,6 +3001,7 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+
+ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ {
++ gsm1_receive_state_check_and_fix(gsm);
+ /* handle XON/XOFF */
+ if ((c & ISO_IEC_646_MASK) == XON) {
+ gsm->constipated = true;
+@@ -2960,11 +3014,11 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ }
+ if (c == GSM1_SOF) {
+ /* EOF is only valid in frame if we have got to the data state */
+- if (gsm->state == GSM_DATA) {
++ if (gsm->state == GSM1_DATA) {
+ if (gsm->count < 1) {
+				/* Missing FCS */
+ gsm->malformed++;
+- gsm->state = GSM_START;
++ gsm->state = GSM1_START;
+ return;
+ }
+ /* Remove the FCS from data */
+@@ -2980,14 +3034,14 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ gsm->len = gsm->count;
+ gsm_queue(gsm);
+- gsm->state = GSM_START;
++ gsm->state = GSM1_START;
+ return;
+ }
+ /* Any partial frame was a runt so go back to start */
+- if (gsm->state != GSM_START) {
++ if (gsm->state != GSM1_START) {
+ if (gsm->state != GSM_SEARCH)
+ gsm->malformed++;
+- gsm->state = GSM_START;
++ gsm->state = GSM1_START;
+ }
+ /* A SOF in GSM_START means we are still reading idling or
+ framing bytes */
+@@ -3008,30 +3062,30 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+ gsm->escape = false;
+ }
+ switch (gsm->state) {
+- case GSM_START: /* First byte after SOF */
++ case GSM1_START: /* First byte after SOF */
+ gsm->address = 0;
+- gsm->state = GSM_ADDRESS;
++ gsm->state = GSM1_ADDRESS;
+ gsm->fcs = INIT_FCS;
+ fallthrough;
+- case GSM_ADDRESS: /* Address continuation */
++ case GSM1_ADDRESS: /* Address continuation */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+- gsm->state = GSM_CONTROL;
++ gsm->state = GSM1_CONTROL;
+ break;
+- case GSM_CONTROL: /* Control Byte */
++ case GSM1_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->count = 0;
+- gsm->state = GSM_DATA;
++ gsm->state = GSM1_DATA;
+ break;
+- case GSM_DATA: /* Data */
+- if (gsm->count > gsm->mru) { /* Allow one for the FCS */
+- gsm->state = GSM_OVERRUN;
++ case GSM1_DATA: /* Data */
++ if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */
++ gsm->state = GSM1_OVERRUN;
+ gsm->bad_size++;
+ } else
+ gsm->buf[gsm->count++] = c;
+ break;
+- case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
++ case GSM1_OVERRUN: /* Over-long - eg a dropped SOF */
+ break;
+ default:
+ pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
+@@ -3102,6 +3156,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ mutex_unlock(&gsm->mutex);
+ /* Now wipe the queues */
+ tty_ldisc_flush(gsm->tty);
++
++ guard(spinlock_irqsave)(&gsm->tx_lock);
+ list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
+ kfree(txq);
+ INIT_LIST_HEAD(&gsm->tx_ctrl_list);
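
guard(spinlock_irqsave)() is the scope-based locking helper from linux/cleanup.h: it takes the lock at the declaration and releases it automatically when the enclosing scope ends, so the list teardown above cannot leave tx_lock held on any exit path. A minimal sketch of the idiom (assuming kernel context; the names mirror the hunk above):

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static void drain_tx_ctrl(struct gsm_mux *gsm)
{
	struct gsm_msg *txq, *ntxq;

	guard(spinlock_irqsave)(&gsm->tx_lock);	/* dropped at scope exit */
	list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
		kfree(txq);
	INIT_LIST_HEAD(&gsm->tx_ctrl_list);
}
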
+@@ -4108,6 +4164,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
+
+ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
+ {
++ if (dlci->gsm->dead)
++ return -EL2HLT;
+ if (dlci->adaption == 2) {
+ /* Send convergence layer type 2 empty data frame. */
+ gsm_modem_upd_via_data(dlci, brk);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 6c9a408d67cd68..e05341b85c599f 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1624,15 +1624,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ else if (ldata->raw || (L_EXTPROC(tty) && !preops))
+ n_tty_receive_buf_raw(tty, cp, fp, count);
+ else if (tty->closing && !L_EXTPROC(tty)) {
+- if (la_count > 0)
++ if (la_count > 0) {
+ n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+- if (count > la_count)
+- n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
++ cp += la_count;
++ if (fp)
++ fp += la_count;
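
A connection manager would consult this helper while budgeting a Gen 4 link, subtracting the reservation from whatever bandwidth is otherwise available. A hypothetical caller (not from the patch):

/* Hypothetical: fold the PCIe reservation into a link budget. */
static void example_apply_pci_reservation(struct tb_port *port,
					  int *avail_up, int *avail_down)
{
	int res_up, res_down;

	/* Only true on USB4 v2 links with PCIe adapters on both ends. */
	if (tb_tunnel_reserved_pci(port, &res_up, &res_down)) {
		*avail_up -= res_up;
		*avail_down -= res_down;
	}
}
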
++ count -= la_count;
++ }
++ if (count > 0)
++ n_tty_receive_buf_closing(tty, cp, fp, count, false);
+ } else {
+- if (la_count > 0)
++ if (la_count > 0) {
+ n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+- if (count > la_count)
+- n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
++ cp += la_count;
++ if (fp)
++ fp += la_count;
++ count -= la_count;
++ }
++ if (count > 0)
++ n_tty_receive_buf_standard(tty, cp, fp, count, false);
+
+ flush_echoes(tty);
+ if (tty->ops->flush_chars)
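
The fix above advances cp/fp and shrinks count after the lookahead portion instead of re-passing the original pointers, so the second call sees only the unprocessed tail. The same split as a standalone sketch (process() is a stand-in for the receive helpers):

/* Standalone sketch of the lookahead/tail buffer split. */
#include <stddef.h>

static void process(const unsigned char *cp, size_t n, int lookahead)
{
	(void)cp; (void)n; (void)lookahead;	/* stand-in for real work */
}

static void receive(const unsigned char *cp, size_t count, size_t la_count)
{
	if (la_count > 0) {
		process(cp, la_count, 1);	/* lookahead portion */
		cp += la_count;			/* advance past it */
		count -= la_count;
	}
	if (count > 0)
		process(cp, count, 0);		/* remaining tail */
}
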
+diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
+index 15a2387a5b258f..4f4502fb5454c6 100644
+--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
++++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
+@@ -119,6 +119,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
+
+ /* get the clock - this also enables the HW */
+ data->clk = devm_clk_get_optional(&pdev->dev, NULL);
++ if (IS_ERR(data->clk))
++ return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");
+
+ /* get the interrupt */
+ ret = platform_get_irq(pdev, 0);
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index aa5aff046756bb..9afd5979c9e0d1 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -676,18 +676,46 @@ static void init_real_clk_rates(struct device *dev, struct brcmuart_priv *priv)
+ clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate);
+ }
+
++static u32 find_quot(struct device *dev, u32 freq, u32 baud, u32 *percent)
++{
++ u32 quot;
++ u32 rate;
++ u64 hires_rate;
++ u64 hires_baud;
++ u64 hires_err;
++
++ rate = freq / 16;
++ quot = DIV_ROUND_CLOSEST(rate, baud);
++ if (!quot)
++ return 0;
++
++ /* increase resolution to get xx.xx percent */
++ hires_rate = div_u64((u64)rate * 10000, (u64)quot);
++ hires_baud = (u64)baud * 10000;
++
++ /* get the delta */
++ if (hires_rate > hires_baud)
++ hires_err = (hires_rate - hires_baud);
++ else
++ hires_err = (hires_baud - hires_rate);
++
++ *percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
++
++ dev_dbg(dev, "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
++ baud, freq, *percent / 100, *percent % 100);
++
++ return quot;
++}
++
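
The 10000x scaling in find_quot() reports the baud error to two decimal places without floating point. A standalone recreation with sample numbers (an 81 MHz mux clock and 115200 baud are illustrative values, not from the patch):

/* Standalone recreation of find_quot()'s error arithmetic. */
#include <stdio.h>
#include <stdint.h>

static uint32_t div_round_closest(uint64_t n, uint64_t d)
{
	return (uint32_t)((n + d / 2) / d);
}

int main(void)
{
	uint32_t freq = 81000000, baud = 115200;	/* sample values */
	uint32_t rate = freq / 16;			/* 5062500 */
	uint32_t quot = div_round_closest(rate, baud);	/* 44 */
	uint64_t hires_rate = (uint64_t)rate * 10000 / quot;
	uint64_t hires_baud = (uint64_t)baud * 10000;
	uint64_t err = hires_rate > hires_baud ? hires_rate - hires_baud
					       : hires_baud - hires_rate;
	uint32_t percent = div_round_closest(err, baud);

	/* Prints: quot=44 error=0.12% */
	printf("quot=%u error=%u.%02u%%\n", quot, percent / 100, percent % 100);
	return 0;
}
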
+ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ u32 baud)
+ {
+ u32 percent;
+ u32 best_percent = UINT_MAX;
+ u32 quot;
++ u32 freq;
+ u32 best_quot = 1;
+- u32 rate;
+- int best_index = -1;
+- u64 hires_rate;
+- u64 hires_baud;
+- u64 hires_err;
++ u32 best_freq = 0;
+ int rc;
+ int i;
+ int real_baud;
+@@ -696,44 +724,35 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ if (priv->baud_mux_clk == NULL)
+ return;
+
+- /* Find the closest match for specified baud */
+- for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
+- if (priv->real_rates[i] == 0)
+- continue;
+- rate = priv->real_rates[i] / 16;
+- quot = DIV_ROUND_CLOSEST(rate, baud);
+- if (!quot)
+- continue;
+-
+- /* increase resolution to get xx.xx percent */
+- hires_rate = (u64)rate * 10000;
+- hires_baud = (u64)baud * 10000;
+-
+- hires_err = div_u64(hires_rate, (u64)quot);
+-
+- /* get the delta */
+- if (hires_err > hires_baud)
+- hires_err = (hires_err - hires_baud);
+- else
+- hires_err = (hires_baud - hires_err);
+-
+- percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
+- dev_dbg(up->dev,
+- "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
+- baud, priv->real_rates[i], percent / 100,
+- percent % 100);
+- if (percent < best_percent) {
+- best_percent = percent;
+- best_index = i;
+- best_quot = quot;
++ /* Try default_mux_rate first */
++ quot = find_quot(up->dev, priv->default_mux_rate, baud, &percent);
++ if (quot) {
++ best_percent = percent;
++ best_freq = priv->default_mux_rate;
++ best_quot = quot;
++ }
++ /* If more than 1% error, find the closest match for specified baud */
++ if (best_percent > 100) {
++ for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
++ freq = priv->real_rates[i];
++ if (freq == 0 || freq == priv->default_mux_rate)
++ continue;
++ quot = find_quot(up->dev, freq, baud, &percent);
++ if (!quot)
++ continue;
++
++ if (percent < best_percent) {
++ best_percent = percent;
++ best_freq = freq;
++ best_quot = quot;
++ }
+ }
+ }
+- if (best_index == -1) {
++ if (!best_freq) {
+ dev_err(up->dev, "Error, %d BAUD rate is too fast.\n", baud);
+ return;
+ }
+- rate = priv->real_rates[best_index];
+- rc = clk_set_rate(priv->baud_mux_clk, rate);
++ rc = clk_set_rate(priv->baud_mux_clk, best_freq);
+ if (rc)
+ dev_err(up->dev, "Error selecting BAUD MUX clock\n");
+
+@@ -742,8 +761,8 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ dev_err(up->dev, "Error, baud: %d has %u.%u%% error\n",
+ baud, percent / 100, percent % 100);
+
+- real_baud = rate / 16 / best_quot;
+- dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", rate);
++ real_baud = best_freq / 16 / best_quot;
++ dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", best_freq);
+ dev_dbg(up->dev, "Requested baud: %u, Actual baud: %u\n",
+ baud, real_baud);
+
+@@ -752,7 +771,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ i += (i / 2);
+ priv->char_wait = ns_to_ktime(i);
+
+- up->uartclk = rate;
++ up->uartclk = best_freq;
+ }
+
+ static void brcmstb_set_termios(struct uart_port *up,
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index f4cafca1a7dad2..8aed33be2ebf48 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -9,7 +9,6 @@
+ * LCR is written whilst busy. If it is, then a busy detect interrupt is
+ * raised, the LCR needs to be rewritten and the uart status register read.
+ */
+-#include <linux/acpi.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+@@ -17,7 +16,6 @@
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/notifier.h>
+-#include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/property.h>
+@@ -56,6 +54,35 @@
+ #define DW_UART_QUIRK_ARMADA_38X BIT(1)
+ #define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
+ #define DW_UART_QUIRK_IS_DMA_FC BIT(3)
++#define DW_UART_QUIRK_APMC0D08 BIT(4)
++#define DW_UART_QUIRK_CPR_VALUE BIT(5)
++
++struct dw8250_platform_data {
++ u8 usr_reg;
++ u32 cpr_value;
++ unsigned int quirks;
++};
++
++struct dw8250_data {
++ struct dw8250_port_data data;
++ const struct dw8250_platform_data *pdata;
++
++ int msr_mask_on;
++ int msr_mask_off;
++ struct clk *clk;
++ struct clk *pclk;
++ struct notifier_block clk_notifier;
++ struct work_struct clk_work;
++ struct reset_control *rst;
++
++ unsigned int skip_autocfg:1;
++ unsigned int uart_16550_compatible:1;
++};
++
++static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
++{
++ return container_of(data, struct dw8250_data, data);
++}
+
+ static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
+ {
+@@ -445,44 +472,33 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
+
+ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
+ {
+- struct device_node *np = p->dev->of_node;
++ unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
++ u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+
+- if (np) {
+- unsigned int quirks = data->pdata->quirks;
+- int id;
++ if (quirks & DW_UART_QUIRK_CPR_VALUE)
++ data->data.cpr_value = cpr_value;
+
+- /* get index of serial line, if found in DT aliases */
+- id = of_alias_get_id(np, "serial");
+- if (id >= 0)
+- p->line = id;
+ #ifdef CONFIG_64BIT
+- if (quirks & DW_UART_QUIRK_OCTEON) {
+- p->serial_in = dw8250_serial_inq;
+- p->serial_out = dw8250_serial_outq;
+- p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+- p->type = PORT_OCTEON;
+- data->skip_autocfg = true;
+- }
++ if (quirks & DW_UART_QUIRK_OCTEON) {
++ p->serial_in = dw8250_serial_inq;
++ p->serial_out = dw8250_serial_outq;
++ p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
++ p->type = PORT_OCTEON;
++ data->skip_autocfg = true;
++ }
+ #endif
+
+- if (of_device_is_big_endian(np)) {
+- p->iotype = UPIO_MEM32BE;
+- p->serial_in = dw8250_serial_in32be;
+- p->serial_out = dw8250_serial_out32be;
+- }
+-
+- if (quirks & DW_UART_QUIRK_ARMADA_38X)
+- p->serial_out = dw8250_serial_out38x;
+- if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
+- p->set_termios = dw8250_do_set_termios;
+- if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
+- data->data.dma.txconf.device_fc = 1;
+- data->data.dma.rxconf.device_fc = 1;
+- data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
+- data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
+- }
+-
+- } else if (acpi_dev_present("APMC0D08", NULL, -1)) {
++ if (quirks & DW_UART_QUIRK_ARMADA_38X)
++ p->serial_out = dw8250_serial_out38x;
++ if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
++ p->set_termios = dw8250_do_set_termios;
++ if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
++ data->data.dma.txconf.device_fc = 1;
++ data->data.dma.rxconf.device_fc = 1;
++ data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
++ data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
++ }
++ if (quirks & DW_UART_QUIRK_APMC0D08) {
+ p->iotype = UPIO_MEM32;
+ p->regshift = 2;
+ p->serial_in = dw8250_serial_in32;
+@@ -515,39 +531,21 @@ static int dw8250_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct dw8250_data *data;
+ struct resource *regs;
+- int irq;
+ int err;
+- u32 val;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return dev_err_probe(dev, -EINVAL, "no registers defined\n");
+
+- irq = platform_get_irq_optional(pdev, 0);
+- /* no interrupt -> fall back to polling */
+- if (irq == -ENXIO)
+- irq = 0;
+- if (irq < 0)
+- return irq;
+-
+ spin_lock_init(&p->lock);
+- p->mapbase = regs->start;
+- p->irq = irq;
+ p->handle_irq = dw8250_handle_irq;
+ p->pm = dw8250_do_pm;
+ p->type = PORT_8250;
+- p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
++ p->flags = UPF_FIXED_PORT;
+ p->dev = dev;
+- p->iotype = UPIO_MEM;
+- p->serial_in = dw8250_serial_in;
+- p->serial_out = dw8250_serial_out;
+ p->set_ldisc = dw8250_set_ldisc;
+ p->set_termios = dw8250_set_termios;
+
+- p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
+- if (!p->membase)
+- return -ENOMEM;
+-
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+@@ -559,15 +557,35 @@ static int dw8250_probe(struct platform_device *pdev)
+ data->uart_16550_compatible = device_property_read_bool(dev,
+ "snps,uart-16550-compatible");
+
+- err = device_property_read_u32(dev, "reg-shift", &val);
+- if (!err)
+- p->regshift = val;
++ p->mapbase = regs->start;
++ p->mapsize = resource_size(regs);
+
+- err = device_property_read_u32(dev, "reg-io-width", &val);
+- if (!err && val == 4) {
+- p->iotype = UPIO_MEM32;
++ p->membase = devm_ioremap(dev, p->mapbase, p->mapsize);
++ if (!p->membase)
++ return -ENOMEM;
++
++ err = uart_read_port_properties(p);
++ /* no interrupt -> fall back to polling */
++ if (err == -ENXIO)
++ err = 0;
++ if (err)
++ return err;
++
++ switch (p->iotype) {
++ case UPIO_MEM:
++ p->serial_in = dw8250_serial_in;
++ p->serial_out = dw8250_serial_out;
++ break;
++ case UPIO_MEM32:
+ p->serial_in = dw8250_serial_in32;
+ p->serial_out = dw8250_serial_out32;
++ break;
++ case UPIO_MEM32BE:
++ p->serial_in = dw8250_serial_in32be;
++ p->serial_out = dw8250_serial_out32be;
++ break;
++ default:
++ return -ENODEV;
+ }
+
+ if (device_property_read_bool(dev, "dcd-override")) {
+@@ -594,9 +612,6 @@ static int dw8250_probe(struct platform_device *pdev)
+ data->msr_mask_off |= UART_MSR_TERI;
+ }
+
+- /* Always ask for fixed clock rate from a property. */
+- device_property_read_u32(dev, "clock-frequency", &p->uartclk);
+-
+ /* If there is separate baudclk, get the rate from it. */
+ data->clk = devm_clk_get_optional(dev, "baudclk");
+ if (data->clk == NULL)
+@@ -766,8 +781,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = {
+
+ static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
+ .usr_reg = DW_UART_USR,
+- .cpr_val = 0x00012f32,
+- .quirks = DW_UART_QUIRK_IS_DMA_FC,
++ .cpr_value = 0x00012f32,
++ .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
+ };
+
+ static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
+@@ -785,19 +800,25 @@ static const struct of_device_id dw8250_of_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, dw8250_of_match);
+
++static const struct dw8250_platform_data dw8250_apmc0d08 = {
++ .usr_reg = DW_UART_USR,
++ .quirks = DW_UART_QUIRK_APMC0D08,
++};
++
+ static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb },
+ { "8086228A", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb },
+- { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb},
++ { "APMC0D08", (kernel_ulong_t)&dw8250_apmc0d08 },
+ { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb },
+ { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
++ { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
+diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
+index 84843e204a5e80..8fc8b6753148bb 100644
+--- a/drivers/tty/serial/8250/8250_dwlib.c
++++ b/drivers/tty/serial/8250/8250_dwlib.c
+@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = {
+ void dw8250_setup_port(struct uart_port *p)
+ {
+ struct dw8250_port_data *pd = p->private_data;
+- struct dw8250_data *data = to_dw8250_data(pd);
+ struct uart_8250_port *up = up_to_u8250p(p);
+ u32 reg, old_dlf;
+
+@@ -284,7 +283,7 @@ void dw8250_setup_port(struct uart_port *p)
+
+ reg = dw8250_readl_ext(p, DW_UART_CPR);
+ if (!reg) {
+- reg = data->pdata->cpr_val;
++ reg = pd->cpr_value;
+ dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
+ }
+ if (!reg)
+diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
+index f13e91f2cace9c..7dd2a8e7b78085 100644
+--- a/drivers/tty/serial/8250/8250_dwlib.h
++++ b/drivers/tty/serial/8250/8250_dwlib.h
+@@ -2,15 +2,10 @@
+ /* Synopsys DesignWare 8250 library header file. */
+
+ #include <linux/io.h>
+-#include <linux/notifier.h>
+ #include <linux/types.h>
+-#include <linux/workqueue.h>
+
+ #include "8250.h"
+
+-struct clk;
+-struct reset_control;
+-
+ struct dw8250_port_data {
+ /* Port properties */
+ int line;
+@@ -19,42 +14,16 @@ struct dw8250_port_data {
+ struct uart_8250_dma dma;
+
+ /* Hardware configuration */
++ u32 cpr_value;
+ u8 dlf_size;
+
+ /* RS485 variables */
+ bool hw_rs485_support;
+ };
+
+-struct dw8250_platform_data {
+- u8 usr_reg;
+- u32 cpr_val;
+- unsigned int quirks;
+-};
+-
+-struct dw8250_data {
+- struct dw8250_port_data data;
+- const struct dw8250_platform_data *pdata;
+-
+- int msr_mask_on;
+- int msr_mask_off;
+- struct clk *clk;
+- struct clk *pclk;
+- struct notifier_block clk_notifier;
+- struct work_struct clk_work;
+- struct reset_control *rst;
+-
+- unsigned int skip_autocfg:1;
+- unsigned int uart_16550_compatible:1;
+-};
+-
+ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
+ void dw8250_setup_port(struct uart_port *p);
+
+-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+-{
+- return container_of(data, struct dw8250_data, data);
+-}
+-
+ static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+ {
+ if (p->iotype == UPIO_MEM32BE)
+diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
+index 9837a27739fdf5..e3f482fd3de481 100644
+--- a/drivers/tty/serial/8250/8250_early.c
++++ b/drivers/tty/serial/8250/8250_early.c
+@@ -189,5 +189,6 @@ static int __init early_omap8250_setup(struct earlycon_device *device,
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
++OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
+
+ #endif
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 077c3ba3539e68..27430fdd9e761c 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -41,8 +41,50 @@
+ #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021
+ #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
+
++#define PCI_VENDOR_ID_CONNECT_TECH 0x12c4
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_SP_OPTO 0x0340
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_A 0x0341
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_B 0x0342
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS 0x0350
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_A 0x0351
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_B 0x0352
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS 0x0353
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_A 0x0354
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_B 0x0355
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS_OPTO 0x0360
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_A 0x0361
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_B 0x0362
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP 0x0370
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232 0x0371
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_485 0x0372
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_SP 0x0373
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_6_2_SP 0x0374
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_6_SP 0x0375
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232_NS 0x0376
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_LEFT 0x0380
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_RIGHT 0x0381
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XP_OPTO 0x0382
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_XPRS_OPTO 0x0392
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP 0x03A0
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232 0x03A1
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_485 0x03A2
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232_NS 0x03A3
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XEG001 0x0602
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_BASE 0x1000
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_2 0x1002
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_4 0x1004
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_8 0x1008
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_12 0x100C
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_16 0x1010
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG00X 0x110c
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG01X 0x110d
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_16 0x1110
++
+ #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
++#define PCI_DEVICE_ID_EXAR_XR17V252 0x0252
++#define PCI_DEVICE_ID_EXAR_XR17V254 0x0254
++#define PCI_DEVICE_ID_EXAR_XR17V258 0x0258
+
+ #define PCI_SUBDEVICE_ID_USR_2980 0x0128
+ #define PCI_SUBDEVICE_ID_USR_2981 0x0129
+@@ -446,7 +488,7 @@ static int generic_rs485_config(struct uart_port *port, struct ktermios *termios
+ }
+
+ static const struct serial_rs485 generic_rs485_supported = {
+- .flags = SER_RS485_ENABLED,
++ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
+ };
+
+ static const struct exar8250_platform exar8250_default_platform = {
+@@ -490,7 +532,8 @@ static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios
+ }
+
+ static const struct serial_rs485 iot2040_rs485_supported = {
+- .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
++ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
++ SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+ };
+
+ static const struct property_entry iot2040_gpio_properties[] = {
+@@ -717,6 +760,7 @@ static void exar_pci_remove(struct pci_dev *pcidev)
+ for (i = 0; i < priv->nr; i++)
+ serial8250_unregister_port(priv->line[i]);
+
++ /* Ensure that every init quirk is properly torn down */
+ if (priv->board->exit)
+ priv->board->exit(pcidev);
+ }
+@@ -731,10 +775,6 @@ static int __maybe_unused exar_suspend(struct device *dev)
+ if (priv->line[i] >= 0)
+ serial8250_suspend_port(priv->line[i]);
+
+- /* Ensure that every init quirk is properly torn down */
+- if (priv->board->exit)
+- priv->board->exit(pcidev);
+-
+ return 0;
+ }
+
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index 74da5676ce67dc..28f9a2679a20e8 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -209,15 +209,19 @@ static int mtk8250_startup(struct uart_port *port)
+
+ static void mtk8250_shutdown(struct uart_port *port)
+ {
+-#ifdef CONFIG_SERIAL_8250_DMA
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct mtk8250_data *data = port->private_data;
++ int irq = data->rx_wakeup_irq;
+
++#ifdef CONFIG_SERIAL_8250_DMA
+ if (up->dma)
+ data->rx_status = DMA_RX_SHUTDOWN;
+ #endif
+
+- return serial8250_do_shutdown(port);
++ serial8250_do_shutdown(port);
++
++ if (irq >= 0)
++ serial8250_do_set_mctrl(&up->port, TIOCM_RTS);
+ }
+
+ static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index ca972fd377256c..4caecc3525bfd1 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -165,6 +165,10 @@ static u32 uart_read(struct omap8250_priv *priv, u32 reg)
+ return readl(priv->membase + (reg << OMAP_UART_REGSHIFT));
+ }
+
++/* Timeout Low and High */
++#define UART_OMAP_TO_L 0x26
++#define UART_OMAP_TO_H 0x27
++
+ /*
+ * Called on runtime PM resume path from omap8250_restore_regs(), and
+ * omap8250_set_mctrl().
+@@ -646,13 +650,25 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+
+ /*
+ * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
+- * FIFO has been drained, in which case a dummy read of RX FIFO
+- * is required to clear RX TIMEOUT condition.
++	 * FIFO has been drained, or sometimes erroneously.
++	 * So apply the solution of Errata i2310 as described in
++ * https://www.ti.com/lit/pdf/sprz536
+ */
+ if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
+ (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
+ serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
+- serial_port_in(port, UART_RX);
++ unsigned char efr2, timeout_h, timeout_l;
++
++ efr2 = serial_in(up, UART_OMAP_EFR2);
++ timeout_h = serial_in(up, UART_OMAP_TO_H);
++ timeout_l = serial_in(up, UART_OMAP_TO_L);
++ serial_out(up, UART_OMAP_TO_H, 0xFF);
++ serial_out(up, UART_OMAP_TO_L, 0xFF);
++ serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
++ serial_in(up, UART_IIR);
++ serial_out(up, UART_OMAP_EFR2, efr2);
++ serial_out(up, UART_OMAP_TO_H, timeout_h);
++ serial_out(up, UART_OMAP_TO_L, timeout_l);
+ }
+
+ /* Stop processing interrupts on input overrun */
+@@ -914,7 +930,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
+ if (priv->habit & UART_HAS_RHR_IT_DIS) {
+ reg = serial_in(p, UART_OMAP_IER2);
+ reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
+- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
++ serial_out(p, UART_OMAP_IER2, reg);
+ }
+
+ dmaengine_tx_status(rxchan, cookie, &state);
+@@ -1060,7 +1076,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
+ if (priv->habit & UART_HAS_RHR_IT_DIS) {
+ reg = serial_in(p, UART_OMAP_IER2);
+ reg |= UART_OMAP_IER2_RHR_IT_DIS;
+- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
++ serial_out(p, UART_OMAP_IER2, reg);
+ }
+
+ dma_async_issue_pending(dma->rxchan);
+@@ -1282,10 +1298,12 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
+
+ status = serial_port_in(port, UART_LSR);
+
+- if (priv->habit & UART_HAS_EFR2)
+- am654_8250_handle_rx_dma(up, iir, status);
+- else
+- status = omap_8250_handle_rx_dma(up, iir, status);
++ if ((iir & 0x3f) != UART_IIR_THRI) {
++ if (priv->habit & UART_HAS_EFR2)
++ am654_8250_handle_rx_dma(up, iir, status);
++ else
++ status = omap_8250_handle_rx_dma(up, iir, status);
++ }
+
+ serial8250_modem_status(up);
+ if (status & UART_LSR_THRE && up->dma->tx_err) {
+@@ -1549,7 +1567,7 @@ static int omap8250_probe(struct platform_device *pdev)
+ ret = devm_request_irq(&pdev->dev, irq, omap8250_irq, 0,
+ dev_name(&pdev->dev), priv);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ priv->wakeirq = irq_of_parse_and_map(np, 1);
+
+@@ -1579,7 +1597,7 @@ static int omap8250_remove(struct platform_device *pdev)
+
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+- return err;
++ dev_err(&pdev->dev, "Failed to resume hardware\n");
+
+ up = serial8250_get_port(priv->line);
+ omap_8250_shutdown(&up->port);
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 62a9bd30b4db5c..bbd7914ddc9adc 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2429,6 +2429,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
++ /*
++ * Brainboxes devices - all Oxsemi based
++ */
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4027,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4028,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4029,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4019,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4016,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4015,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400A,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400E,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400C,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400B,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x400F,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4010,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4011,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x401D,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x401E,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4013,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4017,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTASHIELD,
++ .device = 0x4018,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_oxsemi_tornado_init,
++ .setup = pci_oxsemi_tornado_setup,
++ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8811,
+@@ -4913,6 +5060,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 0, 0,
+ pbn_b1_bt_1_115200 },
+
++ /*
++ * IntaShield IS-100
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0D60,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b2_1_115200 },
+ /*
+ * IntaShield IS-200
+ */
+@@ -4925,6 +5078,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
+ pbn_b2_4_115200 },
++ /*
++ * IntaShield IX-100
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4027,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_1_15625000 },
++ /*
++ * IntaShield IX-200
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4028,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_2_15625000 },
++ /*
++ * IntaShield IX-400
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4029,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_4_15625000 },
+ /* Brainboxes Devices */
+ /*
+ * Brainboxes UC-101
+@@ -4940,10 +5114,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_1_115200 },
+ /*
+- * Brainboxes UC-257
++ * Brainboxes UC-253/UC-734
+ */
+- { PCI_VENDOR_ID_INTASHIELD, 0x0861,
++ { PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+@@ -4979,6 +5157,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x08E2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x08E3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-310
+ */
+@@ -4989,6 +5175,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ /*
+ * Brainboxes UC-313
+ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08A1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x08A2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+@@ -5003,6 +5197,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ /*
+ * Brainboxes UC-346
+ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B01,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+@@ -5014,6 +5212,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A82,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+@@ -5026,12 +5228,94 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+- * Brainboxes UC-420/431
++ * Brainboxes UC-420
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-607
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x09A1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x09A2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x09A3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-836
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0D41,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UP-189
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UP-200
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B21,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B22,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B23,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UP-869
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C01,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C02,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C03,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UP-880
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C21,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C22,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C23,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
+ /*
+ * Brainboxes PX-101
+ */
+@@ -5064,7 +5348,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTASHIELD, 0x4015,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+- pbn_oxsemi_4_15625000 },
++ pbn_oxsemi_2_15625000 },
+ /*
+ * Brainboxes PX-260/PX-701
+ */
+@@ -5072,6 +5356,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
++ /*
++ * Brainboxes PX-275/279
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0E41,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_8_115200 },
+ /*
+ * Brainboxes PX-310
+ */
+@@ -5119,16 +5410,38 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 0, 0,
+ pbn_oxsemi_4_15625000 },
+ /*
+- * Brainboxes PX-803
++ * Brainboxes PX-475
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x401D,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_1_15625000 },
++ /*
++ * Brainboxes PX-803/PX-857
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4009,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+- pbn_b0_1_115200 },
++ pbn_b0_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x4018,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_2_15625000 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x401E,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+- pbn_oxsemi_1_15625000 },
++ pbn_oxsemi_2_15625000 },
++ /*
++ * Brainboxes PX-820
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4002,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b0_4_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x4013,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_oxsemi_4_15625000 },
+ /*
+ * Brainboxes PX-846
+ */
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 141627370aabc3..a17803da83f8cd 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1337,9 +1337,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ inb_p(ICP);
+ }
+
+- if (uart_console(port))
+- console_lock();
+-
+ /* forget possible initially masked and pending IRQ */
+ probe_irq_off(probe_irq_on());
+ save_mcr = serial8250_in_MCR(up);
+@@ -1379,9 +1376,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ if (port->flags & UPF_FOURPORT)
+ outb_p(save_ICP, ICP);
+
+- if (uart_console(port))
+- console_unlock();
+-
+ port->irq = (irq > 0) ? irq : 0;
+ }
+
+diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
+index a5b3ea27fc9020..2cbaf68d28119d 100644
+--- a/drivers/tty/serial/8250/8250_pxa.c
++++ b/drivers/tty/serial/8250/8250_pxa.c
+@@ -124,6 +124,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
+ uart.port.regshift = 2;
+ uart.port.irq = irq;
+ uart.port.fifosize = 64;
++ uart.tx_loadsz = 32;
+ uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST | UPF_FIXED_TYPE;
+ uart.port.dev = &pdev->dev;
+ uart.port.uartclk = clk_get_rate(data->clk);
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 3dc9b0fcab1c64..362bbcdece0d76 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
+
+ /* Deals with DMA transactions */
+
+-struct pl011_sgbuf {
+- struct scatterlist sg;
+- char *buf;
++struct pl011_dmabuf {
++ dma_addr_t dma;
++ size_t len;
++ char *buf;
+ };
+
+ struct pl011_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ bool use_buf_b;
+- struct pl011_sgbuf sgbuf_a;
+- struct pl011_sgbuf sgbuf_b;
++ struct pl011_dmabuf dbuf_a;
++ struct pl011_dmabuf dbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+ struct timer_list timer;
+@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
+
+ struct pl011_dmatx_data {
+ struct dma_chan *chan;
+- struct scatterlist sg;
++ dma_addr_t dma;
++ size_t len;
+ char *buf;
+ bool queued;
+ };
+@@ -366,32 +368,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+
+ #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+
+-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
+ enum dma_data_direction dir)
+ {
+- dma_addr_t dma_addr;
+-
+- sg->buf = dma_alloc_coherent(chan->device->dev,
+- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
+- if (!sg->buf)
++ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
++ &db->dma, GFP_KERNEL);
++ if (!db->buf)
+ return -ENOMEM;
+-
+- sg_init_table(&sg->sg, 1);
+- sg_set_page(&sg->sg, phys_to_page(dma_addr),
+- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+- sg_dma_address(&sg->sg) = dma_addr;
+- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
++ db->len = PL011_DMA_BUFFER_SIZE;
+
+ return 0;
+ }
+
+-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
+ enum dma_data_direction dir)
+ {
+- if (sg->buf) {
++ if (db->buf) {
+ dma_free_coherent(chan->device->dev,
+- PL011_DMA_BUFFER_SIZE, sg->buf,
+- sg_dma_address(&sg->sg));
++ PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
+ }
+ }
+
+@@ -552,8 +546,8 @@ static void pl011_dma_tx_callback(void *data)
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ if (uap->dmatx.queued)
+- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+- DMA_TO_DEVICE);
++ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
++ dmatx->len, DMA_TO_DEVICE);
+
+ dmacr = uap->dmacr;
+ uap->dmacr = dmacr & ~UART011_TXDMAE;
+@@ -639,18 +633,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
+ memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+ }
+
+- dmatx->sg.length = count;
+-
+- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
++ dmatx->len = count;
++ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
++ DMA_TO_DEVICE);
++ if (dmatx->dma == DMA_MAPPING_ERROR) {
+ uap->dmatx.queued = false;
+ dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+ return -EBUSY;
+ }
+
+- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
++ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
++ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ /*
+ * If DMA cannot be used right now, we complete this
+@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
+ dmaengine_terminate_async(uap->dmatx.chan);
+
+ if (uap->dmatx.queued) {
+- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+- DMA_TO_DEVICE);
++ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
++ uap->dmatx.len, DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ uap->dmacr &= ~UART011_TXDMAE;
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_async_tx_descriptor *desc;
+- struct pl011_sgbuf *sgbuf;
++ struct pl011_dmabuf *dbuf;
+
+ if (!rxchan)
+ return -EIO;
+
+ /* Start the RX DMA job */
+- sgbuf = uap->dmarx.use_buf_b ?
+- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
++ dbuf = uap->dmarx.use_buf_b ?
++ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
++ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ /*
+@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ bool readfifo)
+ {
+ struct tty_port *port = &uap->port.state->port;
+- struct pl011_sgbuf *sgbuf = use_buf_b ?
+- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++ struct pl011_dmabuf *dbuf = use_buf_b ?
++ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ int dma_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+
+@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+
+ if (uap->dmarx.poll_rate) {
+ /* The data can be taken by polling */
+- dmataken = sgbuf->sg.length - dmarx->last_residue;
++ dmataken = dbuf->len - dmarx->last_residue;
+ /* Recalculate the pending size */
+ if (pending >= dmataken)
+ pending -= dmataken;
+@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ * Note that tty_insert_flip_buf() tries to take as many chars
+ * as it can.
+ */
+- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ pending);
+
+ uap->port.icount.rx += dma_count;
+@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+
+ /* Reset the last_residue for Rx DMA poll */
+ if (uap->dmarx.poll_rate)
+- dmarx->last_residue = sgbuf->sg.length;
++ dmarx->last_residue = dbuf->len;
+
+ /*
+ * Only continue with trying to read the FIFO if all DMA chars have
+@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+ {
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
++ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++ &dmarx->dbuf_b : &dmarx->dbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+ uap->dmarx.running = false;
+
+- pending = sgbuf->sg.length - state.residue;
++ pending = dbuf->len - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ bool lastbuf = dmarx->use_buf_b;
+- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
++ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++ &dmarx->dbuf_b : &dmarx->dbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ int ret;
+@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
+ * the DMA irq handler. So we check the residue here.
+ */
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+- pending = sgbuf->sg.length - state.residue;
++ pending = dbuf->len - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
+ unsigned long flags;
+ unsigned int dmataken = 0;
+ unsigned int size = 0;
+- struct pl011_sgbuf *sgbuf;
++ struct pl011_dmabuf *dbuf;
+ int dma_count;
+ struct dma_tx_state state;
+
+- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+ if (likely(state.residue < dmarx->last_residue)) {
+- dmataken = sgbuf->sg.length - dmarx->last_residue;
++ dmataken = dbuf->len - dmarx->last_residue;
+ size = dmarx->last_residue - state.residue;
+- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ size);
+ if (dma_count == size)
+ dmarx->last_residue = state.residue;
+@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ return;
+ }
+
+- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
++ uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
+
+ /* The DMA buffer is now the FIFO the TTY subsystem can use */
+ uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+@@ -1133,7 +1128,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ goto skip_rx;
+
+ /* Allocate and map DMA RX buffers */
+- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+@@ -1141,12 +1136,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ goto skip_rx;
+ }
+
+- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
++ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer B", ret);
+- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ DMA_FROM_DEVICE);
+ goto skip_rx;
+ }
+@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+ dmaengine_terminate_all(uap->dmatx.chan);
+ if (uap->dmatx.queued) {
+- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+- DMA_TO_DEVICE);
++ dma_unmap_single(uap->dmatx.chan->device->dev,
++ uap->dmatx.dma, uap->dmatx.len,
++ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ }
+
+@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
+ if (uap->using_rx_dma) {
+ dmaengine_terminate_all(uap->dmarx.chan);
+ /* Clean up the RX DMA */
+- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
+ if (uap->dmarx.poll_rate)
+ del_timer_sync(&uap->dmarx.timer);
+ uap->using_rx_dma = false;
+@@ -1349,11 +1345,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
+ }
+ }
+
++static void pl011_rs485_tx_start(struct uart_amba_port *uap)
++{
++ struct uart_port *port = &uap->port;
++ u32 cr;
++
++ /* Enable transmitter */
++ cr = pl011_read(uap, REG_CR);
++ cr |= UART011_CR_TXE;
++
++ /* Disable receiver if half-duplex */
++ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
++ cr &= ~UART011_CR_RXE;
++
++ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
++ cr &= ~UART011_CR_RTS;
++ else
++ cr |= UART011_CR_RTS;
++
++ pl011_write(cr, uap, REG_CR);
++
++ if (port->rs485.delay_rts_before_send)
++ mdelay(port->rs485.delay_rts_before_send);
++
++ uap->rs485_tx_started = true;
++}
++
+ static void pl011_start_tx(struct uart_port *port)
+ {
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
+
++ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
++ !uap->rs485_tx_started)
++ pl011_rs485_tx_start(uap);
++
+ if (!pl011_dma_tx_start(uap))
+ pl011_start_tx_pio(uap);
+ }
+@@ -1435,42 +1461,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
+ return true;
+ }
+
+-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+-{
+- struct uart_port *port = &uap->port;
+- u32 cr;
+-
+- /* Enable transmitter */
+- cr = pl011_read(uap, REG_CR);
+- cr |= UART011_CR_TXE;
+-
+- /* Disable receiver if half-duplex */
+- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+- cr &= ~UART011_CR_RXE;
+-
+- if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+- cr &= ~UART011_CR_RTS;
+- else
+- cr |= UART011_CR_RTS;
+-
+- pl011_write(cr, uap, REG_CR);
+-
+- if (port->rs485.delay_rts_before_send)
+- mdelay(port->rs485.delay_rts_before_send);
+-
+- uap->rs485_tx_started = true;
+-}
+-
+ /* Returns true if tx interrupts have to be (kept) enabled */
+ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+ {
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ int count = uap->fifosize >> 1;
+
+- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+- !uap->rs485_tx_started)
+- pl011_rs485_tx_start(uap);
+-
+ if (uap->port.x_char) {
+ if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
+ return true;
+diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
+index d7658f38083801..d3cb341f2c5536 100644
+--- a/drivers/tty/serial/apbuart.c
++++ b/drivers/tty/serial/apbuart.c
+@@ -122,7 +122,7 @@ static void apbuart_tx_chars(struct uart_port *port)
+ {
+ u8 ch;
+
+- uart_port_tx_limited(port, ch, port->fifosize >> 1,
++ uart_port_tx_limited(port, ch, port->fifosize,
+ true,
+ UART_PUT_CHAR(port, ch),
+ ({}));
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 88cdafa5ac541d..bcca5627afaca8 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2522,7 +2522,7 @@ static const struct uart_ops atmel_pops = {
+ };
+
+ static const struct serial_rs485 atmel_rs485_supported = {
+- .flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX,
++ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+ };
+diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
+index 0dd8cceb837cc6..44c27e5cefbc97 100644
+--- a/drivers/tty/serial/bcm63xx_uart.c
++++ b/drivers/tty/serial/bcm63xx_uart.c
+@@ -309,8 +309,8 @@ static void bcm_uart_do_tx(struct uart_port *port)
+
+ val = bcm_uart_readl(port, UART_MCTL_REG);
+ val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
+-
+- pending = uart_port_tx_limited(port, ch, port->fifosize - val,
++ pending = uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP,
++ port->fifosize - val,
+ true,
+ bcm_uart_writel(port, ch, UART_FIFO_REG),
+ ({}));
+@@ -321,6 +321,9 @@ static void bcm_uart_do_tx(struct uart_port *port)
+ val = bcm_uart_readl(port, UART_IR_REG);
+ val &= ~UART_TX_INT_MASK;
+ bcm_uart_writel(port, val, UART_IR_REG);
++
++ if (uart_tx_stopped(port))
++ bcm_uart_stop_tx(port);
+ }
+
+ /*
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index f72e1340b47d4e..8bd0f8e45b146f 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2345,9 +2345,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+
+ lpuart32_write(&sport->port, bd, UARTBAUD);
+ lpuart32_serial_setbrg(sport, baud);
+- lpuart32_write(&sport->port, modem, UARTMODIR);
+- lpuart32_write(&sport->port, ctrl, UARTCTRL);
++ /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
++ lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+ /* restore control register */
++ lpuart32_write(&sport->port, ctrl, UARTCTRL);
++ /* re-enable the CTS if needed */
++ lpuart32_write(&sport->port, modem, UARTMODIR);
+
+ if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
+ sport->is_cs7 = true;
+@@ -2927,6 +2930,7 @@ static int lpuart_probe(struct platform_device *pdev)
+ pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
++ pm_runtime_mark_last_busy(&pdev->dev);
+
+ ret = lpuart_global_reset(sport);
+ if (ret)
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 13cb78340709a9..349d4849ba5e3b 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -26,6 +26,7 @@
+ #include <linux/slab.h>
+ #include <linux/of.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/dma-mapping.h>
+
+ #include <asm/irq.h>
+@@ -118,6 +119,7 @@
+ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
+ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
+ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
++#define UFCR_RXTL_MASK 0x3F /* Receiver trigger 6 bits wide */
+ #define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
+ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
+@@ -421,13 +423,13 @@ static void imx_uart_stop_tx(struct uart_port *port)
+ ucr1 = imx_uart_readl(sport, UCR1);
+ imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);
+
++ ucr4 = imx_uart_readl(sport, UCR4);
+ usr2 = imx_uart_readl(sport, USR2);
+- if (!(usr2 & USR2_TXDC)) {
++ if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) {
+ /* The shifter is still busy, so retry once TC triggers */
+ return;
+ }
+
+- ucr4 = imx_uart_readl(sport, UCR4);
+ ucr4 &= ~UCR4_TCEN;
+ imx_uart_writel(sport, ucr4, UCR4);
+
+@@ -468,8 +470,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
+ }
+ }
+
+-/* called with port.lock taken and irqs off */
+-static void imx_uart_stop_rx(struct uart_port *port)
++static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback)
+ {
+ struct imx_port *sport = (struct imx_port *)port;
+ u32 ucr1, ucr2, ucr4, uts;
+@@ -491,7 +492,7 @@ static void imx_uart_stop_rx(struct uart_port *port)
+ /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ port->rs485.flags & SER_RS485_RTS_ON_SEND &&
+- sport->have_rtscts && !sport->have_rtsgpio) {
++ sport->have_rtscts && !sport->have_rtsgpio && loopback) {
+ uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
+ uts |= UTS_LOOP;
+ imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
+@@ -503,6 +504,16 @@ static void imx_uart_stop_rx(struct uart_port *port)
+ imx_uart_writel(sport, ucr2, UCR2);
+ }
+
++/* called with port.lock taken and irqs off */
++static void imx_uart_stop_rx(struct uart_port *port)
++{
++ /*
++ * Stop RX and enable loopback in order to make sure the RS485
++ * bus is not blocked. See comment in imx_uart_probe().
++ */
++ imx_uart_stop_rx_with_loopback_ctrl(port, true);
++}
++
+ /* called with port.lock taken and irqs off */
+ static void imx_uart_enable_ms(struct uart_port *port)
+ {
+@@ -688,9 +699,14 @@ static void imx_uart_start_tx(struct uart_port *port)
+ imx_uart_rts_inactive(sport, &ucr2);
+ imx_uart_writel(sport, ucr2, UCR2);
+
++ /*
++ * Since we are about to transmit, we cannot stop RX
++ * with loopback enabled because that would simply loop
++ * our transmitted data back to RX.
++ */
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
+ !port->rs485_rx_during_tx_gpio)
+- imx_uart_stop_rx(port);
++ imx_uart_stop_rx_with_loopback_ctrl(port, false);
+
+ sport->tx_state = WAIT_AFTER_RTS;
+
+@@ -755,6 +771,21 @@ static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
+
+ imx_uart_writel(sport, USR1_RTSD, USR1);
+ usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
++ /*
++ * Update sport->old_status here, so any follow-up calls to
++ * imx_uart_mctrl_check() will be able to recognize that RTS
++ * state changed since last imx_uart_mctrl_check() call.
++ *
++ * In case RTS has been detected as asserted here and later on
++ * deasserted by the time imx_uart_mctrl_check() was called,
++ * imx_uart_mctrl_check() can detect the RTS state change and
++ * trigger uart_handle_cts_change() to unblock the port for
++ * further TX transfers.
++ */
++ if (usr1 & USR1_RTSS)
++ sport->old_status |= TIOCM_CTS;
++ else
++ sport->old_status &= ~TIOCM_CTS;
+ uart_handle_cts_change(&sport->port, usr1);
+ wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+
+@@ -1304,7 +1335,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
+
+ }
+
+-#define TXTL_DEFAULT 2 /* reset default */
++#define TXTL_DEFAULT 8
+ #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
+ #define TXTL_DMA 8 /* DMA burst setting */
+ #define RXTL_DMA 9 /* DMA burst setting */
+@@ -1925,7 +1956,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+ struct serial_rs485 *rs485conf)
+ {
+ struct imx_port *sport = (struct imx_port *)port;
+- u32 ucr2;
++ u32 ucr2, ufcr;
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ /* Enable receiver if low-active RTS signal is requested */
+@@ -1944,12 +1975,13 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+
+ /* Make sure Rx is enabled in case Tx is active with Rx disabled */
+ if (!(rs485conf->flags & SER_RS485_ENABLED) ||
+- rs485conf->flags & SER_RS485_RX_DURING_TX)
++ rs485conf->flags & SER_RS485_RX_DURING_TX) {
++ /* If the receiver trigger is 0, set it to a default value */
++ ufcr = imx_uart_readl(sport, UFCR);
++ if ((ufcr & UFCR_RXTL_MASK) == 0)
++ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_uart_start_rx(port);
+-
+- if (port->rs485_rx_during_tx_gpio)
+- gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
+- !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
++ }
+
+ return 0;
+ }
+@@ -1999,7 +2031,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ struct imx_port *sport = imx_uart_ports[co->index];
+ struct imx_port_ucrs old_ucr;
+ unsigned long flags;
+- unsigned int ucr1;
++ unsigned int ucr1, usr2;
+ int locked = 1;
+
+ if (sport->port.sysrq)
+@@ -2030,8 +2062,8 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ * Finally, wait for transmitter to become empty
+ * and restore UCR1/2/3
+ */
+- while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
+-
++ read_poll_timeout_atomic(imx_uart_readl, usr2, usr2 & USR2_TXDC,
++ 0, USEC_PER_SEC, false, sport, USR2);
+ imx_uart_ucrs_restore(sport, &old_ucr);
+
+ if (locked)
+@@ -2214,7 +2246,6 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
+ return HRTIMER_NORESTART;
+ }
+
+-static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */
+ static const struct serial_rs485 imx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+ SER_RS485_RX_DURING_TX,
+@@ -2298,8 +2329,6 @@ static int imx_uart_probe(struct platform_device *pdev)
+ /* RTS is required to control the RS485 transmitter */
+ if (sport->have_rtscts || sport->have_rtsgpio)
+ sport->port.rs485_supported = imx_rs485_supported;
+- else
+- sport->port.rs485_supported = imx_no_rs485;
+ sport->port.flags = UPF_BOOT_AUTOCONF;
+ timer_setup(&sport->timer, imx_uart_timeout, 0);
+
+@@ -2326,19 +2355,13 @@ static int imx_uart_probe(struct platform_device *pdev)
+ /* For register access, we only need to enable the ipg clock. */
+ ret = clk_prepare_enable(sport->clk_ipg);
+ if (ret) {
+- dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
++ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = uart_get_rs485_mode(&sport->port);
+- if (ret) {
+- clk_disable_unprepare(sport->clk_ipg);
+- return ret;
+- }
+-
+- if (sport->port.rs485.flags & SER_RS485_ENABLED &&
+- (!sport->have_rtscts && !sport->have_rtsgpio))
+- dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
++ if (ret)
++ goto err_clk;
+
+ /*
+ * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
+@@ -2418,8 +2441,6 @@ static int imx_uart_probe(struct platform_device *pdev)
+ imx_uart_writel(sport, ucr3, UCR3);
+ }
+
+- clk_disable_unprepare(sport->clk_ipg);
+-
+ hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ sport->trigger_start_tx.function = imx_trigger_start_tx;
+@@ -2435,7 +2456,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rx irq: %d\n",
+ ret);
+- return ret;
++ goto err_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
+@@ -2443,7 +2464,7 @@ static int imx_uart_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request tx irq: %d\n",
+ ret);
+- return ret;
++ goto err_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+@@ -2451,14 +2472,14 @@ static int imx_uart_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+- return ret;
++ goto err_clk;
+ }
+ } else {
+ ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+- return ret;
++ goto err_clk;
+ }
+ }
+
+@@ -2466,7 +2487,12 @@ static int imx_uart_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, sport);
+
+- return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
++ ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port);
++
++err_clk:
++ clk_disable_unprepare(sport->clk_ipg);
++
++ return ret;
+ }
+
+ static int imx_uart_remove(struct platform_device *pdev)
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 7ce7bb1640054d..58ea1e1391ceef 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -19,6 +19,7 @@
+ #include <linux/console.h>
+ #include <linux/vt_kern.h>
+ #include <linux/input.h>
++#include <linux/irq_work.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/serial_core.h>
+@@ -48,6 +49,25 @@ static struct kgdb_io kgdboc_earlycon_io_ops;
+ static int (*earlycon_orig_exit)(struct console *con);
+ #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+
++/*
++ * When we leave the debug trap handler we need to reset the keyboard status
++ * (since the original keyboard state gets partially clobbered by kdb use of
++ * the keyboard).
++ *
++ * The path to deliver the reset is somewhat circuitous.
++ *
++ * To deliver the reset we register an input handler, reset the keyboard and
++ * then deregister the input handler. However, to get this done right, we do
++ * have to carefully manage the calling context because we can only register
++ * input handlers from task context.
++ *
++ * In particular we need to trigger the action from the debug trap handler with
++ * all its NMI and/or NMI-like oddities. To solve this the kgdboc trap exit code
++ * (the "post_exception" callback) uses irq_work_queue(), which is NMI-safe, to
++ * schedule a callback from a hardirq context. From there we have to defer the
++ * work again, this time using schedule_work(), to get a callback using the
++ * system workqueue, which runs in task context.
++ */
+ #ifdef CONFIG_KDB_KEYBOARD
+ static int kgdboc_reset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+@@ -99,10 +119,17 @@ static void kgdboc_restore_input_helper(struct work_struct *dummy)
+
+ static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+
++static void kgdboc_queue_restore_input_helper(struct irq_work *unused)
++{
++ schedule_work(&kgdboc_restore_input_work);
++}
++
++static DEFINE_IRQ_WORK(kgdboc_restore_input_irq_work, kgdboc_queue_restore_input_helper);
++
+ static void kgdboc_restore_input(void)
+ {
+ if (likely(system_state == SYSTEM_RUNNING))
+- schedule_work(&kgdboc_restore_input_work);
++ irq_work_queue(&kgdboc_restore_input_irq_work);
+ }
+
+ static int kgdboc_register_kbd(char **cptr)
+@@ -133,6 +160,7 @@ static void kgdboc_unregister_kbd(void)
+ i--;
+ }
+ }
++ irq_work_sync(&kgdboc_restore_input_irq_work);
+ flush_work(&kgdboc_restore_input_work);
+ }
+ #else /* ! CONFIG_KDB_KEYBOARD */
+diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
+index 465b1def9e119b..99225f1e02ac8f 100644
+--- a/drivers/tty/serial/ma35d1_serial.c
++++ b/drivers/tty/serial/ma35d1_serial.c
+@@ -552,11 +552,19 @@ static void ma35d1serial_console_putchar(struct uart_port *port, unsigned char c
+ */
+ static void ma35d1serial_console_write(struct console *co, const char *s, u32 count)
+ {
+- struct uart_ma35d1_port *up = &ma35d1serial_ports[co->index];
++ struct uart_ma35d1_port *up;
+ unsigned long flags;
+ int locked = 1;
+ u32 ier;
+
++ if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
++ pr_warn("Failed to write on ononsole port %x, out of range\n",
++ co->index);
++ return;
++ }
++
++ up = &ma35d1serial_ports[co->index];
++
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+@@ -680,12 +688,13 @@ static int ma35d1serial_probe(struct platform_device *pdev)
+ struct uart_ma35d1_port *up;
+ int ret = 0;
+
+- if (pdev->dev.of_node) {
+- ret = of_alias_get_id(pdev->dev.of_node, "serial");
+- if (ret < 0) {
+- dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
+- return ret;
+- }
++ if (!pdev->dev.of_node)
++ return -ENODEV;
++
++ ret = of_alias_get_id(pdev->dev.of_node, "serial");
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
++ return ret;
+ }
+ up = &ma35d1serial_ports[ret];
+ up->port.line = ret;
+diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
+index 5efb2b593be333..3d2b83d6ab51a6 100644
+--- a/drivers/tty/serial/max3100.c
++++ b/drivers/tty/serial/max3100.c
+@@ -45,6 +45,9 @@
+ #include <linux/freezer.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
++#include <linux/types.h>
++
++#include <asm/unaligned.h>
+
+ #include <linux/serial_max3100.h>
+
+@@ -191,7 +194,7 @@ static void max3100_timeout(struct timer_list *t)
+ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+ {
+ struct spi_message message;
+- u16 etx, erx;
++ __be16 etx, erx;
+ int status;
+ struct spi_transfer tran = {
+ .tx_buf = &etx,
+@@ -213,7 +216,7 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+ return 0;
+ }
+
+-static int max3100_handlerx(struct max3100_port *s, u16 rx)
++static int max3100_handlerx_unlocked(struct max3100_port *s, u16 rx)
+ {
+ unsigned int status = 0;
+ int ret = 0, cts;
+@@ -254,6 +257,17 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx)
+ return ret;
+ }
+
++static int max3100_handlerx(struct max3100_port *s, u16 rx)
++{
++ unsigned long flags;
++ int ret;
++
++ uart_port_lock_irqsave(&s->port, &flags);
++ ret = max3100_handlerx_unlocked(s, rx);
++ uart_port_unlock_irqrestore(&s->port, flags);
++ return ret;
++}
++
+ static void max3100_work(struct work_struct *w)
+ {
+ struct max3100_port *s = container_of(w, struct max3100_port, work);
+@@ -738,13 +752,14 @@ static int max3100_probe(struct spi_device *spi)
+ mutex_lock(&max3100s_lock);
+
+ if (!uart_driver_registered) {
+- uart_driver_registered = 1;
+ retval = uart_register_driver(&max3100_uart_driver);
+ if (retval) {
+ printk(KERN_ERR "Couldn't register max3100 uart driver\n");
+ mutex_unlock(&max3100s_lock);
+ return retval;
+ }
++
++ uart_driver_registered = 1;
+ }
+
+ for (i = 0; i < MAX_MAX3100; i++)
+@@ -830,6 +845,7 @@ static void max3100_remove(struct spi_device *spi)
+ }
+ pr_debug("removing max3100 driver\n");
+ uart_unregister_driver(&max3100_uart_driver);
++ uart_driver_registered = 0;
+
+ mutex_unlock(&max3100s_lock);
+ }
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index db3204d2a30533..e339abff926d32 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -237,6 +237,14 @@
+ #define MAX310x_REV_MASK (0xf8)
+ #define MAX310X_WRITE_BIT 0x80
+
++/* Port startup definitions */
++#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */
++#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */
++
++/* Crystal-related definitions */
++#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */
++#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */
++
+ /* MAX3107 specific */
+ #define MAX3107_REV_ID (0xa0)
+
+@@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
+ return 1;
+ }
+
+-static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
++static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+ unsigned long freq, bool xtal)
+ {
+ unsigned int div, clksrc, pllcfg = 0;
+@@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+
+ /* Wait for crystal */
+ if (xtal) {
+- unsigned int val;
+- msleep(10);
+- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+- if (!(val & MAX310X_STS_CLKREADY_BIT)) {
+- dev_warn(dev, "clock is not stable yet\n");
+- }
++ bool stable = false;
++ unsigned int try = 0, val = 0;
++
++ do {
++ msleep(MAX310X_XTAL_WAIT_DELAY_MS);
++ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
++
++ if (val & MAX310X_STS_CLKREADY_BIT)
++ stable = true;
++ } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
++
++ if (!stable)
++ return dev_err_probe(dev, -EAGAIN,
++ "clock is not stable\n");
+ }
+
+ return bestfreq;
+@@ -1271,7 +1287,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ {
+ int i, ret, fmin, fmax, freq;
+ struct max310x_port *s;
+- u32 uartclk = 0;
++ s32 uartclk = 0;
+ bool xtal;
+
+ for (i = 0; i < devtype->nr; i++)
+@@ -1334,6 +1350,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ goto out_clk;
+
+ for (i = 0; i < devtype->nr; i++) {
++ bool started = false;
++ unsigned int try = 0, val = 0;
++
+ /* Reset port */
+ regmap_write(regmaps[i], MAX310X_MODE2_REG,
+ MAX310X_MODE2_RST_BIT);
+@@ -1342,13 +1361,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+
+ /* Wait for port startup */
+ do {
+- regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
+- } while (ret != 0x01);
++ msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
++ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
++
++ if (val == 0x01)
++ started = true;
++ } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
++
++ if (!started) {
++ ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
++ goto out_uart;
++ }
+
+ regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
+ }
+
+ uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
++ if (uartclk < 0) {
++ ret = uartclk;
++ goto out_uart;
++ }
++
+ dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
+
+ for (i = 0; i < devtype->nr; i++) {
+@@ -1428,7 +1461,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
+ if (!ret)
+ return 0;
+
+- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
++ dev_err(dev, "Unable to request IRQ %i\n", irq);
+
+ out_uart:
+ for (i = 0; i < devtype->nr; i++) {
+@@ -1602,13 +1635,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
+
+ static int max310x_i2c_probe(struct i2c_client *client)
+ {
+- const struct max310x_devtype *devtype =
+- device_get_match_data(&client->dev);
++ const struct max310x_devtype *devtype;
+ struct i2c_client *port_client;
+ struct regmap *regmaps[4];
+ unsigned int i;
+ u8 port_addr;
+
++ devtype = device_get_match_data(&client->dev);
++ if (!devtype)
++ return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n");
++
+ if (client->addr < devtype->slave_addr.min ||
+ client->addr > devtype->slave_addr.max)
+ return dev_err_probe(&client->dev, -EINVAL,
+diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
+index 1666ce012e5e80..aea29b4e656752 100644
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -462,7 +462,7 @@ static const struct uart_ops mcf_uart_ops = {
+ .verify_port = mcf_verify_port,
+ };
+
+-static struct mcf_uart mcf_ports[4];
++static struct mcf_uart mcf_ports[10];
+
+ #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
+
+diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
+index 790d910dafa5d0..9388b9ddea3bd0 100644
+--- a/drivers/tty/serial/meson_uart.c
++++ b/drivers/tty/serial/meson_uart.c
+@@ -380,10 +380,14 @@ static void meson_uart_set_termios(struct uart_port *port,
+ else
+ val |= AML_UART_STOP_BIT_1SB;
+
+- if (cflags & CRTSCTS)
+- val &= ~AML_UART_TWO_WIRE_EN;
+- else
++ if (cflags & CRTSCTS) {
++ if (port->flags & UPF_HARD_FLOW)
++ val &= ~AML_UART_TWO_WIRE_EN;
++ else
++ termios->c_cflag &= ~CRTSCTS;
++ } else {
+ val |= AML_UART_TWO_WIRE_EN;
++ }
+
+ writel(val, port->membase + AML_UART_CONTROL);
+
+@@ -705,6 +709,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
+ int ret = 0;
+ int irq;
++ bool has_rtscts;
+
+ if (pdev->dev.of_node)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+@@ -732,6 +737,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ return irq;
+
+ of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
++ has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
+
+ if (meson_ports[pdev->id]) {
+ return dev_err_probe(&pdev->dev, -EBUSY,
+@@ -762,6 +768,8 @@ static int meson_uart_probe(struct platform_device *pdev)
+ port->mapsize = resource_size(res_mem);
+ port->irq = irq;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
++ if (has_rtscts)
++ port->flags |= UPF_HARD_FLOW;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
+ port->dev = &pdev->dev;
+ port->line = pdev->id;
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 8eeecf8ad3596b..777dc8a0aa835f 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -605,13 +605,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
+ return;
+ }
+
+- pending = uart_port_tx(&s->port, ch,
++ pending = uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP,
+ !(mxs_read(s, REG_STAT) & AUART_STAT_TXFF),
+ mxs_write(ch, s, REG_DATA));
+ if (pending)
+ mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
+ else
+ mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);
++
++ if (uart_tx_stopped(&s->port))
++ mxs_auart_stop_tx(&s->port);
+ }
+
+ static void mxs_auart_rx_char(struct mxs_auart_port *s)
+@@ -1077,11 +1080,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
+
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ {
+- u32 istat;
++ u32 istat, stat;
+ struct mxs_auart_port *s = context;
+ u32 mctrl_temp = s->mctrl_prev;
+- u32 stat = mxs_read(s, REG_STAT);
+
++ uart_port_lock(&s->port);
++
++ stat = mxs_read(s, REG_STAT);
+ istat = mxs_read(s, REG_INTR);
+
+ /* ack irq */
+@@ -1117,6 +1122,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ istat &= ~AUART_INTR_TXIS;
+ }
+
++ uart_port_unlock(&s->port);
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 0ead88c5a19ad8..135a838f517a20 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1483,6 +1483,13 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
+ return omap_up_info;
+ }
+
++static const struct serial_rs485 serial_omap_rs485_supported = {
++ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
++ SER_RS485_RX_DURING_TX,
++ .delay_rts_before_send = 1,
++ .delay_rts_after_send = 1,
++};
++
+ static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ struct device *dev)
+ {
+@@ -1497,6 +1504,9 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ if (!np)
+ return 0;
+
++ up->port.rs485_config = serial_omap_config_rs485;
++ up->port.rs485_supported = serial_omap_rs485_supported;
++
+ ret = uart_get_rs485_mode(&up->port);
+ if (ret)
+ return ret;
+@@ -1531,13 +1541,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ return 0;
+ }
+
+-static const struct serial_rs485 serial_omap_rs485_supported = {
+- .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+- SER_RS485_RX_DURING_TX,
+- .delay_rts_before_send = 1,
+- .delay_rts_after_send = 1,
+-};
+-
+ static int serial_omap_probe(struct platform_device *pdev)
+ {
+ struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
+@@ -1604,17 +1607,11 @@ static int serial_omap_probe(struct platform_device *pdev)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
+
+- ret = serial_omap_probe_rs485(up, &pdev->dev);
+- if (ret < 0)
+- goto err_rs485;
+-
+ sprintf(up->name, "OMAP UART%d", up->port.line);
+ up->port.mapbase = mem->start;
+ up->port.membase = base;
+ up->port.flags = omap_up_info->flags;
+ up->port.uartclk = omap_up_info->uartclk;
+- up->port.rs485_config = serial_omap_config_rs485;
+- up->port.rs485_supported = serial_omap_rs485_supported;
+ if (!up->port.uartclk) {
+ up->port.uartclk = DEFAULT_CLK_SPEED;
+ dev_warn(&pdev->dev,
+@@ -1622,6 +1619,10 @@ static int serial_omap_probe(struct platform_device *pdev)
+ DEFAULT_CLK_SPEED);
+ }
+
++ ret = serial_omap_probe_rs485(up, &pdev->dev);
++ if (ret < 0)
++ goto err_rs485;
++
+ up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 13668ffdb1e7d7..29bc80d39e8b75 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
+ {
+ struct tty_port *port;
+ unsigned char ch, r1, drop, flag;
+- int loops = 0;
+
+ /* Sanity check, make sure the old bug is no longer happening */
+ if (uap->port.state == NULL) {
+@@ -291,24 +290,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
+ next_char:
+- /* We can get stuck in an infinite loop getting char 0 when the
+- * line is in a wrong HW state, we break that here.
+- * When that happens, I disable the receive side of the driver.
+- * Note that what I've been experiencing is a real irq loop where
+- * I'm getting flooded regardless of the actual port speed.
+- * Something strange is going on with the HW
+- */
+- if ((++loops) > 1000)
+- goto flood;
+ ch = read_zsreg(uap, R0);
+ if (!(ch & Rx_CH_AV))
+ break;
+ }
+
+- return true;
+- flood:
+- pmz_interrupt_control(uap, 0);
+- pmz_error("pmz: rx irq flood !\n");
+ return true;
+ }
+
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index b8aa4c1293baef..f820a09cb5c39b 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -124,7 +124,7 @@ struct qcom_geni_serial_port {
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
+ bool setup;
+- unsigned int baud;
++ unsigned long poll_timeout_us;
+ unsigned long clk_rate;
+ void *rx_buf;
+ u32 loopback;
+@@ -144,6 +144,8 @@ static const struct uart_ops qcom_geni_uart_pops;
+ static struct uart_driver qcom_geni_console_driver;
+ static struct uart_driver qcom_geni_uart_driver;
+
++static int qcom_geni_serial_port_setup(struct uart_port *uport);
++
+ static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
+ {
+ return container_of(uport, struct qcom_geni_serial_port, uport);
+@@ -270,22 +272,13 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ {
+ u32 reg;
+ struct qcom_geni_serial_port *port;
+- unsigned int baud;
+- unsigned int fifo_bits;
+ unsigned long timeout_us = 20000;
+ struct qcom_geni_private_data *private_data = uport->private_data;
+
+ if (private_data->drv) {
+ port = to_dev_port(uport);
+- baud = port->baud;
+- if (!baud)
+- baud = 115200;
+- fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+- /*
+- * Total polling iterations based on FIFO worth of bytes to be
+- * sent at current baud. Add a little fluff to the wait.
+- */
+- timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500;
++ if (port->poll_timeout_us)
++ timeout_us = port->poll_timeout_us;
+ }
+
+ /*
+@@ -394,6 +387,23 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ qcom_geni_serial_poll_tx_done(uport);
+ }
++
++static int qcom_geni_serial_poll_init(struct uart_port *uport)
++{
++ struct qcom_geni_serial_port *port = to_dev_port(uport);
++ int ret;
++
++ if (!port->setup) {
++ ret = qcom_geni_serial_port_setup(uport);
++ if (ret)
++ return ret;
++ }
++
++ if (!qcom_geni_serial_secondary_active(uport))
++ geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
++
++ return 0;
++}
+ #endif
+
+ #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+@@ -765,17 +775,27 @@ static void qcom_geni_serial_start_rx_fifo(struct uart_port *uport)
+ static void qcom_geni_serial_stop_rx_dma(struct uart_port *uport)
+ {
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
++ bool done;
+
+ if (!qcom_geni_serial_secondary_active(uport))
+ return;
+
+ geni_se_cancel_s_cmd(&port->se);
+- qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+- S_CMD_CANCEL_EN, true);
+-
+- if (qcom_geni_serial_secondary_active(uport))
++ done = qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
++ RX_EOT, true);
++ if (done) {
++ writel(RX_EOT | RX_DMA_DONE,
++ uport->membase + SE_DMA_RX_IRQ_CLR);
++ } else {
+ qcom_geni_serial_abort_rx(uport);
+
++ writel(1, uport->membase + SE_DMA_RX_FSM_RST);
++ qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
++ RX_RESET_DONE, true);
++ writel(RX_RESET_DONE | RX_DMA_DONE,
++ uport->membase + SE_DMA_RX_IRQ_CLR);
++ }
++
+ if (port->rx_dma_addr) {
+ geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr,
+ DMA_RX_BUF_SIZE);
+@@ -851,19 +871,21 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+ }
+
+ static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
+- unsigned int remaining)
++ unsigned int chunk)
+ {
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+- unsigned int tx_bytes;
++ unsigned int tx_bytes, c, remaining = chunk;
+ u8 buf[BYTES_PER_FIFO_WORD];
+
+ while (remaining) {
+ memset(buf, 0, sizeof(buf));
+ tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
+
+- memcpy(buf, &xmit->buf[xmit->tail], tx_bytes);
+- uart_xmit_advance(uport, tx_bytes);
++ for (c = 0; c < tx_bytes ; c++) {
++ buf[c] = xmit->buf[xmit->tail];
++ uart_xmit_advance(uport, 1);
++ }
+
+ iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
+
+@@ -1122,7 +1144,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
+ false, true, true);
+ geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
+ geni_se_select_mode(&port->se, port->dev_data->mode);
+- qcom_geni_serial_start_rx(uport);
+ port->setup = true;
+
+ return 0;
+@@ -1138,6 +1159,11 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
+ if (ret)
+ return ret;
+ }
++
++ uart_port_lock_irq(uport);
++ qcom_geni_serial_start_rx(uport);
++ uart_port_unlock_irq(uport);
++
+ enable_irq(uport->irq);
+
+ return 0;
+@@ -1221,11 +1247,10 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ unsigned long clk_rate;
+ u32 ver, sampling_rate;
+ unsigned int avg_bw_core;
++ unsigned long timeout;
+
+- qcom_geni_serial_stop_rx(uport);
+ /* baud rate */
+ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
+- port->baud = baud;
+
+ sampling_rate = UART_OVERSAMPLING;
+ /* Sampling rate is halved for IP versions >= 2.5 */
+@@ -1239,7 +1264,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ dev_err(port->se.dev,
+ "Couldn't find suitable clock rate for %u\n",
+ baud * sampling_rate);
+- goto out_restart_rx;
++ return;
+ }
+
+ dev_dbg(port->se.dev, "desired_rate = %u, clk_rate = %lu, clk_div = %u\n",
+@@ -1303,9 +1328,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ else
+ tx_trans_cfg |= UART_CTS_MASK;
+
+- if (baud)
++ if (baud) {
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
++ /*
++ * Make sure that qcom_geni_serial_poll_bitfield() waits for
++ * the FIFO, two-word intermediate transfer register and shift
++ * register to clear.
++ *
++ * Note that uart_fifo_timeout() also adds a 20 ms margin.
++ */
++ timeout = jiffies_to_usecs(uart_fifo_timeout(uport));
++ timeout += 3 * timeout / port->tx_fifo_depth;
++ WRITE_ONCE(port->poll_timeout_us, timeout);
++ }
++
+ if (!uart_console(uport))
+ writel(port->loopback,
+ uport->membase + SE_UART_LOOPBACK_CFG);
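The cached poll timeout computed in set_termios above is sized so that a full FIFO plus three extra words (the two-word intermediate transfer register and the shift register) can drain before polling gives up. A worked example of the arithmetic, assuming uart_fifo_timeout() returned 40 ms for a 16-word FIFO (both values are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned long timeout_us = 40000;   /* assumed uart_fifo_timeout() */
    unsigned int tx_fifo_depth = 16;

    /* Headroom for ~3 words beyond the FIFO itself, as in the patch. */
    timeout_us += 3 * timeout_us / tx_fifo_depth;
    printf("poll timeout: %lu us\n", timeout_us);   /* 47500 us */
    return 0;
}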
+@@ -1318,8 +1355,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+ writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+-out_restart_rx:
+- qcom_geni_serial_start_rx(uport);
+ }
+
+ #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+@@ -1539,7 +1574,7 @@ static const struct uart_ops qcom_geni_console_pops = {
+ #ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = qcom_geni_serial_get_char,
+ .poll_put_char = qcom_geni_serial_poll_put_char,
+- .poll_init = qcom_geni_serial_port_setup,
++ .poll_init = qcom_geni_serial_poll_init,
+ #endif
+ .pm = qcom_geni_serial_pm,
+ };
+@@ -1740,38 +1775,6 @@ static int qcom_geni_serial_sys_resume(struct device *dev)
+ return ret;
+ }
+
+-static int qcom_geni_serial_sys_hib_resume(struct device *dev)
+-{
+- int ret = 0;
+- struct uart_port *uport;
+- struct qcom_geni_private_data *private_data;
+- struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
+-
+- uport = &port->uport;
+- private_data = uport->private_data;
+-
+- if (uart_console(uport)) {
+- geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
+- geni_icc_set_bw(&port->se);
+- ret = uart_resume_port(private_data->drv, uport);
+- /*
+- * For hibernation usecase clients for
+- * console UART won't call port setup during restore,
+- * hence call port setup for console uart.
+- */
+- qcom_geni_serial_port_setup(uport);
+- } else {
+- /*
+- * Peripheral register settings are lost during hibernation.
+- * Update setup flag such that port setup happens again
+- * during next session. Clients of HS-UART will close and
+- * open the port during hibernation.
+- */
+- port->setup = false;
+- }
+- return ret;
+-}
+-
+ static const struct qcom_geni_device_data qcom_geni_console_data = {
+ .console = true,
+ .mode = GENI_SE_FIFO,
+@@ -1783,12 +1786,8 @@ static const struct qcom_geni_device_data qcom_geni_uart_data = {
+ };
+
+ static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
+- .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+- .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
+- .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+- .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+- .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
+- .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
++ SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
++ qcom_geni_serial_sys_resume)
+ };
+
+ static const struct of_device_id qcom_geni_serial_match_table[] = {
+diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
+index de220ac8ca5494..5a1de6044b38cc 100644
+--- a/drivers/tty/serial/rp2.c
++++ b/drivers/tty/serial/rp2.c
+@@ -578,8 +578,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id)
+ u32 clk_cfg;
+
+ writew(1, base + RP2_GLOBAL_CMD);
+- readw(base + RP2_GLOBAL_CMD);
+ msleep(100);
++ readw(base + RP2_GLOBAL_CMD);
+ writel(0, base + RP2_CLK_PRESCALER);
+
+ /* TDM clock configuration */
+diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
+index 07fb8a9dac6336..5a4d88e134715d 100644
+--- a/drivers/tty/serial/samsung_tty.c
++++ b/drivers/tty/serial/samsung_tty.c
+@@ -990,11 +990,10 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
+ if ((ufstat & info->tx_fifomask) != 0 ||
+ (ufstat & info->tx_fifofull))
+ return 0;
+-
+- return 1;
++ return TIOCSER_TEMT;
+ }
+
+- return s3c24xx_serial_txempty_nofifo(port);
++ return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
+ }
+
+ /* no modem control lines */
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index f61d98e09dc397..7a9924d9b294e9 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -18,13 +18,14 @@
+ #include <linux/module.h>
+ #include <linux/property.h>
+ #include <linux/regmap.h>
++#include <linux/sched.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
+ #include <linux/spi/spi.h>
+ #include <linux/uaccess.h>
+-#include <uapi/linux/sched/types.h>
++#include <linux/units.h>
+
+ #define SC16IS7XX_NAME "sc16is7xx"
+ #define SC16IS7XX_MAX_DEVS 8
+@@ -300,8 +301,8 @@
+
+
+ /* Misc definitions */
++#define SC16IS7XX_SPI_READ_BIT BIT(7)
+ #define SC16IS7XX_FIFO_SIZE (64)
+-#define SC16IS7XX_REG_SHIFT 2
+ #define SC16IS7XX_GPIOS_PER_BANK 4
+
+ struct sc16is7xx_devtype {
+@@ -322,7 +323,8 @@ struct sc16is7xx_one_config {
+
+ struct sc16is7xx_one {
+ struct uart_port port;
+- u8 line;
++ struct regmap *regmap;
++ struct mutex efr_lock; /* EFR registers access */
+ struct kthread_work tx_work;
+ struct kthread_work reg_work;
+ struct kthread_delayed_work ms_work;
+@@ -333,7 +335,6 @@ struct sc16is7xx_one {
+
+ struct sc16is7xx_port {
+ const struct sc16is7xx_devtype *devtype;
+- struct regmap *regmap;
+ struct clk *clk;
+ #ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+@@ -343,7 +344,6 @@ struct sc16is7xx_port {
+ unsigned char buf[SC16IS7XX_FIFO_SIZE];
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+- struct mutex efr_lock;
+ struct sc16is7xx_one p[];
+ };
+
+@@ -361,48 +361,35 @@ static void sc16is7xx_stop_tx(struct uart_port *port);
+ #define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e)))
+ #define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
+
+-static int sc16is7xx_line(struct uart_port *port)
+-{
+- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+- return one->line;
+-}
+-
+ static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ unsigned int val = 0;
+- const u8 line = sc16is7xx_line(port);
+
+- regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
++ regmap_read(one->regmap, reg, &val);
+
+ return val;
+ }
+
+ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
++ regmap_write(one->regmap, reg, val);
+ }
+
+ static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+ {
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
+- u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- regcache_cache_bypass(s->regmap, true);
+- regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+- regcache_cache_bypass(s->regmap, false);
++ regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, s->buf, rxlen);
+ }
+
+ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+ {
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
+- u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ /*
+ * Don't send zero-length data, at least on SPI it confuses the chip
+@@ -411,32 +398,15 @@ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+ if (unlikely(!to_send))
+ return;
+
+- regcache_cache_bypass(s->regmap, true);
+- regmap_raw_write(s->regmap, addr, s->buf, to_send);
+- regcache_cache_bypass(s->regmap, false);
++ regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
+ }
+
+ static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
+ u8 mask, u8 val)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- const u8 line = sc16is7xx_line(port);
+-
+- regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
+- mask, val);
+-}
+-
+-static int sc16is7xx_alloc_line(void)
+-{
+- int i;
+-
+- BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
+-
+- for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
+- if (!test_and_set_bit(i, &sc16is7xx_lines))
+- break;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- return i;
++ regmap_update_bits(one->regmap, reg, mask, val);
+ }
+
+ static void sc16is7xx_power(struct uart_port *port, int on)
+@@ -478,7 +448,7 @@ static const struct sc16is7xx_devtype sc16is762_devtype = {
+
+ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+ {
+- switch (reg >> SC16IS7XX_REG_SHIFT) {
++ switch (reg) {
+ case SC16IS7XX_RHR_REG:
+ case SC16IS7XX_IIR_REG:
+ case SC16IS7XX_LSR_REG:
+@@ -497,7 +467,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+
+ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
+ {
+- switch (reg >> SC16IS7XX_REG_SHIFT) {
++ switch (reg) {
+ case SC16IS7XX_RHR_REG:
+ return true;
+ default:
+@@ -507,16 +477,33 @@ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
+ return false;
+ }
+
++static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
++{
++ return reg == SC16IS7XX_RHR_REG;
++}
++
++/*
++ * Configure programmable baud rate generator (divisor) according to the
++ * desired baud rate.
++ *
++ * From the datasheet, the divisor is computed according to:
++ *
++ * XTAL1 input frequency
++ * -----------------------
++ * prescaler
++ * divisor = ---------------------------
++ * baud-rate x sampling-rate
++ */
+ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ u8 lcr;
+- u8 prescaler = 0;
++ unsigned int prescaler = 1;
+ unsigned long clk = port->uartclk, div = clk / 16 / baud;
+
+- if (div > 0xffff) {
+- prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
+- div /= 4;
++ if (div >= BIT(16)) {
++ prescaler = 4;
++ div /= prescaler;
+ }
+
+ /* In an amazing feat of design, the Enhanced Features Register shares
+@@ -532,7 +519,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ * because the bulk of the interrupt processing is run as a workqueue
+ * job in thread context.
+ */
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+
+ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+
+@@ -541,36 +528,37 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+ /* Enable enhanced features */
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_ENABLE_BIT,
+ SC16IS7XX_EFR_ENABLE_BIT);
+
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Put LCR back to the normal mode */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+- mutex_unlock(&s->efr_lock);
++ mutex_unlock(&one->efr_lock);
+
++ /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_CLKSEL_BIT,
+- prescaler);
++ prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT);
+
+ /* Open the LCR divisors for configuration */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_A);
+
+ /* Write the new divisor */
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_port_write(port, SC16IS7XX_DLH_REG, div / 256);
+ sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Put LCR back to the normal mode */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+- return DIV_ROUND_CLOSEST(clk / 16, div);
++ return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div);
+ }
+
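The sc16is7xx_set_baud() rework above turns the old MCR_CLKSEL special case into an explicit prescaler: divisor = (XTAL / prescaler) / (baud x 16), with the divide-by-4 prescaler engaged only once the 16-bit DLH:DLL divisor field would overflow, and the returned actual rate now accounts for the prescaler. A stand-alone sketch of the arithmetic (the clock and baud values are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned long clk = 80000000;       /* illustrative XTAL1 input */
    unsigned long baud = 50;
    unsigned long prescaler = 1;
    unsigned long div = clk / 16 / baud;        /* 100000 */

    if (div >= (1UL << 16)) {           /* won't fit in DLH:DLL */
        prescaler = 4;
        div /= prescaler;               /* 25000 */
    }
    /* Rounded actual rate, as sc16is7xx_set_baud() now returns. */
    printf("div=%lu prescaler=%lu actual=%lu\n", div, prescaler,
           (clk / prescaler / 16 + div / 2) / div);  /* actual=50 */
    return 0;
}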
+ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
+@@ -667,9 +655,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_stop_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+@@ -695,13 +683,15 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ sc16is7xx_fifo_write(port, to_send);
+ }
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ sc16is7xx_stop_tx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ else
++ sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+@@ -719,11 +709,10 @@ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
+ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ {
+ struct uart_port *port = &one->port;
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ unsigned long flags;
+ unsigned int status, changed;
+
+- lockdep_assert_held_once(&s->efr_lock);
++ lockdep_assert_held_once(&one->efr_lock);
+
+ status = sc16is7xx_get_hwmctrl(port);
+ changed = status ^ one->old_mctrl;
+@@ -733,7 +722,7 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+
+ one->old_mctrl = status;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if ((changed & TIOCM_RNG) && (status & TIOCM_RNG))
+ port->icount.rng++;
+ if (changed & TIOCM_DSR)
+@@ -744,67 +733,82 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
+ uart_handle_cts_change(port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ {
++ bool rc = true;
++ unsigned int iir, rxlen;
+ struct uart_port *port = &s->p[portno].port;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+- do {
+- unsigned int iir, rxlen;
+- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+-
+- iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
+- if (iir & SC16IS7XX_IIR_NO_INT_BIT)
+- return false;
+-
+- iir &= SC16IS7XX_IIR_ID_MASK;
+-
+- switch (iir) {
+- case SC16IS7XX_IIR_RDI_SRC:
+- case SC16IS7XX_IIR_RLSE_SRC:
+- case SC16IS7XX_IIR_RTOI_SRC:
+- case SC16IS7XX_IIR_XOFFI_SRC:
+- rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+- if (rxlen)
+- sc16is7xx_handle_rx(port, rxlen, iir);
+- break;
++ mutex_lock(&one->efr_lock);
++
++ iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
++ if (iir & SC16IS7XX_IIR_NO_INT_BIT) {
++ rc = false;
++ goto out_port_irq;
++ }
++
++ iir &= SC16IS7XX_IIR_ID_MASK;
++
++ switch (iir) {
++ case SC16IS7XX_IIR_RDI_SRC:
++ case SC16IS7XX_IIR_RLSE_SRC:
++ case SC16IS7XX_IIR_RTOI_SRC:
++ case SC16IS7XX_IIR_XOFFI_SRC:
++ rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
++
++ /*
++ * There is a silicon bug that makes the chip report a
++ * time-out interrupt but no data in the FIFO. This is
++ * described in errata section 18.1.4.
++ *
++ * When this happens, read one byte from the FIFO to
++ * clear the interrupt.
++ */
++ if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
++ rxlen = 1;
++
++ if (rxlen)
++ sc16is7xx_handle_rx(port, rxlen, iir);
++ break;
+ /* CTSRTS interrupt comes only when CTS goes inactive */
+- case SC16IS7XX_IIR_CTSRTS_SRC:
+- case SC16IS7XX_IIR_MSI_SRC:
+- sc16is7xx_update_mlines(one);
+- break;
+- case SC16IS7XX_IIR_THRI_SRC:
+- sc16is7xx_handle_tx(port);
+- break;
+- default:
+- dev_err_ratelimited(port->dev,
+- "ttySC%i: Unexpected interrupt: %x",
+- port->line, iir);
+- break;
+- }
+- } while (0);
+- return true;
++ case SC16IS7XX_IIR_CTSRTS_SRC:
++ case SC16IS7XX_IIR_MSI_SRC:
++ sc16is7xx_update_mlines(one);
++ break;
++ case SC16IS7XX_IIR_THRI_SRC:
++ sc16is7xx_handle_tx(port);
++ break;
++ default:
++ dev_err_ratelimited(port->dev,
++ "ttySC%i: Unexpected interrupt: %x",
++ port->line, iir);
++ break;
++ }
++
++out_port_irq:
++ mutex_unlock(&one->efr_lock);
++
++ return rc;
+ }
+
+ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+ {
+- struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
++ bool keep_polling;
+
+- mutex_lock(&s->efr_lock);
++ struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
+
+- while (1) {
+- bool keep_polling = false;
++ do {
+ int i;
+
++ keep_polling = false;
++
+ for (i = 0; i < s->devtype->nr_uart; ++i)
+ keep_polling |= sc16is7xx_port_irq(s, i);
+- if (!keep_polling)
+- break;
+- }
+-
+- mutex_unlock(&s->efr_lock);
++ } while (keep_polling);
+
+ return IRQ_HANDLED;
+ }
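Two things happen in the interrupt hunks above: per-port IRQ handling moves under the per-channel efr_lock, and the errata 18.1.4 workaround forces a one-byte FIFO read when a receive time-out interrupt arrives with RXLVL of zero, which is the documented way to clear the spurious interrupt. The workaround in isolation, with placeholder names:

#include <stdio.h>

enum { IIR_RDI = 1, IIR_RTOI = 2 };     /* illustrative IDs */

/* Errata 18.1.4: a time-out IRQ with an empty FIFO must still read
 * one byte, otherwise the interrupt never clears. */
static unsigned int effective_rxlen(int iir, unsigned int rxlvl)
{
    if (iir == IIR_RTOI && rxlvl == 0)
        return 1;
    return rxlvl;
}

int main(void)
{
    printf("%u\n", effective_rxlen(IIR_RTOI, 0));  /* 1, not 0 */
    printf("%u\n", effective_rxlen(IIR_RDI, 8));   /* 8 */
    return 0;
}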
+@@ -812,20 +816,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+ static void sc16is7xx_tx_proc(struct kthread_work *ws)
+ {
+ struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+- unsigned long flags;
++ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ (port->rs485.delay_rts_before_send > 0))
+ msleep(port->rs485.delay_rts_before_send);
+
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+ sc16is7xx_handle_tx(port);
+- mutex_unlock(&s->efr_lock);
+-
+- spin_lock_irqsave(&port->lock, flags);
+- sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ mutex_unlock(&one->efr_lock);
+ }
+
+ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+@@ -836,14 +835,14 @@ static void sc16is7xx_reconf_rs485(struct uart_port *port)
+ struct serial_rs485 *rs485 = &port->rs485;
+ unsigned long irqflags;
+
+- spin_lock_irqsave(&port->lock, irqflags);
++ uart_port_lock_irqsave(port, &irqflags);
+ if (rs485->flags & SER_RS485_ENABLED) {
+ efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
+
+ if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
+ }
+- spin_unlock_irqrestore(&port->lock, irqflags);
++ uart_port_unlock_irqrestore(port, irqflags);
+
+ sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
+ }
+@@ -854,10 +853,10 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
+ struct sc16is7xx_one_config config;
+ unsigned long irqflags;
+
+- spin_lock_irqsave(&one->port.lock, irqflags);
++ uart_port_lock_irqsave(&one->port, &irqflags);
+ config = one->config;
+ memset(&one->config, 0, sizeof(one->config));
+- spin_unlock_irqrestore(&one->port.lock, irqflags);
++ uart_port_unlock_irqrestore(&one->port, irqflags);
+
+ if (config.flags & SC16IS7XX_RECONF_MD) {
+ u8 mcr = 0;
+@@ -928,9 +927,9 @@ static void sc16is7xx_ms_proc(struct kthread_work *ws)
+ struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev);
+
+ if (one->port.state) {
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+ sc16is7xx_update_mlines(one);
+- mutex_unlock(&s->efr_lock);
++ mutex_unlock(&one->efr_lock);
+
+ kthread_queue_delayed_work(&s->kworker, &one->ms_work, HZ);
+ }
+@@ -963,18 +962,18 @@ static void sc16is7xx_throttle(struct uart_port *port)
+ * value set in MCR register. Stop reading data from RX FIFO so the
+ * AutoRTS feature will de-activate RTS output.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void sc16is7xx_unthrottle(struct uart_port *port)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_ier_set(port, SC16IS7XX_IER_RDI_BIT);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
+@@ -1014,7 +1013,6 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
+ {
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ unsigned int lcr, flow = 0;
+ int baud;
+@@ -1073,13 +1071,13 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
+
+ /* As above, claim the mutex while accessing the EFR. */
+- mutex_lock(&s->efr_lock);
++ mutex_lock(&one->efr_lock);
+
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+ /* Configure flow control */
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
+ sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
+
+@@ -1098,12 +1096,12 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_FLOWCTRL_BITS,
+ flow);
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Update LCR register */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+
+- mutex_unlock(&s->efr_lock);
++ mutex_unlock(&one->efr_lock);
+
+ /* Get baud rate generator configuration */
+ baud = uart_get_baud_rate(port, termios, old,
+@@ -1113,7 +1111,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ /* Setup baudrate generator */
+ baud = sc16is7xx_set_baud(port, baud);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /* Update timeout according to new baud rate */
+ uart_update_timeout(port, termios->c_cflag, baud);
+@@ -1121,7 +1119,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ sc16is7xx_enable_ms(port);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termios,
+@@ -1149,7 +1147,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
+ static int sc16is7xx_startup(struct uart_port *port)
+ {
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ unsigned int val;
+ unsigned long flags;
+
+@@ -1166,7 +1163,7 @@ static int sc16is7xx_startup(struct uart_port *port)
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(one->regmap, true);
+
+ /* Enable write access to enhanced features and internal clock div */
+ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+@@ -1184,7 +1181,7 @@ static int sc16is7xx_startup(struct uart_port *port)
+ SC16IS7XX_TCR_RX_RESUME(24) |
+ SC16IS7XX_TCR_RX_HALT(48));
+
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(one->regmap, false);
+
+ /* Now, initialize the UART */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+@@ -1208,9 +1205,9 @@ static int sc16is7xx_startup(struct uart_port *port)
+ sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
+
+ /* Enable modem status polling */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ sc16is7xx_enable_ms(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ return 0;
+ }
+@@ -1412,7 +1409,8 @@ static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
+ /*
+ * Configure ports designated to operate as modem control lines.
+ */
+-static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
++static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s,
++ struct regmap *regmap)
+ {
+ int i;
+ int ret;
+@@ -1441,8 +1439,8 @@ static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
+
+ if (s->mctrl_mask)
+ regmap_update_bits(
+- s->regmap,
+- SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
++ regmap,
++ SC16IS7XX_IOCONTROL_REG,
+ SC16IS7XX_IOCONTROL_MODEM_A_BIT |
+ SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
+
+@@ -1457,7 +1455,7 @@ static const struct serial_rs485 sc16is7xx_rs485_supported = {
+
+ static int sc16is7xx_probe(struct device *dev,
+ const struct sc16is7xx_devtype *devtype,
+- struct regmap *regmap, int irq)
++ struct regmap *regmaps[], int irq)
+ {
+ unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
+ unsigned int val;
+@@ -1465,16 +1463,20 @@ static int sc16is7xx_probe(struct device *dev,
+ int i, ret;
+ struct sc16is7xx_port *s;
+
+- if (IS_ERR(regmap))
+- return PTR_ERR(regmap);
++ for (i = 0; i < devtype->nr_uart; i++)
++ if (IS_ERR(regmaps[i]))
++ return PTR_ERR(regmaps[i]);
+
+ /*
+ * This device does not have an identification register that would
+ * tell us if we are really connected to the correct device.
+ * The best we can do is to check if communication is at all possible.
++ *
++ * Note: regmap[0] is used in the probe function to access registers
++ * common to all channels/ports, as it is guaranteed to be present on
++ * all variants.
+ */
+- ret = regmap_read(regmap,
+- SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
++ ret = regmap_read(regmaps[0], SC16IS7XX_LSR_REG, &val);
+ if (ret < 0)
+ return -EPROBE_DEFER;
+
+@@ -1508,10 +1510,8 @@ static int sc16is7xx_probe(struct device *dev,
+ return -EINVAL;
+ }
+
+- s->regmap = regmap;
+ s->devtype = devtype;
+ dev_set_drvdata(dev, s);
+- mutex_init(&s->efr_lock);
+
+ kthread_init_worker(&s->kworker);
+ s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
+@@ -1523,11 +1523,17 @@ static int sc16is7xx_probe(struct device *dev,
+ sched_set_fifo(s->kworker_task);
+
+ /* reset device, purging any pending irq / data */
+- regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+- SC16IS7XX_IOCONTROL_SRESET_BIT);
++ regmap_write(regmaps[0], SC16IS7XX_IOCONTROL_REG,
++ SC16IS7XX_IOCONTROL_SRESET_BIT);
+
+ for (i = 0; i < devtype->nr_uart; ++i) {
+- s->p[i].line = i;
++ s->p[i].port.line = find_first_zero_bit(&sc16is7xx_lines,
++ SC16IS7XX_MAX_DEVS);
++ if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
++ ret = -ERANGE;
++ goto out_ports;
++ }
++
+ /* Initialize port data */
+ s->p[i].port.dev = dev;
+ s->p[i].port.irq = irq;
+@@ -1547,12 +1553,9 @@ static int sc16is7xx_probe(struct device *dev,
+ s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
+ s->p[i].port.ops = &sc16is7xx_ops;
+ s->p[i].old_mctrl = 0;
+- s->p[i].port.line = sc16is7xx_alloc_line();
++ s->p[i].regmap = regmaps[i];
+
+- if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+- ret = -ENOMEM;
+- goto out_ports;
+- }
++ mutex_init(&s->p[i].efr_lock);
+
+ ret = uart_get_rs485_mode(&s->p[i].port);
+ if (ret)
+@@ -1569,20 +1572,25 @@ static int sc16is7xx_probe(struct device *dev,
+ kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+ kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc);
++
+ /* Register port */
+- uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
++ ret = uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
++ if (ret)
++ goto out_ports;
++
++ set_bit(s->p[i].port.line, &sc16is7xx_lines);
+
+ /* Enable EFR */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
+ SC16IS7XX_LCR_CONF_MODE_B);
+
+- regcache_cache_bypass(s->regmap, true);
++ regcache_cache_bypass(regmaps[i], true);
+
+ /* Enable write access to enhanced features */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_ENABLE_BIT);
+
+- regcache_cache_bypass(s->regmap, false);
++ regcache_cache_bypass(regmaps[i], false);
+
+ /* Restore access to general registers */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
+@@ -1602,7 +1610,7 @@ static int sc16is7xx_probe(struct device *dev,
+ s->p[u].irda_mode = true;
+ }
+
+- ret = sc16is7xx_setup_mctrl_ports(s);
++ ret = sc16is7xx_setup_mctrl_ports(s, regmaps[0]);
+ if (ret)
+ goto out_ports;
+
+@@ -1637,10 +1645,9 @@ static int sc16is7xx_probe(struct device *dev,
+ #endif
+
+ out_ports:
+- for (i--; i >= 0; i--) {
+- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+- }
++ for (i = 0; i < devtype->nr_uart; i++)
++ if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
++ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+
+ kthread_stop(s->kworker_task);
+
+@@ -1662,8 +1669,8 @@ static void sc16is7xx_remove(struct device *dev)
+
+ for (i = 0; i < s->devtype->nr_uart; i++) {
+ kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
+- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
++ if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
++ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ sc16is7xx_power(&s->p[i].port, 0);
+ }
+
+@@ -1685,26 +1692,52 @@ static const struct of_device_id __maybe_unused sc16is7xx_dt_ids[] = {
+ MODULE_DEVICE_TABLE(of, sc16is7xx_dt_ids);
+
+ static struct regmap_config regcfg = {
+- .reg_bits = 7,
+- .pad_bits = 1,
++ .reg_bits = 5,
++ .pad_bits = 3,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = sc16is7xx_regmap_volatile,
+ .precious_reg = sc16is7xx_regmap_precious,
++ .writeable_noinc_reg = sc16is7xx_regmap_noinc,
++ .readable_noinc_reg = sc16is7xx_regmap_noinc,
++ .max_raw_read = SC16IS7XX_FIFO_SIZE,
++ .max_raw_write = SC16IS7XX_FIFO_SIZE,
++ .max_register = SC16IS7XX_EFCR_REG,
+ };
+
++static const char *sc16is7xx_regmap_name(u8 port_id)
++{
++ switch (port_id) {
++ case 0: return "port0";
++ case 1: return "port1";
++ default:
++ WARN_ON(true);
++ return NULL;
++ }
++}
++
++static unsigned int sc16is7xx_regmap_port_mask(unsigned int port_id)
++{
++ /* CH1,CH0 are at bits 2:1. */
++ return port_id << 1;
++}
++
+ #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ {
+ const struct sc16is7xx_devtype *devtype;
+- struct regmap *regmap;
++ struct regmap *regmaps[2];
++ unsigned int i;
+ int ret;
+
+ /* Setup SPI bus */
+ spi->bits_per_word = 8;
+- /* only supports mode 0 on SC16IS762 */
++ /* For all variants, only mode 0 is supported */
++ if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
++ return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");
++
+ spi->mode = spi->mode ? : SPI_MODE_0;
+- spi->max_speed_hz = spi->max_speed_hz ? : 15000000;
++ spi->max_speed_hz = spi->max_speed_hz ? : 4 * HZ_PER_MHZ;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+@@ -1719,11 +1752,20 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
+ devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
+ }
+
+- regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
+- (devtype->nr_uart - 1);
+- regmap = devm_regmap_init_spi(spi, &regcfg);
++ for (i = 0; i < devtype->nr_uart; i++) {
++ regcfg.name = sc16is7xx_regmap_name(i);
++ /*
++ * If read_flag_mask is 0, the regmap code sets it to a default
++ * of 0x80. Since we specify our own mask, we must add the READ
++ * bit ourselves:
++ */
++ regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i) |
++ SC16IS7XX_SPI_READ_BIT;
++ regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
++ regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
++ }
+
+- return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
++ return sc16is7xx_probe(&spi->dev, devtype, regmaps, spi->irq);
+ }
+
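With one regmap per channel, the channel select bits no longer have to be folded into every register address. As the new regmap config reads, the SC16IS7xx subaddress byte carries the register in its upper bits (reg_bits = 5 plus pad_bits = 3 shifts it left by three), the channel in bits 2:1, and on SPI a read flag in bit 7; the per-regmap read/write flag masks supply the last two. A sketch of how the wire byte is assembled under that reading:

#include <stdio.h>

/* Build the SPI subaddress byte: register address shifted left by 3
 * (5 reg bits + 3 pad bits), CH1,CH0 at bits 2:1, bit 7 for reads.
 * Mirrors the flag masks set up in sc16is7xx_spi_probe(). */
static unsigned char spi_cmd(unsigned char reg, unsigned char chan,
                             int is_read)
{
    unsigned char cmd = (unsigned char)(reg << 3);  /* pad_bits = 3 */

    cmd |= (unsigned char)(chan << 1);  /* sc16is7xx_regmap_port_mask */
    if (is_read)
        cmd |= 0x80;                    /* SC16IS7XX_SPI_READ_BIT */
    return cmd;
}

int main(void)
{
    /* e.g. read LSR (0x05) on channel 1: 0x80 | 0x28 | 0x02 */
    printf("0x%02x\n", spi_cmd(0x05, 1, 1));   /* prints 0xaa */
    return 0;
}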
+ static void sc16is7xx_spi_remove(struct spi_device *spi)
+@@ -1762,7 +1804,8 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
+ {
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
+ const struct sc16is7xx_devtype *devtype;
+- struct regmap *regmap;
++ struct regmap *regmaps[2];
++ unsigned int i;
+
+ if (i2c->dev.of_node) {
+ devtype = device_get_match_data(&i2c->dev);
+@@ -1772,11 +1815,14 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
+ devtype = (struct sc16is7xx_devtype *)id->driver_data;
+ }
+
+- regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
+- (devtype->nr_uart - 1);
+- regmap = devm_regmap_init_i2c(i2c, &regcfg);
++ for (i = 0; i < devtype->nr_uart; i++) {
++ regcfg.name = sc16is7xx_regmap_name(i);
++ regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i);
++ regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
++ regmaps[i] = devm_regmap_init_i2c(i2c, &regcfg);
++ }
+
+- return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
++ return sc16is7xx_probe(&i2c->dev, devtype, regmaps, i2c->irq);
+ }
+
+ static void sc16is7xx_i2c_remove(struct i2c_client *client)
+diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
+index c74c548f0db62a..b6c38d2edfd401 100644
+--- a/drivers/tty/serial/serial_base.h
++++ b/drivers/tty/serial/serial_base.h
+@@ -22,6 +22,7 @@ struct serial_ctrl_device {
+ struct serial_port_device {
+ struct device dev;
+ struct uart_port *port;
++ unsigned int tx_enabled:1;
+ };
+
+ int serial_base_ctrl_init(void);
+@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
+ int serial_base_port_init(void);
+ void serial_base_port_exit(void);
+
++void serial_base_port_startup(struct uart_port *port);
++void serial_base_port_shutdown(struct uart_port *port);
++
+ int serial_base_driver_register(struct device_driver *driver);
+ void serial_base_driver_unregister(struct device_driver *driver);
+
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index d5ba6e90bd95ff..8ff0efac6aa0d3 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -146,7 +146,7 @@ static void __uart_start(struct uart_state *state)
+
+ /* Increment the runtime PM usage count for the active check below */
+ err = pm_runtime_get(&port_dev->dev);
+- if (err < 0) {
++ if (err < 0 && err != -EINPROGRESS) {
+ pm_runtime_put_noidle(&port_dev->dev);
+ return;
+ }
+@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
+ bool init_hw)
+ {
+ struct tty_port *port = &state->port;
++ struct uart_port *uport;
+ int retval;
+
+ if (tty_port_initialized(port))
+- return 0;
++ goto out_base_port_startup;
+
+ retval = uart_port_startup(tty, state, init_hw);
+- if (retval)
++ if (retval) {
+ set_bit(TTY_IO_ERROR, &tty->flags);
++ return retval;
++ }
+
+- return retval;
++out_base_port_startup:
++ uport = uart_port_check(state);
++ if (!uport)
++ return -EIO;
++
++ serial_base_port_startup(uport);
++
++ return 0;
+ }
+
+ /*
+@@ -355,20 +365,25 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
+ if (tty)
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
++ if (uport)
++ serial_base_port_shutdown(uport);
++
+ if (tty_port_initialized(port)) {
+ tty_port_set_initialized(port, false);
+
+ /*
+ * Turn off DTR and RTS early.
+ */
+- if (uport && uart_console(uport) && tty) {
+- uport->cons->cflag = tty->termios.c_cflag;
+- uport->cons->ispeed = tty->termios.c_ispeed;
+- uport->cons->ospeed = tty->termios.c_ospeed;
+- }
++ if (uport) {
++ if (uart_console(uport) && tty) {
++ uport->cons->cflag = tty->termios.c_cflag;
++ uport->cons->ispeed = tty->termios.c_ispeed;
++ uport->cons->ospeed = tty->termios.c_ospeed;
++ }
+
+- if (!tty || C_HUPCL(tty))
+- uart_port_dtr_rts(uport, false);
++ if (!tty || C_HUPCL(tty))
++ uart_port_dtr_rts(uport, false);
++ }
+
+ uart_port_shutdown(port);
+ }
+@@ -863,6 +878,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
+ new_flags = (__force upf_t)new_info->flags;
+ old_custom_divisor = uport->custom_divisor;
+
++ if (!(uport->flags & UPF_FIXED_PORT)) {
++ unsigned int uartclk = new_info->baud_base * 16;
++ /* check needs to be done here before other settings made */
++ if (uartclk == 0) {
++ retval = -EINVAL;
++ goto exit;
++ }
++ }
+ if (!capable(CAP_SYS_ADMIN)) {
+ retval = -EPERM;
+ if (change_irq || change_port ||
+@@ -1370,19 +1393,27 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4
+ return;
+ }
+
++ rs485->flags &= supported_flags;
++
+ /* Pick sane settings if the user hasn't */
+- if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
+- !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
++ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+- dev_warn_ratelimited(port->dev,
+- "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+- port->name, port->line);
+- rs485->flags |= SER_RS485_RTS_ON_SEND;
+- rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+- supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+- }
++ if (supported_flags & SER_RS485_RTS_ON_SEND) {
++ rs485->flags |= SER_RS485_RTS_ON_SEND;
++ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+
+- rs485->flags &= supported_flags;
++ dev_warn_ratelimited(port->dev,
++ "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
++ port->name, port->line);
++ } else {
++ rs485->flags |= SER_RS485_RTS_AFTER_SEND;
++ rs485->flags &= ~SER_RS485_RTS_ON_SEND;
++
++ dev_warn_ratelimited(port->dev,
++ "%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n",
++ port->name, port->line);
++ }
++ }
+
+ uart_sanitize_serial_rs485_delays(port, rs485);
+
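The sanitizer rework above masks unsupported flags first, then enforces the invariant that exactly one of RTS_ON_SEND / RTS_AFTER_SEND is set, preferring whichever polarity the driver supports instead of unconditionally forcing RTS_ON_SEND as before. The decision reduces to something like the following stand-alone model (flag values are illustrative, not the uapi constants):

#include <stdio.h>

#define RTS_ON_SEND    (1u << 0)
#define RTS_AFTER_SEND (1u << 1)

/* Keep exactly one RTS polarity flag, preferring a supported one. */
static unsigned int sanitize_rts(unsigned int flags, unsigned int supported)
{
    flags &= supported;
    if (!(flags & RTS_ON_SEND) == !(flags & RTS_AFTER_SEND)) {
        /* both set or both clear: pick a sane default */
        if (supported & RTS_ON_SEND)
            flags = (flags & ~RTS_AFTER_SEND) | RTS_ON_SEND;
        else
            flags = (flags & ~RTS_ON_SEND) | RTS_AFTER_SEND;
    }
    return flags;
}

int main(void)
{
    printf("%u\n", sanitize_rts(0, RTS_AFTER_SEND));          /* 2 */
    printf("%u\n", sanitize_rts(RTS_ON_SEND | RTS_AFTER_SEND,
                                RTS_ON_SEND));                /* 1 */
    return 0;
}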
+@@ -1401,6 +1432,16 @@ static void uart_set_rs485_termination(struct uart_port *port,
+ !!(rs485->flags & SER_RS485_TERMINATE_BUS));
+ }
+
++static void uart_set_rs485_rx_during_tx(struct uart_port *port,
++ const struct serial_rs485 *rs485)
++{
++ if (!(rs485->flags & SER_RS485_ENABLED))
++ return;
++
++ gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
++ !!(rs485->flags & SER_RS485_RX_DURING_TX));
++}
++
+ static int uart_rs485_config(struct uart_port *port)
+ {
+ struct serial_rs485 *rs485 = &port->rs485;
+@@ -1412,12 +1453,17 @@ static int uart_rs485_config(struct uart_port *port)
+
+ uart_sanitize_serial_rs485(port, rs485);
+ uart_set_rs485_termination(port, rs485);
++ uart_set_rs485_rx_during_tx(port, rs485);
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = port->rs485_config(port, NULL, rs485);
+ spin_unlock_irqrestore(&port->lock, flags);
+- if (ret)
++ if (ret) {
+ memset(rs485, 0, sizeof(*rs485));
++ /* unset GPIOs */
++ gpiod_set_value_cansleep(port->rs485_term_gpio, 0);
++ gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio, 0);
++ }
+
+ return ret;
+ }
+@@ -1445,7 +1491,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
+ int ret;
+ unsigned long flags;
+
+- if (!port->rs485_config)
++ if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
+ return -ENOTTY;
+
+ if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
+@@ -1456,6 +1502,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
+ return ret;
+ uart_sanitize_serial_rs485(port, &rs485);
+ uart_set_rs485_termination(port, &rs485);
++ uart_set_rs485_rx_during_tx(port, &rs485);
+
+ spin_lock_irqsave(&port->lock, flags);
+ ret = port->rs485_config(port, &tty->termios, &rs485);
+@@ -1467,8 +1514,14 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
+ port->ops->set_mctrl(port, port->mctrl);
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+- if (ret)
++ if (ret) {
++ /* restore old GPIO settings */
++ gpiod_set_value_cansleep(port->rs485_term_gpio,
++ !!(port->rs485.flags & SER_RS485_TERMINATE_BUS));
++ gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
++ !!(port->rs485.flags & SER_RS485_RX_DURING_TX));
+ return ret;
++ }
+
+ if (copy_to_user(rs485_user, &port->rs485, sizeof(port->rs485)))
+ return -EFAULT;
+@@ -1739,6 +1792,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
+ uport->ops->stop_rx(uport);
+ spin_unlock_irq(&uport->lock);
+
++ serial_base_port_shutdown(uport);
+ uart_port_shutdown(port);
+
+ /*
+@@ -1752,6 +1806,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
+ * Free the transmit buffer.
+ */
+ spin_lock_irq(&uport->lock);
++ uart_circ_clear(&state->xmit);
+ buf = state->xmit.buf;
+ state->xmit.buf = NULL;
+ spin_unlock_irq(&uport->lock);
+@@ -2572,7 +2627,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ port->type = PORT_UNKNOWN;
+ flags |= UART_CONFIG_TYPE;
+ }
++ /* Synchronize with possible boot console. */
++ if (uart_console(port))
++ console_lock();
+ port->ops->config_port(port, flags);
++ if (uart_console(port))
++ console_unlock();
+ }
+
+ if (port->type != PORT_UNKNOWN) {
+@@ -2580,6 +2640,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+
+ uart_report_port(drv, port);
+
++ /* Synchronize with possible boot console. */
++ if (uart_console(port))
++ console_lock();
++
+ /* Power up port for set_mctrl() */
+ uart_change_pm(state, UART_PM_STATE_ON);
+
+@@ -2596,6 +2660,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+
+ uart_rs485_config(port);
+
++ if (uart_console(port))
++ console_unlock();
++
+ /*
+ * If this driver supports console, and it hasn't been
+ * successfully registered yet, try to re-register it.
+@@ -2629,13 +2696,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
+ int ret = 0;
+
+ tport = &state->port;
+- mutex_lock(&tport->mutex);
++
++ guard(mutex)(&tport->mutex);
+
+ port = uart_port_check(state);
+- if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) {
+- ret = -1;
+- goto out;
+- }
++ if (!port || port->type == PORT_UNKNOWN ||
++ !(port->ops->poll_get_char && port->ops->poll_put_char))
++ return -1;
+
+ pm_state = state->pm_state;
+ uart_change_pm(state, UART_PM_STATE_ON);
+@@ -2655,10 +2722,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
+ ret = uart_set_options(port, NULL, baud, parity, bits, flow);
+ console_list_unlock();
+ }
+-out:
++
+ if (ret)
+ uart_change_pm(state, pm_state);
+- mutex_unlock(&tport->mutex);
++
+ return ret;
+ }
+
+@@ -3564,9 +3631,13 @@ int uart_get_rs485_mode(struct uart_port *port)
+ {
+ struct serial_rs485 *rs485conf = &port->rs485;
+ struct device *dev = port->dev;
++ enum gpiod_flags dflags;
++ struct gpio_desc *desc;
+ u32 rs485_delay[2];
+ int ret;
+- int rx_during_tx_gpio_flag;
++
++ if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
++ return 0;
+
+ ret = device_property_read_u32_array(dev, "rs485-rts-delay",
+ rs485_delay, 2);
+@@ -3605,26 +3676,21 @@ int uart_get_rs485_mode(struct uart_port *port)
+ * bus participants enable it, no communication is possible at all.
+ * Works fine for short cables and users may enable for longer cables.
+ */
+- port->rs485_term_gpio = devm_gpiod_get_optional(dev, "rs485-term",
+- GPIOD_OUT_LOW);
+- if (IS_ERR(port->rs485_term_gpio)) {
+- ret = PTR_ERR(port->rs485_term_gpio);
+- port->rs485_term_gpio = NULL;
+- return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
+- }
++ desc = devm_gpiod_get_optional(dev, "rs485-term", GPIOD_OUT_LOW);
++ if (IS_ERR(desc))
++ return dev_err_probe(dev, PTR_ERR(desc), "Cannot get rs485-term-gpios\n");
++ port->rs485_term_gpio = desc;
+ if (port->rs485_term_gpio)
+ port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
+
+- rx_during_tx_gpio_flag = (rs485conf->flags & SER_RS485_RX_DURING_TX) ?
+- GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+- port->rs485_rx_during_tx_gpio = devm_gpiod_get_optional(dev,
+- "rs485-rx-during-tx",
+- rx_during_tx_gpio_flag);
+- if (IS_ERR(port->rs485_rx_during_tx_gpio)) {
+- ret = PTR_ERR(port->rs485_rx_during_tx_gpio);
+- port->rs485_rx_during_tx_gpio = NULL;
+- return dev_err_probe(dev, ret, "Cannot get rs485-rx-during-tx-gpios\n");
+- }
++ dflags = (rs485conf->flags & SER_RS485_RX_DURING_TX) ?
++ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
++ desc = devm_gpiod_get_optional(dev, "rs485-rx-during-tx", dflags);
++ if (IS_ERR(desc))
++ return dev_err_probe(dev, PTR_ERR(desc), "Cannot get rs485-rx-during-tx-gpios\n");
++ port->rs485_rx_during_tx_gpio = desc;
++ if (port->rs485_rx_during_tx_gpio)
++ port->rs485_supported.flags |= SER_RS485_RX_DURING_TX;
+
+ return 0;
+ }
+diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
+index 86242323700708..469ad26cde4870 100644
+--- a/drivers/tty/serial/serial_port.c
++++ b/drivers/tty/serial/serial_port.c
+@@ -8,7 +8,10 @@
+
+ #include <linux/device.h>
+ #include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/property.h>
+ #include <linux/serial_core.h>
+ #include <linux/spinlock.h>
+
+@@ -36,8 +39,12 @@ static int serial_port_runtime_resume(struct device *dev)
+
+ /* Flush any pending TX for the port */
+ spin_lock_irqsave(&port->lock, flags);
++ if (!port_dev->tx_enabled)
++ goto unlock;
+ if (__serial_port_busy(port))
+ port->ops->start_tx(port);
++
++unlock:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ out:
+@@ -46,8 +53,68 @@ static int serial_port_runtime_resume(struct device *dev)
+ return 0;
+ }
+
++static int serial_port_runtime_suspend(struct device *dev)
++{
++ struct serial_port_device *port_dev = to_serial_base_port_device(dev);
++ struct uart_port *port = port_dev->port;
++ unsigned long flags;
++ bool busy;
++
++ if (port->flags & UPF_DEAD)
++ return 0;
++
++ /*
++ * Nothing to do on pm_runtime_force_suspend(), see
++ * DEFINE_RUNTIME_DEV_PM_OPS.
++ */
++ if (!pm_runtime_enabled(dev))
++ return 0;
++
++ uart_port_lock_irqsave(port, &flags);
++ if (!port_dev->tx_enabled) {
++ uart_port_unlock_irqrestore(port, flags);
++ return 0;
++ }
++
++ busy = __serial_port_busy(port);
++ if (busy)
++ port->ops->start_tx(port);
++ uart_port_unlock_irqrestore(port, flags);
++
++ if (busy)
++ pm_runtime_mark_last_busy(dev);
++
++ return busy ? -EBUSY : 0;
++}
++
++static void serial_base_port_set_tx(struct uart_port *port,
++ struct serial_port_device *port_dev,
++ bool enabled)
++{
++ unsigned long flags;
++
++ uart_port_lock_irqsave(port, &flags);
++ port_dev->tx_enabled = enabled;
++ uart_port_unlock_irqrestore(port, flags);
++}
++
++void serial_base_port_startup(struct uart_port *port)
++{
++ struct serial_port_device *port_dev = port->port_dev;
++
++ serial_base_port_set_tx(port, port_dev, true);
++}
++
++void serial_base_port_shutdown(struct uart_port *port)
++{
++ struct serial_port_device *port_dev = port->port_dev;
++
++ serial_base_port_set_tx(port, port_dev, false);
++}
++
+ static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
+- NULL, serial_port_runtime_resume, NULL);
++ serial_port_runtime_suspend,
++ serial_port_runtime_resume, NULL);
+
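The new suspend callback pairs with the tx_enabled flag added to serial_port_device: startup/shutdown flip the flag, so a closed port always suspends, while an open port with queued TX kicks start_tx() once more and refuses with -EBUSY so runtime PM retries later. The contract, stripped of locking and the UPF_DEAD check, can be modelled as:

#include <errno.h>
#include <stdio.h>

struct model_port { int tx_enabled; int chars_pending; };

static void start_tx(struct model_port *p) { (void)p; /* kick TX */ }

/* Simplified suspend rule from the patch. */
static int runtime_suspend(struct model_port *p)
{
    if (!p->tx_enabled)
        return 0;               /* port shut down: suspend at once */
    if (p->chars_pending) {
        start_tx(p);            /* restart TX before refusing */
        return -EBUSY;
    }
    return 0;
}

int main(void)
{
    struct model_port closed = { 0, 5 }, busy = { 1, 5 }, idle = { 1, 0 };

    printf("%d %d %d\n", runtime_suspend(&closed),
           runtime_suspend(&busy), runtime_suspend(&idle));
    return 0;                   /* prints: 0 -16 0 */
}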
+ static int serial_port_probe(struct device *dev)
+ {
+@@ -82,6 +149,148 @@ void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
+ }
+ EXPORT_SYMBOL(uart_remove_one_port);
+
++/**
++ * __uart_read_properties - read firmware properties of the given UART port
++ * @port: corresponding port
++ * @use_defaults: apply defaults (when %true) or validate the values (when %false)
++ *
++ * The following device properties are supported:
++ * - clock-frequency (optional)
++ * - fifo-size (optional)
++ * - no-loopback-test (optional)
++ * - reg-shift (defaults may apply)
++ * - reg-offset (value may be validated)
++ * - reg-io-width (defaults may apply or value may be validated)
++ * - interrupts (OF only)
++ * - serial [alias ID] (OF only)
++ *
++ * If the port->dev is of struct platform_device type the interrupt line
++ * will be retrieved via platform_get_irq() call against that device.
++ * Otherwise it will be assigned by fwnode_irq_get() call. In both cases
++ * the index 0 of the resource is used.
++ *
++ * The caller is responsible to initialize the following fields of the @port
++ * ->dev (must be valid)
++ * ->flags
++ * ->mapbase
++ * ->mapsize
++ * ->regshift (if @use_defaults is false)
++ * before calling this function. Alternatively the above mentioned fields
++ * may be zeroed, in such case the only ones, that have associated properties
++ * found, will be set to the respective values.
++ *
++ * If no error happened, the ->irq, ->mapbase, ->mapsize will be altered.
++ * The ->iotype is always altered.
++ *
++ * When @use_defaults is true and the respective property is not found
++ * the following values will be applied:
++ * ->regshift = 0
++ * In this case IRQ must be provided, otherwise an error will be returned.
++ *
++ * When @use_defaults is false and the respective property is found
++ * the following values will be validated:
++ * - reg-io-width (->iotype)
++ * - reg-offset (->mapsize against ->mapbase)
++ *
++ * Returns: 0 on success or negative errno on failure
++ */
++static int __uart_read_properties(struct uart_port *port, bool use_defaults)
++{
++ struct device *dev = port->dev;
++ u32 value;
++ int ret;
++
++ /* Read optional UART functional clock frequency */
++ device_property_read_u32(dev, "clock-frequency", &port->uartclk);
++
++ /* Read the registers alignment (default: 8-bit) */
++ ret = device_property_read_u32(dev, "reg-shift", &value);
++ if (ret)
++ port->regshift = use_defaults ? 0 : port->regshift;
++ else
++ port->regshift = value;
++
++ /* Read the registers I/O access type (default: MMIO 8-bit) */
++ ret = device_property_read_u32(dev, "reg-io-width", &value);
++ if (ret) {
++ port->iotype = UPIO_MEM;
++ } else {
++ switch (value) {
++ case 1:
++ port->iotype = UPIO_MEM;
++ break;
++ case 2:
++ port->iotype = UPIO_MEM16;
++ break;
++ case 4:
++ port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
++ break;
++ default:
++ if (!use_defaults) {
++ dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
++ return -EINVAL;
++ }
++ port->iotype = UPIO_UNKNOWN;
++ break;
++ }
++ }
++
++ /* Read the address mapping base offset (default: no offset) */
++ ret = device_property_read_u32(dev, "reg-offset", &value);
++ if (ret)
++ value = 0;
++
++ /* Check for shifted address mapping overflow */
++ if (!use_defaults && port->mapsize < value) {
++ dev_err(dev, "reg-offset %u exceeds region size %pa\n", value, &port->mapsize);
++ return -EINVAL;
++ }
++
++ port->mapbase += value;
++ port->mapsize -= value;
++
++ /* Read optional FIFO size */
++ device_property_read_u32(dev, "fifo-size", &port->fifosize);
++
++ if (device_property_read_bool(dev, "no-loopback-test"))
++ port->flags |= UPF_SKIP_TEST;
++
++ /* Get index of serial line, if found in DT aliases */
++ ret = of_alias_get_id(dev_of_node(dev), "serial");
++ if (ret >= 0)
++ port->line = ret;
++
++ if (dev_is_platform(dev))
++ ret = platform_get_irq(to_platform_device(dev), 0);
++ else
++ ret = fwnode_irq_get(dev_fwnode(dev), 0);
++ if (ret == -EPROBE_DEFER)
++ return ret;
++ if (ret > 0)
++ port->irq = ret;
++ else if (use_defaults)
++ /* By default IRQ support is mandatory */
++ return ret;
++ else
++ port->irq = 0;
++
++ port->flags |= UPF_SHARE_IRQ;
++
++ return 0;
++}
++
++int uart_read_port_properties(struct uart_port *port)
++{
++ return __uart_read_properties(port, true);
++}
++EXPORT_SYMBOL_GPL(uart_read_port_properties);
++
++int uart_read_and_validate_port_properties(struct uart_port *port)
++{
++ return __uart_read_properties(port, false);
++}
++EXPORT_SYMBOL_GPL(uart_read_and_validate_port_properties);
++
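The reg-io-width mapping used by __uart_read_properties() above is 1 -> UPIO_MEM, 2 -> UPIO_MEM16, 4 -> UPIO_MEM32 (UPIO_MEM32BE on big-endian devices), with anything else either rejected in validate mode or downgraded to UPIO_UNKNOWN in defaults mode. A stand-alone rendering of that switch; the enum values here are illustrative, not the kernel's serial_core constants:

#include <stdio.h>

enum iotype { UPIO_UNKNOWN = -1, UPIO_MEM, UPIO_MEM16,
              UPIO_MEM32, UPIO_MEM32BE };

/* Mirror of the reg-io-width switch: fills *out, or fails in
 * validate mode (the kernel returns -EINVAL there). */
static int width_to_iotype(unsigned int width, int big_endian,
                           int use_defaults, enum iotype *out)
{
    switch (width) {
    case 1: *out = UPIO_MEM;   return 0;
    case 2: *out = UPIO_MEM16; return 0;
    case 4: *out = big_endian ? UPIO_MEM32BE : UPIO_MEM32; return 0;
    default:
        if (!use_defaults)
            return -1;
        *out = UPIO_UNKNOWN;
        return 0;
    }
}

int main(void)
{
    enum iotype t;

    if (!width_to_iotype(4, 1, 0, &t))
        printf("iotype %d\n", t);       /* UPIO_MEM32BE */
    return 0;
}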
+ static struct device_driver serial_port_driver = {
+ .name = "port",
+ .suppress_bind_attrs = true,
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index a560b729fa3b79..f793624fd5018f 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1272,9 +1272,14 @@ static void sci_dma_rx_chan_invalidate(struct sci_port *s)
+ static void sci_dma_rx_release(struct sci_port *s)
+ {
+ struct dma_chan *chan = s->chan_rx_saved;
++ struct uart_port *port = &s->port;
++ unsigned long flags;
+
++ uart_port_lock_irqsave(port, &flags);
+ s->chan_rx_saved = NULL;
+ sci_dma_rx_chan_invalidate(s);
++ uart_port_unlock_irqrestore(port, flags);
++
+ dmaengine_terminate_sync(chan);
+ dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
+ sg_dma_address(&s->sg_rx[0]));
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 5e9cf0c48813da..9ef90bb30a47eb 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -226,12 +226,6 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
+
+ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+
+- if (port->rs485_rx_during_tx_gpio)
+- gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
+- !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
+- else
+- rs485conf->flags |= SER_RS485_RX_DURING_TX;
+-
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ cr1 = readl_relaxed(port->membase + ofs->cr1);
+ cr3 = readl_relaxed(port->membase + ofs->cr3);
+@@ -256,6 +250,10 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
+
+ writel_relaxed(cr3, port->membase + ofs->cr3);
+ writel_relaxed(cr1, port->membase + ofs->cr1);
++
++ if (!port->rs485_rx_during_tx_gpio)
++ rs485conf->flags |= SER_RS485_RX_DURING_TX;
++
+ } else {
+ stm32_usart_clr_bits(port, ofs->cr3,
+ USART_CR3_DEM | USART_CR3_DEP);
+@@ -859,6 +857,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ u32 sr;
+ unsigned int size;
++ irqreturn_t ret = IRQ_NONE;
+
+ sr = readl_relaxed(port->membase + ofs->isr);
+
+@@ -867,11 +866,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ (sr & USART_SR_TC)) {
+ stm32_usart_tc_interrupt_disable(port);
+ stm32_usart_rs485_rts_disable(port);
++ ret = IRQ_HANDLED;
+ }
+
+- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
++ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
+ writel_relaxed(USART_ICR_RTOCF,
+ port->membase + ofs->icr);
++ ret = IRQ_HANDLED;
++ }
+
+ if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
+ /* Clear wake up flag and disable wake up interrupt */
+@@ -880,6 +882,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
+ if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ pm_wakeup_event(tport->tty->dev, 0);
++ ret = IRQ_HANDLED;
+ }
+
+ /*
+@@ -894,6 +897,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ uart_unlock_and_check_sysrq(port);
+ if (size)
+ tty_flip_buffer_push(tport);
++ ret = IRQ_HANDLED;
+ }
+ }
+
+@@ -901,6 +905,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ spin_lock(&port->lock);
+ stm32_usart_transmit_chars(port);
+ spin_unlock(&port->lock);
++ ret = IRQ_HANDLED;
+ }
+
+ /* Receiver timeout irq for DMA RX */
+@@ -910,9 +915,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ uart_unlock_and_check_sysrq(port);
+ if (size)
+ tty_flip_buffer_push(tport);
++ ret = IRQ_HANDLED;
+ }
+
+- return IRQ_HANDLED;
++ return ret;
+ }
+
+ static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+@@ -1071,6 +1077,7 @@ static int stm32_usart_startup(struct uart_port *port)
+ val |= USART_CR2_SWAP;
+ writel_relaxed(val, port->membase + ofs->cr2);
+ }
++ stm32_port->throttled = false;
+
+ /* RX FIFO Flush */
+ if (ofs->rqr != UNDEF_REG)
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 23198e3f1461ac..6b4a28bcf2f5f5 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -262,13 +262,14 @@ static void sysrq_handle_showallcpus(u8 key)
+ if (in_hardirq())
+ regs = get_irq_regs();
+
+- pr_info("CPU%d:\n", smp_processor_id());
++ pr_info("CPU%d:\n", get_cpu());
+ if (regs)
+ show_regs(regs);
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
+ schedule_work(&sysrq_showallcpus);
++ put_cpu();
+ }
+ }
+
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 8a94e5a43c6d2e..493fc4742895f1 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2475,22 +2475,25 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
+ return 0;
+
+ if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+- retval = tty->ops->break_ctl(tty, duration);
+- else {
+- /* Do the work ourselves */
+- if (tty_write_lock(tty, false) < 0)
+- return -EINTR;
+- retval = tty->ops->break_ctl(tty, -1);
+- if (retval)
+- goto out;
+- if (!signal_pending(current))
+- msleep_interruptible(duration);
++ return tty->ops->break_ctl(tty, duration);
++
++ /* Do the work ourselves */
++ if (tty_write_lock(tty, false) < 0)
++ return -EINTR;
++
++ retval = tty->ops->break_ctl(tty, -1);
++ if (!retval) {
++ msleep_interruptible(duration);
+ retval = tty->ops->break_ctl(tty, 0);
+-out:
+- tty_write_unlock(tty);
+- if (signal_pending(current))
+- retval = -EINTR;
++ } else if (retval == -EOPNOTSUPP) {
++ /* some drivers can only tell dynamically whether break is supported */
++ retval = 0;
+ }
++ tty_write_unlock(tty);
++
++ if (signal_pending(current))
++ retval = -EINTR;
++
+ return retval;
+ }
+
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 7958bf6d27c401..7c625368554500 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -850,7 +850,7 @@ int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ ret = -EFAULT;
+ return ret;
+ case TIOCSLCKTRMIOS:
+- if (!capable(CAP_SYS_ADMIN))
++ if (!checkpoint_restore_ns_capable(&init_user_ns))
+ return -EPERM;
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios(&kterm,
+@@ -867,7 +867,7 @@ int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ ret = -EFAULT;
+ return ret;
+ case TIOCSLCKTRMIOS:
+- if (!capable(CAP_SYS_ADMIN))
++ if (!checkpoint_restore_ns_capable(&init_user_ns))
+ return -EPERM;
+ copy_termios_locked(real_tty, &kterm);
+ if (user_termios_to_kernel_termios_1(&kterm,
+diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
+index 0d04287da09844..ef8741c3e66296 100644
+--- a/drivers/tty/tty_jobctrl.c
++++ b/drivers/tty/tty_jobctrl.c
+@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
+ return;
+ }
+
+- spin_lock_irq(&current->sighand->siglock);
+- put_pid(current->signal->tty_old_pgrp);
+- current->signal->tty_old_pgrp = NULL;
+- tty = tty_kref_get(current->signal->tty);
+- spin_unlock_irq(&current->sighand->siglock);
+-
++ tty = get_current_tty();
+ if (tty) {
+ unsigned long flags;
+
+@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
+ tty_kref_put(tty);
+ }
+
++ /* If tty->ctrl.pgrp is not NULL, it may be assigned to
++ * current->signal->tty_old_pgrp in a race condition and
++ * cause a pid memleak. Release current->signal->tty_old_pgrp
++ * only after tty->ctrl.pgrp has been set to NULL.
++ */
++ spin_lock_irq(&current->sighand->siglock);
++ put_pid(current->signal->tty_old_pgrp);
++ current->signal->tty_old_pgrp = NULL;
++ spin_unlock_irq(&current->sighand->siglock);
++
+ /* Now clear signal->tty under the lock */
+ read_lock(&tasklist_lock);
+ session_clear_tty(task_session(current));
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 3f68e213df1f70..d80e9d4c974b4f 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -545,6 +545,12 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
+ goto out;
+ }
+
++ if (tty->ops->ldisc_ok) {
++ retval = tty->ops->ldisc_ok(tty, disc);
++ if (retval)
++ goto out;
++ }
++
+ old_ldisc = tty->ldisc;
+
+ /* Shutdown the old discipline. */
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index a39ed981bfd3e4..5b625f20233b47 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ return -ENOMEM;
+
+ name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
++ if (!name) {
++ rv = -ENOMEM;
++ goto free_port;
++ }
+
+ rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
+ ARRAY_SIZE(vcc_versions), NULL, name);
+ if (rv)
+- goto free_port;
++ goto free_name;
+
+ port->vio.debug = vcc_dbg_vio;
+ vcc_ldc_cfg.debug = vcc_dbg_ldc;
+
+ rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
+ if (rv)
+- goto free_port;
++ goto free_name;
+
+ spin_lock_init(&port->lock);
+
+@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ goto unreg_tty;
+ }
+ port->domain = kstrdup(domain, GFP_KERNEL);
++ if (!port->domain) {
++ rv = -ENOMEM;
++ goto unreg_tty;
++ }
++
+
+ mdesc_release(hp);
+
+@@ -653,8 +662,9 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ vcc_table_remove(port->index);
+ free_ldc:
+ vio_ldc_free(&port->vio);
+-free_port:
++free_name:
+ kfree(name);
++free_port:
+ kfree(port);
+
+ return rv;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 5c47f77804f0f6..6bd1a7785e888c 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -381,7 +381,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
+ u32 *ln = vc->vc_uni_lines[vc->state.y];
+ unsigned int x = vc->state.x, cols = vc->vc_cols;
+
+- memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
++ memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+ memset32(&ln[cols - nr], ' ', nr);
+ }
+ }
+@@ -2469,7 +2469,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+ }
+ return;
+ case EScsiignore:
+- if (c >= 20 && c <= 0x3f)
++ if (c >= 0x20 && c <= 0x3f)
+ return;
+ vc->vc_state = ESnormal;
+ return;
+@@ -3390,6 +3390,15 @@ static void con_cleanup(struct tty_struct *tty)
+ tty_port_put(&vc->port);
+ }
+
++/*
++ * We can't deal with anything but the N_TTY ldisc,
++ * because we can sleep in our write() routine.
++ */
++static int con_ldisc_ok(struct tty_struct *tty, int ldisc)
++{
++ return ldisc == N_TTY ? 0 : -EINVAL;
++}
++
+ static int default_color = 7; /* white */
+ static int default_italic_color = 2; // green (ASCII)
+ static int default_underline_color = 3; // cyan (ASCII)
+@@ -3509,6 +3518,7 @@ static const struct tty_operations con_ops = {
+ .resize = vt_resize,
+ .shutdown = con_shutdown,
+ .cleanup = con_cleanup,
++ .ldisc_ok = con_ldisc_ok,
+ };
+
+ static struct cdev vc0_cdev;
+@@ -4540,7 +4550,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
+ return -EINVAL;
+
+ if (op->data) {
+- font.data = kvmalloc(max_font_size, GFP_KERNEL);
++ font.data = kvzalloc(max_font_size, GFP_KERNEL);
+ if (!font.data)
+ return -ENOMEM;
+ } else
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 2ba8ec254dceee..da8c1734d33358 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -94,7 +94,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
+
+ val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
+ val &= ~MCQ_CFG_MAC_MASK;
+- val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
++ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
+ ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
+@@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
+ * @hba: per adapter instance
+ * @req: pointer to the request to be issued
+ *
+- * Return: the hardware queue instance on which the request would
+- * be queued.
++ * Return: the hardware queue instance on which the request will be or has
++ * been queued. %NULL if the request has already been freed.
+ */
+ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req)
+ {
+- u32 utag = blk_mq_unique_tag(req);
+- u32 hwq = blk_mq_unique_tag_to_hwq(utag);
++ struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);
+
+- return &hba->uhq[hwq];
++ return hctx ? &hba->uhq[hctx->queue_num] : NULL;
+ }
+
+ /**
+@@ -231,8 +230,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+
+ /* Operation and runtime registers configuration */
+ #define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
+-#define MCQ_OPR_OFFSET_n(p, i) \
+- (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
+
+ static void __iomem *mcq_opr_base(struct ufs_hba *hba,
+ enum ufshcd_mcq_opr n, int i)
+@@ -345,10 +342,10 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
+ ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQUBA, i));
+ /* Submission Queue Doorbell Address Offset */
+- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
++ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
+ MCQ_CFG_n(REG_SQDAO, i));
+ /* Submission Queue Interrupt Status Address Offset */
+- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
++ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
+ MCQ_CFG_n(REG_SQISAO, i));
+
+ /* Completion Queue Lower Base Address */
+@@ -358,10 +355,10 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
+ ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQUBA, i));
+ /* Completion Queue Doorbell Address Offset */
+- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
++ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
+ MCQ_CFG_n(REG_CQDAO, i));
+ /* Completion Queue Interrupt Status Address Offset */
+- ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
++ ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
+ MCQ_CFG_n(REG_CQISAO, i));
+
+ /* Save the base addresses for quicker access */
+@@ -436,7 +433,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+- hwq->max_entries = hba->nutrs;
++ hwq->max_entries = hba->nutrs + 1;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ mutex_init(&hwq->sq_mutex);
+@@ -501,7 +498,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ struct ufs_hw_queue *hwq;
+ void __iomem *reg, *opr_sqd_base;
+- u32 nexus, id, val;
++ u32 nexus, id, val, rtc;
+ int err;
+
+ if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
+@@ -511,6 +508,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+ if (!cmd)
+ return -EINVAL;
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
++ if (!hwq)
++ return 0;
+ } else {
+ hwq = hba->dev_cmd_queue;
+ }
+@@ -529,17 +528,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+ opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+ writel(nexus, opr_sqd_base + REG_SQCTI);
+
+- /* SQRTCy.ICU = 1 */
+- writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
++ /* Initiate Cleanup */
++ writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
++ opr_sqd_base + REG_SQRTC);
+
+ /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+ reg = opr_sqd_base + REG_SQRTS;
+ err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+ MCQ_POLL_US, false, reg);
+- if (err)
+- dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+- __func__, id, task_tag,
+- FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
++ rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg));
++ if (err || rtc)
++ dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%d\n",
++ __func__, id, task_tag, err, rtc);
+
+ if (ufshcd_mcq_sq_start(hba, hwq))
+ err = -ETIMEDOUT;
+@@ -597,8 +597,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+ addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
+ while (sq_head_slot != hwq->sq_tail_slot) {
+- utrd = hwq->sqe_base_addr +
+- sq_head_slot * sizeof(struct utp_transfer_req_desc);
++ utrd = hwq->sqe_base_addr + sq_head_slot;
+ match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+ if (addr == match) {
+ ufshcd_mcq_nullify_sqe(utrd);
+@@ -630,20 +629,21 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufs_hw_queue *hwq;
+- int err = FAILED;
++ unsigned long flags;
++ int err;
+
+ if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ dev_err(hba->dev,
+ "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+- goto out;
++ return FAILED;
+ }
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+ __func__, tag);
+- goto out;
++ return FAILED;
+ }
+
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+@@ -655,7 +655,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ */
+ dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+ __func__, hwq->id, tag);
+- goto out;
++ return FAILED;
+ }
+
+ /*
+@@ -663,16 +663,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ * in the completion queue either. Query the device to see if
+ * the command is being processed in the device.
+ */
+- if (ufshcd_try_to_abort_task(hba, tag)) {
++ err = ufshcd_try_to_abort_task(hba, tag);
++ if (err) {
+ dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
+ lrbp->req_abort_skip = true;
+- goto out;
++ return FAILED;
+ }
+
+- err = SUCCESS;
++ spin_lock_irqsave(&hwq->cq_lock, flags);
+ if (ufshcd_cmd_inflight(lrbp->cmd))
+ ufshcd_release_scsi_cmd(hba, lrbp);
++ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+
+-out:
+- return err;
++ return SUCCESS;
+ }
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 8382e8cfa414a0..db4044358e22d8 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1267,7 +1267,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
+- ufshcd_scsi_block_requests(hba);
++ blk_mq_quiesce_tagset(&hba->host->tag_set);
+ mutex_lock(&hba->wb_mutex);
+ down_write(&hba->clk_scaling_lock);
+
+@@ -1276,7 +1276,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
+- ufshcd_scsi_unblock_requests(hba);
++ blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ goto out;
+ }
+
+@@ -1297,7 +1297,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
+
+ mutex_unlock(&hba->wb_mutex);
+
+- ufshcd_scsi_unblock_requests(hba);
++ blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ ufshcd_release(hba);
+ }
+
+@@ -2172,9 +2172,10 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
+ if (is_mcq_enabled(hba)) {
+ int utrd_size = sizeof(struct utp_transfer_req_desc);
+ struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
+- struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
++ struct utp_transfer_req_desc *dest;
+
+ spin_lock(&hwq->sq_lock);
++ dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
+ memcpy(dest, src, utrd_size);
+ ufshcd_inc_sq_tail(hwq);
+ spin_unlock(&hwq->sq_lock);
+@@ -2280,7 +2281,17 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
+ return err;
+ }
+
++ /*
++ * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
++ * LSDB_SUPPORT, but defines bits [31:29] as reserved with a reset value
++ * of 0, which means we can simply read the values regardless of version.
++ */
+ hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
++ /*
++ * 0h: legacy single doorbell support is available
++ * 1h: indicates that legacy single doorbell support has been removed
++ */
++ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
+ if (!hba->mcq_sup)
+ return 0;
+
+@@ -2793,9 +2804,8 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
+ dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
+ i * ufshcd_get_ucd_size(hba);
+- u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
+- response_upiu);
+- u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
++ u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
++ u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
+
+ lrb->utr_descriptor_ptr = utrdlp + i;
+ lrb->utrd_dma_addr = hba->utrdl_dma_addr +
+@@ -2948,7 +2958,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
+ */
+ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
+ {
+- u32 mask = 1U << task_tag;
++ u32 mask;
+ unsigned long flags;
+ int err;
+
+@@ -2966,6 +2976,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
+ return 0;
+ }
+
++ mask = 1U << task_tag;
++
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, mask);
+@@ -3061,7 +3073,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+
+ /* MCQ mode */
+ if (is_mcq_enabled(hba)) {
+- err = ufshcd_clear_cmd(hba, lrbp->task_tag);
++ /* if the command was cleared successfully, ask the caller to retry */
++ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
++ err = -EAGAIN;
+ hba->dev_cmd.complete = NULL;
+ return err;
+ }
+@@ -3632,7 +3646,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ */
+ ret = utf16s_to_utf8s(uc_str->uc,
+ uc_str->len - QUERY_DESC_HDR_SIZE,
+- UTF16_BIG_ENDIAN, str, ascii_len);
++ UTF16_BIG_ENDIAN, str, ascii_len - 1);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ret; i++)
+@@ -3966,11 +3980,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
+ min_sleep_time_us =
+ MIN_DELAY_BEFORE_DME_CMDS_US - delta;
+ else
+- return; /* no more delay required */
++ min_sleep_time_us = 0; /* no more delay required */
++ }
++
++ if (min_sleep_time_us > 0) {
++ /* allow sleep for extra 50us if needed */
++ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+ }
+
+- /* allow sleep for extra 50us if needed */
+- usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
++ /* update the last_dme_cmd_tstamp */
++ hba->last_dme_cmd_tstamp = ktime_get();
+ }
+
+ /**
+@@ -4131,7 +4150,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+- wmb();
++ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ reenable_intr = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+@@ -6250,7 +6269,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+ ufshcd_hold(hba);
+ if (!ufshcd_is_clkgating_allowed(hba))
+ ufshcd_setup_clocks(hba, true);
+- ufshcd_release(hba);
+ pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
+ ufshcd_vops_resume(hba, pm_op);
+ } else {
+@@ -6347,11 +6365,26 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
++ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
++ struct ufs_hw_queue *hwq;
++ unsigned long flags;
+
+ *ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ *ret ? "failed" : "succeeded");
++
++ /* Release cmd in MCQ mode if abort succeeds */
++ if (is_mcq_enabled(hba) && (*ret == 0)) {
++ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
++ if (!hwq)
++ return 0;
++ spin_lock_irqsave(&hwq->cq_lock, flags);
++ if (ufshcd_cmd_inflight(lrbp->cmd))
++ ufshcd_release_scsi_cmd(hba, lrbp);
++ spin_unlock_irqrestore(&hwq->cq_lock, flags);
++ }
++
+ return *ret == 0;
+ }
+
+@@ -6433,7 +6466,8 @@ static void ufshcd_err_handler(struct work_struct *work)
+ if (ufshcd_err_handling_should_stop(hba))
+ goto skip_err_handling;
+
+- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
++ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
++ !hba->force_reset) {
+ bool ret;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+@@ -8526,7 +8560,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
+
+ ufs_bsg_probe(hba);
+ scsi_scan_host(hba->host);
+- pm_runtime_put_sync(hba->dev);
+
+ out:
+ return ret;
+@@ -8723,9 +8756,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+ if (ret)
+ goto out;
+
+- if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
++ if (!hba->pm_op_in_progress &&
++ (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
++ ufs_put_device_desc(hba);
+ ufshcd_hba_stop(hba);
+ ufshcd_vops_reinit_notify(hba);
+ ret = ufshcd_hba_enable(hba);
+@@ -8793,15 +8828,12 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+
+ /* Probe and add UFS logical units */
+ ret = ufshcd_add_lus(hba);
++
+ out:
+- /*
+- * If we failed to initialize the device or the device is not
+- * present, turn off the power/clocks etc.
+- */
+- if (ret) {
+- pm_runtime_put_sync(hba->dev);
+- ufshcd_hba_exit(hba);
+- }
++ pm_runtime_put_sync(hba->dev);
++
++ if (ret)
++ dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
+ }
+
+ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+@@ -9535,7 +9567,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+
+ /* UFS device & link must be active before we enter in this function */
+ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+- ret = -EINVAL;
++ /* wait for the error handler to finish, or trigger error recovery */
++ if (!ufshcd_eh_in_progress(hba))
++ ufshcd_force_error_recovery(hba);
++ ret = -EBUSY;
+ goto enable_scaling;
+ }
+
+@@ -10065,7 +10100,9 @@ static void ufshcd_wl_shutdown(struct device *dev)
+ shost_for_each_device(sdev, hba->host) {
+ if (sdev == hba->ufs_device_wlun)
+ continue;
+- scsi_device_quiesce(sdev);
++ mutex_lock(&sdev->state_mutex);
++ scsi_device_set_state(sdev, SDEV_OFFLINE);
++ mutex_unlock(&sdev->state_mutex);
+ }
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+
+@@ -10094,7 +10131,8 @@ void ufshcd_remove(struct ufs_hba *hba)
+ blk_mq_destroy_queue(hba->tmf_queue);
+ blk_put_queue(hba->tmf_queue);
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+- scsi_remove_host(hba->host);
++ if (hba->scsi_host_added)
++ scsi_remove_host(hba->host);
+ /* disable interrupts */
+ ufshcd_disable_intr(hba, hba->intr_mask);
+ ufshcd_hba_stop(hba);
+@@ -10135,10 +10173,7 @@ int ufshcd_system_restore(struct device *dev)
+ * are updated with the latest queue addresses. Only after
+ * updating these addresses, we can queue the new commands.
+ */
+- mb();
+-
+- /* Resuming from hibernate, assume that link was OFF */
+- ufshcd_set_link_off(hba);
++ ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ return 0;
+
+@@ -10352,7 +10387,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ * Make sure that UFS interrupts are disabled and any pending interrupt
+ * status is cleared before registering UFS interrupt handler.
+ */
+- mb();
++ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ /* IRQ registration */
+ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+@@ -10364,11 +10399,18 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ }
+
+ if (!is_mcq_supported(hba)) {
++ if (!hba->lsdb_sup) {
++ dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n",
++ __func__);
++ err = -EINVAL;
++ goto out_disable;
++ }
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
++ hba->scsi_host_added = true;
+ }
+
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
+@@ -10450,7 +10492,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ out_remove_scsi_host:
+- scsi_remove_host(hba->host);
++ if (hba->scsi_host_added)
++ scsi_remove_host(hba->host);
+ out_disable:
+ hba->is_irq_enabled = false;
+ ufshcd_hba_exit(hba);
+diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
+index 2491e7e870283d..56014ef302b497 100644
+--- a/drivers/ufs/host/cdns-pltfrm.c
++++ b/drivers/ufs/host/cdns-pltfrm.c
+@@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
+ * Make sure the register was updated,
+ * UniPro layer will not work with an incorrect value.
+ */
+- mb();
++ ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);
+
+ return 0;
+ }
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index d1149b1c3ed50e..643157a92c62a7 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -47,7 +47,7 @@ enum {
+ TSTBUS_MAX,
+ };
+
+-#define QCOM_UFS_MAX_GEAR 4
++#define QCOM_UFS_MAX_GEAR 5
+ #define QCOM_UFS_MAX_LANE 2
+
+ enum {
+@@ -67,27 +67,33 @@ static const struct __ufs_qcom_bw_table {
+ [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
+ [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
+ [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
++ [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
+ [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
+ [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
+ [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
++ [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
+ [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
+ [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
+ [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
+ [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
++ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
+ [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
+ [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
+ [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
+ [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
++ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
+ [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
+ [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
+ [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
+ [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
++ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
+ [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
+ [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
+ [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
+ [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+- [MODE_MAX][0][0] = { 7643136, 307200 },
++ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
++ [MODE_MAX][0][0] = { 7643136, 819200 },
+ };
+
+ static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
+@@ -159,7 +165,7 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
+ if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
+ cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ if (config_enable)
+ return qcom_ice_program_key(host->ice,
+@@ -367,9 +373,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
+
+ if (host->hw_ver.major >= 0x05)
+ ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+-
+- /* make sure above configuration is applied before we return */
+- mb();
+ }
+
+ /*
+@@ -495,7 +498,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+ REG_UFS_CFG2);
+
+ /* Ensure that HW clock gating is enabled before next operations */
+- mb();
++ ufshcd_readl(hba, REG_UFS_CFG2);
+ }
+
+ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
+@@ -591,7 +594,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ * make sure above write gets applied before we return from
+ * this function.
+ */
+- mb();
++ ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
+ }
+
+ if (ufs_qcom_cap_qunipro(host))
+@@ -909,8 +912,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ return ret;
+ }
+
+- /* Use the agreed gear */
+- host->hs_gear = dev_req_params->gear_tx;
++ /*
++ * Update hs_gear only when the gears are scaled to a higher value. This is because
++ * the PHY gear settings are backwards compatible, and we only need to change the PHY
++ * settings while scaling to higher gears.
++ */
++ if (dev_req_params->gear_tx > host->hs_gear)
++ host->hs_gear = dev_req_params->gear_tx;
+
+ /* enable the device ref clock before changing to HS mode */
+ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+@@ -1394,9 +1402,11 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
+ err = ufs_qcom_clk_scale_up_pre_change(hba);
+ else
+ err = ufs_qcom_clk_scale_down_pre_change(hba);
+- if (err)
+- ufshcd_uic_hibern8_exit(hba);
+
++ if (err) {
++ ufshcd_uic_hibern8_exit(hba);
++ return err;
++ }
+ } else {
+ if (scale_up)
+ err = ufs_qcom_clk_scale_up_post_change(hba);
+@@ -1668,7 +1678,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
+ if (!res->resource) {
+ dev_info(hba->dev, "Resource %s not provided\n", res->name);
+ if (i == RES_UFS)
+- return -ENOMEM;
++ return -ENODEV;
+ continue;
+ } else if (i == RES_UFS) {
+ res_mem = res->resource;
+diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
+index d6f8e74bd5381c..532667d8e6f0e8 100644
+--- a/drivers/ufs/host/ufs-qcom.h
++++ b/drivers/ufs/host/ufs-qcom.h
+@@ -149,10 +149,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+ REG_UFS_CFG1);
+
+ /*
+- * Make sure assertion of ufs phy reset is written to
+- * register before returning
++ * Dummy read to ensure the write takes effect before doing any sort
++ * of delay
+ */
+- mb();
++ ufshcd_readl(hba, REG_UFS_CFG1);
+ }
+
+ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+@@ -161,10 +161,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+ REG_UFS_CFG1);
+
+ /*
+- * Make sure de-assertion of ufs phy reset is written to
+- * register before returning
++ * Dummy read to ensure the write takes effect before doing any sort
++ * of delay
+ */
+- mb();
++ ufshcd_readl(hba, REG_UFS_CFG1);
+ }
+
+ /* Host controller hardware version: major.minor.step */
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 62082d64ece006..2d572f6c8ec833 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -466,13 +466,13 @@ static int uio_open(struct inode *inode, struct file *filep)
+
+ mutex_lock(&minor_lock);
+ idev = idr_find(&uio_idr, iminor(inode));
+- mutex_unlock(&minor_lock);
+ if (!idev) {
+ ret = -ENODEV;
++ mutex_unlock(&minor_lock);
+ goto out;
+ }
+-
+ get_device(&idev->dev);
++ mutex_unlock(&minor_lock);
+
+ if (!try_module_get(idev->owner)) {
+ ret = -ENODEV;
+@@ -1064,9 +1064,8 @@ void uio_unregister_device(struct uio_info *info)
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
+
+- device_unregister(&idev->dev);
+-
+ uio_free_minor(minor);
++ device_unregister(&idev->dev);
+
+ return;
+ }
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index 20d9762331bd76..a2c7abf8c289ef 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context)
+
+ /*
+ * Callback from vmbus_event when channel is rescinded.
++ * It is meant for rescind of primary channels only.
+ */
+ static void hv_uio_rescind(struct vmbus_channel *channel)
+ {
+- struct hv_device *hv_dev = channel->primary_channel->device_obj;
++ struct hv_device *hv_dev = channel->device_obj;
+ struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+
+ /*
+@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
+
+ /* Wake up reader */
+ uio_event_notify(&pdata->info);
++
++ /*
++ * With the rescind callback registered, the rescind path will not unregister
++ * the device from vmbus when the primary channel is rescinded.
++ * Without it, rescind handling is incomplete and the next onoffer message
++ * never arrives. Unregister the device from vmbus here.
++ */
++ vmbus_device_unregister(channel->device_obj);
+ }
+
+ /* Sysfs API to allow mmap of the ring buffers
+@@ -181,12 +190,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
+ {
+ if (pdata->send_gpadl.gpadl_handle) {
+ vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
+- vfree(pdata->send_buf);
++ if (!pdata->send_gpadl.decrypted)
++ vfree(pdata->send_buf);
+ }
+
+ if (pdata->recv_gpadl.gpadl_handle) {
+ vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
+- vfree(pdata->recv_buf);
++ if (!pdata->recv_gpadl.decrypted)
++ vfree(pdata->recv_buf);
+ }
+ }
+
+@@ -295,7 +306,8 @@ hv_uio_probe(struct hv_device *dev,
+ ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
+ RECV_BUFFER_SIZE, &pdata->recv_gpadl);
+ if (ret) {
+- vfree(pdata->recv_buf);
++ if (!pdata->recv_gpadl.decrypted)
++ vfree(pdata->recv_buf);
+ goto fail_close;
+ }
+
+@@ -317,7 +329,8 @@ hv_uio_probe(struct hv_device *dev,
+ ret = vmbus_establish_gpadl(channel, pdata->send_buf,
+ SEND_BUFFER_SIZE, &pdata->send_gpadl);
+ if (ret) {
+- vfree(pdata->send_buf);
++ if (!pdata->send_gpadl.decrypted)
++ vfree(pdata->send_buf);
+ goto fail_close;
+ }
+
+diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
+index 3a9a0dd4be706d..949eca0adebea3 100644
+--- a/drivers/usb/Makefile
++++ b/drivers/usb/Makefile
+@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/
+ obj-$(CONFIG_USB_FSL_USB2) += host/
+ obj-$(CONFIG_USB_FOTG210_HCD) += host/
+ obj-$(CONFIG_USB_MAX3421_HCD) += host/
++obj-$(CONFIG_USB_XEN_HCD) += host/
+
+ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
+
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 4ce7cba2b48aa3..8f3b9a0a38e1dd 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ struct cxacru_data *instance;
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
++ struct usb_endpoint_descriptor *in, *out;
+ int ret;
+
+ /* instance init */
+@@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ goto fail;
+ }
+
++ if (usb_endpoint_xfer_int(&cmd_ep->desc))
++ ret = usb_find_common_endpoints(intf->cur_altsetting,
++ NULL, NULL, &in, &out);
++ else
++ ret = usb_find_common_endpoints(intf->cur_altsetting,
++ &in, &out, NULL, NULL);
++
++ if (ret) {
++ usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
++ ret = -ENODEV;
++ goto fail;
++ }
++
+ if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_INT) {
+ usb_fill_int_urb(instance->rcv_urb,
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index 11a5b3437c32d2..b1b46c7c63f8b3 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+ return;
+ }
+
+- if (request->complete) {
++ /*
++ * A zlp request is appended by the driver itself, so there is no need to call
++ * usb_gadget_giveback_request() to notify the gadget composite driver.
++ */
++ if (request->complete && request->buf != priv_dev->zlp_buf) {
+ spin_unlock(&priv_dev->lock);
+ usb_gadget_giveback_request(&priv_ep->endpoint,
+ request);
+@@ -1119,6 +1123,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ dma_addr_t trb_dma;
+ u32 togle_pcs = 1;
+ int sg_iter = 0;
++ int num_trb_req;
++ int trb_burst;
+ int num_trb;
+ int address;
+ u32 control;
+@@ -1127,15 +1133,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct scatterlist *s = NULL;
+ bool sg_supported = !!(request->num_mapped_sgs);
+
++ num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
++
++ /* ISO transfers require each SOF to have a TD; each TD includes some TRBs */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+- num_trb = priv_ep->interval;
++ num_trb = priv_ep->interval * num_trb_req;
+ else
+- num_trb = sg_supported ? request->num_mapped_sgs : 1;
+-
+- if (num_trb > priv_ep->free_trbs) {
+- priv_ep->flags |= EP_RING_FULL;
+- return -ENOBUFS;
+- }
++ num_trb = num_trb_req;
+
+ priv_req = to_cdns3_request(request);
+ address = priv_ep->endpoint.desc->bEndpointAddress;
+@@ -1184,14 +1188,31 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+
+ link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
+ TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
++
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
++ /*
++ * ISO requires that the LINK TRB be the first TRB of a TD.
++ * Fill the remaining TRB space with LINK TRBs to simplify the software processing logic.
++ */
++ while (priv_ep->enqueue) {
++ *trb = *link_trb;
++ trace_cdns3_prepare_trb(priv_ep, trb);
++
++ cdns3_ep_inc_enq(priv_ep);
++ trb = priv_ep->trb_pool + priv_ep->enqueue;
++ priv_req->trb = trb;
++ }
++ }
++ }
++
++ if (num_trb > priv_ep->free_trbs) {
++ priv_ep->flags |= EP_RING_FULL;
++ return -ENOBUFS;
+ }
+
+ if (priv_dev->dev_ver <= DEV_VER_V2)
+ togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
+
+- if (sg_supported)
+- s = request->sg;
+-
+ /* set incorrect Cycle Bit for first trb*/
+ control = priv_ep->pcs ? 0 : TRB_CYCLE;
+ trb->length = 0;
+@@ -1209,6 +1230,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ do {
+ u32 length;
+
++ if (!(sg_iter % num_trb_req) && sg_supported)
++ s = request->sg;
++
+ /* fill TRB */
+ control |= TRB_TYPE(TRB_NORMAL);
+ if (sg_supported) {
+@@ -1223,7 +1247,36 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ total_tdl += DIV_ROUND_UP(length,
+ priv_ep->endpoint.maxpacket);
+
+- trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
++ trb_burst = priv_ep->trb_burst_size;
++
++ /*
++ * The DMA crossing a 4k boundary was supposed to be fixed in DEV_VER_V2, but the
++ * problem still occurs for ISO transfers when scatter-gather is enabled.
++ *
++ * With sg enabled, a packet size of 1k and a mult of 2, the data pattern looks like:
++ * [UVC Header(8B) ] [data(3k - 8)] ...
++ *
++ * The data received at offset 0xd000 contains the 0xc000 data, len 0x70. Errors occur
++ * in the following pattern:
++ * 0xd000: wrong
++ * 0xe000: wrong
++ * 0xf000: correct
++ * 0x10000: wrong
++ * 0x11000: wrong
++ * 0x12000: correct
++ * ...
++ *
++ * It is still unclear why the error does not occur below 0xd000, which should also
++ * cross a 4k boundary. In any case, the code below fixes the problem.
++ *
++ * To avoid DMA crossing a 4k boundary during ISO transfers, reduce the burst length to 16.
++ */
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
++ if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
++ ALIGN_DOWN(trb->buffer + length, SZ_4K))
++ trb_burst = 16;
++
++ trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
+ TRB_LEN(length));
+ pcs = priv_ep->pcs ? TRB_CYCLE : 0;
+
+@@ -1250,7 +1303,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ if (sg_supported) {
+ trb->control |= cpu_to_le32(TRB_ISP);
+ /* Don't set chain bit for last TRB */
+- if (sg_iter < num_trb - 1)
++ if ((sg_iter % num_trb_req) < num_trb_req - 1)
+ trb->control |= cpu_to_le32(TRB_CHAIN);
+
+ s = sg_next(s);
+@@ -1508,6 +1561,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+
+ /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
++
++ /* ISO ep_traddr may stop at LINK TRB */
++ if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
++ priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++ break;
++
+ trace_cdns3_complete_trb(priv_ep, trb);
+ cdns3_ep_inc_deq(priv_ep);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+@@ -1540,6 +1599,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+ }
+
+ if (request_handled) {
++ /* TRBs are duplicated priv_ep->interval times for ISO IN */
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
++ request->actual /= priv_ep->interval;
++
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ request_handled = false;
+ transfer_end = false;
+@@ -2035,11 +2098,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
+- u32 max_packet_size = 0;
+- u8 maxburst = 0;
++ u32 max_packet_size = priv_ep->wMaxPacketSize;
++ u8 maxburst = priv_ep->bMaxBurst;
+ u32 ep_cfg = 0;
+ u8 buffering;
+- u8 mult = 0;
+ int ret;
+
+ buffering = priv_dev->ep_buf_size - 1;
+@@ -2061,8 +2123,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ break;
+ default:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
+- mult = priv_dev->ep_iso_burst - 1;
+- buffering = mult + 1;
++ buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1;
+ }
+
+ switch (priv_dev->gadget.speed) {
+@@ -2073,17 +2134,8 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ max_packet_size = is_iso_ep ? 1024 : 512;
+ break;
+ case USB_SPEED_SUPER:
+- /* It's limitation that driver assumes in driver. */
+- mult = 0;
+- max_packet_size = 1024;
+- if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+- maxburst = priv_dev->ep_iso_burst - 1;
+- buffering = (mult + 1) *
+- (maxburst + 1);
+-
+- if (priv_ep->interval > 1)
+- buffering++;
+- } else {
++ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
++ max_packet_size = 1024;
+ maxburst = priv_dev->ep_buf_size - 1;
+ }
+ break;
+@@ -2112,7 +2164,6 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ priv_ep->trb_burst_size = 16;
+
+- mult = min_t(u8, mult, EP_CFG_MULT_MAX);
+ buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
+ maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
+
+@@ -2146,7 +2197,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ }
+
+ ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
+- EP_CFG_MULT(mult) |
++ EP_CFG_MULT(priv_ep->mult) | /* must match EP setting */
+ EP_CFG_BUFFERING(buffering) |
+ EP_CFG_MAXBURST(maxburst);
+
+@@ -2236,6 +2287,13 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
+ priv_ep->type = usb_endpoint_type(desc);
+ priv_ep->flags |= EP_CLAIMED;
+ priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
++ priv_ep->wMaxPacketSize = usb_endpoint_maxp(desc);
++ priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize);
++ priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK;
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) {
++ priv_ep->mult = USB_SS_MULT(comp_desc->bmAttributes) - 1;
++ priv_ep->bMaxBurst = comp_desc->bMaxBurst;
++ }
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return &priv_ep->endpoint;
+@@ -2485,11 +2543,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
+
+ while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+ priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
++ list_del_init(&priv_req->list);
+
+ kfree(priv_req->request.buf);
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+ &priv_req->request);
+- list_del_init(&priv_req->list);
+ --priv_ep->wa2_counter;
+ }
+
+@@ -3019,22 +3077,40 @@ static int cdns3_gadget_check_config(struct usb_gadget *gadget)
+ struct cdns3_endpoint *priv_ep;
+ struct usb_ep *ep;
+ int n_in = 0;
++ int iso = 0;
++ int out = 1;
+ int total;
++ int n;
+
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ priv_ep = ep_to_cdns3_ep(ep);
+- if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
+- n_in++;
++ if (!(priv_ep->flags & EP_CLAIMED))
++ continue;
++
++ n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
++ if (ep->address & USB_DIR_IN) {
++ /*
++ * ISO transfer: DMA starts moving data when the ISO arrives, and only
++ * transfers min(TD size, iso) bytes. There is no benefit in allocating
++ * more internal memory than 'iso'.
++ */
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++ iso += n;
++ else
++ n_in++;
++ } else {
++ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
++ out = max_t(int, out, n);
++ }
+ }
+
+ /* 2KB are reserved for EP0, 1KB for out*/
+- total = 2 + n_in + 1;
++ total = 2 + n_in + out + iso;
+
+ if (total > priv_dev->onchip_buffers)
+ return -ENOMEM;
+
+- priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
+- (priv_dev->onchip_buffers - 2) / (n_in + 1);
++ priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);
+
+ return 0;
+ }
+diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
+index fbe4a8e3aa8977..086a7bb8389752 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.h
++++ b/drivers/usb/cdns3/cdns3-gadget.h
+@@ -1168,6 +1168,9 @@ struct cdns3_endpoint {
+ u8 dir;
+ u8 num;
+ u8 type;
++ u8 mult;
++ u8 bMaxBurst;
++ u16 wMaxPacketSize;
+ int interval;
+
+ int free_trbs;
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+index e1b5801fdddf8c..9a5577a772af62 100644
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -811,6 +811,7 @@ struct cdnsp_stream_info {
+ * generate Missed Service Error Event.
+ * Set skip flag when receive a Missed Service Error Event and
+ * process the missed tds on the endpoint ring.
++ * @wa1_nop_trb: holds a pointer to the NOP TRB.
+ */
+ struct cdnsp_ep {
+ struct usb_ep endpoint;
+@@ -838,6 +839,8 @@ struct cdnsp_ep {
+ #define EP_UNCONFIGURED BIT(7)
+
+ bool skip;
++ union cdnsp_trb *wa1_nop_trb;
++
+ };
+
+ /**
+diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
+index 07f6068342d460..1d18d5002ef01d 100644
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -402,7 +402,7 @@ static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+ struct cdnsp_stream_ctx *st_ctx;
+ struct cdnsp_ep *pep;
+
+- pep = &pdev->eps[stream_id];
++ pep = &pdev->eps[ep_index];
+
+ if (pep->ep_state & EP_HAS_STREAMS) {
+ st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+@@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev,
+ seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
+ cur_td->last_trb, hw_deq);
+
+- if (seg && (pep->ep_state & EP_ENABLED))
++ if (seg && (pep->ep_state & EP_ENABLED) &&
++ !(pep->ep_state & EP_DIS_IN_RROGRESS))
+ cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
+ cur_td, &deq_state);
+ else
+@@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev,
+ * During disconnecting all endpoint will be disabled so we don't
+ * have to worry about updating dequeue pointer.
+ */
+- if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
++ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING ||
++ pep->ep_state & EP_DIS_IN_RROGRESS) {
+ status = -ESHUTDOWN;
+ ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
+ }
+@@ -1529,6 +1531,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ unsigned long flags;
+ int counter = 0;
+
++ local_bh_disable();
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+@@ -1541,6 +1544,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ cdnsp_died(pdev);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
++ local_bh_enable();
+ return IRQ_HANDLED;
+ }
+
+@@ -1557,6 +1561,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
++ local_bh_enable();
+
+ return IRQ_HANDLED;
+ }
+@@ -1901,6 +1906,23 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ if (ret)
+ return ret;
+
++ /*
++ * Workaround 1: a STOP EP command on a LINK TRB with the TC bit set
++ * to 1 can leave the internal cycle bit in an incorrect state after
++ * the command completes. As a consequence, an empty transfer ring can
++ * be incorrectly detected when the EP is resumed.
++ * A NOP TRB before the LINK TRB avoids this scenario: the STOP EP
++ * command then lands on the NOP TRB, so the internal cycle bit is not
++ * changed and keeps its correct value.
++ */
++ if (pep->wa1_nop_trb) {
++ field = le32_to_cpu(pep->wa1_nop_trb->trans_event.flags);
++ field ^= TRB_CYCLE;
++
++ pep->wa1_nop_trb->trans_event.flags = cpu_to_le32(field);
++ pep->wa1_nop_trb = NULL;
++ }
++
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+@@ -1996,6 +2018,17 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+ send_addr = addr;
+ }
+
++ if (cdnsp_trb_is_link(ring->enqueue + 1)) {
++ field = TRB_TYPE(TRB_TR_NOOP) | TRB_IOC;
++ if (!ring->cycle_state)
++ field |= TRB_CYCLE;
++
++ pep->wa1_nop_trb = ring->enqueue;
++
++ cdnsp_queue_trb(pdev, ring, 0, 0x0, 0x0,
++ TRB_INTR_TARGET(0), field);
++ }
++
+ cdnsp_check_trb_math(preq, enqd_len);
+ ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
+ start_cycle, start_trb);
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 33548771a0d3a7..465e9267b49c12 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -395,7 +395,6 @@ static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
+ return ret;
+ }
+
+-
+ /**
+ * cdns_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3/cdnsp core device
+diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
+index 04b6d12f2b9a39..ee917f1b091c89 100644
+--- a/drivers/usb/cdns3/drd.c
++++ b/drivers/usb/cdns3/drd.c
+@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
+ */
+ static void cdns_otg_disable_irq(struct cdns *cdns)
+ {
+- writel(0, &cdns->otg_irq_regs->ien);
++ if (cdns->version)
++ writel(0, &cdns->otg_irq_regs->ien);
+ }
+
+ /**
+@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
+
+ cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
+
+- if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
++ state = readl(&cdns->otg_cdnsp_regs->did);
++
++ if (OTG_CDNSP_CHECK_DID(state)) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_cdnsp_regs->ien;
+ cdns->version = CDNSP_CONTROLLER_V2;
+- } else {
++ } else if (OTG_CDNS3_CHECK_DID(state)) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v1_regs->ien;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->version = CDNS3_CONTROLLER_V1;
++ } else {
++ dev_err(cdns->dev, "unsupported DID=0x%08x\n", state);
++ return -EINVAL;
+ }
+
+ dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
+ return 0;
+ }
+
+-
+ /* Indicate the cdns3 core was power lost before */
+ bool cdns_power_is_lost(struct cdns *cdns)
+ {
+diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
+index cbdf94f73ed917..d72370c321d392 100644
+--- a/drivers/usb/cdns3/drd.h
++++ b/drivers/usb/cdns3/drd.h
+@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
+ __le32 susp_timing_ctrl;
+ };
+
+-#define OTG_CDNSP_DID 0x0004034E
++/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
++#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
++
++/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
++#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
+
+ /*
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
+diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
+index 6164fc4c96a49b..7ba760ee62e331 100644
+--- a/drivers/usb/cdns3/host.c
++++ b/drivers/usb/cdns3/host.c
+@@ -18,6 +18,11 @@
+ #include "../host/xhci.h"
+ #include "../host/xhci-plat.h"
+
++/*
++ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
++ * in the Cadence USB3 dual-role controller, so they can't be used
++ * with the Cadence CDNSP dual-role controller.
++ */
+ #define XECP_PORT_CAP_REG 0x8000
+ #define XECP_AUX_CTRL_REG1 0x8120
+
+@@ -57,6 +62,10 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
+ .resume_quirk = xhci_cdns3_resume_quirk,
+ };
+
++static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = {
++ .quirks = XHCI_CDNS_SCTX_QUIRK,
++};
++
+ static int __cdns_host_init(struct cdns *cdns)
+ {
+ struct platform_device *xhci;
+@@ -81,8 +90,13 @@ static int __cdns_host_init(struct cdns *cdns)
+ goto err1;
+ }
+
+- cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+- sizeof(struct xhci_plat_priv), GFP_KERNEL);
++ if (cdns->version < CDNSP_CONTROLLER_V2)
++ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
++ sizeof(struct xhci_plat_priv), GFP_KERNEL);
++ else
++ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
++ sizeof(struct xhci_plat_priv), GFP_KERNEL);
++
+ if (!cdns->xhci_plat_data) {
+ ret = -ENOMEM;
+ goto err1;
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index d9bb3d3f026e68..2a38e1eb65466c 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -176,6 +176,7 @@ struct hw_bank {
+ * @enabled_otg_timer_bits: bits of enabled otg timers
+ * @next_otg_timer: next nearest enabled timer to be expired
+ * @work: work for role changing
++ * @power_lost_work: work for power lost handling
+ * @wq: workqueue thread
+ * @qh_pool: allocation pool for queue heads
+ * @td_pool: allocation pool for transfer descriptors
+@@ -226,6 +227,7 @@ struct ci_hdrc {
+ enum otg_fsm_timer next_otg_timer;
+ struct usb_role_switch *role_switch;
+ struct work_struct work;
++ struct work_struct power_lost_work;
+ struct workqueue_struct *wq;
+
+ struct dma_pool *qh_pool;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 7ac39a281b8cb5..ca71df4f32e4cc 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -523,6 +523,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
+ u32 otgsc = 0;
+
+ if (ci->in_lpm) {
++ /*
++ * If we already have a wakeup irq pending, just return
++ * and wait for resume to finish first.
++ */
++ if (ci->wakeup_int)
++ return IRQ_HANDLED;
++
+ disable_irq_nosync(irq);
+ ci->wakeup_int = true;
+ pm_runtime_get(ci->dev);
+@@ -849,6 +856,27 @@ static int ci_extcon_register(struct ci_hdrc *ci)
+ return 0;
+ }
+
++static void ci_power_lost_work(struct work_struct *work)
++{
++ struct ci_hdrc *ci = container_of(work, struct ci_hdrc, power_lost_work);
++ enum ci_role role;
++
++ disable_irq_nosync(ci->irq);
++ pm_runtime_get_sync(ci->dev);
++ if (!ci_otg_is_fsm_mode(ci)) {
++ role = ci_get_role(ci);
++
++ if (ci->role != role) {
++ ci_handle_id_switch(ci);
++ } else if (role == CI_ROLE_GADGET) {
++ if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
++ usb_gadget_vbus_connect(&ci->gadget);
++ }
++ }
++ pm_runtime_put_sync(ci->dev);
++ enable_irq(ci->irq);
++}
++
+ static DEFINE_IDA(ci_ida);
+
+ struct platform_device *ci_hdrc_add_device(struct device *dev,
+@@ -1038,6 +1066,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+
+ spin_lock_init(&ci->lock);
+ mutex_init(&ci->mutex);
++ INIT_WORK(&ci->power_lost_work, ci_power_lost_work);
++
+ ci->dev = dev;
+ ci->platdata = dev_get_platdata(dev);
+ ci->imx28_write_fix = !!(ci->platdata->flags &
+@@ -1389,25 +1419,6 @@ static int ci_suspend(struct device *dev)
+ return 0;
+ }
+
+-static void ci_handle_power_lost(struct ci_hdrc *ci)
+-{
+- enum ci_role role;
+-
+- disable_irq_nosync(ci->irq);
+- if (!ci_otg_is_fsm_mode(ci)) {
+- role = ci_get_role(ci);
+-
+- if (ci->role != role) {
+- ci_handle_id_switch(ci);
+- } else if (role == CI_ROLE_GADGET) {
+- if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
+- usb_gadget_vbus_connect(&ci->gadget);
+- }
+- }
+-
+- enable_irq(ci->irq);
+-}
+-
+ static int ci_resume(struct device *dev)
+ {
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+@@ -1439,7 +1450,7 @@ static int ci_resume(struct device *dev)
+ ci_role(ci)->resume(ci, power_lost);
+
+ if (power_lost)
+- ci_handle_power_lost(ci);
++ queue_work(system_freezable_wq, &ci->power_lost_work);
+
+ if (ci->supports_runtime_pm) {
+ pm_runtime_disable(dev);
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 08af26b762a2d6..0cce192083701e 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -30,8 +30,7 @@ struct ehci_ci_priv {
+ };
+
+ struct ci_hdrc_dma_aligned_buffer {
+- void *kmalloc_ptr;
+- void *old_xfer_buffer;
++ void *original_buffer;
+ u8 data[];
+ };
+
+@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
+ return 0;
+ }
+
+-static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
++static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
+ {
+ struct ci_hdrc_dma_aligned_buffer *temp;
+- size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
++ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+
+ temp = container_of(urb->transfer_buffer,
+ struct ci_hdrc_dma_aligned_buffer, data);
++ urb->transfer_buffer = temp->original_buffer;
++
++ if (copy_back && usb_urb_dir_in(urb)) {
++ size_t length;
+
+- if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+- memcpy(temp->old_xfer_buffer, temp->data, length);
++ memcpy(temp->original_buffer, temp->data, length);
+ }
+- urb->transfer_buffer = temp->old_xfer_buffer;
+- kfree(temp->kmalloc_ptr);
+
+- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
++ kfree(temp);
+ }
+
+ static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ {
+- struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+- const unsigned int ci_hdrc_usb_dma_align = 32;
+- size_t kmalloc_size;
++ struct ci_hdrc_dma_aligned_buffer *temp;
+
+- if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+- !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
++ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
++ return 0;
++ if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4)
++ && IS_ALIGNED(urb->transfer_buffer_length, 4))
+ return 0;
+
+- /* Allocate a buffer with enough padding for alignment */
+- kmalloc_size = urb->transfer_buffer_length +
+- sizeof(struct ci_hdrc_dma_aligned_buffer) +
+- ci_hdrc_usb_dma_align - 1;
+-
+- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+- if (!kmalloc_ptr)
++ temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
++ if (!temp)
+ return -ENOMEM;
+
+- /* Position our struct dma_aligned_buffer such that data is aligned */
+- temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+- temp->kmalloc_ptr = kmalloc_ptr;
+- temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+- urb->transfer_buffer = temp->data;
+
++ temp->original_buffer = urb->transfer_buffer;
++ urb->transfer_buffer = temp->data;
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+- ci_hdrc_free_dma_aligned_buffer(urb);
++ ci_hdrc_free_dma_aligned_buffer(urb, false);
+
+ return ret;
+ }
+@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+ {
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+- ci_hdrc_free_dma_aligned_buffer(urb);
++ ci_hdrc_free_dma_aligned_buffer(urb, true);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 0b7bd3c643c3aa..f70ceedfb468f7 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -86,7 +86,7 @@ static int hw_device_state(struct ci_hdrc *ci, u32 dma)
+ hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
+ /* interrupt, error, port change, reset, sleep/suspend */
+ hw_write(ci, OP_USBINTR, ~0,
+- USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
++ USBi_UI|USBi_UEI|USBi_PCI|USBi_URI);
+ } else {
+ hw_write(ci, OP_USBINTR, ~0, 0);
+ }
+@@ -876,6 +876,7 @@ __releases(ci->lock)
+ __acquires(ci->lock)
+ {
+ int retval;
++ u32 intr;
+
+ spin_unlock(&ci->lock);
+ if (ci->gadget.speed != USB_SPEED_UNKNOWN)
+@@ -889,6 +890,11 @@ __acquires(ci->lock)
+ if (retval)
+ goto done;
+
++	/* clear stale SLI status, then enable the suspend interrupt */
++ hw_write(ci, OP_USBSTS, USBi_SLI, USBi_SLI);
++ intr = hw_read(ci, OP_USBINTR, ~0);
++ hw_write(ci, OP_USBINTR, ~0, intr | USBi_SLI);
++
+ ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
+ if (ci->status == NULL)
+ retval = -ENOMEM;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index a1f4e1ead97ff4..605fea4611029b 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -916,6 +916,9 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
+ struct acm *acm = tty->driver_data;
+ int retval;
+
++ if (!(acm->ctrl_caps & USB_CDC_CAP_BRK))
++ return -EOPNOTSUPP;
++
+ retval = acm_send_break(acm, state ? 0xffff : 0);
+ if (retval < 0)
+ dev_dbg(&acm->control->dev,
+@@ -959,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ struct acm *acm = tty->driver_data;
+
+ ss->line = acm->minor;
++ mutex_lock(&acm->port.mutex);
+ ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
+ ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE :
+ jiffies_to_msecs(acm->port.closing_wait) / 10;
++ mutex_unlock(&acm->port.mutex);
+ return 0;
+ }
+
+@@ -1758,6 +1763,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+ .driver_info = SINGLE_RX_URB,
+ },
++ { USB_DEVICE(0x1901, 0x0006), /* GE Healthcare Patient Monitor UI Controller */
++	.driver_info = DISABLE_ECHO, /* clear the ECHO termios flag */
++ },
+ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index c553decb546107..6830be4419e20a 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
+ dev_err(&desc->intf->dev, "Stall on int endpoint\n");
+ goto sw; /* halt is cleared in work */
+ default:
+- dev_err(&desc->intf->dev,
++ dev_err_ratelimited(&desc->intf->dev,
+ "nonzero urb status received: %d\n", status);
+ break;
+ }
+ }
+
+ if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
+- dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
++ dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ urb->actual_length);
+ goto exit;
+ }
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 311007b1d90465..c2e666e82857c1 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -754,7 +754,7 @@ static struct urb *usbtmc_create_urb(void)
+ if (!urb)
+ return NULL;
+
+- dmabuf = kmalloc(bufsize, GFP_KERNEL);
++ dmabuf = kzalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf) {
+ usb_free_urb(urb);
+ return NULL;
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index 84d91b1c1eed53..0886b19d2e1c8f 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ return ret;
+ }
+
+- root = debugfs_create_dir(dev_name(dev), ulpi_root);
++ root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root);
+ debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
+
+ dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b19e38d5fd10c1..847dd32c0f5e28 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ if (ifp->desc.bNumEndpoints >= num_ep)
+ goto skip_to_next_endpoint_or_interface_descriptor;
+
++ /* Save a copy of the descriptor and use it instead of the original */
++ endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ memcpy(&endpoint->desc, d, n);
++ d = &endpoint->desc;
++
++ /* Clear the reserved bits in bEndpointAddress */
++ i = d->bEndpointAddress &
++ (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK);
++ if (i != d->bEndpointAddress) {
++ dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n",
++ cfgno, inum, asnum, d->bEndpointAddress, i);
++ endpoint->desc.bEndpointAddress = i;
++ }
++
+ /* Check for duplicate endpoint addresses */
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+@@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ }
+ }
+
+- endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ /* Accept this endpoint */
+ ++ifp->desc.bNumEndpoints;
+-
+- memcpy(&endpoint->desc, d, n);
+ INIT_LIST_HEAD(&endpoint->urb_list);
+
+ /*
+@@ -1047,7 +1059,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+
+ if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+ dev_notice(ddev, "descriptor type invalid, skip\n");
+- continue;
++ goto skip_to_next_descriptor;
+ }
+
+ switch (cap_type) {
+@@ -1078,6 +1090,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ break;
+ }
+
++skip_to_next_descriptor:
+ total_len -= length;
+ buffer += length;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0ff47eeffb4909..1ba3feb5e19000 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -47,12 +47,24 @@
+ #define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451
+ #define USB_PRODUCT_TUSB8041_USB3 0x8140
+ #define USB_PRODUCT_TUSB8041_USB2 0x8142
+-#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+-#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
++#define USB_VENDOR_MICROCHIP 0x0424
++#define USB_PRODUCT_USB4913 0x4913
++#define USB_PRODUCT_USB4914 0x4914
++#define USB_PRODUCT_USB4915 0x4915
++#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND BIT(0)
++#define HUB_QUIRK_DISABLE_AUTOSUSPEND BIT(1)
++#define HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL BIT(2)
+
+ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+ #define USB_PING_RESPONSE_TIME 400 /* ns */
++#define USB_REDUCE_FRAME_INTR_BINTERVAL 9
++
++/*
++ * The SET_ADDRESS request timeout will be 500 ms when the
++ * USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT quirk flag is set.
++ */
++#define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */
+
+ /* Protect struct usb_device->state and ->children members
+ * Note: Both are also protected by ->dev.sem, except that ->state can
+@@ -117,7 +129,6 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ #define HUB_DEBOUNCE_STEP 25
+ #define HUB_DEBOUNCE_STABLE 100
+
+-static void hub_release(struct kref *kref);
+ static int usb_reset_and_verify_device(struct usb_device *udev);
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
+ static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
+@@ -622,29 +633,6 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
+ ret = 0;
+ }
+ mutex_unlock(&hub->status_mutex);
+-
+- /*
+- * There is no need to lock status_mutex here, because status_mutex
+- * protects hub->status, and the phy driver only checks the port
+- * status without changing the status.
+- */
+- if (!ret) {
+- struct usb_device *hdev = hub->hdev;
+-
+- /*
+- * Only roothub will be notified of port state changes,
+- * since the USB PHY only cares about changes at the next
+- * level.
+- */
+- if (is_root_hub(hdev)) {
+- struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+-
+- if (hcd->usb_phy)
+- usb_phy_notify_port_status(hcd->usb_phy,
+- port1 - 1, *status, *change);
+- }
+- }
+-
+ return ret;
+ }
+
+@@ -702,14 +690,14 @@ static void kick_hub_wq(struct usb_hub *hub)
+ */
+ intf = to_usb_interface(hub->intfdev);
+ usb_autopm_get_interface_no_resume(intf);
+- kref_get(&hub->kref);
++ hub_get(hub);
+
+ if (queue_work(hub_wq, &hub->events))
+ return;
+
+ /* the work has already been scheduled */
+ usb_autopm_put_interface_async(intf);
+- kref_put(&hub->kref, hub_release);
++ hub_put(hub);
+ }
+
+ void usb_kick_hub_wq(struct usb_device *hdev)
+@@ -1077,7 +1065,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ goto init2;
+ goto init3;
+ }
+- kref_get(&hub->kref);
++ hub_get(hub);
+
+ /* The superspeed hub except for root hub has to use Hub Depth
+ * value as an offset into the route string to locate the bits
+@@ -1325,7 +1313,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ device_unlock(&hdev->dev);
+ }
+
+- kref_put(&hub->kref, hub_release);
++ hub_put(hub);
+ }
+
+ /* Implement the continuations for the delays above */
+@@ -1741,6 +1729,16 @@ static void hub_release(struct kref *kref)
+ kfree(hub);
+ }
+
++void hub_get(struct usb_hub *hub)
++{
++ kref_get(&hub->kref);
++}
++
++void hub_put(struct usb_hub *hub)
++{
++ kref_put(&hub->kref, hub_release);
++}
++
+ static unsigned highspeed_hubs;
+
+ static void hub_disconnect(struct usb_interface *intf)
+@@ -1789,7 +1787,7 @@ static void hub_disconnect(struct usb_interface *intf)
+
+ onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
+
+- kref_put(&hub->kref, hub_release);
++ hub_put(hub);
+ }
+
+ static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
+@@ -1927,6 +1925,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ usb_autopm_get_interface_no_resume(intf);
+ }
+
++ if ((id->driver_info & HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL) &&
++ desc->endpoint[0].desc.bInterval > USB_REDUCE_FRAME_INTR_BINTERVAL) {
++ desc->endpoint[0].desc.bInterval =
++ USB_REDUCE_FRAME_INTR_BINTERVAL;
++ /* Tell the HCD about the interrupt ep's new bInterval */
++ usb_set_interface(hdev, 0, 0);
++ }
++
+ if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) {
+ onboard_hub_create_pdevs(hdev, &hub->onboard_hub_devs);
+
+@@ -2056,9 +2062,19 @@ static void update_port_device_state(struct usb_device *udev)
+
+ if (udev->parent) {
+ hub = usb_hub_to_struct_hub(udev->parent);
+- port_dev = hub->ports[udev->portnum - 1];
+- WRITE_ONCE(port_dev->state, udev->state);
+- sysfs_notify_dirent(port_dev->state_kn);
++
++ /*
++ * The Link Layer Validation System Driver (lvstest)
++ * has a test step to unbind the hub before running the
++ * rest of the procedure. This triggers hub_disconnect
++ * which will set the hub's maxchild to 0, further
++ * resulting in usb_hub_to_struct_hub returning NULL.
++ */
++ if (hub) {
++ port_dev = hub->ports[udev->portnum - 1];
++ WRITE_ONCE(port_dev->state, udev->state);
++ sysfs_notify_dirent(port_dev->state_kn);
++ }
+ }
+ }
+
+@@ -2389,17 +2405,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
+ }
+ } else if (desc->bLength == sizeof
+ (struct usb_otg_descriptor)) {
+- /* Set a_alt_hnp_support for legacy otg device */
+- err = usb_control_msg(udev,
+- usb_sndctrlpipe(udev, 0),
+- USB_REQ_SET_FEATURE, 0,
+- USB_DEVICE_A_ALT_HNP_SUPPORT,
+- 0, NULL, 0,
+- USB_CTRL_SET_TIMEOUT);
+- if (err < 0)
+- dev_err(&udev->dev,
+- "set a_alt_hnp_support failed: %d\n",
+- err);
++			/*
++			 * We are operating on a legacy OTG device.
++			 * It should be told that it is operating on
++			 * the wrong port if another port on the bus
++			 * supports HNP.
++			 */
++ if (bus->otg_port != 0) {
++ /* Set a_alt_hnp_support for legacy otg device */
++ err = usb_control_msg(udev,
++ usb_sndctrlpipe(udev, 0),
++ USB_REQ_SET_FEATURE, 0,
++ USB_DEVICE_A_ALT_HNP_SUPPORT,
++ 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ if (err < 0)
++ dev_err(&udev->dev,
++ "set a_alt_hnp_support failed: %d\n",
++ err);
++ }
+ }
+ }
+ #endif
+@@ -4645,7 +4669,12 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
+ static int hub_set_address(struct usb_device *udev, int devnum)
+ {
+ int retval;
++ unsigned int timeout_ms = USB_CTRL_SET_TIMEOUT;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
++
++ if (hub->hdev->quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
++ timeout_ms = USB_SHORT_SET_ADDRESS_REQ_TIMEOUT;
+
+ /*
+ * The host controller will choose the device address,
+@@ -4658,11 +4687,11 @@ static int hub_set_address(struct usb_device *udev, int devnum)
+ if (udev->state != USB_STATE_DEFAULT)
+ return -EINVAL;
+ if (hcd->driver->address_device)
+- retval = hcd->driver->address_device(hcd, udev);
++ retval = hcd->driver->address_device(hcd, udev, timeout_ms);
+ else
+ retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+ USB_REQ_SET_ADDRESS, 0, devnum, 0,
+- NULL, 0, USB_CTRL_SET_TIMEOUT);
++ NULL, 0, timeout_ms);
+ if (retval == 0) {
+ update_devnum(udev, devnum);
+ /* Device now using proper address. */
+@@ -5048,9 +5077,10 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ }
+ if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
+ ; /* Initial ep0 maxpacket guess is right */
+- } else if ((udev->speed == USB_SPEED_FULL ||
++ } else if (((udev->speed == USB_SPEED_FULL ||
+ udev->speed == USB_SPEED_HIGH) &&
+- (i == 8 || i == 16 || i == 32 || i == 64)) {
++ (i == 8 || i == 16 || i == 32 || i == 64)) ||
++ (udev->speed >= USB_SPEED_SUPER && i > 0)) {
+ /* Initial guess is wrong; use the descriptor's value */
+ if (udev->speed == USB_SPEED_FULL)
+ dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
+@@ -5881,7 +5911,7 @@ static void hub_event(struct work_struct *work)
+
+ /* Balance the stuff in kick_hub_wq() and allow autosuspend */
+ usb_autopm_put_interface(intf);
+- kref_put(&hub->kref, hub_release);
++ hub_put(hub);
+
+ kcov_remote_stop();
+ }
+@@ -5914,6 +5944,21 @@ static const struct usb_device_id hub_id_table[] = {
+ .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+ .idProduct = USB_PRODUCT_TUSB8041_USB3,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++ | USB_DEVICE_ID_MATCH_PRODUCT,
++ .idVendor = USB_VENDOR_MICROCHIP,
++ .idProduct = USB_PRODUCT_USB4913,
++ .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++ | USB_DEVICE_ID_MATCH_PRODUCT,
++ .idVendor = USB_VENDOR_MICROCHIP,
++ .idProduct = USB_PRODUCT_USB4914,
++ .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++ | USB_DEVICE_ID_MATCH_PRODUCT,
++ .idVendor = USB_VENDOR_MICROCHIP,
++ .idProduct = USB_PRODUCT_USB4915,
++ .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
+ { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
+ .bDeviceClass = USB_CLASS_HUB},
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index d44dd7f6623ee6..6610cf6131c673 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -126,6 +126,8 @@ extern void usb_hub_remove_port_device(struct usb_hub *hub,
+ extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
+ int port1, bool set);
+ extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
++extern void hub_get(struct usb_hub *hub);
++extern void hub_put(struct usb_hub *hub);
+ extern int hub_port_debounce(struct usb_hub *hub, int port1,
+ bool must_be_connected);
+ extern int usb_clear_port_feature(struct usb_device *hdev,
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 77be0dc28da9a6..5fb3f55ef06db5 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -50,16 +50,29 @@ static ssize_t disable_show(struct device *dev,
+ struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+- struct usb_interface *intf = to_usb_interface(hub->intfdev);
++ struct usb_interface *intf = to_usb_interface(dev->parent);
+ int port1 = port_dev->portnum;
+ u16 portstatus, unused;
+ bool disabled;
+ int rc;
++ struct kernfs_node *kn;
+
++ if (!hub)
++ return -ENODEV;
++ hub_get(hub);
+ rc = usb_autopm_get_interface(intf);
+ if (rc < 0)
+- return rc;
++ goto out_hub_get;
+
++ /*
++ * Prevent deadlock if another process is concurrently
++ * trying to unregister hdev.
++ */
++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++ if (!kn) {
++ rc = -ENODEV;
++ goto out_autopm;
++ }
+ usb_lock_device(hdev);
+ if (hub->disconnected) {
+ rc = -ENODEV;
+@@ -69,9 +82,13 @@ static ssize_t disable_show(struct device *dev,
+ usb_hub_port_status(hub, port1, &portstatus, &unused);
+ disabled = !usb_port_is_power_on(hub, portstatus);
+
+-out_hdev_lock:
++ out_hdev_lock:
+ usb_unlock_device(hdev);
++ sysfs_unbreak_active_protection(kn);
++ out_autopm:
+ usb_autopm_put_interface(intf);
++ out_hub_get:
++ hub_put(hub);
+
+ if (rc)
+ return rc;
+@@ -85,19 +102,32 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
+ struct usb_port *port_dev = to_usb_port(dev);
+ struct usb_device *hdev = to_usb_device(dev->parent->parent);
+ struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
+- struct usb_interface *intf = to_usb_interface(hub->intfdev);
++ struct usb_interface *intf = to_usb_interface(dev->parent);
+ int port1 = port_dev->portnum;
+ bool disabled;
+ int rc;
++ struct kernfs_node *kn;
+
++ if (!hub)
++ return -ENODEV;
+ rc = kstrtobool(buf, &disabled);
+ if (rc)
+ return rc;
+
++ hub_get(hub);
+ rc = usb_autopm_get_interface(intf);
+ if (rc < 0)
+- return rc;
++ goto out_hub_get;
+
++ /*
++ * Prevent deadlock if another process is concurrently
++ * trying to unregister hdev.
++ */
++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++ if (!kn) {
++ rc = -ENODEV;
++ goto out_autopm;
++ }
+ usb_lock_device(hdev);
+ if (hub->disconnected) {
+ rc = -ENODEV;
+@@ -118,9 +148,13 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
+ if (!rc)
+ rc = count;
+
+-out_hdev_lock:
++ out_hdev_lock:
+ usb_unlock_device(hdev);
++ sysfs_unbreak_active_protection(kn);
++ out_autopm:
+ usb_autopm_put_interface(intf);
++ out_hub_get:
++ hub_put(hub);
+
+ return rc;
+ }
+@@ -418,8 +452,10 @@ static void usb_port_shutdown(struct device *dev)
+ {
+ struct usb_port *port_dev = to_usb_port(dev);
+
+- if (port_dev->child)
++ if (port_dev->child) {
+ usb_disable_usb2_hardware_lpm(port_dev->child);
++ usb_unlocked_disable_lpm(port_dev->child);
++ }
+ }
+
+ static const struct dev_pm_ops usb_port_pm_ops = {
+@@ -573,7 +609,7 @@ static int match_location(struct usb_device *peer_hdev, void *p)
+ struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
+ struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
+
+- if (!peer_hub)
++ if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED)
+ return 0;
+
+ hcd = bus_to_hcd(hdev->bus);
+@@ -584,7 +620,8 @@ static int match_location(struct usb_device *peer_hdev, void *p)
+
+ for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
+ peer = peer_hub->ports[port1 - 1];
+- if (peer && peer->location == port_dev->location) {
++ if (peer && peer->connect_type != USB_PORT_NOT_USED &&
++ peer->location == port_dev->location) {
+ link_peers_report(port_dev, peer);
+ return 1; /* done */
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 15e9bd180a1d25..13171454f9591a 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -138,6 +138,9 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp)
+ case 'o':
+ flags |= USB_QUIRK_HUB_SLOW_RESET;
+ break;
++ case 'p':
++ flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
++ break;
+ /* Ignore unrecognized flag characters */
+ }
+ }
+@@ -503,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
++ /* START BP-850k Printer */
++ { USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF },
++
+ /* MIDI keyboard WORLDE MINI */
+ { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -527,6 +533,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+
+ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+
++ /* APTIV AUTOMOTIVE HUB */
++ { USB_DEVICE(0x2c48, 0x0132), .driver_info =
++ USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
++
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 5d21718afb05cf..164edebc7f1f5c 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -668,6 +668,7 @@ static int add_power_attributes(struct device *dev)
+
+ static void remove_power_attributes(struct device *dev)
+ {
++ sysfs_unmerge_group(&dev->kobj, &usb3_hardware_lpm_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &power_attr_group);
+ }
+@@ -1168,14 +1169,24 @@ static ssize_t interface_authorized_store(struct device *dev,
+ {
+ struct usb_interface *intf = to_usb_interface(dev);
+ bool val;
++ struct kernfs_node *kn;
+
+ if (kstrtobool(buf, &val) != 0)
+ return -EINVAL;
+
+- if (val)
++ if (val) {
+ usb_authorize_interface(intf);
+- else
+- usb_deauthorize_interface(intf);
++ } else {
++ /*
++ * Prevent deadlock if another process is concurrently
++ * trying to unregister intf.
++ */
++ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
++ if (kn) {
++ usb_deauthorize_interface(intf);
++ sysfs_unbreak_active_protection(kn);
++ }
++ }
+
+ return count;
+ }
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index c92a1da46a0147..a141f83aba0cce 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -729,8 +729,14 @@ struct dwc2_dregs_backup {
+ * struct dwc2_hregs_backup - Holds host registers state before
+ * entering partial power down
+ * @hcfg: Backup of HCFG register
++ * @hflbaddr: Backup of HFLBADDR register
+ * @haintmsk: Backup of HAINTMSK register
++ * @hcchar: Backup of HCCHAR register
++ * @hcsplt: Backup of HCSPLT register
+ * @hcintmsk: Backup of HCINTMSK register
++ * @hctsiz: Backup of HCTSIZ register
++ * @hcidma: Backup of HCDMA register
++ * @hcidmab: Backup of HCDMAB register
+ * @hprt0: Backup of HPTR0 register
+ * @hfir: Backup of HFIR register
+ * @hptxfsiz: Backup of HPTXFSIZ register
+@@ -738,8 +744,14 @@ struct dwc2_dregs_backup {
+ */
+ struct dwc2_hregs_backup {
+ u32 hcfg;
++ u32 hflbaddr;
+ u32 haintmsk;
++ u32 hcchar[MAX_EPS_CHANNELS];
++ u32 hcsplt[MAX_EPS_CHANNELS];
+ u32 hcintmsk[MAX_EPS_CHANNELS];
++ u32 hctsiz[MAX_EPS_CHANNELS];
++ u32 hcidma[MAX_EPS_CHANNELS];
++ u32 hcidmab[MAX_EPS_CHANNELS];
+ u32 hprt0;
+ u32 hfir;
+ u32 hptxfsiz;
+@@ -1086,6 +1098,7 @@ struct dwc2_hsotg {
+ bool needs_byte_swap;
+
+ /* DWC OTG HW Release versions */
++#define DWC2_CORE_REV_4_30a 0x4f54430a
+ #define DWC2_CORE_REV_2_71a 0x4f54271a
+ #define DWC2_CORE_REV_2_72a 0x4f54272a
+ #define DWC2_CORE_REV_2_80a 0x4f54280a
+@@ -1323,6 +1336,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg);
+ int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);
+
+ void dwc2_enable_acg(struct dwc2_hsotg *hsotg);
++void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup);
+
+ /* This function should be called on every hardware interrupt. */
+ irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 158ede7538548e..26d752a4c3ca95 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -297,7 +297,8 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
++ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ }
+
+@@ -322,10 +323,11 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+-static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
++void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup)
+ {
+ u32 glpmcfg;
+- u32 i = 0;
++ u32 pcgctl;
++ u32 dctl;
+
+ if (hsotg->lx_state != DWC2_L1) {
+ dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
+@@ -334,37 +336,55 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
+
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (dwc2_is_device_mode(hsotg)) {
+- dev_dbg(hsotg->dev, "Exit from L1 state\n");
++ dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup);
+ glpmcfg &= ~GLPMCFG_ENBLSLPM;
+- glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
++ glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK;
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
+
+- do {
+- glpmcfg = dwc2_readl(hsotg, GLPMCFG);
++ pcgctl = dwc2_readl(hsotg, PCGCTL);
++ pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;
++ dwc2_writel(hsotg, pcgctl, PCGCTL);
+
+- if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
+- GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
+- break;
++ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
++ if (glpmcfg & GLPMCFG_ENBESL) {
++ glpmcfg |= GLPMCFG_RSTRSLPSTS;
++ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
++ }
++
++ if (remotewakeup) {
++ if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) {
++ dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__);
++				goto fail;
++ }
++
++ dctl = dwc2_readl(hsotg, DCTL);
++ dctl |= DCTL_RMTWKUPSIG;
++ dwc2_writel(hsotg, dctl, DCTL);
+
+- udelay(1);
+- } while (++i < 200);
++ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) {
++ dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__);
++			goto fail;
++ }
++ }
+
+- if (i == 200) {
+- dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
++ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
++ if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS ||
++ glpmcfg & GLPMCFG_L1RESUMEOK) {
++ goto fail;
+ return;
+ }
+- dwc2_gadget_init_lpm(hsotg);
++
++ /* Inform gadget to exit from L1 */
++ call_gadget(hsotg, resume);
++ /* Change to L0 state */
++ hsotg->lx_state = DWC2_L0;
++ hsotg->bus_suspended = false;
++fail: dwc2_gadget_init_lpm(hsotg);
+ } else {
+ /* TODO */
+ dev_err(hsotg->dev, "Host side LPM is not supported.\n");
+ return;
+ }
+-
+- /* Change to L0 state */
+- hsotg->lx_state = DWC2_L0;
+-
+- /* Inform gadget to exit from L1 */
+- call_gadget(hsotg, resume);
+ }
+
+ /*
+@@ -385,7 +407,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+ dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
+
+ if (hsotg->lx_state == DWC2_L1) {
+- dwc2_wakeup_from_lpm_l1(hsotg);
++ dwc2_wakeup_from_lpm_l1(hsotg, false);
+ return;
+ }
+
+@@ -408,7 +430,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
++ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ } else {
+ /* Change to L0 state */
+@@ -425,7 +448,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
+ }
+
+ if (hsotg->params.power_down ==
+- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
++ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++ !hsotg->params.no_clock_gating)
+ dwc2_host_exit_clock_gating(hsotg, 1);
+
+ /*
+diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
+index a8605b02115b1c..1ad8fa3f862a15 100644
+--- a/drivers/usb/dwc2/drd.c
++++ b/drivers/usb/dwc2/drd.c
+@@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
+ role = USB_ROLE_DEVICE;
+ }
+
++ if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) ||
++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) &&
++ dwc2_is_device_mode(hsotg) &&
++ hsotg->lx_state == DWC2_L2 &&
++ hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
++ hsotg->bus_suspended &&
++ !hsotg->params.no_clock_gating)
++ dwc2_gadget_exit_clock_gating(hsotg, 0);
++
+ if (role == USB_ROLE_HOST) {
+ already = dwc2_ovr_avalid(hsotg, true);
+ } else if (role == USB_ROLE_DEVICE) {
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index b517a7216de22a..b2f6da5b65ccd0 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -1415,6 +1415,10 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ ep->name, req, req->length, req->buf, req->no_interrupt,
+ req->zero, req->short_not_ok);
+
++ if (hs->lx_state == DWC2_L1) {
++ dwc2_wakeup_from_lpm_l1(hs, true);
++ }
++
+ /* Prevent new request submission when controller is suspended */
+ if (hs->lx_state != DWC2_L0) {
+ dev_dbg(hs->dev, "%s: submit request only in active state\n",
+@@ -3727,6 +3731,12 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
+ if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
+ dwc2_exit_partial_power_down(hsotg, 0, true);
+
++ /* Exit gadget mode clock gating. */
++ if (hsotg->params.power_down ==
++ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
++ !hsotg->params.no_clock_gating)
++ dwc2_gadget_exit_clock_gating(hsotg, 0);
++
+ hsotg->lx_state = DWC2_L0;
+ }
+
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 657f1f659ffaf8..dd5b1c5691e11e 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -2701,8 +2701,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
+ hsotg->available_host_channels--;
+ }
+ qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+- if (dwc2_assign_and_init_hc(hsotg, qh))
++ if (dwc2_assign_and_init_hc(hsotg, qh)) {
++ if (hsotg->params.uframe_sched)
++ hsotg->available_host_channels++;
+ break;
++ }
+
+ /*
+ * Move the QH from the periodic ready schedule to the
+@@ -2735,8 +2738,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
+ hsotg->available_host_channels--;
+ }
+
+- if (dwc2_assign_and_init_hc(hsotg, qh))
++ if (dwc2_assign_and_init_hc(hsotg, qh)) {
++ if (hsotg->params.uframe_sched)
++ hsotg->available_host_channels++;
+ break;
++ }
+
+ /*
+ * Move the QH from the non-periodic inactive schedule to the
+@@ -4143,6 +4149,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ urb->actual_length);
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
++ if (!hsotg->params.dma_desc_enable)
++ urb->start_frame = qtd->qh->start_active_frame;
+ urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
+ for (i = 0; i < urb->number_of_packets; ++i) {
+ urb->iso_frame_desc[i].actual_length =
+@@ -4649,7 +4657,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ }
+
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+- hsotg->bus_suspended) {
++ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ else
+@@ -4769,8 +4777,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+ fail2:
+- spin_unlock_irqrestore(&hsotg->lock, flags);
+ urb->hcpriv = NULL;
++ spin_unlock_irqrestore(&hsotg->lock, flags);
+ kfree(qtd);
+ fail1:
+ if (qh_allocated) {
+@@ -5406,9 +5414,16 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+ /* Backup Host regs */
+ hr = &hsotg->hr_backup;
+ hr->hcfg = dwc2_readl(hsotg, HCFG);
++ hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR);
+ hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
+- for (i = 0; i < hsotg->params.host_channels; ++i)
++ for (i = 0; i < hsotg->params.host_channels; ++i) {
++ hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i));
++ hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i));
+ hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
++ hr->hctsiz[i] = dwc2_readl(hsotg, HCTSIZ(i));
++ hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i));
++ hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i));
++ }
+
+ hr->hprt0 = dwc2_read_hprt0(hsotg);
+ hr->hfir = dwc2_readl(hsotg, HFIR);
+@@ -5442,10 +5457,17 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+ hr->valid = false;
+
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
++ dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR);
+ dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
+
+- for (i = 0; i < hsotg->params.host_channels; ++i)
++ for (i = 0; i < hsotg->params.host_channels; ++i) {
++ dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i));
++ dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i));
+ dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
++ dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i));
++ dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i));
++ dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i));
++ }
+
+ dwc2_writel(hsotg, hr->hprt0, HPRT0);
+ dwc2_writel(hsotg, hr->hfir, HFIR);
+@@ -5610,10 +5632,12 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ dwc2_writel(hsotg, hr->hcfg, HCFG);
+
+ /* De-assert Wakeup Logic */
+- gpwrdn = dwc2_readl(hsotg, GPWRDN);
+- gpwrdn &= ~GPWRDN_PMUACTV;
+- dwc2_writel(hsotg, gpwrdn, GPWRDN);
+- udelay(10);
++ if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
++ gpwrdn = dwc2_readl(hsotg, GPWRDN);
++ gpwrdn &= ~GPWRDN_PMUACTV;
++ dwc2_writel(hsotg, gpwrdn, GPWRDN);
++ udelay(10);
++ }
+
+ hprt0 = hr->hprt0;
+ hprt0 |= HPRT0_PWR;
+@@ -5638,6 +5662,13 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ hprt0 |= HPRT0_RES;
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
++ /* De-assert Wakeup Logic */
++ if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
++ gpwrdn = dwc2_readl(hsotg, GPWRDN);
++ gpwrdn &= ~GPWRDN_PMUACTV;
++ dwc2_writel(hsotg, gpwrdn, GPWRDN);
++ udelay(10);
++ }
+ /* Wait for Resume time and then program HPRT again */
+ mdelay(100);
+ hprt0 &= ~HPRT0_RES;
+diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
+index 6b4d825e97a2d9..994a78ad084b1c 100644
+--- a/drivers/usb/dwc2/hcd_ddma.c
++++ b/drivers/usb/dwc2/hcd_ddma.c
+@@ -559,7 +559,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ idx = qh->td_last;
+ inc = qh->host_interval;
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+- cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
++ cur_idx = idx;
+ next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
+
+ /*
+@@ -866,20 +866,27 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ {
+ struct dwc2_dma_desc *dma_desc;
+ struct dwc2_hcd_iso_packet_desc *frame_desc;
++ u16 frame_desc_idx;
++ struct urb *usb_urb;
+ u16 remain = 0;
+ int rc = 0;
+
+ if (!qtd->urb)
+ return -EINVAL;
+
++ usb_urb = qtd->urb->priv;
++
+ dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+ sizeof(struct dwc2_dma_desc)),
+ sizeof(struct dwc2_dma_desc),
+ DMA_FROM_DEVICE);
+
+ dma_desc = &qh->desc_list[idx];
++ frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1);
+
+- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
++ frame_desc = &qtd->urb->iso_descs[frame_desc_idx];
++ if (idx == qtd->isoc_td_first)
++ usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg);
+ dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
+ if (chan->ep_is_in)
+ remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
+@@ -900,7 +907,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
+ frame_desc->status = 0;
+ }
+
+- if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
++ if (++qtd->isoc_frame_index == usb_urb->number_of_packets) {
+ /*
+ * urb->status is not used for isoc transfers here. The
+ * individual frame_desc status are used instead.
+@@ -1005,11 +1012,11 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
+ return;
+ idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
+ chan->speed);
+- if (!rc)
++ if (rc == 0)
+ continue;
+
+- if (rc == DWC2_CMPL_DONE)
+- break;
++ if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
++ goto stop_scan;
+
+ /* rc == DWC2_CMPL_STOP */
+
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index 0144ca8350c312..5c7538d498dd11 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -2015,15 +2015,18 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ {
+ struct dwc2_qtd *qtd;
+ struct dwc2_host_chan *chan;
+- u32 hcint, hcintmsk;
++ u32 hcint, hcintraw, hcintmsk;
+
+ chan = hsotg->hc_ptr_array[chnum];
+
+- hcint = dwc2_readl(hsotg, HCINT(chnum));
++ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
+ hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
++ hcint = hcintraw & hcintmsk;
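++	/* ack only enabled bits; chan->hcint later gets the raw value */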
++ dwc2_writel(hsotg, hcint, HCINT(chnum));
++
+ if (!chan) {
+ dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
+- dwc2_writel(hsotg, hcint, HCINT(chnum));
+ return;
+ }
+
+@@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ chnum);
+ dev_vdbg(hsotg->dev,
+ " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+- hcint, hcintmsk, hcint & hcintmsk);
++ hcintraw, hcintmsk, hcint);
+ }
+
+- dwc2_writel(hsotg, hcint, HCINT(chnum));
+-
+ /*
+ * If we got an interrupt after someone called
+ * dwc2_hcd_endpoint_disable() we don't want to crash below
+@@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
+ return;
+ }
+
+- chan->hcint = hcint;
+- hcint &= hcintmsk;
++ chan->hcint = hcintraw;
+
+ /*
+ * If the channel was halted due to a dequeue, the qtd list might
+diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
+index 13abdd5f675299..12f8c7f86dc980 100644
+--- a/drivers/usb/dwc2/hw.h
++++ b/drivers/usb/dwc2/hw.h
+@@ -698,7 +698,7 @@
+ #define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
+ #define TXSTS_QTOP_TOKEN_SHIFT 25
+ #define TXSTS_QTOP_TERMINATE BIT(24)
+-#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
++#define TXSTS_QSPCAVAIL_MASK (0x7f << 16)
+ #define TXSTS_QSPCAVAIL_SHIFT 16
+ #define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
+ #define TXSTS_FSPCAVAIL_SHIFT 0
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index b1d48019e944f3..c1b7209b94836c 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -331,7 +331,7 @@ static void dwc2_driver_remove(struct platform_device *dev)
+
+ /* Exit clock gating when driver is removed. */
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+- hsotg->bus_suspended) {
++ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
+ if (dwc2_is_device_mode(hsotg))
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ else
+@@ -469,18 +469,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
+
+ spin_lock_init(&hsotg->lock);
+
+- hsotg->irq = platform_get_irq(dev, 0);
+- if (hsotg->irq < 0)
+- return hsotg->irq;
+-
+- dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
+- hsotg->irq);
+- retval = devm_request_irq(hsotg->dev, hsotg->irq,
+- dwc2_handle_common_intr, IRQF_SHARED,
+- dev_name(hsotg->dev), hsotg);
+- if (retval)
+- return retval;
+-
+ hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
+ if (IS_ERR(hsotg->vbus_supply)) {
+ retval = PTR_ERR(hsotg->vbus_supply);
+@@ -524,6 +512,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
+ if (retval)
+ goto error;
+
++ hsotg->irq = platform_get_irq(dev, 0);
++ if (hsotg->irq < 0) {
++ retval = hsotg->irq;
++ goto error;
++ }
++
++ dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
++ hsotg->irq);
++ retval = devm_request_irq(hsotg->dev, hsotg->irq,
++ dwc2_handle_common_intr, IRQF_SHARED,
++ dev_name(hsotg->dev), hsotg);
++ if (retval)
++ goto error;
++
+ /*
+ * For OTG cores, set the force mode bits to reflect the value
+ * of dr_mode. Force mode bits should not be touched at any
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 343d2570189ff9..af851e4e8c8a76 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -104,6 +104,28 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
+ return 0;
+ }
+
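++/* set or clear USB3/USB2 PHY suspend for port 0, honoring the susphy quirks */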
++void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
++{
++ u32 reg;
++
++ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
++ if (enable && !dwc->dis_u3_susphy_quirk)
++ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
++ else
++ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
++
++ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
++
++ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++ if (enable && !dwc->dis_u2_susphy_quirk)
++ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
++ else
++ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++
++ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++}
++
+ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+ {
+ u32 reg;
+@@ -277,48 +298,11 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
+ /*
+ * We're resetting only the device side because, if we're in host mode,
+ * XHCI driver will reset the host block. If dwc3 was configured for
+- * host-only mode or current role is host, then we can return early.
++ * host-only mode, then we can return early.
+ */
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ return 0;
+
+- /*
+- * If the dr_mode is host and the dwc->current_dr_role is not the
+- * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
+- * isn't executed yet. Ensure the phy is ready before the controller
+- * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
+- * the phy.
+- *
+- * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
+- * is port index. If this is a multiport host, then we need to reset
+- * all active ports.
+- */
+- if (dwc->dr_mode == USB_DR_MODE_HOST) {
+- u32 usb3_port;
+- u32 usb2_port;
+-
+- usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+- usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+-
+- usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+- usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+-
+- /* Small delay for phy reset assertion */
+- usleep_range(1000, 2000);
+-
+- usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+-
+- usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+-
+- /* Wait for clock synchronization */
+- msleep(50);
+- return 0;
+- }
+-
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CSFTRST;
+ reg &= ~DWC3_DCTL_RUN_STOP;
+@@ -522,6 +506,13 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
+ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
+ {
+ struct dwc3_event_buffer *evt;
++ unsigned int hw_mode;
++
++ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
++ if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
++ dwc->ev_buf = NULL;
++ return 0;
++ }
+
+ evt = dwc3_alloc_one_event_buffer(dwc, length);
+ if (IS_ERR(evt)) {
+@@ -542,6 +533,10 @@ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned int length)
+ int dwc3_event_buffers_setup(struct dwc3 *dwc)
+ {
+ struct dwc3_event_buffer *evt;
++ u32 reg;
++
++ if (!dwc->ev_buf)
++ return 0;
+
+ evt = dwc->ev_buf;
+ evt->lpos = 0;
+@@ -551,14 +546,27 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
+ upper_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
+- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
+
++ /* Clear any stale event */
++ reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
++ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
+ return 0;
+ }
+
+ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+ {
+ struct dwc3_event_buffer *evt;
++ u32 reg;
++
++ if (!dwc->ev_buf)
++ return;
++ /*
++ * Exynos platforms may not be able to access event buffer if the
++ * controller failed to halt on dwc3_core_exit().
++ */
++ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
++ if (!(reg & DWC3_DSTS_DEVCTRLHLT))
++ return;
+
+ evt = dwc->ev_buf;
+
+@@ -568,7 +576,10 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
+ | DWC3_GEVNTSIZ_SIZE(0));
+- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
++
++ /* Clear any stale event */
++ reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
++ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
+ }
+
+ static void dwc3_core_num_eps(struct dwc3 *dwc)
+@@ -622,11 +633,8 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
+ */
+ static int dwc3_phy_setup(struct dwc3 *dwc)
+ {
+- unsigned int hw_mode;
+ u32 reg;
+
+- hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+-
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+ /*
+@@ -636,21 +644,16 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+
+ /*
+- * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
+- * to '0' during coreConsultant configuration. So default value
+- * will be '0' when the core is reset. Application needs to set it
+- * to '1' after the core initialization is completed.
+- */
+- if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+- reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+-
+- /*
+- * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+- * power-on reset, and it can be set after core initialization, which is
+- * after device soft-reset during initialization.
++ * Above DWC_usb3.0 1.94a, it is recommended to set
++ * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
++	 * So the default value will be '0' when the core is reset. The
++	 * application needs to set it to '1' once core initialization completes.
++ *
++ * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
++ * cleared after power-on reset, and it can be set after core
++ * initialization.
+ */
+- if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+- reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
++ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+ if (dwc->u2ss_inp3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
+@@ -676,9 +679,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ if (dwc->tx_de_emphasis_quirk)
+ reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
+
+- if (dwc->dis_u3_susphy_quirk)
+- reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+-
+ if (dwc->dis_del_phy_power_chg_quirk)
+ reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
+
+@@ -726,24 +726,15 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
+ }
+
+ /*
+- * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
+- * '0' during coreConsultant configuration. So default value will
+- * be '0' when the core is reset. Application needs to set it to
+- * '1' after the core initialization is completed.
+- */
+- if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+-
+- /*
+- * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+- * power-on reset, and it can be set after core initialization, which is
+- * after device soft-reset during initialization.
++ * Above DWC_usb3.0 1.94a, it is recommended to set
++ * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
++	 * So the default value will be '0' when the core is reset. The
++	 * application needs to set it to '1' once core initialization completes.
++ *
++ * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
++ * after power-on reset, and it can be set after core initialization.
+ */
+- if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+-
+- if (dwc->dis_u2_susphy_quirk)
+- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
+ if (dwc->dis_enblslpm_quirk)
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+@@ -901,12 +892,16 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
+
+ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+ {
++ unsigned int power_opt;
++ unsigned int hw_mode;
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
++ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
++ power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
+
+- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
++ switch (power_opt) {
+ case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ /**
+ * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
+@@ -939,6 +934,20 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+ break;
+ }
+
++ /*
++	 * This is a workaround for STAR#4846132, which only affects
++	 * DWC_usb31 version 2.00a operating in host mode.
++	 *
++	 * On that core a CSR read can time out when it coincides
++	 * with RAM clock gating entry. Disable clock gating to
++	 * avoid the timeout; this sacrifices some power
++	 * consumption during normal operation but keeps CSR
++	 * accesses reliable.
++ */
++ if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
++ hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
++ reg |= DWC3_GCTL_DSBLCLKGTNG;
++
+ /* check if current dwc3 is on simulation board */
+ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
+ dev_info(dwc->dev, "Running with FPGA optimizations\n");
+@@ -1094,6 +1103,112 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+ }
+ }
+
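++/* program RX/TX threshold and max-burst settings taken from snps,* properties */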
++static void dwc3_config_threshold(struct dwc3 *dwc)
++{
++ u32 reg;
++ u8 rx_thr_num;
++ u8 rx_maxburst;
++ u8 tx_thr_num;
++ u8 tx_maxburst;
++
++ /*
++ * Must config both number of packets and max burst settings to enable
++ * RX and/or TX threshold.
++ */
++ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
++ rx_thr_num = dwc->rx_thr_num_pkt_prd;
++ rx_maxburst = dwc->rx_max_burst_prd;
++ tx_thr_num = dwc->tx_thr_num_pkt_prd;
++ tx_maxburst = dwc->tx_max_burst_prd;
++
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC31_RXTHRNUMPKTSEL_PRD;
++
++ reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
++ reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
++
++ reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
++ reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC31_TXTHRNUMPKTSEL_PRD;
++
++ reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
++ reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
++
++ reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
++ reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ }
++
++ rx_thr_num = dwc->rx_thr_num_pkt;
++ rx_maxburst = dwc->rx_max_burst;
++ tx_thr_num = dwc->tx_thr_num_pkt;
++ tx_maxburst = dwc->tx_max_burst;
++
++ if (DWC3_IP_IS(DWC3)) {
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
++ reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++ reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++ reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
++ reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++ reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++ reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ } else {
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
++ reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++ reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++ reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
++ reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++ reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++ reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ }
++}
++
+ /**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+@@ -1145,21 +1259,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ if (ret)
+ goto err_exit_phy;
+
+- if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+- !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
+- if (!dwc->dis_u3_susphy_quirk) {
+- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+- reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+- }
+-
+- if (!dwc->dis_u2_susphy_quirk) {
+- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+- }
+- }
+-
+ dwc3_core_setup_global_control(dwc);
+ dwc3_core_num_eps(dwc);
+
+@@ -1195,6 +1294,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+ }
+
++ /*
++ * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
++	 * only. If the PM TIMER ECN is enabled through GUCTL2[19], the
++ * link compliance test (TD7.21) may fail. If the ECN is not
++ * enabled (GUCTL2[19] = 0), the controller will use the old timer
++ * value (5us), which is still acceptable for the link compliance
++	 * test. Therefore, do not enable PM TIMER ECN in 3.20a by
++ * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
++ */
++ if (DWC3_VER_IS(DWC3, 320A)) {
++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
++ reg &= ~DWC3_GUCTL2_LC_TIMER;
++ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
++ }
++
+ /*
+ * When configured in HOST mode, after issuing U3/L2 exit controller
+ * fails to send proper CRC checksum in CRC5 feild. Because of this
+@@ -1246,42 +1360,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+- /*
+- * Must config both number of packets and max burst settings to enable
+- * RX and/or TX threshold.
+- */
+- if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+- u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
+- u8 rx_maxburst = dwc->rx_max_burst_prd;
+- u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
+- u8 tx_maxburst = dwc->tx_max_burst_prd;
+-
+- if (rx_thr_num && rx_maxburst) {
+- reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+- reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+-
+- reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+- reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+-
+- reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+- reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+-
+- dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+- }
+-
+- if (tx_thr_num && tx_maxburst) {
+- reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+- reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+-
+- reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+- reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+-
+- reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+- reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+-
+- dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+- }
+- }
++ dwc3_config_threshold(dwc);
+
+ return 0;
+
+@@ -1417,6 +1496,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ u8 lpm_nyet_threshold;
+ u8 tx_de_emphasis;
+ u8 hird_threshold;
++ u8 rx_thr_num_pkt = 0;
++ u8 rx_max_burst = 0;
++ u8 tx_thr_num_pkt = 0;
++ u8 tx_max_burst = 0;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+@@ -1456,6 +1539,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ else
+ dwc->sysdev = dwc->dev;
+
++ dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
++
+ ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
+ if (ret >= 0) {
+ dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
+@@ -1479,6 +1564,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ "snps,usb2-lpm-disable");
+ dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+ "snps,usb2-gadget-lpm-disable");
++ device_property_read_u8(dev, "snps,rx-thr-num-pkt",
++ &rx_thr_num_pkt);
++ device_property_read_u8(dev, "snps,rx-max-burst",
++ &rx_max_burst);
++ device_property_read_u8(dev, "snps,tx-thr-num-pkt",
++ &tx_thr_num_pkt);
++ device_property_read_u8(dev, "snps,tx-max-burst",
++ &tx_max_burst);
+ device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ &rx_thr_num_pkt_prd);
+ device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1560,6 +1653,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+
+ dwc->hird_threshold = hird_threshold;
+
++ dwc->rx_thr_num_pkt = rx_thr_num_pkt;
++ dwc->rx_max_burst = rx_max_burst;
++
++ dwc->tx_thr_num_pkt = tx_thr_num_pkt;
++ dwc->tx_max_burst = tx_max_burst;
++
+ dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
+ dwc->rx_max_burst_prd = rx_max_burst_prd;
+
+@@ -1918,6 +2017,8 @@ static int dwc3_probe(struct platform_device *pdev)
+
+ pm_runtime_put(dev);
+
++ dma_set_max_seg_size(dev, UINT_MAX);
++
+ return 0;
+
+ err_exit_debugfs:
+@@ -2003,7 +2104,6 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc)
+
+ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+- unsigned long flags;
+ u32 reg;
+
+ switch (dwc->current_dr_role) {
+@@ -2041,9 +2141,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ break;
+
+ if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+- spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_suspend(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+ synchronize_irq(dwc->irq_gadget);
+ }
+
+@@ -2060,7 +2158,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+
+ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+- unsigned long flags;
+ int ret;
+ u32 reg;
+
+@@ -2109,9 +2206,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
+ dwc3_otg_host_init(dwc);
+ } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+- spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_resume(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+
+ break;
+@@ -2165,7 +2260,11 @@ static int dwc3_runtime_resume(struct device *dev)
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+- dwc3_gadget_process_pending_events(dwc);
++ if (dwc->pending_events) {
++ pm_runtime_put(dwc->dev);
++ dwc->pending_events = false;
++ enable_irq(dwc->irq_gadget);
++ }
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
+ default:
+@@ -2252,6 +2351,12 @@ static void dwc3_complete(struct device *dev)
+ static const struct dev_pm_ops dwc3_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ .complete = dwc3_complete,
++
++ /*
++ * Runtime suspend halts the controller on disconnection. It relies on
++ * platforms with custom connection notification to start the controller
++ * again.
++ */
+ SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+ dwc3_runtime_idle)
+ };
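
The threshold hunks above lean on one idiom throughout: read the register, clear a multi-bit field by AND-ing with the complement of the field macro applied to ~0, OR in the new value, write it back. A minimal user-space sketch of that idiom, reusing the DWC31_GRXTHRCFG-style macro shape from the patch; reg_read()/reg_write() are stand-ins for dwc3_readl()/dwc3_writel():

    #include <stdio.h>
    #include <stdint.h>

    /* Same shape as the DWC31_GRXTHRCFG_* macros in the hunks above:
     * mask the low bits of n, then shift the value into position. */
    #define RXPKTCNT(n)  (((n) & 0x1f) << 21)
    #define PKTCNTSEL    (1u << 29)

    static uint32_t fake_reg = 0xffffffff;  /* stand-in for the MMIO register */
    static uint32_t reg_read(void) { return fake_reg; }
    static void reg_write(uint32_t v) { fake_reg = v; }

    int main(void)
    {
        uint32_t reg = reg_read();

        reg |= PKTCNTSEL;        /* select packet-count mode */
        reg &= ~RXPKTCNT(~0);    /* FIELD(~0) doubles as the field mask */
        reg |= RXPKTCNT(9);      /* install the new count */
        reg_write(reg);

        printf("field now %u\n", (reg >> 21) & 0x1f);  /* prints 9 */
        return 0;
    }

Passing ~0 through the macro is what makes it double as the field mask, so the driver needs no separate *_MASK constants.
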
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index a69ac67d89fe68..420753205fafa1 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -211,6 +211,11 @@
+ #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+ #define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+
++/* Global TX Threshold Configuration Register */
++#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
++#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
++#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
++
+ /* Global RX Threshold Configuration Register for DWC_usb31 only */
+ #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16)
+ #define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21)
+@@ -403,6 +408,7 @@
+
+ /* Global User Control Register 2 */
+ #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
++#define DWC3_GUCTL2_LC_TIMER BIT(19)
+
+ /* Global User Control Register 3 */
+ #define DWC3_GUCTL3_SPLITDISABLE BIT(14)
+@@ -1045,6 +1051,10 @@ struct dwc3_scratchpad_array {
+ * @test_mode_nr: test feature selector
+ * @lpm_nyet_threshold: LPM NYET response threshold
+ * @hird_threshold: HIRD threshold
++ * @rx_thr_num_pkt: USB receive packet count
++ * @rx_max_burst: max USB receive burst size
++ * @tx_thr_num_pkt: USB transmit packet count
++ * @tx_max_burst: max USB transmit burst size
+ * @rx_thr_num_pkt_prd: periodic ESS receive packet count
+ * @rx_max_burst_prd: max periodic ESS receive burst size
+ * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
+@@ -1114,6 +1124,7 @@ struct dwc3_scratchpad_array {
+ * 3 - Reserved
+ * @dis_metastability_quirk: set to disable metastability quirk.
+ * @dis_split_quirk: set to disable split boundary.
++ * @sys_wakeup: set if the device may do system wakeup.
+ * @wakeup_configured: set if the device is configured for remote wakeup.
+ * @suspended: set to track suspend event due to U3/L2.
+ * @imod_interval: set the interrupt moderation interval in 250ns
+@@ -1228,6 +1239,7 @@ struct dwc3 {
+ #define DWC3_REVISION_290A 0x5533290a
+ #define DWC3_REVISION_300A 0x5533300a
+ #define DWC3_REVISION_310A 0x5533310a
++#define DWC3_REVISION_320A 0x5533320a
+ #define DWC3_REVISION_330A 0x5533330a
+
+ #define DWC31_REVISION_ANY 0x0
+@@ -1237,6 +1249,7 @@ struct dwc3 {
+ #define DWC31_REVISION_170A 0x3137302a
+ #define DWC31_REVISION_180A 0x3138302a
+ #define DWC31_REVISION_190A 0x3139302a
++#define DWC31_REVISION_200A 0x3230302a
+
+ #define DWC32_REVISION_ANY 0x0
+ #define DWC32_REVISION_100A 0x3130302a
+@@ -1273,6 +1286,10 @@ struct dwc3 {
+ u8 test_mode_nr;
+ u8 lpm_nyet_threshold;
+ u8 hird_threshold;
++ u8 rx_thr_num_pkt;
++ u8 rx_max_burst;
++ u8 tx_thr_num_pkt;
++ u8 tx_max_burst;
+ u8 rx_thr_num_pkt_prd;
+ u8 rx_max_burst_prd;
+ u8 tx_thr_num_pkt_prd;
+@@ -1331,6 +1348,7 @@ struct dwc3 {
+
+ unsigned dis_split_quirk:1;
+ unsigned async_callbacks:1;
++ unsigned sys_wakeup:1;
+ unsigned wakeup_configured:1;
+ unsigned suspended:1;
+
+@@ -1552,6 +1570,7 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc);
+ void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+
+ int dwc3_core_soft_reset(struct dwc3 *dwc);
++void dwc3_enable_susphy(struct dwc3 *dwc, bool enable);
+
+ #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+ int dwc3_host_init(struct dwc3 *dwc);
+@@ -1624,7 +1643,6 @@ static inline void dwc3_otg_host_init(struct dwc3 *dwc)
+ #if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
+ int dwc3_gadget_suspend(struct dwc3 *dwc);
+ int dwc3_gadget_resume(struct dwc3 *dwc);
+-void dwc3_gadget_process_pending_events(struct dwc3 *dwc);
+ #else
+ static inline int dwc3_gadget_suspend(struct dwc3 *dwc)
+ {
+@@ -1636,9 +1654,6 @@ static inline int dwc3_gadget_resume(struct dwc3 *dwc)
+ return 0;
+ }
+
+-static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+-{
+-}
+ #endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
+
+ #if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
+diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
+index 039bf241769afb..57ddd2e43022eb 100644
+--- a/drivers/usb/dwc3/drd.c
++++ b/drivers/usb/dwc3/drd.c
+@@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
+ dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
+ }
++ dwc3_set_mode(dwc, mode);
+
+ dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
+ dwc3_role_switch.set = dwc3_usb_role_switch_set;
+@@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
+ }
+ }
+
+- dwc3_set_mode(dwc, mode);
+ return 0;
+ }
+ #else
+diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
+index 90a587bc29b74e..ea6e29091c0c9a 100644
+--- a/drivers/usb/dwc3/dwc3-am62.c
++++ b/drivers/usb/dwc3/dwc3-am62.c
+@@ -267,21 +267,15 @@ static int dwc3_ti_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int dwc3_ti_remove_core(struct device *dev, void *c)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- platform_device_unregister(pdev);
+- return 0;
+-}
+-
+ static void dwc3_ti_remove(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
+ u32 reg;
+
+- device_for_each_child(dev, NULL, dwc3_ti_remove_core);
++ pm_runtime_get_sync(dev);
++ device_init_wakeup(dev, false);
++ of_platform_depopulate(dev);
+
+ /* Clear mode valid bit */
+ reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
+@@ -289,7 +283,6 @@ static void dwc3_ti_remove(struct platform_device *pdev)
+ dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
+
+ pm_runtime_put_sync(dev);
+- clk_disable_unprepare(am62->usb2_refclk);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index d5c77db4daa920..2a11fc0ee84f1e 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -522,11 +522,13 @@ static int dwc3_omap_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ omap->irq, ret);
+- goto err1;
++ goto err2;
+ }
+ dwc3_omap_enable_irqs(omap);
+ return 0;
+
++err2:
++ of_platform_depopulate(dev);
+ err1:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 6604845c397cd2..052852f8014676 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -8,6 +8,7 @@
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
++#include <linux/dmi.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -51,7 +52,12 @@
+ #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+ #define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
+ #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
++#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
+ #define PCI_DEVICE_ID_INTEL_TGL 0x9a15
++#define PCI_DEVICE_ID_INTEL_PTLH 0xe332
++#define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e
++#define PCI_DEVICE_ID_INTEL_PTLU 0xe432
++#define PCI_DEVICE_ID_INTEL_PTLU_PCH 0xe47e
+ #define PCI_DEVICE_ID_AMD_MR 0x163a
+
+ #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+@@ -219,6 +225,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+ struct gpio_desc *gpio;
++ const char *bios_ver;
+ int ret;
+
+ /* On BYT the FW does not always enable the refclock */
+@@ -276,8 +283,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ * detection. These can be identified by them _not_
+ * using the standard ACPI battery and ac drivers.
+ */
++ bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+ if (acpi_dev_present("INT33FD", "1", 2) &&
+- acpi_quirk_skip_acpi_ac_and_battery()) {
++ acpi_quirk_skip_acpi_ac_and_battery() &&
++ /* Lenovo Yoga Tablet 2 Pro 1380 uses LC824206XA instead */
++ !(bios_ver &&
++ strstarts(bios_ver, "BLADE_21.X64.0005.R00.1504101516"))) {
+ dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n");
+ swnode = &dwc3_pci_intel_phy_charger_detect_swnode;
+ }
+@@ -421,7 +432,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ { PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
++ { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
++ { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) },
++ { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) },
++ { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
++ { PCI_DEVICE_DATA(INTEL, PTLU_PCH, &dwc3_pci_intel_swnode) },
+
+ { PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
+ { PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) },
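
The Baytrail hunk above gates the TUSB1211 charger-detect software node on a DMI BIOS-version prefix match. strstarts() is a real helper from <linux/string.h>; here is a user-space sketch of the same check with a local reimplementation (the ".R1" tail on the sample string is hypothetical; only the prefix matters):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for the kernel's strstarts() from <linux/string.h>. */
    static bool strstarts(const char *str, const char *prefix)
    {
        return strncmp(str, prefix, strlen(prefix)) == 0;
    }

    int main(void)
    {
        const char *bios_ver = "BLADE_21.X64.0005.R00.1504101516.R1";

        if (bios_ver && strstarts(bios_ver, "BLADE_21.X64.0005.R00.1504101516"))
            puts("Yoga Tablet 2 Pro 1380: LC824206XA handles charger detection");
        else
            puts("Using TUSB1211 phy for charger detection");
        return 0;
    }
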
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 3de43df6bbe814..82544374110b03 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -549,7 +549,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
+@@ -564,7 +564,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 DP_HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
+@@ -579,7 +579,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 DM_HS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
+@@ -594,7 +594,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
++ IRQF_ONESHOT,
+ "qcom_dwc3 SS", qcom);
+ if (ret) {
+ dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
+@@ -758,6 +758,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ if (!qcom->dwc3) {
+ ret = -ENODEV;
+ dev_err(dev, "failed to get dwc3 platform device\n");
++ of_platform_depopulate(dev);
+ }
+
+ node_put:
+@@ -766,9 +767,9 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ return ret;
+ }
+
+-static struct platform_device *
+-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
++static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+ {
++ struct platform_device *urs_usb = NULL;
+ struct fwnode_handle *fwh;
+ struct acpi_device *adev;
+ char name[8];
+@@ -788,9 +789,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+
+ adev = to_acpi_device_node(fwh);
+ if (!adev)
+- return NULL;
++ goto err_put_handle;
++
++ urs_usb = acpi_create_platform_device(adev, NULL);
++ if (IS_ERR_OR_NULL(urs_usb))
++ goto err_put_handle;
++
++ return urs_usb;
++
++err_put_handle:
++ fwnode_handle_put(fwh);
++
++ return urs_usb;
++}
+
+- return acpi_create_platform_device(adev, NULL);
++static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
++{
++ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
++
++ platform_device_unregister(urs_usb);
++ fwnode_handle_put(fwh);
+ }
+
+ static int dwc3_qcom_probe(struct platform_device *pdev)
+@@ -874,13 +892,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ if (IS_ERR(qcom->qscratch_base)) {
+ ret = PTR_ERR(qcom->qscratch_base);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ ret = dwc3_qcom_setup_irq(pdev);
+ if (ret) {
+ dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
+- goto clk_disable;
++ goto free_urs;
+ }
+
+ /*
+@@ -899,7 +917,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+
+ if (ret) {
+ dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+- goto depopulate;
++ goto free_urs;
+ }
+
+ ret = dwc3_qcom_interconnect_init(qcom);
+@@ -931,10 +949,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ interconnect_exit:
+ dwc3_qcom_interconnect_exit(qcom);
+ depopulate:
+- if (np)
++ if (np) {
+ of_platform_depopulate(&pdev->dev);
+- else
+- platform_device_put(pdev);
++ } else {
++ device_remove_software_node(&qcom->dwc3->dev);
++ platform_device_del(qcom->dwc3);
++ }
++ platform_device_put(qcom->dwc3);
++free_urs:
++ if (qcom->urs_usb)
++ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+ clk_disable:
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+@@ -953,11 +977,16 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ int i;
+
+- device_remove_software_node(&qcom->dwc3->dev);
+- if (np)
++ if (np) {
+ of_platform_depopulate(&pdev->dev);
+- else
+- platform_device_put(pdev);
++ } else {
++ device_remove_software_node(&qcom->dwc3->dev);
++ platform_device_del(qcom->dwc3);
++ }
++ platform_device_put(qcom->dwc3);
++
++ if (qcom->urs_usb)
++ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
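
The dwc3-qcom probe rework above is mostly about error-path ordering: each new goto label undoes exactly the steps that had succeeded, in reverse. A compilable sketch of that LIFO unwind shape, with hypothetical step names standing in for the clocks, the urs_usb platform device, and the dwc3 core child:

    #include <stdio.h>
    #include <string.h>

    static int acquire(const char *what)
    {
        printf("acquire %s\n", what);
        return strcmp(what, "dwc3 core") ? 0 : -1;  /* fail the last step */
    }
    static void release(const char *what) { printf("release %s\n", what); }

    static int probe(void)
    {
        int ret;

        ret = acquire("clocks");
        if (ret)
            return ret;
        ret = acquire("urs_usb");
        if (ret)
            goto clk_disable;
        ret = acquire("dwc3 core");
        if (ret)
            goto free_urs;
        return 0;

    free_urs:               /* unwind in strict reverse order */
        release("urs_usb");
    clk_disable:
        release("clocks");
        return ret;
    }

    int main(void) { probe(); return 0; }
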
+diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
+index 211360eee95a0f..c8c7cd0c179693 100644
+--- a/drivers/usb/dwc3/dwc3-st.c
++++ b/drivers/usb/dwc3/dwc3-st.c
+@@ -219,10 +219,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ dwc3_data->regmap = regmap;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg");
+- if (!res) {
+- ret = -ENXIO;
+- goto undo_platform_dev_alloc;
+- }
++ if (!res)
++ return -ENXIO;
+
+ dwc3_data->syscfg_reg_off = res->start;
+
+@@ -233,8 +231,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ devm_reset_control_get_exclusive(dev, "powerdown");
+ if (IS_ERR(dwc3_data->rstc_pwrdn)) {
+ dev_err(&pdev->dev, "could not get power controller\n");
+- ret = PTR_ERR(dwc3_data->rstc_pwrdn);
+- goto undo_platform_dev_alloc;
++ return PTR_ERR(dwc3_data->rstc_pwrdn);
+ }
+
+ /* Manage PowerDown */
+@@ -269,7 +266,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ if (!child_pdev) {
+ dev_err(dev, "failed to find dwc3 core device\n");
+ ret = -ENODEV;
+- goto err_node_put;
++ goto depopulate;
+ }
+
+ dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
+@@ -285,6 +282,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ ret = st_dwc3_drd_init(dwc3_data);
+ if (ret) {
+ dev_err(dev, "drd initialisation failed\n");
++ of_platform_depopulate(dev);
+ goto undo_softreset;
+ }
+
+@@ -294,14 +292,14 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, dwc3_data);
+ return 0;
+
++depopulate:
++ of_platform_depopulate(dev);
+ err_node_put:
+ of_node_put(child);
+ undo_softreset:
+ reset_control_assert(dwc3_data->rstc_rst);
+ undo_powerdown:
+ reset_control_assert(dwc3_data->rstc_pwrdn);
+-undo_platform_dev_alloc:
+- platform_device_put(pdev);
+ return ret;
+ }
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index b9424323729379..6ae8a36f21cf68 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -238,7 +238,10 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+ struct dwc3_request *req;
+
+ req = next_request(&dep->pending_list);
+- dwc3_gadget_giveback(dep, req, -ECONNRESET);
++ if (!dwc->connected)
++ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
++ else
++ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ }
+
+ dwc->eps[0]->trb_enqueue = 0;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 858fe4c299b7af..867000cdeb9653 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -287,6 +287,23 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
+ *
+ * Caller should handle locking. This function will issue @cmd with given
+ * @params to @dep and wait for its completion.
++ *
++ * According to the programming guide, if the link state is in L1/L2/U3,
++ * then sending the Start Transfer command may not complete. The
++ * programming guide suggests bringing the link state back to ON/U0 by
++ * performing remote wakeup prior to sending the command. However, don't
++ * initiate remote wakeup when the user/function has not sent a wakeup
++ * request via the wakeup ops. Send the command only when it's allowed.
++ *
++ * Notes:
++ * For L1 link state, issuing a command requires the clearing of
++ * GUSB2PHYCFG.SUSPENDUSB2, which turns on the signal required to complete
++ * the given command (usually within 50us). This should happen within the
++ * command timeout set by driver. No additional step is needed.
++ *
++ * For L2 or U3 link state, the gadget is in USB suspend. Care should be
++ * taken when sending Start Transfer command to ensure that it's done after
++ * USB resume.
+ */
+ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
+@@ -327,30 +344,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
+- if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+- int link_state;
+-
+- /*
+- * Initiate remote wakeup if the link state is in U3 when
+- * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
+- * link state is in U1/U2, no remote wakeup is needed. The Start
+- * Transfer command will initiate the link recovery.
+- */
+- link_state = dwc3_gadget_get_link_state(dwc);
+- switch (link_state) {
+- case DWC3_LINK_STATE_U2:
+- if (dwc->gadget->speed >= USB_SPEED_SUPER)
+- break;
+-
+- fallthrough;
+- case DWC3_LINK_STATE_U3:
+- ret = __dwc3_gadget_wakeup(dwc, false);
+- dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
+- ret);
+- break;
+- }
+- }
+-
+ /*
+ * For some commands such as Update Transfer command, DEPCMDPARn
+ * registers are reserved. Since the driver often sends Update Transfer
+@@ -445,6 +438,10 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ dwc3_gadget_ep_get_transfer_index(dep);
+ }
+
++ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
++ !(cmd & DWC3_DEPCMD_CMDIOC))
++ mdelay(1);
++
+ if (saved_config) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= saved_config;
+@@ -1718,7 +1715,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+ */
+ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
+ {
+- struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+ int ret;
+@@ -1742,13 +1738,10 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ WARN_ON_ONCE(ret);
+ dep->resource_index = 0;
+
+- if (!interrupt) {
+- if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+- mdelay(1);
++ if (!interrupt)
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+- } else if (!ret) {
++ else if (!ret)
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+- }
+
+ dep->flags &= ~DWC3_EP_DELAY_STOP;
+ return ret;
+@@ -2103,7 +2096,17 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
+
+ list_for_each_entry(r, &dep->pending_list, list) {
+ if (r == req) {
+- dwc3_gadget_giveback(dep, req, -ECONNRESET);
++ /*
++ * Explicitly check for EP0/1, as dequeue for those
++ * EPs needs to be handled differently. The control EP
++ * only deals with one USB req, and giveback will
++ * occur during dwc3_ep0_stall_and_restart(). EP0
++ * requests are never added to started_list.
++ */
++ if (dep->number > 1)
++ dwc3_gadget_giveback(dep, req, -ECONNRESET);
++ else
++ dwc3_ep0_reset_state(dwc);
+ goto out;
+ }
+ }
+@@ -2640,6 +2643,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
++ if (!dwc->pullups_connected) {
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return 0;
++ }
++
+ dwc->connected = false;
+
+ /*
+@@ -2922,6 +2930,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
+ dwc3_ep0_out_start(dwc);
+
+ dwc3_gadget_enable_irq(dwc);
++ dwc3_enable_susphy(dwc, true);
+
+ return 0;
+
+@@ -2953,6 +2962,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
+ dwc->gadget_driver = driver;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
++ if (dwc->sys_wakeup)
++ device_wakeup_enable(dwc->sysdev);
++
+ return 0;
+ }
+
+@@ -2968,6 +2980,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+
++ if (dwc->sys_wakeup)
++ device_wakeup_disable(dwc->sysdev);
++
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_driver = NULL;
+ dwc->max_cfg_eps = 0;
+@@ -3973,6 +3988,13 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
+
+ dwc3_ep0_reset_state(dwc);
++
++ /*
++ * Request PM idle to address the condition where the usage count
++ * has already dropped to zero but we are still waiting for the
++ * disconnect interrupt to set dwc->connected to FALSE.
++ */
++ pm_request_idle(dwc->dev);
+ }
+
+ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+@@ -4642,6 +4664,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ else
+ dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+
++ /* No system wakeup if no gadget driver bound */
++ if (dwc->sys_wakeup)
++ device_wakeup_disable(dwc->sysdev);
++
+ return 0;
+
+ err5:
+@@ -4671,6 +4697,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
+ if (!dwc->gadget)
+ return;
+
++ dwc3_enable_susphy(dwc, false);
+ usb_del_gadget(dwc->gadget);
+ dwc3_gadget_free_endpoints(dwc);
+ usb_put_gadget(dwc->gadget);
+@@ -4686,15 +4713,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ unsigned long flags;
+ int ret;
+
+- if (!dwc->gadget_driver)
+- return 0;
+-
+ ret = dwc3_gadget_soft_disconnect(dwc);
+ if (ret)
+ goto err;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+- dwc3_disconnect_gadget(dwc);
++ if (dwc->gadget_driver)
++ dwc3_disconnect_gadget(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+@@ -4718,14 +4743,3 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
+
+ return dwc3_gadget_soft_connect(dwc);
+ }
+-
+-void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+-{
+- if (dwc->pending_events) {
+- dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+- dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
+- pm_runtime_put(dwc->dev);
+- dwc->pending_events = false;
+- enable_irq(dwc->irq_gadget);
+- }
+-}
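
The dwc3_gadget_soft_disconnect() hunk above adds a check-then-bail step under the spinlock, so a second disconnect becomes a no-op instead of tearing state down twice. A rough user-space analogue of that shape, with a pthread mutex standing in for dwc->lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool pullups_connected;  /* false: nothing to disconnect */

    static int soft_disconnect(void)
    {
        pthread_mutex_lock(&lock);
        if (!pullups_connected) {
            pthread_mutex_unlock(&lock);
            return 0;  /* already disconnected: bail out early */
        }
        /* ... the actual tear-down runs here, state held consistent ... */
        pullups_connected = false;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("soft_disconnect -> %d\n", soft_disconnect());
        return 0;
    }
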
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index 61f57fe5bb783b..6c143f7d241036 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -10,9 +10,30 @@
+ #include <linux/irq.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/usb.h>
++#include <linux/usb/hcd.h>
+
++#include "../host/xhci-plat.h"
+ #include "core.h"
+
++static void dwc3_xhci_plat_start(struct usb_hcd *hcd)
++{
++ struct platform_device *pdev;
++ struct dwc3 *dwc;
++
++ if (!usb_hcd_is_primary_hcd(hcd))
++ return;
++
++ pdev = to_platform_device(hcd->self.controller);
++ dwc = dev_get_drvdata(pdev->dev.parent);
++
++ dwc3_enable_susphy(dwc, true);
++}
++
++static const struct xhci_plat_priv dwc3_xhci_plat_quirk = {
++ .plat_start = dwc3_xhci_plat_start,
++};
++
+ static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
+ int irq, char *name)
+ {
+@@ -61,7 +82,7 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
+
+ int dwc3_host_init(struct dwc3 *dwc)
+ {
+- struct property_entry props[4];
++ struct property_entry props[5];
+ struct platform_device *xhci;
+ int ret, irq;
+ int prop_idx = 0;
+@@ -89,6 +110,8 @@ int dwc3_host_init(struct dwc3 *dwc)
+
+ memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+
++ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
++
+ if (dwc->usb3_lpm_capable)
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
+
+@@ -115,12 +138,25 @@ int dwc3_host_init(struct dwc3 *dwc)
+ }
+ }
+
++ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_quirk,
++ sizeof(struct xhci_plat_priv));
++ if (ret)
++ goto err;
++
+ ret = platform_device_add(xhci);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register xHCI device\n");
+ goto err;
+ }
+
++ if (dwc->sys_wakeup) {
++ /* Restore wakeup setting if switched from device */
++ device_wakeup_enable(dwc->sysdev);
++
++ /* Pass on wakeup setting to the new xhci platform device */
++ device_init_wakeup(&xhci->dev, true);
++ }
++
+ return 0;
+ err:
+ platform_device_put(xhci);
+@@ -129,6 +165,10 @@ int dwc3_host_init(struct dwc3 *dwc)
+
+ void dwc3_host_exit(struct dwc3 *dwc)
+ {
++ if (dwc->sys_wakeup)
++ device_init_wakeup(&dwc->xhci->dev, false);
++
++ dwc3_enable_susphy(dwc, false);
+ platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
+ }
+diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
+index 958fc40eae86b7..0655afe7f9779f 100644
+--- a/drivers/usb/fotg210/fotg210-core.c
++++ b/drivers/usb/fotg210/fotg210-core.c
+@@ -95,6 +95,7 @@ static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res,
+
+ /**
+ * fotg210_vbus() - Called by gadget driver to enable/disable VBUS
++ * @fotg: pointer to a private fotg210 object
+ * @enable: true to enable VBUS, false to disable VBUS
+ */
+ void fotg210_vbus(struct fotg210 *fotg, bool enable)
+diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
+index 929106c16b29b5..7bf810a0c98a93 100644
+--- a/drivers/usb/fotg210/fotg210-hcd.c
++++ b/drivers/usb/fotg210/fotg210-hcd.c
+@@ -428,8 +428,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ temp = size;
+ size -= temp;
+ next += temp;
+- if (temp == size)
+- goto done;
+ }
+
+ temp = snprintf(next, size, "\n");
+@@ -439,7 +437,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ size -= temp;
+ next += temp;
+
+-done:
+ *sizep = size;
+ *nextp = next;
+ }
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 0ace45b66a31c4..0e151b54aae82a 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -2112,7 +2112,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ buf[5] = 0x01;
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+- if (w_index != 0x4 || (w_value >> 8))
++ if (w_index != 0x4 || (w_value & 0xff))
+ break;
+ buf[6] = w_index;
+ /* Number of ext compat interfaces */
+@@ -2128,9 +2128,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+- if (w_index != 0x5 || (w_value >> 8))
++ if (w_index != 0x5 || (w_value & 0xff))
+ break;
+- interface = w_value & 0xFF;
++ interface = w_value >> 8;
+ if (interface >= MAX_CONFIG_INTERFACES ||
+ !os_desc_cfg->interface[interface])
+ break;
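
The composite.c fix above moves the MS OS descriptor interface number from the low byte of wValue to the high byte and requires the low byte to be zero. A sketch of the corrected extraction (the example wValue is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t w_value = 0x0300;  /* hypothetical: interface 3 in the high byte */

        if (w_value & 0xff) {       /* low byte must be zero, per the fix */
            puts("malformed request");
            return 1;
        }

        uint8_t intf = w_value >> 8;  /* interface number lives in the high byte */
        printf("interface = %u\n", intf);
        return 0;
    }
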
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 4c639e9ddedc0a..60a1abfc565474 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -115,9 +115,12 @@ static int usb_string_copy(const char *s, char **s_copy)
+ int ret;
+ char *str;
+ char *copy = *s_copy;
++
+ ret = strlen(s);
+ if (ret > USB_MAX_STRING_LEN)
+ return -EOVERFLOW;
++ if (ret < 1)
++ return -EINVAL;
+
+ if (copy) {
+ str = copy;
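
The configfs.c hunk above adds a lower bound to usb_string_copy() so empty strings are rejected alongside overlong ones. A sketch of the validation shape; USB_MAX_STRING_LEN is 126 in the mainline headers, restated here as an assumption:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define USB_MAX_STRING_LEN 126  /* mainline header value; assumption here */

    static int validate_usb_string(const char *s)
    {
        size_t len = strlen(s);

        if (len > USB_MAX_STRING_LEN)
            return -EOVERFLOW;  /* too long for a USB string descriptor */
        if (len < 1)
            return -EINVAL;     /* empty strings are now rejected too */
        return 0;
    }

    int main(void)
    {
        printf("\"\"   -> %d\n", validate_usb_string(""));
        printf("\"ok\" -> %d\n", validate_usb_string("ok"));
        return 0;
    }
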
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 6e9ef35a43a7ba..fd0f4879f38e8b 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -821,6 +821,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ work);
+ int ret = io_data->status;
+ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
++ unsigned long flags;
+
+ if (io_data->read && ret > 0) {
+ kthread_use_mm(io_data->mm);
+@@ -833,6 +834,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+
++ spin_lock_irqsave(&io_data->ffs->eps_lock, flags);
++ usb_ep_free_request(io_data->ep, io_data->req);
++ io_data->req = NULL;
++ spin_unlock_irqrestore(&io_data->ffs->eps_lock, flags);
++
+ if (io_data->read)
+ kfree(io_data->to_free);
+ ffs_free_buffer(io_data);
+@@ -846,7 +852,6 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
+ struct ffs_data *ffs = io_data->ffs;
+
+ io_data->status = req->status ? req->status : req->actual;
+- usb_ep_free_request(_ep, req);
+
+ INIT_WORK(&io_data->work, ffs_user_copy_worker);
+ queue_work(ffs->io_completion_wq, &io_data->work);
+@@ -3331,7 +3336,7 @@ static int ffs_func_setup(struct usb_function *f,
+ __ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+- return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
++ return ffs->ev.setup.wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
+ }
+
+ static bool ffs_func_req_match(struct usb_function *f,
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index ea85e2c701a15f..2db01e03bfbf0b 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -92,6 +92,7 @@ static void hidg_release(struct device *dev)
+ {
+ struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
+
++ kfree(hidg->report_desc);
+ kfree(hidg->set_report_buf);
+ kfree(hidg);
+ }
+@@ -1028,9 +1029,9 @@ static inline int hidg_get_minor(void)
+ {
+ int ret;
+
+- ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
++ ret = ida_alloc(&hidg_ida, GFP_KERNEL);
+ if (ret >= HIDG_MINORS) {
+- ida_simple_remove(&hidg_ida, ret);
++ ida_free(&hidg_ida, ret);
+ ret = -ENODEV;
+ }
+
+@@ -1175,7 +1176,7 @@ static const struct config_item_type hid_func_type = {
+
+ static inline void hidg_put_minor(int minor)
+ {
+- ida_simple_remove(&hidg_ida, minor);
++ ida_free(&hidg_ida, minor);
+ }
+
+ static void hidg_free_inst(struct usb_function_instance *f)
+@@ -1287,9 +1288,9 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ hidg->report_length = opts->report_length;
+ hidg->report_desc_length = opts->report_desc_length;
+ if (opts->report_desc) {
+- hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
+- opts->report_desc_length,
+- GFP_KERNEL);
++ hidg->report_desc = kmemdup(opts->report_desc,
++ opts->report_desc_length,
++ GFP_KERNEL);
+ if (!hidg->report_desc) {
+ ret = -ENOMEM;
+ goto err_put_device;
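
The f_hid.c hunks above migrate from the deprecated ida_simple_get()/ida_simple_remove() to ida_alloc()/ida_free(); both hand out the lowest free ID starting at 0, which is why the over-range check against HIDG_MINORS survives unchanged. The toy allocator below mimics those semantics so the pattern is runnable stand-alone (it is not the kernel IDA):

    #include <stdio.h>

    #define HIDG_MINORS 4

    /* Toy stand-in for the kernel IDA: lowest free ID >= 0, like
     * ida_alloc(); ida_free_sim() returns the ID to the pool. */
    static int used[64];

    static int ida_alloc_sim(void)
    {
        for (int i = 0; i < 64; i++)
            if (!used[i]) { used[i] = 1; return i; }
        return -1;
    }
    static void ida_free_sim(int id) { used[id] = 0; }

    static int hidg_get_minor(void)
    {
        int ret = ida_alloc_sim();

        if (ret >= HIDG_MINORS) {  /* out of device minors: give it back */
            ida_free_sim(ret);
            ret = -1;              /* -ENODEV in the driver */
        }
        return ret;
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("minor -> %d\n", hidg_get_minor());
        return 0;
    }
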
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 722a3ab2b33793..c265a1f62fc145 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -545,21 +545,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+
+ static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+ {
++ int rc;
++
+ if (!fsg_is_set(common))
+ return false;
+ bh->state = BUF_STATE_SENDING;
+- if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
++ rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
++ if (rc) {
+ bh->state = BUF_STATE_EMPTY;
++ if (rc == -ESHUTDOWN) {
++ common->running = 0;
++ return false;
++ }
++ }
+ return true;
+ }
+
+ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+ {
++ int rc;
++
+ if (!fsg_is_set(common))
+ return false;
+ bh->state = BUF_STATE_RECEIVING;
+- if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
++ rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
++ if (rc) {
+ bh->state = BUF_STATE_FULL;
++ if (rc == -ESHUTDOWN) {
++ common->running = 0;
++ return false;
++ }
++ }
+ return true;
+ }
+
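The f_mass_storage change above starts capturing the return code of start_transfer(), so an -ESHUTDOWN (the port went away) halts the common->running state machine rather than leaving it spinning on a dead endpoint. A small sketch of that control flow (assumes a Linux errno.h providing ESHUTDOWN):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool running = true;

    /* Shape of the start_in_transfer() fix: any failure resets the buffer
     * state, and -ESHUTDOWN additionally stops the whole state machine. */
    static bool start_transfer_sim(int rc)
    {
        if (rc) {
            /* bh->state goes back to BUF_STATE_EMPTY here */
            if (rc == -ESHUTDOWN) {
                running = false;
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        printf("ok:       %d (running=%d)\n", start_transfer_sim(0), running);
        printf("shutdown: %d (running=%d)\n",
               start_transfer_sim(-ESHUTDOWN), running);
        return 0;
    }
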
+diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
+index ec8cd7c7bbfc1e..6908fdd4a83f3a 100644
+--- a/drivers/usb/gadget/function/f_midi2.c
++++ b/drivers/usb/gadget/function/f_midi2.c
+@@ -150,6 +150,9 @@ struct f_midi2 {
+
+ #define func_to_midi2(f) container_of(f, struct f_midi2, func)
+
++/* convert from MIDI protocol number (1 or 2) to SNDRV_UMP_EP_INFO_PROTO_* */
++#define to_ump_protocol(v) (((v) & 3) << 8)
++
+ /* get EP name string */
+ static const char *ump_ep_name(const struct f_midi2_ep *ep)
+ {
+@@ -564,8 +567,7 @@ static void reply_ump_stream_ep_config(struct f_midi2_ep *ep)
+ .status = UMP_STREAM_MSG_STATUS_STREAM_CFG,
+ };
+
+- if ((ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK) ==
+- SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++ if (ep->info.protocol == 2)
+ rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 >> 8;
+ else
+ rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 >> 8;
+@@ -627,25 +629,34 @@ static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data)
+ return;
+ case UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST:
+ if (*data & UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) {
+- ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI2;
++ ep->info.protocol = 2;
+ DBG(midi2, "Switching Protocol to MIDI2\n");
+ } else {
+- ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI1;
++ ep->info.protocol = 1;
+ DBG(midi2, "Switching Protocol to MIDI1\n");
+ }
+- snd_ump_switch_protocol(ep->ump, ep->info.protocol);
++ snd_ump_switch_protocol(ep->ump, to_ump_protocol(ep->info.protocol));
+ reply_ump_stream_ep_config(ep);
+ return;
+ case UMP_STREAM_MSG_STATUS_FB_DISCOVERY:
+ if (format)
+ return; // invalid
+ blk = (*data >> 8) & 0xff;
+- if (blk >= ep->num_blks)
+- return;
+- if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
+- reply_ump_stream_fb_info(ep, blk);
+- if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
+- reply_ump_stream_fb_name(ep, blk);
++ if (blk == 0xff) {
++ /* inquiry for all blocks */
++ for (blk = 0; blk < ep->num_blks; blk++) {
++ if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
++ reply_ump_stream_fb_info(ep, blk);
++ if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
++ reply_ump_stream_fb_name(ep, blk);
++ }
++ } else if (blk < ep->num_blks) {
++ /* only the specified block */
++ if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
++ reply_ump_stream_fb_info(ep, blk);
++ if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
++ reply_ump_stream_fb_name(ep, blk);
++ }
+ return;
+ }
+ }
+@@ -1065,7 +1076,8 @@ static void f_midi2_midi1_ep_out_complete(struct usb_ep *usb_ep,
+ group = midi2->out_cable_mapping[cable].group;
+ bytes = midi1_packet_bytes[*buf & 0x0f];
+ for (c = 0; c < bytes; c++) {
+- snd_ump_convert_to_ump(cvt, group, ep->info.protocol,
++ snd_ump_convert_to_ump(cvt, group,
++ to_ump_protocol(ep->info.protocol),
+ buf[c + 1]);
+ if (cvt->ump_bytes) {
+ snd_ump_receive(ep->ump, cvt->ump,
+@@ -1375,7 +1387,7 @@ static void assign_block_descriptors(struct f_midi2 *midi2,
+ desc->nNumGroupTrm = b->num_groups;
+ desc->iBlockItem = ep->blks[blk].string_id;
+
+- if (ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++ if (ep->info.protocol == 2)
+ desc->bMIDIProtocol = USB_MS_MIDI_PROTO_2_0;
+ else
+ desc->bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_128;
+@@ -1552,7 +1564,7 @@ static int f_midi2_create_card(struct f_midi2 *midi2)
+ if (midi2->info.static_block)
+ ump->info.flags |= SNDRV_UMP_EP_INFO_STATIC_BLOCKS;
+ ump->info.protocol_caps = (ep->info.protocol_caps & 3) << 8;
+- ump->info.protocol = (ep->info.protocol & 3) << 8;
++ ump->info.protocol = to_ump_protocol(ep->info.protocol);
+ ump->info.version = 0x0101;
+ ump->info.family_id = ep->info.family;
+ ump->info.model_id = ep->info.model;
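
The new to_ump_protocol() macro above maps the function's own protocol number (1 or 2) onto the SNDRV_UMP_EP_INFO_PROTO_* bit layout, which keeps the protocol in bits 9:8. A sketch verifying the mapping; the two SNDRV constants are restated from the mainline UAPI headers as assumptions:

    #include <stdio.h>

    /* From the hunk: MIDI protocol number (1 or 2) -> bits 9:8. */
    #define to_ump_protocol(v) (((v) & 3) << 8)

    /* Mainline UAPI values, restated here as assumptions: */
    #define SNDRV_UMP_EP_INFO_PROTO_MIDI1 0x100
    #define SNDRV_UMP_EP_INFO_PROTO_MIDI2 0x200

    int main(void)
    {
        printf("%d\n", to_ump_protocol(1) == SNDRV_UMP_EP_INFO_PROTO_MIDI1);
        printf("%d\n", to_ump_protocol(2) == SNDRV_UMP_EP_INFO_PROTO_MIDI2);
        return 0;
    }
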
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index e6ab8cc225ffdc..f5731d465cd7b3 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -869,7 +869,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ if (alt > 1)
+ goto fail;
+
+- if (ncm->port.in_ep->enabled) {
++ if (ncm->netdev) {
+ DBG(cdev, "reset ncm\n");
+ ncm->netdev = NULL;
+ gether_disconnect(&ncm->port);
+@@ -1325,7 +1325,15 @@ static int ncm_unwrap_ntb(struct gether *port,
+ "Parsed NTB with %d frames\n", dgram_counter);
+
+ to_process -= block_len;
+- if (to_process != 0) {
++
++ /*
++ * The Windows NCM driver avoids USB ZLPs by adding a 1-byte
++ * zero pad as needed.
++ */
++ if (to_process == 1 &&
++ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
++ to_process--;
++ } else if ((to_process > 0) && (block_len != 0)) {
+ ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+ goto parse_ntb;
+ }
+@@ -1346,7 +1354,7 @@ static void ncm_disable(struct usb_function *f)
+
+ DBG(cdev, "ncm deactivated\n");
+
+- if (ncm->port.in_ep->enabled) {
++ if (ncm->netdev) {
+ ncm->netdev = NULL;
+ gether_disconnect(&ncm->port);
+ }
+@@ -1410,7 +1418,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ncm *ncm = func_to_ncm(f);
+ struct usb_string *us;
+- int status;
++ int status = 0;
+ struct usb_ep *ep;
+ struct f_ncm_opts *ncm_opts;
+
+@@ -1428,22 +1436,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ }
+
+- /*
+- * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+- * configurations are bound in sequence with list_for_each_entry,
+- * in each configuration its functions are bound in sequence
+- * with list_for_each_entry, so we assume no race condition
+- * with regard to ncm_opts->bound access
+- */
+- if (!ncm_opts->bound) {
+- mutex_lock(&ncm_opts->lock);
+- gether_set_gadget(ncm_opts->net, cdev->gadget);
++ mutex_lock(&ncm_opts->lock);
++ gether_set_gadget(ncm_opts->net, cdev->gadget);
++ if (!ncm_opts->bound)
+ status = gether_register_netdev(ncm_opts->net);
+- mutex_unlock(&ncm_opts->lock);
+- if (status)
+- goto fail;
+- ncm_opts->bound = true;
+- }
++ mutex_unlock(&ncm_opts->lock);
++
++ if (status)
++ goto fail;
++
++ ncm_opts->bound = true;
++
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+ if (IS_ERR(us)) {
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index 076dd4c1be96c0..44e20c6c36d32b 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -213,6 +213,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *ss)
+ {
+ switch (gadget->speed) {
++ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ return ss;
+ case USB_SPEED_HIGH:
+@@ -449,11 +450,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ mutex_lock(&dev->lock_printer_io);
+ spin_lock_irqsave(&dev->lock, flags);
+
+- if (dev->interface < 0) {
+- spin_unlock_irqrestore(&dev->lock, flags);
+- mutex_unlock(&dev->lock_printer_io);
+- return -ENODEV;
+- }
++ if (dev->interface < 0)
++ goto out_disabled;
+
+ /* We will use this flag later to check if a printer reset happened
+ * after we turn interrupts back on.
+@@ -461,6 +459,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ dev->reset_printer = 0;
+
+ setup_rx_reqs(dev);
++ /* this dropped the lock - need to retest */
++ if (dev->interface < 0)
++ goto out_disabled;
+
+ bytes_copied = 0;
+ current_rx_req = dev->current_rx_req;
+@@ -494,6 +495,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ wait_event_interruptible(dev->rx_wait,
+ (likely(!list_empty(&dev->rx_buffers))));
+ spin_lock_irqsave(&dev->lock, flags);
++ if (dev->interface < 0)
++ goto out_disabled;
+ }
+
+ /* We have data to return then copy it to the caller's buffer.*/
+@@ -537,6 +540,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ return -EAGAIN;
+ }
+
++ if (dev->interface < 0)
++ goto out_disabled;
++
+ /* If we are not returning all the data left in this RX request
+ * buffer then adjust the amount of data left in the buffer.
+ * Otherwise, if we are done with this RX request buffer then
+@@ -566,6 +572,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ return bytes_copied;
+ else
+ return -EAGAIN;
++
++out_disabled:
++ spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock_printer_io);
++ return -ENODEV;
+ }
+
+ static ssize_t
+@@ -586,11 +597,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ mutex_lock(&dev->lock_printer_io);
+ spin_lock_irqsave(&dev->lock, flags);
+
+- if (dev->interface < 0) {
+- spin_unlock_irqrestore(&dev->lock, flags);
+- mutex_unlock(&dev->lock_printer_io);
+- return -ENODEV;
+- }
++ if (dev->interface < 0)
++ goto out_disabled;
+
+ /* Check if a printer reset happens while we have interrupts on */
+ dev->reset_printer = 0;
+@@ -613,6 +621,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ wait_event_interruptible(dev->tx_wait,
+ (likely(!list_empty(&dev->tx_reqs))));
+ spin_lock_irqsave(&dev->lock, flags);
++ if (dev->interface < 0)
++ goto out_disabled;
+ }
+
+ while (likely(!list_empty(&dev->tx_reqs)) && len) {
+@@ -662,6 +672,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ return -EAGAIN;
+ }
+
++ if (dev->interface < 0)
++ goto out_disabled;
++
+ list_add(&req->list, &dev->tx_reqs_active);
+
+ /* here, we unlock, and only unlock, to avoid deadlock. */
+@@ -674,6 +687,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ mutex_unlock(&dev->lock_printer_io);
+ return -EAGAIN;
+ }
++ if (dev->interface < 0)
++ goto out_disabled;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+@@ -685,6 +700,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ return bytes_copied;
+ else
+ return -EAGAIN;
++
++out_disabled:
++ spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock_printer_io);
++ return -ENODEV;
+ }
+
+ static int
+@@ -1312,9 +1332,9 @@ static inline int gprinter_get_minor(void)
+ {
+ int ret;
+
+- ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
++ ret = ida_alloc(&printer_ida, GFP_KERNEL);
+ if (ret >= PRINTER_MINORS) {
+- ida_simple_remove(&printer_ida, ret);
++ ida_free(&printer_ida, ret);
+ ret = -ENODEV;
+ }
+
+@@ -1323,7 +1343,7 @@ static inline int gprinter_get_minor(void)
+
+ static inline void gprinter_put_minor(int minor)
+ {
+- ida_simple_remove(&printer_ida, minor);
++ ida_free(&printer_ida, minor);
+ }
+
+ static int gprinter_setup(int);
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index faa398109431fc..2e6bafb2a55492 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -719,13 +719,29 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ }
+ uvc->enable_interrupt_ep = opts->enable_interrupt_ep;
+
+- ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
++ /*
++ * The gadget_is_{super|dual}speed() API checks the UDC controller capability. We should pass
++ * down the highest-speed endpoint descriptor to the UDC controller so that its driver can
++ * reserve enough resources at check_config(), especially mult and maxburst. The UDC driver
++ * (such as cdns3) then knows it needs at least (mult + 1) * (maxburst + 1) * wMaxPacketSize
++ * of internal memory for this UVC function. This is the only straightforward method to
++ * resolve the UDC resource allocation issue in the current gadget framework.
++ */
++ if (gadget_is_superspeed(c->cdev->gadget))
++ ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
++ &uvc_ss_streaming_comp);
++ else if (gadget_is_dualspeed(cdev->gadget))
++ ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep);
++ else
++ ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
++
+ if (!ep) {
+ uvcg_info(f, "Unable to allocate streaming EP\n");
+ goto error;
+ }
+ uvc->video.ep = ep;
+
++ uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+ uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+ uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+
+@@ -950,7 +966,8 @@ static void uvc_free(struct usb_function *f)
+ struct uvc_device *uvc = to_uvc(f);
+ struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
+ func_inst);
+- config_item_put(&uvc->header->item);
++ if (!opts->header)
++ config_item_put(&uvc->header->item);
+ --opts->refcnt;
+ kfree(uvc);
+ }
+@@ -1042,25 +1059,29 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
+ uvc->desc.hs_streaming = opts->hs_streaming;
+ uvc->desc.ss_streaming = opts->ss_streaming;
+
+- streaming = config_group_find_item(&opts->func_inst.group, "streaming");
+- if (!streaming)
+- goto err_config;
+-
+- header = config_group_find_item(to_config_group(streaming), "header");
+- config_item_put(streaming);
+- if (!header)
+- goto err_config;
+-
+- h = config_group_find_item(to_config_group(header), "h");
+- config_item_put(header);
+- if (!h)
+- goto err_config;
+-
+- uvc->header = to_uvcg_streaming_header(h);
+- if (!uvc->header->linked) {
+- mutex_unlock(&opts->lock);
+- kfree(uvc);
+- return ERR_PTR(-EBUSY);
++ if (opts->header) {
++ uvc->header = opts->header;
++ } else {
++ streaming = config_group_find_item(&opts->func_inst.group, "streaming");
++ if (!streaming)
++ goto err_config;
++
++ header = config_group_find_item(to_config_group(streaming), "header");
++ config_item_put(streaming);
++ if (!header)
++ goto err_config;
++
++ h = config_group_find_item(to_config_group(header), "h");
++ config_item_put(header);
++ if (!h)
++ goto err_config;
++
++ uvc->header = to_uvcg_streaming_header(h);
++ if (!uvc->header->linked) {
++ mutex_unlock(&opts->lock);
++ kfree(uvc);
++ return ERR_PTR(-EBUSY);
++ }
+ }
+
+ uvc->desc.extension_units = &opts->extension_units;
+diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
+index 29bf8664bf582d..12c5d9cf450c10 100644
+--- a/drivers/usb/gadget/function/rndis.c
++++ b/drivers/usb/gadget/function/rndis.c
+@@ -869,12 +869,12 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser);
+
+ static inline int rndis_get_nr(void)
+ {
+- return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
++ return ida_alloc_max(&rndis_ida, 999, GFP_KERNEL);
+ }
+
+ static inline void rndis_put_nr(int nr)
+ {
+- ida_simple_remove(&rndis_ida, nr);
++ ida_free(&rndis_ida, nr);
+ }
+
+ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index 4a42574b4a7feb..0be0966973c7fd 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -57,13 +57,13 @@ struct uac_rtd_params {
+
+ /* Volume/Mute controls and their state */
+ int fu_id; /* Feature Unit ID */
+- struct snd_kcontrol *snd_kctl_volume;
+- struct snd_kcontrol *snd_kctl_mute;
++ struct snd_ctl_elem_id snd_kctl_volume_id;
++ struct snd_ctl_elem_id snd_kctl_mute_id;
+ s16 volume_min, volume_max, volume_res;
+ s16 volume;
+ int mute;
+
+- struct snd_kcontrol *snd_kctl_rate; /* read-only current rate */
++ struct snd_ctl_elem_id snd_kctl_rate_id; /* read-only current rate */
+ int srate; /* selected samplerate */
+ int active; /* playback/capture running */
+
+@@ -494,14 +494,13 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
+ static void set_active(struct uac_rtd_params *prm, bool active)
+ {
+ // notifying through the Rate ctrl
+- struct snd_kcontrol *kctl = prm->snd_kctl_rate;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prm->lock, flags);
+ if (prm->active != active) {
+ prm->active = active;
+ snd_ctl_notify(prm->uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &kctl->id);
++ &prm->snd_kctl_rate_id);
+ }
+ spin_unlock_irqrestore(&prm->lock, flags);
+ }
+@@ -593,16 +592,25 @@ int u_audio_start_capture(struct g_audio *audio_dev)
+ struct usb_ep *ep, *ep_fback;
+ struct uac_rtd_params *prm;
+ struct uac_params *params = &audio_dev->params;
+- int req_len, i;
++ int req_len, i, ret;
+
+ prm = &uac->c_prm;
+ dev_dbg(dev, "start capture with rate %d\n", prm->srate);
+ ep = audio_dev->out_ep;
+- config_ep_by_speed(gadget, &audio_dev->func, ep);
++ ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
++ if (ret < 0) {
++ dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret);
++ return ret;
++ }
++
+ req_len = ep->maxpacket;
+
+ prm->ep_enabled = true;
+- usb_ep_enable(ep);
++ ret = usb_ep_enable(ep);
++ if (ret < 0) {
++ dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret);
++ return ret;
++ }
+
+ for (i = 0; i < params->req_number; i++) {
+ if (!prm->reqs[i]) {
+@@ -630,9 +638,18 @@ int u_audio_start_capture(struct g_audio *audio_dev)
+ return 0;
+
+ /* Setup feedback endpoint */
+- config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
++ ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
++ if (ret < 0) {
++ dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret);
++ return ret; // TODO: Clean up out_ep
++ }
++
+ prm->fb_ep_enabled = true;
+- usb_ep_enable(ep_fback);
++ ret = usb_ep_enable(ep_fback);
++ if (ret < 0) {
++ dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret);
++ return ret; // TODO: Clean up out_ep
++ }
+ req_len = ep_fback->maxpacket;
+
+ req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC);
+@@ -688,13 +705,17 @@ int u_audio_start_playback(struct g_audio *audio_dev)
+ struct uac_params *params = &audio_dev->params;
+ unsigned int factor;
+ const struct usb_endpoint_descriptor *ep_desc;
+- int req_len, i;
++ int req_len, i, ret;
+ unsigned int p_pktsize;
+
+ prm = &uac->p_prm;
+ dev_dbg(dev, "start playback with rate %d\n", prm->srate);
+ ep = audio_dev->in_ep;
+- config_ep_by_speed(gadget, &audio_dev->func, ep);
++ ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
++ if (ret < 0) {
++ dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret);
++ return ret;
++ }
+
+ ep_desc = ep->desc;
+ /*
+@@ -721,7 +742,11 @@ int u_audio_start_playback(struct g_audio *audio_dev)
+ uac->p_residue_mil = 0;
+
+ prm->ep_enabled = true;
+- usb_ep_enable(ep);
++ ret = usb_ep_enable(ep);
++ if (ret < 0) {
++ dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret);
++ return ret;
++ }
+
+ for (i = 0; i < params->req_number; i++) {
+ if (!prm->reqs[i]) {
+@@ -807,7 +832,7 @@ int u_audio_set_volume(struct g_audio *audio_dev, int playback, s16 val)
+
+ if (change)
+ snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &prm->snd_kctl_volume->id);
++ &prm->snd_kctl_volume_id);
+
+ return 0;
+ }
+@@ -856,7 +881,7 @@ int u_audio_set_mute(struct g_audio *audio_dev, int playback, int val)
+
+ if (change)
+ snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &prm->snd_kctl_mute->id);
++ &prm->snd_kctl_mute_id);
+
+ return 0;
+ }
+@@ -1331,7 +1356,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ goto snd_fail;
+- prm->snd_kctl_mute = kctl;
++ prm->snd_kctl_mute_id = kctl->id;
+ prm->mute = 0;
+ }
+
+@@ -1359,7 +1384,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ goto snd_fail;
+- prm->snd_kctl_volume = kctl;
++ prm->snd_kctl_volume_id = kctl->id;
+ prm->volume = fu->volume_max;
+ prm->volume_max = fu->volume_max;
+ prm->volume_min = fu->volume_min;
+@@ -1383,7 +1408,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ goto snd_fail;
+- prm->snd_kctl_rate = kctl;
++ prm->snd_kctl_rate_id = kctl->id;
+ }
+
+ strscpy(card->driver, card_name, sizeof(card->driver));
+@@ -1420,6 +1445,8 @@ void g_audio_cleanup(struct g_audio *g_audio)
+ return;
+
+ uac = g_audio->uac;
++ g_audio->uac = NULL;
++
+ card = uac->card;
+ if (card)
+ snd_card_free_when_closed(card);
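
The u_audio.c hunks above stop caching struct snd_kcontrol pointers and instead keep a struct snd_ctl_elem_id copy, so later snd_ctl_notify() calls never chase a pointer into a control that ALSA may already have freed. A generic sketch of the pointer-versus-copy pattern, with hypothetical stand-in types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-ins for snd_kcontrol / snd_ctl_elem_id. */
    struct elem_id { int numid; char name[16]; };
    struct kcontrol { struct elem_id id; };

    struct params {
        struct elem_id rate_id;  /* copy kept by value, as in the patch */
    };

    int main(void)
    {
        struct kcontrol *kctl = malloc(sizeof(*kctl));
        struct params prm;

        kctl->id.numid = 7;
        strcpy(kctl->id.name, "Capture Rate");

        prm.rate_id = kctl->id;  /* take a copy at setup time */
        free(kctl);              /* the control may be freed later... */

        /* ...yet the notification data stays valid: */
        printf("notify id %d (%s)\n", prm.rate_id.numid, prm.rate_id.name);
        return 0;
    }
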
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index a92eb6d9097685..8962f96ae7294a 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1441,6 +1441,7 @@ void gserial_suspend(struct gserial *gser)
+ spin_lock(&port->port_lock);
+ spin_unlock(&serial_port_lock);
+ port->suspended = true;
++ port->start_delayed = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(gserial_suspend);
+diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
+index 1ce58f61253c9a..3ac392cbb77949 100644
+--- a/drivers/usb/gadget/function/u_uvc.h
++++ b/drivers/usb/gadget/function/u_uvc.h
+@@ -98,6 +98,12 @@ struct f_uvc_opts {
+ */
+ struct mutex lock;
+ int refcnt;
++
++ /*
++ * Only for legacy gadget. Shall be NULL for configfs-composed gadgets,
++ * which is guaranteed by alloc_inst implementation of f_uvc doing kzalloc.
++ */
++ struct uvcg_streaming_header *header;
+ };
+
+ #endif /* U_UVC_H */
+diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
+index 9bf0e985acfab5..4acf336e946d64 100644
+--- a/drivers/usb/gadget/function/uvc_configfs.c
++++ b/drivers/usb/gadget/function/uvc_configfs.c
+@@ -13,6 +13,7 @@
+ #include "uvc_configfs.h"
+
+ #include <linux/sort.h>
++#include <linux/usb/uvc.h>
+ #include <linux/usb/video.h>
+
+ /* -----------------------------------------------------------------------------
+@@ -92,10 +93,10 @@ static int __uvcg_iter_item_entries(const char *page, size_t len,
+
+ while (pg - page < len) {
+ i = 0;
+- while (i < sizeof(buf) && (pg - page < len) &&
++ while (i < bufsize && (pg - page < len) &&
+ *pg != '\0' && *pg != '\n')
+ buf[i++] = *pg++;
+- if (i == sizeof(buf)) {
++ if (i == bufsize) {
+ ret = -EINVAL;
+ goto out_free_buf;
+ }
+@@ -2260,6 +2261,8 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
++ const struct uvc_format_desc *format;
++ u8 tmpguidFormat[sizeof(ch->desc.guidFormat)];
+ int ret;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+@@ -2273,7 +2276,16 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
+ goto end;
+ }
+
+- memcpy(ch->desc.guidFormat, page,
++ memcpy(tmpguidFormat, page,
++ min(sizeof(tmpguidFormat), len));
++
++ format = uvc_format_by_guid(tmpguidFormat);
++ if (!format) {
++ ret = -EINVAL;
++ goto end;
++ }
++
++ memcpy(ch->desc.guidFormat, tmpguidFormat,
+ min(sizeof(ch->desc.guidFormat), len));
+ ret = sizeof(ch->desc.guidFormat);
+
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
+index 91af3b1ef0d412..281e75027b3442 100644
+--- a/drivers/usb/gadget/function/uvc_video.c
++++ b/drivers/usb/gadget/function/uvc_video.c
+@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
+
+ data[1] = UVC_STREAM_EOH | video->fid;
+
++ if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
++ data[1] |= UVC_STREAM_ERR;
++
+ if (video->queue.buf_used == 0 && ts.tv_sec) {
+ /* dwClockFrequency is 48 MHz */
+ u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index e549022642e569..ea106ad665a1fa 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -663,12 +663,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ if (WARN_ON(in && dev->ep0_out_pending)) {
+ ret = -ENODEV;
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_unlock;
+ }
+ if (WARN_ON(!in && dev->ep0_in_pending)) {
+ ret = -ENODEV;
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_unlock;
+ }
+
+ dev->req->buf = data;
+@@ -683,7 +683,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ "fail, usb_ep_queue returned %d\n", ret);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_queue_failed;
+ }
+
+ ret = wait_for_completion_interruptible(&dev->ep0_done);
+@@ -692,13 +692,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ usb_ep_dequeue(dev->gadget->ep0, dev->req);
+ wait_for_completion(&dev->ep0_done);
+ spin_lock_irqsave(&dev->lock, flags);
+- goto out_done;
++ if (dev->ep0_status == -ECONNRESET)
++ dev->ep0_status = -EINTR;
++ goto out_interrupted;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+- ret = dev->ep0_status;
+
+-out_done:
++out_interrupted:
++ ret = dev->ep0_status;
++out_queue_failed:
+ dev->ep0_urb_queued = false;
+ out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+@@ -1067,7 +1070,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ "fail, usb_ep_queue returned %d\n", ret);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->state = STATE_DEV_FAILED;
+- goto out_done;
++ goto out_queue_failed;
+ }
+
+ ret = wait_for_completion_interruptible(&done);
+@@ -1076,13 +1079,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
+ usb_ep_dequeue(ep->ep, ep->req);
+ wait_for_completion(&done);
+ spin_lock_irqsave(&dev->lock, flags);
+- goto out_done;
++ if (ep->status == -ECONNRESET)
++ ep->status = -EINTR;
++ goto out_interrupted;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+- ret = ep->status;
+
+-out_done:
++out_interrupted:
++ ret = ep->status;
++out_queue_failed:
+ ep->urb_queued = false;
+ out_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
+index c06dd1af7a0c50..c395438d39780e 100644
+--- a/drivers/usb/gadget/legacy/webcam.c
++++ b/drivers/usb/gadget/legacy/webcam.c
+@@ -12,6 +12,7 @@
+ #include <linux/usb/video.h>
+
+ #include "u_uvc.h"
++#include "uvc_configfs.h"
+
+ USB_GADGET_COMPOSITE_OPTIONS();
+
+@@ -84,8 +85,6 @@ static struct usb_device_descriptor webcam_device_descriptor = {
+ .bNumConfigurations = 0, /* dynamic */
+ };
+
+-DECLARE_UVC_HEADER_DESCRIPTOR(1);
+-
+ static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
+ .bLength = UVC_DT_HEADER_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+@@ -158,43 +157,112 @@ static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
+ .bmaControls[1][0] = 4,
+ };
+
+-static const struct uvc_format_uncompressed uvc_format_yuv = {
+- .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
+- .bDescriptorType = USB_DT_CS_INTERFACE,
+- .bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED,
+- .bFormatIndex = 1,
+- .bNumFrameDescriptors = 2,
+- .guidFormat =
+- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
+- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
+- .bBitsPerPixel = 16,
+- .bDefaultFrameIndex = 1,
+- .bAspectRatioX = 0,
+- .bAspectRatioY = 0,
+- .bmInterlaceFlags = 0,
+- .bCopyProtect = 0,
++static const struct uvcg_color_matching uvcg_color_matching = {
++ .desc = {
++ .bLength = UVC_DT_COLOR_MATCHING_SIZE,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = UVC_VS_COLORFORMAT,
++ .bColorPrimaries = 1,
++ .bTransferCharacteristics = 1,
++ .bMatrixCoefficients = 4,
++ },
++};
++
++static struct uvcg_uncompressed uvcg_format_yuv = {
++ .fmt = {
++ .type = UVCG_UNCOMPRESSED,
++ /* add to .frames and fill .num_frames at runtime */
++ .color_matching = (struct uvcg_color_matching *)&uvcg_color_matching,
++ },
++ .desc = {
++ .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED,
++ .bFormatIndex = 1,
++ .bNumFrameDescriptors = 2,
++ .guidFormat = {
++ 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
++ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
++ },
++ .bBitsPerPixel = 16,
++ .bDefaultFrameIndex = 1,
++ .bAspectRatioX = 0,
++ .bAspectRatioY = 0,
++ .bmInterlaceFlags = 0,
++ .bCopyProtect = 0,
++ },
++};
++
++static struct uvcg_format_ptr uvcg_format_ptr_yuv = {
++ .fmt = &uvcg_format_yuv.fmt,
+ };
+
+ DECLARE_UVC_FRAME_UNCOMPRESSED(1);
+ DECLARE_UVC_FRAME_UNCOMPRESSED(3);
+
++#define UVCG_WIDTH_360P 640
++#define UVCG_HEIGHT_360P 360
++#define UVCG_MIN_BITRATE_360P 18432000
++#define UVCG_MAX_BITRATE_360P 55296000
++#define UVCG_MAX_VIDEO_FB_SZ_360P 460800
++#define UVCG_FRM_INTERV_0_360P 666666
++#define UVCG_FRM_INTERV_1_360P 1000000
++#define UVCG_FRM_INTERV_2_360P 5000000
++#define UVCG_DEFAULT_FRM_INTERV_360P UVCG_FRM_INTERV_0_360P
++
+ static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+- .wWidth = cpu_to_le16(640),
+- .wHeight = cpu_to_le16(360),
+- .dwMinBitRate = cpu_to_le32(18432000),
+- .dwMaxBitRate = cpu_to_le32(55296000),
+- .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
+- .dwDefaultFrameInterval = cpu_to_le32(666666),
++ .wWidth = cpu_to_le16(UVCG_WIDTH_360P),
++ .wHeight = cpu_to_le16(UVCG_HEIGHT_360P),
++ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_360P),
++ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_360P),
++ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_360P),
++ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_360P),
+ .bFrameIntervalType = 3,
+- .dwFrameInterval[0] = cpu_to_le32(666666),
+- .dwFrameInterval[1] = cpu_to_le32(1000000),
+- .dwFrameInterval[2] = cpu_to_le32(5000000),
++ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_360P),
++ .dwFrameInterval[1] = cpu_to_le32(UVCG_FRM_INTERV_1_360P),
++ .dwFrameInterval[2] = cpu_to_le32(UVCG_FRM_INTERV_2_360P),
++};
++
++static u32 uvcg_frame_yuv_360p_dw_frame_interval[] = {
++ [0] = UVCG_FRM_INTERV_0_360P,
++ [1] = UVCG_FRM_INTERV_1_360P,
++ [2] = UVCG_FRM_INTERV_2_360P,
++};
++
++static const struct uvcg_frame uvcg_frame_yuv_360p = {
++ .fmt_type = UVCG_UNCOMPRESSED,
++ .frame = {
++ .b_length = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
++ .b_descriptor_type = USB_DT_CS_INTERFACE,
++ .b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED,
++ .b_frame_index = 1,
++ .bm_capabilities = 0,
++ .w_width = UVCG_WIDTH_360P,
++ .w_height = UVCG_HEIGHT_360P,
++ .dw_min_bit_rate = UVCG_MIN_BITRATE_360P,
++ .dw_max_bit_rate = UVCG_MAX_BITRATE_360P,
++ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_360P,
++ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_360P,
++ .b_frame_interval_type = 3,
++ },
++ .dw_frame_interval = uvcg_frame_yuv_360p_dw_frame_interval,
++};
++
++static struct uvcg_frame_ptr uvcg_frame_ptr_yuv_360p = {
++ .frm = (struct uvcg_frame *)&uvcg_frame_yuv_360p,
+ };
++#define UVCG_WIDTH_720P 1280
++#define UVCG_HEIGHT_720P 720
++#define UVCG_MIN_BITRATE_720P 29491200
++#define UVCG_MAX_BITRATE_720P 29491200
++#define UVCG_MAX_VIDEO_FB_SZ_720P 1843200
++#define UVCG_FRM_INTERV_0_720P 5000000
++#define UVCG_DEFAULT_FRM_INTERV_720P UVCG_FRM_INTERV_0_720P
+
+ static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+@@ -202,28 +270,66 @@ static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
+ .bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+- .wWidth = cpu_to_le16(1280),
+- .wHeight = cpu_to_le16(720),
+- .dwMinBitRate = cpu_to_le32(29491200),
+- .dwMaxBitRate = cpu_to_le32(29491200),
+- .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+- .dwDefaultFrameInterval = cpu_to_le32(5000000),
++ .wWidth = cpu_to_le16(UVCG_WIDTH_720P),
++ .wHeight = cpu_to_le16(UVCG_HEIGHT_720P),
++ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_720P),
++ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_720P),
++ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_720P),
++ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_720P),
+ .bFrameIntervalType = 1,
+- .dwFrameInterval[0] = cpu_to_le32(5000000),
++ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_720P),
+ };
+
+-static const struct uvc_format_mjpeg uvc_format_mjpg = {
+- .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
+- .bDescriptorType = USB_DT_CS_INTERFACE,
+- .bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
+- .bFormatIndex = 2,
+- .bNumFrameDescriptors = 2,
+- .bmFlags = 0,
+- .bDefaultFrameIndex = 1,
+- .bAspectRatioX = 0,
+- .bAspectRatioY = 0,
+- .bmInterlaceFlags = 0,
+- .bCopyProtect = 0,
++static u32 uvcg_frame_yuv_720p_dw_frame_interval[] = {
++ [0] = UVCG_FRM_INTERV_0_720P,
++};
++
++static const struct uvcg_frame uvcg_frame_yuv_720p = {
++ .fmt_type = UVCG_UNCOMPRESSED,
++ .frame = {
++ .b_length = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
++ .b_descriptor_type = USB_DT_CS_INTERFACE,
++ .b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED,
++ .b_frame_index = 2,
++ .bm_capabilities = 0,
++ .w_width = UVCG_WIDTH_720P,
++ .w_height = UVCG_HEIGHT_720P,
++ .dw_min_bit_rate = UVCG_MIN_BITRATE_720P,
++ .dw_max_bit_rate = UVCG_MAX_BITRATE_720P,
++ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_720P,
++ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_720P,
++ .b_frame_interval_type = 1,
++ },
++ .dw_frame_interval = uvcg_frame_yuv_720p_dw_frame_interval,
++};
++
++static struct uvcg_frame_ptr uvcg_frame_ptr_yuv_720p = {
++ .frm = (struct uvcg_frame *)&uvcg_frame_yuv_720p,
++};
++
++static struct uvcg_mjpeg uvcg_format_mjpeg = {
++ .fmt = {
++ .type = UVCG_MJPEG,
++ /* add to .frames and fill .num_frames at runtime */
++ .color_matching = (struct uvcg_color_matching *)&uvcg_color_matching,
++ },
++ .desc = {
++ .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
++ .bDescriptorType = USB_DT_CS_INTERFACE,
++ .bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
++ .bFormatIndex = 2,
++ .bNumFrameDescriptors = 2,
++ .bmFlags = 0,
++ .bDefaultFrameIndex = 1,
++ .bAspectRatioX = 0,
++ .bAspectRatioY = 0,
++ .bmInterlaceFlags = 0,
++ .bCopyProtect = 0,
++ },
++};
++
++static struct uvcg_format_ptr uvcg_format_ptr_mjpeg = {
++ .fmt = &uvcg_format_mjpeg.fmt,
+ };
+
+ DECLARE_UVC_FRAME_MJPEG(1);
+@@ -235,16 +341,45 @@ static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
+ .bDescriptorSubType = UVC_VS_FRAME_MJPEG,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+- .wWidth = cpu_to_le16(640),
+- .wHeight = cpu_to_le16(360),
+- .dwMinBitRate = cpu_to_le32(18432000),
+- .dwMaxBitRate = cpu_to_le32(55296000),
+- .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
+- .dwDefaultFrameInterval = cpu_to_le32(666666),
++ .wWidth = cpu_to_le16(UVCG_WIDTH_360P),
++ .wHeight = cpu_to_le16(UVCG_HEIGHT_360P),
++ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_360P),
++ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_360P),
++ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_360P),
++ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_360P),
+ .bFrameIntervalType = 3,
+- .dwFrameInterval[0] = cpu_to_le32(666666),
+- .dwFrameInterval[1] = cpu_to_le32(1000000),
+- .dwFrameInterval[2] = cpu_to_le32(5000000),
++ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_360P),
++ .dwFrameInterval[1] = cpu_to_le32(UVCG_FRM_INTERV_1_360P),
++ .dwFrameInterval[2] = cpu_to_le32(UVCG_FRM_INTERV_2_360P),
++};
++
++static u32 uvcg_frame_mjpeg_360p_dw_frame_interval[] = {
++ [0] = UVCG_FRM_INTERV_0_360P,
++ [1] = UVCG_FRM_INTERV_1_360P,
++ [2] = UVCG_FRM_INTERV_2_360P,
++};
++
++static const struct uvcg_frame uvcg_frame_mjpeg_360p = {
++ .fmt_type = UVCG_MJPEG,
++ .frame = {
++ .b_length = UVC_DT_FRAME_MJPEG_SIZE(3),
++ .b_descriptor_type = USB_DT_CS_INTERFACE,
++ .b_descriptor_subtype = UVC_VS_FRAME_MJPEG,
++ .b_frame_index = 1,
++ .bm_capabilities = 0,
++ .w_width = UVCG_WIDTH_360P,
++ .w_height = UVCG_HEIGHT_360P,
++ .dw_min_bit_rate = UVCG_MIN_BITRATE_360P,
++ .dw_max_bit_rate = UVCG_MAX_BITRATE_360P,
++ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_360P,
++ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_360P,
++ .b_frame_interval_type = 3,
++ },
++ .dw_frame_interval = uvcg_frame_mjpeg_360p_dw_frame_interval,
++};
++
++static struct uvcg_frame_ptr uvcg_frame_ptr_mjpeg_360p = {
++ .frm = (struct uvcg_frame *)&uvcg_frame_mjpeg_360p,
+ };
+
+ static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
+@@ -253,23 +388,44 @@ static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
+ .bDescriptorSubType = UVC_VS_FRAME_MJPEG,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+- .wWidth = cpu_to_le16(1280),
+- .wHeight = cpu_to_le16(720),
+- .dwMinBitRate = cpu_to_le32(29491200),
+- .dwMaxBitRate = cpu_to_le32(29491200),
+- .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+- .dwDefaultFrameInterval = cpu_to_le32(5000000),
++ .wWidth = cpu_to_le16(UVCG_WIDTH_720P),
++ .wHeight = cpu_to_le16(UVCG_HEIGHT_720P),
++ .dwMinBitRate = cpu_to_le32(UVCG_MIN_BITRATE_720P),
++ .dwMaxBitRate = cpu_to_le32(UVCG_MAX_BITRATE_720P),
++ .dwMaxVideoFrameBufferSize = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_720P),
++ .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_720P),
+ .bFrameIntervalType = 1,
+- .dwFrameInterval[0] = cpu_to_le32(5000000),
++ .dwFrameInterval[0] = cpu_to_le32(UVCG_FRM_INTERV_0_720P),
+ };
+
+-static const struct uvc_color_matching_descriptor uvc_color_matching = {
+- .bLength = UVC_DT_COLOR_MATCHING_SIZE,
+- .bDescriptorType = USB_DT_CS_INTERFACE,
+- .bDescriptorSubType = UVC_VS_COLORFORMAT,
+- .bColorPrimaries = 1,
+- .bTransferCharacteristics = 1,
+- .bMatrixCoefficients = 4,
++static u32 uvcg_frame_mjpeg_720p_dw_frame_interval[] = {
++ [0] = UVCG_FRM_INTERV_0_720P,
++};
++
++static const struct uvcg_frame uvcg_frame_mjpeg_720p = {
++ .fmt_type = UVCG_MJPEG,
++ .frame = {
++ .b_length = UVC_DT_FRAME_MJPEG_SIZE(1),
++ .b_descriptor_type = USB_DT_CS_INTERFACE,
++ .b_descriptor_subtype = UVC_VS_FRAME_MJPEG,
++ .b_frame_index = 2,
++ .bm_capabilities = 0,
++ .w_width = UVCG_WIDTH_720P,
++ .w_height = UVCG_HEIGHT_720P,
++ .dw_min_bit_rate = UVCG_MIN_BITRATE_720P,
++ .dw_max_bit_rate = UVCG_MAX_BITRATE_720P,
++ .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_720P,
++ .dw_default_frame_interval = UVCG_DEFAULT_FRM_INTERV_720P,
++ .b_frame_interval_type = 1,
++ },
++ .dw_frame_interval = uvcg_frame_mjpeg_720p_dw_frame_interval,
++};
++
++static struct uvcg_frame_ptr uvcg_frame_ptr_mjpeg_720p = {
++ .frm = (struct uvcg_frame *)&uvcg_frame_mjpeg_720p,
++};
++
++static struct uvcg_streaming_header uvcg_streaming_header = {
+ };
+
+ static const struct uvc_descriptor_header * const uvc_fs_control_cls[] = {
+@@ -290,40 +446,40 @@ static const struct uvc_descriptor_header * const uvc_ss_control_cls[] = {
+
+ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+- (const struct uvc_descriptor_header *) &uvc_format_yuv,
++ (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+- (const struct uvc_descriptor_header *) &uvc_color_matching,
+- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
++ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
++ (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+- (const struct uvc_descriptor_header *) &uvc_color_matching,
++ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+ NULL,
+ };
+
+ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+- (const struct uvc_descriptor_header *) &uvc_format_yuv,
++ (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+- (const struct uvc_descriptor_header *) &uvc_color_matching,
+- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
++ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
++ (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+- (const struct uvc_descriptor_header *) &uvc_color_matching,
++ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+ NULL,
+ };
+
+ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+- (const struct uvc_descriptor_header *) &uvc_format_yuv,
++ (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+- (const struct uvc_descriptor_header *) &uvc_color_matching,
+- (const struct uvc_descriptor_header *) &uvc_format_mjpg,
++ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
++ (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+- (const struct uvc_descriptor_header *) &uvc_color_matching,
++ (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+ NULL,
+ };
+
+@@ -387,6 +543,23 @@ webcam_bind(struct usb_composite_dev *cdev)
+ uvc_opts->hs_streaming = uvc_hs_streaming_cls;
+ uvc_opts->ss_streaming = uvc_ss_streaming_cls;
+
++ INIT_LIST_HEAD(&uvcg_format_yuv.fmt.frames);
++ list_add_tail(&uvcg_frame_ptr_yuv_360p.entry, &uvcg_format_yuv.fmt.frames);
++ list_add_tail(&uvcg_frame_ptr_yuv_720p.entry, &uvcg_format_yuv.fmt.frames);
++ uvcg_format_yuv.fmt.num_frames = 2;
++
++ INIT_LIST_HEAD(&uvcg_format_mjpeg.fmt.frames);
++ list_add_tail(&uvcg_frame_ptr_mjpeg_360p.entry, &uvcg_format_mjpeg.fmt.frames);
++ list_add_tail(&uvcg_frame_ptr_mjpeg_720p.entry, &uvcg_format_mjpeg.fmt.frames);
++ uvcg_format_mjpeg.fmt.num_frames = 2;
++
++ INIT_LIST_HEAD(&uvcg_streaming_header.formats);
++ list_add_tail(&uvcg_format_ptr_yuv.entry, &uvcg_streaming_header.formats);
++ list_add_tail(&uvcg_format_ptr_mjpeg.entry, &uvcg_streaming_header.formats);
++ uvcg_streaming_header.num_fmt = 2;
++
++ uvc_opts->header = &uvcg_streaming_header;
++
+ /* Allocate string descriptor numbers ... note that string contents
+ * can be overridden by the composite_dev glue.
+ */
+diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
+index 2ef89a442f50f1..4868286574a1c9 100644
+--- a/drivers/usb/gadget/udc/aspeed_udc.c
++++ b/drivers/usb/gadget/udc/aspeed_udc.c
+@@ -66,8 +66,8 @@
+ #define USB_UPSTREAM_EN BIT(0)
+
+ /* Main config reg */
+-#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f)
+-#define UDC_CFG_ADDR_MASK (0x3f)
++#define UDC_CFG_SET_ADDR(x) ((x) & UDC_CFG_ADDR_MASK)
++#define UDC_CFG_ADDR_MASK GENMASK(6, 0)
+
+ /* Interrupt ctrl & status reg */
+ #define UDC_IRQ_EP_POOL_NAK BIT(17)
+@@ -1009,6 +1009,8 @@ static void ast_udc_getstatus(struct ast_udc_dev *udc)
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = crq.wIndex & USB_ENDPOINT_NUMBER_MASK;
++ if (epnum >= AST_UDC_NUM_ENDPOINTS)
++ goto stall;
+ status = udc->ep[epnum].stopped;
+ break;
+ default:
+diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+index 0eed0e03842cff..d394affb707236 100644
+--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
++++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+@@ -2251,7 +2251,6 @@ static int cdns2_gadget_start(struct cdns2_device *pdev)
+ {
+ u32 max_speed;
+ void *buf;
+- int val;
+ int ret;
+
+ pdev->usb_regs = pdev->regs;
+@@ -2261,14 +2260,9 @@ static int cdns2_gadget_start(struct cdns2_device *pdev)
+ pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;
+
+ /* Reset controller. */
+- set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST);
+-
+- ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val,
+- !(val & CPUCTRL_SW_RST), 1, 10000);
+- if (ret) {
+- dev_err(pdev->dev, "Error: reset controller timeout\n");
+- return -EINVAL;
+- }
++ writeb(CPUCTRL_SW_RST | CPUCTRL_UPCLK | CPUCTRL_WUEN,
++ &pdev->usb_regs->cpuctrl);
++ usleep_range(5, 10);
+
+ usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);
+
+diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
+index 71e2f62d653a51..b5d5ec12e986e0 100644
+--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
++++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
+@@ -292,8 +292,17 @@ struct cdns2_usb_regs {
+ #define SPEEDCTRL_HSDISABLE BIT(7)
+
+ /* CPUCTRL- bitmasks. */
++/* UP clock enable */
++#define CPUCTRL_UPCLK BIT(0)
+ /* Controller reset bit. */
+ #define CPUCTRL_SW_RST BIT(1)
++/*
++ * If the wuen bit is '1', the upclken is automatically set to '1' after
++ * detecting rising edge of wuintereq interrupt. If the wuen bit is '0',
++ * the wuintereq interrupt is ignored.
++ */
++#define CPUCTRL_WUEN BIT(7)
++
+
+ /**
+ * struct cdns2_adma_regs - ADMA controller registers.
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 7166d1117742a1..33979f61dc4dd7 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -118,12 +118,10 @@ int usb_ep_enable(struct usb_ep *ep)
+ goto out;
+
+ /* UDC drivers can't handle endpoints with maxpacket size 0 */
+- if (usb_endpoint_maxp(ep->desc) == 0) {
+- /*
+- * We should log an error message here, but we can't call
+- * dev_err() because there's no way to find the gadget
+- * given only ep.
+- */
++ if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) {
++ WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name,
++ (!ep->desc) ? "NULL descriptor" : "maxpacket 0");
++
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -292,7 +290,9 @@ int usb_ep_queue(struct usb_ep *ep,
+ {
+ int ret = 0;
+
+- if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
++ if (!ep->enabled && ep->address) {
++ pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",
++ ep->address, ep->name);
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+@@ -1635,8 +1635,6 @@ static void gadget_unbind_driver(struct device *dev)
+
+ dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+
+- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+-
+ udc->allow_connect = false;
+ cancel_work_sync(&udc->vbus_work);
+ mutex_lock(&udc->connect_lock);
+@@ -1656,6 +1654,8 @@ static void gadget_unbind_driver(struct device *dev)
+ driver->is_bound = false;
+ udc->driver = NULL;
+ mutex_unlock(&udc_lock);
++
++ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ }
+
+ /* ------------------------------------------------------------------------- */
+@@ -1671,6 +1671,7 @@ int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver,
+ driver->driver.bus = &gadget_bus_type;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
++ driver->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+ ret = driver_register(&driver->driver);
+ if (ret) {
+ pr_warn("%s: driver registration failed: %d\n",
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index ee5705d336e3d6..10a82527626eb6 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -2486,7 +2486,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ /* setup the udc->eps[] for non-control endpoints and link
+ * to gadget.ep_list */
+ for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
+- char name[14];
++ char name[16];
+
+ sprintf(name, "ep%dout", i);
+ struct_ep_setup(udc_controller, i * 2, name, 1);
+diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
+index 12e76bb62c2094..19bbc38f3d35dc 100644
+--- a/drivers/usb/gadget/udc/net2272.c
++++ b/drivers/usb/gadget/udc/net2272.c
+@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
+ goto err_req;
+ }
+
+- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
++ ret = net2272_probe_fin(dev, irqflags);
+ if (ret)
+ goto err_io;
+
+diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
+index 10c5d7f726a1fd..f90eeecf27de11 100644
+--- a/drivers/usb/gadget/udc/omap_udc.c
++++ b/drivers/usb/gadget/udc/omap_udc.c
+@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
+
+ static inline int machine_without_vbus_sense(void)
+ {
+- return machine_is_omap_osk() || machine_is_sx1();
++ return machine_is_omap_osk() || machine_is_omap_palmte() ||
++ machine_is_sx1();
+ }
+
+ static int omap_udc_start(struct usb_gadget *g,
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index cb85168fd00c28..7aa46d426f31b2 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+
+ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ {
+- int err = 0, usb3;
+- unsigned int i;
++ int err = 0, usb3_companion_port;
++ unsigned int i, j;
+
+ xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
+ sizeof(*xudc->utmi_phy), GFP_KERNEL);
+@@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ if (IS_ERR(xudc->utmi_phy[i])) {
+ err = PTR_ERR(xudc->utmi_phy[i]);
+ dev_err_probe(xudc->dev, err,
+- "failed to get usb2-%d PHY\n", i);
++ "failed to get PHY for phy-name usb2-%d\n", i);
+ goto clean_up;
+ } else if (xudc->utmi_phy[i]) {
+ /* Get usb-phy, if utmi phy is available */
+@@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
+ }
+
+ /* Get USB3 phy */
+- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+- if (usb3 < 0)
++ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
++ if (usb3_companion_port < 0)
+ continue;
+
+- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
+- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+- if (IS_ERR(xudc->usb3_phy[i])) {
+- err = PTR_ERR(xudc->usb3_phy[i]);
+- dev_err_probe(xudc->dev, err,
+- "failed to get usb3-%d PHY\n", usb3);
+- goto clean_up;
+- } else if (xudc->usb3_phy[i])
+- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
++ for (j = 0; j < xudc->soc->num_phys; j++) {
++ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
++ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
++ if (IS_ERR(xudc->usb3_phy[i])) {
++ err = PTR_ERR(xudc->usb3_phy[i]);
++ dev_err_probe(xudc->dev, err,
++ "failed to get PHY for phy-name usb3-%d\n", j);
++ goto clean_up;
++ } else if (xudc->usb3_phy[i]) {
++ int usb2_port =
++ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
++ int usb3_port =
++ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
++ if (usb3_port == usb3_companion_port) {
++ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
++ usb2_port, usb3_port, i);
++ break;
++ }
++ }
++ }
+ }
+
+ return err;
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 4f9982ecfb583e..5cec7640e913c8 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -888,6 +888,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ /* Check for an all 1's result which is a typical consequence
+ * of dead, unclocked, or unplugged (CardBus...) devices
+ */
++again:
+ if (ints == ~(u32)0) {
+ ohci->rh_state = OHCI_RH_HALTED;
+ ohci_dbg (ohci, "device removed!\n");
+@@ -982,6 +983,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ }
+ spin_unlock(&ohci->lock);
+
++ /* repeat until all enabled interrupts are handled */
++ if (ohci->rh_state != OHCI_RH_HALTED) {
++ ints = ohci_readl(ohci, &regs->intrstatus);
++ if (ints && (ints & ohci_readl(ohci, &regs->intrenable)))
++ goto again;
++ }
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index 0956495bba5757..2b871540bb5002 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
+ finish_request(sl811, ep, urb, urbstat);
+ }
+
++#ifdef QUIRK2
+ static inline u8 checkdone(struct sl811 *sl811)
+ {
+ u8 ctl;
+@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
+ #endif
+ return irqstat;
+ }
++#endif
+
+ static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
+ {
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 0a37f0d511cf53..54c47463c215c2 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1729,6 +1729,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+ }
+
+ command->status = 0;
++ /* set default timeout to 5000 ms */
++ command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
+ INIT_LIST_HEAD(&command->cmd_list);
+ return command;
+ }
+@@ -2287,7 +2289,10 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ erst_base &= ERST_BASE_RSVDP;
+ erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
+- xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
++ if (xhci->quirks & XHCI_WRITE_64_HI_LO)
++ hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
++ else
++ xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
+
+ /* Set the event ring dequeue address of this interrupter */
+ xhci_set_hc_event_deq(xhci, ir);
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index bbdf1b0b7be11e..3252e3d2d79cd6 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -7,6 +7,7 @@
+ * Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/iopoll.h>
+ #include <linux/kernel.h>
+@@ -73,6 +74,9 @@
+ #define FRMCNT_LEV1_RANG (0x12b << 8)
+ #define FRMCNT_LEV1_RANG_MASK GENMASK(19, 8)
+
++#define HSCH_CFG1 0x960
++#define SCH3_RXFIFO_DEPTH_MASK GENMASK(21, 20)
++
+ #define SS_GEN2_EOF_CFG 0x990
+ #define SSG2EOF_OFFSET 0x3c
+
+@@ -114,6 +118,8 @@
+ #define SSC_IP_SLEEP_EN BIT(4)
+ #define SSC_SPM_INT_EN BIT(1)
+
++#define SCH_FIFO_TO_KB(x) ((x) >> 10)
++
+ enum ssusb_uwk_vers {
+ SSUSB_UWK_V1 = 1,
+ SSUSB_UWK_V2,
+@@ -165,6 +171,35 @@ static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk)
+ writel(value, hcd->regs + SS_GEN2_EOF_CFG);
+ }
+
++/*
++ * workaround: usb3.2 gen1 isoc rx hw issue
++ * host sends out an unexpected ACK after the device finishes a burst
++ * transfer with a short packet.
++ */
++static void xhci_mtk_rxfifo_depth_set(struct xhci_hcd_mtk *mtk)
++{
++ struct usb_hcd *hcd = mtk->hcd;
++ u32 value;
++
++ if (!mtk->rxfifo_depth)
++ return;
++
++ value = readl(hcd->regs + HSCH_CFG1);
++ value &= ~SCH3_RXFIFO_DEPTH_MASK;
++ value |= FIELD_PREP(SCH3_RXFIFO_DEPTH_MASK,
++ SCH_FIFO_TO_KB(mtk->rxfifo_depth) - 1);
++ writel(value, hcd->regs + HSCH_CFG1);
++}
++
++static void xhci_mtk_init_quirk(struct xhci_hcd_mtk *mtk)
++{
++ /* workaround only for mt8195 */
++ xhci_mtk_set_frame_interval(mtk);
++
++ /* workaround for SoCs using SSUSB about before IPM v1.6.0 */
++ xhci_mtk_rxfifo_depth_set(mtk);
++}
++
+ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
+ {
+ struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
+@@ -448,8 +483,7 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
+ if (ret)
+ return ret;
+
+- /* workaround only for mt8195 */
+- xhci_mtk_set_frame_interval(mtk);
++ xhci_mtk_init_quirk(mtk);
+ }
+
+ ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+@@ -527,6 +561,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ of_property_read_u32(node, "mediatek,u2p-dis-msk",
+ &mtk->u2p_dis_msk);
+
++ of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth);
++
+ ret = usb_wakeup_of_property_parse(mtk, node);
+ if (ret) {
+ dev_err(dev, "failed to parse uwk property\n");
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index faaaf05e36ce09..ac042077db8c12 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -160,6 +160,8 @@ struct xhci_hcd_mtk {
+ struct regmap *uwk;
+ u32 uwk_reg_base;
+ u32 uwk_vers;
++ /* quirk */
++ u32 rxfifo_depth;
+ };
+
+ static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index b9ae5c2a25275f..044303187d9fd0 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -36,6 +36,7 @@
+
+ #define PCI_VENDOR_ID_ETRON 0x1b6f
+ #define PCI_DEVICE_ID_EJ168 0x7023
++#define PCI_DEVICE_ID_EJ188 0x7052
+
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+@@ -72,8 +73,12 @@
+ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
+ #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
+ #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
++#define PCI_DEVICE_ID_ASMEDIA_3042_XHCI 0x3042
+ #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
+
++#define PCI_DEVICE_ID_CADENCE 0x17CD
++#define PCI_DEVICE_ID_CADENCE_SSP 0x0200
++
+ static const char hcd_name[] = "xhci_hcd";
+
+ static struct hc_driver __read_mostly xhci_pci_hc_driver;
+@@ -461,6 +466,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
++ pdev->device == PCI_DEVICE_ID_EJ188) {
++ xhci->quirks |= XHCI_RESET_ON_RESUME;
++ xhci->quirks |= XHCI_BROKEN_STREAMS;
++ }
++
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0014) {
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+@@ -506,6 +517,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+ xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+
++ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
++ pdev->device == PCI_DEVICE_ID_ASMEDIA_3042_XHCI)
++ xhci->quirks |= XHCI_RESET_ON_RESUME;
++
+ if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
+ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+
+@@ -532,6 +547,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ }
+
++ if (pdev->vendor == PCI_DEVICE_ID_CADENCE &&
++ pdev->device == PCI_DEVICE_ID_CADENCE_SSP)
++ xhci->quirks |= XHCI_CDNS_SCTX_QUIRK;
++
+ /* xHC spec requires PCI devices to support D3hot and D3cold */
+ if (xhci->hci_version >= 0x120)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+@@ -693,7 +712,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
+
+- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
++ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
++ pm_runtime_forbid(&dev->dev);
++ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_allow(&dev->dev);
+
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+@@ -712,8 +733,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ static void xhci_pci_remove(struct pci_dev *dev)
+ {
+ struct xhci_hcd *xhci;
++ bool set_power_d3;
+
+ xhci = hcd_to_xhci(pci_get_drvdata(dev));
++ set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP;
+
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+@@ -726,11 +749,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ xhci->shared_hcd = NULL;
+ }
+
++ usb_hcd_pci_remove(dev);
++
+ /* Workaround for spurious wakeups at shutdown with HSW */
+- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ if (set_power_d3)
+ pci_set_power_state(dev, PCI_D3hot);
+-
+- usb_hcd_pci_remove(dev);
+ }
+
+ /*
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 28218c8f183768..d68e9abcdc69a6 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/usb/phy.h>
+ #include <linux/slab.h>
+@@ -148,7 +149,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ int ret;
+ int irq;
+ struct xhci_plat_priv *priv = NULL;
+-
++ bool of_match;
+
+ if (usb_disabled())
+ return -ENODEV;
+@@ -249,20 +250,30 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
++ if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
++ xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
++
+ device_property_read_u32(tmpdev, "imod-interval-ns",
+ &xhci->imod_interval);
+ }
+
+- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+- if (IS_ERR(hcd->usb_phy)) {
+- ret = PTR_ERR(hcd->usb_phy);
+- if (ret == -EPROBE_DEFER)
+- goto disable_clk;
+- hcd->usb_phy = NULL;
+- } else {
+- ret = usb_phy_init(hcd->usb_phy);
+- if (ret)
+- goto disable_clk;
++ /*
++	 * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
++ * matching for the xhci platform device).
++ */
++ of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
++ if (of_match) {
++ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
++ if (IS_ERR(hcd->usb_phy)) {
++ ret = PTR_ERR(hcd->usb_phy);
++ if (ret == -EPROBE_DEFER)
++ goto disable_clk;
++ hcd->usb_phy = NULL;
++ } else {
++ ret = usb_phy_init(hcd->usb_phy);
++ if (ret)
++ goto disable_clk;
++ }
+ }
+
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+@@ -285,15 +296,17 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
+ goto dealloc_usb2_hcd;
+ }
+
+- xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
+- "usb-phy", 1);
+- if (IS_ERR(xhci->shared_hcd->usb_phy)) {
+- xhci->shared_hcd->usb_phy = NULL;
+- } else {
+- ret = usb_phy_init(xhci->shared_hcd->usb_phy);
+- if (ret)
+- dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
+- __func__, ret);
++ if (of_match) {
++ xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
++ "usb-phy", 1);
++ if (IS_ERR(xhci->shared_hcd->usb_phy)) {
++ xhci->shared_hcd->usb_phy = NULL;
++ } else {
++ ret = usb_phy_init(xhci->shared_hcd->usb_phy);
++ if (ret)
++ dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
++ __func__, ret);
++ }
+ }
+
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
+@@ -423,7 +436,7 @@ void xhci_plat_remove(struct platform_device *dev)
+ }
+ EXPORT_SYMBOL_GPL(xhci_plat_remove);
+
+-static int __maybe_unused xhci_plat_suspend(struct device *dev)
++static int xhci_plat_suspend(struct device *dev)
+ {
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+@@ -451,30 +464,55 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
+ return 0;
+ }
+
+-static int __maybe_unused xhci_plat_resume(struct device *dev)
++static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg)
+ {
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+- clk_prepare_enable(xhci->clk);
+- clk_prepare_enable(xhci->reg_clk);
++ ret = clk_prepare_enable(xhci->clk);
++ if (ret)
++ return ret;
++
++ ret = clk_prepare_enable(xhci->reg_clk);
++ if (ret) {
++ clk_disable_unprepare(xhci->clk);
++ return ret;
++ }
+ }
+
+ ret = xhci_priv_resume_quirk(hcd);
+ if (ret)
+- return ret;
++ goto disable_clks;
+
+- ret = xhci_resume(xhci, PMSG_RESUME);
++ ret = xhci_resume(xhci, pmsg);
+ if (ret)
+- return ret;
++ goto disable_clks;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
++
++disable_clks:
++ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
++ clk_disable_unprepare(xhci->clk);
++ clk_disable_unprepare(xhci->reg_clk);
++ }
++
++ return ret;
++}
++
++static int xhci_plat_resume(struct device *dev)
++{
++ return xhci_plat_resume_common(dev, PMSG_RESUME);
++}
++
++static int xhci_plat_restore(struct device *dev)
++{
++ return xhci_plat_resume_common(dev, PMSG_RESTORE);
+ }
+
+ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+@@ -499,7 +537,12 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
+ }
+
+ const struct dev_pm_ops xhci_plat_pm_ops = {
+- SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
++ .suspend = pm_sleep_ptr(xhci_plat_suspend),
++ .resume = pm_sleep_ptr(xhci_plat_resume),
++ .freeze = pm_sleep_ptr(xhci_plat_suspend),
++ .thaw = pm_sleep_ptr(xhci_plat_resume),
++ .poweroff = pm_sleep_ptr(xhci_plat_suspend),
++ .restore = pm_sleep_ptr(xhci_plat_restore),
+
+ SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
+ xhci_plat_runtime_resume,
+diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
+index 2d15386f2c504b..6475130eac4b38 100644
+--- a/drivers/usb/host/xhci-plat.h
++++ b/drivers/usb/host/xhci-plat.h
+@@ -8,7 +8,9 @@
+ #ifndef _XHCI_PLAT_H
+ #define _XHCI_PLAT_H
+
+-#include "xhci.h" /* for hcd_to_xhci() */
++struct device;
++struct platform_device;
++struct usb_hcd;
+
+ struct xhci_plat_priv {
+ const char *firmware_name;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 3e5dc0723a8fc2..7d959e2753f906 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -326,7 +326,13 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc
+ /* how many trbs will be queued past the enqueue segment? */
+ trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
+
+- if (trbs_past_seg <= 0)
++ /*
++ * Consider expanding the ring already if num_trbs fills the current
++ * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
++	 * the next segment. Avoids confusing a full ring with the special
++	 * empty ring case below.
++ */
++ if (trbs_past_seg < 0)
+ return 0;
+
+ /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
+@@ -366,9 +372,10 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+ readl(&xhci->dba->doorbell[0]);
+ }
+
+-static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
++static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
+ {
+- return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
++ return mod_delayed_work(system_wq, &xhci->cmd_timer,
++ msecs_to_jiffies(xhci->current_cmd->timeout_ms));
+ }
+
+ static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+@@ -412,7 +419,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
++ xhci_mod_cmd_timer(xhci);
+ xhci_ring_cmd_db(xhci);
+ }
+ }
+@@ -1020,13 +1027,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ break;
+ case TD_DIRTY: /* TD is cached, clear it */
+ case TD_HALTED:
++ case TD_CLEARING_CACHE_DEFERRED:
++ if (cached_td) {
++ if (cached_td->urb->stream_id != td->urb->stream_id) {
++ /* Multiple streams case, defer move dq */
++ xhci_dbg(xhci,
++ "Move dq deferred: stream %u URB %p\n",
++ td->urb->stream_id, td->urb);
++ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
++ break;
++ }
++
++ /* Should never happen, but clear the TD if it does */
++ xhci_warn(xhci,
++ "Found multiple active URBs %p and %p in stream %u?\n",
++ td->urb, cached_td->urb,
++ td->urb->stream_id);
++ td_to_noop(xhci, ring, cached_td, false);
++ cached_td->cancel_status = TD_CLEARED;
++ }
++ td_to_noop(xhci, ring, td, false);
+ td->cancel_status = TD_CLEARING_CACHE;
+- if (cached_td)
+- /* FIXME stream case, several stopped rings */
+- xhci_dbg(xhci,
+- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
+- td->urb->stream_id, td->urb,
+- cached_td->urb->stream_id, cached_td->urb);
+ cached_td = td;
+ break;
+ }
+@@ -1046,10 +1067,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ if (err) {
+ /* Failed to move past cached td, just set cached TDs to no-op */
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
+- if (td->cancel_status != TD_CLEARING_CACHE)
++ /*
++ * Deferred TDs need to have the deq pointer set after the above command
++ * completes, so if that failed we just give up on all of them (and
++ * complain loudly since this could cause issues due to caching).
++ */
++ if (td->cancel_status != TD_CLEARING_CACHE &&
++ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
+ continue;
+- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+- td->urb);
++ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
++ td->urb);
+ td_to_noop(xhci, ring, td, false);
+ td->cancel_status = TD_CLEARED;
+ }
+@@ -1327,6 +1354,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_td *td, *tmp_td;
++ bool deferred = false;
+
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+@@ -1386,6 +1414,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
++
++ /*
++ * Cadence xHCI controllers store some endpoint state
++			 * information within the Rsvd0 fields of the Stream Endpoint
++			 * context. These fields are not cleared during the Set TR
++			 * Dequeue Pointer command, which causes the XDMA to skip
++			 * over the transfer ring and leads to data loss on the
++			 * stream pipe.
++			 * To fix this issue the driver must clear the Rsvd0 fields.
++ */
++ if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
++ ctx->reserved[0] = 0;
++ ctx->reserved[1] = 0;
++ }
+ } else {
+ deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ }
+@@ -1413,6 +1455,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
+ __func__, td->urb);
+ xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
++ } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
++ deferred = true;
+ } else {
+ xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
+ __func__, td->urb, td->cancel_status);
+@@ -1422,8 +1466,17 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ ep->ep_state &= ~SET_DEQ_PENDING;
+ ep->queued_deq_seg = NULL;
+ ep->queued_deq_ptr = NULL;
+- /* Restart any rings with pending URBs */
+- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++
++ if (deferred) {
++ /* We have more streams to clear */
++ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
++ __func__);
++ xhci_invalidate_cancelled_tds(ep);
++ } else {
++ /* Restart any rings with pending URBs */
++ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
++ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++ }
+ }
+
+ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+@@ -1786,7 +1839,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ if (!list_is_singular(&xhci->cmd_list)) {
+ xhci->current_cmd = list_first_entry(&cmd->cmd_list,
+ struct xhci_command, cmd_list);
+- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
++ xhci_mod_cmd_timer(xhci);
+ } else if (xhci->current_cmd == cmd) {
+ xhci->current_cmd = NULL;
+ }
+@@ -2377,6 +2430,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
++ /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
++ if (td->error_mid_td)
++ break;
+ if (remaining) {
+ frame->status = short_framestatus;
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+@@ -2392,9 +2448,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ case COMP_BANDWIDTH_OVERRUN_ERROR:
+ frame->status = -ECOMM;
+ break;
+- case COMP_ISOCH_BUFFER_OVERRUN:
+ case COMP_BABBLE_DETECTED_ERROR:
++ sum_trbs_for_length = true;
++ fallthrough;
++ case COMP_ISOCH_BUFFER_OVERRUN:
+ frame->status = -EOVERFLOW;
++ if (ep_trb != td->last_trb)
++ td->error_mid_td = true;
+ break;
+ case COMP_INCOMPATIBLE_DEVICE_ERROR:
+ case COMP_STALL_ERROR:
+@@ -2402,8 +2462,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ break;
+ case COMP_USB_TRANSACTION_ERROR:
+ frame->status = -EPROTO;
++ sum_trbs_for_length = true;
+ if (ep_trb != td->last_trb)
+- return 0;
++ td->error_mid_td = true;
+ break;
+ case COMP_STOPPED:
+ sum_trbs_for_length = true;
+@@ -2423,6 +2484,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ break;
+ }
+
++ if (td->urb_length_set)
++ goto finish_td;
++
+ if (sum_trbs_for_length)
+ frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
+ ep_trb_len - remaining;
+@@ -2431,6 +2495,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+
+ td->urb->actual_length += frame->actual_length;
+
++finish_td:
++ /* Don't give back TD yet if we encountered an error mid TD */
++ if (td->error_mid_td && ep_trb != td->last_trb) {
++ xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
++ td->urb_length_set = true;
++ return 0;
++ }
++
+ return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ }
+
+@@ -2499,9 +2571,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ goto finish_td;
+ case COMP_STOPPED_LENGTH_INVALID:
+ /* stopped on ep trb with invalid length, exclude it */
+- ep_trb_len = 0;
+- remaining = 0;
+- break;
++ td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
++ goto finish_td;
+ case COMP_USB_TRANSACTION_ERROR:
+ if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
+ (ep->err_count++ > MAX_SOFT_RETRY) ||
+@@ -2809,17 +2880,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ }
+
+ if (!ep_seg) {
+- if (!ep->skip ||
+- !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+- /* Some host controllers give a spurious
+- * successful event after a short transfer.
+- * Ignore it.
+- */
+- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+- ep_ring->last_td_was_short) {
+- ep_ring->last_td_was_short = false;
+- goto cleanup;
++
++ if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
++ skip_isoc_td(xhci, td, ep, status);
++ goto cleanup;
++ }
++
++ /*
++ * Some hosts give a spurious success event after a short
++ * transfer. Ignore it.
++ */
++ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
++ ep_ring->last_td_was_short) {
++ ep_ring->last_td_was_short = false;
++ goto cleanup;
++ }
++
++ /*
++		 * xhci 4.10.2 states that isoc endpoints should continue
++		 * processing the next TD if there was an error mid TD.
++		 * So hosts like NEC don't generate an event for the last
++		 * isoc TRB even if the IOC flag is set.
++		 * xhci 4.9.1 states that if there are errors in multi-TRB
++		 * TDs, the xHC should generate an error for that TRB, and if the
++		 * xHC proceeds to the next TD it should generate an event for
++		 * any TRB with the IOC flag on the way. Other hosts follow this.
++ * So this event might be for the next TD.
++ */
++ if (td->error_mid_td &&
++ !list_is_last(&td->td_list, &ep_ring->td_list)) {
++ struct xhci_td *td_next = list_next_entry(td, td_list);
++
++ ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
++ td_next->last_trb, ep_trb_dma, false);
++ if (ep_seg) {
++ /* give back previous TD, start handling new */
++ xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
++ ep_ring->dequeue = td->last_trb;
++ ep_ring->deq_seg = td->last_trb_seg;
++ inc_deq(xhci, ep_ring);
++ xhci_td_cleanup(xhci, td, ep_ring, td->status);
++ td = td_next;
+ }
++ }
++
++ if (!ep_seg) {
+ /* HC is busted, give up! */
+ xhci_err(xhci,
+ "ERROR Transfer event TRB DMA ptr not "
+@@ -2831,9 +2936,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ ep_trb_dma, true);
+ return -ESHUTDOWN;
+ }
+-
+- skip_isoc_td(xhci, td, ep, status);
+- goto cleanup;
+ }
+ if (trb_comp_code == COMP_SHORT_PACKET)
+ ep_ring->last_td_was_short = true;
+@@ -4303,7 +4405,7 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ /* if there are no other commands queued we start the timeout timer */
+ if (list_empty(&xhci->cmd_list)) {
+ xhci->current_cmd = cmd;
+- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
++ xhci_mod_cmd_timer(xhci);
+ }
+
+ list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c
+index ec65b24eafa868..4f59867d7117cf 100644
+--- a/drivers/usb/host/xhci-rzv2m.c
++++ b/drivers/usb/host/xhci-rzv2m.c
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/usb/rzv2m_usb3drd.h>
++#include "xhci.h"
+ #include "xhci-plat.h"
+ #include "xhci-rzv2m.h"
+
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 6246d5ad146848..76f228e7443cb6 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -2183,7 +2183,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
+ goto out;
+ }
+
+- for (i = 0; i < tegra->num_usb_phys; i++) {
++ for (i = 0; i < xhci->usb2_rhub.num_ports; i++) {
+ if (!xhci->usb2_rhub.ports[i])
+ continue;
+ portsc = readl(xhci->usb2_rhub.ports[i]->addr);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e1b1b64a072329..f005ce1f91ca2a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -968,6 +968,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ int retval = 0;
+ bool comp_timer_running = false;
+ bool pending_portevent = false;
++ bool suspended_usb3_devs = false;
+ bool reinit_xhc = false;
+
+ if (!hcd->state)
+@@ -1083,10 +1084,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ xhci_dbg(xhci, "Start the secondary HCD\n");
+ retval = xhci_run(xhci->shared_hcd);
+ }
+-
++ if (retval)
++ return retval;
++ /*
++ * Resume roothubs unconditionally as PORTSC change bits are not
++ * immediately visible after xHC reset
++ */
+ hcd->state = HC_STATE_SUSPENDED;
+- if (xhci->shared_hcd)
++
++ if (xhci->shared_hcd) {
+ xhci->shared_hcd->state = HC_STATE_SUSPENDED;
++ usb_hcd_resume_root_hub(xhci->shared_hcd);
++ }
++ usb_hcd_resume_root_hub(hcd);
++
+ goto done;
+ }
+
+@@ -1110,15 +1121,21 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+
+ xhci_dbc_resume(xhci);
+
+- done:
+ if (retval == 0) {
+ /*
+ * Resume roothubs only if there are pending events.
+ * USB 3 devices resend U3 LFPS wake after a 100ms delay if
+- * the first wake signalling failed, give it that chance.
++ * the first wake signalling failed, give it that chance if
++ * there are suspended USB 3 devices.
+ */
++ if (xhci->usb3_rhub.bus_state.suspended_ports ||
++ xhci->usb3_rhub.bus_state.bus_suspended)
++ suspended_usb3_devs = true;
++
+ pending_portevent = xhci_pending_portevent(xhci);
+- if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
++
++ if (suspended_usb3_devs && !pending_portevent &&
++ msg.event == PM_EVENT_AUTO_RESUME) {
+ msleep(120);
+ pending_portevent = xhci_pending_portevent(xhci);
+ }
+@@ -1129,6 +1146,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ usb_hcd_resume_root_hub(hcd);
+ }
+ }
++done:
+ /*
+ * If system is subject to the Quirk, Compliance Mode Timer needs to
+ * be re-initialized Always after a system resume. Ports are subject
+@@ -1170,6 +1188,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
+
+ temp = kzalloc_node(buf_len, GFP_ATOMIC,
+ dev_to_node(hcd->self.sysdev));
++ if (!temp)
++ return -ENOMEM;
+
+ if (usb_urb_dir_out(urb))
+ sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+@@ -2788,7 +2808,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+- if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
++ if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
+ xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, ctrl_ctx);
+@@ -3997,12 +4017,18 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ return 0;
+ }
+
+-/*
+- * Issue an Address Device command and optionally send a corresponding
+- * SetAddress request to the device.
++/**
++ * xhci_setup_device - issues an Address Device command to assign a unique
++ * USB bus address.
++ * @hcd: USB host controller data structure.
++ * @udev: USB dev structure representing the connected device.
++ * @setup: Enum specifying setup mode: address only or with context.
++ * @timeout_ms: Max wait time (ms) for the command operation to complete.
++ *
++ * Return: 0 if successful; otherwise, negative error code.
+ */
+ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+- enum xhci_setup_dev setup)
++ enum xhci_setup_dev setup, unsigned int timeout_ms)
+ {
+ const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
+ unsigned long flags;
+@@ -4059,6 +4085,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+
+ command->in_ctx = virt_dev->in_ctx;
++ command->timeout_ms = timeout_ms;
+
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
+@@ -4123,8 +4150,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ mutex_unlock(&xhci->mutex);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
+- if (!ret)
+- xhci_alloc_dev(hcd, udev);
++ if (!ret) {
++ if (xhci_alloc_dev(hcd, udev) == 1)
++ xhci_setup_addressable_virt_dev(xhci, udev);
++ }
+ kfree(command->completion);
+ kfree(command);
+ return -EPROTO;
+@@ -4185,14 +4214,16 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ return ret;
+ }
+
+-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
++static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
++ unsigned int timeout_ms)
+ {
+- return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
++ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
+ }
+
+ static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+- return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
++ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ }
+
+ /*
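
The theme of the xhci.c changes above is the command timeout: it becomes a per-command, millisecond-based value that xhci_setup_device() stores in command->timeout_ms and that xhci_address_device() now receives from its caller, while xhci_enable_device() keeps XHCI_CMD_DEFAULT_TIMEOUT. The conversion back to jiffies has to happen inside xhci_mod_cmd_timer(), which this hunk does not show; the standalone sketch below assumes that shape and only demonstrates the unit handling.

#include <stdio.h>

#define HZ 250                          /* any tick rate works for the demo */
#define XHCI_CMD_DEFAULT_TIMEOUT 5000   /* now milliseconds, not jiffies */

struct xhci_command {
    unsigned int timeout_ms;    /* per command, set by xhci_setup_device() */
};

static unsigned long msecs_to_jiffies(unsigned int ms)
{
    return ((unsigned long)ms * HZ + 999) / 1000;   /* round up, like the kernel */
}

/* assumed conversion point; the real xhci_mod_cmd_timer() is not in this hunk */
static void xhci_mod_cmd_timer(const struct xhci_command *cmd)
{
    printf("arm command timer: %u ms = %lu jiffies at HZ=%d\n",
           cmd->timeout_ms, msecs_to_jiffies(cmd->timeout_ms), HZ);
}

int main(void)
{
    struct xhci_command address = { .timeout_ms = 500 };    /* hypothetical caller value */
    struct xhci_command generic = { .timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT };

    xhci_mod_cmd_timer(&address);
    xhci_mod_cmd_timer(&generic);
    return 0;
}
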
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 5df370482521f7..7754ed55d220b3 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/usb/hcd.h>
+ #include <linux/io-64-nonatomic-lo-hi.h>
++#include <linux/io-64-nonatomic-hi-lo.h>
+
+ /* Code sharing between pci-quirks and xhci hcd */
+ #include "xhci-ext-caps.h"
+@@ -818,6 +819,8 @@ struct xhci_command {
+ struct completion *completion;
+ union xhci_trb *command_trb;
+ struct list_head cmd_list;
++ /* xHCI command response timeout in milliseconds */
++ unsigned int timeout_ms;
+ };
+
+ /* drop context bitmasks */
+@@ -1283,7 +1286,7 @@ enum xhci_setup_dev {
+ /* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
+ #define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
+ #define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+-#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
++#define SCT_FOR_TRB(p) (((p) & 0x7) << 1)
+
+ /* Link TRB specific fields */
+ #define TRB_TC (1<<1)
+@@ -1557,6 +1560,7 @@ enum xhci_cancelled_td_status {
+ TD_DIRTY = 0,
+ TD_HALTED,
+ TD_CLEARING_CACHE,
++ TD_CLEARING_CACHE_DEFERRED,
+ TD_CLEARED,
+ };
+
+@@ -1573,11 +1577,15 @@ struct xhci_td {
+ struct xhci_segment *bounce_seg;
+ /* actual_length of the URB has already been set */
+ bool urb_length_set;
++ bool error_mid_td;
+ unsigned int num_trbs;
+ };
+
+-/* xHCI command default timeout value */
+-#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
++/*
++ * xHCI command default timeout value in milliseconds.
++ * USB 3.2 spec, section 9.2.6.1
++ */
++#define XHCI_CMD_DEFAULT_TIMEOUT 5000
+
+ /* command descriptor */
+ struct xhci_cd {
+@@ -1906,6 +1914,8 @@ struct xhci_hcd {
+ #define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
+ #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+ #define XHCI_ZHAOXIN_HOST BIT_ULL(46)
++#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
++#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
+
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
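
Among the xhci.h changes, the SCT_FOR_TRB fix deserves a second look: the old macro shifted before masking, so the three-bit stream context type lost its high bit for values 4 through 7 and aliased types 0 through 3. A self-contained check makes the truncation visible.

#include <stdio.h>

#define SCT_FOR_TRB_OLD(p)  (((p) << 1) & 0x7)  /* shift, then mask: broken */
#define SCT_FOR_TRB_NEW(p)  (((p) & 0x7) << 1)  /* mask, then shift: fixed */

int main(void)
{
    for (unsigned int sct = 0; sct <= 7; sct++)
        printf("sct=%u old=0x%x new=0x%x\n",
               sct, SCT_FOR_TRB_OLD(sct), SCT_FOR_TRB_NEW(sct));
    /* old: types 4..7 alias types 0..3; new: all 3 bits land in bits 3:1 */
    return 0;
}
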
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index c8098e9b432e13..62b5a30edc4267 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb)
+ case ACD_BTN_BRIGHT_UP:
+ case ACD_BTN_BRIGHT_DOWN:
+ pdata->button_pressed = 1;
+- schedule_delayed_work(&pdata->work, 0);
++ /*
++ * there is a window during which no device
++ * is registered
++ */
++ if (pdata->bd)
++ schedule_delayed_work(&pdata->work, 0);
+ break;
+ case ACD_BTN_NONE:
+ default:
+@@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+ {
+ struct backlight_properties props;
++ struct backlight_device *backlight;
+ struct appledisplay *pdata;
+ struct usb_device *udev = interface_to_usbdev(iface);
+ struct usb_endpoint_descriptor *endpoint;
+@@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface *iface,
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = 0xff;
+- pdata->bd = backlight_device_register(bl_name, NULL, pdata,
++ backlight = backlight_device_register(bl_name, NULL, pdata,
+ &appledisplay_bl_data, &props);
+- if (IS_ERR(pdata->bd)) {
++ if (IS_ERR(backlight)) {
+ dev_err(&iface->dev, "Backlight registration failed\n");
+- retval = PTR_ERR(pdata->bd);
++ retval = PTR_ERR(backlight);
+ goto error;
+ }
++ pdata->bd = backlight;
+
+ /* Try to get brightness */
+ brightness = appledisplay_bl_get_brightness(pdata->bd);
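
Both appledisplay hunks close the same race: the URB completion handler can fire while probe is still registering the backlight, so pdata->bd must never be observed holding an ERR_PTR, and the handler must tolerate NULL. The shape is register into a local, publish only on success. A minimal standalone rendering follows; register_backlight() is a stand-in that uses NULL where the driver checks IS_ERR(), and the memory barriers real concurrent code would need are deliberately omitted.

#include <stdio.h>

struct pdata {
    void *bd;   /* read concurrently by the URB completion handler */
};

/* stand-in for backlight_device_register() */
static void *register_backlight(int fail)
{
    return fail ? NULL : (void *)0x1;
}

static int probe(struct pdata *p, int fail)
{
    void *backlight = register_backlight(fail); /* land in a local first */

    if (!backlight)
        return -1;      /* p->bd is still NULL; the handler skips its work */
    p->bd = backlight;  /* publish only a valid pointer */
    return 0;
}

int main(void)
{
    struct pdata p = { 0 };

    printf("failing probe -> %d, bd=%p\n", probe(&p, 1), p.bd);
    printf("good probe    -> %d, bd=%p\n", probe(&p, 0), p.bd);
    return 0;
}
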
+diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
+index cecd7693b7413c..75f5a740cba397 100644
+--- a/drivers/usb/misc/cypress_cy7c63.c
++++ b/drivers/usb/misc/cypress_cy7c63.c
+@@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ address, data, iobuf, CYPRESS_MAX_REQSIZE,
+ USB_CTRL_GET_TIMEOUT);
++ /* we must not process garbage */
++ if (retval < 2)
++ goto err_buf;
+
+ /* store returned data (more READs to be added) */
+ switch (request) {
+@@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request,
+ break;
+ }
+
++err_buf:
+ kfree(iobuf);
+ error:
+ return retval;
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 57bbe130909480..d72130eda57d62 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -437,6 +437,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
++ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */
++ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index 2a4ab5ac0ebed9..8af34e6d1afff0 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -16,6 +16,11 @@ static const struct onboard_hub_pdata microchip_usb424_data = {
+ .num_supplies = 1,
+ };
+
++static const struct onboard_hub_pdata microchip_usb5744_data = {
++ .reset_us = 0,
++ .num_supplies = 2,
++};
++
+ static const struct onboard_hub_pdata realtek_rts5411_data = {
+ .reset_us = 0,
+ .num_supplies = 1,
+@@ -50,6 +55,8 @@ static const struct of_device_id onboard_hub_match[] = {
+ { .compatible = "usb424,2412", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
++ { .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
++ { .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
+diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
+index b00d92db5dfd1b..eb5a8e0d9e2d6c 100644
+--- a/drivers/usb/misc/uss720.c
++++ b/drivers/usb/misc/uss720.c
+@@ -677,7 +677,7 @@ static int uss720_probe(struct usb_interface *intf,
+ struct parport_uss720_private *priv;
+ struct parport *pp;
+ unsigned char reg;
+- int i;
++ int ret;
+
+ dev_dbg(&intf->dev, "probe: vendor id 0x%x, device id 0x%x\n",
+ le16_to_cpu(usbdev->descriptor.idVendor),
+@@ -688,8 +688,8 @@ static int uss720_probe(struct usb_interface *intf,
+ usb_put_dev(usbdev);
+ return -ENODEV;
+ }
+- i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
+- dev_dbg(&intf->dev, "set interface result %d\n", i);
++ ret = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
++ dev_dbg(&intf->dev, "set interface result %d\n", ret);
+
+ interface = intf->cur_altsetting;
+
+@@ -725,12 +725,18 @@ static int uss720_probe(struct usb_interface *intf,
+ set_1284_register(pp, 7, 0x00, GFP_KERNEL);
+ set_1284_register(pp, 6, 0x30, GFP_KERNEL); /* PS/2 mode */
+ set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
+- /* debugging */
+- get_1284_register(pp, 0, &reg, GFP_KERNEL);
++
++ /* The Belkin F5U002 Rev 2 P80453-B USB parallel port adapter shares the
++ * device ID 050d:0002 with some other device that works with this
++ * driver, but it itself does not. Detect and handle the bad cable
++ * here. */
++ ret = get_1284_register(pp, 0, &reg, GFP_KERNEL);
+ dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
++ if (ret < 0)
++ return ret;
+
+- i = usb_find_last_int_in_endpoint(interface, &epd);
+- if (!i) {
++ ret = usb_find_last_int_in_endpoint(interface, &epd);
++ if (!ret) {
+ dev_dbg(&intf->dev, "epaddr %d interval %d\n",
+ epd->bEndpointAddress, epd->bInterval);
+ }
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index c640f98d20c548..c313cd41f7a5a5 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -507,8 +507,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
+ __func__, retval);
+ goto error;
+ }
+- if (set && timeout)
++ if (set && timeout) {
++ spin_lock_irq(&dev->lock);
+ dev->bbu = c2;
++ spin_unlock_irq(&dev->lock);
++ }
+ return timeout ? count : -EIO;
+
+ error:
+diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
+index 9ca9305243fe59..4e30de4db1c0a8 100644
+--- a/drivers/usb/mon/mon_bin.c
++++ b/drivers/usb/mon/mon_bin.c
+@@ -1250,14 +1250,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
+ struct mon_reader_bin *rp = vmf->vma->vm_private_data;
+ unsigned long offset, chunk_idx;
+ struct page *pageptr;
++ unsigned long flags;
+
++ spin_lock_irqsave(&rp->b_lock, flags);
+ offset = vmf->pgoff << PAGE_SHIFT;
+- if (offset >= rp->b_size)
++ if (offset >= rp->b_size) {
++ spin_unlock_irqrestore(&rp->b_lock, flags);
+ return VM_FAULT_SIGBUS;
++ }
+ chunk_idx = offset / CHUNK_SIZE;
+ pageptr = rp->b_vec[chunk_idx].pg;
+ get_page(pageptr);
+ vmf->page = pageptr;
++ spin_unlock_irqrestore(&rp->b_lock, flags);
+ return 0;
+ }
+
+diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
+index 912e32b78ac6e7..a8d7224838cf67 100644
+--- a/drivers/usb/musb/da8xx.c
++++ b/drivers/usb/musb/da8xx.c
+@@ -555,7 +555,7 @@ static int da8xx_probe(struct platform_device *pdev)
+ ret = of_platform_populate(pdev->dev.of_node, NULL,
+ da8xx_auxdata_lookup, &pdev->dev);
+ if (ret)
+- return ret;
++ goto err_unregister_phy;
+
+ pinfo = da8xx_dev_info;
+ pinfo.parent = &pdev->dev;
+@@ -570,9 +570,13 @@ static int da8xx_probe(struct platform_device *pdev)
+ ret = PTR_ERR_OR_ZERO(glue->musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+- usb_phy_generic_unregister(glue->usb_phy);
++ goto err_unregister_phy;
+ }
+
++ return 0;
++
++err_unregister_phy:
++ usb_phy_generic_unregister(glue->usb_phy);
+ return ret;
+ }
+
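
The da8xx fix is the standard unwind-with-goto idiom: once usb_phy_generic_register() has succeeded, every later failure must unregister the phy, so the bare return after of_platform_populate() leaked it. The pattern in miniature, with stand-in functions:

#include <stdio.h>

static int register_phy(void)    { puts("phy registered");   return 0; }
static void unregister_phy(void) { puts("phy unregistered"); }
static int populate(void)        { puts("populate failed");  return -1; }

static int probe(void)
{
    int ret = register_phy();

    if (ret)
        return ret;

    ret = populate();
    if (ret)
        goto err_unregister_phy;    /* was a bare return, leaking the phy */

    return 0;

err_unregister_phy:
    unregister_phy();
    return ret;
}

int main(void)
{
    return probe() ? 1 : 0;
}
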
+diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
+index acd46b72899e90..920a32cd094d6f 100644
+--- a/drivers/usb/phy/phy-mxs-usb.c
++++ b/drivers/usb/phy/phy-mxs-usb.c
+@@ -388,8 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
+
+ static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
+ {
+- return IS_ENABLED(CONFIG_USB_OTG) &&
+- mxs_phy->phy.last_event == USB_EVENT_ID;
++ return mxs_phy->phy.last_event == USB_EVENT_ID;
+ }
+
+ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index ae41578bd01499..70165dd86b5de9 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -21,7 +21,9 @@ static const struct class role_class = {
+ struct usb_role_switch {
+ struct device dev;
+ struct mutex lock; /* device lock*/
++ struct module *module; /* the module this device depends on */
+ enum usb_role role;
++ bool registered;
+
+ /* From descriptor */
+ struct device *usb2_port;
+@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
+ if (IS_ERR_OR_NULL(sw))
+ return 0;
+
++ if (!sw->registered)
++ return -EOPNOTSUPP;
++
+ mutex_lock(&sw->lock);
+
+ ret = sw->set(sw, role);
+@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
+ {
+ enum usb_role role;
+
+- if (IS_ERR_OR_NULL(sw))
++ if (IS_ERR_OR_NULL(sw) || !sw->registered)
+ return USB_ROLE_NONE;
+
+ mutex_lock(&sw->lock);
+@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
+ usb_role_switch_match);
+
+ if (!IS_ERR_OR_NULL(sw))
+- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
++ WARN_ON(!try_module_get(sw->module));
+
+ return sw;
+ }
+@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
+ sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
+ NULL, usb_role_switch_match);
+ if (!IS_ERR_OR_NULL(sw))
+- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
++ WARN_ON(!try_module_get(sw->module));
+
+ return sw;
+ }
+@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
+ void usb_role_switch_put(struct usb_role_switch *sw)
+ {
+ if (!IS_ERR_OR_NULL(sw)) {
+- module_put(sw->dev.parent->driver->owner);
++ module_put(sw->module);
+ put_device(&sw->dev);
+ }
+ }
+@@ -189,15 +194,18 @@ struct usb_role_switch *
+ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+ {
+ struct device *dev;
++ struct usb_role_switch *sw = NULL;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = class_find_device_by_fwnode(&role_class, fwnode);
+- if (dev)
+- WARN_ON(!try_module_get(dev->parent->driver->owner));
++ if (dev) {
++ sw = to_role_switch(dev);
++ WARN_ON(!try_module_get(sw->module));
++ }
+
+- return dev ? to_role_switch(dev) : NULL;
++ return sw;
+ }
+ EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
+
+@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
+ sw->set = desc->set;
+ sw->get = desc->get;
+
++ sw->module = parent->driver->owner;
+ sw->dev.parent = parent;
+ sw->dev.fwnode = desc->fwnode;
+ sw->dev.class = &role_class;
+@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
+ return ERR_PTR(ret);
+ }
+
++ sw->registered = true;
++
+ /* TODO: Symlinks for the host port and the device controller. */
+
+ return sw;
+@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
+ */
+ void usb_role_switch_unregister(struct usb_role_switch *sw)
+ {
+- if (!IS_ERR_OR_NULL(sw))
++ if (!IS_ERR_OR_NULL(sw)) {
++ sw->registered = false;
+ device_unregister(&sw->dev);
++ }
+ }
+ EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 1e61fe04317158..21fd26609252be 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
+ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
++ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
++ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
+ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+@@ -144,8 +146,10 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
++ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
+ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
+ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+@@ -176,6 +180,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
+ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 1bf23611be1221..22d01a0f10fbc2 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1033,9 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
+- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
+- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
+- { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) },
++ { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) },
+ { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
+ { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
+ { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
+@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ /* GMC devices */
++ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e2099445db708f..5ee60ba2a73cdb 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1568,9 +1568,9 @@
+ #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */
+ #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */
+ #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */
+-#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */
+-#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */
+-#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */
++#define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */
++#define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */
++#define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */
+ #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */
+ #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */
+ #define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */
+@@ -1606,3 +1606,9 @@
+ #define UBLOX_VID 0x1546
+ #define UBLOX_C099F9P_ZED_PID 0x0502
+ #define UBLOX_C099F9P_ODIN_PID 0x0503
++
++/*
++ * GMC devices
++ */
++#define GMC_VID 0x1cd7
++#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 8b0308d84270f5..85697466b14768 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1737,6 +1737,49 @@ static void mos7840_port_remove(struct usb_serial_port *port)
+ kfree(mos7840_port);
+ }
+
++static int mos7840_suspend(struct usb_serial *serial, pm_message_t message)
++{
++ struct moschip_port *mos7840_port;
++ struct usb_serial_port *port;
++ int i;
++
++ for (i = 0; i < serial->num_ports; ++i) {
++ port = serial->port[i];
++ if (!tty_port_initialized(&port->port))
++ continue;
++
++ mos7840_port = usb_get_serial_port_data(port);
++
++ usb_kill_urb(mos7840_port->read_urb);
++ mos7840_port->read_urb_busy = false;
++ }
++
++ return 0;
++}
++
++static int mos7840_resume(struct usb_serial *serial)
++{
++ struct moschip_port *mos7840_port;
++ struct usb_serial_port *port;
++ int res;
++ int i;
++
++ for (i = 0; i < serial->num_ports; ++i) {
++ port = serial->port[i];
++ if (!tty_port_initialized(&port->port))
++ continue;
++
++ mos7840_port = usb_get_serial_port_data(port);
++
++ mos7840_port->read_urb_busy = true;
++ res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO);
++ if (res)
++ mos7840_port->read_urb_busy = false;
++ }
++
++ return 0;
++}
++
+ static struct usb_serial_driver moschip7840_4port_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+@@ -1764,6 +1807,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
+ .port_probe = mos7840_port_probe,
+ .port_remove = mos7840_port_remove,
+ .read_bulk_callback = mos7840_bulk_in_callback,
++ .suspend = mos7840_suspend,
++ .resume = mos7840_resume,
+ };
+
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 45dcfaadaf98eb..55886b64cadd83 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5829E_ESIM 0x81e4
+ #define DELL_PRODUCT_5829E 0x81e6
+
+-#define DELL_PRODUCT_FM101R 0x8213
+-#define DELL_PRODUCT_FM101R_ESIM 0x8215
++#define DELL_PRODUCT_FM101R_ESIM 0x8213
++#define DELL_PRODUCT_FM101R 0x8215
+
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM061K_LMS 0x0124
+ #define QUECTEL_PRODUCT_EC25 0x0125
+ #define QUECTEL_PRODUCT_EM060K_128 0x0128
++#define QUECTEL_PRODUCT_EM060K_129 0x0129
++#define QUECTEL_PRODUCT_EM060K_12a 0x012a
++#define QUECTEL_PRODUCT_EM060K_12b 0x012b
++#define QUECTEL_PRODUCT_EM060K_12c 0x012c
+ #define QUECTEL_PRODUCT_EG91 0x0191
+ #define QUECTEL_PRODUCT_EG95 0x0195
+ #define QUECTEL_PRODUCT_BG96 0x0296
+@@ -272,8 +276,10 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_RM500Q 0x0800
+ #define QUECTEL_PRODUCT_RM520N 0x0801
+ #define QUECTEL_PRODUCT_EC200U 0x0901
++#define QUECTEL_PRODUCT_EG912Y 0x6001
+ #define QUECTEL_PRODUCT_EC200S_CN 0x6002
+ #define QUECTEL_PRODUCT_EC200A 0x6005
++#define QUECTEL_PRODUCT_EG916Q 0x6007
+ #define QUECTEL_PRODUCT_EM061K_LWW 0x6008
+ #define QUECTEL_PRODUCT_EM061K_LCN 0x6009
+ #define QUECTEL_PRODUCT_EC200T 0x6026
+@@ -609,6 +615,15 @@ static void option_instat_callback(struct urb *urb);
+ #define UNISOC_VENDOR_ID 0x1782
+ /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
+ #define TOZED_PRODUCT_LT70C 0x4055
++/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
++#define LUAT_PRODUCT_AIR720U 0x4e00
++
++/* MeiG Smart Technology products */
++#define MEIGSMART_VENDOR_ID 0x2dee
++/* MeiG Smart SRM825L based on Qualcomm 315 */
++#define MEIGSMART_PRODUCT_SRM825L 0x4d22
++/* MeiG Smart SLM320 based on UNISOC UIS8910 */
++#define MEIGSMART_PRODUCT_SLM320 0x4d41
+
+ /* Device flags */
+
+@@ -1210,6 +1225,18 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
+@@ -1230,6 +1257,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
+ .driver_info = RSVD(3) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
+@@ -1242,6 +1270,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+@@ -1350,6 +1380,18 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
++ .driver_info = RSVD(0) | NCTRL(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */
++ .driver_info = NCTRL(4) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
++ .driver_info = RSVD(0) | NCTRL(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */
++ .driver_info = NCTRL(4) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
++ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
++ .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -1393,6 +1435,10 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
+ .driver_info = NCTRL(0) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff), /* Telit FN912 */
++ .driver_info = RSVD(0) | NCTRL(3) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff), /* Telit FN912 */
++ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
+@@ -1401,6 +1447,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
+ .driver_info = NCTRL(2) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff), /* Telit generic core-dump device */
++ .driver_info = NCTRL(0) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
+@@ -1546,7 +1594,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
++ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
+@@ -2041,6 +2090,10 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = RSVD(4) },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
++ .driver_info = RSVD(4) | RSVD(5) },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
++ .driver_info = RSVD(0) | RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -2187,6 +2240,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00),
++ .driver_info = NCTRL(2) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
++ .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
+ .driver_info = RSVD(1) | RSVD(4) },
+@@ -2239,16 +2296,21 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */
++ .driver_info = RSVD(3) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */
+ .driver_info = RSVD(3) },
++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */
++ .driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
+@@ -2258,19 +2320,65 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
++ .driver_info = RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
++ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
++ .driver_info = RSVD(4) | RSVD(5) },
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
++ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
++ .driver_info = RSVD(5) },
++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff), /* Rolling RW350-GL (laptop MBIM) */
++ .driver_info = RSVD(5) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for Global SKU */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for China SKU */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for SA */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for EU */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for NA */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for China EDU */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global EDU */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index d93f5d58455782..8e327fcb222f73 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+ { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) },
++ { USB_DEVICE(MACROSILICON_VENDOR_ID, MACROSILICON_MS3020_PRODUCT_ID) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 732f9b13ad5d59..d60eda7f6edaf8 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -171,3 +171,7 @@
+ /* Allied Telesis VT-Kit3 */
+ #define AT_VENDOR_ID 0x0caa
+ #define AT_VTKIT3_PRODUCT_ID 0x3001
++
++/* Macrosilicon MS3020 */
++#define MACROSILICON_VENDOR_ID 0x345f
++#define MACROSILICON_MS3020_PRODUCT_ID 0x3020
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index b1e844bf31f81f..703a9c56355731 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
++ {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */
++ {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */
+
+ /* Huawei devices */
+ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
+index 6934970f180d7f..5a8869cd95d523 100644
+--- a/drivers/usb/serial/usb_debug.c
++++ b/drivers/usb/serial/usb_debug.c
+@@ -76,6 +76,11 @@ static void usb_debug_process_read_urb(struct urb *urb)
+ usb_serial_generic_process_read_urb(urb);
+ }
+
++static void usb_debug_init_termios(struct tty_struct *tty)
++{
++ tty->termios.c_lflag &= ~(ECHO | ECHONL);
++}
++
+ static struct usb_serial_driver debug_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+@@ -85,6 +90,7 @@ static struct usb_serial_driver debug_device = {
+ .num_ports = 1,
+ .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
+ .break_ctl = usb_debug_break_ctl,
++ .init_termios = usb_debug_init_termios,
+ .process_read_urb = usb_debug_process_read_urb,
+ };
+
+@@ -96,6 +102,7 @@ static struct usb_serial_driver dbc_device = {
+ .id_table = dbc_id_table,
+ .num_ports = 1,
+ .break_ctl = usb_debug_break_ctl,
++ .init_termios = usb_debug_init_termios,
+ .process_read_urb = usb_debug_process_read_urb,
+ };
+
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index 115f05a6201a16..40d34cc28344a4 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -105,6 +105,8 @@ struct alauda_info {
+ unsigned char sense_key;
+ unsigned long sense_asc; /* additional sense code */
+ unsigned long sense_ascq; /* additional sense code qualifier */
++
++ bool media_initialized;
+ };
+
+ #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
+@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
+ }
+
+ /* Check for media change */
+- if (status[0] & 0x08) {
++ if (status[0] & 0x08 || !info->media_initialized) {
+ usb_stor_dbg(us, "Media change detected\n");
+ alauda_free_maps(&MEDIA_INFO(us));
+- alauda_init_media(us);
+-
++ rc = alauda_init_media(us);
++ if (rc == USB_STOR_TRANSPORT_GOOD)
++ info->media_initialized = true;
+ info->sense_key = UNIT_ATTENTION;
+ info->sense_asc = 0x28;
+ info->sense_ascq = 0x00;
+diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
+index 4e0eef1440b7fd..300aeef160e75c 100644
+--- a/drivers/usb/storage/isd200.c
++++ b/drivers/usb/storage/isd200.c
+@@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id)
+ static int isd200_get_inquiry_data( struct us_data *us )
+ {
+ struct isd200_info *info = (struct isd200_info *)us->extra;
+- int retStatus = ISD200_GOOD;
++ int retStatus;
+ u16 *id = info->id;
+
+ usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
+@@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us )
+ isd200_fix_driveid(id);
+ isd200_dump_driveid(us, id);
+
++ /* Prevent division by 0 in isd200_scsi_to_ata() */
++ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) {
++ usb_stor_dbg(us, " Invalid ATA Identify data\n");
++ retStatus = ISD200_ERROR;
++ goto Done;
++ }
++
+ memset(&info->InquiryData, 0, sizeof(info->InquiryData));
+
+ /* Standard IDE interface only supports disks */
+@@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
+ }
+ }
+
++ Done:
+ usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
+
+ return(retStatus);
+@@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us)
+
+ static int isd200_Initialization(struct us_data *us)
+ {
++ int rc = 0;
++
+ usb_stor_dbg(us, "ISD200 Initialization...\n");
+
+ /* Initialize ISD200 info struct */
+
+- if (isd200_init_info(us) == ISD200_ERROR) {
++ if (isd200_init_info(us) < 0) {
+ usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
++ rc = -ENOMEM;
+ } else {
+ /* Get device specific data */
+
+- if (isd200_get_inquiry_data(us) != ISD200_GOOD)
++ if (isd200_get_inquiry_data(us) != ISD200_GOOD) {
+ usb_stor_dbg(us, "ISD200 Initialization Failure\n");
+- else
++ rc = -EINVAL;
++ } else {
+ usb_stor_dbg(us, "ISD200 Initialization complete\n");
++ }
+ }
+
+- return 0;
++ return rc;
+ }
+
+
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index c54e9805da536a..12cf9940e5b675 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -179,6 +179,13 @@ static int slave_configure(struct scsi_device *sdev)
+ */
+ sdev->use_192_bytes_for_3f = 1;
+
++ /*
++ * Some devices report generic values until the media has been
++ * accessed. Force a READ(10) prior to querying device
++ * characteristics.
++ */
++ sdev->read_before_ms = 1;
++
+ /*
+ * Some devices don't like MODE SENSE with page=0x3f,
+ * which is the command used for checking if a device
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 2583ee9815c556..f794cb39cc3130 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -422,6 +422,7 @@ static void uas_data_cmplt(struct urb *urb)
+ uas_log_cmd_state(cmnd, "data cmplt err", status);
+ /* error: no data transfered */
+ scsi_set_resid(cmnd, sdb->length);
++ set_host_byte(cmnd, DID_ERROR);
+ } else {
+ scsi_set_resid(cmnd, sdb->length - urb->actual_length);
+ }
+@@ -533,7 +534,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
+ * daft to me.
+ */
+
+-static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
++static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+ {
+ struct uas_dev_info *devinfo = cmnd->device->hostdata;
+ struct urb *urb;
+@@ -541,30 +542,28 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+
+ urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
+ if (!urb)
+- return NULL;
++ return -ENOMEM;
+ usb_anchor_urb(urb, &devinfo->sense_urbs);
+ err = usb_submit_urb(urb, gfp);
+ if (err) {
+ usb_unanchor_urb(urb);
+ uas_log_cmd_state(cmnd, "sense submit err", err);
+ usb_free_urb(urb);
+- return NULL;
+ }
+- return urb;
++ return err;
+ }
+
+ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ struct uas_dev_info *devinfo)
+ {
+ struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
+- struct urb *urb;
+ int err;
+
+ lockdep_assert_held(&devinfo->lock);
+ if (cmdinfo->state & SUBMIT_STATUS_URB) {
+- urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
+- if (!urb)
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ err = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
++ if (err)
++ return err;
+ cmdinfo->state &= ~SUBMIT_STATUS_URB;
+ }
+
+@@ -572,7 +571,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
+ cmnd, DMA_FROM_DEVICE);
+ if (!cmdinfo->data_in_urb)
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ return -ENOMEM;
+ cmdinfo->state &= ~ALLOC_DATA_IN_URB;
+ }
+
+@@ -582,7 +581,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ if (err) {
+ usb_unanchor_urb(cmdinfo->data_in_urb);
+ uas_log_cmd_state(cmnd, "data in submit err", err);
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ return err;
+ }
+ cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
+ cmdinfo->state |= DATA_IN_URB_INFLIGHT;
+@@ -592,7 +591,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
+ cmnd, DMA_TO_DEVICE);
+ if (!cmdinfo->data_out_urb)
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ return -ENOMEM;
+ cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
+ }
+
+@@ -602,7 +601,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ if (err) {
+ usb_unanchor_urb(cmdinfo->data_out_urb);
+ uas_log_cmd_state(cmnd, "data out submit err", err);
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ return err;
+ }
+ cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
+ cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
+@@ -611,7 +610,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ if (cmdinfo->state & ALLOC_CMD_URB) {
+ cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
+ if (!cmdinfo->cmd_urb)
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ return -ENOMEM;
+ cmdinfo->state &= ~ALLOC_CMD_URB;
+ }
+
+@@ -621,7 +620,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ if (err) {
+ usb_unanchor_urb(cmdinfo->cmd_urb);
+ uas_log_cmd_state(cmnd, "cmd submit err", err);
+- return SCSI_MLQUEUE_DEVICE_BUSY;
++ return err;
+ }
+ cmdinfo->cmd_urb = NULL;
+ cmdinfo->state &= ~SUBMIT_CMD_URB;
+@@ -698,7 +697,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
+ * of queueing, no matter how fatal the error
+ */
+ if (err == -ENODEV) {
+- set_host_byte(cmnd, DID_ERROR);
++ set_host_byte(cmnd, DID_NO_CONNECT);
+ scsi_done(cmnd);
+ goto zombie;
+ }
+@@ -878,6 +877,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
++ /*
++ * Some devices report generic values until the media has been
++ * accessed. Force a READ(10) prior to querying device
++ * characteristics.
++ */
++ sdev->read_before_ms = 1;
++
+ /*
+ * Some devices don't like MODE SENSE with page=0x3f,
+ * which is the command used for checking if a device
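
The common thread in the uas changes is error propagation: uas_submit_sense_urb() and uas_submit_urbs() now return real errnos (-ENOMEM, or whatever usb_submit_urb() reported) instead of flattening everything into SCSI_MLQUEUE_DEVICE_BUSY, which lets the queuecommand path treat -ENODEV as a dead device (DID_NO_CONNECT) rather than a generic DID_ERROR. A standalone sketch of the resulting dispatch; the retry branch for the remaining errors is an assumption based on the surrounding driver code, which this hunk does not show.

#include <errno.h>
#include <stdio.h>

/* stand-in decision table for uas_queuecommand_lck() after the change */
static const char *dispatch(int err)
{
    if (!err)
        return "command submitted";
    if (err == -ENODEV)
        return "set DID_NO_CONNECT and complete";    /* device is gone for good */
    return "requeue (device busy), assumed for other errors";
}

int main(void)
{
    printf("%8d: %s\n", 0, dispatch(0));
    printf("%8d: %s\n", -ENODEV, dispatch(-ENODEV));
    printf("%8d: %s\n", -ENOMEM, dispatch(-ENOMEM));
    return 0;
}
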
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 0547daf116a268..5df40759d77ad4 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 20dcbccb290b36..e5ad23d86833d5 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1305,6 +1305,17 @@ UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_INITIAL_READ10 ),
+
++/*
++ * Patch by Tasos Sahanidis <tasos@tasossah.com>
++ * This flash drive always shows up with write protect enabled
++ * during the first mode sense.
++ */
++UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100,
++ "Kingston",
++ "DT Ultimate G3",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_WP_DETECT),
++
+ /*
+ * This Pentax still camera is not conformant
+ * to the USB storage specification: -
+@@ -2412,6 +2423,17 @@ UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NOT_LOCKABLE),
+
++/*
++ * Reported by Icenowy Zheng <uwu@icenowy.me>
++ * This is an interface for vendor-specific cryptic commands instead
++ * of a real USB storage device.
++ */
++UNUSUAL_DEV( 0xe5b7, 0x0811, 0x0100, 0x0100,
++ "ZhuHai JieLi Technology",
++ "JieLi BR21",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_IGNORE_DEVICE),
++
+ /* Reported by Andrew Simmons <andrew.simmons@gmail.com> */
+ UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
+ "DataStor",
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 59e0218a8bc56f..ccff838ab89e12 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -554,16 +554,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char
+ }
+ static DEVICE_ATTR_RO(hpd);
+
+-static struct attribute *dp_altmode_attrs[] = {
++static struct attribute *displayport_attrs[] = {
+ &dev_attr_configuration.attr,
+ &dev_attr_pin_assignment.attr,
+ &dev_attr_hpd.attr,
+ NULL
+ };
+
+-static const struct attribute_group dp_altmode_group = {
++static const struct attribute_group displayport_group = {
+ .name = "displayport",
+- .attrs = dp_altmode_attrs,
++ .attrs = displayport_attrs,
++};
++
++static const struct attribute_group *displayport_groups[] = {
++ &displayport_group,
++ NULL,
+ };
+
+ int dp_altmode_probe(struct typec_altmode *alt)
+@@ -571,7 +576,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
+ const struct typec_altmode *port = typec_altmode_get_partner(alt);
+ struct fwnode_handle *fwnode;
+ struct dp_altmode *dp;
+- int ret;
+
+ /* FIXME: Port can only be DFP_U. */
+
+@@ -582,10 +586,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
+ return -ENODEV;
+
+- ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
+- if (ret)
+- return ret;
+-
+ dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+@@ -619,7 +619,6 @@ void dp_altmode_remove(struct typec_altmode *alt)
+ {
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+
+- sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
+ cancel_work_sync(&dp->work);
+
+ if (dp->connector_fwnode) {
+@@ -644,6 +643,7 @@ static struct typec_altmode_driver dp_altmode_driver = {
+ .driver = {
+ .name = "typec_displayport",
+ .owner = THIS_MODULE,
++ .dev_groups = displayport_groups,
+ },
+ };
+ module_typec_altmode_driver(dp_altmode_driver);
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 9c1dbf3c00e0a7..f92fc2acfcba04 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -262,11 +262,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ {
+ struct altmode *partner = altmode->partner;
+ struct typec_altmode *adev;
++ struct typec_altmode *partner_adev;
+
+ if (!partner)
+ return;
+
+- adev = &partner->adev;
++ adev = &altmode->adev;
++ partner_adev = &partner->adev;
+
+ if (is_typec_plug(adev->dev.parent)) {
+ struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+@@ -275,7 +277,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ } else {
+ partner->partner = NULL;
+ }
+- put_device(&adev->dev);
++ put_device(&partner_adev->dev);
+ }
+
+ /**
+@@ -496,7 +498,8 @@ static void typec_altmode_release(struct device *dev)
+ {
+ struct altmode *alt = to_altmode(to_typec_altmode(dev));
+
+- typec_altmode_put_partner(alt);
++ if (!is_typec_port(dev->parent))
++ typec_altmode_put_partner(alt);
+
+ altmode_id_remove(alt->adev.dev.parent, alt->id);
+ kfree(alt);
+@@ -1261,6 +1264,7 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
+ {
+ struct typec_port *port = to_typec_port(dev);
+ struct usb_power_delivery *pd;
++ int ret;
+
+ if (!port->ops || !port->ops->pd_set)
+ return -EOPNOTSUPP;
+@@ -1269,7 +1273,11 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
+ if (!pd)
+ return -EINVAL;
+
+- return port->ops->pd_set(port, pd);
++ ret = port->ops->pd_set(port, pd);
++ if (ret)
++ return ret;
++
++ return size;
+ }
+
+ static ssize_t select_usb_power_delivery_show(struct device *dev,
+diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
+index cda206cf0c3876..596639dad31d7a 100644
+--- a/drivers/usb/typec/mux/nb7vpq904m.c
++++ b/drivers/usb/typec/mux/nb7vpq904m.c
+@@ -453,7 +453,7 @@ static int nb7vpq904m_probe(struct i2c_client *client)
+
+ ret = nb7vpq904m_parse_data_lanes_mapping(nb7);
+ if (ret)
+- return ret;
++ goto err_switch_put;
+
+ ret = regulator_enable(nb7->vcc_supply);
+ if (ret)
+@@ -496,6 +496,9 @@ static int nb7vpq904m_probe(struct i2c_client *client)
+ gpiod_set_value(nb7->enable_gpio, 0);
+ regulator_disable(nb7->vcc_supply);
+
++err_switch_put:
++ typec_switch_put(nb7->typec_switch);
++
+ return ret;
+ }
+
+@@ -509,6 +512,8 @@ static void nb7vpq904m_remove(struct i2c_client *client)
+ gpiod_set_value(nb7->enable_gpio, 0);
+
+ regulator_disable(nb7->vcc_supply);
++
++ typec_switch_put(nb7->typec_switch);
+ }
+
+ static const struct i2c_device_id nb7vpq904m_table[] = {
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+index a8f3f4d3a4509d..d6607491fcef48 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+@@ -252,7 +252,6 @@ int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
+ val = TYPEC_CC_RP_DEF;
+ break;
+ }
+- val = TYPEC_CC_RP_DEF;
+ }
+
+ if (misc & CC_ORIENTATION)
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 0ee3e6e29bb178..7118551827f6a2 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -889,6 +889,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
+ #ifdef CONFIG_OF
+ static const struct of_device_id tcpci_of_match[] = {
+ { .compatible = "nxp,ptn5110", },
++ { .compatible = "tcpci", },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, tcpci_of_match);
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index d962f67c95ae66..e053b6e99b9e46 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1488,7 +1488,8 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
+ port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
+ port->partner_ident.product = product;
+
+- typec_partner_set_identity(port->partner);
++ if (port->partner)
++ typec_partner_set_identity(port->partner);
+
+ tcpm_log(port, "Identity: %04x:%04x.%04x",
+ PD_IDH_VID(vdo),
+@@ -1576,6 +1577,9 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
+ struct typec_altmode *altmode;
+ int i;
+
++ if (!port->partner)
++ return;
++
+ for (i = 0; i < modep->altmodes; i++) {
+ altmode = typec_partner_register_altmode(port->partner,
+ &modep->altmode_desc[i]);
+@@ -1625,6 +1629,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
+ if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ break;
+
++ if (IS_ERR_OR_NULL(port->partner))
++ break;
++
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
+@@ -2396,7 +2403,7 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
+ {
+ struct usb_power_delivery_desc desc = { port->negotiated_rev };
+ struct usb_power_delivery_capabilities_desc caps = { };
+- struct usb_power_delivery_capabilities *cap;
++ struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
+
+ if (!port->partner_pd)
+ port->partner_pd = usb_power_delivery_register(NULL, &desc);
+@@ -2406,6 +2413,11 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
+ memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
+ caps.role = TYPEC_SOURCE;
+
++ if (cap) {
++ usb_power_delivery_unregister_capabilities(cap);
++ port->partner_source_caps = NULL;
++ }
++
+ cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+@@ -3568,7 +3580,10 @@ static int tcpm_init_vconn(struct tcpm_port *port)
+
+ static void tcpm_typec_connect(struct tcpm_port *port)
+ {
++ struct typec_partner *partner;
++
+ if (!port->connected) {
++ port->connected = true;
+ /* Make sure we don't report stale identity information */
+ memset(&port->partner_ident, 0, sizeof(port->partner_ident));
+ port->partner_desc.usb_pd = port->pd_capable;
+@@ -3578,9 +3593,13 @@ static void tcpm_typec_connect(struct tcpm_port *port)
+ port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
+ else
+ port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
+- port->partner = typec_register_partner(port->typec_port,
+- &port->partner_desc);
+- port->connected = true;
++ partner = typec_register_partner(port->typec_port, &port->partner_desc);
++ if (IS_ERR(partner)) {
++ dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
++ return;
++ }
++
++ port->partner = partner;
+ typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
+ }
+ }
+@@ -3650,9 +3669,11 @@ static int tcpm_src_attach(struct tcpm_port *port)
+ static void tcpm_typec_disconnect(struct tcpm_port *port)
+ {
+ if (port->connected) {
+- typec_partner_set_usb_power_delivery(port->partner, NULL);
+- typec_unregister_partner(port->partner);
+- port->partner = NULL;
++ if (port->partner) {
++ typec_partner_set_usb_power_delivery(port->partner, NULL);
++ typec_unregister_partner(port->partner);
++ port->partner = NULL;
++ }
+ port->connected = false;
+ }
+ }
+@@ -3727,9 +3748,6 @@ static void tcpm_detach(struct tcpm_port *port)
+ if (tcpm_port_is_disconnected(port))
+ port->hard_reset_count = 0;
+
+- port->try_src_count = 0;
+- port->try_snk_count = 0;
+-
+ if (!port->attached)
+ return;
+
+@@ -3871,6 +3889,9 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
+
+ static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
+ {
++ if (!port->partner)
++ return;
++
+ switch (port->negotiated_rev) {
+ case PD_REV30:
+ break;
+@@ -3903,6 +3924,8 @@ static void run_state_machine(struct tcpm_port *port)
+ port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
+ port->state == SRC_UNATTACHED) ||
+ (port->enter_state == SNK_ATTACH_WAIT &&
++ port->state == SNK_UNATTACHED) ||
++ (port->enter_state == SNK_DEBOUNCED &&
+ port->state == SNK_UNATTACHED));
+
+ port->enter_state = port->state;
+@@ -4268,7 +4291,8 @@ static void run_state_machine(struct tcpm_port *port)
+ current_lim = PD_P_SNK_STDBY_MW / 5;
+ tcpm_set_current_limit(port, current_lim, 5000);
+ /* Not sink vbus if operational current is 0mA */
+- tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
++ tcpm_set_charge(port, !port->pd_supported ||
++ pdo_max_current(port->snk_pdo[0]));
+
+ if (!port->pd_supported)
+ tcpm_set_state(port, SNK_READY, 0);
+@@ -4856,8 +4880,11 @@ static void run_state_machine(struct tcpm_port *port)
+ break;
+ case PORT_RESET:
+ tcpm_reset_port(port);
+- tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
+- TYPEC_CC_RD : tcpm_rp_cc(port));
++ if (port->self_powered)
++ tcpm_set_cc(port, TYPEC_CC_OPEN);
++ else
++ tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
++ TYPEC_CC_RD : tcpm_rp_cc(port));
+ tcpm_set_state(port, PORT_RESET_WAIT_OFF,
+ PD_T_ERROR_RECOVERY);
+ break;
+@@ -5386,6 +5413,16 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
+ if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
+ port->tcpc->set_bist_data(port->tcpc, false);
+
++ switch (port->state) {
++ case TOGGLING:
++ case ERROR_RECOVERY:
++ case PORT_RESET:
++ case PORT_RESET_WAIT_OFF:
++ return;
++ default:
++ break;
++ }
++
+ if (port->ams != NONE_AMS)
+ port->ams = NONE_AMS;
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
+index 37b56ce75f39d5..01db27cbf1d10d 100644
+--- a/drivers/usb/typec/tipd/core.c
++++ b/drivers/usb/typec/tipd/core.c
+@@ -26,6 +26,7 @@
+ #define TPS_REG_MODE 0x03
+ #define TPS_REG_CMD1 0x08
+ #define TPS_REG_DATA1 0x09
++#define TPS_REG_VERSION 0x0F
+ #define TPS_REG_INT_EVENT1 0x14
+ #define TPS_REG_INT_EVENT2 0x15
+ #define TPS_REG_INT_MASK1 0x16
+@@ -515,49 +516,65 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
+
+ static irqreturn_t tps6598x_interrupt(int irq, void *data)
+ {
++ int intev_len = TPS_65981_2_6_INTEVENT_LEN;
+ struct tps6598x *tps = data;
+- u64 event1 = 0;
+- u64 event2 = 0;
++ u64 event1[2] = { };
++ u64 event2[2] = { };
++ u32 version;
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+- ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
+- ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
++ ret = tps6598x_read32(tps, TPS_REG_VERSION, &version);
++ if (ret)
++ dev_warn(tps->dev, "%s: failed to read version (%d)\n",
++ __func__, ret);
++
++ if (TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DH ||
++ TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DK)
++ intev_len = TPS_65987_8_INTEVENT_LEN;
++
++ ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event1, intev_len);
+ if (ret) {
+- dev_err(tps->dev, "%s: failed to read events\n", __func__);
++ dev_err(tps->dev, "%s: failed to read event1\n", __func__);
+ goto err_unlock;
+ }
+- trace_tps6598x_irq(event1, event2);
++ ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT2, event2, intev_len);
++ if (ret) {
++ dev_err(tps->dev, "%s: failed to read event2\n", __func__);
++ goto err_unlock;
++ }
++ trace_tps6598x_irq(event1[0], event2[0]);
+
+- if (!(event1 | event2))
++ if (!(event1[0] | event1[1] | event2[0] | event2[1]))
+ goto err_unlock;
+
+ if (!tps6598x_read_status(tps, &status))
+ goto err_clear_ints;
+
+- if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE)
++ if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE)
+ if (!tps6598x_read_power_status(tps))
+ goto err_clear_ints;
+
+- if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE)
++ if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE)
+ if (!tps6598x_read_data_status(tps))
+ goto err_clear_ints;
+
+ /* Handle plug insert or removal */
+- if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT)
++ if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT)
+ tps6598x_handle_plug_event(tps, status);
+
+ err_clear_ints:
+- tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
+- tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2);
++ tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
++ tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
+
+ err_unlock:
+ mutex_unlock(&tps->lock);
+
+- if (event1 | event2)
++ if (event1[0] | event1[1] | event2[0] | event2[1])
+ return IRQ_HANDLED;
++
+ return IRQ_NONE;
+ }
+
+@@ -888,6 +907,8 @@ static void tps6598x_remove(struct i2c_client *client)
+
+ if (!client->irq)
+ cancel_delayed_work_sync(&tps->wq_poll);
++ else
++ devm_free_irq(tps->dev, client->irq, tps);
+
+ tps6598x_disconnect(tps, 0);
+ typec_unregister_port(tps->port);
+diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
+index 527857549d699e..1fc3cc8ad199a9 100644
+--- a/drivers/usb/typec/tipd/tps6598x.h
++++ b/drivers/usb/typec/tipd/tps6598x.h
+@@ -199,4 +199,15 @@
+ #define TPS_DATA_STATUS_DP_SPEC_PIN_ASSIGNMENT_A BIT(2)
+ #define TPS_DATA_STATUS_DP_SPEC_PIN_ASSIGNMENT_B (BIT(2) | BIT(1))
+
++/* Version Register */
++#define TPS_VERSION_HW_VERSION_MASK GENMASK(31, 24)
++#define TPS_VERSION_HW_VERSION(x) TPS_FIELD_GET(TPS_VERSION_HW_VERSION_MASK, (x))
++#define TPS_VERSION_HW_65981_2_6 0x00
++#define TPS_VERSION_HW_65987_8_DH 0xF7
++#define TPS_VERSION_HW_65987_8_DK 0xF9
++
++/* Int Event Register length */
++#define TPS_65981_2_6_INTEVENT_LEN 8
++#define TPS_65987_8_INTEVENT_LEN 11
++
+ #endif /* __TPS6598X_H__ */
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index 73cd5bf350472f..2431febc461516 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -275,8 +275,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
+ int ret;
+
+- mutex_lock(&dp->con->lock);
+-
+ ret = typec_altmode_vdm(dp->alt, dp->header,
+ dp->vdo_data, dp->vdo_size);
+ if (ret)
+@@ -285,8 +283,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ dp->vdo_data = NULL;
+ dp->vdo_size = 0;
+ dp->header = 0;
+-
+- mutex_unlock(&dp->con->lock);
+ }
+
+ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 61b64558f96c57..f6fb5575d4f0ac 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -36,22 +36,29 @@
+ */
+ #define UCSI_SWAP_TIMEOUT_MS 5000
+
+-static int ucsi_acknowledge_command(struct ucsi *ucsi)
++static int ucsi_read_message_in(struct ucsi *ucsi, void *buf,
++ size_t buf_size)
+ {
+- u64 ctrl;
+-
+- ctrl = UCSI_ACK_CC_CI;
+- ctrl |= UCSI_ACK_COMMAND_COMPLETE;
++ /*
++ * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
++ * reads here.
++ */
++ if (ucsi->version <= UCSI_VERSION_1_2)
++ buf_size = clamp(buf_size, 0, 16);
+
+- return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
++ return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size);
+ }
+
+-static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
++static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
+ {
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+- ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
++ ctrl |= UCSI_ACK_COMMAND_COMPLETE;
++ if (conn_ack) {
++ clear_bit(EVENT_PENDING, &ucsi->flags);
++ ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
++ }
+
+ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+ }
+@@ -64,7 +71,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
+ int ret;
+
+ /* Acknowledge the command that failed */
+- ret = ucsi_acknowledge_command(ucsi);
++ ret = ucsi_acknowledge(ucsi, false);
+ if (ret)
+ return ret;
+
+@@ -72,11 +79,11 @@ static int ucsi_read_error(struct ucsi *ucsi)
+ if (ret < 0)
+ return ret;
+
+- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
++ ret = ucsi_read_message_in(ucsi, &error, sizeof(error));
+ if (ret)
+ return ret;
+
+- ret = ucsi_acknowledge_command(ucsi);
++ ret = ucsi_acknowledge(ucsi, false);
+ if (ret)
+ return ret;
+
+@@ -138,25 +145,34 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
+ if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ return -EIO;
+
+- if (cci & UCSI_CCI_NOT_SUPPORTED)
++ if (cci & UCSI_CCI_NOT_SUPPORTED) {
++ if (ucsi_acknowledge(ucsi, false) < 0)
++ dev_err(ucsi->dev,
++ "ACK of unsupported command failed\n");
+ return -EOPNOTSUPP;
++ }
+
+ if (cci & UCSI_CCI_ERROR) {
+- if (cmd == UCSI_GET_ERROR_STATUS)
++ if (cmd == UCSI_GET_ERROR_STATUS) {
++ ret = ucsi_acknowledge(ucsi, false);
++ if (ret)
++ return ret;
++
+ return -EIO;
++ }
+ return ucsi_read_error(ucsi);
+ }
+
+ if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
+- ret = ucsi_acknowledge_command(ucsi);
++ ret = ucsi_acknowledge(ucsi, false);
+ return ret ? ret : -EBUSY;
+ }
+
+ return UCSI_CCI_LENGTH(cci);
+ }
+
+-int ucsi_send_command(struct ucsi *ucsi, u64 command,
+- void *data, size_t size)
++static int ucsi_send_command_common(struct ucsi *ucsi, u64 command,
++ void *data, size_t size, bool conn_ack)
+ {
+ u8 length;
+ int ret;
+@@ -170,12 +186,12 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ length = ret;
+
+ if (data) {
+- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
++ ret = ucsi_read_message_in(ucsi, data, size);
+ if (ret)
+ goto out;
+ }
+
+- ret = ucsi_acknowledge_command(ucsi);
++ ret = ucsi_acknowledge(ucsi, conn_ack);
+ if (ret)
+ goto out;
+
+@@ -184,6 +200,12 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ mutex_unlock(&ucsi->ppm_lock);
+ return ret;
+ }
++
++int ucsi_send_command(struct ucsi *ucsi, u64 command,
++ void *data, size_t size)
++{
++ return ucsi_send_command_common(ucsi, command, data, size, false);
++}
+ EXPORT_SYMBOL_GPL(ucsi_send_command);
+
+ /* -------------------------------------------------------------------------- */
+@@ -677,12 +699,6 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con)
+ return PTR_ERR(cap);
+
+ con->partner_source_caps = cap;
+-
+- ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+- if (ret) {
+- usb_power_delivery_unregister_capabilities(con->partner_source_caps);
+- return ret;
+- }
+ }
+
+ ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo);
+@@ -697,15 +713,9 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con)
+ return PTR_ERR(cap);
+
+ con->partner_sink_caps = cap;
+-
+- ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+- if (ret) {
+- usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
+- return ret;
+- }
+ }
+
+- return 0;
++ return typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+ }
+
+ static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
+@@ -881,7 +891,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ mutex_lock(&con->lock);
+
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+- ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
++
++ ret = ucsi_send_command_common(ucsi, command, &con->status,
++ sizeof(con->status), true);
+ if (ret < 0) {
+ dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+ __func__, ret);
+@@ -933,12 +945,6 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
+ ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
+
+- clear_bit(EVENT_PENDING, &con->ucsi->flags);
+-
+- ret = ucsi_acknowledge_connector_change(ucsi);
+- if (ret)
+- dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+-
+ out_unlock:
+ mutex_unlock(&con->lock);
+ }
+@@ -953,7 +959,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
+ struct ucsi_connector *con = &ucsi->connector[num - 1];
+
+ if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) {
+- dev_dbg(ucsi->dev, "Bogus connector change event\n");
++ dev_dbg(ucsi->dev, "Early connector change event\n");
+ return;
+ }
+
+@@ -976,13 +982,47 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
+
+ static int ucsi_reset_ppm(struct ucsi *ucsi)
+ {
+- u64 command = UCSI_PPM_RESET;
++ u64 command;
+ unsigned long tmo;
+ u32 cci;
+ int ret;
+
+ mutex_lock(&ucsi->ppm_lock);
+
++ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
++ if (ret < 0)
++ goto out;
++
++ /*
++ * If UCSI_CCI_RESET_COMPLETE is already set, we must clear
++ * the flag before we start another reset. Send a
++ * UCSI_SET_NOTIFICATION_ENABLE command to achieve this.
++ * Ignore a timeout and try the reset anyway if this fails.
++ */
++ if (cci & UCSI_CCI_RESET_COMPLETE) {
++ command = UCSI_SET_NOTIFICATION_ENABLE;
++ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
++ sizeof(command));
++ if (ret < 0)
++ goto out;
++
++ tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
++ do {
++ ret = ucsi->ops->read(ucsi, UCSI_CCI,
++ &cci, sizeof(cci));
++ if (ret < 0)
++ goto out;
++ if (cci & UCSI_CCI_COMMAND_COMPLETE)
++ break;
++ if (time_is_before_jiffies(tmo))
++ break;
++ msleep(20);
++ } while (1);
++
++ WARN_ON(cci & UCSI_CCI_RESET_COMPLETE);
++ }
++
++ command = UCSI_PPM_RESET;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+@@ -1244,7 +1284,6 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ }
+
+ con->port_source_caps = pd_cap;
+- typec_port_set_usb_power_delivery(con->port, con->pd);
+ }
+
+ memset(&pd_caps, 0, sizeof(pd_caps));
+@@ -1261,9 +1300,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ }
+
+ con->port_sink_caps = pd_cap;
+- typec_port_set_usb_power_delivery(con->port, con->pd);
+ }
+
++ typec_port_set_usb_power_delivery(con->port, con->pd);
++
+ /* Alternate modes */
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
+ if (ret) {
+@@ -1350,6 +1390,7 @@ static int ucsi_init(struct ucsi *ucsi)
+ {
+ struct ucsi_connector *con, *connector;
+ u64 command, ntfy;
++ u32 cci;
+ int ret;
+ int i;
+
+@@ -1402,6 +1443,15 @@ static int ucsi_init(struct ucsi *ucsi)
+
+ ucsi->connector = connector;
+ ucsi->ntfy = ntfy;
++
++ mutex_lock(&ucsi->ppm_lock);
++ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
++ mutex_unlock(&ucsi->ppm_lock);
++ if (ret)
++ return ret;
++ if (UCSI_CCI_CONNECTOR(cci))
++ ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
++
+ return 0;
+
+ err_unregister:
+@@ -1553,6 +1603,15 @@ int ucsi_register(struct ucsi *ucsi)
+ if (!ucsi->version)
+ return -ENODEV;
+
++ /*
++ * Version format is JJ.M.N (JJ = Major version, M = Minor version,
++ * N = sub-minor version).
++ */
++ dev_dbg(ucsi->dev, "Registered UCSI interface with version %x.%x.%x",
++ UCSI_BCD_GET_MAJOR(ucsi->version),
++ UCSI_BCD_GET_MINOR(ucsi->version),
++ UCSI_BCD_GET_SUBMINOR(ucsi->version));
++
+ queue_delayed_work(system_long_wq, &ucsi->work, 0);
+
+ ucsi_debugfs_register(ucsi);
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 474315a72c7707..42c60eba5fb6ee 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -23,6 +23,17 @@ struct dentry;
+ #define UCSI_CONTROL 8
+ #define UCSI_MESSAGE_IN 16
+ #define UCSI_MESSAGE_OUT 32
++#define UCSIv2_MESSAGE_OUT 272
++
++/* UCSI versions */
++#define UCSI_VERSION_1_2 0x0120
++#define UCSI_VERSION_2_0 0x0200
++#define UCSI_VERSION_2_1 0x0210
++#define UCSI_VERSION_3_0 0x0300
++
++#define UCSI_BCD_GET_MAJOR(_v_) (((_v_) >> 8) & 0xFF)
++#define UCSI_BCD_GET_MINOR(_v_) (((_v_) >> 4) & 0x0F)
++#define UCSI_BCD_GET_SUBMINOR(_v_) ((_v_) & 0x0F)
+
+ /* Command Status and Connector Change Indication (CCI) bits */
+ #define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
+@@ -221,12 +232,12 @@ struct ucsi_cable_property {
+ #define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
+ #define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
+ #define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
+-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
++#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3)
+ #define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+ #define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+ #define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+ #define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
++#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5)
+ u8 latency;
+ } __packed;
+
+@@ -393,7 +404,7 @@ ucsi_register_displayport(struct ucsi_connector *con,
+ bool override, int offset,
+ struct typec_altmode_desc *desc)
+ {
+- return NULL;
++ return typec_port_register_altmode(con->port, desc);
+ }
+
+ static inline void
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index 6bbf490ac4010e..7b3ac133ef8618 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -23,6 +23,9 @@ struct ucsi_acpi {
+ void *base;
+ struct completion complete;
+ unsigned long flags;
++#define UCSI_ACPI_SUPPRESS_EVENT 0
++#define UCSI_ACPI_COMMAND_PENDING 1
++#define UCSI_ACPI_ACK_PENDING 2
+ guid_t guid;
+ u64 cmd;
+ };
+@@ -73,9 +76,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+ {
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
++ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
+ int ret;
+
+- set_bit(COMMAND_PENDING, &ua->flags);
++ if (ack)
++ set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
++ else
++ set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
+
+ ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ if (ret)
+@@ -85,7 +92,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ ret = -ETIMEDOUT;
+
+ out_clear_bit:
+- clear_bit(COMMAND_PENDING, &ua->flags);
++ if (ack)
++ clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
++ else
++ clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
+
+ return ret;
+ }
+@@ -119,12 +129,62 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
+ .async_write = ucsi_acpi_async_write
+ };
+
+-static const struct dmi_system_id zenbook_dmi_id[] = {
++/*
++ * Some Dell laptops don't like ACK commands with the
++ * UCSI_ACK_CONNECTOR_CHANGE but not the UCSI_ACK_COMMAND_COMPLETE
++ * bit set. To work around this, send a dummy command and bundle the
++ * UCSI_ACK_CONNECTOR_CHANGE with the UCSI_ACK_COMMAND_COMPLETE
++ * for the dummy command.
++ */
++static int
++ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
++ const void *val, size_t val_len)
++{
++ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
++ u64 cmd = *(u64 *)val;
++ u64 dummycmd = UCSI_GET_CAPABILITY;
++ int ret;
++
++ if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) {
++ cmd |= UCSI_ACK_COMMAND_COMPLETE;
++
++ /*
++ * The UCSI core thinks it is sending a connector change ack
++ * and will accept new connector change events. We don't want
++ * this to happen for the dummy command as its response will
++ * still report the very event that the core is trying to clear.
++ */
++ set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
++ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd,
++ sizeof(dummycmd));
++ clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
++
++ if (ret < 0)
++ return ret;
++ }
++
++ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
++}
++
++static const struct ucsi_operations ucsi_dell_ops = {
++ .read = ucsi_acpi_read,
++ .sync_write = ucsi_dell_sync_write,
++ .async_write = ucsi_acpi_async_write
++};
++
++static const struct dmi_system_id ucsi_acpi_quirks[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
++ .driver_data = (void *)&ucsi_zenbook_ops,
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ },
++ .driver_data = (void *)&ucsi_dell_ops,
+ },
+ { }
+ };
+@@ -139,11 +199,14 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+ if (ret)
+ return;
+
+- if (UCSI_CCI_CONNECTOR(cci))
++ if (UCSI_CCI_CONNECTOR(cci) &&
++ !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags))
+ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+- if (test_bit(COMMAND_PENDING, &ua->flags) &&
+- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
++ if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
++ complete(&ua->complete);
++ if (cci & UCSI_CCI_COMMAND_COMPLETE &&
++ test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags))
+ complete(&ua->complete);
+ }
+
+@@ -151,6 +214,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ const struct ucsi_operations *ops = &ucsi_acpi_ops;
++ const struct dmi_system_id *id;
+ struct ucsi_acpi *ua;
+ struct resource *res;
+ acpi_status status;
+@@ -180,8 +244,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ init_completion(&ua->complete);
+ ua->dev = &pdev->dev;
+
+- if (dmi_check_system(zenbook_dmi_id))
+- ops = &ucsi_zenbook_ops;
++ id = dmi_first_match(ucsi_acpi_quirks);
++ if (id)
++ ops = id->driver_data;
+
+ ua->ucsi = ucsi_create(&pdev->dev, ops);
+ if (IS_ERR(ua->ucsi))
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index bb1854b3311dc7..94f2df02f06eeb 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -8,9 +8,13 @@
+ #include <linux/mutex.h>
+ #include <linux/property.h>
+ #include <linux/soc/qcom/pdr.h>
++#include <linux/usb/typec_mux.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/soc/qcom/pmic_glink.h>
+ #include "ucsi.h"
+
++#define PMIC_GLINK_MAX_PORTS 2
++
+ #define UCSI_BUF_SIZE 48
+
+ #define MSG_TYPE_REQ_RESP 1
+@@ -52,6 +56,9 @@ struct ucsi_notify_ind_msg {
+ struct pmic_glink_ucsi {
+ struct device *dev;
+
++ struct gpio_desc *port_orientation[PMIC_GLINK_MAX_PORTS];
++ struct typec_switch *port_switch[PMIC_GLINK_MAX_PORTS];
++
+ struct pmic_glink_client *client;
+
+ struct ucsi *ucsi;
+@@ -168,7 +175,8 @@ static int pmic_glink_ucsi_sync_write(struct ucsi *__ucsi, unsigned int offset,
+ left = wait_for_completion_timeout(&ucsi->sync_ack, 5 * HZ);
+ if (!left) {
+ dev_err(ucsi->dev, "timeout waiting for UCSI sync write response\n");
+- ret = -ETIMEDOUT;
++ /* return 0 here and let core UCSI code handle the CCI_BUSY */
++ ret = 0;
+ } else if (ucsi->sync_val) {
+ dev_err(ucsi->dev, "sync write returned: %d\n", ucsi->sync_val);
+ }
+@@ -220,13 +228,22 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ }
+
+ con_num = UCSI_CCI_CONNECTOR(cci);
+- if (con_num)
++ if (con_num) {
++ if (con_num <= PMIC_GLINK_MAX_PORTS &&
++ ucsi->port_orientation[con_num - 1]) {
++ int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
++
++ if (orientation >= 0) {
++ typec_switch_set(ucsi->port_switch[con_num - 1],
++ orientation ? TYPEC_ORIENTATION_REVERSE
++ : TYPEC_ORIENTATION_NORMAL);
++ }
++ }
++
+ ucsi_connector_change(ucsi->ucsi, con_num);
++ }
+
+- if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
+- ucsi->sync_val = -EBUSY;
+- complete(&ucsi->sync_ack);
+- } else if (ucsi->sync_pending &&
++ if (ucsi->sync_pending &&
+ (cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))) {
+ complete(&ucsi->sync_ack);
+ }
+@@ -235,6 +252,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ static void pmic_glink_ucsi_register(struct work_struct *work)
+ {
+ struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
++ int orientation;
++ int i;
++
++ for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
++ if (!ucsi->port_orientation[i])
++ continue;
++ orientation = gpiod_get_value(ucsi->port_orientation[i]);
++
++ if (orientation >= 0) {
++ typec_switch_set(ucsi->port_switch[i],
++ orientation ? TYPEC_ORIENTATION_REVERSE
++ : TYPEC_ORIENTATION_NORMAL);
++ }
++ }
+
+ ucsi_register(ucsi->ucsi);
+ }
+@@ -282,6 +313,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ {
+ struct pmic_glink_ucsi *ucsi;
+ struct device *dev = &adev->dev;
++ struct fwnode_handle *fwnode;
+ int ret;
+
+ ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
+@@ -309,12 +341,51 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+
+ ucsi_set_drvdata(ucsi->ucsi, ucsi);
+
+- ucsi->client = devm_pmic_glink_register_client(dev,
+- PMIC_GLINK_OWNER_USBC,
+- pmic_glink_ucsi_callback,
+- pmic_glink_ucsi_pdr_notify,
+- ucsi);
+- return PTR_ERR_OR_ZERO(ucsi->client);
++ device_for_each_child_node(dev, fwnode) {
++ struct gpio_desc *desc;
++ u32 port;
++
++ ret = fwnode_property_read_u32(fwnode, "reg", &port);
++ if (ret < 0) {
++ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++ fwnode_handle_put(fwnode);
++ return ret;
++ }
++
++ if (port >= PMIC_GLINK_MAX_PORTS) {
++ dev_warn(dev, "invalid connector number, ignoring\n");
++ continue;
++ }
++
++ desc = devm_gpiod_get_index_optional(&adev->dev, "orientation", port, GPIOD_IN);
++
++ /* If GPIO isn't found, continue */
++ if (!desc)
++ continue;
++
++ if (IS_ERR(desc)) {
++ fwnode_handle_put(fwnode);
++ return dev_err_probe(dev, PTR_ERR(desc),
++ "unable to acquire orientation gpio\n");
++ }
++ ucsi->port_orientation[port] = desc;
++
++ ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
++ if (IS_ERR(ucsi->port_switch[port]))
++ return dev_err_probe(dev, PTR_ERR(ucsi->port_switch[port]),
++ "failed to acquire orientation-switch\n");
++ }
++
++ ucsi->client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_USBC,
++ pmic_glink_ucsi_callback,
++ pmic_glink_ucsi_pdr_notify,
++ ucsi);
++ if (IS_ERR(ucsi->client))
++ return PTR_ERR(ucsi->client);
++
++ pmic_glink_client_register(ucsi->client);
++
++ return 0;
+ }
+
+ static void pmic_glink_ucsi_remove(struct auxiliary_device *adev)
+diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+index 93d7806681cf01..1d7ee833eb4fd3 100644
+--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
++++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+@@ -64,6 +64,7 @@ struct ucsi_stm32g0 {
+ struct completion complete;
+ struct device *dev;
+ unsigned long flags;
++#define ACK_PENDING 2
+ const char *fw_name;
+ struct ucsi *ucsi;
+ bool suspended;
+@@ -395,9 +396,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
+ size_t len)
+ {
+ struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
++ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
+ int ret;
+
+- set_bit(COMMAND_PENDING, &g0->flags);
++ if (ack)
++ set_bit(ACK_PENDING, &g0->flags);
++ else
++ set_bit(COMMAND_PENDING, &g0->flags);
+
+ ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
+ if (ret)
+@@ -405,9 +410,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
+
+ if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
++ else
++ return 0;
+
+ out_clear_bit:
+- clear_bit(COMMAND_PENDING, &g0->flags);
++ if (ack)
++ clear_bit(ACK_PENDING, &g0->flags);
++ else
++ clear_bit(COMMAND_PENDING, &g0->flags);
+
+ return ret;
+ }
+@@ -428,8 +438,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+- if (test_bit(COMMAND_PENDING, &g0->flags) &&
+- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
++ if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags))
++ complete(&g0->complete);
++ if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags))
+ complete(&g0->complete);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 9c6954aad6c882..ce625b1ce9a512 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -464,8 +464,13 @@ static void stub_disconnect(struct usb_device *udev)
+ /* release port */
+ rc = usb_hub_release_port(udev->parent, udev->portnum,
+ (struct usb_dev_state *) udev);
+- if (rc) {
+- dev_dbg(&udev->dev, "unable to release port\n");
++ /*
++ * NOTE: If a HUB disconnect triggered disconnect of the downstream
++ * device, usb_hub_release_port will return -ENODEV, so we can safely
++ * ignore that error here.
++ */
++ if (rc && (rc != -ENODEV)) {
++ dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
+ return;
+ }
+
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index fc01b31bbb875d..6338d818bc8bc9 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb)
+ if (err && err != -ENODEV)
+ dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
+ config, err);
+- return 0;
++ return err;
+ }
+
+ static int tweak_reset_device_cmd(struct urb *urb)
+ {
+ struct stub_priv *priv = (struct stub_priv *) urb->context;
+ struct stub_device *sdev = priv->sdev;
++ int err;
+
+ dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
+
+- if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
++ err = usb_lock_device_for_reset(sdev->udev, NULL);
++ if (err < 0) {
+ dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+- return 0;
++ return err;
+ }
+- usb_reset_device(sdev->udev);
++ err = usb_reset_device(sdev->udev);
+ usb_unlock_device(sdev->udev);
+
+- return 0;
++ return err;
+ }
+
+ /*
+ * clear_halt, set_interface, and set_configuration require special tricks.
++ * Returns 1 if request was tweaked, 0 otherwise.
+ */
+-static void tweak_special_requests(struct urb *urb)
++static int tweak_special_requests(struct urb *urb)
+ {
++ int err;
++
+ if (!urb || !urb->setup_packet)
+- return;
++ return 0;
+
+ if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
+- return;
++ return 0;
+
+ if (is_clear_halt_cmd(urb))
+ /* tweak clear_halt */
+- tweak_clear_halt_cmd(urb);
++ err = tweak_clear_halt_cmd(urb);
+
+ else if (is_set_interface_cmd(urb))
+ /* tweak set_interface */
+- tweak_set_interface_cmd(urb);
++ err = tweak_set_interface_cmd(urb);
+
+ else if (is_set_configuration_cmd(urb))
+ /* tweak set_configuration */
+- tweak_set_configuration_cmd(urb);
++ err = tweak_set_configuration_cmd(urb);
+
+ else if (is_reset_device_cmd(urb))
+- tweak_reset_device_cmd(urb);
+- else
++ err = tweak_reset_device_cmd(urb);
++ else {
+ usbip_dbg_stub_rx("no need to tweak\n");
++ return 0;
++ }
++
++ return !err;
+ }
+
+ /*
+@@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ int support_sg = 1;
+ int np = 0;
+ int ret, i;
++ int is_tweaked;
+
+ if (pipe == -1)
+ return;
+@@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ priv->urbs[i]->pipe = pipe;
+ priv->urbs[i]->complete = stub_complete;
+
+- /* no need to submit an intercepted request, but harmless? */
+- tweak_special_requests(priv->urbs[i]);
++ /*
++ * all URBs belong to a single PDU, so a global is_tweaked flag is
++ * enough
++ */
++ is_tweaked = tweak_special_requests(priv->urbs[i]);
+
+ masking_bogus_flags(priv->urbs[i]);
+ }
+@@ -594,22 +607,32 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+
+ /* urb is now ready to submit */
+ for (i = 0; i < priv->num_urbs; i++) {
+- ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
++ if (!is_tweaked) {
++ ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
+
+- if (ret == 0)
+- usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
+- pdu->base.seqnum);
+- else {
+- dev_err(&udev->dev, "submit_urb error, %d\n", ret);
+- usbip_dump_header(pdu);
+- usbip_dump_urb(priv->urbs[i]);
++ if (ret == 0)
++ usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
++ pdu->base.seqnum);
++ else {
++ dev_err(&udev->dev, "submit_urb error, %d\n", ret);
++ usbip_dump_header(pdu);
++ usbip_dump_urb(priv->urbs[i]);
+
++ /*
++ * Pessimistic.
++ * This connection will be discarded.
++ */
++ usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
++ break;
++ }
++ } else {
+ /*
+- * Pessimistic.
+- * This connection will be discarded.
++ * An identical URB was already submitted in
++ * tweak_special_requests(). Skip submitting this URB to not
++ * duplicate the request.
+ */
+- usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+- break;
++ priv->urbs[i]->status = 0;
++ stub_complete(priv->urbs[i]);
+ }
+ }
+
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index d8cbd2dfc2c256..282efca64a0120 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -298,12 +298,6 @@ struct usbip_device {
+ __k; \
+ })
+
+-#define kthread_stop_put(k) \
+- do { \
+- kthread_stop(k); \
+- put_task_struct(k); \
+- } while (0)
+-
+ /* usbip_common.c */
+ void usbip_dump_urb(struct urb *purb);
+ void usbip_dump_header(struct usbip_header *pdu);
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 37d1fc34e8a564..14a5f55f24fc8c 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ *
+ */
+ if (usb_pipedevice(urb->pipe) == 0) {
++ struct usb_device *old;
+ __u8 type = usb_pipetype(urb->pipe);
+ struct usb_ctrlrequest *ctrlreq =
+ (struct usb_ctrlrequest *) urb->setup_packet;
+@@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ goto no_need_xmit;
+ }
+
++ old = vdev->udev;
+ switch (ctrlreq->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* set_address may come when a device is reset */
+ dev_info(dev, "SetAddress Request (%d) to port %d\n",
+ ctrlreq->wValue, vdev->rhport);
+
+- usb_put_dev(vdev->udev);
+ vdev->udev = usb_get_dev(urb->dev);
++ usb_put_dev(old);
+
+ spin_lock(&vdev->ud.lock);
+ vdev->ud.status = VDEV_ST_USED;
+@@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ usbip_dbg_vhci_hc(
+ "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
+
+- usb_put_dev(vdev->udev);
+ vdev->udev = usb_get_dev(urb->dev);
++ usb_put_dev(old);
+ goto out;
+
+ default:
+@@ -1067,6 +1069,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
+ static void vhci_device_reset(struct usbip_device *ud)
+ {
+ struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
++ struct usb_device *old = vdev->udev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ud->lock, flags);
+@@ -1074,8 +1077,8 @@ static void vhci_device_reset(struct usbip_device *ud)
+ vdev->speed = 0;
+ vdev->devid = 0;
+
+- usb_put_dev(vdev->udev);
+ vdev->udev = NULL;
++ usb_put_dev(old);
+
+ if (ud->tcp_socket) {
+ sockfd_put(ud->tcp_socket);
+diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
+index 5a09a09cca7090..cce3d1837104c3 100644
+--- a/drivers/vdpa/alibaba/eni_vdpa.c
++++ b/drivers/vdpa/alibaba/eni_vdpa.c
+@@ -497,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (!eni_vdpa->vring) {
+ ret = -ENOMEM;
+ ENI_ERR(pdev, "failed to allocate virtqueues\n");
+- goto err;
++ goto err_remove_vp_legacy;
+ }
+
+ for (i = 0; i < eni_vdpa->queues; i++) {
+@@ -509,11 +509,13 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
+ if (ret) {
+ ENI_ERR(pdev, "failed to register to vdpa bus\n");
+- goto err;
++ goto err_remove_vp_legacy;
+ }
+
+ return 0;
+
++err_remove_vp_legacy:
++ vp_legacy_remove(&eni_vdpa->ldev);
+ err:
+ put_device(&eni_vdpa->vdpa.dev);
+ return ret;
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 946488b8989f4b..b56aae3f7be378 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -144,8 +144,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev);
+
+ static bool mlx5_vdpa_debug;
+
+-#define MLX5_CVQ_MAX_ENT 16
+-
+ #define MLX5_LOG_VIO_FLAG(_feature) \
+ do { \
+ if (features & BIT_ULL(_feature)) \
+@@ -2136,9 +2134,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq;
+
+- if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
++ if (!is_index_valid(mvdev, idx))
+ return;
+
++ if (is_ctrl_vq_idx(mvdev, idx)) {
++ struct mlx5_control_vq *cvq = &mvdev->cvq;
++
++ cvq->vring.vring.num = num;
++ return;
++ }
++
+ mvq = &ndev->vqs[idx];
+ mvq->num_ent = num;
+ }
+@@ -2795,13 +2800,18 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ int err = 0;
+
+- if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
++ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
++ u16 idx = cvq->vring.last_avail_idx;
++
+ err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+- MLX5_CVQ_MAX_ENT, false,
++ cvq->vring.vring.num, false,
+ (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ (struct vring_used *)(uintptr_t)cvq->device_addr);
+
++ if (!err)
++ cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx;
++ }
+ return err;
+ }
+
+diff --git a/drivers/vdpa/pds/debugfs.c b/drivers/vdpa/pds/debugfs.c
+index 9b04aad6ec35d7..c328e694f6e7f0 100644
+--- a/drivers/vdpa/pds/debugfs.c
++++ b/drivers/vdpa/pds/debugfs.c
+@@ -261,7 +261,7 @@ void pds_vdpa_debugfs_add_vdpadev(struct pds_vdpa_aux *vdpa_aux)
+ debugfs_create_file("config", 0400, vdpa_aux->dentry, vdpa_aux->pdsv, &config_fops);
+
+ for (i = 0; i < vdpa_aux->pdsv->num_vqs; i++) {
+- char name[8];
++ char name[16];
+
+ snprintf(name, sizeof(name), "vq%02d", i);
+ debugfs_create_file(name, 0400, vdpa_aux->dentry,
+diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
+index 52b2449182ad71..25c0fe5ec3d5df 100644
+--- a/drivers/vdpa/pds/vdpa_dev.c
++++ b/drivers/vdpa/pds/vdpa_dev.c
+@@ -318,9 +318,8 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur
+ return -EOPNOTSUPP;
+ }
+
+- pdsv->negotiated_features = nego_features;
+-
+ driver_features = pds_vdpa_get_driver_features(vdpa_dev);
++ pdsv->negotiated_features = nego_features;
+ dev_dbg(dev, "%s: %#llx => %#llx\n",
+ __func__, driver_features, nego_features);
+
+@@ -461,8 +460,10 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+
+ pds_vdpa_cmd_set_status(pdsv, status);
+
+- /* Note: still working with FW on the need for this reset cmd */
+ if (status == 0) {
++ struct vdpa_callback null_cb = { };
++
++ pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
+ pds_vdpa_cmd_reset(pdsv);
+
+ for (i = 0; i < pdsv->num_vqs; i++) {
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index 76d41058add9a8..421ab01ef06ba5 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -158,7 +158,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
+ vdpasim->iommu_pt[i] = true;
+ }
+
+- vdpasim->running = true;
++ vdpasim->running = false;
+ spin_unlock(&vdpasim->iommu_lock);
+
+ vdpasim->features = 0;
+@@ -477,6 +477,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
+
+ mutex_lock(&vdpasim->mutex);
+ vdpasim->status = status;
++ vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
+ mutex_unlock(&vdpasim->mutex);
+ }
+
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+index b3a3cb16579552..b137f367934393 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+@@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ if (blk->shared_backend) {
+ blk->buffer = shared_buffer;
+ } else {
+- blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++ blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!blk->buffer) {
+ ret = -ENOMEM;
+@@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void)
+ goto parent_err;
+
+ if (shared_backend) {
+- shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++ shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!shared_buffer) {
+ ret = -ENOMEM;
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index df7869537ef146..d7bda179ef79f8 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -8,6 +8,7 @@
+ *
+ */
+
++#include "linux/virtio_net.h"
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/cdev.h>
+@@ -28,6 +29,7 @@
+ #include <uapi/linux/virtio_config.h>
+ #include <uapi/linux/virtio_ids.h>
+ #include <uapi/linux/virtio_blk.h>
++#include <uapi/linux/virtio_ring.h>
+ #include <linux/mod_devicetable.h>
+
+ #include "iova_domain.h"
+@@ -1662,13 +1664,17 @@ static bool device_is_allowed(u32 device_id)
+ return false;
+ }
+
+-static bool features_is_valid(u64 features)
++static bool features_is_valid(struct vduse_dev_config *config)
+ {
+- if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
++ if (!(config->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
+ return false;
+
+ /* Now we only support read-only configuration space */
+- if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
++ if ((config->device_id == VIRTIO_ID_BLOCK) &&
++ (config->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE)))
++ return false;
++ else if ((config->device_id == VIRTIO_ID_NET) &&
++ (config->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return false;
+
+ return true;
+@@ -1695,7 +1701,7 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
+ if (!device_is_allowed(config->device_id))
+ return false;
+
+- if (!features_is_valid(config->features))
++ if (!features_is_valid(config))
+ return false;
+
+ return true;
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+index c51229fccbd6a8..1a1d0d5ec35c2b 100644
+--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+@@ -141,13 +141,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
+ irq = &vdev->mc_irqs[index];
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_fsl_mc_irq_handler(hwirq, irq);
++ if (irq->trigger)
++ eventfd_signal(irq->trigger, 1);
+
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ u8 trigger = *(u8 *)data;
+
+- if (trigger)
+- vfio_fsl_mc_irq_handler(hwirq, irq);
++ if (trigger && irq->trigger)
++ eventfd_signal(irq->trigger, 1);
+ }
+
+ return 0;
+diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
+index 0848f920efb7c1..7af5b204990bb5 100644
+--- a/drivers/vfio/iova_bitmap.c
++++ b/drivers/vfio/iova_bitmap.c
+@@ -100,7 +100,7 @@ struct iova_bitmap {
+ struct iova_bitmap_map mapped;
+
+ /* userspace address of the bitmap */
+- u64 __user *bitmap;
++ u8 __user *bitmap;
+
+ /* u64 index that @mapped points to */
+ unsigned long mapped_base_index;
+@@ -162,7 +162,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
+ {
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+ unsigned long npages;
+- u64 __user *addr;
++ u8 __user *addr;
+ long ret;
+
+ /*
+@@ -175,18 +175,19 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
+ bitmap->mapped_base_index) *
+ sizeof(*bitmap->bitmap), PAGE_SIZE);
+
+- /*
+- * We always cap at max number of 'struct page' a base page can fit.
+- * This is, for example, on x86 means 2M of bitmap data max.
+- */
+- npages = min(npages, PAGE_SIZE / sizeof(struct page *));
+-
+ /*
+ * Bitmap address to be pinned is calculated via pointer arithmetic
+ * with bitmap u64 word index.
+ */
+ addr = bitmap->bitmap + bitmap->mapped_base_index;
+
++ /*
++ * We always cap at max number of 'struct page' a base page can fit.
++ * This is, for example, on x86 means 2M of bitmap data max.
++ */
++ npages = min(npages + !!offset_in_page(addr),
++ PAGE_SIZE / sizeof(struct page *));
++
+ ret = pin_user_pages_fast((unsigned long)addr, npages,
+ FOLL_WRITE, mapped->pages);
+ if (ret <= 0)
+@@ -247,7 +248,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+
+ mapped = &bitmap->mapped;
+ mapped->pgshift = __ffs(page_size);
+- bitmap->bitmap = data;
++ bitmap->bitmap = (u8 __user *)data;
+ bitmap->mapped_total_index =
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ bitmap->iova = iova;
+@@ -302,7 +303,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+
+ remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ remaining = min_t(unsigned long, remaining,
+- bytes / sizeof(*bitmap->bitmap));
++ DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
+
+ return remaining;
+ }
+@@ -406,6 +407,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
+ mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
++ unsigned long last_page_idx = mapped->npages - 1;
+
+ do {
+ unsigned int page_idx = cur_bit / BITS_PER_PAGE;
+@@ -414,6 +416,9 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ last_bit - cur_bit + 1);
+ void *kaddr;
+
++ if (unlikely(page_idx > last_page_idx))
++ break;
++
+ kaddr = kmap_local_page(mapped->pages[page_idx]);
+ bitmap_set(kaddr, offset, nbits);
+ kunmap_local(kaddr);
+diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+index b2f9778c8366ea..4d27465c8f1a89 100644
+--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
++++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+@@ -694,6 +694,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu
+ size_t len, loff_t *pos)
+ {
+ struct hisi_acc_vf_migration_file *migf = filp->private_data;
++ u8 *vf_data = (u8 *)&migf->vf_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+ int ret;
+@@ -715,7 +716,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu
+ goto out_unlock;
+ }
+
+- ret = copy_from_user(&migf->vf_data, buf, len);
++ ret = copy_from_user(vf_data + *pos, buf, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+@@ -835,7 +836,9 @@ static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t
+
+ len = min_t(size_t, migf->total_length - *pos, len);
+ if (len) {
+- ret = copy_to_user(buf, &migf->vf_data, len);
++ u8 *vf_data = (u8 *)&migf->vf_data;
++
++ ret = copy_to_user(buf, vf_data + *pos, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
+index c937aa6f39546d..27607d7b9030a4 100644
+--- a/drivers/vfio/pci/pds/dirty.c
++++ b/drivers/vfio/pci/pds/dirty.c
+@@ -478,8 +478,7 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
+ pds_vfio->vf_id, iova, length, pds_vfio->dirty.region_page_size,
+ pages, bitmap_size);
+
+- if (!length || ((dirty->region_start + iova + length) >
+- (dirty->region_start + dirty->region_size))) {
++ if (!length || ((iova - dirty->region_start + length) > dirty->region_size)) {
+ dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
+ iova, length);
+ return -EINVAL;
+@@ -496,7 +495,8 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
+ return -EINVAL;
+ }
+
+- bmp_offset = DIV_ROUND_UP(iova / dirty->region_page_size, sizeof(u64));
++ bmp_offset = DIV_ROUND_UP((iova - dirty->region_start) /
++ dirty->region_page_size, sizeof(u64));
+
+ dev_dbg(dev,
+ "Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
+diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
+index 79fe2e66bb4986..6b94cc0bf45b44 100644
+--- a/drivers/vfio/pci/pds/lm.c
++++ b/drivers/vfio/pci/pds/lm.c
+@@ -92,8 +92,10 @@ static void pds_vfio_put_lm_file(struct pds_vfio_lm_file *lm_file)
+ {
+ mutex_lock(&lm_file->lock);
+
++ lm_file->disabled = true;
+ lm_file->size = 0;
+ lm_file->alloc_size = 0;
++ lm_file->filep->f_pos = 0;
+
+ /* Free scatter list of file pages */
+ sg_free_table(&lm_file->sg_table);
+@@ -183,6 +185,12 @@ static ssize_t pds_vfio_save_read(struct file *filp, char __user *buf,
+ pos = &filp->f_pos;
+
+ mutex_lock(&lm_file->lock);
++
++ if (lm_file->disabled) {
++ done = -ENODEV;
++ goto out_unlock;
++ }
++
+ if (*pos > lm_file->size) {
+ done = -EINVAL;
+ goto out_unlock;
+@@ -283,6 +291,11 @@ static ssize_t pds_vfio_restore_write(struct file *filp, const char __user *buf,
+
+ mutex_lock(&lm_file->lock);
+
++ if (lm_file->disabled) {
++ done = -ENODEV;
++ goto out_unlock;
++ }
++
+ while (len) {
+ size_t page_offset;
+ struct page *page;
+diff --git a/drivers/vfio/pci/pds/lm.h b/drivers/vfio/pci/pds/lm.h
+index 13be893198b743..9511b1afc6a112 100644
+--- a/drivers/vfio/pci/pds/lm.h
++++ b/drivers/vfio/pci/pds/lm.h
+@@ -27,6 +27,7 @@ struct pds_vfio_lm_file {
+ struct scatterlist *last_offset_sg; /* Iterator */
+ unsigned int sg_last_entry;
+ unsigned long last_offset;
++ bool disabled;
+ };
+
+ struct pds_vfio_pci_device;
+diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
+index ab4b5958e4131c..caffa1a2cf591e 100644
+--- a/drivers/vfio/pci/pds/pci_drv.c
++++ b/drivers/vfio/pci/pds/pci_drv.c
+@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
+ * VFIO_DEVICE_STATE_RUNNING.
+ */
+ if (deferred_reset_needed) {
+- spin_lock(&pds_vfio->reset_lock);
++ mutex_lock(&pds_vfio->reset_mutex);
+ pds_vfio->deferred_reset = true;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ }
+ }
+
+diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
+index 649b18ee394bb7..a286ebcc711262 100644
+--- a/drivers/vfio/pci/pds/vfio_dev.c
++++ b/drivers/vfio/pci/pds/vfio_dev.c
+@@ -29,33 +29,33 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
+ void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
+ {
+ again:
+- spin_lock(&pds_vfio->reset_lock);
++ mutex_lock(&pds_vfio->reset_mutex);
+ if (pds_vfio->deferred_reset) {
+ pds_vfio->deferred_reset = false;
++ pds_vfio_put_restore_file(pds_vfio);
++ pds_vfio_put_save_file(pds_vfio);
+ if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
+- pds_vfio_put_restore_file(pds_vfio);
+- pds_vfio_put_save_file(pds_vfio);
+ pds_vfio_dirty_disable(pds_vfio, false);
+ }
+ pds_vfio->state = pds_vfio->deferred_reset_state;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ goto again;
+ }
+ mutex_unlock(&pds_vfio->state_mutex);
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ }
+
+ void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
+ {
+- spin_lock(&pds_vfio->reset_lock);
++ mutex_lock(&pds_vfio->reset_mutex);
+ pds_vfio->deferred_reset = true;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+ if (!mutex_trylock(&pds_vfio->state_mutex)) {
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ return;
+ }
+- spin_unlock(&pds_vfio->reset_lock);
++ mutex_unlock(&pds_vfio->reset_mutex);
+ pds_vfio_state_mutex_unlock(pds_vfio);
+ }
+
+@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
+
+ pds_vfio->vf_id = vf_id;
+
++ mutex_init(&pds_vfio->state_mutex);
++ mutex_init(&pds_vfio->reset_mutex);
++
+ vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
+ vdev->mig_ops = &pds_vfio_lm_ops;
+ vdev->log_ops = &pds_vfio_log_ops;
+@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
+ return 0;
+ }
+
++static void pds_vfio_release_device(struct vfio_device *vdev)
++{
++ struct pds_vfio_pci_device *pds_vfio =
++ container_of(vdev, struct pds_vfio_pci_device,
++ vfio_coredev.vdev);
++
++ mutex_destroy(&pds_vfio->state_mutex);
++ mutex_destroy(&pds_vfio->reset_mutex);
++ vfio_pci_core_release_dev(vdev);
++}
++
+ static int pds_vfio_open_device(struct vfio_device *vdev)
+ {
+ struct pds_vfio_pci_device *pds_vfio =
+@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
+ if (err)
+ return err;
+
+- mutex_init(&pds_vfio->state_mutex);
+ pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
+ pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
+
+@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev)
+ pds_vfio_put_save_file(pds_vfio);
+ pds_vfio_dirty_disable(pds_vfio, true);
+ mutex_unlock(&pds_vfio->state_mutex);
+- mutex_destroy(&pds_vfio->state_mutex);
+ vfio_pci_core_close_device(vdev);
+ }
+
+ static const struct vfio_device_ops pds_vfio_ops = {
+ .name = "pds-vfio",
+ .init = pds_vfio_init_device,
+- .release = vfio_pci_core_release_dev,
++ .release = pds_vfio_release_device,
+ .open_device = pds_vfio_open_device,
+ .close_device = pds_vfio_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
+index b8f2d667608f3b..e7b01080a1ec3a 100644
+--- a/drivers/vfio/pci/pds/vfio_dev.h
++++ b/drivers/vfio/pci/pds/vfio_dev.h
+@@ -18,7 +18,7 @@ struct pds_vfio_pci_device {
+ struct pds_vfio_dirty dirty;
+ struct mutex state_mutex; /* protect migration state */
+ enum vfio_device_mig_state state;
+- spinlock_t reset_lock; /* protect reset_done flow */
++ struct mutex reset_mutex; /* protect reset_done flow */
+ u8 deferred_reset;
+ enum vfio_device_mig_state deferred_reset_state;
+ struct notifier_block nb;
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index cb5b7f865d5856..e727941f589de5 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -71,6 +71,8 @@ static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
++ case PCI_DEVICE_ID_INTEL_DSA_SPR0:
++ case PCI_DEVICE_ID_INTEL_IAX_SPR0:
+ return true;
+ default:
+ return false;
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index 1929103ee59a3d..a8f259bc2f4d0c 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -778,25 +778,26 @@ static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+ }
+
+ struct vfio_pci_fill_info {
+- struct vfio_pci_dependent_device __user *devices;
+- struct vfio_pci_dependent_device __user *devices_end;
+ struct vfio_device *vdev;
++ struct vfio_pci_dependent_device *devices;
++ int nr_devices;
+ u32 count;
+ u32 flags;
+ };
+
+ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+ {
+- struct vfio_pci_dependent_device info = {
+- .segment = pci_domain_nr(pdev->bus),
+- .bus = pdev->bus->number,
+- .devfn = pdev->devfn,
+- };
++ struct vfio_pci_dependent_device *info;
+ struct vfio_pci_fill_info *fill = data;
+
+- fill->count++;
+- if (fill->devices >= fill->devices_end)
+- return 0;
++ /* The topology changed since we counted devices */
++ if (fill->count >= fill->nr_devices)
++ return -EAGAIN;
++
++ info = &fill->devices[fill->count++];
++ info->segment = pci_domain_nr(pdev->bus);
++ info->bus = pdev->bus->number;
++ info->devfn = pdev->devfn;
+
+ if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
+ struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
+@@ -809,19 +810,19 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+ */
+ vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
+ if (!vdev) {
+- info.devid = VFIO_PCI_DEVID_NOT_OWNED;
++ info->devid = VFIO_PCI_DEVID_NOT_OWNED;
+ } else {
+ int id = vfio_iommufd_get_dev_id(vdev, iommufd);
+
+ if (id > 0)
+- info.devid = id;
++ info->devid = id;
+ else if (id == -ENOENT)
+- info.devid = VFIO_PCI_DEVID_OWNED;
++ info->devid = VFIO_PCI_DEVID_OWNED;
+ else
+- info.devid = VFIO_PCI_DEVID_NOT_OWNED;
++ info->devid = VFIO_PCI_DEVID_NOT_OWNED;
+ }
+ /* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
+- if (info.devid == VFIO_PCI_DEVID_NOT_OWNED)
++ if (info->devid == VFIO_PCI_DEVID_NOT_OWNED)
+ fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
+ } else {
+ struct iommu_group *iommu_group;
+@@ -830,13 +831,10 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+ if (!iommu_group)
+ return -EPERM; /* Cannot reset non-isolated devices */
+
+- info.group_id = iommu_group_id(iommu_group);
++ info->group_id = iommu_group_id(iommu_group);
+ iommu_group_put(iommu_group);
+ }
+
+- if (copy_to_user(fill->devices, &info, sizeof(info)))
+- return -EFAULT;
+- fill->devices++;
+ return 0;
+ }
+
+@@ -1258,10 +1256,11 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ {
+ unsigned long minsz =
+ offsetofend(struct vfio_pci_hot_reset_info, count);
++ struct vfio_pci_dependent_device *devices = NULL;
+ struct vfio_pci_hot_reset_info hdr;
+ struct vfio_pci_fill_info fill = {};
+ bool slot = false;
+- int ret = 0;
++ int ret, count = 0;
+
+ if (copy_from_user(&hdr, arg, minsz))
+ return -EFAULT;
+@@ -1277,9 +1276,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ else if (pci_probe_reset_bus(vdev->pdev->bus))
+ return -ENODEV;
+
+- fill.devices = arg->devices;
+- fill.devices_end = arg->devices +
+- (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]);
++ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
++ &count, slot);
++ if (ret)
++ return ret;
++
++ if (count > (hdr.argsz - sizeof(hdr)) / sizeof(*devices)) {
++ hdr.count = count;
++ ret = -ENOSPC;
++ goto header;
++ }
++
++ devices = kcalloc(count, sizeof(*devices), GFP_KERNEL);
++ if (!devices)
++ return -ENOMEM;
++
++ fill.devices = devices;
++ fill.nr_devices = count;
+ fill.vdev = &vdev->vdev;
+
+ if (vfio_device_cdev_opened(&vdev->vdev))
+@@ -1291,16 +1304,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ &fill, slot);
+ mutex_unlock(&vdev->vdev.dev_set->lock);
+ if (ret)
+- return ret;
++ goto out;
++
++ if (copy_to_user(arg->devices, devices,
++ sizeof(*devices) * fill.count)) {
++ ret = -EFAULT;
++ goto out;
++ }
+
+ hdr.count = fill.count;
+ hdr.flags = fill.flags;
+- if (copy_to_user(arg, &hdr, minsz))
+- return -EFAULT;
+
+- if (fill.count > fill.devices - arg->devices)
+- return -ENOSPC;
+- return 0;
++header:
++ if (copy_to_user(arg, &hdr, minsz))
++ ret = -EFAULT;
++out:
++ kfree(devices);
++ return ret;
+ }
+
+ static int
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index cbb4bcbfbf83d9..620134041b4881 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -90,22 +90,28 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
+
+ if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
+ struct vfio_pci_irq_ctx *ctx;
++ struct eventfd_ctx *trigger;
+
+ ctx = vfio_irq_ctx_get(vdev, 0);
+ if (WARN_ON_ONCE(!ctx))
+ return;
+- eventfd_signal(ctx->trigger, 1);
++
++ trigger = READ_ONCE(ctx->trigger);
++ if (likely(trigger))
++ eventfd_signal(trigger, 1);
+ }
+ }
+
+ /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
+-bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_irq_ctx *ctx;
+ unsigned long flags;
+ bool masked_changed = false;
+
++ lockdep_assert_held(&vdev->igate);
++
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+@@ -143,6 +149,17 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+ return masked_changed;
+ }
+
++bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
++{
++ bool mask_changed;
++
++ mutex_lock(&vdev->igate);
++ mask_changed = __vfio_pci_intx_mask(vdev);
++ mutex_unlock(&vdev->igate);
++
++ return mask_changed;
++}
++
+ /*
+ * If this is triggered by an eventfd, we can't call eventfd_signal
+ * or else we'll deadlock on the eventfd wait queue. Return >0 when
+@@ -194,12 +211,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+ return ret;
+ }
+
+-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+ {
++ lockdep_assert_held(&vdev->igate);
++
+ if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ vfio_send_intx_eventfd(vdev, NULL);
+ }
+
++void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
++{
++ mutex_lock(&vdev->igate);
++ __vfio_pci_intx_unmask(vdev);
++ mutex_unlock(&vdev->igate);
++}
++
+ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ {
+ struct vfio_pci_core_device *vdev = dev_id;
+@@ -231,97 +257,102 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+ return ret;
+ }
+
+-static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
++static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
++ struct eventfd_ctx *trigger)
+ {
++ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_irq_ctx *ctx;
++ unsigned long irqflags;
++ char *name;
++ int ret;
+
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+- if (!vdev->pdev->irq)
++ if (!pdev->irq)
+ return -ENODEV;
+
++ name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
++ if (!name)
++ return -ENOMEM;
++
+ ctx = vfio_irq_ctx_alloc(vdev, 0);
+- if (!ctx)
++ if (!ctx) {
++ kfree(name);
+ return -ENOMEM;
++ }
++
++ ctx->name = name;
++ ctx->trigger = trigger;
+
+ /*
+- * If the virtual interrupt is masked, restore it. Devices
+- * supporting DisINTx can be masked at the hardware level
+- * here, non-PCI-2.3 devices will have to wait until the
+- * interrupt is enabled.
++ * Fill the initial masked state based on virq_disabled. After
++ * enable, changing the DisINTx bit in vconfig directly changes INTx
++ * masking. igate prevents races during setup; once running, masked
++ * is protected via irqlock.
++ *
++ * Devices supporting DisINTx also reflect the current mask state in
++ * the physical DisINTx bit, which is not affected during IRQ setup.
++ *
++ * Devices without DisINTx support require an exclusive interrupt.
++ * IRQ masking is performed at the IRQ chip. Again, igate protects
++ * against races during setup, and IRQ handlers and irqfds are not
++ * yet active; therefore masked is stable and can be used to
++ * conditionally auto-enable the IRQ.
++ *
++ * irq_type must be stable while the IRQ handler is registered,
++ * therefore it must be set before request_irq().
+ */
+ ctx->masked = vdev->virq_disabled;
+- if (vdev->pci_2_3)
+- pci_intx(vdev->pdev, !ctx->masked);
++ if (vdev->pci_2_3) {
++ pci_intx(pdev, !ctx->masked);
++ irqflags = IRQF_SHARED;
++ } else {
++ irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
++ }
+
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
++ ret = request_irq(pdev->irq, vfio_intx_handler,
++ irqflags, ctx->name, vdev);
++ if (ret) {
++ vdev->irq_type = VFIO_PCI_NUM_IRQS;
++ kfree(name);
++ vfio_irq_ctx_free(vdev, ctx, 0);
++ return ret;
++ }
++
+ return 0;
+ }
+
+-static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
++static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
++ struct eventfd_ctx *trigger)
+ {
+ struct pci_dev *pdev = vdev->pdev;
+- unsigned long irqflags = IRQF_SHARED;
+ struct vfio_pci_irq_ctx *ctx;
+- struct eventfd_ctx *trigger;
+- unsigned long flags;
+- int ret;
++ struct eventfd_ctx *old;
+
+ ctx = vfio_irq_ctx_get(vdev, 0);
+ if (WARN_ON_ONCE(!ctx))
+ return -EINVAL;
+
+- if (ctx->trigger) {
+- free_irq(pdev->irq, vdev);
+- kfree(ctx->name);
+- eventfd_ctx_put(ctx->trigger);
+- ctx->trigger = NULL;
+- }
+-
+- if (fd < 0) /* Disable only */
+- return 0;
++ old = ctx->trigger;
+
+- ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
+- pci_name(pdev));
+- if (!ctx->name)
+- return -ENOMEM;
++ WRITE_ONCE(ctx->trigger, trigger);
+
+- trigger = eventfd_ctx_fdget(fd);
+- if (IS_ERR(trigger)) {
+- kfree(ctx->name);
+- return PTR_ERR(trigger);
+- }
+-
+- ctx->trigger = trigger;
+-
+- if (!vdev->pci_2_3)
+- irqflags = 0;
+-
+- ret = request_irq(pdev->irq, vfio_intx_handler,
+- irqflags, ctx->name, vdev);
+- if (ret) {
+- ctx->trigger = NULL;
+- kfree(ctx->name);
+- eventfd_ctx_put(trigger);
+- return ret;
++ /* Releasing an old ctx requires synchronizing in-flight users */
++ if (old) {
++ synchronize_irq(pdev->irq);
++ vfio_virqfd_flush_thread(&ctx->unmask);
++ eventfd_ctx_put(old);
+ }
+
+- /*
+- * INTx disable will stick across the new irq setup,
+- * disable_irq won't.
+- */
+- spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && ctx->masked)
+- disable_irq_nosync(pdev->irq);
+- spin_unlock_irqrestore(&vdev->irqlock, flags);
+-
+ return 0;
+ }
+
+ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
+ {
++ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_irq_ctx *ctx;
+
+ ctx = vfio_irq_ctx_get(vdev, 0);
+@@ -329,10 +360,13 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
+ if (ctx) {
+ vfio_virqfd_disable(&ctx->unmask);
+ vfio_virqfd_disable(&ctx->mask);
++ free_irq(pdev->irq, vdev);
++ if (ctx->trigger)
++ eventfd_ctx_put(ctx->trigger);
++ kfree(ctx->name);
++ vfio_irq_ctx_free(vdev, ctx, 0);
+ }
+- vfio_intx_set_signal(vdev, -1);
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+- vfio_irq_ctx_free(vdev, ctx, 0);
+ }
+
+ /*
+@@ -560,11 +594,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_pci_intx_unmask(vdev);
++ __vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t unmask = *(uint8_t *)data;
+ if (unmask)
+- vfio_pci_intx_unmask(vdev);
++ __vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
+ int32_t fd = *(int32_t *)data;
+@@ -591,11 +625,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- vfio_pci_intx_mask(vdev);
++ __vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t mask = *(uint8_t *)data;
+ if (mask)
+- vfio_pci_intx_mask(vdev);
++ __vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ return -ENOTTY; /* XXX implement me */
+ }
+@@ -616,19 +650,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++ struct eventfd_ctx *trigger = NULL;
+ int32_t fd = *(int32_t *)data;
+ int ret;
+
+- if (is_intx(vdev))
+- return vfio_intx_set_signal(vdev, fd);
++ if (fd >= 0) {
++ trigger = eventfd_ctx_fdget(fd);
++ if (IS_ERR(trigger))
++ return PTR_ERR(trigger);
++ }
+
+- ret = vfio_intx_enable(vdev);
+- if (ret)
+- return ret;
++ if (is_intx(vdev))
++ ret = vfio_intx_set_signal(vdev, trigger);
++ else
++ ret = vfio_intx_enable(vdev, trigger);
+
+- ret = vfio_intx_set_signal(vdev, fd);
+- if (ret)
+- vfio_intx_disable(vdev);
++ if (ret && trigger)
++ eventfd_ctx_put(trigger);
+
+ return ret;
+ }
+diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
+index 665197caed89e4..31636d1414a049 100644
+--- a/drivers/vfio/platform/vfio_platform_irq.c
++++ b/drivers/vfio/platform/vfio_platform_irq.c
+@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
+ return 0;
+ }
+
++/*
++ * The trigger eventfd is guaranteed valid in the interrupt path
++ * and protected by the igate mutex when triggered via ioctl.
++ */
++static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
++{
++ if (likely(irq_ctx->trigger))
++ eventfd_signal(irq_ctx->trigger, 1);
++}
++
+ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+ {
+ struct vfio_platform_irq *irq_ctx = dev_id;
+@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
+ spin_unlock_irqrestore(&irq_ctx->lock, flags);
+
+ if (ret == IRQ_HANDLED)
+- eventfd_signal(irq_ctx->trigger, 1);
++ vfio_send_eventfd(irq_ctx);
+
+ return ret;
+ }
+@@ -164,52 +174,40 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
+ {
+ struct vfio_platform_irq *irq_ctx = dev_id;
+
+- eventfd_signal(irq_ctx->trigger, 1);
++ vfio_send_eventfd(irq_ctx);
+
+ return IRQ_HANDLED;
+ }
+
+ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
+- int fd, irq_handler_t handler)
++ int fd)
+ {
+ struct vfio_platform_irq *irq = &vdev->irqs[index];
+ struct eventfd_ctx *trigger;
+- int ret;
+
+ if (irq->trigger) {
+- irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+- free_irq(irq->hwirq, irq);
+- kfree(irq->name);
++ disable_irq(irq->hwirq);
+ eventfd_ctx_put(irq->trigger);
+ irq->trigger = NULL;
+ }
+
+ if (fd < 0) /* Disable only */
+ return 0;
+- irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)",
+- irq->hwirq, vdev->name);
+- if (!irq->name)
+- return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+- if (IS_ERR(trigger)) {
+- kfree(irq->name);
++ if (IS_ERR(trigger))
+ return PTR_ERR(trigger);
+- }
+
+ irq->trigger = trigger;
+
+- irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
+- ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
+- if (ret) {
+- kfree(irq->name);
+- eventfd_ctx_put(trigger);
+- irq->trigger = NULL;
+- return ret;
+- }
+-
+- if (!irq->masked)
+- enable_irq(irq->hwirq);
++ /*
++ * irq->masked effectively provides nested disables within the overall
++ * enable relative to trigger. Specifically, request_irq() is called
++ * with NO_AUTOEN; therefore the IRQ is initially disabled. The user
++ * may only further disable the IRQ with a MASK operation because
++ * irq->masked is initially false.
++ */
++ enable_irq(irq->hwirq);
+
+ return 0;
+ }
+@@ -228,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
+ handler = vfio_irq_handler;
+
+ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
+- return vfio_set_trigger(vdev, index, -1, handler);
++ return vfio_set_trigger(vdev, index, -1);
+
+ if (start != 0 || count != 1)
+ return -EINVAL;
+@@ -236,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+
+- return vfio_set_trigger(vdev, index, fd, handler);
++ return vfio_set_trigger(vdev, index, fd);
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+@@ -260,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+ unsigned start, unsigned count, uint32_t flags,
+ void *data) = NULL;
+
++ /*
++ * For compatibility, errors from request_irq() are local to the
++ * SET_IRQS path and reflected in the name pointer. This allows,
++ * for example, polling mode fallback for an exclusive IRQ failure.
++ */
++ if (IS_ERR(vdev->irqs[index].name))
++ return PTR_ERR(vdev->irqs[index].name);
++
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = vfio_platform_set_irq_mask;
+@@ -280,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
+
+ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+ {
+- int cnt = 0, i;
++ int cnt = 0, i, ret = 0;
+
+ while (vdev->get_irq(vdev, cnt) >= 0)
+ cnt++;
+@@ -292,37 +298,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+
+ for (i = 0; i < cnt; i++) {
+ int hwirq = vdev->get_irq(vdev, i);
++ irq_handler_t handler = vfio_irq_handler;
+
+- if (hwirq < 0)
++ if (hwirq < 0) {
++ ret = -EINVAL;
+ goto err;
++ }
+
+ spin_lock_init(&vdev->irqs[i].lock);
+
+ vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
+
+- if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
++ if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
+ vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
+ | VFIO_IRQ_INFO_AUTOMASKED;
++ handler = vfio_automasked_irq_handler;
++ }
+
+ vdev->irqs[i].count = 1;
+ vdev->irqs[i].hwirq = hwirq;
+ vdev->irqs[i].masked = false;
++ vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT,
++ "vfio-irq[%d](%s)", hwirq,
++ vdev->name);
++ if (!vdev->irqs[i].name) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
++ vdev->irqs[i].name, &vdev->irqs[i]);
++ if (ret) {
++ kfree(vdev->irqs[i].name);
++ vdev->irqs[i].name = ERR_PTR(ret);
++ }
+ }
+
+ vdev->num_irqs = cnt;
+
+ return 0;
+ err:
++ for (--i; i >= 0; i--) {
++ if (!IS_ERR(vdev->irqs[i].name)) {
++ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++ kfree(vdev->irqs[i].name);
++ }
++ }
+ kfree(vdev->irqs);
+- return -EINVAL;
++ return ret;
+ }
+
+ void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
+ {
+ int i;
+
+- for (i = 0; i < vdev->num_irqs; i++)
+- vfio_set_trigger(vdev, i, -1, NULL);
++ for (i = 0; i < vdev->num_irqs; i++) {
++ vfio_virqfd_disable(&vdev->irqs[i].mask);
++ vfio_virqfd_disable(&vdev->irqs[i].unmask);
++ if (!IS_ERR(vdev->irqs[i].name)) {
++ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
++ if (vdev->irqs[i].trigger)
++ eventfd_ctx_put(vdev->irqs[i].trigger);
++ kfree(vdev->irqs[i].name);
++ }
++ }
+
+ vdev->num_irqs = 0;
+ kfree(vdev->irqs);
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index a94ec6225d31ad..5f9e7e47707839 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -364,7 +364,6 @@ static void tce_iommu_release(void *iommu_data)
+ if (!tbl)
+ continue;
+
+- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+ tce_iommu_free_table(container, tbl);
+ }
+
+@@ -720,6 +719,8 @@ static long tce_iommu_remove_window(struct tce_container *container,
+
+ BUG_ON(!tbl->it_size);
+
++ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
++
+ /* Detach groups from IOMMUs */
+ list_for_each_entry(tcegrp, &container->group_list, next) {
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+@@ -738,7 +739,6 @@ static long tce_iommu_remove_window(struct tce_container *container,
+ }
+
+ /* Free table */
+- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+ tce_iommu_free_table(container, tbl);
+ container->tables[num] = NULL;
+
+@@ -1197,9 +1197,14 @@ static void tce_iommu_release_ownership(struct tce_container *container,
+ return;
+ }
+
+- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+- if (container->tables[i])
++ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
++ if (container->tables[i]) {
++ tce_iommu_clear(container, container->tables[i],
++ container->tables[i]->it_offset,
++ container->tables[i]->it_size);
+ table_group->ops->unset_window(table_group, i);
++ }
++ }
+ }
+
+ static long tce_iommu_take_ownership(struct tce_container *container,
+diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
+index 29c564b7a6e13e..53226913380197 100644
+--- a/drivers/vfio/virqfd.c
++++ b/drivers/vfio/virqfd.c
+@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_struct *work)
+ virqfd->thread(virqfd->opaque, virqfd->data);
+ }
+
++static void virqfd_flush_inject(struct work_struct *work)
++{
++ struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
++
++ flush_work(&virqfd->inject);
++}
++
+ int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque,
+
+ INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
+ INIT_WORK(&virqfd->inject, virqfd_inject);
++ INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
+
+ irqfd = fdget(fd);
+ if (!irqfd.file) {
+@@ -213,3 +221,16 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd)
+ flush_workqueue(vfio_irqfd_cleanup_wq);
+ }
+ EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
++
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&virqfd_lock, flags);
++ if (*pvirqfd && (*pvirqfd)->thread)
++ queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
++ spin_unlock_irqrestore(&virqfd_lock, flags);
++
++ flush_workqueue(vfio_irqfd_cleanup_wq);
++}
++EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index abef0619c7901a..99813232c25e98 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -497,10 +497,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
+ vq_err(vq, "Faulted on vhost_scsi_send_event\n");
+ }
+
+-static void vhost_scsi_evt_work(struct vhost_work *work)
++static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
+ {
+- struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+- vs_event_work);
+ struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ struct vhost_scsi_evt *evt, *t;
+ struct llist_node *llnode;
+@@ -508,12 +506,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
+ mutex_lock(&vq->mutex);
+ llnode = llist_del_all(&vs->vs_event_list);
+ llist_for_each_entry_safe(evt, t, llnode, list) {
+- vhost_scsi_do_evt_work(vs, evt);
++ if (!drop)
++ vhost_scsi_do_evt_work(vs, evt);
+ vhost_scsi_free_evt(vs, evt);
+ }
+ mutex_unlock(&vq->mutex);
+ }
+
++static void vhost_scsi_evt_work(struct vhost_work *work)
++{
++ struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
++ vs_event_work);
++ vhost_scsi_complete_events(vs, false);
++}
++
+ static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
+ {
+ struct iov_iter *iter = &cmd->saved_iter;
+@@ -1012,20 +1018,23 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+ } else {
+- struct vhost_scsi_tpg **vs_tpg, *tpg;
+-
+- vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
+-
+- tpg = READ_ONCE(vs_tpg[*vc->target]);
+- if (unlikely(!tpg)) {
+- vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+- } else {
+- if (tpgp)
+- *tpgp = tpg;
+- ret = 0;
++ struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;
++
++ if (vc->target) {
++ /* validated at handler entry */
++ vs_tpg = vhost_vq_get_backend(vq);
++ tpg = READ_ONCE(vs_tpg[*vc->target]);
++ if (unlikely(!tpg)) {
++ vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
++ goto out;
++ }
+ }
+- }
+
++ if (tpgp)
++ *tpgp = tpg;
++ ret = 0;
++ }
++out:
+ return ret;
+ }
+
+@@ -1509,7 +1518,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ }
+
+ llist_add(&evt->list, &vs->vs_event_list);
+- vhost_vq_work_queue(vq, &vs->vs_event_work);
++ if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
++ vhost_scsi_complete_events(vs, true);
+ }
+
+ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 78379ffd23363d..c29a195a0175c0 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -191,11 +191,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+ if (irq < 0)
+ return;
+
+- irq_bypass_unregister_producer(&vq->call_ctx.producer);
+ if (!vq->call_ctx.ctx)
+ return;
+
+- vq->call_ctx.producer.token = vq->call_ctx.ctx;
+ vq->call_ctx.producer.irq = irq;
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ if (unlikely(ret))
+@@ -627,6 +625,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ vq->last_avail_idx = vq_state.split.avail_index;
+ }
+ break;
++ case VHOST_SET_VRING_CALL:
++ if (vq->call_ctx.ctx) {
++ if (ops->get_status(vdpa) &
++ VIRTIO_CONFIG_S_DRIVER_OK)
++ vhost_vdpa_unsetup_vq_irq(v, idx);
++ vq->call_ctx.producer.token = NULL;
++ }
++ break;
+ }
+
+ r = vhost_vring_ioctl(&v->vdev, cmd, argp);
+@@ -659,13 +665,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ cb.callback = vhost_vdpa_virtqueue_cb;
+ cb.private = vq;
+ cb.trigger = vq->call_ctx.ctx;
++ vq->call_ctx.producer.token = vq->call_ctx.ctx;
++ if (ops->get_status(vdpa) &
++ VIRTIO_CONFIG_S_DRIVER_OK)
++ vhost_vdpa_setup_vq_irq(v, idx);
+ } else {
+ cb.callback = NULL;
+ cb.private = NULL;
+ cb.trigger = NULL;
+ }
+ ops->set_vq_cb(vdpa, idx, &cb);
+- vhost_vdpa_setup_vq_irq(v, idx);
+ break;
+
+ case VHOST_SET_VRING_NUM:
+@@ -1316,6 +1325,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
+ for (i = 0; i < nvqs; i++) {
+ vqs[i] = &v->vqs[i];
+ vqs[i]->handle_kick = handle_vq_kick;
++ vqs[i]->call_ctx.ctx = NULL;
+ }
+ vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
+ vhost_vdpa_process_iotlb_msg);
+@@ -1378,13 +1388,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+
+ notify = ops->get_vq_notification(vdpa, index);
+
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+- PFN_DOWN(notify.addr), PAGE_SIZE,
+- vma->vm_page_prot))
+- return VM_FAULT_SIGBUS;
+-
+- return VM_FAULT_NOPAGE;
++ return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
+ }
+
+ static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+@@ -1511,7 +1515,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+
+ err:
+ put_device(&v->dev);
+- ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ return r;
+ }
+
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index e0c181ad17e316..d0238bd741b089 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -276,21 +276,36 @@ void vhost_vq_flush(struct vhost_virtqueue *vq)
+ EXPORT_SYMBOL_GPL(vhost_vq_flush);
+
+ /**
+- * vhost_worker_flush - flush a worker
++ * __vhost_worker_flush - flush a worker
+ * @worker: worker to flush
+ *
+- * This does not use RCU to protect the worker, so the device or worker
+- * mutex must be held.
++ * The worker's mutex must be held.
+ */
+-static void vhost_worker_flush(struct vhost_worker *worker)
++static void __vhost_worker_flush(struct vhost_worker *worker)
+ {
+ struct vhost_flush_struct flush;
+
++ if (!worker->attachment_cnt || worker->killed)
++ return;
++
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+
+ vhost_worker_queue(worker, &flush.work);
++ /*
++ * Drop mutex in case our worker is killed and it needs to take the
++ * mutex to force cleanup.
++ */
++ mutex_unlock(&worker->mutex);
+ wait_for_completion(&flush.wait_event);
++ mutex_lock(&worker->mutex);
++}
++
++static void vhost_worker_flush(struct vhost_worker *worker)
++{
++ mutex_lock(&worker->mutex);
++ __vhost_worker_flush(worker);
++ mutex_unlock(&worker->mutex);
+ }
+
+ void vhost_dev_flush(struct vhost_dev *dev)
+@@ -298,15 +313,8 @@ void vhost_dev_flush(struct vhost_dev *dev)
+ struct vhost_worker *worker;
+ unsigned long i;
+
+- xa_for_each(&dev->worker_xa, i, worker) {
+- mutex_lock(&worker->mutex);
+- if (!worker->attachment_cnt) {
+- mutex_unlock(&worker->mutex);
+- continue;
+- }
++ xa_for_each(&dev->worker_xa, i, worker)
+ vhost_worker_flush(worker);
+- mutex_unlock(&worker->mutex);
+- }
+ }
+ EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
+@@ -392,7 +400,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
+ __vhost_vq_meta_reset(vq);
+ }
+
+-static bool vhost_worker(void *data)
++static bool vhost_run_work_list(void *data)
+ {
+ struct vhost_worker *worker = data;
+ struct vhost_work *work, *work_next;
+@@ -417,6 +425,40 @@ static bool vhost_worker(void *data)
+ return !!node;
+ }
+
++static void vhost_worker_killed(void *data)
++{
++ struct vhost_worker *worker = data;
++ struct vhost_dev *dev = worker->dev;
++ struct vhost_virtqueue *vq;
++ int i, attach_cnt = 0;
++
++ mutex_lock(&worker->mutex);
++ worker->killed = true;
++
++ for (i = 0; i < dev->nvqs; i++) {
++ vq = dev->vqs[i];
++
++ mutex_lock(&vq->mutex);
++ if (worker ==
++ rcu_dereference_check(vq->worker,
++ lockdep_is_held(&vq->mutex))) {
++ rcu_assign_pointer(vq->worker, NULL);
++ attach_cnt++;
++ }
++ mutex_unlock(&vq->mutex);
++ }
++
++ worker->attachment_cnt -= attach_cnt;
++ if (attach_cnt)
++ synchronize_rcu();
++ /*
++ * Finish vhost_worker_flush calls and any other work items that snuck in
++ * before the synchronize_rcu.
++ */
++ vhost_run_work_list(worker);
++ mutex_unlock(&worker->mutex);
++}
++
+ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
+ {
+ kfree(vq->indirect);
+@@ -631,9 +673,11 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+ if (!worker)
+ return NULL;
+
++ worker->dev = dev;
+ snprintf(name, sizeof(name), "vhost-%d", current->pid);
+
+- vtsk = vhost_task_create(vhost_worker, worker, name);
++ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
++ worker, name);
+ if (!vtsk)
+ goto free_worker;
+
+@@ -664,22 +708,37 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ {
+ struct vhost_worker *old_worker;
+
+- old_worker = rcu_dereference_check(vq->worker,
+- lockdep_is_held(&vq->dev->mutex));
+-
+ mutex_lock(&worker->mutex);
+- worker->attachment_cnt++;
+- mutex_unlock(&worker->mutex);
++ if (worker->killed) {
++ mutex_unlock(&worker->mutex);
++ return;
++ }
++
++ mutex_lock(&vq->mutex);
++
++ old_worker = rcu_dereference_check(vq->worker,
++ lockdep_is_held(&vq->mutex));
+ rcu_assign_pointer(vq->worker, worker);
++ worker->attachment_cnt++;
+
+- if (!old_worker)
++ if (!old_worker) {
++ mutex_unlock(&vq->mutex);
++ mutex_unlock(&worker->mutex);
+ return;
++ }
++ mutex_unlock(&vq->mutex);
++ mutex_unlock(&worker->mutex);
++
+ /*
+ * Take the worker mutex to make sure we see the work queued from
+ * device wide flushes which doesn't use RCU for execution.
+ */
+ mutex_lock(&old_worker->mutex);
+- old_worker->attachment_cnt--;
++ if (old_worker->killed) {
++ mutex_unlock(&old_worker->mutex);
++ return;
++ }
++
+ /*
+ * We don't want to call synchronize_rcu for every vq during setup
+ * because it will slow down VM startup. If we haven't done
+@@ -690,6 +749,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ mutex_lock(&vq->mutex);
+ if (!vhost_vq_get_backend(vq) && !vq->kick) {
+ mutex_unlock(&vq->mutex);
++
++ old_worker->attachment_cnt--;
+ mutex_unlock(&old_worker->mutex);
+ /*
+ * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
+@@ -705,7 +766,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ /* Make sure new vq queue/flush/poll calls see the new worker */
+ synchronize_rcu();
+ /* Make sure whatever was queued gets run */
+- vhost_worker_flush(old_worker);
++ __vhost_worker_flush(old_worker);
++ old_worker->attachment_cnt--;
+ mutex_unlock(&old_worker->mutex);
+ }
+
+@@ -754,10 +816,16 @@ static int vhost_free_worker(struct vhost_dev *dev,
+ return -ENODEV;
+
+ mutex_lock(&worker->mutex);
+- if (worker->attachment_cnt) {
++ if (worker->attachment_cnt || worker->killed) {
+ mutex_unlock(&worker->mutex);
+ return -EBUSY;
+ }
++ /*
++ * A flush might have raced and snuck in before attachment_cnt was set
++ * to zero. Make sure any pending flushes have completed before
++ * freeing.
++ */
++ __vhost_worker_flush(worker);
+ mutex_unlock(&worker->mutex);
+
+ vhost_worker_destroy(dev, worker);
+@@ -2799,9 +2867,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ r = vhost_get_avail_idx(vq, &avail_idx);
+ if (unlikely(r))
+ return false;
++
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++ if (vq->avail_idx != vq->last_avail_idx) {
++ /* Since we have updated avail_idx, the following
++ * call to vhost_get_vq_desc() will read available
++ * ring entries. Make sure that read happens after
++ * the avail_idx read.
++ */
++ smp_rmb();
++ return false;
++ }
+
+- return vq->avail_idx == vq->last_avail_idx;
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+
+@@ -2838,9 +2916,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+ &vq->avail->idx, r);
+ return false;
+ }
++
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
++ if (vq->avail_idx != vq->last_avail_idx) {
++ /* Since we have updated avail_idx, the following
++ * call to vhost_get_vq_desc() will read available
++ * ring entries. Make sure that read happens after
++ * the avail_idx read.
++ */
++ smp_rmb();
++ return true;
++ }
+
+- return vq->avail_idx != vq->last_avail_idx;
++ return false;
+ }
+ EXPORT_SYMBOL_GPL(vhost_enable_notify);
+
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index f60d5f7bef944e..8014d2b3595039 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -28,12 +28,14 @@ struct vhost_work {
+
+ struct vhost_worker {
+ struct vhost_task *vtsk;
++ struct vhost_dev *dev;
+ /* Used to serialize device wide flushing with worker swapping. */
+ struct mutex mutex;
+ struct llist_head work_list;
+ u64 kcov_handle;
+ u32 id;
+ int attachment_cnt;
++ bool killed;
+ };
+
+ /* Poll a file (eventfd or socket) */
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 817d377a3f360f..d94a06008ff647 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -438,6 +438,7 @@ static struct virtio_transport vhost_transport = {
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
++ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+
+ .read_skb = virtio_transport_read_skb,
+ },
+@@ -655,6 +656,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ }
+
+ vsock->guest_cid = 0; /* no CID assigned yet */
++ vsock->seqpacket_allow = false;
+
+ atomic_set(&vsock->queued_replies, 0);
+
+@@ -798,8 +800,7 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+ goto err;
+ }
+
+- if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+- vsock->seqpacket_allow = true;
++ vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
+index b694d7669d3200..1eb755a94940a8 100644
+--- a/drivers/video/Kconfig
++++ b/drivers/video/Kconfig
+@@ -11,6 +11,10 @@ config APERTURE_HELPERS
+ Support tracking and hand-over of aperture ownership. Required
+ by graphics drivers for firmware-provided framebuffers.
+
++config SCREEN_INFO
++ bool
++ default n
++
+ config STI_CORE
+ bool
+ depends on PARISC
+diff --git a/drivers/video/Makefile b/drivers/video/Makefile
+index 6bbc039508995e..6bbf87c1b579e6 100644
+--- a/drivers/video/Makefile
++++ b/drivers/video/Makefile
+@@ -1,12 +1,16 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+ obj-$(CONFIG_APERTURE_HELPERS) += aperture.o
++obj-$(CONFIG_SCREEN_INFO) += screen_info.o
+ obj-$(CONFIG_STI_CORE) += sticore.o
+ obj-$(CONFIG_VGASTATE) += vgastate.o
+ obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o
+ obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o
+ obj-$(CONFIG_HDMI) += hdmi.o
+
++screen_info-y := screen_info_generic.o
++screen_info-$(CONFIG_PCI) += screen_info_pci.o
++
+ obj-$(CONFIG_VT) += console/
+ obj-$(CONFIG_FB_STI) += console/
+ obj-$(CONFIG_LOGO) += logo/
+diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
+index 1cdc8543310b4d..b8ff7046510eb9 100644
+--- a/drivers/video/backlight/da9052_bl.c
++++ b/drivers/video/backlight/da9052_bl.c
+@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
+ wleds->led_reg = platform_get_device_id(pdev)->driver_data;
+ wleds->state = DA9052_WLEDS_OFF;
+
++ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = DA9052_MAX_BRIGHTNESS;
+
+diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
+index 9c980f2571ee35..014877b5a9848f 100644
+--- a/drivers/video/backlight/ktz8866.c
++++ b/drivers/video/backlight/ktz8866.c
+@@ -97,20 +97,20 @@ static void ktz8866_init(struct ktz8866 *ktz)
+ {
+ unsigned int val = 0;
+
+- if (of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
++ if (!of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
+ ktz8866_write(ktz, BL_EN, BIT(val) - 1);
+ else
+ /* Enable all 6 current sinks if the number of current sinks isn't specified. */
+ ktz8866_write(ktz, BL_EN, BIT(6) - 1);
+
+- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
++ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
+ if (val <= 128)
+ ktz8866_write(ktz, BL_CFG2, BIT(7) | (ilog2(val) << 3) | PWM_HYST);
+ else
+ ktz8866_write(ktz, BL_CFG2, BIT(7) | ((5 + val / 64) << 3) | PWM_HYST);
+ }
+
+- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
++ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
+ if (val == 0)
+ ktz8866_write(ktz, BL_DIMMING, 0);
+ else {
+diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
+index 8fcb62be597b84..26ff4178cc1612 100644
+--- a/drivers/video/backlight/lm3630a_bl.c
++++ b/drivers/video/backlight/lm3630a_bl.c
+@@ -180,7 +180,7 @@ static int lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
+
+ pchip->pwmd_state.enabled = pchip->pwmd_state.duty_cycle ? true : false;
+
+- return pwm_apply_state(pchip->pwmd, &pchip->pwmd_state);
++ return pwm_apply_might_sleep(pchip->pwmd, &pchip->pwmd_state);
+ }
+
+ /* update and get brightness */
+@@ -233,7 +233,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness |= rval;
+- goto out;
++ return brightness;
+ }
+
+ /* disable sleep */
+@@ -244,11 +244,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
+ rval = lm3630a_read(pchip, REG_BRT_A);
+ if (rval < 0)
+ goto out_i2c_err;
+- brightness = rval;
++ return rval;
+
+-out:
+- bl->props.brightness = brightness;
+- return bl->props.brightness;
+ out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access register\n");
+ return 0;
+@@ -310,7 +307,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+ if (rval < 0)
+ goto out_i2c_err;
+ brightness |= rval;
+- goto out;
++ return brightness;
+ }
+
+ /* disable sleep */
+@@ -321,11 +318,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
+ rval = lm3630a_read(pchip, REG_BRT_B);
+ if (rval < 0)
+ goto out_i2c_err;
+- brightness = rval;
++ return rval;
+
+-out:
+- bl->props.brightness = brightness;
+- return bl->props.brightness;
+ out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access register\n");
+ return 0;
+@@ -343,6 +337,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
+ struct backlight_properties props;
+ const char *label;
+
++ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
+ props.brightness = pdata->leda_init_brt;
+diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
+index 5246c171497d6f..564f62acd7211e 100644
+--- a/drivers/video/backlight/lm3639_bl.c
++++ b/drivers/video/backlight/lm3639_bl.c
+@@ -338,6 +338,7 @@ static int lm3639_probe(struct i2c_client *client)
+ }
+
+ /* backlight */
++ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = pdata->init_brt_led;
+ props.max_brightness = pdata->max_brt_led;
+diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
+index da1f124db69c02..7075bfab59c4dc 100644
+--- a/drivers/video/backlight/lp855x_bl.c
++++ b/drivers/video/backlight/lp855x_bl.c
+@@ -234,7 +234,7 @@ static int lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
+ state.duty_cycle = div_u64(br * state.period, max_br);
+ state.enabled = state.duty_cycle;
+
+- return pwm_apply_state(lp->pwm, &state);
++ return pwm_apply_might_sleep(lp->pwm, &state);
+ }
+
+ static int lp855x_bl_update_status(struct backlight_device *bl)
+diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
+index d1a14b0db265b7..31f97230ee506a 100644
+--- a/drivers/video/backlight/lp8788_bl.c
++++ b/drivers/video/backlight/lp8788_bl.c
+@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
+ int init_brt;
+ char *name;
+
++ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = MAX_BRIGHTNESS;
+
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index a51fbab9636805..35c716e9043c3d 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -103,7 +103,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
+ pwm_get_state(pb->pwm, &state);
+ state.duty_cycle = compute_duty_cycle(pb, brightness, &state);
+ state.enabled = true;
+- pwm_apply_state(pb->pwm, &state);
++ pwm_apply_might_sleep(pb->pwm, &state);
+
+ pwm_backlight_power_on(pb);
+ } else {
+@@ -120,7 +120,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
+ * inactive output.
+ */
+ state.enabled = !pb->power_supply && !pb->enable_gpio;
+- pwm_apply_state(pb->pwm, &state);
++ pwm_apply_might_sleep(pb->pwm, &state);
+ }
+
+ if (pb->notify_after)
+@@ -528,7 +528,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
+ if (!state.period && (data->pwm_period_ns > 0))
+ state.period = data->pwm_period_ns;
+
+- ret = pwm_apply_state(pb->pwm, &state);
++ ret = pwm_apply_might_sleep(pb->pwm, &state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
+ ret);
+@@ -626,9 +626,14 @@ static void pwm_backlight_remove(struct platform_device *pdev)
+ {
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
++ struct pwm_state state;
+
+ backlight_device_unregister(bl);
+ pwm_backlight_power_off(pb);
++ pwm_get_state(pb->pwm, &state);
++ state.duty_cycle = 0;
++ state.enabled = false;
++ pwm_apply_might_sleep(pb->pwm, &state);
+
+ if (pb->exit)
+ pb->exit(&pdev->dev);
+@@ -638,8 +643,13 @@ static void pwm_backlight_shutdown(struct platform_device *pdev)
+ {
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
++ struct pwm_state state;
+
+ pwm_backlight_power_off(pb);
++ pwm_get_state(pb->pwm, &state);
++ state.duty_cycle = 0;
++ state.enabled = false;
++ pwm_apply_might_sleep(pb->pwm, &state);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -647,12 +657,24 @@ static int pwm_backlight_suspend(struct device *dev)
+ {
+ struct backlight_device *bl = dev_get_drvdata(dev);
+ struct pwm_bl_data *pb = bl_get_data(bl);
++ struct pwm_state state;
+
+ if (pb->notify)
+ pb->notify(pb->dev, 0);
+
+ pwm_backlight_power_off(pb);
+
++ /*
++ * Note that disabling the PWM doesn't guarantee that the output stays
++ * at its inactive state. However, without the PWM disabled, the PWM
++ * driver refuses to suspend. So disable here even though this might
++ * enable the backlight on poorly designed boards.
++ */
++ pwm_get_state(pb->pwm, &state);
++ state.duty_cycle = 0;
++ state.enabled = false;
++ pwm_apply_might_sleep(pb->pwm, &state);
++
+ if (pb->notify_after)
+ pb->notify_after(pb->dev, 0);
+
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index c29754b65c0ec9..325298573e1201 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1761,8 +1761,8 @@ config FB_COBALT
+ depends on FB && MIPS_COBALT
+
+ config FB_SH7760
+- bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
+- depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
++ tristate "SH7760/SH7763/SH7720/SH7721 LCDC support"
++ depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
+ select FB_IOMEM_HELPERS
+ help
+diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
+index 163d2c9f951c3a..f0600f6ca2548e 100644
+--- a/drivers/video/fbdev/acornfb.c
++++ b/drivers/video/fbdev/acornfb.c
+@@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+
+ static const struct fb_ops acornfb_ops = {
+ .owner = THIS_MODULE,
+- FB_IOMEM_DEFAULT_OPS,
++ FB_DEFAULT_IOMEM_OPS,
+ .fb_check_var = acornfb_check_var,
+ .fb_set_par = acornfb_set_par,
+ .fb_setcolreg = acornfb_setcolreg,
+diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
+index 5ac1b063753110..acb19045d30468 100644
+--- a/drivers/video/fbdev/core/Kconfig
++++ b/drivers/video/fbdev/core/Kconfig
+@@ -4,6 +4,7 @@
+ #
+
+ config FB_CORE
++ select FB_IOMEM_FOPS
+ select VIDEO_CMDLINE
+ tristate
+
+@@ -144,12 +145,23 @@ config FB_DMAMEM_HELPERS
+ select FB_SYS_FOPS
+ select FB_SYS_IMAGEBLIT
+
++config FB_DMAMEM_HELPERS_DEFERRED
++ bool
++ depends on FB_CORE
++ select FB_DEFERRED_IO
++ select FB_DMAMEM_HELPERS
++
++config FB_IOMEM_FOPS
++ tristate
++ depends on FB_CORE
++
+ config FB_IOMEM_HELPERS
+ bool
+ depends on FB_CORE
+ select FB_CFB_COPYAREA
+ select FB_CFB_FILLRECT
+ select FB_CFB_IMAGEBLIT
++ select FB_IOMEM_FOPS
+
+ config FB_SYSMEM_HELPERS
+ bool
+diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
+index edfde2948e5c87..d165055ec3fc50 100644
+--- a/drivers/video/fbdev/core/Makefile
++++ b/drivers/video/fbdev/core/Makefile
+@@ -3,7 +3,7 @@ obj-$(CONFIG_FB_NOTIFY) += fb_notify.o
+ obj-$(CONFIG_FB_CORE) += fb.o
+ fb-y := fb_info.o \
+ fbmem.o fbcmap.o \
+- modedb.o fbcvt.o fb_cmdline.o fb_io_fops.o
++ modedb.o fbcvt.o fb_cmdline.o
+ ifdef CONFIG_FB
+ fb-y += fb_backlight.o fbmon.o
+ endif
+@@ -26,6 +26,7 @@ endif
+ obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
+ obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
+ obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
++obj-$(CONFIG_FB_IOMEM_FOPS) += fb_io_fops.o
+ obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
+ obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
+ obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index 274f5d0fa24714..b9607d5a370d4e 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -132,11 +132,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
+ return 0;
+
+ inode_lock(inode);
+- /* Kill off the delayed work */
+- cancel_delayed_work_sync(&info->deferred_work);
+-
+- /* Run it immediately */
+- schedule_delayed_work(&info->deferred_work, 0);
++ flush_delayed_work(&info->deferred_work);
+ inode_unlock(inode);
+
+ return 0;
+@@ -200,7 +196,7 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
+ */
+ static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+ {
+- unsigned long offset = vmf->address - vmf->vma->vm_start;
++ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
+ struct page *page = vmf->page;
+
+ file_update_time(vmf->vma->vm_file);
+@@ -317,7 +313,7 @@ static void fb_deferred_io_lastclose(struct fb_info *info)
+ struct page *page;
+ int i;
+
+- cancel_delayed_work_sync(&info->deferred_work);
++ flush_delayed_work(&info->deferred_work);
+
+ /* clear out the mapping that we setup */
+ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c
+index 5985e5e1b040c1..871b829521af35 100644
+--- a/drivers/video/fbdev/core/fb_io_fops.c
++++ b/drivers/video/fbdev/core/fb_io_fops.c
+@@ -131,3 +131,6 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
+ return (cnt) ? cnt : err;
+ }
+ EXPORT_SYMBOL(fb_io_write);
++
++MODULE_DESCRIPTION("Fbdev helpers for framebuffers in I/O memory");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index f157a5a1dffcf3..405d587450ef84 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -847,6 +847,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ return err;
+
+ fbcon_add_cursor_work(info);
++ } else if (vc) {
++ set_blitting_type(vc, info);
+ }
+
+ con2fb_map[unit] = newidx;
+@@ -2398,11 +2400,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ int resize, ret, old_userfont, old_width, old_height, old_charcount;
+- char *old_data = NULL;
++ u8 *old_data = vc->vc_font.data;
+
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+- if (p->userfont)
+- old_data = vc->vc_font.data;
+ vc->vc_font.data = (void *)(p->fontdata = data);
+ old_userfont = p->userfont;
+ if ((p->userfont = userfont))
+@@ -2436,13 +2436,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
+ update_screen(vc);
+ }
+
+- if (old_data && (--REFCOUNT(old_data) == 0))
++ if (old_userfont && (--REFCOUNT(old_data) == 0))
+ kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
+ return 0;
+
+ err_out:
+ p->fontdata = old_data;
+- vc->vc_font.data = (void *)old_data;
++ vc->vc_font.data = old_data;
+
+ if (userfont) {
+ p->userfont = old_userfont;
+diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
+index 79e5bfbdd34c26..0a26399dbc899d 100644
+--- a/drivers/video/fbdev/core/fbmon.c
++++ b/drivers/video/fbdev/core/fbmon.c
+@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
+ int fb_videomode_from_videomode(const struct videomode *vm,
+ struct fb_videomode *fbmode)
+ {
+- unsigned int htotal, vtotal;
++ unsigned int htotal, vtotal, total;
+
+ fbmode->xres = vm->hactive;
+ fbmode->left_margin = vm->hback_porch;
+@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
+ vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
+ vm->vsync_len;
+ /* prevent division by zero */
+- if (htotal && vtotal) {
+- fbmode->refresh = vm->pixelclock / (htotal * vtotal);
++ total = htotal * vtotal;
++ if (total) {
++ fbmode->refresh = vm->pixelclock / total;
+ /* a mode must have htotal and vtotal != 0 or it is invalid */
+ } else {
+ fbmode->refresh = 0;
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index f9b4ddd592ce4d..88ac24202a1ff4 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -571,15 +571,10 @@ static int efifb_probe(struct platform_device *dev)
+ break;
+ }
+
+- err = sysfs_create_groups(&dev->dev.kobj, efifb_groups);
+- if (err) {
+- pr_err("efifb: cannot add sysfs attrs\n");
+- goto err_unmap;
+- }
+ err = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (err < 0) {
+ pr_err("efifb: cannot allocate colormap\n");
+- goto err_groups;
++ goto err_unmap;
+ }
+
+ if (efifb_pci_dev)
+@@ -603,8 +598,6 @@ static int efifb_probe(struct platform_device *dev)
+ pm_runtime_put(&efifb_pci_dev->dev);
+
+ fb_dealloc_cmap(&info->cmap);
+-err_groups:
+- sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
+ err_unmap:
+ if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+ iounmap(info->screen_base);
+@@ -624,12 +617,12 @@ static void efifb_remove(struct platform_device *pdev)
+
+ /* efifb_destroy takes care of info cleanup */
+ unregister_framebuffer(info);
+- sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
+ }
+
+ static struct platform_driver efifb_driver = {
+ .driver = {
+ .name = "efi-framebuffer",
++ .dev_groups = efifb_groups,
+ },
+ .probe = efifb_probe,
+ .remove_new = efifb_remove,
+diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
+index 7fbd9f069ac2ed..0bced82fa4940d 100644
+--- a/drivers/video/fbdev/fsl-diu-fb.c
++++ b/drivers/video/fbdev/fsl-diu-fb.c
+@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
+ * Workaround for failed writes to the desc register of planes.
+ * Needed with MPC5121 DIU rev 2.0 silicon.
+ */
+-void wr_reg_wa(u32 *reg, u32 val)
++static void wr_reg_wa(u32 *reg, u32 val)
+ {
+ do {
+ out_be32(reg, val);
+diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
+index 406c1383cbda91..1461a909e17ed2 100644
+--- a/drivers/video/fbdev/hpfb.c
++++ b/drivers/video/fbdev/hpfb.c
+@@ -343,6 +343,7 @@ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)
+ if (hpfb_init_one(paddr, vaddr)) {
+ if (d->scode >= DIOII_SCBASE)
+ iounmap((void *)vaddr);
++ release_mem_region(d->resource.start, resource_size(&d->resource));
+ return -ENOMEM;
+ }
+ return 0;
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index f4c8677488fb88..f5eaa58a808fb8 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info)
+ if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
+ || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
+ printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
+- framebuffer_release(info);
+ return -ENODEV;
+ }
+
+@@ -1451,14 +1450,11 @@ static int init_imstt(struct fb_info *info)
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_YPAN;
+
+- if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+- framebuffer_release(info);
++ if (fb_alloc_cmap(&info->cmap, 0, 0))
+ return -ENODEV;
+- }
+
+ if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
+- framebuffer_release(info);
+ return -ENODEV;
+ }
+
+@@ -1498,8 +1494,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ if (!request_mem_region(addr, size, "imsttfb")) {
+ printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
+- framebuffer_release(info);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto release_info;
+ }
+
+ switch (pdev->device) {
+@@ -1516,36 +1512,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
+ "contact maintainer.\n", pdev->device);
+ ret = -ENODEV;
+- goto error;
++ goto release_mem_region;
+ }
+
+ info->fix.smem_start = addr;
+ info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 0x400000 : 0x800000);
+ if (!info->screen_base)
+- goto error;
++ goto release_mem_region;
+ info->fix.mmio_start = addr + 0x800000;
+ par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+- goto error;
++ goto unmap_screen_base;
+ par->cmap_regs_phys = addr + 0x840000;
+ par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+- goto error;
++ goto unmap_dc_regs;
+ info->pseudo_palette = par->palette;
+ ret = init_imstt(info);
+ if (ret)
+- goto error;
++ goto unmap_cmap_regs;
+
+ pci_set_drvdata(pdev, info);
+- return ret;
++ return 0;
+
+-error:
+- if (par->dc_regs)
+- iounmap(par->dc_regs);
+- if (info->screen_base)
+- iounmap(info->screen_base);
++unmap_cmap_regs:
++ iounmap(par->cmap_regs);
++unmap_dc_regs:
++ iounmap(par->dc_regs);
++unmap_screen_base:
++ iounmap(info->screen_base);
++release_mem_region:
+ release_mem_region(addr, size);
++release_info:
+ framebuffer_release(info);
+ return ret;
+ }
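The hunk above replaces imsttfb's single catch-all error label with an unwind ladder. A self-contained sketch of the pattern, with all names hypothetical stand-ins for the driver's request_mem_region()/ioremap()/init steps:

#include <errno.h>
#include <stdbool.h>

static bool claim_region(void) { return true; }
static void release_region_stub(void) { }
static bool map_regs(void) { return true; }
static void unmap_regs(void) { }
static bool init_hw(void) { return true; }

/* Each failure jumps to the label that undoes exactly what has been
 * acquired so far, in reverse order of acquisition. */
static int probe_sketch(void)
{
        int ret;

        if (!claim_region())
                return -ENODEV;

        if (!map_regs()) {
                ret = -ENOMEM;
                goto release;
        }

        if (!init_hw()) {
                ret = -ENODEV;
                goto unmap;
        }

        return 0;       /* success: everything stays acquired */

unmap:
        unmap_regs();
release:
        release_region_stub();
        return ret;
}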
+diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
+index 84201c9608d36c..7042a43b81d856 100644
+--- a/drivers/video/fbdev/imxfb.c
++++ b/drivers/video/fbdev/imxfb.c
+@@ -42,6 +42,7 @@
+ #include <video/videomode.h>
+
+ #define PCR_TFT (1 << 31)
++#define PCR_COLOR (1 << 30)
+ #define PCR_BPIX_8 (3 << 25)
+ #define PCR_BPIX_12 (4 << 25)
+ #define PCR_BPIX_16 (5 << 25)
+@@ -150,6 +151,12 @@ enum imxfb_type {
+ IMX21_FB,
+ };
+
++enum imxfb_panel_type {
++ PANEL_TYPE_MONOCHROME,
++ PANEL_TYPE_CSTN,
++ PANEL_TYPE_TFT,
++};
++
+ struct imxfb_info {
+ struct platform_device *pdev;
+ void __iomem *regs;
+@@ -157,6 +164,7 @@ struct imxfb_info {
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+ enum imxfb_type devtype;
++ enum imxfb_panel_type panel_type;
+ bool enabled;
+
+ /*
+@@ -444,6 +452,13 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ if (!is_imx1_fb(fbi) && imxfb_mode->aus_mode)
+ fbi->lauscr = LAUSCR_AUS_MODE;
+
++ if (imxfb_mode->pcr & PCR_TFT)
++ fbi->panel_type = PANEL_TYPE_TFT;
++ else if (imxfb_mode->pcr & PCR_COLOR)
++ fbi->panel_type = PANEL_TYPE_CSTN;
++ else
++ fbi->panel_type = PANEL_TYPE_MONOCHROME;
++
+ /*
+ * Copy the RGB parameters for this display
+ * from the machine specific parameters.
+@@ -596,6 +611,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ {
+ struct imxfb_info *fbi = info->par;
+ u32 ymax_mask = is_imx1_fb(fbi) ? YMAX_MASK_IMX1 : YMAX_MASK_IMX21;
++ u8 left_margin_low;
+
+ pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
+ var->xres, var->hsync_len,
+@@ -604,6 +620,13 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ var->yres, var->vsync_len,
+ var->upper_margin, var->lower_margin);
+
++ if (fbi->panel_type == PANEL_TYPE_TFT)
++ left_margin_low = 3;
++ else if (fbi->panel_type == PANEL_TYPE_CSTN)
++ left_margin_low = 2;
++ else
++ left_margin_low = 0;
++
+ #if DEBUG_VAR
+ if (var->xres < 16 || var->xres > 1024)
+ printk(KERN_ERR "%s: invalid xres %d\n",
+@@ -611,7 +634,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+ if (var->hsync_len < 1 || var->hsync_len > 64)
+ printk(KERN_ERR "%s: invalid hsync_len %d\n",
+ info->fix.id, var->hsync_len);
+- if (var->left_margin < 3 || var->left_margin > 255)
++ if (var->left_margin < left_margin_low || var->left_margin > 255)
+ printk(KERN_ERR "%s: invalid left_margin %d\n",
+ info->fix.id, var->left_margin);
+ if (var->right_margin < 1 || var->right_margin > 255)
+@@ -637,7 +660,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
+
+ writel(HCR_H_WIDTH(var->hsync_len - 1) |
+ HCR_H_WAIT_1(var->right_margin - 1) |
+- HCR_H_WAIT_2(var->left_margin - 3),
++ HCR_H_WAIT_2(var->left_margin - left_margin_low),
+ fbi->regs + LCDC_HCR);
+
+ writel(VCR_V_WIDTH(var->vsync_len) |
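A compact sketch of the PCR decode the imxfb change introduces (illustrative; macro values copied from the hunk above, function and enum names hypothetical): the TFT bit wins, otherwise the COLOR bit distinguishes colour STN from monochrome panels.

#include <stdint.h>

#define PCR_TFT   (1u << 31)
#define PCR_COLOR (1u << 30)

enum panel_type { PANEL_MONOCHROME, PANEL_CSTN, PANEL_TFT };

static enum panel_type decode_panel(uint32_t pcr)
{
        if (pcr & PCR_TFT)
                return PANEL_TFT;
        if (pcr & PCR_COLOR)
                return PANEL_CSTN;
        return PANEL_MONOCHROME;
}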
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index fa943612c4e2b3..3a2427eb29f236 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -2403,6 +2403,7 @@ static void pxafb_remove(struct platform_device *dev)
+ info = &fbi->fb;
+
+ pxafb_overlay_exit(fbi);
++ cancel_work_sync(&fbi->task);
+ unregister_framebuffer(info);
+
+ pxafb_disable_controller(fbi);
+diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
+index b5f84bd4804b83..205ccbe8f17207 100644
+--- a/drivers/video/fbdev/savage/savagefb_driver.c
++++ b/drivers/video/fbdev/savage/savagefb_driver.c
+@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,
+
+ DBG("savagefb_check_var");
+
++ if (!var->pixclock)
++ return -EINVAL;
++
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ switch (var->bits_per_pixel) {
+@@ -2273,7 +2276,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ if (info->var.xres_virtual > 0x1000)
+ info->var.xres_virtual = 0x1000;
+ #endif
+- savagefb_check_var(&info->var, info);
++ err = savagefb_check_var(&info->var, info);
++ if (err)
++ goto failed;
++
+ savagefb_set_fix(info);
+
+ /*
+diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+index 1364dafaadb1d4..2a4794ec194737 100644
+--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
++++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+@@ -1575,7 +1575,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
+ */
+ info->fix = sh_mobile_lcdc_overlay_fix;
+ snprintf(info->fix.id, sizeof(info->fix.id),
+- "SH Mobile LCDC Overlay %u", ovl->index);
++ "SHMobile ovl %u", ovl->index);
+ info->fix.smem_start = ovl->dma_handle;
+ info->fix.smem_len = ovl->fb_size;
+ info->fix.line_length = ovl->pitch;
+diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
+index a8fb41f1a2580e..09329072004f40 100644
+--- a/drivers/video/fbdev/sis/init301.c
++++ b/drivers/video/fbdev/sis/init301.c
+@@ -172,7 +172,7 @@ static const unsigned char SiS_HiTVGroup3_2[] = {
+ };
+
+ /* 301C / 302ELV extended Part2 TV registers (4 tap scaler) */
+-
++#ifdef CONFIG_FB_SIS_315
+ static const unsigned char SiS_Part2CLVX_1[] = {
+ 0x00,0x00,
+ 0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F,0x7D,0x20,0x04,0x7F,0x7D,0x1F,0x06,0x7E,
+@@ -245,7 +245,6 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */
+ 0xFF,0xFF,
+ };
+
+-#ifdef CONFIG_FB_SIS_315
+ /* 661 et al LCD data structure (2.03.00) */
+ static const unsigned char SiS_LCDStruct661[] = {
+ /* 1024x768 */
+diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
+index 0f5374f6ef0559..ad39571f913491 100644
+--- a/drivers/video/fbdev/sis/sis_main.c
++++ b/drivers/video/fbdev/sis/sis_main.c
+@@ -184,7 +184,7 @@ static void sisfb_search_mode(char *name, bool quiet)
+ {
+ unsigned int j = 0, xres = 0, yres = 0, depth = 0, rate = 0;
+ int i = 0;
+- char strbuf[16], strbuf1[20];
++ char strbuf[24], strbuf1[20];
+ char *nameptr = name;
+
+ /* We don't know the hardware specs yet and there is no ivideo */
+@@ -1475,6 +1475,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+
+ vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+
++ if (!var->pixclock)
++ return -EINVAL;
+ pixclock = var->pixclock;
+
+ if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
+diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
+index 5ae48e36fccb4e..1a4f90ea7d5a8c 100644
+--- a/drivers/video/fbdev/ssd1307fb.c
++++ b/drivers/video/fbdev/ssd1307fb.c
+@@ -347,7 +347,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
+
+ pwm_init_state(par->pwm, &pwmstate);
+ pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
+- pwm_apply_state(par->pwm, &pwmstate);
++ pwm_apply_might_sleep(par->pwm, &pwmstate);
+
+ /* Enable the PWM */
+ pwm_enable(par->pwm);
+diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
+index c0edceea0a7934..a21581b40256c8 100644
+--- a/drivers/video/fbdev/vesafb.c
++++ b/drivers/video/fbdev/vesafb.c
+@@ -243,6 +243,7 @@ static int vesafb_setup(char *options)
+
+ static int vesafb_probe(struct platform_device *dev)
+ {
++ struct screen_info *si = &screen_info;
+ struct fb_info *info;
+ struct vesafb_par *par;
+ int i, err;
+@@ -255,17 +256,17 @@ static int vesafb_probe(struct platform_device *dev)
+ fb_get_options("vesafb", &option);
+ vesafb_setup(option);
+
+- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
++ if (si->orig_video_isVGA != VIDEO_TYPE_VLFB)
+ return -ENODEV;
+
+- vga_compat = (screen_info.capabilities & 2) ? 0 : 1;
+- vesafb_fix.smem_start = screen_info.lfb_base;
+- vesafb_defined.bits_per_pixel = screen_info.lfb_depth;
++ vga_compat = !__screen_info_vbe_mode_nonvga(si);
++ vesafb_fix.smem_start = si->lfb_base;
++ vesafb_defined.bits_per_pixel = si->lfb_depth;
+ if (15 == vesafb_defined.bits_per_pixel)
+ vesafb_defined.bits_per_pixel = 16;
+- vesafb_defined.xres = screen_info.lfb_width;
+- vesafb_defined.yres = screen_info.lfb_height;
+- vesafb_fix.line_length = screen_info.lfb_linelength;
++ vesafb_defined.xres = si->lfb_width;
++ vesafb_defined.yres = si->lfb_height;
++ vesafb_fix.line_length = si->lfb_linelength;
+ vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+
+@@ -277,7 +278,7 @@ static int vesafb_probe(struct platform_device *dev)
+ /* size_total -- all video memory we have. Used for mtrr
+ * entries, resource allocation and bounds
+ * checking. */
+- size_total = screen_info.lfb_size * 65536;
++ size_total = si->lfb_size * 65536;
+ if (vram_total)
+ size_total = vram_total * 1024 * 1024;
+ if (size_total < size_vmode)
+@@ -297,7 +298,7 @@ static int vesafb_probe(struct platform_device *dev)
+ vesafb_fix.smem_len = size_remap;
+
+ #ifndef __i386__
+- screen_info.vesapm_seg = 0;
++ si->vesapm_seg = 0;
+ #endif
+
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+@@ -317,23 +318,26 @@ static int vesafb_probe(struct platform_device *dev)
+ par = info->par;
+ info->pseudo_palette = par->pseudo_palette;
+
+- par->base = screen_info.lfb_base;
++ par->base = si->lfb_base;
+ par->size = size_total;
+
+ printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+- vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
++ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
++ vesafb_fix.line_length, si->pages);
+
+- if (screen_info.vesapm_seg) {
++ if (si->vesapm_seg) {
+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+- screen_info.vesapm_seg,screen_info.vesapm_off);
++ si->vesapm_seg, si->vesapm_off);
+ }
+
+- if (screen_info.vesapm_seg < 0xc000)
++ if (si->vesapm_seg < 0xc000)
+ ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */
+
+ if (ypan || pmi_setpal) {
++ unsigned long pmi_phys;
+ unsigned short *pmi_base;
+- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
++ pmi_phys = ((unsigned long)si->vesapm_seg << 4) + si->vesapm_off;
++ pmi_base = (unsigned short *)phys_to_virt(pmi_phys);
+ pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+ pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
+ printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+@@ -377,14 +381,14 @@ static int vesafb_probe(struct platform_device *dev)
+ vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8;
+ vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8;
+
+- vesafb_defined.red.offset = screen_info.red_pos;
+- vesafb_defined.red.length = screen_info.red_size;
+- vesafb_defined.green.offset = screen_info.green_pos;
+- vesafb_defined.green.length = screen_info.green_size;
+- vesafb_defined.blue.offset = screen_info.blue_pos;
+- vesafb_defined.blue.length = screen_info.blue_size;
+- vesafb_defined.transp.offset = screen_info.rsvd_pos;
+- vesafb_defined.transp.length = screen_info.rsvd_size;
++ vesafb_defined.red.offset = si->red_pos;
++ vesafb_defined.red.length = si->red_size;
++ vesafb_defined.green.offset = si->green_pos;
++ vesafb_defined.green.length = si->green_size;
++ vesafb_defined.blue.offset = si->blue_pos;
++ vesafb_defined.blue.length = si->blue_size;
++ vesafb_defined.transp.offset = si->rsvd_pos;
++ vesafb_defined.transp.length = si->rsvd_size;
+
+ if (vesafb_defined.bits_per_pixel <= 8) {
+ depth = vesafb_defined.green.length;
+@@ -399,14 +403,14 @@ static int vesafb_probe(struct platform_device *dev)
+ (vesafb_defined.bits_per_pixel > 8) ?
+ "Truecolor" : (vga_compat || pmi_setpal) ?
+ "Pseudocolor" : "Static Pseudocolor",
+- screen_info.rsvd_size,
+- screen_info.red_size,
+- screen_info.green_size,
+- screen_info.blue_size,
+- screen_info.rsvd_pos,
+- screen_info.red_pos,
+- screen_info.green_pos,
+- screen_info.blue_pos);
++ si->rsvd_size,
++ si->red_size,
++ si->green_size,
++ si->blue_size,
++ si->rsvd_pos,
++ si->red_pos,
++ si->green_pos,
++ si->blue_pos);
+
+ vesafb_fix.ypanstep = ypan ? 1 : 0;
+ vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0;
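The pmi_phys computation in the vesafb hunk follows the usual real-mode addressing rule; a tiny sketch (hypothetical function name): a segment:offset pair maps to the linear address (segment << 4) + offset, and the PMI table at that address holds 16-bit offsets to the "set display start" and "set palette" entry points.

#include <stdint.h>

static uintptr_t pmi_linear(uint16_t seg, uint16_t off)
{
        return ((uintptr_t)seg << 4) + off;
}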
+diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
+index 0a1bc7a4d7853c..1e04026f080918 100644
+--- a/drivers/video/fbdev/via/accel.c
++++ b/drivers/video/fbdev/via/accel.c
+@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
+
+ if (op != VIA_BITBLT_FILL) {
+ tmp = src_mem ? 0 : src_addr;
+- if (dst_addr & 0xE0000007) {
++ if (tmp & 0xE0000007) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
+ "address %X\n", tmp);
+ return -EINVAL;
+@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
+ writel(tmp, engine + 0x18);
+
+ tmp = src_mem ? 0 : src_addr;
+- if (dst_addr & 0xE0000007) {
++ if (tmp & 0xE0000007) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
+ "address %X\n", tmp);
+ return -EINVAL;
+diff --git a/drivers/video/logo/pnmtologo.c b/drivers/video/logo/pnmtologo.c
+index ada5ef6e51b7a9..87912cc35e9247 100644
+--- a/drivers/video/logo/pnmtologo.c
++++ b/drivers/video/logo/pnmtologo.c
+@@ -235,8 +235,6 @@ static void write_header(void)
+ fputs("/*\n", out);
+ fputs(" * DO NOT EDIT THIS FILE!\n", out);
+ fputs(" *\n", out);
+- fprintf(out, " * It was automatically generated from %s\n", filename);
+- fputs(" *\n", out);
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c
+new file mode 100644
+index 00000000000000..64117c6367abbe
+--- /dev/null
++++ b/drivers/video/screen_info_generic.c
+@@ -0,0 +1,146 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/export.h>
++#include <linux/ioport.h>
++#include <linux/screen_info.h>
++#include <linux/string.h>
++
++static void resource_init_named(struct resource *r,
++ resource_size_t start, resource_size_t size,
++ const char *name, unsigned int flags)
++{
++ memset(r, 0, sizeof(*r));
++
++ r->start = start;
++ r->end = start + size - 1;
++ r->name = name;
++ r->flags = flags;
++}
++
++static void resource_init_io_named(struct resource *r,
++ resource_size_t start, resource_size_t size,
++ const char *name)
++{
++ resource_init_named(r, start, size, name, IORESOURCE_IO);
++}
++
++static void resource_init_mem_named(struct resource *r,
++ resource_size_t start, resource_size_t size,
++ const char *name)
++{
++ resource_init_named(r, start, size, name, IORESOURCE_MEM);
++}
++
++static inline bool __screen_info_has_ega_gfx(unsigned int mode)
++{
++ switch (mode) {
++ case 0x0d: /* 320x200-4 */
++ case 0x0e: /* 640x200-4 */
++ case 0x0f: /* 640x350-1 */
++ case 0x10: /* 640x350-4 */
++ return true;
++ default:
++ return false;
++ }
++}
++
++static inline bool __screen_info_has_vga_gfx(unsigned int mode)
++{
++ switch (mode) {
++ case 0x10: /* 640x480-1 */
++ case 0x12: /* 640x480-4 */
++ case 0x13: /* 320x200-8 */
++ case 0x6a: /* 800x600-4 (VESA) */
++ return true;
++ default:
++ return __screen_info_has_ega_gfx(mode);
++ }
++}
++
++/**
++ * screen_info_resources() - Get resources from screen_info structure
++ * @si: the screen_info
++ * @r: pointer to an array of resource structures
++ * @num: number of elements in @r
++ *
++ * Returns:
++ * The number of resources stored in @r on success, or a negative errno code otherwise.
++ *
++ * A call to screen_info_resources() returns the resources consumed by the
++ * screen_info's device or framebuffer. The result is stored in the caller-supplied
++ * array @r with up to @num elements. The function returns the number of
++ * initialized elements.
++ */
++ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num)
++{
++ struct resource *pos = r;
++ unsigned int type = screen_info_video_type(si);
++ u64 base, size;
++
++ switch (type) {
++ case VIDEO_TYPE_MDA:
++ if (num > 0)
++ resource_init_io_named(pos++, 0x3b0, 12, "mda");
++ if (num > 1)
++ resource_init_io_named(pos++, 0x3bf, 0x01, "mda");
++ if (num > 2)
++ resource_init_mem_named(pos++, 0xb0000, 0x2000, "mda");
++ break;
++ case VIDEO_TYPE_CGA:
++ if (num > 0)
++ resource_init_io_named(pos++, 0x3d4, 0x02, "cga");
++ if (num > 1)
++ resource_init_mem_named(pos++, 0xb8000, 0x2000, "cga");
++ break;
++ case VIDEO_TYPE_EGAM:
++ if (num > 0)
++ resource_init_io_named(pos++, 0x3bf, 0x10, "ega");
++ if (num > 1)
++ resource_init_mem_named(pos++, 0xb0000, 0x8000, "ega");
++ break;
++ case VIDEO_TYPE_EGAC:
++ if (num > 0)
++ resource_init_io_named(pos++, 0x3c0, 0x20, "ega");
++ if (num > 1) {
++ if (__screen_info_has_ega_gfx(si->orig_video_mode))
++ resource_init_mem_named(pos++, 0xa0000, 0x10000, "ega");
++ else
++ resource_init_mem_named(pos++, 0xb8000, 0x8000, "ega");
++ }
++ break;
++ case VIDEO_TYPE_VGAC:
++ if (num > 0)
++ resource_init_io_named(pos++, 0x3c0, 0x20, "vga+");
++ if (num > 1) {
++ if (__screen_info_has_vga_gfx(si->orig_video_mode))
++ resource_init_mem_named(pos++, 0xa0000, 0x10000, "vga+");
++ else
++ resource_init_mem_named(pos++, 0xb8000, 0x8000, "vga+");
++ }
++ break;
++ case VIDEO_TYPE_VLFB:
++ case VIDEO_TYPE_EFI:
++ base = __screen_info_lfb_base(si);
++ if (!base)
++ break;
++ size = __screen_info_lfb_size(si, type);
++ if (!size)
++ break;
++ if (num > 0)
++ resource_init_mem_named(pos++, base, size, "lfb");
++ break;
++ case VIDEO_TYPE_PICA_S3:
++ case VIDEO_TYPE_MIPS_G364:
++ case VIDEO_TYPE_SGI:
++ case VIDEO_TYPE_TGAC:
++ case VIDEO_TYPE_SUN:
++ case VIDEO_TYPE_SUNPCI:
++ case VIDEO_TYPE_PMAC:
++ default:
++ /* not supported */
++ return -EINVAL;
++ }
++
++ return pos - r;
++}
++EXPORT_SYMBOL(screen_info_resources);
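A hedged usage sketch for the helper added above, assuming kernel context (the wrapper function name is illustrative): the caller supplies a fixed-size array and gets back the number of entries filled in, or a negative errno for unsupported video types.

static void dump_screen_info_resources(void)
{
        struct resource res[SCREEN_INFO_MAX_RESOURCES];
        ssize_t i, num;

        num = screen_info_resources(&screen_info, res, ARRAY_SIZE(res));
        if (num < 0)
                return;         /* unsupported video type */

        for (i = 0; i < num; ++i)
                pr_info("screen_info resource %zd: %pr\n", i, &res[i]);
}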
+diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c
+new file mode 100644
+index 00000000000000..6c583351714100
+--- /dev/null
++++ b/drivers/video/screen_info_pci.c
+@@ -0,0 +1,136 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/pci.h>
++#include <linux/printk.h>
++#include <linux/screen_info.h>
++#include <linux/string.h>
++
++static struct pci_dev *screen_info_lfb_pdev;
++static size_t screen_info_lfb_bar;
++static resource_size_t screen_info_lfb_offset;
++static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0);
++
++static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr)
++{
++ u64 size = __screen_info_lfb_size(si, screen_info_video_type(si));
++
++ if (screen_info_lfb_offset > resource_size(pr))
++ return false;
++ if (size > resource_size(pr))
++ return false;
++ if (resource_size(pr) - size < screen_info_lfb_offset)
++ return false;
++
++ return true;
++}
++
++void screen_info_apply_fixups(void)
++{
++ struct screen_info *si = &screen_info;
++
++ if (screen_info_lfb_pdev) {
++ struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar];
++
++ if (pr->start != screen_info_lfb_res.start) {
++ if (__screen_info_relocation_is_valid(si, pr)) {
++ /*
++ * Only update base if we have an actual
++ * relocation to a valid I/O range.
++ */
++ __screen_info_set_lfb_base(si, pr->start + screen_info_lfb_offset);
++ pr_info("Relocating firmware framebuffer to offset %pa[d] within %pr\n",
++ &screen_info_lfb_offset, pr);
++ } else {
++ pr_warn("Invalid relocating, disabling firmware framebuffer\n");
++ }
++ }
++ }
++}
++
++static void screen_info_fixup_lfb(struct pci_dev *pdev)
++{
++ unsigned int type;
++ struct resource res[SCREEN_INFO_MAX_RESOURCES];
++ size_t i, numres;
++ int ret;
++ const struct screen_info *si = &screen_info;
++
++ if (screen_info_lfb_pdev)
++ return; // already found
++
++ type = screen_info_video_type(si);
++ if (type != VIDEO_TYPE_EFI)
++ return; // only applies to EFI
++
++ ret = screen_info_resources(si, res, ARRAY_SIZE(res));
++ if (ret < 0)
++ return;
++ numres = ret;
++
++ for (i = 0; i < numres; ++i) {
++ struct resource *r = &res[i];
++ const struct resource *pr;
++
++ if (!(r->flags & IORESOURCE_MEM))
++ continue;
++ pr = pci_find_resource(pdev, r);
++ if (!pr)
++ continue;
++
++ /*
++ * We've found a PCI device with the framebuffer
++ * resource. Store away the parameters to track
++ * relocation of the framebuffer aperture.
++ */
++ screen_info_lfb_pdev = pdev;
++ screen_info_lfb_bar = pr - pdev->resource;
++ screen_info_lfb_offset = r->start - pr->start;
++ memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res));
++ }
++}
++DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16,
++ screen_info_fixup_lfb);
++
++static struct pci_dev *__screen_info_pci_dev(struct resource *res)
++{
++ struct pci_dev *pdev = NULL;
++ const struct resource *r = NULL;
++
++ if (!(res->flags & IORESOURCE_MEM))
++ return NULL;
++
++ while (!r && (pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
++ r = pci_find_resource(pdev, res);
++ }
++
++ return pdev;
++}
++
++/**
++ * screen_info_pci_dev() - Return PCI parent device that contains screen_info's framebuffer
++ * @si: the screen_info
++ *
++ * Returns:
++ * The screen_info's parent device or NULL on success, or a pointer-encoded
++ * errno value otherwise. The value NULL is not an error. It signals that no
++ * PCI device has been found.
++ */
++struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
++{
++ struct resource res[SCREEN_INFO_MAX_RESOURCES];
++ ssize_t i, numres;
++
++ numres = screen_info_resources(si, res, ARRAY_SIZE(res));
++ if (numres < 0)
++ return ERR_PTR(numres);
++
++ for (i = 0; i < numres; ++i) {
++ struct pci_dev *pdev = __screen_info_pci_dev(&res[i]);
++
++ if (pdev)
++ return pdev;
++ }
++
++ return NULL;
++}
++EXPORT_SYMBOL(screen_info_pci_dev);
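A usage sketch for screen_info_pci_dev(), assuming kernel context and the usual pci_get_*() reference semantics (the caller owns a reference on the returned device). Per the kernel-doc above, NULL is not an error; only ERR_PTR() values are. The wrapper name is hypothetical.

static int find_fb_pci_parent(void)
{
        struct pci_dev *pdev = screen_info_pci_dev(&screen_info);

        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
        if (!pdev)
                return 0;       /* not behind a PCI device; not an error */

        pr_info("firmware framebuffer behind %s\n", pci_name(pdev));
        pci_dev_put(pdev);      /* drop the reference taken during lookup */
        return 0;
}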
+diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
+index b4ad8d452e9a1a..8ef49d7be453c9 100644
+--- a/drivers/virt/acrn/mm.c
++++ b/drivers/virt/acrn/mm.c
+@@ -155,43 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ {
+ struct vm_memory_region_batch *regions_info;
+- int nr_pages, i = 0, order, nr_regions = 0;
++ int nr_pages, i, order, nr_regions = 0;
+ struct vm_memory_mapping *region_mapping;
+ struct vm_memory_region_op *vm_region;
+ struct page **pages = NULL, *page;
+ void *remap_vaddr;
+ int ret, pinned;
+ u64 user_vm_pa;
+- unsigned long pfn;
+ struct vm_area_struct *vma;
+
+ if (!vm || !memmap)
+ return -EINVAL;
+
++ /* Get the page number of the map region */
++ nr_pages = memmap->len >> PAGE_SHIFT;
++ if (!nr_pages)
++ return -EINVAL;
++
+ mmap_read_lock(current->mm);
+ vma = vma_lookup(current->mm, memmap->vma_base);
+ if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
++ unsigned long start_pfn, cur_pfn;
++ spinlock_t *ptl;
++ bool writable;
++ pte_t *ptep;
++
+ if ((memmap->vma_base + memmap->len) > vma->vm_end) {
+ mmap_read_unlock(current->mm);
+ return -EINVAL;
+ }
+
+- ret = follow_pfn(vma, memmap->vma_base, &pfn);
++ for (i = 0; i < nr_pages; i++) {
++ ret = follow_pte(vma->vm_mm,
++ memmap->vma_base + i * PAGE_SIZE,
++ &ptep, &ptl);
++ if (ret)
++ break;
++
++ cur_pfn = pte_pfn(ptep_get(ptep));
++ if (i == 0)
++ start_pfn = cur_pfn;
++ writable = !!pte_write(ptep_get(ptep));
++ pte_unmap_unlock(ptep, ptl);
++
++ /* Disallow write access if the PTE is not writable. */
++ if (!writable &&
++ (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
++ ret = -EFAULT;
++ break;
++ }
++
++ /* Disallow refcounted pages. */
++ if (pfn_valid(cur_pfn) &&
++ !PageReserved(pfn_to_page(cur_pfn))) {
++ ret = -EFAULT;
++ break;
++ }
++
++ /* Disallow non-contiguous ranges. */
++ if (cur_pfn != start_pfn + i) {
++ ret = -EINVAL;
++ break;
++ }
++ }
+ mmap_read_unlock(current->mm);
+- if (ret < 0) {
++
++ if (ret) {
+ dev_dbg(acrn_dev.this_device,
+ "Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
+ return ret;
+ }
+
+ return acrn_mm_region_add(vm, memmap->user_vm_pa,
+- PFN_PHYS(pfn), memmap->len,
++ PFN_PHYS(start_pfn), memmap->len,
+ ACRN_MEM_TYPE_WB, memmap->attr);
+ }
+ mmap_read_unlock(current->mm);
+
+- /* Get the page number of the map region */
+- nr_pages = memmap->len >> PAGE_SHIFT;
+ pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
+ if (!pages)
+ return -ENOMEM;
+@@ -235,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ mutex_unlock(&vm->regions_mapping_lock);
+
+ /* Calculate count of vm_memory_region_op */
+- while (i < nr_pages) {
++ for (i = 0; i < nr_pages; i += 1 << order) {
+ page = pages[i];
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ order = compound_order(page);
+ nr_regions++;
+- i += 1 << order;
+ }
+
+ /* Prepare the vm_memory_region_batch */
+@@ -257,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ regions_info->regions_num = nr_regions;
+ regions_info->regions_gpa = virt_to_phys(vm_region);
+ user_vm_pa = memmap->user_vm_pa;
+- i = 0;
+- while (i < nr_pages) {
++ for (i = 0; i < nr_pages; i += 1 << order) {
+ u32 region_size;
+
+ page = pages[i];
+@@ -274,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+
+ vm_region++;
+ user_vm_pa += region_size;
+- i += 1 << order;
+ }
+
+ /* Inform the ACRN Hypervisor to set up EPT mappings */
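The two loops above now step by the compound order of each head page, so every huge mapping is counted and emitted exactly once. A self-contained sketch of the stride (illustrative: orders[] stands in for compound_order(pages[i]), and is only meaningful at head-page indices, which is exactly where the stride lands):

#include <stddef.h>

static size_t count_regions(const unsigned int *orders, size_t nr_pages)
{
        size_t i, nr_regions = 0;

        for (i = 0; i < nr_pages; i += (size_t)1 << orders[i])
                nr_regions++;

        return nr_regions;
}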
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 97dbe715e96adf..5bee58ef5f1e39 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -57,6 +57,11 @@ struct snp_guest_dev {
+
+ struct snp_secrets_page_layout *layout;
+ struct snp_req_data input;
++ union {
++ struct snp_report_req report;
++ struct snp_derived_key_req derived_key;
++ struct snp_ext_report_req ext_report;
++ } req;
+ u32 *os_area_msg_seqno;
+ u8 *vmpck;
+ };
+@@ -473,8 +478,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
++ struct snp_report_req *req = &snp_dev->req.report;
+ struct snp_report_resp *resp;
+- struct snp_report_req req;
+ int rc, resp_len;
+
+ lockdep_assert_held(&snp_cmd_mutex);
+@@ -482,7 +487,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ /*
+@@ -496,7 +501,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+ return -ENOMEM;
+
+ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
+- SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
++ SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
+ resp_len);
+ if (rc)
+ goto e_free;
+@@ -511,9 +516,9 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
+
+ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++ struct snp_derived_key_req *req = &snp_dev->req.derived_key;
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
+ struct snp_derived_key_resp resp = {0};
+- struct snp_derived_key_req req;
+ int rc, resp_len;
+ /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
+ u8 buf[64 + 16];
+@@ -532,11 +537,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+ if (sizeof(buf) < resp_len)
+ return -ENOMEM;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
+- SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
++ SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
+ if (rc)
+ return rc;
+
+@@ -552,8 +557,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
+
+ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
++ struct snp_ext_report_req *req = &snp_dev->req.ext_report;
+ struct snp_guest_crypto *crypto = snp_dev->crypto;
+- struct snp_ext_report_req req;
+ struct snp_report_resp *resp;
+ int ret, npages = 0, resp_len;
+
+@@ -562,18 +567,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
+- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
++ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
+ return -EFAULT;
+
+ /* userspace does not want certificate data */
+- if (!req.certs_len || !req.certs_address)
++ if (!req->certs_len || !req->certs_address)
+ goto cmd;
+
+- if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
+- !IS_ALIGNED(req.certs_len, PAGE_SIZE))
++ if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
++ !IS_ALIGNED(req->certs_len, PAGE_SIZE))
+ return -EINVAL;
+
+- if (!access_ok((const void __user *)req.certs_address, req.certs_len))
++ if (!access_ok((const void __user *)req->certs_address, req->certs_len))
+ return -EFAULT;
+
+ /*
+@@ -582,8 +587,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ * the host. If host does not supply any certs in it, then copy
+ * zeros to indicate that certificate data was not provided.
+ */
+- memset(snp_dev->certs_data, 0, req.certs_len);
+- npages = req.certs_len >> PAGE_SHIFT;
++ memset(snp_dev->certs_data, 0, req->certs_len);
++ npages = req->certs_len >> PAGE_SHIFT;
+ cmd:
+ /*
+ * The intermediate response buffer is used while decrypting the
+@@ -597,14 +602,14 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+
+ snp_dev->input.data_npages = npages;
+ ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
+- SNP_MSG_REPORT_REQ, &req.data,
+- sizeof(req.data), resp->data, resp_len);
++ SNP_MSG_REPORT_REQ, &req->data,
++ sizeof(req->data), resp->data, resp_len);
+
+ /* If certs length is invalid then copy the returned length */
+ if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
+- req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
++ req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+
+- if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
++ if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req)))
+ ret = -EFAULT;
+ }
+
+@@ -612,8 +617,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
+ goto e_free;
+
+ if (npages &&
+- copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
+- req.certs_len)) {
++ copy_to_user((void __user *)req->certs_address, snp_dev->certs_data,
++ req->certs_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
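The sev-guest change trades per-call stack buffers for one union, sized to the largest request, embedded in the device structure and serialized by snp_cmd_mutex. A compact sketch of the idea (struct layouts and sizes here are stand-ins, not the real snp_*_req definitions):

struct report_req      { unsigned char data[96]; };
struct derived_key_req { unsigned char data[32]; };
struct ext_report_req  { unsigned char data[112]; };

/* One buffer serves all commands; the mutex that serializes commands
 * also protects the buffer, so no per-call allocation is needed. */
struct guest_dev_sketch {
        union {
                struct report_req report;
                struct derived_key_req derived_key;
                struct ext_report_req ext_report;
        } req;
};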
+diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
+index 3893dc29eb2633..71dee622b771b3 100644
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -489,13 +489,19 @@ EXPORT_SYMBOL_GPL(unregister_virtio_device);
+ int virtio_device_freeze(struct virtio_device *dev)
+ {
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
++ int ret;
+
+ virtio_config_disable(dev);
+
+ dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+
+- if (drv && drv->freeze)
+- return drv->freeze(dev);
++ if (drv && drv->freeze) {
++ ret = drv->freeze(dev);
++ if (ret) {
++ virtio_config_enable(dev);
++ return ret;
++ }
++ }
+
+ return 0;
+ }
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index c2524a7207cfae..64dfa54d702f11 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -345,8 +345,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec],
+ vqs[i]);
+- if (err)
++ if (err) {
++ vp_del_vq(vqs[i]);
+ goto error_find;
++ }
+ }
+ return 0;
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 51d8f3299c1055..80669e05bf0ee4 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1340,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
+ sizeof(struct vring_packed_desc));
+ vq->packed.vring.desc[head].id = cpu_to_le16(id);
+
+- if (vq->do_unmap) {
++ if (vq->use_dma_api) {
+ vq->packed.desc_extra[id].addr = addr;
+ vq->packed.desc_extra[id].len = total_sg *
+ sizeof(struct vring_packed_desc);
+@@ -1481,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
+ desc[i].len = cpu_to_le32(sg->length);
+ desc[i].id = cpu_to_le16(id);
+
+- if (unlikely(vq->do_unmap)) {
++ if (unlikely(vq->use_dma_api)) {
+ vq->packed.desc_extra[curr].addr = addr;
+ vq->packed.desc_extra[curr].len = sg->length;
+ vq->packed.desc_extra[curr].flags =
+@@ -1615,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
+ vq->free_head = id;
+ vq->vq.num_free += state->num;
+
+- if (unlikely(vq->do_unmap)) {
++ if (unlikely(vq->use_dma_api)) {
+ curr = id;
+ for (i = 0; i < state->num; i++) {
+ vring_unmap_extra_packed(vq,
+@@ -3126,8 +3126,10 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+- if (!vq->use_dma_api)
++ if (!vq->use_dma_api) {
++ kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+ return (dma_addr_t)virt_to_phys(ptr);
++ }
+
+ return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+ }
+@@ -3219,8 +3221,7 @@ void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+ if (!vq->use_dma_api)
+ return;
+
+- dma_sync_single_range_for_cpu(dev, addr, offset, size,
+- DMA_BIDIRECTIONAL);
++ dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+
+@@ -3246,8 +3247,7 @@ void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+ if (!vq->use_dma_api)
+ return;
+
+- dma_sync_single_range_for_device(dev, addr, offset, size,
+- DMA_BIDIRECTIONAL);
++ dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
+
+diff --git a/drivers/w1/slaves/w1_ds250x.c b/drivers/w1/slaves/w1_ds250x.c
+index 7592c7050d1d7d..cb426f7dd23d44 100644
+--- a/drivers/w1/slaves/w1_ds250x.c
++++ b/drivers/w1/slaves/w1_ds250x.c
+@@ -168,6 +168,7 @@ static int w1_eprom_add_slave(struct w1_slave *sl)
+ struct nvmem_device *nvmem;
+ struct nvmem_config nvmem_cfg = {
+ .dev = &sl->dev,
++ .add_legacy_fixed_of_cells = true,
+ .reg_read = w1_nvmem_read,
+ .type = NVMEM_TYPE_OTP,
+ .read_only = true,
+diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
+index 7a855289ff5e67..bb001c5d7f17fd 100644
+--- a/drivers/watchdog/bcm2835_wdt.c
++++ b/drivers/watchdog/bcm2835_wdt.c
+@@ -42,6 +42,7 @@
+
+ #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
+ #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
++#define WDOG_TICKS_TO_MSECS(x) ((x) * 1000 >> 16)
+
+ struct bcm2835_wdt {
+ void __iomem *base;
+@@ -140,7 +141,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
+ .info = &bcm2835_wdt_info,
+ .ops = &bcm2835_wdt_ops,
+ .min_timeout = 1,
+- .max_timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
++ .max_hw_heartbeat_ms = WDOG_TICKS_TO_MSECS(PM_WDOG_TIME_SET),
+ .timeout = WDOG_TICKS_TO_SECS(PM_WDOG_TIME_SET),
+ };
+
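The new WDOG_TICKS_TO_MSECS macro is Q16 fixed-point arithmetic: the BCM2835 watchdog counts in 1/65536-second ticks, so milliseconds are ticks * 1000 / 65536, and the multiply must happen before the shift to keep the fractional part. A standalone sketch (the 64-bit widening is extra caution for a general helper; the macro itself operates within PM_WDOG_TIME_SET's range):

#include <stdint.h>

static uint32_t wdog_ticks_to_msecs(uint32_t ticks)
{
        return (uint32_t)(((uint64_t)ticks * 1000) >> 16);
}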
+diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c
+index 4a20e07fbb699b..f00ea1b4e40b6a 100644
+--- a/drivers/watchdog/bd9576_wdt.c
++++ b/drivers/watchdog/bd9576_wdt.c
+@@ -29,7 +29,6 @@ struct bd9576_wdt_priv {
+ struct gpio_desc *gpiod_en;
+ struct device *dev;
+ struct regmap *regmap;
+- bool always_running;
+ struct watchdog_device wdd;
+ };
+
+@@ -62,10 +61,7 @@ static int bd9576_wdt_stop(struct watchdog_device *wdd)
+ {
+ struct bd9576_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+- if (!priv->always_running)
+- bd9576_wdt_disable(priv);
+- else
+- set_bit(WDOG_HW_RUNNING, &wdd->status);
++ bd9576_wdt_disable(priv);
+
+ return 0;
+ }
+@@ -264,9 +260,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
+- priv->always_running = device_property_read_bool(dev->parent,
+- "always-running");
+-
+ watchdog_set_drvdata(&priv->wdd, priv);
+
+ priv->wdd.info = &bd957x_wdt_ident;
+@@ -281,9 +274,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
+
+ watchdog_stop_on_reboot(&priv->wdd);
+
+- if (priv->always_running)
+- bd9576_wdt_start(&priv->wdd);
+-
+ return devm_watchdog_register_device(dev, &priv->wdd);
+ }
+
+diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
+index 688b112e712bad..9f279c0e13a66f 100644
+--- a/drivers/watchdog/cpu5wdt.c
++++ b/drivers/watchdog/cpu5wdt.c
+@@ -252,7 +252,7 @@ static void cpu5wdt_exit(void)
+ if (cpu5wdt_device.queue) {
+ cpu5wdt_device.queue = 0;
+ wait_for_completion(&cpu5wdt_device.stop);
+- del_timer(&cpu5wdt_device.timer);
++ timer_shutdown_sync(&cpu5wdt_device.timer);
+ }
+
+ misc_deregister(&cpu5wdt_misc);
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index f79f932bca1489..79ed1626d8ea11 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -178,7 +178,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
+ "3. OA Forward Progress Log\n"
+ "4. iLO Event Log";
+
+- if (ilo5 && ulReason == NMI_UNKNOWN && !mynmi)
++ if (ulReason == NMI_UNKNOWN && !mynmi)
+ return NMI_DONE;
+
+ if (ilo5 && !pretimeout && !mynmi)
+diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
+index 8ac021748d160d..79649b0e89e473 100644
+--- a/drivers/watchdog/imx_sc_wdt.c
++++ b/drivers/watchdog/imx_sc_wdt.c
+@@ -213,29 +213,6 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
+ return devm_watchdog_register_device(dev, wdog);
+ }
+
+-static int __maybe_unused imx_sc_wdt_suspend(struct device *dev)
+-{
+- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
+-
+- if (watchdog_active(&imx_sc_wdd->wdd))
+- imx_sc_wdt_stop(&imx_sc_wdd->wdd);
+-
+- return 0;
+-}
+-
+-static int __maybe_unused imx_sc_wdt_resume(struct device *dev)
+-{
+- struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev);
+-
+- if (watchdog_active(&imx_sc_wdd->wdd))
+- imx_sc_wdt_start(&imx_sc_wdd->wdd);
+-
+- return 0;
+-}
+-
+-static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops,
+- imx_sc_wdt_suspend, imx_sc_wdt_resume);
+-
+ static const struct of_device_id imx_sc_wdt_dt_ids[] = {
+ { .compatible = "fsl,imx-sc-wdt", },
+ { /* sentinel */ }
+@@ -247,7 +224,6 @@ static struct platform_driver imx_sc_wdt_driver = {
+ .driver = {
+ .name = "imx-sc-wdt",
+ .of_match_table = imx_sc_wdt_dt_ids,
+- .pm = &imx_sc_wdt_pm_ops,
+ },
+ };
+ module_platform_driver(imx_sc_wdt_driver);
+diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
+index bb11229093966d..843f9f8e391776 100644
+--- a/drivers/watchdog/it87_wdt.c
++++ b/drivers/watchdog/it87_wdt.c
+@@ -255,6 +255,7 @@ static struct watchdog_device wdt_dev = {
+ static int __init it87_wdt_init(void)
+ {
+ u8 chip_rev;
++ u8 ctrl;
+ int rc;
+
+ rc = superio_enter();
+@@ -313,7 +314,18 @@ static int __init it87_wdt_init(void)
+
+ superio_select(GPIO);
+ superio_outb(WDT_TOV1, WDTCFG);
+- superio_outb(0x00, WDTCTRL);
++
++ switch (chip_type) {
++ case IT8784_ID:
++ case IT8786_ID:
++ ctrl = superio_inb(WDTCTRL);
++ ctrl &= 0x08;
++ superio_outb(ctrl, WDTCTRL);
++ break;
++ default:
++ superio_outb(0x00, WDTCTRL);
++ }
++
+ superio_exit();
+
+ if (timeout < 1 || timeout > max_units * 60) {
+diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
+index 607ce4b8df574f..ec0c08652ec2f9 100644
+--- a/drivers/watchdog/ixp4xx_wdt.c
++++ b/drivers/watchdog/ixp4xx_wdt.c
+@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
+ .owner = THIS_MODULE,
+ };
+
++/*
++ * The A0 version of the IXP422 had a bug in the watchdog making
++ * it useless, but we still need to use it to restart the system
++ * as it is the only way, so in this special case we register a
++ * "dummy" watchdog that doesn't really work, but will support
++ * the restart operation.
++ */
++static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
++{
++ return 0;
++}
++
++static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
++ .start = ixp4xx_wdt_dummy,
++ .stop = ixp4xx_wdt_dummy,
++ .restart = ixp4xx_wdt_restart,
++ .owner = THIS_MODULE,
++};
++
+ static const struct watchdog_info ixp4xx_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE
+@@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = {
+
+ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ {
++ static const struct watchdog_ops *iwdt_ops;
+ struct device *dev = &pdev->dev;
+ struct ixp4xx_wdt *iwdt;
+ struct clk *clk;
+ int ret;
+
+ if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
+- dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
+- return -ENODEV;
++ dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
++ iwdt_ops = &ixp4xx_wdt_restart_only_ops;
++ } else {
++ iwdt_ops = &ixp4xx_wdt_ops;
+ }
+
+ iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+@@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
+ iwdt->rate = IXP4XX_TIMER_FREQ;
+
+ iwdt->wdd.info = &ixp4xx_wdt_info;
+- iwdt->wdd.ops = &ixp4xx_wdt_ops;
++ iwdt->wdd.ops = iwdt_ops;
+ iwdt->wdd.min_timeout = 1;
+ iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+ iwdt->wdd.parent = dev;
+diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
+index d7eb8286e11eca..1ec1e014ba8312 100644
+--- a/drivers/watchdog/marvell_gti_wdt.c
++++ b/drivers/watchdog/marvell_gti_wdt.c
+@@ -271,7 +271,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
+ &wdt_idx);
+ if (!err) {
+ if (wdt_idx >= priv->data->gti_num_timers)
+- return dev_err_probe(&pdev->dev, err,
++ return dev_err_probe(&pdev->dev, -EINVAL,
+ "GTI wdog timer index not valid");
+
+ priv->wdt_timer_idx = wdt_idx;
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 8e1be7ba010398..4895a69015a8ea 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -59,6 +59,8 @@
+ #define PON_REASON_EOF_NUM 0xCCCCBBBB
+ #define RESERVED_MEM_MIN_SIZE 12
+
++#define MAX_HW_ERROR 250
++
+ static int heartbeat = DEFAULT_HEARTBEAT;
+
+ /*
+@@ -77,6 +79,11 @@ static int rti_wdt_start(struct watchdog_device *wdd)
+ {
+ u32 timer_margin;
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
++ int ret;
++
++ ret = pm_runtime_resume_and_get(wdd->parent);
++ if (ret)
++ return ret;
+
+ /* set timeout period */
+ timer_margin = (u64)wdd->timeout * wdt->freq;
+@@ -92,7 +99,7 @@ static int rti_wdt_start(struct watchdog_device *wdd)
+ * to be 50% or less than that; we obviously want to configure the open
+ * window as large as possible so we select the 50% option.
+ */
+- wdd->min_hw_heartbeat_ms = 500 * wdd->timeout;
++ wdd->min_hw_heartbeat_ms = 520 * wdd->timeout + MAX_HW_ERROR;
+
+ /* Generate NMI when wdt expires */
+ writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
+@@ -126,31 +133,33 @@ static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize)
+ * be petted during the open window; not too early or not too late.
+ * The HW configuration options only allow for the open window size
+ * to be 50% or less than that.
++ * To avoid any glitches, we accommodate 2% + max hardware error
++ * safety margin.
+ */
+ switch (wsize) {
+ case RTIWWDSIZE_50P:
+- /* 50% open window => 50% min heartbeat */
+- wdd->min_hw_heartbeat_ms = 500 * heartbeat;
++ /* 50% open window => 52% min heartbeat */
++ wdd->min_hw_heartbeat_ms = 520 * heartbeat + MAX_HW_ERROR;
+ break;
+
+ case RTIWWDSIZE_25P:
+- /* 25% open window => 75% min heartbeat */
+- wdd->min_hw_heartbeat_ms = 750 * heartbeat;
++ /* 25% open window => 77% min heartbeat */
++ wdd->min_hw_heartbeat_ms = 770 * heartbeat + MAX_HW_ERROR;
+ break;
+
+ case RTIWWDSIZE_12P5:
+- /* 12.5% open window => 87.5% min heartbeat */
+- wdd->min_hw_heartbeat_ms = 875 * heartbeat;
++ /* 12.5% open window => 89.5% min heartbeat */
++ wdd->min_hw_heartbeat_ms = 895 * heartbeat + MAX_HW_ERROR;
+ break;
+
+ case RTIWWDSIZE_6P25:
+- /* 6.5% open window => 93.5% min heartbeat */
+- wdd->min_hw_heartbeat_ms = 935 * heartbeat;
++ /* 6.5% open window => 95.5% min heartbeat */
++ wdd->min_hw_heartbeat_ms = 955 * heartbeat + MAX_HW_ERROR;
+ break;
+
+ case RTIWWDSIZE_3P125:
+- /* 3.125% open window => 96.9% min heartbeat */
+- wdd->min_hw_heartbeat_ms = 969 * heartbeat;
++ /* 3.125% open window => 98.9% min heartbeat */
++ wdd->min_hw_heartbeat_ms = 989 * heartbeat + MAX_HW_ERROR;
+ break;
+
+ default:
+@@ -228,14 +237,6 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- /*
+- * If watchdog is running at 32k clock, it is not accurate.
+- * Adjust frequency down in this case so that we don't pet
+- * the watchdog too often.
+- */
+- if (wdt->freq < 32768)
+- wdt->freq = wdt->freq * 9 / 10;
+-
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+@@ -343,6 +344,9 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ if (last_ping)
+ watchdog_set_last_hw_keepalive(wdd, last_ping);
+
++ if (!watchdog_hw_running(wdd))
++ pm_runtime_put_sync(&pdev->dev);
++
+ return 0;
+
+ err_iomap:
+@@ -357,7 +361,10 @@ static void rti_wdt_remove(struct platform_device *pdev)
+ struct rti_wdt_device *wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&wdt->wdd);
+- pm_runtime_put(&pdev->dev);
++
++ if (!pm_runtime_suspended(&pdev->dev))
++ pm_runtime_put(&pdev->dev);
++
+ pm_runtime_disable(&pdev->dev);
+ }
+
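The rti_wdt numbers above all follow one rule: with a closed window covering some share of the timeout, the earliest allowed ping is that share plus a 2% software margin, plus MAX_HW_ERROR (250 ms) of hardware error. A sketch of the arithmetic in per-mille form (hypothetical helper; e.g. a 50% open window gives closed_permille = 500, hence 520 * timeout + 250, matching the patch):

static unsigned int min_hw_heartbeat_ms(unsigned int timeout_s,
                                        unsigned int closed_permille)
{
        /* closed-window share plus 2% margin, converted to ms,
         * plus up to 250 ms of hardware error */
        return (closed_permille + 20) * timeout_s + 250;
}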
+diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
+index 1741f98ca67c52..7bce093316c4d7 100644
+--- a/drivers/watchdog/rzg2l_wdt.c
++++ b/drivers/watchdog/rzg2l_wdt.c
+@@ -123,8 +123,11 @@ static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev)
+ static int rzg2l_wdt_start(struct watchdog_device *wdev)
+ {
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
++ int ret;
+
+- pm_runtime_get_sync(wdev->parent);
++ ret = pm_runtime_resume_and_get(wdev->parent);
++ if (ret)
++ return ret;
+
+ /* Initialize time out */
+ rzg2l_wdt_init_timeout(wdev);
+@@ -141,15 +144,21 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev)
+ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+ {
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
++ int ret;
+
+ rzg2l_wdt_reset(priv);
+- pm_runtime_put(wdev->parent);
++
++ ret = pm_runtime_put(wdev->parent);
++ if (ret < 0)
++ return ret;
+
+ return 0;
+ }
+
+ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+ {
++ int ret = 0;
++
+ wdev->timeout = timeout;
+
+ /*
+@@ -158,11 +167,14 @@ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int time
+ * to reset the module) so that it is updated with new timeout values.
+ */
+ if (watchdog_active(wdev)) {
+- rzg2l_wdt_stop(wdev);
+- rzg2l_wdt_start(wdev);
++ ret = rzg2l_wdt_stop(wdev);
++ if (ret)
++ return ret;
++
++ ret = rzg2l_wdt_start(wdev);
+ }
+
+- return 0;
++ return ret;
+ }
+
+ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
+index 5d2df008b92a5c..34a917221e316f 100644
+--- a/drivers/watchdog/sa1100_wdt.c
++++ b/drivers/watchdog/sa1100_wdt.c
+@@ -191,9 +191,8 @@ static int sa1100dog_probe(struct platform_device *pdev)
+ if (!res)
+ return -ENXIO;
+ reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+- ret = PTR_ERR_OR_ZERO(reg_base);
+- if (ret)
+- return ret;
++ if (!reg_base)
++ return -ENOMEM;
+
+ clk = clk_get(NULL, "OSTIMER0");
+ if (IS_ERR(clk)) {
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 421ebcda62e645..5f23913ce3b49c 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -152,14 +152,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+
+ if (action)
+- sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
++ sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
+ else
+ /*
+ * In the single stage mode, The first signal (WS0) is ignored,
+ * the timeout is (WOR * 2), so the WOR should be configured
+ * to half value of timeout.
+ */
+- sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
++ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+
+ return 0;
+ }
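The sbsa_gwdt casts exist because clk (Hz) times timeout (seconds) can exceed 32 bits, so the multiply must be widened before programming the watchdog offset register. A sketch of both branches (hypothetical helper mirroring the hunk above):

#include <stdbool.h>
#include <stdint.h>

static uint64_t wor_value(uint32_t clk_hz, uint32_t timeout_s, bool action)
{
        if (action)
                return (uint64_t)clk_hz * timeout_s;

        /* single-stage mode: WS0 is ignored and the effective timeout
         * is WOR * 2, so program half the tick rate */
        return ((uint64_t)clk_hz / 2) * timeout_s;
}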
+diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
+index 5f501b41faf9d9..e4b344db38030a 100644
+--- a/drivers/watchdog/starfive-wdt.c
++++ b/drivers/watchdog/starfive-wdt.c
+@@ -202,12 +202,14 @@ static u32 starfive_wdt_ticks_to_sec(struct starfive_wdt *wdt, u32 ticks)
+
+ /* Write unlock-key to unlock. Write other value to lock. */
+ static void starfive_wdt_unlock(struct starfive_wdt *wdt)
++ __acquires(&wdt->lock)
+ {
+ spin_lock(&wdt->lock);
+ writel(wdt->variant->unlock_key, wdt->base + wdt->variant->unlock);
+ }
+
+ static void starfive_wdt_lock(struct starfive_wdt *wdt)
++ __releases(&wdt->lock)
+ {
+ writel(~wdt->variant->unlock_key, wdt->base + wdt->variant->unlock);
+ spin_unlock(&wdt->lock);
+@@ -492,8 +494,13 @@ static int starfive_wdt_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_exit;
+
+- if (!early_enable)
+- pm_runtime_put_sync(&pdev->dev);
++ if (!early_enable) {
++ if (pm_runtime_enabled(&pdev->dev)) {
++ ret = pm_runtime_put_sync(&pdev->dev);
++ if (ret)
++ goto err_exit;
++ }
++ }
+
+ return 0;
+
+diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
+index d9fd50df9802ce..5404e038762021 100644
+--- a/drivers/watchdog/stm32_iwdg.c
++++ b/drivers/watchdog/stm32_iwdg.c
+@@ -20,6 +20,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/watchdog.h>
+
++#define DEFAULT_TIMEOUT 10
++
+ /* IWDG registers */
+ #define IWDG_KR 0x00 /* Key register */
+ #define IWDG_PR 0x04 /* Prescaler Register */
+@@ -248,6 +250,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
+ wdd->parent = dev;
+ wdd->info = &stm32_iwdg_info;
+ wdd->ops = &stm32_iwdg_ops;
++ wdd->timeout = DEFAULT_TIMEOUT;
+ wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
+ wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
+ 1000) / wdt->rate;
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 15df74e11a5953..e2bd266b1b5b3d 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1073,6 +1073,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+
+ /* Fill in the data structures */
+ cdev_init(&wd_data->cdev, &watchdog_fops);
++ wd_data->cdev.owner = wdd->ops->owner;
+
+ /* Add the device */
+ err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
+@@ -1087,8 +1088,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ return err;
+ }
+
+- wd_data->cdev.owner = wdd->ops->owner;
+-
+ /* Record time of most recent heartbeat as 'just before now'. */
+ wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
+ watchdog_set_open_deadline(wd_data);
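The reordering above matters because once cdev_device_add() returns, userspace can open the node, and the open path takes a module reference through cdev.owner; setting the owner afterwards left a window where an open could race a module unload. The resulting order, in sketch form (kernel context, names as in the hunk):

        cdev_init(&wd_data->cdev, &watchdog_fops);
        wd_data->cdev.owner = wdd->ops->owner;  /* before the node goes live */
        err = cdev_device_add(&wd_data->cdev, &wd_data->dev);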
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 586a1673459eb7..db61bcb3aab176 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -673,7 +673,6 @@ EXPORT_SYMBOL(xen_free_ballooned_pages);
+
+ static void __init balloon_add_regions(void)
+ {
+-#if defined(CONFIG_XEN_PV)
+ unsigned long start_pfn, pages;
+ unsigned long pfn, extra_pfn_end;
+ unsigned int i;
+@@ -697,7 +696,6 @@ static void __init balloon_add_regions(void)
+
+ balloon_stats.total_pages += extra_pfn_end - start_pfn;
+ }
+-#endif
+ }
+
+ static int __init balloon_init(void)
+diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
+index b8f2f971c2f0fc..e3585330cf98b1 100644
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -171,11 +171,11 @@ static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
+ int i;
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
++ evtchn_port_t evtchn;
+
+ /* Timer interrupt has highest priority. */
+- irq = irq_from_virq(cpu, VIRQ_TIMER);
++ irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
+ if (irq != -1) {
+- evtchn_port_t evtchn = evtchn_from_irq(irq);
+ word_idx = evtchn / BITS_PER_LONG;
+ bit_idx = evtchn % BITS_PER_LONG;
+ if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
+@@ -328,9 +328,9 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+ for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
+ if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+ int word_idx = i / BITS_PER_EVTCHN_WORD;
+- printk(" %d: event %d -> irq %d%s%s%s\n",
++ printk(" %d: event %d -> irq %u%s%s%s\n",
+ cpu_from_evtchn(i), i,
+- get_evtchn_to_irq(i),
++ irq_from_evtchn(i),
+ sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
+ ? "" : " l2-clear",
+ !sync_test_bit(i, BM(sh->evtchn_mask))
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 1b2136fe0fa519..9e3b5d21d09877 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
+
+ /* IRQ <-> IPI mapping */
+ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
++/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
++static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
+
+ /* Event channel distribution data */
+ static atomic_t channels_on_cpu[NR_CPUS];
+@@ -246,15 +248,6 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
+ return 0;
+ }
+
+-int get_evtchn_to_irq(evtchn_port_t evtchn)
+-{
+- if (evtchn >= xen_evtchn_max_channels())
+- return -1;
+- if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+- return -1;
+- return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
+-}
+-
+ /* Get info for IRQ */
+ static struct irq_info *info_for_irq(unsigned irq)
+ {
+@@ -272,6 +265,19 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
+ irq_set_chip_data(irq, info);
+ }
+
++static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
++{
++ int irq;
++
++ if (evtchn >= xen_evtchn_max_channels())
++ return NULL;
++ if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
++ return NULL;
++ irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
++
++ return (irq < 0) ? NULL : info_for_irq(irq);
++}
++
+ /* Per CPU channel accounting */
+ static void channels_on_cpu_dec(struct irq_info *info)
+ {
+@@ -298,6 +304,13 @@ static void channels_on_cpu_inc(struct irq_info *info)
+ info->is_accounted = 1;
+ }
+
++static void xen_irq_free_desc(unsigned int irq)
++{
++ /* Legacy IRQ descriptors are managed by the arch. */
++ if (irq >= nr_legacy_irqs())
++ irq_free_desc(irq);
++}
++
+ static void delayed_free_irq(struct work_struct *work)
+ {
+ struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+@@ -309,14 +322,11 @@ static void delayed_free_irq(struct work_struct *work)
+
+ kfree(info);
+
+- /* Legacy IRQ descriptors are managed by the arch. */
+- if (irq >= nr_legacy_irqs())
+- irq_free_desc(irq);
++ xen_irq_free_desc(irq);
+ }
+
+ /* Constructors for packed IRQ information. */
+ static int xen_irq_info_common_setup(struct irq_info *info,
+- unsigned irq,
+ enum xen_irq_type type,
+ evtchn_port_t evtchn,
+ unsigned short cpu)
+@@ -326,29 +336,27 @@ static int xen_irq_info_common_setup(struct irq_info *info,
+ BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+ info->type = type;
+- info->irq = irq;
+ info->evtchn = evtchn;
+ info->cpu = cpu;
+ info->mask_reason = EVT_MASK_REASON_EXPLICIT;
+ raw_spin_lock_init(&info->lock);
+
+- ret = set_evtchn_to_irq(evtchn, irq);
++ ret = set_evtchn_to_irq(evtchn, info->irq);
+ if (ret < 0)
+ return ret;
+
+- irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
++ irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
+
+ return xen_evtchn_port_setup(evtchn);
+ }
+
+-static int xen_irq_info_evtchn_setup(unsigned irq,
++static int xen_irq_info_evtchn_setup(struct irq_info *info,
+ evtchn_port_t evtchn,
+ struct xenbus_device *dev)
+ {
+- struct irq_info *info = info_for_irq(irq);
+ int ret;
+
+- ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
++ ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
+ info->u.interdomain = dev;
+ if (dev)
+ atomic_inc(&dev->event_channels);
+@@ -356,49 +364,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
+ return ret;
+ }
+
+-static int xen_irq_info_ipi_setup(unsigned cpu,
+- unsigned irq,
+- evtchn_port_t evtchn,
+- enum ipi_vector ipi)
++static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
++ evtchn_port_t evtchn, enum ipi_vector ipi)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ info->u.ipi = ipi;
+
+- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++ per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
++ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+- return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
++ return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
+ }
+
+-static int xen_irq_info_virq_setup(unsigned cpu,
+- unsigned irq,
+- evtchn_port_t evtchn,
+- unsigned virq)
++static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
++ evtchn_port_t evtchn, unsigned int virq)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ info->u.virq = virq;
+
+- per_cpu(virq_to_irq, cpu)[virq] = irq;
++ per_cpu(virq_to_irq, cpu)[virq] = info->irq;
+
+- return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
++ return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
+ }
+
+-static int xen_irq_info_pirq_setup(unsigned irq,
+- evtchn_port_t evtchn,
+- unsigned pirq,
+- unsigned gsi,
+- uint16_t domid,
+- unsigned char flags)
++static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
++ unsigned int pirq, unsigned int gsi,
++ uint16_t domid, unsigned char flags)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ info->u.pirq.pirq = pirq;
+ info->u.pirq.gsi = gsi;
+ info->u.pirq.domid = domid;
+ info->u.pirq.flags = flags;
+
+- return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
++ return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
+ }
+
+ static void xen_irq_info_cleanup(struct irq_info *info)
+@@ -412,7 +408,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
+ /*
+ * Accessors for packed IRQ information.
+ */
+-evtchn_port_t evtchn_from_irq(unsigned irq)
++static evtchn_port_t evtchn_from_irq(unsigned int irq)
+ {
+ const struct irq_info *info = NULL;
+
+@@ -426,29 +422,32 @@ evtchn_port_t evtchn_from_irq(unsigned irq)
+
+ unsigned int irq_from_evtchn(evtchn_port_t evtchn)
+ {
+- return get_evtchn_to_irq(evtchn);
++ struct irq_info *info = evtchn_to_info(evtchn);
++
++ return info ? info->irq : -1;
+ }
+ EXPORT_SYMBOL_GPL(irq_from_evtchn);
+
+-int irq_from_virq(unsigned int cpu, unsigned int virq)
++int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
++ evtchn_port_t *evtchn)
+ {
+- return per_cpu(virq_to_irq, cpu)[virq];
++ int irq = per_cpu(virq_to_irq, cpu)[virq];
++
++ *evtchn = evtchn_from_irq(irq);
++
++ return irq;
+ }
+
+-static enum ipi_vector ipi_from_irq(unsigned irq)
++static enum ipi_vector ipi_from_irq(struct irq_info *info)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_IPI);
+
+ return info->u.ipi;
+ }
+
+-static unsigned virq_from_irq(unsigned irq)
++static unsigned int virq_from_irq(struct irq_info *info)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_VIRQ);
+
+@@ -465,25 +464,11 @@ static unsigned pirq_from_irq(unsigned irq)
+ return info->u.pirq.pirq;
+ }
+
+-static enum xen_irq_type type_from_irq(unsigned irq)
+-{
+- return info_for_irq(irq)->type;
+-}
+-
+-static unsigned cpu_from_irq(unsigned irq)
+-{
+- return info_for_irq(irq)->cpu;
+-}
+-
+ unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- unsigned ret = 0;
+-
+- if (irq != -1)
+- ret = cpu_from_irq(irq);
++ struct irq_info *info = evtchn_to_info(evtchn);
+
+- return ret;
++ return info ? info->cpu : 0;
+ }
+
+ static void do_mask(struct irq_info *info, u8 reason)
+@@ -529,22 +514,17 @@ static bool pirq_needs_eoi_flag(unsigned irq)
+ return info->u.pirq.flags & PIRQ_NEEDS_EOI;
+ }
+
+-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
++static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
+ bool force_affinity)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- struct irq_info *info = info_for_irq(irq);
+-
+- BUG_ON(irq == -1);
+-
+ if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
+- struct irq_data *data = irq_get_irq_data(irq);
++ struct irq_data *data = irq_get_irq_data(info->irq);
+
+ irq_data_update_affinity(data, cpumask_of(cpu));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+ }
+
+- xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
++ xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
+
+ channels_on_cpu_dec(info);
+ info->cpu = cpu;
+@@ -601,7 +581,9 @@ static void lateeoi_list_add(struct irq_info *info)
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+
+- if (list_empty(&eoi->eoi_list)) {
++ elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
++ eoi_list);
++ if (!elem || info->eoi_time < elem->eoi_time) {
+ list_add(&info->eoi_list, &eoi->eoi_list);
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, delay);
+@@ -732,50 +714,49 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+ }
+ EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+
+-static void xen_irq_init(unsigned irq)
++static struct irq_info *xen_irq_init(unsigned int irq)
+ {
+ struct irq_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+- if (info == NULL)
+- panic("Unable to allocate metadata for IRQ%d\n", irq);
++ if (info) {
++ info->irq = irq;
++ info->type = IRQT_UNBOUND;
++ info->refcnt = -1;
++ INIT_RCU_WORK(&info->rwork, delayed_free_irq);
+
+- info->type = IRQT_UNBOUND;
+- info->refcnt = -1;
+- INIT_RCU_WORK(&info->rwork, delayed_free_irq);
++ set_info_for_irq(irq, info);
++ /*
++ * Interrupt affinity setting can be immediate. No point
++ * in delaying it until an interrupt is handled.
++ */
++ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+
+- set_info_for_irq(irq, info);
+- /*
+- * Interrupt affinity setting can be immediate. No point
+- * in delaying it until an interrupt is handled.
+- */
+- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
++ INIT_LIST_HEAD(&info->eoi_list);
++ list_add_tail(&info->list, &xen_irq_list_head);
++ }
+
+- INIT_LIST_HEAD(&info->eoi_list);
+- list_add_tail(&info->list, &xen_irq_list_head);
++ return info;
+ }
+
+-static int __must_check xen_allocate_irqs_dynamic(int nvec)
++static struct irq_info *xen_allocate_irq_dynamic(void)
+ {
+- int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
++ int irq = irq_alloc_desc_from(0, -1);
++ struct irq_info *info = NULL;
+
+ if (irq >= 0) {
+- for (i = 0; i < nvec; i++)
+- xen_irq_init(irq + i);
++ info = xen_irq_init(irq);
++ if (!info)
++ xen_irq_free_desc(irq);
+ }
+
+- return irq;
+-}
+-
+-static inline int __must_check xen_allocate_irq_dynamic(void)
+-{
+-
+- return xen_allocate_irqs_dynamic(1);
++ return info;
+ }
+
+-static int __must_check xen_allocate_irq_gsi(unsigned gsi)
++static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
+ {
+ int irq;
++ struct irq_info *info;
+
+ /*
+ * A PV guest has no concept of a GSI (since it has no ACPI
+@@ -792,15 +773,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+ else
+ irq = irq_alloc_desc_at(gsi, -1);
+
+- xen_irq_init(irq);
++ info = xen_irq_init(irq);
++ if (!info)
++ xen_irq_free_desc(irq);
+
+- return irq;
++ return info;
+ }
+
+-static void xen_free_irq(unsigned irq)
++static void xen_free_irq(struct irq_info *info)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ if (WARN_ON(!info))
+ return;
+
+@@ -891,7 +872,7 @@ static unsigned int __startup_pirq(unsigned int irq)
+ goto err;
+
+ info->evtchn = evtchn;
+- bind_evtchn_to_cpu(evtchn, 0, false);
++ bind_evtchn_to_cpu(info, 0, false);
+
+ rc = xen_evtchn_port_setup(evtchn);
+ if (rc)
+@@ -927,8 +908,8 @@ static void shutdown_pirq(struct irq_data *data)
+ return;
+
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
+- xen_evtchn_close(evtchn);
+ xen_irq_info_cleanup(info);
++ xen_evtchn_close(evtchn);
+ }
+
+ static void enable_pirq(struct irq_data *data)
+@@ -957,10 +938,15 @@ int xen_irq_from_gsi(unsigned gsi)
+ }
+ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
+
+-static void __unbind_from_irq(unsigned int irq)
++static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
+ {
+- evtchn_port_t evtchn = evtchn_from_irq(irq);
+- struct irq_info *info = info_for_irq(irq);
++ evtchn_port_t evtchn;
++ bool close_evtchn = false;
++
++ if (!info) {
++ xen_irq_free_desc(irq);
++ return;
++ }
+
+ if (info->refcnt > 0) {
+ info->refcnt--;
+@@ -968,19 +954,22 @@ static void __unbind_from_irq(unsigned int irq)
+ return;
+ }
+
++ evtchn = info->evtchn;
++
+ if (VALID_EVTCHN(evtchn)) {
+- unsigned int cpu = cpu_from_irq(irq);
++ unsigned int cpu = info->cpu;
+ struct xenbus_device *dev;
+
+ if (!info->is_static)
+- xen_evtchn_close(evtchn);
++ close_evtchn = true;
+
+- switch (type_from_irq(irq)) {
++ switch (info->type) {
+ case IRQT_VIRQ:
+- per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
++ per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
+ break;
+ case IRQT_IPI:
+- per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
++ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
++ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
+ break;
+ case IRQT_EVTCHN:
+ dev = info->u.interdomain;
+@@ -992,9 +981,12 @@ static void __unbind_from_irq(unsigned int irq)
+ }
+
+ xen_irq_info_cleanup(info);
++
++ if (close_evtchn)
++ xen_evtchn_close(evtchn);
+ }
+
+- xen_free_irq(irq);
++ xen_free_irq(info);
+ }
+
+ /*
+@@ -1010,24 +1002,24 @@ static void __unbind_from_irq(unsigned int irq)
+ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ unsigned pirq, int shareable, char *name)
+ {
+- int irq;
++ struct irq_info *info;
+ struct physdev_irq irq_op;
+ int ret;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = xen_irq_from_gsi(gsi);
+- if (irq != -1) {
++ ret = xen_irq_from_gsi(gsi);
++ if (ret != -1) {
+ pr_info("%s: returning irq %d for gsi %u\n",
+- __func__, irq, gsi);
++ __func__, ret, gsi);
+ goto out;
+ }
+
+- irq = xen_allocate_irq_gsi(gsi);
+- if (irq < 0)
++ info = xen_allocate_irq_gsi(gsi);
++ if (!info)
+ goto out;
+
+- irq_op.irq = irq;
++ irq_op.irq = info->irq;
+ irq_op.vector = 0;
+
+ /* Only the privileged domain can do this. For non-priv, the pcifront
+@@ -1035,20 +1027,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ * this in the priv domain. */
+ if (xen_initial_domain() &&
+ HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
+- xen_free_irq(irq);
+- irq = -ENOSPC;
++ xen_free_irq(info);
++ ret = -ENOSPC;
+ goto out;
+ }
+
+- ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
++ ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
+ shareable ? PIRQ_SHAREABLE : 0);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+
+- pirq_query_unmask(irq);
++ pirq_query_unmask(info->irq);
+ /* We try to use the handler with the appropriate semantic for the
+ * type of interrupt: if the interrupt is an edge triggered
+ * interrupt we use handle_edge_irq.
+@@ -1065,16 +1056,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ * is the right choice either way.
+ */
+ if (shareable)
+- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
+ handle_fasteoi_irq, name);
+ else
+- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
+ handle_edge_irq, name);
+
++ ret = info->irq;
++
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+
+- return irq;
++ return ret;
+ }
+
+ #ifdef CONFIG_PCI_MSI
+@@ -1096,17 +1089,24 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ int pirq, int nvec, const char *name, domid_t domid)
+ {
+ int i, irq, ret;
++ struct irq_info *info;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = xen_allocate_irqs_dynamic(nvec);
++ irq = irq_alloc_descs(-1, 0, nvec, -1);
+ if (irq < 0)
+ goto out;
+
+ for (i = 0; i < nvec; i++) {
++ info = xen_irq_init(irq + i);
++ if (!info) {
++ ret = -ENOMEM;
++ goto error_irq;
++ }
++
+ irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
+
+- ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
++ ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
+ i == 0 ? 0 : PIRQ_MSI_GROUP);
+ if (ret < 0)
+ goto error_irq;
+@@ -1118,9 +1118,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+ return irq;
++
+ error_irq:
+- while (nvec--)
+- __unbind_from_irq(irq + nvec);
++ while (nvec--) {
++ info = info_for_irq(irq + nvec);
++ __unbind_from_irq(info, irq + nvec);
++ }
+ mutex_unlock(&irq_mapping_update_lock);
+ return ret;
+ }
+@@ -1156,7 +1159,7 @@ int xen_destroy_irq(int irq)
+ }
+ }
+
+- xen_free_irq(irq);
++ xen_free_irq(info);
+
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+@@ -1193,30 +1196,29 @@ int xen_pirq_from_irq(unsigned irq)
+ EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
+
+ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
+- struct xenbus_device *dev)
++ struct xenbus_device *dev, bool shared)
+ {
+- int irq;
+- int ret;
++ int ret = -ENOMEM;
++ struct irq_info *info;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return -ENOMEM;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = get_evtchn_to_irq(evtchn);
++ info = evtchn_to_info(evtchn);
+
+- if (irq == -1) {
+- irq = xen_allocate_irq_dynamic();
+- if (irq < 0)
++ if (!info) {
++ info = xen_allocate_irq_dynamic();
++ if (!info)
+ goto out;
+
+- irq_set_chip_and_handler_name(irq, chip,
++ irq_set_chip_and_handler_name(info->irq, chip,
+ handle_edge_irq, "event");
+
+- ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
++ ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+ /*
+@@ -1226,27 +1228,29 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
+ * affinity setting is not invoked on them so nothing would
+ * bind the channel.
+ */
+- bind_evtchn_to_cpu(evtchn, 0, false);
+- } else {
+- struct irq_info *info = info_for_irq(irq);
+- WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
++ bind_evtchn_to_cpu(info, 0, false);
++ } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
++ if (shared && !WARN_ON(info->refcnt < 0))
++ info->refcnt++;
+ }
+
++ ret = info->irq;
++
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+
+- return irq;
++ return ret;
+ }
+
+ int bind_evtchn_to_irq(evtchn_port_t evtchn)
+ {
+- return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
++ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL, false);
+ }
+ EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+
+ int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+ {
+- return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
++ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL, false);
+ }
+ EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
+@@ -1254,18 +1258,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+ {
+ struct evtchn_bind_ipi bind_ipi;
+ evtchn_port_t evtchn;
+- int ret, irq;
++ struct irq_info *info;
++ int ret;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = per_cpu(ipi_to_irq, cpu)[ipi];
++ ret = per_cpu(ipi_to_irq, cpu)[ipi];
+
+- if (irq == -1) {
+- irq = xen_allocate_irq_dynamic();
+- if (irq < 0)
++ if (ret == -1) {
++ info = xen_allocate_irq_dynamic();
++ if (!info)
+ goto out;
+
+- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
+
+ bind_ipi.vcpu = xen_vcpu_nr(cpu);
+@@ -1274,30 +1279,31 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+ BUG();
+ evtchn = bind_ipi.port;
+
+- ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
++ ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+ /*
+ * Force the affinity mask to the target CPU so proc shows
+ * the correct target.
+ */
+- bind_evtchn_to_cpu(evtchn, cpu, true);
++ bind_evtchn_to_cpu(info, cpu, true);
++ ret = info->irq;
+ } else {
+- struct irq_info *info = info_for_irq(irq);
++ info = info_for_irq(ret);
+ WARN_ON(info == NULL || info->type != IRQT_IPI);
+ }
+
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+- return irq;
++ return ret;
+ }
+
+ static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
+ evtchn_port_t remote_port,
+- struct irq_chip *chip)
++ struct irq_chip *chip,
++ bool shared)
+ {
+ struct evtchn_bind_interdomain bind_interdomain;
+ int err;
+@@ -1309,14 +1315,14 @@ static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
+ &bind_interdomain);
+
+ return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
+- chip, dev);
++ chip, dev, shared);
+ }
+
+ int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
+ evtchn_port_t remote_port)
+ {
+ return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
+- &xen_lateeoi_chip);
++ &xen_lateeoi_chip, false);
+ }
+ EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
+
+@@ -1360,22 +1366,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ {
+ struct evtchn_bind_virq bind_virq;
+ evtchn_port_t evtchn = 0;
+- int irq, ret;
++ struct irq_info *info;
++ int ret;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = per_cpu(virq_to_irq, cpu)[virq];
++ ret = per_cpu(virq_to_irq, cpu)[virq];
+
+- if (irq == -1) {
+- irq = xen_allocate_irq_dynamic();
+- if (irq < 0)
++ if (ret == -1) {
++ info = xen_allocate_irq_dynamic();
++ if (!info)
+ goto out;
+
+ if (percpu)
+- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
+ else
+- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
+ handle_edge_irq, "virq");
+
+ bind_virq.virq = virq;
+@@ -1390,10 +1397,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ BUG_ON(ret < 0);
+ }
+
+- ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
++ ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+
+@@ -1401,22 +1407,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ * Force the affinity mask for percpu interrupts so proc
+ * shows the correct target.
+ */
+- bind_evtchn_to_cpu(evtchn, cpu, percpu);
++ bind_evtchn_to_cpu(info, cpu, percpu);
++ ret = info->irq;
+ } else {
+- struct irq_info *info = info_for_irq(irq);
++ info = info_for_irq(ret);
+ WARN_ON(info == NULL || info->type != IRQT_VIRQ);
+ }
+
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+
+- return irq;
++ return ret;
+ }
+
+ static void unbind_from_irq(unsigned int irq)
+ {
++ struct irq_info *info;
++
+ mutex_lock(&irq_mapping_update_lock);
+- __unbind_from_irq(irq);
++ info = info_for_irq(irq);
++ __unbind_from_irq(info, irq);
+ mutex_unlock(&irq_mapping_update_lock);
+ }
+
+@@ -1428,7 +1438,8 @@ static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
+ {
+ int irq, retval;
+
+- irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
++ irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL,
++ irqflags & IRQF_SHARED);
+ if (irq < 0)
+ return irq;
+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
+@@ -1469,7 +1480,8 @@ static int bind_interdomain_evtchn_to_irqhandler_chip(
+ {
+ int irq, retval;
+
+- irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
++ irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip,
++ irqflags & IRQF_SHARED);
+ if (irq < 0)
+ return irq;
+
+@@ -1567,13 +1579,7 @@ EXPORT_SYMBOL_GPL(xen_set_irq_priority);
+
+ int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- struct irq_info *info;
+-
+- if (irq == -1)
+- return -ENOENT;
+-
+- info = info_for_irq(irq);
++ struct irq_info *info = evtchn_to_info(evtchn);
+
+ if (!info)
+ return -ENOENT;
+@@ -1589,7 +1595,6 @@ EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
+
+ int evtchn_get(evtchn_port_t evtchn)
+ {
+- int irq;
+ struct irq_info *info;
+ int err = -ENOENT;
+
+@@ -1598,11 +1603,7 @@ int evtchn_get(evtchn_port_t evtchn)
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = get_evtchn_to_irq(evtchn);
+- if (irq == -1)
+- goto done;
+-
+- info = info_for_irq(irq);
++ info = evtchn_to_info(evtchn);
+
+ if (!info)
+ goto done;
+@@ -1622,16 +1623,17 @@ EXPORT_SYMBOL_GPL(evtchn_get);
+
+ void evtchn_put(evtchn_port_t evtchn)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- if (WARN_ON(irq == -1))
++ struct irq_info *info = evtchn_to_info(evtchn);
++
++ if (WARN_ON(!info))
+ return;
+- unbind_from_irq(irq);
++ unbind_from_irq(info->irq);
+ }
+ EXPORT_SYMBOL_GPL(evtchn_put);
+
+ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ {
+- int irq;
++ evtchn_port_t evtchn;
+
+ #ifdef CONFIG_X86
+ if (unlikely(vector == XEN_NMI_VECTOR)) {
+@@ -1642,9 +1644,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ return;
+ }
+ #endif
+- irq = per_cpu(ipi_to_irq, cpu)[vector];
+- BUG_ON(irq < 0);
+- notify_remote_via_irq(irq);
++ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
++ BUG_ON(evtchn == 0);
++ notify_remote_via_evtchn(evtchn);
+ }
+
+ struct evtchn_loop_ctrl {
+@@ -1655,12 +1657,10 @@ struct evtchn_loop_ctrl {
+
+ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ {
+- int irq;
+- struct irq_info *info;
++ struct irq_info *info = evtchn_to_info(port);
+ struct xenbus_device *dev;
+
+- irq = get_evtchn_to_irq(port);
+- if (irq == -1)
++ if (!info)
+ return;
+
+ /*
+@@ -1685,7 +1685,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ }
+ }
+
+- info = info_for_irq(irq);
+ if (xchg_acquire(&info->is_active, 1))
+ return;
+
+@@ -1699,7 +1698,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ info->eoi_time = get_jiffies_64() + event_eoi_delay;
+ }
+
+- generic_handle_irq(irq);
++ generic_handle_irq(info->irq);
+ }
+
+ int xen_evtchn_do_upcall(void)
+@@ -1757,16 +1756,17 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
+ mutex_lock(&irq_mapping_update_lock);
+
+ /* After resume the irq<->evtchn mappings are all cleared out */
+- BUG_ON(get_evtchn_to_irq(evtchn) != -1);
++ BUG_ON(evtchn_to_info(evtchn));
+ /* Expect irq to have been bound before,
+ so there should be a proper type */
+ BUG_ON(info->type == IRQT_UNBOUND);
+
+- (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
++ info->irq = irq;
++ (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
+
+ mutex_unlock(&irq_mapping_update_lock);
+
+- bind_evtchn_to_cpu(evtchn, info->cpu, false);
++ bind_evtchn_to_cpu(info, info->cpu, false);
+
+ /* Unmask the event channel. */
+ enable_irq(irq);
+@@ -1800,7 +1800,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
+ * it, but don't do the xenlinux-level rebind in that case.
+ */
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+- bind_evtchn_to_cpu(evtchn, tcpu, false);
++ bind_evtchn_to_cpu(info, tcpu, false);
+
+ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+
+@@ -1951,7 +1951,7 @@ static void restore_pirqs(void)
+ if (rc) {
+ pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
+ gsi, irq, pirq, rc);
+- xen_free_irq(irq);
++ xen_free_irq(info);
+ continue;
+ }
+
+@@ -1965,13 +1965,15 @@ static void restore_cpu_virqs(unsigned int cpu)
+ {
+ struct evtchn_bind_virq bind_virq;
+ evtchn_port_t evtchn;
++ struct irq_info *info;
+ int virq, irq;
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
++ info = info_for_irq(irq);
+
+- BUG_ON(virq_from_irq(irq) != virq);
++ BUG_ON(virq_from_irq(info) != virq);
+
+ /* Get a new binding from Xen. */
+ bind_virq.virq = virq;
+@@ -1982,9 +1984,9 @@ static void restore_cpu_virqs(unsigned int cpu)
+ evtchn = bind_virq.port;
+
+ /* Record the new mapping. */
+- (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
++ xen_irq_info_virq_setup(info, cpu, evtchn, virq);
+ /* The affinity mask is still valid */
+- bind_evtchn_to_cpu(evtchn, cpu, false);
++ bind_evtchn_to_cpu(info, cpu, false);
+ }
+ }
+
+@@ -1992,13 +1994,15 @@ static void restore_cpu_ipis(unsigned int cpu)
+ {
+ struct evtchn_bind_ipi bind_ipi;
+ evtchn_port_t evtchn;
++ struct irq_info *info;
+ int ipi, irq;
+
+ for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+ continue;
++ info = info_for_irq(irq);
+
+- BUG_ON(ipi_from_irq(irq) != ipi);
++ BUG_ON(ipi_from_irq(info) != ipi);
+
+ /* Get a new binding from Xen. */
+ bind_ipi.vcpu = xen_vcpu_nr(cpu);
+@@ -2008,9 +2012,9 @@ static void restore_cpu_ipis(unsigned int cpu)
+ evtchn = bind_ipi.port;
+
+ /* Record the new mapping. */
+- (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
++ xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
+ /* The affinity mask is still valid */
+- bind_evtchn_to_cpu(evtchn, cpu, false);
++ bind_evtchn_to_cpu(info, cpu, false);
+ }
+ }
+
+diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
+index 4d3398eff9cdf1..19ae31695edcf1 100644
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -33,7 +33,6 @@ struct evtchn_ops {
+
+ extern const struct evtchn_ops *evtchn_ops;
+
+-int get_evtchn_to_irq(evtchn_port_t evtchn);
+ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
+
+ unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index 9139a7364df539..f6a2216c2c8701 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -85,6 +85,7 @@ struct user_evtchn {
+ struct per_user_data *user;
+ evtchn_port_t port;
+ bool enabled;
++ bool unbinding;
+ };
+
+ static void evtchn_free_ring(evtchn_port_t *ring)
+@@ -164,6 +165,10 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
+ struct per_user_data *u = evtchn->user;
+ unsigned int prod, cons;
+
++ /* Handler might be called when tearing down the IRQ. */
++ if (evtchn->unbinding)
++ return IRQ_HANDLED;
++
+ WARN(!evtchn->enabled,
+ "Interrupt for port %u, but apparently not enabled; per-user %p\n",
+ evtchn->port, u);
+@@ -397,7 +402,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
+ if (rc < 0)
+ goto err;
+
+- rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
++ rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, IRQF_SHARED,
+ u->name, evtchn);
+ if (rc < 0)
+ goto err;
+@@ -421,6 +426,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
+
+ BUG_ON(irq < 0);
+
++ evtchn->unbinding = true;
+ unbind_from_irqhandler(irq, evtchn);
+
+ del_evtchn(u, evtchn);
+diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
+index 4440e626b7975f..42adc2c1e06b37 100644
+--- a/drivers/xen/gntdev-dmabuf.c
++++ b/drivers/xen/gntdev-dmabuf.c
+@@ -11,6 +11,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/dma-buf.h>
++#include <linux/dma-direct.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/uaccess.h>
+@@ -50,7 +51,7 @@ struct gntdev_dmabuf {
+
+ /* Number of pages this buffer has. */
+ int nr_pages;
+- /* Pages of this buffer. */
++ /* Pages of this buffer (only for dma-buf export). */
+ struct page **pages;
+ };
+
+@@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
+ /* DMA buffer import support. */
+
+ static int
+-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
++dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
+ int count, int domid)
+ {
+ grant_ref_t priv_gref_head;
+@@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+ }
+
+ gnttab_grant_foreign_access_ref(cur_ref, domid,
+- xen_page_to_gfn(pages[i]), 0);
++ gfns[i], 0);
+ refs[i] = cur_ref;
+ }
+
+@@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
+
+ static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
+ {
+- kfree(gntdev_dmabuf->pages);
+ kfree(gntdev_dmabuf->u.imp.refs);
+ kfree(gntdev_dmabuf);
+ }
+@@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
+ if (!gntdev_dmabuf->u.imp.refs)
+ goto fail;
+
+- gntdev_dmabuf->pages = kcalloc(count,
+- sizeof(gntdev_dmabuf->pages[0]),
+- GFP_KERNEL);
+- if (!gntdev_dmabuf->pages)
+- goto fail;
+-
+ gntdev_dmabuf->nr_pages = count;
+
+ for (i = 0; i < count; i++)
+@@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+- struct sg_page_iter sg_iter;
++ struct sg_dma_page_iter sg_iter;
++ unsigned long *gfns;
+ int i;
+
+ dma_buf = dma_buf_get(fd);
+@@ -624,26 +619,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+
+ gntdev_dmabuf->u.imp.sgt = sgt;
+
+- /* Now convert sgt to array of pages and check for page validity. */
++ gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
++ if (!gfns) {
++ ret = ERR_PTR(-ENOMEM);
++ goto fail_unmap;
++ }
++
++ /*
++ * Now convert sgt to array of gfns without accessing underlying pages.
++ * It is not allowed to access the underlying struct page of an sg table
++	 * exported by DMA-buf, but since we are dealing with a special Xen DMA
++	 * device here (not a normal physical one), look at the DMA addresses in
++	 * the sg table and then calculate the gfns directly from them.
++ */
+ i = 0;
+- for_each_sgtable_page(sgt, &sg_iter, 0) {
+- struct page *page = sg_page_iter_page(&sg_iter);
+- /*
+- * Check if page is valid: this can happen if we are given
+- * a page from VRAM or other resources which are not backed
+- * by a struct page.
+- */
+- if (!pfn_valid(page_to_pfn(page))) {
+- ret = ERR_PTR(-EINVAL);
+- goto fail_unmap;
+- }
++ for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
++ dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
++ unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
+
+- gntdev_dmabuf->pages[i++] = page;
++ gfns[i++] = pfn_to_gfn(pfn);
+ }
+
+- ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
++ ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
+ gntdev_dmabuf->u.imp.refs,
+ count, domid));
++ kfree(gfns);
+ if (IS_ERR(ret))
+ goto fail_end_access;
+
+diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
+index b3e3d1bb37f3e3..50865527314538 100644
+--- a/drivers/xen/pcpu.c
++++ b/drivers/xen/pcpu.c
+@@ -47,6 +47,9 @@
+ #include <asm/xen/hypervisor.h>
+ #include <asm/xen/hypercall.h>
+
++#ifdef CONFIG_ACPI
++#include <acpi/processor.h>
++#endif
+
+ /*
+ * @cpu_id: Xen physical cpu logic number
+@@ -400,4 +403,23 @@ bool __init xen_processor_present(uint32_t acpi_id)
+
+ return online;
+ }
++
++void xen_sanitize_proc_cap_bits(uint32_t *cap)
++{
++ struct xen_platform_op op = {
++ .cmd = XENPF_set_processor_pminfo,
++ .u.set_pminfo.id = -1,
++ .u.set_pminfo.type = XEN_PM_PDC,
++ };
++ u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap };
++ int ret;
++
++ set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n",
++ ret);
++ else
++ *cap = buf[2];
++}
+ #endif
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index f00ad5f5f1d4a5..61aaded483e1d3 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -17,6 +17,7 @@
+ #include <linux/poll.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/srcu.h>
+ #include <linux/string.h>
+ #include <linux/workqueue.h>
+ #include <linux/errno.h>
+@@ -841,7 +842,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
+ #ifdef CONFIG_XEN_PRIVCMD_IRQFD
+ /* Irqfd support */
+ static struct workqueue_struct *irqfd_cleanup_wq;
+-static DEFINE_MUTEX(irqfds_lock);
++static DEFINE_SPINLOCK(irqfds_lock);
++DEFINE_STATIC_SRCU(irqfds_srcu);
+ static LIST_HEAD(irqfds_list);
+
+ struct privcmd_kernel_irqfd {
+@@ -869,6 +871,9 @@ static void irqfd_shutdown(struct work_struct *work)
+ container_of(work, struct privcmd_kernel_irqfd, shutdown);
+ u64 cnt;
+
++ /* Make sure irqfd has been initialized in assign path */
++ synchronize_srcu(&irqfds_srcu);
++
+ eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
+ eventfd_ctx_put(kirqfd->eventfd);
+ kfree(kirqfd);
+@@ -905,9 +910,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
+ irqfd_inject(kirqfd);
+
+ if (flags & EPOLLHUP) {
+- mutex_lock(&irqfds_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&irqfds_lock, flags);
+ irqfd_deactivate(kirqfd);
+- mutex_unlock(&irqfds_lock);
++ spin_unlock_irqrestore(&irqfds_lock, flags);
+ }
+
+ return 0;
+@@ -925,17 +932,18 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
+ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ {
+ struct privcmd_kernel_irqfd *kirqfd, *tmp;
++ unsigned long flags;
+ __poll_t events;
+ struct fd f;
+ void *dm_op;
+- int ret;
++ int ret, idx;
+
+ kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
+ if (!kirqfd)
+ return -ENOMEM;
+ dm_op = kirqfd + 1;
+
+- if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
++ if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
+ ret = -EFAULT;
+ goto error_kfree;
+ }
+@@ -964,18 +972,19 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
+ init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
+
+- mutex_lock(&irqfds_lock);
++ spin_lock_irqsave(&irqfds_lock, flags);
+
+ list_for_each_entry(tmp, &irqfds_list, list) {
+ if (kirqfd->eventfd == tmp->eventfd) {
+ ret = -EBUSY;
+- mutex_unlock(&irqfds_lock);
++ spin_unlock_irqrestore(&irqfds_lock, flags);
+ goto error_eventfd;
+ }
+ }
+
++ idx = srcu_read_lock(&irqfds_srcu);
+ list_add_tail(&kirqfd->list, &irqfds_list);
+- mutex_unlock(&irqfds_lock);
++ spin_unlock_irqrestore(&irqfds_lock, flags);
+
+ /*
+ * Check if there was an event already pending on the eventfd before we
+@@ -985,6 +994,8 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+ if (events & EPOLLIN)
+ irqfd_inject(kirqfd);
+
++ srcu_read_unlock(&irqfds_srcu, idx);
++
+ /*
+ * Do not drop the file until the kirqfd is fully initialized, otherwise
+ * we might race against the EPOLLHUP.
+@@ -1007,12 +1018,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
+ {
+ struct privcmd_kernel_irqfd *kirqfd;
+ struct eventfd_ctx *eventfd;
++ unsigned long flags;
+
+ eventfd = eventfd_ctx_fdget(irqfd->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+- mutex_lock(&irqfds_lock);
++ spin_lock_irqsave(&irqfds_lock, flags);
+
+ list_for_each_entry(kirqfd, &irqfds_list, list) {
+ if (kirqfd->eventfd == eventfd) {
+@@ -1021,7 +1033,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
+ }
+ }
+
+- mutex_unlock(&irqfds_lock);
++ spin_unlock_irqrestore(&irqfds_lock, flags);
+
+ eventfd_ctx_put(eventfd);
+
+@@ -1069,13 +1081,14 @@ static int privcmd_irqfd_init(void)
+ static void privcmd_irqfd_exit(void)
+ {
+ struct privcmd_kernel_irqfd *kirqfd, *tmp;
++ unsigned long flags;
+
+- mutex_lock(&irqfds_lock);
++ spin_lock_irqsave(&irqfds_lock, flags);
+
+ list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
+ irqfd_deactivate(kirqfd);
+
+- mutex_unlock(&irqfds_lock);
++ spin_unlock_irqrestore(&irqfds_lock, flags);
+
+ destroy_workqueue(irqfd_cleanup_wq);
+ }
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 946bd56f0ac53e..6d0d1c8a508bf9 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -78,9 +78,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
+ {
+ unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
++ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+
+ next_bfn = pfn_to_bfn(xen_pfn);
+
++ /* If buffer is physically aligned, ensure DMA alignment. */
++ if (IS_ALIGNED(p, algn) &&
++ !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
++ return 1;
++
+ for (i = 1; i < nr_pages; i++)
+ if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
+ return 1;
+@@ -140,7 +146,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ void *ret;
+
+ /* Align the allocation to the Xen page size */
+- size = 1UL << (order + XEN_PAGE_SHIFT);
++ size = ALIGN(size, XEN_PAGE_SIZE);
+
+ ret = (void *)__get_free_pages(flags, get_order(size));
+ if (!ret)
+@@ -172,7 +178,7 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+ int order = get_order(size);
+
+ /* Convert the size to actually allocated. */
+- size = 1UL << (order + XEN_PAGE_SHIFT);
++ size = ALIGN(size, XEN_PAGE_SIZE);
+
+ if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+@@ -405,4 +411,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
+ .get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
++ .max_mapping_size = swiotlb_max_mapping_size,
+ };
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 059de92aea7d0f..d47eee6c514359 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ u16 val;
+ int ret = 0;
+
+- err = pci_read_config_word(dev, PCI_COMMAND, &val);
+- if (err)
+- return err;
+- if (!(val & PCI_COMMAND_INTX_DISABLE))
+- ret |= INTERRUPT_TYPE_INTX;
+-
+ /*
+ * Do not trust dev->msi(x)_enabled here, as enabling could be done
+ * bypassing the pci_*msi* functions, by the qemu.
+@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
+ if (val & PCI_MSIX_FLAGS_ENABLE)
+ ret |= INTERRUPT_TYPE_MSIX;
+ }
++
++ /*
++	 * The PCIe spec says a device cannot use INTx if MSI/MSI-X is enabled,
++ * so check for INTx only when both are disabled.
++ */
++ if (!ret) {
++ err = pci_read_config_word(dev, PCI_COMMAND, &val);
++ if (err)
++ return err;
++ if (!(val & PCI_COMMAND_INTX_DISABLE))
++ ret |= INTERRUPT_TYPE_INTX;
++ }
++
+ return ret ?: INTERRUPT_TYPE_NONE;
+ }
+
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 097316a741268b..1948a9700c8fa6 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
+ return PCIBIOS_SET_FAILED;
+
+ if (new_value & field_config->enable_bit) {
+- /* don't allow enabling together with other interrupt types */
++ /*
++		 * Don't allow enabling together with another interrupt type, but do
++		 * allow enabling MSI(-X) while INTx is still active to accommodate
++		 * Linux's MSI(-X) startup sequence. This is safe to do since,
++		 * according to the PCI spec, a device with MSI(-X) enabled shouldn't use INTx.
++ */
+ int int_type = xen_pcibk_get_interrupt_type(dev);
+
+ if (int_type == INTERRUPT_TYPE_NONE ||
++ int_type == INTERRUPT_TYPE_INTX ||
+ int_type == field_config->int_type)
+ goto write;
+ return PCIBIOS_SET_FAILED;
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index 981435103af1ab..fc033264596644 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ pci_clear_mwi(dev);
+ }
+
+- if (dev_data && dev_data->allow_interrupt_control) {
+- if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
+- if (value & PCI_COMMAND_INTX_DISABLE) {
+- pci_intx(dev, 0);
+- } else {
+- /* Do not allow enabling INTx together with MSI or MSI-X. */
+- switch (xen_pcibk_get_interrupt_type(dev)) {
+- case INTERRUPT_TYPE_NONE:
+- pci_intx(dev, 1);
+- break;
+- case INTERRUPT_TYPE_INTX:
+- break;
+- default:
+- return PCIBIOS_SET_FAILED;
+- }
+- }
+- }
+- }
++ if (dev_data && dev_data->allow_interrupt_control &&
++ ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
++ pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
+
+ cmd->val = value;
+
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 639bf628389ba4..1a9ded0cddcb0f 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -65,13 +65,17 @@
+ #include "xenbus.h"
+
+
+-static int xs_init_irq;
++static int xs_init_irq = -1;
+ int xen_store_evtchn;
+ EXPORT_SYMBOL_GPL(xen_store_evtchn);
+
+ struct xenstore_domain_interface *xen_store_interface;
+ EXPORT_SYMBOL_GPL(xen_store_interface);
+
++#define XS_INTERFACE_READY \
++ ((xen_store_interface != NULL) && \
++ (xen_store_interface->connection == XENSTORE_CONNECTED))
++
+ enum xenstore_init xen_store_domain_type;
+ EXPORT_SYMBOL_GPL(xen_store_domain_type);
+
+@@ -751,19 +755,19 @@ static void xenbus_probe(void)
+ {
+ xenstored_ready = 1;
+
+- if (!xen_store_interface) {
++ if (!xen_store_interface)
+ xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
+- /*
+- * Now it is safe to free the IRQ used for xenstore late
+- * initialization. No need to unbind: it is about to be
+- * bound again from xb_init_comms. Note that calling
+- * unbind_from_irqhandler now would result in xen_evtchn_close()
+- * being called and the event channel not being enabled again
+- * afterwards, resulting in missed event notifications.
+- */
++ /*
++ * Now it is safe to free the IRQ used for xenstore late
++ * initialization. No need to unbind: it is about to be
++ * bound again from xb_init_comms. Note that calling
++ * unbind_from_irqhandler now would result in xen_evtchn_close()
++ * being called and the event channel not being enabled again
++ * afterwards, resulting in missed event notifications.
++ */
++ if (xs_init_irq >= 0)
+ free_irq(xs_init_irq, &xb_waitq);
+- }
+
+ /*
+ * In the HVM case, xenbus_init() deferred its call to
+@@ -822,7 +826,7 @@ static int __init xenbus_probe_initcall(void)
+ if (xen_store_domain_type == XS_PV ||
+ (xen_store_domain_type == XS_HVM &&
+ !xs_hvm_defer_init_for_callback() &&
+- xen_store_interface != NULL))
++ XS_INTERFACE_READY))
+ xenbus_probe();
+
+ /*
+@@ -831,7 +835,7 @@ static int __init xenbus_probe_initcall(void)
+ * started, then probe. It will be triggered when communication
+ * starts happening, by waiting on xb_waitq.
+ */
+- if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
++ if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
+ struct task_struct *probe_task;
+
+ probe_task = kthread_run(xenbus_probe_thread, NULL,
+@@ -1014,6 +1018,12 @@ static int __init xenbus_init(void)
+ xen_store_interface =
+ memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE, MEMREMAP_WB);
++ if (!xen_store_interface) {
++ pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
++ __func__, v);
++ err = -EINVAL;
++ goto out_error;
++ }
+ if (xen_store_interface->connection != XENSTORE_CONNECTED)
+ wait = true;
+ }
+@@ -1025,7 +1035,7 @@ static int __init xenbus_init(void)
+ if (err < 0) {
+ pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ err);
+- return err;
++ goto out_error;
+ }
+
+ xs_init_irq = err;
+diff --git a/fs/9p/fid.h b/fs/9p/fid.h
+index 29281b7c388703..0d6138bee2a3d1 100644
+--- a/fs/9p/fid.h
++++ b/fs/9p/fid.h
+@@ -49,9 +49,6 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
+ static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags,
+ unsigned int s_cache, unsigned int f_flags)
+ {
+- if (fid->qid.type != P9_QTFILE)
+- return;
+-
+ if ((!s_cache) ||
+ ((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
+ (s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
+diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
+index cdf441f22e0737..dcce42d55d68f2 100644
+--- a/fs/9p/v9fs_vfs.h
++++ b/fs/9p/v9fs_vfs.h
+@@ -42,6 +42,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb);
+ void v9fs_free_inode(struct inode *inode);
+ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode,
+ dev_t rdev);
++void v9fs_set_netfs_context(struct inode *inode);
+ int v9fs_init_inode(struct v9fs_session_info *v9ses,
+ struct inode *inode, umode_t mode, dev_t rdev);
+ void v9fs_evict_inode(struct inode *inode);
+diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
+index f16f7358163490..01338d4c2d9e6f 100644
+--- a/fs/9p/vfs_dentry.c
++++ b/fs/9p/vfs_dentry.c
+@@ -48,12 +48,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
+ static void v9fs_dentry_release(struct dentry *dentry)
+ {
+ struct hlist_node *p, *n;
++ struct hlist_head head;
+
+ p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
+ dentry, dentry);
+- hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
++
++ spin_lock(&dentry->d_lock);
++ hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
++ spin_unlock(&dentry->d_lock);
++
++ hlist_for_each_safe(p, n, &head)
+ p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
+- dentry->d_fsdata = NULL;
+ }
+
+ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 11cd8d23f6f238..8566ddad49ad57 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -591,6 +591,7 @@ const struct file_operations v9fs_file_operations = {
+ .splice_read = v9fs_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .fsync = v9fs_file_fsync,
++ .setlease = simple_nosetlease,
+ };
+
+ const struct file_operations v9fs_file_operations_dotl = {
+@@ -605,4 +606,5 @@ const struct file_operations v9fs_file_operations_dotl = {
+ .splice_read = v9fs_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .fsync = v9fs_file_fsync_dotl,
++ .setlease = simple_nosetlease,
+ };
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 0d28ecf668d010..853c63b8368157 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -83,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses,
+ int res;
+ int mode = stat->mode;
+
+- res = mode & S_IALLUGO;
++ res = mode & 0777; /* S_IRWXUGO */
+ if (v9fs_proto_dotu(v9ses)) {
+ if ((mode & P9_DMSETUID) == P9_DMSETUID)
+ res |= S_ISUID;
+@@ -178,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended)
+ break;
+ }
+
++ if (uflags & O_TRUNC)
++ ret |= P9_OTRUNC;
++
+ if (extended) {
+ if (uflags & O_EXCL)
+ ret |= P9_OEXCL;
+@@ -246,7 +249,7 @@ void v9fs_free_inode(struct inode *inode)
+ /*
+ * Set parameters for the netfs library
+ */
+-static void v9fs_set_netfs_context(struct inode *inode)
++void v9fs_set_netfs_context(struct inode *inode)
+ {
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
+@@ -326,8 +329,6 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
+ err = -EINVAL;
+ goto error;
+ }
+-
+- v9fs_set_netfs_context(inode);
+ error:
+ return err;
+
+@@ -359,6 +360,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
+ iput(inode);
+ return ERR_PTR(err);
+ }
++ v9fs_set_netfs_context(inode);
+ return inode;
+ }
+
+@@ -464,6 +466,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ goto error;
+
+ v9fs_stat2inode(st, inode, sb, 0);
++ v9fs_set_netfs_context(inode);
+ v9fs_cache_inode_get_cookie(inode);
+ unlock_new_inode(inode);
+ return inode;
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 1312f68965ac00..91bcee2ab3c491 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -128,6 +128,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ goto error;
+
+ v9fs_stat2inode_dotl(st, inode, 0);
++ v9fs_set_netfs_context(inode);
+ v9fs_cache_inode_get_cookie(inode);
+ retval = v9fs_get_acl(inode, fid);
+ if (retval)
+diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
+index 73db55c050bf10..958efc84233346 100644
+--- a/fs/9p/vfs_super.c
++++ b/fs/9p/vfs_super.c
+@@ -320,6 +320,7 @@ static const struct super_operations v9fs_super_ops = {
+ .alloc_inode = v9fs_alloc_inode,
+ .free_inode = v9fs_free_inode,
+ .statfs = simple_statfs,
++ .drop_inode = v9fs_drop_inode,
+ .evict_inode = v9fs_evict_inode,
+ .show_options = v9fs_show_options,
+ .umount_begin = v9fs_umount_begin,
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index e00cf8109b3f31..3c4572ef3a488a 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ struct p9_fid *fid;
+ int ret;
+
+- p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
++ p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
+ name, buffer_size);
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+
+ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+- return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
++ /* Txattrwalk with an empty string lists xattrs instead */
++ return v9fs_xattr_get(dentry, "", buffer, buffer_size);
+ }
+
+ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
+diff --git a/fs/afs/callback.c b/fs/afs/callback.c
+index a484fa6428081a..90f9b2a46ff48a 100644
+--- a/fs/afs/callback.c
++++ b/fs/afs/callback.c
+@@ -110,13 +110,14 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
+ {
+ struct afs_volume *volume = NULL;
+ struct rb_node *p;
+- int seq = 0;
++ int seq = 1;
+
+ do {
+ /* Unfortunately, rbtree walking doesn't give reliable results
+ * under just the RCU read lock, so we have to check for
+ * changes.
+ */
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ read_seqbegin_or_lock(&cell->volume_lock, &seq);
+
+ p = rcu_dereference_raw(cell->volumes.rb_node);
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index 988c2ac7cececd..926cb1188eba6c 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -409,10 +409,12 @@ static int afs_update_cell(struct afs_cell *cell)
+ if (ret == -ENOMEM)
+ goto out_wake;
+
+- ret = -ENOMEM;
+ vllist = afs_alloc_vlserver_list(0);
+- if (!vllist)
++ if (!vllist) {
++ if (ret >= 0)
++ ret = -ENOMEM;
+ goto out_wake;
++ }
+
+ switch (ret) {
+ case -ENODATA:
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index 95bcbd7654d1b6..10905a53d5b272 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -114,6 +114,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ struct afs_net *net = afs_d2net(dentry);
+ const char *name = dentry->d_name.name;
+ size_t len = dentry->d_name.len;
++ char *result = NULL;
+ int ret;
+
+ /* Names prefixed with a dot are R/W mounts. */
+@@ -131,9 +132,22 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ }
+
+ ret = dns_query(net->net, "afsdb", name, len, "srv=1",
+- NULL, NULL, false);
+- if (ret == -ENODATA)
+- ret = -EDESTADDRREQ;
++ &result, NULL, false);
++ if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
++ ret = -ENOENT;
++ if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
++ struct dns_server_list_v1_header *v1 = (void *)result;
++
++ if (v1->hdr.zero == 0 &&
++ v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
++ v1->hdr.version == 1 &&
++ (v1->status != DNS_LOOKUP_GOOD &&
++ v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
++ return -ENOENT;
++
++ }
++
++ kfree(result);
+ return ret;
+ }
+
+@@ -252,20 +266,9 @@ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
+ return 1;
+ }
+
+-/*
+- * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
+- * sleep)
+- * - called from dput() when d_count is going to 0.
+- * - return 1 to request dentry be unhashed, 0 otherwise
+- */
+-static int afs_dynroot_d_delete(const struct dentry *dentry)
+-{
+- return d_really_is_positive(dentry);
+-}
+-
+ const struct dentry_operations afs_dynroot_dentry_operations = {
+ .d_revalidate = afs_dynroot_d_revalidate,
+- .d_delete = afs_dynroot_d_delete,
++ .d_delete = always_delete_dentry,
+ .d_release = afs_d_release,
+ .d_automount = afs_d_automount,
+ };
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index d37dd201752baa..0012ea300eb53d 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -529,13 +529,17 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
+
+ static void afs_drop_open_mmap(struct afs_vnode *vnode)
+ {
+- if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
++ if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
+ return;
+
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+- if (atomic_read(&vnode->cb_nr_mmap) == 0)
++ read_seqlock_excl(&vnode->cb_lock);
++ // the only place where ->cb_nr_mmap may hit 0
++ // see __afs_break_callback() for the other side...
++ if (atomic_dec_and_test(&vnode->cb_nr_mmap))
+ list_del_init(&vnode->cb_mmap_link);
++ read_sequnlock_excl(&vnode->cb_lock);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ flush_work(&vnode->cb_work);
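+
+The afs_drop_open_mmap() rework above is the "decrement unless last"
+idiom: atomic_add_unless() refuses to move the counter from 1 to 0, so
+only the final dropper falls through to the locked section, where the
+1-to-0 transition and the list removal happen under the same locks the
+callback-break side takes. A stripped-down sketch with illustrative names:
+
+	static void demo_drop(struct demo_obj *o)
+	{
+		/* fast path: counter was >= 2, just decrement */
+		if (atomic_add_unless(&o->users, -1, 1))
+			return;
+
+		spin_lock(&o->lock);
+		/* the only place the counter may reach zero */
+		if (atomic_dec_and_test(&o->users))
+			list_del_init(&o->link);
+		spin_unlock(&o->lock);
+	}
+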
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index da73b97e19a9af..c4bf8439bc9c99 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -553,6 +553,7 @@ struct afs_server_entry {
+ };
+
+ struct afs_server_list {
++ struct rcu_head rcu;
+ afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
+ refcount_t usage;
+ unsigned char nr_servers;
+@@ -585,6 +586,7 @@ struct afs_volume {
+ #define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
+ #define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
+ #define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
++#define AFS_VOLUME_RM_TREE 7 /* - Set if volume removed from cell->volumes */
+ #ifdef CONFIG_AFS_FSCACHE
+ struct fscache_volume *cache; /* Caching cookie */
+ #endif
+@@ -1512,6 +1514,7 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
+ extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
+ extern int afs_activate_volume(struct afs_volume *);
+ extern void afs_deactivate_volume(struct afs_volume *);
++bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason);
+ extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
+ extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
+ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
+diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
+index 97f50e9fd9eb01..297487ee832317 100644
+--- a/fs/afs/mntpt.c
++++ b/fs/afs/mntpt.c
+@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
+ put_page(page);
+ if (ret < 0)
+ return ret;
++
++ /* Don't cross a backup volume mountpoint from a backup volume */
++ if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
++ ctx->type == AFSVL_BACKVOL)
++ return -ENODEV;
+ }
+
+ return 0;
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index ed1644e7683f47..d642d06a453be7 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -424,7 +424,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ if (call->async) {
+ if (cancel_work_sync(&call->async_work))
+ afs_put_call(call);
+- afs_put_call(call);
++ afs_set_call_complete(call, ret, 0);
+ }
+
+ ac->error = ret;
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index b5237206eac3e9..0bd2f5ba6900c1 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -27,7 +27,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ const struct afs_addr_list *alist;
+ struct afs_server *server = NULL;
+ unsigned int i;
+- int seq = 0, diff;
++ int seq = 1, diff;
+
+ rcu_read_lock();
+
+@@ -35,6 +35,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ if (server)
+ afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
+ server = NULL;
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
+
+ if (srx->transport.family == AF_INET6) {
+@@ -90,7 +91,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
+ {
+ struct afs_server *server = NULL;
+ struct rb_node *p;
+- int diff, seq = 0;
++ int diff, seq = 1;
+
+ _enter("%pU", uuid);
+
+@@ -102,7 +103,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
+ if (server)
+ afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
+ server = NULL;
+-
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ read_seqbegin_or_lock(&net->fs_lock, &seq);
+
+ p = net->fs_servers.rb_node;
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index ed9056703505fe..b59896b1de0af2 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
+ for (i = 0; i < slist->nr_servers; i++)
+ afs_unuse_server(net, slist->servers[i].server,
+ afs_server_trace_put_slist);
+- kfree(slist);
++ kfree_rcu(slist, rcu);
+ }
+ }
+
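+
+kfree_rcu() here pairs with the struct rcu_head added to afs_server_list
+in internal.h above: readers may still be walking the server list under
+rcu_read_lock(), so the memory must outlive the current grace period
+rather than being freed immediately. The general shape, with illustrative
+names:
+
+	struct demo_slist {
+		struct rcu_head rcu;	/* required by kfree_rcu() */
+		unsigned char nr;
+	};
+
+	static void demo_put(struct demo_slist *sl)
+	{
+		/* queued; actually freed once current RCU readers finish */
+		kfree_rcu(sl, rcu);
+	}
+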
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index 95d713074dc813..e95fb4cb4fcd23 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -407,6 +407,8 @@ static int afs_validate_fc(struct fs_context *fc)
+ return PTR_ERR(volume);
+
+ ctx->volume = volume;
++ if (volume->type != AFSVL_RWVOL)
++ ctx->flock_mode = afs_flock_mode_local;
+ }
+
+ return 0;
+diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
+index 488e58490b16e7..eb415ce563600e 100644
+--- a/fs/afs/vl_rotate.c
++++ b/fs/afs/vl_rotate.c
+@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ }
+
+ /* Status load is ordered after lookup counter load */
++ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
++ pr_warn("No record of cell %s\n", cell->name);
++ vc->error = -ENOENT;
++ return false;
++ }
++
+ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ vc->error = -EDESTADDRREQ;
+ return false;
+@@ -285,6 +291,7 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
+ */
+ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+ {
++ struct afs_cell *cell = vc->cell;
+ static int count;
+ int i;
+
+@@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+
+ rcu_read_lock();
+ pr_notice("EDESTADDR occurred\n");
++ pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
++ pr_notice("DNS: src=%u st=%u lc=%x\n",
++ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
+ pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+ vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index 29d483c8028130..c028598a903c9c 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -32,8 +32,13 @@ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
+ } else if (p->vid > volume->vid) {
+ pp = &(*pp)->rb_right;
+ } else {
+- volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
+- goto found;
++ if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) {
++ volume = p;
++ goto found;
++ }
++
++ set_bit(AFS_VOLUME_RM_TREE, &volume->flags);
++ rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes);
+ }
+ }
+
+@@ -56,7 +61,8 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
+ afs_volume_trace_remove);
+ write_seqlock(&cell->volume_lock);
+ hlist_del_rcu(&volume->proc_link);
+- rb_erase(&volume->cell_node, &cell->volumes);
++ if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags))
++ rb_erase(&volume->cell_node, &cell->volumes);
+ write_sequnlock(&cell->volume_lock);
+ }
+ }
+@@ -231,6 +237,20 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
+ _leave(" [destroyed]");
+ }
+
++/*
++ * Try to get a reference on a volume record.
++ */
++bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
++{
++ int r;
++
++ if (__refcount_inc_not_zero(&volume->ref, &r)) {
++ trace_afs_volume(volume->vid, r + 1, reason);
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * Get a reference on a volume record.
+ */
+@@ -317,7 +337,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
+ {
+ struct afs_server_list *new, *old, *discard;
+ struct afs_vldb_entry *vldb;
+- char idbuf[16];
++ char idbuf[24];
+ int ret, idsz;
+
+ _enter("");
+@@ -325,7 +345,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
+ /* We look up an ID by passing it as a decimal string in the
+ * operation's name parameter.
+ */
+- idsz = sprintf(idbuf, "%llu", volume->vid);
++ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
+
+ vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
+ if (IS_ERR(vldb)) {
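+
+The idbuf change above is a sizing fix: the largest u64 volume ID,
+18446744073709551615, needs 20 decimal digits plus a NUL, so 16 bytes
+could truncate while 24 leaves headroom, and snprintf() bounds the write
+either way. A standalone illustration (not from the patch):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		char idbuf[24];
+		int idsz = snprintf(idbuf, sizeof(idbuf), "%llu",
+				    (unsigned long long)UINT64_MAX);
+
+		/* prints 20: the digit count, excluding the NUL */
+		printf("%s -> %d digits\n", idbuf, idsz);
+		return 0;
+	}
+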
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index e1c45341719bc5..948db2be26ec33 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -496,7 +496,7 @@ static void afs_extend_writeback(struct address_space *mapping,
+ if (folio_index(folio) != index)
+ break;
+
+- if (!folio_try_get_rcu(folio)) {
++ if (!folio_try_get(folio)) {
+ xas_reset(&xas);
+ continue;
+ }
+diff --git a/fs/aio.c b/fs/aio.c
+index f8589caef9c10e..4a9b5e4719eea5 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -590,13 +590,24 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
+
+ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+ {
+- struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
+- struct kioctx *ctx = req->ki_ctx;
++ struct aio_kiocb *req;
++ struct kioctx *ctx;
+ unsigned long flags;
+
++ /*
++ * The kiocb didn't come from aio or is neither a read nor a write, so
++ * ignore it.
++ */
++ if (!(iocb->ki_flags & IOCB_AIO_RW))
++ return;
++
++ req = container_of(iocb, struct aio_kiocb, rw);
++
+ if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
+ return;
+
++ ctx = req->ki_ctx;
++
+ spin_lock_irqsave(&ctx->ctx_lock, flags);
+ list_add_tail(&req->ki_list, &ctx->active_reqs);
+ req->ki_cancel = cancel;
+@@ -1463,7 +1474,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
+ req->ki_complete = aio_complete_rw;
+ req->private = NULL;
+ req->ki_pos = iocb->aio_offset;
+- req->ki_flags = req->ki_filp->f_iocb_flags;
++ req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
+ if (iocb->aio_flags & IOCB_FLAG_RESFD)
+ req->ki_flags |= IOCB_EVENTFD;
+ if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
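+
+The aio change above shows why container_of() wants a type guard:
+kiocb_set_cancel_fn() can be handed a kiocb that is not embedded in an
+aio_kiocb, and computing the container pointer anyway dereferences
+unrelated memory. The new IOCB_AIO_RW bit, set where aio initialises the
+kiocb in aio_prep_rw(), vouches for the containing type. The pattern in
+miniature, with illustrative names:
+
+	struct demo_inner {
+		unsigned int flags;
+	};
+
+	#define DEMO_FROM_OUTER	0x1	/* set only by our own setup code */
+
+	struct demo_outer {
+		struct demo_inner inner;
+		int private_state;
+	};
+
+	static void demo_handle(struct demo_inner *i)
+	{
+		struct demo_outer *o;
+
+		if (!(i->flags & DEMO_FROM_OUTER))
+			return;	/* not ours: the cast would be bogus */
+
+		o = container_of(i, struct demo_outer, inner);
+		o->private_state++;
+	}
+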
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 7b3d2d4914073f..fb2c8d14327ae1 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1008,7 +1008,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ if (elf_read_implies_exec(*elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++ const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
++ if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
+ current->flags |= PF_RANDOMIZE;
+
+ setup_new_exec(bprm);
+@@ -1300,7 +1301,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ mm->end_data = end_data;
+ mm->start_stack = bprm->p;
+
+- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
++ if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
+ /*
+ * For architectures with ELF randomization, when executing
+ * a loader directly (i.e. no interpreter listed in ELF
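+
+The binfmt_elf change snapshots randomize_va_space once via READ_ONCE()
+because the sysctl can change between the PF_RANDOMIZE decision and the
+later "> 1" test, leaving the two checks inconsistent with each other.
+The idiom, with hypothetical names (demo_sysctl_knob stands in for the
+tunable):
+
+	static void demo_apply_policy(void)
+	{
+		/* one (possibly racy) read, reused for every decision */
+		const int snap = READ_ONCE(demo_sysctl_knob);
+
+		if (snap)
+			demo_enable_feature();
+		if (snap > 1)
+			demo_enable_extra();	/* consistent with the above */
+	}
+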
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 206812ce544aeb..96a8b13b57d969 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -320,7 +320,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ else
+ executable_stack = EXSTACK_DEFAULT;
+
+- if (stack_size == 0) {
++ if (stack_size == 0 && interp_params.flags & ELF_FDPIC_FLAG_PRESENT) {
+ stack_size = interp_params.stack_size;
+ if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
+ executable_stack = EXSTACK_ENABLE_X;
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index c26545d71d39a3..cd6d5bbb4b9df5 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -72,8 +72,10 @@
+
+ #ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
+ #define DATA_START_OFFSET_WORDS (0)
++#define MAX_SHARED_LIBS_UPDATE (0)
+ #else
+ #define DATA_START_OFFSET_WORDS (MAX_SHARED_LIBS)
++#define MAX_SHARED_LIBS_UPDATE (MAX_SHARED_LIBS)
+ #endif
+
+ struct lib_info {
+@@ -880,7 +882,7 @@ static int load_flat_binary(struct linux_binprm *bprm)
+ return res;
+
+ /* Update data segment pointers for all libraries */
+- for (i = 0; i < MAX_SHARED_LIBS; i++) {
++ for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) {
+ if (!libinfo.lib_list[i].loaded)
+ continue;
+ for (j = 0; j < MAX_SHARED_LIBS; j++) {
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index e0108d17b085cf..cf5ed5cd4102df 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -60,12 +60,11 @@ typedef struct {
+ char *name;
+ struct dentry *dentry;
+ struct file *interp_file;
++ refcount_t users; /* sync removal with load_misc_binary() */
+ } Node;
+
+ static DEFINE_RWLOCK(entries_lock);
+ static struct file_system_type bm_fs_type;
+-static struct vfsmount *bm_mnt;
+-static int entry_count;
+
+ /*
+ * Max length of the register string. Determined by:
+@@ -82,19 +81,23 @@ static int entry_count;
+ */
+ #define MAX_REGISTER_LENGTH 1920
+
+-/*
+- * Check if we support the binfmt
+- * if we do, return the node, else NULL
+- * locking is done in load_misc_binary
++/**
++ * search_binfmt_handler - search for a binary handler for @bprm
++ * @misc: handle to binfmt_misc instance
++ * @bprm: binary for which we are looking for a handler
++ *
++ * Search for a binary type handler for @bprm in the list of registered binary
++ * type handlers.
++ *
++ * Return: binary type list entry on success, NULL on failure
+ */
+-static Node *check_file(struct linux_binprm *bprm)
++static Node *search_binfmt_handler(struct linux_binprm *bprm)
+ {
+ char *p = strrchr(bprm->interp, '.');
+- struct list_head *l;
++ Node *e;
+
+ /* Walk all the registered handlers. */
+- list_for_each(l, &entries) {
+- Node *e = list_entry(l, Node, list);
++ list_for_each_entry(e, &entries, list) {
+ char *s;
+ int j;
+
+@@ -123,9 +126,49 @@ static Node *check_file(struct linux_binprm *bprm)
+ if (j == e->size)
+ return e;
+ }
++
+ return NULL;
+ }
+
++/**
++ * get_binfmt_handler - try to find a binary type handler
++ * @misc: handle to binfmt_misc instance
++ * @bprm: binary for which we are looking for a handler
++ *
++ * Try to find a binfmt handler for the binary type. If one is found take a
++ * reference to protect against removal via bm_{entry,status}_write().
++ *
++ * Return: binary type list entry on success, NULL on failure
++ */
++static Node *get_binfmt_handler(struct linux_binprm *bprm)
++{
++ Node *e;
++
++ read_lock(&entries_lock);
++ e = search_binfmt_handler(bprm);
++ if (e)
++ refcount_inc(&e->users);
++ read_unlock(&entries_lock);
++ return e;
++}
++
++/**
++ * put_binfmt_handler - put binary handler node
++ * @e: node to put
++ *
++ * Drop a reference to the node, syncing with load_misc_binary(); the final
++ * free is deferred to load_misc_binary() in case it is still using the
++ * binary type handler we were asked to remove.
++ */
++static void put_binfmt_handler(Node *e)
++{
++ if (refcount_dec_and_test(&e->users)) {
++ if (e->flags & MISC_FMT_OPEN_FILE)
++ filp_close(e->interp_file, NULL);
++ kfree(e);
++ }
++}
++
+ /*
+ * the loader itself
+ */
+@@ -139,12 +182,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
+ if (!enabled)
+ return retval;
+
+- /* to keep locking time low, we copy the interpreter string */
+- read_lock(&entries_lock);
+- fmt = check_file(bprm);
+- if (fmt)
+- dget(fmt->dentry);
+- read_unlock(&entries_lock);
++ fmt = get_binfmt_handler(bprm);
+ if (!fmt)
+ return retval;
+
+@@ -198,7 +236,16 @@ static int load_misc_binary(struct linux_binprm *bprm)
+
+ retval = 0;
+ ret:
+- dput(fmt->dentry);
++
++ /*
++ * If we actually put the node here all concurrent calls to
++ * load_misc_binary() will have finished. We also know
++ * that for the refcount to be zero ->evict_inode() must have removed
++ * the node to be deleted from the list. All that is left for us is to
++ * close and free.
++ */
++ put_binfmt_handler(fmt);
++
+ return retval;
+ }
+
+@@ -552,30 +599,90 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
+ return inode;
+ }
+
++/**
++ * bm_evict_inode - cleanup data associated with @inode
++ * @inode: inode to which the data is attached
++ *
++ * Cleanup the binary type handler data associated with @inode if a binary type
++ * entry is removed or the filesystem is unmounted and the super block is
++ * shutdown.
++ *
++ * If the ->evict call was not caused by a super block shutdown but by a write
++ * to remove the entry or all entries via bm_{entry,status}_write() the entry
++ * will have already been removed from the list. We keep the list_empty() check
++ * to make that explicit.
++ */
+ static void bm_evict_inode(struct inode *inode)
+ {
+ Node *e = inode->i_private;
+
+- if (e && e->flags & MISC_FMT_OPEN_FILE)
+- filp_close(e->interp_file, NULL);
+-
+ clear_inode(inode);
+- kfree(e);
++
++ if (e) {
++ write_lock(&entries_lock);
++ if (!list_empty(&e->list))
++ list_del_init(&e->list);
++ write_unlock(&entries_lock);
++ put_binfmt_handler(e);
++ }
+ }
+
+-static void kill_node(Node *e)
++/**
++ * unlink_binfmt_dentry - remove the dentry for the binary type handler
++ * @dentry: dentry associated with the binary type handler
++ *
++ * Do the actual filesystem work to remove a dentry for a registered binary
++ * type handler. Since binfmt_misc only allows simple files to be created
++ * directly under the root dentry of the filesystem we ensure that we are
++ * indeed passed a dentry directly beneath the root dentry, that the inode
++ * associated with the root dentry is locked, and that it is a regular file we
++ * are asked to remove.
++ */
++static void unlink_binfmt_dentry(struct dentry *dentry)
+ {
+- struct dentry *dentry;
++ struct dentry *parent = dentry->d_parent;
++ struct inode *inode, *parent_inode;
++
++ /* All entries are immediate descendants of the root dentry. */
++ if (WARN_ON_ONCE(dentry->d_sb->s_root != parent))
++ return;
+
++ /* We only expect to be called on regular files. */
++ inode = d_inode(dentry);
++ if (WARN_ON_ONCE(!S_ISREG(inode->i_mode)))
++ return;
++
++ /* The parent inode must be locked. */
++ parent_inode = d_inode(parent);
++ if (WARN_ON_ONCE(!inode_is_locked(parent_inode)))
++ return;
++
++ if (simple_positive(dentry)) {
++ dget(dentry);
++ simple_unlink(parent_inode, dentry);
++ d_delete(dentry);
++ dput(dentry);
++ }
++}
++
++/**
++ * remove_binfmt_handler - remove a binary type handler
++ * @misc: handle to binfmt_misc instance
++ * @e: binary type handler to remove
++ *
++ * Remove a binary type handler from the list of binary type handlers and
++ * remove its associated dentry. This is called from
++ * binfmt_{entry,status}_write(). In the future, we might want to think about
++ * adding a proper ->unlink() method to binfmt_misc instead of forcing caller's
++ * to use writes to files in order to delete binary type handlers. But it has
++ * worked for so long that it's not a pressing issue.
++ */
++static void remove_binfmt_handler(Node *e)
++{
+ write_lock(&entries_lock);
+ list_del_init(&e->list);
+ write_unlock(&entries_lock);
+-
+- dentry = e->dentry;
+- drop_nlink(d_inode(dentry));
+- d_drop(dentry);
+- dput(dentry);
+- simple_release_fs(&bm_mnt, &entry_count);
++ unlink_binfmt_dentry(e->dentry);
+ }
+
+ /* /<entry> */
+@@ -602,8 +709,8 @@ bm_entry_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+- struct dentry *root;
+- Node *e = file_inode(file)->i_private;
++ struct inode *inode = file_inode(file);
++ Node *e = inode->i_private;
+ int res = parse_command(buffer, count);
+
+ switch (res) {
+@@ -617,13 +724,22 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+ break;
+ case 3:
+ /* Delete this handler. */
+- root = file_inode(file)->i_sb->s_root;
+- inode_lock(d_inode(root));
++ inode = d_inode(inode->i_sb->s_root);
++ inode_lock(inode);
+
++ /*
++ * In order to add new elements to or remove elements from the list
++ * via bm_{entry,register,status}_write(), inode_lock() on the
++ * root inode must be held.
++ * The lock is exclusive, ensuring that the list can't be
++ * modified. Only load_misc_binary() can access the list, and it
++ * does so read-only. So we only need to take the write lock when we
++ * actually remove the entry from the list.
++ */
+ if (!list_empty(&e->list))
+- kill_node(e);
++ remove_binfmt_handler(e);
+
+- inode_unlock(d_inode(root));
++ inode_unlock(inode);
+ break;
+ default:
+ return res;
+@@ -682,13 +798,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
+ if (!inode)
+ goto out2;
+
+- err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count);
+- if (err) {
+- iput(inode);
+- inode = NULL;
+- goto out2;
+- }
+-
++ refcount_set(&e->users, 1);
+ e->dentry = dget(dentry);
+ inode->i_private = e;
+ inode->i_fop = &bm_entry_operations;
+@@ -732,7 +842,8 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+ int res = parse_command(buffer, count);
+- struct dentry *root;
++ Node *e, *next;
++ struct inode *inode;
+
+ switch (res) {
+ case 1:
+@@ -745,13 +856,22 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
+ break;
+ case 3:
+ /* Delete all handlers. */
+- root = file_inode(file)->i_sb->s_root;
+- inode_lock(d_inode(root));
++ inode = d_inode(file_inode(file)->i_sb->s_root);
++ inode_lock(inode);
+
+- while (!list_empty(&entries))
+- kill_node(list_first_entry(&entries, Node, list));
++ /*
++ * In order to add new elements to or remove elements from the list
++ * via bm_{entry,register,status}_write(), inode_lock() on the
++ * root inode must be held.
++ * The lock is exclusive, ensuring that the list can't be
++ * modified. Only load_misc_binary() can access the list, and it
++ * does so read-only. So we only need to take the write lock when we
++ * actually remove the entry from the list.
++ */
++ list_for_each_entry_safe(e, next, &entries, list)
++ remove_binfmt_handler(e);
+
+- inode_unlock(d_inode(root));
++ inode_unlock(inode);
+ break;
+ default:
+ return res;
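+
+The binfmt_misc rework above swaps dentry pinning for a dedicated
+refcount so a handler stays alive while an exec is still using it, even
+if the entry is removed concurrently: lookups take a reference under the
+read lock, removal merely unlinks, and whoever drops the last reference
+frees. Condensed to its core, with illustrative names (demo_search() is
+hypothetical):
+
+	static struct demo_node *demo_get(void)
+	{
+		struct demo_node *e;
+
+		read_lock(&demo_lock);
+		e = demo_search();
+		if (e)
+			refcount_inc(&e->users);
+		read_unlock(&demo_lock);
+
+		return e;
+	}
+
+	static void demo_put(struct demo_node *e)
+	{
+		/* last reference gone: safe to release and free */
+		if (refcount_dec_and_test(&e->users))
+			kfree(e);
+	}
+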
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index a4a809efc92fc6..a2ba1c7fc16af4 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -2770,20 +2770,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
+ size_t alloc_bytes;
+
+ alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
+- data = kvmalloc(alloc_bytes, GFP_KERNEL);
++ data = kvzalloc(alloc_bytes, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+- if (total_bytes >= sizeof(*data)) {
++ if (total_bytes >= sizeof(*data))
+ data->bytes_left = total_bytes - sizeof(*data);
+- data->bytes_missing = 0;
+- } else {
++ else
+ data->bytes_missing = sizeof(*data) - total_bytes;
+- data->bytes_left = 0;
+- }
+-
+- data->elem_cnt = 0;
+- data->elem_missed = 0;
+
+ return data;
+ }
+@@ -3104,10 +3098,14 @@ void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
+ btrfs_backref_cleanup_node(cache, node);
+ }
+
+- cache->last_trans = 0;
+-
+- for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+- ASSERT(list_empty(&cache->pending[i]));
++ for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
++ while (!list_empty(&cache->pending[i])) {
++ node = list_first_entry(&cache->pending[i],
++ struct btrfs_backref_node,
++ list);
++ btrfs_backref_cleanup_node(cache, node);
++ }
++ }
+ ASSERT(list_empty(&cache->pending_edge));
+ ASSERT(list_empty(&cache->useless_node));
+ ASSERT(list_empty(&cache->changed));
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 12b12443efaabb..650972895652d8 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -646,7 +646,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ {
+ struct btrfs_inode *inode = bbio->inode;
+ struct btrfs_fs_info *fs_info = bbio->fs_info;
+- struct btrfs_bio *orig_bbio = bbio;
+ struct bio *bio = &bbio->bio;
+ u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 length = bio->bi_iter.bi_size;
+@@ -682,7 +681,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ bbio->saved_iter = bio->bi_iter;
+ ret = btrfs_lookup_bio_sums(bbio);
+ if (ret)
+- goto fail_put_bio;
++ goto fail;
+ }
+
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
+@@ -704,11 +703,13 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+
+ ret = btrfs_bio_csum(bbio);
+ if (ret)
+- goto fail_put_bio;
+- } else if (use_append) {
++ goto fail;
++ } else if (use_append ||
++ (btrfs_is_zoned(fs_info) && inode &&
++ inode->flags & BTRFS_INODE_NODATASUM)) {
+ ret = btrfs_alloc_dummy_sum(bbio);
+ if (ret)
+- goto fail_put_bio;
++ goto fail;
+ }
+ }
+
+@@ -716,12 +717,23 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ done:
+ return map_length == length;
+
+-fail_put_bio:
+- if (map_length < length)
+- btrfs_cleanup_bio(bbio);
+ fail:
+ btrfs_bio_counter_dec(fs_info);
+- btrfs_bio_end_io(orig_bbio, ret);
++ /*
++ * We have split the original bbio; now we have to end both the current
++ * @bbio and the remaining one, as the remaining one will never be submitted.
++ */
++ if (map_length < length) {
++ struct btrfs_bio *remaining = bbio->private;
++
++ ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
++ ASSERT(remaining);
++
++ remaining->bio.bi_status = ret;
++ btrfs_orig_bbio_end_io(remaining);
++ }
++ bbio->bio.bi_status = ret;
++ btrfs_orig_bbio_end_io(bbio);
+ /* Do not submit another chunk */
+ return true;
+ }
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index b2e5107b7cecc4..4e999e1c14075d 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1214,8 +1214,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ block_group->space_info->total_bytes -= block_group->length;
+ block_group->space_info->bytes_readonly -=
+ (block_group->length - block_group->zone_unusable);
+- block_group->space_info->bytes_zone_unusable -=
+- block_group->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
++ -block_group->zone_unusable);
+ block_group->space_info->disk_total -= block_group->length * factor;
+
+ spin_unlock(&block_group->space_info->lock);
+@@ -1399,7 +1399,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+ if (btrfs_is_zoned(cache->fs_info)) {
+ /* Migrate zone_unusable bytes to readonly */
+ sinfo->bytes_readonly += cache->zone_unusable;
+- sinfo->bytes_zone_unusable -= cache->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
++ -cache->zone_unusable);
+ cache->zone_unusable = 0;
+ }
+ cache->ro++;
+@@ -1467,6 +1468,7 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
+ */
+ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ {
++ LIST_HEAD(retry_list);
+ struct btrfs_block_group *block_group;
+ struct btrfs_space_info *space_info;
+ struct btrfs_trans_handle *trans;
+@@ -1488,6 +1490,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ while (!list_empty(&fs_info->unused_bgs)) {
++ u64 used;
+ int trimming;
+
+ block_group = list_first_entry(&fs_info->unused_bgs,
+@@ -1523,9 +1526,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ goto next;
+ }
+
++ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+- if (block_group->reserved || block_group->pinned ||
+- block_group->used || block_group->ro ||
++ if (btrfs_is_block_group_used(block_group) || block_group->ro ||
+ list_is_singular(&block_group->list)) {
+ /*
+ * We want to bail if we made new allocations or have
+@@ -1535,10 +1538,50 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ */
+ trace_btrfs_skip_unused_block_group(block_group);
+ spin_unlock(&block_group->lock);
++ spin_unlock(&space_info->lock);
++ up_write(&space_info->groups_sem);
++ goto next;
++ }
++
++ /*
++ * The block group may be unused but there may be space reserved
++ * accounting with the existence of that block group, that is,
++ * space_info->bytes_may_use was incremented by a task but no
++ * space was yet allocated from the block group by the task.
++ * That space may or may not be allocated, as we are generally
++ * pessimistic about space reservation for metadata as well as
++ * for data when using compression (as we reserve space based on
++ * the worst case, when data can't be compressed, and before
++ * actually attempting compression, before starting writeback).
++ *
++ * So check if the total space of the space_info minus the size
++ * of this block group is less than the used space of the
++ * space_info - if that's the case, then it means we have tasks
++ * that might be relying on the block group in order to allocate
++ * extents, and add back the block group to the unused list when
++ * we finish, so that we retry later in case no tasks ended up
++ * needing to allocate extents from the block group.
++ */
++ used = btrfs_space_info_used(space_info, true);
++ if (space_info->total_bytes - block_group->length < used &&
++ block_group->zone_unusable < block_group->length) {
++ /*
++ * Add a reference for the list, compensate for the ref
++ * drop under the "next" label for the
++ * fs_info->unused_bgs list.
++ */
++ btrfs_get_block_group(block_group);
++ list_add_tail(&block_group->bg_list, &retry_list);
++
++ trace_btrfs_skip_unused_block_group(block_group);
++ spin_unlock(&block_group->lock);
++ spin_unlock(&space_info->lock);
+ up_write(&space_info->groups_sem);
+ goto next;
+ }
++
+ spin_unlock(&block_group->lock);
++ spin_unlock(&space_info->lock);
+
+ /* We don't want to force the issue, only flip if it's ok. */
+ ret = inc_block_group_ro(block_group, 0);
+@@ -1662,12 +1705,16 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ btrfs_put_block_group(block_group);
+ spin_lock(&fs_info->unused_bgs_lock);
+ }
++ list_splice_tail(&retry_list, &fs_info->unused_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ return;
+
+ flip_async:
+ btrfs_end_transaction(trans);
++ spin_lock(&fs_info->unused_bgs_lock);
++ list_splice_tail(&retry_list, &fs_info->unused_bgs);
++ spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ btrfs_put_block_group(block_group);
+ btrfs_discard_punt_unused_bgs_list(fs_info);
+@@ -1742,6 +1789,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
+ struct btrfs_block_group *bg;
+ struct btrfs_space_info *space_info;
++ LIST_HEAD(retry_list);
+
+ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
+ return;
+@@ -1878,8 +1926,20 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ }
+
+ next:
+- if (ret)
+- btrfs_mark_bg_to_reclaim(bg);
++ if (ret) {
++ /* Refcount held by the reclaim_bgs list after splice. */
++ spin_lock(&fs_info->unused_bgs_lock);
++ /*
++ * This block group might be added to the unused list
++ * during the above process. Move it back to the
++ * reclaim list otherwise.
++ */
++ if (list_empty(&bg->bg_list)) {
++ btrfs_get_block_group(bg);
++ list_add_tail(&bg->bg_list, &retry_list);
++ }
++ spin_unlock(&fs_info->unused_bgs_lock);
++ }
+ btrfs_put_block_group(bg);
+
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+@@ -1899,6 +1959,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ end:
++ spin_lock(&fs_info->unused_bgs_lock);
++ list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
++ spin_unlock(&fs_info->unused_bgs_lock);
+ btrfs_exclop_finish(fs_info);
+ sb_end_write(fs_info->sb);
+ }
+@@ -2601,7 +2664,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+
+ btrfs_set_dev_extent_length(leaf, extent, num_bytes);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -2961,9 +3024,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
+ if (btrfs_is_zoned(cache->fs_info)) {
+ /* Migrate zone_unusable bytes back */
+ cache->zone_unusable =
+- (cache->alloc_offset - cache->used) +
++ (cache->alloc_offset - cache->used - cache->pinned -
++ cache->reserved) +
+ (cache->length - cache->zone_capacity);
+- sinfo->bytes_zone_unusable += cache->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
++ cache->zone_unusable);
+ sinfo->bytes_readonly -= cache->zone_unusable;
+ }
+ num_bytes = cache->length - cache->reserved -
+@@ -3025,7 +3090,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
+ cache->global_root_id);
+ btrfs_set_stack_block_group_flags(&bgi, cache->flags);
+ write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ fail:
+ btrfs_release_path(path);
+ /*
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 2bdbcb834f9543..089979981e4aaa 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -255,6 +255,13 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
+ return (block_group->start + block_group->length);
+ }
+
++static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
++{
++ lockdep_assert_held(&bg->lock);
++
++ return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
++}
++
+ static inline bool btrfs_is_block_group_data_only(
+ struct btrfs_block_group *block_group)
+ {
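+
+btrfs_is_block_group_used() reads three counters that are only stable
+under bg->lock, and lockdep_assert_held() turns that requirement into a
+runtime check on CONFIG_LOCKDEP builds while compiling away otherwise.
+The general pattern, with illustrative names:
+
+	static inline bool demo_is_busy(const struct demo_group *g)
+	{
+		/* splats on lockdep kernels if the caller forgot the lock */
+		lockdep_assert_held(&g->lock);
+
+		return g->used > 0 || g->reserved > 0 || g->pinned > 0;
+	}
+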
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index 77684c5e0c8bad..db8da4e7b22891 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -486,7 +486,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
+
+ block_rsv = get_block_rsv(trans, root);
+
+- if (unlikely(block_rsv->size == 0))
++ if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
+ goto try_reserve;
+ again:
+ ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
+diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
+index b0bd12b8652f4f..43a9a6b5a79f46 100644
+--- a/fs/btrfs/block-rsv.h
++++ b/fs/btrfs/block-rsv.h
+@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
+ return data_race(rsv->full);
+ }
+
++/*
++ * Get the reserved amount of a block reserve in a context where getting a
++ * stale value is acceptable, instead of accessing it directly and triggering
++ * a data race warning from KCSAN.
++ */
++static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
++{
++ u64 ret;
++
++ spin_lock(&rsv->lock);
++ ret = rsv->reserved;
++ spin_unlock(&rsv->lock);
++
++ return ret;
++}
++
++/*
++ * Get the size of a block reserve in a context where getting a stale value is
++ * acceptable, instead of accessing it directly and triggering a data race
++ * warning from KCSAN.
++ */
++static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
++{
++ u64 ret;
++
++ spin_lock(&rsv->lock);
++ ret = rsv->size;
++ spin_unlock(&rsv->lock);
++
++ return ret;
++}
++
+ #endif /* BTRFS_BLOCK_RSV_H */
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index bda1fdbba666aa..ec6679a538c1dc 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -82,8 +82,10 @@ struct btrfs_inode {
+ /*
+ * Lock for counters and all fields used to determine if the inode is in
+ * the log or not (last_trans, last_sub_trans, last_log_commit,
+- * logged_trans), to access/update new_delalloc_bytes and to update the
+- * VFS' inode number of bytes used.
++ * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
++ * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
++ * update the VFS' inode number of bytes used.
++ * Also protects setting struct file::private_data.
+ */
+ spinlock_t lock;
+
+@@ -102,6 +104,14 @@ struct btrfs_inode {
+ /* held while logging the inode in tree-log.c */
+ struct mutex log_mutex;
+
++ /*
++ * Counters to keep track of the number of extent item's we may use due
++ * to delalloc and such. outstanding_extents is the number of extent
++ * items we think we'll end up using, and reserved_extents is the number
++ * of extent items we've reserved metadata for. Protected by 'lock'.
++ */
++ unsigned outstanding_extents;
++
+ /* used to order data wrt metadata */
+ struct btrfs_ordered_inode_tree ordered_tree;
+
+@@ -122,28 +132,31 @@ struct btrfs_inode {
+ u64 generation;
+
+ /*
+- * transid of the trans_handle that last modified this inode
++ * ID of the transaction handle that last modified this inode.
++ * Protected by 'lock'.
+ */
+ u64 last_trans;
+
+ /*
+- * transid that last logged this inode
++ * ID of the transaction that last logged this inode.
++ * Protected by 'lock'.
+ */
+ u64 logged_trans;
+
+ /*
+- * log transid when this inode was last modified
++ * Log transaction ID when this inode was last modified.
++ * Protected by 'lock'.
+ */
+ int last_sub_trans;
+
+- /* a local copy of root's last_log_commit */
++ /* A local copy of root's last_log_commit. Protected by 'lock'. */
+ int last_log_commit;
+
+ union {
+ /*
+ * Total number of bytes pending delalloc, used by stat to
+ * calculate the real block usage of the file. This is used
+- * only for files.
++ * only for files. Protected by 'lock'.
+ */
+ u64 delalloc_bytes;
+ /*
+@@ -161,7 +174,7 @@ struct btrfs_inode {
+ * Total number of bytes pending delalloc that fall within a file
+ * range that is either a hole or beyond EOF (and no prealloc extent
+ * exists in the range). This is always <= delalloc_bytes and this
+- * is used only for files.
++ * is used only for files. Protected by 'lock'.
+ */
+ u64 new_delalloc_bytes;
+ /*
+@@ -172,15 +185,15 @@ struct btrfs_inode {
+ };
+
+ /*
+- * total number of bytes pending defrag, used by stat to check whether
+- * it needs COW.
++ * Total number of bytes pending defrag, used by stat to check whether
++ * it needs COW. Protected by 'lock'.
+ */
+ u64 defrag_bytes;
+
+ /*
+- * the size of the file stored in the metadata on disk. data=ordered
++ * The size of the file stored in the metadata on disk. data=ordered
+ * means the in-memory i_size might be larger than the size on disk
+- * because not all the blocks are written yet.
++ * because not all the blocks are written yet. Protected by 'lock'.
+ */
+ u64 disk_i_size;
+
+@@ -214,7 +227,7 @@ struct btrfs_inode {
+
+ /*
+ * Number of bytes outstanding that are going to need csums. This is
+- * used in ENOSPC accounting.
++ * used in ENOSPC accounting. Protected by 'lock'.
+ */
+ u64 csum_bytes;
+
+@@ -223,14 +236,6 @@ struct btrfs_inode {
+ /* Read-only compatibility flags, upper half of inode_item::flags */
+ u32 ro_flags;
+
+- /*
+- * Counters to keep track of the number of extent item's we may use due
+- * to delalloc and such. outstanding_extents is the number of extent
+- * items we think we'll end up using, and reserved_extents is the number
+- * of extent items we've reserved metadata for.
+- */
+- unsigned outstanding_extents;
+-
+ struct btrfs_block_rsv block_rsv;
+
+ /*
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 8818ed5c390faa..e6acf09a1507c2 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -140,16 +140,16 @@ static int compression_decompress_bio(struct list_head *ws,
+ }
+
+ static int compression_decompress(int type, struct list_head *ws,
+- const u8 *data_in, struct page *dest_page,
+- unsigned long start_byte, size_t srclen, size_t destlen)
++ const u8 *data_in, struct page *dest_page,
++ unsigned long dest_pgoff, size_t srclen, size_t destlen)
+ {
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
+- start_byte, srclen, destlen);
++ dest_pgoff, srclen, destlen);
+ case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
+- start_byte, srclen, destlen);
++ dest_pgoff, srclen, destlen);
+ case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
+- start_byte, srclen, destlen);
++ dest_pgoff, srclen, destlen);
+ case BTRFS_COMPRESS_NONE:
+ default:
+ /*
+@@ -420,6 +420,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
+ put_page(page);
+ break;
+ }
++ add_size = min(em->start + em->len, page_end + 1) - cur;
+ free_extent_map(em);
+
+ if (page->index == end_index) {
+@@ -432,7 +433,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
+ }
+ }
+
+- add_size = min(em->start + em->len, page_end + 1) - cur;
+ ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
+ if (ret != add_size) {
+ unlock_extent(tree, cur, page_end, NULL);
+@@ -941,14 +941,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
+ * start_byte tells us the offset into the compressed data we're interested in
+ */
+ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
+- unsigned long start_byte, size_t srclen, size_t destlen)
++ unsigned long dest_pgoff, size_t srclen, size_t destlen)
+ {
++ struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+ struct list_head *workspace;
++ const u32 sectorsize = fs_info->sectorsize;
+ int ret;
+
++ /*
++ * The full destination page range should not exceed the page size.
++ * And the @destlen should not exceed sectorsize, as this is only called for
++ * inline file extents, which should not exceed sectorsize.
++ */
++ ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
++
+ workspace = get_workspace(type, 0);
+ ret = compression_decompress(type, workspace, data_in, dest_page,
+- start_byte, srclen, destlen);
++ dest_pgoff, srclen, destlen);
+ put_workspace(type, workspace);
+
+ return ret;
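+
+The add_ra_bio_pages() hunk earlier in this file (moving the add_size
+computation before free_extent_map()) is a use-after-free fix: every
+value derived from an object must be read before its reference is
+dropped. The dest_pgoff changes, by contrast, only rename the offset
+parameter. The rule in miniature, with illustrative names:
+
+	static u64 demo_span(struct demo_map *em, u64 cur, u64 page_end)
+	{
+		/* read every needed field before dropping the reference */
+		u64 add_size = min(em->start + em->len, page_end + 1) - cur;
+
+		demo_free_map(em);	/* em may be freed from here on */
+		return add_size;
+	}
+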
+diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
+index 03bb9d143fa75d..609865c940658c 100644
+--- a/fs/btrfs/compression.h
++++ b/fs/btrfs/compression.h
+@@ -143,7 +143,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
+ unsigned long *total_in, unsigned long *total_out);
+ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+ int zlib_decompress(struct list_head *ws, const u8 *data_in,
+- struct page *dest_page, unsigned long start_byte, size_t srclen,
++ struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ size_t destlen);
+ struct list_head *zlib_alloc_workspace(unsigned int level);
+ void zlib_free_workspace(struct list_head *ws);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 617d4827eec265..2eb4e03080ac9b 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -359,7 +359,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+
+- btrfs_mark_buffer_dirty(cow);
++ btrfs_mark_buffer_dirty(trans, cow);
+ *cow_ret = cow;
+ return 0;
+ }
+@@ -451,8 +451,16 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ }
+
+ owner = btrfs_header_owner(buf);
+- BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
+- !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
++ if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
++ !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
++ btrfs_crit(fs_info,
++"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
++ buf->start, btrfs_header_level(buf),
++ btrfs_root_id(root), refs, flags);
++ ret = -EUCLEAN;
++ btrfs_abort_transaction(trans, ret);
++ return ret;
++ }
+
+ if (refs > 1) {
+ if ((owner == root->root_key.objectid ||
+@@ -627,7 +635,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ cow->start);
+ btrfs_set_node_ptr_generation(parent, parent_slot,
+ trans->transid);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ if (last_ref) {
+ ret = btrfs_tree_mod_log_free_eb(buf);
+ if (ret) {
+@@ -643,7 +651,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ if (unlock_orig)
+ btrfs_tree_unlock(buf);
+ free_extent_buffer_stale(buf);
+- btrfs_mark_buffer_dirty(cow);
++ btrfs_mark_buffer_dirty(trans, cow);
+ *cow_ret = cow;
+ return 0;
+ }
+@@ -1197,7 +1205,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+ btrfs_set_node_key(parent, &right_key, pslot + 1);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ }
+ }
+ if (btrfs_header_nritems(mid) == 1) {
+@@ -1255,7 +1263,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+ btrfs_set_node_key(parent, &mid_key, pslot);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ }
+
+ /* update the path */
+@@ -1362,7 +1370,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ btrfs_set_node_key(parent, &disk_key, pslot);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ if (btrfs_header_nritems(left) > orig_slot) {
+ path->nodes[level] = left;
+ path->slots[level + 1] -= 1;
+@@ -1422,7 +1430,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ btrfs_set_node_key(parent, &disk_key, pslot + 1);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+
+ if (btrfs_header_nritems(mid) <= orig_slot) {
+ path->nodes[level] = right;
+@@ -2678,7 +2686,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
+ * higher levels
+ *
+ */
+-static void fixup_low_keys(struct btrfs_path *path,
++static void fixup_low_keys(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ struct btrfs_disk_key *key, int level)
+ {
+ int i;
+@@ -2695,7 +2704,7 @@ static void fixup_low_keys(struct btrfs_path *path,
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ BUG_ON(ret < 0);
+ btrfs_set_node_key(t, key, tslot);
+- btrfs_mark_buffer_dirty(path->nodes[i]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[i]);
+ if (tslot != 0)
+ break;
+ }
+@@ -2707,10 +2716,11 @@ static void fixup_low_keys(struct btrfs_path *path,
+ * This function isn't completely safe. It's the caller's responsibility
+ * that the new key won't break the order
+ */
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *new_key)
+ {
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_disk_key disk_key;
+ struct extent_buffer *eb;
+ int slot;
+@@ -2748,9 +2758,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+
+ btrfs_cpu_key_to_disk(&disk_key, new_key);
+ btrfs_set_item_key(eb, &disk_key, slot);
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+ if (slot == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ /*
+@@ -2881,8 +2891,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
+ }
+ btrfs_set_header_nritems(src, src_nritems - push_items);
+ btrfs_set_header_nritems(dst, dst_nritems + push_items);
+- btrfs_mark_buffer_dirty(src);
+- btrfs_mark_buffer_dirty(dst);
++ btrfs_mark_buffer_dirty(trans, src);
++ btrfs_mark_buffer_dirty(trans, dst);
+
+ return ret;
+ }
+@@ -2957,8 +2967,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(src, src_nritems - push_items);
+ btrfs_set_header_nritems(dst, dst_nritems + push_items);
+
+- btrfs_mark_buffer_dirty(src);
+- btrfs_mark_buffer_dirty(dst);
++ btrfs_mark_buffer_dirty(trans, src);
++ btrfs_mark_buffer_dirty(trans, dst);
+
+ return ret;
+ }
+@@ -3007,7 +3017,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+
+ btrfs_set_node_ptr_generation(c, 0, lower_gen);
+
+- btrfs_mark_buffer_dirty(c);
++ btrfs_mark_buffer_dirty(trans, c);
+
+ old = root->node;
+ ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
+@@ -3079,7 +3089,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
+ WARN_ON(trans->transid == 0);
+ btrfs_set_node_ptr_generation(lower, slot, trans->transid);
+ btrfs_set_header_nritems(lower, nritems + 1);
+- btrfs_mark_buffer_dirty(lower);
++ btrfs_mark_buffer_dirty(trans, lower);
+
+ return 0;
+ }
+@@ -3158,8 +3168,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(split, c_nritems - mid);
+ btrfs_set_header_nritems(c, mid);
+
+- btrfs_mark_buffer_dirty(c);
+- btrfs_mark_buffer_dirty(split);
++ btrfs_mark_buffer_dirty(trans, c);
++ btrfs_mark_buffer_dirty(trans, split);
+
+ ret = insert_ptr(trans, path, &disk_key, split->start,
+ path->slots[level + 1] + 1, level + 1);
+@@ -3325,15 +3335,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(left, left_nritems);
+
+ if (left_nritems)
+- btrfs_mark_buffer_dirty(left);
++ btrfs_mark_buffer_dirty(trans, left);
+ else
+ btrfs_clear_buffer_dirty(trans, left);
+
+- btrfs_mark_buffer_dirty(right);
++ btrfs_mark_buffer_dirty(trans, right);
+
+ btrfs_item_key(right, &disk_key, 0);
+ btrfs_set_node_key(upper, &disk_key, slot + 1);
+- btrfs_mark_buffer_dirty(upper);
++ btrfs_mark_buffer_dirty(trans, upper);
+
+ /* then fixup the leaf pointer in the path */
+ if (path->slots[0] >= left_nritems) {
+@@ -3545,14 +3555,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
+ btrfs_set_token_item_offset(&token, i, push_space);
+ }
+
+- btrfs_mark_buffer_dirty(left);
++ btrfs_mark_buffer_dirty(trans, left);
+ if (right_nritems)
+- btrfs_mark_buffer_dirty(right);
++ btrfs_mark_buffer_dirty(trans, right);
+ else
+ btrfs_clear_buffer_dirty(trans, right);
+
+ btrfs_item_key(right, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+
+ /* then fixup the leaf pointer in the path */
+ if (path->slots[0] < push_items) {
+@@ -3683,8 +3693,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
+ if (ret < 0)
+ return ret;
+
+- btrfs_mark_buffer_dirty(right);
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, right);
++ btrfs_mark_buffer_dirty(trans, l);
+ BUG_ON(path->slots[0] != slot);
+
+ if (mid <= slot) {
+@@ -3925,7 +3935,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
+ path->nodes[0] = right;
+ path->slots[0] = 0;
+ if (path->slots[1] == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ /*
+ * We create a new leaf 'right' for the required ins_len and
+@@ -4024,7 +4034,8 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+
+-static noinline int split_item(struct btrfs_path *path,
++static noinline int split_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ const struct btrfs_key *new_key,
+ unsigned long split_offset)
+ {
+@@ -4083,7 +4094,7 @@ static noinline int split_item(struct btrfs_path *path,
+ write_extent_buffer(leaf, buf + split_offset,
+ btrfs_item_ptr_offset(leaf, slot),
+ item_size - split_offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ BUG_ON(btrfs_leaf_free_space(leaf) < 0);
+ kfree(buf);
+@@ -4117,7 +4128,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ if (ret)
+ return ret;
+
+- ret = split_item(path, new_key, split_offset);
++ ret = split_item(trans, path, new_key, split_offset);
+ return ret;
+ }
+
+@@ -4127,7 +4138,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ * off the end of the item or if we shift the item to chop bytes off
+ * the front.
+ */
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 new_size, int from_end)
+ {
+ int slot;
+ struct extent_buffer *leaf;
+@@ -4203,11 +4215,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
+ btrfs_set_item_key(leaf, &disk_key, slot);
+ if (slot == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ btrfs_set_item_size(leaf, slot, new_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4218,7 +4230,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ /*
+ * make the item pointed to by the path bigger, data_size is the added size.
+ */
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 data_size)
+ {
+ int slot;
+ struct extent_buffer *leaf;
+@@ -4268,7 +4281,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ data_end = old_data;
+ old_size = btrfs_item_size(leaf, slot);
+ btrfs_set_item_size(leaf, slot, old_size + data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4279,6 +4292,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ /*
+ * Make space in the node before inserting one or more items.
+ *
++ * @trans: transaction handle
+ * @root: root we are inserting items to
+ * @path: points to the leaf/slot where we are going to insert new items
+ * @batch: information about the batch of items to insert
+@@ -4286,7 +4300,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ * Main purpose is to save stack depth by doing the bulk of the work in a
+ * function that doesn't call btrfs_search_slot
+ */
+-static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
++static void setup_items_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root, struct btrfs_path *path,
+ const struct btrfs_item_batch *batch)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -4306,7 +4321,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ */
+ if (path->slots[0] == 0) {
+ btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ btrfs_unlock_up_safe(path, 1);
+
+@@ -4365,7 +4380,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ }
+
+ btrfs_set_header_nritems(leaf, nritems + batch->nr);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4376,12 +4391,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ /*
+ * Insert a new item into a leaf.
+ *
++ * @trans: Transaction handle.
+ * @root: The root of the btree.
+ * @path: A path pointing to the target leaf and slot.
+ * @key: The key of the new item.
+ * @data_size: The size of the data associated with the new key.
+ */
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ u32 data_size)
+@@ -4393,7 +4410,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
+ batch.total_data_size = data_size;
+ batch.nr = 1;
+
+- setup_items_for_insert(root, path, &batch);
++ setup_items_for_insert(trans, root, path, &batch);
+ }
+
+ /*
+@@ -4419,7 +4436,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
+ slot = path->slots[0];
+ BUG_ON(slot < 0);
+
+- setup_items_for_insert(root, path, batch);
++ setup_items_for_insert(trans, root, path, batch);
+ return 0;
+ }
+
+@@ -4444,7 +4461,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ leaf = path->nodes[0];
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ write_extent_buffer(leaf, data, ptr, data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ btrfs_free_path(path);
+ return ret;
+@@ -4475,7 +4492,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ return ret;
+
+ path->slots[0]++;
+- btrfs_setup_item_for_insert(root, path, new_key, item_size);
++ btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
+ leaf = path->nodes[0];
+ memcpy_extent_buffer(leaf,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+@@ -4533,9 +4550,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_disk_key disk_key;
+
+ btrfs_node_key(parent, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, level + 1);
++ fixup_low_keys(trans, path, &disk_key, level + 1);
+ }
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ return 0;
+ }
+
+@@ -4632,7 +4649,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_disk_key disk_key;
+
+ btrfs_item_key(leaf, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ /*
+@@ -4697,11 +4714,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ * dirtied this buffer
+ */
+ if (path->nodes[0] == leaf)
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ free_extent_buffer(leaf);
+ }
+ } else {
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ }
+ return ret;
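
The ctree.c hunks above all follow one pattern: every helper that can dirty a leaf now receives the transaction handle, so the dirtying site can be checked against the running transaction instead of trusting its caller. A minimal sketch of that check, using simplified stand-in types rather than the real btrfs structures:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct fs_info { uint64_t generation; };
struct trans_handle { struct fs_info *fs_info; uint64_t transid; };
struct extent_buffer { uint64_t generation; int dirty; };

/* Hypothetical analogue of btrfs_mark_buffer_dirty(trans, buf): with the
 * handle available, the helper can verify it runs inside the one active
 * transaction before touching the buffer. */
static void mark_buffer_dirty(struct trans_handle *trans,
                              struct extent_buffer *buf)
{
	/* The handle must belong to the currently running transaction. */
	assert(trans->transid == trans->fs_info->generation);

	/* A buffer stamped with an older generation indicates corruption;
	 * the kernel warns and aborts the transaction at this point. */
	if (buf->generation != trans->fs_info->generation) {
		fprintf(stderr, "transid mismatch: buffer %" PRIu64 ", running %" PRIu64 "\n",
			buf->generation, trans->fs_info->generation);
		return;
	}
	buf->dirty = 1;
}

The payoff appears later in this patch, where btrfs_mark_buffer_dirty() itself gains exactly this assertion and aborts the transaction on a generation mismatch rather than only warning.
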
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index ff40acd63a3743..f7bb4c34b984b3 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -445,6 +445,8 @@ struct btrfs_file_private {
+ void *filldir_buf;
+ u64 last_index;
+ struct extent_state *llseek_cached_state;
++ /* Task that allocated this structure. */
++ struct task_struct *owner_task;
+ };
+
+ static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
+@@ -518,7 +520,7 @@ int btrfs_previous_item(struct btrfs_root *root,
+ int type);
+ int btrfs_previous_extent_item(struct btrfs_root *root,
+ struct btrfs_path *path, u64 min_objectid);
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *new_key);
+ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
+@@ -545,8 +547,10 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf);
+ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, int level, int slot);
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 data_size);
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 new_size, int from_end);
+ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+@@ -610,7 +614,8 @@ struct btrfs_item_batch {
+ int nr;
+ };
+
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ u32 data_size);
+diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
+index f2ff4cbe8656b3..e1475dfdf7a8b8 100644
+--- a/fs/btrfs/defrag.c
++++ b/fs/btrfs/defrag.c
+@@ -416,7 +416,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+ * keep_locks set and lowest_level is 1, regardless of the value of
+ * path->slots[1].
+ */
+- BUG_ON(path->locks[1] == 0);
++ ASSERT(path->locks[1] != 0);
+ ret = btrfs_realloc_node(trans, root,
+ path->nodes[1], 0,
+ &last_ret,
+@@ -903,7 +903,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
+ goto add;
+
+ /* Skip too large extent */
+- if (range_len >= extent_thresh)
++ if (em->len >= extent_thresh)
+ goto next;
+
+ /*
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 427abaf608b8ce..4a7aefa5f9cf92 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -199,7 +199,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
+ start = round_down(start, fs_info->sectorsize);
+
+ btrfs_free_reserved_data_space_noquota(fs_info, len);
+- btrfs_qgroup_free_data(inode, reserved, start, len);
++ btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
+ }
+
+ /*
+@@ -245,7 +245,6 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+ u64 reserve_size = 0;
+ u64 qgroup_rsv_size = 0;
+- u64 csum_leaves;
+ unsigned outstanding_extents;
+
+ lockdep_assert_held(&inode->lock);
+@@ -260,10 +259,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ outstanding_extents);
+ reserve_size += btrfs_calc_metadata_size(fs_info, 1);
+ }
+- csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
+- inode->csum_bytes);
+- reserve_size += btrfs_calc_insert_metadata_size(fs_info,
+- csum_leaves);
++ if (!(inode->flags & BTRFS_INODE_NODATASUM)) {
++ u64 csum_leaves;
++
++ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
++ reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves);
++ }
+ /*
+ * For qgroup rsv, the calculation is very simple:
+ * account one nodesize for each outstanding extent
+@@ -278,14 +279,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ spin_unlock(&block_rsv->lock);
+ }
+
+-static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
++static void calc_inode_reservations(struct btrfs_inode *inode,
+ u64 num_bytes, u64 disk_num_bytes,
+ u64 *meta_reserve, u64 *qgroup_reserve)
+ {
++ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ u64 nr_extents = count_max_extents(fs_info, num_bytes);
+- u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
++ u64 csum_leaves;
+ u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
+
++ if (inode->flags & BTRFS_INODE_NODATASUM)
++ csum_leaves = 0;
++ else
++ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
++
+ *meta_reserve = btrfs_calc_insert_metadata_size(fs_info,
+ nr_extents + csum_leaves);
+
+@@ -322,9 +329,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ } else {
+ if (current->journal_info)
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+- if (btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
+ }
+
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+@@ -340,7 +344,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ * everything out and try again, which is bad. This way we just
+ * over-reserve slightly, and clean up the mess when we are done.
+ */
+- calc_inode_reservations(fs_info, num_bytes, disk_num_bytes,
++ calc_inode_reservations(inode, num_bytes, disk_num_bytes,
+ &meta_reserve, &qgroup_reserve);
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true,
+ noflush);
+@@ -361,7 +365,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ nr_extents = count_max_extents(fs_info, num_bytes);
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, nr_extents);
+- inode->csum_bytes += disk_num_bytes;
++ if (!(inode->flags & BTRFS_INODE_NODATASUM))
++ inode->csum_bytes += disk_num_bytes;
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ spin_unlock(&inode->lock);
+
+@@ -395,7 +400,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
+
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+ spin_lock(&inode->lock);
+- inode->csum_bytes -= num_bytes;
++ if (!(inode->flags & BTRFS_INODE_NODATASUM))
++ inode->csum_bytes -= num_bytes;
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ spin_unlock(&inode->lock);
+
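
The delalloc-space.c hunks above stop accounting checksum leaves for inodes flagged BTRFS_INODE_NODATASUM, since no checksum items will ever be inserted for them and the reservation would only over-reserve metadata. A rough userspace model of the adjusted calculation; the constants and the leaf-counting formula here are illustrative, not the kernel's exact helpers:

#include <stdbool.h>
#include <stdint.h>

#define NODESIZE       16384u /* illustrative node size */
#define CSUMS_PER_LEAF   512u /* illustrative leaf capacity */

/* Rough model of btrfs_csum_bytes_to_leaves(): how many leaves the checksum
 * items for @bytes of data would need, assuming one csum per 4K sector. */
static uint64_t csum_bytes_to_leaves(uint64_t bytes)
{
	uint64_t csums = bytes / 4096;

	return (csums + CSUMS_PER_LEAF - 1) / CSUMS_PER_LEAF;
}

static uint64_t calc_meta_reserve(uint64_t disk_num_bytes, uint64_t nr_extents,
                                  bool nodatasum)
{
	/* NODATASUM inodes never get checksum items, so skip those leaves. */
	uint64_t csum_leaves = nodatasum ? 0 : csum_bytes_to_leaves(disk_num_bytes);

	return (nr_extents + csum_leaves) * NODESIZE;
}
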
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 90aaedce1548a4..32c5f5a8a0e93d 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -425,8 +425,6 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+
+ delayed_root = delayed_node->root->fs_info->delayed_root;
+
+- BUG_ON(!delayed_root);
+-
+ if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
+ root = &delayed_node->ins_root;
+ else
+@@ -975,7 +973,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+
+ if (delayed_node &&
+ test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
+- BUG_ON(!delayed_node->root);
++ ASSERT(delayed_node->root);
+ clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
+ delayed_node->count--;
+
+@@ -1030,7 +1028,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item);
+ write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+ sizeof(struct btrfs_inode_item));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+ goto out;
+@@ -1120,6 +1118,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ if (ret)
+ return ret;
+
++ ret = btrfs_record_root_in_trans(trans, node->root);
++ if (ret)
++ return ret;
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ return ret;
+ }
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index fff22ed55c428c..8400e212e3304b 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -442,7 +442,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
+ dev_replace->item_needs_writeback = 0;
+ up_write(&dev_replace->rwsem);
+
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+@@ -726,6 +726,23 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
+ return ret;
+ }
+
++static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
++{
++ if (args->start.srcdevid == 0) {
++ if (memchr(args->start.srcdev_name, 0,
++ sizeof(args->start.srcdev_name)) == NULL)
++ return -ENAMETOOLONG;
++ } else {
++ args->start.srcdev_name[0] = 0;
++ }
++
++ if (memchr(args->start.tgtdev_name, 0,
++ sizeof(args->start.tgtdev_name)) == NULL)
++ return -ENAMETOOLONG;
++
++ return 0;
++}
++
+ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_dev_replace_args *args)
+ {
+@@ -738,10 +755,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
+ default:
+ return -EINVAL;
+ }
+-
+- if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
+- args->start.tgtdev_name[0] == '\0')
+- return -EINVAL;
++ ret = btrfs_check_replace_dev_names(args);
++ if (ret < 0)
++ return ret;
+
+ ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
+ args->start.srcdevid,
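
btrfs_check_replace_dev_names() above replaces the old first-byte test with a whole-buffer scan: a device name coming in from the ioctl is only safe to use if a NUL terminator exists somewhere inside the fixed-size array. The same idiom in isolation, with a made-up buffer size standing in for the ioctl field:

#include <errno.h>
#include <string.h>

#define DEV_NAME_MAX 1024 /* stand-in for the fixed ioctl field size */

/* Returns 0 if @name contains a NUL terminator within the buffer,
 * -ENAMETOOLONG otherwise. Without this check, later string handling
 * could read past the end of the user-supplied array. */
static int check_dev_name(const char name[DEV_NAME_MAX])
{
	if (memchr(name, 0, DEV_NAME_MAX) == NULL)
		return -ENAMETOOLONG;
	return 0;
}
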
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 082eb0e1959819..9c07d5c3e5ad29 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ if (di)
+ return ERR_PTR(-EEXIST);
+- btrfs_extend_item(path, data_size);
++ btrfs_extend_item(trans, path, data_size);
+ } else if (ret < 0)
+ return ERR_PTR(ret);
+ WARN_ON(ret > 0);
+@@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+
+ write_extent_buffer(leaf, name, name_ptr, name_len);
+ write_extent_buffer(leaf, data, data_ptr, data_len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ return ret;
+ }
+@@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+ name_ptr = (unsigned long)(dir_item + 1);
+
+ write_extent_buffer(leaf, name->name, name_ptr, name->len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ second_insert:
+ /* FIXME, use some real flag for selecting the extra index */
+@@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+ start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ item_len - (ptr + sub_item_len - start));
+- btrfs_truncate_item(path, item_len - sub_item_len, 1);
++ btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
+ }
+ return ret;
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 68f60d50e1fd0c..8ec411eb9c9b0c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -867,7 +867,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ }
+
+ root->node = leaf;
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ root->commit_root = btrfs_root_node(root);
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+@@ -942,7 +942,7 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
+
+ root->node = leaf;
+
+- btrfs_mark_buffer_dirty(root->node);
++ btrfs_mark_buffer_dirty(trans, root->node);
+ btrfs_tree_unlock(root->node);
+
+ return 0;
+@@ -1282,12 +1282,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
+ *
+ * @objectid: root id
+ * @anon_dev: preallocated anonymous block device number for new roots,
+- * pass 0 for new allocation.
++ * pass NULL for a new allocation.
+ * @check_ref: whether to check root item references, If true, return -ENOENT
+ * for orphan roots
+ */
+ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+- u64 objectid, dev_t anon_dev,
++ u64 objectid, dev_t *anon_dev,
+ bool check_ref)
+ {
+ struct btrfs_root *root;
+@@ -1311,8 +1311,17 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ again:
+ root = btrfs_lookup_fs_root(fs_info, objectid);
+ if (root) {
+- /* Shouldn't get preallocated anon_dev for cached roots */
+- ASSERT(!anon_dev);
++ /*
++		 * Some other caller may have already read out the newly
++		 * inserted subvolume (for things like backref walking, etc.).
++		 * Not that common, but still possible. In that case, we just
++		 * need to free the anon_dev.
++ */
++ if (unlikely(anon_dev && *anon_dev)) {
++ free_anon_bdev(*anon_dev);
++ *anon_dev = 0;
++ }
++
+ if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
+ btrfs_put_root(root);
+ return ERR_PTR(-ENOENT);
+@@ -1332,7 +1341,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ goto fail;
+ }
+
+- ret = btrfs_init_fs_root(root, anon_dev);
++ ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
+ if (ret)
+ goto fail;
+
+@@ -1368,7 +1377,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+ * and once again by our caller.
+ */
+- if (anon_dev)
++ if (anon_dev && *anon_dev)
+ root->anon_dev = 0;
+ btrfs_put_root(root);
+ return ERR_PTR(ret);
+@@ -1384,7 +1393,7 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ u64 objectid, bool check_ref)
+ {
+- return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
++ return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
+ }
+
+ /*
+@@ -1392,11 +1401,11 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ * the anonymous block device id
+ *
+ * @objectid: tree objectid
+- * @anon_dev: if zero, allocate a new anonymous block device or use the
+- * parameter value
++ * @anon_dev: if NULL, allocate a new anonymous block device; otherwise
++ *            use the device number it points to
+ */
+ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+- u64 objectid, dev_t anon_dev)
++ u64 objectid, dev_t *anon_dev)
+ {
+ return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
+ }
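
Passing anon_dev by pointer, as the hunks above do, lets btrfs_get_root_ref() both consume a preallocated device number and report that consumption back to the caller, closing the double-free window when a racing lookup already inserted the root. A compact sketch of that ownership hand-off, with simplified types in place of dev_t and struct btrfs_root:

#include <stddef.h>

typedef unsigned int devnum_t; /* stand-in for the kernel's dev_t */

struct root { devnum_t anon_dev; };

static void free_anon_dev(devnum_t dev)
{
	(void)dev; /* release the number back to its allocator */
}

/* If a concurrent lookup already populated the cache, the preallocated
 * device number is unused: free it and zero it through the pointer so the
 * caller knows it was consumed and will not free it a second time. */
static void adopt_or_release(struct root *cached, devnum_t *anon_dev)
{
	if (cached) {
		if (anon_dev && *anon_dev) {
			free_anon_dev(*anon_dev);
			*anon_dev = 0;
		}
		return;
	}
	/* ...otherwise a newly created root takes ownership of *anon_dev. */
}
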
+@@ -2791,6 +2800,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
+ int ret;
+
+ fs_info->sb = sb;
++ /* Temporary fixed values for block size until we read the superblock. */
+ sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
+ sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
+
+@@ -3197,6 +3207,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ goto fail_alloc;
+ }
+
++ btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
+ /*
+ * Verify the type first, if that or the checksum value are
+ * corrupted, we'll find out
+@@ -3329,6 +3340,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
+ sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
+
++ /* Update the values for the current filesystem. */
+ sb->s_blocksize = sectorsize;
+ sb->s_blocksize_bits = blksize_bits(sectorsize);
+ memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
+@@ -4301,6 +4313,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ /* clear out the rbtree of defraggable inodes */
+ btrfs_cleanup_defrag_inodes(fs_info);
+
++ /*
++ * Wait for any fixup workers to complete.
++ * If we don't wait for them here and they are still running by the time
++ * we call kthread_stop() against the cleaner kthread further below, we
++	 * get a use-after-free on the cleaner because the fixup worker adds an
++ * inode to the list of delayed iputs and then attempts to wakeup the
++	 * cleaner kthread, which was already stopped and destroyed. We already
++	 * parked the cleaner, but below we run all pending delayed iputs.
++ */
++ btrfs_flush_workqueue(fs_info->fixup_workers);
++
+ /*
+ * After we parked the cleaner kthread, ordered extents may have
+ * completed and created new delayed iputs. If one of the async reclaim
+@@ -4423,7 +4446,8 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ btrfs_close_devices(fs_info->fs_devices);
+ }
+
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++ struct extent_buffer *buf)
+ {
+ struct btrfs_fs_info *fs_info = buf->fs_info;
+ u64 transid = btrfs_header_generation(buf);
+@@ -4437,10 +4461,14 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
+ return;
+ #endif
++ /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
++ ASSERT(trans->transid == fs_info->generation);
+ btrfs_assert_tree_write_locked(buf);
+- if (transid != fs_info->generation)
++ if (transid != fs_info->generation) {
+ WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
+ buf->start, transid, fs_info->generation);
++ btrfs_abort_transaction(trans, -EUCLEAN);
++ }
+ set_extent_buffer_dirty(buf);
+ #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+ /*
+@@ -4579,18 +4607,10 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ struct btrfs_fs_info *fs_info)
+ {
+ struct rb_node *node;
+- struct btrfs_delayed_ref_root *delayed_refs;
++ struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
+ struct btrfs_delayed_ref_node *ref;
+
+- delayed_refs = &trans->delayed_refs;
+-
+ spin_lock(&delayed_refs->lock);
+- if (atomic_read(&delayed_refs->num_entries) == 0) {
+- spin_unlock(&delayed_refs->lock);
+- btrfs_debug(fs_info, "delayed_refs has NO entry");
+- return;
+- }
+-
+ while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
+ struct btrfs_delayed_ref_head *head;
+ struct rb_node *n;
+@@ -4830,6 +4850,32 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
+ }
+ }
+
++static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
++{
++ struct btrfs_root *gang[8];
++ int i;
++ int ret;
++
++ spin_lock(&fs_info->fs_roots_radix_lock);
++ while (1) {
++ ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
++ (void **)gang, 0,
++ ARRAY_SIZE(gang),
++ BTRFS_ROOT_TRANS_TAG);
++ if (ret == 0)
++ break;
++ for (i = 0; i < ret; i++) {
++ struct btrfs_root *root = gang[i];
++
++ btrfs_qgroup_free_meta_all_pertrans(root);
++ radix_tree_tag_clear(&fs_info->fs_roots_radix,
++ (unsigned long)root->root_key.objectid,
++ BTRFS_ROOT_TRANS_TAG);
++ }
++ }
++ spin_unlock(&fs_info->fs_roots_radix_lock);
++}
++
+ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ struct btrfs_fs_info *fs_info)
+ {
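
btrfs_free_all_qgroup_pertrans() above uses a classic batched-iteration shape: fetch a bounded gang of tagged entries, process them, clear the tag so they are not returned again, and repeat until the lookup comes back empty. A self-contained toy version of the same loop, with a plain array standing in for the radix tree and its tag bits:

#include <stdbool.h>
#include <stddef.h>

#define NR_ROOTS 100

static bool tagged[NR_ROOTS]; /* toy model of the radix tree's tag bits */

/* Toy gang lookup: collect up to @max tagged indices, the way
 * radix_tree_gang_lookup_tag() collects tagged roots. */
static size_t gang_lookup_tag(size_t *results, size_t max)
{
	size_t n = 0;

	for (size_t i = 0; i < NR_ROOTS && n < max; i++)
		if (tagged[i])
			results[n++] = i;
	return n;
}

/* Batched iteration: small fixed-size batches keep the lock hold time and
 * stack usage bounded; clearing the tag guarantees the loop terminates. */
static void process_all_tagged(void (*process)(size_t))
{
	size_t gang[8];

	for (;;) {
		size_t n = gang_lookup_tag(gang, 8);

		if (n == 0)
			break;
		for (size_t i = 0; i < n; i++) {
			process(gang[i]);
			tagged[gang[i]] = false;
		}
	}
}
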
+@@ -4858,6 +4904,8 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ EXTENT_DIRTY);
+ btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
+
++ btrfs_free_all_qgroup_pertrans(fs_info);
++
+ cur_trans->state =TRANS_STATE_COMPLETED;
+ wake_up(&cur_trans->commit_wait);
+ }
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index 02b645744a8220..fca52385830cff 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -64,7 +64,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
+ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ u64 objectid, bool check_ref);
+ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+- u64 objectid, dev_t anon_dev);
++ u64 objectid, dev_t *anon_dev);
+ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ u64 objectid);
+@@ -104,7 +104,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
+ }
+
+ void btrfs_put_root(struct btrfs_root *root);
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++ struct extent_buffer *buf);
+ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+ int atomic);
+ int btrfs_read_extent_buffer(struct extent_buffer *buf,
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 744a02b7fd6717..203e5964c9b0fc 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -174,8 +174,15 @@ struct dentry *btrfs_get_parent(struct dentry *child)
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto fail;
++ if (ret == 0) {
++ /*
++		 * Key with offset of -1 found; there would have to exist an
++		 * inode with such a number or a root with such an id.
++ */
++ ret = -EUCLEAN;
++ goto fail;
++ }
+
+- BUG_ON(ret == 0); /* Key with offset of -1 found */
+ if (path->slots[0] == 0) {
+ ret = -ENOENT;
+ goto fail;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index fc313fce5bbdc7..b3680e1c7054c4 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -575,7 +575,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
+ }
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ ret = 0;
+ fail:
+ btrfs_release_path(path);
+@@ -623,7 +623,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
+ else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
+ btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ return ret;
+ }
+@@ -976,7 +976,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
+ * helper to add new inline back ref
+ */
+ static noinline_for_stack
+-void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
++void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ u64 parent, u64 root_objectid,
+@@ -999,7 +999,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ type = extent_ref_type(parent, owner);
+ size = btrfs_extent_inline_ref_size(type);
+
+- btrfs_extend_item(path, size);
++ btrfs_extend_item(trans, path, size);
+
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ refs = btrfs_extent_refs(leaf, ei);
+@@ -1033,7 +1033,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ } else {
+ btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+
+ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+@@ -1066,7 +1066,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+ /*
+ * helper to update/remove inline back ref
+ */
+-static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
++static noinline_for_stack int update_inline_extent_backref(
++ struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ int refs_to_mod,
+ struct btrfs_delayed_extent_op *extent_op)
+@@ -1174,9 +1176,9 @@ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *pa
+ memmove_extent_buffer(leaf, ptr, ptr + size,
+ end - ptr - size);
+ item_size -= size;
+- btrfs_truncate_item(path, item_size, 1);
++ btrfs_truncate_item(trans, path, item_size, 1);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ return 0;
+ }
+
+@@ -1206,9 +1208,10 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
+ bytenr, num_bytes, root_objectid, path->slots[0]);
+ return -EUCLEAN;
+ }
+- ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
++ ret = update_inline_extent_backref(trans, path, iref,
++ refs_to_add, extent_op);
+ } else if (ret == -ENOENT) {
+- setup_inline_extent_backref(trans->fs_info, path, iref, parent,
++ setup_inline_extent_backref(trans, path, iref, parent,
+ root_objectid, owner, offset,
+ refs_to_add, extent_op);
+ ret = 0;
+@@ -1226,7 +1229,8 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
+
+ BUG_ON(!is_data && refs_to_drop != 1);
+ if (iref)
+- ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
++ ret = update_inline_extent_backref(trans, path, iref,
++ -refs_to_drop, NULL);
+ else if (is_data)
+ ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
+ else
+@@ -1241,7 +1245,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+ u64 bytes_left, end;
+ u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
+
+- if (WARN_ON(start != aligned_start)) {
++ /* Adjust the range to be aligned to 512B sectors if necessary. */
++ if (start != aligned_start) {
+ len -= aligned_start - start;
+ len = round_down(len, 1 << SECTOR_SHIFT);
+ start = aligned_start;
+@@ -1298,13 +1303,24 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+ bytes_left = end - start;
+ }
+
+- if (bytes_left) {
++ while (bytes_left) {
++ u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);
++
+ ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
+- bytes_left >> SECTOR_SHIFT,
++ bytes_to_discard >> SECTOR_SHIFT,
+ GFP_NOFS);
+- if (!ret)
+- *discarded_bytes += bytes_left;
++
++ if (ret) {
++ if (ret != -EOPNOTSUPP)
++ break;
++ continue;
++ }
++
++ start += bytes_to_discard;
++ bytes_left -= bytes_to_discard;
++ *discarded_bytes += bytes_to_discard;
+ }
++
+ return ret;
+ }
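
The discard hunk above turns one arbitrarily large blkdev_issue_discard() call into a loop bounded by BTRFS_MAX_DISCARD_CHUNK_SIZE per iteration, so each request stays a manageable size and progress is tracked across partial failures. Stripped of the block layer, the chunking pattern looks like this; the chunk size and issue_discard() are placeholders:

#include <stdint.h>

#define MAX_DISCARD_CHUNK (1ULL << 30) /* placeholder for the kernel constant */

/* Placeholder for blkdev_issue_discard(); always succeeds here. */
static int issue_discard(uint64_t start, uint64_t len)
{
	(void)start;
	(void)len;
	return 0;
}

static int discard_range(uint64_t start, uint64_t bytes_left,
                         uint64_t *discarded_bytes)
{
	int ret = 0;

	while (bytes_left) {
		uint64_t chunk = bytes_left < MAX_DISCARD_CHUNK
				 ? bytes_left : MAX_DISCARD_CHUNK;

		ret = issue_discard(start, chunk);
		if (ret)
			break; /* the kernel additionally tolerates -EOPNOTSUPP */

		start += chunk;
		bytes_left -= chunk;
		*discarded_bytes += chunk;
	}
	return ret;
}
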
+
+@@ -1510,7 +1526,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ if (extent_op)
+ __run_delayed_extent_op(extent_op, leaf, item);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /* now insert the actual backref */
+@@ -1678,7 +1694,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ __run_delayed_extent_op(extent_op, leaf, ei);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return err;
+@@ -2744,7 +2760,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+ readonly = true;
+ } else if (btrfs_is_zoned(fs_info)) {
+ /* Need reset before reusing in a zoned block group */
+- space_info->bytes_zone_unusable += len;
++ btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
++ len);
+ readonly = true;
+ }
+ spin_unlock(&cache->lock);
+@@ -3151,7 +3168,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ }
+ } else {
+ btrfs_set_extent_refs(leaf, ei, refs);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ if (found_extent) {
+ ret = remove_extent_backref(trans, extent_root, path,
+@@ -4134,6 +4151,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+ return 0;
+ }
+
++static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
++ struct find_free_extent_ctl *ffe_ctl)
++{
++ if (ffe_ctl->for_treelog) {
++ spin_lock(&fs_info->treelog_bg_lock);
++ if (fs_info->treelog_bg)
++ ffe_ctl->hint_byte = fs_info->treelog_bg;
++ spin_unlock(&fs_info->treelog_bg_lock);
++ } else if (ffe_ctl->for_data_reloc) {
++ spin_lock(&fs_info->relocation_bg_lock);
++ if (fs_info->data_reloc_bg)
++ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
++ spin_unlock(&fs_info->relocation_bg_lock);
++ } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
++ struct btrfs_block_group *block_group;
++
++ spin_lock(&fs_info->zone_active_bgs_lock);
++ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
++ /*
++			 * No lock is OK here because avail is monotonically
++ * decreasing, and this is just a hint.
++ */
++ u64 avail = block_group->zone_capacity - block_group->alloc_offset;
++
++ if (block_group_bits(block_group, ffe_ctl->flags) &&
++ avail >= ffe_ctl->num_bytes) {
++ ffe_ctl->hint_byte = block_group->start;
++ break;
++ }
++ }
++ spin_unlock(&fs_info->zone_active_bgs_lock);
++ }
++
++ return 0;
++}
++
+ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_space_info *space_info,
+@@ -4144,19 +4197,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ return prepare_allocation_clustered(fs_info, ffe_ctl,
+ space_info, ins);
+ case BTRFS_EXTENT_ALLOC_ZONED:
+- if (ffe_ctl->for_treelog) {
+- spin_lock(&fs_info->treelog_bg_lock);
+- if (fs_info->treelog_bg)
+- ffe_ctl->hint_byte = fs_info->treelog_bg;
+- spin_unlock(&fs_info->treelog_bg_lock);
+- }
+- if (ffe_ctl->for_data_reloc) {
+- spin_lock(&fs_info->relocation_bg_lock);
+- if (fs_info->data_reloc_bg)
+- ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+- spin_unlock(&fs_info->relocation_bg_lock);
+- }
+- return 0;
++ return prepare_allocation_zoned(fs_info, ffe_ctl);
+ default:
+ BUG();
+ }
+@@ -4659,7 +4700,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
+ }
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_free_path(path);
+
+ return alloc_reserved_extent(trans, ins->objectid, ins->offset);
+@@ -4734,7 +4775,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+ }
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
+@@ -5055,7 +5096,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+ /* We don't care about errors in readahead. */
+ if (ret < 0)
+ continue;
+- BUG_ON(refs == 0);
++
++ /*
++		 * This could be racy; it's conceivable that we raced and ended
++		 * up with a bogus refs count. If that's the case just skip it;
++		 * if we are actually corrupt we will notice when we look
++		 * everything up again with our locks.
++ */
++ if (refs == 0)
++ continue;
+
+ if (wc->stage == DROP_REFERENCE) {
+ if (refs == 1)
+@@ -5114,7 +5163,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ if (lookup_info &&
+ ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
+- BUG_ON(!path->locks[level]);
++ ASSERT(path->locks[level]);
+ ret = btrfs_lookup_extent_info(trans, fs_info,
+ eb->start, level, 1,
+ &wc->refs[level],
+@@ -5122,7 +5171,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ BUG_ON(ret == -ENOMEM);
+ if (ret)
+ return ret;
+- BUG_ON(wc->refs[level] == 0);
++ if (unlikely(wc->refs[level] == 0)) {
++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++ eb->start);
++ return -EUCLEAN;
++ }
+ }
+
+ if (wc->stage == DROP_REFERENCE) {
+@@ -5138,7 +5191,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+
+ /* wc->stage == UPDATE_BACKREF */
+ if (!(wc->flags[level] & flag)) {
+- BUG_ON(!path->locks[level]);
++ ASSERT(path->locks[level]);
+ ret = btrfs_inc_ref(trans, root, eb, 1);
+ BUG_ON(ret); /* -ENOMEM */
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+@@ -5256,8 +5309,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+ goto out_unlock;
+
+ if (unlikely(wc->refs[level - 1] == 0)) {
+- btrfs_err(fs_info, "Missing references.");
+- ret = -EIO;
++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++ bytenr);
++ ret = -EUCLEAN;
+ goto out_unlock;
+ }
+ *lookup_info = 0;
+@@ -5457,7 +5511,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ path->locks[level] = 0;
+ return ret;
+ }
+- BUG_ON(wc->refs[level] == 0);
++ if (unlikely(wc->refs[level] == 0)) {
++ btrfs_tree_unlock_rw(eb, path->locks[level]);
++ btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++ eb->start);
++ return -EUCLEAN;
++ }
+ if (wc->refs[level] == 1) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ path->locks[level] = 0;
+@@ -6127,13 +6186,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
+ continue;
+
+ ret = btrfs_trim_free_extents(device, &group_trimmed);
++
++ trimmed += group_trimmed;
+ if (ret) {
+ dev_failed++;
+ dev_ret = ret;
+ break;
+ }
+-
+- trimmed += group_trimmed;
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index caccd0376342b7..b2ae50dcca0fe0 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -675,8 +675,8 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
+ * the array will be skipped
+ *
+ * Return: 0 if all pages were able to be allocated;
+- * -ENOMEM otherwise, and the caller is responsible for freeing all
+- * non-null page pointers in the array.
++ * -ENOMEM otherwise, in which case the partially allocated pages are
++ * freed and the array slots zeroed.
+ */
+ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
+ {
+@@ -686,19 +686,14 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
+ unsigned int last = allocated;
+
+ allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
+-
+- if (allocated == nr_pages)
+- return 0;
+-
+- /*
+- * During this iteration, no page could be allocated, even
+- * though alloc_pages_bulk_array() falls back to alloc_page()
+- * if it could not bulk-allocate. So we must be out of memory.
+- */
+- if (allocated == last)
++ if (unlikely(allocated == last)) {
++ /* No progress, fail and do cleanup. */
++ for (int i = 0; i < allocated; i++) {
++ __free_page(page_array[i]);
++ page_array[i] = NULL;
++ }
+ return -ENOMEM;
+-
+- memalloc_retry_wait(GFP_NOFS);
++ }
+ }
+ return 0;
+ }
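
The btrfs_alloc_page_array() change above moves cleanup into the allocator: on failure it now frees the partial result and zeroes the slots instead of leaving that to every caller. The kernel version keeps calling the bulk allocator until it stops making progress; this simplified model keeps only the all-or-nothing contract, with malloc standing in for page allocation:

#include <stdlib.h>

/* All-or-nothing allocation of @n buffers: on failure, every buffer
 * allocated so far is freed and its slot reset to NULL, so the caller
 * never has to clean up a half-filled array. */
static int alloc_array(unsigned int n, void **array)
{
	for (unsigned int i = 0; i < n; i++) {
		array[i] = malloc(4096);
		if (!array[i]) {
			for (unsigned int j = 0; j < i; j++) {
				free(array[j]);
				array[j] = NULL;
			}
			return -1; /* -ENOMEM in the kernel */
		}
	}
	return 0;
}
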
+@@ -979,7 +974,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+ int ret = 0;
+ size_t pg_offset = 0;
+ size_t iosize;
+- size_t blocksize = inode->i_sb->s_blocksize;
++ size_t blocksize = fs_info->sectorsize;
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+
+ ret = set_page_extent_mapped(page);
+@@ -2177,10 +2172,8 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page,
+
+ page = find_get_page(mapping, cur >> PAGE_SHIFT);
+ ASSERT(PageLocked(page));
+- if (pages_dirty && page != locked_page) {
++ if (pages_dirty && page != locked_page)
+ ASSERT(PageDirty(page));
+- clear_page_dirty_for_io(page);
+- }
+
+ ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
+ i_size, &nr);
+@@ -2261,7 +2254,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
+ struct extent_state *cached_state = NULL;
+ u64 start = folio_pos(folio);
+ u64 end = start + folio_size(folio) - 1;
+- size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
++ size_t blocksize = btrfs_sb(folio->mapping->host->i_sb)->sectorsize;
+
+ /* This function is only called for the btree inode */
+ ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
+@@ -2298,7 +2291,8 @@ static int try_release_extent_state(struct extent_io_tree *tree,
+ ret = 0;
+ } else {
+ u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
+- EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
++ EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
++ EXTENT_QGROUP_RESERVED);
+
+ /*
+ * At this point we can safely clear everything except the
+@@ -2404,12 +2398,65 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
+ return try_release_extent_state(tree, page, mask);
+ }
+
++struct btrfs_fiemap_entry {
++ u64 offset;
++ u64 phys;
++ u64 len;
++ u32 flags;
++};
++
++/*
++ * Indicate to the caller of emit_fiemap_extent() that it needs to unlock the
++ * file range from the inode's io tree, release the subvolume tree search
++ * path, flush the fiemap cache and then relock the file range and re-search
++ * the subvolume tree. The value here is something negative that can't be
++ * confused with a valid errno value; it is different from 1 because that's
++ * also a return value from fiemap_fill_next_extent() and is often used to
++ * mean a btree search did not find a key. So make it some distinct negative value.
++ */
++#define BTRFS_FIEMAP_FLUSH_CACHE (-(MAX_ERRNO + 1))
++
+ /*
+- * To cache previous fiemap extent
++ * Used to:
+ *
+- * Will be used for merging fiemap extent
++ * - Cache the next entry to be emitted to the fiemap buffer, so that we can
++ * merge extents that are contiguous and can be grouped as a single one;
++ *
++ * - Store extents ready to be written to the fiemap buffer in an intermediary
++ * buffer. This intermediary buffer is to ensure that in case the fiemap
++ * buffer is memory mapped to the fiemap target file, we don't deadlock
++ * during btrfs_page_mkwrite(). This is because during fiemap we are locking
++ * an extent range in order to prevent races with delalloc flushing and
++ * ordered extent completion, which is needed in order to reliably detect
++ * delalloc in holes and prealloc extents. And this can lead to a deadlock
++ * if the fiemap buffer is memory mapped to the file we are running fiemap
++ * against (a silly scenario, useless in practice, but possible) because
++ * btrfs_page_mkwrite() will try to lock the same extent range.
+ */
+ struct fiemap_cache {
++ /* An array of ready fiemap entries. */
++ struct btrfs_fiemap_entry *entries;
++ /* Number of entries in the entries array. */
++ int entries_size;
++ /* Index of the next entry in the entries array to write to. */
++ int entries_pos;
++ /*
++	 * Once the entries array is full, this indicates the offset of the
++	 * next file extent item we must search for in the inode's subvolume
++ * tree after unlocking the extent range in the inode's io tree and
++ * releasing the search path.
++ */
++ u64 next_search_offset;
++ /*
++	 * This matches struct fiemap_extent_info::fi_mapped_extents; we use it
++	 * to count the extents we emitted ourselves and to stop instead of relying on
++ * fiemap_fill_next_extent() because we buffer ready fiemap entries at
++ * the @entries array, and we want to stop as soon as we hit the max
++ * amount of extents to map, not just to save time but also to make the
++ * logic at extent_fiemap() simpler.
++ */
++ unsigned int extents_mapped;
++	/* Fields for the cached (unsubmitted, not yet ready) extent. */
+ u64 offset;
+ u64 phys;
+ u64 len;
+@@ -2417,6 +2464,28 @@ struct fiemap_cache {
+ bool cached;
+ };
+
++static int flush_fiemap_cache(struct fiemap_extent_info *fieinfo,
++ struct fiemap_cache *cache)
++{
++ for (int i = 0; i < cache->entries_pos; i++) {
++ struct btrfs_fiemap_entry *entry = &cache->entries[i];
++ int ret;
++
++ ret = fiemap_fill_next_extent(fieinfo, entry->offset,
++ entry->phys, entry->len,
++ entry->flags);
++ /*
++ * Ignore 1 (reached max entries) because we keep track of that
++ * ourselves in emit_fiemap_extent().
++ */
++ if (ret < 0)
++ return ret;
++ }
++ cache->entries_pos = 0;
++
++ return 0;
++}
++
+ /*
+ * Helper to submit fiemap extent.
+ *
+@@ -2431,7 +2500,8 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ struct fiemap_cache *cache,
+ u64 offset, u64 phys, u64 len, u32 flags)
+ {
+- int ret = 0;
++ struct btrfs_fiemap_entry *entry;
++ u64 cache_end;
+
+ /* Set at the end of extent_fiemap(). */
+ ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
+@@ -2440,15 +2510,104 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ goto assign;
+
+ /*
+- * Sanity check, extent_fiemap() should have ensured that new
+- * fiemap extent won't overlap with cached one.
+- * Not recoverable.
++ * When iterating the extents of the inode, at extent_fiemap(), we may
++ * find an extent that starts at an offset behind the end offset of the
++ * previous extent we processed. This happens if fiemap is called
++ * without FIEMAP_FLAG_SYNC and there are ordered extents completing
++ * after we had to unlock the file range, release the search path, emit
++	 * the fiemap extents stored in the buffer (cache->entries array) and
++	 * then lock the remainder of the range and re-search the btree.
+ *
+- * NOTE: Physical address can overlap, due to compression
++ * For example we are in leaf X processing its last item, which is the
++ * file extent item for file range [512K, 1M[, and after
++ * btrfs_next_leaf() releases the path, there's an ordered extent that
++ * completes for the file range [768K, 2M[, and that results in trimming
++ * the file extent item so that it now corresponds to the file range
++ * [512K, 768K[ and a new file extent item is inserted for the file
++ * range [768K, 2M[, which may end up as the last item of leaf X or as
++ * the first item of the next leaf - in either case btrfs_next_leaf()
++ * will leave us with a path pointing to the new extent item, for the
++ * file range [768K, 2M[, since that's the first key that follows the
++ * last one we processed. So in order not to report overlapping extents
++ * to user space, we trim the length of the previously cached extent and
++ * emit it.
++ *
++ * Upon calling btrfs_next_leaf() we may also find an extent with an
++	 * offset smaller than or equal to cache->offset, and this happens
++ * when we had a hole or prealloc extent with several delalloc ranges in
++ * it, but after btrfs_next_leaf() released the path, delalloc was
++ * flushed and the resulting ordered extents were completed, so we can
++ * now have found a file extent item for an offset that is smaller than
++	 * or equal to what we have in cache->offset. We deal with this as
++ * described below.
+ */
+- if (cache->offset + cache->len > offset) {
+- WARN_ON(1);
+- return -EINVAL;
++ cache_end = cache->offset + cache->len;
++ if (cache_end > offset) {
++ if (offset == cache->offset) {
++ /*
++			 * We cached a delalloc range (found in the io tree) for
++ * a hole or prealloc extent and we have now found a
++ * file extent item for the same offset. What we have
++ * now is more recent and up to date, so discard what
++ * we had in the cache and use what we have just found.
++ */
++ goto assign;
++ } else if (offset > cache->offset) {
++ /*
++ * The extent range we previously found ends after the
++ * offset of the file extent item we found and that
++ * offset falls somewhere in the middle of that previous
++ * extent range. So adjust the range we previously found
++ * to end at the offset of the file extent item we have
++ * just found, since this extent is more up to date.
++ * Emit that adjusted range and cache the file extent
++ * item we have just found. This corresponds to the case
++ * where a previously found file extent item was split
++ * due to an ordered extent completing.
++ */
++ cache->len = offset - cache->offset;
++ goto emit;
++ } else {
++ const u64 range_end = offset + len;
++
++ /*
++ * The offset of the file extent item we have just found
++ * is behind the cached offset. This means we were
++ * processing a hole or prealloc extent for which we
++ * have found delalloc ranges (in the io tree), so what
++ * we have in the cache is the last delalloc range we
++ * found while the file extent item we found can be
++			 * emitted or only a part of that range.
++ * emmitted or only a part of that range.
++ *
++ * We have two cases here:
++ *
++ * 1) The file extent item's range ends at or behind the
++ * cached extent's end. In this case just ignore the
++ * current file extent item because we don't want to
++ * overlap with previous ranges that may have been
++			 *    emitted already;
++ *
++ * 2) The file extent item starts behind the currently
++ * cached extent but its end offset goes beyond the
++ * end offset of the cached extent. We don't want to
++ * overlap with a previous range that may have been
++			 *    emitted already, so we emit the currently cached
++ * extent and then partially store the current file
++ * extent item's range in the cache, for the subrange
++			 *    going from the cached extent's end to the end of the
++ * file extent item.
++ */
++ if (range_end <= cache_end)
++ return 0;
++
++ if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
++ phys += cache_end - offset;
++
++ offset = cache_end;
++ len = range_end - cache_end;
++ goto emit;
++ }
+ }
+
+ /*
+@@ -2468,12 +2627,37 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+ return 0;
+ }
+
++emit:
+ /* Not mergeable, need to submit cached one */
+- ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+- cache->len, cache->flags);
+- cache->cached = false;
+- if (ret)
+- return ret;
++
++ if (cache->entries_pos == cache->entries_size) {
++ /*
++		 * We will need to restart the search from the end offset of the
++		 * last stored extent, not from the current offset, because after
++ * unlocking the range and releasing the path, if there's a hole
++ * between that end offset and this current offset, a new extent
++ * may have been inserted due to a new write, so we don't want
++ * to miss it.
++ */
++ entry = &cache->entries[cache->entries_size - 1];
++ cache->next_search_offset = entry->offset + entry->len;
++ cache->cached = false;
++
++ return BTRFS_FIEMAP_FLUSH_CACHE;
++ }
++
++ entry = &cache->entries[cache->entries_pos];
++ entry->offset = cache->offset;
++ entry->phys = cache->phys;
++ entry->len = cache->len;
++ entry->flags = cache->flags;
++ cache->entries_pos++;
++ cache->extents_mapped++;
++
++ if (cache->extents_mapped == fieinfo->fi_extents_max) {
++ cache->cached = false;
++ return 1;
++ }
+ assign:
+ cache->cached = true;
+ cache->offset = offset;
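
The fiemap rework above is built around one constraint: fiemap_fill_next_extent() copies to a user buffer that may be mmapped to the very file being mapped, so it must never run while the extent range is locked. Entries are therefore buffered in kernel memory under the lock and flushed after unlocking, with a sentinel telling the caller to flush and restart once the buffer fills. Reduced to that control flow (sizes and names here are illustrative):

#include <stddef.h>

#define FLUSH_NEEDED (-4096) /* sentinel distinct from errnos and from 1 */

struct entry { unsigned long long offset, len; };

struct entry_cache {
	struct entry entries[64];
	int pos;
};

/* Called with the extent range locked: stores into local memory only, so
 * it can never page-fault on a user buffer. */
static int cache_entry(struct entry_cache *c, struct entry e)
{
	if (c->pos == (int)(sizeof(c->entries) / sizeof(c->entries[0])))
		return FLUSH_NEEDED; /* caller must unlock, flush, restart */
	c->entries[c->pos++] = e;
	return 0;
}

/* Called with the range unlocked: copying out may fault, harmlessly. */
static int flush_entries(struct entry_cache *c,
                         int (*emit)(const struct entry *))
{
	for (int i = 0; i < c->pos; i++) {
		int ret = emit(&c->entries[i]);

		if (ret < 0)
			return ret;
	}
	c->pos = 0;
	return 0;
}
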
+@@ -2599,8 +2783,8 @@ static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path
+ * neighbour leaf).
+ * We also need the private clone because holding a read lock on an
+ * extent buffer of the subvolume's b+tree will make lockdep unhappy
+- * when we call fiemap_fill_next_extent(), because that may cause a page
+- * fault when filling the user space buffer with fiemap data.
++ * when we check if extents are shared, as backref walking may need to
++ * lock the same leaf we are processing.
+ */
+ clone = btrfs_clone_extent_buffer(path->nodes[0]);
+ if (!clone)
+@@ -2824,24 +3008,29 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ struct btrfs_backref_share_check_ctx *backref_ctx;
+ u64 last_extent_end;
+ u64 prev_extent_end;
+- u64 lockstart;
+- u64 lockend;
++ u64 range_start;
++ u64 range_end;
++ const u64 sectorsize = inode->root->fs_info->sectorsize;
+ bool stopped = false;
+ int ret;
+
++ cache.entries_size = PAGE_SIZE / sizeof(struct btrfs_fiemap_entry);
++ cache.entries = kmalloc_array(cache.entries_size,
++ sizeof(struct btrfs_fiemap_entry),
++ GFP_KERNEL);
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ path = btrfs_alloc_path();
+- if (!backref_ctx || !path) {
++ if (!cache.entries || !backref_ctx || !path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- lockstart = round_down(start, inode->root->fs_info->sectorsize);
+- lockend = round_up(start + len, inode->root->fs_info->sectorsize);
+- prev_extent_end = lockstart;
++restart:
++ range_start = round_down(start, sectorsize);
++ range_end = round_up(start + len, sectorsize);
++ prev_extent_end = range_start;
+
+- btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
+- lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
++ lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+
+ ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
+ if (ret < 0)
+@@ -2849,7 +3038,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ btrfs_release_path(path);
+
+ path->reada = READA_FORWARD;
+- ret = fiemap_search_slot(inode, path, lockstart);
++ ret = fiemap_search_slot(inode, path, range_start);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+@@ -2861,7 +3050,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ goto check_eof_delalloc;
+ }
+
+- while (prev_extent_end < lockend) {
++ while (prev_extent_end < range_end) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *ei;
+ struct btrfs_key key;
+@@ -2884,19 +3073,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ * The first iteration can leave us at an extent item that ends
+ * before our range's start. Move to the next item.
+ */
+- if (extent_end <= lockstart)
++ if (extent_end <= range_start)
+ goto next_item;
+
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /* We have in implicit hole (NO_HOLES feature enabled). */
+ if (prev_extent_end < key.offset) {
+- const u64 range_end = min(key.offset, lockend) - 1;
++ const u64 hole_end = min(key.offset, range_end) - 1;
+
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx, 0, 0, 0,
+- prev_extent_end, range_end);
++ prev_extent_end, hole_end);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+@@ -2906,7 +3095,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ }
+
+ /* We've reached the end of the fiemap range, stop. */
+- if (key.offset >= lockend) {
++ if (key.offset >= range_end) {
+ stopped = true;
+ break;
+ }
+@@ -2967,7 +3156,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+- /* fiemap_fill_next_extent() told us to stop. */
++ /* emit_fiemap_extent() told us to stop. */
+ stopped = true;
+ break;
+ }
+@@ -2990,23 +3179,13 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ }
+
+ check_eof_delalloc:
+- /*
+- * Release (and free) the path before emitting any final entries to
+- * fiemap_fill_next_extent() to keep lockdep happy. This is because
+- * once we find no more file extent items exist, we may have a
+- * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
+- * faults when copying data to the user space buffer.
+- */
+- btrfs_free_path(path);
+- path = NULL;
+-
+- if (!stopped && prev_extent_end < lockend) {
++ if (!stopped && prev_extent_end < range_end) {
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state, backref_ctx,
+- 0, 0, 0, prev_extent_end, lockend - 1);
++ 0, 0, 0, prev_extent_end, range_end - 1);
+ if (ret < 0)
+ goto out_unlock;
+- prev_extent_end = lockend;
++ prev_extent_end = range_end;
+ }
+
+ if (cache.cached && cache.offset + cache.len >= last_extent_end) {
+@@ -3030,13 +3209,39 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ }
+ }
+
+- ret = emit_last_fiemap_cache(fieinfo, &cache);
+-
+ out_unlock:
+- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
++ unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
++
++ if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
++ btrfs_release_path(path);
++ ret = flush_fiemap_cache(fieinfo, &cache);
++ if (ret)
++ goto out;
++ len -= cache.next_search_offset - start;
++ start = cache.next_search_offset;
++ goto restart;
++ } else if (ret < 0) {
++ goto out;
++ }
++
++ /*
++ * Must free the path before emitting to the fiemap buffer because we
++ * may have a non-cloned leaf and if the fiemap buffer is memory mapped
++ * to a file, a write into it (through btrfs_page_mkwrite()) may trigger
++	 * a wait for an ordered extent that needs to modify that leaf in
++	 * order to complete, leading to a deadlock.
++ */
++ btrfs_free_path(path);
++ path = NULL;
++
++ ret = flush_fiemap_cache(fieinfo, &cache);
++ if (ret)
++ goto out;
++
++ ret = emit_last_fiemap_cache(fieinfo, &cache);
+ out:
+ free_extent_state(delalloc_cached_state);
++ kfree(cache.entries);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
+ return ret;
+@@ -3924,6 +4129,19 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+ if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
+ goto done;
+
++ /*
++ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
++ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
++ * started and finished reading the same eb. In this case, UPTODATE
++ * will now be set, and we shouldn't read it in again.
++ */
++ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
++ clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
++ smp_mb__after_atomic();
++ wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
++ return 0;
++ }
++
+ clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
+ eb->read_mirror = 0;
+ check_buffer_tree_ref(eb);
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index a6d8368ed0edd5..8c017c4105f2a2 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -843,7 +843,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ split->block_len = em->block_len;
+ split->orig_start = em->orig_start;
+ } else {
+- const u64 diff = start + len - em->start;
++ const u64 diff = end - em->start;
+
+ split->block_len = split->len;
+ split->block_start += diff;
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 1ce5dd1544995f..45cae356e89ba0 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_encryption(leaf, item, 0);
+ btrfs_set_file_extent_other_encoding(leaf, item, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
+ * This calls btrfs_truncate_item with the correct args based on the overlap,
+ * and fixes up the key as required.
+ */
+-static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
++static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ u64 bytenr, u64 len)
+ {
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct extent_buffer *leaf;
+ const u32 csum_size = fs_info->csum_size;
+ u64 csum_end;
+@@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ */
+ u32 new_size = (bytenr - key->offset) >> blocksize_bits;
+ new_size *= csum_size;
+- btrfs_truncate_item(path, new_size, 1);
++ btrfs_truncate_item(trans, path, new_size, 1);
+ } else if (key->offset >= bytenr && csum_end > end_byte &&
+ end_byte > key->offset) {
+ /*
+@@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ u32 new_size = (csum_end - end_byte) >> blocksize_bits;
+ new_size *= csum_size;
+
+- btrfs_truncate_item(path, new_size, 0);
++ btrfs_truncate_item(trans, path, new_size, 0);
+
+ key->offset = end_byte;
+- btrfs_set_item_key_safe(fs_info, path, key);
++ btrfs_set_item_key_safe(trans, path, key);
+ } else {
+ BUG();
+ }
+@@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+
+ key.offset = end_byte - 1;
+ } else {
+- truncate_one_csum(fs_info, path, &key, bytenr, len);
++ truncate_one_csum(trans, path, &key, bytenr, len);
+ if (key.offset < bytenr)
+ break;
+ }
+@@ -1202,7 +1203,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+ diff /= csum_size;
+ diff *= csum_size;
+
+- btrfs_extend_item(path, diff);
++ btrfs_extend_item(trans, path, diff);
+ ret = 0;
+ goto csum;
+ }
+@@ -1249,7 +1250,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+ ins_size /= csum_size;
+ total_bytes += ins_size * fs_info->sectorsize;
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ if (total_bytes < sums->len) {
+ btrfs_release_path(path);
+ cond_resched();
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 361535c71c0f5a..fc6c91773bc894 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -368,7 +368,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - args->start);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (update_refs && disk_bytenr > 0) {
+ btrfs_init_generic_ref(&ref,
+@@ -405,13 +405,13 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = args->end;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+
+ extent_offset += args->end - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - args->end);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (update_refs && disk_bytenr > 0)
+ args->bytes_found += args->end - key.offset;
+ break;
+@@ -431,7 +431,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ args->start - key.offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (update_refs && disk_bytenr > 0)
+ args->bytes_found += extent_end - args->start;
+ if (args->end == extent_end)
+@@ -536,7 +536,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
+ path->slots[0]++;
+ }
+- btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
++ btrfs_setup_item_for_insert(trans, root, path, &key,
++ args->extent_item_size);
+ args->extent_inserted = true;
+ }
+
+@@ -593,7 +594,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
+ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode, u64 start, u64 end)
+ {
+- struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *leaf;
+ struct btrfs_path *path;
+@@ -664,7 +664,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ new_key.offset = end;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi,
+@@ -679,7 +679,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ trans->transid);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ end - other_start);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ }
+@@ -698,7 +698,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ trans->transid);
+ path->slots[0]++;
+ new_key.offset = start;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+@@ -708,7 +708,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ other_end - start);
+ btrfs_set_file_extent_offset(leaf, fi,
+ start - orig_offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ }
+@@ -742,7 +742,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - split);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
+ num_bytes, 0);
+@@ -814,7 +814,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+@@ -823,7 +823,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ if (ret < 0) {
+@@ -1535,21 +1535,27 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ * So here we disable page faults in the iov_iter and then retry if we
+ * got -EFAULT, faulting in the pages before the retry.
+ */
++again:
+ from->nofault = true;
+ dio = btrfs_dio_write(iocb, from, written);
+ from->nofault = false;
+
+- /*
+- * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
+- * iocb, and that needs to lock the inode. So unlock it before calling
+- * iomap_dio_complete() to avoid a deadlock.
+- */
+- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+-
+- if (IS_ERR_OR_NULL(dio))
++ if (IS_ERR_OR_NULL(dio)) {
+ err = PTR_ERR_OR_ZERO(dio);
+- else
++ } else {
++ /*
++ * If we have a synchronous write, we must make sure the fsync
++ * triggered by the iomap_dio_complete() call below doesn't
++ * deadlock on the inode lock - we are already holding it and we
++ * can't call it after unlocking because we may need to complete
++ * partial writes due to the input buffer (or parts of it) not
++ * being already faulted in.
++ */
++ ASSERT(current->journal_info == NULL);
++ current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB;
+ err = iomap_dio_complete(dio);
++ current->journal_info = NULL;
++ }
+
+ /* No increment (+=) because iomap returns a cumulative value. */
+ if (err > 0)
+@@ -1576,10 +1582,12 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+ } else {
+ fault_in_iov_iter_readable(from, left);
+ prev_left = left;
+- goto relock;
++ goto again;
+ }
+ }
+
++ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
++
+ /*
+ * If 'err' is -ENOTBLK or we have not written all data, then it means
+ * we must fall back to buffered IO.
+@@ -1787,6 +1795,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ int ret = 0, err;
+ u64 len;
+ bool full_sync;
++ bool skip_ilock = false;
++
++ if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
++ skip_ilock = true;
++ current->journal_info = NULL;
++ lockdep_assert_held(&inode->i_rwsem);
++ }
+
+ trace_btrfs_sync_file(file, datasync);
+
+@@ -1814,7 +1829,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ if (ret)
+ goto out;
+
+- btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
++ if (skip_ilock)
++ down_write(&BTRFS_I(inode)->i_mmap_lock);
++ else
++ btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+
+ atomic_inc(&root->log_batch);
+
+@@ -1838,7 +1856,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ */
+ ret = start_ordered_ops(inode, start, end);
+ if (ret) {
+- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
++ if (skip_ilock)
++ up_write(&BTRFS_I(inode)->i_mmap_lock);
++ else
++ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+ goto out;
+ }
+
+@@ -1941,7 +1962,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ * file again, but that will end up using the synchronization
+ * inside btrfs_sync_log to keep things safe.
+ */
+- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
++ if (skip_ilock)
++ up_write(&BTRFS_I(inode)->i_mmap_lock);
++ else
++ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+
+ if (ret == BTRFS_NO_LOG_SYNC) {
+ ret = btrfs_end_transaction(trans);
+@@ -2009,7 +2033,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+
+ out_release_extents:
+ btrfs_release_log_ctx_extents(&ctx);
+- btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
++ if (skip_ilock)
++ up_write(&BTRFS_I(inode)->i_mmap_lock);
++ else
++ btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
+ goto out;
+ }
+
+@@ -2104,7 +2131,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+
+@@ -2112,7 +2139,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ u64 num_bytes;
+
+ key.offset = offset;
+- btrfs_set_item_key_safe(fs_info, path, &key);
++ btrfs_set_item_key_safe(trans, path, &key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
+@@ -2121,7 +2148,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ btrfs_release_path(path);
+@@ -2273,7 +2300,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
+ if (extent_info->is_new_extent)
+ btrfs_set_file_extent_generation(leaf, extent, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
+@@ -3187,7 +3214,7 @@ static long btrfs_fallocate(struct file *file, int mode,
+ qgroup_reserved -= range->len;
+ } else if (qgroup_reserved > 0) {
+ btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
+- range->start, range->len);
++ range->start, range->len, NULL);
+ qgroup_reserved -= range->len;
+ }
+ list_del(&range->list);
+@@ -3454,7 +3481,7 @@ static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
+ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+ {
+ struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
+- struct btrfs_file_private *private = file->private_data;
++ struct btrfs_file_private *private;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_state *cached_state = NULL;
+ struct extent_state **delalloc_cached_state;
+@@ -3482,7 +3509,19 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+ inode_get_bytes(&inode->vfs_inode) == i_size)
+ return i_size;
+
+- if (!private) {
++ spin_lock(&inode->lock);
++ private = file->private_data;
++ spin_unlock(&inode->lock);
++
++ if (private && private->owner_task != current) {
++ /*
++ * Not allocated by us, don't use it as its cached state is used
++ * by the task that allocated it, and we want neither to mess
++ * with it nor to get incorrect results because it reflects an
++ * invalid state for the current task.
++ */
++ private = NULL;
++ } else if (!private) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ /*
+ * No worries if memory allocation failed.
+@@ -3490,7 +3529,23 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
+ * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
+ * so everything will still be correct.
+ */
+- file->private_data = private;
++ if (private) {
++ bool free = false;
++
++ private->owner_task = current;
++
++ spin_lock(&inode->lock);
++ if (file->private_data)
++ free = true;
++ else
++ file->private_data = private;
++ spin_unlock(&inode->lock);
++
++ if (free) {
++ kfree(private);
++ private = NULL;
++ }
++ }
+ }
+
+ if (private)
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 27fad70451aad7..3bcf4a30cad77f 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -195,7 +195,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ btrfs_set_inode_nlink(leaf, inode_item, 1);
+ btrfs_set_inode_transid(leaf, inode_item, trans->transid);
+ btrfs_set_inode_block_group(leaf, inode_item, offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+@@ -213,7 +213,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_free_space_header);
+ memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
+ btrfs_set_free_space_key(leaf, header, &disk_key);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ return 0;
+@@ -855,6 +855,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ spin_unlock(&ctl->tree_lock);
+ btrfs_err(fs_info,
+ "Duplicate entries in free space cache, dumping");
++ kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, e);
+ goto free_cache;
+ }
+@@ -1185,7 +1186,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
+ btrfs_set_free_space_entries(leaf, header, entries);
+ btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
+ btrfs_set_free_space_generation(leaf, header, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ return 0;
+@@ -1909,9 +1910,9 @@ static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ ctl->free_space -= bytes;
+ }
+
+-static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
+- struct btrfs_free_space *info, u64 offset,
+- u64 bytes)
++static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
++ struct btrfs_free_space *info, u64 offset,
++ u64 bytes)
+ {
+ unsigned long start, count, end;
+ int extent_delta = 1;
+@@ -2247,7 +2248,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+
+ bytes_to_set = min(end - offset, bytes);
+
+- bitmap_set_bits(ctl, info, offset, bytes_to_set);
++ btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+ return bytes_to_set;
+
+@@ -2695,15 +2696,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ u64 offset = bytenr - block_group->start;
+ u64 to_free, to_unusable;
+ int bg_reclaim_threshold = 0;
+- bool initial = (size == block_group->length);
++ bool initial;
+ u64 reclaimable_unusable;
+
+- WARN_ON(!initial && offset + size > block_group->zone_capacity);
++ spin_lock(&block_group->lock);
+
++ initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
++ WARN_ON(!initial && offset + size > block_group->zone_capacity);
+ if (!initial)
+ bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+
+- spin_lock(&ctl->tree_lock);
+ if (!used)
+ to_free = size;
+ else if (initial)
+@@ -2716,18 +2718,19 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ to_free = offset + size - block_group->alloc_offset;
+ to_unusable = size - to_free;
+
++ spin_lock(&ctl->tree_lock);
+ ctl->free_space += to_free;
++ spin_unlock(&ctl->tree_lock);
+ /*
+ * If the block group is read-only, we should account freed space into
+ * bytes_readonly.
+ */
+- if (!block_group->ro)
++ if (!block_group->ro) {
+ block_group->zone_unusable += to_unusable;
+- spin_unlock(&ctl->tree_lock);
++ WARN_ON(block_group->zone_unusable > block_group->length);
++ }
+ if (!used) {
+- spin_lock(&block_group->lock);
+ block_group->alloc_offset -= size;
+- spin_unlock(&block_group->lock);
+ }
+
+ reclaimable_unusable = block_group->zone_unusable -
+@@ -2741,6 +2744,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ btrfs_mark_bg_to_reclaim(block_group);
+ }
+
++ spin_unlock(&block_group->lock);
++
+ return 0;
+ }
+
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index c0e734082dcc42..7b598b070700e7 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
+ struct btrfs_free_space_info);
+ btrfs_set_free_space_extent_count(leaf, info, 0);
+ btrfs_set_free_space_flags(leaf, info, 0);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
+ btrfs_set_free_space_flags(leaf, info, flags);
+ expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ if (extent_count != expected_extent_count) {
+@@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ write_extent_buffer(leaf, bitmap_cursor, ptr,
+ data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ i += extent_size;
+@@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
+ btrfs_set_free_space_flags(leaf, info, flags);
+ expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
+@@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
+
+ extent_count += new_extents;
+ btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+
+ if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
+@@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
+ return !!extent_buffer_test_bit(leaf, ptr, i);
+ }
+
+-static void free_space_set_bits(struct btrfs_block_group *block_group,
++static void free_space_set_bits(struct btrfs_trans_handle *trans,
++ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 *start, u64 *size,
+ int bit)
+ {
+@@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
+ extent_buffer_bitmap_set(leaf, ptr, first, last - first);
+ else
+ extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ *size -= end - *start;
+ *start = end;
+@@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
+ cur_start = start;
+ cur_size = size;
+ while (1) {
+- free_space_set_bits(block_group, path, &cur_start, &cur_size,
++ free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
+ !remove);
+ if (cur_size == 0)
+ break;
+diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
+index 4c322b720a80a1..d3ff97374d48aa 100644
+--- a/fs/btrfs/inode-item.c
++++ b/fs/btrfs/inode-item.c
+@@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ memmove_extent_buffer(leaf, ptr, ptr + del_len,
+ item_size - (ptr + del_len - item_start));
+
+- btrfs_truncate_item(path, item_size - del_len, 1);
++ btrfs_truncate_item(trans, path, item_size - del_len, 1);
+
+ out:
+ btrfs_free_path(path);
+@@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ item_size - (ptr + sub_item_len - item_start));
+- btrfs_truncate_item(path, item_size - sub_item_len, 1);
++ btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
+ out:
+ btrfs_free_path(path);
+
+@@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ name))
+ goto out;
+
+- btrfs_extend_item(path, ins_len);
++ btrfs_extend_item(trans, path, ins_len);
+ ret = 0;
+ }
+ if (ret < 0)
+@@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+
+ ptr = (unsigned long)&extref->name;
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ out:
+ btrfs_free_path(path);
+@@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ goto out;
+
+ old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+- btrfs_extend_item(path, ins_len);
++ btrfs_extend_item(trans, path, ins_len);
+ ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
+@@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ ptr = (unsigned long)(ref + 1);
+ }
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ out:
+ btrfs_free_path(path);
+@@ -591,7 +591,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ num_dec = (orig_num_bytes - extent_num_bytes);
+ if (extent_start != 0)
+ control->sub_bytes += num_dec;
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ extent_num_bytes =
+ btrfs_file_extent_disk_num_bytes(leaf, fi);
+@@ -617,7 +617,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+
+ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ size = btrfs_file_extent_calc_inline_size(size);
+- btrfs_truncate_item(path, size, 1);
++ btrfs_truncate_item(trans, path, size, 1);
+ } else if (!del_item) {
+ /*
+ * We have to bail so the last_size is set to
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 7814b9d654ce12..ee04185d8e0f58 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -573,7 +573,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
+ kunmap_local(kaddr);
+ put_page(page);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /*
+@@ -687,7 +687,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
+ * And at reserve time, it's always aligned to page size, so
+ * just free one page here.
+ */
+- btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
++ btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
+ btrfs_free_path(path);
+ btrfs_end_transaction(trans);
+ return ret;
+@@ -730,7 +730,8 @@ static noinline int add_async_extent(struct async_chunk *cow,
+ struct async_extent *async_extent;
+
+ async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
+- BUG_ON(!async_extent); /* -ENOMEM */
++ if (!async_extent)
++ return -ENOMEM;
+ async_extent->start = start;
+ async_extent->ram_size = ram_size;
+ async_extent->compressed_size = compressed_size;
+@@ -1017,8 +1018,9 @@ static void compress_file_range(struct btrfs_work *work)
+ * The async work queues will take care of doing actual allocation on
+ * disk for these compressed pages, and will submit the bios.
+ */
+- add_async_extent(async_chunk, start, total_in, total_compressed, pages,
+- nr_pages, compress_type);
++ ret = add_async_extent(async_chunk, start, total_in, total_compressed, pages,
++ nr_pages, compress_type);
++ BUG_ON(ret);
+ if (start + total_in < end) {
+ start += total_in;
+ cond_resched();
+@@ -1030,8 +1032,9 @@ static void compress_file_range(struct btrfs_work *work)
+ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
+ inode->flags |= BTRFS_INODE_NOCOMPRESS;
+ cleanup_and_bail_uncompressed:
+- add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
+- BTRFS_COMPRESS_NONE);
++ ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
++ BTRFS_COMPRESS_NONE);
++ BUG_ON(ret);
+ free_pages:
+ if (pages) {
+ for (i = 0; i < nr_pages; i++) {
+@@ -1134,13 +1137,13 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ 0, *alloc_hint, &ins, 1, 1);
+ if (ret) {
+ /*
+- * Here we used to try again by going back to non-compressed
+- * path for ENOSPC. But we can't reserve space even for
+- * compressed size, how could it work for uncompressed size
+- * which requires larger size? So here we directly go error
+- * path.
++ * We can't reserve contiguous space for the compressed size.
++ * Unlikely, but it's possible that we could have enough
++ * non-contiguous space for the uncompressed size instead. So
++ * fall back to uncompressed.
+ */
+- goto out_free;
++ submit_uncompressed_range(inode, async_extent, locked_page);
++ goto done;
+ }
+
+ /* Here we're doing allocation and writeback of the compressed pages */
+@@ -1192,7 +1195,6 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
+ out_free_reserve:
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
+-out_free:
+ mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
+ extent_clear_unlock_delalloc(inode, start, end,
+ NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
+@@ -2511,7 +2513,7 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
+ */
+ if (bits & EXTENT_CLEAR_META_RESV &&
+ root != fs_info->tree_root)
+- btrfs_delalloc_release_metadata(inode, len, false);
++ btrfs_delalloc_release_metadata(inode, len, true);
+
+ /* For sanity tests. */
+ if (btrfs_is_testing(fs_info))
+@@ -2912,7 +2914,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(struct btrfs_file_extent_item));
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /*
+@@ -3168,8 +3170,23 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
+ unwritten_start += logical_len;
+ clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
+
+- /* Drop extent maps for the part of the extent we didn't write. */
+- btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
++ /*
++ * Drop extent maps for the part of the extent we didn't write.
++ *
++ * We have an exception here for the free_space_inode, this is
++ * because when we do btrfs_get_extent() on the free space inode
++ * we will search the commit root. If this is a new block group
++ * we won't find anything, and we will trip over the assert in
++ * writepage where we do ASSERT(em->block_start !=
++ * EXTENT_MAP_HOLE).
++ *
++ * Theoretically we could also skip this for any NOCOW extent as
++ * we don't mess with the extent map tree in the NOCOW case, but
++ * for now simply skip this if we are the free space inode.
++ */
++ if (!btrfs_is_free_space_inode(inode))
++ btrfs_drop_extent_map_range(inode, unwritten_start,
++ end, false);
+
+ /*
+ * If the ordered extent had an IOERR or something else went
+@@ -3981,7 +3998,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item);
+
+ fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_set_inode_last_trans(trans, inode);
+ ret = 0;
+ failed:
+@@ -4131,6 +4148,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+
+ btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
+ inode_inc_iversion(&inode->vfs_inode);
++ inode_set_ctime_current(&inode->vfs_inode);
+ inode_inc_iversion(&dir->vfs_inode);
+ inode_set_ctime_current(&inode->vfs_inode);
+ dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
+@@ -4357,7 +4375,14 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
+ ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+- BUG_ON(ret == 0);
++ if (ret == 0) {
++ /*
++ * Key with offset -1 found, there would have to exist a root
++ * with such id, but this is out of valid range.
++ */
++ ret = -EUCLEAN;
++ goto out;
++ }
+
+ ret = 0;
+ if (path->slots[0] > 0) {
+@@ -4445,8 +4470,11 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv block_rsv;
+ u64 root_flags;
++ u64 qgroup_reserved = 0;
+ int ret;
+
++ down_write(&fs_info->subvol_sem);
++
+ /*
+ * Don't allow to delete a subvolume with send in progress. This is
+ * inside the inode lock so the error handling that has to drop the bit
+@@ -4458,25 +4486,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu during send",
+ dest->root_key.objectid);
+- return -EPERM;
++ ret = -EPERM;
++ goto out_up_write;
+ }
+ if (atomic_read(&dest->nr_swapfiles)) {
+ spin_unlock(&dest->root_item_lock);
+ btrfs_warn(fs_info,
+ "attempt to delete subvolume %llu with active swapfile",
+ root->root_key.objectid);
+- return -EPERM;
++ ret = -EPERM;
++ goto out_up_write;
+ }
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
+
+- down_write(&fs_info->subvol_sem);
+-
+ ret = may_destroy_subvol(dest);
+ if (ret)
+- goto out_up_write;
++ goto out_undead;
+
+ btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+@@ -4486,13 +4514,21 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ */
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
+ if (ret)
+- goto out_up_write;
++ goto out_undead;
++ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_release;
+ }
++ ret = btrfs_record_root_in_trans(trans, root);
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ goto out_end_trans;
++ }
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+
+@@ -4551,16 +4587,20 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
+ ret = btrfs_end_transaction(trans);
+ inode->i_flags |= S_DEAD;
+ out_release:
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+-out_up_write:
+- up_write(&fs_info->subvol_sem);
++ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
++out_undead:
+ if (ret) {
+ spin_lock(&dest->root_item_lock);
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
+- } else {
++ }
++out_up_write:
++ up_write(&fs_info->subvol_sem);
++ if (!ret) {
+ d_invalidate(dentry);
+ btrfs_prune_dentries(dest);
+ ASSERT(dest->send_in_progress == 0);
+@@ -5129,7 +5169,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
+ */
+ if (state_flags & EXTENT_DELALLOC)
+ btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
+- end - start + 1);
++ end - start + 1, NULL);
+
+ clear_extent_bit(io_tree, start, end,
+ EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
+@@ -5629,7 +5669,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
+ struct inode *inode;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_root *sub_root = root;
+- struct btrfs_key location;
++ struct btrfs_key location = { 0 };
+ u8 di_type = 0;
+ int ret = 0;
+
+@@ -6310,7 +6350,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ }
+ }
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ /*
+ * We don't need the path anymore, plus inheriting properties, adding
+ * ACLs, security xattrs, orphan item or adding the link, will result in
+@@ -6974,8 +7014,15 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ int ret;
+
+ alloc_hint = get_extent_allocation_hint(inode, start, len);
++again:
+ ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 0, alloc_hint, &ins, 1, 1);
++ if (ret == -EAGAIN) {
++ ASSERT(btrfs_is_zoned(fs_info));
++ wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
++ TASK_UNINTERRUPTIBLE);
++ goto again;
++ }
+ if (ret)
+ return ERR_PTR(ret);
+
+@@ -7787,6 +7834,7 @@ struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+ {
++ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ int ret;
+
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
+@@ -7812,7 +7860,26 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ return ret;
+ }
+
+- return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
++ btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
++
++ /*
++ * We did an initial flush to avoid holding the inode's lock while
++ * triggering writeback and waiting for the completion of IO and ordered
++ * extents. Now after we locked the inode we do it again, because it's
++ * possible a new write may have happened in between those two steps.
++ */
++ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
++ ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
++ if (ret) {
++ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
++ return ret;
++ }
++ }
++
++ ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
++ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
++
++ return ret;
+ }
+
+ static int btrfs_writepages(struct address_space *mapping,
+@@ -8044,7 +8111,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
+ * reserved data space.
+ * Since the IO will never happen for this page.
+ */
+- btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
++ btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
+ if (!inode_evicting) {
+ clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
+ EXTENT_DELALLOC | EXTENT_UPTODATE |
+@@ -8629,7 +8696,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
+ u64 delalloc_bytes;
+ u64 inode_bytes;
+ struct inode *inode = d_inode(path->dentry);
+- u32 blocksize = inode->i_sb->s_blocksize;
++ u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
+ u32 bi_flags = BTRFS_I(inode)->flags;
+ u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
+
+@@ -9446,7 +9513,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+
+ ptr = btrfs_file_extent_inline_start(ei);
+ write_extent_buffer(leaf, symname, ptr, name_len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ d_instantiate_new(dentry, inode);
+@@ -9474,7 +9541,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
+ struct btrfs_path *path;
+ u64 start = ins->objectid;
+ u64 len = ins->offset;
+- int qgroup_released;
++ u64 qgroup_released = 0;
+ int ret;
+
+ memset(&stack_fi, 0, sizeof(stack_fi));
+@@ -9487,9 +9554,9 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
+ btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
+ /* Encryption and other encoding is reserved and all 0 */
+
+- qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
+- if (qgroup_released < 0)
+- return ERR_PTR(qgroup_released);
++ ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
++ if (ret < 0)
++ return ERR_PTR(ret);
+
+ if (trans) {
+ ret = insert_reserved_file_extent(trans, inode,
+@@ -10212,6 +10279,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
+ return -EINVAL;
+
++ /*
++ * Compressed extents should always have checksums, so error out if we
++ * have a NOCOW file or the inode was created while mounted with NODATASUM.
++ */
++ if (inode->flags & BTRFS_INODE_NODATASUM)
++ return -EINVAL;
++
+ orig_count = iov_iter_count(from);
+
+ /* The extent size must be sane. */
+@@ -10384,7 +10458,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
+ btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
+ out_qgroup_free_data:
+ if (ret < 0)
+- btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
++ btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
+ out_free_data_space:
+ /*
+ * If btrfs_reserve_extent() succeeded, then we already decremented
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8e7d03bc1b5651..5f0c9c3f3bbf09 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -528,18 +528,16 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
+ * block group is in the logical address space, which can be any
+ * sectorsize aligned bytenr in the range [0, U64_MAX].
+ */
+- if (range.len < fs_info->sb->s_blocksize)
++ if (range.len < fs_info->sectorsize)
+ return -EINVAL;
+
+ range.minlen = max(range.minlen, minlen);
+ ret = btrfs_trim_fs(fs_info, &range);
+- if (ret < 0)
+- return ret;
+
+ if (copy_to_user(arg, &range, sizeof(range)))
+ return -EFAULT;
+
+- return 0;
++ return ret;
+ }
+
+ int __pure btrfs_is_empty_uuid(u8 *uuid)
+@@ -603,6 +601,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ int ret;
+ dev_t anon_dev;
+ u64 objectid;
++ u64 qgroup_reserved = 0;
+
+ root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
+ if (!root_item)
+@@ -640,13 +639,18 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ trans_num_items, false);
+ if (ret)
+ goto out_new_inode_args;
++ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+- goto out_new_inode_args;
++ goto out_release_rsv;
+ }
++ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
++ if (ret)
++ goto out;
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
+ /* Tree log can't currently deal with an inode which is a new root. */
+@@ -663,7 +667,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ goto out;
+ }
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ inode_item = &root_item->inode;
+ btrfs_set_stack_inode_generation(inode_item, 1);
+@@ -721,7 +725,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ free_extent_buffer(leaf);
+ leaf = NULL;
+
+- new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
++ new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev);
+ if (IS_ERR(new_root)) {
+ ret = PTR_ERR(new_root);
+ btrfs_abort_transaction(trans, ret);
+@@ -757,9 +761,11 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ out:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
+- btrfs_subvolume_release_metadata(root, &block_rsv);
+-
+ btrfs_end_transaction(trans);
++out_release_rsv:
++ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ out_new_inode_args:
+ btrfs_new_inode_args_destroy(&new_inode_args);
+ out_inode:
+@@ -781,6 +787,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ struct btrfs_pending_snapshot *pending_snapshot;
+ unsigned int trans_num_items;
+ struct btrfs_trans_handle *trans;
++ struct btrfs_block_rsv *block_rsv;
++ u64 qgroup_reserved = 0;
+ int ret;
+
+ /* We do not support snapshotting right now. */
+@@ -790,6 +798,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ return -EOPNOTSUPP;
+ }
+
++ if (btrfs_root_refs(&root->root_item) == 0)
++ return -ENOENT;
++
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ return -EINVAL;
+
+@@ -814,19 +825,19 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ goto free_pending;
+ }
+
+- btrfs_init_block_rsv(&pending_snapshot->block_rsv,
+- BTRFS_BLOCK_RSV_TEMP);
++ block_rsv = &pending_snapshot->block_rsv;
++ btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * 1 to add dir item
+ * 1 to add dir index
+ * 1 to update parent inode item
+ */
+ trans_num_items = create_subvol_num_items(inherit) + 3;
+- ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+- &pending_snapshot->block_rsv,
++ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv,
+ trans_num_items, false);
+ if (ret)
+ goto free_pending;
++ qgroup_reserved = block_rsv->qgroup_rsv_reserved;
+
+ pending_snapshot->dentry = dentry;
+ pending_snapshot->root = root;
+@@ -839,6 +850,13 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ ret = PTR_ERR(trans);
+ goto fail;
+ }
++ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
++ if (ret) {
++ btrfs_end_transaction(trans);
++ goto fail;
++ }
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
++ qgroup_reserved = 0;
+
+ trans->pending_snapshot = pending_snapshot;
+
+@@ -868,7 +886,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ if (ret && pending_snapshot->snap)
+ pending_snapshot->snap->anon_dev = 0;
+ btrfs_put_root(pending_snapshot->snap);
+- btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
++ btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL);
++ if (qgroup_reserved)
++ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
+ free_pending:
+ if (pending_snapshot->anon_dev)
+ free_anon_bdev(pending_snapshot->anon_dev);
+@@ -1290,6 +1310,15 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
+ * are limited to own subvolumes only
+ */
+ ret = -EPERM;
++ } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) {
++ /*
++ * Snapshots must be made with the src_inode referring
++ * to the subvolume inode, otherwise the permission
++ * checking above is useless because we may have
++ * permission on a lower directory but not the subvol
++ * itself.
++ */
++ ret = -EINVAL;
+ } else {
+ ret = btrfs_mksnapshot(&file->f_path, idmap,
+ name, namelen,
+@@ -1528,7 +1557,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
+ static noinline int copy_to_sk(struct btrfs_path *path,
+ struct btrfs_key *key,
+ struct btrfs_ioctl_search_key *sk,
+- size_t *buf_size,
++ u64 *buf_size,
+ char __user *ubuf,
+ unsigned long *sk_offset,
+ int *num_found)
+@@ -1660,7 +1689,7 @@ static noinline int copy_to_sk(struct btrfs_path *path,
+
+ static noinline int search_ioctl(struct inode *inode,
+ struct btrfs_ioctl_search_key *sk,
+- size_t *buf_size,
++ u64 *buf_size,
+ char __user *ubuf)
+ {
+ struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
+@@ -1733,7 +1762,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
+ struct btrfs_ioctl_search_args __user *uargs = argp;
+ struct btrfs_ioctl_search_key sk;
+ int ret;
+- size_t buf_size;
++ u64 buf_size;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -1763,8 +1792,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
+ struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
+ struct btrfs_ioctl_search_args_v2 args;
+ int ret;
+- size_t buf_size;
+- const size_t buf_limit = SZ_16M;
++ u64 buf_size;
++ const u64 buf_limit = SZ_16M;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -2599,6 +2628,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
+ ret = -EFAULT;
+ goto out;
+ }
++ if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+ /* compression requires us to start the IO */
+ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
+@@ -2947,7 +2980,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+
+ btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
+ btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+
+ btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
+@@ -3794,6 +3827,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
+ goto out;
+ }
+
++ if (sa->create && is_fstree(sa->qgroupid)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+@@ -4351,6 +4389,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
+ arg->clone_sources = compat_ptr(args32.clone_sources);
+ arg->parent_root = args32.parent_root;
+ arg->flags = args32.flags;
++ arg->version = args32.version;
+ memcpy(arg->reserved, args32.reserved,
+ sizeof(args32.reserved));
+ #else
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 345c449d588ccb..8a3c46cb67f538 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -153,11 +153,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ {
+ struct btrfs_ordered_extent *entry;
+ int ret;
++ u64 qgroup_rsv = 0;
+
+ if (flags &
+ ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
+ /* For nocow write, we can release the qgroup rsv right now */
+- ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
++ ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ } else {
+@@ -165,7 +166,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ * The ordered extent has reserved qgroup space, release now
+ * and pass the reserved number for qgroup_record to free.
+ */
+- ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
++ ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+@@ -183,7 +184,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
+ entry->inode = igrab(&inode->vfs_inode);
+ entry->compress_type = compress_type;
+ entry->truncated_len = (u64)-1;
+- entry->qgroup_rsv = ret;
++ entry->qgroup_rsv = qgroup_rsv;
+ entry->flags = flags;
+ refcount_set(&entry->refs, 1);
+ init_waitqueue_head(&entry->wait);
+@@ -603,7 +604,9 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
+ release = entry->disk_num_bytes;
+ else
+ release = entry->num_bytes;
+- btrfs_delalloc_release_metadata(btrfs_inode, release, false);
++ btrfs_delalloc_release_metadata(btrfs_inode, release,
++ test_bit(BTRFS_ORDERED_IOERR,
++ &entry->flags));
+ }
+
+ percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
+@@ -1199,6 +1202,7 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
+ ordered->disk_bytenr += len;
+ ordered->num_bytes -= len;
+ ordered->disk_num_bytes -= len;
++ ordered->ram_bytes -= len;
+
+ if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
+ ASSERT(ordered->bytes_left == 0);
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index 0c93439e929fb0..815a5fc3ff9d82 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -12,7 +12,7 @@
+
+ struct root_name_map {
+ u64 id;
+- char name[16];
++ const char *name;
+ };
+
+ static const struct root_name_map root_map[] = {
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index b99230db3c8200..74b82390fe8470 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -208,6 +208,7 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ INIT_LIST_HEAD(&qgroup->groups);
+ INIT_LIST_HEAD(&qgroup->members);
+ INIT_LIST_HEAD(&qgroup->dirty);
++ INIT_LIST_HEAD(&qgroup->iterator);
+
+ rb_link_node(&qgroup->node, parent, p);
+ rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
+@@ -622,7 +623,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ btrfs_free_path(path);
+ return ret;
+@@ -700,7 +701,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_release_path(path);
+
+@@ -719,7 +720,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -808,7 +809,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
+ btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -854,7 +855,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
+ btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -896,7 +897,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
+ btrfs_set_qgroup_status_rescan(l, ptr,
+ fs_info->qgroup_rescan_progress.objectid);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -1069,7 +1070,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ BTRFS_QGROUP_STATUS_FLAGS_MASK);
+ btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ key.objectid = 0;
+ key.type = BTRFS_ROOT_REF_KEY;
+@@ -1227,7 +1228,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+
+ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ {
+- struct btrfs_root *quota_root;
++ struct btrfs_root *quota_root = NULL;
+ struct btrfs_trans_handle *trans = NULL;
+ int ret = 0;
+
+@@ -1322,9 +1323,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ quota_root->node, 0, 1);
+
+- btrfs_put_root(quota_root);
+
+ out:
++ btrfs_put_root(quota_root);
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ if (ret && trans)
+ btrfs_end_transaction(trans);
+@@ -1342,6 +1343,24 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
+ list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
+ }
+
++static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
++{
++ if (!list_empty(&qgroup->iterator))
++ return;
++
++ list_add_tail(&qgroup->iterator, head);
++}
++
++static void qgroup_iterator_clean(struct list_head *head)
++{
++ while (!list_empty(head)) {
++ struct btrfs_qgroup *qgroup;
++
++ qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
++ list_del_init(&qgroup->iterator);
++ }
++}
++
+ /*
+ * The easy accounting: we're updating a qgroup relationship whose child qgroup
+ * only has exclusive extents.
+@@ -1640,6 +1659,15 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ return ret;
+ }
+
++static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
++{
++ return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
++ qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
++ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
++ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
++ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
++}
++
+ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ {
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+@@ -1659,6 +1687,11 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ goto out;
+ }
+
++ if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
++ ret = -EBUSY;
++ goto out;
++ }
++
+ /* Check if there are no children of this qgroup */
+ if (!list_empty(&qgroup->members)) {
+ ret = -EBUSY;
+@@ -2692,8 +2725,6 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ if (nr_old_roots == 0 && nr_new_roots == 0)
+ goto out_free;
+
+- BUG_ON(!fs_info->quota_root);
+-
+ trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
+ num_bytes, nr_old_roots, nr_new_roots);
+
+@@ -2800,11 +2831,6 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ ctx.roots = NULL;
+ }
+
+- /* Free the reserved data space */
+- btrfs_qgroup_free_refroot(fs_info,
+- record->data_rsv_refroot,
+- record->data_rsv,
+- BTRFS_QGROUP_RSV_DATA);
+ /*
+ * Use BTRFS_SEQ_LAST as time_seq to do special search,
+ * which doesn't lock tree or delayed_refs and search
+@@ -2828,6 +2854,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
+ record->old_roots = NULL;
+ new_roots = NULL;
+ }
++ /* Free the reserved data space */
++ btrfs_qgroup_free_refroot(fs_info,
++ record->data_rsv_refroot,
++ record->data_rsv,
++ BTRFS_QGROUP_RSV_DATA);
+ cleanup:
+ ulist_free(record->old_roots);
+ ulist_free(new_roots);
+@@ -3125,8 +3156,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 ref_root = root->root_key.objectid;
+ int ret = 0;
+- struct ulist_node *unode;
+- struct ulist_iterator uiter;
++ LIST_HEAD(qgroup_list);
+
+ if (!is_fstree(ref_root))
+ return 0;
+@@ -3146,49 +3176,28 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
+ if (!qgroup)
+ goto out;
+
+- /*
+- * in a first step, we check all affected qgroups if any limits would
+- * be exceeded
+- */
+- ulist_reinit(fs_info->qgroup_ulist);
+- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
+- qgroup_to_aux(qgroup), GFP_ATOMIC);
+- if (ret < 0)
+- goto out;
+- ULIST_ITER_INIT(&uiter);
+- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
+- struct btrfs_qgroup *qg;
++ qgroup_iterator_add(&qgroup_list, qgroup);
++ list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
+
+- qg = unode_aux_to_qgroup(unode);
+-
+- if (enforce && !qgroup_check_limits(qg, num_bytes)) {
++ if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
+ ret = -EDQUOT;
+ goto out;
+ }
+
+- list_for_each_entry(glist, &qg->groups, next_group) {
+- ret = ulist_add(fs_info->qgroup_ulist,
+- glist->group->qgroupid,
+- qgroup_to_aux(glist->group), GFP_ATOMIC);
+- if (ret < 0)
+- goto out;
+- }
++ list_for_each_entry(glist, &qgroup->groups, next_group)
++ qgroup_iterator_add(&qgroup_list, glist->group);
+ }
++
+ ret = 0;
+ /*
+ * no limits exceeded, now record the reservation into all qgroups
+ */
+- ULIST_ITER_INIT(&uiter);
+- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
+- struct btrfs_qgroup *qg;
+-
+- qg = unode_aux_to_qgroup(unode);
+-
+- qgroup_rsv_add(fs_info, qg, num_bytes, type);
+- }
++ list_for_each_entry(qgroup, &qgroup_list, iterator)
++ qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
+
+ out:
++ qgroup_iterator_clean(&qgroup_list);
+ spin_unlock(&fs_info->qgroup_lock);
+ return ret;
+ }
+@@ -3753,6 +3762,8 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ return 0;
+ }
+
++ btrfs_run_delayed_iputs(root->fs_info);
++ btrfs_wait_on_delayed_iputs(root->fs_info);
+ ret = btrfs_start_delalloc_snapshot(root, true);
+ if (ret < 0)
+ goto out;
+@@ -3855,13 +3866,14 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
+
+ /* Free ranges specified by @reserved, normally in error path */
+ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
+- struct extent_changeset *reserved, u64 start, u64 len)
++ struct extent_changeset *reserved,
++ u64 start, u64 len, u64 *freed_ret)
+ {
+ struct btrfs_root *root = inode->root;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ struct extent_changeset changeset;
+- int freed = 0;
++ u64 freed = 0;
+ int ret;
+
+ extent_changeset_init(&changeset);
+@@ -3902,7 +3914,9 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
+ }
+ btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
+ BTRFS_QGROUP_RSV_DATA);
+- ret = freed;
++ if (freed_ret)
++ *freed_ret = freed;
++ ret = 0;
+ out:
+ extent_changeset_release(&changeset);
+ return ret;
+@@ -3910,7 +3924,7 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
+
+ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ struct extent_changeset *reserved, u64 start, u64 len,
+- int free)
++ u64 *released, int free)
+ {
+ struct extent_changeset changeset;
+ int trace_op = QGROUP_RELEASE;
+@@ -3922,7 +3936,7 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ /* In release case, we shouldn't have @reserved */
+ WARN_ON(!free && reserved);
+ if (free && reserved)
+- return qgroup_free_reserved_data(inode, reserved, start, len);
++ return qgroup_free_reserved_data(inode, reserved, start, len, released);
+ extent_changeset_init(&changeset);
+ ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
+ EXTENT_QGROUP_RESERVED, &changeset);
+@@ -3937,7 +3951,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ btrfs_qgroup_free_refroot(inode->root->fs_info,
+ inode->root->root_key.objectid,
+ changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
+- ret = changeset.bytes_changed;
++ if (released)
++ *released = changeset.bytes_changed;
+ out:
+ extent_changeset_release(&changeset);
+ return ret;
+@@ -3956,9 +3971,10 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
+ * NOTE: This function may sleep for memory allocation.
+ */
+ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+- struct extent_changeset *reserved, u64 start, u64 len)
++ struct extent_changeset *reserved,
++ u64 start, u64 len, u64 *freed)
+ {
+- return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
++ return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
+ }
+
+ /*
+@@ -3976,9 +3992,9 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+ *
+ * NOTE: This function may sleep for memory allocation.
+ */
+-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
++int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
+ {
+- return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
++ return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
+ }
+
+ static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
+@@ -4104,9 +4120,7 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
+ int num_bytes)
+ {
+ struct btrfs_qgroup *qgroup;
+- struct ulist_node *unode;
+- struct ulist_iterator uiter;
+- int ret = 0;
++ LIST_HEAD(qgroup_list);
+
+ if (num_bytes == 0)
+ return;
+@@ -4117,31 +4131,22 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto out;
+- ulist_reinit(fs_info->qgroup_ulist);
+- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
+- qgroup_to_aux(qgroup), GFP_ATOMIC);
+- if (ret < 0)
+- goto out;
+- ULIST_ITER_INIT(&uiter);
+- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
+- struct btrfs_qgroup *qg;
+- struct btrfs_qgroup_list *glist;
+
+- qg = unode_aux_to_qgroup(unode);
++ qgroup_iterator_add(&qgroup_list, qgroup);
++ list_for_each_entry(qgroup, &qgroup_list, iterator) {
++ struct btrfs_qgroup_list *glist;
+
+- qgroup_rsv_release(fs_info, qg, num_bytes,
++ qgroup_rsv_release(fs_info, qgroup, num_bytes,
+ BTRFS_QGROUP_RSV_META_PREALLOC);
+- qgroup_rsv_add(fs_info, qg, num_bytes,
+- BTRFS_QGROUP_RSV_META_PERTRANS);
+- list_for_each_entry(glist, &qg->groups, next_group) {
+- ret = ulist_add(fs_info->qgroup_ulist,
+- glist->group->qgroupid,
+- qgroup_to_aux(glist->group), GFP_ATOMIC);
+- if (ret < 0)
+- goto out;
+- }
++ if (!sb_rdonly(fs_info->sb))
++ qgroup_rsv_add(fs_info, qgroup, num_bytes,
++ BTRFS_QGROUP_RSV_META_PERTRANS);
++
++ list_for_each_entry(glist, &qgroup->groups, next_group)
++ qgroup_iterator_add(&qgroup_list, glist->group);
+ }
+ out:
++ qgroup_iterator_clean(&qgroup_list);
+ spin_unlock(&fs_info->qgroup_lock);
+ }
+
+@@ -4157,6 +4162,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
+ BTRFS_QGROUP_RSV_META_PREALLOC);
+ trace_qgroup_meta_convert(root, num_bytes);
+ qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
++ if (!sb_rdonly(fs_info->sb))
++ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
+ }
+
+ /*
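The qgroup_free_reserved_data() change above moves the freed byte count out of the int return value into a u64 out-parameter, so large ranges no longer truncate and the return value is reserved for 0/-errno. A minimal userspace sketch of that convention (illustrative only, hypothetical names, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the reworked helper: report the freed byte count
 * through a u64 out-parameter instead of the int return value,
 * which would overflow past INT_MAX bytes. */
static int free_reserved(uint64_t len, uint64_t *freed_ret)
{
	uint64_t freed = len;	/* pretend the whole range was freed */

	if (freed_ret)
		*freed_ret = freed;
	return 0;		/* status only: 0 or a negative errno */
}

int main(void)
{
	uint64_t freed = 0;

	free_reserved(3ULL << 30, &freed);	/* 3 GiB: would not fit in int */
	printf("freed %llu bytes\n", (unsigned long long)freed);
	return 0;
}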
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 7bffa10589d6b2..1203f063209910 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -220,6 +220,15 @@ struct btrfs_qgroup {
+ struct list_head groups; /* groups this group is member of */
+ struct list_head members; /* groups that are members of this group */
+ struct list_head dirty; /* dirty groups */
++
++ /*
++ * For qgroup iteration usage.
++ *
++ * The iteration list should always be empty until qgroup_iterator_add()
++	 * is called, and should be reset to empty after the iteration is
++ * finished.
++ */
++ struct list_head iterator;
+ struct rb_node node; /* tree of qgroups */
+
+ /*
+@@ -363,10 +372,10 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
+ /* New io_tree based accurate qgroup reserve API */
+ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
+ struct extent_changeset **reserved, u64 start, u64 len);
+-int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
++int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
+ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+ struct extent_changeset *reserved, u64 start,
+- u64 len);
++ u64 len, u64 *freed);
+ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ enum btrfs_qgroup_rsv_type type, bool enforce);
+ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
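The ulist-to-list_head conversion in qgroup.c relies on a property of the new iterator member documented above: appending to the tail of the list while list_for_each_entry() walks it is safe, because new nodes always land behind the cursor, and a non-empty iterator member doubles as the "already queued" marker. A self-contained userspace sketch of that pattern (simplified re-implementations of the <linux/list.h> helpers it assumes):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct qgroup { int id; struct list_head iterator; };

/* Queue a qgroup at most once: a non-empty member means "already seen". */
static void iterator_add(struct list_head *head, struct qgroup *qg)
{
	if (!list_empty(&qg->iterator))
		return;
	list_add_tail(&qg->iterator, head);
}

int main(void)
{
	struct qgroup a = { .id = 1 }, b = { .id = 2 };
	struct list_head queue, *pos, *tmp;

	INIT_LIST_HEAD(&queue);
	INIT_LIST_HEAD(&a.iterator);
	INIT_LIST_HEAD(&b.iterator);

	iterator_add(&queue, &a);
	/* Appending mid-walk is safe: b lands behind the cursor. */
	for (pos = queue.next; pos != &queue; pos = pos->next) {
		struct qgroup *qg = container_of(pos, struct qgroup, iterator);

		printf("visit qgroup %d\n", qg->id);
		iterator_add(&queue, &b);
	}
	/* The qgroup_iterator_clean() analogue: reset every member. */
	for (pos = queue.next, tmp = pos->next; pos != &queue;
	     pos = tmp, tmp = pos->next)
		list_del_init(pos);
	return 0;
}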
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 95d28497de7c22..1ea5bfb8876e41 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -791,6 +791,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
++ kfree(re);
+ goto out_unlock;
+ } else if (be->num_refs == 0) {
+ btrfs_err(fs_info,
+@@ -800,6 +801,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
++ kfree(re);
+ goto out_unlock;
+ }
+
+@@ -884,8 +886,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ out_unlock:
+ spin_unlock(&fs_info->ref_verify_lock);
+ out:
+- if (ret)
++ if (ret) {
++ btrfs_free_ref_cache(fs_info);
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
++ }
+ return ret;
+ }
+
+@@ -1016,8 +1020,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
+ }
+ }
+ if (ret) {
+- btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ btrfs_free_ref_cache(fs_info);
++ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ }
+ btrfs_free_path(path);
+ return ret;
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index 65d2bd6910f2cb..9f60aa79a8c502 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -664,7 +664,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
+ struct inode *dst, u64 dst_loff)
+ {
+ struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
+- const u64 bs = fs_info->sb->s_blocksize;
++ const u64 bs = fs_info->sectorsize;
+ int ret;
+
+ /*
+@@ -731,7 +731,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ int ret;
+ int wb_ret;
+ u64 len = olen;
+- u64 bs = fs_info->sb->s_blocksize;
++ u64 bs = fs_info->sectorsize;
+
+ /*
+ * VFS's generic_remap_file_range_prep() protects us from cloning the
+@@ -797,7 +797,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ {
+ struct inode *inode_in = file_inode(file_in);
+ struct inode *inode_out = file_inode(file_out);
+- u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
++ u64 bs = BTRFS_I(inode_out)->root->fs_info->sectorsize;
+ u64 wb_len;
+ int ret;
+
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index c6d4bb8cbe2995..299eac696eb426 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -235,71 +235,7 @@ static struct btrfs_backref_node *walk_down_backref(
+ return NULL;
+ }
+
+-static void update_backref_node(struct btrfs_backref_cache *cache,
+- struct btrfs_backref_node *node, u64 bytenr)
+-{
+- struct rb_node *rb_node;
+- rb_erase(&node->rb_node, &cache->rb_root);
+- node->bytenr = bytenr;
+- rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
+- if (rb_node)
+- btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
+-}
+-
+-/*
+- * update backref cache after a transaction commit
+- */
+-static int update_backref_cache(struct btrfs_trans_handle *trans,
+- struct btrfs_backref_cache *cache)
+-{
+- struct btrfs_backref_node *node;
+- int level = 0;
+-
+- if (cache->last_trans == 0) {
+- cache->last_trans = trans->transid;
+- return 0;
+- }
+-
+- if (cache->last_trans == trans->transid)
+- return 0;
+-
+- /*
+- * detached nodes are used to avoid unnecessary backref
+- * lookup. transaction commit changes the extent tree.
+- * so the detached nodes are no longer useful.
+- */
+- while (!list_empty(&cache->detached)) {
+- node = list_entry(cache->detached.next,
+- struct btrfs_backref_node, list);
+- btrfs_backref_cleanup_node(cache, node);
+- }
+-
+- while (!list_empty(&cache->changed)) {
+- node = list_entry(cache->changed.next,
+- struct btrfs_backref_node, list);
+- list_del_init(&node->list);
+- BUG_ON(node->pending);
+- update_backref_node(cache, node, node->new_bytenr);
+- }
+-
+- /*
+- * some nodes can be left in the pending list if there were
+- * errors during processing the pending nodes.
+- */
+- for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+- list_for_each_entry(node, &cache->pending[level], list) {
+- BUG_ON(!node->pending);
+- if (node->bytenr == node->new_bytenr)
+- continue;
+- update_backref_node(cache, node, node->new_bytenr);
+- }
+- }
+-
+- cache->last_trans = 0;
+- return 1;
+-}
+-
+-static bool reloc_root_is_dead(struct btrfs_root *root)
++static bool reloc_root_is_dead(const struct btrfs_root *root)
+ {
+ /*
+ * Pair with set_bit/clear_bit in clean_dirty_subvols and
+@@ -320,7 +256,7 @@ static bool reloc_root_is_dead(struct btrfs_root *root)
+ * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
+ * special case.
+ */
+-static bool have_reloc_root(struct btrfs_root *root)
++static bool have_reloc_root(const struct btrfs_root *root)
+ {
+ if (reloc_root_is_dead(root))
+ return false;
+@@ -329,31 +265,30 @@ static bool have_reloc_root(struct btrfs_root *root)
+ return true;
+ }
+
+-int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
++bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
+ {
+ struct btrfs_root *reloc_root;
+
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+- return 0;
++ return false;
+
+ /* This root has been merged with its reloc tree, we can ignore it */
+ if (reloc_root_is_dead(root))
+- return 1;
++ return true;
+
+ reloc_root = root->reloc_root;
+ if (!reloc_root)
+- return 0;
++ return false;
+
+ if (btrfs_header_generation(reloc_root->commit_root) ==
+ root->fs_info->running_transaction->transid)
+- return 0;
++ return false;
+ /*
+- * if there is reloc tree and it was created in previous
+- * transaction backref lookup can find the reloc tree,
+- * so backref node for the fs tree root is useless for
+- * relocation.
++	 * If there is a reloc tree and it was created in a previous transaction,
++	 * backref lookup can find the reloc tree, so the backref node for the fs
++	 * tree root is useless for relocation.
+ */
+- return 1;
++ return true;
+ }
+
+ /*
+@@ -547,7 +482,7 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ */
+ static int clone_backref_node(struct btrfs_trans_handle *trans,
+ struct reloc_control *rc,
+- struct btrfs_root *src,
++ const struct btrfs_root *src,
+ struct btrfs_root *dest)
+ {
+ struct btrfs_root *reloc_root = src->reloc_root;
+@@ -558,9 +493,6 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_edge *new_edge;
+ struct rb_node *rb_node;
+
+- if (cache->last_trans > 0)
+- update_backref_cache(trans, cache);
+-
+ rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
+ if (rb_node) {
+ node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
+@@ -931,7 +863,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ btrfs_grab_root(reloc_root);
+
+ /* root->reloc_root will stay until current relocation finished */
+- if (fs_info->reloc_ctl->merge_reloc_tree &&
++ if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
+ btrfs_root_refs(root_item) == 0) {
+ set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
+ /*
+@@ -1181,15 +1113,15 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ }
+ }
+ if (dirty)
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (inode)
+ btrfs_add_delayed_iput(BTRFS_I(inode));
+ return ret;
+ }
+
+-static noinline_for_stack
+-int memcmp_node_keys(struct extent_buffer *eb, int slot,
+- struct btrfs_path *path, int level)
++static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
++ int slot, const struct btrfs_path *path,
++ int level)
+ {
+ struct btrfs_disk_key key1;
+ struct btrfs_disk_key key2;
+@@ -1374,13 +1306,13 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+ */
+ btrfs_set_node_blockptr(parent, slot, new_bytenr);
+ btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+
+ btrfs_set_node_blockptr(path->nodes[level],
+ path->slots[level], old_bytenr);
+ btrfs_set_node_ptr_generation(path->nodes[level],
+ path->slots[level], old_ptr_gen);
+- btrfs_mark_buffer_dirty(path->nodes[level]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[level]);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
+ blocksize, path->nodes[level]->start);
+@@ -1518,8 +1450,8 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
+ * [min_key, max_key)
+ */
+ static int invalidate_extent_cache(struct btrfs_root *root,
+- struct btrfs_key *min_key,
+- struct btrfs_key *max_key)
++ const struct btrfs_key *min_key,
++ const struct btrfs_key *max_key)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct inode *inode = NULL;
+@@ -2517,7 +2449,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ node->eb->start);
+ btrfs_set_node_ptr_generation(upper->eb, slot,
+ trans->transid);
+- btrfs_mark_buffer_dirty(upper->eb);
++ btrfs_mark_buffer_dirty(trans, upper->eb);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
+ node->eb->start, blocksize,
+@@ -2830,7 +2762,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
+
+ static noinline_for_stack int prealloc_file_extent_cluster(
+ struct btrfs_inode *inode,
+- struct file_extent_cluster *cluster)
++ const struct file_extent_cluster *cluster)
+ {
+ u64 alloc_hint = 0;
+ u64 start;
+@@ -2965,7 +2897,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inod
+ /*
+ * Allow error injection to test balance/relocation cancellation
+ */
+-noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
++noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
+ {
+ return atomic_read(&fs_info->balance_cancel_req) ||
+ atomic_read(&fs_info->reloc_cancel_req) ||
+@@ -2973,7 +2905,7 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
+ }
+ ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
+
+-static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
++static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
+ int cluster_nr)
+ {
+ /* Last extent, use cluster end directly */
+@@ -2985,7 +2917,7 @@ static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
+ }
+
+ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+- struct file_extent_cluster *cluster,
++ const struct file_extent_cluster *cluster,
+ int *cluster_nr, unsigned long page_index)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+@@ -3120,7 +3052,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+ }
+
+ static int relocate_file_extent_cluster(struct inode *inode,
+- struct file_extent_cluster *cluster)
++ const struct file_extent_cluster *cluster)
+ {
+ u64 offset = BTRFS_I(inode)->index_cnt;
+ unsigned long index;
+@@ -3158,9 +3090,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
+ return ret;
+ }
+
+-static noinline_for_stack
+-int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
+- struct file_extent_cluster *cluster)
++static noinline_for_stack int relocate_data_extent(struct inode *inode,
++ const struct btrfs_key *extent_key,
++ struct file_extent_cluster *cluster)
+ {
+ int ret;
+
+@@ -3193,7 +3125,7 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
+ * the major work is getting the generation and level of the block
+ */
+ static int add_tree_block(struct reloc_control *rc,
+- struct btrfs_key *extent_key,
++ const struct btrfs_key *extent_key,
+ struct btrfs_path *path,
+ struct rb_root *blocks)
+ {
+@@ -3444,11 +3376,10 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
+ /*
+ * helper to find all tree blocks that reference a given data extent
+ */
+-static noinline_for_stack
+-int add_data_references(struct reloc_control *rc,
+- struct btrfs_key *extent_key,
+- struct btrfs_path *path,
+- struct rb_root *blocks)
++static noinline_for_stack int add_data_references(struct reloc_control *rc,
++ const struct btrfs_key *extent_key,
++ struct btrfs_path *path,
++ struct rb_root *blocks)
+ {
+ struct btrfs_backref_walk_ctx ctx = { 0 };
+ struct ulist_iterator leaf_uiter;
+@@ -3684,11 +3615,9 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
+ break;
+ }
+ restart:
+- if (update_backref_cache(trans, &rc->backref_cache)) {
+- btrfs_end_transaction(trans);
+- trans = NULL;
+- continue;
+- }
++ if (rc->backref_cache.last_trans != trans->transid)
++ btrfs_backref_release_cache(&rc->backref_cache);
++ rc->backref_cache.last_trans = trans->transid;
+
+ ret = find_next_extent(rc, path, &key);
+ if (ret < 0)
+@@ -3835,7 +3764,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+ btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
+ btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
+ BTRFS_INODE_PREALLOC);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -3874,9 +3803,9 @@ static void delete_orphan_inode(struct btrfs_trans_handle *trans,
+ * helper to create inode for data relocation.
+ * the inode is in data relocation tree and its link count is 0
+ */
+-static noinline_for_stack
+-struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
+- struct btrfs_block_group *group)
++static noinline_for_stack struct inode *create_reloc_inode(
++ struct btrfs_fs_info *fs_info,
++ const struct btrfs_block_group *group)
+ {
+ struct inode *inode = NULL;
+ struct btrfs_trans_handle *trans;
+@@ -4422,7 +4351,8 @@ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
+ }
+
+ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root, struct extent_buffer *buf,
++ struct btrfs_root *root,
++ const struct extent_buffer *buf,
+ struct extent_buffer *cow)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -4561,7 +4491,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+ *
+ * Return U64_MAX if no running relocation.
+ */
+-u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info)
++u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
+ {
+ u64 logical = U64_MAX;
+
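The relocate_block_group() hunk above replaces the incremental update_backref_cache() with a much simpler rule: if the cache was built under a different transaction, drop it wholesale and let it be rebuilt lazily. A tiny illustrative sketch of that generation-tagged cache idea (hypothetical names, not the kernel API):

#include <stdio.h>

struct cache { unsigned long long last_trans; int entries; };

static void cache_release(struct cache *c) { c->entries = 0; }

/* Drop the whole cache when the transaction it was built under ended. */
static void maybe_reset(struct cache *c, unsigned long long transid)
{
	if (c->last_trans != transid)
		cache_release(c);
	c->last_trans = transid;
}

int main(void)
{
	struct cache c = { .last_trans = 7, .entries = 42 };

	maybe_reset(&c, 7);
	printf("same trans: %d entries kept\n", c.entries);	/* 42 */
	maybe_reset(&c, 8);
	printf("new trans:  %d entries kept\n", c.entries);	/* 0 */
	return 0;
}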
+diff --git a/fs/btrfs/relocation.h b/fs/btrfs/relocation.h
+index 77d69f6ae967c2..5fb60f2deb5305 100644
+--- a/fs/btrfs/relocation.h
++++ b/fs/btrfs/relocation.h
+@@ -10,15 +10,16 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
+ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered);
+ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root, struct extent_buffer *buf,
++ struct btrfs_root *root,
++ const struct extent_buffer *buf,
+ struct extent_buffer *cow);
+ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
+ u64 *bytes_to_reserve);
+ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
+ struct btrfs_pending_snapshot *pending);
+-int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
++int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info);
+ struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr);
+-int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
+-u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info);
++bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root);
++u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info);
+
+ #endif
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 859874579456fc..aac18f620de4ce 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -191,7 +191,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
+
+ write_extent_buffer(l, item, ptr, sizeof(*item));
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -438,7 +438,7 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ btrfs_set_root_ref_name_len(leaf, ref, name->len);
+ ptr = (unsigned long)(ref + 1);
+ write_extent_buffer(leaf, name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (key.type == BTRFS_ROOT_BACKREF_KEY) {
+ btrfs_release_path(path);
+@@ -537,13 +537,3 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ }
+ return ret;
+ }
+-
+-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+- struct btrfs_block_rsv *rsv)
+-{
+- struct btrfs_fs_info *fs_info = root->fs_info;
+- u64 qgroup_to_release;
+-
+- btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
+- btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
+-}
+diff --git a/fs/btrfs/root-tree.h b/fs/btrfs/root-tree.h
+index cbbaca32126e62..cce808b44cc025 100644
+--- a/fs/btrfs/root-tree.h
++++ b/fs/btrfs/root-tree.h
+@@ -6,8 +6,6 @@
+ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int nitems, bool use_global_rsv);
+-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+- struct btrfs_block_rsv *rsv);
+ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence,
+ const struct fscrypt_str *name);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index b877203f1dc5a8..a2d91d9f8a109d 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1013,6 +1013,7 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
+ stripe->bg->length);
++ unsigned long repaired;
+ int mirror;
+ int i;
+
+@@ -1079,16 +1080,15 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
+ * Submit the repaired sectors. For zoned case, we cannot do repair
+ * in-place, but queue the bg to be relocated.
+ */
+- if (btrfs_is_zoned(fs_info)) {
+- if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
++ bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
++ stripe->nr_sectors);
++ if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
++ if (btrfs_is_zoned(fs_info)) {
+ btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
+- } else if (!sctx->readonly) {
+- unsigned long repaired;
+-
+- bitmap_andnot(&repaired, &stripe->init_error_bitmap,
+- &stripe->error_bitmap, stripe->nr_sectors);
+- scrub_write_sectors(sctx, stripe, repaired, false);
+- wait_scrub_stripe_io(stripe);
++ } else {
++ scrub_write_sectors(sctx, stripe, repaired, false);
++ wait_scrub_stripe_io(stripe);
++ }
+ }
+
+ scrub_stripe_report_errors(sctx, stripe);
+@@ -1099,12 +1099,22 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
+ static void scrub_read_endio(struct btrfs_bio *bbio)
+ {
+ struct scrub_stripe *stripe = bbio->private;
++ struct bio_vec *bvec;
++ int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
++ int num_sectors;
++ u32 bio_size = 0;
++ int i;
++
++ ASSERT(sector_nr < stripe->nr_sectors);
++ bio_for_each_bvec_all(bvec, &bbio->bio, i)
++ bio_size += bvec->bv_len;
++ num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
+
+ if (bbio->bio.bi_status) {
+- bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
+- bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
++ bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
++ bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
+ } else {
+- bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
++ bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
+ }
+ bio_put(&bbio->bio);
+ if (atomic_dec_and_test(&stripe->pending_io)) {
+@@ -1640,6 +1650,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
+ {
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ struct btrfs_bio *bbio;
++ unsigned int nr_sectors = min_t(u64, BTRFS_STRIPE_LEN, stripe->bg->start +
++ stripe->bg->length - stripe->logical) >>
++ fs_info->sectorsize_bits;
+ int mirror = stripe->mirror_num;
+
+ ASSERT(stripe->bg);
+@@ -1649,14 +1662,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
+ bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
+ scrub_read_endio, stripe);
+
+- /* Read the whole stripe. */
+ bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
+- for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
++ /* Read the whole range inside the chunk boundary. */
++ for (unsigned int cur = 0; cur < nr_sectors; cur++) {
++ struct page *page = scrub_stripe_get_page(stripe, cur);
++ unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
+ int ret;
+
+- ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
++ ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
+ /* We should have allocated enough bio vectors. */
+- ASSERT(ret == PAGE_SIZE);
++ ASSERT(ret == fs_info->sectorsize);
+ }
+ atomic_inc(&stripe->pending_io);
+
+@@ -1798,6 +1813,9 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
+ */
+ ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
+
++ /* @found_logical_ret must be specified. */
++ ASSERT(found_logical_ret);
++
+ stripe = &sctx->stripes[sctx->cur_stripe];
+ scrub_reset_stripe(stripe);
+ ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
+@@ -1806,8 +1824,7 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
+ /* Either >0 as no more extents or <0 for error. */
+ if (ret)
+ return ret;
+- if (found_logical_ret)
+- *found_logical_ret = stripe->logical;
++ *found_logical_ret = stripe->logical;
+ sctx->cur_stripe++;
+
+ /* We filled one group, submit it. */
+@@ -2003,14 +2020,14 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ const u64 logical_end = logical_start + logical_length;
+ u64 cur_logical = logical_start;
+- int ret;
++ int ret = 0;
+
+ /* The range must be inside the bg */
+ ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+
+ /* Go through each extent items inside the logical range */
+ while (cur_logical < logical_end) {
+- u64 found_logical;
++ u64 found_logical = U64_MAX;
+ u64 cur_physical = physical + cur_logical - logical_start;
+
+ /* Canceled? */
+@@ -2045,6 +2062,8 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ if (ret < 0)
+ break;
+
++ /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
++ ASSERT(found_logical != U64_MAX);
+ cur_logical = found_logical + BTRFS_STRIPE_LEN;
+
+ /* Don't hold CPU for too long time */
+@@ -2720,7 +2739,17 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
+ gen = fs_info->last_trans_committed;
+
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+- bytenr = btrfs_sb_offset(i);
++ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
++ if (ret == -ENOENT)
++ break;
++
++ if (ret) {
++ spin_lock(&sctx->stat_lock);
++ sctx->stat.super_errors++;
++ spin_unlock(&sctx->stat_lock);
++ continue;
++ }
++
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >
+ scrub_dev->commit_total_bytes)
+ break;
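The reordered scrub repair path above derives the set of repaired sectors the same way for both the zoned and regular cases: repaired = init_error_bitmap & ~error_bitmap, i.e. the sectors that were bad on the first read but are no longer flagged. A one-word userspace illustration (the kernel uses bitmap_andnot() over nr_sectors bits):

#include <stdio.h>

int main(void)
{
	unsigned long init_error = 0x2c;	/* bad after the initial read */
	unsigned long error      = 0x20;	/* still bad after all retries */
	unsigned long repaired   = init_error & ~error;

	printf("repaired bitmap: 0x%lx\n", repaired);	/* 0xc */
	return 0;
}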
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 3a566150c531aa..aa1e6d88a72c7c 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -777,7 +777,12 @@ static int begin_cmd(struct send_ctx *sctx, int cmd)
+ if (WARN_ON(!sctx->send_buf))
+ return -EINVAL;
+
+- BUG_ON(sctx->send_size);
++ if (unlikely(sctx->send_size != 0)) {
++ btrfs_err(sctx->send_root->fs_info,
++ "send: command header buffer not empty cmd %d offset %llu",
++ cmd, sctx->send_off);
++ return -EINVAL;
++ }
+
+ sctx->send_size += sizeof(*hdr);
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+@@ -1070,7 +1075,15 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
+ ret = PTR_ERR(start);
+ goto out;
+ }
+- BUG_ON(start < p->buf);
++ if (unlikely(start < p->buf)) {
++ btrfs_err(root->fs_info,
++ "send: path ref buffer underflow for key (%llu %u %llu)",
++ found_key->objectid,
++ found_key->type,
++ found_key->offset);
++ ret = -EINVAL;
++ goto out;
++ }
+ }
+ p->start = start;
+ } else {
+@@ -4182,7 +4195,13 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ * This should never happen as the root dir always has the same ref
+ * which is always '..'
+ */
+- BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
++ if (unlikely(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID)) {
++ btrfs_err(fs_info,
++ "send: unexpected inode %llu in process_recorded_refs()",
++ sctx->cur_ino);
++ ret = -EINVAL;
++ goto out;
++ }
+
+ valid_path = fs_path_alloc();
+ if (!valid_path) {
+@@ -6140,26 +6159,73 @@ static int send_write_or_clone(struct send_ctx *sctx,
+ int ret = 0;
+ u64 offset = key->offset;
+ u64 end;
+- u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
++ u64 bs = sctx->send_root->fs_info->sectorsize;
++ struct btrfs_file_extent_item *ei;
++ u64 disk_byte;
++ u64 data_offset;
++ u64 num_bytes;
++ struct btrfs_inode_info info = { 0 };
+
+ end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
+ if (offset >= end)
+ return 0;
+
+- if (clone_root && IS_ALIGNED(end, bs)) {
+- struct btrfs_file_extent_item *ei;
+- u64 disk_byte;
+- u64 data_offset;
++ num_bytes = end - offset;
+
+- ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+- struct btrfs_file_extent_item);
+- disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
+- data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
+- ret = clone_range(sctx, path, clone_root, disk_byte,
+- data_offset, offset, end - offset);
+- } else {
+- ret = send_extent_data(sctx, path, offset, end - offset);
++ if (!clone_root)
++ goto write_data;
++
++ if (IS_ALIGNED(end, bs))
++ goto clone_data;
++
++ /*
++ * If the extent end is not aligned, we can clone if the extent ends at
++ * the i_size of the inode and the clone range ends at the i_size of the
++ * source inode, otherwise the clone operation fails with -EINVAL.
++ */
++ if (end != sctx->cur_inode_size)
++ goto write_data;
++
++ ret = get_inode_info(clone_root->root, clone_root->ino, &info);
++ if (ret < 0)
++ return ret;
++
++ if (clone_root->offset + num_bytes == info.size) {
++ /*
++ * The final size of our file matches the end offset, but it may
++ * be that its current size is larger, so we have to truncate it
++ * to any value between the start offset of the range and the
++ * final i_size, otherwise the clone operation is invalid
++ * because it's unaligned and it ends before the current EOF.
++ * We do this truncate to the final i_size when we finish
++ * processing the inode, but it's too late by then. And here we
++ * truncate to the start offset of the range because it's always
++		 * sector size aligned, whereas if it were the final i_size it
++ * would result in dirtying part of a page, filling part of a
++ * page with zeroes and then having the clone operation at the
++ * receiver trigger IO and wait for it due to the dirty page.
++ */
++ if (sctx->parent_root != NULL) {
++ ret = send_truncate(sctx, sctx->cur_ino,
++ sctx->cur_inode_gen, offset);
++ if (ret < 0)
++ return ret;
++ }
++ goto clone_data;
+ }
++
++write_data:
++ ret = send_extent_data(sctx, path, offset, num_bytes);
++ sctx->cur_inode_next_write_offset = end;
++ return ret;
++
++clone_data:
++ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
++ struct btrfs_file_extent_item);
++ disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
++ data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
++ ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset,
++ num_bytes);
+ sctx->cur_inode_next_write_offset = end;
+ return ret;
+ }
+@@ -6705,11 +6771,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+ if (ret)
+ goto out;
+ }
+- if (sctx->cur_inode_last_extent <
+- sctx->cur_inode_size) {
+- ret = send_hole(sctx, sctx->cur_inode_size);
+- if (ret)
++ if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
++ ret = range_is_hole_in_parent(sctx,
++ sctx->cur_inode_last_extent,
++ sctx->cur_inode_size);
++ if (ret < 0) {
+ goto out;
++ } else if (ret == 0) {
++ ret = send_hole(sctx, sctx->cur_inode_size);
++ if (ret < 0)
++ goto out;
++ } else {
++ /* Range is already a hole, skip. */
++ ret = 0;
++ }
+ }
+ }
+ if (need_truncate) {
+@@ -7420,8 +7495,8 @@ static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen
+ u64 reada_done = 0;
+
+ lockdep_assert_held_read(&parent->fs_info->commit_root_sem);
++ ASSERT(*level != 0);
+
+- BUG_ON(*level == 0);
+ eb = btrfs_read_node_slot(parent, slot);
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+@@ -8111,7 +8186,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ }
+
+ if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
+- ret = -EINVAL;
++ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+@@ -8158,7 +8233,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ }
+
+ sctx->send_filp = fget(arg->send_fd);
+- if (!sctx->send_filp) {
++ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out;
+ }
+@@ -8205,8 +8280,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ goto out;
+ }
+
+- sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
+- arg->clone_sources_count + 1,
++ sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
++ sizeof(*sctx->clone_roots),
+ GFP_KERNEL);
+ if (!sctx->clone_roots) {
+ ret = -ENOMEM;
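The kvcalloc() hunk above only swaps the arguments: calloc-style allocators take (count, element_size), and gcc 14's -Walloc-size warning fires when the element size lands in the count slot. A userspace analogue of the preferred ordering (illustrative only):

#include <stdlib.h>

struct clone_root { int dummy; };

int main(void)
{
	size_t count = 8;
	/* Count first, element size second: same allocation either way,
	 * but this order matches what the allocator attributes describe. */
	struct clone_root *roots = calloc(count + 1, sizeof(*roots));

	free(roots);
	return 0;
}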
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index d7e8cd4f140cfd..581bdd709ee0d0 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -312,7 +312,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+ found->bytes_used += block_group->used;
+ found->disk_used += block_group->used * factor;
+ found->bytes_readonly += block_group->bytes_super;
+- found->bytes_zone_unusable += block_group->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
+ if (block_group->length > 0)
+ found->full = 0;
+ btrfs_try_granting_tickets(info, found);
+@@ -524,8 +524,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
+
+ spin_lock(&cache->lock);
+ avail = cache->length - cache->used - cache->pinned -
+- cache->reserved - cache->delalloc_bytes -
+- cache->bytes_super - cache->zone_unusable;
++ cache->reserved - cache->bytes_super - cache->zone_unusable;
+ btrfs_info(fs_info,
+ "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
+ cache->start, cache->length, cache->used, cache->pinned,
+@@ -837,7 +836,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
+ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info)
+ {
+- u64 global_rsv_size = fs_info->global_block_rsv.reserved;
++ const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
+ u64 ordered, delalloc;
+ u64 thresh;
+ u64 used;
+@@ -937,8 +936,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
+ delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
+ if (ordered >= delalloc)
+- used += fs_info->delayed_refs_rsv.reserved +
+- fs_info->delayed_block_rsv.reserved;
++ used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
++ btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
+ else
+ used += space_info->bytes_may_use - global_rsv_size;
+
+@@ -1153,7 +1152,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+ enum btrfs_flush_state flush;
+ u64 delalloc_size = 0;
+ u64 to_reclaim, block_rsv_size;
+- u64 global_rsv_size = global_rsv->reserved;
++ const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
+
+ loops++;
+
+@@ -1165,9 +1164,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+ * assume it's tied up in delalloc reservations.
+ */
+ block_rsv_size = global_rsv_size +
+- delayed_block_rsv->reserved +
+- delayed_refs_rsv->reserved +
+- trans_rsv->reserved;
++ btrfs_block_rsv_reserved(delayed_block_rsv) +
++ btrfs_block_rsv_reserved(delayed_refs_rsv) +
++ btrfs_block_rsv_reserved(trans_rsv);
+ if (block_rsv_size < space_info->bytes_may_use)
+ delalloc_size = space_info->bytes_may_use - block_rsv_size;
+
+@@ -1187,16 +1186,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+ to_reclaim = delalloc_size;
+ flush = FLUSH_DELALLOC;
+ } else if (space_info->bytes_pinned >
+- (delayed_block_rsv->reserved +
+- delayed_refs_rsv->reserved)) {
++ (btrfs_block_rsv_reserved(delayed_block_rsv) +
++ btrfs_block_rsv_reserved(delayed_refs_rsv))) {
+ to_reclaim = space_info->bytes_pinned;
+ flush = COMMIT_TRANS;
+- } else if (delayed_block_rsv->reserved >
+- delayed_refs_rsv->reserved) {
+- to_reclaim = delayed_block_rsv->reserved;
++ } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
++ btrfs_block_rsv_reserved(delayed_refs_rsv)) {
++ to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
+ flush = FLUSH_DELAYED_ITEMS_NR;
+ } else {
+- to_reclaim = delayed_refs_rsv->reserved;
++ to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
+ flush = FLUSH_DELAYED_REFS_NR;
+ }
+
+diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
+index 0bb9d14e60a82f..08a3bd10addcf9 100644
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -197,6 +197,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
+
+ DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
+ DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
++DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
+
+ int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
+ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
+index 1b999c6e419307..b98d42ca55647f 100644
+--- a/fs/btrfs/subpage.c
++++ b/fs/btrfs/subpage.c
+@@ -713,8 +713,14 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
+ }
+
+ #define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \
+- bitmap_cut(dst, subpage->bitmaps, 0, \
+- subpage_info->name##_offset, subpage_info->bitmap_nr_bits)
++{ \
++ const int bitmap_nr_bits = subpage_info->bitmap_nr_bits; \
++ \
++ ASSERT(bitmap_nr_bits < BITS_PER_LONG); \
++ *dst = bitmap_read(subpage->bitmaps, \
++ subpage_info->name##_offset, \
++ bitmap_nr_bits); \
++}
+
+ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
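The GET_SUBPAGE_BITMAP() rework above replaces the destructive bitmap_cut() with a plain read of fewer than BITS_PER_LONG bits at an offset, leaving the source bitmap untouched. The core operation is a shift-and-mask; a minimal sketch (assumes nbits < BITS_PER_LONG, as the added ASSERT() enforces):

#include <stdio.h>

/* Read nbits starting at bit 'start' from a single-word bitmap. */
static unsigned long read_bits(unsigned long map, unsigned int start,
			       unsigned int nbits)
{
	return (map >> start) & ((1UL << nbits) - 1);
}

int main(void)
{
	unsigned long map = 0xf0f0;

	printf("0x%lx\n", read_bits(map, 4, 8));	/* prints 0xf */
	return 0;
}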
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 1a093ec0f7e362..e33587a814098a 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -79,7 +79,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data);
+
+ static void btrfs_put_super(struct super_block *sb)
+ {
+- close_ctree(btrfs_sb(sb));
++ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
++
++ btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
++ close_ctree(fs_info);
+ }
+
+ enum {
+@@ -2121,7 +2124,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_bavail = 0;
+
+ buf->f_type = BTRFS_SUPER_MAGIC;
+- buf->f_bsize = dentry->d_sb->s_blocksize;
++ buf->f_bsize = fs_info->sectorsize;
+ buf->f_namelen = BTRFS_NAME_LEN;
+
+ /* We treat it as constant endianness (it doesn't matter _which_)
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index b1d1ac25237b7a..c9198723e4cb73 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -1760,6 +1760,10 @@ static ssize_t btrfs_devinfo_scrub_speed_max_store(struct kobject *kobj,
+ unsigned long long limit;
+
+ limit = memparse(buf, &endptr);
++ /* There could be trailing '\n', also catch any typos after the value. */
++ endptr = skip_spaces(endptr);
++ if (*endptr != 0)
++ return -EINVAL;
+ WRITE_ONCE(device->scrub_speed_max, limit);
+ return len;
+ }
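The scrub_speed_max store hook above now rejects any garbage after the parsed value while still tolerating the trailing newline that sysfs writes always carry. A userspace analogue of that strict-parse pattern using strtoull() (memparse() additionally understands K/M/G suffixes):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the store hook: accept "<number>\n",
 * reject "<number>x" or other trailing garbage. */
static int parse_limit(const char *buf, unsigned long long *out)
{
	char *endptr;

	*out = strtoull(buf, &endptr, 10);
	if (endptr == buf)
		return -1;			/* no digits at all */
	while (isspace((unsigned char)*endptr))
		endptr++;			/* skip trailing '\n'/spaces */
	if (*endptr != '\0')
		return -1;			/* typo after the value */
	return 0;
}

int main(void)
{
	unsigned long long v;

	printf("%d\n", parse_limit("300\n", &v));	/* 0 */
	printf("%d\n", parse_limit("300x\n", &v));	/* -1 */
	return 0;
}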
+diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
+index 5ef0b90e25c3b9..6a43a64ba55adc 100644
+--- a/fs/btrfs/tests/extent-buffer-tests.c
++++ b/fs/btrfs/tests/extent-buffer-tests.c
+@@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
+ key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = 0;
+
+- btrfs_setup_item_for_insert(root, path, &key, value_len);
++ /*
++	 * Passing a NULL trans handle is fine here; we have a dummy root eb
++ * and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
+ write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
+ value_len);
+
+diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
+index 1cc86af97dc6e6..f4fd3fb7c887b2 100644
+--- a/fs/btrfs/tests/extent-io-tests.c
++++ b/fs/btrfs/tests/extent-io-tests.c
+@@ -11,6 +11,7 @@
+ #include "btrfs-tests.h"
+ #include "../ctree.h"
+ #include "../extent_io.h"
++#include "../disk-io.h"
+ #include "../btrfs_inode.h"
+
+ #define PROCESS_UNLOCK (1 << 0)
+@@ -105,9 +106,11 @@ static void dump_extent_io_tree(const struct extent_io_tree *tree)
+ }
+ }
+
+-static int test_find_delalloc(u32 sectorsize)
++static int test_find_delalloc(u32 sectorsize, u32 nodesize)
+ {
+- struct inode *inode;
++ struct btrfs_fs_info *fs_info;
++ struct btrfs_root *root = NULL;
++ struct inode *inode = NULL;
+ struct extent_io_tree *tmp;
+ struct page *page;
+ struct page *locked_page = NULL;
+@@ -121,12 +124,27 @@ static int test_find_delalloc(u32 sectorsize)
+
+ test_msg("running find delalloc tests");
+
++ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
++ if (!fs_info) {
++ test_std_err(TEST_ALLOC_FS_INFO);
++ return -ENOMEM;
++ }
++
++ root = btrfs_alloc_dummy_root(fs_info);
++ if (IS_ERR(root)) {
++ test_std_err(TEST_ALLOC_ROOT);
++ ret = PTR_ERR(root);
++ goto out;
++ }
++
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto out;
+ }
+ tmp = &BTRFS_I(inode)->io_tree;
++ BTRFS_I(inode)->root = root;
+
+ /*
+ * Passing NULL as we don't have fs_info but tracepoints are not used
+@@ -316,6 +334,8 @@ static int test_find_delalloc(u32 sectorsize)
+ process_page_range(inode, 0, total_dirty - 1,
+ PROCESS_UNLOCK | PROCESS_RELEASE);
+ iput(inode);
++ btrfs_free_dummy_root(root);
++ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+ }
+
+@@ -794,7 +814,7 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
+
+ test_msg("running extent I/O tests");
+
+- ret = test_find_delalloc(sectorsize);
++ ret = test_find_delalloc(sectorsize, nodesize);
+ if (ret)
+ goto out;
+
+diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
+index 29bdd08b241f35..bf85c75ee72262 100644
+--- a/fs/btrfs/tests/extent-map-tests.c
++++ b/fs/btrfs/tests/extent-map-tests.c
+@@ -826,6 +826,11 @@ static int test_case_7(void)
+ goto out;
+ }
+
++ if (em->block_start != SZ_32K + SZ_4K) {
++ test_err("em->block_start is %llu, expected 36K", em->block_start);
++ goto out;
++ }
++
+ free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
+index 05b03f5eab83b6..492d69d2fa7374 100644
+--- a/fs/btrfs/tests/inode-tests.c
++++ b/fs/btrfs/tests/inode-tests.c
+@@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+- btrfs_setup_item_for_insert(root, &path, &key, value_len);
++ /*
++	 * Passing a NULL trans handle is fine here; we have a dummy root eb
++ * and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, 1);
+ btrfs_set_file_extent_type(leaf, fi, type);
+@@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+- btrfs_setup_item_for_insert(root, &path, &key, value_len);
++ /*
++	 * Passing a NULL trans handle is fine here; we have a dummy root eb
++ * and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ }
+
+ /*
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index c780d372946368..0548072c642fb0 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -37,8 +37,6 @@
+
+ static struct kmem_cache *btrfs_trans_handle_cachep;
+
+-#define BTRFS_ROOT_TRANS_TAG 0
+-
+ /*
+ * Transaction states and transitions
+ *
+@@ -717,14 +715,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ h->reloc_reserved = reloc_reserved;
+ }
+
+- /*
+- * Now that we have found a transaction to be a part of, convert the
+- * qgroup reservation from prealloc to pertrans. A different transaction
+- * can't race in and free our pertrans out from under us.
+- */
+- if (qgroup_reserved)
+- btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+-
+ got_it:
+ if (!current->journal_info)
+ current->journal_info = h;
+@@ -758,8 +748,15 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
+ * not just freed.
+ */
+ btrfs_end_transaction(h);
+- return ERR_PTR(ret);
++ goto reserve_fail;
+ }
++ /*
++ * Now that we have found a transaction to be a part of, convert the
++ * qgroup reservation from prealloc to pertrans. A different transaction
++ * can't race in and free our pertrans out from under us.
++ */
++ if (qgroup_reserved)
++ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+
+ return h;
+
+@@ -1452,6 +1449,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ radix_tree_tag_clear(&fs_info->fs_roots_radix,
+ (unsigned long)root->root_key.objectid,
+ BTRFS_ROOT_TRANS_TAG);
++ btrfs_qgroup_free_meta_all_pertrans(root);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
+ btrfs_free_log(trans, root);
+@@ -1476,7 +1474,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ if (ret2)
+ return ret2;
+ spin_lock(&fs_info->fs_roots_radix_lock);
+- btrfs_qgroup_free_meta_all_pertrans(root);
+ }
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+@@ -1823,7 +1820,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ }
+
+ key.offset = (u64)-1;
+- pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
++ pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
+ if (IS_ERR(pending->snap)) {
+ ret = PTR_ERR(pending->snap);
+ pending->snap = NULL;
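The transaction.c move above has one purpose: the prealloc-to-pertrans conversion is irreversible within the handle's lifetime, so it must happen only after the last point that can fail, keeping every error path a plain release. A schematic userspace sketch of that ordering rule (hypothetical and heavily simplified):

#include <stdio.h>

static int step_that_may_fail(int fail) { return fail ? -1 : 0; }

static int start_transaction(int fail, int *prealloc, int *pertrans)
{
	*prealloc = 1;				/* take the reservation */
	*pertrans = 0;
	if (step_that_may_fail(fail))
		goto reserve_fail;
	/* Convert only once nothing can fail any more. */
	*pertrans = *prealloc;
	*prealloc = 0;
	return 0;
reserve_fail:
	*prealloc = 0;				/* plain release, no convert */
	return -1;
}

int main(void)
{
	int pre, per;

	start_transaction(0, &pre, &per);
	printf("ok:  prealloc=%d pertrans=%d\n", pre, per);	/* 0 1 */
	start_transaction(1, &pre, &per);
	printf("err: prealloc=%d pertrans=%d\n", pre, per);	/* 0 0 */
	return 0;
}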
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 93869cda6af99c..7623db359881e5 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -12,6 +12,15 @@
+ #include "ctree.h"
+ #include "misc.h"
+
++/*
++ * Signal that a direct IO write is in progress, to avoid deadlock for sync
++ * direct IO writes when fsync is called during the direct IO write path.
++ */
++#define BTRFS_TRANS_DIO_WRITE_STUB ((void *) 1)
++
++/* Radix-tree tag for roots that are part of the transaction. */
++#define BTRFS_ROOT_TRANS_TAG 0
++
+ enum btrfs_trans_state {
+ TRANS_STATE_RUNNING,
+ TRANS_STATE_COMMIT_PREP,
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index ab08a0b013112b..3a8ec33a120490 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -29,6 +29,7 @@
+ #include "accessors.h"
+ #include "file-item.h"
+ #include "inode-item.h"
++#include "extent-tree.h"
+
+ /*
+ * Error message should follow the following format:
+@@ -547,9 +548,10 @@ static int check_dir_item(struct extent_buffer *leaf,
+
+ /* dir type check */
+ dir_type = btrfs_dir_ftype(leaf, di);
+- if (unlikely(dir_type >= BTRFS_FT_MAX)) {
++ if (unlikely(dir_type <= BTRFS_FT_UNKNOWN ||
++ dir_type >= BTRFS_FT_MAX)) {
+ dir_item_err(leaf, slot,
+- "invalid dir item type, have %u expect [0, %u)",
++ "invalid dir item type, have %u expect (0, %u)",
+ dir_type, BTRFS_FT_MAX);
+ return -EUCLEAN;
+ }
+@@ -1264,6 +1266,19 @@ static void extent_err(const struct extent_buffer *eb, int slot,
+ va_end(args);
+ }
+
++static bool is_valid_dref_root(u64 rootid)
++{
++ /*
++ * The following tree root objectids are allowed to have a data backref:
++ * - subvolume trees
++ * - data reloc tree
++ * - tree root
++ * For v1 space cache
++ */
++ return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
++ rootid == BTRFS_ROOT_TREE_OBJECTID;
++}
++
+ static int check_extent_item(struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
+@@ -1274,6 +1289,8 @@ static int check_extent_item(struct extent_buffer *leaf,
+ unsigned long ptr; /* Current pointer inside inline refs */
+ unsigned long end; /* Extent item end */
+ const u32 item_size = btrfs_item_size(leaf, slot);
++ u8 last_type = 0;
++ u64 last_seq = U64_MAX;
+ u64 flags;
+ u64 generation;
+ u64 total_refs; /* Total refs in btrfs_extent_item */
+@@ -1320,6 +1337,18 @@ static int check_extent_item(struct extent_buffer *leaf,
+ * 2.2) Ref type specific data
+ * Either using btrfs_extent_inline_ref::offset, or specific
+ * data structure.
++ *
++ * All above inline items should follow the order:
++ *
++ * - All btrfs_extent_inline_ref::type should be in an ascending
++ * order
++ *
++ * - Within the same type, the items should follow a descending
++ * order by their sequence number. The sequence number is
++ * determined by:
++ * * btrfs_extent_inline_ref::offset for all types other than
++ * EXTENT_DATA_REF
++ * * hash_extent_data_ref() for EXTENT_DATA_REF
+ */
+ if (unlikely(item_size < sizeof(*ei))) {
+ extent_err(leaf, slot,
+@@ -1401,6 +1430,9 @@ static int check_extent_item(struct extent_buffer *leaf,
+ struct btrfs_extent_inline_ref *iref;
+ struct btrfs_extent_data_ref *dref;
+ struct btrfs_shared_data_ref *sref;
++ u64 seq;
++ u64 dref_root;
++ u64 dref_objectid;
+ u64 dref_offset;
+ u64 inline_offset;
+ u8 inline_type;
+@@ -1414,10 +1446,11 @@ static int check_extent_item(struct extent_buffer *leaf,
+ iref = (struct btrfs_extent_inline_ref *)ptr;
+ inline_type = btrfs_extent_inline_ref_type(leaf, iref);
+ inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
++ seq = inline_offset;
+ if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
+ extent_err(leaf, slot,
+ "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
+- ptr, inline_type, end);
++ ptr, btrfs_extent_inline_ref_size(inline_type), end);
+ return -EUCLEAN;
+ }
+
+@@ -1443,7 +1476,26 @@ static int check_extent_item(struct extent_buffer *leaf,
+ */
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ dref = (struct btrfs_extent_data_ref *)(&iref->offset);
++ dref_root = btrfs_extent_data_ref_root(leaf, dref);
++ dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref);
+ dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
++ seq = hash_extent_data_ref(
++ btrfs_extent_data_ref_root(leaf, dref),
++ btrfs_extent_data_ref_objectid(leaf, dref),
++ btrfs_extent_data_ref_offset(leaf, dref));
++ if (unlikely(!is_valid_dref_root(dref_root))) {
++ extent_err(leaf, slot,
++ "invalid data ref root value %llu",
++ dref_root);
++ return -EUCLEAN;
++ }
++ if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID ||
++ dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
++ extent_err(leaf, slot,
++ "invalid data ref objectid value %llu",
++ dref_objectid);
++ return -EUCLEAN;
++ }
+ if (unlikely(!IS_ALIGNED(dref_offset,
+ fs_info->sectorsize))) {
+ extent_err(leaf, slot,
+@@ -1470,6 +1522,24 @@ static int check_extent_item(struct extent_buffer *leaf,
+ inline_type);
+ return -EUCLEAN;
+ }
++ if (inline_type < last_type) {
++ extent_err(leaf, slot,
++ "inline ref out-of-order: has type %u, prev type %u",
++ inline_type, last_type);
++ return -EUCLEAN;
++ }
++ /* Type changed, allow the sequence starts from U64_MAX again. */
++ if (inline_type > last_type)
++ last_seq = U64_MAX;
++ if (seq > last_seq) {
++ extent_err(leaf, slot,
++"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx",
++ inline_type, inline_offset, seq,
++ last_type, last_seq);
++ return -EUCLEAN;
++ }
++ last_type = inline_type;
++ last_seq = seq;
+ ptr += btrfs_extent_inline_ref_size(inline_type);
+ }
+ /* No padding is allowed */
+@@ -1561,6 +1631,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ return -EUCLEAN;
+ }
+ for (; ptr < end; ptr += sizeof(*dref)) {
++ u64 root;
++ u64 objectid;
+ u64 offset;
+
+ /*
+@@ -1568,7 +1640,22 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ * overflow from the leaf due to hash collisions.
+ */
+ dref = (struct btrfs_extent_data_ref *)ptr;
++ root = btrfs_extent_data_ref_root(leaf, dref);
++ objectid = btrfs_extent_data_ref_objectid(leaf, dref);
+ offset = btrfs_extent_data_ref_offset(leaf, dref);
++ if (unlikely(!is_valid_dref_root(root))) {
++ extent_err(leaf, slot,
++ "invalid extent data backref root value %llu",
++ root);
++ return -EUCLEAN;
++ }
++ if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID ||
++ objectid > BTRFS_LAST_FREE_OBJECTID)) {
++ extent_err(leaf, slot,
++ "invalid extent data backref objectid value %llu",
++				   objectid);
++ return -EUCLEAN;
++ }
+ if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+ extent_err(leaf, slot,
+ "invalid extent data backref offset, have %llu expect aligned to %u",
+@@ -1631,6 +1718,72 @@ static int check_inode_ref(struct extent_buffer *leaf,
+ return 0;
+ }
+
++static int check_dev_extent_item(const struct extent_buffer *leaf,
++ const struct btrfs_key *key,
++ int slot,
++ struct btrfs_key *prev_key)
++{
++ struct btrfs_dev_extent *de;
++ const u32 sectorsize = leaf->fs_info->sectorsize;
++
++ de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
++ /* Basic fixed member checks. */
++ if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) !=
++ BTRFS_CHUNK_TREE_OBJECTID)) {
++ generic_err(leaf, slot,
++ "invalid dev extent chunk tree id, has %llu expect %llu",
++ btrfs_dev_extent_chunk_tree(leaf, de),
++ BTRFS_CHUNK_TREE_OBJECTID);
++ return -EUCLEAN;
++ }
++ if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) !=
++ BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
++ generic_err(leaf, slot,
++ "invalid dev extent chunk objectid, has %llu expect %llu",
++ btrfs_dev_extent_chunk_objectid(leaf, de),
++ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
++ return -EUCLEAN;
++ }
++ /* Alignment check. */
++ if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
++ generic_err(leaf, slot,
++ "invalid dev extent key.offset, has %llu not aligned to %u",
++ key->offset, sectorsize);
++ return -EUCLEAN;
++ }
++ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de),
++ sectorsize))) {
++ generic_err(leaf, slot,
++ "invalid dev extent chunk offset, has %llu not aligned to %u",
++			    btrfs_dev_extent_chunk_offset(leaf, de),
++ sectorsize);
++ return -EUCLEAN;
++ }
++ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de),
++ sectorsize))) {
++ generic_err(leaf, slot,
++ "invalid dev extent length, has %llu not aligned to %u",
++ btrfs_dev_extent_length(leaf, de), sectorsize);
++ return -EUCLEAN;
++ }
++ /* Overlap check with previous dev extent. */
++ if (slot && prev_key->objectid == key->objectid &&
++ prev_key->type == key->type) {
++ struct btrfs_dev_extent *prev_de;
++ u64 prev_len;
++
++ prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent);
++ prev_len = btrfs_dev_extent_length(leaf, prev_de);
++ if (unlikely(prev_key->offset + prev_len > key->offset)) {
++ generic_err(leaf, slot,
++ "dev extent overlap, prev offset %llu len %llu current offset %llu",
++				    prev_key->offset, prev_len, key->offset);
++ return -EUCLEAN;
++ }
++ }
++ return 0;
++}
++
+ /*
+ * Common point to switch the item-specific validation.
+ */
+@@ -1667,6 +1820,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
+ case BTRFS_DEV_ITEM_KEY:
+ ret = check_dev_item(leaf, key, slot);
+ break;
++ case BTRFS_DEV_EXTENT_KEY:
++ ret = check_dev_extent_item(leaf, key, slot, prev_key);
++ break;
+ case BTRFS_INODE_ITEM_KEY:
+ ret = check_inode_item(leaf, key, slot);
+ break;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index cbb17b54213177..cc9a2f8a4ae3b7 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -140,6 +140,25 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
+ * and once to do all the other items.
+ */
+
++static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
++{
++ unsigned int nofs_flag;
++ struct inode *inode;
++
++ /*
++ * We're holding a transaction handle whether we are logging or
++ * replaying a log tree, so we must make sure NOFS semantics apply
++ * because btrfs_alloc_inode() may be triggered and it uses GFP_KERNEL
++ * to allocate an inode, which can recurse back into the filesystem and
++ * attempt a transaction commit, resulting in a deadlock.
++ */
++ nofs_flag = memalloc_nofs_save();
++ inode = btrfs_iget(root->fs_info->sb, objectid, root);
++ memalloc_nofs_restore(nofs_flag);
++
++ return inode;
++}
++
+ /*
+ * start a sub transaction and setup the log tree
+ * this increments the log tree writer count to make the people
+@@ -504,9 +523,9 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
+ found_size = btrfs_item_size(path->nodes[0],
+ path->slots[0]);
+ if (found_size > item_size)
+- btrfs_truncate_item(path, item_size, 1);
++ btrfs_truncate_item(trans, path, item_size, 1);
+ else if (found_size < item_size)
+- btrfs_extend_item(path, item_size - found_size);
++ btrfs_extend_item(trans, path, item_size - found_size);
+ } else if (ret) {
+ return ret;
+ }
+@@ -574,7 +593,7 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
+ }
+ }
+ no_copy:
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -603,7 +622,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
+ {
+ struct inode *inode;
+
+- inode = btrfs_iget(root->fs_info->sb, objectid, root);
++ inode = btrfs_iget_logging(objectid, root);
+ if (IS_ERR(inode))
+ inode = NULL;
+ return inode;
+@@ -1355,7 +1374,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ struct inode *inode = NULL;
+ unsigned long ref_ptr;
+ unsigned long ref_end;
+- struct fscrypt_str name;
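++	/* Zero-init: error paths may kfree(name.name) before it is assigned. */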
++ struct fscrypt_str name = { 0 };
+ int ret;
+ int log_ref_ver = 0;
+ u64 parent_objectid;
+@@ -1827,7 +1846,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+ struct btrfs_dir_item *di,
+ struct btrfs_key *key)
+ {
+- struct fscrypt_str name;
++ struct fscrypt_str name = { 0 };
+ struct btrfs_dir_item *dir_dst_di;
+ struct btrfs_dir_item *index_dst_di;
+ bool dir_dst_matches = false;
+@@ -2107,7 +2126,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+ struct extent_buffer *eb;
+ int slot;
+ struct btrfs_dir_item *di;
+- struct fscrypt_str name;
++ struct fscrypt_str name = { 0 };
+ struct inode *inode = NULL;
+ struct btrfs_key location;
+
+@@ -3530,7 +3549,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
+ last_offset = max(last_offset, curr_end);
+ }
+ btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -4488,7 +4507,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ dst_index++;
+ }
+
+- btrfs_mark_buffer_dirty(dst_path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
+ btrfs_release_path(dst_path);
+ out:
+ kfree(ins_data);
+@@ -4693,7 +4712,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ write_extent_buffer(leaf, &fi,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(fi));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_release_path(path);
+
+@@ -4800,18 +4819,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
+ path->slots[0]++;
+ continue;
+ }
+- if (!dropped_extents) {
+- /*
+- * Avoid logging extent items logged in past fsync calls
+- * and leading to duplicate keys in the log tree.
+- */
++ /*
++ * Avoid overlapping items in the log tree. The first time we
++ * get here, get rid of everything from a past fsync. After
++ * that, if the current extent starts before the end of the last
++ * extent we copied, truncate the last one. This can happen if
++ * an ordered extent completion modifies the subvolume tree
++ * while btrfs_next_leaf() has the tree unlocked.
++ */
++ if (!dropped_extents || key.offset < truncate_offset) {
+ ret = truncate_inode_items(trans, root->log_root, inode,
+- truncate_offset,
++ min(key.offset, truncate_offset),
+ BTRFS_EXTENT_DATA_KEY);
+ if (ret)
+ goto out;
+ dropped_extents = true;
+ }
++ truncate_offset = btrfs_file_extent_end(path);
+ if (ins_nr == 0)
+ start_slot = slot;
+ ins_nr++;
+@@ -5372,7 +5396,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ struct btrfs_log_ctx *ctx)
+ {
+ struct btrfs_root *root = start_inode->root;
+- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_path *path;
+ LIST_HEAD(dir_list);
+ struct btrfs_dir_list *dir_elem;
+@@ -5433,7 +5456,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ continue;
+
+ btrfs_release_path(path);
+- di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
++ di_inode = btrfs_iget_logging(di_key.objectid, root);
+ if (IS_ERR(di_inode)) {
+ ret = PTR_ERR(di_inode);
+ goto out;
+@@ -5493,7 +5516,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ btrfs_add_delayed_iput(curr_inode);
+ curr_inode = NULL;
+
+- vfs_inode = btrfs_iget(fs_info->sb, ino, root);
++ vfs_inode = btrfs_iget_logging(ino, root);
+ if (IS_ERR(vfs_inode)) {
+ ret = PTR_ERR(vfs_inode);
+ break;
+@@ -5588,7 +5611,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
+ if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
+ return BTRFS_LOG_FORCE_COMMIT;
+
+- inode = btrfs_iget(root->fs_info->sb, ino, root);
++ inode = btrfs_iget_logging(ino, root);
+ /*
+ * If the other inode that had a conflicting dir entry was deleted in
+ * the current transaction then we either:
+@@ -5689,7 +5712,6 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_log_ctx *ctx)
+ {
+- struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
+
+ /*
+@@ -5720,7 +5742,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ list_del(&curr->list);
+ kfree(curr);
+
+- inode = btrfs_iget(fs_info->sb, ino, root);
++ inode = btrfs_iget_logging(ino, root);
+ /*
+ * If the other inode that had a conflicting dir entry was
+ * deleted in the current transaction, we need to log its parent
+@@ -5731,7 +5753,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ if (ret != -ENOENT)
+ break;
+
+- inode = btrfs_iget(fs_info->sb, parent, root);
++ inode = btrfs_iget_logging(parent, root);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ break;
+@@ -6253,7 +6275,6 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ struct btrfs_log_ctx *ctx)
+ {
+ const bool orig_log_new_dentries = ctx->log_new_dentries;
+- struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_item *item;
+ int ret = 0;
+
+@@ -6279,7 +6300,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ if (key.type == BTRFS_ROOT_ITEM_KEY)
+ continue;
+
+- di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root);
++ di_inode = btrfs_iget_logging(key.objectid, inode->root);
+ if (IS_ERR(di_inode)) {
+ ret = PTR_ERR(di_inode);
+ break;
+@@ -6663,7 +6684,6 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_log_ctx *ctx)
+ {
+- struct btrfs_fs_info *fs_info = trans->fs_info;
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+@@ -6728,8 +6748,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ cur_offset = item_size;
+ }
+
+- dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
+- root);
++ dir_inode = btrfs_iget_logging(inode_key.objectid, root);
+ /*
+ * If the parent inode was deleted, return an error to
+ * fallback to a transaction commit. This is to prevent
+@@ -6791,7 +6810,6 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+
+ while (true) {
+- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_buffer *leaf;
+ int slot;
+ struct btrfs_key search_key;
+@@ -6806,7 +6824,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ search_key.objectid = found_key.offset;
+ search_key.type = BTRFS_INODE_ITEM_KEY;
+ search_key.offset = 0;
+- inode = btrfs_iget(fs_info->sb, ino, root);
++ inode = btrfs_iget_logging(ino, root);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
+index 7c7001f42b14c9..5be74f9e47ebf3 100644
+--- a/fs/btrfs/uuid-tree.c
++++ b/fs/btrfs/uuid-tree.c
+@@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ * An item with that type already exists.
+ * Extend the item and store the new subid at the end.
+ */
+- btrfs_extend_item(path, sizeof(subid_le));
++ btrfs_extend_item(trans, path, sizeof(subid_le));
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ offset = btrfs_item_ptr_offset(eb, slot);
+@@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ ret = 0;
+ subid_le = cpu_to_le64(subid_cpu);
+ write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+@@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ move_src = offset + sizeof(subid);
+ move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
+ memmove_extent_buffer(eb, move_dst, move_src, move_len);
+- btrfs_truncate_item(path, item_size - sizeof(subid), 1);
++ btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index b9ef6f54635ca5..d2285c9726e7b1 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1224,23 +1224,30 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
+ struct btrfs_device *device;
+ struct btrfs_device *latest_dev = NULL;
+ struct btrfs_device *tmp_device;
++ int ret = 0;
+
+ list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
+ dev_list) {
+- int ret;
++ int ret2;
+
+- ret = btrfs_open_one_device(fs_devices, device, flags, holder);
+- if (ret == 0 &&
++ ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
++ if (ret2 == 0 &&
+ (!latest_dev || device->generation > latest_dev->generation)) {
+ latest_dev = device;
+- } else if (ret == -ENODATA) {
++ } else if (ret2 == -ENODATA) {
+ fs_devices->num_devices--;
+ list_del(&device->dev_list);
+ btrfs_free_device(device);
+ }
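++		/* Keep the first real failure so it can be reported if no device opens. */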
++ if (ret == 0 && ret2 != 0)
++ ret = ret2;
+ }
+- if (fs_devices->open_devices == 0)
++
++ if (fs_devices->open_devices == 0) {
++ if (ret)
++ return ret;
+ return -EINVAL;
++ }
+
+ fs_devices->opened = 1;
+ fs_devices->latest_dev = latest_dev;
+@@ -1432,7 +1439,7 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
+
+ if (in_range(physical_start, *start, len) ||
+ in_range(*start, physical_start,
+- physical_end - physical_start)) {
++ physical_end + 1 - physical_start)) {
+ *start = physical_end + 1;
+ return true;
+ }
+@@ -1894,7 +1901,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
+ ptr = btrfs_device_fsid(dev_item);
+ write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
+ ptr, BTRFS_FSID_SIZE);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -2597,7 +2604,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
+ if (device->fs_devices->seeding) {
+ btrfs_set_device_generation(leaf, dev_item,
+ device->generation);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+
+ path->slots[0]++;
+@@ -2895,7 +2902,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ btrfs_device_get_disk_total_bytes(device));
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ btrfs_device_get_bytes_used(device));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ out:
+ btrfs_free_path(path);
+@@ -3045,15 +3052,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ read_unlock(&em_tree->lock);
+
+ if (!em) {
+- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
++ btrfs_crit(fs_info,
++ "unable to find chunk map for logical %llu length %llu",
+ logical, length);
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (em->start > logical || em->start + em->len < logical) {
++ if (em->start > logical || em->start + em->len <= logical) {
+ btrfs_crit(fs_info,
+- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
+- logical, length, em->start, em->start + em->len);
++ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
++ logical, logical + length, em->start, em->start + em->len);
+ free_extent_map(em);
+ return ERR_PTR(-EINVAL);
+ }
+@@ -3351,7 +3359,18 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
+ goto error;
+ }
+- BUG_ON(ret == 0); /* Corruption */
++ if (ret == 0) {
++ /*
++ * On the first search we would find chunk tree with
++ * offset -1, which is not possible. On subsequent
++ * loops this would find an existing item on an invalid
++ * offset (one less than the previous one, wrong
++ * alignment and size).
++ */
++ ret = -EUCLEAN;
++ mutex_unlock(&fs_info->reclaim_bgs_lock);
++ goto error;
++ }
+
+ ret = btrfs_previous_item(chunk_root, path, key.objectid,
+ key.type);
+@@ -3483,7 +3502,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
+
+ btrfs_set_balance_flags(leaf, item, bctl->flags);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ err = btrfs_commit_transaction(trans);
+@@ -7534,7 +7553,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+ for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ btrfs_set_dev_stats_value(eb, ptr, i,
+ btrfs_dev_stat_read(device, i));
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 2128a032c3b74d..5203095318b027 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -15,6 +15,12 @@
+
+ #define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+
++/*
++ * Arbitrary maximum size of one discard request, to limit the potentially
++ * long time spent in blkdev_issue_discard().
++ */
++#define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G)
++
+ extern struct mutex uuid_mutex;
+
+ #define BTRFS_STRIPE_LEN SZ_64K
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 96828a13dd43d9..b906f809650ef1 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ if (old_data_len + name_len + sizeof(*di) == item_size) {
+ /* No other xattrs packed in the same leaf item. */
+ if (size > old_data_len)
+- btrfs_extend_item(path, size - old_data_len);
++ btrfs_extend_item(trans, path, size - old_data_len);
+ else if (size < old_data_len)
+- btrfs_truncate_item(path, data_size, 1);
++ btrfs_truncate_item(trans, path, data_size, 1);
+ } else {
+ /* There are other xattrs packed in the same item. */
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ if (ret)
+ goto out;
+- btrfs_extend_item(path, data_size);
++ btrfs_extend_item(trans, path, data_size);
+ }
+
+ ptr = btrfs_item_ptr(leaf, slot, char);
+@@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ btrfs_set_dir_data_len(leaf, di, size);
+ data_ptr = ((unsigned long)(di + 1)) + name_len;
+ write_extent_buffer(leaf, value, data_ptr, size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ /*
+ * Insert, and we had space for the xattr, so path->slots[0] is
+diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
+index 6c231a116a29cb..9f60d0bbd53069 100644
+--- a/fs/btrfs/zlib.c
++++ b/fs/btrfs/zlib.c
+@@ -354,18 +354,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+ }
+
+ int zlib_decompress(struct list_head *ws, const u8 *data_in,
+- struct page *dest_page, unsigned long start_byte, size_t srclen,
++ struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
+ size_t destlen)
+ {
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ int ret = 0;
+ int wbits = MAX_WBITS;
+- unsigned long bytes_left;
+- unsigned long total_out = 0;
+- unsigned long pg_offset = 0;
+-
+- destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+- bytes_left = destlen;
++ unsigned long to_copy;
+
+ workspace->strm.next_in = data_in;
+ workspace->strm.avail_in = srclen;
+@@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
+ return -EIO;
+ }
+
+- while (bytes_left > 0) {
+- unsigned long buf_start;
+- unsigned long buf_offset;
+- unsigned long bytes;
+-
+- ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
+- if (ret != Z_OK && ret != Z_STREAM_END)
+- break;
+-
+- buf_start = total_out;
+- total_out = workspace->strm.total_out;
+-
+- if (total_out == buf_start) {
+- ret = -EIO;
+- break;
+- }
+-
+- if (total_out <= start_byte)
+- goto next;
+-
+- if (total_out > start_byte && buf_start < start_byte)
+- buf_offset = start_byte - buf_start;
+- else
+- buf_offset = 0;
+-
+- bytes = min(PAGE_SIZE - pg_offset,
+- PAGE_SIZE - (buf_offset % PAGE_SIZE));
+- bytes = min(bytes, bytes_left);
++ /*
++ * Everything (in/out buf) should be at most one sector, there should
++ * be no need to switch any input/output buffer.
++ */
++ ret = zlib_inflate(&workspace->strm, Z_FINISH);
++ to_copy = min(workspace->strm.total_out, destlen);
++ if (ret != Z_STREAM_END)
++ goto out;
+
+- memcpy_to_page(dest_page, pg_offset,
+- workspace->buf + buf_offset, bytes);
++ memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
+
+- pg_offset += bytes;
+- bytes_left -= bytes;
+-next:
+- workspace->strm.next_out = workspace->buf;
+- workspace->strm.avail_out = workspace->buf_size;
+- }
+-
+- if (ret != Z_STREAM_END && bytes_left != 0)
++out:
++ if (unlikely(to_copy != destlen)) {
++		pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
++ to_copy, destlen);
+ ret = -EIO;
+- else
++ } else {
+ ret = 0;
++ }
+
+ zlib_inflateEnd(&workspace->strm);
+
+- /*
+- * this should only happen if zlib returned fewer bytes than we
+- * expected. btrfs_get_block is responsible for zeroing from the
+- * end of the inline extent (destlen) to the end of the page
+- */
+- if (pg_offset < destlen) {
+- memzero_page(dest_page, pg_offset, destlen - pg_offset);
+- }
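++	/* Callers expect the bytes past the decompressed data to read back as zeroes. */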
++ if (unlikely(to_copy < destlen))
++ memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
+ return ret;
+ }
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 09bc325d075dca..c4463c3f2068dd 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1282,21 +1282,175 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
+ return ret;
+ }
+
++struct zone_info {
++ u64 physical;
++ u64 capacity;
++ u64 alloc_offset;
++};
++
++static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
++ struct zone_info *info, unsigned long *active,
++ struct map_lookup *map)
++{
++ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
++ struct btrfs_device *device;
++ int dev_replace_is_ongoing = 0;
++ unsigned int nofs_flag;
++ struct blk_zone zone;
++ int ret;
++
++ info->physical = map->stripes[zone_idx].physical;
++
++ down_read(&dev_replace->rwsem);
++ device = map->stripes[zone_idx].dev;
++
++ if (!device->bdev) {
++ up_read(&dev_replace->rwsem);
++ info->alloc_offset = WP_MISSING_DEV;
++ return 0;
++ }
++
++ /* Consider a zone as active if we can allow any number of active zones. */
++ if (!device->zone_info->max_active_zones)
++ __set_bit(zone_idx, active);
++
++ if (!btrfs_dev_is_sequential(device, info->physical)) {
++ up_read(&dev_replace->rwsem);
++ info->alloc_offset = WP_CONVENTIONAL;
++ return 0;
++ }
++
++ /* This zone will be used for allocation, so mark this zone non-empty. */
++ btrfs_dev_clear_zone_empty(device, info->physical);
++
++ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
++ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
++ btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
++
++ /*
++ * The group is mapped to a sequential zone. Get the zone write pointer
++ * to determine the allocation offset within the zone.
++ */
++ WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
++ nofs_flag = memalloc_nofs_save();
++ ret = btrfs_get_dev_zone(device, info->physical, &zone);
++ memalloc_nofs_restore(nofs_flag);
++ if (ret) {
++ up_read(&dev_replace->rwsem);
++ if (ret != -EIO && ret != -EOPNOTSUPP)
++ return ret;
++ info->alloc_offset = WP_MISSING_DEV;
++ return 0;
++ }
++
++ if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
++ btrfs_err_in_rcu(fs_info,
++ "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
++ zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
++ device->devid);
++ up_read(&dev_replace->rwsem);
++ return -EIO;
++ }
++
++ info->capacity = (zone.capacity << SECTOR_SHIFT);
++
++ switch (zone.cond) {
++ case BLK_ZONE_COND_OFFLINE:
++ case BLK_ZONE_COND_READONLY:
++ btrfs_err_in_rcu(fs_info,
++ "zoned: offline/readonly zone %llu on device %s (devid %llu)",
++ (info->physical >> device->zone_info->zone_size_shift),
++ rcu_str_deref(device->name), device->devid);
++ info->alloc_offset = WP_MISSING_DEV;
++ break;
++ case BLK_ZONE_COND_EMPTY:
++ info->alloc_offset = 0;
++ break;
++ case BLK_ZONE_COND_FULL:
++ info->alloc_offset = info->capacity;
++ break;
++ default:
++ /* Partially used zone. */
++ info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
++ __set_bit(zone_idx, active);
++ break;
++ }
++
++ up_read(&dev_replace->rwsem);
++
++ return 0;
++}
++
++static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
++ struct zone_info *info,
++ unsigned long *active)
++{
++ if (info->alloc_offset == WP_MISSING_DEV) {
++ btrfs_err(bg->fs_info,
++ "zoned: cannot recover write pointer for zone %llu",
++ info->physical);
++ return -EIO;
++ }
++
++ bg->alloc_offset = info->alloc_offset;
++ bg->zone_capacity = info->capacity;
++ if (test_bit(0, active))
++ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
++ return 0;
++}
++
++static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
++ struct map_lookup *map,
++ struct zone_info *zone_info,
++ unsigned long *active)
++{
++ if (map->type & BTRFS_BLOCK_GROUP_DATA) {
++ btrfs_err(bg->fs_info,
++ "zoned: profile DUP not yet supported on data bg");
++ return -EINVAL;
++ }
++
++ if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
++ btrfs_err(bg->fs_info,
++ "zoned: cannot recover write pointer for zone %llu",
++ zone_info[0].physical);
++ return -EIO;
++ }
++ if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
++ btrfs_err(bg->fs_info,
++ "zoned: cannot recover write pointer for zone %llu",
++ zone_info[1].physical);
++ return -EIO;
++ }
++ if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
++ btrfs_err(bg->fs_info,
++ "zoned: write pointer offset mismatch of zones in DUP profile");
++ return -EIO;
++ }
++
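++	/* One zone active and one not: activate the block group so both zones become active. */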
++ if (test_bit(0, active) != test_bit(1, active)) {
++ if (!btrfs_zone_activate(bg))
++ return -EIO;
++ } else if (test_bit(0, active)) {
++ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
++ }
++
++ bg->alloc_offset = zone_info[0].alloc_offset;
++ bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
++ return 0;
++}
++
+ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ {
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct map_lookup *map;
+- struct btrfs_device *device;
+ u64 logical = cache->start;
+ u64 length = cache->length;
++ struct zone_info *zone_info = NULL;
+ int ret;
+ int i;
+- unsigned int nofs_flag;
+- u64 *alloc_offsets = NULL;
+- u64 *caps = NULL;
+- u64 *physical = NULL;
+ unsigned long *active = NULL;
+ u64 last_alloc = 0;
+ u32 num_sequential = 0, num_conventional = 0;
+@@ -1328,20 +1482,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ goto out;
+ }
+
+- alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
+- if (!alloc_offsets) {
+- ret = -ENOMEM;
+- goto out;
+- }
+-
+- caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
+- if (!caps) {
+- ret = -ENOMEM;
+- goto out;
+- }
+-
+- physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
+- if (!physical) {
++ zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
++ if (!zone_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+@@ -1353,98 +1495,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ }
+
+ for (i = 0; i < map->num_stripes; i++) {
+- bool is_sequential;
+- struct blk_zone zone;
+- struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+- int dev_replace_is_ongoing = 0;
+-
+- device = map->stripes[i].dev;
+- physical[i] = map->stripes[i].physical;
+-
+- if (device->bdev == NULL) {
+- alloc_offsets[i] = WP_MISSING_DEV;
+- continue;
+- }
+-
+- is_sequential = btrfs_dev_is_sequential(device, physical[i]);
+- if (is_sequential)
+- num_sequential++;
+- else
+- num_conventional++;
+-
+- /*
+- * Consider a zone as active if we can allow any number of
+- * active zones.
+- */
+- if (!device->zone_info->max_active_zones)
+- __set_bit(i, active);
+-
+- if (!is_sequential) {
+- alloc_offsets[i] = WP_CONVENTIONAL;
+- continue;
+- }
+-
+- /*
+- * This zone will be used for allocation, so mark this zone
+- * non-empty.
+- */
+- btrfs_dev_clear_zone_empty(device, physical[i]);
+-
+- down_read(&dev_replace->rwsem);
+- dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+- if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+- btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
+- up_read(&dev_replace->rwsem);
+-
+- /*
+- * The group is mapped to a sequential zone. Get the zone write
+- * pointer to determine the allocation offset within the zone.
+- */
+- WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
+- nofs_flag = memalloc_nofs_save();
+- ret = btrfs_get_dev_zone(device, physical[i], &zone);
+- memalloc_nofs_restore(nofs_flag);
+- if (ret == -EIO || ret == -EOPNOTSUPP) {
+- ret = 0;
+- alloc_offsets[i] = WP_MISSING_DEV;
+- continue;
+- } else if (ret) {
+- goto out;
+- }
+-
+- if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+- btrfs_err_in_rcu(fs_info,
+- "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+- zone.start << SECTOR_SHIFT,
+- rcu_str_deref(device->name), device->devid);
+- ret = -EIO;
++ ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
++ if (ret)
+ goto out;
+- }
+
+- caps[i] = (zone.capacity << SECTOR_SHIFT);
+-
+- switch (zone.cond) {
+- case BLK_ZONE_COND_OFFLINE:
+- case BLK_ZONE_COND_READONLY:
+- btrfs_err(fs_info,
+- "zoned: offline/readonly zone %llu on device %s (devid %llu)",
+- physical[i] >> device->zone_info->zone_size_shift,
+- rcu_str_deref(device->name), device->devid);
+- alloc_offsets[i] = WP_MISSING_DEV;
+- break;
+- case BLK_ZONE_COND_EMPTY:
+- alloc_offsets[i] = 0;
+- break;
+- case BLK_ZONE_COND_FULL:
+- alloc_offsets[i] = caps[i];
+- break;
+- default:
+- /* Partially used zone */
+- alloc_offsets[i] =
+- ((zone.wp - zone.start) << SECTOR_SHIFT);
+- __set_bit(i, active);
+- break;
+- }
++ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ num_conventional++;
++ else
++ num_sequential++;
+ }
+
+ if (num_sequential > 0)
+@@ -1468,56 +1526,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+
+ switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case 0: /* single */
+- if (alloc_offsets[0] == WP_MISSING_DEV) {
+- btrfs_err(fs_info,
+- "zoned: cannot recover write pointer for zone %llu",
+- physical[0]);
+- ret = -EIO;
+- goto out;
+- }
+- cache->alloc_offset = alloc_offsets[0];
+- cache->zone_capacity = caps[0];
+- if (test_bit(0, active))
+- set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
++ ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+- if (map->type & BTRFS_BLOCK_GROUP_DATA) {
+- btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
+- ret = -EINVAL;
+- goto out;
+- }
+- if (alloc_offsets[0] == WP_MISSING_DEV) {
+- btrfs_err(fs_info,
+- "zoned: cannot recover write pointer for zone %llu",
+- physical[0]);
+- ret = -EIO;
+- goto out;
+- }
+- if (alloc_offsets[1] == WP_MISSING_DEV) {
+- btrfs_err(fs_info,
+- "zoned: cannot recover write pointer for zone %llu",
+- physical[1]);
+- ret = -EIO;
+- goto out;
+- }
+- if (alloc_offsets[0] != alloc_offsets[1]) {
+- btrfs_err(fs_info,
+- "zoned: write pointer offset mismatch of zones in DUP profile");
+- ret = -EIO;
+- goto out;
+- }
+- if (test_bit(0, active) != test_bit(1, active)) {
+- if (!btrfs_zone_activate(cache)) {
+- ret = -EIO;
+- goto out;
+- }
+- } else {
+- if (test_bit(0, active))
+- set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+- &cache->runtime_flags);
+- }
+- cache->alloc_offset = alloc_offsets[0];
+- cache->zone_capacity = min(caps[0], caps[1]);
++ ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID1:
+ case BTRFS_BLOCK_GROUP_RAID0:
+@@ -1570,9 +1582,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ cache->physical_map = NULL;
+ }
+ bitmap_free(active);
+- kfree(physical);
+- kfree(caps);
+- kfree(alloc_offsets);
++ kfree(zone_info);
+ free_extent_map(em);
+
+ return ret;
+@@ -1975,6 +1985,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ map = block_group->physical_map;
+
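++	/* Take the active-bg list lock first; it is held across the whole activation. */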
++ spin_lock(&fs_info->zone_active_bgs_lock);
+ spin_lock(&block_group->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
+ ret = true;
+@@ -1987,7 +1998,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ goto out_unlock;
+ }
+
+- spin_lock(&fs_info->zone_active_bgs_lock);
+ for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_zoned_device_info *zinfo;
+ int reserved = 0;
+@@ -2007,20 +2017,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ */
+ if (atomic_read(&zinfo->active_zones_left) <= reserved) {
+ ret = false;
+- spin_unlock(&fs_info->zone_active_bgs_lock);
+ goto out_unlock;
+ }
+
+ if (!btrfs_dev_set_active_zone(device, physical)) {
+ /* Cannot activate the zone */
+ ret = false;
+- spin_unlock(&fs_info->zone_active_bgs_lock);
+ goto out_unlock;
+ }
+ if (!is_data)
+ zinfo->reserved_active_zones--;
+ }
+- spin_unlock(&fs_info->zone_active_bgs_lock);
+
+ /* Successfully activated all the zones */
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+@@ -2028,8 +2035,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ /* For the active block group list */
+ btrfs_get_block_group(block_group);
+-
+- spin_lock(&fs_info->zone_active_bgs_lock);
+ list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+
+@@ -2037,6 +2042,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ out_unlock:
+ spin_unlock(&block_group->lock);
++ spin_unlock(&fs_info->zone_active_bgs_lock);
+ return ret;
+ }
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 12e9a71c693d74..ecd8b47507ff80 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2179,6 +2179,8 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
+ struct buffer_head *bh, *head;
+
+ bh = head = folio_buffers(folio);
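++	/* The folio may have no buffers attached, in which case there is nothing to commit. */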
++ if (!bh)
++ return;
+ blocksize = bh->b_size;
+
+ block_start = 0;
+diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
+index 7077f72e6f4747..9fb06dc165202c 100644
+--- a/fs/cachefiles/cache.c
++++ b/fs/cachefiles/cache.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <linux/statfs.h>
+ #include <linux/namei.h>
++#include <trace/events/fscache.h>
+ #include "internal.h"
+
+ /*
+@@ -168,6 +169,8 @@ int cachefiles_add_cache(struct cachefiles_cache *cache)
+ dput(root);
+ error_open_root:
+ cachefiles_end_secure(cache, saved_cred);
++ put_cred(cache->cache_cred);
++ cache->cache_cred = NULL;
+ error_getsec:
+ fscache_relinquish_cache(cache_cookie);
+ cache->cache = NULL;
+@@ -310,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
+ }
+
+ /*
+- * Withdraw volumes.
++ * Withdraw fscache volumes.
++ */
++static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
++{
++ struct list_head *cur;
++ struct cachefiles_volume *volume;
++ struct fscache_volume *vcookie;
++
++ _enter("");
++retry:
++ spin_lock(&cache->object_list_lock);
++ list_for_each(cur, &cache->volumes) {
++ volume = list_entry(cur, struct cachefiles_volume, cache_link);
++
++ if (atomic_read(&volume->vcookie->n_accesses) == 0)
++ continue;
++
++ vcookie = fscache_try_get_volume(volume->vcookie,
++ fscache_volume_get_withdraw);
++ if (vcookie) {
++ spin_unlock(&cache->object_list_lock);
++ fscache_withdraw_volume(vcookie);
++ fscache_put_volume(vcookie, fscache_volume_put_withdraw);
++ goto retry;
++ }
++ }
++ spin_unlock(&cache->object_list_lock);
++
++ _leave("");
++}
++
++/*
++ * Withdraw cachefiles volumes.
+ */
+ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+ {
+ _enter("");
+
+ for (;;) {
++ struct fscache_volume *vcookie = NULL;
+ struct cachefiles_volume *volume = NULL;
+
+ spin_lock(&cache->object_list_lock);
+ if (!list_empty(&cache->volumes)) {
+ volume = list_first_entry(&cache->volumes,
+ struct cachefiles_volume, cache_link);
++ vcookie = fscache_try_get_volume(volume->vcookie,
++ fscache_volume_get_withdraw);
++ if (!vcookie) {
++ spin_unlock(&cache->object_list_lock);
++ cpu_relax();
++ continue;
++ }
+ list_del_init(&volume->cache_link);
+ }
+ spin_unlock(&cache->object_list_lock);
+@@ -330,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+ break;
+
+ cachefiles_withdraw_volume(volume);
++ fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+ }
+
+ _leave("");
+@@ -369,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
+ pr_info("File cache on %s unregistering\n", fscache->name);
+
+ fscache_withdraw_cache(fscache);
++ cachefiles_withdraw_fscache_volumes(cache);
+
+ /* we now have to destroy all the active objects pertaining to this
+ * cache - which we do by passing them off to thread pool to be
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index aa4efcabb5e37b..89b11336a83697 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -77,6 +77,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
+ { "tag", cachefiles_daemon_tag },
+ #ifdef CONFIG_CACHEFILES_ONDEMAND
+ { "copen", cachefiles_ondemand_copen },
++ { "restore", cachefiles_ondemand_restore },
+ #endif
+ { "", NULL }
+ };
+@@ -132,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
+ return 0;
+ }
+
+-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
++void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+ {
+ struct xarray *xa = &cache->reqs;
+ struct cachefiles_req *req;
+@@ -158,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+ xa_for_each(xa, index, req) {
+ req->error = -EIO;
+ complete(&req->done);
++ __xa_erase(xa, index);
+ }
+ xa_unlock(xa);
+
+@@ -355,14 +357,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
+ struct poll_table_struct *poll)
+ {
+ struct cachefiles_cache *cache = file->private_data;
++ XA_STATE(xas, &cache->reqs, 0);
++ struct cachefiles_req *req;
+ __poll_t mask;
+
+ poll_wait(file, &cache->daemon_pollwq, poll);
+ mask = 0;
+
+ if (cachefiles_in_ondemand_mode(cache)) {
+- if (!xa_empty(&cache->reqs))
+- mask |= EPOLLIN;
++ if (!xa_empty(&cache->reqs)) {
++ xas_lock(&xas);
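++			/* Reads for objects that are still reopening cannot be serviced yet; skip them. */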
++ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
++ if (!cachefiles_ondemand_is_reopening_read(req)) {
++ mask |= EPOLLIN;
++ break;
++ }
++ }
++ xas_unlock(&xas);
++ }
+ } else {
+ if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
+ mask |= EPOLLIN;
+@@ -805,6 +817,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+ cachefiles_put_directory(cache->graveyard);
+ cachefiles_put_directory(cache->store);
+ mntput(cache->mnt);
++ put_cred(cache->cache_cred);
+
+ kfree(cache->rootdirname);
+ kfree(cache->secctx);
+diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
+index 40052bdb33655b..35ba2117a6f652 100644
+--- a/fs/cachefiles/interface.c
++++ b/fs/cachefiles/interface.c
+@@ -31,6 +31,11 @@ struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
+ if (!object)
+ return NULL;
+
++ if (cachefiles_ondemand_init_obj_info(object, volume)) {
++ kmem_cache_free(cachefiles_object_jar, object);
++ return NULL;
++ }
++
+ refcount_set(&object->ref, 1);
+
+ spin_lock_init(&object->lock);
+@@ -88,7 +93,7 @@ void cachefiles_put_object(struct cachefiles_object *object,
+ ASSERTCMP(object->file, ==, NULL);
+
+ kfree(object->d_name);
+-
++ cachefiles_ondemand_deinit_obj_info(object);
+ cache = object->volume->cache->cache;
+ fscache_put_cookie(object->cookie, fscache_cookie_put_object);
+ object->cookie = NULL;
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index 2ad58c46520848..111ad6ecd4baf3 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -44,6 +44,21 @@ struct cachefiles_volume {
+ struct dentry *fanout[256]; /* Fanout subdirs */
+ };
+
++enum cachefiles_object_state {
++ CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
++ CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
++ CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
++ CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
++};
++
++struct cachefiles_ondemand_info {
++ struct work_struct ondemand_work;
++ int ondemand_id;
++ enum cachefiles_object_state state;
++ struct cachefiles_object *object;
++ spinlock_t lock;
++};
++
+ /*
+ * Backing file state.
+ */
+@@ -61,7 +76,7 @@ struct cachefiles_object {
+ unsigned long flags;
+ #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */
+ #ifdef CONFIG_CACHEFILES_ONDEMAND
+- int ondemand_id;
++ struct cachefiles_ondemand_info *ondemand;
+ #endif
+ };
+
+@@ -114,6 +129,7 @@ struct cachefiles_cache {
+ unsigned long req_id_next;
+ struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
+ u32 ondemand_id_next;
++ u32 msg_id_next;
+ };
+
+ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+@@ -125,6 +141,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+ struct cachefiles_req {
+ struct cachefiles_object *object;
+ struct completion done;
++ refcount_t ref;
+ int error;
+ struct cachefiles_msg msg;
+ };
+@@ -173,6 +190,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
+ * daemon.c
+ */
+ extern const struct file_operations cachefiles_daemon_fops;
++extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
+ extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
+ extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
+
+@@ -290,12 +308,43 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
+ char *args);
+
++extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache,
++ char *args);
++
+ extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
+ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
+
+ extern int cachefiles_ondemand_read(struct cachefiles_object *object,
+ loff_t pos, size_t len);
+
++extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
++ struct cachefiles_volume *volume);
++extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj);
++
++#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE) \
++static inline bool \
++cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
++{ \
++ return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
++} \
++ \
++static inline void \
++cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
++{ \
++ object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
++}
++
++CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
++CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
++CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
++CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
++
++static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
++{
++ return cachefiles_ondemand_object_is_reopening(req->object) &&
++ req->msg.opcode == CACHEFILES_OP_READ;
++}
++
+ #else
+ static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ char __user *_buffer, size_t buflen)
+@@ -317,6 +366,20 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object,
+ {
+ return -EOPNOTSUPP;
+ }
++
++static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
++ struct cachefiles_volume *volume)
++{
++ return 0;
++}
++static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
++{
++}
++
++static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
++{
++ return false;
++}
+ #endif
+
+ /*
+@@ -367,6 +430,8 @@ do { \
+ pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
+ fscache_io_error((___cache)->cache); \
+ set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
++ if (cachefiles_in_ondemand_mode(___cache)) \
++ cachefiles_flush_reqs(___cache); \
+ } while (0)
+
+ #define cachefiles_io_error_obj(object, FMT, ...) \
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index 7bf7a5fcc045f8..594e41582ae9ca 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -594,14 +594,12 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
+ * write and readdir but not lookup or open).
+ */
+ touch_atime(&file->f_path);
+- dput(dentry);
+ return true;
+
+ check_failed:
+ fscache_cookie_lookup_negative(object->cookie);
+ cachefiles_unmark_inode_in_use(object, file);
+ fput(file);
+- dput(dentry);
+ if (ret == -ESTALE)
+ return cachefiles_create_file(object);
+ return false;
+@@ -610,7 +608,6 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
+ fput(file);
+ error:
+ cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
+- dput(dentry);
+ return false;
+ }
+
+@@ -653,7 +650,9 @@ bool cachefiles_look_up_object(struct cachefiles_object *object)
+ goto new_file;
+ }
+
+- if (!cachefiles_open_file(object, dentry))
++ ret = cachefiles_open_file(object, dentry);
++ dput(dentry);
++ if (!ret)
+ return false;
+
+ _leave(" = t [%lu]", file_inode(object->file)->i_ino);
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 0254ed39f68ceb..2185e2908dba89 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -4,26 +4,45 @@
+ #include <linux/uio.h>
+ #include "internal.h"
+
++struct ondemand_anon_file {
++ struct file *file;
++ int fd;
++};
++
++static inline void cachefiles_req_put(struct cachefiles_req *req)
++{
++ if (refcount_dec_and_test(&req->ref))
++ kfree(req);
++}
++
+ static int cachefiles_ondemand_fd_release(struct inode *inode,
+ struct file *file)
+ {
+ struct cachefiles_object *object = file->private_data;
+- struct cachefiles_cache *cache = object->volume->cache;
+- int object_id = object->ondemand_id;
++ struct cachefiles_cache *cache;
++ struct cachefiles_ondemand_info *info;
++ int object_id;
+ struct cachefiles_req *req;
+- XA_STATE(xas, &cache->reqs, 0);
++ XA_STATE(xas, NULL, 0);
+
+- xa_lock(&cache->reqs);
+- object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
++ if (!object)
++ return 0;
+
+- /*
+- * Flush all pending READ requests since their completion depends on
+- * anon_fd.
+- */
+- xas_for_each(&xas, req, ULONG_MAX) {
++ info = object->ondemand;
++ cache = object->volume->cache;
++ xas.xa = &cache->reqs;
++
++ xa_lock(&cache->reqs);
++ spin_lock(&info->lock);
++ object_id = info->ondemand_id;
++ info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
++ cachefiles_ondemand_set_object_close(object);
++ spin_unlock(&info->lock);
++
++	/* Only flush requests marked CACHEFILES_REQ_NEW, to avoid racing with daemon_read. */
++ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+ if (req->msg.object_id == object_id &&
+- req->msg.opcode == CACHEFILES_OP_READ) {
+- req->error = -EIO;
++ req->msg.opcode == CACHEFILES_OP_CLOSE) {
+ complete(&req->done);
+ xas_store(&xas, NULL);
+ }
+@@ -78,12 +97,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
+ }
+
+ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+- unsigned long arg)
++ unsigned long id)
+ {
+ struct cachefiles_object *object = filp->private_data;
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct cachefiles_req *req;
+- unsigned long id;
++ XA_STATE(xas, &cache->reqs, id);
+
+ if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
+ return -EINVAL;
+@@ -91,10 +110,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return -EOPNOTSUPP;
+
+- id = arg;
+- req = xa_erase(&cache->reqs, id);
+- if (!req)
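++	/* Look up under the lock and verify the id refers to a READ request on this object. */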
++ xa_lock(&cache->reqs);
++ req = xas_load(&xas);
++ if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
++ req->object != object) {
++ xa_unlock(&cache->reqs);
+ return -EINVAL;
++ }
++ xas_store(&xas, NULL);
++ xa_unlock(&cache->reqs);
+
+ trace_cachefiles_ondemand_cread(object, id);
+ complete(&req->done);
+@@ -118,10 +142,12 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ {
+ struct cachefiles_req *req;
+ struct fscache_cookie *cookie;
++ struct cachefiles_ondemand_info *info;
+ char *pid, *psize;
+ unsigned long id;
+ long size;
+ int ret;
++ XA_STATE(xas, &cache->reqs, 0);
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return -EOPNOTSUPP;
+@@ -145,10 +171,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ if (ret)
+ return ret;
+
+- req = xa_erase(&cache->reqs, id);
+- if (!req)
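++	/* Erase under the lock, checking this is an OPEN request whose anonymous fd was issued. */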
++ xa_lock(&cache->reqs);
++ xas.xa_index = id;
++ req = xas_load(&xas);
++ if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
++ !req->object->ondemand->ondemand_id) {
++ xa_unlock(&cache->reqs);
+ return -EINVAL;
++ }
++ xas_store(&xas, NULL);
++ xa_unlock(&cache->reqs);
+
++ info = req->object->ondemand;
+ /* fail OPEN request if copen format is invalid */
+ ret = kstrtol(psize, 0, &size);
+ if (ret) {
+@@ -168,6 +202,32 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ goto out;
+ }
+
++ spin_lock(&info->lock);
++ /*
++	 * The anonymous fd was closed before copen? Fail the request.
++ *
++ * t1 | t2
++ * ---------------------------------------------------------
++ * cachefiles_ondemand_copen
++ * req = xa_erase(&cache->reqs, id)
++ * // Anon fd is maliciously closed.
++ * cachefiles_ondemand_fd_release
++ * xa_lock(&cache->reqs)
++ * cachefiles_ondemand_set_object_close(object)
++ * xa_unlock(&cache->reqs)
++ * cachefiles_ondemand_set_object_open
++ * // No one will ever close it again.
++ * cachefiles_ondemand_daemon_read
++ * cachefiles_ondemand_select_req
++ *
++	 * Get a read req whose fd is already closed. The daemon can't
++	 * issue a cread ioctl with a closed fd, so it hangs.
++ */
++ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
++ spin_unlock(&info->lock);
++ req->error = -EBADFD;
++ goto out;
++ }
+ cookie = req->object->cookie;
+ cookie->object_size = size;
+ if (size)
+@@ -176,19 +236,51 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ trace_cachefiles_ondemand_copen(req->object, id, size);
+
++ cachefiles_ondemand_set_object_open(req->object);
++ spin_unlock(&info->lock);
++ wake_up_all(&cache->daemon_pollwq);
++
+ out:
++ spin_lock(&info->lock);
++	/* Set the object to close so it does not remain in the reopening state. */
++ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
++ cachefiles_ondemand_set_object_close(req->object);
++ spin_unlock(&info->lock);
+ complete(&req->done);
+ return ret;
+ }
+
+-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
++int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
++{
++ struct cachefiles_req *req;
++
++ XA_STATE(xas, &cache->reqs, 0);
++
++ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
++ return -EOPNOTSUPP;
++
++ /*
++	 * Reset all requests to the CACHEFILES_REQ_NEW state, so that
++	 * requests that were only partially processed before the user
++	 * daemon crashed can be reprocessed after recovery.
++ */
++ xas_lock(&xas);
++ xas_for_each(&xas, req, ULONG_MAX)
++ xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++ xas_unlock(&xas);
++
++ wake_up_all(&cache->daemon_pollwq);
++ return 0;
++}
++
++static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
++ struct ondemand_anon_file *anon_file)
+ {
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct cachefiles_open *load;
+- struct file *file;
+ u32 object_id;
+- int ret, fd;
++ int ret;
+
+ object = cachefiles_grab_object(req->object,
+ cachefiles_obj_get_ondemand_fd);
+@@ -200,60 +292,128 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+ if (ret < 0)
+ goto err;
+
+- fd = get_unused_fd_flags(O_WRONLY);
+- if (fd < 0) {
+- ret = fd;
++ anon_file->fd = get_unused_fd_flags(O_WRONLY);
++ if (anon_file->fd < 0) {
++ ret = anon_file->fd;
+ goto err_free_id;
+ }
+
+- file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
+- object, O_WRONLY);
+- if (IS_ERR(file)) {
+- ret = PTR_ERR(file);
++ anon_file->file = anon_inode_getfile("[cachefiles]",
++ &cachefiles_ondemand_fd_fops, object, O_WRONLY);
++ if (IS_ERR(anon_file->file)) {
++ ret = PTR_ERR(anon_file->file);
+ goto err_put_fd;
+ }
+
+- file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+- fd_install(fd, file);
++ spin_lock(&object->ondemand->lock);
++ if (object->ondemand->ondemand_id > 0) {
++ spin_unlock(&object->ondemand->lock);
++ /* Pair with check in cachefiles_ondemand_fd_release(). */
++ anon_file->file->private_data = NULL;
++ ret = -EEXIST;
++ goto err_put_file;
++ }
++
++ anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+
+ load = (void *)req->msg.data;
+- load->fd = fd;
+- req->msg.object_id = object_id;
+- object->ondemand_id = object_id;
++ load->fd = anon_file->fd;
++ object->ondemand->ondemand_id = object_id;
++ spin_unlock(&object->ondemand->lock);
+
+ cachefiles_get_unbind_pincount(cache);
+ trace_cachefiles_ondemand_open(object, &req->msg, load);
+ return 0;
+
++err_put_file:
++ fput(anon_file->file);
++ anon_file->file = NULL;
+ err_put_fd:
+- put_unused_fd(fd);
++ put_unused_fd(anon_file->fd);
++ anon_file->fd = ret;
+ err_free_id:
+ xa_erase(&cache->ondemand_ids, object_id);
+ err:
++ spin_lock(&object->ondemand->lock);
++ /* Avoid marking an opened object as closed. */
++ if (object->ondemand->ondemand_id <= 0)
++ cachefiles_ondemand_set_object_close(object);
++ spin_unlock(&object->ondemand->lock);
+ cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
+ return ret;
+ }
+
++static void ondemand_object_worker(struct work_struct *work)
++{
++ struct cachefiles_ondemand_info *info =
++ container_of(work, struct cachefiles_ondemand_info, ondemand_work);
++
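++	/* Resend an OPEN request to reopen the object for its pending reads. */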
++ cachefiles_ondemand_init_object(info->object);
++}
++
++/*
++ * If there are any inflight or subsequent READ requests on the
++ * closed object, reopen it.
++ * Skip read requests whose related object is reopening.
++ */
++static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
++ unsigned long xa_max)
++{
++ struct cachefiles_req *req;
++ struct cachefiles_object *object;
++ struct cachefiles_ondemand_info *info;
++
++ xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
++ if (req->msg.opcode != CACHEFILES_OP_READ)
++ return req;
++ object = req->object;
++ info = object->ondemand;
++ if (cachefiles_ondemand_object_is_close(object)) {
++ cachefiles_ondemand_set_object_reopening(object);
++ queue_work(fscache_wq, &info->ondemand_work);
++ continue;
++ }
++ if (cachefiles_ondemand_object_is_reopening(object))
++ continue;
++ return req;
++ }
++ return NULL;
++}
++
++static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
++ struct xa_state *xas, int err)
++{
++ if (unlikely(!xas || !req))
++ return false;
++
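++	/* Whoever wins this removal completes the request, so it completes exactly once. */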
++ if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
++ return false;
++
++ req->error = err;
++ complete(&req->done);
++ return true;
++}
++
+ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ char __user *_buffer, size_t buflen)
+ {
+ struct cachefiles_req *req;
+ struct cachefiles_msg *msg;
+- unsigned long id = 0;
+ size_t n;
+ int ret = 0;
++ struct ondemand_anon_file anon_file;
+ XA_STATE(xas, &cache->reqs, cache->req_id_next);
+
++ xa_lock(&cache->reqs);
+ /*
+ * Cyclically search for a request that has not ever been processed,
+ * to prevent requests from being processed repeatedly, and make
+ * request distribution fair.
+ */
+- xa_lock(&cache->reqs);
+- req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
++ req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
+ if (!req && cache->req_id_next > 0) {
+ xas_set(&xas, 0);
+- req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
++ req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
+ }
+ if (!req) {
+ xa_unlock(&cache->reqs);
+@@ -270,38 +430,37 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+
+ xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+ cache->req_id_next = xas.xa_index + 1;
++ refcount_inc(&req->ref);
++ cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
+ xa_unlock(&cache->reqs);
+
+- id = xas.xa_index;
+- msg->msg_id = id;
+-
+ if (msg->opcode == CACHEFILES_OP_OPEN) {
+- ret = cachefiles_ondemand_get_fd(req);
++ ret = cachefiles_ondemand_get_fd(req, &anon_file);
+ if (ret)
+- goto error;
++ goto out;
+ }
+
+- if (copy_to_user(_buffer, msg, n) != 0) {
++ msg->msg_id = xas.xa_index;
++ msg->object_id = req->object->ondemand->ondemand_id;
++
++ if (copy_to_user(_buffer, msg, n) != 0)
+ ret = -EFAULT;
+- goto err_put_fd;
+- }
+
+- /* CLOSE request has no reply */
+- if (msg->opcode == CACHEFILES_OP_CLOSE) {
+- xa_erase(&cache->reqs, id);
+- complete(&req->done);
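++	/* Only install the fd once the message has been copied out, so a failed copy can't leak it. */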
++ if (msg->opcode == CACHEFILES_OP_OPEN) {
++ if (ret < 0) {
++ fput(anon_file.file);
++ put_unused_fd(anon_file.fd);
++ goto out;
++ }
++ fd_install(anon_file.fd, anon_file.file);
+ }
+-
+- return n;
+-
+-err_put_fd:
+- if (msg->opcode == CACHEFILES_OP_OPEN)
+- close_fd(((struct cachefiles_open *)msg->data)->fd);
+-error:
+- xa_erase(&cache->reqs, id);
+- req->error = ret;
+- complete(&req->done);
+- return ret;
++out:
++ cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
++	/* Errored requests and CLOSE requests get no reply; remove and complete them. */
++ if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
++ cachefiles_ondemand_finish_req(req, &xas, ret);
++ cachefiles_req_put(req);
++ return ret ? ret : n;
+ }
+
+ typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
+@@ -313,20 +472,25 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ void *private)
+ {
+ struct cachefiles_cache *cache = object->volume->cache;
+- struct cachefiles_req *req;
++ struct cachefiles_req *req = NULL;
+ XA_STATE(xas, &cache->reqs, 0);
+ int ret;
+
+ if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ return 0;
+
+- if (test_bit(CACHEFILES_DEAD, &cache->flags))
+- return -EIO;
++ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
++ ret = -EIO;
++ goto out;
++ }
+
+ req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
+- if (!req)
+- return -ENOMEM;
++ if (!req) {
++ ret = -ENOMEM;
++ goto out;
++ }
+
++ refcount_set(&req->ref, 1);
+ req->object = object;
+ init_completion(&req->done);
+ req->msg.opcode = opcode;
+@@ -354,7 +518,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ */
+ xas_lock(&xas);
+
+- if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
++ if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
++ cachefiles_ondemand_object_is_dropping(object)) {
+ xas_unlock(&xas);
+ ret = -EIO;
+ goto out;
+@@ -363,20 +528,33 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ /* coupled with the barrier in cachefiles_flush_reqs() */
+ smp_mb();
+
+- if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
+- WARN_ON_ONCE(object->ondemand_id == 0);
++ if (opcode == CACHEFILES_OP_CLOSE &&
++ !cachefiles_ondemand_object_is_open(object)) {
++ WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
+ xas_unlock(&xas);
+ ret = -EIO;
+ goto out;
+ }
+
+- xas.xa_index = 0;
++ /*
++ * Cyclically find a free xas to avoid msg_id reuse that would
++ * cause the daemon to successfully copen a stale msg_id.
++ */
++ xas.xa_index = cache->msg_id_next;
+ xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
++ if (xas.xa_node == XAS_RESTART) {
++ xas.xa_index = 0;
++ xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
++ }
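++ /*
++  * Editorial note: XA_FREE_MARK is maintained by allocating xarrays,
++  * so the two xas_find_marked() calls above amount to a wrap-around
++  * search for the first free slot at or after cache->msg_id_next.
++  */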
+ if (xas.xa_node == XAS_RESTART)
+ xas_set_err(&xas, -EBUSY);
++
+ xas_store(&xas, req);
+- xas_clear_mark(&xas, XA_FREE_MARK);
+- xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++ if (xas_valid(&xas)) {
++ cache->msg_id_next = xas.xa_index + 1;
++ xas_clear_mark(&xas, XA_FREE_MARK);
++ xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
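++
++ /*
++  * Editorial note: xas_nomem() preallocates a node with GFP_KERNEL
++  * outside the lock if the store above failed for lack of memory and
++  * returns true to retry the locked section; any remaining xas error
++  * is picked up after the loop.
++  */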
+
+@@ -385,9 +563,28 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ goto out;
+
+ wake_up_all(&cache->daemon_pollwq);
+- wait_for_completion(&req->done);
+- ret = req->error;
++wait:
++ ret = wait_for_completion_killable(&req->done);
++ if (!ret) {
++ ret = req->error;
++ } else {
++ ret = -EINTR;
++ if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
++ /* Someone will complete it soon. */
++ cpu_relax();
++ goto wait;
++ }
++ }
++ cachefiles_req_put(req);
++ return ret;
+ out:
++ /* Reset the object to close state in the error handling path.
++ * If an error occurs after creating the anonymous fd,
++ * cachefiles_ondemand_fd_release() will set the object to close.
++ */
++ if (opcode == CACHEFILES_OP_OPEN &&
++ !cachefiles_ondemand_object_is_dropping(object))
++ cachefiles_ondemand_set_object_close(object);
+ kfree(req);
+ return ret;
+ }
+@@ -430,18 +627,10 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
+ void *private)
+ {
+ struct cachefiles_object *object = req->object;
+- int object_id = object->ondemand_id;
+
+- /*
+- * It's possible that object id is still 0 if the cookie looking up
+- * phase failed before OPEN request has ever been sent. Also avoid
+- * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means
+- * anon_fd has already been closed.
+- */
+- if (object_id <= 0)
++ if (!cachefiles_ondemand_object_is_open(object))
+ return -ENOENT;
+
+- req->msg.object_id = object_id;
+ trace_cachefiles_ondemand_close(object, &req->msg);
+ return 0;
+ }
+@@ -457,16 +646,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
+ struct cachefiles_object *object = req->object;
+ struct cachefiles_read *load = (void *)req->msg.data;
+ struct cachefiles_read_ctx *read_ctx = private;
+- int object_id = object->ondemand_id;
+
+- /* Stop enqueuing requests when daemon has closed anon_fd. */
+- if (object_id <= 0) {
+- WARN_ON_ONCE(object_id == 0);
+- pr_info_once("READ: anonymous fd closed prematurely.\n");
+- return -EIO;
+- }
+-
+- req->msg.object_id = object_id;
+ load->off = read_ctx->off;
+ load->len = read_ctx->len;
+ trace_cachefiles_ondemand_read(object, &req->msg, load);
+@@ -479,13 +659,16 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+ struct fscache_volume *volume = object->volume->vcookie;
+ size_t volume_key_size, cookie_key_size, data_len;
+
++ if (!object->ondemand)
++ return 0;
++
+ /*
+ * CacheFiles will firstly check the cache file under the root cache
+ * directory. If the coherency check failed, it will fallback to
+ * creating a new tmpfile as the cache file. Reuse the previously
+ * allocated object ID if any.
+ */
+- if (object->ondemand_id > 0)
++ if (cachefiles_ondemand_object_is_open(object))
+ return 0;
+
+ volume_key_size = volume->key[0] + 1;
+@@ -499,8 +682,57 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+
+ void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+ {
++ unsigned long index;
++ struct cachefiles_req *req;
++ struct cachefiles_cache *cache;
++
++ if (!object->ondemand)
++ return;
++
+ cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
+ cachefiles_ondemand_init_close_req, NULL);
++
++ if (!object->ondemand->ondemand_id)
++ return;
++
++ /* Cancel all requests for the object that is being dropped. */
++ cache = object->volume->cache;
++ xa_lock(&cache->reqs);
++ cachefiles_ondemand_set_object_dropping(object);
++ xa_for_each(&cache->reqs, index, req) {
++ if (req->object == object) {
++ req->error = -EIO;
++ complete(&req->done);
++ __xa_erase(&cache->reqs, index);
++ }
++ }
++ xa_unlock(&cache->reqs);
++
++ /* Wait for ondemand_object_worker() to finish to avoid UAF. */
++ cancel_work_sync(&object->ondemand->ondemand_work);
++}
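++
++/*
++ * Editorial note (not part of the upstream patch): the dropping flag is
++ * set while holding cache->reqs's xa_lock, and the same lock is taken in
++ * cachefiles_ondemand_send_req() before checking it, so no new request
++ * can be queued for this object once the pending ones above are cancelled.
++ */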
++
++int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
++ struct cachefiles_volume *volume)
++{
++ if (!cachefiles_in_ondemand_mode(volume->cache))
++ return 0;
++
++ object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
++ GFP_KERNEL);
++ if (!object->ondemand)
++ return -ENOMEM;
++
++ object->ondemand->object = object;
++ spin_lock_init(&object->ondemand->lock);
++ INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
++ return 0;
++}
++
++void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
++{
++ kfree(object->ondemand);
++ object->ondemand = NULL;
+ }
+
+ int cachefiles_ondemand_read(struct cachefiles_object *object,
+diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
+index 89df0ba8ba5e7b..781aac4ef274bd 100644
+--- a/fs/cachefiles/volume.c
++++ b/fs/cachefiles/volume.c
+@@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)
+
+ void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
+ {
+- fscache_withdraw_volume(volume->vcookie);
+ cachefiles_set_volume_xattr(volume);
+ __cachefiles_free_volume(volume);
+ }
+diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
+index bcb6173943ee4a..7c6f260a3be567 100644
+--- a/fs/cachefiles/xattr.c
++++ b/fs/cachefiles/xattr.c
+@@ -64,9 +64,15 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
+ memcpy(buf->data, fscache_get_aux(object->cookie), len);
+
+ ret = cachefiles_inject_write_error();
+- if (ret == 0)
+- ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
+- buf, sizeof(struct cachefiles_xattr) + len, 0);
++ if (ret == 0) {
++ ret = mnt_want_write_file(file);
++ if (ret == 0) {
++ ret = vfs_setxattr(&nop_mnt_idmap, dentry,
++ cachefiles_xattr_cache, buf,
++ sizeof(struct cachefiles_xattr) + len, 0);
++ mnt_drop_write_file(file);
++ }
++ }
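++ /*
++  * Editorial note: vfs_setxattr() must be called with write access held
++  * on the mount; mnt_want_write_file() takes it (waiting out a frozen
++  * filesystem) and mnt_drop_write_file() releases it. The removexattr
++  * and volume-xattr hunks below follow the same pattern with
++  * mnt_want_write() on the cache mount.
++  */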
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(object, file_inode(file), ret,
+ cachefiles_trace_setxattr_error);
+@@ -110,9 +116,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
+ if (xlen == 0)
+ xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
+ if (xlen != tlen) {
+- if (xlen < 0)
++ if (xlen < 0) {
++ ret = xlen;
+ trace_cachefiles_vfs_error(object, file_inode(file), xlen,
+ cachefiles_trace_getxattr_error);
++ }
+ if (xlen == -EIO)
+ cachefiles_io_error_obj(
+ object,
+@@ -149,8 +157,14 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ int ret;
+
+ ret = cachefiles_inject_remove_error();
+- if (ret == 0)
+- ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache);
++ if (ret == 0) {
++ ret = mnt_want_write(cache->mnt);
++ if (ret == 0) {
++ ret = vfs_removexattr(&nop_mnt_idmap, dentry,
++ cachefiles_xattr_cache);
++ mnt_drop_write(cache->mnt);
++ }
++ }
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(object, d_inode(dentry), ret,
+ cachefiles_trace_remxattr_error);
+@@ -206,9 +220,15 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
+ memcpy(buf->data, p, volume->vcookie->coherency_len);
+
+ ret = cachefiles_inject_write_error();
+- if (ret == 0)
+- ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
+- buf, len, 0);
++ if (ret == 0) {
++ ret = mnt_want_write(volume->cache->mnt);
++ if (ret == 0) {
++ ret = vfs_setxattr(&nop_mnt_idmap, dentry,
++ cachefiles_xattr_cache,
++ buf, len, 0);
++ mnt_drop_write(volume->cache->mnt);
++ }
++ }
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
+ cachefiles_trace_setxattr_error);
+@@ -252,6 +272,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
+ xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
+ if (xlen != len) {
+ if (xlen < 0) {
++ ret = xlen;
+ trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
+ cachefiles_trace_getxattr_error);
+ if (xlen == -EIO)
+diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
+index 94df854147d359..7249d70e1a43fa 100644
+--- a/fs/ceph/Kconfig
++++ b/fs/ceph/Kconfig
+@@ -7,6 +7,7 @@ config CEPH_FS
+ select CRYPTO_AES
+ select CRYPTO
+ select NETFS_SUPPORT
++ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
+ default n
+ help
+ Choose Y or M here to include support for mounting the
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index f4863078f7fe55..1a2776025e9861 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -95,7 +95,6 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
+
+ /* dirty the head */
+ spin_lock(&ci->i_ceph_lock);
+- BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
+ if (__ceph_have_pending_cap_snap(ci)) {
+ struct ceph_cap_snap *capsnap =
+ list_last_entry(&ci->i_cap_snaps,
+@@ -229,7 +228,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
+ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
+ {
+ struct inode *inode = subreq->rreq->inode;
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objno, objoff;
+ u32 xlen;
+@@ -244,7 +243,7 @@ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
+ static void finish_netfs_read(struct ceph_osd_request *req)
+ {
+ struct inode *inode = req->r_inode;
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+ struct netfs_io_subrequest *subreq = req->r_priv;
+ struct ceph_osd_req_op *op = &req->r_ops[0];
+@@ -348,7 +347,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct inode *inode = rreq->inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_request *req = NULL;
+ struct ceph_vino vino = ceph_vino(inode);
+ struct iov_iter iter;
+@@ -484,8 +483,11 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+ rreq->netfs_priv = priv;
+
+ out:
+- if (ret < 0)
++ if (ret < 0) {
++ if (got)
++ ceph_put_cap_refs(ceph_inode(inode), got);
+ kfree(priv);
++ }
+
+ return ret;
+ }
+@@ -658,7 +660,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+ struct folio *folio = page_folio(page);
+ struct inode *inode = page->mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_snap_context *snapc, *oldest;
+ loff_t page_off = page_offset(page);
+ int err;
+@@ -803,8 +805,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
+ ihold(inode);
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+- ceph_inode_to_client(inode)->write_congested)
++ ceph_inode_to_fs_client(inode)->write_congested) {
++ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
++ }
+
+ wait_on_page_fscache(page);
+
+@@ -836,7 +840,7 @@ static void writepages_finish(struct ceph_osd_request *req)
+ int rc = req->r_result;
+ struct ceph_snap_context *snapc = req->r_snapc;
+ struct address_space *mapping = inode->i_mapping;
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ unsigned int len = 0;
+ bool remove_page;
+
+@@ -926,7 +930,7 @@ static int ceph_writepages_start(struct address_space *mapping,
+ {
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_vino vino = ceph_vino(inode);
+ pgoff_t index, start_index, end = -1;
+ struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
+@@ -1823,7 +1827,7 @@ int ceph_uninline_data(struct file *file)
+ {
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_request *req = NULL;
+ struct ceph_cap_flush *prealloc_cf = NULL;
+ struct folio *folio = NULL;
+@@ -1977,7 +1981,7 @@ enum {
+ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
+ s64 pool, struct ceph_string *pool_ns)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
+ struct rb_node **p, *parent;
+@@ -2168,7 +2172,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
+ return 0;
+ }
+
+- if (ceph_test_mount_opt(ceph_inode_to_client(inode),
++ if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
+ NOPOOLPERM))
+ return 0;
+
+diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
+index de1dee46d3df72..930fbd54d2c8c8 100644
+--- a/fs/ceph/cache.c
++++ b/fs/ceph/cache.c
+@@ -15,7 +15,7 @@
+ void ceph_fscache_register_inode_cookie(struct inode *inode)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+
+ /* No caching for filesystem? */
+ if (!fsc->fscache)
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 14215ec646f7ae..00045b8eadd142 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -635,7 +635,7 @@ void ceph_add_cap(struct inode *inode,
+ unsigned seq, unsigned mseq, u64 realmino, int flags,
+ struct ceph_cap **new_cap)
+ {
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap *cap;
+ int mds = session->s_mds;
+@@ -922,7 +922,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
+ int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
+ int touch)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ int r;
+
+ r = __ceph_caps_issued_mask(ci, mask, touch);
+@@ -996,7 +996,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
+ const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
+ const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
+ struct ceph_mount_options *opt =
+- ceph_inode_to_client(&ci->netfs.inode)->mount_options;
++ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+ unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
+ unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
+
+@@ -1121,7 +1121,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+
+ dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);
+
+- mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;
++ mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc;
+
+ /* remove from inode's cap rbtree, and clear auth cap */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+@@ -1178,7 +1178,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+ }
+ }
+
+-void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
++void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
++ bool queue_release)
+ {
+ struct ceph_inode_info *ci = cap->ci;
+ struct ceph_fs_client *fsc;
+@@ -1191,7 +1192,7 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+- fsc = ceph_inode_to_client(&ci->netfs.inode);
++ fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
+ WARN_ON_ONCE(ci->i_auth_cap == cap &&
+ !list_empty(&ci->i_dirty_item) &&
+ !fsc->blocklisted &&
+@@ -1342,6 +1343,8 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
+ */
+ void __ceph_remove_caps(struct ceph_inode_info *ci)
+ {
++ struct inode *inode = &ci->netfs.inode;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct rb_node *p;
+
+ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+@@ -1351,7 +1354,7 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+ p = rb_next(p);
+- ceph_remove_cap(cap, true);
++ ceph_remove_cap(mdsc, cap, true);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ }
+@@ -1415,7 +1418,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
+ if (flushing & CEPH_CAP_XATTR_EXCL) {
+ arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
+ arg->xattr_version = ci->i_xattrs.version;
+- arg->xattr_buf = ci->i_xattrs.blob;
++ arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob);
+ } else {
+ arg->xattr_buf = NULL;
+ arg->old_xattr_buf = NULL;
+@@ -1513,6 +1516,7 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
+ encode_cap_msg(msg, arg);
+ ceph_con_send(&arg->session->s_con, msg);
+ ceph_buffer_put(arg->old_xattr_buf);
++ ceph_buffer_put(arg->xattr_buf);
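++ /* Editorial note: pairs with the ceph_buffer_get() added in __prep_cap() */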
+ if (arg->wake)
+ wake_up_all(&ci->i_cap_wq);
+ }
+@@ -1685,7 +1689,7 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession)
+ {
+ struct inode *inode = &ci->netfs.inode;
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_mds_session *session = NULL;
+ bool need_put = false;
+ int mds;
+@@ -1750,7 +1754,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
+ struct ceph_cap_flush **pcf)
+ {
+ struct ceph_mds_client *mdsc =
+- ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
++ ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc;
+ struct inode *inode = &ci->netfs.inode;
+ int was = ci->i_dirty_caps;
+ int dirty = 0;
+@@ -1873,7 +1877,7 @@ static u64 __mark_caps_flushing(struct inode *inode,
+ struct ceph_mds_session *session, bool wake,
+ u64 *oldest_flush_tid)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap_flush *cf = NULL;
+ int flushing;
+@@ -2232,7 +2236,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags)
+ */
+ static int try_flush_caps(struct inode *inode, u64 *ptid)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int flushing = 0;
+ u64 flush_tid = 0, oldest_flush_tid = 0;
+@@ -2310,7 +2314,7 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
+ */
+ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req1 = NULL, *req2 = NULL;
+ int ret, err = 0;
+@@ -2493,7 +2497,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
+ caps_are_flushed(inode, flush_tid));
+ } else {
+ struct ceph_mds_client *mdsc =
+- ceph_sb_to_client(inode->i_sb)->mdsc;
++ ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+
+ spin_lock(&ci->i_ceph_lock);
+ if (__ceph_caps_dirty(ci))
+@@ -2746,7 +2750,7 @@ static int try_get_cap_refs(struct inode *inode, int need, int want,
+ loff_t endoff, int flags, int *got)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ int ret = 0;
+ int have, implemented;
+ bool snap_rwsem_locked = false;
+@@ -2964,7 +2968,7 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
+ int want, loff_t endoff, int *got)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int ret, _got, flags;
+
+ ret = ceph_pool_perm_check(inode, need);
+@@ -3727,7 +3731,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
+ __releases(ci->i_ceph_lock)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *cf, *tmp_cf;
+ LIST_HEAD(to_remove);
+ unsigned seq = le32_to_cpu(m->seq);
+@@ -3833,7 +3837,7 @@ void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
+ bool *wake_ci, bool *wake_mdsc)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ bool ret;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+@@ -3877,7 +3881,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
+ struct ceph_mds_session *session)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ u64 follows = le64_to_cpu(m->snap_follows);
+ struct ceph_cap_snap *capsnap = NULL, *iter;
+ bool wake_ci = false;
+@@ -3969,7 +3973,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ struct ceph_mds_cap_peer *ph,
+ struct ceph_mds_session *session)
+ {
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_mds_session *tsession = NULL;
+ struct ceph_cap *cap, *tcap, *new_cap = NULL;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+@@ -3999,7 +4003,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ goto out_unlock;
+
+ if (target < 0) {
+- ceph_remove_cap(cap, false);
++ ceph_remove_cap(mdsc, cap, false);
+ goto out_unlock;
+ }
+
+@@ -4034,7 +4038,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ change_auth_cap_ses(ci, tcap->session);
+ }
+ }
+- ceph_remove_cap(cap, false);
++ ceph_remove_cap(mdsc, cap, false);
+ goto out_unlock;
+ } else if (tsession) {
+ /* add placeholder for the export target */
+@@ -4051,7 +4055,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ spin_unlock(&mdsc->cap_dirty_lock);
+ }
+
+- ceph_remove_cap(cap, false);
++ ceph_remove_cap(mdsc, cap, false);
+ goto out_unlock;
+ }
+
+@@ -4164,7 +4168,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
+ ocap->mseq, mds, le32_to_cpu(ph->seq),
+ le32_to_cpu(ph->mseq));
+ }
+- ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
++ ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
+ }
+
+ *old_issued = issued;
+@@ -4672,7 +4676,7 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
+
+ if (__ceph_caps_dirty(ci)) {
+ struct ceph_mds_client *mdsc =
+- ceph_inode_to_client(inode)->mdsc;
++ ceph_inode_to_fs_client(inode)->mdsc;
+ __cap_delay_requeue_front(mdsc, ci);
+ }
+ }
+@@ -4780,12 +4784,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
+ struct inode *dir,
+ int mds, int drop, int unless)
+ {
+- struct dentry *parent = NULL;
+ struct ceph_mds_request_release *rel = *p;
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ int force = 0;
+ int ret;
+
++ /* This shouldn't happen */
++ BUG_ON(!dir);
++
+ /*
+ * force a record for the directory caps if we have a dentry lease.
+ * this is racy (can't take i_ceph_lock and d_lock together), but it
+@@ -4795,14 +4801,9 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
+ spin_lock(&dentry->d_lock);
+ if (di->lease_session && di->lease_session->s_mds == mds)
+ force = 1;
+- if (!dir) {
+- parent = dget(dentry->d_parent);
+- dir = d_inode(parent);
+- }
+ spin_unlock(&dentry->d_lock);
+
+ ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
+- dput(parent);
+
+ spin_lock(&dentry->d_lock);
+ if (ret && di->lease_session && di->lease_session->s_mds == mds) {
+@@ -4855,7 +4856,7 @@ static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
+
+ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_auth;
+diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
+index 5b5112c784629c..08c3856107316d 100644
+--- a/fs/ceph/crypto.c
++++ b/fs/ceph/crypto.c
+@@ -129,7 +129,7 @@ static bool ceph_crypt_empty_dir(struct inode *inode)
+
+ static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
+ {
+- return ceph_sb_to_client(sb)->fsc_dummy_enc_policy.policy;
++ return ceph_sb_to_fs_client(sb)->fsc_dummy_enc_policy.policy;
+ }
+
+ static struct fscrypt_operations ceph_fscrypt_ops = {
+diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
+index 3904333fa6c38b..2f1e7498cd7451 100644
+--- a/fs/ceph/debugfs.c
++++ b/fs/ceph/debugfs.c
+@@ -81,7 +81,7 @@ static int mdsc_show(struct seq_file *s, void *p)
+ if (req->r_inode) {
+ seq_printf(s, " #%llx", ceph_ino(req->r_inode));
+ } else if (req->r_dentry) {
+- path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
++ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
+ &pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
+@@ -100,7 +100,7 @@ static int mdsc_show(struct seq_file *s, void *p)
+ }
+
+ if (req->r_old_dentry) {
+- path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
++ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
+ &pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 854cbdd666619a..1395b71df5ccc2 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -310,7 +310,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
+ struct ceph_dir_file_info *dfi = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ int i;
+ int err;
+@@ -703,7 +703,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
+ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
+ struct dentry *dentry)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
+
+ /* .snap dir? */
+@@ -771,7 +771,7 @@ static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
+ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_mds_request *req;
+ int op;
+@@ -1199,7 +1199,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+ {
+ struct dentry *dentry = req->r_dentry;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ int result = req->r_err ? req->r_err :
+ le32_to_cpu(req->r_reply_info.head->result);
+@@ -1226,7 +1226,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
+ if (result) {
+ int pathlen = 0;
+ u64 base = 0;
+- char *path = ceph_mdsc_build_path(dentry, &pathlen,
++ char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
+ &base, 0);
+
+ /* mark error on parent + clear complete */
+@@ -1290,7 +1290,7 @@ static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
+ */
+ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct inode *inode = d_inode(dentry);
+ struct ceph_mds_request *req;
+@@ -1469,7 +1469,7 @@ void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
+ return;
+ }
+
+- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_move_tail(&di->lease_list, &mdsc->dentry_leases);
+ spin_unlock(&mdsc->dentry_list_lock);
+@@ -1516,7 +1516,7 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
+ return;
+ }
+
+- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ __dentry_dir_lease_touch(mdsc, di);
+ spin_unlock(&mdsc->dentry_list_lock);
+@@ -1530,7 +1530,7 @@ static void __dentry_lease_unlist(struct ceph_dentry_info *di)
+ if (list_empty(&di->lease_list))
+ return;
+
+- mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(di->dentry->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_del_init(&di->lease_list);
+ spin_unlock(&mdsc->dentry_list_lock);
+@@ -1888,7 +1888,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+ dentry, inode, ceph_dentry(dentry)->offset,
+ !!(dentry->d_flags & DCACHE_NOKEY_NAME));
+
+- mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc;
+
+ /* always trust cached snapped dentries, snapdir dentry */
+ if (ceph_snap(dir) != CEPH_NOSNAP) {
+@@ -1995,7 +1995,7 @@ static int ceph_d_delete(const struct dentry *dentry)
+ static void ceph_d_release(struct dentry *dentry)
+ {
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+
+ dout("d_release %p\n", dentry);
+
+@@ -2064,7 +2064,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
+ int left;
+ const int bufsize = 1024;
+
+- if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
++ if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode->i_sb), DIRSTAT))
+ return -EISDIR;
+
+ if (!dfi->dir_info) {
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 8559990a59a5c5..52c4daf2447d30 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -123,7 +123,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+
+ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct inode *inode;
+ struct ceph_vino vino;
+ int err;
+@@ -205,7 +205,7 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
+ struct ceph_nfs_snapfh *sfh,
+ bool want_parent)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *inode;
+ struct ceph_vino vino;
+@@ -317,7 +317,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
+ static struct dentry *__get_parent(struct super_block *sb,
+ struct dentry *child, u64 ino)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *inode;
+ int mask;
+@@ -439,7 +439,7 @@ static int __get_snap_name(struct dentry *parent, char *name,
+ {
+ struct inode *inode = d_inode(child);
+ struct inode *dir = d_inode(parent);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_request *req = NULL;
+ char *last_name = NULL;
+ unsigned next_offset = 2;
+@@ -544,7 +544,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return __get_snap_name(parent, name, child);
+
+- mdsc = ceph_inode_to_client(inode)->mdsc;
++ mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
+ USE_ANY_MDS);
+ if (IS_ERR(req))
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index b5f8038065d7c1..1e0497295662ad 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -200,7 +200,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mount_options *opt =
+- ceph_inode_to_client(&ci->netfs.inode)->mount_options;
++ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+ struct ceph_file_info *fi;
+ int ret;
+
+@@ -234,7 +234,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
+
+ spin_lock_init(&fi->rw_contexts_lock);
+ INIT_LIST_HEAD(&fi->rw_contexts);
+- fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
++ fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
+
+ if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
+ ret = ceph_uninline_data(file);
+@@ -352,7 +352,7 @@ int ceph_renew_caps(struct inode *inode, int fmode)
+ int ceph_open(struct inode *inode, struct file *file)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_file_info *fi = file->private_data;
+@@ -574,7 +574,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
+ if (result) {
+ int pathlen = 0;
+ u64 base = 0;
+- char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
++ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
+ &base, 0);
+
+ pr_warn("async create failure path=(%llx)%s result=%d!\n",
+@@ -730,7 +730,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
+ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t mode)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *new_inode = NULL;
+@@ -962,7 +962,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ u64 *last_objver)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ ssize_t ret;
+ u64 off = *ki_pos;
+@@ -1108,7 +1108,12 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ }
+
+ idx = 0;
+- left = ret > 0 ? ret : 0;
++ if (ret <= 0)
++ left = 0;
++ else if (off + ret > i_size)
++ left = i_size - off;
++ else
++ left = ret;
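++ /*
++  * Editorial note (an inference, not stated in the patch): clamping
++  * 'left' to the locally cached i_size keeps the copy loop below from
++  * handing back data past EOF even when the OSD returned more bytes.
++  */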
+ while (left > 0) {
+ size_t plen, copied;
+
+@@ -1137,15 +1142,13 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ }
+
+ if (ret > 0) {
+- if (off > *ki_pos) {
+- if (off >= i_size) {
+- *retry_op = CHECK_EOF;
+- ret = i_size - *ki_pos;
+- *ki_pos = i_size;
+- } else {
+- ret = off - *ki_pos;
+- *ki_pos = off;
+- }
++ if (off >= i_size) {
++ *retry_op = CHECK_EOF;
++ ret = i_size - *ki_pos;
++ *ki_pos = i_size;
++ } else {
++ ret = off - *ki_pos;
++ *ki_pos = off;
+ }
+
+ if (last_objver)
+@@ -1256,7 +1259,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
+ if (aio_work) {
+ INIT_WORK(&aio_work->work, ceph_aio_retry_work);
+ aio_work->req = req;
+- queue_work(ceph_inode_to_client(inode)->inode_wq,
++ queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
+ &aio_work->work);
+ return;
+ }
+@@ -1386,7 +1389,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client_metric *metric = &fsc->mdsc->metric;
+ struct ceph_vino vino;
+ struct ceph_osd_request *req;
+@@ -1610,7 +1613,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ struct ceph_osd_request *req;
+ struct page **pages;
+@@ -2228,7 +2231,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ struct ceph_file_info *fi = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ struct ceph_cap_flush *prealloc_cf;
+ ssize_t count, written = 0;
+@@ -2462,7 +2465,7 @@ static int ceph_zero_partial_object(struct inode *inode,
+ loff_t offset, loff_t *length)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_request *req;
+ int ret = 0;
+ loff_t zero = 0;
+@@ -2845,7 +2848,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ struct ceph_inode_info *src_ci = ceph_inode(src_inode);
+ struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
+ struct ceph_cap_flush *prealloc_cf;
+- struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
++ struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
+ loff_t size;
+ ssize_t ret = -EIO, bytes;
+ u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
+@@ -2853,7 +2856,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ int src_got = 0, dst_got = 0, err, dirty;
+
+ if (src_inode->i_sb != dst_inode->i_sb) {
+- struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
++ struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
+
+ if (ceph_fsid_compare(&src_fsc->client->fsid,
+ &dst_fsc->client->fsid)) {
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index b79100f720b38f..db6977c15c2828 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -1489,7 +1489,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct inode *in = NULL;
+ struct ceph_vino tvino, dvino;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ int err = 0;
+
+ dout("fill_trace %p is_dentry %d is_target %d\n", req,
+@@ -2079,7 +2079,7 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size)
+
+ void ceph_queue_inode_work(struct inode *inode, int work_bit)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ set_bit(work_bit, &ci->i_work_mask);
+
+@@ -2427,7 +2427,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr,
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ unsigned int ia_valid = attr->ia_valid;
+ struct ceph_mds_request *req;
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf;
+ loff_t isize = i_size_read(inode);
+ int issued;
+@@ -2740,7 +2740,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+ {
+ struct inode *inode = d_inode(dentry);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int err;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+@@ -2810,7 +2810,7 @@ int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
+ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int mask, bool force)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ int mode;
+@@ -2856,7 +2856,7 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
+ size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ int mode = USE_AUTH_MDS;
+@@ -3001,7 +3001,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
+ stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
+
+ if (S_ISDIR(inode->i_mode)) {
+- if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
++ if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
+ stat->size = ci->i_rbytes;
+ } else if (ceph_snap(inode) == CEPH_SNAPDIR) {
+ struct ceph_inode_info *pci;
+diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
+index 91a84917d203c5..3f617146e4ad34 100644
+--- a/fs/ceph/ioctl.c
++++ b/fs/ceph/ioctl.c
+@@ -65,7 +65,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
+ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
+ {
+ struct inode *inode = file_inode(file);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_ioctl_layout l;
+ struct ceph_inode_info *ci = ceph_inode(file_inode(file));
+@@ -140,7 +140,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
+ struct ceph_mds_request *req;
+ struct ceph_ioctl_layout l;
+ int err;
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+
+ /* copy and validate */
+ if (copy_from_user(&l, arg, sizeof(l)))
+@@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_osd_client *osdc =
+- &ceph_sb_to_client(inode->i_sb)->client->osdc;
++ &ceph_sb_to_fs_client(inode->i_sb)->client->osdc;
+ struct ceph_object_locator oloc;
+ CEPH_DEFINE_OID_ONSTACK(oid);
+ u32 xlen;
+@@ -244,7 +244,7 @@ static long ceph_ioctl_lazyio(struct file *file)
+ struct ceph_file_info *fi = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+
+ if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
+ spin_lock(&ci->i_ceph_lock);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 293b93182955d0..11289ce8a8cc81 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -830,7 +830,7 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
+ */
+ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct dentry *pdentry = dentry->d_parent;
+ struct dentry *udentry, *found = NULL;
+ struct ceph_dentry_info *di;
+@@ -2126,6 +2126,7 @@ static bool drop_negative_children(struct dentry *dentry)
+ */
+ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ int *remaining = arg;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int used, wanted, oissued, mine;
+@@ -2173,7 +2174,7 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
+
+ if (oissued) {
+ /* we aren't the only cap.. just remove us */
+- ceph_remove_cap(cap, true);
++ ceph_remove_cap(mdsc, cap, true);
+ (*remaining)--;
+ } else {
+ struct dentry *dentry;
+@@ -2588,6 +2589,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+
+ /**
+ * ceph_mdsc_build_path - build a path string to a given dentry
++ * @mdsc: mds client
+ * @dentry: dentry to which path should be built
+ * @plen: returned length of string
+ * @pbase: returned base inode number
+@@ -2607,8 +2609,8 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+ * Encode hidden .snap dirs as a double /, i.e.
+ * foo/.snap/bar -> foo//bar
+ */
+-char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
+- int for_wire)
++char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
++ int *plen, u64 *pbase, int for_wire)
+ {
+ struct dentry *cur;
+ struct inode *inode;
+@@ -2726,9 +2728,9 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
+ return path + pos;
+ }
+
+-static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+- const char **ppath, int *ppathlen, u64 *pino,
+- bool *pfreepath, bool parent_locked)
++static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
++ struct inode *dir, const char **ppath, int *ppathlen,
++ u64 *pino, bool *pfreepath, bool parent_locked)
+ {
+ char *path;
+
+@@ -2744,7 +2746,7 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+ return 0;
+ }
+ rcu_read_unlock();
+- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
++ path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+@@ -2756,6 +2758,7 @@ static int build_inode_path(struct inode *inode,
+ const char **ppath, int *ppathlen, u64 *pino,
+ bool *pfreepath)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct dentry *dentry;
+ char *path;
+
+@@ -2765,7 +2768,7 @@ static int build_inode_path(struct inode *inode,
+ return 0;
+ }
+ dentry = d_find_alias(inode);
+- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
++ path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ dput(dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -2778,10 +2781,11 @@ static int build_inode_path(struct inode *inode,
+ * request arguments may be specified via an inode *, a dentry *, or
+ * an explicit ino+path.
+ */
+-static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+- struct inode *rdiri, const char *rpath,
+- u64 rino, const char **ppath, int *pathlen,
+- u64 *ino, bool *freepath, bool parent_locked)
++static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
++ struct dentry *rdentry, struct inode *rdiri,
++ const char *rpath, u64 rino, const char **ppath,
++ int *pathlen, u64 *ino, bool *freepath,
++ bool parent_locked)
+ {
+ int r = 0;
+
+@@ -2790,7 +2794,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+ dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ ceph_snap(rinode));
+ } else if (rdentry) {
+- r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
++ r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
+ freepath, parent_locked);
+ dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
+ *ppath);
+@@ -2877,7 +2881,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ &session->s_features);
+
+- ret = set_request_path_attr(req->r_inode, req->r_dentry,
++ ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+ &path1, &pathlen1, &ino1, &freepath1,
+ test_bit(CEPH_MDS_R_PARENT_LOCKED,
+@@ -2891,7 +2895,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ if (req->r_old_dentry &&
+ !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
+ old_dentry = req->r_old_dentry;
+- ret = set_request_path_attr(NULL, old_dentry,
++ ret = set_request_path_attr(mdsc, NULL, old_dentry,
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+ &path2, &pathlen2, &ino2, &freepath2, true);
+@@ -4010,11 +4014,11 @@ static void handle_session(struct ceph_mds_session *session,
+ if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
+ pr_info("mds%d reconnect success\n", session->s_mds);
+
++ session->s_features = features;
+ if (session->s_state == CEPH_MDS_SESSION_OPEN) {
+ pr_notice("mds%d is already opened\n", session->s_mds);
+ } else {
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+- session->s_features = features;
+ renewed_caps(mdsc, session, 0);
+ if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
+ &session->s_features))
+@@ -4290,6 +4294,7 @@ static struct dentry* d_find_primary(struct inode *inode)
+ */
+ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ union {
+ struct ceph_mds_cap_reconnect v2;
+ struct ceph_mds_cap_reconnect_v1 v1;
+@@ -4307,7 +4312,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ dentry = d_find_primary(inode);
+ if (dentry) {
+ /* set pathbase to parent dir when msg_version >= 2 */
+- path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
++ path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
+ recon_state->msg_version >= 2);
+ dput(dentry);
+ if (IS_ERR(path)) {
+@@ -5662,7 +5667,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+ return;
+ }
+
+- newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
++ newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
+ if (IS_ERR(newmap)) {
+ err = PTR_ERR(newmap);
+ goto bad_unlock;
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 5a3714bdd64a8e..d930eb79dc380f 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -581,7 +581,8 @@ static inline void ceph_mdsc_free_path(char *path, int len)
+ __putname(path - (PATH_MAX - 1 - len));
+ }
+
+-extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
++extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
++ struct dentry *dentry, int *plen, u64 *base,
+ int for_wire);
+
+ extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
+diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
+index 7dac21ee6ce768..66afb18df76b2d 100644
+--- a/fs/ceph/mdsmap.c
++++ b/fs/ceph/mdsmap.c
+@@ -114,7 +114,8 @@ static int __decode_and_drop_compat_set(void **p, void* end)
+ * Ignore any fields we don't care about (there are quite a few of
+ * them).
+ */
+-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
++struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
++ void *end, bool msgr2)
+ {
+ struct ceph_mdsmap *m;
+ const void *start = *p;
+@@ -379,10 +380,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
+ ceph_decode_skip_8(p, end, bad_ext);
+ /* required_client_features */
+ ceph_decode_skip_set(p, end, 64, bad_ext);
++ /* bal_rank_mask */
++ ceph_decode_skip_string(p, end, bad_ext);
++ }
++ if (mdsmap_ev >= 18) {
+ ceph_decode_64_safe(p, end, m->m_max_xattr_size, bad_ext);
+- } else {
+- /* This forces the usage of the (sync) SETXATTR Op */
+- m->m_max_xattr_size = 0;
+ }
+ bad_ext:
+ dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
+index f7fcf7f08ec642..ca4932e6f71bf4 100644
+--- a/fs/ceph/quota.c
++++ b/fs/ceph/quota.c
+@@ -194,10 +194,10 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
+ }
+
+ /*
+- * This function walks through the snaprealm for an inode and returns the
+- * ceph_snap_realm for the first snaprealm that has quotas set (max_files,
++ * This function walks through the snaprealm for an inode and sets
++ * *realmp to the first snaprealm that has quotas set (max_files,
+ * max_bytes, or any, depending on the 'which_quota' argument). If the root is
+- * reached, return the root ceph_snap_realm instead.
++ * reached, sets *realmp to the root ceph_snap_realm instead.
+ *
+ * Note that the caller is responsible for calling ceph_put_snap_realm() on the
+ * returned realm.
+@@ -208,18 +208,19 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
+ * this function will return -EAGAIN; otherwise, the snaprealms walk-through
+ * will be restarted.
+ */
+-static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
+- struct inode *inode,
+- enum quota_get_realm which_quota,
+- bool retry)
++static int get_quota_realm(struct ceph_mds_client *mdsc, struct inode *inode,
++ enum quota_get_realm which_quota,
++ struct ceph_snap_realm **realmp, bool retry)
+ {
+ struct ceph_inode_info *ci = NULL;
+ struct ceph_snap_realm *realm, *next;
+ struct inode *in;
+ bool has_quota;
+
++ if (realmp)
++ *realmp = NULL;
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+- return NULL;
++ return 0;
+
+ restart:
+ realm = ceph_inode(inode)->i_snap_realm;
+@@ -245,7 +246,7 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
+ break;
+ ceph_put_snap_realm(mdsc, realm);
+ if (!retry)
+- return ERR_PTR(-EAGAIN);
++ return -EAGAIN;
+ goto restart;
+ }
+
+@@ -254,8 +255,11 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
+ iput(in);
+
+ next = realm->parent;
+- if (has_quota || !next)
+- return realm;
++ if (has_quota || !next) {
++ if (realmp)
++ *realmp = realm;
++ return 0;
++ }
+
+ ceph_get_snap_realm(mdsc, next);
+ ceph_put_snap_realm(mdsc, realm);
+@@ -264,7 +268,7 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
+ if (realm)
+ ceph_put_snap_realm(mdsc, realm);
+
+- return NULL;
++ return 0;
+ }
+
+ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+@@ -272,6 +276,7 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old->i_sb);
+ struct ceph_snap_realm *old_realm, *new_realm;
+ bool is_same;
++ int ret;
+
+ restart:
+ /*
+@@ -281,9 +286,9 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+ * dropped and we can then restart the whole operation.
+ */
+ down_read(&mdsc->snap_rwsem);
+- old_realm = get_quota_realm(mdsc, old, QUOTA_GET_ANY, true);
+- new_realm = get_quota_realm(mdsc, new, QUOTA_GET_ANY, false);
+- if (PTR_ERR(new_realm) == -EAGAIN) {
++ get_quota_realm(mdsc, old, QUOTA_GET_ANY, &old_realm, true);
++ ret = get_quota_realm(mdsc, new, QUOTA_GET_ANY, &new_realm, false);
++ if (ret == -EAGAIN) {
+ up_read(&mdsc->snap_rwsem);
+ if (old_realm)
+ ceph_put_snap_realm(mdsc, old_realm);
+@@ -485,8 +490,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
+ bool is_updated = false;
+
+ down_read(&mdsc->snap_rwsem);
+- realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root),
+- QUOTA_GET_MAX_BYTES, true);
++ get_quota_realm(mdsc, d_inode(fsc->sb->s_root), QUOTA_GET_MAX_BYTES,
++ &realm, true);
+ up_read(&mdsc->snap_rwsem);
+ if (!realm)
+ return false;
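
The get_quota_realm() rework above swaps an ERR_PTR()-encoded return for a plain int status plus a **realmp out-parameter, which keeps "no realm" (NULL result, 0 status) distinct from "retry" (-EAGAIN). A small userspace sketch of the out-parameter convention, with toy types standing in for the ceph structs:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct realm { int id; };

/* out-parameter style: status in the return value, result via *realmp */
static int get_realm(int want_retry, struct realm **realmp)
{
	if (realmp)
		*realmp = NULL;		/* never leave the out-param dangling */
	if (want_retry)
		return -EAGAIN;		/* caller may restart the walk */
	*realmp = malloc(sizeof(**realmp));
	if (!*realmp)
		return -ENOMEM;
	(*realmp)->id = 1;
	return 0;			/* 0 with *realmp == NULL means "no realm" */
}

int main(void)
{
	struct realm *r;
	int ret = get_realm(0, &r);

	if (ret == -EAGAIN)
		puts("retry");
	else if (r)
		printf("realm %d\n", r->id);
	free(r);
	return 0;
}
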
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 813f21add992c1..d0d3612f28f0ea 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -329,7 +329,8 @@ static int cmpu64_rev(const void *a, const void *b)
+ /*
+ * build the snap context for a given realm.
+ */
+-static int build_snap_context(struct ceph_snap_realm *realm,
++static int build_snap_context(struct ceph_mds_client *mdsc,
++ struct ceph_snap_realm *realm,
+ struct list_head *realm_queue,
+ struct list_head *dirty_realms)
+ {
+@@ -425,7 +426,8 @@ static int build_snap_context(struct ceph_snap_realm *realm,
+ /*
+ * rebuild snap context for the given realm and all of its children.
+ */
+-static void rebuild_snap_realms(struct ceph_snap_realm *realm,
++static void rebuild_snap_realms(struct ceph_mds_client *mdsc,
++ struct ceph_snap_realm *realm,
+ struct list_head *dirty_realms)
+ {
+ LIST_HEAD(realm_queue);
+@@ -451,7 +453,8 @@ static void rebuild_snap_realms(struct ceph_snap_realm *realm,
+ continue;
+ }
+
+- last = build_snap_context(_realm, &realm_queue, dirty_realms);
++ last = build_snap_context(mdsc, _realm, &realm_queue,
++ dirty_realms);
+ dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm,
+ last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
+
+@@ -708,7 +711,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+-static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
++static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc,
++ struct ceph_snap_realm *realm)
+ {
+ struct ceph_inode_info *ci;
+ struct inode *lastinode = NULL;
+@@ -855,7 +859,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+
+ /* rebuild_snapcs when we reach the _end_ (root) of the trace */
+ if (realm_to_rebuild && p >= e)
+- rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
++ rebuild_snap_realms(mdsc, realm_to_rebuild, &dirty_realms);
+
+ if (!first_realm)
+ first_realm = realm;
+@@ -873,7 +877,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+ realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
+ dirty_item);
+ list_del_init(&realm->dirty_item);
+- queue_realm_cap_snaps(realm);
++ queue_realm_cap_snaps(mdsc, realm);
+ }
+
+ if (realm_ret)
+@@ -960,7 +964,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
+ void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 2d7f5a8d4a9260..ec51e398562c6f 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -44,7 +44,7 @@ static LIST_HEAD(ceph_fsc_list);
+ */
+ static void ceph_put_super(struct super_block *s)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+
+ dout("put_super\n");
+ ceph_fscrypt_free_dummy_policy(fsc);
+@@ -53,7 +53,7 @@ static void ceph_put_super(struct super_block *s)
+
+ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(d_inode(dentry));
+ struct ceph_mon_client *monc = &fsc->client->monc;
+ struct ceph_statfs st;
+ int i, err;
+@@ -118,7 +118,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+
+ static int ceph_sync_fs(struct super_block *sb, int wait)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ if (!wait) {
+ dout("sync_fs (non-blocking)\n");
+@@ -684,7 +684,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
+ */
+ static int ceph_show_options(struct seq_file *m, struct dentry *root)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(root->d_sb);
+ struct ceph_mount_options *fsopt = fsc->mount_options;
+ size_t pos;
+ int ret;
+@@ -958,7 +958,8 @@ static int __init init_caches(void)
+ if (!ceph_mds_request_cachep)
+ goto bad_mds_req;
+
+- ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
++ ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10,
++ (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!ceph_wb_pagevec_pool)
+ goto bad_pagevec_pool;
+
+@@ -1015,7 +1016,7 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
+ */
+ void ceph_umount_begin(struct super_block *sb)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ dout("ceph_umount_begin - starting forced umount\n");
+ if (!fsc)
+@@ -1226,7 +1227,7 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
+ struct ceph_fs_client *new = fc->s_fs_info;
+ struct ceph_mount_options *fsopt = new->mount_options;
+ struct ceph_options *opt = new->client->options;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ dout("ceph_compare_super %p\n", sb);
+
+@@ -1322,9 +1323,9 @@ static int ceph_get_tree(struct fs_context *fc)
+ goto out;
+ }
+
+- if (ceph_sb_to_client(sb) != fsc) {
++ if (ceph_sb_to_fs_client(sb) != fsc) {
+ destroy_fs_client(fsc);
+- fsc = ceph_sb_to_client(sb);
++ fsc = ceph_sb_to_fs_client(sb);
+ dout("get_sb got existing client %p\n", fsc);
+ } else {
+ dout("get_sb using new client %p\n", fsc);
+@@ -1377,7 +1378,7 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+ struct super_block *sb = fc->root->d_sb;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
+ if (err)
+@@ -1516,7 +1517,7 @@ void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
+
+ static void ceph_kill_sb(struct super_block *s)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ bool wait;
+
+@@ -1578,7 +1579,7 @@ MODULE_ALIAS_FS("ceph");
+
+ int ceph_force_reconnect(struct super_block *sb)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ int err = 0;
+
+ fsc->mount_state = CEPH_MOUNT_RECOVER;
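
The mempool fix above matters because mempool_create_kmalloc_pool() takes an element size in bytes, so reserving room for an array of page pointers needs count * sizeof(struct page *), not the bare count. The bug class in miniature, against a stand-in allocator:

#include <stdio.h>
#include <stdlib.h>

struct page;				/* opaque, used only through pointers */

/* stand-in pool: preallocates elements of elem_size bytes each */
static void *pool_create(size_t nelems, size_t elem_size)
{
	return calloc(nelems, elem_size);
}

int main(void)
{
	size_t nptrs = 256;		/* page pointers per write buffer */

	/* wrong: passes a pointer count where a byte size is expected */
	/* pool_create(10, nptrs); */

	/* right: the element size is the full pointer array in bytes */
	void *pool = pool_create(10, nptrs * sizeof(struct page *));

	printf("elem size = %zu bytes\n", nptrs * sizeof(struct page *));
	free(pool);
	return 0;
}
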
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 51c7f2b14f6f87..8efd4ba6077448 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -488,13 +488,13 @@ ceph_inode(const struct inode *inode)
+ }
+
+ static inline struct ceph_fs_client *
+-ceph_inode_to_client(const struct inode *inode)
++ceph_inode_to_fs_client(const struct inode *inode)
+ {
+ return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
+ }
+
+ static inline struct ceph_fs_client *
+-ceph_sb_to_client(const struct super_block *sb)
++ceph_sb_to_fs_client(const struct super_block *sb)
+ {
+ return (struct ceph_fs_client *)sb->s_fs_info;
+ }
+@@ -502,7 +502,7 @@ ceph_sb_to_client(const struct super_block *sb)
+ static inline struct ceph_mds_client *
+ ceph_sb_to_mdsc(const struct super_block *sb)
+ {
+- return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
++ return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc;
+ }
+
+ static inline struct ceph_vino
+@@ -558,7 +558,7 @@ static inline u64 ceph_snap(struct inode *inode)
+ */
+ static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
+ {
+- if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
++ if (unlikely(ceph_test_mount_opt(ceph_sb_to_fs_client(sb), INO32)))
+ return ceph_ino_to_ino32(ino);
+ return ino;
+ }
+@@ -1106,7 +1106,7 @@ void ceph_inode_shutdown(struct inode *inode);
+ static inline bool ceph_inode_is_shutdown(struct inode *inode)
+ {
+ unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int state = READ_ONCE(fsc->mount_state);
+
+ return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
+@@ -1223,7 +1223,8 @@ extern void ceph_add_cap(struct inode *inode,
+ unsigned cap, unsigned seq, u64 realmino, int flags,
+ struct ceph_cap **new_cap);
+ extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+-extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
++extern void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
++ bool queue_release);
+ extern void __ceph_remove_caps(struct ceph_inode_info *ci);
+ extern void ceph_put_cap(struct ceph_mds_client *mdsc,
+ struct ceph_cap *cap);
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 0deae4a0f5f169..558f64554b5914 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -57,7 +57,7 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
+ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+ size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ struct ceph_string *pool_ns;
+ s64 pool = ci->i_layout.pool_id;
+@@ -161,7 +161,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
+ char *val, size_t size)
+ {
+ ssize_t ret;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ s64 pool = ci->i_layout.pool_id;
+ const char *pool_name;
+@@ -313,7 +313,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
+ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
+ char *val, size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+
+ return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
+ }
+@@ -321,7 +321,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
+ static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
+ char *val, size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+
+ return ceph_fmt_xattr(val, size, "client%lld",
+ ceph_client_gid(fsc->client));
+@@ -1094,7 +1094,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
+ static int ceph_sync_setxattr(struct inode *inode, const char *name,
+ const char *value, size_t size, int flags)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+@@ -1164,7 +1164,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
+ {
+ struct ceph_vxattr *vxattr;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf = NULL;
+ struct ceph_buffer *old_blob = NULL;
+ int issued;
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 5ee7d7bbb361ce..2fbf97077ce910 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb)
+ sb->s_mtd = NULL;
+ } else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
+ sync_blockdev(sb->s_bdev);
+- blkdev_put(sb->s_bdev, sb);
++ bdev_release(sb->s_bdev_handle);
+ }
+ kfree(sbi);
+ }
+diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
+index 6eae3f12ad503d..553af738bb3e1b 100644
+--- a/fs/crypto/fname.c
++++ b/fs/crypto/fname.c
+@@ -74,13 +74,7 @@ struct fscrypt_nokey_name {
+
+ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+ {
+- if (str->len == 1 && str->name[0] == '.')
+- return true;
+-
+- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+- return true;
+-
+- return false;
++ return is_dot_dotdot(str->name, str->len);
+ }
+
+ /**
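
The fname.c hunk above replaces an open-coded dot/dot-dot test with a shared is_dot_dotdot() helper (the ecryptfs hunk further down deletes its private copy for the same reason). A standalone version of what such a helper checks:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* true for the "." and ".." directory entries only */
static bool is_dot_dotdot(const char *name, size_t len)
{
	if (len == 0 || name[0] != '.')
		return false;
	return len == 1 || (len == 2 && name[1] == '.');
}

int main(void)
{
	const char *samples[] = { ".", "..", "...", ".hidden" };

	for (int i = 0; i < 4; i++)
		printf("%-8s -> %d\n", samples[i],
		       is_dot_dotdot(samples[i], strlen(samples[i])));
	return 0;
}
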
+diff --git a/fs/dax.c b/fs/dax.c
+index 8fafecbe42b159..d48b4fc7a4838e 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -412,23 +412,23 @@ static struct page *dax_busy_page(void *entry)
+ return NULL;
+ }
+
+-/*
+- * dax_lock_page - Lock the DAX entry corresponding to a page
+- * @page: The page whose entry we want to lock
++/**
++ * dax_lock_folio - Lock the DAX entry corresponding to a folio
++ * @folio: The folio whose entry we want to lock
+ *
+ * Context: Process context.
+- * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
++ * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
+ * not be locked.
+ */
+-dax_entry_t dax_lock_page(struct page *page)
++dax_entry_t dax_lock_folio(struct folio *folio)
+ {
+ XA_STATE(xas, NULL, 0);
+ void *entry;
+
+- /* Ensure page->mapping isn't freed while we look at it */
++ /* Ensure folio->mapping isn't freed while we look at it */
+ rcu_read_lock();
+ for (;;) {
+- struct address_space *mapping = READ_ONCE(page->mapping);
++ struct address_space *mapping = READ_ONCE(folio->mapping);
+
+ entry = NULL;
+ if (!mapping || !dax_mapping(mapping))
+@@ -447,11 +447,11 @@ dax_entry_t dax_lock_page(struct page *page)
+
+ xas.xa = &mapping->i_pages;
+ xas_lock_irq(&xas);
+- if (mapping != page->mapping) {
++ if (mapping != folio->mapping) {
+ xas_unlock_irq(&xas);
+ continue;
+ }
+- xas_set(&xas, page->index);
++ xas_set(&xas, folio->index);
+ entry = xas_load(&xas);
+ if (dax_is_locked(entry)) {
+ rcu_read_unlock();
+@@ -467,10 +467,10 @@ dax_entry_t dax_lock_page(struct page *page)
+ return (dax_entry_t)entry;
+ }
+
+-void dax_unlock_page(struct page *page, dax_entry_t cookie)
++void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
+ {
+- struct address_space *mapping = page->mapping;
+- XA_STATE(xas, &mapping->i_pages, page->index);
++ struct address_space *mapping = folio->mapping;
++ XA_STATE(xas, &mapping->i_pages, folio->index);
+
+ if (S_ISCHR(mapping->host->i_mode))
+ return;
+@@ -1305,11 +1305,15 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+- .len = len,
+ .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
+ };
++ loff_t size = i_size_read(inode);
+ int ret;
+
++ if (pos < 0 || pos >= size)
++ return 0;
++
++ iter.len = min(len, size - pos);
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.processed = dax_unshare_iter(&iter);
+ return ret;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 25ac74d30bff3b..4030c010a76820 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -356,7 +356,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
+ flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ WRITE_ONCE(dentry->d_flags, flags);
+ dentry->d_inode = NULL;
+- if (dentry->d_flags & DCACHE_LRU_LIST)
++ /*
++ * The negative counter only tracks dentries on the LRU. Don't inc if
++ * d_lru is on another list.
++ */
++ if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
+ this_cpu_inc(nr_dentry_negative);
+ }
+
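
The nr_dentry_negative fixes above rely on the (flags & (A|B)) == A idiom: the counter is touched only when DCACHE_LRU_LIST is set and DCACHE_SHRINK_LIST is clear, which a plain flags & A test cannot express. The idiom in isolation, with illustrative flag values:

#include <stdbool.h>
#include <stdio.h>

#define LRU_LIST	0x1u
#define SHRINK_LIST	0x2u

/* true only if LRU_LIST is set and SHRINK_LIST is clear */
static bool on_lru_only(unsigned int flags)
{
	return (flags & (LRU_LIST | SHRINK_LIST)) == LRU_LIST;
}

int main(void)
{
	printf("%d %d %d\n",
	       on_lru_only(LRU_LIST),			/* 1 */
	       on_lru_only(LRU_LIST | SHRINK_LIST),	/* 0: being shrunk */
	       on_lru_only(0));				/* 0 */
	return 0;
}
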
+@@ -787,12 +791,12 @@ static inline bool fast_dput(struct dentry *dentry)
+ */
+ if (unlikely(ret < 0)) {
+ spin_lock(&dentry->d_lock);
+- if (dentry->d_lockref.count > 1) {
+- dentry->d_lockref.count--;
++ if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) {
+ spin_unlock(&dentry->d_lock);
+ return true;
+ }
+- return false;
++ dentry->d_lockref.count--;
++ goto locked;
+ }
+
+ /*
+@@ -850,6 +854,7 @@ static inline bool fast_dput(struct dentry *dentry)
+ * else could have killed it and marked it dead. Either way, we
+ * don't need to do anything else.
+ */
++locked:
+ if (dentry->d_lockref.count) {
+ spin_unlock(&dentry->d_lock);
+ return true;
+@@ -1999,9 +2004,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+
+ spin_lock(&dentry->d_lock);
+ /*
+- * Decrement negative dentry count if it was in the LRU list.
++ * The negative counter only tracks dentries on the LRU. Don't dec if
++ * d_lru is on another list.
+ */
+- if (dentry->d_flags & DCACHE_LRU_LIST)
++ if ((dentry->d_flags &
++ (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
+ this_cpu_dec(nr_dentry_negative);
+ hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+ raw_write_seqcount_begin(&dentry->d_seq);
+@@ -3201,28 +3208,25 @@ EXPORT_SYMBOL(d_splice_alias);
+
+ bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
+ {
+- bool result;
++ bool subdir;
+ unsigned seq;
+
+ if (new_dentry == old_dentry)
+ return true;
+
+- do {
+- /* for restarting inner loop in case of seq retry */
+- seq = read_seqbegin(&rename_lock);
+- /*
+- * Need rcu_readlock to protect against the d_parent trashing
+- * due to d_move
+- */
+- rcu_read_lock();
+- if (d_ancestor(old_dentry, new_dentry))
+- result = true;
+- else
+- result = false;
+- rcu_read_unlock();
+- } while (read_seqretry(&rename_lock, seq));
+-
+- return result;
++ /* Access d_parent under rcu as d_move() may change it. */
++ rcu_read_lock();
++ seq = read_seqbegin(&rename_lock);
++ subdir = d_ancestor(old_dentry, new_dentry);
++ /* Try lockless once... */
++ if (read_seqretry(&rename_lock, seq)) {
++ /* ...else acquire lock for progress even on deep chains. */
++ read_seqlock_excl(&rename_lock);
++ subdir = d_ancestor(old_dentry, new_dentry);
++ read_sequnlock_excl(&rename_lock);
++ }
++ rcu_read_unlock();
++ return subdir;
+ }
+ EXPORT_SYMBOL(is_subdir);
+
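
is_subdir() above now does one optimistic pass under the sequence counter and falls back to the exclusive seqlock only if a concurrent rename invalidated the read, so it makes progress even on deep chains. A rough single-file sketch of that retry shape using C11 atomics — a toy version counter that glosses over the memory-ordering care a real seqlock takes:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;		/* even = stable, odd = write in flight */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_value;

static int read_value(void)
{
	unsigned int s = atomic_load(&seq);
	int v = shared_value;

	/* Try lockless once... */
	if (!(s & 1) && atomic_load(&seq) == s)
		return v;

	/* ...else take the lock so we finish even under constant writes. */
	pthread_mutex_lock(&lock);
	v = shared_value;
	pthread_mutex_unlock(&lock);
	return v;
}

static void write_value(int v)
{
	pthread_mutex_lock(&lock);
	atomic_fetch_add(&seq, 1);	/* odd: readers will retry */
	shared_value = v;
	atomic_fetch_add(&seq, 1);	/* even again: snapshot is stable */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	write_value(7);
	printf("%d\n", read_value());
	return 0;
}
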
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index 87b3753aa4b1e9..e40229c47fe587 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -84,6 +84,14 @@ int debugfs_file_get(struct dentry *dentry)
+ struct debugfs_fsdata *fsd;
+ void *d_fsd;
+
++ /*
++ * This could only happen if some debugfs user erroneously calls
++	 * debugfs_file_get() on a dentry that isn't even a file; let
++ * them know about it.
++ */
++ if (WARN_ON(!d_is_reg(dentry)))
++ return -EINVAL;
++
+ d_fsd = READ_ONCE(dentry->d_fsdata);
+ if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ fsd = d_fsd;
+@@ -939,7 +947,7 @@ static ssize_t debugfs_write_file_str(struct file *file, const char __user *user
+ new[pos + count] = '\0';
+ strim(new);
+
+- rcu_assign_pointer(*(char **)file->private_data, new);
++ rcu_assign_pointer(*(char __rcu **)file->private_data, new);
+ synchronize_rcu();
+ kfree(old);
+
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 83e57e9f9fa037..dcde4199a625d8 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -236,17 +236,19 @@ static const struct super_operations debugfs_super_operations = {
+
+ static void debugfs_release_dentry(struct dentry *dentry)
+ {
+- void *fsd = dentry->d_fsdata;
++ struct debugfs_fsdata *fsd = dentry->d_fsdata;
+
+- if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
+- kfree(dentry->d_fsdata);
++ if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
++ return;
++
++ kfree(fsd);
+ }
+
+ static struct vfsmount *debugfs_automount(struct path *path)
+ {
+- debugfs_automount_t f;
+- f = (debugfs_automount_t)path->dentry->d_fsdata;
+- return f(path->dentry, d_inode(path->dentry)->i_private);
++ struct debugfs_fsdata *fsd = path->dentry->d_fsdata;
++
++ return fsd->automount(path->dentry, d_inode(path->dentry)->i_private);
+ }
+
+ static const struct dentry_operations debugfs_dops = {
+@@ -634,13 +636,23 @@ struct dentry *debugfs_create_automount(const char *name,
+ void *data)
+ {
+ struct dentry *dentry = start_creating(name, parent);
++ struct debugfs_fsdata *fsd;
+ struct inode *inode;
+
+ if (IS_ERR(dentry))
+ return dentry;
+
++ fsd = kzalloc(sizeof(*fsd), GFP_KERNEL);
++ if (!fsd) {
++ failed_creating(dentry);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ fsd->automount = f;
++
+ if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
+ failed_creating(dentry);
++ kfree(fsd);
+ return ERR_PTR(-EPERM);
+ }
+
+@@ -648,13 +660,14 @@ struct dentry *debugfs_create_automount(const char *name,
+ if (unlikely(!inode)) {
+ pr_err("out of free dentries, can not create automount '%s'\n",
+ name);
++ kfree(fsd);
+ return failed_creating(dentry);
+ }
+
+ make_empty_dir_inode(inode);
+ inode->i_flags |= S_AUTOMOUNT;
+ inode->i_private = data;
+- dentry->d_fsdata = (void *)f;
++ dentry->d_fsdata = fsd;
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
+index 92af8ae3131346..f7c489b5a368c6 100644
+--- a/fs/debugfs/internal.h
++++ b/fs/debugfs/internal.h
+@@ -17,8 +17,14 @@ extern const struct file_operations debugfs_full_proxy_file_operations;
+
+ struct debugfs_fsdata {
+ const struct file_operations *real_fops;
+- refcount_t active_users;
+- struct completion active_users_drained;
++ union {
++ /* automount_fn is used when real_fops is NULL */
++ debugfs_automount_t automount;
++ struct {
++ refcount_t active_users;
++ struct completion active_users_drained;
++ };
++ };
+ };
+
+ /*
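
The debugfs_fsdata change above overlays the automount callback with the refcount/completion pair in an anonymous union, since a dentry is either an automount point or a regular file, never both. The same overlay with stand-in types:

#include <stdio.h>

typedef void *(*automount_fn)(void *dentry);

struct fsdata {
	const void *real_fops;		/* NULL for automount dentries */
	union {
		automount_fn automount;	/* used when real_fops == NULL */
		struct {
			int active_users;
			int users_drained;
		};
	};
};

int main(void)
{
	/* both members share storage, so the struct stays small */
	printf("sizeof(struct fsdata) = %zu\n", sizeof(struct fsdata));
	return 0;
}
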
+diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
+index 1f2f70a1b824eb..decedc4ee15f6d 100644
+--- a/fs/dlm/ast.c
++++ b/fs/dlm/ast.c
+@@ -12,6 +12,7 @@
+ #include <trace/events/dlm.h>
+
+ #include "dlm_internal.h"
++#include "lvb_table.h"
+ #include "memory.h"
+ #include "lock.h"
+ #include "user.h"
+@@ -42,6 +43,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
+ struct dlm_callback *cb;
++ int copy_lvb = 0;
+ int prev_mode;
+
+ if (flags & DLM_CB_BAST) {
+@@ -73,6 +75,17 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ goto out;
+ }
+ }
++ } else if (flags & DLM_CB_CAST) {
++ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
++ if (lkb->lkb_last_cast)
++ prev_mode = lkb->lkb_last_cb->mode;
++ else
++ prev_mode = -1;
++
++ if (!status && lkb->lkb_lksb->sb_lvbptr &&
++ dlm_lvb_operations[prev_mode + 1][mode + 1])
++ copy_lvb = 1;
++ }
+ }
+
+ cb = dlm_allocate_cb();
+@@ -85,6 +98,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ cb->mode = mode;
+ cb->sb_status = status;
+ cb->sb_flags = (sbflags & 0x000000FF);
++ cb->copy_lvb = copy_lvb;
+ kref_init(&cb->ref);
+ if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
+ rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
+diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
+index 5aabcb6f0f157c..d2c035387595a0 100644
+--- a/fs/dlm/debug_fs.c
++++ b/fs/dlm/debug_fs.c
+@@ -748,7 +748,7 @@ static int table_open4(struct inode *inode, struct file *file)
+ struct seq_file *seq;
+ int ret;
+
+- ret = seq_open(file, &format5_seq_ops);
++ ret = seq_open(file, &format4_seq_ops);
+ if (ret)
+ return ret;
+
+@@ -973,7 +973,8 @@ void dlm_delete_debug_comms_file(void *ctx)
+
+ void dlm_create_debug_file(struct dlm_ls *ls)
+ {
+- char name[DLM_LOCKSPACE_LEN + 8];
++ /* Reserve enough space for the longest file name */
++ char name[DLM_LOCKSPACE_LEN + sizeof("_queued_asts")];
+
+ /* format 1 */
+
+@@ -986,7 +987,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 2 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_locks", ls->ls_name);
+
+ ls->ls_debug_locks_dentry = debugfs_create_file(name,
+ 0644,
+@@ -997,7 +998,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 3 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_all", ls->ls_name);
+
+ ls->ls_debug_all_dentry = debugfs_create_file(name,
+ S_IFREG | S_IRUGO,
+@@ -1008,7 +1009,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 4 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_toss", ls->ls_name);
+
+ ls->ls_debug_toss_dentry = debugfs_create_file(name,
+ S_IFREG | S_IRUGO,
+@@ -1017,7 +1018,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ &format4_fops);
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_waiters", ls->ls_name);
+
+ ls->ls_debug_waiters_dentry = debugfs_create_file(name,
+ 0644,
+@@ -1028,7 +1029,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
+ /* format 5 */
+
+ memset(name, 0, sizeof(name));
+- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_queued_asts", ls->ls_name);
++ snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name);
+
+ ls->ls_debug_queued_asts_dentry = debugfs_create_file(name,
+ 0644,
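
The dlm debugfs fix above sizes the buffer as DLM_LOCKSPACE_LEN + sizeof("_queued_asts"); sizeof on a string literal counts the terminating NUL, so the longest suffix always fits and snprintf(name, sizeof(name), ...) can no longer truncate. Minimal sketch:

#include <stdio.h>

#define LOCKSPACE_LEN 64

int main(void)
{
	/* sizeof("...") includes the '\0', so suffix plus NUL always fit */
	char name[LOCKSPACE_LEN + sizeof("_queued_asts")];
	const char *ls_name = "mycluster";

	snprintf(name, sizeof(name), "%s%s", ls_name, "_queued_asts");
	printf("%s (buf %zu bytes)\n", name, sizeof(name));
	return 0;
}
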
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index dfc444dad3298a..511d0b984f580e 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -222,6 +222,7 @@ struct dlm_callback {
+ int sb_status; /* copy to lksb status */
+ uint8_t sb_flags; /* copy to lksb flags */
+ int8_t mode; /* rq mode of bast, gr mode of cast */
++ int copy_lvb;
+
+ struct list_head list;
+ struct kref ref;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index f7bc22e74db274..32dbd1a828d010 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1805,8 +1805,8 @@ static int dlm_tcp_bind(struct socket *sock)
+ memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
+ make_sockaddr(&src_addr, 0, &addr_len);
+
+- result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
+- addr_len);
++ result = kernel_bind(sock, (struct sockaddr *)&src_addr,
++ addr_len);
+ if (result < 0) {
+ /* This *may* not indicate a critical error */
+ log_print("could not bind for connect: %d", result);
+@@ -1818,7 +1818,7 @@ static int dlm_tcp_bind(struct socket *sock)
+ static int dlm_tcp_connect(struct connection *con, struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+ {
+- return sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
++ return kernel_connect(sock, addr, addr_len, O_NONBLOCK);
+ }
+
+ static int dlm_tcp_listen_validate(void)
+@@ -1850,8 +1850,8 @@ static int dlm_tcp_listen_bind(struct socket *sock)
+
+ /* Bind to our port */
+ make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+- return sock->ops->bind(sock, (struct sockaddr *)&dlm_local_addr[0],
+- addr_len);
++ return kernel_bind(sock, (struct sockaddr *)&dlm_local_addr[0],
++ addr_len);
+ }
+
+ static const struct dlm_proto_ops dlm_tcp_ops = {
+@@ -1876,12 +1876,12 @@ static int dlm_sctp_connect(struct connection *con, struct socket *sock,
+ int ret;
+
+ /*
+- * Make sock->ops->connect() function return in specified time,
++ * Make kernel_connect() function return in specified time,
+ * since O_NONBLOCK argument in connect() function does not work here,
+ * then, we should restore the default value of this attribute.
+ */
+ sock_set_sndtimeo(sock->sk, 5);
+- ret = sock->ops->connect(sock, addr, addr_len, 0);
++ ret = kernel_connect(sock, addr, addr_len, 0);
+ sock_set_sndtimeo(sock->sk, 0);
+ return ret;
+ }
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index f641b36a36db0c..2247ebb61be1ee 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -337,13 +337,21 @@ static struct midcomms_node *nodeid2node(int nodeid)
+
+ int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+ {
+- int ret, r = nodeid_hash(nodeid);
++ int ret, idx, r = nodeid_hash(nodeid);
+ struct midcomms_node *node;
+
+ ret = dlm_lowcomms_addr(nodeid, addr, len);
+ if (ret)
+ return ret;
+
++ idx = srcu_read_lock(&nodes_srcu);
++ node = __find_node(nodeid, r);
++ if (node) {
++ srcu_read_unlock(&nodes_srcu, idx);
++ return 0;
++ }
++ srcu_read_unlock(&nodes_srcu, idx);
++
+ node = kmalloc(sizeof(*node), GFP_NOFS);
+ if (!node)
+ return -ENOMEM;
+@@ -1030,15 +1038,15 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
+
+ break;
+ case DLM_VERSION_3_2:
++ /* send ack back if necessary */
++ dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
++
+ msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
+ ppc);
+ if (!msg) {
+ dlm_free_mhandle(mh);
+ goto err;
+ }
+-
+- /* send ack back if necessary */
+- dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
+ break;
+ default:
+ dlm_free_mhandle(mh);
+@@ -1260,12 +1268,23 @@ void dlm_midcomms_remove_member(int nodeid)
+
+ idx = srcu_read_lock(&nodes_srcu);
+ node = nodeid2node(nodeid);
+- if (WARN_ON_ONCE(!node)) {
++ /* in case of dlm_midcomms_close() removes node */
++ if (!node) {
+ srcu_read_unlock(&nodes_srcu, idx);
+ return;
+ }
+
+ spin_lock(&node->state_lock);
++	/* handle the case where dlm_midcomms_addr() created the node
++	 * but no users were counted because dlm_midcomms_close()
++	 * removed the node in the meantime
++	 */
++ if (!node->users) {
++ spin_unlock(&node->state_lock);
++ srcu_read_unlock(&nodes_srcu, idx);
++ return;
++ }
++
+ node->users--;
+ pr_debug("node %d users dec count %d\n", nodeid, node->users);
+
+@@ -1386,10 +1405,16 @@ void dlm_midcomms_shutdown(void)
+ midcomms_shutdown(node);
+ }
+ }
+- srcu_read_unlock(&nodes_srcu, idx);
+- mutex_unlock(&close_lock);
+
+ dlm_lowcomms_shutdown();
++
++ for (i = 0; i < CONN_HASH_SIZE; i++) {
++ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
++ midcomms_node_reset(node);
++ }
++ }
++ srcu_read_unlock(&nodes_srcu, idx);
++ mutex_unlock(&close_lock);
+ }
+
+ int dlm_midcomms_close(int nodeid)
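
dlm_midcomms_addr() above now looks the node up before allocating and returns early if it already exists, making repeated calls for the same nodeid idempotent. A generic sketch of that lookup-then-allocate shape (single-threaded here; the kernel does the lookup under SRCU):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int nodeid;
	struct node *next;
};

static struct node *head;

static struct node *find_node(int nodeid)
{
	for (struct node *n = head; n; n = n->next)
		if (n->nodeid == nodeid)
			return n;
	return NULL;
}

/* idempotent: a second call for the same nodeid is a no-op */
static int add_node(int nodeid)
{
	struct node *n = find_node(nodeid);

	if (n)
		return 0;
	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->nodeid = nodeid;
	n->next = head;
	head = n;
	return 0;
}

int main(void)
{
	add_node(3);
	add_node(3);			/* second add is absorbed */
	printf("found: %d\n", find_node(3) != NULL);
	return 0;
}
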
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index 695e691b38b318..12a483deeef5ef 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -21,7 +21,6 @@
+ #include "dlm_internal.h"
+ #include "lockspace.h"
+ #include "lock.h"
+-#include "lvb_table.h"
+ #include "user.h"
+ #include "ast.h"
+ #include "config.h"
+@@ -806,8 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ struct dlm_lkb *lkb;
+ DECLARE_WAITQUEUE(wait, current);
+ struct dlm_callback *cb;
+- int rv, copy_lvb = 0;
+- int old_mode, new_mode;
++ int rv, ret;
+
+ if (count == sizeof(struct dlm_device_version)) {
+ rv = copy_version_to_user(buf, count);
+@@ -864,9 +862,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+
+ lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
+
+- /* rem_lkb_callback sets a new lkb_last_cast */
+- old_mode = lkb->lkb_last_cast->mode;
+-
+ rv = dlm_dequeue_lkb_callback(lkb, &cb);
+ switch (rv) {
+ case DLM_DEQUEUE_CALLBACK_EMPTY:
+@@ -895,20 +890,14 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ if (cb->flags & DLM_CB_BAST) {
+ trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
+ } else if (cb->flags & DLM_CB_CAST) {
+- new_mode = cb->mode;
+-
+- if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
+- dlm_lvb_operations[old_mode + 1][new_mode + 1])
+- copy_lvb = 1;
+-
+ lkb->lkb_lksb->sb_status = cb->sb_status;
+ lkb->lkb_lksb->sb_flags = cb->sb_flags;
+ trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
+ }
+
+- rv = copy_result_to_user(lkb->lkb_ua,
+- test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
+- cb->flags, cb->mode, copy_lvb, buf, count);
++ ret = copy_result_to_user(lkb->lkb_ua,
++ test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
++ cb->flags, cb->mode, cb->copy_lvb, buf, count);
+
+ kref_put(&cb->ref, dlm_release_callback);
+
+@@ -916,7 +905,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ if (rv == DLM_DEQUEUE_CALLBACK_LAST)
+ dlm_put_lkb(lkb);
+
+- return rv;
++ return ret;
+ }
+
+ static __poll_t device_poll(struct file *file, poll_table *wait)
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 03bd55069d8600..2fe0f3af1a08ec 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1949,16 +1949,6 @@ int ecryptfs_encrypt_and_encode_filename(
+ return rc;
+ }
+
+-static bool is_dot_dotdot(const char *name, size_t name_size)
+-{
+- if (name_size == 1 && name[0] == '.')
+- return true;
+- else if (name_size == 2 && name[0] == '.' && name[1] == '.')
+- return true;
+-
+- return false;
+-}
+-
+ /**
+ * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
+ * @plaintext_name: The plaintext name
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 992d9c7e64ae66..795e9fe2f72128 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -78,6 +78,14 @@ static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
+
+ if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
+ return ERR_PTR(-EXDEV);
++
++ /* Reject dealing with casefold directories. */
++ if (IS_CASEFOLDED(lower_inode)) {
++ pr_err_ratelimited("%s: Can't handle casefolded directory.\n",
++ __func__);
++ return ERR_PTR(-EREMOTE);
++ }
++
+ if (!igrab(lower_inode))
+ return ERR_PTR(-ESTALE);
+ inode = iget5_locked(sb, (unsigned long)lower_inode,
+@@ -998,6 +1006,14 @@ static int ecryptfs_getattr_link(struct mnt_idmap *idmap,
+ return rc;
+ }
+
++static int ecryptfs_do_getattr(const struct path *path, struct kstat *stat,
++ u32 request_mask, unsigned int flags)
++{
++ if (flags & AT_GETATTR_NOSEC)
++ return vfs_getattr_nosec(path, stat, request_mask, flags);
++ return vfs_getattr(path, stat, request_mask, flags);
++}
++
+ static int ecryptfs_getattr(struct mnt_idmap *idmap,
+ const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
+@@ -1006,8 +1022,8 @@ static int ecryptfs_getattr(struct mnt_idmap *idmap,
+ struct kstat lower_stat;
+ int rc;
+
+- rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat,
+- request_mask, flags);
++ rc = ecryptfs_do_getattr(ecryptfs_dentry_to_lower_path(dentry),
++ &lower_stat, request_mask, flags);
+ if (!rc) {
+ fsstack_copy_attr_all(d_inode(dentry),
+ ecryptfs_inode_to_lower(d_inode(dentry)));
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 3fe41964c0d8d9..7f9f68c00ef63c 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -300,9 +300,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
+ * | Key Identifier Size | 1 or 2 bytes |
+ * | Key Identifier | arbitrary |
+ * | File Encryption Key Size | 1 or 2 bytes |
++ * | Cipher Code | 1 byte |
+ * | File Encryption Key | arbitrary |
++ * | Checksum | 2 bytes |
+ */
+- data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
++ data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ *packet = kmalloc(data_len, GFP_KERNEL);
+ message = *packet;
+ if (!message) {
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 996271473609a0..d59d9670965cb7 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/magic.h>
+ #include <linux/statfs.h>
++#include <linux/printk.h>
+
+ #include "internal.h"
+
+@@ -275,8 +276,19 @@ static int efivarfs_get_tree(struct fs_context *fc)
+ return get_tree_single(fc, efivarfs_fill_super);
+ }
+
++static int efivarfs_reconfigure(struct fs_context *fc)
++{
++ if (!efivar_supports_writes() && !(fc->sb_flags & SB_RDONLY)) {
++ pr_err("Firmware does not support SetVariableRT. Can not remount with rw\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ static const struct fs_context_operations efivarfs_context_ops = {
+ .get_tree = efivarfs_get_tree,
++ .reconfigure = efivarfs_reconfigure,
+ };
+
+ static int efivarfs_init_fs_context(struct fs_context *fc)
+@@ -287,6 +299,8 @@ static int efivarfs_init_fs_context(struct fs_context *fc)
+
+ static void efivarfs_kill_sb(struct super_block *sb)
+ {
++ struct efivarfs_fs_info *sfi = sb->s_fs_info;
++
+ kill_litter_super(sb);
+
+ if (!efivar_is_available())
+@@ -294,6 +308,7 @@ static void efivarfs_kill_sb(struct super_block *sb)
+
+ /* Remove all entries and destroy */
+ efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL);
++ kfree(sfi);
+ }
+
+ static struct file_system_type efivarfs_type = {
+diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
+index 9e4f47808bd5ad..13bc6069895571 100644
+--- a/fs/efivarfs/vars.c
++++ b/fs/efivarfs/vars.c
+@@ -372,7 +372,7 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
+ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicates, struct list_head *head)
+ {
+- unsigned long variable_name_size = 1024;
++ unsigned long variable_name_size = 512;
+ efi_char16_t *variable_name;
+ efi_status_t status;
+ efi_guid_t vendor_guid;
+@@ -389,12 +389,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ goto free;
+
+ /*
+- * Per EFI spec, the maximum storage allocated for both
+- * the variable name and variable data is 1024 bytes.
++ * A small set of old UEFI implementations reject sizes
++	 * above a certain threshold; the lowest seen in the wild
++ * is 512.
+ */
+
+ do {
+- variable_name_size = 1024;
++ variable_name_size = 512;
+
+ status = efivar_get_next_variable(&variable_name_size,
+ variable_name,
+@@ -431,9 +432,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ break;
+ case EFI_NOT_FOUND:
+ break;
++ case EFI_BUFFER_TOO_SMALL:
++ pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
++ variable_name_size);
++ status = EFI_NOT_FOUND;
++ break;
+ default:
+- printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
+- status);
++ pr_warn("efivars: get_next_variable: status=%lx\n", status);
+ status = EFI_NOT_FOUND;
+ break;
+ }
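
efivar_init() above shrinks the name buffer to 512 bytes (the lowest limit seen in old firmware) and resets variable_name_size before each GetNextVariableName() call, since firmware overwrites it with the actual length; an oversized name now warns and stops instead of looping. The reset-each-iteration loop against a stand-in enumerator, not the real EFI API:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 512
enum status { OK, NOT_FOUND, BUFFER_TOO_SMALL };

/* stand-in for GetNextVariableName(): fills name, updates *size */
static enum status get_next(unsigned long *size, char *name, int *cursor)
{
	static const char *vars[] = { "Boot0000", "Timeout" };

	if (*cursor >= 2)
		return NOT_FOUND;
	if (strlen(vars[*cursor]) + 1 > *size)
		return BUFFER_TOO_SMALL;
	strcpy(name, vars[*cursor]);
	*size = strlen(name) + 1;	/* firmware reports the actual length */
	(*cursor)++;
	return OK;
}

int main(void)
{
	char name[BUF_SIZE];
	int cursor = 0;

	for (;;) {
		unsigned long size = BUF_SIZE;	/* reset every iteration */
		enum status st = get_next(&size, name, &cursor);

		if (st == BUFFER_TOO_SMALL) {
			fprintf(stderr, "name exceeds %d bytes, stopping\n",
				BUF_SIZE);
			break;
		}
		if (st == NOT_FOUND)
			break;
		printf("%s\n", name);
	}
	return 0;
}
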
+diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
+index 349c3316ae6bbb..279933e007d217 100644
+--- a/fs/erofs/compress.h
++++ b/fs/erofs/compress.h
+@@ -21,6 +21,8 @@ struct z_erofs_decompress_req {
+ };
+
+ struct z_erofs_decompressor {
++ int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
++ void *data, int size);
+ int (*decompress)(struct z_erofs_decompress_req *rq,
+ struct page **pagepool);
+ char *name;
+@@ -92,6 +94,10 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+ extern const struct z_erofs_decompressor erofs_decompressors[];
+
+ /* prototypes for specific algorithms */
++int z_erofs_load_lzma_config(struct super_block *sb,
++ struct erofs_super_block *dsb, void *data, int size);
++int z_erofs_load_deflate_config(struct super_block *sb,
++ struct erofs_super_block *dsb, void *data, int size);
+ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pagepool);
+ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+diff --git a/fs/erofs/data.c b/fs/erofs/data.c
+index 0c2c99c58b5e3a..19ab9bb3a9a0e1 100644
+--- a/fs/erofs/data.c
++++ b/fs/erofs/data.c
+@@ -222,7 +222,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+ up_read(&devs->rwsem);
+ return 0;
+ }
+- map->m_bdev = dif->bdev;
++ map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
+ map->m_daxdev = dif->dax_dev;
+ map->m_dax_part_off = dif->dax_part_off;
+ map->m_fscache = dif->fscache;
+@@ -240,7 +240,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
+ if (map->m_pa >= startoff &&
+ map->m_pa < startoff + length) {
+ map->m_pa -= startoff;
+- map->m_bdev = dif->bdev;
++ map->m_bdev = dif->bdev_handle ?
++ dif->bdev_handle->bdev : NULL;
+ map->m_daxdev = dif->dax_dev;
+ map->m_dax_part_off = dif->dax_part_off;
+ map->m_fscache = dif->fscache;
+@@ -448,5 +449,6 @@ const struct file_operations erofs_file_fops = {
+ .llseek = generic_file_llseek,
+ .read_iter = erofs_file_read_iter,
+ .mmap = erofs_file_mmap,
++ .get_unmapped_area = thp_get_unmapped_area,
+ .splice_read = filemap_splice_read,
+ };
+diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
+index 332ec5f74002b8..aa59788a61e6e4 100644
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -24,11 +24,11 @@ struct z_erofs_lz4_decompress_ctx {
+ unsigned int oend;
+ };
+
+-int z_erofs_load_lz4_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_lz4_cfgs *lz4, int size)
++static int z_erofs_load_lz4_config(struct super_block *sb,
++ struct erofs_super_block *dsb, void *data, int size)
+ {
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
++ struct z_erofs_lz4_cfgs *lz4 = data;
+ u16 distance;
+
+ if (lz4) {
+@@ -122,11 +122,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+- void *inpage, unsigned int *inputmargin, int *maptype,
+- bool may_inplace)
++ void *inpage, void *out, unsigned int *inputmargin,
++ int *maptype, bool may_inplace)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+- unsigned int omargin, total, i, j;
++ unsigned int omargin, total, i;
+ struct page **in;
+ void *src, *tmp;
+
+@@ -136,12 +136,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
+ goto docopy;
+
+- for (i = 0; i < ctx->inpages; ++i) {
+- DBG_BUGON(rq->in[i] == NULL);
+- for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
+- if (rq->out[j] == rq->in[i])
+- goto docopy;
+- }
++ for (i = 0; i < ctx->inpages; ++i)
++ if (rq->out[ctx->outpages - ctx->inpages + i] !=
++ rq->in[i])
++ goto docopy;
++ kunmap_local(inpage);
++ *maptype = 3;
++ return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
+ }
+
+ if (ctx->inpages <= 1) {
+@@ -149,7 +150,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+ return inpage;
+ }
+ kunmap_local(inpage);
+- might_sleep();
+ src = erofs_vm_map_ram(rq->in, ctx->inpages);
+ if (!src)
+ return ERR_PTR(-ENOMEM);
+@@ -205,12 +205,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+ }
+
+ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+- u8 *out)
++ u8 *dst)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+ bool support_0padding = false, may_inplace = false;
+ unsigned int inputmargin;
+- u8 *headpage, *src;
++ u8 *out, *headpage, *src;
+ int ret, maptype;
+
+ DBG_BUGON(*rq->in == NULL);
+@@ -231,11 +231,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ }
+
+ inputmargin = rq->pageofs_in;
+- src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
++ src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
+ &maptype, may_inplace);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
++ out = dst + rq->pageofs_out;
+ /* legacy format could compress extra data in a pcluster. */
+ if (rq->partial_decoding || !support_0padding)
+ ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+@@ -247,15 +248,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ if (ret != rq->outputsize) {
+ erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
+ ret, rq->inputsize, inputmargin, rq->outputsize);
+-
+- print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
+- 16, 1, src + inputmargin, rq->inputsize, true);
+- print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
+- 16, 1, out, rq->outputsize, true);
+-
+ if (ret >= 0)
+ memset(out + ret, 0, rq->outputsize - ret);
+- ret = -EIO;
++ ret = -EFSCORRUPTED;
+ } else {
+ ret = 0;
+ }
+@@ -266,7 +261,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+ vm_unmap_ram(src, ctx->inpages);
+ } else if (maptype == 2) {
+ erofs_put_pcpubuf(src);
+- } else {
++ } else if (maptype != 3) {
+ DBG_BUGON(1);
+ return -EFAULT;
+ }
+@@ -309,7 +304,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
+ }
+
+ dstmap_out:
+- ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
++ ret = z_erofs_lz4_decompress_mem(&ctx, dst);
+ if (!dst_maptype)
+ kunmap_local(dst);
+ else if (dst_maptype == 2)
+@@ -370,19 +365,75 @@ const struct z_erofs_decompressor erofs_decompressors[] = {
+ .name = "interlaced"
+ },
+ [Z_EROFS_COMPRESSION_LZ4] = {
++ .config = z_erofs_load_lz4_config,
+ .decompress = z_erofs_lz4_decompress,
+ .name = "lz4"
+ },
+ #ifdef CONFIG_EROFS_FS_ZIP_LZMA
+ [Z_EROFS_COMPRESSION_LZMA] = {
++ .config = z_erofs_load_lzma_config,
+ .decompress = z_erofs_lzma_decompress,
+ .name = "lzma"
+ },
+ #endif
+ #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
+ [Z_EROFS_COMPRESSION_DEFLATE] = {
++ .config = z_erofs_load_deflate_config,
+ .decompress = z_erofs_deflate_decompress,
+ .name = "deflate"
+ },
+ #endif
+ };
++
++int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
++{
++ struct erofs_sb_info *sbi = EROFS_SB(sb);
++ struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
++ unsigned int algs, alg;
++ erofs_off_t offset;
++ int size, ret = 0;
++
++ if (!erofs_sb_has_compr_cfgs(sbi)) {
++ sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
++ return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
++ }
++
++ sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
++ if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
++ erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
++ sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
++ return -EOPNOTSUPP;
++ }
++
++ erofs_init_metabuf(&buf, sb);
++ offset = EROFS_SUPER_OFFSET + sbi->sb_size;
++ alg = 0;
++ for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
++ void *data;
++
++ if (!(algs & 1))
++ continue;
++
++ data = erofs_read_metadata(sb, &buf, &offset, &size);
++ if (IS_ERR(data)) {
++ ret = PTR_ERR(data);
++ break;
++ }
++
++ if (alg >= ARRAY_SIZE(erofs_decompressors) ||
++ !erofs_decompressors[alg].config) {
++ erofs_err(sb, "algorithm %d isn't enabled on this kernel",
++ alg);
++ ret = -EOPNOTSUPP;
++ } else {
++ ret = erofs_decompressors[alg].config(sb,
++ dsb, data, size);
++ }
++
++ kfree(data);
++ if (ret)
++ break;
++ }
++ erofs_put_metabuf(&buf);
++ return ret;
++}
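
z_erofs_parse_cfgs() above walks the on-disk algorithm bitmap one bit at a time (algs >>= 1, ++alg) and dispatches through the per-algorithm .config hook in the decompressor table, replacing the old switch. A compact sketch of the table-plus-bitmap dispatch with dummy handlers:

#include <stdio.h>

struct decompressor {
	int (*config)(int size);	/* NULL when not built in */
	const char *name;
};

static int lz4_config(int size)  { printf("lz4 cfg, %d bytes\n", size);  return 0; }
static int lzma_config(int size) { printf("lzma cfg, %d bytes\n", size); return 0; }

static const struct decompressor decompressors[] = {
	{ lz4_config,  "lz4"  },
	{ lzma_config, "lzma" },
};

#define NR_ALGS (sizeof(decompressors) / sizeof(decompressors[0]))

/* walk the set bits of the algorithm bitmap, lowest bit first */
static int parse_cfgs(unsigned int available_algs)
{
	unsigned int algs, alg = 0;

	for (algs = available_algs; algs; algs >>= 1, ++alg) {
		if (!(algs & 1))
			continue;
		if (alg >= NR_ALGS || !decompressors[alg].config) {
			fprintf(stderr, "algorithm %u isn't enabled\n", alg);
			return -1;
		}
		if (decompressors[alg].config(64))
			return -1;
	}
	return 0;
}

int main(void)
{
	return parse_cfgs(0x3);		/* bits 0 and 1 set: lz4 + lzma */
}
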
+diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
+index 19e5bdeb30b606..aac2c837ef350b 100644
+--- a/fs/erofs/decompressor_deflate.c
++++ b/fs/erofs/decompressor_deflate.c
+@@ -47,39 +47,16 @@ int __init z_erofs_deflate_init(void)
+ /* by default, use # of possible CPUs instead */
+ if (!z_erofs_deflate_nstrms)
+ z_erofs_deflate_nstrms = num_possible_cpus();
+-
+- for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+- ++z_erofs_deflate_avail_strms) {
+- struct z_erofs_deflate *strm;
+-
+- strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+- if (!strm)
+- goto out_failed;
+-
+- /* XXX: in-kernel zlib cannot shrink windowbits currently */
+- strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+- if (!strm->z.workspace) {
+- kfree(strm);
+- goto out_failed;
+- }
+-
+- spin_lock(&z_erofs_deflate_lock);
+- strm->next = z_erofs_deflate_head;
+- z_erofs_deflate_head = strm;
+- spin_unlock(&z_erofs_deflate_lock);
+- }
+ return 0;
+-
+-out_failed:
+- pr_err("failed to allocate zlib workspace\n");
+- z_erofs_deflate_exit();
+- return -ENOMEM;
+ }
+
+ int z_erofs_load_deflate_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_deflate_cfgs *dfl, int size)
++ struct erofs_super_block *dsb, void *data, int size)
+ {
++ struct z_erofs_deflate_cfgs *dfl = data;
++ static DEFINE_MUTEX(deflate_resize_mutex);
++ static bool inited;
++
+ if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
+ erofs_err(sb, "invalid deflate cfgs, size=%u", size);
+ return -EINVAL;
+@@ -89,9 +66,36 @@ int z_erofs_load_deflate_config(struct super_block *sb,
+ erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
+ return -EOPNOTSUPP;
+ }
++ mutex_lock(&deflate_resize_mutex);
++ if (!inited) {
++ for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
++ ++z_erofs_deflate_avail_strms) {
++ struct z_erofs_deflate *strm;
++
++ strm = kzalloc(sizeof(*strm), GFP_KERNEL);
++ if (!strm)
++ goto failed;
++ /* XXX: in-kernel zlib cannot customize windowbits */
++ strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
++ if (!strm->z.workspace) {
++ kfree(strm);
++ goto failed;
++ }
+
++ spin_lock(&z_erofs_deflate_lock);
++ strm->next = z_erofs_deflate_head;
++ z_erofs_deflate_head = strm;
++ spin_unlock(&z_erofs_deflate_lock);
++ }
++ inited = true;
++ }
++ mutex_unlock(&deflate_resize_mutex);
+ erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
+ return 0;
++failed:
++ mutex_unlock(&deflate_resize_mutex);
++ z_erofs_deflate_exit();
++ return -ENOMEM;
+ }
+
+ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
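
The deflate rework above moves stream-pool allocation from module init to the first mount that needs it, guarding the one-shot setup with a static mutex and an inited flag. The same lazy-init shape in portable C with pthreads — a toy resource, not the erofs stream pool:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int inited;
static char *pool;			/* the lazily created resource */

static int load_config(void)
{
	int ret = 0;

	pthread_mutex_lock(&init_lock);
	if (!inited) {			/* only the first caller pays */
		pool = malloc(4096);
		if (!pool)
			ret = -1;
		else
			inited = 1;
	}
	pthread_mutex_unlock(&init_lock);
	return ret;
}

int main(void)
{
	load_config();
	load_config();			/* second call is a cheap no-op */
	printf("inited=%d\n", inited);
	free(pool);
	return 0;
}
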
+diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
+index dee10d22ada96e..ba4ec73f4aaec8 100644
+--- a/fs/erofs/decompressor_lzma.c
++++ b/fs/erofs/decompressor_lzma.c
+@@ -72,10 +72,10 @@ int __init z_erofs_lzma_init(void)
+ }
+
+ int z_erofs_load_lzma_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_lzma_cfgs *lzma, int size)
++ struct erofs_super_block *dsb, void *data, int size)
+ {
+ static DEFINE_MUTEX(lzma_resize_mutex);
++ struct z_erofs_lzma_cfgs *lzma = data;
+ unsigned int dict_size, i;
+ struct z_erofs_lzma *strm, *head = NULL;
+ int err;
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index 87ff35bff8d5bb..afc37c9029ce78 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -3,6 +3,7 @@
+ * Copyright (C) 2022, Alibaba Cloud
+ * Copyright (C) 2022, Bytedance Inc. All rights reserved.
+ */
++#include <linux/pseudo_fs.h>
+ #include <linux/fscache.h>
+ #include "internal.h"
+
+@@ -12,6 +13,18 @@ static LIST_HEAD(erofs_domain_list);
+ static LIST_HEAD(erofs_domain_cookies_list);
+ static struct vfsmount *erofs_pseudo_mnt;
+
++static int erofs_anon_init_fs_context(struct fs_context *fc)
++{
++ return init_pseudo(fc, EROFS_SUPER_MAGIC) ? 0 : -ENOMEM;
++}
++
++static struct file_system_type erofs_anon_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "pseudo_erofs",
++ .init_fs_context = erofs_anon_init_fs_context,
++ .kill_sb = kill_anon_super,
++};
++
+ struct erofs_fscache_request {
+ struct erofs_fscache_request *primary;
+ struct netfs_cache_resources cache_resources;
+@@ -381,11 +394,12 @@ static int erofs_fscache_init_domain(struct super_block *sb)
+ goto out;
+
+ if (!erofs_pseudo_mnt) {
+- erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
+- if (IS_ERR(erofs_pseudo_mnt)) {
+- err = PTR_ERR(erofs_pseudo_mnt);
++ struct vfsmount *mnt = kern_mount(&erofs_anon_fs_type);
++ if (IS_ERR(mnt)) {
++ err = PTR_ERR(mnt);
+ goto out;
+ }
++ erofs_pseudo_mnt = mnt;
+ }
+
+ domain->volume = sbi->volume;
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index edc8ec7581b8f0..9e40bee3682f7d 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -205,12 +205,14 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
+ unsigned int m_pofs)
+ {
+ struct erofs_inode *vi = EROFS_I(inode);
+- unsigned int bsz = i_blocksize(inode);
++ loff_t off;
+ char *lnk;
+
+- /* if it cannot be handled with fast symlink scheme */
+- if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
+- inode->i_size >= bsz || inode->i_size < 0) {
++ m_pofs += vi->xattr_isize;
++ /* check if it cannot be handled with fast symlink scheme */
++ if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 ||
++ check_add_overflow(m_pofs, inode->i_size, &off) ||
++ off > i_blocksize(inode)) {
+ inode->i_op = &erofs_symlink_iops;
+ return 0;
+ }
+@@ -219,16 +221,6 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr,
+ if (!lnk)
+ return -ENOMEM;
+
+- m_pofs += vi->xattr_isize;
+- /* inline symlink data shouldn't cross block boundary */
+- if (m_pofs + inode->i_size > bsz) {
+- kfree(lnk);
+- erofs_err(inode->i_sb,
+- "inline data cross block boundary @ nid %llu",
+- vi->nid);
+- DBG_BUGON(1);
+- return -EFSCORRUPTED;
+- }
+ memcpy(lnk, kaddr + m_pofs, inode->i_size);
+ lnk[inode->i_size] = '\0';
+
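
The fast-symlink check above uses check_add_overflow() so a crafted i_size cannot wrap the offset arithmetic past the block-size test; in the kernel that helper boils down to the gcc/clang builtin. A standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* overflow-checked add: returns true (and leaves *sum unusable) on wrap */
static bool add_overflows(uint32_t a, uint32_t b, uint32_t *sum)
{
	return __builtin_add_overflow(a, b, sum);
}

int main(void)
{
	uint32_t off;

	if (add_overflows(0xfffffff0u, 0x100u, &off))
		puts("overflow: fall back to the slow path");
	else
		printf("off=%u\n", off);
	return 0;
}
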
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index 4ff88d0dd980fb..787cc9ff902944 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -47,7 +47,7 @@ typedef u32 erofs_blk_t;
+ struct erofs_device_info {
+ char *path;
+ struct erofs_fscache *fscache;
+- struct block_device *bdev;
++ struct bdev_handle *bdev_handle;
+ struct dax_device *dax_dev;
+ u64 dax_part_off;
+
+@@ -82,13 +82,6 @@ struct erofs_dev_context {
+ bool flatdev;
+ };
+
+-struct erofs_fs_context {
+- struct erofs_mount_opts opt;
+- struct erofs_dev_context *devs;
+- char *fsid;
+- char *domain_id;
+-};
+-
+ /* all filesystem-wide lz4 configurations */
+ struct erofs_sb_lz4_info {
+ /* # of pages needed for EROFS lz4 rolling decompression */
+@@ -385,7 +378,6 @@ struct erofs_map_dev {
+ unsigned int m_deviceid;
+ };
+
+-extern struct file_system_type erofs_fs_type;
+ extern const struct super_operations erofs_sops;
+
+ extern const struct address_space_operations erofs_raw_access_aops;
+@@ -469,9 +461,6 @@ int __init z_erofs_init_zip_subsystem(void);
+ void z_erofs_exit_zip_subsystem(void);
+ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp);
+-int z_erofs_load_lz4_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_lz4_cfgs *lz4, int len);
+ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ int flags);
+ void *erofs_get_pcpubuf(unsigned int requiredpages);
+@@ -480,6 +469,7 @@ int erofs_pcpubuf_growsize(unsigned int nrpages);
+ void __init erofs_pcpubuf_init(void);
+ void erofs_pcpubuf_exit(void);
+ int erofs_init_managed_cache(struct super_block *sb);
++int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
+ #else
+ static inline void erofs_shrinker_register(struct super_block *sb) {}
+ static inline void erofs_shrinker_unregister(struct super_block *sb) {}
+@@ -487,16 +477,6 @@ static inline int erofs_init_shrinker(void) { return 0; }
+ static inline void erofs_exit_shrinker(void) {}
+ static inline int z_erofs_init_zip_subsystem(void) { return 0; }
+ static inline void z_erofs_exit_zip_subsystem(void) {}
+-static inline int z_erofs_load_lz4_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_lz4_cfgs *lz4, int len)
+-{
+- if (lz4 || dsb->u1.lz4_max_distance) {
+- erofs_err(sb, "lz4 algorithm isn't enabled");
+- return -EINVAL;
+- }
+- return 0;
+-}
+ static inline void erofs_pcpubuf_init(void) {}
+ static inline void erofs_pcpubuf_exit(void) {}
+ static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
+@@ -505,41 +485,17 @@ static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
+ #ifdef CONFIG_EROFS_FS_ZIP_LZMA
+ int __init z_erofs_lzma_init(void);
+ void z_erofs_lzma_exit(void);
+-int z_erofs_load_lzma_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_lzma_cfgs *lzma, int size);
+ #else
+ static inline int z_erofs_lzma_init(void) { return 0; }
+ static inline int z_erofs_lzma_exit(void) { return 0; }
+-static inline int z_erofs_load_lzma_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_lzma_cfgs *lzma, int size) {
+- if (lzma) {
+- erofs_err(sb, "lzma algorithm isn't enabled");
+- return -EINVAL;
+- }
+- return 0;
+-}
+ #endif /* !CONFIG_EROFS_FS_ZIP_LZMA */
+
+ #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
+ int __init z_erofs_deflate_init(void);
+ void z_erofs_deflate_exit(void);
+-int z_erofs_load_deflate_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_deflate_cfgs *dfl, int size);
+ #else
+ static inline int z_erofs_deflate_init(void) { return 0; }
+ static inline int z_erofs_deflate_exit(void) { return 0; }
+-static inline int z_erofs_load_deflate_config(struct super_block *sb,
+- struct erofs_super_block *dsb,
+- struct z_erofs_deflate_cfgs *dfl, int size) {
+- if (dfl) {
+- erofs_err(sb, "deflate algorithm isn't enabled");
+- return -EINVAL;
+- }
+- return 0;
+-}
+ #endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */
+
+ #ifdef CONFIG_EROFS_FS_ONDEMAND
+diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
+index d4f631d39f0fa8..f0110a78acb207 100644
+--- a/fs/erofs/namei.c
++++ b/fs/erofs/namei.c
+@@ -130,24 +130,24 @@ static void *erofs_find_target_block(struct erofs_buf *target,
+ /* string comparison without already matched prefix */
+ diff = erofs_dirnamecmp(name, &dname, &matched);
+
+- if (!diff) {
+- *_ndirents = 0;
+- goto out;
+- } else if (diff > 0) {
+- head = mid + 1;
+- startprfx = matched;
+-
+- if (!IS_ERR(candidate))
+- erofs_put_metabuf(target);
+- *target = buf;
+- candidate = de;
+- *_ndirents = ndirents;
+- } else {
++ if (diff < 0) {
+ erofs_put_metabuf(&buf);
+-
+ back = mid - 1;
+ endprfx = matched;
++ continue;
++ }
++
++ if (!IS_ERR(candidate))
++ erofs_put_metabuf(target);
++ *target = buf;
++ if (!diff) {
++ *_ndirents = 0;
++ return de;
+ }
++ head = mid + 1;
++ startprfx = matched;
++ candidate = de;
++ *_ndirents = ndirents;
+ continue;
+ }
+ out: /* free if the candidate is valid */
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 3700af9ee17332..113414e6f35b96 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -156,68 +156,15 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
+ return buffer;
+ }
+
+-#ifdef CONFIG_EROFS_FS_ZIP
+-static int erofs_load_compr_cfgs(struct super_block *sb,
+- struct erofs_super_block *dsb)
++#ifndef CONFIG_EROFS_FS_ZIP
++static int z_erofs_parse_cfgs(struct super_block *sb,
++ struct erofs_super_block *dsb)
+ {
+- struct erofs_sb_info *sbi = EROFS_SB(sb);
+- struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
+- unsigned int algs, alg;
+- erofs_off_t offset;
+- int size, ret = 0;
+-
+- sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
+- if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
+- erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
+- sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
+- return -EINVAL;
+- }
+-
+- erofs_init_metabuf(&buf, sb);
+- offset = EROFS_SUPER_OFFSET + sbi->sb_size;
+- alg = 0;
+- for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
+- void *data;
+-
+- if (!(algs & 1))
+- continue;
+-
+- data = erofs_read_metadata(sb, &buf, &offset, &size);
+- if (IS_ERR(data)) {
+- ret = PTR_ERR(data);
+- break;
+- }
++ if (!dsb->u1.available_compr_algs)
++ return 0;
+
+- switch (alg) {
+- case Z_EROFS_COMPRESSION_LZ4:
+- ret = z_erofs_load_lz4_config(sb, dsb, data, size);
+- break;
+- case Z_EROFS_COMPRESSION_LZMA:
+- ret = z_erofs_load_lzma_config(sb, dsb, data, size);
+- break;
+- case Z_EROFS_COMPRESSION_DEFLATE:
+- ret = z_erofs_load_deflate_config(sb, dsb, data, size);
+- break;
+- default:
+- DBG_BUGON(1);
+- ret = -EFAULT;
+- }
+- kfree(data);
+- if (ret)
+- break;
+- }
+- erofs_put_metabuf(&buf);
+- return ret;
+-}
+-#else
+-static int erofs_load_compr_cfgs(struct super_block *sb,
+- struct erofs_super_block *dsb)
+-{
+- if (dsb->u1.available_compr_algs) {
+- erofs_err(sb, "try to load compressed fs when compression is disabled");
+- return -EINVAL;
+- }
+- return 0;
++ erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
++ return -EOPNOTSUPP;
+ }
+ #endif
+
+@@ -227,7 +174,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_fscache *fscache;
+ struct erofs_deviceslot *dis;
+- struct block_device *bdev;
++ struct bdev_handle *bdev_handle;
+ void *ptr;
+
+ ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
+@@ -251,13 +198,13 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
+ return PTR_ERR(fscache);
+ dif->fscache = fscache;
+ } else if (!sbi->devs->flatdev) {
+- bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
+- NULL);
+- if (IS_ERR(bdev))
+- return PTR_ERR(bdev);
+- dif->bdev = bdev;
+- dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
+- NULL, NULL);
++ bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ,
++ sb->s_type, NULL);
++ if (IS_ERR(bdev_handle))
++ return PTR_ERR(bdev_handle);
++ dif->bdev_handle = bdev_handle;
++ dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev,
++ &dif->dax_part_off, NULL, NULL);
+ }
+
+ dif->blocks = le32_to_cpu(dis->blocks);
+@@ -406,10 +353,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ }
+
+ /* parse on-disk compression configurations */
+- if (erofs_sb_has_compr_cfgs(sbi))
+- ret = erofs_load_compr_cfgs(sb, dsb);
+- else
+- ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
++ ret = z_erofs_parse_cfgs(sb, dsb);
+ if (ret < 0)
+ goto out;
+
+@@ -423,18 +367,18 @@ static int erofs_read_superblock(struct super_block *sb)
+ return ret;
+ }
+
+-static void erofs_default_options(struct erofs_fs_context *ctx)
++static void erofs_default_options(struct erofs_sb_info *sbi)
+ {
+ #ifdef CONFIG_EROFS_FS_ZIP
+- ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+- ctx->opt.max_sync_decompress_pages = 3;
+- ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
++ sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
++ sbi->opt.max_sync_decompress_pages = 3;
++ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
+ #endif
+ #ifdef CONFIG_EROFS_FS_XATTR
+- set_opt(&ctx->opt, XATTR_USER);
++ set_opt(&sbi->opt, XATTR_USER);
+ #endif
+ #ifdef CONFIG_EROFS_FS_POSIX_ACL
+- set_opt(&ctx->opt, POSIX_ACL);
++ set_opt(&sbi->opt, POSIX_ACL);
+ #endif
+ }
+
+@@ -479,17 +423,17 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
+ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
+ {
+ #ifdef CONFIG_FS_DAX
+- struct erofs_fs_context *ctx = fc->fs_private;
++ struct erofs_sb_info *sbi = fc->s_fs_info;
+
+ switch (mode) {
+ case EROFS_MOUNT_DAX_ALWAYS:
+ warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+- set_opt(&ctx->opt, DAX_ALWAYS);
+- clear_opt(&ctx->opt, DAX_NEVER);
++ set_opt(&sbi->opt, DAX_ALWAYS);
++ clear_opt(&sbi->opt, DAX_NEVER);
+ return true;
+ case EROFS_MOUNT_DAX_NEVER:
+- set_opt(&ctx->opt, DAX_NEVER);
+- clear_opt(&ctx->opt, DAX_ALWAYS);
++ set_opt(&sbi->opt, DAX_NEVER);
++ clear_opt(&sbi->opt, DAX_ALWAYS);
+ return true;
+ default:
+ DBG_BUGON(1);
+@@ -504,7 +448,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
+ static int erofs_fc_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+ {
+- struct erofs_fs_context *ctx = fc->fs_private;
++ struct erofs_sb_info *sbi = fc->s_fs_info;
+ struct fs_parse_result result;
+ struct erofs_device_info *dif;
+ int opt, ret;
+@@ -517,9 +461,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
+ case Opt_user_xattr:
+ #ifdef CONFIG_EROFS_FS_XATTR
+ if (result.boolean)
+- set_opt(&ctx->opt, XATTR_USER);
++ set_opt(&sbi->opt, XATTR_USER);
+ else
+- clear_opt(&ctx->opt, XATTR_USER);
++ clear_opt(&sbi->opt, XATTR_USER);
+ #else
+ errorfc(fc, "{,no}user_xattr options not supported");
+ #endif
+@@ -527,16 +471,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
+ case Opt_acl:
+ #ifdef CONFIG_EROFS_FS_POSIX_ACL
+ if (result.boolean)
+- set_opt(&ctx->opt, POSIX_ACL);
++ set_opt(&sbi->opt, POSIX_ACL);
+ else
+- clear_opt(&ctx->opt, POSIX_ACL);
++ clear_opt(&sbi->opt, POSIX_ACL);
+ #else
+ errorfc(fc, "{,no}acl options not supported");
+ #endif
+ break;
+ case Opt_cache_strategy:
+ #ifdef CONFIG_EROFS_FS_ZIP
+- ctx->opt.cache_strategy = result.uint_32;
++ sbi->opt.cache_strategy = result.uint_32;
+ #else
+ errorfc(fc, "compression not supported, cache_strategy ignored");
+ #endif
+@@ -558,27 +502,27 @@ static int erofs_fc_parse_param(struct fs_context *fc,
+ kfree(dif);
+ return -ENOMEM;
+ }
+- down_write(&ctx->devs->rwsem);
+- ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
+- up_write(&ctx->devs->rwsem);
++ down_write(&sbi->devs->rwsem);
++ ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
++ up_write(&sbi->devs->rwsem);
+ if (ret < 0) {
+ kfree(dif->path);
+ kfree(dif);
+ return ret;
+ }
+- ++ctx->devs->extra_devices;
++ ++sbi->devs->extra_devices;
+ break;
+ #ifdef CONFIG_EROFS_FS_ONDEMAND
+ case Opt_fsid:
+- kfree(ctx->fsid);
+- ctx->fsid = kstrdup(param->string, GFP_KERNEL);
+- if (!ctx->fsid)
++ kfree(sbi->fsid);
++ sbi->fsid = kstrdup(param->string, GFP_KERNEL);
++ if (!sbi->fsid)
+ return -ENOMEM;
+ break;
+ case Opt_domain_id:
+- kfree(ctx->domain_id);
+- ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
+- if (!ctx->domain_id)
++ kfree(sbi->domain_id);
++ sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
++ if (!sbi->domain_id)
+ return -ENOMEM;
+ break;
+ #else
+@@ -631,18 +575,10 @@ static const struct export_operations erofs_export_ops = {
+ .get_parent = erofs_get_parent,
+ };
+
+-static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
+-{
+- static const struct tree_descr empty_descr = {""};
+-
+- return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
+-}
+-
+ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ {
+ struct inode *inode;
+- struct erofs_sb_info *sbi;
+- struct erofs_fs_context *ctx = fc->fs_private;
++ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ int err;
+
+ sb->s_magic = EROFS_SUPER_MAGIC;
+@@ -650,19 +586,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_op = &erofs_sops;
+
+- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+- if (!sbi)
+- return -ENOMEM;
+-
+- sb->s_fs_info = sbi;
+- sbi->opt = ctx->opt;
+- sbi->devs = ctx->devs;
+- ctx->devs = NULL;
+- sbi->fsid = ctx->fsid;
+- ctx->fsid = NULL;
+- sbi->domain_id = ctx->domain_id;
+- ctx->domain_id = NULL;
+-
+ sbi->blkszbits = PAGE_SHIFT;
+ if (erofs_is_fscache_mode(sb)) {
+ sb->s_blocksize = PAGE_SIZE;
+@@ -764,16 +687,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ return 0;
+ }
+
+-static int erofs_fc_anon_get_tree(struct fs_context *fc)
+-{
+- return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
+-}
+-
+ static int erofs_fc_get_tree(struct fs_context *fc)
+ {
+- struct erofs_fs_context *ctx = fc->fs_private;
++ struct erofs_sb_info *sbi = fc->s_fs_info;
+
+- if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
++ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
+ return get_tree_nodev(fc, erofs_fc_fill_super);
+
+ return get_tree_bdev(fc, erofs_fc_fill_super);
+@@ -783,19 +701,19 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
+ {
+ struct super_block *sb = fc->root->d_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+- struct erofs_fs_context *ctx = fc->fs_private;
++ struct erofs_sb_info *new_sbi = fc->s_fs_info;
+
+ DBG_BUGON(!sb_rdonly(sb));
+
+- if (ctx->fsid || ctx->domain_id)
++ if (new_sbi->fsid || new_sbi->domain_id)
+ erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
+
+- if (test_opt(&ctx->opt, POSIX_ACL))
++ if (test_opt(&new_sbi->opt, POSIX_ACL))
+ fc->sb_flags |= SB_POSIXACL;
+ else
+ fc->sb_flags &= ~SB_POSIXACL;
+
+- sbi->opt = ctx->opt;
++ sbi->opt = new_sbi->opt;
+
+ fc->sb_flags |= SB_RDONLY;
+ return 0;
+@@ -806,8 +724,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
+ struct erofs_device_info *dif = ptr;
+
+ fs_put_dax(dif->dax_dev, NULL);
+- if (dif->bdev)
+- blkdev_put(dif->bdev, &erofs_fs_type);
++ if (dif->bdev_handle)
++ bdev_release(dif->bdev_handle);
+ erofs_fscache_unregister_cookie(dif->fscache);
+ dif->fscache = NULL;
+ kfree(dif->path);
+@@ -826,12 +744,15 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
+
+ static void erofs_fc_free(struct fs_context *fc)
+ {
+- struct erofs_fs_context *ctx = fc->fs_private;
++ struct erofs_sb_info *sbi = fc->s_fs_info;
++
++ if (!sbi)
++ return;
+
+- erofs_free_dev_context(ctx->devs);
+- kfree(ctx->fsid);
+- kfree(ctx->domain_id);
+- kfree(ctx);
++ erofs_free_dev_context(sbi->devs);
++ kfree(sbi->fsid);
++ kfree(sbi->domain_id);
++ kfree(sbi);
+ }
+
+ static const struct fs_context_operations erofs_context_ops = {
+@@ -841,56 +762,37 @@ static const struct fs_context_operations erofs_context_ops = {
+ .free = erofs_fc_free,
+ };
+
+-static const struct fs_context_operations erofs_anon_context_ops = {
+- .get_tree = erofs_fc_anon_get_tree,
+-};
+-
+ static int erofs_init_fs_context(struct fs_context *fc)
+ {
+- struct erofs_fs_context *ctx;
+-
+- /* pseudo mount for anon inodes */
+- if (fc->sb_flags & SB_KERNMOUNT) {
+- fc->ops = &erofs_anon_context_ops;
+- return 0;
+- }
++ struct erofs_sb_info *sbi;
+
+- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+- if (!ctx)
++ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
++ if (!sbi)
+ return -ENOMEM;
+- ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
+- if (!ctx->devs) {
+- kfree(ctx);
++
++ sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
++ if (!sbi->devs) {
++ kfree(sbi);
+ return -ENOMEM;
+ }
+- fc->fs_private = ctx;
++ fc->s_fs_info = sbi;
+
+- idr_init(&ctx->devs->tree);
+- init_rwsem(&ctx->devs->rwsem);
+- erofs_default_options(ctx);
++ idr_init(&sbi->devs->tree);
++ init_rwsem(&sbi->devs->rwsem);
++ erofs_default_options(sbi);
+ fc->ops = &erofs_context_ops;
+ return 0;
+ }
+
+ static void erofs_kill_sb(struct super_block *sb)
+ {
+- struct erofs_sb_info *sbi;
+-
+- /* pseudo mount for anon inodes */
+- if (sb->s_flags & SB_KERNMOUNT) {
+- kill_anon_super(sb);
+- return;
+- }
++ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+- if (erofs_is_fscache_mode(sb))
++ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
+ kill_anon_super(sb);
+ else
+ kill_block_super(sb);
+
+- sbi = EROFS_SB(sb);
+- if (!sbi)
+- return;
+-
+ erofs_free_dev_context(sbi->devs);
+ fs_put_dax(sbi->dax_dev, NULL);
+ erofs_fscache_unregister_fs(sb);
+@@ -920,7 +822,7 @@ static void erofs_put_super(struct super_block *sb)
+ erofs_fscache_unregister_fs(sb);
+ }
+
+-struct file_system_type erofs_fs_type = {
++static struct file_system_type erofs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "erofs",
+ .init_fs_context = erofs_init_fs_context,
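
Note: the super.c hunks above move EROFS secondary devices from blkdev_get_by_path()/blkdev_put() to the bdev_open_by_path()/bdev_release() handle API. A minimal sketch of the open/release pairing (not part of the patch; assumes the v6.6.58 <linux/blkdev.h>, and the holder argument is illustrative):

/* Open an extra device read-only via the handle API; the underlying
 * struct block_device is reached through handle->bdev. */
static int example_open_extra_device(const char *path, void *holder,
				     struct bdev_handle **out)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ, holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	*out = handle;
	return 0;
}

static void example_close_extra_device(struct bdev_handle *handle)
{
	if (handle)
		bdev_release(handle);	/* replaces blkdev_put(bdev, holder) */
}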
+diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
+index cc6fb9e9889917..4256a85719a1d2 100644
+--- a/fs/erofs/utils.c
++++ b/fs/erofs/utils.c
+@@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ struct erofs_workgroup *pre;
+
+- /*
+- * Bump up before making this visible to others for the XArray in order
+- * to avoid potential UAF without serialized by xa_lock.
+- */
+- lockref_get(&grp->lockref);
+-
++ DBG_BUGON(grp->lockref.count < 1);
+ repeat:
+ xa_lock(&sbi->managed_pslots);
+ pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
+@@ -96,7 +91,6 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
+ cond_resched();
+ goto repeat;
+ }
+- lockref_put_return(&grp->lockref);
+ grp = pre;
+ }
+ xa_unlock(&sbi->managed_pslots);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 036f610e044b60..1c0e6167d8e73b 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -796,6 +796,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+ return PTR_ERR(pcl);
+
+ spin_lock_init(&pcl->obj.lockref.lock);
++ pcl->obj.lockref.count = 1; /* one ref for this request */
+ pcl->algorithmformat = map->m_algorithmformat;
+ pcl->length = 0;
+ pcl->partial = true;
+@@ -814,7 +815,6 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
+
+ if (ztailpacking) {
+ pcl->obj.index = 0; /* which indicates ztailpacking */
+- pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
+ pcl->tailpacking_size = map->m_plen;
+ } else {
+ pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+@@ -892,6 +892,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
+ }
+ get_page(map->buf.page);
+ WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
++ fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+ }
+ /* file-backed inplace I/O pages are traversed in reverse order */
+@@ -1308,12 +1309,11 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ put_page(page);
+ } else {
+ for (i = 0; i < pclusterpages; ++i) {
+- page = pcl->compressed_bvecs[i].page;
++ /* consider shortlived pages added when decompressing */
++ page = be->compressed_pages[i];
+
+ if (erofs_page_is_managed(sbi, page))
+ continue;
+-
+- /* recycle all individual short-lived pages */
+ (void)z_erofs_put_shortlivedpage(be->pagepool, page);
+ WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
+ }
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 7b55111fd53377..6bd435a565f614 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -82,29 +82,26 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
+ }
+
+ static unsigned int decode_compactedbits(unsigned int lobits,
+- unsigned int lomask,
+ u8 *in, unsigned int pos, u8 *type)
+ {
+ const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
+- const unsigned int lo = v & lomask;
++ const unsigned int lo = v & ((1 << lobits) - 1);
+
+ *type = (v >> lobits) & 3;
+ return lo;
+ }
+
+-static int get_compacted_la_distance(unsigned int lclusterbits,
++static int get_compacted_la_distance(unsigned int lobits,
+ unsigned int encodebits,
+ unsigned int vcnt, u8 *in, int i)
+ {
+- const unsigned int lomask = (1 << lclusterbits) - 1;
+ unsigned int lo, d1 = 0;
+ u8 type;
+
+ DBG_BUGON(i >= vcnt);
+
+ do {
+- lo = decode_compactedbits(lclusterbits, lomask,
+- in, encodebits * i, &type);
++ lo = decode_compactedbits(lobits, in, encodebits * i, &type);
+
+ if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
+ return d1;
+@@ -123,15 +120,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ {
+ struct erofs_inode *const vi = EROFS_I(m->inode);
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+- const unsigned int lomask = (1 << lclusterbits) - 1;
+- unsigned int vcnt, base, lo, encodebits, nblk, eofs;
++ unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs;
+ int i;
+ u8 *in, type;
+ bool big_pcluster;
+
+ if (1 << amortizedshift == 4 && lclusterbits <= 14)
+ vcnt = 2;
+- else if (1 << amortizedshift == 2 && lclusterbits == 12)
++ else if (1 << amortizedshift == 2 && lclusterbits <= 12)
+ vcnt = 16;
+ else
+ return -EOPNOTSUPP;
+@@ -140,6 +136,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
+ (vcnt << amortizedshift);
+ big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
++ lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
+ encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
+ eofs = erofs_blkoff(m->inode->i_sb, pos);
+ base = round_down(eofs, vcnt << amortizedshift);
+@@ -147,15 +144,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+
+ i = (eofs - base) >> amortizedshift;
+
+- lo = decode_compactedbits(lclusterbits, lomask,
+- in, encodebits * i, &type);
++ lo = decode_compactedbits(lobits, in, encodebits * i, &type);
+ m->type = type;
+ if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ m->clusterofs = 1 << lclusterbits;
+
+ /* figure out lookahead_distance: delta[1] if needed */
+ if (lookahead)
+- m->delta[1] = get_compacted_la_distance(lclusterbits,
++ m->delta[1] = get_compacted_la_distance(lobits,
+ encodebits, vcnt, in, i);
+ if (lo & Z_EROFS_LI_D0_CBLKCNT) {
+ if (!big_pcluster) {
+@@ -174,8 +170,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ * of which lo saves delta[1] rather than delta[0].
+ * Hence, get delta[0] by the previous lcluster indirectly.
+ */
+- lo = decode_compactedbits(lclusterbits, lomask,
+- in, encodebits * (i - 1), &type);
++ lo = decode_compactedbits(lobits, in,
++ encodebits * (i - 1), &type);
+ if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
+ lo = 0;
+ else if (lo & Z_EROFS_LI_D0_CBLKCNT)
+@@ -190,8 +186,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ nblk = 1;
+ while (i > 0) {
+ --i;
+- lo = decode_compactedbits(lclusterbits, lomask,
+- in, encodebits * i, &type);
++ lo = decode_compactedbits(lobits, in,
++ encodebits * i, &type);
+ if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
+ i -= lo;
+
+@@ -202,8 +198,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ nblk = 0;
+ while (i > 0) {
+ --i;
+- lo = decode_compactedbits(lclusterbits, lomask,
+- in, encodebits * i, &type);
++ lo = decode_compactedbits(lobits, in,
++ encodebits * i, &type);
+ if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
+ if (lo & Z_EROFS_LI_D0_CBLKCNT) {
+ --i;
+@@ -458,7 +454,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ .map = map,
+ };
+ int err = 0;
+- unsigned int lclusterbits, endoff;
++ unsigned int lclusterbits, endoff, afmt;
+ unsigned long initial_lcn;
+ unsigned long long ofs, end;
+
+@@ -547,17 +543,20 @@ static int z_erofs_do_map_blocks(struct inode *inode,
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
+- if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
+- map->m_algorithmformat =
+- Z_EROFS_COMPRESSION_INTERLACED;
+- else
+- map->m_algorithmformat =
+- Z_EROFS_COMPRESSION_SHIFTED;
+- } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
+- map->m_algorithmformat = vi->z_algorithmtype[1];
++ afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
++ Z_EROFS_COMPRESSION_INTERLACED :
++ Z_EROFS_COMPRESSION_SHIFTED;
+ } else {
+- map->m_algorithmformat = vi->z_algorithmtype[0];
++ afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
++ vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
++ if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
++ erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
++ afmt, vi->nid);
++ err = -EFSCORRUPTED;
++ goto unmap_out;
++ }
+ }
++ map->m_algorithmformat = afmt;
+
+ if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
+ ((flags & EROFS_GET_BLOCKS_READMORE) &&
+@@ -724,6 +723,8 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+
+ err = z_erofs_do_map_blocks(inode, map, flags);
+ out:
++ if (err)
++ map->m_llen = 0;
+ trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+ return err;
+ }
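
Note: the zmap.c hunks above replace the per-call "lomask" with a low-bit width computed as max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1), so the D0_CBLKCNT flag bit always fits inside the decoded low field. A standalone sketch of the decode step (not part of the patch; names are illustrative):

#include <stdint.h>

/* Decode one compacted lcluster entry from a little-endian 32-bit
 * window "v" already shifted to the entry's bit position: the low
 * "lobits" bits carry the offset/delta field, the next 2 bits the
 * lcluster type. lobits is assumed to be well under 32. */
static unsigned int decode_entry(unsigned int lobits, uint32_t v,
				 uint8_t *type)
{
	unsigned int lo = v & ((1u << lobits) - 1);

	*type = (v >> lobits) & 3;
	return lo;
}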
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 1d9a71a0c4c167..0ed73bc7d46526 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -876,6 +876,34 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
+ return res;
+ }
+
++/*
++ * The ffd.file pointer may be in the process of being torn down due to
++ * being closed, but we may not have finished eventpoll_release() yet.
++ *
++ * Normally, even with the atomic_long_inc_not_zero, the file may have
++ * been free'd and then gotten re-allocated to something else (since
++ * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
++ *
++ * But for epoll, users hold the ep->mtx mutex, and as such any file in
++ * the process of being free'd will block in eventpoll_release_file()
++ * and thus the underlying file allocation will not be free'd, and the
++ * file re-use cannot happen.
++ *
++ * For the same reason we can avoid a rcu_read_lock() around the
++ * operation - 'ffd.file' cannot go away even if the refcount has
++ * reached zero (but we must still not call out to ->poll() functions
++ * etc).
++ */
++static struct file *epi_fget(const struct epitem *epi)
++{
++ struct file *file;
++
++ file = epi->ffd.file;
++ if (!atomic_long_inc_not_zero(&file->f_count))
++ file = NULL;
++ return file;
++}
++
+ /*
+ * Differs from ep_eventpoll_poll() in that internal callers already have
+ * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
+@@ -884,14 +912,22 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
+ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
+ int depth)
+ {
+- struct file *file = epi->ffd.file;
++ struct file *file = epi_fget(epi);
+ __poll_t res;
+
++ /*
++ * We could return EPOLLERR | EPOLLHUP or something, but let's
++ * treat this more as "file doesn't exist, poll didn't happen".
++ */
++ if (!file)
++ return 0;
++
+ pt->_key = epi->event.events;
+ if (!is_file_epoll(file))
+ res = vfs_poll(file, pt);
+ else
+ res = __ep_eventpoll_poll(file, pt, depth);
++ fput(file);
+ return res & epi->event.events;
+ }
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 6518e33ea813ca..f49b352a60323a 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -770,7 +770,8 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ stack_base = calc_max_stack_size(stack_base);
+
+ /* Add space for stack randomization. */
+- stack_base += (STACK_RND_MASK << PAGE_SHIFT);
++ if (current->flags & PF_RANDOMIZE)
++ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+
+ /* Make sure we didn't let the argument array grow too large. */
+ if (vma->vm_end - vma->vm_start > stack_base)
+@@ -894,6 +895,7 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
+ goto out;
+ }
+
++ bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
+ *sp_location = sp;
+
+ out:
+@@ -1410,6 +1412,9 @@ int begin_new_exec(struct linux_binprm * bprm)
+
+ out_unlock:
+ up_write(&me->signal->exec_update_lock);
++ if (!bprm->cred)
++ mutex_unlock(&me->signal->cred_guard_mutex);
++
+ out:
+ return retval;
+ }
+@@ -1605,6 +1610,7 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
+ unsigned int mode;
+ vfsuid_t vfsuid;
+ vfsgid_t vfsgid;
++ int err;
+
+ if (!mnt_may_suid(file->f_path.mnt))
+ return;
+@@ -1621,12 +1627,17 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
+ /* Be careful if suid/sgid is set */
+ inode_lock(inode);
+
+- /* reload atomically mode/uid/gid now that lock held */
++ /* Atomically reload and check mode/uid/gid now that lock held. */
+ mode = inode->i_mode;
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
+ vfsgid = i_gid_into_vfsgid(idmap, inode);
++ err = inode_permission(idmap, inode, MAY_EXEC);
+ inode_unlock(inode);
+
++ /* Did the exec bit vanish out from under us? Give up. */
++ if (err)
++ return;
++
+ /* We ignore suid/sgid if there are no mappings for them in the ns */
+ if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) ||
+ !vfsgid_has_mapping(bprm->cred->user_ns, vfsgid))
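
Note: the bprm_fill_uid() hunk above closes a window in which a set-id binary's exec permission could be revoked between the initial check and credential setup, by re-validating MAY_EXEC while the inode lock is held. The general recheck-under-lock shape, sketched with invented names (not part of the patch):

#include <pthread.h>

struct sampled_node {
	pthread_mutex_t lock;
	unsigned int mode;	/* permission bits, writable elsewhere */
};

/* Reload and revalidate under the lock before acting on earlier,
 * unlocked observations; bail out if the precondition vanished. */
static int act_if_still_executable(struct sampled_node *n)
{
	unsigned int mode;
	int err = 0;

	pthread_mutex_lock(&n->lock);
	mode = n->mode;
	if (!(mode & 0111))	/* any exec bit */
		err = -1;
	pthread_mutex_unlock(&n->lock);
	if (err)
		return err;	/* exec bit vanished out from under us */
	/* ... safe to derive privileged state from "mode" here ... */
	return 0;
}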
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index e918decb373586..5b547a5963808e 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -110,11 +110,8 @@ int exfat_load_bitmap(struct super_block *sb)
+ return -EIO;
+
+ type = exfat_get_entry_type(ep);
+- if (type == TYPE_UNUSED)
+- break;
+- if (type != TYPE_BITMAP)
+- continue;
+- if (ep->dentry.bitmap.flags == 0x0) {
++ if (type == TYPE_BITMAP &&
++ ep->dentry.bitmap.flags == 0x0) {
+ int err;
+
+ err = exfat_allocate_bitmap(sb, ep);
+@@ -122,6 +119,9 @@ int exfat_load_bitmap(struct super_block *sb)
+ return err;
+ }
+ brelse(bh);
++
++ if (type == TYPE_UNUSED)
++ return -EINVAL;
+ }
+
+ if (exfat_get_next_cluster(sb, &clu.dir))
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index e1586bba6d8623..7a715016b96f34 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -890,7 +890,7 @@ int exfat_get_dentry_set(struct exfat_entry_set_cache *es,
+
+ num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
+ if (num_bh > ARRAY_SIZE(es->__bh)) {
+- es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL);
++ es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_NOFS);
+ if (!es->bh) {
+ brelse(bh);
+ return -ENOMEM;
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 1b9f587f6cca5f..95c51b025b9176 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -351,14 +351,20 @@ static int exfat_find_empty_entry(struct inode *inode,
+ if (exfat_check_max_dentries(inode))
+ return -ENOSPC;
+
+- /* we trust p_dir->size regardless of FAT type */
+- if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+- return -EIO;
+-
+ /*
+ * Allocate new cluster to this directory
+ */
+- exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++ if (ei->start_clu != EXFAT_EOF_CLUSTER) {
++ /* we trust p_dir->size regardless of FAT type */
++ if (exfat_find_last_cluster(sb, p_dir, &last_clu))
++ return -EIO;
++
++ exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++ } else {
++ /* This directory is empty */
++ exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
++ ALLOC_NO_FAT_CHAIN);
++ }
+
+ /* allocate a cluster */
+ ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
+@@ -368,6 +374,11 @@ static int exfat_find_empty_entry(struct inode *inode,
+ if (exfat_zeroed_cluster(inode, clu.dir))
+ return -EIO;
+
++ if (ei->start_clu == EXFAT_EOF_CLUSTER) {
++ ei->start_clu = clu.dir;
++ p_dir->dir = clu.dir;
++ }
++
+ /* append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+ /* no-fat-chain bit is disabled,
+@@ -645,7 +656,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ info->type = exfat_get_entry_type(ep);
+ info->attr = le16_to_cpu(ep->dentry.file.attr);
+ info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+- if ((info->type == TYPE_FILE) && (info->size == 0)) {
++ if (info->size == 0) {
+ info->flags = ALLOC_NO_FAT_CHAIN;
+ info->start_clu = EXFAT_EOF_CLUSTER;
+ } else {
+@@ -888,6 +899,9 @@ static int exfat_check_dir_empty(struct super_block *sb,
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
++ if (p_dir->dir == EXFAT_EOF_CLUSTER)
++ return 0;
++
+ exfat_chain_dup(&clu, p_dir);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+@@ -1255,7 +1269,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ }
+
+ /* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
+- if (new_entry_type == TYPE_DIR) {
++ if (new_entry_type == TYPE_DIR &&
++ new_ei->start_clu != EXFAT_EOF_CLUSTER) {
+ /* new_ei, new_clu_to_free */
+ struct exfat_chain new_clu_to_free;
+
+diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
+index e124f3d709b23a..f2052723821afc 100644
+--- a/fs/ext2/balloc.c
++++ b/fs/ext2/balloc.c
+@@ -77,26 +77,33 @@ static int ext2_valid_block_bitmap(struct super_block *sb,
+ ext2_grpblk_t next_zero_bit;
+ ext2_fsblk_t bitmap_blk;
+ ext2_fsblk_t group_first_block;
++ ext2_grpblk_t max_bit;
+
+ group_first_block = ext2_group_first_block_no(sb, block_group);
++ max_bit = ext2_group_last_block_no(sb, block_group) - group_first_block;
+
+ /* check whether block bitmap block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
+ offset = bitmap_blk - group_first_block;
+- if (!ext2_test_bit(offset, bh->b_data))
++ if (offset < 0 || offset > max_bit ||
++ !ext2_test_bit(offset, bh->b_data))
+ /* bad block bitmap */
+ goto err_out;
+
+ /* check whether the inode bitmap block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
+ offset = bitmap_blk - group_first_block;
+- if (!ext2_test_bit(offset, bh->b_data))
++ if (offset < 0 || offset > max_bit ||
++ !ext2_test_bit(offset, bh->b_data))
+ /* bad block bitmap */
+ goto err_out;
+
+ /* check whether the inode table block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_inode_table);
+ offset = bitmap_blk - group_first_block;
++ if (offset < 0 || offset > max_bit ||
++ offset + EXT2_SB(sb)->s_itb_per_group - 1 > max_bit)
++ goto err_out;
+ next_zero_bit = ext2_find_next_zero_bit(bh->b_data,
+ offset + EXT2_SB(sb)->s_itb_per_group,
+ offset);
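
Note: the ext2 hunk above range-checks the on-disk bitmap and inode-table block numbers before using them as bit indexes, so a corrupted group descriptor cannot index past the bitmap buffer. The shape of the guard, as a standalone sketch (not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Validate an untrusted offset before treating it as a bit index into
 * a bitmap whose valid indexes are 0..max_bit inclusive. */
static bool test_untrusted_bit(const uint8_t *bitmap, int64_t offset,
			       int64_t max_bit)
{
	if (offset < 0 || offset > max_bit)
		return false;	/* corrupted metadata: reject */
	return bitmap[offset / 8] & (1u << (offset % 8));
}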
+diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
+index 7fdd685c384d6a..02255185d68efe 100644
+--- a/fs/ext2/ext2.h
++++ b/fs/ext2/ext2.h
+@@ -674,7 +674,7 @@ struct ext2_inode_info {
+ struct inode vfs_inode;
+ struct list_head i_orphan; /* unlinked but open inodes */
+ #ifdef CONFIG_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+ #endif
+ };
+
+diff --git a/fs/ext2/file.c b/fs/ext2/file.c
+index 1039e5bf90afd3..4ddc36f4dbd407 100644
+--- a/fs/ext2/file.c
++++ b/fs/ext2/file.c
+@@ -258,7 +258,6 @@ static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ goto out_unlock;
+ }
+
+- iocb->ki_pos += status;
+ ret += status;
+ endbyte = pos + status - 1;
+ ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index aaf3e3e88cb218..5bcf5623b46cce 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -320,7 +320,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, siz
+ static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
+ static int ext2_quota_on(struct super_block *sb, int type, int format_id,
+ const struct path *path);
+-static struct dquot **ext2_get_dquots(struct inode *inode)
++static struct dquot __rcu **ext2_get_dquots(struct inode *inode)
+ {
+ return EXT2_I(inode)->i_dquot;
+ }
+diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
+index 0c5a79c3b5d480..ef4c19e5f57060 100644
+--- a/fs/ext4/acl.h
++++ b/fs/ext4/acl.h
+@@ -68,6 +68,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
+ static inline int
+ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+ {
++ /* usually, the umask is applied by posix_acl_create(), but if
++ ext4 ACL support is disabled at compile time, we need to do
++ it here, because posix_acl_create() will never be called */
++ inode->i_mode &= ~current_umask();
++
+ return 0;
+ }
+ #endif /* CONFIG_EXT4_FS_POSIX_ACL */
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 3985f8c33f9553..7ea33c3fe94e1c 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -279,12 +279,20 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+ struct fscrypt_str de_name =
+ FSTR_INIT(de->name,
+ de->name_len);
++ u32 hash;
++ u32 minor_hash;
++
++ if (IS_CASEFOLDED(inode)) {
++ hash = EXT4_DIRENT_HASH(de);
++ minor_hash = EXT4_DIRENT_MINOR_HASH(de);
++ } else {
++ hash = 0;
++ minor_hash = 0;
++ }
+
+ /* Directory is encrypted */
+ err = fscrypt_fname_disk_to_usr(inode,
+- EXT4_DIRENT_HASH(de),
+- EXT4_DIRENT_MINOR_HASH(de),
+- &de_name, &fstr);
++ hash, minor_hash, &de_name, &fstr);
+ de_name = fstr;
+ fstr.len = save_len;
+ if (err)
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 9418359b1d9d3b..7bbf0b9bdff239 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1146,7 +1146,7 @@ struct ext4_inode_info {
+ tid_t i_datasync_tid;
+
+ #ifdef CONFIG_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+ #endif
+
+ /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+@@ -1676,7 +1676,8 @@ struct ext4_sb_info {
+
+ /*
+ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+- * or EXTENTS flag.
++ * or EXTENTS flag or between writepages ops and changing DELALLOC or
++ * DIOREAD_NOLOCK mount options on remount.
+ */
+ struct percpu_rw_semaphore s_writepages_rwsem;
+ struct dax_device *s_daxdev;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 202c76996b6213..1c059ac1c1ef27 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -957,6 +957,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+
+ ext4_ext_show_path(inode, path);
+
++ if (orig_path)
++ *orig_path = path;
+ return path;
+
+ err:
+@@ -1010,6 +1012,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ ix = curp->p_idx;
+ }
+
++ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
++ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
++ return -EFSCORRUPTED;
++ }
++
+ len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
+ BUG_ON(len < 0);
+ if (len > 0) {
+@@ -1019,11 +1026,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+ memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
+ }
+
+- if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
+- EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
+- return -EFSCORRUPTED;
+- }
+-
+ ix->ei_block = cpu_to_le32(logical);
+ ext4_idx_store_pblock(ix, ptr);
+ le16_add_cpu(&curp->p_hdr->eh_entries, 1);
+@@ -1877,6 +1879,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
+ path[0].p_hdr->eh_max = cpu_to_le16(max_root);
+
+ brelse(path[1].p_bh);
++ path[1].p_bh = NULL;
+ ext4_free_blocks(handle, inode, NULL, blk, 1,
+ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+ }
+@@ -2103,6 +2106,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ ppath, newext);
+ if (err)
+ goto cleanup;
++ path = *ppath;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+@@ -2229,7 +2233,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
+
+
+ /*
+- * ext4_ext_determine_hole - determine hole around given block
++ * ext4_ext_find_hole - find hole around given block according to the given path
+ * @inode: inode we lookup in
+ * @path: path in extent tree to @lblk
+ * @lblk: pointer to logical block around which we want to determine hole
+@@ -2241,9 +2245,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
+ * The function returns the length of a hole starting at @lblk. We update @lblk
+ * to the beginning of the hole if we managed to find it.
+ */
+-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
+- struct ext4_ext_path *path,
+- ext4_lblk_t *lblk)
++static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t *lblk)
+ {
+ int depth = ext_depth(inode);
+ struct ext4_extent *ex;
+@@ -2270,30 +2274,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
+ return len;
+ }
+
+-/*
+- * ext4_ext_put_gap_in_cache:
+- * calculate boundaries of the gap that the requested block fits into
+- * and cache this gap
+- */
+-static void
+-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
+- ext4_lblk_t hole_len)
+-{
+- struct extent_status es;
+-
+- ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+- hole_start + hole_len - 1, &es);
+- if (es.es_len) {
+- /* There's delayed extent containing lblock? */
+- if (es.es_lblk <= hole_start)
+- return;
+- hole_len = min(es.es_lblk - hole_start, hole_len);
+- }
+- ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
+- ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
+- EXTENT_STATUS_HOLE);
+-}
+-
+ /*
+ * ext4_ext_rm_idx:
+ * removes index from the index block.
+@@ -3254,6 +3234,24 @@ static int ext4_split_extent_at(handle_t *handle,
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+ goto out;
+
++ /*
++ * Update path is required because previous ext4_ext_insert_extent()
++ * may have freed or reallocated the path. Using EXT4_EX_NOFAIL
++ * guarantees that ext4_find_extent() will not return -ENOMEM,
++ * otherwise -ENOMEM will cause a retry in do_writepages(), and a
++ * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
++ * an incorrect ee_len causing the i_reserved_data_blocks exception.
++ */
++ path = ext4_find_extent(inode, ee_block, ppath,
++ flags | EXT4_EX_NOFAIL);
++ if (IS_ERR(path)) {
++ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
++ split, PTR_ERR(path));
++ return PTR_ERR(path);
++ }
++ depth = ext_depth(inode);
++ ex = path[depth].p_ext;
++
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+ if (split_flag & EXT4_EXT_DATA_VALID1) {
+@@ -3306,12 +3304,12 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+ return err;
+ out:
+- ext4_ext_show_leaf(inode, path);
++ ext4_ext_show_leaf(inode, *ppath);
+ return err;
+ }
+
+ /*
+- * ext4_split_extents() splits an extent and mark extent which is covered
++ * ext4_split_extent() splits an extent and mark extent which is covered
+ * by @map as split_flags indicates
+ *
+ * It may result in splitting the extent into multiple extents (up to three)
+@@ -3387,7 +3385,7 @@ static int ext4_split_extent(handle_t *handle,
+ goto out;
+ }
+
+- ext4_ext_show_leaf(inode, path);
++ ext4_ext_show_leaf(inode, *ppath);
+ out:
+ return err ? err : allocated;
+ }
+@@ -3426,9 +3424,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ struct ext4_extent *ex, *abut_ex;
+ ext4_lblk_t ee_block, eof_block;
+ unsigned int ee_len, depth, map_len = map->m_len;
+- int allocated = 0, max_zeroout = 0;
+ int err = 0;
+ int split_flag = EXT4_EXT_DATA_VALID2;
++ int allocated = 0;
++ unsigned int max_zeroout = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ (unsigned long long)map->m_lblk, map_len);
+@@ -3851,14 +3850,13 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path **ppath, int flags,
+ unsigned int allocated, ext4_fsblk_t newblock)
+ {
+- struct ext4_ext_path __maybe_unused *path = *ppath;
+ int ret = 0;
+ int err = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
+ (unsigned long long)map->m_lblk, map->m_len, flags,
+ allocated);
+- ext4_ext_show_leaf(inode, path);
++ ext4_ext_show_leaf(inode, *ppath);
+
+ /*
+ * When writing into unwritten space, we should not fail to
+@@ -3955,7 +3953,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ if (allocated > map->m_len)
+ allocated = map->m_len;
+ map->m_len = allocated;
+- ext4_ext_show_leaf(inode, path);
++ ext4_ext_show_leaf(inode, *ppath);
+ out2:
+ return err ? err : allocated;
+ }
+@@ -4062,6 +4060,69 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ return 0;
+ }
+
++/*
++ * Determine hole length around the given logical block, first try to
++ * locate and expand the hole from the given @path, and then adjust it
++ * if it's partially or completely converted to delayed extents, insert
++ * it into the extent cache tree if it's indeed a hole, finally return
++ * the length of the determined extent.
++ */
++static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t lblk)
++{
++ ext4_lblk_t hole_start, len;
++ struct extent_status es;
++
++ hole_start = lblk;
++ len = ext4_ext_find_hole(inode, path, &hole_start);
++again:
++ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
++ hole_start + len - 1, &es);
++ if (!es.es_len)
++ goto insert_hole;
++
++ /*
++ * There's a delalloc extent in the hole, handle it if the delalloc
++ * extent is in front of, behind and straddle the queried range.
++ */
++ if (lblk >= es.es_lblk + es.es_len) {
++ /*
++ * The delalloc extent is in front of the queried range,
++ * find again from the queried start block.
++ */
++ len -= lblk - hole_start;
++ hole_start = lblk;
++ goto again;
++ } else if (in_range(lblk, es.es_lblk, es.es_len)) {
++ /*
++ * The delalloc extent containing lblk, it must have been
++ * added after ext4_map_blocks() checked the extent status
++ * tree, adjust the length to the delalloc extent's after
++ * lblk.
++ */
++ len = es.es_lblk + es.es_len - lblk;
++ return len;
++ } else {
++ /*
++ * The delalloc extent is partially or completely behind
++ * the queried range, update hole length until the
++ * beginning of the delalloc extent.
++ */
++ len = min(es.es_lblk - hole_start, len);
++ }
++
++insert_hole:
++ /* Put just found gap into cache to speed up subsequent requests */
++ ext_debug(inode, " -> %u:%u\n", hole_start, len);
++ ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
++
++ /* Update hole_len to reflect hole size after lblk */
++ if (hole_start != lblk)
++ len -= lblk - hole_start;
++
++ return len;
++}
+
+ /*
+ * Block allocation/map/preallocation routine for extents based files
+@@ -4179,22 +4240,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ * we couldn't try to create block if create flag is zero
+ */
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+- ext4_lblk_t hole_start, hole_len;
++ ext4_lblk_t len;
+
+- hole_start = map->m_lblk;
+- hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
+- /*
+- * put just found gap into cache to speed up
+- * subsequent requests
+- */
+- ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
++ len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
+
+- /* Update hole_len to reflect hole size after map->m_lblk */
+- if (hole_start != map->m_lblk)
+- hole_len -= map->m_lblk - hole_start;
+ map->m_pblk = 0;
+- map->m_len = min_t(unsigned int, map->m_len, hole_len);
+-
++ map->m_len = min_t(unsigned int, map->m_len, len);
+ goto out;
+ }
+
+@@ -4522,7 +4573,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ * Round up offset. This is not fallocate, we need to zero out
+ * blocks, so convert interior block aligned part of the range to
+ * unwritten and possibly manually zero out unaligned parts of the
+- * range.
++ * range. Here, start and partial_begin are inclusive, end and
++ * partial_end are exclusive.
+ */
+ start = round_up(offset, 1 << blkbits);
+ end = round_down((offset + len), 1 << blkbits);
+@@ -4608,7 +4660,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ * disk in case of crash before zeroing trans is committed.
+ */
+ if (ext4_should_journal_data(inode)) {
+- ret = filemap_write_and_wait_range(mapping, start, end);
++ ret = filemap_write_and_wait_range(mapping, start,
++ end - 1);
+ if (ret) {
+ filemap_invalidate_unlock(mapping);
+ goto out_mutex;
+@@ -5499,6 +5552,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ path = ext4_find_extent(inode, offset_lblk, NULL, 0);
+ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
++ ret = PTR_ERR(path);
+ goto out_stop;
+ }
+
+@@ -5844,7 +5898,7 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ int len, int unwritten, ext4_fsblk_t pblk)
+ {
+- struct ext4_ext_path *path = NULL, *ppath;
++ struct ext4_ext_path *path;
+ struct ext4_extent *ex;
+ int ret;
+
+@@ -5860,30 +5914,29 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (le32_to_cpu(ex->ee_block) != start ||
+ ext4_ext_get_actual_len(ex) != len) {
+ /* We need to split this extent to match our extent first */
+- ppath = path;
+ down_write(&EXT4_I(inode)->i_data_sem);
+- ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
++ ret = ext4_force_split_extent_at(NULL, inode, &path, start, 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (ret)
+ goto out;
+- kfree(path);
+- path = ext4_find_extent(inode, start, NULL, 0);
++
++ path = ext4_find_extent(inode, start, &path, 0);
+ if (IS_ERR(path))
+- return -1;
+- ppath = path;
++ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+ WARN_ON(le32_to_cpu(ex->ee_block) != start);
++
+ if (ext4_ext_get_actual_len(ex) != len) {
+ down_write(&EXT4_I(inode)->i_data_sem);
+- ret = ext4_force_split_extent_at(NULL, inode, &ppath,
++ ret = ext4_force_split_extent_at(NULL, inode, &path,
+ start + len, 1);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (ret)
+ goto out;
+- kfree(path);
+- path = ext4_find_extent(inode, start, NULL, 0);
++
++ path = ext4_find_extent(inode, start, &path, 0);
+ if (IS_ERR(path))
+- return -EINVAL;
++ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+ }
+ }
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 6f7de14c0fa86f..d9d5cfb9c951af 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -152,8 +152,9 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei);
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len);
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc);
+
+ int __init ext4_init_es(void)
+ {
+@@ -309,6 +310,8 @@ void ext4_es_find_extent_range(struct inode *inode,
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es)
+ {
++ es->es_lblk = es->es_len = es->es_pblk = 0;
++
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+
+@@ -448,6 +451,19 @@ static void ext4_es_list_del(struct inode *inode)
+ spin_unlock(&sbi->s_es_lock);
+ }
+
++static inline struct pending_reservation *__alloc_pending(bool nofail)
++{
++ if (!nofail)
++ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
++
++ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static inline void __free_pending(struct pending_reservation *pr)
++{
++ kmem_cache_free(ext4_pending_cachep, pr);
++}
++
+ /*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+@@ -836,11 +852,12 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
++ bool revise_pending = false;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+@@ -868,11 +885,17 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_insert_extent_check(inode, &newes);
+
++ revise_pending = sbi->s_cluster_ratio > 1 &&
++ test_opt(inode->i_sb, DELALLOC) &&
++ (status & (EXTENT_STATUS_WRITTEN |
++ EXTENT_STATUS_UNWRITTEN));
+ retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && revise_pending && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+@@ -897,13 +920,18 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ es2 = NULL;
+ }
+
+- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+- (status & EXTENT_STATUS_WRITTEN ||
+- status & EXTENT_STATUS_UNWRITTEN))
+- __revise_pending(inode, lblk, len);
++ if (revise_pending) {
++ err3 = __revise_pending(inode, lblk, len, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -1311,7 +1339,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ rc->ndelonly--;
+ node = rb_next(&pr->rb_node);
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ if (!node)
+ break;
+ pr = rb_entry(node, struct pending_reservation,
+@@ -1405,8 +1433,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ }
+ }
+ if (count_reserved)
+- count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+- &orig_es, &rc);
++ count_rsvd(inode, orig_es.es_lblk + len1,
++ orig_es.es_len - len1 - len2, &orig_es, &rc);
+ goto out_get_reserved;
+ }
+
+@@ -1907,11 +1935,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
++ * @prealloc - preallocated pending entry
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns successfully.
+ */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
++static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+@@ -1937,10 +1967,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+ }
+ }
+
+- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+- if (pr == NULL) {
+- ret = -ENOMEM;
+- goto out;
++ if (likely(*prealloc == NULL)) {
++ pr = __alloc_pending(false);
++ if (!pr) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ } else {
++ pr = *prealloc;
++ *prealloc = NULL;
+ }
+ pr->lclu = lclu;
+
+@@ -1970,7 +2005,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ }
+ }
+
+@@ -2029,10 +2064,10 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+ {
+ struct extent_status newes;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+@@ -2052,6 +2087,8 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && allocated && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+@@ -2074,11 +2111,18 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ es2 = NULL;
+ }
+
+- if (allocated)
+- __insert_pending(inode, lblk);
++ if (allocated) {
++ err3 = __insert_pending(inode, lblk, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -2184,21 +2228,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len - length of range in blocks
++ * @prealloc - preallocated pending entry
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len)
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t end = lblk + len - 1;
+ ext4_lblk_t first, last;
+ bool f_del = false, l_del = false;
++ int ret = 0;
+
+ if (len == 0)
+- return;
++ return 0;
+
+ /*
+ * Two cases - block range within single cluster and block range
+@@ -2219,7 +2266,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del) {
+- __insert_pending(inode, first);
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
+ } else {
+ last = EXT4_LBLK_CMASK(sbi, end) +
+ sbi->s_cluster_ratio - 1;
+@@ -2227,9 +2276,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ l_del = __es_scan_range(inode,
+ &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
+ } else {
+@@ -2237,18 +2288,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+- if (f_del)
+- __insert_pending(inode, first);
+- else
++ if (f_del) {
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, first);
+
+ last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
++out:
++ return ret;
+ }
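
Note: the extents_status.c hunks above thread a caller-preallocated pending_reservation through __insert_pending()/__revise_pending(), so the retry path can allocate with __GFP_NOFAIL outside the i_es_lock and never fail inside it. A compact sketch of that preallocate-then-retry shape (not part of the patch; all names invented):

#include <stdlib.h>
#include <pthread.h>

struct entry { long key; struct entry *next; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *tree_head;

/* Under the lock: consume the caller's spare if present, otherwise try
 * an opportunistic allocation that is allowed to fail. */
static int insert_locked(long key, struct entry **spare)
{
	struct entry *e;

	if (*spare) {
		e = *spare;
		*spare = NULL;
	} else {
		e = malloc(sizeof(*e));	/* stand-in for GFP_ATOMIC */
		if (!e)
			return -1;	/* ask the caller to preallocate */
	}
	e->key = key;
	e->next = tree_head;
	tree_head = e;
	return 0;
}

static int insert_with_prealloc(long key)
{
	struct entry *spare = NULL;
	int err;

retry:
	pthread_mutex_lock(&tree_lock);
	err = insert_locked(key, &spare);
	pthread_mutex_unlock(&tree_lock);
	if (err) {
		while (!(spare = malloc(sizeof(*spare))))
			;		/* stand-in for __GFP_NOFAIL */
		goto retry;
	}
	free(spare);			/* unused spare (free(NULL) is fine) */
	return 0;
}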
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index b06de728b3b6c9..b527f4ab47e021 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -339,22 +339,29 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ tid_t tid;
++ bool has_transaction = true;
++ bool is_ineligible;
+
+ if (ext4_fc_disabled(sb))
+ return;
+
+- ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (handle && !IS_ERR(handle))
+ tid = handle->h_transaction->t_tid;
+ else {
+ read_lock(&sbi->s_journal->j_state_lock);
+- tid = sbi->s_journal->j_running_transaction ?
+- sbi->s_journal->j_running_transaction->t_tid : 0;
++ if (sbi->s_journal->j_running_transaction)
++ tid = sbi->s_journal->j_running_transaction->t_tid;
++ else
++ has_transaction = false;
+ read_unlock(&sbi->s_journal->j_state_lock);
+ }
+ spin_lock(&sbi->s_fc_lock);
+- if (sbi->s_fc_ineligible_tid < tid)
++ is_ineligible = ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
++ if (has_transaction &&
++ (!is_ineligible ||
++ (is_ineligible && tid_gt(tid, sbi->s_fc_ineligible_tid))))
+ sbi->s_fc_ineligible_tid = tid;
++ ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ spin_unlock(&sbi->s_fc_lock);
+ WARN_ON(reason >= EXT4_FC_REASON_MAX);
+ sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
+@@ -372,7 +379,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handl
+ */
+ static int ext4_fc_track_template(
+ handle_t *handle, struct inode *inode,
+- int (*__fc_track_fn)(struct inode *, void *, bool),
++ int (*__fc_track_fn)(handle_t *handle, struct inode *, void *, bool),
+ void *args, int enqueue)
+ {
+ bool update = false;
+@@ -389,7 +396,7 @@ static int ext4_fc_track_template(
+ ext4_fc_reset_inode(inode);
+ ei->i_sync_tid = tid;
+ }
+- ret = __fc_track_fn(inode, args, update);
++ ret = __fc_track_fn(handle, inode, args, update);
+ mutex_unlock(&ei->i_fc_lock);
+
+ if (!enqueue)
+@@ -413,7 +420,8 @@ struct __track_dentry_update_args {
+ };
+
+ /* __track_fn for directory entry updates. Called with ei->i_fc_lock. */
+-static int __track_dentry_update(struct inode *inode, void *arg, bool update)
++static int __track_dentry_update(handle_t *handle, struct inode *inode,
++ void *arg, bool update)
+ {
+ struct ext4_fc_dentry_update *node;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+@@ -428,14 +436,14 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
+
+ if (IS_ENCRYPTED(dir)) {
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME,
+- NULL);
++ handle);
+ mutex_lock(&ei->i_fc_lock);
+ return -EOPNOTSUPP;
+ }
+
+ node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
+ if (!node) {
+- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL);
++ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
+ mutex_lock(&ei->i_fc_lock);
+ return -ENOMEM;
+ }
+@@ -447,7 +455,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
+ node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
+ if (!node->fcd_name.name) {
+ kmem_cache_free(ext4_fc_dentry_cachep, node);
+- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL);
++ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle);
+ mutex_lock(&ei->i_fc_lock);
+ return -ENOMEM;
+ }
+@@ -569,7 +577,8 @@ void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
+ }
+
+ /* __track_fn for inode tracking */
+-static int __track_inode(struct inode *inode, void *arg, bool update)
++static int __track_inode(handle_t *handle, struct inode *inode, void *arg,
++ bool update)
+ {
+ if (update)
+ return -EEXIST;
+@@ -607,7 +616,8 @@ struct __track_range_args {
+ };
+
+ /* __track_fn for tracking data updates */
+-static int __track_range(struct inode *inode, void *arg, bool update)
++static int __track_range(handle_t *handle, struct inode *inode, void *arg,
++ bool update)
+ {
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_lblk_t oldstart;
+@@ -649,6 +659,12 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
+ if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
+ return;
+
++ if (ext4_has_inline_data(inode)) {
++ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR,
++ handle);
++ return;
++ }
++
+ args.start = start;
+ args.end = end;
+
+@@ -1207,7 +1223,7 @@ int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
+ if (ret == -EALREADY) {
+ /* There was an ongoing commit, check if we need to restart */
+ if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
+- commit_tid > journal->j_commit_sequence)
++ tid_gt(commit_tid, journal->j_commit_sequence))
+ goto restart_fc;
+ ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
+ commit_tid);
+@@ -1282,8 +1298,21 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
+ list_del_init(&iter->i_fc_list);
+ ext4_clear_inode_state(&iter->vfs_inode,
+ EXT4_STATE_FC_COMMITTING);
+- if (iter->i_sync_tid <= tid)
++ if (tid_geq(tid, iter->i_sync_tid)) {
+ ext4_fc_reset_inode(&iter->vfs_inode);
++ } else if (full) {
++ /*
++ * We are called after a full commit, inode has been
++ * modified while the commit was running. Re-enqueue
++ * the inode into STAGING, which will then be splice
++ * back into MAIN. This cannot happen during
++ * fastcommit because the journal is locked all the
++ * time in that case (and tid doesn't increase so
++ * tid check above isn't reliable).
++ */
++ list_add_tail(&EXT4_I(&iter->vfs_inode)->i_fc_list,
++ &sbi->s_fc_q[FC_Q_STAGING]);
++ }
+ /* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
+ smp_mb();
+ #if (BITS_PER_LONG < 64)
+@@ -1313,7 +1342,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
+ list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
+ &sbi->s_fc_q[FC_Q_MAIN]);
+
+- if (tid >= sbi->s_fc_ineligible_tid) {
++ if (tid_geq(tid, sbi->s_fc_ineligible_tid)) {
+ sbi->s_fc_ineligible_tid = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ }
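Several hunks above replace raw <, <= and >= comparisons of journal transaction IDs with tid_gt()/tid_geq(). These helpers stay correct when the 32-bit tid counter wraps around, because they compare the sign of the difference rather than the values themselves. A self-contained sketch of the technique (it mirrors jbd2's helpers, but the definitions here are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t tid_t;

/* Wrap-safe "x comes after y": look at the sign of the unsigned difference. */
static int tid_gt(tid_t x, tid_t y)  { return (int32_t)(x - y) > 0; }
static int tid_geq(tid_t x, tid_t y) { return (int32_t)(x - y) >= 0; }

int main(void)
{
	tid_t old_tid = 0xfffffff0u;  /* issued just before the counter wrapped */
	tid_t new_tid = 0x10u;        /* issued just after */

	printf("naive: %d  tid_gt: %d\n",
	       new_tid > old_tid, tid_gt(new_tid, old_tid));
	/* prints "naive: 0  tid_gt: 1": the raw comparison inverts the order */
	return tid_geq(new_tid, new_tid) ? 0 : 1;
}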
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 6830ea3a6c59c6..c71af675e310af 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -306,80 +306,38 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
+ }
+
+ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+- ssize_t written, size_t count)
++ ssize_t count)
+ {
+ handle_t *handle;
+- bool truncate = false;
+- u8 blkbits = inode->i_blkbits;
+- ext4_lblk_t written_blk, end_blk;
+- int ret;
+-
+- /*
+- * Note that EXT4_I(inode)->i_disksize can get extended up to
+- * inode->i_size while the I/O was running due to writeback of delalloc
+- * blocks. But, the code in ext4_iomap_alloc() is careful to use
+- * zeroed/unwritten extents if this is possible; thus we won't leave
+- * uninitialized blocks in a file even if we didn't succeed in writing
+- * as much as we intended.
+- */
+- WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+- if (offset + count <= EXT4_I(inode)->i_disksize) {
+- /*
+- * We need to ensure that the inode is removed from the orphan
+- * list if it has been added prematurely, due to writeback of
+- * delalloc blocks.
+- */
+- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-
+- if (IS_ERR(handle)) {
+- ext4_orphan_del(NULL, inode);
+- return PTR_ERR(handle);
+- }
+-
+- ext4_orphan_del(handle, inode);
+- ext4_journal_stop(handle);
+- }
+-
+- return written;
+- }
+-
+- if (written < 0)
+- goto truncate;
+
++ lockdep_assert_held_write(&inode->i_rwsem);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+- if (IS_ERR(handle)) {
+- written = PTR_ERR(handle);
+- goto truncate;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
+
+- if (ext4_update_inode_size(inode, offset + written)) {
+- ret = ext4_mark_inode_dirty(handle, inode);
++ if (ext4_update_inode_size(inode, offset + count)) {
++ int ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret)) {
+- written = ret;
+ ext4_journal_stop(handle);
+- goto truncate;
++ return ret;
+ }
+ }
+
+- /*
+- * We may need to truncate allocated but not written blocks beyond EOF.
+- */
+- written_blk = ALIGN(offset + written, 1 << blkbits);
+- end_blk = ALIGN(offset + count, 1 << blkbits);
+- if (written_blk < end_blk && ext4_can_truncate(inode))
+- truncate = true;
+-
+- /*
+- * Remove the inode from the orphan list if it has been extended and
+- * everything went OK.
+- */
+- if (!truncate && inode->i_nlink)
++ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+
+- if (truncate) {
+-truncate:
++ return count;
++}
++
++/*
++ * Clean up the inode after a DIO or DAX extending write has completed and the
++ * inode size has been updated using ext4_handle_inode_extension().
++ */
++static void ext4_inode_extension_cleanup(struct inode *inode, bool need_trunc)
++{
++ lockdep_assert_held_write(&inode->i_rwsem);
++ if (need_trunc) {
+ ext4_truncate_failed_write(inode);
+ /*
+ * If the truncate operation failed early, then the inode may
+@@ -388,9 +346,29 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
++ return;
+ }
++ /*
++ * If i_disksize got extended either due to writeback of delalloc
++ * blocks or an extending truncate while the DIO was running, we could fail
++ * to clean up the orphan list in ext4_handle_inode_extension(). Do it
++ * now.
++ */
++ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++ handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+
+- return written;
++ if (IS_ERR(handle)) {
++ /*
++ * The write has successfully completed; there is not
++ * much we can do about the error here, so just clean
++ * up the orphan list and hope for the best.
++ */
++ ext4_orphan_del(NULL, inode);
++ return;
++ }
++ ext4_orphan_del(handle, inode);
++ ext4_journal_stop(handle);
++ }
+ }
+
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+@@ -399,31 +377,23 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
++ if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
++ error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+ if (error)
+ return error;
+-
+- if (size && flags & IOMAP_DIO_UNWRITTEN) {
+- error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+- if (error < 0)
+- return error;
+- }
+ /*
+- * If we are extending the file, we have to update i_size here before
+- * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+- * buffered reads could zero out too much from page cache pages. Update
+- * of on-disk size will happen later in ext4_dio_write_iter() where
+- * we have enough information to also perform orphan list handling etc.
+- * Note that we perform all extending writes synchronously under
+- * i_rwsem held exclusively so i_size update is safe here in that case.
+- * If the write was not extending, we cannot see pos > i_size here
+- * because operations reducing i_size like truncate wait for all
+- * outstanding DIO before updating i_size.
++ * Note that EXT4_I(inode)->i_disksize can get extended up to
++ * inode->i_size while the I/O was running due to writeback of delalloc
++ * blocks. But the code in ext4_iomap_alloc() is careful to use
++ * zeroed/unwritten extents if this is possible; thus we won't leave
++ * uninitialized blocks in a file even if we didn't succeed in writing
++ * as much as we intended. Also we can race with truncate or write
++ * expanding the file so we have to be a bit careful here.
+ */
+- pos += size;
+- if (pos > i_size_read(inode))
+- i_size_write(inode, pos);
+-
+- return 0;
++ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
++ pos + size <= i_size_read(inode))
++ return size;
++ return ext4_handle_inode_extension(inode, pos, size);
+ }
+
+ static const struct iomap_dio_ops ext4_dio_write_ops = {
+@@ -569,18 +539,20 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ return ext4_buffered_write_iter(iocb, from);
+ }
+
++ /*
++ * Prevent inline data from being created since we are going to allocate
++ * blocks for DIO. We know the inode does not currently have inline data
++ * because ext4_should_use_dio() checked for it, but we have to clear
++ * the state flag before the write checks because a lock cycle could
++ * introduce races with other writers.
++ */
++ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++
+ ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
+ &unwritten, &dio_flags);
+ if (ret <= 0)
+ return ret;
+
+- /*
+- * Make sure inline data cannot be created anymore since we are going
+- * to allocate blocks for DIO. We know the inode does not have any
+- * inline data now because ext4_dio_supported() checked for that.
+- */
+- ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+-
+ offset = iocb->ki_pos;
+ count = ret;
+
+@@ -606,9 +578,16 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ dio_flags, NULL, 0);
+ if (ret == -ENOTBLK)
+ ret = 0;
+-
+- if (extend)
+- ret = ext4_handle_inode_extension(inode, offset, ret, count);
++ if (extend) {
++ /*
++ * We always perform extending DIO writes synchronously, so by
++ * now the IO has completed and ext4_handle_inode_extension()
++ * was called. Clean up the inode in case of an error or a race with
++ * writeback of delalloc blocks.
++ */
++ WARN_ON_ONCE(ret == -EIOCBQUEUED);
++ ext4_inode_extension_cleanup(inode, ret < 0);
++ }
+
+ out:
+ if (ilock_shared)
+@@ -689,8 +668,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+
+ ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+
+- if (extend)
+- ret = ext4_handle_inode_extension(inode, offset, ret, count);
++ if (extend) {
++ ret = ext4_handle_inode_extension(inode, offset, ret);
++ ext4_inode_extension_cleanup(inode, ret < (ssize_t)count);
++ }
+ out:
+ inode_unlock(inode);
+ if (ret > 0)
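The rewritten ext4_dio_write_end_io() above only starts a transaction when the write actually extended the file past both i_size and i_disksize; otherwise it returns straight away. A rough userspace model of that fast-path check (the field and function names are simplified stand-ins, not the ext4 API):

#include <stdio.h>

struct sizes { long i_size; long i_disksize; };

static long dio_end_io(struct sizes *ino, long pos, long size)
{
	/* Fast path: the write stayed inside both the in-memory and the
	 * on-disk size, so no journalled update is needed at all. */
	if (pos + size <= ino->i_disksize && pos + size <= ino->i_size)
		return size;

	/* Slow path: in ext4 this starts a transaction, updates i_size and
	 * i_disksize, and drops the inode from the orphan list. */
	ino->i_size = ino->i_disksize = pos + size;
	return size;
}

int main(void)
{
	struct sizes ino = { .i_size = 4096, .i_disksize = 4096 };
	printf("%ld\n", dio_end_io(&ino, 0, 512));     /* fast path */
	printf("%ld\n", dio_end_io(&ino, 4096, 512));  /* extending write */
	return 0;
}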
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index b65058d972f956..1a1e2214c581f3 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -514,6 +514,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
+ if (min_inodes < 1)
+ min_inodes = 1;
+ min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
++ if (min_clusters < 0)
++ min_clusters = 0;
+
+ /*
+ * Start looking in the flex group where we last allocated an
+@@ -755,10 +757,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
+ struct ext4_group_desc *gdp;
+ ext4_group_t group;
+ int bit;
+- int err = -EFSCORRUPTED;
++ int err;
+
+ if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
+- goto out;
++ return -EFSCORRUPTED;
+
+ group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+ bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
+@@ -860,6 +862,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
+ err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
+ sync_dirty_buffer(group_desc_bh);
+ out:
++ brelse(inode_bitmap_bh);
+ return err;
+ }
+
+@@ -1053,12 +1056,13 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap,
+ brelse(inode_bitmap_bh);
+ inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
+ /* Skip groups with suspicious inode tables */
+- if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
+- && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
+- IS_ERR(inode_bitmap_bh)) {
++ if (IS_ERR(inode_bitmap_bh)) {
+ inode_bitmap_bh = NULL;
+ goto next_group;
+ }
++ if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
++ EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
++ goto next_group;
+
+ repeat_in_this_group:
+ ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
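The ext4_mark_inode_used() change above restores the classic single-exit cleanup idiom: once the bitmap buffer has been read, every path funnels through one label that drops the reference, and only the check made before anything was acquired returns directly. A sketch under the same shape (bread()/brelse() here are simplified stand-ins):

#include <stdlib.h>

struct buffer_head { char data[64]; };

static struct buffer_head *bread(int blk)
{
	(void)blk;
	return malloc(sizeof(struct buffer_head));
}

static void brelse(struct buffer_head *bh) { free(bh); }  /* NULL is a no-op */

int mark_inode_used(int ino)
{
	struct buffer_head *bh;
	int err;

	if (ino < 1)
		return -1;      /* nothing acquired yet: direct return is safe */

	bh = bread(ino);
	if (!bh) {
		err = -2;
		goto out;       /* brelse(NULL) below is harmless */
	}

	/* ... set the bit, dirty the buffer ... */
	err = 0;
out:
	brelse(bh);             /* released on success and on every error path */
	return err;
}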
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 012d9259ff5320..cb65052ee3dec6 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1411,7 +1411,11 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
+ hinfo->hash = EXT4_DIRENT_HASH(de);
+ hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+ } else {
+- ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
++ err = ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
++ if (err) {
++ ret = err;
++ goto out;
++ }
+ }
+ if ((hinfo->hash < start_hash) ||
+ ((hinfo->hash == start_hash) &&
+@@ -1661,24 +1665,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+ struct ext4_dir_entry_2 **res_dir,
+ int *has_inline_data)
+ {
++ struct ext4_xattr_ibody_find is = {
++ .s = { .not_found = -ENODATA, },
++ };
++ struct ext4_xattr_info i = {
++ .name_index = EXT4_XATTR_INDEX_SYSTEM,
++ .name = EXT4_XATTR_SYSTEM_DATA,
++ };
+ int ret;
+- struct ext4_iloc iloc;
+ void *inline_start;
+ int inline_size;
+
+- if (ext4_get_inode_loc(dir, &iloc))
+- return NULL;
++ ret = ext4_get_inode_loc(dir, &is.iloc);
++ if (ret)
++ return ERR_PTR(ret);
+
+ down_read(&EXT4_I(dir)->xattr_sem);
++
++ ret = ext4_xattr_ibody_find(dir, &i, &is);
++ if (ret)
++ goto out;
++
+ if (!ext4_has_inline_data(dir)) {
+ *has_inline_data = 0;
+ goto out;
+ }
+
+- inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
++ inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block +
+ EXT4_INLINE_DOTDOT_SIZE;
+ inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
+- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
++ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
+ dir, fname, 0, res_dir);
+ if (ret == 1)
+ goto out_find;
+@@ -1688,20 +1704,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+ if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
+ goto out;
+
+- inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
++ inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc);
+ inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
+
+- ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
++ ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size,
+ dir, fname, 0, res_dir);
+ if (ret == 1)
+ goto out_find;
+
+ out:
+- brelse(iloc.bh);
+- iloc.bh = NULL;
++ brelse(is.iloc.bh);
++ if (ret < 0)
++ is.iloc.bh = ERR_PTR(ret);
++ else
++ is.iloc.bh = NULL;
+ out_find:
+ up_read(&EXT4_I(dir)->xattr_sem);
+- return iloc.bh;
++ return is.iloc.bh;
+ }
+
+ int ext4_delete_inline_entry(handle_t *handle,
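ext4_find_inline_entry() now distinguishes "entry not found" (NULL) from "the lookup itself failed" (ERR_PTR(-err)), so callers stop treating I/O errors as missing entries. The kernel encodes small negative errnos in the top page of the pointer space; a minimal userspace model of that convention:

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* NULL means "not found"; ERR_PTR(-errno) means the lookup itself failed. */
static void *find_entry(int simulate_io_error)
{
	if (simulate_io_error)
		return ERR_PTR(-5);   /* -EIO */
	return NULL;
}

int main(void)
{
	void *p = find_entry(1);

	if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));
	else if (!p)
		printf("not found\n");
	return 0;
}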
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4ce35f1c8b0a84..14f7098bcefe1c 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -453,6 +453,35 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
+ }
+ #endif /* ES_AGGRESSIVE_TEST */
+
++static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map)
++{
++ unsigned int status;
++ int retval;
++
++ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ retval = ext4_ext_map_blocks(handle, inode, map, 0);
++ else
++ retval = ext4_ind_map_blocks(handle, inode, map, 0);
++
++ if (retval <= 0)
++ return retval;
++
++ if (unlikely(retval != map->m_len)) {
++ ext4_warning(inode->i_sb,
++ "ES len assertion failed for inode "
++ "%lu: retval %d != map->m_len %d",
++ inode->i_ino, retval, map->m_len);
++ WARN_ON(1);
++ }
++
++ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
++ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
++ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
++ map->m_pblk, status);
++ return retval;
++}
++
+ /*
+ * The ext4_map_blocks() function tries to look up the requested blocks,
+ * and returns if the blocks are already mapped.
+@@ -789,10 +818,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
+ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+ {
++ int ret = 0;
++
+ ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
+ inode->i_ino, create);
+- return _ext4_get_block(inode, iblock, bh_result,
++ ret = _ext4_get_block(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++
++ /*
++ * If the buffer is marked unwritten, mark it as new to make sure it is
++ * zeroed out correctly in case of partial writes. Otherwise, there is
++ * a chance of stale data getting exposed.
++ */
++ if (ret == 0 && buffer_unwritten(bh_result))
++ set_buffer_new(bh_result);
++
++ return ret;
+ }
+
+ /* Maximum number of blocks we map for direct IO at once. */
+@@ -1693,12 +1734,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+
+ /* Lookup extent status tree firstly */
+ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
+- if (ext4_es_is_hole(&es)) {
+- retval = 0;
+- down_read(&EXT4_I(inode)->i_data_sem);
++ if (ext4_es_is_hole(&es))
+ goto add_delayed;
+- }
+
++found:
+ /*
+ * Delayed extent could be allocated by fallocate.
+ * So we need to check it.
+@@ -1735,49 +1774,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+ down_read(&EXT4_I(inode)->i_data_sem);
+ if (ext4_has_inline_data(inode))
+ retval = 0;
+- else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- retval = ext4_ext_map_blocks(NULL, inode, map, 0);
+ else
+- retval = ext4_ind_map_blocks(NULL, inode, map, 0);
++ retval = ext4_map_query_blocks(NULL, inode, map);
++ up_read(&EXT4_I(inode)->i_data_sem);
++ if (retval)
++ return retval;
+
+ add_delayed:
+- if (retval == 0) {
+- int ret;
+-
+- /*
+- * XXX: __block_prepare_write() unmaps passed block,
+- * is it OK?
+- */
+-
+- ret = ext4_insert_delayed_block(inode, map->m_lblk);
+- if (ret != 0) {
+- retval = ret;
+- goto out_unlock;
++ down_write(&EXT4_I(inode)->i_data_sem);
++ /*
++ * Page fault path (ext4_page_mkwrite does not take i_rwsem)
++ * and fallocate path (no folio lock) can race. Make sure we
++ * lookup the extent status tree here again while i_data_sem
++ * is held in write mode, before inserting a new da entry in
++ * the extent status tree.
++ */
++ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
++ if (!ext4_es_is_hole(&es)) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto found;
+ }
+-
+- map_bh(bh, inode->i_sb, invalid_block);
+- set_buffer_new(bh);
+- set_buffer_delay(bh);
+- } else if (retval > 0) {
+- unsigned int status;
+-
+- if (unlikely(retval != map->m_len)) {
+- ext4_warning(inode->i_sb,
+- "ES len assertion failed for inode "
+- "%lu: retval %d != map->m_len %d",
+- inode->i_ino, retval, map->m_len);
+- WARN_ON(1);
++ } else if (!ext4_has_inline_data(inode)) {
++ retval = ext4_map_query_blocks(NULL, inode, map);
++ if (retval) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ return retval;
+ }
+-
+- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+- map->m_pblk, status);
+ }
+
+-out_unlock:
+- up_read((&EXT4_I(inode)->i_data_sem));
++ retval = ext4_insert_delayed_block(inode, map->m_lblk);
++ up_write(&EXT4_I(inode)->i_data_sem);
++ if (retval)
++ return retval;
+
++ map_bh(bh, inode->i_sb, invalid_block);
++ set_buffer_new(bh);
++ set_buffer_delay(bh);
+ return retval;
+ }
+
+@@ -2330,7 +2362,7 @@ static int mpage_journal_page_buffers(handle_t *handle,
+
+ if (folio_pos(folio) + len > size &&
+ !ext4_verity_in_progress(inode))
+- len = size - folio_pos(folio);
++ len = size & (len - 1);
+
+ return ext4_journal_folio_buffers(handle, folio, len);
+ }
+@@ -2883,9 +2915,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+- /* In case writeback began while the folio was unlocked */
+- folio_wait_stable(folio);
+-
+ #ifdef CONFIG_FS_ENCRYPTION
+ ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
+ #else
+@@ -2937,23 +2966,29 @@ static int ext4_da_should_update_i_disksize(struct folio *folio,
+
+ static int ext4_da_do_write_end(struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+- struct page *page)
++ struct folio *folio)
+ {
+ struct inode *inode = mapping->host;
+ loff_t old_size = inode->i_size;
+ bool disksize_changed = false;
+ loff_t new_i_size;
+
++ if (unlikely(!folio_buffers(folio))) {
++ folio_unlock(folio);
++ folio_put(folio);
++ return -EIO;
++ }
+ /*
+ * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
+ * flag, which is all that's needed to trigger page writeback.
+ */
+- copied = block_write_end(NULL, mapping, pos, len, copied, page, NULL);
++ copied = block_write_end(NULL, mapping, pos, len, copied,
++ &folio->page, NULL);
+ new_i_size = pos + copied;
+
+ /*
+- * It's important to update i_size while still holding page lock,
+- * because page writeout could otherwise come in and zero beyond
++ * It's important to update i_size while still holding folio lock,
++ * because folio writeout could otherwise come in and zero beyond
+ * i_size.
+ *
+ * Since we are holding inode lock, we are sure i_disksize <=
+@@ -2971,14 +3006,14 @@ static int ext4_da_do_write_end(struct address_space *mapping,
+
+ i_size_write(inode, new_i_size);
+ end = (new_i_size - 1) & (PAGE_SIZE - 1);
+- if (copied && ext4_da_should_update_i_disksize(page_folio(page), end)) {
++ if (copied && ext4_da_should_update_i_disksize(folio, end)) {
+ ext4_update_i_disksize(inode, new_i_size);
+ disksize_changed = true;
+ }
+ }
+
+- unlock_page(page);
+- put_page(page);
++ folio_unlock(folio);
++ folio_put(folio);
+
+ if (old_size < pos)
+ pagecache_isize_extended(inode, old_size, pos);
+@@ -3017,10 +3052,10 @@ static int ext4_da_write_end(struct file *file,
+ return ext4_write_inline_data_end(inode, pos, len, copied,
+ folio);
+
+- if (unlikely(copied < len) && !PageUptodate(page))
++ if (unlikely(copied < len) && !folio_test_uptodate(folio))
+ copied = 0;
+
+- return ext4_da_do_write_end(mapping, pos, len, copied, &folio->page);
++ return ext4_da_do_write_end(mapping, pos, len, copied, folio);
+ }
+
+ /*
+@@ -5222,8 +5257,9 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
+ {
+ unsigned offset;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+- tid_t commit_tid = 0;
++ tid_t commit_tid;
+ int ret;
++ bool has_transaction;
+
+ offset = inode->i_size & (PAGE_SIZE - 1);
+ /*
+@@ -5248,12 +5284,14 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
+ folio_put(folio);
+ if (ret != -EBUSY)
+ return;
+- commit_tid = 0;
++ has_transaction = false;
+ read_lock(&journal->j_state_lock);
+- if (journal->j_committing_transaction)
++ if (journal->j_committing_transaction) {
+ commit_tid = journal->j_committing_transaction->t_tid;
++ has_transaction = true;
++ }
+ read_unlock(&journal->j_state_lock);
+- if (commit_tid)
++ if (has_transaction)
+ jbd2_log_wait_commit(journal, commit_tid);
+ }
+ }
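The reworked ext4_da_map_blocks() drops its read-side lock after the first lookup, so the tree can change before the write side is taken; hence the second lookup under the write lock before a delayed entry is inserted. A userspace sketch of the re-check pattern (a pthread rwlock stands in for i_data_sem; the lookup/insert helpers are stubs):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t i_data_sem = PTHREAD_RWLOCK_INITIALIZER;

static bool es_lookup(long lblk) { (void)lblk; return false; }  /* stub */
static void insert_delayed(long lblk) { (void)lblk; }           /* stub */

void da_map_block(long lblk)
{
	bool found;

	pthread_rwlock_rdlock(&i_data_sem);
	found = es_lookup(lblk);
	pthread_rwlock_unlock(&i_data_sem);
	if (found)
		return;                /* mapped or delayed already */

	pthread_rwlock_wrlock(&i_data_sem);
	if (!es_lookup(lblk))          /* re-check: a page fault or fallocate */
		insert_delayed(lblk);  /* may have raced in between the locks */
	pthread_rwlock_unlock(&i_data_sem);
}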
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1e599305d85fa2..87ba7f58216f70 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -417,8 +417,6 @@ static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+
+ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ ext4_group_t group);
+-static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+- ext4_group_t group);
+ static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
+
+ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
+@@ -833,6 +831,8 @@ static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
+ return 0;
+ if (order == MB_NUM_ORDERS(sb))
+ order--;
++ if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
++ order = MB_NUM_ORDERS(sb) - 1;
+ return order;
+ }
+
+@@ -843,7 +843,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int new_order;
+
+- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
+ return;
+
+ new_order = mb_avg_fragment_size_order(sb,
+@@ -1010,6 +1010,8 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
+ * goal length.
+ */
+ order = fls(ac->ac_g_ex.fe_len) - 1;
++ if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
++ order = MB_NUM_ORDERS(ac->ac_sb);
+ min_order = order - sbi->s_mb_best_avail_max_trim_order;
+ if (min_order < 0)
+ min_order = 0;
+@@ -1234,6 +1236,24 @@ void ext4_mb_generate_buddy(struct super_block *sb,
+ atomic64_add(period, &sbi->s_mb_generation_time);
+ }
+
++static void mb_regenerate_buddy(struct ext4_buddy *e4b)
++{
++ int count;
++ int order = 1;
++ void *buddy;
++
++ while ((buddy = mb_find_buddy(e4b, order++, &count)))
++ mb_set_bits(buddy, 0, count);
++
++ e4b->bd_info->bb_fragments = 0;
++ memset(e4b->bd_info->bb_counters, 0,
++ sizeof(*e4b->bd_info->bb_counters) *
++ (e4b->bd_sb->s_blocksize_bits + 2));
++
++ ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
++ e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
++}
++
+ /* The buddy information is attached to the buddy cache inode
+ * for convenience. The information regarding each group
+ * is loaded via ext4_mb_load_buddy. The information involves
+@@ -1361,17 +1381,17 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ * We place the buddy block and bitmap block
+ * close together
+ */
++ grinfo = ext4_get_group_info(sb, group);
++ if (!grinfo) {
++ err = -EFSCORRUPTED;
++ goto out;
++ }
+ if ((first_block + i) & 1) {
+ /* this is block of buddy */
+ BUG_ON(incore == NULL);
+ mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
+ group, page->index, i * blocksize);
+ trace_ext4_mb_buddy_bitmap_load(sb, group);
+- grinfo = ext4_get_group_info(sb, group);
+- if (!grinfo) {
+- err = -EFSCORRUPTED;
+- goto out;
+- }
+ grinfo->bb_fragments = 0;
+ memset(grinfo->bb_counters, 0,
+ sizeof(*grinfo->bb_counters) *
+@@ -1398,7 +1418,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+
+ /* mark all preallocated blks used in in-core bitmap */
+ ext4_mb_generate_from_pa(sb, data, group);
+- ext4_mb_generate_from_freelist(sb, data, group);
++ WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
+ ext4_unlock_group(sb, group);
+
+ /* set incore so that the buddy information can be
+@@ -1893,11 +1913,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ mb_check_buddy(e4b);
+ mb_free_blocks_double(inode, e4b, first, count);
+
+- this_cpu_inc(discard_pa_seq);
+- e4b->bd_info->bb_free += count;
+- if (first < e4b->bd_info->bb_first_free)
+- e4b->bd_info->bb_first_free = first;
+-
+ /* access memory sequentially: check left neighbour,
+ * clear range and then check right neighbour
+ */
+@@ -1911,21 +1926,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t blocknr;
+
++ /*
++ * Fastcommit replay can free already freed blocks which
++ * corrupts allocation info. Regenerate it.
++ */
++ if (sbi->s_mount_state & EXT4_FC_REPLAY) {
++ mb_regenerate_buddy(e4b);
++ goto check;
++ }
++
+ blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
+ blocknr += EXT4_C2B(sbi, block);
+- if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
+- ext4_grp_locked_error(sb, e4b->bd_group,
+- inode ? inode->i_ino : 0,
+- blocknr,
+- "freeing already freed block (bit %u); block bitmap corrupt.",
+- block);
+- ext4_mark_group_bitmap_corrupted(
+- sb, e4b->bd_group,
++ ext4_grp_locked_error(sb, e4b->bd_group,
++ inode ? inode->i_ino : 0, blocknr,
++ "freeing already freed block (bit %u); block bitmap corrupt.",
++ block);
++ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+- }
+- goto done;
++ return;
+ }
+
++ this_cpu_inc(discard_pa_seq);
++ e4b->bd_info->bb_free += count;
++ if (first < e4b->bd_info->bb_first_free)
++ e4b->bd_info->bb_first_free = first;
++
+ /* let's maintain fragments counter */
+ if (left_is_free && right_is_free)
+ e4b->bd_info->bb_fragments--;
+@@ -1950,9 +1975,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ if (first <= last)
+ mb_buddy_mark_free(e4b, first >> 1, last >> 1);
+
+-done:
+ mb_set_largest_free_order(sb, e4b->bd_info);
+ mb_update_avg_fragment_size(sb, e4b->bd_info);
++check:
+ mb_check_buddy(e4b);
+ }
+
+@@ -2283,6 +2308,9 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+ return;
+
+ ext4_lock_group(ac->ac_sb, group);
++ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++ goto out;
++
+ max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
+
+ if (max > 0) {
+@@ -2290,6 +2318,7 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+ ext4_mb_use_best_found(ac, e4b);
+ }
+
++out:
+ ext4_unlock_group(ac->ac_sb, group);
+ ext4_mb_unload_buddy(e4b);
+ }
+@@ -2316,12 +2345,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ if (err)
+ return err;
+
+- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
+- ext4_mb_unload_buddy(e4b);
+- return 0;
+- }
+-
+ ext4_lock_group(ac->ac_sb, group);
++ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++ goto out;
++
+ max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
+ ac->ac_g_ex.fe_len, &ex);
+ ex.fe_logical = 0xDEADFA11; /* debug value */
+@@ -2354,6 +2381,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ ac->ac_b_ex = ex;
+ ext4_mb_use_best_found(ac, e4b);
+ }
++out:
+ ext4_unlock_group(ac->ac_sb, group);
+ ext4_mb_unload_buddy(e4b);
+
+@@ -3036,7 +3064,10 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ for (i = 0; i <= 13; i++)
+ seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
+ sg.info.bb_counters[i] : 0);
+- seq_puts(seq, " ]\n");
++ seq_puts(seq, " ]");
++ if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
++ seq_puts(seq, " Block bitmap corrupted!");
++ seq_puts(seq, "\n");
+
+ return 0;
+ }
+@@ -3855,11 +3886,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
+ /*
+ * Clear the trimmed flag for the group so that the next
+ * ext4_trim_fs can trim it.
+- * If the volume is mounted with -o discard, online discard
+- * is supported and the free blocks will be trimmed online.
+ */
+- if (!test_opt(sb, DISCARD))
+- EXT4_MB_GRP_CLEAR_TRIMMED(db);
++ EXT4_MB_GRP_CLEAR_TRIMMED(db);
+
+ if (!db->bb_free_root.rb_node) {
+ /* No more items in the per group rb tree
+@@ -4491,6 +4519,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+ start = max(start, rounddown(ac->ac_o_ex.fe_logical,
+ (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
+
++ /* avoid unnecessary preallocation that may trigger assertions */
++ if (start + size > EXT_MAX_BLOCKS)
++ size = EXT_MAX_BLOCKS - start;
++
+ /* don't cover already allocated blocks in selected range */
+ if (ar->pleft && start <= ar->lleft) {
+ size -= ar->lleft + 1 - start;
+@@ -4958,31 +4990,6 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
+ return false;
+ }
+
+-/*
+- * the function goes through all block freed in the group
+- * but not yet committed and marks them used in in-core bitmap.
+- * buddy must be generated from this bitmap
+- * Need to be called with the ext4 group lock held
+- */
+-static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+- ext4_group_t group)
+-{
+- struct rb_node *n;
+- struct ext4_group_info *grp;
+- struct ext4_free_data *entry;
+-
+- grp = ext4_get_group_info(sb, group);
+- if (!grp)
+- return;
+- n = rb_first(&(grp->bb_free_root));
+-
+- while (n) {
+- entry = rb_entry(n, struct ext4_free_data, efd_node);
+- mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
+- n = rb_next(n);
+- }
+-}
+-
+ /*
+ * the function goes through all preallocation in this group and marks them
+ * used in in-core bitmap. buddy must be generated from this bitmap
+@@ -5181,10 +5188,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ .fe_len = ac->ac_orig_goal_len,
+ };
+ loff_t orig_goal_end = extent_logical_end(sbi, &ex);
++ loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
+
+- /* we can't allocate as much as normalizer wants.
+- * so, found space must get proper lstart
+- * to cover original request */
++ /*
++	 * We can't allocate as much as the normalizer wants, so we try
++	 * to get a proper lstart to cover the original request, except
++ * when the goal doesn't cover the original request as below:
++ *
++ * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
++ * best_ex:0/200(200) -> adjusted: 1848/2048(200)
++ */
+ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
+ BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
+
+@@ -5196,7 +5209,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ * 1. Check if best ex can be kept at end of goal (before
+ * cr_best_avail trimmed it) and still cover original start
+ * 2. Else, check if best ex can be kept at start of goal and
+- * still cover original start
++ * still cover original end
+ * 3. Else, keep the best ex at start of original request.
+ */
+ ex.fe_len = ac->ac_b_ex.fe_len;
+@@ -5206,7 +5219,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ goto adjust_bex;
+
+ ex.fe_logical = ac->ac_g_ex.fe_logical;
+- if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
++ if (o_ex_end <= extent_logical_end(sbi, &ex))
+ goto adjust_bex;
+
+ ex.fe_logical = ac->ac_o_ex.fe_logical;
+@@ -5214,7 +5227,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ ac->ac_b_ex.fe_logical = ex.fe_logical;
+
+ BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+- BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
+ }
+
+@@ -6133,6 +6145,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
+ ext4_mb_mark_bb(sb, block, 1, 1);
+ ar->len = 1;
+
++ *errp = 0;
+ return block;
+ }
+
+@@ -6570,8 +6583,9 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
+ " group:%u block:%d count:%lu failed"
+ " with %d", block_group, bit, count,
+ err);
+- } else
+- EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
++ }
++
++ EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
+
+ ext4_lock_group(sb, block_group);
+ mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
+@@ -6910,11 +6924,16 @@ __acquires(bitlock)
+ static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+ {
+- if (grp < ext4_get_groups_count(sb))
+- return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+- return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+- ext4_group_first_block_no(sb, grp) - 1) >>
+- EXT4_CLUSTER_BITS(sb);
++ unsigned long nr_clusters_in_group;
++
++ if (grp < (ext4_get_groups_count(sb) - 1))
++ nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
++ else
++ nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++ ext4_group_first_block_no(sb, grp))
++ >> EXT4_CLUSTER_BITS(sb);
++
++ return nr_clusters_in_group - 1;
+ }
+
+ static bool ext4_trim_interrupted(void)
+@@ -6928,13 +6947,18 @@ static int ext4_try_to_trim_range(struct super_block *sb,
+ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
+ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ {
+- ext4_grpblk_t next, count, free_count;
++ ext4_grpblk_t next, count, free_count, last, origin_start;
+ bool set_trimmed = false;
+ void *bitmap;
+
++ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++ return 0;
++
++ last = ext4_last_grp_cluster(sb, e4b->bd_group);
+ bitmap = e4b->bd_bitmap;
+- if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
++ if (start == 0 && max >= last)
+ set_trimmed = true;
++ origin_start = start;
+ start = max(e4b->bd_info->bb_first_free, start);
+ count = 0;
+ free_count = 0;
+@@ -6943,7 +6967,10 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
+ start = mb_find_next_zero_bit(bitmap, max + 1, start);
+ if (start > max)
+ break;
+- next = mb_find_next_bit(bitmap, max + 1, start);
++
++ next = mb_find_next_bit(bitmap, last + 1, start);
++ if (origin_start == 0 && next >= last)
++ set_trimmed = true;
+
+ if ((next - start) >= minblocks) {
+ int ret = ext4_trim_extent(sb, start, next - start, e4b);
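Two of the mballoc hunks above clamp fls()-derived buddy orders before they index per-order arrays, so a corrupted on-disk length cannot walk past the end of bb_counters and friends. A sketch of the computation with the added bound (NUM_ORDERS and the helper names are illustrative):

#include <stdio.h>

#define NUM_ORDERS 14   /* stand-in for MB_NUM_ORDERS(sb) */

static int fls32(unsigned int x)          /* find last set bit, 1-based */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int avg_fragment_size_order(unsigned int len)
{
	int order = fls32(len) - 2;

	if (order < 0)
		return 0;
	if (order >= NUM_ORDERS)          /* clamp: a corrupt length must */
		order = NUM_ORDERS - 1;   /* not index past the array end */
	return order;
}

int main(void)
{
	printf("%d %d %d\n", avg_fragment_size_order(1),
	       avg_fragment_size_order(4096), avg_fragment_size_order(~0u));
	return 0;
}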
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index d7aeb5da7d8676..498af2abc5d885 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -194,8 +194,8 @@ struct ext4_allocation_context {
+
+ __u32 ac_groups_considered;
+ __u32 ac_flags; /* allocation hints */
++ __u32 ac_groups_linear_remaining;
+ __u16 ac_groups_scanned;
+- __u16 ac_groups_linear_remaining;
+ __u16 ac_found;
+ __u16 ac_cX_found[EXT4_MB_NUM_CRS];
+ __u16 ac_tail;
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index d98ac2af8199f1..a5e1492bbaaa56 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -663,8 +663,8 @@ int ext4_ind_migrate(struct inode *inode)
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
+ errout:
+- ext4_journal_stop(handle);
+ up_write(&EXT4_I(inode)->i_data_sem);
++ ext4_journal_stop(handle);
+ out_unlock:
+ ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ return ret;
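The two-line migrate fix reorders teardown so releases nest strictly inside acquisitions: i_data_sem, taken after the journal handle was started, is now dropped before the handle is stopped. Sketched with pthread primitives (purely illustrative; the real hazard involves jbd2 transaction machinery, not a mutex):

#include <pthread.h>

static pthread_mutex_t journal = PTHREAD_MUTEX_INITIALIZER;   /* "handle" */
static pthread_rwlock_t i_data_sem = PTHREAD_RWLOCK_INITIALIZER;

void ind_migrate(void)
{
	pthread_mutex_lock(&journal);        /* ext4_journal_start() */
	pthread_rwlock_wrlock(&i_data_sem);

	/* ... migrate the extent tree to indirect blocks ... */

	pthread_rwlock_unlock(&i_data_sem);  /* release the inner lock first */
	pthread_mutex_unlock(&journal);      /* ext4_journal_stop() last */
}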
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 18a9e7c479754b..0bfd5ff103aa44 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -36,7 +36,6 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
+ *ppath = NULL;
+ return -ENODATA;
+ }
+- *ppath = path;
+ return 0;
+ }
+
+@@ -619,6 +618,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ goto out;
+ o_end = o_start + len;
+
++ *moved_len = 0;
+ while (o_start < o_end) {
+ struct ext4_extent *ex;
+ ext4_lblk_t cur_blk, next_blk;
+@@ -673,7 +673,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ */
+ ext4_double_up_write_data_sem(orig_inode, donor_inode);
+ /* Swap original branches with new branches */
+- move_extent_per_page(o_filp, donor_inode,
++ *moved_len += move_extent_per_page(o_filp, donor_inode,
+ orig_page_index, donor_page_index,
+ offset_in_page, cur_len,
+ unwritten, &ret);
+@@ -683,9 +683,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+ o_start += cur_len;
+ d_start += cur_len;
+ }
+- *moved_len = o_start - orig_blk;
+- if (*moved_len > len)
+- *moved_len = len;
+
+ out:
+ if (*moved_len) {
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index bbda587f76b85a..4de1f61bba76b3 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -151,10 +151,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+
+ return bh;
+ }
+- if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
++ /* The first directory block must not be a hole. */
++ if (!bh && (type == INDEX || type == DIRENT_HTREE || block == 0)) {
+ ext4_error_inode(inode, func, line, block,
+- "Directory hole found for htree %s block",
+- (type == INDEX) ? "index" : "leaf");
++ "Directory hole found for htree %s block %u",
++ (type == INDEX) ? "index" : "leaf", block);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ if (!bh)
+@@ -1525,7 +1526,7 @@ static bool ext4_match(struct inode *parent,
+ }
+
+ /*
+- * Returns 0 if not found, -1 on failure, and 1 on success
++ * Returns 0 if not found, -EFSCORRUPTED on failure, and 1 on success
+ */
+ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ struct inode *dir, struct ext4_filename *fname,
+@@ -1546,7 +1547,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ * a full check */
+ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
+ buf_size, offset))
+- return -1;
++ return -EFSCORRUPTED;
+ *res_dir = de;
+ return 1;
+ }
+@@ -1554,7 +1555,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ de_len = ext4_rec_len_from_disk(de->rec_len,
+ dir->i_sb->s_blocksize);
+ if (de_len <= 0)
+- return -1;
++ return -EFSCORRUPTED;
+ offset += de_len;
+ de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
+ }
+@@ -1706,8 +1707,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+ goto cleanup_and_exit;
+ } else {
+ brelse(bh);
+- if (i < 0)
++ if (i < 0) {
++ ret = ERR_PTR(i);
+ goto cleanup_and_exit;
++ }
+ }
+ next:
+ if (++block >= nblocks)
+@@ -1802,7 +1805,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+ if (retval == 1)
+ goto success;
+ brelse(bh);
+- if (retval == -1) {
++ if (retval < 0) {
+ bh = ERR_PTR(ERR_BAD_DX_DIR);
+ goto errout;
+ }
+@@ -2044,7 +2047,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ split = count/2;
+
+ hash2 = map[split].hash;
+- continued = hash2 == map[split - 1].hash;
++ continued = split > 0 ? hash2 == map[split - 1].hash : 0;
+ dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
+ (unsigned long)dx_get_block(frame->at),
+ hash2, split, count-split));
+@@ -2218,6 +2221,52 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
+ return err ? err : err2;
+ }
+
++static bool ext4_check_dx_root(struct inode *dir, struct dx_root *root)
++{
++ struct fake_dirent *fde;
++ const char *error_msg;
++ unsigned int rlen;
++ unsigned int blocksize = dir->i_sb->s_blocksize;
++ char *blockend = (char *)root + dir->i_sb->s_blocksize;
++
++ fde = &root->dot;
++ if (unlikely(fde->name_len != 1)) {
++ error_msg = "invalid name_len for '.'";
++ goto corrupted;
++ }
++ if (unlikely(strncmp(root->dot_name, ".", fde->name_len))) {
++ error_msg = "invalid name for '.'";
++ goto corrupted;
++ }
++ rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
++ if (unlikely((char *)fde + rlen >= blockend)) {
++ error_msg = "invalid rec_len for '.'";
++ goto corrupted;
++ }
++
++ fde = &root->dotdot;
++ if (unlikely(fde->name_len != 2)) {
++ error_msg = "invalid name_len for '..'";
++ goto corrupted;
++ }
++ if (unlikely(strncmp(root->dotdot_name, "..", fde->name_len))) {
++ error_msg = "invalid name for '..'";
++ goto corrupted;
++ }
++ rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize);
++ if (unlikely((char *)fde + rlen >= blockend)) {
++ error_msg = "invalid rec_len for '..'";
++ goto corrupted;
++ }
++
++ return true;
++
++corrupted:
++ EXT4_ERROR_INODE(dir, "Corrupt dir, %s, running e2fsck is recommended",
++ error_msg);
++ return false;
++}
++
+ /*
+ * This converts a one block unindexed directory to a 3 block indexed
+ * directory, and adds the dentry to the indexed directory.
+@@ -2252,17 +2301,17 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+ brelse(bh);
+ return retval;
+ }
++
+ root = (struct dx_root *) bh->b_data;
++ if (!ext4_check_dx_root(dir, root)) {
++ brelse(bh);
++ return -EFSCORRUPTED;
++ }
+
+ /* The 0th block becomes the root, move the dirents out */
+ fde = &root->dotdot;
+ de = (struct ext4_dir_entry_2 *)((char *)fde +
+ ext4_rec_len_from_disk(fde->rec_len, blocksize));
+- if ((char *) de >= (((char *) root) + blocksize)) {
+- EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
+- brelse(bh);
+- return -EFSCORRUPTED;
+- }
+ len = ((char *) root) + (blocksize - csum_size) - (char *) de;
+
+ /* Allocate new block for the 0th block's dirents */
+@@ -2901,7 +2950,7 @@ static int ext4_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ inode = ext4_new_inode_start_handle(idmap, dir, mode,
+ NULL, 0, NULL,
+ EXT4_HT_DIR,
+- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
++ EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
+ 4 + EXT4_XATTR_TRANS_BLOCKS);
+ handle = ext4_journal_current_handle();
+ err = PTR_ERR(inode);
+@@ -3087,10 +3136,7 @@ bool ext4_empty_dir(struct inode *inode)
+ EXT4_ERROR_INODE(inode, "invalid size");
+ return false;
+ }
+- /* The first directory block must not be a hole,
+- * so treat it as DIRENT_HTREE
+- */
+- bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
++ bh = ext4_read_dirblock(inode, 0, EITHER);
+ if (IS_ERR(bh))
+ return false;
+
+@@ -3535,10 +3581,7 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
+ struct ext4_dir_entry_2 *de;
+ unsigned int offset;
+
+- /* The first directory block must not be a hole, so
+- * treat it as DIRENT_HTREE
+- */
+- bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
++ bh = ext4_read_dirblock(inode, 0, EITHER);
+ if (IS_ERR(bh)) {
+ *retval = PTR_ERR(bh);
+ return NULL;
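ext4_check_dx_root() above verifies the '.' and '..' entries, their name lengths, and their rec_len fields before the block is trusted as a dx root. The same bounds-check shape, reduced to a userspace helper over an untrusted buffer (the struct layout is simplified, not the on-disk format):

#include <stdbool.h>
#include <string.h>

struct dirent_hdr {
	unsigned short rec_len;   /* bytes to the next entry */
	unsigned char  name_len;
	char           name[4];
};

static bool check_entry(const char *block, unsigned int blocksize,
			const struct dirent_hdr *de,
			const char *want, unsigned char want_len)
{
	const char *end = block + blocksize;

	if (de->name_len != want_len)
		return false;                 /* wrong name length */
	if (strncmp(de->name, want, want_len) != 0)
		return false;                 /* wrong name */
	if ((const char *)de + de->rec_len >= end)
		return false;                 /* rec_len runs past the block */
	return true;
}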
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 0361c20910def7..5f105171df7b56 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -231,17 +231,24 @@ struct ext4_new_flex_group_data {
+ in the flex group */
+ __u16 *bg_flags; /* block group flags of groups
+ in @groups */
++ ext4_group_t resize_bg; /* number of allocated
++ new_group_data */
+ ext4_group_t count; /* number of groups in @groups
+ */
+ };
+
++/*
++ * Avoid memory allocation failures caused by adding too many groups at a time.
++ */
++#define MAX_RESIZE_BG 16384
++
+ /*
+ * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of
+ * @flexbg_size.
+ *
+ * Returns NULL on failure otherwise address of the allocated structure.
+ */
+-static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
++static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size)
+ {
+ struct ext4_new_flex_group_data *flex_gd;
+
+@@ -249,17 +256,18 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+ if (flex_gd == NULL)
+ goto out3;
+
+- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
+- goto out2;
+- flex_gd->count = flexbg_size;
++ if (unlikely(flexbg_size > MAX_RESIZE_BG))
++ flex_gd->resize_bg = MAX_RESIZE_BG;
++ else
++ flex_gd->resize_bg = flexbg_size;
+
+- flex_gd->groups = kmalloc_array(flexbg_size,
++ flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
+ sizeof(struct ext4_new_group_data),
+ GFP_NOFS);
+ if (flex_gd->groups == NULL)
+ goto out2;
+
+- flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
++ flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
+ GFP_NOFS);
+ if (flex_gd->bg_flags == NULL)
+ goto out1;
+@@ -296,7 +304,7 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
+ */
+ static int ext4_alloc_group_tables(struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd,
+- int flexbg_size)
++ unsigned int flexbg_size)
+ {
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ ext4_fsblk_t start_blk;
+@@ -397,12 +405,12 @@ static int ext4_alloc_group_tables(struct super_block *sb,
+ group = group_data[0].group;
+
+ printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
+- "%d groups, flexbg size is %d:\n", flex_gd->count,
++ "%u groups, flexbg size is %u:\n", flex_gd->count,
+ flexbg_size);
+
+ for (i = 0; i < flex_gd->count; i++) {
+ ext4_debug(
+- "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
++ "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
+ ext4_bg_has_super(sb, group + i) ? "normal" :
+ "no-super", group + i,
+ group_data[i].blocks_count,
+@@ -560,13 +568,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
+ goto handle_itb;
+
+- if (meta_bg == 1) {
+- ext4_group_t first_group;
+- first_group = ext4_meta_bg_first_group(sb, group);
+- if (first_group != group + 1 &&
+- first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
+- goto handle_itb;
+- }
++ if (meta_bg == 1)
++ goto handle_itb;
+
+ block = start + ext4_bg_has_super(sb, group);
+ /* Copy all of the GDT blocks into the backup in this group */
+@@ -1191,8 +1194,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ ext4_group_first_block_no(sb, group));
+ BUFFER_TRACE(bh, "get_write_access");
+ if ((err = ext4_journal_get_write_access(handle, sb, bh,
+- EXT4_JTR_NONE)))
++ EXT4_JTR_NONE))) {
++ brelse(bh);
+ break;
++ }
+ lock_buffer(bh);
+ memcpy(bh->b_data, data, size);
+ if (rest)
+@@ -1600,7 +1605,10 @@ static int ext4_flex_group_add(struct super_block *sb,
+ int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+ int gdb_num_end = ((group + flex_gd->count - 1) /
+ EXT4_DESC_PER_BLOCK(sb));
+- int meta_bg = ext4_has_feature_meta_bg(sb);
++ int meta_bg = ext4_has_feature_meta_bg(sb) &&
++ gdb_num >= le32_to_cpu(es->s_first_meta_bg);
++ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
++ ext4_group_first_block_no(sb, 0);
+ sector_t old_gdb = 0;
+
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
+@@ -1612,8 +1620,8 @@ static int ext4_flex_group_add(struct super_block *sb,
+ gdb_num);
+ if (old_gdb == gdb_bh->b_blocknr)
+ continue;
+- update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+- gdb_bh->b_size, meta_bg);
++ update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
++ gdb_bh->b_data, gdb_bh->b_size, meta_bg);
+ old_gdb = gdb_bh->b_blocknr;
+ }
+ }
+@@ -1623,8 +1631,7 @@ static int ext4_flex_group_add(struct super_block *sb,
+
+ static int ext4_setup_next_flex_gd(struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd,
+- ext4_fsblk_t n_blocks_count,
+- unsigned long flexbg_size)
++ ext4_fsblk_t n_blocks_count)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+@@ -1648,7 +1655,7 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
+ BUG_ON(last);
+ ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
+
+- last_group = group | (flexbg_size - 1);
++ last_group = group | (flex_gd->resize_bg - 1);
+ if (last_group > n_group)
+ last_group = n_group;
+
+@@ -1980,9 +1987,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+
+ errout:
+ ret = ext4_journal_stop(handle);
+- if (!err)
+- err = ret;
+- return ret;
++ return err ? err : ret;
+
+ invalid_resize_inode:
+ ext4_error(sb, "corrupted/inconsistent resize inode");
+@@ -2010,8 +2015,9 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+ ext4_fsblk_t o_blocks_count;
+ ext4_fsblk_t n_blocks_count_retry = 0;
+ unsigned long last_update_time = 0;
+- int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
++ int err = 0;
+ int meta_bg;
++ unsigned int flexbg_size = ext4_flex_bg_size(sbi);
+
+ /* See if the device is actually as big as what was requested */
+ bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
+@@ -2152,8 +2158,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+ /* Add flex groups. Note that a regular group is a
+ * flex group with 1 group.
+ */
+- while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
+- flexbg_size)) {
++ while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
+ if (time_is_before_jiffies(last_update_time + HZ * 10)) {
+ if (last_update_time)
+ ext4_msg(sb, KERN_INFO,
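alloc_flex_gd() now caps the per-pass group count at MAX_RESIZE_BG, so the two kmalloc_array() calls stay a bounded size and oversized flex groups are simply handled over several iterations of ext4_setup_next_flex_gd(). A sketch of the capped allocator (entry sizes and helpers simplified):

#include <stdlib.h>

#define MAX_RESIZE_BG 16384   /* cap taken from the patch */

struct flex_gd {
	unsigned int resize_bg;   /* groups staged per pass */
	void *groups;
	unsigned short *bg_flags;
};

struct flex_gd *alloc_flex_gd(unsigned int flexbg_size)
{
	struct flex_gd *gd = malloc(sizeof(*gd));

	if (!gd)
		return NULL;
	gd->resize_bg = flexbg_size > MAX_RESIZE_BG ? MAX_RESIZE_BG
						    : flexbg_size;
	gd->groups = calloc(gd->resize_bg, 64);  /* 64: stand-in entry size */
	gd->bg_flags = calloc(gd->resize_bg, sizeof(*gd->bg_flags));
	if (!gd->groups || !gd->bg_flags) {
		free(gd->groups);
		free(gd->bg_flags);
		free(gd);
		return NULL;
	}
	return gd;
}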
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index dbebd8b3127e51..1d14a38017a7f0 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -744,11 +744,12 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+
+ ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+ /*
+- * Make sure updated value of ->s_mount_flags will be visible before
+- * ->s_flags update
++	 * EXT4_FLAGS_SHUTDOWN was set, which stops all filesystem
++	 * modifications. We don't set SB_RDONLY because that requires
++	 * the sb->s_umount semaphore, and setting it without a proper
++	 * remount procedure confuses code such as freeze_super(),
++	 * leading to deadlocks and other problems.
+ */
+- smp_wmb();
+- sb->s_flags |= SB_RDONLY;
+ }
+
+ static void update_super_work(struct work_struct *work)
+@@ -768,7 +769,8 @@ static void update_super_work(struct work_struct *work)
+ */
+ if (!sb_rdonly(sbi->s_sb) && journal) {
+ struct buffer_head *sbh = sbi->s_sbh;
+- bool call_notify_err;
++ bool call_notify_err = false;
++
+ handle = jbd2_journal_start(journal, 1);
+ if (IS_ERR(handle))
+ goto write_directly;
+@@ -1592,7 +1594,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
+ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ unsigned int flags);
+
+-static struct dquot **ext4_get_dquots(struct inode *inode)
++static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
+ {
+ return EXT4_I(inode)->i_dquot;
+ }
+@@ -5204,6 +5206,18 @@ static int ext4_block_group_meta_init(struct super_block *sb, int silent)
+ return 0;
+ }
+
++/*
++ * It's hard to get stripe-aligned blocks if the stripe is not aligned with
++ * the cluster; just disable striping and alert the user, to simplify the
++ * code and avoid stripe-aligned allocations that would rarely succeed.
++ */
++static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
++ stripe % sbi->s_cluster_ratio != 0);
++}
++
+ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ {
+ struct ext4_super_block *es = NULL;
+@@ -5311,13 +5325,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ goto failed_mount3;
+
+ sbi->s_stripe = ext4_get_stripe_size(sbi);
+- /*
+- * It's hard to get stripe aligned blocks if stripe is not aligned with
+- * cluster, just disable stripe and alert user to simpfy code and avoid
+- * stripe aligned allocation which will rarely successes.
+- */
+- if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 &&
+- sbi->s_stripe % sbi->s_cluster_ratio != 0) {
++ if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
+ ext4_msg(sb, KERN_WARNING,
+ "stripe (%lu) is not aligned with cluster size (%u), "
+ "stripe is disabled",
+@@ -5555,19 +5563,15 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ if (err)
+ goto failed_mount6;
+
+- err = ext4_register_sysfs(sb);
+- if (err)
+- goto failed_mount7;
+-
+ err = ext4_init_orphan_info(sb);
+ if (err)
+- goto failed_mount8;
++ goto failed_mount7;
+ #ifdef CONFIG_QUOTA
+ /* Enable quota usage during mount. */
+ if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
+ err = ext4_enable_quotas(sb);
+ if (err)
+- goto failed_mount9;
++ goto failed_mount8;
+ }
+ #endif /* CONFIG_QUOTA */
+
+@@ -5593,7 +5597,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ ext4_msg(sb, KERN_INFO, "recovery complete");
+ err = ext4_mark_recovery_complete(sb, es);
+ if (err)
+- goto failed_mount10;
++ goto failed_mount9;
+ }
+
+ if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
+@@ -5610,15 +5614,17 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ atomic_set(&sbi->s_warning_count, 0);
+ atomic_set(&sbi->s_msg_count, 0);
+
++ /* Register sysfs after all initializations are complete. */
++ err = ext4_register_sysfs(sb);
++ if (err)
++ goto failed_mount9;
++
+ return 0;
+
+-failed_mount10:
++failed_mount9:
+ ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+-failed_mount9: __maybe_unused
++failed_mount8: __maybe_unused
+ ext4_release_orphan_info(sb);
+-failed_mount8:
+- ext4_unregister_sysfs(sb);
+- kobject_put(&sbi->s_kobj);
+ failed_mount7:
+ ext4_unregister_li_request(sb);
+ failed_mount6:
+@@ -5653,8 +5659,8 @@ failed_mount9: __maybe_unused
+ failed_mount3:
+ /* flush s_sb_upd_work before sbi destroy */
+ flush_work(&sbi->s_sb_upd_work);
+- del_timer_sync(&sbi->s_err_report);
+ ext4_stop_mmpd(sbi);
++ del_timer_sync(&sbi->s_err_report);
+ ext4_group_desc_free(sbi);
+ failed_mount:
+ if (sbi->s_chksum_driver)
+@@ -6442,6 +6448,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ struct ext4_mount_options old_opts;
+ ext4_group_t g;
+ int err = 0;
++ int alloc_ctx;
+ #ifdef CONFIG_QUOTA
+ int enable_quota = 0;
+ int i, j;
+@@ -6482,7 +6489,25 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+
+ }
+
++ if ((ctx->spec & EXT4_SPEC_s_stripe) &&
++ ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
++ ext4_msg(sb, KERN_WARNING,
++ "stripe (%lu) is not aligned with cluster size (%u), "
++ "stripe is disabled",
++ ctx->s_stripe, sbi->s_cluster_ratio);
++ ctx->s_stripe = 0;
++ }
++
++ /*
++ * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
++ * two calls to ext4_should_dioread_nolock() to return inconsistent
++	 * values, triggering a WARN_ON in ext4_add_complete_io(). We grab
++	 * s_writepages_rwsem here to avoid races between writepages
++	 * operations and remount.
++ */
++ alloc_ctx = ext4_writepages_down_write(sb);
+ ext4_apply_options(fc, sb);
++ ext4_writepages_up_write(sb, alloc_ctx);
+
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ test_opt(sb, JOURNAL_CHECKSUM)) {
+@@ -6700,6 +6725,8 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
++
++ alloc_ctx = ext4_writepages_down_write(sb);
+ sb->s_flags = old_sb_flags;
+ sbi->s_mount_opt = old_opts.s_mount_opt;
+ sbi->s_mount_opt2 = old_opts.s_mount_opt2;
+@@ -6708,6 +6735,8 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ sbi->s_commit_interval = old_opts.s_commit_interval;
+ sbi->s_min_batch_time = old_opts.s_min_batch_time;
+ sbi->s_max_batch_time = old_opts.s_max_batch_time;
++ ext4_writepages_up_write(sb, alloc_ctx);
++
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ ext4_release_system_zone(sb);
+ #ifdef CONFIG_QUOTA
+@@ -6850,6 +6879,10 @@ static int ext4_write_dquot(struct dquot *dquot)
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ret = dquot_commit(dquot);
++ if (ret < 0)
++ ext4_error_err(dquot->dq_sb, -ret,
++ "Failed to commit dquot type %d",
++ dquot->dq_id.type);
+ err = ext4_journal_stop(handle);
+ if (!ret)
+ ret = err;
+@@ -6866,6 +6899,10 @@ static int ext4_acquire_dquot(struct dquot *dquot)
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ret = dquot_acquire(dquot);
++ if (ret < 0)
++ ext4_error_err(dquot->dq_sb, -ret,
++ "Failed to acquire dquot type %d",
++ dquot->dq_id.type);
+ err = ext4_journal_stop(handle);
+ if (!ret)
+ ret = err;
+@@ -6885,6 +6922,10 @@ static int ext4_release_dquot(struct dquot *dquot)
+ return PTR_ERR(handle);
+ }
+ ret = dquot_release(dquot);
++ if (ret < 0)
++ ext4_error_err(dquot->dq_sb, -ret,
++ "Failed to release dquot type %d",
++ dquot->dq_id.type);
+ err = ext4_journal_stop(handle);
+ if (!ret)
+ ret = err;
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 6d332dff79ddcb..d65dccb44ed592 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -29,6 +29,7 @@ typedef enum {
+ attr_trigger_test_error,
+ attr_first_error_time,
+ attr_last_error_time,
++ attr_clusters_in_group,
+ attr_feature,
+ attr_pointer_ui,
+ attr_pointer_ul,
+@@ -104,7 +105,7 @@ static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
+ int ret;
+
+ ret = kstrtoull(skip_spaces(buf), 0, &val);
+- if (ret || val >= clusters)
++ if (ret || val >= clusters || (s64)val < 0)
+ return -EINVAL;
+
+ atomic64_set(&sbi->s_resv_clusters, val);
+@@ -207,13 +208,14 @@ EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
+
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+ ext4_sb_info, s_inode_readahead_blks);
++EXT4_ATTR_OFFSET(mb_group_prealloc, 0644, clusters_in_group,
++ ext4_sb_info, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+ EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+-EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+ EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
+@@ -392,6 +394,7 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
+ (unsigned long long)
+ percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
+ case attr_inode_readahead:
++ case attr_clusters_in_group:
+ case attr_pointer_ui:
+ if (!ptr)
+ return 0;
+@@ -451,7 +454,8 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ s_kobj);
+ struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
+ void *ptr = calc_ptr(a, sbi);
+- unsigned long t;
++ unsigned int t;
++ unsigned long lt;
+ int ret;
+
+ switch (a->attr_id) {
+@@ -460,7 +464,7 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ case attr_pointer_ui:
+ if (!ptr)
+ return 0;
+- ret = kstrtoul(skip_spaces(buf), 0, &t);
++ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+@@ -468,13 +472,23 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ else
+ *((unsigned int *) ptr) = t;
+ return len;
++ case attr_clusters_in_group:
++ if (!ptr)
++ return 0;
++ ret = kstrtouint(skip_spaces(buf), 0, &t);
++ if (ret)
++ return ret;
++ if (t > sbi->s_clusters_per_group)
++ return -EINVAL;
++ *((unsigned int *) ptr) = t;
++ return len;
+ case attr_pointer_ul:
+ if (!ptr)
+ return 0;
+- ret = kstrtoul(skip_spaces(buf), 0, &t);
++ ret = kstrtoul(skip_spaces(buf), 0, &lt);
+ if (ret)
+ return ret;
+- *((unsigned long *) ptr) = t;
++ *((unsigned long *) ptr) = lt;
+ return len;
+ case attr_inode_readahead:
+ return inode_readahead_blks_store(sbi, buf, len);
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 92ba28cebac63d..f40785bc4e5549 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -458,7 +458,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
+ ext4_xattr_inode_set_ref(inode, 1);
+ } else {
+- inode_lock(inode);
++ inode_lock_nested(inode, I_MUTEX_XATTR);
+ inode->i_flags |= S_NOQUOTA;
+ inode_unlock(inode);
+ }
+@@ -1039,7 +1039,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ s64 ref_count;
+ int ret;
+
+- inode_lock(ea_inode);
++ inode_lock_nested(ea_inode, I_MUTEX_XATTR);
+
+ ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
+ if (ret)
+@@ -1433,6 +1433,12 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
+ goto out;
+
+ memcpy(bh->b_data, buf, csize);
++ /*
++ * Zero out block tail to avoid writing uninitialized memory
++ * to disk.
++ */
++ if (csize < blocksize)
++ memset(bh->b_data + csize, 0, blocksize - csize);
+ set_buffer_uptodate(bh);
+ ext4_handle_dirty_metadata(handle, ea_inode, bh);
+
+@@ -1565,46 +1571,49 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
+ /*
+ * Add value of the EA in an inode.
+ */
+-static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
+- const void *value, size_t value_len,
+- struct inode **ret_inode)
++static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
++ struct inode *inode, const void *value, size_t value_len)
+ {
+ struct inode *ea_inode;
+ u32 hash;
+ int err;
+
++ /* Account inode & space to quota even if sharing... */
++ err = ext4_xattr_inode_alloc_quota(inode, value_len);
++ if (err)
++ return ERR_PTR(err);
++
+ hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
+ ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
+ if (ea_inode) {
+ err = ext4_xattr_inode_inc_ref(handle, ea_inode);
+- if (err) {
+- iput(ea_inode);
+- return err;
+- }
+-
+- *ret_inode = ea_inode;
+- return 0;
++ if (err)
++ goto out_err;
++ return ea_inode;
+ }
+
+ /* Create an inode for the EA value */
+ ea_inode = ext4_xattr_inode_create(handle, inode, hash);
+- if (IS_ERR(ea_inode))
+- return PTR_ERR(ea_inode);
++ if (IS_ERR(ea_inode)) {
++ ext4_xattr_inode_free_quota(inode, NULL, value_len);
++ return ea_inode;
++ }
+
+ err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
+ if (err) {
+ if (ext4_xattr_inode_dec_ref(handle, ea_inode))
+ ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err);
+- iput(ea_inode);
+- return err;
++ goto out_err;
+ }
+
+ if (EA_INODE_CACHE(inode))
+ mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
+ ea_inode->i_ino, true /* reusable */);
+-
+- *ret_inode = ea_inode;
+- return 0;
++ return ea_inode;
++out_err:
++ iput(ea_inode);
++ ext4_xattr_inode_free_quota(inode, NULL, value_len);
++ return ERR_PTR(err);
+ }
+
+ /*
+@@ -1616,6 +1625,7 @@ static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
+ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ struct ext4_xattr_search *s,
+ handle_t *handle, struct inode *inode,
++ struct inode *new_ea_inode,
+ bool is_block)
+ {
+ struct ext4_xattr_entry *last, *next;
+@@ -1623,7 +1633,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ size_t min_offs = s->end - s->base, name_len = strlen(i->name);
+ int in_inode = i->in_inode;
+ struct inode *old_ea_inode = NULL;
+- struct inode *new_ea_inode = NULL;
+ size_t old_size, new_size;
+ int ret;
+
+@@ -1708,43 +1717,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ old_ea_inode = NULL;
+ goto out;
+ }
+- }
+- if (i->value && in_inode) {
+- WARN_ON_ONCE(!i->value_len);
+-
+- ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
+- if (ret)
+- goto out;
+-
+- ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
+- i->value_len,
+- &new_ea_inode);
+- if (ret) {
+- new_ea_inode = NULL;
+- ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
+- goto out;
+- }
+- }
+
+- if (old_ea_inode) {
+ /* We are ready to release ref count on the old_ea_inode. */
+ ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
+- if (ret) {
+- /* Release newly required ref count on new_ea_inode. */
+- if (new_ea_inode) {
+- int err;
+-
+- err = ext4_xattr_inode_dec_ref(handle,
+- new_ea_inode);
+- if (err)
+- ext4_warning_inode(new_ea_inode,
+- "dec ref new_ea_inode err=%d",
+- err);
+- ext4_xattr_inode_free_quota(inode, new_ea_inode,
+- i->value_len);
+- }
++ if (ret)
+ goto out;
+- }
+
+ ext4_xattr_inode_free_quota(inode, old_ea_inode,
+ le32_to_cpu(here->e_value_size));
+@@ -1868,7 +1845,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ ret = 0;
+ out:
+ iput(old_ea_inode);
+- iput(new_ea_inode);
+ return ret;
+ }
+
+@@ -1931,9 +1907,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ size_t old_ea_inode_quota = 0;
+ unsigned int ea_ino;
+
+-
+ #define header(x) ((struct ext4_xattr_header *)(x))
+
++	/* If we need an EA inode, prepare it before locking the buffer */
++ if (i->value && i->in_inode) {
++ WARN_ON_ONCE(!i->value_len);
++
++ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
++ i->value, i->value_len);
++ if (IS_ERR(ea_inode)) {
++ error = PTR_ERR(ea_inode);
++ ea_inode = NULL;
++ goto cleanup;
++ }
++ }
++
+ if (s->base) {
+ int offset = (char *)s->here - bs->bh->b_data;
+
+@@ -1942,6 +1930,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ EXT4_JTR_NONE);
+ if (error)
+ goto cleanup;
++
+ lock_buffer(bs->bh);
+
+ if (header(s->base)->h_refcount == cpu_to_le32(1)) {
+@@ -1968,7 +1957,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ }
+ ea_bdebug(bs->bh, "modifying in-place");
+ error = ext4_xattr_set_entry(i, s, handle, inode,
+- true /* is_block */);
++ ea_inode, true /* is_block */);
+ ext4_xattr_block_csum_set(inode, bs->bh);
+ unlock_buffer(bs->bh);
+ if (error == -EFSCORRUPTED)
+@@ -2036,29 +2025,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ s->end = s->base + sb->s_blocksize;
+ }
+
+- error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
++ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
++ true /* is_block */);
+ if (error == -EFSCORRUPTED)
+ goto bad_block;
+ if (error)
+ goto cleanup;
+
+- if (i->value && s->here->e_value_inum) {
+- /*
+- * A ref count on ea_inode has been taken as part of the call to
+- * ext4_xattr_set_entry() above. We would like to drop this
+- * extra ref but we have to wait until the xattr block is
+- * initialized and has its own ref count on the ea_inode.
+- */
+- ea_ino = le32_to_cpu(s->here->e_value_inum);
+- error = ext4_xattr_inode_iget(inode, ea_ino,
+- le32_to_cpu(s->here->e_hash),
+- &ea_inode);
+- if (error) {
+- ea_inode = NULL;
+- goto cleanup;
+- }
+- }
+-
+ inserted:
+ if (!IS_LAST_ENTRY(s->first)) {
+ new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
+@@ -2211,17 +2184,16 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+
+ cleanup:
+ if (ea_inode) {
+- int error2;
+-
+- error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+- if (error2)
+- ext4_warning_inode(ea_inode, "dec ref error=%d",
+- error2);
++ if (error) {
++ int error2;
+
+- /* If there was an error, revert the quota charge. */
+- if (error)
++ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
++ if (error2)
++ ext4_warning_inode(ea_inode, "dec ref error=%d",
++ error2);
+ ext4_xattr_inode_free_quota(inode, ea_inode,
+ i_size_read(ea_inode));
++ }
+ iput(ea_inode);
+ }
+ if (ce)
+@@ -2279,14 +2251,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ {
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_search *s = &is->s;
++ struct inode *ea_inode = NULL;
+ int error;
+
+ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ return -ENOSPC;
+
+- error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
+- if (error)
++	/* If we need an EA inode, prepare it before locking the buffer */
++ if (i->value && i->in_inode) {
++ WARN_ON_ONCE(!i->value_len);
++
++ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
++ i->value, i->value_len);
++ if (IS_ERR(ea_inode))
++ return PTR_ERR(ea_inode);
++ }
++ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
++ false /* is_block */);
++ if (error) {
++ if (ea_inode) {
++ int error2;
++
++ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
++ if (error2)
++ ext4_warning_inode(ea_inode, "dec ref error=%d",
++ error2);
++
++ ext4_xattr_inode_free_quota(inode, ea_inode,
++ i_size_read(ea_inode));
++ iput(ea_inode);
++ }
+ return error;
++ }
+ header = IHDR(inode, ext4_raw_inode(&is->iloc));
+ if (!IS_LAST_ENTRY(s->first)) {
+ header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+@@ -2295,6 +2291,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ header->h_magic = cpu_to_le32(0);
+ ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
+ }
++ iput(ea_inode);
+ return 0;
+ }
+
+@@ -2557,6 +2554,8 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
+
+ error = ext4_xattr_set_handle(handle, inode, name_index, name,
+ value, value_len, flags);
++ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR,
++ handle);
+ error2 = ext4_journal_stop(handle);
+ if (error == -ENOSPC &&
+ ext4_should_retry_alloc(sb, &retries))
+@@ -2564,7 +2563,6 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
+ if (error == 0)
+ error = error2;
+ }
+- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, NULL);
+
+ return error;
+ }
+@@ -3126,8 +3124,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
+
+ bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+ if (IS_ERR(bh)) {
+- if (PTR_ERR(bh) == -ENOMEM)
++ if (PTR_ERR(bh) == -ENOMEM) {
++ mb_cache_entry_put(ea_block_cache, ce);
+ return NULL;
++ }
+ bh = NULL;
+ EXT4_ERROR_INODE(inode, "block %lu read error",
+ (unsigned long)ce->e_value);
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index b0597a539fc548..1a33a8c1623f24 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -889,7 +889,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+
+ cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);
+
+- if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) {
++ if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) {
+ f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
+ le32_to_cpu(cp_block->cp_pack_total_block_count));
+ goto invalid_cp;
+@@ -1170,6 +1170,11 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
+ ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
+ ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
+ ckpt->next_free_nid = cpu_to_le32(last_nid);
++
++ /* update user_block_counts */
++ sbi->last_valid_block_count = sbi->total_valid_block_count;
++ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
++ percpu_counter_set(&sbi->rf_node_block_count, 0);
+ }
+
+ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
+@@ -1324,7 +1329,7 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+
+ if (cpc->reason & CP_UMOUNT) {
+ if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
+- NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) {
++ NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) {
+ clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+ f2fs_notice(sbi, "Disable nat_bits due to no space");
+ } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
+@@ -1527,7 +1532,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ cp_ver |= ((__u64)crc32 << 32);
+ *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
+
+- blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
++ blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
+ for (i = 0; i < nm_i->nat_bits_blocks; i++)
+ f2fs_update_meta_page(sbi, nm_i->nat_bits +
+ (i << F2FS_BLKSIZE_BITS), blk + i);
+@@ -1559,11 +1564,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ start_blk += NR_CURSEG_NODE_TYPE;
+ }
+
+- /* update user_block_counts */
+- sbi->last_valid_block_count = sbi->total_valid_block_count;
+- percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+- percpu_counter_set(&sbi->rf_node_block_count, 0);
+-
+ /* Here, we have one bio having CP pack except cp pack 2 page */
+ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ /* Wait for all dirty meta pages to be submitted for IO */
+@@ -1587,8 +1587,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ */
+ if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
+ f2fs_sb_has_compression(sbi))
+- invalidate_mapping_pages(META_MAPPING(sbi),
+- MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
++ f2fs_bug_on(sbi,
++ invalidate_inode_pages2_range(META_MAPPING(sbi),
++ MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1));
+
+ f2fs_release_ino_entry(sbi, false);
+
+@@ -1730,9 +1731,9 @@ void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
+ im->ino_num = 0;
+ }
+
+- sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
++ sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS -
+ NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
+- F2FS_ORPHANS_PER_BLOCK;
++ F2FS_ORPHANS_PER_BLOCK;
+ }
+
+ int __init f2fs_create_checkpoint_caches(void)
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index d820801f473e56..f7ef69f44f3d84 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -198,8 +198,8 @@ static int lzo_compress_pages(struct compress_ctx *cc)
+ ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+ &cc->clen, cc->private);
+ if (ret != LZO_E_OK) {
+- printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
+- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
++ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
++ "lzo compress failed, ret:%d", ret);
+ return -EIO;
+ }
+ return 0;
+@@ -212,17 +212,15 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
+ ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
+ dic->rbuf, &dic->rlen);
+ if (ret != LZO_E_OK) {
+- printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
+- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "lzo decompress failed, ret:%d", ret);
+ return -EIO;
+ }
+
+ if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
+- printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
+- "expected:%lu\n", KERN_ERR,
+- F2FS_I_SB(dic->inode)->sb->s_id,
+- dic->rlen,
+- PAGE_SIZE << dic->log_cluster_size);
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "lzo invalid rlen:%zu, expected:%lu",
++ dic->rlen, PAGE_SIZE << dic->log_cluster_size);
+ return -EIO;
+ }
+ return 0;
+@@ -294,16 +292,15 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
+ ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
+ dic->clen, dic->rlen);
+ if (ret < 0) {
+- printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
+- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "lz4 decompress failed, ret:%d", ret);
+ return -EIO;
+ }
+
+ if (ret != PAGE_SIZE << dic->log_cluster_size) {
+- printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
+- "expected:%lu\n", KERN_ERR,
+- F2FS_I_SB(dic->inode)->sb->s_id, ret,
+- PAGE_SIZE << dic->log_cluster_size);
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "lz4 invalid ret:%d, expected:%lu",
++ ret, PAGE_SIZE << dic->log_cluster_size);
+ return -EIO;
+ }
+ return 0;
+@@ -350,9 +347,8 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
+
+ stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
+ if (!stream) {
+- printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
+- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+- __func__);
++ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
++ "%s zstd_init_cstream failed", __func__);
+ kvfree(workspace);
+ return -EIO;
+ }
+@@ -390,16 +386,16 @@ static int zstd_compress_pages(struct compress_ctx *cc)
+
+ ret = zstd_compress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+- printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
+- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
++ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
++ "%s zstd_compress_stream failed, ret: %d",
+ __func__, zstd_get_error_code(ret));
+ return -EIO;
+ }
+
+ ret = zstd_end_stream(stream, &outbuf);
+ if (zstd_is_error(ret)) {
+- printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
+- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
++ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
++ "%s zstd_end_stream returned %d",
+ __func__, zstd_get_error_code(ret));
+ return -EIO;
+ }
+@@ -432,9 +428,8 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+
+ stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
+ if (!stream) {
+- printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
+- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
+- __func__);
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "%s zstd_init_dstream failed", __func__);
+ kvfree(workspace);
+ return -EIO;
+ }
+@@ -469,16 +464,15 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
+
+ ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+- printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
+- KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "%s zstd_decompress_stream failed, ret: %d",
+ __func__, zstd_get_error_code(ret));
+ return -EIO;
+ }
+
+ if (dic->rlen != outbuf.pos) {
+- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
+- "expected:%lu\n", KERN_ERR,
+- F2FS_I_SB(dic->inode)->sb->s_id,
++ f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++ "%s ZSTD invalid rlen:%zu, expected:%lu",
+ __func__, dic->rlen,
+ PAGE_SIZE << dic->log_cluster_size);
+ return -EIO;
+@@ -512,8 +506,8 @@ static int lzorle_compress_pages(struct compress_ctx *cc)
+ ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+ &cc->clen, cc->private);
+ if (ret != LZO_E_OK) {
+- printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
+- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
++ f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
++ "lzo-rle compress failed, ret:%d", ret);
+ return -EIO;
+ }
+ return 0;
+@@ -780,9 +774,9 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+ if (provided != calculated) {
+ if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
+ set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
+- printk_ratelimited(
+- "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
+- KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
++ f2fs_info_ratelimited(sbi,
++ "checksum invalid, nid = %lu, %x vs %x",
++ dic->inode->i_ino,
+ provided, calculated);
+ }
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+@@ -893,14 +887,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
+
+ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
+ {
++#ifdef CONFIG_F2FS_CHECK_FS
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+- bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
+ int cluster_end = 0;
++ unsigned int count;
+ int i;
+ char *reason = "";
+
+- if (!compressed)
++ if (dn->data_blkaddr != COMPRESS_ADDR)
+ return false;
+
+ /* [..., COMPR_ADDR, ...] */
+@@ -909,7 +904,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
+ goto out;
+ }
+
+- for (i = 1; i < cluster_size; i++) {
++ for (i = 1, count = 1; i < cluster_size; i++, count++) {
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
+
+@@ -929,19 +924,42 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
+ goto out;
+ }
+ }
++
++ f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
++ !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
++
+ return false;
+ out:
+ f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
+ dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return true;
++#else
++ return false;
++#endif
+ }
+
+-static int __f2fs_cluster_blocks(struct inode *inode,
+- unsigned int cluster_idx, bool compr)
++static int __f2fs_get_cluster_blocks(struct inode *inode,
++ struct dnode_of_data *dn)
+ {
+- struct dnode_of_data dn;
+ unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
++ int count, i;
++
++ for (i = 0, count = 0; i < cluster_size; i++) {
++ block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
++ dn->ofs_in_node + i);
++
++ if (__is_valid_data_blkaddr(blkaddr))
++ count++;
++ }
++
++ return count;
++}
++
++static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx,
++ enum cluster_check_type type)
++{
++ struct dnode_of_data dn;
+ unsigned int start_idx = cluster_idx <<
+ F2FS_I(inode)->i_log_cluster_size;
+ int ret;
+@@ -956,31 +974,16 @@ static int __f2fs_cluster_blocks(struct inode *inode,
+
+ if (f2fs_sanity_check_cluster(&dn)) {
+ ret = -EFSCORRUPTED;
+- f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
+ goto fail;
+ }
+
+ if (dn.data_blkaddr == COMPRESS_ADDR) {
+- int i;
+-
+- ret = 1;
+- for (i = 1; i < cluster_size; i++) {
+- block_t blkaddr;
+-
+- blkaddr = data_blkaddr(dn.inode,
+- dn.node_page, dn.ofs_in_node + i);
+- if (compr) {
+- if (__is_valid_data_blkaddr(blkaddr))
+- ret++;
+- } else {
+- if (blkaddr != NULL_ADDR)
+- ret++;
+- }
+- }
+-
+- f2fs_bug_on(F2FS_I_SB(inode),
+- !compr && ret != cluster_size &&
+- !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
++ if (type == CLUSTER_COMPR_BLKS)
++ ret = 1 + __f2fs_get_cluster_blocks(inode, &dn);
++ else if (type == CLUSTER_IS_COMPR)
++ ret = 1;
++ } else if (type == CLUSTER_RAW_BLKS) {
++ ret = __f2fs_get_cluster_blocks(inode, &dn);
+ }
+ fail:
+ f2fs_put_dnode(&dn);
+@@ -990,15 +993,33 @@ static int __f2fs_cluster_blocks(struct inode *inode,
+ /* return # of compressed blocks in compressed cluster */
+ static int f2fs_compressed_blocks(struct compress_ctx *cc)
+ {
+- return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
++ return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx,
++ CLUSTER_COMPR_BLKS);
++}
++
++/* return # of raw blocks in non-compressed cluster */
++static int f2fs_decompressed_blocks(struct inode *inode,
++ unsigned int cluster_idx)
++{
++ return __f2fs_cluster_blocks(inode, cluster_idx,
++ CLUSTER_RAW_BLKS);
+ }
+
+-/* return # of valid blocks in compressed cluster */
++/* return whether the cluster is a compressed one or not */
+ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
+ {
+ return __f2fs_cluster_blocks(inode,
+ index >> F2FS_I(inode)->i_log_cluster_size,
+- false);
++ CLUSTER_IS_COMPR);
++}
++
++/* return whether the cluster contains non-raw blocks or not */
++bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index)
++{
++ unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size;
++
++ return f2fs_decompressed_blocks(inode, cluster_idx) !=
++ F2FS_I(inode)->i_cluster_size;
+ }
+
+ static bool cluster_may_compress(struct compress_ctx *cc)
+@@ -1029,8 +1050,10 @@ static void set_cluster_dirty(struct compress_ctx *cc)
+ int i;
+
+ for (i = 0; i < cc->cluster_size; i++)
+- if (cc->rpages[i])
++ if (cc->rpages[i]) {
+ set_page_dirty(cc->rpages[i]);
++ set_page_private_gcing(cc->rpages[i]);
++ }
+ }
+
+ static int prepare_compress_overwrite(struct compress_ctx *cc,
+@@ -1362,8 +1385,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ add_compr_block_stat(inode, cc->valid_nr_cpages);
+
+ set_inode_flag(cc->inode, FI_APPEND_WRITE);
+- if (cc->cluster_idx == 0)
+- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+ f2fs_put_dnode(&dn);
+ if (quota_inode)
+@@ -1411,6 +1432,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+ struct f2fs_sb_info *sbi = bio->bi_private;
+ struct compress_io_ctx *cic =
+ (struct compress_io_ctx *)page_private(page);
++ enum count_type type = WB_DATA_TYPE(page,
++ f2fs_is_compressed_page(page));
+ int i;
+
+ if (unlikely(bio->bi_status))
+@@ -1418,7 +1441,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+
+ f2fs_compress_free_page(page);
+
+- dec_page_count(sbi, F2FS_WB_DATA);
++ dec_page_count(sbi, type);
+
+ if (atomic_dec_return(&cic->pending_pages))
+ return;
+@@ -1434,12 +1457,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+ }
+
+ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+- int *submitted,
++ int *submitted_p,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
+ {
+ struct address_space *mapping = cc->inode->i_mapping;
+- int _submitted, compr_blocks, ret, i;
++ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
++ int submitted, compr_blocks, i;
++ int ret = 0;
+
+ compr_blocks = f2fs_compressed_blocks(cc);
+
+@@ -1454,6 +1479,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ if (compr_blocks < 0)
+ return compr_blocks;
+
++ /* overwrite compressed cluster w/ normal cluster */
++ if (compr_blocks > 0)
++ f2fs_lock_op(sbi);
++
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (!cc->rpages[i])
+ continue;
+@@ -1478,7 +1507,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ if (!clear_page_dirty_for_io(cc->rpages[i]))
+ goto continue_unlock;
+
+- ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
++ ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
+ NULL, NULL, wbc, io_type,
+ compr_blocks, false);
+ if (ret) {
+@@ -1486,26 +1515,29 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ unlock_page(cc->rpages[i]);
+ ret = 0;
+ } else if (ret == -EAGAIN) {
++ ret = 0;
+ /*
+ * for quota file, just redirty left pages to
+ * avoid deadlock caused by cluster update race
+ * from foreground operation.
+ */
+ if (IS_NOQUOTA(cc->inode))
+- return 0;
+- ret = 0;
++ goto out;
+ f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+ goto retry_write;
+ }
+- return ret;
++ goto out;
+ }
+
+- *submitted += _submitted;
++ *submitted_p += submitted;
+ }
+
+- f2fs_balance_fs(F2FS_M_SB(mapping), true);
++out:
++ if (compr_blocks > 0)
++ f2fs_unlock_op(sbi);
+
+- return 0;
++ f2fs_balance_fs(sbi, true);
++ return ret;
+ }
+
+ int f2fs_write_multi_pages(struct compress_ctx *cc,
+@@ -1799,16 +1831,18 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
+ * check whether cluster blocks are contiguous, and add extent cache entry
+ * only if cluster blocks are logically and physically contiguous.
+ */
+-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
++unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
++ unsigned int ofs_in_node)
+ {
+- bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
++ bool compressed = data_blkaddr(dn->inode, dn->node_page,
++ ofs_in_node) == COMPRESS_ADDR;
+ int i = compressed ? 1 : 0;
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+- dn->ofs_in_node + i);
++ ofs_in_node + i);
+
+ for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+- dn->ofs_in_node + i);
++ ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ break;
+@@ -1976,7 +2010,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+ {
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+- char slab_name[32];
++ char slab_name[35];
+
+ if (!f2fs_sb_has_compression(sbi))
+ return 0;
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 916e317ac925fc..1c59a3b2b2c348 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -48,7 +48,7 @@ void f2fs_destroy_bioset(void)
+ bioset_exit(&f2fs_bioset);
+ }
+
+-static bool __is_cp_guaranteed(struct page *page)
++bool f2fs_is_cp_guaranteed(struct page *page)
+ {
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+@@ -65,8 +65,6 @@ static bool __is_cp_guaranteed(struct page *page)
+ S_ISDIR(inode->i_mode))
+ return true;
+
+- if (f2fs_is_compressed_page(page))
+- return false;
+ if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
+ page_private_gcing(page))
+ return true;
+@@ -338,18 +336,7 @@ static void f2fs_write_end_io(struct bio *bio)
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+- enum count_type type = WB_DATA_TYPE(page);
+-
+- if (page_private_dummy(page)) {
+- clear_page_private_dummy(page);
+- unlock_page(page);
+- mempool_free(page, sbi->write_io_dummy);
+-
+- if (unlikely(bio->bi_status))
+- f2fs_stop_checkpoint(sbi, true,
+- STOP_CP_REASON_WRITE_FAIL);
+- continue;
+- }
++ enum count_type type = WB_DATA_TYPE(page, false);
+
+ fscrypt_finalize_bounce_page(&page);
+
+@@ -524,50 +511,13 @@ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ submit_bio(bio);
+ }
+
+-static void f2fs_align_write_bio(struct f2fs_sb_info *sbi, struct bio *bio)
+-{
+- unsigned int start =
+- (bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS) % F2FS_IO_SIZE(sbi);
+-
+- if (start == 0)
+- return;
+-
+- /* fill dummy pages */
+- for (; start < F2FS_IO_SIZE(sbi); start++) {
+- struct page *page =
+- mempool_alloc(sbi->write_io_dummy,
+- GFP_NOIO | __GFP_NOFAIL);
+- f2fs_bug_on(sbi, !page);
+-
+- lock_page(page);
+-
+- zero_user_segment(page, 0, PAGE_SIZE);
+- set_page_private_dummy(page);
+-
+- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
+- f2fs_bug_on(sbi, 1);
+- }
+-}
+-
+ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ enum page_type type)
+ {
+ WARN_ON_ONCE(is_read_io(bio_op(bio)));
+
+- if (type == DATA || type == NODE) {
+- if (f2fs_lfs_mode(sbi) && current->plug)
+- blk_finish_plug(current->plug);
+-
+- if (F2FS_IO_ALIGNED(sbi)) {
+- f2fs_align_write_bio(sbi, bio);
+- /*
+- * In the NODE case, we lose next block address chain.
+- * So, we need to do checkpoint in f2fs_sync_file.
+- */
+- if (type == NODE)
+- set_sbi_flag(sbi, SBI_NEED_CP);
+- }
+- }
++ if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
++ blk_finish_plug(current->plug);
+
+ trace_f2fs_submit_write_bio(sbi->sb, type, bio);
+ iostat_update_submit_ctx(bio, type);
+@@ -762,7 +712,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+
+ inc_page_count(fio->sbi, is_read_io(fio->op) ?
+- __read_io_type(page) : WB_DATA_TYPE(fio->page));
++ __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
+
+ if (is_read_io(bio_op(bio)))
+ f2fs_submit_read_bio(fio->sbi, bio, fio->type);
+@@ -796,16 +746,6 @@ static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
+ block_t last_blkaddr,
+ block_t cur_blkaddr)
+ {
+- if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
+- unsigned int filled_blocks =
+- F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
+- unsigned int io_size = F2FS_IO_SIZE(sbi);
+- unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
+-
+- /* IOs in bio is aligned and left space of vectors is not enough */
+- if (!(filled_blocks % io_size) && left_vecs < io_size)
+- return false;
+- }
+ if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
+ return false;
+ return io_type_is_mergeable(io, fio);
+@@ -973,7 +913,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
+ if (fio->io_wbc)
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+
+- inc_page_count(fio->sbi, WB_DATA_TYPE(page));
++ inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
+
+ *fio->last_block = fio->new_blkaddr;
+ *fio->bio = bio;
+@@ -1007,11 +947,12 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
+ struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
+ struct page *bio_page;
++ enum count_type type;
+
+ f2fs_bug_on(sbi, is_read_io(fio->op));
+
+ f2fs_down_write(&io->io_rwsem);
+-
++next:
+ #ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
+ wait_for_completion_io(&io->zone_wait);
+@@ -1021,7 +962,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ }
+ #endif
+
+-next:
+ if (fio->in_list) {
+ spin_lock(&io->io_lock);
+ if (list_empty(&io->io_list)) {
+@@ -1046,7 +986,8 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ /* set submitted = true as a return value */
+ fio->submitted = 1;
+
+- inc_page_count(sbi, WB_DATA_TYPE(bio_page));
++ type = WB_DATA_TYPE(bio_page, fio->compressed_page);
++ inc_page_count(sbi, type);
+
+ if (io->bio &&
+ (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
+@@ -1056,13 +997,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ __submit_merged_bio(io);
+ alloc_new:
+ if (io->bio == NULL) {
+- if (F2FS_IO_ALIGNED(sbi) &&
+- (fio->type == DATA || fio->type == NODE) &&
+- fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
+- dec_page_count(sbi, WB_DATA_TYPE(bio_page));
+- fio->retry = 1;
+- goto skip;
+- }
+ io->bio = __bio_alloc(fio, BIO_MAX_VECS);
+ f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
+ bio_page->index, fio, GFP_NOIO);
+@@ -1080,10 +1014,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ io->last_block_in_bio = fio->new_blkaddr;
+
+ trace_f2fs_submit_page_write(fio->page, fio);
+-skip:
+- if (fio->in_list)
+- goto next;
+-out:
+ #ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
+ is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
+@@ -1096,6 +1026,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
+ __submit_merged_bio(io);
+ }
+ #endif
++ if (fio->in_list)
++ goto next;
++out:
+ if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
+ !f2fs_is_checkpoint_ready(sbi))
+ __submit_merged_bio(io);
+@@ -1179,18 +1112,12 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+ return 0;
+ }
+
+-static void __set_data_blkaddr(struct dnode_of_data *dn)
++static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+- struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+- __le32 *addr_array;
+- int base = 0;
+-
+- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+- base = get_extra_isize(dn->inode);
++ __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
+
+- /* Get physical address of data block */
+- addr_array = blkaddr_in_node(rn);
+- addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
++ dn->data_blkaddr = blkaddr;
++ addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ }
+
+ /*
+@@ -1199,18 +1126,17 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
+ * ->node_page
+ * update block addresses in the node page
+ */
+-void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
++void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+- __set_data_blkaddr(dn);
++ __set_data_blkaddr(dn, blkaddr);
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
+ }
+
+ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+ {
+- dn->data_blkaddr = blkaddr;
+- f2fs_set_data_blkaddr(dn);
++ f2fs_set_data_blkaddr(dn, blkaddr);
+ f2fs_update_read_extent_cache(dn);
+ }
+
+@@ -1225,7 +1151,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
+
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
+ return -EPERM;
+- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
++ err = inc_valid_block_count(sbi, dn->inode, &count, true);
++ if (unlikely(err))
+ return err;
+
+ trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+@@ -1237,8 +1164,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
+ block_t blkaddr = f2fs_data_blkaddr(dn);
+
+ if (blkaddr == NULL_ADDR) {
+- dn->data_blkaddr = NEW_ADDR;
+- __set_data_blkaddr(dn);
++ __set_data_blkaddr(dn, NEW_ADDR);
+ count--;
+ }
+ }
+@@ -1483,7 +1409,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
+
+ dn->data_blkaddr = f2fs_data_blkaddr(dn);
+ if (dn->data_blkaddr == NULL_ADDR) {
+- err = inc_valid_block_count(sbi, dn->inode, &count);
++ err = inc_valid_block_count(sbi, dn->inode, &count, true);
+ if (unlikely(err))
+ return err;
+ }
+@@ -1492,11 +1418,9 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
+ old_blkaddr = dn->data_blkaddr;
+ f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
+ &sum, seg_type, NULL);
+- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+- invalidate_mapping_pages(META_MAPPING(sbi),
+- old_blkaddr, old_blkaddr);
+- f2fs_invalidate_compress_page(sbi, old_blkaddr);
+- }
++ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
++ f2fs_invalidate_internal_cache(sbi, old_blkaddr);
++
+ f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
+ return 0;
+ }
+@@ -1690,9 +1614,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
+ map->m_flags |= F2FS_MAP_NEW;
+ } else if (is_hole) {
+ if (f2fs_compressed_file(inode) &&
+- f2fs_sanity_check_cluster(&dn) &&
+- (flag != F2FS_GET_BLOCK_FIEMAP ||
+- IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
++ f2fs_sanity_check_cluster(&dn)) {
+ err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi,
+ ERROR_CORRUPTED_CLUSTER);
+@@ -2344,8 +2266,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
+ if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+- if (atomic_dec_and_test(&dic->remaining_pages))
++ if (atomic_dec_and_test(&dic->remaining_pages)) {
+ f2fs_decompress_cluster(dic, true);
++ break;
++ }
+ continue;
+ }
+
+@@ -2566,9 +2490,6 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
+
+ page = fio->compressed_page ? fio->compressed_page : fio->page;
+
+- /* wait for GCed page writeback via META_MAPPING */
+- f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
+-
+ if (fscrypt_inode_uses_inline_crypto(inode))
+ return 0;
+
+@@ -2663,7 +2584,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
+ return true;
+ if (IS_NOQUOTA(inode))
+ return true;
+- if (f2fs_is_atomic_file(inode))
++ if (f2fs_used_in_atomic_write(inode))
+ return true;
+
+ /* swap file is migrating in aligned write mode */
+@@ -2676,8 +2597,6 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
+ if (fio) {
+ if (page_private_gcing(fio->page))
+ return true;
+- if (page_private_dummy(fio->page))
+- return true;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
+ return true;
+@@ -2702,10 +2621,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+ struct dnode_of_data dn;
+ struct node_info ni;
+ bool ipu_force = false;
++ bool atomic_commit;
+ int err = 0;
+
+ /* Use COW inode to make dnode_of_data for atomic write */
+- if (f2fs_is_atomic_file(inode))
++ atomic_commit = f2fs_is_atomic_file(inode) &&
++ page_private_atomic(fio->page);
++ if (atomic_commit)
+ set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
+ else
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+@@ -2750,6 +2672,10 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+ goto out_writepage;
+ }
+
++ /* wait for GCed page writeback via META_MAPPING */
++ if (fio->meta_gc)
++ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
++
+ /*
+ * If current allocation needs SSR,
+ * it had better in-place writes for updated data.
+@@ -2805,8 +2731,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+ f2fs_outplace_write_data(&dn, fio);
+ trace_f2fs_do_write_data_page(page, OPU);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+- if (page->index == 0)
+- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
++ if (atomic_commit)
++ clear_page_private_atomic(page);
+ out_writepage:
+ f2fs_put_dnode(&dn);
+ out:
+@@ -2844,8 +2770,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ .encrypted_page = NULL,
+ .submitted = 0,
+ .compr_blocks = compr_blocks,
+- .need_lock = LOCK_RETRY,
+- .post_read = f2fs_post_read_required(inode) ? 1 : 0,
++ .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
++ .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
+ .io_type = io_type,
+ .io_wbc = wbc,
+ .bio = bio,
+@@ -2889,9 +2815,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+
+ zero_user_segment(page, offset, PAGE_SIZE);
+ write:
+- if (f2fs_is_drop_cache(inode))
+- goto out;
+-
+ /* Dentry/quota blocks are controlled by checkpoint */
+ if (S_ISDIR(inode->i_mode) || quota_inode) {
+ /*
+@@ -2928,6 +2851,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ if (err == -EAGAIN) {
+ err = f2fs_do_write_data_page(&fio);
+ if (err == -EAGAIN) {
++ f2fs_bug_on(sbi, compr_blocks);
+ fio.need_lock = LOCK_REQ;
+ err = f2fs_do_write_data_page(&fio);
+ }
+@@ -3023,7 +2947,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ {
+ int ret = 0;
+ int done = 0, retry = 0;
+- struct page *pages[F2FS_ONSTACK_PAGES];
++ struct page *pages_local[F2FS_ONSTACK_PAGES];
++ struct page **pages = pages_local;
+ struct folio_batch fbatch;
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct bio *bio = NULL;
+@@ -3047,6 +2972,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ #endif
+ int nr_folios, p, idx;
+ int nr_pages;
++ unsigned int max_pages = F2FS_ONSTACK_PAGES;
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+ pgoff_t done_index;
+@@ -3056,6 +2982,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ int submitted = 0;
+ int i;
+
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (f2fs_compressed_file(inode) &&
++ 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
++ pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
++ cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
++ max_pages = 1 << cc.log_cluster_size;
++ }
++#endif
++
+ folio_batch_init(&fbatch);
+
+ if (get_dirty_pages(mapping->host) <=
+@@ -3101,7 +3036,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ add_more:
+ pages[nr_pages] = folio_page(folio, idx);
+ folio_get(folio);
+- if (++nr_pages == F2FS_ONSTACK_PAGES) {
++ if (++nr_pages == max_pages) {
+ index = folio->index + idx + 1;
+ folio_batch_release(&fbatch);
+ goto write;
+@@ -3283,6 +3218,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+ if (bio)
+ f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
+
++#ifdef CONFIG_F2FS_FS_COMPRESSION
++ if (pages != pages_local)
++ kfree(pages);
++#endif
++
+ return ret;
+ }
+
+@@ -3763,6 +3703,9 @@ static int f2fs_write_end(struct file *file,
+
+ set_page_dirty(page);
+
++ if (f2fs_is_atomic_file(inode))
++ set_page_private_atomic(page);
++
+ if (pos + copied > i_size_read(inode) &&
+ !f2fs_verity_in_progress(inode)) {
+ f2fs_i_size_write(inode, pos + copied);
+@@ -3899,25 +3842,34 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
+ unsigned int blkofs;
+ unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
+ unsigned int secidx = start_blk / blk_per_sec;
+- unsigned int end_sec = secidx + blkcnt / blk_per_sec;
++ unsigned int end_sec;
+ int ret = 0;
+
++ if (!blkcnt)
++ return 0;
++ end_sec = secidx + (blkcnt - 1) / blk_per_sec;
++
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+
+ set_inode_flag(inode, FI_ALIGNED_WRITE);
+ set_inode_flag(inode, FI_OPU_WRITE);
+
+- for (; secidx < end_sec; secidx++) {
++ for (; secidx <= end_sec; secidx++) {
++ unsigned int blkofs_end = secidx == end_sec ?
++ (blkcnt - 1) % blk_per_sec : blk_per_sec - 1;
++
+ f2fs_down_write(&sbi->pin_sem);
+
+- f2fs_lock_op(sbi);
+- f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
+- f2fs_unlock_op(sbi);
++ ret = f2fs_allocate_pinning_section(sbi);
++ if (ret) {
++ f2fs_up_write(&sbi->pin_sem);
++ break;
++ }
+
+ set_inode_flag(inode, FI_SKIP_WRITES);
+
+- for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
++ for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
+ struct page *page;
+ unsigned int blkidx = secidx * blk_per_sec + blkofs;
+
+@@ -3959,15 +3911,14 @@ static int check_swap_activate(struct swap_info_struct *sis,
+ struct address_space *mapping = swap_file->f_mapping;
+ struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- sector_t cur_lblock;
+- sector_t last_lblock;
+- sector_t pblock;
+- sector_t lowest_pblock = -1;
+- sector_t highest_pblock = 0;
++ block_t cur_lblock;
++ block_t last_lblock;
++ block_t pblock;
++ block_t lowest_pblock = -1;
++ block_t highest_pblock = 0;
+ int nr_extents = 0;
+- unsigned long nr_pblocks;
++ unsigned int nr_pblocks;
+ unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
+- unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
+ unsigned int not_aligned = 0;
+ int ret = 0;
+
+@@ -4005,28 +3956,35 @@ static int check_swap_activate(struct swap_info_struct *sis,
+ pblock = map.m_pblk;
+ nr_pblocks = map.m_len;
+
+- if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
+- nr_pblocks & sec_blks_mask) {
++ if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
++ nr_pblocks % blks_per_sec ||
++ !f2fs_valid_pinned_area(sbi, pblock)) {
++ bool last_extent = false;
++
+ not_aligned++;
+
+ nr_pblocks = roundup(nr_pblocks, blks_per_sec);
+ if (cur_lblock + nr_pblocks > sis->max)
+ nr_pblocks -= blks_per_sec;
+
++			/* this extent is the last one */
+ if (!nr_pblocks) {
+- /* this extent is last one */
+- nr_pblocks = map.m_len;
+- f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
+- goto next;
++ nr_pblocks = last_lblock - cur_lblock;
++ last_extent = true;
+ }
+
+ ret = f2fs_migrate_blocks(inode, cur_lblock,
+ nr_pblocks);
+- if (ret)
++ if (ret) {
++ if (ret == -ENOENT)
++ ret = -EINVAL;
+ goto out;
+- goto retry;
++ }
++
++ if (!last_extent)
++ goto retry;
+ }
+-next:
++
+ if (cur_lblock + nr_pblocks >= sis->max)
+ nr_pblocks = sis->max - cur_lblock;
+
+@@ -4064,17 +4022,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+ {
+ struct inode *inode = file_inode(file);
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+- if (f2fs_readonly(F2FS_I_SB(inode)->sb))
++ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+- if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
+- f2fs_err(F2FS_I_SB(inode),
+- "Swapfile not supported in LFS mode");
++ if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
++ f2fs_err(sbi, "Swapfile not supported in LFS mode");
+ return -EINVAL;
+ }
+
+@@ -4087,13 +4045,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
+
+ f2fs_precache_extents(inode);
+
++ ret = filemap_fdatawrite(inode->i_mapping);
++ if (ret < 0)
++ return ret;
++
+ ret = check_swap_activate(sis, file, span);
+ if (ret < 0)
+ return ret;
+
+ stat_inc_swapfile_inode(inode);
+ set_inode_flag(inode, FI_PIN_FILE);
+- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
++ f2fs_update_time(sbi, REQ_TIME);
+ return ret;
+ }
+
+@@ -4237,7 +4199,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
+ return -EINVAL;
+
+- if (map.m_pblk != NULL_ADDR) {
++ if (map.m_flags & F2FS_MAP_MAPPED) {
+ iomap->length = blks_to_bytes(inode, map.m_len);
+ iomap->type = IOMAP_MAPPED;
+ iomap->flags |= IOMAP_F_MERGED;
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index fdbf994f12718c..0d02224b99b727 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -41,7 +41,7 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
+ total_vblocks = 0;
+ blks_per_sec = CAP_BLKS_PER_SEC(sbi);
+ hblks_per_sec = blks_per_sec / 2;
+- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
++ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
+ vblocks = get_valid_blocks(sbi, segno, true);
+ dist = abs(vblocks - hblks_per_sec);
+ bimodal += dist * dist;
+@@ -135,7 +135,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
+ si->cur_ckpt_time = sbi->cprc_info.cur_time;
+ si->peak_ckpt_time = sbi->cprc_info.peak_time;
+ spin_unlock(&sbi->cprc_info.stat_lock);
+- si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
++ si->total_count = (int)sbi->user_block_count / BLKS_PER_SEG(sbi);
+ si->rsvd_segs = reserved_segments(sbi);
+ si->overp_segs = overprovision_segments(sbi);
+ si->valid_count = valid_user_blocks(sbi);
+@@ -208,7 +208,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
+ if (!blks)
+ continue;
+
+- if (blks == sbi->blocks_per_seg)
++ if (blks == BLKS_PER_SEG(sbi))
+ si->full_seg[type]++;
+ else
+ si->dirty_seg[type]++;
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 8aa29fe2e87b8a..166ec8942595e1 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -157,7 +157,8 @@ static unsigned long dir_block_index(unsigned int level,
+ unsigned long bidx = 0;
+
+ for (i = 0; i < level; i++)
+- bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
++ bidx += mul_u32_u32(dir_buckets(i, dir_level),
++ bucket_blocks(i));
+ bidx += idx * bucket_blocks(level);
+ return bidx;
+ }
+@@ -830,13 +831,14 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
+ return err;
+ }
+
+-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
++int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
++ struct f2fs_filename *fname)
+ {
+ struct page *page;
+ int err = 0;
+
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+- page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
++ page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto fail;
+@@ -995,9 +997,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ de = &d->dentry[bit_pos];
+ if (de->name_len == 0) {
+ if (found_valid_dirent || !bit_pos) {
+- printk_ratelimited(
+- "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
+- KERN_WARNING, sbi->sb->s_id,
++ f2fs_warn_ratelimited(sbi,
++ "invalid namelen(0), ino:%u, run fsck to fix.",
+ le32_to_cpu(de->ino));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ }
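dir_block_index() above now multiplies via mul_u32_u32() so each per-level product is computed in 64 bits before being accumulated into bidx; with enough hash levels, dir_buckets() * bucket_blocks() can exceed 32 bits. A rough standalone equivalent of that helper, assuming only stdint:

    #include <stdint.h>

    /* Widen one operand so the multiply happens in 64 bits and cannot
     * wrap at 32 bits, mirroring the kernel's mul_u32_u32(). */
    static inline uint64_t mul_u32_u32_sketch(uint32_t a, uint32_t b)
    {
            return (uint64_t)a * b;
    }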
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 0e2d49140c07f1..d6fb053b6dfbbe 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -19,34 +19,24 @@
+ #include "node.h"
+ #include <trace/events/f2fs.h>
+
+-bool sanity_check_extent_cache(struct inode *inode)
++bool sanity_check_extent_cache(struct inode *inode, struct page *ipage)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct f2fs_inode_info *fi = F2FS_I(inode);
+- struct extent_tree *et = fi->extent_tree[EX_READ];
+- struct extent_info *ei;
+-
+- if (!et)
+- return true;
++ struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
++ struct extent_info ei;
+
+- ei = &et->largest;
+- if (!ei->len)
+- return true;
++ get_read_extent_info(&ei, i_ext);
+
+- /* Let's drop, if checkpoint got corrupted. */
+- if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) {
+- ei->len = 0;
+- et->largest_updated = true;
++ if (!ei.len)
+ return true;
+- }
+
+- if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) ||
+- !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
++ if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) ||
++ !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1,
+ DATA_GENERIC_ENHANCE)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
+ __func__, inode->i_ino,
+- ei->blk, ei->fofs, ei->len);
++ ei.blk, ei.fofs, ei.len);
+ return false;
+ }
+ return true;
+@@ -74,40 +64,14 @@ static void __set_extent_info(struct extent_info *ei,
+ }
+ }
+
+-static bool __may_read_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, READ_EXTENT_CACHE))
+- return false;
+- if (is_inode_flag_set(inode, FI_NO_EXTENT))
+- return false;
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+- !f2fs_sb_has_readonly(sbi))
+- return false;
+- return S_ISREG(inode->i_mode);
+-}
+-
+-static bool __may_age_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, AGE_EXTENT_CACHE))
+- return false;
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+- return false;
+- if (file_is_cold(inode))
+- return false;
+-
+- return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+-}
+-
+ static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ if (type == EX_READ)
+- return __may_read_extent_tree(inode);
+- else if (type == EX_BLOCK_AGE)
+- return __may_age_extent_tree(inode);
++ return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
++ S_ISREG(inode->i_mode);
++ if (type == EX_BLOCK_AGE)
++ return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
++ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
+ return false;
+ }
+
+@@ -120,7 +84,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
+ if (list_empty(&F2FS_I_SB(inode)->s_list))
+ return false;
+
+- return __init_may_extent_tree(inode, type);
++ if (!__init_may_extent_tree(inode, type))
++ return false;
++
++ if (type == EX_READ) {
++ if (is_inode_flag_set(inode, FI_NO_EXTENT))
++ return false;
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++ !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
++ return false;
++ } else if (type == EX_BLOCK_AGE) {
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
++ return false;
++ if (file_is_cold(inode))
++ return false;
++ }
++ return true;
+ }
+
+ static void __try_update_largest_extent(struct extent_tree *et,
+@@ -388,7 +367,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+ static void __drop_largest_extent(struct extent_tree *et,
+ pgoff_t fofs, unsigned int len)
+ {
+- if (fofs < et->largest.fofs + et->largest.len &&
++ if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
+ fofs + len > et->largest.fofs) {
+ et->largest.len = 0;
+ et->largest_updated = true;
+@@ -406,24 +385,22 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
+
+ if (!__may_extent_tree(inode, EX_READ)) {
+ /* drop largest read extent */
+- if (i_ext && i_ext->len) {
++ if (i_ext->len) {
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+ i_ext->len = 0;
+ set_page_dirty(ipage);
+ }
+- goto out;
++ set_inode_flag(inode, FI_NO_EXTENT);
++ return;
+ }
+
+ et = __grab_extent_tree(inode, EX_READ);
+
+- if (!i_ext || !i_ext->len)
+- goto out;
+-
+ get_read_extent_info(&ei, i_ext);
+
+ write_lock(&et->lock);
+- if (atomic_read(&et->node_cnt))
+- goto unlock_out;
++ if (atomic_read(&et->node_cnt) || !ei.len)
++ goto skip;
+
+ en = __attach_extent_node(sbi, et, &ei, NULL,
+ &et->root.rb_root.rb_node, true);
+@@ -435,11 +412,13 @@ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
+ list_add_tail(&en->list, &eti->extent_list);
+ spin_unlock(&eti->extent_lock);
+ }
+-unlock_out:
++skip:
++ /* Let's drop, if checkpoint got corrupted. */
++ if (f2fs_cp_error(sbi)) {
++ et->largest.len = 0;
++ et->largest_updated = true;
++ }
+ write_unlock(&et->lock);
+-out:
+- if (!F2FS_I(inode)->extent_tree[EX_READ])
+- set_inode_flag(inode, FI_NO_EXTENT);
+ }
+
+ void f2fs_init_age_extent_tree(struct inode *inode)
+@@ -478,7 +457,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+
+ if (type == EX_READ &&
+ et->largest.fofs <= pgofs &&
+- et->largest.fofs + et->largest.len > pgofs) {
++ (pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
+ *ei = et->largest;
+ ret = true;
+ stat_inc_largest_node_hit(sbi);
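The (pgoff_t) casts added above matter because largest.fofs and largest.len are 32-bit fields: their sum can wrap before being compared with a 64-bit page offset. A self-contained demonstration of the wrap, using stdint types only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t fofs = 0xffffff00u, len = 0x200u;
            uint64_t wrapped = (uint32_t)(fofs + len);  /* 32-bit sum wraps to 0x100 */
            uint64_t widened = (uint64_t)fofs + len;    /* what the cast does */

            printf("wrapped=%#llx widened=%#llx\n",
                   (unsigned long long)wrapped, (unsigned long long)widened);
            return 0;
    }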
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 6d688e42d89c59..7faf9446ea5dcb 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -69,12 +69,17 @@ enum {
+
+ struct f2fs_fault_info {
+ atomic_t inject_ops;
+- unsigned int inject_rate;
++ int inject_rate;
+ unsigned int inject_type;
+ };
+
+ extern const char *f2fs_fault_name[FAULT_MAX];
+ #define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
++
++/* maximum retry count for injected failure */
++#define DEFAULT_FAILURE_RETRY_COUNT 8
++#else
++#define DEFAULT_FAILURE_RETRY_COUNT 1
+ #endif
+
+ /*
+@@ -142,7 +147,6 @@ struct f2fs_rwsem {
+
+ struct f2fs_mount_info {
+ unsigned int opt;
+- int write_io_size_bits; /* Write IO size bits */
+ block_t root_reserved_blocks; /* root reserved blocks */
+ kuid_t s_resuid; /* reserved blocks for uid */
+ kgid_t s_resgid; /* reserved blocks for gid */
+@@ -278,6 +282,7 @@ enum {
+ APPEND_INO, /* for append ino list */
+ UPDATE_INO, /* for update ino list */
+ TRANS_DIR_INO, /* for transactions dir ino list */
++ XATTR_DIR_INO, /* for xattr updated dir ino list */
+ FLUSH_INO, /* for multiple device flushing */
+ MAX_INO_ENTRY, /* max. list */
+ };
+@@ -774,10 +779,7 @@ enum {
+ FI_UPDATE_WRITE, /* inode has in-place-update data */
+ FI_NEED_IPU, /* used for ipu per file */
+ FI_ATOMIC_FILE, /* indicate atomic file */
+- FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
+- FI_DROP_CACHE, /* drop dirty page cache */
+ FI_DATA_EXIST, /* indicate data exists */
+- FI_INLINE_DOTS, /* indicate inline dot dentries */
+ FI_SKIP_WRITES, /* should skip data page writeback */
+ FI_OPU_WRITE, /* used for opu per file */
+ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
+@@ -795,7 +797,9 @@ enum {
+ FI_ALIGNED_WRITE, /* enable aligned write */
+ FI_COW_FILE, /* indicate COW file */
+ FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */
++ FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
+ FI_ATOMIC_REPLACE, /* indicate atomic replace */
++ FI_OPENED_FILE, /* indicate file has been opened */
+ FI_MAX, /* max flag, never be used */
+ };
+
+@@ -824,7 +828,7 @@ struct f2fs_inode_info {
+ spinlock_t i_size_lock; /* protect last_disk_size */
+
+ #ifdef CONFIG_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+
+ /* quota space reservation, managed internally by quota code */
+ qsize_t i_reserved_quota;
+@@ -834,7 +838,11 @@ struct f2fs_inode_info {
+ struct task_struct *atomic_write_task; /* store atomic write task */
+ struct extent_tree *extent_tree[NR_EXTENT_CACHES];
+ /* cached extent_tree entry */
+- struct inode *cow_inode; /* copy-on-write inode for atomic write */
++ union {
++ struct inode *cow_inode; /* copy-on-write inode for atomic write */
++ struct inode *atomic_inode;
++ /* point to atomic_inode, available only for cow_inode */
++ };
+
+ /* avoid racing between foreground op and gc */
+ struct f2fs_rwsem i_gc_rwsem[2];
+@@ -1075,7 +1083,8 @@ struct f2fs_sm_info {
+ * f2fs monitors the number of several block types such as on-writeback,
+ * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
+ */
+-#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
++#define WB_DATA_TYPE(p, f) \
++ (f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
+ enum count_type {
+ F2FS_DIRTY_DENTS,
+ F2FS_DIRTY_DATA,
+@@ -1105,6 +1114,7 @@ enum count_type {
+ * ... Only can be used with META.
+ */
+ #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
++#define PAGE_TYPE_ON_MAIN(type) ((type) == DATA || (type) == NODE)
+ enum page_type {
+ DATA = 0,
+ NODE = 1, /* should not change this */
+@@ -1140,6 +1150,7 @@ enum cp_reason_type {
+ CP_FASTBOOT_MODE,
+ CP_SPEC_LOG_NUM,
+ CP_RECOVER_DIR,
++ CP_XATTR_DIR,
+ };
+
+ enum iostat_type {
+@@ -1199,9 +1210,8 @@ struct f2fs_io_info {
+ unsigned int submitted:1; /* indicate IO submission */
+ unsigned int in_list:1; /* indicate fio is in io_list */
+ unsigned int is_por:1; /* indicate IO is from recovery or not */
+- unsigned int retry:1; /* need to reallocate block address */
+ unsigned int encrypted:1; /* indicate file is encrypted */
+- unsigned int post_read:1; /* require post read */
++ unsigned int meta_gc:1; /* require meta inode GC */
+ enum iostat_type io_type; /* io type */
+ struct writeback_control *io_wbc; /* writeback control */
+ struct bio **bio; /* bio for ipu */
+@@ -1400,10 +1410,10 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
+ * Layout A: lowest bit should be 1
+ * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
+ * bit 0 PAGE_PRIVATE_NOT_POINTER
+- * bit 1 PAGE_PRIVATE_DUMMY_WRITE
+- * bit 2 PAGE_PRIVATE_ONGOING_MIGRATION
+- * bit 3 PAGE_PRIVATE_INLINE_INODE
+- * bit 4 PAGE_PRIVATE_REF_RESOURCE
++ * bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
++ * bit 2 PAGE_PRIVATE_INLINE_INODE
++ * bit 3 PAGE_PRIVATE_REF_RESOURCE
++ * bit 4 PAGE_PRIVATE_ATOMIC_WRITE
+ * bit 5- f2fs private data
+ *
+ * Layout B: lowest bit should be 0
+@@ -1411,10 +1421,10 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
+ */
+ enum {
+ PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
+- PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
+ PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
+ PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
+ PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
++ PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */
+ PAGE_PRIVATE_MAX
+ };
+
+@@ -1558,7 +1568,6 @@ struct f2fs_sb_info {
+ struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
+ /* keep migration IO order for LFS mode */
+ struct f2fs_rwsem io_order_lock;
+- mempool_t *write_io_dummy; /* Dummy pages */
+ pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
+ int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
+
+@@ -1804,6 +1813,35 @@ struct f2fs_sb_info {
+ #endif
+ };
+
++/* Definitions to access f2fs_sb_info */
++#define BLKS_PER_SEG(sbi) \
++ ((sbi)->blocks_per_seg)
++#define BLKS_PER_SEC(sbi) \
++ ((sbi)->segs_per_sec << (sbi)->log_blocks_per_seg)
++#define SEGS_PER_SEC(sbi) \
++ ((sbi)->segs_per_sec)
++
++__printf(3, 4)
++void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
++
++#define f2fs_err(sbi, fmt, ...) \
++ f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
++#define f2fs_warn(sbi, fmt, ...) \
++ f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
++#define f2fs_notice(sbi, fmt, ...) \
++ f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
++#define f2fs_info(sbi, fmt, ...) \
++ f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
++#define f2fs_debug(sbi, fmt, ...) \
++ f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
++
++#define f2fs_err_ratelimited(sbi, fmt, ...) \
++ f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
++#define f2fs_warn_ratelimited(sbi, fmt, ...) \
++ f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
++#define f2fs_info_ratelimited(sbi, fmt, ...) \
++ f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
++
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+ #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \
+ __builtin_return_address(0))
+@@ -1821,9 +1859,8 @@ static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
+ atomic_inc(&ffi->inject_ops);
+ if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+ atomic_set(&ffi->inject_ops, 0);
+- printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",
+- KERN_INFO, sbi->sb->s_id, f2fs_fault_name[type],
+- func, parent_func);
++ f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
++ f2fs_fault_name[type], func, parent_func);
+ return true;
+ }
+ return false;
+@@ -2243,11 +2280,32 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+ return false;
+ }
+
++static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
++ struct inode *inode, bool cap)
++{
++ block_t avail_user_block_count;
++
++ avail_user_block_count = sbi->user_block_count -
++ sbi->current_reserved_blocks;
++
++ if (!__allow_reserved_blocks(sbi, inode, cap))
++ avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
++
++ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++ if (avail_user_block_count > sbi->unusable_block_count)
++ avail_user_block_count -= sbi->unusable_block_count;
++ else
++ avail_user_block_count = 0;
++ }
++
++ return avail_user_block_count;
++}
++
+ static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
+ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+- struct inode *inode, blkcnt_t *count)
++ struct inode *inode, blkcnt_t *count, bool partial)
+ {
+- blkcnt_t diff = 0, release = 0;
++ long long diff = 0, release = 0;
+ block_t avail_user_block_count;
+ int ret;
+
+@@ -2267,35 +2325,27 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+
+ spin_lock(&sbi->stat_lock);
+- sbi->total_valid_block_count += (block_t)(*count);
+- avail_user_block_count = sbi->user_block_count -
+- sbi->current_reserved_blocks;
+-
+- if (!__allow_reserved_blocks(sbi, inode, true))
+- avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+-
+- if (F2FS_IO_ALIGNED(sbi))
+- avail_user_block_count -= sbi->blocks_per_seg *
+- SM_I(sbi)->additional_reserved_segments;
+
+- if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+- if (avail_user_block_count > sbi->unusable_block_count)
+- avail_user_block_count -= sbi->unusable_block_count;
+- else
+- avail_user_block_count = 0;
+- }
+- if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+- diff = sbi->total_valid_block_count - avail_user_block_count;
++ avail_user_block_count = get_available_block_count(sbi, inode, true);
++ diff = (long long)sbi->total_valid_block_count + *count -
++ avail_user_block_count;
++ if (unlikely(diff > 0)) {
++ if (!partial) {
++ spin_unlock(&sbi->stat_lock);
++ release = *count;
++ goto enospc;
++ }
+ if (diff > *count)
+ diff = *count;
+ *count -= diff;
+ release = diff;
+- sbi->total_valid_block_count -= diff;
+ if (!*count) {
+ spin_unlock(&sbi->stat_lock);
+ goto enospc;
+ }
+ }
++ sbi->total_valid_block_count += (block_t)(*count);
++
+ spin_unlock(&sbi->stat_lock);
+
+ if (unlikely(release)) {
+@@ -2312,20 +2362,6 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ return -ENOSPC;
+ }
+
+-__printf(2, 3)
+-void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
+-
+-#define f2fs_err(sbi, fmt, ...) \
+- f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
+-#define f2fs_warn(sbi, fmt, ...) \
+- f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
+-#define f2fs_notice(sbi, fmt, ...) \
+- f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
+-#define f2fs_info(sbi, fmt, ...) \
+- f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
+-#define f2fs_debug(sbi, fmt, ...) \
+- f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
+-
+ #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
+ static inline bool page_private_##name(struct page *page) \
+ { \
+@@ -2354,17 +2390,17 @@ static inline void clear_page_private_##name(struct page *page) \
+ PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
+ PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
+ PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
+-PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
++PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
+
+ PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
+ PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
+ PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
+-PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
++PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
+
+ PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
+ PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
+ PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
+-PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
++PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
+
+ static inline unsigned long get_page_private_data(struct page *page)
+ {
+@@ -2396,6 +2432,7 @@ static inline void clear_page_private_all(struct page *page)
+ clear_page_private_reference(page);
+ clear_page_private_gcing(page);
+ clear_page_private_inline(page);
++ clear_page_private_atomic(page);
+
+ f2fs_bug_on(F2FS_P_SB(page), page_private(page));
+ }
+@@ -2498,11 +2535,8 @@ static inline int get_dirty_pages(struct inode *inode)
+
+ static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
+ {
+- unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
+- unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
+- sbi->log_blocks_per_seg;
+-
+- return segs / sbi->segs_per_sec;
++ return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
++ BLKS_PER_SEC(sbi));
+ }
+
+ static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
+@@ -2566,7 +2600,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+ if (sbi->cur_cp_pack == 2)
+- start_addr += sbi->blocks_per_seg;
++ start_addr += BLKS_PER_SEG(sbi);
+ return start_addr;
+ }
+
+@@ -2575,7 +2609,7 @@ static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+ if (sbi->cur_cp_pack == 1)
+- start_addr += sbi->blocks_per_seg;
++ start_addr += BLKS_PER_SEG(sbi);
+ return start_addr;
+ }
+
+@@ -2594,7 +2628,8 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, bool is_inode)
+ {
+ block_t valid_block_count;
+- unsigned int valid_node_count, user_block_count;
++ unsigned int valid_node_count;
++ unsigned int avail_user_block_count;
+ int err;
+
+ if (is_inode) {
+@@ -2614,21 +2649,10 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+
+ spin_lock(&sbi->stat_lock);
+
+- valid_block_count = sbi->total_valid_block_count +
+- sbi->current_reserved_blocks + 1;
+-
+- if (!__allow_reserved_blocks(sbi, inode, false))
+- valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
++ valid_block_count = sbi->total_valid_block_count + 1;
++ avail_user_block_count = get_available_block_count(sbi, inode, false);
+
+- if (F2FS_IO_ALIGNED(sbi))
+- valid_block_count += sbi->blocks_per_seg *
+- SM_I(sbi)->additional_reserved_segments;
+-
+- user_block_count = sbi->user_block_count;
+- if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+- user_block_count -= sbi->unusable_block_count;
+-
+- if (unlikely(valid_block_count > user_block_count)) {
++ if (unlikely(valid_block_count > avail_user_block_count)) {
+ spin_unlock(&sbi->stat_lock);
+ goto enospc;
+ }
+@@ -3012,7 +3036,6 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
+ return;
+ fallthrough;
+ case FI_DATA_EXIST:
+- case FI_INLINE_DOTS:
+ case FI_PIN_FILE:
+ case FI_COMPRESS_RELEASED:
+ f2fs_mark_inode_dirty_sync(inode, true);
+@@ -3136,8 +3159,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
+ set_bit(FI_INLINE_DENTRY, fi->flags);
+ if (ri->i_inline & F2FS_DATA_EXIST)
+ set_bit(FI_DATA_EXIST, fi->flags);
+- if (ri->i_inline & F2FS_INLINE_DOTS)
+- set_bit(FI_INLINE_DOTS, fi->flags);
+ if (ri->i_inline & F2FS_EXTRA_ATTR)
+ set_bit(FI_EXTRA_ATTR, fi->flags);
+ if (ri->i_inline & F2FS_PIN_FILE)
+@@ -3158,8 +3179,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
+ ri->i_inline |= F2FS_INLINE_DENTRY;
+ if (is_inode_flag_set(inode, FI_DATA_EXIST))
+ ri->i_inline |= F2FS_DATA_EXIST;
+- if (is_inode_flag_set(inode, FI_INLINE_DOTS))
+- ri->i_inline |= F2FS_INLINE_DOTS;
+ if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
+ ri->i_inline |= F2FS_EXTRA_ATTR;
+ if (is_inode_flag_set(inode, FI_PIN_FILE))
+@@ -3246,11 +3265,6 @@ static inline int f2fs_exist_data(struct inode *inode)
+ return is_inode_flag_set(inode, FI_DATA_EXIST);
+ }
+
+-static inline int f2fs_has_inline_dots(struct inode *inode)
+-{
+- return is_inode_flag_set(inode, FI_INLINE_DOTS);
+-}
+-
+ static inline int f2fs_is_mmap_file(struct inode *inode)
+ {
+ return is_inode_flag_set(inode, FI_MMAP_FILE);
+@@ -3271,22 +3285,13 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
+ return is_inode_flag_set(inode, FI_COW_FILE);
+ }
+
+-static inline bool f2fs_is_first_block_written(struct inode *inode)
+-{
+- return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
+-}
+-
+-static inline bool f2fs_is_drop_cache(struct inode *inode)
+-{
+- return is_inode_flag_set(inode, FI_DROP_CACHE);
+-}
+-
++static inline __le32 *get_dnode_addr(struct inode *inode,
++ struct page *node_page);
+ static inline void *inline_data_addr(struct inode *inode, struct page *page)
+ {
+- struct f2fs_inode *ri = F2FS_INODE(page);
+- int extra_size = get_extra_isize(inode);
++ __le32 *addr = get_dnode_addr(inode, page);
+
+- return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
++ return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
+ }
+
+ static inline int f2fs_has_inline_dentry(struct inode *inode)
+@@ -3365,17 +3370,6 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+ return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ }
+
+-static inline bool is_dot_dotdot(const u8 *name, size_t len)
+-{
+- if (len == 1 && name[0] == '.')
+- return true;
+-
+- if (len == 2 && name[0] == '.' && name[1] == '.')
+- return true;
+-
+- return false;
+-}
+-
+ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+ {
+@@ -3429,6 +3423,17 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
+ return F2FS_I(inode)->i_inline_xattr_size;
+ }
+
++static inline __le32 *get_dnode_addr(struct inode *inode,
++ struct page *node_page)
++{
++ int base = 0;
++
++ if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
++ base = get_extra_isize(inode);
++
++ return blkaddr_in_node(F2FS_NODE(node_page)) + base;
++}
++
+ #define f2fs_get_inode_mode(i) \
+ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
+ (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
+@@ -3445,7 +3450,7 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
+ sizeof((f2fs_inode)->field)) \
+ <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
+
+-#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
++#define __is_large_section(sbi) (SEGS_PER_SEC(sbi) > 1)
+
+ #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
+
+@@ -3454,11 +3459,9 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+ {
+- if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
++ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
+ f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
+ blkaddr, type);
+- f2fs_bug_on(sbi, 1);
+- }
+ }
+
+ static inline bool __is_valid_data_blkaddr(block_t blkaddr)
+@@ -3482,6 +3485,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr);
+ int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
+ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
++int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
++ bool readonly, bool need_lock);
+ int f2fs_precache_extents(struct inode *inode);
+ int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+ int f2fs_fileattr_set(struct mnt_idmap *idmap,
+@@ -3560,7 +3565,8 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode);
+ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ struct inode *dir, struct inode *inode);
+-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
++int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
++ struct f2fs_filename *fname);
+ bool f2fs_empty_dir(struct inode *dir);
+
+ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
+@@ -3682,7 +3688,8 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
+ unsigned int *newseg, bool new_sec, int dir);
+ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+ unsigned int start, unsigned int end);
+-void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
++int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
++int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
+ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
+ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
+@@ -3794,6 +3801,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
+ */
+ int __init f2fs_init_bioset(void);
+ void f2fs_destroy_bioset(void);
++bool f2fs_is_cp_guaranteed(struct page *page);
+ int f2fs_init_bio_entry_cache(void);
+ void f2fs_destroy_bio_entry_cache(void);
+ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+@@ -3812,7 +3820,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio);
+ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, sector_t *sector);
+ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
+-void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
++void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
+ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
+ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
+ int f2fs_reserve_new_block(struct dnode_of_data *dn);
+@@ -3857,6 +3865,9 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
+ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
+ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
++int f2fs_gc_range(struct f2fs_sb_info *sbi,
++ unsigned int start_seg, unsigned int end_seg,
++ bool dry_run, unsigned int dry_run_sections);
+ int f2fs_resize_fs(struct file *filp, __u64 block_count);
+ int __init f2fs_create_garbage_collection_cache(void);
+ void f2fs_destroy_garbage_collection_cache(void);
+@@ -4125,7 +4136,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
+ * inline.c
+ */
+ bool f2fs_may_inline_data(struct inode *inode);
+-bool f2fs_sanity_check_inline_data(struct inode *inode);
++bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage);
+ bool f2fs_may_inline_dentry(struct inode *inode);
+ void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
+ void f2fs_truncate_inline_inode(struct inode *inode,
+@@ -4166,7 +4177,7 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
+ /*
+ * extent_cache.c
+ */
+-bool sanity_check_extent_cache(struct inode *inode);
++bool sanity_check_extent_cache(struct inode *inode, struct page *ipage);
+ void f2fs_init_extent_tree(struct inode *inode);
+ void f2fs_drop_extent_tree(struct inode *inode);
+ void f2fs_destroy_extent_node(struct inode *inode);
+@@ -4237,10 +4248,25 @@ static inline bool f2fs_post_read_required(struct inode *inode)
+ f2fs_compressed_file(inode);
+ }
+
++static inline bool f2fs_used_in_atomic_write(struct inode *inode)
++{
++ return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode);
++}
++
++static inline bool f2fs_meta_inode_gc_required(struct inode *inode)
++{
++ return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode);
++}
++
+ /*
+ * compress.c
+ */
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
++enum cluster_check_type {
++ CLUSTER_IS_COMPR, /* check only if compressed cluster */
++ CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */
++ CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */
++};
+ bool f2fs_is_compressed_page(struct page *page);
+ struct page *f2fs_compress_control_page(struct page *page);
+ int f2fs_prepare_compress_overwrite(struct inode *inode,
+@@ -4267,6 +4293,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
+ struct writeback_control *wbc,
+ enum iostat_type io_type);
+ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
++bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index);
+ void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+ pgoff_t fofs, block_t blkaddr,
+ unsigned int llen, unsigned int c_len);
+@@ -4277,7 +4304,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
+ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+ bool in_task);
+ void f2fs_put_page_dic(struct page *page, bool in_task);
+-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
++unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
++ unsigned int ofs_in_node);
+ int f2fs_init_compress_ctx(struct compress_ctx *cc);
+ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
+ void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+@@ -4334,7 +4362,8 @@ static inline void f2fs_put_page_dic(struct page *page, bool in_task)
+ {
+ WARN_ON_ONCE(1);
+ }
+-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
++static inline unsigned int f2fs_cluster_blocks_are_contiguous(
++ struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
+ static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
+ static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
+ static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
+@@ -4351,6 +4380,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
+ static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
+ nid_t ino) { }
+ #define inc_compr_inode_stat(inode) do { } while (0)
++static inline int f2fs_is_compressed_cluster(
++ struct inode *inode,
++ pgoff_t index) { return 0; }
++static inline bool f2fs_is_sparse_cluster(
++ struct inode *inode,
++ pgoff_t index) { return true; }
+ static inline void f2fs_update_read_extent_tree_range_compressed(
+ struct inode *inode,
+ pgoff_t fofs, block_t blkaddr,
+@@ -4391,15 +4426,24 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
+ {
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+- if (!f2fs_compressed_file(inode))
++ f2fs_down_write(&F2FS_I(inode)->i_sem);
++
++ if (!f2fs_compressed_file(inode)) {
++ f2fs_up_write(&F2FS_I(inode)->i_sem);
+ return true;
+- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
++ }
++ if (f2fs_is_mmap_file(inode) ||
++ (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
++ f2fs_up_write(&F2FS_I(inode)->i_sem);
+ return false;
++ }
+
+ fi->i_flags &= ~F2FS_COMPR_FL;
+ stat_dec_compr_inode(inode);
+ clear_inode_flag(inode, FI_COMPRESSED_FILE);
+ f2fs_mark_inode_dirty_sync(inode, true);
++
++ f2fs_up_write(&F2FS_I(inode)->i_sem);
+ return true;
+ }
+
+@@ -4502,6 +4546,17 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
+ return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
+ }
+
++static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
++ block_t blkaddr)
++{
++ if (f2fs_sb_has_blkzoned(sbi)) {
++ int devi = f2fs_target_device_index(sbi, blkaddr);
++
++ return !bdev_is_zoned(FDEV(devi).bdev);
++ }
++ return true;
++}
++
+ static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+ {
+ return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
+@@ -4553,10 +4608,14 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+ }
+
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+- unsigned int type);
++extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++ unsigned long type);
+ #else
+-#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
++static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
++ unsigned long rate, unsigned long type)
++{
++ return 0;
++}
+ #endif
+
+ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+@@ -4603,6 +4662,39 @@ static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
+ return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
+ }
+
++static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
++ block_t blkaddr, unsigned int cnt)
++{
++ bool need_submit = false;
++ int i = 0;
++
++ do {
++ struct page *page;
++
++ page = find_get_page(META_MAPPING(sbi), blkaddr + i);
++ if (page) {
++ if (PageWriteback(page))
++ need_submit = true;
++ f2fs_put_page(page, 0);
++ }
++ } while (++i < cnt && !need_submit);
++
++ if (need_submit)
++ f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
++ NULL, 0, DATA);
++
++ truncate_inode_pages_range(META_MAPPING(sbi),
++ F2FS_BLK_TO_BYTES((loff_t)blkaddr),
++ F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
++}
++
++static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
++ block_t blkaddr)
++{
++ f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
++ f2fs_invalidate_compress_page(sbi, blkaddr);
++}
++
+ #define EFSBADCRC EBADMSG /* Bad CRC detected */
+ #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
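The reworked inc_valid_block_count() above takes a new partial flag: when a request overshoots the space reported by get_available_block_count(), a partial caller has the request trimmed to fit, while a non-partial caller fails outright with -ENOSPC. A condensed sketch of just that policy; the helper name and signature are hypothetical:

    #include <stdbool.h>

    typedef unsigned long long blkcnt_t;

    /* Returns how many blocks the caller must release back to quota. */
    static long long trim_request(long long valid, long long avail,
                                  blkcnt_t *count, bool partial)
    {
            long long diff = valid + (long long)*count - avail;

            if (diff <= 0)
                    return 0;            /* the whole request fits */
            if (!partial)
                    return *count;       /* all-or-nothing caller: fail */
            if (diff > (long long)*count)
                    diff = *count;
            *count -= diff;              /* grant the part that fits */
            return diff;                 /* trimmed remainder is released */
    }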
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index ca5904129b1620..74fac935bd0923 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -42,7 +42,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
+ vm_fault_t ret;
+
+ ret = filemap_fault(vmf);
+- if (!ret)
++ if (ret & VM_FAULT_LOCKED)
+ f2fs_update_iostat(F2FS_I_SB(inode), inode,
+ APP_MAPPED_READ_IO, F2FS_BLKSIZE);
+
+@@ -213,6 +213,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
+ f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
+ TRANS_DIR_INO))
+ cp_reason = CP_RECOVER_DIR;
++ else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
++ XATTR_DIR_INO))
++ cp_reason = CP_XATTR_DIR;
+
+ return cp_reason;
+ }
+@@ -534,6 +537,42 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ return 0;
+ }
+
++static int finish_preallocate_blocks(struct inode *inode)
++{
++ int ret;
++
++ inode_lock(inode);
++ if (is_inode_flag_set(inode, FI_OPENED_FILE)) {
++ inode_unlock(inode);
++ return 0;
++ }
++
++ if (!file_should_truncate(inode)) {
++ set_inode_flag(inode, FI_OPENED_FILE);
++ inode_unlock(inode);
++ return 0;
++ }
++
++ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++ filemap_invalidate_lock(inode->i_mapping);
++
++ truncate_setsize(inode, i_size_read(inode));
++ ret = f2fs_truncate(inode);
++
++ filemap_invalidate_unlock(inode->i_mapping);
++ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
++ if (!ret)
++ set_inode_flag(inode, FI_OPENED_FILE);
++
++ inode_unlock(inode);
++ if (ret)
++ return ret;
++
++ file_dont_truncate(inode);
++ return 0;
++}
++
+ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ {
+ int err = fscrypt_file_open(inode, filp);
+@@ -551,26 +590,24 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+ filp->f_mode |= FMODE_CAN_ODIRECT;
+
+- return dquot_file_open(inode, filp);
++ err = dquot_file_open(inode, filp);
++ if (err)
++ return err;
++
++ return finish_preallocate_blocks(inode);
+ }
+
+ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+- struct f2fs_node *raw_node;
+ int nr_free = 0, ofs = dn->ofs_in_node, len = count;
+ __le32 *addr;
+- int base = 0;
+ bool compressed_cluster = false;
+ int cluster_index = 0, valid_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+
+- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+- base = get_extra_isize(dn->inode);
+-
+- raw_node = F2FS_NODE(dn->node_page);
+- addr = blkaddr_in_node(raw_node) + base + ofs;
++ addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+
+ /* Assumption: truncation starts with cluster */
+ for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
+@@ -588,8 +625,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ if (blkaddr == NULL_ADDR)
+ continue;
+
+- dn->data_blkaddr = NULL_ADDR;
+- f2fs_set_data_blkaddr(dn);
++ f2fs_set_data_blkaddr(dn, NULL_ADDR);
+
+ if (__is_valid_data_blkaddr(blkaddr)) {
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+@@ -599,9 +635,6 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ valid_blocks++;
+ }
+
+- if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
+- clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
+-
+ f2fs_invalidate_blocks(sbi, blkaddr);
+
+ if (!released || blkaddr != COMPRESS_ADDR)
+@@ -813,6 +846,8 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
+ return true;
+ if (f2fs_compressed_file(inode))
+ return true;
++ if (f2fs_has_inline_data(inode))
++ return true;
+
+ /* disallow direct IO if any of devices has unaligned blksize */
+ if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
+@@ -823,8 +858,6 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
+ */
+ if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
+ return true;
+- if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
+- return true;
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+ return true;
+
+@@ -941,9 +974,14 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ ATTR_GID | ATTR_TIMES_SET))))
+ return -EPERM;
+
+- if ((attr->ia_valid & ATTR_SIZE) &&
+- !f2fs_is_compress_backend_ready(inode))
+- return -EOPNOTSUPP;
++ if ((attr->ia_valid & ATTR_SIZE)) {
++ if (!f2fs_is_compress_backend_ready(inode))
++ return -EOPNOTSUPP;
++ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
++ !IS_ALIGNED(attr->ia_size,
++ F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
++ return -EINVAL;
++ }
+
+ err = setattr_prepare(idmap, dentry, attr);
+ if (err)
+@@ -1315,8 +1353,12 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ f2fs_put_page(psrc, 1);
+ return PTR_ERR(pdst);
+ }
++
++ f2fs_wait_on_page_writeback(pdst, DATA, true, true);
++
+ memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
+ set_page_dirty(pdst);
++ set_page_private_gcing(pdst);
+ f2fs_put_page(pdst, 1);
+ f2fs_put_page(psrc, 1);
+
+@@ -1487,8 +1529,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ }
+
+ f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+- dn->data_blkaddr = NEW_ADDR;
+- f2fs_set_data_blkaddr(dn);
++ f2fs_set_data_blkaddr(dn, NEW_ADDR);
+ }
+
+ f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
+@@ -1736,9 +1777,11 @@ static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
+
+ f2fs_down_write(&sbi->pin_sem);
+
+- f2fs_lock_op(sbi);
+- f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
+- f2fs_unlock_op(sbi);
++ err = f2fs_allocate_pinning_section(sbi);
++ if (err) {
++ f2fs_up_write(&sbi->pin_sem);
++ goto out_err;
++ }
+
+ map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
+@@ -1804,15 +1847,6 @@ static long f2fs_fallocate(struct file *file, int mode,
+ (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
+ return -EOPNOTSUPP;
+
+- /*
+- * Pinned file should not support partial truncation since the block
+- * can be used by applications.
+- */
+- if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
+- (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+- FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
+- return -EOPNOTSUPP;
+-
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
+ FALLOC_FL_INSERT_RANGE))
+@@ -1820,6 +1854,17 @@ static long f2fs_fallocate(struct file *file, int mode,
+
+ inode_lock(inode);
+
++ /*
++ * Pinned file should not support partial truncation since the block
++ * can be used by applications.
++ */
++ if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
++ (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
++ FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
++
+ ret = file_modified(file);
+ if (ret)
+ goto out;
+@@ -2052,10 +2097,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct inode *pinode;
+ loff_t isize;
+ int ret;
+
++ if (!(filp->f_mode & FMODE_WRITE))
++ return -EBADF;
++
+ if (!inode_owner_or_capable(idmap, inode))
+ return -EACCES;
+
+@@ -2101,15 +2148,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
+ /* Check if the inode already has a COW inode */
+ if (fi->cow_inode == NULL) {
+ /* Create a COW inode for atomic write */
+- pinode = f2fs_iget(inode->i_sb, fi->i_pino);
+- if (IS_ERR(pinode)) {
+- f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+- ret = PTR_ERR(pinode);
+- goto out;
+- }
++ struct dentry *dentry = file_dentry(filp);
++ struct inode *dir = d_inode(dentry->d_parent);
+
+- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
+- iput(pinode);
++ ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode);
+ if (ret) {
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+ goto out;
+@@ -2117,8 +2159,15 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
+
+ set_inode_flag(fi->cow_inode, FI_COW_FILE);
+ clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
++
++ /* Set the COW inode's atomic_inode to the atomic inode */
++ F2FS_I(fi->cow_inode)->atomic_inode = inode;
+ } else {
+ /* Reuse the already created COW inode */
++ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode));
++
++ invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1);
++
+ ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
+ if (ret) {
+ f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+@@ -2160,6 +2209,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
+ int ret;
+
++ if (!(filp->f_mode & FMODE_WRITE))
++ return -EBADF;
++
+ if (!inode_owner_or_capable(idmap, inode))
+ return -EACCES;
+
+@@ -2192,6 +2244,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
+ struct mnt_idmap *idmap = file_mnt_idmap(filp);
+ int ret;
+
++ if (!(filp->f_mode & FMODE_WRITE))
++ return -EBADF;
++
+ if (!inode_owner_or_capable(idmap, inode))
+ return -EACCES;
+
+@@ -2210,34 +2265,13 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp)
+ return ret;
+ }
+
+-static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
++int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
++ bool readonly, bool need_lock)
+ {
+- struct inode *inode = file_inode(filp);
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct super_block *sb = sbi->sb;
+- __u32 in;
+ int ret = 0;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EPERM;
+-
+- if (get_user(in, (__u32 __user *)arg))
+- return -EFAULT;
+-
+- if (in != F2FS_GOING_DOWN_FULLSYNC) {
+- ret = mnt_want_write_file(filp);
+- if (ret) {
+- if (ret == -EROFS) {
+- ret = 0;
+- f2fs_stop_checkpoint(sbi, false,
+- STOP_CP_REASON_SHUTDOWN);
+- trace_f2fs_shutdown(sbi, in, ret);
+- }
+- return ret;
+- }
+- }
+-
+- switch (in) {
++ switch (flag) {
+ case F2FS_GOING_DOWN_FULLSYNC:
+ ret = freeze_bdev(sb->s_bdev);
+ if (ret)
+@@ -2271,18 +2305,62 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
+ goto out;
+ }
+
++ if (readonly)
++ goto out;
++
++ /* grab sb->s_umount to avoid racing w/ remount() */
++ if (need_lock)
++ down_read(&sbi->sb->s_umount);
++
+ f2fs_stop_gc_thread(sbi);
+ f2fs_stop_discard_thread(sbi);
+
+ f2fs_drop_discard_cmd(sbi);
+ clear_opt(sbi, DISCARD);
+
++ if (need_lock)
++ up_read(&sbi->sb->s_umount);
++
+ f2fs_update_time(sbi, REQ_TIME);
+ out:
+- if (in != F2FS_GOING_DOWN_FULLSYNC)
+- mnt_drop_write_file(filp);
+
+- trace_f2fs_shutdown(sbi, in, ret);
++ trace_f2fs_shutdown(sbi, flag, ret);
++
++ return ret;
++}
++
++static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
++{
++ struct inode *inode = file_inode(filp);
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++ __u32 in;
++ int ret;
++ bool need_drop = false, readonly = false;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (get_user(in, (__u32 __user *)arg))
++ return -EFAULT;
++
++ if (in != F2FS_GOING_DOWN_FULLSYNC) {
++ ret = mnt_want_write_file(filp);
++ if (ret) {
++ if (ret != -EROFS)
++ return ret;
++
++ /* fallback to nosync shutdown for readonly fs */
++ in = F2FS_GOING_DOWN_NOSYNC;
++ readonly = true;
++ } else {
++ need_drop = true;
++ }
++ }
++
++ ret = f2fs_do_shutdown(sbi, in, readonly, true);
++
++ if (need_drop)
++ mnt_drop_write_file(filp);
+
+ return ret;
+ }
+@@ -2583,7 +2661,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ .m_may_create = false };
+ struct extent_info ei = {};
+ pgoff_t pg_start, pg_end, next_pgofs;
+- unsigned int blk_per_seg = sbi->blocks_per_seg;
+ unsigned int total = 0, sec_num;
+ block_t blk_end = 0;
+ bool fragmented = false;
+@@ -2596,7 +2673,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+
+ inode_lock(inode);
+
+- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) ||
++ f2fs_is_atomic_file(inode)) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
+@@ -2619,7 +2697,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ * block addresses are continuous.
+ */
+ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
+- if (ei.fofs + ei.len >= pg_end)
++ if ((pgoff_t)ei.fofs + ei.len >= pg_end)
+ goto out;
+ }
+
+@@ -2692,7 +2770,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ set_inode_flag(inode, FI_SKIP_WRITES);
+
+ idx = map.m_lblk;
+- while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
++ while (idx < map.m_lblk + map.m_len &&
++ cnt < BLKS_PER_SEG(sbi)) {
+ struct page *page;
+
+ page = f2fs_get_lock_data_page(inode, idx, true);
+@@ -2701,6 +2780,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ goto clear_out;
+ }
+
++ f2fs_wait_on_page_writeback(page, DATA, true, true);
++
+ set_page_dirty(page);
+ set_page_private_gcing(page);
+ f2fs_put_page(page, 1);
+@@ -2712,7 +2793,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+
+ map.m_lblk = idx;
+ check:
+- if (map.m_lblk < pg_end && cnt < blk_per_seg)
++ if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
+ goto do_map;
+
+ clear_inode_flag(inode, FI_SKIP_WRITES);
+@@ -2818,6 +2899,17 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ goto out;
+ }
+
++ if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
++ f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
++ ret = -EOPNOTSUPP;
++ goto out_unlock;
++ }
++
++ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
+ ret = -EINVAL;
+ if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ goto out_unlock;
+@@ -2976,8 +3068,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
+
+ if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
+ __is_large_section(sbi)) {
+- f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
+- range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
++ f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1",
++ range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
+ return -EINVAL;
+ }
+
+@@ -3183,6 +3275,7 @@ int f2fs_pin_file_control(struct inode *inode, bool inc)
+ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
+ {
+ struct inode *inode = file_inode(filp);
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ __u32 pin;
+ int ret = 0;
+
+@@ -3192,7 +3285,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+- if (f2fs_readonly(F2FS_I_SB(inode)->sb))
++ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+@@ -3201,13 +3294,27 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
+
+ inode_lock(inode);
+
++ if (f2fs_is_atomic_file(inode)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ if (!pin) {
+ clear_inode_flag(inode, FI_PIN_FILE);
+ f2fs_i_gc_failures_write(inode, 0);
+ goto done;
++ } else if (f2fs_is_pinned_file(inode)) {
++ goto done;
+ }
+
+- if (f2fs_should_update_outplace(inode, NULL)) {
++ if (f2fs_sb_has_blkzoned(sbi) && F2FS_HAS_BLOCKS(inode)) {
++ ret = -EFBIG;
++ goto out;
++ }
++
++ /* Let's allow file pinning on zoned device. */
++ if (!f2fs_sb_has_blkzoned(sbi) &&
++ f2fs_should_update_outplace(inode, NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -3229,7 +3336,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
+ set_inode_flag(inode, FI_PIN_FILE);
+ ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
+ done:
+- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
++ f2fs_update_time(sbi, REQ_TIME);
+ out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+@@ -3258,6 +3365,7 @@ int f2fs_precache_extents(struct inode *inode)
+ return -EOPNOTSUPP;
+
+ map.m_lblk = 0;
++ map.m_pblk = 0;
+ map.m_next_pgofs = NULL;
+ map.m_next_extent = &m_next_extent;
+ map.m_seg_type = NO_CHECK_TYPE;
+@@ -3462,8 +3570,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+ if (blkaddr != NEW_ADDR)
+ continue;
+
+- dn->data_blkaddr = NULL_ADDR;
+- f2fs_set_data_blkaddr(dn);
++ f2fs_set_data_blkaddr(dn, NULL_ADDR);
+ }
+
+ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
+@@ -3490,9 +3597,6 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ if (!f2fs_sb_has_compression(sbi))
+ return -EOPNOTSUPP;
+
+- if (!f2fs_compressed_file(inode))
+- return -EINVAL;
+-
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+@@ -3511,7 +3615,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ goto out;
+ }
+
+- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++ if (!f2fs_compressed_file(inode) ||
++ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -3538,9 +3643,12 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+
++ f2fs_lock_op(sbi);
++
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ if (ret) {
++ f2fs_unlock_op(sbi);
+ if (ret == -ENOENT) {
+ page_idx = f2fs_get_next_page_offset(&dn,
+ page_idx);
+@@ -3558,6 +3666,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+
+ f2fs_put_dnode(&dn);
+
++ f2fs_unlock_op(sbi);
++
+ if (ret < 0)
+ break;
+
+@@ -3588,10 +3698,10 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ return ret;
+ }
+
+-static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
++static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
++ unsigned int *reserved_blocks)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+- unsigned int reserved_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ block_t blkaddr;
+ int i;
+@@ -3611,44 +3721,63 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+
+ while (count) {
+ int compr_blocks = 0;
+- blkcnt_t reserved;
++ blkcnt_t reserved = 0;
++ blkcnt_t to_reserved;
+ int ret;
+
+- for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+- blkaddr = f2fs_data_blkaddr(dn);
++ for (i = 0; i < cluster_size; i++) {
++ blkaddr = data_blkaddr(dn->inode, dn->node_page,
++ dn->ofs_in_node + i);
+
+ if (i == 0) {
+- if (blkaddr == COMPRESS_ADDR)
+- continue;
+- dn->ofs_in_node += cluster_size;
+- goto next;
++ if (blkaddr != COMPRESS_ADDR) {
++ dn->ofs_in_node += cluster_size;
++ goto next;
++ }
++ continue;
+ }
+
++ /*
++ * compressed cluster was not released because it
++ * failed in release_compress_blocks(), so NEW_ADDR
++ * is a possible case.
++ */
++ if (blkaddr == NEW_ADDR) {
++ reserved++;
++ continue;
++ }
+ if (__is_valid_data_blkaddr(blkaddr)) {
+ compr_blocks++;
+ continue;
+ }
++ }
++
++ to_reserved = cluster_size - compr_blocks - reserved;
+
+- dn->data_blkaddr = NEW_ADDR;
+- f2fs_set_data_blkaddr(dn);
++ /* for the case all blocks in cluster were reserved */
++ if (to_reserved == 1) {
++ dn->ofs_in_node += cluster_size;
++ goto next;
+ }
+
+- reserved = cluster_size - compr_blocks;
+- ret = inc_valid_block_count(sbi, dn->inode, &reserved);
+- if (ret)
++ ret = inc_valid_block_count(sbi, dn->inode,
++ &to_reserved, false);
++ if (unlikely(ret))
+ return ret;
+
+- if (reserved != cluster_size - compr_blocks)
+- return -ENOSPC;
++ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
++ if (f2fs_data_blkaddr(dn) == NULL_ADDR)
++ f2fs_set_data_blkaddr(dn, NEW_ADDR);
++ }
+
+ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+
+- reserved_blocks += reserved;
++ *reserved_blocks += to_reserved;
+ next:
+ count -= cluster_size;
+ }
+
+- return reserved_blocks;
++ return 0;
+ }
+
+ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+@@ -3662,9 +3791,6 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ if (!f2fs_sb_has_compression(sbi))
+ return -EOPNOTSUPP;
+
+- if (!f2fs_compressed_file(inode))
+- return -EINVAL;
+-
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+@@ -3672,18 +3798,19 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ if (ret)
+ return ret;
+
+- if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+- goto out;
+-
+ f2fs_balance_fs(sbi, true);
+
+ inode_lock(inode);
+
+- if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++ if (!f2fs_compressed_file(inode) ||
++ !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto unlock_inode;
+ }
+
++ if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
++ goto unlock_inode;
++
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+
+@@ -3693,9 +3820,12 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+
++ f2fs_lock_op(sbi);
++
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ if (ret) {
++ f2fs_unlock_op(sbi);
+ if (ret == -ENOENT) {
+ page_idx = f2fs_get_next_page_offset(&dn,
+ page_idx);
+@@ -3709,31 +3839,31 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+ count = round_up(count, F2FS_I(inode)->i_cluster_size);
+
+- ret = reserve_compress_blocks(&dn, count);
++ ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
+
+ f2fs_put_dnode(&dn);
+
++ f2fs_unlock_op(sbi);
++
+ if (ret < 0)
+ break;
+
+ page_idx += count;
+- reserved_blocks += ret;
+ }
+
+ filemap_invalidate_unlock(inode->i_mapping);
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+- if (ret >= 0) {
++ if (!ret) {
+ clear_inode_flag(inode, FI_COMPRESS_RELEASED);
+ inode_set_ctime_current(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+ unlock_inode:
+ inode_unlock(inode);
+-out:
+ mnt_drop_write_file(filp);
+
+- if (ret >= 0) {
++ if (!ret) {
+ ret = put_user(reserved_blocks, (u64 __user *)arg);
+ } else if (reserved_blocks &&
+ atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+@@ -3982,16 +4112,20 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ sizeof(option)))
+ return -EFAULT;
+
+- if (!f2fs_compressed_file(inode) ||
+- option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+- option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
+- option.algorithm >= COMPRESS_MAX)
++ if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
++ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
++ option.algorithm >= COMPRESS_MAX)
+ return -EINVAL;
+
+ file_start_write(filp);
+ inode_lock(inode);
+
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
++ if (!f2fs_compressed_file(inode)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
+ ret = -EBUSY;
+ goto out;
+@@ -4005,6 +4139,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ F2FS_I(inode)->i_compress_algorithm = option.algorithm;
+ F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
+ F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
++ /* Set default level */
++ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
++ F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
++ else
++ F2FS_I(inode)->i_compress_level = 0;
++ /* Adjust mount option level */
++ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
++ F2FS_OPTION(sbi).compress_level)
++ F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
+ if (!f2fs_is_compress_backend_ready(inode))
+@@ -4043,7 +4186,10 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
+ /* It will never fail, when page has pinned above */
+ f2fs_bug_on(F2FS_I_SB(inode), !page);
+
++ f2fs_wait_on_page_writeback(page, DATA, true, true);
++
+ set_page_dirty(page);
++ set_page_private_gcing(page);
+ f2fs_put_page(page, 1);
+ f2fs_put_page(page, 0);
+ }
+@@ -4056,10 +4202,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+- pgoff_t page_idx = 0, last_idx;
+- unsigned int blk_per_seg = sbi->blocks_per_seg;
+- int cluster_size = fi->i_cluster_size;
+- int count, ret;
++ pgoff_t page_idx = 0, last_idx, cluster_idx;
++ int ret;
+
+ if (!f2fs_sb_has_compression(sbi) ||
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
+@@ -4068,9 +4212,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+- if (!f2fs_compressed_file(inode))
+- return -EINVAL;
+-
+ f2fs_balance_fs(sbi, true);
+
+ file_start_write(filp);
+@@ -4081,7 +4222,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
+ goto out;
+ }
+
+- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++ if (!f2fs_compressed_file(inode) ||
++ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -4094,22 +4236,24 @@ static int f2fs_ioc_decompress_file(struct file *filp)
+ goto out;
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
++ last_idx >>= fi->i_log_cluster_size;
++
++ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
++ page_idx = cluster_idx << fi->i_log_cluster_size;
+
+- count = last_idx - page_idx;
+- while (count && count >= cluster_size) {
+- ret = redirty_blocks(inode, page_idx, cluster_size);
++ if (!f2fs_is_compressed_cluster(inode, page_idx))
++ continue;
++
++ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
+ if (ret < 0)
+ break;
+
+- if (get_dirty_pages(inode) >= blk_per_seg) {
++ if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
+ ret = filemap_fdatawrite(inode->i_mapping);
+ if (ret < 0)
+ break;
+ }
+
+- count -= cluster_size;
+- page_idx += cluster_size;
+-
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+@@ -4135,10 +4279,9 @@ static int f2fs_ioc_compress_file(struct file *filp)
+ {
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- pgoff_t page_idx = 0, last_idx;
+- unsigned int blk_per_seg = sbi->blocks_per_seg;
+- int cluster_size = F2FS_I(inode)->i_cluster_size;
+- int count, ret;
++ struct f2fs_inode_info *fi = F2FS_I(inode);
++ pgoff_t page_idx = 0, last_idx, cluster_idx;
++ int ret;
+
+ if (!f2fs_sb_has_compression(sbi) ||
+ F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
+@@ -4147,9 +4290,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+- if (!f2fs_compressed_file(inode))
+- return -EINVAL;
+-
+ f2fs_balance_fs(sbi, true);
+
+ file_start_write(filp);
+@@ -4160,7 +4300,8 @@ static int f2fs_ioc_compress_file(struct file *filp)
+ goto out;
+ }
+
+- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++ if (!f2fs_compressed_file(inode) ||
++ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -4172,22 +4313,24 @@ static int f2fs_ioc_compress_file(struct file *filp)
+ set_inode_flag(inode, FI_ENABLE_COMPRESS);
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
++ last_idx >>= fi->i_log_cluster_size;
++
++ for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
++ page_idx = cluster_idx << fi->i_log_cluster_size;
++
++ if (f2fs_is_sparse_cluster(inode, page_idx))
++ continue;
+
+- count = last_idx - page_idx;
+- while (count && count >= cluster_size) {
+- ret = redirty_blocks(inode, page_idx, cluster_size);
++ ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
+ if (ret < 0)
+ break;
+
+- if (get_dirty_pages(inode) >= blk_per_seg) {
++ if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
+ ret = filemap_fdatawrite(inode->i_mapping);
+ if (ret < 0)
+ break;
+ }
+
+- count -= cluster_size;
+- page_idx += cluster_size;
+-
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+@@ -4446,6 +4589,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
+ iov_iter_count(to), READ);
+
++ /* In LFS mode, if there is inflight dio, wait for its completion */
++ if (f2fs_lfs_mode(F2FS_I_SB(inode)))
++ inode_dio_wait(inode);
++
+ if (f2fs_should_use_dio(inode, iocb, to)) {
+ ret = f2fs_dio_read_iter(iocb, to);
+ } else {
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index f550cdeaa66384..888c301ffe8f4c 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -259,7 +259,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
+ p->ofs_unit = 1;
+ } else {
+ p->gc_mode = select_gc_type(sbi, gc_type);
+- p->ofs_unit = sbi->segs_per_sec;
++ p->ofs_unit = SEGS_PER_SEC(sbi);
+ if (__is_large_section(sbi)) {
+ p->dirty_bitmap = dirty_i->dirty_secmap;
+ p->max_search = count_bits(p->dirty_bitmap,
+@@ -280,11 +280,11 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
+ p->max_search > sbi->max_victim_search)
+ p->max_search = sbi->max_victim_search;
+
+- /* let's select beginning hot/small space first in no_heap mode*/
++ /* let's select beginning hot/small space first. */
+ if (f2fs_need_rand_seg(sbi))
+- p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
+- else if (test_opt(sbi, NOHEAP) &&
+- (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
++ p->offset = get_random_u32_below(MAIN_SECS(sbi) *
++ SEGS_PER_SEC(sbi));
++ else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ p->offset = 0;
+ else
+ p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
+@@ -295,13 +295,13 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
+ {
+ /* SSR allocates in a segment unit */
+ if (p->alloc_mode == SSR)
+- return sbi->blocks_per_seg;
++ return BLKS_PER_SEG(sbi);
+ else if (p->alloc_mode == AT_SSR)
+ return UINT_MAX;
+
+ /* LFS */
+ if (p->gc_mode == GC_GREEDY)
+- return 2 * sbi->blocks_per_seg * p->ofs_unit;
++ return 2 * BLKS_PER_SEG(sbi) * p->ofs_unit;
+ else if (p->gc_mode == GC_CB)
+ return UINT_MAX;
+ else if (p->gc_mode == GC_AT)
+@@ -496,9 +496,9 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
+ return;
+ }
+
+- for (i = 0; i < sbi->segs_per_sec; i++)
++ for (i = 0; i < SEGS_PER_SEC(sbi); i++)
+ mtime += get_seg_entry(sbi, start + i)->mtime;
+- mtime = div_u64(mtime, sbi->segs_per_sec);
++ mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
+
+ /* Handle if the system time has changed by the user */
+ if (mtime < sit_i->min_mtime)
+@@ -599,7 +599,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ unsigned long long age;
+ unsigned long long max_mtime = sit_i->dirty_max_mtime;
+ unsigned long long min_mtime = sit_i->dirty_min_mtime;
+- unsigned int seg_blocks = sbi->blocks_per_seg;
+ unsigned int vblocks;
+ unsigned int dirty_threshold = max(am->max_candidate_count,
+ am->candidate_ratio *
+@@ -629,7 +628,7 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ f2fs_bug_on(sbi, !vblocks);
+
+ /* rare case */
+- if (vblocks == seg_blocks)
++ if (vblocks == BLKS_PER_SEG(sbi))
+ goto skip_node;
+
+ iter++;
+@@ -755,7 +754,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
+ int ret = 0;
+
+ mutex_lock(&dirty_i->seglist_lock);
+- last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
++ last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
+
+ p.alloc_mode = alloc_mode;
+ p.age = age;
+@@ -896,7 +895,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
+ else
+ sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
+ sm->last_victim[p.gc_mode] %=
+- (MAIN_SECS(sbi) * sbi->segs_per_sec);
++ (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
+ break;
+ }
+ }
+@@ -1172,7 +1171,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ static int ra_data_block(struct inode *inode, pgoff_t index)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+- struct address_space *mapping = inode->i_mapping;
++ struct address_space *mapping = f2fs_is_cow_file(inode) ?
++ F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+ struct f2fs_io_info fio = {
+@@ -1184,7 +1184,6 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ .op_flags = 0,
+ .encrypted_page = NULL,
+ .in_list = 0,
+- .retry = 0,
+ };
+ int err;
+
+@@ -1264,6 +1263,8 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ static int move_data_block(struct inode *inode, block_t bidx,
+ int gc_type, unsigned int segno, int off)
+ {
++ struct address_space *mapping = f2fs_is_cow_file(inode) ?
++ F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
+ struct f2fs_io_info fio = {
+ .sbi = F2FS_I_SB(inode),
+ .ino = inode->i_ino,
+@@ -1273,7 +1274,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ .op_flags = 0,
+ .encrypted_page = NULL,
+ .in_list = 0,
+- .retry = 0,
+ };
+ struct dnode_of_data dn;
+ struct f2fs_summary sum;
+@@ -1287,7 +1287,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
+
+ /* do not read out */
+- page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
++ page = f2fs_grab_cache_page(mapping, bidx, false);
+ if (!page)
+ return -ENOMEM;
+
+@@ -1380,9 +1380,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
+- invalidate_mapping_pages(META_MAPPING(fio.sbi),
+- fio.old_blkaddr, fio.old_blkaddr);
+- f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
++
++ f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
+
+ set_page_dirty(fio.encrypted_page);
+ if (clear_page_dirty_for_io(fio.encrypted_page))
+@@ -1394,20 +1393,12 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ fio.op_flags = REQ_SYNC;
+ fio.new_blkaddr = newaddr;
+ f2fs_submit_page_write(&fio);
+- if (fio.retry) {
+- err = -EAGAIN;
+- if (PageWriteback(fio.encrypted_page))
+- end_page_writeback(fio.encrypted_page);
+- goto put_page_out;
+- }
+
+ f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
+
+ f2fs_update_data_blkaddr(&dn, newaddr);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+- if (page->index == 0)
+- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+-put_page_out:
++
+ f2fs_put_page(fio.encrypted_page, 1);
+ recover_block:
+ if (err)
+@@ -1563,10 +1554,25 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ int err;
+
+ inode = f2fs_iget(sb, dni.ino);
+- if (IS_ERR(inode) || is_bad_inode(inode) ||
+- special_file(inode->i_mode))
++ if (IS_ERR(inode))
+ continue;
+
++ if (is_bad_inode(inode) ||
++ special_file(inode->i_mode)) {
++ iput(inode);
++ continue;
++ }
++
++ if (f2fs_has_inline_data(inode)) {
++ iput(inode);
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ f2fs_err_ratelimited(sbi,
++ "inode %lx has both inline_data flag and "
++ "data block, nid=%u, ofs_in_node=%u",
++ inode->i_ino, dni.nid, ofs_in_node);
++ continue;
++ }
++
+ err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ if (err == -EAGAIN) {
+ iput(inode);
+@@ -1583,7 +1589,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+ ofs_in_node;
+
+- if (f2fs_post_read_required(inode)) {
++ if (f2fs_meta_inode_gc_required(inode)) {
+ int err = ra_data_block(inode, start_bidx);
+
+ f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+@@ -1634,7 +1640,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+
+ start_bidx = f2fs_start_bidx_of_node(nofs, inode)
+ + ofs_in_node;
+- if (f2fs_post_read_required(inode))
++ if (f2fs_meta_inode_gc_required(inode))
+ err = move_data_block(inode, start_bidx,
+ gc_type, segno, off);
+ else
+@@ -1642,7 +1648,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ segno, off);
+
+ if (!err && (gc_type == FG_GC ||
+- f2fs_post_read_required(inode)))
++ f2fs_meta_inode_gc_required(inode)))
+ submitted++;
+
+ if (locked) {
+@@ -1681,7 +1687,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ struct f2fs_summary_block *sum;
+ struct blk_plug plug;
+ unsigned int segno = start_segno;
+- unsigned int end_segno = start_segno + sbi->segs_per_sec;
++ unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
+ int seg_freed = 0, migrated = 0;
+ unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+ SUM_TYPE_DATA : SUM_TYPE_NODE;
+@@ -1689,7 +1695,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ int submitted = 0;
+
+ if (__is_large_section(sbi))
+- end_segno = rounddown(end_segno, sbi->segs_per_sec);
++ end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
+
+ /*
+ * zone-capacity can be less than zone-size in zoned devices,
+@@ -1697,7 +1703,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ * calculate the end segno in the zone which can be garbage collected
+ */
+ if (f2fs_sb_has_blkzoned(sbi))
+- end_segno -= sbi->segs_per_sec -
++ end_segno -= SEGS_PER_SEC(sbi) -
+ f2fs_usable_segs_in_sec(sbi, segno);
+
+ sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
+@@ -1983,10 +1989,40 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
+ init_atgc_management(sbi);
+ }
+
++int f2fs_gc_range(struct f2fs_sb_info *sbi,
++ unsigned int start_seg, unsigned int end_seg,
++ bool dry_run, unsigned int dry_run_sections)
++{
++ unsigned int segno;
++ unsigned int gc_secs = dry_run_sections;
++
++ for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
++ struct gc_inode_list gc_list = {
++ .ilist = LIST_HEAD_INIT(gc_list.ilist),
++ .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
++ };
++
++ do_garbage_collect(sbi, segno, &gc_list, FG_GC,
++ dry_run_sections == 0);
++ put_gc_inode(&gc_list);
++
++ if (!dry_run && get_valid_blocks(sbi, segno, true))
++ return -EAGAIN;
++ if (dry_run && dry_run_sections &&
++ !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
++ break;
++
++ if (fatal_signal_pending(current))
++ return -ERESTARTSYS;
++ }
++
++ return 0;
++}
++
+ static int free_segment_range(struct f2fs_sb_info *sbi,
+- unsigned int secs, bool gc_only)
++ unsigned int secs, bool dry_run)
+ {
+- unsigned int segno, next_inuse, start, end;
++ unsigned int next_inuse, start, end;
+ struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
+ int gc_mode, gc_type;
+ int err = 0;
+@@ -1994,7 +2030,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
+
+ /* Force block allocation for GC */
+ MAIN_SECS(sbi) -= secs;
+- start = MAIN_SECS(sbi) * sbi->segs_per_sec;
++ start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
+ end = MAIN_SEGS(sbi) - 1;
+
+ mutex_lock(&DIRTY_I(sbi)->seglist_lock);
+@@ -2012,25 +2048,8 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
+ f2fs_allocate_segment_for_resize(sbi, type, start, end);
+
+ /* do GC to move out valid blocks in the range */
+- for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
+- struct gc_inode_list gc_list = {
+- .ilist = LIST_HEAD_INIT(gc_list.ilist),
+- .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+- };
+-
+- do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
+- put_gc_inode(&gc_list);
+-
+- if (!gc_only && get_valid_blocks(sbi, segno, true)) {
+- err = -EAGAIN;
+- goto out;
+- }
+- if (fatal_signal_pending(current)) {
+- err = -ERESTARTSYS;
+- goto out;
+- }
+- }
+- if (gc_only)
++ err = f2fs_gc_range(sbi, start, end, dry_run, 0);
++ if (err || dry_run)
+ goto out;
+
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
+@@ -2056,7 +2075,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
+ int segment_count;
+ int segment_count_main;
+ long long block_count;
+- int segs = secs * sbi->segs_per_sec;
++ int segs = secs * SEGS_PER_SEC(sbi);
+
+ f2fs_down_write(&sbi->sb_lock);
+
+@@ -2069,7 +2088,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
+ raw_sb->segment_count = cpu_to_le32(segment_count + segs);
+ raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
+ raw_sb->block_count = cpu_to_le64(block_count +
+- (long long)segs * sbi->blocks_per_seg);
++ (long long)(segs << sbi->log_blocks_per_seg));
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+ int dev_segs =
+@@ -2084,8 +2103,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
+
+ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
+ {
+- int segs = secs * sbi->segs_per_sec;
+- long long blks = (long long)segs * sbi->blocks_per_seg;
++ int segs = secs * SEGS_PER_SEC(sbi);
++ long long blks = (long long)segs << sbi->log_blocks_per_seg;
+ long long user_block_count =
+ le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
+
+@@ -2127,7 +2146,7 @@ int f2fs_resize_fs(struct file *filp, __u64 block_count)
+ int last_dev = sbi->s_ndevs - 1;
+ __u64 last_segs = FDEV(last_dev).total_segments;
+
+- if (block_count + last_segs * sbi->blocks_per_seg <=
++ if (block_count + (last_segs << sbi->log_blocks_per_seg) <=
+ old_block_count)
+ return -EINVAL;
+ }
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 2fe25619ccb5f5..a3f8b4ed495efb 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -16,7 +16,7 @@
+
+ static bool support_inline_data(struct inode *inode)
+ {
+- if (f2fs_is_atomic_file(inode))
++ if (f2fs_used_in_atomic_write(inode))
+ return false;
+ if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
+ return false;
+@@ -33,11 +33,29 @@ bool f2fs_may_inline_data(struct inode *inode)
+ return !f2fs_post_read_required(inode);
+ }
+
+-bool f2fs_sanity_check_inline_data(struct inode *inode)
++static bool inode_has_blocks(struct inode *inode, struct page *ipage)
++{
++ struct f2fs_inode *ri = F2FS_INODE(ipage);
++ int i;
++
++ if (F2FS_HAS_BLOCKS(inode))
++ return true;
++
++ for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
++ if (ri->i_nid[i])
++ return true;
++ }
++ return false;
++}
++
++bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
+ {
+ if (!f2fs_has_inline_data(inode))
+ return false;
+
++ if (inode_has_blocks(inode, ipage))
++ return false;
++
+ if (!support_inline_data(inode))
+ return true;
+
+@@ -203,8 +221,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
+ struct page *ipage, *page;
+ int err = 0;
+
+- if (!f2fs_has_inline_data(inode) ||
+- f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
++ if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
++ return -EROFS;
++
++ if (!f2fs_has_inline_data(inode))
+ return 0;
+
+ err = f2fs_dquot_initialize(inode);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index cde243840abd19..a3e0c927354331 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -29,9 +29,17 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+ if (is_inode_flag_set(inode, FI_NEW_INODE))
+ return;
+
++ if (f2fs_readonly(F2FS_I_SB(inode)->sb))
++ return;
++
+ if (f2fs_inode_dirtied(inode, sync))
+ return;
+
++ if (f2fs_is_atomic_file(inode)) {
++ set_inode_flag(inode, FI_ATOMIC_DIRTIED);
++ return;
++ }
++
+ mark_inode_dirty_sync(inode);
+ }
+
+@@ -61,49 +69,31 @@ void f2fs_set_inode_flags(struct inode *inode)
+ S_ENCRYPTED|S_VERITY|S_CASEFOLD);
+ }
+
+-static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
++static void __get_inode_rdev(struct inode *inode, struct page *node_page)
+ {
+- int extra_size = get_extra_isize(inode);
++ __le32 *addr = get_dnode_addr(inode, node_page);
+
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+- if (ri->i_addr[extra_size])
+- inode->i_rdev = old_decode_dev(
+- le32_to_cpu(ri->i_addr[extra_size]));
++ if (addr[0])
++ inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
+ else
+- inode->i_rdev = new_decode_dev(
+- le32_to_cpu(ri->i_addr[extra_size + 1]));
++ inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
+ }
+ }
+
+-static int __written_first_block(struct f2fs_sb_info *sbi,
+- struct f2fs_inode *ri)
++static void __set_inode_rdev(struct inode *inode, struct page *node_page)
+ {
+- block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
+-
+- if (!__is_valid_data_blkaddr(addr))
+- return 1;
+- if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
+- f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+- return -EFSCORRUPTED;
+- }
+- return 0;
+-}
+-
+-static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+-{
+- int extra_size = get_extra_isize(inode);
++ __le32 *addr = get_dnode_addr(inode, node_page);
+
+ if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+ if (old_valid_dev(inode->i_rdev)) {
+- ri->i_addr[extra_size] =
+- cpu_to_le32(old_encode_dev(inode->i_rdev));
+- ri->i_addr[extra_size + 1] = 0;
++ addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
++ addr[1] = 0;
+ } else {
+- ri->i_addr[extra_size] = 0;
+- ri->i_addr[extra_size + 1] =
+- cpu_to_le32(new_encode_dev(inode->i_rdev));
+- ri->i_addr[extra_size + 2] = 0;
++ addr[0] = 0;
++ addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
++ addr[2] = 0;
+ }
+ }
+ }
+@@ -361,7 +351,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ }
+ }
+
+- if (f2fs_sanity_check_inline_data(inode)) {
++ if (f2fs_sanity_check_inline_data(inode, node_page)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+@@ -379,6 +369,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ return false;
+ }
+
++ if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
++ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
++ __func__, inode->i_ino, fi->i_xattr_nid);
++ return false;
++ }
++
+ return true;
+ }
+
+@@ -398,7 +394,6 @@ static int do_read_inode(struct inode *inode)
+ struct page *node_page;
+ struct f2fs_inode *ri;
+ projid_t i_projid;
+- int err;
+
+ /* Check if ino is within scope */
+ if (f2fs_check_nid_range(sbi, inode->i_ino))
+@@ -478,17 +473,7 @@ static int do_read_inode(struct inode *inode)
+ }
+
+ /* get rdev by using inline_info */
+- __get_inode_rdev(inode, ri);
+-
+- if (S_ISREG(inode->i_mode)) {
+- err = __written_first_block(sbi, ri);
+- if (err < 0) {
+- f2fs_put_page(node_page, 1);
+- return err;
+- }
+- if (!err)
+- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+- }
++ __get_inode_rdev(inode, node_page);
+
+ if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
+ fi->last_disk_size = inode->i_size;
+@@ -531,16 +516,16 @@ static int do_read_inode(struct inode *inode)
+
+ init_idisk_time(inode);
+
+- /* Need all the flag bits */
+- f2fs_init_read_extent_tree(inode, node_page);
+- f2fs_init_age_extent_tree(inode);
+-
+- if (!sanity_check_extent_cache(inode)) {
++ if (!sanity_check_extent_cache(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+ return -EFSCORRUPTED;
+ }
+
++ /* Need all the flag bits */
++ f2fs_init_read_extent_tree(inode, node_page);
++ f2fs_init_age_extent_tree(inode);
++
+ f2fs_put_page(node_page, 1);
+
+ stat_inc_inline_xattr(inode);
+@@ -633,14 +618,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
+ }
+ f2fs_set_inode_flags(inode);
+
+- if (file_should_truncate(inode) &&
+- !is_sbi_flag_set(sbi, SBI_POR_DOING)) {
+- ret = f2fs_truncate(inode);
+- if (ret)
+- goto bad_inode;
+- file_dont_truncate(inode);
+- }
+-
+ unlock_new_inode(inode);
+ trace_f2fs_iget(inode);
+ return inode;
+@@ -761,7 +738,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
+ }
+ }
+
+- __set_inode_rdev(inode, ri);
++ __set_inode_rdev(inode, node_page);
+
+ /* deleted inode */
+ if (inode->i_nlink == 0)
+@@ -836,8 +813,9 @@ void f2fs_evict_inode(struct inode *inode)
+
+ f2fs_abort_atomic_write(inode, true);
+
+- if (fi->cow_inode) {
++ if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
+ clear_inode_flag(fi->cow_inode, FI_COW_FILE);
++ F2FS_I(fi->cow_inode)->atomic_inode = NULL;
+ iput(fi->cow_inode);
+ fi->cow_inode = NULL;
+ }
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 193b22a2d6bfb2..2e08e1fdf485c7 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -455,63 +455,6 @@ struct dentry *f2fs_get_parent(struct dentry *child)
+ return d_obtain_alias(f2fs_iget(child->d_sb, ino));
+ }
+
+-static int __recover_dot_dentries(struct inode *dir, nid_t pino)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+- struct qstr dot = QSTR_INIT(".", 1);
+- struct qstr dotdot = QSTR_INIT("..", 2);
+- struct f2fs_dir_entry *de;
+- struct page *page;
+- int err = 0;
+-
+- if (f2fs_readonly(sbi->sb)) {
+- f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
+- dir->i_ino, pino);
+- return 0;
+- }
+-
+- if (!S_ISDIR(dir->i_mode)) {
+- f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)",
+- dir->i_ino, dir->i_mode, pino);
+- set_sbi_flag(sbi, SBI_NEED_FSCK);
+- return -ENOTDIR;
+- }
+-
+- err = f2fs_dquot_initialize(dir);
+- if (err)
+- return err;
+-
+- f2fs_balance_fs(sbi, true);
+-
+- f2fs_lock_op(sbi);
+-
+- de = f2fs_find_entry(dir, &dot, &page);
+- if (de) {
+- f2fs_put_page(page, 0);
+- } else if (IS_ERR(page)) {
+- err = PTR_ERR(page);
+- goto out;
+- } else {
+- err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
+- if (err)
+- goto out;
+- }
+-
+- de = f2fs_find_entry(dir, &dotdot, &page);
+- if (de)
+- f2fs_put_page(page, 0);
+- else if (IS_ERR(page))
+- err = PTR_ERR(page);
+- else
+- err = f2fs_do_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
+-out:
+- if (!err)
+- clear_inode_flag(dir, FI_INLINE_DOTS);
+-
+- f2fs_unlock_op(sbi);
+- return err;
+-}
+-
+ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+ {
+@@ -521,7 +464,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
+ struct dentry *new;
+ nid_t ino = -1;
+ int err = 0;
+- unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+ struct f2fs_filename fname;
+
+ trace_f2fs_lookup_start(dir, dentry, flags);
+@@ -558,17 +500,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
+ goto out;
+ }
+
+- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
+- err = __recover_dot_dentries(dir, root_ino);
+- if (err)
+- goto out_iput;
+- }
+-
+- if (f2fs_has_inline_dots(inode)) {
+- err = __recover_dot_dentries(inode, dir->i_ino);
+- if (err)
+- goto out_iput;
+- }
+ if (IS_ENCRYPTED(dir) &&
+ (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+ !fscrypt_has_permitted_context(dir, inode)) {
+@@ -853,7 +784,7 @@ static int f2fs_mknod(struct mnt_idmap *idmap, struct inode *dir,
+
+ static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct file *file, umode_t mode, bool is_whiteout,
+- struct inode **new_inode)
++ struct inode **new_inode, struct f2fs_filename *fname)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct inode *inode;
+@@ -881,7 +812,7 @@ static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ if (err)
+ goto out;
+
+- err = f2fs_do_tmpfile(inode, dir);
++ err = f2fs_do_tmpfile(inode, dir, fname);
+ if (err)
+ goto release_out;
+
+@@ -932,22 +863,24 @@ static int f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ if (!f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
+
+- err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL);
++ err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL, NULL);
+
+ return finish_open_simple(file, err);
+ }
+
+ static int f2fs_create_whiteout(struct mnt_idmap *idmap,
+- struct inode *dir, struct inode **whiteout)
++ struct inode *dir, struct inode **whiteout,
++ struct f2fs_filename *fname)
+ {
+- return __f2fs_tmpfile(idmap, dir, NULL,
+- S_IFCHR | WHITEOUT_MODE, true, whiteout);
++ return __f2fs_tmpfile(idmap, dir, NULL, S_IFCHR | WHITEOUT_MODE,
++ true, whiteout, fname);
+ }
+
+ int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ struct inode **new_inode)
+ {
+- return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG, false, new_inode);
++ return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG,
++ false, new_inode, NULL);
+ }
+
+ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+@@ -990,7 +923,14 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ }
+
+ if (flags & RENAME_WHITEOUT) {
+- err = f2fs_create_whiteout(idmap, old_dir, &whiteout);
++ struct f2fs_filename fname;
++
++ err = f2fs_setup_filename(old_dir, &old_dentry->d_name,
++ 0, &fname);
++ if (err)
++ return err;
++
++ err = f2fs_create_whiteout(idmap, old_dir, &whiteout, &fname);
+ if (err)
+ return err;
+ }
+@@ -1106,7 +1046,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ }
+
+ if (old_dir_entry) {
+- if (old_dir != new_dir && !whiteout)
++ if (old_dir != new_dir)
+ f2fs_set_link(old_inode, old_dir_entry,
+ old_dir_page, new_dir);
+ else
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index ee2e1dd64f256f..c765bda3beaacb 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -852,21 +852,29 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
+
+ if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+ f2fs_sb_has_readonly(sbi)) {
+- unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
++ unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
++ unsigned int ofs_in_node = dn->ofs_in_node;
++ pgoff_t fofs = index;
++ unsigned int c_len;
+ block_t blkaddr;
+
++ /* should align fofs and ofs_in_node to cluster_size */
++ if (fofs % cluster_size) {
++ fofs = round_down(fofs, cluster_size);
++ ofs_in_node = round_down(ofs_in_node, cluster_size);
++ }
++
++ c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
+ if (!c_len)
+ goto out;
+
+- blkaddr = f2fs_data_blkaddr(dn);
++ blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
+ if (blkaddr == COMPRESS_ADDR)
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+- dn->ofs_in_node + 1);
++ ofs_in_node + 1);
+
+ f2fs_update_read_extent_tree_range_compressed(dn->inode,
+- index, blkaddr,
+- F2FS_I(dn->inode)->i_cluster_size,
+- c_len);
++ fofs, blkaddr, cluster_size, c_len);
+ }
+ out:
+ return 0;
+@@ -1311,6 +1319,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ }
+ if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ err = -EFSCORRUPTED;
++ dec_valid_node_count(sbi, dn->inode, !ofs);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+ goto fail;
+@@ -1337,7 +1346,6 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ if (ofs == 0)
+ inc_valid_inode_count(sbi);
+ return page;
+-
+ fail:
+ clear_node_page_dirty(page);
+ f2fs_put_page(page, 1);
+@@ -1467,7 +1475,8 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
+ ofs_of_node(page), cpver_of_node(page),
+ next_blkaddr_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- err = -EINVAL;
++ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
++ err = -EFSCORRUPTED;
+ out_err:
+ ClearPageUptodate(page);
+ out_put_err:
+@@ -2389,7 +2398,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
+ blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
+
+ if (blk_addr == NEW_ADDR)
+- return -EINVAL;
++ return -EFSCORRUPTED;
+
+ if (blk_addr == NULL_ADDR) {
+ add_free_nid(sbi, start_nid, true, true);
+@@ -2504,7 +2513,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+
+ if (ret) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
+- f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++
++ if (ret == -EFSCORRUPTED) {
++ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ f2fs_handle_error(sbi,
++ ERROR_INCONSISTENT_NAT);
++ }
++
+ return ret;
+ }
+ }
+@@ -2743,9 +2759,11 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
+ f2fs_update_inode_page(inode);
+
+ /* 3: update and set xattr node page dirty */
+- memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
+-
+- set_page_dirty(xpage);
++ if (page) {
++ memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
++ VALID_XATTR_BLOCK_SIZE);
++ set_page_dirty(xpage);
++ }
+ f2fs_put_page(xpage, 1);
+
+ return 0;
+@@ -2831,7 +2849,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+ int i, idx, last_offset, nrpages;
+
+ /* scan the node segment */
+- last_offset = sbi->blocks_per_seg;
++ last_offset = BLKS_PER_SEG(sbi);
+ addr = START_BLOCK(sbi, segno);
+ sum_entry = &sum->entries[0];
+
+@@ -3148,7 +3166,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
+ if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
+ return 0;
+
+- nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
++ nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
+ nm_i->nat_bits_blocks;
+ for (i = 0; i < nm_i->nat_bits_blocks; i++) {
+ struct page *page;
+diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
+index 5bd16a95eef8f1..6aea13024ac165 100644
+--- a/fs/f2fs/node.h
++++ b/fs/f2fs/node.h
+@@ -208,10 +208,10 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
+
+ block_addr = (pgoff_t)(nm_i->nat_blkaddr +
+ (block_off << 1) -
+- (block_off & (sbi->blocks_per_seg - 1)));
++ (block_off & (BLKS_PER_SEG(sbi) - 1)));
+
+ if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
+- block_addr += sbi->blocks_per_seg;
++ block_addr += BLKS_PER_SEG(sbi);
+
+ return block_addr;
+ }
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 7be60df277a529..f8852aa5264085 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -354,7 +354,7 @@ static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
+ if (blkaddr + 1 == next_blkaddr)
+ ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
+ ra_blocks * 2);
+- else if (next_blkaddr % sbi->blocks_per_seg)
++ else if (next_blkaddr % BLKS_PER_SEG(sbi))
+ ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS,
+ ra_blocks / 2);
+ return ra_blocks;
+@@ -611,6 +611,19 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ return 0;
+ }
+
++static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
++{
++ int i, err = 0;
++
++ for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
++ err = f2fs_reserve_new_block(dn);
++ if (!err)
++ break;
++ }
++
++ return err;
++}
++
+ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct page *page)
+ {
+@@ -712,20 +725,17 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ */
+ if (dest == NEW_ADDR) {
+ f2fs_truncate_data_blocks_range(&dn, 1);
+- f2fs_reserve_new_block(&dn);
++
++ err = f2fs_reserve_new_block_retry(&dn);
++ if (err)
++ goto err;
+ continue;
+ }
+
+ /* dest is valid block, try to recover from src to dest */
+ if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+-
+ if (src == NULL_ADDR) {
+- err = f2fs_reserve_new_block(&dn);
+- while (err &&
+- IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
+- err = f2fs_reserve_new_block(&dn);
+- /* We should not get -ENOSPC */
+- f2fs_bug_on(sbi, err);
++ err = f2fs_reserve_new_block_retry(&dn);
+ if (err)
+ goto err;
+ }
+@@ -906,6 +916,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
+ f2fs_sb_has_blkzoned(sbi)) {
+ err = f2fs_fix_curseg_write_pointer(sbi);
++ if (!err)
++ err = f2fs_check_write_pointer(sbi);
+ ret = err;
+ }
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index d05b41608fc005..c0ba379a6d8f3e 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -192,16 +192,22 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ if (!f2fs_is_atomic_file(inode))
+ return;
+
++ if (clean)
++ truncate_inode_pages_final(inode->i_mapping);
++
+ release_atomic_write_cnt(inode);
+ clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ clear_inode_flag(inode, FI_ATOMIC_REPLACE);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
++ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
++ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
++ f2fs_mark_inode_dirty_sync(inode, true);
++ }
+ stat_dec_atomic_inode(inode);
+
+ F2FS_I(inode)->atomic_write_task = NULL;
+
+ if (clean) {
+- truncate_inode_pages_final(inode->i_mapping);
+ f2fs_i_size_write(inode, fi->original_i_size);
+ fi->original_i_size = 0;
+ }
+@@ -248,7 +254,7 @@ static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
+ } else {
+ blkcnt_t count = 1;
+
+- err = inc_valid_block_count(sbi, inode, &count);
++ err = inc_valid_block_count(sbi, inode, &count, true);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+@@ -366,6 +372,10 @@ static int __f2fs_commit_atomic_write(struct inode *inode)
+ } else {
+ sbi->committed_atomic_block += fi->atomic_write_cnt;
+ set_inode_flag(inode, FI_ATOMIC_COMMITTED);
++ if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) {
++ clear_inode_flag(inode, FI_ATOMIC_DIRTIED);
++ f2fs_mark_inode_dirty_sync(inode, true);
++ }
+ }
+
+ __complete_revoke_list(inode, &revoke_list, ret ? true : false);
+@@ -448,8 +458,8 @@ static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
+ unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
+ unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
+ unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+- unsigned int threshold = sbi->blocks_per_seg * factor *
+- DEFAULT_DIRTY_THRESHOLD;
++ unsigned int threshold = (factor * DEFAULT_DIRTY_THRESHOLD) <<
++ sbi->log_blocks_per_seg;
+ unsigned int global_threshold = threshold * 3 / 2;
+
+ if (dents >= threshold || qdata >= threshold ||
+@@ -1101,9 +1111,8 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
+ dc->error = 0;
+
+ if (dc->error)
+- printk_ratelimited(
+- "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
+- KERN_INFO, sbi->sb->s_id,
++ f2fs_info_ratelimited(sbi,
++ "Issue discard(%u, %u, %u) failed, ret: %d",
+ dc->di.lstart, dc->di.start, dc->di.len, dc->error);
+ __detach_discard_cmd(dcc, dc);
+ }
+@@ -1132,8 +1141,7 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
+ struct seg_entry *sentry;
+ unsigned int segno;
+ block_t blk = start;
+- unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
+- unsigned long *map;
++ unsigned long offset, size, *map;
+
+ while (blk < end) {
+ segno = GET_SEGNO(sbi, blk);
+@@ -1143,7 +1151,7 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
+ if (end < START_BLOCK(sbi, segno + 1))
+ size = GET_BLKOFF_FROM_SEG0(sbi, end);
+ else
+- size = max_blocks;
++ size = BLKS_PER_SEG(sbi);
+ map = (unsigned long *)(sentry->cur_valid_map);
+ offset = __find_rev_next_bit(map, size, offset);
+ f2fs_bug_on(sbi, offset != size);
+@@ -2041,7 +2049,6 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
+ bool check_only)
+ {
+ int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
+- int max_blocks = sbi->blocks_per_seg;
+ struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
+ unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
+ unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
+@@ -2053,8 +2060,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
+ struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
+ int i;
+
+- if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
+- !f2fs_block_unit_discard(sbi))
++ if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
++ !f2fs_hw_support_discard(sbi) ||
++ !f2fs_block_unit_discard(sbi))
+ return false;
+
+ if (!force) {
+@@ -2071,13 +2079,14 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
+
+ while (force || SM_I(sbi)->dcc_info->nr_discards <=
+ SM_I(sbi)->dcc_info->max_discards) {
+- start = __find_rev_next_bit(dmap, max_blocks, end + 1);
+- if (start >= max_blocks)
++ start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
++ if (start >= BLKS_PER_SEG(sbi))
+ break;
+
+- end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+- if (force && start && end != max_blocks
+- && (end - start) < cpc->trim_minlen)
++ end = __find_rev_next_zero_bit(dmap,
++ BLKS_PER_SEG(sbi), start + 1);
++ if (force && start && end != BLKS_PER_SEG(sbi) &&
++ (end - start) < cpc->trim_minlen)
+ continue;
+
+ if (check_only)
+@@ -2159,8 +2168,8 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ start + 1);
+
+ if (section_alignment) {
+- start = rounddown(start, sbi->segs_per_sec);
+- end = roundup(end, sbi->segs_per_sec);
++ start = rounddown(start, SEGS_PER_SEC(sbi));
++ end = roundup(end, SEGS_PER_SEC(sbi));
+ }
+
+ for (i = start; i < end; i++) {
+@@ -2188,9 +2197,9 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ if (!IS_CURSEC(sbi, secno) &&
+ !get_valid_blocks(sbi, start, true))
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+- sbi->segs_per_sec << sbi->log_blocks_per_seg);
++ BLKS_PER_SEC(sbi));
+
+- start = start_segno + sbi->segs_per_sec;
++ start = start_segno + SEGS_PER_SEC(sbi);
+ if (start < end)
+ goto next;
+ else
+@@ -2209,7 +2218,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ find_next:
+ if (is_valid) {
+ next_pos = find_next_zero_bit_le(entry->discard_map,
+- sbi->blocks_per_seg, cur_pos);
++ BLKS_PER_SEG(sbi), cur_pos);
+ len = next_pos - cur_pos;
+
+ if (f2fs_sb_has_blkzoned(sbi) ||
+@@ -2221,13 +2230,13 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ total_len += len;
+ } else {
+ next_pos = find_next_bit_le(entry->discard_map,
+- sbi->blocks_per_seg, cur_pos);
++ BLKS_PER_SEG(sbi), cur_pos);
+ }
+ skip:
+ cur_pos = next_pos;
+ is_valid = !is_valid;
+
+- if (cur_pos < sbi->blocks_per_seg)
++ if (cur_pos < BLKS_PER_SEG(sbi))
+ goto find_next;
+
+ release_discard_addr(entry);
+@@ -2275,7 +2284,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
+ dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
+ dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
+ if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
+- dcc->discard_granularity = sbi->blocks_per_seg;
++ dcc->discard_granularity = BLKS_PER_SEG(sbi);
+ else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
+ dcc->discard_granularity = BLKS_PER_SEC(sbi);
+
+@@ -2397,6 +2406,8 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+ #endif
+
+ segno = GET_SEGNO(sbi, blkaddr);
++ if (segno == NULL_SEGNO)
++ return;
+
+ se = get_seg_entry(sbi, segno);
+ new_vblocks = se->valid_blocks + del;
+@@ -2495,8 +2506,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+ if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
+ return;
+
+- invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+- f2fs_invalidate_compress_page(sbi, addr);
++ f2fs_invalidate_internal_cache(sbi, addr);
+
+ /* add it into sit main buffer */
+ down_write(&sit_i->sentry_lock);
+@@ -2539,7 +2549,7 @@ static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int typ
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+
+ if (sbi->ckpt->alloc_type[type] == SSR)
+- return sbi->blocks_per_seg;
++ return BLKS_PER_SEG(sbi);
+ return curseg->next_blkoff;
+ }
+
+@@ -2627,7 +2637,7 @@ static int is_next_segment_free(struct f2fs_sb_info *sbi,
+ unsigned int segno = curseg->segno + 1;
+ struct free_segmap_info *free_i = FREE_I(sbi);
+
+- if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
++ if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
+ return !test_bit(segno, free_i->free_segmap);
+ return 0;
+ }
+@@ -2637,53 +2647,45 @@ static int is_next_segment_free(struct f2fs_sb_info *sbi,
+ * This function should be returned with success, otherwise BUG
+ */
+ static void get_new_segment(struct f2fs_sb_info *sbi,
+- unsigned int *newseg, bool new_sec, int dir)
++ unsigned int *newseg, bool new_sec, bool pinning)
+ {
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int segno, secno, zoneno;
+ unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
+ unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
+ unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
+- unsigned int left_start = hint;
+ bool init = true;
+- int go_left = 0;
+ int i;
++ int ret = 0;
+
+ spin_lock(&free_i->segmap_lock);
+
+- if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
++ if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
+ segno = find_next_zero_bit(free_i->free_segmap,
+ GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
+ if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
+ goto got_it;
+ }
++
++ /*
++ * If f2fs was formatted on zoned storage, try to get pinned sections
++ * from the beginning of the storage, which should be a conventional zone.
++ */
++ if (f2fs_sb_has_blkzoned(sbi)) {
++ segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
++ hint = GET_SEC_FROM_SEG(sbi, segno);
++ }
++
+ find_other_zone:
+ secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+ if (secno >= MAIN_SECS(sbi)) {
+- if (dir == ALLOC_RIGHT) {
+- secno = find_first_zero_bit(free_i->free_secmap,
++ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
+- f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
+- } else {
+- go_left = 1;
+- left_start = hint - 1;
+- }
+- }
+- if (go_left == 0)
+- goto skip_left;
+-
+- while (test_bit(left_start, free_i->free_secmap)) {
+- if (left_start > 0) {
+- left_start--;
+- continue;
++ if (secno >= MAIN_SECS(sbi)) {
++ ret = -ENOSPC;
++ goto out_unlock;
+ }
+- left_start = find_first_zero_bit(free_i->free_secmap,
+- MAIN_SECS(sbi));
+- f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
+- break;
+ }
+- secno = left_start;
+-skip_left:
+ segno = GET_SEG_FROM_SEC(sbi, secno);
+ zoneno = GET_ZONE_FROM_SEC(sbi, secno);
+
+@@ -2694,21 +2696,13 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
+ goto got_it;
+ if (zoneno == old_zoneno)
+ goto got_it;
+- if (dir == ALLOC_LEFT) {
+- if (!go_left && zoneno + 1 >= total_zones)
+- goto got_it;
+- if (go_left && zoneno == 0)
+- goto got_it;
+- }
+ for (i = 0; i < NR_CURSEG_TYPE; i++)
+ if (CURSEG_I(sbi, i)->zone == zoneno)
+ break;
+
+ if (i < NR_CURSEG_TYPE) {
+ /* zone is in user, try another */
+- if (go_left)
+- hint = zoneno * sbi->secs_per_zone - 1;
+- else if (zoneno + 1 >= total_zones)
++ if (zoneno + 1 >= total_zones)
+ hint = 0;
+ else
+ hint = (zoneno + 1) * sbi->secs_per_zone;
+@@ -2720,7 +2714,13 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
+ f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
+ __set_inuse(sbi, segno);
+ *newseg = segno;
++out_unlock:
+ spin_unlock(&free_i->segmap_lock);
++
++ if (ret) {
++ f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
++ f2fs_bug_on(sbi, 1);
++ }
+ }
+
+ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
+@@ -2754,9 +2754,8 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+
+ sanity_check_seg_type(sbi, seg_type);
+ if (f2fs_need_rand_seg(sbi))
+- return get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
++ return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
+
+- /* if segs_per_sec is large than 1, we need to keep original policy. */
+ if (__is_large_section(sbi))
+ return curseg->segno;
+
+@@ -2767,8 +2766,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
+
+- if (test_opt(sbi, NOHEAP) &&
+- (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
++ if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
+ return 0;
+
+ if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
+@@ -2785,30 +2783,30 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+ * Allocate a current working segment.
+ * This function always allocates a free segment in LFS manner.
+ */
+-static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
++static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
+ {
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+- unsigned short seg_type = curseg->seg_type;
+ unsigned int segno = curseg->segno;
+- int dir = ALLOC_LEFT;
++ bool pinning = type == CURSEG_COLD_DATA_PINNED;
+
+ if (curseg->inited)
+- write_sum_page(sbi, curseg->sum_blk,
+- GET_SUM_BLOCK(sbi, segno));
+- if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
+- dir = ALLOC_RIGHT;
+-
+- if (test_opt(sbi, NOHEAP))
+- dir = ALLOC_RIGHT;
++ write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
+
+ segno = __get_next_segno(sbi, type);
+- get_new_segment(sbi, &segno, new_sec, dir);
++ get_new_segment(sbi, &segno, new_sec, pinning);
++ if (new_sec && pinning &&
++ !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
++ __set_free(sbi, segno);
++ return -EAGAIN;
++ }
++
+ curseg->next_segno = segno;
+ reset_curseg(sbi, type, 1);
+ curseg->alloc_type = LFS;
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
+ curseg->fragment_remained_chunk =
+ get_random_u32_inclusive(1, sbi->max_fragment_chunk);
++ return 0;
+ }
+
+ static int __next_free_blkoff(struct f2fs_sb_info *sbi,
+@@ -2824,7 +2822,7 @@ static int __next_free_blkoff(struct f2fs_sb_info *sbi,
+ for (i = 0; i < entries; i++)
+ target_map[i] = ckpt_map[i] | cur_map[i];
+
+- return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
++ return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
+ }
+
+ static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
+@@ -2835,7 +2833,7 @@ static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
+
+ bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
+ {
+- return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
++ return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
+ }
+
+ /*
+@@ -3081,7 +3079,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
+ }
+
+-static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
++static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
+ bool new_sec, bool force)
+ {
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+@@ -3091,21 +3089,49 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
+ !curseg->next_blkoff &&
+ !get_valid_blocks(sbi, curseg->segno, new_sec) &&
+ !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
+- return;
++ return 0;
+
+ old_segno = curseg->segno;
+- new_curseg(sbi, type, true);
++ if (new_curseg(sbi, type, true))
++ return -EAGAIN;
+ stat_inc_seg_type(sbi, curseg);
+ locate_dirty_segment(sbi, old_segno);
++ return 0;
+ }
+
+-void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
++int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
+ {
++ int ret;
++
+ f2fs_down_read(&SM_I(sbi)->curseg_lock);
+ down_write(&SIT_I(sbi)->sentry_lock);
+- __allocate_new_segment(sbi, type, true, force);
++ ret = __allocate_new_segment(sbi, type, true, force);
+ up_write(&SIT_I(sbi)->sentry_lock);
+ f2fs_up_read(&SM_I(sbi)->curseg_lock);
++
++ return ret;
++}
++
++int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
++{
++ int err;
++ bool gc_required = true;
++
++retry:
++ f2fs_lock_op(sbi);
++ err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
++ f2fs_unlock_op(sbi);
++
++ if (f2fs_sb_has_blkzoned(sbi) && err && gc_required) {
++ f2fs_down_write(&sbi->gc_lock);
++ f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
++ f2fs_up_write(&sbi->gc_lock);
++
++ gc_required = false;
++ goto retry;
++ }
++
++ return err;
+ }
+
+ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+@@ -3235,8 +3261,8 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
+ end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
+ GET_SEGNO(sbi, end);
+ if (need_align) {
+- start_segno = rounddown(start_segno, sbi->segs_per_sec);
+- end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
++ start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
++ end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
+ }
+
+ cpc.reason = CP_DISCARD;
+@@ -3344,7 +3370,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
+ if (page_private_gcing(fio->page)) {
+ if (fio->sbi->am.atgc_enabled &&
+ (fio->io_type == FS_DATA_IO) &&
+- (fio->sbi->gc_mode != GC_URGENT_HIGH))
++ (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
++ __is_valid_data_blkaddr(fio->old_blkaddr) &&
++ !is_inode_flag_set(inode, FI_OPU_WRITE))
+ return CURSEG_ALL_DATA_ATGC;
+ else
+ return CURSEG_COLD_DATA;
+@@ -3434,7 +3462,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ }
+ *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+
+- f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
++ f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
+
+ f2fs_wait_discard_bio(sbi, *new_blkaddr);
+
+@@ -3463,14 +3491,20 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ * since SSR needs latest valid block information.
+ */
+ update_sit_entry(sbi, *new_blkaddr, 1);
+- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+- update_sit_entry(sbi, old_blkaddr, -1);
++ update_sit_entry(sbi, old_blkaddr, -1);
+
+ /*
+ * If the current segment is full, flush it out and replace it with a
+ * new segment.
+ */
+ if (segment_full) {
++ if (type == CURSEG_COLD_DATA_PINNED &&
++ !((curseg->segno + 1) % sbi->segs_per_sec)) {
++ write_sum_page(sbi, curseg->sum_blk,
++ GET_SUM_BLOCK(sbi, curseg->segno));
++ goto skip_new_segment;
++ }
++
+ if (from_gc) {
+ get_atssr_segment(sbi, type, se->type,
+ AT_SSR, se->mtime);
+@@ -3482,6 +3516,8 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ stat_inc_seg_type(sbi, curseg);
+ }
+ }
++
++skip_new_segment:
+ /*
+ * segment dirty status should be updated after segment allocation,
+ * so we just need to update status only one time after previous
+@@ -3490,12 +3526,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
+
+- if (IS_DATASEG(type))
++ if (IS_DATASEG(curseg->seg_type))
+ atomic64_inc(&sbi->allocated_data_blocks);
+
+ up_write(&sit_i->sentry_lock);
+
+- if (page && IS_NODESEG(type)) {
++ if (page && IS_NODESEG(curseg->seg_type)) {
+ fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
+
+ f2fs_inode_chksum_set(sbi, page);
+@@ -3504,9 +3540,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ if (fio) {
+ struct f2fs_bio_info *io;
+
+- if (F2FS_IO_ALIGNED(sbi))
+- fio->retry = 0;
+-
+ INIT_LIST_HEAD(&fio->list);
+ fio->in_list = 1;
+ io = sbi->write_io[fio->type] + fio->temp;
+@@ -3554,21 +3587,14 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
+
+ if (keep_order)
+ f2fs_down_read(&fio->sbi->io_order_lock);
+-reallocate:
++
+ f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+ &fio->new_blkaddr, sum, type, fio);
+- if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
+- invalidate_mapping_pages(META_MAPPING(fio->sbi),
+- fio->old_blkaddr, fio->old_blkaddr);
+- f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
+- }
++ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
++ f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
+
+ /* writeout dirty page into bdev */
+ f2fs_submit_page_write(fio);
+- if (fio->retry) {
+- fio->old_blkaddr = fio->new_blkaddr;
+- goto reallocate;
+- }
+
+ f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
+
+@@ -3654,9 +3680,8 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
+ goto drop_bio;
+ }
+
+- if (fio->post_read)
+- invalidate_mapping_pages(META_MAPPING(sbi),
+- fio->new_blkaddr, fio->new_blkaddr);
++ if (fio->meta_gc)
++ f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
+
+ stat_inc_inplace_blocks(fio->sbi);
+
+@@ -3757,9 +3782,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ update_sit_entry(sbi, new_blkaddr, 1);
+ }
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+- invalidate_mapping_pages(META_MAPPING(sbi),
+- old_blkaddr, old_blkaddr);
+- f2fs_invalidate_compress_page(sbi, old_blkaddr);
++ f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+ if (!from_gc)
+ update_segment_mtime(sbi, old_blkaddr, 0);
+ update_sit_entry(sbi, old_blkaddr, -1);
+@@ -3823,7 +3846,7 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct page *cpage;
+
+- if (!f2fs_post_read_required(inode))
++ if (!f2fs_meta_inode_gc_required(inode))
+ return;
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+@@ -3842,13 +3865,13 @@ void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ block_t i;
+
+- if (!f2fs_post_read_required(inode))
++ if (!f2fs_meta_inode_gc_required(inode))
+ return;
+
+ for (i = 0; i < len; i++)
+ f2fs_wait_on_block_writeback(inode, blkaddr + i);
+
+- invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
++ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
+ }
+
+ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+@@ -3890,7 +3913,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+ seg_i->next_blkoff = blk_off;
+
+ if (seg_i->alloc_type == SSR)
+- blk_off = sbi->blocks_per_seg;
++ blk_off = BLKS_PER_SEG(sbi);
+
+ for (j = 0; j < blk_off; j++) {
+ struct f2fs_summary *s;
+@@ -3958,7 +3981,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
+ struct f2fs_summary *ns = &sum->entries[0];
+ int i;
+
+- for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
++ for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
+ ns->version = 0;
+ ns->ofs_in_node = 0;
+ }
+@@ -4591,21 +4614,20 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
+
+ sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
+
+- if (f2fs_block_unit_discard(sbi)) {
+- /* build discard map only one time */
+- if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+- memset(se->discard_map, 0xff,
++ if (!f2fs_block_unit_discard(sbi))
++ goto init_discard_map_done;
++
++ /* build discard map only one time */
++ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
++ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+- } else {
+- memcpy(se->discard_map,
+- se->cur_valid_map,
++ goto init_discard_map_done;
++ }
++ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+- sbi->discard_blks +=
+- sbi->blocks_per_seg -
++ sbi->discard_blks += BLKS_PER_SEG(sbi) -
+ se->valid_blocks;
+- }
+- }
+-
++init_discard_map_done:
+ if (__is_large_section(sbi))
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks;
+@@ -4745,7 +4767,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
+ return;
+
+ mutex_lock(&dirty_i->seglist_lock);
+- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
++ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
+ valid_blocks = get_valid_blocks(sbi, segno, true);
+ secno = GET_SEC_FROM_SEG(sbi, segno);
+
+@@ -4844,7 +4866,7 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
+ if (curseg->alloc_type == SSR)
+ continue;
+
+- for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
++ for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
+ if (!f2fs_test_bit(blkofs, se->cur_valid_map))
+ continue;
+ out:
+@@ -5143,7 +5165,7 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
+ unsigned int secno;
+
+ if (!sbi->unusable_blocks_per_sec)
+- return sbi->blocks_per_seg;
++ return BLKS_PER_SEG(sbi);
+
+ secno = GET_SEC_FROM_SEG(sbi, segno);
+ seg_start = START_BLOCK(sbi, segno);
+@@ -5158,10 +5180,10 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
+ */
+ if (seg_start >= sec_cap_blkaddr)
+ return 0;
+- if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
++ if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
+ return sec_cap_blkaddr - seg_start;
+
+- return sbi->blocks_per_seg;
++ return BLKS_PER_SEG(sbi);
+ }
+ #else
+ int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
+@@ -5187,7 +5209,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
+ if (f2fs_sb_has_blkzoned(sbi))
+ return f2fs_usable_zone_blks_in_seg(sbi, segno);
+
+- return sbi->blocks_per_seg;
++ return BLKS_PER_SEG(sbi);
+ }
+
+ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
+@@ -5196,7 +5218,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
+ if (f2fs_sb_has_blkzoned(sbi))
+ return CAP_SEGS_PER_SEC(sbi);
+
+- return sbi->segs_per_sec;
++ return SEGS_PER_SEC(sbi);
+ }
+
+ /*
+@@ -5211,14 +5233,14 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
+
+ sit_i->min_mtime = ULLONG_MAX;
+
+- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
++ for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
+ unsigned int i;
+ unsigned long long mtime = 0;
+
+- for (i = 0; i < sbi->segs_per_sec; i++)
++ for (i = 0; i < SEGS_PER_SEC(sbi); i++)
+ mtime += get_seg_entry(sbi, segno + i)->mtime;
+
+- mtime = div_u64(mtime, sbi->segs_per_sec);
++ mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
+
+ if (sit_i->min_mtime > mtime)
+ sit_i->min_mtime = mtime;
+@@ -5257,7 +5279,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
+ sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
+ sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
+ sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+- sm_info->min_seq_blocks = sbi->blocks_per_seg;
++ sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
+ sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
+ sm_info->min_ssr_sections = reserved_sections(sbi);
+
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 2ca8fb5d0dc4db..952970166d5da8 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -48,21 +48,21 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+
+ #define IS_CURSEC(sbi, secno) \
+ (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
+- (sbi)->segs_per_sec) || \
++ SEGS_PER_SEC(sbi)) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
+- (sbi)->segs_per_sec))
++ SEGS_PER_SEC(sbi)))
+
+ #define MAIN_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
+@@ -93,26 +93,24 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+ #define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
+ #define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
+- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
++ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))
+
+ #define GET_SEGNO(sbi, blk_addr) \
+ ((!__is_valid_data_blkaddr(blk_addr)) ? \
+ NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
+ GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
+-#define BLKS_PER_SEC(sbi) \
+- ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+ #define CAP_BLKS_PER_SEC(sbi) \
+- ((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
++ (SEGS_PER_SEC(sbi) * BLKS_PER_SEG(sbi) - \
+ (sbi)->unusable_blocks_per_sec)
+ #define CAP_SEGS_PER_SEC(sbi) \
+- ((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
++ (SEGS_PER_SEC(sbi) - ((sbi)->unusable_blocks_per_sec >> \
+ (sbi)->log_blocks_per_seg))
+ #define GET_SEC_FROM_SEG(sbi, segno) \
+- (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
++ (((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
+ #define GET_SEG_FROM_SEC(sbi, secno) \
+- ((secno) * (sbi)->segs_per_sec)
++ ((secno) * SEGS_PER_SEC(sbi))
+ #define GET_ZONE_FROM_SEC(sbi, secno) \
+- (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
++ (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
+ #define GET_ZONE_FROM_SEG(sbi, segno) \
+ GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
+
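
One detail behind the GET_BLKOFF_FROM_SEG0() change above: the macro masks with BLKS_PER_SEG(sbi) - 1 rather than taking a remainder, which is only equivalent because a segment always holds a power-of-two number of blocks (1 << log_blocks_per_seg). A quick self-contained check of that identity, using an illustrative segment size rather than one read from a superblock:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int log_blocks_per_seg = 9;		/* 512 blocks: 2 MiB segment */
	unsigned int blks_per_seg = 1u << log_blocks_per_seg;

	/* For power-of-two n, (x & (n - 1)) == (x % n). */
	for (unsigned int x = 0; x < 4 * blks_per_seg; x += 97)
		assert((x & (blks_per_seg - 1)) == (x % blks_per_seg));

	printf("mask and modulo agree for power-of-two segment sizes\n");
	return 0;
}
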
+@@ -138,16 +136,6 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+ #define SECTOR_TO_BLOCK(sectors) \
+ ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
+
+-/*
+- * indicate a block allocation direction: RIGHT and LEFT.
+- * RIGHT means allocating new sections towards the end of volume.
+- * LEFT means the opposite direction.
+- */
+-enum {
+- ALLOC_RIGHT = 0,
+- ALLOC_LEFT
+-};
+-
+ /*
+ * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
+ * LFS writes data sequentially with cleaning operations.
+@@ -360,11 +348,12 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno, bool use_section)
+ {
+ if (use_section && __is_large_section(sbi)) {
+- unsigned int start_segno = START_SEGNO(segno);
++ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
++ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ unsigned int blocks = 0;
+ int i;
+
+- for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
++ for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
+ struct seg_entry *se = get_seg_entry(sbi, start_segno);
+
+ blocks += se->ckpt_valid_blocks;
+@@ -449,7 +438,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
+ free_i->free_segments++;
+
+ next = find_next_bit(free_i->free_segmap,
+- start_segno + sbi->segs_per_sec, start_segno);
++ start_segno + SEGS_PER_SEC(sbi), start_segno);
+ if (next >= start_segno + usable_segs) {
+ clear_bit(secno, free_i->free_secmap);
+ free_i->free_sections++;
+@@ -485,7 +474,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
+ if (!inmem && IS_CURSEC(sbi, secno))
+ goto skip_free;
+ next = find_next_bit(free_i->free_segmap,
+- start_segno + sbi->segs_per_sec, start_segno);
++ start_segno + SEGS_PER_SEC(sbi), start_segno);
+ if (next >= start_segno + usable_segs) {
+ if (test_and_clear_bit(secno, free_i->free_secmap))
+ free_i->free_sections++;
+@@ -573,23 +562,22 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
+ unsigned int node_blocks, unsigned int dent_blocks)
+ {
+
+- unsigned int segno, left_blocks;
++	unsigned int segno, left_blocks;
+ int i;
+
+- /* check current node segment */
++ /* check current node sections in the worst case. */
+ for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
+ segno = CURSEG_I(sbi, i)->segno;
+- left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
+- get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+-
++ left_blocks = CAP_BLKS_PER_SEC(sbi) -
++ get_ckpt_valid_blocks(sbi, segno, true);
+ if (node_blocks > left_blocks)
+ return false;
+ }
+
+- /* check current data segment */
++ /* check current data section for dentry blocks. */
+ segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
+- left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
+- get_seg_entry(sbi, segno)->ckpt_valid_blocks;
++ left_blocks = CAP_BLKS_PER_SEC(sbi) -
++ get_ckpt_valid_blocks(sbi, segno, true);
+ if (dent_blocks > left_blocks)
+ return false;
+ return true;
+@@ -638,7 +626,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+
+ if (free_secs > upper_secs)
+ return false;
+- else if (free_secs <= lower_secs)
++ if (free_secs <= lower_secs)
+ return true;
+ return !curseg_space;
+ }
+@@ -793,10 +781,10 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
+ return -EFSCORRUPTED;
+ }
+
+- if (usable_blks_per_seg < sbi->blocks_per_seg)
++ if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
+ f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
+- sbi->blocks_per_seg,
+- usable_blks_per_seg) != sbi->blocks_per_seg);
++ BLKS_PER_SEG(sbi),
++ usable_blks_per_seg) != BLKS_PER_SEG(sbi));
+
+ /* check segment usage, and check boundary of a given segment number */
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
+@@ -915,9 +903,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
+ return 0;
+
+ if (type == DATA)
+- return sbi->blocks_per_seg;
++ return BLKS_PER_SEG(sbi);
+ else if (type == NODE)
+- return 8 * sbi->blocks_per_seg;
++ return 8 * BLKS_PER_SEG(sbi);
+ else if (type == META)
+ return 8 * BIO_MAX_VECS;
+ else
+@@ -969,3 +957,13 @@ static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
+ dcc->discard_wake = true;
+ wake_up_interruptible_all(&dcc->discard_wait_queue);
+ }
++
++static inline unsigned int first_zoned_segno(struct f2fs_sb_info *sbi)
++{
++ int devi;
++
++ for (devi = 0; devi < sbi->s_ndevs; devi++)
++ if (bdev_is_zoned(FDEV(devi).bdev))
++ return GET_SEGNO(sbi, FDEV(devi).start_blk);
++ return 0;
++}
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index a8c8232852bb18..540fa1dfc77dff 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -64,21 +64,31 @@ const char *f2fs_fault_name[FAULT_MAX] = {
+ [FAULT_BLKADDR] = "invalid blkaddr",
+ };
+
+-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+- unsigned int type)
++int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++ unsigned long type)
+ {
+ struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+
+ if (rate) {
++ if (rate > INT_MAX)
++ return -EINVAL;
+ atomic_set(&ffi->inject_ops, 0);
+- ffi->inject_rate = rate;
++ ffi->inject_rate = (int)rate;
+ }
+
+- if (type)
+- ffi->inject_type = type;
++ if (type) {
++ if (type >= BIT(FAULT_MAX))
++ return -EINVAL;
++ ffi->inject_type = (unsigned int)type;
++ }
+
+ if (!rate && !type)
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
++ else
++ f2fs_info(sbi,
++ "build fault injection attr: rate: %lu, type: 0x%lx",
++ rate, type);
++ return 0;
+ }
+ #endif
+
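
f2fs_build_fault_attr() now validates instead of truncating: the rate has to fit the int that backs inject_rate, and the type mask may only carry bits below BIT(FAULT_MAX). A simplified userspace mirror of those two range checks (the FAULT_MAX value is invented here, and the real function applies each check only when the corresponding argument is nonzero):

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define FAULT_MAX 20	/* hypothetical count of fault types */

static int check_fault_attr(unsigned long rate, unsigned long type)
{
	if (rate > INT_MAX)
		return -EINVAL;		/* inject_rate is stored in an int */
	if (type >= (1UL << FAULT_MAX))
		return -EINVAL;		/* unknown fault bits set */
	return 0;
}

int main(void)
{
	printf("%d\n", check_fault_attr(10, 0x3));		/* 0 */
	printf("%d\n", check_fault_attr(0, 1UL << FAULT_MAX));	/* -EINVAL */
	return 0;
}
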
+@@ -122,7 +132,6 @@ enum {
+ Opt_resgid,
+ Opt_resuid,
+ Opt_mode,
+- Opt_io_size_bits,
+ Opt_fault_injection,
+ Opt_fault_type,
+ Opt_lazytime,
+@@ -201,7 +210,6 @@ static match_table_t f2fs_tokens = {
+ {Opt_resgid, "resgid=%u"},
+ {Opt_resuid, "resuid=%u"},
+ {Opt_mode, "mode=%s"},
+- {Opt_io_size_bits, "io_bits=%u"},
+ {Opt_fault_injection, "fault_injection=%u"},
+ {Opt_fault_type, "fault_type=%u"},
+ {Opt_lazytime, "lazytime"},
+@@ -248,7 +256,8 @@ static match_table_t f2fs_tokens = {
+ {Opt_err, NULL},
+ };
+
+-void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
++void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
++ const char *fmt, ...)
+ {
+ struct va_format vaf;
+ va_list args;
+@@ -259,8 +268,12 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
+ level = printk_get_level(fmt);
+ vaf.fmt = printk_skip_level(fmt);
+ vaf.va = &args;
+- printk("%c%cF2FS-fs (%s): %pV\n",
+- KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
++ if (limit_rate)
++ printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
++ KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
++ else
++ printk("%c%cF2FS-fs (%s): %pV\n",
++ KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+
+ va_end(args);
+ }
+@@ -328,46 +341,6 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
+ F2FS_OPTION(sbi).s_resgid));
+ }
+
+-static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
+-{
+- unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
+- unsigned int avg_vblocks;
+- unsigned int wanted_reserved_segments;
+- block_t avail_user_block_count;
+-
+- if (!F2FS_IO_ALIGNED(sbi))
+- return 0;
+-
+- /* average valid block count in section in worst case */
+- avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);
+-
+- /*
+- * we need enough free space when migrating one section in worst case
+- */
+- wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
+- reserved_segments(sbi);
+- wanted_reserved_segments -= reserved_segments(sbi);
+-
+- avail_user_block_count = sbi->user_block_count -
+- sbi->current_reserved_blocks -
+- F2FS_OPTION(sbi).root_reserved_blocks;
+-
+- if (wanted_reserved_segments * sbi->blocks_per_seg >
+- avail_user_block_count) {
+- f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
+- wanted_reserved_segments,
+- avail_user_block_count >> sbi->log_blocks_per_seg);
+- return -ENOSPC;
+- }
+-
+- SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;
+-
+- f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
+- wanted_reserved_segments);
+-
+- return 0;
+-}
+-
+ static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
+ {
+ if (!F2FS_OPTION(sbi).unusable_cap_perc)
+@@ -547,6 +520,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
+ }
+
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
++static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
++ const char *new_ext, bool is_ext)
++{
++ unsigned char (*ext)[F2FS_EXTENSION_LEN];
++ int ext_cnt;
++ int i;
++
++ if (is_ext) {
++ ext = F2FS_OPTION(sbi).extensions;
++ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
++ } else {
++ ext = F2FS_OPTION(sbi).noextensions;
++ ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
++ }
++
++ for (i = 0; i < ext_cnt; i++) {
++ if (!strcasecmp(new_ext, ext[i]))
++ return true;
++ }
++
++ return false;
++}
++
+ /*
+ * 1. The same extension name cannot appear in both compress and non-compress extension
+ * at the same time.
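
is_compress_extension_exist() makes repeated mount options idempotent: an extension already present in the selected list (compress or nocompress) is skipped by the parser instead of being appended a second time. The membership test is a case-insensitive linear scan; in miniature, with the array width mirroring F2FS_EXTENSION_LEN and invented sample values:

#include <stdio.h>
#include <strings.h>	/* strcasecmp (POSIX) */

/* Case-insensitive membership scan, like is_compress_extension_exist(). */
static int ext_exists(char (*ext)[8], int cnt, const char *name)
{
	for (int i = 0; i < cnt; i++)
		if (!strcasecmp(name, ext[i]))
			return 1;
	return 0;
}

int main(void)
{
	char exts[2][8] = { "mp4", "SO" };

	printf("%d %d\n", ext_exists(exts, 2, "so"),	/* 1: duplicate */
	       ext_exists(exts, 2, "txt"));		/* 0: new entry */
	return 0;
}
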
+@@ -625,7 +621,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ #ifdef CONFIG_F2FS_FS_ZSTD
+ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ {
+- unsigned int level;
++ int level;
+ int len = 4;
+
+ if (strlen(str) == len) {
+@@ -639,9 +635,15 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ return -EINVAL;
+ }
+- if (kstrtouint(str + 1, 10, &level))
++ if (kstrtoint(str + 1, 10, &level))
+ return -EINVAL;
+
++ /* f2fs does not support negative compress level now */
++ if (level < 0) {
++ f2fs_info(sbi, "do not support negative compress level: %d", level);
++ return -ERANGE;
++ }
++
+ if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
+ f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ return -EINVAL;
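
The switch from kstrtouint() to kstrtoint() above lets a string such as "-5" parse cleanly so it can be rejected with a meaningful -ERANGE, rather than failing the unsigned parse with a generic -EINVAL. Roughly the same parse-then-range-check shape in portable C (parse_level() is an invented name, not a kernel function):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_level(const char *s, int *level)
{
	char *end;
	long v;

	errno = 0;
	v = strtol(s, &end, 10);
	if (errno || *end || end == s)
		return -EINVAL;		/* not a number */
	if (v < 0)
		return -ERANGE;		/* negative levels unsupported */
	*level = (int)v;
	return 0;
}

int main(void)
{
	int level;

	printf("%d\n", parse_level("-5", &level));		/* -ERANGE */
	printf("%d %d\n", parse_level("3", &level), level);	/* 0 3 */
	return 0;
}
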
+@@ -725,10 +727,8 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ clear_opt(sbi, DISCARD);
+ break;
+ case Opt_noheap:
+- set_opt(sbi, NOHEAP);
+- break;
+ case Opt_heap:
+- clear_opt(sbi, NOHEAP);
++ f2fs_warn(sbi, "heap/no_heap options were deprecated");
+ break;
+ #ifdef CONFIG_F2FS_FS_XATTR
+ case Opt_user_xattr:
+@@ -875,28 +875,21 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ }
+ kfree(name);
+ break;
+- case Opt_io_size_bits:
+- if (args->from && match_int(args, &arg))
+- return -EINVAL;
+- if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
+- f2fs_warn(sbi, "Not support %ld, larger than %d",
+- BIT(arg), BIO_MAX_VECS);
+- return -EINVAL;
+- }
+- F2FS_OPTION(sbi).write_io_size_bits = arg;
+- break;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+ case Opt_fault_injection:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+- f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
++ if (f2fs_build_fault_attr(sbi, arg,
++ F2FS_ALL_FAULT_TYPE))
++ return -EINVAL;
+ set_opt(sbi, FAULT_INJECTION);
+ break;
+
+ case Opt_fault_type:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+- f2fs_build_fault_attr(sbi, 0, arg);
++ if (f2fs_build_fault_attr(sbi, 0, arg))
++ return -EINVAL;
+ set_opt(sbi, FAULT_INJECTION);
+ break;
+ #else
+@@ -1149,6 +1142,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ return -EINVAL;
+ }
+
++ if (is_compress_extension_exist(sbi, name, true)) {
++ kfree(name);
++ break;
++ }
++
+ strcpy(ext[ext_cnt], name);
+ F2FS_OPTION(sbi).compress_ext_cnt++;
+ kfree(name);
+@@ -1173,6 +1171,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ return -EINVAL;
+ }
+
++ if (is_compress_extension_exist(sbi, name, false)) {
++ kfree(name);
++ break;
++ }
++
+ strcpy(noext[noext_cnt], name);
+ F2FS_OPTION(sbi).nocompress_ext_cnt++;
+ kfree(name);
+@@ -1344,12 +1347,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ }
+ #endif
+
+- if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
+- f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
+- F2FS_IO_SIZE_KB(sbi));
+- return -EINVAL;
+- }
+-
+ if (test_opt(sbi, INLINE_XATTR_SIZE)) {
+ int min_size, max_size;
+
+@@ -1629,7 +1626,7 @@ static void f2fs_put_super(struct super_block *sb)
+
+ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
+
+- if (err) {
++ if (err || f2fs_cp_error(sbi)) {
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
+ truncate_inode_pages_final(META_MAPPING(sbi));
+ }
+@@ -1677,7 +1674,6 @@ static void f2fs_put_super(struct super_block *sb)
+ destroy_device_list(sbi);
+ f2fs_destroy_page_array_cache(sbi);
+ f2fs_destroy_xattr_caches(sbi);
+- mempool_destroy(sbi->write_io_dummy);
+ #ifdef CONFIG_QUOTA
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+@@ -1969,10 +1965,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ } else {
+ seq_puts(seq, ",nodiscard");
+ }
+- if (test_opt(sbi, NOHEAP))
+- seq_puts(seq, ",no_heap");
+- else
+- seq_puts(seq, ",heap");
+ #ifdef CONFIG_F2FS_FS_XATTR
+ if (test_opt(sbi, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+@@ -2038,9 +2030,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+ F2FS_OPTION(sbi).s_resuid),
+ from_kgid_munged(&init_user_ns,
+ F2FS_OPTION(sbi).s_resgid));
+- if (F2FS_IO_SIZE_BITS(sbi))
+- seq_printf(seq, ",io_bits=%u",
+- F2FS_OPTION(sbi).write_io_size_bits);
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (test_opt(sbi, FAULT_INJECTION)) {
+ seq_printf(seq, ",fault_injection=%u",
+@@ -2147,12 +2136,9 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
+ F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
+ F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
+
+- sbi->sb->s_flags &= ~SB_INLINECRYPT;
+-
+ set_opt(sbi, INLINE_XATTR);
+ set_opt(sbi, INLINE_DATA);
+ set_opt(sbi, INLINE_DENTRY);
+- set_opt(sbi, NOHEAP);
+ set_opt(sbi, MERGE_CHECKPOINT);
+ F2FS_OPTION(sbi).unusable_cap = 0;
+ sbi->sb->s_flags |= SB_LAZYTIME;
+@@ -2292,7 +2278,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
+ bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
+ bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
+- bool no_io_align = !F2FS_IO_ALIGNED(sbi);
+ bool no_atgc = !test_opt(sbi, ATGC);
+ bool no_discard = !test_opt(sbi, DISCARD);
+ bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
+@@ -2400,12 +2385,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ goto restore_opts;
+ }
+
+- if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
+- err = -EINVAL;
+- f2fs_warn(sbi, "switch io_bits option is not allowed");
+- goto restore_opts;
+- }
+-
+ if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
+ err = -EINVAL;
+ f2fs_warn(sbi, "switch compress_cache option is not allowed");
+@@ -2564,6 +2543,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ return err;
+ }
+
++static void f2fs_shutdown(struct super_block *sb)
++{
++ f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false);
++}
++
+ #ifdef CONFIG_QUOTA
+ static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
+ {
+@@ -2723,7 +2707,7 @@ int f2fs_dquot_initialize(struct inode *inode)
+ return dquot_initialize(inode);
+ }
+
+-static struct dquot **f2fs_get_dquots(struct inode *inode)
++static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
+ {
+ return F2FS_I(inode)->i_dquot;
+ }
+@@ -3163,6 +3147,7 @@ static const struct super_operations f2fs_sops = {
+ .unfreeze_fs = f2fs_unfreeze,
+ .statfs = f2fs_statfs,
+ .remount_fs = f2fs_remount,
++ .shutdown = f2fs_shutdown,
+ };
+
+ #ifdef CONFIG_FS_ENCRYPTION
+@@ -3351,9 +3336,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+ u32 segment_count = le32_to_cpu(raw_super->segment_count);
+ u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ u64 main_end_blkaddr = main_blkaddr +
+- (segment_count_main << log_blocks_per_seg);
++ ((u64)segment_count_main << log_blocks_per_seg);
+ u64 seg_end_blkaddr = segment0_blkaddr +
+- (segment_count << log_blocks_per_seg);
++ ((u64)segment_count << log_blocks_per_seg);
+
+ if (segment0_blkaddr != cp_blkaddr) {
+ f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
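
The two (u64) casts above fix a 32-bit promotion bug: segment_count and log_blocks_per_seg are 32-bit values, so the shift was computed in 32 bits and wrapped before being widened into the u64 it was assigned to. Casting one operand first performs the shift in 64 bits. A standalone demonstration with invented but plausible geometry:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t segment_count = 9u << 20;	/* ~9.4M segments: multi-TiB volume */
	uint32_t log_blocks_per_seg = 9;	/* 512 blocks per segment */

	/* Shift happens in 32 bits, wraps, and only then widens... */
	uint64_t wrong = segment_count << log_blocks_per_seg;
	/* ...whereas casting first performs the shift in 64 bits. */
	uint64_t right = (uint64_t)segment_count << log_blocks_per_seg;

	printf("wrong=%" PRIu64 " right=%" PRIu64 "\n", wrong, right);
	return 0;
}
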
+@@ -3657,7 +3642,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ }
+
+ main_segs = le32_to_cpu(raw_super->segment_count_main);
+- blocks_per_seg = sbi->blocks_per_seg;
++ blocks_per_seg = BLKS_PER_SEG(sbi);
+
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+@@ -3770,8 +3755,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
+ sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
+ sbi->total_sections = le32_to_cpu(raw_super->section_count);
+ sbi->total_node_count =
+- (le32_to_cpu(raw_super->segment_count_nat) / 2)
+- * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
++ ((le32_to_cpu(raw_super->segment_count_nat) / 2) *
++ NAT_ENTRY_PER_BLOCK) << sbi->log_blocks_per_seg;
+ F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
+ F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
+ F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
+@@ -3780,7 +3765,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
+ sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+ sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+ sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
+- sbi->migration_granularity = sbi->segs_per_sec;
++ sbi->migration_granularity = SEGS_PER_SEC(sbi);
+ sbi->seq_file_ra_mul = MIN_RA_MUL;
+ sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
+ sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
+@@ -3881,11 +3866,6 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+ return 0;
+
+ zone_sectors = bdev_zone_sectors(bdev);
+- if (!is_power_of_2(zone_sectors)) {
+- f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
+- return -EINVAL;
+- }
+-
+ if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+ SECTOR_TO_BLOCK(zone_sectors))
+ return -EINVAL;
+@@ -4146,17 +4126,25 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ if (shutdown)
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+
+- /* continue filesystem operators if errors=continue */
+- if (continue_fs || f2fs_readonly(sb))
++ /*
++	 * Continue filesystem operations if errors=continue. Should not set
++ * RO by shutdown, since RO bypasses thaw_super which can hang the
++ * system.
++ */
++ if (continue_fs || f2fs_readonly(sb) || shutdown) {
++ f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
+ return;
++ }
+
+ f2fs_warn(sbi, "Remounting filesystem read-only");
++
+ /*
+- * Make sure updated value of ->s_mount_flags will be visible before
+- * ->s_flags update
++ * We have already set CP_ERROR_FLAG flag to stop all updates
++ * to filesystem, so it doesn't need to set SB_RDONLY flag here
++	 * because the flag should be set under the sb->s_umount semaphore
++ * via remount procedure, otherwise, it will confuse code like
++ * freeze_super() which will lead to deadlocks and other problems.
+ */
+- smp_wmb();
+- sb->s_flags |= SB_RDONLY;
+ }
+
+ static void f2fs_record_error_work(struct work_struct *work)
+@@ -4258,8 +4246,6 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+- f2fs_info(sbi,
+- "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
+ return 0;
+ }
+
+@@ -4472,19 +4458,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+ if (err)
+ goto free_iostat;
+
+- if (F2FS_IO_ALIGNED(sbi)) {
+- sbi->write_io_dummy =
+- mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
+- if (!sbi->write_io_dummy) {
+- err = -ENOMEM;
+- goto free_percpu;
+- }
+- }
+-
+ /* init per sbi slab cache */
+ err = f2fs_init_xattr_caches(sbi);
+ if (err)
+- goto free_io_dummy;
++ goto free_percpu;
+ err = f2fs_init_page_array_cache(sbi);
+ if (err)
+ goto free_xattr_cache;
+@@ -4572,10 +4549,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+ goto free_nm;
+ }
+
+- err = adjust_reserved_segment(sbi);
+- if (err)
+- goto free_nm;
+-
+ /* For write statistics */
+ sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
+
+@@ -4807,8 +4780,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+ f2fs_destroy_page_array_cache(sbi);
+ free_xattr_cache:
+ f2fs_destroy_xattr_caches(sbi);
+-free_io_dummy:
+- mempool_destroy(sbi->write_io_dummy);
+ free_percpu:
+ destroy_percpu_info(sbi);
+ free_iostat:
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 417fae96890f67..180feefc4a9ceb 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -457,17 +457,23 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ if (ret < 0)
+ return ret;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+- if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
+- return -EINVAL;
+- if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+- return -EINVAL;
++ if (a->struct_type == FAULT_INFO_TYPE) {
++ if (f2fs_build_fault_attr(sbi, 0, t))
++ return -EINVAL;
++ return count;
++ }
++ if (a->struct_type == FAULT_INFO_RATE) {
++ if (f2fs_build_fault_attr(sbi, t, 0))
++ return -EINVAL;
++ return count;
++ }
+ #endif
+ if (a->struct_type == RESERVED_BLOCKS) {
+ spin_lock(&sbi->stat_lock);
+ if (t > (unsigned long)(sbi->user_block_count -
+ F2FS_OPTION(sbi).root_reserved_blocks -
+- sbi->blocks_per_seg *
+- SM_I(sbi)->additional_reserved_segments)) {
++ (SM_I(sbi)->additional_reserved_segments <<
++ sbi->log_blocks_per_seg))) {
+ spin_unlock(&sbi->stat_lock);
+ return -EINVAL;
+ }
+@@ -517,7 +523,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ }
+
+ if (!strcmp(a->attr.name, "migration_granularity")) {
+- if (t == 0 || t > sbi->segs_per_sec)
++ if (t == 0 || t > SEGS_PER_SEC(sbi))
+ return -EINVAL;
+ }
+
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index a657284faee30a..54ab9caaae4dee 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+
+ *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
+ if (!*xe) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+- err = -EFSCORRUPTED;
++ err = -ENODATA;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+ goto out;
+@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+- error = -EFSCORRUPTED;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+- goto cleanup;
++ break;
+ }
+
+ if (!prefix)
+@@ -630,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ const char *name, const void *value, size_t size,
+ struct page *ipage, int flags)
+ {
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_xattr_entry *here, *last;
+ void *base_addr, *last_base_addr;
+ int found, newsize;
+@@ -650,7 +650,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+
+ if (size > MAX_VALUE_LEN(inode))
+ return -E2BIG;
+-
++retry:
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
+@@ -660,7 +660,17 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ /* find entry with wanted name. */
+ here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
+ if (!here) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ if (!F2FS_I(inode)->i_xattr_nid) {
++ error = f2fs_recover_xattr_data(inode, NULL);
++ f2fs_notice(F2FS_I_SB(inode),
++ "recover xattr in inode (%lu), error(%d)",
++ inode->i_ino, error);
++ if (!error) {
++ kfree(base_addr);
++ goto retry;
++ }
++ }
++ f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+@@ -748,6 +758,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ memcpy(pval, value, size);
+ last->e_value_size = cpu_to_le16(size);
+ new_hsize += newsize;
++ /*
++ * Explicitly add the null terminator. The unused xattr space
++ * is supposed to always be zeroed, which would make this
++ * unnecessary, but don't depend on that.
++ */
++ *(u32 *)((u8 *)last + newsize) = 0;
+ }
+
+ error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
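
The explicit store above plants the list terminator by hand: entries in an xattr block are walked until a zero 32-bit header, and the unused tail of the buffer, although supposed to be zeroed, must not be relied on. The same discipline in a toy appender (simplified layout, not the real f2fs_xattr_entry):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append a 32-bit entry header at off and re-plant the zero sentinel
 * right after it, instead of assuming the buffer tail is already zero. */
static size_t append_entry(uint8_t *buf, size_t off, uint32_t hdr)
{
	const uint32_t zero = 0;

	memcpy(buf + off, &hdr, sizeof(hdr));
	off += sizeof(hdr);
	memcpy(buf + off, &zero, sizeof(zero));	/* explicit terminator */
	return off;
}

int main(void)
{
	uint8_t buf[64];
	size_t end = 0;
	uint32_t tail;

	memset(buf, 0xff, sizeof(buf));		/* deliberately dirty buffer */
	end = append_entry(buf, end, 0x1234);
	end = append_entry(buf, end, 0x5678);

	memcpy(&tail, buf + end, sizeof(tail));
	printf("terminator after %zu bytes: %u\n", end, (unsigned)tail);
	return 0;
}
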
+@@ -757,9 +773,18 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
+ !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
+ f2fs_set_encrypted_inode(inode);
+- if (S_ISDIR(inode->i_mode))
+- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+
++ if (!S_ISDIR(inode->i_mode))
++ goto same;
++ /*
++ * In restrict mode, fsync() always try to trigger checkpoint for all
++ * metadata consistency, in other mode, it triggers checkpoint when
++ * parent's xattr metadata was updated.
++ */
++ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
++ set_sbi_flag(sbi, SBI_NEED_CP);
++ else
++ f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
+ same:
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index c4d00999a43300..3cf22a6727f1b6 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -1037,7 +1037,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (corrupt < 0) {
+ fat_fs_error(new_dir->i_sb,
+ "%s: Filesystem corrupted (i_pos %lld)",
+- __func__, sinfo.i_pos);
++ __func__, new_i_pos);
+ }
+ goto out;
+ }
+diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
+index 3626eb585a983e..93c97bf45b0614 100644
+--- a/fs/fat/nfs.c
++++ b/fs/fat/nfs.c
+@@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
+ fid->parent_i_gen = parent->i_generation;
+ type = FILEID_FAT_WITH_PARENT;
+ *lenp = FAT_FID_SIZE_WITH_PARENT;
++ } else {
++ /*
++ * We need to initialize this field because the fh is actually
++ * 12 bytes long
++ */
++ fid->parent_i_pos_hi = 0;
+ }
+
+ return type;
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index e871009f6c8895..1484f062ee65e3 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -86,8 +86,8 @@ static int setfl(int fd, struct file * filp, unsigned int arg)
+ return error;
+ }
+
+-static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
+- int force)
++void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
++ int force)
+ {
+ write_lock_irq(&filp->f_owner.lock);
+ if (force || !filp->f_owner.pid) {
+@@ -97,19 +97,13 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
+
+ if (pid) {
+ const struct cred *cred = current_cred();
++ security_file_set_fowner(filp);
+ filp->f_owner.uid = cred->uid;
+ filp->f_owner.euid = cred->euid;
+ }
+ }
+ write_unlock_irq(&filp->f_owner.lock);
+ }
+-
+-void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+- int force)
+-{
+- security_file_set_fowner(filp);
+- f_modown(filp, pid, type, force);
+-}
+ EXPORT_SYMBOL(__f_setown);
+
+ int f_setown(struct file *filp, int who, int force)
+@@ -145,7 +139,7 @@ EXPORT_SYMBOL(f_setown);
+
+ void f_delown(struct file *filp)
+ {
+- f_modown(filp, NULL, PIDTYPE_TGID, 1);
++ __f_setown(filp, NULL, PIDTYPE_TGID, 1);
+ }
+
+ pid_t f_getown(struct file *filp)
+@@ -268,7 +262,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
+ }
+ #endif
+
+-static bool rw_hint_valid(enum rw_hint hint)
++static bool rw_hint_valid(u64 hint)
+ {
+ switch (hint) {
+ case RWH_WRITE_LIFE_NOT_SET:
+@@ -288,19 +282,17 @@ static long fcntl_rw_hint(struct file *file, unsigned int cmd,
+ {
+ struct inode *inode = file_inode(file);
+ u64 __user *argp = (u64 __user *)arg;
+- enum rw_hint hint;
+- u64 h;
++ u64 hint;
+
+ switch (cmd) {
+ case F_GET_RW_HINT:
+- h = inode->i_write_hint;
+- if (copy_to_user(argp, &h, sizeof(*argp)))
++ hint = inode->i_write_hint;
++ if (copy_to_user(argp, &hint, sizeof(*argp)))
+ return -EFAULT;
+ return 0;
+ case F_SET_RW_HINT:
+- if (copy_from_user(&h, argp, sizeof(h)))
++ if (copy_from_user(&hint, argp, sizeof(hint)))
+ return -EFAULT;
+- hint = (enum rw_hint) h;
+ if (!rw_hint_valid(hint))
+ return -EINVAL;
+
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index 6ea8d35a9382ac..c361d7ff1b88dd 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -40,7 +40,7 @@ static long do_sys_name_to_handle(const struct path *path,
+ if (f_handle.handle_bytes > MAX_HANDLE_SZ)
+ return -EINVAL;
+
+- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
++ handle = kzalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
+ GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+@@ -75,7 +75,7 @@ static long do_sys_name_to_handle(const struct path *path,
+ /* copy the mount id */
+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
+ copy_to_user(ufh, handle,
+- sizeof(struct file_handle) + handle_bytes))
++ struct_size(handle, f_handle, handle_bytes)))
+ retval = -EFAULT;
+ kfree(handle);
+ return retval;
+@@ -196,7 +196,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
+ retval = -EINVAL;
+ goto out_err;
+ }
+- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
++ handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
+ GFP_KERNEL);
+ if (!handle) {
+ retval = -ENOMEM;
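
Two hardening idioms appear in the fhandle changes: struct_size() expresses header-plus-flexible-array sizing in one checked helper instead of an open-coded sizeof addition, and the do_sys_name_to_handle() path switches to kzalloc() so uninitialized bytes cannot ride back to userspace in the copied handle. A userspace approximation of both (the struct is a simplified stand-in for struct file_handle, and calloc() plays the role of kzalloc()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct file_handle_demo {
	uint32_t handle_bytes;
	int32_t handle_type;
	unsigned char f_handle[];	/* flexible array member */
};

/* Header plus N trailing bytes, computed in one place so the count is
 * never added twice; the kernel helper also checks for overflow. */
static size_t handle_size(uint32_t handle_bytes)
{
	return sizeof(struct file_handle_demo) + (size_t)handle_bytes;
}

int main(void)
{
	uint32_t n = 16;
	/* calloc ~ kzalloc: zero-fill so stale heap bytes cannot leak
	 * to the caller through padding or unused tail space. */
	struct file_handle_demo *h = calloc(1, handle_size(n));

	if (!h)
		return 1;
	h->handle_bytes = n;
	memset(h->f_handle, 0xab, n);
	printf("allocated %zu bytes\n", handle_size(n));
	free(h);
	return 0;
}
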
+diff --git a/fs/file.c b/fs/file.c
+index 3e4a4dfa38fcad..bd817e31d79866 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -46,27 +46,23 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
+ #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
+ #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
+
++#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
+ /*
+ * Copy 'count' fd bits from the old table to the new table and clear the extra
+ * space if any. This does not copy the file pointers. Called with the files
+ * spinlock held for write.
+ */
+-static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+- unsigned int count)
++static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
++ unsigned int copy_words)
+ {
+- unsigned int cpy, set;
+-
+- cpy = count / BITS_PER_BYTE;
+- set = (nfdt->max_fds - count) / BITS_PER_BYTE;
+- memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
+- memset((char *)nfdt->open_fds + cpy, 0, set);
+- memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
+- memset((char *)nfdt->close_on_exec + cpy, 0, set);
+-
+- cpy = BITBIT_SIZE(count);
+- set = BITBIT_SIZE(nfdt->max_fds) - cpy;
+- memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
+- memset((char *)nfdt->full_fds_bits + cpy, 0, set);
++ unsigned int nwords = fdt_words(nfdt);
++
++ bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
++ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
++ bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
++ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
++ bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
++ copy_words, nwords);
+ }
+
+ /*
+@@ -84,7 +80,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
+ memcpy(nfdt->fd, ofdt->fd, cpy);
+ memset((char *)nfdt->fd + cpy, 0, set);
+
+- copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
++ copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
+ }
+
+ /*
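
fdt_words() pins down the unit confusion the old copy_fd_bitmaps() had: callers now pass a count of long words, and the three memcpy()/memset() pairs collapse into bitmap_copy_and_extend(), whose contract is to copy the first N bits and then zero-fill the destination out to its full width. A byte-granular sketch of that contract (the real helper operates on longs and copes with partial words):

#include <stdio.h>
#include <string.h>

/* Copy the first copy_bits bits of src into dst, then clear dst out to
 * total_bits. Bit counts are kept byte-aligned here for simplicity. */
static void bitmap_copy_and_extend_demo(unsigned char *dst,
					const unsigned char *src,
					unsigned int copy_bits,
					unsigned int total_bits)
{
	unsigned int cpy = copy_bits / 8, all = total_bits / 8;

	memcpy(dst, src, cpy);
	memset(dst + cpy, 0, all - cpy);
}

int main(void)
{
	unsigned char src[4] = { 0xff, 0x0f, 0x00, 0x00 };
	unsigned char dst[8];

	memset(dst, 0xaa, sizeof(dst));		/* stale contents */
	bitmap_copy_and_extend_demo(dst, src, 16, 64);
	printf("dst[1]=0x%02x dst[2]=0x%02x\n",
	       (unsigned)dst[1], (unsigned)dst[2]);	/* 0x0f 0x00 */
	return 0;
}
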
+@@ -271,59 +267,45 @@ static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
+ __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
+ }
+
+-static unsigned int count_open_files(struct fdtable *fdt)
+-{
+- unsigned int size = fdt->max_fds;
+- unsigned int i;
+-
+- /* Find the last open fd */
+- for (i = size / BITS_PER_LONG; i > 0; ) {
+- if (fdt->open_fds[--i])
+- break;
+- }
+- i = (i + 1) * BITS_PER_LONG;
+- return i;
+-}
+-
+ /*
+ * Note that a sane fdtable size always has to be a multiple of
+ * BITS_PER_LONG, since we have bitmaps that are sized by this.
+ *
+- * 'max_fds' will normally already be properly aligned, but it
+- * turns out that in the close_range() -> __close_range() ->
+- * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
+- * up having a 'max_fds' value that isn't already aligned.
+- *
+- * Rather than make close_range() have to worry about this,
+- * just make that BITS_PER_LONG alignment be part of a sane
+- * fdtable size. Because that's really what it is.
++ * punch_hole is optional - when close_range() is asked to unshare
++ * and close, we don't need to copy descriptors in that range, so
++ * a smaller cloned descriptor table might suffice if the last
++ * currently opened descriptor falls into that range.
+ */
+-static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
++static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
+ {
+- unsigned int count;
+-
+- count = count_open_files(fdt);
+- if (max_fds < NR_OPEN_DEFAULT)
+- max_fds = NR_OPEN_DEFAULT;
+- return ALIGN(min(count, max_fds), BITS_PER_LONG);
++ unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);
++
++ if (last == fdt->max_fds)
++ return NR_OPEN_DEFAULT;
++ if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
++ last = find_last_bit(fdt->open_fds, punch_hole->from);
++ if (last == punch_hole->from)
++ return NR_OPEN_DEFAULT;
++ }
++ return ALIGN(last + 1, BITS_PER_LONG);
+ }
+
+ /*
+- * Allocate a new files structure and copy contents from the
+- * passed in files structure.
+- * errorp will be valid only when the returned files_struct is NULL.
++ * Allocate a new descriptor table and copy contents from the passed in
++ * instance. Returns a pointer to cloned table on success, ERR_PTR()
++ * on failure. For 'punch_hole' see sane_fdtable_size().
+ */
+-struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
++struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
+ {
+ struct files_struct *newf;
+ struct file **old_fds, **new_fds;
+ unsigned int open_files, i;
+ struct fdtable *old_fdt, *new_fdt;
++ int error;
+
+- *errorp = -ENOMEM;
+ newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
+ if (!newf)
+- goto out;
++ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&newf->count, 1);
+
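
dup_fd() now takes an optional punch_hole range: when close_range() is about to unshare and then close [from, to], descriptors inside that window need not be copied, so the clone can be sized by the highest fd that will actually survive. A simplified model of the sizing rule below (the BITS_PER_LONG alignment and the NR_OPEN_DEFAULT floor are only approximated):

#include <stdio.h>

static unsigned int clone_size(const int *open, unsigned int max,
			       unsigned int from, unsigned int to)
{
	int last = -1;

	for (unsigned int fd = 0; fd < max; fd++) {
		if (!open[fd])
			continue;
		if (fd >= from && fd <= to)
			continue;	/* will be closed, don't count */
		last = (int)fd;
	}
	return last < 0 ? 64 : (unsigned int)(last + 1);	/* 64 ~ NR_OPEN_DEFAULT */
}

int main(void)
{
	int open[1024] = { [0] = 1, [1] = 1, [2] = 1, [900] = 1 };

	/* closing 100..1023 lets the clone shrink to the low descriptors */
	printf("%u\n", clone_size(open, 1024, 100, 1023));	/* 3 */
	return 0;
}
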
+@@ -340,7 +322,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
+
+ spin_lock(&oldf->file_lock);
+ old_fdt = files_fdtable(oldf);
+- open_files = sane_fdtable_size(old_fdt, max_fds);
++ open_files = sane_fdtable_size(old_fdt, punch_hole);
+
+ /*
+ * Check whether we need to allocate a larger fd array and fd set.
+@@ -353,14 +335,14 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
+
+ new_fdt = alloc_fdtable(open_files - 1);
+ if (!new_fdt) {
+- *errorp = -ENOMEM;
++ error = -ENOMEM;
+ goto out_release;
+ }
+
+ /* beyond sysctl_nr_open; nothing to do */
+ if (unlikely(new_fdt->max_fds < open_files)) {
+ __free_fdtable(new_fdt);
+- *errorp = -EMFILE;
++ error = -EMFILE;
+ goto out_release;
+ }
+
+@@ -371,10 +353,10 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
+ */
+ spin_lock(&oldf->file_lock);
+ old_fdt = files_fdtable(oldf);
+- open_files = sane_fdtable_size(old_fdt, max_fds);
++ open_files = sane_fdtable_size(old_fdt, punch_hole);
+ }
+
+- copy_fd_bitmaps(new_fdt, old_fdt, open_files);
++ copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
+
+ old_fds = old_fdt->fd;
+ new_fds = new_fdt->fd;
+@@ -405,8 +387,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
+
+ out_release:
+ kmem_cache_free(files_cachep, newf);
+-out:
+- return NULL;
++ return ERR_PTR(error);
+ }
+
+ static struct fdtable *close_files(struct files_struct * files)
+@@ -481,12 +462,12 @@ struct files_struct init_files = {
+
+ static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
+ {
+- unsigned int maxfd = fdt->max_fds;
++ unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
+ unsigned int maxbit = maxfd / BITS_PER_LONG;
+ unsigned int bitbit = start / BITS_PER_LONG;
+
+ bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
+- if (bitbit > maxfd)
++ if (bitbit >= maxfd)
+ return maxfd;
+ if (bitbit > start)
+ start = bitbit;
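
The single-character change from > to >= above closes an off-by-one: find_next_zero_bit() returns its size argument when no zero bit exists, so when every word of full_fds_bits is set, bitbit lands exactly on maxfd and never beyond it, meaning the old > test could never fire and the futile per-bit search below it still ran. The boundary case in miniature:

#include <stdio.h>

/* Word-level fast path from find_next_fd(), in miniature: full_words
 * marks 64-fd chunks that are completely occupied. */
static unsigned int next_free_chunk(const unsigned long *full_words,
				    unsigned int nwords)
{
	for (unsigned int i = 0; i < nwords; i++)
		if (full_words[i] != ~0UL)
			return i;
	return nwords;		/* no free chunk: one past the end */
}

int main(void)
{
	unsigned long full[4] = { ~0UL, ~0UL, ~0UL, ~0UL };
	unsigned int maxfd = 4 * 64;
	unsigned int bitbit = next_free_chunk(full, 4) * 64;

	/* bitbit == maxfd here, so only '>=' takes the early-out. */
	printf("bitbit=%u maxfd=%u -> table full\n", bitbit, maxfd);
	return 0;
}
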
+@@ -740,37 +721,25 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
+ if (fd > max_fd)
+ return -EINVAL;
+
+- if (flags & CLOSE_RANGE_UNSHARE) {
+- int ret;
+- unsigned int max_unshare_fds = NR_OPEN_MAX;
++ if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
++ struct fd_range range = {fd, max_fd}, *punch_hole = &range;
+
+ /*
+ * If the caller requested all fds to be made cloexec we always
+ * copy all of the file descriptors since they still want to
+ * use them.
+ */
+- if (!(flags & CLOSE_RANGE_CLOEXEC)) {
+- /*
+- * If the requested range is greater than the current
+- * maximum, we're closing everything so only copy all
+- * file descriptors beneath the lowest file descriptor.
+- */
+- rcu_read_lock();
+- if (max_fd >= last_fd(files_fdtable(cur_fds)))
+- max_unshare_fds = fd;
+- rcu_read_unlock();
+- }
+-
+- ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
+- if (ret)
+- return ret;
++ if (flags & CLOSE_RANGE_CLOEXEC)
++ punch_hole = NULL;
+
++ fds = dup_fd(cur_fds, punch_hole);
++ if (IS_ERR(fds))
++ return PTR_ERR(fds);
+ /*
+ * We used to share our file descriptor table, and have now
+ * created a private one, make sure we're using it below.
+ */
+- if (fds)
+- swap(cur_fds, fds);
++ swap(cur_fds, fds);
+ }
+
+ if (flags & CLOSE_RANGE_CLOEXEC)
+@@ -1124,6 +1093,7 @@ __releases(&files->file_lock)
+ * tables and this condition does not arise without those.
+ */
+ fdt = files_fdtable(files);
++ fd = array_index_nospec(fd, fdt->max_fds);
+ tofree = fdt->fd[fd];
+ if (!tofree && fd_is_open(fd, fdt))
+ goto Ebusy;
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index c1af01b2c42d70..0a498bc60f5573 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -613,6 +613,24 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+ kfree(isw);
+ }
+
++static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
++ struct list_head *list, int *nr)
++{
++ struct inode *inode;
++
++ list_for_each_entry(inode, list, i_io_list) {
++ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
++ continue;
++
++ isw->inodes[*nr] = inode;
++ (*nr)++;
++
++ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
++ return true;
++ }
++ return false;
++}
++
+ /**
+ * cleanup_offline_cgwb - detach associated inodes
+ * @wb: target wb
+@@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+ {
+ struct cgroup_subsys_state *memcg_css;
+ struct inode_switch_wbs_context *isw;
+- struct inode *inode;
+ int nr;
+ bool restart = false;
+
+@@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
+
+ nr = 0;
+ spin_lock(&wb->list_lock);
+- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
+- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+- continue;
+-
+- isw->inodes[nr++] = inode;
+-
+- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
+- restart = true;
+- break;
+- }
+- }
++ /*
++ * In addition to the inodes that have completed writeback, also switch
++ * cgwbs for those inodes only with dirty timestamps. Otherwise, those
++ * inodes won't be written back for a long time when lazytime is
++ * enabled, and thus pinning the dying cgwbs. It won't break the
++ * bandwidth restrictions, as writeback of inode metadata is not
++ * accounted for.
++ */
++ restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
++ if (!restart)
++ restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
+ spin_unlock(&wb->list_lock);
+
+ /* no attached inodes? bail out */
+@@ -2027,6 +2044,7 @@ static long wb_writeback(struct bdi_writeback *wb,
+ struct inode *inode;
+ long progress;
+ struct blk_plug plug;
++ bool queued = false;
+
+ blk_start_plug(&plug);
+ for (;;) {
+@@ -2069,8 +2087,10 @@ static long wb_writeback(struct bdi_writeback *wb,
+ dirtied_before = jiffies;
+
+ trace_writeback_start(wb, work);
+- if (list_empty(&wb->b_io))
++ if (list_empty(&wb->b_io)) {
+ queue_io(wb, work, dirtied_before);
++ queued = true;
++ }
+ if (work->sb)
+ progress = writeback_sb_inodes(work->sb, wb, work);
+ else
+@@ -2085,7 +2105,7 @@ static long wb_writeback(struct bdi_writeback *wb,
+ * mean the overall work is done. So we keep looping as long
+ * as made some progress on cleaning pages or inodes.
+ */
+- if (progress) {
++ if (progress || !queued) {
+ spin_unlock(&wb->list_lock);
+ continue;
+ }
+diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
+index d645f8b302a278..9397ed39b0b4ec 100644
+--- a/fs/fscache/cache.c
++++ b/fs/fscache/cache.c
+@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
+ void fscache_put_cache(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
+ {
+- unsigned int debug_id = cache->debug_id;
++ unsigned int debug_id;
+ bool zero;
+ int ref;
+
+ if (IS_ERR_OR_NULL(cache))
+ return;
+
++ debug_id = cache->debug_id;
+ zero = __refcount_dec_and_test(&cache->ref, &ref);
+ trace_fscache_cache(debug_id, ref - 1, where);
+
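
The fscache_put_cache() fix is a use-before-check repair: cache->debug_id was loaded before the IS_ERR_OR_NULL() guard, so the supposedly safe early return happened only after dereferencing a NULL or error pointer. The corrected ordering, reduced to a runnable toy:

#include <stdio.h>
#include <stdlib.h>

struct cache { unsigned int debug_id; };

/* Defer every dereference until after the NULL check; otherwise the
 * "safe" early return comes too late. */
static void put_cache(struct cache *c)
{
	unsigned int debug_id;

	if (!c)
		return;		/* check first... */

	debug_id = c->debug_id;	/* ...dereference second */
	printf("put cache %u\n", debug_id);
	free(c);
}

int main(void)
{
	struct cache *c = malloc(sizeof(*c));

	put_cache(NULL);	/* safe: no read through NULL */
	if (c) {
		c->debug_id = 7;
		put_cache(c);
	}
	return 0;
}
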
+diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
+index bce2492186d0b4..d4d4b3a8b10603 100644
+--- a/fs/fscache/cookie.c
++++ b/fs/fscache/cookie.c
+@@ -741,6 +741,10 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
+ spin_lock(&cookie->lock);
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
++ if (atomic_read(&cookie->n_accesses) != 0)
++ /* still being accessed: postpone it */
++ break;
++
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ wake = true;
+diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
+index 1336f517e9b1a6..4799a722bc2855 100644
+--- a/fs/fscache/internal.h
++++ b/fs/fscache/internal.h
+@@ -145,8 +145,6 @@ extern const struct seq_operations fscache_volumes_seq_ops;
+
+ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+-void fscache_put_volume(struct fscache_volume *volume,
+- enum fscache_volume_trace where);
+ bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+diff --git a/fs/fscache/main.c b/fs/fscache/main.c
+index dad85fd84f6f9f..7a60cd96e87ea2 100644
+--- a/fs/fscache/main.c
++++ b/fs/fscache/main.c
+@@ -114,6 +114,7 @@ static void __exit fscache_exit(void)
+
+ kmem_cache_destroy(fscache_cookie_jar);
+ fscache_proc_cleanup();
++ timer_shutdown_sync(&fscache_cookie_lru_timer);
+ destroy_workqueue(fscache_wq);
+ pr_notice("Unloaded\n");
+ }
+diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
+index cdf991bdd9def4..cb75c07b5281a5 100644
+--- a/fs/fscache/volume.c
++++ b/fs/fscache/volume.c
+@@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ return volume;
+ }
+
++struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
++ enum fscache_volume_trace where)
++{
++ int ref;
++
++ if (!__refcount_inc_not_zero(&volume->ref, &ref))
++ return NULL;
++
++ trace_fscache_volume(volume->debug_id, ref + 1, where);
++ return volume;
++}
++EXPORT_SYMBOL(fscache_try_get_volume);
++
+ static void fscache_see_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+ {
+@@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume,
+ fscache_free_volume(volume);
+ }
+ }
++EXPORT_SYMBOL(fscache_put_volume);
+
+ /*
+ * Relinquish a volume representation cookie.
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index 91e89e68177ee4..b6cad106c37e44 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -474,8 +474,7 @@ static int cuse_send_init(struct cuse_conn *cc)
+
+ static void cuse_fc_release(struct fuse_conn *fc)
+ {
+- struct cuse_conn *cc = fc_to_cc(fc);
+- kfree_rcu(cc, fc.rcu);
++ kfree(fc_to_cc(fc));
+ }
+
+ /**
+diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
+index 23904a6a9a96f7..12ef91d170bb30 100644
+--- a/fs/fuse/dax.c
++++ b/fs/fuse/dax.c
+@@ -1222,6 +1222,7 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
+ if (fc->dax) {
+ fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
+ kfree(fc->dax);
++ fc->dax = NULL;
+ }
+ }
+
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 1a8f82f478cb7a..8573d79ef29c80 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1618,9 +1618,11 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
+
+ this_num = min_t(unsigned, num, PAGE_SIZE - offset);
+ err = fuse_copy_page(cs, &page, offset, this_num, 0);
+- if (!err && offset == 0 &&
+- (this_num == PAGE_SIZE || file_size == end))
++ if (!PageUptodate(page) && !err && offset == 0 &&
++ (this_num == PAGE_SIZE || file_size == end)) {
++ zero_user_segment(page, this_num, PAGE_SIZE);
+ SetPageUptodate(page);
++ }
+ unlock_page(page);
+ put_page(page);
+
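
The fuse_notify_store() change stops a stale-data leak: a page that received only this_num bytes was previously marked uptodate in full, exposing whatever the rest of the page held. The added zero_user_segment() clears the uncopied tail before SetPageUptodate(); the same discipline in plain C (publish_partial() is an invented name):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* Zero the tail that no data was copied into before publishing the
 * page as up to date; trusting the old contents is the bug fixed above. */
static void publish_partial(unsigned char *page, size_t filled)
{
	if (filled < PAGE_SZ)
		memset(page + filled, 0, PAGE_SZ - filled);
	/* only now would the page be marked uptodate */
}

int main(void)
{
	unsigned char page[PAGE_SZ];

	memset(page, 0xaa, PAGE_SZ);		/* stale contents */
	publish_partial(page, 100);
	printf("tail byte: %u\n", (unsigned)page[PAGE_SZ - 1]);	/* 0 */
	return 0;
}
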
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index d707e6987da91e..95f9913a353731 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -391,6 +391,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
+ err = -EIO;
+ if (fuse_invalid_attr(&outarg->attr))
+ goto out_put_forget;
++ if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
++ pr_warn_once("root generation should be zero\n");
++ outarg->generation = 0;
++ }
+
+ *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
+ &outarg->attr, ATTR_TIMEOUT(outarg),
+@@ -664,7 +668,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
+
+ err = get_create_ext(&args, dir, entry, mode);
+ if (err)
+- goto out_put_forget_req;
++ goto out_free_ff;
+
+ err = fuse_simple_request(fm, &args);
+ free_ext_value(&args);
+@@ -1210,7 +1214,7 @@ static int fuse_do_statx(struct inode *inode, struct file *file,
+ if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
+ ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
+ inode_wrong_type(inode, sx->mode)))) {
+- make_bad_inode(inode);
++ fuse_make_bad(inode);
+ return -EIO;
+ }
+
+@@ -1313,6 +1317,7 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
+ err = fuse_do_statx(inode, file, stat);
+ if (err == -ENOSYS) {
+ fc->no_statx = 1;
++ err = 0;
+ goto retry;
+ }
+ } else {
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 1cdb6327511ef8..ceb9f7d2303882 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1448,7 +1448,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ if (!ia)
+ return -ENOMEM;
+
+- if (fopen_direct_io && fc->direct_io_relax) {
++ if (fopen_direct_io && fc->direct_io_allow_mmap) {
+ res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
+ if (res) {
+ fuse_io_free(ia);
+@@ -1574,6 +1574,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ ssize_t res;
+ bool exclusive_lock =
+ !(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
++ get_fuse_conn(inode)->direct_io_allow_mmap ||
+ iocb->ki_flags & IOCB_APPEND ||
+ fuse_direct_write_extending_i_size(iocb, from);
+
+@@ -1581,6 +1582,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ * Take exclusive lock if
+ * - Parallel direct writes are disabled - a user space decision
+ * - Parallel direct writes are enabled and i_size is being extended.
++ * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
+ * This might not be needed at all, but needs further investigation.
+ */
+ if (exclusive_lock)
+@@ -1733,10 +1735,16 @@ __acquires(fi->lock)
+ fuse_writepage_finish(fm, wpa);
+ spin_unlock(&fi->lock);
+
+- /* After fuse_writepage_finish() aux request list is private */
++ /* After rb_erase() aux request list is private */
+ for (aux = wpa->next; aux; aux = next) {
++ struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
++
+ next = aux->next;
+ aux->next = NULL;
++
++ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
++ dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
++ wb_writeout_inc(&bdi->wb);
+ fuse_writepage_free(aux);
+ }
+
+@@ -2465,15 +2473,19 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
+ return fuse_dax_mmap(file, vma);
+
+ if (ff->open_flags & FOPEN_DIRECT_IO) {
+- /* Can't provide the coherency needed for MAP_SHARED
+- * if FUSE_DIRECT_IO_RELAX isn't set.
++ /*
++ * Can't provide the coherency needed for MAP_SHARED
++ * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
+ */
+- if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_relax)
++ if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
+ return -ENODEV;
+
+ invalidate_inode_pages2(file->f_mapping);
+
+- return generic_file_mmap(file, vma);
++ if (!(vma->vm_flags & VM_MAYSHARE)) {
++ /* MAP_PRIVATE */
++ return generic_file_mmap(file, vma);
++ }
+ }
+
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index bf0b85d0b95c7d..4ce1a6fdc94f03 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -63,6 +63,19 @@ struct fuse_forget_link {
+ struct fuse_forget_link *next;
+ };
+
++/* Submount lookup tracking */
++struct fuse_submount_lookup {
++ /** Refcount */
++ refcount_t count;
++
++ /** Unique ID, which identifies the inode between userspace
++ * and kernel */
++ u64 nodeid;
++
++ /** The request used for sending the FORGET message */
++ struct fuse_forget_link *forget;
++};
++
+ /** FUSE inode */
+ struct fuse_inode {
+ /** Inode data */
+@@ -158,6 +171,8 @@ struct fuse_inode {
+ */
+ struct fuse_inode_dax *dax;
+ #endif
++ /** Submount specific lookup tracking */
++ struct fuse_submount_lookup *submount_lookup;
+ };
+
+ /** FUSE inode state bits */
+@@ -797,8 +812,8 @@ struct fuse_conn {
+ /* Is tmpfile not implemented by fs? */
+ unsigned int no_tmpfile:1;
+
+- /* relax restrictions in FOPEN_DIRECT_IO mode */
+- unsigned int direct_io_relax:1;
++ /* Relax restrictions to allow shared mmap in FOPEN_DIRECT_IO mode */
++ unsigned int direct_io_allow_mmap:1;
+
+ /* Is statx not implemented by fs? */
+ unsigned int no_statx:1;
+@@ -873,6 +888,7 @@ struct fuse_mount {
+
+ /* Entry on fc->mounts */
+ struct list_head fc_entry;
++ struct rcu_head rcu;
+ };
+
+ static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
+@@ -924,7 +940,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation,
+
+ static inline void fuse_make_bad(struct inode *inode)
+ {
+- remove_inode_hash(inode);
+ set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
+ }
+
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 2e4eb7cf26fb33..735abf426a0640 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -68,6 +68,24 @@ struct fuse_forget_link *fuse_alloc_forget(void)
+ return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
+ }
+
++static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
++{
++ struct fuse_submount_lookup *sl;
++
++ sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
++ if (!sl)
++ return NULL;
++ sl->forget = fuse_alloc_forget();
++ if (!sl->forget)
++ goto out_free;
++
++ return sl;
++
++out_free:
++ kfree(sl);
++ return NULL;
++}
++
+ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ {
+ struct fuse_inode *fi;
+@@ -83,6 +101,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ fi->attr_version = 0;
+ fi->orig_ino = 0;
+ fi->state = 0;
++ fi->submount_lookup = NULL;
+ mutex_init(&fi->mutex);
+ spin_lock_init(&fi->lock);
+ fi->forget = fuse_alloc_forget();
+@@ -113,6 +132,17 @@ static void fuse_free_inode(struct inode *inode)
+ kmem_cache_free(fuse_inode_cachep, fi);
+ }
+
++static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
++ struct fuse_submount_lookup *sl)
++{
++ if (!refcount_dec_and_test(&sl->count))
++ return;
++
++ fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
++ sl->forget = NULL;
++ kfree(sl);
++}
++
+ static void fuse_evict_inode(struct inode *inode)
+ {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+@@ -132,6 +162,11 @@ static void fuse_evict_inode(struct inode *inode)
+ fi->nlookup);
+ fi->forget = NULL;
+ }
++
++ if (fi->submount_lookup) {
++ fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
++ fi->submount_lookup = NULL;
++ }
+ }
+ if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+ WARN_ON(!list_empty(&fi->write_files));
+@@ -332,6 +367,13 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ fuse_dax_dontcache(inode, attr->flags);
+ }
+
++static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
++ u64 nodeid)
++{
++ sl->nodeid = nodeid;
++ refcount_set(&sl->count, 1);
++}
++
+ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_conn *fc)
+ {
+@@ -395,12 +437,22 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ */
+ if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
+ S_ISDIR(attr->mode)) {
++ struct fuse_inode *fi;
++
+ inode = new_inode(sb);
+ if (!inode)
+ return NULL;
+
+ fuse_init_inode(inode, attr, fc);
+- get_fuse_inode(inode)->nodeid = nodeid;
++ fi = get_fuse_inode(inode);
++ fi->nodeid = nodeid;
++ fi->submount_lookup = fuse_alloc_submount_lookup();
++ if (!fi->submount_lookup) {
++ iput(inode);
++ return NULL;
++ }
++ /* Sets nlookup = 1 on fi->submount_lookup->nlookup */
++ fuse_init_submount_lookup(fi->submount_lookup, nodeid);
+ inode->i_flags |= S_AUTOMOUNT;
+ goto done;
+ }
+@@ -420,14 +472,17 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
+ } else if (fuse_stale_inode(inode, generation, attr)) {
+ /* nodeid was reused, any I/O on the old inode should fail */
+ fuse_make_bad(inode);
+- iput(inode);
+- goto retry;
++ if (inode != d_inode(sb->s_root)) {
++ remove_inode_hash(inode);
++ iput(inode);
++ goto retry;
++ }
+ }
+-done:
+ fi = get_fuse_inode(inode);
+ spin_lock(&fi->lock);
+ fi->nlookup++;
+ spin_unlock(&fi->lock);
++done:
+ fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);
+
+ return inode;
+@@ -696,6 +751,8 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
+ struct fs_parse_result result;
+ struct fuse_fs_context *ctx = fsc->fs_private;
+ int opt;
++ kuid_t kuid;
++ kgid_t kgid;
+
+ if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ /*
+@@ -740,16 +797,30 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
+ break;
+
+ case OPT_USER_ID:
+- ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
+- if (!uid_valid(ctx->user_id))
++ kuid = make_kuid(fsc->user_ns, result.uint_32);
++ if (!uid_valid(kuid))
+ return invalfc(fsc, "Invalid user_id");
++ /*
++ * The requested uid must be representable in the
++ * filesystem's idmapping.
++ */
++ if (!kuid_has_mapping(fsc->user_ns, kuid))
++ return invalfc(fsc, "Invalid user_id");
++ ctx->user_id = kuid;
+ ctx->user_id_present = true;
+ break;
+
+ case OPT_GROUP_ID:
+- ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
+- if (!gid_valid(ctx->group_id))
++		kgid = make_kgid(fsc->user_ns, result.uint_32);
++ if (!gid_valid(kgid))
++ return invalfc(fsc, "Invalid group_id");
++ /*
++ * The requested gid must be representable in the
++ * filesystem's idmapping.
++ */
++ if (!kgid_has_mapping(fsc->user_ns, kgid))
+ return invalfc(fsc, "Invalid group_id");
++ ctx->group_id = kgid;
+ ctx->group_id_present = true;
+ break;
+
+@@ -881,6 +952,14 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
+ }
+ EXPORT_SYMBOL_GPL(fuse_conn_init);
+
++static void delayed_release(struct rcu_head *p)
++{
++ struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
++
++ put_user_ns(fc->user_ns);
++ fc->release(fc);
++}
++
+ void fuse_conn_put(struct fuse_conn *fc)
+ {
+ if (refcount_dec_and_test(&fc->count)) {
+@@ -892,13 +971,12 @@ void fuse_conn_put(struct fuse_conn *fc)
+ if (fiq->ops->release)
+ fiq->ops->release(fiq);
+ put_pid_ns(fc->pid_ns);
+- put_user_ns(fc->user_ns);
+ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
+ if (bucket) {
+ WARN_ON(atomic_read(&bucket->count) != 1);
+ kfree(bucket);
+ }
+- fc->release(fc);
++ call_rcu(&fc->rcu, delayed_release);
+ }
+ }
+ EXPORT_SYMBOL_GPL(fuse_conn_put);
+@@ -1232,8 +1310,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
+ fc->init_security = 1;
+ if (flags & FUSE_CREATE_SUPP_GROUP)
+ fc->create_supp_group = 1;
+- if (flags & FUSE_DIRECT_IO_RELAX)
+- fc->direct_io_relax = 1;
++ if (flags & FUSE_DIRECT_IO_ALLOW_MMAP)
++ fc->direct_io_allow_mmap = 1;
+ } else {
+ ra_pages = fc->max_read / PAGE_SIZE;
+ fc->no_lock = 1;
+@@ -1280,7 +1358,7 @@ void fuse_send_init(struct fuse_mount *fm)
+ FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
+ FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
+ FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
+- FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_RELAX;
++ FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP;
+ #ifdef CONFIG_FUSE_DAX
+ if (fm->fc->dax)
+ flags |= FUSE_MAP_ALIGNMENT;
+@@ -1316,7 +1394,7 @@ EXPORT_SYMBOL_GPL(fuse_send_init);
+ void fuse_free_conn(struct fuse_conn *fc)
+ {
+ WARN_ON(!list_empty(&fc->devices));
+- kfree_rcu(fc, rcu);
++ kfree(fc);
+ }
+ EXPORT_SYMBOL_GPL(fuse_free_conn);
+
+@@ -1465,6 +1543,8 @@ static int fuse_fill_super_submount(struct super_block *sb,
+ struct super_block *parent_sb = parent_fi->inode.i_sb;
+ struct fuse_attr root_attr;
+ struct inode *root;
++ struct fuse_submount_lookup *sl;
++ struct fuse_inode *fi;
+
+ fuse_sb_defaults(sb);
+ fm->sb = sb;
+@@ -1487,12 +1567,27 @@ static int fuse_fill_super_submount(struct super_block *sb,
+ * its nlookup should not be incremented. fuse_iget() does
+ * that, though, so undo it here.
+ */
+- get_fuse_inode(root)->nlookup--;
++ fi = get_fuse_inode(root);
++ fi->nlookup--;
++
+ sb->s_d_op = &fuse_dentry_operations;
+ sb->s_root = d_make_root(root);
+ if (!sb->s_root)
+ return -ENOMEM;
+
++ /*
++ * Grab the parent's submount_lookup pointer and take a
++ * reference on the shared nlookup from the parent. This is to
++ * prevent the last forget for this nodeid from getting
++ * triggered until all users have finished with it.
++ */
++ sl = parent_fi->submount_lookup;
++ WARN_ON(!sl);
++ if (sl) {
++ refcount_inc(&sl->count);
++ fi->submount_lookup = sl;
++ }
++
+ return 0;
+ }
+
+@@ -1833,7 +1928,7 @@ static void fuse_sb_destroy(struct super_block *sb)
+ void fuse_mount_destroy(struct fuse_mount *fm)
+ {
+ fuse_conn_put(fm->fc);
+- kfree(fm);
++ kfree_rcu(fm, rcu);
+ }
+ EXPORT_SYMBOL(fuse_mount_destroy);
+
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 5f1be1da92ce94..d84dacbdce2c9d 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -323,6 +323,16 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
+ return -ENOMEM;
+ memcpy(fs->tag, tag_buf, len);
+ fs->tag[len] = '\0';
++
++ /* While the VIRTIO specification allows any character, newlines are
++ * awkward on mount(8) command-lines and cause problems in the sysfs
++ * "tag" attr and uevent TAG= properties. Forbid them.
++ */
++ if (strchr(fs->tag, '\n')) {
++ dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
+index 49c01559580f4e..690b9aadceaa88 100644
+--- a/fs/fuse/xattr.c
++++ b/fs/fuse/xattr.c
+@@ -81,7 +81,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
+ }
+ ret = fuse_simple_request(fm, &args);
+ if (!ret && !size)
+- ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
++ ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
+ if (ret == -ENOSYS) {
+ fm->fc->no_getxattr = 1;
+ ret = -EOPNOTSUPP;
+@@ -143,7 +143,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
+ }
+ ret = fuse_simple_request(fm, &args);
+ if (!ret && !size)
+- ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
++ ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
+ if (ret > 0 && size)
+ ret = fuse_verify_xattr_list(list, ret);
+ if (ret == -ENOSYS) {
+diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
+index d4deb2b1995952..82f5b09c04e669 100644
+--- a/fs/gfs2/acl.h
++++ b/fs/gfs2/acl.h
+@@ -11,9 +11,9 @@
+
+ #define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12)
+
+-extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
+-extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+-extern int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+- struct posix_acl *acl, int type);
++struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
++int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
++int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
++ struct posix_acl *acl, int type);
+
+ #endif /* __ACL_DOT_H__ */
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index c26d48355cc270..6097db9a7ebf3d 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -464,7 +464,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
+ error = mpage_read_folio(folio, gfs2_block_map);
+ }
+
+- if (unlikely(gfs2_withdrawn(sdp)))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ return -EIO;
+
+ return error;
+@@ -479,31 +479,29 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
+ *
+ */
+
+-int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+- unsigned size)
++ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
++ size_t size)
+ {
+ struct address_space *mapping = ip->i_inode.i_mapping;
+ unsigned long index = *pos >> PAGE_SHIFT;
+- unsigned offset = *pos & (PAGE_SIZE - 1);
+- unsigned copied = 0;
+- unsigned amt;
+- struct page *page;
++ size_t copied = 0;
+
+ do {
+- page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
+- if (IS_ERR(page)) {
+- if (PTR_ERR(page) == -EINTR)
++ size_t offset, chunk;
++ struct folio *folio;
++
++ folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
++ if (IS_ERR(folio)) {
++ if (PTR_ERR(folio) == -EINTR)
+ continue;
+- return PTR_ERR(page);
++ return PTR_ERR(folio);
+ }
+- amt = size - copied;
+- if (offset + size > PAGE_SIZE)
+- amt = PAGE_SIZE - offset;
+- memcpy_from_page(buf + copied, page, offset, amt);
+- put_page(page);
+- copied += amt;
+- index++;
+- offset = 0;
++ offset = *pos + copied - folio_pos(folio);
++ chunk = min(size - copied, folio_size(folio) - offset);
++ memcpy_from_folio(buf + copied, folio, offset, chunk);
++ index = folio_next_index(folio);
++ folio_put(folio);
++ copied += chunk;
+ } while(copied < size);
+ (*pos) += size;
+ return size;
+diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
+index f08322ef41cfd3..a10c4334d24893 100644
+--- a/fs/gfs2/aops.h
++++ b/fs/gfs2/aops.h
+@@ -8,8 +8,8 @@
+
+ #include "incore.h"
+
+-extern void adjust_fs_space(struct inode *inode);
+-extern void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
+- size_t from, size_t len);
++void adjust_fs_space(struct inode *inode);
++void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
++ size_t from, size_t len);
+
+ #endif /* __AOPS_DOT_H__ */
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index ef7017fb69512c..7ed276a8f599d4 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -106,7 +106,7 @@ static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
+ and write it out to disk */
+
+ unsigned int n = 1;
+- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
++ error = gfs2_alloc_blocks(ip, &block, &n, 0);
+ if (error)
+ goto out_brelse;
+ if (isdir) {
+@@ -702,7 +702,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
+ i = mp->mp_aheight;
+ do {
+ n = blks - alloced;
+- ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
++ ret = gfs2_alloc_blocks(ip, &bn, &n, 0);
+ if (ret)
+ goto out;
+ alloced += n;
+@@ -1715,7 +1715,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ struct buffer_head *dibh, *bh;
+ struct gfs2_holder rd_gh;
+ unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
+- u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
++ unsigned int bsize = 1 << bsize_shift;
++ u64 lblock = (offset + bsize - 1) >> bsize_shift;
+ __u16 start_list[GFS2_MAX_META_HEIGHT];
+ __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
+ unsigned int start_aligned, end_aligned;
+@@ -1726,7 +1727,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ u64 prev_bnr = 0;
+ __be64 *start, *end;
+
+- if (offset >= maxsize) {
++ if (offset + bsize - 1 >= maxsize) {
+ /*
+ * The starting point lies beyond the allocated metadata;
+ * there are no blocks to deallocate.
+diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
+index e5b7d17131ed31..4e8b1e8ebdf390 100644
+--- a/fs/gfs2/bmap.h
++++ b/fs/gfs2/bmap.h
+@@ -46,24 +46,24 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
+ extern const struct iomap_ops gfs2_iomap_ops;
+ extern const struct iomap_writeback_ops gfs2_writeback_ops;
+
+-extern int gfs2_unstuff_dinode(struct gfs2_inode *ip);
+-extern int gfs2_block_map(struct inode *inode, sector_t lblock,
+- struct buffer_head *bh, int create);
+-extern int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+- struct iomap *iomap);
+-extern int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
+- struct iomap *iomap);
+-extern int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
+- unsigned int *extlen);
+-extern int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
+- unsigned *extlen, bool *new);
+-extern int gfs2_setattr_size(struct inode *inode, u64 size);
+-extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
+-extern int gfs2_file_dealloc(struct gfs2_inode *ip);
+-extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
+- unsigned int len);
+-extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
+-extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
+-extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
++int gfs2_unstuff_dinode(struct gfs2_inode *ip);
++int gfs2_block_map(struct inode *inode, sector_t lblock,
++ struct buffer_head *bh, int create);
++int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
++ struct iomap *iomap);
++int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
++ struct iomap *iomap);
++int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
++ unsigned int *extlen);
++int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
++ unsigned *extlen, bool *new);
++int gfs2_setattr_size(struct inode *inode, u64 size);
++int gfs2_truncatei_resume(struct gfs2_inode *ip);
++int gfs2_file_dealloc(struct gfs2_inode *ip);
++int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
++ unsigned int len);
++int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
++void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
++int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
+
+ #endif /* __BMAP_DOT_H__ */
+diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
+index 1a2afa88f8bea8..3a2a10d6d43d13 100644
+--- a/fs/gfs2/dir.c
++++ b/fs/gfs2/dir.c
+@@ -868,7 +868,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
+ struct gfs2_dirent *dent;
+ struct timespec64 tv = current_time(inode);
+
+- error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
++ error = gfs2_alloc_blocks(ip, &bn, &n, 0);
+ if (error)
+ return NULL;
+ bh = gfs2_meta_new(ip->i_gl, bn);
+diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
+index 5b76480c17c9e9..25a857c78b538b 100644
+--- a/fs/gfs2/dir.h
++++ b/fs/gfs2/dir.h
+@@ -23,32 +23,32 @@ struct gfs2_diradd {
+ int save_loc;
+ };
+
+-extern struct inode *gfs2_dir_search(struct inode *dir,
+- const struct qstr *filename,
+- bool fail_on_exist);
+-extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
+- const struct gfs2_inode *ip);
+-extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
+- const struct gfs2_inode *ip, struct gfs2_diradd *da);
++struct inode *gfs2_dir_search(struct inode *dir,
++ const struct qstr *filename,
++ bool fail_on_exist);
++int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
++ const struct gfs2_inode *ip);
++int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
++ const struct gfs2_inode *ip, struct gfs2_diradd *da);
+ static inline void gfs2_dir_no_add(struct gfs2_diradd *da)
+ {
+ brelse(da->bh);
+ da->bh = NULL;
+ }
+-extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
+-extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
+- struct file_ra_state *f_ra);
+-extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
+- const struct gfs2_inode *nip, unsigned int new_type);
++int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
++int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
++ struct file_ra_state *f_ra);
++int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
++ const struct gfs2_inode *nip, unsigned int new_type);
+
+-extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
++int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
+
+-extern int gfs2_diradd_alloc_required(struct inode *dir,
+- const struct qstr *filename,
+- struct gfs2_diradd *da);
+-extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
+- struct buffer_head **bhp);
+-extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
++int gfs2_diradd_alloc_required(struct inode *dir,
++ const struct qstr *filename,
++ struct gfs2_diradd *da);
++int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
++ struct buffer_head **bhp);
++void gfs2_dir_hash_inval(struct gfs2_inode *ip);
+
+ static inline u32 gfs2_disk_hash(const char *data, int len)
+ {
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index f2700477a3001b..9296e0e282bcd8 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -1436,7 +1436,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
+
+ if (!(fl->fl_flags & FL_POSIX))
+ return -ENOLCK;
+- if (unlikely(gfs2_withdrawn(sdp))) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (fl->fl_type == F_UNLCK)
+ locks_lock_file_wait(file, fl);
+ return -EIO;
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 4a280be229a651..685e3ef9e9008d 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -156,7 +156,7 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+ {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+- if (likely(!gfs2_withdrawn(sdp)))
++ if (!gfs2_withdrawing_or_withdrawn(sdp))
+ return false;
+ if (gl->gl_ops->go_flags & GLOF_NONDISK)
+ return false;
+@@ -166,19 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+ return true;
+ }
+
+-void gfs2_glock_free(struct gfs2_glock *gl)
++static void __gfs2_glock_free(struct gfs2_glock *gl)
+ {
+- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+-
+- gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
+ rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
+ smp_mb();
+ wake_up_glock(gl);
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
++}
++
++void gfs2_glock_free(struct gfs2_glock *gl) {
++ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++
++ __gfs2_glock_free(gl);
++ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
++ wake_up(&sdp->sd_kill_wait);
++}
++
++void gfs2_glock_free_later(struct gfs2_glock *gl) {
++ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++
++ spin_lock(&lru_lock);
++ list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
++ spin_unlock(&lru_lock);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
+ }
+
++static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
++{
++ struct list_head *list = &sdp->sd_dead_glocks;
++
++ while(!list_empty(list)) {
++ struct gfs2_glock *gl;
++
++ gl = list_first_entry(list, struct gfs2_glock, gl_lru);
++ list_del_init(&gl->gl_lru);
++ __gfs2_glock_free(gl);
++ }
++}
++
+ /**
+ * gfs2_glock_hold() - increment reference count on glock
+ * @gl: The glock to hold
+@@ -278,7 +304,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+ if (mapping) {
+ truncate_inode_pages_final(mapping);
+- if (!gfs2_withdrawn(sdp))
++ if (!gfs2_withdrawing_or_withdrawn(sdp))
+ GLOCK_BUG_ON(gl, !mapping_empty(mapping));
+ }
+ trace_gfs2_glock_put(gl);
+@@ -574,7 +600,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ struct gfs2_holder *gh;
+ unsigned state = ret & LM_OUT_ST_MASK;
+
+- spin_lock(&gl->gl_lockref.lock);
+ trace_gfs2_glock_state_change(gl, state);
+ state_change(gl, state);
+ gh = find_first_waiter(gl);
+@@ -622,7 +647,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ gl->gl_target, state);
+ GLOCK_BUG_ON(gl, 1);
+ }
+- spin_unlock(&gl->gl_lockref.lock);
+ return;
+ }
+
+@@ -645,7 +669,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ }
+ out:
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+- spin_unlock(&gl->gl_lockref.lock);
+ }
+
+ static bool is_system_glock(struct gfs2_glock *gl)
+@@ -673,6 +696,7 @@ __acquires(&gl->gl_lockref.lock)
+ {
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
+ int ret;
+
+@@ -701,6 +725,9 @@ __acquires(&gl->gl_lockref.lock)
+ (gl->gl_state == LM_ST_EXCLUSIVE) ||
+ (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
+ clear_bit(GLF_BLOCKING, &gl->gl_flags);
++ if (!glops->go_inval && !glops->go_sync)
++ goto skip_inval;
++
+ spin_unlock(&gl->gl_lockref.lock);
+ if (glops->go_sync) {
+ ret = glops->go_sync(gl);
+@@ -713,6 +740,7 @@ __acquires(&gl->gl_lockref.lock)
+ fs_err(sdp, "Error %d syncing glock \n", ret);
+ gfs2_dump_glock(NULL, gl, true);
+ }
++ spin_lock(&gl->gl_lockref.lock);
+ goto skip_inval;
+ }
+ }
+@@ -733,9 +761,10 @@ __acquires(&gl->gl_lockref.lock)
+ glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
+ clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ }
++ spin_lock(&gl->gl_lockref.lock);
+
+ skip_inval:
+- gfs2_glock_hold(gl);
++ gl->gl_lockref.count++;
+ /*
+ * Check for an error encountered since we called go_sync and go_inval.
+ * If so, we can't withdraw from the glock code because the withdraw
+@@ -757,7 +786,7 @@ __acquires(&gl->gl_lockref.lock)
+ * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
+ * then it's okay to tell dlm to unlock it.
+ */
+- if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
++ if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
+ gfs2_withdraw_delayed(sdp);
+ if (glock_blocked_by_withdraw(gl) &&
+ (target != LM_ST_UNLOCKED ||
+@@ -777,31 +806,37 @@ __acquires(&gl->gl_lockref.lock)
+ */
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+- gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+- goto out;
++ __gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
++ return;
+ } else {
+ clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ }
+ }
+
+- if (sdp->sd_lockstruct.ls_ops->lm_lock) {
+- /* lock_dlm */
+- ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
++ if (ls->ls_ops->lm_lock) {
++ spin_unlock(&gl->gl_lockref.lock);
++ ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
++ spin_lock(&gl->gl_lockref.lock);
++
+ if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
+ target == LM_ST_UNLOCKED &&
+- test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
+- finish_xmote(gl, target);
+- gfs2_glock_queue_work(gl, 0);
++ test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
++ /*
++ * The lockspace has been released and the lock has
++ * been unlocked implicitly.
++ */
+ } else if (ret) {
+ fs_err(sdp, "lm_lock ret %d\n", ret);
+- GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
++ target = gl->gl_state | LM_OUT_ERROR;
++ } else {
++ /* The operation will be completed asynchronously. */
++ return;
+ }
+- } else { /* lock_nolock */
+- finish_xmote(gl, target);
+- gfs2_glock_queue_work(gl, 0);
+ }
+-out:
+- spin_lock(&gl->gl_lockref.lock);
++
++ /* Complete the operation now. */
++ finish_xmote(gl, target);
++ __gfs2_glock_queue_work(gl, 0);
+ }
+
+ /**
+@@ -1054,11 +1089,12 @@ static void glock_work_func(struct work_struct *work)
+ struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+ unsigned int drop_refs = 1;
+
+- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
++ spin_lock(&gl->gl_lockref.lock);
++ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
++ clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ finish_xmote(gl, gl->gl_reply);
+ drop_refs++;
+ }
+- spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+ gl->gl_state != LM_ST_UNLOCKED &&
+ gl->gl_demote_state != LM_ST_EXCLUSIVE) {
+@@ -2116,8 +2152,11 @@ static void thaw_glock(struct gfs2_glock *gl)
+ return;
+ if (!lockref_get_not_dead(&gl->gl_lockref))
+ return;
++
++ spin_lock(&gl->gl_lockref.lock);
+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+- gfs2_glock_queue_work(gl, 0);
++ __gfs2_glock_queue_work(gl, 0);
++ spin_unlock(&gl->gl_lockref.lock);
+ }
+
+ /**
+@@ -2193,6 +2232,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
+ wait_event_timeout(sdp->sd_kill_wait,
+ atomic_read(&sdp->sd_glock_disposal) == 0,
+ HZ * 600);
++ gfs2_lm_unmount(sdp);
++ gfs2_free_dead_glocks(sdp);
+ glock_hash_walk(dump_glock_func, sdp);
+ }
+
+diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
+index c8685ca7d2a26a..f7ee9ca948eeea 100644
+--- a/fs/gfs2/glock.h
++++ b/fs/gfs2/glock.h
+@@ -181,40 +181,40 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
+ return NULL;
+ }
+
+-extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+- const struct gfs2_glock_operations *glops,
+- int create, struct gfs2_glock **glp);
+-extern struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
+-extern void gfs2_glock_put(struct gfs2_glock *gl);
+-extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
+-
+-extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+- u16 flags, struct gfs2_holder *gh,
+- unsigned long ip);
++int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
++ const struct gfs2_glock_operations *glops,
++ int create, struct gfs2_glock **glp);
++struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
++void gfs2_glock_put(struct gfs2_glock *gl);
++void gfs2_glock_queue_put(struct gfs2_glock *gl);
++
++void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
++ u16 flags, struct gfs2_holder *gh,
++ unsigned long ip);
+ static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+ u16 flags, struct gfs2_holder *gh) {
+ __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
+ }
+
+-extern void gfs2_holder_reinit(unsigned int state, u16 flags,
+- struct gfs2_holder *gh);
+-extern void gfs2_holder_uninit(struct gfs2_holder *gh);
+-extern int gfs2_glock_nq(struct gfs2_holder *gh);
+-extern int gfs2_glock_poll(struct gfs2_holder *gh);
+-extern int gfs2_instantiate(struct gfs2_holder *gh);
+-extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
+-extern int gfs2_glock_wait(struct gfs2_holder *gh);
+-extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
+-extern void gfs2_glock_dq(struct gfs2_holder *gh);
+-extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
+-extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
+-extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
+- const struct gfs2_glock_operations *glops,
+- unsigned int state, u16 flags,
+- struct gfs2_holder *gh);
+-extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+-extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+-extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
++void gfs2_holder_reinit(unsigned int state, u16 flags,
++ struct gfs2_holder *gh);
++void gfs2_holder_uninit(struct gfs2_holder *gh);
++int gfs2_glock_nq(struct gfs2_holder *gh);
++int gfs2_glock_poll(struct gfs2_holder *gh);
++int gfs2_instantiate(struct gfs2_holder *gh);
++int gfs2_glock_holder_ready(struct gfs2_holder *gh);
++int gfs2_glock_wait(struct gfs2_holder *gh);
++int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
++void gfs2_glock_dq(struct gfs2_holder *gh);
++void gfs2_glock_dq_wait(struct gfs2_holder *gh);
++void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
++int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
++ const struct gfs2_glock_operations *glops,
++ unsigned int state, u16 flags,
++ struct gfs2_holder *gh);
++int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
++void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
++void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
+ bool fsid);
+ #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
+ gfs2_dump_glock(NULL, gl, true); \
+@@ -228,7 +228,7 @@ extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
+ gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
+ while (0)
+
+-extern __printf(2, 3)
++__printf(2, 3)
+ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
+
+ /**
+@@ -256,27 +256,28 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
+ return error;
+ }
+
+-extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+-extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+-extern bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
+-extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+-extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
+-extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+-extern void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
+-extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+-extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
+-extern void gfs2_glock_free(struct gfs2_glock *gl);
+-
+-extern int __init gfs2_glock_init(void);
+-extern void gfs2_glock_exit(void);
+-
+-extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+-extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
+-extern void gfs2_register_debugfs(void);
+-extern void gfs2_unregister_debugfs(void);
+-
+-extern void glock_set_object(struct gfs2_glock *gl, void *object);
+-extern void glock_clear_object(struct gfs2_glock *gl, void *object);
++void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
++void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
++bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
++void gfs2_cancel_delete_work(struct gfs2_glock *gl);
++void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
++void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
++void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
++void gfs2_glock_thaw(struct gfs2_sbd *sdp);
++void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
++void gfs2_glock_free(struct gfs2_glock *gl);
++void gfs2_glock_free_later(struct gfs2_glock *gl);
++
++int __init gfs2_glock_init(void);
++void gfs2_glock_exit(void);
++
++void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
++void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
++void gfs2_register_debugfs(void);
++void gfs2_unregister_debugfs(void);
++
++void glock_set_object(struct gfs2_glock *gl, void *object);
++void glock_clear_object(struct gfs2_glock *gl, void *object);
+
+ extern const struct lm_lockops gfs2_dlm_ops;
+
+@@ -295,7 +296,7 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
+ return !list_empty(&gh->gh_list);
+ }
+
+-extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+-extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
++void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
++bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+
+ #endif /* __GLOCK_DOT_H__ */
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index f41ca89d216bc2..1c854d4e2d4915 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
+ spin_unlock(&sdp->sd_ail_lock);
+ gfs2_log_unlock(sdp);
++
++ if (gfs2_withdrawing(sdp))
++ gfs2_withdraw(sdp);
+ }
+
+
+@@ -174,7 +177,7 @@ static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
+
+ filemap_fdatawrite_range(metamapping, start, end);
+ error = filemap_fdatawait_range(metamapping, start, end);
+- WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
++ WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
+ mapping_set_error(metamapping, error);
+ if (error)
+ gfs2_io_error(sdp);
+diff --git a/fs/gfs2/glops.h b/fs/gfs2/glops.h
+index 695898afcaf1fb..9341423798df8c 100644
+--- a/fs/gfs2/glops.h
++++ b/fs/gfs2/glops.h
+@@ -22,7 +22,7 @@ extern const struct gfs2_glock_operations gfs2_quota_glops;
+ extern const struct gfs2_glock_operations gfs2_journal_glops;
+ extern const struct gfs2_glock_operations *gfs2_glops_list[];
+
+-extern int gfs2_inode_metasync(struct gfs2_glock *gl);
+-extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
++int gfs2_inode_metasync(struct gfs2_glock *gl);
++void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
+
+ #endif /* __GLOPS_DOT_H__ */
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index a8c95c5293c6cf..60abd7050c9983 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -838,6 +838,7 @@ struct gfs2_sbd {
+ /* For quiescing the filesystem */
+ struct gfs2_holder sd_freeze_gh;
+ struct mutex sd_freeze_mutex;
++ struct list_head sd_dead_glocks;
+
+ char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
+ char sd_table_name[GFS2_FSNAME_LEN];
+@@ -863,7 +864,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
+ preempt_enable();
+ }
+
+-extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
++struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
+ static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
+ {
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 0eac0450790471..29085643ad104e 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -265,17 +265,18 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
+ }
+
+
+-struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
++/**
++ * gfs2_lookup_meta - Look up an inode in a metadata directory
++ * @dip: The directory
++ * @name: The name of the inode
++ */
++struct inode *gfs2_lookup_meta(struct inode *dip, const char *name)
+ {
+ struct qstr qstr;
+ struct inode *inode;
++
+ gfs2_str2qstr(&qstr, name);
+ inode = gfs2_lookupi(dip, &qstr, 1);
+- /* gfs2_lookupi has inconsistent callers: vfs
+- * related routines expect NULL for no entry found,
+- * gfs2_lookup_simple callers expect ENOENT
+- * and do not check for NULL.
+- */
+ if (IS_ERR_OR_NULL(inode))
+ return inode ? inode : ERR_PTR(-ENOENT);
+
+@@ -417,7 +418,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
+ if (error)
+ goto out_ipreserv;
+
+- error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
++ error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1);
+ if (error)
+ goto out_trans_end;
+
+@@ -1866,16 +1867,24 @@ static const char *gfs2_get_link(struct dentry *dentry,
+ int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
+ int mask)
+ {
++ int may_not_block = mask & MAY_NOT_BLOCK;
+ struct gfs2_inode *ip;
+ struct gfs2_holder i_gh;
++ struct gfs2_glock *gl;
+ int error;
+
+ gfs2_holder_mark_uninitialized(&i_gh);
+ ip = GFS2_I(inode);
+- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
+- if (mask & MAY_NOT_BLOCK)
++ gl = rcu_dereference_check(ip->i_gl, !may_not_block);
++ if (unlikely(!gl)) {
++ /* inode is getting torn down, must be RCU mode */
++ WARN_ON_ONCE(!may_not_block);
++ return -ECHILD;
++ }
++ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
++ if (may_not_block)
+ return -ECHILD;
+- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
++ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ if (error)
+ return error;
+ }
+@@ -1920,7 +1929,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
+ kuid_t ouid, nuid;
+ kgid_t ogid, ngid;
+ int error;
+- struct gfs2_alloc_parms ap;
++ struct gfs2_alloc_parms ap = {};
+
+ ouid = inode->i_uid;
+ ogid = inode->i_gid;
+diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
+index c8c5814e7295d9..ce70cf26b497d9 100644
+--- a/fs/gfs2/inode.h
++++ b/fs/gfs2/inode.h
+@@ -13,9 +13,9 @@
+ #include "util.h"
+
+ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask);
+-extern int gfs2_internal_read(struct gfs2_inode *ip,
+- char *buf, loff_t *pos, unsigned size);
+-extern void gfs2_set_aops(struct inode *inode);
++ssize_t gfs2_internal_read(struct gfs2_inode *ip,
++ char *buf, loff_t *pos, size_t size);
++void gfs2_set_aops(struct inode *inode);
+
+ static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
+ {
+@@ -88,33 +88,33 @@ static inline int gfs2_check_internal_file_size(struct inode *inode,
+ return -EIO;
+ }
+
+-extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
+- u64 no_addr, u64 no_formal_ino,
+- unsigned int blktype);
+-extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
+- u64 no_formal_ino,
+- unsigned int blktype);
+-
+-extern int gfs2_inode_refresh(struct gfs2_inode *ip);
+-
+-extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
+- int is_root);
+-extern int gfs2_permission(struct mnt_idmap *idmap,
+- struct inode *inode, int mask);
+-extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
+-extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
+-extern int gfs2_open_common(struct inode *inode, struct file *file);
+-extern loff_t gfs2_seek_data(struct file *file, loff_t offset);
+-extern loff_t gfs2_seek_hole(struct file *file, loff_t offset);
++struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
++ u64 no_addr, u64 no_formal_ino,
++ unsigned int blktype);
++struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
++ u64 no_formal_ino,
++ unsigned int blktype);
++
++int gfs2_inode_refresh(struct gfs2_inode *ip);
++
++struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
++ int is_root);
++int gfs2_permission(struct mnt_idmap *idmap,
++ struct inode *inode, int mask);
++struct inode *gfs2_lookup_meta(struct inode *dip, const char *name);
++void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
++int gfs2_open_common(struct inode *inode, struct file *file);
++loff_t gfs2_seek_data(struct file *file, loff_t offset);
++loff_t gfs2_seek_hole(struct file *file, loff_t offset);
+
+ extern const struct file_operations gfs2_file_fops_nolock;
+ extern const struct file_operations gfs2_dir_fops_nolock;
+
+-extern int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+-extern int gfs2_fileattr_set(struct mnt_idmap *idmap,
+- struct dentry *dentry, struct fileattr *fa);
+-extern void gfs2_set_inode_flags(struct inode *inode);
+-
++int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
++int gfs2_fileattr_set(struct mnt_idmap *idmap,
++ struct dentry *dentry, struct fileattr *fa);
++void gfs2_set_inode_flags(struct inode *inode);
++
+ #ifdef CONFIG_GFS2_FS_LOCKING_DLM
+ extern const struct file_operations gfs2_file_fops;
+ extern const struct file_operations gfs2_dir_fops;
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index 59ab18c798890f..e028e55e67d95f 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
+ struct gfs2_glock *gl = arg;
+ unsigned ret = gl->gl_state;
+
++ /* If the glock is dead, we only react to a dlm_unlock() reply. */
++ if (__lockref_is_dead(&gl->gl_lockref) &&
++ gl->gl_lksb.sb_status != -DLM_EUNLOCK)
++ return;
++
+ gfs2_update_reply_times(gl);
+ BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
+
+@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
+ {
+ struct gfs2_glock *gl = arg;
+
++ if (__lockref_is_dead(&gl->gl_lockref))
++ return;
++
+ switch (mode) {
+ case DLM_LOCK_EX:
+ gfs2_glock_cb(gl, LM_ST_UNLOCKED);
+@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int error;
+
+- if (gl->gl_lksb.sb_lkid == 0)
+- goto out_free;
++ BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
++
++ if (gl->gl_lksb.sb_lkid == 0) {
++ gfs2_glock_free(gl);
++ return;
++ }
+
+ clear_bit(GLF_BLOCKING, &gl->gl_flags);
+ gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
+@@ -300,13 +312,17 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ gfs2_update_request_times(gl);
+
+ /* don't want to call dlm if we've unmounted the lock protocol */
+- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
+- goto out_free;
++ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
++ gfs2_glock_free(gl);
++ return;
++ }
+ /* don't want to skip dlm_unlock writing the lvb when lock has one */
+
+ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+- !gl->gl_lksb.sb_lvbptr)
+- goto out_free;
++ !gl->gl_lksb.sb_lvbptr) {
++ gfs2_glock_free_later(gl);
++ return;
++ }
+
+ again:
+ error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
+@@ -321,10 +337,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number, error);
+ }
+- return;
+-
+-out_free:
+- gfs2_glock_free(gl);
+ }
+
+ static void gdlm_cancel(struct gfs2_glock *gl)
+@@ -1122,7 +1134,7 @@ static void gdlm_recover_prep(void *arg)
+ struct gfs2_sbd *sdp = arg;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ fs_err(sdp, "recover_prep ignored due to withdraw.\n");
+ return;
+ }
+@@ -1148,7 +1160,7 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int jid = slot->slot - 1;
+
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
+ jid);
+ return;
+@@ -1177,7 +1189,7 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
+ struct gfs2_sbd *sdp = arg;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ fs_err(sdp, "recover_done ignored due to withdraw.\n");
+ return;
+ }
+@@ -1208,7 +1220,7 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
+ {
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
+ jid);
+ return;
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index e5271ae87d1c43..767549066066c2 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -126,7 +126,7 @@ __acquires(&sdp->sd_ail_lock)
+ }
+ }
+
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ gfs2_remove_from_ail(bd);
+ continue;
+ }
+@@ -841,7 +841,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ struct super_block *sb = sdp->sd_vfs;
+ u64 dblock;
+
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ return;
+
+ page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+@@ -1047,7 +1047,8 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ * Do this check while holding the log_flush_lock to prevent new
+ * buffers from being added to the ail via gfs2_pin()
+ */
+- if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
++ if (gfs2_withdrawing_or_withdrawn(sdp) ||
++ !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+ goto out;
+
+ /* Log might have been flushed while we waited for the flush lock */
+@@ -1096,13 +1097,14 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ goto out_withdraw;
+
+ gfs2_ordered_write(sdp);
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ goto out_withdraw;
+ lops_before_commit(sdp, tr);
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ goto out_withdraw;
+- gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
+- if (gfs2_withdrawn(sdp))
++ if (sdp->sd_jdesc)
++ gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ goto out_withdraw;
+
+ if (sdp->sd_log_head != sdp->sd_log_flush_head) {
+@@ -1110,7 +1112,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ } else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
+ log_write_header(sdp, flags);
+ }
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ goto out_withdraw;
+ lops_after_commit(sdp, tr);
+
+@@ -1128,7 +1130,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
+ if (!sdp->sd_log_idle) {
+ empty_ail1_list(sdp);
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ goto out_withdraw;
+ log_write_header(sdp, flags);
+ }
+@@ -1298,7 +1300,7 @@ int gfs2_logd(void *data)
+ unsigned long t = 1;
+
+ while (!kthread_should_stop()) {
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ break;
+
+ /* Check for errors writing to the journal */
+@@ -1337,7 +1339,7 @@ int gfs2_logd(void *data)
+ gfs2_ail_flush_reqd(sdp) ||
+ gfs2_jrnl_flush_reqd(sdp) ||
+ sdp->sd_log_error ||
+- gfs2_withdrawn(sdp) ||
++ gfs2_withdrawing_or_withdrawn(sdp) ||
+ kthread_should_stop(),
+ t);
+ }
+diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
+index 653cffcbf86945..c27b05099c1e40 100644
+--- a/fs/gfs2/log.h
++++ b/fs/gfs2/log.h
+@@ -70,29 +70,29 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
+ }
+ }
+
+-extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
+-extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
+-extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+-extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+-extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
+-extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+-extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+- unsigned int *extra_revokes);
+-extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+- unsigned int *extra_revokes);
+-extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+- u64 seq, u32 tail, u32 lblock, u32 flags,
+- blk_opf_t op_flags);
+-extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
+- u32 type);
+-extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
+-extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
+-extern void log_flush_wait(struct gfs2_sbd *sdp);
++void gfs2_ordered_del_inode(struct gfs2_inode *ip);
++unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
++void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
++bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
++void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
++void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
++bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
++ unsigned int *extra_revokes);
++void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
++ unsigned int *extra_revokes);
++void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
++ u64 seq, u32 tail, u32 lblock, u32 flags,
++ blk_opf_t op_flags);
++void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
++ u32 type);
++void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
++void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
++void log_flush_wait(struct gfs2_sbd *sdp);
+
+-extern int gfs2_logd(void *data);
+-extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+-extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
+-extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+-extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
++int gfs2_logd(void *data);
++void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
++void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
++void gfs2_flush_revokes(struct gfs2_sbd *sdp);
++void gfs2_ail_drain(struct gfs2_sbd *sdp);
+
+ #endif /* __LOG_DOT_H__ */
+diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
+index 1412ffba1d4446..07890c7b145d8b 100644
+--- a/fs/gfs2/lops.h
++++ b/fs/gfs2/lops.h
+@@ -11,16 +11,18 @@
+ #include "incore.h"
+
+ extern const struct gfs2_log_operations *gfs2_log_ops[];
+-extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
+-extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
+-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+- struct page *page, unsigned size, unsigned offset,
+- u64 blkno);
+-extern void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
+-extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
+-extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
+- struct gfs2_log_header_host *head, bool keep_cache);
+-extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
++
++void gfs2_log_incr_head(struct gfs2_sbd *sdp);
++u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
++void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
++ struct page *page, unsigned size, unsigned offset,
++ u64 blkno);
++void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
++void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
++int gfs2_find_jhead(struct gfs2_jdesc *jd,
++ struct gfs2_log_header_host *head, bool keep_cache);
++void gfs2_drain_revokes(struct gfs2_sbd *sdp);
++
+ static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
+ {
+ return sdp->sd_ldptrs;
+diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
+index 924361fa510b1f..1f42eae112fb88 100644
+--- a/fs/gfs2/meta_io.c
++++ b/fs/gfs2/meta_io.c
+@@ -257,7 +257,8 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+ struct buffer_head *bh, *bhs[2];
+ int num = 0;
+
+- if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp) &&
++ !gfs2_withdraw_in_prog(sdp)) {
+ *bhp = NULL;
+ return -EIO;
+ }
+@@ -315,7 +316,8 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+
+ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
+ {
+- if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp) &&
++ !gfs2_withdraw_in_prog(sdp))
+ return -EIO;
+
+ wait_on_buffer(bh);
+@@ -326,7 +328,8 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
+ gfs2_io_error_bh_wd(sdp, bh);
+ return -EIO;
+ }
+- if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp) &&
++ !gfs2_withdraw_in_prog(sdp))
+ return -EIO;
+
+ return 0;
+diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
+index d0a58cdd433a90..831d988c2ceb74 100644
+--- a/fs/gfs2/meta_io.h
++++ b/fs/gfs2/meta_io.h
+@@ -50,21 +50,21 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
+ return inode->i_sb->s_fs_info;
+ }
+
+-extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
+-extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+- int rahead, struct buffer_head **bhp);
+-extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+-extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
+- int create);
++struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
++int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
++ int rahead, struct buffer_head **bhp);
++int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
++struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
++ int create);
+ enum {
+ REMOVE_JDATA = 0,
+ REMOVE_META = 1,
+ };
+
+-extern void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
+-extern void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
+-extern int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
+- struct buffer_head **bhp);
++void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
++void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
++int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
++ struct buffer_head **bhp);
+
+ static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
+ struct buffer_head **bhp)
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 33ca04733e933e..f4c066aa24b963 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
+ atomic_set(&sdp->sd_log_in_flight, 0);
+ init_waitqueue_head(&sdp->sd_log_flush_wait);
+ mutex_init(&sdp->sd_freeze_mutex);
++ INIT_LIST_HEAD(&sdp->sd_dead_glocks);
+
+ return sdp;
+
+@@ -648,7 +649,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
+ struct gfs2_jdesc *jd;
+ struct gfs2_inode *ip;
+
+- sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
++ sdp->sd_statfs_inode = gfs2_lookup_meta(master, "statfs");
+ if (IS_ERR(sdp->sd_statfs_inode)) {
+ error = PTR_ERR(sdp->sd_statfs_inode);
+ fs_err(sdp, "can't read in statfs inode: %d\n", error);
+@@ -657,7 +658,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
+ if (sdp->sd_args.ar_spectator)
+ goto out;
+
+- pn = gfs2_lookup_simple(master, "per_node");
++ pn = gfs2_lookup_meta(master, "per_node");
+ if (IS_ERR(pn)) {
+ error = PTR_ERR(pn);
+ fs_err(sdp, "can't find per_node directory: %d\n", error);
+@@ -674,7 +675,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
+ goto free_local;
+ }
+ sprintf(buf, "statfs_change%u", jd->jd_jid);
+- lsi->si_sc_inode = gfs2_lookup_simple(pn, buf);
++ lsi->si_sc_inode = gfs2_lookup_meta(pn, buf);
+ if (IS_ERR(lsi->si_sc_inode)) {
+ error = PTR_ERR(lsi->si_sc_inode);
+ fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
+@@ -739,7 +740,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
+ if (undo)
+ goto fail_statfs;
+
+- sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
++ sdp->sd_jindex = gfs2_lookup_meta(master, "jindex");
+ if (IS_ERR(sdp->sd_jindex)) {
+ fs_err(sdp, "can't lookup journal index: %d\n", error);
+ return PTR_ERR(sdp->sd_jindex);
+@@ -888,7 +889,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
+ goto fail;
+
+ /* Read in the resource index inode */
+- sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
++ sdp->sd_rindex = gfs2_lookup_meta(master, "rindex");
+ if (IS_ERR(sdp->sd_rindex)) {
+ error = PTR_ERR(sdp->sd_rindex);
+ fs_err(sdp, "can't get resource index inode: %d\n", error);
+@@ -897,7 +898,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
+ sdp->sd_rindex_uptodate = 0;
+
+ /* Read in the quota inode */
+- sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
++ sdp->sd_quota_inode = gfs2_lookup_meta(master, "quota");
+ if (IS_ERR(sdp->sd_quota_inode)) {
+ error = PTR_ERR(sdp->sd_quota_inode);
+ fs_err(sdp, "can't get quota file inode: %d\n", error);
+@@ -941,7 +942,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
+ if (undo)
+ goto fail_qc_gh;
+
+- pn = gfs2_lookup_simple(master, "per_node");
++ pn = gfs2_lookup_meta(master, "per_node");
+ if (IS_ERR(pn)) {
+ error = PTR_ERR(pn);
+ fs_err(sdp, "can't find per_node directory: %d\n", error);
+@@ -949,7 +950,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
+ }
+
+ sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
+- sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
++ sdp->sd_qc_inode = gfs2_lookup_meta(pn, buf);
+ if (IS_ERR(sdp->sd_qc_inode)) {
+ error = PTR_ERR(sdp->sd_qc_inode);
+ fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
+@@ -1074,7 +1075,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
+ void gfs2_lm_unmount(struct gfs2_sbd *sdp)
+ {
+ const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
+- if (likely(!gfs2_withdrawn(sdp)) && lm->lm_unmount)
++ if (!gfs2_withdrawing_or_withdrawn(sdp) && lm->lm_unmount)
+ lm->lm_unmount(sdp);
+ }
+
+@@ -1126,8 +1127,7 @@ static int init_threads(struct gfs2_sbd *sdp)
+ return 0;
+
+ fail:
+- kthread_stop(sdp->sd_logd_process);
+- put_task_struct(sdp->sd_logd_process);
++ kthread_stop_put(sdp->sd_logd_process);
+ sdp->sd_logd_process = NULL;
+ return error;
+ }
+@@ -1135,13 +1135,11 @@ static int init_threads(struct gfs2_sbd *sdp)
+ void gfs2_destroy_threads(struct gfs2_sbd *sdp)
+ {
+ if (sdp->sd_logd_process) {
+- kthread_stop(sdp->sd_logd_process);
+- put_task_struct(sdp->sd_logd_process);
++ kthread_stop_put(sdp->sd_logd_process);
+ sdp->sd_logd_process = NULL;
+ }
+ if (sdp->sd_quotad_process) {
+- kthread_stop(sdp->sd_quotad_process);
+- put_task_struct(sdp->sd_quotad_process);
++ kthread_stop_put(sdp->sd_quotad_process);
+ sdp->sd_quotad_process = NULL;
+ }
+ }
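Here the open-coded stop/put pair collapses into kthread_stop_put(). A minimal sketch of what that helper does, following its definition in include/linux/kthread.h:

	/* stop the kthread, then drop the task reference the caller held */
	static inline int kthread_stop_put_sketch(struct task_struct *k)
	{
		int ret = kthread_stop(k);	/* blocks until the thread exits */

		put_task_struct(k);		/* balances get_task_struct() */
		return ret;
	}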
+@@ -1281,10 +1279,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ if (!sb_rdonly(sb)) {
+ error = init_threads(sdp);
+- if (error) {
+- gfs2_withdraw_delayed(sdp);
++ if (error)
+ goto fail_per_node;
+- }
+ }
+
+ error = gfs2_freeze_lock_shared(sdp);
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 171b2713d2e5e6..c537e1d02cf3a1 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -75,9 +75,6 @@
+ #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
+ #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
+
+-#define QC_CHANGE 0
+-#define QC_SYNC 1
+-
+ /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
+ /* -> sd_bitmap_lock */
+ static DEFINE_SPINLOCK(qd_lock);
+@@ -128,7 +125,7 @@ static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+
+- if (!gfs2_withdrawn(sdp)) {
++ if (!gfs2_withdrawing_or_withdrawn(sdp)) {
+ gfs2_assert_warn(sdp, !qd->qd_change);
+ gfs2_assert_warn(sdp, !qd->qd_slot_ref);
+ gfs2_assert_warn(sdp, !qd->qd_bh_count);
+@@ -449,36 +446,29 @@ static void bh_put(struct gfs2_quota_data *qd)
+ mutex_unlock(&sdp->sd_quota_mutex);
+ }
+
+-static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+- u64 *sync_gen)
++static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
++ u64 sync_gen)
+ {
+ if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+ !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+- (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+- return 0;
++ qd->qd_sync_gen >= sync_gen)
++ return false;
+
+ if (!lockref_get_not_dead(&qd->qd_lockref))
+- return 0;
++ return false;
+
+ list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+ set_bit(QDF_LOCKED, &qd->qd_flags);
+ qd->qd_change_sync = qd->qd_change;
+ slot_hold(qd);
+- return 1;
++ return true;
+ }
+
+-static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
++static void qd_ungrab_sync(struct gfs2_quota_data *qd)
+ {
+- int error;
+-
+- error = bh_get(qd);
+- if (!error)
+- return 0;
+-
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ slot_put(qd);
+ qd_put(qd);
+- return error;
+ }
+
+ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
+@@ -494,7 +484,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
+ spin_lock(&qd_lock);
+
+ list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
+- if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
++ if (qd_grab_sync(sdp, iter, sdp->sd_quota_sync_gen)) {
+ qd = iter;
+ break;
+ }
+@@ -503,12 +493,15 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
+ spin_unlock(&qd_lock);
+
+ if (qd) {
+- error = qd_bh_get_or_undo(sdp, qd);
+- if (error)
++ error = bh_get(qd);
++ if (error) {
++ qd_ungrab_sync(qd);
+ return error;
+- *qdp = qd;
++ }
+ }
+
++ *qdp = qd;
++
+ return 0;
+ }
+
+@@ -686,7 +679,7 @@ static int sort_qd(const void *a, const void *b)
+ return 0;
+ }
+
+-static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
++static void do_qc(struct gfs2_quota_data *qd, s64 change)
+ {
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
+@@ -711,18 +704,16 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
+ qd->qd_change = x;
+ spin_unlock(&qd_lock);
+
+- if (qc_type == QC_CHANGE) {
+- if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
+- qd_hold(qd);
+- slot_hold(qd);
+- }
+- } else {
++ if (!x) {
+ gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
+ clear_bit(QDF_CHANGE, &qd->qd_flags);
+ qc->qc_flags = 0;
+ qc->qc_id = 0;
+ slot_put(qd);
+ qd_put(qd);
++ } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
++ qd_hold(qd);
++ slot_hold(qd);
+ }
+
+ if (change < 0) /* Reset quiet flag if we freed some blocks */
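With the QC_CHANGE/QC_SYNC argument gone, do_qc() infers intent from the resulting value: x == 0 means the change record is now empty and its references are released; otherwise the first writer to set QDF_CHANGE takes them. The take-once idiom, in isolation:

	/* test_and_set_bit() returns the old bit value, so only the
	 * first caller to set the flag acquires the references: */
	if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);	/* dropped again when x returns to 0 */
		slot_hold(qd);
	}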
+@@ -967,7 +958,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
+ if (error)
+ goto out_end_trans;
+
+- do_qc(qd, -qd->qd_change_sync, QC_SYNC);
++ do_qc(qd, -qd->qd_change_sync);
+ set_bit(QDF_REFRESH, &qd->qd_flags);
+ }
+
+@@ -1142,7 +1133,6 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
+ struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
+ unsigned int count = 0;
+ u32 x;
+- int found;
+
+ if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
+ return;
+@@ -1150,6 +1140,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
+ for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
+ struct gfs2_quota_data *qd;
+ bool sync;
++ int error;
+
+ qd = ip->i_qadata->qa_qd[x];
+ sync = need_sync(qd);
+@@ -1159,14 +1150,20 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
+ continue;
+
+ spin_lock(&qd_lock);
+- found = qd_check_sync(sdp, qd, NULL);
++ sync = qd_grab_sync(sdp, qd, U64_MAX);
+ spin_unlock(&qd_lock);
+
+- if (!found)
++ if (!sync)
+ continue;
+
+- if (!qd_bh_get_or_undo(sdp, qd))
+- qda[count++] = qd;
++ gfs2_assert_warn(sdp, qd->qd_change_sync);
++ error = bh_get(qd);
++ if (error) {
++ qd_ungrab_sync(qd);
++ continue;
++ }
++
++ qda[count++] = qd;
+ }
+
+ if (count) {
+@@ -1289,7 +1286,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
+
+ if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
+ qid_eq(qd->qd_id, make_kqid_gid(gid))) {
+- do_qc(qd, change, QC_CHANGE);
++ do_qc(qd, change);
+ }
+ }
+ }
+@@ -1482,7 +1479,8 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
+ LIST_HEAD(dispose);
+ int count;
+
+- BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
++ BUG_ON(!test_bit(SDF_NORECOVERY, &sdp->sd_flags) &&
++ test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
+
+ spin_lock(&qd_lock);
+ list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+@@ -1516,7 +1514,7 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
+ {
+ if (error == 0 || error == -EROFS)
+ return;
+- if (!gfs2_withdrawn(sdp)) {
++ if (!gfs2_withdrawing_or_withdrawn(sdp)) {
+ if (!cmpxchg(&sdp->sd_log_error, 0, error))
+ fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
+ wake_up(&sdp->sd_logd_waitq);
+@@ -1560,7 +1558,7 @@ int gfs2_quotad(void *data)
+ unsigned long t = 0;
+
+ while (!kthread_should_stop()) {
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ break;
+
+ /* Update the master statfs file */
+@@ -1584,7 +1582,7 @@ int gfs2_quotad(void *data)
+
+ t = wait_event_interruptible_timeout(sdp->sd_quota_wait,
+ sdp->sd_statfs_force_sync ||
+- gfs2_withdrawn(sdp) ||
++ gfs2_withdrawing_or_withdrawn(sdp) ||
+ kthread_should_stop(),
+ t);
+
+diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
+index 1429945215a039..e4a2fdb552cd78 100644
+--- a/fs/gfs2/quota.h
++++ b/fs/gfs2/quota.h
+@@ -15,27 +15,27 @@ struct gfs2_sbd;
+ #define NO_UID_QUOTA_CHANGE INVALID_UID
+ #define NO_GID_QUOTA_CHANGE INVALID_GID
+
+-extern int gfs2_qa_get(struct gfs2_inode *ip);
+-extern void gfs2_qa_put(struct gfs2_inode *ip);
+-extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+-extern void gfs2_quota_unhold(struct gfs2_inode *ip);
++int gfs2_qa_get(struct gfs2_inode *ip);
++void gfs2_qa_put(struct gfs2_inode *ip);
++int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
++void gfs2_quota_unhold(struct gfs2_inode *ip);
+
+-extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+-extern void gfs2_quota_unlock(struct gfs2_inode *ip);
++int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
++void gfs2_quota_unlock(struct gfs2_inode *ip);
+
+-extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
+- struct gfs2_alloc_parms *ap);
+-extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
+- kuid_t uid, kgid_t gid);
++int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
++ struct gfs2_alloc_parms *ap);
++void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
++ kuid_t uid, kgid_t gid);
+
+-extern int gfs2_quota_sync(struct super_block *sb, int type);
+-extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
++int gfs2_quota_sync(struct super_block *sb, int type);
++int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
+
+-extern int gfs2_quota_init(struct gfs2_sbd *sdp);
+-extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
+-extern int gfs2_quotad(void *data);
++int gfs2_quota_init(struct gfs2_sbd *sdp);
++void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
++int gfs2_quotad(void *data);
+
+-extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
++void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
+
+ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
+ struct gfs2_alloc_parms *ap)
+@@ -62,6 +62,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
+ extern const struct quotactl_ops gfs2_quotactl_ops;
+ extern struct shrinker gfs2_qd_shrinker;
+ extern struct list_lru gfs2_qd_lru;
+-extern void __init gfs2_quota_hash_init(void);
++
++void __init gfs2_quota_hash_init(void);
+
+ #endif /* __QUOTA_DOT_H__ */
+diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
+index 5aae02669a4090..f4fe7039f725b0 100644
+--- a/fs/gfs2/recovery.c
++++ b/fs/gfs2/recovery.c
+@@ -411,7 +411,7 @@ void gfs2_recover_func(struct work_struct *work)
+ int error = 0;
+ int jlocked = 0;
+
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ fs_err(sdp, "jid=%u: Recovery not attempted due to withdraw.\n",
+ jd->jd_jid);
+ goto fail;
+diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
+index 7a0c9d0b7503f0..6a0fd42e1120fc 100644
+--- a/fs/gfs2/recovery.h
++++ b/fs/gfs2/recovery.h
+@@ -17,18 +17,18 @@ static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
+ *blk = 0;
+ }
+
+-extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
++int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
+ struct buffer_head **bh);
+
+-extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+-extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+-extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
++int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
++int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
++void gfs2_revoke_clean(struct gfs2_jdesc *jd);
+
+-extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
+-extern void gfs2_recover_func(struct work_struct *work);
+-extern int __get_log_header(struct gfs2_sbd *sdp,
+- const struct gfs2_log_header *lh, unsigned int blkno,
+- struct gfs2_log_header_host *head);
++int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
++void gfs2_recover_func(struct work_struct *work);
++int __get_log_header(struct gfs2_sbd *sdp,
++ const struct gfs2_log_header *lh, unsigned int blkno,
++ struct gfs2_log_header_host *head);
+
+ #endif /* __RECOVERY_DOT_H__ */
+
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 9308190895c890..396d0f4a259d53 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -2306,7 +2306,7 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+ rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
+ rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
+- if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
++ if (rgd->rd_sbd->sd_args.ar_rgrplvb && rgd->rd_rgl) {
+ struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+
+ gfs2_print_dbg(seq, "%s L: f:%02x b:%u i:%u\n", fs_id_buf,
+@@ -2411,13 +2411,12 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+ * @bn: Used to return the starting block number
+ * @nblocks: requested number of blocks/extent length (value/result)
+ * @dinode: 1 if we're allocating a dinode block, else 0
+- * @generation: the generation number of the inode
+ *
+ * Returns: 0 or error
+ */
+
+ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
+- bool dinode, u64 *generation)
++ bool dinode)
+ {
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct buffer_head *dibh;
+@@ -2477,10 +2476,13 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
+ rbm.rgd->rd_free -= *nblocks;
+ spin_unlock(&rbm.rgd->rd_rsspin);
+ if (dinode) {
++ u64 generation;
++
+ rbm.rgd->rd_dinodes++;
+- *generation = rbm.rgd->rd_igeneration++;
+- if (*generation == 0)
+- *generation = rbm.rgd->rd_igeneration++;
++ generation = rbm.rgd->rd_igeneration++;
++ if (generation == 0)
++ generation = rbm.rgd->rd_igeneration++;
++ ip->i_generation = generation;
+ }
+
+ gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
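Since only dinode allocations care about the generation number, gfs2_alloc_blocks() now stores it into ip->i_generation itself and the out-parameter disappears from the API; the xattr.c call sites later in this patch shrink accordingly, e.g.:

	u64 blk;
	unsigned int n = 1;

	/* was: gfs2_alloc_blocks(ip, &blk, &n, 0, NULL); */
	error = gfs2_alloc_blocks(ip, &blk, &n, 0);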
+diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
+index 00b30cf893af23..8d20e99385db47 100644
+--- a/fs/gfs2/rgrp.h
++++ b/fs/gfs2/rgrp.h
+@@ -22,38 +22,38 @@ struct gfs2_rgrpd;
+ struct gfs2_sbd;
+ struct gfs2_holder;
+
+-extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
++void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
+
+-extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
+-extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
+-extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
++struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
++struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
++struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
+
+-extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
+-extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
+-extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
+-extern int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
+-extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
++void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
++int gfs2_rindex_update(struct gfs2_sbd *sdp);
++void gfs2_free_clones(struct gfs2_rgrpd *rgd);
++int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
++void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
+
+-extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
++struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
+
+ #define GFS2_AF_ORLOV 1
+-extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
+- struct gfs2_alloc_parms *ap);
+-extern void gfs2_inplace_release(struct gfs2_inode *ip);
+-
+-extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
+- bool dinode, u64 *generation);
+-
+-extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
+-extern void gfs2_rs_delete(struct gfs2_inode *ip);
+-extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+- u64 bstart, u32 blen, int meta);
+-extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+- u64 bstart, u32 blen);
+-extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
+-extern void gfs2_unlink_di(struct inode *inode);
+-extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
+- unsigned int type);
++int gfs2_inplace_reserve(struct gfs2_inode *ip,
++ struct gfs2_alloc_parms *ap);
++void gfs2_inplace_release(struct gfs2_inode *ip);
++
++int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
++ bool dinode);
++
++void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
++void gfs2_rs_delete(struct gfs2_inode *ip);
++void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
++ u64 bstart, u32 blen, int meta);
++void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
++ u64 bstart, u32 blen);
++void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
++void gfs2_unlink_di(struct inode *inode);
++int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
++ unsigned int type);
+
+ struct gfs2_rgrp_list {
+ unsigned int rl_rgrps;
+@@ -62,18 +62,19 @@ struct gfs2_rgrp_list {
+ struct gfs2_holder *rl_ghs;
+ };
+
+-extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
+- u64 block);
+-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+- unsigned int state, u16 flags);
+-extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+-extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
+-extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+- const char *fs_id_buf);
+-extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+- struct buffer_head *bh,
+- const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
+-extern int gfs2_fitrim(struct file *filp, void __user *argp);
++void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
++ u64 block);
++void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
++ unsigned int state, u16 flags);
++void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
++u64 gfs2_ri_total(struct gfs2_sbd *sdp);
++void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
++ const char *fs_id_buf);
++int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
++ struct buffer_head *bh,
++ const struct gfs2_bitmap *bi, unsigned minlen,
++ u64 *ptrimmed);
++int gfs2_fitrim(struct file *filp, void __user *argp);
+
+ /* This is how to tell if a reservation is in the rgrp tree: */
+ static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
+@@ -88,9 +89,9 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
+ return first <= block && block < last;
+ }
+
+-extern void check_and_update_goal(struct gfs2_inode *ip);
++void check_and_update_goal(struct gfs2_inode *ip);
+
+-extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+-extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
++void rgrp_lock_local(struct gfs2_rgrpd *rgd);
++void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
+
+ #endif /* __RGRP_DOT_H__ */
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 02d93da21b2b07..1200cb80599957 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
+ sdp->sd_journals = 0;
+ spin_unlock(&sdp->sd_jindex_spin);
+
++ down_write(&sdp->sd_log_flush_lock);
+ sdp->sd_jdesc = NULL;
++ up_write(&sdp->sd_log_flush_lock);
++
+ while (!list_empty(&list)) {
+ jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
++ BUG_ON(jd->jd_log_bio);
+ gfs2_free_journal_extents(jd);
+ list_del(&jd->jd_list);
+ iput(jd->jd_inode);
+@@ -134,7 +138,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ int error;
+
+ j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ return -EIO;
+
+ error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
+@@ -153,7 +157,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ gfs2_log_pointers_init(sdp, head.lh_blkno);
+
+ error = gfs2_quota_init(sdp);
+- if (!error && gfs2_withdrawn(sdp))
++ if (!error && gfs2_withdrawing_or_withdrawn(sdp))
+ error = -EIO;
+ if (!error)
+ set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+@@ -499,7 +503,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
+ return;
+ }
+
+- if (unlikely(gfs2_withdrawn(sdp)))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ return;
+ if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+@@ -602,13 +606,15 @@ static void gfs2_put_super(struct super_block *sb)
+ }
+ spin_unlock(&sdp->sd_jindex_spin);
+
+- if (!sb_rdonly(sb)) {
++ if (!sb_rdonly(sb))
+ gfs2_make_fs_ro(sdp);
+- }
+- if (gfs2_withdrawn(sdp)) {
+- gfs2_destroy_threads(sdp);
++ else {
++ if (gfs2_withdrawing_or_withdrawn(sdp))
++ gfs2_destroy_threads(sdp);
++
+ gfs2_quota_cleanup(sdp);
+ }
++
+ WARN_ON(gfs2_withdrawing(sdp));
+
+ /* At this point, we're through modifying the disk */
+@@ -644,10 +650,7 @@ static void gfs2_put_super(struct super_block *sb)
+ gfs2_gl_hash_clear(sdp);
+ truncate_inode_pages_final(&sdp->sd_aspace);
+ gfs2_delete_debugfs_file(sdp);
+- /* Unmount the locking protocol */
+- gfs2_lm_unmount(sdp);
+
+- /* At this point, we're through participating in the lockspace */
+ gfs2_sys_fs_del(sdp);
+ free_sbd(sdp);
+ }
+@@ -683,7 +686,7 @@ static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
+- if (gfs2_withdrawn(sdp)) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ if (error)
+ return error;
+@@ -816,6 +819,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
+ if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
+ goto out;
+
++ atomic_inc(&sb->s_active);
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+ error = gfs2_do_thaw(sdp);
+@@ -826,6 +830,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
+ }
+ out:
+ mutex_unlock(&sdp->sd_freeze_mutex);
++ deactivate_super(sb);
+ return error;
+ }
+
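The s_active reference taken in gfs2_thaw_super() keeps the superblock from being torn down while the thaw runs; deactivate_super() releases it afterwards. The pairing, reduced to its skeleton:

	atomic_inc(&sb->s_active);	/* pin: sb cannot be destroyed */
	/* ... gfs2_freeze_unlock() and gfs2_do_thaw() run safely ... */
	deactivate_super(sb);		/* unpin: may tear down sb if last */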
+@@ -1550,7 +1555,7 @@ static void gfs2_evict_inode(struct inode *inode)
+ wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ gfs2_glock_add_to_lru(ip->i_gl);
+ gfs2_glock_put_eventually(ip->i_gl);
+- ip->i_gl = NULL;
++ rcu_assign_pointer(ip->i_gl, NULL);
+ }
+ }
+
+@@ -1576,7 +1581,7 @@ static void gfs2_free_inode(struct inode *inode)
+ kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
+ }
+
+-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
++void free_local_statfs_inodes(struct gfs2_sbd *sdp)
+ {
+ struct local_statfs_inode *lsi, *safe;
+
+@@ -1591,8 +1596,8 @@ extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
+ }
+ }
+
+-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+- unsigned int index)
++struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
++ unsigned int index)
+ {
+ struct local_statfs_inode *lsi;
+
+diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
+index ab9c83106932db..e1f7ef9264468a 100644
+--- a/fs/gfs2/super.h
++++ b/fs/gfs2/super.h
+@@ -15,7 +15,7 @@
+ #define GFS2_FS_FORMAT_MIN (1801)
+ #define GFS2_FS_FORMAT_MAX (1802)
+
+-extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
++void gfs2_lm_unmount(struct gfs2_sbd *sdp);
+
+ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
+ {
+@@ -26,33 +26,33 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
+ return x;
+ }
+
+-extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
++void gfs2_jindex_free(struct gfs2_sbd *sdp);
+
+-extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
+-extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
+-extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
+- struct gfs2_inode **ipp);
++struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
++int gfs2_jdesc_check(struct gfs2_jdesc *jd);
++int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
++ struct gfs2_inode **ipp);
+
+-extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+-extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
+-extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
+-extern void gfs2_destroy_threads(struct gfs2_sbd *sdp);
+-extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
+-extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
+- s64 dinodes);
+-extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+- const void *buf);
+-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
+- void *buf);
+-extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
+-extern int gfs2_statfs_sync(struct super_block *sb, int type);
+-extern void gfs2_freeze_func(struct work_struct *work);
+-extern void gfs2_thaw_freeze_initiator(struct super_block *sb);
++int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
++void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
++void gfs2_online_uevent(struct gfs2_sbd *sdp);
++void gfs2_destroy_threads(struct gfs2_sbd *sdp);
++int gfs2_statfs_init(struct gfs2_sbd *sdp);
++void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
++ s64 dinodes);
++void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
++ const void *buf);
++void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
++ void *buf);
++void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
++int gfs2_statfs_sync(struct super_block *sb, int type);
++void gfs2_freeze_func(struct work_struct *work);
++void gfs2_thaw_freeze_initiator(struct super_block *sb);
+
+-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
+-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+- unsigned int index);
+-extern void free_sbd(struct gfs2_sbd *sdp);
++void free_local_statfs_inodes(struct gfs2_sbd *sdp);
++struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
++ unsigned int index);
++void free_sbd(struct gfs2_sbd *sdp);
+
+ extern struct file_system_type gfs2_fs_type;
+ extern struct file_system_type gfs2meta_fs_type;
+diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
+index 60a0206890c54c..250f340cb44d61 100644
+--- a/fs/gfs2/sys.c
++++ b/fs/gfs2/sys.c
+@@ -193,7 +193,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+
+ static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
+ {
+- unsigned int b = gfs2_withdrawn(sdp);
++ unsigned int b = gfs2_withdrawing_or_withdrawn(sdp);
+ return snprintf(buf, PAGE_SIZE, "%u\n", b);
+ }
+
+diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
+index 7e835be7032d0b..192213c7359af1 100644
+--- a/fs/gfs2/trans.c
++++ b/fs/gfs2/trans.c
+@@ -268,7 +268,7 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
+ (unsigned long long)bd->bd_bh->b_blocknr);
+ BUG();
+ }
+- if (unlikely(gfs2_withdrawn(sdp))) {
++ if (gfs2_withdrawing_or_withdrawn(sdp)) {
+ fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
+ (unsigned long long)bd->bd_bh->b_blocknr);
+ goto out_unlock;
+diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
+index c76ad9a4c75a98..f8ce5302280d31 100644
+--- a/fs/gfs2/trans.h
++++ b/fs/gfs2/trans.h
+@@ -34,17 +34,17 @@ static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned
+ return rgd->rd_length;
+ }
+
+-extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+- unsigned int blocks, unsigned int revokes,
+- unsigned long ip);
+-extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+- unsigned int revokes);
+-
+-extern void gfs2_trans_end(struct gfs2_sbd *sdp);
+-extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
+-extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
+-extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+-extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+-extern void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
++int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
++ unsigned int blocks, unsigned int revokes,
++ unsigned long ip);
++int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
++ unsigned int revokes);
++
++void gfs2_trans_end(struct gfs2_sbd *sdp);
++void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
++void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
++void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
++void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
++void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
+
+ #endif /* __TRANS_DOT_H__ */
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index da29fafb62728a..b65261e0cae3a8 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -99,12 +99,12 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ */
+ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
+ {
++ int flags = LM_FLAG_NOEXP | GL_EXACT;
+ int error;
+
+- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+- LM_FLAG_NOEXP | GL_EXACT,
++ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
+ &sdp->sd_freeze_gh);
+- if (error)
++ if (error && error != GLR_TRYFAILED)
+ fs_err(sdp, "can't lock the freeze glock: %d\n", error);
+ return error;
+ }
+@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
+ fs_err(sdp, "telling LM to unmount\n");
+ lm->lm_unmount(sdp);
+ }
+- set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
+ fs_err(sdp, "File system withdrawn\n");
+ dump_stack();
+ clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
+@@ -372,7 +371,7 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
+ const char *function, char *file, unsigned int line,
+ bool delayed)
+ {
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ return;
+
+ fs_err(sdp,
+@@ -548,7 +547,7 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file, unsigned int line,
+ bool withdraw)
+ {
+- if (gfs2_withdrawn(sdp))
++ if (gfs2_withdrawing_or_withdrawn(sdp))
+ return;
+
+ fs_err(sdp, "fatal: I/O error\n"
+diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
+index cdb839529175d8..ba071998461fd4 100644
+--- a/fs/gfs2/util.h
++++ b/fs/gfs2/util.h
+@@ -147,10 +147,10 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
+ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
+ char *file, unsigned int line);
+
+-extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+- bool verbose);
+-extern int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
+-extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
++int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
++ bool verbose);
++int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
++void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+
+ #define gfs2_io_error(sdp) \
+ gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
+@@ -198,13 +198,14 @@ static inline void gfs2_withdraw_delayed(struct gfs2_sbd *sdp)
+ }
+
+ /**
+- * gfs2_withdrawn - test whether the file system is withdrawing or withdrawn
++ * gfs2_withdrawing_or_withdrawn - test whether the file system is withdrawing
++ * or withdrawn
+ * @sdp: the superblock
+ */
+-static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
++static inline bool gfs2_withdrawing_or_withdrawn(struct gfs2_sbd *sdp)
+ {
+- return test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
+- test_bit(SDF_WITHDRAWING, &sdp->sd_flags);
++ return unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
++ test_bit(SDF_WITHDRAWING, &sdp->sd_flags));
+ }
+
+ /**
+@@ -213,13 +214,13 @@ static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
+ */
+ static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
+ {
+- return test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
+- !test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
++ return unlikely(test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
++ !test_bit(SDF_WITHDRAWN, &sdp->sd_flags));
+ }
+
+ static inline bool gfs2_withdraw_in_prog(struct gfs2_sbd *sdp)
+ {
+- return test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
++ return unlikely(test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags));
+ }
+
+ #define gfs2_tune_get(sdp, field) \
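Alongside the rename, the withdraw predicates absorb the unlikely() annotations their callers used to carry, which is why the meta_io.c and trans.c hunks above drop it at the call sites. unlikely() only shapes code layout, never the result:

	/* unlikely(x) expands to __builtin_expect(!!(x), 0): the branch
	 * still tests the same condition, but the compiler places the
	 * withdrawn path out of line as the cold case. */
	#define unlikely_sketch(x)	__builtin_expect(!!(x), 0)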
+diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
+index 4fea70c0fe3d17..2117011c8c5778 100644
+--- a/fs/gfs2/xattr.c
++++ b/fs/gfs2/xattr.c
+@@ -639,7 +639,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
+ u64 block;
+ int error;
+
+- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
++ error = gfs2_alloc_blocks(ip, &block, &n, 0);
+ if (error)
+ return error;
+ gfs2_trans_remove_revoke(sdp, block, 1);
+@@ -701,7 +701,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
+ int mh_size = sizeof(struct gfs2_meta_header);
+ unsigned int n = 1;
+
+- error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
++ error = gfs2_alloc_blocks(ip, &block, &n, 0);
+ if (error)
+ return error;
+ gfs2_trans_remove_revoke(sdp, block, 1);
+@@ -1002,7 +1002,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
+ } else {
+ u64 blk;
+ unsigned int n = 1;
+- error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
++ error = gfs2_alloc_blocks(ip, &blk, &n, 0);
+ if (error)
+ return error;
+ gfs2_trans_remove_revoke(sdp, blk, 1);
+diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
+index 2aed9d7d483d5b..eb12eb7e37c194 100644
+--- a/fs/gfs2/xattr.h
++++ b/fs/gfs2/xattr.h
+@@ -50,14 +50,14 @@ struct gfs2_ea_location {
+ struct gfs2_ea_header *el_prev;
+ };
+
+-extern int __gfs2_xattr_set(struct inode *inode, const char *name,
+- const void *value, size_t size,
+- int flags, int type);
+-extern ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
+-extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
++int __gfs2_xattr_set(struct inode *inode, const char *name,
++ const void *value, size_t size,
++ int flags, int type);
++ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
++int gfs2_ea_dealloc(struct gfs2_inode *ip);
+
+ /* Exported to acl.c */
+
+-extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
++int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
+
+ #endif /* __EATTR_DOT_H__ */
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index ee349b72cfb3cd..61ed76d1039276 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -204,6 +204,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
+ HFS_I(inode)->flags = 0;
+ HFS_I(inode)->rsrc_inode = NULL;
+ HFS_I(inode)->fs_blocks = 0;
++ HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
+ if (S_ISDIR(mode)) {
+ inode->i_size = 2;
+ HFS_SB(sb)->folder_count++;
+@@ -279,6 +280,8 @@ void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
+ for (count = 0, i = 0; i < 3; i++)
+ count += be16_to_cpu(ext[i].count);
+ HFS_I(inode)->first_blocks = count;
++ HFS_I(inode)->cached_start = 0;
++ HFS_I(inode)->cached_blocks = 0;
+
+ inode->i_size = HFS_I(inode)->phys_size = log_size;
+ HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
+index ca2ba8c9f82ef2..901e83d65d2021 100644
+--- a/fs/hfsplus/bfind.c
++++ b/fs/hfsplus/bfind.c
+@@ -25,19 +25,8 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ fd->key = ptr + tree->max_key_len + 2;
+ hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ tree->cnid, __builtin_return_address(0));
+- switch (tree->cnid) {
+- case HFSPLUS_CAT_CNID:
+- mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+- break;
+- case HFSPLUS_EXT_CNID:
+- mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+- break;
+- case HFSPLUS_ATTR_CNID:
+- mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+- break;
+- default:
+- BUG();
+- }
++ mutex_lock_nested(&tree->tree_lock,
++ hfsplus_btree_lock_class(tree));
+ return 0;
+ }
+
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index 3c572e44f2adf7..9c51867dddc51f 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -430,7 +430,8 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
+ hfsplus_free_extents(sb, ext_entry, total_blocks - start,
+ total_blocks);
+ total_blocks = start;
+- mutex_lock(&fd.tree->tree_lock);
++ mutex_lock_nested(&fd.tree->tree_lock,
++ hfsplus_btree_lock_class(fd.tree));
+ } while (total_blocks > blocks);
+ hfs_find_exit(&fd);
+
+@@ -592,7 +593,8 @@ void hfsplus_file_truncate(struct inode *inode)
+ alloc_cnt, alloc_cnt - blk_cnt);
+ hfsplus_dump_extent(hip->first_extents);
+ hip->first_blocks = blk_cnt;
+- mutex_lock(&fd.tree->tree_lock);
++ mutex_lock_nested(&fd.tree->tree_lock,
++ hfsplus_btree_lock_class(fd.tree));
+ break;
+ }
+ res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
+@@ -606,7 +608,8 @@ void hfsplus_file_truncate(struct inode *inode)
+ hfsplus_free_extents(sb, hip->cached_extents,
+ alloc_cnt - start, alloc_cnt - blk_cnt);
+ hfsplus_dump_extent(hip->cached_extents);
+- mutex_lock(&fd.tree->tree_lock);
++ mutex_lock_nested(&fd.tree->tree_lock,
++ hfsplus_btree_lock_class(fd.tree));
+ if (blk_cnt > start) {
+ hip->extent_state |= HFSPLUS_EXT_DIRTY;
+ break;
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index 7ededcb720c121..583c196ecd5206 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -552,6 +552,27 @@ static inline __be32 __hfsp_ut2mt(time64_t ut)
+ return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
+ }
+
++static inline enum hfsplus_btree_mutex_classes
++hfsplus_btree_lock_class(struct hfs_btree *tree)
++{
++ enum hfsplus_btree_mutex_classes class;
++
++ switch (tree->cnid) {
++ case HFSPLUS_CAT_CNID:
++ class = CATALOG_BTREE_MUTEX;
++ break;
++ case HFSPLUS_EXT_CNID:
++ class = EXTENTS_BTREE_MUTEX;
++ break;
++ case HFSPLUS_ATTR_CNID:
++ class = ATTR_BTREE_MUTEX;
++ break;
++ default:
++ BUG();
++ }
++ return class;
++}
++
+ /* compatibility */
+ #define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
+ #define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
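hfsplus_btree_lock_class() centralises the CNID-to-lockdep-subclass mapping so bfind.c and extents.c no longer open-code the switch, and the truncate/free-fork paths stop retaking the tree lock with subclass 0 (plain mutex_lock()). Typical use, as in the hunks above:

	/* mutex_lock_nested() passes the subclass to lockdep so that
	 * intentionally nested b-tree locks (catalog -> extents -> attr)
	 * are not reported as self-deadlock: */
	mutex_lock_nested(&tree->tree_lock, hfsplus_btree_lock_class(tree));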
+diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
+index 58021e73c00bf7..f7f9d0889df342 100644
+--- a/fs/hfsplus/xattr.c
++++ b/fs/hfsplus/xattr.c
+@@ -698,7 +698,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ return err;
+ }
+
+- strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
++ strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
+ XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
+ if (!strbuf) {
+ res = -ENOMEM;
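Switching strbuf to kzalloc() guarantees the listxattr scratch buffer starts zeroed, so short or failed name conversions cannot expose stale heap bytes. The relationship between the two allocators:

	/* kzalloc(n, f) is kmalloc(n, f) plus zeroing, i.e. roughly: */
	void *p = kmalloc(len, GFP_KERNEL);
	if (p)
		memset(p, 0, len);	/* what kzalloc() does for you */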
+diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
+index 0239e3af394551..8b39c15c408ccd 100644
+--- a/fs/hostfs/hostfs.h
++++ b/fs/hostfs/hostfs.h
+@@ -63,9 +63,10 @@ struct hostfs_stat {
+ struct hostfs_timespec atime, mtime, ctime;
+ unsigned int blksize;
+ unsigned long long blocks;
+- unsigned int maj;
+- unsigned int min;
+- dev_t dev;
++ struct {
++ unsigned int maj;
++ unsigned int min;
++ } rdev, dev;
+ };
+
+ extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index dc5a5cea5fae41..ff201753fd1814 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -526,10 +526,11 @@ static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
+ static int hostfs_inode_set(struct inode *ino, void *data)
+ {
+ struct hostfs_stat *st = data;
+- dev_t rdev;
++ dev_t dev, rdev;
+
+ /* Reencode maj and min with the kernel encoding.*/
+- rdev = MKDEV(st->maj, st->min);
++ rdev = MKDEV(st->rdev.maj, st->rdev.min);
++ dev = MKDEV(st->dev.maj, st->dev.min);
+
+ switch (st->mode & S_IFMT) {
+ case S_IFLNK:
+@@ -555,7 +556,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
+ return -EIO;
+ }
+
+- HOSTFS_I(ino)->dev = st->dev;
++ HOSTFS_I(ino)->dev = dev;
+ ino->i_ino = st->ino;
+ ino->i_mode = st->mode;
+ return hostfs_inode_update(ino, st);
+@@ -564,8 +565,9 @@ static int hostfs_inode_set(struct inode *ino, void *data)
+ static int hostfs_inode_test(struct inode *inode, void *data)
+ {
+ const struct hostfs_stat *st = data;
++ dev_t dev = MKDEV(st->dev.maj, st->dev.min);
+
+- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == st->dev;
++ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
+ }
+
+ static struct inode *hostfs_iget(struct super_block *sb, char *name)
+diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
+index 840619e39a1a69..97e9c40a944883 100644
+--- a/fs/hostfs/hostfs_user.c
++++ b/fs/hostfs/hostfs_user.c
+@@ -34,9 +34,10 @@ static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+ p->mtime.tv_nsec = 0;
+ p->blksize = buf->st_blksize;
+ p->blocks = buf->st_blocks;
+- p->maj = os_major(buf->st_rdev);
+- p->min = os_minor(buf->st_rdev);
+- p->dev = buf->st_dev;
++ p->rdev.maj = os_major(buf->st_rdev);
++ p->rdev.min = os_minor(buf->st_rdev);
++ p->dev.maj = os_major(buf->st_dev);
++ p->dev.min = os_minor(buf->st_dev);
+ }
+
+ int stat_file(const char *path, struct hostfs_stat *p, int fd)
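The host's st_dev is now carried across the hostfs boundary as a major/minor pair and re-encoded with MKDEV() on the kernel side, because the host libc's dev_t layout need not match the kernel's. A standalone sketch of the kernel encoding (mirroring include/linux/kdev_t.h):

	#include <stdio.h>

	#define MINORBITS	20	/* kernel dev_t: 12-bit major, 20-bit minor */
	#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))
	#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
	#define MINOR(dev)	((unsigned int)((dev) & ((1U << MINORBITS) - 1)))

	int main(void)
	{
		unsigned int dev = MKDEV(8, 1);	/* e.g. sda1 */

		printf("major=%u minor=%u\n", MAJOR(dev), MINOR(dev));
		return 0;
	}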
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 316c4cebd3f3de..ac519515ef6c06 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -123,6 +123,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ loff_t len, vma_len;
+ int ret;
+ struct hstate *h = hstate_file(file);
++ vm_flags_t vm_flags;
+
+ /*
+ * vma address alignment (but not the pgoff alignment) has
+@@ -164,10 +165,20 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ file_accessed(file);
+
+ ret = -ENOMEM;
++
++ vm_flags = vma->vm_flags;
++ /*
++ * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
++ * reserving here. Note: only for SHM hugetlbfs file, the inode
++ * flag S_PRIVATE is set.
++ */
++ if (inode->i_flags & S_PRIVATE)
++ vm_flags |= VM_NORESERVE;
++
+ if (!hugetlb_reserve_pages(inode,
+ vma->vm_pgoff >> huge_page_order(h),
+ len >> huge_page_shift(h), vma,
+- vma->vm_flags))
++ vm_flags))
+ goto out;
+
+ ret = 0;
+@@ -295,7 +306,7 @@ static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t byt
+ size_t res = 0;
+
+ /* First subpage to start the loop. */
+- page += offset / PAGE_SIZE;
++ page = nth_page(page, offset / PAGE_SIZE);
+ offset %= PAGE_SIZE;
+ while (1) {
+ if (is_raw_hwpoison_page_in_hugepage(page))
+@@ -309,7 +320,7 @@ static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t byt
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+- page++;
++ page = nth_page(page, 1);
+ offset = 0;
+ }
+ }
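nth_page() replaces raw pointer arithmetic here because, with SPARSEMEM and no VMEMMAP, the struct page array is not virtually contiguous and `page + n` can walk off a memory section inside a huge folio. The helper's shape in that configuration:

	/* go through the pfn instead of the memmap pointer: */
	#define nth_page_sketch(page, n)  pfn_to_page(page_to_pfn(page) + (n))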
+@@ -1390,6 +1401,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+ {
+ struct hugetlbfs_fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
++ struct hstate *h;
+ char *rest;
+ unsigned long ps;
+ int opt;
+@@ -1434,11 +1446,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_pagesize:
+ ps = memparse(param->string, &rest);
+- ctx->hstate = size_to_hstate(ps);
+- if (!ctx->hstate) {
++ h = size_to_hstate(ps);
++ if (!h) {
+ pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
+ return -EINVAL;
+ }
++ ctx->hstate = h;
+ return 0;
+
+ case Opt_min_size:
+diff --git a/fs/inode.c b/fs/inode.c
+index 84bc3c76e5ccb5..9cafde77e2b038 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -215,6 +215,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ lockdep_set_class_and_name(&mapping->invalidate_lock,
+ &sb->s_type->invalidate_lock_key,
+ "mapping.invalidate_lock");
++ if (sb->s_iflags & SB_I_STABLE_WRITES)
++ mapping_set_stable_writes(mapping);
+ inode->i_private = NULL;
+ inode->i_mapping = mapping;
+ INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
+@@ -484,6 +486,39 @@ static void inode_lru_list_del(struct inode *inode)
+ this_cpu_dec(nr_unused);
+ }
+
++static void inode_pin_lru_isolating(struct inode *inode)
++{
++ lockdep_assert_held(&inode->i_lock);
++ WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
++ inode->i_state |= I_LRU_ISOLATING;
++}
++
++static void inode_unpin_lru_isolating(struct inode *inode)
++{
++ spin_lock(&inode->i_lock);
++ WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
++ inode->i_state &= ~I_LRU_ISOLATING;
++ smp_mb();
++ wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
++ spin_unlock(&inode->i_lock);
++}
++
++static void inode_wait_for_lru_isolating(struct inode *inode)
++{
++ spin_lock(&inode->i_lock);
++ if (inode->i_state & I_LRU_ISOLATING) {
++ DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
++ wait_queue_head_t *wqh;
++
++ wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
++ spin_unlock(&inode->i_lock);
++ __wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
++ spin_lock(&inode->i_lock);
++ WARN_ON(inode->i_state & I_LRU_ISOLATING);
++ }
++ spin_unlock(&inode->i_lock);
++}
++
+ /**
+ * inode_sb_list_add - add inode to the superblock list of inodes
+ * @inode: inode to add
+@@ -652,6 +687,8 @@ static void evict(struct inode *inode)
+
+ inode_sb_list_del(inode);
+
++ inode_wait_for_lru_isolating(inode);
++
+ /*
+ * Wait for flusher thread to be done with the inode so that filesystem
+ * does not start destroying it while writeback is still running. Since
+@@ -720,6 +757,10 @@ void evict_inodes(struct super_block *sb)
+ continue;
+
+ spin_lock(&inode->i_lock);
++ if (atomic_read(&inode->i_count)) {
++ spin_unlock(&inode->i_lock);
++ continue;
++ }
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+@@ -840,7 +881,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ * be under pressure before the cache inside the highmem zone.
+ */
+ if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
+- __iget(inode);
++ inode_pin_lru_isolating(inode);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(lru_lock);
+ if (remove_inode_buffers(inode)) {
+@@ -852,7 +893,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ __count_vm_events(PGINODESTEAL, reap);
+ mm_account_reclaimed_pages(reap);
+ }
+- iput(inode);
++ inode_unpin_lru_isolating(inode);
+ spin_lock(lru_lock);
+ return LRU_RETRY;
+ }
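Pinning with I_LRU_ISOLATING instead of __iget() means the shrinker can never end up in iput_final()/evict() from reclaim context; eviction instead waits for the pin bit to clear. A reduced sketch of the pairing (the actual helpers above also hold i_lock):

	/* waker (shrinker): clear the pin, then wake bit-waiters;
	 * the barrier orders the clear before any wakeup check */
	inode->i_state &= ~I_LRU_ISOLATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);

	/* waiter (evict): sleep until the pin is gone */
	wait_on_bit(&inode->i_state, __I_LRU_ISOLATING, TASK_UNINTERRUPTIBLE);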
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index f5fd99d6b0d4ed..76cf22ac97d762 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -920,8 +920,7 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+ if (!f.file)
+ return -EBADF;
+
+- /* RED-PEN how should LSM module know it's handling 32bit? */
+- error = security_file_ioctl(f.file, cmd, arg);
++ error = security_file_ioctl_compat(f.file, cmd, arg);
+ if (error)
+ goto out;
+
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 2bc0aa23fde3b9..aedaad4c37d757 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -201,6 +201,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ unsigned block_size = (1 << block_bits);
+ size_t poff = offset_in_folio(folio, *pos);
+ size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
++ size_t orig_plen = plen;
+ unsigned first = poff >> block_bits;
+ unsigned last = (poff + plen - 1) >> block_bits;
+
+@@ -237,7 +238,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ * handle both halves separately so that we properly zero data in the
+ * page cache for blocks that are entirely outside of i_size.
+ */
+- if (orig_pos <= isize && orig_pos + length > isize) {
++ if (orig_pos <= isize && orig_pos + orig_plen > isize) {
+ unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
+
+ if (first <= end && last > end)
+@@ -868,11 +869,11 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+ {
+ loff_t length = iomap_length(iter);
+- size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ loff_t pos = iter->pos;
+ ssize_t written = 0;
+ long status = 0;
+ struct address_space *mapping = iter->inode->i_mapping;
++ size_t chunk = mapping_max_folio_size(mapping);
+ unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
+
+ do {
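The write chunk is now bounded by what this particular mapping may cache rather than the global MAX_PAGECACHE_ORDER, which matters for filesystems that cap folio order per inode. mapping_max_folio_size() is, in essence:

	/* rough shape, per include/linux/pagemap.h: */
	static inline size_t mapping_max_folio_size_sketch(struct address_space *m)
	{
		return PAGE_SIZE << mapping_max_folio_order(m);
	}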
+@@ -1176,7 +1177,15 @@ static int iomap_write_delalloc_release(struct inode *inode,
+ error = data_end;
+ goto out_unlock;
+ }
+- WARN_ON_ONCE(data_end <= start_byte);
++
++ /*
++ * If we race with post-direct I/O invalidation of the page cache,
++ * there might be no data left at start_byte.
++ */
++ if (data_end == start_byte)
++ continue;
++
++ WARN_ON_ONCE(data_end < start_byte);
+ WARN_ON_ONCE(data_end > scan_end_byte);
+
+ error = iomap_write_delalloc_scan(inode, &punch_start_byte,
+@@ -1315,11 +1324,15 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+- .len = len,
+ .flags = IOMAP_WRITE | IOMAP_UNSHARE,
+ };
++ loff_t size = i_size_read(inode);
+ int ret;
+
++ if (pos < 0 || pos >= size)
++ return 0;
++
++ iter.len = min(len, size - pos);
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.processed = iomap_unshare_iter(&iter);
+ return ret;
+@@ -1830,16 +1843,10 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ if (unlikely(error)) {
+ /*
+ * Let the filesystem know what portion of the current page
+- * failed to map. If the page hasn't been added to ioend, it
+- * won't be affected by I/O completion and we must unlock it
+- * now.
++ * failed to map.
+ */
+ if (wpc->ops->discard_folio)
+ wpc->ops->discard_folio(folio, pos);
+- if (!count) {
+- folio_unlock(folio);
+- goto done;
+- }
+ }
+
+ /*
+@@ -1848,6 +1855,16 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ * all the dirty bits in the folio here.
+ */
+ iomap_clear_range_dirty(folio, 0, folio_size(folio));
++
++ /*
++ * If the page hasn't been added to the ioend, it won't be affected by
++ * I/O completion and we must unlock it now.
++ */
++ if (error && !count) {
++ folio_unlock(folio);
++ goto done;
++ }
++
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index 2ee21286ac8f07..54075fe3de9b1f 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -908,8 +908,22 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
+ * we then decide whether to use the Joliet descriptor.
+ */
+ inode = isofs_iget(s, sbi->s_firstdatazone, 0);
+- if (IS_ERR(inode))
+- goto out_no_root;
++
++ /*
++ * Fix for broken CDs with a corrupt root inode but a correct Joliet
++ * root directory.
++ */
++ if (IS_ERR(inode)) {
++ if (joliet_level && sbi->s_firstdatazone != first_data_zone) {
++ printk(KERN_NOTICE
++ "ISOFS: root inode is unusable. "
++ "Disabling Rock Ridge and switching to Joliet.");
++ sbi->s_rock = 0;
++ inode = NULL;
++ } else {
++ goto out_no_root;
++ }
++ }
+
+ /*
+ * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 118699fff2f900..8fda66c98a610f 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -79,17 +79,23 @@ __releases(&journal->j_state_lock)
+ if (space_left < nblocks) {
+ int chkpt = journal->j_checkpoint_transactions != NULL;
+ tid_t tid = 0;
++ bool has_transaction = false;
+
+- if (journal->j_committing_transaction)
++ if (journal->j_committing_transaction) {
+ tid = journal->j_committing_transaction->t_tid;
++ has_transaction = true;
++ }
+ spin_unlock(&journal->j_list_lock);
+ write_unlock(&journal->j_state_lock);
+ if (chkpt) {
+ jbd2_log_do_checkpoint(journal);
+- } else if (jbd2_cleanup_journal_tail(journal) == 0) {
+- /* We were able to recover space; yay! */
++ } else if (jbd2_cleanup_journal_tail(journal) <= 0) {
++ /*
++ * We were able to recover space or the
++ * journal was aborted due to an error.
++ */
+ ;
+- } else if (tid) {
++ } else if (has_transaction) {
+ /*
+ * jbd2_journal_commit_transaction() may want
+ * to take the checkpoint_mutex if JBD2_FLUSHED
+@@ -409,6 +415,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+ tid_t tid = 0;
+ unsigned long nr_freed = 0;
+ unsigned long freed;
++ bool first_set = false;
+
+ again:
+ spin_lock(&journal->j_list_lock);
+@@ -428,8 +435,10 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+ else
+ transaction = journal->j_checkpoint_transactions;
+
+- if (!first_tid)
++ if (!first_set) {
+ first_tid = transaction->t_tid;
++ first_set = true;
++ }
+ last_transaction = journal->j_checkpoint_transactions->t_cpprev;
+ next_transaction = transaction;
+ last_tid = last_transaction->t_tid;
+@@ -459,7 +468,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
+ spin_unlock(&journal->j_list_lock);
+ cond_resched();
+
+- if (*nr_to_scan && next_tid)
++ if (*nr_to_scan && journal->j_shrink_transaction)
+ goto again;
+ out:
+ trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 8d6f934c3d9543..0cd7439470fc43 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -119,7 +119,7 @@ static int journal_submit_commit_record(journal_t *journal,
+ struct commit_header *tmp;
+ struct buffer_head *bh;
+ struct timespec64 now;
+- blk_opf_t write_flags = REQ_OP_WRITE | REQ_SYNC;
++ blk_opf_t write_flags = REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS;
+
+ *cbh = NULL;
+
+@@ -270,6 +270,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+ if (!ret)
+ ret = err;
+ }
++ cond_resched();
+ spin_lock(&journal->j_list_lock);
+ jinode->i_flags &= ~JI_COMMIT_RUNNING;
+ smp_mb();
+@@ -395,8 +396,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ */
+ jbd2_journal_update_sb_log_tail(journal,
+ journal->j_tail_sequence,
+- journal->j_tail,
+- REQ_SYNC);
++ journal->j_tail, 0);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ } else {
+ jbd2_debug(3, "superblock not updated\n");
+@@ -715,6 +715,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+
+ for (i = 0; i < bufs; i++) {
+ struct buffer_head *bh = wbuf[i];
++
+ /*
+ * Compute checksum.
+ */
+@@ -727,7 +728,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ clear_buffer_dirty(bh);
+ set_buffer_uptodate(bh);
+ bh->b_end_io = journal_end_buffer_io_sync;
+- submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
++ submit_bh(REQ_OP_WRITE | JBD2_JOURNAL_REQ_FLAGS,
++ bh);
+ }
+ cond_resched();
+
+@@ -765,7 +767,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ if (first_block < journal->j_tail)
+ freed += journal->j_last - journal->j_first;
+ /* Update tail only if we free significant amount of space */
+- if (freed < jbd2_journal_get_max_txn_bufs(journal))
++ if (freed < journal->j_max_transaction_buffers)
+ update_tail = 0;
+ }
+ J_ASSERT(commit_transaction->t_state == T_COMMIT);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 30dec2bd2ecc26..dfbb8f73861f64 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -399,6 +399,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+ tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
+ if (!tmp) {
+ brelse(new_bh);
++ free_buffer_head(new_bh);
+ return -ENOMEM;
+ }
+ spin_lock(&jh_in->b_state_lock);
+@@ -724,7 +725,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
+ return -EINVAL;
+
+ write_lock(&journal->j_state_lock);
+- if (tid <= journal->j_commit_sequence) {
++ if (tid_geq(journal->j_commit_sequence, tid)) {
+ write_unlock(&journal->j_state_lock);
+ return -EALREADY;
+ }
+@@ -754,9 +755,9 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit);
+ */
+ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
+ {
+- jbd2_journal_unlock_updates(journal);
+ if (journal->j_fc_cleanup_callback)
+ journal->j_fc_cleanup_callback(journal, 0, tid);
++ jbd2_journal_unlock_updates(journal);
+ write_lock(&journal->j_state_lock);
+ journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
+ if (fallback)
+@@ -1100,8 +1101,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ * space and if we lose sb update during power failure we'd replay
+ * old transaction with possibly newly overwritten data.
+ */
+- ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
+- REQ_SYNC | REQ_FUA);
++ ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
+ if (ret)
+ goto out;
+
+@@ -1452,6 +1452,48 @@ static int journal_revoke_records_per_block(journal_t *journal)
+ return space / record_size;
+ }
+
++static int jbd2_journal_get_max_txn_bufs(journal_t *journal)
++{
++ return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
++}
++
++/*
++ * Base amount of descriptor blocks we reserve for each transaction.
++ */
++static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
++{
++ int tag_space = journal->j_blocksize - sizeof(journal_header_t);
++ int tags_per_block;
++
++ /* Subtract UUID */
++ tag_space -= 16;
++ if (jbd2_journal_has_csum_v2or3(journal))
++ tag_space -= sizeof(struct jbd2_journal_block_tail);
++ /* Commit code leaves a slack space of 16 bytes at the end of block */
++ tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
++ /*
++ * Revoke descriptors are accounted separately so we need to reserve
++ * space for commit block and normal transaction descriptor blocks.
++ */
++ return 1 + DIV_ROUND_UP(jbd2_journal_get_max_txn_bufs(journal),
++ tags_per_block);
++}
++
++/*
++ * Initialize number of blocks each transaction reserves for its bookkeeping
++ * and maximum number of blocks a transaction can use. This needs to be called
++ * after the journal size and the fastcommit area size are initialized.
++ */
++static void jbd2_journal_init_transaction_limits(journal_t *journal)
++{
++ journal->j_revoke_records_per_block =
++ journal_revoke_records_per_block(journal);
++ journal->j_transaction_overhead_buffers =
++ jbd2_descriptor_blocks_per_trans(journal);
++ journal->j_max_transaction_buffers =
++ jbd2_journal_get_max_txn_bufs(journal);
++}
++
+ /*
+ * Load the on-disk journal superblock and read the key fields into the
+ * journal_t.
+@@ -1493,8 +1535,8 @@ static int journal_load_superblock(journal_t *journal)
+ if (jbd2_journal_has_csum_v2or3(journal))
+ journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
+- journal->j_revoke_records_per_block =
+- journal_revoke_records_per_block(journal);
++ /* After journal features are set, we can compute transaction limits */
++ jbd2_journal_init_transaction_limits(journal);
+
+ if (jbd2_has_feature_fast_commit(journal)) {
+ journal->j_fc_last = be32_to_cpu(sb->s_maxlen);
+@@ -1736,8 +1778,6 @@ static int journal_reset(journal_t *journal)
+ journal->j_commit_sequence = journal->j_transaction_sequence - 1;
+ journal->j_commit_request = journal->j_commit_sequence;
+
+- journal->j_max_transaction_buffers = jbd2_journal_get_max_txn_bufs(journal);
+-
+ /*
+ * Now that journal recovery is done, turn fast commits off here. This
+ * way, if fast commit was enabled before the crash but if now FS has
+@@ -1768,8 +1808,7 @@ static int journal_reset(journal_t *journal)
+ */
+ jbd2_journal_update_sb_log_tail(journal,
+ journal->j_tail_sequence,
+- journal->j_tail,
+- REQ_SYNC | REQ_FUA);
++ journal->j_tail, REQ_FUA);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }
+ return jbd2_journal_start_thread(journal);
+@@ -1791,9 +1830,16 @@ static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags)
+ return -EIO;
+ }
+
+- trace_jbd2_write_superblock(journal, write_flags);
++ /*
++ * Always set high priority flags to exempt from block layer's
++ * QOS policies, e.g. writeback throttle.
++ */
++ write_flags |= JBD2_JOURNAL_REQ_FLAGS;
+ if (!(journal->j_flags & JBD2_BARRIER))
+ write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
++
++ trace_jbd2_write_superblock(journal, write_flags);
++
+ if (buffer_write_io_error(bh)) {
+ /*
+ * Oh, dear. A previous attempt to write the journal
+@@ -2043,7 +2089,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
+ jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
+ sb->s_errno = cpu_to_be32(errcode);
+
+- jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
++ jbd2_write_superblock(journal, REQ_FUA);
+ }
+ EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
+
+@@ -2164,8 +2210,7 @@ int jbd2_journal_destroy(journal_t *journal)
+ ++journal->j_transaction_sequence;
+ write_unlock(&journal->j_state_lock);
+
+- jbd2_mark_journal_empty(journal,
+- REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
++ jbd2_mark_journal_empty(journal, REQ_PREFLUSH | REQ_FUA);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ } else
+ err = -EIO;
+@@ -2273,8 +2318,6 @@ jbd2_journal_initialize_fast_commit(journal_t *journal)
+ journal->j_fc_first = journal->j_last + 1;
+ journal->j_fc_off = 0;
+ journal->j_free = journal->j_last - journal->j_first;
+- journal->j_max_transaction_buffers =
+- jbd2_journal_get_max_txn_bufs(journal);
+
+ return 0;
+ }
+@@ -2362,8 +2405,7 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
+ sb->s_feature_ro_compat |= cpu_to_be32(ro);
+ sb->s_feature_incompat |= cpu_to_be32(incompat);
+ unlock_buffer(journal->j_sb_buffer);
+- journal->j_revoke_records_per_block =
+- journal_revoke_records_per_block(journal);
++ jbd2_journal_init_transaction_limits(journal);
+
+ return 1;
+ #undef COMPAT_FEATURE_ON
+@@ -2394,8 +2436,7 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
+ sb->s_feature_compat &= ~cpu_to_be32(compat);
+ sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
+ sb->s_feature_incompat &= ~cpu_to_be32(incompat);
+- journal->j_revoke_records_per_block =
+- journal_revoke_records_per_block(journal);
++ jbd2_journal_init_transaction_limits(journal);
+ }
+ EXPORT_SYMBOL(jbd2_journal_clear_features);
+
+@@ -2466,7 +2507,7 @@ int jbd2_journal_flush(journal_t *journal, unsigned int flags)
+ * the magic code for a fully-recovered superblock. Any future
+ * commits of data to the journal will restore the current
+ * s_start value. */
+- jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
++ jbd2_mark_journal_empty(journal, REQ_FUA);
+
+ if (flags)
+ err = __jbd2_journal_erase(journal, flags);
+@@ -2512,7 +2553,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ if (write) {
+ /* Lock to make assertions happy... */
+ mutex_lock_io(&journal->j_checkpoint_mutex);
+- jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
++ jbd2_mark_journal_empty(journal, REQ_FUA);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }
+
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index c269a7d29a4653..421c0d360836e0 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -289,6 +289,8 @@ int jbd2_journal_recover(journal_t *journal)
+ journal_superblock_t * sb;
+
+ struct recovery_info info;
++ errseq_t wb_err;
++ struct address_space *mapping;
+
+ memset(&info, 0, sizeof(info));
+ sb = journal->j_superblock;
+@@ -306,6 +308,9 @@ int jbd2_journal_recover(journal_t *journal)
+ return 0;
+ }
+
++ wb_err = 0;
++ mapping = journal->j_fs_dev->bd_inode->i_mapping;
++ errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ err = do_one_pass(journal, &info, PASS_SCAN);
+ if (!err)
+ err = do_one_pass(journal, &info, PASS_REVOKE);
+@@ -327,6 +332,9 @@ int jbd2_journal_recover(journal_t *journal)
+
+ jbd2_journal_clear_revoke(journal);
+ err2 = sync_blockdev(journal->j_fs_dev);
++ if (!err)
++ err = err2;
++ err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ if (!err)
+ err = err2;
+ /* Make sure all replayed data is on permanent storage */
+@@ -440,6 +448,27 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ return provided == cpu_to_be32(calculated);
+ }
+
++static bool jbd2_commit_block_csum_verify_partial(journal_t *j, void *buf)
++{
++ struct commit_header *h;
++ __be32 provided;
++ __u32 calculated;
++ void *tmpbuf;
++
++ tmpbuf = kzalloc(j->j_blocksize, GFP_KERNEL);
++ if (!tmpbuf)
++ return false;
++
++ memcpy(tmpbuf, buf, sizeof(struct commit_header));
++ h = tmpbuf;
++ provided = h->h_chksum[0];
++ h->h_chksum[0] = 0;
++ calculated = jbd2_chksum(j, j->j_csum_seed, tmpbuf, j->j_blocksize);
++ kfree(tmpbuf);
++
++ return provided == cpu_to_be32(calculated);
++}
++
+ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+ journal_block_tag3_t *tag3,
+ void *buf, __u32 sequence)
+@@ -806,6 +835,13 @@ static int do_one_pass(journal_t *journal,
+ if (pass == PASS_SCAN &&
+ !jbd2_commit_block_csum_verify(journal,
+ bh->b_data)) {
++ if (jbd2_commit_block_csum_verify_partial(
++ journal,
++ bh->b_data)) {
++ pr_notice("JBD2: Find incomplete commit block in transaction %u block %lu\n",
++ next_commit_ID, next_log_block);
++ goto chksum_ok;
++ }
+ chksum_error:
+ if (commit_time < last_trans_commit_time)
+ goto ignore_crc_mismatch;
+@@ -820,6 +856,7 @@ static int do_one_pass(journal_t *journal,
+ }
+ }
+ if (pass == PASS_SCAN) {
++ chksum_ok:
+ last_trans_commit_time = commit_time;
+ head_block = next_log_block;
+ }
+@@ -839,6 +876,7 @@ static int do_one_pass(journal_t *journal,
+ next_log_block);
+ need_check_commit_time = true;
+ }
++
+ /* If we aren't in the REVOKE pass, then we can
+ * just skip over this block. */
+ if (pass != PASS_REVOKE) {
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 5f08b5fd105a31..76adab83cac368 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -62,28 +62,6 @@ void jbd2_journal_free_transaction(transaction_t *transaction)
+ kmem_cache_free(transaction_cache, transaction);
+ }
+
+-/*
+- * Base amount of descriptor blocks we reserve for each transaction.
+- */
+-static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
+-{
+- int tag_space = journal->j_blocksize - sizeof(journal_header_t);
+- int tags_per_block;
+-
+- /* Subtract UUID */
+- tag_space -= 16;
+- if (jbd2_journal_has_csum_v2or3(journal))
+- tag_space -= sizeof(struct jbd2_journal_block_tail);
+- /* Commit code leaves a slack space of 16 bytes at the end of block */
+- tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
+- /*
+- * Revoke descriptors are accounted separately so we need to reserve
+- * space for commit block and normal transaction descriptor blocks.
+- */
+- return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
+- tags_per_block);
+-}
+-
+ /*
+ * jbd2_get_transaction: obtain a new transaction_t object.
+ *
+@@ -109,7 +87,7 @@ static void jbd2_get_transaction(journal_t *journal,
+ transaction->t_expires = jiffies + journal->j_commit_interval;
+ atomic_set(&transaction->t_updates, 0);
+ atomic_set(&transaction->t_outstanding_credits,
+- jbd2_descriptor_blocks_per_trans(journal) +
++ journal->j_transaction_overhead_buffers +
+ atomic_read(&journal->j_reserved_credits));
+ atomic_set(&transaction->t_outstanding_revokes, 0);
+ atomic_set(&transaction->t_handle_count, 0);
+@@ -213,6 +191,13 @@ static void sub_reserved_credits(journal_t *journal, int blocks)
+ wake_up(&journal->j_wait_reserved);
+ }
+
++/* Maximum number of blocks for user transaction payload */
++static int jbd2_max_user_trans_buffers(journal_t *journal)
++{
++ return journal->j_max_transaction_buffers -
++ journal->j_transaction_overhead_buffers;
++}
++
+ /*
+ * Wait until we can add credits for handle to the running transaction. Called
+ * with j_state_lock held for reading. Returns 0 if handle joined the running
+@@ -262,12 +247,12 @@ __must_hold(&journal->j_state_lock)
+ * big to fit this handle? Wait until reserved credits are freed.
+ */
+ if (atomic_read(&journal->j_reserved_credits) + total >
+- journal->j_max_transaction_buffers) {
++ jbd2_max_user_trans_buffers(journal)) {
+ read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
+ wait_event(journal->j_wait_reserved,
+ atomic_read(&journal->j_reserved_credits) + total <=
+- journal->j_max_transaction_buffers);
++ jbd2_max_user_trans_buffers(journal));
+ __acquire(&journal->j_state_lock); /* fake out sparse */
+ return 1;
+ }
+@@ -307,14 +292,14 @@ __must_hold(&journal->j_state_lock)
+
+ needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
+ /* We allow at most half of a transaction to be reserved */
+- if (needed > journal->j_max_transaction_buffers / 2) {
++ if (needed > jbd2_max_user_trans_buffers(journal) / 2) {
+ sub_reserved_credits(journal, rsv_blocks);
+ atomic_sub(total, &t->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
+ wait_event(journal->j_wait_reserved,
+ atomic_read(&journal->j_reserved_credits) + rsv_blocks
+- <= journal->j_max_transaction_buffers / 2);
++ <= jbd2_max_user_trans_buffers(journal) / 2);
+ __acquire(&journal->j_state_lock); /* fake out sparse */
+ return 1;
+ }
+@@ -344,12 +329,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
+ * size and limit the number of total credits to not exceed maximum
+ * transaction size per operation.
+ */
+- if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
+- (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
++ if (rsv_blocks > jbd2_max_user_trans_buffers(journal) / 2 ||
++ rsv_blocks + blocks > jbd2_max_user_trans_buffers(journal)) {
+ printk(KERN_ERR "JBD2: %s wants too many credits "
+ "credits:%d rsv_credits:%d max:%d\n",
+ current->comm, blocks, rsv_blocks,
+- journal->j_max_transaction_buffers);
++ jbd2_max_user_trans_buffers(journal));
+ WARN_ON(1);
+ return -ENOSPC;
+ }
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 7ea37f49f1e18e..e71f4c94c44832 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
+ struct jffs2_inode_info *f = foo;
+
+ mutex_init(&f->sem);
++ f->target = NULL;
+ inode_init_once(&f->vfs_inode);
+ }
+
+diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
+index 3b6bdc9a49e1b0..23c1f6a120f0c4 100644
+--- a/fs/jffs2/xattr.c
++++ b/fs/jffs2/xattr.c
+@@ -1110,6 +1110,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
+ return rc;
+
+ request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
++ if (request > c->sector_size - c->cleanmarker_size)
++ return -ERANGE;
++
+ rc = jffs2_reserve_space(c, request, &length,
+ ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
+ if (rc) {
+diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
+index 6b231d0d0071ba..603aae17a69343 100644
+--- a/fs/jfs/jfs_dinode.h
++++ b/fs/jfs/jfs_dinode.h
+@@ -96,7 +96,7 @@ struct dinode {
+ #define di_gengen u._file._u1._imap._gengen
+
+ union {
+- xtpage_t _xtroot;
++ xtroot_t _xtroot;
+ struct {
+ u8 unused[16]; /* 16: */
+ dxd_t _dxd; /* 16: */
+diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
+index 575cb2ba74fc86..5f4b305030ad5e 100644
+--- a/fs/jfs/jfs_discard.c
++++ b/fs/jfs/jfs_discard.c
+@@ -65,7 +65,7 @@ void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks)
+ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
+ {
+ struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+- struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
++ struct bmap *bmp;
+ struct super_block *sb = ipbmap->i_sb;
+ int agno, agno_end;
+ u64 start, end, minlen;
+@@ -83,10 +83,15 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
+ if (minlen == 0)
+ minlen = 1;
+
++ down_read(&sb->s_umount);
++ bmp = JFS_SBI(ip->i_sb)->bmap;
++
+ if (minlen > bmp->db_agsize ||
+ start >= bmp->db_mapsize ||
+- range->len < sb->s_blocksize)
++ range->len < sb->s_blocksize) {
++ up_read(&sb->s_umount);
+ return -EINVAL;
++ }
+
+ if (end >= bmp->db_mapsize)
+ end = bmp->db_mapsize - 1;
+@@ -100,6 +105,8 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
+ trimmed += dbDiscardAG(ip, agno, minlen);
+ agno++;
+ }
++
++ up_read(&sb->s_umount);
+ range->len = trimmed << sb->s_blocksize_bits;
+
+ return 0;
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 88afd108c2dd2e..974ecf5e0d9522 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -63,10 +63,10 @@
+ */
+ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ int nblocks);
+-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
+-static int dbBackSplit(dmtree_t * tp, int leafno);
+-static int dbJoin(dmtree_t * tp, int leafno, int newval);
+-static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
++static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl);
++static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl);
++static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl);
++static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl);
+ static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
+ int level);
+ static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
+@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
+ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
+ static int dbFindBits(u32 word, int l2nb);
+ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
+ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ int nblocks);
+ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+@@ -180,13 +180,14 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
++ if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
++ bmp->db_l2nbperpage < 0) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+- if (!bmp->db_numag) {
++ if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+@@ -194,6 +195,12 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
++ if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
++ bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
+ bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+@@ -645,7 +652,7 @@ int dbNextAG(struct inode *ipbmap)
+ * average free space.
+ */
+ for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
+- if (agpref == bmp->db_numag)
++ if (agpref >= bmp->db_numag)
+ agpref = 0;
+
+ if (atomic_read(&bmp->db_active[agpref]))
+@@ -1619,6 +1626,8 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
+ } else if (rc == -ENOSPC) {
+ /* search for next smaller log2 block */
+ l2nb = BLKSTOL2(nblocks) - 1;
++ if (unlikely(l2nb < 0))
++ break;
+ nblocks = 1LL << l2nb;
+ } else {
+ /* Trim any already allocated blocks */
+@@ -1710,7 +1719,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
+ * dbFindLeaf() returns the index of the leaf at which
+ * free space was found.
+ */
+- rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
++ rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
+
+ /* release the buffer.
+ */
+@@ -1957,7 +1966,7 @@ dbAllocDmapLev(struct bmap * bmp,
+ * free space. if sufficient free space is found, dbFindLeaf()
+ * returns the index of the leaf at which free space was found.
+ */
+- if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
++ if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
+ return -ENOSPC;
+
+ if (leafidx < 0)
+@@ -2096,7 +2105,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ * system.
+ */
+ if (dp->tree.stree[word] == NOFREE)
+- dbBackSplit((dmtree_t *) & dp->tree, word);
++ dbBackSplit((dmtree_t *)&dp->tree, word, false);
+
+ dbAllocBits(bmp, dp, blkno, nblocks);
+ }
+@@ -2182,7 +2191,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ * the binary system of the leaves if need be.
+ */
+ dbSplit(tp, word, BUDMIN,
+- dbMaxBud((u8 *) & dp->wmap[word]));
++ dbMaxBud((u8 *)&dp->wmap[word]), false);
+
+ word += 1;
+ } else {
+@@ -2222,7 +2231,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ * system of the leaves to reflect the current
+ * allocation (size).
+ */
+- dbSplit(tp, word, size, NOFREE);
++ dbSplit(tp, word, size, NOFREE, false);
+
+ /* get the number of dmap words handled */
+ nw = BUDSIZE(size, BUDMIN);
+@@ -2329,7 +2338,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ /* update the leaf for this dmap word.
+ */
+ rc = dbJoin(tp, word,
+- dbMaxBud((u8 *) & dp->wmap[word]));
++ dbMaxBud((u8 *)&dp->wmap[word]), false);
+ if (rc)
+ return rc;
+
+@@ -2362,7 +2371,7 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+
+ /* update the leaf.
+ */
+- rc = dbJoin(tp, word, size);
++ rc = dbJoin(tp, word, size, false);
+ if (rc)
+ return rc;
+
+@@ -2514,16 +2523,16 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ * that it is at the front of a binary buddy system.
+ */
+ if (oldval == NOFREE) {
+- rc = dbBackSplit((dmtree_t *) dcp, leafno);
++ rc = dbBackSplit((dmtree_t *)dcp, leafno, true);
+ if (rc) {
+ release_metapage(mp);
+ return rc;
+ }
+ oldval = dcp->stree[ti];
+ }
+- dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
++ dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval, true);
+ } else {
+- rc = dbJoin((dmtree_t *) dcp, leafno, newval);
++ rc = dbJoin((dmtree_t *) dcp, leafno, newval, true);
+ if (rc) {
+ release_metapage(mp);
+ return rc;
+@@ -2554,7 +2563,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ */
+ if (alloc) {
+ dbJoin((dmtree_t *) dcp, leafno,
+- oldval);
++ oldval, true);
+ } else {
+ /* the dbJoin() above might have
+ * caused a larger binary buddy system
+@@ -2564,9 +2573,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ */
+ if (dcp->stree[ti] == NOFREE)
+ dbBackSplit((dmtree_t *)
+- dcp, leafno);
++ dcp, leafno, true);
+ dbSplit((dmtree_t *) dcp, leafno,
+- dcp->budmin, oldval);
++ dcp->budmin, oldval, true);
+ }
+
+ /* release the buffer and return the error.
+@@ -2614,7 +2623,7 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+-static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
++static void dbSplit(dmtree_t *tp, int leafno, int splitsz, int newval, bool is_ctl)
+ {
+ int budsz;
+ int cursz;
+@@ -2636,7 +2645,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+ while (cursz >= splitsz) {
+ /* update the buddy's leaf with its new value.
+ */
+- dbAdjTree(tp, leafno ^ budsz, cursz);
++ dbAdjTree(tp, leafno ^ budsz, cursz, is_ctl);
+
+ /* on to the next size and buddy.
+ */
+@@ -2648,7 +2657,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+ /* adjust the dmap tree to reflect the specified leaf's new
+ * value.
+ */
+- dbAdjTree(tp, leafno, newval);
++ dbAdjTree(tp, leafno, newval, is_ctl);
+ }
+
+
+@@ -2679,7 +2688,7 @@ static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
+ *
+ * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
+ */
+-static int dbBackSplit(dmtree_t * tp, int leafno)
++static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
+ {
+ int budsz, bud, w, bsz, size;
+ int cursz;
+@@ -2730,7 +2739,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
+ * system in two.
+ */
+ cursz = leaf[bud] - 1;
+- dbSplit(tp, bud, cursz, cursz);
++ dbSplit(tp, bud, cursz, cursz, is_ctl);
+ break;
+ }
+ }
+@@ -2758,7 +2767,7 @@ static int dbBackSplit(dmtree_t * tp, int leafno)
+ *
+ * RETURN VALUES: none
+ */
+-static int dbJoin(dmtree_t * tp, int leafno, int newval)
++static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
+ {
+ int budsz, buddy;
+ s8 *leaf;
+@@ -2813,12 +2822,12 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
+ if (leafno < buddy) {
+ /* leafno is the left buddy.
+ */
+- dbAdjTree(tp, buddy, NOFREE);
++ dbAdjTree(tp, buddy, NOFREE, is_ctl);
+ } else {
+ /* buddy is the left buddy and becomes
+ * leafno.
+ */
+- dbAdjTree(tp, leafno, NOFREE);
++ dbAdjTree(tp, leafno, NOFREE, is_ctl);
+ leafno = buddy;
+ }
+
+@@ -2831,7 +2840,7 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
+
+ /* update the leaf value.
+ */
+- dbAdjTree(tp, leafno, newval);
++ dbAdjTree(tp, leafno, newval, is_ctl);
+
+ return 0;
+ }
+@@ -2852,15 +2861,20 @@ static int dbJoin(dmtree_t * tp, int leafno, int newval)
+ *
+ * RETURN VALUES: none
+ */
+-static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
++static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl)
+ {
+ int lp, pp, k;
+- int max;
++ int max, size;
++
++ size = is_ctl ? CTLTREESIZE : TREESIZE;
+
+ /* pick up the index of the leaf for this leafno.
+ */
+ lp = leafno + le32_to_cpu(tp->dmt_leafidx);
+
++ if (WARN_ON_ONCE(lp >= size || lp < 0))
++ return;
++
+ /* is the current value the same as the old value ? if so,
+ * there is nothing to do.
+ */
+@@ -2921,14 +2935,19 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+ * leafidx - return pointer to be set to the index of the leaf
+ * describing at least l2nb free blocks if sufficient
+ * free blocks are found.
++ * is_ctl - determines if the tree is of type ctl
+ *
+ * RETURN VALUES:
+ * 0 - success
+ * -ENOSPC - insufficient free blocks.
+ */
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
+ {
+ int ti, n = 0, k, x = 0;
++ int max_size, max_idx;
++
++ max_size = is_ctl ? CTLTREESIZE : TREESIZE;
++ max_idx = is_ctl ? LPERCTL : LPERDMAP;
+
+ /* first check the root of the tree to see if there is
+ * sufficient free space.
+@@ -2949,6 +2968,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+ /* sufficient free space found. move to the next
+ * level (or quit if this is the last level).
+ */
++ if (x + n > max_size)
++ return -ENOSPC;
+ if (l2nb <= tp->dmt_stree[x + n])
+ break;
+ }
+@@ -2958,6 +2979,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+ */
+ assert(n < 4);
+ }
++ if (le32_to_cpu(tp->dmt_leafidx) >= max_idx)
++ return -ENOSPC;
+
+ /* set the return to the leftmost leaf describing sufficient
+ * free space.
+@@ -3002,7 +3025,7 @@ static int dbFindBits(u32 word, int l2nb)
+
+ /* scan the word for nb free bits at nb alignments.
+ */
+- for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
++ for (bitno = 0; mask != 0; bitno += nb, mask = (mask >> nb)) {
+ if ((mask & word) == mask)
+ break;
+ }
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 92b7c533407c12..5d3127ca68a42d 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -633,6 +633,11 @@ int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
+ for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
+ index = base + (lim >> 1);
+
++ if (stbl[index] < 0) {
++ rc = -EIO;
++ goto out;
++ }
++
+ if (p->header.flag & BT_LEAF) {
+ /* uppercase leaf name to compare */
+ cmp =
+@@ -829,6 +834,8 @@ int dtInsert(tid_t tid, struct inode *ip,
+ * the full page.
+ */
+ DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
++ if (p->header.freelist == 0)
++ return -EINVAL;
+
+ /*
+ * insert entry for new key
+@@ -1970,7 +1977,7 @@ static int dtSplitRoot(tid_t tid,
+ do {
+ f = &rp->slot[fsi];
+ fsi = f->next;
+- } while (fsi != -1);
++ } while (fsi >= 0);
+
+ f->next = n;
+ }
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 923a58422c4611..b30e4cf2f5794c 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -290,7 +290,7 @@ int diSync(struct inode *ipimap)
+ int diRead(struct inode *ip)
+ {
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+- int iagno, ino, extno, rc;
++ int iagno, ino, extno, rc, agno;
+ struct inode *ipimap;
+ struct dinode *dp;
+ struct iag *iagp;
+@@ -339,8 +339,11 @@ int diRead(struct inode *ip)
+
+ /* get the ag for the iag */
+ agstart = le64_to_cpu(iagp->agstart);
++ agno = BLKTOAG(agstart, JFS_SBI(ip->i_sb));
+
+ release_metapage(mp);
++ if (agno >= MAXAG || agno < 0)
++ return -EIO;
+
+ rel_inode = (ino & (INOSPERPAGE - 1));
+ pageno = blkno >> sbi->l2nbperpage;
+@@ -670,7 +673,7 @@ int diWrite(tid_t tid, struct inode *ip)
+ * This is the special xtree inside the directory for storing
+ * the directory table
+ */
+- xtpage_t *p, *xp;
++ xtroot_t *p, *xp;
+ xad_t *xad;
+
+ jfs_ip->xtlid = 0;
+@@ -684,7 +687,7 @@ int diWrite(tid_t tid, struct inode *ip)
+ * copy xtree root from inode to dinode:
+ */
+ p = &jfs_ip->i_xtroot;
+- xp = (xtpage_t *) &dp->di_dirtable;
++ xp = (xtroot_t *) &dp->di_dirtable;
+ lv = ilinelock->lv;
+ for (n = 0; n < ilinelock->index; n++, lv++) {
+ memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
+@@ -713,7 +716,7 @@ int diWrite(tid_t tid, struct inode *ip)
+ * regular file: 16 byte (XAD slot) granularity
+ */
+ if (type & tlckXTREE) {
+- xtpage_t *p, *xp;
++ xtroot_t *p, *xp;
+ xad_t *xad;
+
+ /*
+@@ -1320,7 +1323,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
+ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ {
+ int rc, ino, iagno, addext, extno, bitno, sword;
+- int nwords, rem, i, agno;
++ int nwords, rem, i, agno, dn_numag;
+ u32 mask, inosmap, extsmap;
+ struct inode *ipimap;
+ struct metapage *mp;
+@@ -1356,6 +1359,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+
+ /* get the ag number of this iag */
+ agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
++ dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
++ if (agno < 0 || agno > dn_numag || agno >= MAXAG)
++ return -EIO;
+
+ if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
+ /*
+@@ -2176,6 +2182,9 @@ static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
+ /* get the ag and iag numbers for this iag.
+ */
+ agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
++ if (agno >= MAXAG || agno < 0)
++ return -EIO;
++
+ iagno = le32_to_cpu(iagp->iagnum);
+
+ /* check if this is the last free extent within the
+diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
+index 721def69e732e4..10934f9a11be32 100644
+--- a/fs/jfs/jfs_incore.h
++++ b/fs/jfs/jfs_incore.h
+@@ -66,7 +66,7 @@ struct jfs_inode_info {
+ lid_t xtlid; /* lid of xtree lock on directory */
+ union {
+ struct {
+- xtpage_t _xtroot; /* 288: xtree root */
++ xtroot_t _xtroot; /* 288: xtree root */
+ struct inomap *_imap; /* 4: inode map header */
+ } file;
+ struct {
+@@ -92,7 +92,7 @@ struct jfs_inode_info {
+ } link;
+ } u;
+ #ifdef CONFIG_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+ #endif
+ u32 dev; /* will die when we get wide dev_t */
+ struct inode vfs_inode;
+diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
+index e855b8fde76ce1..cb6d1fda66a702 100644
+--- a/fs/jfs/jfs_logmgr.c
++++ b/fs/jfs/jfs_logmgr.c
+@@ -1058,7 +1058,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync)
+ int lmLogOpen(struct super_block *sb)
+ {
+ int rc;
+- struct block_device *bdev;
++ struct bdev_handle *bdev_handle;
+ struct jfs_log *log;
+ struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+@@ -1070,7 +1070,7 @@ int lmLogOpen(struct super_block *sb)
+
+ mutex_lock(&jfs_log_mutex);
+ list_for_each_entry(log, &jfs_external_logs, journal_list) {
+- if (log->bdev->bd_dev == sbi->logdev) {
++ if (log->bdev_handle->bdev->bd_dev == sbi->logdev) {
+ if (!uuid_equal(&log->uuid, &sbi->loguuid)) {
+ jfs_warn("wrong uuid on JFS journal");
+ mutex_unlock(&jfs_log_mutex);
+@@ -1100,14 +1100,14 @@ int lmLogOpen(struct super_block *sb)
+ * file systems to log may have n-to-1 relationship;
+ */
+
+- bdev = blkdev_get_by_dev(sbi->logdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+- log, NULL);
+- if (IS_ERR(bdev)) {
+- rc = PTR_ERR(bdev);
++ bdev_handle = bdev_open_by_dev(sbi->logdev,
++ BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL);
++ if (IS_ERR(bdev_handle)) {
++ rc = PTR_ERR(bdev_handle);
+ goto free;
+ }
+
+- log->bdev = bdev;
++ log->bdev_handle = bdev_handle;
+ uuid_copy(&log->uuid, &sbi->loguuid);
+
+ /*
+@@ -1141,7 +1141,7 @@ int lmLogOpen(struct super_block *sb)
+ lbmLogShutdown(log);
+
+ close: /* close external log device */
+- blkdev_put(bdev, log);
++ bdev_release(bdev_handle);
+
+ free: /* free log descriptor */
+ mutex_unlock(&jfs_log_mutex);
+@@ -1162,7 +1162,7 @@ static int open_inline_log(struct super_block *sb)
+ init_waitqueue_head(&log->syncwait);
+
+ set_bit(log_INLINELOG, &log->flag);
+- log->bdev = sb->s_bdev;
++ log->bdev_handle = sb->s_bdev_handle;
+ log->base = addressPXD(&JFS_SBI(sb)->logpxd);
+ log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
+ (L2LOGPSIZE - sb->s_blocksize_bits);
+@@ -1436,7 +1436,7 @@ int lmLogClose(struct super_block *sb)
+ {
+ struct jfs_sb_info *sbi = JFS_SBI(sb);
+ struct jfs_log *log = sbi->log;
+- struct block_device *bdev;
++ struct bdev_handle *bdev_handle;
+ int rc = 0;
+
+ jfs_info("lmLogClose: log:0x%p", log);
+@@ -1482,10 +1482,10 @@ int lmLogClose(struct super_block *sb)
+ * external log as separate logical volume
+ */
+ list_del(&log->journal_list);
+- bdev = log->bdev;
++ bdev_handle = log->bdev_handle;
+ rc = lmLogShutdown(log);
+
+- blkdev_put(bdev, log);
++ bdev_release(bdev_handle);
+
+ kfree(log);
+
+@@ -1972,7 +1972,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
+
+ bp->l_flag |= lbmREAD;
+
+- bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS);
++ bio = bio_alloc(log->bdev_handle->bdev, 1, REQ_OP_READ, GFP_NOFS);
+ bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
+ __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
+ BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
+@@ -2110,10 +2110,15 @@ static void lbmStartIO(struct lbuf * bp)
+ {
+ struct bio *bio;
+ struct jfs_log *log = bp->l_log;
++ struct block_device *bdev = NULL;
+
+ jfs_info("lbmStartIO");
+
+- bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
++ if (!log->no_integrity)
++ bdev = log->bdev_handle->bdev;
++
++ bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
++ GFP_NOFS);
+ bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
+ __bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
+ BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
+diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
+index 805877ce502044..84aa2d25390743 100644
+--- a/fs/jfs/jfs_logmgr.h
++++ b/fs/jfs/jfs_logmgr.h
+@@ -356,7 +356,7 @@ struct jfs_log {
+ * before writing syncpt.
+ */
+ struct list_head journal_list; /* Global list */
+- struct block_device *bdev; /* 4: log lv pointer */
++ struct bdev_handle *bdev_handle; /* 4: log lv pointer */
+ int serial; /* 4: log mount serial number */
+
+ s64 base; /* @8: log extent address (inline log ) */
+diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
+index b83aae56a1f261..9b5c6a20b30c83 100644
+--- a/fs/jfs/jfs_mount.c
++++ b/fs/jfs/jfs_mount.c
+@@ -172,15 +172,15 @@ int jfs_mount(struct super_block *sb)
+ }
+ jfs_info("jfs_mount: ipimap:0x%p", ipimap);
+
+- /* map further access of per fileset inodes by the fileset inode */
+- sbi->ipimap = ipimap;
+-
+ /* initialize fileset inode allocation map */
+ if ((rc = diMount(ipimap))) {
+ jfs_err("jfs_mount: diMount failed w/rc = %d", rc);
+ goto err_ipimap;
+ }
+
++ /* map further access of per fileset inodes by the fileset inode */
++ sbi->ipimap = ipimap;
++
+ return rc;
+
+ /*
+@@ -430,7 +430,8 @@ int updateSuper(struct super_block *sb, uint state)
+
+ if (state == FM_MOUNT) {
+ /* record log's dev_t and mount serial number */
+- j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
++ j_sb->s_logdev = cpu_to_le32(
++ new_encode_dev(sbi->log->bdev_handle->bdev->bd_dev));
+ j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
+ } else if (state == FM_CLEAN) {
+ /*
+diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
+index ce4b4760fcb1d6..dccc8b3f104593 100644
+--- a/fs/jfs/jfs_txnmgr.c
++++ b/fs/jfs/jfs_txnmgr.c
+@@ -783,7 +783,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
+ if (mp->xflag & COMMIT_PAGE)
+ p = (xtpage_t *) mp->data;
+ else
+- p = &jfs_ip->i_xtroot;
++ p = (xtpage_t *) &jfs_ip->i_xtroot;
+ xtlck->lwm.offset =
+ le16_to_cpu(p->header.nextindex);
+ }
+@@ -1676,7 +1676,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+
+ if (tlck->type & tlckBTROOT) {
+ lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
+- p = &JFS_IP(ip)->i_xtroot;
++ p = (xtpage_t *) &JFS_IP(ip)->i_xtroot;
+ if (S_ISDIR(ip->i_mode))
+ lrd->log.redopage.type |=
+ cpu_to_le16(LOG_DIR_XTREE);
+diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
+index 2d304cee884c6f..5ee618d17e7730 100644
+--- a/fs/jfs/jfs_xtree.c
++++ b/fs/jfs/jfs_xtree.c
+@@ -1213,7 +1213,7 @@ xtSplitRoot(tid_t tid,
+ struct xtlock *xtlck;
+ int rc;
+
+- sp = &JFS_IP(ip)->i_xtroot;
++ sp = (xtpage_t *) &JFS_IP(ip)->i_xtroot;
+
+ INCREMENT(xtStat.split);
+
+@@ -2098,7 +2098,7 @@ int xtAppend(tid_t tid, /* transaction id */
+ */
+ void xtInitRoot(tid_t tid, struct inode *ip)
+ {
+- xtpage_t *p;
++ xtroot_t *p;
+
+ /*
+ * acquire a transaction lock on the root
+diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
+index ad7592191d7607..0f6cf5a1ce75bd 100644
+--- a/fs/jfs/jfs_xtree.h
++++ b/fs/jfs/jfs_xtree.h
+@@ -65,24 +65,33 @@ struct xadlist {
+ #define XTPAGEMAXSLOT 256
+ #define XTENTRYSTART 2
+
+-/*
+- * xtree page:
+- */
+-typedef union {
+- struct xtheader {
+- __le64 next; /* 8: */
+- __le64 prev; /* 8: */
++struct xtheader {
++ __le64 next; /* 8: */
++ __le64 prev; /* 8: */
+
+- u8 flag; /* 1: */
+- u8 rsrvd1; /* 1: */
+- __le16 nextindex; /* 2: next index = number of entries */
+- __le16 maxentry; /* 2: max number of entries */
+- __le16 rsrvd2; /* 2: */
++ u8 flag; /* 1: */
++ u8 rsrvd1; /* 1: */
++ __le16 nextindex; /* 2: next index = number of entries */
++ __le16 maxentry; /* 2: max number of entries */
++ __le16 rsrvd2; /* 2: */
+
+- pxd_t self; /* 8: self */
+- } header; /* (32) */
++ pxd_t self; /* 8: self */
++};
+
++/*
++ * xtree root (in inode):
++ */
++typedef union {
++ struct xtheader header;
+ xad_t xad[XTROOTMAXSLOT]; /* 16 * maxentry: xad array */
++} xtroot_t;
++
++/*
++ * xtree page:
++ */
++typedef union {
++ struct xtheader header;
++ xad_t xad[XTPAGEMAXSLOT]; /* 16 * maxentry: xad array */
+ } xtpage_t;
+
+ /*
+diff --git a/fs/jfs/super.c b/fs/jfs/super.c
+index 2e2f7f6d36a09d..c4f565770d3166 100644
+--- a/fs/jfs/super.c
++++ b/fs/jfs/super.c
+@@ -824,7 +824,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
+ return len - towrite;
+ }
+
+-static struct dquot **jfs_get_dquots(struct inode *inode)
++static struct dquot __rcu **jfs_get_dquots(struct inode *inode)
+ {
+ return JFS_IP(inode)->i_dquot;
+ }
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index 8577ad494e056b..49e064c1f55179 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -434,6 +434,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+ int rc;
+ int quota_allocation = 0;
+
++ memset(&ea_buf->new_ea, 0, sizeof(ea_buf->new_ea));
++
+ /* When fsck.jfs clears a bad ea, it doesn't clear the size */
+ if (ji->ea.flag == 0)
+ ea_size = 0;
+@@ -557,9 +559,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+
+ size_check:
+ if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
++ int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
++
+ printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+- ea_buf->xattr, ea_size, 1);
++ ea_buf->xattr, size, 1);
+ ea_release(inode, ea_buf);
+ rc = -EIO;
+ goto clean_up;
+@@ -795,7 +799,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ size_t buf_size)
+ {
+ struct jfs_ea_list *ealist;
+- struct jfs_ea *ea;
++ struct jfs_ea *ea, *ealist_end;
+ struct ea_buffer ea_buf;
+ int xattr_size;
+ ssize_t size;
+@@ -815,9 +819,16 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ goto not_found;
+
+ ealist = (struct jfs_ea_list *) ea_buf.xattr;
++ ealist_end = END_EALIST(ealist);
+
+ /* Find the named attribute */
+- for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
++ for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
++ if (unlikely(ea + 1 > ealist_end) ||
++ unlikely(NEXT_EA(ea) > ealist_end)) {
++ size = -EUCLEAN;
++ goto release;
++ }
++
+ if ((namelen == ea->namelen) &&
+ memcmp(name, ea->name, namelen) == 0) {
+ /* Found it */
+@@ -832,6 +843,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ memcpy(data, value, size);
+ goto release;
+ }
++ }
+ not_found:
+ size = -ENODATA;
+ release:
+@@ -859,7 +871,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+ ssize_t size = 0;
+ int xattr_size;
+ struct jfs_ea_list *ealist;
+- struct jfs_ea *ea;
++ struct jfs_ea *ea, *ealist_end;
+ struct ea_buffer ea_buf;
+
+ down_read(&JFS_IP(inode)->xattr_sem);
+@@ -874,9 +886,16 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+ goto release;
+
+ ealist = (struct jfs_ea_list *) ea_buf.xattr;
++ ealist_end = END_EALIST(ealist);
+
+ /* compute required size of list */
+- for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
++ for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
++ if (unlikely(ea + 1 > ealist_end) ||
++ unlikely(NEXT_EA(ea) > ealist_end)) {
++ size = -EUCLEAN;
++ goto release;
++ }
++
+ if (can_list(ea))
+ size += name_size(ea) + 1;
+ }
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index 8b2bd65d70e725..b068ed32d7b32d 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -127,7 +127,7 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
+ *
+ * [3] when @kn_to is %NULL result will be "(null)"
+ *
+- * Return: the length of the full path. If the full length is equal to or
++ * Return: the length of the constructed path. If the path would have been
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
+ */
+@@ -138,16 +138,17 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
+ struct kernfs_node *kn, *common;
+ const char parent_str[] = "/..";
+ size_t depth_from, depth_to, len = 0;
++ ssize_t copied;
+ int i, j;
+
+ if (!kn_to)
+- return strlcpy(buf, "(null)", buflen);
++ return strscpy(buf, "(null)", buflen);
+
+ if (!kn_from)
+ kn_from = kernfs_root(kn_to)->kn;
+
+ if (kn_from == kn_to)
+- return strlcpy(buf, "/", buflen);
++ return strscpy(buf, "/", buflen);
+
+ common = kernfs_common_ancestor(kn_from, kn_to);
+ if (WARN_ON(!common))
+@@ -158,18 +159,19 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
+
+ buf[0] = '\0';
+
+- for (i = 0; i < depth_from; i++)
+- len += strlcpy(buf + len, parent_str,
+- len < buflen ? buflen - len : 0);
++ for (i = 0; i < depth_from; i++) {
++ copied = strscpy(buf + len, parent_str, buflen - len);
++ if (copied < 0)
++ return copied;
++ len += copied;
++ }
+
+ /* Calculate how many bytes we need for the rest */
+ for (i = depth_to - 1; i >= 0; i--) {
+ for (kn = kn_to, j = 0; j < i; j++)
+ kn = kn->parent;
+- len += strlcpy(buf + len, "/",
+- len < buflen ? buflen - len : 0);
+- len += strlcpy(buf + len, kn->name,
+- len < buflen ? buflen - len : 0);
++
++ len += scnprintf(buf + len, buflen - len, "/%s", kn->name);
+ }
+
+ return len;
+@@ -214,7 +216,7 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
+ * path (which includes '..'s) as needed to reach from @from to @to is
+ * returned.
+ *
+- * Return: the length of the full path. If the full length is equal to or
++ * Return: the length of the constructed path. If the path would have been
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
+ */
+@@ -265,12 +267,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
+ sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
+ sizeof(kernfs_pr_cont_buf));
+ if (sz < 0) {
+- pr_cont("(error)");
+- goto out;
+- }
+-
+- if (sz >= sizeof(kernfs_pr_cont_buf)) {
+- pr_cont("(name too long)");
++ if (sz == -E2BIG)
++ pr_cont("(name too long)");
++ else
++ pr_cont("(error)");
+ goto out;
+ }
+
+@@ -529,6 +529,20 @@ void kernfs_get(struct kernfs_node *kn)
+ }
+ EXPORT_SYMBOL_GPL(kernfs_get);
+
++static void kernfs_free_rcu(struct rcu_head *rcu)
++{
++ struct kernfs_node *kn = container_of(rcu, struct kernfs_node, rcu);
++
++ kfree_const(kn->name);
++
++ if (kn->iattr) {
++ simple_xattrs_free(&kn->iattr->xattrs, NULL);
++ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
++ }
++
++ kmem_cache_free(kernfs_node_cache, kn);
++}
++
+ /**
+ * kernfs_put - put a reference count on a kernfs_node
+ * @kn: the target kernfs_node
+@@ -557,16 +571,11 @@ void kernfs_put(struct kernfs_node *kn)
+ if (kernfs_type(kn) == KERNFS_LINK)
+ kernfs_put(kn->symlink.target_kn);
+
+- kfree_const(kn->name);
+-
+- if (kn->iattr) {
+- simple_xattrs_free(&kn->iattr->xattrs, NULL);
+- kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
+- }
+ spin_lock(&kernfs_idr_lock);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+ spin_unlock(&kernfs_idr_lock);
+- kmem_cache_free(kernfs_node_cache, kn);
++
++ call_rcu(&kn->rcu, kernfs_free_rcu);
+
+ kn = parent;
+ if (kn) {
+@@ -575,7 +584,7 @@ void kernfs_put(struct kernfs_node *kn)
+ } else {
+ /* just released the root kn, free @root too */
+ idr_destroy(&root->ino_idr);
+- kfree(root);
++ kfree_rcu(root, rcu);
+ }
+ }
+ EXPORT_SYMBOL_GPL(kernfs_put);
+@@ -676,6 +685,18 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
+ {
+ struct kernfs_node *kn;
+
++ if (parent->mode & S_ISGID) {
++ /* this code block imitates inode_init_owner() for
++ * kernfs
++ */
++
++ if (parent->iattr)
++ gid = parent->iattr->ia_gid;
++
++ if (flags & KERNFS_DIR)
++ mode |= S_ISGID;
++ }
++
+ kn = __kernfs_new_node(kernfs_root(parent), parent,
+ name, mode, uid, gid, flags);
+ if (kn) {
+@@ -703,7 +724,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ ino_t ino = kernfs_id_ino(id);
+ u32 gen = kernfs_id_gen(id);
+
+- spin_lock(&kernfs_idr_lock);
++ rcu_read_lock();
+
+ kn = idr_find(&root->ino_idr, (u32)ino);
+ if (!kn)
+@@ -727,10 +748,10 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
+ goto err_unlock;
+
+- spin_unlock(&kernfs_idr_lock);
++ rcu_read_unlock();
+ return kn;
+ err_unlock:
+- spin_unlock(&kernfs_idr_lock);
++ rcu_read_unlock();
+ return NULL;
+ }
+
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 180906c36f5151..332d08d2fe0d56 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -532,9 +532,11 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+ goto out_put;
+
+ rc = 0;
+- of->mmapped = true;
+- of_on(of)->nr_mmapped++;
+- of->vm_ops = vma->vm_ops;
++ if (!of->mmapped) {
++ of->mmapped = true;
++ of_on(of)->nr_mmapped++;
++ of->vm_ops = vma->vm_ops;
++ }
+ vma->vm_ops = &kernfs_vm_ops;
+ out_put:
+ kernfs_put_active(of->kn);
+diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
+index a9b854cdfdb5f7..210dac7e9ee25c 100644
+--- a/fs/kernfs/kernfs-internal.h
++++ b/fs/kernfs/kernfs-internal.h
+@@ -49,6 +49,8 @@ struct kernfs_root {
+ struct rw_semaphore kernfs_rwsem;
+ struct rw_semaphore kernfs_iattr_rwsem;
+ struct rw_semaphore kernfs_supers_rwsem;
++
++ struct rcu_head rcu;
+ };
+
+ /* +1 to avoid triggering overflow warning when negating it */
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 37f2d34ee090bd..dc0f7519045f11 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -396,6 +396,8 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+ return -EINVAL;
+ }
+
++ /* In this case, ->private_data is protected by f_pos_lock */
++ file->private_data = NULL;
+ return vfs_setpos(file, offset, U32_MAX);
+ }
+
+@@ -425,7 +427,7 @@ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
+ inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+ }
+
+-static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
++static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+ {
+ struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode);
+ XA_STATE(xas, &so_ctx->xa, ctx->pos);
+@@ -434,7 +436,7 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+ while (true) {
+ dentry = offset_find_next(&xas);
+ if (!dentry)
+- break;
++ return ERR_PTR(-ENOENT);
+
+ if (!offset_dir_emit(ctx, dentry)) {
+ dput(dentry);
+@@ -444,6 +446,7 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+ dput(dentry);
+ ctx->pos = xas.xa_index + 1;
+ }
++ return NULL;
+ }
+
+ /**
+@@ -476,7 +479,12 @@ static int offset_readdir(struct file *file, struct dir_context *ctx)
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+- offset_iterate_dir(d_inode(dir), ctx);
++ /* In this case, ->private_data is protected by f_pos_lock */
++ if (ctx->pos == 2)
++ file->private_data = NULL;
++ else if (file->private_data == ERR_PTR(-ENOENT))
++ return 0;
++ file->private_data = offset_iterate_dir(d_inode(dir), ctx);
+ return 0;
+ }
+
+@@ -541,7 +549,8 @@ void simple_recursive_removal(struct dentry *dentry,
+ dput(victim); // unpin it
+ }
+ if (victim == dentry) {
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode,
++ inode_set_ctime_current(inode));
+ if (d_is_dir(dentry))
+ drop_nlink(inode);
+ inode_unlock(inode);
+@@ -582,7 +591,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
+ */
+ root->i_ino = 1;
+ root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+- root->i_atime = root->i_mtime = inode_set_ctime_current(root);
++ simple_inode_init_ts(root);
+ s->s_root = d_make_root(root);
+ if (!s->s_root)
+ return -ENOMEM;
+@@ -638,8 +647,8 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
+ {
+ struct inode *inode = d_inode(old_dentry);
+
+- dir->i_mtime = inode_set_ctime_to_ts(dir,
+- inode_set_ctime_current(inode));
++ inode_set_mtime_to_ts(dir,
++ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
+ inc_nlink(inode);
+ ihold(inode);
+ dget(dentry);
+@@ -673,8 +682,8 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
+ {
+ struct inode *inode = d_inode(dentry);
+
+- dir->i_mtime = inode_set_ctime_to_ts(dir,
+- inode_set_ctime_current(inode));
++ inode_set_mtime_to_ts(dir,
++ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
+ drop_nlink(inode);
+ dput(dentry);
+ return 0;
+@@ -709,9 +718,10 @@ void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ {
+ struct inode *newino = d_inode(new_dentry);
+
+- old_dir->i_mtime = inode_set_ctime_current(old_dir);
++ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
+ if (new_dir != old_dir)
+- new_dir->i_mtime = inode_set_ctime_current(new_dir);
++ inode_set_mtime_to_ts(new_dir,
++ inode_set_ctime_current(new_dir));
+ inode_set_ctime_current(d_inode(old_dentry));
+ if (newino)
+ inode_set_ctime_current(newino);
+@@ -926,7 +936,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
+ */
+ inode->i_ino = 1;
+ inode->i_mode = S_IFDIR | 0755;
+- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
++ simple_inode_init_ts(inode);
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ set_nlink(inode, 2);
+@@ -952,7 +962,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
+ goto out;
+ }
+ inode->i_mode = S_IFREG | files->mode;
+- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
++ simple_inode_init_ts(inode);
+ inode->i_fop = files->ops;
+ inode->i_ino = i;
+ d_add(dentry, inode);
+@@ -1520,7 +1530,7 @@ struct inode *alloc_anon_inode(struct super_block *s)
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+ inode->i_flags |= S_PRIVATE;
+- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
++ simple_inode_init_ts(inode);
+ return inode;
+ }
+ EXPORT_SYMBOL(alloc_anon_inode);
+@@ -1912,3 +1922,20 @@ ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ return direct_written + buffered_written;
+ }
+ EXPORT_SYMBOL_GPL(direct_write_fallback);
++
++/**
++ * simple_inode_init_ts - initialize the timestamps for a new inode
++ * @inode: inode to be initialized
++ *
++ * When a new inode is created, most filesystems set the timestamps to the
++ * current time. Add a helper to do this.
++ */
++struct timespec64 simple_inode_init_ts(struct inode *inode)
++{
++ struct timespec64 ts = inode_set_ctime_current(inode);
++
++ inode_set_atime_to_ts(inode, ts);
++ inode_set_mtime_to_ts(inode, ts);
++ return ts;
++}
++EXPORT_SYMBOL(simple_inode_init_ts);
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 6579948070a482..a62331487ebf16 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -712,8 +712,6 @@ static const struct svc_version *nlmsvc_version[] = {
+ #endif
+ };
+
+-static struct svc_stat nlmsvc_stats;
+-
+ #define NLM_NRVERS ARRAY_SIZE(nlmsvc_version)
+ static struct svc_program nlmsvc_program = {
+ .pg_prog = NLM_PROGRAM, /* program number */
+@@ -721,7 +719,6 @@ static struct svc_program nlmsvc_program = {
+ .pg_vers = nlmsvc_version, /* version table */
+ .pg_name = "lockd", /* service name */
+ .pg_class = "nfsd", /* share authentication with nfsd */
+- .pg_stats = &nlmsvc_stats, /* stats table */
+ .pg_authenticate = &lockd_authenticate, /* export authentication */
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+diff --git a/fs/locks.c b/fs/locks.c
+index 76ad05f8070ad9..ccfa441e487e19 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1314,9 +1314,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
+ locks_wake_up_blocks(left);
+ }
+ out:
++ trace_posix_lock_inode(inode, request, error);
+ spin_unlock(&ctx->flc_lock);
+ percpu_up_read(&file_rwsem);
+- trace_posix_lock_inode(inode, request, error);
+ /*
+ * Free any unused locks.
+ */
+@@ -2381,8 +2381,9 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ error = do_lock_file_wait(filp, cmd, file_lock);
+
+ /*
+- * Attempt to detect a close/fcntl race and recover by releasing the
+- * lock that was just acquired. There is no need to do that when we're
++ * Detect close/fcntl races and recover by zapping all POSIX locks
++ * associated with this file and our files_struct, just like on
++ * filp_flush(). There is no need to do that when we're
+ * unlocking though, or for OFD locks.
+ */
+ if (!error && file_lock->fl_type != F_UNLCK &&
+@@ -2397,9 +2398,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ f = files_lookup_fd_locked(files, fd);
+ spin_unlock(&files->file_lock);
+ if (f != filp) {
+- file_lock->fl_type = F_UNLCK;
+- error = do_lock_file_wait(filp, cmd, file_lock);
+- WARN_ON_ONCE(error);
++ locks_remove_posix(filp, files);
+ error = -EBADF;
+ }
+ }
+@@ -2504,8 +2503,9 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ error = do_lock_file_wait(filp, cmd, file_lock);
+
+ /*
+- * Attempt to detect a close/fcntl race and recover by releasing the
+- * lock that was just acquired. There is no need to do that when we're
++ * Detect close/fcntl races and recover by zapping all POSIX locks
++ * associated with this file and our files_struct, just like on
++ * filp_flush(). There is no need to do that when we're
+ * unlocking though, or for OFD locks.
+ */
+ if (!error && file_lock->fl_type != F_UNLCK &&
+@@ -2520,9 +2520,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ f = files_lookup_fd_locked(files, fd);
+ spin_unlock(&files->file_lock);
+ if (f != filp) {
+- file_lock->fl_type = F_UNLCK;
+- error = do_lock_file_wait(filp, cmd, file_lock);
+- WARN_ON_ONCE(error);
++ locks_remove_posix(filp, files);
+ error = -EBADF;
+ }
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index 94565bd7e73f6f..beffbb02a24e67 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2667,10 +2667,8 @@ static int lookup_one_common(struct mnt_idmap *idmap,
+ if (!len)
+ return -EACCES;
+
+- if (unlikely(name[0] == '.')) {
+- if (len < 2 || (len == 2 && name[1] == '.'))
+- return -EACCES;
+- }
++ if (is_dot_dotdot(name, len))
++ return -EACCES;
+
+ while (len--) {
+ unsigned int c = *(const unsigned char *)name++;
+@@ -3021,20 +3019,14 @@ static struct dentry *lock_two_directories(struct dentry *p1, struct dentry *p2)
+ p = d_ancestor(p2, p1);
+ if (p) {
+ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
+- inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
++ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT2);
+ return p;
+ }
+
+ p = d_ancestor(p1, p2);
+- if (p) {
+- inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
+- inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
+- return p;
+- }
+-
+- lock_two_inodes(p1->d_inode, p2->d_inode,
+- I_MUTEX_PARENT, I_MUTEX_PARENT2);
+- return NULL;
++ inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
++ inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
++ return p;
+ }
+
+ /*
+@@ -4733,11 +4725,12 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
+ *
+ * a) we can get into loop creation.
+ * b) race potential - two innocent renames can create a loop together.
+- * That's where 4.4 screws up. Current fix: serialization on
++ * That's where 4.4BSD screws up. Current fix: serialization on
+ * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
+ * story.
+- * c) we have to lock _four_ objects - parents and victim (if it exists),
+- * and source.
++ * c) we may have to lock up to _four_ objects - parents and victim (if it exists),
++ * and source (if it's a non-directory or a subdirectory that moves to
++ * a different parent).
+ * And that - after we got ->i_mutex on parents (until then we don't know
+ * whether the target exists). Solution: try to be smart with locking
+ * order for inodes. We rely on the fact that tree topology may change
+@@ -4769,6 +4762,7 @@ int vfs_rename(struct renamedata *rd)
+ bool new_is_dir = false;
+ unsigned max_links = new_dir->i_sb->s_max_links;
+ struct name_snapshot old_name;
++ bool lock_old_subdir, lock_new_subdir;
+
+ if (source == target)
+ return 0;
+@@ -4822,15 +4816,32 @@ int vfs_rename(struct renamedata *rd)
+ take_dentry_name_snapshot(&old_name, old_dentry);
+ dget(new_dentry);
+ /*
+- * Lock all moved children. Moved directories may need to change parent
+- * pointer so they need the lock to prevent against concurrent
+- * directory changes moving parent pointer. For regular files we've
+- * historically always done this. The lockdep locking subclasses are
+- * somewhat arbitrary but RENAME_EXCHANGE in particular can swap
+- * regular files and directories so it's difficult to tell which
+- * subclasses to use.
++ * Lock children.
++ * The source subdirectory needs to be locked on cross-directory
++ * rename or cross-directory exchange since its parent changes.
++ * The target subdirectory needs to be locked on cross-directory
++ * exchange due to parent change and on any rename due to becoming
++ * a victim.
++ * Non-directories need locking in all cases (for NFS reasons);
++ * they get locked after any subdirectories (in inode address order).
++ *
++ * NOTE: WE ONLY LOCK UNRELATED DIRECTORIES IN CROSS-DIRECTORY CASE.
++ * NEVER, EVER DO THAT WITHOUT ->s_vfs_rename_mutex.
+ */
+- lock_two_inodes(source, target, I_MUTEX_NORMAL, I_MUTEX_NONDIR2);
++ lock_old_subdir = new_dir != old_dir;
++ lock_new_subdir = new_dir != old_dir || !(flags & RENAME_EXCHANGE);
++ if (is_dir) {
++ if (lock_old_subdir)
++ inode_lock_nested(source, I_MUTEX_CHILD);
++ if (target && (!new_is_dir || lock_new_subdir))
++ inode_lock(target);
++ } else if (new_is_dir) {
++ if (lock_new_subdir)
++ inode_lock_nested(target, I_MUTEX_CHILD);
++ inode_lock(source);
++ } else {
++ lock_two_nondirectories(source, target);
++ }
+
+ error = -EPERM;
+ if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target)))
+@@ -4878,8 +4889,9 @@ int vfs_rename(struct renamedata *rd)
+ d_exchange(old_dentry, new_dentry);
+ }
+ out:
+- inode_unlock(source);
+- if (target)
++ if (!is_dir || lock_old_subdir)
++ inode_unlock(source);
++ if (target && (!new_is_dir || lock_new_subdir))
+ inode_unlock(target);
+ dput(new_dentry);
+ if (!error) {
+diff --git a/fs/namespace.c b/fs/namespace.c
+index e157efc54023a0..b4385e2413d599 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2796,8 +2796,15 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
+ if (!__mnt_is_readonly(mnt) &&
+ (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
+ (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
+- char *buf = (char *)__get_free_page(GFP_KERNEL);
+- char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
++ char *buf, *mntpath;
++
++ buf = (char *)__get_free_page(GFP_KERNEL);
++ if (buf)
++ mntpath = d_path(mountpoint, buf, PAGE_SIZE);
++ else
++ mntpath = ERR_PTR(-ENOMEM);
++ if (IS_ERR(mntpath))
++ mntpath = "(unknown)";
+
+ pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
+ sb->s_type->name,
+@@ -2805,8 +2812,9 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
+ mntpath, &sb->s_time_max,
+ (unsigned long long)sb->s_time_max);
+
+- free_page((unsigned long)buf);
+ sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
++ if (buf)
++ free_page((unsigned long)buf);
+ }
+ }
+
+@@ -2873,7 +2881,12 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
++ /*
++ * Indicate to the filesystem that the remount request is coming
++ * from the legacy mount system call.
++ */
+ fc->oldapi = true;
++
+ err = parse_monolithic_mount_data(fc, data);
+ if (!err) {
+ down_write(&sb->s_umount);
+@@ -3322,6 +3335,12 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
++ /*
++ * Indicate to the filesystem that the mount request is coming
++ * from the legacy mount system call.
++ */
++ fc->oldapi = true;
++
+ if (subtype)
+ err = vfs_parse_fs_string(fc, "subtype",
+ subtype, strlen(subtype));
+@@ -4459,10 +4478,15 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
+ /*
+ * If this is an attached mount make sure it's located in the callers
+ * mount namespace. If it's not don't let the caller interact with it.
+- * If this is a detached mount make sure it has an anonymous mount
+- * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
++ *
++ * If this mount doesn't have a parent it's most often simply a
++ * detached mount with an anonymous mount namespace. IOW, something
++ * that's simply not attached yet. But there are apparently also users
++ * that do change mount properties on the rootfs itself. That obviously
++ * neither has a parent nor is it a detached mount so we cannot
++ * unconditionally check for detached mounts.
+ */
+- if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
++ if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
+ goto out;
+
+ /*
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 943aeea1eb160f..6be13e0ec170d1 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -580,6 +580,8 @@ bl_find_get_deviceid(struct nfs_server *server,
+ nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ goto retry;
+ }
++
++ nfs4_put_deviceid_node(node);
+ return ERR_PTR(-ENODEV);
+ }
+
+@@ -893,10 +895,9 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+ }
+
+ if (pgio->pg_dreq == NULL)
+- wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
+- req->wb_index);
++ wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index);
+ else
+- wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
++ wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req));
+
+ pnfs_generic_pg_init_write(pgio, req, wb_size);
+
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 466ebf1d41b2b7..869c88978899c0 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -399,15 +399,12 @@ static const struct svc_version *nfs4_callback_version[] = {
+ [4] = &nfs4_callback_version4,
+ };
+
+-static struct svc_stat nfs4_callback_stats;
+-
+ static struct svc_program nfs4_callback_program = {
+ .pg_prog = NFS4_CALLBACK, /* RPC service number */
+ .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
+ .pg_vers = nfs4_callback_version, /* version table */
+ .pg_name = "NFSv4 callback", /* service name */
+ .pg_class = "nfs", /* authentication class */
+- .pg_stats = &nfs4_callback_stats,
+ .pg_authenticate = nfs_callback_authenticate,
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 321af81c456e2a..d5f6437da352da 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -372,6 +372,8 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
+
+ rc_list->rcl_nrefcalls = ntohl(*p++);
+ if (rc_list->rcl_nrefcalls) {
++ if (unlikely(rc_list->rcl_nrefcalls > xdr->buf->len))
++ goto out;
+ p = xdr_inline_decode(xdr,
+ rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 44eca51b28085d..62607d52bfa5e7 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -73,7 +73,6 @@ const struct rpc_program nfs_program = {
+ .number = NFS_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfs_version),
+ .version = nfs_version,
+- .stats = &nfs_rpcstat,
+ .pipe_dir_name = NFS_PIPE_DIRNAME,
+ };
+
+@@ -502,6 +501,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
+ const struct nfs_client_initdata *cl_init,
+ rpc_authflavor_t flavor)
+ {
++ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+ struct rpc_clnt *clnt = NULL;
+ struct rpc_create_args args = {
+ .net = clp->cl_net,
+@@ -513,6 +513,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
+ .servername = clp->cl_hostname,
+ .nodename = cl_init->nodename,
+ .program = &nfs_program,
++ .stats = &nn->rpcstats,
+ .version = clp->rpc_ops->version,
+ .authflavor = flavor,
+ .cred = cl_init->cred,
+@@ -986,6 +987,7 @@ struct nfs_server *nfs_alloc_server(void)
+ INIT_LIST_HEAD(&server->layouts);
+ INIT_LIST_HEAD(&server->state_owners_lru);
+ INIT_LIST_HEAD(&server->ss_copies);
++ INIT_LIST_HEAD(&server->ss_src_copies);
+
+ atomic_set(&server->active, 0);
+
+@@ -1175,6 +1177,8 @@ void nfs_clients_init(struct net *net)
+ #endif
+ spin_lock_init(&nn->nfs_client_lock);
+ nn->boot_time = ktime_get_real();
++ memset(&nn->rpcstats, 0, sizeof(nn->rpcstats));
++ nn->rpcstats.program = &nfs_program;
+
+ nfs_netns_sysfs_setup(nn, net);
+ }
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index cf7365581031b5..a2034511b63144 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -627,6 +627,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ prev = delegation;
+ continue;
+ }
++ inode = nfs_delegation_grab_inode(delegation);
++ if (inode == NULL)
++ continue;
+
+ if (prev) {
+ struct inode *tmp = nfs_delegation_grab_inode(prev);
+@@ -637,12 +640,6 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ }
+ }
+
+- inode = nfs_delegation_grab_inode(delegation);
+- if (inode == NULL) {
+- rcu_read_unlock();
+- iput(to_put);
+- goto restart;
+- }
+ delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ rcu_read_unlock();
+
+@@ -1164,7 +1161,6 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ struct inode *inode;
+ restart:
+ rcu_read_lock();
+-restart_locked:
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+@@ -1175,7 +1171,7 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+- goto restart_locked;
++ continue;
+ delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ rcu_read_unlock();
+ if (delegation != NULL) {
+@@ -1296,7 +1292,6 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ nfs4_stateid stateid;
+ restart:
+ rcu_read_lock();
+-restart_locked:
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+@@ -1307,7 +1302,7 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+- goto restart_locked;
++ continue;
+ spin_lock(&delegation->lock);
+ cred = get_cred_rcu(delegation->cred);
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index e6a51fd94fea87..39f7549afcf5bd 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1625,7 +1625,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ switch (error) {
+ case 1:
+ break;
+- case 0:
++ case -ETIMEDOUT:
++ if (inode && (IS_ROOT(dentry) ||
++ NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
++ error = 1;
++ break;
++ case -ESTALE:
++ case -ENOENT:
++ error = 0;
++ fallthrough;
++ default:
+ /*
+ * We can't d_drop the root of a disconnected tree:
+ * its d_hash is on the s_anon list and d_drop() would hide
+@@ -1680,18 +1689,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
+
+ dir_verifier = nfs_save_change_attribute(dir);
+ ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+- if (ret < 0) {
+- switch (ret) {
+- case -ESTALE:
+- case -ENOENT:
+- ret = 0;
+- break;
+- case -ETIMEDOUT:
+- if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+- ret = 1;
+- }
++ if (ret < 0)
+ goto out;
+- }
+
+ /* Request help from readdirplus */
+ nfs_lookup_advise_force_readdirplus(dir, flags);
+@@ -1735,7 +1734,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+ {
+ struct inode *inode;
+- int error;
++ int error = 0;
+
+ nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
+ inode = d_inode(dentry);
+@@ -1780,7 +1779,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ out_bad:
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+- return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
++ return nfs_lookup_revalidate_done(dir, dentry, inode, error);
+ }
+
+ static int
+@@ -1802,9 +1801,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+ if (parent != READ_ONCE(dentry->d_parent))
+ return -ECHILD;
+ } else {
+- /* Wait for unlink to complete */
++ /* Wait for unlink to complete - see unblock_revalidate() */
+ wait_var_event(&dentry->d_fsdata,
+- dentry->d_fsdata != NFS_FSDATA_BLOCKED);
++ smp_load_acquire(&dentry->d_fsdata)
++ != NFS_FSDATA_BLOCKED);
+ parent = dget_parent(dentry);
+ ret = reval(d_inode(parent), dentry, flags);
+ dput(parent);
+@@ -1817,6 +1817,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+ return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
+ }
+
++static void block_revalidate(struct dentry *dentry)
++{
++ /* old devname - just in case */
++ kfree(dentry->d_fsdata);
++
++ /* Any new reference that could lead to an open
++ * will take ->d_lock in lookup_open() -> d_lookup().
++ * Holding this lock ensures we cannot race with
++ * __nfs_lookup_revalidate() and removes any need
++ * for further barriers.
++ */
++ lockdep_assert_held(&dentry->d_lock);
++
++ dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++}
++
++static void unblock_revalidate(struct dentry *dentry)
++{
++ /* store_release ensures wait_var_event() sees the update */
++ smp_store_release(&dentry->d_fsdata, NULL);
++ wake_up_var(&dentry->d_fsdata);
++}
++
+ /*
+ * A weaker form of d_revalidate for revalidating just the d_inode(dentry)
+ * when we don't really care about the dentry name. This is called when a
+@@ -2499,15 +2522,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
+ spin_unlock(&dentry->d_lock);
+ goto out;
+ }
+- /* old devname */
+- kfree(dentry->d_fsdata);
+- dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++ block_revalidate(dentry);
+
+ spin_unlock(&dentry->d_lock);
+ error = nfs_safe_remove(dentry);
+ nfs_dentry_remove_handle_error(dir, dentry, error);
+- dentry->d_fsdata = NULL;
+- wake_up_var(&dentry->d_fsdata);
++ unblock_revalidate(dentry);
+ out:
+ trace_nfs_unlink_exit(dir, dentry, error);
+ return error;
+@@ -2619,8 +2639,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
+ {
+ struct dentry *new_dentry = data->new_dentry;
+
+- new_dentry->d_fsdata = NULL;
+- wake_up_var(&new_dentry->d_fsdata);
++ unblock_revalidate(new_dentry);
+ }
+
+ /*
+@@ -2682,11 +2701,6 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
+ goto out;
+- if (new_dentry->d_fsdata) {
+- /* old devname */
+- kfree(new_dentry->d_fsdata);
+- new_dentry->d_fsdata = NULL;
+- }
+
+ spin_lock(&new_dentry->d_lock);
+ if (d_count(new_dentry) > 2) {
+@@ -2708,7 +2722,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ new_dentry = dentry;
+ new_inode = NULL;
+ } else {
+- new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++ block_revalidate(new_dentry);
+ must_unblock = true;
+ spin_unlock(&new_dentry->d_lock);
+ }
+@@ -2720,6 +2734,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+ must_unblock ? nfs_unblock_rename : NULL);
+ if (IS_ERR(task)) {
++ if (must_unblock)
++ unblock_revalidate(new_dentry);
+ error = PTR_ERR(task);
+ goto out;
+ }
+@@ -2968,7 +2984,7 @@ static u64 nfs_access_login_time(const struct task_struct *task,
+ rcu_read_lock();
+ for (;;) {
+ parent = rcu_dereference(task->real_parent);
+- pcred = rcu_dereference(parent->cred);
++ pcred = __task_cred(parent);
+ if (parent == task || cred_fscmp(pcred, cred) != 0)
+ break;
+ task = parent;
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index f6c74f4246917f..258521d5125edd 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -141,8 +141,6 @@ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ ssize_t ret;
+
+- VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
+-
+ if (iov_iter_rw(iter) == READ)
+ ret = nfs_file_direct_read(iocb, iter, true);
+ else
+@@ -205,9 +203,10 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
+ kref_put(&dreq->kref, nfs_direct_req_free);
+ }
+
+-ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
++ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
+ {
+- return dreq->bytes_left;
++ loff_t start = offset - dreq->io_start;
++ return dreq->max_count - start;
+ }
+ EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
+
+@@ -667,10 +666,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
+ LIST_HEAD(mds_list);
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
++ nfs_commit_begin(cinfo.mds);
+ nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+ res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+- if (res < 0) /* res == -ENOMEM */
+- nfs_direct_write_reschedule(dreq);
++ if (res < 0) { /* res == -ENOMEM */
++ spin_lock(&dreq->lock);
++ if (dreq->flags == 0)
++ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
++ spin_unlock(&dreq->lock);
++ }
++ if (nfs_commit_end(cinfo.mds))
++ nfs_direct_write_complete(dreq);
+ }
+
+ static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index ce8f8934bca517..569ae4ec608455 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -883,7 +883,7 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ NFS4_MAX_UINT64,
+ IOMODE_READ,
+ false,
+- GFP_KERNEL);
++ nfs_io_gfp_mask());
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+@@ -907,7 +907,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ false,
+- GFP_NOFS);
++ nfs_io_gfp_mask());
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index ef817a0475ffa6..3e724cb7ef01d8 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -2016,7 +2016,7 @@ static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
+ for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
+ mirror = flseg->mirror_array[idx];
+ mirror_ds = mirror->mirror_ds;
+- if (!mirror_ds)
++ if (IS_ERR_OR_NULL(mirror_ds))
+ continue;
+ ds = mirror->mirror_ds->ds;
+ if (!ds)
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 853e8d609bb3bc..41126d6dcd760d 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -1111,9 +1111,12 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
+ ctx->acdirmax = data->acdirmax;
+ ctx->need_mount = false;
+
+- memcpy(sap, &data->addr, sizeof(data->addr));
+- ctx->nfs_server.addrlen = sizeof(data->addr);
+- ctx->nfs_server.port = ntohs(data->addr.sin_port);
++ if (!is_remount_fc(fc)) {
++ memcpy(sap, &data->addr, sizeof(data->addr));
++ ctx->nfs_server.addrlen = sizeof(data->addr);
++ ctx->nfs_server.port = ntohs(data->addr.sin_port);
++ }
++
+ if (sap->ss_family != AF_INET ||
+ !nfs_verify_server_address(sap))
+ goto out_no_address;
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index b05717fe0d4e4f..60a3c28784e0bc 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -307,11 +307,11 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+ struct inode *inode = sreq->rreq->inode;
+ struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+ struct page *page;
++ unsigned long idx;
+ int err;
+ pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+ pgoff_t last = ((sreq->start + sreq->len -
+ sreq->transferred - 1) >> PAGE_SHIFT);
+- XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
+
+ nfs_pageio_init_read(&pgio, inode, false,
+ &nfs_async_read_completion_ops);
+@@ -322,19 +322,14 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+
+ pgio.pg_netfs = netfs; /* used in completion */
+
+- xas_lock(&xas);
+- xas_for_each(&xas, page, last) {
++ xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
+ /* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+- xas_pause(&xas);
+- xas_unlock(&xas);
+ err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+ if (err < 0) {
+ netfs->error = err;
+ goto out;
+ }
+- xas_lock(&xas);
+ }
+- xas_unlock(&xas);
+ out:
+ nfs_pageio_complete_read(&pgio);
+ nfs_netfs_put(netfs);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index e21c073158e5bd..ca76b0b51b7791 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -2426,12 +2426,21 @@ EXPORT_SYMBOL_GPL(nfs_net_id);
+
+ static int nfs_net_init(struct net *net)
+ {
++ struct nfs_net *nn = net_generic(net, nfs_net_id);
++
+ nfs_clients_init(net);
++
++ if (!rpc_proc_register(net, &nn->rpcstats)) {
++ nfs_clients_exit(net);
++ return -ENOMEM;
++ }
++
+ return nfs_fs_proc_net_init(net);
+ }
+
+ static void nfs_net_exit(struct net *net)
+ {
++ rpc_proc_unregister(net, "nfs");
+ nfs_fs_proc_net_exit(net);
+ nfs_clients_exit(net);
+ }
+@@ -2486,15 +2495,12 @@ static int __init init_nfs_fs(void)
+ if (err)
+ goto out1;
+
+- rpc_proc_register(&init_net, &nfs_rpcstat);
+-
+ err = register_nfs_fs();
+ if (err)
+ goto out0;
+
+ return 0;
+ out0:
+- rpc_proc_unregister(&init_net, "nfs");
+ nfs_destroy_directcache();
+ out1:
+ nfs_destroy_writepagecache();
+@@ -2524,7 +2530,6 @@ static void __exit exit_nfs_fs(void)
+ nfs_destroy_inodecache();
+ nfs_destroy_nfspagecache();
+ unregister_pernet_subsys(&nfs_net_ops);
+- rpc_proc_unregister(&init_net, "nfs");
+ unregister_nfs_fs();
+ nfs_fs_proc_exit();
+ nfsiod_stop();
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 9c9cf764f6000d..8bceaac2205c87 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -449,8 +449,6 @@ int nfs_try_get_tree(struct fs_context *);
+ int nfs_get_tree_common(struct fs_context *);
+ void nfs_kill_super(struct super_block *);
+
+-extern struct rpc_stat nfs_rpcstat;
+-
+ extern int __init register_nfs_fs(void);
+ extern void __exit unregister_nfs_fs(void);
+ extern bool nfs_sb_active(struct super_block *sb);
+@@ -655,7 +653,7 @@ extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
+ /* direct.c */
+ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ struct nfs_direct_req *dreq);
+-extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
++extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset);
+
+ /* nfs4proc.c */
+ extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+@@ -712,9 +710,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
+ if ((bsize & (bsize - 1)) || nrbitsp) {
+ unsigned char nrbits;
+
+- for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
++ for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
+ ;
+- bsize = 1 << nrbits;
++ bsize = 1UL << nrbits;
+ if (nrbitsp)
+ *nrbitsp = nrbits;
+ }
+diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
+index c8374f74dce114..a68b21603ea9a8 100644
+--- a/fs/nfs/netns.h
++++ b/fs/nfs/netns.h
+@@ -9,6 +9,7 @@
+ #include <linux/nfs4.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <linux/sunrpc/stats.h>
+
+ struct bl_dev_msg {
+ int32_t status;
+@@ -34,6 +35,7 @@ struct nfs_net {
+ struct nfs_netns_client *nfs_client;
+ spinlock_t nfs_client_lock;
+ ktime_t boot_time;
++ struct rpc_stat rpcstats;
+ #ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_nfsfs;
+ #endif
+diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
+index b59876b01a1e3c..0282d93c8bccb3 100644
+--- a/fs/nfs/nfs42.h
++++ b/fs/nfs/nfs42.h
+@@ -55,11 +55,14 @@ int nfs42_proc_removexattr(struct inode *inode, const char *name);
+ * They would be 7 bytes long in the eventual buffer ("user.x\0"), and
+ * 8 bytes long XDR-encoded.
+ *
+- * Include the trailing eof word as well.
++ * Include the trailing eof word as well and make the result a multiple
++ * of 4 bytes.
+ */
+ static inline u32 nfs42_listxattr_xdrsize(u32 buflen)
+ {
+- return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
++ u32 size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;
++
++ return (size + 3) & ~3;
+ }
+ #endif /* CONFIG_NFS_V4_2 */
+ #endif /* __LINUX_FS_NFS_NFS4_2_H */
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 28704f924612c4..531c9c20ef1d1b 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -218,7 +218,7 @@ static int handle_async_copy(struct nfs42_copy_res *res,
+
+ if (dst_server != src_server) {
+ spin_lock(&src_server->nfs_client->cl_lock);
+- list_add_tail(&copy->src_copies, &src_server->ss_copies);
++ list_add_tail(&copy->src_copies, &src_server->ss_src_copies);
+ spin_unlock(&src_server->nfs_client->cl_lock);
+ }
+
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 11e3a285594c23..ac80f87cb9d996 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -231,9 +231,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
+ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+-
+- if (test_bit(NFS_CS_DS, &cl_init->init_flags))
+- __set_bit(NFS_CS_DS, &clp->cl_flags);
++ if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
++ __set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ /*
+ * Set up the connection to the server before we add to the
+ * global list.
+@@ -1011,7 +1010,6 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
+ if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+ __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
+- __set_bit(NFS_CS_DS, &cl_init.init_flags);
+ __set_bit(NFS_CS_PNFS, &cl_init.init_flags);
+ cl_init.max_connect = NFS_MAX_TRANSPORTS;
+ /*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5ee283eb9660b8..299ea2b86df668 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -170,6 +170,7 @@ static int nfs4_map_errors(int err)
+ case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
++ case -NFS4ERR_RETURNCONFLICT:
+ return -EREMOTEIO;
+ case -NFS4ERR_WRONGSEC:
+ case -NFS4ERR_WRONG_CRED:
+@@ -558,6 +559,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
++ case -NFS4ERR_RETURNCONFLICT:
+ exception->delay = 1;
+ return 0;
+
+@@ -4001,6 +4003,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
+ }
+ }
+
++static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
++ struct nfs4_pathname *path2)
++{
++ int i;
++
++ if (path1->ncomponents != path2->ncomponents)
++ return false;
++ for (i = 0; i < path1->ncomponents; i++) {
++ if (path1->components[i].len != path2->components[i].len)
++ return false;
++ if (memcmp(path1->components[i].data, path2->components[i].data,
++ path1->components[i].len))
++ return false;
++ }
++ return true;
++}
++
+ static int _nfs4_discover_trunking(struct nfs_server *server,
+ struct nfs_fh *fhandle)
+ {
+@@ -4034,9 +4053,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
+ if (status)
+ goto out_free_3;
+
+- for (i = 0; i < locations->nlocations; i++)
++ for (i = 0; i < locations->nlocations; i++) {
++ if (!_is_same_nfs4_pathname(&locations->fs_path,
++ &locations->locations[i].rootpath))
++ continue;
+ test_fs_location_for_trunking(&locations->locations[i], clp,
+ server);
++ }
+ out_free_3:
+ kfree(locations->fattr);
+ out_free_2:
+@@ -5433,7 +5456,7 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
+ struct rpc_message *msg = &task->tk_msg;
+
+ if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
+- server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
++ task->tk_status == -ENOTSUPP) {
+ server->caps &= ~NFS_CAP_READ_PLUS;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
+ rpc_restart_call_prepare(task);
+@@ -5622,7 +5645,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
++ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
+ }
+
+ static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+@@ -5663,7 +5686,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
+ data->res.server = server;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
+- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
++ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
++ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ }
+
+ static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
+@@ -6244,6 +6268,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
+ if (status == 0)
+ nfs_setsecurity(inode, fattr);
+
++ nfs_free_fattr(fattr);
+ return status;
+ }
+ #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
+@@ -8791,7 +8816,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+ calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
+ #endif
+- if (test_bit(NFS_CS_DS, &clp->cl_flags))
++ if (test_bit(NFS_CS_PNFS, &clp->cl_flags))
+ calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
+ msg.rpc_argp = &calldata->args;
+ msg.rpc_resp = &calldata->res;
+@@ -8934,6 +8959,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+
+ sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+
++try_again:
+ /* Test connection for session trunking. Async exchange_id call */
+ task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
+ if (IS_ERR(task))
+@@ -8946,11 +8972,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+
+ if (status == 0)
+ rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+- else if (rpc_clnt_xprt_switch_has_addr(clnt,
++ else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
+ (struct sockaddr *)&xprt->addr))
+ rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
+
+ rpc_put_task(task);
++ if (status == -NFS4ERR_DELAY) {
++ ssleep(1);
++ goto try_again;
++ }
+ }
+ EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
+
+@@ -9656,6 +9686,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+ status = -EBUSY;
+ break;
+ case -NFS4ERR_RECALLCONFLICT:
++ case -NFS4ERR_RETURNCONFLICT:
+ status = -ERECALLCONFLICT;
+ break;
+ case -NFS4ERR_DELEG_REVOKED:
+@@ -9814,13 +9845,16 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
+ fallthrough;
+ default:
+ task->tk_status = 0;
++ lrp->res.lrs_present = 0;
+ fallthrough;
+ case 0:
+ break;
+ case -NFS4ERR_DELAY:
+- if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
+- break;
+- goto out_restart;
++ if (nfs4_async_handle_error(task, server, NULL, NULL) ==
++ -EAGAIN)
++ goto out_restart;
++ lrp->res.lrs_present = 0;
++ break;
+ }
+ return;
+ out_restart:
+@@ -10578,29 +10612,33 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+ ssize_t error, error2, error3;
++ size_t left = size;
+
+- error = generic_listxattr(dentry, list, size);
++ error = generic_listxattr(dentry, list, left);
+ if (error < 0)
+ return error;
+ if (list) {
+ list += error;
+- size -= error;
++ left -= error;
+ }
+
+- error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
++ error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
+ if (error2 < 0)
+ return error2;
+
+ if (list) {
+ list += error2;
+- size -= error2;
++ left -= error2;
+ }
+
+- error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
++ error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
+ if (error3 < 0)
+ return error3;
+
+- return error + error2 + error3;
++ error += error2 + error3;
++ if (size && error > size)
++ return -ERANGE;
++ return error;
+ }
+
+ static void nfs4_enable_swap(struct inode *inode)
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 9a5d911a7edc77..794bb4aa588d39 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1597,7 +1597,7 @@ static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state
+ complete(&copy->completion);
+ }
+ }
+- list_for_each_entry(copy, &sp->so_server->ss_copies, src_copies) {
++ list_for_each_entry(copy, &sp->so_server->ss_src_copies, src_copies) {
+ if ((test_bit(NFS_CLNT_SRC_SSC_COPY_STATE, &state->flags) &&
+ !nfs4_stateid_match_other(&state->stateid,
+ &copy->parent_src_state->stateid)))
+@@ -1957,6 +1957,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
+ set_bit(ops->owner_flag_bit, &sp->so_flags);
+ nfs4_put_state_owner(sp);
+ status = nfs4_recovery_handle_error(clp, status);
++ nfs4_free_state_owners(&freeme);
+ return (status != 0) ? status : -EAGAIN;
+ }
+
+@@ -2117,6 +2118,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
+ {
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_fs_locations *locations = NULL;
++ struct nfs_fattr *fattr;
+ struct inode *inode;
+ struct page *page;
+ int status, result;
+@@ -2126,19 +2128,16 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
+ (unsigned long long)server->fsid.minor,
+ clp->cl_hostname);
+
+- result = 0;
+ page = alloc_page(GFP_KERNEL);
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+- if (page == NULL || locations == NULL) {
+- dprintk("<-- %s: no memory\n", __func__);
+- goto out;
+- }
+- locations->fattr = nfs_alloc_fattr();
+- if (locations->fattr == NULL) {
++ fattr = nfs_alloc_fattr();
++ if (page == NULL || locations == NULL || fattr == NULL) {
+ dprintk("<-- %s: no memory\n", __func__);
++ result = 0;
+ goto out;
+ }
+
++ locations->fattr = fattr;
+ inode = d_inode(server->super->s_root);
+ result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
+ page, cred);
+diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
+index 7600100ba26f02..432612d2243742 100644
+--- a/fs/nfs/nfsroot.c
++++ b/fs/nfs/nfsroot.c
+@@ -175,10 +175,10 @@ static int __init root_nfs_cat(char *dest, const char *src,
+ size_t len = strlen(dest);
+
+ if (len && dest[len - 1] != ',')
+- if (strlcat(dest, ",", destlen) > destlen)
++ if (strlcat(dest, ",", destlen) >= destlen)
+ return -1;
+
+- if (strlcat(dest, src, destlen) > destlen)
++ if (strlcat(dest, src, destlen) >= destlen)
+ return -1;
+ return 0;
+ }
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 6efb5068c116e0..040b6b79c75e59 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1545,6 +1545,11 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+ continue;
+ } else if (index == prev->wb_index + 1)
+ continue;
++ /*
++ * We will submit more requests after these. Indicate
++ * this to the underlying layers.
++ */
++ desc->pg_moreio = 1;
+ nfs_pageio_complete(desc);
+ break;
+ }
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 84343aefbbd64c..3d1a9f8634a999 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1172,10 +1172,9 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+ LIST_HEAD(freeme);
+
+ spin_lock(&inode->i_lock);
+- if (!pnfs_layout_is_valid(lo) ||
+- !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
++ if (!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ goto out_unlock;
+- if (stateid) {
++ if (stateid && pnfs_layout_is_valid(lo)) {
+ u32 seq = be32_to_cpu(arg_stateid->seqid);
+
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+@@ -1997,6 +1996,14 @@ pnfs_update_layout(struct inode *ino,
+ }
+
+ lookup_again:
++ if (!nfs4_valid_open_stateid(ctx->state)) {
++ trace_pnfs_update_layout(ino, pos, count,
++ iomode, lo, lseg,
++ PNFS_UPDATE_LAYOUT_INVALID_OPEN);
++ lseg = ERR_PTR(-EIO);
++ goto out;
++ }
++
+ lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+ if (IS_ERR(lseg))
+ goto out;
+@@ -2729,7 +2736,8 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
+ if (pgio->pg_dreq == NULL)
+ rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+ else
+- rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
++ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq,
++ req_offset(req));
+
+ pgio->pg_lseg =
+ pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index afd23910f3bffc..88e061bd711b74 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -919,6 +919,8 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
+
+ list_for_each_entry(da, &ds->ds_addrs, da_node) {
++ char servername[48];
++
+ dprintk("%s: DS %s: trying address %s\n",
+ __func__, ds->ds_remotestr, da->da_remotestr);
+
+@@ -929,6 +931,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ .dstaddr = (struct sockaddr *)&da->da_addr,
+ .addrlen = da->da_addrlen,
+ .servername = clp->cl_hostname,
++ .xprtsec = clp->cl_xprtsec,
+ };
+ struct nfs4_add_xprt_data xprtdata = {
+ .clp = clp,
+@@ -938,10 +941,45 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ .data = &xprtdata,
+ };
+
+- if (da->da_transport != clp->cl_proto)
++ if (da->da_transport != clp->cl_proto &&
++ clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
+ continue;
++ if (da->da_transport == XPRT_TRANSPORT_TCP &&
++ mds_srv->nfs_client->cl_proto ==
++ XPRT_TRANSPORT_TCP_TLS) {
++ struct sockaddr *addr =
++ (struct sockaddr *)&da->da_addr;
++ struct sockaddr_in *sin =
++ (struct sockaddr_in *)&da->da_addr;
++ struct sockaddr_in6 *sin6 =
++ (struct sockaddr_in6 *)&da->da_addr;
++
++ /* for NFS with TLS we need to supply a correct
++ * servername of the trunked transport, not the
++ * servername of the main transport stored in
++ * clp->cl_hostname. And set the protocol to
++ * indicate the use of TLS
++ */
++ servername[0] = '\0';
++ switch(addr->sa_family) {
++ case AF_INET:
++ snprintf(servername, sizeof(servername),
++ "%pI4", &sin->sin_addr.s_addr);
++ break;
++ case AF_INET6:
++ snprintf(servername, sizeof(servername),
++ "%pI6", &sin6->sin6_addr);
++ break;
++ default:
++ /* do not consider this address */
++ continue;
++ }
++ xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
++ xprt_args.servername = servername;
++ }
+ if (da->da_addr.ss_family != clp->cl_addr.ss_family)
+ continue;
++
+ /**
+ * Test this address for session trunking and
+ * add as an alias
+@@ -953,6 +991,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
+ if (xprtdata.cred)
+ put_cred(xprtdata.cred);
+ } else {
++ if (da->da_transport == XPRT_TRANSPORT_TCP &&
++ mds_srv->nfs_client->cl_proto ==
++ XPRT_TRANSPORT_TCP_TLS)
++ da->da_transport = XPRT_TRANSPORT_TCP_TLS;
+ clp = nfs4_set_ds_client(mds_srv,
+ &da->da_addr,
+ da->da_addrlen,
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 7dc21a48e3e7b6..a142287d86f68e 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -305,6 +305,8 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+ new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
++ if (nfs_netfs_folio_unlock(folio))
++ folio_unlock(folio);
+ goto out;
+ }
+
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 0d6473cb00cb3e..f63513e477c50c 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -47,6 +47,7 @@
+ #include <linux/vfs.h>
+ #include <linux/inet.h>
+ #include <linux/in6.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <net/ipv6.h>
+ #include <linux/netdevice.h>
+@@ -223,6 +224,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
+ ret = fn(server, data);
+ if (ret)
+ goto out;
++ cond_resched();
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 0e27a2e4e68b84..13818129d268fe 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -41,7 +41,7 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio)
+ error:
+ folio_set_error(folio);
+ folio_unlock(folio);
+- return -EIO;
++ return error;
+ }
+
+ static const char *nfs_get_link(struct dentry *dentry,
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 9d82d50ce0b12d..7d03811f44a4bb 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -668,8 +668,10 @@ static int nfs_writepage_locked(struct folio *folio,
+ int err;
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+- NFS_SERVER(inode)->write_congested)
++ NFS_SERVER(inode)->write_congested) {
++ folio_redirty_for_writepage(wbc, folio);
+ return AOP_WRITEPAGE_ACTIVATE;
++ }
+
+ nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+ nfs_pageio_init_write(&pgio, inode, 0, false,
+@@ -1659,7 +1661,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
+ !atomic_read(&cinfo->rpcs_out));
+ }
+
+-static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
+ {
+ atomic_inc(&cinfo->rpcs_out);
+ }
+diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
+index fdf2aad7347090..e6beaaf4f1700b 100644
+--- a/fs/nfsd/auth.c
++++ b/fs/nfsd/auth.c
+@@ -26,8 +26,6 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+ int i;
+ int flags = nfsexp_flags(rqstp, exp);
+
+- validate_process_creds();
+-
+ /* discard any old override before preparing the new set */
+ revert_creds(get_cred(current_real_cred()));
+ new = prepare_creds();
+@@ -81,10 +79,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+ else
+ new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
+ new->cap_permitted);
+- validate_process_creds();
+ put_cred(override_creds(new));
+ put_cred(new);
+- validate_process_creds();
+ return 0;
+
+ oom:
+diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
+index 929248c6ca84c4..66a05fefae98ea 100644
+--- a/fs/nfsd/cache.h
++++ b/fs/nfsd/cache.h
+@@ -80,12 +80,10 @@ enum {
+
+ int nfsd_drc_slab_create(void);
+ void nfsd_drc_slab_free(void);
+-int nfsd_net_reply_cache_init(struct nfsd_net *nn);
+-void nfsd_net_reply_cache_destroy(struct nfsd_net *nn);
+ int nfsd_reply_cache_init(struct nfsd_net *);
+ void nfsd_reply_cache_shutdown(struct nfsd_net *);
+-int nfsd_cache_lookup(struct svc_rqst *rqstp,
+- struct nfsd_cacherep **cacherep);
++int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
++ unsigned int len, struct nfsd_cacherep **cacherep);
+ void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ int cachetype, __be32 *statp);
+ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v);
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 11a0eaa2f91407..b7da17e530077e 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -339,12 +339,16 @@ static int export_stats_init(struct export_stats *stats)
+
+ static void export_stats_reset(struct export_stats *stats)
+ {
+- nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
++ if (stats)
++ nfsd_percpu_counters_reset(stats->counter,
++ EXP_STATS_COUNTERS_NUM);
+ }
+
+ static void export_stats_destroy(struct export_stats *stats)
+ {
+- nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
++ if (stats)
++ nfsd_percpu_counters_destroy(stats->counter,
++ EXP_STATS_COUNTERS_NUM);
+ }
+
+ static void svc_export_put(struct kref *ref)
+@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *ref)
+ path_put(&exp->ex_path);
+ auth_domain_put(exp->ex_client);
+ nfsd4_fslocs_free(&exp->ex_fslocs);
+- export_stats_destroy(&exp->ex_stats);
++ export_stats_destroy(exp->ex_stats);
++ kfree(exp->ex_stats);
+ kfree(exp->ex_uuid);
+ kfree_rcu(exp, ex_rcu);
+ }
+@@ -767,13 +772,15 @@ static int svc_export_show(struct seq_file *m,
+ seq_putc(m, '\t');
+ seq_escape(m, exp->ex_client->name, " \t\n\\");
+ if (export_stats) {
+- seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
++ struct percpu_counter *counter = exp->ex_stats->counter;
++
++ seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
+ seq_printf(m, "\tfh_stale: %lld\n",
+- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
++ percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
+ seq_printf(m, "\tio_read: %lld\n",
+- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
++ percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
+ seq_printf(m, "\tio_write: %lld\n",
+- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
++ percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
+ seq_putc(m, '\n');
+ return 0;
+ }
+@@ -819,7 +826,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
+ new->ex_layout_types = 0;
+ new->ex_uuid = NULL;
+ new->cd = item->cd;
+- export_stats_reset(&new->ex_stats);
++ export_stats_reset(new->ex_stats);
+ }
+
+ static void export_update(struct cache_head *cnew, struct cache_head *citem)
+@@ -856,7 +863,14 @@ static struct cache_head *svc_export_alloc(void)
+ if (!i)
+ return NULL;
+
+- if (export_stats_init(&i->ex_stats)) {
++ i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
++ if (!i->ex_stats) {
++ kfree(i);
++ return NULL;
++ }
++
++ if (export_stats_init(i->ex_stats)) {
++ kfree(i->ex_stats);
+ kfree(i);
+ return NULL;
+ }
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index 2df8ae25aad302..ca9dc230ae3d0b 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -64,10 +64,10 @@ struct svc_export {
+ struct cache_head h;
+ struct auth_domain * ex_client;
+ int ex_flags;
++ int ex_fsid;
+ struct path ex_path;
+ kuid_t ex_anon_uid;
+ kgid_t ex_anon_gid;
+- int ex_fsid;
+ unsigned char * ex_uuid; /* 16 byte fsid */
+ struct nfsd4_fs_locations ex_fslocs;
+ uint32_t ex_nflavors;
+@@ -76,8 +76,8 @@ struct svc_export {
+ struct nfsd4_deviceid_map *ex_devid_map;
+ struct cache_detail *cd;
+ struct rcu_head ex_rcu;
+- struct export_stats ex_stats;
+ unsigned long ex_xprtsec_modes;
++ struct export_stats *ex_stats;
+ };
+
+ /* an "export key" (expkey) maps a filehandle fragment to an
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index ee9c923192e08a..6f2bcbfde45e69 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -718,7 +718,7 @@ nfsd_file_cache_init(void)
+
+ ret = rhltable_init(&nfsd_file_rhltable, &nfsd_file_rhash_params);
+ if (ret)
+- return ret;
++ goto out;
+
+ ret = -ENOMEM;
+ nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
+@@ -770,6 +770,8 @@ nfsd_file_cache_init(void)
+
+ INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
+ out:
++ if (ret)
++ clear_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags);
+ return ret;
+ out_notifier:
+ lease_unregister_notifier(&nfsd_file_lease_notifier);
+@@ -989,22 +991,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_file *new, *nf;
+- const struct cred *cred;
++ bool stale_retry = true;
+ bool open_retry = true;
+ struct inode *inode;
+ __be32 status;
+ int ret;
+
++retry:
+ status = fh_verify(rqstp, fhp, S_IFREG,
+ may_flags|NFSD_MAY_OWNER_OVERRIDE);
+ if (status != nfs_ok)
+ return status;
+ inode = d_inode(fhp->fh_dentry);
+- cred = get_current_cred();
+
+-retry:
+ rcu_read_lock();
+- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ rcu_read_unlock();
+
+ if (nf) {
+@@ -1026,7 +1027,7 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+
+ rcu_read_lock();
+ spin_lock(&inode->i_lock);
+- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
++ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
+ if (unlikely(nf)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+@@ -1041,8 +1042,6 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ if (likely(ret == 0))
+ goto open_file;
+
+- if (ret == -EEXIST)
+- goto retry;
+ trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret);
+ status = nfserr_jukebox;
+ goto construction_err;
+@@ -1057,7 +1056,9 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ status = nfserr_jukebox;
+ goto construction_err;
+ }
++ nfsd_file_put(nf);
+ open_retry = false;
++ fh_put(fhp);
+ goto retry;
+ }
+ this_cpu_inc(nfsd_file_cache_hits);
+@@ -1074,7 +1075,6 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ nfsd_file_check_write_error(nf);
+ *pnf = nf;
+ }
+- put_cred(cred);
+ trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
+ return status;
+
+@@ -1088,8 +1088,20 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ status = nfs_ok;
+ trace_nfsd_file_opened(nf, status);
+ } else {
+- status = nfsd_open_verified(rqstp, fhp, may_flags,
+- &nf->nf_file);
++ ret = nfsd_open_verified(rqstp, fhp, may_flags,
++ &nf->nf_file);
++ if (ret == -EOPENSTALE && stale_retry) {
++ stale_retry = false;
++ nfsd_file_unhash(nf);
++ clear_and_wake_up_bit(NFSD_FILE_PENDING,
++ &nf->nf_flags);
++ if (refcount_dec_and_test(&nf->nf_ref))
++ nfsd_file_free(nf);
++ nf = NULL;
++ fh_put(fhp);
++ goto retry;
++ }
++ status = nfserrno(ret);
+ trace_nfsd_file_open(nf, status);
+ }
+ } else
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index ec49b200b79762..9bfca3dda63d33 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -11,8 +11,10 @@
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <linux/filelock.h>
++#include <linux/nfs4.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/siphash.h>
++#include <linux/sunrpc/stats.h>
+
+ /* Hash tables for nfs4_clientid state */
+ #define CLIENT_HASH_BITS 4
+@@ -26,10 +28,22 @@ struct nfsd4_client_tracking_ops;
+
+ enum {
+ /* cache misses due only to checksum comparison failures */
+- NFSD_NET_PAYLOAD_MISSES,
++ NFSD_STATS_PAYLOAD_MISSES,
+ /* amount of memory (in bytes) currently consumed by the DRC */
+- NFSD_NET_DRC_MEM_USAGE,
+- NFSD_NET_COUNTERS_NUM
++ NFSD_STATS_DRC_MEM_USAGE,
++ NFSD_STATS_RC_HITS, /* repcache hits */
++ NFSD_STATS_RC_MISSES, /* repcache misses */
++ NFSD_STATS_RC_NOCACHE, /* uncached reqs */
++ NFSD_STATS_FH_STALE, /* FH stale error */
++ NFSD_STATS_IO_READ, /* bytes returned to read requests */
++ NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
++#ifdef CONFIG_NFSD_V4
++ NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
++ NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
++#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
++ NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */
++#endif
++ NFSD_STATS_COUNTERS_NUM
+ };
+
+ /*
+@@ -169,7 +183,10 @@ struct nfsd_net {
+ atomic_t num_drc_entries;
+
+ /* Per-netns stats counters */
+- struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];
++ struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
++
++ /* sunrpc svc stats */
++ struct svc_stat nfsd_svcstats;
+
+ /* longest hash chain seen */
+ unsigned int longest_chain;
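
The enum reshuffle above moves the NFSv4 per-operation counters into one contiguous slice of the per-netns counter array, so NFSD_STATS_NFS4_OP() is plain offset arithmetic. A minimal userspace sketch of that indexing scheme, assuming an illustrative LAST_NFS4_OP value (the real one comes from linux/nfs4.h):

#include <stdio.h>

#define LAST_NFS4_OP 76			/* illustrative only */

enum {
	STATS_RC_HITS,
	STATS_FIRST_NFS4_OP,		/* per-op counters start here */
	STATS_LAST_NFS4_OP = STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
#define STATS_NFS4_OP(op) (STATS_FIRST_NFS4_OP + (op))
	STATS_COUNTERS_NUM
};

static long long counter[STATS_COUNTERS_NUM];

int main(void)
{
	/* bump the counter for op number 7, as the dispatch path would */
	counter[STATS_NFS4_OP(7)]++;
	printf("slot %d = %lld\n", STATS_NFS4_OP(7), counter[STATS_NFS4_OP(7)]);
	return 0;
}
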
+diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
+index 7a806ac13e317e..8cca1329f3485c 100644
+--- a/fs/nfsd/nfs4idmap.c
++++ b/fs/nfsd/nfs4idmap.c
+@@ -581,6 +581,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
+ .id = id,
+ .type = type,
+ };
++ __be32 status = nfs_ok;
+ __be32 *p;
+ int ret;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+@@ -593,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr,
+ return nfserrno(ret);
+ ret = strlen(item->name);
+ WARN_ON_ONCE(ret > IDMAP_NAMESZ);
++
+ p = xdr_reserve_space(xdr, ret + 4);
+- if (!p)
+- return nfserr_resource;
+- p = xdr_encode_opaque(p, item->name, ret);
++ if (unlikely(!p)) {
++ status = nfserr_resource;
++ goto out_put;
++ }
++ xdr_encode_opaque(p, item->name, ret);
++out_put:
+ cache_put(&item->h, nn->idtoname_cache);
+- return 0;
++ return status;
+ }
+
+ static bool
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 4199ede0583c7d..ae0057c54ef4ed 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2218,7 +2218,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
+ const struct nfsd4_layout_ops *ops;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+- int accmode = NFSD_MAY_READ_IF_EXEC;
++ int accmode = NFSD_MAY_READ_IF_EXEC | NFSD_MAY_OWNER_OVERRIDE;
+
+ switch (lgp->lg_seg.iomode) {
+ case IOMODE_READ:
+@@ -2308,7 +2308,8 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+
+- nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
++ nfserr = fh_verify(rqstp, current_fh, 0,
++ NFSD_MAY_WRITE | NFSD_MAY_OWNER_OVERRIDE);
+ if (nfserr)
+ goto out;
+
+@@ -2477,10 +2478,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
+ return rpc_success;
+ }
+
+-static inline void nfsd4_increment_op_stats(u32 opnum)
++static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum)
+ {
+ if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]);
+ }
+
+ static const struct nfsd4_operation nfsd4_ops[];
+@@ -2755,7 +2756,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
+ status, nfsd4_op_name(op->opnum));
+
+ nfsd4_cstate_clear_replay(cstate);
+- nfsd4_increment_op_stats(op->opnum);
++ nfsd4_increment_op_stats(nn, op->opnum);
+ }
+
+ fh_put(current_fh);
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 3509e73abe1f4b..4395577825a7fa 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -806,6 +806,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
+ ci = &cmsg->cm_u.cm_clntinfo;
+ if (get_user(namelen, &ci->cc_name.cn_len))
+ return -EFAULT;
++ if (!namelen) {
++ dprintk("%s: namelen should not be zero", __func__);
++ return -EINVAL;
++ }
+ name.data = memdup_user(&ci->cc_name.cn_id, namelen);
+ if (IS_ERR(name.data))
+ return PTR_ERR(name.data);
+@@ -828,6 +832,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg,
+ cnm = &cmsg->cm_u.cm_name;
+ if (get_user(namelen, &cnm->cn_len))
+ return -EFAULT;
++ if (!namelen) {
++ dprintk("%s: namelen should not be zero", __func__);
++ return -EINVAL;
++ }
+ name.data = memdup_user(&cnm->cn_id, namelen);
+ if (IS_ERR(name.data))
+ return PTR_ERR(name.data);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8534693eb6a497..f16bbbfcf672c8 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1087,7 +1087,8 @@ static void nfs4_free_deleg(struct nfs4_stid *stid)
+ * When a delegation is recalled, the filehandle is stored in the "new"
+ * filter.
+ * Every 30 seconds we swap the filters and clear the "new" one,
+- * unless both are empty of course.
++ * unless both are empty of course. This results in delegations for a
++ * given filehandle being blocked for between 30 and 60 seconds.
+ *
+ * Each filter is 256 bits. We hash the filehandle to 32bit and use the
+ * low 3 bytes as hash-table indices.
+@@ -1116,9 +1117,9 @@ static int delegation_blocked(struct knfsd_fh *fh)
+ if (ktime_get_seconds() - bd->swap_time > 30) {
+ bd->entries -= bd->old_entries;
+ bd->old_entries = bd->entries;
++ bd->new = 1-bd->new;
+ memset(bd->set[bd->new], 0,
+ sizeof(bd->set[0]));
+- bd->new = 1-bd->new;
+ bd->swap_time = ktime_get_seconds();
+ }
+ spin_unlock(&blocked_delegations_lock);
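
The comment above describes a two-generation Bloom filter: recalled filehandles are hashed into the "new" set, lookups consult both sets, and the periodic swap-and-clear ages entries out after two periods. The hunk also reorders the swap so the filter that gets cleared is the one that becomes "new". A userspace sketch of that rotation, with the 30-second timer replaced by an explicit rotate() call:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t set[2][4];	/* two 256-bit filters */
static int new_gen;

static void insert_hash(uint32_t h)
{
	/* low three bytes each index one bit, as in delegation_blocked() */
	for (int i = 0; i < 3; i++, h >>= 8)
		set[new_gen][(h & 0xff) >> 6] |= 1ULL << (h & 0x3f);
}

static int test_hash(uint32_t h)
{
	for (int i = 0; i < 3; i++, h >>= 8) {
		uint64_t bit = 1ULL << (h & 0x3f);

		if (!((set[0][(h & 0xff) >> 6] | set[1][(h & 0xff) >> 6]) & bit))
			return 0;
	}
	return 1;	/* possibly blocked; Bloom filters can false-positive */
}

static void rotate(void)
{
	new_gen = 1 - new_gen;			/* flip first, ... */
	memset(set[new_gen], 0, sizeof(set[new_gen]));	/* ...then clear */
}

int main(void)
{
	insert_hash(0x00112233);
	printf("%d\n", test_hash(0x00112233));	/* 1: blocked */
	rotate();
	printf("%d\n", test_hash(0x00112233));	/* 1: still in the old set */
	rotate();
	printf("%d\n", test_hash(0x00112233));	/* 0: aged out */
	return 0;
}
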
+@@ -2797,7 +2798,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
+
+ /* XXX: alternatively, we could get/drop in seq start/stop */
+ drop_client(clp);
+- return 0;
++ return seq_release(inode, file);
+ }
+
+ static const struct file_operations client_states_fops = {
+@@ -2886,12 +2887,9 @@ static void
+ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
+ {
+ struct nfs4_client *clp = cb->cb_clp;
+- struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+- spin_lock(&nn->client_lock);
+ clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
+- put_client_renew_locked(clp);
+- spin_unlock(&nn->client_lock);
++ drop_client(clp);
+ }
+
+ static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
+@@ -4944,10 +4942,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
+ */
+ fl->fl_break_time = 0;
+
+- spin_lock(&fp->fi_lock);
+ fp->fi_had_conflict = true;
+ nfsd_break_one_deleg(dp);
+- spin_unlock(&fp->fi_lock);
+ return false;
+ }
+
+@@ -5556,12 +5552,13 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ if (status)
+ goto out_unlock;
+
++ status = -EAGAIN;
++ if (fp->fi_had_conflict)
++ goto out_unlock;
++
+ spin_lock(&state_lock);
+ spin_lock(&fp->fi_lock);
+- if (fp->fi_had_conflict)
+- status = -EAGAIN;
+- else
+- status = hash_delegation_locked(dp, fp);
++ status = hash_delegation_locked(dp, fp);
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&state_lock);
+
+@@ -6274,7 +6271,7 @@ deleg_reaper(struct nfsd_net *nn)
+ list_add(&clp->cl_ra_cblist, &cblist);
+
+ /* release in nfsd4_cb_recall_any_release */
+- atomic_inc(&clp->cl_rpc_users);
++ kref_get(&clp->cl_nfsdfs.cl_ref);
+ set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
+ clp->cl_ra_time = ktime_get_boottime_seconds();
+ }
+@@ -7890,14 +7887,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ {
+ struct file_lock *fl;
+ int status = false;
+- struct nfsd_file *nf = find_any_file(fp);
++ struct nfsd_file *nf;
+ struct inode *inode;
+ struct file_lock_context *flctx;
+
++ spin_lock(&fp->fi_lock);
++ nf = find_any_file_locked(fp);
+ if (!nf) {
+ /* Any valid lock stateid should have some sort of access */
+ WARN_ON_ONCE(1);
+- return status;
++ goto out;
+ }
+
+ inode = file_inode(nf->nf_file);
+@@ -7913,7 +7912,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ }
+ spin_unlock(&flctx->flc_lock);
+ }
+- nfsd_file_put(nf);
++out:
++ spin_unlock(&fp->fi_lock);
+ return status;
+ }
+
+@@ -7923,10 +7923,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ * @cstate: NFSv4 COMPOUND state
+ * @u: RELEASE_LOCKOWNER arguments
+ *
+- * The lockowner's so_count is bumped when a lock record is added
+- * or when copying a conflicting lock. The latter case is brief,
+- * but can lead to fleeting false positives when looking for
+- * locks-in-use.
++ * Check if there are any locks still held and if not, free the lockowner
++ * and any lock state that is owned.
+ *
+ * Return values:
+ * %nfs_ok: lockowner released or not found
+@@ -7962,10 +7960,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ spin_unlock(&clp->cl_lock);
+ return nfs_ok;
+ }
+- if (atomic_read(&lo->lo_owner.so_count) != 2) {
+- spin_unlock(&clp->cl_lock);
+- nfs4_put_stateowner(&lo->lo_owner);
+- return nfserr_locks_held;
++
++ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
++ if (check_for_locks(stp->st_stid.sc_file, lo)) {
++ spin_unlock(&clp->cl_lock);
++ nfs4_put_stateowner(&lo->lo_owner);
++ return nfserr_locks_held;
++ }
+ }
+ unhash_lockowner_locked(lo);
+ while (!list_empty(&lo->lo_owner.so_stateids)) {
+@@ -8422,6 +8423,7 @@ __be32
+ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
+ {
+ __be32 status;
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct file_lock_context *ctx;
+ struct file_lock *fl;
+ struct nfs4_delegation *dp;
+@@ -8451,7 +8453,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
+ }
+ break_lease:
+ spin_unlock(&ctx->flc_lock);
+- nfsd_stats_wdeleg_getattr_inc();
++ nfsd_stats_wdeleg_getattr_inc(nn);
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 92c7dde148a4d8..76dfbb99277f05 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1245,14 +1245,6 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ return nfs_ok;
+ }
+
+-static __be32
+-nfsd4_decode_putpubfh(struct nfsd4_compoundargs *argp, union nfsd4_op_u *p)
+-{
+- if (argp->minorversion == 0)
+- return nfs_ok;
+- return nfserr_notsupp;
+-}
+-
+ static __be32
+ nfsd4_decode_read(struct nfsd4_compoundargs *argp, union nfsd4_op_u *u)
+ {
+@@ -2345,7 +2337,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
+ [OP_OPEN_CONFIRM] = nfsd4_decode_open_confirm,
+ [OP_OPEN_DOWNGRADE] = nfsd4_decode_open_downgrade,
+ [OP_PUTFH] = nfsd4_decode_putfh,
+- [OP_PUTPUBFH] = nfsd4_decode_putpubfh,
++ [OP_PUTPUBFH] = nfsd4_decode_noop,
+ [OP_PUTROOTFH] = nfsd4_decode_noop,
+ [OP_READ] = nfsd4_decode_read,
+ [OP_READDIR] = nfsd4_decode_readdir,
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 80621a70951073..c52132ecb339d5 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -176,27 +176,6 @@ void nfsd_drc_slab_free(void)
+ kmem_cache_destroy(drc_slab);
+ }
+
+-/**
+- * nfsd_net_reply_cache_init - per net namespace reply cache set-up
+- * @nn: nfsd_net being initialized
+- *
+- * Returns zero on succes; otherwise a negative errno is returned.
+- */
+-int nfsd_net_reply_cache_init(struct nfsd_net *nn)
+-{
+- return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+-/**
+- * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
+- * @nn: nfsd_net being freed
+- *
+- */
+-void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
+-{
+- nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ {
+ unsigned int hashsize;
+@@ -368,33 +347,52 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+ return freed;
+ }
+
+-/*
+- * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
++/**
++ * nfsd_cache_csum - Checksum incoming NFS Call arguments
++ * @buf: buffer containing a whole RPC Call message
++ * @start: starting byte of the NFS Call header
++ * @remaining: size of the NFS Call header, in bytes
++ *
++ * Compute a weak checksum of the leading bytes of an NFS procedure
++ * call header to help verify that a retransmitted Call matches an
++ * entry in the duplicate reply cache.
++ *
++ * To avoid assumptions about how the RPC message is laid out in
++ * @buf and what else it might contain (eg, a GSS MIC suffix), the
++ * caller passes us the exact location and length of the NFS Call
++ * header.
++ *
++ * Returns a 32-bit checksum value, as defined in RFC 793.
+ */
+-static __wsum
+-nfsd_cache_csum(struct svc_rqst *rqstp)
++static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
++ unsigned int remaining)
+ {
++ unsigned int base, len;
++ struct xdr_buf subbuf;
++ __wsum csum = 0;
++ void *p;
+ int idx;
+- unsigned int base;
+- __wsum csum;
+- struct xdr_buf *buf = &rqstp->rq_arg;
+- const unsigned char *p = buf->head[0].iov_base;
+- size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+- RC_CSUMLEN);
+- size_t len = min(buf->head[0].iov_len, csum_len);
++
++ if (remaining > RC_CSUMLEN)
++ remaining = RC_CSUMLEN;
++ if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
++ return csum;
+
+ /* rq_arg.head first */
+- csum = csum_partial(p, len, 0);
+- csum_len -= len;
++ if (subbuf.head[0].iov_len) {
++ len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
++ csum = csum_partial(subbuf.head[0].iov_base, len, csum);
++ remaining -= len;
++ }
+
+ /* Continue into page array */
+- idx = buf->page_base / PAGE_SIZE;
+- base = buf->page_base & ~PAGE_MASK;
+- while (csum_len) {
+- p = page_address(buf->pages[idx]) + base;
+- len = min_t(size_t, PAGE_SIZE - base, csum_len);
++ idx = subbuf.page_base / PAGE_SIZE;
++ base = subbuf.page_base & ~PAGE_MASK;
++ while (remaining) {
++ p = page_address(subbuf.pages[idx]) + base;
++ len = min_t(unsigned int, PAGE_SIZE - base, remaining);
+ csum = csum_partial(p, len, csum);
+- csum_len -= len;
++ remaining -= len;
+ base = 0;
+ ++idx;
+ }
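
nfsd_cache_csum() folds a running checksum across the head kvec and the page array, capping the window at RC_CSUMLEN. A standalone approximation of that segment-chaining pattern; the ones'-complement sum below only stands in for the kernel's csum_partial():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t csum_step(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len >= 2) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
	return sum;
}

int main(void)
{
	const uint8_t seg1[] = "NFS call header,";
	const uint8_t seg2[] = " continued in a page";
	uint32_t sum = 0;

	/* chain segments exactly as the loop above chains head + pages */
	sum = csum_step(seg1, sizeof(seg1) - 1, sum);
	sum = csum_step(seg2, sizeof(seg2) - 1, sum);
	printf("csum=0x%04x\n", (unsigned int)sum);
	return 0;
}
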
+@@ -465,6 +463,8 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
+ /**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
++ * @start: starting byte in @rqstp->rq_arg of the NFS Call header
++ * @len: size of the NFS Call header, in bytes
+ * @cacherep: OUT: DRC entry for this request
+ *
+ * Try to find an entry matching the current call in the cache. When none
+@@ -478,9 +478,10 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
+ * %RC_REPLY: Reply from cache
+ * %RC_DROPIT: Do not process the request further
+ */
+-int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
++int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
++ unsigned int len, struct nfsd_cacherep **cacherep)
+ {
+- struct nfsd_net *nn;
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_cacherep *rp, *found;
+ __wsum csum;
+ struct nfsd_drc_bucket *b;
+@@ -490,17 +491,16 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
+ int rtn = RC_DOIT;
+
+ if (type == RC_NOCACHE) {
+- nfsd_stats_rc_nocache_inc();
++ nfsd_stats_rc_nocache_inc(nn);
+ goto out;
+ }
+
+- csum = nfsd_cache_csum(rqstp);
++ csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
+
+ /*
+ * Since the common case is a cache miss followed by an insert,
+ * preallocate an entry.
+ */
+- nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ rp = nfsd_cacherep_alloc(rqstp, csum, nn);
+ if (!rp)
+ goto out;
+@@ -518,7 +518,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
+ freed = nfsd_cacherep_dispose(&dispose);
+ trace_nfsd_drc_gc(nn, freed);
+
+- nfsd_stats_rc_misses_inc();
++ nfsd_stats_rc_misses_inc(nn);
+ atomic_inc(&nn->num_drc_entries);
+ nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
+ goto out;
+@@ -526,7 +526,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
+ found_entry:
+ /* We found a matching entry which is either in progress or done. */
+ nfsd_reply_cache_free_locked(NULL, rp, nn);
+- nfsd_stats_rc_hits_inc();
++ nfsd_stats_rc_hits_inc(nn);
+ rtn = RC_DROPIT;
+ rp = found;
+
+@@ -640,24 +640,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ return;
+ }
+
+-/*
+- * Copy cached reply to current reply buffer. Should always fit.
+- * FIXME as reply is in a page, we should just attach the page, and
+- * keep a refcount....
+- */
+ static int
+ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+ {
+- struct kvec *vec = &rqstp->rq_res.head[0];
+-
+- if (vec->iov_len + data->iov_len > PAGE_SIZE) {
+- printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
+- data->iov_len);
+- return 0;
+- }
+- memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
+- vec->iov_len += data->iov_len;
+- return 1;
++ __be32 *p;
++
++ p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
++ if (unlikely(!p))
++ return false;
++ memcpy(p, data->iov_base, data->iov_len);
++ xdr_commit_encode(&rqstp->rq_res_stream);
++ return true;
+ }
+
+ /*
+@@ -675,15 +668,15 @@ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
+ atomic_read(&nn->num_drc_entries));
+ seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
+ seq_printf(m, "mem usage: %lld\n",
+- percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
+ seq_printf(m, "cache hits: %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
+ seq_printf(m, "cache misses: %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
+ seq_printf(m, "not cached: %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
+ seq_printf(m, "payload misses: %lld\n",
+- percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
+ seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
+ seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
+ return 0;
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 7ed02fb88a362c..887035b7446763 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -692,6 +692,7 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ char *mesg = buf;
+ int fd, err;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++ struct svc_serv *serv;
+
+ err = get_int(&mesg, &fd);
+ if (err != 0 || fd < 0)
+@@ -702,13 +703,15 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
+ if (err != 0)
+ return err;
+
+- err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
++ serv = nn->nfsd_serv;
++ err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+
+- if (err >= 0 &&
+- !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+- svc_get(nn->nfsd_serv);
++ if (err < 0 && !serv->sv_nrthreads && !nn->keep_active)
++ nfsd_last_thread(net);
++ else if (err >= 0 && !serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++ svc_get(serv);
+
+- nfsd_put(net);
++ svc_put(serv);
+ return err;
+ }
+
+@@ -722,6 +725,7 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
+ struct svc_xprt *xprt;
+ int port, err;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++ struct svc_serv *serv;
+
+ if (sscanf(buf, "%15s %5u", transport, &port) != 2)
+ return -EINVAL;
+@@ -734,29 +738,33 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
+ if (err != 0)
+ return err;
+
+- err = svc_xprt_create(nn->nfsd_serv, transport, net,
++ serv = nn->nfsd_serv;
++ err = svc_xprt_create(serv, transport, net,
+ PF_INET, port, SVC_SOCK_ANONYMOUS, cred);
+ if (err < 0)
+ goto out_err;
+
+- err = svc_xprt_create(nn->nfsd_serv, transport, net,
++ err = svc_xprt_create(serv, transport, net,
+ PF_INET6, port, SVC_SOCK_ANONYMOUS, cred);
+ if (err < 0 && err != -EAFNOSUPPORT)
+ goto out_close;
+
+- if (!nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+- svc_get(nn->nfsd_serv);
++ if (!serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++ svc_get(serv);
+
+- nfsd_put(net);
++ svc_put(serv);
+ return 0;
+ out_close:
+- xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
++ xprt = svc_find_xprt(serv, transport, net, PF_INET, port);
+ if (xprt != NULL) {
+ svc_xprt_close(xprt);
+ svc_xprt_put(xprt);
+ }
+ out_err:
+- nfsd_put(net);
++ if (!serv->sv_nrthreads && !nn->keep_active)
++ nfsd_last_thread(net);
++
++ svc_put(serv);
+ return err;
+ }
+
+@@ -1516,14 +1524,17 @@ static __net_init int nfsd_net_init(struct net *net)
+ retval = nfsd_idmap_init(net);
+ if (retval)
+ goto out_idmap_error;
+- retval = nfsd_net_reply_cache_init(nn);
++ retval = nfsd_stat_counters_init(nn);
+ if (retval)
+ goto out_repcache_error;
++ memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
++ nn->nfsd_svcstats.program = &nfsd_program;
+ nn->nfsd_versions = NULL;
+ nn->nfsd4_minorversions = NULL;
+ nfsd4_init_leases_net(nn);
+ get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ seqlock_init(&nn->writeverf_lock);
++ nfsd_proc_stat_init(net);
+
+ return 0;
+
+@@ -1544,7 +1555,8 @@ static __net_exit void nfsd_net_exit(struct net *net)
+ {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+- nfsd_net_reply_cache_destroy(nn);
++ nfsd_proc_stat_shutdown(net);
++ nfsd_stat_counters_destroy(nn);
+ nfsd_idmap_shutdown(net);
+ nfsd_export_shutdown(net);
+ nfsd_netns_free_versions(nn);
+@@ -1567,12 +1579,9 @@ static int __init init_nfsd(void)
+ retval = nfsd4_init_pnfs();
+ if (retval)
+ goto out_free_slabs;
+- retval = nfsd_stat_init(); /* Statistics */
+- if (retval)
+- goto out_free_pnfs;
+ retval = nfsd_drc_slab_create();
+ if (retval)
+- goto out_free_stat;
++ goto out_free_pnfs;
+ nfsd_lockd_init(); /* lockd->nfsd callbacks */
+ retval = create_proc_exports_entry();
+ if (retval)
+@@ -1602,8 +1611,6 @@ static int __init init_nfsd(void)
+ out_free_lockd:
+ nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+-out_free_stat:
+- nfsd_stat_shutdown();
+ out_free_pnfs:
+ nfsd4_exit_pnfs();
+ out_free_slabs:
+@@ -1620,7 +1627,6 @@ static void __exit exit_nfsd(void)
+ nfsd_drc_slab_free();
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+- nfsd_stat_shutdown();
+ nfsd_lockd_shutdown();
+ nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 11c14faa6c67be..d05bd2b811f377 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -69,6 +69,7 @@ extern struct mutex nfsd_mutex;
+ extern spinlock_t nfsd_drc_lock;
+ extern unsigned long nfsd_drc_max_mem;
+ extern unsigned long nfsd_drc_mem_used;
++extern atomic_t nfsd_th_cnt; /* number of available threads */
+
+ extern const struct seq_operations nfs_exports_op;
+
+@@ -96,13 +97,6 @@ int nfsd_pool_stats_open(struct inode *, struct file *);
+ int nfsd_pool_stats_release(struct inode *, struct file *);
+ void nfsd_shutdown_threads(struct net *net);
+
+-static inline void nfsd_put(struct net *net)
+-{
+- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-
+- svc_put(nn->nfsd_serv);
+-}
+-
+ bool i_am_nfsd(void);
+
+ struct nfsdfs_client {
+@@ -138,6 +132,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change);
+ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change);
+ void nfsd_reset_versions(struct nfsd_net *nn);
+ int nfsd_create_serv(struct net *net);
++void nfsd_last_thread(struct net *net);
+
+ extern int nfsd_max_blksize;
+
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 355bf0db3235b1..c2495d98c18928 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -327,6 +327,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+ __be32
+ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ {
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct svc_export *exp = NULL;
+ struct dentry *dentry;
+ __be32 error;
+@@ -395,7 +396,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ out:
+ trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error);
+ if (error == nfserr_stale)
+- nfsd_stats_fh_stale_inc(exp);
++ nfsd_stats_fh_stale_inc(nn, exp);
+ return error;
+ }
+
+@@ -572,7 +573,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
+ _fh_update(fhp, exp, dentry);
+ if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
+ fh_put(fhp);
+- return nfserr_opnotsupp;
++ return nfserr_stale;
+ }
+
+ return 0;
+@@ -598,7 +599,7 @@ fh_update(struct svc_fh *fhp)
+
+ _fh_update(fhp, fhp->fh_export, dentry);
+ if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
+- return nfserr_opnotsupp;
++ return nfserr_stale;
+ return 0;
+ out_bad:
+ printk(KERN_ERR "fh_update: fh not verified!\n");
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index c7af1095f6b549..710a54c7dffc54 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -34,6 +34,7 @@
+
+ #define NFSDDBG_FACILITY NFSDDBG_SVC
+
++atomic_t nfsd_th_cnt = ATOMIC_INIT(0);
+ extern struct svc_program nfsd_program;
+ static int nfsd(void *vrqstp);
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+@@ -89,7 +90,6 @@ unsigned long nfsd_drc_max_mem;
+ unsigned long nfsd_drc_mem_used;
+
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+-static struct svc_stat nfsd_acl_svcstats;
+ static const struct svc_version *nfsd_acl_version[] = {
+ # if defined(CONFIG_NFSD_V2_ACL)
+ [2] = &nfsd_acl_version2,
+@@ -108,15 +108,11 @@ static struct svc_program nfsd_acl_program = {
+ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+- .pg_stats = &nfsd_acl_svcstats,
+ .pg_authenticate = &svc_set_client,
+ .pg_init_request = nfsd_acl_init_request,
+ .pg_rpcbind_set = nfsd_acl_rpcbind_set,
+ };
+
+-static struct svc_stat nfsd_acl_svcstats = {
+- .program = &nfsd_acl_program,
+-};
+ #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+
+ static const struct svc_version *nfsd_version[] = {
+@@ -141,7 +137,6 @@ struct svc_program nfsd_program = {
+ .pg_vers = nfsd_version, /* version table */
+ .pg_name = "nfsd", /* program name */
+ .pg_class = "nfsd", /* authentication class */
+- .pg_stats = &nfsd_svcstats, /* version table */
+ .pg_authenticate = &svc_set_client, /* export authentication */
+ .pg_init_request = nfsd_init_request,
+ .pg_rpcbind_set = nfsd_rpcbind_set,
+@@ -542,7 +537,7 @@ static struct notifier_block nfsd_inet6addr_notifier = {
+ /* Only used under nfsd_mutex, so this atomic may be overkill: */
+ static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+
+-static void nfsd_last_thread(struct net *net)
++void nfsd_last_thread(struct net *net)
+ {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv = nn->nfsd_serv;
+@@ -572,7 +567,6 @@ static void nfsd_last_thread(struct net *net)
+ return;
+
+ nfsd_shutdown_net(net);
+- pr_info("nfsd: last server has exited, flushing export cache\n");
+ nfsd_export_flush(net);
+ }
+
+@@ -675,7 +669,8 @@ int nfsd_create_serv(struct net *net)
+ if (nfsd_max_blksize == 0)
+ nfsd_max_blksize = nfsd_get_default_max_blksize();
+ nfsd_reset_versions(nn);
+- serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd);
++ serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
++ nfsd_max_blksize, nfsd);
+ if (serv == NULL)
+ return -ENOMEM;
+
+@@ -787,7 +782,6 @@ int
+ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ {
+ int error;
+- bool nfsd_up_before;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
+
+@@ -807,8 +801,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ error = nfsd_create_serv(net);
+ if (error)
+ goto out;
+-
+- nfsd_up_before = nn->nfsd_net_up;
+ serv = nn->nfsd_serv;
+
+ error = nfsd_startup_net(net, cred);
+@@ -816,17 +808,15 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ goto out_put;
+ error = svc_set_num_threads(serv, NULL, nrservs);
+ if (error)
+- goto out_shutdown;
++ goto out_put;
+ error = serv->sv_nrthreads;
+- if (error == 0)
+- nfsd_last_thread(net);
+-out_shutdown:
+- if (error < 0 && !nfsd_up_before)
+- nfsd_shutdown_net(net);
+ out_put:
+ /* Threads now hold service active */
+ if (xchg(&nn->keep_active, 0))
+ svc_put(serv);
++
++ if (serv->sv_nrthreads == 0)
++ nfsd_last_thread(net);
+ svc_put(serv);
+ out:
+ mutex_unlock(&nfsd_mutex);
+@@ -950,7 +940,7 @@ nfsd(void *vrqstp)
+
+ current->fs->umask = 0;
+
+- atomic_inc(&nfsdstats.th_cnt);
++ atomic_inc(&nfsd_th_cnt);
+
+ set_freezable();
+
+@@ -962,10 +952,9 @@ nfsd(void *vrqstp)
+ rqstp->rq_server->sv_maxconn = nn->max_connections;
+
+ svc_recv(rqstp);
+- validate_process_creds();
+ }
+
+- atomic_dec(&nfsdstats.th_cnt);
++ atomic_dec(&nfsd_th_cnt);
+
+ out:
+ /* Release the thread */
+@@ -988,6 +977,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ const struct svc_procedure *proc = rqstp->rq_procinfo;
+ __be32 *statp = rqstp->rq_accept_statp;
+ struct nfsd_cacherep *rp;
++ unsigned int start, len;
++ __be32 *nfs_reply;
+
+ /*
+ * Give the xdr decoder a chance to change this if it wants
+@@ -995,11 +986,18 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ */
+ rqstp->rq_cachetype = proc->pc_cachetype;
+
++ /*
++ * ->pc_decode advances the argument stream past the NFS
++ * Call header, so grab the header's starting location and
++ * size now for the call to nfsd_cache_lookup().
++ */
++ start = xdr_stream_pos(&rqstp->rq_arg_stream);
++ len = xdr_stream_remaining(&rqstp->rq_arg_stream);
+ if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
+ goto out_decode_err;
+
+ rp = NULL;
+- switch (nfsd_cache_lookup(rqstp, &rp)) {
++ switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
+ case RC_DOIT:
+ break;
+ case RC_REPLY:
+@@ -1008,6 +1006,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ goto out_dropit;
+ }
+
++ nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
+ *statp = proc->pc_func(rqstp);
+ if (test_bit(RQ_DROPME, &rqstp->rq_flags))
+ goto out_update_drop;
+@@ -1015,7 +1014,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
+ if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
+ goto out_encode_err;
+
+- nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
++ nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, nfs_reply);
+ out_cached_reply:
+ return 1;
+
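
The dispatch path therefore snapshots the argument stream's position and remaining length before ->pc_decode consumes the header, so the DRC can later checksum exactly that window. A toy illustration of the bookkeeping; the struct and helpers below are illustrative stand-ins, not the kernel's xdr_stream API:

#include <stdio.h>

struct stream { const char *buf; size_t pos, len; };

static size_t stream_pos(const struct stream *s)       { return s->pos; }
static size_t stream_remaining(const struct stream *s) { return s->len - s->pos; }

static void decode_header(struct stream *s)
{
	s->pos += 16;	/* pretend the decoder consumed a 16-byte header */
}

int main(void)
{
	struct stream s = { "0123456789abcdefPAYLOAD", 0, 23 };

	/* grab the window before decoding advances the stream */
	size_t start = stream_pos(&s);
	size_t len = stream_remaining(&s);

	decode_header(&s);
	printf("checksum window: offset %zu, up to %zu bytes\n", start, len);
	return 0;
}
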
+diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
+index 63797635e1c328..9f606fa08bd4b8 100644
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -27,25 +27,22 @@
+
+ #include "nfsd.h"
+
+-struct nfsd_stats nfsdstats;
+-struct svc_stat nfsd_svcstats = {
+- .program = &nfsd_program,
+-};
+-
+ static int nfsd_show(struct seq_file *seq, void *v)
+ {
++ struct net *net = pde_data(file_inode(seq->file));
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int i;
+
+ seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));
+
+ /* thread usage: */
+- seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
++ seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt));
+
+ /* deprecated thread usage histogram stats */
+ for (i = 0; i < 10; i++)
+@@ -55,7 +52,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
+
+ /* show my rpc info */
+- svc_seq_show(seq, &nfsd_svcstats);
++ svc_seq_show(seq, &nn->nfsd_svcstats);
+
+ #ifdef CONFIG_NFSD_V4
+ /* Show count for individual nfsv4 operations */
+@@ -63,10 +60,10 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ for (i = 0; i <= LAST_NFS4_OP; i++) {
+ seq_printf(seq, " %lld",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)]));
+ }
+ seq_printf(seq, "\nwdeleg_getattr %lld",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_WDELEG_GETATTR]));
+
+ seq_putc(seq, '\n');
+ #endif
+@@ -76,7 +73,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+
+ DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
+
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
+ {
+ int i, err = 0;
+
+@@ -108,31 +105,24 @@ void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
+ percpu_counter_destroy(&counters[i]);
+ }
+
+-static int nfsd_stat_counters_init(void)
++int nfsd_stat_counters_init(struct nfsd_net *nn)
+ {
+- return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++ return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+
+-static void nfsd_stat_counters_destroy(void)
++void nfsd_stat_counters_destroy(struct nfsd_net *nn)
+ {
+- nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++ nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+
+-int nfsd_stat_init(void)
++void nfsd_proc_stat_init(struct net *net)
+ {
+- int err;
+-
+- err = nfsd_stat_counters_init();
+- if (err)
+- return err;
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+- svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
+-
+- return 0;
++ svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ }
+
+-void nfsd_stat_shutdown(void)
++void nfsd_proc_stat_shutdown(struct net *net)
+ {
+- nfsd_stat_counters_destroy();
+- svc_proc_unregister(&init_net, "nfsd");
++ svc_proc_unregister(net, "nfsd");
+ }
+diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
+index cf5524e7ca0623..d2753e975dfd34 100644
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,94 +10,72 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+
+-
+-enum {
+- NFSD_STATS_RC_HITS, /* repcache hits */
+- NFSD_STATS_RC_MISSES, /* repcache misses */
+- NFSD_STATS_RC_NOCACHE, /* uncached reqs */
+- NFSD_STATS_FH_STALE, /* FH stale error */
+- NFSD_STATS_IO_READ, /* bytes returned to read requests */
+- NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
+-#ifdef CONFIG_NFSD_V4
+- NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
+- NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
+-#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
+- NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */
+-#endif
+- NFSD_STATS_COUNTERS_NUM
+-};
+-
+-struct nfsd_stats {
+- struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
+-
+- atomic_t th_cnt; /* number of available threads */
+-};
+-
+-extern struct nfsd_stats nfsdstats;
+-
+-extern struct svc_stat nfsd_svcstats;
+-
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
+-int nfsd_stat_init(void);
+-void nfsd_stat_shutdown(void);
+-
+-static inline void nfsd_stats_rc_hits_inc(void)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
++int nfsd_stat_counters_init(struct nfsd_net *nn);
++void nfsd_stat_counters_destroy(struct nfsd_net *nn);
++void nfsd_proc_stat_init(struct net *net);
++void nfsd_proc_stat_shutdown(struct net *net);
++
++static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]);
+ }
+
+-static inline void nfsd_stats_rc_misses_inc(void)
++static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]);
+ }
+
+-static inline void nfsd_stats_rc_nocache_inc(void)
++static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]);
+ }
+
+-static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
++static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn,
++ struct svc_export *exp)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
+- if (exp)
+- percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]);
++ if (exp && exp->ex_stats)
++ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
+ }
+
+-static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_read_add(struct nfsd_net *nn,
++ struct svc_export *exp, s64 amount)
+ {
+- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
+- if (exp)
+- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
++ percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount);
++ if (exp && exp->ex_stats)
++ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
+ }
+
+-static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_write_add(struct nfsd_net *nn,
++ struct svc_export *exp, s64 amount)
+ {
+- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
+- if (exp)
+- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
++ percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount);
++ if (exp && exp->ex_stats)
++ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
+ }
+
+ static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]);
+ }
+
+ static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
+ {
+- percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++ percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+
+ static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
+ {
+- percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++ percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+
+ #ifdef CONFIG_NFSD_V4
+-static inline void nfsd_stats_wdeleg_getattr_inc(void)
++static inline void nfsd_stats_wdeleg_getattr_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_WDELEG_GETATTR]);
+ }
+ #endif
+ #endif /* _NFSD_STATS_H */
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 80390434887183..d7ed49eef5911f 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -843,7 +843,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __field(unsigned long, flavor)
+ __array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
+- __string_len(name, name, clp->cl_name.len)
++ __string_len(name, clp->cl_name.data, clp->cl_name.len)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 02f5fcaad03f37..b3e51d88faff46 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -101,6 +101,7 @@ nfserrno (int errno)
+ { nfserr_io, -EUCLEAN },
+ { nfserr_perm, -ENOKEY },
+ { nfserr_no_grace, -ENOGRACE},
++ { nfserr_io, -EBADMSG },
+ };
+ int i;
+
+@@ -823,7 +824,7 @@ int nfsd_open_break_lease(struct inode *inode, int access)
+ * and additional flags.
+ * N.B. After this call fhp needs an fh_put
+ */
+-static __be32
++static int
+ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ int may_flags, struct file **filp)
+ {
+@@ -831,14 +832,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ struct inode *inode;
+ struct file *file;
+ int flags = O_RDONLY|O_LARGEFILE;
+- __be32 err;
+- int host_err = 0;
++ int host_err = -EPERM;
+
+ path.mnt = fhp->fh_export->ex_path.mnt;
+ path.dentry = fhp->fh_dentry;
+ inode = d_inode(path.dentry);
+
+- err = nfserr_perm;
+ if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
+ goto out;
+
+@@ -847,7 +846,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+
+ host_err = nfsd_open_break_lease(inode, may_flags);
+ if (host_err) /* NOMEM or WOULDBLOCK */
+- goto out_nfserr;
++ goto out;
+
+ if (may_flags & NFSD_MAY_WRITE) {
+ if (may_flags & NFSD_MAY_READ)
+@@ -859,13 +858,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ file = dentry_open(&path, flags, current_cred());
+ if (IS_ERR(file)) {
+ host_err = PTR_ERR(file);
+- goto out_nfserr;
++ goto out;
+ }
+
+ host_err = ima_file_check(file, may_flags);
+ if (host_err) {
+ fput(file);
+- goto out_nfserr;
++ goto out;
+ }
+
+ if (may_flags & NFSD_MAY_64BIT_COOKIE)
+@@ -874,10 +873,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ file->f_mode |= FMODE_32BITHASH;
+
+ *filp = file;
+-out_nfserr:
+- err = nfserrno(host_err);
+ out:
+- return err;
++ return host_err;
+ }
+
+ __be32
+@@ -885,9 +882,9 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ int may_flags, struct file **filp)
+ {
+ __be32 err;
++ int host_err;
+ bool retried = false;
+
+- validate_process_creds();
+ /*
+ * If we get here, then the client has already done an "open",
+ * and (hopefully) checked permission - so allow OWNER_OVERRIDE
+@@ -904,14 +901,14 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ retry:
+ err = fh_verify(rqstp, fhp, type, may_flags);
+ if (!err) {
+- err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+- if (err == nfserr_stale && !retried) {
++ host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
++ if (host_err == -EOPENSTALE && !retried) {
+ retried = true;
+ fh_put(fhp);
+ goto retry;
+ }
++ err = nfserrno(host_err);
+ }
+- validate_process_creds();
+ return err;
+ }
+
+@@ -922,18 +919,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
+ * @may_flags: internal permission flags
+ * @filp: OUT: open "struct file *"
+ *
+- * Returns an nfsstat value in network byte order.
++ * Returns zero on success, or a negative errno value.
+ */
+-__be32
++int
+ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
+ struct file **filp)
+ {
+- __be32 err;
+-
+- validate_process_creds();
+- err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
+- validate_process_creds();
+- return err;
++ return __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
+ }
+
+ /*
+@@ -994,7 +986,9 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ unsigned long *count, u32 *eof, ssize_t host_err)
+ {
+ if (host_err >= 0) {
+- nfsd_stats_io_read_add(fhp->fh_export, host_err);
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++
++ nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
+ *eof = nfsd_eof_on_read(file, offset, host_err, *count);
+ *count = host_err;
+ fsnotify_access(file);
+@@ -1177,7 +1171,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
+ goto out_nfserr;
+ }
+ *cnt = host_err;
+- nfsd_stats_io_write_add(exp, *cnt);
++ nfsd_stats_io_write_add(nn, exp, *cnt);
+ fsnotify_modify(file);
+ host_err = filemap_check_wb_err(file->f_mapping, since);
+ if (host_err < 0)
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index a6890ea7b765b6..e3c29596f4df12 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -104,8 +104,8 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ int nfsd_open_break_lease(struct inode *, int);
+ __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
+ int, struct file **);
+-__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *,
+- int, struct file **);
++int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp,
++ int may_flags, struct file **filp);
+ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct file *file, loff_t offset,
+ unsigned long *count,
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 7342de296ec3c6..25881bdd212b87 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
+ * @target: offset number of an entry in the group (start point)
+ * @bsize: size in bits
+ * @lock: spin lock protecting @bitmap
++ * @wrap: whether to wrap around
+ */
+ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ unsigned long target,
+ unsigned int bsize,
+- spinlock_t *lock)
++ spinlock_t *lock, bool wrap)
+ {
+ int pos, end = bsize;
+
+@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+
+ end = target;
+ }
++ if (!wrap)
++ return -ENOSPC;
+
+ /* wrap around */
+ for (pos = 0; pos < end; pos++) {
+@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
+ * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+ * @inode: inode of metadata file using this allocator
+ * @req: nilfs_palloc_req structure exchanged for the allocation
++ * @wrap: whether to wrap around
+ */
+ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+- struct nilfs_palloc_req *req)
++ struct nilfs_palloc_req *req, bool wrap)
+ {
+ struct buffer_head *desc_bh, *bitmap_bh;
+ struct nilfs_palloc_group_desc *desc;
+@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ entries_per_group = nilfs_palloc_entries_per_group(inode);
+
+ for (i = 0; i < ngroups; i += n) {
+- if (group >= ngroups) {
++ if (group >= ngroups && wrap) {
+ /* wrap around */
+ group = 0;
+ maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
+@@ -541,7 +545,13 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+ pos = nilfs_palloc_find_available_slot(
+ bitmap, group_offset,
+- entries_per_group, lock);
++ entries_per_group, lock, wrap);
++ /*
++ * Since the search for a free slot in the
++ * second and subsequent bitmap blocks always
++ * starts from the beginning, the wrap flag
++ * only has an effect on the first search.
++ */
+ if (pos >= 0) {
+ /* found a free entry */
+ nilfs_palloc_group_desc_add_entries(
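
The new wrap parameter bounds the in-block bitmap scan: the search runs from the start point to the end of the block and only wraps back to offset zero when wrapping is requested. A userspace sketch of that scan:

#include <stdbool.h>
#include <stdio.h>

static int find_zero_bit(const unsigned char *bitmap, int target, int bsize,
			 bool wrap)
{
	for (int pos = target; pos < bsize; pos++)
		if (!(bitmap[pos / 8] & (1 << (pos % 8))))
			return pos;
	if (!wrap)
		return -1;	/* -ENOSPC in the kernel version */
	for (int pos = 0; pos < target; pos++)
		if (!(bitmap[pos / 8] & (1 << (pos % 8))))
			return pos;
	return -1;
}

int main(void)
{
	unsigned char bitmap[4] = { 0x0f, 0xff, 0xff, 0xff };	/* bits 0-3 and 8-31 in use */

	printf("%d\n", find_zero_bit(bitmap, 8, 32, true));	/* 4: wrapped */
	printf("%d\n", find_zero_bit(bitmap, 8, 32, false));	/* -1: no wrap */
	return 0;
}
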
+diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
+index b667e869ac076a..d825a9faca6d96 100644
+--- a/fs/nilfs2/alloc.h
++++ b/fs/nilfs2/alloc.h
+@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
+ struct buffer_head *pr_entry_bh;
+ };
+
+-int nilfs_palloc_prepare_alloc_entry(struct inode *,
+- struct nilfs_palloc_req *);
++int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
++ struct nilfs_palloc_req *req, bool wrap);
+ void nilfs_palloc_commit_alloc_entry(struct inode *,
+ struct nilfs_palloc_req *);
+ void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index 5710833ac1cc7e..8fe348bceabe0b 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -51,12 +51,21 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
+ if (unlikely(!bh))
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
+ buffer_dirty(bh))) {
+- brelse(bh);
+- BUG();
++ /*
++ * The block buffer at the specified new address was already
++ * in use. This can happen if it is a virtual block number
++ * and has been reallocated due to corruption of the bitmap
++ * used to manage its allocation state (if not, the buffer
++ * clearing of an abandoned b-tree node is missing somewhere).
++ */
++ nilfs_error(inode->i_sb,
++ "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
++ (unsigned long long)blocknr, inode->i_ino);
++ goto failed;
+ }
+ memset(bh->b_data, 0, i_blocksize(inode));
+ bh->b_bdev = inode->i_sb->s_bdev;
+@@ -67,6 +76,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+ unlock_page(bh->b_page);
+ put_page(bh->b_page);
+ return bh;
++
++failed:
++ unlock_page(bh->b_page);
++ put_page(bh->b_page);
++ brelse(bh);
++ return ERR_PTR(-EIO);
+ }
+
+ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
+@@ -217,8 +232,8 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
+ }
+
+ nbh = nilfs_btnode_create_block(btnc, newkey);
+- if (!nbh)
+- return -ENOMEM;
++ if (IS_ERR(nbh))
++ return PTR_ERR(nbh);
+
+ BUG_ON(nbh == obh);
+ ctxt->newbh = nbh;
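
These hunks switch nilfs_btnode_create_block() from returning bare NULL to the kernel's ERR_PTR convention, letting callers tell -ENOMEM apart from the new -EIO corruption case. A self-contained userspace approximation of ERR_PTR/IS_ERR/PTR_ERR showing how callers decode the result:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long PTR_ERR(const void *p)  { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* errno values occupy the top page of the address space */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *create_block(int fail_mode)
{
	static char block[64];

	if (fail_mode == 1)
		return ERR_PTR(-ENOMEM);	/* allocation failure */
	if (fail_mode == 2)
		return ERR_PTR(-EIO);		/* corrupted-bitmap case */
	return block;
}

int main(void)
{
	for (int mode = 0; mode <= 2; mode++) {
		void *bh = create_block(mode);

		if (IS_ERR(bh))
			printf("mode %d: error %ld\n", mode, PTR_ERR(bh));
		else
			printf("mode %d: got block %p\n", mode, bh);
	}
	return 0;
}
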
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index 13592e82eaf68b..dbd27a44632fa9 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -63,8 +63,8 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
+ struct buffer_head *bh;
+
+ bh = nilfs_btnode_create_block(btnc, ptr);
+- if (!bh)
+- return -ENOMEM;
++ if (IS_ERR(bh))
++ return PTR_ERR(bh);
+
+ set_buffer_nilfs_volatile(bh);
+ *bhp = bh;
+@@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
+ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+ level >= NILFS_BTREE_LEVEL_MAX ||
+ (flags & NILFS_BTREE_NODE_ROOT) ||
+- nchildren < 0 ||
++ nchildren <= 0 ||
+ nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
+ nilfs_crit(inode->i_sb,
+ "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
+@@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+ level >= NILFS_BTREE_LEVEL_MAX ||
+ nchildren < 0 ||
+- nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
++ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX ||
++ (nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) {
+ nilfs_crit(inode->i_sb,
+ "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
+ inode->i_ino, level, flags, nchildren);
+@@ -724,7 +725,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ dat = nilfs_bmap_get_dat(btree);
+ ret = nilfs_dat_translate(dat, ptr, &blocknr);
+ if (ret < 0)
+- goto out;
++ goto dat_error;
+ ptr = blocknr;
+ }
+ cnt = 1;
+@@ -743,7 +744,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ if (dat) {
+ ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+ if (ret < 0)
+- goto out;
++ goto dat_error;
+ ptr2 = blocknr;
+ }
+ if (ptr2 != ptr + cnt || ++cnt == maxblocks)
+@@ -781,6 +782,11 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
+ out:
+ nilfs_btree_free_path(path);
+ return ret;
++
++ dat_error:
++ if (ret == -ENOENT)
++ ret = -EINVAL; /* Notify bmap layer of metadata corruption */
++ goto out;
+ }
+
+ static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
+@@ -1653,13 +1659,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
+ int nchildren, ret;
+
+ root = nilfs_btree_get_root(btree);
++ nchildren = nilfs_btree_node_get_nchildren(root);
++ if (unlikely(nchildren == 0))
++ return 0;
++
+ switch (nilfs_btree_height(btree)) {
+ case 2:
+ bh = NULL;
+ node = root;
+ break;
+ case 3:
+- nchildren = nilfs_btree_node_get_nchildren(root);
+ if (nchildren > 1)
+ return 0;
+ ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
+@@ -1668,12 +1677,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
+ if (ret < 0)
+ return ret;
+ node = (struct nilfs_btree_node *)bh->b_data;
++ nchildren = nilfs_btree_node_get_nchildren(node);
+ break;
+ default:
+ return 0;
+ }
+
+- nchildren = nilfs_btree_node_get_nchildren(node);
+ maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
+ nextmaxkey = (nchildren > 1) ?
+ nilfs_btree_node_get_key(node, nchildren - 2) : 0;
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 9cf6ba58f5859f..351010828d8836 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+ int ret;
+
+- ret = nilfs_palloc_prepare_alloc_entry(dat, req);
++ ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
+ if (ret < 0)
+ return ret;
+
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index bce734b68f08e5..ddf8e575e489ca 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -143,6 +143,9 @@ static bool nilfs_check_page(struct page *page)
+ goto Enamelen;
+ if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+ goto Espan;
++ if (unlikely(p->inode &&
++ NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
++ goto Einumber;
+ }
+ if (offs != limit)
+ goto Eend;
+@@ -168,6 +171,9 @@ static bool nilfs_check_page(struct page *page)
+ goto bad_entry;
+ Espan:
+ error = "directory entry across blocks";
++ goto bad_entry;
++Einumber:
++ error = "disallowed inode number";
+ bad_entry:
+ nilfs_error(sb,
+ "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+@@ -186,19 +192,24 @@ static bool nilfs_check_page(struct page *page)
+ return false;
+ }
+
+-static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
++static void *nilfs_get_page(struct inode *dir, unsigned long n,
++ struct page **pagep)
+ {
+ struct address_space *mapping = dir->i_mapping;
+ struct page *page = read_mapping_page(mapping, n, NULL);
++ void *kaddr;
+
+- if (!IS_ERR(page)) {
+- kmap(page);
+- if (unlikely(!PageChecked(page))) {
+- if (!nilfs_check_page(page))
+- goto fail;
+- }
++ if (IS_ERR(page))
++ return page;
++
++ kaddr = kmap(page);
++ if (unlikely(!PageChecked(page))) {
++ if (!nilfs_check_page(page))
++ goto fail;
+ }
+- return page;
++
++ *pagep = page;
++ return kaddr;
+
+ fail:
+ nilfs_put_page(page);
+@@ -243,7 +254,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
+
+ #define S_SHIFT 12
+ static unsigned char
+-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
++nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
+ [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
+ [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
+ [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
+@@ -275,14 +286,14 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
+ for ( ; n < npages; n++, offset = 0) {
+ char *kaddr, *limit;
+ struct nilfs_dir_entry *de;
+- struct page *page = nilfs_get_page(inode, n);
++ struct page *page;
+
+- if (IS_ERR(page)) {
++ kaddr = nilfs_get_page(inode, n, &page);
++ if (IS_ERR(kaddr)) {
+ nilfs_error(sb, "bad page in #%lu", inode->i_ino);
+ ctx->pos += PAGE_SIZE - offset;
+ return -EIO;
+ }
+- kaddr = page_address(page);
+ de = (struct nilfs_dir_entry *)(kaddr + offset);
+ limit = kaddr + nilfs_last_byte(inode, n) -
+ NILFS_DIR_REC_LEN(1);
+@@ -320,6 +331,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
+ * returns the page in which the entry was found, and the entry itself
+ * (as a parameter - res_dir). Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
++ *
++ * On failure, returns an error pointer and the caller should ignore res_page.
+ */
+ struct nilfs_dir_entry *
+ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+@@ -345,26 +358,26 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+ start = 0;
+ n = start;
+ do {
+- char *kaddr;
++ char *kaddr = nilfs_get_page(dir, n, &page);
+
+- page = nilfs_get_page(dir, n);
+- if (!IS_ERR(page)) {
+- kaddr = page_address(page);
+- de = (struct nilfs_dir_entry *)kaddr;
+- kaddr += nilfs_last_byte(dir, n) - reclen;
+- while ((char *) de <= kaddr) {
+- if (de->rec_len == 0) {
+- nilfs_error(dir->i_sb,
+- "zero-length directory entry");
+- nilfs_put_page(page);
+- goto out;
+- }
+- if (nilfs_match(namelen, name, de))
+- goto found;
+- de = nilfs_next_entry(de);
++ if (IS_ERR(kaddr))
++ return ERR_CAST(kaddr);
++
++ de = (struct nilfs_dir_entry *)kaddr;
++ kaddr += nilfs_last_byte(dir, n) - reclen;
++ while ((char *)de <= kaddr) {
++ if (de->rec_len == 0) {
++ nilfs_error(dir->i_sb,
++ "zero-length directory entry");
++ nilfs_put_page(page);
++ goto out;
+ }
+- nilfs_put_page(page);
++ if (nilfs_match(namelen, name, de))
++ goto found;
++ de = nilfs_next_entry(de);
+ }
++ nilfs_put_page(page);
++
+ if (++n >= npages)
+ n = 0;
+ /* next page is past the blocks we've got */
+@@ -377,7 +390,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+ }
+ } while (n != start);
+ out:
+- return NULL;
++ return ERR_PTR(-ENOENT);
+
+ found:
+ *res_page = page;
+@@ -387,30 +400,54 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
+
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
+ {
+- struct page *page = nilfs_get_page(dir, 0);
+- struct nilfs_dir_entry *de = NULL;
++ struct page *page;
++ struct nilfs_dir_entry *de, *next_de;
++ size_t limit;
++ char *msg;
++
++ de = nilfs_get_page(dir, 0, &page);
++ if (IS_ERR(de))
++ return NULL;
++
++ limit = nilfs_last_byte(dir, 0); /* is a multiple of chunk size */
++ if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
++ !nilfs_match(1, ".", de))) {
++ msg = "missing '.'";
++ goto fail;
++ }
+
+- if (!IS_ERR(page)) {
+- de = nilfs_next_entry(
+- (struct nilfs_dir_entry *)page_address(page));
+- *p = page;
++ next_de = nilfs_next_entry(de);
++ /*
++ * If "next_de" has not reached the end of the chunk, there is
++ * at least one more record. Check whether it matches "..".
++ */
++ if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
++ !nilfs_match(2, "..", next_de))) {
++ msg = "missing '..'";
++ goto fail;
+ }
+- return de;
++ *p = page;
++ return next_de;
++
++fail:
++ nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
++ nilfs_put_page(page);
++ return NULL;
+ }
+
+-ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
++int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
+ {
+- ino_t res = 0;
+ struct nilfs_dir_entry *de;
+ struct page *page;
+
+ de = nilfs_find_entry(dir, qstr, &page);
+- if (de) {
+- res = le64_to_cpu(de->inode);
+- kunmap(page);
+- put_page(page);
+- }
+- return res;
++ if (IS_ERR(de))
++ return PTR_ERR(de);
++
++ *ino = le64_to_cpu(de->inode);
++ kunmap(page);
++ put_page(page);
++ return 0;
+ }
+
+ /* Releases the page */
+@@ -459,12 +496,11 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
+ for (n = 0; n <= npages; n++) {
+ char *dir_end;
+
+- page = nilfs_get_page(dir, n);
+- err = PTR_ERR(page);
+- if (IS_ERR(page))
++ kaddr = nilfs_get_page(dir, n, &page);
++ err = PTR_ERR(kaddr);
++ if (IS_ERR(kaddr))
+ goto out;
+ lock_page(page);
+- kaddr = page_address(page);
+ dir_end = kaddr + nilfs_last_byte(dir, n);
+ de = (struct nilfs_dir_entry *)kaddr;
+ kaddr += PAGE_SIZE - reclen;
+@@ -627,11 +663,10 @@ int nilfs_empty_dir(struct inode *inode)
+ char *kaddr;
+ struct nilfs_dir_entry *de;
+
+- page = nilfs_get_page(inode, i);
+- if (IS_ERR(page))
+- continue;
++ kaddr = nilfs_get_page(inode, i, &page);
++ if (IS_ERR(kaddr))
++ return 0;
+
+- kaddr = page_address(page);
+ de = (struct nilfs_dir_entry *)kaddr;
+ kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
+
+diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
+index 4c85914f2abc37..893ab36824cc2b 100644
+--- a/fs/nilfs2/direct.c
++++ b/fs/nilfs2/direct.c
+@@ -66,7 +66,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ dat = nilfs_bmap_get_dat(direct);
+ ret = nilfs_dat_translate(dat, ptr, &blocknr);
+ if (ret < 0)
+- return ret;
++ goto dat_error;
+ ptr = blocknr;
+ }
+
+@@ -79,7 +79,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ if (dat) {
+ ret = nilfs_dat_translate(dat, ptr2, &blocknr);
+ if (ret < 0)
+- return ret;
++ goto dat_error;
+ ptr2 = blocknr;
+ }
+ if (ptr2 != ptr + cnt)
+@@ -87,6 +87,11 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
+ }
+ *ptrp = ptr;
+ return cnt;
++
++ dat_error:
++ if (ret == -ENOENT)
++ ret = -EINVAL; /* Notify bmap layer of metadata corruption */
++ return ret;
+ }
+
+ static __u64
+diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
+index 740ce26d1e7657..0505feef79f4a6 100644
+--- a/fs/nilfs2/file.c
++++ b/fs/nilfs2/file.c
+@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
+ nilfs_transaction_commit(inode->i_sb);
+
+ mapped:
+- wait_for_stable_page(page);
++ /*
++ * Since checksumming including data blocks is performed to determine
++ * the validity of the log to be written and used for recovery, it is
++ * necessary to wait for writeback to finish here, regardless of the
++ * stable write requirement of the backing device.
++ */
++ wait_on_page_writeback(page);
+ out:
+ sb_end_pagefault(inode->i_sb);
+ return vmf_fs_error(ret);
+diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
+index a8a4bc8490b4d8..ac10a62a41e983 100644
+--- a/fs/nilfs2/ifile.c
++++ b/fs/nilfs2/ifile.c
+@@ -55,13 +55,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
+ struct nilfs_palloc_req req;
+ int ret;
+
+- req.pr_entry_nr = 0; /*
+- * 0 says find free inode from beginning
+- * of a group. dull code!!
+- */
++ req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
+ req.pr_entry_bh = NULL;
+
+- ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
++ ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
+ if (!ret) {
+ ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
+ &req.pr_entry_bh);
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 1a8bd599347610..8e1afa39a62e17 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -112,7 +112,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
+ "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+ __func__, inode->i_ino,
+ (unsigned long long)blkoff);
+- err = 0;
++ err = -EAGAIN;
+ }
+ nilfs_transaction_abort(inode->i_sb);
+ goto out;
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index 40ffade49f389a..53022bfe0b72d2 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -60,7 +60,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
+ if (argv->v_nmembs == 0)
+ return 0;
+
+- if (argv->v_size > PAGE_SIZE)
++ if ((size_t)argv->v_size > PAGE_SIZE)
+ return -EINVAL;
+
+ /*
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 2a4e7f4a8102f6..9f9b0762ff6901 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -55,12 +55,20 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+ {
+ struct inode *inode;
+ ino_t ino;
++ int res;
+
+ if (dentry->d_name.len > NILFS_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+- ino = nilfs_inode_by_name(dir, &dentry->d_name);
+- inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL;
++ res = nilfs_inode_by_name(dir, &dentry->d_name, &ino);
++ if (res) {
++ if (res != -ENOENT)
++ return ERR_PTR(res);
++ inode = NULL;
++ } else {
++ inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
++ }
++
+ return d_splice_alias(inode, dentry);
+ }
+
+@@ -263,10 +271,11 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
+ struct page *page;
+ int err;
+
+- err = -ENOENT;
+ de = nilfs_find_entry(dir, &dentry->d_name, &page);
+- if (!de)
++ if (IS_ERR(de)) {
++ err = PTR_ERR(de);
+ goto out;
++ }
+
+ inode = d_inode(dentry);
+ err = -EIO;
+@@ -361,10 +370,11 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ if (unlikely(err))
+ return err;
+
+- err = -ENOENT;
+ old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+- if (!old_de)
++ if (IS_ERR(old_de)) {
++ err = PTR_ERR(old_de);
+ goto out;
++ }
+
+ if (S_ISDIR(old_inode->i_mode)) {
+ err = -EIO;
+@@ -381,10 +391,12 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ if (dir_de && !nilfs_empty_dir(new_inode))
+ goto out_dir;
+
+- err = -ENOENT;
+- new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
+- if (!new_de)
++ new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
++ &new_page);
++ if (IS_ERR(new_de)) {
++ err = PTR_ERR(new_de);
+ goto out_dir;
++ }
+ nilfs_set_link(new_dir, new_de, new_page, old_inode);
+ nilfs_mark_inode_dirty(new_dir);
+ inode_set_ctime_current(new_inode);
+@@ -438,13 +450,14 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ */
+ static struct dentry *nilfs_get_parent(struct dentry *child)
+ {
+- unsigned long ino;
++ ino_t ino;
++ int res;
+ struct inode *inode;
+ struct nilfs_root *root;
+
+- ino = nilfs_inode_by_name(d_inode(child), &dotdot_name);
+- if (!ino)
+- return ERR_PTR(-ENOENT);
++ res = nilfs_inode_by_name(d_inode(child), &dotdot_name, &ino);
++ if (res)
++ return ERR_PTR(res);
+
+ root = NILFS_I(d_inode(child))->i_root;
+
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index 8046490cd7fea2..ad13e74af65f92 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -116,9 +116,15 @@ enum {
+ #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
+
+ #define NILFS_MDT_INODE(sb, ino) \
+- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
++ ((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
+ #define NILFS_VALID_INODE(sb, ino) \
+- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
++ ((ino) >= NILFS_FIRST_INO(sb) || \
++ ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
++
++#define NILFS_PRIVATE_INODE(ino) ({ \
++ ino_t __ino = (ino); \
++ ((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO && \
++ (__ino) != NILFS_SKETCH_INO); })
+
+ /**
+ * struct nilfs_transaction_info: context information for synchronization
+@@ -227,7 +233,7 @@ static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
+
+ /* dir.c */
+ extern int nilfs_add_link(struct dentry *, struct inode *);
+-extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
++int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino);
+ extern int nilfs_make_empty(struct inode *, struct inode *);
+ extern struct nilfs_dir_entry *
+ nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
+diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
+index 0955b657938ff2..ce30b51ac593cd 100644
+--- a/fs/nilfs2/recovery.c
++++ b/fs/nilfs2/recovery.c
+@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
+
+ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
+ struct nilfs_recovery_block *rb,
+- struct page *page)
++ loff_t pos, struct page *page)
+ {
+ struct buffer_head *bh_org;
++ size_t from = pos & ~PAGE_MASK;
+ void *kaddr;
+
+ bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
+@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
+ return -EIO;
+
+ kaddr = kmap_atomic(page);
+- memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
++ memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
+ kunmap_atomic(kaddr);
+ brelse(bh_org);
+ return 0;
+@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
+ goto failed_inode;
+ }
+
+- err = nilfs_recovery_copy_block(nilfs, rb, page);
++ err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
+ if (unlikely(err))
+ goto failed_page;
+
+@@ -707,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
+ brelse(bh);
+ }
+
++/**
++ * nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery
++ * @nilfs: nilfs object
++ */
++static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
++{
++ struct nilfs_inode_info *ii, *n;
++ LIST_HEAD(head);
++
++ /* Abandon inodes that have read recovery data */
++ spin_lock(&nilfs->ns_inode_lock);
++ list_splice_init(&nilfs->ns_dirty_files, &head);
++ spin_unlock(&nilfs->ns_inode_lock);
++ if (list_empty(&head))
++ return;
++
++ set_nilfs_purging(nilfs);
++ list_for_each_entry_safe(ii, n, &head, i_dirty) {
++ spin_lock(&nilfs->ns_inode_lock);
++ list_del_init(&ii->i_dirty);
++ spin_unlock(&nilfs->ns_inode_lock);
++
++ iput(&ii->vfs_inode);
++ }
++ clear_nilfs_purging(nilfs);
++}
++
+ /**
+ * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
+ * @nilfs: nilfs object
+@@ -765,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
+ if (unlikely(err)) {
+ nilfs_err(sb, "error %d writing segment for recovery",
+ err);
+- goto failed;
++ goto put_root;
+ }
+
+ nilfs_finish_roll_forward(nilfs, ri);
+ }
+
+- failed:
++put_root:
+ nilfs_put_root(root);
+ return err;
++
++failed:
++ nilfs_abort_roll_forward(nilfs);
++ goto put_root;
+ }
+
+ /**
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 7ec16879756e8c..0610cb12c11ca0 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -136,7 +136,7 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
+
+ #define nilfs_cnt32_ge(a, b) \
+ (typecheck(__u32, a) && typecheck(__u32, b) && \
+- ((__s32)(a) - (__s32)(b) >= 0))
++ ((__s32)((a) - (b)) >= 0))
+
+ static int nilfs_prepare_segment_lock(struct super_block *sb,
+ struct nilfs_transaction_info *ti)
+@@ -1694,6 +1694,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ if (bh->b_page != bd_page) {
+ if (bd_page) {
+ lock_page(bd_page);
++ wait_on_page_writeback(bd_page);
+ clear_page_dirty_for_io(bd_page);
+ set_page_writeback(bd_page);
+ unlock_page(bd_page);
+@@ -1704,10 +1705,10 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
+- set_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ lock_page(bd_page);
++ wait_on_page_writeback(bd_page);
+ clear_page_dirty_for_io(bd_page);
+ set_page_writeback(bd_page);
+ unlock_page(bd_page);
+@@ -1715,6 +1716,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ }
+ break;
+ }
++ set_buffer_async_write(bh);
+ if (bh->b_page != fs_page) {
+ nilfs_begin_page_io(fs_page);
+ fs_page = bh->b_page;
+@@ -1723,6 +1725,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ }
+ if (bd_page) {
+ lock_page(bd_page);
++ wait_on_page_writeback(bd_page);
+ clear_page_dirty_for_io(bd_page);
+ set_page_writeback(bd_page);
+ unlock_page(bd_page);
+@@ -1800,7 +1803,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
+- clear_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ clear_buffer_uptodate(bh);
+ if (bh->b_page != bd_page) {
+@@ -1809,6 +1811,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ }
+ break;
+ }
++ clear_buffer_async_write(bh);
+ if (bh->b_page != fs_page) {
+ nilfs_end_page_io(fs_page, err);
+ fs_page = bh->b_page;
+@@ -1832,6 +1835,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
+ nilfs_abort_logs(&logs, ret ? : err);
+
+ list_splice_tail_init(&sci->sc_segbufs, &logs);
++ if (list_empty(&logs))
++ return; /* if the first segment buffer preparation failed */
++
+ nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
+ nilfs_free_incomplete_logs(&logs, nilfs);
+
+@@ -1896,8 +1902,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Redirected));
+
+- set_mask_bits(&bh->b_state, clear_bits, set_bits);
+ if (bh == segbuf->sb_super_root) {
++ set_buffer_uptodate(bh);
++ clear_buffer_dirty(bh);
+ if (bh->b_page != bd_page) {
+ end_page_writeback(bd_page);
+ bd_page = bh->b_page;
+@@ -1905,6 +1912,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ update_sr = true;
+ break;
+ }
++ set_mask_bits(&bh->b_state, clear_bits, set_bits);
+ if (bh->b_page != fs_page) {
+ nilfs_end_page_io(fs_page, 0);
+ fs_page = bh->b_page;
+@@ -2074,7 +2082,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+
+ err = nilfs_segctor_begin_construction(sci, nilfs);
+ if (unlikely(err))
+- goto out;
++ goto failed;
+
+ /* Update time stamp */
+ sci->sc_seg_ctime = ktime_get_real_seconds();
+@@ -2137,10 +2145,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ return err;
+
+ failed_to_write:
+- if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
+- nilfs_redirty_inodes(&sci->sc_dirty_files);
+-
+ failed:
++ if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
++ nilfs_redirty_inodes(&sci->sc_dirty_files);
+ if (nilfs_doing_gc())
+ nilfs_redirty_inodes(&sci->sc_gc_inodes);
+ nilfs_segctor_abort_construction(sci, nilfs, err);
+@@ -2159,8 +2166,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
+ {
+ spin_lock(&sci->sc_state_lock);
+ if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
+- sci->sc_timer.expires = jiffies + sci->sc_interval;
+- add_timer(&sci->sc_timer);
++ if (sci->sc_task) {
++ sci->sc_timer.expires = jiffies + sci->sc_interval;
++ add_timer(&sci->sc_timer);
++ }
+ sci->sc_state |= NILFS_SEGCTOR_COMMIT;
+ }
+ spin_unlock(&sci->sc_state_lock);
+@@ -2207,19 +2216,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
+ struct nilfs_segctor_wait_request wait_req;
+ int err = 0;
+
+- spin_lock(&sci->sc_state_lock);
+ init_wait(&wait_req.wq);
+ wait_req.err = 0;
+ atomic_set(&wait_req.done, 0);
++ init_waitqueue_entry(&wait_req.wq, current);
++
++ /*
++ * To prevent a race issue where completion notifications from the
++ * log writer thread are missed, increment the request sequence count
++ * "sc_seq_request" and insert a wait queue entry using the current
++ * sequence number into the "sc_wait_request" queue at the same time
++ * within the lock section of "sc_state_lock".
++ */
++ spin_lock(&sci->sc_state_lock);
+ wait_req.seq = ++sci->sc_seq_request;
++ add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
+ spin_unlock(&sci->sc_state_lock);
+
+- init_waitqueue_entry(&wait_req.wq, current);
+- add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
+- set_current_state(TASK_INTERRUPTIBLE);
+ wake_up(&sci->sc_wait_daemon);
+
+ for (;;) {
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ /*
++ * Synchronize only while the log writer thread is alive.
++ * Leave flushing out after the log writer thread exits to
++ * the cleanup work in nilfs_segctor_destroy().
++ */
++ if (!sci->sc_task)
++ break;
++
+ if (atomic_read(&wait_req.done)) {
+ err = wait_req.err;
+ break;
+@@ -2235,7 +2261,7 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
+ return err;
+ }
+
+-static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
++static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
+ {
+ struct nilfs_segctor_wait_request *wrq, *n;
+ unsigned long flags;
+@@ -2243,7 +2269,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
+ spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
+ list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
+ if (!atomic_read(&wrq->done) &&
+- nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
++ (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
+ wrq->err = err;
+ atomic_set(&wrq->done, 1);
+ }
+@@ -2361,10 +2387,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
+ */
+ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
+ {
++ bool thread_is_alive;
++
+ spin_lock(&sci->sc_state_lock);
+ sci->sc_seq_accepted = sci->sc_seq_request;
++ thread_is_alive = (bool)sci->sc_task;
+ spin_unlock(&sci->sc_state_lock);
+- del_timer_sync(&sci->sc_timer);
++
++ /*
++ * This function does not race with the log writer thread's
++ * termination. Therefore, deleting sc_timer, which should not be
++ * done after the log writer thread exits, can be done safely outside
++ * the area protected by sc_state_lock.
++ */
++ if (thread_is_alive)
++ del_timer_sync(&sci->sc_timer);
+ }
+
+ /**
+@@ -2381,7 +2418,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
+ if (mode == SC_LSEG_SR) {
+ sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
+ sci->sc_seq_done = sci->sc_seq_accepted;
+- nilfs_segctor_wakeup(sci, err);
++ nilfs_segctor_wakeup(sci, err, false);
+ sci->sc_flush_request = 0;
+ } else {
+ if (mode == SC_FLUSH_FILE)
+@@ -2390,7 +2427,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
+ sci->sc_flush_request &= ~FLUSH_DAT_BIT;
+
+ /* re-enable timer if checkpoint creation was not done */
+- if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
++ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
+ time_before(jiffies, sci->sc_timer.expires))
+ add_timer(&sci->sc_timer);
+ }
+@@ -2580,6 +2617,7 @@ static int nilfs_segctor_thread(void *arg)
+ int timeout = 0;
+
+ sci->sc_timer_task = current;
++ timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
+
+ /* start sync. */
+ sci->sc_task = current;
+@@ -2646,6 +2684,7 @@ static int nilfs_segctor_thread(void *arg)
+ end_thread:
+ /* end sync. */
+ sci->sc_task = NULL;
++ timer_shutdown_sync(&sci->sc_timer);
+ wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
+ spin_unlock(&sci->sc_state_lock);
+ return 0;
+@@ -2709,7 +2748,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
+ INIT_LIST_HEAD(&sci->sc_gc_inodes);
+ INIT_LIST_HEAD(&sci->sc_iput_queue);
+ INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
+- timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
+
+ sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
+ sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
+@@ -2763,6 +2801,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ || sci->sc_seq_request != sci->sc_seq_done);
+ spin_unlock(&sci->sc_state_lock);
+
++ /*
++ * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
++ * be called from delayed iput() via nilfs_evict_inode() and can race
++ * with the above log writer thread termination.
++ */
++ nilfs_segctor_wakeup(sci, 0, true);
++
+ if (flush_work(&sci->sc_iput_work))
+ flag = true;
+
+@@ -2788,7 +2833,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+
+ down_write(&nilfs->ns_segctor_sem);
+
+- timer_shutdown_sync(&sci->sc_timer);
+ kfree(sci);
+ }
+
+diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
+index 2c6078a6b8ecb5..58ca7c936393c4 100644
+--- a/fs/nilfs2/sufile.c
++++ b/fs/nilfs2/sufile.c
+@@ -501,15 +501,38 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
+
+ down_write(&NILFS_MDT(sufile)->mi_sem);
+ ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+- if (!ret) {
+- mark_buffer_dirty(bh);
+- nilfs_mdt_mark_dirty(sufile);
+- kaddr = kmap_atomic(bh->b_page);
+- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
++ if (ret)
++ goto out_sem;
++
++ kaddr = kmap_atomic(bh->b_page);
++ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
++ if (unlikely(nilfs_segment_usage_error(su))) {
++ struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
++
++ kunmap_atomic(kaddr);
++ brelse(bh);
++ if (nilfs_segment_is_active(nilfs, segnum)) {
++ nilfs_error(sufile->i_sb,
++ "active segment %llu is erroneous",
++ (unsigned long long)segnum);
++ } else {
++ /*
++ * Segments marked erroneous are never allocated by
++ * nilfs_sufile_alloc(); only active segments, i.e.,
++ * the segments indexed by ns_segnum or ns_nextnum,
++ * can be erroneous here.
++ */
++ WARN_ON_ONCE(1);
++ }
++ ret = -EIO;
++ } else {
+ nilfs_segment_usage_set_dirty(su);
+ kunmap_atomic(kaddr);
++ mark_buffer_dirty(bh);
++ nilfs_mdt_mark_dirty(sufile);
+ brelse(bh);
+ }
++out_sem:
+ up_write(&NILFS_MDT(sufile)->mi_sem);
+ return ret;
+ }
+@@ -536,9 +559,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+
+ kaddr = kmap_atomic(bh->b_page);
+ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+- WARN_ON(nilfs_segment_usage_error(su));
+- if (modtime)
++ if (modtime) {
++ /*
++ * Check segusage error and set su_lastmod only when updating
++ * this entry with a valid timestamp, not for cancellation.
++ */
++ WARN_ON_ONCE(nilfs_segment_usage_error(su));
+ su->su_lastmod = cpu_to_le64(modtime);
++ }
+ su->su_nblocks = cpu_to_le32(nblocks);
+ kunmap_atomic(kaddr);
+
+diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
+index 379d22e28ed62c..905c7eadf9676d 100644
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -836,9 +836,15 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr,
+ struct the_nilfs *nilfs,
+ char *buf)
+ {
+- struct nilfs_super_block **sbp = nilfs->ns_sbp;
+- u32 major = le32_to_cpu(sbp[0]->s_rev_level);
+- u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level);
++ struct nilfs_super_block *raw_sb;
++ u32 major;
++ u16 minor;
++
++ down_read(&nilfs->ns_sem);
++ raw_sb = nilfs->ns_sbp[0];
++ major = le32_to_cpu(raw_sb->s_rev_level);
++ minor = le16_to_cpu(raw_sb->s_minor_rev_level);
++ up_read(&nilfs->ns_sem);
+
+ return sysfs_emit(buf, "%d.%d\n", major, minor);
+ }
+@@ -856,8 +862,13 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr,
+ struct the_nilfs *nilfs,
+ char *buf)
+ {
+- struct nilfs_super_block **sbp = nilfs->ns_sbp;
+- u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size);
++ struct nilfs_super_block *raw_sb;
++ u64 dev_size;
++
++ down_read(&nilfs->ns_sem);
++ raw_sb = nilfs->ns_sbp[0];
++ dev_size = le64_to_cpu(raw_sb->s_dev_size);
++ up_read(&nilfs->ns_sem);
+
+ return sysfs_emit(buf, "%llu\n", dev_size);
+ }
+@@ -879,9 +890,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr,
+ struct the_nilfs *nilfs,
+ char *buf)
+ {
+- struct nilfs_super_block **sbp = nilfs->ns_sbp;
++ struct nilfs_super_block *raw_sb;
++ ssize_t len;
+
+- return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid);
++ down_read(&nilfs->ns_sem);
++ raw_sb = nilfs->ns_sbp[0];
++ len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid);
++ up_read(&nilfs->ns_sem);
++
++ return len;
+ }
+
+ static
+@@ -889,10 +906,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr,
+ struct the_nilfs *nilfs,
+ char *buf)
+ {
+- struct nilfs_super_block **sbp = nilfs->ns_sbp;
++ struct nilfs_super_block *raw_sb;
++ ssize_t len;
++
++ down_read(&nilfs->ns_sem);
++ raw_sb = nilfs->ns_sbp[0];
++ len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n",
++ raw_sb->s_volume_name);
++ up_read(&nilfs->ns_sem);
+
+- return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n",
+- sbp[0]->s_volume_name);
++ return len;
+ }
+
+ static const char dev_readme_str[] =
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 0f0667957c8100..be41e26b782469 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ }
+
+ nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
++ if (nilfs->ns_first_ino < NILFS_USER_INO) {
++ nilfs_err(nilfs->ns_sb,
++ "too small lower limit for non-reserved inode numbers: %u",
++ nilfs->ns_first_ino);
++ return -EINVAL;
++ }
+
+ nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
+ if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
+@@ -716,7 +722,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ goto failed_sbh;
+ }
+ nilfs_release_super_block(nilfs);
+- sb_set_blocksize(sb, blocksize);
++ if (!sb_set_blocksize(sb, blocksize)) {
++ nilfs_err(sb, "bad blocksize %d", blocksize);
++ err = -EINVAL;
++ goto out;
++ }
+
+ err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
+ if (err)
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index cd4ae1b8ae165a..17fee562ee503e 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -182,7 +182,7 @@ struct the_nilfs {
+ unsigned long ns_nrsvsegs;
+ unsigned long ns_first_data_block;
+ int ns_inode_size;
+- int ns_first_ino;
++ unsigned int ns_first_ino;
+ u32 ns_crc_seed;
+
+ /* /sys/fs/<nilfs>/<device> */
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 7974e91ffe134f..b5d8f238fce42a 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -103,17 +103,13 @@ void fsnotify_sb_delete(struct super_block *sb)
+ * parent cares. Thus when an event happens on a child it can quickly tell
+ * if there is a need to find a parent and send the event to the parent.
+ */
+-void __fsnotify_update_child_dentry_flags(struct inode *inode)
++void fsnotify_set_children_dentry_flags(struct inode *inode)
+ {
+ struct dentry *alias;
+- int watched;
+
+ if (!S_ISDIR(inode->i_mode))
+ return;
+
+- /* determine if the children should tell inode about their events */
+- watched = fsnotify_inode_watches_children(inode);
+-
+ spin_lock(&inode->i_lock);
+ /* run all of the dentries associated with this inode. Since this is a
+ * directory, there damn well better only be one item on this list */
+@@ -129,10 +125,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ continue;
+
+ spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+- if (watched)
+- child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+- else
+- child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++ child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ spin_unlock(&child->d_lock);
+ }
+ spin_unlock(&alias->d_lock);
+@@ -140,6 +133,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ spin_unlock(&inode->i_lock);
+ }
+
++/*
++ * Lazily clear the false-positive PARENT_WATCHED flag for a child whose
++ * parent has stopped watching children.
++ */
++static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
++ struct dentry *dentry)
++{
++ spin_lock(&dentry->d_lock);
++ /*
++ * d_lock is a sufficient barrier to prevent observing a non-watched
++ * parent state from before the fsnotify_set_children_dentry_flags()
++ * or fsnotify_update_flags() call that had set PARENT_WATCHED.
++ */
++ if (!fsnotify_inode_watches_children(pinode))
++ dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++ spin_unlock(&dentry->d_lock);
++}
++
+ /* Are inode/sb/mount interested in parent and name info with this event? */
+ static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt,
+ __u32 mask)
+@@ -208,7 +219,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ p_inode = parent->d_inode;
+ p_mask = fsnotify_inode_watches_children(p_inode);
+ if (unlikely(parent_watched && !p_mask))
+- __fsnotify_update_child_dentry_flags(p_inode);
++ fsnotify_clear_child_dentry_flag(p_inode, dentry);
+
+ /*
+ * Include parent/name in notification either if some notification
+diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
+index fde74eb333cc93..2b4267de86e6b2 100644
+--- a/fs/notify/fsnotify.h
++++ b/fs/notify/fsnotify.h
+@@ -74,7 +74,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
+ * update the dentry->d_flags of all of inode's children to indicate if inode cares
+ * about events that happen to its children.
+ */
+-extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
++extern void fsnotify_set_children_dentry_flags(struct inode *inode);
+
+ extern struct kmem_cache *fsnotify_mark_connector_cachep;
+
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index c74ef947447d67..4be6e883d492f6 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -176,6 +176,24 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ return fsnotify_update_iref(conn, want_iref);
+ }
+
++static bool fsnotify_conn_watches_children(
++ struct fsnotify_mark_connector *conn)
++{
++ if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++ return false;
++
++ return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
++}
++
++static void fsnotify_conn_set_children_dentry_flags(
++ struct fsnotify_mark_connector *conn)
++{
++ if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++ return;
++
++ fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
++}
++
+ /*
+ * Calculate mask of events for a list of marks. The caller must make sure
+ * connector and connector->obj cannot disappear under us. Callers achieve
+@@ -184,15 +202,23 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ */
+ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ {
++ bool update_children;
++
+ if (!conn)
+ return;
+
+ spin_lock(&conn->lock);
++ update_children = !fsnotify_conn_watches_children(conn);
+ __fsnotify_recalc_mask(conn);
++ update_children &= fsnotify_conn_watches_children(conn);
+ spin_unlock(&conn->lock);
+- if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
+- __fsnotify_update_child_dentry_flags(
+- fsnotify_conn_inode(conn));
++ /*
++ * Set children's PARENT_WATCHED flags only if parent started watching.
++ * When parent stops watching, we clear false positive PARENT_WATCHED
++ * flags lazily in __fsnotify_parent().
++ */
++ if (update_children)
++ fsnotify_conn_set_children_dentry_flags(conn);
+ }
+
+ /* Free all connectors queued for freeing once SRCU period ends */
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 63f70259edc0d4..fc6cea60044edf 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -231,7 +231,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
+ struct ntfs_sb_info *sbi;
+ struct ATTRIB *attr_s;
+ struct MFT_REC *rec;
+- u32 used, asize, rsize, aoff, align;
++ u32 used, asize, rsize, aoff;
+ bool is_data;
+ CLST len, alen;
+ char *next;
+@@ -252,10 +252,13 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
+ rsize = le32_to_cpu(attr->res.data_size);
+ is_data = attr->type == ATTR_DATA && !attr->name_len;
+
+- align = sbi->cluster_size;
+- if (is_attr_compressed(attr))
+- align <<= COMPRESSION_UNIT;
+- len = (rsize + align - 1) >> sbi->cluster_bits;
++ /* len: how many clusters are required to store 'rsize' bytes */
++ if (is_attr_compressed(attr)) {
++ u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;
++ len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
++ } else {
++ len = bytes_to_cluster(sbi, rsize);
++ }
+
+ run_init(run);
+
+@@ -670,7 +673,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ goto undo_2;
+ }
+
+- if (!is_mft)
++ /* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
++ if (ni->mi.rno != MFT_REC_MFT)
+ run_truncate_head(run, evcn + 1);
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+@@ -886,7 +890,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ struct runs_tree *run = &ni->file.run;
+ struct ntfs_sb_info *sbi;
+ u8 cluster_bits;
+- struct ATTRIB *attr = NULL, *attr_b;
++ struct ATTRIB *attr, *attr_b;
+ struct ATTR_LIST_ENTRY *le, *le_b;
+ struct mft_inode *mi, *mi_b;
+ CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
+@@ -904,12 +908,8 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ *len = 0;
+ up_read(&ni->file.run_lock);
+
+- if (*len) {
+- if (*lcn != SPARSE_LCN || !new)
+- return 0; /* Fast normal way without allocation. */
+- else if (clen > *len)
+- clen = *len;
+- }
++ if (*len && (*lcn != SPARSE_LCN || !new))
++ return 0; /* Fast normal way without allocation. */
+
+ /* No cluster in cache or we need to allocate cluster in hole. */
+ sbi = ni->mi.sbi;
+@@ -918,6 +918,17 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ ni_lock(ni);
+ down_write(&ni->file.run_lock);
+
++ /* Repeat the code above (under write lock). */
++ if (!run_lookup_entry(run, vcn, lcn, len, NULL))
++ *len = 0;
++
++ if (*len) {
++ if (*lcn != SPARSE_LCN || !new)
++ goto out; /* normal way without allocation. */
++ if (clen > *len)
++ clen = *len;
++ }
++
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
+ if (!attr_b) {
+@@ -965,6 +976,19 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ if (err)
+ goto out;
+
++ /* Check for compressed frame. */
++ err = attr_is_frame_compressed(ni, attr, vcn >> NTFS_LZNT_CUNIT, &hint);
++ if (err)
++ goto out;
++
++ if (hint) {
++ /* If the frame is compressed, don't touch it. */
++ *lcn = COMPRESSED_LCN;
++ *len = hint;
++ err = -EOPNOTSUPP;
++ goto out;
++ }
++
+ if (!*len) {
+ if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
+ if (*lcn != SPARSE_LCN || !new)
+@@ -1715,6 +1739,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+
+ attr_b->nres.total_size = cpu_to_le64(total_size);
+ inode_set_bytes(&ni->vfs_inode, total_size);
++ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+
+ mi_b->dirty = true;
+ mark_inode_dirty(&ni->vfs_inode);
+@@ -1736,8 +1761,10 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
+ 0, NULL, &mi_b);
+- if (!attr_b)
+- return -ENOENT;
++ if (!attr_b) {
++ err = -ENOENT;
++ goto out;
++ }
+
+ attr = attr_b;
+ le = le_b;
+@@ -1818,13 +1845,15 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ ok:
+ run_truncate_around(run, vcn);
+ out:
+- if (new_valid > data_size)
+- new_valid = data_size;
++ if (attr_b) {
++ if (new_valid > data_size)
++ new_valid = data_size;
+
+- valid_size = le64_to_cpu(attr_b->nres.valid_size);
+- if (new_valid != valid_size) {
+- attr_b->nres.valid_size = cpu_to_le64(valid_size);
+- mi_b->dirty = true;
++ valid_size = le64_to_cpu(attr_b->nres.valid_size);
++ if (new_valid != valid_size) {
++ attr_b->nres.valid_size = cpu_to_le64(valid_size);
++ mi_b->dirty = true;
++ }
+ }
+
+ return err;
+@@ -2073,7 +2102,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+
+ /* Update inode size. */
+ ni->i_valid = valid_size;
+- ni->vfs_inode.i_size = data_size;
++ i_size_write(&ni->vfs_inode, data_size);
+ inode_set_bytes(&ni->vfs_inode, total_size);
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ mark_inode_dirty(&ni->vfs_inode);
+@@ -2488,7 +2517,7 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+ mi_b->dirty = true;
+
+ done:
+- ni->vfs_inode.i_size += bytes;
++ i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ mark_inode_dirty(&ni->vfs_inode);
+
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index 7c01735d1219d8..9f4bd8d260901c 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -29,7 +29,7 @@ static inline bool al_is_valid_le(const struct ntfs_inode *ni,
+ void al_destroy(struct ntfs_inode *ni)
+ {
+ run_close(&ni->attr_list.run);
+- kfree(ni->attr_list.le);
++ kvfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.size = 0;
+ ni->attr_list.dirty = false;
+@@ -127,12 +127,13 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ {
+ size_t off;
+ u16 sz;
++ const unsigned le_min_size = le_size(0);
+
+ if (!le) {
+ le = ni->attr_list.le;
+ } else {
+ sz = le16_to_cpu(le->size);
+- if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
++ if (sz < le_min_size) {
+ /* Impossible 'cause we should not return such le. */
+ return NULL;
+ }
+@@ -141,7 +142,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+
+ /* Check boundary. */
+ off = PtrOffset(ni->attr_list.le, le);
+- if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
++ if (off + le_min_size > ni->attr_list.size) {
+ /* The regular end of list. */
+ return NULL;
+ }
+@@ -149,8 +150,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ sz = le16_to_cpu(le->size);
+
+ /* Check le for errors. */
+- if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
+- off + sz > ni->attr_list.size ||
++ if (sz < le_min_size || off + sz > ni->attr_list.size ||
+ sz < le->name_off + le->name_len * sizeof(short)) {
+ return NULL;
+ }
+@@ -318,7 +318,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
+ memcpy(ptr, al->le, off);
+ memcpy(Add2Ptr(ptr, off + sz), le, old_size - off);
+ le = Add2Ptr(ptr, off);
+- kfree(al->le);
++ kvfree(al->le);
+ al->le = ptr;
+ } else {
+ memmove(Add2Ptr(le, sz), le, old_size - off);
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 63f14a0232f6a0..cf4fe21a50399b 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -124,7 +124,7 @@ void wnd_close(struct wnd_bitmap *wnd)
+ {
+ struct rb_node *node, *next;
+
+- kfree(wnd->free_bits);
++ kvfree(wnd->free_bits);
+ wnd->free_bits = NULL;
+ run_close(&wnd->run);
+
+@@ -654,7 +654,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ wnd->total_zeroes = nbits;
+ wnd->extent_max = MINUS_ONE_T;
+ wnd->zone_bit = wnd->zone_end = 0;
+- wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
++ wnd->nwnd = bytes_to_block(sb, ntfs3_bitmap_size(nbits));
+ wnd->bits_last = nbits & (wbits - 1);
+ if (!wnd->bits_last)
+ wnd->bits_last = wbits;
+@@ -1347,7 +1347,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ return -EINVAL;
+
+ /* Align to 8 byte boundary. */
+- new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
++ new_wnd = bytes_to_block(sb, ntfs3_bitmap_size(new_bits));
+ new_last = new_bits & (wbits - 1);
+ if (!new_last)
+ new_last = wbits;
+@@ -1360,7 +1360,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
+ memset(new_free + wnd->nwnd, 0,
+ (new_wnd - wnd->nwnd) * sizeof(short));
+- kfree(wnd->free_bits);
++ kvfree(wnd->free_bits);
+ wnd->free_bits = new_free;
+ }
+
+@@ -1382,7 +1382,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+
+ err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
+ if (err)
+- break;
++ return err;
+
+ bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
+ if (!bh)
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index ec0566b322d5d0..e1b856ecce61d0 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -272,9 +272,12 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
+ return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
+ }
+
+-static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+- const struct NTFS_DE *e, u8 *name,
+- struct dir_context *ctx)
++/*
++ * returns false if 'ctx' if full
++ */
++static inline bool ntfs_dir_emit(struct ntfs_sb_info *sbi,
++ struct ntfs_inode *ni, const struct NTFS_DE *e,
++ u8 *name, struct dir_context *ctx)
+ {
+ const struct ATTR_FILE_NAME *fname;
+ unsigned long ino;
+@@ -284,48 +287,72 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ fname = Add2Ptr(e, sizeof(struct NTFS_DE));
+
+ if (fname->type == FILE_NAME_DOS)
+- return 0;
++ return true;
+
+ if (!mi_is_ref(&ni->mi, &fname->home))
+- return 0;
++ return true;
+
+ ino = ino_get(&e->ref);
+
+ if (ino == MFT_REC_ROOT)
+- return 0;
++ return true;
+
+ /* Skip meta files. Unless option to show metafiles is set. */
+ if (!sbi->options->showmeta && ntfs_is_meta_file(sbi, ino))
+- return 0;
++ return true;
+
+ if (sbi->options->nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
+- return 0;
++ return true;
+
+ name_len = ntfs_utf16_to_nls(sbi, fname->name, fname->name_len, name,
+ PATH_MAX);
+ if (name_len <= 0) {
+ ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
+ ino);
+- return 0;
++ return true;
+ }
+
+- /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
+- if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
+- dt_type = DT_LNK;
+- else
+- dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++ /*
++ * NTFS: symlinks are "dir + reparse" or "file + reparse".
++ * Unfortunately, the reparse attribute is used for many purposes (several dozen).
++ * It is not possible here to know whether this name is a symlink or not.
++ * To get the exact type of the name, we would have to open the inode (read the MFT).
++ * getattr on an opened file (fstat) correctly returns the symlink type.
++ */
++ dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++
++ /*
++ * It is not reliable to detect the type of a name using the duplicated
++ * information stored in the parent directory.
++ * The only correct way to get the type of a name is to read the MFT record
++ * and find ATTR_STD.
++ * The code below is not a good idea: it takes additional locks/reads just to
++ * get the type of the name. Should we use a mount option to enable this branch?
++ */
++ if (((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) ||
++ fname->dup.ea_size) &&
++ ino != ni->mi.rno) {
++ struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
++ if (!IS_ERR_OR_NULL(inode)) {
++ dt_type = fs_umode_to_dtype(inode->i_mode);
++ iput(inode);
++ }
++ }
+
+- return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
++ return dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+
+ /*
+ * ntfs_read_hdr - Helper function for ntfs_readdir().
++ *
++ * Returns 0 on success.
++ * Returns -EINVAL if the directory is corrupted.
++ * Returns +1 if 'ctx' is full.
+ */
+ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
+ u8 *name, struct dir_context *ctx)
+ {
+- int err;
+ const struct NTFS_DE *e;
+ u32 e_size;
+ u32 end = le32_to_cpu(hdr->used);
+@@ -333,12 +360,12 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+
+ for (;; off += e_size) {
+ if (off + sizeof(struct NTFS_DE) > end)
+- return -1;
++ return -EINVAL;
+
+ e = Add2Ptr(hdr, off);
+ e_size = le16_to_cpu(e->size);
+ if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
+- return -1;
++ return -EINVAL;
+
+ if (de_is_last(e))
+ return 0;
+@@ -348,14 +375,15 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ continue;
+
+ if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
+- return -1;
++ return -EINVAL;
+
+ ctx->pos = vbo + off;
+
+ /* Submit the name to the filldir callback. */
+- err = ntfs_filldir(sbi, ni, e, name, ctx);
+- if (err)
+- return err;
++ if (!ntfs_dir_emit(sbi, ni, e, name, ctx)) {
++ /* ctx is full. */
++ return +1;
++ }
+ }
+ }
+
+@@ -454,7 +482,6 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+
+ vbo = (u64)bit << index_bits;
+ if (vbo >= i_size) {
+- ntfs_inode_err(dir, "Looks like your dir is corrupt");
+ err = -EINVAL;
+ goto out;
+ }
+@@ -477,9 +504,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ __putname(name);
+ put_indx_node(node);
+
+- if (err == -ENOENT) {
++ if (err == 1) {
++ /* 'ctx' is full. */
++ err = 0;
++ } else if (err == -ENOENT) {
+ err = 0;
+ ctx->pos = pos;
++ } else if (err < 0) {
++ if (err == -EINVAL)
++ ntfs_inode_err(dir, "directory corrupted");
++ ctx->pos = eod;
+ }
+
+ return err;
+@@ -495,11 +529,9 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ struct INDEX_HDR *hdr;
+ const struct ATTR_FILE_NAME *fname;
+ u32 e_size, off, end;
+- u64 vbo = 0;
+ size_t drs = 0, fles = 0, bit = 0;
+- loff_t i_size = ni->vfs_inode.i_size;
+ struct indx_node *node = NULL;
+- u8 index_bits = ni->dir.index_bits;
++ size_t max_indx = i_size_read(&ni->vfs_inode) >> ni->dir.index_bits;
+
+ if (is_empty)
+ *is_empty = true;
+@@ -543,7 +575,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ fles += 1;
+ }
+
+- if (vbo >= i_size)
++ if (bit >= max_indx)
+ goto out;
+
+ err = indx_used_bit(&ni->dir, ni, &bit);
+@@ -553,8 +585,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ if (bit == MINUS_ONE_T)
+ goto out;
+
+- vbo = (u64)bit << index_bits;
+- if (vbo >= i_size)
++ if (bit >= max_indx)
+ goto out;
+
+ err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
+@@ -564,7 +595,6 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+
+ hdr = &node->index->ihdr;
+ bit += 1;
+- vbo = (u64)bit << ni->dir.idx2vbn_bits;
+ }
+
+ out:
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index 1f7a194983c5db..f14d21b6c6d395 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -260,6 +260,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ bool rw = vma->vm_flags & VM_WRITE;
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "mmap encrypted not supported");
+ return -EOPNOTSUPP;
+@@ -296,10 +299,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ }
+
+ if (ni->i_valid < to) {
+- if (!inode_trylock(inode)) {
+- err = -EAGAIN;
+- goto out;
+- }
++ inode_lock(inode);
+ err = ntfs_extend_initialized_size(file, ni,
+ ni->i_valid, to);
+ inode_unlock(inode);
+@@ -418,7 +418,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
+ }
+
+ /*
+- * ntfs_fallocate
++ * ntfs_fallocate - file_operations::ntfs_fallocate
+ *
+ * Preallocate space for a file. This implements ntfs's fallocate file
+ * operation, which gets called from sys_fallocate system call. User
+@@ -498,10 +498,14 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_punch_hole(ni, vbo, len, &frame_size);
+ ni_unlock(ni);
++ if (!err)
++ goto ok;
++
+ if (err != E_NTFS_NOTALIGNED)
+ goto out;
+
+ /* Process not aligned punch. */
++ err = 0;
+ mask = frame_size - 1;
+ vbo_a = (vbo + mask) & ~mask;
+ end_a = end & ~mask;
+@@ -524,6 +528,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ }
+ } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+ /*
+@@ -547,6 +553,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_collapse_range(ni, vbo, len);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+ /* Check new size. */
+ err = inode_newsize_ok(inode, new_size);
+@@ -563,6 +571,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_insert_range(ni, vbo, len);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ } else {
+ /* Check new size. */
+ u8 cluster_bits = sbi->cluster_bits;
+@@ -632,11 +642,18 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ &ni->file.run, i_size, &ni->i_valid,
+ true, NULL);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ } else if (new_size > i_size) {
+- inode->i_size = new_size;
++ i_size_write(inode, new_size);
+ }
+ }
+
++ok:
++ err = file_modified(file);
++ if (err)
++ goto out;
++
+ out:
+ if (map_locked)
+ filemap_invalidate_unlock(mapping);
+@@ -662,6 +679,9 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ umode_t mode = inode->i_mode;
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ err = setattr_prepare(idmap, dentry, attr);
+ if (err)
+ goto out;
+@@ -675,7 +695,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ goto out;
+ }
+ inode_dio_wait(inode);
+- oldsize = inode->i_size;
++ oldsize = i_size_read(inode);
+ newsize = attr->ia_size;
+
+ if (newsize <= oldsize)
+@@ -687,7 +707,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ goto out;
+
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+- inode->i_size = newsize;
++ i_size_write(inode, newsize);
+ }
+
+ setattr_copy(idmap, inode, attr);
+@@ -717,6 +737,9 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ struct inode *inode = file->f_mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "encrypted i/o not supported");
+ return -EOPNOTSUPP;
+@@ -751,6 +774,9 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
+ struct inode *inode = in->f_mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "encrypted i/o not supported");
+ return -EOPNOTSUPP;
+@@ -820,7 +846,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
+ size_t count = iov_iter_count(from);
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(file);
+- loff_t i_size = inode->i_size;
++ loff_t i_size = i_size_read(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct ntfs_inode *ni = ntfs_i(inode);
+ u64 valid = ni->i_valid;
+@@ -1027,6 +1053,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
+ iocb->ki_pos += written;
+ if (iocb->ki_pos > ni->i_valid)
+ ni->i_valid = iocb->ki_pos;
++ if (iocb->ki_pos > i_size)
++ i_size_write(inode, iocb->ki_pos);
+
+ return written;
+ }
+@@ -1040,8 +1068,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ ssize_t ret;
++ int err;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "encrypted i/o not supported");
+ return -EOPNOTSUPP;
+@@ -1067,6 +1099,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ if (ret <= 0)
+ goto out;
+
++ err = file_modified(iocb->ki_filp);
++ if (err) {
++ ret = err;
++ goto out;
++ }
++
+ if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
+ /* Should never be here, see ntfs_file_open(). */
+ ret = -EOPNOTSUPP;
+@@ -1096,6 +1134,9 @@ int ntfs_file_open(struct inode *inode, struct file *file)
+ {
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
+ (file->f_flags & O_DIRECT))) {
+ return -EOPNOTSUPP;
+@@ -1137,7 +1178,8 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
+ down_write(&ni->file.run_lock);
+
+ err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
+- inode->i_size, &ni->i_valid, false, NULL);
++ i_size_read(inode), &ni->i_valid, false,
++ NULL);
+
+ up_write(&ni->file.run_lock);
+ ni_unlock(ni);
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index dad976a6898596..61055bcfe8277f 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -778,7 +778,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ run_deallocate(sbi, &ni->attr_list.run, true);
+ run_close(&ni->attr_list.run);
+ ni->attr_list.size = 0;
+- kfree(ni->attr_list.le);
++ kvfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.dirty = false;
+
+@@ -927,7 +927,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ return 0;
+
+ out:
+- kfree(ni->attr_list.le);
++ kvfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.size = 0;
+ return err;
+@@ -1501,7 +1501,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
+
+ if (is_ext) {
+ if (flags & ATTR_FLAG_COMPRESSED)
+- attr->nres.c_unit = COMPRESSION_UNIT;
++ attr->nres.c_unit = NTFS_LZNT_CUNIT;
+ attr->nres.total_size = attr->nres.alloc_size;
+ }
+
+@@ -1601,8 +1601,10 @@ int ni_delete_all(struct ntfs_inode *ni)
+ asize = le32_to_cpu(attr->size);
+ roff = le16_to_cpu(attr->nres.run_off);
+
+- if (roff > asize)
++ if (roff > asize) {
++ _ntfs_bad_inode(&ni->vfs_inode);
+ return -EINVAL;
++ }
+
+ /* run==1 means unpack and deallocate. */
+ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
+@@ -1896,6 +1898,46 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
+ return REPARSE_LINK;
+ }
+
++/*
++ * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent,
++ * but using 'fe_k' instead of fieinfo->fi_extents_start.
++ */
++static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo,
++ struct fiemap_extent *fe_k, u64 logical,
++ u64 phys, u64 len, u32 flags)
++{
++ struct fiemap_extent extent;
++
++ /* only count the extents */
++ if (fieinfo->fi_extents_max == 0) {
++ fieinfo->fi_extents_mapped++;
++ return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
++ }
++
++ if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
++ return 1;
++
++ if (flags & FIEMAP_EXTENT_DELALLOC)
++ flags |= FIEMAP_EXTENT_UNKNOWN;
++ if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED)
++ flags |= FIEMAP_EXTENT_ENCODED;
++ if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE))
++ flags |= FIEMAP_EXTENT_NOT_ALIGNED;
++
++ memset(&extent, 0, sizeof(extent));
++ extent.fe_logical = logical;
++ extent.fe_physical = phys;
++ extent.fe_length = len;
++ extent.fe_flags = flags;
++
++ memcpy(fe_k + fieinfo->fi_extents_mapped, &extent, sizeof(extent));
++
++ fieinfo->fi_extents_mapped++;
++ if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
++ return 1;
++ return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
++}
++
+ /*
+ * ni_fiemap - Helper for file_fiemap().
+ *
+@@ -1906,6 +1948,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ __u64 vbo, __u64 len)
+ {
+ int err = 0;
++ struct fiemap_extent *fe_k = NULL;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ u8 cluster_bits = sbi->cluster_bits;
+ struct runs_tree *run;
+@@ -1953,6 +1996,17 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ goto out;
+ }
+
++ /*
++ * To avoid locking problems, replace the pointer to user memory with a pointer to kernel memory.
++ */
++ fe_k = kmalloc_array(fieinfo->fi_extents_max,
++ sizeof(struct fiemap_extent),
++ GFP_NOFS | __GFP_ZERO);
++ if (!fe_k) {
++ err = -ENOMEM;
++ goto out;
++ }
++
+ end = vbo + len;
+ alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ if (end > alloc_size)
+@@ -2041,8 +2095,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ if (vbo + dlen >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
+- err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
+- flags);
++ err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo,
++ dlen, flags);
++
+ if (err < 0)
+ break;
+ if (err == 1) {
+@@ -2062,7 +2117,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ if (vbo + bytes >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
+- err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
++ err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, bytes,
++ flags);
+ if (err < 0)
+ break;
+ if (err == 1) {
+@@ -2075,7 +2131,17 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+
+ up_read(run_lock);
+
++	/*
++	 * Copy to user memory outside of the lock.
++	 */
++ if (copy_to_user(fieinfo->fi_extents_start, fe_k,
++ fieinfo->fi_extents_max *
++ sizeof(struct fiemap_extent))) {
++ err = -EFAULT;
++ }
++
+ out:
++ kfree(fe_k);
+ return err;
+ }
+
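The ni_fiemap() hunks above collect extents into a kernel-allocated array of struct fiemap_extent while run_lock is held, then issue a single copy_to_user() after the lock is dropped; writing to user memory under the lock can fault and re-enter the filesystem. A minimal userspace sketch of the same fill-then-copy-outside-the-lock shape (all names illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct extent { uint64_t logical, phys, len; };

    static pthread_mutex_t run_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Fill a private buffer under the lock; defer the "user" copy. */
    static int map_extents(struct extent *user_buf, size_t max, size_t *mapped)
    {
            struct extent *tmp = calloc(max, sizeof(*tmp));
            size_t n = 0;

            if (!tmp)
                    return -1;

            pthread_mutex_lock(&run_lock);
            for (uint64_t vbo = 0; n < max && vbo < 3; vbo++, n++) {
                    tmp[n].logical = vbo << 12;        /* fake extents */
                    tmp[n].phys    = (vbo + 100) << 12;
                    tmp[n].len     = 4096;
            }
            pthread_mutex_unlock(&run_lock);

            /* One bulk copy outside the lock (copy_to_user() in the kernel). */
            memcpy(user_buf, tmp, n * sizeof(*tmp));
            free(tmp);
            *mapped = n;
            return 0;
    }

    int main(void)
    {
            struct extent out[8];
            size_t n;

            if (map_extents(out, 8, &n) == 0)
                    printf("mapped %zu extents\n", n);
            return 0;
    }

The cost is one fi_extents_max-sized kernel allocation per call, which is why every exit path in the hunk funnels through 'out:' to free it.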
+@@ -2099,7 +2165,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
+ gfp_t gfp_mask;
+ struct page *pg;
+
+- if (vbo >= ni->vfs_inode.i_size) {
++ if (vbo >= i_size_read(&ni->vfs_inode)) {
+ SetPageUptodate(page);
+ err = 0;
+ goto out;
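This hunk and several later ones replace direct inode->i_size access with i_size_read()/i_size_write(). On 32-bit SMP builds loff_t is 64 bits wide, so an unsynchronized reader can observe a torn value; the kernel accessors protect the two halves with a seqcount. A standalone model of that protocol (a sketch of the idea only; the plain lo/hi accesses race in strict C11, and the real helpers live in include/linux/fs.h):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sized {
            atomic_uint seq;   /* even = stable, odd = write in progress */
            uint32_t lo, hi;   /* a 64-bit size stored as two 32-bit halves */
    };

    static void size_write(struct sized *s, uint64_t v)
    {
            atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
            s->lo = (uint32_t)v;
            s->hi = (uint32_t)(v >> 32);
            atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
    }

    static uint64_t size_read(struct sized *s)
    {
            unsigned begin;
            uint64_t v;

            do {    /* retry while a writer is active or raced with us */
                    begin = atomic_load_explicit(&s->seq, memory_order_acquire);
                    v = ((uint64_t)s->hi << 32) | s->lo;
            } while ((begin & 1) ||
                     begin != atomic_load_explicit(&s->seq, memory_order_acquire));
            return v;
    }

    int main(void)
    {
            struct sized s = { 0 };

            size_write(&s, 5ULL << 32);
            printf("size = %llu\n", (unsigned long long)size_read(&s));
            return 0;
    }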
+@@ -2173,7 +2239,7 @@ int ni_decompress_file(struct ntfs_inode *ni)
+ {
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct inode *inode = &ni->vfs_inode;
+- loff_t i_size = inode->i_size;
++ loff_t i_size = i_size_read(inode);
+ struct address_space *mapping = inode->i_mapping;
+ gfp_t gfp_mask = mapping_gfp_mask(mapping);
+ struct page **pages = NULL;
+@@ -2508,6 +2574,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ err = -EOPNOTSUPP;
+ goto out1;
+ #else
++ loff_t i_size = i_size_read(&ni->vfs_inode);
+ u32 frame_bits = ni_ext_compress_bits(ni);
+ u64 frame64 = frame_vbo >> frame_bits;
+ u64 frames, vbo_data;
+@@ -2548,7 +2615,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ }
+ }
+
+- frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
++ frames = (i_size - 1) >> frame_bits;
+
+ err = attr_wof_frame_info(ni, attr, run, frame64, frames,
+ frame_bits, &ondisk_size, &vbo_data);
+@@ -2556,8 +2623,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ goto out2;
+
+ if (frame64 == frames) {
+- unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
+- (frame_size - 1));
++ unc_size = 1 + ((i_size - 1) & (frame_size - 1));
+ ondisk_size = attr_size(attr) - vbo_data;
+ } else {
+ unc_size = frame_size;
+@@ -3259,6 +3325,9 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
+ if (is_bad_inode(inode) || sb_rdonly(sb))
+ return 0;
+
++ if (unlikely(ntfs3_forced_shutdown(sb)))
++ return -EIO;
++
+ if (!ni_trylock(ni)) {
+ /* 'ni' is under modification, skip for now. */
+ mark_inode_dirty_sync(inode);
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 98ccb665085831..2a1aeab53ea4b5 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -465,7 +465,7 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
+ {
+ const struct RESTART_AREA *ra;
+ u16 cl, fl, ul;
+- u32 off, l_size, file_dat_bits, file_size_round;
++ u32 off, l_size, seq_bits;
+ u16 ro = le16_to_cpu(rhdr->ra_off);
+ u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
+
+@@ -511,13 +511,15 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
+ /* Make sure the sequence number bits match the log file size. */
+ l_size = le64_to_cpu(ra->l_size);
+
+- file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
+- file_size_round = 1u << (file_dat_bits + 3);
+- if (file_size_round != l_size &&
+- (file_size_round < l_size || (file_size_round / 2) > l_size)) {
+- return false;
++ seq_bits = sizeof(u64) * 8 + 3;
++ while (l_size) {
++ l_size >>= 1;
++ seq_bits -= 1;
+ }
+
++ if (seq_bits != ra->seq_num_bits)
++ return false;
++
+ /* The log page data offset and record header length must be quad-aligned. */
+ if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
+ !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
+@@ -607,14 +609,29 @@ static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
+ *head = cpu_to_le16(index);
+ }
+
++/*
++ * Enumerate restart table.
++ *
++ * @t - table to enumerate.
++ * @c - current enumerated element.
++ *
++ * Enumeration starts with @c == NULL.
++ * Returns the next element or NULL.
++ */
+ static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
+ {
+ __le32 *e;
+ u32 bprt;
+- u16 rsize = t ? le16_to_cpu(t->size) : 0;
++ u16 rsize;
++
++ if (!t)
++ return NULL;
++
++ rsize = le16_to_cpu(t->size);
+
+ if (!c) {
+- if (!t || !t->total)
++ /* start enumeration. */
++ if (!t->total)
+ return NULL;
+ e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
+ } else {
+@@ -722,7 +739,8 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
+
+ if (!rsize || rsize > bytes ||
+ rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
+- le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
++ le16_to_cpu(rt->total) > ne ||
++ ff > ts - sizeof(__le32) || lf > ts - sizeof(__le32) ||
+ (ff && ff < sizeof(struct RESTART_TABLE)) ||
+ (lf && lf < sizeof(struct RESTART_TABLE))) {
+ return false;
+@@ -752,6 +770,9 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
+ return false;
+
+ off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
++
++ if (off > ts - sizeof(__le32))
++ return false;
+ }
+
+ return true;
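The added checks in check_rstbl() cap ff, lf and every chained offset at ts - sizeof(__le32), so reading the 32-bit next-pointer stored at that offset can never run past the table. A standalone walker using the same guard (illustrative layout; assumes size >= 4):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Walk a chain of 32-bit "next" offsets stored inside buf[0..size). */
    static int walk_chain(const uint8_t *buf, uint32_t size, uint32_t off)
    {
            uint32_t steps = size / sizeof(uint32_t); /* cycle guard */

            while (off) {
                    /* The guard from check_rstbl: the 4-byte link must fit. */
                    if (off > size - sizeof(uint32_t))
                            return -1; /* corrupted table */
                    if (!steps--)
                            return -1; /* cycle */
                    memcpy(&off, buf + off, sizeof(off));
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[32] = { 0 };
            uint32_t next = 16;

            memcpy(buf + 8, &next, sizeof(next)); /* chain: 8 -> 16 -> end */
            printf("chain ok: %d\n", walk_chain(buf, sizeof(buf), 8));
            return 0;
    }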
+@@ -974,6 +995,16 @@ static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
+ return e;
+ }
+
++struct restart_info {
++ u64 last_lsn;
++ struct RESTART_HDR *r_page;
++ u32 vbo;
++ bool chkdsk_was_run;
++ bool valid_page;
++ bool initialized;
++ bool restart;
++};
++
+ #define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
+
+ #define NTFSLOG_WRAPPED 0x00000001
+@@ -987,6 +1018,7 @@ struct ntfs_log {
+ struct ntfs_inode *ni;
+
+ u32 l_size;
++ u32 orig_file_size;
+ u32 sys_page_size;
+ u32 sys_page_mask;
+ u32 page_size;
+@@ -1040,6 +1072,8 @@ struct ntfs_log {
+
+ struct CLIENT_ID client_id;
+ u32 client_undo_commit;
++
++ struct restart_info rst_info, rst_info2;
+ };
+
+ static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
+@@ -1105,16 +1139,6 @@ static inline bool verify_client_lsn(struct ntfs_log *log,
+ lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
+ }
+
+-struct restart_info {
+- u64 last_lsn;
+- struct RESTART_HDR *r_page;
+- u32 vbo;
+- bool chkdsk_was_run;
+- bool valid_page;
+- bool initialized;
+- bool restart;
+-};
+-
+ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ struct RECORD_PAGE_HDR **buffer, bool *usa_error)
+ {
+@@ -1176,10 +1200,11 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ * restart page header. It will stop the first time we find a
+ * valid page header.
+ */
+-static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
++static int log_read_rst(struct ntfs_log *log, bool first,
+ struct restart_info *info)
+ {
+- u32 skip, vbo;
++ u32 skip;
++ u64 vbo;
+ struct RESTART_HDR *r_page = NULL;
+
+ /* Determine which restart area we are looking for. */
+@@ -1192,7 +1217,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ }
+
+ /* Loop continuously until we succeed. */
+- for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
++ for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) {
+ bool usa_error;
+ bool brst, bchk;
+ struct RESTART_AREA *ra;
+@@ -1285,22 +1310,17 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ /*
+  * log_init_pg_hdr - Init @log from restart page header.
+ */
+-static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
+- u32 page_size, u16 major_ver, u16 minor_ver)
++static void log_init_pg_hdr(struct ntfs_log *log, u16 major_ver, u16 minor_ver)
+ {
+- log->sys_page_size = sys_page_size;
+- log->sys_page_mask = sys_page_size - 1;
+- log->page_size = page_size;
+- log->page_mask = page_size - 1;
+- log->page_bits = blksize_bits(page_size);
++ log->sys_page_size = log->page_size;
++ log->sys_page_mask = log->page_mask;
+
+ log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
+ if (!log->clst_per_page)
+ log->clst_per_page = 1;
+
+- log->first_page = major_ver >= 2 ?
+- 0x22 * page_size :
+- ((sys_page_size << 1) + (page_size << 1));
++ log->first_page = major_ver >= 2 ? 0x22 * log->page_size :
++ 4 * log->page_size;
+ log->major_ver = major_ver;
+ log->minor_ver = minor_ver;
+ }
+@@ -1308,12 +1328,11 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
+ /*
+ * log_create - Init @log in cases when we don't have a restart area to use.
+ */
+-static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
++static void log_create(struct ntfs_log *log, const u64 last_lsn,
+ u32 open_log_count, bool wrapped, bool use_multi_page)
+ {
+- log->l_size = l_size;
+ /* All file offsets must be quadword aligned. */
+- log->file_data_bits = blksize_bits(l_size) - 3;
++ log->file_data_bits = blksize_bits(log->l_size) - 3;
+ log->seq_num_mask = (8 << log->file_data_bits) - 1;
+ log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
+ log->seq_num = (last_lsn >> log->file_data_bits) + 2;
+@@ -2992,7 +3011,7 @@ static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
+ if (is_ext) {
+ attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
+ if (is_attr_compressed(attr))
+- attr->nres.c_unit = COMPRESSION_UNIT;
++ attr->nres.c_unit = NTFS_LZNT_CUNIT;
+
+ attr->nres.run_off =
+ cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
+@@ -3720,10 +3739,10 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct ntfs_log *log;
+
+- struct restart_info rst_info, rst_info2;
+- u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
++ u64 rec_lsn, checkpt_lsn = 0, rlsn = 0;
+ struct ATTR_NAME_ENTRY *attr_names = NULL;
+- struct ATTR_NAME_ENTRY *ane;
++ u32 attr_names_bytes = 0;
++ u32 oatbl_bytes = 0;
+ struct RESTART_TABLE *dptbl = NULL;
+ struct RESTART_TABLE *trtbl = NULL;
+ const struct RESTART_TABLE *rt;
+@@ -3738,12 +3757,11 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ struct NTFS_RESTART *rst = NULL;
+ struct lcb *lcb = NULL;
+ struct OPEN_ATTR_ENRTY *oe;
++ struct ATTR_NAME_ENTRY *ane;
+ struct TRANSACTION_ENTRY *tr;
+ struct DIR_PAGE_ENTRY *dp;
+ u32 i, bytes_per_attr_entry;
+- u32 l_size = ni->vfs_inode.i_size;
+- u32 orig_file_size = l_size;
+- u32 page_size, vbo, tail, off, dlen;
++ u32 vbo, tail, off, dlen;
+ u32 saved_len, rec_len, transact_id;
+ bool use_second_page;
+ struct RESTART_AREA *ra2, *ra = NULL;
+@@ -3758,52 +3776,50 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ u16 t16;
+ u32 t32;
+
+- /* Get the size of page. NOTE: To replay we can use default page. */
+-#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
+- page_size = norm_file_page(PAGE_SIZE, &l_size, true);
+-#else
+- page_size = norm_file_page(PAGE_SIZE, &l_size, false);
+-#endif
+- if (!page_size)
+- return -EINVAL;
+-
+ log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
+ if (!log)
+ return -ENOMEM;
+
+ log->ni = ni;
+- log->l_size = l_size;
+- log->one_page_buf = kmalloc(page_size, GFP_NOFS);
++ log->l_size = log->orig_file_size = ni->vfs_inode.i_size;
+
++ /* Get the size of page. NOTE: To replay we can use default page. */
++#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
++ log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true);
++#else
++ log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false);
++#endif
++ if (!log->page_size) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ log->one_page_buf = kmalloc(log->page_size, GFP_NOFS);
+ if (!log->one_page_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+- log->page_size = page_size;
+- log->page_mask = page_size - 1;
+- log->page_bits = blksize_bits(page_size);
++ log->page_mask = log->page_size - 1;
++ log->page_bits = blksize_bits(log->page_size);
+
+ /* Look for a restart area on the disk. */
+- memset(&rst_info, 0, sizeof(struct restart_info));
+- err = log_read_rst(log, l_size, true, &rst_info);
++ err = log_read_rst(log, true, &log->rst_info);
+ if (err)
+ goto out;
+
+ /* remember 'initialized' */
+- *initialized = rst_info.initialized;
++ *initialized = log->rst_info.initialized;
+
+- if (!rst_info.restart) {
+- if (rst_info.initialized) {
++ if (!log->rst_info.restart) {
++ if (log->rst_info.initialized) {
+ /* No restart area but the file is not initialized. */
+ err = -EINVAL;
+ goto out;
+ }
+
+- log_init_pg_hdr(log, page_size, page_size, 1, 1);
+- log_create(log, l_size, 0, get_random_u32(), false, false);
+-
+- log->ra = ra;
++ log_init_pg_hdr(log, 1, 1);
++ log_create(log, 0, get_random_u32(), false, false);
+
+ ra = log_create_ra(log);
+ if (!ra) {
+@@ -3820,25 +3836,26 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ * If the restart offset above wasn't zero then we won't
+ * look for a second restart.
+ */
+- if (rst_info.vbo)
++ if (log->rst_info.vbo)
+ goto check_restart_area;
+
+- memset(&rst_info2, 0, sizeof(struct restart_info));
+- err = log_read_rst(log, l_size, false, &rst_info2);
++ err = log_read_rst(log, false, &log->rst_info2);
+ if (err)
+ goto out;
+
+ /* Determine which restart area to use. */
+- if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
++ if (!log->rst_info2.restart ||
++ log->rst_info2.last_lsn <= log->rst_info.last_lsn)
+ goto use_first_page;
+
+ use_second_page = true;
+
+- if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
++ if (log->rst_info.chkdsk_was_run &&
++ log->page_size != log->rst_info.vbo) {
+ struct RECORD_PAGE_HDR *sp = NULL;
+ bool usa_error;
+
+- if (!read_log_page(log, page_size, &sp, &usa_error) &&
++ if (!read_log_page(log, log->page_size, &sp, &usa_error) &&
+ sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
+ use_second_page = false;
+ }
+@@ -3846,52 +3863,43 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ }
+
+ if (use_second_page) {
+- kfree(rst_info.r_page);
+- memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
+- rst_info2.r_page = NULL;
++ kfree(log->rst_info.r_page);
++ memcpy(&log->rst_info, &log->rst_info2,
++ sizeof(struct restart_info));
++ log->rst_info2.r_page = NULL;
+ }
+
+ use_first_page:
+- kfree(rst_info2.r_page);
++ kfree(log->rst_info2.r_page);
+
+ check_restart_area:
+ /*
+ * If the restart area is at offset 0, we want
+ * to write the second restart area first.
+ */
+- log->init_ra = !!rst_info.vbo;
++ log->init_ra = !!log->rst_info.vbo;
+
+ /* If we have a valid page then grab a pointer to the restart area. */
+- ra2 = rst_info.valid_page ?
+- Add2Ptr(rst_info.r_page,
+- le16_to_cpu(rst_info.r_page->ra_off)) :
++ ra2 = log->rst_info.valid_page ?
++ Add2Ptr(log->rst_info.r_page,
++ le16_to_cpu(log->rst_info.r_page->ra_off)) :
+ NULL;
+
+- if (rst_info.chkdsk_was_run ||
++ if (log->rst_info.chkdsk_was_run ||
+ (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
+ bool wrapped = false;
+ bool use_multi_page = false;
+ u32 open_log_count;
+
+ /* Do some checks based on whether we have a valid log page. */
+- if (!rst_info.valid_page) {
+- open_log_count = get_random_u32();
+- goto init_log_instance;
+- }
+- open_log_count = le32_to_cpu(ra2->open_log_count);
+-
+- /*
+- * If the restart page size isn't changing then we want to
+- * check how much work we need to do.
+- */
+- if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
+- goto init_log_instance;
++ open_log_count = log->rst_info.valid_page ?
++ le32_to_cpu(ra2->open_log_count) :
++ get_random_u32();
+
+-init_log_instance:
+- log_init_pg_hdr(log, page_size, page_size, 1, 1);
++ log_init_pg_hdr(log, 1, 1);
+
+- log_create(log, l_size, rst_info.last_lsn, open_log_count,
+- wrapped, use_multi_page);
++ log_create(log, log->rst_info.last_lsn, open_log_count, wrapped,
++ use_multi_page);
+
+ ra = log_create_ra(log);
+ if (!ra) {
+@@ -3916,28 +3924,30 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ * use the log file. We must use the system page size instead of the
+ * default size if there is not a clean shutdown.
+ */
+- t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
+- if (page_size != t32) {
+- l_size = orig_file_size;
+- page_size =
+- norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
++ t32 = le32_to_cpu(log->rst_info.r_page->sys_page_size);
++ if (log->page_size != t32) {
++ log->l_size = log->orig_file_size;
++ log->page_size = norm_file_page(t32, &log->l_size,
++ t32 == DefaultLogPageSize);
+ }
+
+- if (page_size != t32 ||
+- page_size != le32_to_cpu(rst_info.r_page->page_size)) {
++ if (log->page_size != t32 ||
++ log->page_size != le32_to_cpu(log->rst_info.r_page->page_size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
++ log->page_mask = log->page_size - 1;
++ log->page_bits = blksize_bits(log->page_size);
++
+ /* If the file size has shrunk then we won't mount it. */
+- if (l_size < le64_to_cpu(ra2->l_size)) {
++ if (log->l_size < le64_to_cpu(ra2->l_size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+- log_init_pg_hdr(log, page_size, page_size,
+- le16_to_cpu(rst_info.r_page->major_ver),
+- le16_to_cpu(rst_info.r_page->minor_ver));
++ log_init_pg_hdr(log, le16_to_cpu(log->rst_info.r_page->major_ver),
++ le16_to_cpu(log->rst_info.r_page->minor_ver));
+
+ log->l_size = le64_to_cpu(ra2->l_size);
+ log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
+@@ -3945,7 +3955,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ log->seq_num_mask = (8 << log->file_data_bits) - 1;
+ log->last_lsn = le64_to_cpu(ra2->current_lsn);
+ log->seq_num = log->last_lsn >> log->file_data_bits;
+- log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
++ log->ra_off = le16_to_cpu(log->rst_info.r_page->ra_off);
+ log->restart_size = log->sys_page_size - log->ra_off;
+ log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
+ log->ra_size = le16_to_cpu(ra2->ra_len);
+@@ -4045,7 +4055,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ log->current_avail = current_log_avail(log);
+
+ /* Remember which restart area to write first. */
+- log->init_ra = rst_info.vbo;
++ log->init_ra = log->rst_info.vbo;
+
+ process_log:
+ /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values. */
+@@ -4105,7 +4115,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ log->client_id.seq_num = cr->seq_num;
+ log->client_id.client_idx = client;
+
+- err = read_rst_area(log, &rst, &ra_lsn);
++ err = read_rst_area(log, &rst, &checkpt_lsn);
+ if (err)
+ goto out;
+
+@@ -4114,9 +4124,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+
+ bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
+
+- checkpt_lsn = le64_to_cpu(rst->check_point_start);
+- if (!checkpt_lsn)
+- checkpt_lsn = ra_lsn;
++ if (rst->check_point_start)
++ checkpt_lsn = le64_to_cpu(rst->check_point_start);
+
+ /* Allocate and Read the Transaction Table. */
+ if (!rst->transact_table_len)
+@@ -4330,23 +4339,43 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ lcb = NULL;
+
+ check_attribute_names2:
+- if (!rst->attr_names_len)
+- goto trace_attribute_table;
+-
+- ane = attr_names;
+- if (!oatbl)
+- goto trace_attribute_table;
+- while (ane->off) {
+- /* TODO: Clear table on exit! */
+- oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
+- t16 = le16_to_cpu(ane->name_bytes);
+- oe->name_len = t16 / sizeof(short);
+- oe->ptr = ane->name;
+- oe->is_attr_name = 2;
+- ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
+- }
+-
+-trace_attribute_table:
++ if (attr_names && oatbl) {
++ off = 0;
++ for (;;) {
++ /* Check we can use attribute name entry 'ane'. */
++ static_assert(sizeof(*ane) == 4);
++ if (off + sizeof(*ane) > attr_names_bytes) {
++ /* just ignore the rest. */
++ break;
++ }
++
++ ane = Add2Ptr(attr_names, off);
++ t16 = le16_to_cpu(ane->off);
++ if (!t16) {
++ /* this is the only valid exit. */
++ break;
++ }
++
++ /* Check we can use open attribute entry 'oe'. */
++ if (t16 + sizeof(*oe) > oatbl_bytes) {
++ /* just ignore the rest. */
++ break;
++ }
++
++ /* TODO: Clear table on exit! */
++ oe = Add2Ptr(oatbl, t16);
++ t16 = le16_to_cpu(ane->name_bytes);
++ off += t16 + sizeof(*ane);
++ if (off > attr_names_bytes) {
++ /* just ignore the rest. */
++ break;
++ }
++ oe->name_len = t16 / sizeof(short);
++ oe->ptr = ane->name;
++ oe->is_attr_name = 2;
++ }
++ }
++
+ /*
+ * If the checkpt_lsn is zero, then this is a freshly
+ * formatted disk and we have no work to do.
+@@ -5189,7 +5218,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ kfree(oatbl);
+ kfree(dptbl);
+ kfree(attr_names);
+- kfree(rst_info.r_page);
++ kfree(log->rst_info.r_page);
+
+ kfree(ra);
+ kfree(log->one_page_buf);
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index fbfe21dbb42597..e19b13db4f91ee 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -522,7 +522,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
+ ni->mi.dirty = true;
+
+ /* Step 2: Resize $MFT::BITMAP. */
+- new_bitmap_bytes = bitmap_size(new_mft_total);
++ new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);
+
+ err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
+ new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
+@@ -853,7 +853,8 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
+ /*
+ * sb can be NULL here. In this case sbi->flags should be 0 too.
+ */
+- if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
++ if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
++ unlikely(ntfs3_forced_shutdown(sb)))
+ return;
+
+ blocksize = sb->s_blocksize;
+@@ -1006,6 +1007,30 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
+ return cpu_to_le32(hash);
+ }
+
++/*
++ * simple wrapper for sb_bread_unmovable.
++ */
++struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
++{
++ struct ntfs_sb_info *sbi = sb->s_fs_info;
++ struct buffer_head *bh;
++
++ if (unlikely(block >= sbi->volume.blocks)) {
++ /* prevent generic message "attempt to access beyond end of device" */
++ ntfs_err(sb, "try to read out of volume at offset 0x%llx",
++ (u64)block << sb->s_blocksize_bits);
++ return NULL;
++ }
++
++ bh = sb_bread_unmovable(sb, block);
++ if (bh)
++ return bh;
++
++ ntfs_err(sb, "failed to read volume at offset 0x%llx",
++ (u64)block << sb->s_blocksize_bits);
++ return NULL;
++}
++
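ntfs_bread() moves out of line and gains a range check against sbi->volume.blocks, so a corrupted run can no longer reach the block layer and provoke the generic "attempt to access beyond end of device" warning. The same validate-then-read shape in plain C (illustrative, with a file standing in for the block device):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define BLOCK_SIZE 4096

    /* Read one block, refusing offsets beyond the recorded device size. */
    static void *read_block(int fd, uint64_t block, uint64_t total_blocks)
    {
            void *buf;

            if (block >= total_blocks) {
                    fprintf(stderr, "read out of volume: block %llu\n",
                            (unsigned long long)block);
                    return NULL;
            }

            buf = malloc(BLOCK_SIZE);
            if (!buf)
                    return NULL;

            if (pread(fd, buf, BLOCK_SIZE, (off_t)(block * BLOCK_SIZE)) !=
                BLOCK_SIZE) {
                    free(buf);
                    return NULL;
            }
            return buf;
    }

    int main(void)
    {
            int fd = open("/dev/zero", O_RDONLY);
            void *ok  = read_block(fd, 2, 4); /* within bounds */
            void *bad = read_block(fd, 9, 4); /* rejected before any I/O */

            printf("ok=%p bad=%p\n", ok, bad);
            free(ok);
            free(bad);
            close(fd);
            return 0;
    }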
+ int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
+ {
+ struct block_device *bdev = sb->s_bdev;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index cf92b2433f7a75..9089c58a005ce1 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -978,7 +978,7 @@ static struct indx_node *indx_new(struct ntfs_index *indx,
+ hdr->used =
+ cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
+ de_set_vbn_le(e, *sub_vbn);
+- hdr->flags = 1;
++ hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES;
+ } else {
+ e->size = cpu_to_le16(sizeof(struct NTFS_DE));
+ hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
+@@ -1456,13 +1456,13 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+
+ alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
+
+- err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
+- in->name_len, &bitmap, NULL, NULL);
++ err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP,
++ in->name, in->name_len, &bitmap, NULL, NULL);
+ if (err)
+ goto out2;
+
+ if (in->name == I30_NAME) {
+- ni->vfs_inode.i_size = data_size;
++ i_size_write(&ni->vfs_inode, data_size);
+ inode_set_bytes(&ni->vfs_inode, alloc_size);
+ }
+
+@@ -1518,8 +1518,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ if (bmp) {
+ /* Increase bitmap. */
+ err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
+- &indx->bitmap_run, bitmap_size(bit + 1),
+- NULL, true, NULL);
++ &indx->bitmap_run,
++ ntfs3_bitmap_size(bit + 1), NULL, true,
++ NULL);
+ if (err)
+ goto out1;
+ }
+@@ -1533,6 +1534,11 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ goto out1;
+ }
+
++ if (data_size <= le64_to_cpu(alloc->nres.data_size)) {
++ /* Reuse index. */
++ goto out;
++ }
++
+ /* Increase allocation. */
+ err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
+ &indx->alloc_run, data_size, &data_size, true,
+@@ -1544,8 +1550,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ }
+
+ if (in->name == I30_NAME)
+- ni->vfs_inode.i_size = data_size;
++ i_size_write(&ni->vfs_inode, data_size);
+
++out:
+ *vbn = bit << indx->idx2vbn_bits;
+
+ return 0;
+@@ -1676,7 +1683,7 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
+ e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
+ e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
+
+- hdr->flags = 1;
++ hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES;
+ hdr->used = hdr->total =
+ cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));
+
+@@ -2090,9 +2097,9 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
+ return err;
+
+ if (in->name == I30_NAME)
+- ni->vfs_inode.i_size = new_data;
++ i_size_write(&ni->vfs_inode, new_data);
+
+- bpb = bitmap_size(bit);
++ bpb = ntfs3_bitmap_size(bit);
+ if (bpb * 8 == nbits)
+ return 0;
+
+@@ -2576,7 +2583,7 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
+ err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
+ &indx->alloc_run, 0, NULL, false, NULL);
+ if (in->name == I30_NAME)
+- ni->vfs_inode.i_size = 0;
++ i_size_write(&ni->vfs_inode, 0);
+
+ err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
+ false, NULL);
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index d6d021e19aaa23..1545262995da21 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -37,7 +37,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ bool is_dir;
+ unsigned long ino = inode->i_ino;
+ u32 rp_fa = 0, asize, t32;
+- u16 roff, rsize, names = 0;
++ u16 roff, rsize, names = 0, links = 0;
+ const struct ATTR_FILE_NAME *fname = NULL;
+ const struct INDEX_ROOT *root;
+ struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
+@@ -198,11 +198,12 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ rsize < SIZEOF_ATTRIBUTE_FILENAME)
+ goto out;
+
++ names += 1;
+ fname = Add2Ptr(attr, roff);
+ if (fname->type == FILE_NAME_DOS)
+ goto next_attr;
+
+- names += 1;
++ links += 1;
+ if (name && name->len == fname->name_len &&
+ !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
+ NULL, false))
+@@ -410,7 +411,6 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ goto out;
+
+ if (!is_match && name) {
+- /* Reuse rec as buffer for ascii name. */
+ err = -ENOENT;
+ goto out;
+ }
+@@ -425,11 +425,12 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+
+ if (names != le16_to_cpu(rec->hard_links)) {
+ /* Correct minor error on the fly. Do not mark inode as dirty. */
++ ntfs_inode_warn(inode, "Correct links count -> %u.", names);
+ rec->hard_links = cpu_to_le16(names);
+ ni->mi.dirty = true;
+ }
+
+- set_nlink(inode, names);
++ set_nlink(inode, links);
+
+ if (S_ISDIR(mode)) {
+ ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
+@@ -570,13 +571,18 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
+ clear_buffer_uptodate(bh);
+
+ if (is_resident(ni)) {
+- ni_lock(ni);
+- err = attr_data_read_resident(ni, &folio->page);
+- ni_unlock(ni);
+-
+- if (!err)
+- set_buffer_uptodate(bh);
++ bh->b_blocknr = RESIDENT_LCN;
+ bh->b_size = block_size;
++ if (!folio) {
++ err = 0;
++ } else {
++ ni_lock(ni);
++ err = attr_data_read_resident(ni, &folio->page);
++ ni_unlock(ni);
++
++ if (!err)
++ set_buffer_uptodate(bh);
++ }
+ return err;
+ }
+
+@@ -851,9 +857,13 @@ static int ntfs_resident_writepage(struct folio *folio,
+ struct writeback_control *wbc, void *data)
+ {
+ struct address_space *mapping = data;
+- struct ntfs_inode *ni = ntfs_i(mapping->host);
++ struct inode *inode = mapping->host;
++ struct ntfs_inode *ni = ntfs_i(inode);
+ int ret;
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ ni_lock(ni);
+ ret = attr_data_write_resident(ni, &folio->page);
+ ni_unlock(ni);
+@@ -867,7 +877,12 @@ static int ntfs_resident_writepage(struct folio *folio,
+ static int ntfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+ {
+- if (is_resident(ntfs_i(mapping->host)))
++ struct inode *inode = mapping->host;
++
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
++ if (is_resident(ntfs_i(inode)))
+ return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
+ mapping);
+ return mpage_writepages(mapping, wbc, ntfs_get_block);
+@@ -887,6 +902,9 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
+ struct inode *inode = mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ *pagep = NULL;
+ if (is_resident(ni)) {
+ struct page *page =
+@@ -971,7 +989,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
+ }
+
+ if (pos + err > inode->i_size) {
+- inode->i_size = pos + err;
++ i_size_write(inode, pos + err);
+ dirty = true;
+ }
+
+@@ -1303,6 +1321,11 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ goto out1;
+ }
+
++ if (unlikely(ntfs3_forced_shutdown(sb))) {
++ err = -EIO;
++ goto out2;
++ }
++
+ /* Mark rw ntfs as dirty. it will be cleared at umount. */
+ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
+
+@@ -1475,7 +1498,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
+ attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
+ attr->flags = ATTR_FLAG_COMPRESSED;
+- attr->nres.c_unit = COMPRESSION_UNIT;
++ attr->nres.c_unit = NTFS_LZNT_CUNIT;
+ asize = SIZEOF_NONRESIDENT_EX + 8;
+ } else {
+ attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
+@@ -1629,7 +1652,9 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ * The packed size of extended attribute is stored in direntry too.
+ * 'fname' here points to inside new_de.
+ */
+- ntfs_save_wsl_perm(inode, &fname->dup.ea_size);
++ err = ntfs_save_wsl_perm(inode, &fname->dup.ea_size);
++ if (err)
++ goto out6;
+
+ /*
+ * update ea_size in file_name attribute too.
+@@ -1671,6 +1696,12 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ goto out2;
+
+ out6:
++ attr = ni_find_attr(ni, NULL, NULL, ATTR_EA, NULL, 0, NULL, NULL);
++ if (attr && attr->non_res) {
++ /* Delete ATTR_EA, if non-resident. */
++ attr_set_size(ni, ATTR_EA, NULL, 0, NULL, 0, NULL, false, NULL);
++ }
++
+ if (rp_inserted)
+ ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
+
+@@ -2094,5 +2125,6 @@ const struct address_space_operations ntfs_aops = {
+ const struct address_space_operations ntfs_aops_cmpr = {
+ .read_folio = ntfs_read_folio,
+ .readahead = ntfs_readahead,
++ .dirty_folio = block_dirty_folio,
+ };
+ // clang-format on
+diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
+index eedacf94edd805..bcdc1ec90a96a8 100644
+--- a/fs/ntfs3/namei.c
++++ b/fs/ntfs3/namei.c
+@@ -181,6 +181,9 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
+ struct ntfs_inode *ni = ntfs_i(dir);
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
++ return -EIO;
++
+ ni_lock_dir(ni);
+
+ err = ntfs_unlink_inode(dir, dentry);
+@@ -199,6 +202,9 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+ u32 size = strlen(symname);
+ struct inode *inode;
+
++ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
++ return -EIO;
++
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0,
+ symname, size, NULL);
+
+@@ -227,6 +233,9 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
+ struct ntfs_inode *ni = ntfs_i(dir);
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
++ return -EIO;
++
+ ni_lock_dir(ni);
+
+ err = ntfs_unlink_inode(dir, dentry);
+@@ -264,6 +273,9 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
+ 1024);
+ static_assert(PATH_MAX >= 4 * 1024);
+
++ if (unlikely(ntfs3_forced_shutdown(sb)))
++ return -EIO;
++
+ if (flags & ~RENAME_NOREPLACE)
+ return -EINVAL;
+
+@@ -489,7 +501,7 @@ static int ntfs_d_hash(const struct dentry *dentry, struct qstr *name)
+ /*
+ * Try slow way with current upcase table
+ */
+- uni = __getname();
++ uni = kmem_cache_alloc(names_cachep, GFP_NOWAIT);
+ if (!uni)
+ return -ENOMEM;
+
+@@ -511,7 +523,7 @@ static int ntfs_d_hash(const struct dentry *dentry, struct qstr *name)
+ err = 0;
+
+ out:
+- __putname(uni);
++ kmem_cache_free(names_cachep, uni);
+ return err;
+ }
+
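ntfs_d_hash() can be called on the RCU-walk path where sleeping is forbidden, and __getname() allocates from names_cachep with GFP_KERNEL, which may sleep; the replacement allocates from the same cache with GFP_NOWAIT and simply reports -ENOMEM when the atomic allocation fails. A toy model of failing fast instead of blocking (illustrative; a single reserve buffer stands in for the non-blocking slab path):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NAME_BUF 4096

    static char pool[NAME_BUF];
    static bool pool_busy;

    /* Atomic-context allocation: take the reserve or fail, never wait. */
    static char *alloc_nowait(void)
    {
            if (pool_busy)
                    return NULL;
            pool_busy = true;
            return pool;
    }

    static void free_nowait(char *p)
    {
            if (p == pool)
                    pool_busy = false;
    }

    static int hash_name(const char *name)
    {
            char *uni = alloc_nowait();

            if (!uni)
                    return -ENOMEM; /* the VFS falls back to the slow path */
            snprintf(uni, NAME_BUF, "%s", name); /* stand-in for case-folding */
            /* ... hash 'uni' here ... */
            free_nowait(uni);
            return 0;
    }

    int main(void)
    {
            printf("hash_name: %d\n", hash_name("FooBar"));
            return 0;
    }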
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 86aecbb01a92f2..964e27c7b90164 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -59,7 +59,7 @@ struct GUID {
+ struct cpu_str {
+ u8 len;
+ u8 unused;
+- u16 name[10];
++ u16 name[];
+ };
+
+ struct le_str {
+@@ -82,9 +82,6 @@ typedef u32 CLST;
+ #define RESIDENT_LCN ((CLST)-2)
+ #define COMPRESSED_LCN ((CLST)-3)
+
+-#define COMPRESSION_UNIT 4
+-#define COMPRESS_MAX_CLUSTER 0x1000
+-
+ enum RECORD_NUM {
+ MFT_REC_MFT = 0,
+ MFT_REC_MIRR = 1,
+@@ -523,12 +520,10 @@ struct ATTR_LIST_ENTRY {
+ __le64 vcn; // 0x08: Starting VCN of this attribute.
+ struct MFT_REF ref; // 0x10: MFT record number with attribute.
+ __le16 id; // 0x18: struct ATTRIB ID.
+- __le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset.
++ __le16 name[]; // 0x1A: Just to align. To get real name can use name_off.
+
+ }; // sizeof(0x20)
+
+-static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
+-
+ static inline u32 le_size(u8 name_len)
+ {
+ return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
+@@ -698,14 +693,15 @@ static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
+ offsetof(struct ATTR_FILE_NAME, name) + \
+ NTFS_NAME_LEN * sizeof(short), 8)
+
++#define NTFS_INDEX_HDR_HAS_SUBNODES cpu_to_le32(1)
++
+ struct INDEX_HDR {
+ __le32 de_off; // 0x00: The offset from the start of this structure
+ // to the first NTFS_DE.
+ __le32 used; // 0x04: The size of this structure plus all
+ // entries (quad-word aligned).
+ __le32 total; // 0x08: The allocated size of for this structure plus all entries.
+- u8 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory.
+- u8 res[3];
++ __le32 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory.
+
+ //
+ // de_off + used <= total
+@@ -753,7 +749,7 @@ static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
+
+ static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
+ {
+- return hdr->flags & 1;
++ return hdr->flags & NTFS_INDEX_HDR_HAS_SUBNODES;
+ }
+
+ struct INDEX_BUFFER {
+@@ -773,7 +769,7 @@ static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
+
+ static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
+ {
+- return !(ib->ihdr.flags & 1);
++ return !(ib->ihdr.flags & NTFS_INDEX_HDR_HAS_SUBNODES);
+ }
+
+ /* Index root structure ( 0x90 ). */
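The ntfs.h changes replace the fake fixed-size trailing arrays (name[10] in cpu_str, name[3] in ATTR_LIST_ENTRY) with C99 flexible array members, giving -Warray-bounds and fortified string ops the real object bounds; record sizes were already derived via offsetof(), as le_size() shows. The INDEX_HDR change is similar tightening: a flags byte plus three reserved bytes become a single __le32 with the named NTFS_INDEX_HDR_HAS_SUBNODES bit. A self-contained sketch of the flexible-array sizing pattern (illustrative struct, not the on-disk NTFS layout):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            uint16_t size;     /* total record size in bytes */
            uint8_t name_len;  /* characters stored in name[] */
            uint16_t name[];   /* flexible array member, no fake bound */
    };

    /* Size a record the way le_size() does: offsetof + payload, aligned. */
    static size_t entry_size(uint8_t name_len)
    {
            size_t raw = offsetof(struct entry, name) +
                         name_len * sizeof(uint16_t);
            return (raw + 7) & ~(size_t)7; /* quad-align, like ALIGN(x, 8) */
    }

    int main(void)
    {
            uint8_t n = 5;
            struct entry *e = calloc(1, entry_size(n));

            if (!e)
                    return 1;
            e->name_len = n;
            e->size = (uint16_t)entry_size(n);
            printf("record size for %u name chars: %zu\n", n, entry_size(n));
            free(e);
            return 0;
    }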
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 0e6a2777870c3e..28788cf6ba4074 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -61,6 +61,8 @@ enum utf16_endian;
+
+ /* sbi->flags */
+ #define NTFS_FLAGS_NODISCARD 0x00000001
++/* ntfs in shutdown state. */
++#define NTFS_FLAGS_SHUTDOWN_BIT		0x00000002 /* bit number; mask == 4 */
+ /* Set when LogFile is replaying. */
+ #define NTFS_FLAGS_LOG_REPLAYING 0x00000008
+ /* Set when we changed first MFT's which copy must be updated in $MftMirr. */
+@@ -226,7 +228,7 @@ struct ntfs_sb_info {
+ u64 maxbytes; // Maximum size for normal files.
+ u64 maxbytes_sparse; // Maximum size for sparse file.
+
+- u32 flags; // See NTFS_FLAGS_XXX.
++	unsigned long flags; // See NTFS_FLAGS_XXX.
+
+ CLST zone_max; // Maximum MFT zone length in clusters
+ CLST bad_clusters; // The count of marked bad clusters.
+@@ -473,7 +475,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
+ int al_update(struct ntfs_inode *ni, int sync);
+ static inline size_t al_aligned(size_t size)
+ {
+- return (size + 1023) & ~(size_t)1023;
++ return size_add(size, 1023) & ~(size_t)1023;
+ }
+
+ /* Globals from bitfunc.c */
+@@ -584,6 +586,7 @@ bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
+ int log_replay(struct ntfs_inode *ni, bool *initialized);
+
+ /* Globals from fsntfs.c */
++struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block);
+ bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
+ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
+ bool simple);
+@@ -961,9 +964,9 @@ static inline bool run_is_empty(struct runs_tree *run)
+ }
+
+ /* NTFS uses quad aligned bitmaps. */
+-static inline size_t bitmap_size(size_t bits)
++static inline size_t ntfs3_bitmap_size(size_t bits)
+ {
+- return ALIGN((bits + 7) >> 3, 8);
++ return BITS_TO_U64(bits) * sizeof(u64);
+ }
+
+ #define _100ns2seconds 10000000
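bitmap_size() is renamed with an ntfs3_ prefix, presumably to stay clear of an identically named generic helper, and the body now rounds up to whole u64 words. For every bits value the result equals the old ALIGN((bits + 7) >> 3, 8); a quick exhaustive check:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BITS_TO_U64(n)  (((n) + 63) / 64)
    #define ALIGN8(x)       (((x) + 7) & ~(size_t)7)

    static size_t old_size(size_t bits) { return ALIGN8((bits + 7) >> 3); }
    static size_t new_size(size_t bits) { return BITS_TO_U64(bits) * sizeof(uint64_t); }

    int main(void)
    {
            for (size_t bits = 0; bits < 1 << 20; bits++)
                    assert(old_size(bits) == new_size(bits));
            printf("ntfs3_bitmap_size(1) = %zu bytes\n", new_size(1)); /* 8 */
            return 0;
    }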
+@@ -999,6 +1002,11 @@ static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
+ return sb->s_fs_info;
+ }
+
++static inline int ntfs3_forced_shutdown(struct super_block *sb)
++{
++ return test_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
++}
++
+ /*
+ * ntfs_up_cluster - Align up on cluster boundary.
+ */
+@@ -1025,19 +1033,6 @@ static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
+ return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+ }
+
+-static inline struct buffer_head *ntfs_bread(struct super_block *sb,
+- sector_t block)
+-{
+- struct buffer_head *bh = sb_bread(sb, block);
+-
+- if (bh)
+- return bh;
+-
+- ntfs_err(sb, "failed to read volume at offset 0x%llx",
+- (u64)block << sb->s_blocksize_bits);
+- return NULL;
+-}
+-
+ static inline struct ntfs_inode *ntfs_i(struct inode *inode)
+ {
+ return container_of(inode, struct ntfs_inode, vfs_inode);
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 53629b1f65e995..6c76503edc200d 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -279,7 +279,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ if (t16 > asize)
+ return NULL;
+
+- if (t16 + le32_to_cpu(attr->res.data_size) > asize)
++ if (le32_to_cpu(attr->res.data_size) > asize - t16)
+ return NULL;
+
+ t32 = sizeof(short) * attr->name_len;
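The mi_enum_attr() change is an overflow-safe bounds check: in 32-bit arithmetic t16 + data_size can wrap and slip past "sum > asize", whereas "data_size > asize - t16" cannot, since t16 <= asize was established just above. A compact demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* Precondition for both: off <= size has already been checked. */
    static int bad_check(uint32_t off, uint32_t len, uint32_t size)
    {
            return off + len > size;   /* off + len can wrap to a tiny value */
    }

    static int good_check(uint32_t off, uint32_t len, uint32_t size)
    {
            return len > size - off;   /* size - off cannot underflow */
    }

    int main(void)
    {
            uint32_t off = 0x18, size = 0x400, len = 0xFFFFFFF0u;

            /* Wrapped sum: 0x18 + 0xFFFFFFF0 == 0x8, so the bad check passes. */
            printf("bad:  %s\n", bad_check(off, len, size) ? "rejected" : "ACCEPTED");
            printf("good: %s\n", good_check(off, len, size) ? "rejected" : "accepted");
            return 0;
    }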
+@@ -534,9 +534,14 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ if (aoff + asize > used)
+ return false;
+
+- if (ni && is_attr_indexed(attr)) {
+- le16_add_cpu(&ni->mi.mrec->hard_links, -1);
+- ni->mi.dirty = true;
++ if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
++ u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
++ if (!links) {
++ /* minor error. Not critical. */
++ } else {
++ ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
++ ni->mi.dirty = true;
++ }
+ }
+
+ used -= asize;
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index f763e3256ccc1b..c14b55cdea85c2 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -276,7 +276,7 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_string("iocharset", Opt_iocharset),
+ fsparam_flag_no("prealloc", Opt_prealloc),
+- fsparam_flag_no("nocase", Opt_nocase),
++ fsparam_flag_no("case", Opt_nocase),
+ {}
+ };
+ // clang-format on
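fsparam_flag_no() registers a flag together with its negated "no"-prefixed form, so declaring the spec as "nocase" made the parser accept nocase/nonocase and never plain "case"; declaring it as "case" yields the intended case/nocase pair. A toy matcher showing how the negated variant is derived (illustrative, not the fs_parser implementation):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Model of fsparam_flag_no: one spec accepts "name" and "no" + "name". */
    static bool match_flag_no(const char *spec, const char *opt, bool *value)
    {
            if (strcmp(opt, spec) == 0) {
                    *value = true;
                    return true;
            }
            if (strncmp(opt, "no", 2) == 0 && strcmp(opt + 2, spec) == 0) {
                    *value = false;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            bool v = false;

            /* Old spec "nocase": plain "case" never matched. */
            printf("spec \"nocase\", opt \"case\":   %d\n",
                   match_flag_no("nocase", "case", &v));
            /* Fixed spec "case": both forms match. */
            printf("spec \"case\",   opt \"nocase\": %d (value=%d)\n",
                   match_flag_no("case", "nocase", &v), v);
            return 0;
    }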
+@@ -463,7 +463,7 @@ static int ntfs3_volinfo(struct seq_file *m, void *o)
+ struct super_block *sb = m->private;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+- seq_printf(m, "ntfs%d.%d\n%u\n%zu\n\%zu\n%zu\n%s\n%s\n",
++ seq_printf(m, "ntfs%d.%d\n%u\n%zu\n%zu\n%zu\n%s\n%s\n",
+ sbi->volume.major_ver, sbi->volume.minor_ver,
+ sbi->cluster_size, sbi->used.bitmap.nbits,
+ sbi->mft.bitmap.nbits,
+@@ -625,7 +625,7 @@ static void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
+ {
+ kfree(sbi->new_rec);
+ kvfree(ntfs_put_shared(sbi->upcase));
+- kfree(sbi->def_table);
++ kvfree(sbi->def_table);
+ kfree(sbi->compress.lznt);
+ #ifdef CONFIG_NTFS3_LZX_XPRESS
+ xpress_free_decompressor(sbi->compress.xpress);
+@@ -714,6 +714,14 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
+ return 0;
+ }
+
++/*
++ * ntfs_shutdown - super_operations::shutdown
++ */
++static void ntfs_shutdown(struct super_block *sb)
++{
++ set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
++}
++
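Shutdown support is a single bit in sbi->flags, widened to unsigned long so the atomic set_bit()/test_bit() helpers apply: ntfs_shutdown() sets the bit, and the entry points touched throughout this patch return -EIO once ntfs3_forced_shutdown() observes it. The same shape with C11 atomics (illustrative):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define FLAG_SHUTDOWN (1UL << 2) /* bit 2, mask 4, as in the patch */

    static atomic_ulong sb_flags;

    static void fs_shutdown(void)
    {
            atomic_fetch_or(&sb_flags, FLAG_SHUTDOWN); /* like set_bit() */
    }

    static int forced_shutdown(void)
    {
            return (atomic_load(&sb_flags) & FLAG_SHUTDOWN) != 0; /* test_bit() */
    }

    static int fs_write_begin(void)
    {
            if (forced_shutdown())
                    return -EIO; /* refuse new work after shutdown */
            /* ... normal write path ... */
            return 0;
    }

    int main(void)
    {
            printf("before shutdown: %d\n", fs_write_begin());
            fs_shutdown();
            printf("after shutdown:  %d\n", fs_write_begin());
            return 0;
    }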
+ /*
+ * ntfs_sync_fs - super_operations::sync_fs
+ */
+@@ -724,6 +732,9 @@ static int ntfs_sync_fs(struct super_block *sb, int wait)
+ struct ntfs_inode *ni;
+ struct inode *inode;
+
++ if (unlikely(ntfs3_forced_shutdown(sb)))
++ return -EIO;
++
+ ni = sbi->security.ni;
+ if (ni) {
+ inode = &ni->vfs_inode;
+@@ -763,6 +774,7 @@ static const struct super_operations ntfs_sops = {
+ .put_super = ntfs_put_super,
+ .statfs = ntfs_statfs,
+ .show_options = ntfs_show_options,
++ .shutdown = ntfs_shutdown,
+ .sync_fs = ntfs_sync_fs,
+ .write_inode = ntfs3_write_inode,
+ };
+@@ -865,6 +877,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ u16 fn, ao;
+ u8 cluster_bits;
+ u32 boot_off = 0;
++ sector_t boot_block = 0;
+ const char *hint = "Primary boot";
+
+ /* Save original dev_size. Used with alternative boot. */
+@@ -872,11 +885,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+
+- bh = ntfs_bread(sb, 0);
++read_boot:
++ bh = ntfs_bread(sb, boot_block);
+ if (!bh)
+- return -EIO;
++ return boot_block ? -EINVAL : -EIO;
+
+-check_boot:
+ err = -EINVAL;
+
+ /* Corrupted image; do not read OOB */
+@@ -1107,26 +1120,24 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ }
+
+ out:
+- if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
++ brelse(bh);
++
++ if (err == -EINVAL && !boot_block && dev_size0 > PAGE_SHIFT) {
+ u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
+ u64 lbo = dev_size0 - sizeof(*boot);
+
+- /*
+- * Try alternative boot (last sector)
+- */
+- brelse(bh);
+-
+- sb_set_blocksize(sb, block_size);
+- bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
+- if (!bh)
+- return -EINVAL;
+-
++ boot_block = lbo >> blksize_bits(block_size);
+ boot_off = lbo & (block_size - 1);
+- hint = "Alternative boot";
+- dev_size = dev_size0; /* restore original size. */
+- goto check_boot;
++ if (boot_block && block_size >= boot_off + sizeof(*boot)) {
++ /*
++ * Try alternative boot (last sector)
++ */
++ sb_set_blocksize(sb, block_size);
++ hint = "Alternative boot";
++ dev_size = dev_size0; /* restore original size. */
++ goto read_boot;
++ }
+ }
+- brelse(bh);
+
+ return err;
+ }
+@@ -1330,7 +1341,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ /* Check bitmap boundary. */
+ tt = sbi->used.bitmap.nbits;
+- if (inode->i_size < bitmap_size(tt)) {
++ if (inode->i_size < ntfs3_bitmap_size(tt)) {
+ ntfs_err(sb, "$Bitmap is corrupted.");
+ err = -EINVAL;
+ goto put_inode_out;
+@@ -1793,8 +1804,6 @@ static int __init init_ntfs_fs(void)
+ {
+ int err;
+
+- pr_info("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
+-
+ if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
+ pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
+ if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 4920548192a0cf..72bceb8cd164b6 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -219,6 +219,12 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ if (!ea->name_len)
+ break;
+
++ if (ea->name_len > ea_size) {
++ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++ err = -EINVAL; /* corrupted fs */
++ break;
++ }
++
+ if (buffer) {
+ /* Check if we can use field ea->name */
+ if (off + ea_size > size)
+@@ -744,6 +750,9 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
+ int err;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ /* Dispatch request. */
+ if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
+ /* system.dos_attrib */
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index e75137a8e7cb40..62464d194da3f6 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -193,8 +193,8 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
+ inode->i_mode = new_mode;
+ inode_set_ctime_current(inode);
+ di->i_mode = cpu_to_le16(inode->i_mode);
+- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
++ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
+
+ ocfs2_journal_dirty(handle, di_bh);
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index aef58f1395c870..f0937902f7b46e 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7436,10 +7436,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+ }
+
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+
+- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
++ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
+ ocfs2_journal_dirty(handle, di_bh);
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 0fdba30740ab52..315f7c2f6a02c1 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -156,9 +156,8 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
+ err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
+ &ext_flags);
+ if (err) {
+- mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
+- "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
+- (unsigned long long)p_blkno);
++ mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, "
++ "block: %llu\n", inode, (unsigned long long)iblock);
+ goto bail;
+ }
+
+@@ -2048,9 +2047,9 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
+ }
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ di->i_size = cpu_to_le64((u64)i_size_read(inode));
+- inode->i_mtime = inode_set_ctime_current(inode);
+- di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+- di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
++ di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
++ di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
+ if (handle)
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
+ }
+@@ -2370,6 +2369,11 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
+ }
+
+ list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
++ ret = ocfs2_assure_trans_credits(handle, credits);
++ if (ret < 0) {
++ mlog_errno(ret);
++ break;
++ }
+ ret = ocfs2_mark_extent_written(inode, &et, handle,
+ ue->ue_cpos, 1,
+ ue->ue_phys,
+diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
+index 196638a22b48a6..45ca3cb7c0974c 100644
+--- a/fs/ocfs2/buffer_head_io.c
++++ b/fs/ocfs2/buffer_head_io.c
+@@ -235,7 +235,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
+ if (bhs[i] == NULL) {
+ bhs[i] = sb_getblk(sb, block++);
+ if (bhs[i] == NULL) {
+- ocfs2_metadata_cache_io_unlock(ci);
+ status = -ENOMEM;
+ mlog_errno(status);
+ /* Don't forget to put previous bh! */
+@@ -389,7 +388,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
+ /* Always set the buffer in the cache, even if it was
+ * a forced read, or read-ahead which hasn't yet
+ * completed. */
+- ocfs2_set_buffer_uptodate(ci, bh);
++ if (bh)
++ ocfs2_set_buffer_uptodate(ci, bh);
+ }
+ ocfs2_metadata_cache_io_unlock(ci);
+
+diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
+index 8b123d543e6e29..429c22f911fdae 100644
+--- a/fs/ocfs2/dir.c
++++ b/fs/ocfs2/dir.c
+@@ -294,13 +294,16 @@ static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
+ * bh passed here can be an inode block or a dir data block, depending
+ * on the inode inline data flag.
+ */
+-static int ocfs2_check_dir_entry(struct inode * dir,
+- struct ocfs2_dir_entry * de,
+- struct buffer_head * bh,
++static int ocfs2_check_dir_entry(struct inode *dir,
++ struct ocfs2_dir_entry *de,
++ struct buffer_head *bh,
++ char *buf,
++ unsigned int size,
+ unsigned long offset)
+ {
+ const char *error_msg = NULL;
+ const int rlen = le16_to_cpu(de->rec_len);
++ const unsigned long next_offset = ((char *) de - buf) + rlen;
+
+ if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
+ error_msg = "rec_len is smaller than minimal";
+@@ -308,9 +311,11 @@ static int ocfs2_check_dir_entry(struct inode * dir,
+ error_msg = "rec_len % 4 != 0";
+ else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
+ error_msg = "rec_len is too small for name_len";
+- else if (unlikely(
+- ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
+- error_msg = "directory entry across blocks";
++ else if (unlikely(next_offset > size))
++ error_msg = "directory entry overrun";
++ else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) &&
++ next_offset != size)
++ error_msg = "directory entry too close to end";
+
+ if (unlikely(error_msg != NULL))
+ mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
+@@ -352,16 +357,17 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
+ de_buf = first_de;
+ dlimit = de_buf + bytes;
+
+- while (de_buf < dlimit) {
++ while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) {
+ /* this code is executed quadratically often */
+ /* do minimal checking `by hand' */
+
+ de = (struct ocfs2_dir_entry *) de_buf;
+
+- if (de_buf + namelen <= dlimit &&
++ if (de->name + namelen <= dlimit &&
+ ocfs2_match(namelen, name, de)) {
+ /* found a match - just to be sure, do a full check */
+- if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
++ if (!ocfs2_check_dir_entry(dir, de, bh, first_de,
++ bytes, offset)) {
+ ret = -1;
+ goto bail;
+ }
+@@ -1138,7 +1144,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
+ pde = NULL;
+ de = (struct ocfs2_dir_entry *) first_de;
+ while (i < bytes) {
+- if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
++ if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+@@ -1638,7 +1644,8 @@ int __ocfs2_add_entry(handle_t *handle,
+ /* These checks should've already been passed by the
+ * prepare function, but I guess we can leave them
+ * here anyway. */
+- if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
++ if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start,
++ size, offset)) {
+ retval = -ENOENT;
+ goto bail;
+ }
+@@ -1658,7 +1665,8 @@ int __ocfs2_add_entry(handle_t *handle,
+ offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
+
+ if (ocfs2_dirent_would_fit(de, rec_len)) {
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir,
++ inode_set_ctime_current(dir));
+ retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
+ if (retval < 0) {
+ mlog_errno(retval);
+@@ -1776,7 +1784,8 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
+ }
+
+ de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
+- if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
++ if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data,
++ i_size_read(inode), ctx->pos)) {
+ /* On error, skip the f_pos to the end. */
+ ctx->pos = i_size_read(inode);
+ break;
+@@ -1869,7 +1878,8 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
+ while (ctx->pos < i_size_read(inode)
+ && offset < sb->s_blocksize) {
+ de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
+- if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
++ if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data,
++ sb->s_blocksize, offset)) {
+ /* On error, skip the f_pos to the
+ next block. */
+ ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
+@@ -2962,11 +2972,11 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
+ ocfs2_dinode_new_extent_list(dir, di);
+
+ i_size_write(dir, sb->s_blocksize);
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+
+ di->i_size = cpu_to_le64(sb->s_blocksize);
+- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(dir).tv_sec);
+- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(dir).tv_nsec);
++ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(dir));
++ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(dir));
+ ocfs2_update_inode_fsync_trans(handle, dir, 1);
+
+ /*
+@@ -3341,7 +3351,7 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ struct super_block *sb = dir->i_sb;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_dir_entry *de, *last_de = NULL;
+- char *de_buf, *limit;
++ char *first_de, *de_buf, *limit;
+ unsigned long offset = 0;
+ unsigned int rec_len, new_rec_len, free_space;
+
+@@ -3354,14 +3364,16 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ else
+ free_space = dir->i_sb->s_blocksize - i_size_read(dir);
+
+- de_buf = di->id2.i_data.id_data;
++ first_de = di->id2.i_data.id_data;
++ de_buf = first_de;
+ limit = de_buf + i_size_read(dir);
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
+
+ while (de_buf < limit) {
+ de = (struct ocfs2_dir_entry *)de_buf;
+
+- if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
++ if (!ocfs2_check_dir_entry(dir, de, di_bh, first_de,
++ i_size_read(dir), offset)) {
+ ret = -ENOENT;
+ goto out;
+ }
+@@ -3443,7 +3455,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
+ /* move to next block */
+ de = (struct ocfs2_dir_entry *) bh->b_data;
+ }
+- if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
++ if (!ocfs2_check_dir_entry(dir, de, bh, bh->b_data, blocksize,
++ offset)) {
+ status = -ENOENT;
+ goto bail;
+ }
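ocfs2_check_dir_entry() now receives the start and size of the buffer that actually holds the entries (for inline directories that is i_size, not s_blocksize), so rec_len is validated against the true end, and an entry that stops just short of it, leaving no room for even a minimal record, is rejected too. A standalone walker with equivalent checks (illustrative record layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct dirent_disk {
            uint16_t rec_len;   /* bytes from this entry to the next */
            uint8_t name_len;   /* characters in name[] */
            char name[];
    };

    #define REC_LEN(namelen) \
            ((unsigned)(sizeof(struct dirent_disk) + (namelen) + 3) & ~3u)

    static int check_entry(const char *buf, unsigned size, unsigned off)
    {
            const struct dirent_disk *de = (const void *)(buf + off);
            unsigned rlen = de->rec_len;
            unsigned next = off + rlen;

            if (rlen < REC_LEN(1) || rlen % 4)
                    return 0;               /* malformed length */
            if (rlen < REC_LEN(de->name_len))
                    return 0;               /* name would overrun the record */
            if (next > size)
                    return 0;               /* directory entry overrun */
            if (next != size && next > size - REC_LEN(1))
                    return 0;               /* too close to the end */
            return 1;
    }

    int main(void)
    {
            char buf[64] = { 0 };
            struct dirent_disk *de = (void *)buf;

            de->name_len = 1;
            de->rec_len = sizeof(buf);      /* one entry spans the buffer */
            printf("valid: %d\n", check_entry(buf, sizeof(buf), 0));
            return 0;
    }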
+diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
+index 81265123ce6ce5..9b57d012fd5cfe 100644
+--- a/fs/ocfs2/dlmfs/dlmfs.c
++++ b/fs/ocfs2/dlmfs/dlmfs.c
+@@ -337,7 +337,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
+- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
++ simple_inode_init_ts(inode);
+ inc_nlink(inode);
+
+ inode->i_fop = &simple_dir_operations;
+@@ -360,7 +360,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
+
+ inode->i_ino = get_next_ino();
+ inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
+- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
++ simple_inode_init_ts(inode);
+
+ ip = DLMFS_I(inode);
+ ip->ip_conn = DLMFS_I(parent)->ip_conn;
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index c3e2961ee5db3c..64a6ef638495c2 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2162,7 +2162,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
+ struct ocfs2_meta_lvb *lvb;
+- struct timespec64 ctime = inode_get_ctime(inode);
++ struct timespec64 ts;
+
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+
+@@ -2183,12 +2183,12 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
+ lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
+ lvb->lvb_imode = cpu_to_be16(inode->i_mode);
+ lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
+- lvb->lvb_iatime_packed =
+- cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
+- lvb->lvb_ictime_packed =
+- cpu_to_be64(ocfs2_pack_timespec(&ctime));
+- lvb->lvb_imtime_packed =
+- cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
++ ts = inode_get_atime(inode);
++ lvb->lvb_iatime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
++ ts = inode_get_ctime(inode);
++ lvb->lvb_ictime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
++ ts = inode_get_mtime(inode);
++ lvb->lvb_imtime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
+ lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
+ lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
+ lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
+@@ -2209,7 +2209,7 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
+ struct ocfs2_meta_lvb *lvb;
+- struct timespec64 ctime;
++ struct timespec64 ts;
+
+ mlog_meta_lvb(0, lockres);
+
+@@ -2236,13 +2236,12 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
+ i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
+ inode->i_mode = be16_to_cpu(lvb->lvb_imode);
+ set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
+- ocfs2_unpack_timespec(&inode->i_atime,
+- be64_to_cpu(lvb->lvb_iatime_packed));
+- ocfs2_unpack_timespec(&inode->i_mtime,
+- be64_to_cpu(lvb->lvb_imtime_packed));
+- ocfs2_unpack_timespec(&ctime,
+- be64_to_cpu(lvb->lvb_ictime_packed));
+- inode_set_ctime_to_ts(inode, ctime);
++ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_iatime_packed));
++ inode_set_atime_to_ts(inode, ts);
++ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_imtime_packed));
++ inode_set_mtime_to_ts(inode, ts);
++ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_ictime_packed));
++ inode_set_ctime_to_ts(inode, ts);
+ spin_unlock(&oi->ip_lock);
+ return 0;
+ }
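
The dlmfs and dlmglue hunks above are part of the 6.6-era VFS conversion from direct i_atime/i_mtime field access to inode_get_*/inode_set_* accessor helpers, which lets the core change how timestamps are stored without touching every filesystem again. A small userspace model of the same encapsulation, with hypothetical names:

    #include <stdio.h>
    #include <time.h>

    struct minode {
        /* storage is private to the accessors; callers never touch it */
        time_t atime_sec;
        long   atime_nsec;
    };

    static inline struct timespec minode_get_atime(const struct minode *mi)
    {
        return (struct timespec){ .tv_sec = mi->atime_sec,
                                  .tv_nsec = mi->atime_nsec };
    }

    static inline void minode_set_atime(struct minode *mi, time_t sec, long nsec)
    {
        mi->atime_sec = sec;
        mi->atime_nsec = nsec;
    }

    int main(void)
    {
        struct minode mi;
        struct timespec ts;

        minode_set_atime(&mi, 1700000000, 123456789);
        ts = minode_get_atime(&mi);
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }
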
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index c45596c25c6653..8bbe4a2b48a2af 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -233,16 +233,18 @@ int ocfs2_should_update_atime(struct inode *inode,
+
+ if (vfsmnt->mnt_flags & MNT_RELATIME) {
+ struct timespec64 ctime = inode_get_ctime(inode);
++ struct timespec64 atime = inode_get_atime(inode);
++ struct timespec64 mtime = inode_get_mtime(inode);
+
+- if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
+- (timespec64_compare(&inode->i_atime, &ctime) <= 0))
++ if ((timespec64_compare(&atime, &mtime) <= 0) ||
++ (timespec64_compare(&atime, &ctime) <= 0))
+ return 1;
+
+ return 0;
+ }
+
+ now = current_time(inode);
+- if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
++ if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
+ return 0;
+ else
+ return 1;
+@@ -275,9 +277,9 @@ int ocfs2_update_inode_atime(struct inode *inode,
+ * have i_rwsem to guard against concurrent changes to other
+ * inode fields.
+ */
+- inode->i_atime = current_time(inode);
+- di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
+- di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
++ inode_set_atime_to_ts(inode, current_time(inode));
++ di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
++ di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
+ ocfs2_journal_dirty(handle, bh);
+
+@@ -296,7 +298,7 @@ int ocfs2_set_inode_size(handle_t *handle,
+
+ i_size_write(inode, new_i_size);
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+
+ status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
+ if (status < 0) {
+@@ -417,12 +419,12 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
+ }
+
+ i_size_write(inode, new_i_size);
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+
+ di = (struct ocfs2_dinode *) fe_bh->b_data;
+ di->i_size = cpu_to_le64(new_i_size);
+- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
++ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
+
+ ocfs2_journal_dirty(handle, fe_bh);
+@@ -821,9 +823,9 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
+ i_size_write(inode, abs_to);
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ di->i_size = cpu_to_le64((u64)i_size_read(inode));
+- inode->i_mtime = inode_set_ctime_current(inode);
+- di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+- di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
++ di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
++ di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
+ di->i_mtime_nsec = di->i_ctime_nsec;
+ if (handle) {
+ ocfs2_journal_dirty(handle, di_bh);
+@@ -1934,6 +1936,8 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+
+ inode_lock(inode);
+
++	/* Wait for all existing dio workers; newcomers will block on i_rwsem */
++ inode_dio_wait(inode);
+ /*
+ * This prevents concurrent writes on other nodes
+ */
+@@ -2040,7 +2044,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ goto out_inode_unlock;
+ }
+
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
+ if (ret < 0)
+ mlog_errno(ret);
+diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
+index e8771600b9304a..999111bfc27178 100644
+--- a/fs/ocfs2/inode.c
++++ b/fs/ocfs2/inode.c
+@@ -302,10 +302,10 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ inode->i_mapping->a_ops = &ocfs2_aops;
+ }
+- inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
+- inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
+- inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
+- inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
++ inode_set_atime(inode, le64_to_cpu(fe->i_atime),
++ le32_to_cpu(fe->i_atime_nsec));
++ inode_set_mtime(inode, le64_to_cpu(fe->i_mtime),
++ le32_to_cpu(fe->i_mtime_nsec));
+ inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
+ le32_to_cpu(fe->i_ctime_nsec));
+
+@@ -1312,12 +1312,12 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
+ fe->i_uid = cpu_to_le32(i_uid_read(inode));
+ fe->i_gid = cpu_to_le32(i_gid_read(inode));
+ fe->i_mode = cpu_to_le16(inode->i_mode);
+- fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
+- fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
+- fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+- fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
+- fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
++ fe->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
++ fe->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
++ fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
++ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
++ fe->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
++ fe->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
+
+ ocfs2_journal_dirty(handle, bh);
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
+@@ -1348,10 +1348,10 @@ void ocfs2_refresh_inode(struct inode *inode,
+ inode->i_blocks = 0;
+ else
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+- inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
+- inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
+- inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
+- inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
++ inode_set_atime(inode, le64_to_cpu(fe->i_atime),
++ le32_to_cpu(fe->i_atime_nsec));
++ inode_set_mtime(inode, le64_to_cpu(fe->i_mtime),
++ le32_to_cpu(fe->i_mtime_nsec));
+ inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
+ le32_to_cpu(fe->i_ctime_nsec));
+
+diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
+index 82b28fdacc7e96..accf03d4765ed9 100644
+--- a/fs/ocfs2/inode.h
++++ b/fs/ocfs2/inode.h
+@@ -65,7 +65,7 @@ struct ocfs2_inode_info
+ tid_t i_sync_tid;
+ tid_t i_datasync_tid;
+
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+ };
+
+ /*
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index ce215565d061ed..cbe3c12ff5f75c 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -445,6 +445,23 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
+ return status;
+ }
+
++/*
++ * Make sure the handle has at least 'nblocks' credits available. If it does
++ * not, we first try to extend the handle to have that many credits; if the
++ * extension fails, we restart the transaction to obtain enough credits.
++ * The notes on data consistency and locking implications given for
++ * ocfs2_extend_trans() apply here as well.
++ */
++int ocfs2_assure_trans_credits(handle_t *handle, int nblocks)
++{
++ int old_nblks = jbd2_handle_buffer_credits(handle);
++
++ trace_ocfs2_assure_trans_credits(old_nblks);
++ if (old_nblks >= nblocks)
++ return 0;
++ return ocfs2_extend_trans(handle, nblocks - old_nblks);
++}
++
+ /*
+ * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
+ * If that fails, restart the transaction & regain write access for the
+@@ -479,12 +496,6 @@ int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
+ return status;
+ }
+
+-
+-struct ocfs2_triggers {
+- struct jbd2_buffer_trigger_type ot_triggers;
+- int ot_offset;
+-};
+-
+ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
+ {
+ return container_of(triggers, struct ocfs2_triggers, ot_triggers);
+@@ -548,85 +559,76 @@ static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
+ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
+ struct buffer_head *bh)
+ {
++ struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
++
+ mlog(ML_ERROR,
+ "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
+ "bh->b_blocknr = %llu\n",
+ (unsigned long)bh,
+ (unsigned long long)bh->b_blocknr);
+
+- ocfs2_error(bh->b_assoc_map->host->i_sb,
++ ocfs2_error(ot->sb,
+ "JBD2 has aborted our journal, ocfs2 cannot continue\n");
+ }
+
+-static struct ocfs2_triggers di_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_dinode, i_check),
+-};
+-
+-static struct ocfs2_triggers eb_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
+-};
+-
+-static struct ocfs2_triggers rb_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
+-};
+-
+-static struct ocfs2_triggers gd_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
+-};
+-
+-static struct ocfs2_triggers db_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_db_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+-};
++static void ocfs2_setup_csum_triggers(struct super_block *sb,
++ enum ocfs2_journal_trigger_type type,
++ struct ocfs2_triggers *ot)
++{
++ BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT);
+
+-static struct ocfs2_triggers xb_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
+-};
++ switch (type) {
++ case OCFS2_JTR_DI:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_dinode, i_check);
++ break;
++ case OCFS2_JTR_EB:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check);
++ break;
++ case OCFS2_JTR_RB:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check);
++ break;
++ case OCFS2_JTR_GD:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check);
++ break;
++ case OCFS2_JTR_DB:
++ ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger;
++ break;
++ case OCFS2_JTR_XB:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check);
++ break;
++ case OCFS2_JTR_DQ:
++ ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger;
++ break;
++ case OCFS2_JTR_DR:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check);
++ break;
++ case OCFS2_JTR_DL:
++ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++ ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check);
++ break;
++ case OCFS2_JTR_NONE:
++ /* To make compiler happy... */
++ return;
++ }
+
+-static struct ocfs2_triggers dq_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_dq_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+-};
++ ot->ot_triggers.t_abort = ocfs2_abort_trigger;
++ ot->sb = sb;
++}
+
+-static struct ocfs2_triggers dr_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
+-};
++void ocfs2_initialize_journal_triggers(struct super_block *sb,
++ struct ocfs2_triggers triggers[])
++{
++ enum ocfs2_journal_trigger_type type;
+
+-static struct ocfs2_triggers dl_triggers = {
+- .ot_triggers = {
+- .t_frozen = ocfs2_frozen_trigger,
+- .t_abort = ocfs2_abort_trigger,
+- },
+- .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
+-};
++ for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++)
++ ocfs2_setup_csum_triggers(sb, type, &triggers[type]);
++}
+
+ static int __ocfs2_journal_access(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+@@ -708,56 +710,91 @@ static int __ocfs2_journal_access(handle_t *handle,
+ int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_DI],
++ type);
+ }
+
+ int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_EB],
++ type);
+ }
+
+ int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_RB],
+ type);
+ }
+
+ int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_GD],
++ type);
+ }
+
+ int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_DB],
++ type);
+ }
+
+ int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_XB],
++ type);
+ }
+
+ int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_DQ],
++ type);
+ }
+
+ int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_DR],
++ type);
+ }
+
+ int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+ {
+- return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
++ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++ return __ocfs2_journal_access(handle, ci, bh,
++ &osb->s_journal_triggers[OCFS2_JTR_DL],
++ type);
+ }
+
+ int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
+@@ -778,13 +815,15 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
+ if (!is_handle_aborted(handle)) {
+ journal_t *journal = handle->h_transaction->t_journal;
+
+- mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
+- "Aborting transaction and journal.\n");
++ mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: "
++ "handle type %u started at line %u, credits %u/%u "
++ "errcode %d. Aborting transaction and journal.\n",
++ handle->h_type, handle->h_line_no,
++ handle->h_requested_credits,
++ jbd2_handle_buffer_credits(handle), status);
+ handle->h_err = status;
+ jbd2_journal_abort_handle(handle);
+ jbd2_journal_abort(journal, status);
+- ocfs2_abort(bh->b_assoc_map->host->i_sb,
+- "Journal already aborted.\n");
+ }
+ }
+ }
+@@ -1016,7 +1055,7 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
+ if (!igrab(inode))
+ BUG();
+
+- num_running_trans = atomic_read(&(osb->journal->j_num_trans));
++ num_running_trans = atomic_read(&(journal->j_num_trans));
+ trace_ocfs2_journal_shutdown(num_running_trans);
+
+ /* Do a commit_cache here. It will flush our journal, *and*
+@@ -1035,9 +1074,10 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
+ osb->commit_task = NULL;
+ }
+
+- BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
++ BUG_ON(atomic_read(&(journal->j_num_trans)) != 0);
+
+- if (ocfs2_mount_local(osb)) {
++ if (ocfs2_mount_local(osb) &&
++ (journal->j_journal->j_flags & JBD2_LOADED)) {
+ jbd2_journal_lock_updates(journal->j_journal);
+ status = jbd2_journal_flush(journal->j_journal, 0);
+ jbd2_journal_unlock_updates(journal->j_journal);
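
ocfs2_assure_trans_credits(), added above, turns the relative ocfs2_extend_trans() into an absolute guarantee: extend only by whatever is missing, and do nothing if the handle already has enough. A compilable userspace model of that logic, with illustrative names:

    #include <stdio.h>

    struct handle { int credits; };

    static int extend_trans(struct handle *h, int nblocks)
    {
        /* model of ocfs2_extend_trans(): grow the handle by nblocks */
        h->credits += nblocks;
        return 0;
    }

    static int assure_trans_credits(struct handle *h, int nblocks)
    {
        if (h->credits >= nblocks)
            return 0;                        /* already enough, nothing to do */
        return extend_trans(h, nblocks - h->credits);
    }

    int main(void)
    {
        struct handle h = { .credits = 3 };

        assure_trans_credits(&h, 8);         /* extends by 5 */
        assure_trans_credits(&h, 4);         /* no-op, 8 >= 4 */
        printf("credits = %d\n", h.credits);
        return 0;
    }
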
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index 41c9fe7e62f9b7..e3c3a35dc5e0e7 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -243,6 +243,8 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb,
+ int ocfs2_commit_trans(struct ocfs2_super *osb,
+ handle_t *handle);
+ int ocfs2_extend_trans(handle_t *handle, int nblocks);
++int ocfs2_assure_trans_credits(handle_t *handle,
++ int nblocks);
+ int ocfs2_allocate_extend_trans(handle_t *handle,
+ int thresh);
+
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index c803c10dd97ef1..d96011ede18a75 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -1008,6 +1008,25 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ start = bit_off + 1;
+ }
+
++ /* clear the contiguous bits until the end boundary */
++ if (count) {
++ blkno = la_start_blk +
++ ocfs2_clusters_to_blocks(osb->sb,
++ start - count);
++
++ trace_ocfs2_sync_local_to_main_free(
++ count, start - count,
++ (unsigned long long)la_start_blk,
++ (unsigned long long)blkno);
++
++ status = ocfs2_release_clusters(handle,
++ main_bm_inode,
++ main_bm_bh, blkno,
++ count);
++ if (status < 0)
++ mlog_errno(status);
++ }
++
+ bail:
+ if (status)
+ mlog_errno(status);
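
The localalloc.c hunk fixes a classic scan-loop bug: the loop releases contiguous runs of clusters only when it hits a discontinuity, so a run reaching the end of the bitmap was never released. A userspace sketch of the pattern, with the post-loop flush the patch adds (illustrative names):

    #include <stdio.h>

    static void release_run(int start, int count)
    {
        printf("release %d clusters at bit %d\n", count, start);
    }

    static void sync_bitmap(const unsigned char *map, int nbits)
    {
        int bit, count = 0;

        for (bit = 0; bit < nbits; bit++) {
            if (map[bit]) {
                count++;
                continue;
            }
            if (count)
                release_run(bit - count, count);
            count = 0;
        }
        /* the fix: flush the run still open at the end boundary */
        if (count)
            release_run(nbits - count, count);
    }

    int main(void)
    {
        /* trailing run of three set bits, previously leaked */
        unsigned char map[] = { 1, 1, 0, 1, 1, 1 };

        sync_bitmap(map, 6);
        return 0;
    }
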
+diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
+index 05d67968a3a9e1..1f9ed117e78b61 100644
+--- a/fs/ocfs2/move_extents.c
++++ b/fs/ocfs2/move_extents.c
+@@ -951,8 +951,8 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ inode_set_ctime_current(inode);
+- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
++ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
+
+ ocfs2_journal_dirty(handle, di_bh);
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 5cd6d7771cea10..21b3d5b9be6030 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -566,7 +566,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
+ fe->i_last_eb_blk = 0;
+ strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
+ fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
+- ktime_get_real_ts64(&ts);
++ ktime_get_coarse_real_ts64(&ts);
+ fe->i_atime = fe->i_ctime = fe->i_mtime =
+ cpu_to_le64(ts.tv_sec);
+ fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
+@@ -795,8 +795,9 @@ static int ocfs2_link(struct dentry *old_dentry,
+ inc_nlink(inode);
+ inode_set_ctime_current(inode);
+ ocfs2_set_links_count(fe, inode->i_nlink);
+- fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
++ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
++ ocfs2_update_inode_fsync_trans(handle, inode, 0);
+ ocfs2_journal_dirty(handle, fe_bh);
+
+ err = ocfs2_add_entry(handle, dentry, inode,
+@@ -993,9 +994,10 @@ static int ocfs2_unlink(struct inode *dir,
+ drop_nlink(inode);
+ drop_nlink(inode);
+ ocfs2_set_links_count(fe, inode->i_nlink);
++ ocfs2_update_inode_fsync_trans(handle, inode, 0);
+ ocfs2_journal_dirty(handle, fe_bh);
+
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ if (S_ISDIR(inode->i_mode))
+ drop_nlink(dir);
+
+@@ -1550,8 +1552,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
+ if (status >= 0) {
+ old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
+
+- old_di->i_ctime = cpu_to_le64(inode_get_ctime(old_inode).tv_sec);
+- old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(old_inode).tv_nsec);
++ old_di->i_ctime = cpu_to_le64(inode_get_ctime_sec(old_inode));
++ old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(old_inode));
+ ocfs2_journal_dirty(handle, old_inode_bh);
+ } else
+ mlog_errno(status);
+@@ -1592,7 +1594,7 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
+ drop_nlink(new_inode);
+ inode_set_ctime_current(new_inode);
+ }
+- old_dir->i_mtime = inode_set_ctime_current(old_dir);
++ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
+
+ if (update_dot_dot) {
+ status = ocfs2_update_entry(old_inode, handle,
+@@ -1614,8 +1616,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
+
+ if (old_dir != new_dir) {
+ /* Keep the same times on both directories.*/
+- new_dir->i_mtime = inode_set_ctime_to_ts(new_dir,
+- inode_get_ctime(old_dir));
++ inode_set_mtime_to_ts(new_dir,
++ inode_set_ctime_to_ts(new_dir, inode_get_ctime(old_dir)));
+
+ /*
+ * This will also pick up the i_nlink change from the
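
In the namei.c hunks, __ocfs2_mknod_locked() now stamps new inodes with ktime_get_coarse_real_ts64() instead of the precise clock; the coarse source is cheaper and matches the tick-granularity timestamps inodes get elsewhere via current_time(). The userspace analogue is CLOCK_REALTIME versus CLOCK_REALTIME_COARSE:

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec fine, coarse;

        clock_gettime(CLOCK_REALTIME, &fine);           /* precise, slower */
        clock_gettime(CLOCK_REALTIME_COARSE, &coarse);  /* tick resolution, cheap */
        printf("fine:   %lld.%09ld\n", (long long)fine.tv_sec, fine.tv_nsec);
        printf("coarse: %lld.%09ld\n", (long long)coarse.tv_sec, coarse.tv_nsec);
        return 0;
    }
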
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index a503c553bab21b..8fe826143d7bf4 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -284,6 +284,30 @@ enum ocfs2_mount_options
+ #define OCFS2_OSB_ERROR_FS 0x0004
+ #define OCFS2_DEFAULT_ATIME_QUANTUM 60
+
++struct ocfs2_triggers {
++ struct jbd2_buffer_trigger_type ot_triggers;
++ int ot_offset;
++ struct super_block *sb;
++};
++
++enum ocfs2_journal_trigger_type {
++ OCFS2_JTR_DI,
++ OCFS2_JTR_EB,
++ OCFS2_JTR_RB,
++ OCFS2_JTR_GD,
++ OCFS2_JTR_DB,
++ OCFS2_JTR_XB,
++ OCFS2_JTR_DQ,
++ OCFS2_JTR_DR,
++ OCFS2_JTR_DL,
++ OCFS2_JTR_NONE /* This must be the last entry */
++};
++
++#define OCFS2_JOURNAL_TRIGGER_COUNT OCFS2_JTR_NONE
++
++void ocfs2_initialize_journal_triggers(struct super_block *sb,
++ struct ocfs2_triggers triggers[]);
++
+ struct ocfs2_journal;
+ struct ocfs2_slot_info;
+ struct ocfs2_recovery_map;
+@@ -351,6 +375,9 @@ struct ocfs2_super
+ struct ocfs2_journal *journal;
+ unsigned long osb_commit_interval;
+
++ /* Journal triggers for checksum */
++ struct ocfs2_triggers s_journal_triggers[OCFS2_JOURNAL_TRIGGER_COUNT];
++
+ struct delayed_work la_enable_wq;
+
+ /*
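
The new struct ocfs2_triggers / enum ocfs2_journal_trigger_type pair above replaces nine file-scope singleton trigger structs with an enum-indexed array embedded in each ocfs2_super, which is what allows every trigger to carry a back-pointer to its own superblock (used by ocfs2_abort_trigger() earlier in this patch). A userspace model of that singleton-to-per-instance-table rework, with illustrative names:

    #include <stdio.h>

    enum trigger_type { TR_DI, TR_EB, TR_GD, TR_COUNT };

    struct super;                     /* forward declaration */

    struct trigger {
        int offset;                   /* per-type payload */
        struct super *sb;             /* back-pointer, impossible with globals */
    };

    struct super {
        const char *name;
        struct trigger triggers[TR_COUNT];
    };

    static void init_triggers(struct super *sb)
    {
        int t;

        for (t = 0; t < TR_COUNT; t++) {
            sb->triggers[t].offset = t * 8;   /* stand-in for offsetof(...) */
            sb->triggers[t].sb = sb;
        }
    }

    int main(void)
    {
        struct super a = { .name = "volA" }, b = { .name = "volB" };

        init_triggers(&a);
        init_triggers(&b);
        /* each trigger can now name its own volume, e.g. in error reports */
        printf("%s %s\n", a.triggers[TR_DI].sb->name, b.triggers[TR_GD].sb->name);
        return 0;
    }
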
+diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
+index ac4fd1d5b128bc..6c3f4d7df7d6e0 100644
+--- a/fs/ocfs2/ocfs2_trace.h
++++ b/fs/ocfs2/ocfs2_trace.h
+@@ -2579,6 +2579,8 @@ DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
+
+ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+
++DEFINE_OCFS2_INT_EVENT(ocfs2_assure_trans_credits);
++
+ DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
+
+ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index dfaae1e524124d..257f13cdd14c1f 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -689,7 +689,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
+ int status;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_quota_recovery *rec;
+- int locked = 0;
++ int locked = 0, global_read = 0;
+
+ info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
+ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
+@@ -697,6 +697,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
+ if (!oinfo) {
+ mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
+ " info.");
++ status = -ENOMEM;
+ goto out_err;
+ }
+ info->dqi_priv = oinfo;
+@@ -709,6 +710,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
+ status = ocfs2_global_read_info(sb, type);
+ if (status < 0)
+ goto out_err;
++ global_read = 1;
+
+ status = ocfs2_inode_lock(lqinode, &oinfo->dqi_lqi_bh, 1);
+ if (status < 0) {
+@@ -779,10 +781,12 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
+ if (locked)
+ ocfs2_inode_unlock(lqinode, 1);
+ ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
++ if (global_read)
++ cancel_delayed_work_sync(&oinfo->dqi_sync_work);
+ kfree(oinfo);
+ }
+ brelse(bh);
+- return -1;
++ return status;
+ }
+
+ /* Write local info to quota file */
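
The quota_local.c changes tighten ocfs2_local_read_info()'s error path: each setup step that succeeded is recorded (here, global_read) and undone on failure, and the function now propagates the real status instead of a flat -1. A userspace sketch of the bookkeeping pattern — illustrative names, with step_b made to fail for the demonstration:

    #include <errno.h>
    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return -ENOMEM; }   /* fails in this model */
    static void undo_a(void) { puts("undo a"); }

    static int setup(void)
    {
        int status, a_done = 0;

        status = step_a();
        if (status < 0)
            goto out_err;
        a_done = 1;

        status = step_b();
        if (status < 0)
            goto out_err;
        return 0;

    out_err:
        if (a_done)
            undo_a();             /* undo exactly what succeeded */
        return status;            /* the real errno, not a flat -1 */
    }

    int main(void)
    {
        printf("setup -> %d\n", setup());
        return 0;
    }
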
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index 25c8ec3c8c3a5a..c71b79b5fb9be0 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -25,6 +25,7 @@
+ #include "namei.h"
+ #include "ocfs2_trace.h"
+ #include "file.h"
++#include "symlink.h"
+
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
+@@ -3751,8 +3752,8 @@ static int ocfs2_change_ctime(struct inode *inode,
+ }
+
+ inode_set_ctime_current(inode);
+- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
++ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+@@ -4075,10 +4076,10 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
+ */
+ inode_set_ctime_current(t_inode);
+
+- di->i_ctime = cpu_to_le64(inode_get_ctime(t_inode).tv_sec);
+- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(t_inode).tv_nsec);
++ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(t_inode));
++ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(t_inode));
+
+- t_inode->i_mtime = s_inode->i_mtime;
++ inode_set_mtime_to_ts(t_inode, inode_get_mtime(s_inode));
+ di->i_mtime = s_di->i_mtime;
+ di->i_mtime_nsec = s_di->i_mtime_nsec;
+ }
+@@ -4155,8 +4156,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
+ int ret;
+ struct inode *inode = d_inode(old_dentry);
+ struct buffer_head *new_bh = NULL;
++ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+- if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
++ if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ goto out;
+@@ -4182,6 +4184,26 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
+ goto out_unlock;
+ }
+
++ if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) &&
++ (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
++ /*
++		 * Adjust the extent record count to reserve space for extended attributes.
++		 * The inline data count was already adjusted in ocfs2_duplicate_inline_data().
++ */
++ struct ocfs2_inode_info *new_oi = OCFS2_I(new_inode);
++
++ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
++ !(ocfs2_inode_is_fast_symlink(new_inode))) {
++ struct ocfs2_dinode *new_di = (struct ocfs2_dinode *)new_bh->b_data;
++ struct ocfs2_dinode *old_di = (struct ocfs2_dinode *)old_bh->b_data;
++ struct ocfs2_extent_list *el = &new_di->id2.i_list;
++ int inline_size = le16_to_cpu(old_di->i_xattr_inline_size);
++
++ le16_add_cpu(&el->l_count, -(inline_size /
++ sizeof(struct ocfs2_extent_rec)));
++ }
++ }
++
+ ret = ocfs2_create_reflink_node(inode, old_bh,
+ new_inode, new_bh, preserve);
+ if (ret) {
+@@ -4189,7 +4211,7 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
+ goto inode_unlock;
+ }
+
+- if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
++ if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
+ ret = ocfs2_reflink_xattrs(inode, old_bh,
+ new_inode, new_bh,
+ preserve);
+@@ -4456,7 +4478,7 @@ int ocfs2_reflink_update_dest(struct inode *dest,
+ if (newlen > i_size_read(dest))
+ i_size_write(dest, newlen);
+ spin_unlock(&OCFS2_I(dest)->ip_lock);
+- dest->i_mtime = inode_set_ctime_current(dest);
++ inode_set_mtime_to_ts(dest, inode_set_ctime_current(dest));
+
+ ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
+ if (ret) {
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 6b906424902b46..cfc093937a178d 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -122,7 +122,7 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend);
+ static int ocfs2_enable_quotas(struct ocfs2_super *osb);
+ static void ocfs2_disable_quotas(struct ocfs2_super *osb);
+
+-static struct dquot **ocfs2_get_dquots(struct inode *inode)
++static struct dquot __rcu **ocfs2_get_dquots(struct inode *inode)
+ {
+ return OCFS2_I(inode)->i_dquot;
+ }
+@@ -1075,9 +1075,11 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
+ osb, &ocfs2_osb_debug_fops);
+
+- if (ocfs2_meta_ecc(osb))
++ if (ocfs2_meta_ecc(osb)) {
++ ocfs2_initialize_journal_triggers(sb, osb->s_journal_triggers);
+ ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
+ osb->osb_debug_root);
++ }
+
+ status = ocfs2_mount_volume(sb);
+ if (status < 0)
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 6510ad783c912c..1cc28891807154 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -1062,13 +1062,13 @@ ssize_t ocfs2_listxattr(struct dentry *dentry,
+ return i_ret + b_ret;
+ }
+
+-static int ocfs2_xattr_find_entry(int name_index,
++static int ocfs2_xattr_find_entry(struct inode *inode, int name_index,
+ const char *name,
+ struct ocfs2_xattr_search *xs)
+ {
+ struct ocfs2_xattr_entry *entry;
+ size_t name_len;
+- int i, cmp = 1;
++ int i, name_offset, cmp = 1;
+
+ if (name == NULL)
+ return -EINVAL;
+@@ -1076,13 +1076,22 @@ static int ocfs2_xattr_find_entry(int name_index,
+ name_len = strlen(name);
+ entry = xs->here;
+ for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) {
++ if ((void *)entry >= xs->end) {
++ ocfs2_error(inode->i_sb, "corrupted xattr entries");
++ return -EFSCORRUPTED;
++ }
+ cmp = name_index - ocfs2_xattr_get_type(entry);
+ if (!cmp)
+ cmp = name_len - entry->xe_name_len;
+- if (!cmp)
+- cmp = memcmp(name, (xs->base +
+- le16_to_cpu(entry->xe_name_offset)),
+- name_len);
++ if (!cmp) {
++ name_offset = le16_to_cpu(entry->xe_name_offset);
++ if ((xs->base + name_offset + name_len) > xs->end) {
++ ocfs2_error(inode->i_sb,
++ "corrupted xattr entries");
++ return -EFSCORRUPTED;
++ }
++ cmp = memcmp(name, (xs->base + name_offset), name_len);
++ }
+ if (cmp == 0)
+ break;
+ entry += 1;
+@@ -1166,7 +1175,7 @@ static int ocfs2_xattr_ibody_get(struct inode *inode,
+ xs->base = (void *)xs->header;
+ xs->here = xs->header->xh_entries;
+
+- ret = ocfs2_xattr_find_entry(name_index, name, xs);
++ ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
+ if (ret)
+ return ret;
+ size = le64_to_cpu(xs->here->xe_value_size);
+@@ -2698,7 +2707,7 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
+
+ /* Find the named attribute. */
+ if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
+- ret = ocfs2_xattr_find_entry(name_index, name, xs);
++ ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
+ if (ret && ret != -ENODATA)
+ return ret;
+ xs->not_found = ret;
+@@ -2833,7 +2842,7 @@ static int ocfs2_xattr_block_find(struct inode *inode,
+ xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size;
+ xs->here = xs->header->xh_entries;
+
+- ret = ocfs2_xattr_find_entry(name_index, name, xs);
++ ret = ocfs2_xattr_find_entry(inode, name_index, name, xs);
+ } else
+ ret = ocfs2_xattr_index_block_find(inode, blk_bh,
+ name_index,
+@@ -3422,8 +3431,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
+ }
+
+ inode_set_ctime_current(inode);
+- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
++ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
++ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
+ }
+ out:
+@@ -6511,16 +6520,7 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
+ }
+
+ new_oi = OCFS2_I(args->new_inode);
+- /*
+- * Adjust extent record count to reserve space for extended attribute.
+- * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
+- */
+- if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
+- !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
+- struct ocfs2_extent_list *el = &new_di->id2.i_list;
+- le16_add_cpu(&el->l_count, -(inline_size /
+- sizeof(struct ocfs2_extent_rec)));
+- }
++
+ spin_lock(&new_oi->ip_lock);
+ new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
+ new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
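
ocfs2_xattr_find_entry() above now validates both the entry-array cursor and each entry's stored name offset against xs->end before memcmp() reads through them, returning -EFSCORRUPTED instead of walking off the buffer. A compilable userspace model of those two checks — illustrative types, and the errno value is a stand-in:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EFSCORRUPTED 117          /* stand-in for the kernel errno */

    struct xent {
        uint16_t name_offset;         /* where the name lives in the buffer */
        uint8_t  name_len;
    };

    static int find_entry(const char *base, const char *end,
                          const struct xent *entries, int count,
                          const char *name)
    {
        size_t name_len = strlen(name);
        int i;

        for (i = 0; i < count; i++) {
            const struct xent *e = &entries[i];

            if ((const char *)(e + 1) > end)
                return -EFSCORRUPTED;         /* entry array overruns buffer */
            if (name_len != e->name_len)
                continue;
            if (base + e->name_offset + name_len > end)
                return -EFSCORRUPTED;         /* name offset points past end */
            if (!memcmp(base + e->name_offset, name, name_len))
                return i;
        }
        return -1;
    }

    int main(void)
    {
        char buf[64] = {0};
        struct xent *entries = (struct xent *)buf;

        entries[0] = (struct xent){ .name_offset = 32, .name_len = 4 };
        memcpy(buf + 32, "user", 4);
        printf("found at %d\n",
               find_entry(buf, buf + sizeof(buf), entries, 1, "user"));
        return 0;
    }
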
+diff --git a/fs/open.c b/fs/open.c
+index 98f6601fbac65e..59db720693f9a0 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -200,13 +200,13 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+ return error;
+ }
+
+-SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
++SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length)
+ {
+ return do_sys_ftruncate(fd, length, 1);
+ }
+
+ #ifdef CONFIG_COMPAT
+-COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
++COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length)
+ {
+ return do_sys_ftruncate(fd, length, 1);
+ }
+@@ -1069,8 +1069,6 @@ struct file *dentry_open(const struct path *path, int flags,
+ int error;
+ struct file *f;
+
+- validate_creds(cred);
+-
+ /* We must always pass in a valid mount pointer. */
+ BUG_ON(!path->mnt);
+
+@@ -1109,7 +1107,6 @@ struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+ struct file *f;
+ int error;
+
+- validate_creds(cred);
+ f = alloc_empty_file(flags, cred);
+ if (IS_ERR(f))
+ return f;
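
The ftruncate change above is about integer width and signedness: with the parameter declared unsigned (compat_ulong_t), a 32-bit caller's negative off_t zero-extends into a huge positive loff_t and sails past the length < 0 check downstream; declaring it compat_off_t makes it sign-extend and get rejected. In miniature:

    #include <stdio.h>

    static long long widen_unsigned(unsigned int v) { return (long long)v; } /* zero-extends */
    static long long widen_signed(int v)            { return (long long)v; } /* sign-extends */

    int main(void)
    {
        int bad = -1;   /* a 32-bit caller passing off_t -1 */

        /* compat_ulong_t behaviour: 4294967295, passes a length < 0 check */
        printf("zero-extended: %lld\n", widen_unsigned((unsigned int)bad));
        /* compat_off_t behaviour: -1, correctly rejected as negative */
        printf("sign-extended: %lld\n", widen_signed(bad));
        return 0;
    }
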
+diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
+index b2457cb97fa008..e62e809a55265d 100644
+--- a/fs/openpromfs/inode.c
++++ b/fs/openpromfs/inode.c
+@@ -355,10 +355,10 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
+ return inode;
+ }
+
+-static int openprom_remount(struct super_block *sb, int *flags, char *data)
++static int openpromfs_reconfigure(struct fs_context *fc)
+ {
+- sync_filesystem(sb);
+- *flags |= SB_NOATIME;
++ sync_filesystem(fc->root->d_sb);
++ fc->sb_flags |= SB_NOATIME;
+ return 0;
+ }
+
+@@ -366,7 +366,6 @@ static const struct super_operations openprom_sops = {
+ .alloc_inode = openprom_alloc_inode,
+ .free_inode = openprom_free_inode,
+ .statfs = simple_statfs,
+- .remount_fs = openprom_remount,
+ };
+
+ static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
+@@ -415,6 +414,7 @@ static int openpromfs_get_tree(struct fs_context *fc)
+
+ static const struct fs_context_operations openpromfs_context_ops = {
+ .get_tree = openpromfs_get_tree,
++ .reconfigure = openpromfs_reconfigure,
+ };
+
+ static int openpromfs_init_fs_context(struct fs_context *fc)
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 5254256a224d7a..24e028c119c1b5 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -201,7 +201,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ (long)new_op->downcall.resp.statfs.files_avail);
+
+ buf->f_type = sb->s_magic;
+- memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
++ buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
++ buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
+ buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+ buf->f_namelen = ORANGEFS_NAME_MAX;
+
+@@ -527,7 +528,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
+ sb->s_fs_info = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL);
+ if (!ORANGEFS_SB(sb)) {
+ d = ERR_PTR(-ENOMEM);
+- goto free_sb_and_op;
++ goto free_op;
+ }
+
+ ret = orangefs_fill_sb(sb,
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 033fc0458a3d82..54602f0bed8be0 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -327,9 +327,6 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
+ struct dentry *newdentry;
+ int err;
+
+- if (!attr->hardlink && !IS_POSIXACL(udir))
+- attr->mode &= ~current_umask();
+-
+ inode_lock_nested(udir, I_MUTEX_PARENT);
+ newdentry = ovl_create_real(ofs, udir,
+ ovl_lookup_upper(ofs, dentry->d_name.name,
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index 26b782c53910b5..611ff567a1aa6f 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -186,6 +186,10 @@ static int ovl_check_encode_origin(struct dentry *dentry)
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ bool decodable = ofs->config.nfs_export;
+
++ /* No upper layer? */
++ if (!ovl_upper_mnt(ofs))
++ return 1;
++
+ /* Lower file handle for non-upper non-decodable */
+ if (!ovl_dentry_upper(dentry) && !decodable)
+ return 1;
+@@ -214,7 +218,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
+ * ovl_connect_layer() will try to make origin's layer "connected" by
+ * copying up a "connectable" ancestor.
+ */
+- if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable)
++ if (d_is_dir(dentry) && decodable)
+ return ovl_connect_layer(dentry);
+
+ /* Lower file handle for indexed and non-upper dir/non-dir */
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 83ef66644c213b..fca29dba7b146a 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -171,7 +171,7 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+
+ type = ovl_path_real(dentry, &realpath);
+ old_cred = ovl_override_creds(dentry->d_sb);
+- err = vfs_getattr(&realpath, stat, request_mask, flags);
++ err = ovl_do_getattr(&realpath, stat, request_mask, flags);
+ if (err)
+ goto out;
+
+@@ -196,8 +196,8 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+ (!is_dir ? STATX_NLINK : 0);
+
+ ovl_path_lower(dentry, &realpath);
+- err = vfs_getattr(&realpath, &lowerstat,
+- lowermask, flags);
++ err = ovl_do_getattr(&realpath, &lowerstat, lowermask,
++ flags);
+ if (err)
+ goto out;
+
+@@ -249,8 +249,8 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+
+ ovl_path_lowerdata(dentry, &realpath);
+ if (realpath.dentry) {
+- err = vfs_getattr(&realpath, &lowerdatastat,
+- lowermask, flags);
++ err = ovl_do_getattr(&realpath, &lowerdatastat,
++ lowermask, flags);
+ if (err)
+ goto out;
+ } else {
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 9817b2dcb132c2..09ca82ed0f8ced 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -397,6 +397,14 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
+ return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
+ }
+
++static inline int ovl_do_getattr(const struct path *path, struct kstat *stat,
++ u32 request_mask, unsigned int flags)
++{
++ if (flags & AT_GETATTR_NOSEC)
++ return vfs_getattr_nosec(path, stat, request_mask, flags);
++ return vfs_getattr(path, stat, request_mask, flags);
++}
++
+ /* util.c */
+ int ovl_want_write(struct dentry *dentry);
+ void ovl_drop_write(struct dentry *dentry);
+diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
+index f6ff23fd101c8f..21d31aaef95d6c 100644
+--- a/fs/overlayfs/params.c
++++ b/fs/overlayfs/params.c
+@@ -43,8 +43,10 @@ module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
+ MODULE_PARM_DESC(metacopy,
+ "Default to on or off for the metadata only copy up feature");
+
+-enum {
++enum ovl_opt {
+ Opt_lowerdir,
++ Opt_lowerdir_add,
++ Opt_datadir_add,
+ Opt_upperdir,
+ Opt_workdir,
+ Opt_default_permissions,
+@@ -140,8 +142,11 @@ static int ovl_verity_mode_def(void)
+ #define fsparam_string_empty(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
+
++
+ const struct fs_parameter_spec ovl_parameter_spec[] = {
+ fsparam_string_empty("lowerdir", Opt_lowerdir),
++ fsparam_string("lowerdir+", Opt_lowerdir_add),
++ fsparam_string("datadir+", Opt_datadir_add),
+ fsparam_string("upperdir", Opt_upperdir),
+ fsparam_string("workdir", Opt_workdir),
+ fsparam_flag("default_permissions", Opt_default_permissions),
+@@ -238,19 +243,8 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
+ pr_err("failed to resolve '%s': %i\n", name, err);
+ goto out;
+ }
+- err = -EINVAL;
+- if (ovl_dentry_weird(path->dentry)) {
+- pr_err("filesystem on '%s' not supported\n", name);
+- goto out_put;
+- }
+- if (!d_is_dir(path->dentry)) {
+- pr_err("'%s' not a directory\n", name);
+- goto out_put;
+- }
+ return 0;
+
+-out_put:
+- path_put_init(path);
+ out:
+ return err;
+ }
+@@ -268,7 +262,7 @@ static void ovl_unescape(char *s)
+ }
+ }
+
+-static int ovl_mount_dir(const char *name, struct path *path, bool upper)
++static int ovl_mount_dir(const char *name, struct path *path)
+ {
+ int err = -ENOMEM;
+ char *tmp = kstrdup(name, GFP_KERNEL);
+@@ -276,68 +270,156 @@ static int ovl_mount_dir(const char *name, struct path *path, bool upper)
+ if (tmp) {
+ ovl_unescape(tmp);
+ err = ovl_mount_dir_noesc(tmp, path);
+-
+- if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
+- pr_err("filesystem on '%s' not supported as upperdir\n",
+- tmp);
+- path_put_init(path);
+- err = -EINVAL;
+- }
+ kfree(tmp);
+ }
+ return err;
+ }
+
+-static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
+- bool workdir)
++static int ovl_mount_dir_check(struct fs_context *fc, const struct path *path,
++ enum ovl_opt layer, const char *name, bool upper)
+ {
+- int err;
+- struct ovl_fs *ofs = fc->s_fs_info;
+- struct ovl_config *config = &ofs->config;
+ struct ovl_fs_context *ctx = fc->fs_private;
+- struct path path;
+- char *dup;
+
+- err = ovl_mount_dir(name, &path, true);
+- if (err)
+- return err;
++ if (!d_is_dir(path->dentry))
++ return invalfc(fc, "%s is not a directory", name);
++
++ /*
++ * Root dentries of case-insensitive capable filesystems might
++ * not have the dentry operations set, but still be incompatible
++ * with overlayfs. Check explicitly to prevent post-mount
++ * failures.
++ */
++ if (sb_has_encoding(path->mnt->mnt_sb))
++ return invalfc(fc, "case-insensitive capable filesystem on %s not supported", name);
++
++ if (ovl_dentry_weird(path->dentry))
++ return invalfc(fc, "filesystem on %s not supported", name);
+
+ /*
+ * Check whether upper path is read-only here to report failures
+ * early. Don't forget to recheck when the superblock is created
+ * as the mount attributes could change.
+ */
+- if (__mnt_is_readonly(path.mnt)) {
+- path_put(&path);
+- return -EINVAL;
++ if (upper) {
++ if (path->dentry->d_flags & DCACHE_OP_REAL)
++ return invalfc(fc, "filesystem on %s not supported as upperdir", name);
++ if (__mnt_is_readonly(path->mnt))
++ return invalfc(fc, "filesystem on %s is read-only", name);
++ } else {
++ if (ctx->lowerdir_all && layer != Opt_lowerdir)
++ return invalfc(fc, "lowerdir+ and datadir+ cannot follow lowerdir");
++ if (ctx->nr_data && layer == Opt_lowerdir_add)
++ return invalfc(fc, "regular lower layers cannot follow data layers");
++ if (ctx->nr == OVL_MAX_STACK)
++ return invalfc(fc, "too many lower directories, limit is %d",
++ OVL_MAX_STACK);
+ }
++ return 0;
++}
+
+- dup = kstrdup(name, GFP_KERNEL);
+- if (!dup) {
+- path_put(&path);
++static int ovl_ctx_realloc_lower(struct fs_context *fc)
++{
++ struct ovl_fs_context *ctx = fc->fs_private;
++ struct ovl_fs_context_layer *l;
++ size_t nr;
++
++ if (ctx->nr < ctx->capacity)
++ return 0;
++
++ nr = min_t(size_t, max(4096 / sizeof(*l), ctx->capacity * 2),
++ OVL_MAX_STACK);
++ l = krealloc_array(ctx->lower, nr, sizeof(*l), GFP_KERNEL_ACCOUNT);
++ if (!l)
+ return -ENOMEM;
++
++ ctx->lower = l;
++ ctx->capacity = nr;
++ return 0;
++}
++
++static void ovl_add_layer(struct fs_context *fc, enum ovl_opt layer,
++ struct path *path, char **pname)
++{
++ struct ovl_fs *ofs = fc->s_fs_info;
++ struct ovl_config *config = &ofs->config;
++ struct ovl_fs_context *ctx = fc->fs_private;
++ struct ovl_fs_context_layer *l;
++
++ switch (layer) {
++ case Opt_workdir:
++ swap(config->workdir, *pname);
++ swap(ctx->work, *path);
++ break;
++ case Opt_upperdir:
++ swap(config->upperdir, *pname);
++ swap(ctx->upper, *path);
++ break;
++ case Opt_datadir_add:
++ ctx->nr_data++;
++ fallthrough;
++ case Opt_lowerdir:
++ fallthrough;
++ case Opt_lowerdir_add:
++ WARN_ON(ctx->nr >= ctx->capacity);
++ l = &ctx->lower[ctx->nr++];
++ memset(l, 0, sizeof(*l));
++ swap(l->name, *pname);
++ swap(l->path, *path);
++ break;
++ default:
++ WARN_ON(1);
+ }
++}
+
+- if (workdir) {
+- kfree(config->workdir);
+- config->workdir = dup;
+- path_put(&ctx->work);
+- ctx->work = path;
+- } else {
+- kfree(config->upperdir);
+- config->upperdir = dup;
+- path_put(&ctx->upper);
+- ctx->upper = path;
++static int ovl_parse_layer(struct fs_context *fc, const char *layer_name, enum ovl_opt layer)
++{
++ char *name = kstrdup(layer_name, GFP_KERNEL);
++ bool upper = (layer == Opt_upperdir || layer == Opt_workdir);
++ struct path path;
++ int err;
++
++ if (!name)
++ return -ENOMEM;
++
++ if (upper || layer == Opt_lowerdir)
++ err = ovl_mount_dir(name, &path);
++ else
++ err = ovl_mount_dir_noesc(name, &path);
++ if (err)
++ goto out_free;
++
++ err = ovl_mount_dir_check(fc, &path, layer, name, upper);
++ if (err)
++ goto out_put;
++
++ if (!upper) {
++ err = ovl_ctx_realloc_lower(fc);
++ if (err)
++ goto out_put;
+ }
+- return 0;
++
++	/* Store the user-provided path string in ctx to show in mountinfo */
++ ovl_add_layer(fc, layer, &path, &name);
++
++out_put:
++ path_put(&path);
++out_free:
++ kfree(name);
++ return err;
+ }
+
+-static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
++static void ovl_reset_lowerdirs(struct ovl_fs_context *ctx)
+ {
+- for (size_t nr = 0; nr < ctx->nr; nr++) {
+- path_put(&ctx->lower[nr].path);
+- kfree(ctx->lower[nr].name);
+- ctx->lower[nr].name = NULL;
++ struct ovl_fs_context_layer *l = ctx->lower;
++
++	// Reset the old user-provided lowerdir string
++ kfree(ctx->lowerdir_all);
++ ctx->lowerdir_all = NULL;
++
++ for (size_t nr = 0; nr < ctx->nr; nr++, l++) {
++ path_put(&l->path);
++ kfree(l->name);
++ l->name = NULL;
+ }
+ ctx->nr = 0;
+ ctx->nr_data = 0;
+@@ -346,7 +428,7 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
+ /*
+ * Parse lowerdir= mount option:
+ *
+- * (1) lowerdir=/lower1:/lower2:/lower3::/data1::/data2
++ * e.g.: lowerdir=/lower1:/lower2:/lower3::/data1::/data2
+ * Set "/lower1", "/lower2", and "/lower3" as lower layers and
+ * "/data1" and "/data2" as data lower layers. Any existing lower
+ * layers are replaced.
+@@ -355,10 +437,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+ {
+ int err;
+ struct ovl_fs_context *ctx = fc->fs_private;
+- struct ovl_fs_context_layer *l;
+- char *dup = NULL, *dup_iter;
+- ssize_t nr_lower = 0, nr = 0, nr_data = 0;
+- bool append = false, data_layer = false;
++ char *dup = NULL, *iter;
++ ssize_t nr_lower, nr;
++ bool data_layer = false;
+
+ /*
+ * Ensure we're backwards compatible with mount(2)
+@@ -366,16 +447,21 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+ */
+
+ /* drop all existing lower layers */
+- if (!*name) {
+- ovl_parse_param_drop_lowerdir(ctx);
++ ovl_reset_lowerdirs(ctx);
++
++ if (!*name)
+ return 0;
+- }
+
+ if (*name == ':') {
+- pr_err("cannot append lower layer");
++ pr_err("cannot append lower layer\n");
+ return -EINVAL;
+ }
+
++	// Store the user-provided lowerdir string to show in mount options
++ ctx->lowerdir_all = kstrdup(name, GFP_KERNEL);
++ if (!ctx->lowerdir_all)
++ return -ENOMEM;
++
+ dup = kstrdup(name, GFP_KERNEL);
+ if (!dup)
+ return -ENOMEM;
+@@ -385,102 +471,34 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+ if (nr_lower < 0)
+ goto out_err;
+
+- if ((nr_lower > OVL_MAX_STACK) ||
+- (append && (size_add(ctx->nr, nr_lower) > OVL_MAX_STACK))) {
++ if (nr_lower > OVL_MAX_STACK) {
+ pr_err("too many lower directories, limit is %d\n", OVL_MAX_STACK);
+ goto out_err;
+ }
+
+- if (!append)
+- ovl_parse_param_drop_lowerdir(ctx);
+-
+- /*
+- * (1) append
+- *
+- * We want nr <= nr_lower <= capacity We know nr > 0 and nr <=
+- * capacity. If nr == 0 this wouldn't be append. If nr +
+- * nr_lower is <= capacity then nr <= nr_lower <= capacity
+- * already holds. If nr + nr_lower exceeds capacity, we realloc.
+- *
+- * (2) replace
+- *
+- * Ensure we're backwards compatible with mount(2) which allows
+- * "lowerdir=/a:/b:/c,lowerdir=/d:/e:/f" causing the last
+- * specified lowerdir mount option to win.
+- *
+- * We want nr <= nr_lower <= capacity We know either (i) nr == 0
+- * or (ii) nr > 0. We also know nr_lower > 0. The capacity
+- * could've been changed multiple times already so we only know
+- * nr <= capacity. If nr + nr_lower > capacity we realloc,
+- * otherwise nr <= nr_lower <= capacity holds already.
+- */
+- nr_lower += ctx->nr;
+- if (nr_lower > ctx->capacity) {
+- err = -ENOMEM;
+- l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower),
+- GFP_KERNEL_ACCOUNT);
+- if (!l)
+- goto out_err;
+-
+- ctx->lower = l;
+- ctx->capacity = nr_lower;
+- }
+-
+- /*
+- * (3) By (1) and (2) we know nr <= nr_lower <= capacity.
+- * (4) If ctx->nr == 0 => replace
+- * We have verified above that the lowerdir mount option
+- * isn't an append, i.e., the lowerdir mount option
+- * doesn't start with ":" or "::".
+- * (4.1) The lowerdir mount options only contains regular lower
+- * layers ":".
+- * => Nothing to verify.
+- * (4.2) The lowerdir mount options contains regular ":" and
+- * data "::" layers.
+- * => We need to verify that data lower layers "::" aren't
+- * followed by regular ":" lower layers
+- * (5) If ctx->nr > 0 => append
+- * We know that there's at least one regular layer
+- * otherwise we would've failed when parsing the previous
+- * lowerdir mount option.
+- * (5.1) The lowerdir mount option is a regular layer ":" append
+- * => We need to verify that no data layers have been
+- * specified before.
+- * (5.2) The lowerdir mount option is a data layer "::" append
+- * We know that there's at least one regular layer or
+- * other data layers. => There's nothing to verify.
+- */
+- dup_iter = dup;
+- for (nr = ctx->nr; nr < nr_lower; nr++) {
+- l = &ctx->lower[nr];
+- memset(l, 0, sizeof(*l));
+-
+- err = ovl_mount_dir(dup_iter, &l->path, false);
++ iter = dup;
++ for (nr = 0; nr < nr_lower; nr++) {
++ err = ovl_parse_layer(fc, iter, Opt_lowerdir);
+ if (err)
+- goto out_put;
+-
+- err = -ENOMEM;
+- l->name = kstrdup(dup_iter, GFP_KERNEL_ACCOUNT);
+- if (!l->name)
+- goto out_put;
++ goto out_err;
+
+ if (data_layer)
+- nr_data++;
++ ctx->nr_data++;
+
+ /* Calling strchr() again would overrun. */
+- if ((nr + 1) == nr_lower)
++ if (ctx->nr == nr_lower)
+ break;
+
+ err = -EINVAL;
+- dup_iter = strchr(dup_iter, '\0') + 1;
+- if (*dup_iter) {
++ iter = strchr(iter, '\0') + 1;
++ if (*iter) {
+ /*
+ * This is a regular layer so we require that
+ * there are no data layers.
+ */
+- if ((ctx->nr_data + nr_data) > 0) {
+- pr_err("regular lower layers cannot follow data lower layers");
+- goto out_put;
++ if (ctx->nr_data > 0) {
++ pr_err("regular lower layers cannot follow data lower layers\n");
++ goto out_err;
+ }
+
+ data_layer = false;
+@@ -489,30 +507,11 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
+
+ /* This is a data lower layer. */
+ data_layer = true;
+- dup_iter++;
++ iter++;
+ }
+- ctx->nr = nr_lower;
+- ctx->nr_data += nr_data;
+ kfree(dup);
+ return 0;
+
+-out_put:
+- /*
+- * We know nr >= ctx->nr < nr_lower. If we failed somewhere
+- * we want to undo until nr == ctx->nr. This is correct for
+- * both ctx->nr == 0 and ctx->nr > 0.
+- */
+- for (; nr >= ctx->nr; nr--) {
+- l = &ctx->lower[nr];
+- kfree(l->name);
+- l->name = NULL;
+- path_put(&l->path);
+-
+- /* don't overflow */
+- if (nr == 0)
+- break;
+- }
+-
+ out_err:
+ kfree(dup);
+
+@@ -556,11 +555,11 @@ static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ case Opt_lowerdir:
+ err = ovl_parse_param_lowerdir(param->string, fc);
+ break;
++ case Opt_lowerdir_add:
++ case Opt_datadir_add:
+ case Opt_upperdir:
+- fallthrough;
+ case Opt_workdir:
+- err = ovl_parse_param_upperdir(param->string, fc,
+- (Opt_workdir == opt));
++ err = ovl_parse_layer(fc, param->string, opt);
+ break;
+ case Opt_default_permissions:
+ config->default_permissions = true;
+@@ -617,7 +616,7 @@ static int ovl_get_tree(struct fs_context *fc)
+
+ static inline void ovl_fs_context_free(struct ovl_fs_context *ctx)
+ {
+- ovl_parse_param_drop_lowerdir(ctx);
++ ovl_reset_lowerdirs(ctx);
+ path_put(&ctx->upper);
+ path_put(&ctx->work);
+ kfree(ctx->lower);
+@@ -762,11 +761,6 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
+ {
+ struct ovl_opt_set set = ctx->set;
+
+- if (ctx->nr_data > 0 && !config->metacopy) {
+- pr_err("lower data-only dirs require metacopy support.\n");
+- return -EINVAL;
+- }
+-
+ /* Workdir/index are useless in non-upper mount */
+ if (!config->upperdir) {
+ if (config->workdir) {
+@@ -918,6 +912,39 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
+ config->metacopy = false;
+ }
+
++ /*
++ * Fail if we don't have trusted xattr capability and a feature was
++ * explicitly requested that requires them.
++ */
++ if (!config->userxattr && !capable(CAP_SYS_ADMIN)) {
++ if (set.redirect &&
++ config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
++ pr_err("redirect_dir requires permission to access trusted xattrs\n");
++ return -EPERM;
++ }
++ if (config->metacopy && set.metacopy) {
++ pr_err("metacopy requires permission to access trusted xattrs\n");
++ return -EPERM;
++ }
++ if (config->verity_mode) {
++ pr_err("verity requires permission to access trusted xattrs\n");
++ return -EPERM;
++ }
++ if (ctx->nr_data > 0) {
++ pr_err("lower data-only dirs require permission to access trusted xattrs\n");
++ return -EPERM;
++ }
++ /*
++ * Other xattr-dependent features should be disabled without
++ * great disturbance to the user in ovl_make_workdir().
++ */
++ }
++
++ if (ctx->nr_data > 0 && !config->metacopy) {
++ pr_err("lower data-only dirs require metacopy support.\n");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -933,23 +960,28 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+ {
+ struct super_block *sb = dentry->d_sb;
+ struct ovl_fs *ofs = OVL_FS(sb);
+- size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
++ size_t nr, nr_merged_lower, nr_lower = 0;
++ char **lowerdirs = ofs->config.lowerdirs;
+
+ /*
+- * lowerdirs[] starts from offset 1, then
+- * >= 0 regular lower layers prefixed with : and
+- * >= 0 data-only lower layers prefixed with ::
+- *
+- * we need to escase comma and space like seq_show_option() does and
+- * we also need to escape the colon separator from lowerdir paths.
++ * lowerdirs[0] holds the colon-separated list that the user provided
++ * with the lowerdir mount option.
++ * lowerdirs[1..numlayer] hold the lowerdir paths that were added
++ * using the lowerdir+ and datadir+ mount options.
++ * For now, we do not allow mixing the legacy lowerdir mount option
++ * with the new lowerdir+ and datadir+ mount options.
+ */
+- seq_puts(m, ",lowerdir=");
+- for (nr = 1; nr < ofs->numlayer; nr++) {
+- if (nr > 1)
+- seq_putc(m, ':');
+- if (nr >= nr_merged_lower)
+- seq_putc(m, ':');
+- seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
++ if (lowerdirs[0]) {
++ seq_show_option(m, "lowerdir", lowerdirs[0]);
++ } else {
++ nr_lower = ofs->numlayer;
++ nr_merged_lower = nr_lower - ofs->numdatalayer;
++ }
++ for (nr = 1; nr < nr_lower; nr++) {
++ if (nr < nr_merged_lower)
++ seq_show_option(m, "lowerdir+", lowerdirs[nr]);
++ else
++ seq_show_option(m, "datadir+", lowerdirs[nr]);
+ }
+ if (ofs->config.upperdir) {
+ seq_show_option(m, "upperdir", ofs->config.upperdir);
+diff --git a/fs/overlayfs/params.h b/fs/overlayfs/params.h
+index 8750da68ab2a46..c96d939820211d 100644
+--- a/fs/overlayfs/params.h
++++ b/fs/overlayfs/params.h
+@@ -32,6 +32,7 @@ struct ovl_fs_context {
+ size_t nr_data;
+ struct ovl_opt_set set;
+ struct ovl_fs_context_layer *lower;
++ char *lowerdir_all; /* user provided lowerdir string */
+ };
+
+ int ovl_init_fs_context(struct fs_context *fc);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 3fa2416264a4e6..2c056d737c27c3 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1374,8 +1374,11 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+ ofs->layers = layers;
+ /*
+ * Layer 0 is reserved for upper even if there's no upper.
+- * For consistency, config.lowerdirs[0] is NULL.
++	 * config.lowerdirs[0] is used for storing the user-provided
++	 * colon-separated lowerdir string.
+ */
++ ofs->config.lowerdirs[0] = ctx->lowerdir_all;
++ ctx->lowerdir_all = NULL;
+ ofs->numlayer = 1;
+
+ sb->s_stack_depth = 0;
+@@ -1489,7 +1492,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+ ovl_trusted_xattr_handlers;
+ sb->s_fs_info = ofs;
+ sb->s_flags |= SB_POSIXACL;
+- sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++ sb->s_iflags |= SB_I_SKIP_SYNC;
+
+ err = -ENOMEM;
+ root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 139190165a1c22..ba4376341ddd2f 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -425,6 +425,18 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ bool was_empty = false;
+ bool wake_next_writer = false;
+
++ /*
++ * Reject writing to watch queue pipes before the point where we lock
++ * the pipe.
++ * Otherwise, lockdep would be unhappy if the caller already has another
++ * pipe locked.
++ * If we had to support locking a normal pipe and a notification pipe at
++ * the same time, we could set up lockdep annotations for that, but
++ * since we don't actually need that, it's simpler to just bail here.
++ */
++ if (pipe_has_watch_queue(pipe))
++ return -EXDEV;
++
+ /* Null write succeeds. */
+ if (unlikely(total_len == 0))
+ return 0;
+@@ -437,13 +449,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
+ goto out;
+ }
+
+-#ifdef CONFIG_WATCH_QUEUE
+- if (pipe->watch_queue) {
+- ret = -EXDEV;
+- goto out;
+- }
+-#endif
+-
+ /*
+ * If it wasn't empty we try to merge new data into
+ * the last buffer.
+@@ -1307,6 +1312,11 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ pipe->tail = tail;
+ pipe->head = head;
+
++ if (!pipe_has_watch_queue(pipe)) {
++ pipe->max_usage = nr_slots;
++ pipe->nr_accounted = nr_slots;
++ }
++
+ spin_unlock_irq(&pipe->rd_wait.lock);
+
+ /* This might have made more room for writers */
+@@ -1324,10 +1334,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
+ unsigned int nr_slots, size;
+ long ret = 0;
+
+-#ifdef CONFIG_WATCH_QUEUE
+- if (pipe->watch_queue)
++ if (pipe_has_watch_queue(pipe))
+ return -EBUSY;
+-#endif
+
+ size = round_pipe_size(arg);
+ nr_slots = size >> PAGE_SHIFT;
+@@ -1360,8 +1368,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
+ if (ret < 0)
+ goto out_revert_acct;
+
+- pipe->max_usage = nr_slots;
+- pipe->nr_accounted = nr_slots;
+ return pipe->max_usage * PAGE_SIZE;
+
+ out_revert_acct:
+@@ -1379,10 +1385,8 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
+
+ if (file->f_op != &pipefifo_fops || !pipe)
+ return NULL;
+-#ifdef CONFIG_WATCH_QUEUE
+- if (for_splice && pipe->watch_queue)
++ if (for_splice && pipe_has_watch_queue(pipe))
+ return NULL;
+-#endif
+ return pipe;
+ }
+
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 2c2efbe685d872..37b8061d84bb79 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -511,7 +511,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+
+ sigemptyset(&sigign);
+ sigemptyset(&sigcatch);
+- cutime = cstime = utime = stime = 0;
++ cutime = cstime = 0;
+ cgtime = gtime = 0;
+
+ if (lock_task_sighand(task, &flags)) {
+@@ -545,7 +545,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+
+ min_flt += sig->min_flt;
+ maj_flt += sig->maj_flt;
+- thread_group_cputime_adjusted(task, &utime, &stime);
+ gtime += sig->gtime;
+
+ if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+@@ -561,10 +560,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+
+ if (permitted && (!whole || num_threads < 2))
+ wchan = !task_is_running(task);
+- if (!whole) {
++
++ if (whole) {
++ thread_group_cputime_adjusted(task, &utime, &stime);
++ } else {
++ task_cputime_adjusted(task, &utime, &stime);
+ min_flt = task->min_flt;
+ maj_flt = task->maj_flt;
+- task_cputime_adjusted(task, &utime, &stime);
+ gtime = task_gtime(task);
+ }
+
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index ffd54617c35478..699f085d4de7d7 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -85,6 +85,7 @@
+ #include <linux/elf.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/user_namespace.h>
++#include <linux/fs_parser.h>
+ #include <linux/fs_struct.h>
+ #include <linux/slab.h>
+ #include <linux/sched/autogroup.h>
+@@ -116,6 +117,40 @@
+ static u8 nlink_tid __ro_after_init;
+ static u8 nlink_tgid __ro_after_init;
+
++enum proc_mem_force {
++ PROC_MEM_FORCE_ALWAYS,
++ PROC_MEM_FORCE_PTRACE,
++ PROC_MEM_FORCE_NEVER
++};
++
++static enum proc_mem_force proc_mem_force_override __ro_after_init =
++ IS_ENABLED(CONFIG_PROC_MEM_NO_FORCE) ? PROC_MEM_FORCE_NEVER :
++ IS_ENABLED(CONFIG_PROC_MEM_FORCE_PTRACE) ? PROC_MEM_FORCE_PTRACE :
++ PROC_MEM_FORCE_ALWAYS;
++
++static const struct constant_table proc_mem_force_table[] __initconst = {
++ { "always", PROC_MEM_FORCE_ALWAYS },
++ { "ptrace", PROC_MEM_FORCE_PTRACE },
++ { "never", PROC_MEM_FORCE_NEVER },
++ { }
++};
++
++static int __init early_proc_mem_force_override(char *buf)
++{
++ if (!buf)
++ return -EINVAL;
++
++ /*
++	 * lookup_constant() defaults to proc_mem_force_override to preserve
++ * the initial Kconfig choice in case an invalid param gets passed.
++ */
++ proc_mem_force_override = lookup_constant(proc_mem_force_table,
++ buf, proc_mem_force_override);
++
++ return 0;
++}
++early_param("proc_mem.force_override", early_proc_mem_force_override);
++
+ struct pid_entry {
+ const char *name;
+ unsigned int len;
+@@ -834,6 +869,28 @@ static int mem_open(struct inode *inode, struct file *file)
+ return ret;
+ }
+
++static bool proc_mem_foll_force(struct file *file, struct mm_struct *mm)
++{
++ struct task_struct *task;
++ bool ptrace_active = false;
++
++ switch (proc_mem_force_override) {
++ case PROC_MEM_FORCE_NEVER:
++ return false;
++ case PROC_MEM_FORCE_PTRACE:
++ task = get_proc_task(file_inode(file));
++ if (task) {
++ ptrace_active = READ_ONCE(task->ptrace) &&
++ READ_ONCE(task->mm) == mm &&
++ READ_ONCE(task->parent) == current;
++ put_task_struct(task);
++ }
++ return ptrace_active;
++ default:
++ return true;
++ }
++}
++
+ static ssize_t mem_rw(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos, int write)
+ {
+@@ -854,7 +911,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+ if (!mmget_not_zero(mm))
+ goto free;
+
+- flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
++ flags = write ? FOLL_WRITE : 0;
++ if (proc_mem_foll_force(file, mm))
++ flags |= FOLL_FORCE;
+
+ while (count > 0) {
+ size_t this_len = min_t(size_t, count, PAGE_SIZE);
+@@ -3207,7 +3266,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
+ mm = get_task_mm(task);
+ if (mm) {
+ seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+- seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
++ seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
+ seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
+ seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
+ mmput(mm);
+diff --git a/fs/proc/fd.c b/fs/proc/fd.c
+index 6276b393884279..4297287f6ca09d 100644
+--- a/fs/proc/fd.c
++++ b/fs/proc/fd.c
+@@ -74,7 +74,18 @@ static int seq_show(struct seq_file *m, void *v)
+ return 0;
+ }
+
+-static int proc_fdinfo_access_allowed(struct inode *inode)
++static int seq_fdinfo_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, seq_show, inode);
++}
++
++/**
++ * Shared /proc/pid/fdinfo and /proc/pid/fdinfo/fd permission helper to ensure
++ * that the current task has PTRACE_MODE_READ in addition to the normal
++ * POSIX-like checks.
++ */
++static int proc_fdinfo_permission(struct mnt_idmap *idmap, struct inode *inode,
++ int mask)
+ {
+ bool allowed = false;
+ struct task_struct *task = get_proc_task(inode);
+@@ -88,18 +99,13 @@ static int proc_fdinfo_access_allowed(struct inode *inode)
+ if (!allowed)
+ return -EACCES;
+
+- return 0;
++ return generic_permission(idmap, inode, mask);
+ }
+
+-static int seq_fdinfo_open(struct inode *inode, struct file *file)
+-{
+- int ret = proc_fdinfo_access_allowed(inode);
+-
+- if (ret)
+- return ret;
+-
+- return single_open(file, seq_show, inode);
+-}
++static const struct inode_operations proc_fdinfo_file_inode_operations = {
++ .permission = proc_fdinfo_permission,
++ .setattr = proc_setattr,
++};
+
+ static const struct file_operations proc_fdinfo_file_operations = {
+ .open = seq_fdinfo_open,
+@@ -385,6 +391,8 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
+ ei = PROC_I(inode);
+ ei->fd = data->fd;
+
++ inode->i_op = &proc_fdinfo_file_inode_operations;
++
+ inode->i_fop = &proc_fdinfo_file_operations;
+ tid_fd_update_inode(task, inode, 0);
+
+@@ -404,23 +412,13 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
+ proc_fdinfo_instantiate);
+ }
+
+-static int proc_open_fdinfo(struct inode *inode, struct file *file)
+-{
+- int ret = proc_fdinfo_access_allowed(inode);
+-
+- if (ret)
+- return ret;
+-
+- return 0;
+-}
+-
+ const struct inode_operations proc_fdinfo_inode_operations = {
+ .lookup = proc_lookupfdinfo,
++ .permission = proc_fdinfo_permission,
+ .setattr = proc_setattr,
+ };
+
+ const struct file_operations proc_fdinfo_operations = {
+- .open = proc_open_fdinfo,
+ .read = generic_read_dir,
+ .iterate_shared = proc_readfdinfo,
+ .llseek = generic_file_llseek,
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index 23fc24d16b31e4..7e4fa9c68c1dd0 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -50,6 +50,20 @@ static struct proc_dir_entry *proc_root_kcore;
+ #define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
+ #endif
+
++#ifndef kc_xlate_dev_mem_ptr
++#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
++static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
++{
++ return __va(phys);
++}
++#endif
++#ifndef kc_unxlate_dev_mem_ptr
++#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
++static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
++{
++}
++#endif
++
+ static LIST_HEAD(kclist_head);
+ static DECLARE_RWSEM(kclist_lock);
+ static int kcore_need_update = 1;
+@@ -471,6 +485,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
+ while (buflen) {
+ struct page *page;
+ unsigned long pfn;
++ phys_addr_t phys;
++ void *__start;
+
+ /*
+ * If this is the first iteration or the address is not within
+@@ -537,7 +553,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
+ }
+ break;
+ case KCORE_RAM:
+- pfn = __pa(start) >> PAGE_SHIFT;
++ phys = __pa(start);
++ pfn = phys >> PAGE_SHIFT;
+ page = pfn_to_online_page(pfn);
+
+ /*
+@@ -556,13 +573,28 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
+ fallthrough;
+ case KCORE_VMEMMAP:
+ case KCORE_TEXT:
++ if (m->type == KCORE_RAM) {
++ __start = kc_xlate_dev_mem_ptr(phys);
++ if (!__start) {
++ ret = -ENOMEM;
++ if (iov_iter_zero(tsz, iter) != tsz)
++ ret = -EFAULT;
++ goto out;
++ }
++ } else {
++ __start = (void *)start;
++ }
++
+ /*
+ * Sadly we must use a bounce buffer here to be able to
+ * make use of copy_from_kernel_nofault(), as these
+ * memory regions might not always be mapped on all
+ * architectures.
+ */
+- if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
++ ret = copy_from_kernel_nofault(buf, __start, tsz);
++ if (m->type == KCORE_RAM)
++ kc_unxlate_dev_mem_ptr(phys, __start);
++ if (ret) {
+ if (iov_iter_zero(tsz, iter) != tsz) {
+ ret = -EFAULT;
+ goto out;
+diff --git a/fs/proc/page.c b/fs/proc/page.c
+index 195b077c0facbf..9223856c934b40 100644
+--- a/fs/proc/page.c
++++ b/fs/proc/page.c
+@@ -67,7 +67,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
+ */
+ ppage = pfn_to_online_page(pfn);
+
+- if (!ppage || PageSlab(ppage) || page_has_type(ppage))
++ if (!ppage)
+ pcount = 0;
+ else
+ pcount = page_mapcount(ppage);
+@@ -124,11 +124,8 @@ u64 stable_page_flags(struct page *page)
+
+ /*
+ * pseudo flags for the well known (anonymous) memory mapped pages
+- *
+- * Note that page->_mapcount is overloaded in SLAB, so the
+- * simple test in page_mapped() is not enough.
+ */
+- if (!PageSlab(page) && page_mapped(page))
++ if (page_mapped(page))
+ u |= 1 << KPF_MMAP;
+ if (PageAnon(page))
+ u |= 1 << KPF_ANON;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index c88854df0b624f..071a71eb1a2d43 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -44,7 +44,7 @@ static struct ctl_table sysctl_mount_point[] = {
+ */
+ struct ctl_table_header *register_sysctl_mount_point(const char *path)
+ {
+- return register_sysctl_sz(path, sysctl_mount_point, 0);
++ return register_sysctl(path, sysctl_mount_point);
+ }
+ EXPORT_SYMBOL(register_sysctl_mount_point);
+
+@@ -233,7 +233,8 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
+ return -EROFS;
+
+ /* Am I creating a permanently empty directory? */
+- if (sysctl_is_perm_empty_ctl_table(header->ctl_table)) {
++ if (header->ctl_table_size > 0 &&
++ sysctl_is_perm_empty_ctl_table(header->ctl_table)) {
+ if (!RB_EMPTY_ROOT(&dir->root))
+ return -EINVAL;
+ sysctl_set_perm_empty_ctl_header(dir_h);
+@@ -479,12 +480,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+ make_empty_dir_inode(inode);
+ }
+
++ inode->i_uid = GLOBAL_ROOT_UID;
++ inode->i_gid = GLOBAL_ROOT_GID;
+ if (root->set_ownership)
+- root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+- else {
+- inode->i_uid = GLOBAL_ROOT_UID;
+- inode->i_gid = GLOBAL_ROOT_GID;
+- }
++ root->set_ownership(head, &inode->i_uid, &inode->i_gid);
+
+ return inode;
+ }
+@@ -1213,6 +1212,10 @@ static bool get_links(struct ctl_dir *dir,
+ struct ctl_table_header *tmp_head;
+ struct ctl_table *entry, *link;
+
++ if (header->ctl_table_size == 0 ||
++ sysctl_is_perm_empty_ctl_table(header->ctl_table))
++ return true;
++
+ /* Are there links available for every entry in table? */
+ list_for_each_table_entry(entry, header) {
+ const char *procname = entry->procname;
+@@ -1576,7 +1579,6 @@ static const struct sysctl_alias sysctl_aliases[] = {
+ {"hung_task_panic", "kernel.hung_task_panic" },
+ {"numa_zonelist_order", "vm.numa_zonelist_order" },
+ {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" },
+- {"softlockup_panic", "kernel.softlockup_panic" },
+ { }
+ };
+
+@@ -1592,6 +1594,13 @@ static const char *sysctl_find_alias(char *param)
+ return NULL;
+ }
+
++bool sysctl_is_alias(char *param)
++{
++ const char *alias = sysctl_find_alias(param);
++
++ return alias != NULL;
++}
++
+ /* Set sysctl value passed on kernel command line. */
+ static int process_sysctl_arg(char *param, char *val,
+ const char *unused, void *arg)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 3dd5be96691b4c..59571737e16771 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -965,12 +965,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
+ break;
+
+ /* Case 1 and 2 above */
+- if (vma->vm_start >= last_vma_end)
++ if (vma->vm_start >= last_vma_end) {
++ smap_gather_stats(vma, &mss, 0);
++ last_vma_end = vma->vm_end;
+ continue;
++ }
+
+ /* Case 4 above */
+- if (vma->vm_end > last_vma_end)
++ if (vma->vm_end > last_vma_end) {
+ smap_gather_stats(vma, &mss, last_vma_end);
++ last_vma_end = vma->vm_end;
++ }
+ }
+ } for_each_vma(vmi, vma);
+
+@@ -1353,8 +1358,7 @@ static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
+ return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
+ }
+
+-static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
+- struct pagemapread *pm)
++static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
+ {
+ pm->buffer[pm->pos++] = *pme;
+ if (pm->pos >= pm->len)
+@@ -1381,7 +1385,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
+ hole_end = end;
+
+ for (; addr < hole_end; addr += PAGE_SIZE) {
+- err = add_to_pagemap(addr, &pme, pm);
++ err = add_to_pagemap(&pme, pm);
+ if (err)
+ goto out;
+ }
+@@ -1393,7 +1397,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ pme = make_pme(0, PM_SOFT_DIRTY);
+ for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
+- err = add_to_pagemap(addr, &pme, pm);
++ err = add_to_pagemap(&pme, pm);
+ if (err)
+ goto out;
+ }
+@@ -1407,7 +1411,6 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+ {
+ u64 frame = 0, flags = 0;
+ struct page *page = NULL;
+- bool migration = false;
+
+ if (pte_present(pte)) {
+ if (pm->show_pfn)
+@@ -1439,7 +1442,6 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+ (offset << MAX_SWAPFILES_SHIFT);
+ }
+ flags |= PM_SWAP;
+- migration = is_migration_entry(entry);
+ if (is_pfn_swap_entry(entry))
+ page = pfn_swap_entry_to_page(entry);
+ if (pte_marker_entry_uffd_wp(entry))
+@@ -1448,7 +1450,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+
+ if (page && !PageAnon(page))
+ flags |= PM_FILE;
+- if (page && !migration && page_mapcount(page) == 1)
++ if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1)
+ flags |= PM_MMAP_EXCLUSIVE;
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ flags |= PM_SOFT_DIRTY;
+@@ -1465,10 +1467,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ pte_t *pte, *orig_pte;
+ int err = 0;
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+- bool migration = false;
+
+ ptl = pmd_trans_huge_lock(pmdp, vma);
+ if (ptl) {
++ unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
+ u64 flags = 0, frame = 0;
+ pmd_t pmd = *pmdp;
+ struct page *page = NULL;
+@@ -1485,8 +1487,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ if (pmd_uffd_wp(pmd))
+ flags |= PM_UFFD_WP;
+ if (pm->show_pfn)
+- frame = pmd_pfn(pmd) +
+- ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++ frame = pmd_pfn(pmd) + idx;
+ }
+ #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+ else if (is_swap_pmd(pmd)) {
+@@ -1495,11 +1496,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+
+ if (pm->show_pfn) {
+ if (is_pfn_swap_entry(entry))
+- offset = swp_offset_pfn(entry);
++ offset = swp_offset_pfn(entry) + idx;
+ else
+- offset = swp_offset(entry);
+- offset = offset +
+- ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++ offset = swp_offset(entry) + idx;
+ frame = swp_type(entry) |
+ (offset << MAX_SWAPFILES_SHIFT);
+ }
+@@ -1509,18 +1508,23 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ if (pmd_swp_uffd_wp(pmd))
+ flags |= PM_UFFD_WP;
+ VM_BUG_ON(!is_pmd_migration_entry(pmd));
+- migration = is_migration_entry(entry);
+ page = pfn_swap_entry_to_page(entry);
+ }
+ #endif
+
+- if (page && !migration && page_mapcount(page) == 1)
+- flags |= PM_MMAP_EXCLUSIVE;
++ if (page && !PageAnon(page))
++ flags |= PM_FILE;
++
++ for (; addr != end; addr += PAGE_SIZE, idx++) {
++ unsigned long cur_flags = flags;
++ pagemap_entry_t pme;
+
+- for (; addr != end; addr += PAGE_SIZE) {
+- pagemap_entry_t pme = make_pme(frame, flags);
++ if (page && (flags & PM_PRESENT) &&
++ page_mapcount(page + idx) == 1)
++ cur_flags |= PM_MMAP_EXCLUSIVE;
+
+- err = add_to_pagemap(addr, &pme, pm);
++ pme = make_pme(frame, cur_flags);
++ err = add_to_pagemap(&pme, pm);
+ if (err)
+ break;
+ if (pm->show_pfn) {
+@@ -1548,7 +1552,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ pagemap_entry_t pme;
+
+ pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
+- err = add_to_pagemap(addr, &pme, pm);
++ err = add_to_pagemap(&pme, pm);
+ if (err)
+ break;
+ }
+@@ -1598,7 +1602,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
+ for (; addr != end; addr += PAGE_SIZE) {
+ pagemap_entry_t pme = make_pme(frame, flags);
+
+- err = add_to_pagemap(addr, &pme, pm);
++ err = add_to_pagemap(&pme, pm);
+ if (err)
+ return err;
+ if (pm->show_pfn && (flags & PM_PRESENT))
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 1fb213f379a5b6..d06607a1f137a7 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
+ /* leave now if filled buffer already */
+ if (!iov_iter_count(iter))
+ return acc;
++
++ cond_resched();
+ }
+
+ list_for_each_entry(m, &vmcore_list, list) {
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 585360706b335f..7dbbf3b6d98d3c 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -182,25 +182,21 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
+ {
+ struct pstore_private *p = d_inode(dentry)->i_private;
+ struct pstore_record *record = p->record;
+- int rc = 0;
+
+ if (!record->psi->erase)
+ return -EPERM;
+
+ /* Make sure we can't race while removing this file. */
+- mutex_lock(&records_list_lock);
+- if (!list_empty(&p->list))
+- list_del_init(&p->list);
+- else
+- rc = -ENOENT;
+- p->dentry = NULL;
+- mutex_unlock(&records_list_lock);
+- if (rc)
+- return rc;
+-
+- mutex_lock(&record->psi->read_mutex);
+- record->psi->erase(record);
+- mutex_unlock(&record->psi->read_mutex);
++ scoped_guard(mutex, &records_list_lock) {
++ if (!list_empty(&p->list))
++ list_del_init(&p->list);
++ else
++ return -ENOENT;
++ p->dentry = NULL;
++ }
++
++ scoped_guard(mutex, &record->psi->read_mutex)
++ record->psi->erase(record);
+
+ return simple_unlink(dir, dentry);
+ }
+@@ -292,19 +288,16 @@ static struct dentry *psinfo_lock_root(void)
+ {
+ struct dentry *root;
+
+- mutex_lock(&pstore_sb_lock);
++ guard(mutex)(&pstore_sb_lock);
+ /*
+ * Having no backend is fine -- no records appear.
+ * Not being mounted is fine -- nothing to do.
+ */
+- if (!psinfo || !pstore_sb) {
+- mutex_unlock(&pstore_sb_lock);
++ if (!psinfo || !pstore_sb)
+ return NULL;
+- }
+
+ root = pstore_sb->s_root;
+ inode_lock(d_inode(root));
+- mutex_unlock(&pstore_sb_lock);
+
+ return root;
+ }
+@@ -313,29 +306,25 @@ int pstore_put_backend_records(struct pstore_info *psi)
+ {
+ struct pstore_private *pos, *tmp;
+ struct dentry *root;
+- int rc = 0;
+
+ root = psinfo_lock_root();
+ if (!root)
+ return 0;
+
+- mutex_lock(&records_list_lock);
+- list_for_each_entry_safe(pos, tmp, &records_list, list) {
+- if (pos->record->psi == psi) {
+- list_del_init(&pos->list);
+- rc = simple_unlink(d_inode(root), pos->dentry);
+- if (WARN_ON(rc))
+- break;
+- d_drop(pos->dentry);
+- dput(pos->dentry);
+- pos->dentry = NULL;
++ scoped_guard(mutex, &records_list_lock) {
++ list_for_each_entry_safe(pos, tmp, &records_list, list) {
++ if (pos->record->psi == psi) {
++ list_del_init(&pos->list);
++ d_invalidate(pos->dentry);
++ simple_unlink(d_inode(root), pos->dentry);
++ pos->dentry = NULL;
++ }
+ }
+ }
+- mutex_unlock(&records_list_lock);
+
+ inode_unlock(d_inode(root));
+
+- return rc;
++ return 0;
+ }
+
+ /*
+@@ -355,20 +344,20 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ if (WARN_ON(!inode_is_locked(d_inode(root))))
+ return -EINVAL;
+
+- rc = -EEXIST;
++ guard(mutex)(&records_list_lock);
++
+ /* Skip records that are already present in the filesystem. */
+- mutex_lock(&records_list_lock);
+ list_for_each_entry(pos, &records_list, list) {
+ if (pos->record->type == record->type &&
+ pos->record->id == record->id &&
+ pos->record->psi == record->psi)
+- goto fail;
++ return -EEXIST;
+ }
+
+ rc = -ENOMEM;
+ inode = pstore_get_inode(root->d_sb);
+ if (!inode)
+- goto fail;
++ return -ENOMEM;
+ inode->i_mode = S_IFREG | 0444;
+ inode->i_fop = &pstore_file_operations;
+ scnprintf(name, sizeof(name), "%s-%s-%llu%s",
+@@ -395,7 +384,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ d_add(dentry, inode);
+
+ list_add(&private->list, &records_list);
+- mutex_unlock(&records_list_lock);
+
+ return 0;
+
+@@ -403,8 +391,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ free_pstore_private(private);
+ fail_inode:
+ iput(inode);
+-fail:
+- mutex_unlock(&records_list_lock);
+ return rc;
+ }
+
+@@ -450,9 +436,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
+ if (!sb->s_root)
+ return -ENOMEM;
+
+- mutex_lock(&pstore_sb_lock);
+- pstore_sb = sb;
+- mutex_unlock(&pstore_sb_lock);
++ scoped_guard(mutex, &pstore_sb_lock)
++ pstore_sb = sb;
+
+ pstore_get_records(0);
+
+@@ -467,17 +452,14 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
+
+ static void pstore_kill_sb(struct super_block *sb)
+ {
+- mutex_lock(&pstore_sb_lock);
++ guard(mutex)(&pstore_sb_lock);
+ WARN_ON(pstore_sb && pstore_sb != sb);
+
+ kill_litter_super(sb);
+ pstore_sb = NULL;
+
+- mutex_lock(&records_list_lock);
++ guard(mutex)(&records_list_lock);
+ INIT_LIST_HEAD(&records_list);
+- mutex_unlock(&records_list_lock);
+-
+- mutex_unlock(&pstore_sb_lock);
+ }
+
+ static struct file_system_type pstore_fs_type = {
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index e5bca9a004cccd..03425928d2fb3c 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -464,6 +464,8 @@ static int pstore_write_user_compat(struct pstore_record *record,
+ */
+ int pstore_register(struct pstore_info *psi)
+ {
++ char *new_backend;
++
+ if (backend && strcmp(backend, psi->name)) {
+ pr_warn("backend '%s' already in use: ignoring '%s'\n",
+ backend, psi->name);
+@@ -484,11 +486,16 @@ int pstore_register(struct pstore_info *psi)
+ return -EINVAL;
+ }
+
++ new_backend = kstrdup(psi->name, GFP_KERNEL);
++ if (!new_backend)
++ return -ENOMEM;
++
+ mutex_lock(&psinfo_lock);
+ if (psinfo) {
+ pr_warn("backend '%s' already loaded: ignoring '%s'\n",
+ psinfo->name, psi->name);
+ mutex_unlock(&psinfo_lock);
++ kfree(new_backend);
+ return -EBUSY;
+ }
+
+@@ -521,7 +528,7 @@ int pstore_register(struct pstore_info *psi)
+ * Update the module parameter backend, so it is visible
+ * through /sys/module/pstore/parameters/backend
+ */
+- backend = kstrdup(psi->name, GFP_KERNEL);
++ backend = new_backend;
+
+ pr_info("Registered %s as persistent store backend\n", psi->name);
+
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index d36702c7ab3c43..88b34fdbf7592f 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -529,6 +529,7 @@ static int ramoops_init_przs(const char *name,
+ }
+
+ zone_sz = mem_sz / *cnt;
++ zone_sz = ALIGN_DOWN(zone_sz, 2);
+ if (!zone_sz) {
+ dev_err(dev, "%s zone size == 0\n", name);
+ goto fail;
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 650e437b55e6b6..f1848cdd6d3485 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -190,7 +190,7 @@ static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+ {
+ int numerr;
+ struct persistent_ram_buffer *buffer = prz->buffer;
+- int ecc_blocks;
++ size_t ecc_blocks;
+ size_t ecc_total;
+
+ if (!ecc_info || !ecc_info->ecc_size)
+diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
+index 2770746bb7aa16..abca117725c816 100644
+--- a/fs/pstore/zone.c
++++ b/fs/pstore/zone.c
+@@ -973,6 +973,8 @@ static ssize_t psz_kmsg_read(struct pstore_zone *zone,
+ char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
+ kmsg_dump_reason_str(record->reason),
+ record->count);
++ if (!buf)
++ return -ENOMEM;
+ hlen = strlen(buf);
+ record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
+ if (!record->buf) {
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 31e897ad5e6a79..23dbde1de25205 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -399,15 +399,17 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
+ EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+
+ /* Dirtify all the dquots - this can block when journalling */
+-static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
++static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
+ {
+ int ret, err, cnt;
++ struct dquot *dquot;
+
+ ret = err = 0;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- if (dquot[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (dquot)
+ /* Even in case of error we have to continue */
+- ret = mark_dquot_dirty(dquot[cnt]);
++ ret = mark_dquot_dirty(dquot);
+ if (!err)
+ err = ret;
+ }
+@@ -993,9 +995,8 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
+ * smp_mb__before_atomic() in dquot_acquire().
+ */
+ smp_rmb();
+-#ifdef CONFIG_QUOTA_DEBUG
+- BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
+-#endif
++ /* Has somebody invalidated entry under us? */
++ WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
+ out:
+ if (empty)
+ do_destroy_dquot(empty);
+@@ -1004,14 +1005,14 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
+ }
+ EXPORT_SYMBOL(dqget);
+
+-static inline struct dquot **i_dquot(struct inode *inode)
++static inline struct dquot __rcu **i_dquot(struct inode *inode)
+ {
+ return inode->i_sb->s_op->get_dquots(inode);
+ }
+
+ static int dqinit_needed(struct inode *inode, int type)
+ {
+- struct dquot * const *dquots;
++ struct dquot __rcu * const *dquots;
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+@@ -1101,14 +1102,16 @@ static void remove_dquot_ref(struct super_block *sb, int type)
+ */
+ spin_lock(&dq_data_lock);
+ if (!IS_NOQUOTA(inode)) {
+- struct dquot **dquots = i_dquot(inode);
+- struct dquot *dquot = dquots[type];
++ struct dquot __rcu **dquots = i_dquot(inode);
++ struct dquot *dquot = srcu_dereference_check(
++ dquots[type], &dquot_srcu,
++ lockdep_is_held(&dq_data_lock));
+
+ #ifdef CONFIG_QUOTA_DEBUG
+ if (unlikely(inode_get_rsv_space(inode) > 0))
+ reserved = 1;
+ #endif
+- dquots[type] = NULL;
++ rcu_assign_pointer(dquots[type], NULL);
+ if (dquot)
+ dqput(dquot);
+ }
+@@ -1461,7 +1464,8 @@ static int inode_quota_active(const struct inode *inode)
+ static int __dquot_initialize(struct inode *inode, int type)
+ {
+ int cnt, init_needed = 0;
+- struct dquot **dquots, *got[MAXQUOTAS] = {};
++ struct dquot __rcu **dquots;
++ struct dquot *got[MAXQUOTAS] = {};
+ struct super_block *sb = inode->i_sb;
+ qsize_t rsv;
+ int ret = 0;
+@@ -1536,7 +1540,7 @@ static int __dquot_initialize(struct inode *inode, int type)
+ if (!got[cnt])
+ continue;
+ if (!dquots[cnt]) {
+- dquots[cnt] = got[cnt];
++ rcu_assign_pointer(dquots[cnt], got[cnt]);
+ got[cnt] = NULL;
+ /*
+ * Make quota reservation system happy if someone
+@@ -1544,12 +1548,16 @@ static int __dquot_initialize(struct inode *inode, int type)
+ */
+ rsv = inode_get_rsv_space(inode);
+ if (unlikely(rsv)) {
++ struct dquot *dquot = srcu_dereference_check(
++ dquots[cnt], &dquot_srcu,
++ lockdep_is_held(&dq_data_lock));
++
+ spin_lock(&inode->i_lock);
+ /* Get reservation again under proper lock */
+ rsv = __inode_get_rsv_space(inode);
+- spin_lock(&dquots[cnt]->dq_dqb_lock);
+- dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
+- spin_unlock(&dquots[cnt]->dq_dqb_lock);
++ spin_lock(&dquot->dq_dqb_lock);
++ dquot->dq_dqb.dqb_rsvspace += rsv;
++ spin_unlock(&dquot->dq_dqb_lock);
+ spin_unlock(&inode->i_lock);
+ }
+ }
+@@ -1571,7 +1579,7 @@ EXPORT_SYMBOL(dquot_initialize);
+
+ bool dquot_initialize_needed(struct inode *inode)
+ {
+- struct dquot **dquots;
++ struct dquot __rcu **dquots;
+ int i;
+
+ if (!inode_quota_active(inode))
+@@ -1596,13 +1604,14 @@ EXPORT_SYMBOL(dquot_initialize_needed);
+ static void __dquot_drop(struct inode *inode)
+ {
+ int cnt;
+- struct dquot **dquots = i_dquot(inode);
++ struct dquot __rcu **dquots = i_dquot(inode);
+ struct dquot *put[MAXQUOTAS];
+
+ spin_lock(&dq_data_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- put[cnt] = dquots[cnt];
+- dquots[cnt] = NULL;
++ put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
++ lockdep_is_held(&dq_data_lock));
++ rcu_assign_pointer(dquots[cnt], NULL);
+ }
+ spin_unlock(&dq_data_lock);
+ dqput_all(put);
+@@ -1610,7 +1619,7 @@ static void __dquot_drop(struct inode *inode)
+
+ void dquot_drop(struct inode *inode)
+ {
+- struct dquot * const *dquots;
++ struct dquot __rcu * const *dquots;
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+@@ -1683,7 +1692,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+ int cnt, ret = 0, index;
+ struct dquot_warn warn[MAXQUOTAS];
+ int reserve = flags & DQUOT_SPACE_RESERVE;
+- struct dquot **dquots;
++ struct dquot __rcu **dquots;
++ struct dquot *dquot;
+
+ if (!inode_quota_active(inode)) {
+ if (reserve) {
+@@ -1703,27 +1713,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- if (!dquots[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (!dquot)
+ continue;
+ if (reserve) {
+- ret = dquot_add_space(dquots[cnt], 0, number, flags,
+- &warn[cnt]);
++ ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
+ } else {
+- ret = dquot_add_space(dquots[cnt], number, 0, flags,
+- &warn[cnt]);
++ ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
+ }
+ if (ret) {
+ /* Back out changes we already did */
+ for (cnt--; cnt >= 0; cnt--) {
+- if (!dquots[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (!dquot)
+ continue;
+- spin_lock(&dquots[cnt]->dq_dqb_lock);
++ spin_lock(&dquot->dq_dqb_lock);
+ if (reserve)
+- dquot_free_reserved_space(dquots[cnt],
+- number);
++ dquot_free_reserved_space(dquot, number);
+ else
+- dquot_decr_space(dquots[cnt], number);
+- spin_unlock(&dquots[cnt]->dq_dqb_lock);
++ dquot_decr_space(dquot, number);
++ spin_unlock(&dquot->dq_dqb_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ goto out_flush_warn;
+@@ -1753,7 +1762,8 @@ int dquot_alloc_inode(struct inode *inode)
+ {
+ int cnt, ret = 0, index;
+ struct dquot_warn warn[MAXQUOTAS];
+- struct dquot * const *dquots;
++ struct dquot __rcu * const *dquots;
++ struct dquot *dquot;
+
+ if (!inode_quota_active(inode))
+ return 0;
+@@ -1764,17 +1774,19 @@ int dquot_alloc_inode(struct inode *inode)
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- if (!dquots[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (!dquot)
+ continue;
+- ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
++ ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
+ if (ret) {
+ for (cnt--; cnt >= 0; cnt--) {
+- if (!dquots[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (!dquot)
+ continue;
+ /* Back out changes we already did */
+- spin_lock(&dquots[cnt]->dq_dqb_lock);
+- dquot_decr_inodes(dquots[cnt], 1);
+- spin_unlock(&dquots[cnt]->dq_dqb_lock);
++ spin_lock(&dquot->dq_dqb_lock);
++ dquot_decr_inodes(dquot, 1);
++ spin_unlock(&dquot->dq_dqb_lock);
+ }
+ goto warn_put_all;
+ }
+@@ -1795,7 +1807,8 @@ EXPORT_SYMBOL(dquot_alloc_inode);
+ */
+ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+ {
+- struct dquot **dquots;
++ struct dquot __rcu **dquots;
++ struct dquot *dquot;
+ int cnt, index;
+
+ if (!inode_quota_active(inode)) {
+@@ -1811,9 +1824,8 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+ spin_lock(&inode->i_lock);
+ /* Claim reserved quotas to allocated quotas */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- if (dquots[cnt]) {
+- struct dquot *dquot = dquots[cnt];
+-
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (dquot) {
+ spin_lock(&dquot->dq_dqb_lock);
+ if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
+ number = dquot->dq_dqb.dqb_rsvspace;
+@@ -1837,7 +1849,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
+ */
+ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+ {
+- struct dquot **dquots;
++ struct dquot __rcu **dquots;
++ struct dquot *dquot;
+ int cnt, index;
+
+ if (!inode_quota_active(inode)) {
+@@ -1853,9 +1866,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+ spin_lock(&inode->i_lock);
+ /* Claim reserved quotas to allocated quotas */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+- if (dquots[cnt]) {
+- struct dquot *dquot = dquots[cnt];
+-
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (dquot) {
+ spin_lock(&dquot->dq_dqb_lock);
+ if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+ number = dquot->dq_dqb.dqb_curspace;
+@@ -1881,7 +1893,8 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+ {
+ unsigned int cnt;
+ struct dquot_warn warn[MAXQUOTAS];
+- struct dquot **dquots;
++ struct dquot __rcu **dquots;
++ struct dquot *dquot;
+ int reserve = flags & DQUOT_SPACE_RESERVE, index;
+
+ if (!inode_quota_active(inode)) {
+@@ -1902,17 +1915,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+ int wtype;
+
+ warn[cnt].w_type = QUOTA_NL_NOWARN;
+- if (!dquots[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (!dquot)
+ continue;
+- spin_lock(&dquots[cnt]->dq_dqb_lock);
+- wtype = info_bdq_free(dquots[cnt], number);
++ spin_lock(&dquot->dq_dqb_lock);
++ wtype = info_bdq_free(dquot, number);
+ if (wtype != QUOTA_NL_NOWARN)
+- prepare_warning(&warn[cnt], dquots[cnt], wtype);
++ prepare_warning(&warn[cnt], dquot, wtype);
+ if (reserve)
+- dquot_free_reserved_space(dquots[cnt], number);
++ dquot_free_reserved_space(dquot, number);
+ else
+- dquot_decr_space(dquots[cnt], number);
+- spin_unlock(&dquots[cnt]->dq_dqb_lock);
++ dquot_decr_space(dquot, number);
++ spin_unlock(&dquot->dq_dqb_lock);
+ }
+ if (reserve)
+ *inode_reserved_space(inode) -= number;
+@@ -1936,7 +1950,8 @@ void dquot_free_inode(struct inode *inode)
+ {
+ unsigned int cnt;
+ struct dquot_warn warn[MAXQUOTAS];
+- struct dquot * const *dquots;
++ struct dquot __rcu * const *dquots;
++ struct dquot *dquot;
+ int index;
+
+ if (!inode_quota_active(inode))
+@@ -1947,16 +1962,16 @@ void dquot_free_inode(struct inode *inode)
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ int wtype;
+-
+ warn[cnt].w_type = QUOTA_NL_NOWARN;
+- if (!dquots[cnt])
++ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
++ if (!dquot)
+ continue;
+- spin_lock(&dquots[cnt]->dq_dqb_lock);
+- wtype = info_idq_free(dquots[cnt], 1);
++ spin_lock(&dquot->dq_dqb_lock);
++ wtype = info_idq_free(dquot, 1);
+ if (wtype != QUOTA_NL_NOWARN)
+- prepare_warning(&warn[cnt], dquots[cnt], wtype);
+- dquot_decr_inodes(dquots[cnt], 1);
+- spin_unlock(&dquots[cnt]->dq_dqb_lock);
++ prepare_warning(&warn[cnt], dquot, wtype);
++ dquot_decr_inodes(dquot, 1);
++ spin_unlock(&dquot->dq_dqb_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ mark_all_dquot_dirty(dquots);
+@@ -1982,8 +1997,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ qsize_t cur_space;
+ qsize_t rsv_space = 0;
+ qsize_t inode_usage = 1;
++ struct dquot __rcu **dquots;
+ struct dquot *transfer_from[MAXQUOTAS] = {};
+- int cnt, ret = 0;
++ int cnt, index, ret = 0;
+ char is_valid[MAXQUOTAS] = {};
+ struct dquot_warn warn_to[MAXQUOTAS];
+ struct dquot_warn warn_from_inodes[MAXQUOTAS];
+@@ -2014,6 +2030,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ }
+ cur_space = __inode_get_bytes(inode);
+ rsv_space = __inode_get_rsv_space(inode);
++ dquots = i_dquot(inode);
+ /*
+ * Build the transfer_from list, check limits, and update usage in
+ * the target structures.
+@@ -2028,7 +2045,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ if (!sb_has_quota_active(inode->i_sb, cnt))
+ continue;
+ is_valid[cnt] = 1;
+- transfer_from[cnt] = i_dquot(inode)[cnt];
++ transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
++ &dquot_srcu, lockdep_is_held(&dq_data_lock));
+ ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
+ &warn_to[cnt]);
+ if (ret)
+@@ -2067,13 +2085,21 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+ rsv_space);
+ spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
+ }
+- i_dquot(inode)[cnt] = transfer_to[cnt];
++ rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
+ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&dq_data_lock);
+
+- mark_all_dquot_dirty(transfer_from);
+- mark_all_dquot_dirty(transfer_to);
++ /*
++	 * These arrays are local and we hold dquot references, so we don't
++	 * need the srcu protection, but we still take dquot_srcu to avoid a
++	 * warning in mark_all_dquot_dirty().
++ */
++ index = srcu_read_lock(&dquot_srcu);
++ mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
++ mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
++ srcu_read_unlock(&dquot_srcu, index);
++
+ flush_warnings(warn_to);
+ flush_warnings(warn_from_inodes);
+ flush_warnings(warn_from_space);
+@@ -2351,6 +2377,20 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
++ /*
++ * Quota files should never be encrypted. They should be thought of as
++ * filesystem metadata, not user data. New-style internal quota files
++ * cannot be encrypted by users anyway, but old-style external quota
++ * files could potentially be incorrectly created in an encrypted
++ * directory, hence this explicit check. Some reasons why encrypted
++ * quota files don't work include: (1) some filesystems that support
++ * encryption don't handle it in their quota_read and quota_write, and
++ * (2) cleaning up encrypted quota files at unmount would need special
++ * consideration, as quota files are cleaned up later than user files.
++ */
++ if (IS_ENCRYPTED(inode))
++ return -EINVAL;
++
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
+index 0f1493e0f6d059..254f6359b287fa 100644
+--- a/fs/quota/quota_tree.c
++++ b/fs/quota/quota_tree.c
+@@ -21,6 +21,12 @@ MODULE_AUTHOR("Jan Kara");
+ MODULE_DESCRIPTION("Quota trie support");
+ MODULE_LICENSE("GPL");
+
++/*
++ * Maximum quota tree depth we support. Only to limit recursion when working
++ * with the tree.
++ */
++#define MAX_QTREE_DEPTH 6
++
+ #define __QUOTA_QT_PARANOIA
+
+ static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
+@@ -327,27 +333,36 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
+
+ /* Insert reference to structure into the trie */
+ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+- uint *treeblk, int depth)
++ uint *blks, int depth)
+ {
+ char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
+ int ret = 0, newson = 0, newact = 0;
+ __le32 *ref;
+ uint newblk;
++ int i;
+
+ if (!buf)
+ return -ENOMEM;
+- if (!*treeblk) {
++ if (!blks[depth]) {
+ ret = get_free_dqblk(info);
+ if (ret < 0)
+ goto out_buf;
+- *treeblk = ret;
++ for (i = 0; i < depth; i++)
++ if (ret == blks[i]) {
++ quota_error(dquot->dq_sb,
++ "Free block already used in tree: block %u",
++ ret);
++ ret = -EIO;
++ goto out_buf;
++ }
++ blks[depth] = ret;
+ memset(buf, 0, info->dqi_usable_bs);
+ newact = 1;
+ } else {
+- ret = read_blk(info, *treeblk, buf);
++ ret = read_blk(info, blks[depth], buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read tree quota "
+- "block %u", *treeblk);
++ "block %u", blks[depth]);
+ goto out_buf;
+ }
+ }
+@@ -357,8 +372,20 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ info->dqi_blocks - 1);
+ if (ret)
+ goto out_buf;
+- if (!newblk)
++ if (!newblk) {
+ newson = 1;
++ } else {
++ for (i = 0; i <= depth; i++)
++ if (newblk == blks[i]) {
++ quota_error(dquot->dq_sb,
++ "Cycle in quota tree detected: block %u index %u",
++ blks[depth],
++ get_index(info, dquot->dq_id, depth));
++ ret = -EIO;
++ goto out_buf;
++ }
++ }
++ blks[depth + 1] = newblk;
+ if (depth == info->dqi_qtree_depth - 1) {
+ #ifdef __QUOTA_QT_PARANOIA
+ if (newblk) {
+@@ -370,16 +397,16 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ goto out_buf;
+ }
+ #endif
+- newblk = find_free_dqentry(info, dquot, &ret);
++ blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
+ } else {
+- ret = do_insert_tree(info, dquot, &newblk, depth+1);
++ ret = do_insert_tree(info, dquot, blks, depth + 1);
+ }
+ if (newson && ret >= 0) {
+ ref[get_index(info, dquot->dq_id, depth)] =
+- cpu_to_le32(newblk);
+- ret = write_blk(info, *treeblk, buf);
++ cpu_to_le32(blks[depth + 1]);
++ ret = write_blk(info, blks[depth], buf);
+ } else if (newact && ret < 0) {
+- put_free_dqblk(info, buf, *treeblk);
++ put_free_dqblk(info, buf, blks[depth]);
+ }
+ out_buf:
+ kfree(buf);
+@@ -390,7 +417,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot)
+ {
+- int tmp = QT_TREEOFF;
++ uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
+
+ #ifdef __QUOTA_QT_PARANOIA
+ if (info->dqi_blocks <= QT_TREEOFF) {
+@@ -398,7 +425,11 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
+ return -EIO;
+ }
+ #endif
+- return do_insert_tree(info, dquot, &tmp, 0);
++ if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
++ quota_error(dquot->dq_sb, "Quota tree depth too big!");
++ return -EIO;
++ }
++ return do_insert_tree(info, dquot, blks, 0);
+ }
+
+ /*
+@@ -511,19 +542,20 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+
+ /* Remove reference to dquot from tree */
+ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+- uint *blk, int depth)
++ uint *blks, int depth)
+ {
+ char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
+ int ret = 0;
+ uint newblk;
+ __le32 *ref = (__le32 *)buf;
++ int i;
+
+ if (!buf)
+ return -ENOMEM;
+- ret = read_blk(info, *blk, buf);
++ ret = read_blk(info, blks[depth], buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read quota data block %u",
+- *blk);
++ blks[depth]);
+ goto out_buf;
+ }
+ newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+@@ -532,29 +564,38 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ if (ret)
+ goto out_buf;
+
++ for (i = 0; i <= depth; i++)
++ if (newblk == blks[i]) {
++ quota_error(dquot->dq_sb,
++ "Cycle in quota tree detected: block %u index %u",
++ blks[depth],
++ get_index(info, dquot->dq_id, depth));
++ ret = -EIO;
++ goto out_buf;
++ }
+ if (depth == info->dqi_qtree_depth - 1) {
+ ret = free_dqentry(info, dquot, newblk);
+- newblk = 0;
++ blks[depth + 1] = 0;
+ } else {
+- ret = remove_tree(info, dquot, &newblk, depth+1);
++ blks[depth + 1] = newblk;
++ ret = remove_tree(info, dquot, blks, depth + 1);
+ }
+- if (ret >= 0 && !newblk) {
+- int i;
++ if (ret >= 0 && !blks[depth + 1]) {
+ ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
+ /* Block got empty? */
+ for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+ ;
+ /* Don't put the root block into the free block list */
+ if (i == (info->dqi_usable_bs >> 2)
+- && *blk != QT_TREEOFF) {
+- put_free_dqblk(info, buf, *blk);
+- *blk = 0;
++ && blks[depth] != QT_TREEOFF) {
++ put_free_dqblk(info, buf, blks[depth]);
++ blks[depth] = 0;
+ } else {
+- ret = write_blk(info, *blk, buf);
++ ret = write_blk(info, blks[depth], buf);
+ if (ret < 0)
+ quota_error(dquot->dq_sb,
+ "Can't write quota tree block %u",
+- *blk);
++ blks[depth]);
+ }
+ }
+ out_buf:
+@@ -565,11 +606,15 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ /* Delete dquot from tree */
+ int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+ {
+- uint tmp = QT_TREEOFF;
++ uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
+
+ if (!dquot->dq_off) /* Even not allocated? */
+ return 0;
+- return remove_tree(info, dquot, &tmp, 0);
++ if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
++ quota_error(dquot->dq_sb, "Quota tree depth too big!");
++ return -EIO;
++ }
++ return remove_tree(info, dquot, blks, 0);
+ }
+ EXPORT_SYMBOL(qtree_delete_dquot);
+
+@@ -613,18 +658,20 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
+
+ /* Find entry for given id in the tree */
+ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+- struct dquot *dquot, uint blk, int depth)
++ struct dquot *dquot, uint *blks, int depth)
+ {
+ char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
+ loff_t ret = 0;
+ __le32 *ref = (__le32 *)buf;
++ uint blk;
++ int i;
+
+ if (!buf)
+ return -ENOMEM;
+- ret = read_blk(info, blk, buf);
++ ret = read_blk(info, blks[depth], buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read quota tree block %u",
+- blk);
++ blks[depth]);
+ goto out_buf;
+ }
+ ret = 0;
+@@ -636,8 +683,19 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+ if (ret)
+ goto out_buf;
+
++ /* Check for cycles in the tree */
++ for (i = 0; i <= depth; i++)
++ if (blk == blks[i]) {
++ quota_error(dquot->dq_sb,
++ "Cycle in quota tree detected: block %u index %u",
++ blks[depth],
++ get_index(info, dquot->dq_id, depth));
++ ret = -EIO;
++ goto out_buf;
++ }
++ blks[depth + 1] = blk;
+ if (depth < info->dqi_qtree_depth - 1)
+- ret = find_tree_dqentry(info, dquot, blk, depth+1);
++ ret = find_tree_dqentry(info, dquot, blks, depth + 1);
+ else
+ ret = find_block_dqentry(info, dquot, blk);
+ out_buf:
+@@ -649,7 +707,13 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+ static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot)
+ {
+- return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
++ uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
++
++ if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
++ quota_error(dquot->dq_sb, "Quota tree depth too big!");
++ return -EIO;
++ }
++ return find_tree_dqentry(info, dquot, blks, 0);
+ }
+
+ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index ae99e7b88205b2..7978ab671e0c6a 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -166,14 +166,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ i_size_read(sb_dqopt(sb)->files[type]));
+ goto out_free;
+ }
+- if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+- quota_error(sb, "Free block number too big (%u >= %u).",
+- qinfo->dqi_free_blk, qinfo->dqi_blocks);
++ if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF ||
++ qinfo->dqi_free_blk >= qinfo->dqi_blocks)) {
++ quota_error(sb, "Free block number %u out of range (%u, %u).",
++ qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks);
+ goto out_free;
+ }
+- if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+- quota_error(sb, "Block with free entry too big (%u >= %u).",
+- qinfo->dqi_free_entry, qinfo->dqi_blocks);
++ if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF ||
++ qinfo->dqi_free_entry >= qinfo->dqi_blocks)) {
++ quota_error(sb, "Block with free entry %u out of range (%u, %u).",
++ qinfo->dqi_free_entry, QT_TREEOFF,
++ qinfo->dqi_blocks);
+ goto out_free;
+ }
+ ret = 0;
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
+index 9c5704be243562..889341c6b8f0c3 100644
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -1324,8 +1324,8 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_inode, *new_dentry_inode;
+ struct reiserfs_transaction_handle th;
+ int jbegin_count;
+- umode_t old_inode_mode;
+ unsigned long savelink = 1;
++ bool update_dir_parent = false;
+
+ if (flags & ~RENAME_NOREPLACE)
+ return -EINVAL;
+@@ -1375,8 +1375,7 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+ return -ENOENT;
+ }
+
+- old_inode_mode = old_inode->i_mode;
+- if (S_ISDIR(old_inode_mode)) {
++ if (S_ISDIR(old_inode->i_mode)) {
+ /*
+ * make sure that directory being renamed has correct ".."
+ * and that its new parent directory has not too many links
+@@ -1389,24 +1388,28 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+ }
+ }
+
+- /*
+- * directory is renamed, its parent directory will be changed,
+- * so find ".." entry
+- */
+- dot_dot_de.de_gen_number_bit_string = NULL;
+- retval =
+- reiserfs_find_entry(old_inode, "..", 2, &dot_dot_entry_path,
++ if (old_dir != new_dir) {
++ /*
++ * directory is renamed, its parent directory will be
++ * changed, so find ".." entry
++ */
++ dot_dot_de.de_gen_number_bit_string = NULL;
++ retval =
++ reiserfs_find_entry(old_inode, "..", 2,
++ &dot_dot_entry_path,
+ &dot_dot_de);
+- pathrelse(&dot_dot_entry_path);
+- if (retval != NAME_FOUND) {
+- reiserfs_write_unlock(old_dir->i_sb);
+- return -EIO;
+- }
++ pathrelse(&dot_dot_entry_path);
++ if (retval != NAME_FOUND) {
++ reiserfs_write_unlock(old_dir->i_sb);
++ return -EIO;
++ }
+
+- /* inode number of .. must equal old_dir->i_ino */
+- if (dot_dot_de.de_objectid != old_dir->i_ino) {
+- reiserfs_write_unlock(old_dir->i_sb);
+- return -EIO;
++ /* inode number of .. must equal old_dir->i_ino */
++ if (dot_dot_de.de_objectid != old_dir->i_ino) {
++ reiserfs_write_unlock(old_dir->i_sb);
++ return -EIO;
++ }
++ update_dir_parent = true;
+ }
+ }
+
+@@ -1486,7 +1489,7 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+
+ reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1);
+
+- if (S_ISDIR(old_inode->i_mode)) {
++ if (update_dir_parent) {
+ if ((retval =
+ search_by_entry_key(new_dir->i_sb,
+ &dot_dot_de.de_entry_key,
+@@ -1534,14 +1537,14 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+ new_de.de_bh);
+ reiserfs_restore_prepared_buffer(old_inode->i_sb,
+ old_de.de_bh);
+- if (S_ISDIR(old_inode_mode))
++ if (update_dir_parent)
+ reiserfs_restore_prepared_buffer(old_inode->
+ i_sb,
+ dot_dot_de.
+ de_bh);
+ continue;
+ }
+- if (S_ISDIR(old_inode_mode)) {
++ if (update_dir_parent) {
+ if (item_moved(&dot_dot_ih, &dot_dot_entry_path) ||
+ !entry_points_to_object("..", 2, &dot_dot_de,
+ old_dir)) {
+@@ -1559,7 +1562,7 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+ }
+ }
+
+- RFALSE(S_ISDIR(old_inode_mode) &&
++ RFALSE(update_dir_parent &&
+ !buffer_journal_prepared(dot_dot_de.de_bh), "");
+
+ break;
+@@ -1592,11 +1595,12 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
+ savelink = new_dentry_inode->i_nlink;
+ }
+
+- if (S_ISDIR(old_inode_mode)) {
++ if (update_dir_parent) {
+ /* adjust ".." of renamed directory */
+ set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
+ journal_mark_dirty(&th, dot_dot_de.de_bh);
+-
++ }
++ if (S_ISDIR(old_inode->i_mode)) {
+ /*
+ * there (in new_dir) was no directory, so it got new link
+ * (".." of renamed directory)
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index 7d12b8c5b2fa8c..e594ad8d759e2c 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -97,7 +97,7 @@ struct reiserfs_inode_info {
+ struct rw_semaphore i_xattr_sem;
+ #endif
+ #ifdef CONFIG_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+ #endif
+
+ struct inode vfs_inode;
+diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
+index 3676e02a0232a4..4ab8cab6ea6147 100644
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -1407,7 +1407,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
+ INITIALIZE_PATH(path);
+ int item_len = 0;
+ int tb_init = 0;
+- struct cpu_key cpu_key;
++ struct cpu_key cpu_key = {};
+ int retval;
+ int quota_cut_bytes = 0;
+
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 7eaf36b3de12b4..309f9d39ba7244 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -802,7 +802,7 @@ static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
+ static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t,
+ loff_t);
+
+-static struct dquot **reiserfs_get_dquots(struct inode *inode)
++static struct dquot __rcu **reiserfs_get_dquots(struct inode *inode)
+ {
+ return REISERFS_I(inode)->i_dquot;
+ }
+diff --git a/fs/romfs/super.c b/fs/romfs/super.c
+index 5c35f6c760377e..b1bdfbc211c3c0 100644
+--- a/fs/romfs/super.c
++++ b/fs/romfs/super.c
+@@ -593,7 +593,7 @@ static void romfs_kill_sb(struct super_block *sb)
+ #ifdef CONFIG_ROMFS_ON_BLOCK
+ if (sb->s_bdev) {
+ sync_blockdev(sb->s_bdev);
+- blkdev_put(sb->s_bdev, sb);
++ bdev_release(sb->s_bdev_handle);
+ }
+ #endif
+ }
+diff --git a/fs/select.c b/fs/select.c
+index 0ee55af1a55c29..d4d881d439dcdf 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -476,7 +476,7 @@ static inline void wait_key_set(poll_table *wait, unsigned long in,
+ wait->_key |= POLLOUT_SET;
+ }
+
+-static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
++static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
+ {
+ ktime_t expire, *to = NULL;
+ struct poll_wqueues table;
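The do_select() hunk adds noinline_for_stack because the function keeps a large poll_wqueues table on its stack; the annotation, which mainline defines as plain noinline, prevents the compiler from merging that frame into core_sys_select(). A userspace approximation of the effect:

    #include <string.h>

    /* Approximation of the kernel's noinline_for_stack annotation. */
    #define noinline_for_stack __attribute__((noinline))

    /* Without the attribute, inlining could merge scratch[] into every
     * caller's stack frame; with it, the 512 bytes stay in this frame. */
    static noinline_for_stack int big_frame_helper(const char *s)
    {
            char scratch[512];

            strncpy(scratch, s, sizeof(scratch) - 1);
            scratch[sizeof(scratch) - 1] = '\0';
            return (int)strlen(scratch);
    }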
+diff --git a/fs/smb/client/Makefile b/fs/smb/client/Makefile
+index 0b07eb94c93b38..e11985f2460b26 100644
+--- a/fs/smb/client/Makefile
++++ b/fs/smb/client/Makefile
+@@ -12,7 +12,7 @@ cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \
+ smb2ops.o smb2maperror.o smb2transport.o \
+ smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+ dns_resolve.o cifs_spnego_negtokeninit.asn1.o asn1.o \
+- namespace.o
++ namespace.o reparse.o
+
+ $(obj)/asn1.o: $(obj)/cifs_spnego_negtokeninit.asn1.h
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fe1bf5b6e0cb3d..0ff2491c311d8a 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -32,7 +32,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ * fully cached or it may be in the process of
+ * being deleted due to a lease break.
+ */
+- if (!cfid->has_lease) {
++ if (!cfid->time || !cfid->has_lease) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+@@ -145,21 +145,27 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ struct cached_fid *cfid;
+ struct cached_fids *cfids;
+ const char *npath;
++ int retries = 0, cur_sleep = 1;
+
+ if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
+ is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
+ return -EOPNOTSUPP;
+
+ ses = tcon->ses;
+- server = ses->server;
+ cfids = tcon->cfids;
+
+- if (!server->ops->new_lease_key)
+- return -EIO;
+-
+ if (cifs_sb->root == NULL)
+ return -ENOENT;
+
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ oplock = SMB2_OPLOCK_LEVEL_II;
++ server = cifs_pick_channel(ses);
++
++ if (!server->ops->new_lease_key)
++ return -EIO;
++
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+@@ -193,10 +199,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ npath = path_no_prefix(cifs_sb, path);
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+- kfree(utf16_path);
+- return rc;
++ goto out;
+ }
+
++ if (!npath[0]) {
++ dentry = dget(cifs_sb->root);
++ } else {
++ dentry = path_to_dentry(cifs_sb, npath);
++ if (IS_ERR(dentry)) {
++ rc = -ENOENT;
++ goto out;
++ }
++ }
++ cfid->dentry = dentry;
++
+ /*
+ * We do not hold the lock for the open because in case
+ * SMB2_open needs to reconnect.
+@@ -223,9 +239,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ .tcon = tcon,
+ .path = path,
+ .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+- .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
++ .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
++ FILE_READ_EA,
+ .disposition = FILE_OPEN,
+ .fid = pfid,
++ .replay = !!(retries),
+ };
+
+ rc = SMB2_open_init(tcon, server,
+@@ -249,6 +267,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+
+ smb2_set_related(&rqst[1]);
+
++ /*
++	 * Set @cfid->has_lease to true before sending out the compounded
++	 * request so its lease reference can be put in cached_dir_lease_break()
++	 * due to a potential lease break right after the request is sent or
++	 * while @cfid is still being cached. Concurrent processes won't be
++	 * able to use it yet due to @cfid->time being zero.
++ */
++ cfid->has_lease = true;
++
++ if (retries) {
++ smb2_set_replay(server, &rqst[0]);
++ smb2_set_replay(server, &rqst[1]);
++ }
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, 2, rqst,
+ resp_buftype, rsp_iov);
+@@ -263,6 +295,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ cfid->tcon = tcon;
+ cfid->is_open = true;
+
++ spin_lock(&cfids->cfid_list_lock);
++
+ o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+@@ -270,18 +304,32 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+ #endif /* CIFS_DEBUG2 */
+
+- if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
++
++ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
++ spin_unlock(&cfids->cfid_list_lock);
++ rc = -EINVAL;
+ goto oshr_free;
++ }
+
+- smb2_parse_contexts(server, o_rsp,
+- &oparms.fid->epoch,
+- oparms.fid->lease_key, &oplock,
+- NULL, NULL);
+- if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++ rc = smb2_parse_contexts(server, rsp_iov,
++ &oparms.fid->epoch,
++ oparms.fid->lease_key,
++ &oplock, NULL, NULL);
++ if (rc) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
++
++ rc = -EINVAL;
++ if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
++ spin_unlock(&cfids->cfid_list_lock);
++ goto oshr_free;
++ }
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+- if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
++ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+ if (!smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ sizeof(struct smb2_file_all_info),
+@@ -289,37 +337,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ (char *)&cfid->file_all_info))
+ cfid->file_all_info_is_valid = true;
+
+- if (!npath[0])
+- dentry = dget(cifs_sb->root);
+- else {
+- dentry = path_to_dentry(cifs_sb, npath);
+- if (IS_ERR(dentry)) {
+- rc = -ENOENT;
+- goto oshr_free;
+- }
+- }
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->dentry = dentry;
+ cfid->time = jiffies;
+- cfid->has_lease = true;
+ spin_unlock(&cfids->cfid_list_lock);
++ /* At this point the directory handle is fully cached */
++ rc = 0;
+
+ oshr_free:
+- kfree(utf16_path);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+- spin_lock(&cfids->cfid_list_lock);
+- if (!cfid->has_lease) {
+- if (rc) {
+- if (cfid->on_list) {
+- list_del(&cfid->entry);
+- cfid->on_list = false;
+- cfids->num_entries--;
+- }
+- rc = -ENOENT;
+- } else {
++ if (rc) {
++ spin_lock(&cfids->cfid_list_lock);
++ if (cfid->on_list) {
++ list_del(&cfid->entry);
++ cfid->on_list = false;
++ cfids->num_entries--;
++ }
++ if (cfid->has_lease) {
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+@@ -327,24 +362,28 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ * will be closed when the caller closes the cached
+ * handle.
+ */
++ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ goto out;
+ }
++ spin_unlock(&cfids->cfid_list_lock);
+ }
+- spin_unlock(&cfids->cfid_list_lock);
++out:
+ if (rc) {
+ if (cfid->is_open)
+ SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+ free_cached_dir(cfid);
+- cfid = NULL;
+- }
+-out:
+- if (rc == 0) {
++ } else {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+ }
++ kfree(utf16_path);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
+
+ return rc;
+ }
+@@ -378,6 +417,7 @@ smb2_close_cached_fid(struct kref *ref)
+ {
+ struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ refcount);
++ int rc;
+
+ spin_lock(&cfid->cfids->cfid_list_lock);
+ if (cfid->on_list) {
+@@ -391,9 +431,10 @@ smb2_close_cached_fid(struct kref *ref)
+ cfid->dentry = NULL;
+
+ if (cfid->is_open) {
+- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++ rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+- atomic_dec(&cfid->tcon->num_remote_opens);
++ if (rc) /* should we retry on -EBUSY or -EAGAIN? */
++ cifs_dbg(VFS, "close cached dir rc %d\n", rc);
+ }
+
+ free_cached_dir(cfid);
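The open_cached_dir() rework above wraps the compounded open in a replay loop: all per-attempt state (flags, oplock level, channel) is rebuilt at the replay_again label, replay markers are stamped onto the requests on retries, and another pass is taken only for transport-level failures. A minimal sketch of that control flow, with do_compound_open() and should_replay() as hypothetical stand-ins for the SMB2 plumbing:

    /* Sketch only; helper names are illustrative, not the real API. */
    static int open_with_replay(void)
    {
            int retries = 0, cur_sleep = 1;
            int rc;

    replay_again:
            /* rebuild all per-attempt state before each pass */
            rc = do_compound_open(retries > 0);      /* hypothetical */

            if (is_replayable_error(rc) &&
                should_replay(&retries, &cur_sleep)) /* hypothetical */
                    goto replay_again;
            return rc;
    }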
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 76922fcc4bc6e5..4a20e92474b234 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -40,11 +40,13 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
+ #ifdef CONFIG_CIFS_DEBUG2
+ struct smb_hdr *smb = buf;
+
+- cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
+- smb->Command, smb->Status.CifsError,
+- smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
+- cifs_dbg(VFS, "smb buf %p len %u\n", smb,
+- server->ops->calc_smb_size(smb));
++ cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n",
++ smb->Command, smb->Status.CifsError, smb->Flags,
++ smb->Flags2, smb->Mid, smb->Pid, smb->WordCount);
++ if (!server->ops->check_message(buf, server->total_read, server)) {
++ cifs_dbg(VFS, "smb buf %p len %u\n", smb,
++ server->ops->calc_smb_size(smb));
++ }
+ #endif /* CONFIG_CIFS_DEBUG2 */
+ }
+
+@@ -136,6 +138,11 @@ cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
+ {
+ struct TCP_Server_Info *server = chan->server;
+
++ if (!server) {
++ seq_printf(m, "\n\n\t\tChannel: %d DISABLED", i+1);
++ return;
++ }
++
+ seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx"
+ "\n\t\tNumber of credits: %d,%d,%d Dialect 0x%x"
+ "\n\t\tTCP status: %d Instance: %d"
+@@ -243,6 +250,8 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ spin_lock(&tcon->open_file_lock);
+ list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+@@ -271,6 +280,24 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ return 0;
+ }
+
++static __always_inline const char *compression_alg_str(__le16 alg)
++{
++ switch (alg) {
++ case SMB3_COMPRESS_NONE:
++ return "NONE";
++ case SMB3_COMPRESS_LZNT1:
++ return "LZNT1";
++ case SMB3_COMPRESS_LZ77:
++ return "LZ77";
++ case SMB3_COMPRESS_LZ77_HUFF:
++ return "LZ77-Huffman";
++ case SMB3_COMPRESS_PATTERN:
++ return "Pattern_V1";
++ default:
++ return "invalid";
++ }
++}
++
+ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ {
+ struct mid_q_entry *mid_entry;
+@@ -279,6 +306,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+ struct cifs_server_iface *iface;
++ size_t iface_weight = 0, iface_min_speed = 0;
++ struct cifs_server_iface *last_iface = NULL;
+ int c, i, j;
+
+ seq_puts(m,
+@@ -414,12 +443,6 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ server->echo_credits,
+ server->oplock_credits,
+ server->dialect);
+- if (server->compress_algorithm == SMB3_COMPRESS_LZNT1)
+- seq_printf(m, " COMPRESS_LZNT1");
+- else if (server->compress_algorithm == SMB3_COMPRESS_LZ77)
+- seq_printf(m, " COMPRESS_LZ77");
+- else if (server->compress_algorithm == SMB3_COMPRESS_LZ77_HUFF)
+- seq_printf(m, " COMPRESS_LZ77_HUFF");
+ if (server->sign)
+ seq_printf(m, " signed");
+ if (server->posix_ext_supported)
+@@ -427,6 +450,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ if (server->nosharesock)
+ seq_printf(m, " nosharesock");
+
++ seq_printf(m, "\nServer capabilities: 0x%x", server->capabilities);
++
+ if (server->rdma)
+ seq_printf(m, "\nRDMA ");
+ seq_printf(m, "\nTCP status: %d Instance: %d"
+@@ -449,9 +474,22 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ server->leaf_fullpath);
+ }
+
++ seq_puts(m, "\nCompression: ");
++ if (!server->compression.requested)
++ seq_puts(m, "disabled on mount");
++ else if (server->compression.enabled)
++ seq_printf(m, "enabled (%s)", compression_alg_str(server->compression.alg));
++ else
++ seq_puts(m, "disabled (not supported by this server)");
++
+ seq_printf(m, "\n\n\tSessions: ");
+ i = 0;
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
+ i++;
+ if ((ses->serverDomain == NULL) ||
+ (ses->serverOS == NULL) ||
+@@ -472,6 +510,9 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ ses->ses_count, ses->serverOS, ses->serverNOS,
+ ses->capabilities, ses->ses_status);
+ }
++ if (ses->expired_pwd)
++ seq_puts(m, "password no longer valid ");
++ spin_unlock(&ses->ses_lock);
+
+ seq_printf(m, "\n\tSecurity type: %s ",
+ get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
+@@ -536,11 +577,25 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ "\tLast updated: %lu seconds ago",
+ ses->iface_count,
+ (jiffies - ses->iface_last_update) / HZ);
++
++ last_iface = list_last_entry(&ses->iface_list,
++ struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ j = 0;
+ list_for_each_entry(iface, &ses->iface_list,
+ iface_head) {
+ seq_printf(m, "\n\t%d)", ++j);
+ cifs_dump_iface(m, iface);
++
++ iface_weight = iface->speed / iface_min_speed;
++ seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
++ "\n\t\tAllocated channels: %u\n",
++ iface->weight_fulfilled,
++ iface_weight,
++ iface->num_channels);
++
+ if (is_ses_using_iface(ses, iface))
+ seq_puts(m, "\t\t[CONNECTED]\n");
+ }
+@@ -623,11 +678,14 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ }
+ #endif /* CONFIG_CIFS_STATS2 */
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ atomic_set(&tcon->num_smbs_sent, 0);
+ spin_lock(&tcon->stat_lock);
+ tcon->bytes_read = 0;
+ tcon->bytes_written = 0;
++ tcon->stats_from_time = ktime_get_real_seconds();
+ spin_unlock(&tcon->stat_lock);
+ if (server->ops->clear_stats)
+ server->ops->clear_stats(tcon);
+@@ -701,13 +759,16 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ }
+ #endif /* STATS2 */
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ i++;
+ seq_printf(m, "\n%d) %s", i, tcon->tree_name);
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+- seq_printf(m, "\nSMBs: %d",
+- atomic_read(&tcon->num_smbs_sent));
++ seq_printf(m, "\nSMBs: %d since %ptTs UTC",
++ atomic_read(&tcon->num_smbs_sent),
++ &tcon->stats_from_time);
+ if (server->ops->print_stats)
+ server->ops->print_stats(m, tcon);
+ }
+@@ -738,14 +799,14 @@ static ssize_t name##_write(struct file *file, const char __user *buffer, \
+ size_t count, loff_t *ppos) \
+ { \
+ int rc; \
+- rc = kstrtoint_from_user(buffer, count, 10, & name); \
++ rc = kstrtoint_from_user(buffer, count, 10, &name); \
+ if (rc) \
+ return rc; \
+ return count; \
+ } \
+ static int name##_proc_show(struct seq_file *m, void *v) \
+ { \
+- seq_printf(m, "%d\n", name ); \
++ seq_printf(m, "%d\n", name); \
+ return 0; \
+ } \
+ static int name##_open(struct inode *inode, struct file *file) \
+@@ -1011,7 +1072,7 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file)
+ static void
+ cifs_security_flags_handle_must_flags(unsigned int *flags)
+ {
+- unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
++ unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL);
+
+ if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
+ *flags = CIFSSEC_MUST_KRB5;
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index 332588e77c311f..26327442e383b1 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
+ __u64 cifs_posix_caps;
+ } __packed;
+
++struct smb_mnt_tcon_info {
++ __u32 tid;
++ __u64 session_id;
++} __packed;
++
+ struct smb_snapshot_array {
+ __u32 number_of_snapshots;
+ __u32 number_of_snapshots_returned;
+@@ -108,6 +113,7 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
++#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
+ #define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+ /*
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index 6f3285f1dfee58..af7849e5974ff3 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = {
+ * strlen(";sec=ntlmsspi") */
+ #define MAX_MECH_STR_LEN 13
+
+-/* strlen of "host=" */
+-#define HOST_KEY_LEN 5
++/* strlen of ";host=" */
++#define HOST_KEY_LEN 6
+
+ /* strlen of ";ip4=" or ";ip6=" */
+ #define IP_KEY_LEN 5
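The HOST_KEY_LEN change is a plain off-by-one: the key written into the SPNEGO description string is ";host=", which is six characters, not five, so the old value under-sized the description length calculation. A compile-time guard of this shape (purely illustrative; not part of the patch) would catch any future drift:

    #define HOST_KEY_LEN 6

    /* Illustrative guard, not in the patch: compilation fails if the
     * constant ever disagrees with the key string again. */
    _Static_assert(sizeof(";host=") - 1 == HOST_KEY_LEN,
                   "HOST_KEY_LEN must equal strlen(\";host=\")");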
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+index ef4c2e3c9fa613..b0473c2567fe68 100644
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -129,7 +129,7 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
+ for (j = foffset / PAGE_SIZE; j < npages; j++) {
+ len = min_t(size_t, maxsize, PAGE_SIZE - offset);
+ p = kmap_local_page(folio_page(folio, j));
+- ret = crypto_shash_update(shash, p, len);
++ ret = crypto_shash_update(shash, p + offset, len);
+ kunmap_local(p);
+ if (ret < 0)
+ return ret;
+@@ -572,7 +572,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
+ UniStrupr(user);
+ } else {
+- memset(user, '\0', 2);
++ *(u16 *)user = 0;
+ }
+
+ rc = crypto_shash_update(ses->server->secmech.hmacmd5,
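The cifs_shash_xarray() fix feeds crypto_shash_update() from p + offset rather than the start of the mapped page, since the first folio of a region need not begin on a page boundary. A minimal sketch of the corrected pattern:

    /* Hedged sketch of the bug class fixed above: when the region does
     * not start on a page boundary, the kmap'd address must be advanced
     * by the in-page offset or the digest covers the wrong bytes. */
    static int hash_partial_page(struct shash_desc *shash, struct page *page,
                                 size_t offset, size_t len)
    {
            u8 *p = kmap_local_page(page);
            int ret;

            ret = crypto_shash_update(shash, p + offset, len); /* not just p */
            kunmap_local(p);
            return ret;
    }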
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 22869cda13565e..2d9f8bdb6d4efe 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -133,7 +133,7 @@ module_param(enable_oplocks, bool, 0644);
+ MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
+
+ module_param(enable_gcm_256, bool, 0644);
+-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
++MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
+
+ module_param(require_gcm_256, bool, 0644);
+ MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+@@ -150,15 +150,12 @@ MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
+ "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
+ " and less secure. Default: n/N/0");
+
+-extern mempool_t *cifs_sm_req_poolp;
+-extern mempool_t *cifs_req_poolp;
+-extern mempool_t *cifs_mid_poolp;
+-
+ struct workqueue_struct *cifsiod_wq;
+ struct workqueue_struct *decrypt_wq;
+ struct workqueue_struct *fileinfo_put_wq;
+ struct workqueue_struct *cifsoplockd_wq;
+ struct workqueue_struct *deferredclose_wq;
++struct workqueue_struct *serverclose_wq;
+ __u32 cifs_lock_secret;
+
+ /*
+@@ -315,8 +312,17 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int xid;
+ int rc = 0;
++ const char *full_path;
++ void *page;
+
+ xid = get_xid();
++ page = alloc_dentry_path();
++
++ full_path = build_path_from_dentry(dentry, page);
++ if (IS_ERR(full_path)) {
++ rc = PTR_ERR(full_path);
++ goto statfs_out;
++ }
+
+ if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
+ buf->f_namelen =
+@@ -332,8 +338,10 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_ffree = 0; /* unlimited */
+
+ if (server->ops->queryfs)
+- rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
++ rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
+
++statfs_out:
++ free_dentry_path(page);
+ free_xid(xid);
+ return rc;
+ }
+@@ -391,6 +399,7 @@ cifs_alloc_inode(struct super_block *sb)
+ * server, can not assume caching of file data or metadata.
+ */
+ cifs_set_oplock_level(cifs_inode, 0);
++ cifs_inode->lease_granted = false;
+ cifs_inode->flags = 0;
+ spin_lock_init(&cifs_inode->writers_lock);
+ cifs_inode->writers = 0;
+@@ -672,6 +681,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ seq_printf(s, ",backupgid=%u",
+ from_kgid_munged(&init_user_ns,
+ cifs_sb->ctx->backupgid));
++ seq_show_option(s, "reparse",
++ cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
+
+ seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
+ seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
+@@ -680,6 +691,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
+ if (tcon->ses->server->min_offload)
+ seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
++ if (tcon->ses->server->retrans)
++ seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
+ seq_printf(s, ",echo_interval=%lu",
+ tcon->ses->server->echo_interval / HZ);
+
+@@ -737,6 +750,8 @@ static void cifs_umount_begin(struct super_block *sb)
+
+ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_see_umount);
+ if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
+ /* we have other mounts to same share or we have
+ already tried to umount this and woken up
+@@ -1191,36 +1206,108 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+
+ const struct inode_operations cifs_symlink_inode_ops = {
+ .get_link = cifs_get_link,
++ .setattr = cifs_setattr,
+ .permission = cifs_permission,
+ .listxattr = cifs_listxattr,
+ };
+
++/*
++ * Advance the EOF marker to after the source range.
++ */
++static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
++ struct cifs_tcon *src_tcon,
++ unsigned int xid, loff_t src_end)
++{
++ struct cifsFileInfo *writeable_srcfile;
++ int rc = -EINVAL;
++
++ writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
++ if (writeable_srcfile) {
++ if (src_tcon->ses->server->ops->set_file_size)
++ rc = src_tcon->ses->server->ops->set_file_size(
++ xid, src_tcon, writeable_srcfile,
++ src_inode->i_size, true /* no need to set sparse */);
++ else
++ rc = -ENOSYS;
++ cifsFileInfo_put(writeable_srcfile);
++ cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
++ }
++
++ if (rc < 0)
++ goto set_failed;
++
++ netfs_resize_file(&src_cifsi->netfs, src_end);
++ fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
++ return 0;
++
++set_failed:
++ return filemap_write_and_wait(src_inode->i_mapping);
++}
++
++/*
++ * Flush out either the folio that overlaps the beginning of a range in which
++ * pos resides or the folio that overlaps the end of a range unless that folio
++ * is entirely within the range we're going to invalidate. We extend the flush
++ * bounds to encompass the folio.
++ */
++static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
++ bool first)
++{
++ struct folio *folio;
++ unsigned long long fpos, fend;
++ pgoff_t index = pos / PAGE_SIZE;
++ size_t size;
++ int rc = 0;
++
++ folio = filemap_get_folio(inode->i_mapping, index);
++ if (IS_ERR(folio))
++ return 0;
++
++ size = folio_size(folio);
++ fpos = folio_pos(folio);
++ fend = fpos + size - 1;
++ *_fstart = min_t(unsigned long long, *_fstart, fpos);
++ *_fend = max_t(unsigned long long, *_fend, fend);
++ if ((first && pos == fpos) || (!first && pos == fend))
++ goto out;
++
++ rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
++out:
++ folio_put(folio);
++ return rc;
++}
++
+ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff, loff_t len,
+ unsigned int remap_flags)
+ {
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
++ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
++ struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
+ struct cifsFileInfo *smb_file_src = src_file->private_data;
+- struct cifsFileInfo *smb_file_target;
+- struct cifs_tcon *target_tcon;
++ struct cifsFileInfo *smb_file_target = dst_file->private_data;
++ struct cifs_tcon *target_tcon, *src_tcon;
++ unsigned long long destend, fstart, fend, new_size;
+ unsigned int xid;
+ int rc;
+
+- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++ if (remap_flags & REMAP_FILE_DEDUP)
++ return -EOPNOTSUPP;
++ if (remap_flags & ~REMAP_FILE_ADVISORY)
+ return -EINVAL;
+
+ cifs_dbg(FYI, "clone range\n");
+
+ xid = get_xid();
+
+- if (!src_file->private_data || !dst_file->private_data) {
++ if (!smb_file_src || !smb_file_target) {
+ rc = -EBADF;
+ cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+ goto out;
+ }
+
+- smb_file_target = dst_file->private_data;
++ src_tcon = tlink_tcon(smb_file_src->tlink);
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
+ /*
+@@ -1233,20 +1320,63 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ if (len == 0)
+ len = src_inode->i_size - off;
+
+- cifs_dbg(FYI, "about to flush pages\n");
+- /* should we flush first and last page first */
+- truncate_inode_pages_range(&target_inode->i_data, destoff,
+- PAGE_ALIGN(destoff + len)-1);
++ cifs_dbg(FYI, "clone range\n");
++
++ /* Flush the source buffer */
++ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
++ off + len - 1);
++ if (rc)
++ goto unlock;
+
+- if (target_tcon->ses->server->ops->duplicate_extents)
++ /* The server-side copy will fail if the source crosses the EOF marker.
++ * Advance the EOF marker after the flush above to the end of the range
++ * if it's short of that.
++ */
++ if (src_cifsi->netfs.remote_i_size < off + len) {
++ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
++ if (rc < 0)
++ goto unlock;
++ }
++
++ new_size = destoff + len;
++ destend = destoff + len - 1;
++
++ /* Flush the folios at either end of the destination range to prevent
++ * accidental loss of dirty data outside of the range.
++ */
++ fstart = destoff;
++ fend = destend;
++
++ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++ if (rc)
++ goto unlock;
++ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
++ if (rc)
++ goto unlock;
++
++ /* Discard all the folios that overlap the destination region. */
++ cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
++ truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
++
++ fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
++ i_size_read(target_inode), 0);
++
++ rc = -EOPNOTSUPP;
++ if (target_tcon->ses->server->ops->duplicate_extents) {
+ rc = target_tcon->ses->server->ops->duplicate_extents(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
+- else
+- rc = -EOPNOTSUPP;
++ if (rc == 0 && new_size > i_size_read(target_inode)) {
++ truncate_setsize(target_inode, new_size);
++ netfs_resize_file(&target_cifsi->netfs, new_size);
++ fscache_resize_cookie(cifs_inode_cookie(target_inode),
++ new_size);
++ }
++ }
+
+ /* force revalidate of size and timestamps of target file now
+ that target is updated on the server */
+ CIFS_I(target_inode)->time = 0;
++unlock:
+ /* although unlocking in the reverse order from locking is not
+ strictly necessary here it is a little cleaner to be consistent */
+ unlock_two_nondirectories(src_inode, target_inode);
+@@ -1262,10 +1392,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ {
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
++ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+ struct cifsFileInfo *smb_file_src;
+ struct cifsFileInfo *smb_file_target;
+ struct cifs_tcon *src_tcon;
+ struct cifs_tcon *target_tcon;
++ unsigned long long destend, fstart, fend;
+ ssize_t rc;
+
+ cifs_dbg(FYI, "copychunk range\n");
+@@ -1283,7 +1415,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
+ if (src_tcon->ses != target_tcon->ses) {
+- cifs_dbg(VFS, "source and target of copy not on same server\n");
++ cifs_dbg(FYI, "source and target of copy not on same server\n");
+ goto out;
+ }
+
+@@ -1305,13 +1437,41 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ if (rc)
+ goto unlock;
+
+- /* should we flush first and last page first */
+- truncate_inode_pages(&target_inode->i_data, 0);
++ /* The server-side copy will fail if the source crosses the EOF marker.
++ * Advance the EOF marker after the flush above to the end of the range
++ * if it's short of that.
++ */
++ if (src_cifsi->server_eof < off + len) {
++ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
++ if (rc < 0)
++ goto unlock;
++ }
++
++ destend = destoff + len - 1;
++
++ /* Flush the folios at either end of the destination range to prevent
++ * accidental loss of dirty data outside of the range.
++ */
++ fstart = destoff;
++ fend = destend;
++
++ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++ if (rc)
++ goto unlock;
++ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
++ if (rc)
++ goto unlock;
++
++ /* Discard all the folios that overlap the destination region. */
++ truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+
+ rc = file_modified(dst_file);
+- if (!rc)
++ if (!rc) {
+ rc = target_tcon->ses->server->ops->copychunk_range(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
++ if (rc > 0 && destoff + rc > i_size_read(target_inode))
++ truncate_setsize(target_inode, destoff + rc);
++ }
+
+ file_accessed(src_file);
+
+@@ -1732,9 +1892,16 @@ init_cifs(void)
+ goto out_destroy_cifsoplockd_wq;
+ }
+
++ serverclose_wq = alloc_workqueue("serverclose",
++ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++ if (!serverclose_wq) {
++ rc = -ENOMEM;
++ goto out_destroy_deferredclose_wq;
++ }
++
+ rc = cifs_init_inodecache();
+ if (rc)
+- goto out_destroy_deferredclose_wq;
++ goto out_destroy_serverclose_wq;
+
+ rc = init_mids();
+ if (rc)
+@@ -1796,6 +1963,8 @@ init_cifs(void)
+ destroy_mids();
+ out_destroy_inodecache:
+ cifs_destroy_inodecache();
++out_destroy_serverclose_wq:
++ destroy_workqueue(serverclose_wq);
+ out_destroy_deferredclose_wq:
+ destroy_workqueue(deferredclose_wq);
+ out_destroy_cifsoplockd_wq:
+@@ -1835,6 +2004,7 @@ exit_cifs(void)
+ destroy_workqueue(cifsoplockd_wq);
+ destroy_workqueue(decrypt_wq);
+ destroy_workqueue(fileinfo_put_wq);
++ destroy_workqueue(serverclose_wq);
+ destroy_workqueue(cifsiod_wq);
+ cifs_proc_clean();
+ }
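cifs_remap_file_range() and cifs_file_copychunk_range() now share the same destination handling: flush the source, advance the source EOF if the copy range crosses it, flush the partial folios at both ends of the destination, and only then drop the page cache over the (folio-widened) target range. A condensed sketch of that sequence, treating the static cifs_flush_folio() as if it were callable from here:

    /* Condensed sketch; cifs_flush_folio() widens fstart/fend to whole-
     * folio bounds so no dirty data outside the copy range is lost. */
    static int prep_copy_target(struct inode *inode, loff_t destoff, loff_t len)
    {
            unsigned long long destend = destoff + len - 1;
            unsigned long long fstart = destoff, fend = destend;
            int rc;

            rc = cifs_flush_folio(inode, destoff, &fstart, &fend, true);
            if (!rc)
                    rc = cifs_flush_folio(inode, destend, &fstart, &fend, false);
            if (!rc)
                    truncate_inode_pages_range(&inode->i_data, fstart, fend);
            return rc;
    }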
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 02082621d8e07a..111540eff66e7d 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -49,6 +49,11 @@
+ */
+ #define CIFS_DEF_ACTIMEO (1 * HZ)
+
++/*
++ * max sleep time before retry to server
++ */
++#define CIFS_MAX_SLEEP 2000
++
+ /*
+ * max attribute cache timeout (jiffies) - 2^30
+ */
+@@ -82,7 +87,7 @@
+ #define SMB_INTERFACE_POLL_INTERVAL 600
+
+ /* maximum number of PDUs in one compound */
+-#define MAX_COMPOUND 5
++#define MAX_COMPOUND 7
+
+ /*
+ * Default number of credits to keep available for SMB3.
+@@ -148,6 +153,24 @@ enum securityEnum {
+ Kerberos, /* Kerberos via SPNEGO */
+ };
+
++enum cifs_reparse_type {
++ CIFS_REPARSE_TYPE_NFS,
++ CIFS_REPARSE_TYPE_WSL,
++ CIFS_REPARSE_TYPE_DEFAULT = CIFS_REPARSE_TYPE_NFS,
++};
++
++static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type)
++{
++ switch (type) {
++ case CIFS_REPARSE_TYPE_NFS:
++ return "nfs";
++ case CIFS_REPARSE_TYPE_WSL:
++ return "wsl";
++ default:
++ return "unknown";
++ }
++}
++
+ struct session_key {
+ unsigned int len;
+ char *response;
+@@ -191,23 +214,31 @@ struct cifs_open_info_data {
+ bool reparse_point;
+ bool symlink;
+ };
+- __u32 reparse_tag;
++ struct {
++ /* ioctl response buffer */
++ struct {
++ int buftype;
++ struct kvec iov;
++ } io;
++ __u32 tag;
++ union {
++ struct reparse_data_buffer *buf;
++ struct reparse_posix_data *posix;
++ };
++ } reparse;
++ struct {
++ __u8 eas[SMB2_WSL_MAX_QUERY_EA_RESP_SIZE];
++ unsigned int eas_len;
++ } wsl;
+ char *symlink_target;
++ struct cifs_sid posix_owner;
++ struct cifs_sid posix_group;
+ union {
+ struct smb2_file_all_info fi;
+ struct smb311_posix_qinfo posix_fi;
+ };
+ };
+
+-#define cifs_open_data_reparse(d) \
+- ((d)->reparse_point || \
+- (le32_to_cpu((d)->fi.Attributes) & ATTR_REPARSE))
+-
+-static inline void cifs_free_open_info(struct cifs_open_info_data *data)
+-{
+- kfree(data->symlink_target);
+-}
+-
+ /*
+ *****************************************************************
+ * Except the CIFS PDUs themselves all the
+@@ -324,6 +355,9 @@ struct smb_version_operations {
+ /* informational QFS call */
+ void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *);
++ /* query for server interfaces */
++ int (*query_server_interfaces)(const unsigned int, struct cifs_tcon *,
++ bool);
+ /* check if a path is accessible or not */
+ int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *, const char *);
+@@ -349,7 +383,8 @@ struct smb_version_operations {
+ struct cifs_open_info_data *data);
+ /* set size by path */
+ int (*set_path_size)(const unsigned int, struct cifs_tcon *,
+- const char *, __u64, struct cifs_sb_info *, bool);
++ const char *, __u64, struct cifs_sb_info *, bool,
++ struct dentry *);
+ /* set size by file handle */
+ int (*set_file_size)(const unsigned int, struct cifs_tcon *,
+ struct cifsFileInfo *, __u64, bool);
+@@ -379,34 +414,38 @@ struct smb_version_operations {
+ struct cifs_sb_info *);
+ /* unlink file */
+ int (*unlink)(const unsigned int, struct cifs_tcon *, const char *,
+- struct cifs_sb_info *);
++ struct cifs_sb_info *, struct dentry *);
+ /* open, rename and delete file */
+ int (*rename_pending_delete)(const char *, struct dentry *,
+ const unsigned int);
+ /* send rename request */
+- int (*rename)(const unsigned int, struct cifs_tcon *, const char *,
+- const char *, struct cifs_sb_info *);
++ int (*rename)(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb);
+ /* send create hardlink request */
+- int (*create_hardlink)(const unsigned int, struct cifs_tcon *,
+- const char *, const char *,
+- struct cifs_sb_info *);
++ int (*create_hardlink)(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb);
+ /* query symlink target */
+ int (*query_symlink)(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *full_path,
+- char **target_path,
+- struct kvec *rsp_iov);
++ char **target_path);
+ /* open a file for non-posix mounts */
+ int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+ void *buf);
+ /* set fid protocol-specific info */
+ void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
+ /* close a file */
+- void (*close)(const unsigned int, struct cifs_tcon *,
++ int (*close)(const unsigned int, struct cifs_tcon *,
+ struct cifs_fid *);
+ /* close a file, returning file attributes and timestamps */
+- void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
++ int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *pfile_info);
+ /* send a flush request to the server */
+ int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
+@@ -446,7 +485,7 @@ struct smb_version_operations {
+ __u16 net_fid, struct cifsInodeInfo *cifs_inode);
+ /* query remote filesystem */
+ int (*queryfs)(const unsigned int, struct cifs_tcon *,
+- struct cifs_sb_info *, struct kstatfs *);
++ const char *, struct cifs_sb_info *, struct kstatfs *);
+ /* send mandatory brlock to the server */
+ int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
+ __u64, __u32, int, int, bool);
+@@ -527,7 +566,8 @@ struct smb_version_operations {
+ struct mid_q_entry **, char **, int *);
+ enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
+ enum securityEnum);
+- int (*next_header)(char *);
++ int (*next_header)(struct TCP_Server_Info *server, char *buf,
++ unsigned int *noff);
+ /* ioctl passthrough for query_info */
+ int (*ioctl_query_info)(const unsigned int xid,
+ struct cifs_tcon *tcon,
+@@ -551,6 +591,15 @@ struct smb_version_operations {
+ bool (*is_status_io_timeout)(char *buf);
+ /* Check for STATUS_NETWORK_NAME_DELETED */
+ bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
++ int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb,
++ struct kvec *rsp_iov,
++ struct cifs_open_info_data *data);
++ int (*create_reparse_symlink)(const unsigned int xid,
++ struct inode *inode,
++ struct dentry *dentry,
++ struct cifs_tcon *tcon,
++ const char *full_path,
++ const char *symname);
+ };
+
+ struct smb_version_values {
+@@ -650,6 +699,7 @@ struct TCP_Server_Info {
+ bool noautotune; /* do not autotune send buf sizes */
+ bool nosharesock;
+ bool tcp_nodelay;
++ bool terminate;
+ unsigned int credits; /* send no more requests at once */
+ unsigned int max_credits; /* can override large 32000 default at mnt */
+ unsigned int in_flight; /* number of requests on the wire to server */
+@@ -721,7 +771,12 @@ struct TCP_Server_Info {
+ unsigned int max_read;
+ unsigned int max_write;
+ unsigned int min_offload;
+- __le16 compress_algorithm;
++ unsigned int retrans;
++ struct {
++		bool requested; /* "compress" mount option set */
++ bool enabled; /* actually negotiated with server */
++ __le16 alg; /* preferred alg negotiated with server */
++ } compression;
+ __u16 signing_algorithm;
+ __le16 cipher_type;
+	/* save initial negprot hash */
+@@ -969,6 +1024,8 @@ struct cifs_server_iface {
+ struct list_head iface_head;
+ struct kref refcount;
+ size_t speed;
++ size_t weight_fulfilled;
++ unsigned int num_channels;
+ unsigned int rdma_capable : 1;
+ unsigned int rss_capable : 1;
+ unsigned int is_active : 1; /* unset if non existent */
+@@ -982,7 +1039,6 @@ release_iface(struct kref *ref)
+ struct cifs_server_iface *iface = container_of(ref,
+ struct cifs_server_iface,
+ refcount);
+- list_del_init(&iface->iface_head);
+ kfree(iface);
+ }
+
+@@ -993,6 +1049,8 @@ struct cifs_chan {
+ __u8 signkey[SMB3_SIGN_KEY_SIZE];
+ };
+
++#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
++
+ /*
+ * Session structure. One of these for each uid session with a particular host
+ */
+@@ -1019,12 +1077,15 @@ struct cifs_ses {
+ and after mount option parsing we fill it */
+ char *domainName;
+ char *password;
++ char *password2; /* When key rotation used, new password may be set before it expires */
+ char workstation_name[CIFS_MAX_WORKSTATION_LEN];
+ struct session_key auth_key;
+ struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
+ enum securityEnum sectype; /* what security flavor was specified? */
+ bool sign; /* is signing required? */
+ bool domainAuto:1;
++	bool expired_pwd; /* track if access denied or expired pwd so we know whether to update */
++ unsigned int flags;
+ __u16 session_flags;
+ __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+@@ -1050,6 +1111,7 @@ struct cifs_ses {
+ spinlock_t chan_lock;
+ /* ========= begin: protected by chan_lock ======== */
+ #define CIFS_MAX_CHANNELS 16
++#define CIFS_INVAL_CHAN_INDEX (-1)
+ #define CIFS_ALL_CHANNELS_SET(ses) \
+ ((1UL << (ses)->chan_count) - 1)
+ #define CIFS_ALL_CHANS_GOOD(ses) \
+@@ -1128,6 +1190,7 @@ struct cifs_fattr {
+ */
+ struct cifs_tcon {
+ struct list_head tcon_list;
++ int debug_id; /* Debugging for tracing */
+ int tc_count;
+ struct list_head rlist; /* reconnect list */
+ spinlock_t tc_lock; /* protect anything here that is not protected */
+@@ -1175,6 +1238,7 @@ struct cifs_tcon {
+ __u64 bytes_read;
+ __u64 bytes_written;
+ spinlock_t stat_lock; /* protects the two fields above */
++ time64_t stats_from_time;
+ FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+ FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
+ FILE_SYSTEM_UNIX_INFO fsUnixInfo;
+@@ -1213,13 +1277,14 @@ struct cifs_tcon {
+ __u32 max_cached_dirs;
+ #ifdef CONFIG_CIFS_FSCACHE
+ u64 resource_id; /* server resource id */
++ bool fscache_acquired; /* T if we've tried acquiring a cookie */
+ struct fscache_volume *fscache; /* cookie for share */
++ struct mutex fscache_lock; /* Prevent regetting a cookie */
+ #endif
+ struct list_head pending_opens; /* list of incomplete opens */
+ struct cached_fids *cfids;
+ /* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+- struct list_head dfs_ses_list;
+ struct delayed_work dfs_cache_work;
+ #endif
+ struct delayed_work query_interfaces; /* query interfaces workqueue job */
+@@ -1334,6 +1399,8 @@ struct cifs_open_parms {
+ struct cifs_fid *fid;
+ umode_t mode;
+ bool reconnect:1;
++ bool replay:1; /* indicates that this open is for a replay */
++ struct kvec *ea_cctx;
+ };
+
+ struct cifs_fid {
+@@ -1375,6 +1442,8 @@ struct cifsFileInfo {
+ bool invalidHandle:1; /* file closed via session abend */
+ bool swapfile:1;
+ bool oplock_break_cancelled:1;
++ bool status_file_deleted:1; /* file has been deleted */
++ bool offload:1; /* offload final part of _put to a wq */
+ unsigned int oplock_epoch; /* epoch from the lease break */
+ __u32 oplock_level; /* oplock/lease level from the lease break */
+ int count;
+@@ -1383,6 +1452,7 @@ struct cifsFileInfo {
+ struct cifs_search_info srch_inf;
+ struct work_struct oplock_break; /* work for oplock breaks */
+ struct work_struct put; /* work for the final part of _put */
++ struct work_struct serverclose; /* work for serverclose */
+ struct delayed_work deferred;
+ bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
+ char *symlink_target;
+@@ -1465,6 +1535,7 @@ struct cifs_writedata {
+ struct smbd_mr *mr;
+ #endif
+ struct cifs_credits credits;
++ bool replay;
+ };
+
+ /*
+@@ -1533,6 +1604,7 @@ struct cifsInodeInfo {
+ spinlock_t deferred_lock; /* protection on deferred list */
+ bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
+ char *symlink_target;
++ __u32 reparse_tag;
+ };
+
+ static inline struct cifsInodeInfo *
+@@ -1738,7 +1810,6 @@ struct cifs_mount_ctx {
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- struct list_head dfs_ses_list;
+ };
+
+ static inline void __free_dfs_info_param(struct dfs_info3_param *param)
+@@ -1794,6 +1865,13 @@ static inline bool is_retryable_error(int error)
+ return false;
+ }
+
++static inline bool is_replayable_error(int error)
++{
++ if (error == -EAGAIN || error == -ECONNABORTED)
++ return true;
++ return false;
++}
++
+
+ /* cifs_get_writable_file() flags */
+ #define FIND_WR_ANY 0
+@@ -1844,7 +1922,7 @@ static inline bool is_retryable_error(int error)
+ #define CIFSSEC_MAY_SIGN 0x00001
+ #define CIFSSEC_MAY_NTLMV2 0x00004
+ #define CIFSSEC_MAY_KRB5 0x00008
+-#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
++#define CIFSSEC_MAY_SEAL 0x00040
+ #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */
+
+ #define CIFSSEC_MUST_SIGN 0x01001
+@@ -1854,15 +1932,15 @@ require use of the stronger protocol */
+ #define CIFSSEC_MUST_NTLMV2 0x04004
+ #define CIFSSEC_MUST_KRB5 0x08008
+ #ifdef CONFIG_CIFS_UPCALL
+-#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */
++#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */
+ #else
+-#define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */
++#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */
+ #endif /* UPCALL */
+-#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
++#define CIFSSEC_MUST_SEAL 0x40040
+ #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
+
+-#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
+-#define CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
++#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
++#define CIFSSEC_MAX (CIFSSEC_MAY_SIGN | CIFSSEC_MUST_KRB5 | CIFSSEC_MAY_SEAL)
+ #define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
+ /*
+ *****************************************************************
+@@ -2032,8 +2110,11 @@ extern struct workqueue_struct *decrypt_wq;
+ extern struct workqueue_struct *fileinfo_put_wq;
+ extern struct workqueue_struct *cifsoplockd_wq;
+ extern struct workqueue_struct *deferredclose_wq;
++extern struct workqueue_struct *serverclose_wq;
+ extern __u32 cifs_lock_secret;
+
++extern mempool_t *cifs_sm_req_poolp;
++extern mempool_t *cifs_req_poolp;
+ extern mempool_t *cifs_mid_poolp;
+
+ /* Operations for different SMB versions */
+@@ -2143,6 +2224,7 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ unsigned int len, skip;
+ unsigned int nents = 0;
+ unsigned long addr;
++ size_t data_size;
+ int i, j;
+
+ /*
+@@ -2158,17 +2240,21 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+ */
+ for (i = 0; i < num_rqst; i++) {
++ data_size = iov_iter_count(&rqst[i].rq_iter);
++
+ /* We really don't want a mixture of pinned and unpinned pages
+ * in the sglist. It's hard to keep track of which is what.
+ * Instead, we convert to a BVEC-type iterator higher up.
+ */
+- if (WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
++ if (data_size &&
++ WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
+ return -EIO;
+
+ /* We also don't want to have any extra refs or pins to clean
+ * up in the sglist.
+ */
+- if (WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
++ if (data_size &&
++ WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
+ return -EIO;
+
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+@@ -2184,7 +2270,8 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ }
+ skip = 0;
+ }
+- nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
++ if (data_size)
++ nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
+ }
+ nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
+ return nents;
+@@ -2218,10 +2305,21 @@ static inline void cifs_sg_set_buf(struct sg_table *sgtable,
+ }
+ }
+
++#define CIFS_OPARMS(_cifs_sb, _tcon, _path, _da, _cd, _co, _mode) \
++ ((struct cifs_open_parms) { \
++ .tcon = _tcon, \
++ .path = _path, \
++ .desired_access = (_da), \
++ .disposition = (_cd), \
++ .create_options = cifs_create_options(_cifs_sb, (_co)), \
++ .mode = (_mode), \
++ .cifs_sb = _cifs_sb, \
++ })
++
+ struct smb2_compound_vars {
+ struct cifs_open_parms oparms;
+- struct kvec rsp_iov[3];
+- struct smb_rqst rqst[3];
++ struct kvec rsp_iov[MAX_COMPOUND];
++ struct smb_rqst rqst[MAX_COMPOUND];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov;
+ struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+@@ -2229,6 +2327,17 @@ struct smb2_compound_vars {
+ struct kvec close_iov;
+ struct smb2_file_rename_info rename_info;
+ struct smb2_file_link_info link_info;
++ struct kvec ea_iov;
+ };
+
++static inline bool cifs_ses_exiting(struct cifs_ses *ses)
++{
++ bool ret;
++
++ spin_lock(&ses->ses_lock);
++ ret = ses->ses_status == SES_EXITING;
++ spin_unlock(&ses->ses_lock);
++ return ret;
++}
++
+ #endif /* _CIFS_GLOB_H */
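The new is_replayable_error()/CIFS_MAX_SLEEP pair in cifsglob.h backs the replay loops added elsewhere in this patch. The actual backoff lives in smb2_should_replay(), which this hunk does not show; the sketch below is an assumption about its general shape, not its real body:

    #include <linux/delay.h>
    #include <linux/minmax.h>

    /* Assumed shape of a bounded exponential backoff against
     * CIFS_MAX_SLEEP; the real retry cap and policy may differ. */
    static bool should_replay_sketch(int *retries, int *cur_sleep)
    {
            if (++(*retries) > 3)
                    return false;
            msleep(*cur_sleep);
            *cur_sleep = min(*cur_sleep * 2, CIFS_MAX_SLEEP);
            return true;
    }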
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index e17222fec9d290..c46d418c1c0c3e 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -882,11 +882,13 @@ typedef struct smb_com_open_rsp {
+ __u8 OplockLevel;
+ __u16 Fid;
+ __le32 CreateAction;
+- __le64 CreationTime;
+- __le64 LastAccessTime;
+- __le64 LastWriteTime;
+- __le64 ChangeTime;
+- __le32 FileAttributes;
++ struct_group_attr(common_attributes, __packed,
++ __le64 CreationTime;
++ __le64 LastAccessTime;
++ __le64 LastWriteTime;
++ __le64 ChangeTime;
++ __le32 FileAttributes;
++ );
+ __le64 AllocationSize;
+ __le64 EndOfFile;
+ __le16 FileType;
+@@ -1356,7 +1358,7 @@ typedef struct smb_com_transaction_ioctl_rsp {
+ __le32 DataDisplacement;
+ __u8 SetupCount; /* 1 */
+ __le16 ReturnedDataLen;
+- __u16 ByteCount;
++ __le16 ByteCount;
+ } __attribute__((packed)) TRANSACT_IOCTL_RSP;
+
+ #define CIFS_ACL_OWNER 1
+@@ -1509,7 +1511,7 @@ struct reparse_posix_data {
+ __le16 ReparseDataLength;
+ __u16 Reserved;
+ __le64 InodeType; /* LNK, FIFO, CHR etc. */
+- char PathBuffer[];
++ __u8 DataBuffer[];
+ } __attribute__((packed));
+
+ struct cifs_quota_data {
+@@ -2264,11 +2266,13 @@ typedef struct {
+ /* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+ /******************************************************************************/
+ typedef struct { /* data block encoding of response to level 263 QPathInfo */
+- __le64 CreationTime;
+- __le64 LastAccessTime;
+- __le64 LastWriteTime;
+- __le64 ChangeTime;
+- __le32 Attributes;
++ struct_group_attr(common_attributes, __packed,
++ __le64 CreationTime;
++ __le64 LastAccessTime;
++ __le64 LastWriteTime;
++ __le64 ChangeTime;
++ __le32 Attributes;
++ );
+ __u32 Pad1;
+ __le64 AllocationSize;
+ __le64 EndOfFile; /* size ie offset to first free byte in file */
+@@ -2570,7 +2574,7 @@ typedef struct {
+
+
+ struct win_dev {
+- unsigned char type[8]; /* IntxCHR or IntxBLK */
++	unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
+ __le64 major;
+ __le64 minor;
+ } __attribute__((packed));
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 0c37eefa18a57c..fbc358c09da3b1 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -81,7 +81,7 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
+ extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+ char *cifs_build_devname(char *nodename, const char *prepath);
+ extern void delete_mid(struct mid_q_entry *mid);
+-extern void release_mid(struct mid_q_entry *mid);
++void __release_mid(struct kref *refcount);
+ extern void cifs_wake_up_task(struct mid_q_entry *mid);
+ extern int cifs_handle_standard(struct TCP_Server_Info *server,
+ struct mid_q_entry *mid);
+@@ -132,6 +132,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
+ struct smb_hdr *in_buf,
+ struct smb_hdr *out_buf,
+ int *bytes_returned);
++
+ void
+ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ bool all_channels);
+@@ -143,7 +144,8 @@ extern int cifs_reconnect(struct TCP_Server_Info *server,
+ extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
+ extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
+ extern bool backup_cred(struct cifs_sb_info *);
+-extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
++extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof,
++ bool from_readdir);
+ extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+ unsigned int bytes_written);
+ extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
+@@ -200,18 +202,19 @@ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
+ struct cifs_sb_info *cifs_sb);
+ extern void cifs_dir_info_to_fattr(struct cifs_fattr *, FILE_DIRECTORY_INFO *,
+ struct cifs_sb_info *);
+-extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
++extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
++ bool from_readdir);
+ extern struct inode *cifs_iget(struct super_block *sb,
+ struct cifs_fattr *fattr);
+
+ int cifs_get_inode_info(struct inode **inode, const char *full_path,
+ struct cifs_open_info_data *data, struct super_block *sb, int xid,
+ const struct cifs_fid *fid);
+-bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+- struct cifs_fattr *fattr,
+- u32 tag);
+-extern int smb311_posix_get_inode_info(struct inode **pinode, const char *search_path,
+- struct super_block *sb, unsigned int xid);
++extern int smb311_posix_get_inode_info(struct inode **inode,
++ const char *full_path,
++ struct cifs_open_info_data *data,
++ struct super_block *sb,
++ const unsigned int xid);
+ extern int cifs_get_inode_info_unix(struct inode **pinode,
+ const unsigned char *search_path,
+ struct super_block *sb, unsigned int xid);
+@@ -291,12 +294,16 @@ extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+
+ extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ const char *path);
++
++extern void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
++ const char *path);
++
+ extern struct TCP_Server_Info *
+ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ struct TCP_Server_Info *primary_server);
+ extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
+ int from_reconnect);
+-extern void cifs_put_tcon(struct cifs_tcon *tcon);
++extern void cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
+
+ extern void cifs_release_automount_timer(void);
+
+@@ -397,7 +404,8 @@ extern int CIFSSMBSetFileDisposition(const unsigned int xid,
+ __u32 pid_of_opener);
+ extern int CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *file_name, __u64 size,
+- struct cifs_sb_info *cifs_sb, bool set_allocation);
++ struct cifs_sb_info *cifs_sb, bool set_allocation,
++ struct dentry *dentry);
+ extern int CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile, __u64 size,
+ bool set_allocation);
+@@ -433,17 +441,21 @@ extern int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+ const struct nls_table *nls_codepage,
+ int remap_special_chars);
+ extern int CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *name, struct cifs_sb_info *cifs_sb);
+-extern int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb);
++ const char *name, struct cifs_sb_info *cifs_sb,
++ struct dentry *dentry);
++int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb);
+ extern int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *tcon,
+ int netfid, const char *target_name,
+ const struct nls_table *nls_codepage,
+ int remap_special_chars);
+-extern int CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb);
++int CIFSCreateHardLink(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb);
+ extern int CIFSUnixCreateHardLink(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ const char *fromName, const char *toName,
+@@ -457,6 +469,12 @@ extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ const unsigned char *searchName, char **syminfo,
+ const struct nls_table *nls_codepage, int remap);
++extern int cifs_query_reparse_point(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct cifs_sb_info *cifs_sb,
++ const char *full_path,
++ u32 *tag, struct kvec *rsp,
++ int *rsp_buftype);
+ extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+ __u16 fid, char **symlinkinfo,
+ const struct nls_table *nls_codepage);
+@@ -512,8 +530,9 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
+
+ extern struct cifs_ses *sesInfoAlloc(void);
+ extern void sesInfoFree(struct cifs_ses *);
+-extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
+-extern void tconInfoFree(struct cifs_tcon *);
++extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled,
++ enum smb3_tcon_ref_trace trace);
++extern void tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
+
+ extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ __u32 *pexpected_response_sequence_number);
+@@ -610,13 +629,13 @@ void cifs_free_hash(struct shash_desc **sdesc);
+
+ struct cifs_chan *
+ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
+-int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
++int cifs_try_adding_channels(struct cifs_ses *ses);
+ bool is_server_using_iface(struct TCP_Server_Info *server,
+ struct cifs_server_iface *iface);
+ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
+ void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
+
+-unsigned int
++int
+ cifs_ses_get_chan_index(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+ void
+@@ -640,7 +659,9 @@ cifs_chan_needs_reconnect(struct cifs_ses *ses,
+ bool
+ cifs_chan_is_iface_active(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+-int
++void
++cifs_disable_secondary_channels(struct cifs_ses *ses);
++void
+ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server);
+ int
+ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount);
+@@ -656,6 +677,12 @@ void cifs_put_tcp_super(struct super_block *sb);
+ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
+ char *extract_hostname(const char *unc);
+ char *extract_sharename(const char *unc);
++int parse_reparse_point(struct reparse_data_buffer *buf,
++ u32 plen, struct cifs_sb_info *cifs_sb,
++ bool unicode, struct cifs_open_info_data *data);
++int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev);
+
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
+@@ -695,35 +722,33 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+ return options;
+ }
+
+-struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+-void cifs_put_tcon_super(struct super_block *sb);
+ int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+
+-/* Put references of @ses and @ses->dfs_root_ses */
++/* Put references of @ses and its children */
+ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+- struct cifs_ses *rses = ses->dfs_root_ses;
++ struct cifs_ses *next;
+
+- __cifs_put_smb_ses(ses);
+- if (rses)
+- __cifs_put_smb_ses(rses);
++ do {
++ next = ses->dfs_root_ses;
++ __cifs_put_smb_ses(ses);
++ } while ((ses = next));
+ }
+
+-/* Get an active reference of @ses and @ses->dfs_root_ses.
++/* Get an active reference of @ses and its children.
+ *
+ * NOTE: make sure to call this function when incrementing reference count of
+ * @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
+ * will also get its reference count incremented.
+ *
+- * cifs_put_smb_ses() will put both references, so call it when you're done.
++ * cifs_put_smb_ses() will put all references, so call it when you're done.
+ */
+ static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
+ {
+ lockdep_assert_held(&cifs_tcp_ses_lock);
+
+- ses->ses_count++;
+- if (ses->dfs_root_ses)
+- ses->dfs_root_ses->ses_count++;
++ for (; ses; ses = ses->dfs_root_ses)
++ ses->ses_count++;
+ }
+
+ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+@@ -740,4 +765,16 @@ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+ return true;
+ }
+
++static inline void release_mid(struct mid_q_entry *mid)
++{
++ kref_put(&mid->refcount, __release_mid);
++}
++
++static inline void cifs_free_open_info(struct cifs_open_info_data *data)
++{
++ kfree(data->symlink_target);
++ free_rsp_buf(data->reparse.io.buftype, data->reparse.io.iov.iov_base);
++ memset(data, 0, sizeof(*data));
++}
++
+ #endif /* _CIFSPROTO_H */
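
The two inline helpers above turn the old parent-only reference handling into a full walk of the chain of DFS root sessions linked through ->dfs_root_ses. A minimal user-space sketch of that walk follows, with toy types (struct ses, ses_get_chain and ses_put_chain are illustrations, not the kernel's definitions); note how the put loop reads the parent pointer before dropping the reference, since the real __cifs_put_smb_ses() may free the node.

    #include <stdio.h>

    struct ses {
            int refcount;
            struct ses *dfs_root_ses;   /* parent referral session, or NULL */
    };

    static void ses_get_chain(struct ses *s)
    {
            for (; s; s = s->dfs_root_ses)
                    s->refcount++;      /* one reference per node, leaf to root */
    }

    static void ses_put_chain(struct ses *s)
    {
            struct ses *next;

            do {
                    next = s->dfs_root_ses; /* read before the put may free s */
                    s->refcount--;
            } while ((s = next));
    }

    int main(void)
    {
            struct ses root = { 1, NULL };
            struct ses mid  = { 1, &root };
            struct ses leaf = { 1, &mid };

            ses_get_chain(&leaf);           /* all three counts become 2 */
            ses_put_chain(&leaf);           /* and drop back to 1 */
            printf("%d %d %d\n", leaf.refcount, mid.refcount, root.refcount);
            return 0;
    }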
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index 25503f1a4fd213..301189ee1335ba 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -738,7 +738,7 @@ CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+
+ int
+ CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+- struct cifs_sb_info *cifs_sb)
++ struct cifs_sb_info *cifs_sb, struct dentry *dentry)
+ {
+ DELETE_FILE_REQ *pSMB = NULL;
+ DELETE_FILE_RSP *pSMBr = NULL;
+@@ -1244,8 +1244,10 @@ CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
+ *oplock |= CIFS_CREATE_ACTION;
+
+ if (buf) {
+- /* copy from CreationTime to Attributes */
+- memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
++ /* copy commonly used attributes */
++ memcpy(&buf->common_attributes,
++ &rsp->common_attributes,
++ sizeof(buf->common_attributes));
+ /* the file_info buf is endian converted by caller */
+ buf->AllocationSize = rsp->AllocationSize;
+ buf->EndOfFile = rsp->EndOfFile;
+@@ -2147,10 +2149,10 @@ CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
+ return rc;
+ }
+
+-int
+-CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb)
++int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb)
+ {
+ int rc = 0;
+ RENAME_REQ *pSMB = NULL;
+@@ -2528,10 +2530,11 @@ CIFSUnixCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
+ return rc;
+ }
+
+-int
+-CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb)
++int CIFSCreateHardLink(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb)
+ {
+ int rc = 0;
+ NT_RENAME_REQ *pSMB = NULL;
+@@ -2690,136 +2693,107 @@ CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+ return rc;
+ }
+
+-/*
+- * Recent Windows versions now create symlinks more frequently
+- * and they use the "reparse point" mechanism below. We can of course
+- * do symlinks nicely to Samba and other servers which support the
+- * CIFS Unix Extensions and we can also do SFU symlinks and "client only"
+- * "MF" symlinks optionally, but for recent Windows we really need to
+- * reenable the code below and fix the cifs_symlink callers to handle this.
+- * In the interim this code has been moved to its own config option so
+- * it is not compiled in by default until callers fixed up and more tested.
+- */
+-int
+-CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+- __u16 fid, char **symlinkinfo,
+- const struct nls_table *nls_codepage)
++int cifs_query_reparse_point(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct cifs_sb_info *cifs_sb,
++ const char *full_path,
++ u32 *tag, struct kvec *rsp,
++ int *rsp_buftype)
+ {
+- int rc = 0;
+- int bytes_returned;
+- struct smb_com_transaction_ioctl_req *pSMB;
+- struct smb_com_transaction_ioctl_rsp *pSMBr;
+- bool is_unicode;
+- unsigned int sub_len;
+- char *sub_start;
+- struct reparse_symlink_data *reparse_buf;
+- struct reparse_posix_data *posix_buf;
+- __u32 data_offset, data_count;
+- char *end_of_smb;
++ struct reparse_data_buffer *buf;
++ struct cifs_open_parms oparms;
++ TRANSACT_IOCTL_REQ *io_req = NULL;
++ TRANSACT_IOCTL_RSP *io_rsp = NULL;
++ struct cifs_fid fid;
++ __u32 data_offset, data_count, len;
++ __u8 *start, *end;
++ int io_rsp_len;
++ int oplock = 0;
++ int rc;
+
+- cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid);
+- rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+- (void **) &pSMBr);
++ cifs_tcon_dbg(FYI, "%s: path=%s\n", __func__, full_path);
++
++ if (cap_unix(tcon->ses))
++ return -EOPNOTSUPP;
++
++ oparms = (struct cifs_open_parms) {
++ .tcon = tcon,
++ .cifs_sb = cifs_sb,
++ .desired_access = FILE_READ_ATTRIBUTES,
++ .create_options = cifs_create_options(cifs_sb,
++ OPEN_REPARSE_POINT),
++ .disposition = FILE_OPEN,
++ .path = full_path,
++ .fid = &fid,
++ };
++
++ rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ if (rc)
+ return rc;
+
+- pSMB->TotalParameterCount = 0 ;
+- pSMB->TotalDataCount = 0;
+- pSMB->MaxParameterCount = cpu_to_le32(2);
+- /* BB find exact data count max from sess structure BB */
+- pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
+- pSMB->MaxSetupCount = 4;
+- pSMB->Reserved = 0;
+- pSMB->ParameterOffset = 0;
+- pSMB->DataCount = 0;
+- pSMB->DataOffset = 0;
+- pSMB->SetupCount = 4;
+- pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
+- pSMB->ParameterCount = pSMB->TotalParameterCount;
+- pSMB->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
+- pSMB->IsFsctl = 1; /* FSCTL */
+- pSMB->IsRootFlag = 0;
+- pSMB->Fid = fid; /* file handle always le */
+- pSMB->ByteCount = 0;
++ rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon,
++ (void **)&io_req, (void **)&io_rsp);
++ if (rc)
++ goto error;
+
+- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+- if (rc) {
+- cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc);
+- goto qreparse_out;
+- }
++ io_req->TotalParameterCount = 0;
++ io_req->TotalDataCount = 0;
++ io_req->MaxParameterCount = cpu_to_le32(2);
++ /* BB find exact data count max from sess structure BB */
++ io_req->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
++ io_req->MaxSetupCount = 4;
++ io_req->Reserved = 0;
++ io_req->ParameterOffset = 0;
++ io_req->DataCount = 0;
++ io_req->DataOffset = 0;
++ io_req->SetupCount = 4;
++ io_req->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
++ io_req->ParameterCount = io_req->TotalParameterCount;
++ io_req->FunctionCode = cpu_to_le32(FSCTL_GET_REPARSE_POINT);
++ io_req->IsFsctl = 1;
++ io_req->IsRootFlag = 0;
++ io_req->Fid = fid.netfid;
++ io_req->ByteCount = 0;
++
++ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *)io_req,
++ (struct smb_hdr *)io_rsp, &io_rsp_len, 0);
++ if (rc)
++ goto error;
+
+- data_offset = le32_to_cpu(pSMBr->DataOffset);
+- data_count = le32_to_cpu(pSMBr->DataCount);
+- if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
+- /* BB also check enough total bytes returned */
+- rc = -EIO; /* bad smb */
+- goto qreparse_out;
+- }
+- if (!data_count || (data_count > 2048)) {
++ data_offset = le32_to_cpu(io_rsp->DataOffset);
++ data_count = le32_to_cpu(io_rsp->DataCount);
++ if (get_bcc(&io_rsp->hdr) < 2 || data_offset > 512 ||
++ !data_count || data_count > 2048) {
+ rc = -EIO;
+- cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n");
+- goto qreparse_out;
++ goto error;
+ }
+- end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
+- reparse_buf = (struct reparse_symlink_data *)
+- ((char *)&pSMBr->hdr.Protocol + data_offset);
+- if ((char *)reparse_buf >= end_of_smb) {
++
++ end = 2 + get_bcc(&io_rsp->hdr) + (__u8 *)&io_rsp->ByteCount;
++ start = (__u8 *)&io_rsp->hdr.Protocol + data_offset;
++ if (start >= end) {
+ rc = -EIO;
+- goto qreparse_out;
+- }
+- if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
+- cifs_dbg(FYI, "NFS style reparse tag\n");
+- posix_buf = (struct reparse_posix_data *)reparse_buf;
+-
+- if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
+- cifs_dbg(FYI, "unsupported file type 0x%llx\n",
+- le64_to_cpu(posix_buf->InodeType));
+- rc = -EOPNOTSUPP;
+- goto qreparse_out;
+- }
+- is_unicode = true;
+- sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
+- if (posix_buf->PathBuffer + sub_len > end_of_smb) {
+- cifs_dbg(FYI, "reparse buf beyond SMB\n");
+- rc = -EIO;
+- goto qreparse_out;
+- }
+- *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
+- sub_len, is_unicode, nls_codepage);
+- goto qreparse_out;
+- } else if (reparse_buf->ReparseTag !=
+- cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
+- rc = -EOPNOTSUPP;
+- goto qreparse_out;
++ goto error;
+ }
+
+- /* Reparse tag is NTFS symlink */
+- sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
+- reparse_buf->PathBuffer;
+- sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
+- if (sub_start + sub_len > end_of_smb) {
+- cifs_dbg(FYI, "reparse buf beyond SMB\n");
++ data_count = le16_to_cpu(io_rsp->ByteCount);
++ buf = (struct reparse_data_buffer *)start;
++ len = sizeof(*buf);
++ if (data_count < len ||
++ data_count < le16_to_cpu(buf->ReparseDataLength) + len) {
+ rc = -EIO;
+- goto qreparse_out;
++ goto error;
+ }
+- if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+- is_unicode = true;
+- else
+- is_unicode = false;
+-
+- /* BB FIXME investigate remapping reserved chars here */
+- *symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode,
+- nls_codepage);
+- if (!*symlinkinfo)
+- rc = -ENOMEM;
+-qreparse_out:
+- cifs_buf_release(pSMB);
+
+- /*
+- * Note: On -EAGAIN error only caller can retry on handle based calls
+- * since file handle passed in no longer valid.
+- */
++ *tag = le32_to_cpu(buf->ReparseTag);
++ rsp->iov_base = io_rsp;
++ rsp->iov_len = io_rsp_len;
++ *rsp_buftype = CIFS_LARGE_BUFFER;
++ CIFSSMBClose(xid, tcon, fid.netfid);
++ return 0;
++
++error:
++ cifs_buf_release(io_req);
++ CIFSSMBClose(xid, tcon, fid.netfid);
+ return rc;
+ }
+
+@@ -5019,7 +4993,7 @@ CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ int
+ CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *file_name, __u64 size, struct cifs_sb_info *cifs_sb,
+- bool set_allocation)
++ bool set_allocation, struct dentry *dentry)
+ {
+ struct smb_com_transaction2_spi_req *pSMB = NULL;
+ struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 7b923e36501b0b..e325e06357ffb7 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -52,9 +52,6 @@
+ #include "fs_context.h"
+ #include "cifs_swn.h"
+
+-extern mempool_t *cifs_req_poolp;
+-extern bool disable_legacy_dialects;
+-
+ /* FIXME: should these be tunable? */
+ #define TLINK_ERROR_EXPIRE (1 * HZ)
+ #define TLINK_IDLE_EXPIRE (600 * HZ)
+@@ -119,15 +116,26 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ static void smb2_query_server_interfaces(struct work_struct *work)
+ {
+ int rc;
++ int xid;
+ struct cifs_tcon *tcon = container_of(work,
+ struct cifs_tcon,
+ query_interfaces.work);
++ struct TCP_Server_Info *server = tcon->ses->server;
+
+ /*
+ * query server network interfaces, in case they change
+ */
+- rc = SMB3_request_interfaces(0, tcon, false);
++ if (!server->ops->query_server_interfaces)
++ return;
++
++ xid = get_xid();
++ rc = server->ops->query_server_interfaces(xid, tcon, false);
++ free_xid(xid);
++
+ if (rc) {
++ if (rc == -EOPNOTSUPP)
++ return;
++
+ cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
+ __func__, rc);
+ }
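
The worker above now dispatches through the per-dialect operations table instead of calling SMB3_request_interfaces() directly, so connections whose dialect lacks the hook fall out early, and the call is bracketed by an xid. A sketch of that pattern with toy names (toy_ops, toy_tcon, and the stub get_xid/free_xid are illustrations only, not kernel APIs):

    #include <errno.h>
    #include <stdio.h>

    struct toy_tcon { const char *name; };

    struct toy_ops {
            /* optional per-dialect hook; left NULL by dialects without it */
            int (*query_server_interfaces)(unsigned int xid,
                                           struct toy_tcon *tcon, int in_mount);
    };

    static unsigned int get_xid(void) { return 42; }    /* stand-in */
    static void free_xid(unsigned int xid) { (void)xid; }

    static void query_ifaces(const struct toy_ops *ops, struct toy_tcon *tcon)
    {
            unsigned int xid;
            int rc;

            if (!ops->query_server_interfaces) /* e.g. SMB1: no such call */
                    return;

            xid = get_xid();                   /* bracket the op with an xid */
            rc = ops->query_server_interfaces(xid, tcon, 0);
            free_xid(xid);

            if (rc && rc != -EOPNOTSUPP)
                    fprintf(stderr, "query failed: %d\n", rc);
    }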
+@@ -156,20 +164,27 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ /* If server is a channel, select the primary channel */
+ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+
+- spin_lock(&pserver->srv_lock);
++ /* if we need to signal just this channel */
+ if (!all_channels) {
+- pserver->tcpStatus = CifsNeedReconnect;
+- spin_unlock(&pserver->srv_lock);
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus != CifsExiting)
++ server->tcpStatus = CifsNeedReconnect;
++ spin_unlock(&server->srv_lock);
+ return;
+ }
+- spin_unlock(&pserver->srv_lock);
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ spin_lock(&ses->chan_lock);
+ for (i = 0; i < ses->chan_count; i++) {
++ if (!ses->chans[i].server)
++ continue;
++
+ spin_lock(&ses->chans[i].server->srv_lock);
+- ses->chans[i].server->tcpStatus = CifsNeedReconnect;
++ if (ses->chans[i].server->tcpStatus != CifsExiting)
++ ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&ses->chans[i].server->srv_lock);
+ }
+ spin_unlock(&ses->chan_lock);
+@@ -204,14 +219,41 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ /* If server is a channel, select the primary channel */
+ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+
++ /*
++ * if the server has been marked for termination, there is a
++ * chance that the remaining channels all need reconnect. To be
++ * on the safer side, mark the session and trees for reconnect
++ * for this scenario. This might cause a few redundant session
++ * setup and tree connect requests, but it is better than not doing
++ * a tree connect when needed, and all following requests failing
++ */
++ if (server->terminate) {
++ mark_smb_session = true;
++ server = pserver;
++ }
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+- /* check if iface is still active */
+- if (!cifs_chan_is_iface_active(ses, server))
+- cifs_chan_update_iface(ses, server);
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
++ spin_unlock(&ses->ses_lock);
+
+ spin_lock(&ses->chan_lock);
++ if (cifs_ses_get_chan_index(ses, server) ==
++ CIFS_INVAL_CHAN_INDEX) {
++ spin_unlock(&ses->chan_lock);
++ continue;
++ }
++
++ if (!cifs_chan_is_iface_active(ses, server)) {
++ spin_unlock(&ses->chan_lock);
++ cifs_chan_update_iface(ses, server);
++ spin_lock(&ses->chan_lock);
++ }
++
+ if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
+ spin_unlock(&ses->chan_lock);
+ continue;
+@@ -241,6 +283,8 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ spin_lock(&tcon->tc_lock);
+ tcon->status = TID_NEED_RECON;
+ spin_unlock(&tcon->tc_lock);
++
++ cancel_delayed_work(&tcon->query_interfaces);
+ }
+ if (ses->tcon_ipc) {
+ ses->tcon_ipc->need_reconnect = true;
+@@ -454,6 +498,7 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
+ static int reconnect_dfs_server(struct TCP_Server_Info *server)
+ {
+ struct dfs_cache_tgt_iterator *target_hint = NULL;
++
+ DFS_CACHE_TGT_LIST(tl);
+ int num_targets = 0;
+ int rc = 0;
+@@ -611,6 +656,19 @@ allocate_buffers(struct TCP_Server_Info *server)
+ static bool
+ server_unresponsive(struct TCP_Server_Info *server)
+ {
++ /*
++ * If we're in the process of mounting a share or reconnecting a session
++ * and the server abruptly shut down (e.g. socket wasn't closed, packet
++ * had been ACK'ed but no SMB response), don't wait longer than 20s to
++ * negotiate protocol.
++ */
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus == CifsInNegotiate &&
++ time_after(jiffies, server->lstrp + 20 * HZ)) {
++ spin_unlock(&server->srv_lock);
++ cifs_reconnect(server, false);
++ return true;
++ }
+ /*
+ * We need to wait 3 echo intervals to make sure we handle such
+ * situations right:
+@@ -622,7 +680,6 @@ server_unresponsive(struct TCP_Server_Info *server)
+ * 65s kernel_recvmsg times out, and we see that we haven't gotten
+ * a response in >60s.
+ */
+- spin_lock(&server->srv_lock);
+ if ((server->tcpStatus == CifsGood ||
+ server->tcpStatus == CifsNeedNegotiate) &&
+ (!server->ops->can_echo || server->ops->can_echo(server)) &&
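
Both checks above rest on the kernel's wrap-safe jiffies idiom: lstrp holds the time the last server response arrived, and time_after() compares the current tick count against that timestamp plus a timeout. A condensed user-space sketch of the same arithmetic (HZ hardcoded for illustration; time_after_u32 mirrors the kernel macro's signed-difference trick):

    #include <stdbool.h>
    #include <stdint.h>

    #define HZ 250                          /* illustrative tick rate */

    /* Wrap-safe "is a after b" for free-running tick counters. */
    static bool time_after_u32(uint32_t a, uint32_t b)
    {
            return (int32_t)(b - a) < 0;
    }

    static bool negotiate_timed_out(uint32_t jiffies_now, uint32_t lstrp)
    {
            return time_after_u32(jiffies_now, lstrp + 20 * HZ);
    }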
+@@ -716,6 +773,7 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+ {
+ struct msghdr smb_msg = {};
+ struct kvec iov = {.iov_base = buf, .iov_len = to_read};
++
+ iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
+
+ return cifs_readv_from_socket(server, &smb_msg);
+@@ -1179,7 +1237,12 @@ cifs_demultiplex_thread(void *p)
+ server->total_read += length;
+
+ if (server->ops->next_header) {
+- next_offset = server->ops->next_header(buf);
++ if (server->ops->next_header(server, buf, &next_offset)) {
++ cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
++ __func__, next_offset);
++ cifs_reconnect(server, true);
++ continue;
++ }
+ if (next_offset)
+ server->pdu_size = next_offset;
+ }
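
The old next_header contract returned the offset of the next compound frame directly, leaving no way to signal a corrupt header; the hunk above changes it to return an error and pass the offset out through a pointer, so the demultiplex thread can reconnect instead of trusting a bogus pdu_size. The two shapes, side by side as plain typedefs (names abbreviated; the first parameter stands in for struct TCP_Server_Info *):

    /* old: offset of next frame, 0 when none -- errors are invisible */
    typedef unsigned int (*next_header_old_t)(char *buf);

    /* new: 0 on success with *off filled in (0 meaning last frame),
     * nonzero when the header is malformed and the caller must resync */
    typedef int (*next_header_new_t)(void *server, char *buf,
                                     unsigned int *off);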
+@@ -1366,11 +1429,13 @@ cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
+ case AF_INET: {
+ struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
+ struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
++
+ return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
+ }
+ case AF_INET6: {
+ struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
+ struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
++
+ return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
+ && saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
+ }
+@@ -1536,6 +1601,9 @@ static int match_server(struct TCP_Server_Info *server,
+ if (server->min_offload != ctx->min_offload)
+ return 0;
+
++ if (server->retrans != ctx->retrans)
++ return 0;
++
+ return 1;
+ }
+
+@@ -1586,10 +1654,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ list_del_init(&server->tcp_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+- /* For secondary channels, we pick up ref-count on the primary server */
+- if (SERVER_IS_CHAN(server))
+- cifs_put_tcp_session(server->primary_server, from_reconnect);
+-
+ cancel_delayed_work_sync(&server->echo);
+
+ if (from_reconnect)
+@@ -1603,6 +1667,10 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ else
+ cancel_delayed_work_sync(&server->reconnect);
+
++ /* For secondary channels, we pick up ref-count on the primary server */
++ if (SERVER_IS_CHAN(server))
++ cifs_put_tcp_session(server->primary_server, from_reconnect);
++
+ spin_lock(&server->srv_lock);
+ server->tcpStatus = CifsExiting;
+ spin_unlock(&server->srv_lock);
+@@ -1689,7 +1757,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
+ tcp_ses->reconnect_instance = 1;
+ tcp_ses->lstrp = jiffies;
+- tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
++ tcp_ses->compression.requested = ctx->compress;
+ spin_lock_init(&tcp_ses->req_lock);
+ spin_lock_init(&tcp_ses->srv_lock);
+ spin_lock_init(&tcp_ses->mid_lock);
+@@ -1760,6 +1828,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ goto out_err_crypto_release;
+ }
+ tcp_ses->min_offload = ctx->min_offload;
++ tcp_ses->retrans = ctx->retrans;
+ /*
+ * at this point we are the only ones with the pointer
+ * to the struct since the kernel thread not created yet
+@@ -1811,6 +1880,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ ctx->sectype != ses->sectype)
+ return 0;
+
++ if (ctx->dfs_root_ses != ses->dfs_root_ses)
++ return 0;
++
+ /*
+ * If an existing session is limited to less channels than
+ * requested, it should not be reused
+@@ -1883,7 +1955,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ }
+
+ /* no need to setup directory caching on IPC share, so pass in false */
+- tcon = tcon_info_alloc(false);
++ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);
+ if (tcon == NULL)
+ return -ENOMEM;
+
+@@ -1900,7 +1972,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+
+ if (rc) {
+ cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
+- tconInfoFree(tcon);
++ tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail);
+ goto out;
+ }
+
+@@ -1914,31 +1986,6 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ return rc;
+ }
+
+-/**
+- * cifs_free_ipc - helper to release the session IPC tcon
+- * @ses: smb session to unmount the IPC from
+- *
+- * Needs to be called everytime a session is destroyed.
+- *
+- * On session close, the IPC is closed and the server must release all tcons of the session.
+- * No need to send a tree disconnect here.
+- *
+- * Besides, it will make the server to not close durable and resilient files on session close, as
+- * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
+- */
+-static int
+-cifs_free_ipc(struct cifs_ses *ses)
+-{
+- struct cifs_tcon *tcon = ses->tcon_ipc;
+-
+- if (tcon == NULL)
+- return 0;
+-
+- tconInfoFree(tcon);
+- ses->tcon_ipc = NULL;
+- return 0;
+-}
+-
+ static struct cifs_ses *
+ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+@@ -1969,68 +2016,73 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+
+ void __cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+- unsigned int rc, xid;
+- unsigned int chan_count;
+ struct TCP_Server_Info *server = ses->server;
++ struct cifs_tcon *tcon;
++ unsigned int xid;
++ size_t i;
++ bool do_logoff;
++ int rc;
+
++ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_EXITING) {
++ cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
++ __func__, ses->Suid, ses->ses_count, ses->ses_status,
++ ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
++ if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
+ spin_unlock(&ses->ses_lock);
++ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
+- spin_unlock(&ses->ses_lock);
++ /* ses_count can never go negative */
++ WARN_ON(ses->ses_count < 0);
+
+- cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
+- cifs_dbg(FYI,
+- "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
++ spin_lock(&ses->chan_lock);
++ cifs_chan_clear_need_reconnect(ses, server);
++ spin_unlock(&ses->chan_lock);
+
+- spin_lock(&cifs_tcp_ses_lock);
+- if (--ses->ses_count > 0) {
+- spin_unlock(&cifs_tcp_ses_lock);
+- return;
+- }
+- spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_GOOD)
+- ses->ses_status = SES_EXITING;
++ do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
++ ses->ses_status = SES_EXITING;
++ tcon = ses->tcon_ipc;
++ ses->tcon_ipc = NULL;
+ spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+- /* ses_count can never go negative */
+- WARN_ON(ses->ses_count < 0);
+-
+- spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_EXITING && server->ops->logoff) {
+- spin_unlock(&ses->ses_lock);
+- cifs_free_ipc(ses);
++ /*
++ * On session close, the IPC is closed and the server must release all
++ * tcons of the session. No need to send a tree disconnect here.
++ *
++ * Besides, it will make the server to not close durable and resilient
++ * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
++ * SMB2 LOGOFF Request.
++ */
++ tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc);
++ if (do_logoff) {
+ xid = get_xid();
+ rc = server->ops->logoff(xid, ses);
+ if (rc)
+ cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ __func__, rc);
+ _free_xid(xid);
+- } else {
+- spin_unlock(&ses->ses_lock);
+- cifs_free_ipc(ses);
+ }
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_del_init(&ses->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+- chan_count = ses->chan_count;
+-
+ /* close any extra channels */
+- if (chan_count > 1) {
+- int i;
+-
+- for (i = 1; i < chan_count; i++) {
+- if (ses->chans[i].iface) {
+- kref_put(&ses->chans[i].iface->refcount, release_iface);
+- ses->chans[i].iface = NULL;
+- }
+- cifs_put_tcp_session(ses->chans[i].server, 0);
+- ses->chans[i].server = NULL;
++ for (i = 1; i < ses->chan_count; i++) {
++ if (ses->chans[i].iface) {
++ kref_put(&ses->chans[i].iface->refcount, release_iface);
++ ses->chans[i].iface = NULL;
+ }
++ cifs_put_tcp_session(ses->chans[i].server, 0);
++ ses->chans[i].server = NULL;
++ }
++
++ /* we now account for primary channel in iface->refcount */
++ if (ses->chans[0].iface) {
++ kref_put(&ses->chans[0].iface->refcount, release_iface);
++ ses->chans[0].server = NULL;
+ }
+
+ sesInfoFree(ses);
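
The rewrite above is an instance of a common teardown pattern: decide everything under the locks (drop the count, flip the status to exiting, steal the IPC tcon pointer), then do the slow work -- tconInfoFree(), the LOGOFF round trip, channel teardown -- with no locks held. A compilable toy reduction of the pattern (a pthread mutex and toy_ses stand in for the kernel's spinlocks and session; not the kernel's actual types):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct toy_ses {
            pthread_mutex_t lock;
            int refcount;
            bool exiting;
            char *ipc;                  /* stand-in for ses->tcon_ipc */
    };

    static void toy_put_ses(struct toy_ses *s)
    {
            char *ipc;
            bool last;

            pthread_mutex_lock(&s->lock);
            /* short-circuit: an exiting session is never decremented */
            last = !s->exiting && --s->refcount == 0;
            if (!last) {
                    pthread_mutex_unlock(&s->lock);
                    return;
            }
            s->exiting = true;          /* block racing lookups */
            ipc = s->ipc;               /* steal the pointer ... */
            s->ipc = NULL;              /* ... so it is freed exactly once */
            pthread_mutex_unlock(&s->lock);

            free(ipc);                  /* slow teardown with no locks held */
    }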
+@@ -2143,6 +2195,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
+ }
+
+ ++delim;
++ /* BB consider adding support for password2 (Key Rotation) for multiuser in future */
+ ctx->password = kstrndup(delim, len, GFP_KERNEL);
+ if (!ctx->password) {
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
+@@ -2166,6 +2219,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
+ kfree(ctx->username);
+ ctx->username = NULL;
+ kfree_sensitive(ctx->password);
++ /* no need to free ctx->password2 since not allocated in this path */
+ ctx->password = NULL;
+ goto out_key_put;
+ }
+@@ -2277,6 +2331,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ if (!ses->password)
+ goto get_ses_fail;
+ }
++ /* ctx->password freed at unmount */
++ if (ctx->password2) {
++ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
++ if (!ses->password2)
++ goto get_ses_fail;
++ }
+ if (ctx->domainname) {
+ ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
+ if (!ses->domainName)
+@@ -2323,9 +2383,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ * need to lock before changing something in the session.
+ */
+ spin_lock(&cifs_tcp_ses_lock);
++ if (ctx->dfs_root_ses)
++ cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
+ ses->dfs_root_ses = ctx->dfs_root_ses;
+- if (ses->dfs_root_ses)
+- ses->dfs_root_ses->ses_count++;
+ list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+@@ -2384,6 +2444,8 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ continue;
+ }
+ ++tcon->tc_count;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_find);
+ spin_unlock(&tcon->tc_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return tcon;
+@@ -2393,7 +2455,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ }
+
+ void
+-cifs_put_tcon(struct cifs_tcon *tcon)
++cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
+ {
+ unsigned int xid;
+ struct cifs_ses *ses;
+@@ -2409,6 +2471,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count - 1, trace);
+ if (--tcon->tc_count > 0) {
+ spin_unlock(&tcon->tc_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+@@ -2445,7 +2508,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ _free_xid(xid);
+
+ cifs_fscache_release_super_cookie(tcon);
+- tconInfoFree(tcon);
++ tconInfoFree(tcon, netfs_trace_tcon_ref_free);
+ cifs_put_smb_ses(ses);
+ }
+
+@@ -2499,7 +2562,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ nohandlecache = ctx->nohandlecache;
+ else
+ nohandlecache = true;
+- tcon = tcon_info_alloc(!nohandlecache);
++ tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new);
+ if (tcon == NULL) {
+ rc = -ENOMEM;
+ goto out_fail;
+@@ -2563,9 +2626,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
++ } else if (ses->server->vals->protocol_id == SMB10_PROT_ID)
++ if (cap_unix(ses))
++ cifs_dbg(FYI, "Unix Extensions requested on SMB1 mount\n");
++ else {
++ cifs_dbg(VFS, "SMB1 Unix Extensions not supported by server\n");
++ rc = -EOPNOTSUPP;
++ goto out_fail;
+ } else {
+- cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+- "disabled but required for POSIX extensions\n");
++ cifs_dbg(VFS,
++ "Check vers= mount option. SMB3.11 disabled but required for POSIX extensions\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
+ }
+@@ -2689,7 +2759,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ return tcon;
+
+ out_fail:
+- tconInfoFree(tcon);
++ tconInfoFree(tcon, netfs_trace_tcon_ref_free_fail);
+ return ERR_PTR(rc);
+ }
+
+@@ -2706,9 +2776,8 @@ cifs_put_tlink(struct tcon_link *tlink)
+ }
+
+ if (!IS_ERR(tlink_tcon(tlink)))
+- cifs_put_tcon(tlink_tcon(tlink));
++ cifs_put_tcon(tlink_tcon(tlink), netfs_trace_tcon_ref_put_tlink);
+ kfree(tlink);
+- return;
+ }
+
+ static int
+@@ -2755,6 +2824,8 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ return 0;
+ if (old->ctx->closetimeo != new->ctx->closetimeo)
+ return 0;
++ if (old->ctx->reparse_type != new->ctx->reparse_type)
++ return 0;
+
+ return 1;
+ }
+@@ -2849,6 +2920,7 @@ static inline void
+ cifs_reclassify_socket4(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
++
+ BUG_ON(!sock_allow_reclassification(sk));
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
+ &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
+@@ -2858,6 +2930,7 @@ static inline void
+ cifs_reclassify_socket6(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
++
+ BUG_ON(!sock_allow_reclassification(sk));
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
+ &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
+@@ -2892,15 +2965,18 @@ static int
+ bind_socket(struct TCP_Server_Info *server)
+ {
+ int rc = 0;
++
+ if (server->srcaddr.ss_family != AF_UNSPEC) {
+ /* Bind to the specified local IP address */
+ struct socket *socket = server->ssocket;
++
+ rc = kernel_bind(socket,
+ (struct sockaddr *) &server->srcaddr,
+ sizeof(server->srcaddr));
+ if (rc < 0) {
+ struct sockaddr_in *saddr4;
+ struct sockaddr_in6 *saddr6;
++
+ saddr4 = (struct sockaddr_in *)&server->srcaddr;
+ saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
+ if (saddr6->sin6_family == AF_INET6)
+@@ -3130,6 +3206,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
+
+ if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
+ __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
++
+ cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
+ /*
+ * check for reconnect case in which we do not
+@@ -3264,11 +3341,14 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
+ int rc = 0;
+
+ if (mnt_ctx->tcon)
+- cifs_put_tcon(mnt_ctx->tcon);
++ cifs_put_tcon(mnt_ctx->tcon, netfs_trace_tcon_ref_put_mnt_ctx);
+ else if (mnt_ctx->ses)
+ cifs_put_smb_ses(mnt_ctx->ses);
+ else if (mnt_ctx->server)
+ cifs_put_tcp_session(mnt_ctx->server, 0);
++ mnt_ctx->ses = NULL;
++ mnt_ctx->tcon = NULL;
++ mnt_ctx->server = NULL;
+ mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+ free_xid(mnt_ctx->xid);
+ }
+@@ -3390,8 +3470,18 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
+ * the user on mount
+ */
+ if ((cifs_sb->ctx->wsize == 0) ||
+- (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
+- cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
++ (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) {
++ cifs_sb->ctx->wsize =
++ round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
++ /*
++ * in the very unlikely event that the server sent a max write size under PAGE_SIZE,
++ * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096
++ */
++ if (cifs_sb->ctx->wsize == 0) {
++ cifs_sb->ctx->wsize = PAGE_SIZE;
++ cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n");
++ }
++ }
+ if ((cifs_sb->ctx->rsize == 0) ||
+ (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
+ cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
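
The wsize clamp above rounds the negotiated write size down to a whole number of pages and then catches the degenerate case of a server advertising less than one page. The arithmetic in isolation, assuming the usual 4096-byte page (round_down is open-coded as a power-of-two mask, which is all the kernel macro does here):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int clamp_wsize(unsigned int negotiated)
    {
            unsigned int wsize = negotiated & ~(PAGE_SIZE - 1); /* round_down */

            if (wsize == 0)             /* server offered < one page */
                    wsize = PAGE_SIZE;  /* absolute minimum */
            return wsize;
    }

    int main(void)
    {
            printf("%u\n", clamp_wsize(65536)); /* 65536 */
            printf("%u\n", clamp_wsize(5000));  /* 4096  */
            printf("%u\n", clamp_wsize(100));   /* 4096 (degenerate case) */
            return 0;
    }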
+@@ -3537,8 +3627,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ bool isdfs;
+ int rc;
+
+- INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
+-
+ rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ if (rc)
+ goto error;
+@@ -3560,7 +3648,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ ctx->prepath = NULL;
+
+ out:
+- cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
++ cifs_try_adding_channels(mnt_ctx.ses);
+ rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+ if (rc)
+ goto error;
+@@ -3569,7 +3657,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ return rc;
+
+ error:
+- dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
+ cifs_mount_put_conns(&mnt_ctx);
+ return rc;
+ }
+@@ -3584,6 +3671,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ goto error;
+
+ rc = cifs_mount_get_tcon(&mnt_ctx);
++ if (!rc) {
++ /*
++ * Prevent superblock from being created with any missing
++ * connections.
++ */
++ if (WARN_ON(!mnt_ctx.server))
++ rc = -EHOSTDOWN;
++ else if (WARN_ON(!mnt_ctx.ses))
++ rc = -EACCES;
++ else if (WARN_ON(!mnt_ctx.tcon))
++ rc = -ENOENT;
++ }
+ if (rc)
+ goto error;
+
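
The sanity block above gives each missing piece of the server/session/tcon triple its own errno, so a half-built mount fails loudly instead of creating a superblock with dangling connections; the WARN_ON marks each as a should-never-happen condition. Reduced to a sketch:

    #include <errno.h>

    /* Distinct errnos make the failure mode visible in the mount error. */
    static int check_mount_triple(const void *server, const void *ses,
                                  const void *tcon)
    {
            if (!server)
                    return -EHOSTDOWN;  /* no transport connection */
            if (!ses)
                    return -EACCES;     /* no authenticated session */
            if (!tcon)
                    return -ENOENT;     /* no tree connection */
            return 0;
    }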
+@@ -3606,6 +3705,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ }
+ #endif
+
++#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ /*
+ * Issue a TREE_CONNECT request.
+ */
+@@ -3633,7 +3733,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+ smb_buffer_response = smb_buffer;
+
+ header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
+- NULL /*no tid */ , 4 /*wct */ );
++ NULL /*no tid */, 4 /*wct */);
+
+ smb_buffer->Mid = get_next_mid(ses->server);
+ smb_buffer->Uid = ses->Suid;
+@@ -3652,12 +3752,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+ if (ses->server->sign)
+ smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+- if (ses->capabilities & CAP_STATUS32) {
++ if (ses->capabilities & CAP_STATUS32)
+ smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
+- }
+- if (ses->capabilities & CAP_DFS) {
++
++ if (ses->capabilities & CAP_DFS)
+ smb_buffer->Flags2 |= SMBFLG2_DFS;
+- }
++
+ if (ses->capabilities & CAP_UNICODE) {
+ smb_buffer->Flags2 |= SMBFLG2_UNICODE;
+ length =
+@@ -3727,11 +3827,25 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+ else
+ tcon->Flags = 0;
+ cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
+- }
+
++ /*
++ * reset_cifs_unix_caps calls QFSInfo which requires
++ * need_reconnect to be false, but we would not need to call
++ * reset_caps if this were not a reconnect case so must check
++ * need_reconnect flag here. The caller will also clear
++ * need_reconnect when tcon was successful but needed to be
++ * cleared earlier in the case of unix extensions reconnect
++ */
++ if (tcon->need_reconnect && tcon->unix_ext) {
++ cifs_dbg(FYI, "resetting caps for %s\n", tcon->tree_name);
++ tcon->need_reconnect = false;
++ reset_cifs_unix_caps(xid, tcon, NULL, NULL);
++ }
++ }
+ cifs_buf_release(smb_buffer);
+ return rc;
+ }
++#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+
+ static void delayed_free(struct rcu_head *p)
+ {
+@@ -3849,8 +3963,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
+- if (!is_binding)
++ if (!is_binding) {
+ ses->ses_status = SES_IN_SETUP;
++
++ /* force iface_list refresh */
++ ses->iface_last_update = 0;
++ }
+ spin_unlock(&ses->ses_lock);
+
+ /* update ses ip_addr only for primary chan */
+@@ -3917,13 +4035,14 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
+ }
+
+ static struct cifs_tcon *
+-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
++__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ {
+ int rc;
+ struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon = NULL;
+ struct smb3_fs_context *ctx;
++ char *origin_fullpath = NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+@@ -3947,6 +4066,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ ctx->sign = master_tcon->ses->sign;
+ ctx->seal = master_tcon->seal;
+ ctx->witness = master_tcon->use_witness;
++ ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;
+
+ rc = cifs_set_vol_auth(ctx, master_tcon->ses);
+ if (rc) {
+@@ -3966,12 +4086,39 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ goto out;
+ }
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ spin_lock(&master_tcon->tc_lock);
++ if (master_tcon->origin_fullpath) {
++ spin_unlock(&master_tcon->tc_lock);
++ origin_fullpath = dfs_get_path(cifs_sb, cifs_sb->ctx->source);
++ if (IS_ERR(origin_fullpath)) {
++ tcon = ERR_CAST(origin_fullpath);
++ origin_fullpath = NULL;
++ cifs_put_smb_ses(ses);
++ goto out;
++ }
++ } else {
++ spin_unlock(&master_tcon->tc_lock);
++ }
++#endif
++
+ tcon = cifs_get_tcon(ses, ctx);
+ if (IS_ERR(tcon)) {
+ cifs_put_smb_ses(ses);
+ goto out;
+ }
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ if (origin_fullpath) {
++ spin_lock(&tcon->tc_lock);
++ tcon->origin_fullpath = origin_fullpath;
++ spin_unlock(&tcon->tc_lock);
++ origin_fullpath = NULL;
++ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
++ dfs_cache_get_ttl() * HZ);
++ }
++#endif
++
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ if (cap_unix(ses))
+ reset_cifs_unix_caps(0, tcon, NULL, ctx);
+@@ -3980,11 +4127,23 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ out:
+ kfree(ctx->username);
+ kfree_sensitive(ctx->password);
++ kfree(origin_fullpath);
+ kfree(ctx);
+
+ return tcon;
+ }
+
++static struct cifs_tcon *
++cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
++{
++ struct cifs_tcon *ret;
++
++ cifs_mount_lock();
++ ret = __cifs_construct_tcon(cifs_sb, fsuid);
++ cifs_mount_unlock();
++ return ret;
++}
++
+ struct cifs_tcon *
+ cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
+ {
+@@ -4176,6 +4335,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
++
++ /* if tcon is marked for needing reconnect, update state */
++ if (tcon->need_reconnect)
++ tcon->status = TID_NEED_TCON;
++
+ if (tcon->status == TID_GOOD) {
+ spin_unlock(&tcon->tc_lock);
+ return 0;
+diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
+index 81b84151450d2d..3ec965547e3d4d 100644
+--- a/fs/smb/client/dfs.c
++++ b/fs/smb/client/dfs.c
+@@ -66,33 +66,20 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
+ }
+
+ /*
+- * Track individual DFS referral servers used by new DFS mount.
+- *
+- * On success, their lifetime will be shared by final tcon (dfs_ses_list).
+- * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount().
++ * Get an active reference of @ses so that next call to cifs_put_tcon() won't
++ * release it as any new DFS referrals must go through its IPC tcon.
+ */
+-static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
++static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+- struct dfs_root_ses *root_ses;
+ struct cifs_ses *ses = mnt_ctx->ses;
+
+ if (ses) {
+- root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
+- if (!root_ses)
+- return -ENOMEM;
+-
+- INIT_LIST_HEAD(&root_ses->list);
+-
+ spin_lock(&cifs_tcp_ses_lock);
+ cifs_smb_ses_inc_refcount(ses);
+ spin_unlock(&cifs_tcp_ses_lock);
+- root_ses->ses = ses;
+- list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
+ }
+- /* Select new DFS referral server so that new referrals go through it */
+ ctx->dfs_root_ses = ses;
+- return 0;
+ }
+
+ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
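
add_root_smb_session() no longer keeps a side list of referral servers; it simply pins @ses with an extra reference so the session survives any intervening cifs_put_tcon() while referrals are still resolved through its IPC tcon. The pinning idiom on its own (toy_ses and pin_root_ses are illustrative stand-ins, not kernel helpers):

    struct toy_ses { int refcount; };

    /* Take a reference before publishing the pointer somewhere that may
     * outlive the caller's own reference (here, ctx->dfs_root_ses). */
    static struct toy_ses *pin_root_ses(struct toy_ses *ses)
    {
            if (ses)
                    ses->refcount++;    /* kernel holds cifs_tcp_ses_lock here */
            return ses;
    }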
+@@ -185,11 +172,8 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
+ continue;
+ }
+
+- if (is_refsrv) {
+- rc = add_root_smb_session(mnt_ctx);
+- if (rc)
+- goto out;
+- }
++ if (is_refsrv)
++ add_root_smb_session(mnt_ctx);
+
+ rc = ref_walk_advance(rw);
+ if (!rc) {
+@@ -232,6 +216,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_tcon *tcon;
+ char *origin_fullpath;
++ bool new_tcon = true;
+ int rc;
+
+ origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
+@@ -239,6 +224,18 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ return PTR_ERR(origin_fullpath);
+
+ rc = dfs_referral_walk(mnt_ctx);
++ if (!rc) {
++ /*
++ * Prevent superblock from being created with any missing
++ * connections.
++ */
++ if (WARN_ON(!mnt_ctx->server))
++ rc = -EHOSTDOWN;
++ else if (WARN_ON(!mnt_ctx->ses))
++ rc = -EACCES;
++ else if (WARN_ON(!mnt_ctx->tcon))
++ rc = -ENOENT;
++ }
+ if (rc)
+ goto out;
+
+@@ -247,15 +244,14 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ if (!tcon->origin_fullpath) {
+ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
++ } else {
++ new_tcon = false;
+ }
+ spin_unlock(&tcon->tc_lock);
+
+- if (list_empty(&tcon->dfs_ses_list)) {
+- list_replace_init(&mnt_ctx->dfs_ses_list, &tcon->dfs_ses_list);
++ if (new_tcon) {
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
+- } else {
+- dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+ }
+
+ out:
+@@ -263,15 +259,23 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ return rc;
+ }
+
+-/* Resolve UNC hostname in @ctx->source and set ip addr in @ctx->dstaddr */
++/*
++ * If @ctx->dfs_automount, then update @ctx->dstaddr earlier with the DFS root
++ * server from where we'll start following any referrals. Otherwise rely on the
++ * value provided by mount(2) as the user might not have dns_resolver key set up
++ * and therefore failing to upcall to resolve UNC hostname under @ctx->source.
++ */
+ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
+ {
+ struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
+- int rc;
++ int rc = 0;
+
+- rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
+- if (!rc)
+- cifs_set_port(addr, ctx->port);
++ if (!ctx->nodfs && ctx->dfs_automount) {
++ rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
++ if (!rc)
++ cifs_set_port(addr, ctx->port);
++ ctx->dfs_automount = false;
++ }
+ return rc;
+ }
+
+@@ -290,7 +294,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ if (rc)
+ return rc;
+
+- ctx->dfs_root_ses = mnt_ctx->ses;
+ /*
+ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+ * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
+@@ -316,7 +319,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+
+ *isdfs = true;
+ add_root_smb_session(mnt_ctx);
+- return __dfs_mount_share(mnt_ctx);
++ rc = __dfs_mount_share(mnt_ctx);
++ dfs_put_root_smb_sessions(mnt_ctx);
++ return rc;
+ }
+
+ /* Update dfs referral path of superblock */
+@@ -557,6 +562,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
++
++ /* if tcon is marked for needing reconnect, update state */
++ if (tcon->need_reconnect)
++ tcon->status = TID_NEED_TCON;
++
+ if (tcon->status == TID_GOOD) {
+ spin_unlock(&tcon->tc_lock);
+ return 0;
+@@ -617,8 +627,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ spin_lock(&tcon->tc_lock);
+ if (tcon->status == TID_IN_TCON)
+ tcon->status = TID_GOOD;
+- spin_unlock(&tcon->tc_lock);
+ tcon->need_reconnect = false;
++ spin_unlock(&tcon->tc_lock);
+ }
+
+ return rc;
+diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
+index 875ab7ae57fcdf..e5c4dcf837503a 100644
+--- a/fs/smb/client/dfs.h
++++ b/fs/smb/client/dfs.h
+@@ -7,7 +7,9 @@
+ #define _CIFS_DFS_H
+
+ #include "cifsglob.h"
++#include "cifsproto.h"
+ #include "fs_context.h"
++#include "dfs_cache.h"
+ #include "cifs_unicode.h"
+ #include <linux/namei.h>
+
+@@ -114,11 +116,6 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
+ ref_walk_tit(rw));
+ }
+
+-struct dfs_root_ses {
+- struct list_head list;
+- struct cifs_ses *ses;
+-};
+-
+ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
+ struct smb3_fs_context *ctx);
+ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+@@ -133,20 +130,32 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++ struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses;
+
+- return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
++ return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), path, ref, tl);
+ }
+
+-static inline void dfs_put_root_smb_sessions(struct list_head *head)
++/*
++ * cifs_get_smb_ses() already guarantees an active reference of
++ * @ses->dfs_root_ses when a new session is created, so we need to put extra
++ * references of all DFS root sessions that were used across the mount process
++ * in dfs_mount_share().
++ */
++static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
+ {
+- struct dfs_root_ses *root, *tmp;
++ const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++ struct cifs_ses *ses = ctx->dfs_root_ses;
++ struct cifs_ses *cur;
++
++ if (!ses)
++ return;
+
+- list_for_each_entry_safe(root, tmp, head, list) {
+- list_del_init(&root->list);
+- cifs_put_smb_ses(root->ses);
+- kfree(root);
++ for (cur = ses; cur; cur = cur->dfs_root_ses) {
++ if (cur->dfs_root_ses)
++ cifs_put_smb_ses(cur->dfs_root_ses);
+ }
++ cifs_put_smb_ses(ses);
+ }
+
+ #endif /* _CIFS_DFS_H */
+diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
+index 508d831fabe378..11c8efecf7aa12 100644
+--- a/fs/smb/client/dfs_cache.c
++++ b/fs/smb/client/dfs_cache.c
+@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses *ses)
+ return ret;
+ }
+
+-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
++/* Refresh dfs referral of @ses and mark it for reconnect if needed */
++static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+ {
+ struct TCP_Server_Info *server = ses->server;
+ DFS_CACHE_TGT_LIST(old_tl);
+@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
+ bool needs_refresh = false;
+ struct cache_entry *ce;
+ unsigned int xid;
++ char *path = NULL;
+ int rc = 0;
+
+ xid = get_xid();
+
++ mutex_lock(&server->refpath_lock);
++ if (server->leaf_fullpath) {
++ path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
++ if (!path)
++ rc = -ENOMEM;
++ }
++ mutex_unlock(&server->refpath_lock);
++ if (!path)
++ goto out;
++
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
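
Folding the path handling into __refresh_ses_referral() also changes how the leaf path is consumed: it is duplicated under refpath_lock and the copy is used for the potentially blocking cache lookup, rather than holding the mutex across the whole refresh. The snapshot idiom in isolation (POSIX stand-ins; the + 1 skips the leading path separator, as in the kernel code):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a mutex-protected string so it can be used, and even
     * block, without the lock held; the caller frees the copy. */
    static char *snapshot_path(pthread_mutex_t *lk, const char *shared)
    {
            char *copy = NULL;

            pthread_mutex_lock(lk);
            if (shared)
                    copy = strdup(shared + 1);  /* skip leading separator */
            pthread_mutex_unlock(lk);
            return copy;
    }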
+@@ -1218,19 +1229,17 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
+ free_xid(xid);
+ dfs_cache_free_tgts(&old_tl);
+ dfs_cache_free_tgts(&new_tl);
+- return rc;
++ kfree(path);
+ }
+
+-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
++static inline void refresh_ses_referral(struct cifs_ses *ses)
+ {
+- struct TCP_Server_Info *server = tcon->ses->server;
+- struct cifs_ses *ses = tcon->ses;
++ __refresh_ses_referral(ses, false);
++}
+
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
+- mutex_unlock(&server->refpath_lock);
+- return 0;
++static inline void force_refresh_ses_referral(struct cifs_ses *ses)
++{
++ __refresh_ses_referral(ses, true);
+ }
+
+ /**
+@@ -1271,34 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+ */
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+
+- return refresh_tcon(tcon, true);
++ force_refresh_ses_referral(tcon->ses);
++ return 0;
+ }
+
+ /* Refresh all DFS referrals related to DFS tcon */
+ void dfs_cache_refresh(struct work_struct *work)
+ {
+- struct TCP_Server_Info *server;
+- struct dfs_root_ses *rses;
+ struct cifs_tcon *tcon;
+ struct cifs_ses *ses;
+
+ tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+- ses = tcon->ses;
+- server = ses->server;
+
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+- mutex_unlock(&server->refpath_lock);
+-
+- list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+- ses = rses->ses;
+- server = ses->server;
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+- mutex_unlock(&server->refpath_lock);
+- }
++ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
++ refresh_ses_referral(ses);
+
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ atomic_read(&dfs_cache_ttl) * HZ);
+diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
+index 580a27a3a7e62e..864b194dbaa0a0 100644
+--- a/fs/smb/client/dir.c
++++ b/fs/smb/client/dir.c
+@@ -189,6 +189,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ int disposition;
+ struct TCP_Server_Info *server = tcon->ses->server;
+ struct cifs_open_parms oparms;
++ int rdwr_for_fscache = 0;
+
+ *oplock = 0;
+ if (tcon->ses->server->oplocks)
+@@ -200,6 +201,10 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ return PTR_ERR(full_path);
+ }
+
++ /* If we're caching, we need to be able to fill in around partial writes. */
++ if (cifs_fscache_enabled(inode) && (oflags & O_ACCMODE) == O_WRONLY)
++ rdwr_for_fscache = 1;
++
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
+ (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+@@ -276,6 +281,8 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ desired_access |= GENERIC_READ; /* is this too little? */
+ if (OPEN_FMODE(oflags) & FMODE_WRITE)
+ desired_access |= GENERIC_WRITE;
++ if (rdwr_for_fscache == 1)
++ desired_access |= GENERIC_READ;
+
+ disposition = FILE_OVERWRITE_IF;
+ if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+@@ -304,6 +311,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
+ create_options |= CREATE_OPTION_READONLY;
+
++retry_open:
+ oparms = (struct cifs_open_parms) {
+ .tcon = tcon,
+ .cifs_sb = cifs_sb,
+@@ -317,8 +325,15 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ rc = server->ops->open(xid, &oparms, oplock, buf);
+ if (rc) {
+ cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
++ if (rc == -EACCES && rdwr_for_fscache == 1) {
++ desired_access &= ~GENERIC_READ;
++ rdwr_for_fscache = 2;
++ goto retry_open;
++ }
+ goto out;
+ }
++ if (rdwr_for_fscache == 2)
++ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
+
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ /*
+@@ -612,11 +627,18 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
+ goto mknod_out;
+ }
+
++ trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path);
++
+ rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
+ full_path, mode,
+ device_number);
+
+ mknod_out:
++ if (rc)
++ trace_smb3_mknod_err(xid, tcon->ses->Suid, tcon->tid, rc);
++ else
++ trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid);
++
+ free_dentry_path(page);
+ free_xid(xid);
+ cifs_put_tlink(tlink);
+@@ -680,9 +702,10 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ full_path, d_inode(direntry));
+
+ again:
+- if (pTcon->posix_extensions)
+- rc = smb311_posix_get_inode_info(&newInode, full_path, parent_dir_inode->i_sb, xid);
+- else if (pTcon->unix_ext) {
++ if (pTcon->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&newInode, full_path, NULL,
++ parent_dir_inode->i_sb, xid);
++ } else if (pTcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&newInode, full_path,
+ parent_dir_inode->i_sb, xid);
+ } else {
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 2108b3b40ce92e..cb75b95efb7013 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -87,7 +87,7 @@ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len
+ continue;
+ if (!folio_test_writeback(folio)) {
+ WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
+- len, start, folio_index(folio), end);
++ len, start, folio->index, end);
+ continue;
+ }
+
+@@ -120,7 +120,7 @@ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len
+ continue;
+ if (!folio_test_writeback(folio)) {
+ WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
+- len, start, folio_index(folio), end);
++ len, start, folio->index, end);
+ continue;
+ }
+
+@@ -151,7 +151,7 @@ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int le
+ xas_for_each(&xas, folio, end) {
+ if (!folio_test_writeback(folio)) {
+ WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
+- len, start, folio_index(folio), end);
++ len, start, folio->index, end);
+ continue;
+ }
+
+@@ -175,6 +175,9 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
++ if (tcon->need_reconnect)
++ tcon->status = TID_NEED_RECON;
++
+ if (tcon->status != TID_NEED_RECON) {
+ spin_unlock(&tcon->tc_lock);
+ return;
+@@ -203,12 +206,12 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+ */
+ }
+
+-static inline int cifs_convert_flags(unsigned int flags)
++static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
+ {
+ if ((flags & O_ACCMODE) == O_RDONLY)
+ return GENERIC_READ;
+ else if ((flags & O_ACCMODE) == O_WRONLY)
+- return GENERIC_WRITE;
++ return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
+ else if ((flags & O_ACCMODE) == O_RDWR) {
+ /* GENERIC_ALL is too much permission to request
+ can cause unnecessary access denied on create */
+@@ -326,7 +329,7 @@ int cifs_posix_open(const char *full_path, struct inode **pinode,
+ }
+ } else {
+ cifs_revalidate_mapping(*pinode);
+- rc = cifs_fattr_to_inode(*pinode, &fattr);
++ rc = cifs_fattr_to_inode(*pinode, &fattr, false);
+ }
+
+ posix_open_ret:
+@@ -345,11 +348,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ int create_options = CREATE_NOT_DIR;
+ struct TCP_Server_Info *server = tcon->ses->server;
+ struct cifs_open_parms oparms;
++ int rdwr_for_fscache = 0;
+
+ if (!server->ops->open)
+ return -ENOSYS;
+
+- desired_access = cifs_convert_flags(f_flags);
++ /* If we're caching, we need to be able to fill in around partial writes. */
++ if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
++ rdwr_for_fscache = 1;
++
++ desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
+
+ /*********************************************************************
+ * open flag mapping table:
+@@ -386,6 +394,7 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ if (f_flags & O_DIRECT)
+ create_options |= CREATE_NO_BUFFER;
+
++retry_open:
+ oparms = (struct cifs_open_parms) {
+ .tcon = tcon,
+ .cifs_sb = cifs_sb,
+@@ -397,8 +406,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ };
+
+ rc = server->ops->open(xid, &oparms, oplock, buf);
+- if (rc)
++ if (rc) {
++ if (rc == -EACCES && rdwr_for_fscache == 1) {
++ desired_access = cifs_convert_flags(f_flags, 0);
++ rdwr_for_fscache = 2;
++ goto retry_open;
++ }
+ return rc;
++ }
++ if (rdwr_for_fscache == 2)
++ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
+
+ /* TODO: Add support for calling posix query info but with passing in fid */
+ if (tcon->unix_ext)
+@@ -442,6 +459,7 @@ cifs_down_write(struct rw_semaphore *sem)
+ }
+
+ static void cifsFileInfo_put_work(struct work_struct *work);
++void serverclose_work(struct work_struct *work);
+
+ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ struct tcon_link *tlink, __u32 oplock,
+@@ -488,6 +506,7 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ cfile->tlink = cifs_get_tlink(tlink);
+ INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ INIT_WORK(&cfile->put, cifsFileInfo_put_work);
++ INIT_WORK(&cfile->serverclose, serverclose_work);
+ INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
+ mutex_init(&cfile->fh_mutex);
+ spin_lock_init(&cfile->file_info_lock);
+@@ -579,6 +598,40 @@ static void cifsFileInfo_put_work(struct work_struct *work)
+ cifsFileInfo_put_final(cifs_file);
+ }
+
++void serverclose_work(struct work_struct *work)
++{
++ struct cifsFileInfo *cifs_file = container_of(work,
++ struct cifsFileInfo, serverclose);
++
++ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
++
++ struct TCP_Server_Info *server = tcon->ses->server;
++ int rc = 0;
++ int retries = 0;
++ int MAX_RETRIES = 4;
++
++ do {
++ if (server->ops->close_getattr)
++ rc = server->ops->close_getattr(0, tcon, cifs_file);
++ else if (server->ops->close)
++ rc = server->ops->close(0, tcon, &cifs_file->fid);
++
++ if (rc == -EBUSY || rc == -EAGAIN) {
++ retries++;
++ msleep(250);
++ }
++	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
++
++ if (retries == MAX_RETRIES)
++ pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
++
++ if (cifs_file->offload)
++ queue_work(fileinfo_put_wq, &cifs_file->put);
++ else
++ cifsFileInfo_put_final(cifs_file);
++}
++
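serverclose_work() above retries a close the server answered with -EBUSY or -EAGAIN, sleeping 250 ms between attempts and capping at four tries. A runnable userspace model of the same bounded-retry loop, with a stub op() that succeeds on its third call:

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	static int op(void)
	{
		static int calls;
		return ++calls < 3 ? -EBUSY : 0;	/* transiently busy */
	}

	int main(void)
	{
		const int max_retries = 4;
		int retries = 0, rc;

		do {
			rc = op();
			if (rc == -EBUSY || rc == -EAGAIN) {
				retries++;
				usleep(250 * 1000);	/* msleep(250) in the kernel */
			}
		} while ((rc == -EBUSY || rc == -EAGAIN) && retries < max_retries);

		if (retries == max_retries)
			fprintf(stderr, "giving up after %d retries\n", max_retries);
		else
			printf("rc=%d after %d retries\n", rc, retries);
		return 0;
	}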
+ /**
+ * cifsFileInfo_put - release a reference of file priv data
+ *
+@@ -619,10 +672,13 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ struct cifs_fid fid = {};
+ struct cifs_pending_open open;
+ bool oplock_break_cancelled;
++ bool serverclose_offloaded = false;
+
+ spin_lock(&tcon->open_file_lock);
+ spin_lock(&cifsi->open_file_lock);
+ spin_lock(&cifs_file->file_info_lock);
++
++ cifs_file->offload = offload;
+ if (--cifs_file->count > 0) {
+ spin_unlock(&cifs_file->file_info_lock);
+ spin_unlock(&cifsi->open_file_lock);
+@@ -664,13 +720,20 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int xid;
++ int rc = 0;
+
+ xid = get_xid();
+ if (server->ops->close_getattr)
+- server->ops->close_getattr(xid, tcon, cifs_file);
++ rc = server->ops->close_getattr(xid, tcon, cifs_file);
+ else if (server->ops->close)
+- server->ops->close(xid, tcon, &cifs_file->fid);
++ rc = server->ops->close(xid, tcon, &cifs_file->fid);
+ _free_xid(xid);
++
++ if (rc == -EBUSY || rc == -EAGAIN) {
++ // Server close failed, hence offloading it as an async op
++ queue_work(serverclose_wq, &cifs_file->serverclose);
++ serverclose_offloaded = true;
++ }
+ }
+
+ if (oplock_break_cancelled)
+@@ -678,10 +741,15 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+
+ cifs_del_pending_open(&open);
+
+- if (offload)
+- queue_work(fileinfo_put_wq, &cifs_file->put);
+- else
+- cifsFileInfo_put_final(cifs_file);
++ // if serverclose has been offloaded to wq (on failure), it will
++ // handle offloading put as well. If serverclose not offloaded,
++ // we need to handle offloading put here.
++ if (!serverclose_offloaded) {
++ if (offload)
++ queue_work(fileinfo_put_wq, &cifs_file->put);
++ else
++ cifsFileInfo_put_final(cifs_file);
++ }
+ }
+
+ int cifs_open(struct inode *inode, struct file *file)
+@@ -831,11 +899,11 @@ int cifs_open(struct inode *inode, struct file *file)
+ use_cache:
+ fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
+ file->f_mode & FMODE_WRITE);
+- if (file->f_flags & O_DIRECT &&
+- (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
+- file->f_flags & O_APPEND))
+- cifs_invalidate_cache(file_inode(file),
+- FSCACHE_INVAL_DIO_WRITE);
++ if (!(file->f_flags & O_DIRECT))
++ goto out;
++ if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
++ goto out;
++ cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
+
+ out:
+ free_dentry_path(page);
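The rewritten tail of cifs_open() replaces a double-negative compound test with one masked comparison: since O_RDONLY is 0 and O_ACCMODE covers the two access-mode bits, (f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY holds exactly when the file is opened read-only with O_APPEND clear. A runnable check of that equivalence:

	#include <assert.h>
	#include <fcntl.h>

	int main(void)
	{
		const int mask = O_ACCMODE | O_APPEND;

		assert(((O_RDONLY)            & mask) == O_RDONLY);
		assert(((O_WRONLY)            & mask) != O_RDONLY);
		assert(((O_RDWR)              & mask) != O_RDONLY);
		assert(((O_RDONLY | O_APPEND) & mask) != O_RDONLY);
		return 0;
	}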
+@@ -900,6 +968,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ int disposition = FILE_OPEN;
+ int create_options = CREATE_NOT_DIR;
+ struct cifs_open_parms oparms;
++ int rdwr_for_fscache = 0;
+
+ xid = get_xid();
+ mutex_lock(&cfile->fh_mutex);
+@@ -963,7 +1032,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ }
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+
+- desired_access = cifs_convert_flags(cfile->f_flags);
++ /* If we're caching, we need to be able to fill in around partial writes. */
++ if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
++ rdwr_for_fscache = 1;
++
++ desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
+
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (cfile->f_flags & O_SYNC)
+@@ -975,6 +1048,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ if (server->ops->get_lease_key)
+ server->ops->get_lease_key(inode, &cfile->fid);
+
++retry_open:
+ oparms = (struct cifs_open_parms) {
+ .tcon = tcon,
+ .cifs_sb = cifs_sb,
+@@ -1000,6 +1074,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ /* indicate that we need to relock the file */
+ oparms.reconnect = true;
+ }
++ if (rc == -EACCES && rdwr_for_fscache == 1) {
++ desired_access = cifs_convert_flags(cfile->f_flags, 0);
++ rdwr_for_fscache = 2;
++ goto retry_open;
++ }
+
+ if (rc) {
+ mutex_unlock(&cfile->fh_mutex);
+@@ -1008,6 +1087,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ goto reopen_error_exit;
+ }
+
++ if (rdwr_for_fscache == 2)
++ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
++
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ reopen_success:
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+@@ -1020,14 +1102,16 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ if (!is_interrupt_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
+
+- if (tcon->posix_extensions)
+- rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
+- else if (tcon->unix_ext)
++ if (tcon->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&inode, full_path,
++ NULL, inode->i_sb, xid);
++ } else if (tcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&inode, full_path,
+ inode->i_sb, xid);
+- else
++ } else {
+ rc = cifs_get_inode_info(&inode, full_path, NULL,
+ inode->i_sb, xid, NULL);
++ }
+ }
+ /*
+ * Else we are writing out data to server already and could deadlock if
+@@ -1067,6 +1151,19 @@ void smb2_deferred_work_close(struct work_struct *work)
+ _cifsFileInfo_put(cfile, true, false);
+ }
+
++static bool
++smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
++{
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct cifsInodeInfo *cinode = CIFS_I(inode);
++
++ return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
++ (cinode->oplock == CIFS_CACHE_RHW_FLG ||
++ cinode->oplock == CIFS_CACHE_RH_FLG) &&
++ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
++}
++
+ int cifs_close(struct inode *inode, struct file *file)
+ {
+ struct cifsFileInfo *cfile;
+@@ -1080,12 +1177,11 @@ int cifs_close(struct inode *inode, struct file *file)
+ cfile = file->private_data;
+ file->private_data = NULL;
+ dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
+- if ((cifs_sb->ctx->closetimeo && cinode->oplock == CIFS_CACHE_RHW_FLG)
+- && cinode->lease_granted &&
+- !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
+- dclose) {
++ if (!cfile->status_file_deleted &&
++     smb2_can_defer_close(inode, dclose)) {
+ if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode,
++ inode_set_ctime_current(inode));
+ }
+ spin_lock(&cinode->deferred_lock);
+ cifs_add_deferred_close(cfile, dclose);
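cifs_close() now defers the server-side close only when smb2_can_defer_close() approves the lease state and the file was not deleted behind the handle. A condensed sketch of the decision; the delayed-work scheduling and remaining error handling of the real function are elided:

	dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
	if (!cfile->status_file_deleted && smb2_can_defer_close(inode, dclose)) {
		if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
			inode_set_mtime_to_ts(inode,
					      inode_set_ctime_current(inode));
		spin_lock(&cinode->deferred_lock);
		cifs_add_deferred_close(cfile, dclose);
		spin_unlock(&cinode->deferred_lock);
	} else {
		_cifsFileInfo_put(cfile, true, false);	/* close now */
		kfree(dclose);
	}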
+@@ -2596,7 +2692,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
+ write_data, to - from, &offset);
+ cifsFileInfo_put(open_file);
+ /* Does mm or vfs already set times? */
+- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
++ simple_inode_init_ts(inode);
+ if ((bytes_written > 0) && (offset))
+ rc = 0;
+ else if (bytes_written < 0)
+@@ -2618,20 +2714,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
+ * dirty pages if possible, but don't sleep while doing so.
+ */
+ static void cifs_extend_writeback(struct address_space *mapping,
++ struct xa_state *xas,
+ long *_count,
+ loff_t start,
+ int max_pages,
+- size_t max_len,
+- unsigned int *_len)
++ loff_t max_len,
++ size_t *_len)
+ {
+ struct folio_batch batch;
+ struct folio *folio;
+- unsigned int psize, nr_pages;
+- size_t len = *_len;
+- pgoff_t index = (start + len) / PAGE_SIZE;
++ unsigned int nr_pages;
++ pgoff_t index = (start + *_len) / PAGE_SIZE;
++ size_t len;
+ bool stop = true;
+ unsigned int i;
+- XA_STATE(xas, &mapping->i_pages, index);
+
+ folio_batch_init(&batch);
+
+@@ -2642,54 +2738,64 @@ static void cifs_extend_writeback(struct address_space *mapping,
+ */
+ rcu_read_lock();
+
+- xas_for_each(&xas, folio, ULONG_MAX) {
++ xas_for_each(xas, folio, ULONG_MAX) {
+ stop = true;
+- if (xas_retry(&xas, folio))
++ if (xas_retry(xas, folio))
+ continue;
+ if (xa_is_value(folio))
+ break;
+- if (folio_index(folio) != index)
++ if (folio->index != index) {
++ xas_reset(xas);
+ break;
+- if (!folio_try_get_rcu(folio)) {
+- xas_reset(&xas);
++ }
++
++ if (!folio_try_get(folio)) {
++ xas_reset(xas);
+ continue;
+ }
+ nr_pages = folio_nr_pages(folio);
+- if (nr_pages > max_pages)
++ if (nr_pages > max_pages) {
++ xas_reset(xas);
+ break;
++ }
+
+ /* Has the page moved or been split? */
+- if (unlikely(folio != xas_reload(&xas))) {
++ if (unlikely(folio != xas_reload(xas))) {
+ folio_put(folio);
++ xas_reset(xas);
+ break;
+ }
+
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
++ xas_reset(xas);
+ break;
+ }
+- if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
++ if (!folio_test_dirty(folio) ||
++ folio_test_writeback(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
++ xas_reset(xas);
+ break;
+ }
+
+ max_pages -= nr_pages;
+- psize = folio_size(folio);
+- len += psize;
++ len = folio_size(folio);
+ stop = false;
+- if (max_pages <= 0 || len >= max_len || *_count <= 0)
+- stop = true;
+
+ index += nr_pages;
++ *_count -= nr_pages;
++ *_len += len;
++ if (max_pages <= 0 || *_len >= max_len || *_count <= 0)
++ stop = true;
++
+ if (!folio_batch_add(&batch, folio))
+ break;
+ if (stop)
+ break;
+ }
+
+- if (!stop)
+- xas_pause(&xas);
++ xas_pause(xas);
+ rcu_read_unlock();
+
+ /* Now, if we obtained any pages, we can shift them to being
+@@ -2705,18 +2811,13 @@ static void cifs_extend_writeback(struct address_space *mapping,
+ */
+ if (!folio_clear_dirty_for_io(folio))
+ WARN_ON(1);
+- if (folio_start_writeback(folio))
+- WARN_ON(1);
+-
+- *_count -= folio_nr_pages(folio);
++ folio_start_writeback(folio);
+ folio_unlock(folio);
+ }
+
+ folio_batch_release(&batch);
+ cond_resched();
+ } while (!stop);
+-
+- *_len = len;
+ }
+
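cifs_extend_writeback() now walks an xa_state owned by its caller, so the scan position survives across calls; every early exit resets the state, and the folio_try_get()/xas_reload() pair revalidates each folio against concurrent reclaim or splitting. A skeletal version of that probe-and-verify loop, assuming keep_folio() stands in for the dirty/size/lock checks:

	rcu_read_lock();
	xas_for_each(xas, folio, ULONG_MAX) {
		if (xas_retry(xas, folio))
			continue;
		if (!folio_try_get(folio)) {		/* being freed */
			xas_reset(xas);
			continue;
		}
		if (folio != xas_reload(xas)) {		/* moved or split */
			folio_put(folio);
			xas_reset(xas);
			break;
		}
		if (!keep_folio(folio)) {		/* run ends here */
			folio_put(folio);
			xas_reset(xas);
			break;
		}
		if (!folio_batch_add(&batch, folio))
			break;
	}
	xas_pause(xas);		/* caller may resume from this point */
	rcu_read_unlock();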
+ /*
+@@ -2724,8 +2825,10 @@ static void cifs_extend_writeback(struct address_space *mapping,
+ */
+ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
+ struct writeback_control *wbc,
++ struct xa_state *xas,
+ struct folio *folio,
+- loff_t start, loff_t end)
++ unsigned long long start,
++ unsigned long long end)
+ {
+ struct inode *inode = mapping->host;
+ struct TCP_Server_Info *server;
+@@ -2734,18 +2837,18 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
+ struct cifs_credits credits_on_stack;
+ struct cifs_credits *credits = &credits_on_stack;
+ struct cifsFileInfo *cfile = NULL;
+- unsigned int xid, wsize, len;
+- loff_t i_size = i_size_read(inode);
+- size_t max_len;
++ unsigned long long i_size = i_size_read(inode), max_len;
++ unsigned int xid, wsize;
++ size_t len = folio_size(folio);
+ long count = wbc->nr_to_write;
+ int rc;
+
+ /* The folio should be locked, dirty and not undergoing writeback. */
+- if (folio_start_writeback(folio))
+- WARN_ON(1);
++ if (!folio_clear_dirty_for_io(folio))
++ WARN_ON_ONCE(1);
++ folio_start_writeback(folio);
+
+ count -= folio_nr_pages(folio);
+- len = folio_size(folio);
+
+ xid = get_xid();
+ server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
+@@ -2775,9 +2878,10 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
+ wdata->server = server;
+ cfile = NULL;
+
+- /* Find all consecutive lockable dirty pages, stopping when we find a
+- * page that is not immediately lockable, is not dirty or is missing,
+- * or we reach the end of the range.
++ /* Find all consecutive lockable dirty pages that have contiguous
++ * written regions, stopping when we find a page that is not
++ * immediately lockable, is not dirty or is missing, or we reach the
++ * end of the range.
+ */
+ if (start < i_size) {
+ /* Trim the write to the EOF; the extra data is ignored. Also
+@@ -2797,19 +2901,18 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
+ max_pages -= folio_nr_pages(folio);
+
+ if (max_pages > 0)
+- cifs_extend_writeback(mapping, &count, start,
++ cifs_extend_writeback(mapping, xas, &count, start,
+ max_pages, max_len, &len);
+ }
+- len = min_t(loff_t, len, max_len);
+ }
+-
+- wdata->bytes = len;
++ len = min_t(unsigned long long, len, i_size - start);
+
+ /* We now have a contiguous set of dirty pages, each with writeback
+ * set; the first page is still locked at this point, but all the rest
+ * have been unlocked.
+ */
+ folio_unlock(folio);
++ wdata->bytes = len;
+
+ if (start < i_size) {
+ iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages,
+@@ -2860,102 +2963,118 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
+ /*
+ * write a region of pages back to the server
+ */
+-static int cifs_writepages_region(struct address_space *mapping,
+- struct writeback_control *wbc,
+- loff_t start, loff_t end, loff_t *_next)
++static ssize_t cifs_writepages_begin(struct address_space *mapping,
++ struct writeback_control *wbc,
++ struct xa_state *xas,
++ unsigned long long *_start,
++ unsigned long long end)
+ {
+- struct folio_batch fbatch;
++ struct folio *folio;
++ unsigned long long start = *_start;
++ ssize_t ret;
+ int skips = 0;
+
+- folio_batch_init(&fbatch);
+- do {
+- int nr;
+- pgoff_t index = start / PAGE_SIZE;
++search_again:
++ /* Find the first dirty page. */
++ rcu_read_lock();
+
+- nr = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
+- PAGECACHE_TAG_DIRTY, &fbatch);
+- if (!nr)
++ for (;;) {
++ folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
++ if (xas_retry(xas, folio) || xa_is_value(folio))
++ continue;
++ if (!folio)
+ break;
+
+- for (int i = 0; i < nr; i++) {
+- ssize_t ret;
+- struct folio *folio = fbatch.folios[i];
++ if (!folio_try_get(folio)) {
++ xas_reset(xas);
++ continue;
++ }
+
+-redo_folio:
+- start = folio_pos(folio); /* May regress with THPs */
++ if (unlikely(folio != xas_reload(xas))) {
++ folio_put(folio);
++ xas_reset(xas);
++ continue;
++ }
+
+- /* At this point we hold neither the i_pages lock nor the
+- * page lock: the page may be truncated or invalidated
+- * (changing page->mapping to NULL), or even swizzled
+- * back from swapper_space to tmpfs file mapping
+- */
+- if (wbc->sync_mode != WB_SYNC_NONE) {
+- ret = folio_lock_killable(folio);
+- if (ret < 0)
+- goto write_error;
+- } else {
+- if (!folio_trylock(folio))
+- goto skip_write;
+- }
++ xas_pause(xas);
++ break;
++ }
++ rcu_read_unlock();
++ if (!folio)
++ return 0;
+
+- if (folio_mapping(folio) != mapping ||
+- !folio_test_dirty(folio)) {
+- start += folio_size(folio);
+- folio_unlock(folio);
+- continue;
+- }
++ start = folio_pos(folio); /* May regress with THPs */
+
+- if (folio_test_writeback(folio) ||
+- folio_test_fscache(folio)) {
+- folio_unlock(folio);
+- if (wbc->sync_mode == WB_SYNC_NONE)
+- goto skip_write;
++ /* At this point we hold neither the i_pages lock nor the page lock:
++ * the page may be truncated or invalidated (changing page->mapping to
++ * NULL), or even swizzled back from swapper_space to tmpfs file
++ * mapping
++ */
++lock_again:
++ if (wbc->sync_mode != WB_SYNC_NONE) {
++ ret = folio_lock_killable(folio);
++ if (ret < 0)
++ return ret;
++ } else {
++ if (!folio_trylock(folio))
++ goto search_again;
++ }
++
++ if (folio->mapping != mapping ||
++ !folio_test_dirty(folio)) {
++ start += folio_size(folio);
++ folio_unlock(folio);
++ goto search_again;
++ }
+
+- folio_wait_writeback(folio);
++ if (folio_test_writeback(folio) ||
++ folio_test_fscache(folio)) {
++ folio_unlock(folio);
++ if (wbc->sync_mode != WB_SYNC_NONE) {
++ folio_wait_writeback(folio);
+ #ifdef CONFIG_CIFS_FSCACHE
+- folio_wait_fscache(folio);
++ folio_wait_fscache(folio);
+ #endif
+- goto redo_folio;
+- }
+-
+- if (!folio_clear_dirty_for_io(folio))
+- /* We hold the page lock - it should've been dirty. */
+- WARN_ON(1);
+-
+- ret = cifs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
+- if (ret < 0)
+- goto write_error;
+-
+- start += ret;
+- continue;
+-
+-write_error:
+- folio_batch_release(&fbatch);
+- *_next = start;
+- return ret;
++ goto lock_again;
++ }
+
+-skip_write:
+- /*
+- * Too many skipped writes, or need to reschedule?
+- * Treat it as a write error without an error code.
+- */
++ start += folio_size(folio);
++ if (wbc->sync_mode == WB_SYNC_NONE) {
+ if (skips >= 5 || need_resched()) {
+ ret = 0;
+- goto write_error;
++ goto out;
+ }
+-
+- /* Otherwise, just skip that folio and go on to the next */
+ skips++;
+- start += folio_size(folio);
+- continue;
+ }
++ goto search_again;
++ }
+
+- folio_batch_release(&fbatch);
+- cond_resched();
+- } while (wbc->nr_to_write > 0);
++ ret = cifs_write_back_from_locked_folio(mapping, wbc, xas, folio, start, end);
++out:
++ if (ret > 0)
++ *_start = start + ret;
++ return ret;
++}
+
+- *_next = start;
+- return 0;
++/*
++ * Write a region of pages back to the server
++ */
++static int cifs_writepages_region(struct address_space *mapping,
++ struct writeback_control *wbc,
++ unsigned long long *_start,
++ unsigned long long end)
++{
++ ssize_t ret;
++
++ XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
++
++ do {
++ ret = cifs_writepages_begin(mapping, wbc, &xas, _start, end);
++ if (ret > 0 && wbc->nr_to_write > 0)
++ cond_resched();
++ } while (ret > 0 && wbc->nr_to_write > 0);
++
++ return ret > 0 ? 0 : ret;
+ }
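The split gives cifs_writepages_region() a simple driver contract: cifs_writepages_begin() returns the byte count written for one contiguous run (advancing *_start), 0 when no further dirty folio exists in the range, or a negative error; the region loop repeats while progress is made and budget remains, then folds any positive result into 0. A sketch of the driver, assuming write_one_run() stands in for cifs_writepages_begin():

	static int write_region(unsigned long long *start,
				unsigned long long end, long *budget)
	{
		ssize_t ret;

		do {
			ret = write_one_run(start, end, budget); /* moves *start */
			if (ret > 0 && *budget > 0)
				cond_resched();
		} while (ret > 0 && *budget > 0);

		return ret > 0 ? 0 : ret;	/* >0 collapses to success */
	}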
+
+ /*
+@@ -2964,7 +3083,7 @@ static int cifs_writepages_region(struct address_space *mapping,
+ static int cifs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+ {
+- loff_t start, next;
++ loff_t start, end;
+ int ret;
+
+ /* We have to be careful as we can end up racing with setattr()
+@@ -2972,28 +3091,34 @@ static int cifs_writepages(struct address_space *mapping,
+ * to prevent it.
+ */
+
+- if (wbc->range_cyclic) {
++ if (wbc->range_cyclic && mapping->writeback_index) {
+ start = mapping->writeback_index * PAGE_SIZE;
+- ret = cifs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
+- if (ret == 0) {
+- mapping->writeback_index = next / PAGE_SIZE;
+- if (start > 0 && wbc->nr_to_write > 0) {
+- ret = cifs_writepages_region(mapping, wbc, 0,
+- start, &next);
+- if (ret == 0)
+- mapping->writeback_index =
+- next / PAGE_SIZE;
+- }
++ ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
++ if (ret < 0)
++ goto out;
++
++ if (wbc->nr_to_write <= 0) {
++ mapping->writeback_index = start / PAGE_SIZE;
++ goto out;
+ }
++
++ start = 0;
++ end = mapping->writeback_index * PAGE_SIZE;
++ mapping->writeback_index = 0;
++ ret = cifs_writepages_region(mapping, wbc, &start, end);
++ if (ret == 0)
++ mapping->writeback_index = start / PAGE_SIZE;
+ } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+- ret = cifs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
++ start = 0;
++ ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
+ if (wbc->nr_to_write > 0 && ret == 0)
+- mapping->writeback_index = next / PAGE_SIZE;
++ mapping->writeback_index = start / PAGE_SIZE;
+ } else {
+- ret = cifs_writepages_region(mapping, wbc,
+- wbc->range_start, wbc->range_end, &next);
++ start = wbc->range_start;
++ ret = cifs_writepages_region(mapping, wbc, &start, wbc->range_end);
+ }
+
++out:
+ return ret;
+ }
+
+@@ -3090,8 +3215,15 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
+ if (rc > 0) {
+ spin_lock(&inode->i_lock);
+ if (pos > inode->i_size) {
++ loff_t additional_blocks = (512 - 1 + copied) >> 9;
++
+ i_size_write(inode, pos);
+- inode->i_blocks = (512 - 1 + pos) >> 9;
++ /*
++ * Estimate new allocation size based on the amount written.
++ * This will be updated from server on close (and on queryinfo)
++ */
++ inode->i_blocks = min_t(blkcnt_t, (512 - 1 + pos) >> 9,
++ inode->i_blocks + additional_blocks);
+ }
+ spin_unlock(&inode->i_lock);
+ }
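cifs_write_end() used to recompute i_blocks from the whole file size on every extending write; it now adds an estimate for just the bytes copied and clamps against the full-size formula, leaving the server to correct the figure on close or queryinfo. The rounding helper is the usual shift trick: (512 - 1 + n) >> 9 rounds n bytes up to 512-byte sectors. Runnable arithmetic check:

	#include <assert.h>

	static unsigned long long sectors(unsigned long long n)
	{
		return (512 - 1 + n) >> 9;	/* round up to 512-byte units */
	}

	int main(void)
	{
		assert(sectors(0)   == 0);
		assert(sectors(1)   == 1);
		assert(sectors(512) == 1);
		assert(sectors(513) == 2);
		assert(sectors(700) == 2);	/* a 700-byte append adds 2 */
		return 0;
	}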
+@@ -3299,6 +3431,7 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
+ if (wdata->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else {
++ wdata->replay = true;
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ wdata->mr->need_invalidate = true;
+@@ -4647,11 +4780,13 @@ static void cifs_readahead(struct readahead_control *ractl)
+ static int cifs_readpage_worker(struct file *file, struct page *page,
+ loff_t *poffset)
+ {
++ struct inode *inode = file_inode(file);
++ struct timespec64 atime, mtime;
+ char *read_data;
+ int rc;
+
+ /* Is the page cached? */
+- rc = cifs_readpage_from_fscache(file_inode(file), page);
++ rc = cifs_readpage_from_fscache(inode, page);
+ if (rc == 0)
+ goto read_complete;
+
+@@ -4666,11 +4801,10 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
+ cifs_dbg(FYI, "Bytes read %d\n", rc);
+
+ /* we do not want atime to be less than mtime, it broke some apps */
+- file_inode(file)->i_atime = current_time(file_inode(file));
+- if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
+- file_inode(file)->i_atime = file_inode(file)->i_mtime;
+- else
+- file_inode(file)->i_atime = current_time(file_inode(file));
++ atime = inode_set_atime_to_ts(inode, current_time(inode));
++ mtime = inode_get_mtime(inode);
++ if (timespec64_compare(&atime, &mtime) < 0)
++ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
+
+ if (PAGE_SIZE > rc)
+ memset(read_data + rc, 0, PAGE_SIZE - rc);
+@@ -4732,12 +4866,14 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+ refreshing the inode only on increases in the file size
+ but this is tricky to do without racing with writebehind
+ page caching in the current Linux kernel design */
+-bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
++bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
++ bool from_readdir)
+ {
+ if (!cifsInode)
+ return true;
+
+- if (is_inode_writable(cifsInode)) {
++ if (is_inode_writable(cifsInode) ||
++ ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
+ /* This inode is open for write at least once */
+ struct cifs_sb_info *cifs_sb;
+
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index a3493da12ad1e6..3bbac925d0766b 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -37,7 +37,7 @@
+ #include "rfc1002pdu.h"
+ #include "fs_context.h"
+
+-static DEFINE_MUTEX(cifs_mount_mutex);
++DEFINE_MUTEX(cifs_mount_mutex);
+
+ static const match_table_t cifs_smb_version_tokens = {
+ { Smb_1, SMB1_VERSION_STRING },
+@@ -139,6 +139,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ fsparam_u32("dir_mode", Opt_dirmode),
+ fsparam_u32("port", Opt_port),
+ fsparam_u32("min_enc_offload", Opt_min_enc_offload),
++ fsparam_u32("retrans", Opt_retrans),
+ fsparam_u32("esize", Opt_min_enc_offload),
+ fsparam_u32("bsize", Opt_blocksize),
+ fsparam_u32("rasize", Opt_rasize),
+@@ -161,6 +162,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ fsparam_string("username", Opt_user),
+ fsparam_string("pass", Opt_pass),
+ fsparam_string("password", Opt_pass),
++ fsparam_string("password2", Opt_pass2),
+ fsparam_string("ip", Opt_ip),
+ fsparam_string("addr", Opt_ip),
+ fsparam_string("domain", Opt_domain),
+@@ -173,6 +175,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ fsparam_string("vers", Opt_vers),
+ fsparam_string("sec", Opt_sec),
+ fsparam_string("cache", Opt_cache),
++ fsparam_string("reparse", Opt_reparse),
+
+ /* Arguments that should be ignored */
+ fsparam_flag("guest", Opt_ignore),
+@@ -210,7 +213,7 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
+
+ switch (match_token(value, cifs_secflavor_tokens, args)) {
+ case Opt_sec_krb5p:
+- cifs_errorf(fc, "sec=krb5p is not supported!\n");
++ cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n");
+ return 1;
+ case Opt_sec_krb5i:
+ ctx->sign = true;
+@@ -295,6 +298,35 @@ cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_conte
+ return 0;
+ }
+
++static const match_table_t reparse_flavor_tokens = {
++ { Opt_reparse_default, "default" },
++ { Opt_reparse_nfs, "nfs" },
++ { Opt_reparse_wsl, "wsl" },
++ { Opt_reparse_err, NULL },
++};
++
++static int parse_reparse_flavor(struct fs_context *fc, char *value,
++ struct smb3_fs_context *ctx)
++{
++ substring_t args[MAX_OPT_ARGS];
++
++ switch (match_token(value, reparse_flavor_tokens, args)) {
++ case Opt_reparse_default:
++ ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
++ break;
++ case Opt_reparse_nfs:
++ ctx->reparse_type = CIFS_REPARSE_TYPE_NFS;
++ break;
++ case Opt_reparse_wsl:
++ ctx->reparse_type = CIFS_REPARSE_TYPE_WSL;
++ break;
++ default:
++ cifs_errorf(fc, "bad reparse= option: %s\n", value);
++ return 1;
++ }
++ return 0;
++}
++
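parse_reparse_flavor() is the standard match_table_t idiom from linux/parser.h: a NULL-terminated pattern table plus match_token() mapping the option string to an enum, with a sentinel entry catching everything else. A self-contained userspace analogue using a plain strcmp loop in place of match_token():

	#include <stdio.h>
	#include <string.h>

	enum reparse { R_DEFAULT, R_NFS, R_WSL, R_ERR };

	static const struct { enum reparse t; const char *pat; } tokens[] = {
		{ R_DEFAULT, "default" },
		{ R_NFS,     "nfs"     },
		{ R_WSL,     "wsl"     },
		{ R_ERR,     NULL      },	/* sentinel, like Opt_reparse_err */
	};

	static enum reparse match(const char *value)
	{
		for (int i = 0; tokens[i].pat; i++)
			if (strcmp(value, tokens[i].pat) == 0)
				return tokens[i].t;
		return R_ERR;
	}

	int main(void)
	{
		printf("%d %d\n", match("wsl"), match("bogus"));	/* 2 3 */
		return 0;
	}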
+ #define DUP_CTX_STR(field) \
+ do { \
+ if (ctx->field) { \
+@@ -314,6 +346,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
+ new_ctx->nodename = NULL;
+ new_ctx->username = NULL;
+ new_ctx->password = NULL;
++ new_ctx->password2 = NULL;
+ new_ctx->server_hostname = NULL;
+ new_ctx->domainname = NULL;
+ new_ctx->UNC = NULL;
+@@ -326,6 +359,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
+ DUP_CTX_STR(prepath);
+ DUP_CTX_STR(username);
+ DUP_CTX_STR(password);
++ DUP_CTX_STR(password2);
+ DUP_CTX_STR(server_hostname);
+ DUP_CTX_STR(UNC);
+ DUP_CTX_STR(source);
+@@ -714,6 +748,16 @@ static int smb3_fs_context_validate(struct fs_context *fc)
+ /* set the port that we got earlier */
+ cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
+
++ if (ctx->uid_specified && !ctx->forceuid_specified) {
++ ctx->override_uid = 1;
++ pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n");
++ }
++
++ if (ctx->gid_specified && !ctx->forcegid_specified) {
++ ctx->override_gid = 1;
++ pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n");
++ }
++
+ if (ctx->override_uid && !ctx->uid_specified) {
+ ctx->override_uid = 0;
+ pr_notice("ignoring forceuid mount option specified with no uid= option\n");
+@@ -752,9 +796,9 @@ static int smb3_get_tree(struct fs_context *fc)
+
+ if (err)
+ return err;
+- mutex_lock(&cifs_mount_mutex);
++ cifs_mount_lock();
+ ret = smb3_get_tree_common(fc);
+- mutex_unlock(&cifs_mount_mutex);
++ cifs_mount_unlock();
+ return ret;
+ }
+
+@@ -771,7 +815,7 @@ static void smb3_fs_context_free(struct fs_context *fc)
+ */
+ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
+ struct smb3_fs_context *new_ctx,
+- struct smb3_fs_context *old_ctx)
++ struct smb3_fs_context *old_ctx, bool need_recon)
+ {
+ if (new_ctx->posix_paths != old_ctx->posix_paths) {
+ cifs_errorf(fc, "can not change posixpaths during remount\n");
+@@ -797,8 +841,15 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
+ }
+ if (new_ctx->password &&
+ (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) {
+- cifs_errorf(fc, "can not change password during remount\n");
+- return -EINVAL;
++ if (!need_recon) {
++ cifs_errorf(fc,
++ "can not change password of active session during remount\n");
++ return -EINVAL;
++ } else if (old_ctx->sectype == Kerberos) {
++ cifs_errorf(fc,
++ "can not change password for Kerberos via remount\n");
++ return -EINVAL;
++ }
+ }
+ if (new_ctx->domainname &&
+ (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) {
+@@ -842,9 +893,14 @@ static int smb3_reconfigure(struct fs_context *fc)
+ struct smb3_fs_context *ctx = smb3_fc2context(fc);
+ struct dentry *root = fc->root;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
++ struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses;
++ bool need_recon = false;
+ int rc;
+
+- rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx);
++ if (ses->expired_pwd)
++ need_recon = true;
++
++ rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx, need_recon);
+ if (rc)
+ return rc;
+
+@@ -857,7 +913,14 @@ static int smb3_reconfigure(struct fs_context *fc)
+ STEAL_STRING(cifs_sb, ctx, UNC);
+ STEAL_STRING(cifs_sb, ctx, source);
+ STEAL_STRING(cifs_sb, ctx, username);
+- STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
++ if (!need_recon)
++ STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
++ else {
++ kfree_sensitive(ses->password);
++ ses->password = kstrdup(ctx->password, GFP_KERNEL);
++ kfree_sensitive(ses->password2);
++ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
++ }
+ STEAL_STRING(cifs_sb, ctx, domainname);
+ STEAL_STRING(cifs_sb, ctx, nodename);
+ STEAL_STRING(cifs_sb, ctx, iocharset);
+@@ -915,7 +978,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+
+ switch (opt) {
+ case Opt_compress:
+- ctx->compression = UNKNOWN_TYPE;
++ ctx->compress = true;
+ cifs_dbg(VFS,
+ "SMB3 compression support is experimental\n");
+ break;
+@@ -966,12 +1029,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ ctx->override_uid = 0;
+ else
+ ctx->override_uid = 1;
++ ctx->forceuid_specified = true;
+ break;
+ case Opt_forcegid:
+ if (result.negated)
+ ctx->override_gid = 0;
+ else
+ ctx->override_gid = 1;
++ ctx->forcegid_specified = true;
+ break;
+ case Opt_perm:
+ if (result.negated)
+@@ -1064,6 +1129,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ case Opt_min_enc_offload:
+ ctx->min_offload = result.uint_32;
+ break;
++ case Opt_retrans:
++ ctx->retrans = result.uint_32;
++ break;
+ case Opt_blocksize:
+ /*
+ * inode blocksize realistically should never need to be
+@@ -1107,6 +1175,17 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ case Opt_wsize:
+ ctx->wsize = result.uint_32;
+ ctx->got_wsize = true;
++ if (ctx->wsize % PAGE_SIZE != 0) {
++ ctx->wsize = round_down(ctx->wsize, PAGE_SIZE);
++ if (ctx->wsize == 0) {
++ ctx->wsize = PAGE_SIZE;
++ cifs_dbg(VFS, "wsize too small, reset to minimum %ld\n", PAGE_SIZE);
++ } else {
++ cifs_dbg(VFS,
++ "wsize rounded down to %d to multiple of PAGE_SIZE %ld\n",
++ ctx->wsize, PAGE_SIZE);
++ }
++ }
+ break;
+ case Opt_acregmax:
+ ctx->acregmax = HZ * result.uint_32;
+@@ -1243,6 +1322,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ goto cifs_parse_mount_err;
+ }
+ break;
++ case Opt_pass2:
++ kfree_sensitive(ctx->password2);
++ ctx->password2 = NULL;
++ if (strlen(param->string) == 0)
++ break;
++
++ ctx->password2 = kstrdup(param->string, GFP_KERNEL);
++ if (ctx->password2 == NULL) {
++ cifs_errorf(fc, "OOM when copying password2 string\n");
++ goto cifs_parse_mount_err;
++ }
++ break;
+ case Opt_ip:
+ if (strlen(param->string) == 0) {
+ ctx->got_ip = false;
+@@ -1534,6 +1625,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ case Opt_rdma:
+ ctx->rdma = true;
+ break;
++ case Opt_reparse:
++ if (parse_reparse_flavor(fc, param->string, ctx))
++ goto cifs_parse_mount_err;
++ break;
+ }
+ /* case Opt_ignore: - is ignored as expected ... */
+
+@@ -1542,6 +1637,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ cifs_parse_mount_err:
+ kfree_sensitive(ctx->password);
+ ctx->password = NULL;
++ kfree_sensitive(ctx->password2);
++ ctx->password2 = NULL;
+ return -EINVAL;
+ }
+
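The parser's error exit now scrubs password2 alongside password; kfree_sensitive() zeroes the buffer before freeing so a secret cannot be recovered from the allocator's free lists. A userspace analogue, assuming glibc's explicit_bzero() (which, unlike plain memset(), is not optimised away before free()):

	#include <stdlib.h>
	#include <string.h>

	static void free_sensitive(char *secret)
	{
		if (!secret)
			return;
		explicit_bzero(secret, strlen(secret));	/* scrub first */
		free(secret);
	}

	int main(void)
	{
		char *pw = strdup("hunter2");
		free_sensitive(pw);	/* safe on any error path, like the patch */
		return 0;
	}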
+@@ -1619,6 +1716,9 @@ int smb3_init_fs_context(struct fs_context *fc)
+ ctx->backupuid_specified = false; /* no backup intent for a user */
+ ctx->backupgid_specified = false; /* no backup intent for a group */
+
++ ctx->retrans = 1;
++ ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
++
+ /*
+ * short int override_uid = -1;
+ * short int override_gid = -1;
+@@ -1644,6 +1744,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
+ ctx->username = NULL;
+ kfree_sensitive(ctx->password);
+ ctx->password = NULL;
++ kfree_sensitive(ctx->password2);
++ ctx->password2 = NULL;
+ kfree(ctx->server_hostname);
+ ctx->server_hostname = NULL;
+ kfree(ctx->UNC);
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index 9d8d34af021147..cf577ec0dd0ac4 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -41,6 +41,13 @@ enum {
+ Opt_cache_err
+ };
+
++enum cifs_reparse_parm {
++ Opt_reparse_default,
++ Opt_reparse_nfs,
++ Opt_reparse_wsl,
++ Opt_reparse_err
++};
++
+ enum cifs_sec_param {
+ Opt_sec_krb5,
+ Opt_sec_krb5i,
+@@ -118,6 +125,7 @@ enum cifs_param {
+ Opt_file_mode,
+ Opt_dirmode,
+ Opt_min_enc_offload,
++ Opt_retrans,
+ Opt_blocksize,
+ Opt_rasize,
+ Opt_rsize,
+@@ -137,6 +145,7 @@ enum cifs_param {
+ Opt_source,
+ Opt_user,
+ Opt_pass,
++ Opt_pass2,
+ Opt_ip,
+ Opt_domain,
+ Opt_srcaddr,
+@@ -147,6 +156,7 @@ enum cifs_param {
+ Opt_vers,
+ Opt_sec,
+ Opt_cache,
++ Opt_reparse,
+
+ /* Mount options to be ignored */
+ Opt_ignore,
+@@ -155,6 +165,8 @@ enum cifs_param {
+ };
+
+ struct smb3_fs_context {
++ bool forceuid_specified;
++ bool forcegid_specified;
+ bool uid_specified;
+ bool cruid_specified;
+ bool gid_specified;
+@@ -168,6 +180,7 @@ struct smb3_fs_context {
+
+ char *username;
+ char *password;
++ char *password2;
+ char *domainname;
+ char *source;
+ char *server_hostname;
+@@ -245,6 +258,7 @@ struct smb3_fs_context {
+ unsigned int rsize;
+ unsigned int wsize;
+ unsigned int min_offload;
++ unsigned int retrans;
+ bool sockopt_tcp_nodelay:1;
+ /* attribute cache timemout for files and directories in jiffies */
+ unsigned long acregmax;
+@@ -263,11 +277,13 @@ struct smb3_fs_context {
+ unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
+ unsigned int max_channels;
+ unsigned int max_cached_dirs;
+- __u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
++ bool compress; /* enable SMB2 messages (READ/WRITE) de/compression */
+ bool rootfs:1; /* if it's a SMB root file system */
+ bool witness:1; /* use witness protocol */
+ char *leaf_fullpath;
+ struct cifs_ses *dfs_root_ses;
++ bool dfs_automount:1; /* set for dfs automount only */
++ enum cifs_reparse_type reparse_type;
+ };
+
+ extern const struct fs_parameter_spec smb3_fs_parameters[];
+@@ -292,4 +308,16 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+ #define MAX_CACHED_FIDS 16
+ extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+
++extern struct mutex cifs_mount_mutex;
++
++static inline void cifs_mount_lock(void)
++{
++ mutex_lock(&cifs_mount_mutex);
++}
++
++static inline void cifs_mount_unlock(void)
++{
++ mutex_unlock(&cifs_mount_mutex);
++}
++
+ #endif
+diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
+index e5cad149f5a2d7..98c5eebdc7b2f0 100644
+--- a/fs/smb/client/fscache.c
++++ b/fs/smb/client/fscache.c
+@@ -12,6 +12,16 @@
+ #include "cifs_fs_sb.h"
+ #include "cifsproto.h"
+
++/*
++ * Key for fscache inode. [!] Contents must match comparisons in cifs_find_inode().
++ */
++struct cifs_fscache_inode_key {
++
++ __le64 uniqueid; /* server inode number */
++ __le64 createtime; /* creation time on server */
++ u8 type; /* S_IFMT file type */
++} __packed;
++
+ static void cifs_fscache_fill_volume_coherency(
+ struct cifs_tcon *tcon,
+ struct cifs_fscache_volume_coherency_data *cd)
+@@ -33,12 +43,23 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+ char *key;
+ int ret = -ENOMEM;
+
++ if (tcon->fscache_acquired)
++ return 0;
++
++ mutex_lock(&tcon->fscache_lock);
++ if (tcon->fscache_acquired) {
++ mutex_unlock(&tcon->fscache_lock);
++ return 0;
++ }
++ tcon->fscache_acquired = true;
++
+ tcon->fscache = NULL;
+ switch (sa->sa_family) {
+ case AF_INET:
+ case AF_INET6:
+ break;
+ default:
++ mutex_unlock(&tcon->fscache_lock);
+ cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
+ return -EINVAL;
+ }
+@@ -47,6 +68,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+
+ sharename = extract_sharename(tcon->tree_name);
+ if (IS_ERR(sharename)) {
++ mutex_unlock(&tcon->fscache_lock);
+ cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
+ return PTR_ERR(sharename);
+ }
+@@ -72,6 +94,11 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+ }
+ pr_err("Cache volume key already in use (%s)\n", key);
+ vcookie = NULL;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_see_fscache_collision);
++ } else {
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_see_fscache_okay);
+ }
+
+ tcon->fscache = vcookie;
+@@ -80,6 +107,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+ kfree(key);
+ out:
+ kfree(sharename);
++ mutex_unlock(&tcon->fscache_lock);
+ return ret;
+ }
+
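The cookie setup above is a double-checked once-only: an unlocked fast-path test of fscache_acquired, then the same test again under fscache_lock before committing, so concurrent mounts of the same tcon acquire the volume cookie exactly once. The idiom in isolation (kernel-style sketch; every later exit from the real function drops the mutex):

	if (tcon->fscache_acquired)		/* fast path, no lock */
		return 0;

	mutex_lock(&tcon->fscache_lock);
	if (tcon->fscache_acquired) {		/* re-check under the lock */
		mutex_unlock(&tcon->fscache_lock);
		return 0;
	}
	tcon->fscache_acquired = true;
	/* one-time acquisition work happens here, mutex still held */
	mutex_unlock(&tcon->fscache_lock);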
+@@ -92,20 +120,26 @@ void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
+ cifs_fscache_fill_volume_coherency(tcon, &cd);
+ fscache_relinquish_volume(tcon->fscache, &cd, false);
+ tcon->fscache = NULL;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_see_fscache_relinq);
+ }
+
+ void cifs_fscache_get_inode_cookie(struct inode *inode)
+ {
+ struct cifs_fscache_inode_coherency_data cd;
++ struct cifs_fscache_inode_key key;
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
++ key.uniqueid = cpu_to_le64(cifsi->uniqueid);
++ key.createtime = cpu_to_le64(cifsi->createtime);
++ key.type = (inode->i_mode & S_IFMT) >> 12;
+ cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
+
+ cifsi->netfs.cache =
+ fscache_acquire_cookie(tcon->fscache, 0,
+- &cifsi->uniqueid, sizeof(cifsi->uniqueid),
++ &key, sizeof(key),
+ &cd, sizeof(cd),
+ i_size_read(&cifsi->netfs.inode));
+ if (cifsi->netfs.cache)
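The cache key grows from the bare uniqueid to a packed (uniqueid, createtime, type) tuple, stored little-endian so cache contents stay byte-order portable, and annotated to match the comparisons in cifs_find_inode(). A runnable check of the packed layout and the type encoding:

	#include <assert.h>
	#include <stdint.h>

	struct inode_key {			/* mirrors cifs_fscache_inode_key */
		uint64_t uniqueid;		/* __le64 in the kernel */
		uint64_t createtime;		/* __le64 */
		uint8_t  type;			/* (i_mode & S_IFMT) >> 12 */
	} __attribute__((packed));

	int main(void)
	{
		/* __packed drops the tail padding: 8 + 8 + 1, not 24 */
		assert(sizeof(struct inode_key) == 17);
		/* S_IFREG = 0100000, S_IFMT = 0170000: type code is 8 */
		assert(((0100000 & 0170000) >> 12) == 8);
		return 0;
	}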
+diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
+index 84f3b09367d2c4..1f2ea9f5cc9a8a 100644
+--- a/fs/smb/client/fscache.h
++++ b/fs/smb/client/fscache.h
+@@ -49,12 +49,12 @@ static inline
+ void cifs_fscache_fill_coherency(struct inode *inode,
+ struct cifs_fscache_inode_coherency_data *cd)
+ {
+- struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct timespec64 ctime = inode_get_ctime(inode);
++ struct timespec64 mtime = inode_get_mtime(inode);
+
+ memset(cd, 0, sizeof(*cd));
+- cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
+- cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
++ cd->last_write_time_sec = cpu_to_le64(mtime.tv_sec);
++ cd->last_write_time_nsec = cpu_to_le32(mtime.tv_nsec);
+ cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec);
+ cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec);
+ }
+@@ -109,6 +109,11 @@ static inline void cifs_readahead_to_fscache(struct inode *inode,
+ __cifs_readahead_to_fscache(inode, pos, len);
+ }
+
++static inline bool cifs_fscache_enabled(struct inode *inode)
++{
++ return fscache_cookie_enabled(cifs_inode_cookie(inode));
++}
++
+ #else /* CONFIG_CIFS_FSCACHE */
+ static inline
+ void cifs_fscache_fill_coherency(struct inode *inode,
+@@ -124,6 +129,7 @@ static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
+ static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
+ static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
+ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
++static inline bool cifs_fscache_enabled(struct inode *inode) { return false; }
+
+ static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
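cifs_fscache_enabled() follows the usual CONFIG-gated stub pattern: a real inline in the CONFIG_CIFS_FSCACHE branch, a constant-false inline in the #else branch, so call sites such as the rdwr_for_fscache checks stay free of #ifdefs and the compiler deletes the dead code outright. The pattern in miniature:

	#ifdef CONFIG_CIFS_FSCACHE
	static inline bool cifs_fscache_enabled(struct inode *inode)
	{
		return fscache_cookie_enabled(cifs_inode_cookie(inode));
	}
	#else
	static inline bool cifs_fscache_enabled(struct inode *inode)
	{
		return false;	/* stub: branch folds away at compile time */
	}
	#endif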
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index d7c302442c1ec8..e7970cbeb86111 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -26,6 +26,7 @@
+ #include "fs_context.h"
+ #include "cifs_ioctl.h"
+ #include "cached_dir.h"
++#include "reparse.h"
+
+ static void cifs_set_ops(struct inode *inode)
+ {
+@@ -82,6 +83,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
+ {
+ struct cifs_fscache_inode_coherency_data cd;
+ struct cifsInodeInfo *cifs_i = CIFS_I(inode);
++ struct timespec64 mtime;
+
+ cifs_dbg(FYI, "%s: revalidating inode %llu\n",
+ __func__, cifs_i->uniqueid);
+@@ -101,7 +103,8 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
+
+ /* revalidate if mtime or size have changed */
+ fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
+- if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
++ mtime = inode_get_mtime(inode);
++ if (timespec64_equal(&mtime, &fattr->cf_mtime) &&
+ cifs_i->server_eof == fattr->cf_eof) {
+ cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
+ __func__, cifs_i->uniqueid);
+@@ -145,7 +148,8 @@ cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+
+ /* populate an inode with info from a cifs_fattr struct */
+ int
+-cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
++cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
++ bool from_readdir)
+ {
+ struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+@@ -164,10 +168,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+ fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
+ /* we do not want atime to be less than mtime, it broke some apps */
+ if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
+- inode->i_atime = fattr->cf_mtime;
++ inode_set_atime_to_ts(inode, fattr->cf_mtime);
+ else
+- inode->i_atime = fattr->cf_atime;
+- inode->i_mtime = fattr->cf_mtime;
++ inode_set_atime_to_ts(inode, fattr->cf_atime);
++ inode_set_mtime_to_ts(inode, fattr->cf_mtime);
+ inode_set_ctime_to_ts(inode, fattr->cf_ctime);
+ inode->i_rdev = fattr->cf_rdev;
+ cifs_nlink_fattr_to_inode(inode, fattr);
+@@ -180,6 +184,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+ inode->i_mode = fattr->cf_mode;
+
+ cifs_i->cifsAttrs = fattr->cf_cifsattrs;
++ cifs_i->reparse_tag = fattr->cf_cifstag;
+
+ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+ cifs_i->time = 0;
+@@ -196,7 +201,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+ * Can't safely change the file size here if the client is writing to
+ * it due to potential races.
+ */
+- if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
++ if (is_size_safe_to_change(cifs_i, fattr->cf_eof, from_readdir)) {
+ i_size_write(inode, fattr->cf_eof);
+
+ /*
+@@ -207,7 +212,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+ inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
+ }
+
+- if (S_ISLNK(fattr->cf_mode)) {
++ if (S_ISLNK(fattr->cf_mode) && fattr->cf_symlink_target) {
+ kfree(cifs_i->symlink_target);
+ cifs_i->symlink_target = fattr->cf_symlink_target;
+ fattr->cf_symlink_target = NULL;
+@@ -365,7 +370,7 @@ static int update_inode_info(struct super_block *sb,
+ CIFS_I(*inode)->time = 0; /* force reval */
+ return -ESTALE;
+ }
+- return cifs_fattr_to_inode(*inode, fattr);
++ return cifs_fattr_to_inode(*inode, fattr, false);
+ }
+
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+@@ -396,11 +401,10 @@ cifs_get_file_info_unix(struct file *filp)
+ cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
+ } else if (rc == -EREMOTE) {
+ cifs_create_junction_fattr(&fattr, inode->i_sb);
+- rc = 0;
+ } else
+ goto cifs_gfiunix_out;
+
+- rc = cifs_fattr_to_inode(inode, &fattr);
++ rc = cifs_fattr_to_inode(inode, &fattr, false);
+
+ cifs_gfiunix_out:
+ free_xid(xid);
+@@ -457,8 +461,7 @@ static int cifs_get_unix_fattr(const unsigned char *full_path,
+ return -EOPNOTSUPP;
+ rc = server->ops->query_symlink(xid, tcon,
+ cifs_sb, full_path,
+- &fattr->cf_symlink_target,
+- NULL);
++ &fattr->cf_symlink_target);
+ cifs_dbg(FYI, "%s: query_symlink: %d\n", __func__, rc);
+ }
+ return rc;
+@@ -592,6 +595,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ cifs_dbg(FYI, "Symlink\n");
+ fattr->cf_mode |= S_IFLNK;
+ fattr->cf_dtype = DT_LNK;
++ } else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
++ cifs_dbg(FYI, "FIFO\n");
++ fattr->cf_mode |= S_IFIFO;
++ fattr->cf_dtype = DT_FIFO;
+ } else {
+ fattr->cf_mode |= S_IFREG; /* file? */
+ fattr->cf_dtype = DT_REG;
+@@ -659,8 +666,6 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
+ /* Fill a cifs_fattr struct with info from POSIX info struct */
+ static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr,
+ struct cifs_open_info_data *data,
+- struct cifs_sid *owner,
+- struct cifs_sid *group,
+ struct super_block *sb)
+ {
+ struct smb311_posix_qinfo *info = &data->posix_fi;
+@@ -686,73 +691,43 @@ static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr,
+ fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
+ }
+
++ /*
++ * The srv fs device id is overridden on network mount so setting
++ * @fattr->cf_rdev isn't needed here.
++ */
+ fattr->cf_eof = le64_to_cpu(info->EndOfFile);
+ fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+ fattr->cf_createtime = le64_to_cpu(info->CreationTime);
+-
+ fattr->cf_nlink = le32_to_cpu(info->HardLinks);
+ fattr->cf_mode = (umode_t) le32_to_cpu(info->Mode);
+- /* The srv fs device id is overridden on network mount so setting rdev isn't needed here */
+- /* fattr->cf_rdev = le32_to_cpu(info->DeviceId); */
+
+- if (data->symlink) {
+- fattr->cf_mode |= S_IFLNK;
+- fattr->cf_dtype = DT_LNK;
+- fattr->cf_symlink_target = data->symlink_target;
+- data->symlink_target = NULL;
+- } else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
++ if (cifs_open_data_reparse(data) &&
++ cifs_reparse_point_to_fattr(cifs_sb, fattr, data))
++ goto out_reparse;
++
++ fattr->cf_mode &= ~S_IFMT;
++ if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+ fattr->cf_mode |= S_IFDIR;
+ fattr->cf_dtype = DT_DIR;
+ } else { /* file */
+ fattr->cf_mode |= S_IFREG;
+ fattr->cf_dtype = DT_REG;
+ }
+- /* else if reparse point ... TODO: add support for FIFO and blk dev; special file types */
+
+- sid_to_id(cifs_sb, owner, fattr, SIDOWNER);
+- sid_to_id(cifs_sb, group, fattr, SIDGROUP);
++out_reparse:
++ if (S_ISLNK(fattr->cf_mode)) {
++ if (likely(data->symlink_target))
++ fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
++ fattr->cf_symlink_target = data->symlink_target;
++ data->symlink_target = NULL;
++ }
++ sid_to_id(cifs_sb, &data->posix_owner, fattr, SIDOWNER);
++ sid_to_id(cifs_sb, &data->posix_group, fattr, SIDGROUP);
+
+ cifs_dbg(FYI, "POSIX query info: mode 0x%x uniqueid 0x%llx nlink %d\n",
+ fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink);
+ }
+
+-bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+- struct cifs_fattr *fattr,
+- u32 tag)
+-{
+- switch (tag) {
+- case IO_REPARSE_TAG_LX_SYMLINK:
+- fattr->cf_mode |= S_IFLNK | cifs_sb->ctx->file_mode;
+- fattr->cf_dtype = DT_LNK;
+- break;
+- case IO_REPARSE_TAG_LX_FIFO:
+- fattr->cf_mode |= S_IFIFO | cifs_sb->ctx->file_mode;
+- fattr->cf_dtype = DT_FIFO;
+- break;
+- case IO_REPARSE_TAG_AF_UNIX:
+- fattr->cf_mode |= S_IFSOCK | cifs_sb->ctx->file_mode;
+- fattr->cf_dtype = DT_SOCK;
+- break;
+- case IO_REPARSE_TAG_LX_CHR:
+- fattr->cf_mode |= S_IFCHR | cifs_sb->ctx->file_mode;
+- fattr->cf_dtype = DT_CHR;
+- break;
+- case IO_REPARSE_TAG_LX_BLK:
+- fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode;
+- fattr->cf_dtype = DT_BLK;
+- break;
+- case 0: /* SMB1 symlink */
+- case IO_REPARSE_TAG_SYMLINK:
+- case IO_REPARSE_TAG_NFS:
+- fattr->cf_mode = S_IFLNK;
+- fattr->cf_dtype = DT_LNK;
+- break;
+- default:
+- return false;
+- }
+- return true;
+-}
+-
+ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
+ struct cifs_open_info_data *data,
+ struct super_block *sb)
+@@ -783,9 +758,12 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
+ fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+ fattr->cf_createtime = le64_to_cpu(info->CreationTime);
+ fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
++ fattr->cf_uid = cifs_sb->ctx->linux_uid;
++ fattr->cf_gid = cifs_sb->ctx->linux_gid;
+
++ fattr->cf_mode = cifs_sb->ctx->file_mode;
+ if (cifs_open_data_reparse(data) &&
+- cifs_reparse_point_to_fattr(cifs_sb, fattr, data->reparse_tag))
++ cifs_reparse_point_to_fattr(cifs_sb, fattr, data))
+ goto out_reparse;
+
+ if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+@@ -801,10 +779,6 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
+ fattr->cf_mode = S_IFREG | cifs_sb->ctx->file_mode;
+ fattr->cf_dtype = DT_REG;
+
+- /* clear write bits if ATTR_READONLY is set */
+- if (fattr->cf_cifsattrs & ATTR_READONLY)
+- fattr->cf_mode &= ~(S_IWUGO);
+-
+ /*
+ * Don't accept zero nlink from non-unix servers unless
+ * delete is pending. Instead mark it as unknown.
+@@ -817,14 +791,17 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
+ }
+ }
+
++ /* clear write bits if ATTR_READONLY is set */
++ if (fattr->cf_cifsattrs & ATTR_READONLY)
++ fattr->cf_mode &= ~(S_IWUGO);
++
+ out_reparse:
+ if (S_ISLNK(fattr->cf_mode)) {
++ if (likely(data->symlink_target))
++ fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
+ fattr->cf_symlink_target = data->symlink_target;
+ data->symlink_target = NULL;
+ }
+-
+- fattr->cf_uid = cifs_sb->ctx->linux_uid;
+- fattr->cf_gid = cifs_sb->ctx->linux_gid;
+ }
+
+ static int
+@@ -838,9 +815,14 @@ cifs_get_file_info(struct file *filp)
+ struct cifsFileInfo *cfile = filp->private_data;
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
++ struct dentry *dentry = filp->f_path.dentry;
++ void *page = alloc_dentry_path();
++ const unsigned char *path;
+
+- if (!server->ops->query_file_info)
++ if (!server->ops->query_file_info) {
++ free_dentry_path(page);
+ return -ENOSYS;
++ }
+
+ xid = get_xid();
+ rc = server->ops->query_file_info(xid, tcon, cfile, &data);
+@@ -850,13 +832,19 @@ cifs_get_file_info(struct file *filp)
+ data.adjust_tz = false;
+ if (data.symlink_target) {
+ data.symlink = true;
+- data.reparse_tag = IO_REPARSE_TAG_SYMLINK;
++ data.reparse.tag = IO_REPARSE_TAG_SYMLINK;
++ }
++ path = build_path_from_dentry(dentry, page);
++ if (IS_ERR(path)) {
++ rc = PTR_ERR(path);
++ goto cgfi_exit;
+ }
+ cifs_open_info_to_fattr(&fattr, &data, inode->i_sb);
++ if (fattr.cf_flags & CIFS_FATTR_DELETE_PENDING)
++ cifs_mark_open_handles_for_deleted_file(inode, path);
+ break;
+ case -EREMOTE:
+ cifs_create_junction_fattr(&fattr, inode->i_sb);
+- rc = 0;
+ break;
+ case -EOPNOTSUPP:
+ case -EINVAL:
+@@ -879,9 +867,10 @@ cifs_get_file_info(struct file *filp)
+ fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
+ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
+ /* if filetype is different, return error */
+- rc = cifs_fattr_to_inode(inode, &fattr);
++ rc = cifs_fattr_to_inode(inode, &fattr, false);
+ cgfi_exit:
+ cifs_free_open_info(&data);
++ free_dentry_path(page);
+ free_xid(xid);
+ return rc;
+ }
+@@ -1019,7 +1008,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct kvec rsp_iov, *iov = NULL;
+ int rsp_buftype = CIFS_NO_BUFFER;
+- u32 tag = data->reparse_tag;
++ u32 tag = data->reparse.tag;
+ int rc = 0;
+
+ if (!tag && server->ops->query_reparse_point) {
+@@ -1028,27 +1017,54 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
+ &rsp_iov, &rsp_buftype);
+ if (!rc)
+ iov = &rsp_iov;
++ } else if (data->reparse.io.buftype != CIFS_NO_BUFFER &&
++ data->reparse.io.iov.iov_base) {
++ iov = &data->reparse.io.iov;
+ }
+- switch ((data->reparse_tag = tag)) {
+- case 0: /* SMB1 symlink */
+- iov = NULL;
+- fallthrough;
+- case IO_REPARSE_TAG_NFS:
+- case IO_REPARSE_TAG_SYMLINK:
+- if (!data->symlink_target && server->ops->query_symlink) {
++
++ rc = -EOPNOTSUPP;
++ data->reparse.tag = tag;
++ if (!data->reparse.tag) {
++ if (server->ops->query_symlink) {
+ rc = server->ops->query_symlink(xid, tcon,
+ cifs_sb, full_path,
+- &data->symlink_target,
+- iov);
++ &data->symlink_target);
++ }
++ if (rc == -EOPNOTSUPP)
++ data->reparse.tag = IO_REPARSE_TAG_INTERNAL;
++ }
++
++ switch (data->reparse.tag) {
++ case 0: /* SMB1 symlink */
++ break;
++ case IO_REPARSE_TAG_INTERNAL:
++ rc = 0;
++ if (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY) {
++ cifs_create_junction_fattr(fattr, sb);
++ goto out;
+ }
+ break;
+ case IO_REPARSE_TAG_MOUNT_POINT:
+ cifs_create_junction_fattr(fattr, sb);
++ rc = 0;
+ goto out;
++ default:
++ /* Check for cached reparse point data */
++ if (data->symlink_target || data->reparse.buf) {
++ rc = 0;
++ } else if (iov && server->ops->parse_reparse_point) {
++ rc = server->ops->parse_reparse_point(cifs_sb,
++ iov, data);
++ }
++ break;
+ }
+
+- cifs_open_info_to_fattr(fattr, data, sb);
++ if (tcon->posix_extensions)
++ smb311_posix_info_to_fattr(fattr, data, sb);
++ else
++ cifs_open_info_to_fattr(fattr, data, sb);
+ out:
++ fattr->cf_cifstag = data->reparse.tag;
+ free_rsp_buf(rsp_buftype, rsp_iov.iov_base);
+ return rc;
+ }
+@@ -1102,6 +1118,9 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
+ } else {
+ cifs_open_info_to_fattr(fattr, data, sb);
+ }
++ if (!rc && *inode &&
++ (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING))
++ cifs_mark_open_handles_for_deleted_file(*inode, full_path);
+ break;
+ case -EREMOTE:
+ /* DFS link, no metadata available on this server */
+@@ -1193,11 +1212,14 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
+ __func__, rc);
+ goto out;
+ }
+- }
+-
+- /* fill in remaining high mode bits e.g. SUID, VTX */
+- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
++ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
++ /* fill in remaining high mode bits e.g. SUID, VTX */
+ cifs_sfu_mode(fattr, full_path, cifs_sb, xid);
++ else if (!(tcon->posix_extensions))
++ /* clear write bits if ATTR_READONLY is set */
++ if (fattr->cf_cifsattrs & ATTR_READONLY)
++ fattr->cf_mode &= ~(S_IWUGO);
+
+ /* check for Minshall+French symlinks */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+@@ -1236,31 +1258,34 @@ int cifs_get_inode_info(struct inode **inode,
+ return rc;
+ }
+
+-static int smb311_posix_get_fattr(struct cifs_fattr *fattr,
++static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
++ struct cifs_fattr *fattr,
+ const char *full_path,
+ struct super_block *sb,
+ const unsigned int xid)
+ {
+- struct cifs_open_info_data data = {};
++ struct cifs_open_info_data tmp_data = {};
++ struct TCP_Server_Info *server;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+- struct cifs_sid owner, group;
+ int tmprc;
+- int rc;
++ int rc = 0;
+
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
++ server = tcon->ses->server;
+
+ /*
+- * 1. Fetch file metadata
++ * 1. Fetch file metadata if not provided (data)
+ */
+-
+- rc = smb311_posix_query_path_info(xid, tcon, cifs_sb,
+- full_path, &data,
+- &owner, &group);
++ if (!data) {
++ rc = server->ops->query_path_info(xid, tcon, cifs_sb,
++ full_path, &tmp_data);
++ data = &tmp_data;
++ }
+
+ /*
+ * 2. Convert it to internal cifs metadata (fattr)
+@@ -1268,7 +1293,12 @@ static int smb311_posix_get_fattr(struct cifs_fattr *fattr,
+
+ switch (rc) {
+ case 0:
+- smb311_posix_info_to_fattr(fattr, &data, &owner, &group, sb);
++ if (cifs_open_data_reparse(data)) {
++ rc = reparse_info_to_fattr(data, sb, xid, tcon,
++ full_path, fattr);
++ } else {
++ smb311_posix_info_to_fattr(fattr, data, sb);
++ }
+ break;
+ case -EREMOTE:
+ /* DFS link, no metadata available on this server */
+@@ -1299,12 +1329,15 @@ static int smb311_posix_get_fattr(struct cifs_fattr *fattr,
+
+ out:
+ cifs_put_tlink(tlink);
+- cifs_free_open_info(&data);
++ cifs_free_open_info(data);
+ return rc;
+ }
+
+-int smb311_posix_get_inode_info(struct inode **inode, const char *full_path,
+- struct super_block *sb, const unsigned int xid)
++int smb311_posix_get_inode_info(struct inode **inode,
++ const char *full_path,
++ struct cifs_open_info_data *data,
++ struct super_block *sb,
++ const unsigned int xid)
+ {
+ struct cifs_fattr fattr = {};
+ int rc;
+@@ -1314,11 +1347,13 @@ int smb311_posix_get_inode_info(struct inode **inode, const char *full_path,
+ return 0;
+ }
+
+- rc = smb311_posix_get_fattr(&fattr, full_path, sb, xid);
++ rc = smb311_posix_get_fattr(data, &fattr, full_path, sb, xid);
+ if (rc)
+ goto out;
+
+ rc = update_inode_info(sb, &fattr, inode);
++ if (!rc && fattr.cf_flags & CIFS_FATTR_DELETE_PENDING)
++ cifs_mark_open_handles_for_deleted_file(*inode, full_path);
+ out:
+ kfree(fattr.cf_symlink_target);
+ return rc;
+@@ -1333,6 +1368,8 @@ cifs_find_inode(struct inode *inode, void *opaque)
+ {
+ struct cifs_fattr *fattr = opaque;
+
++ /* [!] The compared values must be the same in struct cifs_fscache_inode_key. */
++
+ /* don't match inode with different uniqueid */
+ if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
+ return 0;
+@@ -1411,7 +1448,7 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
+ }
+
+ /* can't fail - see cifs_find_inode() */
+- cifs_fattr_to_inode(inode, fattr);
++ cifs_fattr_to_inode(inode, fattr, false);
+ if (sb->s_flags & SB_NOATIME)
+ inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ if (inode->i_state & I_NEW) {
+@@ -1462,7 +1499,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
+
+ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
+ if (tcon->posix_extensions)
+- rc = smb311_posix_get_fattr(&fattr, path, sb, xid);
++ rc = smb311_posix_get_fattr(NULL, &fattr, path, sb, xid);
+ else
+ rc = cifs_get_fattr(NULL, sb, xid, NULL, &fattr, &inode, path);
+
+@@ -1480,6 +1517,9 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ goto out;
+ }
+
++ if (!rc && fattr.cf_flags & CIFS_FATTR_DELETE_PENDING)
++ cifs_mark_open_handles_for_deleted_file(inode, path);
++
+ if (rc && tcon->pipe) {
+ cifs_dbg(FYI, "ipc connection - fake read inode\n");
+ spin_lock(&inode->i_lock);
+@@ -1766,20 +1806,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ goto psx_del_no_retry;
+ }
+
+- rc = server->ops->unlink(xid, tcon, full_path, cifs_sb);
++ rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry);
+
+ psx_del_no_retry:
+ if (!rc) {
+- if (inode)
++ if (inode) {
++ cifs_mark_open_handles_for_deleted_file(inode, full_path);
+ cifs_drop_nlink(inode);
++ }
+ } else if (rc == -ENOENT) {
+ d_drop(dentry);
+ } else if (rc == -EBUSY) {
+ if (server->ops->rename_pending_delete) {
+ rc = server->ops->rename_pending_delete(full_path,
+ dentry, xid);
+- if (rc == 0)
++ if (rc == 0) {
++ cifs_mark_open_handles_for_deleted_file(inode, full_path);
+ cifs_drop_nlink(inode);
++ }
+ }
+ } else if ((rc == -EACCES) && (dosattr == 0) && inode) {
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+@@ -1816,7 +1860,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
+ when needed */
+ inode_set_ctime_current(inode);
+ }
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ cifs_inode = CIFS_I(dir);
+ CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
+ unlink_out:
+@@ -1835,16 +1879,18 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
+ int rc = 0;
+ struct inode *inode = NULL;
+
+- if (tcon->posix_extensions)
+- rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid);
++ if (tcon->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&inode, full_path,
++ NULL, parent->i_sb, xid);
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+- else if (tcon->unix_ext)
++ } else if (tcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb,
+ xid);
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+- else
++ } else {
+ rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb,
+ xid, NULL);
++ }
+
+ if (rc)
+ return rc;
+@@ -2131,7 +2177,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
+ cifsInode->time = 0;
+
+ inode_set_ctime_current(d_inode(direntry));
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+
+ rmdir_exit:
+ free_dentry_path(page);
+@@ -2165,7 +2211,8 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+ return -ENOSYS;
+
+ /* try path-based rename first */
+- rc = server->ops->rename(xid, tcon, from_path, to_path, cifs_sb);
++ rc = server->ops->rename(xid, tcon, from_dentry,
++ from_path, to_path, cifs_sb);
+
+ /*
+ * Don't bother with rename by filehandle unless file is busy and
+@@ -2337,9 +2384,6 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
+ /* force revalidate to go get info when needed */
+ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
+
+- source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir,
+- inode_set_ctime_current(target_dir));
+-
+ cifs_rename_exit:
+ kfree(info_buf_source);
+ free_dentry_path(page2);
+@@ -2528,13 +2572,15 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+ dentry, cifs_get_time(dentry), jiffies);
+
+ again:
+- if (cifs_sb_master_tcon(CIFS_SB(sb))->posix_extensions)
+- rc = smb311_posix_get_inode_info(&inode, full_path, sb, xid);
+- else if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
++ if (cifs_sb_master_tcon(CIFS_SB(sb))->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&inode, full_path,
++ NULL, sb, xid);
++ } else if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) {
+ rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+- else
++ } else {
+ rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
+ xid, NULL);
++ }
+ if (rc == -EAGAIN && count++ < 10)
+ goto again;
+ out:
+@@ -2715,7 +2761,7 @@ void cifs_setsize(struct inode *inode, loff_t offset)
+
+ static int
+ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+- unsigned int xid, const char *full_path)
++ unsigned int xid, const char *full_path, struct dentry *dentry)
+ {
+ int rc;
+ struct cifsFileInfo *open_file;
+@@ -2766,7 +2812,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+ */
+ if (server->ops->set_path_size)
+ rc = server->ops->set_path_size(xid, tcon, full_path,
+- attrs->ia_size, cifs_sb, false);
++ attrs->ia_size, cifs_sb, false, dentry);
+ else
+ rc = -ENOSYS;
+ cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
+@@ -2856,7 +2902,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
+ rc = 0;
+
+ if (attrs->ia_valid & ATTR_SIZE) {
+- rc = cifs_set_file_size(inode, attrs, xid, full_path);
++ rc = cifs_set_file_size(inode, attrs, xid, full_path, direntry);
+ if (rc != 0)
+ goto out;
+ }
+@@ -3022,7 +3068,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ }
+
+ if (attrs->ia_valid & ATTR_SIZE) {
+- rc = cifs_set_file_size(inode, attrs, xid, full_path);
++ rc = cifs_set_file_size(inode, attrs, xid, full_path, direntry);
+ if (rc != 0)
+ goto cifs_setattr_exit;
+ }
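
Two threads run through the inode.c hunks above: dentry pointers are now passed down to the SMB ops (unlink, rename, set_path_size) so deferred-close state can be tracked per file, and non-POSIX mounts without SFU emulation now derive read-only modes directly from the DOS attribute. A minimal userspace sketch of that last mapping, with the MS-FSCC attribute value written out instead of taken from a kernel header:

    #include <stdio.h>
    #include <sys/stat.h>

    #define ATTR_READONLY 0x0001    /* FILE_ATTRIBUTE_READONLY, MS-FSCC 2.6 */

    int main(void)
    {
            mode_t mode = 0664;                 /* rw-rw-r-- as computed so far */
            unsigned int cifsattrs = ATTR_READONLY;

            /* clear write bits if ATTR_READONLY is set, as in the hunk above */
            if (cifsattrs & ATTR_READONLY)
                    mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);   /* S_IWUGO */
            printf("%o\n", (unsigned int)mode);               /* prints 444 */
            return 0;
    }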
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+index f7160003e0ed92..855ac5a62edfaa 100644
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -117,6 +117,20 @@ static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
+ return rc;
+ }
+
++static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
++{
++ int rc = 0;
++ struct smb_mnt_tcon_info tcon_inf;
++
++ tcon_inf.tid = tcon->tid;
++ tcon_inf.session_id = tcon->ses->Suid;
++
++ if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
++ rc = -EFAULT;
++
++ return rc;
++}
++
+ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ void __user *arg)
+ {
+@@ -129,6 +143,7 @@ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+
+ fsinf->version = 1;
+ fsinf->protocol_id = tcon->ses->server->vals->protocol_id;
++ fsinf->tcon_flags = tcon->Flags;
+ fsinf->device_characteristics =
+ le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics);
+ fsinf->device_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+@@ -232,7 +247,9 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+- if (ses_it->Suid == out.session_id) {
++ spin_lock(&ses_it->ses_lock);
++ if (ses_it->ses_status != SES_EXITING &&
++ ses_it->Suid == out.session_id) {
+ ses = ses_it;
+ /*
+ * since we are using the session outside the crit
+@@ -240,9 +257,11 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
+ * so increment its refcount
+ */
+ cifs_smb_ses_inc_refcount(ses);
++ spin_unlock(&ses_it->ses_lock);
+ found = true;
+ goto search_end;
+ }
++ spin_unlock(&ses_it->ses_lock);
+ }
+ }
+ search_end:
+@@ -330,6 +349,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ xid = get_xid();
+
+ cifs_dbg(FYI, "cifs ioctl 0x%x\n", command);
++ if (pSMBFile == NULL)
++ trace_smb3_ioctl(xid, 0, command);
++ else
++ trace_smb3_ioctl(xid, pSMBFile->fid.persistent_fid, command);
++
+ switch (command) {
+ case FS_IOC_GETFLAGS:
+ if (pSMBFile == NULL)
+@@ -414,6 +438,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ tcon = tlink_tcon(pSMBFile->tlink);
+ rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ break;
++ case CIFS_IOC_GET_TCON_INFO:
++ cifs_sb = CIFS_SB(inode->i_sb);
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink)) {
++ rc = PTR_ERR(tlink);
++ break;
++ }
++ tcon = tlink_tcon(tlink);
++ rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
++ cifs_put_tlink(tlink);
++ break;
+ case CIFS_ENUMERATE_SNAPSHOTS:
+ if (pSMBFile == NULL)
+ break;
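
smb_mnt_get_tcon_info() above backs the new CIFS_IOC_GET_TCON_INFO ioctl, which hands the mount's tree id and session id to userspace via copy_to_user(). A hedged sketch of the calling side; the request number and the exact field types of struct smb_mnt_tcon_info are assumptions here, only the tid/session_id pairing comes from the hunk:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    struct smb_mnt_tcon_info {      /* assumed mirror of the kernel struct */
            uint32_t tid;
            uint64_t session_id;
    };

    /* assumed request code; the real one lives in the cifs ioctl header */
    #define CIFS_IOC_GET_TCON_INFO _IOR(0xCF, 12, struct smb_mnt_tcon_info)

    int main(int argc, char **argv)
    {
            struct smb_mnt_tcon_info ti;
            int fd;

            if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            if (ioctl(fd, CIFS_IOC_GET_TCON_INFO, &ti) < 0) {
                    perror("CIFS_IOC_GET_TCON_INFO");
                    close(fd);
                    return 1;
            }
            printf("tid=0x%x session_id=0x%llx\n", ti.tid,
                   (unsigned long long)ti.session_id);
            close(fd);
            return 0;
    }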
+diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
+index c66be4904e1fa0..d86da949a91905 100644
+--- a/fs/smb/client/link.c
++++ b/fs/smb/client/link.c
+@@ -42,23 +42,11 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+
+ rc = cifs_alloc_hash("md5", &md5);
+ if (rc)
+- goto symlink_hash_err;
++ return rc;
+
+- rc = crypto_shash_init(md5);
+- if (rc) {
+- cifs_dbg(VFS, "%s: Could not init md5 shash\n", __func__);
+- goto symlink_hash_err;
+- }
+- rc = crypto_shash_update(md5, link_str, link_len);
+- if (rc) {
+- cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__);
+- goto symlink_hash_err;
+- }
+- rc = crypto_shash_final(md5, md5_hash);
++ rc = crypto_shash_digest(md5, link_str, link_len, md5_hash);
+ if (rc)
+ cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+-
+-symlink_hash_err:
+ cifs_free_hash(&md5);
+ return rc;
+ }
+@@ -522,8 +510,8 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
+ rc = -ENOSYS;
+ goto cifs_hl_exit;
+ }
+- rc = server->ops->create_hardlink(xid, tcon, from_name, to_name,
+- cifs_sb);
++ rc = server->ops->create_hardlink(xid, tcon, old_file,
++ from_name, to_name, cifs_sb);
+ if ((rc == -EIO) || (rc == -EINVAL))
+ rc = -EOPNOTSUPP;
+ }
+@@ -581,6 +569,7 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
+ int rc = -EOPNOTSUPP;
+ unsigned int xid;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct TCP_Server_Info *server;
+ struct tcon_link *tlink;
+ struct cifs_tcon *pTcon;
+ const char *full_path;
+@@ -602,6 +591,7 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
+ goto symlink_exit;
+ }
+ pTcon = tlink_tcon(tlink);
++ server = cifs_pick_channel(pTcon->ses);
+
+ full_path = build_path_from_dentry(direntry, page);
+ if (IS_ERR(full_path)) {
+@@ -613,27 +603,32 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
+ cifs_dbg(FYI, "symname is %s\n", symname);
+
+ /* BB what if DFS and this volume is on different share? BB */
+- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+- else if (pTcon->unix_ext)
++ } else if (pTcon->unix_ext) {
+ rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
+ cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+- /* else
+- rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
+- cifs_sb_target->local_nls); */
++ } else if (server->ops->create_reparse_symlink) {
++ rc = server->ops->create_reparse_symlink(xid, inode, direntry,
++ pTcon, full_path,
++ symname);
++ goto symlink_exit;
++ }
+
+ if (rc == 0) {
+- if (pTcon->posix_extensions)
+- rc = smb311_posix_get_inode_info(&newinode, full_path, inode->i_sb, xid);
+- else if (pTcon->unix_ext)
++ if (pTcon->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&newinode, full_path,
++ NULL, inode->i_sb, xid);
++ } else if (pTcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&newinode, full_path,
+ inode->i_sb, xid);
+- else
++ } else {
+ rc = cifs_get_inode_info(&newinode, full_path, NULL,
+ inode->i_sb, xid, NULL);
++ }
+
+ if (rc != 0) {
+ cifs_dbg(FYI, "Create symlink ok, getinodeinfo fail rc = %d\n",
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 35b176457bbed0..65d4b72b4d51a9 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -27,9 +27,6 @@
+ #include "fs_context.h"
+ #include "cached_dir.h"
+
+-extern mempool_t *cifs_sm_req_poolp;
+-extern mempool_t *cifs_req_poolp;
+-
+ /* The xid serves as a useful identifier for each incoming vfs request,
+ in a similar way to the mid which is useful to track each sent smb,
+ and CurrentXid can also provide a running counter (although it
+@@ -101,6 +98,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
+ kfree(buf_to_free->serverDomain);
+ kfree(buf_to_free->serverNOS);
+ kfree_sensitive(buf_to_free->password);
++ kfree_sensitive(buf_to_free->password2);
+ kfree(buf_to_free->user_name);
+ kfree(buf_to_free->domainName);
+ kfree_sensitive(buf_to_free->auth_key.response);
+@@ -113,9 +111,10 @@ sesInfoFree(struct cifs_ses *buf_to_free)
+ }
+
+ struct cifs_tcon *
+-tcon_info_alloc(bool dir_leases_enabled)
++tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
+ {
+ struct cifs_tcon *ret_buf;
++ static atomic_t tcon_debug_id;
+
+ ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
+ if (!ret_buf)
+@@ -132,7 +131,8 @@ tcon_info_alloc(bool dir_leases_enabled)
+
+ atomic_inc(&tconInfoAllocCount);
+ ret_buf->status = TID_NEW;
+- ++ret_buf->tc_count;
++ ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
++ ret_buf->tc_count = 1;
+ spin_lock_init(&ret_buf->tc_lock);
+ INIT_LIST_HEAD(&ret_buf->openFileList);
+ INIT_LIST_HEAD(&ret_buf->tcon_list);
+@@ -140,27 +140,27 @@ tcon_info_alloc(bool dir_leases_enabled)
+ spin_lock_init(&ret_buf->stat_lock);
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
++ ret_buf->stats_from_time = ktime_get_real_seconds();
++#ifdef CONFIG_CIFS_FSCACHE
++ mutex_init(&ret_buf->fscache_lock);
+ #endif
++ trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
+
+ return ret_buf;
+ }
+
+ void
+-tconInfoFree(struct cifs_tcon *tcon)
++tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
+ {
+ if (tcon == NULL) {
+ cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
+ return;
+ }
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
+ free_cached_dirs(tcon->cfids);
+ atomic_dec(&tconInfoAllocCount);
+ kfree(tcon->nativeFileSystem);
+ kfree_sensitive(tcon->password);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
+-#endif
+ kfree(tcon->origin_fullpath);
+ kfree(tcon);
+ }
+@@ -363,6 +363,10 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
+ cifs_dbg(VFS, "Length less than smb header size\n");
+ }
+ return -EIO;
++ } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
++ cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
++ __func__, smb->WordCount);
++ return -EIO;
+ }
+
+ /* otherwise, there is enough to get to the BCC */
+@@ -485,6 +489,8 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid != buf->Tid)
+ continue;
+@@ -848,6 +854,40 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+ free_dentry_path(page);
+ }
+
++/*
++ * If a dentry has been deleted, all corresponding open handles should know that
++ * so that we do not defer closing them.
++ */
++void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
++ const char *path)
++{
++ struct cifsFileInfo *cfile;
++ void *page;
++ const char *full_path;
++ struct cifsInodeInfo *cinode = CIFS_I(inode);
++
++ page = alloc_dentry_path();
++ spin_lock(&cinode->open_file_lock);
++
++ /*
++ * note: we need to construct the path from the dentry and compare it
++ * only when the inode has multiple hardlinks; when the link count is 1,
++ * we can simply mark all open handles, as they all refer to the same file.
++ */
++ if (inode->i_nlink > 1) {
++ list_for_each_entry(cfile, &cinode->openFileList, flist) {
++ full_path = build_path_from_dentry(cfile->dentry, page);
++ if (!IS_ERR(full_path) && strcmp(full_path, path) == 0)
++ cfile->status_file_deleted = true;
++ }
++ } else {
++ list_for_each_entry(cfile, &cinode->openFileList, flist)
++ cfile->status_file_deleted = true;
++ }
++ spin_unlock(&cinode->open_file_lock);
++ free_dentry_path(page);
++}
++
+ /* parses DFS referral V3 structure
+ * caller is responsible for freeing target_nodes
+ * returns:
+@@ -1248,6 +1288,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+ const char *full_path,
+ bool *islink)
+ {
++ struct TCP_Server_Info *server = tcon->ses->server;
+ struct cifs_ses *ses = tcon->ses;
+ size_t len;
+ char *path;
+@@ -1264,12 +1305,12 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+ !is_tcon_dfs(tcon))
+ return 0;
+
+- spin_lock(&tcon->tc_lock);
+- if (!tcon->origin_fullpath) {
+- spin_unlock(&tcon->tc_lock);
++ spin_lock(&server->srv_lock);
++ if (!server->leaf_fullpath) {
++ spin_unlock(&server->srv_lock);
+ return 0;
+ }
+- spin_unlock(&tcon->tc_lock);
++ spin_unlock(&server->srv_lock);
+
+ /*
+ * Slow path - tcon is DFS and @full_path has prefix path, so attempt
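
Among the misc.c changes above, the checkSMB() hunk adds a bounds check: a reply that is long enough for the fixed header can still lie about WordCount and place the byte-count field past the bytes actually read. A standalone sketch of the arithmetic, with a toy one-byte header standing in for the real SMB one:

    #include <stdint.h>
    #include <stdio.h>

    /* toy layout: [1-byte word_count][word_count 16-bit words][2-byte BCC] */
    static int bcc_reachable(const uint8_t *pkt, size_t total_read)
    {
            if (total_read < 1)
                    return 0;
            /* the added check: header + 2 * WordCount must fit in the read */
            return total_read >= 1 + 2u * pkt[0];
    }

    int main(void)
    {
            uint8_t pkt[8] = { 3 };   /* claims 3 parameter words (6 bytes) */

            printf("%d\n", bcc_reachable(pkt, 8));   /* 1: 1 + 6 <= 8 */
            printf("%d\n", bcc_reachable(pkt, 4));   /* 0: 1 + 6 >  4 */
            return 0;
    }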
+diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
+index c8f5ed8a69f1c1..4a517b280f2b79 100644
+--- a/fs/smb/client/namespace.c
++++ b/fs/smb/client/namespace.c
+@@ -117,6 +117,18 @@ cifs_build_devname(char *nodename, const char *prepath)
+ return dev;
+ }
+
++static bool is_dfs_mount(struct dentry *dentry)
++{
++ struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++ bool ret;
++
++ spin_lock(&tcon->tc_lock);
++ ret = !!tcon->origin_fullpath;
++ spin_unlock(&tcon->tc_lock);
++ return ret;
++}
++
+ /* Return full path out of a dentry set for automount */
+ static char *automount_fullpath(struct dentry *dentry, void *page)
+ {
+@@ -156,6 +168,21 @@ static char *automount_fullpath(struct dentry *dentry, void *page)
+ return s;
+ }
+
++static void fs_context_set_ids(struct smb3_fs_context *ctx)
++{
++ kuid_t uid = current_fsuid();
++ kgid_t gid = current_fsgid();
++
++ if (ctx->multiuser) {
++ if (!ctx->uid_specified)
++ ctx->linux_uid = uid;
++ if (!ctx->gid_specified)
++ ctx->linux_gid = gid;
++ }
++ if (!ctx->cruid_specified)
++ ctx->cred_uid = uid;
++}
++
+ /*
+ * Create a vfsmount that we can automount
+ */
+@@ -193,6 +220,7 @@ static struct vfsmount *cifs_do_automount(struct path *path)
+ tmp.leaf_fullpath = NULL;
+ tmp.UNC = tmp.prepath = NULL;
+ tmp.dfs_root_ses = NULL;
++ fs_context_set_ids(&tmp);
+
+ rc = smb3_fs_context_dup(ctx, &tmp);
+ if (rc) {
+@@ -212,8 +240,9 @@ static struct vfsmount *cifs_do_automount(struct path *path)
+ ctx->source = NULL;
+ goto out;
+ }
+- cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s\n",
+- __func__, ctx->source, ctx->UNC, ctx->prepath);
++ ctx->dfs_automount = is_dfs_mount(mntpt);
++ cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
++ __func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);
+
+ mnt = fc_mount(fc);
+ out:
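
fs_context_set_ids() above keeps DFS automounts from inheriting stale ids from the parent mount's context: on multiuser mounts, uid/gid options the user never specified default to the ids of the task triggering the automount, and cruid defaults the same way. A plain C stand-in for the rule, with the kernel's kuid machinery reduced to ints:

    #include <stdio.h>

    struct ids_ctx {
            int multiuser;
            int uid_specified, gid_specified, cruid_specified;
            int linux_uid, linux_gid, cred_uid;
    };

    /* fall back to the automounting task's ids, as in the hunk above */
    static void set_ids(struct ids_ctx *c, int fsuid, int fsgid)
    {
            if (c->multiuser) {
                    if (!c->uid_specified)
                            c->linux_uid = fsuid;
                    if (!c->gid_specified)
                            c->linux_gid = fsgid;
            }
            if (!c->cruid_specified)
                    c->cred_uid = fsuid;
    }

    int main(void)
    {
            struct ids_ctx c = { .multiuser = 1 };

            set_ids(&c, 1000, 100);
            printf("uid=%d gid=%d cruid=%d\n",
                   c.linux_uid, c.linux_gid, c.cred_uid);
            return 0;
    }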
+diff --git a/fs/smb/client/ntlmssp.h b/fs/smb/client/ntlmssp.h
+index 2c5dde2ece588a..875de43b72de3e 100644
+--- a/fs/smb/client/ntlmssp.h
++++ b/fs/smb/client/ntlmssp.h
+@@ -133,8 +133,8 @@ typedef struct _AUTHENTICATE_MESSAGE {
+ SECURITY_BUFFER WorkstationName;
+ SECURITY_BUFFER SessionKey;
+ __le32 NegotiateFlags;
+- /* SECURITY_BUFFER for version info not present since we
+- do not set the version is present flag */
++ struct ntlmssp_version Version;
++ /* SECURITY_BUFFER */
+ char UserString[];
+ } __attribute__((packed)) AUTHENTICATE_MESSAGE, *PAUTHENTICATE_MESSAGE;
+
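
The AUTHENTICATE_MESSAGE above now carries the fixed eight-byte VERSION block rather than omitting it. Its wire layout per MS-NLMP 2.2.2.10, sketched with descriptive field names (the kernel's struct ntlmssp_version is defined elsewhere in this patch); the struct is naturally packed, so no pragmas are needed:

    #include <stdint.h>
    #include <stdio.h>

    struct ntlm_version_sketch {
            uint8_t  product_major_version;
            uint8_t  product_minor_version;
            uint16_t product_build;          /* little endian on the wire */
            uint8_t  reserved[3];
            uint8_t  ntlm_revision_current;  /* 0x0f = NTLMSSP_REVISION_W2K3 */
    };

    int main(void)
    {
            printf("%zu\n", sizeof(struct ntlm_version_sketch));  /* 8 */
            return 0;
    }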
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 47fc22de8d20c7..06111d9f395007 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -22,6 +22,7 @@
+ #include "smb2proto.h"
+ #include "fs_context.h"
+ #include "cached_dir.h"
++#include "reparse.h"
+
+ /*
+ * To be safe - for UCS to UTF-8 with strings loaded with the rare long
+@@ -71,6 +72,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ struct super_block *sb = parent->d_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ int rc;
+
+ cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
+
+@@ -82,9 +84,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ * We'll end up doing an on the wire call either way and
+ * this spares us an invalidation.
+ */
+- if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+- return;
+ retry:
++ if ((fattr->cf_cifsattrs & ATTR_REPARSE) ||
++ (fattr->cf_flags & CIFS_FATTR_NEED_REVAL))
++ return;
++
+ dentry = d_alloc_parallel(parent, name, &wq);
+ }
+ if (IS_ERR(dentry))
+@@ -104,12 +108,36 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
+ fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
+
+- /* update inode in place
+- * if both i_ino and i_mode didn't change */
+- if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+- cifs_fattr_to_inode(inode, fattr) == 0) {
+- dput(dentry);
+- return;
++ /*
++ * Update the inode in place if neither i_ino nor i_mode has
++ * changed.
++ */
++ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
++ /*
++ * Query dir responses don't provide enough
++ * information about reparse points other than
++ * their reparse tags. Save an invalidation by
++ * not clobbering some existing attributes when
++ * reparse tag and ctime haven't changed.
++ */
++ rc = 0;
++ if (fattr->cf_cifsattrs & ATTR_REPARSE) {
++ if (likely(reparse_inode_match(inode, fattr))) {
++ fattr->cf_mode = inode->i_mode;
++ fattr->cf_rdev = inode->i_rdev;
++ fattr->cf_uid = inode->i_uid;
++ fattr->cf_gid = inode->i_gid;
++ fattr->cf_eof = CIFS_I(inode)->server_eof;
++ fattr->cf_symlink_target = NULL;
++ } else {
++ CIFS_I(inode)->time = 0;
++ rc = -ESTALE;
++ }
++ }
++ if (!rc && !cifs_fattr_to_inode(inode, fattr, true)) {
++ dput(dentry);
++ return;
++ }
+ }
+ }
+ d_invalidate(dentry);
+@@ -127,32 +155,13 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ dput(dentry);
+ }
+
+-static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+-{
+- if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+- return false;
+- /*
+- * The DFS tags should be only intepreted by server side as per
+- * MS-FSCC 2.1.2.1, but let's include them anyway.
+- *
+- * Besides, if cf_cifstag is unset (0), then we still need it to be
+- * revalidated to know exactly what reparse point it is.
+- */
+- switch (fattr->cf_cifstag) {
+- case IO_REPARSE_TAG_DFS:
+- case IO_REPARSE_TAG_DFSR:
+- case IO_REPARSE_TAG_SYMLINK:
+- case IO_REPARSE_TAG_NFS:
+- case IO_REPARSE_TAG_MOUNT_POINT:
+- case 0:
+- return true;
+- }
+- return false;
+-}
+-
+ static void
+ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
+ {
++ struct cifs_open_info_data data = {
++ .reparse = { .tag = fattr->cf_cifstag, },
++ };
++
+ fattr->cf_uid = cifs_sb->ctx->linux_uid;
+ fattr->cf_gid = cifs_sb->ctx->linux_gid;
+
+@@ -165,7 +174,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
+ * reasonably map some of them to directories vs. files vs. symlinks
+ */
+ if ((fattr->cf_cifsattrs & ATTR_REPARSE) &&
+- cifs_reparse_point_to_fattr(cifs_sb, fattr, fattr->cf_cifstag))
++ cifs_reparse_point_to_fattr(cifs_sb, fattr, &data))
+ goto out_reparse;
+
+ if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
+@@ -177,14 +186,6 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
+ }
+
+ out_reparse:
+- /*
+- * We need to revalidate it further to make a decision about whether it
+- * is a symbolic link, DFS referral or a reparse point with a direct
+- * access like junctions, deduplicated files, NFS symlinks.
+- */
+- if (reparse_file_needs_reval(fattr))
+- fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+-
+ /* non-unix readdir doesn't provide nlink */
+ fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+
+@@ -265,9 +266,6 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
+ fattr->cf_dtype = DT_REG;
+ }
+
+- if (reparse_file_needs_reval(fattr))
+- fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+-
+ sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
+ sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
+ }
+@@ -295,14 +293,16 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
+ }
+
+ static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+- SEARCH_ID_FULL_DIR_INFO *info,
++ const void *info,
+ struct cifs_sb_info *cifs_sb)
+ {
++ const FILE_FULL_DIRECTORY_INFO *di = info;
++
+ __dir_info_to_fattr(fattr, info);
+
+- /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
++ /* See MS-FSCC 2.4.14, 2.4.19 */
+ if (fattr->cf_cifsattrs & ATTR_REPARSE)
+- fattr->cf_cifstag = le32_to_cpu(info->EaSize);
++ fattr->cf_cifstag = le32_to_cpu(di->EaSize);
+ cifs_fill_common_info(fattr, cifs_sb);
+ }
+
+@@ -327,38 +327,6 @@ cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
+ cifs_fill_common_info(fattr, cifs_sb);
+ }
+
+-/* BB eventually need to add the following helper function to
+- resolve NT_STATUS_STOPPED_ON_SYMLINK return code when
+- we try to do FindFirst on (NTFS) directory symlinks */
+-/*
+-int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
+- unsigned int xid)
+-{
+- __u16 fid;
+- int len;
+- int oplock = 0;
+- int rc;
+- struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
+- char *tmpbuffer;
+-
+- rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
+- OPEN_REPARSE_POINT, &fid, &oplock, NULL,
+- cifs_sb->local_nls,
+- cifs_remap(cifs_sb);
+- if (!rc) {
+- tmpbuffer = kmalloc(maxpath);
+- rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path,
+- tmpbuffer,
+- maxpath -1,
+- fid,
+- cifs_sb->local_nls);
+- if (CIFSSMBClose(xid, ptcon, fid)) {
+- cifs_dbg(FYI, "Error closing temporary reparsepoint open\n");
+- }
+- }
+-}
+- */
+-
+ static int
+ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ const char *full_path)
+@@ -416,7 +384,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ } else /* not srvinos - BB fixme add check for backlevel? */ {
+- cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
++ cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
+ }
+
+ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+@@ -427,13 +395,10 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ &cifsFile->fid, search_flags,
+ &cifsFile->srch_inf);
+
+- if (rc == 0)
++ if (rc == 0) {
+ cifsFile->invalidHandle = false;
+- /* BB add following call to handle readdir on new NTFS symlink errors
+- else if STATUS_STOPPED_ON_SYMLINK
+- call get_symlink_reparse_path and retry with new path */
+- else if ((rc == -EOPNOTSUPP) &&
+- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
++ } else if ((rc == -EOPNOTSUPP) &&
++ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+ goto ffirst_retry;
+ }
+@@ -668,10 +633,10 @@ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
+ static int is_dir_changed(struct file *file)
+ {
+ struct inode *inode = file_inode(file);
+- struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
++ struct cifsInodeInfo *cifs_inode_info = CIFS_I(inode);
+
+- if (cifsInfo->time == 0)
+- return 1; /* directory was changed, perhaps due to unlink */
++ if (cifs_inode_info->time == 0)
++ return 1; /* directory was changed, e.g. unlink or new file */
+ else
+ return 0;
+
+@@ -1010,10 +975,9 @@ static int cifs_filldir(char *find_entry, struct file *file,
+ (FIND_FILE_STANDARD_INFO *)find_entry,
+ cifs_sb);
+ break;
++ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+- cifs_fulldir_info_to_fattr(&fattr,
+- (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+- cifs_sb);
++ cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb);
+ break;
+ default:
+ cifs_dir_info_to_fattr(&fattr,
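
The readdir.c changes above lean on an MS-FSCC quirk (2.4.14, 2.4.19): in FileFullDirectoryInformation and FileIdFullDirectoryInformation entries, the EaSize field is reused to carry the reparse tag whenever the entry has FILE_ATTRIBUTE_REPARSE_POINT set, which is what lets query-dir results seed fattr->cf_cifstag without an extra round trip. A trimmed-down sketch of the decode, assuming a little-endian host:

    #include <stdint.h>
    #include <stdio.h>

    #define ATTR_REPARSE 0x0400      /* FILE_ATTRIBUTE_REPARSE_POINT */

    struct full_dir_info {           /* trimmed to the two relevant fields */
            uint32_t attributes;
            uint32_t ea_size;        /* doubles as the reparse tag */
    };

    static uint32_t reparse_tag(const struct full_dir_info *di)
    {
            return (di->attributes & ATTR_REPARSE) ? di->ea_size : 0;
    }

    int main(void)
    {
            struct full_dir_info di = { ATTR_REPARSE, 0xa000000c };

            printf("tag=0x%08x\n", reparse_tag(&di)); /* IO_REPARSE_TAG_SYMLINK */
            return 0;
    }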
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+new file mode 100644
+index 00000000000000..ad0e0de9a165d4
+--- /dev/null
++++ b/fs/smb/client/reparse.c
+@@ -0,0 +1,551 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Paulo Alcantara <pc@manguebit.com>
++ */
++
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/slab.h>
++#include "cifsglob.h"
++#include "smb2proto.h"
++#include "cifsproto.h"
++#include "cifs_unicode.h"
++#include "cifs_debug.h"
++#include "fs_context.h"
++#include "reparse.h"
++
++int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, const char *symname)
++{
++ struct reparse_symlink_data_buffer *buf = NULL;
++ struct cifs_open_info_data data;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct inode *new;
++ struct kvec iov;
++ __le16 *path;
++ char *sym, sep = CIFS_DIR_SEP(cifs_sb);
++ u16 len, plen;
++ int rc = 0;
++
++ sym = kstrdup(symname, GFP_KERNEL);
++ if (!sym)
++ return -ENOMEM;
++
++ data = (struct cifs_open_info_data) {
++ .reparse_point = true,
++ .reparse = { .tag = IO_REPARSE_TAG_SYMLINK, },
++ .symlink_target = sym,
++ };
++
++ convert_delimiter(sym, sep);
++ path = cifs_convert_path_to_utf16(sym, cifs_sb);
++ if (!path) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ plen = 2 * UniStrnlen((wchar_t *)path, PATH_MAX);
++ len = sizeof(*buf) + plen * 2;
++ buf = kzalloc(len, GFP_KERNEL);
++ if (!buf) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ buf->ReparseTag = cpu_to_le32(IO_REPARSE_TAG_SYMLINK);
++ buf->ReparseDataLength = cpu_to_le16(len - sizeof(struct reparse_data_buffer));
++ buf->SubstituteNameOffset = cpu_to_le16(plen);
++ buf->SubstituteNameLength = cpu_to_le16(plen);
++ memcpy(&buf->PathBuffer[plen], path, plen);
++ buf->PrintNameOffset = 0;
++ buf->PrintNameLength = cpu_to_le16(plen);
++ memcpy(buf->PathBuffer, path, plen);
++ buf->Flags = cpu_to_le32(*symname != '/' ? SYMLINK_FLAG_RELATIVE : 0);
++ if (*sym != sep)
++ buf->Flags = cpu_to_le32(SYMLINK_FLAG_RELATIVE);
++
++ convert_delimiter(sym, '/');
++ iov.iov_base = buf;
++ iov.iov_len = len;
++ new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
++ tcon, full_path, &iov, NULL);
++ if (!IS_ERR(new))
++ d_instantiate(dentry, new);
++ else
++ rc = PTR_ERR(new);
++out:
++ kfree(path);
++ cifs_free_open_info(&data);
++ kfree(buf);
++ return rc;
++}
++
++static int nfs_set_reparse_buf(struct reparse_posix_data *buf,
++ mode_t mode, dev_t dev,
++ struct kvec *iov)
++{
++ u64 type;
++ u16 len, dlen;
++
++ len = sizeof(*buf);
++
++ switch ((type = reparse_mode_nfs_type(mode))) {
++ case NFS_SPECFILE_BLK:
++ case NFS_SPECFILE_CHR:
++ dlen = sizeof(__le64);
++ break;
++ case NFS_SPECFILE_FIFO:
++ case NFS_SPECFILE_SOCK:
++ dlen = 0;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ buf->ReparseTag = cpu_to_le32(IO_REPARSE_TAG_NFS);
++ buf->Reserved = 0;
++ buf->InodeType = cpu_to_le64(type);
++ buf->ReparseDataLength = cpu_to_le16(len + dlen -
++ sizeof(struct reparse_data_buffer));
++ *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MAJOR(dev) << 32) |
++ MINOR(dev));
++ iov->iov_base = buf;
++ iov->iov_len = len + dlen;
++ return 0;
++}
++
++static int mknod_nfs(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
++{
++ struct cifs_open_info_data data;
++ struct reparse_posix_data *p;
++ struct inode *new;
++ struct kvec iov;
++ __u8 buf[sizeof(*p) + sizeof(__le64)];
++ int rc;
++
++ p = (struct reparse_posix_data *)buf;
++ rc = nfs_set_reparse_buf(p, mode, dev, &iov);
++ if (rc)
++ return rc;
++
++ data = (struct cifs_open_info_data) {
++ .reparse_point = true,
++ .reparse = { .tag = IO_REPARSE_TAG_NFS, .posix = p, },
++ };
++
++ new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
++ tcon, full_path, &iov, NULL);
++ if (!IS_ERR(new))
++ d_instantiate(dentry, new);
++ else
++ rc = PTR_ERR(new);
++ cifs_free_open_info(&data);
++ return rc;
++}
++
++static int wsl_set_reparse_buf(struct reparse_data_buffer *buf,
++ mode_t mode, struct kvec *iov)
++{
++ u32 tag;
++
++ switch ((tag = reparse_mode_wsl_tag(mode))) {
++ case IO_REPARSE_TAG_LX_BLK:
++ case IO_REPARSE_TAG_LX_CHR:
++ case IO_REPARSE_TAG_LX_FIFO:
++ case IO_REPARSE_TAG_AF_UNIX:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ buf->ReparseTag = cpu_to_le32(tag);
++ buf->Reserved = 0;
++ buf->ReparseDataLength = 0;
++ iov->iov_base = buf;
++ iov->iov_len = sizeof(*buf);
++ return 0;
++}
++
++static struct smb2_create_ea_ctx *ea_create_context(u32 dlen, size_t *cc_len)
++{
++ struct smb2_create_ea_ctx *cc;
++
++ *cc_len = round_up(sizeof(*cc) + dlen, 8);
++ cc = kzalloc(*cc_len, GFP_KERNEL);
++ if (!cc)
++ return ERR_PTR(-ENOMEM);
++
++ cc->ctx.NameOffset = cpu_to_le16(offsetof(struct smb2_create_ea_ctx,
++ name));
++ cc->ctx.NameLength = cpu_to_le16(4);
++ memcpy(cc->name, SMB2_CREATE_EA_BUFFER, strlen(SMB2_CREATE_EA_BUFFER));
++ cc->ctx.DataOffset = cpu_to_le16(offsetof(struct smb2_create_ea_ctx, ea));
++ cc->ctx.DataLength = cpu_to_le32(dlen);
++ return cc;
++}
++
++struct wsl_xattr {
++ const char *name;
++ __le64 value;
++ u16 size;
++ u32 next;
++};
++
++static int wsl_set_xattrs(struct inode *inode, umode_t _mode,
++ dev_t _dev, struct kvec *iov)
++{
++ struct smb2_file_full_ea_info *ea;
++ struct smb2_create_ea_ctx *cc;
++ struct smb3_fs_context *ctx = CIFS_SB(inode->i_sb)->ctx;
++ __le64 uid = cpu_to_le64(from_kuid(current_user_ns(), ctx->linux_uid));
++ __le64 gid = cpu_to_le64(from_kgid(current_user_ns(), ctx->linux_gid));
++ __le64 dev = cpu_to_le64(((u64)MINOR(_dev) << 32) | MAJOR(_dev));
++ __le64 mode = cpu_to_le64(_mode);
++ struct wsl_xattr xattrs[] = {
++ { .name = SMB2_WSL_XATTR_UID, .value = uid, .size = SMB2_WSL_XATTR_UID_SIZE, },
++ { .name = SMB2_WSL_XATTR_GID, .value = gid, .size = SMB2_WSL_XATTR_GID_SIZE, },
++ { .name = SMB2_WSL_XATTR_MODE, .value = mode, .size = SMB2_WSL_XATTR_MODE_SIZE, },
++ { .name = SMB2_WSL_XATTR_DEV, .value = dev, .size = SMB2_WSL_XATTR_DEV_SIZE, },
++ };
++ size_t cc_len;
++ u32 dlen = 0, next = 0;
++ int i, num_xattrs;
++ u8 name_size = SMB2_WSL_XATTR_NAME_LEN + 1;
++
++ memset(iov, 0, sizeof(*iov));
++
++ /* Exclude $LXDEV xattr for sockets and fifos */
++ if (S_ISSOCK(_mode) || S_ISFIFO(_mode))
++ num_xattrs = ARRAY_SIZE(xattrs) - 1;
++ else
++ num_xattrs = ARRAY_SIZE(xattrs);
++
++ for (i = 0; i < num_xattrs; i++) {
++ xattrs[i].next = ALIGN(sizeof(*ea) + name_size +
++ xattrs[i].size, 4);
++ dlen += xattrs[i].next;
++ }
++
++ cc = ea_create_context(dlen, &cc_len);
++ if (IS_ERR(cc))
++ return PTR_ERR(cc);
++
++ ea = &cc->ea;
++ for (i = 0; i < num_xattrs; i++) {
++ ea = (void *)((u8 *)ea + next);
++ next = xattrs[i].next;
++ ea->next_entry_offset = cpu_to_le32(next);
++
++ ea->ea_name_length = name_size - 1;
++ ea->ea_value_length = cpu_to_le16(xattrs[i].size);
++ memcpy(ea->ea_data, xattrs[i].name, name_size);
++ memcpy(&ea->ea_data[name_size],
++ &xattrs[i].value, xattrs[i].size);
++ }
++ ea->next_entry_offset = 0;
++
++ iov->iov_base = cc;
++ iov->iov_len = cc_len;
++ return 0;
++}
++
++static int mknod_wsl(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
++{
++ struct cifs_open_info_data data;
++ struct reparse_data_buffer buf;
++ struct smb2_create_ea_ctx *cc;
++ struct inode *new;
++ unsigned int len;
++ struct kvec reparse_iov, xattr_iov;
++ int rc;
++
++ rc = wsl_set_reparse_buf(&buf, mode, &reparse_iov);
++ if (rc)
++ return rc;
++
++ rc = wsl_set_xattrs(inode, mode, dev, &xattr_iov);
++ if (rc)
++ return rc;
++
++ data = (struct cifs_open_info_data) {
++ .reparse_point = true,
++ .reparse = { .tag = le32_to_cpu(buf.ReparseTag), .buf = &buf, },
++ };
++
++ cc = xattr_iov.iov_base;
++ len = le32_to_cpu(cc->ctx.DataLength);
++ memcpy(data.wsl.eas, &cc->ea, len);
++ data.wsl.eas_len = len;
++
++ new = smb2_get_reparse_inode(&data, inode->i_sb,
++ xid, tcon, full_path,
++ &reparse_iov, &xattr_iov);
++ if (!IS_ERR(new))
++ d_instantiate(dentry, new);
++ else
++ rc = PTR_ERR(new);
++ cifs_free_open_info(&data);
++ kfree(xattr_iov.iov_base);
++ return rc;
++}
++
++int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
++{
++ struct smb3_fs_context *ctx = CIFS_SB(inode->i_sb)->ctx;
++ int rc = -EOPNOTSUPP;
++
++ switch (ctx->reparse_type) {
++ case CIFS_REPARSE_TYPE_NFS:
++ rc = mknod_nfs(xid, inode, dentry, tcon, full_path, mode, dev);
++ break;
++ case CIFS_REPARSE_TYPE_WSL:
++ rc = mknod_wsl(xid, inode, dentry, tcon, full_path, mode, dev);
++ break;
++ }
++ return rc;
++}
++
++/* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
++static int parse_reparse_posix(struct reparse_posix_data *buf,
++ struct cifs_sb_info *cifs_sb,
++ struct cifs_open_info_data *data)
++{
++ unsigned int len;
++ u64 type;
++
++ len = le16_to_cpu(buf->ReparseDataLength);
++ if (len < sizeof(buf->InodeType)) {
++ cifs_dbg(VFS, "srv returned malformed nfs buffer\n");
++ return -EIO;
++ }
++
++ len -= sizeof(buf->InodeType);
++
++ switch ((type = le64_to_cpu(buf->InodeType))) {
++ case NFS_SPECFILE_LNK:
++ data->symlink_target = cifs_strndup_from_utf16(buf->DataBuffer,
++ len, true,
++ cifs_sb->local_nls);
++ if (!data->symlink_target)
++ return -ENOMEM;
++ cifs_dbg(FYI, "%s: target path: %s\n",
++ __func__, data->symlink_target);
++ break;
++ case NFS_SPECFILE_CHR:
++ case NFS_SPECFILE_BLK:
++ case NFS_SPECFILE_FIFO:
++ case NFS_SPECFILE_SOCK:
++ break;
++ default:
++ cifs_dbg(VFS, "%s: unhandled inode type: 0x%llx\n",
++ __func__, type);
++ return -EOPNOTSUPP;
++ }
++ return 0;
++}
++
++static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym,
++ u32 plen, bool unicode,
++ struct cifs_sb_info *cifs_sb,
++ struct cifs_open_info_data *data)
++{
++ unsigned int len;
++ unsigned int offs;
++
++	/* We handle the Symbolic Link reparse tag here; see MS-FSCC 2.1.2.4 */
++
++ offs = le16_to_cpu(sym->SubstituteNameOffset);
++ len = le16_to_cpu(sym->SubstituteNameLength);
++ if (offs + 20 > plen || offs + len + 20 > plen) {
++ cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
++ return -EIO;
++ }
++
++ data->symlink_target = cifs_strndup_from_utf16(sym->PathBuffer + offs,
++ len, unicode,
++ cifs_sb->local_nls);
++ if (!data->symlink_target)
++ return -ENOMEM;
++
++ convert_delimiter(data->symlink_target, '/');
++ cifs_dbg(FYI, "%s: target path: %s\n", __func__, data->symlink_target);
++
++ return 0;
++}
++
++int parse_reparse_point(struct reparse_data_buffer *buf,
++ u32 plen, struct cifs_sb_info *cifs_sb,
++ bool unicode, struct cifs_open_info_data *data)
++{
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++
++ data->reparse.buf = buf;
++
++ /* See MS-FSCC 2.1.2 */
++ switch (le32_to_cpu(buf->ReparseTag)) {
++ case IO_REPARSE_TAG_NFS:
++ return parse_reparse_posix((struct reparse_posix_data *)buf,
++ cifs_sb, data);
++ case IO_REPARSE_TAG_SYMLINK:
++ return parse_reparse_symlink(
++ (struct reparse_symlink_data_buffer *)buf,
++ plen, unicode, cifs_sb, data);
++ case IO_REPARSE_TAG_LX_SYMLINK:
++ case IO_REPARSE_TAG_AF_UNIX:
++ case IO_REPARSE_TAG_LX_FIFO:
++ case IO_REPARSE_TAG_LX_CHR:
++ case IO_REPARSE_TAG_LX_BLK:
++ break;
++ default:
++ cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
++ le32_to_cpu(buf->ReparseTag));
++ break;
++ }
++ return 0;
++}
++
++int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
++ struct kvec *rsp_iov,
++ struct cifs_open_info_data *data)
++{
++ struct reparse_data_buffer *buf;
++ struct smb2_ioctl_rsp *io = rsp_iov->iov_base;
++ u32 plen = le32_to_cpu(io->OutputCount);
++
++ buf = (struct reparse_data_buffer *)((u8 *)io +
++ le32_to_cpu(io->OutputOffset));
++ return parse_reparse_point(buf, plen, cifs_sb, true, data);
++}
++
++static void wsl_to_fattr(struct cifs_open_info_data *data,
++ struct cifs_sb_info *cifs_sb,
++ u32 tag, struct cifs_fattr *fattr)
++{
++ struct smb2_file_full_ea_info *ea;
++ u32 next = 0;
++
++ switch (tag) {
++ case IO_REPARSE_TAG_LX_SYMLINK:
++ fattr->cf_mode |= S_IFLNK;
++ break;
++ case IO_REPARSE_TAG_LX_FIFO:
++ fattr->cf_mode |= S_IFIFO;
++ break;
++ case IO_REPARSE_TAG_AF_UNIX:
++ fattr->cf_mode |= S_IFSOCK;
++ break;
++ case IO_REPARSE_TAG_LX_CHR:
++ fattr->cf_mode |= S_IFCHR;
++ break;
++ case IO_REPARSE_TAG_LX_BLK:
++ fattr->cf_mode |= S_IFBLK;
++ break;
++ }
++
++ if (!data->wsl.eas_len)
++ goto out;
++
++ ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
++ do {
++ const char *name;
++ void *v;
++ u8 nlen;
++
++ ea = (void *)((u8 *)ea + next);
++ next = le32_to_cpu(ea->next_entry_offset);
++ if (!le16_to_cpu(ea->ea_value_length))
++ continue;
++
++ name = ea->ea_data;
++ nlen = ea->ea_name_length;
++ v = (void *)((u8 *)ea->ea_data + ea->ea_name_length + 1);
++
++ if (!strncmp(name, SMB2_WSL_XATTR_UID, nlen))
++ fattr->cf_uid = wsl_make_kuid(cifs_sb, v);
++ else if (!strncmp(name, SMB2_WSL_XATTR_GID, nlen))
++ fattr->cf_gid = wsl_make_kgid(cifs_sb, v);
++ else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
++ fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
++ else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
++ fattr->cf_rdev = wsl_mkdev(v);
++ } while (next);
++out:
++ fattr->cf_dtype = S_DT(fattr->cf_mode);
++}
++
++bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
++ struct cifs_fattr *fattr,
++ struct cifs_open_info_data *data)
++{
++ struct reparse_posix_data *buf = data->reparse.posix;
++ u32 tag = data->reparse.tag;
++
++ if (tag == IO_REPARSE_TAG_NFS && buf) {
++ if (le16_to_cpu(buf->ReparseDataLength) < sizeof(buf->InodeType))
++ return false;
++ switch (le64_to_cpu(buf->InodeType)) {
++ case NFS_SPECFILE_CHR:
++ if (le16_to_cpu(buf->ReparseDataLength) != sizeof(buf->InodeType) + 8)
++ return false;
++ fattr->cf_mode |= S_IFCHR;
++ fattr->cf_rdev = reparse_nfs_mkdev(buf);
++ break;
++ case NFS_SPECFILE_BLK:
++ if (le16_to_cpu(buf->ReparseDataLength) != sizeof(buf->InodeType) + 8)
++ return false;
++ fattr->cf_mode |= S_IFBLK;
++ fattr->cf_rdev = reparse_nfs_mkdev(buf);
++ break;
++ case NFS_SPECFILE_FIFO:
++ fattr->cf_mode |= S_IFIFO;
++ break;
++ case NFS_SPECFILE_SOCK:
++ fattr->cf_mode |= S_IFSOCK;
++ break;
++ case NFS_SPECFILE_LNK:
++ fattr->cf_mode |= S_IFLNK;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ return false;
++ }
++ goto out;
++ }
++
++ switch (tag) {
++ case IO_REPARSE_TAG_INTERNAL:
++ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
++ return false;
++ fallthrough;
++ case IO_REPARSE_TAG_DFS:
++ case IO_REPARSE_TAG_DFSR:
++ case IO_REPARSE_TAG_MOUNT_POINT:
++ /* See cifs_create_junction_fattr() */
++ fattr->cf_mode = S_IFDIR | 0711;
++ break;
++ case IO_REPARSE_TAG_LX_SYMLINK:
++ case IO_REPARSE_TAG_LX_FIFO:
++ case IO_REPARSE_TAG_AF_UNIX:
++ case IO_REPARSE_TAG_LX_CHR:
++ case IO_REPARSE_TAG_LX_BLK:
++ wsl_to_fattr(data, cifs_sb, tag, fattr);
++ break;
++ case 0: /* SMB1 symlink */
++ case IO_REPARSE_TAG_SYMLINK:
++ case IO_REPARSE_TAG_NFS:
++ fattr->cf_mode |= S_IFLNK;
++ break;
++ default:
++ return false;
++ }
++out:
++ fattr->cf_dtype = S_DT(fattr->cf_mode);
++ return true;
++}
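
A note on the magic 20 in parse_reparse_symlink() above: it is the size of the fixed portion of a symlink reparse buffer before PathBuffer[] begins (MS-FSCC 2.1.2.4), so both name-offset checks are really "fixed header + offset (+ length) must not exceed plen". Spelled out, with the layout naturally packed:

    #include <stdint.h>
    #include <stdio.h>

    struct sym_reparse_hdr {
            uint32_t reparse_tag;              /*  4 */
            uint16_t reparse_data_length;      /*  2 */
            uint16_t reserved;                 /*  2 */
            uint16_t substitute_name_offset;   /*  2 */
            uint16_t substitute_name_length;   /*  2 */
            uint16_t print_name_offset;        /*  2 */
            uint16_t print_name_length;        /*  2 */
            uint32_t flags;                    /*  4 -> 20 bytes total */
            /* uint8_t path_buffer[]; the names start here */
    };

    int main(void)
    {
            printf("%zu\n", sizeof(struct sym_reparse_hdr));   /* 20 */
            return 0;
    }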
+diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
+new file mode 100644
+index 00000000000000..2c0644bc4e65a7
+--- /dev/null
++++ b/fs/smb/client/reparse.h
+@@ -0,0 +1,128 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2024 Paulo Alcantara <pc@manguebit.com>
++ */
++
++#ifndef _CIFS_REPARSE_H
++#define _CIFS_REPARSE_H
++
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/uidgid.h>
++#include "fs_context.h"
++#include "cifsglob.h"
++
++/*
++ * Used only by cifs.ko to ignore reparse points on files when the client or
++ * server doesn't support FSCTL_GET_REPARSE_POINT.
++ */
++#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U)
++
++static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf)
++{
++ u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer);
++
++ return MKDEV(v >> 32, v & 0xffffffff);
++}
++
++static inline dev_t wsl_mkdev(void *ptr)
++{
++ u64 v = le64_to_cpu(*(__le64 *)ptr);
++
++ return MKDEV(v & 0xffffffff, v >> 32);
++}
++
++static inline kuid_t wsl_make_kuid(struct cifs_sb_info *cifs_sb,
++ void *ptr)
++{
++ u32 uid = le32_to_cpu(*(__le32 *)ptr);
++
++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
++ return cifs_sb->ctx->linux_uid;
++ return make_kuid(current_user_ns(), uid);
++}
++
++static inline kgid_t wsl_make_kgid(struct cifs_sb_info *cifs_sb,
++ void *ptr)
++{
++ u32 gid = le32_to_cpu(*(__le32 *)ptr);
++
++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
++ return cifs_sb->ctx->linux_gid;
++ return make_kgid(current_user_ns(), gid);
++}
++
++static inline u64 reparse_mode_nfs_type(mode_t mode)
++{
++ switch (mode & S_IFMT) {
++ case S_IFBLK: return NFS_SPECFILE_BLK;
++ case S_IFCHR: return NFS_SPECFILE_CHR;
++ case S_IFIFO: return NFS_SPECFILE_FIFO;
++ case S_IFSOCK: return NFS_SPECFILE_SOCK;
++ }
++ return 0;
++}
++
++static inline u32 reparse_mode_wsl_tag(mode_t mode)
++{
++ switch (mode & S_IFMT) {
++ case S_IFBLK: return IO_REPARSE_TAG_LX_BLK;
++ case S_IFCHR: return IO_REPARSE_TAG_LX_CHR;
++ case S_IFIFO: return IO_REPARSE_TAG_LX_FIFO;
++ case S_IFSOCK: return IO_REPARSE_TAG_AF_UNIX;
++ }
++ return 0;
++}
++
++/*
++ * Match a reparse point inode if reparse tag and ctime haven't changed.
++ *
++ * Windows Server updates the ctime of a reparse point when its data changes.
++ * The server doesn't allow changing the tag of an existing reparse point,
++ * though it's still worth checking.
++ */
++static inline bool reparse_inode_match(struct inode *inode,
++ struct cifs_fattr *fattr)
++{
++ struct cifsInodeInfo *cinode = CIFS_I(inode);
++ struct timespec64 ctime = inode_get_ctime(inode);
++
++ /*
++ * Do not match reparse tags when the client or server doesn't support
++ * FSCTL_GET_REPARSE_POINT. @fattr->cf_cifstag should contain the correct
++ * reparse tag from the query dir response, but the client won't be able
++ * to read the reparse point data anyway. This spares us a revalidation.
++ */
++ if (cinode->reparse_tag != IO_REPARSE_TAG_INTERNAL &&
++ cinode->reparse_tag != fattr->cf_cifstag)
++ return false;
++ return (cinode->cifsAttrs & ATTR_REPARSE) &&
++ timespec64_equal(&ctime, &fattr->cf_ctime);
++}
++
++static inline bool cifs_open_data_reparse(struct cifs_open_info_data *data)
++{
++ struct smb2_file_all_info *fi = &data->fi;
++ u32 attrs = le32_to_cpu(fi->Attributes);
++ bool ret;
++
++ ret = data->reparse_point || (attrs & ATTR_REPARSE);
++ if (ret)
++ attrs |= ATTR_REPARSE;
++ fi->Attributes = cpu_to_le32(attrs);
++ return ret;
++}
++
++bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
++ struct cifs_fattr *fattr,
++ struct cifs_open_info_data *data);
++int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, const char *symname);
++int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev);
++int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb, struct kvec *rsp_iov,
++ struct cifs_open_info_data *data);
++
++#endif /* _CIFS_REPARSE_H */
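
reparse_nfs_mkdev() and wsl_mkdev() above decode the same 64-bit field with the halves swapped: the NFS reparse format keeps the major number in the high 32 bits, while WSL's $LXDEV EA keeps the minor there, matching the write sides in nfs_set_reparse_buf() and wsl_set_xattrs(). A quick demonstration of the two packings:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t nfs_pack(uint32_t maj, uint32_t min)
    {
            return ((uint64_t)maj << 32) | min;  /* as nfs_set_reparse_buf() */
    }

    static uint64_t wsl_pack(uint32_t maj, uint32_t min)
    {
            return ((uint64_t)min << 32) | maj;  /* as wsl_set_xattrs() */
    }

    int main(void)
    {
            /* block device 8:1 lands in opposite halves in each scheme */
            printf("nfs: 0x%016llx\n", (unsigned long long)nfs_pack(8, 1));
            printf("wsl: 0x%016llx\n", (unsigned long long)wsl_pack(8, 1));
            return 0;
    }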
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 79f26c560edf89..3216f786908fbb 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -24,7 +24,7 @@
+ #include "fs_context.h"
+
+ static int
+-cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++cifs_ses_add_channel(struct cifs_ses *ses,
+ struct cifs_server_iface *iface);
+
+ bool
+@@ -69,12 +69,16 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
+
+ /* channel helper functions. assumed that chan_lock is held by caller. */
+
+-unsigned int
++int
+ cifs_ses_get_chan_index(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+ {
+ unsigned int i;
+
++ /* if the channel is waiting for termination */
++ if (server && server->terminate)
++ return CIFS_INVAL_CHAN_INDEX;
++
+ for (i = 0; i < ses->chan_count; i++) {
+ if (ses->chans[i].server == server)
+ return i;
+@@ -84,15 +88,17 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
+ if (server)
+ cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
+ server->conn_id);
+- WARN_ON(1);
+- return 0;
++ return CIFS_INVAL_CHAN_INDEX;
+ }
+
+ void
+ cifs_chan_set_in_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+ {
+- unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
++ int chan_index = cifs_ses_get_chan_index(ses, server);
++
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++ return;
+
+ ses->chans[chan_index].in_reconnect = true;
+ }
+@@ -103,6 +109,9 @@ cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
+ {
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++ return;
++
+ ses->chans[chan_index].in_reconnect = false;
+ }
+
+@@ -112,6 +121,9 @@ cifs_chan_in_reconnect(struct cifs_ses *ses,
+ {
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++		return true;	/* err on the safe side */
++
+ return CIFS_CHAN_IN_RECONNECT(ses, chan_index);
+ }
+
+@@ -121,6 +133,9 @@ cifs_chan_set_need_reconnect(struct cifs_ses *ses,
+ {
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++ return;
++
+ set_bit(chan_index, &ses->chans_need_reconnect);
+ cifs_dbg(FYI, "Set reconnect bitmask for chan %u; now 0x%lx\n",
+ chan_index, ses->chans_need_reconnect);
+@@ -132,6 +147,9 @@ cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
+ {
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++ return;
++
+ clear_bit(chan_index, &ses->chans_need_reconnect);
+ cifs_dbg(FYI, "Cleared reconnect bitmask for chan %u; now 0x%lx\n",
+ chan_index, ses->chans_need_reconnect);
+@@ -143,6 +161,9 @@ cifs_chan_needs_reconnect(struct cifs_ses *ses,
+ {
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++		return true;	/* err on the safe side */
++
+ return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index);
+ }
+
+@@ -152,19 +173,24 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
+ {
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
++ if (chan_index == CIFS_INVAL_CHAN_INDEX)
++		return true;	/* err on the safe side */
++
+ return ses->chans[chan_index].iface &&
+ ses->chans[chan_index].iface->is_active;
+ }
+
+ /* returns number of channels added */
+-int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
++int cifs_try_adding_channels(struct cifs_ses *ses)
+ {
+ struct TCP_Server_Info *server = ses->server;
+ int old_chan_count, new_chan_count;
+ int left;
+ int rc = 0;
+ int tries = 0;
++ size_t iface_weight = 0, iface_min_speed = 0;
+ struct cifs_server_iface *iface = NULL, *niface = NULL;
++ struct cifs_server_iface *last_iface = NULL;
+
+ spin_lock(&ses->chan_lock);
+
+@@ -186,28 +212,17 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ }
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+- ses->chan_max = 1;
+ spin_unlock(&ses->chan_lock);
+ cifs_server_dbg(VFS, "no multichannel support\n");
+ return 0;
+ }
+ spin_unlock(&ses->chan_lock);
+
+- /*
+- * Keep connecting to same, fastest, iface for all channels as
+- * long as its RSS. Try next fastest one if not RSS or channel
+- * creation fails.
+- */
+- spin_lock(&ses->iface_lock);
+- iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
+- iface_head);
+- spin_unlock(&ses->iface_lock);
+-
+ while (left > 0) {
+
+ tries++;
+ if (tries > 3*ses->chan_max) {
+- cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
++ cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
+ left);
+ break;
+ }
+@@ -215,23 +230,41 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ spin_lock(&ses->iface_lock);
+ if (!ses->iface_count) {
+ spin_unlock(&ses->iface_lock);
++ cifs_dbg(ONCE, "server %s does not advertise interfaces\n",
++ ses->server->hostname);
+ break;
+ }
+
++ if (!iface)
++ iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
+ iface_head) {
++ /* do not mix rdma and non-rdma interfaces */
++ if (iface->rdma_capable != ses->server->rdma)
++ continue;
++
+ /* skip ifaces that are unusable */
+ if (!iface->is_active ||
+ (is_ses_using_iface(ses, iface) &&
+- !iface->rss_capable)) {
++ !iface->rss_capable))
++ continue;
++
++ /* check if we already allocated enough channels */
++ iface_weight = iface->speed / iface_min_speed;
++
++ if (iface->weight_fulfilled >= iface_weight)
+ continue;
+- }
+
+ /* take ref before unlock */
+ kref_get(&iface->refcount);
+
+ spin_unlock(&ses->iface_lock);
+- rc = cifs_ses_add_channel(cifs_sb, ses, iface);
++ rc = cifs_ses_add_channel(ses, iface);
+ spin_lock(&ses->iface_lock);
+
+ if (rc) {
+@@ -239,13 +272,26 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ &iface->sockaddr,
+ rc);
+ kref_put(&iface->refcount, release_iface);
++ /* failure to add chan should increase weight */
++ iface->weight_fulfilled++;
+ continue;
+ }
+
+- cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
++ iface->num_channels++;
++ iface->weight_fulfilled++;
++ cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
+ &iface->sockaddr);
+ break;
+ }
++
++ /* reached end of list. reset weight_fulfilled and start over */
++ if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
++ list_for_each_entry(iface, &ses->iface_list, iface_head)
++ iface->weight_fulfilled = 0;
++ spin_unlock(&ses->iface_lock);
++ iface = NULL;
++ continue;
++ }
+ spin_unlock(&ses->iface_lock);
+
+ left--;
+@@ -255,83 +301,190 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ return new_chan_count - old_chan_count;
+ }
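
The loop above spreads new channels across the server's advertised interfaces
in proportion to their speed: an interface's weight is its speed divided by
the slowest advertised speed, weight_fulfilled counts the channels it has been
given in the current pass, and when a pass completes the counters are reset
and allocation starts over. The following is a minimal userspace sketch of
that weighting scheme, not kernel code; the struct and the link speeds are
illustrative.

    #include <stddef.h>
    #include <stdio.h>

    /* illustrative stand-in for struct cifs_server_iface */
    struct iface {
        const char *name;
        unsigned long long speed;           /* advertised bits/sec */
        unsigned long long weight_fulfilled;
    };

    int main(void)
    {
        struct iface ifaces[] = {
            { "10GbE", 10000000000ULL, 0 },
            { "1GbE",   1000000000ULL, 0 },
        };
        size_t n = sizeof(ifaces) / sizeof(ifaces[0]);
        /* the iface list is kept sorted by speed, so the slowest is last */
        unsigned long long min_speed = ifaces[n - 1].speed;
        int left = 4;                       /* channels still to open */

        while (left > 0) {
            int placed = 0;

            for (size_t i = 0; i < n && left > 0; i++) {
                unsigned long long weight = ifaces[i].speed / min_speed;

                /* skip ifaces that met their share in this pass */
                if (ifaces[i].weight_fulfilled >= weight)
                    continue;
                ifaces[i].weight_fulfilled++;
                left--;
                placed = 1;
                printf("channel -> %s\n", ifaces[i].name);
            }
            /* reached end of list: reset weights and start over */
            if (!placed)
                for (size_t i = 0; i < n; i++)
                    ifaces[i].weight_fulfilled = 0;
        }
        return 0;
    }
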
+
++/*
++ * called when multichannel is disabled by the server.
++ * this always gets called from smb2_reconnect
++ * and cannot get called in parallel threads.
++ */
++void
++cifs_disable_secondary_channels(struct cifs_ses *ses)
++{
++ int i, chan_count;
++ struct TCP_Server_Info *server;
++ struct cifs_server_iface *iface;
++
++ spin_lock(&ses->chan_lock);
++ chan_count = ses->chan_count;
++ if (chan_count == 1)
++ goto done;
++
++ ses->chan_count = 1;
++
++ /* for all secondary channels reset the need reconnect bit */
++ ses->chans_need_reconnect &= 1;
++
++ for (i = 1; i < chan_count; i++) {
++ iface = ses->chans[i].iface;
++ server = ses->chans[i].server;
++
++ /*
++ * remove these references first, since we need to unlock
++ * the chan_lock here, since iface_lock is a higher lock
++ */
++ ses->chans[i].iface = NULL;
++ ses->chans[i].server = NULL;
++ spin_unlock(&ses->chan_lock);
++
++ if (iface) {
++ spin_lock(&ses->iface_lock);
++ iface->num_channels--;
++ if (iface->weight_fulfilled)
++ iface->weight_fulfilled--;
++ kref_put(&iface->refcount, release_iface);
++ spin_unlock(&ses->iface_lock);
++ }
++
++ if (server) {
++ if (!server->terminate) {
++ server->terminate = true;
++ cifs_signal_cifsd_for_reconnect(server, false);
++ }
++ cifs_put_tcp_session(server, false);
++ }
++
++ spin_lock(&ses->chan_lock);
++ }
++
++done:
++ spin_unlock(&ses->chan_lock);
++}
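
Note the locking dance in cifs_disable_secondary_channels: iface_lock ranks
above chan_lock, so the function detaches the iface and server pointers while
holding chan_lock, drops chan_lock, and only then takes iface_lock for the
refcount work. A minimal pthread model of that detach-then-release pattern
follows; the names and refcount values are illustrative.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t iface_lock = PTHREAD_MUTEX_INITIALIZER;

    struct iface { int refcount; };
    static struct iface eth0 = { .refcount = 2 };
    static struct iface *chan_iface = &eth0;   /* protected by chan_lock */

    static void disable_secondary(void)
    {
        struct iface *iface;

        pthread_mutex_lock(&chan_lock);
        iface = chan_iface;                    /* detach under chan_lock */
        chan_iface = NULL;
        pthread_mutex_unlock(&chan_lock);      /* drop before iface_lock */

        if (iface) {
            pthread_mutex_lock(&iface_lock);
            iface->refcount--;                 /* refcounting under iface_lock */
            pthread_mutex_unlock(&iface_lock);
        }
    }

    int main(void)
    {
        disable_secondary();
        printf("refcount now %d\n", eth0.refcount);
        return 0;
    }
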
++
+ /*
+ * update the iface for the channel if necessary.
+- * will return 0 when iface is updated, 1 if removed, 2 otherwise
+ * Must be called with chan_lock held.
+ */
+-int
++void
+ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ {
+ unsigned int chan_index;
++ size_t iface_weight = 0, iface_min_speed = 0;
+ struct cifs_server_iface *iface = NULL;
+ struct cifs_server_iface *old_iface = NULL;
+- int rc = 0;
++ struct cifs_server_iface *last_iface = NULL;
++ struct sockaddr_storage ss;
+
+ spin_lock(&ses->chan_lock);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+- if (!chan_index) {
++ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
+ spin_unlock(&ses->chan_lock);
+- return 0;
++ return;
+ }
+
+ if (ses->chans[chan_index].iface) {
+ old_iface = ses->chans[chan_index].iface;
+ if (old_iface->is_active) {
+ spin_unlock(&ses->chan_lock);
+- return 1;
++ return;
+ }
+ }
+ spin_unlock(&ses->chan_lock);
+
++ spin_lock(&server->srv_lock);
++ ss = server->dstaddr;
++ spin_unlock(&server->srv_lock);
++
+ spin_lock(&ses->iface_lock);
++ if (!ses->iface_count) {
++ spin_unlock(&ses->iface_lock);
++ cifs_dbg(ONCE, "server %s does not advertise interfaces\n", ses->server->hostname);
++ return;
++ }
++
++ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
++ iface_head);
++ iface_min_speed = last_iface->speed;
++
+ /* then look for a new one */
+ list_for_each_entry(iface, &ses->iface_list, iface_head) {
++ if (!chan_index) {
++ /* if we're trying to get the updated iface for primary channel */
++ if (!cifs_match_ipaddr((struct sockaddr *) &ss,
++ (struct sockaddr *) &iface->sockaddr))
++ continue;
++
++ kref_get(&iface->refcount);
++ break;
++ }
++
++ /* do not mix rdma and non-rdma interfaces */
++ if (iface->rdma_capable != server->rdma)
++ continue;
++
+ if (!iface->is_active ||
+ (is_ses_using_iface(ses, iface) &&
+ !iface->rss_capable)) {
+ continue;
+ }
++
++ /* check if we already allocated enough channels */
++ iface_weight = iface->speed / iface_min_speed;
++
++ if (iface->weight_fulfilled >= iface_weight)
++ continue;
++
+ kref_get(&iface->refcount);
+ break;
+ }
+
+ if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+- rc = 1;
+ iface = NULL;
+ cifs_dbg(FYI, "unable to find a suitable iface\n");
+ }
+
++ if (!iface) {
++ if (!chan_index)
++ cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
++ &ss);
++ else {
++ cifs_dbg(FYI, "unable to find another interface to replace: %pIS\n",
++ &old_iface->sockaddr);
++ }
++
++ spin_unlock(&ses->iface_lock);
++ return;
++ }
++
+ /* now drop the ref to the current iface */
+- if (old_iface && iface) {
++ if (old_iface) {
+ cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
+ &old_iface->sockaddr,
+ &iface->sockaddr);
++
++ old_iface->num_channels--;
++ if (old_iface->weight_fulfilled)
++ old_iface->weight_fulfilled--;
++ iface->num_channels++;
++ iface->weight_fulfilled++;
++
+ kref_put(&old_iface->refcount, release_iface);
+- } else if (old_iface) {
+- cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
+- &old_iface->sockaddr);
+- kref_put(&old_iface->refcount, release_iface);
+- } else {
+- WARN_ON(!iface);
+- cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
++ } else if (!chan_index) {
++ /* special case: update interface for primary channel */
++ cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
++ &iface->sockaddr);
++ iface->num_channels++;
++ iface->weight_fulfilled++;
+ }
+ spin_unlock(&ses->iface_lock);
+
+ spin_lock(&ses->chan_lock);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+- ses->chans[chan_index].iface = iface;
+-
+- /* No iface is found. if secondary chan, drop connection */
+- if (!iface && SERVER_IS_CHAN(server))
+- ses->chans[chan_index].server = NULL;
++ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
++ spin_unlock(&ses->chan_lock);
++ return;
++ }
+
++ ses->chans[chan_index].iface = iface;
+ spin_unlock(&ses->chan_lock);
+-
+- if (!iface && SERVER_IS_CHAN(server))
+- cifs_put_tcp_session(server, false);
+-
+- return rc;
+ }
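
For the primary channel (chan_index 0), cifs_chan_update_iface no longer
selects a replacement by speed: it snapshots the server's destination address
under srv_lock and then searches the advertised interface list for the entry
whose sockaddr matches. A rough userspace model of that matching step is
below; addr_match is a simplified IPv4-only stand-in for cifs_match_ipaddr.

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* simplified stand-in for cifs_match_ipaddr(): IPv4 only */
    static bool addr_match(const struct sockaddr_storage *a,
                           const struct sockaddr_storage *b)
    {
        const struct sockaddr_in *a4 = (const void *)a;
        const struct sockaddr_in *b4 = (const void *)b;

        if (a->ss_family != AF_INET || b->ss_family != AF_INET)
            return false;
        return a4->sin_addr.s_addr == b4->sin_addr.s_addr;
    }

    int main(void)
    {
        struct sockaddr_storage dst = { 0 }, iface_addr = { 0 };
        struct sockaddr_in *d = (void *)&dst, *i = (void *)&iface_addr;

        d->sin_family = i->sin_family = AF_INET;
        inet_pton(AF_INET, "192.0.2.10", &d->sin_addr);
        inet_pton(AF_INET, "192.0.2.10", &i->sin_addr);

        printf("primary iface %s\n",
               addr_match(&dst, &iface_addr) ? "matches" : "does not match");
        return 0;
    }
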
+
+ /*
+@@ -355,7 +508,7 @@ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
+ }
+
+ static int
+-cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
++cifs_ses_add_channel(struct cifs_ses *ses,
+ struct cifs_server_iface *iface)
+ {
+ struct TCP_Server_Info *chan_server;
+@@ -434,7 +587,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+ * This will be used for encoding/decoding user/domain/pw
+ * during sess setup auth.
+ */
+- ctx->local_nls = cifs_sb->local_nls;
++ ctx->local_nls = ses->local_nls;
+
+ /* Use RDMA if possible */
+ ctx->rdma = iface->rdma_capable;
+@@ -480,20 +633,16 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+
+ rc = cifs_negotiate_protocol(xid, ses, chan->server);
+ if (!rc)
+- rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
++ rc = cifs_setup_session(xid, ses, chan->server, ses->local_nls);
+
+ mutex_unlock(&ses->session_mutex);
+
+ out:
+ if (rc && chan->server) {
+- /*
+- * we should avoid race with these delayed works before we
+- * remove this channel
+- */
+- cancel_delayed_work_sync(&chan->server->echo);
+- cancel_delayed_work_sync(&chan->server->reconnect);
++ cifs_put_tcp_session(chan->server, 0);
+
+ spin_lock(&ses->chan_lock);
++
+ /* we rely on all bits beyond chan_count to be clear */
+ cifs_chan_clear_need_reconnect(ses, chan->server);
+ ses->chan_count--;
+@@ -503,8 +652,6 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+ */
+ WARN_ON(ses->chan_count < 1);
+ spin_unlock(&ses->chan_lock);
+-
+- cifs_put_tcp_session(chan->server, 0);
+ }
+
+ kfree(ctx->UNC);
+@@ -536,8 +683,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
+
+ /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+
+- /* BB verify whether signing required on neg or just on auth frame
+- (and NTLM case) */
++ /* BB verify whether signing required on neg or just auth frame (and NTLM case) */
+
+ capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+ CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+@@ -594,8 +740,10 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
+
+ /* copy domain */
+ if (ses->domainName == NULL) {
+- /* Sending null domain better than using a bogus domain name (as
+- we did briefly in 2.6.18) since server will use its default */
++ /*
++ * Sending null domain better than using a bogus domain name (as
++ * we did briefly in 2.6.18) since server will use its default
++ */
+ *bcc_ptr = 0;
+ *(bcc_ptr+1) = 0;
+ bytes_ret = 0;
+@@ -614,8 +762,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+ char *bcc_ptr = *pbcc_area;
+ int bytes_ret = 0;
+
+- /* BB FIXME add check that strings total less
+- than 335 or will need to send them as arrays */
++ /* BB FIXME add check that strings total less than 335 or will need to send as arrays */
+
+ /* copy user */
+ if (ses->user_name == NULL) {
+@@ -660,8 +807,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
+- } /* else we will send a null domain name
+- so the server will default to its own domain */
++ } /* else we send a null domain name so server will default to its own domain */
+ *bcc_ptr = 0;
+ bcc_ptr++;
+
+@@ -757,11 +903,14 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
+ if (len > bleft)
+ return;
+
+- /* No domain field in LANMAN case. Domain is
+- returned by old servers in the SMB negprot response */
+- /* BB For newer servers which do not support Unicode,
+- but thus do return domain here we could add parsing
+- for it later, but it is not very important */
++ /*
++ * No domain field in LANMAN case. Domain is
++ * returned by old servers in the SMB negprot response
++ *
++ * BB For newer servers which do not support Unicode,
++ * but do return the domain here, we could add parsing
++ * for it later, but it is not very important
++ */
+ cifs_dbg(FYI, "ascii: bytes left %d\n", bleft);
+ }
+ #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+@@ -817,9 +966,12 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
+ ses->ntlmssp->server_flags = server_flags;
+
+ memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
+- /* In particular we can examine sign flags */
+- /* BB spec says that if AvId field of MsvAvTimestamp is populated then
+- we must set the MIC field of the AUTHENTICATE_MESSAGE */
++ /*
++ * In particular we can examine sign flags
++ *
++ * BB spec says that if AvId field of MsvAvTimestamp is populated then
++ * we must set the MIC field of the AUTHENTICATE_MESSAGE
++ */
+
+ tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+ tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
+@@ -1060,10 +1212,16 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+ memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+ sec_blob->MessageType = NtLmAuthenticate;
+
++ /* send version information in ntlmssp authenticate also */
+ flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+- NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
+- /* we only send version information in ntlmssp negotiate, so do not set this flag */
+- flags = flags & ~NTLMSSP_NEGOTIATE_VERSION;
++ NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_VERSION |
++ NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
++
++ sec_blob->Version.ProductMajorVersion = LINUX_VERSION_MAJOR;
++ sec_blob->Version.ProductMinorVersion = LINUX_VERSION_PATCHLEVEL;
++ sec_blob->Version.ProductBuild = cpu_to_le16(SMB3_PRODUCT_BUILD);
++ sec_blob->Version.NTLMRevisionCurrent = NTLMSSP_REVISION_W2K3;
++
+ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ sec_blob->NegotiateFlags = cpu_to_le32(flags);
+
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index 9bf8735cdd1e8f..e3a195824b4037 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -753,11 +753,11 @@ cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+ cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+ }
+
+-static void
++static int
+ cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid *fid)
+ {
+- CIFSSMBClose(xid, tcon, fid->netfid);
++ return CIFSSMBClose(xid, tcon, fid->netfid);
+ }
+
+ static int
+@@ -909,7 +909,7 @@ cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+
+ static int
+ cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+- struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
++ const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
+ {
+ int rc = -EOPNOTSUPP;
+
+@@ -976,64 +976,37 @@ static int cifs_query_symlink(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *full_path,
+- char **target_path,
+- struct kvec *rsp_iov)
++ char **target_path)
+ {
+ int rc;
+- int oplock = 0;
+- bool is_reparse_point = !!rsp_iov;
+- struct cifs_fid fid;
+- struct cifs_open_parms oparms;
+
+- cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
++ cifs_tcon_dbg(FYI, "%s: path=%s\n", __func__, full_path);
+
+- if (is_reparse_point) {
+- cifs_dbg(VFS, "reparse points not handled for SMB1 symlinks\n");
++ if (!cap_unix(tcon->ses))
+ return -EOPNOTSUPP;
+- }
+
+- /* Check for unix extensions */
+- if (cap_unix(tcon->ses)) {
+- rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
+- cifs_sb->local_nls,
+- cifs_remap(cifs_sb));
+- if (rc == -EREMOTE)
+- rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
+- target_path,
+- cifs_sb->local_nls);
+-
+- goto out;
+- }
+-
+- oparms = (struct cifs_open_parms) {
+- .tcon = tcon,
+- .cifs_sb = cifs_sb,
+- .desired_access = FILE_READ_ATTRIBUTES,
+- .create_options = cifs_create_options(cifs_sb,
+- OPEN_REPARSE_POINT),
+- .disposition = FILE_OPEN,
+- .path = full_path,
+- .fid = &fid,
+- };
+-
+- rc = CIFS_open(xid, &oparms, &oplock, NULL);
+- if (rc)
+- goto out;
+-
+- rc = CIFSSMBQuerySymLink(xid, tcon, fid.netfid, target_path,
+- cifs_sb->local_nls);
+- if (rc)
+- goto out_close;
+-
+- convert_delimiter(*target_path, '/');
+-out_close:
+- CIFSSMBClose(xid, tcon, fid.netfid);
+-out:
+- if (!rc)
+- cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
++ rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
++ cifs_sb->local_nls, cifs_remap(cifs_sb));
++ if (rc == -EREMOTE)
++ rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
++ target_path, cifs_sb->local_nls);
+ return rc;
+ }
+
++static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb,
++ struct kvec *rsp_iov,
++ struct cifs_open_info_data *data)
++{
++ struct reparse_data_buffer *buf;
++ TRANSACT_IOCTL_RSP *io = rsp_iov->iov_base;
++ bool unicode = !!(io->hdr.Flags2 & SMBFLG2_UNICODE);
++ u32 plen = le16_to_cpu(io->ByteCount);
++
++ buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
++ le32_to_cpu(io->DataOffset));
++ return parse_reparse_point(buf, plen, cifs_sb, unicode, data);
++}
++
+ static bool
+ cifs_is_read_op(__u32 oplock)
+ {
+@@ -1068,15 +1041,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct inode *newinode = NULL;
+- int rc = -EPERM;
+- struct cifs_open_info_data buf = {};
+- struct cifs_io_parms io_parms;
+- __u32 oplock = 0;
+- struct cifs_fid fid;
+- struct cifs_open_parms oparms;
+- unsigned int bytes_written;
+- struct win_dev *pdev;
+- struct kvec iov[2];
++ int rc;
+
+ if (tcon->unix_ext) {
+ /*
+@@ -1110,74 +1075,18 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ d_instantiate(dentry, newinode);
+ return rc;
+ }
+-
+ /*
+- * SMB1 SFU emulation: should work with all servers, but only
+- * support block and char device (no socket & fifo)
++ * Check if mounted with the 'sfu' mount parm.
++ * SFU emulation should work with all servers, but only
++ * supports block and char device (no socket & fifo),
++ * and was used by default in earlier versions of Windows
+ */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+- return rc;
+-
+- if (!S_ISCHR(mode) && !S_ISBLK(mode))
+- return rc;
+-
+- cifs_dbg(FYI, "sfu compat create special file\n");
+-
+- oparms = (struct cifs_open_parms) {
+- .tcon = tcon,
+- .cifs_sb = cifs_sb,
+- .desired_access = GENERIC_WRITE,
+- .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+- CREATE_OPTION_SPECIAL),
+- .disposition = FILE_CREATE,
+- .path = full_path,
+- .fid = &fid,
+- };
+-
+- if (tcon->ses->server->oplocks)
+- oplock = REQ_OPLOCK;
+- else
+- oplock = 0;
+- rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
+- if (rc)
+- return rc;
+-
+- /*
+- * BB Do not bother to decode buf since no local inode yet to put
+- * timestamps in, but we can reuse it safely.
+- */
+-
+- pdev = (struct win_dev *)&buf.fi;
+- io_parms.pid = current->tgid;
+- io_parms.tcon = tcon;
+- io_parms.offset = 0;
+- io_parms.length = sizeof(struct win_dev);
+- iov[1].iov_base = &buf.fi;
+- iov[1].iov_len = sizeof(struct win_dev);
+- if (S_ISCHR(mode)) {
+- memcpy(pdev->type, "IntxCHR", 8);
+- pdev->major = cpu_to_le64(MAJOR(dev));
+- pdev->minor = cpu_to_le64(MINOR(dev));
+- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+- &bytes_written, iov, 1);
+- } else if (S_ISBLK(mode)) {
+- memcpy(pdev->type, "IntxBLK", 8);
+- pdev->major = cpu_to_le64(MAJOR(dev));
+- pdev->minor = cpu_to_le64(MINOR(dev));
+- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+- &bytes_written, iov, 1);
+- }
+- tcon->ses->server->ops->close(xid, tcon, &fid);
+- d_drop(dentry);
+-
+- /* FIXME: add code here to set EAs */
+-
+- cifs_free_open_info(&buf);
+- return rc;
++ return -EPERM;
++ return cifs_sfu_make_node(xid, inode, dentry, tcon,
++ full_path, mode, dev);
+ }
+
+-
+-
+ struct smb_version_operations smb1_operations = {
+ .send_cancel = send_nt_cancel,
+ .compare_fids = cifs_compare_fids,
+@@ -1214,6 +1123,7 @@ struct smb_version_operations smb1_operations = {
+ .is_path_accessible = cifs_is_path_accessible,
+ .can_echo = cifs_can_echo,
+ .query_path_info = cifs_query_path_info,
++ .query_reparse_point = cifs_query_reparse_point,
+ .query_file_info = cifs_query_file_info,
+ .get_srv_inum = cifs_get_srv_inum,
+ .set_path_size = CIFSSMBSetEOF,
+@@ -1229,6 +1139,7 @@ struct smb_version_operations smb1_operations = {
+ .rename = CIFSSMBRename,
+ .create_hardlink = CIFSCreateHardLink,
+ .query_symlink = cifs_query_symlink,
++ .parse_reparse_point = cifs_parse_reparse_point,
+ .open = cifs_open_file,
+ .set_fid = cifs_set_fid,
+ .close = cifs_close_file,
+diff --git a/fs/smb/client/smb2glob.h b/fs/smb/client/smb2glob.h
+index 82e916ad167c00..2466e61551369c 100644
+--- a/fs/smb/client/smb2glob.h
++++ b/fs/smb/client/smb2glob.h
+@@ -23,17 +23,22 @@
+ * Identifiers for functions that use the open, operation, close pattern
+ * in smb2inode.c:smb2_compound_op()
+ */
+-#define SMB2_OP_SET_DELETE 1
+-#define SMB2_OP_SET_INFO 2
+-#define SMB2_OP_QUERY_INFO 3
+-#define SMB2_OP_QUERY_DIR 4
+-#define SMB2_OP_MKDIR 5
+-#define SMB2_OP_RENAME 6
+-#define SMB2_OP_DELETE 7
+-#define SMB2_OP_HARDLINK 8
+-#define SMB2_OP_SET_EOF 9
+-#define SMB2_OP_RMDIR 10
+-#define SMB2_OP_POSIX_QUERY_INFO 11
++enum smb2_compound_ops {
++ SMB2_OP_SET_DELETE = 1,
++ SMB2_OP_SET_INFO,
++ SMB2_OP_QUERY_INFO,
++ SMB2_OP_QUERY_DIR,
++ SMB2_OP_MKDIR,
++ SMB2_OP_RENAME,
++ SMB2_OP_DELETE,
++ SMB2_OP_HARDLINK,
++ SMB2_OP_SET_EOF,
++ SMB2_OP_RMDIR,
++ SMB2_OP_POSIX_QUERY_INFO,
++ SMB2_OP_SET_REPARSE,
++ SMB2_OP_GET_REPARSE,
++ SMB2_OP_QUERY_WSL_EA,
++};
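
Converting the command identifiers to an enum accompanies the reworked
smb2_compound_op calling convention used later in this patch: callers now pass
an array of commands plus a parallel array of input kvecs, and the function
loops over them to build one compound request. A toy model of that dispatch
shape, with abbreviated names rather than the kernel API:

    #include <stdio.h>

    enum op { OP_SET_INFO = 1, OP_QUERY_INFO, OP_GET_REPARSE };

    struct kvec { void *iov_base; unsigned long iov_len; };

    static int compound_op(const int *cmds, const struct kvec *in_iov,
                           int num_cmds)
    {
        for (int i = 0; i < num_cmds; i++) {
            switch (cmds[i]) {
            case OP_QUERY_INFO:
                printf("build query-info (in %lu bytes)\n",
                       in_iov[i].iov_len);
                break;
            case OP_GET_REPARSE:
                printf("build get-reparse ioctl\n");
                break;
            default:
                return -1;        /* invalid command */
            }
        }
        return 0;
    }

    int main(void)
    {
        int cmds[] = { OP_QUERY_INFO, OP_GET_REPARSE };
        struct kvec iov[2] = { { 0, 64 }, { 0, 0 } };

        return compound_op(cmds, iov, 2);
    }
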
+
+ /* Used when constructing chained read requests. */
+ #define CHAINED_REQUEST 1
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 0b89f7008ac0f4..8010b3ed4b3fe4 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -26,13 +26,139 @@
+ #include "cached_dir.h"
+ #include "smb2status.h"
+
+-static void
+-free_set_inf_compound(struct smb_rqst *rqst)
++static inline __u32 file_create_options(struct dentry *dentry)
+ {
+- if (rqst[1].rq_iov)
+- SMB2_set_info_free(&rqst[1]);
+- if (rqst[2].rq_iov)
+- SMB2_close_free(&rqst[2]);
++ struct cifsInodeInfo *ci;
++
++ if (dentry) {
++ ci = CIFS_I(d_inode(dentry));
++ if (ci->cifsAttrs & ATTR_REPARSE)
++ return OPEN_REPARSE_POINT;
++ }
++ return 0;
++}
++
++static struct reparse_data_buffer *reparse_buf_ptr(struct kvec *iov)
++{
++ struct reparse_data_buffer *buf;
++ struct smb2_ioctl_rsp *io = iov->iov_base;
++ u32 off, count, len;
++
++ count = le32_to_cpu(io->OutputCount);
++ off = le32_to_cpu(io->OutputOffset);
++ if (check_add_overflow(off, count, &len) || len > iov->iov_len)
++ return ERR_PTR(-EIO);
++
++ buf = (struct reparse_data_buffer *)((u8 *)io + off);
++ len = sizeof(*buf);
++ if (count < len || count < le16_to_cpu(buf->ReparseDataLength) + len)
++ return ERR_PTR(-EIO);
++ return buf;
++}
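
reparse_buf_ptr refuses to trust the server-supplied offset and count: it
rejects the response if OutputOffset plus OutputCount overflows or extends
past the received buffer, and again if the count cannot hold the declared
ReparseDataLength. A userspace model of the first check, using
__builtin_add_overflow, which is what the kernel's check_add_overflow() maps
to on gcc and clang:

    #include <stdint.h>
    #include <stdio.h>

    /* returns 0 iff [off, off + count) fits in a buf_len-byte buffer */
    static int validate_span(uint32_t off, uint32_t count, uint32_t buf_len)
    {
        uint32_t end;

        if (__builtin_add_overflow(off, count, &end))
            return -1;            /* off + count wrapped around */
        if (end > buf_len)
            return -1;            /* span runs past the response */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", validate_span(16, 64, 128));        /* fits: 0 */
        printf("%d\n", validate_span(UINT32_MAX, 2, 128)); /* wraps: -1 */
        return 0;
    }
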
++
++/* Parse owner and group from SMB3.1.1 POSIX query info */
++static int parse_posix_sids(struct cifs_open_info_data *data,
++ struct kvec *rsp_iov)
++{
++ struct smb2_query_info_rsp *qi = rsp_iov->iov_base;
++ unsigned int out_len = le32_to_cpu(qi->OutputBufferLength);
++ unsigned int qi_len = sizeof(data->posix_fi);
++ int owner_len, group_len;
++ u8 *sidsbuf, *sidsbuf_end;
++
++ if (out_len <= qi_len)
++ return -EINVAL;
++
++ sidsbuf = (u8 *)qi + le16_to_cpu(qi->OutputBufferOffset) + qi_len;
++ sidsbuf_end = sidsbuf + out_len - qi_len;
++
++ owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end);
++ if (owner_len == -1)
++ return -EINVAL;
++
++ memcpy(&data->posix_owner, sidsbuf, owner_len);
++ group_len = posix_info_sid_size(sidsbuf + owner_len, sidsbuf_end);
++ if (group_len == -1)
++ return -EINVAL;
++
++ memcpy(&data->posix_group, sidsbuf + owner_len, group_len);
++ return 0;
++}
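
parse_posix_sids relies on the SMB3.1.1 POSIX response layout: the fixed
query-info portion is followed immediately by two variable-length SIDs, owner
first, then group. A sketch of walking that layout; sid_size here is a
trivial stand-in for posix_info_sid_size() and the SID bytes are fabricated.

    #include <stdio.h>

    /* stand-in for posix_info_sid_size(): 8 + 4 * SubAuthorityCount */
    static int sid_size(const unsigned char *sid, const unsigned char *end)
    {
        if (end - sid < 8)
            return -1;
        int len = 8 + 4 * sid[1];   /* sid[1] = SubAuthorityCount */
        return (end - sid < len) ? -1 : len;
    }

    int main(void)
    {
        /* pretend response: 4-byte fixed portion, then owner and group */
        unsigned char buf[] = {
            0, 0, 0, 0,                           /* fixed portion (qi_len) */
            1, 1, 0, 0, 0, 0, 0, 5, 18, 0, 0, 0,  /* owner: 1 subauth */
            1, 1, 0, 0, 0, 0, 0, 5, 32, 0, 0, 0,  /* group: 1 subauth */
        };
        const unsigned char *p = buf + 4, *end = buf + sizeof(buf);

        int owner_len = sid_size(p, end);
        int group_len = owner_len < 0 ? -1 : sid_size(p + owner_len, end);

        printf("owner %d bytes, group %d bytes\n", owner_len, group_len);
        return 0;
    }
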
++
++struct wsl_query_ea {
++ __le32 next;
++ __u8 name_len;
++ __u8 name[SMB2_WSL_XATTR_NAME_LEN + 1];
++} __packed;
++
++#define NEXT_OFF cpu_to_le32(sizeof(struct wsl_query_ea))
++
++static const struct wsl_query_ea wsl_query_eas[] = {
++ { .next = NEXT_OFF, .name_len = SMB2_WSL_XATTR_NAME_LEN, .name = SMB2_WSL_XATTR_UID, },
++ { .next = NEXT_OFF, .name_len = SMB2_WSL_XATTR_NAME_LEN, .name = SMB2_WSL_XATTR_GID, },
++ { .next = NEXT_OFF, .name_len = SMB2_WSL_XATTR_NAME_LEN, .name = SMB2_WSL_XATTR_MODE, },
++ { .next = 0, .name_len = SMB2_WSL_XATTR_NAME_LEN, .name = SMB2_WSL_XATTR_DEV, },
++};
++
++static int check_wsl_eas(struct kvec *rsp_iov)
++{
++ struct smb2_file_full_ea_info *ea;
++ struct smb2_query_info_rsp *rsp = rsp_iov->iov_base;
++ unsigned long addr;
++ u32 outlen, next;
++ u16 vlen;
++ u8 nlen;
++ u8 *end;
++
++ outlen = le32_to_cpu(rsp->OutputBufferLength);
++ if (outlen < SMB2_WSL_MIN_QUERY_EA_RESP_SIZE ||
++ outlen > SMB2_WSL_MAX_QUERY_EA_RESP_SIZE)
++ return -EINVAL;
++
++ ea = (void *)((u8 *)rsp_iov->iov_base +
++ le16_to_cpu(rsp->OutputBufferOffset));
++ end = (u8 *)rsp_iov->iov_base + rsp_iov->iov_len;
++ for (;;) {
++ if ((u8 *)ea > end - sizeof(*ea))
++ return -EINVAL;
++
++ nlen = ea->ea_name_length;
++ vlen = le16_to_cpu(ea->ea_value_length);
++ if (nlen != SMB2_WSL_XATTR_NAME_LEN ||
++ (u8 *)ea + nlen + 1 + vlen > end)
++ return -EINVAL;
++
++ switch (vlen) {
++ case 4:
++ if (strncmp(ea->ea_data, SMB2_WSL_XATTR_UID, nlen) &&
++ strncmp(ea->ea_data, SMB2_WSL_XATTR_GID, nlen) &&
++ strncmp(ea->ea_data, SMB2_WSL_XATTR_MODE, nlen))
++ return -EINVAL;
++ break;
++ case 8:
++ if (strncmp(ea->ea_data, SMB2_WSL_XATTR_DEV, nlen))
++ return -EINVAL;
++ break;
++ case 0:
++ if (!strncmp(ea->ea_data, SMB2_WSL_XATTR_UID, nlen) ||
++ !strncmp(ea->ea_data, SMB2_WSL_XATTR_GID, nlen) ||
++ !strncmp(ea->ea_data, SMB2_WSL_XATTR_MODE, nlen) ||
++ !strncmp(ea->ea_data, SMB2_WSL_XATTR_DEV, nlen))
++ break;
++ fallthrough;
++ default:
++ return -EINVAL;
++ }
++
++ next = le32_to_cpu(ea->next_entry_offset);
++ if (!next)
++ break;
++ if (!IS_ALIGNED(next, 4) ||
++ check_add_overflow((unsigned long)ea, next, &addr))
++ return -EINVAL;
++ ea = (void *)addr;
++ }
++ return 0;
+ }
+
+ /*
+@@ -45,13 +171,14 @@ free_set_inf_compound(struct smb_rqst *rqst)
+ */
+ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+- __u32 desired_access, __u32 create_disposition, __u32 create_options,
+- umode_t mode, void *ptr, int command, struct cifsFileInfo *cfile,
+- __u8 **extbuf, size_t *extbuflen,
+- struct kvec *out_iov, int *out_buftype)
++ struct cifs_open_parms *oparms, struct kvec *in_iov,
++ int *cmds, int num_cmds, struct cifsFileInfo *cfile,
++ struct kvec *out_iov, int *out_buftype, struct dentry *dentry)
+ {
++
++ struct reparse_data_buffer *rbuf;
+ struct smb2_compound_vars *vars = NULL;
+- struct kvec *rsp_iov;
++ struct kvec *rsp_iov, *iov;
+ struct smb_rqst *rqst;
+ int rc;
+ __le16 *utf16_path = NULL;
+@@ -59,15 +186,24 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid fid;
+ struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server;
+- int num_rqst = 0;
+- int resp_buftype[3];
++ int num_rqst = 0, i;
++ int resp_buftype[MAX_COMPOUND];
+ struct smb2_query_info_rsp *qi_rsp = NULL;
+ struct cifs_open_info_data *idata;
++ struct inode *inode = NULL;
+ int flags = 0;
+ __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
+ unsigned int size[2];
+ void *data[2];
+- int len;
++ unsigned int len;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ oplock = SMB2_OPLOCK_LEVEL_NONE;
++ num_rqst = 0;
++ server = cifs_pick_channel(ses);
+
+ vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+ if (vars == NULL)
+@@ -75,12 +211,11 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ rqst = &vars->rqst[0];
+ rsp_iov = &vars->rsp_iov[0];
+
+- server = cifs_pick_channel(ses);
+-
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+- resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
++ for (i = 0; i < ARRAY_SIZE(resp_buftype); i++)
++ resp_buftype[i] = CIFS_NO_BUFFER;
+
+ /* We already have a handle so we can skip the open */
+ if (cfile)
+@@ -93,16 +228,28 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ goto finished;
+ }
+
+- vars->oparms = (struct cifs_open_parms) {
+- .tcon = tcon,
+- .path = full_path,
+- .desired_access = desired_access,
+- .disposition = create_disposition,
+- .create_options = cifs_create_options(cifs_sb, create_options),
+- .fid = &fid,
+- .mode = mode,
+- .cifs_sb = cifs_sb,
+- };
++ /* if there is an existing lease, reuse it */
++
++ /*
++ * note: files with hardlinks cause unexpected behaviour. As per MS-SMB2,
++ * lease keys are associated with the filepath. We are maintaining lease keys
++ * with the inode on the client. If the file has hardlinks, it is possible
++ * that the lease for a file be reused for an operation on its hardlink or
++ * vice versa.
++ * As a workaround, send request using an existing lease key and if the server
++ * returns STATUS_INVALID_PARAMETER, which maps to EINVAL, send the request
++ * again without the lease.
++ */
++ if (dentry) {
++ inode = d_inode(dentry);
++ if (CIFS_I(inode)->lease_granted && server->ops->get_lease_key) {
++ oplock = SMB2_OPLOCK_LEVEL_LEASE;
++ server->ops->get_lease_key(inode, &fid);
++ }
++ }
++
++ vars->oparms = *oparms;
++ vars->oparms.fid = &fid;
+
+ rqst[num_rqst].rq_iov = &vars->open_iov[0];
+ rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
+@@ -118,242 +265,330 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ num_rqst++;
+ rc = 0;
+
+- /* Operation */
+- switch (command) {
+- case SMB2_OP_QUERY_INFO:
+- rqst[num_rqst].rq_iov = &vars->qi_iov;
+- rqst[num_rqst].rq_nvec = 1;
+-
+- if (cfile)
+- rc = SMB2_query_info_init(tcon, server,
+- &rqst[num_rqst],
+- cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid,
+- FILE_ALL_INFORMATION,
+- SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb2_file_all_info) +
+- PATH_MAX * 2, 0, NULL);
+- else {
+- rc = SMB2_query_info_init(tcon, server,
+- &rqst[num_rqst],
+- COMPOUND_FID,
+- COMPOUND_FID,
+- FILE_ALL_INFORMATION,
+- SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb2_file_all_info) +
+- PATH_MAX * 2, 0, NULL);
+- if (!rc) {
++ for (i = 0; i < num_cmds; i++) {
++ /* Operation */
++ switch (cmds[i]) {
++ case SMB2_OP_QUERY_INFO:
++ rqst[num_rqst].rq_iov = &vars->qi_iov;
++ rqst[num_rqst].rq_nvec = 1;
++
++ if (cfile) {
++ rc = SMB2_query_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ FILE_ALL_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ sizeof(struct smb2_file_all_info) +
++ PATH_MAX * 2, 0, NULL);
++ } else {
++ rc = SMB2_query_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID,
++ COMPOUND_FID,
++ FILE_ALL_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ sizeof(struct smb2_file_all_info) +
++ PATH_MAX * 2, 0, NULL);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
+ }
+- }
++ num_rqst++;
++ trace_smb3_query_info_compound_enter(xid, ses->Suid,
++ tcon->tid, full_path);
++ break;
++ case SMB2_OP_POSIX_QUERY_INFO:
++ rqst[num_rqst].rq_iov = &vars->qi_iov;
++ rqst[num_rqst].rq_nvec = 1;
+
+- if (rc)
+- goto finished;
+- num_rqst++;
+- trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
+- full_path);
+- break;
+- case SMB2_OP_POSIX_QUERY_INFO:
+- rqst[num_rqst].rq_iov = &vars->qi_iov;
+- rqst[num_rqst].rq_nvec = 1;
+-
+- if (cfile)
+- rc = SMB2_query_info_init(tcon, server,
+- &rqst[num_rqst],
+- cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid,
+- SMB_FIND_FILE_POSIX_INFO,
+- SMB2_O_INFO_FILE, 0,
++ if (cfile) {
+ /* TBD: fix following to allow for longer SIDs */
+- sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
+- (sizeof(struct cifs_sid) * 2), 0, NULL);
+- else {
+- rc = SMB2_query_info_init(tcon, server,
+- &rqst[num_rqst],
+- COMPOUND_FID,
+- COMPOUND_FID,
+- SMB_FIND_FILE_POSIX_INFO,
+- SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) + (PATH_MAX * 2) +
+- (sizeof(struct cifs_sid) * 2), 0, NULL);
+- if (!rc) {
++ rc = SMB2_query_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ SMB_FIND_FILE_POSIX_INFO,
++ SMB2_O_INFO_FILE, 0,
++ sizeof(struct smb311_posix_qinfo *) +
++ (PATH_MAX * 2) +
++ (sizeof(struct cifs_sid) * 2), 0, NULL);
++ } else {
++ rc = SMB2_query_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID,
++ COMPOUND_FID,
++ SMB_FIND_FILE_POSIX_INFO,
++ SMB2_O_INFO_FILE, 0,
++ sizeof(struct smb311_posix_qinfo *) +
++ (PATH_MAX * 2) +
++ (sizeof(struct cifs_sid) * 2), 0, NULL);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
+ }
+- }
+-
+- if (rc)
+- goto finished;
+- num_rqst++;
+- trace_smb3_posix_query_info_compound_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- case SMB2_OP_DELETE:
+- trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- case SMB2_OP_MKDIR:
+- /*
+- * Directories are created through parameters in the
+- * SMB2_open() call.
+- */
+- trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- case SMB2_OP_RMDIR:
+- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+- rqst[num_rqst].rq_nvec = 1;
+-
+- size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
+- data[0] = &delete_pending[0];
+-
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst], COMPOUND_FID,
+- COMPOUND_FID, current->tgid,
+- FILE_DISPOSITION_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
+- if (rc)
+- goto finished;
+- smb2_set_next_command(tcon, &rqst[num_rqst]);
+- smb2_set_related(&rqst[num_rqst++]);
+- trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- case SMB2_OP_SET_EOF:
+- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+- rqst[num_rqst].rq_nvec = 1;
++ num_rqst++;
++ trace_smb3_posix_query_info_compound_enter(xid, ses->Suid,
++ tcon->tid, full_path);
++ break;
++ case SMB2_OP_DELETE:
++ trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
++ break;
++ case SMB2_OP_MKDIR:
++ /*
++ * Directories are created through parameters in the
++ * SMB2_open() call.
++ */
++ trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
++ break;
++ case SMB2_OP_RMDIR:
++ rqst[num_rqst].rq_iov = &vars->si_iov[0];
++ rqst[num_rqst].rq_nvec = 1;
+
+- size[0] = 8; /* sizeof __le64 */
+- data[0] = ptr;
++ size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
++ data[0] = &delete_pending[0];
+
+- if (cfile) {
+ rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst],
+- cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid,
+- current->tgid,
+- FILE_END_OF_FILE_INFORMATION,
+- SMB2_O_INFO_FILE, 0,
+- data, size);
+- } else {
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst],
+- COMPOUND_FID,
+- COMPOUND_FID,
+- current->tgid,
+- FILE_END_OF_FILE_INFORMATION,
+- SMB2_O_INFO_FILE, 0,
+- data, size);
+- if (!rc) {
++ &rqst[num_rqst], COMPOUND_FID,
++ COMPOUND_FID, current->tgid,
++ FILE_DISPOSITION_INFORMATION,
++ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
++ smb2_set_next_command(tcon, &rqst[num_rqst]);
++ smb2_set_related(&rqst[num_rqst++]);
++ trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
++ break;
++ case SMB2_OP_SET_EOF:
++ rqst[num_rqst].rq_iov = &vars->si_iov[0];
++ rqst[num_rqst].rq_nvec = 1;
++
++ size[0] = in_iov[i].iov_len;
++ data[0] = in_iov[i].iov_base;
++
++ if (cfile) {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ current->tgid,
++ FILE_END_OF_FILE_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ data, size);
++ } else {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID,
++ COMPOUND_FID,
++ current->tgid,
++ FILE_END_OF_FILE_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ data, size);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
+ }
+- }
+- if (rc)
+- goto finished;
+- num_rqst++;
+- trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- case SMB2_OP_SET_INFO:
+- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+- rqst[num_rqst].rq_nvec = 1;
+-
+-
+- size[0] = sizeof(FILE_BASIC_INFO);
+- data[0] = ptr;
+-
+- if (cfile)
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst],
+- cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid, current->tgid,
+- FILE_BASIC_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
+- else {
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst],
+- COMPOUND_FID,
+- COMPOUND_FID, current->tgid,
+- FILE_BASIC_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
+- if (!rc) {
++ num_rqst++;
++ trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
++ break;
++ case SMB2_OP_SET_INFO:
++ rqst[num_rqst].rq_iov = &vars->si_iov[0];
++ rqst[num_rqst].rq_nvec = 1;
++
++ size[0] = in_iov[i].iov_len;
++ data[0] = in_iov[i].iov_base;
++
++ if (cfile) {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid, current->tgid,
++ FILE_BASIC_INFORMATION,
++ SMB2_O_INFO_FILE, 0, data, size);
++ } else {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID,
++ COMPOUND_FID, current->tgid,
++ FILE_BASIC_INFORMATION,
++ SMB2_O_INFO_FILE, 0, data, size);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
+ }
+- }
+-
+- if (rc)
+- goto finished;
+- num_rqst++;
+- trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
+- full_path);
+- break;
+- case SMB2_OP_RENAME:
+- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+- rqst[num_rqst].rq_nvec = 2;
++ num_rqst++;
++ trace_smb3_set_info_compound_enter(xid, ses->Suid,
++ tcon->tid, full_path);
++ break;
++ case SMB2_OP_RENAME:
++ rqst[num_rqst].rq_iov = &vars->si_iov[0];
++ rqst[num_rqst].rq_nvec = 2;
+
+- len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
++ len = in_iov[i].iov_len;
+
+- vars->rename_info.ReplaceIfExists = 1;
+- vars->rename_info.RootDirectory = 0;
+- vars->rename_info.FileNameLength = cpu_to_le32(len);
++ vars->rename_info.ReplaceIfExists = 1;
++ vars->rename_info.RootDirectory = 0;
++ vars->rename_info.FileNameLength = cpu_to_le32(len);
+
+- size[0] = sizeof(struct smb2_file_rename_info);
+- data[0] = &vars->rename_info;
++ size[0] = sizeof(struct smb2_file_rename_info);
++ data[0] = &vars->rename_info;
+
+- size[1] = len + 2 /* null */;
+- data[1] = (__le16 *)ptr;
++ size[1] = len + 2 /* null */;
++ data[1] = in_iov[i].iov_base;
+
+- if (cfile)
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst],
+- cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid,
+- current->tgid, FILE_RENAME_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
+- else {
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst],
+- COMPOUND_FID, COMPOUND_FID,
+- current->tgid, FILE_RENAME_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
+- if (!rc) {
++ if (cfile) {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ current->tgid, FILE_RENAME_INFORMATION,
++ SMB2_O_INFO_FILE, 0, data, size);
++ } else {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID, COMPOUND_FID,
++ current->tgid, FILE_RENAME_INFORMATION,
++ SMB2_O_INFO_FILE, 0, data, size);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
+ }
+- }
+- if (rc)
+- goto finished;
+- num_rqst++;
+- trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- case SMB2_OP_HARDLINK:
+- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+- rqst[num_rqst].rq_nvec = 2;
++ num_rqst++;
++ trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
++ break;
++ case SMB2_OP_HARDLINK:
++ rqst[num_rqst].rq_iov = &vars->si_iov[0];
++ rqst[num_rqst].rq_nvec = 2;
+
+- len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
++ len = in_iov[i].iov_len;
+
+- vars->link_info.ReplaceIfExists = 0;
+- vars->link_info.RootDirectory = 0;
+- vars->link_info.FileNameLength = cpu_to_le32(len);
++ vars->link_info.ReplaceIfExists = 0;
++ vars->link_info.RootDirectory = 0;
++ vars->link_info.FileNameLength = cpu_to_le32(len);
+
+- size[0] = sizeof(struct smb2_file_link_info);
+- data[0] = &vars->link_info;
++ size[0] = sizeof(struct smb2_file_link_info);
++ data[0] = &vars->link_info;
+
+- size[1] = len + 2 /* null */;
+- data[1] = (__le16 *)ptr;
++ size[1] = len + 2 /* null */;
++ data[1] = in_iov[i].iov_base;
+
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst], COMPOUND_FID,
+- COMPOUND_FID, current->tgid,
+- FILE_LINK_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
+- if (rc)
+- goto finished;
+- smb2_set_next_command(tcon, &rqst[num_rqst]);
+- smb2_set_related(&rqst[num_rqst++]);
+- trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
+- break;
+- default:
+- cifs_dbg(VFS, "Invalid command\n");
+- rc = -EINVAL;
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst], COMPOUND_FID,
++ COMPOUND_FID, current->tgid,
++ FILE_LINK_INFORMATION,
++ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
++ smb2_set_next_command(tcon, &rqst[num_rqst]);
++ smb2_set_related(&rqst[num_rqst++]);
++ trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
++ break;
++ case SMB2_OP_SET_REPARSE:
++ rqst[num_rqst].rq_iov = vars->io_iov;
++ rqst[num_rqst].rq_nvec = ARRAY_SIZE(vars->io_iov);
++
++ if (cfile) {
++ rc = SMB2_ioctl_init(tcon, server, &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ FSCTL_SET_REPARSE_POINT,
++ in_iov[i].iov_base,
++ in_iov[i].iov_len, 0);
++ } else {
++ rc = SMB2_ioctl_init(tcon, server, &rqst[num_rqst],
++ COMPOUND_FID, COMPOUND_FID,
++ FSCTL_SET_REPARSE_POINT,
++ in_iov[i].iov_base,
++ in_iov[i].iov_len, 0);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
++ smb2_set_next_command(tcon, &rqst[num_rqst]);
++ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
++ }
++ num_rqst++;
++ trace_smb3_set_reparse_compound_enter(xid, ses->Suid,
++ tcon->tid, full_path);
++ break;
++ case SMB2_OP_GET_REPARSE:
++ rqst[num_rqst].rq_iov = vars->io_iov;
++ rqst[num_rqst].rq_nvec = ARRAY_SIZE(vars->io_iov);
++
++ if (cfile) {
++ rc = SMB2_ioctl_init(tcon, server, &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ FSCTL_GET_REPARSE_POINT,
++ NULL, 0, CIFSMaxBufSize);
++ } else {
++ rc = SMB2_ioctl_init(tcon, server, &rqst[num_rqst],
++ COMPOUND_FID, COMPOUND_FID,
++ FSCTL_GET_REPARSE_POINT,
++ NULL, 0, CIFSMaxBufSize);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
++ smb2_set_next_command(tcon, &rqst[num_rqst]);
++ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
++ }
++ num_rqst++;
++ trace_smb3_get_reparse_compound_enter(xid, ses->Suid,
++ tcon->tid, full_path);
++ break;
++ case SMB2_OP_QUERY_WSL_EA:
++ rqst[num_rqst].rq_iov = &vars->ea_iov;
++ rqst[num_rqst].rq_nvec = 1;
++
++ if (cfile) {
++ rc = SMB2_query_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ FILE_FULL_EA_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ SMB2_WSL_MAX_QUERY_EA_RESP_SIZE,
++ sizeof(wsl_query_eas),
++ (void *)wsl_query_eas);
++ } else {
++ rc = SMB2_query_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID,
++ COMPOUND_FID,
++ FILE_FULL_EA_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ SMB2_WSL_MAX_QUERY_EA_RESP_SIZE,
++ sizeof(wsl_query_eas),
++ (void *)wsl_query_eas);
++ }
++ if (!rc && (!cfile || num_rqst > 1)) {
++ smb2_set_next_command(tcon, &rqst[num_rqst]);
++ smb2_set_related(&rqst[num_rqst]);
++ } else if (rc) {
++ goto finished;
++ }
++ num_rqst++;
++ break;
++ default:
++ cifs_dbg(VFS, "Invalid command\n");
++ rc = -EINVAL;
++ }
+ }
+ if (rc)
+ goto finished;
+@@ -375,157 +610,219 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ num_rqst++;
+
+ if (cfile) {
++ if (retries)
++ for (i = 1; i < num_rqst - 2; i++)
++ smb2_set_replay(server, &rqst[i]);
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst - 2,
+ &rqst[1], &resp_buftype[1],
+ &rsp_iov[1]);
+- } else
++ } else {
++ if (retries)
++ for (i = 0; i < num_rqst; i++)
++ smb2_set_replay(server, &rqst[i]);
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst,
+ rqst, resp_buftype,
+ rsp_iov);
++ }
+
+- finished:
+- SMB2_open_free(&rqst[0]);
++finished:
++ num_rqst = 0;
++ SMB2_open_free(&rqst[num_rqst++]);
+ if (rc == -EREMCHG) {
+ pr_warn_once("server share %s deleted\n", tcon->tree_name);
+ tcon->need_reconnect = true;
+ }
+
+- switch (command) {
+- case SMB2_OP_QUERY_INFO:
+- idata = ptr;
+- if (rc == 0 && cfile && cfile->symlink_target) {
+- idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+- if (!idata->symlink_target)
+- rc = -ENOMEM;
+- }
+- if (rc == 0) {
+- qi_rsp = (struct smb2_query_info_rsp *)
+- rsp_iov[1].iov_base;
+- rc = smb2_validate_and_copy_iov(
+- le16_to_cpu(qi_rsp->OutputBufferOffset),
+- le32_to_cpu(qi_rsp->OutputBufferLength),
+- &rsp_iov[1], sizeof(idata->fi), (char *)&idata->fi);
+- }
+- if (rqst[1].rq_iov)
+- SMB2_query_info_free(&rqst[1]);
+- if (rqst[2].rq_iov)
+- SMB2_close_free(&rqst[2]);
+- if (rc)
+- trace_smb3_query_info_compound_err(xid, ses->Suid,
+- tcon->tid, rc);
+- else
+- trace_smb3_query_info_compound_done(xid, ses->Suid,
+- tcon->tid);
+- break;
+- case SMB2_OP_POSIX_QUERY_INFO:
+- idata = ptr;
+- if (rc == 0 && cfile && cfile->symlink_target) {
+- idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+- if (!idata->symlink_target)
+- rc = -ENOMEM;
+- }
+- if (rc == 0) {
+- qi_rsp = (struct smb2_query_info_rsp *)
+- rsp_iov[1].iov_base;
+- rc = smb2_validate_and_copy_iov(
+- le16_to_cpu(qi_rsp->OutputBufferOffset),
+- le32_to_cpu(qi_rsp->OutputBufferLength),
+- &rsp_iov[1], sizeof(idata->posix_fi) /* add SIDs */,
+- (char *)&idata->posix_fi);
+- }
+- if (rc == 0) {
+- unsigned int length = le32_to_cpu(qi_rsp->OutputBufferLength);
+-
+- if (length > sizeof(idata->posix_fi)) {
+- char *base = (char *)rsp_iov[1].iov_base +
+- le16_to_cpu(qi_rsp->OutputBufferOffset) +
+- sizeof(idata->posix_fi);
+- *extbuflen = length - sizeof(idata->posix_fi);
+- *extbuf = kmemdup(base, *extbuflen, GFP_KERNEL);
+- if (!*extbuf)
++ for (i = 0; i < num_cmds; i++) {
++ switch (cmds[i]) {
++ case SMB2_OP_QUERY_INFO:
++ idata = in_iov[i].iov_base;
++ if (rc == 0 && cfile && cfile->symlink_target) {
++ idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++ if (!idata->symlink_target)
+ rc = -ENOMEM;
++ }
++ if (rc == 0) {
++ qi_rsp = (struct smb2_query_info_rsp *)
++ rsp_iov[i + 1].iov_base;
++ rc = smb2_validate_and_copy_iov(
++ le16_to_cpu(qi_rsp->OutputBufferOffset),
++ le32_to_cpu(qi_rsp->OutputBufferLength),
++ &rsp_iov[i + 1], sizeof(idata->fi), (char *)&idata->fi);
++ }
++ SMB2_query_info_free(&rqst[num_rqst++]);
++ if (rc)
++ trace_smb3_query_info_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
++ else
++ trace_smb3_query_info_compound_done(xid, ses->Suid,
++ tcon->tid);
++ break;
++ case SMB2_OP_POSIX_QUERY_INFO:
++ idata = in_iov[i].iov_base;
++ if (rc == 0 && cfile && cfile->symlink_target) {
++ idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
++ if (!idata->symlink_target)
++ rc = -ENOMEM;
++ }
++ if (rc == 0) {
++ qi_rsp = (struct smb2_query_info_rsp *)
++ rsp_iov[i + 1].iov_base;
++ rc = smb2_validate_and_copy_iov(
++ le16_to_cpu(qi_rsp->OutputBufferOffset),
++ le32_to_cpu(qi_rsp->OutputBufferLength),
++ &rsp_iov[i + 1], sizeof(idata->posix_fi) /* add SIDs */,
++ (char *)&idata->posix_fi);
++ }
++ if (rc == 0)
++ rc = parse_posix_sids(idata, &rsp_iov[i + 1]);
++
++ SMB2_query_info_free(&rqst[num_rqst++]);
++ if (rc)
++ trace_smb3_posix_query_info_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
++ else
++ trace_smb3_posix_query_info_compound_done(xid, ses->Suid,
++ tcon->tid);
++ break;
++ case SMB2_OP_DELETE:
++ if (rc)
++ trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc);
++ else {
++ /*
++ * If dentry (hence, inode) is NULL, lease break is going to
++ * take care of degrading leases on handles for deleted files.
++ */
++ if (inode)
++ cifs_mark_open_handles_for_deleted_file(inode, full_path);
++ trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
++ }
++ break;
++ case SMB2_OP_MKDIR:
++ if (rc)
++ trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc);
++ else
++ trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
++ break;
++ case SMB2_OP_HARDLINK:
++ if (rc)
++ trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc);
++ else
++ trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
++ SMB2_set_info_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_RENAME:
++ if (rc)
++ trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc);
++ else
++ trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
++ SMB2_set_info_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_RMDIR:
++ if (rc)
++ trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc);
++ else
++ trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
++ SMB2_set_info_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_SET_EOF:
++ if (rc)
++ trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc);
++ else
++ trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
++ SMB2_set_info_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_SET_INFO:
++ if (rc)
++ trace_smb3_set_info_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
++ else
++ trace_smb3_set_info_compound_done(xid, ses->Suid,
++ tcon->tid);
++ SMB2_set_info_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_SET_REPARSE:
++ if (rc) {
++ trace_smb3_set_reparse_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
+ } else {
+- rc = -EINVAL;
++ trace_smb3_set_reparse_compound_done(xid, ses->Suid,
++ tcon->tid);
+ }
++ SMB2_ioctl_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_GET_REPARSE:
++ if (!rc) {
++ iov = &rsp_iov[i + 1];
++ idata = in_iov[i].iov_base;
++ idata->reparse.io.iov = *iov;
++ idata->reparse.io.buftype = resp_buftype[i + 1];
++ rbuf = reparse_buf_ptr(iov);
++ if (IS_ERR(rbuf)) {
++ rc = PTR_ERR(rbuf);
++ trace_smb3_set_reparse_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
++ } else {
++ idata->reparse.tag = le32_to_cpu(rbuf->ReparseTag);
++ trace_smb3_set_reparse_compound_done(xid, ses->Suid,
++ tcon->tid);
++ }
++ memset(iov, 0, sizeof(*iov));
++ resp_buftype[i + 1] = CIFS_NO_BUFFER;
++ } else {
++ trace_smb3_set_reparse_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
++ }
++ SMB2_ioctl_free(&rqst[num_rqst++]);
++ break;
++ case SMB2_OP_QUERY_WSL_EA:
++ if (!rc) {
++ idata = in_iov[i].iov_base;
++ qi_rsp = rsp_iov[i + 1].iov_base;
++ data[0] = (u8 *)qi_rsp + le16_to_cpu(qi_rsp->OutputBufferOffset);
++ size[0] = le32_to_cpu(qi_rsp->OutputBufferLength);
++ rc = check_wsl_eas(&rsp_iov[i + 1]);
++ if (!rc) {
++ memcpy(idata->wsl.eas, data[0], size[0]);
++ idata->wsl.eas_len = size[0];
++ }
++ }
++ if (!rc) {
++ trace_smb3_query_wsl_ea_compound_done(xid, ses->Suid,
++ tcon->tid);
++ } else {
++ trace_smb3_query_wsl_ea_compound_err(xid, ses->Suid,
++ tcon->tid, rc);
++ }
++ SMB2_query_info_free(&rqst[num_rqst++]);
++ break;
+ }
+- if (rqst[1].rq_iov)
+- SMB2_query_info_free(&rqst[1]);
+- if (rqst[2].rq_iov)
+- SMB2_close_free(&rqst[2]);
+- if (rc)
+- trace_smb3_posix_query_info_compound_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_posix_query_info_compound_done(xid, ses->Suid, tcon->tid);
+- break;
+- case SMB2_OP_DELETE:
+- if (rc)
+- trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
+- if (rqst[1].rq_iov)
+- SMB2_close_free(&rqst[1]);
+- break;
+- case SMB2_OP_MKDIR:
+- if (rc)
+- trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
+- if (rqst[1].rq_iov)
+- SMB2_close_free(&rqst[1]);
+- break;
+- case SMB2_OP_HARDLINK:
+- if (rc)
+- trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
+- free_set_inf_compound(rqst);
+- break;
+- case SMB2_OP_RENAME:
+- if (rc)
+- trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
+- free_set_inf_compound(rqst);
+- break;
+- case SMB2_OP_RMDIR:
+- if (rc)
+- trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
+- free_set_inf_compound(rqst);
+- break;
+- case SMB2_OP_SET_EOF:
+- if (rc)
+- trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc);
+- else
+- trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
+- free_set_inf_compound(rqst);
+- break;
+- case SMB2_OP_SET_INFO:
+- if (rc)
+- trace_smb3_set_info_compound_err(xid, ses->Suid,
+- tcon->tid, rc);
+- else
+- trace_smb3_set_info_compound_done(xid, ses->Suid,
+- tcon->tid);
+- free_set_inf_compound(rqst);
+- break;
+ }
++ SMB2_close_free(&rqst[num_rqst]);
+
+- if (cfile)
+- cifsFileInfo_put(cfile);
+-
++ num_cmds += 2;
+ if (out_iov && out_buftype) {
+- memcpy(out_iov, rsp_iov, 3 * sizeof(*out_iov));
+- memcpy(out_buftype, resp_buftype, 3 * sizeof(*out_buftype));
++ memcpy(out_iov, rsp_iov, num_cmds * sizeof(*out_iov));
++ memcpy(out_buftype, resp_buftype,
++ num_cmds * sizeof(*out_buftype));
+ } else {
+- free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+- free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+- free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++ for (i = 0; i < num_cmds; i++)
++ free_rsp_buf(resp_buftype[i], rsp_iov[i].iov_base);
+ }
++ num_cmds -= 2; /* correct num_cmds as there could be a retry */
+ kfree(vars);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
++ if (cfile)
++ cifsFileInfo_put(cfile);
++
+ return rc;
+ }
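
smb2_compound_op now participates in request replay: on a replayable error it
rebuilds and resends the entire compound, backing off between attempts. A
compact userspace model of that retry shape; the error value, retry cap, and
helpers are illustrative stand-ins for is_replayable_error() and
smb2_should_replay().

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_RETRIES 3

    static bool is_replayable_error(int rc)
    {
        return rc == -11;             /* -EAGAIN, illustrative */
    }

    static bool should_replay(int *retries, int *cur_sleep)
    {
        if (++(*retries) > MAX_RETRIES)
            return false;
        sleep(*cur_sleep);
        *cur_sleep *= 2;              /* exponential backoff */
        return true;
    }

    static int send_compound(int attempt)
    {
        return attempt < 2 ? -11 : 0; /* fail twice, then succeed */
    }

    int main(void)
    {
        int retries = 0, cur_sleep = 1, rc;

    replay_again:
        rc = send_compound(retries);
        if (is_replayable_error(rc) && should_replay(&retries, &cur_sleep))
            goto replay_again;

        printf("final rc=%d after %d retries\n", rc, retries);
        return 0;
    }
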
+
+@@ -555,7 +852,7 @@ static int parse_create_response(struct cifs_open_info_data *data,
+ break;
+ }
+ data->reparse_point = reparse_point;
+- data->reparse_tag = tag;
++ data->reparse.tag = tag;
+ return rc;
+ }
+
+@@ -565,38 +862,64 @@ int smb2_query_path_info(const unsigned int xid,
+ const char *full_path,
+ struct cifs_open_info_data *data)
+ {
++ struct cifs_open_parms oparms;
+ __u32 create_options = 0;
+ struct cifsFileInfo *cfile;
+ struct cached_fid *cfid = NULL;
+ struct smb2_hdr *hdr;
+- struct kvec out_iov[3] = {};
++ struct kvec in_iov[3], out_iov[3] = {};
+ int out_buftype[3] = {};
++ int cmds[3];
+ bool islink;
++ int i, num_cmds = 0;
+ int rc, rc2;
+
+ data->adjust_tz = false;
+ data->reparse_point = false;
+
+- if (strcmp(full_path, ""))
+- rc = -ENOENT;
+- else
+- rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
+- /* If it is a root and its handle is cached then use it */
+- if (!rc) {
+- if (cfid->file_all_info_is_valid) {
+- memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi));
++ /*
++ * BB TODO: Add support for using cached root handle in SMB3.1.1 POSIX.
++ * Create SMB2_query_posix_info worker function to do non-compounded
++ * query when we already have an open file handle for this. For now this
++ * is fast enough (always using the compounded version).
++ */
++ if (!tcon->posix_extensions) {
++ if (*full_path) {
++ rc = -ENOENT;
+ } else {
+- rc = SMB2_query_info(xid, tcon, cfid->fid.persistent_fid,
+- cfid->fid.volatile_fid, &data->fi);
++ rc = open_cached_dir(xid, tcon, full_path,
++ cifs_sb, false, &cfid);
+ }
+- close_cached_dir(cfid);
+- return rc;
++ /* If it is a root and its handle is cached then use it */
++ if (!rc) {
++ if (cfid->file_all_info_is_valid) {
++ memcpy(&data->fi, &cfid->file_all_info,
++ sizeof(data->fi));
++ } else {
++ rc = SMB2_query_info(xid, tcon,
++ cfid->fid.persistent_fid,
++ cfid->fid.volatile_fid,
++ &data->fi);
++ }
++ close_cached_dir(cfid);
++ return rc;
++ }
++ cmds[num_cmds++] = SMB2_OP_QUERY_INFO;
++ } else {
++ cmds[num_cmds++] = SMB2_OP_POSIX_QUERY_INFO;
+ }
+
++ in_iov[0].iov_base = data;
++ in_iov[0].iov_len = sizeof(*data);
++ in_iov[1] = in_iov[0];
++ in_iov[2] = in_iov[0];
++
+ cifs_get_readable_path(tcon, full_path, &cfile);
+- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+- create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile,
+- NULL, NULL, out_iov, out_buftype);
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES,
++ FILE_OPEN, create_options, ACL_NO_MODE);
++ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
++ &oparms, in_iov, cmds, num_cmds,
++ cfile, out_iov, out_buftype, NULL);
+ hdr = out_iov[0].iov_base;
+ /*
+ * If first iov is unset, then SMB session was dropped or we've got a
+@@ -607,19 +930,34 @@ int smb2_query_path_info(const unsigned int xid,
+
+ switch (rc) {
+ case 0:
++ rc = parse_create_response(data, cifs_sb, &out_iov[0]);
++ break;
+ case -EOPNOTSUPP:
++ /*
++ * BB TODO: When support for special files added to Samba
++ * re-verify this path.
++ */
+ rc = parse_create_response(data, cifs_sb, &out_iov[0]);
+ if (rc || !data->reparse_point)
+ goto out;
+
+- create_options |= OPEN_REPARSE_POINT;
+- /* Failed on a symbolic link - query a reparse point info */
++ cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
++ /*
++ * Skip SMB2_OP_GET_REPARSE if symlink already parsed in create
++ * response.
++ */
++ if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK)
++ cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
++
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
++ FILE_READ_ATTRIBUTES |
++ FILE_READ_EA | SYNCHRONIZE,
++ FILE_OPEN, create_options |
++ OPEN_REPARSE_POINT, ACL_NO_MODE);
+ cifs_get_readable_path(tcon, full_path, &cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+- FILE_READ_ATTRIBUTES, FILE_OPEN,
+- create_options, ACL_NO_MODE, data,
+- SMB2_OP_QUERY_INFO, cfile, NULL, NULL,
+- NULL, NULL);
++ &oparms, in_iov, cmds, num_cmds,
++ cfile, NULL, NULL, NULL);
+ break;
+ case -EREMOTE:
+ break;
+@@ -637,93 +975,8 @@ int smb2_query_path_info(const unsigned int xid,
+ }
+
+ out:
+- free_rsp_buf(out_buftype[0], out_iov[0].iov_base);
+- free_rsp_buf(out_buftype[1], out_iov[1].iov_base);
+- free_rsp_buf(out_buftype[2], out_iov[2].iov_base);
+- return rc;
+-}
+-
+-int smb311_posix_query_path_info(const unsigned int xid,
+- struct cifs_tcon *tcon,
+- struct cifs_sb_info *cifs_sb,
+- const char *full_path,
+- struct cifs_open_info_data *data,
+- struct cifs_sid *owner,
+- struct cifs_sid *group)
+-{
+- int rc;
+- __u32 create_options = 0;
+- struct cifsFileInfo *cfile;
+- struct kvec out_iov[3] = {};
+- int out_buftype[3] = {};
+- __u8 *sidsbuf = NULL;
+- __u8 *sidsbuf_end = NULL;
+- size_t sidsbuflen = 0;
+- size_t owner_len, group_len;
+-
+- data->adjust_tz = false;
+- data->reparse_point = false;
+-
+- /*
+- * BB TODO: Add support for using the cached root handle.
+- * Create SMB2_query_posix_info worker function to do non-compounded query
+- * when we already have an open file handle for this. For now this is fast enough
+- * (always using the compounded version).
+- */
+-
+- cifs_get_readable_path(tcon, full_path, &cfile);
+- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+- create_options, ACL_NO_MODE, data, SMB2_OP_POSIX_QUERY_INFO, cfile,
+- &sidsbuf, &sidsbuflen, out_iov, out_buftype);
+- /*
+- * If first iov is unset, then SMB session was dropped or we've got a
+- * cached open file (@cfile).
+- */
+- if (!out_iov[0].iov_base || out_buftype[0] == CIFS_NO_BUFFER)
+- goto out;
+-
+- switch (rc) {
+- case 0:
+- case -EOPNOTSUPP:
+- /* BB TODO: When support for special files added to Samba re-verify this path */
+- rc = parse_create_response(data, cifs_sb, &out_iov[0]);
+- if (rc || !data->reparse_point)
+- goto out;
+-
+- create_options |= OPEN_REPARSE_POINT;
+- /* Failed on a symbolic link - query a reparse point info */
+- cifs_get_readable_path(tcon, full_path, &cfile);
+- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES,
+- FILE_OPEN, create_options, ACL_NO_MODE, data,
+- SMB2_OP_POSIX_QUERY_INFO, cfile,
+- &sidsbuf, &sidsbuflen, NULL, NULL);
+- break;
+- }
+-
+-out:
+- if (rc == 0) {
+- sidsbuf_end = sidsbuf + sidsbuflen;
+-
+- owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end);
+- if (owner_len == -1) {
+- rc = -EINVAL;
+- goto out;
+- }
+- memcpy(owner, sidsbuf, owner_len);
+-
+- group_len = posix_info_sid_size(
+- sidsbuf + owner_len, sidsbuf_end);
+- if (group_len == -1) {
+- rc = -EINVAL;
+- goto out;
+- }
+- memcpy(group, sidsbuf + owner_len, group_len);
+- }
+-
+- kfree(sidsbuf);
+- free_rsp_buf(out_buftype[0], out_iov[0].iov_base);
+- free_rsp_buf(out_buftype[1], out_iov[1].iov_base);
+- free_rsp_buf(out_buftype[2], out_iov[2].iov_base);
++ for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
++ free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
+ return rc;
+ }
+
+@@ -732,10 +985,14 @@ smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
+ struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *cifs_sb)
+ {
+- return smb2_compound_op(xid, tcon, cifs_sb, name,
+- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+- CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
+- NULL, NULL, NULL, NULL, NULL);
++ struct cifs_open_parms oparms;
++
++ oparms = CIFS_OPARMS(cifs_sb, tcon, name, FILE_WRITE_ATTRIBUTES,
++ FILE_CREATE, CREATE_NOT_FILE, mode);
++ return smb2_compound_op(xid, tcon, cifs_sb,
++ name, &oparms, NULL,
++ &(int){SMB2_OP_MKDIR}, 1,
++ NULL, NULL, NULL, NULL);
+ }
+
+ void
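An aside on the &(int){SMB2_OP_MKDIR} argument used above: it is a C99 compound literal, a pointer to an anonymous one-element int, which lets a single command code be passed where the reworked smb2_compound_op() expects a command array plus a count. A minimal standalone illustration of the idiom (run_cmds() and the value 42 are invented for the example):

#include <stdio.h>

/* Stand-in for a function that, like the reworked smb2_compound_op(),
 * takes an array of command codes plus a count. */
static void run_cmds(const int *cmds, int num_cmds)
{
        for (int i = 0; i < num_cmds; i++)
                printf("running cmd %d\n", cmds[i]);
}

int main(void)
{
        /* &(int){ 42 } is a pointer to an unnamed int object holding
         * 42, usable here as a one-element array. */
        run_cmds(&(int){ 42 }, 1);
        return 0;
}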
+@@ -743,21 +1000,26 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
+ struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
+ const unsigned int xid)
+ {
+- FILE_BASIC_INFO data;
++ struct cifs_open_parms oparms;
++ FILE_BASIC_INFO data = {};
+ struct cifsInodeInfo *cifs_i;
+ struct cifsFileInfo *cfile;
++ struct kvec in_iov;
+ u32 dosattrs;
+ int tmprc;
+
+- memset(&data, 0, sizeof(data));
++ in_iov.iov_base = &data;
++ in_iov.iov_len = sizeof(data);
+ cifs_i = CIFS_I(inode);
+ dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
+ data.Attributes = cpu_to_le32(dosattrs);
+ cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
++ oparms = CIFS_OPARMS(cifs_sb, tcon, name, FILE_WRITE_ATTRIBUTES,
++ FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE);
+ tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
+- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+- CREATE_NOT_FILE, ACL_NO_MODE,
+- &data, SMB2_OP_SET_INFO, cfile, NULL, NULL, NULL, NULL);
++ &oparms, &in_iov,
++ &(int){SMB2_OP_SET_INFO}, 1,
++ cfile, NULL, NULL, NULL);
+ if (tmprc == 0)
+ cifs_i->cifsAttrs = dosattrs;
+ }
+@@ -766,27 +1028,48 @@ int
+ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *cifs_sb)
+ {
++ struct cifs_open_parms oparms;
++
+ drop_cached_dir_by_name(xid, tcon, name, cifs_sb);
+- return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+- CREATE_NOT_FILE, ACL_NO_MODE,
+- NULL, SMB2_OP_RMDIR, NULL, NULL, NULL, NULL, NULL);
++ oparms = CIFS_OPARMS(cifs_sb, tcon, name, DELETE,
++ FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE);
++ return smb2_compound_op(xid, tcon, cifs_sb,
++ name, &oparms, NULL,
++ &(int){SMB2_OP_RMDIR}, 1,
++ NULL, NULL, NULL, NULL);
+ }
+
+ int
+ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+- struct cifs_sb_info *cifs_sb)
++ struct cifs_sb_info *cifs_sb, struct dentry *dentry)
+ {
+- return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+- CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
+- ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL, NULL, NULL, NULL, NULL);
++ struct cifs_open_parms oparms;
++
++ oparms = CIFS_OPARMS(cifs_sb, tcon, name,
++ DELETE, FILE_OPEN,
++ CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
++ ACL_NO_MODE);
++ int rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
++ NULL, &(int){SMB2_OP_DELETE}, 1,
++ NULL, NULL, NULL, dentry);
++ if (rc == -EINVAL) {
++ cifs_dbg(FYI, "invalid lease key, resending request without lease");
++ rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
++ NULL, &(int){SMB2_OP_DELETE}, 1,
++ NULL, NULL, NULL, NULL);
++ }
++ return rc;
+ }
+
+-static int
+-smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb, __u32 access, int command,
+- struct cifsFileInfo *cfile)
++static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb,
++ __u32 create_options, __u32 access,
++ int command, struct cifsFileInfo *cfile,
++ struct dentry *dentry)
+ {
++ struct cifs_open_parms oparms;
++ struct kvec in_iov;
+ __le16 *smb2_to_name = NULL;
+ int rc;
+
+@@ -795,60 +1078,98 @@ smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
+ rc = -ENOMEM;
+ goto smb2_rename_path;
+ }
+- rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
+- FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
+- command, cfile, NULL, NULL, NULL, NULL);
++ in_iov.iov_base = smb2_to_name;
++ in_iov.iov_len = 2 * UniStrnlen((wchar_t *)smb2_to_name, PATH_MAX);
++ oparms = CIFS_OPARMS(cifs_sb, tcon, from_name, access, FILE_OPEN,
++ create_options, ACL_NO_MODE);
++ rc = smb2_compound_op(xid, tcon, cifs_sb, from_name,
++ &oparms, &in_iov, &command, 1,
++ cfile, NULL, NULL, dentry);
+ smb2_rename_path:
+ kfree(smb2_to_name);
+ return rc;
+ }
+
+-int
+-smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb)
++int smb2_rename_path(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb)
+ {
+ struct cifsFileInfo *cfile;
++ __u32 co = file_create_options(source_dentry);
+
+ drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb);
+ cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
+
+- return smb2_set_path_attr(xid, tcon, from_name, to_name,
+- cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
++ int rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
++ co, DELETE, SMB2_OP_RENAME, cfile, source_dentry);
++ if (rc == -EINVAL) {
++ cifs_dbg(FYI, "invalid lease key, resending request without lease");
++ cifs_get_writable_path(tcon, from_name,
++ FIND_WR_WITH_DELETE, &cfile);
++ rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
++ co, DELETE, SMB2_OP_RENAME, cfile, NULL);
++ }
++ return rc;
+ }
+
+-int
+-smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb)
++int smb2_create_hardlink(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb)
+ {
+- return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
+- FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK,
+- NULL);
++ __u32 co = file_create_options(source_dentry);
++
++ return smb2_set_path_attr(xid, tcon, from_name, to_name,
++ cifs_sb, co, FILE_READ_ATTRIBUTES,
++ SMB2_OP_HARDLINK, NULL, NULL);
+ }
+
+ int
+ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *full_path, __u64 size,
+- struct cifs_sb_info *cifs_sb, bool set_alloc)
++ struct cifs_sb_info *cifs_sb, bool set_alloc,
++ struct dentry *dentry)
+ {
+- __le64 eof = cpu_to_le64(size);
++ struct cifs_open_parms oparms;
+ struct cifsFileInfo *cfile;
++ struct kvec in_iov;
++ __le64 eof = cpu_to_le64(size);
++ int rc;
+
++ in_iov.iov_base = &eof;
++ in_iov.iov_len = sizeof(eof);
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+- return smb2_compound_op(xid, tcon, cifs_sb, full_path,
+- FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
+- &eof, SMB2_OP_SET_EOF, cfile, NULL, NULL, NULL, NULL);
++
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_DATA,
++ FILE_OPEN, 0, ACL_NO_MODE);
++ rc = smb2_compound_op(xid, tcon, cifs_sb,
++ full_path, &oparms, &in_iov,
++ &(int){SMB2_OP_SET_EOF}, 1,
++ cfile, NULL, NULL, dentry);
++ if (rc == -EINVAL) {
++ cifs_dbg(FYI, "invalid lease key, resending request without lease");
++ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++ rc = smb2_compound_op(xid, tcon, cifs_sb,
++ full_path, &oparms, &in_iov,
++ &(int){SMB2_OP_SET_EOF}, 1,
++ cfile, NULL, NULL, NULL);
++ }
++ return rc;
+ }
+
+ int
+ smb2_set_file_info(struct inode *inode, const char *full_path,
+ FILE_BASIC_INFO *buf, const unsigned int xid)
+ {
++ struct cifs_open_parms oparms;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
++ struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
+ int rc;
+
+ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+@@ -862,10 +1183,121 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
+ tcon = tlink_tcon(tlink);
+
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+- FILE_WRITE_ATTRIBUTES, FILE_OPEN,
+- 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile,
+- NULL, NULL, NULL, NULL);
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES,
++ FILE_OPEN, 0, ACL_NO_MODE);
++ rc = smb2_compound_op(xid, tcon, cifs_sb,
++ full_path, &oparms, &in_iov,
++ &(int){SMB2_OP_SET_INFO}, 1,
++ cfile, NULL, NULL, NULL);
+ cifs_put_tlink(tlink);
+ return rc;
+ }
++
++struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
++ struct super_block *sb,
++ const unsigned int xid,
++ struct cifs_tcon *tcon,
++ const char *full_path,
++ struct kvec *reparse_iov,
++ struct kvec *xattr_iov)
++{
++ struct cifs_open_parms oparms;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++ struct cifsFileInfo *cfile;
++ struct inode *new = NULL;
++ int out_buftype[4] = {};
++ struct kvec out_iov[4] = {};
++ struct kvec in_iov[2];
++ int cmds[2];
++ int rc;
++ int i;
++
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
++ SYNCHRONIZE | DELETE |
++ FILE_READ_ATTRIBUTES |
++ FILE_WRITE_ATTRIBUTES,
++ FILE_CREATE,
++ CREATE_NOT_DIR | OPEN_REPARSE_POINT,
++ ACL_NO_MODE);
++ if (xattr_iov)
++ oparms.ea_cctx = xattr_iov;
++
++ cmds[0] = SMB2_OP_SET_REPARSE;
++ in_iov[0] = *reparse_iov;
++ in_iov[1].iov_base = data;
++ in_iov[1].iov_len = sizeof(*data);
++
++ if (tcon->posix_extensions) {
++ cmds[1] = SMB2_OP_POSIX_QUERY_INFO;
++ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms,
++ in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL);
++ if (!rc) {
++ rc = smb311_posix_get_inode_info(&new, full_path,
++ data, sb, xid);
++ }
++ } else {
++ cmds[1] = SMB2_OP_QUERY_INFO;
++ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms,
++ in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL);
++ if (!rc) {
++ rc = cifs_get_inode_info(&new, full_path,
++ data, sb, xid, NULL);
++ }
++ }
++
++ /*
++ * If CREATE was successful but SMB2_OP_SET_REPARSE failed then
++ * remove the intermediate object created by CREATE. Otherwise an
++ * empty object stays on the server when the reparse call fails.
++ */
++ if (rc &&
++ out_iov[0].iov_base != NULL && out_buftype[0] != CIFS_NO_BUFFER &&
++ ((struct smb2_hdr *)out_iov[0].iov_base)->Status == STATUS_SUCCESS &&
++ (out_iov[1].iov_base == NULL || out_buftype[1] == CIFS_NO_BUFFER ||
++ ((struct smb2_hdr *)out_iov[1].iov_base)->Status != STATUS_SUCCESS))
++ smb2_unlink(xid, tcon, full_path, cifs_sb, NULL);
++
++ for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
++ free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
++
++ return rc ? ERR_PTR(rc) : new;
++}
++
++int smb2_query_reparse_point(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct cifs_sb_info *cifs_sb,
++ const char *full_path,
++ u32 *tag, struct kvec *rsp,
++ int *rsp_buftype)
++{
++ struct cifs_open_parms oparms;
++ struct cifs_open_info_data data = {};
++ struct cifsFileInfo *cfile;
++ struct kvec in_iov = { .iov_base = &data, .iov_len = sizeof(data), };
++ int rc;
++
++ cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
++
++ cifs_get_readable_path(tcon, full_path, &cfile);
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
++ FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE,
++ FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE);
++ rc = smb2_compound_op(xid, tcon, cifs_sb,
++ full_path, &oparms, &in_iov,
++ &(int){SMB2_OP_GET_REPARSE}, 1,
++ cfile, NULL, NULL, NULL);
++ if (rc)
++ goto out;
++
++ *tag = data.reparse.tag;
++ *rsp = data.reparse.io.iov;
++ *rsp_buftype = data.reparse.io.buftype;
++ memset(&data.reparse.io.iov, 0, sizeof(data.reparse.io.iov));
++ data.reparse.io.buftype = CIFS_NO_BUFFER;
++out:
++ cifs_free_open_info(&data);
++ return rc;
++}
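The rewritten smb2inode.c entry points above share one calling shape: assemble a cmds[] array with matching in_iov[] buffers, hand both to smb2_compound_op(), and, for the destructive operations (delete, rename, set-EOF), retry exactly once without the lease key when the server answers -EINVAL. A rough userspace sketch of that retry shape, using hypothetical stand-ins (smb_compound() is not a kernel function):

#include <stdio.h>

#define ERR_EINVAL (-22)

/* Hypothetical stand-in for smb2_compound_op(); rejects the request
 * whenever a lease key is attached, to exercise the retry path. */
static int smb_compound(int cmd, const void *lease_key)
{
        (void)cmd;
        return lease_key ? ERR_EINVAL : 0;
}

static int delete_with_retry(int cmd, const void *lease_key)
{
        int rc = smb_compound(cmd, lease_key);

        if (rc == ERR_EINVAL && lease_key) {
                /* invalid lease key: resend the request without it */
                rc = smb_compound(cmd, NULL);
        }
        return rc;
}

int main(void)
{
        int key = 1;

        printf("rc=%d\n", delete_with_retry(7, &key));
        return 0;
}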
+diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
+index 1a90dd78b238f0..ac1895358908ab 100644
+--- a/fs/smb/client/smb2maperror.c
++++ b/fs/smb/client/smb2maperror.c
+@@ -1210,6 +1210,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"},
+ {STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"},
+ {STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"},
++ {STATUS_SERVER_UNAVAILABLE, -EAGAIN, "STATUS_SERVER_UNAVAILABLE"},
++ {STATUS_FILE_NOT_AVAILABLE, -EAGAIN, "STATUS_FILE_NOT_AVAILABLE"},
+ {STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"},
+ {STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"},
+ {STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"},
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 25f7cd6f23d64c..677ef6f99a5be4 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -173,6 +173,21 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
+ }
+
+ mid = le64_to_cpu(shdr->MessageId);
++ if (check_smb2_hdr(shdr, mid))
++ return 1;
++
++ if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
++ cifs_dbg(VFS, "Invalid structure size %u\n",
++ le16_to_cpu(shdr->StructureSize));
++ return 1;
++ }
++
++ command = le16_to_cpu(shdr->Command);
++ if (command >= NUMBER_OF_SMB2_COMMANDS) {
++ cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
++ return 1;
++ }
++
+ if (len < pdu_size) {
+ if ((len >= hdr_size)
+ && (shdr->Status != 0)) {
+@@ -193,21 +208,6 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
+ return 1;
+ }
+
+- if (check_smb2_hdr(shdr, mid))
+- return 1;
+-
+- if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+- cifs_dbg(VFS, "Invalid structure size %u\n",
+- le16_to_cpu(shdr->StructureSize));
+- return 1;
+- }
+-
+- command = le16_to_cpu(shdr->Command);
+- if (command >= NUMBER_OF_SMB2_COMMANDS) {
+- cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
+- return 1;
+- }
+-
+ if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
+ if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
+ pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
+@@ -313,6 +313,9 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
+ char *
+ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
+ {
++ const int max_off = 4096;
++ const int max_len = 128 * 1024;
++
+ *off = 0;
+ *len = 0;
+
+@@ -384,29 +387,20 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
+ * Invalid length or offset probably means data area is invalid, but
+ * we have little choice but to ignore the data area in this case.
+ */
+- if (*off > 4096) {
+- cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
+- *len = 0;
++ if (unlikely(*off < 0 || *off > max_off ||
++ *len < 0 || *len > max_len)) {
++ cifs_dbg(VFS, "%s: invalid data area (off=%d len=%d)\n",
++ __func__, *off, *len);
+ *off = 0;
+- } else if (*off < 0) {
+- cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
+- *off);
+- *off = 0;
+- *len = 0;
+- } else if (*len < 0) {
+- cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
+- *len);
+ *len = 0;
+- } else if (*len > 128 * 1024) {
+- cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
++ } else if (*off == 0) {
+ *len = 0;
+ }
+
+ /* return pointer to beginning of data area, ie offset from SMB start */
+- if ((*off != 0) && (*len != 0))
++ if (*off > 0 && *len > 0)
+ return (char *)shdr + *off;
+- else
+- return NULL;
++ return NULL;
+ }
+
+ /*
+@@ -628,6 +622,8 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ spin_lock(&tcon->open_file_lock);
+ cifs_stats_inc(
+@@ -703,6 +699,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ /* look up tcon based on tid & uid */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+
+ spin_lock(&tcon->open_file_lock);
+@@ -769,7 +767,7 @@ smb2_cancelled_close_fid(struct work_struct *work)
+ if (rc)
+ cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
+
+- cifs_put_tcon(tcon);
++ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close_fid);
+ kfree(cancelled);
+ }
+
+@@ -787,7 +785,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ {
+ struct close_cancelled_open *cancelled;
+
+- cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
++ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+@@ -813,6 +811,8 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+ if (tcon->tc_count <= 0) {
+ struct TCP_Server_Info *server = NULL;
+
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_see_cancelled_close);
+ WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
+ spin_unlock(&cifs_tcp_ses_lock);
+
+@@ -825,12 +825,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+ return 0;
+ }
+ tcon->tc_count++;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_cancelled_close);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
+ persistent_fid, volatile_fid);
+ if (rc)
+- cifs_put_tcon(tcon);
++ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close);
+
+ return rc;
+ }
+@@ -858,7 +860,7 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
+ rsp->PersistentFileId,
+ rsp->VolatileFileId);
+ if (rc)
+- cifs_put_tcon(tcon);
++ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_mid);
+
+ return rc;
+ }
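The smb2_get_data_area_len() hunk above folds four separate error branches into one range test over the offset and length. A standalone sketch of the consolidated check, keeping the same limits (offset at most 4096, length at most 128K); the rest is illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool data_area_ok(int off, int len)
{
        const int max_off = 4096;
        const int max_len = 128 * 1024;

        /* one test replaces the four branches removed above */
        return off > 0 && off <= max_off && len > 0 && len <= max_len;
}

int main(void)
{
        printf("%d\n", data_area_ok(80, 512));     /* 1: valid       */
        printf("%d\n", data_area_ok(-4, 512));     /* 0: bad offset  */
        printf("%d\n", data_area_ok(80, 1 << 20)); /* 0: bad length  */
        return 0;
}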
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 9aeecee6b91b35..450e3050324c6c 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -28,6 +28,7 @@
+ #include "fscache.h"
+ #include "fs_context.h"
+ #include "cached_dir.h"
++#include "reparse.h"
+
+ /* Change credits for different ops and return the total number of credits */
+ static int
+@@ -403,8 +404,10 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
+ cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
+ shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
+ shdr->Id.SyncId.ProcessId);
+- cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
+- server->ops->calc_smb_size(buf));
++ if (!server->ops->check_message(buf, server->total_read, server)) {
++ cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
++ server->ops->calc_smb_size(buf));
++ }
+ #endif
+ }
+
+@@ -593,16 +596,12 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ }
+
+ /*
+- * Go through iface_list and do kref_put to remove
+- * any unused ifaces. ifaces in use will be removed
+- * when the last user calls a kref_put on it
++ * Go through iface_list and mark each iface as inactive
+ */
+ list_for_each_entry_safe(iface, niface, &ses->iface_list,
+- iface_head) {
++ iface_head)
+ iface->is_active = 0;
+- kref_put(&iface->refcount, release_iface);
+- ses->iface_count--;
+- }
++
+ spin_unlock(&ses->iface_lock);
+
+ /*
+@@ -616,11 +615,12 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ "multichannel not available\n"
+ "Empty network interface list returned by server %s\n",
+ ses->server->hostname);
+- rc = -EINVAL;
++ rc = -EOPNOTSUPP;
++ ses->iface_last_update = jiffies;
+ goto out;
+ }
+
+- while (bytes_left >= sizeof(*p)) {
++ while (bytes_left >= (ssize_t)sizeof(*p)) {
+ memset(&tmp_iface, 0, sizeof(tmp_iface));
+ tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
+ tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+@@ -676,10 +676,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ iface_head) {
+ ret = iface_cmp(iface, &tmp_iface);
+ if (!ret) {
+- /* just get a ref so that it doesn't get picked/freed */
+ iface->is_active = 1;
+- kref_get(&iface->refcount);
+- ses->iface_count++;
+ spin_unlock(&ses->iface_lock);
+ goto next_iface;
+ } else if (ret < 0) {
+@@ -717,7 +714,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+
+ ses->iface_count++;
+ spin_unlock(&ses->iface_lock);
+- ses->iface_last_update = jiffies;
+ next_iface:
+ nb_iface++;
+ next = le32_to_cpu(p->Next);
+@@ -739,13 +735,23 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ if ((bytes_left > 8) || p->Next)
+ cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
++ ses->iface_last_update = jiffies;
+
+- if (!ses->iface_count) {
+- rc = -EINVAL;
+- goto out;
++out:
++ /*
++ * Go through the list again and put the inactive entries
++ */
++ spin_lock(&ses->iface_lock);
++ list_for_each_entry_safe(iface, niface, &ses->iface_list,
++ iface_head) {
++ if (!iface->is_active) {
++ list_del(&iface->iface_head);
++ kref_put(&iface->refcount, release_iface);
++ ses->iface_count--;
++ }
+ }
++ spin_unlock(&ses->iface_lock);
+
+-out:
+ return rc;
+ }
+
+@@ -756,6 +762,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ unsigned int ret_data_len = 0;
+ struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ struct cifs_ses *ses = tcon->ses;
++ struct TCP_Server_Info *pserver;
+
+ /* do not query too frequently */
+ if (ses->iface_last_update &&
+@@ -780,6 +787,16 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ if (rc)
+ goto out;
+
++ /* check if iface is still active */
++ spin_lock(&ses->chan_lock);
++ pserver = ses->chans[0].server;
++ if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
++ spin_unlock(&ses->chan_lock);
++ cifs_chan_update_iface(ses, pserver);
++ spin_lock(&ses->chan_lock);
++ }
++ spin_unlock(&ses->chan_lock);
++
+ out:
+ kfree(out_buf);
+ return rc;
+@@ -1092,7 +1109,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ struct smb2_compound_vars *vars;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ struct smb_rqst *rqst;
+ struct kvec *rsp_iov;
+ __le16 *utf16_path = NULL;
+@@ -1108,6 +1125,13 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ struct smb2_file_full_ea_info *ea = NULL;
+ struct smb2_query_info_rsp *rsp;
+ int rc, used_len = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = CIFS_CP_CREATE_CLOSE_OP;
++ oplock = SMB2_OPLOCK_LEVEL_NONE;
++ server = cifs_pick_channel(ses);
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+@@ -1181,6 +1205,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ .disposition = FILE_OPEN,
+ .create_options = cifs_create_options(cifs_sb, 0),
+ .fid = &fid,
++ .replay = !!(retries),
+ };
+
+ rc = SMB2_open_init(tcon, server,
+@@ -1228,6 +1253,12 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ goto sea_exit;
+ smb2_set_related(&rqst[2]);
+
++ if (retries) {
++ smb2_set_replay(server, &rqst[0]);
++ smb2_set_replay(server, &rqst[1]);
++ smb2_set_replay(server, &rqst[2]);
++ }
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
+ resp_buftype, rsp_iov);
+@@ -1244,6 +1275,11 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ kfree(vars);
+ out_free_path:
+ kfree(utf16_path);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+ #endif
+@@ -1376,14 +1412,14 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
+ }
+
+-static void
++static int
+ smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid *fid)
+ {
+- SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
++ return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ }
+
+-static void
++static int
+ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile)
+ {
+@@ -1394,7 +1430,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+ rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, &file_inf);
+ if (rc)
+- return;
++ return rc;
+
+ inode = d_inode(cfile->dentry);
+
+@@ -1403,12 +1439,14 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+
+ /* Creation time should not need to be updated on close */
+ if (file_inf.LastWriteTime)
+- inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
++ inode_set_mtime_to_ts(inode,
++ cifs_NTtimeToUnix(file_inf.LastWriteTime));
+ if (file_inf.ChangeTime)
+ inode_set_ctime_to_ts(inode,
+ cifs_NTtimeToUnix(file_inf.ChangeTime));
+ if (file_inf.LastAccessTime)
+- inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
++ inode_set_atime_to_ts(inode,
++ cifs_NTtimeToUnix(file_inf.LastAccessTime));
+
+ /*
+ * i_blocks is not related to (i_size / i_blksize),
+@@ -1421,6 +1459,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
+
+ /* End of file and Attributes should not have to be updated on close */
+ spin_unlock(&inode->i_lock);
++ return rc;
+ }
+
+ static int
+@@ -1466,7 +1505,7 @@ smb2_ioctl_query_info(const unsigned int xid,
+ struct smb_rqst *rqst;
+ struct kvec *rsp_iov;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ char __user *arg = (char __user *)p;
+ struct smb_query_info qi;
+ struct smb_query_info __user *pqi;
+@@ -1483,6 +1522,13 @@ smb2_ioctl_query_info(const unsigned int xid,
+ void *data[2];
+ int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
+ void (*free_req1_func)(struct smb_rqst *r);
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = CIFS_CP_CREATE_CLOSE_OP;
++ oplock = SMB2_OPLOCK_LEVEL_NONE;
++ server = cifs_pick_channel(ses);
+
+ vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+ if (vars == NULL)
+@@ -1526,6 +1572,7 @@ smb2_ioctl_query_info(const unsigned int xid,
+ .disposition = FILE_OPEN,
+ .create_options = cifs_create_options(cifs_sb, create_options),
+ .fid = &fid,
++ .replay = !!(retries),
+ };
+
+ if (qi.flags & PASSTHRU_FSCTL) {
+@@ -1623,6 +1670,12 @@ smb2_ioctl_query_info(const unsigned int xid,
+ goto free_req_1;
+ smb2_set_related(&rqst[2]);
+
++ if (retries) {
++ smb2_set_replay(server, &rqst[0]);
++ smb2_set_replay(server, &rqst[1]);
++ smb2_set_replay(server, &rqst[2]);
++ }
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
+ resp_buftype, rsp_iov);
+@@ -1683,6 +1736,11 @@ smb2_ioctl_query_info(const unsigned int xid,
+ kfree(buffer);
+ free_vars:
+ kfree(vars);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -1913,7 +1971,6 @@ static int
+ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
+ {
+- __le64 eof = cpu_to_le64(size);
+ struct inode *inode;
+
+ /*
+@@ -1930,7 +1987,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+
+ return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid, cfile->pid, &eof);
++ cfile->fid.volatile_fid, cfile->pid, size);
+ }
+
+ static int
+@@ -2210,8 +2267,14 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_open_parms oparms;
+ struct smb2_query_directory_rsp *qd_rsp = NULL;
+ struct smb2_create_rsp *op_rsp = NULL;
+- struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+- int retry_count = 0;
++ struct TCP_Server_Info *server;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ oplock = SMB2_OPLOCK_LEVEL_NONE;
++ server = cifs_pick_channel(tcon->ses);
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path)
+@@ -2236,6 +2299,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ .disposition = FILE_OPEN,
+ .create_options = cifs_create_options(cifs_sb, 0),
+ .fid = fid,
++ .replay = !!(retries),
+ };
+
+ rc = SMB2_open_init(tcon, server,
+@@ -2261,14 +2325,15 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+
+ smb2_set_related(&rqst[1]);
+
+-again:
++ if (retries) {
++ smb2_set_replay(server, &rqst[0]);
++ smb2_set_replay(server, &rqst[1]);
++ }
++
+ rc = compound_send_recv(xid, tcon->ses, server,
+ flags, 2, rqst,
+ resp_buftype, rsp_iov);
+
+- if (rc == -EAGAIN && retry_count++ < 10)
+- goto again;
+-
+ /* If the open failed there is nothing to do */
+ op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
+@@ -2316,6 +2381,11 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ SMB2_query_directory_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -2411,6 +2481,8 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ if (cifs_ses_exiting(ses))
++ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
+ spin_lock(&tcon->tc_lock);
+@@ -2440,6 +2512,22 @@ smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+ CIFS_CACHE_READ(cinode) ? 1 : 0);
+ }
+
++void
++smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
++{
++ struct smb2_hdr *shdr;
++
++ if (server->dialect < SMB30_PROT_ID)
++ return;
++
++ shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
++ if (shdr == NULL) {
++ cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
++ return;
++ }
++ shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
++}
++
+ void
+ smb2_set_related(struct smb_rqst *rqst)
+ {
+@@ -2512,6 +2600,27 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
+ shdr->NextCommand = cpu_to_le32(len);
+ }
+
++/*
++ * helper function for exponential backoff and for checking whether the error is replayable
++ */
++bool smb2_should_replay(struct cifs_tcon *tcon,
++ int *pretries,
++ int *pcur_sleep)
++{
++ if (!pretries || !pcur_sleep)
++ return false;
++
++ if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
++ msleep(*pcur_sleep);
++ (*pcur_sleep) = ((*pcur_sleep) << 1);
++ if ((*pcur_sleep) > CIFS_MAX_SLEEP)
++ (*pcur_sleep) = CIFS_MAX_SLEEP;
++ return true;
++ }
++
++ return false;
++}
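smb2_should_replay() above implements a capped exponential backoff: sleep for the current interval, double it, clamp it at CIFS_MAX_SLEEP, and allow another attempt while the retry budget lasts. A userspace sketch of the same policy, assuming invented values for MAX_SLEEP_MS and the retry limit (the kernel sleeps via msleep()):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SLEEP_MS 2000 /* stand-in for CIFS_MAX_SLEEP */

static bool should_replay(int max_retries, int *retries, int *sleep_ms)
{
        if ((*retries)++ >= max_retries)
                return false;
        /* the kernel calls msleep(*sleep_ms) here */
        *sleep_ms <<= 1; /* exponential growth */
        if (*sleep_ms > MAX_SLEEP_MS)
                *sleep_ms = MAX_SLEEP_MS; /* capped */
        return true;
}

int main(void)
{
        int retries = 0, sleep_ms = 1;

        while (should_replay(5, &retries, &sleep_ms))
                printf("attempt %d, next sleep %d ms\n", retries, sleep_ms);
        return 0;
}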
++
+ /*
+ * Passes the query info response back to the caller on success.
+ * Caller need to free this with free_rsp_buf().
+@@ -2525,7 +2634,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ struct smb2_compound_vars *vars;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ int flags = CIFS_CP_CREATE_CLOSE_OP;
+ struct smb_rqst *rqst;
+ int resp_buftype[3];
+@@ -2536,6 +2645,13 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc;
+ __le16 *utf16_path;
+ struct cached_fid *cfid = NULL;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = CIFS_CP_CREATE_CLOSE_OP;
++ oplock = SMB2_OPLOCK_LEVEL_NONE;
++ server = cifs_pick_channel(ses);
+
+ if (!path)
+ path = "";
+@@ -2572,6 +2688,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ .disposition = FILE_OPEN,
+ .create_options = cifs_create_options(cifs_sb, 0),
+ .fid = &fid,
++ .replay = !!(retries),
+ };
+
+ rc = SMB2_open_init(tcon, server,
+@@ -2616,6 +2733,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ goto qic_exit;
+ smb2_set_related(&rqst[2]);
+
++ if (retries) {
++ if (!cfid) {
++ smb2_set_replay(server, &rqst[0]);
++ smb2_set_replay(server, &rqst[2]);
++ }
++ smb2_set_replay(server, &rqst[1]);
++ }
++
+ if (cfid) {
+ rc = compound_send_recv(xid, ses, server,
+ flags, 1, &rqst[1],
+@@ -2648,12 +2773,17 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ kfree(vars);
+ out_free_path:
+ kfree(utf16_path);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+ static int
+ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+- struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
++ const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
+ {
+ struct smb2_query_info_rsp *rsp;
+ struct smb2_fs_full_size_info *info = NULL;
+@@ -2662,7 +2792,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc;
+
+
+- rc = smb2_query_info_compound(xid, tcon, "",
++ rc = smb2_query_info_compound(xid, tcon, path,
+ FILE_READ_ATTRIBUTES,
+ FS_FULL_SIZE_INFORMATION,
+ SMB2_O_INFO_FILESYSTEM,
+@@ -2690,28 +2820,33 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+
+ static int
+ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+- struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
++ const char *path, struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
+ {
+ int rc;
+- __le16 srch_path = 0; /* Null - open root of share */
++ __le16 *utf16_path = NULL;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_open_parms oparms;
+ struct cifs_fid fid;
+
+ if (!tcon->posix_extensions)
+- return smb2_queryfs(xid, tcon, cifs_sb, buf);
++ return smb2_queryfs(xid, tcon, path, cifs_sb, buf);
+
+ oparms = (struct cifs_open_parms) {
+ .tcon = tcon,
+- .path = "",
++ .path = path,
+ .desired_access = FILE_READ_ATTRIBUTES,
+ .disposition = FILE_OPEN,
+ .create_options = cifs_create_options(cifs_sb, 0),
+ .fid = &fid,
+ };
+
+- rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
++ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
++ if (utf16_path == NULL)
++ return -ENOMEM;
++
++ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ NULL, NULL);
++ kfree(utf16_path);
+ if (rc)
+ return rc;
+
+@@ -2785,8 +2920,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ tcon = list_first_entry_or_null(&ses->tcon_list,
+ struct cifs_tcon,
+ tcon_list);
+- if (tcon)
++ if (tcon) {
+ tcon->tc_count++;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_dfs_refer);
++ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+
+@@ -2828,6 +2966,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ usleep_range(512, 2048);
+ } while (++retry_count < 5);
+
++ if (!rc && !dfs_rsp)
++ rc = -EIO;
+ if (rc) {
+ if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
+ cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
+@@ -2848,6 +2988,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ /* ipc tcons are not refcounted */
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tc_count--;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_dec_dfs_refer);
+ /* tc_count can never go negative */
+ WARN_ON(tcon->tc_count < 0);
+ spin_unlock(&cifs_tcp_ses_lock);
+@@ -2858,250 +3000,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ return rc;
+ }
+
+-static int
+-parse_reparse_posix(struct reparse_posix_data *symlink_buf,
+- u32 plen, char **target_path,
+- struct cifs_sb_info *cifs_sb)
+-{
+- unsigned int len;
+-
+- /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
+- len = le16_to_cpu(symlink_buf->ReparseDataLength);
+-
+- if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
+- cifs_dbg(VFS, "%lld not a supported symlink type\n",
+- le64_to_cpu(symlink_buf->InodeType));
+- return -EOPNOTSUPP;
+- }
+-
+- *target_path = cifs_strndup_from_utf16(
+- symlink_buf->PathBuffer,
+- len, true, cifs_sb->local_nls);
+- if (!(*target_path))
+- return -ENOMEM;
+-
+- convert_delimiter(*target_path, '/');
+- cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+-
+- return 0;
+-}
+-
+-static int
+-parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
+- u32 plen, char **target_path,
+- struct cifs_sb_info *cifs_sb)
+-{
+- unsigned int sub_len;
+- unsigned int sub_offset;
+-
+- /* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */
+-
+- sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
+- sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
+- if (sub_offset + 20 > plen ||
+- sub_offset + sub_len + 20 > plen) {
+- cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
+- return -EIO;
+- }
+-
+- *target_path = cifs_strndup_from_utf16(
+- symlink_buf->PathBuffer + sub_offset,
+- sub_len, true, cifs_sb->local_nls);
+- if (!(*target_path))
+- return -ENOMEM;
+-
+- convert_delimiter(*target_path, '/');
+- cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+-
+- return 0;
+-}
+-
+-static int
+-parse_reparse_point(struct reparse_data_buffer *buf,
+- u32 plen, char **target_path,
+- struct cifs_sb_info *cifs_sb)
+-{
+- if (plen < sizeof(struct reparse_data_buffer)) {
+- cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
+- plen);
+- return -EIO;
+- }
+-
+- if (plen < le16_to_cpu(buf->ReparseDataLength) +
+- sizeof(struct reparse_data_buffer)) {
+- cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
+- plen);
+- return -EIO;
+- }
+-
+- /* See MS-FSCC 2.1.2 */
+- switch (le32_to_cpu(buf->ReparseTag)) {
+- case IO_REPARSE_TAG_NFS:
+- return parse_reparse_posix(
+- (struct reparse_posix_data *)buf,
+- plen, target_path, cifs_sb);
+- case IO_REPARSE_TAG_SYMLINK:
+- return parse_reparse_symlink(
+- (struct reparse_symlink_data_buffer *)buf,
+- plen, target_path, cifs_sb);
+- default:
+- cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
+- le32_to_cpu(buf->ReparseTag));
+- return -EOPNOTSUPP;
+- }
+-}
+-
+-static int smb2_query_symlink(const unsigned int xid,
+- struct cifs_tcon *tcon,
+- struct cifs_sb_info *cifs_sb,
+- const char *full_path,
+- char **target_path,
+- struct kvec *rsp_iov)
+-{
+- struct reparse_data_buffer *buf;
+- struct smb2_ioctl_rsp *io = rsp_iov->iov_base;
+- u32 plen = le32_to_cpu(io->OutputCount);
+-
+- cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+-
+- buf = (struct reparse_data_buffer *)((u8 *)io +
+- le32_to_cpu(io->OutputOffset));
+- return parse_reparse_point(buf, plen, target_path, cifs_sb);
+-}
+-
+-static int smb2_query_reparse_point(const unsigned int xid,
+- struct cifs_tcon *tcon,
+- struct cifs_sb_info *cifs_sb,
+- const char *full_path,
+- u32 *tag, struct kvec *rsp,
+- int *rsp_buftype)
+-{
+- struct smb2_compound_vars *vars;
+- int rc;
+- __le16 *utf16_path = NULL;
+- __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+- struct cifs_open_parms oparms;
+- struct cifs_fid fid;
+- struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+- int flags = CIFS_CP_CREATE_CLOSE_OP;
+- struct smb_rqst *rqst;
+- int resp_buftype[3];
+- struct kvec *rsp_iov;
+- struct smb2_ioctl_rsp *ioctl_rsp;
+- struct reparse_data_buffer *reparse_buf;
+- u32 plen;
+-
+- cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+-
+- if (smb3_encryption_required(tcon))
+- flags |= CIFS_TRANSFORM_REQ;
+-
+- utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+- if (!utf16_path)
+- return -ENOMEM;
+-
+- resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+- vars = kzalloc(sizeof(*vars), GFP_KERNEL);
+- if (!vars) {
+- rc = -ENOMEM;
+- goto out_free_path;
+- }
+- rqst = vars->rqst;
+- rsp_iov = vars->rsp_iov;
+-
+- /*
+- * setup smb2open - TODO add optimization to call cifs_get_readable_path
+- * to see if there is a handle already open that we can use
+- */
+- rqst[0].rq_iov = vars->open_iov;
+- rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+-
+- oparms = (struct cifs_open_parms) {
+- .tcon = tcon,
+- .path = full_path,
+- .desired_access = FILE_READ_ATTRIBUTES,
+- .disposition = FILE_OPEN,
+- .create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
+- .fid = &fid,
+- };
+-
+- rc = SMB2_open_init(tcon, server,
+- &rqst[0], &oplock, &oparms, utf16_path);
+- if (rc)
+- goto query_rp_exit;
+- smb2_set_next_command(tcon, &rqst[0]);
+-
+-
+- /* IOCTL */
+- rqst[1].rq_iov = vars->io_iov;
+- rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
+-
+- rc = SMB2_ioctl_init(tcon, server,
+- &rqst[1], COMPOUND_FID,
+- COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
+- CIFSMaxBufSize -
+- MAX_SMB2_CREATE_RESPONSE_SIZE -
+- MAX_SMB2_CLOSE_RESPONSE_SIZE);
+- if (rc)
+- goto query_rp_exit;
+-
+- smb2_set_next_command(tcon, &rqst[1]);
+- smb2_set_related(&rqst[1]);
+-
+- /* Close */
+- rqst[2].rq_iov = &vars->close_iov;
+- rqst[2].rq_nvec = 1;
+-
+- rc = SMB2_close_init(tcon, server,
+- &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+- if (rc)
+- goto query_rp_exit;
+-
+- smb2_set_related(&rqst[2]);
+-
+- rc = compound_send_recv(xid, tcon->ses, server,
+- flags, 3, rqst,
+- resp_buftype, rsp_iov);
+-
+- ioctl_rsp = rsp_iov[1].iov_base;
+-
+- /*
+- * Open was successful and we got an ioctl response.
+- */
+- if (rc == 0) {
+- /* See MS-FSCC 2.3.23 */
+-
+- reparse_buf = (struct reparse_data_buffer *)
+- ((char *)ioctl_rsp +
+- le32_to_cpu(ioctl_rsp->OutputOffset));
+- plen = le32_to_cpu(ioctl_rsp->OutputCount);
+-
+- if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
+- rsp_iov[1].iov_len) {
+- cifs_tcon_dbg(FYI, "srv returned invalid ioctl len: %d\n",
+- plen);
+- rc = -EIO;
+- goto query_rp_exit;
+- }
+- *tag = le32_to_cpu(reparse_buf->ReparseTag);
+- *rsp = rsp_iov[1];
+- *rsp_buftype = resp_buftype[1];
+- resp_buftype[1] = CIFS_NO_BUFFER;
+- }
+-
+- query_rp_exit:
+- SMB2_open_free(&rqst[0]);
+- SMB2_ioctl_free(&rqst[1]);
+- SMB2_close_free(&rqst[2]);
+- free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+- free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+- free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+- kfree(vars);
+-out_free_path:
+- kfree(utf16_path);
+- return rc;
+-}
+-
+ static struct cifs_ntsd *
+ get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
+@@ -3293,15 +3191,17 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+ }
+
+ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+- loff_t offset, loff_t len, bool keep_size)
++ unsigned long long offset, unsigned long long len,
++ bool keep_size)
+ {
+ struct cifs_ses *ses = tcon->ses;
+ struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsFileInfo *cfile = file->private_data;
++ struct netfs_inode *ictx = netfs_inode(inode);
++ unsigned long long i_size, new_size, remote_size;
+ long rc;
+ unsigned int xid;
+- __le64 eof;
+
+ xid = get_xid();
+
+@@ -3311,6 +3211,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+
++ i_size = i_size_read(inode);
++ remote_size = ictx->remote_i_size;
++ if (offset + len >= remote_size && offset < i_size) {
++ unsigned long long top = umin(offset + len, i_size);
++
++ rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
++ if (rc < 0)
++ goto zero_range_exit;
++ }
++
+ /*
+ * We zero the range through ioctl, so we need remove the page caches
+ * first, otherwise the data may be inconsistent with the server.
+@@ -3329,10 +3239,14 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ /*
+ * do we also need to change the size of the file?
+ */
+- if (keep_size == false && i_size_read(inode) < offset + len) {
+- eof = cpu_to_le64(offset + len);
++ new_size = offset + len;
++ if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid, cfile->pid, &eof);
++ cfile->fid.volatile_fid, cfile->pid, new_size);
++ if (rc >= 0) {
++ truncate_setsize(inode, new_size);
++ fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
++ }
+ }
+
+ zero_range_exit:
+@@ -3354,6 +3268,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ struct inode *inode = file_inode(file);
+ struct cifsFileInfo *cfile = file->private_data;
+ struct file_zero_data_information fsctl_buf;
++ unsigned long long end = offset + len, i_size, remote_i_size;
+ long rc;
+ unsigned int xid;
+ __u8 set_sparse = 1;
+@@ -3385,6 +3300,27 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ (char *)&fsctl_buf,
+ sizeof(struct file_zero_data_information),
+ CIFSMaxBufSize, NULL, NULL);
++
++ if (rc)
++ goto unlock;
++
++ /* If there's dirty data in the buffer that would extend the EOF if it
++ * were written, then we need to move the EOF marker over to the lower
++ * of the high end of the hole and the proposed EOF. The problem is
++ * that if we locally hole-punch the tail of the dirty data, the
++ * proposed EOF update will end up in the wrong place.
++ */
++ i_size = i_size_read(inode);
++ remote_i_size = netfs_inode(inode)->remote_i_size;
++ if (end > remote_i_size && i_size > remote_i_size) {
++ unsigned long long extend_to = umin(end, i_size);
++ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid, cfile->pid, extend_to);
++ if (rc >= 0)
++ netfs_inode(inode)->remote_i_size = extend_to;
++ }
++
++unlock:
+ filemap_invalidate_unlock(inode->i_mapping);
+ out:
+ inode_unlock(inode);
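The smb3_punch_hole() change above adjusts the server EOF when the punched range overlaps locally dirty data lying beyond the server's idea of the file size: the server EOF moves to the lower of the hole's end and the local i_size. A small arithmetic sketch of that rule, with invented sizes:

#include <stdio.h>

static unsigned long long umin(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long long offset = 4096, len = 8192;
        unsigned long long i_size = 10000;        /* local size   */
        unsigned long long remote_i_size = 4096;  /* server's EOF */
        unsigned long long end = offset + len;

        /* same condition and bound as the hunk above */
        if (end > remote_i_size && i_size > remote_i_size)
                printf("extend server EOF to %llu\n", umin(end, i_size));
        return 0;
}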
+@@ -3521,7 +3457,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile = file->private_data;
+ long rc = -EOPNOTSUPP;
+ unsigned int xid;
+- __le64 eof;
++ loff_t new_eof;
+
+ xid = get_xid();
+
+@@ -3550,14 +3486,14 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
+ smb2_set_sparse(xid, tcon, cfile, inode, false);
+
+- eof = cpu_to_le64(off + len);
++ new_eof = off + len;
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid, cfile->pid, &eof);
++ cfile->fid.volatile_fid, cfile->pid, new_eof);
+ if (rc == 0) {
+- cifsi->server_eof = off + len;
+- cifs_setsize(inode, off + len);
++ cifsi->server_eof = new_eof;
++ cifs_setsize(inode, new_eof);
+ cifs_truncate_page(inode->i_mapping, inode->i_size);
+- truncate_setsize(inode, off + len);
++ truncate_setsize(inode, new_eof);
+ }
+ goto out;
+ }
+@@ -3648,8 +3584,7 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
+ struct inode *inode = file_inode(file);
+ struct cifsFileInfo *cfile = file->private_data;
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+- __le64 eof;
+- loff_t old_eof;
++ loff_t old_eof, new_eof;
+
+ xid = get_xid();
+
+@@ -3674,9 +3609,9 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
+ if (rc < 0)
+ goto out_2;
+
+- eof = cpu_to_le64(old_eof - len);
++ new_eof = old_eof - len;
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid, cfile->pid, &eof);
++ cfile->fid.volatile_fid, cfile->pid, new_eof);
+ if (rc < 0)
+ goto out_2;
+
+@@ -3700,8 +3635,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+ unsigned int xid;
+ struct cifsFileInfo *cfile = file->private_data;
+ struct inode *inode = file_inode(file);
+- __le64 eof;
+- __u64 count, old_eof;
++ __u64 count, old_eof, new_eof;
+
+ xid = get_xid();
+
+@@ -3714,19 +3648,22 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+ }
+
+ count = old_eof - off;
+- eof = cpu_to_le64(old_eof + len);
++ new_eof = old_eof + len;
+
+ filemap_invalidate_lock(inode->i_mapping);
+- rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1);
++ rc = filemap_write_and_wait_range(inode->i_mapping, off, new_eof - 1);
+ if (rc < 0)
+ goto out_2;
+ truncate_pagecache_range(inode, off, old_eof);
+
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+- cfile->fid.volatile_fid, cfile->pid, &eof);
++ cfile->fid.volatile_fid, cfile->pid, new_eof);
+ if (rc < 0)
+ goto out_2;
+
++ truncate_setsize(inode, new_eof);
++ fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
++
+ rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
+ if (rc < 0)
+ goto out_2;
+@@ -4313,7 +4250,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
+ */
+ static int
+ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+- struct smb_rqst *rqst, int enc)
++ struct smb_rqst *rqst, int enc, struct crypto_aead *tfm)
+ {
+ struct smb2_transform_hdr *tr_hdr =
+ (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
+@@ -4324,8 +4261,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ u8 key[SMB3_ENC_DEC_KEY_SIZE];
+ struct aead_request *req;
+ u8 *iv;
+- DECLARE_CRYPTO_WAIT(wait);
+- struct crypto_aead *tfm;
+ unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+ void *creq;
+ size_t sensitive_size;
+@@ -4337,14 +4272,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ return rc;
+ }
+
+- rc = smb3_crypto_aead_allocate(server);
+- if (rc) {
+- cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+- return rc;
+- }
+-
+- tfm = enc ? server->secmech.enc : server->secmech.dec;
+-
+ if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
+@@ -4384,11 +4311,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+ aead_request_set_ad(req, assoc_data_len);
+
+- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+- crypto_req_done, &wait);
+-
+- rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+- : crypto_aead_decrypt(req), &wait);
++ rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+
+ if (!rc && enc)
+ memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
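crypt_message() now takes its AEAD transform from the caller instead of picking it off the server: the regular path keeps borrowing the shared per-server handle, while offloaded decryption (reworked in a later hunk) allocates a private one and frees it when done, so concurrent offloaded work cannot disturb the shared transform. A plain-C sketch of that ownership split; the types and helpers are stand-ins, not the kernel crypto API:

#include <stdio.h>
#include <stdlib.h>

struct tfm { const char *alg; };

static struct tfm *alloc_tfm(const char *alg)
{
        struct tfm *t = malloc(sizeof(*t));

        if (t)
                t->alg = alg;
        return t;
}

static int decrypt(struct tfm *server_dec, int is_offloaded, int is_gcm)
{
        struct tfm *t;

        if (is_offloaded)
                t = alloc_tfm(is_gcm ? "gcm(aes)" : "ccm(aes)");
        else
                t = server_dec;    /* shared handle: do not free */
        if (!t)
                return -1;

        printf("decrypting with %s\n", t->alg);
        if (is_offloaded)
                free(t);           /* private handle: caller frees */
        return 0;
}

int main(void)
{
        struct tfm shared = { "gcm(aes)" };

        return decrypt(&shared, 1, 1) | decrypt(&shared, 0, 1);
}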
+@@ -4495,7 +4418,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ /* fill the 1st iov with a transform header */
+ fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
+
+- rc = crypt_message(server, num_rqst, new_rq, 1);
++ rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc);
+ cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
+ if (rc)
+ goto err_free;
+@@ -4520,8 +4443,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ unsigned int buf_data_size, struct iov_iter *iter,
+ bool is_offloaded)
+ {
+- struct kvec iov[2];
++ struct crypto_aead *tfm;
+ struct smb_rqst rqst = {NULL};
++ struct kvec iov[2];
+ size_t iter_size = 0;
+ int rc;
+
+@@ -4538,9 +4462,31 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ iter_size = iov_iter_count(iter);
+ }
+
+- rc = crypt_message(server, 1, &rqst, 0);
++ if (is_offloaded) {
++ if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
++ (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
++ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
++ else
++ tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
++ if (IS_ERR(tfm)) {
++ rc = PTR_ERR(tfm);
++ cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc);
++
++ return rc;
++ }
++ } else {
++ if (unlikely(!server->secmech.dec))
++ return -EIO;
++
++ tfm = server->secmech.dec;
++ }
++
++ rc = crypt_message(server, 1, &rqst, 0, tfm);
+ cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
+
++ if (is_offloaded)
++ crypto_free_aead(tfm);
++
+ if (rc)
+ return rc;
+
+@@ -4920,6 +4866,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ struct smb2_hdr *shdr;
+ unsigned int pdu_length = server->pdu_size;
+ unsigned int buf_size;
++ unsigned int next_cmd;
+ struct mid_q_entry *mid_entry;
+ int next_is_large;
+ char *next_buffer = NULL;
+@@ -4948,14 +4895,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ next_is_large = server->large_buf;
+ one_more:
+ shdr = (struct smb2_hdr *)buf;
+- if (shdr->NextCommand) {
++ next_cmd = le32_to_cpu(shdr->NextCommand);
++ if (next_cmd) {
++ if (WARN_ON_ONCE(next_cmd > pdu_length))
++ return -1;
+ if (next_is_large)
+ next_buffer = (char *)cifs_buf_get();
+ else
+ next_buffer = (char *)cifs_small_buf_get();
+- memcpy(next_buffer,
+- buf + le32_to_cpu(shdr->NextCommand),
+- pdu_length - le32_to_cpu(shdr->NextCommand));
++ memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
+ }
+
+ mid_entry = smb2_find_mid(server, buf);
+@@ -4979,8 +4927,8 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ else
+ ret = cifs_handle_standard(server, mid_entry);
+
+- if (ret == 0 && shdr->NextCommand) {
+- pdu_length -= le32_to_cpu(shdr->NextCommand);
++ if (ret == 0 && next_cmd) {
++ pdu_length -= next_cmd;
+ server->large_buf = next_is_large;
+ if (next_is_large)
+ server->bigbuf = buf = next_buffer;
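The receive_encrypted_standard() hunk above reads NextCommand once into next_cmd and refuses offsets larger than the PDU before they feed the memcpy() and the length arithmetic. A standalone sketch of the guard, with invented sizes:

#include <stdio.h>

static int split_next(unsigned int next_cmd, unsigned int pdu_length)
{
        if (next_cmd > pdu_length) /* would read past the buffer */
                return -1;
        if (next_cmd)
                printf("next message: %u bytes\n", pdu_length - next_cmd);
        return 0;
}

int main(void)
{
        printf("%d\n", split_next(4096, 65536));  /* ok: 0        */
        printf("%d\n", split_next(70000, 65536)); /* rejected: -1 */
        return 0;
}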
+@@ -5043,105 +4991,125 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ NULL, 0, false);
+ }
+
+-static int
+-smb2_next_header(char *buf)
++static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
++ unsigned int *noff)
+ {
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
+
+- if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
+- return sizeof(struct smb2_transform_hdr) +
+- le32_to_cpu(t_hdr->OriginalMessageSize);
+-
+- return le32_to_cpu(hdr->NextCommand);
++ if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
++ *noff = le32_to_cpu(t_hdr->OriginalMessageSize);
++ if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
++ return -EINVAL;
++ } else {
++ *noff = le32_to_cpu(hdr->NextCommand);
++ }
++ if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
++ return -EINVAL;
++ return 0;
+ }
+
+-static int
+-smb2_make_node(unsigned int xid, struct inode *inode,
+- struct dentry *dentry, struct cifs_tcon *tcon,
+- const char *full_path, umode_t mode, dev_t dev)
++static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
+ {
++ struct TCP_Server_Info *server = tcon->ses->server;
++ struct cifs_open_parms oparms;
++ struct cifs_io_parms io_parms = {};
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+- int rc = -EPERM;
+- struct cifs_open_info_data buf = {};
+- struct cifs_io_parms io_parms = {0};
+- __u32 oplock = 0;
+ struct cifs_fid fid;
+- struct cifs_open_parms oparms;
+ unsigned int bytes_written;
+- struct win_dev *pdev;
++ struct win_dev pdev = {};
+ struct kvec iov[2];
++ __u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
++ int rc;
+
+- /*
+- * Check if mounted with mount parm 'sfu' mount parm.
+- * SFU emulation should work with all servers, but only
+- * supports block and char device (no socket & fifo),
+- * and was used by default in earlier versions of Windows
+- */
+- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+- return rc;
++ switch (mode & S_IFMT) {
++ case S_IFCHR:
++ strscpy(pdev.type, "IntxCHR", strlen("IntxChr"));
++ pdev.major = cpu_to_le64(MAJOR(dev));
++ pdev.minor = cpu_to_le64(MINOR(dev));
++ break;
++ case S_IFBLK:
++ strscpy(pdev.type, "IntxBLK", strlen("IntxBLK"));
++ pdev.major = cpu_to_le64(MAJOR(dev));
++ pdev.minor = cpu_to_le64(MINOR(dev));
++ break;
++ case S_IFIFO:
++ strscpy(pdev.type, "LnxFIFO", strlen("LnxFIFO"));
++ break;
++ default:
++ return -EPERM;
++ }
+
+- /*
+- * TODO: Add ability to create instead via reparse point. Windows (e.g.
+- * their current NFS server) uses this approach to expose special files
+- * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+- */
++ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
++ FILE_CREATE, CREATE_NOT_DIR |
++ CREATE_OPTION_SPECIAL, ACL_NO_MODE);
++ oparms.fid = &fid;
+
+- if (!S_ISCHR(mode) && !S_ISBLK(mode))
++ rc = server->ops->open(xid, &oparms, &oplock, NULL);
++ if (rc)
+ return rc;
+
+- cifs_dbg(FYI, "sfu compat create special file\n");
++ io_parms.pid = current->tgid;
++ io_parms.tcon = tcon;
++ io_parms.length = sizeof(pdev);
++ iov[1].iov_base = &pdev;
++ iov[1].iov_len = sizeof(pdev);
+
+- oparms = (struct cifs_open_parms) {
+- .tcon = tcon,
+- .cifs_sb = cifs_sb,
+- .desired_access = GENERIC_WRITE,
+- .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+- CREATE_OPTION_SPECIAL),
+- .disposition = FILE_CREATE,
+- .path = full_path,
+- .fid = &fid,
+- };
++ rc = server->ops->sync_write(xid, &fid, &io_parms,
++ &bytes_written, iov, 1);
++ server->ops->close(xid, tcon, &fid);
++ return rc;
++}
+
+- if (tcon->ses->server->oplocks)
+- oplock = REQ_OPLOCK;
+- else
+- oplock = 0;
+- rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
++int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
++{
++ struct inode *new = NULL;
++ int rc;
++
++ rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
++ full_path, mode, dev);
+ if (rc)
+ return rc;
+
++ if (tcon->posix_extensions) {
++ rc = smb311_posix_get_inode_info(&new, full_path, NULL,
++ inode->i_sb, xid);
++ } else if (tcon->unix_ext) {
++ rc = cifs_get_inode_info_unix(&new, full_path,
++ inode->i_sb, xid);
++ } else {
++ rc = cifs_get_inode_info(&new, full_path, NULL,
++ inode->i_sb, xid, NULL);
++ }
++ if (!rc)
++ d_instantiate(dentry, new);
++ return rc;
++}
++
++static int smb2_make_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev)
++{
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ int rc;
++
+ /*
+- * BB Do not bother to decode buf since no local inode yet to put
+- * timestamps in, but we can reuse it safely.
++ * Check if mounted with mount parm 'sfu' mount parm.
++ * SFU emulation should work with all servers, but only
++ * supports block and char device (no socket & fifo),
++ * and was used by default in earlier versions of Windows
+ */
+-
+- pdev = (struct win_dev *)&buf.fi;
+- io_parms.pid = current->tgid;
+- io_parms.tcon = tcon;
+- io_parms.offset = 0;
+- io_parms.length = sizeof(struct win_dev);
+- iov[1].iov_base = &buf.fi;
+- iov[1].iov_len = sizeof(struct win_dev);
+- if (S_ISCHR(mode)) {
+- memcpy(pdev->type, "IntxCHR", 8);
+- pdev->major = cpu_to_le64(MAJOR(dev));
+- pdev->minor = cpu_to_le64(MINOR(dev));
+- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+- &bytes_written, iov, 1);
+- } else if (S_ISBLK(mode)) {
+- memcpy(pdev->type, "IntxBLK", 8);
+- pdev->major = cpu_to_le64(MAJOR(dev));
+- pdev->minor = cpu_to_le64(MINOR(dev));
+- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+- &bytes_written, iov, 1);
+- }
+- tcon->ses->server->ops->close(xid, tcon, &fid);
+- d_drop(dentry);
+-
+- /* FIXME: add code here to set EAs */
+-
+- cifs_free_open_info(&buf);
++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
++ rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
++ full_path, mode, dev);
++ } else {
++ rc = smb2_mknod_reparse(xid, inode, dentry, tcon,
++ full_path, mode, dev);
++ }
+ return rc;
+ }
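Editorial note: the SFU representation written by __cifs_sfu_make_node() above is a small blob with an 8-byte type tag ("IntxCHR", "IntxBLK" or "LnxFIFO") followed by major and minor numbers as little-endian 64-bit values. A rough userspace sketch of that layout; the struct shape here is inferred from the code in the hunk, not copied from the kernel headers:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* inferred shape of the on-the-wire SFU device blob */
    struct win_dev {
            char     type[8];   /* "IntxCHR", "IntxBLK" or "LnxFIFO" */
            uint64_t major;     /* little-endian on the wire */
            uint64_t minor;
    } __attribute__((packed));

    static void encode_chr(struct win_dev *pdev, unsigned int maj,
                           unsigned int min)
    {
            memset(pdev, 0, sizeof(*pdev));
            memcpy(pdev->type, "IntxCHR", 7);   /* NUL-padded 8-byte tag */
            pdev->major = htole64(maj);
            pdev->minor = htole64(min);
    }

    int main(void)
    {
            struct win_dev d;

            encode_chr(&d, 1, 3);               /* /dev/null is char 1:3 */
            printf("%.7s %llu:%llu\n", d.type,
                   (unsigned long long)le64toh(d.major),
                   (unsigned long long)le64toh(d.minor));
            return 0;
    }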
+
+@@ -5195,9 +5163,10 @@ struct smb_version_operations smb20_operations = {
+ .unlink = smb2_unlink,
+ .rename = smb2_rename_path,
+ .create_hardlink = smb2_create_hardlink,
+- .query_symlink = smb2_query_symlink,
++ .parse_reparse_point = smb2_parse_reparse_point,
+ .query_mf_symlink = smb3_query_mf_symlink,
+ .create_mf_symlink = smb3_create_mf_symlink,
++ .create_reparse_symlink = smb2_create_reparse_symlink,
+ .open = smb2_open_file,
+ .set_fid = smb2_set_fid,
+ .close = smb2_close_file,
+@@ -5297,9 +5266,10 @@ struct smb_version_operations smb21_operations = {
+ .unlink = smb2_unlink,
+ .rename = smb2_rename_path,
+ .create_hardlink = smb2_create_hardlink,
+- .query_symlink = smb2_query_symlink,
++ .parse_reparse_point = smb2_parse_reparse_point,
+ .query_mf_symlink = smb3_query_mf_symlink,
+ .create_mf_symlink = smb3_create_mf_symlink,
++ .create_reparse_symlink = smb2_create_reparse_symlink,
+ .open = smb2_open_file,
+ .set_fid = smb2_set_fid,
+ .close = smb2_close_file,
+@@ -5384,6 +5354,7 @@ struct smb_version_operations smb30_operations = {
+ .tree_connect = SMB2_tcon,
+ .tree_disconnect = SMB2_tdis,
+ .qfs_tcon = smb3_qfs_tcon,
++ .query_server_interfaces = SMB3_request_interfaces,
+ .is_path_accessible = smb2_is_path_accessible,
+ .can_echo = smb2_can_echo,
+ .echo = SMB2_echo,
+@@ -5402,9 +5373,10 @@ struct smb_version_operations smb30_operations = {
+ .unlink = smb2_unlink,
+ .rename = smb2_rename_path,
+ .create_hardlink = smb2_create_hardlink,
+- .query_symlink = smb2_query_symlink,
++ .parse_reparse_point = smb2_parse_reparse_point,
+ .query_mf_symlink = smb3_query_mf_symlink,
+ .create_mf_symlink = smb3_create_mf_symlink,
++ .create_reparse_symlink = smb2_create_reparse_symlink,
+ .open = smb2_open_file,
+ .set_fid = smb2_set_fid,
+ .close = smb2_close_file,
+@@ -5498,6 +5470,7 @@ struct smb_version_operations smb311_operations = {
+ .tree_connect = SMB2_tcon,
+ .tree_disconnect = SMB2_tdis,
+ .qfs_tcon = smb3_qfs_tcon,
++ .query_server_interfaces = SMB3_request_interfaces,
+ .is_path_accessible = smb2_is_path_accessible,
+ .can_echo = smb2_can_echo,
+ .echo = SMB2_echo,
+@@ -5516,9 +5489,10 @@ struct smb_version_operations smb311_operations = {
+ .unlink = smb2_unlink,
+ .rename = smb2_rename_path,
+ .create_hardlink = smb2_create_hardlink,
+- .query_symlink = smb2_query_symlink,
++ .parse_reparse_point = smb2_parse_reparse_point,
+ .query_mf_symlink = smb3_query_mf_symlink,
+ .create_mf_symlink = smb3_create_mf_symlink,
++ .create_reparse_symlink = smb2_create_reparse_symlink,
+ .open = smb2_open_file,
+ .set_fid = smb2_set_fid,
+ .close = smb2_close_file,
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index c75a80bb6d9eef..83a03201bb8628 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -80,6 +80,9 @@ int smb3_encryption_required(const struct cifs_tcon *tcon)
+ if (tcon->seal &&
+ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ return 1;
++ if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
++ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
++ return 1;
+ return 0;
+ }
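Editorial note: the added check in smb3_encryption_required() above makes a mount-wide "must seal" policy force encryption on any tcon whose server advertises SMB2_GLOBAL_CAP_ENCRYPTION, not only shares that were explicitly sealed. The decision is a plain policy-AND-capability gate; a compact sketch with made-up flag values (the real constants live in the CIFS headers):

    #include <stdio.h>

    /* illustrative values only */
    #define MUST_SEAL       0x1u
    #define CAP_ENCRYPTION  0x2u

    static int encryption_required(unsigned int secflags, unsigned int caps,
                                   int share_sealed)
    {
            if (share_sealed && (caps & CAP_ENCRYPTION))
                    return 1;               /* per-share request */
            if ((secflags & MUST_SEAL) && (caps & CAP_ENCRYPTION))
                    return 1;               /* global policy */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", encryption_required(MUST_SEAL, CAP_ENCRYPTION, 0));
            return 0;
    }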
+
+@@ -156,13 +159,64 @@ smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
+ return;
+ }
+
++/* helper function for code reuse */
++static int
++cifs_chan_skip_or_disable(struct cifs_ses *ses,
++ struct TCP_Server_Info *server,
++ bool from_reconnect)
++{
++ struct TCP_Server_Info *pserver;
++ unsigned int chan_index;
++
++ if (SERVER_IS_CHAN(server)) {
++ cifs_dbg(VFS,
++ "server %s does not support multichannel anymore. Skip secondary channel\n",
++ ses->server->hostname);
++
++ spin_lock(&ses->chan_lock);
++ chan_index = cifs_ses_get_chan_index(ses, server);
++ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
++ spin_unlock(&ses->chan_lock);
++ goto skip_terminate;
++ }
++
++ ses->chans[chan_index].server = NULL;
++ server->terminate = true;
++ spin_unlock(&ses->chan_lock);
++
++ /*
++ * the above reference of server by channel
++ * needs to be dropped without holding chan_lock
++ * as cifs_put_tcp_session takes a higher lock
++ * i.e. cifs_tcp_ses_lock
++ */
++ cifs_put_tcp_session(server, from_reconnect);
++
++ cifs_signal_cifsd_for_reconnect(server, false);
++
++ /* mark primary server as needing reconnect */
++ pserver = server->primary_server;
++ cifs_signal_cifsd_for_reconnect(pserver, false);
++skip_terminate:
++ return -EHOSTDOWN;
++ }
++
++ cifs_server_dbg(VFS,
++ "server does not support multichannel anymore. Disable all other channels\n");
++ cifs_disable_secondary_channels(ses);
++
++
++ return 0;
++}
++
+ static int
+ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+- struct TCP_Server_Info *server)
++ struct TCP_Server_Info *server, bool from_reconnect)
+ {
+ int rc = 0;
+ struct nls_table *nls_codepage = NULL;
+ struct cifs_ses *ses;
++ int xid;
+
+ /*
+ * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
+@@ -223,6 +277,12 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ return -EAGAIN;
+ }
+ }
++
++ /* if server is marked for termination, cifsd will cleanup */
++ if (server->terminate) {
++ spin_unlock(&server->srv_lock);
++ return -EHOSTDOWN;
++ }
+ spin_unlock(&server->srv_lock);
+
+ again:
+@@ -241,12 +301,24 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ tcon->need_reconnect);
+
+ mutex_lock(&ses->session_mutex);
++ /*
++ * if this is called by delayed work, and the channel has been disabled
++ * in parallel, the delayed work can continue to execute in parallel
++ * there's a chance that this channel may not exist anymore
++ */
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus == CifsExiting) {
++ spin_unlock(&server->srv_lock);
++ mutex_unlock(&ses->session_mutex);
++ rc = -EHOSTDOWN;
++ goto out;
++ }
++
+ /*
+ * Recheck after acquire mutex. If another thread is negotiating
+ * and the server never sends an answer the socket will be closed
+ * and tcpStatus set to reconnect.
+ */
+- spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsNeedReconnect) {
+ spin_unlock(&server->srv_lock);
+ mutex_unlock(&ses->session_mutex);
+@@ -283,7 +355,32 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+
+ rc = cifs_negotiate_protocol(0, ses, server);
+ if (!rc) {
++ /*
++ * if server stopped supporting multichannel
++ * and the first channel reconnected, disable all the others.
++ */
++ if (ses->chan_count > 1 &&
++ !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++ rc = cifs_chan_skip_or_disable(ses, server,
++ from_reconnect);
++ if (rc) {
++ mutex_unlock(&ses->session_mutex);
++ goto out;
++ }
++ }
++
+ rc = cifs_setup_session(0, ses, server, nls_codepage);
++ if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
++ /*
++ * Try alternate password for next reconnect (key rotation
++ * could be enabled on the server e.g.) if an alternate
++ * password is available and the current password is expired,
++ * but do not swap on non pwd related errors like host down
++ */
++ if (ses->password2)
++ swap(ses->password2, ses->password);
++ }
++
+ if ((rc == -EACCES) && !tcon->retry) {
+ mutex_unlock(&ses->session_mutex);
+ rc = -EHOSTDOWN;
+@@ -307,15 +404,71 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ tcon->need_reopen_files = true;
+
+ rc = cifs_tree_connect(0, tcon, nls_codepage);
+- mutex_unlock(&ses->session_mutex);
+
+ cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
+ if (rc) {
+ /* If sess reconnected but tcon didn't, something strange ... */
++ mutex_unlock(&ses->session_mutex);
+ cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
+ goto out;
+ }
+
++ spin_lock(&ses->ses_lock);
++ if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
++ spin_unlock(&ses->ses_lock);
++ mutex_unlock(&ses->session_mutex);
++ goto skip_add_channels;
++ }
++ ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
++ spin_unlock(&ses->ses_lock);
++
++ if (!rc &&
++ (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
++ server->ops->query_server_interfaces) {
++ mutex_unlock(&ses->session_mutex);
++
++ /*
++ * query server network interfaces, in case they change
++ */
++ xid = get_xid();
++ rc = server->ops->query_server_interfaces(xid, tcon, false);
++ free_xid(xid);
++
++ if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
++ /*
++ * some servers like Azure SMB server do not advertise
++ * that multichannel has been disabled with server
++ * capabilities, rather return STATUS_NOT_IMPLEMENTED.
++ * treat this as server not supporting multichannel
++ */
++
++ rc = cifs_chan_skip_or_disable(ses, server,
++ from_reconnect);
++ goto skip_add_channels;
++ } else if (rc)
++ cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
++ __func__, rc);
++
++ if (ses->chan_max > ses->chan_count &&
++ ses->iface_count &&
++ !SERVER_IS_CHAN(server)) {
++ if (ses->chan_count == 1) {
++ cifs_server_dbg(VFS, "supports multichannel now\n");
++ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++ (SMB_INTERFACE_POLL_INTERVAL * HZ));
++ }
++
++ cifs_try_adding_channels(ses);
++ }
++ } else {
++ mutex_unlock(&ses->session_mutex);
++ }
++
++skip_add_channels:
++ spin_lock(&ses->ses_lock);
++ ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
++ spin_unlock(&ses->ses_lock);
++
+ if (smb2_command != SMB2_INTERNAL_CMD)
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+
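Editorial note: one behavioural addition in the smb2_reconnect() hunk above is credential rotation. On -EACCES, -EKEYEXPIRED or -EKEYREVOKED from session setup, the primary and alternate passwords are swapped so the next reconnect attempt tries the rotated credential; non-auth errors leave the passwords alone. That swap in isolation, as a hedged sketch (maybe_rotate_password is a made-up name):

    #include <errno.h>
    #include <stdio.h>

    /* swap primary/alternate creds only on auth-style failures */
    static void maybe_rotate_password(int rc, char **password, char **password2)
    {
            if (rc != -EACCES && rc != -EKEYEXPIRED && rc != -EKEYREVOKED)
                    return;                  /* not a credential problem */
            if (!*password2)
                    return;                  /* no alternate configured */

            char *tmp = *password;
            *password = *password2;
            *password2 = tmp;
    }

    int main(void)
    {
            char *pw = "old-secret", *pw2 = "rotated-secret";

            maybe_rotate_password(-EKEYEXPIRED, &pw, &pw2);
            printf("next attempt uses: %s\n", pw);
            return 0;
    }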
+@@ -376,10 +529,15 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
+ {
+ /* BB eventually switch this to SMB2 specific small buf size */
+- if (smb2_command == SMB2_SET_INFO)
++ switch (smb2_command) {
++ case SMB2_SET_INFO:
++ case SMB2_QUERY_INFO:
+ *request_buf = cifs_buf_get();
+- else
++ break;
++ default:
+ *request_buf = cifs_small_buf_get();
++ break;
++ }
+ if (*request_buf == NULL) {
+ /* BB should we add a retry in here if not a writepage? */
+ return -ENOMEM;
+@@ -404,7 +562,7 @@ static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ {
+ int rc;
+
+- rc = smb2_reconnect(smb2_command, tcon, server);
++ rc = smb2_reconnect(smb2_command, tcon, server, false);
+ if (rc)
+ return rc;
+
+@@ -588,7 +746,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
+ pneg_ctxt += sizeof(struct smb2_posix_neg_context);
+ neg_context_count++;
+
+- if (server->compress_algorithm) {
++ if (server->compression.requested) {
+ build_compression_ctxt((struct smb2_compression_capabilities_context *)
+ pneg_ctxt);
+ ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
+@@ -636,6 +794,9 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
+ struct smb2_compression_capabilities_context *ctxt)
+ {
+ unsigned int len = le16_to_cpu(ctxt->DataLength);
++ __le16 alg;
++
++ server->compression.enabled = false;
+
+ /*
+ * Caller checked that DataLength remains within SMB boundary. We still
+@@ -646,15 +807,22 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
+ pr_warn_once("server sent bad compression cntxt\n");
+ return;
+ }
++
+ if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
+- pr_warn_once("Invalid SMB3 compress algorithm count\n");
++ pr_warn_once("invalid SMB3 compress algorithm count\n");
+ return;
+ }
+- if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
+- pr_warn_once("unknown compression algorithm\n");
++
++ alg = ctxt->CompressionAlgorithms[0];
++
++ /* 'NONE' (0) compressor type is never negotiated */
++ if (alg == 0 || le16_to_cpu(alg) > 3) {
++ pr_warn_once("invalid compression algorithm '%u'\n", alg);
+ return;
+ }
+- server->compress_algorithm = ctxt->CompressionAlgorithms[0];
++
++ server->compression.alg = alg;
++ server->compression.enabled = true;
+ }
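Editorial note: decode_compress_ctx() now defaults compression.enabled to false and only flips it on after validating the negotiate context: exactly one algorithm must be listed and its id must be a known non-NONE value (1..3 at the time of this patch). The same defensive decode in plain C, using le16toh for the wire-order fields and a simplified context layout:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct compress_ctx {            /* simplified wire layout */
            uint16_t count;          /* little-endian algorithm count */
            uint16_t algs[1];        /* little-endian algorithm ids */
    };

    static int decode_compress(const struct compress_ctx *ctxt,
                               uint16_t *alg_out, int *enabled)
    {
            uint16_t alg;

            *enabled = 0;                        /* default to disabled */

            if (le16toh(ctxt->count) != 1)
                    return -1;                   /* exactly one expected */

            alg = le16toh(ctxt->algs[0]);
            if (alg == 0 || alg > 3)
                    return -1;                   /* NONE or unknown id */

            *alg_out = alg;
            *enabled = 1;
            return 0;
    }

    int main(void)
    {
            struct compress_ctx c = { htole16(1), { htole16(2) } };
            uint16_t alg = 0;
            int on = 0;
            int rc = decode_compress(&c, &alg, &on);

            printf("rc=%d alg=%u on=%d\n", rc, alg, on);
            return 0;
    }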
+
+ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
+@@ -1095,6 +1263,12 @@ SMB2_negotiate(const unsigned int xid,
+ else
+ cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
+ }
++
++ if (server->cipher_type && !rc) {
++ rc = smb3_crypto_aead_allocate(server);
++ if (rc)
++ cifs_server_dbg(VFS, "%s: crypto alloc failed, rc=%d\n", __func__, rc);
++ }
+ neg_exit:
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+@@ -1393,6 +1567,11 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
+ &sess_data->buf0_type,
+ CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
+ cifs_small_buf_release(sess_data->iov[0].iov_base);
++ if (rc == 0)
++ sess_data->ses->expired_pwd = false;
++ else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED))
++ sess_data->ses->expired_pwd = true;
++
+ memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
+
+ return rc;
+@@ -1859,10 +2038,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ __le16 *unc_path = NULL;
+ int flags = 0;
+ unsigned int total_len;
+- struct TCP_Server_Info *server;
+-
+- /* always use master channel */
+- server = ses->server;
++ struct TCP_Server_Info *server = cifs_pick_channel(ses);
+
+ cifs_dbg(FYI, "TCON\n");
+
+@@ -1995,6 +2171,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
+ struct smb2_tree_disconnect_req *req; /* response is trivial */
+ int rc = 0;
+ struct cifs_ses *ses = tcon->ses;
++ struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ int flags = 0;
+ unsigned int total_len;
+ struct kvec iov[1];
+@@ -2017,7 +2194,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
+
+ invalidate_all_cached_dirs(tcon);
+
+- rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
++ rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
+ (void **) &req,
+ &total_len);
+ if (rc)
+@@ -2035,7 +2212,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+- rc = cifs_send_recv(xid, ses, ses->server,
++ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
+ cifs_small_buf_release(req);
+ if (rc) {
+@@ -2141,17 +2318,18 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
+ posix->nlink, posix->mode, posix->reparse_tag);
+ }
+
+-void
+-smb2_parse_contexts(struct TCP_Server_Info *server,
+- struct smb2_create_rsp *rsp,
+- unsigned int *epoch, char *lease_key, __u8 *oplock,
+- struct smb2_file_all_info *buf,
+- struct create_posix_rsp *posix)
++int smb2_parse_contexts(struct TCP_Server_Info *server,
++ struct kvec *rsp_iov,
++ unsigned int *epoch,
++ char *lease_key, __u8 *oplock,
++ struct smb2_file_all_info *buf,
++ struct create_posix_rsp *posix)
+ {
+- char *data_offset;
++ struct smb2_create_rsp *rsp = rsp_iov->iov_base;
+ struct create_context *cc;
+- unsigned int next;
+- unsigned int remaining;
++ size_t rem, off, len;
++ size_t doff, dlen;
++ size_t noff, nlen;
+ char *name;
+ static const char smb3_create_tag_posix[] = {
+ 0x93, 0xAD, 0x25, 0x50, 0x9C,
+@@ -2160,45 +2338,63 @@ smb2_parse_contexts(struct TCP_Server_Info *server,
+ };
+
+ *oplock = 0;
+- data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
+- remaining = le32_to_cpu(rsp->CreateContextsLength);
+- cc = (struct create_context *)data_offset;
++
++ off = le32_to_cpu(rsp->CreateContextsOffset);
++ rem = le32_to_cpu(rsp->CreateContextsLength);
++ if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
++ return -EINVAL;
++ cc = (struct create_context *)((u8 *)rsp + off);
+
+ /* Initialize inode number to 0 in case no valid data in qfid context */
+ if (buf)
+ buf->IndexNumber = 0;
+
+- while (remaining >= sizeof(struct create_context)) {
+- name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+- if (le16_to_cpu(cc->NameLength) == 4 &&
+- strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
+- *oplock = server->ops->parse_lease_buf(cc, epoch,
+- lease_key);
+- else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
+- strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
+- parse_query_id_ctxt(cc, buf);
+- else if ((le16_to_cpu(cc->NameLength) == 16)) {
+- if (posix &&
+- memcmp(name, smb3_create_tag_posix, 16) == 0)
++ while (rem >= sizeof(*cc)) {
++ doff = le16_to_cpu(cc->DataOffset);
++ dlen = le32_to_cpu(cc->DataLength);
++ if (check_add_overflow(doff, dlen, &len) || len > rem)
++ return -EINVAL;
++
++ noff = le16_to_cpu(cc->NameOffset);
++ nlen = le16_to_cpu(cc->NameLength);
++ if (noff + nlen > doff)
++ return -EINVAL;
++
++ name = (char *)cc + noff;
++ switch (nlen) {
++ case 4:
++ if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
++ *oplock = server->ops->parse_lease_buf(cc, epoch,
++ lease_key);
++ } else if (buf &&
++ !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
++ parse_query_id_ctxt(cc, buf);
++ }
++ break;
++ case 16:
++ if (posix && !memcmp(name, smb3_create_tag_posix, 16))
+ parse_posix_ctxt(cc, buf, posix);
++ break;
++ default:
++ cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
++ __func__, nlen, dlen);
++ if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
++ cifs_dump_mem("context data: ", cc, dlen);
++ break;
+ }
+- /* else {
+- cifs_dbg(FYI, "Context not matched with len %d\n",
+- le16_to_cpu(cc->NameLength));
+- cifs_dump_mem("Cctxt name: ", name, 4);
+- } */
+-
+- next = le32_to_cpu(cc->Next);
+- if (!next)
++
++ off = le32_to_cpu(cc->Next);
++ if (!off)
+ break;
+- remaining -= next;
+- cc = (struct create_context *)((char *)cc + next);
++ if (check_sub_overflow(rem, off, &rem))
++ return -EINVAL;
++ cc = (struct create_context *)((u8 *)cc + off);
+ }
+
+ if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ *oplock = rsp->OplockLevel;
+
+- return;
++ return 0;
+ }
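Editorial note: the rewritten smb2_parse_contexts() above is the security-relevant core of this hunk. Every offset/length pair taken from the wire (CreateContextsOffset/Length, DataOffset/DataLength, NameOffset/NameLength, Next) is now checked with check_add_overflow-style arithmetic against the containing buffer before being dereferenced. A self-contained sketch of that walk over a simplified record stream, using the GCC/Clang __builtin_add_overflow that the kernel helpers wrap (struct rec and walk() are illustrative, not the kernel's types):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct rec {                 /* simplified create-context record */
            uint32_t next;       /* offset of next record, 0 = last */
            uint16_t name_off;   /* offsets are relative to the record */
            uint16_t name_len;
    };

    /* returns 0 on success, -1 if any offset escapes the buffer */
    static int walk(const uint8_t *buf, size_t buf_len, size_t off, size_t rem)
    {
            size_t end;

            /* whole context area must sit inside the response buffer */
            if (__builtin_add_overflow(off, rem, &end) || end > buf_len)
                    return -1;

            while (rem >= sizeof(struct rec)) {
                    struct rec r;

                    memcpy(&r, buf + off, sizeof(r)); /* no misaligned reads */

                    /* name must not run past this record's remaining bytes */
                    if (__builtin_add_overflow((size_t)r.name_off,
                                               (size_t)r.name_len, &end) ||
                        end > rem)
                            return -1;

                    printf("record at %zu, name_len %u\n", off,
                           (unsigned)r.name_len);

                    if (!r.next)
                            break;
                    if (r.next > rem)       /* cannot step past the area */
                            return -1;
                    rem -= r.next;
                    off += r.next;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[64] = {0};
            struct rec r = { .next = 0, .name_off = 8, .name_len = 4 };

            memcpy(buf, &r, sizeof(r));
            return walk(buf, sizeof(buf), 0, 16) ? 1 : 0;
    }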
+
+ static int
+@@ -2244,8 +2440,13 @@ create_durable_v2_buf(struct cifs_open_parms *oparms)
+ */
+ buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
+ buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
+- generate_random_uuid(buf->dcontext.CreateGuid);
+- memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
++
++ /* for replay, we should not overwrite the existing create guid */
++ if (!oparms->replay) {
++ generate_random_uuid(buf->dcontext.CreateGuid);
++ memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
++ } else
++ memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16);
+
+ /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
+ buf->Name[0] = 'D';
+@@ -2550,6 +2751,17 @@ add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
+ return 0;
+ }
+
++static void add_ea_context(struct cifs_open_parms *oparms,
++ struct kvec *rq_iov, unsigned int *num_iovs)
++{
++ struct kvec *iov = oparms->ea_cctx;
++
++ if (iov && iov->iov_base && iov->iov_len) {
++ rq_iov[(*num_iovs)++] = *iov;
++ memset(iov, 0, sizeof(*iov));
++ }
++}
++
+ static int
+ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
+ const char *treename, const __le16 *path)
+@@ -2618,7 +2830,14 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ int flags = 0;
+ unsigned int total_len;
+ __le16 *utf16_path = NULL;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ n_iov = 2;
++ server = cifs_pick_channel(ses);
+
+ cifs_dbg(FYI, "mkdir\n");
+
+@@ -2722,6 +2941,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ /* no need to inc num_remote_opens because we close it just below */
+ trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
++
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ /* resource #4: response buffer */
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+@@ -2759,6 +2982,11 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ cifs_small_buf_release(req);
+ err_free_path:
+ kfree(utf16_path);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
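Editorial note: the replay_again/retries/cur_sleep pattern introduced here for smb311_posix_mkdir() recurs in almost every request function touched below (open, ioctl, close, query, write, flush, lock, ...): reinitialize per-attempt state, tag retransmissions with smb2_set_replay(), and back off between attempts. One plausible shape for the retry helper, as a runnable sketch; the cap constants and the replayable-error set are assumptions, not the kernel's values:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_RETRIES    3        /* assumed cap */
    #define MAX_SLEEP_SECS 8        /* assumed backoff ceiling */

    static int is_replayable_error(int rc)
    {
            return rc == -EAGAIN || rc == -ECONNABORTED;  /* illustrative */
    }

    /* returns 1 if the caller should jump back to replay_again */
    static int should_replay(int *retries, int *cur_sleep)
    {
            if (++(*retries) >= MAX_RETRIES)
                    return 0;
            sleep(*cur_sleep);              /* back off before retrying */
            if (*cur_sleep < MAX_SLEEP_SECS)
                    *cur_sleep *= 2;        /* exponential growth, capped */
            return 1;
    }

    static int send_request(int attempt)
    {
            return attempt < 2 ? -EAGAIN : 0; /* fail twice, then succeed */
    }

    int main(void)
    {
            int retries = 0, cur_sleep = 1, rc;

    replay_again:
            rc = send_request(retries);     /* retries > 0 marks a replay */
            if (is_replayable_error(rc) && should_replay(&retries, &cur_sleep))
                    goto replay_again;

            printf("final rc=%d after %d retries\n", rc, retries);
            return rc ? 1 : 0;
    }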
+
+@@ -2900,6 +3128,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ }
+
+ add_query_id_context(iov, &n_iov);
++ add_ea_context(oparms, iov, &n_iov);
+
+ if (n_iov > 2) {
+ /*
+@@ -2954,12 +3183,19 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ struct smb2_create_rsp *rsp = NULL;
+ struct cifs_tcon *tcon = oparms->tcon;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ struct kvec iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype = CIFS_NO_BUFFER;
+ int rc = 0;
+ int flags = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
++ oparms->replay = !!(retries);
+
+ cifs_dbg(FYI, "create/open\n");
+ if (!ses || !server)
+@@ -2981,6 +3217,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
+ oparms->create_options, oparms->desired_access);
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
+ &rsp_iov);
+@@ -3029,11 +3268,16 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ }
+
+
+- smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
+- oparms->fid->lease_key, oplock, buf, posix);
++ rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
++ oparms->fid->lease_key, oplock, buf, posix);
+ creat_exit:
+ SMB2_open_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -3158,15 +3402,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ int resp_buftype = CIFS_NO_BUFFER;
+ int rc = 0;
+ int flags = 0;
+-
+- cifs_dbg(FYI, "SMB2 IOCTL\n");
+-
+- if (out_data != NULL)
+- *out_data = NULL;
+-
+- /* zero out returned data len, in case of error */
+- if (plen)
+- *plen = 0;
++ int retries = 0, cur_sleep = 1;
+
+ if (!tcon)
+ return -EIO;
+@@ -3175,10 +3411,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ if (!ses)
+ return -EIO;
+
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
+ server = cifs_pick_channel(ses);
++
+ if (!server)
+ return -EIO;
+
++ cifs_dbg(FYI, "SMB2 IOCTL\n");
++
++ if (out_data != NULL)
++ *out_data = NULL;
++
++ /* zero out returned data len, in case of error */
++ if (plen)
++ *plen = 0;
++
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+@@ -3193,6 +3442,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ if (rc)
+ goto ioctl_exit;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
+ &rsp_iov);
+@@ -3262,6 +3514,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ ioctl_exit:
+ SMB2_ioctl_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -3333,13 +3590,20 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ struct smb_rqst rqst;
+ struct smb2_close_rsp *rsp = NULL;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buftype = CIFS_NO_BUFFER;
+ int rc = 0;
+ int flags = 0;
+ bool query_attrs = false;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ query_attrs = false;
++ server = cifs_pick_channel(ses);
+
+ cifs_dbg(FYI, "Close\n");
+
+@@ -3365,6 +3629,9 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ if (rc)
+ goto close_exit;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+ rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
+@@ -3377,15 +3644,13 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ } else {
+ trace_smb3_close_done(xid, persistent_fid, tcon->tid,
+ ses->Suid);
+- /*
+- * Note that have to subtract 4 since struct network_open_info
+- * has a final 4 byte pad that close response does not have
+- */
+ if (pbuf)
+- memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
++ memcpy(&pbuf->network_open_info,
++ &rsp->network_open_info,
++ sizeof(pbuf->network_open_info));
++ atomic_dec(&tcon->num_remote_opens);
+ }
+
+- atomic_dec(&tcon->num_remote_opens);
+ close_exit:
+ SMB2_close_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp);
+@@ -3400,6 +3665,11 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
+ persistent_fid, tmp_rc);
+ }
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -3475,8 +3745,13 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb2_query_info_req *req;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int total_len;
++ size_t len;
+ int rc;
+
++ if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
++ len > CIFSMaxBufSize))
++ return -EINVAL;
++
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ (void **) &req, &total_len);
+ if (rc)
+@@ -3498,7 +3773,7 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+
+ iov[0].iov_base = (char *)req;
+ /* 1 for Buffer */
+- iov[0].iov_len = total_len - 1 + input_len;
++ iov[0].iov_len = len;
+ return 0;
+ }
+
+@@ -3506,7 +3781,7 @@ void
+ SMB2_query_info_free(struct smb_rqst *rqst)
+ {
+ if (rqst && rqst->rq_iov)
+- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
++ cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ }
+
+ static int
+@@ -3525,12 +3800,19 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server;
+ int flags = 0;
+ bool allocated = false;
++ int retries = 0, cur_sleep = 1;
+
+ cifs_dbg(FYI, "Query Info\n");
+
+ if (!ses)
+ return -EIO;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ allocated = false;
+ server = cifs_pick_channel(ses);
++
+ if (!server)
+ return -EIO;
+
+@@ -3552,6 +3834,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
+ ses->Suid, info_class, (__u32)info_type);
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+ rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+@@ -3594,6 +3879,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ qinf_exit:
+ SMB2_query_info_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -3694,7 +3984,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ u32 *plen /* returned data len */)
+ {
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ struct smb_rqst rqst;
+ struct smb2_change_notify_rsp *smb_rsp;
+ struct kvec iov[1];
+@@ -3702,6 +3992,12 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ int resp_buftype = CIFS_NO_BUFFER;
+ int flags = 0;
+ int rc = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ cifs_dbg(FYI, "change notify\n");
+ if (!ses || !server)
+@@ -3726,6 +4022,10 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+
+ trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
+ (u8)watch_tree, completion_filter);
++
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+
+@@ -3760,6 +4060,11 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+ if (rqst.rq_iov)
+ cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -3797,17 +4102,33 @@ void smb2_reconnect_server(struct work_struct *work)
+ struct cifs_ses *ses, *ses2;
+ struct cifs_tcon *tcon, *tcon2;
+ struct list_head tmp_list, tmp_ses_list;
+- bool tcon_exist = false, ses_exist = false;
++ bool ses_exist = false;
+ bool tcon_selected = false;
+ int rc;
+ bool resched = false;
+
++ /* first check if ref count has reached 0, if not inc ref count */
++ spin_lock(&cifs_tcp_ses_lock);
++ if (!server->srv_count) {
++ spin_unlock(&cifs_tcp_ses_lock);
++ return;
++ }
++ server->srv_count++;
++ spin_unlock(&cifs_tcp_ses_lock);
++
+ /* If server is a channel, select the primary channel */
+ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+
+ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
+ mutex_lock(&pserver->reconnect_mutex);
+
++ /* if the server is marked for termination, drop the ref count here */
++ if (server->terminate) {
++ cifs_put_tcp_session(server, true);
++ mutex_unlock(&pserver->reconnect_mutex);
++ return;
++ }
++
+ INIT_LIST_HEAD(&tmp_list);
+ INIT_LIST_HEAD(&tmp_ses_list);
+ cifs_dbg(FYI, "Reconnecting tcons and channels\n");
+@@ -3826,8 +4147,10 @@ void smb2_reconnect_server(struct work_struct *work)
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->need_reconnect || tcon->need_reopen_files) {
+ tcon->tc_count++;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_reconnect_server);
+ list_add_tail(&tcon->rlist, &tmp_list);
+- tcon_selected = tcon_exist = true;
++ tcon_selected = true;
+ }
+ }
+ /*
+@@ -3836,7 +4159,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ */
+ if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
+ list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
+- tcon_selected = tcon_exist = true;
++ tcon_selected = true;
+ cifs_smb_ses_inc_refcount(ses);
+ }
+ /*
+@@ -3852,17 +4175,10 @@ void smb2_reconnect_server(struct work_struct *work)
+ }
+ spin_unlock(&ses->chan_lock);
+ }
+- /*
+- * Get the reference to server struct to be sure that the last call of
+- * cifs_put_tcon() in the loop below won't release the server pointer.
+- */
+- if (tcon_exist || ses_exist)
+- server->srv_count++;
+-
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
+- rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
++ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
+ if (!rc)
+ cifs_reopen_persistent_handles(tcon);
+ else
+@@ -3871,14 +4187,14 @@ void smb2_reconnect_server(struct work_struct *work)
+ if (tcon->ipc)
+ cifs_put_smb_ses(tcon->ses);
+ else
+- cifs_put_tcon(tcon);
++ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
+ }
+
+ if (!ses_exist)
+ goto done;
+
+ /* allocate a dummy tcon struct used for reconnect */
+- tcon = tcon_info_alloc(false);
++ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
+ if (!tcon) {
+ resched = true;
+ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+@@ -3895,13 +4211,13 @@ void smb2_reconnect_server(struct work_struct *work)
+ /* now reconnect sessions for necessary channels */
+ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+ tcon->ses = ses;
+- rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
++ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
+ if (rc)
+ resched = true;
+ list_del_init(&ses->rlist);
+ cifs_put_smb_ses(ses);
+ }
+- tconInfoFree(tcon);
++ tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
+
+ done:
+ cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
+@@ -3910,8 +4226,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ mutex_unlock(&pserver->reconnect_mutex);
+
+ /* now we can safely release srv struct */
+- if (tcon_exist || ses_exist)
+- cifs_put_tcp_session(server, 1);
++ cifs_put_tcp_session(server, true);
+ }
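Editorial note: smb2_reconnect_server() now pins the server struct unconditionally at entry (bumping srv_count under cifs_tcp_ses_lock and bailing out if it already reached zero) and drops the reference exactly once at the end, instead of the old conditional mid-function grab. A sketch of that try-get/put discipline with a plain mutex; all names here are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    struct server { int refcount; };

    /* take a reference only if the object is still live */
    static int server_tryget(struct server *s)
    {
            int ok;

            pthread_mutex_lock(&lock);
            ok = s->refcount > 0;
            if (ok)
                    s->refcount++;
            pthread_mutex_unlock(&lock);
            return ok;
    }

    static void server_put(struct server *s)
    {
            pthread_mutex_lock(&lock);
            if (--s->refcount == 0)
                    printf("last ref dropped, freeing\n");
            pthread_mutex_unlock(&lock);
    }

    static void reconnect_worker(struct server *s)
    {
            if (!server_tryget(s))
                    return;         /* object already going away */
            /* ... reconnect work runs with the object guaranteed live ... */
            server_put(s);
    }

    int main(void)
    {
            struct server s = { .refcount = 1 };

            reconnect_worker(&s);
            server_put(&s);         /* drop the initial reference */
            return 0;
    }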
+
+ int
+@@ -3994,10 +4309,16 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ struct smb_rqst rqst;
+ struct kvec iov[1];
+ struct kvec rsp_iov = {NULL, 0};
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ int resp_buftype = CIFS_NO_BUFFER;
+ int flags = 0;
+ int rc = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ cifs_dbg(FYI, "flush\n");
+ if (!ses || !(ses->server))
+@@ -4017,6 +4338,10 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ goto flush_exit;
+
+ trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
++
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+
+@@ -4031,6 +4356,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ flush_exit:
+ SMB2_flush_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -4107,7 +4437,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
+ * If we want to do a RDMA write, fill in and append
+ * smbd_buffer_descriptor_v1 to the end of read request
+ */
+- if (smb3_use_rdma_offload(io_parms)) {
++ if (rdata && smb3_use_rdma_offload(io_parms)) {
+ struct smbd_buffer_descriptor_v1 *v1;
+ bool need_invalidate = server->dialect == SMB30_PROT_ID;
+
+@@ -4510,7 +4840,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ struct cifs_io_parms *io_parms = NULL;
+ int credit_request;
+
+- if (!wdata->server)
++ if (!wdata->server || wdata->replay)
+ server = wdata->server = cifs_pick_channel(tcon->ses);
+
+ /*
+@@ -4595,6 +4925,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ rqst.rq_nvec = 1;
+ rqst.rq_iter = wdata->iter;
+ rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
++ if (wdata->replay)
++ smb2_set_replay(server, &rqst);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr)
+ iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+@@ -4668,18 +5000,21 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
+ int flags = 0;
+ unsigned int total_len;
+ struct TCP_Server_Info *server;
++ int retries = 0, cur_sleep = 1;
+
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
+ *nbytes = 0;
+-
+- if (n_vec < 1)
+- return rc;
+-
+ if (!io_parms->server)
+ io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+ server = io_parms->server;
+ if (server == NULL)
+ return -ECONNABORTED;
+
++ if (n_vec < 1)
++ return rc;
++
+ rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
+ (void **) &req, &total_len);
+ if (rc)
+@@ -4713,6 +5048,9 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_vec + 1;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
+ &rqst,
+ &resp_buftype, flags, &rsp_iov);
+@@ -4737,6 +5075,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
+
+ cifs_small_buf_release(req);
+ free_rsp_buf(resp_buftype, rsp);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -4919,6 +5262,9 @@ int SMB2_query_directory_init(const unsigned int xid,
+ case SMB_FIND_FILE_POSIX_INFO:
+ req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
+ break;
++ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++ req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
++ break;
+ default:
+ cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ info_level);
+@@ -4988,6 +5334,9 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
+ /* note that posix payload are variable size */
+ info_buf_size = sizeof(struct smb2_posix_info);
+ break;
++ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++ info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
++ break;
+ default:
+ cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ srch_inf->info_level);
+@@ -5048,8 +5397,14 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ struct kvec rsp_iov;
+ int rc = 0;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ int flags = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ if (!ses || !(ses->server))
+ return -EIO;
+@@ -5069,6 +5424,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ if (rc)
+ goto qdir_exit;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+ rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
+@@ -5103,6 +5461,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ qdir_exit:
+ SMB2_query_directory_free(&rqst);
+ free_rsp_buf(resp_buftype, rsp);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -5169,8 +5532,14 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ int flags = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ if (!ses || !server)
+ return -EIO;
+@@ -5198,6 +5567,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+ return rc;
+ }
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
+
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
+@@ -5213,23 +5584,28 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+
+ free_rsp_buf(resp_buftype, rsp);
+ kfree(iov);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+ int
+ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+- u64 volatile_fid, u32 pid, __le64 *eof)
++ u64 volatile_fid, u32 pid, loff_t new_eof)
+ {
+ struct smb2_file_eof_info info;
+ void *data;
+ unsigned int size;
+
+- info.EndOfFile = *eof;
++ info.EndOfFile = cpu_to_le64(new_eof);
+
+ data = &info;
+ size = sizeof(struct smb2_file_eof_info);
+
+- trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, le64_to_cpu(*eof));
++ trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof);
+
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
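Editorial note: SMB2_set_eof() now takes a native loff_t and converts it internally (info.EndOfFile = cpu_to_le64(new_eof)) rather than making every caller pass a pre-converted __le64. Keeping byte-order conversion at the marshalling boundary is the general point; a tiny sketch under that reading:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct eof_info {
            uint64_t end_of_file;   /* little-endian on the wire */
    };

    /* callers pass a native offset; conversion happens exactly once, here */
    static void marshal_eof(struct eof_info *info, int64_t new_eof)
    {
            info->end_of_file = htole64((uint64_t)new_eof);
    }

    int main(void)
    {
            struct eof_info info;

            marshal_eof(&info, 4096);
            printf("wire bytes hold %llu\n",
                   (unsigned long long)le64toh(info.end_of_file));
            return 0;
    }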
+@@ -5265,12 +5641,18 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc;
+ struct smb2_oplock_break *req = NULL;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ int flags = CIFS_OBREAK_OP;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = CIFS_OBREAK_OP;
++ server = cifs_pick_channel(ses);
+
+ cifs_dbg(FYI, "SMB2_oplock_break\n");
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+@@ -5295,15 +5677,21 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
+ cifs_small_buf_release(req);
+-
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
+ cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
+ }
+
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -5373,6 +5761,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+ return 0;
+ }
+
++static inline void free_qfs_info_req(struct kvec *iov)
++{
++ cifs_buf_release(iov->iov_base);
++}
++
+ int
+ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+@@ -5384,9 +5777,15 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ FILE_SYSTEM_POSIX_INFO *info = NULL;
+ int flags = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ rc = build_qfs_info_req(&iov, tcon, server,
+ FS_POSIX_INFORMATION,
+@@ -5402,9 +5801,12 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+- cifs_small_buf_release(iov.iov_base);
++ free_qfs_info_req(&iov);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto posix_qfsinf_exit;
+@@ -5421,6 +5823,11 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+
+ posix_qfsinf_exit:
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -5435,9 +5842,15 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ struct smb2_fs_full_size_info *info = NULL;
+ int flags = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ rc = build_qfs_info_req(&iov, tcon, server,
+ FS_FULL_SIZE_INFORMATION,
+@@ -5453,9 +5866,12 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+- cifs_small_buf_release(iov.iov_base);
++ free_qfs_info_req(&iov);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto qfsinf_exit;
+@@ -5472,6 +5888,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+
+ qfsinf_exit:
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -5486,9 +5907,15 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc = 0;
+ int resp_buftype, max_len, min_len;
+ struct cifs_ses *ses = tcon->ses;
+- struct TCP_Server_Info *server = cifs_pick_channel(ses);
++ struct TCP_Server_Info *server;
+ unsigned int rsp_len, offset;
+ int flags = 0;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = 0;
++ server = cifs_pick_channel(ses);
+
+ if (level == FS_DEVICE_INFORMATION) {
+ max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+@@ -5520,9 +5947,12 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
+- cifs_small_buf_release(iov.iov_base);
++ free_qfs_info_req(&iov);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto qfsattr_exit;
+@@ -5557,6 +5987,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+
+ qfsattr_exit:
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
++
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+@@ -5574,7 +6009,13 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+ unsigned int count;
+ int flags = CIFS_NO_RSP_BUF;
+ unsigned int total_len;
+- struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
++ struct TCP_Server_Info *server;
++ int retries = 0, cur_sleep = 1;
++
++replay_again:
++ /* reinitialize for possible replay */
++ flags = CIFS_NO_RSP_BUF;
++ server = cifs_pick_channel(tcon->ses);
+
+ cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
+
+@@ -5605,6 +6046,9 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
++ if (retries)
++ smb2_set_replay(server, &rqst);
++
+ rc = cifs_send_recv(xid, tcon->ses, server,
+ &rqst, &resp_buf_type, flags,
+ &rsp_iov);
+@@ -5616,6 +6060,10 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+ tcon->ses->Suid, rc);
+ }
+
++ if (is_replayable_error(rc) &&
++ smb2_should_replay(tcon, &retries, &cur_sleep))
++ goto replay_again;
++
+ return rc;
+ }
+
+diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
+index 220994d0a0f7f6..5c458ab3b05a44 100644
+--- a/fs/smb/client/smb2pdu.h
++++ b/fs/smb/client/smb2pdu.h
+@@ -117,9 +117,10 @@ struct share_redirect_error_context_rsp {
+ * [4] : posix context
+ * [5] : time warp context
+ * [6] : query id context
+- * [7] : compound padding
++ * [7] : create ea context
++ * [8] : compound padding
+ */
+-#define SMB2_CREATE_IOV_SIZE 8
++#define SMB2_CREATE_IOV_SIZE 9
+
+ /*
+ * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
+@@ -144,7 +145,7 @@ struct durable_context_v2 {
+ } __packed;
+
+ struct create_durable_v2 {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct durable_context_v2 dcontext;
+ } __packed;
+@@ -166,7 +167,7 @@ struct durable_reconnect_context_v2_rsp {
+ } __packed;
+
+ struct create_durable_handle_reconnect_v2 {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct durable_reconnect_context_v2 dcontext;
+ __u8 Pad[4];
+@@ -174,7 +175,7 @@ struct create_durable_handle_reconnect_v2 {
+
+ /* See MS-SMB2 2.2.13.2.5 */
+ struct crt_twarp_ctxt {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le64 Timestamp;
+
+@@ -182,12 +183,12 @@ struct crt_twarp_ctxt {
+
+ /* See MS-SMB2 2.2.13.2.9 */
+ struct crt_query_id_ctxt {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ } __packed;
+
+ struct crt_sd_ctxt {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct smb3_sd sd;
+ } __packed;
+@@ -319,13 +320,15 @@ struct smb2_file_reparse_point_info {
+ } __packed;
+
+ struct smb2_file_network_open_info {
+- __le64 CreationTime;
+- __le64 LastAccessTime;
+- __le64 LastWriteTime;
+- __le64 ChangeTime;
+- __le64 AllocationSize;
+- __le64 EndOfFile;
+- __le32 Attributes;
++ struct_group_attr(network_open_info, __packed,
++ __le64 CreationTime;
++ __le64 LastAccessTime;
++ __le64 LastWriteTime;
++ __le64 ChangeTime;
++ __le64 AllocationSize;
++ __le64 EndOfFile;
++ __le32 Attributes;
++ );
+ __le32 Reserved;
+ } __packed; /* level 34 Query also similar returned in close rsp and open rsp */
+
+@@ -411,4 +414,35 @@ struct smb2_posix_info_parsed {
+ const u8 *name;
+ };
+
++struct smb2_create_ea_ctx {
++ struct create_context_hdr ctx;
++ __u8 name[8];
++ struct smb2_file_full_ea_info ea;
++} __packed;
++
++#define SMB2_WSL_XATTR_UID "$LXUID"
++#define SMB2_WSL_XATTR_GID "$LXGID"
++#define SMB2_WSL_XATTR_MODE "$LXMOD"
++#define SMB2_WSL_XATTR_DEV "$LXDEV"
++#define SMB2_WSL_XATTR_NAME_LEN 6
++#define SMB2_WSL_NUM_XATTRS 4
++
++#define SMB2_WSL_XATTR_UID_SIZE 4
++#define SMB2_WSL_XATTR_GID_SIZE 4
++#define SMB2_WSL_XATTR_MODE_SIZE 4
++#define SMB2_WSL_XATTR_DEV_SIZE 8
++
++#define SMB2_WSL_MIN_QUERY_EA_RESP_SIZE \
++ (ALIGN((SMB2_WSL_NUM_XATTRS - 1) * \
++ (SMB2_WSL_XATTR_NAME_LEN + 1 + \
++ sizeof(struct smb2_file_full_ea_info)), 4) + \
++ SMB2_WSL_XATTR_NAME_LEN + 1 + sizeof(struct smb2_file_full_ea_info))
++
++#define SMB2_WSL_MAX_QUERY_EA_RESP_SIZE \
++ (ALIGN(SMB2_WSL_MIN_QUERY_EA_RESP_SIZE + \
++ SMB2_WSL_XATTR_UID_SIZE + \
++ SMB2_WSL_XATTR_GID_SIZE + \
++ SMB2_WSL_XATTR_MODE_SIZE + \
++ SMB2_WSL_XATTR_DEV_SIZE, 4))
++
+ #endif /* _SMB2PDU_H */
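Editorial note: the WSL xattr size macros above lean on the kernel's ALIGN() to round EA record sizes up to 4-byte boundaries, apparently so each chained smb2_file_full_ea_info entry starts 4-aligned. The rounding itself is the usual power-of-two trick:

    #include <stdio.h>

    /* round x up to a multiple of a; a must be a power of two */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* e.g. a 13-byte EA record occupies 16 bytes on the wire */
            printf("%d -> %d\n", 13, ALIGN_UP(13, 4));
            printf("%d -> %d\n", 16, ALIGN_UP(16, 4));
            return 0;
    }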
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+index 46eff9ec302aad..732169d8a67a32 100644
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -56,6 +56,19 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
+ extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *path,
+ __u32 *reparse_tag);
++struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
++ struct super_block *sb,
++ const unsigned int xid,
++ struct cifs_tcon *tcon,
++ const char *full_path,
++ struct kvec *reparse_iov,
++ struct kvec *xattr_iov);
++int smb2_query_reparse_point(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct cifs_sb_info *cifs_sb,
++ const char *full_path,
++ u32 *tag, struct kvec *rsp,
++ int *rsp_buftype);
+ int smb2_query_path_info(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+@@ -63,7 +76,8 @@ int smb2_query_path_info(const unsigned int xid,
+ struct cifs_open_info_data *data);
+ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *full_path, __u64 size,
+- struct cifs_sb_info *cifs_sb, bool set_alloc);
++ struct cifs_sb_info *cifs_sb, bool set_alloc,
++ struct dentry *dentry);
+ extern int smb2_set_file_info(struct inode *inode, const char *full_path,
+ FILE_BASIC_INFO *buf, const unsigned int xid);
+ extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+@@ -79,13 +93,18 @@ extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
+ extern int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *name, struct cifs_sb_info *cifs_sb);
+ extern int smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *name, struct cifs_sb_info *cifs_sb);
+-extern int smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb);
+-extern int smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+- const char *from_name, const char *to_name,
+- struct cifs_sb_info *cifs_sb);
++ const char *name, struct cifs_sb_info *cifs_sb,
++ struct dentry *dentry);
++int smb2_rename_path(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb);
++int smb2_create_hardlink(const unsigned int xid,
++ struct cifs_tcon *tcon,
++ struct dentry *source_dentry,
++ const char *from_name, const char *to_name,
++ struct cifs_sb_info *cifs_sb);
+ extern int smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const unsigned char *path,
+ char *pbuf, unsigned int *pbytes_written);
+@@ -106,6 +125,11 @@ extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
+ extern void smb2_set_next_command(struct cifs_tcon *tcon,
+ struct smb_rqst *rqst);
+ extern void smb2_set_related(struct smb_rqst *rqst);
++extern void smb2_set_replay(struct TCP_Server_Info *server,
++ struct smb_rqst *rqst);
++extern bool smb2_should_replay(struct cifs_tcon *tcon,
++ int *pretries,
++ int *pcur_sleep);
+
+ /*
+ * SMB2 Worker functions - most of protocol specific implementation details
+@@ -205,7 +229,7 @@ extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
+ extern void SMB2_query_directory_free(struct smb_rqst *rqst);
+ extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, u32 pid,
+- __le64 *eof);
++ loff_t new_eof);
+ extern int SMB2_set_info_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
+@@ -251,11 +275,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
+
+ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
+ enum securityEnum);
+-extern void smb2_parse_contexts(struct TCP_Server_Info *server,
+- struct smb2_create_rsp *rsp,
+- unsigned int *epoch, char *lease_key,
+- __u8 *oplock, struct smb2_file_all_info *buf,
+- struct create_posix_rsp *posix);
++int smb2_parse_contexts(struct TCP_Server_Info *server,
++ struct kvec *rsp_iov,
++ unsigned int *epoch,
++ char *lease_key, __u8 *oplock,
++ struct smb2_file_all_info *buf,
++ struct create_posix_rsp *posix);
++
+ extern int smb3_encryption_required(const struct cifs_tcon *tcon);
+ extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int min_buf_size);
+@@ -281,10 +307,15 @@ int smb311_posix_query_path_info(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *full_path,
+- struct cifs_open_info_data *data,
+- struct cifs_sid *owner,
+- struct cifs_sid *group);
++ struct cifs_open_info_data *data);
+ int posix_info_parse(const void *beg, const void *end,
+ struct smb2_posix_info_parsed *out);
+ int posix_info_sid_size(const void *beg, const void *end);
++int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, const char *symname);
++int smb2_make_nfs_node(unsigned int xid, struct inode *inode,
++ struct dentry *dentry, struct cifs_tcon *tcon,
++ const char *full_path, umode_t mode, dev_t dev);
++
+ #endif /* _SMB2PROTO_H */
+diff --git a/fs/smb/client/smb2status.h b/fs/smb/client/smb2status.h
+index a9e958166fc53a..9c6d79b0bd4978 100644
+--- a/fs/smb/client/smb2status.h
++++ b/fs/smb/client/smb2status.h
+@@ -982,6 +982,8 @@ struct ntstatus {
+ #define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
+ #define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
+ #define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
++#define STATUS_SERVER_UNAVAILABLE cpu_to_le32(0xC0000466)
++#define STATUS_FILE_NOT_AVAILABLE cpu_to_le32(0xC0000467)
+ #define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
+ #define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
+ #define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 23c50ed7d4b590..4ca04e62a993cf 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -189,6 +189,8 @@ smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+ if (tcon->tid != tid)
+ continue;
+ ++tcon->tc_count;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_find_sess_tcon);
+ return tcon;
+ }
+
+@@ -214,8 +216,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+ }
+ tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ if (!tcon) {
+- cifs_put_smb_ses(ses);
+ spin_unlock(&cifs_tcp_ses_lock);
++ cifs_put_smb_ses(ses);
+ return NULL;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+@@ -413,7 +415,13 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ ses->ses_status == SES_GOOD);
+
+ chan_index = cifs_ses_get_chan_index(ses, server);
+-	/* TODO: introduce ref counting for channels when they can be freed */
++ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
++ spin_unlock(&ses->chan_lock);
++ spin_unlock(&ses->ses_lock);
++
++ return -EINVAL;
++ }
++
+ spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
+
+@@ -452,6 +460,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ ptriplet->encryption.context,
+ ses->smb3encryptionkey,
+ SMB3_ENC_DEC_KEY_SIZE);
++ if (rc)
++ return rc;
+ rc = generate_key(ses, ptriplet->decryption.label,
+ ptriplet->decryption.context,
+ ses->smb3decryptionkey,
+@@ -460,9 +470,6 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ return rc;
+ }
+
+- if (rc)
+- return rc;
+-
+ #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+ /*
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 94df9eec3d8d1c..d74e829de51c22 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -2136,7 +2136,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+ for (i = 0; i < info->responder_resources * 2; i++) {
+ smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+ if (!smbdirect_mr)
+- goto out;
++ goto cleanup_entries;
+ smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+@@ -2162,7 +2162,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+
+ out:
+ kfree(smbdirect_mr);
+-
++cleanup_entries:
+ list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
+ list_del(&smbdirect_mr->list);
+ ib_dereg_mr(smbdirect_mr->mr);
+diff --git a/fs/smb/client/smbencrypt.c b/fs/smb/client/smbencrypt.c
+index f0ce26414f1737..1d1ee9f18f3735 100644
+--- a/fs/smb/client/smbencrypt.c
++++ b/fs/smb/client/smbencrypt.c
+@@ -26,13 +26,6 @@
+ #include "cifsproto.h"
+ #include "../common/md4.h"
+
+-#ifndef false
+-#define false 0
+-#endif
+-#ifndef true
+-#define true 1
+-#endif
+-
+ /* following came from the other byteorder.h to avoid include conflicts */
+ #define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
+ #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
+diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
+index de199ec9f7263d..604e52876cd2d9 100644
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -3,6 +3,9 @@
+ * Copyright (C) 2018, Microsoft Corporation.
+ *
+ * Author(s): Steve French <stfrench@microsoft.com>
++ *
++ * Please use this 3-part article as a reference for writing new tracepoints:
++ * https://lwn.net/Articles/379903/
+ */
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM cifs
+@@ -15,9 +18,70 @@
+ #include <linux/inet.h>
+
+ /*
+- * Please use this 3-part article as a reference for writing new tracepoints:
+- * https://lwn.net/Articles/379903/
++ * Specify enums for tracing information.
++ */
++#define smb3_tcon_ref_traces \
++ EM(netfs_trace_tcon_ref_dec_dfs_refer, "DEC DfsRef") \
++ EM(netfs_trace_tcon_ref_free, "FRE ") \
++ EM(netfs_trace_tcon_ref_free_fail, "FRE Fail ") \
++ EM(netfs_trace_tcon_ref_free_ipc, "FRE Ipc ") \
++ EM(netfs_trace_tcon_ref_free_ipc_fail, "FRE Ipc-F ") \
++ EM(netfs_trace_tcon_ref_free_reconnect_server, "FRE Reconn") \
++ EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
++ EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
++ EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
++ EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \
++ EM(netfs_trace_tcon_ref_get_reconnect_server, "GET Reconn") \
++ EM(netfs_trace_tcon_ref_new, "NEW ") \
++ EM(netfs_trace_tcon_ref_new_ipc, "NEW Ipc ") \
++ EM(netfs_trace_tcon_ref_new_reconnect_server, "NEW Reconn") \
++ EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
++ EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
++ EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
++ EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \
++ EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \
++ EM(netfs_trace_tcon_ref_put_tlink, "PUT Tlink ") \
++ EM(netfs_trace_tcon_ref_see_cancelled_close, "SEE Cn-Cls") \
++ EM(netfs_trace_tcon_ref_see_fscache_collision, "SEE FV-CO!") \
++ EM(netfs_trace_tcon_ref_see_fscache_okay, "SEE FV-Ok ") \
++ EM(netfs_trace_tcon_ref_see_fscache_relinq, "SEE FV-Rlq") \
++ E_(netfs_trace_tcon_ref_see_umount, "SEE Umount")
++
++#undef EM
++#undef E_
++
++/*
++ * Define those tracing enums.
++ */
++#ifndef __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
++#define __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
++
++#define EM(a, b) a,
++#define E_(a, b) a
++
++enum smb3_tcon_ref_trace { smb3_tcon_ref_traces } __mode(byte);
++
++#undef EM
++#undef E_
++#endif
++
++/*
++ * Export enum symbols via userspace.
++ */
++#define EM(a, b) TRACE_DEFINE_ENUM(a);
++#define E_(a, b) TRACE_DEFINE_ENUM(a);
++
++smb3_tcon_ref_traces;
++
++#undef EM
++#undef E_
++
++/*
++ * Now redefine the EM() and E_() macros to map the enums to the strings that
++ * will be printed in the output.
+ */
++#define EM(a, b) { a, b },
++#define E_(a, b) { a, b }
+
+ /* For logging errors in read or write */
+ DECLARE_EVENT_CLASS(smb3_rw_err_class,
+@@ -370,10 +434,12 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_reparse_compound_enter);
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
+ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
+-
++DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mknod_enter);
+
+ DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
+ TP_PROTO(unsigned int xid,
+@@ -408,10 +474,13 @@ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_reparse_compound_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(get_reparse_compound_done);
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_wsl_ea_compound_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
+ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
+-
++DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mknod_done);
+
+ DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
+ TP_PROTO(unsigned int xid,
+@@ -451,9 +520,13 @@ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_reparse_compound_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(get_reparse_compound_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_wsl_ea_compound_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
+ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
++DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mknod_err);
+
+ /*
+ * For logging SMB3 Status code and Command for responses which return errors
+@@ -1025,6 +1098,38 @@ DEFINE_EVENT(smb3_ses_class, smb3_##name, \
+
+ DEFINE_SMB3_SES_EVENT(ses_not_found);
+
++DECLARE_EVENT_CLASS(smb3_ioctl_class,
++ TP_PROTO(unsigned int xid,
++ __u64 fid,
++ unsigned int command),
++ TP_ARGS(xid, fid, command),
++ TP_STRUCT__entry(
++ __field(unsigned int, xid)
++ __field(__u64, fid)
++ __field(unsigned int, command)
++ ),
++ TP_fast_assign(
++ __entry->xid = xid;
++ __entry->fid = fid;
++ __entry->command = command;
++ ),
++ TP_printk("xid=%u fid=0x%llx ioctl cmd=0x%x",
++ __entry->xid, __entry->fid, __entry->command)
++)
++
++#define DEFINE_SMB3_IOCTL_EVENT(name) \
++DEFINE_EVENT(smb3_ioctl_class, smb3_##name, \
++ TP_PROTO(unsigned int xid, \
++ __u64 fid, \
++ unsigned int command), \
++ TP_ARGS(xid, fid, command))
++
++DEFINE_SMB3_IOCTL_EVENT(ioctl);
++
++
++
++
++
+ DECLARE_EVENT_CLASS(smb3_credit_class,
+ TP_PROTO(__u64 currmid,
+ __u64 conn_id,
+@@ -1084,6 +1189,30 @@ DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
+ DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
+ DEFINE_SMB3_CREDIT_EVENT(set_credits);
+
++
++TRACE_EVENT(smb3_tcon_ref,
++ TP_PROTO(unsigned int tcon_debug_id, int ref,
++ enum smb3_tcon_ref_trace trace),
++ TP_ARGS(tcon_debug_id, ref, trace),
++ TP_STRUCT__entry(
++ __field(unsigned int, tcon)
++ __field(int, ref)
++ __field(enum smb3_tcon_ref_trace, trace)
++ ),
++ TP_fast_assign(
++ __entry->tcon = tcon_debug_id;
++ __entry->ref = ref;
++ __entry->trace = trace;
++ ),
++ TP_printk("TC=%08x %s r=%u",
++ __entry->tcon,
++ __print_symbolic(__entry->trace, smb3_tcon_ref_traces),
++ __entry->ref)
++ );
++
++
++#undef EM
++#undef E_
+ #endif /* _CIFS_TRACE_H */
+
+ #undef TRACE_INCLUDE_PATH
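
The EM()/E_() dance in the trace.h hunk above is the usual X-macro pattern: one list of (symbol, string) pairs expanded three times — first as the enum itself, then as TRACE_DEFINE_ENUM() exports, then as the symbol-to-string map consumed by __print_symbolic(). A self-contained illustration of the pattern in plain C, outside the tracing infrastructure:

    #include <stdio.h>

    #define COLOR_LIST \
            EM(COLOR_RED,   "red")   \
            EM(COLOR_GREEN, "green") \
            E_(COLOR_BLUE,  "blue")

    /* Expansion 1: the enum itself */
    #define EM(a, b) a,
    #define E_(a, b) a
    enum color { COLOR_LIST };
    #undef EM
    #undef E_

    /* Expansion 2: a name table, analogous to __print_symbolic()'s map */
    #define EM(a, b) { a, b },
    #define E_(a, b) { a, b }
    static const struct { enum color c; const char *name; } color_names[] = {
            COLOR_LIST
    };
    #undef EM
    #undef E_

    int main(void)
    {
            for (size_t i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
                    printf("%d = %s\n", color_names[i].c, color_names[i].name);
            return 0;
    }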
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 14710afdc2a36c..ddf1a3aafee5c6 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+ return temp;
+ }
+
+-static void __release_mid(struct kref *refcount)
++void __release_mid(struct kref *refcount)
+ {
+ struct mid_q_entry *midEntry =
+ container_of(refcount, struct mid_q_entry, refcount);
+@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
+ mempool_free(midEntry, cifs_mid_poolp);
+ }
+
+-void release_mid(struct mid_q_entry *mid)
+-{
+- struct TCP_Server_Info *server = mid->server;
+-
+- spin_lock(&server->mid_lock);
+- kref_put(&mid->refcount, __release_mid);
+- spin_unlock(&server->mid_lock);
+-}
+-
+ void
+ delete_mid(struct mid_q_entry *mid)
+ {
+@@ -409,10 +400,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ server->conn_id, server->hostname);
+ }
+ smbd_done:
+- if (rc < 0 && rc != -EINTR)
++ /*
++ * there's hardly any use for the layers above to know the
++ * actual error code here. All they should do at this point is
++ * to retry the connection and hope it goes away.
++ */
++ if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
+ cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
+ rc);
+- else if (rc > 0)
++ rc = -ECONNABORTED;
++ cifs_signal_cifsd_for_reconnect(server, false);
++ } else if (rc > 0)
+ rc = 0;
+ out:
+ cifs_in_send_dec(server);
+@@ -437,8 +435,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ if (!(flags & CIFS_TRANSFORM_REQ))
+ return __smb_send_rqst(server, num_rqst, rqst);
+
+- if (num_rqst > MAX_COMPOUND - 1)
+- return -ENOMEM;
++ if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
++ return -EIO;
+
+ if (!server->ops->init_transform_rq) {
+ cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
+@@ -911,12 +909,15 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
+ }
++ spin_unlock(&server->mid_lock);
+ cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
+ __func__, mid->mid, mid->mid_state);
+ rc = -EIO;
++ goto sync_mid_done;
+ }
+ spin_unlock(&server->mid_lock);
+
++sync_mid_done:
+ release_mid(mid);
+ return rc;
+ }
+@@ -1032,7 +1033,10 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+ spin_lock(&ses->chan_lock);
+ for (i = 0; i < ses->chan_count; i++) {
+ server = ses->chans[i].server;
+- if (!server)
++ if (!server || server->terminate)
++ continue;
++
++ if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
+ continue;
+
+ /*
+@@ -1056,9 +1060,11 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+ index = (uint)atomic_inc_return(&ses->chan_seq);
+ index %= ses->chan_count;
+ }
++
++ server = ses->chans[index].server;
+ spin_unlock(&ses->chan_lock);
+
+- return ses->chans[index].server;
++ return server;
+ }
+
+ int
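
The cifs_pick_channel() hunk above also fixes a subtle race: the chosen server pointer is now read while chan_lock is still held, rather than re-reading ses->chans[index].server after the unlock, where the array could have changed underneath. A sketch of the snapshot-under-lock shape, with hypothetical names and a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>

    struct chan { void *server; };

    struct session {
            pthread_mutex_t chan_lock;
            struct chan chans[4];
            unsigned int chan_seq;
            unsigned int chan_count;
    };

    void *pick_channel(struct session *ses)
    {
            void *server;
            unsigned int index;

            pthread_mutex_lock(&ses->chan_lock);
            index = ses->chan_seq++ % ses->chan_count;
            server = ses->chans[index].server;      /* snapshot under the lock ... */
            pthread_mutex_unlock(&ses->chan_lock);

            return server;                          /* ... never re-read after it */
    }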
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+index 4ad5531686d81a..c2bf829310bee2 100644
+--- a/fs/smb/client/xattr.c
++++ b/fs/smb/client/xattr.c
+@@ -150,10 +150,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ goto out;
+
+- if (pTcon->ses->server->ops->set_EA)
++ if (pTcon->ses->server->ops->set_EA) {
+ rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+ full_path, name, value, (__u16)size,
+ cifs_sb->local_nls, cifs_sb);
++ if (rc == 0)
++ inode_set_ctime_current(inode);
++ }
+ break;
+
+ case XATTR_CIFS_ACL:
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 319fb9ffc6a032..c3ee42188d252e 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -34,6 +34,7 @@
+ #define SMB2_QUERY_INFO_HE 0x0010
+ #define SMB2_SET_INFO_HE 0x0011
+ #define SMB2_OPLOCK_BREAK_HE 0x0012
++#define SMB2_SERVER_TO_CLIENT_NOTIFICATION 0x0013
+
+ /* The same list in little endian */
+ #define SMB2_NEGOTIATE cpu_to_le16(SMB2_NEGOTIATE_HE)
+@@ -207,38 +208,45 @@ struct smb2_transform_hdr {
+ __le64 SessionId;
+ } __packed;
+
++/*
++ * These are simplified versions from the spec, as we don't need a fully fledged
++ * form of both unchained and chained structs.
++ *
++ * Moreover, even in chained compressed payloads, the initial compression header
++ * has the form of the unchained one -- i.e. it never has the
++ * OriginalPayloadSize field, and the ::Offset field always represents an offset
++ * (instead of a length, as it is in the chained header).
++ *
++ * See MS-SMB2 2.2.42 for more details.
++ */
++#define SMB2_COMPRESSION_FLAG_NONE 0x0000
++#define SMB2_COMPRESSION_FLAG_CHAINED 0x0001
+
+-/* See MS-SMB2 2.2.42 */
+-struct smb2_compression_transform_hdr_unchained {
+- __le32 ProtocolId; /* 0xFC 'S' 'M' 'B' */
++struct smb2_compression_hdr {
++ __le32 ProtocolId; /* 0xFC 'S' 'M' 'B' */
+ __le32 OriginalCompressedSegmentSize;
+ __le16 CompressionAlgorithm;
+ __le16 Flags;
+- __le16 Length; /* if chained it is length, else offset */
++ __le32 Offset; /* this is the size of the uncompressed SMB2 header below */
++ /* uncompressed SMB2 header (READ or WRITE) goes here */
++ /* compressed data goes here */
+ } __packed;
+
+-/* See MS-SMB2 2.2.42.1 */
+-#define SMB2_COMPRESSION_FLAG_NONE 0x0000
+-#define SMB2_COMPRESSION_FLAG_CHAINED 0x0001
+-
+-struct compression_payload_header {
++/*
++ * ... OTOH, set compression payload header to always have OriginalPayloadSize
++ * as it's easier to pass the struct size minus sizeof(OriginalPayloadSize)
++ * than to juggle around the header/data memory.
++ */
++struct smb2_compression_payload_hdr {
+ __le16 CompressionAlgorithm;
+ __le16 Flags;
+ 	__le32	Length; /* length of compressed payload including field below if present */
+- /* __le32 OriginalPayloadSize; */ /* optional, present when LZNT1, LZ77, LZ77+Huffman */
++ __le32 OriginalPayloadSize; /* accounted when LZNT1, LZ77, LZ77+Huffman */
+ } __packed;
+
+-/* See MS-SMB2 2.2.42.2 */
+-struct smb2_compression_transform_hdr_chained {
+- __le32 ProtocolId; /* 0xFC 'S' 'M' 'B' */
+- __le32 OriginalCompressedSegmentSize;
+- /* struct compression_payload_header[] */
+-} __packed;
+-
+-/* See MS-SMB2 2.2.42.2.2 */
+-struct compression_pattern_payload_v1 {
+- __le16 Pattern;
+- __le16 Reserved1;
++struct smb2_compression_pattern_v1 {
++ __u8 Pattern;
++ __u8 Reserved1;
+ __le16 Reserved2;
+ __le32 Repetitions;
+ } __packed;
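
Per the comment above, the reworked smb2_compression_hdr makes ::Offset the length of the uncompressed SMB2 header that immediately follows it, so the compressed bytes begin at sizeof(header) + Offset. A hedged user-space sketch of that layout arithmetic (stand-in types, host-endian for brevity where the real fields are little-endian):

    #include <stdint.h>

    struct compression_hdr_stub {       /* stand-in for smb2_compression_hdr */
            uint32_t protocol_id;       /* 0xFC 'S' 'M' 'B' */
            uint32_t orig_segment_size;
            uint16_t algorithm;
            uint16_t flags;
            uint32_t offset;            /* bytes of uncompressed SMB2 header below */
    };

    static const uint8_t *compressed_data(const uint8_t *frame)
    {
            const struct compression_hdr_stub *hdr = (const void *)frame;

            /* header, then the uncompressed READ/WRITE header, then payload */
            return frame + sizeof(*hdr) + hdr->offset;
    }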
+@@ -272,15 +280,16 @@ struct smb3_blob_data {
+ #define SE_GROUP_RESOURCE 0x20000000
+ #define SE_GROUP_LOGON_ID 0xC0000000
+
+-/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
+-
+ struct sid_array_data {
+ __le16 SidAttrCount;
+ /* SidAttrList - array of sid_attr_data structs */
+ } __packed;
+
+-struct luid_attr_data {
+-
++/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
++struct sid_attr_data {
++ __le16 BlobSize;
++ __u8 BlobData[];
++ /* __le32 Attr */
+ } __packed;
+
+ /*
+@@ -411,6 +420,7 @@ struct smb2_tree_disconnect_rsp {
+ #define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
+ #define SMB2_GLOBAL_CAP_DIRECTORY_LEASING 0x00000020 /* New to SMB3 */
+ #define SMB2_GLOBAL_CAP_ENCRYPTION 0x00000040 /* New to SMB3 */
++#define SMB2_GLOBAL_CAP_NOTIFICATIONS 0x00000080 /* New to SMB3.1.1 */
+ /* Internal types */
+ #define SMB2_NT_FIND 0x00100000
+ #define SMB2_LARGE_FILES 0x00200000
+@@ -493,6 +503,7 @@ struct smb2_encryption_neg_context {
+ #define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
+ /* Pattern scanning algorithm See MS-SMB2 3.1.4.4.1 */
+ #define SMB3_COMPRESS_PATTERN cpu_to_le16(0x0004) /* Pattern_V1 */
++#define SMB3_COMPRESS_LZ4 cpu_to_le16(0x0005)
+
+ /* Compression Flags */
+ #define SMB2_COMPRESSION_CAPABILITIES_FLAG_NONE cpu_to_le32(0x00000000)
+@@ -700,13 +711,16 @@ struct smb2_close_rsp {
+ __le16 StructureSize; /* 60 */
+ __le16 Flags;
+ __le32 Reserved;
+- __le64 CreationTime;
+- __le64 LastAccessTime;
+- __le64 LastWriteTime;
+- __le64 ChangeTime;
+- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
+- __le64 EndOfFile;
+- __le32 Attributes;
++ struct_group_attr(network_open_info, __packed,
++ __le64 CreationTime;
++ __le64 LastAccessTime;
++ __le64 LastWriteTime;
++ __le64 ChangeTime;
++ /* Beginning of FILE_STANDARD_INFO equivalent */
++ __le64 AllocationSize;
++ __le64 EndOfFile;
++ __le32 Attributes;
++ );
+ } __packed;
+
+
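
The struct_group_attr() change above wraps the timestamp/size fields of the close response in a union, so they stay addressable individually while also forming one named, sized member; a memcpy of exactly that group then satisfies FORTIFY_SOURCE bounds checking. An imitation of the idea in plain C11 (not the kernel macro itself, and with a trimmed field list):

    #include <string.h>
    #include <stdint.h>

    struct close_rsp_stub {
            uint16_t structure_size;
            union {
                    struct {                        /* flat access ... */
                            uint64_t creation_time;
                            uint64_t end_of_file;
                            uint32_t attributes;
                    };
                    struct open_info_stub {         /* ... or grouped access */
                            uint64_t creation_time;
                            uint64_t end_of_file;
                            uint32_t attributes;
                    } network_open_info;
            };
    };

    static void copy_open_info(struct close_rsp_stub *dst,
                               const struct open_info_stub *src)
    {
            /* the copy is exactly the size of the group, no more */
            memcpy(&dst->network_open_info, src, sizeof(dst->network_open_info));
    }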
+@@ -903,6 +917,40 @@ struct smb2_query_directory_rsp {
+ __u8 Buffer[];
+ } __packed;
+
++/* DeviceType Flags */
++#define FILE_DEVICE_CD_ROM 0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
++#define FILE_DEVICE_DFS 0x00000006
++#define FILE_DEVICE_DISK 0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
++#define FILE_DEVICE_FILE_SYSTEM 0x00000009
++#define FILE_DEVICE_NAMED_PIPE 0x00000011
++#define FILE_DEVICE_NETWORK 0x00000012
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL 0x00000015
++#define FILE_DEVICE_PARALLEL_PORT 0x00000016
++#define FILE_DEVICE_PRINTER 0x00000018
++#define FILE_DEVICE_SERIAL_PORT 0x0000001b
++#define FILE_DEVICE_STREAMS 0x0000001e
++#define FILE_DEVICE_TAPE 0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
++#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
++#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
++
++/* Device Characteristics */
++#define FILE_REMOVABLE_MEDIA 0x00000001
++#define FILE_READ_ONLY_DEVICE 0x00000002
++#define FILE_FLOPPY_DISKETTE 0x00000004
++#define FILE_WRITE_ONCE_MEDIA 0x00000008
++#define FILE_REMOTE_DEVICE 0x00000010
++#define FILE_DEVICE_IS_MOUNTED 0x00000020
++#define FILE_VIRTUAL_VOLUME 0x00000040
++#define FILE_DEVICE_SECURE_OPEN 0x00000100
++#define FILE_CHARACTERISTIC_TS_DEVICE 0x00001000
++#define FILE_CHARACTERISTIC_WEBDAV_DEVICE 0x00002000
++#define FILE_PORTABLE_DEVICE 0x00004000
++#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
++
+ /*
+ * Maximum number of iovs we need for a set-info request.
+ * The largest one is rename/hardlink
+@@ -981,6 +1029,19 @@ struct smb2_change_notify_rsp {
+ __u8 Buffer[]; /* array of file notify structs */
+ } __packed;
+
++/*
++ * SMB2_SERVER_TO_CLIENT_NOTIFICATION: See MS-SMB2 section 2.2.44
++ */
++
++#define SMB2_NOTIFY_SESSION_CLOSED 0x0000
++
++struct smb2_server_client_notification {
++ struct smb2_hdr hdr;
++ __le16 StructureSize;
++ __u16 Reserved; /* MBZ */
++ __le32 NotificationType;
++ __u8 NotificationBuffer[4]; /* MBZ */
++} __packed;
+
+ /*
+ * SMB2_CREATE See MS-SMB2 section 2.2.13
+@@ -1097,16 +1158,23 @@ struct smb2_change_notify_rsp {
+ #define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
+ #define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
+ #define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
++/* FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010) should be zero, ignored */
++/* FILE_SYNCHRONOUS_IO_NONALERT cpu_to_le32(0x00000020) should be zero, ignored */
+ #define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
+ #define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
+ #define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
++/* FILE_OPEN_REMOTE_INSTANCE cpu_to_le32(0x00000400) should be zero, ignored */
+ #define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
+-#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
++#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000) /* MBZ */
+ #define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
+ #define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
+ #define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
++/* FILE_OPEN_REQUIRING_OPLOCK cpu_to_le32(0x00010000) should be zero, ignored */
++/* FILE_DISALLOW_EXCLUSIVE cpu_to_le32(0x00020000) should be zero, ignored */
++/* FILE_RESERVE_OPFILTER cpu_to_le32(0x00100000) MBZ */
+ #define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
+ #define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
++/* #define FILE_OPEN_FOR_FREE_SPACE_QUERY cpu_to_le32(0x00800000) should be zero, ignored */
+ #define CREATE_OPTIONS_MASK_LE cpu_to_le32(0x00FFFFFF)
+
+ #define FILE_READ_RIGHTS_LE (FILE_READ_DATA_LE | FILE_READ_EA_LE \
+@@ -1120,7 +1188,7 @@ struct smb2_change_notify_rsp {
+ #define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
+ #define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
+ #define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
+-#define SMB2_CREATE_ALLOCATION_SIZE "AISi"
++#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
+ #define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
+ #define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
+ #define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
+@@ -1137,12 +1205,15 @@ struct smb2_change_notify_rsp {
+ #define SMB2_CREATE_FLAG_REPARSEPOINT 0x01
+
+ struct create_context {
+- __le32 Next;
+- __le16 NameOffset;
+- __le16 NameLength;
+- __le16 Reserved;
+- __le16 DataOffset;
+- __le32 DataLength;
++ /* New members must be added within the struct_group() macro below. */
++ __struct_group(create_context_hdr, hdr, __packed,
++ __le32 Next;
++ __le16 NameOffset;
++ __le16 NameLength;
++ __le16 Reserved;
++ __le16 DataOffset;
++ __le32 DataLength;
++ );
+ __u8 Buffer[];
+ } __packed;
+
+@@ -1188,7 +1259,7 @@ struct smb2_create_rsp {
+ } __packed;
+
+ struct create_posix {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[16];
+ __le32 Mode;
+ __u32 Reserved;
+@@ -1196,7 +1267,7 @@ struct create_posix {
+
+ /* See MS-SMB2 2.2.13.2.3 and MS-SMB2 2.2.13.2.4 */
+ struct create_durable {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ union {
+ __u8 Reserved[16];
+@@ -1209,14 +1280,14 @@ struct create_durable {
+
+ /* See MS-SMB2 2.2.13.2.5 */
+ struct create_mxac_req {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le64 Timestamp;
+ } __packed;
+
+ /* See MS-SMB2 2.2.14.2.5 */
+ struct create_mxac_rsp {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le32 QueryStatus;
+ __le32 MaximalAccess;
+@@ -1228,6 +1299,7 @@ struct create_mxac_rsp {
+ #define SMB2_LEASE_WRITE_CACHING_LE cpu_to_le32(0x04)
+
+ #define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02)
++#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE cpu_to_le32(0x04)
+
+ #define SMB2_LEASE_KEY_SIZE 16
+
+@@ -1251,13 +1323,13 @@ struct lease_context_v2 {
+ } __packed;
+
+ struct create_lease {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct lease_context lcontext;
+ } __packed;
+
+ struct create_lease_v2 {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct lease_context_v2 lcontext;
+ __u8 Pad[4];
+@@ -1265,7 +1337,7 @@ struct create_lease_v2 {
+
+ /* See MS-SMB2 2.2.14.2.9 */
+ struct create_disk_id_rsp {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le64 DiskFileId;
+ __le64 VolumeId;
+@@ -1274,7 +1346,7 @@ struct create_disk_id_rsp {
+
+ /* See MS-SMB2 2.2.13.2.13 */
+ struct create_app_inst_id {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[16];
+ __le32 StructureSize; /* Must be 20 */
+ __u16 Reserved;
+@@ -1283,7 +1355,7 @@ struct create_app_inst_id {
+
+ /* See MS-SMB2 2.2.13.2.15 */
+ struct create_app_inst_id_vers {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[16];
+ __le32 StructureSize; /* Must be 24 */
+ __u16 Reserved;
+diff --git a/fs/smb/common/smbfsctl.h b/fs/smb/common/smbfsctl.h
+index edd7fc2a7921b8..a94d658b88e86b 100644
+--- a/fs/smb/common/smbfsctl.h
++++ b/fs/smb/common/smbfsctl.h
+@@ -158,12 +158,6 @@
+ #define IO_REPARSE_TAG_LX_CHR 0x80000025
+ #define IO_REPARSE_TAG_LX_BLK 0x80000026
+
+-#define IO_REPARSE_TAG_LX_SYMLINK_LE cpu_to_le32(0xA000001D)
+-#define IO_REPARSE_TAG_AF_UNIX_LE cpu_to_le32(0x80000023)
+-#define IO_REPARSE_TAG_LX_FIFO_LE cpu_to_le32(0x80000024)
+-#define IO_REPARSE_TAG_LX_CHR_LE cpu_to_le32(0x80000025)
+-#define IO_REPARSE_TAG_LX_BLK_LE cpu_to_le32(0x80000026)
+-
+ /* fsctl flags */
+ /* If Flags is set to this value, the request is an FSCTL not ioctl request */
+ #define SMB2_0_IOCTL_IS_FSCTL 0x00000001
+diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
+index 4a4b2b03ff33df..b931a99ab9c85e 100644
+--- a/fs/smb/server/asn1.c
++++ b/fs/smb/server/asn1.c
+@@ -214,10 +214,15 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
+ {
+ struct ksmbd_conn *conn = context;
+
++ if (!vlen)
++ return -EINVAL;
++
+ conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
+ if (!conn->mechToken)
+ return -ENOMEM;
+
++ conn->mechTokenLen = (unsigned int)vlen;
++
+ return 0;
+ }
+
+diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
+index 229a6527870d0e..09b20039636e75 100644
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -208,10 +208,12 @@ static int calc_ntlmv2_hash(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+
+ /**
+ * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
+- * @sess: session of connection
++ * @conn: connection
++ * @sess: session of connection
+ * @ntlmv2: NTLMv2 challenge response
+ * @blen: NTLMv2 blob length
+ * @domain_name: domain name
++ * @cryptkey: session crypto key
+ *
+ * Return: 0 on success, error number on error
+ */
+@@ -294,7 +296,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
+ * authenticate blob
+ * @authblob: authenticate blob source pointer
+- * @usr: user details
++ * @blob_len: length of the @authblob message
++ * @conn: connection
+ * @sess: session of connection
+ *
+ * Return: 0 on success, error number on error
+@@ -376,8 +379,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
+ * ksmbd_decode_ntlmssp_neg_blob() - helper function to construct
+ * negotiate blob
+ * @negblob: negotiate blob source pointer
+- * @rsp: response header pointer to be updated
+- * @sess: session of connection
++ * @blob_len:		length of the @negblob message
++ * @conn: connection
+ *
+ */
+ int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
+@@ -403,8 +406,7 @@ int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
+ * ksmbd_build_ntlmssp_challenge_blob() - helper function to construct
+ * challenge blob
+ * @chgblob: challenge blob source pointer to initialize
+- * @rsp: response header pointer to be updated
+- * @sess: session of connection
++ * @conn: connection
+ *
+ */
+ unsigned int
+diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
+index 4b38c3a285f602..cac80e7bfefc74 100644
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -39,7 +39,8 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ xa_destroy(&conn->sessions);
+ kvfree(conn->request_buf);
+ kfree(conn->preauth_info);
+- kfree(conn);
++ if (atomic_dec_and_test(&conn->refcnt))
++ kfree(conn);
+ }
+
+ /**
+@@ -68,6 +69,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ conn->um = NULL;
+ atomic_set(&conn->req_running, 0);
+ atomic_set(&conn->r_count, 0);
++ atomic_set(&conn->refcnt, 1);
+ conn->total_credits = 1;
+ conn->outstanding_credits = 0;
+
+@@ -165,25 +167,41 @@ void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ up_read(&conn_list_lock);
+ }
+
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
+ {
+- struct ksmbd_conn *bind_conn;
+-
+ wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++}
+
+- down_read(&conn_list_lock);
+- list_for_each_entry(bind_conn, &conn_list, conns_list) {
+- if (bind_conn == conn)
+- continue;
++int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
++{
++ struct ksmbd_conn *conn;
++ int rc, retry_count = 0, max_timeout = 120;
++ int rcount = 1;
+
+- if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
+- !ksmbd_conn_releasing(bind_conn) &&
+- atomic_read(&bind_conn->req_running)) {
+- wait_event(bind_conn->req_running_q,
+- atomic_read(&bind_conn->req_running) == 0);
++retry_idle:
++ if (retry_count >= max_timeout)
++ return -EIO;
++
++ down_read(&conn_list_lock);
++ list_for_each_entry(conn, &conn_list, conns_list) {
++ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
++ if (conn == curr_conn)
++ rcount = 2;
++ if (atomic_read(&conn->req_running) >= rcount) {
++ rc = wait_event_timeout(conn->req_running_q,
++ atomic_read(&conn->req_running) < rcount,
++ HZ);
++ if (!rc) {
++ up_read(&conn_list_lock);
++ retry_count++;
++ goto retry_idle;
++ }
++ }
+ }
+ }
+ up_read(&conn_list_lock);
++
++ return 0;
+ }
+
+ int ksmbd_conn_write(struct ksmbd_work *work)
+@@ -300,6 +318,7 @@ int ksmbd_conn_handler_loop(void *p)
+ goto out;
+
+ conn->last_active = jiffies;
++ set_freezable();
+ while (ksmbd_conn_alive(conn)) {
+ if (try_to_freeze())
+ continue;
+@@ -431,13 +450,7 @@ static void stop_sessions(void)
+ again:
+ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list) {
+- struct task_struct *task;
+-
+ t = conn->transport;
+- task = t->handler;
+- if (task)
+- ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+- task->comm, task_pid_nr(task));
+ ksmbd_conn_set_exiting(conn);
+ if (t->ops->shutdown) {
+ up_read(&conn_list_lock);
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 3c005246a32e8d..82343afc8d0499 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -88,6 +88,7 @@ struct ksmbd_conn {
+ __u16 dialect;
+
+ char *mechToken;
++ unsigned int mechTokenLen;
+
+ struct ksmbd_conn_ops *conn_ops;
+
+@@ -105,6 +106,7 @@ struct ksmbd_conn {
+ bool signing_negotiated;
+ __le16 signing_algorithm;
+ bool binding;
++ atomic_t refcnt;
+ };
+
+ struct ksmbd_conn_ops {
+@@ -134,7 +136,6 @@ struct ksmbd_transport_ops {
+ struct ksmbd_transport {
+ struct ksmbd_conn *conn;
+ struct ksmbd_transport_ops *ops;
+- struct task_struct *handler;
+ };
+
+ #define KSMBD_TCP_RECV_TIMEOUT (7 * HZ)
+@@ -145,7 +146,8 @@ extern struct list_head conn_list;
+ extern struct rw_semaphore conn_list_lock;
+
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index b7521e41402e00..f4e55199938d58 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -75,6 +75,7 @@ struct ksmbd_heartbeat {
+ #define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION BIT(1)
+ #define KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL BIT(2)
+ #define KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF BIT(3)
++#define KSMBD_GLOBAL_FLAG_DURABLE_HANDLE BIT(4)
+
+ /*
+ * IPC request for ksmbd server startup
+@@ -166,7 +167,8 @@ struct ksmbd_share_config_response {
+ __u16 force_uid;
+ __u16 force_gid;
+ __s8 share_name[KSMBD_REQ_MAX_SHARE_NAME];
+- __u32 reserved[112]; /* Reserved room */
++ __u32 reserved[111]; /* Reserved room */
++ __u32 payload_sz;
+ __u32 veto_list_sz;
+ __s8 ____payload[];
+ };
+@@ -304,7 +306,8 @@ enum ksmbd_event {
+ KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
+ KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE = 15,
+
+- KSMBD_EVENT_MAX
++ __KSMBD_EVENT_MAX,
++ KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
+ };
+
+ /*
+@@ -337,23 +340,24 @@ enum KSMBD_TREE_CONN_STATUS {
+ /*
+ * Share config flags.
+ */
+-#define KSMBD_SHARE_FLAG_INVALID (0)
+-#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0)
+-#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1)
+-#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2)
+-#define KSMBD_SHARE_FLAG_READONLY BIT(3)
+-#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4)
+-#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5)
+-#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6)
+-#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7)
+-#define KSMBD_SHARE_FLAG_PIPE BIT(8)
+-#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9)
+-#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10)
+-#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
+-#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
+-#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
+-#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
+-#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
++#define KSMBD_SHARE_FLAG_INVALID (0)
++#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0)
++#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1)
++#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2)
++#define KSMBD_SHARE_FLAG_READONLY BIT(3)
++#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4)
++#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5)
++#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6)
++#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7)
++#define KSMBD_SHARE_FLAG_PIPE BIT(8)
++#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9)
++#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10)
++#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
++#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
++#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
++#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
++#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
++#define KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY BIT(16)
+
+ /*
+ * Tree connect request flags.
+diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c
+index 51def3ca74c018..d7c676c151e209 100644
+--- a/fs/smb/server/ksmbd_work.c
++++ b/fs/smb/server/ksmbd_work.c
+@@ -56,6 +56,9 @@ void ksmbd_free_work_struct(struct ksmbd_work *work)
+ kfree(work->tr_buf);
+ kvfree(work->request_buf);
+ kfree(work->iov);
++ if (!list_empty(&work->interim_entry))
++ list_del(&work->interim_entry);
++
+ if (work->async_id)
+ ksmbd_release_id(&work->conn->async_ida, work->async_id);
+ kmem_cache_free(work_cache, work);
+@@ -95,32 +98,42 @@ bool ksmbd_queue_work(struct ksmbd_work *work)
+ return queue_work(ksmbd_wq, &work->work);
+ }
+
+-static int ksmbd_realloc_iov_pin(struct ksmbd_work *work, void *ib,
+- unsigned int ib_len)
++static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
++ unsigned int ib_len)
++{
++ work->iov[++work->iov_idx].iov_base = ib;
++ work->iov[work->iov_idx].iov_len = ib_len;
++ work->iov_cnt++;
++}
++
++static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
++ void *aux_buf, unsigned int aux_size)
+ {
++ struct aux_read *ar = NULL;
++ int need_iov_cnt = 1;
++
++ if (aux_size) {
++ need_iov_cnt++;
++ ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
++ if (!ar)
++ return -ENOMEM;
++ }
+
+- if (work->iov_alloc_cnt <= work->iov_cnt) {
++ if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
+ struct kvec *new;
+
+ work->iov_alloc_cnt += 4;
+ new = krealloc(work->iov,
+ sizeof(struct kvec) * work->iov_alloc_cnt,
+ GFP_KERNEL | __GFP_ZERO);
+- if (!new)
++ if (!new) {
++ kfree(ar);
++ work->iov_alloc_cnt -= 4;
+ return -ENOMEM;
++ }
+ work->iov = new;
+ }
+
+- work->iov[++work->iov_idx].iov_base = ib;
+- work->iov[work->iov_idx].iov_len = ib_len;
+- work->iov_cnt++;
+-
+- return 0;
+-}
+-
+-static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
+- void *aux_buf, unsigned int aux_size)
+-{
+ /* Plus rfc_length size on first iov */
+ if (!work->iov_idx) {
+ work->iov[work->iov_idx].iov_base = work->response_buf;
+@@ -129,19 +142,13 @@ static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
+ work->iov_cnt++;
+ }
+
+- ksmbd_realloc_iov_pin(work, ib, len);
++ __ksmbd_iov_pin(work, ib, len);
+ inc_rfc1001_len(work->iov[0].iov_base, len);
+
+ if (aux_size) {
+- struct aux_read *ar;
+-
+- ksmbd_realloc_iov_pin(work, aux_buf, aux_size);
++ __ksmbd_iov_pin(work, aux_buf, aux_size);
+ inc_rfc1001_len(work->iov[0].iov_base, aux_size);
+
+- ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
+- if (!ar)
+- return -ENOMEM;
+-
+ ar->buf = aux_buf;
+ list_add(&ar->entry, &work->aux_read_list);
+ }
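
The reworked __ksmbd_iov_pin_rsp() above allocates everything it might need — the aux_read node and the enlarged iov array — before it touches any state, so -ENOMEM can no longer leave a half-pinned response behind (the old code pinned first and allocated the aux node last). A small user-space sketch of the same grow-then-commit shape (names are illustrative, not the kernel's):

    #include <stdlib.h>
    #include <string.h>

    struct vec { void *base; size_t len; };

    struct work_stub {
            struct vec *iov;
            size_t cnt, cap;
    };

    static int pin(struct work_stub *w, void *buf, size_t len)
    {
            if (w->cnt + 1 > w->cap) {              /* reserve capacity first */
                    size_t ncap = w->cap + 4;
                    struct vec *niov = realloc(w->iov, ncap * sizeof(*niov));

                    if (!niov)
                            return -1;              /* nothing was modified yet */
                    memset(niov + w->cap, 0, (ncap - w->cap) * sizeof(*niov));
                    w->iov = niov;
                    w->cap = ncap;
            }
            /* commit: cannot fail from here on */
            w->iov[w->cnt].base = buf;
            w->iov[w->cnt].len = len;
            w->cnt++;
            return 0;
    }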
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index 328a412259dc1b..d8d03070ae44b4 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -15,6 +15,7 @@
+ #include "share_config.h"
+ #include "user_config.h"
+ #include "user_session.h"
++#include "../connection.h"
+ #include "../transport_ipc.h"
+ #include "../misc.h"
+
+@@ -120,12 +121,13 @@ static int parse_veto_list(struct ksmbd_share_config *share,
+ return 0;
+ }
+
+-static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
++static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
+ const char *name)
+ {
+ struct ksmbd_share_config_response *resp;
+ struct ksmbd_share_config *share = NULL;
+ struct ksmbd_share_config *lookup;
++ struct unicode_map *um = work->conn->um;
+ int ret;
+
+ resp = ksmbd_ipc_share_config_request(name);
+@@ -158,10 +160,19 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ share->name = kstrdup(name, GFP_KERNEL);
+
+ if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+- share->path = kstrdup(ksmbd_share_config_path(resp),
++ int path_len = PATH_MAX;
++
++ if (resp->payload_sz)
++ path_len = resp->payload_sz - resp->veto_list_sz;
++
++ share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
+ GFP_KERNEL);
+- if (share->path)
++ if (share->path) {
+ share->path_sz = strlen(share->path);
++ while (share->path_sz > 1 &&
++ share->path[share->path_sz - 1] == '/')
++ share->path[--share->path_sz] = '\0';
++ }
+ share->create_mask = resp->create_mask;
+ share->directory_mask = resp->directory_mask;
+ share->force_create_mode = resp->force_create_mode;
+@@ -172,7 +183,14 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ KSMBD_SHARE_CONFIG_VETO_LIST(resp),
+ resp->veto_list_sz);
+ if (!ret && share->path) {
++ if (__ksmbd_override_fsids(work, share)) {
++ kill_share(share);
++ share = NULL;
++ goto out;
++ }
++
+ ret = kern_path(share->path, 0, &share->vfs_path);
++ ksmbd_revert_fsids(work);
+ if (ret) {
+ ksmbd_debug(SMB, "failed to access '%s'\n",
+ share->path);
+@@ -205,7 +223,7 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ return share;
+ }
+
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
+ const char *name)
+ {
+ struct ksmbd_share_config *share;
+@@ -218,7 +236,7 @@ struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+
+ if (share)
+ return share;
+- return share_config_request(um, name);
++ return share_config_request(work, name);
+ }
+
+ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
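
The share path handling above bounds the copy (kstrndup with the IPC payload size rather than an unbounded kstrdup) and then strips trailing '/' characters so later prefix comparisons see a canonical path. The same trim, stand-alone:

    #include <stdio.h>
    #include <string.h>

    static size_t trim_trailing_slashes(char *path)
    {
            size_t len = strlen(path);

            /* keep at least one character so "/" stays "/" */
            while (len > 1 && path[len - 1] == '/')
                    path[--len] = '\0';
            return len;
    }

    int main(void)
    {
            char p[] = "/srv/share///";

            printf("%s (len %zu)\n", p, trim_trailing_slashes(p));  /* /srv/share */
            return 0;
    }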
+diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
+index 5f591751b92365..d4ac2dd4de2040 100644
+--- a/fs/smb/server/mgmt/share_config.h
++++ b/fs/smb/server/mgmt/share_config.h
+@@ -11,6 +11,8 @@
+ #include <linux/path.h>
+ #include <linux/unicode.h>
+
++struct ksmbd_work;
++
+ struct ksmbd_share_config {
+ char *name;
+ char *path;
+@@ -68,7 +70,7 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
+ __ksmbd_share_config_put(share);
+ }
+
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
+ const char *name);
+ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+ const char *filename);
+diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
+index d2c81a8a11dda1..94a52a75014a43 100644
+--- a/fs/smb/server/mgmt/tree_connect.c
++++ b/fs/smb/server/mgmt/tree_connect.c
+@@ -16,17 +16,18 @@
+ #include "user_session.h"
+
+ struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+- const char *share_name)
++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
+ {
+ struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
+ struct ksmbd_tree_connect_response *resp = NULL;
+ struct ksmbd_share_config *sc;
+ struct ksmbd_tree_connect *tree_conn = NULL;
+ struct sockaddr *peer_addr;
++ struct ksmbd_conn *conn = work->conn;
++ struct ksmbd_session *sess = work->sess;
+ int ret;
+
+- sc = ksmbd_share_config_get(conn->um, share_name);
++ sc = ksmbd_share_config_get(work, share_name);
+ if (!sc)
+ return status;
+
+@@ -61,7 +62,7 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ struct ksmbd_share_config *new_sc;
+
+ ksmbd_share_config_del(sc);
+- new_sc = ksmbd_share_config_get(conn->um, share_name);
++ new_sc = ksmbd_share_config_get(work, share_name);
+ if (!new_sc) {
+ pr_err("Failed to update stale share config\n");
+ status.ret = -ESTALE;
+diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
+index 6377a70b811c89..a42cdd05104114 100644
+--- a/fs/smb/server/mgmt/tree_connect.h
++++ b/fs/smb/server/mgmt/tree_connect.h
+@@ -13,6 +13,7 @@
+ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
++struct ksmbd_work;
+
+ enum {
+ TREE_NEW = 0,
+@@ -50,8 +51,7 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
+ struct ksmbd_session;
+
+ struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+- const char *share_name);
++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name);
+ void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
+
+ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+diff --git a/fs/smb/server/mgmt/user_config.h b/fs/smb/server/mgmt/user_config.h
+index 6a44109617f149..e068a19fd90493 100644
+--- a/fs/smb/server/mgmt/user_config.h
++++ b/fs/smb/server/mgmt/user_config.h
+@@ -18,7 +18,6 @@ struct ksmbd_user {
+
+ size_t passkey_sz;
+ char *passkey;
+- unsigned int failed_login_count;
+ };
+
+ static inline bool user_guest(struct ksmbd_user *user)
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 15f68ee0508946..9f40b9c473ba42 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -156,7 +156,7 @@ void ksmbd_session_destroy(struct ksmbd_session *sess)
+ kfree(sess);
+ }
+
+-static struct ksmbd_session *__session_lookup(unsigned long long id)
++struct ksmbd_session *__session_lookup(unsigned long long id)
+ {
+ struct ksmbd_session *sess;
+
+@@ -176,9 +176,10 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+
+ down_write(&conn->session_lock);
+ xa_for_each(&conn->sessions, id, sess) {
+- if (sess->state != SMB2_SESSION_VALID ||
+- time_after(jiffies,
+- sess->last_active + SMB2_SESSION_TIMEOUT)) {
++ if (atomic_read(&sess->refcnt) == 0 &&
++ (sess->state != SMB2_SESSION_VALID ||
++ time_after(jiffies,
++ sess->last_active + SMB2_SESSION_TIMEOUT))) {
+ xa_erase(&conn->sessions, sess->id);
+ hash_del(&sess->hlist);
+ ksmbd_session_destroy(sess);
+@@ -268,8 +269,6 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+
+ down_read(&sessions_table_lock);
+ sess = __session_lookup(id);
+- if (sess)
+- sess->last_active = jiffies;
+ up_read(&sessions_table_lock);
+
+ return sess;
+@@ -288,6 +287,22 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+ return sess;
+ }
+
++void ksmbd_user_session_get(struct ksmbd_session *sess)
++{
++ atomic_inc(&sess->refcnt);
++}
++
++void ksmbd_user_session_put(struct ksmbd_session *sess)
++{
++ if (!sess)
++ return;
++
++ if (atomic_read(&sess->refcnt) <= 0)
++ WARN_ON(1);
++ else
++ atomic_dec(&sess->refcnt);
++}
++
+ struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+ u64 sess_id)
+ {
+@@ -305,6 +320,40 @@ struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+ return sess;
+ }
+
++void destroy_previous_session(struct ksmbd_conn *conn,
++ struct ksmbd_user *user, u64 id)
++{
++ struct ksmbd_session *prev_sess;
++ struct ksmbd_user *prev_user;
++ int err;
++
++ down_write(&sessions_table_lock);
++ down_write(&conn->session_lock);
++ prev_sess = __session_lookup(id);
++ if (!prev_sess || prev_sess->state == SMB2_SESSION_EXPIRED)
++ goto out;
++
++ prev_user = prev_sess->user;
++ if (!prev_user ||
++ strcmp(user->name, prev_user->name) ||
++ user->passkey_sz != prev_user->passkey_sz ||
++ memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
++ goto out;
++
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
++ err = ksmbd_conn_wait_idle_sess_id(conn, id);
++ if (err) {
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++ goto out;
++ }
++ ksmbd_destroy_file_table(&prev_sess->file_table);
++ prev_sess->state = SMB2_SESSION_EXPIRED;
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++out:
++ up_write(&conn->session_lock);
++ up_write(&sessions_table_lock);
++}
++
+ static bool ksmbd_preauth_session_id_match(struct preauth_session *sess,
+ unsigned long long id)
+ {
+@@ -356,6 +405,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ xa_init(&sess->rpc_handle_list);
+ sess->sequence_number = 1;
+ rwlock_init(&sess->tree_conns_lock);
++ atomic_set(&sess->refcnt, 1);
+
+ ret = __init_smb2_session(sess);
+ if (ret)
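
ksmbd_user_session_get()/put() above implement a plain reference count whose put path warns on underflow rather than freeing at zero; ksmbd_expire_session() now tears a session down only once refcnt has dropped back to zero. The same shape in stand-alone C11 atomics, with assert() standing in for WARN_ON:

    #include <stdatomic.h>
    #include <assert.h>
    #include <stddef.h>

    struct sess_stub { atomic_int refcnt; };

    static void sess_get(struct sess_stub *s)
    {
            atomic_fetch_add(&s->refcnt, 1);
    }

    static void sess_put(struct sess_stub *s)
    {
            if (!s)
                    return;
            /* underflow is a bug worth shouting about, not a free */
            assert(atomic_load(&s->refcnt) > 0);
            atomic_fetch_sub(&s->refcnt, 1);
    }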
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+index 63cb08fffde84c..c1c4b20bd5c6cf 100644
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -61,6 +61,8 @@ struct ksmbd_session {
+ struct ksmbd_file_table file_table;
+ unsigned long last_active;
+ rwlock_t tree_conns_lock;
++
++ atomic_t refcnt;
+ };
+
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+@@ -88,8 +90,11 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+ struct ksmbd_session *sess);
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
++struct ksmbd_session *__session_lookup(unsigned long long id);
+ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
+ unsigned long long id);
++void destroy_previous_session(struct ksmbd_conn *conn,
++ struct ksmbd_user *user, u64 id);
+ struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
+ u64 sess_id);
+ struct preauth_session *ksmbd_preauth_session_lookup(struct ksmbd_conn *conn,
+@@ -101,4 +106,6 @@ void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
+ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
+ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
++void ksmbd_user_session_get(struct ksmbd_session *sess);
++void ksmbd_user_session_put(struct ksmbd_session *sess);
+ #endif /* __USER_SESSION_MANAGEMENT_H__ */
+diff --git a/fs/smb/server/misc.c b/fs/smb/server/misc.c
+index 9e8afaa686e3aa..1a5faa6f6e7bc3 100644
+--- a/fs/smb/server/misc.c
++++ b/fs/smb/server/misc.c
+@@ -261,6 +261,7 @@ char *ksmbd_casefold_sharename(struct unicode_map *um, const char *name)
+
+ /**
+ * ksmbd_extract_sharename() - get share name from tree connect request
++ * @um: pointer to a unicode_map structure for character encoding handling
+ * @treename: buffer containing tree name and share name
+ *
+ * Return: share name on success, otherwise error
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 9bc0103720f57c..8ee86478287f93 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -51,6 +51,7 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work,
+ init_waitqueue_head(&opinfo->oplock_brk);
+ atomic_set(&opinfo->refcount, 1);
+ atomic_set(&opinfo->breaking_cnt, 0);
++ atomic_inc(&opinfo->conn->refcnt);
+
+ return opinfo;
+ }
+@@ -102,9 +103,10 @@ static int alloc_lease(struct oplock_info *opinfo, struct lease_ctx_info *lctx)
+ lease->new_state = 0;
+ lease->flags = lctx->flags;
+ lease->duration = lctx->duration;
++ lease->is_dir = lctx->is_dir;
+ memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+ lease->version = lctx->version;
+- lease->epoch = 0;
++ lease->epoch = le16_to_cpu(lctx->epoch) + 1;
+ INIT_LIST_HEAD(&opinfo->lease_entry);
+ opinfo->o_lease = lease;
+
+@@ -123,6 +125,8 @@ static void free_opinfo(struct oplock_info *opinfo)
+ {
+ if (opinfo->is_lease)
+ free_lease(opinfo);
++ if (opinfo->conn && atomic_dec_and_test(&opinfo->conn->refcnt))
++ kfree(opinfo->conn);
+ kfree(opinfo);
+ }
+
+@@ -158,12 +162,11 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+ opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
+ op_entry);
+ if (opinfo) {
+- if (!atomic_inc_not_zero(&opinfo->refcount))
++ if (opinfo->conn == NULL ||
++ !atomic_inc_not_zero(&opinfo->refcount))
+ opinfo = NULL;
+ else {
+- atomic_inc(&opinfo->conn->r_count);
+ if (ksmbd_conn_releasing(opinfo->conn)) {
+- atomic_dec(&opinfo->conn->r_count);
+ atomic_dec(&opinfo->refcount);
+ opinfo = NULL;
+ }
+@@ -175,26 +178,11 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
+ return opinfo;
+ }
+
+-static void opinfo_conn_put(struct oplock_info *opinfo)
++void opinfo_put(struct oplock_info *opinfo)
+ {
+- struct ksmbd_conn *conn;
+-
+ if (!opinfo)
+ return;
+
+- conn = opinfo->conn;
+- /*
+- * Checking waitqueue to dropping pending requests on
+- * disconnection. waitqueue_active is safe because it
+- * uses atomic operation for condition.
+- */
+- if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+- wake_up(&conn->r_count_q);
+- opinfo_put(opinfo);
+-}
+-
+-void opinfo_put(struct oplock_info *opinfo)
+-{
+ if (!atomic_dec_and_test(&opinfo->refcount))
+ return;
+
+@@ -205,9 +193,9 @@ static void opinfo_add(struct oplock_info *opinfo)
+ {
+ struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+
+- write_lock(&ci->m_lock);
++ down_write(&ci->m_lock);
+ list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
+- write_unlock(&ci->m_lock);
++ up_write(&ci->m_lock);
+ }
+
+ static void opinfo_del(struct oplock_info *opinfo)
+@@ -219,9 +207,9 @@ static void opinfo_del(struct oplock_info *opinfo)
+ lease_del_list(opinfo);
+ write_unlock(&lease_list_lock);
+ }
+- write_lock(&ci->m_lock);
++ down_write(&ci->m_lock);
+ list_del_rcu(&opinfo->op_entry);
+- write_unlock(&ci->m_lock);
++ up_write(&ci->m_lock);
+ }
+
+ static unsigned long opinfo_count(struct ksmbd_file *fp)
+@@ -395,8 +383,8 @@ void close_id_del_oplock(struct ksmbd_file *fp)
+ {
+ struct oplock_info *opinfo;
+
+- if (S_ISDIR(file_inode(fp->filp)->i_mode))
+- return;
++ if (fp->reserve_lease_break)
++ smb_lazy_parent_lease_break_close(fp);
+
+ opinfo = opinfo_get(fp);
+ if (!opinfo)
+@@ -524,47 +512,49 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
+ * Compare lease key and client_guid to know request from same owner
+ * of same client
+ */
+- read_lock(&ci->m_lock);
++ down_read(&ci->m_lock);
+ list_for_each_entry(opinfo, &ci->m_op_list, op_entry) {
+- if (!opinfo->is_lease)
++ if (!opinfo->is_lease || !opinfo->conn)
+ continue;
+- read_unlock(&ci->m_lock);
+ lease = opinfo->o_lease;
+
+ ret = compare_guid_key(opinfo, client_guid, lctx->lease_key);
+ if (ret) {
+ m_opinfo = opinfo;
+ /* skip upgrading lease about breaking lease */
+- if (atomic_read(&opinfo->breaking_cnt)) {
+- read_lock(&ci->m_lock);
++ if (atomic_read(&opinfo->breaking_cnt))
+ continue;
+- }
+
+ /* upgrading lease */
+ if ((atomic_read(&ci->op_count) +
+ atomic_read(&ci->sop_count)) == 1) {
+- if (lease->state ==
+- (lctx->req_state & lease->state)) {
++ if (lease->state != SMB2_LEASE_NONE_LE &&
++ lease->state == (lctx->req_state & lease->state)) {
++ lease->epoch++;
+ lease->state |= lctx->req_state;
+ if (lctx->req_state &
+ SMB2_LEASE_WRITE_CACHING_LE)
+ lease_read_to_write(opinfo);
++
+ }
+ } else if ((atomic_read(&ci->op_count) +
+ atomic_read(&ci->sop_count)) > 1) {
+ if (lctx->req_state ==
+ (SMB2_LEASE_READ_CACHING_LE |
+- SMB2_LEASE_HANDLE_CACHING_LE))
++ SMB2_LEASE_HANDLE_CACHING_LE)) {
++ lease->epoch++;
+ lease->state = lctx->req_state;
++ }
+ }
+
+ if (lctx->req_state && lease->state ==
+- SMB2_LEASE_NONE_LE)
++ SMB2_LEASE_NONE_LE) {
++ lease->epoch++;
+ lease_none_upgrade(opinfo, lctx->req_state);
++ }
+ }
+- read_lock(&ci->m_lock);
+ }
+- read_unlock(&ci->m_lock);
++ up_read(&ci->m_lock);
+
+ return m_opinfo;
+ }
+@@ -605,13 +595,28 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+
+ if (opinfo->op_state == OPLOCK_CLOSING)
+ return -ENOENT;
+- else if (!opinfo->is_lease && opinfo->level <= req_op_level)
+- return 1;
++ else if (opinfo->level <= req_op_level) {
++ if (opinfo->is_lease == false)
++ return 1;
++
++ if (opinfo->o_lease->state !=
++ (SMB2_LEASE_HANDLE_CACHING_LE |
++ SMB2_LEASE_READ_CACHING_LE))
++ return 1;
++ }
+ }
+
+- if (!opinfo->is_lease && opinfo->level <= req_op_level) {
+- wake_up_oplock_break(opinfo);
+- return 1;
++ if (opinfo->level <= req_op_level) {
++ if (opinfo->is_lease == false) {
++ wake_up_oplock_break(opinfo);
++ return 1;
++ }
++ if (opinfo->o_lease->state !=
++ (SMB2_LEASE_HANDLE_CACHING_LE |
++ SMB2_LEASE_READ_CACHING_LE)) {
++ wake_up_oplock_break(opinfo);
++ return 1;
++ }
+ }
+ return 0;
+ }
+@@ -634,7 +639,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
+ struct smb2_hdr *rsp_hdr;
+ struct ksmbd_file *fp;
+
+- fp = ksmbd_lookup_durable_fd(br_info->fid);
++ fp = ksmbd_lookup_global_fd(br_info->fid);
+ if (!fp)
+ goto out;
+
+@@ -833,7 +838,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
+ interim_entry);
+ setup_async_work(in_work, NULL, NULL);
+ smb2_send_interim_resp(in_work, STATUS_PENDING);
+- list_del(&in_work->interim_entry);
++ list_del_init(&in_work->interim_entry);
++ release_async_work(in_work);
+ }
+ INIT_WORK(&work->work, __smb2_lease_break_noti);
+ ksmbd_queue_work(work);
+@@ -878,7 +884,6 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+ struct lease *lease = brk_opinfo->o_lease;
+
+ atomic_inc(&brk_opinfo->breaking_cnt);
+-
+ err = oplock_break_pending(brk_opinfo, req_op_level);
+ if (err)
+ return err < 0 ? err : 0;
+@@ -899,7 +904,8 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
+ lease->new_state =
+ SMB2_LEASE_READ_CACHING_LE;
+ } else {
+- if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
++ if (lease->state & SMB2_LEASE_HANDLE_CACHING_LE &&
++ !lease->is_dir)
+ lease->new_state =
+ SMB2_LEASE_READ_CACHING_LE;
+ else
+@@ -1031,6 +1037,8 @@ static void copy_lease(struct oplock_info *op1, struct oplock_info *op2)
+ SMB2_LEASE_KEY_SIZE);
+ lease2->duration = lease1->duration;
+ lease2->flags = lease1->flags;
++ lease2->epoch = lease1->epoch;
++ lease2->version = lease1->version;
+ }
+
+ static int add_lease_global_list(struct oplock_info *opinfo)
+@@ -1080,6 +1088,79 @@ static void set_oplock_level(struct oplock_info *opinfo, int level,
+ }
+ }
+
++void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
++ struct lease_ctx_info *lctx)
++{
++ struct oplock_info *opinfo;
++ struct ksmbd_inode *p_ci = NULL;
++
++ if (lctx->version != 2)
++ return;
++
++ p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
++ if (!p_ci)
++ return;
++
++ down_read(&p_ci->m_lock);
++ list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
++ if (opinfo->conn == NULL || !opinfo->is_lease)
++ continue;
++
++ if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE &&
++ (!(lctx->flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) ||
++ !compare_guid_key(opinfo, fp->conn->ClientGUID,
++ lctx->parent_lease_key))) {
++ if (!atomic_inc_not_zero(&opinfo->refcount))
++ continue;
++
++ if (ksmbd_conn_releasing(opinfo->conn))
++ continue;
++
++ oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
++ opinfo_put(opinfo);
++ }
++ }
++ up_read(&p_ci->m_lock);
++
++ ksmbd_inode_put(p_ci);
++}
++
++void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
++{
++ struct oplock_info *opinfo;
++ struct ksmbd_inode *p_ci = NULL;
++
++ rcu_read_lock();
++ opinfo = rcu_dereference(fp->f_opinfo);
++ rcu_read_unlock();
++
++ if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2)
++ return;
++
++ p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
++ if (!p_ci)
++ return;
++
++ down_read(&p_ci->m_lock);
++ list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
++ if (opinfo->conn == NULL || !opinfo->is_lease)
++ continue;
++
++ if (opinfo->o_lease->state != SMB2_OPLOCK_LEVEL_NONE) {
++ if (!atomic_inc_not_zero(&opinfo->refcount))
++ continue;
++
++ if (ksmbd_conn_releasing(opinfo->conn))
++ continue;
++ oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
++ opinfo_put(opinfo);
++ }
++ }
++ up_read(&p_ci->m_lock);
++
++ ksmbd_inode_put(p_ci);
++}
++
+ /**
+ * smb_grant_oplock() - handle oplock/lease request on file open
+ * @work: smb work
+@@ -1103,9 +1184,13 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ bool prev_op_has_lease;
+ __le32 prev_op_state = 0;
+
+- /* not support directory lease */
+- if (S_ISDIR(file_inode(fp->filp)->i_mode))
+- return 0;
++	/* Directory leases are supported only by lease version 2 */
++ if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
++ if (!lctx || lctx->version != 2 ||
++ (lctx->flags != SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE &&
++ !lctx->epoch))
++ return 0;
++ }
+
+ opinfo = alloc_opinfo(work, pid, tid);
+ if (!opinfo)
+@@ -1147,7 +1232,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ prev_opinfo = opinfo_get_list(ci);
+ if (!prev_opinfo ||
+ (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
+- opinfo_conn_put(prev_opinfo);
++ opinfo_put(prev_opinfo);
+ goto set_lev;
+ }
+ prev_op_has_lease = prev_opinfo->is_lease;
+@@ -1157,19 +1242,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
+ if (share_ret < 0 &&
+ prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+ err = share_ret;
+- opinfo_conn_put(prev_opinfo);
++ opinfo_put(prev_opinfo);
+ goto err_out;
+ }
+
+ if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+ prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+- opinfo_conn_put(prev_opinfo);
++ opinfo_put(prev_opinfo);
+ goto op_break_not_needed;
+ }
+
+ list_add(&work->interim_entry, &prev_opinfo->interim_list);
+ err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
+- opinfo_conn_put(prev_opinfo);
++ opinfo_put(prev_opinfo);
+ if (err == -ENOENT)
+ goto set_lev;
+ /* Check all oplock was freed by close */
+@@ -1232,14 +1317,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
+ return;
+ if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
+ brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
+- opinfo_conn_put(brk_opinfo);
++ opinfo_put(brk_opinfo);
+ return;
+ }
+
+ brk_opinfo->open_trunc = is_trunc;
+ list_add(&work->interim_entry, &brk_opinfo->interim_list);
+ oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
+- opinfo_conn_put(brk_opinfo);
++ opinfo_put(brk_opinfo);
+ }
+
+ /**
+@@ -1265,14 +1350,14 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
++ if (brk_op->conn == NULL)
++ continue;
++
+ if (!atomic_inc_not_zero(&brk_op->refcount))
+ continue;
+
+- atomic_inc(&brk_op->conn->r_count);
+- if (ksmbd_conn_releasing(brk_op->conn)) {
+- atomic_dec(&brk_op->conn->r_count);
++ if (ksmbd_conn_releasing(brk_op->conn))
+ continue;
+- }
+
+ rcu_read_unlock();
+ if (brk_op->is_lease && (brk_op->o_lease->state &
+@@ -1303,7 +1388,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
+ brk_op->open_trunc = is_trunc;
+ oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
+ next:
+- opinfo_conn_put(brk_op);
++ opinfo_put(brk_op);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+@@ -1363,9 +1448,11 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ SMB2_LEASE_KEY_SIZE);
+ buf->lcontext.LeaseFlags = lease->flags;
++ buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
+ buf->lcontext.LeaseState = lease->state;
+- memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+- SMB2_LEASE_KEY_SIZE);
++ if (lease->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
++ memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
++ SMB2_LEASE_KEY_SIZE);
+ buf->ccontext.DataOffset = cpu_to_le16(offsetof
+ (struct create_lease_v2, lcontext));
+ buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
+@@ -1400,7 +1487,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
+ * parse_lease_state() - parse lease context containted in file open request
+ * @open_req: buffer containing smb2 file open(create) request
+ *
+- * Return: oplock state, -ENOENT if create lease context not found
++ * Return: allocated lease context object on success, otherwise NULL
+ */
+ struct lease_ctx_info *parse_lease_state(void *open_req)
+ {
+@@ -1422,9 +1509,11 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ lreq->req_state = lc->lcontext.LeaseState;
+ lreq->flags = lc->lcontext.LeaseFlags;
++ lreq->epoch = lc->lcontext.Epoch;
+ lreq->duration = lc->lcontext.LeaseDuration;
+- memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+- SMB2_LEASE_KEY_SIZE);
++ if (lreq->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
++ memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
++ SMB2_LEASE_KEY_SIZE);
+ lreq->version = 2;
+ } else {
+ struct create_lease *lc = (struct create_lease *)cc;
+@@ -1542,6 +1631,8 @@ void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp)
+ buf->Name[3] = 'Q';
+
+ buf->Timeout = cpu_to_le32(fp->durable_timeout);
++ if (fp->is_persistent)
++ buf->Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
+ }
+
+ /**
+@@ -1709,3 +1800,71 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+ read_unlock(&lease_list_lock);
+ return ret_op;
+ }
++
++int smb2_check_durable_oplock(struct ksmbd_conn *conn,
++ struct ksmbd_share_config *share,
++ struct ksmbd_file *fp,
++ struct lease_ctx_info *lctx,
++ char *name)
++{
++ struct oplock_info *opinfo = opinfo_get(fp);
++ int ret = 0;
++
++ if (!opinfo)
++ return 0;
++
++ if (opinfo->is_lease == false) {
++ if (lctx) {
++			pr_err("create context includes a lease\n");
++ ret = -EBADF;
++ goto out;
++ }
++
++ if (opinfo->level != SMB2_OPLOCK_LEVEL_BATCH) {
++ pr_err("oplock level is not equal to SMB2_OPLOCK_LEVEL_BATCH\n");
++ ret = -EBADF;
++ }
++
++ goto out;
++ }
++
++ if (memcmp(conn->ClientGUID, fp->client_guid,
++ SMB2_CLIENT_GUID_SIZE)) {
++		ksmbd_debug(SMB, "Client GUID of fp does not match the connection's\n");
++ ret = -EBADF;
++ goto out;
++ }
++
++ if (!lctx) {
++		ksmbd_debug(SMB, "create context does not include a lease\n");
++ ret = -EBADF;
++ goto out;
++ }
++
++ if (memcmp(opinfo->o_lease->lease_key, lctx->lease_key,
++ SMB2_LEASE_KEY_SIZE)) {
++ ksmbd_debug(SMB,
++ "lease key of fp does not match lease key in create context\n");
++ ret = -EBADF;
++ goto out;
++ }
++
++ if (!(opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)) {
++ ksmbd_debug(SMB, "lease state does not contain SMB2_LEASE_HANDLE_CACHING\n");
++ ret = -EBADF;
++ goto out;
++ }
++
++ if (opinfo->o_lease->version != lctx->version) {
++ ksmbd_debug(SMB,
++ "lease version of fp does not match the one in create context\n");
++ ret = -EBADF;
++ goto out;
++ }
++
++ if (!ksmbd_inode_pending_delete(fp))
++ ret = ksmbd_validate_name_reconnect(share, fp, name);
++out:
++ opinfo_put(opinfo);
++ return ret;
++}
+diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
+index 4b0fe6da76940f..e9da63f25b2061 100644
+--- a/fs/smb/server/oplock.h
++++ b/fs/smb/server/oplock.h
+@@ -34,7 +34,9 @@ struct lease_ctx_info {
+ __le32 flags;
+ __le64 duration;
+ __u8 parent_lease_key[SMB2_LEASE_KEY_SIZE];
++ __le16 epoch;
+ int version;
++ bool is_dir;
+ };
+
+ struct lease_table {
+@@ -53,6 +55,7 @@ struct lease {
+ __u8 parent_lease_key[SMB2_LEASE_KEY_SIZE];
+ int version;
+ unsigned short epoch;
++ bool is_dir;
+ struct lease_table *l_lb;
+ };
+
+@@ -124,4 +127,12 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
+ int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
+ struct lease_ctx_info *lctx);
+ void destroy_lease_table(struct ksmbd_conn *conn);
++void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
++ struct lease_ctx_info *lctx);
++void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp);
++int smb2_check_durable_oplock(struct ksmbd_conn *conn,
++ struct ksmbd_share_config *share,
++ struct ksmbd_file *fp,
++ struct lease_ctx_info *lctx,
++ char *name);
+ #endif /* __KSMBD_OPLOCK_H */
+diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
+index 3079e607c5fe6d..d5d85300560d03 100644
+--- a/fs/smb/server/server.c
++++ b/fs/smb/server/server.c
+@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+ int rc;
+ bool is_chained = false;
+
+- if (conn->ops->allocate_rsp_buf(work))
+- return;
+-
+ if (conn->ops->is_transform_hdr &&
+ conn->ops->is_transform_hdr(work->request_buf)) {
+ rc = conn->ops->decrypt_req(work);
+- if (rc < 0) {
+- conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
+- goto send;
+- }
+-
++ if (rc < 0)
++ return;
+ work->encrypted = true;
+ }
+
++			 * Compare the parent lease using the parent key. If no
++			 * lease with the same parent key exists, send a lease
++			 * break notification.
+ rc = conn->ops->init_rsp_hdr(work);
+ if (rc) {
+ /* either uid or tid is not correct */
+@@ -241,6 +238,8 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
+ } while (is_chained == true);
+
+ send:
++ if (work->sess)
++ ksmbd_user_session_put(work->sess);
+ if (work->tcon)
+ ksmbd_tree_connect_put(work->tcon);
+ smb3_preauth_hash_rsp(work);
+diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
+index 23bd3d1209dfa5..727cb49926ee52 100644
+--- a/fs/smb/server/smb2misc.c
++++ b/fs/smb/server/smb2misc.c
+@@ -101,29 +101,46 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
+ break;
+ case SMB2_TREE_CONNECT:
+- *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
++ *off = max_t(unsigned short int,
++ le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset),
++ offsetof(struct smb2_tree_connect_req, Buffer));
+ *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
+ break;
+ case SMB2_CREATE:
+ {
++ unsigned short int name_off =
++ max_t(unsigned short int,
++ le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset),
++ offsetof(struct smb2_create_req, Buffer));
++ unsigned short int name_len =
++ le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++
+ if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
+ *off = le32_to_cpu(((struct smb2_create_req *)
+ hdr)->CreateContextsOffset);
+ *len = le32_to_cpu(((struct smb2_create_req *)
+ hdr)->CreateContextsLength);
+- break;
++ if (!name_len)
++ break;
++
++ if (name_off + name_len < (u64)*off + *len)
++ break;
+ }
+
+- *off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+- *len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++ *off = name_off;
++ *len = name_len;
+ break;
+ }
+ case SMB2_QUERY_INFO:
+- *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
++ *off = max_t(unsigned int,
++ le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset),
++ offsetof(struct smb2_query_info_req, Buffer));
+ *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
+ break;
+ case SMB2_SET_INFO:
+- *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
++ *off = max_t(unsigned int,
++ le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset),
++ offsetof(struct smb2_set_info_req, Buffer));
+ *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
+ break;
+ case SMB2_READ:
+@@ -133,7 +150,7 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ case SMB2_WRITE:
+ if (((struct smb2_write_req *)hdr)->DataOffset ||
+ ((struct smb2_write_req *)hdr)->Length) {
+- *off = max_t(unsigned int,
++ *off = max_t(unsigned short int,
+ le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
+ offsetof(struct smb2_write_req, Buffer));
+ *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
+@@ -144,7 +161,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
+ break;
+ case SMB2_QUERY_DIRECTORY:
+- *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
++ *off = max_t(unsigned short int,
++ le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset),
++ offsetof(struct smb2_query_directory_req, Buffer));
+ *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
+ break;
+ case SMB2_LOCK:
+@@ -159,7 +178,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ break;
+ }
+ case SMB2_IOCTL:
+- *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
++ *off = max_t(unsigned int,
++ le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset),
++ offsetof(struct smb2_ioctl_req, Buffer));
+ *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
+ break;
+ default:
+diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
+index aed7704a067286..606aa3c5189a28 100644
+--- a/fs/smb/server/smb2ops.c
++++ b/fs/smb/server/smb2ops.c
+@@ -221,12 +221,18 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+- conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++ conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
++ SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION &&
+ conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
+ conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
++ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
++ (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
++ conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
++ conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
++
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+ conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+ }
+@@ -245,7 +251,8 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+- conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
++ conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
++ SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
+ (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
+@@ -254,6 +261,9 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+ conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
++
++ if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE)
++ conn->vals->capabilities |= SMB2_GLOBAL_CAP_PERSISTENT_HANDLES;
+ }
+
+ /**
+@@ -270,16 +280,15 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
+- conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
+-
+- if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
+- (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
+- conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
+- conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
++ conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
++ SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
+
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
+ conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
+
++ if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE)
++ conn->vals->capabilities |= SMB2_GLOBAL_CAP_PERSISTENT_HANDLES;
++
+ INIT_LIST_HEAD(&conn->preauth_sess_table);
+ return 0;
+ }
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 93262ca3f58a77..cac9eb4663aadc 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -535,6 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
+ if (cmd == SMB2_QUERY_INFO_HE) {
+ struct smb2_query_info_req *req;
+
++ if (get_rfc1002_len(work->request_buf) <
++ offsetof(struct smb2_query_info_req, OutputBufferLength))
++ return -EINVAL;
++
+ req = smb2_get_msg(work->request_buf);
+ if ((req->InfoType == SMB2_O_INFO_FILE &&
+ (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
+@@ -601,36 +605,14 @@ int smb2_check_user_session(struct ksmbd_work *work)
+
+ /* Check for validity of user session */
+ work->sess = ksmbd_session_lookup_all(conn, sess_id);
+- if (work->sess)
++ if (work->sess) {
++ ksmbd_user_session_get(work->sess);
+ return 1;
++ }
+ ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
+ return -ENOENT;
+ }
+
+-static void destroy_previous_session(struct ksmbd_conn *conn,
+- struct ksmbd_user *user, u64 id)
+-{
+- struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+- struct ksmbd_user *prev_user;
+- struct channel *chann;
+- long index;
+-
+- if (!prev_sess)
+- return;
+-
+- prev_user = prev_sess->user;
+-
+- if (!prev_user ||
+- strcmp(user->name, prev_user->name) ||
+- user->passkey_sz != prev_user->passkey_sz ||
+- memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
+- return;
+-
+- prev_sess->state = SMB2_SESSION_EXPIRED;
+- xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+- ksmbd_conn_set_exiting(chann->conn);
+-}
+-
+ /**
+ * smb2_get_name() - get filename string from on the wire smb format
+ * @src: source buffer
+@@ -650,6 +632,12 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
+ return name;
+ }
+
++ if (*name == '\\') {
++		pr_err("file name with a leading slash is not allowed\n");
++ kfree(name);
++ return ERR_PTR(-EINVAL);
++ }
++
+ ksmbd_conv_path_to_unix(name);
+ ksmbd_strip_last_slash(name);
+ return name;
+@@ -657,13 +645,9 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
+
+ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
+ {
+- struct smb2_hdr *rsp_hdr;
+ struct ksmbd_conn *conn = work->conn;
+ int id;
+
+- rsp_hdr = ksmbd_resp_buf_next(work);
+- rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
+-
+ id = ksmbd_acquire_async_msg_id(&conn->async_ida);
+ if (id < 0) {
+ pr_err("Failed to alloc async message id\n");
+@@ -671,7 +655,6 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
+ }
+ work->asynchronous = true;
+ work->async_id = id;
+- rsp_hdr->Id.AsyncId = cpu_to_le64(id);
+
+ ksmbd_debug(SMB,
+ "Send interim Response to inform async request id : %d\n",
+@@ -723,6 +706,8 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
+ __SMB2_HEADER_STRUCTURE_SIZE);
+
+ rsp_hdr = smb2_get_msg(in_work->response_buf);
++ rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
++ rsp_hdr->Id.AsyncId = cpu_to_le64(work->async_id);
+ smb2_set_err_rsp(in_work);
+ rsp_hdr->Status = status;
+
+@@ -1417,7 +1402,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
+ char *name;
+ unsigned int name_off, name_len, secbuf_len;
+
+- secbuf_len = le16_to_cpu(req->SecurityBufferLength);
++ if (conn->use_spnego && conn->mechToken)
++ secbuf_len = conn->mechTokenLen;
++ else
++ secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+ if (secbuf_len < sizeof(struct authenticate_message)) {
+ ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
+ return NULL;
+@@ -1508,7 +1496,10 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ struct authenticate_message *authblob;
+
+ authblob = user_authblob(conn, req);
+- sz = le16_to_cpu(req->SecurityBufferLength);
++ if (conn->use_spnego && conn->mechToken)
++ sz = conn->mechTokenLen;
++ else
++ sz = le16_to_cpu(req->SecurityBufferLength);
+ rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
+ if (rc) {
+ set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
+@@ -1698,6 +1689,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ rc = ksmbd_session_register(conn, sess);
+ if (rc)
+ goto out_err;
++
++ conn->binding = false;
+ } else if (conn->dialect >= SMB30_PROT_ID &&
+ (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+ req->Flags & SMB2_SESSION_REQ_FLAG_BINDING) {
+@@ -1752,6 +1745,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ }
+
+ conn->binding = true;
++ ksmbd_user_session_get(sess);
+ } else if ((conn->dialect < SMB30_PROT_ID ||
+ server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+ (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+@@ -1776,13 +1770,15 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ sess = NULL;
+ goto out_err;
+ }
++
++ conn->binding = false;
++ ksmbd_user_session_get(sess);
+ }
+ work->sess = sess;
+
+ negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+ negblob_len = le16_to_cpu(req->SecurityBufferLength);
+- if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+- negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
++ if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
+@@ -1791,8 +1787,15 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ negblob_off);
+
+ if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
+- if (conn->mechToken)
++ if (conn->mechToken) {
+ negblob = (struct negotiate_message *)conn->mechToken;
++ negblob_len = conn->mechTokenLen;
++ }
++ }
++
++ if (negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
++ rc = -EINVAL;
++ goto out_err;
+ }
+
+ if (server_conf.auth_mechs & conn->auth_mechs) {
+@@ -1937,12 +1940,12 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ struct ksmbd_session *sess = work->sess;
+ char *treename = NULL, *name = NULL;
+ struct ksmbd_tree_conn_status status;
+- struct ksmbd_share_config *share;
++ struct ksmbd_share_config *share = NULL;
+ int rc = -EINVAL;
+
+ WORK_BUFFERS(work, req, rsp);
+
+- treename = smb_strndup_from_utf16(req->Buffer,
++ treename = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->PathOffset),
+ le16_to_cpu(req->PathLength), true,
+ conn->local_nls);
+ if (IS_ERR(treename)) {
+@@ -1960,7 +1963,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
+ name, treename);
+
+- status = ksmbd_tree_conn_connect(conn, sess, name);
++ status = ksmbd_tree_conn_connect(work, name);
+ if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+ rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
+ else
+@@ -1999,7 +2002,12 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ write_unlock(&sess->tree_conns_lock);
+ rsp->StructureSize = cpu_to_le16(16);
+ out_err1:
+- rsp->Capabilities = 0;
++ if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE && share &&
++ test_share_config_flag(share,
++ KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
++ rsp->Capabilities = SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY;
++ else
++ rsp->Capabilities = 0;
+ rsp->Reserved = 0;
+ /* default manual caching */
+ rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
+@@ -2051,15 +2059,22 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ * @access: file access flags
+ * @disposition: file disposition flags
+ * @may_flags: set with MAY_ flags
++ * @is_dir: is creating open flags for directory
+ *
+ * Return: file open flags
+ */
+ static int smb2_create_open_flags(bool file_present, __le32 access,
+ __le32 disposition,
+- int *may_flags)
++ int *may_flags,
++ bool is_dir)
+ {
+ int oflags = O_NONBLOCK | O_LARGEFILE;
+
++ if (is_dir) {
++ access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
++ ksmbd_debug(SMB, "Discard write access to a directory\n");
++ }
++
+ if (access & FILE_READ_DESIRED_ACCESS_LE &&
+ access & FILE_WRITE_DESIRE_ACCESS_LE) {
+ oflags |= O_RDWR;
+@@ -2203,7 +2218,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ ksmbd_conn_unlock(conn);
+
+ ksmbd_close_session_fds(work);
+- ksmbd_conn_wait_idle(conn, sess_id);
++ ksmbd_conn_wait_idle(conn);
+
+ /*
+ * Re-lookup session to validate if session is deleted
+@@ -2218,7 +2233,9 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ }
+
+ ksmbd_destroy_file_table(&sess->file_table);
++ down_write(&conn->session_lock);
+ sess->state = SMB2_SESSION_EXPIRED;
++ up_write(&conn->session_lock);
+
+ ksmbd_free_user(sess->user);
+ sess->user = NULL;
+@@ -2314,11 +2331,12 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
+ * @eabuf: set info command buffer
+ * @buf_len: set info command buffer length
+ * @path: dentry path for get ea
++ * @get_write: get write access to a mount
+ *
+ * Return: 0 on success, otherwise error
+ */
+ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+- const struct path *path)
++ const struct path *path, bool get_write)
+ {
+ struct mnt_idmap *idmap = mnt_idmap(path->mnt);
+ char *attr_name = NULL, *value;
+@@ -2366,7 +2384,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ if (rc > 0) {
+ rc = ksmbd_vfs_remove_xattr(idmap,
+ path,
+- attr_name);
++ attr_name,
++ get_write);
+
+ if (rc < 0) {
+ ksmbd_debug(SMB,
+@@ -2380,7 +2399,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ rc = 0;
+ } else {
+ rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
+- le16_to_cpu(eabuf->EaValueLength), 0);
++ le16_to_cpu(eabuf->EaValueLength),
++ 0, get_write);
+ if (rc < 0) {
+ ksmbd_debug(SMB,
+ "ksmbd_vfs_setxattr is failed(%d)\n",
+@@ -2443,7 +2463,7 @@ static noinline int smb2_set_stream_name_xattr(const struct path *path,
+ return -EBADF;
+ }
+
+- rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0);
++ rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0, false);
+ if (rc < 0)
+ pr_err("Failed to store XATTR stream name :%d\n", rc);
+ return 0;
+@@ -2472,7 +2492,7 @@ static int smb2_remove_smb_xattrs(const struct path *path)
+ !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+ STREAM_PREFIX_LEN)) {
+ err = ksmbd_vfs_remove_xattr(idmap, path,
+- name);
++ name, true);
+ if (err)
+ ksmbd_debug(SMB, "remove xattr failed : %s\n",
+ name);
+@@ -2518,7 +2538,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
+ da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ XATTR_DOSINFO_ITIME;
+
+- rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da);
++ rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, true);
+ if (rc)
+ ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
+ }
+@@ -2608,7 +2628,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
+ sizeof(struct create_sd_buf_req))
+ return -EINVAL;
+ return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
+- le32_to_cpu(sd_buf->ccontext.DataLength), true);
++ le32_to_cpu(sd_buf->ccontext.DataLength), true, false);
+ }
+
+ static void ksmbd_acls_fattr(struct smb_fattr *fattr,
+@@ -2631,6 +2651,165 @@ static void ksmbd_acls_fattr(struct smb_fattr *fattr,
+ }
+ }
+
++enum {
++ DURABLE_RECONN_V2 = 1,
++ DURABLE_RECONN,
++ DURABLE_REQ_V2,
++ DURABLE_REQ,
++};
++
++struct durable_info {
++ struct ksmbd_file *fp;
++ unsigned short int type;
++ bool persistent;
++ bool reconnected;
++ unsigned int timeout;
++ char *CreateGuid;
++};
++
++static int parse_durable_handle_context(struct ksmbd_work *work,
++ struct smb2_create_req *req,
++ struct lease_ctx_info *lc,
++ struct durable_info *dh_info)
++{
++ struct ksmbd_conn *conn = work->conn;
++ struct create_context *context;
++ int dh_idx, err = 0;
++ u64 persistent_id = 0;
++ int req_op_level;
++ static const char * const durable_arr[] = {"DH2C", "DHnC", "DH2Q", "DHnQ"};
++
++ req_op_level = req->RequestedOplockLevel;
++ for (dh_idx = DURABLE_RECONN_V2; dh_idx <= ARRAY_SIZE(durable_arr);
++ dh_idx++) {
++ context = smb2_find_context_vals(req, durable_arr[dh_idx - 1], 4);
++ if (IS_ERR(context)) {
++ err = PTR_ERR(context);
++ goto out;
++ }
++ if (!context)
++ continue;
++
++ switch (dh_idx) {
++ case DURABLE_RECONN_V2:
++ {
++ struct create_durable_reconn_v2_req *recon_v2;
++
++ if (dh_info->type == DURABLE_RECONN ||
++ dh_info->type == DURABLE_REQ_V2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ recon_v2 = (struct create_durable_reconn_v2_req *)context;
++ persistent_id = recon_v2->Fid.PersistentFileId;
++ dh_info->fp = ksmbd_lookup_durable_fd(persistent_id);
++ if (!dh_info->fp) {
++ ksmbd_debug(SMB, "Failed to get durable handle state\n");
++ err = -EBADF;
++ goto out;
++ }
++
++ if (memcmp(dh_info->fp->create_guid, recon_v2->CreateGuid,
++ SMB2_CREATE_GUID_SIZE)) {
++ err = -EBADF;
++ ksmbd_put_durable_fd(dh_info->fp);
++ goto out;
++ }
++
++ dh_info->type = dh_idx;
++ dh_info->reconnected = true;
++ ksmbd_debug(SMB,
++ "reconnect v2 Persistent-id from reconnect = %llu\n",
++ persistent_id);
++ break;
++ }
++ case DURABLE_RECONN:
++ {
++ struct create_durable_reconn_req *recon;
++
++ if (dh_info->type == DURABLE_RECONN_V2 ||
++ dh_info->type == DURABLE_REQ_V2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ recon = (struct create_durable_reconn_req *)context;
++ persistent_id = recon->Data.Fid.PersistentFileId;
++ dh_info->fp = ksmbd_lookup_durable_fd(persistent_id);
++ if (!dh_info->fp) {
++ ksmbd_debug(SMB, "Failed to get durable handle state\n");
++ err = -EBADF;
++ goto out;
++ }
++
++ dh_info->type = dh_idx;
++ dh_info->reconnected = true;
++ ksmbd_debug(SMB, "reconnect Persistent-id from reconnect = %llu\n",
++ persistent_id);
++ break;
++ }
++ case DURABLE_REQ_V2:
++ {
++ struct create_durable_req_v2 *durable_v2_blob;
++
++ if (dh_info->type == DURABLE_RECONN ||
++ dh_info->type == DURABLE_RECONN_V2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ durable_v2_blob =
++ (struct create_durable_req_v2 *)context;
++ ksmbd_debug(SMB, "Request for durable v2 open\n");
++ dh_info->fp = ksmbd_lookup_fd_cguid(durable_v2_blob->CreateGuid);
++ if (dh_info->fp) {
++ if (!memcmp(conn->ClientGUID, dh_info->fp->client_guid,
++ SMB2_CLIENT_GUID_SIZE)) {
++ if (!(req->hdr.Flags & SMB2_FLAGS_REPLAY_OPERATION)) {
++ err = -ENOEXEC;
++ goto out;
++ }
++
++ dh_info->fp->conn = conn;
++ dh_info->reconnected = true;
++ goto out;
++ }
++ }
++
++ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
++ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
++ dh_info->CreateGuid =
++ durable_v2_blob->CreateGuid;
++ dh_info->persistent =
++ le32_to_cpu(durable_v2_blob->Flags);
++ dh_info->timeout =
++ le32_to_cpu(durable_v2_blob->Timeout);
++ dh_info->type = dh_idx;
++ }
++ break;
++ }
++ case DURABLE_REQ:
++ if (dh_info->type == DURABLE_RECONN)
++ goto out;
++ if (dh_info->type == DURABLE_RECONN_V2 ||
++ dh_info->type == DURABLE_REQ_V2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ if ((lc && (lc->req_state & SMB2_LEASE_HANDLE_CACHING_LE)) ||
++ req_op_level == SMB2_OPLOCK_LEVEL_BATCH) {
++ ksmbd_debug(SMB, "Request for durable open\n");
++ dh_info->type = dh_idx;
++ }
++ }
++ }
++
++out:
++ return err;
++}
++
+ /**
+ * smb2_open() - handler for smb file open request
+ * @work: smb work containing request buffer
+@@ -2654,6 +2833,7 @@ int smb2_open(struct ksmbd_work *work)
+ struct lease_ctx_info *lc = NULL;
+ struct create_ea_buf_req *ea_buf = NULL;
+ struct oplock_info *opinfo;
++ struct durable_info dh_info = {0};
+ __le32 *next_ptr = NULL;
+ int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
+ int rc = 0;
+@@ -2686,22 +2866,13 @@ int smb2_open(struct ksmbd_work *work)
+ }
+
+ if (req->NameLength) {
+- if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
+- *(char *)req->Buffer == '\\') {
+- pr_err("not allow directory name included leading slash\n");
+- rc = -EINVAL;
+- goto err_out1;
+- }
+-
+- name = smb2_get_name(req->Buffer,
++ name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
+ le16_to_cpu(req->NameLength),
+ work->conn->local_nls);
+ if (IS_ERR(name)) {
+ rc = PTR_ERR(name);
+- if (rc != -ENOMEM)
+- rc = -ENOENT;
+ name = NULL;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ ksmbd_debug(SMB, "converted name = %s\n", name);
+@@ -2709,33 +2880,72 @@ int smb2_open(struct ksmbd_work *work)
+ if (!test_share_config_flag(work->tcon->share_conf,
+ KSMBD_SHARE_FLAG_STREAMS)) {
+ rc = -EBADF;
+- goto err_out1;
++ goto err_out2;
+ }
+ rc = parse_stream_name(name, &stream_name, &s_type);
+ if (rc < 0)
+- goto err_out1;
++ goto err_out2;
+ }
+
+ rc = ksmbd_validate_filename(name);
+ if (rc < 0)
+- goto err_out1;
++ goto err_out2;
+
+ if (ksmbd_share_veto_filename(share, name)) {
+ rc = -ENOENT;
+ ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
+ name);
+- goto err_out1;
++ goto err_out2;
+ }
+ } else {
+ name = kstrdup("", GFP_KERNEL);
+ if (!name) {
+ rc = -ENOMEM;
+- goto err_out1;
++ goto err_out2;
+ }
+ }
+
+ req_op_level = req->RequestedOplockLevel;
+- if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
++
++ if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE &&
++ req->CreateContextsOffset) {
++ lc = parse_lease_state(req);
++ rc = parse_durable_handle_context(work, req, lc, &dh_info);
++ if (rc) {
++ ksmbd_debug(SMB, "error parsing durable handle context\n");
++ goto err_out2;
++ }
++
++ if (dh_info.reconnected == true) {
++ rc = smb2_check_durable_oplock(conn, share, dh_info.fp, lc, name);
++ if (rc) {
++ ksmbd_put_durable_fd(dh_info.fp);
++ goto err_out2;
++ }
++
++ rc = ksmbd_reopen_durable_fd(work, dh_info.fp);
++ if (rc) {
++ ksmbd_put_durable_fd(dh_info.fp);
++ goto err_out2;
++ }
++
++ if (ksmbd_override_fsids(work)) {
++ rc = -ENOMEM;
++ ksmbd_put_durable_fd(dh_info.fp);
++ goto err_out2;
++ }
++
++ fp = dh_info.fp;
++ file_info = FILE_OPENED;
++
++ rc = ksmbd_vfs_getattr(&fp->filp->f_path, &stat);
++ if (rc)
++ goto err_out2;
++
++ ksmbd_put_durable_fd(fp);
++ goto reconnected_fp;
++ }
++ } else if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
+ lc = parse_lease_state(req);
+
+ if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
+@@ -2743,14 +2953,14 @@ int smb2_open(struct ksmbd_work *work)
+ le32_to_cpu(req->ImpersonationLevel));
+ rc = -EIO;
+ rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
+ pr_err("Invalid create options : 0x%x\n",
+ le32_to_cpu(req->CreateOptions));
+ rc = -EINVAL;
+- goto err_out1;
++ goto err_out2;
+ } else {
+ if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
+ req->CreateOptions & FILE_RANDOM_ACCESS_LE)
+@@ -2760,13 +2970,13 @@ int smb2_open(struct ksmbd_work *work)
+ (FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
+ FILE_RESERVE_OPFILTER_LE)) {
+ rc = -EOPNOTSUPP;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+ if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
+ rc = -EINVAL;
+- goto err_out1;
++ goto err_out2;
+ } else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
+ req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
+ }
+@@ -2778,21 +2988,21 @@ int smb2_open(struct ksmbd_work *work)
+ pr_err("Invalid create disposition : 0x%x\n",
+ le32_to_cpu(req->CreateDisposition));
+ rc = -EINVAL;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
+ pr_err("Invalid desired access : 0x%x\n",
+ le32_to_cpu(req->DesiredAccess));
+ rc = -EACCES;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
+ pr_err("Invalid file attribute : 0x%x\n",
+ le32_to_cpu(req->FileAttributes));
+ rc = -EINVAL;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ if (req->CreateContextsOffset) {
+@@ -2800,19 +3010,19 @@ int smb2_open(struct ksmbd_work *work)
+ context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+- goto err_out1;
++ goto err_out2;
+ } else if (context) {
+ ea_buf = (struct create_ea_buf_req *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_ea_buf_req)) {
+ rc = -EINVAL;
+- goto err_out1;
++ goto err_out2;
+ }
+ if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
+ rsp->hdr.Status = STATUS_ACCESS_DENIED;
+ rc = -EACCES;
+- goto err_out1;
++ goto err_out2;
+ }
+ }
+
+@@ -2820,7 +3030,7 @@ int smb2_open(struct ksmbd_work *work)
+ SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+- goto err_out1;
++ goto err_out2;
+ } else if (context) {
+ ksmbd_debug(SMB,
+ "get query maximal access context\n");
+@@ -2831,11 +3041,11 @@ int smb2_open(struct ksmbd_work *work)
+ SMB2_CREATE_TIMEWARP_REQUEST, 4);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+- goto err_out1;
++ goto err_out2;
+ } else if (context) {
+ ksmbd_debug(SMB, "get timewarp context\n");
+ rc = -EBADF;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ if (tcon->posix_extensions) {
+@@ -2843,7 +3053,7 @@ int smb2_open(struct ksmbd_work *work)
+ SMB2_CREATE_TAG_POSIX, 16);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+- goto err_out1;
++ goto err_out2;
+ } else if (context) {
+ struct create_posix *posix =
+ (struct create_posix *)context;
+@@ -2851,7 +3061,7 @@ int smb2_open(struct ksmbd_work *work)
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_posix) - 4) {
+ rc = -EINVAL;
+- goto err_out1;
++ goto err_out2;
+ }
+ ksmbd_debug(SMB, "get posix context\n");
+
+@@ -2863,7 +3073,7 @@ int smb2_open(struct ksmbd_work *work)
+
+ if (ksmbd_override_fsids(work)) {
+ rc = -ENOMEM;
+- goto err_out1;
++ goto err_out2;
+ }
+
+ rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+@@ -2974,10 +3184,12 @@ int smb2_open(struct ksmbd_work *work)
+
+ open_flags = smb2_create_open_flags(file_present, daccess,
+ req->CreateDisposition,
+- &may_flags);
++ &may_flags,
++ req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
++ (file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
+
+ if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+- if (open_flags & O_CREAT) {
++ if (open_flags & (O_CREAT | O_TRUNC)) {
+ ksmbd_debug(SMB,
+ "User does not have write permission\n");
+ rc = -EACCES;
+@@ -3009,7 +3221,7 @@ int smb2_open(struct ksmbd_work *work)
+
+ rc = smb2_set_ea(&ea_buf->ea,
+ le32_to_cpu(ea_buf->ccontext.DataLength),
+- &path);
++ &path, false);
+ if (rc == -EOPNOTSUPP)
+ rc = 0;
+ else if (rc)
+@@ -3038,7 +3250,7 @@ int smb2_open(struct ksmbd_work *work)
+ }
+ }
+
+- rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
++ rc = ksmbd_query_inode_status(path.dentry->d_parent);
+ if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
+ rc = -EBUSY;
+ goto err_out;
+@@ -3152,7 +3364,8 @@ int smb2_open(struct ksmbd_work *work)
+ idmap,
+ &path,
+ pntsd,
+- pntsd_size);
++ pntsd_size,
++ false);
+ kfree(pntsd);
+ if (rc)
+ pr_err("failed to store ntacl in xattr : %d\n",
+@@ -3175,19 +3388,14 @@ int smb2_open(struct ksmbd_work *work)
+
+ fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
+ FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
+- if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
+- !fp->attrib_only && !stream_name) {
+- smb_break_all_oplock(work, fp);
+- need_truncate = 1;
+- }
+
+ /* fp should be searchable through ksmbd_inode.m_fp_list
+ * after daccess, saccess, attrib_only, and stream are
+ * initialized.
+ */
+- write_lock(&fp->f_ci->m_lock);
++ down_write(&fp->f_ci->m_lock);
+ list_add(&fp->node, &fp->f_ci->m_fp_list);
+- write_unlock(&fp->f_ci->m_lock);
++ up_write(&fp->f_ci->m_lock);
+
+ /* Check delete pending among previous fp before oplock break */
+ if (ksmbd_inode_pending_delete(fp)) {
+@@ -3195,23 +3403,44 @@ int smb2_open(struct ksmbd_work *work)
+ goto err_out;
+ }
+
++ if (file_present || created)
++ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
++ if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
++ !fp->attrib_only && !stream_name) {
++ smb_break_all_oplock(work, fp);
++ need_truncate = 1;
++ }
++
+ share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+ if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
+ (req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
+ !(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
+ if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
+ rc = share_ret;
+- goto err_out;
++ goto err_out1;
+ }
+ } else {
+- if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
++ if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) {
++ if (S_ISDIR(file_inode(filp)->i_mode)) {
++ lc->req_state &= ~SMB2_LEASE_WRITE_CACHING_LE;
++ lc->is_dir = true;
++ }
++
++ /*
++ * Compare parent lease using parent key. If there is no
++ * a lease that has same parent key, Send lease break
++ * notification.
++ */
++ smb_send_parent_lease_break_noti(fp, lc);
++
+ req_op_level = smb2_map_lease_to_oplock(lc->req_state);
+ ksmbd_debug(SMB,
+ "lease req for(%s) req oplock state 0x%x, lease state 0x%x\n",
+ name, req_op_level, lc->req_state);
+ rc = find_same_lease_key(sess, fp->f_ci, lc);
+ if (rc)
+- goto err_out;
++ goto err_out1;
+ } else if (open_flags == O_RDONLY &&
+ (req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
+ req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
+@@ -3222,16 +3451,16 @@ int smb2_open(struct ksmbd_work *work)
+ le32_to_cpu(req->hdr.Id.SyncId.TreeId),
+ lc, share_ret);
+ if (rc < 0)
+- goto err_out;
++ goto err_out1;
+ }
+
+ if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
+ ksmbd_fd_set_delete_on_close(fp, file_info);
+
+ if (need_truncate) {
+- rc = smb2_create_truncate(&path);
++ rc = smb2_create_truncate(&fp->filp->f_path);
+ if (rc)
+- goto err_out;
++ goto err_out1;
+ }
+
+ if (req->CreateContextsOffset) {
+@@ -3241,7 +3470,7 @@ int smb2_open(struct ksmbd_work *work)
+ SMB2_CREATE_ALLOCATION_SIZE, 4);
+ if (IS_ERR(az_req)) {
+ rc = PTR_ERR(az_req);
+- goto err_out;
++ goto err_out1;
+ } else if (az_req) {
+ loff_t alloc_size;
+ int err;
+@@ -3250,7 +3479,7 @@ int smb2_open(struct ksmbd_work *work)
+ le32_to_cpu(az_req->ccontext.DataLength) <
+ sizeof(struct create_alloc_size_req)) {
+ rc = -EINVAL;
+- goto err_out;
++ goto err_out1;
+ }
+ alloc_size = le64_to_cpu(az_req->AllocationSize);
+ ksmbd_debug(SMB,
+@@ -3268,7 +3497,7 @@ int smb2_open(struct ksmbd_work *work)
+ context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+- goto err_out;
++ goto err_out1;
+ } else if (context) {
+ ksmbd_debug(SMB, "get query on disk id context\n");
+ query_disk_id = 1;
+@@ -3277,7 +3506,7 @@ int smb2_open(struct ksmbd_work *work)
+
+ rc = ksmbd_vfs_getattr(&path, &stat);
+ if (rc)
+- goto err_out;
++ goto err_out1;
+
+ if (stat.result_mask & STATX_BTIME)
+ fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+@@ -3294,6 +3523,26 @@ int smb2_open(struct ksmbd_work *work)
+
+ memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+
++ if (dh_info.type == DURABLE_REQ_V2 || dh_info.type == DURABLE_REQ) {
++ if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent &&
++ test_share_config_flag(work->tcon->share_conf,
++ KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
++ fp->is_persistent = true;
++ else
++ fp->is_durable = true;
++
++ if (dh_info.type == DURABLE_REQ_V2) {
++ memcpy(fp->create_guid, dh_info.CreateGuid,
++ SMB2_CREATE_GUID_SIZE);
++ if (dh_info.timeout)
++ fp->durable_timeout = min(dh_info.timeout,
++ 300000);
++ else
++ fp->durable_timeout = 60;
++ }
++ }
++
++reconnected_fp:
+ rsp->StructureSize = cpu_to_le16(89);
+ rcu_read_lock();
+ opinfo = rcu_dereference(fp->f_opinfo);
+@@ -3380,6 +3629,33 @@ int smb2_open(struct ksmbd_work *work)
+ next_off = conn->vals->create_disk_id_size;
+ }
+
++ if (dh_info.type == DURABLE_REQ || dh_info.type == DURABLE_REQ_V2) {
++ struct create_context *durable_ccontext;
++
++ durable_ccontext = (struct create_context *)(rsp->Buffer +
++ le32_to_cpu(rsp->CreateContextsLength));
++ contxt_cnt++;
++ if (dh_info.type == DURABLE_REQ) {
++ create_durable_rsp_buf(rsp->Buffer +
++ le32_to_cpu(rsp->CreateContextsLength));
++ le32_add_cpu(&rsp->CreateContextsLength,
++ conn->vals->create_durable_size);
++ iov_len += conn->vals->create_durable_size;
++ } else {
++ create_durable_v2_rsp_buf(rsp->Buffer +
++ le32_to_cpu(rsp->CreateContextsLength),
++ fp);
++ le32_add_cpu(&rsp->CreateContextsLength,
++ conn->vals->create_durable_v2_size);
++ iov_len += conn->vals->create_durable_v2_size;
++ }
++
++ if (next_ptr)
++ *next_ptr = cpu_to_le32(next_off);
++ next_ptr = &durable_ccontext->Next;
++ next_off = conn->vals->create_durable_size;
++ }
++
+ if (posix_ctxt) {
+ contxt_cnt++;
+ create_posix_rsp_buf(rsp->Buffer +
+@@ -3398,13 +3674,13 @@ int smb2_open(struct ksmbd_work *work)
+ }
+
+ err_out:
+- if (file_present || created) {
+- inode_unlock(d_inode(parent_path.dentry));
+- path_put(&path);
+- path_put(&parent_path);
+- }
+- ksmbd_revert_fsids(work);
++ if (rc && (file_present || created))
++ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
+ err_out1:
++ ksmbd_revert_fsids(work);
++
++err_out2:
+ if (!rc) {
+ ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+ rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len);
+@@ -3444,7 +3720,7 @@ int smb2_open(struct ksmbd_work *work)
+ kfree(name);
+ kfree(lc);
+
+- return 0;
++ return rc;
+ }
+
+ static int readdir_info_level_struct_sz(int info_level)
+@@ -3805,11 +4081,16 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv)
+ }
+
+ ksmbd_kstat.kstat = &kstat;
+- if (priv->info_level != FILE_NAMES_INFORMATION)
+- ksmbd_vfs_fill_dentry_attrs(priv->work,
+- idmap,
+- dent,
+- &ksmbd_kstat);
++ if (priv->info_level != FILE_NAMES_INFORMATION) {
++ rc = ksmbd_vfs_fill_dentry_attrs(priv->work,
++ idmap,
++ dent,
++ &ksmbd_kstat);
++ if (rc) {
++ dput(dent);
++ continue;
++ }
++ }
+
+ rc = smb2_populate_readdir_entry(priv->work->conn,
+ priv->info_level,
+@@ -4052,7 +4333,7 @@ int smb2_query_dir(struct ksmbd_work *work)
+ }
+
+ srch_flag = req->Flags;
+- srch_ptr = smb_strndup_from_utf16(req->Buffer,
++ srch_ptr = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->FileNameOffset),
+ le16_to_cpu(req->FileNameLength), 1,
+ conn->local_nls);
+ if (IS_ERR(srch_ptr)) {
+@@ -4135,7 +4416,8 @@ int smb2_query_dir(struct ksmbd_work *work)
+ rsp->OutputBufferLength = cpu_to_le32(0);
+ rsp->Buffer[0] = 0;
+ rc = ksmbd_iov_pin_rsp(work, (void *)rsp,
+- sizeof(struct smb2_query_directory_rsp));
++ offsetof(struct smb2_query_directory_rsp, Buffer)
++ + 1);
+ if (rc)
+ goto err_out;
+ } else {
+@@ -4312,7 +4594,8 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
+ sizeof(struct smb2_ea_info_req))
+ return -EINVAL;
+
+- ea_req = (struct smb2_ea_info_req *)req->Buffer;
++ ea_req = (struct smb2_ea_info_req *)((char *)req +
++ le16_to_cpu(req->InputBufferOffset));
+ } else {
+ /* need to send all EAs, if no specific EA is requested*/
+ if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
+@@ -4457,6 +4740,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+ struct smb2_file_basic_info *basic_info;
+ struct kstat stat;
+ u64 time;
++ int ret;
+
+ if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+ pr_err("no right to read the attributes : 0x%x\n",
+@@ -4464,9 +4748,12 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+ return -EACCES;
+ }
+
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
++
+ basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+- file_inode(fp->filp), &stat);
+ basic_info->CreationTime = cpu_to_le64(fp->create_time);
+ time = ksmbd_UnixTimeToNT(stat.atime);
+ basic_info->LastAccessTime = cpu_to_le64(time);
+@@ -4481,27 +4768,31 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
+ return 0;
+ }
+
+-static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
+- struct ksmbd_file *fp, void *rsp_org)
++static int get_file_standard_info(struct smb2_query_info_rsp *rsp,
++ struct ksmbd_file *fp, void *rsp_org)
+ {
+ struct smb2_file_standard_info *sinfo;
+ unsigned int delete_pending;
+- struct inode *inode;
+ struct kstat stat;
++ int ret;
+
+- inode = file_inode(fp->filp);
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+ sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
+ delete_pending = ksmbd_inode_pending_delete(fp);
+
+- sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
++ sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
+ sinfo->DeletePending = delete_pending;
+ sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
+ rsp->OutputBufferLength =
+ cpu_to_le32(sizeof(struct smb2_file_standard_info));
++
++ return 0;
+ }
+
+ static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
+@@ -4523,11 +4814,11 @@ static int get_file_all_info(struct ksmbd_work *work,
+ struct ksmbd_conn *conn = work->conn;
+ struct smb2_file_all_info *file_info;
+ unsigned int delete_pending;
+- struct inode *inode;
+ struct kstat stat;
+ int conv_len;
+ char *filename;
+ u64 time;
++ int ret;
+
+ if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+ ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n",
+@@ -4539,8 +4830,10 @@ static int get_file_all_info(struct ksmbd_work *work,
+ if (IS_ERR(filename))
+ return PTR_ERR(filename);
+
+- inode = file_inode(fp->filp);
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+ ksmbd_debug(SMB, "filename = %s\n", filename);
+ delete_pending = ksmbd_inode_pending_delete(fp);
+@@ -4556,7 +4849,7 @@ static int get_file_all_info(struct ksmbd_work *work,
+ file_info->Attributes = fp->f_ci->m_fattr;
+ file_info->Pad1 = 0;
+ file_info->AllocationSize =
+- cpu_to_le64(inode->i_blocks << 9);
++ cpu_to_le64(stat.blocks << 9);
+ file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ file_info->NumberOfLinks =
+ cpu_to_le32(get_nlink(&stat) - delete_pending);
+@@ -4600,10 +4893,10 @@ static void get_file_alternate_info(struct ksmbd_work *work,
+ cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
+ }
+
+-static void get_file_stream_info(struct ksmbd_work *work,
+- struct smb2_query_info_rsp *rsp,
+- struct ksmbd_file *fp,
+- void *rsp_org)
++static int get_file_stream_info(struct ksmbd_work *work,
++ struct smb2_query_info_rsp *rsp,
++ struct ksmbd_file *fp,
++ void *rsp_org)
+ {
+ struct ksmbd_conn *conn = work->conn;
+ struct smb2_file_stream_info *file_info;
+@@ -4614,9 +4907,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
+ int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
+ int buf_free_len;
+ struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
++ int ret;
++
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+- file_inode(fp->filp), &stat);
+ file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+
+ buf_free_len =
+@@ -4697,29 +4994,37 @@ static void get_file_stream_info(struct ksmbd_work *work,
+ kvfree(xattr_list);
+
+ rsp->OutputBufferLength = cpu_to_le32(nbytes);
++
++ return 0;
+ }
+
+-static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
+- struct ksmbd_file *fp, void *rsp_org)
++static int get_file_internal_info(struct smb2_query_info_rsp *rsp,
++ struct ksmbd_file *fp, void *rsp_org)
+ {
+ struct smb2_file_internal_info *file_info;
+ struct kstat stat;
++ int ret;
++
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+- file_inode(fp->filp), &stat);
+ file_info = (struct smb2_file_internal_info *)rsp->Buffer;
+ file_info->IndexNumber = cpu_to_le64(stat.ino);
+ rsp->OutputBufferLength =
+ cpu_to_le32(sizeof(struct smb2_file_internal_info));
++
++ return 0;
+ }
+
+ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+ struct ksmbd_file *fp, void *rsp_org)
+ {
+ struct smb2_file_ntwrk_info *file_info;
+- struct inode *inode;
+ struct kstat stat;
+ u64 time;
++ int ret;
+
+ if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
+ pr_err("no right to read the attributes : 0x%x\n",
+@@ -4727,10 +5032,12 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+ return -EACCES;
+ }
+
+- file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+- inode = file_inode(fp->filp);
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
++ file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
+
+ file_info->CreationTime = cpu_to_le64(fp->create_time);
+ time = ksmbd_UnixTimeToNT(stat.atime);
+@@ -4740,8 +5047,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
+ time = ksmbd_UnixTimeToNT(stat.ctime);
+ file_info->ChangeTime = cpu_to_le64(time);
+ file_info->Attributes = fp->f_ci->m_fattr;
+- file_info->AllocationSize =
+- cpu_to_le64(inode->i_blocks << 9);
++ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ file_info->Reserved = cpu_to_le32(0);
+ rsp->OutputBufferLength =
+@@ -4781,14 +5087,17 @@ static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
+ cpu_to_le32(sizeof(struct smb2_file_mode_info));
+ }
+
+-static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+- struct ksmbd_file *fp, void *rsp_org)
++static int get_file_compression_info(struct smb2_query_info_rsp *rsp,
++ struct ksmbd_file *fp, void *rsp_org)
+ {
+ struct smb2_file_comp_info *file_info;
+ struct kstat stat;
++ int ret;
+
+- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+- file_inode(fp->filp), &stat);
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+ file_info = (struct smb2_file_comp_info *)rsp->Buffer;
+ file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
+@@ -4800,6 +5109,8 @@ static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
+
+ rsp->OutputBufferLength =
+ cpu_to_le32(sizeof(struct smb2_file_comp_info));
++
++ return 0;
+ }
+
+ static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
+@@ -4821,7 +5132,7 @@ static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
+ return 0;
+ }
+
+-static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
++static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
+ struct ksmbd_file *fp, void *rsp_org)
+ {
+ struct smb311_posix_qinfo *file_info;
+@@ -4829,24 +5140,31 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
+ struct mnt_idmap *idmap = file_mnt_idmap(fp->filp);
+ vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
+ vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
++ struct kstat stat;
+ u64 time;
+ int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32;
++ int ret;
++
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret)
++ return ret;
+
+ file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
+ file_info->CreationTime = cpu_to_le64(fp->create_time);
+- time = ksmbd_UnixTimeToNT(inode->i_atime);
++ time = ksmbd_UnixTimeToNT(stat.atime);
+ file_info->LastAccessTime = cpu_to_le64(time);
+- time = ksmbd_UnixTimeToNT(inode->i_mtime);
++ time = ksmbd_UnixTimeToNT(stat.mtime);
+ file_info->LastWriteTime = cpu_to_le64(time);
+- time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
++ time = ksmbd_UnixTimeToNT(stat.ctime);
+ file_info->ChangeTime = cpu_to_le64(time);
+ file_info->DosAttributes = fp->f_ci->m_fattr;
+- file_info->Inode = cpu_to_le64(inode->i_ino);
+- file_info->EndOfFile = cpu_to_le64(inode->i_size);
+- file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+- file_info->HardLinks = cpu_to_le32(inode->i_nlink);
+- file_info->Mode = cpu_to_le32(inode->i_mode & 0777);
+- file_info->DeviceId = cpu_to_le32(inode->i_rdev);
++ file_info->Inode = cpu_to_le64(stat.ino);
++ file_info->EndOfFile = cpu_to_le64(stat.size);
++ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
++ file_info->HardLinks = cpu_to_le32(stat.nlink);
++ file_info->Mode = cpu_to_le32(stat.mode & 0777);
++ file_info->DeviceId = cpu_to_le32(stat.rdev);
+
+ /*
+ * Sids(32) contain two sids(Domain sid(16), UNIX group sid(16)).
+@@ -4859,6 +5177,8 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
+ SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);
+
+ rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
++
++ return 0;
+ }
+
+ static int smb2_get_info_file(struct ksmbd_work *work,
+@@ -4907,7 +5227,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ break;
+
+ case FILE_STANDARD_INFORMATION:
+- get_file_standard_info(rsp, fp, work->response_buf);
++ rc = get_file_standard_info(rsp, fp, work->response_buf);
+ break;
+
+ case FILE_ALIGNMENT_INFORMATION:
+@@ -4923,11 +5243,11 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ break;
+
+ case FILE_STREAM_INFORMATION:
+- get_file_stream_info(work, rsp, fp, work->response_buf);
++ rc = get_file_stream_info(work, rsp, fp, work->response_buf);
+ break;
+
+ case FILE_INTERNAL_INFORMATION:
+- get_file_internal_info(rsp, fp, work->response_buf);
++ rc = get_file_internal_info(rsp, fp, work->response_buf);
+ break;
+
+ case FILE_NETWORK_OPEN_INFORMATION:
+@@ -4951,7 +5271,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ break;
+
+ case FILE_COMPRESSION_INFORMATION:
+- get_file_compression_info(rsp, fp, work->response_buf);
++ rc = get_file_compression_info(rsp, fp, work->response_buf);
+ break;
+
+ case FILE_ATTRIBUTE_TAG_INFORMATION:
+@@ -4962,7 +5282,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
+ pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
+ rc = -EOPNOTSUPP;
+ } else {
+- find_file_posix_info(rsp, fp, work->response_buf);
++ rc = find_file_posix_info(rsp, fp, work->response_buf);
+ }
+ break;
+ default:
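The hunks above convert the query-info helpers from void to int so that a vfs_getattr() failure aborts the request instead of filling the response from possibly stale inode fields; network and stacked filesystems may need to revalidate attributes, which a direct generic_fillattr() on the cached inode would skip. A minimal sketch of the shared pattern (hypothetical helper name, ksmbd kernel context assumed):

/* Sketch only: the error-propagating attribute fetch used above. */
static int example_fill_from_getattr(struct ksmbd_file *fp, struct kstat *stat)
{
	int ret;

	ret = vfs_getattr(&fp->filp->f_path, stat, STATX_BASIC_STATS,
			  AT_STATX_SYNC_AS_STAT);
	if (ret)
		return ret;	/* smb2_get_info_file() maps this to an SMB2 status */

	/* ... fill the response from stat->size, stat->blocks, ... */
	return 0;
}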
+@@ -5014,8 +5334,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+
+ info = (struct filesystem_device_info *)rsp->Buffer;
+
+- info->DeviceType = cpu_to_le32(stfs.f_type);
+- info->DeviceCharacteristics = cpu_to_le32(0x00000020);
++ info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
++ info->DeviceCharacteristics =
++ cpu_to_le32(FILE_DEVICE_IS_MOUNTED);
++ if (!test_tree_conn_flag(work->tcon,
++ KSMBD_TREE_CONN_FLAG_WRITABLE))
++ info->DeviceCharacteristics |=
++ cpu_to_le32(FILE_READ_ONLY_DEVICE);
+ rsp->OutputBufferLength = cpu_to_le32(8);
+ break;
+ }
+@@ -5282,6 +5607,11 @@ int smb2_query_info(struct ksmbd_work *work)
+
+ ksmbd_debug(SMB, "GOT query info request\n");
+
++ if (ksmbd_override_fsids(work)) {
++ rc = -ENOMEM;
++ goto err_out;
++ }
++
+ switch (req->InfoType) {
+ case SMB2_O_INFO_FILE:
+ ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+@@ -5300,6 +5630,7 @@ int smb2_query_info(struct ksmbd_work *work)
+ req->InfoType);
+ rc = -EOPNOTSUPP;
+ }
++ ksmbd_revert_fsids(work);
+
+ if (!rc) {
+ rsp->StructureSize = cpu_to_le16(9);
+@@ -5309,6 +5640,7 @@ int smb2_query_info(struct ksmbd_work *work)
+ le32_to_cpu(rsp->OutputBufferLength));
+ }
+
++err_out:
+ if (rc < 0) {
+ if (rc == -EACCES)
+ rsp->hdr.Status = STATUS_ACCESS_DENIED;
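smb2_query_info() now brackets the whole InfoType dispatch in ksmbd_override_fsids()/ksmbd_revert_fsids(), so attribute lookups run with the share-configured fsuid/fsgid. Roughly (do_dispatch() is a hypothetical stand-in for the switch above):

static int example_query(struct ksmbd_work *work)
{
	int rc;

	/* Run the lookup with the share-configured fsuid/fsgid. */
	if (ksmbd_override_fsids(work))
		return -ENOMEM;		/* fails only on cred allocation */

	rc = do_dispatch(work);		/* hypothetical: the InfoType switch */

	ksmbd_revert_fsids(work);	/* always restore the original creds */
	return rc;
}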
+@@ -5375,7 +5707,6 @@ int smb2_close(struct ksmbd_work *work)
+ struct smb2_close_rsp *rsp;
+ struct ksmbd_conn *conn = work->conn;
+ struct ksmbd_file *fp;
+- struct inode *inode;
+ u64 time;
+ int err = 0;
+
+@@ -5430,24 +5761,33 @@ int smb2_close(struct ksmbd_work *work)
+ rsp->Reserved = 0;
+
+ if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) {
++ struct kstat stat;
++ int ret;
++
+ fp = ksmbd_lookup_fd_fast(work, volatile_id);
+ if (!fp) {
+ err = -ENOENT;
+ goto out;
+ }
+
+- inode = file_inode(fp->filp);
++ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (ret) {
++ ksmbd_fd_put(work, fp);
++ goto out;
++ }
++
+ rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
+- rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 :
+- cpu_to_le64(inode->i_blocks << 9);
+- rsp->EndOfFile = cpu_to_le64(inode->i_size);
++ rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 :
++ cpu_to_le64(stat.blocks << 9);
++ rsp->EndOfFile = cpu_to_le64(stat.size);
+ rsp->Attributes = fp->f_ci->m_fattr;
+ rsp->CreationTime = cpu_to_le64(fp->create_time);
+- time = ksmbd_UnixTimeToNT(inode->i_atime);
++ time = ksmbd_UnixTimeToNT(stat.atime);
+ rsp->LastAccessTime = cpu_to_le64(time);
+- time = ksmbd_UnixTimeToNT(inode->i_mtime);
++ time = ksmbd_UnixTimeToNT(stat.mtime);
+ rsp->LastWriteTime = cpu_to_le64(time);
+- time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
++ time = ksmbd_UnixTimeToNT(stat.ctime);
+ rsp->ChangeTime = cpu_to_le64(time);
+ ksmbd_fd_put(work, fp);
+ } else {
+@@ -5537,7 +5877,7 @@ static int smb2_rename(struct ksmbd_work *work,
+ rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp),
+ &fp->filp->f_path,
+ xattr_stream_name,
+- NULL, 0, 0);
++ NULL, 0, 0, true);
+ if (rc < 0) {
+ pr_err("failed to store stream name in xattr: %d\n",
+ rc);
+@@ -5559,6 +5899,8 @@ static int smb2_rename(struct ksmbd_work *work,
+ flags = RENAME_NOREPLACE;
+
+ rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
++ if (!rc)
++ smb_break_all_levII_oplock(work, fp, 0);
+ out:
+ kfree(new_name);
+ return rc;
+@@ -5630,11 +5972,9 @@ static int smb2_create_link(struct ksmbd_work *work,
+ if (rc)
+ rc = -EINVAL;
+ out:
+- if (file_present) {
+- inode_unlock(d_inode(parent_path.dentry));
+- path_put(&path);
+- path_put(&parent_path);
+- }
++ if (file_present)
++ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
++
+ if (!IS_ERR(link_name))
+ kfree(link_name);
+ kfree(pathname);
+@@ -5701,7 +6041,8 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ XATTR_DOSINFO_ITIME;
+
+- rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da);
++ rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da,
++ true);
+ if (rc)
+ ksmbd_debug(SMB,
+ "failed to restore file attribute in EA\n");
+@@ -5736,15 +6077,21 @@ static int set_file_allocation_info(struct ksmbd_work *work,
+
+ loff_t alloc_blks;
+ struct inode *inode;
++ struct kstat stat;
+ int rc;
+
+ if (!(fp->daccess & FILE_WRITE_DATA_LE))
+ return -EACCES;
+
++ rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
++ AT_STATX_SYNC_AS_STAT);
++ if (rc)
++ return rc;
++
+ alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
+ inode = file_inode(fp->filp);
+
+- if (alloc_blks > inode->i_blocks) {
++ if (alloc_blks > stat.blocks) {
+ smb_break_all_levII_oplock(work, fp, 1);
+ rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
+ alloc_blks * 512);
+@@ -5752,7 +6099,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
+ pr_err("vfs_fallocate is failed : %d\n", rc);
+ return rc;
+ }
+- } else if (alloc_blks < inode->i_blocks) {
++ } else if (alloc_blks < stat.blocks) {
+ loff_t size;
+
+ /*
+@@ -5907,6 +6254,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ struct ksmbd_share_config *share)
+ {
+ unsigned int buf_len = le32_to_cpu(req->BufferLength);
++ char *buffer = (char *)req + le16_to_cpu(req->BufferOffset);
+
+ switch (req->FileInfoClass) {
+ case FILE_BASIC_INFORMATION:
+@@ -5914,7 +6262,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ if (buf_len < sizeof(struct smb2_file_basic_info))
+ return -EINVAL;
+
+- return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
++ return set_file_basic_info(fp, (struct smb2_file_basic_info *)buffer, share);
+ }
+ case FILE_ALLOCATION_INFORMATION:
+ {
+@@ -5922,7 +6270,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ return -EINVAL;
+
+ return set_file_allocation_info(work, fp,
+- (struct smb2_file_alloc_info *)req->Buffer);
++ (struct smb2_file_alloc_info *)buffer);
+ }
+ case FILE_END_OF_FILE_INFORMATION:
+ {
+@@ -5930,21 +6278,15 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ return -EINVAL;
+
+ return set_end_of_file_info(work, fp,
+- (struct smb2_file_eof_info *)req->Buffer);
++ (struct smb2_file_eof_info *)buffer);
+ }
+ case FILE_RENAME_INFORMATION:
+ {
+- if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+- ksmbd_debug(SMB,
+- "User does not have write permission\n");
+- return -EACCES;
+- }
+-
+ if (buf_len < sizeof(struct smb2_file_rename_info))
+ return -EINVAL;
+
+ return set_rename_info(work, fp,
+- (struct smb2_file_rename_info *)req->Buffer,
++ (struct smb2_file_rename_info *)buffer,
+ buf_len);
+ }
+ case FILE_LINK_INFORMATION:
+@@ -5953,23 +6295,17 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ return -EINVAL;
+
+ return smb2_create_link(work, work->tcon->share_conf,
+- (struct smb2_file_link_info *)req->Buffer,
++ (struct smb2_file_link_info *)buffer,
+ buf_len, fp->filp,
+ work->conn->local_nls);
+ }
+ case FILE_DISPOSITION_INFORMATION:
+ {
+- if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+- ksmbd_debug(SMB,
+- "User does not have write permission\n");
+- return -EACCES;
+- }
+-
+ if (buf_len < sizeof(struct smb2_file_disposition_info))
+ return -EINVAL;
+
+ return set_file_disposition_info(fp,
+- (struct smb2_file_disposition_info *)req->Buffer);
++ (struct smb2_file_disposition_info *)buffer);
+ }
+ case FILE_FULL_EA_INFORMATION:
+ {
+@@ -5982,22 +6318,22 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+ if (buf_len < sizeof(struct smb2_ea_info))
+ return -EINVAL;
+
+- return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+- buf_len, &fp->filp->f_path);
++ return smb2_set_ea((struct smb2_ea_info *)buffer,
++ buf_len, &fp->filp->f_path, true);
+ }
+ case FILE_POSITION_INFORMATION:
+ {
+ if (buf_len < sizeof(struct smb2_file_pos_info))
+ return -EINVAL;
+
+- return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
++ return set_file_position_info(fp, (struct smb2_file_pos_info *)buffer);
+ }
+ case FILE_MODE_INFORMATION:
+ {
+ if (buf_len < sizeof(struct smb2_file_mode_info))
+ return -EINVAL;
+
+- return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
++ return set_file_mode_info(fp, (struct smb2_file_mode_info *)buffer);
+ }
+ }
+
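Throughout smb2_set_info_file() and smb2_ioctl(), the payload pointer is now derived from the client-supplied BufferOffset rather than the fixed req->Buffer field, which is only correct when the client happened to place the payload immediately after the header. The computation in isolation (hypothetical accessor; real callers must already have validated BufferOffset/BufferLength against the received PDU size):

/* Locate the variable-length payload via the offset the client sent,
 * instead of assuming it starts at the fixed req->Buffer field. */
static char *example_req_payload(struct smb2_set_info_req *req)
{
	return (char *)req + le16_to_cpu(req->BufferOffset);
}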
+@@ -6013,7 +6349,7 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
+ fp->saccess |= FILE_SHARE_DELETE_LE;
+
+ return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
+- buf_len, false);
++ buf_len, false, true);
+ }
+
+ /**
+@@ -6026,7 +6362,7 @@ int smb2_set_info(struct ksmbd_work *work)
+ {
+ struct smb2_set_info_req *req;
+ struct smb2_set_info_rsp *rsp;
+- struct ksmbd_file *fp;
++ struct ksmbd_file *fp = NULL;
+ int rc = 0;
+ unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
+
+@@ -6046,6 +6382,13 @@ int smb2_set_info(struct ksmbd_work *work)
+ rsp = smb2_get_msg(work->response_buf);
+ }
+
++ if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
++ ksmbd_debug(SMB, "User does not have write permission\n");
++ pr_err("User does not have write permission\n");
++ rc = -EACCES;
++ goto err_out;
++ }
++
+ if (!has_file_id(id)) {
+ id = req->VolatileFileId;
+ pid = req->PersistentFileId;
+@@ -6071,7 +6414,7 @@ int smb2_set_info(struct ksmbd_work *work)
+ }
+ rc = smb2_set_info_sec(fp,
+ le32_to_cpu(req->AdditionalInformation),
+- req->Buffer,
++ (char *)req + le16_to_cpu(req->BufferOffset),
+ le32_to_cpu(req->BufferLength));
+ ksmbd_revert_fsids(work);
+ break;
+@@ -6155,8 +6498,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
+ err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
+ offsetof(struct smb2_read_rsp, Buffer),
+ aux_payload_buf, nbytes);
+- if (err)
++ if (err) {
++ kvfree(aux_payload_buf);
+ goto out;
++ }
+ kvfree(rpc_resp);
+ } else {
+ err = ksmbd_iov_pin_rsp(work, (void *)rsp,
+@@ -6366,8 +6711,10 @@ int smb2_read(struct ksmbd_work *work)
+ err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
+ offsetof(struct smb2_read_rsp, Buffer),
+ aux_payload_buf, nbytes);
+- if (err)
++ if (err) {
++ kvfree(aux_payload_buf);
+ goto out;
++ }
+ ksmbd_fd_put(work, fp);
+ return 0;
+
+@@ -7078,6 +7425,7 @@ int smb2_lock(struct ksmbd_work *work)
+ smb2_remove_blocked_lock,
+ argv);
+ if (rc) {
++ kfree(argv);
+ err = -ENOMEM;
+ goto out;
+ }
+@@ -7512,7 +7860,7 @@ static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
+ struct smb2_ioctl_rsp *rsp)
+ {
+ struct ksmbd_rpc_command *rpc_resp;
+- char *data_buf = (char *)&req->Buffer[0];
++ char *data_buf = (char *)req + le32_to_cpu(req->InputOffset);
+ int nbytes = 0;
+
+ rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
+@@ -7582,7 +7930,8 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
+
+ da.attr = le32_to_cpu(fp->f_ci->m_fattr);
+ ret = ksmbd_vfs_set_dos_attrib_xattr(idmap,
+- &fp->filp->f_path, &da);
++ &fp->filp->f_path,
++ &da, true);
+ if (ret)
+ fp->f_ci->m_fattr = old_fattr;
+ }
+@@ -7624,6 +7973,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ u64 id = KSMBD_NO_FID;
+ struct ksmbd_conn *conn = work->conn;
+ int ret = 0;
++ char *buffer;
+
+ if (work->next_smb2_rcv_hdr_off) {
+ req = ksmbd_req_buf_next(work);
+@@ -7646,6 +7996,8 @@ int smb2_ioctl(struct ksmbd_work *work)
+ goto out;
+ }
+
++ buffer = (char *)req + le32_to_cpu(req->InputOffset);
++
+ cnt_code = le32_to_cpu(req->CtlCode);
+ ret = smb2_calc_max_out_buf_len(work, 48,
+ le32_to_cpu(req->MaxOutputResponse));
+@@ -7703,7 +8055,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ }
+
+ ret = fsctl_validate_negotiate_info(conn,
+- (struct validate_negotiate_info_req *)&req->Buffer[0],
++ (struct validate_negotiate_info_req *)buffer,
+ (struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
+ in_buf_len);
+ if (ret < 0)
+@@ -7756,7 +8108,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ rsp->VolatileFileId = req->VolatileFileId;
+ rsp->PersistentFileId = req->PersistentFileId;
+ fsctl_copychunk(work,
+- (struct copychunk_ioctl_req *)&req->Buffer[0],
++ (struct copychunk_ioctl_req *)buffer,
+ le32_to_cpu(req->CtlCode),
+ le32_to_cpu(req->InputCount),
+ req->VolatileFileId,
+@@ -7769,8 +8121,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ goto out;
+ }
+
+- ret = fsctl_set_sparse(work, id,
+- (struct file_sparse *)&req->Buffer[0]);
++ ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer);
+ if (ret < 0)
+ goto out;
+ break;
+@@ -7793,7 +8144,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ }
+
+ zero_data =
+- (struct file_zero_data_information *)&req->Buffer[0];
++ (struct file_zero_data_information *)buffer;
+
+ off = le64_to_cpu(zero_data->FileOffset);
+ bfz = le64_to_cpu(zero_data->BeyondFinalZero);
+@@ -7824,7 +8175,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ }
+
+ ret = fsctl_query_allocated_ranges(work, id,
+- (struct file_allocated_range_buffer *)&req->Buffer[0],
++ (struct file_allocated_range_buffer *)buffer,
+ (struct file_allocated_range_buffer *)&rsp->Buffer[0],
+ out_buf_len /
+ sizeof(struct file_allocated_range_buffer), &nbytes);
+@@ -7868,7 +8219,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ goto out;
+ }
+
+- dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
++ dup_ext = (struct duplicate_extents_to_file *)buffer;
+
+ fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
+ dup_ext->PersistentFileHandle);
+@@ -8208,6 +8559,11 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ le32_to_cpu(req->LeaseState));
+ }
+
++ if (ret < 0) {
++ rsp->hdr.Status = err;
++ goto err_out;
++ }
++
+ lease_state = lease->state;
+ opinfo->op_state = OPLOCK_STATE_NONE;
+ wake_up_interruptible_all(&opinfo->oplock_q);
+@@ -8215,11 +8571,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ wake_up_interruptible_all(&opinfo->oplock_brk);
+ opinfo_put(opinfo);
+
+- if (ret < 0) {
+- rsp->hdr.Status = err;
+- goto err_out;
+- }
+-
+ rsp->StructureSize = cpu_to_le16(36);
+ rsp->Reserved = 0;
+ rsp->Flags = 0;
+@@ -8231,7 +8582,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
+ return;
+
+ err_out:
+- opinfo->op_state = OPLOCK_STATE_NONE;
+ wake_up_interruptible_all(&opinfo->oplock_q);
+ atomic_dec(&opinfo->breaking_cnt);
+ wake_up_interruptible_all(&opinfo->oplock_brk);
+diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
+index d12cfd3b09278e..643f5e1cfe3570 100644
+--- a/fs/smb/server/smb2pdu.h
++++ b/fs/smb/server/smb2pdu.h
+@@ -64,7 +64,7 @@ struct preauth_integrity_info {
+ #define SMB2_SESSION_TIMEOUT (10 * HZ)
+
+ struct create_durable_req_v2 {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le32 Timeout;
+ __le32 Flags;
+@@ -72,8 +72,20 @@ struct create_durable_req_v2 {
+ __u8 CreateGuid[16];
+ } __packed;
+
++struct create_durable_reconn_req {
++ struct create_context_hdr ccontext;
++ __u8 Name[8];
++ union {
++ __u8 Reserved[16];
++ struct {
++ __u64 PersistentFileId;
++ __u64 VolatileFileId;
++ } Fid;
++ } Data;
++} __packed;
++
+ struct create_durable_reconn_v2_req {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct {
+ __u64 PersistentFileId;
+@@ -84,13 +96,13 @@ struct create_durable_reconn_v2_req {
+ } __packed;
+
+ struct create_alloc_size_req {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le64 AllocationSize;
+ } __packed;
+
+ struct create_durable_rsp {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ union {
+ __u8 Reserved[8];
+@@ -98,8 +110,11 @@ struct create_durable_rsp {
+ } Data;
+ } __packed;
+
++/* See MS-SMB2 2.2.13.2.11 */
++/* Flags */
++#define SMB2_DHANDLE_FLAG_PERSISTENT 0x00000002
+ struct create_durable_v2_rsp {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ __le32 Timeout;
+ __le32 Flags;
+@@ -107,7 +122,7 @@ struct create_durable_v2_rsp {
+
+ /* equivalent of the contents of SMB3.1.1 POSIX open context response */
+ struct create_posix_rsp {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[16];
+ __le32 nlink;
+ __le32 reparse_tag;
+@@ -366,13 +381,13 @@ struct smb2_ea_info {
+ } __packed; /* level 15 Query */
+
+ struct create_ea_buf_req {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct smb2_ea_info ea;
+ } __packed;
+
+ struct create_sd_buf_req {
+- struct create_context ccontext;
++ struct create_context_hdr ccontext;
+ __u8 Name[8];
+ struct smb_ntsd ntsd;
+ } __packed;
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index e6ba1e9b8589aa..13818ecb6e1b2f 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -158,8 +158,12 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
+ */
+ bool ksmbd_smb_request(struct ksmbd_conn *conn)
+ {
+- __le32 *proto = (__le32 *)smb2_get_msg(conn->request_buf);
++ __le32 *proto;
+
++ if (conn->request_buf[0] != 0)
++ return false;
++
++ proto = (__le32 *)smb2_get_msg(conn->request_buf);
+ if (*proto == SMB2_COMPRESSION_TRANSFORM_ID) {
+ pr_err_ratelimited("smb2 compression not support yet");
+ return false;
+@@ -366,11 +370,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ return 0;
+ }
+
++/**
++ * set_smb1_rsp_status() - set error type in smb response header
++ * @work: smb work containing smb response header
++ * @err: error code to set in response
++ */
++static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
++{
++ work->send_no_response = 1;
++}
++
+ static struct smb_version_ops smb1_server_ops = {
+ .get_cmd_val = get_smb1_cmd_val,
+ .init_rsp_hdr = init_smb1_rsp_hdr,
+ .allocate_rsp_buf = smb1_allocate_rsp_buf,
+ .check_user_session = smb1_check_user_session,
++ .set_rsp_status = set_smb1_rsp_status,
+ };
+
+ static int smb1_negotiate(struct ksmbd_work *work)
+@@ -442,10 +457,13 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+ }
+
+ ksmbd_kstat.kstat = &kstat;
+- ksmbd_vfs_fill_dentry_attrs(work,
+- idmap,
+- dentry,
+- &ksmbd_kstat);
++ rc = ksmbd_vfs_fill_dentry_attrs(work,
++ idmap,
++ dentry,
++ &ksmbd_kstat);
++ if (rc)
++ break;
++
+ rc = fn(conn, info_level, d_info, &ksmbd_kstat);
+ if (rc)
+ break;
+@@ -628,7 +646,7 @@ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
+ * Lookup fp in master fp list, and check desired access and
+ * shared mode between previous open and current open.
+ */
+- read_lock(&curr_fp->f_ci->m_lock);
++ down_read(&curr_fp->f_ci->m_lock);
+ list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
+ if (file_inode(filp) != file_inode(prev_fp->filp))
+ continue;
+@@ -704,7 +722,7 @@ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
+ break;
+ }
+ }
+- read_unlock(&curr_fp->f_ci->m_lock);
++ up_read(&curr_fp->f_ci->m_lock);
+
+ return rc;
+ }
+@@ -714,10 +732,10 @@ bool is_asterisk(char *p)
+ return p && p[0] == '*';
+ }
+
+-int ksmbd_override_fsids(struct ksmbd_work *work)
++int __ksmbd_override_fsids(struct ksmbd_work *work,
++ struct ksmbd_share_config *share)
+ {
+ struct ksmbd_session *sess = work->sess;
+- struct ksmbd_share_config *share = work->tcon->share_conf;
+ struct cred *cred;
+ struct group_info *gi;
+ unsigned int uid;
+@@ -757,6 +775,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work)
+ return 0;
+ }
+
++int ksmbd_override_fsids(struct ksmbd_work *work)
++{
++ return __ksmbd_override_fsids(work, work->tcon->share_conf);
++}
++
+ void ksmbd_revert_fsids(struct ksmbd_work *work)
+ {
+ const struct cred *cred;
+diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
+index f1092519c0c288..4a3148b0167f54 100644
+--- a/fs/smb/server/smb_common.h
++++ b/fs/smb/server/smb_common.h
+@@ -447,6 +447,8 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn,
+ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
+
+ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
++int __ksmbd_override_fsids(struct ksmbd_work *work,
++ struct ksmbd_share_config *share);
+ int ksmbd_override_fsids(struct ksmbd_work *work);
+ void ksmbd_revert_fsids(struct ksmbd_work *work);
+
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index 6c0305be895e56..1c9775f1efa56d 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -401,10 +401,6 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+ return;
+
+- ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
+- if (!ppace)
+- return;
+-
+ ret = init_acl_state(&acl_state, num_aces);
+ if (ret)
+ return;
+@@ -414,6 +410,13 @@ static void parse_dacl(struct mnt_idmap *idmap,
+ return;
+ }
+
++ ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL);
++ if (!ppace) {
++ free_acl_state(&default_acl_state);
++ free_acl_state(&acl_state);
++ return;
++ }
++
+ /*
+ * reset rwx permissions for user/group/other.
+ * Also, if num_aces is 0 i.e. DACL has no ACEs,
+@@ -1107,6 +1110,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ struct smb_acl *pdacl;
+ struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+ int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
++ int pntsd_alloc_size;
+
+ if (parent_pntsd->osidoffset) {
+ powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+@@ -1119,9 +1123,10 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+ }
+
+- pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+- pgroup_sid_size + sizeof(struct smb_acl) +
+- nt_size, GFP_KERNEL);
++ pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
++ pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
++
++ pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
+ if (!pntsd) {
+ rc = -ENOMEM;
+ goto free_aces_base;
+@@ -1136,6 +1141,27 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ pntsd->gsidoffset = parent_pntsd->gsidoffset;
+ pntsd->dacloffset = parent_pntsd->dacloffset;
+
++ if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
++ if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
++ if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
+ if (pntsd->osidoffset) {
+ struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
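The three checks added above guard the offsets copied from the parent descriptor before they are used to address into the freshly allocated buffer. Widening to u64 before the addition keeps an attacker-controlled 32-bit offset from wrapping past pntsd_alloc_size. The idiom in isolation (hypothetical helper):

/* Overflow-safe "offset + size fits in buffer" check: do the sum in
 * u64 so a large 32-bit offset cannot wrap and pass the comparison. */
static bool example_fits(__le32 wire_off, size_t need, size_t alloc_size)
{
	return (u64)le32_to_cpu(wire_off) + need <= alloc_size;
}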
+@@ -1162,7 +1188,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
+ pntsd_size += sizeof(struct smb_acl) + nt_size;
+ }
+
+- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size);
++ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size, false);
+ kfree(pntsd);
+ }
+
+@@ -1354,7 +1380,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+
+ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+- bool type_check)
++ bool type_check, bool get_write)
+ {
+ int rc;
+ struct smb_fattr fattr = {{0}};
+@@ -1414,7 +1440,8 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
+ /* Update WinACL in xattr */
+ ksmbd_vfs_remove_sd_xattrs(idmap, path);
+- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len);
++ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len,
++ get_write);
+ }
+
+ out:
+diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
+index 49a8c292bd2e81..2b52861707d8c1 100644
+--- a/fs/smb/server/smbacl.h
++++ b/fs/smb/server/smbacl.h
+@@ -207,7 +207,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
+ __le32 *pdaccess, int uid);
+ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
+ const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
+- bool type_check);
++ bool type_check, bool get_write);
+ void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
+ void ksmbd_init_domain(u32 *sub_auth);
+
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index b49d47bdafc945..8752ac82c557bf 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -65,6 +65,7 @@ struct ipc_msg_table_entry {
+ struct hlist_node ipc_table_hlist;
+
+ void *response;
++ unsigned int msg_sz;
+ };
+
+ static struct delayed_work ipc_timer_work;
+@@ -74,7 +75,7 @@ static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
+ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
+ static int ksmbd_ipc_heartbeat_request(void);
+
+-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
++static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
+ [KSMBD_EVENT_UNSPEC] = {
+ .len = 0,
+ },
+@@ -275,6 +276,7 @@ static int handle_response(int type, void *payload, size_t sz)
+ }
+
+ memcpy(entry->response, payload, sz);
++ entry->msg_sz = sz;
+ wake_up_interruptible(&entry->wait);
+ ret = 0;
+ break;
+@@ -403,7 +405,7 @@ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
+ return -EPERM;
+ #endif
+
+- if (type >= KSMBD_EVENT_MAX) {
++ if (type > KSMBD_EVENT_MAX) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+@@ -453,6 +455,34 @@ static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
+ return ret;
+ }
+
++static int ipc_validate_msg(struct ipc_msg_table_entry *entry)
++{
++ unsigned int msg_sz = entry->msg_sz;
++
++ if (entry->type == KSMBD_EVENT_RPC_REQUEST) {
++ struct ksmbd_rpc_command *resp = entry->response;
++
++ msg_sz = sizeof(struct ksmbd_rpc_command) + resp->payload_sz;
++ } else if (entry->type == KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST) {
++ struct ksmbd_spnego_authen_response *resp = entry->response;
++
++ msg_sz = sizeof(struct ksmbd_spnego_authen_response) +
++ resp->session_key_len + resp->spnego_blob_len;
++ } else if (entry->type == KSMBD_EVENT_SHARE_CONFIG_REQUEST) {
++ struct ksmbd_share_config_response *resp = entry->response;
++
++ if (resp->payload_sz) {
++ if (resp->payload_sz < resp->veto_list_sz)
++ return -EINVAL;
++
++ msg_sz = sizeof(struct ksmbd_share_config_response) +
++ resp->payload_sz;
++ }
++ }
++
++ return entry->msg_sz != msg_sz ? -EINVAL : 0;
++}
++
+ static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
+ {
+ struct ipc_msg_table_entry entry;
+@@ -477,6 +507,13 @@ static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle
+ ret = wait_event_interruptible_timeout(entry.wait,
+ entry.response != NULL,
+ IPC_WAIT_TIMEOUT);
++ if (entry.response) {
++ ret = ipc_validate_msg(&entry);
++ if (ret) {
++ kvfree(entry.response);
++ entry.response = NULL;
++ }
++ }
+ out:
+ down_write(&ipc_msg_table_lock);
+ hash_del(&entry.ipc_table_hlist);
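ipc_msg_send_request() now records the size of each netlink response and, via ipc_validate_msg(), re-derives the expected size from the payload's own length fields before handing the buffer to the caller; a mismatch frees it and fails the request. The pattern, reduced to one message type (field names per the ksmbd structs above):

/* The trusted outer size (msg_sz, taken from the netlink attribute)
 * must equal the size implied by the length fields inside the
 * userspace-supplied payload. */
static int example_validate(void *response, unsigned int msg_sz)
{
	struct ksmbd_rpc_command *resp = response;

	if (msg_sz != sizeof(*resp) + resp->payload_sz)
		return -EINVAL;	/* daemon lied about the payload size */
	return 0;
}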
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 3b269e1f523a17..8faa25c6e129b5 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -2039,6 +2039,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
+ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+ {
+ struct smb_direct_transport *t;
++ struct task_struct *handler;
+ int ret;
+
+ if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
+@@ -2056,11 +2057,11 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
+ if (ret)
+ goto out_err;
+
+- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+- KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+- smb_direct_port);
+- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+- ret = PTR_ERR(KSMBD_TRANS(t)->handler);
++ handler = kthread_run(ksmbd_conn_handler_loop,
++ KSMBD_TRANS(t)->conn, "ksmbd:r%u",
++ smb_direct_port);
++ if (IS_ERR(handler)) {
++ ret = PTR_ERR(handler);
+ pr_err("Can't start thread\n");
+ goto out_err;
+ }
+@@ -2140,8 +2141,7 @@ static int smb_direct_ib_client_add(struct ib_device *ib_dev)
+ if (ib_dev->node_type != RDMA_NODE_IB_CA)
+ smb_direct_port = SMB_DIRECT_PORT_IWARP;
+
+- if (!ib_dev->ops.get_netdev ||
+- !rdma_frwr_is_supported(&ib_dev->attrs))
++ if (!rdma_frwr_is_supported(&ib_dev->attrs))
+ return 0;
+
+ smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
+@@ -2241,17 +2241,38 @@ bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
+ for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
+ struct net_device *ndev;
+
+- ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
+- i + 1);
+- if (!ndev)
+- continue;
++ if (smb_dev->ib_dev->ops.get_netdev) {
++ ndev = smb_dev->ib_dev->ops.get_netdev(
++ smb_dev->ib_dev, i + 1);
++ if (!ndev)
++ continue;
+
+- if (ndev == netdev) {
++ if (ndev == netdev) {
++ dev_put(ndev);
++ rdma_capable = true;
++ goto out;
++ }
+ dev_put(ndev);
+- rdma_capable = true;
+- goto out;
++			/* if ib_dev does not implement ops.get_netdev,
++			 * check for a matching InfiniBand GID in hw_addr
++			 */
++ } else if (netdev->type == ARPHRD_INFINIBAND) {
++ struct netdev_hw_addr *ha;
++ union ib_gid gid;
++ u32 port_num;
++ int ret;
++
++ netdev_hw_addr_list_for_each(
++ ha, &netdev->dev_addrs) {
++ memcpy(&gid, ha->addr + 4, sizeof(gid));
++ ret = ib_find_gid(smb_dev->ib_dev, &gid,
++ &port_num, NULL);
++ if (!ret) {
++ rdma_capable = true;
++ goto out;
++ }
++ }
+ }
+- dev_put(ndev);
+ }
+ }
+ out:
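When the IB driver provides no ops.get_netdev hook (the requirement removed from smb_direct_ib_client_add() above), the new fallback matches an InfiniBand netdev by GID. An IPoIB link-layer address is 20 bytes, a 4-byte flags/QPN prefix followed by the 16-byte port GID, which is why the loop copies from ha->addr + 4 before calling ib_find_gid(). The extraction in isolation (kernel context assumed):

#include <rdma/ib_verbs.h>
#include <linux/netdevice.h>

/* Pull the port GID out of a 20-byte IPoIB hardware address:
 * bytes 0-3 are flags + queue pair number, bytes 4-19 the GID. */
static void example_gid_from_ipoib(const struct netdev_hw_addr *ha,
				   union ib_gid *gid)
{
	memcpy(gid, ha->addr + 4, sizeof(*gid));
}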
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index eff7a1d793f003..2ce7f75059cb35 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -185,6 +185,7 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
+ struct sockaddr *csin;
+ int rc = 0;
+ struct tcp_transport *t;
++ struct task_struct *handler;
+
+ t = alloc_transport(client_sk);
+ if (!t) {
+@@ -199,13 +200,13 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
+ goto out_error;
+ }
+
+- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
+- KSMBD_TRANS(t)->conn,
+- "ksmbd:%u",
+- ksmbd_tcp_get_port(csin));
+- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
++ handler = kthread_run(ksmbd_conn_handler_loop,
++ KSMBD_TRANS(t)->conn,
++ "ksmbd:%u",
++ ksmbd_tcp_get_port(csin));
++ if (IS_ERR(handler)) {
+ pr_err("cannot start conn thread\n");
+- rc = PTR_ERR(KSMBD_TRANS(t)->handler);
++ rc = PTR_ERR(handler);
+ free_transport(t);
+ }
+ return rc;
+@@ -364,6 +365,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ * @t: TCP transport instance
+ * @buf: buffer to store read data from socket
+ * @to_read: number of bytes to read from socket
++ * @max_retries: number of retries if reading from socket fails
+ *
+ * Return: on success return number of bytes read from socket,
+ * otherwise return error number
+@@ -415,6 +417,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
+
+ /**
+ * create_socket - create socket for ksmbd/0
++ * @iface: interface to bind the created socket to
+ *
+ * Return: 0 on success, error number otherwise
+ */
+@@ -445,6 +448,10 @@ static int create_socket(struct interface *iface)
+ sin6.sin6_family = PF_INET6;
+ sin6.sin6_addr = in6addr_any;
+ sin6.sin6_port = htons(server_conf.tcp_port);
++
++ lock_sock(ksmbd_socket->sk);
++ ksmbd_socket->sk->sk_ipv6only = false;
++ release_sock(ksmbd_socket->sk);
+ }
+
+ ksmbd_tcp_nodelay(ksmbd_socket);
+@@ -617,8 +624,10 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
+ for_each_netdev(&init_net, netdev) {
+ if (netif_is_bridge_port(netdev))
+ continue;
+- if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL)))
++ if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
++ rtnl_unlock();
+ return -ENOMEM;
++ }
+ }
+ rtnl_unlock();
+ bind_additional_ifaces = 1;
+diff --git a/fs/smb/server/unicode.c b/fs/smb/server/unicode.c
+index 393dd4a7432b65..43ed29ee44ead6 100644
+--- a/fs/smb/server/unicode.c
++++ b/fs/smb/server/unicode.c
+@@ -13,46 +13,10 @@
+ #include "unicode.h"
+ #include "smb_common.h"
+
+-/*
+- * smb_utf16_bytes() - how long will a string be after conversion?
+- * @from: pointer to input string
+- * @maxbytes: don't go past this many bytes of input string
+- * @codepage: destination codepage
+- *
+- * Walk a utf16le string and return the number of bytes that the string will
+- * be after being converted to the given charset, not including any null
+- * termination required. Don't walk past maxbytes in the source buffer.
+- *
+- * Return: string length after conversion
+- */
+-static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+- const struct nls_table *codepage)
+-{
+- int i;
+- int charlen, outlen = 0;
+- int maxwords = maxbytes / 2;
+- char tmp[NLS_MAX_CHARSET_SIZE];
+- __u16 ftmp;
+-
+- for (i = 0; i < maxwords; i++) {
+- ftmp = get_unaligned_le16(&from[i]);
+- if (ftmp == 0)
+- break;
+-
+- charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
+- if (charlen > 0)
+- outlen += charlen;
+- else
+- outlen++;
+- }
+-
+- return outlen;
+-}
+-
+ /*
+ * cifs_mapchar() - convert a host-endian char to proper char in codepage
+ * @target: where converted character should be copied
+- * @src_char: 2 byte host-endian source character
++ * @from: host-endian source string
+ * @cp: codepage to which character should be converted
+ * @mapchar: should character be mapped according to mapchars mount option?
+ *
+@@ -63,10 +27,13 @@ static int smb_utf16_bytes(const __le16 *from, int maxbytes,
+ * Return: string length after conversion
+ */
+ static int
+-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
++cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
+ bool mapchar)
+ {
+ int len = 1;
++ __u16 src_char;
++
++ src_char = *from;
+
+ if (!mapchar)
+ goto cp_convert;
+@@ -104,12 +71,66 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+
+ cp_convert:
+ len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
+- if (len <= 0) {
+- *target = '?';
+- len = 1;
+- }
++ if (len <= 0)
++ goto surrogate_pair;
+
+ goto out;
++
++surrogate_pair:
++ /* convert SURROGATE_PAIR and IVS */
++ if (strcmp(cp->charset, "utf8"))
++ goto unknown;
++ len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
++ if (len <= 0)
++ goto unknown;
++ return len;
++
++unknown:
++ *target = '?';
++ len = 1;
++ goto out;
++}
++
++/*
++ * smb_utf16_bytes() - compute converted string length
++ * @from: pointer to input string
++ * @maxbytes: input string length
++ * @codepage: destination codepage
++ *
++ * Walk a utf16le string and return the number of bytes that the string will
++ * be after being converted to the given charset, not including any null
++ * termination required. Don't walk past maxbytes in the source buffer.
++ *
++ * Return: string length after conversion
++ */
++static int smb_utf16_bytes(const __le16 *from, int maxbytes,
++ const struct nls_table *codepage)
++{
++ int i, j;
++ int charlen, outlen = 0;
++ int maxwords = maxbytes / 2;
++ char tmp[NLS_MAX_CHARSET_SIZE];
++ __u16 ftmp[3];
++
++ for (i = 0; i < maxwords; i++) {
++ ftmp[0] = get_unaligned_le16(&from[i]);
++ if (ftmp[0] == 0)
++ break;
++ for (j = 1; j <= 2; j++) {
++ if (i + j < maxwords)
++ ftmp[j] = get_unaligned_le16(&from[i + j]);
++ else
++ ftmp[j] = 0;
++ }
++
++ charlen = cifs_mapchar(tmp, ftmp, codepage, 0);
++ if (charlen > 0)
++ outlen += charlen;
++ else
++ outlen++;
++ }
++
++ return outlen;
+ }
+
+ /*
+@@ -139,12 +160,12 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+ const struct nls_table *codepage, bool mapchar)
+ {
+- int i, charlen, safelen;
++ int i, j, charlen, safelen;
+ int outlen = 0;
+ int nullsize = nls_nullsize(codepage);
+ int fromwords = fromlen / 2;
+ char tmp[NLS_MAX_CHARSET_SIZE];
+- __u16 ftmp;
++	__u16 ftmp[3]; /* 3 units x 2 bytes = up to 6 bytes of UTF-16 */
+
+ /*
+ * because the chars can be of varying widths, we need to take care
+@@ -155,9 +176,15 @@ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+ safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
+
+ for (i = 0; i < fromwords; i++) {
+- ftmp = get_unaligned_le16(&from[i]);
+- if (ftmp == 0)
++ ftmp[0] = get_unaligned_le16(&from[i]);
++ if (ftmp[0] == 0)
+ break;
++ for (j = 1; j <= 2; j++) {
++ if (i + j < fromwords)
++ ftmp[j] = get_unaligned_le16(&from[i + j]);
++ else
++ ftmp[j] = 0;
++ }
+
+ /*
+ * check to see if converting this character might make the
+@@ -172,6 +199,19 @@ static int smb_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+ /* put converted char into 'to' buffer */
+ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
+ outlen += charlen;
++
++		/*
++		 * charlen = number of UTF-8 bytes for one character:
++		 * a surrogate pair (4 bytes of UTF-16) converts to 4 bytes
++		 * of UTF-8 (charlen=4); an IVS (two UTF-16 pairs) converts
++		 * to 7-8 bytes of UTF-8 (charlen=3+4 or 4+4)
++		 */
++ if (charlen == 4)
++ i++;
++ else if (charlen >= 5)
++ /* 5-6bytes UTF-8 */
++ i += 2;
+ }
+
+ /* properly null-terminate string */
+@@ -306,6 +346,9 @@ int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+ char src_char;
+ __le16 dst_char;
+ wchar_t tmp;
++ wchar_t wchar_to[6]; /* UTF-16 */
++ int ret;
++ unicode_t u;
+
+ if (!mapchars)
+ return smb_strtoUTF16(target, source, srclen, cp);
+@@ -348,11 +391,57 @@ int smbConvertToUTF16(__le16 *target, const char *source, int srclen,
+ * if no match, use question mark, which at least in
+ * some cases serves as wild card
+ */
+- if (charlen < 1) {
+- dst_char = cpu_to_le16(0x003f);
+- charlen = 1;
++ if (charlen > 0)
++ goto ctoUTF16;
++
++ /* convert SURROGATE_PAIR */
++ if (strcmp(cp->charset, "utf8"))
++ goto unknown;
++ if (*(source + i) & 0x80) {
++ charlen = utf8_to_utf32(source + i, 6, &u);
++ if (charlen < 0)
++ goto unknown;
++ } else
++ goto unknown;
++ ret = utf8s_to_utf16s(source + i, charlen,
++ UTF16_LITTLE_ENDIAN,
++ wchar_to, 6);
++ if (ret < 0)
++ goto unknown;
++
++ i += charlen;
++ dst_char = cpu_to_le16(*wchar_to);
++ if (charlen <= 3)
++ /* 1-3bytes UTF-8 to 2bytes UTF-16 */
++ put_unaligned(dst_char, &target[j]);
++ else if (charlen == 4) {
++			/*
++			 * 4 bytes of UTF-8 (surrogate pair) become 4 bytes
++			 * of UTF-16; 7-8 bytes of UTF-8 (IVS) are split
++			 * across two UTF-16 pairs (charlen=3+4 or 4+4)
++			 */
++ put_unaligned(dst_char, &target[j]);
++ dst_char = cpu_to_le16(*(wchar_to + 1));
++ j++;
++ put_unaligned(dst_char, &target[j]);
++ } else if (charlen >= 5) {
++ /* 5-6bytes UTF-8 to 6bytes UTF-16 */
++ put_unaligned(dst_char, &target[j]);
++ dst_char = cpu_to_le16(*(wchar_to + 1));
++ j++;
++ put_unaligned(dst_char, &target[j]);
++ dst_char = cpu_to_le16(*(wchar_to + 2));
++ j++;
++ put_unaligned(dst_char, &target[j]);
+ }
++ continue;
++
++unknown:
++ dst_char = cpu_to_le16(0x003f);
++ charlen = 1;
+ }
++
++ctoUTF16:
+ /*
+ * character may take more than one byte in the source string,
+ * but will take exactly two bytes in the target string
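The unicode.c rework feeds cifs_mapchar() up to three UTF-16 units of lookahead so that surrogate pairs and IVS sequences convert through utf16s_to_utf8s()/utf8s_to_utf16s() instead of degrading to '?'. For orientation: a high surrogate (0xD800-0xDBFF) combines with the following low surrogate (0xDC00-0xDFFF) into one supplementary-plane code point, which is why a charlen of 4 advances the input index by one extra unit. A minimal decode:

/* Minimal surrogate-pair decode, for orientation only: combine a high
 * surrogate (0xD800-0xDBFF) and a low surrogate (0xDC00-0xDFFF) into
 * one code point in the supplementary planes. */
static u32 example_combine_surrogates(u16 hi, u16 lo)
{
	return 0x10000 + (((u32)(hi - 0xD800) << 10) | (lo - 0xDC00));
}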
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index b5a5e50fc9ca3d..2c548e8efef060 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -49,6 +49,10 @@ static void ksmbd_vfs_inherit_owner(struct ksmbd_work *work,
+
+ /**
+ * ksmbd_vfs_lock_parent() - lock parent dentry if it is stable
++ * @parent: parent dentry
++ * @child: child dentry
++ *
++ * Returns: %0 on success, %-ENOENT if the parent dentry is not stable
+ */
+ int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child)
+ {
+@@ -97,6 +101,13 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
+ return -ENOENT;
+ }
+
++ err = mnt_want_write(parent_path->mnt);
++ if (err) {
++ path_put(parent_path);
++ putname(filename);
++ return -ENOENT;
++ }
++
+ inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
+ d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
+ if (IS_ERR(d))
+@@ -123,6 +134,7 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
+
+ err_out:
+ inode_unlock(d_inode(parent_path->dentry));
++ mnt_drop_write(parent_path->mnt);
+ path_put(parent_path);
+ putname(filename);
+ return -ENOENT;
+@@ -173,10 +185,6 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ return err;
+ }
+
+- err = mnt_want_write(path.mnt);
+- if (err)
+- goto out_err;
+-
+ mode |= S_IFREG;
+ err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry),
+ dentry, mode, true);
+@@ -186,9 +194,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ } else {
+ pr_err("File(%s): creation failed (err:%d)\n", name, err);
+ }
+- mnt_drop_write(path.mnt);
+
+-out_err:
+ done_path_create(&path, dentry);
+ return err;
+ }
+@@ -219,10 +225,6 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ return err;
+ }
+
+- err = mnt_want_write(path.mnt);
+- if (err)
+- goto out_err2;
+-
+ idmap = mnt_idmap(path.mnt);
+ mode |= S_IFDIR;
+ err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
+@@ -233,21 +235,19 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ dentry->d_name.len);
+ if (IS_ERR(d)) {
+ err = PTR_ERR(d);
+- goto out_err1;
++ goto out_err;
+ }
+ if (unlikely(d_is_negative(d))) {
+ dput(d);
+ err = -ENOENT;
+- goto out_err1;
++ goto out_err;
+ }
+
+ ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+ dput(d);
+ }
+
+-out_err1:
+- mnt_drop_write(path.mnt);
+-out_err2:
++out_err:
+ done_path_create(&path, dentry);
+ if (err)
+ pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+@@ -364,7 +364,7 @@ static int check_lock_range(struct file *filp, loff_t start, loff_t end,
+ /**
+ * ksmbd_vfs_read() - vfs helper for smb file read
+ * @work: smb work
+- * @fid: file id of open file
++ * @fp: ksmbd file pointer
+ * @count: read byte count
+ * @pos: file pos
+ * @rbuf: read data buffer
+@@ -463,7 +463,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ fp->stream.name,
+ (void *)stream_buf,
+ size,
+- 0);
++ 0,
++ true);
+ if (err < 0)
+ goto out;
+
+@@ -477,7 +478,7 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
+ /**
+ * ksmbd_vfs_write() - vfs helper for smb file write
+ * @work: work
+- * @fid: file id of open file
++ * @fp: ksmbd file pointer
+ * @buf: buf containing data for writing
+ * @count: read byte count
+ * @pos: file pos
+@@ -495,7 +496,7 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+ int err = 0;
+
+ if (work->conn->connection_type) {
+- if (!(fp->daccess & FILE_WRITE_DATA_LE)) {
++ if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
+ pr_err("no right to write(%pD)\n", fp->filp);
+ err = -EACCES;
+ goto out;
+@@ -520,6 +521,9 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+ }
+ }
+
++ /* Reserve lease break for parent dir at closing time */
++ fp->reserve_lease_break = true;
++
+ /* Do we need to break any of a levelII oplock? */
+ smb_break_all_levII_oplock(work, fp, 1);
+
+@@ -545,10 +549,8 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
+
+ /**
+ * ksmbd_vfs_getattr() - vfs helper for smb getattr
+- * @work: work
+- * @fid: file id of open file
+- * @attrs: inode attributes
+- *
++ * @path: path of dentry
++ * @stat: pointer to returned kernel stat structure
+ * Return: 0 on success, otherwise error
+ */
+ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
+@@ -565,6 +567,7 @@ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
+ * ksmbd_vfs_fsync() - vfs helper for smb fsync
+ * @work: work
+ * @fid: file id of open file
++ * @p_id: persistent file id
+ *
+ * Return: 0 on success, otherwise error
+ */
+@@ -587,7 +590,8 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
+
+ /**
+ * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
+- * @name: directory or file name that is relative to share
++ * @work: work
++ * @path: path of dentry
+ *
+ * Return: 0 on success, otherwise error
+ */
+@@ -605,10 +609,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
+ goto out_err;
+ }
+
+- err = mnt_want_write(path->mnt);
+- if (err)
+- goto out_err;
+-
+ idmap = mnt_idmap(path->mnt);
+ if (S_ISDIR(d_inode(path->dentry)->i_mode)) {
+ err = vfs_rmdir(idmap, d_inode(parent), path->dentry);
+@@ -619,7 +619,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
+ if (err)
+ ksmbd_debug(VFS, "unlink failed, err %d\n", err);
+ }
+- mnt_drop_write(path->mnt);
+
+ out_err:
+ ksmbd_revert_fsids(work);
+@@ -628,6 +627,7 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
+
+ /**
+ * ksmbd_vfs_link() - vfs helper for creating smb hardlink
++ * @work: work
+ * @oldname: source file name
+ * @newname: hardlink name that is relative to share
+ *
+@@ -665,16 +665,11 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
+ goto out3;
+ }
+
+- err = mnt_want_write(newpath.mnt);
+- if (err)
+- goto out3;
+-
+ err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt),
+ d_inode(newpath.dentry),
+ dentry, NULL);
+ if (err)
+ ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
+- mnt_drop_write(newpath.mnt);
+
+ out3:
+ done_path_create(&newpath, dentry);
+@@ -732,7 +727,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
+ goto out3;
+ }
+
+- parent_fp = ksmbd_lookup_fd_inode(d_inode(old_child->d_parent));
++ parent_fp = ksmbd_lookup_fd_inode(old_child->d_parent);
+ if (parent_fp) {
+ if (parent_fp->daccess & FILE_DELETE_LE) {
+ pr_err("parent dir is opened with delete access\n");
+@@ -755,10 +750,15 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
+ goto out4;
+ }
+
++ /*
++ * explicitly handle file overwrite case, for compatibility with
++ * filesystems that may not support rename flags (e.g: fuse)
++ */
+ if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
+ err = -EEXIST;
+ goto out4;
+ }
++ flags &= ~(RENAME_NOREPLACE);
+
+ if (old_child == trap) {
+ err = -EINVAL;
+@@ -805,7 +805,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
+ /**
+ * ksmbd_vfs_truncate() - vfs helper for smb file truncate
+ * @work: work
+- * @fid: file id of old file
++ * @fp: ksmbd file pointer
+ * @size: truncate to given size
+ *
+ * Return: 0 on success, otherwise error
+@@ -848,7 +848,6 @@ int ksmbd_vfs_truncate(struct ksmbd_work *work,
+ * ksmbd_vfs_listxattr() - vfs helper for smb list extended attributes
+ * @dentry: dentry of file for listing xattrs
+ * @list: destination buffer
+- * @size: destination buffer length
+ *
+ * Return: xattr list length on success, otherwise error
+ */
+@@ -919,23 +918,27 @@ ssize_t ksmbd_vfs_getxattr(struct mnt_idmap *idmap,
+ /**
+ * ksmbd_vfs_setxattr() - vfs helper for smb set extended attributes value
+ * @idmap: idmap of the relevant mount
+- * @dentry: dentry to set XATTR at
++ * @path: path of dentry to set XATTR at
+ * @attr_name: xattr name for setxattr
+ * @attr_value: xattr value to set
+ * @attr_size: size of xattr value
+ * @flags: destination buffer length
++ * @get_write: get write access to a mount
+ *
+ * Return: 0 on success, otherwise error
+ */
+ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ const struct path *path, const char *attr_name,
+- void *attr_value, size_t attr_size, int flags)
++ void *attr_value, size_t attr_size, int flags,
++ bool get_write)
+ {
+ int err;
+
+- err = mnt_want_write(path->mnt);
+- if (err)
+- return err;
++ if (get_write == true) {
++ err = mnt_want_write(path->mnt);
++ if (err)
++ return err;
++ }
+
+ err = vfs_setxattr(idmap,
+ path->dentry,
+@@ -945,14 +948,15 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ flags);
+ if (err)
+ ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+- mnt_drop_write(path->mnt);
++ if (get_write == true)
++ mnt_drop_write(path->mnt);
+ return err;
+ }
+
+ /**
+ * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options
+ * @filp: file pointer for IO
+- * @options: smb IO options
++ * @option: smb IO options
+ */
+ void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option)
+ {
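ksmbd_vfs_setxattr() and ksmbd_vfs_remove_xattr() grow a get_write flag because several call sites now acquire mnt_want_write() up front, before taking the parent inode lock, and must not retake it inside the helper. A sketch of the resulting caller pattern (hypothetical wrapper; locking and error paths abbreviated):

static int example_set_xattr_locked(struct mnt_idmap *idmap,
				    const struct path *path,
				    void *value, size_t size)
{
	int err = mnt_want_write(path->mnt);

	if (err)
		return err;
	/* ... inode_lock() and related work happen here ... */
	err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE,
				 value, size, 0, false /* already held */);
	/* ... inode_unlock() ... */
	mnt_drop_write(path->mnt);
	return err;
}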
+@@ -1049,16 +1053,21 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+ }
+
+ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
+- const struct path *path, char *attr_name)
++ const struct path *path, char *attr_name,
++ bool get_write)
+ {
+ int err;
+
+- err = mnt_want_write(path->mnt);
+- if (err)
+- return err;
++ if (get_write == true) {
++ err = mnt_want_write(path->mnt);
++ if (err)
++ return err;
++ }
+
+ err = vfs_removexattr(idmap, path->dentry, attr_name);
+- mnt_drop_write(path->mnt);
++
++ if (get_write == true)
++ mnt_drop_write(path->mnt);
+
+ return err;
+ }
+@@ -1101,9 +1110,10 @@ static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen,
+ struct ksmbd_readdir_data *buf;
+
+ buf = container_of(ctx, struct ksmbd_readdir_data, ctx);
+- buf->dirent_count++;
++ if (!is_dot_dotdot(name, namlen))
++ buf->dirent_count++;
+
+- return buf->dirent_count <= 2;
++ return !buf->dirent_count;
+ }
+
+ /**
+@@ -1123,7 +1133,7 @@ int ksmbd_vfs_empty_dir(struct ksmbd_file *fp)
+ readdir_data.dirent_count = 0;
+
+ err = iterate_dir(fp->filp, &readdir_data.ctx);
+- if (readdir_data.dirent_count > 2)
++ if (readdir_data.dirent_count)
+ err = -ENOTEMPTY;
+ else
+ err = 0;
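__dir_empty() used to count every dirent and call the directory non-empty once the count passed the two dot entries; the version above skips "." and ".." outright, so the first counted entry both proves non-emptiness and stops iteration (the callback returns false). The name test behaves like this stand-in:

/* Stand-in for the is_dot_dotdot() helper used above: true for the
 * "." and ".." directory entries and nothing else. */
static bool example_is_dot_dotdot(const char *name, size_t len)
{
	return len <= 2 && name[0] == '.' && (len == 1 || name[1] == '.');
}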
+@@ -1152,7 +1162,7 @@ static bool __caseless_lookup(struct dir_context *ctx, const char *name,
+ if (cmp < 0)
+ cmp = strncasecmp((char *)buf->private, name, namlen);
+ if (!cmp) {
+- memcpy((char *)buf->private, name, namlen);
++ memcpy((char *)buf->private, name, buf->used);
+ buf->dirent_count = 1;
+ return false;
+ }
+@@ -1164,6 +1174,7 @@ static bool __caseless_lookup(struct dir_context *ctx, const char *name,
+ * @dir: path info
+ * @name: filename to lookup
+ * @namelen: filename length
++ * @um: &struct unicode_map to use
+ *
+ * Return: 0 on success, otherwise error
+ */
+@@ -1194,9 +1205,11 @@ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
+
+ /**
+ * ksmbd_vfs_kern_path_locked() - lookup a file and get path info
+- * @name: file path that is relative to share
+- * @flags: lookup flags
+- * @path: if lookup succeed, return path info
++ * @work: work
++ * @name: file path that is relative to share
++ * @flags: lookup flags
++ * @parent_path: if lookup succeeds, returns parent path info
++ * @path: if lookup succeeds, returns path info
+ * @caseless: caseless filename lookup
+ *
+ * Return: 0 on success, otherwise error
+@@ -1217,10 +1230,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ char *filepath;
+ size_t path_len, remain_len;
+
+- filepath = kstrdup(name, GFP_KERNEL);
+- if (!filepath)
+- return -ENOMEM;
+-
++ filepath = name;
+ path_len = strlen(filepath);
+ remain_len = path_len;
+
+@@ -1263,11 +1273,17 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ err = -EINVAL;
+ out2:
+ path_put(parent_path);
+-out1:
+- kfree(filepath);
+ }
+
++out1:
+ if (!err) {
++ err = mnt_want_write(parent_path->mnt);
++ if (err) {
++ path_put(path);
++ path_put(parent_path);
++ return err;
++ }
++
+ err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
+ if (err) {
+ path_put(path);
+@@ -1277,6 +1293,14 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ return err;
+ }
+
++void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
++{
++ inode_unlock(d_inode(parent_path->dentry));
++ mnt_drop_write(parent_path->mnt);
++ path_put(path);
++ path_put(parent_path);
++}
++
+ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+@@ -1353,7 +1377,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
+ ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+
+ if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
+- err = ksmbd_vfs_remove_xattr(idmap, path, name);
++ err = ksmbd_vfs_remove_xattr(idmap, path, name, true);
+ if (err)
+ ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+ }
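A hedged usage sketch of the locked-lookup pair added above: on success, ksmbd_vfs_kern_path_locked() now holds mnt_want_write() and the parent inode lock, and the new ksmbd_vfs_kern_path_unlock() is the single teardown for both plus the path references. The call site below is illustrative only; error handling is trimmed.

static int example_locked_op(struct ksmbd_work *work, char *name)
{
	struct path path, parent_path;
	int err;

	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
					 &parent_path, &path, false);
	if (err)
		return err;

	/* ... operate on path.dentry while the parent stays locked ... */

	ksmbd_vfs_kern_path_unlock(&parent_path, &path);
	return 0;
}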
+@@ -1431,7 +1455,8 @@ static struct xattr_smb_acl *ksmbd_vfs_make_xattr_posix_acl(struct mnt_idmap *id
+ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ struct mnt_idmap *idmap,
+ const struct path *path,
+- struct smb_ntsd *pntsd, int len)
++ struct smb_ntsd *pntsd, int len,
++ bool get_write)
+ {
+ int rc;
+ struct ndr sd_ndr = {0}, acl_ndr = {0};
+@@ -1491,7 +1516,7 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+
+ rc = ksmbd_vfs_setxattr(idmap, path,
+ XATTR_NAME_SD, sd_ndr.data,
+- sd_ndr.offset, 0);
++ sd_ndr.offset, 0, get_write);
+ if (rc < 0)
+ pr_err("Failed to store XATTR ntacl :%d\n", rc);
+
+@@ -1580,7 +1605,8 @@ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+
+ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
+ const struct path *path,
+- struct xattr_dos_attrib *da)
++ struct xattr_dos_attrib *da,
++ bool get_write)
+ {
+ struct ndr n;
+ int err;
+@@ -1590,7 +1616,7 @@ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
+ return err;
+
+ err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE,
+- (void *)n.data, n.offset, 0);
++ (void *)n.data, n.offset, 0, get_write);
+ if (err)
+ ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
+ kfree(n.data);
+@@ -1623,6 +1649,8 @@ int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap,
+ * ksmbd_vfs_init_kstat() - convert unix stat information to smb stat format
+ * @p: destination buffer
+ * @ksmbd_kstat: ksmbd kstat wrapper
++ *
++ * Returns: pointer to the converted &struct file_directory_info
+ */
+ void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat)
+ {
+@@ -1656,11 +1684,19 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
+ struct dentry *dentry,
+ struct ksmbd_kstat *ksmbd_kstat)
+ {
++ struct ksmbd_share_config *share_conf = work->tcon->share_conf;
+ u64 time;
+ int rc;
++ struct path path = {
++ .mnt = share_conf->vfs_path.mnt,
++ .dentry = dentry,
++ };
+
+- generic_fillattr(idmap, STATX_BASIC_STATS, d_inode(dentry),
+- ksmbd_kstat->kstat);
++ rc = vfs_getattr(&path, ksmbd_kstat->kstat,
++ STATX_BASIC_STATS | STATX_BTIME,
++ AT_STATX_SYNC_AS_STAT);
++ if (rc)
++ return rc;
+
+ time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
+ ksmbd_kstat->create_time = time;
+@@ -1862,10 +1898,6 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
+ }
+ posix_state_to_acl(&acl_state, acls->a_entries);
+
+- rc = mnt_want_write(path->mnt);
+- if (rc)
+- goto out_err;
+-
+ rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
+ if (rc < 0)
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+@@ -1877,9 +1909,7 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ rc);
+ }
+- mnt_drop_write(path->mnt);
+
+-out_err:
+ free_acl_state(&acl_state);
+ posix_acl_release(acls);
+ return rc;
+@@ -1909,10 +1939,6 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
+ }
+ }
+
+- rc = mnt_want_write(path->mnt);
+- if (rc)
+- goto out_err;
+-
+ rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
+ if (rc < 0)
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
+@@ -1924,9 +1950,7 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
+ ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
+ rc);
+ }
+- mnt_drop_write(path->mnt);
+
+-out_err:
+ posix_acl_release(acls);
+ return rc;
+ }
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index 00968081856e38..cb76f4b5bafe8c 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -109,14 +109,17 @@ ssize_t ksmbd_vfs_casexattr_len(struct mnt_idmap *idmap,
+ int attr_name_len);
+ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ const struct path *path, const char *attr_name,
+- void *attr_value, size_t attr_size, int flags);
++ void *attr_value, size_t attr_size, int flags,
++ bool get_write);
+ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+ size_t *xattr_stream_name_size, int s_type);
+ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
+- const struct path *path, char *attr_name);
++ const struct path *path, char *attr_name,
++ bool get_write);
+ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ unsigned int flags, struct path *parent_path,
+ struct path *path, bool caseless);
++void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
+ struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+@@ -144,14 +147,16 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
+ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
+ struct mnt_idmap *idmap,
+ const struct path *path,
+- struct smb_ntsd *pntsd, int len);
++ struct smb_ntsd *pntsd, int len,
++ bool get_write);
+ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct smb_ntsd **pntsd);
+ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
+ const struct path *path,
+- struct xattr_dos_attrib *da);
++ struct xattr_dos_attrib *da,
++ bool get_write);
+ int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct xattr_dos_attrib *da);
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index c91eac6514dd95..271a23abc82fdd 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -66,14 +66,14 @@ static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
+ return tmp & inode_hash_mask;
+ }
+
+-static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
++static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
+ {
+ struct hlist_head *head = inode_hashtable +
+- inode_hash(inode->i_sb, inode->i_ino);
++ inode_hash(d_inode(de)->i_sb, (unsigned long)de);
+ struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
+
+ hlist_for_each_entry(ci, head, m_hash) {
+- if (ci->m_inode == inode) {
++ if (ci->m_de == de) {
+ if (atomic_inc_not_zero(&ci->m_count))
+ ret_ci = ci;
+ break;
+@@ -84,26 +84,27 @@ static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
+
+ static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
+ {
+- return __ksmbd_inode_lookup(file_inode(fp->filp));
++ return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
+ }
+
+-static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
++struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
+ {
+ struct ksmbd_inode *ci;
+
+ read_lock(&inode_hash_lock);
+- ci = __ksmbd_inode_lookup(inode);
++ ci = __ksmbd_inode_lookup(d);
+ read_unlock(&inode_hash_lock);
++
+ return ci;
+ }
+
+-int ksmbd_query_inode_status(struct inode *inode)
++int ksmbd_query_inode_status(struct dentry *dentry)
+ {
+ struct ksmbd_inode *ci;
+ int ret = KSMBD_INODE_STATUS_UNKNOWN;
+
+ read_lock(&inode_hash_lock);
+- ci = __ksmbd_inode_lookup(inode);
++ ci = __ksmbd_inode_lookup(dentry);
+ if (ci) {
+ ret = KSMBD_INODE_STATUS_OK;
+ if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
+@@ -143,7 +144,7 @@ void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+ static void ksmbd_inode_hash(struct ksmbd_inode *ci)
+ {
+ struct hlist_head *b = inode_hashtable +
+- inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
++ inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);
+
+ hlist_add_head(&ci->m_hash, b);
+ }
+@@ -157,7 +158,6 @@ static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
+
+ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+ {
+- ci->m_inode = file_inode(fp->filp);
+ atomic_set(&ci->m_count, 1);
+ atomic_set(&ci->op_count, 0);
+ atomic_set(&ci->sop_count, 0);
+@@ -165,7 +165,8 @@ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
+ ci->m_fattr = 0;
+ INIT_LIST_HEAD(&ci->m_fp_list);
+ INIT_LIST_HEAD(&ci->m_op_list);
+- rwlock_init(&ci->m_lock);
++ init_rwsem(&ci->m_lock);
++ ci->m_de = fp->filp->f_path.dentry;
+ return 0;
+ }
+
+@@ -209,7 +210,7 @@ static void ksmbd_inode_free(struct ksmbd_inode *ci)
+ kfree(ci);
+ }
+
+-static void ksmbd_inode_put(struct ksmbd_inode *ci)
++void ksmbd_inode_put(struct ksmbd_inode *ci)
+ {
+ if (atomic_dec_and_test(&ci->m_count))
+ ksmbd_inode_free(ci);
+@@ -253,21 +254,22 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
+ ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
+ err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
+ &filp->f_path,
+- fp->stream.name);
++ fp->stream.name,
++ true);
+ if (err)
+ pr_err("remove xattr failed : %s\n",
+ fp->stream.name);
+ }
+
+ if (atomic_dec_and_test(&ci->m_count)) {
+- write_lock(&ci->m_lock);
++ down_write(&ci->m_lock);
+ if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
+ ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
+- write_unlock(&ci->m_lock);
++ up_write(&ci->m_lock);
+ ksmbd_vfs_unlink(filp);
+- write_lock(&ci->m_lock);
++ down_write(&ci->m_lock);
+ }
+- write_unlock(&ci->m_lock);
++ up_write(&ci->m_lock);
+
+ ksmbd_inode_free(ci);
+ }
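The rwlock-to-rw_semaphore switch matters here because this close path drops and re-takes m_lock around ksmbd_vfs_unlink(), which can sleep; a sleeping lock also covers the later hunks that walk m_op_list while freeing connections. A condensed restatement of the pattern, with field names as in the patch:

static void close_and_maybe_unlink(struct ksmbd_inode *ci, struct file *filp)
{
	down_write(&ci->m_lock);
	if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
		ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
		up_write(&ci->m_lock);
		ksmbd_vfs_unlink(filp);		/* may sleep */
		down_write(&ci->m_lock);
	}
	up_write(&ci->m_lock);
}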
+@@ -288,9 +290,9 @@ static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp
+ if (!has_file_id(fp->volatile_id))
+ return;
+
+- write_lock(&fp->f_ci->m_lock);
++ down_write(&fp->f_ci->m_lock);
+ list_del_init(&fp->node);
+- write_unlock(&fp->f_ci->m_lock);
++ up_write(&fp->f_ci->m_lock);
+
+ write_lock(&ft->lock);
+ idr_remove(ft->idr, fp->volatile_id);
+@@ -304,7 +306,8 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
+
+ fd_limit_close();
+ __ksmbd_remove_durable_fd(fp);
+- __ksmbd_remove_fd(ft, fp);
++ if (ft)
++ __ksmbd_remove_fd(ft, fp);
+
+ close_id_del_oplock(fp);
+ filp = fp->filp;
+@@ -464,11 +467,32 @@ struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+ return fp;
+ }
+
+-struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
++struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
+ {
+ return __ksmbd_lookup_fd(&global_ft, id);
+ }
+
++struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
++{
++ struct ksmbd_file *fp;
++
++ fp = __ksmbd_lookup_fd(&global_ft, id);
++ if (fp && fp->conn) {
++ ksmbd_put_durable_fd(fp);
++ fp = NULL;
++ }
++
++ return fp;
++}
++
++void ksmbd_put_durable_fd(struct ksmbd_file *fp)
++{
++ if (!atomic_dec_and_test(&fp->refcount))
++ return;
++
++ __ksmbd_close_fd(NULL, fp);
++}
++
+ struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
+ {
+ struct ksmbd_file *fp = NULL;
+@@ -488,26 +512,29 @@ struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
+ return fp;
+ }
+
+-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
++struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
+ {
+ struct ksmbd_file *lfp;
+ struct ksmbd_inode *ci;
++ struct inode *inode = d_inode(dentry);
+
+- ci = ksmbd_inode_lookup_by_vfsinode(inode);
++ read_lock(&inode_hash_lock);
++ ci = __ksmbd_inode_lookup(dentry);
++ read_unlock(&inode_hash_lock);
+ if (!ci)
+ return NULL;
+
+- read_lock(&ci->m_lock);
++ down_read(&ci->m_lock);
+ list_for_each_entry(lfp, &ci->m_fp_list, node) {
+ if (inode == file_inode(lfp->filp)) {
+ atomic_dec(&ci->m_count);
+ lfp = ksmbd_fp_get(lfp);
+- read_unlock(&ci->m_lock);
++ up_read(&ci->m_lock);
+ return lfp;
+ }
+ }
+ atomic_dec(&ci->m_count);
+- read_unlock(&ci->m_lock);
++ up_read(&ci->m_lock);
+ return NULL;
+ }
+
+@@ -635,6 +662,32 @@ __close_file_table_ids(struct ksmbd_file_table *ft,
+ return num;
+ }
+
++static inline bool is_reconnectable(struct ksmbd_file *fp)
++{
++ struct oplock_info *opinfo = opinfo_get(fp);
++ bool reconn = false;
++
++ if (!opinfo)
++ return false;
++
++ if (opinfo->op_state != OPLOCK_STATE_NONE) {
++ opinfo_put(opinfo);
++ return false;
++ }
++
++ if (fp->is_resilient || fp->is_persistent)
++ reconn = true;
++ else if (fp->is_durable && opinfo->is_lease &&
++ opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
++ reconn = true;
++
++ else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
++ reconn = true;
++
++ opinfo_put(opinfo);
++ return reconn;
++}
++
+ static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
+ struct ksmbd_file *fp)
+ {
+@@ -644,7 +697,30 @@ static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
+ static bool session_fd_check(struct ksmbd_tree_connect *tcon,
+ struct ksmbd_file *fp)
+ {
+- return false;
++ struct ksmbd_inode *ci;
++ struct oplock_info *op;
++ struct ksmbd_conn *conn;
++
++ if (!is_reconnectable(fp))
++ return false;
++
++ conn = fp->conn;
++ ci = fp->f_ci;
++ down_write(&ci->m_lock);
++ list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
++ if (op->conn != conn)
++ continue;
++ if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
++ kfree(op->conn);
++ op->conn = NULL;
++ }
++ up_write(&ci->m_lock);
++
++ fp->conn = NULL;
++ fp->tcon = NULL;
++ fp->volatile_id = KSMBD_NO_FID;
++
++ return true;
+ }
+
+ void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
+@@ -683,6 +759,69 @@ void ksmbd_free_global_file_table(void)
+ ksmbd_destroy_file_table(&global_ft);
+ }
+
++int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
++ struct ksmbd_file *fp, char *name)
++{
++ char *pathname, *ab_pathname;
++ int ret = 0;
++
++ pathname = kmalloc(PATH_MAX, GFP_KERNEL);
++ if (!pathname)
++ return -EACCES;
++
++ ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
++ if (IS_ERR(ab_pathname)) {
++ kfree(pathname);
++ return -EACCES;
++ }
++
++ if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
++ ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
++ ret = -EINVAL;
++ }
++
++ kfree(pathname);
++
++ return ret;
++}
++
++int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
++{
++ struct ksmbd_inode *ci;
++ struct oplock_info *op;
++
++ if (!fp->is_durable || fp->conn || fp->tcon) {
++ pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
++ return -EBADF;
++ }
++
++ if (has_file_id(fp->volatile_id)) {
++ pr_err("Still in use durable fd: %llu\n", fp->volatile_id);
++ return -EBADF;
++ }
++
++ fp->conn = work->conn;
++ fp->tcon = work->tcon;
++
++ ci = fp->f_ci;
++ down_write(&ci->m_lock);
++ list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
++ if (op->conn)
++ continue;
++ op->conn = fp->conn;
++ atomic_inc(&op->conn->refcnt);
++ }
++ up_write(&ci->m_lock);
++
++ __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
++ if (!has_file_id(fp->volatile_id)) {
++ fp->conn = NULL;
++ fp->tcon = NULL;
++ return -EBADF;
++ }
++ return 0;
++}
++
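Taken together, these helpers implement SMB2 durable-handle reconnect: session_fd_check() orphans the fp when a connection dies, and a later open carrying a durable reconnect context reclaims it roughly as sketched below (locking and error paths elided; passing a NULL name skips the name check, and "persistent_id" is the client-supplied durable id).

static int example_durable_reconnect(struct ksmbd_work *work, u64 persistent_id)
{
	struct ksmbd_file *fp;
	int err;

	fp = ksmbd_lookup_durable_fd(persistent_id);	/* NULL if still owned */
	if (!fp)
		return -EBADF;

	err = ksmbd_validate_name_reconnect(work->tcon->share_conf, fp, NULL);
	if (!err)
		err = ksmbd_reopen_durable_fd(work, fp);
	return err;
}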
+ int ksmbd_init_file_table(struct ksmbd_file_table *ft)
+ {
+ ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
+diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
+index 03d0bf941216f8..5a225e7055f191 100644
+--- a/fs/smb/server/vfs_cache.h
++++ b/fs/smb/server/vfs_cache.h
+@@ -14,6 +14,7 @@
+ #include <linux/workqueue.h>
+
+ #include "vfs.h"
++#include "mgmt/share_config.h"
+
+ /* Windows style file permissions for extended response */
+ #define FILE_GENERIC_ALL 0x1F01FF
+@@ -46,12 +47,12 @@ struct stream {
+ };
+
+ struct ksmbd_inode {
+- rwlock_t m_lock;
++ struct rw_semaphore m_lock;
+ atomic_t m_count;
+ atomic_t op_count;
+ /* opinfo count for streams */
+ atomic_t sop_count;
+- struct inode *m_inode;
++ struct dentry *m_de;
+ unsigned int m_flags;
+ struct hlist_node m_hash;
+ struct list_head m_fp_list;
+@@ -105,6 +106,10 @@ struct ksmbd_file {
+ struct ksmbd_readdir_data readdir_data;
+ int dot_dotdot[2];
+ unsigned int f_state;
++ bool reserve_lease_break;
++ bool is_durable;
++ bool is_persistent;
++ bool is_resilient;
+ };
+
+ static inline void set_ctx_actor(struct dir_context *ctx,
+@@ -138,9 +143,13 @@ struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id);
+ struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
+ u64 pid);
+ void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
++struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d);
++void ksmbd_inode_put(struct ksmbd_inode *ci);
++struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id);
+ struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
++void ksmbd_put_durable_fd(struct ksmbd_file *fp);
+ struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
+-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
++struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry);
+ unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
+ struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
+ void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
+@@ -164,12 +173,15 @@ enum KSMBD_INODE_STATUS {
+ KSMBD_INODE_STATUS_PENDING_DELETE,
+ };
+
+-int ksmbd_query_inode_status(struct inode *inode);
++int ksmbd_query_inode_status(struct dentry *dentry);
+ bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
+ void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
+ void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
+ void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
+ int file_info);
++int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp);
++int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
++ struct ksmbd_file *fp, char *name);
+ int ksmbd_init_file_cache(void);
+ void ksmbd_exit_file_cache(void);
+ #endif /* __VFS_CACHE_H__ */
+diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
+index 581ce951933901..2dc730800f448d 100644
+--- a/fs/squashfs/block.c
++++ b/fs/squashfs/block.c
+@@ -321,7 +321,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
+ TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
+ compressed ? "" : "un", length);
+ }
+- if (length < 0 || length > output->length ||
++ if (length <= 0 || length > output->length ||
+ (index + length) > msblk->bytes_used) {
+ res = -EIO;
+ goto out;
+diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
+index 8ba8c4c5077078..e8df6430444b01 100644
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -544,7 +544,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
+ struct squashfs_page_actor *actor;
+ unsigned int nr_pages = 0;
+ struct page **pages;
+- int i, file_end = i_size_read(inode) >> msblk->block_log;
++ int i;
++ loff_t file_end = i_size_read(inode) >> msblk->block_log;
+ unsigned int max_pages = 1UL << shift;
+
+ readahead_expand(ractl, start, (len | mask) + 1);
+diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
+index f1ccad519e28cc..763a3f7a75f6dd 100644
+--- a/fs/squashfs/file_direct.c
++++ b/fs/squashfs/file_direct.c
+@@ -26,10 +26,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+ struct inode *inode = target_page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+
+- int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
++ loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+- int start_index = target_page->index & ~mask;
+- int end_index = start_index | mask;
++ loff_t start_index = target_page->index & ~mask;
++ loff_t end_index = start_index | mask;
+ int i, n, pages, bytes, res = -ENOMEM;
+ struct page **page;
+ struct squashfs_page_actor *actor;
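Why the squashfs index variables become loff_t: with a 32-bit int, (i_size - 1) >> PAGE_SHIFT overflows on assignment once the page index passes 2^31 (files beyond 8 TiB with 4 KiB pages). A self-contained userspace illustration, assuming a PAGE_SHIFT of 12:

#include <stdio.h>

int main(void)
{
	long long i_size = 9LL << 40;		/* a 9 TiB sparse file */
	int bad = (i_size - 1) >> 12;		/* truncated to 32 bits */
	long long good = (i_size - 1) >> 12;	/* loff_t keeps 64 bits */

	printf("int index: %d, loff_t index: %lld\n", bad, good);
	return 0;
}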
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index c6e626b00546b9..d5918eba27e371 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
+ gid_t i_gid;
+ int err;
+
++ inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
++ if (inode->i_ino == 0)
++ return -EINVAL;
++
+ err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
+ if (err)
+ return err;
+@@ -58,10 +62,9 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
+
+ i_uid_write(inode, i_uid);
+ i_gid_write(inode, i_gid);
+- inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+- inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
+- inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
+- inode_set_ctime(inode, inode->i_mtime.tv_sec, 0);
++ inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
++ inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
++ inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
+ inode->i_mode = le16_to_cpu(sqsh_ino->mode);
+ inode->i_size = 0;
+
+@@ -276,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ if (err < 0)
+ goto failed_read;
+
+- set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
++ if (inode->i_size > PAGE_SIZE) {
++ ERROR("Corrupted symlink\n");
++ return -EINVAL;
++ }
++
++ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_op = &squashfs_symlink_inode_ops;
+ inode_nohighmem(inode);
+ inode->i_data.a_ops = &squashfs_symlink_aops;
+diff --git a/fs/stat.c b/fs/stat.c
+index d43a5cc1bfa46b..5375be5f97ccfd 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -133,7 +133,8 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
+ idmap = mnt_idmap(path->mnt);
+ if (inode->i_op->getattr)
+ return inode->i_op->getattr(idmap, path, stat,
+- request_mask, query_flags);
++ request_mask,
++ query_flags | AT_GETATTR_NOSEC);
+
+ generic_fillattr(idmap, request_mask, inode, stat);
+ return 0;
+@@ -166,6 +167,9 @@ int vfs_getattr(const struct path *path, struct kstat *stat,
+ {
+ int retval;
+
++ if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
++ return -EPERM;
++
+ retval = security_inode_getattr(path);
+ if (retval)
+ return retval;
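The AT_GETATTR_NOSEC flag gives filesystems that re-enter the VFS a way to honour the nosec variant, while vfs_getattr() now rejects the flag from outside callers. A hypothetical stacking-filesystem getattr might use it like this (the handler name is illustrative, not from the patch):

static int example_getattr(struct mnt_idmap *idmap, const struct path *path,
			   struct kstat *stat, u32 request_mask,
			   unsigned int query_flags)
{
	/* propagate the caller's nosec decision to the lower layer */
	if (query_flags & AT_GETATTR_NOSEC)
		return vfs_getattr_nosec(path, stat, request_mask,
					 query_flags & ~AT_GETATTR_NOSEC);
	return vfs_getattr(path, stat, request_mask, query_flags);
}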
+diff --git a/fs/super.c b/fs/super.c
+index 2d762ce67f6e6c..b142e71eb8dfdd 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -781,6 +781,17 @@ struct super_block *sget_fc(struct fs_context *fc,
+ struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
+ int err;
+
++ /*
++ * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
++ * not set, as the filesystem is likely unprepared to handle it.
++ * This can happen when fsconfig() is called from init_user_ns with
++ * an fs_fd opened in another user namespace.
++ */
++ if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
++ errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
++ return ERR_PTR(-EPERM);
++ }
++
+ retry:
+ spin_lock(&sb_lock);
+ if (test) {
+@@ -1479,14 +1490,16 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc)
+ {
+ blk_mode_t mode = sb_open_mode(sb_flags);
++ struct bdev_handle *bdev_handle;
+ struct block_device *bdev;
+
+- bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
+- if (IS_ERR(bdev)) {
++ bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
++ if (IS_ERR(bdev_handle)) {
+ if (fc)
+ errorf(fc, "%s: Can't open blockdev", fc->source);
+- return PTR_ERR(bdev);
++ return PTR_ERR(bdev_handle);
+ }
++ bdev = bdev_handle->bdev;
+
+ /*
+ * This really should be in blkdev_get_by_dev, but right now can't due
+@@ -1494,7 +1507,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
+ * writable from userspace even for a read-only block device.
+ */
+ if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
+- blkdev_put(bdev, sb);
++ bdev_release(bdev_handle);
+ return -EACCES;
+ }
+
+@@ -1510,10 +1523,11 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ if (fc)
+ warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
+- blkdev_put(bdev, sb);
++ bdev_release(bdev_handle);
+ return -EBUSY;
+ }
+ spin_lock(&sb_lock);
++ sb->s_bdev_handle = bdev_handle;
+ sb->s_bdev = bdev;
+ sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
+ if (bdev_stable_writes(bdev))
+@@ -1646,7 +1660,7 @@ void kill_block_super(struct super_block *sb)
+ generic_shutdown_super(sb);
+ if (bdev) {
+ sync_blockdev(bdev);
+- blkdev_put(bdev, sb);
++ bdev_release(sb->s_bdev_handle);
+ }
+ }
+
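The blkdev_get_by_dev()/blkdev_put() pair is replaced by the bdev_handle API: bdev_open_by_dev() returns a handle that owns the device reference, the handle is stashed in sb->s_bdev_handle, and bdev_release() tears it down in kill_block_super(). A condensed sketch of the new open path, with error reporting trimmed:

static int example_open_bdev(struct super_block *sb, blk_mode_t mode)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	sb->s_bdev_handle = handle;	/* released via bdev_release() */
	sb->s_bdev = handle->bdev;
	return 0;
}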
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index a12ac0356c69cd..f21e73d107249d 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -450,6 +450,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
++ else
++ kobject_put(kobj);
+ return kn;
+ }
+ EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index edb94e55de8e5d..7b2a07a31e463c 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -82,9 +82,6 @@ static inline sysv_zone_t *block_end(struct buffer_head *bh)
+ return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
+ }
+
+-/*
+- * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock)
+- */
+ static Indirect *get_branch(struct inode *inode,
+ int depth,
+ int offsets[],
+@@ -104,15 +101,18 @@ static Indirect *get_branch(struct inode *inode,
+ bh = sb_bread(sb, block);
+ if (!bh)
+ goto failure;
++ read_lock(&pointers_lock);
+ if (!verify_chain(chain, p))
+ goto changed;
+ add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
++ read_unlock(&pointers_lock);
+ if (!p->key)
+ goto no_block;
+ }
+ return NULL;
+
+ changed:
++ read_unlock(&pointers_lock);
+ brelse(bh);
+ *err = -EAGAIN;
+ goto no_block;
+@@ -218,9 +218,7 @@ static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *b
+ goto out;
+
+ reread:
+- read_lock(&pointers_lock);
+ partial = get_branch(inode, depth, offsets, chain, &err);
+- read_unlock(&pointers_lock);
+
+ /* Simplest case - block found, no allocation needed */
+ if (!partial) {
+@@ -290,9 +288,9 @@ static Indirect *find_shared(struct inode *inode,
+ *top = 0;
+ for (k = depth; k > 1 && !offsets[k-1]; k--)
+ ;
++ partial = get_branch(inode, k, offsets, chain, &err);
+
+ write_lock(&pointers_lock);
+- partial = get_branch(inode, k, offsets, chain, &err);
+ if (!partial)
+ partial = chain + k-1;
+ /*
+diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
+index 8c8d64e76103e1..73a08db3b7a851 100644
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -2,8 +2,9 @@
+ /*
+ * event_inode.c - part of tracefs, a pseudo file system for activating tracing
+ *
+- * Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt (VMware) <rostedt@goodmis.org>
++ * Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt <rostedt@goodmis.org>
+ * Copyright (C) 2020-23 VMware Inc, author: Ajay Kaher <akaher@vmware.com>
++ * Copyright (C) 2023 Google, author: Steven Rostedt <rostedt@goodmis.org>
+ *
+ * eventfs is used to dynamically create inodes and dentries based on the
+ * meta data provided by the tracing system.
+@@ -23,779 +24,903 @@
+ #include <linux/delay.h>
+ #include "internal.h"
+
+-struct eventfs_inode {
+- struct list_head e_top_files;
++/*
++ * eventfs_mutex protects the eventfs_inode (ei) dentry. Any access
++ * to the ei->dentry must be done under this mutex and after checking
++ * if ei->is_freed is not set. When ei->is_freed is set, the dentry
++ * is on its way to being freed after the last dput() is made on it.
++ */
++static DEFINE_MUTEX(eventfs_mutex);
++
++/* Choose something "unique" ;-) */
++#define EVENTFS_FILE_INODE_INO 0x12c4e37
++
++struct eventfs_root_inode {
++ struct eventfs_inode ei;
++ struct inode *parent_inode;
++ struct dentry *events_dir;
+ };
+
++static struct eventfs_root_inode *get_root_inode(struct eventfs_inode *ei)
++{
++ WARN_ON_ONCE(!ei->is_events);
++ return container_of(ei, struct eventfs_root_inode, ei);
++}
++
++/* Just try to make something consistent and unique */
++static int eventfs_dir_ino(struct eventfs_inode *ei)
++{
++ if (!ei->ino) {
++ ei->ino = get_next_ino();
++ /* Must not have the file inode number */
++ if (ei->ino == EVENTFS_FILE_INODE_INO)
++ ei->ino = get_next_ino();
++ }
++
++ return ei->ino;
++}
++
+ /*
+- * struct eventfs_file - hold the properties of the eventfs files and
+- * directories.
+- * @name: the name of the file or directory to create
+- * @d_parent: holds parent's dentry
+- * @dentry: once accessed holds dentry
+- * @list: file or directory to be added to parent directory
+- * @ei: list of files and directories within directory
+- * @fop: file_operations for file or directory
+- * @iop: inode_operations for file or directory
+- * @data: something that the caller will want to get to later on
+- * @mode: the permission that the file or directory should have
++ * The eventfs_inode (ei) itself is protected by SRCU. It is released from
++ * its parent's list and will have is_freed set (under eventfs_mutex).
++ * After the SRCU grace period is over and the last dput() is called
++ * the ei is freed.
+ */
+-struct eventfs_file {
+- const char *name;
+- struct dentry *d_parent;
+- struct dentry *dentry;
+- struct list_head list;
+- struct eventfs_inode *ei;
+- const struct file_operations *fop;
+- const struct inode_operations *iop;
+- /*
+- * Union - used for deletion
+- * @del_list: list of eventfs_file to delete
+- * @rcu: eventfs_file to delete in RCU
+- * @is_freed: node is freed if one of the above is set
+- */
+- union {
+- struct list_head del_list;
+- struct rcu_head rcu;
+- unsigned long is_freed;
+- };
+- void *data;
+- umode_t mode;
++DEFINE_STATIC_SRCU(eventfs_srcu);
++
++/* Mode is unsigned short, use the upper bits for flags */
++enum {
++ EVENTFS_SAVE_MODE = BIT(16),
++ EVENTFS_SAVE_UID = BIT(17),
++ EVENTFS_SAVE_GID = BIT(18),
+ };
+
+-static DEFINE_MUTEX(eventfs_mutex);
+-DEFINE_STATIC_SRCU(eventfs_srcu);
++#define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1)
++
++static void free_ei_rcu(struct rcu_head *rcu)
++{
++ struct eventfs_inode *ei = container_of(rcu, struct eventfs_inode, rcu);
++ struct eventfs_root_inode *rei;
++
++ kfree(ei->entry_attrs);
++ kfree_const(ei->name);
++ if (ei->is_events) {
++ rei = get_root_inode(ei);
++ kfree(rei);
++ } else {
++ kfree(ei);
++ }
++}
++
++/*
++ * eventfs_inode reference count management.
++ *
++ * NOTE! We count only references from dentries, in the
++ * form 'dentry->d_fsdata'. There are also references from
++ * directory inodes ('ti->private'), but the dentry reference
++ * count is always a superset of the inode reference count.
++ */
++static void release_ei(struct kref *ref)
++{
++ struct eventfs_inode *ei = container_of(ref, struct eventfs_inode, kref);
++ const struct eventfs_entry *entry;
++
++ WARN_ON_ONCE(!ei->is_freed);
++
++ for (int i = 0; i < ei->nr_entries; i++) {
++ entry = &ei->entries[i];
++ if (entry->release)
++ entry->release(entry->name, ei->data);
++ }
++
++ call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu);
++}
++
++static inline void put_ei(struct eventfs_inode *ei)
++{
++ if (ei)
++ kref_put(&ei->kref, release_ei);
++}
++
++static inline void free_ei(struct eventfs_inode *ei)
++{
++ if (ei) {
++ ei->is_freed = 1;
++ put_ei(ei);
++ }
++}
++
++/*
++ * Called when creation of an ei fails, do not call release() functions.
++ */
++static inline void cleanup_ei(struct eventfs_inode *ei)
++{
++ if (ei) {
++ /* Set nr_entries to 0 to prevent release() function being called */
++ ei->nr_entries = 0;
++ free_ei(ei);
++ }
++}
++
++static inline struct eventfs_inode *get_ei(struct eventfs_inode *ei)
++{
++ if (ei)
++ kref_get(&ei->kref);
++ return ei;
++}
+
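A sketch of the eventfs_inode lifetime rules these helpers establish: every dentry holds a kref via get_ei() in d_fsdata, dropped in eventfs_d_release(); free_ei() marks the ei freed and drops the creator's reference, and the final put schedules SRCU freeing through release_ei(). The function below only restates that flow and is not part of the patch.

static void example_lifetime(struct eventfs_inode *ei, struct dentry *dentry)
{
	dentry->d_fsdata = get_ei(ei);	/* dentry's reference */

	/* ... later, when the subsystem removes the directory ... */
	free_ei(ei);			/* sets is_freed, drops creator ref */

	/* ... and when the dentry dies, eventfs_d_release() does ... */
	put_ei(dentry->d_fsdata);	/* last put -> release_ei() -> SRCU */
}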
+ static struct dentry *eventfs_root_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags);
+-static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
+-static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
+-static int eventfs_release(struct inode *inode, struct file *file);
++static int eventfs_iterate(struct file *file, struct dir_context *ctx);
++
++static void update_attr(struct eventfs_attr *attr, struct iattr *iattr)
++{
++ unsigned int ia_valid = iattr->ia_valid;
++
++ if (ia_valid & ATTR_MODE) {
++ attr->mode = (attr->mode & ~EVENTFS_MODE_MASK) |
++ (iattr->ia_mode & EVENTFS_MODE_MASK) |
++ EVENTFS_SAVE_MODE;
++ }
++ if (ia_valid & ATTR_UID) {
++ attr->mode |= EVENTFS_SAVE_UID;
++ attr->uid = iattr->ia_uid;
++ }
++ if (ia_valid & ATTR_GID) {
++ attr->mode |= EVENTFS_SAVE_GID;
++ attr->gid = iattr->ia_gid;
++ }
++}
+
+-static const struct inode_operations eventfs_root_dir_inode_operations = {
++static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
++ struct iattr *iattr)
++{
++ const struct eventfs_entry *entry;
++ struct eventfs_inode *ei;
++ const char *name;
++ int ret;
++
++ mutex_lock(&eventfs_mutex);
++ ei = dentry->d_fsdata;
++ if (ei->is_freed) {
++ /* Do not allow changes if the event is about to be removed. */
++ mutex_unlock(&eventfs_mutex);
++ return -ENODEV;
++ }
++
++ /* Preallocate the children mode array if necessary */
++ if (!(dentry->d_inode->i_mode & S_IFDIR)) {
++ if (!ei->entry_attrs) {
++ ei->entry_attrs = kcalloc(ei->nr_entries, sizeof(*ei->entry_attrs),
++ GFP_NOFS);
++ if (!ei->entry_attrs) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++ }
++
++ ret = simple_setattr(idmap, dentry, iattr);
++ if (ret < 0)
++ goto out;
++
++ /*
++ * If this is a dir, then update the ei cache, only the file
++ * mode is saved in the ei->m_children, and the ownership is
++ * determined by the parent directory.
++ */
++ if (dentry->d_inode->i_mode & S_IFDIR) {
++ update_attr(&ei->attr, iattr);
++
++ } else {
++ name = dentry->d_name.name;
++
++ for (int i = 0; i < ei->nr_entries; i++) {
++ entry = &ei->entries[i];
++ if (strcmp(name, entry->name) == 0) {
++ update_attr(&ei->entry_attrs[i], iattr);
++ break;
++ }
++ }
++ }
++ out:
++ mutex_unlock(&eventfs_mutex);
++ return ret;
++}
++
++static void update_events_attr(struct eventfs_inode *ei, struct super_block *sb)
++{
++ struct eventfs_root_inode *rei;
++ struct inode *parent;
++
++ rei = get_root_inode(ei);
++
++ /* Use the parent inode permissions unless root set its permissions */
++ parent = rei->parent_inode;
++
++ if (rei->ei.attr.mode & EVENTFS_SAVE_UID)
++ ei->attr.uid = rei->ei.attr.uid;
++ else
++ ei->attr.uid = parent->i_uid;
++
++ if (rei->ei.attr.mode & EVENTFS_SAVE_GID)
++ ei->attr.gid = rei->ei.attr.gid;
++ else
++ ei->attr.gid = parent->i_gid;
++}
++
++static void set_top_events_ownership(struct inode *inode)
++{
++ struct tracefs_inode *ti = get_tracefs(inode);
++ struct eventfs_inode *ei = ti->private;
++
++ /* The top events directory doesn't get automatically updated */
++ if (!ei || !ei->is_events)
++ return;
++
++ update_events_attr(ei, inode->i_sb);
++
++ if (!(ei->attr.mode & EVENTFS_SAVE_UID))
++ inode->i_uid = ei->attr.uid;
++
++ if (!(ei->attr.mode & EVENTFS_SAVE_GID))
++ inode->i_gid = ei->attr.gid;
++}
++
++static int eventfs_get_attr(struct mnt_idmap *idmap,
++ const struct path *path, struct kstat *stat,
++ u32 request_mask, unsigned int flags)
++{
++ struct dentry *dentry = path->dentry;
++ struct inode *inode = d_backing_inode(dentry);
++
++ set_top_events_ownership(inode);
++
++ generic_fillattr(idmap, request_mask, inode, stat);
++ return 0;
++}
++
++static int eventfs_permission(struct mnt_idmap *idmap,
++ struct inode *inode, int mask)
++{
++ set_top_events_ownership(inode);
++ return generic_permission(idmap, inode, mask);
++}
++
++static const struct inode_operations eventfs_dir_inode_operations = {
+ .lookup = eventfs_root_lookup,
++ .setattr = eventfs_set_attr,
++ .getattr = eventfs_get_attr,
++ .permission = eventfs_permission,
++};
++
++static const struct inode_operations eventfs_file_inode_operations = {
++ .setattr = eventfs_set_attr,
+ };
+
+ static const struct file_operations eventfs_file_operations = {
+- .open = dcache_dir_open_wrapper,
+ .read = generic_read_dir,
+- .iterate_shared = dcache_readdir_wrapper,
++ .iterate_shared = eventfs_iterate,
+ .llseek = generic_file_llseek,
+- .release = eventfs_release,
+ };
+
++static void eventfs_set_attrs(struct eventfs_inode *ei, bool update_uid, kuid_t uid,
++ bool update_gid, kgid_t gid, int level)
++{
++ struct eventfs_inode *ei_child;
++
++ /* Update events/<system>/<event> */
++ if (WARN_ON_ONCE(level > 3))
++ return;
++
++ if (update_uid) {
++ ei->attr.mode &= ~EVENTFS_SAVE_UID;
++ ei->attr.uid = uid;
++ }
++
++ if (update_gid) {
++ ei->attr.mode &= ~EVENTFS_SAVE_GID;
++ ei->attr.gid = gid;
++ }
++
++ list_for_each_entry(ei_child, &ei->children, list) {
++ eventfs_set_attrs(ei_child, update_uid, uid, update_gid, gid, level + 1);
++ }
++
++ if (!ei->entry_attrs)
++ return;
++
++ for (int i = 0; i < ei->nr_entries; i++) {
++ if (update_uid) {
++ ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID;
++ ei->entry_attrs[i].uid = uid;
++ }
++ if (update_gid) {
++ ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID;
++ ei->entry_attrs[i].gid = gid;
++ }
++ }
++
++}
++
++/*
++ * On a remount of tracefs, if UID or GID options are set, then
++ * the mount point inode permissions should be used.
++ * Reset the saved permission flags appropriately.
++ */
++void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
++{
++ struct eventfs_inode *ei = ti->private;
++
++ /* Only the events directory does the updates */
++ if (!ei || !ei->is_events || ei->is_freed)
++ return;
++
++ eventfs_set_attrs(ei, update_uid, ti->vfs_inode.i_uid,
++ update_gid, ti->vfs_inode.i_gid, 0);
++}
++
++/* Return the eventfs_inode of the "events" directory */
++static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
++{
++ struct eventfs_inode *ei;
++
++ do {
++ // The parent is stable because we do not do renames
++ dentry = dentry->d_parent;
++ // ... and directories always have d_fsdata
++ ei = dentry->d_fsdata;
++
++ /*
++ * If the ei is being freed, the ownership of the children
++ * doesn't matter.
++ */
++ if (ei->is_freed)
++ return NULL;
++
++ // Walk upwards until you find the events inode
++ } while (!ei->is_events);
++
++ update_events_attr(ei, dentry->d_sb);
++
++ return ei;
++}
++
++static void update_inode_attr(struct dentry *dentry, struct inode *inode,
++ struct eventfs_attr *attr, umode_t mode)
++{
++ struct eventfs_inode *events_ei = eventfs_find_events(dentry);
++
++ if (!events_ei)
++ return;
++
++ inode->i_mode = mode;
++ inode->i_uid = events_ei->attr.uid;
++ inode->i_gid = events_ei->attr.gid;
++
++ if (!attr)
++ return;
++
++ if (attr->mode & EVENTFS_SAVE_MODE)
++ inode->i_mode = attr->mode & EVENTFS_MODE_MASK;
++
++ if (attr->mode & EVENTFS_SAVE_UID)
++ inode->i_uid = attr->uid;
++
++ if (attr->mode & EVENTFS_SAVE_GID)
++ inode->i_gid = attr->gid;
++}
++
+ /**
+- * create_file - create a file in the tracefs filesystem
+- * @name: the name of the file to create.
++ * lookup_file - look up a file in the tracefs filesystem
++ * @parent_ei: the parent eventfs_inode of the file
++ * @dentry: the dentry to look up
+ * @mode: the permission that the file should have.
+- * @parent: parent dentry for this file.
++ * @attr: saved attributes changed by user
+ * @data: something that the caller will want to get to later on.
+ * @fop: struct file_operations that should be used for this file.
+ *
+- * This is the basic "create a file" function for tracefs. It allows for a
+- * wide range of flexibility in creating a file.
+- *
+- * This function will return a pointer to a dentry if it succeeds. This
+- * pointer must be passed to the tracefs_remove() function when the file is
+- * to be removed (no automatic cleanup happens if your module is unloaded,
+- * you are responsible here.) If an error occurs, %NULL will be returned.
+- *
+- * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+- * returned.
++ * This function creates a dentry that represents a file in the eventfs_inode
++ * directory. The inode.i_private pointer will point to @data in the open()
++ * call.
+ */
+-static struct dentry *create_file(const char *name, umode_t mode,
+- struct dentry *parent, void *data,
++static struct dentry *lookup_file(struct eventfs_inode *parent_ei,
++ struct dentry *dentry,
++ umode_t mode,
++ struct eventfs_attr *attr,
++ void *data,
+ const struct file_operations *fop)
+ {
+ struct tracefs_inode *ti;
+- struct dentry *dentry;
+ struct inode *inode;
+
+ if (!(mode & S_IFMT))
+ mode |= S_IFREG;
+
+ if (WARN_ON_ONCE(!S_ISREG(mode)))
+- return NULL;
+-
+- dentry = eventfs_start_creating(name, parent);
+-
+- if (IS_ERR(dentry))
+- return dentry;
++ return ERR_PTR(-EIO);
+
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode))
+- return eventfs_failed_creating(dentry);
++ return ERR_PTR(-ENOMEM);
+
+- inode->i_mode = mode;
++ /* If the user updated the file's saved attributes, use them */
++ update_inode_attr(dentry, inode, attr, mode);
++
++ inode->i_op = &eventfs_file_inode_operations;
+ inode->i_fop = fop;
+ inode->i_private = data;
+
++ /* All files will have the same inode number */
++ inode->i_ino = EVENTFS_FILE_INODE_INO;
++
+ ti = get_tracefs(inode);
+ ti->flags |= TRACEFS_EVENT_INODE;
+- d_instantiate(dentry, inode);
+- fsnotify_create(dentry->d_parent->d_inode, dentry);
+- return eventfs_end_creating(dentry);
++
++ // Files have their parent's ei as their fsdata
++ dentry->d_fsdata = get_ei(parent_ei);
++
++ d_add(dentry, inode);
++ return NULL;
+ };
+
+ /**
+- * create_dir - create a dir in the tracefs filesystem
+- * @name: the name of the file to create.
+- * @parent: parent dentry for this file.
+- * @data: something that the caller will want to get to later on.
+- *
+- * This is the basic "create a dir" function for eventfs. It allows for a
+- * wide range of flexibility in creating a dir.
++ * lookup_dir_entry - look up a dir in the tracefs filesystem
++ * @dentry: the directory to look up
++ * @ei: the eventfs_inode that represents the directory to create
+ *
+- * This function will return a pointer to a dentry if it succeeds. This
+- * pointer must be passed to the tracefs_remove() function when the file is
+- * to be removed (no automatic cleanup happens if your module is unloaded,
+- * you are responsible here.) If an error occurs, %NULL will be returned.
+- *
+- * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+- * returned.
++ * This function will look up a dentry for a directory represented by
++ * an eventfs_inode.
+ */
+-static struct dentry *create_dir(const char *name, struct dentry *parent, void *data)
++static struct dentry *lookup_dir_entry(struct dentry *dentry,
++ struct eventfs_inode *pei, struct eventfs_inode *ei)
+ {
+ struct tracefs_inode *ti;
+- struct dentry *dentry;
+ struct inode *inode;
+
+- dentry = eventfs_start_creating(name, parent);
+- if (IS_ERR(dentry))
+- return dentry;
+-
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode))
+- return eventfs_failed_creating(dentry);
++ return ERR_PTR(-ENOMEM);
+
+- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+- inode->i_op = &eventfs_root_dir_inode_operations;
++ /* If the user updated the directory's attributes, use them */
++ update_inode_attr(dentry, inode, &ei->attr,
++ S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
++
++ inode->i_op = &eventfs_dir_inode_operations;
+ inode->i_fop = &eventfs_file_operations;
+- inode->i_private = data;
++
++ /* All directories will have the same inode number */
++ inode->i_ino = eventfs_dir_ino(ei);
+
+ ti = get_tracefs(inode);
+ ti->flags |= TRACEFS_EVENT_INODE;
++ /* Only directories have ti->private set to an ei, not files */
++ ti->private = ei;
+
+- inc_nlink(inode);
+- d_instantiate(dentry, inode);
+- inc_nlink(dentry->d_parent->d_inode);
+- fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+- return eventfs_end_creating(dentry);
++ dentry->d_fsdata = get_ei(ei);
++
++ d_add(dentry, inode);
++ return NULL;
+ }
+
+-/**
+- * eventfs_set_ef_status_free - set the ef->status to free
+- * @ti: the tracefs_inode of the dentry
+- * @dentry: dentry who's status to be freed
+- *
+- * eventfs_set_ef_status_free will be called if no more
+- * references remain
+- */
+-void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
++static inline struct eventfs_inode *init_ei(struct eventfs_inode *ei, const char *name)
+ {
+- struct tracefs_inode *ti_parent;
+- struct eventfs_inode *ei;
+- struct eventfs_file *ef, *tmp;
+-
+- /* The top level events directory may be freed by this */
+- if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
+- LIST_HEAD(ef_del_list);
+-
+- mutex_lock(&eventfs_mutex);
+-
+- ei = ti->private;
+-
+- /* Record all the top level files */
+- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+- lockdep_is_held(&eventfs_mutex)) {
+- list_add_tail(&ef->del_list, &ef_del_list);
+- }
+-
+- /* Nothing should access this, but just in case! */
+- ti->private = NULL;
++ ei->name = kstrdup_const(name, GFP_KERNEL);
++ if (!ei->name)
++ return NULL;
++ kref_init(&ei->kref);
++ return ei;
++}
+
+- mutex_unlock(&eventfs_mutex);
++static inline struct eventfs_inode *alloc_ei(const char *name)
++{
++ struct eventfs_inode *ei = kzalloc(sizeof(*ei), GFP_KERNEL);
++ struct eventfs_inode *result;
+
+- /* Now safely free the top level files and their children */
+- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+- list_del(&ef->del_list);
+- eventfs_remove(ef);
+- }
++ if (!ei)
++ return NULL;
+
++ result = init_ei(ei, name);
++ if (!result)
+ kfree(ei);
+- return;
+- }
+
+- mutex_lock(&eventfs_mutex);
++ return result;
++}
+
+- ti_parent = get_tracefs(dentry->d_parent->d_inode);
+- if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
+- goto out;
++static inline struct eventfs_inode *alloc_root_ei(const char *name)
++{
++ struct eventfs_root_inode *rei = kzalloc(sizeof(*rei), GFP_KERNEL);
++ struct eventfs_inode *ei;
+
+- ef = dentry->d_fsdata;
+- if (!ef)
+- goto out;
++ if (!rei)
++ return NULL;
+
+- /*
+- * If ef was freed, then the LSB bit is set for d_fsdata.
+- * But this should not happen, as it should still have a
+- * ref count that prevents it. Warn in case it does.
+- */
+- if (WARN_ON_ONCE((unsigned long)ef & 1))
+- goto out;
++ rei->ei.is_events = 1;
++ ei = init_ei(&rei->ei, name);
++ if (!ei)
++ kfree(rei);
+
+- dentry->d_fsdata = NULL;
+- ef->dentry = NULL;
+-out:
+- mutex_unlock(&eventfs_mutex);
++ return ei;
+ }
+
+ /**
+- * eventfs_post_create_dir - post create dir routine
+- * @ef: eventfs_file of recently created dir
++ * eventfs_d_release - dentry is going away
++ * @dentry: dentry which has the reference to remove.
+ *
+- * Map the meta-data of files within an eventfs dir to their parent dentry
++ * Remove the association between a dentry from an eventfs_inode.
+ */
+-static void eventfs_post_create_dir(struct eventfs_file *ef)
++void eventfs_d_release(struct dentry *dentry)
+ {
+- struct eventfs_file *ef_child;
+- struct tracefs_inode *ti;
+-
+- /* srcu lock already held */
+- /* fill parent-child relation */
+- list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
+- srcu_read_lock_held(&eventfs_srcu)) {
+- ef_child->d_parent = ef->dentry;
+- }
+-
+- ti = get_tracefs(ef->dentry->d_inode);
+- ti->private = ef->ei;
++ put_ei(dentry->d_fsdata);
+ }
+
+ /**
+- * create_dentry - helper function to create dentry
+- * @ef: eventfs_file of file or directory to create
+- * @parent: parent dentry
+- * @lookup: true if called from lookup routine
++ * lookup_file_dentry - create a dentry for a file of an eventfs_inode
++ * @dentry: the dentry of the file to create
++ * @ei: the eventfs_inode that the file will be created under
++ * @idx: the index into the entry_attrs[] of the @ei
++ * @mode: The mode of the file.
++ * @data: The data to use to set the inode of the file with on open()
++ * @fops: The fops of the file to be created.
+ *
+- * Used to create a dentry for file/dir, executes post dentry creation routine
++ * Create a dentry for a file of an eventfs_inode @ei, using the saved
++ * attributes at @idx of @ei if the user has changed them.
+ */
+ static struct dentry *
+-create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
++lookup_file_dentry(struct dentry *dentry,
++ struct eventfs_inode *ei, int idx,
++ umode_t mode, void *data,
++ const struct file_operations *fops)
+ {
+- bool invalidate = false;
+- struct dentry *dentry;
++ struct eventfs_attr *attr = NULL;
+
+- mutex_lock(&eventfs_mutex);
+- if (ef->is_freed) {
+- mutex_unlock(&eventfs_mutex);
+- return NULL;
+- }
+- if (ef->dentry) {
+- dentry = ef->dentry;
+- /* On dir open, up the ref count */
+- if (!lookup)
+- dget(dentry);
+- mutex_unlock(&eventfs_mutex);
+- return dentry;
+- }
+- mutex_unlock(&eventfs_mutex);
+-
+- if (!lookup)
+- inode_lock(parent->d_inode);
+-
+- if (ef->ei)
+- dentry = create_dir(ef->name, parent, ef->data);
+- else
+- dentry = create_file(ef->name, ef->mode, parent,
+- ef->data, ef->fop);
+-
+- if (!lookup)
+- inode_unlock(parent->d_inode);
+-
+- mutex_lock(&eventfs_mutex);
+- if (IS_ERR_OR_NULL(dentry)) {
+- /* If the ef was already updated get it */
+- dentry = ef->dentry;
+- if (dentry && !lookup)
+- dget(dentry);
+- mutex_unlock(&eventfs_mutex);
+- return dentry;
+- }
+-
+- if (!ef->dentry && !ef->is_freed) {
+- ef->dentry = dentry;
+- if (ef->ei)
+- eventfs_post_create_dir(ef);
+- dentry->d_fsdata = ef;
+- } else {
+- /* A race here, should try again (unless freed) */
+- invalidate = true;
++ if (ei->entry_attrs)
++ attr = &ei->entry_attrs[idx];
+
+- /*
+- * Should never happen unless we get here due to being freed.
+- * Otherwise it means two dentries exist with the same name.
+- */
+- WARN_ON_ONCE(!ef->is_freed);
+- }
+- mutex_unlock(&eventfs_mutex);
+- if (invalidate)
+- d_invalidate(dentry);
+-
+- if (lookup || invalidate)
+- dput(dentry);
+-
+- return invalidate ? NULL : dentry;
+-}
+-
+-static bool match_event_file(struct eventfs_file *ef, const char *name)
+-{
+- bool ret;
+-
+- mutex_lock(&eventfs_mutex);
+- ret = !ef->is_freed && strcmp(ef->name, name) == 0;
+- mutex_unlock(&eventfs_mutex);
+-
+- return ret;
++ return lookup_file(ei, dentry, mode, attr, data, fops);
+ }
+
+ /**
+ * eventfs_root_lookup - lookup routine to create file/dir
+ * @dir: in which a lookup is being done
+ * @dentry: file/dir dentry
+- * @flags: to pass as flags parameter to simple lookup
++ * @flags: Just passed to simple_lookup()
+ *
+- * Used to create a dynamic file/dir within @dir. Use the eventfs_inode
+- * list of meta data to find the information needed to create the file/dir.
++ * Used to create a dynamic file/dir within @dir. The children and
++ * entries of @ei are searched; if @dentry names one, it is created.
+ */
++
+ static struct dentry *eventfs_root_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags)
+ {
++ struct eventfs_inode *ei_child;
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+- struct eventfs_file *ef;
+- struct dentry *ret = NULL;
+- int idx;
++ const char *name = dentry->d_name.name;
++ struct dentry *result = NULL;
+
+ ti = get_tracefs(dir);
+ if (!(ti->flags & TRACEFS_EVENT_INODE))
+- return NULL;
++ return ERR_PTR(-EIO);
++
++ mutex_lock(&eventfs_mutex);
+
+ ei = ti->private;
+- idx = srcu_read_lock(&eventfs_srcu);
+- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+- srcu_read_lock_held(&eventfs_srcu)) {
+- if (!match_event_file(ef, dentry->d_name.name))
++ if (!ei || ei->is_freed)
++ goto out;
++
++ list_for_each_entry(ei_child, &ei->children, list) {
++ if (strcmp(ei_child->name, name) != 0)
+ continue;
+- ret = simple_lookup(dir, dentry, flags);
+- create_dentry(ef, ef->d_parent, true);
+- break;
++ if (ei_child->is_freed)
++ goto out;
++ result = lookup_dir_entry(dentry, ei, ei_child);
++ goto out;
+ }
+- srcu_read_unlock(&eventfs_srcu, idx);
+- return ret;
+-}
+
+-struct dentry_list {
+- void *cursor;
+- struct dentry **dentries;
+-};
++ for (int i = 0; i < ei->nr_entries; i++) {
++ void *data;
++ umode_t mode;
++ const struct file_operations *fops;
++ const struct eventfs_entry *entry = &ei->entries[i];
+
+-/**
+- * eventfs_release - called to release eventfs file/dir
+- * @inode: inode to be released
+- * @file: file to be released (not used)
+- */
+-static int eventfs_release(struct inode *inode, struct file *file)
+-{
+- struct tracefs_inode *ti;
+- struct dentry_list *dlist = file->private_data;
+- void *cursor;
+- int i;
+-
+- ti = get_tracefs(inode);
+- if (!(ti->flags & TRACEFS_EVENT_INODE))
+- return -EINVAL;
++ if (strcmp(name, entry->name) != 0)
++ continue;
+
+- if (WARN_ON_ONCE(!dlist))
+- return -EINVAL;
++ data = ei->data;
++ if (entry->callback(name, &mode, &data, &fops) <= 0)
++ goto out;
+
+- for (i = 0; dlist->dentries && dlist->dentries[i]; i++) {
+- dput(dlist->dentries[i]);
++ result = lookup_file_dentry(dentry, ei, i, mode, data, fops);
++ goto out;
+ }
+-
+- cursor = dlist->cursor;
+- kfree(dlist->dentries);
+- kfree(dlist);
+- file->private_data = cursor;
+- return dcache_dir_close(inode, file);
++ out:
++ mutex_unlock(&eventfs_mutex);
++ return result;
+ }
+
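A hedged sketch of the eventfs_entry callback protocol the lookup above relies on: the callback fills in mode, data and fops and returns a positive value when the name should exist under the directory. "example_fops" and the "enable" file name are illustrative assumptions, not part of the patch.

static const struct file_operations example_fops;	/* assumed to exist */

static int example_callback(const char *name, umode_t *mode, void **data,
			    const struct file_operations **fops)
{
	if (strcmp(name, "enable") != 0)
		return 0;		/* not one of ours */

	*mode = 0644;
	*fops = &example_fops;
	/* *data is left as the ei's data unless overridden here */
	return 1;
}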
+-/**
+- * dcache_dir_open_wrapper - eventfs open wrapper
+- * @inode: not used
+- * @file: dir to be opened (to create its child)
+- *
+- * Used to dynamically create the file/dir within @file. @file is really a
+- * directory and all the files/dirs of the children within @file will be
+- * created. If any of the files/dirs have already been created, their
+- * reference count will be incremented.
++/*
++ * Walk the children of an eventfs_inode to fill in getdents().
+ */
+-static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
++static int eventfs_iterate(struct file *file, struct dir_context *ctx)
+ {
++ const struct file_operations *fops;
++ struct inode *f_inode = file_inode(file);
++ const struct eventfs_entry *entry;
++ struct eventfs_inode *ei_child;
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+- struct eventfs_file *ef;
+- struct dentry_list *dlist;
+- struct dentry **dentries = NULL;
+- struct dentry *dentry = file_dentry(file);
+- struct dentry *d;
+- struct inode *f_inode = file_inode(file);
+- int cnt = 0;
++ const char *name;
++ umode_t mode;
+ int idx;
+- int ret;
++ int ret = -EINVAL;
++ int ino;
++ int i, r, c;
++
++ if (!dir_emit_dots(file, ctx))
++ return 0;
+
+ ti = get_tracefs(f_inode);
+ if (!(ti->flags & TRACEFS_EVENT_INODE))
+ return -EINVAL;
+
+- if (WARN_ON_ONCE(file->private_data))
+- return -EINVAL;
+-
+- dlist = kmalloc(sizeof(*dlist), GFP_KERNEL);
+- if (!dlist)
+- return -ENOMEM;
++ c = ctx->pos - 2;
+
+- ei = ti->private;
+ idx = srcu_read_lock(&eventfs_srcu);
+- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+- srcu_read_lock_held(&eventfs_srcu)) {
+- d = create_dentry(ef, dentry, false);
+- if (d) {
+- struct dentry **tmp;
+
+- tmp = krealloc(dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
+- if (!tmp)
+- break;
+- tmp[cnt] = d;
+- tmp[cnt + 1] = NULL;
+- cnt++;
+- dentries = tmp;
+- }
+- }
+- srcu_read_unlock(&eventfs_srcu, idx);
+- ret = dcache_dir_open(inode, file);
++ mutex_lock(&eventfs_mutex);
++ ei = READ_ONCE(ti->private);
++ if (ei && ei->is_freed)
++ ei = NULL;
++ mutex_unlock(&eventfs_mutex);
++
++ if (!ei)
++ goto out;
+
+ /*
+- * dcache_dir_open() sets file->private_data to a dentry cursor.
+- * Need to save that but also save all the dentries that were
+- * opened by this function.
++ * Need to create the dentries and inodes to have a consistent
++ * inode number.
+ */
+- dlist->cursor = file->private_data;
+- dlist->dentries = dentries;
+- file->private_data = dlist;
+- return ret;
+-}
++ ret = 0;
+
+-/*
+- * This just sets the file->private_data back to the cursor and back.
+- */
+-static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx)
+-{
+- struct dentry_list *dlist = file->private_data;
+- int ret;
++ /* Start at 'c' to jump over already read entries */
++ for (i = c; i < ei->nr_entries; i++, ctx->pos++) {
++ void *cdata = ei->data;
+
+- file->private_data = dlist->cursor;
+- ret = dcache_readdir(file, ctx);
+- dlist->cursor = file->private_data;
+- file->private_data = dlist;
+- return ret;
+-}
++ entry = &ei->entries[i];
++ name = entry->name;
+
+-/**
+- * eventfs_prepare_ef - helper function to prepare eventfs_file
+- * @name: the name of the file/directory to create.
+- * @mode: the permission that the file should have.
+- * @fop: struct file_operations that should be used for this file/directory.
+- * @iop: struct inode_operations that should be used for this file/directory.
+- * @data: something that the caller will want to get to later on. The
+- * inode.i_private pointer will point to this value on the open() call.
+- *
+- * This function allocates and fills the eventfs_file structure.
+- */
+-static struct eventfs_file *eventfs_prepare_ef(const char *name, umode_t mode,
+- const struct file_operations *fop,
+- const struct inode_operations *iop,
+- void *data)
+-{
+- struct eventfs_file *ef;
++ mutex_lock(&eventfs_mutex);
++ /* If ei->is_freed then just bail here, nothing more to do */
++ if (ei->is_freed) {
++ mutex_unlock(&eventfs_mutex);
++ goto out;
++ }
++ r = entry->callback(name, &mode, &cdata, &fops);
++ mutex_unlock(&eventfs_mutex);
++ if (r <= 0)
++ continue;
+
+- ef = kzalloc(sizeof(*ef), GFP_KERNEL);
+- if (!ef)
+- return ERR_PTR(-ENOMEM);
++ ino = EVENTFS_FILE_INODE_INO;
+
+- ef->name = kstrdup(name, GFP_KERNEL);
+- if (!ef->name) {
+- kfree(ef);
+- return ERR_PTR(-ENOMEM);
++ if (!dir_emit(ctx, name, strlen(name), ino, DT_REG))
++ goto out;
+ }
+
+- if (S_ISDIR(mode)) {
+- ef->ei = kzalloc(sizeof(*ef->ei), GFP_KERNEL);
+- if (!ef->ei) {
+- kfree(ef->name);
+- kfree(ef);
+- return ERR_PTR(-ENOMEM);
+- }
+- INIT_LIST_HEAD(&ef->ei->e_top_files);
+- } else {
+- ef->ei = NULL;
+- }
++ /* Subtract the skipped entries above */
++ c -= min((unsigned int)c, (unsigned int)ei->nr_entries);
+
+- ef->iop = iop;
+- ef->fop = fop;
+- ef->mode = mode;
+- ef->data = data;
+- return ef;
+-}
++ list_for_each_entry_srcu(ei_child, &ei->children, list,
++ srcu_read_lock_held(&eventfs_srcu)) {
+
+-/**
+- * eventfs_create_events_dir - create the trace event structure
+- * @name: the name of the directory to create.
+- * @parent: parent dentry for this file. This should be a directory dentry
+- * if set. If this parameter is NULL, then the directory will be
+- * created in the root of the tracefs filesystem.
+- *
+- * This function creates the top of the trace event directory.
+- */
+-struct dentry *eventfs_create_events_dir(const char *name,
+- struct dentry *parent)
+-{
+- struct dentry *dentry = tracefs_start_creating(name, parent);
+- struct eventfs_inode *ei;
+- struct tracefs_inode *ti;
+- struct inode *inode;
++ if (c > 0) {
++ c--;
++ continue;
++ }
+
+- if (security_locked_down(LOCKDOWN_TRACEFS))
+- return NULL;
++ ctx->pos++;
+
+- if (IS_ERR(dentry))
+- return dentry;
++ if (ei_child->is_freed)
++ continue;
+
+- ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+- if (!ei)
+- return ERR_PTR(-ENOMEM);
+- inode = tracefs_get_inode(dentry->d_sb);
+- if (unlikely(!inode)) {
+- kfree(ei);
+- tracefs_failed_creating(dentry);
+- return ERR_PTR(-ENOMEM);
+- }
++ name = ei_child->name;
+
+- INIT_LIST_HEAD(&ei->e_top_files);
++ ino = eventfs_dir_ino(ei_child);
+
+- ti = get_tracefs(inode);
+- ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
+- ti->private = ei;
++ if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR))
++ goto out_dec;
++ }
++ ret = 1;
++ out:
++ srcu_read_unlock(&eventfs_srcu, idx);
+
+- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+- inode->i_op = &eventfs_root_dir_inode_operations;
+- inode->i_fop = &eventfs_file_operations;
++ return ret;
+
+- /* directory inodes start off with i_nlink == 2 (for "." entry) */
+- inc_nlink(inode);
+- d_instantiate(dentry, inode);
+- inc_nlink(dentry->d_parent->d_inode);
+- fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+- return tracefs_end_creating(dentry);
++ out_dec:
++	/* ctx->pos was incremented without emitting an entry; reset it */
++ ctx->pos--;
++ goto out;
+ }
+
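For orientation, a worked example of the position bookkeeping above, with hypothetical counts: take a directory with nr_entries = 3 static files and two child directories. dir_emit_dots() consumes positions 0 and 1 for "." and "..", so c = ctx->pos - 2 indexes straight into the static entries. If a second getdents() call arrives with ctx->pos = 4, the entry loop starts at i = 2 and emits only entries[2]; the subtraction then clamps c to 0 and the child list is walked from its head with nothing skipped. Had ctx->pos been 6 instead, the entry loop would not run at all, c would enter the child loop as 1, and the first child would be skipped without touching ctx->pos.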
+ /**
+- * eventfs_add_subsystem_dir - add eventfs subsystem_dir to list to create later
+- * @name: the name of the file to create.
+- * @parent: parent dentry for this dir.
++ * eventfs_create_dir - Create the eventfs_inode for this directory
++ * @name: The name of the directory to create.
++ * @parent: The eventfs_inode of the parent directory.
++ * @entries: A list of entries that represent the files under this directory
++ * @size: The number of @entries
++ * @data: The default data to pass to the files (an entry may override it).
+ *
+- * This function adds eventfs subsystem dir to list.
+- * And all these dirs are created on the fly when they are looked up,
+- * and the dentry and inodes will be removed when they are done.
++ * This function creates the descriptor to represent a directory in the
++ * eventfs. This descriptor is an eventfs_inode, and it is returned to be
++ * used to create other children underneath.
++ *
++ * The @entries is an array of eventfs_entry structures which has:
++ * const char *name
++ * eventfs_callback callback;
++ *
++ * The name is the name of the file, and the callback is a pointer to a function
++ * that will be called when the file is referenced (either by lookup or by
++ * reading a directory). The callback is of the prototype:
++ *
++ * int callback(const char *name, umode_t *mode, void **data,
++ * const struct file_operations **fops);
++ *
++ * When a file needs to be created, this callback will be called with
++ * name = the name of the file being created (so that the same callback
++ * may be used for multiple files).
++ * mode = a place to set the file's mode
++ * data = A pointer to @data, and the callback may replace it, which will
++ * cause the file created to pass the new data to the open() call.
++ * fops = the fops to use for the created file.
++ *
++ * NB. @callback is called while holding internal locks of the eventfs
++ * system. The callback must not call any code that might also call into
++ * the tracefs or eventfs system or it will risk creating a deadlock.
+ */
+-struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
+- struct dentry *parent)
++struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
++ const struct eventfs_entry *entries,
++ int size, void *data)
+ {
+- struct tracefs_inode *ti_parent;
+- struct eventfs_inode *ei_parent;
+- struct eventfs_file *ef;
+-
+- if (security_locked_down(LOCKDOWN_TRACEFS))
+- return NULL;
++ struct eventfs_inode *ei;
+
+ if (!parent)
+ return ERR_PTR(-EINVAL);
+
+- ti_parent = get_tracefs(parent->d_inode);
+- ei_parent = ti_parent->private;
++ ei = alloc_ei(name);
++ if (!ei)
++ return ERR_PTR(-ENOMEM);
+
+- ef = eventfs_prepare_ef(name, S_IFDIR, NULL, NULL, NULL);
+- if (IS_ERR(ef))
+- return ef;
++ ei->entries = entries;
++ ei->nr_entries = size;
++ ei->data = data;
++ INIT_LIST_HEAD(&ei->children);
++ INIT_LIST_HEAD(&ei->list);
+
+ mutex_lock(&eventfs_mutex);
+- list_add_tail(&ef->list, &ei_parent->e_top_files);
+- ef->d_parent = parent;
++ if (!parent->is_freed)
++ list_add_tail(&ei->list, &parent->children);
+ mutex_unlock(&eventfs_mutex);
+- return ef;
+-}
+-
+-/**
+- * eventfs_add_dir - add eventfs dir to list to create later
+- * @name: the name of the file to create.
+- * @ef_parent: parent eventfs_file for this dir.
+- *
+- * This function adds eventfs dir to list.
+- * And all these dirs are created on the fly when they are looked up,
+- * and the dentry and inodes will be removed when they are done.
+- */
+-struct eventfs_file *eventfs_add_dir(const char *name,
+- struct eventfs_file *ef_parent)
+-{
+- struct eventfs_file *ef;
+-
+- if (security_locked_down(LOCKDOWN_TRACEFS))
+- return NULL;
+-
+- if (!ef_parent)
+- return ERR_PTR(-EINVAL);
+
+- ef = eventfs_prepare_ef(name, S_IFDIR, NULL, NULL, NULL);
+- if (IS_ERR(ef))
+- return ef;
+-
+- mutex_lock(&eventfs_mutex);
+- list_add_tail(&ef->list, &ef_parent->ei->e_top_files);
+- ef->d_parent = ef_parent->dentry;
+- mutex_unlock(&eventfs_mutex);
+- return ef;
++ /* Was the parent freed? */
++ if (list_empty(&ei->list)) {
++ cleanup_ei(ei);
++ ei = ERR_PTR(-EBUSY);
++ }
++ return ei;
+ }
+
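As a concrete illustration of the entries/callback contract documented above, here is a minimal sketch; every name in it (my_callback, my_enable_fops, my_format_fops, my_entries, parent_ei, my_data) is hypothetical and not part of this patch:

/* Hypothetical sketch: two files registered through the callback API. */
static int my_callback(const char *name, umode_t *mode, void **data,
		       const struct file_operations **fops)
{
	if (strcmp(name, "enable") == 0) {
		*mode = 0644;			/* writable control file */
		*fops = &my_enable_fops;	/* assumed to exist elsewhere */
		return 1;			/* > 0: create this file */
	}
	if (strcmp(name, "format") == 0) {
		*mode = 0444;			/* read-only */
		*fops = &my_format_fops;	/* assumed to exist elsewhere */
		return 1;
	}
	return 0;				/* <= 0: skip this entry */
}

static const struct eventfs_entry my_entries[] = {
	{ .name = "enable", .callback = my_callback },
	{ .name = "format", .callback = my_callback },
};

/* ei = eventfs_create_dir("my_event", parent_ei, my_entries,
 *			   ARRAY_SIZE(my_entries), my_data); */

Note that one callback serves both files, keyed on @name, exactly as the comment above anticipates, and that it must stay lock-free with respect to tracefs/eventfs per the NB above.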
+ /**
+- * eventfs_add_events_file - add the data needed to create a file for later reference
+- * @name: the name of the file to create.
+- * @mode: the permission that the file should have.
+- * @parent: parent dentry for this file.
+- * @data: something that the caller will want to get to later on.
+- * @fop: struct file_operations that should be used for this file.
++ * eventfs_create_events_dir - create the top level events directory
++ * @name: The name of the top level directory to create.
++ * @parent: Parent dentry for this file in the tracefs directory.
++ * @entries: A list of entries that represent the files under this directory
++ * @size: The number of @entries
++ * @data: The default data to pass to the files (an entry may override it).
+ *
+- * This function is used to add the information needed to create a
+- * dentry/inode within the top level events directory. The file created
+- * will have the @mode permissions. The @data will be used to fill the
+- * inode.i_private when the open() call is done. The dentry and inodes are
+- * all created when they are referenced, and removed when they are no
+- * longer referenced.
++ * This function creates the top of the trace event directory.
++ *
++ * See eventfs_create_dir() for use of @entries.
+ */
+-int eventfs_add_events_file(const char *name, umode_t mode,
+- struct dentry *parent, void *data,
+- const struct file_operations *fop)
++struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
++ const struct eventfs_entry *entries,
++ int size, void *data)
+ {
+- struct tracefs_inode *ti;
++ struct dentry *dentry = tracefs_start_creating(name, parent);
++ struct eventfs_root_inode *rei;
+ struct eventfs_inode *ei;
+- struct eventfs_file *ef;
++ struct tracefs_inode *ti;
++ struct inode *inode;
++ kuid_t uid;
++ kgid_t gid;
+
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+- return -ENODEV;
++ return NULL;
+
+- if (!parent)
+- return -EINVAL;
++ if (IS_ERR(dentry))
++ return ERR_CAST(dentry);
+
+- if (!(mode & S_IFMT))
+- mode |= S_IFREG;
++ ei = alloc_root_ei(name);
++ if (!ei)
++ goto fail;
+
+- if (!parent->d_inode)
+- return -EINVAL;
++ inode = tracefs_get_inode(dentry->d_sb);
++ if (unlikely(!inode))
++ goto fail;
+
+- ti = get_tracefs(parent->d_inode);
+- if (!(ti->flags & TRACEFS_EVENT_INODE))
+- return -EINVAL;
++ // Note: we have a ref to the dentry from tracefs_start_creating()
++ rei = get_root_inode(ei);
++ rei->events_dir = dentry;
++ rei->parent_inode = d_inode(dentry->d_sb->s_root);
+
+- ei = ti->private;
+- ef = eventfs_prepare_ef(name, mode, fop, NULL, data);
++ ei->entries = entries;
++ ei->nr_entries = size;
++ ei->data = data;
+
+- if (IS_ERR(ef))
+- return -ENOMEM;
++ /* Save the ownership of this directory */
++ uid = d_inode(dentry->d_parent)->i_uid;
++ gid = d_inode(dentry->d_parent)->i_gid;
+
+- mutex_lock(&eventfs_mutex);
+- list_add_tail(&ef->list, &ei->e_top_files);
+- ef->d_parent = parent;
+- mutex_unlock(&eventfs_mutex);
+- return 0;
+-}
++ ei->attr.uid = uid;
++ ei->attr.gid = gid;
+
+-/**
+- * eventfs_add_file - add eventfs file to list to create later
+- * @name: the name of the file to create.
+- * @mode: the permission that the file should have.
+- * @ef_parent: parent eventfs_file for this file.
+- * @data: something that the caller will want to get to later on.
+- * @fop: struct file_operations that should be used for this file.
+- *
+- * This function is used to add the information needed to create a
+- * file within a subdirectory of the events directory. The file created
+- * will have the @mode permissions. The @data will be used to fill the
+- * inode.i_private when the open() call is done. The dentry and inodes are
+- * all created when they are referenced, and removed when they are no
+- * longer referenced.
+- */
+-int eventfs_add_file(const char *name, umode_t mode,
+- struct eventfs_file *ef_parent,
+- void *data,
+- const struct file_operations *fop)
+-{
+- struct eventfs_file *ef;
++ /*
++ * When the "events" directory is created, it takes on the
++	 * permissions of its parent. But they can be reset on remount.
++ */
++ ei->attr.mode |= EVENTFS_SAVE_UID | EVENTFS_SAVE_GID;
+
+- if (security_locked_down(LOCKDOWN_TRACEFS))
+- return -ENODEV;
++ INIT_LIST_HEAD(&ei->children);
++ INIT_LIST_HEAD(&ei->list);
+
+- if (!ef_parent)
+- return -EINVAL;
++ ti = get_tracefs(inode);
++ ti->flags |= TRACEFS_EVENT_INODE;
++ ti->private = ei;
+
+- if (!(mode & S_IFMT))
+- mode |= S_IFREG;
++ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++ inode->i_uid = uid;
++ inode->i_gid = gid;
++ inode->i_op = &eventfs_dir_inode_operations;
++ inode->i_fop = &eventfs_file_operations;
+
+- ef = eventfs_prepare_ef(name, mode, fop, NULL, data);
+- if (IS_ERR(ef))
+- return -ENOMEM;
++ dentry->d_fsdata = get_ei(ei);
+
+- mutex_lock(&eventfs_mutex);
+- list_add_tail(&ef->list, &ef_parent->ei->e_top_files);
+- ef->d_parent = ef_parent->dentry;
+- mutex_unlock(&eventfs_mutex);
+- return 0;
+-}
++ /*
++ * Keep all eventfs directories with i_nlink == 1.
++ * Due to the dynamic nature of the dentry creations and not
++ * wanting to add a pointer to the parent eventfs_inode in the
++ * eventfs_inode structure, keeping the i_nlink in sync with the
++ * number of directories would cause too much complexity for
++ * something not worth much. Keeping directory links at 1
++ * tells userspace not to trust the link number.
++ */
++ d_instantiate(dentry, inode);
++ /* The dentry of the "events" parent does keep track though */
++ inc_nlink(dentry->d_parent->d_inode);
++ fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
++ tracefs_end_creating(dentry);
+
+-static void free_ef(struct rcu_head *head)
+-{
+- struct eventfs_file *ef = container_of(head, struct eventfs_file, rcu);
++ return ei;
+
+- kfree(ef->name);
+- kfree(ef->ei);
+- kfree(ef);
++ fail:
++ cleanup_ei(ei);
++ tracefs_failed_creating(dentry);
++ return ERR_PTR(-ENOMEM);
+ }
+
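A plausible end-to-end use of the two constructors, sketched under stated assumptions: tracefs_parent, top_entries and sys_entries are hypothetical names, and the error handling shown is an assumption rather than anything this patch prescribes.

struct eventfs_inode *events, *subdir;

events = eventfs_create_events_dir("events", tracefs_parent,
				   top_entries, ARRAY_SIZE(top_entries),
				   NULL);
/* NULL under lockdown, ERR_PTR() on failure */
if (IS_ERR_OR_NULL(events))
	return events ? PTR_ERR(events) : -ENODEV;

subdir = eventfs_create_dir("my_system", events, sys_entries,
			    ARRAY_SIZE(sys_entries), NULL);
if (IS_ERR(subdir))
	return PTR_ERR(subdir);

/* ... */

eventfs_remove_events_dir(events);	/* tears down the children too */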
+ /**
+ * eventfs_remove_rec - remove eventfs dir or file from list
+- * @ef: eventfs_file to be removed.
+- * @head: to create list of eventfs_file to be deleted
+- * @level: to check recursion depth
++ * @ei: eventfs_inode to be removed.
++ * @level: prevent recursion from going more than 3 levels deep.
+ *
+- * The helper function eventfs_remove_rec() is used to clean up and free the
+- * associated data from eventfs for both of the added functions.
++ * This function recursively removes eventfs_inodes which
++ * contain information about files and/or directories.
+ */
+-static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head, int level)
++static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
+ {
+- struct eventfs_file *ef_child;
++ struct eventfs_inode *ei_child;
+
+- if (!ef)
+- return;
+ /*
+ * Check recursion depth. It should never be greater than 3:
+ * 0 - events/
+@@ -806,100 +931,56 @@ static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head,
+ if (WARN_ON_ONCE(level > 3))
+ return;
+
+- if (ef->ei) {
+- /* search for nested folders or files */
+- list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
+- lockdep_is_held(&eventfs_mutex)) {
+- eventfs_remove_rec(ef_child, head, level + 1);
+- }
+- }
++ /* search for nested folders or files */
++ list_for_each_entry(ei_child, &ei->children, list)
++ eventfs_remove_rec(ei_child, level + 1);
+
+- list_del_rcu(&ef->list);
+- list_add_tail(&ef->del_list, head);
++ list_del_rcu(&ei->list);
++ free_ei(ei);
+ }
+
+ /**
+- * eventfs_remove - remove eventfs dir or file from list
+- * @ef: eventfs_file to be removed.
++ * eventfs_remove_dir - remove eventfs dir or file from list
++ * @ei: eventfs_inode to be removed.
+ *
+ * This function acquires the eventfs_mutex lock and calls eventfs_remove_rec().
+ */
+-void eventfs_remove(struct eventfs_file *ef)
++void eventfs_remove_dir(struct eventfs_inode *ei)
+ {
+- struct eventfs_file *tmp;
+- LIST_HEAD(ef_del_list);
+- struct dentry *dentry_list = NULL;
+- struct dentry *dentry;
+-
+- if (!ef)
++ if (!ei)
+ return;
+
+ mutex_lock(&eventfs_mutex);
+- eventfs_remove_rec(ef, &ef_del_list, 0);
+- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+- if (ef->dentry) {
+- unsigned long ptr = (unsigned long)dentry_list;
+-
+- /* Keep the dentry from being freed yet */
+- dget(ef->dentry);
+-
+- /*
+- * Paranoid: The dget() above should prevent the dentry
+- * from being freed and calling eventfs_set_ef_status_free().
+- * But just in case, set the link list LSB pointer to 1
+- * and have eventfs_set_ef_status_free() check that to
+- * make sure that if it does happen, it will not think
+- * the d_fsdata is an event_file.
+- *
+- * For this to work, no event_file should be allocated
+- * on a odd space, as the ef should always be allocated
+- * to be at least word aligned. Check for that too.
+- */
+- WARN_ON_ONCE(ptr & 1);
+-
+- ef->dentry->d_fsdata = (void *)(ptr | 1);
+- dentry_list = ef->dentry;
+- ef->dentry = NULL;
+- }
+- call_srcu(&eventfs_srcu, &ef->rcu, free_ef);
+- }
++ eventfs_remove_rec(ei, 0);
+ mutex_unlock(&eventfs_mutex);
+-
+- while (dentry_list) {
+- unsigned long ptr;
+-
+- dentry = dentry_list;
+- ptr = (unsigned long)dentry->d_fsdata & ~1UL;
+- dentry_list = (struct dentry *)ptr;
+- dentry->d_fsdata = NULL;
+- d_invalidate(dentry);
+- mutex_lock(&eventfs_mutex);
+- /* dentry should now have at least a single reference */
+- WARN_ONCE((int)d_count(dentry) < 1,
+- "dentry %p less than one reference (%d) after invalidate\n",
+- dentry, d_count(dentry));
+- mutex_unlock(&eventfs_mutex);
+- dput(dentry);
+- }
+ }
+
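The removal path shrinks considerably here: instead of collecting every live dentry, tagging the list pointer's low bit as a paranoia check, and invalidating each one by hand, the new code only unlinks and frees the eventfs_inode tree under eventfs_mutex. Stale dentries are rejected lazily by the tracefs_d_revalidate() hook added to fs/tracefs/inode.c below and released through tracefs_d_release().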
+ /**
+- * eventfs_remove_events_dir - remove eventfs dir or file from list
+- * @dentry: events's dentry to be removed.
++ * eventfs_remove_events_dir - remove the top level eventfs directory
++ * @ei: the event_inode returned by eventfs_create_events_dir().
+ *
+- * This function remove events main directory
++ * This function removes the main events directory
+ */
+-void eventfs_remove_events_dir(struct dentry *dentry)
++void eventfs_remove_events_dir(struct eventfs_inode *ei)
+ {
+- struct tracefs_inode *ti;
++ struct eventfs_root_inode *rei;
++ struct dentry *dentry;
+
+- if (!dentry || !dentry->d_inode)
++ rei = get_root_inode(ei);
++ dentry = rei->events_dir;
++ if (!dentry)
+ return;
+
+- ti = get_tracefs(dentry->d_inode);
+- if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
+- return;
++ rei->events_dir = NULL;
++ eventfs_remove_dir(ei);
+
++ /*
++ * Matches the dget() done by tracefs_start_creating()
++	 * in eventfs_create_events_dir() when the dentry was
++ * created. In other words, it's a normal dentry that
++ * sticks around while the other ei->dentry are created
++ * and destroyed dynamically.
++ */
+ d_invalidate(dentry);
+ dput(dentry);
+ }
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 891653ba9cf358..7d389dd5ed5195 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -30,22 +30,44 @@ static struct vfsmount *tracefs_mount;
+ static int tracefs_mount_count;
+ static bool tracefs_registered;
+
++/*
++ * Keep track of all tracefs_inodes in order to update their
++ * flags if necessary on a remount.
++ */
++static DEFINE_SPINLOCK(tracefs_inode_lock);
++static LIST_HEAD(tracefs_inodes);
++
+ static struct inode *tracefs_alloc_inode(struct super_block *sb)
+ {
+ struct tracefs_inode *ti;
++ unsigned long flags;
+
+- ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL);
++ ti = alloc_inode_sb(sb, tracefs_inode_cachep, GFP_KERNEL);
+ if (!ti)
+ return NULL;
+
+- ti->flags = 0;
++ spin_lock_irqsave(&tracefs_inode_lock, flags);
++ list_add_rcu(&ti->list, &tracefs_inodes);
++ spin_unlock_irqrestore(&tracefs_inode_lock, flags);
+
+ return &ti->vfs_inode;
+ }
+
+ static void tracefs_free_inode(struct inode *inode)
+ {
+- kmem_cache_free(tracefs_inode_cachep, get_tracefs(inode));
++ struct tracefs_inode *ti = get_tracefs(inode);
++
++ kmem_cache_free(tracefs_inode_cachep, ti);
++}
++
++static void tracefs_destroy_inode(struct inode *inode)
++{
++ struct tracefs_inode *ti = get_tracefs(inode);
++ unsigned long flags;
++
++ spin_lock_irqsave(&tracefs_inode_lock, flags);
++ list_del_rcu(&ti->list);
++ spin_unlock_irqrestore(&tracefs_inode_lock, flags);
+ }
+
+ static ssize_t default_read_file(struct file *file, char __user *buf,
+@@ -91,6 +113,7 @@ static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
+ struct inode *inode, struct dentry *dentry,
+ umode_t mode)
+ {
++ struct tracefs_inode *ti;
+ char *name;
+ int ret;
+
+@@ -98,6 +121,15 @@ static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
+ if (!name)
+ return -ENOMEM;
+
++ /*
++ * This is a new directory that does not take the default of
++	 * the rootfs. Its permissions become the default for all the
++ * files and directories underneath it.
++ */
++ ti = get_tracefs(inode);
++ ti->flags |= TRACEFS_INSTANCE_INODE;
++ ti->private = inode;
++
+ /*
+ * The mkdir call can call the generic functions that create
+ * the files within the tracefs system. It is up to the individual
+@@ -141,10 +173,99 @@ static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry)
+ return ret;
+ }
+
+-static const struct inode_operations tracefs_dir_inode_operations = {
++static void set_tracefs_inode_owner(struct inode *inode)
++{
++ struct tracefs_inode *ti = get_tracefs(inode);
++ struct inode *root_inode = ti->private;
++ kuid_t uid;
++ kgid_t gid;
++
++ uid = root_inode->i_uid;
++ gid = root_inode->i_gid;
++
++ /*
++ * If the root is not the mount point, then check the root's
++ * permissions. If it was never set, then default to the
++ * mount point.
++ */
++ if (root_inode != d_inode(root_inode->i_sb->s_root)) {
++ struct tracefs_inode *rti;
++
++ rti = get_tracefs(root_inode);
++ root_inode = d_inode(root_inode->i_sb->s_root);
++
++ if (!(rti->flags & TRACEFS_UID_PERM_SET))
++ uid = root_inode->i_uid;
++
++ if (!(rti->flags & TRACEFS_GID_PERM_SET))
++ gid = root_inode->i_gid;
++ }
++
++ /*
++ * If this inode has never been referenced, then update
++ * the permissions to the superblock.
++ */
++ if (!(ti->flags & TRACEFS_UID_PERM_SET))
++ inode->i_uid = uid;
++
++ if (!(ti->flags & TRACEFS_GID_PERM_SET))
++ inode->i_gid = gid;
++}
++
++static int tracefs_permission(struct mnt_idmap *idmap,
++ struct inode *inode, int mask)
++{
++ set_tracefs_inode_owner(inode);
++ return generic_permission(idmap, inode, mask);
++}
++
++static int tracefs_getattr(struct mnt_idmap *idmap,
++ const struct path *path, struct kstat *stat,
++ u32 request_mask, unsigned int flags)
++{
++ struct inode *inode = d_backing_inode(path->dentry);
++
++ set_tracefs_inode_owner(inode);
++ generic_fillattr(idmap, request_mask, inode, stat);
++ return 0;
++}
++
++static int tracefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
++ struct iattr *attr)
++{
++ unsigned int ia_valid = attr->ia_valid;
++ struct inode *inode = d_inode(dentry);
++ struct tracefs_inode *ti = get_tracefs(inode);
++
++ if (ia_valid & ATTR_UID)
++ ti->flags |= TRACEFS_UID_PERM_SET;
++
++ if (ia_valid & ATTR_GID)
++ ti->flags |= TRACEFS_GID_PERM_SET;
++
++ return simple_setattr(idmap, dentry, attr);
++}
++
++static const struct inode_operations tracefs_instance_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = tracefs_syscall_mkdir,
+ .rmdir = tracefs_syscall_rmdir,
++ .permission = tracefs_permission,
++ .getattr = tracefs_getattr,
++ .setattr = tracefs_setattr,
++};
++
++static const struct inode_operations tracefs_dir_inode_operations = {
++ .lookup = simple_lookup,
++ .permission = tracefs_permission,
++ .getattr = tracefs_getattr,
++ .setattr = tracefs_setattr,
++};
++
++static const struct inode_operations tracefs_file_inode_operations = {
++ .permission = tracefs_permission,
++ .getattr = tracefs_getattr,
++ .setattr = tracefs_setattr,
+ };
+
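To summarise the deferred-ownership scheme implemented by set_tracefs_inode_owner() and tracefs_setattr() above, a rough model follows; the type and flag names are illustrative stand-ins, not the kernel's:

/* Illustrative model only: where a tracefs node's uid comes from. */
enum { UID_SET = 1 };			/* stands in for TRACEFS_UID_PERM_SET */

struct node {
	unsigned long flags;
	unsigned int uid;
};

static unsigned int effective_uid(const struct node *n,
				  const struct node *instance_root,
				  const struct node *mount_root)
{
	if (n->flags & UID_SET)			/* explicit chown wins */
		return n->uid;
	if (instance_root->flags & UID_SET)	/* then the instance directory */
		return instance_root->uid;
	return mount_root->uid;			/* else the mount options */
}

The gid side is symmetrical. The point of resolving this in ->permission() and ->getattr() is that ownership is computed on first touch instead of by walking every dentry at mount time.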
+ struct inode *tracefs_get_inode(struct super_block *sb)
+@@ -183,77 +304,6 @@ struct tracefs_fs_info {
+ struct tracefs_mount_opts mount_opts;
+ };
+
+-static void change_gid(struct dentry *dentry, kgid_t gid)
+-{
+- if (!dentry->d_inode)
+- return;
+- dentry->d_inode->i_gid = gid;
+-}
+-
+-/*
+- * Taken from d_walk, but without he need for handling renames.
+- * Nothing can be renamed while walking the list, as tracefs
+- * does not support renames. This is only called when mounting
+- * or remounting the file system, to set all the files to
+- * the given gid.
+- */
+-static void set_gid(struct dentry *parent, kgid_t gid)
+-{
+- struct dentry *this_parent;
+- struct list_head *next;
+-
+- this_parent = parent;
+- spin_lock(&this_parent->d_lock);
+-
+- change_gid(this_parent, gid);
+-repeat:
+- next = this_parent->d_subdirs.next;
+-resume:
+- while (next != &this_parent->d_subdirs) {
+- struct list_head *tmp = next;
+- struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+- next = tmp->next;
+-
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+-
+- change_gid(dentry, gid);
+-
+- if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&this_parent->d_lock);
+- spin_release(&dentry->d_lock.dep_map, _RET_IP_);
+- this_parent = dentry;
+- spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+- goto repeat;
+- }
+- spin_unlock(&dentry->d_lock);
+- }
+- /*
+- * All done at this level ... ascend and resume the search.
+- */
+- rcu_read_lock();
+-ascend:
+- if (this_parent != parent) {
+- struct dentry *child = this_parent;
+- this_parent = child->d_parent;
+-
+- spin_unlock(&child->d_lock);
+- spin_lock(&this_parent->d_lock);
+-
+- /* go into the first sibling still alive */
+- do {
+- next = child->d_child.next;
+- if (next == &this_parent->d_subdirs)
+- goto ascend;
+- child = list_entry(next, struct dentry, d_child);
+- } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+- rcu_read_unlock();
+- goto resume;
+- }
+- rcu_read_unlock();
+- spin_unlock(&this_parent->d_lock);
+- return;
+-}
+-
+ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+ {
+ substring_t args[MAX_OPT_ARGS];
+@@ -310,6 +360,8 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
+ struct tracefs_fs_info *fsi = sb->s_fs_info;
+ struct inode *inode = d_inode(sb->s_root);
+ struct tracefs_mount_opts *opts = &fsi->mount_opts;
++ struct tracefs_inode *ti;
++ bool update_uid, update_gid;
+ umode_t tmp_mode;
+
+ /*
+@@ -326,9 +378,26 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
+ if (!remount || opts->opts & BIT(Opt_uid))
+ inode->i_uid = opts->uid;
+
+- if (!remount || opts->opts & BIT(Opt_gid)) {
+- /* Set all the group ids to the mount option */
+- set_gid(sb->s_root, opts->gid);
++ if (!remount || opts->opts & BIT(Opt_gid))
++ inode->i_gid = opts->gid;
++
++ if (remount && (opts->opts & BIT(Opt_uid) || opts->opts & BIT(Opt_gid))) {
++
++ update_uid = opts->opts & BIT(Opt_uid);
++ update_gid = opts->opts & BIT(Opt_gid);
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(ti, &tracefs_inodes, list) {
++ if (update_uid)
++ ti->flags &= ~TRACEFS_UID_PERM_SET;
++
++ if (update_gid)
++ ti->flags &= ~TRACEFS_GID_PERM_SET;
++
++ if (ti->flags & TRACEFS_EVENT_INODE)
++ eventfs_remount(ti, update_uid, update_gid);
++ }
++ rcu_read_unlock();
+ }
+
+ return 0;
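Taken together with the set_gid() removal above, this changes how mount options propagate: instead of eagerly walking every dentry at remount time, the loop merely clears the TRACEFS_UID_PERM_SET / TRACEFS_GID_PERM_SET flags on each tracked tracefs_inode (and hands eventfs inodes to eventfs_remount()), after which the new ownership is applied lazily by tracefs_permission()/tracefs_getattr() the next time each inode is touched. A remount that passes uid= or gid= deliberately clears the per-inode override flags, so the new option wins even over an earlier chown; a remount without those options leaves explicitly-set inodes untouched.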
+@@ -367,30 +436,56 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
+ return 0;
+ }
+
++static int tracefs_drop_inode(struct inode *inode)
++{
++ struct tracefs_inode *ti = get_tracefs(inode);
++
++ /*
++ * This inode is being freed and cannot be used for
++ * eventfs. Clear the flag so that it doesn't call into
++ * eventfs during the remount flag updates. The eventfs_inode
++	 * gets freed after an RCU cycle, so its contents remain
++	 * safe even if an iteration is in progress.
++ */
++ ti->flags &= ~TRACEFS_EVENT_INODE;
++
++ return 1;
++}
++
+ static const struct super_operations tracefs_super_operations = {
+ .alloc_inode = tracefs_alloc_inode,
+ .free_inode = tracefs_free_inode,
+- .drop_inode = generic_delete_inode,
++ .destroy_inode = tracefs_destroy_inode,
++ .drop_inode = tracefs_drop_inode,
+ .statfs = simple_statfs,
+ .remount_fs = tracefs_remount,
+ .show_options = tracefs_show_options,
+ };
+
+-static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode)
++/*
++ * It would be cleaner if eventfs had its own dentry ops.
++ *
++ * Note that d_revalidate is called potentially under RCU,
++ * so it can't take the eventfs mutex etc. It's fine - if
++ * we open a file just as it's marked dead, things will
++ * still work just fine, and simply see the old, stale state.
++ */
++static void tracefs_d_release(struct dentry *dentry)
+ {
+- struct tracefs_inode *ti;
++ if (dentry->d_fsdata)
++ eventfs_d_release(dentry);
++}
+
+- if (!dentry || !inode)
+- return;
++static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
++{
++ struct eventfs_inode *ei = dentry->d_fsdata;
+
+- ti = get_tracefs(inode);
+- if (ti && ti->flags & TRACEFS_EVENT_INODE)
+- eventfs_set_ef_status_free(ti, dentry);
+- iput(inode);
++ return !(ei && ei->is_freed);
+ }
+
+ static const struct dentry_operations tracefs_dentry_operations = {
+- .d_iput = tracefs_dentry_iput,
++ .d_revalidate = tracefs_d_revalidate,
++ .d_release = tracefs_d_release,
+ };
+
+ static int trace_fill_super(struct super_block *sb, void *data, int silent)
+@@ -494,78 +589,24 @@ struct dentry *tracefs_end_creating(struct dentry *dentry)
+ return dentry;
+ }
+
+-/**
+- * eventfs_start_creating - start the process of creating a dentry
+- * @name: Name of the file created for the dentry
+- * @parent: The parent dentry where this dentry will be created
+- *
+- * This is a simple helper function for the dynamically created eventfs
+- * files. When the directory of the eventfs files are accessed, their
+- * dentries are created on the fly. This function is used to start that
+- * process.
+- */
+-struct dentry *eventfs_start_creating(const char *name, struct dentry *parent)
++/* Find the inode that this will use for default */
++static struct inode *instance_inode(struct dentry *parent, struct inode *inode)
+ {
+- struct dentry *dentry;
+- int error;
+-
+- error = simple_pin_fs(&trace_fs_type, &tracefs_mount,
+- &tracefs_mount_count);
+- if (error)
+- return ERR_PTR(error);
++ struct tracefs_inode *ti;
+
+- /*
+- * If the parent is not specified, we create it in the root.
+- * We need the root dentry to do this, which is in the super
+- * block. A pointer to that is in the struct vfsmount that we
+- * have around.
+- */
++ /* If parent is NULL then use root inode */
+ if (!parent)
+- parent = tracefs_mount->mnt_root;
++ return d_inode(inode->i_sb->s_root);
+
+- if (unlikely(IS_DEADDIR(parent->d_inode)))
+- dentry = ERR_PTR(-ENOENT);
+- else
+- dentry = lookup_one_len(name, parent, strlen(name));
+-
+- if (!IS_ERR(dentry) && dentry->d_inode) {
+- dput(dentry);
+- dentry = ERR_PTR(-EEXIST);
++ /* Find the inode that is flagged as an instance or the root inode */
++ while (!IS_ROOT(parent)) {
++ ti = get_tracefs(d_inode(parent));
++ if (ti->flags & TRACEFS_INSTANCE_INODE)
++ break;
++ parent = parent->d_parent;
+ }
+
+- if (IS_ERR(dentry))
+- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+-
+- return dentry;
+-}
+-
+-/**
+- * eventfs_failed_creating - clean up a failed eventfs dentry creation
+- * @dentry: The dentry to clean up
+- *
+- * If after calling eventfs_start_creating(), a failure is detected, the
+- * resources created by eventfs_start_creating() needs to be cleaned up. In
+- * that case, this function should be called to perform that clean up.
+- */
+-struct dentry *eventfs_failed_creating(struct dentry *dentry)
+-{
+- dput(dentry);
+- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+- return NULL;
+-}
+-
+-/**
+- * eventfs_end_creating - Finish the process of creating a eventfs dentry
+- * @dentry: The dentry that has successfully been created.
+- *
+- * This function is currently just a place holder to match
+- * eventfs_start_creating(). In case any synchronization needs to be added,
+- * this function will be used to implement that without having to modify
+- * the callers of eventfs_start_creating().
+- */
+-struct dentry *eventfs_end_creating(struct dentry *dentry)
+-{
+- return dentry;
++ return d_inode(parent);
+ }
+
+ /**
+@@ -598,6 +639,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops)
+ {
++ struct tracefs_inode *ti;
+ struct dentry *dentry;
+ struct inode *inode;
+
+@@ -616,7 +658,11 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ if (unlikely(!inode))
+ return tracefs_failed_creating(dentry);
+
++ ti = get_tracefs(inode);
++ ti->private = instance_inode(parent, inode);
++
+ inode->i_mode = mode;
++ inode->i_op = &tracefs_file_inode_operations;
+ inode->i_fop = fops ? fops : &tracefs_file_operations;
+ inode->i_private = data;
+ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+@@ -629,6 +675,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ static struct dentry *__create_dir(const char *name, struct dentry *parent,
+ const struct inode_operations *ops)
+ {
++ struct tracefs_inode *ti;
+ struct dentry *dentry = tracefs_start_creating(name, parent);
+ struct inode *inode;
+
+@@ -646,6 +693,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
+ inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+ inode->i_gid = d_inode(dentry->d_parent)->i_gid;
+
++ ti = get_tracefs(inode);
++ ti->private = instance_inode(parent, inode);
++
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+@@ -676,7 +726,7 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
+- return __create_dir(name, parent, &simple_dir_inode_operations);
++ return __create_dir(name, parent, &tracefs_dir_inode_operations);
+ }
+
+ /**
+@@ -707,7 +757,7 @@ __init struct dentry *tracefs_create_instance_dir(const char *name,
+ if (WARN_ON(tracefs_ops.mkdir || tracefs_ops.rmdir))
+ return NULL;
+
+- dentry = __create_dir(name, parent, &tracefs_dir_inode_operations);
++ dentry = __create_dir(name, parent, &tracefs_instance_dir_inode_operations);
+ if (!dentry)
+ return NULL;
+
+@@ -752,7 +802,11 @@ static void init_once(void *foo)
+ {
+ struct tracefs_inode *ti = (struct tracefs_inode *) foo;
+
++ /* inode_init_once() calls memset() on the vfs_inode portion */
+ inode_init_once(&ti->vfs_inode);
++
++ /* Zero out the rest */
++ memset_after(ti, 0, vfs_inode);
+ }
+
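memset_after() comes from include/linux/string.h and zeroes every byte of an object that lies after the named member; roughly, it has this shape (shown for reference, slightly simplified from the real macro):

/* Approximate shape of memset_after(obj, v, member): */
#define memset_after(obj, v, member)					\
	memset((u8 *)(obj) + offsetofend(typeof(*(obj)), member), (v),	\
	       sizeof(*(obj)) - offsetofend(typeof(*(obj)), member))

This is also why the internal.h hunk below moves vfs_inode to the front of struct tracefs_inode: list, flags and private then all sit after vfs_inode and can be cleared in one call, while inode_init_once() remains solely responsible for the VFS portion.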
+ static int __init tracefs_init(void)
+diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
+index 4f2e49e2197b14..d83c2a25f288e0 100644
+--- a/fs/tracefs/internal.h
++++ b/fs/tracefs/internal.h
+@@ -4,13 +4,63 @@
+
+ enum {
+ TRACEFS_EVENT_INODE = BIT(1),
+- TRACEFS_EVENT_TOP_INODE = BIT(2),
++ TRACEFS_GID_PERM_SET = BIT(2),
++ TRACEFS_UID_PERM_SET = BIT(3),
++ TRACEFS_INSTANCE_INODE = BIT(4),
+ };
+
+ struct tracefs_inode {
++ struct inode vfs_inode;
++ /* The below gets initialized with memset_after(ti, 0, vfs_inode) */
++ struct list_head list;
+ unsigned long flags;
+ void *private;
+- struct inode vfs_inode;
++};
++
++/*
++ * struct eventfs_attr - cache the mode and ownership of an eventfs entry
++ * @mode: saved mode plus flags of what is saved
++ * @uid: saved uid if changed
++ * @gid: saved gid if changed
++ */
++struct eventfs_attr {
++ int mode;
++ kuid_t uid;
++ kgid_t gid;
++};
++
++/*
++ * struct eventfs_inode - hold the properties of the eventfs directories.
++ * @list: link list into the parent directory
++ * @rcu: Union with @list for freeing
++ * @children: link list into the child eventfs_inode
++ * @entries: the array of entries representing the files in the directory
++ * @name: the name of the directory to create
++ * @entry_attrs: Saved mode and ownership of the @d_children
++ * @data: The private data to pass to the callbacks
++ * @attr: Saved mode and ownership of eventfs_inode itself
++ * @kref: reference count of the eventfs_inode
++ * @is_freed: Flag set if the eventfs is on its way to be freed
++ * Note that if is_freed is set, the dentry is corrupted.
++ * @is_events: Flag set for only the top level "events" directory
++ * @nr_entries: The number of items in @entries
++ * @ino: The saved inode number
++ */
++struct eventfs_inode {
++ union {
++ struct list_head list;
++ struct rcu_head rcu;
++ };
++ struct list_head children;
++ const struct eventfs_entry *entries;
++ const char *name;
++ struct eventfs_attr *entry_attrs;
++ void *data;
++ struct eventfs_attr attr;
++ struct kref kref;
++ unsigned int is_freed:1;
++ unsigned int is_events:1;
++ unsigned int nr_entries:30;
++ unsigned int ino;
+ };
+
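The union at the top of the struct is worth noting: while an eventfs_inode is reachable it is linked through @list; once it is unlinked for deletion, the same storage is reused as the RCU head so the object can be freed after a grace period without growing the structure. The 30-bit @nr_entries field packs alongside the two flag bits, capping a directory at 2^30 static entries, far beyond anything the trace events code creates.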
+ static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
+@@ -22,9 +72,8 @@ struct dentry *tracefs_start_creating(const char *name, struct dentry *parent);
+ struct dentry *tracefs_end_creating(struct dentry *dentry);
+ struct dentry *tracefs_failed_creating(struct dentry *dentry);
+ struct inode *tracefs_get_inode(struct super_block *sb);
+-struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
+-struct dentry *eventfs_failed_creating(struct dentry *dentry);
+-struct dentry *eventfs_end_creating(struct dentry *dentry);
+-void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry);
++
++void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid);
++void eventfs_d_release(struct dentry *dentry);
+
+ #endif /* _TRACEFS_INTERNAL_H */
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 2f48c58d47cdd0..be45972ff95d9c 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1225,6 +1225,8 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+ dir_ui->ui_size = dir->i_size;
+ mutex_unlock(&dir_ui->ui_mutex);
+ out_inode:
++ /* Free inode->i_link before inode is marked as bad. */
++ fscrypt_free_inode(inode);
+ make_bad_inode(inode);
+ iput(inode);
+ out_fname:
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index e5382f0b258782..781206d0ec845c 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -261,9 +261,6 @@ static int write_begin_slow(struct address_space *mapping,
+ return err;
+ }
+ }
+-
+- SetPageUptodate(page);
+- ClearPageError(page);
+ }
+
+ if (PagePrivate(page))
+@@ -462,9 +459,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
+ return err;
+ }
+ }
+-
+- SetPageUptodate(page);
+- ClearPageError(page);
+ }
+
+ err = allocate_budget(c, page, ui, appending);
+@@ -474,10 +468,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
+ * If we skipped reading the page because we were going to
+ * write all of it, then it is not up to date.
+ */
+- if (skipped_read) {
++ if (skipped_read)
+ ClearPageChecked(page);
+- ClearPageUptodate(page);
+- }
+ /*
+ * Budgeting failed which means it would have to force
+ * write-back but didn't, because we set the @fast flag in the
+@@ -568,6 +560,9 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
+ goto out;
+ }
+
++ if (len == PAGE_SIZE)
++ SetPageUptodate(page);
++
+ if (!PagePrivate(page)) {
+ attach_page_private(page, (void *)1);
+ atomic_long_inc(&c->dirty_pg_cnt);
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 6b7d95b65f4b63..f4728e65d1bda4 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
++ return;
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
+index ab3ffc355949dc..bb471ec3640467 100644
+--- a/fs/udf/balloc.c
++++ b/fs/udf/balloc.c
+@@ -18,6 +18,7 @@
+ #include "udfdecl.h"
+
+ #include <linux/bitops.h>
++#include <linux/overflow.h>
+
+ #include "udf_i.h"
+ #include "udf_sb.h"
+@@ -64,8 +65,12 @@ static int read_block_bitmap(struct super_block *sb,
+ }
+
+ for (i = 0; i < count; i++)
+- if (udf_test_bit(i + off, bh->b_data))
++ if (udf_test_bit(i + off, bh->b_data)) {
++ bitmap->s_block_bitmap[bitmap_nr] =
++ ERR_PTR(-EFSCORRUPTED);
++ brelse(bh);
+ return -EFSCORRUPTED;
++ }
+ return 0;
+ }
+
+@@ -81,8 +86,15 @@ static int __load_block_bitmap(struct super_block *sb,
+ block_group, nr_groups);
+ }
+
+- if (bitmap->s_block_bitmap[block_group])
++ if (bitmap->s_block_bitmap[block_group]) {
++ /*
++ * The bitmap failed verification in the past. No point in
++ * trying again.
++ */
++ if (IS_ERR(bitmap->s_block_bitmap[block_group]))
++ return PTR_ERR(bitmap->s_block_bitmap[block_group]);
+ return block_group;
++ }
+
+ retval = read_block_bitmap(sb, bitmap, block_group, block_group);
+ if (retval < 0)
+@@ -129,7 +141,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
+ {
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct buffer_head *bh = NULL;
+- struct udf_part_map *partmap;
+ unsigned long block;
+ unsigned long block_group;
+ unsigned long bit;
+@@ -138,19 +149,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
+ unsigned long overflow;
+
+ mutex_lock(&sbi->s_alloc_mutex);
+- partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+- if (bloc->logicalBlockNum + count < count ||
+- (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
+- udf_debug("%u < %d || %u + %u > %u\n",
+- bloc->logicalBlockNum, 0,
+- bloc->logicalBlockNum, count,
+- partmap->s_partition_len);
+- goto error_return;
+- }
+-
++ /* We make sure this cannot overflow when mounting the filesystem */
+ block = bloc->logicalBlockNum + offset +
+ (sizeof(struct spaceBitmapDesc) << 3);
+-
+ do {
+ overflow = 0;
+ block_group = block >> (sb->s_blocksize_bits + 3);
+@@ -380,7 +381,6 @@ static void udf_table_free_blocks(struct super_block *sb,
+ uint32_t count)
+ {
+ struct udf_sb_info *sbi = UDF_SB(sb);
+- struct udf_part_map *partmap;
+ uint32_t start, end;
+ uint32_t elen;
+ struct kernel_lb_addr eloc;
+@@ -389,16 +389,6 @@ static void udf_table_free_blocks(struct super_block *sb,
+ struct udf_inode_info *iinfo;
+
+ mutex_lock(&sbi->s_alloc_mutex);
+- partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+- if (bloc->logicalBlockNum + count < count ||
+- (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
+- udf_debug("%u < %d || %u + %u > %u\n",
+- bloc->logicalBlockNum, 0,
+- bloc->logicalBlockNum, count,
+- partmap->s_partition_len);
+- goto error_return;
+- }
+-
+ iinfo = UDF_I(table);
+ udf_add_free_space(sb, sbi->s_partition, count);
+
+@@ -673,6 +663,17 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode,
+ {
+ uint16_t partition = bloc->partitionReferenceNum;
+ struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
++ uint32_t blk;
++
++ if (check_add_overflow(bloc->logicalBlockNum, offset, &blk) ||
++ check_add_overflow(blk, count, &blk) ||
++ bloc->logicalBlockNum + count > map->s_partition_len) {
++ udf_debug("Invalid request to free blocks: (%d, %u), off %u, "
++ "len %u, partition len %u\n",
++ partition, bloc->logicalBlockNum, offset, count,
++ map->s_partition_len);
++ return;
++ }
+
+ if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
+ udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
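check_add_overflow() lives in the newly included linux/overflow.h and wraps the compiler's __builtin_add_overflow(): it stores a + b into the third argument and returns true if the addition wrapped. A worked example with made-up values:

/* With u32 logicalBlockNum = 0xfffffff0 and offset = 0x20, the sum
 * wraps to 0x10; check_add_overflow() stores that wrapped value in
 * blk but returns true, so udf_free_blocks() logs the bogus request
 * and returns instead of touching a bitmap outside the partition. */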
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index 0ceac4b5937c74..94daaaf76f71c7 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -232,7 +232,9 @@ static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
++ filemap_invalidate_lock(inode->i_mapping);
+ error = udf_setsize(inode, attr->ia_size);
++ filemap_invalidate_unlock(inode->i_mapping);
+ if (error)
+ return error;
+ }
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index a17a6184cc39e1..8db07d1f56bc94 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -341,7 +341,7 @@ const struct address_space_operations udf_aops = {
+ */
+ int udf_expand_file_adinicb(struct inode *inode)
+ {
+- struct page *page;
++ struct folio *folio;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int err;
+
+@@ -357,12 +357,13 @@ int udf_expand_file_adinicb(struct inode *inode)
+ return 0;
+ }
+
+- page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+- if (!page)
+- return -ENOMEM;
++ folio = __filemap_get_folio(inode->i_mapping, 0,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_KERNEL);
++ if (IS_ERR(folio))
++ return PTR_ERR(folio);
+
+- if (!PageUptodate(page))
+- udf_adinicb_readpage(page);
++ if (!folio_test_uptodate(folio))
++ udf_adinicb_readpage(&folio->page);
+ down_write(&iinfo->i_data_sem);
+ memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
+ iinfo->i_lenAlloc);
+@@ -371,22 +372,22 @@ int udf_expand_file_adinicb(struct inode *inode)
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
+ else
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+- set_page_dirty(page);
+- unlock_page(page);
++ folio_mark_dirty(folio);
++ folio_unlock(folio);
+ up_write(&iinfo->i_data_sem);
+ err = filemap_fdatawrite(inode->i_mapping);
+ if (err) {
+ /* Restore everything back so that we don't lose data... */
+- lock_page(page);
++ folio_lock(folio);
+ down_write(&iinfo->i_data_sem);
+- memcpy_to_page(page, 0, iinfo->i_data + iinfo->i_lenEAttr,
+- inode->i_size);
+- unlock_page(page);
++ memcpy_from_folio(iinfo->i_data + iinfo->i_lenEAttr,
++ folio, 0, inode->i_size);
++ folio_unlock(folio);
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ iinfo->i_lenAlloc = inode->i_size;
+ up_write(&iinfo->i_data_sem);
+ }
+- put_page(page);
++ folio_put(folio);
+ mark_inode_dirty(inode);
+
+ return err;
+@@ -1251,7 +1252,6 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ return -EPERM;
+
+- filemap_invalidate_lock(inode->i_mapping);
+ iinfo = UDF_I(inode);
+ if (newsize > inode->i_size) {
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+@@ -1264,11 +1264,11 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ }
+ err = udf_expand_file_adinicb(inode);
+ if (err)
+- goto out_unlock;
++ return err;
+ }
+ err = udf_extend_file(inode, newsize);
+ if (err)
+- goto out_unlock;
++ return err;
+ set_size:
+ truncate_setsize(inode, newsize);
+ } else {
+@@ -1286,14 +1286,14 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ err = block_truncate_page(inode->i_mapping, newsize,
+ udf_get_block);
+ if (err)
+- goto out_unlock;
++ return err;
+ truncate_setsize(inode, newsize);
+ down_write(&iinfo->i_data_sem);
+ udf_clear_extent_cache(inode);
+ err = udf_truncate_extents(inode);
+ up_write(&iinfo->i_data_sem);
+ if (err)
+- goto out_unlock;
++ return err;
+ }
+ update_time:
+ inode->i_mtime = inode_set_ctime_current(inode);
+@@ -1301,8 +1301,6 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ udf_sync_inode(inode);
+ else
+ mark_inode_dirty(inode);
+-out_unlock:
+- filemap_invalidate_unlock(inode->i_mapping);
+ return err;
+ }
+
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index ae55ab8859b6d9..605f182da42cbb 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -874,8 +874,6 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ if (has_diriter) {
+ diriter.fi.icb.extLocation =
+ cpu_to_lelb(UDF_I(new_dir)->i_location);
+- udf_update_tag((char *)&diriter.fi,
+- udf_dir_entry_len(&diriter.fi));
+ udf_fiiter_write_fi(&diriter, NULL);
+ udf_fiiter_release(&diriter);
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 928a04d9d9e0ad..3c78535f406b00 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -269,7 +269,8 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
+ int nr_groups = bitmap->s_nr_groups;
+
+ for (i = 0; i < nr_groups; i++)
+- brelse(bitmap->s_block_bitmap[i]);
++ if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i]))
++ brelse(bitmap->s_block_bitmap[i]);
+
+ kvfree(bitmap);
+ }
+@@ -1079,12 +1080,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ struct udf_part_map *map;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct partitionHeaderDesc *phd;
++ u32 sum;
+ int err;
+
+ map = &sbi->s_partmaps[p_index];
+
+ map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
+ map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
++ if (check_add_overflow(map->s_partition_root, map->s_partition_len,
++ &sum)) {
++ udf_err(sb, "Partition %d has invalid location %u + %u\n",
++ p_index, map->s_partition_root, map->s_partition_len);
++ return -EFSCORRUPTED;
++ }
+
+ if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
+ map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
+@@ -1140,6 +1148,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ bitmap->s_extPosition = le32_to_cpu(
+ phd->unallocSpaceBitmap.extPosition);
+ map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
++		/* Check that arithmetic over the bitmap cannot overflow. */
++ if (check_add_overflow(map->s_partition_len,
++ sizeof(struct spaceBitmapDesc) << 3,
++ &sum)) {
++ udf_err(sb, "Partition %d is too long (%u)\n", p_index,
++ map->s_partition_len);
++ return -EFSCORRUPTED;
++ }
+ udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
+ p_index, bitmap->s_extPosition);
+ }
+diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
+index 758163af39c262..78ecc633606fb3 100644
+--- a/fs/udf/udftime.c
++++ b/fs/udf/udftime.c
+@@ -46,13 +46,18 @@ udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
+ dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
+ src.second);
+ dest->tv_sec -= offset * 60;
+- dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
+- src.hundredsOfMicroseconds * 100 + src.microseconds);
++
+ /*
+ * Sanitize nanosecond field since reportedly some filesystems are
+ * recorded with bogus sub-second values.
+ */
+- dest->tv_nsec %= NSEC_PER_SEC;
++ if (src.centiseconds < 100 && src.hundredsOfMicroseconds < 100 &&
++ src.microseconds < 100) {
++ dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
++ src.hundredsOfMicroseconds * 100 + src.microseconds);
++ } else {
++ dest->tv_nsec = 0;
++ }
+ }
+
+ void
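The bound works out as follows: with all three sub-second fields at their maximum valid value of 99, tv_nsec = 1000 * (99 * 10000 + 99 * 100 + 99) = 999,999,000 ns, comfortably below NSEC_PER_SEC, so no wrapping is possible. Any field at 100 or above can only come from a corrupted timestamp, so the sub-second part is now dropped to zero rather than folded modulo NSEC_PER_SEC into a meaningless value.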
+diff --git a/fs/unicode/mkutf8data.c b/fs/unicode/mkutf8data.c
+index bc1a7c8b5c8dfc..e779252be1648b 100644
+--- a/fs/unicode/mkutf8data.c
++++ b/fs/unicode/mkutf8data.c
+@@ -2230,75 +2230,6 @@ static void nfdicf_init(void)
+ file_fail(fold_name);
+ }
+
+-static void ignore_init(void)
+-{
+- FILE *file;
+- unsigned int unichar;
+- unsigned int first;
+- unsigned int last;
+- unsigned int *um;
+- int count;
+- int ret;
+-
+- if (verbose > 0)
+- printf("Parsing %s\n", prop_name);
+- file = fopen(prop_name, "r");
+- if (!file)
+- open_fail(prop_name, errno);
+- assert(file);
+- count = 0;
+- while (fgets(line, LINESIZE, file)) {
+- ret = sscanf(line, "%X..%X ; %s # ", &first, &last, buf0);
+- if (ret == 3) {
+- if (strcmp(buf0, "Default_Ignorable_Code_Point"))
+- continue;
+- if (!utf32valid(first) || !utf32valid(last))
+- line_fail(prop_name, line);
+- for (unichar = first; unichar <= last; unichar++) {
+- free(unicode_data[unichar].utf32nfdi);
+- um = malloc(sizeof(unsigned int));
+- *um = 0;
+- unicode_data[unichar].utf32nfdi = um;
+- free(unicode_data[unichar].utf32nfdicf);
+- um = malloc(sizeof(unsigned int));
+- *um = 0;
+- unicode_data[unichar].utf32nfdicf = um;
+- count++;
+- }
+- if (verbose > 1)
+- printf(" %X..%X Default_Ignorable_Code_Point\n",
+- first, last);
+- continue;
+- }
+- ret = sscanf(line, "%X ; %s # ", &unichar, buf0);
+- if (ret == 2) {
+- if (strcmp(buf0, "Default_Ignorable_Code_Point"))
+- continue;
+- if (!utf32valid(unichar))
+- line_fail(prop_name, line);
+- free(unicode_data[unichar].utf32nfdi);
+- um = malloc(sizeof(unsigned int));
+- *um = 0;
+- unicode_data[unichar].utf32nfdi = um;
+- free(unicode_data[unichar].utf32nfdicf);
+- um = malloc(sizeof(unsigned int));
+- *um = 0;
+- unicode_data[unichar].utf32nfdicf = um;
+- if (verbose > 1)
+- printf(" %X Default_Ignorable_Code_Point\n",
+- unichar);
+- count++;
+- continue;
+- }
+- }
+- fclose(file);
+-
+- if (verbose > 0)
+- printf("Found %d entries\n", count);
+- if (count == 0)
+- file_fail(prop_name);
+-}
+-
+ static void corrections_init(void)
+ {
+ FILE *file;
+@@ -3410,7 +3341,6 @@ int main(int argc, char *argv[])
+ ccc_init();
+ nfdi_init();
+ nfdicf_init();
+- ignore_init();
+ corrections_init();
+ hangul_decompose();
+ nfdi_decompose();
+diff --git a/fs/unicode/utf8data.c_shipped b/fs/unicode/utf8data.c_shipped
+index d9b62901aa96b7..1aab5257a331f0 100644
+--- a/fs/unicode/utf8data.c_shipped
++++ b/fs/unicode/utf8data.c_shipped
+@@ -82,58 +82,58 @@ static const struct utf8data utf8nfdidata[] = {
+ { 0xc0100, 20736 }
+ };
+
+-static const unsigned char utf8data[64256] = {
++static const unsigned char utf8data[64080] = {
+ /* nfdicf_30100 */
+- 0xd7,0x07,0x66,0x84,0x0c,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x99,0x1a,0xe3,0x63,0x15,
+- 0xe2,0x4c,0x0e,0xc1,0xe0,0x4e,0x0d,0xcf,0x86,0x65,0x2d,0x0d,0x01,0x00,0xd4,0xb8,
+- 0xd3,0x27,0xe2,0x89,0xa3,0xe1,0xce,0x35,0xe0,0x2c,0x22,0xcf,0x86,0xc5,0xe4,0x15,
+- 0x6d,0xe3,0x60,0x68,0xe2,0xf6,0x65,0xe1,0x29,0x65,0xe0,0xee,0x64,0xcf,0x86,0xe5,
+- 0xb3,0x64,0x64,0x96,0x64,0x0b,0x00,0xd2,0x0e,0xe1,0xb5,0x3c,0xe0,0xba,0xa3,0xcf,
+- 0x86,0xcf,0x06,0x01,0x00,0xd1,0x0c,0xe0,0x1e,0xa9,0xcf,0x86,0xcf,0x06,0x02,0xff,
++ 0xd7,0x07,0x66,0x84,0x0c,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x96,0x1a,0xe3,0x60,0x15,
++ 0xe2,0x49,0x0e,0xc1,0xe0,0x4b,0x0d,0xcf,0x86,0x65,0x2d,0x0d,0x01,0x00,0xd4,0xb8,
++ 0xd3,0x27,0xe2,0x03,0xa3,0xe1,0xcb,0x35,0xe0,0x29,0x22,0xcf,0x86,0xc5,0xe4,0xfa,
++ 0x6c,0xe3,0x45,0x68,0xe2,0xdb,0x65,0xe1,0x0e,0x65,0xe0,0xd3,0x64,0xcf,0x86,0xe5,
++ 0x98,0x64,0x64,0x7b,0x64,0x0b,0x00,0xd2,0x0e,0xe1,0xb3,0x3c,0xe0,0x34,0xa3,0xcf,
++ 0x86,0xcf,0x06,0x01,0x00,0xd1,0x0c,0xe0,0x98,0xa8,0xcf,0x86,0xcf,0x06,0x02,0xff,
+ 0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,
+- 0x00,0xe4,0xe1,0x45,0xe3,0x3b,0x45,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x87,0xad,
+- 0xd0,0x21,0xcf,0x86,0xe5,0x81,0xaa,0xe4,0x00,0xaa,0xe3,0xbf,0xa9,0xe2,0x9e,0xa9,
+- 0xe1,0x8d,0xa9,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,
+- 0x00,0xcf,0x86,0xe5,0x63,0xac,0xd4,0x19,0xe3,0xa2,0xab,0xe2,0x81,0xab,0xe1,0x70,
+- 0xab,0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,
+- 0x09,0xac,0xe2,0xe8,0xab,0xe1,0xd7,0xab,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,
+- 0x01,0xff,0xe9,0x9b,0xbb,0x00,0x83,0xe2,0x19,0xfa,0xe1,0xf2,0xf6,0xe0,0x6f,0xf5,
+- 0xcf,0x86,0xd5,0x31,0xc4,0xe3,0x54,0x4e,0xe2,0xf5,0x4c,0xe1,0xa4,0xcc,0xe0,0x9c,
+- 0x4b,0xcf,0x86,0xe5,0x8e,0x49,0xe4,0xaf,0x46,0xe3,0x11,0xbd,0xe2,0x68,0xbc,0xe1,
+- 0x43,0xbc,0xe0,0x1c,0xbc,0xcf,0x86,0xe5,0xe9,0xbb,0x94,0x07,0x63,0xd4,0xbb,0x07,
+- 0x00,0x07,0x00,0xe4,0xdb,0xf4,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,
+- 0xe1,0xea,0xe1,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xd9,0xe2,0xcf,0x86,
+- 0xe5,0x9e,0xe2,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xd9,0xe2,0xcf,0x06,
+- 0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x74,0xf4,0xe3,0x5d,0xf3,
+- 0xd2,0xa0,0xe1,0x13,0xe7,0xd0,0x21,0xcf,0x86,0xe5,0x14,0xe4,0xe4,0x90,0xe3,0xe3,
+- 0x4e,0xe3,0xe2,0x2d,0xe3,0xe1,0x1b,0xe3,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,
+- 0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x70,0xe5,0xe3,0x2f,0xe5,
+- 0xe2,0x0e,0xe5,0xe1,0xfd,0xe4,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,
+- 0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xf7,0xe5,0xe1,0xe6,0xe5,0x10,0x09,
+- 0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x17,
+- 0xe6,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,
+- 0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x5d,0xe6,0xd2,0x14,0xe1,0x2c,0xe6,
++ 0x00,0xe4,0xdf,0x45,0xe3,0x39,0x45,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x01,0xad,
++ 0xd0,0x21,0xcf,0x86,0xe5,0xfb,0xa9,0xe4,0x7a,0xa9,0xe3,0x39,0xa9,0xe2,0x18,0xa9,
++ 0xe1,0x07,0xa9,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,
++ 0x00,0xcf,0x86,0xe5,0xdd,0xab,0xd4,0x19,0xe3,0x1c,0xab,0xe2,0xfb,0xaa,0xe1,0xea,
++ 0xaa,0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,
++ 0x83,0xab,0xe2,0x62,0xab,0xe1,0x51,0xab,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,
++ 0x01,0xff,0xe9,0x9b,0xbb,0x00,0x83,0xe2,0x68,0xf9,0xe1,0x52,0xf6,0xe0,0xcf,0xf4,
++ 0xcf,0x86,0xd5,0x31,0xc4,0xe3,0x51,0x4e,0xe2,0xf2,0x4c,0xe1,0x09,0xcc,0xe0,0x99,
++ 0x4b,0xcf,0x86,0xe5,0x8b,0x49,0xe4,0xac,0x46,0xe3,0x76,0xbc,0xe2,0xcd,0xbb,0xe1,
++ 0xa8,0xbb,0xe0,0x81,0xbb,0xcf,0x86,0xe5,0x4e,0xbb,0x94,0x07,0x63,0x39,0xbb,0x07,
++ 0x00,0x07,0x00,0xe4,0x3b,0xf4,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,
++ 0xe1,0x4a,0xe1,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x39,0xe2,0xcf,0x86,
++ 0xe5,0xfe,0xe1,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x39,0xe2,0xcf,0x06,
++ 0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xd4,0xf3,0xe3,0xbd,0xf2,
++ 0xd2,0xa0,0xe1,0x73,0xe6,0xd0,0x21,0xcf,0x86,0xe5,0x74,0xe3,0xe4,0xf0,0xe2,0xe3,
++ 0xae,0xe2,0xe2,0x8d,0xe2,0xe1,0x7b,0xe2,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,
++ 0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xd0,0xe4,0xe3,0x8f,0xe4,
++ 0xe2,0x6e,0xe4,0xe1,0x5d,0xe4,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,
++ 0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0x57,0xe5,0xe1,0x46,0xe5,0x10,0x09,
++ 0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x77,
++ 0xe5,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,
++ 0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0xbd,0xe5,0xd2,0x14,0xe1,0x8c,0xe5,
+ 0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,
+- 0x38,0xe6,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,
+- 0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x8d,0xeb,0xd4,0x19,0xe3,0xc6,0xea,0xe2,0xa4,
+- 0xea,0xe1,0x93,0xea,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,
+- 0xb7,0x00,0xd3,0x18,0xe2,0x10,0xeb,0xe1,0xff,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,
+- 0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x28,0xeb,0x10,
++ 0x98,0xe5,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,
++ 0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0xed,0xea,0xd4,0x19,0xe3,0x26,0xea,0xe2,0x04,
++ 0xea,0xe1,0xf3,0xe9,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,
++ 0xb7,0x00,0xd3,0x18,0xe2,0x70,0xea,0xe1,0x5f,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,
++ 0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x88,0xea,0x10,
+ 0x08,0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,
+ 0x08,0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,
+- 0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x2a,
+- 0xed,0xd4,0x1a,0xe3,0x62,0xec,0xe2,0x48,0xec,0xe1,0x35,0xec,0x10,0x08,0x05,0xff,
+- 0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0xaa,0xec,
+- 0xe1,0x98,0xec,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,
+- 0x00,0xd2,0x13,0xe1,0xc6,0xec,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,
++ 0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x8a,
++ 0xec,0xd4,0x1a,0xe3,0xc2,0xeb,0xe2,0xa8,0xeb,0xe1,0x95,0xeb,0x10,0x08,0x05,0xff,
++ 0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x0a,0xec,
++ 0xe1,0xf8,0xeb,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,
++ 0x00,0xd2,0x13,0xe1,0x26,0xec,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,
+ 0xe7,0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,
+ 0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,
+- 0xff,0xe7,0xaa,0xae,0x00,0xe0,0xdc,0xef,0xcf,0x86,0xd5,0x1d,0xe4,0x51,0xee,0xe3,
+- 0x0d,0xee,0xe2,0xeb,0xed,0xe1,0xda,0xed,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,
+- 0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xf8,0xee,0xe2,0xd4,0xee,0xe1,
+- 0xc3,0xee,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,
+- 0xd3,0x18,0xe2,0x43,0xef,0xe1,0x32,0xef,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,
+- 0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x5b,0xef,0x10,0x08,0x05,
++ 0xff,0xe7,0xaa,0xae,0x00,0xe0,0x3c,0xef,0xcf,0x86,0xd5,0x1d,0xe4,0xb1,0xed,0xe3,
++ 0x6d,0xed,0xe2,0x4b,0xed,0xe1,0x3a,0xed,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,
++ 0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x58,0xee,0xe2,0x34,0xee,0xe1,
++ 0x23,0xee,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,
++ 0xd3,0x18,0xe2,0xa3,0xee,0xe1,0x92,0xee,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,
++ 0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xbb,0xee,0x10,0x08,0x05,
+ 0xff,0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,
+ 0xff,0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,
+ 0x9e,0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+@@ -141,152 +141,152 @@ static const unsigned char utf8data[64256] = {
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ /* nfdi_30100 */
+- 0x57,0x04,0x01,0x00,0xc6,0xd5,0x16,0xe4,0xc2,0x59,0xe3,0xfb,0x54,0xe2,0x74,0x4f,
+- 0xc1,0xe0,0xa0,0x4d,0xcf,0x86,0x65,0x84,0x4d,0x01,0x00,0xd4,0xb8,0xd3,0x27,0xe2,
+- 0x0c,0xa0,0xe1,0xdf,0x8d,0xe0,0x39,0x71,0xcf,0x86,0xc5,0xe4,0x98,0x69,0xe3,0xe3,
+- 0x64,0xe2,0x79,0x62,0xe1,0xac,0x61,0xe0,0x71,0x61,0xcf,0x86,0xe5,0x36,0x61,0x64,
+- 0x19,0x61,0x0b,0x00,0xd2,0x0e,0xe1,0xc2,0xa0,0xe0,0x3d,0xa0,0xcf,0x86,0xcf,0x06,
+- 0x01,0x00,0xd1,0x0c,0xe0,0xa1,0xa5,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd0,0x08,
+- 0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x9e,
+- 0xb6,0xe3,0x18,0xae,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x0a,0xaa,0xd0,0x21,0xcf,
+- 0x86,0xe5,0x04,0xa7,0xe4,0x83,0xa6,0xe3,0x42,0xa6,0xe2,0x21,0xa6,0xe1,0x10,0xa6,
+- 0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0xcf,0x86,
+- 0xe5,0xe6,0xa8,0xd4,0x19,0xe3,0x25,0xa8,0xe2,0x04,0xa8,0xe1,0xf3,0xa7,0x10,0x08,
+- 0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,0x8c,0xa8,0xe2,
+- 0x6b,0xa8,0xe1,0x5a,0xa8,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,
+- 0x9b,0xbb,0x00,0x83,0xe2,0x9c,0xf6,0xe1,0x75,0xf3,0xe0,0xf2,0xf1,0xcf,0x86,0xd5,
+- 0x31,0xc4,0xe3,0x6d,0xcc,0xe2,0x46,0xca,0xe1,0x27,0xc9,0xe0,0xb7,0xbf,0xcf,0x86,
+- 0xe5,0xaa,0xbb,0xe4,0xa3,0xba,0xe3,0x94,0xb9,0xe2,0xeb,0xb8,0xe1,0xc6,0xb8,0xe0,
+- 0x9f,0xb8,0xcf,0x86,0xe5,0x6c,0xb8,0x94,0x07,0x63,0x57,0xb8,0x07,0x00,0x07,0x00,
+- 0xe4,0x5e,0xf1,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0x6d,0xde,
+- 0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x5c,0xdf,0xcf,0x86,0xe5,0x21,0xdf,
+- 0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x5c,0xdf,0xcf,0x06,0x13,0x00,0xcf,
+- 0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xf7,0xf0,0xe3,0xe0,0xef,0xd2,0xa0,0xe1,
+- 0x96,0xe3,0xd0,0x21,0xcf,0x86,0xe5,0x97,0xe0,0xe4,0x13,0xe0,0xe3,0xd1,0xdf,0xe2,
+- 0xb0,0xdf,0xe1,0x9e,0xdf,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,
+- 0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xf3,0xe1,0xe3,0xb2,0xe1,0xe2,0x91,0xe1,
+- 0xe1,0x80,0xe1,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,
+- 0x00,0xd4,0x34,0xd3,0x18,0xe2,0x7a,0xe2,0xe1,0x69,0xe2,0x10,0x09,0x05,0xff,0xf0,
+- 0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x9a,0xe2,0x91,0x11,
+- 0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,
+- 0xff,0xe5,0xac,0xbe,0x00,0xe3,0xe0,0xe2,0xd2,0x14,0xe1,0xaf,0xe2,0x10,0x08,0x05,
+- 0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0xbb,0xe2,0x10,
+- 0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,
+- 0x6a,0xcf,0x86,0xe5,0x10,0xe8,0xd4,0x19,0xe3,0x49,0xe7,0xe2,0x27,0xe7,0xe1,0x16,
+- 0xe7,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,
+- 0x18,0xe2,0x93,0xe7,0xe1,0x82,0xe7,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,
+- 0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xab,0xe7,0x10,0x08,0x05,0xff,
+- 0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,
+- 0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,
+- 0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0xad,0xe9,0xd4,0x1a,
+- 0xe3,0xe5,0xe8,0xe2,0xcb,0xe8,0xe1,0xb8,0xe8,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,
+- 0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x2d,0xe9,0xe1,0x1b,0xe9,
+- 0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,
+- 0xe1,0x49,0xe9,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,
+- 0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,
+- 0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,
+- 0xae,0x00,0xe0,0x5f,0xec,0xcf,0x86,0xd5,0x1d,0xe4,0xd4,0xea,0xe3,0x90,0xea,0xe2,
+- 0x6e,0xea,0xe1,0x5d,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,
+- 0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x7b,0xeb,0xe2,0x57,0xeb,0xe1,0x46,0xeb,0x10,
+- 0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,
+- 0xc6,0xeb,0xe1,0xb5,0xeb,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,
+- 0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xde,0xeb,0x10,0x08,0x05,0xff,0xe8,0x9a,
+- 0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,
+- 0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,
+- 0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x57,0x04,0x01,0x00,0xc6,0xd5,0x13,0xe4,0xa8,0x59,0xe3,0xe2,0x54,0xe2,0x5b,0x4f,
++ 0xc1,0xe0,0x87,0x4d,0xcf,0x06,0x01,0x00,0xd4,0xb8,0xd3,0x27,0xe2,0x89,0x9f,0xe1,
++ 0x91,0x8d,0xe0,0x21,0x71,0xcf,0x86,0xc5,0xe4,0x80,0x69,0xe3,0xcb,0x64,0xe2,0x61,
++ 0x62,0xe1,0x94,0x61,0xe0,0x59,0x61,0xcf,0x86,0xe5,0x1e,0x61,0x64,0x01,0x61,0x0b,
++ 0x00,0xd2,0x0e,0xe1,0x3f,0xa0,0xe0,0xba,0x9f,0xcf,0x86,0xcf,0x06,0x01,0x00,0xd1,
++ 0x0c,0xe0,0x1e,0xa5,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,
++ 0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x1b,0xb6,0xe3,0x95,
++ 0xad,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x87,0xa9,0xd0,0x21,0xcf,0x86,0xe5,0x81,
++ 0xa6,0xe4,0x00,0xa6,0xe3,0xbf,0xa5,0xe2,0x9e,0xa5,0xe1,0x8d,0xa5,0x10,0x08,0x01,
++ 0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0xcf,0x86,0xe5,0x63,0xa8,
++ 0xd4,0x19,0xe3,0xa2,0xa7,0xe2,0x81,0xa7,0xe1,0x70,0xa7,0x10,0x08,0x01,0xff,0xe9,
++ 0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,0x09,0xa8,0xe2,0xe8,0xa7,0xe1,
++ 0xd7,0xa7,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,0x9b,0xbb,0x00,
++ 0x83,0xe2,0xee,0xf5,0xe1,0xd8,0xf2,0xe0,0x55,0xf1,0xcf,0x86,0xd5,0x31,0xc4,0xe3,
++ 0xd5,0xcb,0xe2,0xae,0xc9,0xe1,0x8f,0xc8,0xe0,0x1f,0xbf,0xcf,0x86,0xe5,0x12,0xbb,
++ 0xe4,0x0b,0xba,0xe3,0xfc,0xb8,0xe2,0x53,0xb8,0xe1,0x2e,0xb8,0xe0,0x07,0xb8,0xcf,
++ 0x86,0xe5,0xd4,0xb7,0x94,0x07,0x63,0xbf,0xb7,0x07,0x00,0x07,0x00,0xe4,0xc1,0xf0,
++ 0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0xd0,0xdd,0xcf,0x86,0xcf,
++ 0x06,0x05,0x00,0xd1,0x0e,0xe0,0xbf,0xde,0xcf,0x86,0xe5,0x84,0xde,0xcf,0x06,0x11,
++ 0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xbf,0xde,0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,
++ 0xcf,0x06,0x00,0x00,0xe4,0x5a,0xf0,0xe3,0x43,0xef,0xd2,0xa0,0xe1,0xf9,0xe2,0xd0,
++ 0x21,0xcf,0x86,0xe5,0xfa,0xdf,0xe4,0x76,0xdf,0xe3,0x34,0xdf,0xe2,0x13,0xdf,0xe1,
++ 0x01,0xdf,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,
++ 0xcf,0x86,0xd5,0x1c,0xe4,0x56,0xe1,0xe3,0x15,0xe1,0xe2,0xf4,0xe0,0xe1,0xe3,0xe0,
++ 0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,
++ 0xd3,0x18,0xe2,0xdd,0xe1,0xe1,0xcc,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa1,0x9a,0xa8,
++ 0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0xfd,0xe1,0x91,0x11,0x10,0x09,0x05,
++ 0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,
++ 0xbe,0x00,0xe3,0x43,0xe2,0xd2,0x14,0xe1,0x12,0xe2,0x10,0x08,0x05,0xff,0xe5,0xaf,
++ 0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x1e,0xe2,0x10,0x08,0x05,0xff,
++ 0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,
++ 0xe5,0x73,0xe7,0xd4,0x19,0xe3,0xac,0xe6,0xe2,0x8a,0xe6,0xe1,0x79,0xe6,0x10,0x08,
++ 0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0xf6,
++ 0xe6,0xe1,0xe5,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,
++ 0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x0e,0xe7,0x10,0x08,0x05,0xff,0xe7,0x81,0xbd,
++ 0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,0xe7,0x85,0x85,
++ 0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,0x86,0x9c,0x00,
++ 0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x10,0xe9,0xd4,0x1a,0xe3,0x48,0xe8,
++ 0xe2,0x2e,0xe8,0xe1,0x1b,0xe8,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,0x00,0x05,0xff,
++ 0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x90,0xe8,0xe1,0x7e,0xe8,0x10,0x08,0x05,
++ 0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,0xe1,0xac,0xe8,
++ 0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,0x00,0xd1,0x12,
++ 0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,
++ 0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,
++ 0xc2,0xeb,0xcf,0x86,0xd5,0x1d,0xe4,0x37,0xea,0xe3,0xf3,0xe9,0xe2,0xd1,0xe9,0xe1,
++ 0xc0,0xe9,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,
++ 0x00,0xd4,0x19,0xe3,0xde,0xea,0xe2,0xba,0xea,0xe1,0xa9,0xea,0x10,0x08,0x05,0xff,
++ 0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,0x29,0xeb,0xe1,
++ 0x18,0xeb,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,
++ 0x92,0x00,0xd2,0x13,0xe1,0x41,0xeb,0x10,0x08,0x05,0xff,0xe8,0x9a,0x88,0x00,0x05,
++ 0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,0xa8,0x00,0x05,
++ 0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,0x05,0xff,0xe4,
++ 0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ /* nfdicf_30200 */
+- 0xd7,0x07,0x66,0x84,0x05,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x99,0x13,0xe3,0x63,0x0e,
+- 0xe2,0x4c,0x07,0xc1,0xe0,0x4e,0x06,0xcf,0x86,0x65,0x2d,0x06,0x01,0x00,0xd4,0x2a,
+- 0xe3,0xd0,0x35,0xe2,0x88,0x9c,0xe1,0xcd,0x2e,0xe0,0x2b,0x1b,0xcf,0x86,0xc5,0xe4,
+- 0x14,0x66,0xe3,0x5f,0x61,0xe2,0xf5,0x5e,0xe1,0x28,0x5e,0xe0,0xed,0x5d,0xcf,0x86,
+- 0xe5,0xb2,0x5d,0x64,0x95,0x5d,0x0b,0x00,0x83,0xe2,0xa7,0xf3,0xe1,0x80,0xf0,0xe0,
+- 0xfd,0xee,0xcf,0x86,0xd5,0x31,0xc4,0xe3,0xe2,0x47,0xe2,0x83,0x46,0xe1,0x32,0xc6,
+- 0xe0,0x2a,0x45,0xcf,0x86,0xe5,0x1c,0x43,0xe4,0x3d,0x40,0xe3,0x9f,0xb6,0xe2,0xf6,
+- 0xb5,0xe1,0xd1,0xb5,0xe0,0xaa,0xb5,0xcf,0x86,0xe5,0x77,0xb5,0x94,0x07,0x63,0x62,
+- 0xb5,0x07,0x00,0x07,0x00,0xe4,0x69,0xee,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,
+- 0xd2,0x0b,0xe1,0x78,0xdb,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x67,0xdc,
+- 0xcf,0x86,0xe5,0x2c,0xdc,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x67,0xdc,
+- 0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x02,0xee,0xe3,
+- 0xeb,0xec,0xd2,0xa0,0xe1,0xa1,0xe0,0xd0,0x21,0xcf,0x86,0xe5,0xa2,0xdd,0xe4,0x1e,
+- 0xdd,0xe3,0xdc,0xdc,0xe2,0xbb,0xdc,0xe1,0xa9,0xdc,0x10,0x08,0x05,0xff,0xe4,0xb8,
+- 0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xfe,0xde,0xe3,
+- 0xbd,0xde,0xe2,0x9c,0xde,0xe1,0x8b,0xde,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,
+- 0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0x85,0xdf,0xe1,0x74,0xdf,
++ 0xd7,0x07,0x66,0x84,0x05,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x96,0x13,0xe3,0x60,0x0e,
++ 0xe2,0x49,0x07,0xc1,0xe0,0x4b,0x06,0xcf,0x86,0x65,0x2d,0x06,0x01,0x00,0xd4,0x2a,
++ 0xe3,0xce,0x35,0xe2,0x02,0x9c,0xe1,0xca,0x2e,0xe0,0x28,0x1b,0xcf,0x86,0xc5,0xe4,
++ 0xf9,0x65,0xe3,0x44,0x61,0xe2,0xda,0x5e,0xe1,0x0d,0x5e,0xe0,0xd2,0x5d,0xcf,0x86,
++ 0xe5,0x97,0x5d,0x64,0x7a,0x5d,0x0b,0x00,0x83,0xe2,0xf6,0xf2,0xe1,0xe0,0xef,0xe0,
++ 0x5d,0xee,0xcf,0x86,0xd5,0x31,0xc4,0xe3,0xdf,0x47,0xe2,0x80,0x46,0xe1,0x97,0xc5,
++ 0xe0,0x27,0x45,0xcf,0x86,0xe5,0x19,0x43,0xe4,0x3a,0x40,0xe3,0x04,0xb6,0xe2,0x5b,
++ 0xb5,0xe1,0x36,0xb5,0xe0,0x0f,0xb5,0xcf,0x86,0xe5,0xdc,0xb4,0x94,0x07,0x63,0xc7,
++ 0xb4,0x07,0x00,0x07,0x00,0xe4,0xc9,0xed,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,
++ 0xd2,0x0b,0xe1,0xd8,0xda,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xc7,0xdb,
++ 0xcf,0x86,0xe5,0x8c,0xdb,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xc7,0xdb,
++ 0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x62,0xed,0xe3,
++ 0x4b,0xec,0xd2,0xa0,0xe1,0x01,0xe0,0xd0,0x21,0xcf,0x86,0xe5,0x02,0xdd,0xe4,0x7e,
++ 0xdc,0xe3,0x3c,0xdc,0xe2,0x1b,0xdc,0xe1,0x09,0xdc,0x10,0x08,0x05,0xff,0xe4,0xb8,
++ 0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x5e,0xde,0xe3,
++ 0x1d,0xde,0xe2,0xfc,0xdd,0xe1,0xeb,0xdd,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,
++ 0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xe5,0xde,0xe1,0xd4,0xde,
+ 0x10,0x09,0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,
+- 0xe2,0xa5,0xdf,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,
+- 0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0xeb,0xdf,0xd2,0x14,0xe1,
+- 0xba,0xdf,0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,
+- 0x00,0xe1,0xc6,0xdf,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,
+- 0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x1b,0xe5,0xd4,0x19,0xe3,0x54,0xe4,
+- 0xe2,0x32,0xe4,0xe1,0x21,0xe4,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,
+- 0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0x9e,0xe4,0xe1,0x8d,0xe4,0x10,0x09,0x05,0xff,
+- 0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xb6,
++ 0xe2,0x05,0xdf,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,
++ 0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x4b,0xdf,0xd2,0x14,0xe1,
++ 0x1a,0xdf,0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,
++ 0x00,0xe1,0x26,0xdf,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,
++ 0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x7b,0xe4,0xd4,0x19,0xe3,0xb4,0xe3,
++ 0xe2,0x92,0xe3,0xe1,0x81,0xe3,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,
++ 0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0xfe,0xe3,0xe1,0xed,0xe3,0x10,0x09,0x05,0xff,
++ 0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x16,
+ 0xe4,0x10,0x08,0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,
+ 0x11,0x10,0x08,0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,
+ 0x10,0x08,0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,
+- 0xe5,0xb8,0xe6,0xd4,0x1a,0xe3,0xf0,0xe5,0xe2,0xd6,0xe5,0xe1,0xc3,0xe5,0x10,0x08,
++ 0xe5,0x18,0xe6,0xd4,0x1a,0xe3,0x50,0xe5,0xe2,0x36,0xe5,0xe1,0x23,0xe5,0x10,0x08,
+ 0x05,0xff,0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,
+- 0x38,0xe6,0xe1,0x26,0xe6,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,
+- 0x83,0xa3,0x00,0xd2,0x13,0xe1,0x54,0xe6,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,
++ 0x98,0xe5,0xe1,0x86,0xe5,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,
++ 0x83,0xa3,0x00,0xd2,0x13,0xe1,0xb4,0xe5,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,
+ 0x05,0xff,0xe7,0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,
+ 0x00,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,
+- 0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,0x6a,0xe9,0xcf,0x86,0xd5,0x1d,0xe4,0xdf,
+- 0xe7,0xe3,0x9b,0xe7,0xe2,0x79,0xe7,0xe1,0x68,0xe7,0x10,0x09,0x05,0xff,0xf0,0xa3,
+- 0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x86,0xe8,0xe2,0x62,
+- 0xe8,0xe1,0x51,0xe8,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,
+- 0x8a,0x00,0xd3,0x18,0xe2,0xd1,0xe8,0xe1,0xc0,0xe8,0x10,0x09,0x05,0xff,0xf0,0xa6,
+- 0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xe9,0xe8,0x10,
++ 0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,0xca,0xe8,0xcf,0x86,0xd5,0x1d,0xe4,0x3f,
++ 0xe7,0xe3,0xfb,0xe6,0xe2,0xd9,0xe6,0xe1,0xc8,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa3,
++ 0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xe6,0xe7,0xe2,0xc2,
++ 0xe7,0xe1,0xb1,0xe7,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,
++ 0x8a,0x00,0xd3,0x18,0xe2,0x31,0xe8,0xe1,0x20,0xe8,0x10,0x09,0x05,0xff,0xf0,0xa6,
++ 0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x49,0xe8,0x10,
+ 0x08,0x05,0xff,0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,
+ 0x08,0x05,0xff,0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,
+ 0xff,0xe8,0x9e,0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,
+ /* nfdi_30200 */
+- 0x57,0x04,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x82,0x53,0xe3,0xbb,0x4e,0xe2,0x34,0x49,
+- 0xc1,0xe0,0x60,0x47,0xcf,0x86,0x65,0x44,0x47,0x01,0x00,0xd4,0x2a,0xe3,0x1c,0x9a,
+- 0xe2,0xcb,0x99,0xe1,0x9e,0x87,0xe0,0xf8,0x6a,0xcf,0x86,0xc5,0xe4,0x57,0x63,0xe3,
+- 0xa2,0x5e,0xe2,0x38,0x5c,0xe1,0x6b,0x5b,0xe0,0x30,0x5b,0xcf,0x86,0xe5,0xf5,0x5a,
+- 0x64,0xd8,0x5a,0x0b,0x00,0x83,0xe2,0xea,0xf0,0xe1,0xc3,0xed,0xe0,0x40,0xec,0xcf,
+- 0x86,0xd5,0x31,0xc4,0xe3,0xbb,0xc6,0xe2,0x94,0xc4,0xe1,0x75,0xc3,0xe0,0x05,0xba,
+- 0xcf,0x86,0xe5,0xf8,0xb5,0xe4,0xf1,0xb4,0xe3,0xe2,0xb3,0xe2,0x39,0xb3,0xe1,0x14,
+- 0xb3,0xe0,0xed,0xb2,0xcf,0x86,0xe5,0xba,0xb2,0x94,0x07,0x63,0xa5,0xb2,0x07,0x00,
+- 0x07,0x00,0xe4,0xac,0xeb,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,
+- 0xbb,0xd8,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xaa,0xd9,0xcf,0x86,0xe5,
+- 0x6f,0xd9,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xaa,0xd9,0xcf,0x06,0x13,
+- 0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x45,0xeb,0xe3,0x2e,0xea,0xd2,
+- 0xa0,0xe1,0xe4,0xdd,0xd0,0x21,0xcf,0x86,0xe5,0xe5,0xda,0xe4,0x61,0xda,0xe3,0x1f,
+- 0xda,0xe2,0xfe,0xd9,0xe1,0xec,0xd9,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,
+- 0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x41,0xdc,0xe3,0x00,0xdc,0xe2,
+- 0xdf,0xdb,0xe1,0xce,0xdb,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,
+- 0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xc8,0xdc,0xe1,0xb7,0xdc,0x10,0x09,0x05,
+- 0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0xe8,0xdc,
+- 0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,
+- 0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x2e,0xdd,0xd2,0x14,0xe1,0xfd,0xdc,0x10,
+- 0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x09,
+- 0xdd,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,
+- 0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x5e,0xe2,0xd4,0x19,0xe3,0x97,0xe1,0xe2,0x75,0xe1,
+- 0xe1,0x64,0xe1,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,
+- 0x00,0xd3,0x18,0xe2,0xe1,0xe1,0xe1,0xd0,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,
+- 0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xf9,0xe1,0x10,0x08,
+- 0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,
+- 0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,
+- 0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0xfb,0xe3,
+- 0xd4,0x1a,0xe3,0x33,0xe3,0xe2,0x19,0xe3,0xe1,0x06,0xe3,0x10,0x08,0x05,0xff,0xe7,
+- 0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x7b,0xe3,0xe1,
+- 0x69,0xe3,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,
+- 0xd2,0x13,0xe1,0x97,0xe3,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,
+- 0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,
+- 0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,
+- 0xe7,0xaa,0xae,0x00,0xe0,0xad,0xe6,0xcf,0x86,0xd5,0x1d,0xe4,0x22,0xe5,0xe3,0xde,
+- 0xe4,0xe2,0xbc,0xe4,0xe1,0xab,0xe4,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,
+- 0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xc9,0xe5,0xe2,0xa5,0xe5,0xe1,0x94,
+- 0xe5,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,
+- 0x18,0xe2,0x14,0xe6,0xe1,0x03,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,
+- 0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x2c,0xe6,0x10,0x08,0x05,0xff,
+- 0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,
+- 0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,
+- 0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x57,0x04,0x01,0x00,0xc6,0xd5,0x13,0xe4,0x68,0x53,0xe3,0xa2,0x4e,0xe2,0x1b,0x49,
++ 0xc1,0xe0,0x47,0x47,0xcf,0x06,0x01,0x00,0xd4,0x2a,0xe3,0x99,0x99,0xe2,0x48,0x99,
++ 0xe1,0x50,0x87,0xe0,0xe0,0x6a,0xcf,0x86,0xc5,0xe4,0x3f,0x63,0xe3,0x8a,0x5e,0xe2,
++ 0x20,0x5c,0xe1,0x53,0x5b,0xe0,0x18,0x5b,0xcf,0x86,0xe5,0xdd,0x5a,0x64,0xc0,0x5a,
++ 0x0b,0x00,0x83,0xe2,0x3c,0xf0,0xe1,0x26,0xed,0xe0,0xa3,0xeb,0xcf,0x86,0xd5,0x31,
++ 0xc4,0xe3,0x23,0xc6,0xe2,0xfc,0xc3,0xe1,0xdd,0xc2,0xe0,0x6d,0xb9,0xcf,0x86,0xe5,
++ 0x60,0xb5,0xe4,0x59,0xb4,0xe3,0x4a,0xb3,0xe2,0xa1,0xb2,0xe1,0x7c,0xb2,0xe0,0x55,
++ 0xb2,0xcf,0x86,0xe5,0x22,0xb2,0x94,0x07,0x63,0x0d,0xb2,0x07,0x00,0x07,0x00,0xe4,
++ 0x0f,0xeb,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0x1e,0xd8,0xcf,
++ 0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x0d,0xd9,0xcf,0x86,0xe5,0xd2,0xd8,0xcf,
++ 0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x0d,0xd9,0xcf,0x06,0x13,0x00,0xcf,0x86,
++ 0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xa8,0xea,0xe3,0x91,0xe9,0xd2,0xa0,0xe1,0x47,
++ 0xdd,0xd0,0x21,0xcf,0x86,0xe5,0x48,0xda,0xe4,0xc4,0xd9,0xe3,0x82,0xd9,0xe2,0x61,
++ 0xd9,0xe1,0x4f,0xd9,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,0xb8,
++ 0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xa4,0xdb,0xe3,0x63,0xdb,0xe2,0x42,0xdb,0xe1,
++ 0x31,0xdb,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,0x00,
++ 0xd4,0x34,0xd3,0x18,0xe2,0x2b,0xdc,0xe1,0x1a,0xdc,0x10,0x09,0x05,0xff,0xf0,0xa1,
++ 0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x4b,0xdc,0x91,0x11,0x10,
++ 0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,0xff,
++ 0xe5,0xac,0xbe,0x00,0xe3,0x91,0xdc,0xd2,0x14,0xe1,0x60,0xdc,0x10,0x08,0x05,0xff,
++ 0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x6c,0xdc,0x10,0x08,
++ 0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,0x6a,
++ 0xcf,0x86,0xe5,0xc1,0xe1,0xd4,0x19,0xe3,0xfa,0xe0,0xe2,0xd8,0xe0,0xe1,0xc7,0xe0,
++ 0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,0x18,
++ 0xe2,0x44,0xe1,0xe1,0x33,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,0x05,
++ 0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x5c,0xe1,0x10,0x08,0x05,0xff,0xe7,
++ 0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,0xe7,
++ 0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,0x86,
++ 0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x5e,0xe3,0xd4,0x1a,0xe3,
++ 0x96,0xe2,0xe2,0x7c,0xe2,0xe1,0x69,0xe2,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,0x00,
++ 0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0xde,0xe2,0xe1,0xcc,0xe2,0x10,
++ 0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,0xe1,
++ 0xfa,0xe2,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,0x00,
++ 0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,0xaa,
++ 0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,0xae,
++ 0x00,0xe0,0x10,0xe6,0xcf,0x86,0xd5,0x1d,0xe4,0x85,0xe4,0xe3,0x41,0xe4,0xe2,0x1f,
++ 0xe4,0xe1,0x0e,0xe4,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,0xe4,
++ 0x8f,0x95,0x00,0xd4,0x19,0xe3,0x2c,0xe5,0xe2,0x08,0xe5,0xe1,0xf7,0xe4,0x10,0x08,
++ 0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,0x77,
++ 0xe5,0xe1,0x66,0xe5,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,0xf0,
++ 0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x8f,0xe5,0x10,0x08,0x05,0xff,0xe8,0x9a,0x88,
++ 0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,0xa8,
++ 0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,0x05,
++ 0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ /* nfdicf_c0100 */
+ 0xd7,0xb0,0x56,0x04,0x01,0x00,0x95,0xa8,0xd4,0x5e,0xd3,0x2e,0xd2,0x16,0xd1,0x0a,
+ 0x10,0x04,0x01,0x00,0x01,0xff,0x61,0x00,0x10,0x06,0x01,0xff,0x62,0x00,0x01,0xff,
+@@ -299,3184 +299,3174 @@ static const unsigned char utf8data[64256] = {
+ 0xd1,0x0c,0x10,0x06,0x01,0xff,0x74,0x00,0x01,0xff,0x75,0x00,0x10,0x06,0x01,0xff,
+ 0x76,0x00,0x01,0xff,0x77,0x00,0x92,0x16,0xd1,0x0c,0x10,0x06,0x01,0xff,0x78,0x00,
+ 0x01,0xff,0x79,0x00,0x10,0x06,0x01,0xff,0x7a,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0xc6,0xe5,0xf9,0x14,0xe4,0x6f,0x0d,0xe3,0x39,0x08,0xe2,0x22,0x01,0xc1,0xd0,0x24,
+- 0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x07,0x63,0xd8,0x43,0x01,0x00,0x93,0x13,0x52,
+- 0x04,0x01,0x00,0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xce,0xbc,0x00,0x01,0x00,
+- 0x01,0x00,0xcf,0x86,0xe5,0xb3,0x44,0xd4,0x7f,0xd3,0x3f,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,
+- 0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x07,0x01,0xff,0xc3,
+- 0xa6,0x00,0x01,0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0x65,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,
+- 0x82,0x00,0x01,0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,
+- 0x80,0x00,0x01,0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00,
+- 0x01,0xff,0x69,0xcc,0x88,0x00,0xd3,0x3b,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,
+- 0xc3,0xb0,0x00,0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,
+- 0x00,0x01,0xff,0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,
+- 0x00,0x01,0xff,0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,
+- 0x00,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb8,0x00,0x01,0xff,0x75,0xcc,
+- 0x80,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,
+- 0x10,0x07,0x01,0xff,0xc3,0xbe,0x00,0x01,0xff,0x73,0x73,0x00,0xe1,0xd4,0x03,0xe0,
+- 0xeb,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0x61,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,
+- 0x61,0xcc,0x86,0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0x61,0xcc,0xa8,0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,
+- 0x81,0x00,0x01,0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0x63,0xcc,0x82,0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,
+- 0x87,0x00,0x01,0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,
+- 0x8c,0x00,0x01,0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x8c,0x00,
+- 0x01,0xff,0x64,0xcc,0x8c,0x00,0xd3,0x3b,0xd2,0x1b,0xd1,0x0b,0x10,0x07,0x01,0xff,
+- 0xc4,0x91,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x84,0x00,0x01,0xff,0x65,
+- 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x86,0x00,0x01,0xff,0x65,
+- 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,
+- 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa8,0x00,0x01,0xff,0x65,
+- 0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,
+- 0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,
+- 0x7b,0xd3,0x3b,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x87,0x00,0x01,
+- 0xff,0x67,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0xa7,0x00,0x01,0xff,0x67,
+- 0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0xff,0x68,
+- 0xcc,0x82,0x00,0x10,0x07,0x01,0xff,0xc4,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0x69,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,
+- 0x01,0xff,0x69,0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0x69,0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,
+- 0x69,0xcc,0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x37,0xd2,0x17,0xd1,0x0c,
+- 0x10,0x08,0x01,0xff,0x69,0xcc,0x87,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc4,0xb3,
+- 0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6a,0xcc,0x82,0x00,0x01,0xff,0x6a,
+- 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x6b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,
+- 0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6c,0xcc,0x81,0x00,0x10,
+- 0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,0x6c,0xcc,0xa7,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x10,0x08,0x01,
+- 0xff,0x6c,0xcc,0x8c,0x00,0x01,0xff,0xc5,0x80,0x00,0xcf,0x86,0xd5,0xed,0xd4,0x72,
+- 0xd3,0x37,0xd2,0x17,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc5,0x82,0x00,0x10,
+- 0x04,0x01,0x00,0x01,0xff,0x6e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
+- 0xcc,0x81,0x00,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,
+- 0x00,0x01,0xff,0x6e,0xcc,0x8c,0x00,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
+- 0xcc,0x8c,0x00,0x01,0xff,0xca,0xbc,0x6e,0x00,0x10,0x07,0x01,0xff,0xc5,0x8b,0x00,
+- 0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,
+- 0x84,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,
+- 0xd3,0x3b,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0xff,
+- 0x6f,0xcc,0x8b,0x00,0x10,0x07,0x01,0xff,0xc5,0x93,0x00,0x01,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x72,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,
+- 0xff,0x72,0xcc,0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x72,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,
+- 0xff,0x73,0xcc,0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x73,0xcc,0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x73,
+- 0xcc,0xa7,0x00,0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x7b,0xd3,0x3b,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x73,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,
+- 0x08,0x01,0xff,0x74,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x74,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x10,0x07,0x01,
+- 0xff,0xc5,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,
+- 0x83,0x00,0x01,0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x84,0x00,
+- 0x01,0xff,0x75,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x86,0x00,
+- 0x01,0xff,0x75,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x8a,0x00,0x01,0xff,
+- 0x75,0xcc,0x8a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,
+- 0x8b,0x00,0x01,0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa8,0x00,
+- 0x01,0xff,0x75,0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x82,0x00,
+- 0x01,0xff,0x77,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x82,0x00,0x01,0xff,
+- 0x79,0xcc,0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,0x88,0x00,
+- 0x01,0xff,0x7a,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,
+- 0x7a,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,
+- 0x7a,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0xff,0x73,0x00,
+- 0xe0,0x65,0x01,0xcf,0x86,0xd5,0xb4,0xd4,0x5a,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,
+- 0x04,0x01,0x00,0x01,0xff,0xc9,0x93,0x00,0x10,0x07,0x01,0xff,0xc6,0x83,0x00,0x01,
+- 0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,
+- 0xc9,0x94,0x00,0x01,0xff,0xc6,0x88,0x00,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x01,0x00,
+- 0x01,0xff,0xc9,0x96,0x00,0x10,0x07,0x01,0xff,0xc9,0x97,0x00,0x01,0xff,0xc6,0x8c,
+- 0x00,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xc7,0x9d,0x00,0x01,0xff,0xc9,0x99,
+- 0x00,0xd3,0x32,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0x9b,0x00,0x01,0xff,
+- 0xc6,0x92,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xa0,0x00,0xd1,0x0b,0x10,0x07,
+- 0x01,0xff,0xc9,0xa3,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0xa9,0x00,0x01,0xff,
+- 0xc9,0xa8,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0x99,0x00,0x01,0x00,
+- 0x01,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0xaf,0x00,0x01,0xff,0xc9,0xb2,0x00,
+- 0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xb5,0x00,0xd4,0x5d,0xd3,0x34,0xd2,0x1b,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x10,
+- 0x07,0x01,0xff,0xc6,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0xa5,
+- 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x80,0x00,0x01,0xff,0xc6,0xa8,0x00,0xd2,
+- 0x0f,0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x83,0x00,0x01,0x00,0xd1,0x0b,
+- 0x10,0x07,0x01,0xff,0xc6,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x88,0x00,
+- 0x01,0xff,0x75,0xcc,0x9b,0x00,0xd3,0x33,0xd2,0x1d,0xd1,0x0f,0x10,0x08,0x01,0xff,
+- 0x75,0xcc,0x9b,0x00,0x01,0xff,0xca,0x8a,0x00,0x10,0x07,0x01,0xff,0xca,0x8b,0x00,
+- 0x01,0xff,0xc6,0xb4,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc6,0xb6,0x00,
+- 0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x92,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,
+- 0xff,0xc6,0xb9,0x00,0x01,0x00,0x01,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xbd,
+- 0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x44,0xd3,0x16,0x52,0x04,0x01,
+- 0x00,0x51,0x07,0x01,0xff,0xc7,0x86,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc7,0x89,
+- 0x00,0xd2,0x12,0x91,0x0b,0x10,0x07,0x01,0xff,0xc7,0x89,0x00,0x01,0x00,0x01,0xff,
+- 0xc7,0x8c,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x61,0xcc,0x8c,0x00,0x10,
+- 0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,0x69,0xcc,0x8c,0x00,0xd3,0x46,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x6f,0xcc,0x8c,
+- 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x8c,0x00,0xd1,
+- 0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,
+- 0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x88,
+- 0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,
+- 0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,
+- 0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,
+- 0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x88,
+- 0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x87,0xd3,0x41,0xd2,
+- 0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,
+- 0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x01,0xff,
+- 0xc3,0xa6,0xcc,0x84,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc7,0xa5,0x00,0x01,0x00,
+- 0x10,0x08,0x01,0xff,0x67,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,
+- 0x10,0x08,0x01,0xff,0x6f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,
+- 0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,
+- 0x84,0x00,0x10,0x09,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,
+- 0x8c,0x00,0xd3,0x38,0xd2,0x1a,0xd1,0x0f,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,
+- 0x01,0xff,0xc7,0xb3,0x00,0x10,0x07,0x01,0xff,0xc7,0xb3,0x00,0x01,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0x67,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x10,0x07,
+- 0x04,0xff,0xc6,0x95,0x00,0x04,0xff,0xc6,0xbf,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,
+- 0x04,0xff,0x6e,0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,
+- 0x61,0xcc,0x8a,0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,
+- 0x10,0x09,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,
+- 0xe2,0x31,0x02,0xe1,0xc3,0x44,0xe0,0xc8,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,
+- 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x8f,0x00,0x01,0xff,0x61,
+- 0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,
+- 0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,
+- 0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,
+- 0x08,0x01,0xff,0x6f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,
+- 0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,
+- 0x08,0x01,0xff,0x75,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x04,0xff,0x73,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,
+- 0x08,0x04,0xff,0x74,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0xd1,0x0b,0x10,
+- 0x07,0x04,0xff,0xc8,0x9d,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x68,0xcc,0x8c,0x00,
+- 0x04,0xff,0x68,0xcc,0x8c,0x00,0xd4,0x79,0xd3,0x31,0xd2,0x16,0xd1,0x0b,0x10,0x07,
+- 0x06,0xff,0xc6,0x9e,0x00,0x07,0x00,0x10,0x07,0x04,0xff,0xc8,0xa3,0x00,0x04,0x00,
+- 0xd1,0x0b,0x10,0x07,0x04,0xff,0xc8,0xa5,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x61,
+- 0xcc,0x87,0x00,0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,
+- 0xff,0x65,0xcc,0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x6f,
+- 0xcc,0x88,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,
+- 0x0a,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,
+- 0x00,0x10,0x08,0x04,0xff,0x6f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0xd3,
+- 0x27,0xe2,0x21,0x43,0xd1,0x14,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,
+- 0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x79,0xcc,0x84,0x00,
+- 0x04,0xff,0x79,0xcc,0x84,0x00,0xd2,0x13,0x51,0x04,0x08,0x00,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0xa5,0x00,0x08,0xff,0xc8,0xbc,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,
+- 0xff,0xc6,0x9a,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa6,0x00,0x08,0x00,0xcf,0x86,
+- 0x95,0x5f,0x94,0x5b,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,
+- 0xc9,0x82,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xc6,0x80,0x00,0xd1,0x0e,0x10,0x07,
+- 0x09,0xff,0xca,0x89,0x00,0x09,0xff,0xca,0x8c,0x00,0x10,0x07,0x09,0xff,0xc9,0x87,
+- 0x00,0x09,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x89,0x00,0x09,0x00,
+- 0x10,0x07,0x09,0xff,0xc9,0x8b,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,
+- 0x8d,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xc9,0x8f,0x00,0x09,0x00,0x01,0x00,0x01,
+- 0x00,0xd1,0x8b,0xd0,0x0c,0xcf,0x86,0xe5,0x10,0x43,0x64,0xef,0x42,0x01,0xe6,0xcf,
+- 0x86,0xd5,0x2a,0xe4,0x99,0x43,0xe3,0x7f,0x43,0xd2,0x11,0xe1,0x5e,0x43,0x10,0x07,
+- 0x01,0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0xe1,0x65,0x43,0x10,0x09,0x01,
+- 0xff,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0x00,0xd4,0x0f,0x93,0x0b,0x92,
+- 0x07,0x61,0xab,0x43,0x01,0xea,0x06,0xe6,0x06,0xe6,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
+- 0x10,0x07,0x0a,0xff,0xcd,0xb1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb3,0x00,
+- 0x0a,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x10,0x07,0x0a,
+- 0xff,0xcd,0xb7,0x00,0x0a,0x00,0xd2,0x07,0x61,0x97,0x43,0x00,0x00,0x51,0x04,0x09,
+- 0x00,0x10,0x06,0x01,0xff,0x3b,0x00,0x10,0xff,0xcf,0xb3,0x00,0xe0,0x31,0x01,0xcf,
+- 0x86,0xd5,0xd3,0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,
+- 0x00,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,
+- 0x00,0x01,0xff,0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,
+- 0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,
+- 0xcc,0x81,0x00,0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,
+- 0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,
+- 0x81,0x00,0xd3,0x3c,0xd2,0x20,0xd1,0x12,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,
+- 0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0x00,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,
+- 0xff,0xce,0xb3,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb4,0x00,0x01,0xff,0xce,
+- 0xb5,0x00,0x10,0x07,0x01,0xff,0xce,0xb6,0x00,0x01,0xff,0xce,0xb7,0x00,0xd2,0x1c,
+- 0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb8,0x00,0x01,0xff,0xce,0xb9,0x00,0x10,0x07,
+- 0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xce,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,
+- 0xce,0xbc,0x00,0x01,0xff,0xce,0xbd,0x00,0x10,0x07,0x01,0xff,0xce,0xbe,0x00,0x01,
+- 0xff,0xce,0xbf,0x00,0xe4,0x85,0x43,0xd3,0x35,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,
+- 0xff,0xcf,0x80,0x00,0x01,0xff,0xcf,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,
+- 0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcf,0x84,0x00,0x01,0xff,0xcf,0x85,0x00,
+- 0x10,0x07,0x01,0xff,0xcf,0x86,0x00,0x01,0xff,0xcf,0x87,0x00,0xe2,0x2b,0x43,0xd1,
+- 0x0e,0x10,0x07,0x01,0xff,0xcf,0x88,0x00,0x01,0xff,0xcf,0x89,0x00,0x10,0x09,0x01,
+- 0xff,0xce,0xb9,0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xcf,0x86,0xd5,
+- 0x94,0xd4,0x3c,0xd3,0x13,0x92,0x0f,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,
+- 0x83,0x00,0x01,0x00,0x01,0x00,0xd2,0x07,0x61,0x3a,0x43,0x01,0x00,0xd1,0x12,0x10,
+- 0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,
+- 0x09,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x0a,0xff,0xcf,0x97,0x00,0xd3,0x2c,0xd2,
+- 0x11,0xe1,0x46,0x43,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb8,0x00,
+- 0xd1,0x10,0x10,0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0xff,0xcf,0x86,0x00,
+- 0x10,0x07,0x01,0xff,0xcf,0x80,0x00,0x04,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,
+- 0xff,0xcf,0x99,0x00,0x06,0x00,0x10,0x07,0x01,0xff,0xcf,0x9b,0x00,0x04,0x00,0xd1,
+- 0x0b,0x10,0x07,0x01,0xff,0xcf,0x9d,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0x9f,
+- 0x00,0x04,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,
+- 0xa1,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,
+- 0x07,0x01,0xff,0xcf,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xa7,0x00,0x01,
+- 0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa9,0x00,0x01,0x00,0x10,0x07,
+- 0x01,0xff,0xcf,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xad,0x00,
+- 0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xaf,0x00,0x01,0x00,0xd3,0x2b,0xd2,0x12,0x91,
+- 0x0e,0x10,0x07,0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xcf,0x81,0x00,0x01,0x00,0xd1,
+- 0x0e,0x10,0x07,0x05,0xff,0xce,0xb8,0x00,0x05,0xff,0xce,0xb5,0x00,0x10,0x04,0x06,
+- 0x00,0x07,0xff,0xcf,0xb8,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x07,0x00,0x07,0xff,
+- 0xcf,0xb2,0x00,0x10,0x07,0x07,0xff,0xcf,0xbb,0x00,0x07,0x00,0xd1,0x0b,0x10,0x04,
+- 0x08,0x00,0x08,0xff,0xcd,0xbb,0x00,0x10,0x07,0x08,0xff,0xcd,0xbc,0x00,0x08,0xff,
+- 0xcd,0xbd,0x00,0xe3,0xed,0x46,0xe2,0x3d,0x05,0xe1,0x27,0x02,0xe0,0x66,0x01,0xcf,
+- 0x86,0xd5,0xf0,0xd4,0x7e,0xd3,0x40,0xd2,0x22,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,
+- 0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x07,0x01,0xff,0xd1,
+- 0x92,0x00,0x01,0xff,0xd0,0xb3,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,
+- 0x94,0x00,0x01,0xff,0xd1,0x95,0x00,0x10,0x07,0x01,0xff,0xd1,0x96,0x00,0x01,0xff,
+- 0xd1,0x96,0xcc,0x88,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x98,0x00,
+- 0x01,0xff,0xd1,0x99,0x00,0x10,0x07,0x01,0xff,0xd1,0x9a,0x00,0x01,0xff,0xd1,0x9b,
+- 0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,
+- 0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0xff,0xd1,0x9f,
+- 0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xb0,0x00,0x01,0xff,
+- 0xd0,0xb1,0x00,0x10,0x07,0x01,0xff,0xd0,0xb2,0x00,0x01,0xff,0xd0,0xb3,0x00,0xd1,
+- 0x0e,0x10,0x07,0x01,0xff,0xd0,0xb4,0x00,0x01,0xff,0xd0,0xb5,0x00,0x10,0x07,0x01,
+- 0xff,0xd0,0xb6,0x00,0x01,0xff,0xd0,0xb7,0x00,0xd2,0x1e,0xd1,0x10,0x10,0x07,0x01,
+- 0xff,0xd0,0xb8,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x10,0x07,0x01,0xff,0xd0,
+- 0xba,0x00,0x01,0xff,0xd0,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xbc,0x00,
+- 0x01,0xff,0xd0,0xbd,0x00,0x10,0x07,0x01,0xff,0xd0,0xbe,0x00,0x01,0xff,0xd0,0xbf,
+- 0x00,0xe4,0x25,0x42,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x80,
+- 0x00,0x01,0xff,0xd1,0x81,0x00,0x10,0x07,0x01,0xff,0xd1,0x82,0x00,0x01,0xff,0xd1,
+- 0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x84,0x00,0x01,0xff,0xd1,0x85,0x00,
+- 0x10,0x07,0x01,0xff,0xd1,0x86,0x00,0x01,0xff,0xd1,0x87,0x00,0xd2,0x1c,0xd1,0x0e,
+- 0x10,0x07,0x01,0xff,0xd1,0x88,0x00,0x01,0xff,0xd1,0x89,0x00,0x10,0x07,0x01,0xff,
+- 0xd1,0x8a,0x00,0x01,0xff,0xd1,0x8b,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x8c,
+- 0x00,0x01,0xff,0xd1,0x8d,0x00,0x10,0x07,0x01,0xff,0xd1,0x8e,0x00,0x01,0xff,0xd1,
+- 0x8f,0x00,0xcf,0x86,0xd5,0x07,0x64,0xcf,0x41,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,
+- 0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,
+- 0xd1,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa5,0x00,0x01,0x00,
+- 0x10,0x07,0x01,0xff,0xd1,0xa7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,
+- 0xff,0xd1,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xab,0x00,0x01,0x00,0xd1,
+- 0x0b,0x10,0x07,0x01,0xff,0xd1,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xaf,
+- 0x00,0x01,0x00,0xd3,0x33,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb1,0x00,
+- 0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xb3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,
+- 0xff,0xd1,0xb5,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,
+- 0xff,0xd1,0xb5,0xcc,0x8f,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb9,
+- 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,
+- 0x01,0xff,0xd1,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbf,0x00,0x01,0x00,
+- 0xe0,0x41,0x01,0xcf,0x86,0xd5,0x8e,0xd4,0x36,0xd3,0x11,0xe2,0x91,0x41,0xe1,0x88,
+- 0x41,0x10,0x07,0x01,0xff,0xd2,0x81,0x00,0x01,0x00,0xd2,0x0f,0x51,0x04,0x04,0x00,
+- 0x10,0x07,0x06,0xff,0xd2,0x8b,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,0xd2,
+- 0x8d,0x00,0x04,0x00,0x10,0x07,0x04,0xff,0xd2,0x8f,0x00,0x04,0x00,0xd3,0x2c,0xd2,
+- 0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x91,0x00,0x01,0x00,0x10,0x07,0x01,0xff,
+- 0xd2,0x93,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x95,0x00,0x01,0x00,
+- 0x10,0x07,0x01,0xff,0xd2,0x97,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,
+- 0xff,0xd2,0x99,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9b,0x00,0x01,0x00,0xd1,
+- 0x0b,0x10,0x07,0x01,0xff,0xd2,0x9d,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9f,
+- 0x00,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,
+- 0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,
+- 0x07,0x01,0xff,0xd2,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa7,0x00,0x01,
+- 0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa9,0x00,0x01,0x00,0x10,0x07,
+- 0x01,0xff,0xd2,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xad,0x00,
+- 0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xaf,0x00,0x01,0x00,0xd3,0x2c,0xd2,0x16,0xd1,
+- 0x0b,0x10,0x07,0x01,0xff,0xd2,0xb1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xb3,
+- 0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb5,0x00,0x01,0x00,0x10,0x07,
+- 0x01,0xff,0xd2,0xb7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,
+- 0xb9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,
+- 0x07,0x01,0xff,0xd2,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbf,0x00,0x01,
+- 0x00,0xcf,0x86,0xd5,0xdc,0xd4,0x5a,0xd3,0x36,0xd2,0x20,0xd1,0x10,0x10,0x07,0x01,
+- 0xff,0xd3,0x8f,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,
+- 0xb6,0xcc,0x86,0x00,0x01,0xff,0xd3,0x84,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x06,
+- 0xff,0xd3,0x86,0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x88,0x00,0xd2,0x16,0xd1,
+- 0x0b,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8a,0x00,0x10,0x04,0x06,0x00,0x01,0xff,
+- 0xd3,0x8c,0x00,0xe1,0x69,0x40,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8e,0x00,0xd3,
+- 0x41,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x01,0xff,
+- 0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x01,0xff,
+- 0xd0,0xb0,0xcc,0x88,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x95,0x00,0x01,0x00,
+- 0x10,0x09,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,
+- 0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x99,0x00,0x01,0x00,0x10,0x09,0x01,
+- 0xff,0xd3,0x99,0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,
+- 0x09,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,
+- 0x09,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,
+- 0x82,0xd3,0x41,0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa1,0x00,0x01,0x00,
+- 0x10,0x09,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,
+- 0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,
+- 0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,
+- 0x88,0x00,0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa9,0x00,0x01,0x00,0x10,
+- 0x09,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,
+- 0x12,0x10,0x09,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,
+- 0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,
+- 0x00,0xd3,0x41,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,
+- 0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,
+- 0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x87,0xcc,
+- 0x88,0x00,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,0x10,0x07,0x08,0xff,0xd3,0xb7,0x00,
+- 0x08,0x00,0xd2,0x1d,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x01,
+- 0xff,0xd1,0x8b,0xcc,0x88,0x00,0x10,0x07,0x09,0xff,0xd3,0xbb,0x00,0x09,0x00,0xd1,
+- 0x0b,0x10,0x07,0x09,0xff,0xd3,0xbd,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd3,0xbf,
+- 0x00,0x09,0x00,0xe1,0x26,0x02,0xe0,0x78,0x01,0xcf,0x86,0xd5,0xb0,0xd4,0x58,0xd3,
+- 0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x81,0x00,0x06,0x00,0x10,0x07,
+- 0x06,0xff,0xd4,0x83,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x85,0x00,
+- 0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x87,0x00,0x06,0x00,0xd2,0x16,0xd1,0x0b,0x10,
+- 0x07,0x06,0xff,0xd4,0x89,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8b,0x00,0x06,
+- 0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x8d,0x00,0x06,0x00,0x10,0x07,0x06,0xff,
+- 0xd4,0x8f,0x00,0x06,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xd4,
+- 0x91,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd4,0x93,0x00,0x09,0x00,0xd1,0x0b,0x10,
+- 0x07,0x0a,0xff,0xd4,0x95,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x97,0x00,0x0a,
+- 0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x99,0x00,0x0a,0x00,0x10,0x07,
+- 0x0a,0xff,0xd4,0x9b,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x9d,0x00,
+- 0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x9f,0x00,0x0a,0x00,0xd4,0x58,0xd3,0x2c,0xd2,
+- 0x16,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0xa1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,
+- 0xd4,0xa3,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xd4,0xa5,0x00,0x0b,0x00,
+- 0x10,0x07,0x0c,0xff,0xd4,0xa7,0x00,0x0c,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x10,
+- 0xff,0xd4,0xa9,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xab,0x00,0x10,0x00,0xd1,
+- 0x0b,0x10,0x07,0x10,0xff,0xd4,0xad,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xaf,
+- 0x00,0x10,0x00,0xd3,0x35,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,
+- 0xa1,0x00,0x10,0x07,0x01,0xff,0xd5,0xa2,0x00,0x01,0xff,0xd5,0xa3,0x00,0xd1,0x0e,
+- 0x10,0x07,0x01,0xff,0xd5,0xa4,0x00,0x01,0xff,0xd5,0xa5,0x00,0x10,0x07,0x01,0xff,
+- 0xd5,0xa6,0x00,0x01,0xff,0xd5,0xa7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,
+- 0xd5,0xa8,0x00,0x01,0xff,0xd5,0xa9,0x00,0x10,0x07,0x01,0xff,0xd5,0xaa,0x00,0x01,
+- 0xff,0xd5,0xab,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xac,0x00,0x01,0xff,0xd5,
+- 0xad,0x00,0x10,0x07,0x01,0xff,0xd5,0xae,0x00,0x01,0xff,0xd5,0xaf,0x00,0xcf,0x86,
+- 0xe5,0x08,0x3f,0xd4,0x70,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,
+- 0xb0,0x00,0x01,0xff,0xd5,0xb1,0x00,0x10,0x07,0x01,0xff,0xd5,0xb2,0x00,0x01,0xff,
+- 0xd5,0xb3,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb4,0x00,0x01,0xff,0xd5,0xb5,
+- 0x00,0x10,0x07,0x01,0xff,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb7,0x00,0xd2,0x1c,0xd1,
+- 0x0e,0x10,0x07,0x01,0xff,0xd5,0xb8,0x00,0x01,0xff,0xd5,0xb9,0x00,0x10,0x07,0x01,
+- 0xff,0xd5,0xba,0x00,0x01,0xff,0xd5,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,
+- 0xbc,0x00,0x01,0xff,0xd5,0xbd,0x00,0x10,0x07,0x01,0xff,0xd5,0xbe,0x00,0x01,0xff,
+- 0xd5,0xbf,0x00,0xe3,0x87,0x3e,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x80,
+- 0x00,0x01,0xff,0xd6,0x81,0x00,0x10,0x07,0x01,0xff,0xd6,0x82,0x00,0x01,0xff,0xd6,
+- 0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x84,0x00,0x01,0xff,0xd6,0x85,0x00,
+- 0x10,0x07,0x01,0xff,0xd6,0x86,0x00,0x00,0x00,0xe0,0x2f,0x3f,0xcf,0x86,0xe5,0xc0,
+- 0x3e,0xe4,0x97,0x3e,0xe3,0x76,0x3e,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x01,0xff,0xd5,0xa5,0xd6,0x82,0x00,0xe4,0x3e,0x25,0xe3,0xc3,0x1a,
+- 0xe2,0x7b,0x81,0xe1,0xc0,0x13,0xd0,0x1e,0xcf,0x86,0xc5,0xe4,0x08,0x4b,0xe3,0x53,
+- 0x46,0xe2,0xe9,0x43,0xe1,0x1c,0x43,0xe0,0xe1,0x42,0xcf,0x86,0xe5,0xa6,0x42,0x64,
+- 0x89,0x42,0x0b,0x00,0xcf,0x86,0xe5,0xfa,0x01,0xe4,0x03,0x56,0xe3,0x76,0x01,0xe2,
+- 0x8e,0x53,0xd1,0x0c,0xe0,0xef,0x52,0xcf,0x86,0x65,0x8d,0x52,0x04,0x00,0xe0,0x0d,
+- 0x01,0xcf,0x86,0xd5,0x0a,0xe4,0x10,0x53,0x63,0xff,0x52,0x0a,0x00,0xd4,0x80,0xd3,
+- 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x80,0x00,0x01,0xff,0xe2,
+- 0xb4,0x81,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x82,0x00,0x01,0xff,0xe2,0xb4,0x83,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x84,0x00,0x01,0xff,0xe2,0xb4,0x85,
+- 0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x86,0x00,0x01,0xff,0xe2,0xb4,0x87,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x88,0x00,0x01,0xff,0xe2,0xb4,0x89,
+- 0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x8a,0x00,0x01,0xff,0xe2,0xb4,0x8b,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x8c,0x00,0x01,0xff,0xe2,0xb4,0x8d,0x00,0x10,
+- 0x08,0x01,0xff,0xe2,0xb4,0x8e,0x00,0x01,0xff,0xe2,0xb4,0x8f,0x00,0xd3,0x40,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x90,0x00,0x01,0xff,0xe2,0xb4,0x91,
+- 0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x92,0x00,0x01,0xff,0xe2,0xb4,0x93,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x94,0x00,0x01,0xff,0xe2,0xb4,0x95,0x00,0x10,
+- 0x08,0x01,0xff,0xe2,0xb4,0x96,0x00,0x01,0xff,0xe2,0xb4,0x97,0x00,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x98,0x00,0x01,0xff,0xe2,0xb4,0x99,0x00,0x10,
+- 0x08,0x01,0xff,0xe2,0xb4,0x9a,0x00,0x01,0xff,0xe2,0xb4,0x9b,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe2,0xb4,0x9c,0x00,0x01,0xff,0xe2,0xb4,0x9d,0x00,0x10,0x08,0x01,
+- 0xff,0xe2,0xb4,0x9e,0x00,0x01,0xff,0xe2,0xb4,0x9f,0x00,0xcf,0x86,0xe5,0x42,0x52,
+- 0x94,0x50,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa0,0x00,
+- 0x01,0xff,0xe2,0xb4,0xa1,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa2,0x00,0x01,0xff,
+- 0xe2,0xb4,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa4,0x00,0x01,0xff,
+- 0xe2,0xb4,0xa5,0x00,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xa7,0x00,0x52,0x04,
+- 0x00,0x00,0x91,0x0c,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xad,0x00,0x00,0x00,
+- 0x01,0x00,0xd2,0x1b,0xe1,0xfc,0x52,0xe0,0xad,0x52,0xcf,0x86,0x95,0x0f,0x94,0x0b,
+- 0x93,0x07,0x62,0x92,0x52,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd1,0x13,0xe0,
+- 0xd3,0x53,0xcf,0x86,0x95,0x0a,0xe4,0xa8,0x53,0x63,0x97,0x53,0x04,0x00,0x04,0x00,
+- 0xd0,0x0d,0xcf,0x86,0x95,0x07,0x64,0x22,0x54,0x08,0x00,0x04,0x00,0xcf,0x86,0x55,
+- 0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x07,0x62,0x2f,0x54,0x04,0x00,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0xb0,0x00,0x11,0xff,0xe1,0x8f,0xb1,0x00,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0xb2,0x00,0x11,0xff,0xe1,0x8f,0xb3,0x00,0x91,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0xb4,0x00,0x11,0xff,0xe1,0x8f,0xb5,0x00,0x00,0x00,
+- 0xd4,0x1c,0xe3,0xe0,0x56,0xe2,0x17,0x56,0xe1,0xda,0x55,0xe0,0xbb,0x55,0xcf,0x86,
+- 0x95,0x0a,0xe4,0xa4,0x55,0x63,0x88,0x55,0x04,0x00,0x04,0x00,0xe3,0xd2,0x01,0xe2,
+- 0x2b,0x5a,0xd1,0x0c,0xe0,0x4c,0x59,0xcf,0x86,0x65,0x25,0x59,0x0a,0x00,0xe0,0x9c,
+- 0x59,0xcf,0x86,0xd5,0xc5,0xd4,0x45,0xd3,0x31,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x12,
+- 0xff,0xd0,0xb2,0x00,0x12,0xff,0xd0,0xb4,0x00,0x10,0x07,0x12,0xff,0xd0,0xbe,0x00,
+- 0x12,0xff,0xd1,0x81,0x00,0x51,0x07,0x12,0xff,0xd1,0x82,0x00,0x10,0x07,0x12,0xff,
+- 0xd1,0x8a,0x00,0x12,0xff,0xd1,0xa3,0x00,0x92,0x10,0x91,0x0c,0x10,0x08,0x12,0xff,
+- 0xea,0x99,0x8b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x14,0xff,0xe1,0x83,0x90,0x00,0x14,0xff,0xe1,0x83,0x91,0x00,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0x92,0x00,0x14,0xff,0xe1,0x83,0x93,0x00,0xd1,0x10,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0x94,0x00,0x14,0xff,0xe1,0x83,0x95,0x00,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0x96,0x00,0x14,0xff,0xe1,0x83,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0x98,0x00,0x14,0xff,0xe1,0x83,0x99,0x00,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0x9a,0x00,0x14,0xff,0xe1,0x83,0x9b,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0x9c,0x00,0x14,0xff,0xe1,0x83,0x9d,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
+- 0x9e,0x00,0x14,0xff,0xe1,0x83,0x9f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x14,0xff,0xe1,0x83,0xa0,0x00,0x14,0xff,0xe1,0x83,0xa1,0x00,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0xa2,0x00,0x14,0xff,0xe1,0x83,0xa3,0x00,0xd1,0x10,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0xa4,0x00,0x14,0xff,0xe1,0x83,0xa5,0x00,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0xa6,0x00,0x14,0xff,0xe1,0x83,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0xa8,0x00,0x14,0xff,0xe1,0x83,0xa9,0x00,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0xaa,0x00,0x14,0xff,0xe1,0x83,0xab,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0xac,0x00,0x14,0xff,0xe1,0x83,0xad,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
+- 0xae,0x00,0x14,0xff,0xe1,0x83,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x14,0xff,0xe1,0x83,0xb0,0x00,0x14,0xff,0xe1,0x83,0xb1,0x00,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0xb2,0x00,0x14,0xff,0xe1,0x83,0xb3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0xb4,0x00,0x14,0xff,0xe1,0x83,0xb5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
+- 0xb6,0x00,0x14,0xff,0xe1,0x83,0xb7,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x14,0xff,
+- 0xe1,0x83,0xb8,0x00,0x14,0xff,0xe1,0x83,0xb9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,
+- 0xba,0x00,0x00,0x00,0xd1,0x0c,0x10,0x04,0x00,0x00,0x14,0xff,0xe1,0x83,0xbd,0x00,
+- 0x10,0x08,0x14,0xff,0xe1,0x83,0xbe,0x00,0x14,0xff,0xe1,0x83,0xbf,0x00,0xe2,0x9d,
+- 0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,0x40,
+- 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa5,0x00,0x01,0xff,0x61,0xcc,
+- 0xa5,0x00,0x10,0x08,0x01,0xff,0x62,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,0x00,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x62,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,0x00,
+- 0x10,0x08,0x01,0xff,0x62,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,0x24,
+- 0xd1,0x14,0x10,0x0a,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,
+- 0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x87,0x00,0x01,0xff,0x64,0xcc,
+- 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa3,0x00,0x01,0xff,0x64,0xcc,
+- 0xa3,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,0x00,
+- 0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa7,0x00,0x01,0xff,
+- 0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xad,0x00,0x01,0xff,0x64,0xcc,
+- 0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,
+- 0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,
+- 0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0x65,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,
+- 0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,
+- 0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,
+- 0x66,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,0x00,
+- 0x10,0x08,0x01,0xff,0x68,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0x68,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,0x08,
+- 0x01,0xff,0x68,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0x68,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,0x08,
+- 0x01,0xff,0x68,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0x69,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,0xff,
+- 0x69,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,0x40,
+- 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x81,0x00,0x01,0xff,0x6b,0xcc,
+- 0x81,0x00,0x10,0x08,0x01,0xff,0x6b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,0x00,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,0x00,
+- 0x10,0x08,0x01,0xff,0x6c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,0x24,
+- 0xd1,0x14,0x10,0x0a,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,0xcc,
+- 0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0xb1,0x00,0x01,0xff,0x6c,0xcc,
+- 0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xad,0x00,0x01,0xff,0x6c,0xcc,
+- 0xad,0x00,0x10,0x08,0x01,0xff,0x6d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,0x00,
+- 0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x6d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6d,
+- 0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
+- 0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa3,
+- 0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,
+- 0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xad,
+- 0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,
+- 0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,
+- 0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,0xd2,
+- 0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x6f,
+- 0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0x01,
+- 0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x70,0xcc,0x81,
+- 0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x70,0xcc,0x87,0x00,0x01,
+- 0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x87,
+- 0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xa3,0x00,0x01,
+- 0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,
+- 0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xb1,
+- 0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x73,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,0x01,
+- 0xff,0x73,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,
+- 0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x10,
+- 0x0a,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,
+- 0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x01,
+- 0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0x87,0x00,0x01,
+- 0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xa3,0x00,0x01,
+- 0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0xb1,0x00,0x01,0xff,0x74,
+- 0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xad,
+- 0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa4,0x00,0x01,
+- 0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xb0,0x00,0x01,
+- 0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xad,0x00,0x01,0xff,0x75,
+- 0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,
+- 0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x84,
+- 0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x76,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x76,
+- 0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x11,0x02,0xcf,0x86,0xd5,0xe2,
+- 0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x80,0x00,
+- 0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x81,0x00,0x01,0xff,
+- 0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x88,0x00,0x01,0xff,
+- 0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x87,0x00,0x01,0xff,0x77,0xcc,
+- 0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0xa3,0x00,0x01,0xff,
+- 0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x78,0xcc,0x87,0x00,0x01,0xff,0x78,0xcc,
+- 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x78,0xcc,0x88,0x00,0x01,0xff,0x78,0xcc,
+- 0x88,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,0x00,
+- 0xd3,0x33,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x82,0x00,0x01,0xff,
+- 0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0xa3,0x00,0x01,0xff,0x7a,0xcc,
+- 0xa3,0x00,0xe1,0x12,0x59,0x10,0x08,0x01,0xff,0x7a,0xcc,0xb1,0x00,0x01,0xff,0x7a,
+- 0xcc,0xb1,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,
+- 0xff,0x79,0xcc,0x8a,0x00,0x10,0x08,0x01,0xff,0x61,0xca,0xbe,0x00,0x02,0xff,0x73,
+- 0xcc,0x87,0x00,0x51,0x04,0x0a,0x00,0x10,0x07,0x0a,0xff,0x73,0x73,0x00,0x0a,0x00,
+- 0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa3,0x00,
+- 0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x89,0x00,0x01,0xff,
+- 0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,
+- 0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,
+- 0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,
+- 0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,
+- 0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,
+- 0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,
+- 0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,
+- 0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,
+- 0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,
+- 0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,
+- 0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,
+- 0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,
+- 0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0x65,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,
+- 0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,
+- 0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,
+- 0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4,
+- 0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,
+- 0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,
+- 0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,
+- 0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,
+- 0x0a,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,
+- 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x89,0x00,0x01,0xff,0x69,
+- 0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,
+- 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,
+- 0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x01,
+- 0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,
+- 0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,
+- 0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,
+- 0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,
+- 0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,
+- 0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0x01,
+- 0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,
+- 0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,
+- 0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3,
+- 0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x01,
+- 0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,
+- 0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,
+- 0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x89,
+- 0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,
+- 0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01,
+- 0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,
+- 0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,
+- 0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,
+- 0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,
+- 0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01,
+- 0xff,0x79,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x79,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,
+- 0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x79,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x10,0x08,0x0a,0xff,0xe1,
+- 0xbb,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbd,0x00,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbf,0x00,0x0a,0x00,0xe1,0xbf,0x02,0xe0,0xa1,
+- 0x01,0xcf,0x86,0xd5,0xc6,0xd4,0x6c,0xd3,0x18,0xe2,0x0e,0x59,0xe1,0xf7,0x58,0x10,
+- 0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,0xd2,
+- 0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,
+- 0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,
+- 0xce,0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,
+- 0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,
+- 0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,
+- 0x00,0xd3,0x18,0xe2,0x4a,0x59,0xe1,0x33,0x59,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,
+- 0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,
+- 0xff,0xce,0xb5,0xcc,0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,
+- 0xff,0xce,0xb5,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,
+- 0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,
+- 0xce,0xb5,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd4,0x6c,0xd3,0x18,0xe2,0x74,0x59,
+- 0xe1,0x5d,0x59,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,
+- 0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,
+- 0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,
+- 0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,
+- 0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,
+- 0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,
+- 0xcc,0x94,0xcd,0x82,0x00,0xd3,0x18,0xe2,0xb0,0x59,0xe1,0x99,0x59,0x10,0x09,0x01,
+- 0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0xd2,0x28,0xd1,
+- 0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,
+- 0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,
+- 0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,
+- 0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,
+- 0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,0xcf,
+- 0x86,0xd5,0xac,0xd4,0x5a,0xd3,0x18,0xe2,0xed,0x59,0xe1,0xd6,0x59,0x10,0x09,0x01,
+- 0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0xd2,0x28,0xd1,
+- 0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,
+- 0x00,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,
+- 0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,
+- 0x81,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x18,0xe2,
+- 0x17,0x5a,0xe1,0x00,0x5a,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,
+- 0xcf,0x85,0xcc,0x94,0x00,0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,
+- 0x85,0xcc,0x94,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x80,
+- 0x00,0xd1,0x0f,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,
+- 0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xe4,0xd3,0x5a,
+- 0xd3,0x18,0xe2,0x52,0x5a,0xe1,0x3b,0x5a,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,
+- 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,
+- 0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,
+- 0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0x00,
+- 0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xcf,
+- 0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,
+- 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xe0,0xd9,0x02,0xcf,0x86,0xe5,
+- 0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,
+- 0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,
+- 0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,
+- 0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,
+- 0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,
+- 0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,
+- 0xb1,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,
+- 0xce,0xb1,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,
+- 0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,
+- 0xb1,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,
+- 0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,
+- 0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,
+- 0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x64,0xd2,0x30,0xd1,0x16,
+- 0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,
+- 0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,
+- 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,
+- 0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,
+- 0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,
+- 0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,
+- 0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,
+- 0xb7,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,
+- 0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,
+- 0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,
+- 0xb7,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,
+- 0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,
+- 0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,
+- 0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,
+- 0xcf,0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,
+- 0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,
+- 0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,
+- 0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,
+- 0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,
+- 0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,
+- 0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,
+- 0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,
+- 0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,
+- 0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,
+- 0x89,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x49,0xd2,0x26,0xd1,0x12,0x10,0x09,
+- 0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,0x84,0x00,0x10,0x0b,
+- 0x01,0xff,0xce,0xb1,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,
+- 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,
+- 0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcd,0x82,0xce,0xb9,
+- 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,
+- 0xce,0xb1,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,
+- 0xce,0xb1,0xcc,0x81,0x00,0xe1,0xf3,0x5a,0x10,0x09,0x01,0xff,0xce,0xb1,0xce,0xb9,
+- 0x00,0x01,0x00,0xcf,0x86,0xd5,0xbd,0xd4,0x7e,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,
+- 0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,
+- 0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,
+- 0x01,0xff,0xce,0xb7,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,
+- 0xb7,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,
+- 0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,
+- 0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,
+- 0x00,0xe1,0x02,0x5b,0x10,0x09,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0x01,0xff,0xe1,
+- 0xbe,0xbf,0xcc,0x80,0x00,0xd3,0x18,0xe2,0x28,0x5b,0xe1,0x11,0x5b,0x10,0x09,0x01,
+- 0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0xe2,0x4c,0x5b,
+- 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,
+- 0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,
+- 0x81,0x00,0xd4,0x51,0xd3,0x18,0xe2,0x6f,0x5b,0xe1,0x58,0x5b,0x10,0x09,0x01,0xff,
+- 0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0xd2,0x24,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,
+- 0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,
+- 0xe1,0x8f,0x5b,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,
+- 0xcc,0x80,0x00,0xd3,0x3b,0xd2,0x18,0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,
+- 0x89,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0xd1,0x0f,0x10,
+- 0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,
+- 0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,
+- 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,
+- 0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,
+- 0x81,0x00,0xe1,0x99,0x5b,0x10,0x09,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0x01,0xff,
+- 0xc2,0xb4,0x00,0xe0,0x0c,0x68,0xcf,0x86,0xe5,0x23,0x02,0xe4,0x25,0x01,0xe3,0x85,
+- 0x5e,0xd2,0x2a,0xe1,0x5f,0x5c,0xe0,0xdd,0x5b,0xcf,0x86,0xe5,0xbb,0x5b,0x94,0x1b,
+- 0xe3,0xa4,0x5b,0x92,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,
+- 0xff,0xe2,0x80,0x83,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd1,0xd6,0xd0,0x46,0xcf,
+- 0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,0x01,
+- 0x00,0x10,0x07,0x01,0xff,0xcf,0x89,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,0x00,
+- 0x10,0x06,0x01,0xff,0x6b,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x01,0x00,0xe3,0x25,
+- 0x5d,0x92,0x10,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0x8e,0x00,0x01,
+- 0x00,0x01,0x00,0xcf,0x86,0xd5,0x0a,0xe4,0x42,0x5d,0x63,0x2d,0x5d,0x06,0x00,0x94,
+- 0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb0,0x00,0x01,
+- 0xff,0xe2,0x85,0xb1,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb2,0x00,0x01,0xff,0xe2,
+- 0x85,0xb3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb4,0x00,0x01,0xff,0xe2,
+- 0x85,0xb5,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb6,0x00,0x01,0xff,0xe2,0x85,0xb7,
+- 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb8,0x00,0x01,0xff,0xe2,
+- 0x85,0xb9,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xba,0x00,0x01,0xff,0xe2,0x85,0xbb,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xbc,0x00,0x01,0xff,0xe2,0x85,0xbd,
+- 0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xbe,0x00,0x01,0xff,0xe2,0x85,0xbf,0x00,0x01,
+- 0x00,0xe0,0x34,0x5d,0xcf,0x86,0xe5,0x13,0x5d,0xe4,0xf2,0x5c,0xe3,0xe1,0x5c,0xe2,
+- 0xd4,0x5c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0xff,0xe2,0x86,0x84,0x00,
+- 0xe3,0x23,0x61,0xe2,0xf0,0x60,0xd1,0x0c,0xe0,0x9d,0x60,0xcf,0x86,0x65,0x7e,0x60,
+- 0x01,0x00,0xd0,0x62,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x18,
+- 0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x90,0x00,
+- 0x01,0xff,0xe2,0x93,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,
+- 0x92,0x00,0x01,0xff,0xe2,0x93,0x93,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x94,0x00,
+- 0x01,0xff,0xe2,0x93,0x95,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x96,0x00,
+- 0x01,0xff,0xe2,0x93,0x97,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x98,0x00,0x01,0xff,
+- 0xe2,0x93,0x99,0x00,0xcf,0x86,0xe5,0x57,0x60,0x94,0x80,0xd3,0x40,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x9a,0x00,0x01,0xff,0xe2,0x93,0x9b,0x00,0x10,
+- 0x08,0x01,0xff,0xe2,0x93,0x9c,0x00,0x01,0xff,0xe2,0x93,0x9d,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe2,0x93,0x9e,0x00,0x01,0xff,0xe2,0x93,0x9f,0x00,0x10,0x08,0x01,
+- 0xff,0xe2,0x93,0xa0,0x00,0x01,0xff,0xe2,0x93,0xa1,0x00,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe2,0x93,0xa2,0x00,0x01,0xff,0xe2,0x93,0xa3,0x00,0x10,0x08,0x01,
+- 0xff,0xe2,0x93,0xa4,0x00,0x01,0xff,0xe2,0x93,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0xe2,0x93,0xa6,0x00,0x01,0xff,0xe2,0x93,0xa7,0x00,0x10,0x08,0x01,0xff,0xe2,
+- 0x93,0xa8,0x00,0x01,0xff,0xe2,0x93,0xa9,0x00,0x01,0x00,0xd4,0x0c,0xe3,0x33,0x62,
+- 0xe2,0x2c,0x62,0xcf,0x06,0x04,0x00,0xe3,0x0c,0x65,0xe2,0xff,0x63,0xe1,0x2e,0x02,
+- 0xe0,0x84,0x01,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x08,0xff,0xe2,0xb0,0xb0,0x00,0x08,0xff,0xe2,0xb0,0xb1,0x00,0x10,0x08,
+- 0x08,0xff,0xe2,0xb0,0xb2,0x00,0x08,0xff,0xe2,0xb0,0xb3,0x00,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe2,0xb0,0xb4,0x00,0x08,0xff,0xe2,0xb0,0xb5,0x00,0x10,0x08,0x08,0xff,
+- 0xe2,0xb0,0xb6,0x00,0x08,0xff,0xe2,0xb0,0xb7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe2,0xb0,0xb8,0x00,0x08,0xff,0xe2,0xb0,0xb9,0x00,0x10,0x08,0x08,0xff,
+- 0xe2,0xb0,0xba,0x00,0x08,0xff,0xe2,0xb0,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe2,0xb0,0xbc,0x00,0x08,0xff,0xe2,0xb0,0xbd,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,
+- 0xbe,0x00,0x08,0xff,0xe2,0xb0,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe2,0xb1,0x80,0x00,0x08,0xff,0xe2,0xb1,0x81,0x00,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0x82,0x00,0x08,0xff,0xe2,0xb1,0x83,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0x84,0x00,0x08,0xff,0xe2,0xb1,0x85,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
+- 0x86,0x00,0x08,0xff,0xe2,0xb1,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0x88,0x00,0x08,0xff,0xe2,0xb1,0x89,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
+- 0x8a,0x00,0x08,0xff,0xe2,0xb1,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
+- 0x8c,0x00,0x08,0xff,0xe2,0xb1,0x8d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8e,0x00,
+- 0x08,0xff,0xe2,0xb1,0x8f,0x00,0x94,0x7c,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe2,0xb1,0x90,0x00,0x08,0xff,0xe2,0xb1,0x91,0x00,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0x92,0x00,0x08,0xff,0xe2,0xb1,0x93,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0x94,0x00,0x08,0xff,0xe2,0xb1,0x95,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
+- 0x96,0x00,0x08,0xff,0xe2,0xb1,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe2,0xb1,0x98,0x00,0x08,0xff,0xe2,0xb1,0x99,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
+- 0x9a,0x00,0x08,0xff,0xe2,0xb1,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
+- 0x9c,0x00,0x08,0xff,0xe2,0xb1,0x9d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9e,0x00,
+- 0x00,0x00,0x08,0x00,0xcf,0x86,0xd5,0x07,0x64,0xef,0x61,0x08,0x00,0xd4,0x63,0xd3,
+- 0x32,0xd2,0x1b,0xd1,0x0c,0x10,0x08,0x09,0xff,0xe2,0xb1,0xa1,0x00,0x09,0x00,0x10,
+- 0x07,0x09,0xff,0xc9,0xab,0x00,0x09,0xff,0xe1,0xb5,0xbd,0x00,0xd1,0x0b,0x10,0x07,
+- 0x09,0xff,0xc9,0xbd,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xa8,
+- 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xaa,0x00,0x10,
+- 0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xac,0x00,0xd1,0x0b,0x10,0x04,0x09,0x00,0x0a,
+- 0xff,0xc9,0x91,0x00,0x10,0x07,0x0a,0xff,0xc9,0xb1,0x00,0x0a,0xff,0xc9,0x90,0x00,
+- 0xd3,0x27,0xd2,0x17,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xc9,0x92,0x00,0x0a,0x00,0x10,
+- 0x08,0x0a,0xff,0xe2,0xb1,0xb3,0x00,0x0a,0x00,0x91,0x0c,0x10,0x04,0x09,0x00,0x09,
+- 0xff,0xe2,0xb1,0xb6,0x00,0x09,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,
+- 0x07,0x0b,0xff,0xc8,0xbf,0x00,0x0b,0xff,0xc9,0x80,0x00,0xe0,0x83,0x01,0xcf,0x86,
+- 0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
+- 0x81,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x83,0x00,0x08,0x00,0xd1,0x0c,
+- 0x10,0x08,0x08,0xff,0xe2,0xb2,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,
+- 0x87,0x00,0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x89,0x00,
+- 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+- 0x08,0xff,0xe2,0xb2,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8f,0x00,
+- 0x08,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x91,0x00,
+- 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+- 0x08,0xff,0xe2,0xb2,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x97,0x00,
+- 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x99,0x00,0x08,0x00,
+- 0x10,0x08,0x08,0xff,0xe2,0xb2,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+- 0xe2,0xb2,0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x9f,0x00,0x08,0x00,
+- 0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa1,0x00,
+- 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+- 0x08,0xff,0xe2,0xb2,0xa5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa7,0x00,
+- 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa9,0x00,0x08,0x00,
+- 0x10,0x08,0x08,0xff,0xe2,0xb2,0xab,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+- 0xe2,0xb2,0xad,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xaf,0x00,0x08,0x00,
+- 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb1,0x00,0x08,0x00,
+- 0x10,0x08,0x08,0xff,0xe2,0xb2,0xb3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+- 0xe2,0xb2,0xb5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb7,0x00,0x08,0x00,
+- 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb9,0x00,0x08,0x00,0x10,0x08,
+- 0x08,0xff,0xe2,0xb2,0xbb,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
+- 0xbd,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbf,0x00,0x08,0x00,0xcf,0x86,
+- 0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,
+- 0x81,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x83,0x00,0x08,0x00,0xd1,0x0c,
+- 0x10,0x08,0x08,0xff,0xe2,0xb3,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,
+- 0x87,0x00,0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x89,0x00,
+- 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+- 0x08,0xff,0xe2,0xb3,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8f,0x00,
+- 0x08,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x91,0x00,
+- 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
+- 0x08,0xff,0xe2,0xb3,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x97,0x00,
+- 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x99,0x00,0x08,0x00,
+- 0x10,0x08,0x08,0xff,0xe2,0xb3,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
+- 0xe2,0xb3,0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x9f,0x00,0x08,0x00,
+- 0xd4,0x3b,0xd3,0x1c,0x92,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa1,0x00,
+- 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa3,0x00,0x08,0x00,0x08,0x00,0xd2,0x10,
+- 0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0xff,0xe2,0xb3,0xac,0x00,0xe1,0x3b,
+- 0x5f,0x10,0x04,0x0b,0x00,0x0b,0xff,0xe2,0xb3,0xae,0x00,0xe3,0x40,0x5f,0x92,0x10,
+- 0x51,0x04,0x0b,0xe6,0x10,0x08,0x0d,0xff,0xe2,0xb3,0xb3,0x00,0x0d,0x00,0x00,0x00,
+- 0xe2,0x98,0x08,0xd1,0x0b,0xe0,0x11,0x67,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe0,0x65,
+- 0x6c,0xcf,0x86,0xe5,0xa7,0x05,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x0c,0xe2,0xf8,
+- 0x67,0xe1,0x8f,0x67,0xcf,0x06,0x04,0x00,0xe2,0xdb,0x01,0xe1,0x26,0x01,0xd0,0x09,
+- 0xcf,0x86,0x65,0xf4,0x67,0x0a,0x00,0xcf,0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,
+- 0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,
+- 0xff,0xea,0x99,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x85,
+- 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,
+- 0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
+- 0x99,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x8d,0x00,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,
+- 0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
+- 0x99,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x95,0x00,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x97,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,
+- 0x08,0x0a,0xff,0xea,0x99,0x99,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x9b,
+- 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x9d,0x00,0x0a,0x00,0x10,
+- 0x08,0x0a,0xff,0xea,0x99,0x9f,0x00,0x0a,0x00,0xe4,0x5d,0x67,0xd3,0x30,0xd2,0x18,
+- 0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x99,0xa1,0x00,0x0c,0x00,0x10,0x08,0x0a,0xff,
+- 0xea,0x99,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0xa5,0x00,
+- 0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,
+- 0x10,0x08,0x0a,0xff,0xea,0x99,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,
+- 0xab,0x00,0x0a,0x00,0xe1,0x0c,0x67,0x10,0x08,0x0a,0xff,0xea,0x99,0xad,0x00,0x0a,
+- 0x00,0xe0,0x35,0x67,0xcf,0x86,0x95,0xab,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
+- 0x10,0x08,0x0a,0xff,0xea,0x9a,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,
+- 0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x85,0x00,0x0a,0x00,
+- 0x10,0x08,0x0a,0xff,0xea,0x9a,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,
+- 0x0a,0xff,0xea,0x9a,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8b,0x00,
+- 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8d,0x00,0x0a,0x00,0x10,0x08,
+- 0x0a,0xff,0xea,0x9a,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,
+- 0x0a,0xff,0xea,0x9a,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x93,0x00,
+- 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x95,0x00,0x0a,0x00,0x10,0x08,
+- 0x0a,0xff,0xea,0x9a,0x97,0x00,0x0a,0x00,0xe2,0x92,0x66,0xd1,0x0c,0x10,0x08,0x10,
+- 0xff,0xea,0x9a,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9a,0x9b,0x00,0x10,
+- 0x00,0x0b,0x00,0xe1,0x10,0x02,0xd0,0xb9,0xcf,0x86,0xd5,0x07,0x64,0x9e,0x66,0x08,
+- 0x00,0xd4,0x58,0xd3,0x28,0xd2,0x10,0x51,0x04,0x09,0x00,0x10,0x08,0x0a,0xff,0xea,
+- 0x9c,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa5,0x00,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,
+- 0x08,0x0a,0xff,0xea,0x9c,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xab,
+- 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xad,0x00,0x0a,0x00,0x10,
+- 0x08,0x0a,0xff,0xea,0x9c,0xaf,0x00,0x0a,0x00,0xd3,0x28,0xd2,0x10,0x51,0x04,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
+- 0xff,0xea,0x9c,0xb5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb7,0x00,0x0a,
+- 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb9,0x00,0x0a,0x00,0x10,
+- 0x08,0x0a,0xff,0xea,0x9c,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
+- 0x9c,0xbd,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbf,0x00,0x0a,0x00,0xcf,
+- 0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
+- 0x9d,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x83,0x00,0x0a,0x00,0xd1,
+- 0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x85,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
+- 0x9d,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x89,
+- 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,
+- 0x08,0x0a,0xff,0xea,0x9d,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8f,
+- 0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x91,
+- 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,
+- 0x08,0x0a,0xff,0xea,0x9d,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x97,
+- 0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x99,0x00,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
+- 0xff,0xea,0x9d,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9f,0x00,0x0a,
+- 0x00,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa1,
+- 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,
+- 0x08,0x0a,0xff,0xea,0x9d,0xa5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa7,
+- 0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa9,0x00,0x0a,
+- 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xab,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
+- 0xff,0xea,0x9d,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xaf,0x00,0x0a,
+- 0x00,0x53,0x04,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,
+- 0x9d,0xba,0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xbc,0x00,0xd1,0x0c,0x10,
+- 0x04,0x0a,0x00,0x0a,0xff,0xe1,0xb5,0xb9,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xbf,
+- 0x00,0x0a,0x00,0xe0,0x71,0x01,0xcf,0x86,0xd5,0xa6,0xd4,0x4e,0xd3,0x30,0xd2,0x18,
+- 0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,
+- 0xea,0x9e,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x85,0x00,
+- 0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9e,0x87,0x00,0x0a,0x00,0xd2,0x10,0x51,0x04,
+- 0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9e,0x8c,0x00,0xe1,0x9a,0x64,0x10,
+- 0x04,0x0a,0x00,0x0c,0xff,0xc9,0xa5,0x00,0xd3,0x28,0xd2,0x18,0xd1,0x0c,0x10,0x08,
+- 0x0c,0xff,0xea,0x9e,0x91,0x00,0x0c,0x00,0x10,0x08,0x0d,0xff,0xea,0x9e,0x93,0x00,
+- 0x0d,0x00,0x51,0x04,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x97,0x00,0x10,0x00,
+- 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x99,0x00,0x10,0x00,0x10,0x08,
+- 0x10,0xff,0xea,0x9e,0x9b,0x00,0x10,0x00,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,
+- 0x9d,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x9f,0x00,0x10,0x00,0xd4,0x63,
+- 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa1,0x00,0x0c,0x00,
+- 0x10,0x08,0x0c,0xff,0xea,0x9e,0xa3,0x00,0x0c,0x00,0xd1,0x0c,0x10,0x08,0x0c,0xff,
+- 0xea,0x9e,0xa5,0x00,0x0c,0x00,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa7,0x00,0x0c,0x00,
+- 0xd2,0x1a,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa9,0x00,0x0c,0x00,0x10,0x07,
+- 0x0d,0xff,0xc9,0xa6,0x00,0x10,0xff,0xc9,0x9c,0x00,0xd1,0x0e,0x10,0x07,0x10,0xff,
+- 0xc9,0xa1,0x00,0x10,0xff,0xc9,0xac,0x00,0x10,0x07,0x12,0xff,0xc9,0xaa,0x00,0x14,
+- 0x00,0xd3,0x35,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x10,0xff,0xca,0x9e,0x00,0x10,0xff,
+- 0xca,0x87,0x00,0x10,0x07,0x11,0xff,0xca,0x9d,0x00,0x11,0xff,0xea,0xad,0x93,0x00,
+- 0xd1,0x0c,0x10,0x08,0x11,0xff,0xea,0x9e,0xb5,0x00,0x11,0x00,0x10,0x08,0x11,0xff,
+- 0xea,0x9e,0xb7,0x00,0x11,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x14,0xff,0xea,0x9e,
+- 0xb9,0x00,0x14,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbb,0x00,0x15,0x00,0xd1,0x0c,
+- 0x10,0x08,0x15,0xff,0xea,0x9e,0xbd,0x00,0x15,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,
+- 0xbf,0x00,0x15,0x00,0xcf,0x86,0xe5,0xd4,0x63,0x94,0x2f,0x93,0x2b,0xd2,0x10,0x51,
+- 0x04,0x00,0x00,0x10,0x08,0x15,0xff,0xea,0x9f,0x83,0x00,0x15,0x00,0xd1,0x0f,0x10,
+- 0x08,0x15,0xff,0xea,0x9e,0x94,0x00,0x15,0xff,0xca,0x82,0x00,0x10,0x08,0x15,0xff,
+- 0xe1,0xb6,0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0xb4,0x66,0xd3,0x1d,0xe2,
+- 0x5b,0x64,0xe1,0x0a,0x64,0xe0,0xf7,0x63,0xcf,0x86,0xe5,0xd8,0x63,0x94,0x0b,0x93,
+- 0x07,0x62,0xc3,0x63,0x08,0x00,0x08,0x00,0x08,0x00,0xd2,0x0f,0xe1,0x5a,0x65,0xe0,
+- 0x27,0x65,0xcf,0x86,0x65,0x0c,0x65,0x0a,0x00,0xd1,0xab,0xd0,0x1a,0xcf,0x86,0xe5,
+- 0x17,0x66,0xe4,0xfa,0x65,0xe3,0xe1,0x65,0xe2,0xd4,0x65,0x91,0x08,0x10,0x04,0x00,
+- 0x00,0x0c,0x00,0x0c,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x0b,0x93,0x07,0x62,
+- 0x27,0x66,0x11,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,
+- 0xe1,0x8e,0xa0,0x00,0x11,0xff,0xe1,0x8e,0xa1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,
+- 0xa2,0x00,0x11,0xff,0xe1,0x8e,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,
+- 0xa4,0x00,0x11,0xff,0xe1,0x8e,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa6,0x00,
+- 0x11,0xff,0xe1,0x8e,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,
+- 0xa8,0x00,0x11,0xff,0xe1,0x8e,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xaa,0x00,
+- 0x11,0xff,0xe1,0x8e,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xac,0x00,
+- 0x11,0xff,0xe1,0x8e,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xae,0x00,0x11,0xff,
+- 0xe1,0x8e,0xaf,0x00,0xe0,0xb2,0x65,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,
+- 0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb0,0x00,0x11,0xff,0xe1,0x8e,
+- 0xb1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb2,0x00,0x11,0xff,0xe1,0x8e,0xb3,0x00,
+- 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb4,0x00,0x11,0xff,0xe1,0x8e,0xb5,0x00,
+- 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb6,0x00,0x11,0xff,0xe1,0x8e,0xb7,0x00,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb8,0x00,0x11,0xff,0xe1,0x8e,0xb9,0x00,
+- 0x10,0x08,0x11,0xff,0xe1,0x8e,0xba,0x00,0x11,0xff,0xe1,0x8e,0xbb,0x00,0xd1,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8e,0xbc,0x00,0x11,0xff,0xe1,0x8e,0xbd,0x00,0x10,0x08,
+- 0x11,0xff,0xe1,0x8e,0xbe,0x00,0x11,0xff,0xe1,0x8e,0xbf,0x00,0xd3,0x40,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0x80,0x00,0x11,0xff,0xe1,0x8f,0x81,0x00,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0x82,0x00,0x11,0xff,0xe1,0x8f,0x83,0x00,0xd1,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0x84,0x00,0x11,0xff,0xe1,0x8f,0x85,0x00,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0x86,0x00,0x11,0xff,0xe1,0x8f,0x87,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0x88,0x00,0x11,0xff,0xe1,0x8f,0x89,0x00,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0x8a,0x00,0x11,0xff,0xe1,0x8f,0x8b,0x00,0xd1,0x10,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0x8c,0x00,0x11,0xff,0xe1,0x8f,0x8d,0x00,0x10,0x08,0x11,0xff,
+- 0xe1,0x8f,0x8e,0x00,0x11,0xff,0xe1,0x8f,0x8f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0x90,0x00,0x11,0xff,0xe1,0x8f,0x91,0x00,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0x92,0x00,0x11,0xff,0xe1,0x8f,0x93,0x00,0xd1,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0x94,0x00,0x11,0xff,0xe1,0x8f,0x95,0x00,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0x96,0x00,0x11,0xff,0xe1,0x8f,0x97,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0x98,0x00,0x11,0xff,0xe1,0x8f,0x99,0x00,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0x9a,0x00,0x11,0xff,0xe1,0x8f,0x9b,0x00,0xd1,0x10,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0x9c,0x00,0x11,0xff,0xe1,0x8f,0x9d,0x00,0x10,0x08,0x11,0xff,
+- 0xe1,0x8f,0x9e,0x00,0x11,0xff,0xe1,0x8f,0x9f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x11,0xff,0xe1,0x8f,0xa0,0x00,0x11,0xff,0xe1,0x8f,0xa1,0x00,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0xa2,0x00,0x11,0xff,0xe1,0x8f,0xa3,0x00,0xd1,0x10,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0xa4,0x00,0x11,0xff,0xe1,0x8f,0xa5,0x00,0x10,0x08,0x11,0xff,
+- 0xe1,0x8f,0xa6,0x00,0x11,0xff,0xe1,0x8f,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x11,0xff,0xe1,0x8f,0xa8,0x00,0x11,0xff,0xe1,0x8f,0xa9,0x00,0x10,0x08,0x11,0xff,
+- 0xe1,0x8f,0xaa,0x00,0x11,0xff,0xe1,0x8f,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
+- 0xe1,0x8f,0xac,0x00,0x11,0xff,0xe1,0x8f,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
+- 0xae,0x00,0x11,0xff,0xe1,0x8f,0xaf,0x00,0xd1,0x0c,0xe0,0xeb,0x63,0xcf,0x86,0xcf,
+- 0x06,0x02,0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,
+- 0xcf,0x06,0x01,0x00,0xd4,0xae,0xd3,0x09,0xe2,0x54,0x64,0xcf,0x06,0x01,0x00,0xd2,
+- 0x27,0xe1,0x1f,0x70,0xe0,0x26,0x6e,0xcf,0x86,0xe5,0x3f,0x6d,0xe4,0xce,0x6c,0xe3,
+- 0x99,0x6c,0xe2,0x78,0x6c,0xe1,0x67,0x6c,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,
+- 0x01,0xff,0xe5,0xba,0xa6,0x00,0xe1,0x74,0x74,0xe0,0xe8,0x73,0xcf,0x86,0xe5,0x22,
+- 0x73,0xd4,0x3b,0x93,0x37,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x01,0xff,0x66,0x66,0x00,
+- 0x01,0xff,0x66,0x69,0x00,0x10,0x07,0x01,0xff,0x66,0x6c,0x00,0x01,0xff,0x66,0x66,
+- 0x69,0x00,0xd1,0x0f,0x10,0x08,0x01,0xff,0x66,0x66,0x6c,0x00,0x01,0xff,0x73,0x74,
+- 0x00,0x10,0x07,0x01,0xff,0x73,0x74,0x00,0x00,0x00,0x00,0x00,0xe3,0xc8,0x72,0xd2,
+- 0x11,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xb6,0x00,
+- 0xd1,0x12,0x10,0x09,0x01,0xff,0xd5,0xb4,0xd5,0xa5,0x00,0x01,0xff,0xd5,0xb4,0xd5,
+- 0xab,0x00,0x10,0x09,0x01,0xff,0xd5,0xbe,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb4,0xd5,
+- 0xad,0x00,0xd3,0x09,0xe2,0x40,0x74,0xcf,0x06,0x01,0x00,0xd2,0x13,0xe1,0x30,0x75,
+- 0xe0,0xc1,0x74,0xcf,0x86,0xe5,0x9e,0x74,0x64,0x8d,0x74,0x06,0xff,0x00,0xe1,0x96,
+- 0x75,0xe0,0x63,0x75,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x7c,
+- 0xd3,0x3c,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xef,0xbd,0x81,0x00,
+- 0x10,0x08,0x01,0xff,0xef,0xbd,0x82,0x00,0x01,0xff,0xef,0xbd,0x83,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xef,0xbd,0x84,0x00,0x01,0xff,0xef,0xbd,0x85,0x00,0x10,0x08,
+- 0x01,0xff,0xef,0xbd,0x86,0x00,0x01,0xff,0xef,0xbd,0x87,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xef,0xbd,0x88,0x00,0x01,0xff,0xef,0xbd,0x89,0x00,0x10,0x08,
+- 0x01,0xff,0xef,0xbd,0x8a,0x00,0x01,0xff,0xef,0xbd,0x8b,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xef,0xbd,0x8c,0x00,0x01,0xff,0xef,0xbd,0x8d,0x00,0x10,0x08,0x01,0xff,
+- 0xef,0xbd,0x8e,0x00,0x01,0xff,0xef,0xbd,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xef,0xbd,0x90,0x00,0x01,0xff,0xef,0xbd,0x91,0x00,0x10,0x08,
+- 0x01,0xff,0xef,0xbd,0x92,0x00,0x01,0xff,0xef,0xbd,0x93,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xef,0xbd,0x94,0x00,0x01,0xff,0xef,0xbd,0x95,0x00,0x10,0x08,0x01,0xff,
+- 0xef,0xbd,0x96,0x00,0x01,0xff,0xef,0xbd,0x97,0x00,0x92,0x1c,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xef,0xbd,0x98,0x00,0x01,0xff,0xef,0xbd,0x99,0x00,0x10,0x08,0x01,0xff,
+- 0xef,0xbd,0x9a,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0x87,0xb3,0xe1,0x60,0xb0,0xe0,
+- 0xdd,0xae,0xcf,0x86,0xe5,0x81,0x9b,0xc4,0xe3,0xc1,0x07,0xe2,0x62,0x06,0xe1,0x11,
+- 0x86,0xe0,0x09,0x05,0xcf,0x86,0xe5,0xfb,0x02,0xd4,0x1c,0xe3,0x7f,0x76,0xe2,0xd6,
+- 0x75,0xe1,0xb1,0x75,0xe0,0x8a,0x75,0xcf,0x86,0xe5,0x57,0x75,0x94,0x07,0x63,0x42,
+- 0x75,0x07,0x00,0x07,0x00,0xe3,0x2b,0x78,0xe2,0xf0,0x77,0xe1,0x77,0x01,0xe0,0x88,
+- 0x77,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,
+- 0x05,0xff,0xf0,0x90,0x90,0xa8,0x00,0x05,0xff,0xf0,0x90,0x90,0xa9,0x00,0x10,0x09,
+- 0x05,0xff,0xf0,0x90,0x90,0xaa,0x00,0x05,0xff,0xf0,0x90,0x90,0xab,0x00,0xd1,0x12,
+- 0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xac,0x00,0x05,0xff,0xf0,0x90,0x90,0xad,0x00,
+- 0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xae,0x00,0x05,0xff,0xf0,0x90,0x90,0xaf,0x00,
+- 0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb0,0x00,0x05,0xff,0xf0,
+- 0x90,0x90,0xb1,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb2,0x00,0x05,0xff,0xf0,
+- 0x90,0x90,0xb3,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb4,0x00,0x05,
+- 0xff,0xf0,0x90,0x90,0xb5,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb6,0x00,0x05,
+- 0xff,0xf0,0x90,0x90,0xb7,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,
+- 0xf0,0x90,0x90,0xb8,0x00,0x05,0xff,0xf0,0x90,0x90,0xb9,0x00,0x10,0x09,0x05,0xff,
+- 0xf0,0x90,0x90,0xba,0x00,0x05,0xff,0xf0,0x90,0x90,0xbb,0x00,0xd1,0x12,0x10,0x09,
+- 0x05,0xff,0xf0,0x90,0x90,0xbc,0x00,0x05,0xff,0xf0,0x90,0x90,0xbd,0x00,0x10,0x09,
+- 0x05,0xff,0xf0,0x90,0x90,0xbe,0x00,0x05,0xff,0xf0,0x90,0x90,0xbf,0x00,0xd2,0x24,
+- 0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x80,0x00,0x05,0xff,0xf0,0x90,0x91,
+- 0x81,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x82,0x00,0x05,0xff,0xf0,0x90,0x91,
+- 0x83,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x84,0x00,0x05,0xff,0xf0,
+- 0x90,0x91,0x85,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x86,0x00,0x05,0xff,0xf0,
+- 0x90,0x91,0x87,0x00,0x94,0x4c,0x93,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,
+- 0xf0,0x90,0x91,0x88,0x00,0x05,0xff,0xf0,0x90,0x91,0x89,0x00,0x10,0x09,0x05,0xff,
+- 0xf0,0x90,0x91,0x8a,0x00,0x05,0xff,0xf0,0x90,0x91,0x8b,0x00,0xd1,0x12,0x10,0x09,
+- 0x05,0xff,0xf0,0x90,0x91,0x8c,0x00,0x05,0xff,0xf0,0x90,0x91,0x8d,0x00,0x10,0x09,
+- 0x07,0xff,0xf0,0x90,0x91,0x8e,0x00,0x07,0xff,0xf0,0x90,0x91,0x8f,0x00,0x05,0x00,
+- 0x05,0x00,0xd0,0xa0,0xcf,0x86,0xd5,0x07,0x64,0x30,0x76,0x07,0x00,0xd4,0x07,0x63,
+- 0x3d,0x76,0x07,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,
+- 0x93,0x98,0x00,0x12,0xff,0xf0,0x90,0x93,0x99,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,
+- 0x93,0x9a,0x00,0x12,0xff,0xf0,0x90,0x93,0x9b,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,
+- 0xf0,0x90,0x93,0x9c,0x00,0x12,0xff,0xf0,0x90,0x93,0x9d,0x00,0x10,0x09,0x12,0xff,
+- 0xf0,0x90,0x93,0x9e,0x00,0x12,0xff,0xf0,0x90,0x93,0x9f,0x00,0xd2,0x24,0xd1,0x12,
+- 0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa0,0x00,0x12,0xff,0xf0,0x90,0x93,0xa1,0x00,
+- 0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa2,0x00,0x12,0xff,0xf0,0x90,0x93,0xa3,0x00,
+- 0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa4,0x00,0x12,0xff,0xf0,0x90,0x93,
+- 0xa5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa6,0x00,0x12,0xff,0xf0,0x90,0x93,
+- 0xa7,0x00,0xcf,0x86,0xe5,0xc6,0x75,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
+- 0x09,0x12,0xff,0xf0,0x90,0x93,0xa8,0x00,0x12,0xff,0xf0,0x90,0x93,0xa9,0x00,0x10,
+- 0x09,0x12,0xff,0xf0,0x90,0x93,0xaa,0x00,0x12,0xff,0xf0,0x90,0x93,0xab,0x00,0xd1,
+- 0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xac,0x00,0x12,0xff,0xf0,0x90,0x93,0xad,
+- 0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xae,0x00,0x12,0xff,0xf0,0x90,0x93,0xaf,
+- 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb0,0x00,0x12,0xff,
+- 0xf0,0x90,0x93,0xb1,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb2,0x00,0x12,0xff,
+- 0xf0,0x90,0x93,0xb3,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb4,0x00,
+- 0x12,0xff,0xf0,0x90,0x93,0xb5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb6,0x00,
+- 0x12,0xff,0xf0,0x90,0x93,0xb7,0x00,0x93,0x28,0x92,0x24,0xd1,0x12,0x10,0x09,0x12,
+- 0xff,0xf0,0x90,0x93,0xb8,0x00,0x12,0xff,0xf0,0x90,0x93,0xb9,0x00,0x10,0x09,0x12,
+- 0xff,0xf0,0x90,0x93,0xba,0x00,0x12,0xff,0xf0,0x90,0x93,0xbb,0x00,0x00,0x00,0x12,
+- 0x00,0xd4,0x1f,0xe3,0xdf,0x76,0xe2,0x6a,0x76,0xe1,0x09,0x76,0xe0,0xea,0x75,0xcf,
+- 0x86,0xe5,0xb7,0x75,0x94,0x0a,0xe3,0xa2,0x75,0x62,0x99,0x75,0x07,0x00,0x07,0x00,
+- 0xe3,0xde,0x78,0xe2,0xaf,0x78,0xd1,0x09,0xe0,0x4c,0x78,0xcf,0x06,0x0b,0x00,0xe0,
+- 0x7f,0x78,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
+- 0x09,0x11,0xff,0xf0,0x90,0xb3,0x80,0x00,0x11,0xff,0xf0,0x90,0xb3,0x81,0x00,0x10,
+- 0x09,0x11,0xff,0xf0,0x90,0xb3,0x82,0x00,0x11,0xff,0xf0,0x90,0xb3,0x83,0x00,0xd1,
+- 0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x84,0x00,0x11,0xff,0xf0,0x90,0xb3,0x85,
+- 0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x86,0x00,0x11,0xff,0xf0,0x90,0xb3,0x87,
+- 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x88,0x00,0x11,0xff,
+- 0xf0,0x90,0xb3,0x89,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8a,0x00,0x11,0xff,
+- 0xf0,0x90,0xb3,0x8b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8c,0x00,
+- 0x11,0xff,0xf0,0x90,0xb3,0x8d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8e,0x00,
+- 0x11,0xff,0xf0,0x90,0xb3,0x8f,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,
+- 0xff,0xf0,0x90,0xb3,0x90,0x00,0x11,0xff,0xf0,0x90,0xb3,0x91,0x00,0x10,0x09,0x11,
+- 0xff,0xf0,0x90,0xb3,0x92,0x00,0x11,0xff,0xf0,0x90,0xb3,0x93,0x00,0xd1,0x12,0x10,
+- 0x09,0x11,0xff,0xf0,0x90,0xb3,0x94,0x00,0x11,0xff,0xf0,0x90,0xb3,0x95,0x00,0x10,
+- 0x09,0x11,0xff,0xf0,0x90,0xb3,0x96,0x00,0x11,0xff,0xf0,0x90,0xb3,0x97,0x00,0xd2,
+- 0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x98,0x00,0x11,0xff,0xf0,0x90,
+- 0xb3,0x99,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9a,0x00,0x11,0xff,0xf0,0x90,
+- 0xb3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9c,0x00,0x11,0xff,
+- 0xf0,0x90,0xb3,0x9d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9e,0x00,0x11,0xff,
+- 0xf0,0x90,0xb3,0x9f,0x00,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,
+- 0xff,0xf0,0x90,0xb3,0xa0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa1,0x00,0x10,0x09,0x11,
+- 0xff,0xf0,0x90,0xb3,0xa2,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa3,0x00,0xd1,0x12,0x10,
+- 0x09,0x11,0xff,0xf0,0x90,0xb3,0xa4,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa5,0x00,0x10,
+- 0x09,0x11,0xff,0xf0,0x90,0xb3,0xa6,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa7,0x00,0xd2,
+- 0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xa8,0x00,0x11,0xff,0xf0,0x90,
+- 0xb3,0xa9,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xaa,0x00,0x11,0xff,0xf0,0x90,
+- 0xb3,0xab,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xac,0x00,0x11,0xff,
+- 0xf0,0x90,0xb3,0xad,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xae,0x00,0x11,0xff,
+- 0xf0,0x90,0xb3,0xaf,0x00,0x93,0x23,0x92,0x1f,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,
+- 0x90,0xb3,0xb0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xb1,0x00,0x10,0x09,0x11,0xff,0xf0,
+- 0x90,0xb3,0xb2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x15,0xe4,0x91,
+- 0x7b,0xe3,0x9b,0x79,0xe2,0x94,0x78,0xe1,0xe4,0x77,0xe0,0x9d,0x77,0xcf,0x06,0x0c,
+- 0x00,0xe4,0xeb,0x7e,0xe3,0x44,0x7e,0xe2,0xed,0x7d,0xd1,0x0c,0xe0,0xb2,0x7d,0xcf,
+- 0x86,0x65,0x93,0x7d,0x14,0x00,0xe0,0xb6,0x7d,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,
+- 0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x80,0x00,
+- 0x10,0xff,0xf0,0x91,0xa3,0x81,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x82,0x00,
+- 0x10,0xff,0xf0,0x91,0xa3,0x83,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,
+- 0x84,0x00,0x10,0xff,0xf0,0x91,0xa3,0x85,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,
+- 0x86,0x00,0x10,0xff,0xf0,0x91,0xa3,0x87,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,
+- 0xff,0xf0,0x91,0xa3,0x88,0x00,0x10,0xff,0xf0,0x91,0xa3,0x89,0x00,0x10,0x09,0x10,
+- 0xff,0xf0,0x91,0xa3,0x8a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8b,0x00,0xd1,0x12,0x10,
+- 0x09,0x10,0xff,0xf0,0x91,0xa3,0x8c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8d,0x00,0x10,
+- 0x09,0x10,0xff,0xf0,0x91,0xa3,0x8e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8f,0x00,0xd3,
+- 0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x90,0x00,0x10,0xff,
+- 0xf0,0x91,0xa3,0x91,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x92,0x00,0x10,0xff,
+- 0xf0,0x91,0xa3,0x93,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x94,0x00,
+- 0x10,0xff,0xf0,0x91,0xa3,0x95,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x96,0x00,
+- 0x10,0xff,0xf0,0x91,0xa3,0x97,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,
+- 0x91,0xa3,0x98,0x00,0x10,0xff,0xf0,0x91,0xa3,0x99,0x00,0x10,0x09,0x10,0xff,0xf0,
+- 0x91,0xa3,0x9a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x10,
+- 0xff,0xf0,0x91,0xa3,0x9c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9d,0x00,0x10,0x09,0x10,
+- 0xff,0xf0,0x91,0xa3,0x9e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9f,0x00,0xd1,0x11,0xe0,
+- 0x12,0x81,0xcf,0x86,0xe5,0x09,0x81,0xe4,0xd2,0x80,0xcf,0x06,0x00,0x00,0xe0,0xdb,
+- 0x82,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xd4,0x09,0xe3,0x10,0x81,0xcf,0x06,
+- 0x0c,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xe2,0x3b,0x82,0xe1,0x16,0x82,0xd0,0x06,
+- 0xcf,0x06,0x00,0x00,0xcf,0x86,0xa5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,
+- 0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa1,
+- 0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa3,
+- 0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa4,0x00,0x14,0xff,0xf0,0x96,
+- 0xb9,0xa5,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa6,0x00,0x14,0xff,0xf0,0x96,
+- 0xb9,0xa7,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa8,0x00,
+- 0x14,0xff,0xf0,0x96,0xb9,0xa9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xaa,0x00,
+- 0x14,0xff,0xf0,0x96,0xb9,0xab,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,
+- 0xac,0x00,0x14,0xff,0xf0,0x96,0xb9,0xad,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,
+- 0xae,0x00,0x14,0xff,0xf0,0x96,0xb9,0xaf,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
+- 0x09,0x14,0xff,0xf0,0x96,0xb9,0xb0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb1,0x00,0x10,
+- 0x09,0x14,0xff,0xf0,0x96,0xb9,0xb2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb3,0x00,0xd1,
+- 0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb5,
+- 0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb7,
+- 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb8,0x00,0x14,0xff,
+- 0xf0,0x96,0xb9,0xb9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xba,0x00,0x14,0xff,
+- 0xf0,0x96,0xb9,0xbb,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbc,0x00,
+- 0x14,0xff,0xf0,0x96,0xb9,0xbd,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbe,0x00,
+- 0x14,0xff,0xf0,0x96,0xb9,0xbf,0x00,0x14,0x00,0xd2,0x14,0xe1,0x25,0x82,0xe0,0x1c,
+- 0x82,0xcf,0x86,0xe5,0xdd,0x81,0xe4,0x9a,0x81,0xcf,0x06,0x12,0x00,0xd1,0x0b,0xe0,
+- 0x51,0x83,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0x95,0x8b,0xcf,0x86,0xd5,0x22,0xe4,
+- 0xd0,0x88,0xe3,0x93,0x88,0xe2,0x38,0x88,0xe1,0x31,0x88,0xe0,0x2a,0x88,0xcf,0x86,
+- 0xe5,0xfb,0x87,0xe4,0xe2,0x87,0x93,0x07,0x62,0xd1,0x87,0x12,0xe6,0x12,0xe6,0xe4,
+- 0x36,0x89,0xe3,0x2f,0x89,0xd2,0x09,0xe1,0xb8,0x88,0xcf,0x06,0x10,0x00,0xe1,0x1f,
+- 0x89,0xe0,0xec,0x88,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,
+- 0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa3,
+- 0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa5,
+- 0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa6,0x00,0x12,0xff,0xf0,0x9e,
+- 0xa4,0xa7,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa8,0x00,0x12,0xff,0xf0,0x9e,
+- 0xa4,0xa9,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xaa,0x00,
+- 0x12,0xff,0xf0,0x9e,0xa4,0xab,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xac,0x00,
+- 0x12,0xff,0xf0,0x9e,0xa4,0xad,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,
+- 0xae,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xaf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,
+- 0xb0,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb1,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,
+- 0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb3,0x00,0x10,
+- 0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb5,0x00,0xd1,
+- 0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb7,
+- 0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb9,
+- 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xba,0x00,0x12,0xff,
+- 0xf0,0x9e,0xa4,0xbb,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbc,0x00,0x12,0xff,
+- 0xf0,0x9e,0xa4,0xbd,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbe,0x00,
+- 0x12,0xff,0xf0,0x9e,0xa4,0xbf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa5,0x80,0x00,
+- 0x12,0xff,0xf0,0x9e,0xa5,0x81,0x00,0x94,0x1e,0x93,0x1a,0x92,0x16,0x91,0x12,0x10,
+- 0x09,0x12,0xff,0xf0,0x9e,0xa5,0x82,0x00,0x12,0xff,0xf0,0x9e,0xa5,0x83,0x00,0x12,
+- 0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- /* nfdi_c0100 */
+- 0x57,0x04,0x01,0x00,0xc6,0xe5,0xac,0x13,0xe4,0x41,0x0c,0xe3,0x7a,0x07,0xe2,0xf3,
+- 0x01,0xc1,0xd0,0x1f,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x15,0x53,0x04,0x01,0x00,
+- 0x52,0x04,0x01,0x00,0x91,0x09,0x10,0x04,0x01,0x00,0x01,0xff,0x00,0x01,0x00,0x01,
+- 0x00,0xcf,0x86,0xd5,0xe4,0xd4,0x7c,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x41,0xcc,0x80,0x00,0x01,0xff,0x41,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x41,
+- 0xcc,0x82,0x00,0x01,0xff,0x41,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,
+- 0xcc,0x88,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x43,
+- 0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x80,0x00,0x01,
+- 0xff,0x45,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x82,0x00,0x01,0xff,0x45,
+- 0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x80,0x00,0x01,0xff,0x49,
+- 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x82,0x00,0x01,0xff,0x49,0xcc,0x88,
+- 0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x4e,0xcc,0x83,
+- 0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x80,0x00,0x01,0xff,0x4f,0xcc,0x81,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x82,0x00,0x01,0xff,0x4f,0xcc,0x83,0x00,0x10,
+- 0x08,0x01,0xff,0x4f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,
+- 0x00,0x01,0xff,0x55,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x81,0x00,0x01,
+- 0xff,0x55,0xcc,0x82,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x88,0x00,0x01,
+- 0xff,0x59,0xcc,0x81,0x00,0x01,0x00,0xd4,0x7c,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,
+- 0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x01,
++ 0xc6,0xe5,0xf6,0x14,0xe4,0x6c,0x0d,0xe3,0x36,0x08,0xe2,0x1f,0x01,0xc1,0xd0,0x21,
++ 0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x13,0x52,0x04,0x01,0x00,
++ 0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xce,0xbc,0x00,0x01,0x00,0x01,0x00,0xcf,
++ 0x86,0xe5,0x9d,0x44,0xd4,0x7f,0xd3,0x3f,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,
++ 0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,
++ 0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x07,0x01,0xff,0xc3,0xa6,0x00,0x01,
+ 0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x80,
+ 0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x82,0x00,0x01,
+ 0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x80,0x00,0x01,
+ 0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00,0x01,0xff,0x69,
+- 0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6e,
+- 0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x81,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,0x00,0x01,0xff,0x6f,0xcc,0x83,
+- 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,
+- 0x04,0x01,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x81,
+- 0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x88,
+- 0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x79,0xcc,0x88,
+- 0x00,0xe1,0x9a,0x03,0xe0,0xd3,0x01,0xcf,0x86,0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,
+- 0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x86,0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa8,0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,
+- 0x08,0x01,0xff,0x43,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x82,0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,
+- 0x08,0x01,0xff,0x43,0xcc,0x87,0x00,0x01,0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x43,0xcc,0x8c,0x00,0x01,0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,
+- 0xff,0x44,0xcc,0x8c,0x00,0x01,0xff,0x64,0xcc,0x8c,0x00,0xd3,0x34,0xd2,0x14,0x51,
+- 0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x84,0x00,0x01,0xff,0x65,0xcc,0x84,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0x86,
+- 0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,
+- 0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,0x00,0x10,
+- 0x08,0x01,0xff,0x47,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,0x74,0xd3,
+- 0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x87,0x00,0x01,0xff,0x67,
+- 0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,
+- 0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0x82,0x00,0x01,0xff,0x68,0xcc,0x82,
+- 0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x83,0x00,0x01,
+- 0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x84,0x00,0x01,0xff,0x69,
+- 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x86,0x00,0x01,0xff,0x69,
+- 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,
+- 0x00,0xd3,0x30,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x49,0xcc,0x87,0x00,0x01,
+- 0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4a,0xcc,0x82,0x00,0x01,0xff,0x6a,
+- 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,
+- 0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x4c,0xcc,0x81,0x00,0x10,
+- 0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,0x4c,0xcc,0xa7,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,0x4c,0xcc,0x8c,0x00,0x10,0x08,0x01,
+- 0xff,0x6c,0xcc,0x8c,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x60,0xd3,0x30,0xd2,
+- 0x10,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x4e,0xcc,0x81,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,0x01,0xff,0x4e,0xcc,0xa7,0x00,0x10,
+- 0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,0x4e,0xcc,0x8c,0x00,0xd2,0x10,0x91,
+- 0x0c,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,0x01,0x00,0x01,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x4f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0x84,0x00,0x10,0x08,0x01,
+- 0xff,0x4f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,0xd3,0x34,0xd2,0x14,0x91,
+- 0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8b,0x00,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,
+- 0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,
+- 0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,
+- 0x08,0x01,0xff,0x53,0xcc,0xa7,0x00,0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x74,0xd3,
+- 0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x8c,0x00,0x01,0xff,0x73,
+- 0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,
+- 0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,
+- 0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x83,0x00,0x01,
+- 0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x84,0x00,0x01,0xff,0x75,
+- 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x86,0x00,0x01,0xff,0x75,
+- 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,
+- 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x8b,0x00,0x01,
+- 0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa8,0x00,0x01,0xff,0x75,
+- 0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x82,0x00,0x01,0xff,0x77,
+- 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x82,0x00,0x01,0xff,0x79,0xcc,0x82,
+- 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x59,0xcc,0x88,0x00,0x01,0xff,0x5a,
+- 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,0x5a,0xcc,0x87,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,0x5a,0xcc,0x8c,
+- 0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0x00,0xd0,0x4a,0xcf,0x86,0x55,
+- 0x04,0x01,0x00,0xd4,0x2c,0xd3,0x18,0x92,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0x4f,
+- 0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,
+- 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x55,0xcc,0x9b,0x00,0x93,
+- 0x14,0x92,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x75,0xcc,0x9b,0x00,0x01,0x00,0x01,
+- 0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xb4,0xd4,0x24,0x53,0x04,0x01,0x00,0x52,
+- 0x04,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x41,0xcc,0x8c,0x00,0x10,
+- 0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,0x49,0xcc,0x8c,0x00,0xd3,0x46,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x4f,0xcc,0x8c,
+- 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x8c,0x00,0xd1,
+- 0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x84,
+- 0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x55,0xcc,0x88,
+- 0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,
+- 0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,
+- 0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,
+- 0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x88,
+- 0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x80,0xd3,0x3a,0xd2,
+- 0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,
+- 0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0x86,0xcc,0x84,0x00,0x01,0xff,
+- 0xc3,0xa6,0xcc,0x84,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0x8c,
+- 0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,
+- 0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa8,
+- 0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa8,
+- 0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc6,
+- 0xb7,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0xd3,0x24,0xd2,0x10,0x91,
+- 0x0c,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,0x01,0x00,0x01,0x00,0x91,0x10,0x10,
+- 0x08,0x01,0xff,0x47,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x04,0x00,0xd2,
+- 0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x4e,0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,
+- 0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x8a,0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,
+- 0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xc3,0x86,0xcc,0x81,0x00,0x01,0xff,
+- 0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xc3,0x98,0xcc,0x81,0x00,0x01,0xff,
+- 0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x07,0x02,0xe1,0xae,0x01,0xe0,0x93,0x01,0xcf,0x86,
+- 0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,
+- 0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x91,0x00,
+- 0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x8f,0x00,
+- 0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x91,0x00,0x01,0xff,
+- 0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x8f,0x00,
+- 0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x91,0x00,0x01,0xff,
+- 0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8f,0x00,0x01,0xff,
+- 0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,
+- 0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x8f,0x00,
+- 0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0x91,0x00,0x01,0xff,
+- 0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x8f,0x00,0x01,0xff,
+- 0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,
+- 0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x04,0xff,0x53,0xcc,0xa6,0x00,0x04,0xff,
+- 0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,0x54,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,
+- 0xa6,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,0xff,0x48,0xcc,0x8c,0x00,0x04,0xff,
+- 0x68,0xcc,0x8c,0x00,0xd4,0x68,0xd3,0x20,0xd2,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,
+- 0x07,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,0xff,0x41,0xcc,0x87,0x00,
+- 0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x45,0xcc,
+- 0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x88,0xcc,
++ 0xcc,0x88,0x00,0xd3,0x3b,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb0,0x00,
++ 0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,0x00,0x01,0xff,
++ 0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,0x00,0x01,0xff,
++ 0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1f,
++ 0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb8,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,0x10,
++ 0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x07,0x01,
++ 0xff,0xc3,0xbe,0x00,0x01,0xff,0x73,0x73,0x00,0xe1,0xd4,0x03,0xe0,0xeb,0x01,0xcf,
++ 0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,
++ 0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x86,
++ 0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa8,
++ 0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,0x81,0x00,0x01,
++ 0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,0x82,
++ 0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,0x87,0x00,0x01,
++ 0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,0x8c,0x00,0x01,
++ 0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x8c,0x00,0x01,0xff,0x64,
++ 0xcc,0x8c,0x00,0xd3,0x3b,0xd2,0x1b,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc4,0x91,0x00,
++ 0x01,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x84,0x00,0x01,0xff,0x65,0xcc,0x84,0x00,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0x86,0x00,
++ 0x10,0x08,0x01,0xff,0x65,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,0x00,
++ 0x10,0x08,0x01,0xff,0x65,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x67,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,0x00,0x10,0x08,
++ 0x01,0xff,0x67,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,0x7b,0xd3,0x3b,
++ 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x87,0x00,0x01,0xff,0x67,0xcc,
++ 0x87,0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,0x00,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0xff,0x68,0xcc,0x82,0x00,
++ 0x10,0x07,0x01,0xff,0xc4,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x69,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x69,
++ 0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,
++ 0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0xa8,
++ 0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x37,0xd2,0x17,0xd1,0x0c,0x10,0x08,0x01,
++ 0xff,0x69,0xcc,0x87,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc4,0xb3,0x00,0x01,0x00,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x6a,0xcc,0x82,0x00,0x01,0xff,0x6a,0xcc,0x82,0x00,
++ 0x10,0x08,0x01,0xff,0x6b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,0x00,0xd2,0x1c,
++ 0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6c,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,
++ 0x6c,0xcc,0x81,0x00,0x01,0xff,0x6c,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x6c,0xcc,0xa7,0x00,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,
++ 0x8c,0x00,0x01,0xff,0xc5,0x80,0x00,0xcf,0x86,0xd5,0xed,0xd4,0x72,0xd3,0x37,0xd2,
++ 0x17,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc5,0x82,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0x6e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,
++ 0x01,0xff,0x6e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,
++ 0x6e,0xcc,0x8c,0x00,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,
++ 0x01,0xff,0xca,0xbc,0x6e,0x00,0x10,0x07,0x01,0xff,0xc5,0x8b,0x00,0x01,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0x84,0x00,0x10,
++ 0x08,0x01,0xff,0x6f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,0xd3,0x3b,0xd2,
++ 0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0xff,0x6f,0xcc,0x8b,
++ 0x00,0x10,0x07,0x01,0xff,0xc5,0x93,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x72,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,
++ 0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x72,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,
++ 0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x73,0xcc,
++ 0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,0xa7,0x00,
++ 0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x7b,0xd3,0x3b,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x73,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,
++ 0x74,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x74,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x10,0x07,0x01,0xff,0xc5,0xa7,
++ 0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x83,0x00,0x01,
++ 0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x84,0x00,0x01,0xff,0x75,
++ 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x86,0x00,0x01,0xff,0x75,
++ 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,
++ 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x8b,0x00,0x01,
++ 0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa8,0x00,0x01,0xff,0x75,
++ 0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x82,0x00,0x01,0xff,0x77,
++ 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x82,0x00,0x01,0xff,0x79,0xcc,0x82,
++ 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,0x88,0x00,0x01,0xff,0x7a,
++ 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,0x7a,0xcc,0x87,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,0x7a,0xcc,0x8c,
++ 0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0xff,0x73,0x00,0xe0,0x65,0x01,
++ 0xcf,0x86,0xd5,0xb4,0xd4,0x5a,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0xc9,0x93,0x00,0x10,0x07,0x01,0xff,0xc6,0x83,0x00,0x01,0x00,0xd1,0x0b,
++ 0x10,0x07,0x01,0xff,0xc6,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0x94,0x00,
++ 0x01,0xff,0xc6,0x88,0x00,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc9,
++ 0x96,0x00,0x10,0x07,0x01,0xff,0xc9,0x97,0x00,0x01,0xff,0xc6,0x8c,0x00,0x51,0x04,
++ 0x01,0x00,0x10,0x07,0x01,0xff,0xc7,0x9d,0x00,0x01,0xff,0xc9,0x99,0x00,0xd3,0x32,
++ 0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0x9b,0x00,0x01,0xff,0xc6,0x92,0x00,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xa0,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc9,
++ 0xa3,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0xa9,0x00,0x01,0xff,0xc9,0xa8,0x00,
++ 0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0x99,0x00,0x01,0x00,0x01,0x00,0xd1,
++ 0x0e,0x10,0x07,0x01,0xff,0xc9,0xaf,0x00,0x01,0xff,0xc9,0xb2,0x00,0x10,0x04,0x01,
++ 0x00,0x01,0xff,0xc9,0xb5,0x00,0xd4,0x5d,0xd3,0x34,0xd2,0x1b,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x10,0x07,0x01,0xff,
++ 0xc6,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0xa5,0x00,0x01,0x00,
++ 0x10,0x07,0x01,0xff,0xca,0x80,0x00,0x01,0xff,0xc6,0xa8,0x00,0xd2,0x0f,0x91,0x0b,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x83,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,
++ 0xff,0xc6,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x88,0x00,0x01,0xff,0x75,
++ 0xcc,0x9b,0x00,0xd3,0x33,0xd2,0x1d,0xd1,0x0f,0x10,0x08,0x01,0xff,0x75,0xcc,0x9b,
++ 0x00,0x01,0xff,0xca,0x8a,0x00,0x10,0x07,0x01,0xff,0xca,0x8b,0x00,0x01,0xff,0xc6,
++ 0xb4,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc6,0xb6,0x00,0x10,0x04,0x01,
++ 0x00,0x01,0xff,0xca,0x92,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xb9,
++ 0x00,0x01,0x00,0x01,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xbd,0x00,0x01,0x00,
++ 0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x44,0xd3,0x16,0x52,0x04,0x01,0x00,0x51,0x07,
++ 0x01,0xff,0xc7,0x86,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc7,0x89,0x00,0xd2,0x12,
++ 0x91,0x0b,0x10,0x07,0x01,0xff,0xc7,0x89,0x00,0x01,0x00,0x01,0xff,0xc7,0x8c,0x00,
++ 0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x61,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,
++ 0x61,0xcc,0x8c,0x00,0x01,0xff,0x69,0xcc,0x8c,0x00,0xd3,0x46,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x10,0x08,
++ 0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x8c,0x00,0xd1,0x12,0x10,0x08,
++ 0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x10,0x0a,
++ 0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,
++ 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,
++ 0x75,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,
++ 0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0x75,0xcc,
++ 0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,
++ 0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x87,0xd3,0x41,0xd2,0x26,0xd1,0x14,
++ 0x10,0x0a,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x87,0xcc,
++ 0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x01,0xff,0xc3,0xa6,0xcc,
++ 0x84,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc7,0xa5,0x00,0x01,0x00,0x10,0x08,0x01,
++ 0xff,0x67,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x10,0x08,0x01,
++ 0xff,0x6f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,0x10,0x0a,0x01,
++ 0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x10,
++ 0x09,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0xd3,
++ 0x38,0xd2,0x1a,0xd1,0x0f,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,0x01,0xff,0xc7,
++ 0xb3,0x00,0x10,0x07,0x01,0xff,0xc7,0xb3,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x67,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x10,0x07,0x04,0xff,0xc6,
++ 0x95,0x00,0x04,0xff,0xc6,0xbf,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x6e,
++ 0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x8a,
++ 0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,
++ 0xff,0xc3,0xa6,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,
++ 0xff,0xc3,0xb8,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x31,0x02,
++ 0xe1,0xad,0x44,0xe0,0xc8,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,
++ 0x10,0x08,0x01,0xff,0x61,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x65,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,
++ 0x01,0xff,0x65,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x69,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,
++ 0x01,0xff,0x69,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x6f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,
++ 0x6f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x72,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,
++ 0x01,0xff,0x72,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x75,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,
++ 0x75,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x04,0xff,0x73,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,
++ 0x74,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,
++ 0xc8,0x9d,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x68,0xcc,0x8c,0x00,0x04,0xff,0x68,
++ 0xcc,0x8c,0x00,0xd4,0x79,0xd3,0x31,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xc6,
++ 0x9e,0x00,0x07,0x00,0x10,0x07,0x04,0xff,0xc8,0xa3,0x00,0x04,0x00,0xd1,0x0b,0x10,
++ 0x07,0x04,0xff,0xc8,0xa5,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x61,0xcc,0x87,0x00,
++ 0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x65,0xcc,
++ 0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x88,0xcc,
+ 0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,0x0a,0x04,0xff,
+- 0x4f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,
+- 0x04,0xff,0x4f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0x93,0x30,0xd2,0x24,
+- 0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x87,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,
+- 0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x59,0xcc,0x84,0x00,0x04,0xff,0x79,0xcc,
+- 0x84,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0xcf,0x86,
+- 0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x08,0x00,0x09,0x00,0x09,0x00,
+- 0x09,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,
+- 0x53,0x04,0x01,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,
+- 0x11,0x04,0x04,0x00,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,0x00,
+- 0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+- 0x04,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x04,0x00,
+- 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,0x07,0x00,0xe1,0x35,0x01,0xd0,
+- 0x72,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0xe6,0xd3,0x10,0x52,0x04,0x01,0xe6,0x91,
+- 0x08,0x10,0x04,0x01,0xe6,0x01,0xe8,0x01,0xdc,0x92,0x0c,0x51,0x04,0x01,0xdc,0x10,
+- 0x04,0x01,0xe8,0x01,0xd8,0x01,0xdc,0xd4,0x2c,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,
+- 0x04,0x01,0xdc,0x01,0xca,0x10,0x04,0x01,0xca,0x01,0xdc,0x51,0x04,0x01,0xdc,0x10,
+- 0x04,0x01,0xdc,0x01,0xca,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0xca,0x01,0xdc,0x01,
+- 0xdc,0x01,0xdc,0xd3,0x08,0x12,0x04,0x01,0xdc,0x01,0x01,0xd2,0x0c,0x91,0x08,0x10,
+- 0x04,0x01,0x01,0x01,0xdc,0x01,0xdc,0x91,0x08,0x10,0x04,0x01,0xdc,0x01,0xe6,0x01,
+- 0xe6,0xcf,0x86,0xd5,0x7f,0xd4,0x47,0xd3,0x2e,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,
+- 0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0x10,0x04,0x01,0xe6,0x01,0xff,0xcc,
+- 0x93,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcc,0x88,0xcc,0x81,0x00,0x01,0xf0,0x10,
+- 0x04,0x04,0xe6,0x04,0xdc,0xd2,0x08,0x11,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,
+- 0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x04,0xdc,0x06,0xff,0x00,0xd3,0x18,0xd2,0x0c,
+- 0x51,0x04,0x07,0xe6,0x10,0x04,0x07,0xe6,0x07,0xdc,0x51,0x04,0x07,0xdc,0x10,0x04,
+- 0x07,0xdc,0x07,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe8,0x08,0xdc,0x10,0x04,
+- 0x08,0xdc,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xe9,0x07,0xea,0x10,0x04,0x07,0xea,
+- 0x07,0xe9,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0xea,0x10,0x04,0x04,0xe9,
+- 0x06,0xe6,0x06,0xe6,0x06,0xe6,0xd3,0x13,0x52,0x04,0x0a,0x00,0x91,0x0b,0x10,0x07,
+- 0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,
+- 0x04,0x01,0x00,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x06,0x01,0xff,0x3b,0x00,0x10,
+- 0x00,0xd0,0xe1,0xcf,0x86,0xd5,0x7a,0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,
+- 0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
+- 0xce,0x91,0xcc,0x81,0x00,0x01,0xff,0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,
+- 0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,0x10,0x09,
+- 0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,
+- 0x9f,0xcc,0x81,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0x01,
+- 0xff,0xce,0xa9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,
+- 0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,
+- 0x4a,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,
+- 0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x88,0x00,
+- 0x01,0xff,0xce,0xa5,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,
+- 0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,
+- 0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,0x91,0x0f,0x10,
+- 0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x39,0x53,0x04,0x01,0x00,0xd2,0x16,0x51,0x04,
+- 0x01,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,
+- 0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,
+- 0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x0a,0x00,0xd3,
+- 0x26,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xcf,0x92,0xcc,
+- 0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x04,0x00,0xd2,0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x01,0x00,0x04,
+- 0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd4,
+- 0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x01,0x00,0x01,
+- 0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x06,
+- 0x00,0x07,0x00,0x12,0x04,0x07,0x00,0x08,0x00,0xe3,0x47,0x04,0xe2,0xbe,0x02,0xe1,
+- 0x07,0x01,0xd0,0x8b,0xcf,0x86,0xd5,0x6c,0xd4,0x53,0xd3,0x30,0xd2,0x1f,0xd1,0x12,
+- 0x10,0x09,0x04,0xff,0xd0,0x95,0xcc,0x80,0x00,0x01,0xff,0xd0,0x95,0xcc,0x88,0x00,
+- 0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x93,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x01,0xff,0xd0,0x86,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xd0,0x9a,0xcc,0x81,0x00,0x04,0xff,0xd0,0x98,0xcc,0x80,0x00,
+- 0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x86,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0x92,
+- 0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x98,0xcc,0x86,0x00,0x01,0x00,
+- 0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x11,0x91,0x0d,0x10,0x04,
+- 0x01,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,
+- 0x57,0x54,0x04,0x01,0x00,0xd3,0x30,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,
+- 0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x04,0x01,0x00,0x01,
+- 0xff,0xd0,0xb3,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
+- 0xd1,0x96,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,
+- 0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd1,
+- 0x83,0xcc,0x86,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x1a,0x52,0x04,0x01,0x00,
+- 0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb4,0xcc,0x8f,0x00,0x01,0xff,0xd1,
+- 0xb5,0xcc,0x8f,0x00,0x01,0x00,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x94,0x24,0xd3,0x18,
+- 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0x51,0x04,0x01,0xe6,
+- 0x10,0x04,0x01,0xe6,0x0a,0xe6,0x92,0x08,0x11,0x04,0x04,0x00,0x06,0x00,0x04,0x00,
+- 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xbe,0xd4,0x4a,0xd3,0x2a,0xd2,0x1a,0xd1,0x0d,
+- 0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x96,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,
+- 0xb6,0xcc,0x86,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,
+- 0x06,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,
+- 0x06,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,
+- 0x09,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x90,0xcc,0x86,
+- 0x00,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0x90,0xcc,0x88,
+- 0x00,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,
+- 0xd0,0x95,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0xd2,0x16,0x51,0x04,
+- 0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x98,0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,
+- 0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x96,0xcc,0x88,0x00,0x01,0xff,0xd0,
+- 0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x97,0xcc,0x88,0x00,0x01,0xff,0xd0,
+- 0xb7,0xcc,0x88,0x00,0xd4,0x74,0xd3,0x3a,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,
+- 0x01,0xff,0xd0,0x98,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,
+- 0x10,0x09,0x01,0xff,0xd0,0x9e,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,
+- 0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0xa8,0xcc,0x88,0x00,0x01,
+- 0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0xad,0xcc,0x88,
+- 0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x84,
+- 0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,
+- 0x01,0xff,0xd0,0xa3,0xcc,0x88,0x00,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x10,0x09,
+- 0x01,0xff,0xd0,0xa3,0xcc,0x8b,0x00,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0x91,0x12,
+- 0x10,0x09,0x01,0xff,0xd0,0xa7,0xcc,0x88,0x00,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,
+- 0x08,0x00,0x92,0x16,0x91,0x12,0x10,0x09,0x01,0xff,0xd0,0xab,0xcc,0x88,0x00,0x01,
+- 0xff,0xd1,0x8b,0xcc,0x88,0x00,0x09,0x00,0x09,0x00,0xd1,0x74,0xd0,0x36,0xcf,0x86,
+- 0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
+- 0xd4,0x10,0x93,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,0x0b,0x00,0x0c,0x00,0x10,0x00,
+- 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0x01,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,
+- 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
+- 0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0xba,
+- 0xcf,0x86,0xd5,0x4c,0xd4,0x24,0x53,0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
+- 0x14,0x00,0x01,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
+- 0x10,0x00,0x10,0x04,0x10,0x00,0x0d,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
+- 0x00,0x00,0x02,0xdc,0x02,0xe6,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,0x02,0xe6,
+- 0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xde,0x02,0xdc,0x02,0xe6,0xd4,0x2c,
+- 0xd3,0x10,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x08,0xdc,0x02,0xdc,0x02,0xdc,
+- 0xd2,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,0x02,0xe6,0xd1,0x08,0x10,0x04,
+- 0x02,0xe6,0x02,0xde,0x10,0x04,0x02,0xe4,0x02,0xe6,0xd3,0x20,0xd2,0x10,0xd1,0x08,
+- 0x10,0x04,0x01,0x0a,0x01,0x0b,0x10,0x04,0x01,0x0c,0x01,0x0d,0xd1,0x08,0x10,0x04,
+- 0x01,0x0e,0x01,0x0f,0x10,0x04,0x01,0x10,0x01,0x11,0xd2,0x10,0xd1,0x08,0x10,0x04,
+- 0x01,0x12,0x01,0x13,0x10,0x04,0x09,0x13,0x01,0x14,0xd1,0x08,0x10,0x04,0x01,0x15,
+- 0x01,0x16,0x10,0x04,0x01,0x00,0x01,0x17,0xcf,0x86,0xd5,0x28,0x94,0x24,0x93,0x20,
+- 0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x18,0x10,0x04,0x01,0x19,0x01,0x00,
+- 0xd1,0x08,0x10,0x04,0x02,0xe6,0x08,0xdc,0x10,0x04,0x08,0x00,0x08,0x12,0x00,0x00,
+- 0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,
+- 0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x93,0x10,
+- 0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0xe2,0xfb,0x01,0xe1,0x2b,0x01,0xd0,0xa8,0xcf,0x86,0xd5,0x55,0xd4,0x28,0xd3,0x10,
+- 0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x0a,0x00,0xd2,0x0c,
+- 0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x08,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+- 0x07,0x00,0x07,0x00,0xd3,0x0c,0x52,0x04,0x07,0xe6,0x11,0x04,0x07,0xe6,0x0a,0xe6,
+- 0xd2,0x10,0xd1,0x08,0x10,0x04,0x0a,0x1e,0x0a,0x1f,0x10,0x04,0x0a,0x20,0x01,0x00,
+- 0xd1,0x09,0x10,0x05,0x0f,0xff,0x00,0x00,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0xd4,
+- 0x3d,0x93,0x39,0xd2,0x1a,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x09,0x01,
+- 0xff,0xd8,0xa7,0xd9,0x93,0x00,0x01,0xff,0xd8,0xa7,0xd9,0x94,0x00,0xd1,0x12,0x10,
+- 0x09,0x01,0xff,0xd9,0x88,0xd9,0x94,0x00,0x01,0xff,0xd8,0xa7,0xd9,0x95,0x00,0x10,
+- 0x09,0x01,0xff,0xd9,0x8a,0xd9,0x94,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,
+- 0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x86,
+- 0xd5,0x5c,0xd4,0x20,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,
+- 0x01,0x00,0x01,0x1b,0xd1,0x08,0x10,0x04,0x01,0x1c,0x01,0x1d,0x10,0x04,0x01,0x1e,
+- 0x01,0x1f,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x20,0x01,0x21,0x10,0x04,
+- 0x01,0x22,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x07,0xdc,
+- 0x07,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0xe6,0x08,0xe6,0x08,0xe6,0xd1,0x08,
+- 0x10,0x04,0x08,0xdc,0x08,0xe6,0x10,0x04,0x08,0xe6,0x0c,0xdc,0xd4,0x10,0x53,0x04,
+- 0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,
+- 0x91,0x08,0x10,0x04,0x01,0x23,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,
+- 0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,
+- 0x11,0x04,0x04,0x00,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,
+- 0xcf,0x86,0xd5,0x5b,0xd4,0x2e,0xd3,0x1e,0x92,0x1a,0xd1,0x0d,0x10,0x09,0x01,0xff,
+- 0xdb,0x95,0xd9,0x94,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xdb,0x81,0xd9,0x94,0x00,
++ 0x6f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,
++ 0x04,0xff,0x6f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0xd3,0x27,0xe2,0x0b,
++ 0x43,0xd1,0x14,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x04,0xff,0x6f,
++ 0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x79,0xcc,0x84,0x00,0x04,0xff,0x79,
++ 0xcc,0x84,0x00,0xd2,0x13,0x51,0x04,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa5,
++ 0x00,0x08,0xff,0xc8,0xbc,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,0xc6,0x9a,
++ 0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa6,0x00,0x08,0x00,0xcf,0x86,0x95,0x5f,0x94,
++ 0x5b,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,0xc9,0x82,0x00,
++ 0x10,0x04,0x09,0x00,0x09,0xff,0xc6,0x80,0x00,0xd1,0x0e,0x10,0x07,0x09,0xff,0xca,
++ 0x89,0x00,0x09,0xff,0xca,0x8c,0x00,0x10,0x07,0x09,0xff,0xc9,0x87,0x00,0x09,0x00,
++ 0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x89,0x00,0x09,0x00,0x10,0x07,0x09,
++ 0xff,0xc9,0x8b,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x8d,0x00,0x09,
++ 0x00,0x10,0x07,0x09,0xff,0xc9,0x8f,0x00,0x09,0x00,0x01,0x00,0x01,0x00,0xd1,0x8b,
++ 0xd0,0x0c,0xcf,0x86,0xe5,0xfa,0x42,0x64,0xd9,0x42,0x01,0xe6,0xcf,0x86,0xd5,0x2a,
++ 0xe4,0x82,0x43,0xe3,0x69,0x43,0xd2,0x11,0xe1,0x48,0x43,0x10,0x07,0x01,0xff,0xcc,
++ 0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0xe1,0x4f,0x43,0x10,0x09,0x01,0xff,0xcc,0x88,
++ 0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0x00,0xd4,0x0f,0x93,0x0b,0x92,0x07,0x61,0x94,
++ 0x43,0x01,0xea,0x06,0xe6,0x06,0xe6,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x0a,
++ 0xff,0xcd,0xb1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb3,0x00,0x0a,0x00,0xd1,
++ 0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb7,
++ 0x00,0x0a,0x00,0xd2,0x07,0x61,0x80,0x43,0x00,0x00,0x51,0x04,0x09,0x00,0x10,0x06,
++ 0x01,0xff,0x3b,0x00,0x10,0xff,0xcf,0xb3,0x00,0xe0,0x31,0x01,0xcf,0x86,0xd5,0xd3,
++ 0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,
++ 0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,0x00,0x01,0xff,
++ 0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,
++ 0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,
++ 0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x00,0x00,0x10,
++ 0x09,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0xd3,
++ 0x3c,0xd2,0x20,0xd1,0x12,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,
++ 0x01,0xff,0xce,0xb1,0x00,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb3,
++ 0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb4,0x00,0x01,0xff,0xce,0xb5,0x00,0x10,
++ 0x07,0x01,0xff,0xce,0xb6,0x00,0x01,0xff,0xce,0xb7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,
++ 0x07,0x01,0xff,0xce,0xb8,0x00,0x01,0xff,0xce,0xb9,0x00,0x10,0x07,0x01,0xff,0xce,
++ 0xba,0x00,0x01,0xff,0xce,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xbc,0x00,
++ 0x01,0xff,0xce,0xbd,0x00,0x10,0x07,0x01,0xff,0xce,0xbe,0x00,0x01,0xff,0xce,0xbf,
++ 0x00,0xe4,0x6e,0x43,0xd3,0x35,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcf,0x80,
++ 0x00,0x01,0xff,0xcf,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x83,0x00,0xd1,
++ 0x0e,0x10,0x07,0x01,0xff,0xcf,0x84,0x00,0x01,0xff,0xcf,0x85,0x00,0x10,0x07,0x01,
++ 0xff,0xcf,0x86,0x00,0x01,0xff,0xcf,0x87,0x00,0xe2,0x14,0x43,0xd1,0x0e,0x10,0x07,
++ 0x01,0xff,0xcf,0x88,0x00,0x01,0xff,0xcf,0x89,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,
++ 0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xcf,0x86,0xd5,0x94,0xd4,0x3c,
++ 0xd3,0x13,0x92,0x0f,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0x83,0x00,0x01,
++ 0x00,0x01,0x00,0xd2,0x07,0x61,0x23,0x43,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,
++ 0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
++ 0xcf,0x89,0xcc,0x81,0x00,0x0a,0xff,0xcf,0x97,0x00,0xd3,0x2c,0xd2,0x11,0xe1,0x2f,
++ 0x43,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb8,0x00,0xd1,0x10,0x10,
++ 0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0xff,0xcf,0x86,0x00,0x10,0x07,0x01,
++ 0xff,0xcf,0x80,0x00,0x04,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xcf,0x99,
++ 0x00,0x06,0x00,0x10,0x07,0x01,0xff,0xcf,0x9b,0x00,0x04,0x00,0xd1,0x0b,0x10,0x07,
++ 0x01,0xff,0xcf,0x9d,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0x9f,0x00,0x04,0x00,
++ 0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa1,0x00,0x04,
++ 0x00,0x10,0x07,0x01,0xff,0xcf,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,
++ 0xcf,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xa7,0x00,0x01,0x00,0xd2,0x16,
++ 0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,
++ 0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xad,0x00,0x01,0x00,0x10,
++ 0x07,0x01,0xff,0xcf,0xaf,0x00,0x01,0x00,0xd3,0x2b,0xd2,0x12,0x91,0x0e,0x10,0x07,
++ 0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xcf,0x81,0x00,0x01,0x00,0xd1,0x0e,0x10,0x07,
++ 0x05,0xff,0xce,0xb8,0x00,0x05,0xff,0xce,0xb5,0x00,0x10,0x04,0x06,0x00,0x07,0xff,
++ 0xcf,0xb8,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x07,0x00,0x07,0xff,0xcf,0xb2,0x00,
++ 0x10,0x07,0x07,0xff,0xcf,0xbb,0x00,0x07,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,
++ 0xff,0xcd,0xbb,0x00,0x10,0x07,0x08,0xff,0xcd,0xbc,0x00,0x08,0xff,0xcd,0xbd,0x00,
++ 0xe3,0xd6,0x46,0xe2,0x3d,0x05,0xe1,0x27,0x02,0xe0,0x66,0x01,0xcf,0x86,0xd5,0xf0,
++ 0xd4,0x7e,0xd3,0x40,0xd2,0x22,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0xb5,0xcc,0x80,
++ 0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x07,0x01,0xff,0xd1,0x92,0x00,0x01,
++ 0xff,0xd0,0xb3,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x94,0x00,0x01,
++ 0xff,0xd1,0x95,0x00,0x10,0x07,0x01,0xff,0xd1,0x96,0x00,0x01,0xff,0xd1,0x96,0xcc,
++ 0x88,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x98,0x00,0x01,0xff,0xd1,
++ 0x99,0x00,0x10,0x07,0x01,0xff,0xd1,0x9a,0x00,0x01,0xff,0xd1,0x9b,0x00,0xd1,0x12,
++ 0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,0x00,
++ 0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0xff,0xd1,0x9f,0x00,0xd3,0x38,
++ 0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xb0,0x00,0x01,0xff,0xd0,0xb1,0x00,
++ 0x10,0x07,0x01,0xff,0xd0,0xb2,0x00,0x01,0xff,0xd0,0xb3,0x00,0xd1,0x0e,0x10,0x07,
++ 0x01,0xff,0xd0,0xb4,0x00,0x01,0xff,0xd0,0xb5,0x00,0x10,0x07,0x01,0xff,0xd0,0xb6,
++ 0x00,0x01,0xff,0xd0,0xb7,0x00,0xd2,0x1e,0xd1,0x10,0x10,0x07,0x01,0xff,0xd0,0xb8,
++ 0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x10,0x07,0x01,0xff,0xd0,0xba,0x00,0x01,
++ 0xff,0xd0,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xbc,0x00,0x01,0xff,0xd0,
++ 0xbd,0x00,0x10,0x07,0x01,0xff,0xd0,0xbe,0x00,0x01,0xff,0xd0,0xbf,0x00,0xe4,0x0e,
++ 0x42,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x80,0x00,0x01,0xff,
++ 0xd1,0x81,0x00,0x10,0x07,0x01,0xff,0xd1,0x82,0x00,0x01,0xff,0xd1,0x83,0x00,0xd1,
++ 0x0e,0x10,0x07,0x01,0xff,0xd1,0x84,0x00,0x01,0xff,0xd1,0x85,0x00,0x10,0x07,0x01,
++ 0xff,0xd1,0x86,0x00,0x01,0xff,0xd1,0x87,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,
++ 0xff,0xd1,0x88,0x00,0x01,0xff,0xd1,0x89,0x00,0x10,0x07,0x01,0xff,0xd1,0x8a,0x00,
++ 0x01,0xff,0xd1,0x8b,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x8c,0x00,0x01,0xff,
++ 0xd1,0x8d,0x00,0x10,0x07,0x01,0xff,0xd1,0x8e,0x00,0x01,0xff,0xd1,0x8f,0x00,0xcf,
++ 0x86,0xd5,0x07,0x64,0xb8,0x41,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
++ 0x10,0x07,0x01,0xff,0xd1,0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xa3,0x00,
++ 0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,
++ 0xff,0xd1,0xa7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa9,
++ 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,
++ 0x01,0xff,0xd1,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xaf,0x00,0x01,0x00,
++ 0xd3,0x33,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb1,0x00,0x01,0x00,0x10,
++ 0x07,0x01,0xff,0xd1,0xb3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb5,
++ 0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,0xff,0xd1,0xb5,
++ 0xcc,0x8f,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb9,0x00,0x01,0x00,
++ 0x10,0x07,0x01,0xff,0xd1,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,
++ 0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbf,0x00,0x01,0x00,0xe0,0x41,0x01,
++ 0xcf,0x86,0xd5,0x8e,0xd4,0x36,0xd3,0x11,0xe2,0x7a,0x41,0xe1,0x71,0x41,0x10,0x07,
++ 0x01,0xff,0xd2,0x81,0x00,0x01,0x00,0xd2,0x0f,0x51,0x04,0x04,0x00,0x10,0x07,0x06,
++ 0xff,0xd2,0x8b,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,0xd2,0x8d,0x00,0x04,
++ 0x00,0x10,0x07,0x04,0xff,0xd2,0x8f,0x00,0x04,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
++ 0x10,0x07,0x01,0xff,0xd2,0x91,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x93,0x00,
++ 0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x95,0x00,0x01,0x00,0x10,0x07,0x01,
++ 0xff,0xd2,0x97,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x99,
++ 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9b,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,
++ 0x01,0xff,0xd2,0x9d,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9f,0x00,0x01,0x00,
++ 0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa1,0x00,0x01,
++ 0x00,0x10,0x07,0x01,0xff,0xd2,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,
++ 0xd2,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa7,0x00,0x01,0x00,0xd2,0x16,
++ 0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,
++ 0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xad,0x00,0x01,0x00,0x10,
++ 0x07,0x01,0xff,0xd2,0xaf,0x00,0x01,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,
++ 0x01,0xff,0xd2,0xb1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xb3,0x00,0x01,0x00,
++ 0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,
++ 0xb7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb9,0x00,0x01,
++ 0x00,0x10,0x07,0x01,0xff,0xd2,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,
++ 0xd2,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbf,0x00,0x01,0x00,0xcf,0x86,
++ 0xd5,0xdc,0xd4,0x5a,0xd3,0x36,0xd2,0x20,0xd1,0x10,0x10,0x07,0x01,0xff,0xd3,0x8f,
++ 0x00,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb6,0xcc,0x86,
++ 0x00,0x01,0xff,0xd3,0x84,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x86,
++ 0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x88,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,
++ 0x01,0x00,0x06,0xff,0xd3,0x8a,0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x8c,0x00,
++ 0xe1,0x52,0x40,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8e,0x00,0xd3,0x41,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb0,0xcc,
++ 0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb0,0xcc,
++ 0x88,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x95,0x00,0x01,0x00,0x10,0x09,0x01,
++ 0xff,0xd0,0xb5,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0xd2,0x1d,0xd1,
++ 0x0b,0x10,0x07,0x01,0xff,0xd3,0x99,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x99,
++ 0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,
++ 0xd0,0xb6,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,
++ 0xd0,0xb7,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,0x82,0xd3,0x41,
++ 0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa1,0x00,0x01,0x00,0x10,0x09,0x01,
++ 0xff,0xd0,0xb8,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x10,
++ 0x09,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0xd2,
++ 0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa9,0x00,0x01,0x00,0x10,0x09,0x01,0xff,
++ 0xd3,0xa9,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,
++ 0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,
++ 0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x41,
++ 0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x01,0xff,0xd1,
++ 0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0x01,0xff,0xd1,
++ 0x83,0xcc,0x8b,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,0x01,
++ 0xff,0xd1,0x87,0xcc,0x88,0x00,0x10,0x07,0x08,0xff,0xd3,0xb7,0x00,0x08,0x00,0xd2,
++ 0x1d,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x01,0xff,0xd1,0x8b,
++ 0xcc,0x88,0x00,0x10,0x07,0x09,0xff,0xd3,0xbb,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,
++ 0x09,0xff,0xd3,0xbd,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd3,0xbf,0x00,0x09,0x00,
++ 0xe1,0x26,0x02,0xe0,0x78,0x01,0xcf,0x86,0xd5,0xb0,0xd4,0x58,0xd3,0x2c,0xd2,0x16,
++ 0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x81,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,
++ 0x83,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x85,0x00,0x06,0x00,0x10,
++ 0x07,0x06,0xff,0xd4,0x87,0x00,0x06,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,
++ 0xd4,0x89,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8b,0x00,0x06,0x00,0xd1,0x0b,
++ 0x10,0x07,0x06,0xff,0xd4,0x8d,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8f,0x00,
++ 0x06,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xd4,0x91,0x00,0x09,
++ 0x00,0x10,0x07,0x09,0xff,0xd4,0x93,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,
++ 0xd4,0x95,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x97,0x00,0x0a,0x00,0xd2,0x16,
++ 0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x99,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,
++ 0x9b,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x9d,0x00,0x0a,0x00,0x10,
++ 0x07,0x0a,0xff,0xd4,0x9f,0x00,0x0a,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,
++ 0x10,0x07,0x0a,0xff,0xd4,0xa1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0xa3,0x00,
++ 0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xd4,0xa5,0x00,0x0b,0x00,0x10,0x07,0x0c,
++ 0xff,0xd4,0xa7,0x00,0x0c,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x10,0xff,0xd4,0xa9,
++ 0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xab,0x00,0x10,0x00,0xd1,0x0b,0x10,0x07,
++ 0x10,0xff,0xd4,0xad,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xaf,0x00,0x10,0x00,
++ 0xd3,0x35,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xa1,0x00,0x10,
++ 0x07,0x01,0xff,0xd5,0xa2,0x00,0x01,0xff,0xd5,0xa3,0x00,0xd1,0x0e,0x10,0x07,0x01,
++ 0xff,0xd5,0xa4,0x00,0x01,0xff,0xd5,0xa5,0x00,0x10,0x07,0x01,0xff,0xd5,0xa6,0x00,
++ 0x01,0xff,0xd5,0xa7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xa8,0x00,
++ 0x01,0xff,0xd5,0xa9,0x00,0x10,0x07,0x01,0xff,0xd5,0xaa,0x00,0x01,0xff,0xd5,0xab,
++ 0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xac,0x00,0x01,0xff,0xd5,0xad,0x00,0x10,
++ 0x07,0x01,0xff,0xd5,0xae,0x00,0x01,0xff,0xd5,0xaf,0x00,0xcf,0x86,0xe5,0xf1,0x3e,
++ 0xd4,0x70,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb0,0x00,0x01,
++ 0xff,0xd5,0xb1,0x00,0x10,0x07,0x01,0xff,0xd5,0xb2,0x00,0x01,0xff,0xd5,0xb3,0x00,
++ 0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb4,0x00,0x01,0xff,0xd5,0xb5,0x00,0x10,0x07,
++ 0x01,0xff,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,
++ 0x01,0xff,0xd5,0xb8,0x00,0x01,0xff,0xd5,0xb9,0x00,0x10,0x07,0x01,0xff,0xd5,0xba,
++ 0x00,0x01,0xff,0xd5,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xbc,0x00,0x01,
++ 0xff,0xd5,0xbd,0x00,0x10,0x07,0x01,0xff,0xd5,0xbe,0x00,0x01,0xff,0xd5,0xbf,0x00,
++ 0xe3,0x70,0x3e,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x80,0x00,0x01,0xff,
++ 0xd6,0x81,0x00,0x10,0x07,0x01,0xff,0xd6,0x82,0x00,0x01,0xff,0xd6,0x83,0x00,0xd1,
++ 0x0e,0x10,0x07,0x01,0xff,0xd6,0x84,0x00,0x01,0xff,0xd6,0x85,0x00,0x10,0x07,0x01,
++ 0xff,0xd6,0x86,0x00,0x00,0x00,0xe0,0x18,0x3f,0xcf,0x86,0xe5,0xa9,0x3e,0xe4,0x80,
++ 0x3e,0xe3,0x5f,0x3e,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0xd5,0xa5,0xd6,0x82,0x00,0xe4,0x3e,0x25,0xe3,0xc4,0x1a,0xe2,0xf8,0x80,
++ 0xe1,0xc0,0x13,0xd0,0x1e,0xcf,0x86,0xc5,0xe4,0xf0,0x4a,0xe3,0x3b,0x46,0xe2,0xd1,
++ 0x43,0xe1,0x04,0x43,0xe0,0xc9,0x42,0xcf,0x86,0xe5,0x8e,0x42,0x64,0x71,0x42,0x0b,
++ 0x00,0xcf,0x86,0xe5,0xfa,0x01,0xe4,0xd5,0x55,0xe3,0x76,0x01,0xe2,0x76,0x53,0xd1,
++ 0x0c,0xe0,0xd7,0x52,0xcf,0x86,0x65,0x75,0x52,0x04,0x00,0xe0,0x0d,0x01,0xcf,0x86,
++ 0xd5,0x0a,0xe4,0xf8,0x52,0x63,0xe7,0x52,0x0a,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x80,0x00,0x01,0xff,0xe2,0xb4,0x81,0x00,
++ 0x10,0x08,0x01,0xff,0xe2,0xb4,0x82,0x00,0x01,0xff,0xe2,0xb4,0x83,0x00,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0xe2,0xb4,0x84,0x00,0x01,0xff,0xe2,0xb4,0x85,0x00,0x10,0x08,
++ 0x01,0xff,0xe2,0xb4,0x86,0x00,0x01,0xff,0xe2,0xb4,0x87,0x00,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0xe2,0xb4,0x88,0x00,0x01,0xff,0xe2,0xb4,0x89,0x00,0x10,0x08,
++ 0x01,0xff,0xe2,0xb4,0x8a,0x00,0x01,0xff,0xe2,0xb4,0x8b,0x00,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0xe2,0xb4,0x8c,0x00,0x01,0xff,0xe2,0xb4,0x8d,0x00,0x10,0x08,0x01,0xff,
++ 0xe2,0xb4,0x8e,0x00,0x01,0xff,0xe2,0xb4,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0xe2,0xb4,0x90,0x00,0x01,0xff,0xe2,0xb4,0x91,0x00,0x10,0x08,
++ 0x01,0xff,0xe2,0xb4,0x92,0x00,0x01,0xff,0xe2,0xb4,0x93,0x00,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0xe2,0xb4,0x94,0x00,0x01,0xff,0xe2,0xb4,0x95,0x00,0x10,0x08,0x01,0xff,
++ 0xe2,0xb4,0x96,0x00,0x01,0xff,0xe2,0xb4,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0xe2,0xb4,0x98,0x00,0x01,0xff,0xe2,0xb4,0x99,0x00,0x10,0x08,0x01,0xff,
++ 0xe2,0xb4,0x9a,0x00,0x01,0xff,0xe2,0xb4,0x9b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0xe2,0xb4,0x9c,0x00,0x01,0xff,0xe2,0xb4,0x9d,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,
++ 0x9e,0x00,0x01,0xff,0xe2,0xb4,0x9f,0x00,0xcf,0x86,0xe5,0x2a,0x52,0x94,0x50,0xd3,
++ 0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa0,0x00,0x01,0xff,0xe2,
++ 0xb4,0xa1,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa2,0x00,0x01,0xff,0xe2,0xb4,0xa3,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa4,0x00,0x01,0xff,0xe2,0xb4,0xa5,
++ 0x00,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xa7,0x00,0x52,0x04,0x00,0x00,0x91,
++ 0x0c,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xad,0x00,0x00,0x00,0x01,0x00,0xd2,
++ 0x1b,0xe1,0xce,0x52,0xe0,0x7f,0x52,0xcf,0x86,0x95,0x0f,0x94,0x0b,0x93,0x07,0x62,
++ 0x64,0x52,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd1,0x13,0xe0,0xa5,0x53,0xcf,
++ 0x86,0x95,0x0a,0xe4,0x7a,0x53,0x63,0x69,0x53,0x04,0x00,0x04,0x00,0xd0,0x0d,0xcf,
++ 0x86,0x95,0x07,0x64,0xf4,0x53,0x08,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,
++ 0x54,0x04,0x04,0x00,0xd3,0x07,0x62,0x01,0x54,0x04,0x00,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x11,0xff,0xe1,0x8f,0xb0,0x00,0x11,0xff,0xe1,0x8f,0xb1,0x00,0x10,0x08,0x11,
++ 0xff,0xe1,0x8f,0xb2,0x00,0x11,0xff,0xe1,0x8f,0xb3,0x00,0x91,0x10,0x10,0x08,0x11,
++ 0xff,0xe1,0x8f,0xb4,0x00,0x11,0xff,0xe1,0x8f,0xb5,0x00,0x00,0x00,0xd4,0x1c,0xe3,
++ 0x92,0x56,0xe2,0xc9,0x55,0xe1,0x8c,0x55,0xe0,0x6d,0x55,0xcf,0x86,0x95,0x0a,0xe4,
++ 0x56,0x55,0x63,0x45,0x55,0x04,0x00,0x04,0x00,0xe3,0xd2,0x01,0xe2,0xdd,0x59,0xd1,
++ 0x0c,0xe0,0xfe,0x58,0xcf,0x86,0x65,0xd7,0x58,0x0a,0x00,0xe0,0x4e,0x59,0xcf,0x86,
++ 0xd5,0xc5,0xd4,0x45,0xd3,0x31,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x12,0xff,0xd0,0xb2,
++ 0x00,0x12,0xff,0xd0,0xb4,0x00,0x10,0x07,0x12,0xff,0xd0,0xbe,0x00,0x12,0xff,0xd1,
++ 0x81,0x00,0x51,0x07,0x12,0xff,0xd1,0x82,0x00,0x10,0x07,0x12,0xff,0xd1,0x8a,0x00,
++ 0x12,0xff,0xd1,0xa3,0x00,0x92,0x10,0x91,0x0c,0x10,0x08,0x12,0xff,0xea,0x99,0x8b,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,
++ 0xff,0xe1,0x83,0x90,0x00,0x14,0xff,0xe1,0x83,0x91,0x00,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0x92,0x00,0x14,0xff,0xe1,0x83,0x93,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0x94,0x00,0x14,0xff,0xe1,0x83,0x95,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x96,
++ 0x00,0x14,0xff,0xe1,0x83,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0x98,0x00,0x14,0xff,0xe1,0x83,0x99,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x9a,
++ 0x00,0x14,0xff,0xe1,0x83,0x9b,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0x9c,
++ 0x00,0x14,0xff,0xe1,0x83,0x9d,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x9e,0x00,0x14,
++ 0xff,0xe1,0x83,0x9f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,
++ 0xff,0xe1,0x83,0xa0,0x00,0x14,0xff,0xe1,0x83,0xa1,0x00,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0xa2,0x00,0x14,0xff,0xe1,0x83,0xa3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0xa4,0x00,0x14,0xff,0xe1,0x83,0xa5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xa6,
++ 0x00,0x14,0xff,0xe1,0x83,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0xa8,0x00,0x14,0xff,0xe1,0x83,0xa9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xaa,
++ 0x00,0x14,0xff,0xe1,0x83,0xab,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xac,
++ 0x00,0x14,0xff,0xe1,0x83,0xad,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xae,0x00,0x14,
++ 0xff,0xe1,0x83,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,
++ 0x83,0xb0,0x00,0x14,0xff,0xe1,0x83,0xb1,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xb2,
++ 0x00,0x14,0xff,0xe1,0x83,0xb3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xb4,
++ 0x00,0x14,0xff,0xe1,0x83,0xb5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xb6,0x00,0x14,
++ 0xff,0xe1,0x83,0xb7,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xb8,
++ 0x00,0x14,0xff,0xe1,0x83,0xb9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xba,0x00,0x00,
++ 0x00,0xd1,0x0c,0x10,0x04,0x00,0x00,0x14,0xff,0xe1,0x83,0xbd,0x00,0x10,0x08,0x14,
++ 0xff,0xe1,0x83,0xbe,0x00,0x14,0xff,0xe1,0x83,0xbf,0x00,0xe2,0x9d,0x08,0xe1,0x48,
++ 0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,0x40,0xd2,0x20,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa5,0x00,0x01,0xff,0x61,0xcc,0xa5,0x00,0x10,
++ 0x08,0x01,0xff,0x62,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,0x00,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x62,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,0x00,0x10,0x08,0x01,
++ 0xff,0x62,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,0x24,0xd1,0x14,0x10,
++ 0x0a,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,
++ 0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x87,0x00,0x01,0xff,0x64,0xcc,0x87,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa3,0x00,0x01,0xff,0x64,0xcc,0xa3,0x00,0x10,
++ 0x08,0x01,0xff,0x64,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,0x00,0xd3,0x48,0xd2,
++ 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa7,0x00,0x01,0xff,0x64,0xcc,0xa7,
++ 0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xad,0x00,0x01,0xff,0x64,0xcc,0xad,0x00,0xd1,
++ 0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x84,
++ 0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0x01,0xff,0x65,
++ 0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xad,
++ 0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0xb0,0x00,0x01,
++ 0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,
++ 0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x66,0xcc,0x87,
++ 0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x67,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,0x00,0x10,0x08,0x01,
++ 0xff,0x68,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x68,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x68,
++ 0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x68,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x68,
++ 0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,
++ 0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,0xff,0x69,0xcc,0x88,
++ 0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,0x40,0xd2,0x20,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x81,0x00,0x01,0xff,0x6b,0xcc,0x81,0x00,0x10,
++ 0x08,0x01,0xff,0x6b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,0x00,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x10,0x08,0x01,
++ 0xff,0x6c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,0x24,0xd1,0x14,0x10,
++ 0x0a,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,
++ 0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0xb1,0x00,0x01,0xff,0x6c,0xcc,0xb1,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xad,0x00,0x01,0xff,0x6c,0xcc,0xad,0x00,0x10,
++ 0x08,0x01,0xff,0x6d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,0x00,0xcf,0x86,0xe5,
++ 0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6d,0xcc,
++ 0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6d,0xcc,0xa3,0x00,
++ 0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x87,0x00,
++ 0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa3,0x00,0x01,0xff,
++ 0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0xb1,0x00,
++ 0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xad,0x00,0x01,0xff,
++ 0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,
++ 0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,0xcc,
++ 0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,0xd2,0x28,0xd1,0x14,
++ 0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x84,0xcc,
++ 0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,
++ 0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x70,0xcc,0x81,0x00,0x01,0xff,
++ 0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x70,0xcc,0x87,0x00,0x01,0xff,0x70,0xcc,
++ 0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x87,0x00,0x01,0xff,
++ 0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xa3,0x00,0x01,0xff,0x72,0xcc,
++ 0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,
++ 0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xb1,0x00,0x01,0xff,
++ 0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x73,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,
++ 0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,
++ 0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x10,0x0a,0x01,0xff,
++ 0x73,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,0x00,0xd2,0x24,
++ 0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,
++ 0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0x87,0x00,0x01,0xff,0x74,0xcc,
++ 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xa3,0x00,0x01,0xff,0x74,0xcc,
++ 0xa3,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,0xb1,0x00,
++ 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xad,0x00,0x01,0xff,
++ 0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa4,0x00,0x01,0xff,0x75,0xcc,
++ 0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xb0,0x00,0x01,0xff,0x75,0xcc,
++ 0xb0,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xad,0x00,0x01,0xff,0x75,0xcc,0xad,0x00,
++ 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x01,0xff,
++ 0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,
++ 0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x76,0xcc,
++ 0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x76,0xcc,0xa3,0x00,
++ 0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x11,0x02,0xcf,0x86,0xd5,0xe2,0xd4,0x80,0xd3,
++ 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x80,0x00,0x01,0xff,0x77,
++ 0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x81,0x00,0x01,0xff,0x77,0xcc,0x81,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x88,0x00,0x01,0xff,0x77,0xcc,0x88,
++ 0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x87,0x00,0x01,0xff,0x77,0xcc,0x87,0x00,0xd2,
++ 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0xa3,0x00,0x01,0xff,0x77,0xcc,0xa3,
++ 0x00,0x10,0x08,0x01,0xff,0x78,0xcc,0x87,0x00,0x01,0xff,0x78,0xcc,0x87,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x78,0xcc,0x88,0x00,0x01,0xff,0x78,0xcc,0x88,0x00,0x10,
++ 0x08,0x01,0xff,0x79,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,0x00,0xd3,0x33,0xd2,
++ 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x82,0x00,0x01,0xff,0x7a,0xcc,0x82,
++ 0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0xa3,0x00,0x01,0xff,0x7a,0xcc,0xa3,0x00,0xe1,
++ 0xc4,0x58,0x10,0x08,0x01,0xff,0x7a,0xcc,0xb1,0x00,0x01,0xff,0x7a,0xcc,0xb1,0x00,
++ 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,0x79,0xcc,
++ 0x8a,0x00,0x10,0x08,0x01,0xff,0x61,0xca,0xbe,0x00,0x02,0xff,0x73,0xcc,0x87,0x00,
++ 0x51,0x04,0x0a,0x00,0x10,0x07,0x0a,0xff,0x73,0x73,0x00,0x0a,0x00,0xd4,0x98,0xd3,
++ 0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa3,0x00,0x01,0xff,0x61,
++ 0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x89,
++ 0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x01,0xff,0x61,
++ 0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0x01,
++ 0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,
++ 0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,
++ 0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0xd1,
++ 0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0xa3,
++ 0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0x01,0xff,0x61,
++ 0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,
++ 0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,0x10,0x0a,0x01,
++ 0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0xd1,
++ 0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x86,
++ 0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0x01,0xff,0x61,
++ 0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa3,
++ 0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x89,0x00,0x01,
++ 0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x83,0x00,0x01,
++ 0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0x01,
++ 0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4,0x90,0xd3,0x50,
++ 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x01,0xff,
++ 0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,
++ 0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,
++ 0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,
++ 0x65,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,0x00,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x89,0x00,0x01,0xff,0x69,0xcc,0x89,0x00,
++ 0x10,0x08,0x01,0xff,0x69,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,0x00,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x10,0x08,
++ 0x01,0xff,0x6f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,0x50,0xd2,0x28,
++ 0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,
++ 0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0x01,0xff,
++ 0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,
++ 0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,
++ 0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,0x28,0xd1,0x14,
++ 0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,0xcc,0xa3,0xcc,
++ 0x82,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,
++ 0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,
++ 0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,
++ 0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3,0x48,0xd2,0x28,
++ 0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,
++ 0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,
++ 0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xa3,0x00,
++ 0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x89,0x00,0x01,0xff,
++ 0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,
++ 0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,
++ 0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,
++ 0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,
++ 0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,
++ 0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,
++ 0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,
++ 0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,
++ 0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x89,0x00,
++ 0x01,0xff,0x79,0xcc,0x89,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,
++ 0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbb,0x00,
++ 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbd,0x00,0x0a,0x00,0x10,0x08,
++ 0x0a,0xff,0xe1,0xbb,0xbf,0x00,0x0a,0x00,0xe1,0xbf,0x02,0xe0,0xa1,0x01,0xcf,0x86,
++ 0xd5,0xc6,0xd4,0x6c,0xd3,0x18,0xe2,0xc0,0x58,0xe1,0xa9,0x58,0x10,0x09,0x01,0xff,
++ 0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,
++ 0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,
++ 0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb1,0xcc,
++ 0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,
++ 0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,
++ 0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x18,
++ 0xe2,0xfc,0x58,0xe1,0xe5,0x58,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x93,0x00,0x01,
++ 0xff,0xce,0xb5,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,
++ 0xcc,0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,
++ 0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,
++ 0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,
++ 0x94,0xcc,0x81,0x00,0x00,0x00,0xd4,0x6c,0xd3,0x18,0xe2,0x26,0x59,0xe1,0x0f,0x59,
++ 0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,
++ 0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,
++ 0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,
++ 0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,
++ 0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,
++ 0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,
++ 0x82,0x00,0xd3,0x18,0xe2,0x62,0x59,0xe1,0x4b,0x59,0x10,0x09,0x01,0xff,0xce,0xb9,
++ 0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,
++ 0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0x10,0x0b,
++ 0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,
++ 0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x81,0x00,0x01,
++ 0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,
++ 0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,0xcf,0x86,0xd5,0xac,
++ 0xd4,0x5a,0xd3,0x18,0xe2,0x9f,0x59,0xe1,0x88,0x59,0x10,0x09,0x01,0xff,0xce,0xbf,
++ 0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,
++ 0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0x10,0x0b,
++ 0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,
++ 0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,0x00,0x01,
++ 0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x18,0xe2,0xc9,0x59,0xe1,
++ 0xb2,0x59,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,
++ 0x94,0x00,0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,
++ 0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,
++ 0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,
++ 0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xe4,0x85,0x5a,0xd3,0x18,0xe2,
++ 0x04,0x5a,0xe1,0xed,0x59,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,
++ 0xcf,0x89,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,
++ 0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,
++ 0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,
++ 0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,
++ 0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,
++ 0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xe0,0xd9,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,
++ 0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xce,
++ 0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,
++ 0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,
++ 0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xce,
++ 0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,
++ 0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
++ 0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,
++ 0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,
++ 0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
++ 0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,
++ 0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,
++ 0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,
++ 0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,
++ 0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xce,0xb9,
++ 0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,
++ 0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,
++ 0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,
++ 0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,
++ 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,
++ 0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,
++ 0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,
++ 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,
++ 0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,
++ 0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0xce,
++ 0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd4,0xc8,0xd3,
++ 0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xce,0xb9,0x00,
++ 0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,
++ 0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0xce,0xb9,
++ 0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,
++ 0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,
++ 0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,
++ 0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xce,
++ 0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,
++ 0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,
++ 0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0xce,
++ 0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,
++ 0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,
++ 0xcd,0x82,0xce,0xb9,0x00,0xd3,0x49,0xd2,0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
++ 0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,
++ 0xb1,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,0xd1,0x0f,0x10,
++ 0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,
++ 0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,
++ 0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,0xce,0xb1,0xcc,
++ 0x81,0x00,0xe1,0xa5,0x5a,0x10,0x09,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,0x01,0x00,
++ 0xcf,0x86,0xd5,0xbd,0xd4,0x7e,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x80,0xce,
++ 0xb9,0x00,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,
++ 0xb7,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcd,0x82,
++ 0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,
++ 0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,0x09,
++ 0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0xe1,0xb4,
++ 0x5a,0x10,0x09,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0x01,0xff,0xe1,0xbe,0xbf,0xcc,
++ 0x80,0x00,0xd3,0x18,0xe2,0xda,0x5a,0xe1,0xc3,0x5a,0x10,0x09,0x01,0xff,0xce,0xb9,
++ 0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0xe2,0xfe,0x5a,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,
++ 0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd4,
++ 0x51,0xd3,0x18,0xe2,0x21,0x5b,0xe1,0x0a,0x5b,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,
++ 0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,
++ 0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0x10,0x09,0x01,
++ 0xff,0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0xe1,0x41,0x5b,
++ 0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,
++ 0xd3,0x3b,0xd2,0x18,0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,
++ 0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,
++ 0xcf,0x89,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,
++ 0x82,0x00,0x01,0xff,0xcf,0x89,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,
++ 0x09,0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0xe1,
++ 0x4b,0x5b,0x10,0x09,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0x01,0xff,0xc2,0xb4,0x00,
++ 0xe0,0xa2,0x67,0xcf,0x86,0xe5,0x24,0x02,0xe4,0x26,0x01,0xe3,0x1b,0x5e,0xd2,0x2b,
++ 0xe1,0xf5,0x5b,0xe0,0x7a,0x5b,0xcf,0x86,0xe5,0x5f,0x5b,0x94,0x1c,0x93,0x18,0x92,
++ 0x14,0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,
++ 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd1,0xd6,0xd0,0x46,0xcf,0x86,0x55,
++ 0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
++ 0x07,0x01,0xff,0xcf,0x89,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,0x00,0x10,0x06,
++ 0x01,0xff,0x6b,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x01,0x00,0xe3,0xba,0x5c,0x92,
++ 0x10,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0x8e,0x00,0x01,0x00,0x01,
++ 0x00,0xcf,0x86,0xd5,0x0a,0xe4,0xd7,0x5c,0x63,0xc2,0x5c,0x06,0x00,0x94,0x80,0xd3,
++ 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb0,0x00,0x01,0xff,0xe2,
++ 0x85,0xb1,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb2,0x00,0x01,0xff,0xe2,0x85,0xb3,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb4,0x00,0x01,0xff,0xe2,0x85,0xb5,
++ 0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb6,0x00,0x01,0xff,0xe2,0x85,0xb7,0x00,0xd2,
++ 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb8,0x00,0x01,0xff,0xe2,0x85,0xb9,
++ 0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xba,0x00,0x01,0xff,0xe2,0x85,0xbb,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xbc,0x00,0x01,0xff,0xe2,0x85,0xbd,0x00,0x10,
++ 0x08,0x01,0xff,0xe2,0x85,0xbe,0x00,0x01,0xff,0xe2,0x85,0xbf,0x00,0x01,0x00,0xe0,
++ 0xc9,0x5c,0xcf,0x86,0xe5,0xa8,0x5c,0xe4,0x87,0x5c,0xe3,0x76,0x5c,0xe2,0x69,0x5c,
++ 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0xff,0xe2,0x86,0x84,0x00,0xe3,0xb8,
++ 0x60,0xe2,0x85,0x60,0xd1,0x0c,0xe0,0x32,0x60,0xcf,0x86,0x65,0x13,0x60,0x01,0x00,
++ 0xd0,0x62,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x18,0x52,0x04,
++ 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x90,0x00,0x01,0xff,
++ 0xe2,0x93,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x92,0x00,
++ 0x01,0xff,0xe2,0x93,0x93,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x94,0x00,0x01,0xff,
++ 0xe2,0x93,0x95,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x96,0x00,0x01,0xff,
++ 0xe2,0x93,0x97,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x98,0x00,0x01,0xff,0xe2,0x93,
++ 0x99,0x00,0xcf,0x86,0xe5,0xec,0x5f,0x94,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0xe2,0x93,0x9a,0x00,0x01,0xff,0xe2,0x93,0x9b,0x00,0x10,0x08,0x01,
++ 0xff,0xe2,0x93,0x9c,0x00,0x01,0xff,0xe2,0x93,0x9d,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xe2,0x93,0x9e,0x00,0x01,0xff,0xe2,0x93,0x9f,0x00,0x10,0x08,0x01,0xff,0xe2,
++ 0x93,0xa0,0x00,0x01,0xff,0xe2,0x93,0xa1,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xe2,0x93,0xa2,0x00,0x01,0xff,0xe2,0x93,0xa3,0x00,0x10,0x08,0x01,0xff,0xe2,
++ 0x93,0xa4,0x00,0x01,0xff,0xe2,0x93,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,
++ 0x93,0xa6,0x00,0x01,0xff,0xe2,0x93,0xa7,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0xa8,
++ 0x00,0x01,0xff,0xe2,0x93,0xa9,0x00,0x01,0x00,0xd4,0x0c,0xe3,0xc8,0x61,0xe2,0xc1,
++ 0x61,0xcf,0x06,0x04,0x00,0xe3,0xa1,0x64,0xe2,0x94,0x63,0xe1,0x2e,0x02,0xe0,0x84,
++ 0x01,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x08,0xff,0xe2,0xb0,0xb0,0x00,0x08,0xff,0xe2,0xb0,0xb1,0x00,0x10,0x08,0x08,0xff,
++ 0xe2,0xb0,0xb2,0x00,0x08,0xff,0xe2,0xb0,0xb3,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
++ 0xe2,0xb0,0xb4,0x00,0x08,0xff,0xe2,0xb0,0xb5,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,
++ 0xb6,0x00,0x08,0xff,0xe2,0xb0,0xb7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++ 0xe2,0xb0,0xb8,0x00,0x08,0xff,0xe2,0xb0,0xb9,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,
++ 0xba,0x00,0x08,0xff,0xe2,0xb0,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb0,
++ 0xbc,0x00,0x08,0xff,0xe2,0xb0,0xbd,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,0xbe,0x00,
++ 0x08,0xff,0xe2,0xb0,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++ 0xe2,0xb1,0x80,0x00,0x08,0xff,0xe2,0xb1,0x81,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
++ 0x82,0x00,0x08,0xff,0xe2,0xb1,0x83,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
++ 0x84,0x00,0x08,0xff,0xe2,0xb1,0x85,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x86,0x00,
++ 0x08,0xff,0xe2,0xb1,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
++ 0x88,0x00,0x08,0xff,0xe2,0xb1,0x89,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8a,0x00,
++ 0x08,0xff,0xe2,0xb1,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8c,0x00,
++ 0x08,0xff,0xe2,0xb1,0x8d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8e,0x00,0x08,0xff,
++ 0xe2,0xb1,0x8f,0x00,0x94,0x7c,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
++ 0xe2,0xb1,0x90,0x00,0x08,0xff,0xe2,0xb1,0x91,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,
++ 0x92,0x00,0x08,0xff,0xe2,0xb1,0x93,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
++ 0x94,0x00,0x08,0xff,0xe2,0xb1,0x95,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x96,0x00,
++ 0x08,0xff,0xe2,0xb1,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,
++ 0x98,0x00,0x08,0xff,0xe2,0xb1,0x99,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9a,0x00,
++ 0x08,0xff,0xe2,0xb1,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9c,0x00,
++ 0x08,0xff,0xe2,0xb1,0x9d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9e,0x00,0x00,0x00,
++ 0x08,0x00,0xcf,0x86,0xd5,0x07,0x64,0x84,0x61,0x08,0x00,0xd4,0x63,0xd3,0x32,0xd2,
++ 0x1b,0xd1,0x0c,0x10,0x08,0x09,0xff,0xe2,0xb1,0xa1,0x00,0x09,0x00,0x10,0x07,0x09,
++ 0xff,0xc9,0xab,0x00,0x09,0xff,0xe1,0xb5,0xbd,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,
++ 0xc9,0xbd,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xa8,0x00,0xd2,
++ 0x18,0xd1,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xaa,0x00,0x10,0x04,0x09,
++ 0x00,0x09,0xff,0xe2,0xb1,0xac,0x00,0xd1,0x0b,0x10,0x04,0x09,0x00,0x0a,0xff,0xc9,
++ 0x91,0x00,0x10,0x07,0x0a,0xff,0xc9,0xb1,0x00,0x0a,0xff,0xc9,0x90,0x00,0xd3,0x27,
++ 0xd2,0x17,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xc9,0x92,0x00,0x0a,0x00,0x10,0x08,0x0a,
++ 0xff,0xe2,0xb1,0xb3,0x00,0x0a,0x00,0x91,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,
++ 0xb1,0xb6,0x00,0x09,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x07,0x0b,
++ 0xff,0xc8,0xbf,0x00,0x0b,0xff,0xc9,0x80,0x00,0xe0,0x83,0x01,0xcf,0x86,0xd5,0xc0,
++ 0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x81,0x00,
++ 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x83,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++ 0x08,0xff,0xe2,0xb2,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x87,0x00,
++ 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x89,0x00,0x08,0x00,
++ 0x10,0x08,0x08,0xff,0xe2,0xb2,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++ 0xe2,0xb2,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8f,0x00,0x08,0x00,
++ 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x91,0x00,0x08,0x00,
++ 0x10,0x08,0x08,0xff,0xe2,0xb2,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++ 0xe2,0xb2,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x97,0x00,0x08,0x00,
++ 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x99,0x00,0x08,0x00,0x10,0x08,
++ 0x08,0xff,0xe2,0xb2,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
++ 0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x9f,0x00,0x08,0x00,0xd4,0x60,
++ 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa1,0x00,0x08,0x00,
++ 0x10,0x08,0x08,0xff,0xe2,0xb2,0xa3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++ 0xe2,0xb2,0xa5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa7,0x00,0x08,0x00,
++ 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa9,0x00,0x08,0x00,0x10,0x08,
++ 0x08,0xff,0xe2,0xb2,0xab,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
++ 0xad,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xaf,0x00,0x08,0x00,0xd3,0x30,
++ 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb1,0x00,0x08,0x00,0x10,0x08,
++ 0x08,0xff,0xe2,0xb2,0xb3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,
++ 0xb5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb7,0x00,0x08,0x00,0xd2,0x18,
++ 0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb9,0x00,0x08,0x00,0x10,0x08,0x08,0xff,
++ 0xe2,0xb2,0xbb,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbd,0x00,
++ 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbf,0x00,0x08,0x00,0xcf,0x86,0xd5,0xc0,
++ 0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x81,0x00,
++ 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x83,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,
++ 0x08,0xff,0xe2,0xb3,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x87,0x00,
++ 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x89,0x00,0x08,0x00,
++ 0x10,0x08,0x08,0xff,0xe2,0xb3,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++ 0xe2,0xb3,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8f,0x00,0x08,0x00,
++ 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x91,0x00,0x08,0x00,
++ 0x10,0x08,0x08,0xff,0xe2,0xb3,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,
++ 0xe2,0xb3,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x97,0x00,0x08,0x00,
++ 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x99,0x00,0x08,0x00,0x10,0x08,
++ 0x08,0xff,0xe2,0xb3,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,
++ 0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x9f,0x00,0x08,0x00,0xd4,0x3b,
++ 0xd3,0x1c,0x92,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa1,0x00,0x08,0x00,
++ 0x10,0x08,0x08,0xff,0xe2,0xb3,0xa3,0x00,0x08,0x00,0x08,0x00,0xd2,0x10,0x51,0x04,
++ 0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0xff,0xe2,0xb3,0xac,0x00,0xe1,0xd0,0x5e,0x10,
++ 0x04,0x0b,0x00,0x0b,0xff,0xe2,0xb3,0xae,0x00,0xe3,0xd5,0x5e,0x92,0x10,0x51,0x04,
++ 0x0b,0xe6,0x10,0x08,0x0d,0xff,0xe2,0xb3,0xb3,0x00,0x0d,0x00,0x00,0x00,0xe2,0x98,
++ 0x08,0xd1,0x0b,0xe0,0x8d,0x66,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe0,0xe1,0x6b,0xcf,
++ 0x86,0xe5,0xa7,0x05,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x0c,0xe2,0x74,0x67,0xe1,
++ 0x0b,0x67,0xcf,0x06,0x04,0x00,0xe2,0xdb,0x01,0xe1,0x26,0x01,0xd0,0x09,0xcf,0x86,
++ 0x65,0x70,0x67,0x0a,0x00,0xcf,0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,
++ 0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,
++ 0x99,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x85,0x00,0x0a,
++ 0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,
++ 0x08,0x0a,0xff,0xea,0x99,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x8b,
++ 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x8d,0x00,0x0a,0x00,0x10,
++ 0x08,0x0a,0xff,0xea,0x99,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,
++ 0x08,0x0a,0xff,0xea,0x99,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x93,
++ 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x95,0x00,0x0a,0x00,0x10,
++ 0x08,0x0a,0xff,0xea,0x99,0x97,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,
++ 0xff,0xea,0x99,0x99,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x9b,0x00,0x0a,
++ 0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,
++ 0xff,0xea,0x99,0x9f,0x00,0x0a,0x00,0xe4,0xd9,0x66,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
++ 0x10,0x08,0x0c,0xff,0xea,0x99,0xa1,0x00,0x0c,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,
++ 0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0xa5,0x00,0x0a,0x00,
++ 0x10,0x08,0x0a,0xff,0xea,0x99,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,
++ 0x0a,0xff,0xea,0x99,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0xab,0x00,
++ 0x0a,0x00,0xe1,0x88,0x66,0x10,0x08,0x0a,0xff,0xea,0x99,0xad,0x00,0x0a,0x00,0xe0,
++ 0xb1,0x66,0xcf,0x86,0x95,0xab,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,
++ 0x0a,0xff,0xea,0x9a,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x83,0x00,
++ 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x85,0x00,0x0a,0x00,0x10,0x08,
++ 0x0a,0xff,0xea,0x9a,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,
++ 0xea,0x9a,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8b,0x00,0x0a,0x00,
++ 0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,
++ 0xea,0x9a,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,
++ 0xea,0x9a,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x93,0x00,0x0a,0x00,
++ 0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,
++ 0xea,0x9a,0x97,0x00,0x0a,0x00,0xe2,0x0e,0x66,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,
++ 0x9a,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9a,0x9b,0x00,0x10,0x00,0x0b,
++ 0x00,0xe1,0x10,0x02,0xd0,0xb9,0xcf,0x86,0xd5,0x07,0x64,0x1a,0x66,0x08,0x00,0xd4,
++ 0x58,0xd3,0x28,0xd2,0x10,0x51,0x04,0x09,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa3,
++ 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa5,0x00,0x0a,0x00,0x10,
++ 0x08,0x0a,0xff,0xea,0x9c,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,
++ 0xff,0xea,0x9c,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xab,0x00,0x0a,
++ 0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,
++ 0xff,0xea,0x9c,0xaf,0x00,0x0a,0x00,0xd3,0x28,0xd2,0x10,0x51,0x04,0x0a,0x00,0x10,
++ 0x08,0x0a,0xff,0xea,0x9c,0xb3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
++ 0x9c,0xb5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb7,0x00,0x0a,0x00,0xd2,
++ 0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb9,0x00,0x0a,0x00,0x10,0x08,0x0a,
++ 0xff,0xea,0x9c,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbd,
++ 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbf,0x00,0x0a,0x00,0xcf,0x86,0xd5,
++ 0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x81,
++ 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,
++ 0x08,0x0a,0xff,0xea,0x9d,0x85,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x87,
++ 0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x89,0x00,0x0a,
++ 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
++ 0xff,0xea,0x9d,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8f,0x00,0x0a,
++ 0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x91,0x00,0x0a,
++ 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
++ 0xff,0xea,0x9d,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x97,0x00,0x0a,
++ 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x99,0x00,0x0a,0x00,0x10,
++ 0x08,0x0a,0xff,0xea,0x9d,0x9b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
++ 0x9d,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9f,0x00,0x0a,0x00,0xd4,
++ 0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa1,0x00,0x0a,
++ 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,
++ 0xff,0xea,0x9d,0xa5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa7,0x00,0x0a,
++ 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa9,0x00,0x0a,0x00,0x10,
++ 0x08,0x0a,0xff,0xea,0x9d,0xab,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,
++ 0x9d,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xaf,0x00,0x0a,0x00,0x53,
++ 0x04,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xba,
++ 0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xbc,0x00,0xd1,0x0c,0x10,0x04,0x0a,
++ 0x00,0x0a,0xff,0xe1,0xb5,0xb9,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xbf,0x00,0x0a,
++ 0x00,0xe0,0x71,0x01,0xcf,0x86,0xd5,0xa6,0xd4,0x4e,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
++ 0x10,0x08,0x0a,0xff,0xea,0x9e,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9e,
++ 0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x85,0x00,0x0a,0x00,
++ 0x10,0x08,0x0a,0xff,0xea,0x9e,0x87,0x00,0x0a,0x00,0xd2,0x10,0x51,0x04,0x0a,0x00,
++ 0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9e,0x8c,0x00,0xe1,0x16,0x64,0x10,0x04,0x0a,
++ 0x00,0x0c,0xff,0xc9,0xa5,0x00,0xd3,0x28,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,
++ 0xea,0x9e,0x91,0x00,0x0c,0x00,0x10,0x08,0x0d,0xff,0xea,0x9e,0x93,0x00,0x0d,0x00,
++ 0x51,0x04,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x97,0x00,0x10,0x00,0xd2,0x18,
++ 0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,
++ 0xea,0x9e,0x9b,0x00,0x10,0x00,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x9d,0x00,
++ 0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x9f,0x00,0x10,0x00,0xd4,0x63,0xd3,0x30,
++ 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa1,0x00,0x0c,0x00,0x10,0x08,
++ 0x0c,0xff,0xea,0x9e,0xa3,0x00,0x0c,0x00,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,
++ 0xa5,0x00,0x0c,0x00,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa7,0x00,0x0c,0x00,0xd2,0x1a,
++ 0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa9,0x00,0x0c,0x00,0x10,0x07,0x0d,0xff,
++ 0xc9,0xa6,0x00,0x10,0xff,0xc9,0x9c,0x00,0xd1,0x0e,0x10,0x07,0x10,0xff,0xc9,0xa1,
++ 0x00,0x10,0xff,0xc9,0xac,0x00,0x10,0x07,0x12,0xff,0xc9,0xaa,0x00,0x14,0x00,0xd3,
++ 0x35,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x10,0xff,0xca,0x9e,0x00,0x10,0xff,0xca,0x87,
++ 0x00,0x10,0x07,0x11,0xff,0xca,0x9d,0x00,0x11,0xff,0xea,0xad,0x93,0x00,0xd1,0x0c,
++ 0x10,0x08,0x11,0xff,0xea,0x9e,0xb5,0x00,0x11,0x00,0x10,0x08,0x11,0xff,0xea,0x9e,
++ 0xb7,0x00,0x11,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x14,0xff,0xea,0x9e,0xb9,0x00,
++ 0x14,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbb,0x00,0x15,0x00,0xd1,0x0c,0x10,0x08,
++ 0x15,0xff,0xea,0x9e,0xbd,0x00,0x15,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbf,0x00,
++ 0x15,0x00,0xcf,0x86,0xe5,0x50,0x63,0x94,0x2f,0x93,0x2b,0xd2,0x10,0x51,0x04,0x00,
++ 0x00,0x10,0x08,0x15,0xff,0xea,0x9f,0x83,0x00,0x15,0x00,0xd1,0x0f,0x10,0x08,0x15,
++ 0xff,0xea,0x9e,0x94,0x00,0x15,0xff,0xca,0x82,0x00,0x10,0x08,0x15,0xff,0xe1,0xb6,
++ 0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x30,0x66,0xd3,0x1d,0xe2,0xd7,0x63,
++ 0xe1,0x86,0x63,0xe0,0x73,0x63,0xcf,0x86,0xe5,0x54,0x63,0x94,0x0b,0x93,0x07,0x62,
++ 0x3f,0x63,0x08,0x00,0x08,0x00,0x08,0x00,0xd2,0x0f,0xe1,0xd6,0x64,0xe0,0xa3,0x64,
++ 0xcf,0x86,0x65,0x88,0x64,0x0a,0x00,0xd1,0xab,0xd0,0x1a,0xcf,0x86,0xe5,0x93,0x65,
++ 0xe4,0x76,0x65,0xe3,0x5d,0x65,0xe2,0x50,0x65,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,
++ 0x00,0x0c,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x0b,0x93,0x07,0x62,0xa3,0x65,
++ 0x11,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,
++ 0xa0,0x00,0x11,0xff,0xe1,0x8e,0xa1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa2,0x00,
++ 0x11,0xff,0xe1,0x8e,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa4,0x00,
++ 0x11,0xff,0xe1,0x8e,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa6,0x00,0x11,0xff,
++ 0xe1,0x8e,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa8,0x00,
++ 0x11,0xff,0xe1,0x8e,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xaa,0x00,0x11,0xff,
++ 0xe1,0x8e,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xac,0x00,0x11,0xff,
++ 0xe1,0x8e,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xae,0x00,0x11,0xff,0xe1,0x8e,
++ 0xaf,0x00,0xe0,0x2e,0x65,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb0,0x00,0x11,0xff,0xe1,0x8e,0xb1,0x00,
++ 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb2,0x00,0x11,0xff,0xe1,0x8e,0xb3,0x00,0xd1,0x10,
++ 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb4,0x00,0x11,0xff,0xe1,0x8e,0xb5,0x00,0x10,0x08,
++ 0x11,0xff,0xe1,0x8e,0xb6,0x00,0x11,0xff,0xe1,0x8e,0xb7,0x00,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb8,0x00,0x11,0xff,0xe1,0x8e,0xb9,0x00,0x10,0x08,
++ 0x11,0xff,0xe1,0x8e,0xba,0x00,0x11,0xff,0xe1,0x8e,0xbb,0x00,0xd1,0x10,0x10,0x08,
++ 0x11,0xff,0xe1,0x8e,0xbc,0x00,0x11,0xff,0xe1,0x8e,0xbd,0x00,0x10,0x08,0x11,0xff,
++ 0xe1,0x8e,0xbe,0x00,0x11,0xff,0xe1,0x8e,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x11,0xff,0xe1,0x8f,0x80,0x00,0x11,0xff,0xe1,0x8f,0x81,0x00,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0x82,0x00,0x11,0xff,0xe1,0x8f,0x83,0x00,0xd1,0x10,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0x84,0x00,0x11,0xff,0xe1,0x8f,0x85,0x00,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0x86,0x00,0x11,0xff,0xe1,0x8f,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0x88,0x00,0x11,0xff,0xe1,0x8f,0x89,0x00,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0x8a,0x00,0x11,0xff,0xe1,0x8f,0x8b,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0x8c,0x00,0x11,0xff,0xe1,0x8f,0x8d,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
++ 0x8e,0x00,0x11,0xff,0xe1,0x8f,0x8f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x11,0xff,0xe1,0x8f,0x90,0x00,0x11,0xff,0xe1,0x8f,0x91,0x00,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0x92,0x00,0x11,0xff,0xe1,0x8f,0x93,0x00,0xd1,0x10,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0x94,0x00,0x11,0xff,0xe1,0x8f,0x95,0x00,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0x96,0x00,0x11,0xff,0xe1,0x8f,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0x98,0x00,0x11,0xff,0xe1,0x8f,0x99,0x00,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0x9a,0x00,0x11,0xff,0xe1,0x8f,0x9b,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0x9c,0x00,0x11,0xff,0xe1,0x8f,0x9d,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
++ 0x9e,0x00,0x11,0xff,0xe1,0x8f,0x9f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x11,0xff,0xe1,0x8f,0xa0,0x00,0x11,0xff,0xe1,0x8f,0xa1,0x00,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0xa2,0x00,0x11,0xff,0xe1,0x8f,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0xa4,0x00,0x11,0xff,0xe1,0x8f,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
++ 0xa6,0x00,0x11,0xff,0xe1,0x8f,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,
++ 0xe1,0x8f,0xa8,0x00,0x11,0xff,0xe1,0x8f,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,
++ 0xaa,0x00,0x11,0xff,0xe1,0x8f,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,
++ 0xac,0x00,0x11,0xff,0xe1,0x8f,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,0xae,0x00,
++ 0x11,0xff,0xe1,0x8f,0xaf,0x00,0xd1,0x0c,0xe0,0x67,0x63,0xcf,0x86,0xcf,0x06,0x02,
++ 0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,
++ 0x01,0x00,0xd4,0xae,0xd3,0x09,0xe2,0xd0,0x63,0xcf,0x06,0x01,0x00,0xd2,0x27,0xe1,
++ 0x9b,0x6f,0xe0,0xa2,0x6d,0xcf,0x86,0xe5,0xbb,0x6c,0xe4,0x4a,0x6c,0xe3,0x15,0x6c,
++ 0xe2,0xf4,0x6b,0xe1,0xe3,0x6b,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,0x01,0xff,
++ 0xe5,0xba,0xa6,0x00,0xe1,0xf0,0x73,0xe0,0x64,0x73,0xcf,0x86,0xe5,0x9e,0x72,0xd4,
++ 0x3b,0x93,0x37,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x01,0xff,0x66,0x66,0x00,0x01,0xff,
++ 0x66,0x69,0x00,0x10,0x07,0x01,0xff,0x66,0x6c,0x00,0x01,0xff,0x66,0x66,0x69,0x00,
++ 0xd1,0x0f,0x10,0x08,0x01,0xff,0x66,0x66,0x6c,0x00,0x01,0xff,0x73,0x74,0x00,0x10,
++ 0x07,0x01,0xff,0x73,0x74,0x00,0x00,0x00,0x00,0x00,0xe3,0x44,0x72,0xd2,0x11,0x51,
++ 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xb6,0x00,0xd1,0x12,
++ 0x10,0x09,0x01,0xff,0xd5,0xb4,0xd5,0xa5,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xab,0x00,
++ 0x10,0x09,0x01,0xff,0xd5,0xbe,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xad,0x00,
++ 0xd3,0x09,0xe2,0xbc,0x73,0xcf,0x06,0x01,0x00,0xd2,0x12,0xe1,0xab,0x74,0xe0,0x3c,
++ 0x74,0xcf,0x86,0xe5,0x19,0x74,0x64,0x08,0x74,0x06,0x00,0xe1,0x11,0x75,0xe0,0xde,
++ 0x74,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x7c,0xd3,0x3c,0xd2,
++ 0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xef,0xbd,0x81,0x00,0x10,0x08,0x01,
++ 0xff,0xef,0xbd,0x82,0x00,0x01,0xff,0xef,0xbd,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xef,0xbd,0x84,0x00,0x01,0xff,0xef,0xbd,0x85,0x00,0x10,0x08,0x01,0xff,0xef,
++ 0xbd,0x86,0x00,0x01,0xff,0xef,0xbd,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xef,0xbd,0x88,0x00,0x01,0xff,0xef,0xbd,0x89,0x00,0x10,0x08,0x01,0xff,0xef,
++ 0xbd,0x8a,0x00,0x01,0xff,0xef,0xbd,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xef,
++ 0xbd,0x8c,0x00,0x01,0xff,0xef,0xbd,0x8d,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x8e,
++ 0x00,0x01,0xff,0xef,0xbd,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xef,0xbd,0x90,0x00,0x01,0xff,0xef,0xbd,0x91,0x00,0x10,0x08,0x01,0xff,0xef,
++ 0xbd,0x92,0x00,0x01,0xff,0xef,0xbd,0x93,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xef,
++ 0xbd,0x94,0x00,0x01,0xff,0xef,0xbd,0x95,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x96,
++ 0x00,0x01,0xff,0xef,0xbd,0x97,0x00,0x92,0x1c,0xd1,0x10,0x10,0x08,0x01,0xff,0xef,
++ 0xbd,0x98,0x00,0x01,0xff,0xef,0xbd,0x99,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x9a,
++ 0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0xd9,0xb2,0xe1,0xc3,0xaf,0xe0,0x40,0xae,0xcf,
++ 0x86,0xe5,0xe4,0x9a,0xc4,0xe3,0xc1,0x07,0xe2,0x62,0x06,0xe1,0x79,0x85,0xe0,0x09,
++ 0x05,0xcf,0x86,0xe5,0xfb,0x02,0xd4,0x1c,0xe3,0xe7,0x75,0xe2,0x3e,0x75,0xe1,0x19,
++ 0x75,0xe0,0xf2,0x74,0xcf,0x86,0xe5,0xbf,0x74,0x94,0x07,0x63,0xaa,0x74,0x07,0x00,
++ 0x07,0x00,0xe3,0x93,0x77,0xe2,0x58,0x77,0xe1,0x77,0x01,0xe0,0xf0,0x76,0xcf,0x86,
++ 0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,
++ 0x90,0x90,0xa8,0x00,0x05,0xff,0xf0,0x90,0x90,0xa9,0x00,0x10,0x09,0x05,0xff,0xf0,
++ 0x90,0x90,0xaa,0x00,0x05,0xff,0xf0,0x90,0x90,0xab,0x00,0xd1,0x12,0x10,0x09,0x05,
++ 0xff,0xf0,0x90,0x90,0xac,0x00,0x05,0xff,0xf0,0x90,0x90,0xad,0x00,0x10,0x09,0x05,
++ 0xff,0xf0,0x90,0x90,0xae,0x00,0x05,0xff,0xf0,0x90,0x90,0xaf,0x00,0xd2,0x24,0xd1,
++ 0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb0,0x00,0x05,0xff,0xf0,0x90,0x90,0xb1,
++ 0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb2,0x00,0x05,0xff,0xf0,0x90,0x90,0xb3,
++ 0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb4,0x00,0x05,0xff,0xf0,0x90,
++ 0x90,0xb5,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb6,0x00,0x05,0xff,0xf0,0x90,
++ 0x90,0xb7,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,
++ 0xb8,0x00,0x05,0xff,0xf0,0x90,0x90,0xb9,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,
++ 0xba,0x00,0x05,0xff,0xf0,0x90,0x90,0xbb,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,
++ 0x90,0x90,0xbc,0x00,0x05,0xff,0xf0,0x90,0x90,0xbd,0x00,0x10,0x09,0x05,0xff,0xf0,
++ 0x90,0x90,0xbe,0x00,0x05,0xff,0xf0,0x90,0x90,0xbf,0x00,0xd2,0x24,0xd1,0x12,0x10,
++ 0x09,0x05,0xff,0xf0,0x90,0x91,0x80,0x00,0x05,0xff,0xf0,0x90,0x91,0x81,0x00,0x10,
++ 0x09,0x05,0xff,0xf0,0x90,0x91,0x82,0x00,0x05,0xff,0xf0,0x90,0x91,0x83,0x00,0xd1,
++ 0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x84,0x00,0x05,0xff,0xf0,0x90,0x91,0x85,
++ 0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x86,0x00,0x05,0xff,0xf0,0x90,0x91,0x87,
++ 0x00,0x94,0x4c,0x93,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,
++ 0x88,0x00,0x05,0xff,0xf0,0x90,0x91,0x89,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,
++ 0x8a,0x00,0x05,0xff,0xf0,0x90,0x91,0x8b,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,
++ 0x90,0x91,0x8c,0x00,0x05,0xff,0xf0,0x90,0x91,0x8d,0x00,0x10,0x09,0x07,0xff,0xf0,
++ 0x90,0x91,0x8e,0x00,0x07,0xff,0xf0,0x90,0x91,0x8f,0x00,0x05,0x00,0x05,0x00,0xd0,
++ 0xa0,0xcf,0x86,0xd5,0x07,0x64,0x98,0x75,0x07,0x00,0xd4,0x07,0x63,0xa5,0x75,0x07,
++ 0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0x98,0x00,
++ 0x12,0xff,0xf0,0x90,0x93,0x99,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0x9a,0x00,
++ 0x12,0xff,0xf0,0x90,0x93,0x9b,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,
++ 0x9c,0x00,0x12,0xff,0xf0,0x90,0x93,0x9d,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,
++ 0x9e,0x00,0x12,0xff,0xf0,0x90,0x93,0x9f,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,
++ 0xff,0xf0,0x90,0x93,0xa0,0x00,0x12,0xff,0xf0,0x90,0x93,0xa1,0x00,0x10,0x09,0x12,
++ 0xff,0xf0,0x90,0x93,0xa2,0x00,0x12,0xff,0xf0,0x90,0x93,0xa3,0x00,0xd1,0x12,0x10,
++ 0x09,0x12,0xff,0xf0,0x90,0x93,0xa4,0x00,0x12,0xff,0xf0,0x90,0x93,0xa5,0x00,0x10,
++ 0x09,0x12,0xff,0xf0,0x90,0x93,0xa6,0x00,0x12,0xff,0xf0,0x90,0x93,0xa7,0x00,0xcf,
++ 0x86,0xe5,0x2e,0x75,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,
++ 0xf0,0x90,0x93,0xa8,0x00,0x12,0xff,0xf0,0x90,0x93,0xa9,0x00,0x10,0x09,0x12,0xff,
++ 0xf0,0x90,0x93,0xaa,0x00,0x12,0xff,0xf0,0x90,0x93,0xab,0x00,0xd1,0x12,0x10,0x09,
++ 0x12,0xff,0xf0,0x90,0x93,0xac,0x00,0x12,0xff,0xf0,0x90,0x93,0xad,0x00,0x10,0x09,
++ 0x12,0xff,0xf0,0x90,0x93,0xae,0x00,0x12,0xff,0xf0,0x90,0x93,0xaf,0x00,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb0,0x00,0x12,0xff,0xf0,0x90,0x93,
++ 0xb1,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb2,0x00,0x12,0xff,0xf0,0x90,0x93,
++ 0xb3,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb4,0x00,0x12,0xff,0xf0,
++ 0x90,0x93,0xb5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb6,0x00,0x12,0xff,0xf0,
++ 0x90,0x93,0xb7,0x00,0x93,0x28,0x92,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,
++ 0x93,0xb8,0x00,0x12,0xff,0xf0,0x90,0x93,0xb9,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,
++ 0x93,0xba,0x00,0x12,0xff,0xf0,0x90,0x93,0xbb,0x00,0x00,0x00,0x12,0x00,0xd4,0x1f,
++ 0xe3,0x47,0x76,0xe2,0xd2,0x75,0xe1,0x71,0x75,0xe0,0x52,0x75,0xcf,0x86,0xe5,0x1f,
++ 0x75,0x94,0x0a,0xe3,0x0a,0x75,0x62,0x01,0x75,0x07,0x00,0x07,0x00,0xe3,0x46,0x78,
++ 0xe2,0x17,0x78,0xd1,0x09,0xe0,0xb4,0x77,0xcf,0x06,0x0b,0x00,0xe0,0xe7,0x77,0xcf,
++ 0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,
++ 0xf0,0x90,0xb3,0x80,0x00,0x11,0xff,0xf0,0x90,0xb3,0x81,0x00,0x10,0x09,0x11,0xff,
++ 0xf0,0x90,0xb3,0x82,0x00,0x11,0xff,0xf0,0x90,0xb3,0x83,0x00,0xd1,0x12,0x10,0x09,
++ 0x11,0xff,0xf0,0x90,0xb3,0x84,0x00,0x11,0xff,0xf0,0x90,0xb3,0x85,0x00,0x10,0x09,
++ 0x11,0xff,0xf0,0x90,0xb3,0x86,0x00,0x11,0xff,0xf0,0x90,0xb3,0x87,0x00,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x88,0x00,0x11,0xff,0xf0,0x90,0xb3,
++ 0x89,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8a,0x00,0x11,0xff,0xf0,0x90,0xb3,
++ 0x8b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8c,0x00,0x11,0xff,0xf0,
++ 0x90,0xb3,0x8d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8e,0x00,0x11,0xff,0xf0,
++ 0x90,0xb3,0x8f,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,
++ 0xb3,0x90,0x00,0x11,0xff,0xf0,0x90,0xb3,0x91,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,
++ 0xb3,0x92,0x00,0x11,0xff,0xf0,0x90,0xb3,0x93,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,
++ 0xf0,0x90,0xb3,0x94,0x00,0x11,0xff,0xf0,0x90,0xb3,0x95,0x00,0x10,0x09,0x11,0xff,
++ 0xf0,0x90,0xb3,0x96,0x00,0x11,0xff,0xf0,0x90,0xb3,0x97,0x00,0xd2,0x24,0xd1,0x12,
++ 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x98,0x00,0x11,0xff,0xf0,0x90,0xb3,0x99,0x00,
++ 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9a,0x00,0x11,0xff,0xf0,0x90,0xb3,0x9b,0x00,
++ 0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9c,0x00,0x11,0xff,0xf0,0x90,0xb3,
++ 0x9d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9e,0x00,0x11,0xff,0xf0,0x90,0xb3,
++ 0x9f,0x00,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,
++ 0xb3,0xa0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa1,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,
++ 0xb3,0xa2,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa3,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,
++ 0xf0,0x90,0xb3,0xa4,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa5,0x00,0x10,0x09,0x11,0xff,
++ 0xf0,0x90,0xb3,0xa6,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa7,0x00,0xd2,0x24,0xd1,0x12,
++ 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xa8,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa9,0x00,
++ 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xaa,0x00,0x11,0xff,0xf0,0x90,0xb3,0xab,0x00,
++ 0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xac,0x00,0x11,0xff,0xf0,0x90,0xb3,
++ 0xad,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xae,0x00,0x11,0xff,0xf0,0x90,0xb3,
++ 0xaf,0x00,0x93,0x23,0x92,0x1f,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xb0,
++ 0x00,0x11,0xff,0xf0,0x90,0xb3,0xb1,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xb2,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x15,0xe4,0xf9,0x7a,0xe3,0x03,
++ 0x79,0xe2,0xfc,0x77,0xe1,0x4c,0x77,0xe0,0x05,0x77,0xcf,0x06,0x0c,0x00,0xe4,0x53,
++ 0x7e,0xe3,0xac,0x7d,0xe2,0x55,0x7d,0xd1,0x0c,0xe0,0x1a,0x7d,0xcf,0x86,0x65,0xfb,
++ 0x7c,0x14,0x00,0xe0,0x1e,0x7d,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x90,0xd3,0x48,
++ 0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x80,0x00,0x10,0xff,0xf0,
++ 0x91,0xa3,0x81,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x82,0x00,0x10,0xff,0xf0,
++ 0x91,0xa3,0x83,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x84,0x00,0x10,
++ 0xff,0xf0,0x91,0xa3,0x85,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x86,0x00,0x10,
++ 0xff,0xf0,0x91,0xa3,0x87,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,
++ 0xa3,0x88,0x00,0x10,0xff,0xf0,0x91,0xa3,0x89,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,
++ 0xa3,0x8a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8b,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,
++ 0xf0,0x91,0xa3,0x8c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8d,0x00,0x10,0x09,0x10,0xff,
++ 0xf0,0x91,0xa3,0x8e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8f,0x00,0xd3,0x48,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x90,0x00,0x10,0xff,0xf0,0x91,0xa3,
++ 0x91,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x92,0x00,0x10,0xff,0xf0,0x91,0xa3,
++ 0x93,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x94,0x00,0x10,0xff,0xf0,
++ 0x91,0xa3,0x95,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x96,0x00,0x10,0xff,0xf0,
++ 0x91,0xa3,0x97,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x98,
++ 0x00,0x10,0xff,0xf0,0x91,0xa3,0x99,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x9a,
++ 0x00,0x10,0xff,0xf0,0x91,0xa3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,
++ 0xa3,0x9c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9d,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,
++ 0xa3,0x9e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9f,0x00,0xd1,0x11,0xe0,0x7a,0x80,0xcf,
++ 0x86,0xe5,0x71,0x80,0xe4,0x3a,0x80,0xcf,0x06,0x00,0x00,0xe0,0x43,0x82,0xcf,0x86,
++ 0xd5,0x06,0xcf,0x06,0x00,0x00,0xd4,0x09,0xe3,0x78,0x80,0xcf,0x06,0x0c,0x00,0xd3,
++ 0x06,0xcf,0x06,0x00,0x00,0xe2,0xa3,0x81,0xe1,0x7e,0x81,0xd0,0x06,0xcf,0x06,0x00,
++ 0x00,0xcf,0x86,0xa5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,
++ 0x14,0xff,0xf0,0x96,0xb9,0xa0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa1,0x00,0x10,0x09,
++ 0x14,0xff,0xf0,0x96,0xb9,0xa2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa3,0x00,0xd1,0x12,
++ 0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa5,0x00,
++ 0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa7,0x00,
++ 0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa8,0x00,0x14,0xff,0xf0,
++ 0x96,0xb9,0xa9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xaa,0x00,0x14,0xff,0xf0,
++ 0x96,0xb9,0xab,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xac,0x00,0x14,
++ 0xff,0xf0,0x96,0xb9,0xad,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xae,0x00,0x14,
++ 0xff,0xf0,0x96,0xb9,0xaf,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,
++ 0xf0,0x96,0xb9,0xb0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb1,0x00,0x10,0x09,0x14,0xff,
++ 0xf0,0x96,0xb9,0xb2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb3,0x00,0xd1,0x12,0x10,0x09,
++ 0x14,0xff,0xf0,0x96,0xb9,0xb4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb5,0x00,0x10,0x09,
++ 0x14,0xff,0xf0,0x96,0xb9,0xb6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb7,0x00,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb8,0x00,0x14,0xff,0xf0,0x96,0xb9,
++ 0xb9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xba,0x00,0x14,0xff,0xf0,0x96,0xb9,
++ 0xbb,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbc,0x00,0x14,0xff,0xf0,
++ 0x96,0xb9,0xbd,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbe,0x00,0x14,0xff,0xf0,
++ 0x96,0xb9,0xbf,0x00,0x14,0x00,0xd2,0x14,0xe1,0x8d,0x81,0xe0,0x84,0x81,0xcf,0x86,
++ 0xe5,0x45,0x81,0xe4,0x02,0x81,0xcf,0x06,0x12,0x00,0xd1,0x0b,0xe0,0xb8,0x82,0xcf,
++ 0x86,0xcf,0x06,0x00,0x00,0xe0,0xf8,0x8a,0xcf,0x86,0xd5,0x22,0xe4,0x33,0x88,0xe3,
++ 0xf6,0x87,0xe2,0x9b,0x87,0xe1,0x94,0x87,0xe0,0x8d,0x87,0xcf,0x86,0xe5,0x5e,0x87,
++ 0xe4,0x45,0x87,0x93,0x07,0x62,0x34,0x87,0x12,0xe6,0x12,0xe6,0xe4,0x99,0x88,0xe3,
++ 0x92,0x88,0xd2,0x09,0xe1,0x1b,0x88,0xcf,0x06,0x10,0x00,0xe1,0x82,0x88,0xe0,0x4f,
++ 0x88,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,
++ 0x12,0xff,0xf0,0x9e,0xa4,0xa2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa3,0x00,0x10,0x09,
++ 0x12,0xff,0xf0,0x9e,0xa4,0xa4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa5,0x00,0xd1,0x12,
++ 0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa7,0x00,
++ 0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa9,0x00,
++ 0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xaa,0x00,0x12,0xff,0xf0,
++ 0x9e,0xa4,0xab,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xac,0x00,0x12,0xff,0xf0,
++ 0x9e,0xa4,0xad,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xae,0x00,0x12,
++ 0xff,0xf0,0x9e,0xa4,0xaf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb0,0x00,0x12,
++ 0xff,0xf0,0x9e,0xa4,0xb1,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,
++ 0xf0,0x9e,0xa4,0xb2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb3,0x00,0x10,0x09,0x12,0xff,
++ 0xf0,0x9e,0xa4,0xb4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb5,0x00,0xd1,0x12,0x10,0x09,
++ 0x12,0xff,0xf0,0x9e,0xa4,0xb6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb7,0x00,0x10,0x09,
++ 0x12,0xff,0xf0,0x9e,0xa4,0xb8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb9,0x00,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xba,0x00,0x12,0xff,0xf0,0x9e,0xa4,
++ 0xbb,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbc,0x00,0x12,0xff,0xf0,0x9e,0xa4,
++ 0xbd,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbe,0x00,0x12,0xff,0xf0,
++ 0x9e,0xa4,0xbf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa5,0x80,0x00,0x12,0xff,0xf0,
++ 0x9e,0xa5,0x81,0x00,0x94,0x1e,0x93,0x1a,0x92,0x16,0x91,0x12,0x10,0x09,0x12,0xff,
++ 0xf0,0x9e,0xa5,0x82,0x00,0x12,0xff,0xf0,0x9e,0xa5,0x83,0x00,0x12,0x00,0x12,0x00,
++ 0x12,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ /* nfdi_c0100 */
++ 0x57,0x04,0x01,0x00,0xc6,0xe5,0x91,0x13,0xe4,0x27,0x0c,0xe3,0x61,0x07,0xe2,0xda,
++ 0x01,0xc1,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0xe4,0xd4,0x7c,0xd3,0x3c,
++ 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x80,0x00,0x01,0xff,0x41,0xcc,
++ 0x81,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x82,0x00,0x01,0xff,0x41,0xcc,0x83,0x00,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x88,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0x43,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x45,0xcc,0x80,0x00,0x01,0xff,0x45,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,
++ 0x45,0xcc,0x82,0x00,0x01,0xff,0x45,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x49,0xcc,0x80,0x00,0x01,0xff,0x49,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,
++ 0x82,0x00,0x01,0xff,0x49,0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,
++ 0x01,0x00,0x01,0xff,0x4e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x80,0x00,
++ 0x01,0xff,0x4f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x82,0x00,
++ 0x01,0xff,0x4f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x88,0x00,0x01,0x00,
++ 0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x55,0xcc,0x80,0x00,0x10,0x08,
++ 0x01,0xff,0x55,0xcc,0x81,0x00,0x01,0xff,0x55,0xcc,0x82,0x00,0x91,0x10,0x10,0x08,
++ 0x01,0xff,0x55,0xcc,0x88,0x00,0x01,0xff,0x59,0xcc,0x81,0x00,0x01,0x00,0xd4,0x7c,
++ 0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,
++ 0x61,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,
++ 0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,
++ 0x8a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x65,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,
++ 0x01,0xff,0x65,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x69,0xcc,0x80,0x00,0x01,0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,
++ 0x69,0xcc,0x82,0x00,0x01,0xff,0x69,0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,
++ 0x80,0x00,0x01,0xff,0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,
++ 0x82,0x00,0x01,0xff,0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,
++ 0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,
++ 0x10,0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x04,
++ 0x01,0x00,0x01,0xff,0x79,0xcc,0x88,0x00,0xe1,0x9a,0x03,0xe0,0xd3,0x01,0xcf,0x86,
++ 0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,
++ 0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x86,0x00,
++ 0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa8,0x00,
++ 0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x43,0xcc,0x81,0x00,0x01,0xff,
++ 0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x82,0x00,
++ 0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x43,0xcc,0x87,0x00,0x01,0xff,
++ 0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x8c,0x00,0x01,0xff,
++ 0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x8c,0x00,0x01,0xff,0x64,0xcc,
++ 0x8c,0x00,0xd3,0x34,0xd2,0x14,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,
++ 0x84,0x00,0x01,0xff,0x65,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,
++ 0x86,0x00,0x01,0xff,0x65,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x87,0x00,
++ 0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,
++ 0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x8c,0x00,
++ 0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x82,0x00,
++ 0x01,0xff,0x67,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0x86,0x00,0x01,0xff,
++ 0x67,0xcc,0x86,0x00,0xd4,0x74,0xd3,0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x47,0xcc,0x87,0x00,0x01,0xff,0x67,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,
++ 0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,
++ 0x82,0x00,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x49,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,
++ 0x49,0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x49,0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,
++ 0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x30,0xd2,0x10,0x91,0x0c,0x10,0x08,
++ 0x01,0xff,0x49,0xcc,0x87,0x00,0x01,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x4a,0xcc,0x82,0x00,0x01,0xff,0x6a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,
++ 0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0x4c,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,
++ 0x4c,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,
++ 0x4c,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x01,0x00,0xcf,0x86,
++ 0xd5,0xd4,0xd4,0x60,0xd3,0x30,0xd2,0x10,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0x4e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,
++ 0x01,0xff,0x4e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,
++ 0x4e,0xcc,0x8c,0x00,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,
++ 0x01,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x84,0x00,0x01,0xff,
++ 0x6f,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,
++ 0x86,0x00,0xd3,0x34,0xd2,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8b,0x00,
++ 0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,
++ 0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa7,0x00,
++ 0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,
++ 0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0x81,0x00,
++ 0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x82,0x00,
++ 0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0xa7,0x00,0x01,0xff,
++ 0x73,0xcc,0xa7,0x00,0xd4,0x74,0xd3,0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x53,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,
++ 0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,
++ 0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x55,0xcc,0x83,0x00,0x01,0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,
++ 0x55,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x55,0xcc,0x86,0x00,0x01,0xff,0x75,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,
++ 0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x55,0xcc,0x8b,0x00,0x01,0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,
++ 0x55,0xcc,0xa8,0x00,0x01,0xff,0x75,0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x57,0xcc,0x82,0x00,0x01,0xff,0x77,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,
++ 0x82,0x00,0x01,0xff,0x79,0xcc,0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x59,0xcc,0x88,0x00,0x01,0xff,0x5a,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,
++ 0x81,0x00,0x01,0xff,0x5a,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,
++ 0x87,0x00,0x01,0xff,0x5a,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,
++ 0x01,0x00,0xd0,0x4a,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x2c,0xd3,0x18,0x92,0x14,
++ 0x91,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,
+ 0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+- 0x04,0x00,0xd3,0x19,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
+- 0xdb,0x92,0xd9,0x94,0x00,0x11,0x04,0x01,0x00,0x01,0xe6,0x52,0x04,0x01,0xe6,0xd1,
+- 0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0xd4,0x38,0xd3,
+- 0x1c,0xd2,0x0c,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,0x01,0xdc,0xd1,0x08,0x10,
+- 0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0xd2,0x10,0xd1,0x08,0x10,
+- 0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0xdc,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,
+- 0xe6,0x01,0xdc,0x07,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x04,
+- 0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,0xd1,0xc8,0xd0,0x76,0xcf,
+- 0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,
+- 0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,
+- 0x00,0x04,0x24,0x04,0x00,0x04,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,
+- 0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,0x00,0x07,0x00,0xd3,0x1c,0xd2,
+- 0x0c,0x91,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,
+- 0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x0c,0x51,0x04,0x04,0xdc,0x10,
+- 0x04,0x04,0xe6,0x04,0xdc,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,
+- 0xdc,0x04,0xe6,0xcf,0x86,0xd5,0x3c,0x94,0x38,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x04,
+- 0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,
+- 0x04,0x04,0xdc,0x04,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,
+- 0x04,0x04,0xe6,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x08,
+- 0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0a,
+- 0x00,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,
+- 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x14,0x53,0x04,0x09,0x00,0x92,0x0c,0x51,
+- 0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xe6,0x09,0xe6,0xd3,0x10,0x92,0x0c,0x51,
+- 0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,
+- 0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x14,0xdc,0x14,
+- 0x00,0xe4,0xf8,0x57,0xe3,0x45,0x3f,0xe2,0xf4,0x3e,0xe1,0xc7,0x2c,0xe0,0x21,0x10,
+- 0xcf,0x86,0xc5,0xe4,0x80,0x08,0xe3,0xcb,0x03,0xe2,0x61,0x01,0xd1,0x94,0xd0,0x5a,
+- 0xcf,0x86,0xd5,0x20,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,
+- 0x0b,0x00,0x0b,0xe6,0x92,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,0x0b,0x00,0x0b,0xe6,
+- 0x0b,0xe6,0xd4,0x24,0xd3,0x10,0x52,0x04,0x0b,0xe6,0x91,0x08,0x10,0x04,0x0b,0x00,
+- 0x0b,0xe6,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,
+- 0x11,0x04,0x0b,0xe6,0x00,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,
+- 0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x86,0xd5,0x20,0x54,0x04,0x0c,0x00,
+- 0x53,0x04,0x0c,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x0c,0xdc,0x0c,0xdc,
+- 0x51,0x04,0x00,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x13,0x00,
+- 0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0xd0,0x4a,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x20,0xd3,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x0d,0x00,0x10,0x00,0x0d,0x00,0x0d,0x00,0x52,0x04,0x0d,0x00,0x91,0x08,
+- 0x10,0x04,0x0d,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,
+- 0x10,0x04,0x10,0x00,0x11,0x00,0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x12,0x00,
+- 0x52,0x04,0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,
+- 0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x14,0xdc,
+- 0x12,0xe6,0x12,0xe6,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x12,0xe6,0x10,0x04,
+- 0x12,0x00,0x11,0xdc,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xdc,0x0d,0xe6,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xe6,0x91,0x08,0x10,0x04,0x0d,0xe6,
+- 0x0d,0xdc,0x0d,0xdc,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0x1b,0x0d,0x1c,
+- 0x10,0x04,0x0d,0x1d,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xdc,0x0d,0xe6,
+- 0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x10,0x04,0x0d,0xdc,0x0d,0xe6,
+- 0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x10,0xe6,0xe1,0x3a,0x01,0xd0,0x77,0xcf,
+- 0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x01,
+- 0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,
+- 0x00,0xd4,0x1b,0x53,0x04,0x01,0x00,0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,
+- 0xff,0xe0,0xa4,0xa8,0xe0,0xa4,0xbc,0x00,0x01,0x00,0x01,0x00,0xd3,0x26,0xd2,0x13,
+- 0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xb0,0xe0,0xa4,0xbc,0x00,0x01,
+- 0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xb3,0xe0,0xa4,0xbc,0x00,0x01,0x00,
+- 0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x91,0x08,0x10,0x04,0x01,0x07,
+- 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x8c,0xd4,0x18,0x53,0x04,0x01,0x00,0x52,0x04,
+- 0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x10,0x04,0x0b,0x00,0x0c,0x00,
+- 0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x10,0x04,0x01,0xdc,
+- 0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x0b,0x00,0x0c,0x00,0xd2,0x2c,0xd1,0x16,
+- 0x10,0x0b,0x01,0xff,0xe0,0xa4,0x95,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0x96,
+- 0xe0,0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x97,0xe0,0xa4,0xbc,0x00,0x01,
+- 0xff,0xe0,0xa4,0x9c,0xe0,0xa4,0xbc,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa4,
+- 0xa1,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xa2,0xe0,0xa4,0xbc,0x00,0x10,0x0b,
+- 0x01,0xff,0xe0,0xa4,0xab,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xaf,0xe0,0xa4,
+- 0xbc,0x00,0x54,0x04,0x01,0x00,0xd3,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,
+- 0x0a,0x00,0x10,0x04,0x0a,0x00,0x0c,0x00,0x0c,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
+- 0x10,0x00,0x0b,0x00,0x10,0x04,0x0b,0x00,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,
+- 0x08,0x00,0x09,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
+- 0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,
++ 0x01,0xff,0x55,0xcc,0x9b,0x00,0x93,0x14,0x92,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,
++ 0x75,0xcc,0x9b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xb4,
++ 0xd4,0x24,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0x41,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,
++ 0x49,0xcc,0x8c,0x00,0xd3,0x46,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,
++ 0x8c,0x00,0x01,0xff,0x4f,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,
++ 0x01,0xff,0x55,0xcc,0x8c,0x00,0xd1,0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,
++ 0x01,0xff,0x55,0xcc,0x88,0xcc,0x84,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,
++ 0x84,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,
++ 0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x8c,0x00,
++ 0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,
++ 0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,
++ 0x10,0x0a,0x01,0xff,0x41,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,
++ 0x84,0x00,0xd4,0x80,0xd3,0x3a,0xd2,0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,
++ 0x87,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,
++ 0xc3,0x86,0xcc,0x84,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x51,0x04,0x01,0x00,
++ 0x10,0x08,0x01,0xff,0x47,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,
++ 0x10,0x08,0x01,0xff,0x4f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,
++ 0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,
++ 0x84,0x00,0x10,0x09,0x01,0xff,0xc6,0xb7,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,
++ 0x8c,0x00,0xd3,0x24,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,
++ 0x01,0x00,0x01,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x81,0x00,0x01,0xff,
++ 0x67,0xcc,0x81,0x00,0x04,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x4e,0xcc,
++ 0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x8a,0xcc,
++ 0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,
++ 0xc3,0x86,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
++ 0xc3,0x98,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x07,0x02,0xe1,
++ 0xae,0x01,0xe0,0x93,0x01,0xcf,0x86,0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,0x10,
++ 0x08,0x01,0xff,0x41,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x45,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,0x01,
++ 0xff,0x45,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x49,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,0x01,
++ 0xff,0x49,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x4f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x4f,
++ 0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x52,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,0x01,
++ 0xff,0x52,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x55,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x55,
++ 0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x04,
++ 0xff,0x53,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,0x54,
++ 0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,
++ 0xff,0x48,0xcc,0x8c,0x00,0x04,0xff,0x68,0xcc,0x8c,0x00,0xd4,0x68,0xd3,0x20,0xd2,
++ 0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x07,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,
++ 0x08,0x04,0xff,0x41,0xcc,0x87,0x00,0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,
++ 0x10,0x10,0x08,0x04,0xff,0x45,0xcc,0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,
++ 0x0a,0x04,0xff,0x4f,0xcc,0x88,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,
++ 0x00,0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,
++ 0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x4f,0xcc,0x87,0x00,0x04,0xff,0x6f,
++ 0xcc,0x87,0x00,0x93,0x30,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x87,
++ 0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x59,
++ 0xcc,0x84,0x00,0x04,0xff,0x79,0xcc,0x84,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,
++ 0x00,0x08,0x00,0x08,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,
++ 0x04,0x08,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,0xcf,
++ 0x86,0x55,0x04,0x01,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,0x0c,0x91,0x08,0x10,
++ 0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x07,0x00,0x01,0x00,0xcf,
++ 0x86,0xd5,0x18,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,
++ 0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,
++ 0x08,0x11,0x04,0x01,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,
++ 0x00,0x07,0x00,0xe1,0x34,0x01,0xd0,0x72,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0xe6,
++ 0xd3,0x10,0x52,0x04,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0xe8,0x01,0xdc,
++ 0x92,0x0c,0x51,0x04,0x01,0xdc,0x10,0x04,0x01,0xe8,0x01,0xd8,0x01,0xdc,0xd4,0x2c,
++ 0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0xdc,0x01,0xca,0x10,0x04,0x01,0xca,
++ 0x01,0xdc,0x51,0x04,0x01,0xdc,0x10,0x04,0x01,0xdc,0x01,0xca,0x92,0x0c,0x91,0x08,
++ 0x10,0x04,0x01,0xca,0x01,0xdc,0x01,0xdc,0x01,0xdc,0xd3,0x08,0x12,0x04,0x01,0xdc,
++ 0x01,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x01,0x01,0xdc,0x01,0xdc,0x91,0x08,
++ 0x10,0x04,0x01,0xdc,0x01,0xe6,0x01,0xe6,0xcf,0x86,0xd5,0x7e,0xd4,0x46,0xd3,0x2e,
++ 0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,
++ 0x10,0x04,0x01,0xe6,0x01,0xff,0xcc,0x93,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcc,
++ 0x88,0xcc,0x81,0x00,0x01,0xf0,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x08,0x11,0x04,
++ 0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x04,0xdc,
++ 0x06,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x07,0xe6,0x10,0x04,0x07,0xe6,0x07,0xdc,
++ 0x51,0x04,0x07,0xdc,0x10,0x04,0x07,0xdc,0x07,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,
++ 0x08,0xe8,0x08,0xdc,0x10,0x04,0x08,0xdc,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xe9,
++ 0x07,0xea,0x10,0x04,0x07,0xea,0x07,0xe9,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,
++ 0x01,0xea,0x10,0x04,0x04,0xe9,0x06,0xe6,0x06,0xe6,0x06,0xe6,0xd3,0x13,0x52,0x04,
++ 0x0a,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x0a,0x00,0xd2,
++ 0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x01,0x00,0x09,0x00,0x51,0x04,0x09,0x00,0x10,
++ 0x06,0x01,0xff,0x3b,0x00,0x10,0x00,0xd0,0xe1,0xcf,0x86,0xd5,0x7a,0xd4,0x5f,0xd3,
++ 0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcc,
++ 0x81,0x00,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0x01,0xff,0xc2,0xb7,0x00,
++ 0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x01,0xff,0xce,
++ 0x97,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0x00,0x00,0xd1,
++ 0x0d,0x10,0x09,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x00,0x00,0x10,0x09,0x01,0xff,
++ 0xce,0xa5,0xcc,0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,
++ 0x91,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,
++ 0x00,0x01,0x00,0x01,0x00,0xd4,0x4a,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,
++ 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,
++ 0xff,0xce,0x99,0xcc,0x88,0x00,0x01,0xff,0xce,0xa5,0xcc,0x88,0x00,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,
++ 0x09,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0x93,
++ 0x17,0x92,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x81,0x00,
++ 0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x39,0x53,0x04,
++ 0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x88,
++ 0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,
++ 0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,
++ 0xcc,0x81,0x00,0x0a,0x00,0xd3,0x26,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++ 0x00,0x01,0xff,0xcf,0x92,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcf,0x92,
++ 0xcc,0x88,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd2,0x0c,0x51,0x04,0x06,
++ 0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,
++ 0x04,0x01,0x00,0x04,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,
++ 0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,
++ 0x04,0x05,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0x12,0x04,0x07,0x00,0x08,0x00,0xe3,
++ 0x47,0x04,0xe2,0xbe,0x02,0xe1,0x07,0x01,0xd0,0x8b,0xcf,0x86,0xd5,0x6c,0xd4,0x53,
++ 0xd3,0x30,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0x95,0xcc,0x80,0x00,0x01,
++ 0xff,0xd0,0x95,0xcc,0x88,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x93,0xcc,0x81,
++ 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x86,0xcc,0x88,0x00,
++ 0x52,0x04,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x9a,0xcc,0x81,0x00,0x04,
++ 0xff,0xd0,0x98,0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x86,0x00,0x01,
++ 0x00,0x53,0x04,0x01,0x00,0x92,0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,
++ 0x98,0xcc,0x86,0x00,0x01,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,
++ 0x92,0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x01,
++ 0x00,0x01,0x00,0xcf,0x86,0xd5,0x57,0x54,0x04,0x01,0x00,0xd3,0x30,0xd2,0x1f,0xd1,
++ 0x12,0x10,0x09,0x04,0xff,0xd0,0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,
++ 0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0xb3,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xd1,0x96,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,
++ 0x12,0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,
++ 0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0x00,0x54,0x04,0x01,0x00,
++ 0x93,0x1a,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb4,
++ 0xcc,0x8f,0x00,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,0x00,0xd0,0x2e,0xcf,0x86,
++ 0x95,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xe6,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,0x0a,0xe6,0x92,0x08,0x11,0x04,
++ 0x04,0x00,0x06,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xbe,0xd4,0x4a,
++ 0xd3,0x2a,0xd2,0x1a,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x96,0xcc,0x86,
++ 0x00,0x10,0x09,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,
++ 0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
++ 0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,
++ 0x06,0x00,0x10,0x04,0x06,0x00,0x09,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,
++ 0x01,0xff,0xd0,0x90,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,
++ 0x01,0xff,0xd0,0x90,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x51,0x04,
++ 0x01,0x00,0x10,0x09,0x01,0xff,0xd0,0x95,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,
++ 0x86,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x98,0xcc,0x88,
++ 0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x96,
++ 0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x97,
++ 0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,0x74,0xd3,0x3a,0xd2,0x16,
++ 0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x84,0x00,0x01,0xff,0xd0,
++ 0xb8,0xcc,0x84,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x88,0x00,0x01,
++ 0xff,0xd0,0xb8,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x9e,0xcc,0x88,0x00,0x01,
++ 0xff,0xd0,0xbe,0xcc,0x88,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,
++ 0xd3,0xa8,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,
++ 0x04,0xff,0xd0,0xad,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,
++ 0x01,0xff,0xd0,0xa3,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x3a,
++ 0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x88,0x00,0x01,0xff,0xd1,
++ 0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x8b,0x00,0x01,0xff,0xd1,
++ 0x83,0xcc,0x8b,0x00,0x91,0x12,0x10,0x09,0x01,0xff,0xd0,0xa7,0xcc,0x88,0x00,0x01,
++ 0xff,0xd1,0x87,0xcc,0x88,0x00,0x08,0x00,0x92,0x16,0x91,0x12,0x10,0x09,0x01,0xff,
++ 0xd0,0xab,0xcc,0x88,0x00,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x09,0x00,0x09,0x00,
++ 0xd1,0x74,0xd0,0x36,0xcf,0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,
++ 0x09,0x00,0x0a,0x00,0x0a,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,
++ 0x0b,0x00,0x0c,0x00,0x10,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
++ 0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0x00,
++ 0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,
++ 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,
++ 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++ 0x01,0x00,0x01,0x00,0xd0,0xba,0xcf,0x86,0xd5,0x4c,0xd4,0x24,0x53,0x04,0x01,0x00,
++ 0xd2,0x10,0xd1,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x10,0x04,0x04,0x00,0x00,0x00,
++ 0xd1,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x0d,0x00,0xd3,0x18,
++ 0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,0xdc,0x02,0xe6,0x51,0x04,0x02,0xe6,
++ 0x10,0x04,0x02,0xdc,0x02,0xe6,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xde,
++ 0x02,0xdc,0x02,0xe6,0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,
++ 0x08,0xdc,0x02,0xdc,0x02,0xdc,0xd2,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,
++ 0x02,0xe6,0xd1,0x08,0x10,0x04,0x02,0xe6,0x02,0xde,0x10,0x04,0x02,0xe4,0x02,0xe6,
++ 0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x0a,0x01,0x0b,0x10,0x04,0x01,0x0c,
++ 0x01,0x0d,0xd1,0x08,0x10,0x04,0x01,0x0e,0x01,0x0f,0x10,0x04,0x01,0x10,0x01,0x11,
++ 0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x12,0x01,0x13,0x10,0x04,0x09,0x13,0x01,0x14,
++ 0xd1,0x08,0x10,0x04,0x01,0x15,0x01,0x16,0x10,0x04,0x01,0x00,0x01,0x17,0xcf,0x86,
++ 0xd5,0x28,0x94,0x24,0x93,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x18,
++ 0x10,0x04,0x01,0x19,0x01,0x00,0xd1,0x08,0x10,0x04,0x02,0xe6,0x08,0xdc,0x10,0x04,
++ 0x08,0x00,0x08,0x12,0x00,0x00,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,
++ 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
++ 0x00,0x00,0x14,0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0xe2,0xfa,0x01,0xe1,0x2a,0x01,0xd0,0xa7,0xcf,0x86,
++ 0xd5,0x54,0xd4,0x28,0xd3,0x10,0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,
++ 0x10,0x00,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x08,0x00,
++ 0x91,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0xd3,0x0c,0x52,0x04,0x07,0xe6,
++ 0x11,0x04,0x07,0xe6,0x0a,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0a,0x1e,0x0a,0x1f,
++ 0x10,0x04,0x0a,0x20,0x01,0x00,0xd1,0x08,0x10,0x04,0x0f,0x00,0x00,0x00,0x10,0x04,
++ 0x08,0x00,0x01,0x00,0xd4,0x3d,0x93,0x39,0xd2,0x1a,0xd1,0x08,0x10,0x04,0x0c,0x00,
++ 0x01,0x00,0x10,0x09,0x01,0xff,0xd8,0xa7,0xd9,0x93,0x00,0x01,0xff,0xd8,0xa7,0xd9,
++ 0x94,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd9,0x88,0xd9,0x94,0x00,0x01,0xff,0xd8,
++ 0xa7,0xd9,0x95,0x00,0x10,0x09,0x01,0xff,0xd9,0x8a,0xd9,0x94,0x00,0x01,0x00,0x01,
++ 0x00,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0a,
++ 0x00,0x0a,0x00,0xcf,0x86,0xd5,0x5c,0xd4,0x20,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,
++ 0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0x1b,0xd1,0x08,0x10,0x04,0x01,0x1c,0x01,
++ 0x1d,0x10,0x04,0x01,0x1e,0x01,0x1f,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,
++ 0x20,0x01,0x21,0x10,0x04,0x01,0x22,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,
++ 0xdc,0x10,0x04,0x07,0xdc,0x07,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0xe6,0x08,
++ 0xe6,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xdc,0x08,0xe6,0x10,0x04,0x08,0xe6,0x0c,
++ 0xdc,0xd4,0x10,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,
++ 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x23,0x01,0x00,0x01,0x00,0x01,
++ 0x00,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,
++ 0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x04,0x00,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
++ 0x04,0x01,0x00,0x04,0x00,0xcf,0x86,0xd5,0x5b,0xd4,0x2e,0xd3,0x1e,0x92,0x1a,0xd1,
++ 0x0d,0x10,0x09,0x01,0xff,0xdb,0x95,0xd9,0x94,0x00,0x01,0x00,0x10,0x09,0x01,0xff,
++ 0xdb,0x81,0xd9,0x94,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,
++ 0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd3,0x19,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,
++ 0x04,0x01,0x00,0x01,0xff,0xdb,0x92,0xd9,0x94,0x00,0x11,0x04,0x01,0x00,0x01,0xe6,
++ 0x52,0x04,0x01,0xe6,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xe6,0xd4,0x38,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,
++ 0x01,0xdc,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,
++ 0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0xdc,0x01,0xe6,
++ 0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0xdc,0x07,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,
++ 0x11,0x04,0x01,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,
++ 0xd1,0xc8,0xd0,0x76,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,
++ 0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x93,0x10,0x92,0x0c,
++ 0x91,0x08,0x10,0x04,0x04,0x00,0x04,0x24,0x04,0x00,0x04,0x00,0x04,0x00,0xd4,0x14,
++ 0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,0x00,
++ 0x07,0x00,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x04,0xe6,
++ 0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x0c,
++ 0x51,0x04,0x04,0xdc,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd1,0x08,0x10,0x04,0x04,0xdc,
++ 0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xcf,0x86,0xd5,0x3c,0x94,0x38,0xd3,0x1c,
++ 0xd2,0x0c,0x51,0x04,0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,
++ 0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,
++ 0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
++ 0x07,0x00,0x07,0x00,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,
++ 0x11,0x04,0x08,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x04,0x00,
++ 0x54,0x04,0x04,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x14,0x53,0x04,
++ 0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xe6,0x09,0xe6,
++ 0xd3,0x10,0x92,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0x00,
++ 0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x91,0x08,0x10,0x04,
++ 0x00,0x00,0x14,0xdc,0x14,0x00,0xe4,0x78,0x57,0xe3,0xda,0x3e,0xe2,0x89,0x3e,0xe1,
++ 0x91,0x2c,0xe0,0x21,0x10,0xcf,0x86,0xc5,0xe4,0x80,0x08,0xe3,0xcb,0x03,0xe2,0x61,
++ 0x01,0xd1,0x94,0xd0,0x5a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,
++ 0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,0x92,0x0c,0x51,0x04,0x0b,0xe6,0x10,
++ 0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,0xd4,0x24,0xd3,0x10,0x52,0x04,0x0b,0xe6,0x91,
++ 0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,
++ 0x00,0x0b,0xe6,0x0b,0xe6,0x11,0x04,0x0b,0xe6,0x00,0x00,0x53,0x04,0x0b,0x00,0x52,
++ 0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x86,0xd5,
++ 0x20,0x54,0x04,0x0c,0x00,0x53,0x04,0x0c,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0c,
++ 0x00,0x0c,0xdc,0x0c,0xdc,0x51,0x04,0x00,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x94,
++ 0x14,0x53,0x04,0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0xd0,0x4a,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x20,0xd3,
++ 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x0d,0x00,0x0d,0x00,0x52,
++ 0x04,0x0d,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,
++ 0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x11,0x00,0x91,0x08,0x10,0x04,0x11,
++ 0x00,0x00,0x00,0x12,0x00,0x52,0x04,0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0xcf,
++ 0x86,0xd5,0x18,0x54,0x04,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,
++ 0x04,0x00,0x00,0x14,0xdc,0x12,0xe6,0x12,0xe6,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x51,
++ 0x04,0x12,0xe6,0x10,0x04,0x12,0x00,0x11,0xdc,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,
++ 0xdc,0x0d,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xe6,0x91,
++ 0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xdc,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,
++ 0x04,0x0d,0x1b,0x0d,0x1c,0x10,0x04,0x0d,0x1d,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,
++ 0x04,0x0d,0xdc,0x0d,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x10,
++ 0x04,0x0d,0xdc,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x10,0xe6,0xe1,
++ 0x3a,0x01,0xd0,0x77,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,
++ 0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,
++ 0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x1b,0x53,0x04,0x01,0x00,0x92,0x13,0x91,0x0f,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xa8,0xe0,0xa4,0xbc,0x00,0x01,0x00,0x01,
++ 0x00,0xd3,0x26,0xd2,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xb0,
++ 0xe0,0xa4,0xbc,0x00,0x01,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xb3,0xe0,
++ 0xa4,0xbc,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x91,
++ 0x08,0x10,0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x8c,0xd4,0x18,0x53,
++ 0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x10,
++ 0x04,0x0b,0x00,0x0c,0x00,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,
++ 0xe6,0x10,0x04,0x01,0xdc,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x0b,0x00,0x0c,
++ 0x00,0xd2,0x2c,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x95,0xe0,0xa4,0xbc,0x00,
++ 0x01,0xff,0xe0,0xa4,0x96,0xe0,0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x97,
++ 0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0x9c,0xe0,0xa4,0xbc,0x00,0xd1,0x16,0x10,
++ 0x0b,0x01,0xff,0xe0,0xa4,0xa1,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xa2,0xe0,
++ 0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xab,0xe0,0xa4,0xbc,0x00,0x01,0xff,
++ 0xe0,0xa4,0xaf,0xe0,0xa4,0xbc,0x00,0x54,0x04,0x01,0x00,0xd3,0x14,0x92,0x10,0xd1,
++ 0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0c,0x00,0x0c,0x00,0xd2,
++ 0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x0b,0x00,0x10,0x04,0x0b,0x00,0x09,0x00,0x91,
++ 0x08,0x10,0x04,0x09,0x00,0x08,0x00,0x09,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,
++ 0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,
++ 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,
++ 0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,
++ 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,
++ 0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
++ 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,
++ 0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x07,0x00,0x01,0x00,0xcf,
++ 0x86,0xd5,0x7b,0xd4,0x42,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,
++ 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,
++ 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa6,0xbe,0x00,
++ 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa7,0x97,0x00,0x01,0x09,0x10,
++ 0x04,0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,
++ 0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,
++ 0xa6,0xa1,0xe0,0xa6,0xbc,0x00,0x01,0xff,0xe0,0xa6,0xa2,0xe0,0xa6,0xbc,0x00,0x10,
++ 0x04,0x00,0x00,0x01,0xff,0xe0,0xa6,0xaf,0xe0,0xa6,0xbc,0x00,0xd4,0x10,0x93,0x0c,
++ 0x52,0x04,0x01,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,
++ 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x13,0x00,
++ 0x10,0x04,0x14,0xe6,0x00,0x00,0xe2,0x48,0x02,0xe1,0x4f,0x01,0xd0,0xa4,0xcf,0x86,
++ 0xd5,0x4c,0xd4,0x34,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x07,0x00,
++ 0x10,0x04,0x01,0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
++ 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,
+ 0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,
+ 0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
+ 0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,
+- 0xd3,0x18,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,
+- 0x91,0x08,0x10,0x04,0x01,0x07,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x42,
+- 0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,
+- 0x00,0x00,0x01,0x00,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,
+- 0x00,0x00,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa6,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,
+- 0xff,0xe0,0xa7,0x87,0xe0,0xa7,0x97,0x00,0x01,0x09,0x10,0x04,0x08,0x00,0x00,0x00,
+- 0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
+- 0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa6,0xa1,0xe0,0xa6,0xbc,
+- 0x00,0x01,0xff,0xe0,0xa6,0xa2,0xe0,0xa6,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0xff,
+- 0xe0,0xa6,0xaf,0xe0,0xa6,0xbc,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,
+- 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
+- 0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x14,0xe6,0x00,
+- 0x00,0xe2,0x48,0x02,0xe1,0x4f,0x01,0xd0,0xa4,0xcf,0x86,0xd5,0x4c,0xd4,0x34,0xd3,
+- 0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x10,0x04,0x01,0x00,0x07,
+- 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
+- 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,
+- 0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,
+- 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x2e,0xd2,0x17,0xd1,
+- 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa8,0xb2,
+- 0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,
+- 0xe0,0xa8,0xb8,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,
+- 0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x00,0x00,0x01,0x00,0xcf,0x86,0xd5,0x80,0xd4,
+- 0x34,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,
+- 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,
+- 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x01,
+- 0x09,0x00,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x00,
+- 0x00,0x00,0x00,0xd2,0x25,0xd1,0x0f,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xa8,0x96,
+- 0xe0,0xa8,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0x97,0xe0,0xa8,0xbc,0x00,0x01,
+- 0xff,0xe0,0xa8,0x9c,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,
+- 0x10,0x0b,0x01,0xff,0xe0,0xa8,0xab,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd4,0x10,0x93,
+- 0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x14,0x52,
+- 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,0x14,0x00,0x00,
+- 0x00,0x00,0x00,0xd0,0x82,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,
+- 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,
+- 0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x10,
+- 0x04,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,
+- 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x0c,0x91,
+- 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,
+- 0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,
+- 0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x10,0x52,0x04,0x01,
+- 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
+- 0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,
+- 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0x00,0x00,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x07,
+- 0x00,0x07,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x0d,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,
+- 0x04,0x00,0x00,0x11,0x00,0x13,0x00,0x13,0x00,0xe1,0x24,0x01,0xd0,0x86,0xcf,0x86,
+- 0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,
+- 0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,
+- 0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,
+- 0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
+- 0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
+- 0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
+- 0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x01,0x00,
+- 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x01,0x00,
+- 0x01,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x45,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,
+- 0x10,0x04,0x0a,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,
+- 0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x96,0x00,0x00,0x00,0x10,0x04,0x00,
+- 0x00,0x01,0xff,0xe0,0xad,0x87,0xe0,0xac,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,
+- 0xe0,0xad,0x87,0xe0,0xad,0x97,0x00,0x01,0x09,0x00,0x00,0xd3,0x0c,0x52,0x04,0x00,
+- 0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,
+- 0xff,0xe0,0xac,0xa1,0xe0,0xac,0xbc,0x00,0x01,0xff,0xe0,0xac,0xa2,0xe0,0xac,0xbc,
+- 0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,
+- 0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x0c,0x00,0x0c,0x00,0x00,0x00,0xd0,0xb1,0xcf,
+- 0x86,0xd5,0x63,0xd4,0x28,0xd3,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x91,
+- 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd3,0x1f,0xd2,0x0c,0x91,
+- 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,
+- 0xae,0x92,0xe0,0xaf,0x97,0x00,0x01,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
+- 0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x01,0x00,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,
+- 0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,
+- 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,
+- 0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x08,0x00,0x01,0x00,
+- 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xcf,0x86,
+- 0xd5,0x61,0xd4,0x45,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,0xae,0xbe,0x00,0x01,0xff,0xe0,
+- 0xaf,0x87,0xe0,0xae,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,
+- 0xaf,0x97,0x00,0x01,0x09,0x00,0x00,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0a,
+- 0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x00,
+- 0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x08,
+- 0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x07,0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,
+- 0x00,0x00,0x00,0xe3,0x1c,0x04,0xe2,0x1a,0x02,0xd1,0xf3,0xd0,0x76,0xcf,0x86,0xd5,
+- 0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,
+- 0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,
+- 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,
+- 0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,
+- 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,
+- 0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0xd2,
+- 0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x01,
+- 0x00,0xcf,0x86,0xd5,0x53,0xd4,0x2f,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,
+- 0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,
+- 0xb1,0x86,0xe0,0xb1,0x96,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+- 0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
+- 0x01,0x54,0x10,0x04,0x01,0x5b,0x00,0x00,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,
+- 0x11,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,0x00,
+- 0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x52,0x04,0x00,0x00,
+- 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0a,0x00,0xd0,0x76,0xcf,0x86,
+- 0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x10,0x00,
+- 0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,
+- 0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
++ 0xd3,0x2e,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0xe0,0xa8,0xb2,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,
++ 0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0xb8,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd2,0x08,
++ 0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x00,0x00,0x01,0x00,
++ 0xcf,0x86,0xd5,0x80,0xd4,0x34,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,
++ 0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x10,
++ 0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,
++ 0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0xd2,0x25,0xd1,0x0f,0x10,0x04,0x00,0x00,
++ 0x01,0xff,0xe0,0xa8,0x96,0xe0,0xa8,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0x97,
++ 0xe0,0xa8,0xbc,0x00,0x01,0xff,0xe0,0xa8,0x9c,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,
++ 0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0xab,0xe0,0xa8,0xbc,0x00,
++ 0x00,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,
++ 0x01,0x00,0x93,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,
++ 0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0xd0,0x82,0xcf,0x86,0xd5,0x40,0xd4,0x2c,
++ 0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,
++ 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,
++ 0x07,0x00,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,
++ 0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,
+ 0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,
+- 0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
+- 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x07,0x07,0x07,0x00,
+- 0x01,0x00,0xcf,0x86,0xd5,0x82,0xd4,0x5e,0xd3,0x2a,0xd2,0x13,0x91,0x0f,0x10,0x0b,
+- 0x01,0xff,0xe0,0xb2,0xbf,0xe0,0xb3,0x95,0x00,0x01,0x00,0x01,0x00,0xd1,0x08,0x10,
+- 0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,
+- 0x95,0x00,0xd2,0x28,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x96,
+- 0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x82,0x00,0x01,0xff,
+- 0xe0,0xb3,0x86,0xe0,0xb3,0x82,0xe0,0xb3,0x95,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
+- 0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
+- 0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,
+- 0x10,0x04,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,0x00,
+- 0x09,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,
+- 0x10,0x04,0x00,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0xe1,0x06,0x01,0xd0,0x6e,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,
+- 0x08,0x10,0x04,0x13,0x00,0x10,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,
+- 0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,
+- 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,
+- 0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,
+- 0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
+- 0x00,0x10,0x04,0x0c,0x00,0x13,0x09,0x91,0x08,0x10,0x04,0x13,0x09,0x0a,0x00,0x01,
+- 0x00,0xcf,0x86,0xd5,0x65,0xd4,0x45,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,
+- 0x04,0x0a,0x00,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
+- 0x00,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb4,0xbe,0x00,0x01,0xff,0xe0,0xb5,
+- 0x87,0xe0,0xb4,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb5,
+- 0x97,0x00,0x01,0x09,0x10,0x04,0x0c,0x00,0x12,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,
+- 0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x01,0x00,0x52,0x04,0x12,0x00,0x51,0x04,
+- 0x12,0x00,0x10,0x04,0x12,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,
+- 0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x0c,0x52,0x04,
+- 0x0a,0x00,0x11,0x04,0x0a,0x00,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,
+- 0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x5a,0xcf,0x86,0xd5,0x34,0xd4,0x18,0x93,0x14,
+- 0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x04,0x00,
+- 0x04,0x00,0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,
+- 0x04,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x54,0x04,
+- 0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,
+- 0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x04,0x00,0x00,0x00,
+- 0xcf,0x86,0xd5,0x77,0xd4,0x28,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,
+- 0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x04,0x09,
+- 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0xd3,0x14,0x52,0x04,
+- 0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x10,0x04,0x04,0x00,0x00,0x00,
+- 0xd2,0x13,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8a,
+- 0x00,0x04,0x00,0xd1,0x19,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0x00,
+- 0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0xe0,0xb7,0x8a,0x00,0x10,0x0b,0x04,0xff,
+- 0xe0,0xb7,0x99,0xe0,0xb7,0x9f,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x00,
+- 0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,
+- 0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe2,
+- 0x31,0x01,0xd1,0x58,0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,
+- 0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x67,0x10,0x04,
+- 0x01,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xcf,0x86,
+- 0x95,0x18,0xd4,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,0x6b,0x01,0x00,0x53,0x04,
+- 0x01,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd0,0x9e,0xcf,0x86,0xd5,0x54,
+- 0xd4,0x3c,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,
+- 0x01,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x15,0x00,
+- 0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x15,0x00,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x15,0x00,0xd3,0x08,0x12,0x04,
+- 0x15,0x00,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x01,0x00,
+- 0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,
+- 0x01,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
+- 0xd2,0x08,0x11,0x04,0x15,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,
+- 0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x76,0x10,0x04,0x15,0x09,
+- 0x01,0x00,0x11,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0x95,0x34,0xd4,0x20,0xd3,0x14,
+- 0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x52,0x04,0x01,0x7a,0x11,0x04,0x01,0x00,0x00,0x00,0x53,0x04,0x01,0x00,
+- 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x01,0x00,0x0d,0x00,0x00,0x00,
+- 0xe1,0x2b,0x01,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,0x02,0x00,0x53,0x04,0x02,
+- 0x00,0x92,0x08,0x11,0x04,0x02,0xdc,0x02,0x00,0x02,0x00,0x54,0x04,0x02,0x00,0xd3,
+- 0x14,0x52,0x04,0x02,0x00,0xd1,0x08,0x10,0x04,0x02,0x00,0x02,0xdc,0x10,0x04,0x02,
+- 0x00,0x02,0xdc,0x92,0x0c,0x91,0x08,0x10,0x04,0x02,0x00,0x02,0xd8,0x02,0x00,0x02,
+- 0x00,0xcf,0x86,0xd5,0x73,0xd4,0x36,0xd3,0x17,0x92,0x13,0x51,0x04,0x02,0x00,0x10,
+- 0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x82,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,
+- 0x02,0xff,0xe0,0xbd,0x8c,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd3,0x26,0xd2,0x13,0x51,
+- 0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x91,0xe0,0xbe,0xb7,0x00,0x02,0x00,
+- 0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x96,0xe0,0xbe,0xb7,
+- 0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x9b,0xe0,0xbe,
+- 0xb7,0x00,0x02,0x00,0x02,0x00,0xd4,0x27,0x53,0x04,0x02,0x00,0xd2,0x17,0xd1,0x0f,
+- 0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x80,0xe0,0xbe,0xb5,0x00,0x10,0x04,0x04,
+- 0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0xd3,0x35,0xd2,
+- 0x17,0xd1,0x08,0x10,0x04,0x00,0x00,0x02,0x81,0x10,0x04,0x02,0x82,0x02,0xff,0xe0,
+- 0xbd,0xb1,0xe0,0xbd,0xb2,0x00,0xd1,0x0f,0x10,0x04,0x02,0x84,0x02,0xff,0xe0,0xbd,
+- 0xb1,0xe0,0xbd,0xb4,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xb2,0xe0,0xbe,0x80,0x00,
+- 0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xb3,0xe0,0xbe,0x80,
+- 0x00,0x02,0x00,0x02,0x82,0x11,0x04,0x02,0x82,0x02,0x00,0xd0,0xd3,0xcf,0x86,0xd5,
+- 0x65,0xd4,0x27,0xd3,0x1f,0xd2,0x13,0x91,0x0f,0x10,0x04,0x02,0x82,0x02,0xff,0xe0,
+- 0xbd,0xb1,0xe0,0xbe,0x80,0x00,0x02,0xe6,0x91,0x08,0x10,0x04,0x02,0x09,0x02,0x00,
+- 0x02,0xe6,0x12,0x04,0x02,0x00,0x0c,0x00,0xd3,0x1f,0xd2,0x13,0x51,0x04,0x02,0x00,
+- 0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x92,0xe0,0xbe,0xb7,0x00,0x51,0x04,0x02,
+- 0x00,0x10,0x04,0x04,0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,
+- 0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x9c,0xe0,0xbe,
+- 0xb7,0x00,0x02,0x00,0xd4,0x3d,0xd3,0x26,0xd2,0x13,0x51,0x04,0x02,0x00,0x10,0x0b,
+- 0x02,0xff,0xe0,0xbe,0xa1,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x51,0x04,0x02,0x00,0x10,
+- 0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0xa6,0xe0,0xbe,0xb7,0x00,0x52,0x04,0x02,0x00,
+- 0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xab,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x04,
+- 0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x02,0x00,0x02,0x00,0x02,
+- 0x00,0xd2,0x13,0x91,0x0f,0x10,0x04,0x04,0x00,0x02,0xff,0xe0,0xbe,0x90,0xe0,0xbe,
+- 0xb5,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,
+- 0x95,0x4c,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,
+- 0x04,0xdc,0x04,0x00,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,
+- 0x10,0x04,0x0a,0x00,0x04,0x00,0xd3,0x14,0xd2,0x08,0x11,0x04,0x08,0x00,0x0a,0x00,
+- 0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,
+- 0x0b,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
+- 0xe5,0xf7,0x04,0xe4,0x79,0x03,0xe3,0x7b,0x01,0xe2,0x04,0x01,0xd1,0x7f,0xd0,0x65,
+- 0xcf,0x86,0x55,0x04,0x04,0x00,0xd4,0x33,0xd3,0x1f,0xd2,0x0c,0x51,0x04,0x04,0x00,
+- 0x10,0x04,0x0a,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe1,0x80,
+- 0xa5,0xe1,0x80,0xae,0x00,0x04,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0a,0x00,0x04,
+- 0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x04,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x04,
+- 0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x04,0x00,0x04,
+- 0x07,0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0x09,0x10,0x04,0x0a,0x09,0x0a,
+- 0x00,0x0a,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,
+- 0x08,0x11,0x04,0x04,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x2e,0xcf,0x86,0x95,
+- 0x28,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,
+- 0x00,0x0a,0xdc,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,0x0b,
+- 0x00,0x11,0x04,0x0b,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,
+- 0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x52,
+- 0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,0x00,0x00,0x00,0x01,0x00,0x54,
+- 0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x06,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x06,0x00,0x08,0x00,0x10,0x04,0x08,
+- 0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0d,0x00,0x0d,0x00,0xd1,0x3e,0xd0,
+- 0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x1d,0x54,0x04,0x01,0x00,0x53,0x04,0x01,
+- 0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,
+- 0x00,0x01,0xff,0x00,0x94,0x15,0x93,0x11,0x92,0x0d,0x91,0x09,0x10,0x05,0x01,0xff,
+- 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,
+- 0x04,0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x0b,0x00,0x0b,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,
+- 0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x0b,
+- 0x00,0xe2,0x21,0x01,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,
+- 0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,
+- 0x04,0x00,0x04,0x00,0xcf,0x86,0x95,0x48,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,
+- 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,
+- 0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,
+- 0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,
+- 0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,
+- 0xd0,0x62,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,
+- 0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,
+- 0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,
+- 0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,
+- 0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,
+- 0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,
+- 0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,
+- 0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,
+- 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x93,0x10,0x52,0x04,0x04,0x00,
+- 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x94,0x14,0x53,0x04,
+- 0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,
+- 0x04,0x00,0xd1,0x9c,0xd0,0x3e,0xcf,0x86,0x95,0x38,0xd4,0x14,0x53,0x04,0x04,0x00,
+- 0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,0x14,
+- 0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,
+- 0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,
+- 0x04,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,
+- 0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0xd2,0x0c,
+- 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,
+- 0x0c,0xe6,0x10,0x04,0x0c,0xe6,0x08,0xe6,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x08,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x53,0x04,0x04,0x00,
+- 0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,
+- 0xcf,0x86,0x95,0x14,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,
+- 0x08,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,
+- 0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x11,0x00,
+- 0x00,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0xd3,0x30,0xd2,0x2a,
+- 0xd1,0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x0b,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,
+- 0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd2,0x6c,0xd1,0x24,
+- 0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,
+- 0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0b,0x00,
+- 0x0b,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,
+- 0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,
+- 0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x04,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
+- 0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x46,0xcf,0x86,0xd5,0x28,
+- 0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x00,
+- 0x00,0x00,0x06,0x00,0x93,0x10,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x09,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x06,0x00,0x93,0x14,0x52,0x04,0x06,0x00,
+- 0xd1,0x08,0x10,0x04,0x06,0x09,0x06,0x00,0x10,0x04,0x06,0x00,0x00,0x00,0x00,0x00,
+- 0xcf,0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x06,0x00,0x00,0x00,
+- 0x00,0x00,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,
+- 0x06,0x00,0x00,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,
+- 0x00,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0xd0,0x1b,0xcf,0x86,0x55,0x04,0x04,0x00,
+- 0x54,0x04,0x04,0x00,0x93,0x0d,0x52,0x04,0x04,0x00,0x11,0x05,0x04,0xff,0x00,0x04,
+- 0x00,0x04,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,
+- 0x04,0x04,0x00,0x10,0x04,0x04,0x09,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,
+- 0x08,0x10,0x04,0x04,0x00,0x07,0xe6,0x00,0x00,0xd4,0x10,0x53,0x04,0x04,0x00,0x92,
+- 0x08,0x11,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x08,0x11,
+- 0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xe4,0xb7,0x03,0xe3,0x58,0x01,0xd2,0x8f,0xd1,
+- 0x53,0xd0,0x35,0xcf,0x86,0x95,0x2f,0xd4,0x1f,0x53,0x04,0x04,0x00,0xd2,0x0d,0x51,
+- 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x04,0xff,0x00,0x51,0x05,0x04,0xff,0x00,0x10,
+- 0x05,0x04,0xff,0x00,0x00,0x00,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,
+- 0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,
+- 0x53,0x04,0x04,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x04,0x00,0x94,0x18,0x53,0x04,0x04,0x00,
+- 0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0xe4,0x10,0x04,0x0a,0x00,0x00,0x00,
+- 0x00,0x00,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,
+- 0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x42,
+- 0xcf,0x86,0xd5,0x1c,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,
+- 0xd1,0x08,0x10,0x04,0x07,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd4,0x0c,
+- 0x53,0x04,0x07,0x00,0x12,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x10,
+- 0xd1,0x08,0x10,0x04,0x07,0x00,0x07,0xde,0x10,0x04,0x07,0xe6,0x07,0xdc,0x00,0x00,
+- 0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,
+- 0x00,0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd4,0x10,0x53,0x04,0x07,0x00,
+- 0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x07,0x00,
+- 0x91,0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,0x86,
+- 0x55,0x04,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,
+- 0x0b,0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x95,0x28,0xd4,0x10,0x53,0x04,0x08,0x00,
+- 0x92,0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0xd2,0x0c,
+- 0x51,0x04,0x08,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x08,0x00,
+- 0x07,0x00,0xd2,0xe4,0xd1,0x80,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x54,0x04,0x08,0x00,
+- 0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x08,0xe6,
+- 0xd2,0x0c,0x91,0x08,0x10,0x04,0x08,0xdc,0x08,0x00,0x08,0x00,0x11,0x04,0x00,0x00,
+- 0x08,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,
+- 0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xd4,0x14,
+- 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,
+- 0x0b,0x00,0xd3,0x10,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,
+- 0x0b,0xe6,0x52,0x04,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xe6,0x00,0x00,0x10,0x04,
+- 0x00,0x00,0x0b,0xdc,0xd0,0x5e,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0b,0x00,
+- 0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,
+- 0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd4,0x10,0x53,0x04,0x0b,0x00,0x52,0x04,
+- 0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x10,0xe6,0x91,0x08,
+- 0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0xdc,0xd2,0x0c,0x51,0x04,0x10,0xdc,0x10,0x04,
+- 0x10,0xdc,0x10,0xe6,0xd1,0x08,0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0x04,0x10,0x00,
+- 0x00,0x00,0xcf,0x06,0x00,0x00,0xe1,0x1e,0x01,0xd0,0xaa,0xcf,0x86,0xd5,0x6e,0xd4,
+- 0x53,0xd3,0x17,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,
+- 0xac,0x85,0xe1,0xac,0xb5,0x00,0x09,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x09,0xff,
+- 0xe1,0xac,0x87,0xe1,0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x89,
+- 0xe1,0xac,0xb5,0x00,0x09,0x00,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8b,0xe1,
+- 0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8d,0xe1,0xac,0xb5,0x00,
+- 0x09,0x00,0x93,0x17,0x92,0x13,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,
+- 0x91,0xe1,0xac,0xb5,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x54,0x04,0x09,0x00,0xd3,
+- 0x10,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x07,0x09,0x00,0x09,0x00,0xd2,
+- 0x13,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xba,0xe1,0xac,
+- 0xb5,0x00,0x91,0x0f,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xbc,0xe1,0xac,0xb5,
+- 0x00,0x09,0x00,0xcf,0x86,0xd5,0x3d,0x94,0x39,0xd3,0x31,0xd2,0x25,0xd1,0x16,0x10,
+- 0x0b,0x09,0xff,0xe1,0xac,0xbe,0xe1,0xac,0xb5,0x00,0x09,0xff,0xe1,0xac,0xbf,0xe1,
+- 0xac,0xb5,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xad,0x82,0xe1,0xac,0xb5,0x00,
+- 0x91,0x08,0x10,0x04,0x09,0x09,0x09,0x00,0x09,0x00,0x12,0x04,0x09,0x00,0x00,0x00,
+- 0x09,0x00,0xd4,0x1c,0x53,0x04,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,
+- 0x09,0x00,0x09,0xe6,0x91,0x08,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0xe6,0xd3,0x08,
+- 0x12,0x04,0x09,0xe6,0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,
+- 0x00,0x00,0x00,0x00,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x18,0x53,0x04,
+- 0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x09,0x0d,0x09,0x11,0x04,
+- 0x0d,0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x0d,0x00,
+- 0x0d,0x00,0xcf,0x86,0x55,0x04,0x0c,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x0c,0x00,
+- 0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x07,0x0c,0x00,0x0c,0x00,0xd3,0x0c,0x92,0x08,
+- 0x11,0x04,0x0c,0x00,0x0c,0x09,0x00,0x00,0x12,0x04,0x00,0x00,0x0c,0x00,0xe3,0xb2,
+- 0x01,0xe2,0x09,0x01,0xd1,0x4c,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,
+- 0x0a,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,
+- 0x0a,0x07,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,
+- 0xcf,0x86,0x95,0x1c,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,
+- 0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,
+- 0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x54,0x04,0x14,0x00,
+- 0x53,0x04,0x14,0x00,0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,
+- 0x91,0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x14,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x08,
+- 0x13,0x04,0x0d,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,
+- 0x0b,0xe6,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x01,0x0b,0xdc,0x0b,0xdc,0x92,0x08,
+- 0x11,0x04,0x0b,0xdc,0x0b,0xe6,0x0b,0xdc,0xd4,0x28,0xd3,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x01,0x0b,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,
+- 0x0b,0x01,0x0b,0x00,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xdc,0x0b,0x00,
+- 0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0d,0x00,0xd1,0x08,
+- 0x10,0x04,0x0d,0xe6,0x0d,0x00,0x10,0x04,0x0d,0x00,0x13,0x00,0x92,0x0c,0x51,0x04,
+- 0x10,0xe6,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,
+- 0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x94,0x0c,0x53,0x04,0x07,0x00,0x12,0x04,
+- 0x07,0x00,0x08,0x00,0x08,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0xd5,0x40,
+- 0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0xe6,0x10,0x04,0x08,0xdc,0x08,0xe6,
+- 0x09,0xe6,0xd2,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x0a,0xe6,0xd1,0x08,
+- 0x10,0x04,0x0a,0xe6,0x0a,0xea,0x10,0x04,0x0a,0xd6,0x0a,0xdc,0x93,0x10,0x92,0x0c,
+- 0x91,0x08,0x10,0x04,0x0a,0xca,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0xd4,0x14,
+- 0x93,0x10,0x52,0x04,0x0a,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xe6,
+- 0x10,0xe6,0xd3,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x13,0xe8,
+- 0x13,0xe4,0xd2,0x10,0xd1,0x08,0x10,0x04,0x13,0xe4,0x13,0xdc,0x10,0x04,0x00,0x00,
+- 0x12,0xe6,0xd1,0x08,0x10,0x04,0x0c,0xe9,0x0b,0xdc,0x10,0x04,0x09,0xe6,0x09,0xdc,
+- 0xe2,0x80,0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,
+- 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa5,0x00,0x01,0xff,
+- 0x61,0xcc,0xa5,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,
+- 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x42,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,
+- 0xa3,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,
+- 0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x43,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,
+- 0x63,0xcc,0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x87,0x00,0x01,0xff,
+- 0x64,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa3,0x00,0x01,0xff,
+- 0x64,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,
+- 0xb1,0x00,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa7,0x00,
+- 0x01,0xff,0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xad,0x00,0x01,0xff,
+- 0x64,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x80,0x00,
+- 0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,
+- 0x81,0x00,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0x45,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,
+- 0x45,0xcc,0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,
+- 0x45,0xcc,0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,
+- 0x01,0xff,0x46,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,
+- 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,
+- 0x84,0x00,0x10,0x08,0x01,0xff,0x48,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,
+- 0x10,0x08,0x01,0xff,0x48,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,
+- 0x10,0x08,0x01,0xff,0x48,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0x49,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,
+- 0x01,0xff,0x49,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,
+- 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x81,0x00,0x01,0xff,
+- 0x6b,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,
+- 0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,
+- 0xb1,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,
+- 0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,
+- 0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xb1,0x00,0x01,0xff,
+- 0x6c,0xcc,0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4c,0xcc,0xad,0x00,0x01,0xff,
+- 0x6c,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x4d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,
+- 0x81,0x00,0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x4d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,
+- 0xff,0x4d,0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x4e,0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x4e,
+- 0xcc,0xa3,0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x4e,0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x4e,
+- 0xcc,0xad,0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,
+- 0xcc,0x83,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,
+- 0xff,0x4f,0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,
+- 0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x80,0x00,0x01,
+- 0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x81,
+- 0x00,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x50,
+- 0xcc,0x81,0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x50,0xcc,0x87,
+- 0x00,0x01,0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,
+- 0xcc,0x87,0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa3,
+- 0x00,0x01,0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x52,0xcc,0xa3,
+- 0xcc,0x84,0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x52,
+- 0xcc,0xb1,0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,
+- 0x08,0x01,0xff,0x53,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,
+- 0x0a,0x01,0xff,0x53,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,
+- 0x00,0x10,0x0a,0x01,0xff,0x53,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,
+- 0xcc,0x87,0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x53,0xcc,0xa3,0xcc,0x87,
+- 0x00,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0x87,
+- 0x00,0x01,0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0xa3,
+- 0x00,0x01,0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xb1,0x00,0x01,
+- 0xff,0x74,0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,
+- 0xcc,0xad,0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa4,
+- 0x00,0x01,0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0xb0,
+- 0x00,0x01,0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xad,0x00,0x01,
+- 0xff,0x75,0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x83,
+- 0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x55,
+- 0xcc,0x84,0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0x56,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,
+- 0xff,0x56,0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x10,0x02,0xcf,0x86,
+- 0xd5,0xe1,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,
+- 0x80,0x00,0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x81,0x00,
+- 0x01,0xff,0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x88,0x00,
+- 0x01,0xff,0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x87,0x00,0x01,0xff,
+- 0x77,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0xa3,0x00,
+- 0x01,0xff,0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x58,0xcc,0x87,0x00,0x01,0xff,
+- 0x78,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x58,0xcc,0x88,0x00,0x01,0xff,
+- 0x78,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,
+- 0x87,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0x82,0x00,
+- 0x01,0xff,0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x5a,0xcc,0xa3,0x00,0x01,0xff,
+- 0x7a,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0xb1,0x00,0x01,0xff,
+- 0x7a,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x68,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,
+- 0x88,0x00,0x92,0x1d,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,
+- 0x79,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x02,0xff,0xc5,0xbf,0xcc,0x87,0x00,0x0a,
+- 0x00,0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa3,
+- 0x00,0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x89,0x00,0x01,
+- 0xff,0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x81,
+- 0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,
+- 0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,
+- 0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,
+- 0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,
+- 0xcc,0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x82,0x00,0x01,
+- 0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x81,
+- 0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,
+- 0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,
+- 0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,
+- 0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x83,0x00,0x01,
+- 0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x86,
+- 0x00,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0x45,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x45,
+- 0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,
+- 0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,
+- 0xcc,0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,
+- 0xd4,0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,
+- 0x80,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,
+- 0x82,0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,
+- 0x01,0xff,0x45,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,
+- 0x10,0x0a,0x01,0xff,0x45,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,
+- 0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x89,0x00,0x01,0xff,
+- 0x69,0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,
+- 0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,
+- 0xa3,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,
+- 0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x81,0x00,
+- 0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,
+- 0x80,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,
+- 0x4f,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,
+- 0x01,0xff,0x4f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,
+- 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,
+- 0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x81,0x00,
+- 0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,
+- 0x9b,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,
+- 0x4f,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,
+- 0xd3,0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x83,0x00,
+- 0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,
+- 0xa3,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0x55,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,
+- 0x89,0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,
+- 0x55,0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,
+- 0x01,0xff,0x55,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,
+- 0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,
+- 0x9b,0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,
+- 0x75,0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,
+- 0x55,0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,
+- 0x01,0xff,0x59,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0x59,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,
+- 0x59,0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0x92,0x14,0x91,0x10,0x10,0x08,
+- 0x01,0xff,0x59,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x0a,0x00,0x0a,0x00,
+- 0xe1,0xc0,0x04,0xe0,0x80,0x02,0xcf,0x86,0xe5,0x2d,0x01,0xd4,0xa8,0xd3,0x54,0xd2,
+- 0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,
+- 0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,
+- 0xce,0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,
+- 0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,
+- 0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,
+- 0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x93,0x00,0x01,0xff,
+- 0xce,0x91,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0x00,
+- 0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,
+- 0x91,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x81,0x00,0x10,
+- 0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,
+- 0xcd,0x82,0x00,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,
+- 0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,
+- 0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,
+- 0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,
+- 0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,
+- 0x93,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x95,0xcc,
+- 0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,
+- 0x0b,0x01,0xff,0xce,0x95,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,
+- 0xcc,0x81,0x00,0x00,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,
+- 0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,
+- 0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,
+- 0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,
+- 0xce,0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,
+- 0x82,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,0x12,0x10,
+- 0x09,0x01,0xff,0xce,0x97,0xcc,0x93,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0x00,0x10,
+- 0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,
+- 0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0x00,
+- 0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,
+- 0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x54,0xd2,
+- 0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,
+- 0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,
+- 0xce,0xb9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,
+- 0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,
+- 0xff,0xce,0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,
+- 0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x93,0x00,0x01,0xff,
+- 0xce,0x99,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcc,0x80,0x00,
+- 0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,
+- 0x99,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x81,0x00,0x10,
+- 0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,
+- 0xcd,0x82,0x00,0xcf,0x86,0xe5,0x13,0x01,0xd4,0x84,0xd3,0x42,0xd2,0x28,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,
+- 0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,
+- 0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,
+- 0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xce,0x9f,0xcc,0x93,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0x00,
+- 0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,
+- 0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x81,
+- 0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x54,0xd2,0x28,
+- 0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,
+- 0x94,0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,
+- 0x85,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,
+- 0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
+- 0xcf,0x85,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,
+- 0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0x00,0x10,
+- 0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,0x10,0x04,
+- 0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,0x00,0x01,
+- 0xff,0xce,0xa5,0xcc,0x94,0xcd,0x82,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,
+- 0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,
+- 0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,
+- 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,
+- 0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,
+- 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xa9,0xcc,0x93,0x00,0x01,0xff,0xce,0xa9,0xcc,
+- 0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,
+- 0xa9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,
+- 0xcc,0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
+- 0xce,0xa9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0x00,
+- 0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,
+- 0xff,0xce,0xb1,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,
+- 0xff,0xce,0xb5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,
+- 0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,
+- 0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,
+- 0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,
+- 0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x91,0x12,0x10,0x09,
+- 0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x00,0x00,
+- 0xe0,0xe1,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,
+- 0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,
+- 0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xcd,0x85,
+- 0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,
+- 0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,
+- 0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,
+- 0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,
+- 0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,
+- 0x91,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,
+- 0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,
+- 0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,
+- 0x91,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,
+- 0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,
+- 0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x85,
+- 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,
+- 0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xcd,
+- 0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xcd,0x85,
+- 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,
+- 0xce,0xb7,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,
+- 0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,
+- 0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,
+- 0xce,0x97,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,
+- 0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,
+- 0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,
+- 0x01,0xff,0xce,0x97,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,
+- 0x94,0xcd,0x82,0xcd,0x85,0x00,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,
+- 0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,
+- 0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,
+- 0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,
+- 0xcf,0x89,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,
+- 0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xcd,0x85,
+- 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,
+- 0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,
+- 0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0xcd,0x85,
+- 0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,
+- 0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,
+- 0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x82,
+- 0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,0x49,
+- 0xd2,0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,
+- 0xb1,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x80,0xcd,0x85,0x00,0x01,
+- 0xff,0xce,0xb1,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,
+- 0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,
+- 0xce,0xb1,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
+- 0x91,0xcc,0x86,0x00,0x01,0xff,0xce,0x91,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,
+- 0x91,0xcc,0x80,0x00,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,
+- 0xff,0xce,0x91,0xcd,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xb9,0x00,0x01,
+- 0x00,0xcf,0x86,0xe5,0x16,0x01,0xd4,0x8f,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,
+- 0x01,0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,
+- 0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,
+- 0xff,0xce,0xb7,0xcc,0x81,0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,
+- 0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,
+- 0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,
+- 0x10,0x09,0x01,0xff,0xce,0x97,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,
+- 0xd1,0x13,0x10,0x09,0x01,0xff,0xce,0x97,0xcd,0x85,0x00,0x01,0xff,0xe1,0xbe,0xbf,
+- 0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbe,0xbf,0xcc,0x81,0x00,0x01,0xff,0xe1,
+- 0xbe,0xbf,0xcd,0x82,0x00,0xd3,0x40,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
+- 0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,
+- 0xb9,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x51,
+- 0x04,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,
+- 0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,
+- 0x86,0x00,0x01,0xff,0xce,0x99,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,
+- 0x80,0x00,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x04,0x00,0x00,0x01,
+- 0xff,0xe1,0xbf,0xbe,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbf,0xbe,0xcc,0x81,
+- 0x00,0x01,0xff,0xe1,0xbf,0xbe,0xcd,0x82,0x00,0xd4,0x93,0xd3,0x4e,0xd2,0x28,0xd1,
+- 0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,
+- 0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,
+- 0xcc,0x88,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x93,0x00,
+- 0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcd,0x82,0x00,
+- 0x01,0xff,0xcf,0x85,0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,
+- 0xff,0xce,0xa5,0xcc,0x86,0x00,0x01,0xff,0xce,0xa5,0xcc,0x84,0x00,0x10,0x09,0x01,
+- 0xff,0xce,0xa5,0xcc,0x80,0x00,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0xd1,0x12,0x10,
+- 0x09,0x01,0xff,0xce,0xa1,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,0x10,
+- 0x09,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x01,0xff,0x60,0x00,0xd3,0x3b,0xd2,0x18,
+- 0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,0xcd,0x85,0x00,0x01,
+- 0xff,0xcf,0x89,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,
+- 0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,
+- 0xcf,0x89,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
+- 0x9f,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,
+- 0xa9,0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0xd1,0x10,0x10,0x09,0x01,
+- 0xff,0xce,0xa9,0xcd,0x85,0x00,0x01,0xff,0xc2,0xb4,0x00,0x10,0x04,0x01,0x00,0x00,
+- 0x00,0xe0,0x7e,0x0c,0xcf,0x86,0xe5,0xbb,0x08,0xe4,0x14,0x06,0xe3,0xf7,0x02,0xe2,
+- 0xbd,0x01,0xd1,0xd0,0xd0,0x4f,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0xd3,0x18,0x92,0x14,
+- 0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,0x00,
+- 0x01,0x00,0x01,0x00,0x92,0x0d,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
+- 0x00,0x01,0xff,0x00,0x01,0x00,0x94,0x1b,0x53,0x04,0x01,0x00,0xd2,0x09,0x11,0x04,
+- 0x01,0x00,0x01,0xff,0x00,0x51,0x05,0x01,0xff,0x00,0x10,0x05,0x01,0xff,0x00,0x04,
+- 0x00,0x01,0x00,0xcf,0x86,0xd5,0x48,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,
+- 0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x52,0x04,0x04,0x00,0x11,0x04,0x04,
+- 0x00,0x06,0x00,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x06,0x00,0x07,
+- 0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0x52,
+- 0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0xd4,0x23,0xd3,
+- 0x14,0x52,0x05,0x06,0xff,0x00,0x91,0x0a,0x10,0x05,0x0a,0xff,0x00,0x00,0xff,0x00,
+- 0x0f,0xff,0x00,0x92,0x0a,0x11,0x05,0x0f,0xff,0x00,0x01,0xff,0x00,0x01,0xff,0x00,
+- 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x00,0x00,0x01,0x00,
+- 0x01,0x00,0xd0,0x7e,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x53,0x04,0x01,0x00,0x52,0x04,
+- 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,
+- 0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0c,0x00,0x0c,0x00,0x52,0x04,0x0c,0x00,
+- 0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,
+- 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x02,0x00,0x91,0x08,0x10,0x04,
+- 0x03,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0xd2,0x08,0x11,0x04,0x06,0x00,0x08,0x00,
+- 0x11,0x04,0x08,0x00,0x0b,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,
+- 0x10,0x04,0x0e,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,0x00,0x13,0x00,
+- 0xcf,0x86,0xd5,0x28,0x54,0x04,0x00,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x01,0xe6,
+- 0x01,0x01,0x01,0xe6,0xd2,0x0c,0x51,0x04,0x01,0x01,0x10,0x04,0x01,0x01,0x01,0xe6,
+- 0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x04,0x00,0xd1,0x08,0x10,0x04,0x06,0x00,
+- 0x06,0x01,0x10,0x04,0x06,0x01,0x06,0xe6,0x92,0x10,0xd1,0x08,0x10,0x04,0x06,0xdc,
+- 0x06,0xe6,0x10,0x04,0x06,0x01,0x08,0x01,0x09,0xdc,0x93,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x0a,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x81,0xd0,0x4f,
+- 0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,
+- 0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xa9,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,
+- 0x00,0x10,0x06,0x01,0xff,0x4b,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,0x01,0x00,0x53,
+- 0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x04,
+- 0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0x95,
+- 0x2c,0xd4,0x18,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0xd1,0x08,0x10,0x04,0x08,
+- 0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,
+- 0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x68,0xcf,
+- 0x86,0xd5,0x48,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x11,0x00,0x00,0x00,0x53,0x04,0x01,0x00,0x92,
+- 0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x90,0xcc,0xb8,0x00,0x01,
+- 0xff,0xe2,0x86,0x92,0xcc,0xb8,0x00,0x01,0x00,0x94,0x1a,0x53,0x04,0x01,0x00,0x52,
+- 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x94,0xcc,0xb8,
+- 0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0x53,0x04,0x01,0x00,0x52,
+- 0x04,0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x87,0x90,0xcc,0xb8,
+- 0x00,0x10,0x0a,0x01,0xff,0xe2,0x87,0x94,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x87,0x92,
+- 0xcc,0xb8,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,
+- 0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x06,
+- 0x00,0x06,0x00,0xe2,0x38,0x02,0xe1,0x3f,0x01,0xd0,0x68,0xcf,0x86,0xd5,0x3e,0x94,
+- 0x3a,0xd3,0x16,0x52,0x04,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0x83,
+- 0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd2,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,0x01,
+- 0xff,0xe2,0x88,0x88,0xcc,0xb8,0x00,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,0xe2,
+- 0x88,0x8b,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x24,0x93,0x20,0x52,
+- 0x04,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa3,0xcc,0xb8,0x00,0x01,
+- 0x00,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa5,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,
+- 0x00,0xcf,0x86,0xd5,0x48,0x94,0x44,0xd3,0x2e,0xd2,0x12,0x91,0x0e,0x10,0x04,0x01,
+- 0x00,0x01,0xff,0xe2,0x88,0xbc,0xcc,0xb8,0x00,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,
+- 0xff,0xe2,0x89,0x83,0xcc,0xb8,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,
+- 0x89,0x85,0xcc,0xb8,0x00,0x92,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,
+- 0x89,0x88,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x40,0xd3,0x1e,0x92,
+- 0x1a,0xd1,0x0c,0x10,0x08,0x01,0xff,0x3d,0xcc,0xb8,0x00,0x01,0x00,0x10,0x0a,0x01,
+- 0xff,0xe2,0x89,0xa1,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,
+- 0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x89,0x8d,0xcc,0xb8,0x00,0x10,0x08,0x01,
+- 0xff,0x3c,0xcc,0xb8,0x00,0x01,0xff,0x3e,0xcc,0xb8,0x00,0xd3,0x30,0xd2,0x18,0x91,
+- 0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xa4,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xa5,
+- 0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xb2,0xcc,0xb8,
+- 0x00,0x01,0xff,0xe2,0x89,0xb3,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,0x91,0x14,0x10,
+- 0x0a,0x01,0xff,0xe2,0x89,0xb6,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xb7,0xcc,0xb8,
+- 0x00,0x01,0x00,0x01,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x50,0x94,0x4c,0xd3,0x30,0xd2,
+- 0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xba,0xcc,0xb8,0x00,0x01,0xff,0xe2,
+- 0x89,0xbb,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x82,
+- 0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x83,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,0x91,
+- 0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x86,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x87,
+- 0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x30,0x53,0x04,0x01,0x00,0x52,
+- 0x04,0x01,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa2,0xcc,0xb8,0x00,0x01,
+- 0xff,0xe2,0x8a,0xa8,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa9,0xcc,0xb8,
+- 0x00,0x01,0xff,0xe2,0x8a,0xab,0xcc,0xb8,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,
+- 0x00,0xd4,0x5c,0xd3,0x2c,0x92,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xbc,
+- 0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xbd,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,
+- 0x8a,0x91,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x92,0xcc,0xb8,0x00,0x01,0x00,0xd2,
+- 0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb2,0xcc,0xb8,0x00,0x01,
+- 0xff,0xe2,0x8a,0xb3,0xcc,0xb8,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb4,
+- 0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb5,0xcc,0xb8,0x00,0x01,0x00,0x93,0x0c,0x92,
+- 0x08,0x11,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xd1,0x64,0xd0,0x3e,0xcf,
+- 0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,
+- 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x20,0x53,0x04,0x01,0x00,0x92,
+- 0x18,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x80,0x88,0x00,0x10,0x08,0x01,
+- 0xff,0xe3,0x80,0x89,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,
+- 0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,0x04,0x00,0xd0,
+- 0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,
+- 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0xd5,
+- 0x2c,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,0x10,
+- 0x04,0x06,0x00,0x07,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x08,
+- 0x00,0x08,0x00,0x08,0x00,0x12,0x04,0x08,0x00,0x09,0x00,0xd4,0x14,0x53,0x04,0x09,
+- 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0xd3,
+- 0x08,0x12,0x04,0x0c,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,
+- 0x00,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x13,0x00,0xd3,0xa6,0xd2,
+- 0x74,0xd1,0x40,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,0x93,0x14,0x52,
+- 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x04,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x92,
+- 0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,
+- 0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x14,0x53,
+- 0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x06,
+- 0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x06,
+- 0x00,0x07,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,
+- 0x04,0x01,0x00,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,0x00,0x06,
+- 0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x13,0x04,0x04,
+- 0x00,0x06,0x00,0xd2,0xdc,0xd1,0x48,0xd0,0x26,0xcf,0x86,0x95,0x20,0x54,0x04,0x01,
+- 0x00,0xd3,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x07,0x00,0x06,0x00,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x08,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,
+- 0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x04,0x00,0x06,
+- 0x00,0x06,0x00,0x52,0x04,0x06,0x00,0x11,0x04,0x06,0x00,0x08,0x00,0xd0,0x5e,0xcf,
+- 0x86,0xd5,0x2c,0xd4,0x10,0x53,0x04,0x06,0x00,0x92,0x08,0x11,0x04,0x06,0x00,0x07,
+- 0x00,0x07,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x52,
+- 0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0a,0x00,0x0b,0x00,0xd4,0x10,0x93,
+- 0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd3,0x10,0x92,
+- 0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x52,0x04,0x0a,
+- 0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x1c,0x94,
+- 0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,
+- 0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0b,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,
+- 0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0c,0x00,0x0b,0x00,0x0b,0x00,0xd1,
+- 0xa8,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,
+- 0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x01,
+- 0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x53,
+- 0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,
+- 0x00,0x01,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x18,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
+- 0x00,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x04,0x0c,0x00,0x01,0x00,0xd3,
+- 0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0x51,0x04,0x0c,
+- 0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x0c,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,
+- 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x06,0x00,0x93,0x0c,0x52,0x04,0x06,0x00,0x11,
+- 0x04,0x06,0x00,0x01,0x00,0x01,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,
+- 0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x0c,
+- 0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,
+- 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
+- 0x04,0x01,0x00,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,0x52,0x04,0x08,
+- 0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,
+- 0x00,0x10,0x04,0x09,0x00,0x0d,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0d,0x00,0x0c,
+- 0x00,0x06,0x00,0x94,0x0c,0x53,0x04,0x06,0x00,0x12,0x04,0x06,0x00,0x0a,0x00,0x06,
+- 0x00,0xe4,0x39,0x01,0xd3,0x0c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xcf,0x06,0x06,0x00,
+- 0xd2,0x30,0xd1,0x06,0xcf,0x06,0x06,0x00,0xd0,0x06,0xcf,0x06,0x06,0x00,0xcf,0x86,
+- 0x95,0x1e,0x54,0x04,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x0e,
+- 0x10,0x0a,0x06,0xff,0xe2,0xab,0x9d,0xcc,0xb8,0x00,0x06,0x00,0x06,0x00,0x06,0x00,
+- 0xd1,0x80,0xd0,0x3a,0xcf,0x86,0xd5,0x28,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,
+- 0x07,0x00,0x11,0x04,0x07,0x00,0x08,0x00,0xd3,0x08,0x12,0x04,0x08,0x00,0x09,0x00,
+- 0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x94,0x0c,
+- 0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x30,
+- 0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,
+- 0x10,0x00,0x10,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,
+- 0x0b,0x00,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x10,0x00,0x10,0x00,0x54,0x04,
+- 0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
+- 0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,
+- 0x11,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,
+- 0xd2,0x08,0x11,0x04,0x10,0x00,0x14,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x10,0x00,
+- 0x10,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x10,0x00,0x15,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,
+- 0x10,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd4,0x0c,0x53,0x04,
+- 0x14,0x00,0x12,0x04,0x14,0x00,0x11,0x00,0x53,0x04,0x14,0x00,0x52,0x04,0x14,0x00,
+- 0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0xe3,0xb9,0x01,0xd2,0xac,0xd1,
+- 0x68,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x14,0x53,0x04,0x08,0x00,0x52,
+- 0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x08,0x00,0xcf,
+- 0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,0x51,
+- 0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xd4,0x14,0x53,0x04,0x09,0x00,0x52,
+- 0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0xd3,0x10,0x92,
+- 0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0a,0x00,0x0a,0x00,0x09,0x00,0x52,0x04,0x0a,
+- 0x00,0x11,0x04,0x0a,0x00,0x0b,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0x55,
+- 0x04,0x08,0x00,0xd4,0x1c,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,
+- 0x04,0x08,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd3,
+- 0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0d,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,
+- 0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd1,0x6c,0xd0,0x2a,0xcf,0x86,0x55,
+- 0x04,0x08,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,
+- 0x04,0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,
+- 0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,0xd3,0x0c,0x52,
+- 0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,
+- 0x00,0x10,0x04,0x00,0x00,0x08,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,
+- 0x04,0x00,0x00,0x0c,0x09,0xd0,0x5a,0xcf,0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x93,
+- 0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x00,
+- 0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,
+- 0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,
+- 0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,
+- 0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xcf,
+- 0x86,0x95,0x40,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,
+- 0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,
+- 0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,
+- 0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,
+- 0x00,0x0a,0xe6,0xd2,0x9c,0xd1,0x68,0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x08,
+- 0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,0x08,0x00,0x0a,0x00,0x54,
+- 0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0d,
+- 0x00,0x0d,0x00,0x12,0x04,0x0d,0x00,0x10,0x00,0xcf,0x86,0x95,0x30,0x94,0x2c,0xd3,
+- 0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x12,0x00,0x91,0x08,0x10,
+- 0x04,0x12,0x00,0x13,0x00,0x13,0x00,0xd2,0x08,0x11,0x04,0x13,0x00,0x14,0x00,0x51,
+- 0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,
+- 0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,0x04,0x04,
+- 0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,
+- 0x00,0x54,0x04,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd1,
+- 0x06,0xcf,0x06,0x04,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0xd5,0x14,0x54,
+- 0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x00,
+- 0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x04,0x00,0x12,0x04,0x04,0x00,0x00,0x00,0xcf,
+- 0x86,0xe5,0xa6,0x05,0xe4,0x9f,0x05,0xe3,0x96,0x04,0xe2,0xe4,0x03,0xe1,0xc0,0x01,
+- 0xd0,0x3e,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,
+- 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0xda,0x01,0xe4,0x91,0x08,0x10,0x04,0x01,0xe8,
+- 0x01,0xde,0x01,0xe0,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,
+- 0x04,0x00,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x04,0x00,0x01,0x00,0xcf,0x86,
+- 0xd5,0xaa,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,
+- 0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,
+- 0x8b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x8d,0xe3,0x82,
+- 0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,
+- 0x8f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x91,0xe3,0x82,
+- 0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x93,0xe3,0x82,0x99,
+- 0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x95,0xe3,0x82,0x99,0x00,0x01,0x00,
+- 0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x97,0xe3,0x82,0x99,0x00,0x01,
+- 0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x99,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,
+- 0x10,0x0b,0x01,0xff,0xe3,0x81,0x9b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,
+- 0xff,0xe3,0x81,0x9d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd4,0x53,0xd3,0x3c,0xd2,0x1e,
+- 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,
+- 0x0b,0x01,0xff,0xe3,0x81,0xa1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x04,
+- 0x01,0x00,0x01,0xff,0xe3,0x81,0xa4,0xe3,0x82,0x99,0x00,0x10,0x04,0x01,0x00,0x01,
+- 0xff,0xe3,0x81,0xa6,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,
+- 0x01,0xff,0xe3,0x81,0xa8,0xe3,0x82,0x99,0x00,0x01,0x00,0x01,0x00,0xd3,0x4a,0xd2,
+- 0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x99,0x00,0x01,0xff,
+- 0xe3,0x81,0xaf,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xb2,
+- 0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb2,0xe3,0x82,0x9a,
+- 0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x99,0x00,0x01,0xff,
+- 0xe3,0x81,0xb5,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,
+- 0xff,0xe3,0x81,0xb8,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb8,0xe3,
+- 0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,
+- 0x99,0x00,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,0x9a,0x00,0x01,0x00,0xd0,0xee,0xcf,
+- 0x86,0xd5,0x42,0x54,0x04,0x01,0x00,0xd3,0x1b,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,
+- 0x0b,0x01,0xff,0xe3,0x81,0x86,0xe3,0x82,0x99,0x00,0x06,0x00,0x10,0x04,0x06,0x00,
+- 0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x08,0x10,0x04,0x01,0x08,
+- 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0x9d,0xe3,0x82,0x99,
+- 0x00,0x06,0x00,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x01,
+- 0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,
+- 0x82,0xab,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xad,0xe3,
+- 0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,
+- 0x82,0xaf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb1,0xe3,
+- 0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb3,0xe3,0x82,
+- 0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb5,0xe3,0x82,0x99,0x00,0x01,
+- 0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb7,0xe3,0x82,0x99,0x00,
+- 0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb9,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,
+- 0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbb,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,
+- 0x01,0xff,0xe3,0x82,0xbd,0xe3,0x82,0x99,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd5,0xd4,
+- 0x53,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbf,0xe3,0x82,
+- 0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x81,0xe3,0x82,0x99,0x00,0x01,
+- 0x00,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x84,0xe3,0x82,0x99,0x00,
+- 0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x86,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,
+- 0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x88,0xe3,0x82,0x99,0x00,0x01,0x00,
+- 0x01,0x00,0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x83,0x8f,0xe3,
+- 0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x8f,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,
+- 0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,
+- 0x83,0x92,0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x95,0xe3,
+- 0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x95,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,
+- 0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,
+- 0xff,0xe3,0x83,0x98,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,
+- 0xe3,0x83,0x9b,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x9b,0xe3,0x82,0x9a,0x00,
+- 0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x22,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,
+- 0x01,0xff,0xe3,0x82,0xa6,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x01,
+- 0xff,0xe3,0x83,0xaf,0xe3,0x82,0x99,0x00,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,
+- 0xe3,0x83,0xb0,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0xb1,0xe3,0x82,0x99,0x00,
+- 0x10,0x0b,0x01,0xff,0xe3,0x83,0xb2,0xe3,0x82,0x99,0x00,0x01,0x00,0x51,0x04,0x01,
+- 0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xbd,0xe3,0x82,0x99,0x00,0x06,0x00,0xd1,0x65,
+- 0xd0,0x46,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x91,0x08,
+- 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x18,0x53,0x04,
+- 0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,
+- 0x13,0x00,0x14,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,
+- 0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x15,0x93,0x11,
+- 0x52,0x04,0x01,0x00,0x91,0x09,0x10,0x05,0x01,0xff,0x00,0x01,0x00,0x01,0x00,0x01,
+- 0x00,0x01,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x52,
+- 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x54,
+- 0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,
+- 0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x08,0x00,0x0a,0x00,0x94,
+- 0x0c,0x93,0x08,0x12,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0xd2,0xa4,0xd1,
+- 0x5c,0xd0,0x22,0xcf,0x86,0x95,0x1c,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,
+- 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x10,0x04,0x07,0x00,0x00,
+- 0x00,0x01,0x00,0xcf,0x86,0xd5,0x20,0xd4,0x0c,0x93,0x08,0x12,0x04,0x01,0x00,0x0b,
+- 0x00,0x0b,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x06,0x00,0x06,
+- 0x00,0x06,0x00,0x06,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
+- 0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,
+- 0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,
+- 0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0xd5,0x10,0x94,0x0c,0x53,
+- 0x04,0x01,0x00,0x12,0x04,0x01,0x00,0x07,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,
+- 0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x16,
+- 0x00,0xd1,0x30,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,
+- 0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x01,0x00,0x01,
+- 0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x01,0x00,0x53,
+- 0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x07,0x00,0x54,0x04,0x01,
+- 0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x07,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd1,0x48,0xd0,0x40,0xcf,
+- 0x86,0xd5,0x06,0xcf,0x06,0x04,0x00,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x2c,0xd2,
+- 0x06,0xcf,0x06,0x04,0x00,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x1a,0xcf,0x86,0x55,
+- 0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,
+- 0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x07,0x00,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,
+- 0x06,0x01,0x00,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe2,0x71,0x05,0xd1,0x8c,0xd0,0x08,
+- 0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xd4,0x06,
+- 0xcf,0x06,0x01,0x00,0xd3,0x06,0xcf,0x06,0x01,0x00,0xd2,0x06,0xcf,0x06,0x01,0x00,
+- 0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x10,
+- 0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x08,0x00,0x08,0x00,0x53,0x04,
+- 0x08,0x00,0x12,0x04,0x08,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0xd3,0x08,
+- 0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,
+- 0x11,0x00,0x11,0x00,0x93,0x0c,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x13,0x00,
+- 0x13,0x00,0x94,0x14,0x53,0x04,0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,
+- 0x13,0x00,0x14,0x00,0x14,0x00,0x00,0x00,0xe0,0xdb,0x04,0xcf,0x86,0xe5,0xdf,0x01,
+- 0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x74,0xd2,0x6e,0xd1,0x06,0xcf,0x06,0x04,0x00,
+- 0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,
+- 0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,
+- 0x92,0x08,0x11,0x04,0x04,0x00,0x06,0x00,0x04,0x00,0x04,0x00,0x93,0x10,0x52,0x04,
+- 0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,
+- 0x95,0x24,0x94,0x20,0x93,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,
+- 0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x10,0x04,0x04,0x00,0x00,0x00,
+- 0x00,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x06,0x0a,0x00,0xd2,0x84,0xd1,0x4c,0xd0,0x16,
+- 0xcf,0x86,0x55,0x04,0x0a,0x00,0x94,0x0c,0x53,0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,
+- 0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x1c,0xd3,0x0c,0x92,0x08,
+- 0x11,0x04,0x0c,0x00,0x0a,0x00,0x0a,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,
+- 0x10,0x04,0x0a,0x00,0x0a,0xe6,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0d,0xe6,0x52,0x04,
+- 0x0d,0xe6,0x11,0x04,0x0a,0xe6,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,
+- 0x0a,0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
+- 0x11,0xe6,0x0d,0xe6,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,
+- 0x93,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x00,0x00,0xd1,0x40,
+- 0xd0,0x3a,0xcf,0x86,0xd5,0x24,0x54,0x04,0x08,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,
+- 0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,
+- 0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,
+- 0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x06,0x0a,0x00,0xd0,0x5e,
+- 0xcf,0x86,0xd5,0x28,0xd4,0x18,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0xd1,0x08,
+- 0x10,0x04,0x0a,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x11,0x00,0x93,0x0c,0x92,0x08,
+- 0x11,0x04,0x0c,0x00,0x0d,0x00,0x10,0x00,0x10,0x00,0xd4,0x1c,0x53,0x04,0x0c,0x00,
+- 0xd2,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0d,0x00,0x10,0x00,0x51,0x04,0x10,0x00,
+- 0x10,0x04,0x12,0x00,0x14,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,
+- 0x11,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x1c,
+- 0x94,0x18,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x51,0x04,0x15,0x00,
+- 0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0xd3,0x10,
+- 0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x92,0x0c,
+- 0x51,0x04,0x0d,0x00,0x10,0x04,0x0c,0x00,0x0a,0x00,0x0a,0x00,0xe4,0xf2,0x02,0xe3,
+- 0x65,0x01,0xd2,0x98,0xd1,0x48,0xd0,0x36,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,
+- 0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x09,0x08,0x00,0x08,0x00,
+- 0x08,0x00,0xd4,0x0c,0x53,0x04,0x08,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x53,0x04,
+- 0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,
+- 0x09,0x00,0x54,0x04,0x09,0x00,0x13,0x04,0x09,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,
+- 0x0a,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,
+- 0x10,0x04,0x0a,0x09,0x12,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,
+- 0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,
+- 0x54,0x04,0x0b,0xe6,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,
+- 0x52,0x04,0x0b,0x00,0x11,0x04,0x11,0x00,0x14,0x00,0xd1,0x60,0xd0,0x22,0xcf,0x86,
+- 0x55,0x04,0x0a,0x00,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,
+- 0x10,0x04,0x0a,0x00,0x0a,0xdc,0x11,0x04,0x0a,0xdc,0x0a,0x00,0x0a,0x00,0xcf,0x86,
+- 0xd5,0x24,0x54,0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,
+- 0x0a,0x00,0x0a,0x09,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
+- 0x00,0x00,0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,
+- 0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,
+- 0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,
+- 0x0b,0x00,0x0b,0x07,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x20,0xd3,0x10,
+- 0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x52,0x04,
+- 0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,
+- 0xd2,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x0b,0x00,0x54,0x04,
+- 0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
+- 0x10,0x00,0x00,0x00,0xd2,0xd0,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0a,0x00,
+- 0x54,0x04,0x0a,0x00,0x93,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,
+- 0x0a,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0a,0x00,
+- 0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,
+- 0x11,0x04,0x0a,0x00,0x00,0x00,0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,
+- 0x12,0x04,0x0b,0x00,0x10,0x00,0xd0,0x3a,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,
+- 0x0b,0x00,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0xe6,
+- 0xd1,0x08,0x10,0x04,0x0b,0xdc,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,
+- 0xcf,0x86,0xd5,0x2c,0xd4,0x18,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,
+- 0x0b,0xe6,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x00,0x00,
+- 0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,
+- 0x0d,0x00,0x93,0x10,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,
+- 0x00,0x00,0x00,0x00,0xd1,0x8c,0xd0,0x72,0xcf,0x86,0xd5,0x4c,0xd4,0x30,0xd3,0x18,
+- 0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,
+- 0x10,0x04,0x0c,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,
+- 0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,
+- 0x0c,0x00,0x00,0x00,0x00,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,
+- 0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,
+- 0x10,0x04,0x0c,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x10,
+- 0x93,0x0c,0x52,0x04,0x11,0x00,0x11,0x04,0x10,0x00,0x15,0x00,0x00,0x00,0x11,0x00,
+- 0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,
+- 0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x00,0x00,
+- 0x53,0x04,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,
+- 0x02,0xff,0xff,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd1,0x76,0xd0,0x09,0xcf,0x86,
+- 0xcf,0x06,0x02,0xff,0xff,0xcf,0x86,0x85,0xd4,0x07,0xcf,0x06,0x02,0xff,0xff,0xd3,
+- 0x07,0xcf,0x06,0x02,0xff,0xff,0xd2,0x07,0xcf,0x06,0x02,0xff,0xff,0xd1,0x07,0xcf,
+- 0x06,0x02,0xff,0xff,0xd0,0x18,0xcf,0x86,0x55,0x05,0x02,0xff,0xff,0x94,0x0d,0x93,
+- 0x09,0x12,0x05,0x02,0xff,0xff,0x00,0x00,0x00,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x24,
+- 0x94,0x20,0xd3,0x10,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,
+- 0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,
+- 0x0b,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x00,0x00,
+- 0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,
+- 0xe4,0x9c,0x10,0xe3,0x16,0x08,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x08,0x04,0xe0,
+- 0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0x10,0x08,0x01,
+- 0xff,0xe8,0xbb,0x8a,0x00,0x01,0xff,0xe8,0xb3,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0xe6,0xbb,0x91,0x00,0x01,0xff,0xe4,0xb8,0xb2,0x00,0x10,0x08,0x01,0xff,0xe5,
+- 0x8f,0xa5,0x00,0x01,0xff,0xe9,0xbe,0x9c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0xe9,0xbe,0x9c,0x00,0x01,0xff,0xe5,0xa5,0x91,0x00,0x10,0x08,0x01,0xff,0xe9,
+- 0x87,0x91,0x00,0x01,0xff,0xe5,0x96,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
+- 0xa5,0x88,0x00,0x01,0xff,0xe6,0x87,0xb6,0x00,0x10,0x08,0x01,0xff,0xe7,0x99,0xa9,
+- 0x00,0x01,0xff,0xe7,0xbe,0x85,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0xe8,0x98,0xbf,0x00,0x01,0xff,0xe8,0x9e,0xba,0x00,0x10,0x08,0x01,0xff,0xe8,
+- 0xa3,0xb8,0x00,0x01,0xff,0xe9,0x82,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,
+- 0xa8,0x82,0x00,0x01,0xff,0xe6,0xb4,0x9b,0x00,0x10,0x08,0x01,0xff,0xe7,0x83,0x99,
+- 0x00,0x01,0xff,0xe7,0x8f,0x9e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
+- 0x90,0xbd,0x00,0x01,0xff,0xe9,0x85,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0xa7,0xb1,
+- 0x00,0x01,0xff,0xe4,0xba,0x82,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x8d,0xb5,
+- 0x00,0x01,0xff,0xe6,0xac,0x84,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x9b,0x00,0x01,
+- 0xff,0xe8,0x98,0xad,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0xe9,0xb8,0x9e,0x00,0x01,0xff,0xe5,0xb5,0x90,0x00,0x10,0x08,0x01,0xff,0xe6,
+- 0xbf,0xab,0x00,0x01,0xff,0xe8,0x97,0x8d,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
+- 0xa5,0xa4,0x00,0x01,0xff,0xe6,0x8b,0x89,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0x98,
+- 0x00,0x01,0xff,0xe8,0xa0,0x9f,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
+- 0xbb,0x8a,0x00,0x01,0xff,0xe6,0x9c,0x97,0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0xaa,
+- 0x00,0x01,0xff,0xe7,0x8b,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x83,0x8e,
+- 0x00,0x01,0xff,0xe4,0xbe,0x86,0x00,0x10,0x08,0x01,0xff,0xe5,0x86,0xb7,0x00,0x01,
+- 0xff,0xe5,0x8b,0x9e,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,
+- 0x93,0x84,0x00,0x01,0xff,0xe6,0xab,0x93,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x90,
+- 0x00,0x01,0xff,0xe7,0x9b,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x80,0x81,
+- 0x00,0x01,0xff,0xe8,0x98,0x86,0x00,0x10,0x08,0x01,0xff,0xe8,0x99,0x9c,0x00,0x01,
+- 0xff,0xe8,0xb7,0xaf,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9c,0xb2,
+- 0x00,0x01,0xff,0xe9,0xad,0xaf,0x00,0x10,0x08,0x01,0xff,0xe9,0xb7,0xba,0x00,0x01,
+- 0xff,0xe7,0xa2,0x8c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa5,0xbf,0x00,0x01,
+- 0xff,0xe7,0xb6,0xa0,0x00,0x10,0x08,0x01,0xff,0xe8,0x8f,0x89,0x00,0x01,0xff,0xe9,
+- 0x8c,0x84,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0x10,0x08,
+- 0x01,0xff,0xe5,0xa3,0x9f,0x00,0x01,0xff,0xe5,0xbc,0x84,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe7,0xb1,0xa0,0x00,0x01,0xff,0xe8,0x81,0xbe,0x00,0x10,0x08,0x01,0xff,
+- 0xe7,0x89,0xa2,0x00,0x01,0xff,0xe7,0xa3,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe8,0xb3,0x82,0x00,0x01,0xff,0xe9,0x9b,0xb7,0x00,0x10,0x08,0x01,0xff,
+- 0xe5,0xa3,0x98,0x00,0x01,0xff,0xe5,0xb1,0xa2,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe6,0xa8,0x93,0x00,0x01,0xff,0xe6,0xb7,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,
+- 0x8f,0x00,0x01,0xff,0xe7,0xb4,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,0x99,0x8b,0x00,0x10,0x08,0x01,0xff,
+- 0xe5,0x8b,0x92,0x00,0x01,0xff,0xe8,0x82,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe5,0x87,0x9c,0x00,0x01,0xff,0xe5,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe7,0xa8,
+- 0x9c,0x00,0x01,0xff,0xe7,0xb6,0xbe,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe8,0x8f,0xb1,0x00,0x01,0xff,0xe9,0x99,0xb5,0x00,0x10,0x08,0x01,0xff,0xe8,0xae,
+- 0x80,0x00,0x01,0xff,0xe6,0x8b,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,
+- 0x82,0x00,0x01,0xff,0xe8,0xab,0xbe,0x00,0x10,0x08,0x01,0xff,0xe4,0xb8,0xb9,0x00,
+- 0x01,0xff,0xe5,0xaf,0xa7,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe6,0x80,0x92,0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0x10,0x08,0x01,0xff,
+- 0xe7,0x95,0xb0,0x00,0x01,0xff,0xe5,0x8c,0x97,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe7,0xa3,0xbb,0x00,0x01,0xff,0xe4,0xbe,0xbf,0x00,0x10,0x08,0x01,0xff,0xe5,0xbe,
+- 0xa9,0x00,0x01,0xff,0xe4,0xb8,0x8d,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe6,0xb3,0x8c,0x00,0x01,0xff,0xe6,0x95,0xb8,0x00,0x10,0x08,0x01,0xff,0xe7,0xb4,
+- 0xa2,0x00,0x01,0xff,0xe5,0x8f,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa1,
+- 0x9e,0x00,0x01,0xff,0xe7,0x9c,0x81,0x00,0x10,0x08,0x01,0xff,0xe8,0x91,0x89,0x00,
+- 0x01,0xff,0xe8,0xaa,0xaa,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe6,0xae,0xba,0x00,0x01,0xff,0xe8,0xbe,0xb0,0x00,0x10,0x08,0x01,0xff,0xe6,0xb2,
+- 0x88,0x00,0x01,0xff,0xe6,0x8b,0xbe,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8b,
+- 0xa5,0x00,0x01,0xff,0xe6,0x8e,0xa0,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xa5,0x00,
+- 0x01,0xff,0xe4,0xba,0xae,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,
+- 0xa9,0x00,0x01,0xff,0xe5,0x87,0x89,0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0x81,0x00,
+- 0x01,0xff,0xe7,0xb3,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x89,0xaf,0x00,
+- 0x01,0xff,0xe8,0xab,0x92,0x00,0x10,0x08,0x01,0xff,0xe9,0x87,0x8f,0x00,0x01,0xff,
+- 0xe5,0x8b,0xb5,0x00,0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,
+- 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x91,0x82,0x00,0x01,0xff,0xe5,0xa5,
+- 0xb3,0x00,0x10,0x08,0x01,0xff,0xe5,0xbb,0xac,0x00,0x01,0xff,0xe6,0x97,0x85,0x00,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xbf,0xbe,0x00,0x01,0xff,0xe7,0xa4,0xaa,0x00,
+- 0x10,0x08,0x01,0xff,0xe9,0x96,0xad,0x00,0x01,0xff,0xe9,0xa9,0xaa,0x00,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xba,0x97,0x00,0x01,0xff,0xe9,0xbb,0x8e,0x00,
+- 0x10,0x08,0x01,0xff,0xe5,0x8a,0x9b,0x00,0x01,0xff,0xe6,0x9b,0x86,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe6,0xad,0xb7,0x00,0x01,0xff,0xe8,0xbd,0xa2,0x00,0x10,0x08,
+- 0x01,0xff,0xe5,0xb9,0xb4,0x00,0x01,0xff,0xe6,0x86,0x90,0x00,0xd3,0x40,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x88,0x80,0x00,0x01,0xff,0xe6,0x92,0x9a,0x00,
+- 0x10,0x08,0x01,0xff,0xe6,0xbc,0xa3,0x00,0x01,0xff,0xe7,0x85,0x89,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe7,0x92,0x89,0x00,0x01,0xff,0xe7,0xa7,0x8a,0x00,0x10,0x08,
+- 0x01,0xff,0xe7,0xb7,0xb4,0x00,0x01,0xff,0xe8,0x81,0xaf,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe8,0xbc,0xa6,0x00,0x01,0xff,0xe8,0x93,0xae,0x00,0x10,0x08,
+- 0x01,0xff,0xe9,0x80,0xa3,0x00,0x01,0xff,0xe9,0x8d,0x8a,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe5,0x88,0x97,0x00,0x01,0xff,0xe5,0x8a,0xa3,0x00,0x10,0x08,0x01,0xff,
+- 0xe5,0x92,0xbd,0x00,0x01,0xff,0xe7,0x83,0x88,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa3,0x82,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,
+- 0x10,0x08,0x01,0xff,0xe5,0xbb,0x89,0x00,0x01,0xff,0xe5,0xbf,0xb5,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe6,0x8d,0xbb,0x00,0x01,0xff,0xe6,0xae,0xae,0x00,0x10,0x08,
+- 0x01,0xff,0xe7,0xb0,0xbe,0x00,0x01,0xff,0xe7,0x8d,0xb5,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe4,0xbb,0xa4,0x00,0x01,0xff,0xe5,0x9b,0xb9,0x00,0x10,0x08,
+- 0x01,0xff,0xe5,0xaf,0xa7,0x00,0x01,0xff,0xe5,0xb6,0xba,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe6,0x80,0x9c,0x00,0x01,0xff,0xe7,0x8e,0xb2,0x00,0x10,0x08,0x01,0xff,
+- 0xe7,0x91,0xa9,0x00,0x01,0xff,0xe7,0xbe,0x9a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe8,0x81,0x86,0x00,0x01,0xff,0xe9,0x88,0xb4,0x00,0x10,0x08,
+- 0x01,0xff,0xe9,0x9b,0xb6,0x00,0x01,0xff,0xe9,0x9d,0x88,0x00,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe9,0xa0,0x98,0x00,0x01,0xff,0xe4,0xbe,0x8b,0x00,0x10,0x08,0x01,0xff,
+- 0xe7,0xa6,0xae,0x00,0x01,0xff,0xe9,0x86,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe9,0x9a,0xb8,0x00,0x01,0xff,0xe6,0x83,0xa1,0x00,0x10,0x08,0x01,0xff,
+- 0xe4,0xba,0x86,0x00,0x01,0xff,0xe5,0x83,0x9a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe5,0xaf,0xae,0x00,0x01,0xff,0xe5,0xb0,0xbf,0x00,0x10,0x08,0x01,0xff,0xe6,0x96,
+- 0x99,0x00,0x01,0xff,0xe6,0xa8,0x82,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,
+- 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0x87,0x8e,0x00,0x01,0xff,0xe7,
+- 0x99,0x82,0x00,0x10,0x08,0x01,0xff,0xe8,0x93,0xbc,0x00,0x01,0xff,0xe9,0x81,0xbc,
+- 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xbe,0x8d,0x00,0x01,0xff,0xe6,0x9a,0x88,
+- 0x00,0x10,0x08,0x01,0xff,0xe9,0x98,0xae,0x00,0x01,0xff,0xe5,0x8a,0x89,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x9d,0xbb,0x00,0x01,0xff,0xe6,0x9f,0xb3,
+- 0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0x81,0x00,0x01,0xff,0xe6,0xba,0x9c,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe7,0x90,0x89,0x00,0x01,0xff,0xe7,0x95,0x99,0x00,0x10,
+- 0x08,0x01,0xff,0xe7,0xa1,0xab,0x00,0x01,0xff,0xe7,0xb4,0x90,0x00,0xd3,0x40,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa1,0x9e,0x00,0x01,0xff,0xe5,0x85,0xad,
+- 0x00,0x10,0x08,0x01,0xff,0xe6,0x88,0xae,0x00,0x01,0xff,0xe9,0x99,0xb8,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe5,0x80,0xab,0x00,0x01,0xff,0xe5,0xb4,0x99,0x00,0x10,
+- 0x08,0x01,0xff,0xe6,0xb7,0xaa,0x00,0x01,0xff,0xe8,0xbc,0xaa,0x00,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe5,0xbe,0x8b,0x00,0x01,0xff,0xe6,0x85,0x84,0x00,0x10,
+- 0x08,0x01,0xff,0xe6,0xa0,0x97,0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe9,0x9a,0x86,0x00,0x01,0xff,0xe5,0x88,0xa9,0x00,0x10,0x08,0x01,
+- 0xff,0xe5,0x90,0x8f,0x00,0x01,0xff,0xe5,0xb1,0xa5,0x00,0xd4,0x80,0xd3,0x40,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x98,0x93,0x00,0x01,0xff,0xe6,0x9d,0x8e,
+- 0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0xa8,0x00,0x01,0xff,0xe6,0xb3,0xa5,0x00,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe7,0x90,0x86,0x00,0x01,0xff,0xe7,0x97,0xa2,0x00,0x10,
+- 0x08,0x01,0xff,0xe7,0xbd,0xb9,0x00,0x01,0xff,0xe8,0xa3,0x8f,0x00,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe8,0xa3,0xa1,0x00,0x01,0xff,0xe9,0x87,0x8c,0x00,0x10,
+- 0x08,0x01,0xff,0xe9,0x9b,0xa2,0x00,0x01,0xff,0xe5,0x8c,0xbf,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe6,0xba,0xba,0x00,0x01,0xff,0xe5,0x90,0x9d,0x00,0x10,0x08,0x01,
+- 0xff,0xe7,0x87,0x90,0x00,0x01,0xff,0xe7,0x92,0x98,0x00,0xd3,0x40,0xd2,0x20,0xd1,
+- 0x10,0x10,0x08,0x01,0xff,0xe8,0x97,0xba,0x00,0x01,0xff,0xe9,0x9a,0xa3,0x00,0x10,
+- 0x08,0x01,0xff,0xe9,0xb1,0x97,0x00,0x01,0xff,0xe9,0xba,0x9f,0x00,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe6,0x9e,0x97,0x00,0x01,0xff,0xe6,0xb7,0x8b,0x00,0x10,0x08,0x01,
+- 0xff,0xe8,0x87,0xa8,0x00,0x01,0xff,0xe7,0xab,0x8b,0x00,0xd2,0x20,0xd1,0x10,0x10,
+- 0x08,0x01,0xff,0xe7,0xac,0xa0,0x00,0x01,0xff,0xe7,0xb2,0x92,0x00,0x10,0x08,0x01,
+- 0xff,0xe7,0x8b,0x80,0x00,0x01,0xff,0xe7,0x82,0x99,0x00,0xd1,0x10,0x10,0x08,0x01,
+- 0xff,0xe8,0xad,0x98,0x00,0x01,0xff,0xe4,0xbb,0x80,0x00,0x10,0x08,0x01,0xff,0xe8,
+- 0x8c,0xb6,0x00,0x01,0xff,0xe5,0x88,0xba,0x00,0xe2,0xad,0x06,0xe1,0xc4,0x03,0xe0,
+- 0xcb,0x01,0xcf,0x86,0xd5,0xe4,0xd4,0x74,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x01,0xff,0xe5,0x88,0x87,0x00,0x01,0xff,0xe5,0xba,0xa6,0x00,0x10,0x08,0x01,0xff,
+- 0xe6,0x8b,0x93,0x00,0x01,0xff,0xe7,0xb3,0x96,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe5,0xae,0x85,0x00,0x01,0xff,0xe6,0xb4,0x9e,0x00,0x10,0x08,0x01,0xff,0xe6,0x9a,
+- 0xb4,0x00,0x01,0xff,0xe8,0xbc,0xbb,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
+- 0xe8,0xa1,0x8c,0x00,0x01,0xff,0xe9,0x99,0x8d,0x00,0x10,0x08,0x01,0xff,0xe8,0xa6,
+- 0x8b,0x00,0x01,0xff,0xe5,0xbb,0x93,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,
+- 0x80,0x00,0x01,0xff,0xe5,0x97,0x80,0x00,0x01,0x00,0xd3,0x34,0xd2,0x18,0xd1,0x0c,
+- 0x10,0x08,0x01,0xff,0xe5,0xa1,0x9a,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe6,0x99,
+- 0xb4,0x00,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe5,0x87,0x9e,0x00,
+- 0x10,0x08,0x01,0xff,0xe7,0x8c,0xaa,0x00,0x01,0xff,0xe7,0x9b,0x8a,0x00,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa4,0xbc,0x00,0x01,0xff,0xe7,0xa5,0x9e,0x00,
+- 0x10,0x08,0x01,0xff,0xe7,0xa5,0xa5,0x00,0x01,0xff,0xe7,0xa6,0x8f,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe9,0x9d,0x96,0x00,0x01,0xff,0xe7,0xb2,0xbe,0x00,0x10,0x08,
+- 0x01,0xff,0xe7,0xbe,0xbd,0x00,0x01,0x00,0xd4,0x64,0xd3,0x30,0xd2,0x18,0xd1,0x0c,
+- 0x10,0x08,0x01,0xff,0xe8,0x98,0x92,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe8,0xab,
+- 0xb8,0x00,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe9,0x80,0xb8,0x00,
+- 0x10,0x08,0x01,0xff,0xe9,0x83,0xbd,0x00,0x01,0x00,0xd2,0x14,0x51,0x04,0x01,0x00,
+- 0x10,0x08,0x01,0xff,0xe9,0xa3,0xaf,0x00,0x01,0xff,0xe9,0xa3,0xbc,0x00,0xd1,0x10,
+- 0x10,0x08,0x01,0xff,0xe9,0xa4,0xa8,0x00,0x01,0xff,0xe9,0xb6,0xb4,0x00,0x10,0x08,
+- 0x0d,0xff,0xe9,0x83,0x9e,0x00,0x0d,0xff,0xe9,0x9a,0xb7,0x00,0xd3,0x40,0xd2,0x20,
+- 0xd1,0x10,0x10,0x08,0x06,0xff,0xe4,0xbe,0xae,0x00,0x06,0xff,0xe5,0x83,0xa7,0x00,
+- 0x10,0x08,0x06,0xff,0xe5,0x85,0x8d,0x00,0x06,0xff,0xe5,0x8b,0x89,0x00,0xd1,0x10,
+- 0x10,0x08,0x06,0xff,0xe5,0x8b,0xa4,0x00,0x06,0xff,0xe5,0x8d,0x91,0x00,0x10,0x08,
+- 0x06,0xff,0xe5,0x96,0x9d,0x00,0x06,0xff,0xe5,0x98,0x86,0x00,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x06,0xff,0xe5,0x99,0xa8,0x00,0x06,0xff,0xe5,0xa1,0x80,0x00,0x10,0x08,
+- 0x06,0xff,0xe5,0xa2,0xa8,0x00,0x06,0xff,0xe5,0xb1,0xa4,0x00,0xd1,0x10,0x10,0x08,
+- 0x06,0xff,0xe5,0xb1,0xae,0x00,0x06,0xff,0xe6,0x82,0x94,0x00,0x10,0x08,0x06,0xff,
+- 0xe6,0x85,0xa8,0x00,0x06,0xff,0xe6,0x86,0x8e,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,
+- 0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe6,0x87,0xb2,0x00,0x06,
+- 0xff,0xe6,0x95,0x8f,0x00,0x10,0x08,0x06,0xff,0xe6,0x97,0xa2,0x00,0x06,0xff,0xe6,
+- 0x9a,0x91,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe6,0xa2,0x85,0x00,0x06,0xff,0xe6,
+- 0xb5,0xb7,0x00,0x10,0x08,0x06,0xff,0xe6,0xb8,0x9a,0x00,0x06,0xff,0xe6,0xbc,0xa2,
+- 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0x85,0xae,0x00,0x06,0xff,0xe7,
+- 0x88,0xab,0x00,0x10,0x08,0x06,0xff,0xe7,0x90,0xa2,0x00,0x06,0xff,0xe7,0xa2,0x91,
+- 0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa4,0xbe,0x00,0x06,0xff,0xe7,0xa5,0x89,
+- 0x00,0x10,0x08,0x06,0xff,0xe7,0xa5,0x88,0x00,0x06,0xff,0xe7,0xa5,0x90,0x00,0xd3,
+- 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa5,0x96,0x00,0x06,0xff,0xe7,
+- 0xa5,0x9d,0x00,0x10,0x08,0x06,0xff,0xe7,0xa6,0x8d,0x00,0x06,0xff,0xe7,0xa6,0x8e,
+- 0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa9,0x80,0x00,0x06,0xff,0xe7,0xaa,0x81,
+- 0x00,0x10,0x08,0x06,0xff,0xe7,0xaf,0x80,0x00,0x06,0xff,0xe7,0xb7,0xb4,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xb8,0x89,0x00,0x06,0xff,0xe7,0xb9,0x81,
+- 0x00,0x10,0x08,0x06,0xff,0xe7,0xbd,0xb2,0x00,0x06,0xff,0xe8,0x80,0x85,0x00,0xd1,
+- 0x10,0x10,0x08,0x06,0xff,0xe8,0x87,0xad,0x00,0x06,0xff,0xe8,0x89,0xb9,0x00,0x10,
+- 0x08,0x06,0xff,0xe8,0x89,0xb9,0x00,0x06,0xff,0xe8,0x91,0x97,0x00,0xd4,0x75,0xd3,
+- 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,0xa4,0x90,0x00,0x06,0xff,0xe8,
+- 0xa6,0x96,0x00,0x10,0x08,0x06,0xff,0xe8,0xac,0x81,0x00,0x06,0xff,0xe8,0xac,0xb9,
+- 0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,0xb3,0x93,0x00,0x06,0xff,0xe8,0xb4,0x88,
+- 0x00,0x10,0x08,0x06,0xff,0xe8,0xbe,0xb6,0x00,0x06,0xff,0xe9,0x80,0xb8,0x00,0xd2,
+- 0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe9,0x9b,0xa3,0x00,0x06,0xff,0xe9,0x9f,0xbf,
+- 0x00,0x10,0x08,0x06,0xff,0xe9,0xa0,0xbb,0x00,0x0b,0xff,0xe6,0x81,0xb5,0x00,0x91,
+- 0x11,0x10,0x09,0x0b,0xff,0xf0,0xa4,0x8b,0xae,0x00,0x0b,0xff,0xe8,0x88,0x98,0x00,
+- 0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe4,0xb8,0xa6,0x00,
+- 0x08,0xff,0xe5,0x86,0xb5,0x00,0x10,0x08,0x08,0xff,0xe5,0x85,0xa8,0x00,0x08,0xff,
+- 0xe4,0xbe,0x80,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0x85,0x85,0x00,0x08,0xff,
+- 0xe5,0x86,0x80,0x00,0x10,0x08,0x08,0xff,0xe5,0x8b,0x87,0x00,0x08,0xff,0xe5,0x8b,
+- 0xba,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0x96,0x9d,0x00,0x08,0xff,
+- 0xe5,0x95,0x95,0x00,0x10,0x08,0x08,0xff,0xe5,0x96,0x99,0x00,0x08,0xff,0xe5,0x97,
+- 0xa2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xa1,0x9a,0x00,0x08,0xff,0xe5,0xa2,
+- 0xb3,0x00,0x10,0x08,0x08,0xff,0xe5,0xa5,0x84,0x00,0x08,0xff,0xe5,0xa5,0x94,0x00,
+- 0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x08,0xff,0xe5,0xa9,0xa2,0x00,0x08,0xff,0xe5,0xac,0xa8,0x00,0x10,0x08,
+- 0x08,0xff,0xe5,0xbb,0x92,0x00,0x08,0xff,0xe5,0xbb,0x99,0x00,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe5,0xbd,0xa9,0x00,0x08,0xff,0xe5,0xbe,0xad,0x00,0x10,0x08,0x08,0xff,
+- 0xe6,0x83,0x98,0x00,0x08,0xff,0xe6,0x85,0x8e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe6,0x84,0x88,0x00,0x08,0xff,0xe6,0x86,0x8e,0x00,0x10,0x08,0x08,0xff,
+- 0xe6,0x85,0xa0,0x00,0x08,0xff,0xe6,0x87,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe6,0x88,0xb4,0x00,0x08,0xff,0xe6,0x8f,0x84,0x00,0x10,0x08,0x08,0xff,0xe6,0x90,
+- 0x9c,0x00,0x08,0xff,0xe6,0x91,0x92,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe6,0x95,0x96,0x00,0x08,0xff,0xe6,0x99,0xb4,0x00,0x10,0x08,0x08,0xff,
+- 0xe6,0x9c,0x97,0x00,0x08,0xff,0xe6,0x9c,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe6,0x9d,0x96,0x00,0x08,0xff,0xe6,0xad,0xb9,0x00,0x10,0x08,0x08,0xff,0xe6,0xae,
+- 0xba,0x00,0x08,0xff,0xe6,0xb5,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe6,0xbb,0x9b,0x00,0x08,0xff,0xe6,0xbb,0x8b,0x00,0x10,0x08,0x08,0xff,0xe6,0xbc,
+- 0xa2,0x00,0x08,0xff,0xe7,0x80,0x9e,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x85,
+- 0xae,0x00,0x08,0xff,0xe7,0x9e,0xa7,0x00,0x10,0x08,0x08,0xff,0xe7,0x88,0xb5,0x00,
+- 0x08,0xff,0xe7,0x8a,0xaf,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe7,0x8c,0xaa,0x00,0x08,0xff,0xe7,0x91,0xb1,0x00,0x10,0x08,0x08,0xff,
+- 0xe7,0x94,0x86,0x00,0x08,0xff,0xe7,0x94,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe7,0x98,0x9d,0x00,0x08,0xff,0xe7,0x98,0x9f,0x00,0x10,0x08,0x08,0xff,0xe7,0x9b,
+- 0x8a,0x00,0x08,0xff,0xe7,0x9b,0x9b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe7,0x9b,0xb4,0x00,0x08,0xff,0xe7,0x9d,0x8a,0x00,0x10,0x08,0x08,0xff,0xe7,0x9d,
+- 0x80,0x00,0x08,0xff,0xe7,0xa3,0x8c,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xaa,
+- 0xb1,0x00,0x08,0xff,0xe7,0xaf,0x80,0x00,0x10,0x08,0x08,0xff,0xe7,0xb1,0xbb,0x00,
+- 0x08,0xff,0xe7,0xb5,0x9b,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe7,0xb7,0xb4,0x00,0x08,0xff,0xe7,0xbc,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0x80,
+- 0x85,0x00,0x08,0xff,0xe8,0x8d,0x92,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0x8f,
+- 0xaf,0x00,0x08,0xff,0xe8,0x9d,0xb9,0x00,0x10,0x08,0x08,0xff,0xe8,0xa5,0x81,0x00,
+- 0x08,0xff,0xe8,0xa6,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xa6,
+- 0x96,0x00,0x08,0xff,0xe8,0xaa,0xbf,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xb8,0x00,
+- 0x08,0xff,0xe8,0xab,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xac,0x81,0x00,
+- 0x08,0xff,0xe8,0xab,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xad,0x00,0x08,0xff,
+- 0xe8,0xac,0xb9,0x00,0xcf,0x86,0x95,0xde,0xd4,0x81,0xd3,0x40,0xd2,0x20,0xd1,0x10,
+- 0x10,0x08,0x08,0xff,0xe8,0xae,0x8a,0x00,0x08,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,
+- 0x08,0xff,0xe8,0xbc,0xb8,0x00,0x08,0xff,0xe9,0x81,0xb2,0x00,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe9,0x86,0x99,0x00,0x08,0xff,0xe9,0x89,0xb6,0x00,0x10,0x08,0x08,0xff,
+- 0xe9,0x99,0xbc,0x00,0x08,0xff,0xe9,0x9b,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,
+- 0x08,0xff,0xe9,0x9d,0x96,0x00,0x08,0xff,0xe9,0x9f,0x9b,0x00,0x10,0x08,0x08,0xff,
+- 0xe9,0x9f,0xbf,0x00,0x08,0xff,0xe9,0xa0,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,
+- 0xe9,0xa0,0xbb,0x00,0x08,0xff,0xe9,0xac,0x92,0x00,0x10,0x08,0x08,0xff,0xe9,0xbe,
+- 0x9c,0x00,0x08,0xff,0xf0,0xa2,0xa1,0x8a,0x00,0xd3,0x45,0xd2,0x22,0xd1,0x12,0x10,
+- 0x09,0x08,0xff,0xf0,0xa2,0xa1,0x84,0x00,0x08,0xff,0xf0,0xa3,0x8f,0x95,0x00,0x10,
+- 0x08,0x08,0xff,0xe3,0xae,0x9d,0x00,0x08,0xff,0xe4,0x80,0x98,0x00,0xd1,0x11,0x10,
+- 0x08,0x08,0xff,0xe4,0x80,0xb9,0x00,0x08,0xff,0xf0,0xa5,0x89,0x89,0x00,0x10,0x09,
+- 0x08,0xff,0xf0,0xa5,0xb3,0x90,0x00,0x08,0xff,0xf0,0xa7,0xbb,0x93,0x00,0x92,0x14,
+- 0x91,0x10,0x10,0x08,0x08,0xff,0xe9,0xbd,0x83,0x00,0x08,0xff,0xe9,0xbe,0x8e,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0xe1,0x94,0x01,0xe0,0x08,0x01,0xcf,0x86,0xd5,0x42,
+- 0xd4,0x14,0x93,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
+- 0x00,0x00,0x00,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,
+- 0x01,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x00,0x00,0x04,0xff,
+- 0xd7,0x99,0xd6,0xb4,0x00,0x10,0x04,0x01,0x1a,0x01,0xff,0xd7,0xb2,0xd6,0xb7,0x00,
+- 0xd4,0x42,0x53,0x04,0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,
+- 0xd7,0xa9,0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd7,0x82,0x00,0xd1,0x16,0x10,0x0b,
+- 0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,
+- 0x82,0x00,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xb7,0x00,0x01,0xff,0xd7,0x90,0xd6,
+- 0xb8,0x00,0xd3,0x43,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xbc,
+- 0x00,0x01,0xff,0xd7,0x91,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x92,0xd6,0xbc,
+- 0x00,0x01,0xff,0xd7,0x93,0xd6,0xbc,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x94,
+- 0xd6,0xbc,0x00,0x01,0xff,0xd7,0x95,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x96,
+- 0xd6,0xbc,0x00,0x00,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x98,0xd6,
+- 0xbc,0x00,0x01,0xff,0xd7,0x99,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x9a,0xd6,
+- 0xbc,0x00,0x01,0xff,0xd7,0x9b,0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,
+- 0x9c,0xd6,0xbc,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xd7,0x9e,0xd6,0xbc,0x00,0x00,
+- 0x00,0xcf,0x86,0x95,0x85,0x94,0x81,0xd3,0x3e,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,
+- 0xff,0xd7,0xa0,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa1,0xd6,0xbc,0x00,0x10,0x04,0x00,
+- 0x00,0x01,0xff,0xd7,0xa3,0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0xa4,
+- 0xd6,0xbc,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xd7,0xa6,0xd6,0xbc,0x00,0x01,0xff,
+- 0xd7,0xa7,0xd6,0xbc,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa8,0xd6,
+- 0xbc,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0xaa,0xd6,
+- 0xbc,0x00,0x01,0xff,0xd7,0x95,0xd6,0xb9,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,
+- 0x91,0xd6,0xbf,0x00,0x01,0xff,0xd7,0x9b,0xd6,0xbf,0x00,0x10,0x09,0x01,0xff,0xd7,
+- 0xa4,0xd6,0xbf,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,
+- 0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,
+- 0x0c,0x00,0x0c,0x00,0xcf,0x86,0x95,0x24,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,
+- 0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,
+- 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x5a,0xd2,0x06,
+- 0xcf,0x06,0x01,0x00,0xd1,0x14,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x08,
+- 0x14,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,0x04,
+- 0x01,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0x01,0x00,0xcf,0x86,0xd5,0x0c,0x94,0x08,0x13,0x04,0x01,0x00,0x00,0x00,0x05,0x00,
+- 0x54,0x04,0x05,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,
+- 0x06,0x00,0x07,0x00,0x00,0x00,0xd2,0xce,0xd1,0xa5,0xd0,0x37,0xcf,0x86,0xd5,0x15,
+- 0x54,0x05,0x06,0xff,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x00,
+- 0x00,0x00,0x00,0x94,0x1c,0xd3,0x10,0x52,0x04,0x01,0xe6,0x51,0x04,0x0a,0xe6,0x10,
+- 0x04,0x0a,0xe6,0x10,0xdc,0x52,0x04,0x10,0xdc,0x11,0x04,0x10,0xdc,0x11,0xe6,0x01,
+- 0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,
+- 0x04,0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0x92,0x0c,0x91,0x08,0x10,
+- 0x04,0x07,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,
+- 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd4,0x18,0xd3,0x10,0x52,
+- 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x12,0x04,0x01,
+- 0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,
+- 0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,
+- 0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,
++ 0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,
++ 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,
++ 0x91,0x08,0x10,0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x3c,0xd4,0x28,
++ 0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
++ 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,
++ 0x01,0x00,0x01,0x09,0x00,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x91,0x08,
++ 0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
++ 0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x07,0x00,0x00,0x00,0x00,0x00,
++ 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x11,0x00,0x13,0x00,0x13,0x00,0xe1,0x24,
++ 0x01,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,
++ 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,
+ 0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,
+- 0x00,0x01,0xff,0x00,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,
+- 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,
+- 0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x94,0x14,
+- 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
+- 0x01,0x00,0x01,0x00,0xd0,0x2f,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x15,0x93,0x11,
+- 0x92,0x0d,0x91,0x09,0x10,0x05,0x01,0xff,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,
+- 0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
+- 0x00,0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x18,0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,
+- 0x00,0x01,0x00,0x01,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,
+- 0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x00,
+- 0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,
+- 0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,
+- 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x53,0x05,0x00,
+- 0xff,0x00,0xd2,0x0d,0x91,0x09,0x10,0x05,0x00,0xff,0x00,0x04,0x00,0x04,0x00,0x91,
+- 0x08,0x10,0x04,0x03,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0x46,0x3e,0xe1,0x1f,0x3b,
+- 0xe0,0x9c,0x39,0xcf,0x86,0xe5,0x40,0x26,0xc4,0xe3,0x16,0x14,0xe2,0xef,0x11,0xe1,
+- 0xd0,0x10,0xe0,0x60,0x07,0xcf,0x86,0xe5,0x53,0x03,0xe4,0x4c,0x02,0xe3,0x3d,0x01,
+- 0xd2,0x94,0xd1,0x70,0xd0,0x4a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x07,0x00,
+- 0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,
+- 0xd4,0x14,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,
+- 0x00,0x00,0x07,0x00,0x53,0x04,0x07,0x00,0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,
+- 0x07,0x00,0x00,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,
+- 0x95,0x20,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,
+- 0x00,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,
+- 0x00,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x54,0x04,
+- 0x07,0x00,0x53,0x04,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,
+- 0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,
+- 0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x51,0x04,0x00,0x00,
+- 0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,0x00,0x93,0x10,
+- 0x52,0x04,0x07,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,
+- 0xcf,0x06,0x08,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,0x20,0x53,0x04,0x08,0x00,
+- 0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x10,0x00,0xd1,0x08,0x10,0x04,
+- 0x10,0x00,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x12,0x04,
+- 0x0a,0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,
+- 0x00,0x00,0x0a,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,
+- 0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x00,0x00,0xd2,0x5e,0xd1,0x06,0xcf,0x06,
+- 0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,
+- 0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,
+- 0xcf,0x86,0xd5,0x18,0x54,0x04,0x0a,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+- 0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,
+- 0x91,0x08,0x10,0x04,0x10,0xdc,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,
+- 0x10,0x00,0x12,0x04,0x10,0x00,0x00,0x00,0xd1,0x70,0xd0,0x36,0xcf,0x86,0xd5,0x18,
+- 0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x51,0x04,0x05,0x00,
+- 0x10,0x04,0x05,0x00,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x05,0x00,0x00,0x00,
+- 0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x05,0x00,
+- 0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x05,0x00,0x92,0x0c,0x51,0x04,0x05,0x00,
+- 0x10,0x04,0x05,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x0c,
+- 0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x10,0xe6,0x92,0x0c,0x51,0x04,0x10,0xe6,
+- 0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,
+- 0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,
+- 0x00,0x00,0x07,0x00,0x08,0x00,0xcf,0x86,0x95,0x1c,0xd4,0x0c,0x93,0x08,0x12,0x04,
+- 0x08,0x00,0x00,0x00,0x08,0x00,0x93,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0xba,0xd2,0x80,0xd1,0x34,0xd0,0x1a,0xcf,0x86,
+- 0x55,0x04,0x05,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,
+- 0x07,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x53,0x04,0x05,0x00,
+- 0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd0,0x2a,
+- 0xcf,0x86,0xd5,0x14,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,
+- 0x11,0x04,0x07,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x07,0x00,0x92,0x08,0x11,0x04,
+- 0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xcf,0x86,0xd5,0x10,0x54,0x04,0x12,0x00,
+- 0x93,0x08,0x12,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x54,0x04,0x12,0x00,0x53,0x04,
+- 0x12,0x00,0x12,0x04,0x12,0x00,0x00,0x00,0xd1,0x34,0xd0,0x12,0xcf,0x86,0x55,0x04,
+- 0x10,0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,
+- 0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x10,0x00,0x00,0x00,0x52,0x04,0x00,0x00,
+- 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,
+- 0xd2,0x06,0xcf,0x06,0x10,0x00,0xd1,0x40,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,
+- 0x54,0x04,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
+- 0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,
+- 0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x08,0x13,0x04,
+- 0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,0xce,0x02,0xe3,0x45,0x01,
+- 0xd2,0xd0,0xd1,0x70,0xd0,0x52,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,0x0c,0x52,0x04,
+- 0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,
+- 0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,0x00,0xd3,0x10,0x52,0x04,
+- 0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xd2,0x0c,0x91,0x08,
+- 0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x00,0x00,
+- 0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,0x95,0x18,0x54,0x04,0x0b,0x00,0x93,0x10,
+- 0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,
+- 0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,
+- 0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,
+- 0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,
+- 0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x11,0x00,0xd3,0x14,
+- 0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x04,0x11,0x00,
+- 0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x11,0x00,
+- 0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x1c,0x54,0x04,0x09,0x00,0x53,0x04,0x09,0x00,
+- 0xd2,0x08,0x11,0x04,0x09,0x00,0x0b,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,
+- 0x09,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,
+- 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0xcf,0x06,0x00,0x00,
+- 0xd0,0x1a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0x53,0x04,0x0d,0x00,
+- 0x52,0x04,0x00,0x00,0x11,0x04,0x11,0x00,0x0d,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,
+- 0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x11,0x00,0x11,0x00,0x11,0x00,
+- 0x11,0x00,0xd2,0xec,0xd1,0xa4,0xd0,0x76,0xcf,0x86,0xd5,0x48,0xd4,0x28,0xd3,0x14,
+- 0x52,0x04,0x08,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x10,0x04,0x08,0x00,
+- 0x00,0x00,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x08,0x00,0x08,0xdc,0x10,0x04,
+- 0x08,0x00,0x08,0xe6,0xd3,0x10,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
+- 0x08,0x00,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,
+- 0x08,0x00,0x54,0x04,0x08,0x00,0xd3,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x14,0x00,
+- 0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe6,0x08,0x01,0x10,0x04,0x08,0xdc,
+- 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,0x09,0xcf,0x86,0x95,0x28,
+- 0xd4,0x14,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x08,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x10,0x00,
+- 0x00,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x24,0xd3,0x14,0x52,0x04,0x10,0x00,
+- 0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0xe6,0x10,0x04,0x10,0xdc,0x00,0x00,0x92,0x0c,
+- 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x52,0x04,
+- 0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd1,0x54,
+- 0xd0,0x26,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,0x04,
+- 0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
+- 0x0b,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x0b,0x00,0x93,0x0c,
+- 0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x0b,0x00,0x54,0x04,0x0b,0x00,
+- 0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,
+- 0x0b,0x00,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x54,0x04,0x10,0x00,0xd3,0x0c,0x92,0x08,
+- 0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
+- 0x10,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,
+- 0x53,0x04,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
+- 0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x96,0xd2,0x68,0xd1,0x24,0xd0,0x06,
+- 0xcf,0x06,0x0b,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x0b,0x00,0x92,0x0c,
+- 0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0xd0,0x1e,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0x93,0x10,0x92,0x0c,
+- 0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
+- 0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x11,0x00,
+- 0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x11,0x00,
+- 0x11,0x00,0xd1,0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,0x00,0xd4,0x0c,0x93,0x08,
+- 0x12,0x04,0x14,0x00,0x14,0xe6,0x00,0x00,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,
+- 0x14,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,
+- 0xd1,0x24,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,
+- 0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,
+- 0x0b,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x58,0xd0,0x12,0xcf,0x86,0x55,0x04,
+- 0x14,0x00,0x94,0x08,0x13,0x04,0x14,0x00,0x00,0x00,0x14,0x00,0xcf,0x86,0x95,0x40,
+- 0xd4,0x24,0xd3,0x0c,0x52,0x04,0x14,0x00,0x11,0x04,0x14,0x00,0x14,0xdc,0xd2,0x0c,
+- 0x51,0x04,0x14,0xe6,0x10,0x04,0x14,0xe6,0x14,0xdc,0x91,0x08,0x10,0x04,0x14,0xe6,
+- 0x14,0xdc,0x14,0xdc,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0xdc,0x14,0x00,
+- 0x14,0x00,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x15,0x00,
+- 0x93,0x10,0x52,0x04,0x15,0x00,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,
+- 0x00,0x00,0xcf,0x86,0xe5,0x0f,0x06,0xe4,0xf8,0x03,0xe3,0x02,0x02,0xd2,0xfb,0xd1,
+- 0x4c,0xd0,0x06,0xcf,0x06,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x1c,0xd3,0x10,0x52,
+- 0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x09,0x0c,0x00,0x52,0x04,0x0c,
+- 0x00,0x11,0x04,0x0c,0x00,0x00,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x0c,
+- 0x00,0x0c,0x00,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,0x00,0x00,0x52,0x04,0x00,
+- 0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x09,0xd0,0x69,0xcf,0x86,0xd5,
+- 0x32,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x15,0x51,0x04,0x0b,0x00,0x10,
+- 0x0d,0x0b,0xff,0xf0,0x91,0x82,0x99,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x91,0x11,
+- 0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x9b,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x0b,
+- 0x00,0xd4,0x1d,0x53,0x04,0x0b,0x00,0x92,0x15,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,
+- 0x00,0x0b,0xff,0xf0,0x91,0x82,0xa5,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x53,0x04,
+- 0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x10,0x04,0x0b,0x07,
+- 0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,0x0c,0x92,0x08,0x11,0x04,
+- 0x0b,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,
+- 0x14,0x00,0x00,0x00,0x0d,0x00,0xd4,0x14,0x53,0x04,0x0d,0x00,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x08,
+- 0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0xd1,0x96,0xd0,0x5c,0xcf,0x86,0xd5,0x18,
+- 0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x0d,0x00,
+- 0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd4,0x26,0x53,0x04,0x0d,0x00,0x52,0x04,0x0d,0x00,
+- 0x51,0x04,0x0d,0x00,0x10,0x0d,0x0d,0xff,0xf0,0x91,0x84,0xb1,0xf0,0x91,0x84,0xa7,
+- 0x00,0x0d,0xff,0xf0,0x91,0x84,0xb2,0xf0,0x91,0x84,0xa7,0x00,0x93,0x18,0xd2,0x0c,
+- 0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x0d,0x09,0x91,0x08,0x10,0x04,0x0d,0x09,
+- 0x00,0x00,0x0d,0x00,0x0d,0x00,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,
+- 0x0d,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x10,0x00,
+- 0x54,0x04,0x10,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,
+- 0x10,0x07,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,
+- 0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x91,0x08,
+- 0x10,0x04,0x0d,0x09,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,
+- 0x0d,0x00,0x11,0x00,0x10,0x04,0x11,0x07,0x11,0x00,0x91,0x08,0x10,0x04,0x11,0x00,
+- 0x10,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,
+- 0x10,0x00,0x11,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
+- 0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,
+- 0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xc8,0xd1,0x48,
+- 0xd0,0x42,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,
+- 0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x54,0x04,0x10,0x00,
+- 0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0x09,0x10,0x04,
+- 0x10,0x07,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x12,0x00,
+- 0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x10,
+- 0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,
+- 0x00,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,
+- 0x10,0x04,0x00,0x00,0x11,0x00,0x94,0x10,0x53,0x04,0x11,0x00,0x92,0x08,0x11,0x04,
+- 0x11,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x18,
+- 0x53,0x04,0x10,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0x07,0x10,0x04,
+- 0x10,0x09,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,
+- 0x00,0x00,0x00,0x00,0xe1,0x27,0x01,0xd0,0x8a,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,
+- 0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x10,0x00,0x10,0x00,0x91,0x08,0x10,
+- 0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,
+- 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,
+- 0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0xd4,
+- 0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,
+- 0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,
+- 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,
+- 0x00,0x10,0x04,0x00,0x00,0x14,0x07,0x91,0x08,0x10,0x04,0x10,0x07,0x10,0x00,0x10,
+- 0x00,0xcf,0x86,0xd5,0x6a,0xd4,0x42,0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,
+- 0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0xd2,0x19,0xd1,0x08,0x10,
+- 0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0xff,0xf0,0x91,0x8d,0x87,0xf0,
+- 0x91,0x8c,0xbe,0x00,0x91,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x8d,0x87,0xf0,0x91,
+- 0x8d,0x97,0x00,0x10,0x09,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,
+- 0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x52,
+- 0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd4,0x1c,0xd3,
+- 0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0xe6,0x52,0x04,0x10,0xe6,0x91,
+- 0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x10,0xe6,0x91,
+- 0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe3,
+- 0x30,0x01,0xd2,0xb7,0xd1,0x48,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x95,0x3c,
+- 0xd4,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x09,0x12,0x00,
+- 0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x07,0x12,0x00,0x12,0x00,0x53,0x04,0x12,0x00,
+- 0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x00,0x00,0x12,0x00,0xd1,0x08,0x10,0x04,
+- 0x00,0x00,0x12,0x00,0x10,0x04,0x14,0xe6,0x15,0x00,0x00,0x00,0xd0,0x45,0xcf,0x86,
+- 0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0xd2,0x15,0x51,0x04,
+- 0x10,0x00,0x10,0x04,0x10,0x00,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xba,
+- 0x00,0xd1,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xb0,0x00,
+- 0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xbd,0x00,0x10,
+- 0x00,0xcf,0x86,0x95,0x24,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,
+- 0x04,0x10,0x09,0x10,0x07,0x10,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,
+- 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,
+- 0x40,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x0c,0x52,0x04,0x10,
+- 0x00,0x11,0x04,0x10,0x00,0x00,0x00,0xd2,0x1e,0x51,0x04,0x10,0x00,0x10,0x0d,0x10,
+- 0xff,0xf0,0x91,0x96,0xb8,0xf0,0x91,0x96,0xaf,0x00,0x10,0xff,0xf0,0x91,0x96,0xb9,
+- 0xf0,0x91,0x96,0xaf,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,0x09,0xcf,
+- 0x86,0x95,0x2c,0xd4,0x1c,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x07,0x10,
+- 0x00,0x10,0x00,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0x53,
+- 0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0xd2,
+- 0xa0,0xd1,0x5c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,
+- 0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,
+- 0x09,0xcf,0x86,0xd5,0x24,0xd4,0x14,0x93,0x10,0x52,0x04,0x10,0x00,0x91,0x08,0x10,
+- 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,
+- 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,
+- 0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x2a,0xcf,
+- 0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0xd3,0x10,0x52,0x04,0x0d,0x00,0x51,
+- 0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x0d,0x07,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x53,0x04,0x0d,
+- 0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,
+- 0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x11,0x00,0x53,0x04,0x11,0x00,0xd2,
+- 0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,
+- 0x00,0x11,0x00,0x11,0x00,0x94,0x14,0x53,0x04,0x11,0x00,0x92,0x0c,0x51,0x04,0x11,
+- 0x00,0x10,0x04,0x11,0x00,0x11,0x09,0x00,0x00,0x11,0x00,0xcf,0x06,0x00,0x00,0xcf,
+- 0x06,0x00,0x00,0xe4,0x59,0x01,0xd3,0xb2,0xd2,0x5c,0xd1,0x28,0xd0,0x22,0xcf,0x86,
+- 0x55,0x04,0x14,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,0x00,0x92,0x10,0xd1,0x08,
+- 0x10,0x04,0x14,0x00,0x14,0x09,0x10,0x04,0x14,0x07,0x14,0x00,0x00,0x00,0xcf,0x06,
+- 0x00,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,
+- 0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,
+- 0x10,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
+- 0x00,0x00,0x10,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,
+- 0x00,0x00,0x94,0x10,0x53,0x04,0x15,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,
+- 0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x15,0x00,0x53,0x04,0x15,0x00,
+- 0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x94,0x1c,0x93,0x18,0xd2,0x0c,
+- 0x91,0x08,0x10,0x04,0x15,0x09,0x15,0x00,0x15,0x00,0x91,0x08,0x10,0x04,0x15,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,0x3c,0xd0,0x1e,0xcf,0x86,
+- 0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x93,0x10,0x52,0x04,0x13,0x00,0x91,0x08,
+- 0x10,0x04,0x13,0x09,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,
+- 0x93,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x13,0x09,
+- 0x00,0x00,0x13,0x00,0x13,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,0x10,0x93,0x0c,
+- 0x52,0x04,0x13,0x00,0x11,0x04,0x15,0x00,0x13,0x00,0x13,0x00,0x53,0x04,0x13,0x00,
+- 0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x13,0x09,0x13,0x00,0x91,0x08,0x10,0x04,
+- 0x13,0x00,0x14,0x00,0x13,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x13,0x00,
+- 0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,
+- 0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
+- 0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe3,0xa9,0x01,0xd2,
+- 0xb0,0xd1,0x6c,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,
+- 0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x54,
+- 0x04,0x12,0x00,0xd3,0x10,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,
+- 0x00,0x00,0x00,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x12,
+- 0x09,0xcf,0x86,0xd5,0x14,0x94,0x10,0x93,0x0c,0x52,0x04,0x12,0x00,0x11,0x04,0x12,
+- 0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x94,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,
+- 0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xd0,0x3e,0xcf,
+- 0x86,0xd5,0x14,0x54,0x04,0x12,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x12,
+- 0x00,0x12,0x00,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,
+- 0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x93,0x10,0x52,0x04,0x12,0x00,0x51,
+- 0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,
+- 0xa0,0xd0,0x52,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,0x13,0x00,0x51,
+- 0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,
+- 0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x54,0x04,0x13,0x00,0xd3,0x10,0x52,
+- 0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0xd2,0x0c,0x51,
+- 0x04,0x00,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x00,
+- 0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x51,0x04,0x13,
+- 0x00,0x10,0x04,0x13,0x07,0x13,0x00,0x11,0x04,0x13,0x09,0x13,0x00,0x00,0x00,0x53,
+- 0x04,0x13,0x00,0x92,0x08,0x11,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x94,0x20,0xd3,
+- 0x10,0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x92,
+- 0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd0,
+- 0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x14,0x53,0x04,0x14,0x00,0x52,0x04,0x14,0x00,0x51,
+- 0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x14,
+- 0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x14,
+- 0x09,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x94,
+- 0x10,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,
+- 0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,
+- 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,
+- 0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x15,
+- 0x00,0x54,0x04,0x15,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x15,0x00,0x00,0x00,0x00,
+- 0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0xd0,
+- 0xca,0xcf,0x86,0xd5,0xc2,0xd4,0x54,0xd3,0x06,0xcf,0x06,0x09,0x00,0xd2,0x06,0xcf,
+- 0x06,0x09,0x00,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,
+- 0x00,0x94,0x14,0x53,0x04,0x09,0x00,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,
+- 0x04,0x09,0x00,0x10,0x00,0x10,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x10,
+- 0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x11,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x68,0xd2,0x46,0xd1,0x40,0xd0,
+- 0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x20,0xd3,0x10,0x92,
+- 0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,
+- 0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x09,
+- 0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x11,
+- 0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x95,0x10,0x94,0x0c,0x93,
+- 0x08,0x12,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,
+- 0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x4c,0xd4,0x06,0xcf,
+- 0x06,0x0b,0x00,0xd3,0x40,0xd2,0x3a,0xd1,0x34,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0b,
+- 0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,
+- 0x04,0x0b,0x00,0x00,0x00,0x53,0x04,0x15,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,
++ 0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,
++ 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,
++ 0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x07,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,
++ 0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x45,0xd3,0x14,0x52,
++ 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,
++ 0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x96,0x00,
++ 0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xad,0x87,0xe0,0xac,0xbe,0x00,0x91,
++ 0x0f,0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x97,0x00,0x01,0x09,0x00,0x00,
++ 0xd3,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,
++ 0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xac,0xa1,0xe0,0xac,0xbc,0x00,0x01,0xff,0xe0,
++ 0xac,0xa2,0xe0,0xac,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd4,0x14,0x93,0x10,
++ 0xd2,0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,
++ 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x0c,0x00,0x0c,0x00,
++ 0x00,0x00,0xd0,0xb1,0xcf,0x86,0xd5,0x63,0xd4,0x28,0xd3,0x14,0xd2,0x08,0x11,0x04,
++ 0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,
++ 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,
++ 0xd3,0x1f,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x0f,
++ 0x10,0x0b,0x01,0xff,0xe0,0xae,0x92,0xe0,0xaf,0x97,0x00,0x01,0x00,0x00,0x00,0xd2,
++ 0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,
++ 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x51,
++ 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,
++ 0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,
++ 0x04,0x00,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,
++ 0x04,0x08,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,
++ 0x00,0x01,0x00,0xcf,0x86,0xd5,0x61,0xd4,0x45,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x01,
++ 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,
++ 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,0xae,
++ 0xbe,0x00,0x01,0xff,0xe0,0xaf,0x87,0xe0,0xae,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,
++ 0xff,0xe0,0xaf,0x86,0xe0,0xaf,0x97,0x00,0x01,0x09,0x00,0x00,0x93,0x18,0xd2,0x0c,
++ 0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
++ 0x00,0x00,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,
++ 0x00,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,
++ 0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,
++ 0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xe3,0x1c,0x04,0xe2,0x1a,0x02,0xd1,0xf3,
++ 0xd0,0x76,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
++ 0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,
++ 0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,
++ 0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++ 0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,
++ 0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x10,0x00,
++ 0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,
++ 0x00,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x53,0xd4,0x2f,0xd3,0x10,0x52,0x04,
++ 0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd2,0x13,0x91,0x0f,
++ 0x10,0x0b,0x01,0xff,0xe0,0xb1,0x86,0xe0,0xb1,0x96,0x00,0x00,0x00,0x01,0x00,0x91,
++ 0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,
++ 0x08,0x10,0x04,0x00,0x00,0x01,0x54,0x10,0x04,0x01,0x5b,0x00,0x00,0x92,0x0c,0x51,
++ 0x04,0x0a,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,
++ 0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,
++ 0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0a,
++ 0x00,0xd0,0x76,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,
++ 0x04,0x12,0x00,0x10,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,
++ 0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,
++ 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,
++ 0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,
++ 0x04,0x07,0x07,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x82,0xd4,0x5e,0xd3,0x2a,0xd2,
++ 0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb2,0xbf,0xe0,0xb3,0x95,0x00,0x01,0x00,
++ 0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
++ 0xe0,0xb3,0x86,0xe0,0xb3,0x95,0x00,0xd2,0x28,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,
++ 0xb3,0x86,0xe0,0xb3,0x96,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,
++ 0xb3,0x82,0x00,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x82,0xe0,0xb3,0x95,0x00,0x91,
++ 0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,
++ 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x00,
++ 0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,
++ 0x08,0x11,0x04,0x01,0x00,0x09,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,
++ 0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0xe1,0x06,0x01,0xd0,0x6e,0xcf,0x86,0xd5,0x3c,0xd4,0x28,
++ 0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x10,0x00,0x01,0x00,0x91,0x08,
++ 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,
++ 0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,
++ 0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,
++ 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x0c,0x00,0x13,0x09,0x91,0x08,0x10,0x04,
++ 0x13,0x09,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x65,0xd4,0x45,0xd3,0x10,0x52,0x04,
++ 0x01,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,
++ 0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb4,0xbe,
++ 0x00,0x01,0xff,0xe0,0xb5,0x87,0xe0,0xb4,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,
++ 0xe0,0xb5,0x86,0xe0,0xb5,0x97,0x00,0x01,0x09,0x10,0x04,0x0c,0x00,0x12,0x00,0xd3,
++ 0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x01,0x00,0x52,
++ 0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x11,0x00,0xd4,0x14,0x93,
++ 0x10,0xd2,0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,
++ 0x00,0xd3,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x12,0x00,0x92,0x0c,0x91,
++ 0x08,0x10,0x04,0x12,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x5a,0xcf,0x86,0xd5,
++ 0x34,0xd4,0x18,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,
++ 0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,
++ 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x04,
++ 0x00,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,
++ 0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x04,0x00,0x00,0x00,0xcf,0x86,0xd5,0x77,0xd4,0x28,0xd3,0x10,0x52,0x04,0x04,
++ 0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,
++ 0x00,0x10,0x04,0x04,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x04,
++ 0x00,0xd3,0x14,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x10,
++ 0x04,0x04,0x00,0x00,0x00,0xd2,0x13,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe0,
++ 0xb7,0x99,0xe0,0xb7,0x8a,0x00,0x04,0x00,0xd1,0x19,0x10,0x0b,0x04,0xff,0xe0,0xb7,
++ 0x99,0xe0,0xb7,0x8f,0x00,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0xe0,0xb7,0x8a,
++ 0x00,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x9f,0x00,0x04,0x00,0xd4,0x10,
++ 0x93,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x14,
++ 0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0xe2,0x31,0x01,0xd1,0x58,0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,
++ 0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,
++ 0x00,0x01,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,
++ 0x04,0x01,0x67,0x10,0x04,0x01,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,
++ 0x00,0x01,0x00,0xcf,0x86,0x95,0x18,0xd4,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,
++ 0x6b,0x01,0x00,0x53,0x04,0x01,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd0,
++ 0x9e,0xcf,0x86,0xd5,0x54,0xd4,0x3c,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,
++ 0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
++ 0x00,0x10,0x04,0x15,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x15,
++ 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x15,
++ 0x00,0xd3,0x08,0x12,0x04,0x15,0x00,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,
++ 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,
++ 0x04,0x15,0x00,0x01,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,
++ 0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x15,0x00,0x01,0x00,0x91,0x08,0x10,
++ 0x04,0x15,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,
++ 0x76,0x10,0x04,0x15,0x09,0x01,0x00,0x11,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0x95,
++ 0x34,0xd4,0x20,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,
++ 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x01,0x7a,0x11,0x04,0x01,0x00,0x00,
++ 0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x01,
++ 0x00,0x0d,0x00,0x00,0x00,0xe1,0x2b,0x01,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,
++ 0x02,0x00,0x53,0x04,0x02,0x00,0x92,0x08,0x11,0x04,0x02,0xdc,0x02,0x00,0x02,0x00,
++ 0x54,0x04,0x02,0x00,0xd3,0x14,0x52,0x04,0x02,0x00,0xd1,0x08,0x10,0x04,0x02,0x00,
++ 0x02,0xdc,0x10,0x04,0x02,0x00,0x02,0xdc,0x92,0x0c,0x91,0x08,0x10,0x04,0x02,0x00,
++ 0x02,0xd8,0x02,0x00,0x02,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x36,0xd3,0x17,0x92,0x13,
++ 0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x82,0xe0,0xbe,0xb7,
++ 0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,
++ 0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x8c,0xe0,0xbe,0xb7,0x00,0x02,0x00,
++ 0xd3,0x26,0xd2,0x13,0x51,0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x91,0xe0,
++ 0xbe,0xb7,0x00,0x02,0x00,0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,
++ 0xbd,0x96,0xe0,0xbe,0xb7,0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,
++ 0xe0,0xbd,0x9b,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x02,0x00,0xd4,0x27,0x53,0x04,0x02,
++ 0x00,0xd2,0x17,0xd1,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x80,0xe0,0xbe,
++ 0xb5,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,
++ 0x00,0x00,0xd3,0x35,0xd2,0x17,0xd1,0x08,0x10,0x04,0x00,0x00,0x02,0x81,0x10,0x04,
++ 0x02,0x82,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbd,0xb2,0x00,0xd1,0x0f,0x10,0x04,0x02,
++ 0x84,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbd,0xb4,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,
++ 0xb2,0xe0,0xbe,0x80,0x00,0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,
++ 0xbe,0xb3,0xe0,0xbe,0x80,0x00,0x02,0x00,0x02,0x82,0x11,0x04,0x02,0x82,0x02,0x00,
++ 0xd0,0xd3,0xcf,0x86,0xd5,0x65,0xd4,0x27,0xd3,0x1f,0xd2,0x13,0x91,0x0f,0x10,0x04,
++ 0x02,0x82,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbe,0x80,0x00,0x02,0xe6,0x91,0x08,0x10,
++ 0x04,0x02,0x09,0x02,0x00,0x02,0xe6,0x12,0x04,0x02,0x00,0x0c,0x00,0xd3,0x1f,0xd2,
++ 0x13,0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x92,0xe0,0xbe,
++ 0xb7,0x00,0x51,0x04,0x02,0x00,0x10,0x04,0x04,0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,
++ 0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,
++ 0xe0,0xbe,0x9c,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd4,0x3d,0xd3,0x26,0xd2,0x13,0x51,
++ 0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xa1,0xe0,0xbe,0xb7,0x00,0x02,0x00,
++ 0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0xa6,0xe0,0xbe,0xb7,
++ 0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xab,0xe0,0xbe,
++ 0xb7,0x00,0x02,0x00,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,
++ 0x02,0x00,0x02,0x00,0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x04,0x04,0x00,0x02,0xff,
++ 0xe0,0xbe,0x90,0xe0,0xbe,0xb5,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,
++ 0x00,0x04,0x00,0xcf,0x86,0x95,0x4c,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,
++ 0x04,0x04,0x00,0x10,0x04,0x04,0xdc,0x04,0x00,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,
++ 0x04,0x04,0x00,0x00,0x00,0x10,0x04,0x0a,0x00,0x04,0x00,0xd3,0x14,0xd2,0x08,0x11,
++ 0x04,0x08,0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,
++ 0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0xcf,0x86,0xe5,0xcc,0x04,0xe4,0x63,0x03,0xe3,0x65,0x01,0xe2,0x04,
++ 0x01,0xd1,0x7f,0xd0,0x65,0xcf,0x86,0x55,0x04,0x04,0x00,0xd4,0x33,0xd3,0x1f,0xd2,
++ 0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x0a,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,
++ 0x0b,0x04,0xff,0xe1,0x80,0xa5,0xe1,0x80,0xae,0x00,0x04,0x00,0x92,0x10,0xd1,0x08,
++ 0x10,0x04,0x0a,0x00,0x04,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x04,0x00,0xd3,0x18,
++ 0xd2,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x51,0x04,0x0a,0x00,
++ 0x10,0x04,0x04,0x00,0x04,0x07,0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0x09,
++ 0x10,0x04,0x0a,0x09,0x0a,0x00,0x0a,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,0x04,0x00,
++ 0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,
++ 0xd0,0x2e,0xcf,0x86,0x95,0x28,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,
++ 0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,
++ 0x11,0x04,0x0a,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,
++ 0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,
++ 0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,0x00,
++ 0x00,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,
++ 0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x06,0x00,
++ 0x08,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0d,0x00,
++ 0x0d,0x00,0xd1,0x28,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x1c,0x54,0x04,
++ 0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,
++ 0x0b,0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,
++ 0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x0b,0x00,0x0b,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,
++ 0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x0b,0x00,
++ 0xe2,0x21,0x01,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x52,
++ 0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x04,
++ 0x00,0x04,0x00,0xcf,0x86,0x95,0x48,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,
++ 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,
++ 0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x04,
++ 0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,
++ 0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd0,
++ 0x62,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,
++ 0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,
++ 0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,0x04,
++ 0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,
++ 0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,
++ 0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,
++ 0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,
++ 0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,
++ 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x93,0x10,0x52,0x04,0x04,0x00,0x51,
++ 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x94,0x14,0x53,0x04,0x04,
++ 0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,
++ 0x00,0xd1,0x9c,0xd0,0x3e,0xcf,0x86,0x95,0x38,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,
++ 0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,0x14,0xd2,
++ 0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,
++ 0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,
++ 0x00,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,
++ 0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0xd2,0x0c,0x51,
++ 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x0c,
++ 0xe6,0x10,0x04,0x0c,0xe6,0x08,0xe6,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x08,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0x52,
++ 0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,
++ 0x86,0x95,0x14,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,
++ 0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,
++ 0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x11,0x00,0x00,
++ 0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0xd3,0x30,0xd2,0x2a,0xd1,
++ 0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x0b,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,
++ 0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd2,0x6c,0xd1,0x24,0xd0,
++ 0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,
++ 0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0b,0x00,0x0b,
++ 0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x52,
++ 0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,
++ 0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x04,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x46,0xcf,0x86,0xd5,0x28,0xd4,
++ 0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x00,
++ 0x00,0x06,0x00,0x93,0x10,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x09,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x06,0x00,0x93,0x14,0x52,0x04,0x06,0x00,0xd1,
++ 0x08,0x10,0x04,0x06,0x09,0x06,0x00,0x10,0x04,0x06,0x00,0x00,0x00,0x00,0x00,0xcf,
++ 0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x06,0x00,0x00,0x00,0x00,
++ 0x00,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,
++ 0x00,0x00,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x00,
++ 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0xd5,
++ 0x24,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,
++ 0x09,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,
++ 0xe6,0x00,0x00,0xd4,0x10,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x00,
++ 0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x08,0x11,0x04,0x07,0x00,0x00,0x00,0x00,
++ 0x00,0xe4,0xac,0x03,0xe3,0x4d,0x01,0xd2,0x84,0xd1,0x48,0xd0,0x2a,0xcf,0x86,0x95,
++ 0x24,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,
++ 0x04,0x04,0x00,0x00,0x00,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x00,
++ 0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x53,
++ 0x04,0x04,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x04,0x00,0x94,0x18,0x53,0x04,0x04,0x00,0x92,
++ 0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0xe4,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,
++ 0x00,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,0x52,
++ 0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x42,0xcf,
++ 0x86,0xd5,0x1c,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0xd1,
++ 0x08,0x10,0x04,0x07,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd4,0x0c,0x53,
++ 0x04,0x07,0x00,0x12,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x10,0xd1,
++ 0x08,0x10,0x04,0x07,0x00,0x07,0xde,0x10,0x04,0x07,0xe6,0x07,0xdc,0x00,0x00,0xcf,
++ 0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x00,
++ 0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,
++ 0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x07,0x00,0x91,
++ 0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,
++ 0x04,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x0b,
++ 0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x95,0x28,0xd4,0x10,0x53,0x04,0x08,0x00,0x92,
++ 0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,
++ 0x04,0x08,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x08,0x00,0x07,
++ 0x00,0xd2,0xe4,0xd1,0x80,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x54,0x04,0x08,0x00,0xd3,
++ 0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x08,0xe6,0xd2,
++ 0x0c,0x91,0x08,0x10,0x04,0x08,0xdc,0x08,0x00,0x08,0x00,0x11,0x04,0x00,0x00,0x08,
++ 0x00,0x0b,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,
++ 0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xd4,0x14,0x93,
++ 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,
++ 0x00,0xd3,0x10,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,
++ 0xe6,0x52,0x04,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xe6,0x00,0x00,0x10,0x04,0x00,
++ 0x00,0x0b,0xdc,0xd0,0x5e,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0b,0x00,0x92,
++ 0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,0x11,
++ 0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd4,0x10,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,
++ 0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x10,0xe6,0x91,0x08,0x10,
++ 0x04,0x10,0xe6,0x10,0xdc,0x10,0xdc,0xd2,0x0c,0x51,0x04,0x10,0xdc,0x10,0x04,0x10,
++ 0xdc,0x10,0xe6,0xd1,0x08,0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0x04,0x10,0x00,0x00,
++ 0x00,0xcf,0x06,0x00,0x00,0xe1,0x1e,0x01,0xd0,0xaa,0xcf,0x86,0xd5,0x6e,0xd4,0x53,
++ 0xd3,0x17,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,
++ 0x85,0xe1,0xac,0xb5,0x00,0x09,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,
++ 0xac,0x87,0xe1,0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x89,0xe1,
++ 0xac,0xb5,0x00,0x09,0x00,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8b,0xe1,0xac,
++ 0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8d,0xe1,0xac,0xb5,0x00,0x09,
++ 0x00,0x93,0x17,0x92,0x13,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x91,
++ 0xe1,0xac,0xb5,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x54,0x04,0x09,0x00,0xd3,0x10,
++ 0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x07,0x09,0x00,0x09,0x00,0xd2,0x13,
++ 0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xba,0xe1,0xac,0xb5,
++ 0x00,0x91,0x0f,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xbc,0xe1,0xac,0xb5,0x00,
++ 0x09,0x00,0xcf,0x86,0xd5,0x3d,0x94,0x39,0xd3,0x31,0xd2,0x25,0xd1,0x16,0x10,0x0b,
++ 0x09,0xff,0xe1,0xac,0xbe,0xe1,0xac,0xb5,0x00,0x09,0xff,0xe1,0xac,0xbf,0xe1,0xac,
++ 0xb5,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xad,0x82,0xe1,0xac,0xb5,0x00,0x91,
++ 0x08,0x10,0x04,0x09,0x09,0x09,0x00,0x09,0x00,0x12,0x04,0x09,0x00,0x00,0x00,0x09,
++ 0x00,0xd4,0x1c,0x53,0x04,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,
++ 0x00,0x09,0xe6,0x91,0x08,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0xe6,0xd3,0x08,0x12,
++ 0x04,0x09,0xe6,0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x00,
++ 0x00,0x00,0x00,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x18,0x53,0x04,0x0a,
++ 0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x09,0x0d,0x09,0x11,0x04,0x0d,
++ 0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x0d,0x00,0x0d,
++ 0x00,0xcf,0x86,0x55,0x04,0x0c,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x0c,0x00,0x51,
++ 0x04,0x0c,0x00,0x10,0x04,0x0c,0x07,0x0c,0x00,0x0c,0x00,0xd3,0x0c,0x92,0x08,0x11,
++ 0x04,0x0c,0x00,0x0c,0x09,0x00,0x00,0x12,0x04,0x00,0x00,0x0c,0x00,0xe3,0xb2,0x01,
++ 0xe2,0x09,0x01,0xd1,0x4c,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,0x0a,
++ 0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,
++ 0x07,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0xcf,
++ 0x86,0x95,0x1c,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,0x00,
++ 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,
++ 0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x54,0x04,0x14,0x00,0x53,
++ 0x04,0x14,0x00,0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x91,
++ 0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x14,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x08,0x13,
++ 0x04,0x0d,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,0x0b,
++ 0xe6,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x01,0x0b,0xdc,0x0b,0xdc,0x92,0x08,0x11,
++ 0x04,0x0b,0xdc,0x0b,0xe6,0x0b,0xdc,0xd4,0x28,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x01,0x0b,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,
++ 0x01,0x0b,0x00,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xdc,0x0b,0x00,0xd3,
++ 0x1c,0xd2,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0d,0x00,0xd1,0x08,0x10,
++ 0x04,0x0d,0xe6,0x0d,0x00,0x10,0x04,0x0d,0x00,0x13,0x00,0x92,0x0c,0x51,0x04,0x10,
++ 0xe6,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x07,
++ 0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x94,0x0c,0x53,0x04,0x07,0x00,0x12,0x04,0x07,
++ 0x00,0x08,0x00,0x08,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0xd5,0x40,0xd4,
++ 0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0xe6,0x10,0x04,0x08,0xdc,0x08,0xe6,0x09,
++ 0xe6,0xd2,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x0a,0xe6,0xd1,0x08,0x10,
++ 0x04,0x0a,0xe6,0x0a,0xea,0x10,0x04,0x0a,0xd6,0x0a,0xdc,0x93,0x10,0x92,0x0c,0x91,
++ 0x08,0x10,0x04,0x0a,0xca,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0xd4,0x14,0x93,
++ 0x10,0x52,0x04,0x0a,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xe6,0x10,
++ 0xe6,0xd3,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x13,0xe8,0x13,
++ 0xe4,0xd2,0x10,0xd1,0x08,0x10,0x04,0x13,0xe4,0x13,0xdc,0x10,0x04,0x00,0x00,0x12,
++ 0xe6,0xd1,0x08,0x10,0x04,0x0c,0xe9,0x0b,0xdc,0x10,0x04,0x09,0xe6,0x09,0xdc,0xe2,
++ 0x80,0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,
++ 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa5,0x00,0x01,0xff,0x61,
++ 0xcc,0xa5,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x42,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,
++ 0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,
++ 0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x43,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,
++ 0xcc,0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x87,0x00,0x01,0xff,0x64,
++ 0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa3,0x00,0x01,0xff,0x64,
++ 0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,
++ 0x00,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa7,0x00,0x01,
++ 0xff,0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xad,0x00,0x01,0xff,0x64,
++ 0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x80,0x00,0x01,
++ 0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x81,
++ 0x00,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x45,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x45,
++ 0xcc,0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,
++ 0xcc,0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,
++ 0xff,0x46,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,
++ 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,
++ 0x00,0x10,0x08,0x01,0xff,0x48,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,
++ 0x08,0x01,0xff,0x48,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,
++ 0x08,0x01,0xff,0x48,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0x49,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,
++ 0xff,0x49,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,
++ 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x81,0x00,0x01,0xff,0x6b,
++ 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,
++ 0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,
++ 0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,
++ 0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xb1,0x00,0x01,0xff,0x6c,
++ 0xcc,0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4c,0xcc,0xad,0x00,0x01,0xff,0x6c,
++ 0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x4d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,
++ 0x00,0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x4d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,
++ 0x4d,0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x4e,0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x4e,0xcc,
++ 0xa3,0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x4e,0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x4e,0xcc,
++ 0xad,0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,
++ 0x83,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,
++ 0x4f,0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,
++ 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,
++ 0x6f,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x81,0x00,
++ 0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x50,0xcc,
++ 0x81,0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x50,0xcc,0x87,0x00,
++ 0x01,0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,
++ 0x87,0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa3,0x00,
++ 0x01,0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x52,0xcc,0xa3,0xcc,
++ 0x84,0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,
++ 0xb1,0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x01,0xff,0x53,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,
++ 0x01,0xff,0x53,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,
++ 0x01,0xff,0x53,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,
++ 0x10,0x0a,0x01,0xff,0x53,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,
++ 0x87,0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x53,0xcc,0xa3,0xcc,0x87,0x00,
++ 0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0x87,0x00,
++ 0x01,0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0xa3,0x00,
++ 0x01,0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xb1,0x00,0x01,0xff,
++ 0x74,0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,
++ 0xad,0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa4,0x00,
++ 0x01,0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0xb0,0x00,
++ 0x01,0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xad,0x00,0x01,0xff,
++ 0x75,0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x83,0xcc,
++ 0x81,0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,
++ 0x84,0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0x56,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,
++ 0x56,0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x10,0x02,0xcf,0x86,0xd5,
++ 0xe1,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x80,
++ 0x00,0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x81,0x00,0x01,
++ 0xff,0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x88,0x00,0x01,
++ 0xff,0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x87,0x00,0x01,0xff,0x77,
++ 0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0xa3,0x00,0x01,
++ 0xff,0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x58,0xcc,0x87,0x00,0x01,0xff,0x78,
++ 0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x58,0xcc,0x88,0x00,0x01,0xff,0x78,
++ 0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,
++ 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0x82,0x00,0x01,
++ 0xff,0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x5a,0xcc,0xa3,0x00,0x01,0xff,0x7a,
++ 0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0xb1,0x00,0x01,0xff,0x7a,
++ 0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x68,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,0x88,
++ 0x00,0x92,0x1d,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,0x79,
++ 0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x02,0xff,0xc5,0xbf,0xcc,0x87,0x00,0x0a,0x00,
++ 0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa3,0x00,
++ 0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x89,0x00,0x01,0xff,
++ 0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x81,0x00,
++ 0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,
++ 0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,
++ 0x01,0xff,0x41,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,
++ 0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,
++ 0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,
++ 0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x81,0x00,
++ 0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,
++ 0x01,0xff,0x41,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,
++ 0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,
++ 0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,
++ 0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x86,0x00,
++ 0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0x45,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,
++ 0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,
++ 0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,
++ 0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4,
++ 0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,0x80,
++ 0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,
++ 0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,
++ 0xff,0x45,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,
++ 0x0a,0x01,0xff,0x45,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,
++ 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x89,0x00,0x01,0xff,0x69,
++ 0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,
++ 0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,
++ 0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x81,0x00,0x01,
++ 0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x80,
++ 0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,
++ 0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,
++ 0xff,0x4f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,
++ 0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,
++ 0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x81,0x00,0x01,
++ 0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,
++ 0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,
++ 0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3,
++ 0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x83,0x00,0x01,
++ 0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0xa3,
++ 0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,
++ 0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x89,
++ 0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,
++ 0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01,
++ 0xff,0x55,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,
++ 0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,
++ 0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,
++ 0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,
++ 0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01,
++ 0xff,0x59,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0x59,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x59,
++ 0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0x92,0x14,0x91,0x10,0x10,0x08,0x01,
++ 0xff,0x59,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x0a,0x00,0x0a,0x00,0xe1,
++ 0xc0,0x04,0xe0,0x80,0x02,0xcf,0x86,0xe5,0x2d,0x01,0xd4,0xa8,0xd3,0x54,0xd2,0x28,
++ 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,
++ 0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,
++ 0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,
++ 0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
++ 0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0x00,
++ 0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x93,0x00,0x01,0xff,0xce,
++ 0x91,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0x00,0x01,
++ 0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x91,
++ 0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,
++ 0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,
++ 0x82,0x00,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x93,
++ 0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,
++ 0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,
++ 0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,
++ 0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x93,
++ 0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x95,0xcc,0x93,
++ 0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,
++ 0x01,0xff,0xce,0x95,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,
++ 0x81,0x00,0x00,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,
++ 0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,
++ 0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,
++ 0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,
++ 0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,
++ 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,
++ 0x01,0xff,0xce,0x97,0xcc,0x93,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0x00,0x10,0x0b,
++ 0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,
++ 0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0x00,0x01,
++ 0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,
++ 0xcd,0x82,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x54,0xd2,0x28,
++ 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,
++ 0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,
++ 0xb9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,
++ 0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,
++ 0xce,0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,
++ 0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x93,0x00,0x01,0xff,0xce,
++ 0x99,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcc,0x80,0x00,0x01,
++ 0xff,0xce,0x99,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x99,
++ 0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,
++ 0x01,0xff,0xce,0x99,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcd,
++ 0x82,0x00,0xcf,0x86,0xe5,0x13,0x01,0xd4,0x84,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0x10,
++ 0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,
++ 0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,0x00,
++ 0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xce,0x9f,0xcc,0x93,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0x00,0x10,
++ 0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,
++ 0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x81,0x00,
++ 0x01,0xff,0xce,0x9f,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x54,0xd2,0x28,0xd1,
++ 0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,
++ 0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,
++ 0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,
++ 0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,
++ 0x85,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xd2,
++ 0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0x00,0x10,0x04,
++ 0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,0x10,0x04,0x00,
++ 0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,
++ 0xce,0xa5,0xcc,0x94,0xcd,0x82,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,
++ 0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,
++ 0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,
++ 0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,
++ 0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,
++ 0x12,0x10,0x09,0x01,0xff,0xce,0xa9,0xcc,0x93,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,
++ 0x00,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,
++ 0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,
++ 0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,
++ 0xa9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0x00,0xd3,
++ 0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,
++ 0xce,0xb1,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,
++ 0xce,0xb5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,
++ 0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,
++ 0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,
++ 0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,
++ 0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x91,0x12,0x10,0x09,0x01,
++ 0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x00,0x00,0xe0,
++ 0xe1,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,
++ 0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
++ 0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,
++ 0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,
++ 0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,
++ 0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xcd,
++ 0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,
++ 0x16,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,
++ 0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0xcd,
++ 0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,
++ 0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,
++ 0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,
++ 0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,
++ 0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x85,0x00,
++ 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,
++ 0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xcd,0x85,
++ 0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,
++ 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,
++ 0xb7,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,
++ 0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcd,
++ 0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,
++ 0x97,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x80,
++ 0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0xcd,
++ 0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,
++ 0xff,0xce,0x97,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,
++ 0xcd,0x82,0xcd,0x85,0x00,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,
++ 0xff,0xcf,0x89,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x85,
++ 0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,
++ 0xcf,0x89,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,
++ 0x89,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,
++ 0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,
++ 0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,
++ 0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,
++ 0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,
++ 0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,
++ 0xff,0xce,0xa9,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,
++ 0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x82,0xcd,
++ 0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,0x49,0xd2,
++ 0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,
++ 0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,
++ 0xce,0xb1,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xcd,
++ 0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,
++ 0xb1,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,
++ 0xcc,0x86,0x00,0x01,0xff,0xce,0x91,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x91,
++ 0xcc,0x80,0x00,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,
++ 0xce,0x91,0xcd,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xb9,0x00,0x01,0x00,
++ 0xcf,0x86,0xe5,0x16,0x01,0xd4,0x8f,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,0x01,
++ 0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x80,
++ 0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,
++ 0xce,0xb7,0xcc,0x81,0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcd,
++ 0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,
++ 0x09,0x01,0xff,0xce,0x95,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x10,
++ 0x09,0x01,0xff,0xce,0x97,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,0xd1,
++ 0x13,0x10,0x09,0x01,0xff,0xce,0x97,0xcd,0x85,0x00,0x01,0xff,0xe1,0xbe,0xbf,0xcc,
++ 0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbe,0xbf,0xcc,0x81,0x00,0x01,0xff,0xe1,0xbe,
++ 0xbf,0xcd,0x82,0x00,0xd3,0x40,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,
++ 0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,
++ 0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x51,0x04,
++ 0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,
++ 0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x86,
++ 0x00,0x01,0xff,0xce,0x99,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x80,
++ 0x00,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x04,0x00,0x00,0x01,0xff,
++ 0xe1,0xbf,0xbe,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbf,0xbe,0xcc,0x81,0x00,
++ 0x01,0xff,0xe1,0xbf,0xbe,0xcd,0x82,0x00,0xd4,0x93,0xd3,0x4e,0xd2,0x28,0xd1,0x12,
++ 0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,
++ 0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,
++ 0x88,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x93,0x00,0x01,
++ 0xff,0xcf,0x81,0xcc,0x94,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcd,0x82,0x00,0x01,
++ 0xff,0xcf,0x85,0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,
++ 0xce,0xa5,0xcc,0x86,0x00,0x01,0xff,0xce,0xa5,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,
++ 0xce,0xa5,0xcc,0x80,0x00,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,
++ 0x01,0xff,0xce,0xa1,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,0x10,0x09,
++ 0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x01,0xff,0x60,0x00,0xd3,0x3b,0xd2,0x18,0x51,
++ 0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,
++ 0xcf,0x89,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,0xcd,
++ 0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,0xcf,
++ 0x89,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x9f,
++ 0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xa9,
++ 0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0xd1,0x10,0x10,0x09,0x01,0xff,
++ 0xce,0xa9,0xcd,0x85,0x00,0x01,0xff,0xc2,0xb4,0x00,0x10,0x04,0x01,0x00,0x00,0x00,
++ 0xe0,0x62,0x0c,0xcf,0x86,0xe5,0x9f,0x08,0xe4,0xf8,0x05,0xe3,0xdb,0x02,0xe2,0xa1,
++ 0x01,0xd1,0xb4,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0x92,0x14,0x91,
++ 0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,0x00,0x01,
++ 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
++ 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x01,0x00,0xcf,0x86,0xd5,
++ 0x48,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,
++ 0x00,0x06,0x00,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x06,0x00,0xd3,0x1c,0xd2,
++ 0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0xd1,0x08,0x10,0x04,0x07,
++ 0x00,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,
++ 0x00,0x10,0x04,0x08,0x00,0x06,0x00,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x06,0x00,0x91,
++ 0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x0f,0x00,0x92,0x08,0x11,0x04,0x0f,0x00,0x01,
++ 0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0xd0,0x7e,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x53,0x04,0x01,
++ 0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd3,
++ 0x10,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0c,0x00,0x0c,0x00,0x52,
++ 0x04,0x0c,0x00,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xd4,0x1c,0x53,
++ 0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x02,0x00,0x91,
++ 0x08,0x10,0x04,0x03,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0xd2,0x08,0x11,0x04,0x06,
++ 0x00,0x08,0x00,0x11,0x04,0x08,0x00,0x0b,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0b,
++ 0x00,0x0c,0x00,0x10,0x04,0x0e,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,
++ 0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0x54,0x04,0x00,0x00,0xd3,0x0c,0x92,0x08,0x11,
++ 0x04,0x01,0xe6,0x01,0x01,0x01,0xe6,0xd2,0x0c,0x51,0x04,0x01,0x01,0x10,0x04,0x01,
++ 0x01,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,
++ 0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x04,0x00,0xd1,0x08,0x10,
++ 0x04,0x06,0x00,0x06,0x01,0x10,0x04,0x06,0x01,0x06,0xe6,0x92,0x10,0xd1,0x08,0x10,
++ 0x04,0x06,0xdc,0x06,0xe6,0x10,0x04,0x06,0x01,0x08,0x01,0x09,0xdc,0x93,0x10,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x0a,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,
++ 0x81,0xd0,0x4f,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,
++ 0x00,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xa9,0x00,0x01,0x00,0x92,0x12,
++ 0x51,0x04,0x01,0x00,0x10,0x06,0x01,0xff,0x4b,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,
++ 0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,
++ 0x10,0x04,0x04,0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x06,0x00,0x06,0x00,
++ 0xcf,0x86,0x95,0x2c,0xd4,0x18,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0xd1,0x08,
++ 0x10,0x04,0x08,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x93,0x10,0x92,0x0c,
++ 0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++ 0xd0,0x68,0xcf,0x86,0xd5,0x48,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,
++ 0x10,0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
++ 0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x11,0x00,0x00,0x00,0x53,0x04,
++ 0x01,0x00,0x92,0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x90,0xcc,
++ 0xb8,0x00,0x01,0xff,0xe2,0x86,0x92,0xcc,0xb8,0x00,0x01,0x00,0x94,0x1a,0x53,0x04,
++ 0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,
++ 0x94,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0x53,0x04,
++ 0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x87,
++ 0x90,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x87,0x94,0xcc,0xb8,0x00,0x01,0xff,
++ 0xe2,0x87,0x92,0xcc,0xb8,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,
++ 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x93,0x08,0x12,0x04,
++ 0x04,0x00,0x06,0x00,0x06,0x00,0xe2,0x38,0x02,0xe1,0x3f,0x01,0xd0,0x68,0xcf,0x86,
++ 0xd5,0x3e,0x94,0x3a,0xd3,0x16,0x52,0x04,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,
++ 0xe2,0x88,0x83,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd2,0x12,0x91,0x0e,0x10,0x04,
++ 0x01,0x00,0x01,0xff,0xe2,0x88,0x88,0xcc,0xb8,0x00,0x01,0x00,0x91,0x0e,0x10,0x0a,
++ 0x01,0xff,0xe2,0x88,0x8b,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x24,
++ 0x93,0x20,0x52,0x04,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa3,0xcc,
++ 0xb8,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa5,0xcc,0xb8,0x00,0x01,0x00,
++ 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x48,0x94,0x44,0xd3,0x2e,0xd2,0x12,0x91,0x0e,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x88,0xbc,0xcc,0xb8,0x00,0x01,0x00,0xd1,0x0e,
++ 0x10,0x0a,0x01,0xff,0xe2,0x89,0x83,0xcc,0xb8,0x00,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0xe2,0x89,0x85,0xcc,0xb8,0x00,0x92,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,
++ 0x01,0xff,0xe2,0x89,0x88,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x40,
++ 0xd3,0x1e,0x92,0x1a,0xd1,0x0c,0x10,0x08,0x01,0xff,0x3d,0xcc,0xb8,0x00,0x01,0x00,
++ 0x10,0x0a,0x01,0xff,0xe2,0x89,0xa1,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x52,0x04,
++ 0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x89,0x8d,0xcc,0xb8,0x00,
++ 0x10,0x08,0x01,0xff,0x3c,0xcc,0xb8,0x00,0x01,0xff,0x3e,0xcc,0xb8,0x00,0xd3,0x30,
++ 0xd2,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xa4,0xcc,0xb8,0x00,0x01,0xff,
++ 0xe2,0x89,0xa5,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,
++ 0xb2,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xb3,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,
++ 0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xb6,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,
++ 0xb7,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x50,0x94,0x4c,
++ 0xd3,0x30,0xd2,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xba,0xcc,0xb8,0x00,
++ 0x01,0xff,0xe2,0x89,0xbb,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,
++ 0xe2,0x8a,0x82,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x83,0xcc,0xb8,0x00,0x01,0x00,
++ 0x92,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x86,0xcc,0xb8,0x00,0x01,0xff,
++ 0xe2,0x8a,0x87,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x30,0x53,0x04,
++ 0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa2,0xcc,
++ 0xb8,0x00,0x01,0xff,0xe2,0x8a,0xa8,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,
++ 0xa9,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xab,0xcc,0xb8,0x00,0x01,0x00,0xcf,0x86,
++ 0x55,0x04,0x01,0x00,0xd4,0x5c,0xd3,0x2c,0x92,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,
++ 0xe2,0x89,0xbc,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xbd,0xcc,0xb8,0x00,0x10,0x0a,
++ 0x01,0xff,0xe2,0x8a,0x91,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x92,0xcc,0xb8,0x00,
++ 0x01,0x00,0xd2,0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb2,0xcc,
++ 0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb3,0xcc,0xb8,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,
++ 0xe2,0x8a,0xb4,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb5,0xcc,0xb8,0x00,0x01,0x00,
++ 0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xd1,0x64,
++ 0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x01,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x20,0x53,0x04,
++ 0x01,0x00,0x92,0x18,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x80,0x88,0x00,
++ 0x10,0x08,0x01,0xff,0xe3,0x80,0x89,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,
++ 0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,
++ 0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,
++ 0x04,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,
++ 0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x06,0x00,0x06,0x00,
++ 0xcf,0x86,0xd5,0x2c,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,
++ 0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x12,0x04,0x08,0x00,0x09,0x00,0xd4,0x14,
++ 0x53,0x04,0x09,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x0c,0x00,
++ 0x0c,0x00,0xd3,0x08,0x12,0x04,0x0c,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,0x00,
++ 0x10,0x04,0x10,0x00,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x13,0x00,
++ 0xd3,0xa6,0xd2,0x74,0xd1,0x40,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,
++ 0x93,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,
++ 0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,
++ 0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,
++ 0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,
++ 0x06,0x00,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,
++ 0x10,0x04,0x06,0x00,0x07,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x1a,0xcf,0x86,
++ 0x95,0x14,0x54,0x04,0x01,0x00,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,
++ 0x06,0x00,0x06,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,
++ 0x13,0x04,0x04,0x00,0x06,0x00,0xd2,0xdc,0xd1,0x48,0xd0,0x26,0xcf,0x86,0x95,0x20,
++ 0x54,0x04,0x01,0x00,0xd3,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x07,0x00,0x06,0x00,
++ 0x92,0x0c,0x91,0x08,0x10,0x04,0x08,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,
++ 0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,
++ 0x04,0x00,0x06,0x00,0x06,0x00,0x52,0x04,0x06,0x00,0x11,0x04,0x06,0x00,0x08,0x00,
++ 0xd0,0x5e,0xcf,0x86,0xd5,0x2c,0xd4,0x10,0x53,0x04,0x06,0x00,0x92,0x08,0x11,0x04,
++ 0x06,0x00,0x07,0x00,0x07,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,
++ 0x08,0x00,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0a,0x00,0x0b,0x00,
++ 0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,
++ 0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
++ 0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,
++ 0xd5,0x1c,0x94,0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,
++ 0x51,0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0b,0x00,0x94,0x14,0x93,0x10,
++ 0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0c,0x00,0x0b,0x00,
++ 0x0b,0x00,0xd1,0xa8,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,
++ 0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x0c,0x00,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,
++ 0x94,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,
++ 0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x18,0x53,0x04,0x01,0x00,
++ 0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x04,0x0c,0x00,
++ 0x01,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,
++ 0x51,0x04,0x0c,0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x52,0x04,0x01,0x00,0x51,0x04,
++ 0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
++ 0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x06,0x00,0x93,0x0c,0x52,0x04,
++ 0x06,0x00,0x11,0x04,0x06,0x00,0x01,0x00,0x01,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,
++ 0x54,0x04,0x01,0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x0c,0x00,0x0c,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,
++ 0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,
++ 0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,
++ 0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0xd2,0x0c,
++ 0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0d,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,
++ 0x0d,0x00,0x0c,0x00,0x06,0x00,0x94,0x0c,0x53,0x04,0x06,0x00,0x12,0x04,0x06,0x00,
++ 0x0a,0x00,0x06,0x00,0xe4,0x39,0x01,0xd3,0x0c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xcf,
++ 0x06,0x06,0x00,0xd2,0x30,0xd1,0x06,0xcf,0x06,0x06,0x00,0xd0,0x06,0xcf,0x06,0x06,
++ 0x00,0xcf,0x86,0x95,0x1e,0x54,0x04,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,
++ 0x00,0x91,0x0e,0x10,0x0a,0x06,0xff,0xe2,0xab,0x9d,0xcc,0xb8,0x00,0x06,0x00,0x06,
++ 0x00,0x06,0x00,0xd1,0x80,0xd0,0x3a,0xcf,0x86,0xd5,0x28,0xd4,0x10,0x53,0x04,0x07,
++ 0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x08,0x00,0xd3,0x08,0x12,0x04,0x08,
++ 0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,
++ 0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,
++ 0x86,0xd5,0x30,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,
++ 0x04,0x0a,0x00,0x10,0x00,0x10,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,
++ 0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x10,0x00,0x10,
++ 0x00,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,
++ 0x00,0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,
++ 0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x53,
++ 0x04,0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x14,0x00,0x91,0x08,0x10,0x04,0x14,
++ 0x00,0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x15,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x92,
++ 0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd4,
++ 0x0c,0x53,0x04,0x14,0x00,0x12,0x04,0x14,0x00,0x11,0x00,0x53,0x04,0x14,0x00,0x52,
++ 0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0xe3,0xb9,0x01,
++ 0xd2,0xac,0xd1,0x68,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x14,0x53,0x04,
++ 0x08,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,
++ 0x08,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,
++ 0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xd4,0x14,0x53,0x04,
++ 0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
++ 0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0a,0x00,0x0a,0x00,0x09,0x00,
++ 0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x0b,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,
++ 0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,
++ 0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,
++ 0x0b,0xe6,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0d,0x00,0x00,0x00,0x92,0x0c,
++ 0x91,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd1,0x6c,0xd0,0x2a,
++ 0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,
++ 0x08,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,
++ 0x00,0x00,0x0d,0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,
++ 0xd3,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,
++ 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,
++ 0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,
++ 0x00,0x00,0x10,0x04,0x00,0x00,0x0c,0x09,0xd0,0x5a,0xcf,0x86,0xd5,0x18,0x54,0x04,
++ 0x08,0x00,0x93,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,
++ 0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,
++ 0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
++ 0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
++ 0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,
++ 0x00,0x00,0xcf,0x86,0x95,0x40,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,
++ 0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,
++ 0x10,0x04,0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,
++ 0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
++ 0x08,0x00,0x00,0x00,0x0a,0xe6,0xd2,0x9c,0xd1,0x68,0xd0,0x32,0xcf,0x86,0xd5,0x14,
++ 0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,0x08,0x00,
++ 0x0a,0x00,0x54,0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,
++ 0x0b,0x00,0x0d,0x00,0x0d,0x00,0x12,0x04,0x0d,0x00,0x10,0x00,0xcf,0x86,0x95,0x30,
++ 0x94,0x2c,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x12,0x00,
++ 0x91,0x08,0x10,0x04,0x12,0x00,0x13,0x00,0x13,0x00,0xd2,0x08,0x11,0x04,0x13,0x00,
++ 0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0x00,0x00,0x00,0x00,
++ 0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,
++ 0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,
++ 0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x00,0x00,
++ 0x00,0x00,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,
++ 0xd5,0x14,0x54,0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,
++ 0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x04,0x00,0x12,0x04,0x04,0x00,
++ 0x00,0x00,0xcf,0x86,0xe5,0x8d,0x05,0xe4,0x86,0x05,0xe3,0x7d,0x04,0xe2,0xe4,0x03,
++ 0xe1,0xc0,0x01,0xd0,0x3e,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,
++ 0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0xda,0x01,0xe4,0x91,0x08,0x10,
++ 0x04,0x01,0xe8,0x01,0xde,0x01,0xe0,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x04,
++ 0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x04,0x00,0x01,
++ 0x00,0xcf,0x86,0xd5,0xaa,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,
++ 0xff,0xe3,0x81,0x8b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,
++ 0x8d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,
++ 0xff,0xe3,0x81,0x8f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,
++ 0x91,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x93,
++ 0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x95,0xe3,0x82,0x99,
++ 0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x97,0xe3,0x82,
++ 0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x99,0xe3,0x82,0x99,0x00,0x01,
++ 0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9b,0xe3,0x82,0x99,0x00,0x01,0x00,
++ 0x10,0x0b,0x01,0xff,0xe3,0x81,0x9d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd4,0x53,0xd3,
++ 0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9f,0xe3,0x82,0x99,0x00,
++ 0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xa1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,
++ 0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xa4,0xe3,0x82,0x99,0x00,0x10,0x04,
++ 0x01,0x00,0x01,0xff,0xe3,0x81,0xa6,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,0x0f,0x10,
++ 0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xa8,0xe3,0x82,0x99,0x00,0x01,0x00,0x01,0x00,
++ 0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x99,
++ 0x00,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,
++ 0xe3,0x81,0xb2,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb2,
++ 0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x99,
++ 0x00,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x04,
++ 0x01,0x00,0x01,0xff,0xe3,0x81,0xb8,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,
++ 0x81,0xb8,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,
++ 0xbb,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,0x9a,0x00,0x01,0x00,
++ 0xd0,0xee,0xcf,0x86,0xd5,0x42,0x54,0x04,0x01,0x00,0xd3,0x1b,0x52,0x04,0x01,0x00,
++ 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x86,0xe3,0x82,0x99,0x00,0x06,0x00,0x10,
++ 0x04,0x06,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x08,0x10,
++ 0x04,0x01,0x08,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0x9d,
++ 0xe3,0x82,0x99,0x00,0x06,0x00,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,
++ 0x01,0xff,0xe3,0x82,0xab,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,
++ 0x82,0xad,0xe3,0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,
++ 0x01,0xff,0xe3,0x82,0xaf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,
++ 0x82,0xb1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,
++ 0xb3,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb5,0xe3,0x82,
++ 0x99,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb7,0xe3,
++ 0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb9,0xe3,0x82,0x99,0x00,
++ 0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbb,0xe3,0x82,0x99,0x00,0x01,
++ 0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbd,0xe3,0x82,0x99,0x00,0x01,0x00,0xcf,0x86,
++ 0xd5,0xd5,0xd4,0x53,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,
++ 0xbf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x81,0xe3,0x82,
++ 0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x84,0xe3,
++ 0x82,0x99,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x86,0xe3,0x82,0x99,0x00,
++ 0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x88,0xe3,0x82,0x99,
++ 0x00,0x01,0x00,0x01,0x00,0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,
++ 0x83,0x8f,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x8f,0xe3,0x82,0x9a,0x00,0x10,
++ 0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,
++ 0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,
++ 0x83,0x95,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x95,0xe3,0x82,0x9a,0x00,0xd2,
++ 0x1e,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x99,0x00,
++ 0x10,0x0b,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,
++ 0x0b,0x01,0xff,0xe3,0x83,0x9b,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x9b,0xe3,
++ 0x82,0x9a,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x22,0x52,0x04,0x01,0x00,0xd1,
++ 0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xa6,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x04,
++ 0x01,0x00,0x01,0xff,0xe3,0x83,0xaf,0xe3,0x82,0x99,0x00,0xd2,0x25,0xd1,0x16,0x10,
++ 0x0b,0x01,0xff,0xe3,0x83,0xb0,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0xb1,0xe3,
++ 0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xb2,0xe3,0x82,0x99,0x00,0x01,0x00,
++ 0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xbd,0xe3,0x82,0x99,0x00,0x06,
++ 0x00,0xd1,0x4c,0xd0,0x46,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x00,
++ 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,
++ 0x18,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,
++ 0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x06,0x01,0x00,0xd0,0x32,0xcf,
++ 0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,
++ 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x54,0x04,0x04,0x00,0x53,0x04,0x04,
++ 0x00,0x92,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xcf,
++ 0x86,0xd5,0x08,0x14,0x04,0x08,0x00,0x0a,0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x0a,
++ 0x00,0x00,0x00,0x00,0x00,0x06,0x00,0xd2,0xa4,0xd1,0x5c,0xd0,0x22,0xcf,0x86,0x95,
++ 0x1c,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,
++ 0x04,0x01,0x00,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x01,0x00,0xcf,0x86,0xd5,
++ 0x20,0xd4,0x0c,0x93,0x08,0x12,0x04,0x01,0x00,0x0b,0x00,0x0b,0x00,0x93,0x10,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x54,
++ 0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x07,0x00,0x10,
++ 0x04,0x08,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,
++ 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,
++ 0x00,0x06,0x00,0xcf,0x86,0xd5,0x10,0x94,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,
++ 0x00,0x07,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
++ 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x16,0x00,0xd1,0x30,0xd0,0x06,0xcf,
++ 0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,
++ 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0x92,0x0c,0x51,
++ 0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,
++ 0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,
++ 0x00,0x11,0x04,0x01,0x00,0x07,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,
++ 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0xcf,0x06,0x04,
++ 0x00,0xcf,0x06,0x04,0x00,0xd1,0x48,0xd0,0x40,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x04,
++ 0x00,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x2c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xd1,
++ 0x06,0xcf,0x06,0x04,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,
++ 0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xcf,
++ 0x06,0x07,0x00,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,
++ 0x06,0x01,0x00,0xe2,0x71,0x05,0xd1,0x8c,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,
++ 0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xd4,0x06,0xcf,0x06,0x01,0x00,0xd3,0x06,
++ 0xcf,0x06,0x01,0x00,0xd2,0x06,0xcf,0x06,0x01,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,
++ 0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x01,0x00,
++ 0x11,0x04,0x01,0x00,0x08,0x00,0x08,0x00,0x53,0x04,0x08,0x00,0x12,0x04,0x08,0x00,
++ 0x0a,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,
++ 0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x11,0x00,0x11,0x00,0x93,0x0c,
++ 0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x13,0x00,0x13,0x00,0x94,0x14,0x53,0x04,
++ 0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,
++ 0x00,0x00,0xe0,0xdb,0x04,0xcf,0x86,0xe5,0xdf,0x01,0xd4,0x06,0xcf,0x06,0x04,0x00,
++ 0xd3,0x74,0xd2,0x6e,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,
++ 0x94,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,
++ 0x00,0x00,0x00,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x04,0x00,
++ 0x06,0x00,0x04,0x00,0x04,0x00,0x93,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,
++ 0x06,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,0x95,0x24,0x94,0x20,0x93,0x1c,
++ 0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x04,0x00,0xd1,0x08,0x10,0x04,
++ 0x04,0x00,0x06,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x0b,0x00,
++ 0xcf,0x06,0x0a,0x00,0xd2,0x84,0xd1,0x4c,0xd0,0x16,0xcf,0x86,0x55,0x04,0x0a,0x00,
++ 0x94,0x0c,0x53,0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
++ 0x55,0x04,0x0a,0x00,0xd4,0x1c,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x0a,0x00,
++ 0x0a,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xe6,
++ 0xd3,0x08,0x12,0x04,0x0a,0x00,0x0d,0xe6,0x52,0x04,0x0d,0xe6,0x11,0x04,0x0a,0xe6,
++ 0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,
++ 0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,0xe6,0x0d,0xe6,0x0b,0x00,
++ 0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,
++ 0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x24,
++ 0x54,0x04,0x08,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,
++ 0x08,0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,
++ 0x0a,0x00,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,
++ 0x0a,0x00,0x0a,0x00,0xcf,0x06,0x0a,0x00,0xd0,0x5e,0xcf,0x86,0xd5,0x28,0xd4,0x18,
++ 0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0xd1,0x08,0x10,0x04,0x0a,0x00,0x0c,0x00,
++ 0x10,0x04,0x0c,0x00,0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x0d,0x00,
++ 0x10,0x00,0x10,0x00,0xd4,0x1c,0x53,0x04,0x0c,0x00,0xd2,0x0c,0x51,0x04,0x0c,0x00,
++ 0x10,0x04,0x0d,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x12,0x00,0x14,0x00,
++ 0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0x92,0x08,0x11,0x04,
++ 0x14,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x1c,0x94,0x18,0x93,0x14,0xd2,0x08,
++ 0x11,0x04,0x00,0x00,0x15,0x00,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,
++ 0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x92,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,
++ 0x0c,0x00,0x0a,0x00,0x0a,0x00,0xe4,0xf2,0x02,0xe3,0x65,0x01,0xd2,0x98,0xd1,0x48,
++ 0xd0,0x36,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x08,0x00,0x51,0x04,
++ 0x08,0x00,0x10,0x04,0x08,0x09,0x08,0x00,0x08,0x00,0x08,0x00,0xd4,0x0c,0x53,0x04,
++ 0x08,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,0x11,0x04,
++ 0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0x54,0x04,0x09,0x00,
++ 0x13,0x04,0x09,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x0a,0x00,0xcf,0x86,0xd5,0x2c,
++ 0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x09,0x12,0x00,
++ 0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,
++ 0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x0b,0xe6,0xd3,0x0c,
++ 0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x11,0x04,
++ 0x11,0x00,0x14,0x00,0xd1,0x60,0xd0,0x22,0xcf,0x86,0x55,0x04,0x0a,0x00,0x94,0x18,
++ 0x53,0x04,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xdc,
++ 0x11,0x04,0x0a,0xdc,0x0a,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x0a,0x00,
++ 0xd3,0x10,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0x09,0x00,0x00,
++ 0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x54,0x04,
++ 0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,
++ 0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,
++ 0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0x07,0x0b,0x00,
++ 0x0b,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x20,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,
++ 0x10,0x04,0x00,0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x08,0x11,0x04,0x0b,0x00,
++ 0x00,0x00,0x11,0x04,0x00,0x00,0x0b,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,
++ 0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd2,0xd0,
++ 0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,0x0a,0x00,0x93,0x10,
++ 0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,
++ 0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,
++ 0x0a,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,
++ 0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x10,0x00,
++ 0xd0,0x3a,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0xd3,0x1c,0xd2,0x0c,
++ 0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xdc,
++ 0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0xe6,
++ 0x0b,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,0xcf,0x86,0xd5,0x2c,0xd4,0x18,
++ 0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x10,0x04,0x0b,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,
++ 0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,0x0d,0x00,0x93,0x10,0x52,0x04,
++ 0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x00,0x00,0x00,0x00,0xd1,0x8c,
++ 0xd0,0x72,0xcf,0x86,0xd5,0x4c,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
++ 0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,
++ 0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,
++ 0x10,0x04,0x0c,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
++ 0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,
++ 0x94,0x20,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,
++ 0x00,0x00,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,
++ 0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x11,0x00,
++ 0x11,0x04,0x10,0x00,0x15,0x00,0x00,0x00,0x11,0x00,0xd0,0x06,0xcf,0x06,0x11,0x00,
++ 0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,
++ 0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,
++ 0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x02,0xff,0xff,0xcf,0x86,0xcf,
++ 0x06,0x02,0xff,0xff,0xd1,0x76,0xd0,0x09,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xcf,
++ 0x86,0x85,0xd4,0x07,0xcf,0x06,0x02,0xff,0xff,0xd3,0x07,0xcf,0x06,0x02,0xff,0xff,
++ 0xd2,0x07,0xcf,0x06,0x02,0xff,0xff,0xd1,0x07,0xcf,0x06,0x02,0xff,0xff,0xd0,0x18,
++ 0xcf,0x86,0x55,0x05,0x02,0xff,0xff,0x94,0x0d,0x93,0x09,0x12,0x05,0x02,0xff,0xff,
++ 0x00,0x00,0x00,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,
++ 0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,
++ 0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,0x0b,0x00,
++ 0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,
++ 0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x9c,0x10,0xe3,0x16,0x08,
++ 0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x08,0x04,0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,
++ 0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,
++ 0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0x10,0x08,0x01,0xff,0xe8,0xbb,0x8a,0x00,0x01,
++ 0xff,0xe8,0xb3,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xbb,0x91,0x00,0x01,
++ 0xff,0xe4,0xb8,0xb2,0x00,0x10,0x08,0x01,0xff,0xe5,0x8f,0xa5,0x00,0x01,0xff,0xe9,
++ 0xbe,0x9c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xbe,0x9c,0x00,0x01,
++ 0xff,0xe5,0xa5,0x91,0x00,0x10,0x08,0x01,0xff,0xe9,0x87,0x91,0x00,0x01,0xff,0xe5,
++ 0x96,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa5,0x88,0x00,0x01,0xff,0xe6,
++ 0x87,0xb6,0x00,0x10,0x08,0x01,0xff,0xe7,0x99,0xa9,0x00,0x01,0xff,0xe7,0xbe,0x85,
++ 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x98,0xbf,0x00,0x01,
++ 0xff,0xe8,0x9e,0xba,0x00,0x10,0x08,0x01,0xff,0xe8,0xa3,0xb8,0x00,0x01,0xff,0xe9,
++ 0x82,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x82,0x00,0x01,0xff,0xe6,
++ 0xb4,0x9b,0x00,0x10,0x08,0x01,0xff,0xe7,0x83,0x99,0x00,0x01,0xff,0xe7,0x8f,0x9e,
++ 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x90,0xbd,0x00,0x01,0xff,0xe9,
++ 0x85,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0xa7,0xb1,0x00,0x01,0xff,0xe4,0xba,0x82,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x8d,0xb5,0x00,0x01,0xff,0xe6,0xac,0x84,
++ 0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x9b,0x00,0x01,0xff,0xe8,0x98,0xad,0x00,0xd4,
++ 0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xb8,0x9e,0x00,0x01,
++ 0xff,0xe5,0xb5,0x90,0x00,0x10,0x08,0x01,0xff,0xe6,0xbf,0xab,0x00,0x01,0xff,0xe8,
++ 0x97,0x8d,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa5,0xa4,0x00,0x01,0xff,0xe6,
++ 0x8b,0x89,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0x98,0x00,0x01,0xff,0xe8,0xa0,0x9f,
++ 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xbb,0x8a,0x00,0x01,0xff,0xe6,
++ 0x9c,0x97,0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0xaa,0x00,0x01,0xff,0xe7,0x8b,0xbc,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x83,0x8e,0x00,0x01,0xff,0xe4,0xbe,0x86,
++ 0x00,0x10,0x08,0x01,0xff,0xe5,0x86,0xb7,0x00,0x01,0xff,0xe5,0x8b,0x9e,0x00,0xd3,
++ 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x93,0x84,0x00,0x01,0xff,0xe6,
++ 0xab,0x93,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x90,0x00,0x01,0xff,0xe7,0x9b,0xa7,
++ 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x80,0x81,0x00,0x01,0xff,0xe8,0x98,0x86,
++ 0x00,0x10,0x08,0x01,0xff,0xe8,0x99,0x9c,0x00,0x01,0xff,0xe8,0xb7,0xaf,0x00,0xd2,
++ 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9c,0xb2,0x00,0x01,0xff,0xe9,0xad,0xaf,
++ 0x00,0x10,0x08,0x01,0xff,0xe9,0xb7,0xba,0x00,0x01,0xff,0xe7,0xa2,0x8c,0x00,0xd1,
++ 0x10,0x10,0x08,0x01,0xff,0xe7,0xa5,0xbf,0x00,0x01,0xff,0xe7,0xb6,0xa0,0x00,0x10,
++ 0x08,0x01,0xff,0xe8,0x8f,0x89,0x00,0x01,0xff,0xe9,0x8c,0x84,0x00,0xcf,0x86,0xe5,
++ 0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xb9,
++ 0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0x10,0x08,0x01,0xff,0xe5,0xa3,0x9f,0x00,
++ 0x01,0xff,0xe5,0xbc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xb1,0xa0,0x00,
++ 0x01,0xff,0xe8,0x81,0xbe,0x00,0x10,0x08,0x01,0xff,0xe7,0x89,0xa2,0x00,0x01,0xff,
++ 0xe7,0xa3,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xb3,0x82,0x00,
++ 0x01,0xff,0xe9,0x9b,0xb7,0x00,0x10,0x08,0x01,0xff,0xe5,0xa3,0x98,0x00,0x01,0xff,
++ 0xe5,0xb1,0xa2,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x93,0x00,0x01,0xff,
++ 0xe6,0xb7,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,0x8f,0x00,0x01,0xff,0xe7,0xb4,
++ 0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,
++ 0x01,0xff,0xe9,0x99,0x8b,0x00,0x10,0x08,0x01,0xff,0xe5,0x8b,0x92,0x00,0x01,0xff,
++ 0xe8,0x82,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x87,0x9c,0x00,0x01,0xff,
++ 0xe5,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe7,0xa8,0x9c,0x00,0x01,0xff,0xe7,0xb6,
++ 0xbe,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8f,0xb1,0x00,0x01,0xff,
++ 0xe9,0x99,0xb5,0x00,0x10,0x08,0x01,0xff,0xe8,0xae,0x80,0x00,0x01,0xff,0xe6,0x8b,
++ 0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x82,0x00,0x01,0xff,0xe8,0xab,
++ 0xbe,0x00,0x10,0x08,0x01,0xff,0xe4,0xb8,0xb9,0x00,0x01,0xff,0xe5,0xaf,0xa7,0x00,
++ 0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x80,0x92,0x00,
++ 0x01,0xff,0xe7,0x8e,0x87,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xb0,0x00,0x01,0xff,
++ 0xe5,0x8c,0x97,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa3,0xbb,0x00,0x01,0xff,
++ 0xe4,0xbe,0xbf,0x00,0x10,0x08,0x01,0xff,0xe5,0xbe,0xa9,0x00,0x01,0xff,0xe4,0xb8,
++ 0x8d,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xb3,0x8c,0x00,0x01,0xff,
++ 0xe6,0x95,0xb8,0x00,0x10,0x08,0x01,0xff,0xe7,0xb4,0xa2,0x00,0x01,0xff,0xe5,0x8f,
++ 0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa1,0x9e,0x00,0x01,0xff,0xe7,0x9c,
++ 0x81,0x00,0x10,0x08,0x01,0xff,0xe8,0x91,0x89,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,
++ 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xae,0xba,0x00,0x01,0xff,
++ 0xe8,0xbe,0xb0,0x00,0x10,0x08,0x01,0xff,0xe6,0xb2,0x88,0x00,0x01,0xff,0xe6,0x8b,
++ 0xbe,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8b,0xa5,0x00,0x01,0xff,0xe6,0x8e,
++ 0xa0,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xa5,0x00,0x01,0xff,0xe4,0xba,0xae,0x00,
++ 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,0xa9,0x00,0x01,0xff,0xe5,0x87,
++ 0x89,0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0x81,0x00,0x01,0xff,0xe7,0xb3,0xa7,0x00,
++ 0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x89,0xaf,0x00,0x01,0xff,0xe8,0xab,0x92,0x00,
++ 0x10,0x08,0x01,0xff,0xe9,0x87,0x8f,0x00,0x01,0xff,0xe5,0x8b,0xb5,0x00,0xe0,0x04,
++ 0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,
++ 0x01,0xff,0xe5,0x91,0x82,0x00,0x01,0xff,0xe5,0xa5,0xb3,0x00,0x10,0x08,0x01,0xff,
++ 0xe5,0xbb,0xac,0x00,0x01,0xff,0xe6,0x97,0x85,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0xe6,0xbf,0xbe,0x00,0x01,0xff,0xe7,0xa4,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0x96,
++ 0xad,0x00,0x01,0xff,0xe9,0xa9,0xaa,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0xe9,0xba,0x97,0x00,0x01,0xff,0xe9,0xbb,0x8e,0x00,0x10,0x08,0x01,0xff,0xe5,0x8a,
++ 0x9b,0x00,0x01,0xff,0xe6,0x9b,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xad,
++ 0xb7,0x00,0x01,0xff,0xe8,0xbd,0xa2,0x00,0x10,0x08,0x01,0xff,0xe5,0xb9,0xb4,0x00,
++ 0x01,0xff,0xe6,0x86,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0xe6,0x88,0x80,0x00,0x01,0xff,0xe6,0x92,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,
++ 0xa3,0x00,0x01,0xff,0xe7,0x85,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0x92,
++ 0x89,0x00,0x01,0xff,0xe7,0xa7,0x8a,0x00,0x10,0x08,0x01,0xff,0xe7,0xb7,0xb4,0x00,
++ 0x01,0xff,0xe8,0x81,0xaf,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xbc,
++ 0xa6,0x00,0x01,0xff,0xe8,0x93,0xae,0x00,0x10,0x08,0x01,0xff,0xe9,0x80,0xa3,0x00,
++ 0x01,0xff,0xe9,0x8d,0x8a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x88,0x97,0x00,
++ 0x01,0xff,0xe5,0x8a,0xa3,0x00,0x10,0x08,0x01,0xff,0xe5,0x92,0xbd,0x00,0x01,0xff,
++ 0xe7,0x83,0x88,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0xe8,0xa3,0x82,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,0x10,0x08,0x01,0xff,0xe5,0xbb,
++ 0x89,0x00,0x01,0xff,0xe5,0xbf,0xb5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x8d,
++ 0xbb,0x00,0x01,0xff,0xe6,0xae,0xae,0x00,0x10,0x08,0x01,0xff,0xe7,0xb0,0xbe,0x00,
++ 0x01,0xff,0xe7,0x8d,0xb5,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe4,0xbb,
++ 0xa4,0x00,0x01,0xff,0xe5,0x9b,0xb9,0x00,0x10,0x08,0x01,0xff,0xe5,0xaf,0xa7,0x00,
++ 0x01,0xff,0xe5,0xb6,0xba,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x80,0x9c,0x00,
++ 0x01,0xff,0xe7,0x8e,0xb2,0x00,0x10,0x08,0x01,0xff,0xe7,0x91,0xa9,0x00,0x01,0xff,
++ 0xe7,0xbe,0x9a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x81,
++ 0x86,0x00,0x01,0xff,0xe9,0x88,0xb4,0x00,0x10,0x08,0x01,0xff,0xe9,0x9b,0xb6,0x00,
++ 0x01,0xff,0xe9,0x9d,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa0,0x98,0x00,
++ 0x01,0xff,0xe4,0xbe,0x8b,0x00,0x10,0x08,0x01,0xff,0xe7,0xa6,0xae,0x00,0x01,0xff,
++ 0xe9,0x86,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9a,0xb8,0x00,
++ 0x01,0xff,0xe6,0x83,0xa1,0x00,0x10,0x08,0x01,0xff,0xe4,0xba,0x86,0x00,0x01,0xff,
++ 0xe5,0x83,0x9a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xaf,0xae,0x00,0x01,0xff,
++ 0xe5,0xb0,0xbf,0x00,0x10,0x08,0x01,0xff,0xe6,0x96,0x99,0x00,0x01,0xff,0xe6,0xa8,
++ 0x82,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x01,0xff,0xe7,0x87,0x8e,0x00,0x01,0xff,0xe7,0x99,0x82,0x00,0x10,0x08,0x01,
++ 0xff,0xe8,0x93,0xbc,0x00,0x01,0xff,0xe9,0x81,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xe9,0xbe,0x8d,0x00,0x01,0xff,0xe6,0x9a,0x88,0x00,0x10,0x08,0x01,0xff,0xe9,
++ 0x98,0xae,0x00,0x01,0xff,0xe5,0x8a,0x89,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xe6,0x9d,0xbb,0x00,0x01,0xff,0xe6,0x9f,0xb3,0x00,0x10,0x08,0x01,0xff,0xe6,
++ 0xb5,0x81,0x00,0x01,0xff,0xe6,0xba,0x9c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,
++ 0x90,0x89,0x00,0x01,0xff,0xe7,0x95,0x99,0x00,0x10,0x08,0x01,0xff,0xe7,0xa1,0xab,
++ 0x00,0x01,0xff,0xe7,0xb4,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xe9,0xa1,0x9e,0x00,0x01,0xff,0xe5,0x85,0xad,0x00,0x10,0x08,0x01,0xff,0xe6,
++ 0x88,0xae,0x00,0x01,0xff,0xe9,0x99,0xb8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
++ 0x80,0xab,0x00,0x01,0xff,0xe5,0xb4,0x99,0x00,0x10,0x08,0x01,0xff,0xe6,0xb7,0xaa,
++ 0x00,0x01,0xff,0xe8,0xbc,0xaa,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,
++ 0xbe,0x8b,0x00,0x01,0xff,0xe6,0x85,0x84,0x00,0x10,0x08,0x01,0xff,0xe6,0xa0,0x97,
++ 0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9a,0x86,
++ 0x00,0x01,0xff,0xe5,0x88,0xa9,0x00,0x10,0x08,0x01,0xff,0xe5,0x90,0x8f,0x00,0x01,
++ 0xff,0xe5,0xb1,0xa5,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,
++ 0xff,0xe6,0x98,0x93,0x00,0x01,0xff,0xe6,0x9d,0x8e,0x00,0x10,0x08,0x01,0xff,0xe6,
++ 0xa2,0xa8,0x00,0x01,0xff,0xe6,0xb3,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,
++ 0x90,0x86,0x00,0x01,0xff,0xe7,0x97,0xa2,0x00,0x10,0x08,0x01,0xff,0xe7,0xbd,0xb9,
++ 0x00,0x01,0xff,0xe8,0xa3,0x8f,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
++ 0xa3,0xa1,0x00,0x01,0xff,0xe9,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe9,0x9b,0xa2,
++ 0x00,0x01,0xff,0xe5,0x8c,0xbf,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xba,0xba,
++ 0x00,0x01,0xff,0xe5,0x90,0x9d,0x00,0x10,0x08,0x01,0xff,0xe7,0x87,0x90,0x00,0x01,
++ 0xff,0xe7,0x92,0x98,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,
++ 0x97,0xba,0x00,0x01,0xff,0xe9,0x9a,0xa3,0x00,0x10,0x08,0x01,0xff,0xe9,0xb1,0x97,
++ 0x00,0x01,0xff,0xe9,0xba,0x9f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x9e,0x97,
++ 0x00,0x01,0xff,0xe6,0xb7,0x8b,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0xa8,0x00,0x01,
++ 0xff,0xe7,0xab,0x8b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xac,0xa0,
++ 0x00,0x01,0xff,0xe7,0xb2,0x92,0x00,0x10,0x08,0x01,0xff,0xe7,0x8b,0x80,0x00,0x01,
++ 0xff,0xe7,0x82,0x99,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xad,0x98,0x00,0x01,
++ 0xff,0xe4,0xbb,0x80,0x00,0x10,0x08,0x01,0xff,0xe8,0x8c,0xb6,0x00,0x01,0xff,0xe5,
++ 0x88,0xba,0x00,0xe2,0xad,0x06,0xe1,0xc4,0x03,0xe0,0xcb,0x01,0xcf,0x86,0xd5,0xe4,
++ 0xd4,0x74,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,
++ 0x01,0xff,0xe5,0xba,0xa6,0x00,0x10,0x08,0x01,0xff,0xe6,0x8b,0x93,0x00,0x01,0xff,
++ 0xe7,0xb3,0x96,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xae,0x85,0x00,0x01,0xff,
++ 0xe6,0xb4,0x9e,0x00,0x10,0x08,0x01,0xff,0xe6,0x9a,0xb4,0x00,0x01,0xff,0xe8,0xbc,
++ 0xbb,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa1,0x8c,0x00,0x01,0xff,
++ 0xe9,0x99,0x8d,0x00,0x10,0x08,0x01,0xff,0xe8,0xa6,0x8b,0x00,0x01,0xff,0xe5,0xbb,
++ 0x93,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,0x80,0x00,0x01,0xff,0xe5,0x97,
++ 0x80,0x00,0x01,0x00,0xd3,0x34,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x01,0xff,0xe5,0xa1,
++ 0x9a,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe6,0x99,0xb4,0x00,0x01,0x00,0xd1,0x0c,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xe5,0x87,0x9e,0x00,0x10,0x08,0x01,0xff,0xe7,0x8c,
++ 0xaa,0x00,0x01,0xff,0xe7,0x9b,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,
++ 0xe7,0xa4,0xbc,0x00,0x01,0xff,0xe7,0xa5,0x9e,0x00,0x10,0x08,0x01,0xff,0xe7,0xa5,
++ 0xa5,0x00,0x01,0xff,0xe7,0xa6,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9d,
++ 0x96,0x00,0x01,0xff,0xe7,0xb2,0xbe,0x00,0x10,0x08,0x01,0xff,0xe7,0xbe,0xbd,0x00,
++ 0x01,0x00,0xd4,0x64,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x01,0xff,0xe8,0x98,
++ 0x92,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe8,0xab,0xb8,0x00,0x01,0x00,0xd1,0x0c,
++ 0x10,0x04,0x01,0x00,0x01,0xff,0xe9,0x80,0xb8,0x00,0x10,0x08,0x01,0xff,0xe9,0x83,
++ 0xbd,0x00,0x01,0x00,0xd2,0x14,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe9,0xa3,
++ 0xaf,0x00,0x01,0xff,0xe9,0xa3,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa4,
++ 0xa8,0x00,0x01,0xff,0xe9,0xb6,0xb4,0x00,0x10,0x08,0x0d,0xff,0xe9,0x83,0x9e,0x00,
++ 0x0d,0xff,0xe9,0x9a,0xb7,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,
++ 0xe4,0xbe,0xae,0x00,0x06,0xff,0xe5,0x83,0xa7,0x00,0x10,0x08,0x06,0xff,0xe5,0x85,
++ 0x8d,0x00,0x06,0xff,0xe5,0x8b,0x89,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0x8b,
++ 0xa4,0x00,0x06,0xff,0xe5,0x8d,0x91,0x00,0x10,0x08,0x06,0xff,0xe5,0x96,0x9d,0x00,
++ 0x06,0xff,0xe5,0x98,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0x99,
++ 0xa8,0x00,0x06,0xff,0xe5,0xa1,0x80,0x00,0x10,0x08,0x06,0xff,0xe5,0xa2,0xa8,0x00,
++ 0x06,0xff,0xe5,0xb1,0xa4,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0xb1,0xae,0x00,
++ 0x06,0xff,0xe6,0x82,0x94,0x00,0x10,0x08,0x06,0xff,0xe6,0x85,0xa8,0x00,0x06,0xff,
++ 0xe6,0x86,0x8e,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,
++ 0x10,0x10,0x08,0x06,0xff,0xe6,0x87,0xb2,0x00,0x06,0xff,0xe6,0x95,0x8f,0x00,0x10,
++ 0x08,0x06,0xff,0xe6,0x97,0xa2,0x00,0x06,0xff,0xe6,0x9a,0x91,0x00,0xd1,0x10,0x10,
++ 0x08,0x06,0xff,0xe6,0xa2,0x85,0x00,0x06,0xff,0xe6,0xb5,0xb7,0x00,0x10,0x08,0x06,
++ 0xff,0xe6,0xb8,0x9a,0x00,0x06,0xff,0xe6,0xbc,0xa2,0x00,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x06,0xff,0xe7,0x85,0xae,0x00,0x06,0xff,0xe7,0x88,0xab,0x00,0x10,0x08,0x06,
++ 0xff,0xe7,0x90,0xa2,0x00,0x06,0xff,0xe7,0xa2,0x91,0x00,0xd1,0x10,0x10,0x08,0x06,
++ 0xff,0xe7,0xa4,0xbe,0x00,0x06,0xff,0xe7,0xa5,0x89,0x00,0x10,0x08,0x06,0xff,0xe7,
++ 0xa5,0x88,0x00,0x06,0xff,0xe7,0xa5,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x06,0xff,0xe7,0xa5,0x96,0x00,0x06,0xff,0xe7,0xa5,0x9d,0x00,0x10,0x08,0x06,
++ 0xff,0xe7,0xa6,0x8d,0x00,0x06,0xff,0xe7,0xa6,0x8e,0x00,0xd1,0x10,0x10,0x08,0x06,
++ 0xff,0xe7,0xa9,0x80,0x00,0x06,0xff,0xe7,0xaa,0x81,0x00,0x10,0x08,0x06,0xff,0xe7,
++ 0xaf,0x80,0x00,0x06,0xff,0xe7,0xb7,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,
++ 0xff,0xe7,0xb8,0x89,0x00,0x06,0xff,0xe7,0xb9,0x81,0x00,0x10,0x08,0x06,0xff,0xe7,
++ 0xbd,0xb2,0x00,0x06,0xff,0xe8,0x80,0x85,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,
++ 0x87,0xad,0x00,0x06,0xff,0xe8,0x89,0xb9,0x00,0x10,0x08,0x06,0xff,0xe8,0x89,0xb9,
++ 0x00,0x06,0xff,0xe8,0x91,0x97,0x00,0xd4,0x75,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,
++ 0x08,0x06,0xff,0xe8,0xa4,0x90,0x00,0x06,0xff,0xe8,0xa6,0x96,0x00,0x10,0x08,0x06,
++ 0xff,0xe8,0xac,0x81,0x00,0x06,0xff,0xe8,0xac,0xb9,0x00,0xd1,0x10,0x10,0x08,0x06,
++ 0xff,0xe8,0xb3,0x93,0x00,0x06,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,0x06,0xff,0xe8,
++ 0xbe,0xb6,0x00,0x06,0xff,0xe9,0x80,0xb8,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,
++ 0xff,0xe9,0x9b,0xa3,0x00,0x06,0xff,0xe9,0x9f,0xbf,0x00,0x10,0x08,0x06,0xff,0xe9,
++ 0xa0,0xbb,0x00,0x0b,0xff,0xe6,0x81,0xb5,0x00,0x91,0x11,0x10,0x09,0x0b,0xff,0xf0,
++ 0xa4,0x8b,0xae,0x00,0x0b,0xff,0xe8,0x88,0x98,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,
++ 0xd1,0x10,0x10,0x08,0x08,0xff,0xe4,0xb8,0xa6,0x00,0x08,0xff,0xe5,0x86,0xb5,0x00,
++ 0x10,0x08,0x08,0xff,0xe5,0x85,0xa8,0x00,0x08,0xff,0xe4,0xbe,0x80,0x00,0xd1,0x10,
++ 0x10,0x08,0x08,0xff,0xe5,0x85,0x85,0x00,0x08,0xff,0xe5,0x86,0x80,0x00,0x10,0x08,
++ 0x08,0xff,0xe5,0x8b,0x87,0x00,0x08,0xff,0xe5,0x8b,0xba,0x00,0xd2,0x20,0xd1,0x10,
++ 0x10,0x08,0x08,0xff,0xe5,0x96,0x9d,0x00,0x08,0xff,0xe5,0x95,0x95,0x00,0x10,0x08,
++ 0x08,0xff,0xe5,0x96,0x99,0x00,0x08,0xff,0xe5,0x97,0xa2,0x00,0xd1,0x10,0x10,0x08,
++ 0x08,0xff,0xe5,0xa1,0x9a,0x00,0x08,0xff,0xe5,0xa2,0xb3,0x00,0x10,0x08,0x08,0xff,
++ 0xe5,0xa5,0x84,0x00,0x08,0xff,0xe5,0xa5,0x94,0x00,0xe0,0x04,0x02,0xcf,0x86,0xe5,
++ 0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xa9,
++ 0xa2,0x00,0x08,0xff,0xe5,0xac,0xa8,0x00,0x10,0x08,0x08,0xff,0xe5,0xbb,0x92,0x00,
++ 0x08,0xff,0xe5,0xbb,0x99,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xbd,0xa9,0x00,
++ 0x08,0xff,0xe5,0xbe,0xad,0x00,0x10,0x08,0x08,0xff,0xe6,0x83,0x98,0x00,0x08,0xff,
++ 0xe6,0x85,0x8e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x84,0x88,0x00,
++ 0x08,0xff,0xe6,0x86,0x8e,0x00,0x10,0x08,0x08,0xff,0xe6,0x85,0xa0,0x00,0x08,0xff,
++ 0xe6,0x87,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x88,0xb4,0x00,0x08,0xff,
++ 0xe6,0x8f,0x84,0x00,0x10,0x08,0x08,0xff,0xe6,0x90,0x9c,0x00,0x08,0xff,0xe6,0x91,
++ 0x92,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x95,0x96,0x00,
++ 0x08,0xff,0xe6,0x99,0xb4,0x00,0x10,0x08,0x08,0xff,0xe6,0x9c,0x97,0x00,0x08,0xff,
++ 0xe6,0x9c,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x9d,0x96,0x00,0x08,0xff,
++ 0xe6,0xad,0xb9,0x00,0x10,0x08,0x08,0xff,0xe6,0xae,0xba,0x00,0x08,0xff,0xe6,0xb5,
++ 0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0xbb,0x9b,0x00,0x08,0xff,
++ 0xe6,0xbb,0x8b,0x00,0x10,0x08,0x08,0xff,0xe6,0xbc,0xa2,0x00,0x08,0xff,0xe7,0x80,
++ 0x9e,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x85,0xae,0x00,0x08,0xff,0xe7,0x9e,
++ 0xa7,0x00,0x10,0x08,0x08,0xff,0xe7,0x88,0xb5,0x00,0x08,0xff,0xe7,0x8a,0xaf,0x00,
++ 0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x8c,0xaa,0x00,
++ 0x08,0xff,0xe7,0x91,0xb1,0x00,0x10,0x08,0x08,0xff,0xe7,0x94,0x86,0x00,0x08,0xff,
++ 0xe7,0x94,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x98,0x9d,0x00,0x08,0xff,
++ 0xe7,0x98,0x9f,0x00,0x10,0x08,0x08,0xff,0xe7,0x9b,0x8a,0x00,0x08,0xff,0xe7,0x9b,
++ 0x9b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x9b,0xb4,0x00,0x08,0xff,
++ 0xe7,0x9d,0x8a,0x00,0x10,0x08,0x08,0xff,0xe7,0x9d,0x80,0x00,0x08,0xff,0xe7,0xa3,
++ 0x8c,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xaa,0xb1,0x00,0x08,0xff,0xe7,0xaf,
++ 0x80,0x00,0x10,0x08,0x08,0xff,0xe7,0xb1,0xbb,0x00,0x08,0xff,0xe7,0xb5,0x9b,0x00,
++ 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xb7,0xb4,0x00,0x08,0xff,
++ 0xe7,0xbc,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0x80,0x85,0x00,0x08,0xff,0xe8,0x8d,
++ 0x92,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0x8f,0xaf,0x00,0x08,0xff,0xe8,0x9d,
++ 0xb9,0x00,0x10,0x08,0x08,0xff,0xe8,0xa5,0x81,0x00,0x08,0xff,0xe8,0xa6,0x86,0x00,
++ 0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xa6,0x96,0x00,0x08,0xff,0xe8,0xaa,
++ 0xbf,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xb8,0x00,0x08,0xff,0xe8,0xab,0x8b,0x00,
++ 0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xac,0x81,0x00,0x08,0xff,0xe8,0xab,0xbe,0x00,
++ 0x10,0x08,0x08,0xff,0xe8,0xab,0xad,0x00,0x08,0xff,0xe8,0xac,0xb9,0x00,0xcf,0x86,
++ 0x95,0xde,0xd4,0x81,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xae,
++ 0x8a,0x00,0x08,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,0x08,0xff,0xe8,0xbc,0xb8,0x00,
++ 0x08,0xff,0xe9,0x81,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0x86,0x99,0x00,
++ 0x08,0xff,0xe9,0x89,0xb6,0x00,0x10,0x08,0x08,0xff,0xe9,0x99,0xbc,0x00,0x08,0xff,
++ 0xe9,0x9b,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0x9d,0x96,0x00,
++ 0x08,0xff,0xe9,0x9f,0x9b,0x00,0x10,0x08,0x08,0xff,0xe9,0x9f,0xbf,0x00,0x08,0xff,
++ 0xe9,0xa0,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0xa0,0xbb,0x00,0x08,0xff,
++ 0xe9,0xac,0x92,0x00,0x10,0x08,0x08,0xff,0xe9,0xbe,0x9c,0x00,0x08,0xff,0xf0,0xa2,
++ 0xa1,0x8a,0x00,0xd3,0x45,0xd2,0x22,0xd1,0x12,0x10,0x09,0x08,0xff,0xf0,0xa2,0xa1,
++ 0x84,0x00,0x08,0xff,0xf0,0xa3,0x8f,0x95,0x00,0x10,0x08,0x08,0xff,0xe3,0xae,0x9d,
++ 0x00,0x08,0xff,0xe4,0x80,0x98,0x00,0xd1,0x11,0x10,0x08,0x08,0xff,0xe4,0x80,0xb9,
++ 0x00,0x08,0xff,0xf0,0xa5,0x89,0x89,0x00,0x10,0x09,0x08,0xff,0xf0,0xa5,0xb3,0x90,
++ 0x00,0x08,0xff,0xf0,0xa7,0xbb,0x93,0x00,0x92,0x14,0x91,0x10,0x10,0x08,0x08,0xff,
++ 0xe9,0xbd,0x83,0x00,0x08,0xff,0xe9,0xbe,0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0xe1,0x94,0x01,0xe0,0x08,0x01,0xcf,0x86,0xd5,0x42,0xd4,0x14,0x93,0x10,0x52,0x04,
++ 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd3,0x10,
++ 0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,
++ 0x00,0x00,0xd1,0x0d,0x10,0x04,0x00,0x00,0x04,0xff,0xd7,0x99,0xd6,0xb4,0x00,0x10,
++ 0x04,0x01,0x1a,0x01,0xff,0xd7,0xb2,0xd6,0xb7,0x00,0xd4,0x42,0x53,0x04,0x01,0x00,
++ 0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd7,0xa9,0xd7,0x81,0x00,0x01,
++ 0xff,0xd7,0xa9,0xd7,0x82,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xd7,0xa9,0xd6,0xbc,
++ 0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,0x82,0x00,0x10,0x09,0x01,0xff,
++ 0xd7,0x90,0xd6,0xb7,0x00,0x01,0xff,0xd7,0x90,0xd6,0xb8,0x00,0xd3,0x43,0xd2,0x24,
++ 0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x91,0xd6,
++ 0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x92,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x93,0xd6,
++ 0xbc,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x94,0xd6,0xbc,0x00,0x01,0xff,0xd7,
++ 0x95,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x96,0xd6,0xbc,0x00,0x00,0x00,0xd2,
++ 0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x98,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x99,
++ 0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x9a,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x9b,
++ 0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0x9c,0xd6,0xbc,0x00,0x00,0x00,
++ 0x10,0x09,0x01,0xff,0xd7,0x9e,0xd6,0xbc,0x00,0x00,0x00,0xcf,0x86,0x95,0x85,0x94,
++ 0x81,0xd3,0x3e,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa0,0xd6,0xbc,0x00,
++ 0x01,0xff,0xd7,0xa1,0xd6,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd7,0xa3,0xd6,
++ 0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0xa4,0xd6,0xbc,0x00,0x00,0x00,0x10,
++ 0x09,0x01,0xff,0xd7,0xa6,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa7,0xd6,0xbc,0x00,0xd2,
++ 0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa8,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa9,
++ 0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0xaa,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x95,
++ 0xd6,0xb9,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x91,0xd6,0xbf,0x00,0x01,0xff,
++ 0xd7,0x9b,0xd6,0xbf,0x00,0x10,0x09,0x01,0xff,0xd7,0xa4,0xd6,0xbf,0x00,0x01,0x00,
++ 0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,
++ 0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0xcf,0x86,
++ 0x95,0x24,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,
++ 0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x5a,0xd2,0x06,0xcf,0x06,0x01,0x00,0xd1,0x14,
++ 0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x08,0x14,0x04,0x00,0x00,0x01,0x00,
++ 0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,0x04,0x01,0x00,0x93,0x0c,0x92,0x08,
++ 0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x0c,
++ 0x94,0x08,0x13,0x04,0x01,0x00,0x00,0x00,0x05,0x00,0x54,0x04,0x05,0x00,0x53,0x04,
++ 0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x07,0x00,0x00,0x00,
++ 0xd2,0xcc,0xd1,0xa4,0xd0,0x36,0xcf,0x86,0xd5,0x14,0x54,0x04,0x06,0x00,0x53,0x04,
++ 0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x94,0x1c,0xd3,0x10,
++ 0x52,0x04,0x01,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xdc,0x52,0x04,
++ 0x10,0xdc,0x11,0x04,0x10,0xdc,0x11,0xe6,0x01,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,
++ 0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,
++ 0x06,0x00,0x07,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,
++ 0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,
++ 0x01,0x00,0x01,0x00,0xd4,0x18,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,
++ 0x10,0x04,0x01,0x00,0x00,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,
++ 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,
++ 0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,
++ 0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd1,0x50,0xd0,0x1e,
++ 0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
++ 0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,
++ 0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,
++ 0x10,0x04,0x01,0x00,0x06,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,
++ 0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,
++ 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x18,
++ 0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x92,0x08,0x11,0x04,
++ 0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,
++ 0x01,0x00,0xd2,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,
++ 0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,
++ 0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,
++ 0x01,0x00,0x00,0x00,0x53,0x04,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,
++ 0x04,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x03,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,
++ 0x30,0x3e,0xe1,0x1a,0x3b,0xe0,0x97,0x39,0xcf,0x86,0xe5,0x3b,0x26,0xc4,0xe3,0x16,
++ 0x14,0xe2,0xef,0x11,0xe1,0xd0,0x10,0xe0,0x60,0x07,0xcf,0x86,0xe5,0x53,0x03,0xe4,
++ 0x4c,0x02,0xe3,0x3d,0x01,0xd2,0x94,0xd1,0x70,0xd0,0x4a,0xcf,0x86,0xd5,0x18,0x94,
++ 0x14,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,
++ 0x00,0x07,0x00,0x07,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,
++ 0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x07,0x00,0x53,0x04,0x07,0x00,0xd2,0x0c,0x51,
++ 0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,
++ 0x00,0x07,0x00,0xcf,0x86,0x95,0x20,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,0x07,
++ 0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,
++ 0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,
++ 0x04,0x07,0x00,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,
++ 0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,
++ 0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,
++ 0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,
++ 0x04,0x07,0x00,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,
++ 0x00,0x07,0x00,0x07,0x00,0xcf,0x06,0x08,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,
++ 0x20,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x10,
++ 0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x53,
++ 0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,
++ 0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,
++ 0x86,0xd5,0x08,0x14,0x04,0x00,0x00,0x0a,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,
++ 0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x00,0x00,0xd2,
++ 0x5e,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,
++ 0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,
++ 0x00,0x00,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0a,0x00,0x93,0x10,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,
++ 0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0xdc,0x10,0x00,0x10,0x00,0x10,
++ 0x00,0x10,0x00,0x53,0x04,0x10,0x00,0x12,0x04,0x10,0x00,0x00,0x00,0xd1,0x70,0xd0,
++ 0x36,0xcf,0x86,0xd5,0x18,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,
++ 0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,
++ 0x04,0x05,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x13,
++ 0x00,0x13,0x00,0x05,0x00,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x05,0x00,0x92,
++ 0x0c,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x54,
++ 0x04,0x10,0x00,0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x10,0xe6,0x92,
++ 0x0c,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,
++ 0x86,0x95,0x18,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x51,
++ 0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x08,0x00,0xcf,0x86,0x95,0x1c,0xd4,
++ 0x0c,0x93,0x08,0x12,0x04,0x08,0x00,0x00,0x00,0x08,0x00,0x93,0x0c,0x52,0x04,0x08,
++ 0x00,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0xba,0xd2,0x80,0xd1,
++ 0x34,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x05,
++ 0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0x95,0x14,0x94,
++ 0x10,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x07,
++ 0x00,0x07,0x00,0xd0,0x2a,0xcf,0x86,0xd5,0x14,0x54,0x04,0x07,0x00,0x53,0x04,0x07,
++ 0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x07,
++ 0x00,0x92,0x08,0x11,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xcf,0x86,0xd5,
++ 0x10,0x54,0x04,0x12,0x00,0x93,0x08,0x12,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x54,
++ 0x04,0x12,0x00,0x53,0x04,0x12,0x00,0x12,0x04,0x12,0x00,0x00,0x00,0xd1,0x34,0xd0,
++ 0x12,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x10,
++ 0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x10,0x00,0x00,
++ 0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x00,
++ 0x00,0xcf,0x06,0x00,0x00,0xd2,0x06,0xcf,0x06,0x10,0x00,0xd1,0x40,0xd0,0x1e,0xcf,
++ 0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,
++ 0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x14,0x54,
++ 0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x00,
++ 0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,
++ 0xce,0x02,0xe3,0x45,0x01,0xd2,0xd0,0xd1,0x70,0xd0,0x52,0xcf,0x86,0xd5,0x20,0x94,
++ 0x1c,0xd3,0x0c,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x92,0x0c,0x91,
++ 0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,
++ 0x00,0xd3,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,
++ 0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x08,0x10,
++ 0x04,0x07,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,0x95,0x18,0x54,
++ 0x04,0x0b,0x00,0x93,0x10,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,
++ 0x00,0x0b,0x00,0x0b,0x00,0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,
++ 0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,
++ 0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,
++ 0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,
++ 0x04,0x11,0x00,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,
++ 0x00,0x11,0x04,0x11,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,
++ 0x00,0x11,0x00,0x11,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x1c,0x54,0x04,0x09,
++ 0x00,0x53,0x04,0x09,0x00,0xd2,0x08,0x11,0x04,0x09,0x00,0x0b,0x00,0x51,0x04,0x00,
++ 0x00,0x10,0x04,0x00,0x00,0x09,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,
++ 0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,
++ 0x00,0xcf,0x06,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,
++ 0x00,0x53,0x04,0x0d,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x11,0x00,0x0d,0x00,0xcf,
++ 0x86,0x95,0x14,0x54,0x04,0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x11,
++ 0x00,0x11,0x00,0x11,0x00,0x11,0x00,0xd2,0xec,0xd1,0xa4,0xd0,0x76,0xcf,0x86,0xd5,
++ 0x48,0xd4,0x28,0xd3,0x14,0x52,0x04,0x08,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x08,
++ 0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x08,
++ 0x00,0x08,0xdc,0x10,0x04,0x08,0x00,0x08,0xe6,0xd3,0x10,0x52,0x04,0x08,0x00,0x91,
++ 0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x54,0x04,0x08,0x00,0xd3,0x0c,0x52,0x04,0x08,
++ 0x00,0x11,0x04,0x14,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe6,0x08,
++ 0x01,0x10,0x04,0x08,0xdc,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,
++ 0x09,0xcf,0x86,0x95,0x28,0xd4,0x14,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,
++ 0x08,0x10,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x0a,0xcf,
++ 0x86,0x15,0x04,0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x24,0xd3,
++ 0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0xe6,0x10,0x04,0x10,
++ 0xdc,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,
++ 0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,
++ 0x00,0x00,0x00,0xd1,0x54,0xd0,0x26,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,
++ 0x00,0xd3,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x91,
++ 0x08,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x14,0x54,
++ 0x04,0x0b,0x00,0x93,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x0b,
++ 0x00,0x54,0x04,0x0b,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,
++ 0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x54,0x04,0x10,
++ 0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,0x91,
++ 0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,
++ 0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,
++ 0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x96,0xd2,
++ 0x68,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x0b,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,
++ 0x04,0x0b,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,
++ 0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0xd3,0x10,0x92,
++ 0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x92,0x08,0x11,
++ 0x04,0x00,0x00,0x11,0x00,0x11,0x00,0xd1,0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,
++ 0x00,0xd4,0x0c,0x93,0x08,0x12,0x04,0x14,0x00,0x14,0xe6,0x00,0x00,0x53,0x04,0x14,
++ 0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,
++ 0x06,0x00,0x00,0xd2,0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,
++ 0x04,0x00,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,
++ 0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x58,0xd0,
++ 0x12,0xcf,0x86,0x55,0x04,0x14,0x00,0x94,0x08,0x13,0x04,0x14,0x00,0x00,0x00,0x14,
++ 0x00,0xcf,0x86,0x95,0x40,0xd4,0x24,0xd3,0x0c,0x52,0x04,0x14,0x00,0x11,0x04,0x14,
++ 0x00,0x14,0xdc,0xd2,0x0c,0x51,0x04,0x14,0xe6,0x10,0x04,0x14,0xe6,0x14,0xdc,0x91,
++ 0x08,0x10,0x04,0x14,0xe6,0x14,0xdc,0x14,0xdc,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x14,0xdc,0x14,0x00,0x14,0x00,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,
++ 0x00,0x54,0x04,0x15,0x00,0x93,0x10,0x52,0x04,0x15,0x00,0x51,0x04,0x15,0x00,0x10,
++ 0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xe5,0x0f,0x06,0xe4,0xf8,0x03,0xe3,
++ 0x02,0x02,0xd2,0xfb,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0c,0x00,0xcf,0x86,0xd5,0x2c,
++ 0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x09,
++ 0x0c,0x00,0x52,0x04,0x0c,0x00,0x11,0x04,0x0c,0x00,0x00,0x00,0x93,0x0c,0x92,0x08,
++ 0x11,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,
++ 0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x09,
++ 0xd0,0x69,0xcf,0x86,0xd5,0x32,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x15,
++ 0x51,0x04,0x0b,0x00,0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x99,0xf0,0x91,0x82,0xba,
++ 0x00,0x0b,0x00,0x91,0x11,0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x9b,0xf0,0x91,0x82,
++ 0xba,0x00,0x0b,0x00,0x0b,0x00,0xd4,0x1d,0x53,0x04,0x0b,0x00,0x92,0x15,0x51,0x04,
++ 0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xff,0xf0,0x91,0x82,0xa5,0xf0,0x91,0x82,0xba,
++ 0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,
++ 0x09,0x10,0x04,0x0b,0x07,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,
++ 0x0c,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,
++ 0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x00,0x00,0x0d,0x00,0xd4,0x14,0x53,0x04,0x0d,
++ 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,
++ 0x04,0x0d,0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0xd1,0x96,0xd0,
++ 0x5c,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x0d,0xe6,0x10,
++ 0x04,0x0d,0xe6,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd4,0x26,0x53,0x04,0x0d,
++ 0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x0d,0x0d,0xff,0xf0,0x91,0x84,
++ 0xb1,0xf0,0x91,0x84,0xa7,0x00,0x0d,0xff,0xf0,0x91,0x84,0xb2,0xf0,0x91,0x84,0xa7,
++ 0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x0d,0x09,0x91,
++ 0x08,0x10,0x04,0x0d,0x09,0x00,0x00,0x0d,0x00,0x0d,0x00,0xcf,0x86,0xd5,0x18,0x94,
++ 0x14,0x93,0x10,0x52,0x04,0x0d,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,
++ 0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x10,
++ 0x00,0x10,0x04,0x10,0x00,0x10,0x07,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,
++ 0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,
++ 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x09,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd2,
++ 0x10,0xd1,0x08,0x10,0x04,0x0d,0x00,0x11,0x00,0x10,0x04,0x11,0x07,0x11,0x00,0x91,
++ 0x08,0x10,0x04,0x11,0x00,0x10,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x0c,0x51,
++ 0x04,0x0d,0x00,0x10,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x93,
++ 0x10,0x52,0x04,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0xd2,0xc8,0xd1,0x48,0xd0,0x42,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x93,
++ 0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,
++ 0x00,0x54,0x04,0x10,0x00,0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,
++ 0x00,0x10,0x09,0x10,0x04,0x10,0x07,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,
++ 0x00,0x10,0x04,0x12,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x52,0xcf,0x86,0xd5,
++ 0x3c,0xd4,0x28,0xd3,0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,
++ 0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x00,0x51,
++ 0x04,0x11,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,
++ 0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x94,0x10,0x53,0x04,0x11,
++ 0x00,0x92,0x08,0x11,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,
++ 0x04,0x10,0x00,0xd4,0x18,0x53,0x04,0x10,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x10,
++ 0x00,0x10,0x07,0x10,0x04,0x10,0x09,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,
++ 0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xe1,0x27,0x01,0xd0,0x8a,0xcf,0x86,
++ 0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x10,0x00,
++ 0x10,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,0x00,
++ 0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x93,0x14,
++ 0x92,0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,
++ 0x10,0x00,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x10,0x00,0x00,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,
++ 0x10,0x00,0x00,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
++ 0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x14,0x07,0x91,0x08,0x10,0x04,
++ 0x10,0x07,0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x6a,0xd4,0x42,0xd3,0x14,0x52,0x04,
++ 0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,
++ 0xd2,0x19,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0xff,
++ 0xf0,0x91,0x8d,0x87,0xf0,0x91,0x8c,0xbe,0x00,0x91,0x11,0x10,0x0d,0x10,0xff,0xf0,
++ 0x91,0x8d,0x87,0xf0,0x91,0x8d,0x97,0x00,0x10,0x09,0x00,0x00,0xd3,0x18,0xd2,0x0c,
++ 0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
++ 0x00,0x00,0x10,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,
++ 0x10,0x00,0xd4,0x1c,0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0xe6,
++ 0x52,0x04,0x10,0xe6,0x91,0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x93,0x10,
++ 0x52,0x04,0x10,0xe6,0x91,0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0xcf,0x06,0x00,0x00,0xe3,0x30,0x01,0xd2,0xb7,0xd1,0x48,0xd0,0x06,0xcf,0x06,0x12,
++ 0x00,0xcf,0x86,0x95,0x3c,0xd4,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,
++ 0x04,0x12,0x09,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x07,0x12,0x00,0x12,
++ 0x00,0x53,0x04,0x12,0x00,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x00,0x00,0x12,
++ 0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x12,0x00,0x10,0x04,0x14,0xe6,0x15,0x00,0x00,
++ 0x00,0xd0,0x45,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,
++ 0x00,0xd2,0x15,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,0xff,0xf0,0x91,0x92,
++ 0xb9,0xf0,0x91,0x92,0xba,0x00,0xd1,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,
++ 0xf0,0x91,0x92,0xb0,0x00,0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,
++ 0x91,0x92,0xbd,0x00,0x10,0x00,0xcf,0x86,0x95,0x24,0xd4,0x14,0x93,0x10,0x92,0x0c,
++ 0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x09,0x10,0x07,0x10,0x00,0x00,0x00,0x53,0x04,
++ 0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x06,
++ 0xcf,0x06,0x00,0x00,0xd0,0x40,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,
++ 0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0xd2,0x1e,0x51,0x04,
++ 0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x96,0xb8,0xf0,0x91,0x96,0xaf,0x00,0x10,
++ 0xff,0xf0,0x91,0x96,0xb9,0xf0,0x91,0x96,0xaf,0x00,0x51,0x04,0x10,0x00,0x10,0x04,
++ 0x10,0x00,0x10,0x09,0xcf,0x86,0x95,0x2c,0xd4,0x1c,0xd3,0x10,0x92,0x0c,0x91,0x08,
++ 0x10,0x04,0x10,0x07,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,
++ 0x11,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,
++ 0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,0x5c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,
++ 0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,
++ 0x10,0x04,0x10,0x00,0x10,0x09,0xcf,0x86,0xd5,0x24,0xd4,0x14,0x93,0x10,0x52,0x04,
++ 0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,
++ 0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,0x53,0x04,
++ 0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0xd3,0x10,
++ 0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x0d,0x07,0x92,0x0c,
++ 0x91,0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x14,
++ 0x94,0x10,0x53,0x04,0x0d,0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x11,0x00,
++ 0x53,0x04,0x11,0x00,0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x14,0x00,0x00,0x00,
++ 0x91,0x08,0x10,0x04,0x00,0x00,0x11,0x00,0x11,0x00,0x94,0x14,0x53,0x04,0x11,0x00,
++ 0x92,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x11,0x09,0x00,0x00,0x11,0x00,
++ 0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,0x59,0x01,0xd3,0xb2,0xd2,0x5c,0xd1,
++ 0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,
++ 0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x14,0x00,0x14,0x09,0x10,0x04,0x14,0x07,0x14,
++ 0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x00,0x00,0x10,
++ 0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x10,0x92,0x0c,0x51,
++ 0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,
++ 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,
++ 0x1a,0xcf,0x86,0x55,0x04,0x00,0x00,0x94,0x10,0x53,0x04,0x15,0x00,0x92,0x08,0x11,
++ 0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x15,
++ 0x00,0x53,0x04,0x15,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x94,
++ 0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x15,0x09,0x15,0x00,0x15,0x00,0x91,
++ 0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,
++ 0x3c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x93,0x10,0x52,
++ 0x04,0x13,0x00,0x91,0x08,0x10,0x04,0x13,0x09,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,
++ 0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,
++ 0x04,0x13,0x00,0x13,0x09,0x00,0x00,0x13,0x00,0x13,0x00,0xd0,0x46,0xcf,0x86,0xd5,
++ 0x2c,0xd4,0x10,0x93,0x0c,0x52,0x04,0x13,0x00,0x11,0x04,0x15,0x00,0x13,0x00,0x13,
++ 0x00,0x53,0x04,0x13,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x13,0x09,0x13,
++ 0x00,0x91,0x08,0x10,0x04,0x13,0x00,0x14,0x00,0x13,0x00,0x94,0x14,0x93,0x10,0x92,
++ 0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,
++ 0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,
++ 0x00,0xe3,0xa9,0x01,0xd2,0xb0,0xd1,0x6c,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,
++ 0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x12,0x00,
++ 0x12,0x00,0x12,0x00,0x54,0x04,0x12,0x00,0xd3,0x10,0x52,0x04,0x12,0x00,0x51,0x04,
++ 0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,
++ 0x10,0x04,0x12,0x00,0x12,0x09,0xcf,0x86,0xd5,0x14,0x94,0x10,0x93,0x0c,0x52,0x04,
++ 0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x94,0x14,0x53,0x04,
++ 0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
++ 0x12,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,0x12,0x00,0x93,0x0c,0x92,0x08,
++ 0x11,0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,
++ 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x93,0x10,
++ 0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
++ 0xcf,0x06,0x00,0x00,0xd1,0xa0,0xd0,0x52,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,
++ 0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x92,0x0c,
++ 0x51,0x04,0x13,0x00,0x10,0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x54,0x04,
++ 0x13,0x00,0xd3,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,
++ 0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x51,0x04,
++ 0x13,0x00,0x10,0x04,0x00,0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0x93,0x14,
++ 0xd2,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x07,0x13,0x00,0x11,0x04,0x13,0x09,
++ 0x13,0x00,0x00,0x00,0x53,0x04,0x13,0x00,0x92,0x08,0x11,0x04,0x13,0x00,0x00,0x00,
++ 0x00,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,
++ 0x00,0x00,0x14,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x14,0x00,
++ 0x14,0x00,0x14,0x00,0xd0,0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x14,0x53,0x04,0x14,0x00,
++ 0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0xd3,0x18,
++ 0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x51,0x04,0x14,0x00,
++ 0x10,0x04,0x14,0x00,0x14,0x09,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,
++ 0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,
++ 0x14,0x00,0x53,0x04,0x14,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,
++ 0xcf,0x86,0x55,0x04,0x15,0x00,0x54,0x04,0x15,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,
++ 0x15,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,
++ 0x00,0x00,0x15,0x00,0xd0,0xca,0xcf,0x86,0xd5,0xc2,0xd4,0x54,0xd3,0x06,0xcf,0x06,
++ 0x09,0x00,0xd2,0x06,0xcf,0x06,0x09,0x00,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x09,0x00,
++ 0xcf,0x86,0x55,0x04,0x09,0x00,0x94,0x14,0x53,0x04,0x09,0x00,0x52,0x04,0x09,0x00,
++ 0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,0x10,0x00,0xd0,0x1e,0xcf,0x86,
++ 0x95,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,
++ 0x10,0x00,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x68,
++ 0xd2,0x46,0xd1,0x40,0xd0,0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,
++ 0xd4,0x20,0xd3,0x10,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,
++ 0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,
++ 0x93,0x10,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0xcf,0x06,0x11,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,
++ 0x95,0x10,0x94,0x0c,0x93,0x08,0x12,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,
++ 0xd5,0x4c,0xd4,0x06,0xcf,0x06,0x0b,0x00,0xd3,0x40,0xd2,0x3a,0xd1,0x34,0xd0,0x2e,
++ 0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,
++ 0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x53,0x04,0x15,0x00,0x92,0x0c,
++ 0x91,0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,
++ 0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,
++ 0xd1,0x4c,0xd0,0x44,0xcf,0x86,0xd5,0x3c,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,
++ 0xcf,0x06,0x11,0x00,0xd2,0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,
++ 0x95,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,
++ 0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,
++ 0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0xd2,0x01,0xcf,
++ 0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x0b,0x01,0xd3,0x06,0xcf,0x06,0x0c,0x00,
++ 0xd2,0x84,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0c,0x00,0x54,0x04,0x0c,0x00,
++ 0x53,0x04,0x0c,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,
++ 0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,0x53,0x04,
++ 0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,
++ 0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x00,0x00,
++ 0x10,0x00,0xd4,0x10,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,
++ 0x00,0x00,0x93,0x10,0x52,0x04,0x10,0x01,0x91,0x08,0x10,0x04,0x10,0x01,0x10,0x00,
++ 0x00,0x00,0x00,0x00,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,
++ 0x10,0x00,0x93,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,
++ 0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x24,0xd4,0x10,0x93,0x0c,0x52,0x04,0x10,0x00,
++ 0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,
++ 0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,
++ 0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,
++ 0x10,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,
++ 0xd0,0x0e,0xcf,0x86,0x95,0x08,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,
++ 0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,0x30,0xd1,0x0c,0xd0,0x06,0xcf,0x06,
++ 0x00,0x00,0xcf,0x06,0x14,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x14,0x00,
++ 0x53,0x04,0x14,0x00,0x92,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0d,0x00,
++ 0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,0x52,0x04,0x0d,0x00,0x91,0x08,0x10,0x04,
++ 0x0d,0x00,0x15,0x00,0x15,0x00,0xd2,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,
++ 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0d,0x00,0x54,0x04,
++ 0x0d,0x00,0x53,0x04,0x0d,0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,
++ 0x0d,0x00,0x15,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x15,0x00,
++ 0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x0d,0x00,
++ 0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,
++ 0x10,0x04,0x12,0x00,0x13,0x00,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,
++ 0xcf,0x06,0x12,0x00,0xe2,0xc5,0x01,0xd1,0x8e,0xd0,0x86,0xcf,0x86,0xd5,0x48,0xd4,
++ 0x06,0xcf,0x06,0x12,0x00,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x06,0xcf,0x06,0x12,
++ 0x00,0xd1,0x06,0xcf,0x06,0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,
++ 0x04,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,
++ 0x04,0x12,0x00,0x14,0x00,0x14,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x14,0x00,0x15,
++ 0x00,0x15,0x00,0x00,0x00,0xd4,0x36,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x2a,0xd1,
++ 0x06,0xcf,0x06,0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,
++ 0x00,0x54,0x04,0x12,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,
+- 0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x4c,0xd0,0x44,0xcf,
+- 0x86,0xd5,0x3c,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x11,0x00,0xd2,
+- 0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,
+- 0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
+- 0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0xd2,0x01,0xcf,0x86,0xd5,0x06,0xcf,0x06,
+- 0x00,0x00,0xe4,0x0b,0x01,0xd3,0x06,0xcf,0x06,0x0c,0x00,0xd2,0x84,0xd1,0x50,0xd0,
+- 0x1e,0xcf,0x86,0x55,0x04,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,0x0c,0x00,0x92,
+- 0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,
+- 0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,
+- 0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,
+- 0x04,0x10,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x00,0x00,0xd0,0x06,0xcf,
+- 0x06,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x00,0x00,0x10,0x00,0xd4,0x10,0x53,
+- 0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x93,0x10,0x52,
+- 0x04,0x10,0x01,0x91,0x08,0x10,0x04,0x10,0x01,0x10,0x00,0x00,0x00,0x00,0x00,0xd1,
+- 0x6c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x10,0x52,
+- 0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,0x10,0x00,0x10,0x00,0xcf,
+- 0x86,0xd5,0x24,0xd4,0x10,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,
+- 0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,
+- 0x00,0x10,0x00,0x10,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,
+- 0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x00,
+- 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd0,0x0e,0xcf,0x86,0x95,
+- 0x08,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,
+- 0x06,0x00,0x00,0xd2,0x30,0xd1,0x0c,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x06,0x14,
+- 0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x14,0x00,0x53,0x04,0x14,0x00,0x92,
+- 0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,
+- 0x06,0x00,0x00,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x2c,0x94,
+- 0x28,0xd3,0x10,0x52,0x04,0x0d,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x15,0x00,0x15,
+- 0x00,0xd2,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,0x51,0x04,0x00,
+- 0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0d,0x00,0x54,0x04,0x0d,0x00,0x53,0x04,0x0d,
+- 0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x15,0x00,0xd0,
+- 0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x15,0x00,0x52,0x04,0x00,0x00,0x51,
+- 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x0d,0x00,0x00,0x00,0xcf,0x86,0x55,
+- 0x04,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x13,
+- 0x00,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xcf,0x06,0x12,0x00,0xe2,
+- 0xc6,0x01,0xd1,0x8e,0xd0,0x86,0xcf,0x86,0xd5,0x48,0xd4,0x06,0xcf,0x06,0x12,0x00,
+- 0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x06,0xcf,0x06,0x12,0x00,0xd1,0x06,0xcf,0x06,
+- 0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,0x00,0xd4,0x14,
+- 0x53,0x04,0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x14,0x00,
+- 0x14,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x14,0x00,0x15,0x00,0x15,0x00,0x00,0x00,
+- 0xd4,0x36,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,0x12,0x00,
+- 0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,0x00,0x54,0x04,0x12,0x00,
+- 0x93,0x10,0x92,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,
+- 0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,
+- 0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0xa2,0xd4,0x9c,0xd3,0x74,
+- 0xd2,0x26,0xd1,0x20,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,
+- 0x11,0x04,0x0c,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,0x06,
+- 0x13,0x00,0xcf,0x06,0x13,0x00,0xd1,0x48,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,
+- 0x13,0x00,0x53,0x04,0x13,0x00,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,
+- 0x13,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x00,0x00,0x93,0x10,
+- 0x92,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+- 0x94,0x0c,0x93,0x08,0x12,0x04,0x00,0x00,0x15,0x00,0x00,0x00,0x13,0x00,0xcf,0x06,
+- 0x13,0x00,0xd2,0x22,0xd1,0x06,0xcf,0x06,0x13,0x00,0xd0,0x06,0xcf,0x06,0x13,0x00,
+- 0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x53,0x04,0x13,0x00,0x12,0x04,
+- 0x13,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,
+- 0x00,0x00,0xd3,0x7f,0xd2,0x79,0xd1,0x34,0xd0,0x06,0xcf,0x06,0x10,0x00,0xcf,0x86,
+- 0x55,0x04,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,0x10,0x00,
+- 0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,
+- 0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,0x3f,0xcf,0x86,0xd5,0x2c,
+- 0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,
+- 0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x00,0x00,
+- 0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x01,0x10,0x00,0x94,0x0d,0x93,0x09,0x12,0x05,
+- 0x10,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
+- 0x00,0xcf,0x06,0x00,0x00,0xe1,0x96,0x04,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,
+- 0xcf,0x86,0xe5,0x33,0x04,0xe4,0x83,0x02,0xe3,0xf8,0x01,0xd2,0x26,0xd1,0x06,0xcf,
+- 0x06,0x05,0x00,0xd0,0x06,0xcf,0x06,0x05,0x00,0xcf,0x86,0x55,0x04,0x05,0x00,0x54,
+- 0x04,0x05,0x00,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x00,0x00,0x00,
+- 0x00,0xd1,0xef,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,0x20,0xd3,0x10,0x52,
+- 0x04,0x05,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,0x00,0x92,0x0c,0x91,
+- 0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0xd5,
+- 0x2a,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x51,0x04,0x05,
+- 0x00,0x10,0x0d,0x05,0xff,0xf0,0x9d,0x85,0x97,0xf0,0x9d,0x85,0xa5,0x00,0x05,0xff,
+- 0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0x00,0xd4,0x75,0xd3,0x61,0xd2,0x44,0xd1,
+- 0x22,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,
+- 0xae,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xaf,
+- 0x00,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,
+- 0xb0,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xb1,
+- 0x00,0xd1,0x15,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,
+- 0x9d,0x85,0xb2,0x00,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x01,0xd2,0x08,0x11,0x04,
+- 0x05,0x01,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe2,0x05,0xd8,0xd3,0x12,
+- 0x92,0x0d,0x51,0x04,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0xff,0x00,0x05,0xff,0x00,
+- 0x92,0x0e,0x51,0x05,0x05,0xff,0x00,0x10,0x05,0x05,0xff,0x00,0x05,0xdc,0x05,0xdc,
++ 0x86,0xcf,0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,
++ 0xa2,0xd4,0x9c,0xd3,0x74,0xd2,0x26,0xd1,0x20,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x94,
++ 0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,
++ 0x00,0x13,0x00,0xcf,0x06,0x13,0x00,0xcf,0x06,0x13,0x00,0xd1,0x48,0xd0,0x1e,0xcf,
++ 0x86,0x95,0x18,0x54,0x04,0x13,0x00,0x53,0x04,0x13,0x00,0x52,0x04,0x13,0x00,0x51,
++ 0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,
++ 0x04,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x00,0x00,0x15,0x00,0x00,
++ 0x00,0x13,0x00,0xcf,0x06,0x13,0x00,0xd2,0x22,0xd1,0x06,0xcf,0x06,0x13,0x00,0xd0,
++ 0x06,0xcf,0x06,0x13,0x00,0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x53,
++ 0x04,0x13,0x00,0x12,0x04,0x13,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
++ 0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x7e,0xd2,0x78,0xd1,0x34,0xd0,0x06,0xcf,
++ 0x06,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,
++ 0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,
++ 0x00,0x52,0x04,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,
++ 0x3e,0xcf,0x86,0xd5,0x2c,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,
++ 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,
++ 0x04,0x10,0x00,0x00,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x01,0x10,0x00,0x94,
++ 0x0c,0x93,0x08,0x12,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,
++ 0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xe1,0x92,0x04,0xd0,0x08,0xcf,0x86,
++ 0xcf,0x06,0x00,0x00,0xcf,0x86,0xe5,0x2f,0x04,0xe4,0x7f,0x02,0xe3,0xf4,0x01,0xd2,
++ 0x26,0xd1,0x06,0xcf,0x06,0x05,0x00,0xd0,0x06,0xcf,0x06,0x05,0x00,0xcf,0x86,0x55,
++ 0x04,0x05,0x00,0x54,0x04,0x05,0x00,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,
++ 0x00,0x00,0x00,0x00,0x00,0xd1,0xeb,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,
++ 0x20,0xd3,0x10,0x52,0x04,0x05,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,
++ 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x05,0x00,0x05,0x00,0x05,
++ 0x00,0xcf,0x86,0xd5,0x2a,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,
++ 0x00,0x51,0x04,0x05,0x00,0x10,0x0d,0x05,0xff,0xf0,0x9d,0x85,0x97,0xf0,0x9d,0x85,
++ 0xa5,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0x00,0xd4,0x75,0xd3,
++ 0x61,0xd2,0x44,0xd1,0x22,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,
++ 0xa5,0xf0,0x9d,0x85,0xae,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,
++ 0xf0,0x9d,0x85,0xaf,0x00,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,
++ 0xa5,0xf0,0x9d,0x85,0xb0,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,
++ 0xf0,0x9d,0x85,0xb1,0x00,0xd1,0x15,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,
++ 0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xb2,0x00,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x01,
++ 0xd2,0x08,0x11,0x04,0x05,0x01,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe2,
++ 0x05,0xd8,0xd3,0x10,0x92,0x0c,0x51,0x04,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x00,
++ 0x05,0x00,0x92,0x0c,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x05,0xdc,0x05,0xdc,
+ 0xd0,0x97,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x05,0xdc,
+ 0x10,0x04,0x05,0xdc,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe6,0x05,0xe6,
+ 0x92,0x08,0x11,0x04,0x05,0xe6,0x05,0xdc,0x05,0x00,0x05,0x00,0xd4,0x14,0x53,0x04,
+@@ -4090,21 +4080,20 @@ static const unsigned char utf8data[64256] = {
+ 0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,
+ 0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,
+ 0x04,0x00,0x00,0x53,0x04,0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,
+- 0x00,0xd4,0xd9,0xd3,0x81,0xd2,0x79,0xd1,0x71,0xd0,0x69,0xcf,0x86,0xd5,0x60,0xd4,
+- 0x59,0xd3,0x52,0xd2,0x33,0xd1,0x2c,0xd0,0x25,0xcf,0x86,0x95,0x1e,0x94,0x19,0x93,
+- 0x14,0x92,0x0f,0x91,0x0a,0x10,0x05,0x00,0xff,0x00,0x05,0xff,0x00,0x00,0xff,0x00,
+- 0x00,0xff,0x00,0x00,0xff,0x00,0x00,0xff,0x00,0x05,0xff,0x00,0xcf,0x06,0x05,0xff,
+- 0x00,0xcf,0x06,0x00,0xff,0x00,0xd1,0x07,0xcf,0x06,0x07,0xff,0x00,0xd0,0x07,0xcf,
+- 0x06,0x07,0xff,0x00,0xcf,0x86,0x55,0x05,0x07,0xff,0x00,0x14,0x05,0x07,0xff,0x00,
+- 0x00,0xff,0x00,0xcf,0x06,0x00,0xff,0x00,0xcf,0x06,0x00,0xff,0x00,0xcf,0x06,0x00,
+- 0xff,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,
+- 0xcf,0x06,0x00,0x00,0xd2,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xd1,0x08,0xcf,0x86,
+- 0xcf,0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x06,
+- 0xcf,0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,
+- 0xd2,0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,
+- 0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x00,0x00,
+- 0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00,
+- 0x81,0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00
++ 0x00,0xd4,0xc8,0xd3,0x70,0xd2,0x68,0xd1,0x60,0xd0,0x58,0xcf,0x86,0xd5,0x50,0xd4,
++ 0x4a,0xd3,0x44,0xd2,0x2a,0xd1,0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,
++ 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x05,0x00,0xcf,0x06,0x05,0x00,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,
++ 0x06,0x07,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x14,
++ 0x04,0x07,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,
++ 0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,
++ 0x06,0x00,0x00,0xd2,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xd1,0x08,0xcf,0x86,0xcf,
++ 0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x06,0xcf,
++ 0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,
++ 0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,
++ 0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x00,0x00,0x52,
++ 0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00,0x81,
++ 0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00,0x00
+ };
+
+ struct utf8data_table utf8_data_table = {
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 56eaae9dac1ab2..5d3e595f9da96a 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -921,6 +921,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+ prev = vma;
+ continue;
+ }
++ /* Reset ptes for the whole vma range if wr-protected */
++ if (userfaultfd_wp(vma))
++ uffd_wp_range(vma, vma->vm_start,
++ vma->vm_end - vma->vm_start, false);
+ new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
+ prev = vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end,
+ new_flags, vma->anon_vma,
+@@ -2046,7 +2050,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ goto out;
+ features = uffdio_api.features;
+ ret = -EINVAL;
+- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++ if (uffdio_api.api != UFFD_API)
+ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+@@ -2064,6 +2068,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
+ uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
+ #endif
++
++ ret = -EINVAL;
++ if (features & ~uffdio_api.features)
++ goto err_out;
++
+ uffdio_api.ioctls = UFFD_API_IOCTLS;
+ ret = -EFAULT;
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
+index 2307f8037efc3d..118dedef8ebe8d 100644
+--- a/fs/vboxsf/file.c
++++ b/fs/vboxsf/file.c
+@@ -218,6 +218,7 @@ const struct file_operations vboxsf_reg_fops = {
+ .release = vboxsf_file_release,
+ .fsync = noop_fsync,
+ .splice_read = filemap_splice_read,
++ .setlease = simple_nosetlease,
+ };
+
+ const struct inode_operations vboxsf_reg_iops = {
+diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
+index 1fb8f4df60cbb3..9848af78215bf0 100644
+--- a/fs/vboxsf/super.c
++++ b/fs/vboxsf/super.c
+@@ -151,7 +151,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
+ if (!sbi->nls) {
+ vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
+ err = -EINVAL;
+- goto fail_free;
++ goto fail_destroy_idr;
+ }
+ }
+
+@@ -224,6 +224,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
+ ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ if (sbi->nls)
+ unload_nls(sbi->nls);
++fail_destroy_idr:
+ idr_destroy(&sbi->ino_idr);
+ kfree(sbi);
+ return err;
+diff --git a/fs/verity/init.c b/fs/verity/init.c
+index a29f062f6047b8..c59156b55e4ff5 100644
+--- a/fs/verity/init.c
++++ b/fs/verity/init.c
+@@ -10,8 +10,6 @@
+ #include <linux/ratelimit.h>
+
+ #ifdef CONFIG_SYSCTL
+-static struct ctl_table_header *fsverity_sysctl_header;
+-
+ static struct ctl_table fsverity_sysctl_table[] = {
+ #ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+ {
+@@ -29,10 +27,7 @@ static struct ctl_table fsverity_sysctl_table[] = {
+
+ static void __init fsverity_init_sysctl(void)
+ {
+- fsverity_sysctl_header = register_sysctl("fs/verity",
+- fsverity_sysctl_table);
+- if (!fsverity_sysctl_header)
+- panic("fsverity sysctl registration failed");
++ register_sysctl_init("fs/verity", fsverity_sysctl_table);
+ }
+ #else /* CONFIG_SYSCTL */
+ static inline void fsverity_init_sysctl(void)
+diff --git a/fs/xattr.c b/fs/xattr.c
+index efd4736bc94b09..c20046548f218e 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -631,10 +631,9 @@ int do_setxattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ ctx->kvalue, ctx->size, ctx->flags);
+ }
+
+-static long
+-setxattr(struct mnt_idmap *idmap, struct dentry *d,
+- const char __user *name, const void __user *value, size_t size,
+- int flags)
++static int path_setxattr(const char __user *pathname,
++ const char __user *name, const void __user *value,
++ size_t size, int flags, unsigned int lookup_flags)
+ {
+ struct xattr_name kname;
+ struct xattr_ctx ctx = {
+@@ -644,33 +643,20 @@ setxattr(struct mnt_idmap *idmap, struct dentry *d,
+ .kname = &kname,
+ .flags = flags,
+ };
++ struct path path;
+ int error;
+
+ error = setxattr_copy(name, &ctx);
+ if (error)
+ return error;
+
+- error = do_setxattr(idmap, d, &ctx);
+-
+- kvfree(ctx.kvalue);
+- return error;
+-}
+-
+-static int path_setxattr(const char __user *pathname,
+- const char __user *name, const void __user *value,
+- size_t size, int flags, unsigned int lookup_flags)
+-{
+- struct path path;
+- int error;
+-
+ retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
+ if (error)
+- return error;
++ goto out;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(mnt_idmap(path.mnt), path.dentry, name,
+- value, size, flags);
++ error = do_setxattr(mnt_idmap(path.mnt), path.dentry, &ctx);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -678,6 +664,9 @@ static int path_setxattr(const char __user *pathname,
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
++
++out:
++ kvfree(ctx.kvalue);
+ return error;
+ }
+
+@@ -698,20 +687,32 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
+ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
+ const void __user *,value, size_t, size, int, flags)
+ {
+- struct fd f = fdget(fd);
+- int error = -EBADF;
++ struct xattr_name kname;
++ struct xattr_ctx ctx = {
++ .cvalue = value,
++ .kvalue = NULL,
++ .size = size,
++ .kname = &kname,
++ .flags = flags,
++ };
++ int error;
+
++ CLASS(fd, f)(fd);
+ if (!f.file)
+- return error;
++ return -EBADF;
++
+ audit_file(f.file);
++ error = setxattr_copy(name, &ctx);
++ if (error)
++ return error;
++
+ error = mnt_want_write_file(f.file);
+ if (!error) {
+- error = setxattr(file_mnt_idmap(f.file),
+- f.file->f_path.dentry, name,
+- value, size, flags);
++ error = do_setxattr(file_mnt_idmap(f.file),
++ f.file->f_path.dentry, &ctx);
+ mnt_drop_write_file(f.file);
+ }
+- fdput(f);
++ kvfree(ctx.kvalue);
+ return error;
+ }
+
+@@ -900,9 +901,17 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
+ * Extended attribute REMOVE operations
+ */
+ static long
+-removexattr(struct mnt_idmap *idmap, struct dentry *d,
+- const char __user *name)
++removexattr(struct mnt_idmap *idmap, struct dentry *d, const char *name)
+ {
++ if (is_posix_acl_xattr(name))
++ return vfs_remove_acl(idmap, d, name);
++ return vfs_removexattr(idmap, d, name);
++}
++
++static int path_removexattr(const char __user *pathname,
++ const char __user *name, unsigned int lookup_flags)
++{
++ struct path path;
+ int error;
+ char kname[XATTR_NAME_MAX + 1];
+
+@@ -911,25 +920,13 @@ removexattr(struct mnt_idmap *idmap, struct dentry *d,
+ error = -ERANGE;
+ if (error < 0)
+ return error;
+-
+- if (is_posix_acl_xattr(kname))
+- return vfs_remove_acl(idmap, d, kname);
+-
+- return vfs_removexattr(idmap, d, kname);
+-}
+-
+-static int path_removexattr(const char __user *pathname,
+- const char __user *name, unsigned int lookup_flags)
+-{
+- struct path path;
+- int error;
+ retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
+ if (error)
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(mnt_idmap(path.mnt), path.dentry, name);
++ error = removexattr(mnt_idmap(path.mnt), path.dentry, kname);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -955,15 +952,23 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
+ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
+ {
+ struct fd f = fdget(fd);
++ char kname[XATTR_NAME_MAX + 1];
+ int error = -EBADF;
+
+ if (!f.file)
+ return error;
+ audit_file(f.file);
++
++ error = strncpy_from_user(kname, name, sizeof(kname));
++ if (error == 0 || error == sizeof(kname))
++ error = -ERANGE;
++ if (error < 0)
++ return error;
++
+ error = mnt_want_write_file(f.file);
+ if (!error) {
+ error = removexattr(file_mnt_idmap(f.file),
+- f.file->f_path.dentry, name);
++ f.file->f_path.dentry, kname);
+ mnt_drop_write_file(f.file);
+ }
+ fdput(f);
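
Both removexattr paths above now copy the attribute name into a fixed
on-stack buffer before taking write access to the mount.
strncpy_from_user() returns the copied length on success, 0 for an
empty string, or the buffer size when no NUL terminator fit within it,
and the hunks map the latter two cases to -ERANGE. The same idiom as a
hypothetical helper (not part of the patch):

    static int copy_xattr_name(char *kname, long ksize,
                               const char __user *name)
    {
        long len = strncpy_from_user(kname, name, ksize);

        if (len == 0 || len == ksize)
            return -ERANGE;  /* empty name, or name did not fit */
        if (len < 0)
            return len;      /* -EFAULT from the userspace copy */
        return 0;
    }
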
+diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
+index ed0bc8cbc703d9..567fb37274d35a 100644
+--- a/fs/xfs/Kconfig
++++ b/fs/xfs/Kconfig
+@@ -147,7 +147,7 @@ config XFS_ONLINE_SCRUB_STATS
+ bool "XFS online metadata check usage data collection"
+ default y
+ depends on XFS_ONLINE_SCRUB
+- select XFS_DEBUG
++ select DEBUG_FS
+ help
+ If you say Y here, the kernel will gather usage data about
+ the online metadata check subsystem. This includes the number
+diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
+index f9f4d694640d04..1531bd0ee359c9 100644
+--- a/fs/xfs/libxfs/xfs_ag.c
++++ b/fs/xfs/libxfs/xfs_ag.c
+@@ -332,6 +332,31 @@ xfs_agino_range(
+ return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
+ }
+
++/*
++ * Free perags within the specified AG range; this is only used to free
++ * unused perags on the error handling path.
++ */
++void
++xfs_free_unused_perag_range(
++ struct xfs_mount *mp,
++ xfs_agnumber_t agstart,
++ xfs_agnumber_t agend)
++{
++ struct xfs_perag *pag;
++ xfs_agnumber_t index;
++
++ for (index = agstart; index < agend; index++) {
++ spin_lock(&mp->m_perag_lock);
++ pag = radix_tree_delete(&mp->m_perag_tree, index);
++ spin_unlock(&mp->m_perag_lock);
++ if (!pag)
++ break;
++ xfs_buf_hash_destroy(pag);
++ xfs_defer_drain_free(&pag->pag_intents_drain);
++ kmem_free(pag);
++ }
++}
++
+ int
+ xfs_initialize_perag(
+ struct xfs_mount *mp,
+@@ -424,19 +449,14 @@ xfs_initialize_perag(
+
+ out_remove_pag:
+ xfs_defer_drain_free(&pag->pag_intents_drain);
++ spin_lock(&mp->m_perag_lock);
+ radix_tree_delete(&mp->m_perag_tree, index);
++ spin_unlock(&mp->m_perag_lock);
+ out_free_pag:
+ kmem_free(pag);
+ out_unwind_new_pags:
+ /* unwind any prior newly initialized pags */
+- for (index = first_initialised; index < agcount; index++) {
+- pag = radix_tree_delete(&mp->m_perag_tree, index);
+- if (!pag)
+- break;
+- xfs_buf_hash_destroy(pag);
+- xfs_defer_drain_free(&pag->pag_intents_drain);
+- kmem_free(pag);
+- }
++ xfs_free_unused_perag_range(mp, first_initialised, agcount);
+ return error;
+ }
+
+@@ -959,14 +979,23 @@ xfs_ag_shrink_space(
+
+ if (error) {
+ /*
+- * if extent allocation fails, need to roll the transaction to
++ * If extent allocation fails, need to roll the transaction to
+ * ensure that the AGFL fixup has been committed anyway.
++ *
++ * We need to hold the AGF across the roll to ensure nothing can
++ * access the AG for allocation until the shrink is fully
++	 * cleaned up. And because resetting the AG block reservation
++	 * space requires locking the AGI, we also have to hold the AGI
++	 * so we don't get AGI/AGF lock order inversions in the error
++	 * handling path.
+ */
+ xfs_trans_bhold(*tpp, agfbp);
++ xfs_trans_bhold(*tpp, agibp);
+ err2 = xfs_trans_roll(tpp);
+ if (err2)
+ return err2;
+ xfs_trans_bjoin(*tpp, agfbp);
++ xfs_trans_bjoin(*tpp, agibp);
+ goto resv_init_out;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
+index 2e0aef87d633e0..40d7b6427afb5d 100644
+--- a/fs/xfs/libxfs/xfs_ag.h
++++ b/fs/xfs/libxfs/xfs_ag.h
+@@ -133,6 +133,8 @@ __XFS_AG_OPSTATE(prefers_metadata, PREFERS_METADATA)
+ __XFS_AG_OPSTATE(allows_inodes, ALLOWS_INODES)
+ __XFS_AG_OPSTATE(agfl_needs_reset, AGFL_NEEDS_RESET)
+
++void xfs_free_unused_perag_range(struct xfs_mount *mp, xfs_agnumber_t agstart,
++ xfs_agnumber_t agend);
+ int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
+ xfs_rfsblock_t dcount, xfs_agnumber_t *maxagi);
+ int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 3069194527dd06..100ab5931b3132 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -2275,16 +2275,37 @@ xfs_alloc_min_freelist(
+
+ ASSERT(mp->m_alloc_maxlevels > 0);
+
++ /*
++ * For a btree shorter than the maximum height, the worst case is that
++ * every level gets split and a new level is added, then while inserting
++ * another entry to refill the AGFL, every level under the old root gets
++ * split again. This is:
++ *
++ * (full height split reservation) + (AGFL refill split height)
++ * = (current height + 1) + (current height - 1)
++ * = (new height) + (new height - 2)
++ * = 2 * new height - 2
++ *
++ * For a btree of maximum height, the worst case is that every level
++ * under the root gets split, then while inserting another entry to
++ * refill the AGFL, every level under the root gets split again. This is
++ * also:
++ *
++ * 2 * (current height - 1)
++ * = 2 * (new height - 1)
++ * = 2 * new height - 2
++ */
++
+ /* space needed by-bno freespace btree */
+ min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
+- mp->m_alloc_maxlevels);
++ mp->m_alloc_maxlevels) * 2 - 2;
+ /* space needed by-size freespace btree */
+ min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
+- mp->m_alloc_maxlevels);
++ mp->m_alloc_maxlevels) * 2 - 2;
+ /* space needed reverse mapping used space btree */
+ if (xfs_has_rmapbt(mp))
+ min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
+- mp->m_rmap_maxlevels);
++ mp->m_rmap_maxlevels) * 2 - 2;
+
+ return min_free;
+ }
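
A worked instance of the "2 * new height - 2" formula derived in the
comment above: assume the by-bno btree is currently 2 levels high on a
filesystem with m_alloc_maxlevels = 5. Then

    min_t(unsigned int, 2 + 1, 5) * 2 - 2  =  3 * 2 - 2  =  4 blocks

which matches 2 * 3 - 2 for a post-split height of 3. At the cap, a
tree already at 5 levels gives min_t(6, 5) * 2 - 2 = 8, matching the
2 * (current height - 1) branch of the derivation.
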
+diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
+index e28d93d232de49..33edf047e0ad2f 100644
+--- a/fs/xfs/libxfs/xfs_attr.c
++++ b/fs/xfs/libxfs/xfs_attr.c
+@@ -421,10 +421,10 @@ xfs_attr_complete_op(
+ bool do_replace = args->op_flags & XFS_DA_OP_REPLACE;
+
+ args->op_flags &= ~XFS_DA_OP_REPLACE;
+- if (do_replace) {
+- args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
++ args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
++ if (do_replace)
+ return replace_state;
+- }
++
+ return XFS_DAS_DONE;
+ }
+
+@@ -1565,12 +1565,23 @@ xfs_attr_node_get(
+ return error;
+ }
+
++/* Enforce that there is at most one namespace bit per attr. */
++inline bool xfs_attr_check_namespace(unsigned int attr_flags)
++{
++ return hweight32(attr_flags & XFS_ATTR_NSP_ONDISK_MASK) < 2;
++}
++
+ /* Returns true if the attribute entry name is valid. */
+ bool
+ xfs_attr_namecheck(
++ unsigned int attr_flags,
+ const void *name,
+ size_t length)
+ {
++ /* Only one namespace bit allowed. */
++ if (!xfs_attr_check_namespace(attr_flags))
++ return false;
++
+ /*
+ * MAXNAMELEN includes the trailing null, but (name/length) leave it
+ * out, so use >= for the length check.
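
xfs_attr_check_namespace() above uses hweight32(), a population count,
over XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE), so
"< 2" means at most one namespace bit is set. Example evaluations,
using the flag definitions from xfs_da_format.h:

    xfs_attr_check_namespace(0);              /* 0 bits set: true  */
    xfs_attr_check_namespace(XFS_ATTR_ROOT);  /* 1 bit set:  true  */
    xfs_attr_check_namespace(XFS_ATTR_ROOT |
                             XFS_ATTR_SECURE); /* 2 bits set: false */
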
+diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
+index 81be9b3e40047b..c877f05e3cd1a4 100644
+--- a/fs/xfs/libxfs/xfs_attr.h
++++ b/fs/xfs/libxfs/xfs_attr.h
+@@ -547,7 +547,9 @@ int xfs_attr_get(struct xfs_da_args *args);
+ int xfs_attr_set(struct xfs_da_args *args);
+ int xfs_attr_set_iter(struct xfs_attr_intent *attr);
+ int xfs_attr_remove_iter(struct xfs_attr_intent *attr);
+-bool xfs_attr_namecheck(const void *name, size_t length);
++bool xfs_attr_check_namespace(unsigned int attr_flags);
++bool xfs_attr_namecheck(unsigned int attr_flags, const void *name,
++ size_t length);
+ int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
+ void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
+ unsigned int *total);
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index 2580ae47209a6b..51ff4406867539 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -984,6 +984,10 @@ xfs_attr_shortform_to_leaf(
+ nargs.hashval = xfs_da_hashname(sfe->nameval,
+ sfe->namelen);
+ nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
++ if (!xfs_attr_check_namespace(sfe->flags)) {
++ error = -EFSCORRUPTED;
++ goto out;
++ }
+ error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
+ ASSERT(error == -ENOATTR);
+ error = xfs_attr3_leaf_add(bp, &nargs);
+@@ -1105,7 +1109,7 @@ xfs_attr_shortform_verify(
+ * one namespace flag per xattr, so we can just count the
+ * bits (i.e. hweight) here.
+ */
+- if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
++ if (!xfs_attr_check_namespace(sfep->flags))
+ return __this_address;
+
+ sfep = next_sfep;
+diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
+index d440393b40eb8e..54de405cbab5ac 100644
+--- a/fs/xfs/libxfs/xfs_attr_remote.c
++++ b/fs/xfs/libxfs/xfs_attr_remote.c
+@@ -619,7 +619,6 @@ xfs_attr_rmtval_set_blk(
+ if (error)
+ return error;
+
+- ASSERT(nmap == 1);
+ ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
+ (map->br_startblock != HOLESTARTBLOCK));
+
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 30c931b38853c9..e6ea35098e07f8 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -21,7 +21,7 @@
+ #include "xfs_bmap.h"
+ #include "xfs_bmap_util.h"
+ #include "xfs_bmap_btree.h"
+-#include "xfs_rtalloc.h"
++#include "xfs_rtbitmap.h"
+ #include "xfs_errortag.h"
+ #include "xfs_error.h"
+ #include "xfs_quota.h"
+@@ -1549,6 +1549,7 @@ xfs_bmap_add_extent_delay_real(
+ if (error)
+ goto done;
+ }
++ ASSERT(da_new <= da_old);
+ break;
+
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+@@ -1578,6 +1579,7 @@ xfs_bmap_add_extent_delay_real(
+ if (error)
+ goto done;
+ }
++ ASSERT(da_new <= da_old);
+ break;
+
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+@@ -1611,6 +1613,7 @@ xfs_bmap_add_extent_delay_real(
+ if (error)
+ goto done;
+ }
++ ASSERT(da_new <= da_old);
+ break;
+
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
+@@ -1643,6 +1646,7 @@ xfs_bmap_add_extent_delay_real(
+ goto done;
+ }
+ }
++ ASSERT(da_new <= da_old);
+ break;
+
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
+@@ -1680,6 +1684,7 @@ xfs_bmap_add_extent_delay_real(
+ if (error)
+ goto done;
+ }
++ ASSERT(da_new <= da_old);
+ break;
+
+ case BMAP_LEFT_FILLING:
+@@ -1767,6 +1772,7 @@ xfs_bmap_add_extent_delay_real(
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+ xfs_iext_next(ifp, &bma->icur);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
++ ASSERT(da_new <= da_old);
+ break;
+
+ case BMAP_RIGHT_FILLING:
+@@ -1814,6 +1820,7 @@ xfs_bmap_add_extent_delay_real(
+ PREV.br_blockcount = temp;
+ xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
+ xfs_iext_next(ifp, &bma->icur);
++ ASSERT(da_new <= da_old);
+ break;
+
+ case 0:
+@@ -1934,11 +1941,9 @@ xfs_bmap_add_extent_delay_real(
+ }
+
+ /* adjust for changes in reserved delayed indirect blocks */
+- if (da_new != da_old) {
+- ASSERT(state == 0 || da_new < da_old);
++ if (da_new != da_old)
+ error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
+- false);
+- }
++ true);
+
+ xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
+ done:
+@@ -3969,20 +3974,32 @@ xfs_bmapi_reserve_delalloc(
+ xfs_extlen_t alen;
+ xfs_extlen_t indlen;
+ int error;
+- xfs_fileoff_t aoff = off;
++ xfs_fileoff_t aoff;
++ bool use_cowextszhint =
++ whichfork == XFS_COW_FORK && !prealloc;
+
++retry:
+ /*
+ * Cap the alloc length. Keep track of prealloc so we know whether to
+ * tag the inode before we return.
+ */
++ aoff = off;
+ alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
+ if (!eof)
+ alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
+ if (prealloc && alen >= len)
+ prealloc = alen - len;
+
+- /* Figure out the extent size, adjust alen */
+- if (whichfork == XFS_COW_FORK) {
++ /*
++	 * If we're targeting the COW fork but aren't creating a speculative
++ * posteof preallocation, try to expand the reservation to align with
++ * the COW extent size hint if there's sufficient free space.
++ *
++ * Unlike the data fork, the CoW cancellation functions will free all
++ * the reservations at inactivation, so we don't require that every
++ * delalloc reservation have a dirty pagecache.
++ */
++ if (use_cowextszhint) {
+ struct xfs_bmbt_irec prev;
+ xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
+
+@@ -4001,7 +4018,7 @@ xfs_bmapi_reserve_delalloc(
+ */
+ error = xfs_quota_reserve_blkres(ip, alen);
+ if (error)
+- return error;
++ goto out;
+
+ /*
+ * Split changing sb for alen and indlen since they could be coming
+@@ -4046,6 +4063,17 @@ xfs_bmapi_reserve_delalloc(
+ out_unreserve_quota:
+ if (XFS_IS_QUOTA_ON(mp))
+ xfs_quota_unreserve_blkres(ip, alen);
++out:
++ if (error == -ENOSPC || error == -EDQUOT) {
++ trace_xfs_delalloc_enospc(ip, off, len);
++
++ if (prealloc || use_cowextszhint) {
++ /* retry without any preallocation */
++ use_cowextszhint = false;
++ prealloc = 0;
++ goto retry;
++ }
++ }
+ return error;
+ }
+
+@@ -4128,8 +4156,10 @@ xfs_bmapi_allocate(
+ } else {
+ error = xfs_bmap_alloc_userdata(bma);
+ }
+- if (error || bma->blkno == NULLFSBLOCK)
++ if (error)
+ return error;
++ if (bma->blkno == NULLFSBLOCK)
++ return -ENOSPC;
+
+ if (bma->flags & XFS_BMAPI_ZERO) {
+ error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
+@@ -4309,6 +4339,15 @@ xfs_bmapi_finish(
+ * extent state if necessary. Detailed behaviour is controlled by the flags
+ * parameter. Only allocates blocks from a single allocation group, to avoid
+ * locking problems.
++ *
++ * Returns 0 on success and places the extent mappings in mval. nmaps is used
++ * as an input/output parameter where the caller specifies the maximum number
++ * of mappings that may be returned and xfs_bmapi_write passes back the number
++ * of mappings (including existing mappings) it found.
++ *
++ * Returns a negative error code on failure, including -ENOSPC when it could not
++ * allocate any blocks and -ENOSR when it did allocate blocks to convert a
++ * delalloc range, but those blocks were before the passed in range.
+ */
+ int
+ xfs_bmapi_write(
+@@ -4436,10 +4475,16 @@ xfs_bmapi_write(
+ ASSERT(len > 0);
+ ASSERT(bma.length > 0);
+ error = xfs_bmapi_allocate(&bma);
+- if (error)
++ if (error) {
++ /*
++ * If we already allocated space in a previous
++ * iteration return what we go so far when
++				 * iteration, return what we got so far when
++ */
++ if (error == -ENOSPC && bma.nallocs)
++ break;
+ goto error0;
+- if (bma.blkno == NULLFSBLOCK)
+- break;
++ }
+
+ /*
+ * If this is a CoW allocation, record the data in
+@@ -4477,7 +4522,6 @@ xfs_bmapi_write(
+ if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
+ eof = true;
+ }
+- *nmap = n;
+
+ error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
+ whichfork);
+@@ -4488,7 +4532,22 @@ xfs_bmapi_write(
+ ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
+ xfs_bmapi_finish(&bma, whichfork, 0);
+ xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
+- orig_nmap, *nmap);
++ orig_nmap, n);
++
++ /*
++ * When converting delayed allocations, xfs_bmapi_allocate ignores
++ * the passed in bno and always converts from the start of the found
++ * delalloc extent.
++ *
++ * To avoid a successful return with *nmap set to 0, return the magic
++ * -ENOSR error code for this particular case so that the caller can
++ * handle it.
++ */
++ if (!n) {
++ ASSERT(bma.nallocs >= *nmap);
++ return -ENOSR;
++ }
++ *nmap = n;
+ return 0;
+ error0:
+ xfs_bmapi_finish(&bma, whichfork, error);
+@@ -4501,8 +4560,8 @@ xfs_bmapi_write(
+ * invocations to allocate the target offset if a large enough physical extent
+ * is not available.
+ */
+-int
+-xfs_bmapi_convert_delalloc(
++static int
++xfs_bmapi_convert_one_delalloc(
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_off_t offset,
+@@ -4559,7 +4618,8 @@ xfs_bmapi_convert_delalloc(
+ if (!isnullstartblock(bma.got.br_startblock)) {
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
+ xfs_iomap_inode_sequence(ip, flags));
+- *seq = READ_ONCE(ifp->if_seq);
++ if (seq)
++ *seq = READ_ONCE(ifp->if_seq);
+ goto out_trans_cancel;
+ }
+
+@@ -4595,9 +4655,6 @@ xfs_bmapi_convert_delalloc(
+ if (error)
+ goto out_finish;
+
+- error = -ENOSPC;
+- if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
+- goto out_finish;
+ error = -EFSCORRUPTED;
+ if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
+ goto out_finish;
+@@ -4608,7 +4665,8 @@ xfs_bmapi_convert_delalloc(
+ ASSERT(!isnullstartblock(bma.got.br_startblock));
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
+ xfs_iomap_inode_sequence(ip, flags));
+- *seq = READ_ONCE(ifp->if_seq);
++ if (seq)
++ *seq = READ_ONCE(ifp->if_seq);
+
+ if (whichfork == XFS_COW_FORK)
+ xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
+@@ -4631,6 +4689,36 @@ xfs_bmapi_convert_delalloc(
+ return error;
+ }
+
++/*
++ * Pass in a delalloc extent and convert it to real extents; return the real
++ * extent that maps offset_fsb in iomap.
++ */
++int
++xfs_bmapi_convert_delalloc(
++ struct xfs_inode *ip,
++ int whichfork,
++ loff_t offset,
++ struct iomap *iomap,
++ unsigned int *seq)
++{
++ int error;
++
++ /*
++ * Attempt to allocate whatever delalloc extent currently backs offset
++ * and put the result into iomap. Allocate in a loop because it may
++ * take several attempts to allocate real blocks for a contiguous
++ * delalloc extent if free space is sufficiently fragmented.
++ */
++ do {
++ error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
++ iomap, seq);
++ if (error)
++ return error;
++ } while (iomap->offset + iomap->length <= offset);
++
++ return 0;
++}
++
+ int
+ xfs_bmapi_remap(
+ struct xfs_trans *tp,
+@@ -4827,7 +4915,7 @@ xfs_bmap_del_extent_delay(
+ ASSERT(got_endoff >= del_endoff);
+
+ if (isrt) {
+- uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
++ uint64_t rtexts = del->br_blockcount;
+
+ do_div(rtexts, mp->m_sb.sb_rextsize);
+ xfs_mod_frextents(mp, rtexts);
+@@ -5014,7 +5102,6 @@ xfs_bmap_del_extent_real(
+ xfs_fileoff_t del_endoff; /* first offset past del */
+ int do_fx; /* free extent at end of routine */
+ int error; /* error return value */
+- int flags = 0;/* inode logging flags */
+ struct xfs_bmbt_irec got; /* current extent entry */
+ xfs_fileoff_t got_endoff; /* first offset past got */
+ int i; /* temp state */
+@@ -5027,6 +5114,8 @@ xfs_bmap_del_extent_real(
+ uint32_t state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_bmbt_irec old;
+
++ *logflagsp = 0;
++
+ mp = ip->i_mount;
+ XFS_STATS_INC(mp, xs_del_exlist);
+
+@@ -5039,7 +5128,6 @@ xfs_bmap_del_extent_real(
+ ASSERT(got_endoff >= del_endoff);
+ ASSERT(!isnullstartblock(got.br_startblock));
+ qfield = 0;
+- error = 0;
+
+ /*
+ * If it's the case where the directory code is running with no block
+@@ -5055,45 +5143,30 @@ xfs_bmap_del_extent_real(
+ del->br_startoff > got.br_startoff && del_endoff < got_endoff)
+ return -ENOSPC;
+
+- flags = XFS_ILOG_CORE;
++ *logflagsp = XFS_ILOG_CORE;
+ if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
+- xfs_filblks_t len;
+- xfs_extlen_t mod;
+-
+- len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
+- &mod);
+- ASSERT(mod == 0);
+-
+ if (!(bflags & XFS_BMAPI_REMAP)) {
+- xfs_fsblock_t bno;
+-
+- bno = div_u64_rem(del->br_startblock,
+- mp->m_sb.sb_rextsize, &mod);
+- ASSERT(mod == 0);
+-
+- error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
++ error = xfs_rtfree_blocks(tp, del->br_startblock,
++ del->br_blockcount);
+ if (error)
+- goto done;
++ return error;
+ }
+
+ do_fx = 0;
+- nblks = len * mp->m_sb.sb_rextsize;
+ qfield = XFS_TRANS_DQ_RTBCOUNT;
+ } else {
+ do_fx = 1;
+- nblks = del->br_blockcount;
+ qfield = XFS_TRANS_DQ_BCOUNT;
+ }
++ nblks = del->br_blockcount;
+
+ del_endblock = del->br_startblock + del->br_blockcount;
+ if (cur) {
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
+ if (error)
+- goto done;
+- if (XFS_IS_CORRUPT(mp, i != 1)) {
+- error = -EFSCORRUPTED;
+- goto done;
+- }
++ return error;
++ if (XFS_IS_CORRUPT(mp, i != 1))
++ return -EFSCORRUPTED;
+ }
+
+ if (got.br_startoff == del->br_startoff)
+@@ -5110,17 +5183,15 @@ xfs_bmap_del_extent_real(
+ xfs_iext_prev(ifp, icur);
+ ifp->if_nextents--;
+
+- flags |= XFS_ILOG_CORE;
++ *logflagsp |= XFS_ILOG_CORE;
+ if (!cur) {
+- flags |= xfs_ilog_fext(whichfork);
++ *logflagsp |= xfs_ilog_fext(whichfork);
+ break;
+ }
+ if ((error = xfs_btree_delete(cur, &i)))
+- goto done;
+- if (XFS_IS_CORRUPT(mp, i != 1)) {
+- error = -EFSCORRUPTED;
+- goto done;
+- }
++ return error;
++ if (XFS_IS_CORRUPT(mp, i != 1))
++ return -EFSCORRUPTED;
+ break;
+ case BMAP_LEFT_FILLING:
+ /*
+@@ -5131,12 +5202,12 @@ xfs_bmap_del_extent_real(
+ got.br_blockcount -= del->br_blockcount;
+ xfs_iext_update_extent(ip, state, icur, &got);
+ if (!cur) {
+- flags |= xfs_ilog_fext(whichfork);
++ *logflagsp |= xfs_ilog_fext(whichfork);
+ break;
+ }
+ error = xfs_bmbt_update(cur, &got);
+ if (error)
+- goto done;
++ return error;
+ break;
+ case BMAP_RIGHT_FILLING:
+ /*
+@@ -5145,12 +5216,12 @@ xfs_bmap_del_extent_real(
+ got.br_blockcount -= del->br_blockcount;
+ xfs_iext_update_extent(ip, state, icur, &got);
+ if (!cur) {
+- flags |= xfs_ilog_fext(whichfork);
++ *logflagsp |= xfs_ilog_fext(whichfork);
+ break;
+ }
+ error = xfs_bmbt_update(cur, &got);
+ if (error)
+- goto done;
++ return error;
+ break;
+ case 0:
+ /*
+@@ -5167,18 +5238,18 @@ xfs_bmap_del_extent_real(
+ new.br_state = got.br_state;
+ new.br_startblock = del_endblock;
+
+- flags |= XFS_ILOG_CORE;
++ *logflagsp |= XFS_ILOG_CORE;
+ if (cur) {
+ error = xfs_bmbt_update(cur, &got);
+ if (error)
+- goto done;
++ return error;
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+- goto done;
++ return error;
+ cur->bc_rec.b = new;
+ error = xfs_btree_insert(cur, &i);
+ if (error && error != -ENOSPC)
+- goto done;
++ return error;
+ /*
+ * If get no-space back from btree insert, it tried a
+ * split, and we have a zero block reservation. Fix up
+@@ -5191,33 +5262,28 @@ xfs_bmap_del_extent_real(
+ */
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
+ if (error)
+- goto done;
+- if (XFS_IS_CORRUPT(mp, i != 1)) {
+- error = -EFSCORRUPTED;
+- goto done;
+- }
++ return error;
++ if (XFS_IS_CORRUPT(mp, i != 1))
++ return -EFSCORRUPTED;
+ /*
+ * Update the btree record back
+ * to the original value.
+ */
+ error = xfs_bmbt_update(cur, &old);
+ if (error)
+- goto done;
++ return error;
+ /*
+ * Reset the extent record back
+ * to the original value.
+ */
+ xfs_iext_update_extent(ip, state, icur, &old);
+- flags = 0;
+- error = -ENOSPC;
+- goto done;
+- }
+- if (XFS_IS_CORRUPT(mp, i != 1)) {
+- error = -EFSCORRUPTED;
+- goto done;
++ *logflagsp = 0;
++ return -ENOSPC;
+ }
++ if (XFS_IS_CORRUPT(mp, i != 1))
++ return -EFSCORRUPTED;
+ } else
+- flags |= xfs_ilog_fext(whichfork);
++ *logflagsp |= xfs_ilog_fext(whichfork);
+
+ ifp->if_nextents++;
+ xfs_iext_next(ifp, icur);
+@@ -5241,7 +5307,7 @@ xfs_bmap_del_extent_real(
+ ((bflags & XFS_BMAPI_NODISCARD) ||
+ del->br_state == XFS_EXT_UNWRITTEN));
+ if (error)
+- goto done;
++ return error;
+ }
+ }
+
+@@ -5256,9 +5322,7 @@ xfs_bmap_del_extent_real(
+ if (qfield && !(bflags & XFS_BMAPI_REMAP))
+ xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
+
+-done:
+- *logflagsp = flags;
+- return error;
++ return 0;
+ }
+
+ /*
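
A hedged sketch of a caller under the reworked xfs_bmapi_write()
contract established above: a partial allocation now returns the
mappings gathered so far, a total failure returns -ENOSPC rather than
a quiet *nmap == 0, and a delalloc conversion whose blocks all land
before the requested offset returns -ENOSR. The function below is
hypothetical, not from the patch:

    static int example_map_blocks(struct xfs_trans *tp,
            struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
            uint32_t flags, xfs_extlen_t total)
    {
        struct xfs_bmbt_irec map;
        int nmap = 1;
        int error;

        error = xfs_bmapi_write(tp, ip, bno, len, flags, total,
                &map, &nmap);
        if (error == -ENOSR)
            return 0;      /* blocks exist, but before bno; retry later */
        if (error)
            return error;  /* includes -ENOSPC: nothing was allocated */

        /* nmap == 1; map covers at least the start of the range */
        return 0;
    }
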
+diff --git a/fs/xfs/libxfs/xfs_btree_staging.c b/fs/xfs/libxfs/xfs_btree_staging.c
+index dd75e208b543e7..29e3f8ccb1852a 100644
+--- a/fs/xfs/libxfs/xfs_btree_staging.c
++++ b/fs/xfs/libxfs/xfs_btree_staging.c
+@@ -342,9 +342,7 @@ xfs_btree_bload_drop_buf(
+ if (*bpp == NULL)
+ return;
+
+- if (!xfs_buf_delwri_queue(*bpp, buffers_list))
+- ASSERT(0);
+-
++ xfs_buf_delwri_queue_here(*bpp, buffers_list);
+ xfs_buf_relse(*bpp);
+ *bpp = NULL;
+ }
+diff --git a/fs/xfs/libxfs/xfs_btree_staging.h b/fs/xfs/libxfs/xfs_btree_staging.h
+index f0d2976050aea4..5f638f711246ee 100644
+--- a/fs/xfs/libxfs/xfs_btree_staging.h
++++ b/fs/xfs/libxfs/xfs_btree_staging.h
+@@ -37,12 +37,6 @@ struct xbtree_ifakeroot {
+
+ /* Number of bytes available for this fork in the inode. */
+ unsigned int if_fork_size;
+-
+- /* Fork format. */
+- unsigned int if_format;
+-
+- /* Number of records. */
+- unsigned int if_extents;
+ };
+
+ /* Cursor interactions with fake roots for inode-rooted btrees. */
+diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
+index e576560b46e978..12e3cca804b7ec 100644
+--- a/fs/xfs/libxfs/xfs_da_btree.c
++++ b/fs/xfs/libxfs/xfs_da_btree.c
+@@ -2158,8 +2158,8 @@ xfs_da_grow_inode_int(
+ struct xfs_inode *dp = args->dp;
+ int w = args->whichfork;
+ xfs_rfsblock_t nblks = dp->i_nblocks;
+- struct xfs_bmbt_irec map, *mapp;
+- int nmap, error, got, i, mapi;
++ struct xfs_bmbt_irec map, *mapp = &map;
++ int nmap, error, got, i, mapi = 1;
+
+ /*
+ * Find a spot in the file space to put the new block.
+@@ -2175,14 +2175,7 @@ xfs_da_grow_inode_int(
+ error = xfs_bmapi_write(tp, dp, *bno, count,
+ xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
+ args->total, &map, &nmap);
+- if (error)
+- return error;
+-
+- ASSERT(nmap <= 1);
+- if (nmap == 1) {
+- mapp = &map;
+- mapi = 1;
+- } else if (nmap == 0 && count > 1) {
++ if (error == -ENOSPC && count > 1) {
+ xfs_fileoff_t b;
+ int c;
+
+@@ -2199,16 +2192,13 @@ xfs_da_grow_inode_int(
+ args->total, &mapp[mapi], &nmap);
+ if (error)
+ goto out_free_map;
+- if (nmap < 1)
+- break;
+ mapi += nmap;
+ b = mapp[mapi - 1].br_startoff +
+ mapp[mapi - 1].br_blockcount;
+ }
+- } else {
+- mapi = 0;
+- mapp = NULL;
+ }
++ if (error)
++ goto out_free_map;
+
+ /*
+ * Count the blocks we got, make sure it matches the total.
+@@ -2316,10 +2306,17 @@ xfs_da3_swap_lastblock(
+ return error;
+ /*
+ * Copy the last block into the dead buffer and log it.
++	 * On CRC-enabled file systems, also update the stamped-in blkno.
+ */
+ memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
++ if (xfs_has_crc(mp)) {
++ struct xfs_da3_blkinfo *da3 = dead_buf->b_addr;
++
++ da3->blkno = cpu_to_be64(xfs_buf_daddr(dead_buf));
++ }
+ xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
+ dead_info = dead_buf->b_addr;
++
+ /*
+ * Get values from the moved block.
+ */
+diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
+index f9015f88eca706..ebcb9066398f47 100644
+--- a/fs/xfs/libxfs/xfs_da_format.h
++++ b/fs/xfs/libxfs/xfs_da_format.h
+@@ -703,8 +703,13 @@ struct xfs_attr3_leafblock {
+ #define XFS_ATTR_ROOT (1u << XFS_ATTR_ROOT_BIT)
+ #define XFS_ATTR_SECURE (1u << XFS_ATTR_SECURE_BIT)
+ #define XFS_ATTR_INCOMPLETE (1u << XFS_ATTR_INCOMPLETE_BIT)
++
+ #define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
+
++#define XFS_ATTR_ONDISK_MASK (XFS_ATTR_NSP_ONDISK_MASK | \
++ XFS_ATTR_LOCAL | \
++ XFS_ATTR_INCOMPLETE)
++
+ /*
+ * Alignment for namelist and valuelist entries (since they are mixed
+ * there can be only one alignment value)
+diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
+index bcfb6a4203cdd9..363da37a8e7fbe 100644
+--- a/fs/xfs/libxfs/xfs_defer.c
++++ b/fs/xfs/libxfs/xfs_defer.c
+@@ -245,26 +245,63 @@ xfs_defer_create_intents(
+ return ret;
+ }
+
++static inline void
++xfs_defer_pending_abort(
++ struct xfs_mount *mp,
++ struct xfs_defer_pending *dfp)
++{
++ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
++
++ trace_xfs_defer_pending_abort(mp, dfp);
++
++ if (dfp->dfp_intent && !dfp->dfp_done) {
++ ops->abort_intent(dfp->dfp_intent);
++ dfp->dfp_intent = NULL;
++ }
++}
++
++static inline void
++xfs_defer_pending_cancel_work(
++ struct xfs_mount *mp,
++ struct xfs_defer_pending *dfp)
++{
++ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
++ struct list_head *pwi;
++ struct list_head *n;
++
++ trace_xfs_defer_cancel_list(mp, dfp);
++
++ list_del(&dfp->dfp_list);
++ list_for_each_safe(pwi, n, &dfp->dfp_work) {
++ list_del(pwi);
++ dfp->dfp_count--;
++ trace_xfs_defer_cancel_item(mp, dfp, pwi);
++ ops->cancel_item(pwi);
++ }
++ ASSERT(dfp->dfp_count == 0);
++ kmem_cache_free(xfs_defer_pending_cache, dfp);
++}
++
++STATIC void
++xfs_defer_pending_abort_list(
++ struct xfs_mount *mp,
++ struct list_head *dop_list)
++{
++ struct xfs_defer_pending *dfp;
++
++ /* Abort intent items that don't have a done item. */
++ list_for_each_entry(dfp, dop_list, dfp_list)
++ xfs_defer_pending_abort(mp, dfp);
++}
++
+ /* Abort all the intents that were committed. */
+ STATIC void
+ xfs_defer_trans_abort(
+ struct xfs_trans *tp,
+ struct list_head *dop_pending)
+ {
+- struct xfs_defer_pending *dfp;
+- const struct xfs_defer_op_type *ops;
+-
+ trace_xfs_defer_trans_abort(tp, _RET_IP_);
+-
+- /* Abort intent items that don't have a done item. */
+- list_for_each_entry(dfp, dop_pending, dfp_list) {
+- ops = defer_op_types[dfp->dfp_type];
+- trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
+- if (dfp->dfp_intent && !dfp->dfp_done) {
+- ops->abort_intent(dfp->dfp_intent);
+- dfp->dfp_intent = NULL;
+- }
+- }
++ xfs_defer_pending_abort_list(tp->t_mountp, dop_pending);
+ }
+
+ /*
+@@ -382,27 +419,13 @@ xfs_defer_cancel_list(
+ {
+ struct xfs_defer_pending *dfp;
+ struct xfs_defer_pending *pli;
+- struct list_head *pwi;
+- struct list_head *n;
+- const struct xfs_defer_op_type *ops;
+
+ /*
+ * Free the pending items. Caller should already have arranged
+ * for the intent items to be released.
+ */
+- list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
+- ops = defer_op_types[dfp->dfp_type];
+- trace_xfs_defer_cancel_list(mp, dfp);
+- list_del(&dfp->dfp_list);
+- list_for_each_safe(pwi, n, &dfp->dfp_work) {
+- list_del(pwi);
+- dfp->dfp_count--;
+- trace_xfs_defer_cancel_item(mp, dfp, pwi);
+- ops->cancel_item(pwi);
+- }
+- ASSERT(dfp->dfp_count == 0);
+- kmem_cache_free(xfs_defer_pending_cache, dfp);
+- }
++ list_for_each_entry_safe(dfp, pli, dop_list, dfp_list)
++ xfs_defer_pending_cancel_work(mp, dfp);
+ }
+
+ /*
+@@ -658,6 +681,39 @@ xfs_defer_add(
+ dfp->dfp_count++;
+ }
+
++/*
++ * Create a pending deferred work item to replay the recovered intent item
++ * and add it to the list.
++ */
++void
++xfs_defer_start_recovery(
++ struct xfs_log_item *lip,
++ enum xfs_defer_ops_type dfp_type,
++ struct list_head *r_dfops)
++{
++ struct xfs_defer_pending *dfp;
++
++ dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
++ GFP_NOFS | __GFP_NOFAIL);
++ dfp->dfp_type = dfp_type;
++ dfp->dfp_intent = lip;
++ INIT_LIST_HEAD(&dfp->dfp_work);
++ list_add_tail(&dfp->dfp_list, r_dfops);
++}
++
++/*
++ * Cancel a deferred work item created to recover a log intent item. @dfp
++ * will be freed after this function returns.
++ */
++void
++xfs_defer_cancel_recovery(
++ struct xfs_mount *mp,
++ struct xfs_defer_pending *dfp)
++{
++ xfs_defer_pending_abort(mp, dfp);
++ xfs_defer_pending_cancel_work(mp, dfp);
++}
++
+ /*
+ * Move deferred ops from one transaction to another and reset the source to
+ * initial state. This is primarily used to carry state forward across
+@@ -756,12 +812,13 @@ xfs_defer_ops_capture(
+
+ /* Release all resources that we used to capture deferred ops. */
+ void
+-xfs_defer_ops_capture_free(
++xfs_defer_ops_capture_abort(
+ struct xfs_mount *mp,
+ struct xfs_defer_capture *dfc)
+ {
+ unsigned short i;
+
++ xfs_defer_pending_abort_list(mp, &dfc->dfc_dfops);
+ xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
+
+ for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
+@@ -802,7 +859,7 @@ xfs_defer_ops_capture_and_commit(
+ /* Commit the transaction and add the capture structure to the list. */
+ error = xfs_trans_commit(tp);
+ if (error) {
+- xfs_defer_ops_capture_free(mp, dfc);
++ xfs_defer_ops_capture_abort(mp, dfc);
+ return error;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
+index 114a3a4930a3c4..5dce938ba3d594 100644
+--- a/fs/xfs/libxfs/xfs_defer.h
++++ b/fs/xfs/libxfs/xfs_defer.h
+@@ -121,10 +121,15 @@ int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp,
+ struct list_head *capture_list);
+ void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp,
+ struct xfs_defer_resources *dres);
+-void xfs_defer_ops_capture_free(struct xfs_mount *mp,
++void xfs_defer_ops_capture_abort(struct xfs_mount *mp,
+ struct xfs_defer_capture *d);
+ void xfs_defer_resources_rele(struct xfs_defer_resources *dres);
+
++void xfs_defer_start_recovery(struct xfs_log_item *lip,
++ enum xfs_defer_ops_type dfp_type, struct list_head *r_dfops);
++void xfs_defer_cancel_recovery(struct xfs_mount *mp,
++ struct xfs_defer_pending *dfp);
++
+ int __init xfs_defer_init_item_caches(void);
+ void xfs_defer_destroy_item_caches(void);
+
+diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
+index 371dc07233e059..20acb8573d7a28 100644
+--- a/fs/xfs/libxfs/xfs_format.h
++++ b/fs/xfs/libxfs/xfs_format.h
+@@ -98,7 +98,7 @@ typedef struct xfs_sb {
+ uint32_t sb_blocksize; /* logical block size, bytes */
+ xfs_rfsblock_t sb_dblocks; /* number of data blocks */
+ xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */
+- xfs_rtblock_t sb_rextents; /* number of realtime extents */
++ xfs_rtbxlen_t sb_rextents; /* number of realtime extents */
+ uuid_t sb_uuid; /* user-visible file system unique id */
+ xfs_fsblock_t sb_logstart; /* starting block of log if internal */
+ xfs_ino_t sb_rootino; /* root inode number */
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index a35781577cad9f..423d39b6b91744 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -366,17 +366,40 @@ xfs_dinode_verify_fork(
+ /*
+ * For fork types that can contain local data, check that the fork
+ * format matches the size of local data contained within the fork.
+- *
+- * For all types, check that when the size says the should be in extent
+- * or btree format, the inode isn't claiming it is in local format.
+ */
+ if (whichfork == XFS_DATA_FORK) {
+- if (S_ISDIR(mode) || S_ISLNK(mode)) {
++ /*
++ * A directory small enough to fit in the inode must be stored
++ * in local format. The directory sf <-> extents conversion
++ * code updates the directory size accordingly. Directories
++ * being truncated have zero size and are not subject to this
++ * check.
++ */
++ if (S_ISDIR(mode)) {
++ if (dip->di_size &&
++ be64_to_cpu(dip->di_size) <= fork_size &&
++ fork_format != XFS_DINODE_FMT_LOCAL)
++ return __this_address;
++ }
++
++ /*
++ * A symlink with a target small enough to fit in the inode can
++ * be stored in extents format if xattrs were added (thus
++ * converting the data fork from shortform to remote format)
++ * and then removed.
++ */
++ if (S_ISLNK(mode)) {
+ if (be64_to_cpu(dip->di_size) <= fork_size &&
++ fork_format != XFS_DINODE_FMT_EXTENTS &&
+ fork_format != XFS_DINODE_FMT_LOCAL)
+ return __this_address;
+ }
+
++ /*
++ * For all types, check that when the size says the fork should
++ * be in extent or btree format, the inode isn't claiming to be
++ * in local format.
++ */
+ if (be64_to_cpu(dip->di_size) > fork_size &&
+ fork_format == XFS_DINODE_FMT_LOCAL)
+ return __this_address;
+@@ -492,9 +515,19 @@ xfs_dinode_verify(
+ if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
+ return __this_address;
+
+- /* No zero-length symlinks/dirs. */
+- if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
+- return __this_address;
++ /*
++ * No zero-length symlinks/dirs unless they're unlinked and hence being
++ * inactivated.
++ */
++ if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) {
++ if (dip->di_version > 1) {
++ if (dip->di_nlink)
++ return __this_address;
++ } else {
++ if (dip->di_onlink)
++ return __this_address;
++ }
++ }
+
+ fa = xfs_dinode_verify_nrext64(mp, dip);
+ if (fa)
+@@ -508,6 +541,9 @@ xfs_dinode_verify(
+ if (mode && nextents + naextents > nblocks)
+ return __this_address;
+
++ if (nextents + naextents == 0 && nblocks != 0)
++ return __this_address;
++
+ if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents)
+ return __this_address;
+
+diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
+index a5100a11faf9cd..13583df9f2397f 100644
+--- a/fs/xfs/libxfs/xfs_log_recover.h
++++ b/fs/xfs/libxfs/xfs_log_recover.h
+@@ -153,4 +153,9 @@ xlog_recover_resv(const struct xfs_trans_res *r)
+ return ret;
+ }
+
++void xlog_recover_intent_item(struct xlog *log, struct xfs_log_item *lip,
++ xfs_lsn_t lsn, unsigned int dfp_type);
++void xlog_recover_transfer_intent(struct xfs_trans *tp,
++ struct xfs_defer_pending *dfp);
++
+ #endif /* __XFS_LOG_RECOVER_H__ */
+diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
+index fa180ab66b73a7..760172a65aff31 100644
+--- a/fs/xfs/libxfs/xfs_rtbitmap.c
++++ b/fs/xfs/libxfs/xfs_rtbitmap.c
+@@ -16,6 +16,7 @@
+ #include "xfs_trans.h"
+ #include "xfs_rtalloc.h"
+ #include "xfs_error.h"
++#include "xfs_rtbitmap.h"
+
+ /*
+ * Realtime allocator bitmap functions shared with userspace.
+@@ -1005,6 +1006,39 @@ xfs_rtfree_extent(
+ return 0;
+ }
+
++/*
++ * Free some blocks in the realtime subvolume. rtbno and rtlen are in units of
++ * rt blocks, not rt extents; both must be aligned to the rt extent size;
++ * and rtlen cannot exceed XFS_MAX_BMBT_EXTLEN.
++ */
++int
++xfs_rtfree_blocks(
++ struct xfs_trans *tp,
++ xfs_fsblock_t rtbno,
++ xfs_filblks_t rtlen)
++{
++ struct xfs_mount *mp = tp->t_mountp;
++ xfs_rtblock_t bno;
++ xfs_filblks_t len;
++ xfs_extlen_t mod;
++
++ ASSERT(rtlen <= XFS_MAX_BMBT_EXTLEN);
++
++ len = div_u64_rem(rtlen, mp->m_sb.sb_rextsize, &mod);
++ if (mod) {
++ ASSERT(mod == 0);
++ return -EIO;
++ }
++
++ bno = div_u64_rem(rtbno, mp->m_sb.sb_rextsize, &mod);
++ if (mod) {
++ ASSERT(mod == 0);
++ return -EIO;
++ }
++
++ return xfs_rtfree_extent(tp, bno, len);
++}
++
+ /* Find all the free records within a given range. */
+ int
+ xfs_rtalloc_query_range(
+@@ -1096,3 +1130,4 @@ xfs_rtalloc_extent_is_free(
+ *is_free = matches;
+ return 0;
+ }
++
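
A worked example for xfs_rtfree_blocks() above, assuming sb_rextsize =
4 rt blocks per rt extent:

    len = div_u64_rem(8, 4, &mod);   /* rtlen 8  -> len 2, mod 0 */
    bno = div_u64_rem(16, 4, &mod);  /* rtbno 16 -> bno 4, mod 0 */

so the call becomes xfs_rtfree_extent(tp, 4, 2). A misaligned rtbno of
18 would leave mod = 2, trip the ASSERT on debug builds, and return
-EIO on production kernels.
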
+diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h
+new file mode 100644
+index 00000000000000..b8971298334775
+--- /dev/null
++++ b/fs/xfs/libxfs/xfs_rtbitmap.h
+@@ -0,0 +1,83 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
++ * All Rights Reserved.
++ */
++#ifndef __XFS_RTBITMAP_H__
++#define __XFS_RTBITMAP_H__
++
++/*
++ * XXX: Most of the realtime allocation functions deal in units of realtime
++ * extents, not realtime blocks. This looks funny when paired with the type
++ * name and screams for a larger cleanup.
++ */
++struct xfs_rtalloc_rec {
++ xfs_rtblock_t ar_startext;
++ xfs_rtbxlen_t ar_extcount;
++};
++
++typedef int (*xfs_rtalloc_query_range_fn)(
++ struct xfs_mount *mp,
++ struct xfs_trans *tp,
++ const struct xfs_rtalloc_rec *rec,
++ void *priv);
++
++#ifdef CONFIG_XFS_RT
++int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
++int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t start, xfs_extlen_t len, int val,
++ xfs_rtblock_t *new, int *stat);
++int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t start, xfs_rtblock_t limit,
++ xfs_rtblock_t *rtblock);
++int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t start, xfs_rtblock_t limit,
++ xfs_rtblock_t *rtblock);
++int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t start, xfs_extlen_t len, int val);
++int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp,
++ int log, xfs_rtblock_t bbno, int delta,
++ struct xfs_buf **rbpp, xfs_fsblock_t *rsb,
++ xfs_suminfo_t *sum);
++int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
++ xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp,
++ xfs_fsblock_t *rsb);
++int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t start, xfs_extlen_t len,
++ struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
++int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
++ const struct xfs_rtalloc_rec *low_rec,
++ const struct xfs_rtalloc_rec *high_rec,
++ xfs_rtalloc_query_range_fn fn, void *priv);
++int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtalloc_query_range_fn fn,
++ void *priv);
++bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
++int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_rtblock_t start, xfs_extlen_t len,
++ bool *is_free);
++/*
++ * Free an extent in the realtime subvolume. Length is expressed in
++ * realtime extents, as is the block number.
++ */
++int /* error */
++xfs_rtfree_extent(
++ struct xfs_trans *tp, /* transaction pointer */
++ xfs_rtblock_t bno, /* starting block number to free */
++ xfs_extlen_t len); /* length of extent freed */
++
++/* Same as above, but in units of rt blocks. */
++int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
++ xfs_filblks_t rtlen);
++
++#else /* CONFIG_XFS_RT */
++# define xfs_rtfree_extent(t,b,l) (-ENOSYS)
++# define xfs_rtfree_blocks(t,rb,rl) (-ENOSYS)
++# define xfs_rtalloc_query_range(m,t,l,h,f,p) (-ENOSYS)
++# define xfs_rtalloc_query_all(m,t,f,p) (-ENOSYS)
++# define xfs_rtbuf_get(m,t,b,i,p) (-ENOSYS)
++# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (-ENOSYS)
++#endif /* CONFIG_XFS_RT */
++
++#endif /* __XFS_RTBITMAP_H__ */
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index 6264daaab37b06..424acdd4b0fca4 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -25,6 +25,7 @@
+ #include "xfs_da_format.h"
+ #include "xfs_health.h"
+ #include "xfs_ag.h"
++#include "xfs_rtbitmap.h"
+
+ /*
+ * Physical superblock buffer manipulations. Shared with libxfs in userspace.
+@@ -508,8 +509,9 @@ xfs_validate_sb_common(
+ rbmblocks = howmany_64(sbp->sb_rextents,
+ NBBY * sbp->sb_blocksize);
+
+- if (sbp->sb_rextents != rexts ||
+- sbp->sb_rextslog != xfs_highbit32(sbp->sb_rextents) ||
++ if (!xfs_validate_rtextents(rexts) ||
++ sbp->sb_rextents != rexts ||
++ sbp->sb_rextslog != xfs_compute_rextslog(rexts) ||
+ sbp->sb_rbmblocks != rbmblocks) {
+ xfs_notice(mp,
+ "realtime geometry sanity check failed");
+@@ -528,7 +530,8 @@ xfs_validate_sb_common(
+ }
+
+ if (!xfs_validate_stripe_geometry(mp, XFS_FSB_TO_B(mp, sbp->sb_unit),
+- XFS_FSB_TO_B(mp, sbp->sb_width), 0, false))
++ XFS_FSB_TO_B(mp, sbp->sb_width), 0,
++ xfs_buf_daddr(bp) == XFS_SB_DADDR, false))
+ return -EFSCORRUPTED;
+
+ /*
+@@ -1028,11 +1031,12 @@ xfs_log_sb(
+ * and hence we don't need have to update it here.
+ */
+ if (xfs_has_lazysbcount(mp)) {
+- mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
++ mp->m_sb.sb_icount = percpu_counter_sum_positive(&mp->m_icount);
+ mp->m_sb.sb_ifree = min_t(uint64_t,
+- percpu_counter_sum(&mp->m_ifree),
++ percpu_counter_sum_positive(&mp->m_ifree),
+ mp->m_sb.sb_icount);
+- mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
++ mp->m_sb.sb_fdblocks =
++ percpu_counter_sum_positive(&mp->m_fdblocks);
+ }
+
+ xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
+@@ -1317,8 +1321,10 @@ xfs_sb_get_secondary(
+ }
+
+ /*
+- * sunit, swidth, sectorsize(optional with 0) should be all in bytes,
+- * so users won't be confused by values in error messages.
++ * sunit, swidth, and sectorsize (optional, pass 0) should all be in bytes,
++ * so users won't be confused by values in error messages. This function
++ * returns false if the stripe geometry is invalid and the caller is unable
++ * to repair the stripe configuration later in the mount process.
+ */
+ bool
+ xfs_validate_stripe_geometry(
+@@ -1326,20 +1332,21 @@ xfs_validate_stripe_geometry(
+ __s64 sunit,
+ __s64 swidth,
+ int sectorsize,
++ bool may_repair,
+ bool silent)
+ {
+ if (swidth > INT_MAX) {
+ if (!silent)
+ xfs_notice(mp,
+ "stripe width (%lld) is too large", swidth);
+- return false;
++ goto check_override;
+ }
+
+ if (sunit > swidth) {
+ if (!silent)
+ xfs_notice(mp,
+ "stripe unit (%lld) is larger than the stripe width (%lld)", sunit, swidth);
+- return false;
++ goto check_override;
+ }
+
+ if (sectorsize && (int)sunit % sectorsize) {
+@@ -1347,21 +1354,21 @@ xfs_validate_stripe_geometry(
+ xfs_notice(mp,
+ "stripe unit (%lld) must be a multiple of the sector size (%d)",
+ sunit, sectorsize);
+- return false;
++ goto check_override;
+ }
+
+ if (sunit && !swidth) {
+ if (!silent)
+ xfs_notice(mp,
+ "invalid stripe unit (%lld) and stripe width of 0", sunit);
+- return false;
++ goto check_override;
+ }
+
+ if (!sunit && swidth) {
+ if (!silent)
+ xfs_notice(mp,
+ "invalid stripe width (%lld) and stripe unit of 0", swidth);
+- return false;
++ goto check_override;
+ }
+
+ if (sunit && (int)swidth % (int)sunit) {
+@@ -1369,7 +1376,39 @@ xfs_validate_stripe_geometry(
+ xfs_notice(mp,
+ "stripe width (%lld) must be a multiple of the stripe unit (%lld)",
+ swidth, sunit);
+- return false;
++ goto check_override;
+ }
+ return true;
++
++check_override:
++ if (!may_repair)
++ return false;
++ /*
++ * During mount, mp->m_dalign will not be set unless the sunit mount
++ * option was set. If it was set, ignore the bad on-disk stripe alignment
++ * values and let the validation later in the mount process attempt to
++ * overwrite them with the values supplied by the mount options.
++ */
++ if (!mp->m_dalign)
++ return false;
++ if (!silent)
++ xfs_notice(mp,
++"Will try to correct with specified mount options sunit (%d) and swidth (%d)",
++ BBTOB(mp->m_dalign), BBTOB(mp->m_swidth));
++ return true;
++}
++
++/*
++ * Compute the maximum level number of the realtime summary file, as defined by
++ * mkfs. The historic use of highbit32 on a 64-bit quantity prohibited correct
++ * use of rt volumes with more than 2^32 extents.
++ */
++uint8_t
++xfs_compute_rextslog(
++ xfs_rtbxlen_t rtextents)
++{
++ if (!rtextents)
++ return 0;
++ return xfs_highbit64(rtextents);
+ }
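
Example values for xfs_compute_rextslog() above; xfs_highbit64()
returns the index of the highest set bit, and zero is special-cased:

    xfs_compute_rextslog(0);           /* 0 (special case)        */
    xfs_compute_rextslog(1);           /* xfs_highbit64(1)   = 0  */
    xfs_compute_rextslog(255);         /* xfs_highbit64(255) = 7  */
    xfs_compute_rextslog(1ULL << 32);  /* 32; highbit32() would
                                          have truncated it first */

This is why the superblock validation hunk above compares sb_rextslog
against xfs_compute_rextslog(rexts) instead of xfs_highbit32(sb_rextents).
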
+diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
+index a5e14740ec9ac3..37b1ed1bc2095e 100644
+--- a/fs/xfs/libxfs/xfs_sb.h
++++ b/fs/xfs/libxfs/xfs_sb.h
+@@ -25,7 +25,7 @@ extern uint64_t xfs_sb_version_to_features(struct xfs_sb *sbp);
+
+ extern int xfs_update_secondary_sbs(struct xfs_mount *mp);
+
+-#define XFS_FS_GEOM_MAX_STRUCT_VER (4)
++#define XFS_FS_GEOM_MAX_STRUCT_VER (5)
+ extern void xfs_fs_geometry(struct xfs_mount *mp, struct xfs_fsop_geom *geo,
+ int struct_version);
+ extern int xfs_sb_read_secondary(struct xfs_mount *mp,
+@@ -35,7 +35,10 @@ extern int xfs_sb_get_secondary(struct xfs_mount *mp,
+ struct xfs_trans *tp, xfs_agnumber_t agno,
+ struct xfs_buf **bpp);
+
+-extern bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
+- __s64 sunit, __s64 swidth, int sectorsize, bool silent);
++bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
++ __s64 sunit, __s64 swidth, int sectorsize, bool may_repair,
++ bool silent);
++
++uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
+
+ #endif /* __XFS_SB_H__ */
+diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
+index 85122002148417..311c5ee6774825 100644
+--- a/fs/xfs/libxfs/xfs_types.h
++++ b/fs/xfs/libxfs/xfs_types.h
+@@ -31,6 +31,7 @@ typedef uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */
+ typedef uint64_t xfs_rtblock_t; /* extent (block) in realtime area */
+ typedef uint64_t xfs_fileoff_t; /* block number in a file */
+ typedef uint64_t xfs_filblks_t; /* number of blocks in a file */
++typedef uint64_t xfs_rtbxlen_t; /* rtbitmap extent length in rtextents */
+
+ typedef int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */
+
+@@ -239,4 +240,16 @@ bool xfs_verify_fileoff(struct xfs_mount *mp, xfs_fileoff_t off);
+ bool xfs_verify_fileext(struct xfs_mount *mp, xfs_fileoff_t off,
+ xfs_fileoff_t len);
+
++/* Do we support an rt volume having this number of rtextents? */
++static inline bool
++xfs_validate_rtextents(
++ xfs_rtbxlen_t rtextents)
++{
++ /* No runt rt volumes */
++ if (rtextents == 0)
++ return false;
++
++ return true;
++}
++
+ #endif /* __XFS_TYPES_H__ */
+diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
+index 6c16d9530ccaca..147babe738d201 100644
+--- a/fs/xfs/scrub/attr.c
++++ b/fs/xfs/scrub/attr.c
+@@ -182,32 +182,23 @@ xchk_xattr_listent(
+ return;
+ }
+
++ if (flags & ~XFS_ATTR_ONDISK_MASK) {
++ xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
++ goto fail_xref;
++ }
++
+ if (flags & XFS_ATTR_INCOMPLETE) {
+ /* Incomplete attr key, just mark the inode for preening. */
+ xchk_ino_set_preen(sx->sc, context->dp->i_ino);
+ return;
+ }
+
+- /* Only one namespace bit allowed. */
+- if (hweight32(flags & XFS_ATTR_NSP_ONDISK_MASK) > 1) {
+- xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
+- goto fail_xref;
+- }
+-
+ /* Does this name make sense? */
+- if (!xfs_attr_namecheck(name, namelen)) {
++ if (!xfs_attr_namecheck(flags, name, namelen)) {
+ xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
+ goto fail_xref;
+ }
+
+- /*
+- * Local xattr values are stored in the attr leaf block, so we don't
+- * need to retrieve the value from a remote block to detect corruption
+- * problems.
+- */
+- if (flags & XFS_ATTR_LOCAL)
+- goto fail_xref;
+-
+ /*
+	 * Try to allocate enough memory to extract the attr value. If that
+ * doesn't work, we overload the seen_enough variable to convey
+@@ -223,6 +214,11 @@ xchk_xattr_listent(
+
+ args.value = ab->value;
+
++ /*
++ * Get the attr value to ensure that lookup can find this attribute
++ * through the dabtree indexing and that remote value retrieval also
++ * works correctly.
++ */
+ error = xfs_attr_get_ilocked(&args);
+ /* ENODATA means the hash lookup failed and the attr is bad */
+ if (error == -ENODATA)
+@@ -463,7 +459,6 @@ xchk_xattr_rec(
+ xfs_dahash_t hash;
+ int nameidx;
+ int hdrsize;
+- unsigned int badflags;
+ int error;
+
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+@@ -493,10 +488,15 @@ xchk_xattr_rec(
+
+ /* Retrieve the entry and check it. */
+ hash = be32_to_cpu(ent->hashval);
+- badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
+- XFS_ATTR_INCOMPLETE);
+- if ((ent->flags & badflags) != 0)
++ if (ent->flags & ~XFS_ATTR_ONDISK_MASK) {
++ xchk_da_set_corrupt(ds, level);
++ return 0;
++ }
++ if (!xfs_attr_check_namespace(ent->flags)) {
+ xchk_da_set_corrupt(ds, level);
++ return 0;
++ }
++
+ if (ent->flags & XFS_ATTR_LOCAL) {
+ lentry = (struct xfs_attr_leaf_name_local *)
+ (((char *)bp->b_addr) + nameidx);
+@@ -561,6 +561,15 @@ xchk_xattr_check_sf(
+ break;
+ }
+
++ /*
++ * Shortform entries do not set LOCAL or INCOMPLETE, so the
++ * only valid flag bits here are for namespaces.
++ */
++ if (sfe->flags & ~XFS_ATTR_NSP_ONDISK_MASK) {
++ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
++ break;
++ }
++
+ if (!xchk_xattr_set_map(sc, ab->usedmap,
+ (char *)sfe - (char *)sf,
+ sizeof(struct xfs_attr_sf_entry))) {
+diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
+index 1935b9ce1885c9..c3a9f33e5a8d12 100644
+--- a/fs/xfs/scrub/btree.c
++++ b/fs/xfs/scrub/btree.c
+@@ -385,7 +385,12 @@ xchk_btree_check_block_owner(
+ agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
+ agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
+
+- init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
++ /*
++ * If the btree being examined is not itself a per-AG btree, initialize
++ * sc->sa so that we can check for the presence of an ownership record
++ * in the rmap btree for the AG containing the block.
++ */
++ init_sa = bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE;
+ if (init_sa) {
+ error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa);
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
+diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
+index de24532fe08309..f10cd4fb0abd0d 100644
+--- a/fs/xfs/scrub/common.c
++++ b/fs/xfs/scrub/common.c
+@@ -733,7 +733,9 @@ xchk_iget(
+ xfs_ino_t inum,
+ struct xfs_inode **ipp)
+ {
+- return xfs_iget(sc->mp, sc->tp, inum, XFS_IGET_UNTRUSTED, 0, ipp);
++ ASSERT(sc->tp != NULL);
++
++ return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
+ }
+
+ /*
+@@ -784,8 +786,8 @@ xchk_iget_agi(
+ if (error)
+ return error;
+
+- error = xfs_iget(mp, tp, inum,
+- XFS_IGET_NORETRY | XFS_IGET_UNTRUSTED, 0, ipp);
++ error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
++ ipp);
+ if (error == -EAGAIN) {
+ /*
+ * The inode may be in core but temporarily unavailable and may
+@@ -882,8 +884,8 @@ xchk_iget_for_scrubbing(
+ if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
+ return -ENOENT;
+
+- /* Try a regular untrusted iget. */
+- error = xchk_iget(sc, sc->sm->sm_ino, &ip);
++ /* Try a safe untrusted iget. */
++ error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
+ if (!error)
+ return xchk_install_handle_inode(sc, ip);
+ if (error == -ENOENT)
+@@ -976,9 +978,7 @@ xchk_irele(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
+ {
+- if (current->journal_info != NULL) {
+- ASSERT(current->journal_info == sc->tp);
+-
++ if (sc->tp) {
+ /*
+ * If we are in a transaction, we /cannot/ drop the inode
+ * ourselves, because the VFS will trigger writeback, which
+@@ -994,12 +994,6 @@ xchk_irele(
+ spin_lock(&VFS_I(ip)->i_lock);
+ VFS_I(ip)->i_state &= ~I_DONTCACHE;
+ spin_unlock(&VFS_I(ip)->i_lock);
+- } else if (atomic_read(&VFS_I(ip)->i_count) == 1) {
+- /*
+- * If this is the last reference to the inode and the caller
+- * permits it, set DONTCACHE to avoid thrashing.
+- */
+- d_mark_dontcache(VFS_I(ip));
+ }
+
+ xfs_irele(ip);
+diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
+index cabdc0e16838c7..c83cf9e5b55f0f 100644
+--- a/fs/xfs/scrub/common.h
++++ b/fs/xfs/scrub/common.h
+@@ -151,12 +151,37 @@ void xchk_iunlock(struct xfs_scrub *sc, unsigned int ilock_flags);
+
+ void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
+
++/*
++ * Grab the inode at @inum. The caller must have created a scrub transaction
++ * so that we can confirm the inumber by walking the inobt without
++ * deadlocking on a loop in the inobt.
++ */
+ int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
+ int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
+ struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
+ void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
+ int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);
+
++/*
++ * Safe version of (untrusted) xchk_iget that uses an empty transaction to
++ * avoid deadlocking on loops in the inobt. This should only be used in a
++ * scrub or repair setup routine, and only prior to grabbing a transaction.
++ */
++static inline int
++xchk_iget_safe(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp)
++{
++ int error;
++
++ ASSERT(sc->tp == NULL);
++
++ error = xchk_trans_alloc(sc, 0);
++ if (error)
++ return error;
++ error = xchk_iget(sc, inum, ipp);
++ xchk_trans_cancel(sc);
++ return error;
++}
++
+ /*
+ * Don't bother cross-referencing if we already found corruption or cross
+ * referencing discrepancies.
+diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
+index 05be757668bb25..5799e9a94f1f66 100644
+--- a/fs/xfs/scrub/fscounters.c
++++ b/fs/xfs/scrub/fscounters.c
+@@ -16,7 +16,7 @@
+ #include "xfs_health.h"
+ #include "xfs_btree.h"
+ #include "xfs_ag.h"
+-#include "xfs_rtalloc.h"
++#include "xfs_rtbitmap.h"
+ #include "xfs_inode.h"
+ #include "xfs_icache.h"
+ #include "scrub/scrub.h"
+diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
+index 59d7912fb75f1e..d03de74fd76f34 100644
+--- a/fs/xfs/scrub/inode.c
++++ b/fs/xfs/scrub/inode.c
+@@ -94,8 +94,8 @@ xchk_setup_inode(
+ if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
+ return -ENOENT;
+
+- /* Try a regular untrusted iget. */
+- error = xchk_iget(sc, sc->sm->sm_ino, &ip);
++ /* Try a safe untrusted iget. */
++ error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
+ if (!error)
+ return xchk_install_handle_iscrub(sc, ip);
+ if (error == -ENOENT)
+@@ -337,6 +337,10 @@ xchk_inode_flags2(
+ if (xfs_dinode_has_bigtime(dip) && !xfs_has_bigtime(mp))
+ goto bad;
+
++ /* no large extent counts without the filesystem feature */
++ if ((flags2 & XFS_DIFLAG2_NREXT64) && !xfs_has_large_extent_counts(mp))
++ goto bad;
++
+ return;
+ bad:
+ xchk_ino_set_corrupt(sc, ino);
+diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c
+index 86a62420e02c65..822f5adf7f7cc9 100644
+--- a/fs/xfs/scrub/reap.c
++++ b/fs/xfs/scrub/reap.c
+@@ -247,7 +247,7 @@ xreap_agextent_binval(
+ max_fsbs = min_t(xfs_agblock_t, agbno_next - bno,
+ xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX));
+
+- for (fsbcount = 1; fsbcount < max_fsbs; fsbcount++) {
++ for (fsbcount = 1; fsbcount <= max_fsbs; fsbcount++) {
+ struct xfs_buf *bp = NULL;
+ xfs_daddr_t daddr;
+ int error;
+diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
+index 008ddb599e1324..0f574a1d2cb1ad 100644
+--- a/fs/xfs/scrub/rtbitmap.c
++++ b/fs/xfs/scrub/rtbitmap.c
+@@ -11,9 +11,10 @@
+ #include "xfs_mount.h"
+ #include "xfs_log_format.h"
+ #include "xfs_trans.h"
+-#include "xfs_rtalloc.h"
++#include "xfs_rtbitmap.h"
+ #include "xfs_inode.h"
+ #include "xfs_bmap.h"
++#include "xfs_sb.h"
+ #include "scrub/scrub.h"
+ #include "scrub/common.h"
+
+diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c
+index 437ed9acbb2738..7676718dac7287 100644
+--- a/fs/xfs/scrub/rtsummary.c
++++ b/fs/xfs/scrub/rtsummary.c
+@@ -13,9 +13,10 @@
+ #include "xfs_inode.h"
+ #include "xfs_log_format.h"
+ #include "xfs_trans.h"
+-#include "xfs_rtalloc.h"
++#include "xfs_rtbitmap.h"
+ #include "xfs_bit.h"
+ #include "xfs_bmap.h"
++#include "xfs_sb.h"
+ #include "scrub/scrub.h"
+ #include "scrub/common.h"
+ #include "scrub/trace.h"
+diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
+index 1ef9c6b4842a13..869a10fe9d7d77 100644
+--- a/fs/xfs/scrub/scrub.h
++++ b/fs/xfs/scrub/scrub.h
+@@ -17,6 +17,13 @@ struct xfs_scrub;
+ #define XCHK_GFP_FLAGS ((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
+ __GFP_RETRY_MAYFAIL))
+
++/*
++ * For opening files by handle for fsck operations, we don't trust the inumber
++ * or the allocation state; therefore, perform an untrusted lookup. We don't
++ * want these inodes to pollute the cache, so mark them for immediate removal.
++ */
++#define XCHK_IGET_FLAGS (XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE)
++
+ /* Type info and names for the scrub types. */
+ enum xchk_type {
+ ST_NONE = 1, /* disabled */
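XCHK_IGET_FLAGS is consumed by the iget helpers in scrub/common.c; a simplified sketch of how the flag combination plugs into xfs_iget() (illustrative and condensed — the flag-passing is the point, not the surrounding details):

int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	/* Untrusted lookup that also evicts the inode once scrub drops it. */
	return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}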
+diff --git a/fs/xfs/scrub/stats.c b/fs/xfs/scrub/stats.c
+index cd91db4a554896..82499270e20b9b 100644
+--- a/fs/xfs/scrub/stats.c
++++ b/fs/xfs/scrub/stats.c
+@@ -329,9 +329,9 @@ xchk_stats_register(
+ if (!cs->cs_debugfs)
+ return;
+
+- debugfs_create_file("stats", 0644, cs->cs_debugfs, cs,
++ debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
+ &scrub_stats_fops);
+- debugfs_create_file("clear_stats", 0400, cs->cs_debugfs, cs,
++ debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
+ &clear_scrub_stats_fops);
+ }
+
+diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
+index cbd4d01e253c06..df49ca2e8c2346 100644
+--- a/fs/xfs/scrub/trace.h
++++ b/fs/xfs/scrub/trace.h
+@@ -1037,7 +1037,8 @@ TRACE_EVENT(xfarray_sort_stats,
+ #ifdef CONFIG_XFS_RT
+ TRACE_EVENT(xchk_rtsum_record_free,
+ TP_PROTO(struct xfs_mount *mp, xfs_rtblock_t start,
+- uint64_t len, unsigned int log, loff_t pos, xfs_suminfo_t v),
++ xfs_rtbxlen_t len, unsigned int log, loff_t pos,
++ xfs_suminfo_t v),
+ TP_ARGS(mp, start, len, log, pos, v),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 465d7630bb2185..688ac031d3a1c0 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -233,45 +233,6 @@ xfs_imap_valid(
+ return true;
+ }
+
+-/*
+- * Pass in a dellalloc extent and convert it to real extents, return the real
+- * extent that maps offset_fsb in wpc->iomap.
+- *
+- * The current page is held locked so nothing could have removed the block
+- * backing offset_fsb, although it could have moved from the COW to the data
+- * fork by another thread.
+- */
+-static int
+-xfs_convert_blocks(
+- struct iomap_writepage_ctx *wpc,
+- struct xfs_inode *ip,
+- int whichfork,
+- loff_t offset)
+-{
+- int error;
+- unsigned *seq;
+-
+- if (whichfork == XFS_COW_FORK)
+- seq = &XFS_WPC(wpc)->cow_seq;
+- else
+- seq = &XFS_WPC(wpc)->data_seq;
+-
+- /*
+- * Attempt to allocate whatever delalloc extent currently backs offset
+- * and put the result into wpc->iomap. Allocate in a loop because it
+- * may take several attempts to allocate real blocks for a contiguous
+- * delalloc extent if free space is sufficiently fragmented.
+- */
+- do {
+- error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
+- &wpc->iomap, seq);
+- if (error)
+- return error;
+- } while (wpc->iomap.offset + wpc->iomap.length <= offset);
+-
+- return 0;
+-}
+-
+ static int
+ xfs_map_blocks(
+ struct iomap_writepage_ctx *wpc,
+@@ -289,6 +250,7 @@ xfs_map_blocks(
+ struct xfs_iext_cursor icur;
+ int retries = 0;
+ int error = 0;
++ unsigned int *seq;
+
+ if (xfs_is_shutdown(mp))
+ return -EIO;
+@@ -386,7 +348,19 @@ xfs_map_blocks(
+ trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
+ return 0;
+ allocate_blocks:
+- error = xfs_convert_blocks(wpc, ip, whichfork, offset);
++ /*
++ * Convert a delalloc extent to a real one. The current page is held

++ * locked so nothing could have removed the block backing offset_fsb,
++ * although it could have moved from the COW to the data fork by another
++ * thread.
++ */
++ if (whichfork == XFS_COW_FORK)
++ seq = &XFS_WPC(wpc)->cow_seq;
++ else
++ seq = &XFS_WPC(wpc)->data_seq;
++
++ error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
++ &wpc->iomap, seq);
+ if (error) {
+ /*
+ * If we failed to find the extent in the COW fork we might have
+@@ -502,13 +476,6 @@ xfs_vm_writepages(
+ {
+ struct xfs_writepage_ctx wpc = { };
+
+- /*
+- * Writing back data in a transaction context can result in recursive
+- * transactions. This is bad, so issue a warning and get out of here.
+- */
+- if (WARN_ON_ONCE(current->journal_info))
+- return 0;
+-
+ xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+ return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
+ }
+diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c
+index 36fe2abb16e6e3..df86c9c097209e 100644
+--- a/fs/xfs/xfs_attr_item.c
++++ b/fs/xfs/xfs_attr_item.c
+@@ -329,6 +329,13 @@ xfs_xattri_finish_update(
+ goto out;
+ }
+
++ /* If an attr removal is trivially complete, we're done. */
++ if (attr->xattri_op_flags == XFS_ATTRI_OP_FLAGS_REMOVE &&
++ !xfs_inode_hasattr(args->dp)) {
++ error = 0;
++ goto out;
++ }
++
+ error = xfs_attr_set_iter(attr);
+ if (!error && attr->xattri_dela_state != XFS_DAS_DONE)
+ error = -EAGAIN;
+@@ -503,6 +510,9 @@ xfs_attri_validate(
+ unsigned int op = attrp->alfi_op_flags &
+ XFS_ATTRI_OP_FLAGS_TYPE_MASK;
+
++ if (!xfs_sb_version_haslogxattrs(&mp->m_sb))
++ return false;
++
+ if (attrp->__pad != 0)
+ return false;
+
+@@ -512,6 +522,10 @@ xfs_attri_validate(
+ if (attrp->alfi_attr_filter & ~XFS_ATTRI_FILTER_MASK)
+ return false;
+
++ if (!xfs_attr_check_namespace(attrp->alfi_attr_filter &
++ XFS_ATTR_NSP_ONDISK_MASK))
++ return false;
++
+ /* alfi_op_flags should be either a set or remove */
+ switch (op) {
+ case XFS_ATTRI_OP_FLAGS_SET:
+@@ -538,9 +552,10 @@ xfs_attri_validate(
+ */
+ STATIC int
+ xfs_attri_item_recover(
+- struct xfs_log_item *lip,
++ struct xfs_defer_pending *dfp,
+ struct list_head *capture_list)
+ {
++ struct xfs_log_item *lip = dfp->dfp_intent;
+ struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip);
+ struct xfs_attr_intent *attr;
+ struct xfs_mount *mp = lip->li_log->l_mp;
+@@ -561,7 +576,8 @@ xfs_attri_item_recover(
+ */
+ attrp = &attrip->attri_format;
+ if (!xfs_attri_validate(mp, attrp) ||
+- !xfs_attr_namecheck(nv->name.i_addr, nv->name.i_len))
++ !xfs_attr_namecheck(attrp->alfi_attr_filter, nv->name.i_addr,
++ nv->name.i_len))
+ return -EFSCORRUPTED;
+
+ error = xlog_recover_iget(mp, attrp->alfi_ino, &ip);
+@@ -594,8 +610,6 @@ xfs_attri_item_recover(
+ args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT |
+ XFS_DA_OP_LOGGED;
+
+- ASSERT(xfs_sb_version_haslogxattrs(&mp->m_sb));
+-
+ switch (attr->xattri_op_flags) {
+ case XFS_ATTRI_OP_FLAGS_SET:
+ case XFS_ATTRI_OP_FLAGS_REPLACE:
+@@ -608,8 +622,6 @@ xfs_attri_item_recover(
+ attr->xattri_dela_state = xfs_attr_init_add_state(args);
+ break;
+ case XFS_ATTRI_OP_FLAGS_REMOVE:
+- if (!xfs_inode_hasattr(args->dp))
+- goto out;
+ attr->xattri_dela_state = xfs_attr_init_remove_state(args);
+ break;
+ default:
+@@ -626,6 +638,7 @@ xfs_attri_item_recover(
+
+ args->trans = tp;
+ done_item = xfs_trans_get_attrd(tp, attrip);
++ xlog_recover_transfer_intent(tp, dfp);
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+@@ -711,48 +724,112 @@ xlog_recover_attri_commit_pass2(
+ const void *attr_value = NULL;
+ const void *attr_name;
+ size_t len;
+-
+- attri_formatp = item->ri_buf[0].i_addr;
+- attr_name = item->ri_buf[1].i_addr;
++ unsigned int op, i = 0;
+
+ /* Validate xfs_attri_log_format before the large memory allocation */
+ len = sizeof(struct xfs_attri_log_format);
+- if (item->ri_buf[0].i_len != len) {
++ if (item->ri_buf[i].i_len != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
++ attri_formatp = item->ri_buf[i].i_addr;
+ if (!xfs_attri_validate(mp, attri_formatp)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+- item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
++ attri_formatp, len);
++ return -EFSCORRUPTED;
++ }
++
++ /* Check the number of log iovecs makes sense for the op code. */
++ op = attri_formatp->alfi_op_flags & XFS_ATTRI_OP_FLAGS_TYPE_MASK;
++ switch (op) {
++ case XFS_ATTRI_OP_FLAGS_SET:
++ case XFS_ATTRI_OP_FLAGS_REPLACE:
++ /* Log item, attr name, attr value */
++ if (item->ri_total != 3) {
++ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
++ attri_formatp, len);
++ return -EFSCORRUPTED;
++ }
++ break;
++ case XFS_ATTRI_OP_FLAGS_REMOVE:
++ /* Log item, attr name */
++ if (item->ri_total != 2) {
++ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
++ attri_formatp, len);
++ return -EFSCORRUPTED;
++ }
++ break;
++ default:
++ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
++ attri_formatp, len);
+ return -EFSCORRUPTED;
+ }
++ i++;
+
+ /* Validate the attr name */
+- if (item->ri_buf[1].i_len !=
++ if (item->ri_buf[i].i_len !=
+ xlog_calc_iovec_len(attri_formatp->alfi_name_len)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+- item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
++ attri_formatp, len);
+ return -EFSCORRUPTED;
+ }
+
+- if (!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) {
++ attr_name = item->ri_buf[i].i_addr;
++ if (!xfs_attr_namecheck(attri_formatp->alfi_attr_filter, attr_name,
++ attri_formatp->alfi_name_len)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+- item->ri_buf[1].i_addr, item->ri_buf[1].i_len);
++ attri_formatp, len);
+ return -EFSCORRUPTED;
+ }
++ i++;
+
+ /* Validate the attr value, if present */
+ if (attri_formatp->alfi_value_len != 0) {
+- if (item->ri_buf[2].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) {
++ if (item->ri_buf[i].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr,
+ item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+- attr_value = item->ri_buf[2].i_addr;
++ attr_value = item->ri_buf[i].i_addr;
++ i++;
++ }
++
++ /*
++ * Make sure we got the correct number of buffers for the operation
++ * that we just loaded.
++ */
++ if (i != item->ri_total) {
++ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
++ attri_formatp, len);
++ return -EFSCORRUPTED;
++ }
++
++ switch (op) {
++ case XFS_ATTRI_OP_FLAGS_REMOVE:
++ /* Regular remove operations operate only on names. */
++ if (attr_value != NULL || attri_formatp->alfi_value_len != 0) {
++ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
++ attri_formatp, len);
++ return -EFSCORRUPTED;
++ }
++ fallthrough;
++ case XFS_ATTRI_OP_FLAGS_SET:
++ case XFS_ATTRI_OP_FLAGS_REPLACE:
++ /*
++ * Regular xattr set/remove/replace operations require a name
++ * and do not take a newname. Values are optional for set and
++ * replace.
++ */
++ if (attr_name == NULL || attri_formatp->alfi_name_len == 0) {
++ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
++ attri_formatp, len);
++ return -EFSCORRUPTED;
++ }
++ break;
+ }
+
+ /*
+@@ -767,14 +844,8 @@ xlog_recover_attri_commit_pass2(
+ attrip = xfs_attri_init(mp, nv);
+ memcpy(&attrip->attri_format, attri_formatp, len);
+
+- /*
+- * The ATTRI has two references. One for the ATTRD and one for ATTRI to
+- * ensure it makes it into the AIL. Insert the ATTRI into the AIL
+- * directly and drop the ATTRI reference. Note that
+- * xfs_trans_ail_update() drops the AIL lock.
+- */
+- xfs_trans_ail_insert(log->l_ailp, &attrip->attri_item, lsn);
+- xfs_attri_release(attrip);
++ xlog_recover_intent_item(log, &attrip->attri_item, lsn,
++ XFS_DEFER_OPS_TYPE_ATTR);
+ xfs_attri_log_nameval_put(nv);
+ return 0;
+ }
+diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
+index 99bbbe1a0e4478..9ee1d7d2ba76c2 100644
+--- a/fs/xfs/xfs_attr_list.c
++++ b/fs/xfs/xfs_attr_list.c
+@@ -82,7 +82,8 @@ xfs_attr_shortform_list(
+ (dp->i_af.if_bytes + sf->hdr.count * 16) < context->bufsize)) {
+ for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+- !xfs_attr_namecheck(sfe->nameval,
++ !xfs_attr_namecheck(sfe->flags,
++ sfe->nameval,
+ sfe->namelen)))
+ return -EFSCORRUPTED;
+ context->put_listent(context,
+@@ -120,7 +121,8 @@ xfs_attr_shortform_list(
+ for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+ if (unlikely(
+ ((char *)sfe < (char *)sf) ||
+- ((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)))) {
++ ((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)) ||
++ !xfs_attr_check_namespace(sfe->flags))) {
+ XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
+ XFS_ERRLEVEL_LOW,
+ context->dp->i_mount, sfe,
+@@ -174,7 +176,7 @@ xfs_attr_shortform_list(
+ cursor->offset = 0;
+ }
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+- !xfs_attr_namecheck(sbp->name,
++ !xfs_attr_namecheck(sbp->flags, sbp->name,
+ sbp->namelen))) {
+ error = -EFSCORRUPTED;
+ goto out;
+@@ -465,7 +467,8 @@ xfs_attr3_leaf_list_int(
+ }
+
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+- !xfs_attr_namecheck(name, namelen)))
++ !xfs_attr_namecheck(entry->flags, name,
++ namelen)))
+ return -EFSCORRUPTED;
+ context->put_listent(context, entry->flags,
+ name, namelen, valuelen);
+diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
+index e736a0844c89d6..b6d63b8bdad5a2 100644
+--- a/fs/xfs/xfs_bmap_item.c
++++ b/fs/xfs/xfs_bmap_item.c
+@@ -486,11 +486,12 @@ xfs_bui_validate(
+ */
+ STATIC int
+ xfs_bui_item_recover(
+- struct xfs_log_item *lip,
++ struct xfs_defer_pending *dfp,
+ struct list_head *capture_list)
+ {
+ struct xfs_bmap_intent fake = { };
+ struct xfs_trans_res resv;
++ struct xfs_log_item *lip = dfp->dfp_intent;
+ struct xfs_bui_log_item *buip = BUI_ITEM(lip);
+ struct xfs_trans *tp;
+ struct xfs_inode *ip = NULL;
+@@ -523,6 +524,8 @@ xfs_bui_item_recover(
+ goto err_rele;
+
+ budp = xfs_trans_get_bud(tp, buip);
++ xlog_recover_transfer_intent(tp, dfp);
++
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+@@ -681,12 +684,9 @@ xlog_recover_bui_commit_pass2(
+ buip = xfs_bui_init(mp);
+ xfs_bui_copy_format(&buip->bui_format, bui_formatp);
+ atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
+- /*
+- * Insert the intent into the AIL directly and drop one reference so
+- * that finishing or canceling the work will drop the other.
+- */
+- xfs_trans_ail_insert(log->l_ailp, &buip->bui_item, lsn);
+- xfs_bui_release(buip);
++
++ xlog_recover_intent_item(log, &buip->bui_item, lsn,
++ XFS_DEFER_OPS_TYPE_BMAP);
+ return 0;
+ }
+
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index fcefab68728598..f9d72d8e3c3599 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -636,13 +636,11 @@ xfs_bmap_punch_delalloc_range(
+
+ /*
+ * Test whether it is appropriate to check an inode for and free post EOF
+- * blocks. The 'force' parameter determines whether we should also consider
+- * regular files that are marked preallocated or append-only.
++ * blocks.
+ */
+ bool
+ xfs_can_free_eofblocks(
+- struct xfs_inode *ip,
+- bool force)
++ struct xfs_inode *ip)
+ {
+ struct xfs_bmbt_irec imap;
+ struct xfs_mount *mp = ip->i_mount;
+@@ -676,11 +674,11 @@ xfs_can_free_eofblocks(
+ return false;
+
+ /*
+- * Do not free real preallocated or append-only files unless the file
+- * has delalloc blocks and we are forced to remove them.
++ * Only free real extents for inodes with persistent preallocations or
++ * the append-only flag.
+ */
+ if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
+- if (!force || ip->i_delayed_blks == 0)
++ if (ip->i_delayed_blks == 0)
+ return false;
+
+ /*
+@@ -734,6 +732,22 @@ xfs_free_eofblocks(
+ /* Wait on dio to ensure i_size has settled. */
+ inode_dio_wait(VFS_I(ip));
+
++ /*
++ * For preallocated files, only free delayed allocations.
++ *
++ * Note that this means we also leave speculative preallocations in
++ * place for preallocated files.
++ */
++ if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
++ if (ip->i_delayed_blks) {
++ xfs_bmap_punch_delalloc_range(ip,
++ round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
++ LLONG_MAX);
++ }
++ xfs_inode_clear_eofblocks_tag(ip);
++ return 0;
++ }
++
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error) {
+ ASSERT(xfs_is_shutdown(mp));
+@@ -780,12 +794,10 @@ xfs_alloc_file_space(
+ {
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_off_t count;
+- xfs_filblks_t allocated_fsb;
+ xfs_filblks_t allocatesize_fsb;
+ xfs_extlen_t extsz, temp;
+ xfs_fileoff_t startoffset_fsb;
+ xfs_fileoff_t endoffset_fsb;
+- int nimaps;
+ int rt;
+ xfs_trans_t *tp;
+ xfs_bmbt_irec_t imaps[1], *imapp;
+@@ -808,7 +820,6 @@ xfs_alloc_file_space(
+
+ count = len;
+ imapp = &imaps[0];
+- nimaps = 1;
+ startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
+ endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
+ allocatesize_fsb = endoffset_fsb - startoffset_fsb;
+@@ -819,6 +830,7 @@ xfs_alloc_file_space(
+ while (allocatesize_fsb && !error) {
+ xfs_fileoff_t s, e;
+ unsigned int dblocks, rblocks, resblks;
++ int nimaps = 1;
+
+ /*
+ * Determine space reservations for data/realtime.
+@@ -870,29 +882,32 @@ xfs_alloc_file_space(
+ if (error)
+ goto error;
+
++ /*
++ * If the allocator cannot find a single free extent large
++ * enough to cover the start block of the requested range,
++ * xfs_bmapi_write will return -ENOSR.
++ *
++ * In that case we simply need to keep looping with the same
++ * startoffset_fsb so that one of the following allocations
++ * will eventually reach the requested range.
++ */
+ error = xfs_bmapi_write(tp, ip, startoffset_fsb,
+ allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
+ &nimaps);
+- if (error)
+- goto error;
++ if (error) {
++ if (error != -ENOSR)
++ goto error;
++ error = 0;
++ } else {
++ startoffset_fsb += imapp->br_blockcount;
++ allocatesize_fsb -= imapp->br_blockcount;
++ }
+
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+- if (error)
+- break;
+-
+- allocated_fsb = imapp->br_blockcount;
+-
+- if (nimaps == 0) {
+- error = -ENOSPC;
+- break;
+- }
+-
+- startoffset_fsb += allocated_fsb;
+- allocatesize_fsb -= allocated_fsb;
+ }
+
+ return error;
+@@ -1047,7 +1062,7 @@ xfs_prepare_shift(
+ * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
+ * into the accessible region of the file.
+ */
+- if (xfs_can_free_eofblocks(ip, true)) {
++ if (xfs_can_free_eofblocks(ip)) {
+ error = xfs_free_eofblocks(ip);
+ if (error)
+ return error;
+diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
+index 6888078f5c31e0..1383019ccdb755 100644
+--- a/fs/xfs/xfs_bmap_util.h
++++ b/fs/xfs/xfs_bmap_util.h
+@@ -63,7 +63,7 @@ int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
+ xfs_off_t len);
+
+ /* EOF block manipulation functions */
+-bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
++bool xfs_can_free_eofblocks(struct xfs_inode *ip);
+ int xfs_free_eofblocks(struct xfs_inode *ip);
+
+ int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index c1ece4a08ff446..20c1d146af1da7 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -2049,6 +2049,14 @@ xfs_alloc_buftarg(
+ return NULL;
+ }
+
++static inline void
++xfs_buf_list_del(
++ struct xfs_buf *bp)
++{
++ list_del_init(&bp->b_list);
++ wake_up_var(&bp->b_list);
++}
++
+ /*
+ * Cancel a delayed write list.
+ *
+@@ -2066,7 +2074,7 @@ xfs_buf_delwri_cancel(
+
+ xfs_buf_lock(bp);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+- list_del_init(&bp->b_list);
++ xfs_buf_list_del(bp);
+ xfs_buf_relse(bp);
+ }
+ }
+@@ -2119,6 +2127,34 @@ xfs_buf_delwri_queue(
+ return true;
+ }
+
++/*
++ * Queue a buffer to this delwri list as part of a data integrity operation.
++ * If the buffer is on any other delwri list, we'll wait for that to clear
++ * so that the caller can submit the buffer for IO and wait for the result.
++ * Callers must ensure the buffer is not already on the list.
++ */
++void
++xfs_buf_delwri_queue_here(
++ struct xfs_buf *bp,
++ struct list_head *buffer_list)
++{
++ /*
++ * We need this buffer to end up on the /caller's/ delwri list, not any
++ * old list. This can happen if the buffer is marked stale (which
++ * clears DELWRI_Q) after the AIL queues the buffer to its list but
++ * before the AIL has a chance to submit the list.
++ */
++ while (!list_empty(&bp->b_list)) {
++ xfs_buf_unlock(bp);
++ wait_var_event(&bp->b_list, list_empty(&bp->b_list));
++ xfs_buf_lock(bp);
++ }
++
++ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
++
++ xfs_buf_delwri_queue(bp, buffer_list);
++}
++
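The wake side of this handshake is the new xfs_buf_list_del() helper above, which pairs the list removal with wake_up_var(). A hedged sketch of a caller that wants synchronous writeback of one buffer (the function name example_write_buf_sync is illustrative; error handling trimmed):

static int
example_write_buf_sync(
	struct xfs_buf		*bp)
{
	LIST_HEAD(buffer_list);

	/* Called with bp locked; the helper may drop and retake the lock. */
	xfs_buf_delwri_queue_here(bp, &buffer_list);
	xfs_buf_unlock(bp);

	/* Submit the one-entry list and wait for the I/O to complete. */
	return xfs_buf_delwri_submit(&buffer_list);
}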
+ /*
+ * Compare function is more complex than it needs to be because
+ * the return value is only 32 bits and we are doing comparisons
+@@ -2181,7 +2217,7 @@ xfs_buf_delwri_submit_buffers(
+ * reference and remove it from the list here.
+ */
+ if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+- list_del_init(&bp->b_list);
++ xfs_buf_list_del(bp);
+ xfs_buf_relse(bp);
+ continue;
+ }
+@@ -2201,7 +2237,7 @@ xfs_buf_delwri_submit_buffers(
+ list_move_tail(&bp->b_list, wait_list);
+ } else {
+ bp->b_flags |= XBF_ASYNC;
+- list_del_init(&bp->b_list);
++ xfs_buf_list_del(bp);
+ }
+ __xfs_buf_submit(bp, false);
+ }
+@@ -2255,7 +2291,7 @@ xfs_buf_delwri_submit(
+ while (!list_empty(&wait_list)) {
+ bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
+
+- list_del_init(&bp->b_list);
++ xfs_buf_list_del(bp);
+
+ /*
+ * Wait on the locked buffer, check for errors and unlock and
+diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
+index df8f47953bb4e9..5896b58c5f4db6 100644
+--- a/fs/xfs/xfs_buf.h
++++ b/fs/xfs/xfs_buf.h
+@@ -318,6 +318,7 @@ extern void xfs_buf_stale(struct xfs_buf *bp);
+ /* Delayed Write Buffer Routines */
+ extern void xfs_buf_delwri_cancel(struct list_head *);
+ extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
++void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
+ extern int xfs_buf_delwri_submit(struct list_head *);
+ extern int xfs_buf_delwri_submit_nowait(struct list_head *);
+ extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index ac6ba646624df5..9b67f05d92a193 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -333,7 +333,6 @@ xfs_dquot_disk_alloc(
+ goto err_cancel;
+
+ ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
+- ASSERT(nmaps == 1);
+ ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+ (map.br_startblock != HOLESTARTBLOCK));
+
+@@ -562,7 +561,8 @@ xfs_dquot_from_disk(
+ struct xfs_dquot *dqp,
+ struct xfs_buf *bp)
+ {
+- struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset;
++ struct xfs_dqblk *dqb = xfs_buf_offset(bp, dqp->q_bufoffset);
++ struct xfs_disk_dquot *ddqp = &dqb->dd_diskdq;
+
+ /*
+ * Ensure that we got the type and ID we were looking for.
+@@ -1250,7 +1250,7 @@ xfs_qm_dqflush(
+ }
+
+ /* Flush the incore dquot to the ondisk buffer. */
+- dqblk = bp->b_addr + dqp->q_bufoffset;
++ dqblk = xfs_buf_offset(bp, dqp->q_bufoffset);
+ xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);
+
+ /*
+diff --git a/fs/xfs/xfs_dquot_item_recover.c b/fs/xfs/xfs_dquot_item_recover.c
+index 8966ba842395bf..2c2720ce692382 100644
+--- a/fs/xfs/xfs_dquot_item_recover.c
++++ b/fs/xfs/xfs_dquot_item_recover.c
+@@ -19,6 +19,7 @@
+ #include "xfs_log.h"
+ #include "xfs_log_priv.h"
+ #include "xfs_log_recover.h"
++#include "xfs_error.h"
+
+ STATIC void
+ xlog_recover_dquot_ra_pass2(
+@@ -65,6 +66,7 @@ xlog_recover_dquot_commit_pass2(
+ {
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_buf *bp;
++ struct xfs_dqblk *dqb;
+ struct xfs_disk_dquot *ddq, *recddq;
+ struct xfs_dq_logformat *dq_f;
+ xfs_failaddr_t fa;
+@@ -130,14 +132,14 @@ xlog_recover_dquot_commit_pass2(
+ return error;
+
+ ASSERT(bp);
+- ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
++ dqb = xfs_buf_offset(bp, dq_f->qlf_boffset);
++ ddq = &dqb->dd_diskdq;
+
+ /*
+ * If the dquot has an LSN in it, recover the dquot only if it's less
+ * than the lsn of the transaction we are replaying.
+ */
+ if (xfs_has_crc(mp)) {
+- struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
+ xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
+
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+@@ -147,10 +149,23 @@ xlog_recover_dquot_commit_pass2(
+
+ memcpy(ddq, recddq, item->ri_buf[1].i_len);
+ if (xfs_has_crc(mp)) {
+- xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
++ xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
+
++ /* Validate the recovered dquot. */
++ fa = xfs_dqblk_verify(log->l_mp, dqb, dq_f->qlf_id);
++ if (fa) {
++ XFS_CORRUPTION_ERROR("Bad dquot after recovery",
++ XFS_ERRLEVEL_LOW, mp, dqb,
++ sizeof(struct xfs_dqblk));
++ xfs_alert(mp,
++ "Metadata corruption detected at %pS, dquot 0x%x",
++ fa, dq_f->qlf_id);
++ error = -EFSCORRUPTED;
++ goto out_release;
++ }
++
+ ASSERT(dq_f->qlf_size == 2);
+ ASSERT(bp->b_mount == mp);
+ bp->b_flags |= _XBF_LOGRECOVERY;
+diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
+index 3fa8789820ad9d..c9908fb337657a 100644
+--- a/fs/xfs/xfs_extfree_item.c
++++ b/fs/xfs/xfs_extfree_item.c
+@@ -657,10 +657,11 @@ xfs_efi_validate_ext(
+ */
+ STATIC int
+ xfs_efi_item_recover(
+- struct xfs_log_item *lip,
++ struct xfs_defer_pending *dfp,
+ struct list_head *capture_list)
+ {
+ struct xfs_trans_res resv;
++ struct xfs_log_item *lip = dfp->dfp_intent;
+ struct xfs_efi_log_item *efip = EFI_ITEM(lip);
+ struct xfs_mount *mp = lip->li_log->l_mp;
+ struct xfs_efd_log_item *efdp;
+@@ -688,7 +689,9 @@ xfs_efi_item_recover(
+ error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
+ if (error)
+ return error;
++
+ efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
++ xlog_recover_transfer_intent(tp, dfp);
+
+ for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+ struct xfs_extent_free_item fake = {
+@@ -820,12 +823,9 @@ xlog_recover_efi_commit_pass2(
+ return error;
+ }
+ atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
+- /*
+- * Insert the intent into the AIL directly and drop one reference so
+- * that finishing or canceling the work will drop the other.
+- */
+- xfs_trans_ail_insert(log->l_ailp, &efip->efi_item, lsn);
+- xfs_efi_release(efip);
++
++ xlog_recover_intent_item(log, &efip->efi_item, lsn,
++ XFS_DEFER_OPS_TYPE_FREE);
+ return 0;
+ }
+
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 203700278ddbb6..e33e5e13b95f46 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -214,6 +214,43 @@ xfs_ilock_iocb(
+ return 0;
+ }
+
++static int
++xfs_ilock_iocb_for_write(
++ struct kiocb *iocb,
++ unsigned int *lock_mode)
++{
++ ssize_t ret;
++ struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
++
++ ret = xfs_ilock_iocb(iocb, *lock_mode);
++ if (ret)
++ return ret;
++
++ if (*lock_mode == XFS_IOLOCK_EXCL)
++ return 0;
++ if (!xfs_iflags_test(ip, XFS_IREMAPPING))
++ return 0;
++
++ xfs_iunlock(ip, *lock_mode);
++ *lock_mode = XFS_IOLOCK_EXCL;
++ return xfs_ilock_iocb(iocb, *lock_mode);
++}
++
++static unsigned int
++xfs_ilock_for_write_fault(
++ struct xfs_inode *ip)
++{
++ /* get a shared lock if no remapping in progress */
++ xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
++ if (!xfs_iflags_test(ip, XFS_IREMAPPING))
++ return XFS_MMAPLOCK_SHARED;
++
++ /* wait for remapping to complete */
++ xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
++ xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
++ return XFS_MMAPLOCK_EXCL;
++}
++
+ STATIC ssize_t
+ xfs_file_dio_read(
+ struct kiocb *iocb,
+@@ -551,7 +588,7 @@ xfs_file_dio_write_aligned(
+ unsigned int iolock = XFS_IOLOCK_SHARED;
+ ssize_t ret;
+
+- ret = xfs_ilock_iocb(iocb, iolock);
++ ret = xfs_ilock_iocb_for_write(iocb, &iolock);
+ if (ret)
+ return ret;
+ ret = xfs_file_write_checks(iocb, from, &iolock);
+@@ -618,7 +655,7 @@ xfs_file_dio_write_unaligned(
+ flags = IOMAP_DIO_FORCE_WAIT;
+ }
+
+- ret = xfs_ilock_iocb(iocb, iolock);
++ ret = xfs_ilock_iocb_for_write(iocb, &iolock);
+ if (ret)
+ return ret;
+
+@@ -1180,7 +1217,7 @@ xfs_file_remap_range(
+ if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
+ xfs_log_force_inode(dest);
+ out_unlock:
+- xfs_iunlock2_io_mmap(src, dest);
++ xfs_iunlock2_remapping(src, dest);
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+ return remapped > 0 ? remapped : ret;
+@@ -1328,6 +1365,7 @@ __xfs_filemap_fault(
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct xfs_inode *ip = XFS_I(inode);
+ vm_fault_t ret;
++ unsigned int lock_mode = 0;
+
+ trace_xfs_filemap_fault(ip, order, write_fault);
+
+@@ -1336,25 +1374,24 @@ __xfs_filemap_fault(
+ file_update_time(vmf->vma->vm_file);
+ }
+
++ if (IS_DAX(inode) || write_fault)
++ lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
++
+ if (IS_DAX(inode)) {
+ pfn_t pfn;
+
+- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
+ if (ret & VM_FAULT_NEEDDSYNC)
+ ret = dax_finish_sync_fault(vmf, order, pfn);
+- xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
++ } else if (write_fault) {
++ ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
+ } else {
+- if (write_fault) {
+- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+- ret = iomap_page_mkwrite(vmf,
+- &xfs_page_mkwrite_iomap_ops);
+- xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+- } else {
+- ret = filemap_fault(vmf);
+- }
++ ret = filemap_fault(vmf);
+ }
+
++ if (lock_mode)
++ xfs_iunlock(XFS_I(inode), lock_mode);
++
+ if (write_fault)
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
+index 736e5545f58402..8982c5d6cbd065 100644
+--- a/fs/xfs/xfs_fsmap.c
++++ b/fs/xfs/xfs_fsmap.c
+@@ -23,7 +23,7 @@
+ #include "xfs_refcount.h"
+ #include "xfs_refcount_btree.h"
+ #include "xfs_alloc_btree.h"
+-#include "xfs_rtalloc.h"
++#include "xfs_rtbitmap.h"
+ #include "xfs_ag.h"
+
+ /* Convert an xfs_fsmap to an fsmap. */
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index 7cb75cb6b8e9b4..c3f0e3cae87e56 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -134,6 +134,10 @@ xfs_growfs_data_private(
+ if (delta < 0 && nagcount < 2)
+ return -EINVAL;
+
++ /* No work to do */
++ if (delta == 0)
++ return 0;
++
+ oagcount = mp->m_sb.sb_agcount;
+ /* allocate the new per-ag structures */
+ if (nagcount > oagcount) {
+@@ -153,7 +157,7 @@ xfs_growfs_data_private(
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
+ 0, &tp);
+ if (error)
+- return error;
++ goto out_free_unused_perag;
+
+ last_pag = xfs_perag_get(mp, oagcount - 1);
+ if (delta > 0) {
+@@ -227,6 +231,9 @@ xfs_growfs_data_private(
+
+ out_trans_cancel:
+ xfs_trans_cancel(tp);
++out_free_unused_perag:
++ if (nagcount > oagcount)
++ xfs_free_unused_perag_range(mp, oagcount, nagcount);
+ return error;
+ }
+
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index 3c210ac8371368..57a9f23175250a 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -1149,7 +1149,7 @@ xfs_inode_free_eofblocks(
+ }
+ *lockflags |= XFS_IOLOCK_EXCL;
+
+- if (xfs_can_free_eofblocks(ip, false))
++ if (xfs_can_free_eofblocks(ip))
+ return xfs_free_eofblocks(ip);
+
+ /* inode could be preallocated or append-only */
+@@ -2031,8 +2031,10 @@ xfs_inodegc_want_queue_work(
+ * - Memory shrinkers queued the inactivation worker and it hasn't finished.
+ * - The queue depth exceeds the maximum allowable percpu backlog.
+ *
+- * Note: If the current thread is running a transaction, we don't ever want to
+- * wait for other transactions because that could introduce a deadlock.
++ * Note: If we are in a NOFS context here (e.g. current thread is running a
++ * transaction) then we don't want to block here as inodegc progress may require
++ * filesystem resources we hold to make progress and that could result in a
++ * deadlock. Hence we skip out of here if we are in a scoped NOFS context.
+ */
+ static inline bool
+ xfs_inodegc_want_flush_work(
+@@ -2040,7 +2042,7 @@ xfs_inodegc_want_flush_work(
+ unsigned int items,
+ unsigned int shrinker_hits)
+ {
+- if (current->journal_info)
++ if (current->flags & PF_MEMALLOC_NOFS)
+ return false;
+
+ if (shrinker_hits > 0)
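PF_MEMALLOC_NOFS is the scoped-NOFS marker set by memalloc_nofs_save(); transactions enter such a scope for their whole lifetime. A minimal sketch of the save/restore pattern the check above relies on (example_nofs_scope is an illustrative name):

#include <linux/sched/mm.h>

static void example_nofs_scope(void)
{
	unsigned int	nofs_flag;

	/* Sets PF_MEMALLOC_NOFS; allocations below behave as GFP_NOFS. */
	nofs_flag = memalloc_nofs_save();

	/* ... work that must not recurse into filesystem reclaim ... */

	memalloc_nofs_restore(nofs_flag);
}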
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 4d55f58d99b7ac..7aa73855fab65a 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -918,6 +918,13 @@ xfs_droplink(
+ xfs_trans_t *tp,
+ xfs_inode_t *ip)
+ {
++ if (VFS_I(ip)->i_nlink == 0) {
++ xfs_alert(ip->i_mount,
++ "%s: Attempt to drop inode (%llu) with nlink zero.",
++ __func__, ip->i_ino);
++ return -EFSCORRUPTED;
++ }
++
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+
+ drop_nlink(VFS_I(ip));
+@@ -1232,8 +1239,19 @@ xfs_link(
+ */
+ if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
+ tdp->i_projid != sip->i_projid)) {
+- error = -EXDEV;
+- goto error_return;
++ /*
++ * Project quota setup skips special files which can
++ * leave inodes in a PROJINHERIT directory without a
++ * project ID set. We need to allow links to be made
++ * to these "project-less" inodes because userspace
++ * expects them to succeed after project ID setup,
++ * but everything else should be rejected.
++ */
++ if (!special_file(VFS_I(sip)->i_mode) ||
++ sip->i_projid != 0) {
++ error = -EXDEV;
++ goto error_return;
++ }
+ }
+
+ if (!resblks) {
+@@ -1451,7 +1469,7 @@ xfs_release(
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
+ return 0;
+
+- if (xfs_can_free_eofblocks(ip, false)) {
++ if (xfs_can_free_eofblocks(ip)) {
+ /*
+ * Check if the inode is being opened, written and closed
+ * frequently and we have delayed allocation blocks outstanding
+@@ -1667,15 +1685,13 @@ xfs_inode_needs_inactive(
+
+ /*
+ * This file isn't being freed, so check if there are post-eof blocks
+- * to free. @force is true because we are evicting an inode from the
+- * cache. Post-eof blocks must be freed, lest we end up with broken
+- * free space accounting.
++ * to free.
+ *
+ * Note: don't bother with iolock here since lockdep complains about
+ * acquiring it in reclaim context. We have the only reference to the
+ * inode at this point anyways.
+ */
+- return xfs_can_free_eofblocks(ip, true);
++ return xfs_can_free_eofblocks(ip);
+ }
+
+ /*
+@@ -1723,15 +1739,11 @@ xfs_inactive(
+
+ if (VFS_I(ip)->i_nlink != 0) {
+ /*
+- * force is true because we are evicting an inode from the
+- * cache. Post-eof blocks must be freed, lest we end up with
+- * broken free space accounting.
+- *
+ * Note: don't bother with iolock here since lockdep complains
+ * about acquiring it in reclaim context. We have the only
+ * reference to the inode at this point anyways.
+ */
+- if (xfs_can_free_eofblocks(ip, true))
++ if (xfs_can_free_eofblocks(ip))
+ error = xfs_free_eofblocks(ip);
+
+ goto out;
+@@ -2311,11 +2323,26 @@ xfs_ifree_cluster(
+ * This buffer may not have been correctly initialised as we
+ * didn't read it from disk. That's not important because we are
+ * only using to mark the buffer as stale in the log, and to
+- * attach stale cached inodes on it. That means it will never be
+- * dispatched for IO. If it is, we want to know about it, and we
+- * want it to fail. We can acheive this by adding a write
+- * verifier to the buffer.
++ * attach stale cached inodes to it.
++ *
++ * For the inode that triggered the cluster freeing, this
++ * attachment may occur in xfs_inode_item_precommit() after we
++ * have marked this buffer stale. If this buffer was not in
++ * memory before xfs_ifree_cluster() started, it will not be
++ * marked XBF_DONE and this will cause problems later in
++ * xfs_inode_item_precommit() when we trip over a (stale, !done)
++ * buffer attached to the transaction.
++ *
++ * Hence we have to mark the buffer as XBF_DONE here. This is
++ * safe because we are also marking the buffer as XBF_STALE and
++ * XFS_BLI_STALE. That means it will never be dispatched for
++ * IO and it won't be unlocked until the cluster freeing has
++ * been committed to the journal and the buffer unpinned. If it
++ * is written, we want to know about it, and we want it to
++ * fail. We can achieve this by adding a write verifier to the
++ * buffer.
+ */
++ bp->b_flags |= XBF_DONE;
+ bp->b_ops = &xfs_inode_buf_ops;
+
+ /*
+@@ -3621,6 +3648,23 @@ xfs_iunlock2_io_mmap(
+ inode_unlock(VFS_I(ip1));
+ }
+
++/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
++void
++xfs_iunlock2_remapping(
++ struct xfs_inode *ip1,
++ struct xfs_inode *ip2)
++{
++ xfs_iflags_clear(ip1, XFS_IREMAPPING);
++
++ if (ip1 != ip2)
++ xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
++ xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
++
++ if (ip1 != ip2)
++ inode_unlock_shared(VFS_I(ip1));
++ inode_unlock(VFS_I(ip2));
++}
++
+ /*
+ * Reload the incore inode list for this inode. Caller should ensure that
+ * the link count cannot change, either by taking ILOCK_SHARED or otherwise
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index 0c5bdb91152e1c..3beb470f18920d 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -347,6 +347,14 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
+ /* Quotacheck is running but inode has not been added to quota counts. */
+ #define XFS_IQUOTAUNCHECKED (1 << 14)
+
++/*
++ * Remap in progress. Callers that wish to update file data while
++ * holding a shared IOLOCK or MMAPLOCK must drop the lock and retake
++ * the lock in exclusive mode. Relocking the file will block until
++ * IREMAPPING is cleared.
++ */
++#define XFS_IREMAPPING (1U << 15)
++
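Readers honour this flag by upgrading their lock; a condensed sketch of the retake pattern, mirroring the xfs_ilock_for_write_fault() helper added in the xfs_file.c hunk of this patch (example_mmaplock_for_write is an illustrative name):

static unsigned int
example_mmaplock_for_write(
	struct xfs_inode	*ip)
{
	/* Optimistic shared lock when no remap is running. */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	if (!xfs_iflags_test(ip, XFS_IREMAPPING))
		return XFS_MMAPLOCK_SHARED;

	/* A remap is in flight: relock exclusive, which blocks until done. */
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	return XFS_MMAPLOCK_EXCL;
}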
+ /* All inode state flags related to inode reclaim. */
+ #define XFS_ALL_IRECLAIM_FLAGS (XFS_IRECLAIMABLE | \
+ XFS_IRECLAIM | \
+@@ -561,6 +569,14 @@ extern void xfs_setup_inode(struct xfs_inode *ip);
+ extern void xfs_setup_iops(struct xfs_inode *ip);
+ extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init);
+
++static inline void xfs_update_stable_writes(struct xfs_inode *ip)
++{
++ if (bdev_stable_writes(xfs_inode_buftarg(ip)->bt_bdev))
++ mapping_set_stable_writes(VFS_I(ip)->i_mapping);
++ else
++ mapping_clear_stable_writes(VFS_I(ip)->i_mapping);
++}
++
+ /*
+ * When setting up a newly allocated inode, we need to call
+ * xfs_finish_inode_setup() once the inode is fully instantiated at
+@@ -595,6 +611,7 @@ void xfs_end_io(struct work_struct *work);
+
+ int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+ void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
++void xfs_iunlock2_remapping(struct xfs_inode *ip1, struct xfs_inode *ip2);
+
+ static inline bool
+ xfs_inode_unlinked_incomplete(
+diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
+index 127b2410eb206f..155a8b31287550 100644
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -556,6 +556,9 @@ xfs_inode_to_log_dinode(
+ memset(to->di_pad2, 0, sizeof(to->di_pad2));
+ uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
+ to->di_v3_pad = 0;
++
++ /* dummy value for initialisation */
++ to->di_crc = 0;
+ } else {
+ to->di_version = 2;
+ to->di_flushiter = ip->i_flushiter;
+diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
+index 0e5dba2343ea13..144198a6b2702c 100644
+--- a/fs/xfs/xfs_inode_item_recover.c
++++ b/fs/xfs/xfs_inode_item_recover.c
+@@ -286,6 +286,7 @@ xlog_recover_inode_commit_pass2(
+ struct xfs_log_dinode *ldip;
+ uint isize;
+ int need_free = 0;
++ xfs_failaddr_t fa;
+
+ if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
+ in_f = item->ri_buf[0].i_addr;
+@@ -369,24 +370,26 @@ xlog_recover_inode_commit_pass2(
+ * superblock flag to determine whether we need to look at di_flushiter
+ * to skip replay when the on disk inode is newer than the log one
+ */
+- if (!xfs_has_v3inodes(mp) &&
+- ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+- /*
+- * Deal with the wrap case, DI_MAX_FLUSH is less
+- * than smaller numbers
+- */
+- if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
+- ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+- /* do nothing */
+- } else {
+- trace_xfs_log_recover_inode_skip(log, in_f);
+- error = 0;
+- goto out_release;
++ if (!xfs_has_v3inodes(mp)) {
++ if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
++ /*
++ * Deal with the wrap case, DI_MAX_FLUSH is less
++ * than smaller numbers
++ */
++ if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
++ ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
++ /* do nothing */
++ } else {
++ trace_xfs_log_recover_inode_skip(log, in_f);
++ error = 0;
++ goto out_release;
++ }
+ }
++
++ /* Take the opportunity to reset the flush iteration count */
++ ldip->di_flushiter = 0;
+ }
+
+- /* Take the opportunity to reset the flush iteration count */
+- ldip->di_flushiter = 0;
+
+ if (unlikely(S_ISREG(ldip->di_mode))) {
+ if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+@@ -528,8 +531,19 @@ xlog_recover_inode_commit_pass2(
+ (dip->di_mode != 0))
+ error = xfs_recover_inode_owner_change(mp, dip, in_f,
+ buffer_list);
+- /* re-generate the checksum. */
++ /* re-generate the checksum and validate the recovered inode. */
+ xfs_dinode_calc_crc(log->l_mp, dip);
++ fa = xfs_dinode_verify(log->l_mp, in_f->ilf_ino, dip);
++ if (fa) {
++ XFS_CORRUPTION_ERROR(
++ "Bad dinode after recovery",
++ XFS_ERRLEVEL_LOW, mp, dip, sizeof(*dip));
++ xfs_alert(mp,
++ "Metadata corruption detected at %pS, inode 0x%llx",
++ fa, in_f->ilf_ino);
++ error = -EFSCORRUPTED;
++ goto out_release;
++ }
+
+ ASSERT(bp->b_mount == mp);
+ bp->b_flags |= _XBF_LOGRECOVERY;
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 55bb01173cde8c..535f6d38cdb540 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1120,23 +1120,25 @@ xfs_ioctl_setattr_xflags(
+ struct fileattr *fa)
+ {
+ struct xfs_mount *mp = ip->i_mount;
++ bool rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME);
+ uint64_t i_flags2;
+
+- /* Can't change realtime flag if any extents are allocated. */
+- if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
+- XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
+- return -EINVAL;
++ if (rtflag != XFS_IS_REALTIME_INODE(ip)) {
++ /* Can't change realtime flag if any extents are allocated. */
++ if (ip->i_df.if_nextents || ip->i_delayed_blks)
++ return -EINVAL;
++ }
+
+- /* If realtime flag is set then must have realtime device */
+- if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
++ if (rtflag) {
++ /* If realtime flag is set then must have realtime device */
+ if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
+ (ip->i_extsize % mp->m_sb.sb_rextsize))
+ return -EINVAL;
+- }
+
+- /* Clear reflink if we are actually able to set the rt flag. */
+- if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
+- ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
++ /* Clear reflink if we are actually able to set the rt flag. */
++ if (xfs_is_reflink_inode(ip))
++ ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
++ }
+
+ /* diflags2 only valid for v3 inodes. */
+ i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
+@@ -1147,6 +1149,14 @@ xfs_ioctl_setattr_xflags(
+ ip->i_diflags2 = i_flags2;
+
+ xfs_diflags_to_iflags(ip, false);
++
++ /*
++ * Make the stable writes flag match that of the device the inode
++ * resides on when flipping the RT flag.
++ */
++ if (rtflag != XFS_IS_REALTIME_INODE(ip) && S_ISREG(VFS_I(ip)->i_mode))
++ xfs_update_stable_writes(ip);
++
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ XFS_STATS_INC(mp, xs_ig_attrchg);
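From userspace this path is reached via the FS_IOC_FSSETXATTR ioctl. A hedged sketch of flipping FS_XFLAG_REALTIME on a file that has no extents allocated yet (set_realtime_flag is an illustrative name; fd setup and error reporting trimmed):

#include <linux/fs.h>
#include <sys/ioctl.h>

static int set_realtime_flag(int fd, int enable)
{
	struct fsxattr fsx;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0)
		return -1;
	if (enable)
		fsx.fsx_xflags |= FS_XFLAG_REALTIME;
	else
		fsx.fsx_xflags &= ~FS_XFLAG_REALTIME;

	/* Fails with EINVAL if extents are already allocated, per the hunk above. */
	return ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
}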
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 18c8f168b1532d..9ce2f48b4ebc01 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -317,14 +317,6 @@ xfs_iomap_write_direct(
+ if (error)
+ goto out_unlock;
+
+- /*
+- * Copy any maps to caller's array and return any error.
+- */
+- if (nimaps == 0) {
+- error = -ENOSPC;
+- goto out_unlock;
+- }
+-
+ if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
+ error = xfs_alert_fsblock_zero(ip, imap);
+
+@@ -1013,6 +1005,24 @@ xfs_buffered_write_iomap_begin(
+ goto out_unlock;
+ }
+
++ /*
++ * For zeroing, trim a delalloc extent that extends beyond the EOF
++ * block. If it starts beyond the EOF block, convert it to an
++ * unwritten extent.
++ */
++ if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
++ isnullstartblock(imap.br_startblock)) {
++ xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
++
++ if (offset_fsb >= eof_fsb)
++ goto convert_delay;
++ if (end_fsb > eof_fsb) {
++ end_fsb = eof_fsb;
++ xfs_trim_extent(&imap, offset_fsb,
++ end_fsb - offset_fsb);
++ }
++ }
++
+ /*
+ * Search the COW fork extent list even if we did not find a data fork
+ * extent. This serves two purposes: first this implements the
+@@ -1117,47 +1127,48 @@ xfs_buffered_write_iomap_begin(
+ }
+ }
+
+-retry:
+- error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+- end_fsb - offset_fsb, prealloc_blocks,
+- allocfork == XFS_DATA_FORK ? &imap : &cmap,
+- allocfork == XFS_DATA_FORK ? &icur : &ccur,
+- allocfork == XFS_DATA_FORK ? eof : cow_eof);
+- switch (error) {
+- case 0:
+- break;
+- case -ENOSPC:
+- case -EDQUOT:
+- /* retry without any preallocation */
+- trace_xfs_delalloc_enospc(ip, offset, count);
+- if (prealloc_blocks) {
+- prealloc_blocks = 0;
+- goto retry;
+- }
+- fallthrough;
+- default:
+- goto out_unlock;
+- }
+-
+ if (allocfork == XFS_COW_FORK) {
++ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
++ end_fsb - offset_fsb, prealloc_blocks, &cmap,
++ &ccur, cow_eof);
++ if (error)
++ goto out_unlock;
++
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
+ goto found_cow;
+ }
+
++ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
++ end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
++ eof);
++ if (error)
++ goto out_unlock;
++
+ /*
+ * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+ * them out if the write happens to fail.
+ */
+ seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ xfs_iunlock(ip, lockmode);
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
+
+ found_imap:
+ seq = xfs_iomap_inode_sequence(ip, 0);
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ xfs_iunlock(ip, lockmode);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
+
++convert_delay:
++ xfs_iunlock(ip, lockmode);
++ truncate_pagecache(inode, offset);
++ error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
++ iomap, NULL);
++ if (error)
++ return error;
++
++ trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
++ return 0;
++
+ found_cow:
+ seq = xfs_iomap_inode_sequence(ip, 0);
+ if (imap.br_startoff <= offset_fsb) {
+@@ -1165,17 +1176,17 @@ xfs_buffered_write_iomap_begin(
+ if (error)
+ goto out_unlock;
+ seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ xfs_iunlock(ip, lockmode);
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+ IOMAP_F_SHARED, seq);
+ }
+
+ xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ xfs_iunlock(ip, lockmode);
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
+
+ out_unlock:
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ xfs_iunlock(ip, lockmode);
+ return error;
+ }
+
+@@ -1323,7 +1334,7 @@ xfs_seek_iomap_begin(
+ if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
+ if (data_fsb < cow_fsb + cmap.br_blockcount)
+ end_fsb = min(end_fsb, data_fsb);
+- xfs_trim_extent(&cmap, offset_fsb, end_fsb);
++ xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb);
+ seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+ IOMAP_F_SHARED, seq);
+@@ -1348,7 +1359,7 @@ xfs_seek_iomap_begin(
+ imap.br_state = XFS_EXT_NORM;
+ done:
+ seq = xfs_iomap_inode_sequence(ip, 0);
+- xfs_trim_extent(&imap, offset_fsb, end_fsb);
++ xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
+ out_unlock:
+ xfs_iunlock(ip, lockmode);
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 2b3b05c28e9e48..b8ec045708c318 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -1298,6 +1298,13 @@ xfs_setup_inode(
+ gfp_mask = mapping_gfp_mask(inode->i_mapping);
+ mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));
+
++ /*
++ * For real-time inodes, update the stable write flags to that of the RT
++ * device instead of the data device.
++ */
++ if (S_ISREG(inode->i_mode) && XFS_IS_REALTIME_INODE(ip))
++ xfs_update_stable_writes(ip);
++
+ /*
+ * If there is no attribute fork no ACL can exist on this inode,
+ * and it can't have any file capabilities attached to it either.
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 51c100c861770f..a1650fc81382f9 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -1542,6 +1542,7 @@ xlog_alloc_log(
+ log->l_covered_state = XLOG_STATE_COVER_IDLE;
+ set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
+ INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
++ INIT_LIST_HEAD(&log->r_dfops);
+
+ log->l_prev_block = -1;
+ /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
+@@ -1893,9 +1894,7 @@ xlog_write_iclog(
+ * the buffer manually, the code needs to be kept in sync
+ * with the I/O completion path.
+ */
+- xlog_state_done_syncing(iclog);
+- up(&iclog->ic_sema);
+- return;
++ goto sync;
+ }
+
+ /*
+@@ -1925,20 +1924,17 @@ xlog_write_iclog(
+ * avoid shutdown re-entering this path and erroring out again.
+ */
+ if (log->l_targ != log->l_mp->m_ddev_targp &&
+- blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) {
+- xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
+- return;
+- }
++ blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev))
++ goto shutdown;
+ }
+ if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
+ iclog->ic_bio.bi_opf |= REQ_FUA;
+
+ iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
+
+- if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
+- xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
+- return;
+- }
++ if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count))
++ goto shutdown;
++
+ if (is_vmalloc_addr(iclog->ic_data))
+ flush_kernel_vmap_range(iclog->ic_data, count);
+
+@@ -1959,6 +1955,12 @@ xlog_write_iclog(
+ }
+
+ submit_bio(&iclog->ic_bio);
++ return;
++shutdown:
++ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
++sync:
++ xlog_state_done_syncing(iclog);
++ up(&iclog->ic_sema);
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
+index fa3ad1d7b31c85..e30c06ec20e33b 100644
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -407,6 +407,7 @@ struct xlog {
+ long l_opstate; /* operational state */
+ uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
+ struct list_head *l_buf_cancel_table;
++ struct list_head r_dfops; /* recovered log intent items */
+ int l_iclog_hsize; /* size of iclog header */
+ int l_iclog_heads; /* # of iclog header sectors */
+ uint l_sectBBsize; /* sector size in BBs (2^n) */
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 13b94d2e605bd9..9f9d3abad2cf35 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -1723,30 +1723,24 @@ xlog_clear_stale_blocks(
+ */
+ void
+ xlog_recover_release_intent(
+- struct xlog *log,
+- unsigned short intent_type,
+- uint64_t intent_id)
++ struct xlog *log,
++ unsigned short intent_type,
++ uint64_t intent_id)
+ {
+- struct xfs_ail_cursor cur;
+- struct xfs_log_item *lip;
+- struct xfs_ail *ailp = log->l_ailp;
++ struct xfs_defer_pending *dfp, *n;
++
++ list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
++ struct xfs_log_item *lip = dfp->dfp_intent;
+
+- spin_lock(&ailp->ail_lock);
+- for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
+- lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
+ if (lip->li_type != intent_type)
+ continue;
+ if (!lip->li_ops->iop_match(lip, intent_id))
+ continue;
+
+- spin_unlock(&ailp->ail_lock);
+- lip->li_ops->iop_release(lip);
+- spin_lock(&ailp->ail_lock);
+- break;
+- }
++ ASSERT(xlog_item_is_intent(lip));
+
+- xfs_trans_ail_cursor_done(&cur);
+- spin_unlock(&ailp->ail_lock);
++ xfs_defer_cancel_recovery(log->l_mp, dfp);
++ }
+ }
+
+ int
+@@ -1939,6 +1933,29 @@ xlog_buf_readahead(
+ xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
+ }
+
++/*
++ * Create a deferred work structure for resuming and tracking the progress of a
++ * log intent item that was found during recovery.
++ */
++void
++xlog_recover_intent_item(
++ struct xlog *log,
++ struct xfs_log_item *lip,
++ xfs_lsn_t lsn,
++ unsigned int dfp_type)
++{
++ ASSERT(xlog_item_is_intent(lip));
++
++ xfs_defer_start_recovery(lip, dfp_type, &log->r_dfops);
++
++ /*
++ * Insert the intent into the AIL directly and drop one reference so
++ * that finishing or canceling the work will drop the other.
++ */
++ xfs_trans_ail_insert(log->l_ailp, lip, lsn);
++ lip->li_ops->iop_unpin(lip, 0);
++}
++
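All of the *_commit_pass2 hunks earlier in this patch now funnel through this helper; the shape of such a caller, heavily condensed (xlog_recover_example_commit_pass2, xfs_example_log_item and xfs_example_init are hypothetical, and the ops type shown is only illustrative):

/* Sketch: a pass-2 handler hands its reconstructed intent to recovery. */
STATIC int
xlog_recover_example_commit_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_example_log_item	*xlip;

	xlip = xfs_example_init(log->l_mp, item);	/* hypothetical ctor */
	if (!xlip)
		return -ENOMEM;

	/* Queue on log->r_dfops and pin the intent in the AIL at @lsn. */
	xlog_recover_intent_item(log, &xlip->item, lsn,
			XFS_DEFER_OPS_TYPE_FREE);
	return 0;
}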
+ STATIC int
+ xlog_recover_items_pass2(
+ struct xlog *log,
+@@ -2511,7 +2528,7 @@ xlog_abort_defer_ops(
+
+ list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
+ list_del_init(&dfc->dfc_list);
+- xfs_defer_ops_capture_free(mp, dfc);
++ xfs_defer_ops_capture_abort(mp, dfc);
+ }
+ }
+
+@@ -2533,29 +2550,22 @@ xlog_abort_defer_ops(
+ */
+ STATIC int
+ xlog_recover_process_intents(
+- struct xlog *log)
++ struct xlog *log)
+ {
+ LIST_HEAD(capture_list);
+- struct xfs_ail_cursor cur;
+- struct xfs_log_item *lip;
+- struct xfs_ail *ailp;
+- int error = 0;
++ struct xfs_defer_pending *dfp, *n;
++ int error = 0;
+ #if defined(DEBUG) || defined(XFS_WARN)
+- xfs_lsn_t last_lsn;
+-#endif
++ xfs_lsn_t last_lsn;
+
+- ailp = log->l_ailp;
+- spin_lock(&ailp->ail_lock);
+-#if defined(DEBUG) || defined(XFS_WARN)
+ last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
+ #endif
+- for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+- lip != NULL;
+- lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
+- const struct xfs_item_ops *ops;
+
+- if (!xlog_item_is_intent(lip))
+- break;
++ list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
++ struct xfs_log_item *lip = dfp->dfp_intent;
++ const struct xfs_item_ops *ops = lip->li_ops;
++
++ ASSERT(xlog_item_is_intent(lip));
+
+ /*
+ * We should never see a redo item with a LSN higher than
+@@ -2573,19 +2583,15 @@ xlog_recover_process_intents(
+ * The recovery function can free the log item, so we must not
+ * access lip after it returns.
+ */
+- spin_unlock(&ailp->ail_lock);
+- ops = lip->li_ops;
+- error = ops->iop_recover(lip, &capture_list);
+- spin_lock(&ailp->ail_lock);
++ error = ops->iop_recover(dfp, &capture_list);
+ if (error) {
+ trace_xlog_intent_recovery_failed(log->l_mp, error,
+ ops->iop_recover);
+ break;
+ }
+- }
+
+- xfs_trans_ail_cursor_done(&cur);
+- spin_unlock(&ailp->ail_lock);
++ xfs_defer_cancel_recovery(log->l_mp, dfp);
++ }
+ if (error)
+ goto err;
+
+@@ -2606,27 +2612,27 @@ xlog_recover_process_intents(
+ */
+ STATIC void
+ xlog_recover_cancel_intents(
+- struct xlog *log)
++ struct xlog *log)
+ {
+- struct xfs_log_item *lip;
+- struct xfs_ail_cursor cur;
+- struct xfs_ail *ailp;
+-
+- ailp = log->l_ailp;
+- spin_lock(&ailp->ail_lock);
+- lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+- while (lip != NULL) {
+- if (!xlog_item_is_intent(lip))
+- break;
++ struct xfs_defer_pending *dfp, *n;
+
+- spin_unlock(&ailp->ail_lock);
+- lip->li_ops->iop_release(lip);
+- spin_lock(&ailp->ail_lock);
+- lip = xfs_trans_ail_cursor_next(ailp, &cur);
++ list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
++ ASSERT(xlog_item_is_intent(dfp->dfp_intent));
++
++ xfs_defer_cancel_recovery(log->l_mp, dfp);
+ }
++}
+
+- xfs_trans_ail_cursor_done(&cur);
+- spin_unlock(&ailp->ail_lock);
++/*
++ * Transfer ownership of the recovered log intent item to the recovery
++ * transaction.
++ */
++void
++xlog_recover_transfer_intent(
++ struct xfs_trans *tp,
++ struct xfs_defer_pending *dfp)
++{
++ dfp->dfp_intent = NULL;
+ }
+
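Clearing dfp_intent here hands the intent's reference over to the recovery transaction, so a later xfs_defer_cancel_recovery() pass will not release it a second time. A hedged sketch of the call order inside an ->iop_recover handler (xfs_trans_get_done_item() is a placeholder name; the real instances are the xfs_cui_item_recover()/xfs_rui_item_recover() hunks below):

error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
if (error)
	return error;

donep = xfs_trans_get_done_item(tp, intentp);	/* log the done item */
xlog_recover_transfer_intent(tp, dfp);		/* dfp->dfp_intent = NULL */
/* from here on, cancelling recovery must not touch the intent */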
+ /*
+@@ -2959,7 +2965,7 @@ xlog_do_recovery_pass(
+ int error = 0, h_size, h_len;
+ int error2 = 0;
+ int bblks, split_bblks;
+- int hblks, split_hblks, wrapped_hblks;
++ int hblks = 1, split_hblks, wrapped_hblks;
+ int i;
+ struct hlist_head rhash[XLOG_RHASH_SIZE];
+ LIST_HEAD (buffer_list);
+@@ -3015,14 +3021,22 @@ xlog_do_recovery_pass(
+ if (error)
+ goto bread_err1;
+
+- hblks = xlog_logrec_hblks(log, rhead);
+- if (hblks != 1) {
+- kmem_free(hbp);
+- hbp = xlog_alloc_buffer(log, hblks);
++ /*
++ * This open codes xlog_logrec_hblks so that we can reuse the
++ * fixed up h_size value calculated above. Without that we'd
++ * still allocate the buffer based on the incorrect on-disk
++ * size.
++ */
++ if (h_size > XLOG_HEADER_CYCLE_SIZE &&
++ (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
++ hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
++ if (hblks > 1) {
++ kmem_free(hbp);
++ hbp = xlog_alloc_buffer(log, hblks);
++ }
+ }
+ } else {
+ ASSERT(log->l_sectBBsize == 1);
+- hblks = 1;
+ hbp = xlog_alloc_buffer(log, 1);
+ h_size = XLOG_BIG_RECORD_BSIZE;
+ }
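The open-coded computation above boils down to one DIV_ROUND_UP on the fixed-up h_size. A self-contained, compilable illustration of that arithmetic (the values are made up; the constant mirrors the kernel's 32k XLOG_HEADER_CYCLE_SIZE):

#include <stdio.h>

#define XLOG_HEADER_CYCLE_SIZE	(32 * 1024)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int h_size = 64 * 1024;		/* fixed-up record header size */
	int hblks = 1;			/* new default from this hunk */

	if (h_size > XLOG_HEADER_CYCLE_SIZE)
		hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	printf("h_size=%d -> hblks=%d\n", h_size, hblks);	/* prints 2 */
	return 0;
}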
+@@ -3197,11 +3211,28 @@ xlog_do_recovery_pass(
+ kmem_free(hbp);
+
+ /*
+- * Submit buffers that have been added from the last record processed,
+- * regardless of error status.
++ * Submit buffers that have been dirtied by the last record recovered.
+ */
+- if (!list_empty(&buffer_list))
++ if (!list_empty(&buffer_list)) {
++ if (error) {
++ /*
++ * If there has been an item recovery error then we
++ * cannot allow partial checkpoint writeback to
++ * occur. We might have multiple checkpoints with the
++ * same start LSN in this buffer list, and partial
++ * writeback of a checkpoint in this situation can
++ * prevent future recovery of all the changes in the
++ * checkpoints at this start LSN.
++ *
++ * Note: Shutting down the filesystem will result in the
++ * delwri submission marking all the buffers stale,
++ * completing them and cleaning up _XBF_LOGRECOVERY
++ * state without doing any IO.
++ */
++ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
++ }
+ error2 = xfs_buf_delwri_submit(&buffer_list);
++ }
+
+ if (error && first_bad)
+ *first_bad = rhead_blk;
+diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
+index 2d4444d61e9876..f1b2592238022e 100644
+--- a/fs/xfs/xfs_refcount_item.c
++++ b/fs/xfs/xfs_refcount_item.c
+@@ -474,10 +474,11 @@ xfs_cui_validate_phys(
+ */
+ STATIC int
+ xfs_cui_item_recover(
+- struct xfs_log_item *lip,
++ struct xfs_defer_pending *dfp,
+ struct list_head *capture_list)
+ {
+ struct xfs_trans_res resv;
++ struct xfs_log_item *lip = dfp->dfp_intent;
+ struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
+ struct xfs_cud_log_item *cudp;
+ struct xfs_trans *tp;
+@@ -522,6 +523,7 @@ xfs_cui_item_recover(
+ return error;
+
+ cudp = xfs_trans_get_cud(tp, cuip);
++ xlog_recover_transfer_intent(tp, dfp);
+
+ for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
+ struct xfs_refcount_intent fake = { };
+@@ -696,12 +698,9 @@ xlog_recover_cui_commit_pass2(
+ cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
+ xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
+ atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
+- /*
+- * Insert the intent into the AIL directly and drop one reference so
+- * that finishing or canceling the work will drop the other.
+- */
+- xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
+- xfs_cui_release(cuip);
++
++ xlog_recover_intent_item(log, &cuip->cui_item, lsn,
++ XFS_DEFER_OPS_TYPE_REFCOUNT);
+ return 0;
+ }
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index eb9102453affbf..3431d0d8b6f3a1 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -429,13 +429,6 @@ xfs_reflink_fill_cow_hole(
+ if (error)
+ return error;
+
+- /*
+- * Allocation succeeded but the requested range was not even partially
+- * satisfied? Bail out!
+- */
+- if (nimaps == 0)
+- return -ENOSPC;
+-
+ convert:
+ return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
+
+@@ -498,13 +491,6 @@ xfs_reflink_fill_delalloc(
+ error = xfs_trans_commit(tp);
+ if (error)
+ return error;
+-
+- /*
+- * Allocation succeeded but the requested range was not even
+- * partially satisfied? Bail out!
+- */
+- if (nimaps == 0)
+- return -ENOSPC;
+ } while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff);
+
+ return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
+@@ -730,12 +716,6 @@ xfs_reflink_end_cow_extent(
+ int nmaps;
+ int error;
+
+- /* No COW extents? That's easy! */
+- if (ifp->if_bytes == 0) {
+- *offset_fsb = end_fsb;
+- return 0;
+- }
+-
+ resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
+ XFS_TRANS_RESERVE, &tp);
+@@ -784,6 +764,7 @@ xfs_reflink_end_cow_extent(
+ }
+ }
+ del = got;
++ xfs_trim_extent(&del, *offset_fsb, end_fsb - *offset_fsb);
+
+ /* Grab the corresponding mapping in the data fork. */
+ nmaps = 1;
+@@ -1540,6 +1521,10 @@ xfs_reflink_remap_prep(
+ if (ret)
+ goto out_unlock;
+
++ xfs_iflags_set(src, XFS_IREMAPPING);
++ if (inode_in != inode_out)
++ xfs_ilock_demote(src, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
++
+ return 0;
+ out_unlock:
+ xfs_iunlock2_io_mmap(src, dest);
+diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
+index 0e0e747028da2e..5e8a02d2b045de 100644
+--- a/fs/xfs/xfs_rmap_item.c
++++ b/fs/xfs/xfs_rmap_item.c
+@@ -504,10 +504,11 @@ xfs_rui_validate_map(
+ */
+ STATIC int
+ xfs_rui_item_recover(
+- struct xfs_log_item *lip,
++ struct xfs_defer_pending *dfp,
+ struct list_head *capture_list)
+ {
+ struct xfs_trans_res resv;
++ struct xfs_log_item *lip = dfp->dfp_intent;
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
+ struct xfs_rud_log_item *rudp;
+ struct xfs_trans *tp;
+@@ -536,7 +537,9 @@ xfs_rui_item_recover(
+ XFS_TRANS_RESERVE, &tp);
+ if (error)
+ return error;
++
+ rudp = xfs_trans_get_rud(tp, ruip);
++ xlog_recover_transfer_intent(tp, dfp);
+
+ for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
+ struct xfs_rmap_intent fake = { };
+@@ -702,12 +705,9 @@ xlog_recover_rui_commit_pass2(
+ ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
+ xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
+ atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
+- /*
+- * Insert the intent into the AIL directly and drop one reference so
+- * that finishing or canceling the work will drop the other.
+- */
+- xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
+- xfs_rui_release(ruip);
++
++ xlog_recover_intent_item(log, &ruip->rui_item, lsn,
++ XFS_DEFER_OPS_TYPE_RMAP);
+ return 0;
+ }
+
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 16534e9873f694..608db1ab88a485 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -19,6 +19,7 @@
+ #include "xfs_icache.h"
+ #include "xfs_rtalloc.h"
+ #include "xfs_sb.h"
++#include "xfs_rtbitmap.h"
+
+ /*
+ * Read and return the summary information for a given extent size,
+@@ -211,6 +212,23 @@ xfs_rtallocate_range(
+ return error;
+ }
+
++/*
++ * Make sure we don't run off the end of the rt volume. Be careful that
++ * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
++ */
++static inline xfs_extlen_t
++xfs_rtallocate_clamp_len(
++ struct xfs_mount *mp,
++ xfs_rtblock_t startrtx,
++ xfs_extlen_t rtxlen,
++ xfs_extlen_t prod)
++{
++ xfs_extlen_t ret;
++
++ ret = min(mp->m_sb.sb_rextents, startrtx + rtxlen) - startrtx;
++ return rounddown(ret, prod);
++}
++
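A self-contained demo of the clamp arithmetic above (user-space, compilable), showing why the rounddown(, prod) step keeps the clamped length aligned to the allocation unit as the comment warns; all values are invented for illustration:

#include <stdio.h>

#define rounddown(x, y)	(((x) / (y)) * (y))
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long rextents = 1000;	/* rt volume size in extents */
	unsigned long start = 995, len = 16, prod = 4;
	unsigned long clamped = min(rextents, start + len) - start;

	/* plain clamp gives 5, which fails a maxlen % prod == 0 assert */
	printf("clamped=%lu aligned=%lu\n", clamped, rounddown(clamped, prod));
	return 0;
}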
+ /*
+ * Attempt to allocate an extent minlen<=len<=maxlen starting from
+ * bitmap block bbno. If we don't get maxlen then use prod to trim
+@@ -248,7 +266,7 @@ xfs_rtallocate_extent_block(
+ i <= end;
+ i++) {
+ /* Make sure we don't scan off the end of the rt volume. */
+- maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
++ maxlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod);
+
+ /*
+ * See if there's a free extent of maxlen starting at i.
+@@ -300,7 +318,7 @@ xfs_rtallocate_extent_block(
+ /*
+ * Searched the whole thing & didn't find a maxlen free extent.
+ */
+- if (minlen < maxlen && besti != -1) {
++ if (minlen <= maxlen && besti != -1) {
+ xfs_extlen_t p; /* amount to trim length by */
+
+ /*
+@@ -355,7 +373,8 @@ xfs_rtallocate_extent_exact(
+ int isfree; /* extent is free */
+ xfs_rtblock_t next; /* next block to try (dummy) */
+
+- ASSERT(minlen % prod == 0 && maxlen % prod == 0);
++ ASSERT(minlen % prod == 0);
++ ASSERT(maxlen % prod == 0);
+ /*
+ * Check if the range in question (for maxlen) is free.
+ */
+@@ -438,7 +457,9 @@ xfs_rtallocate_extent_near(
+ xfs_rtblock_t n; /* next block to try */
+ xfs_rtblock_t r; /* result block */
+
+- ASSERT(minlen % prod == 0 && maxlen % prod == 0);
++ ASSERT(minlen % prod == 0);
++ ASSERT(maxlen % prod == 0);
++
+ /*
+ * If the block number given is off the end, silently set it to
+ * the last block.
+@@ -447,7 +468,7 @@ xfs_rtallocate_extent_near(
+ bno = mp->m_sb.sb_rextents - 1;
+
+ /* Make sure we don't run off the end of the rt volume. */
+- maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
++ maxlen = xfs_rtallocate_clamp_len(mp, bno, maxlen, prod);
+ if (maxlen < minlen) {
+ *rtblock = NULLRTBLOCK;
+ return 0;
+@@ -638,7 +659,8 @@ xfs_rtallocate_extent_size(
+ xfs_rtblock_t r; /* result block number */
+ xfs_suminfo_t sum; /* summary information for extents */
+
+- ASSERT(minlen % prod == 0 && maxlen % prod == 0);
++ ASSERT(minlen % prod == 0);
++ ASSERT(maxlen % prod == 0);
+ ASSERT(maxlen != 0);
+
+ /*
+@@ -818,8 +840,6 @@ xfs_growfs_rt_alloc(
+ nmap = 1;
+ error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
+ XFS_BMAPI_METADATA, 0, &map, &nmap);
+- if (!error && nmap < 1)
+- error = -ENOSPC;
+ if (error)
+ goto out_trans_cancel;
+ /*
+@@ -954,7 +974,7 @@ xfs_growfs_rt(
+ return -EINVAL;
+
+ /* Unsupported realtime features. */
+- if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp))
++ if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp) || xfs_has_quota(mp))
+ return -EOPNOTSUPP;
+
+ nrblocks = in->newblocks;
+@@ -976,8 +996,10 @@ xfs_growfs_rt(
+ */
+ nrextents = nrblocks;
+ do_div(nrextents, in->extsize);
++ if (!xfs_validate_rtextents(nrextents))
++ return -EINVAL;
+ nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize);
+- nrextslog = xfs_highbit32(nrextents);
++ nrextslog = xfs_compute_rextslog(nrextents);
+ nrsumlevels = nrextslog + 1;
+ nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks;
+ nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+@@ -1039,13 +1061,16 @@ xfs_growfs_rt(
+ nsbp->sb_rextents = nsbp->sb_rblocks;
+ do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
+ ASSERT(nsbp->sb_rextents != 0);
+- nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
++ nsbp->sb_rextslog = xfs_compute_rextslog(nsbp->sb_rextents);
+ nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
+ nrsumsize =
+ (uint)sizeof(xfs_suminfo_t) * nrsumlevels *
+ nsbp->sb_rbmblocks;
+ nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+ nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
++ /* recompute growfsrt reservation from new rsumsize */
++ xfs_trans_resv_calc(nmp, &nmp->m_resv);
++
+ /*
+ * Start a transaction, get the log reservation.
+ */
+@@ -1129,6 +1154,8 @@ xfs_growfs_rt(
+ */
+ mp->m_rsumlevels = nrsumlevels;
+ mp->m_rsumsize = nrsumsize;
++ /* recompute growfsrt reservation from new rsumsize */
++ xfs_trans_resv_calc(mp, &mp->m_resv);
+
+ error = xfs_trans_commit(tp);
+ if (error)
+diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
+index 62c7ad79cbb618..11859c259a1c43 100644
+--- a/fs/xfs/xfs_rtalloc.h
++++ b/fs/xfs/xfs_rtalloc.h
+@@ -11,22 +11,6 @@
+ struct xfs_mount;
+ struct xfs_trans;
+
+-/*
+- * XXX: Most of the realtime allocation functions deal in units of realtime
+- * extents, not realtime blocks. This looks funny when paired with the type
+- * name and screams for a larger cleanup.
+- */
+-struct xfs_rtalloc_rec {
+- xfs_rtblock_t ar_startext;
+- xfs_rtblock_t ar_extcount;
+-};
+-
+-typedef int (*xfs_rtalloc_query_range_fn)(
+- struct xfs_mount *mp,
+- struct xfs_trans *tp,
+- const struct xfs_rtalloc_rec *rec,
+- void *priv);
+-
+ #ifdef CONFIG_XFS_RT
+ /*
+ * Function prototypes for exported functions.
+@@ -48,15 +32,6 @@ xfs_rtallocate_extent(
+ xfs_extlen_t prod, /* extent product factor */
+ xfs_rtblock_t *rtblock); /* out: start block allocated */
+
+-/*
+- * Free an extent in the realtime subvolume. Length is expressed in
+- * realtime extents, as is the block number.
+- */
+-int /* error */
+-xfs_rtfree_extent(
+- struct xfs_trans *tp, /* transaction pointer */
+- xfs_rtblock_t bno, /* starting block number to free */
+- xfs_extlen_t len); /* length of extent freed */
+
+ /*
+ * Initialize realtime fields in the mount structure.
+@@ -98,55 +73,12 @@ xfs_growfs_rt(
+ struct xfs_mount *mp, /* file system mount structure */
+ xfs_growfs_rt_t *in); /* user supplied growfs struct */
+
+-/*
+- * From xfs_rtbitmap.c
+- */
+-int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
+-int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t start, xfs_extlen_t len, int val,
+- xfs_rtblock_t *new, int *stat);
+-int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t start, xfs_rtblock_t limit,
+- xfs_rtblock_t *rtblock);
+-int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t start, xfs_rtblock_t limit,
+- xfs_rtblock_t *rtblock);
+-int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t start, xfs_extlen_t len, int val);
+-int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp,
+- int log, xfs_rtblock_t bbno, int delta,
+- struct xfs_buf **rbpp, xfs_fsblock_t *rsb,
+- xfs_suminfo_t *sum);
+-int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
+- xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp,
+- xfs_fsblock_t *rsb);
+-int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t start, xfs_extlen_t len,
+- struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
+-int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
+- const struct xfs_rtalloc_rec *low_rec,
+- const struct xfs_rtalloc_rec *high_rec,
+- xfs_rtalloc_query_range_fn fn, void *priv);
+-int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtalloc_query_range_fn fn,
+- void *priv);
+-bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
+-int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
+- xfs_rtblock_t start, xfs_extlen_t len,
+- bool *is_free);
+ int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp);
+ #else
+-# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
+-# define xfs_rtfree_extent(t,b,l) (ENOSYS)
+-# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
+-# define xfs_growfs_rt(mp,in) (ENOSYS)
+-# define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS)
+-# define xfs_rtalloc_query_all(m,t,f,p) (ENOSYS)
+-# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS)
+-# define xfs_verify_rtbno(m, r) (false)
+-# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS)
+-# define xfs_rtalloc_reinit_frextents(m) (0)
++# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (-ENOSYS)
++# define xfs_rtpick_extent(m,t,l,rb) (-ENOSYS)
++# define xfs_growfs_rt(mp,in) (-ENOSYS)
++# define xfs_rtalloc_reinit_frextents(m) (0)
+ static inline int /* error */
+ xfs_rtmount_init(
+ xfs_mount_t *mp) /* file system mount structure */
+@@ -157,7 +89,7 @@ xfs_rtmount_init(
+ xfs_warn(mp, "Not built with CONFIG_XFS_RT");
+ return -ENOSYS;
+ }
+-# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
++# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (-ENOSYS))
+ # define xfs_rtunmount_inodes(m)
+ #endif /* CONFIG_XFS_RT */
+
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 819a3568b28f46..13007b6bc9f337 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1503,6 +1503,18 @@ xfs_fs_fill_super(
+
+ mp->m_super = sb;
+
++ /*
++ * Copy VFS mount flags from the context now that all parameter parsing
++ * is guaranteed to have been completed by either the old mount API or
++ * the newer fsopen/fsconfig API.
++ */
++ if (fc->sb_flags & SB_RDONLY)
++ set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
++ if (fc->sb_flags & SB_DIRSYNC)
++ mp->m_features |= XFS_FEAT_DIRSYNC;
++ if (fc->sb_flags & SB_SYNCHRONOUS)
++ mp->m_features |= XFS_FEAT_WSYNC;
++
+ error = xfs_fs_validate_params(mp);
+ if (error)
+ return error;
+@@ -1972,6 +1984,11 @@ static const struct fs_context_operations xfs_context_ops = {
+ .free = xfs_fs_free,
+ };
+
++/*
++ * WARNING: do not initialise any parameters in this function that depend on
++ * mount option parsing having already been performed as this can be called from
++ * fsopen() before any parameters have been set.
++ */
+ static int xfs_init_fs_context(
+ struct fs_context *fc)
+ {
+@@ -2003,16 +2020,6 @@ static int xfs_init_fs_context(
+ mp->m_logbsize = -1;
+ mp->m_allocsize_log = 16; /* 64k */
+
+- /*
+- * Copy binary VFS mount flags we are interested in.
+- */
+- if (fc->sb_flags & SB_RDONLY)
+- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+- if (fc->sb_flags & SB_DIRSYNC)
+- mp->m_features |= XFS_FEAT_DIRSYNC;
+- if (fc->sb_flags & SB_SYNCHRONOUS)
+- mp->m_features |= XFS_FEAT_WSYNC;
+-
+ fc->s_fs_info = mp;
+ fc->ops = &xfs_context_ops;
+
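The move is motivated by the new mount API's call ordering: with fsopen()/fsconfig(), xfs_init_fs_context() runs before any parameters are parsed, so sb_flags are only final by the time FSCONFIG_CMD_CREATE reaches xfs_fs_fill_super(). A hedged user-space sketch of that sequence (error handling omitted, device and mountpoint paths invented, syscall numbers assumed to be exposed by the libc headers):

#include <fcntl.h>		/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>	/* FSCONFIG_*, MOVE_MOUNT_* */

int main(void)
{
	int fsfd = syscall(SYS_fsopen, "xfs", 0);	/* init_fs_context() */

	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
		"/dev/sdb1", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "ro", NULL, 0);
	/* only now are all sb_flags known: */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
		MOVE_MOUNT_F_EMPTY_PATH);
	return 0;
}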
+diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
+index 6e3646d524ceb6..ead65f5f8dc329 100644
+--- a/fs/xfs/xfs_trans.h
++++ b/fs/xfs/xfs_trans.h
+@@ -66,6 +66,8 @@ struct xfs_log_item {
+ { (1u << XFS_LI_DIRTY), "DIRTY" }, \
+ { (1u << XFS_LI_WHITEOUT), "WHITEOUT" }
+
++struct xfs_defer_pending;
++
+ struct xfs_item_ops {
+ unsigned flags;
+ void (*iop_size)(struct xfs_log_item *, int *, int *);
+@@ -78,7 +80,7 @@ struct xfs_item_ops {
+ xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
+ uint (*iop_push)(struct xfs_log_item *, struct list_head *);
+ void (*iop_release)(struct xfs_log_item *);
+- int (*iop_recover)(struct xfs_log_item *lip,
++ int (*iop_recover)(struct xfs_defer_pending *dfp,
+ struct list_head *capture_list);
+ bool (*iop_match)(struct xfs_log_item *item, uint64_t id);
+ struct xfs_log_item *(*iop_relog)(struct xfs_log_item *intent,
+@@ -275,19 +277,14 @@ static inline void
+ xfs_trans_set_context(
+ struct xfs_trans *tp)
+ {
+- ASSERT(current->journal_info == NULL);
+ tp->t_pflags = memalloc_nofs_save();
+- current->journal_info = tp;
+ }
+
+ static inline void
+ xfs_trans_clear_context(
+ struct xfs_trans *tp)
+ {
+- if (current->journal_info == tp) {
+- memalloc_nofs_restore(tp->t_pflags);
+- current->journal_info = NULL;
+- }
++ memalloc_nofs_restore(tp->t_pflags);
+ }
+
+ static inline void
+@@ -295,10 +292,8 @@ xfs_trans_switch_context(
+ struct xfs_trans *old_tp,
+ struct xfs_trans *new_tp)
+ {
+- ASSERT(current->journal_info == old_tp);
+ new_tp->t_pflags = old_tp->t_pflags;
+ old_tp->t_pflags = 0;
+- current->journal_info = new_tp;
+ }
+
+ #endif /* __XFS_TRANS_H__ */
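With the journal_info bookkeeping gone, the transaction context helpers reduce to the plain scoped-allocation pattern below. A minimal sketch using the real memalloc_nofs_save()/memalloc_nofs_restore() API, shown outside its xfs_trans wrappers for clarity:

unsigned int pflags;

pflags = memalloc_nofs_save();	/* allocations now behave as GFP_NOFS */
/* ... memory allocated here cannot recurse back into the filesystem ... */
memalloc_nofs_restore(pflags);	/* back to the caller's allocation mode */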
+diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
+index b2c9b35df8f76d..897b12ec61e29c 100644
+--- a/fs/zonefs/file.c
++++ b/fs/zonefs/file.c
+@@ -348,7 +348,12 @@ static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ if (error) {
+- zonefs_io_error(inode, true);
++ /*
++ * For Sync IOs, error recovery is called from
++ * zonefs_file_dio_write().
++ */
++ if (!is_sync_kiocb(iocb))
++ zonefs_io_error(inode, true);
+ return error;
+ }
+
+@@ -491,6 +496,14 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
++ /*
++ * Advance the zone write pointer offset. This assumes that the
++ * IO will succeed, which is OK to do because we do not allow
++ * partial writes (IOMAP_DIO_PARTIAL is not set) and if the IO
++ * fails, the error path will correct the write pointer offset.
++ */
++ z->z_wpoffset += count;
++ zonefs_inode_account_active(inode);
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+@@ -504,20 +517,19 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ if (ret == -ENOTBLK)
+ ret = -EBUSY;
+
+- if (zonefs_zone_is_seq(z) &&
+- (ret > 0 || ret == -EIOCBQUEUED)) {
+- if (ret > 0)
+- count = ret;
+-
+- /*
+- * Update the zone write pointer offset assuming the write
+- * operation succeeded. If it did not, the error recovery path
+- * will correct it. Also do active seq file accounting.
+- */
+- mutex_lock(&zi->i_truncate_mutex);
+- z->z_wpoffset += count;
+- zonefs_inode_account_active(inode);
+- mutex_unlock(&zi->i_truncate_mutex);
++ /*
++ * For a failed IO or partial completion, trigger error recovery
++ * to update the zone write pointer offset to a correct value.
++ * For asynchronous IOs, zonefs_file_write_dio_end_io() may already
++ * have executed error recovery if the IO already completed when we
++ * reach here. However, we cannot know that here, so we simply run
++ * error recovery again (a second run will not change anything).
++ */
++ if (zonefs_zone_is_seq(z)) {
++ if (ret > 0 && ret != count)
++ ret = -EIO;
++ if (ret < 0 && ret != -EIOCBQUEUED)
++ zonefs_io_error(inode, true);
+ }
+
+ inode_unlock:
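Taken together, the two hunks above restructure the sequential-zone write path: the write pointer offset is advanced optimistically before the IO is issued, and any failure or short completion re-syncs it through error recovery. A hedged sketch of the resulting flow; submit_dio() is a placeholder, not a kernel symbol:

mutex_lock(&zi->i_truncate_mutex);
z->z_wpoffset += count;			/* assume success up front */
zonefs_inode_account_active(inode);
mutex_unlock(&zi->i_truncate_mutex);

ret = submit_dio(iocb, from);		/* placeholder for the DIO call */
if (zonefs_zone_is_seq(z)) {
	if (ret > 0 && ret != count)
		ret = -EIO;		/* partial writes are not allowed */
	if (ret < 0 && ret != -EIOCBQUEUED)
		zonefs_io_error(inode, true);	/* re-syncs z_wpoffset */
}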
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 9d1a9808fbbba6..cc364669d723de 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -246,16 +246,18 @@ static void zonefs_inode_update_mode(struct inode *inode)
+ z->z_mode = inode->i_mode;
+ }
+
+-struct zonefs_ioerr_data {
+- struct inode *inode;
+- bool write;
+-};
+-
+ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+ {
+- struct zonefs_ioerr_data *err = data;
+- struct inode *inode = err->inode;
++ struct blk_zone *z = data;
++
++ *z = *zone;
++ return 0;
++}
++
++static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone,
++ bool write)
++{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+@@ -270,8 +272,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ data_size = zonefs_check_zone_condition(sb, z, zone);
+ isize = i_size_read(inode);
+ if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
+- !err->write && isize == data_size)
+- return 0;
++ !write && isize == data_size)
++ return;
+
+ /*
+ * At this point, we detected either a bad zone or an inconsistency
+@@ -292,7 +294,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ * In all cases, warn about inode size inconsistency and handle the
+ * IO error according to the zone condition and to the mount options.
+ */
+- if (zonefs_zone_is_seq(z) && isize != data_size)
++ if (isize != data_size)
+ zonefs_warn(sb,
+ "inode %lu: invalid size %lld (should be %lld)\n",
+ inode->i_ino, isize, data_size);
+@@ -352,8 +354,6 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ zonefs_i_size_write(inode, data_size);
+ z->z_wpoffset = data_size;
+ zonefs_inode_account_active(inode);
+-
+- return 0;
+ }
+
+ /*
+@@ -367,23 +367,25 @@ void __zonefs_io_error(struct inode *inode, bool write)
+ {
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+- struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ unsigned int noio_flag;
+- unsigned int nr_zones = 1;
+- struct zonefs_ioerr_data err = {
+- .inode = inode,
+- .write = write,
+- };
++ struct blk_zone zone;
+ int ret;
+
+ /*
+- * The only files that have more than one zone are conventional zone
+- * files with aggregated conventional zones, for which the inode zone
+- * size is always larger than the device zone size.
++ * Conventional zones have no write pointer and cannot become read-only
++ * or offline. So simply fake a report for a single or aggregated zone
++ * and let zonefs_handle_io_error() correct the zone inode information
++ * according to the mount options.
+ */
+- if (z->z_size > bdev_zone_sectors(sb->s_bdev))
+- nr_zones = z->z_size >>
+- (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
++ if (!zonefs_zone_is_seq(z)) {
++ zone.start = z->z_sector;
++ zone.len = z->z_size >> SECTOR_SHIFT;
++ zone.wp = zone.start + zone.len;
++ zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
++ zone.cond = BLK_ZONE_COND_NOT_WP;
++ zone.capacity = zone.len;
++ goto handle_io_error;
++ }
+
+ /*
+ * Memory allocations in blkdev_report_zones() can trigger a memory
+@@ -394,12 +396,20 @@ void __zonefs_io_error(struct inode *inode, bool write)
+ * the GFP_NOIO context avoids both problems.
+ */
+ noio_flag = memalloc_noio_save();
+- ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
+- zonefs_io_error_cb, &err);
+- if (ret != nr_zones)
++ ret = blkdev_report_zones(sb->s_bdev, z->z_sector, 1,
++ zonefs_io_error_cb, &zone);
++ memalloc_noio_restore(noio_flag);
++
++ if (ret != 1) {
+ zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+ inode->i_ino, ret);
+- memalloc_noio_restore(noio_flag);
++ zonefs_warn(sb, "remounting filesystem read-only\n");
++ sb->s_flags |= SB_RDONLY;
++ return;
++ }
++
++handle_io_error:
++ zonefs_handle_io_error(inode, &zone, write);
+ }
+
+ static struct kmem_cache *zonefs_inode_cachep;
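The refactor above replaces per-zone callback work with a plain copy-out: blkdev_report_zones() invokes the callback once per reported zone, and the new zonefs_io_error_cb() just snapshots the single requested zone for zonefs_handle_io_error(). A minimal sketch of that contract, using the real blkdev_report_zones() signature:

static int copy_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	*(struct blk_zone *)data = *zone;	/* snapshot for the caller */
	return 0;				/* non-zero stops the report */
}

/* caller side: */
struct blk_zone zone;
int ret = blkdev_report_zones(bdev, sector, 1, copy_zone_cb, &zone);
/* ret is the number of zones reported; anything but 1 is a failure here */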
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 254685085c825c..d9c20ae23b6326 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -539,6 +539,7 @@ int acpi_device_set_power(struct acpi_device *device, int state);
+ int acpi_bus_init_power(struct acpi_device *device);
+ int acpi_device_fix_up_power(struct acpi_device *device);
+ void acpi_device_fix_up_power_extended(struct acpi_device *adev);
++void acpi_device_fix_up_power_children(struct acpi_device *adev);
+ int acpi_bus_update_power(acpi_handle handle, int *state_p);
+ int acpi_device_update_power(struct acpi_device *device, int *state_p);
+ bool acpi_bus_power_manageable(acpi_handle handle);
+@@ -651,6 +652,7 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+ bool acpi_quirk_skip_acpi_ac_and_battery(void);
+ int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
+ void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
++int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
+ #else
+ static inline bool acpi_device_override_status(struct acpi_device *adev,
+ unsigned long long *status)
+@@ -668,23 +670,22 @@ static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
+ static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
+ {
+ }
++static inline int
++acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
++{
++ *skip = false;
++ return 0;
++}
+ #endif
+
+ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+ bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
+-int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
+ bool acpi_quirk_skip_gpio_event_handlers(void);
+ #else
+ static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+ {
+ return false;
+ }
+-static inline int
+-acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+-{
+- *skip = false;
+- return 0;
+-}
+ static inline bool acpi_quirk_skip_gpio_event_handlers(void)
+ {
+ return false;
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 3d90716f952296..a3c9dd4a1ac33c 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -660,6 +660,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ void *context))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_execute_reg_methods(acpi_handle device,
++ u32 max_depth,
+ acpi_adr_space_type
+ space_id))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
+index 6126c977ece041..ec425d2834f869 100644
+--- a/include/acpi/cppc_acpi.h
++++ b/include/acpi/cppc_acpi.h
+@@ -64,6 +64,8 @@ struct cpc_desc {
+ int cpu_id;
+ int write_cmd_status;
+ int write_cmd_id;
++ /* Lock used for RMW operations in cpc_write() */
++ spinlock_t rmw_lock;
+ struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
+ struct acpi_psd_package domain_info;
+ struct kobject kobj;
+@@ -139,6 +141,7 @@ struct cppc_cpudata {
+ #ifdef CONFIG_ACPI_CPPC_LIB
+ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
+ extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
++extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
+ extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
+ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+ extern int cppc_set_enable(int cpu, bool enable);
+@@ -165,6 +168,10 @@ static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+ {
+ return -ENOTSUPP;
+ }
++static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
++{
++ return -ENOTSUPP;
++}
+ static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
+ {
+ return -ENOTSUPP;
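The new rmw_lock exists because cpc_write() may update only some bits of a register that other writers share, and an unserialised read-modify-write can lose updates. A self-contained user-space illustration of the pattern (pthreads stand in for the kernel spinlock; all values invented):

#include <pthread.h>
#include <stdio.h>

static unsigned long reg;		/* stand-in for the shared register */
static pthread_mutex_t rmw_lock = PTHREAD_MUTEX_INITIALIZER;

static void write_field(unsigned long mask, int shift, unsigned long val)
{
	pthread_mutex_lock(&rmw_lock);
	reg = (reg & ~mask) | ((val << shift) & mask);	/* RMW under lock */
	pthread_mutex_unlock(&rmw_lock);
}

int main(void)
{
	write_field(0x00ffUL, 0, 0x12);
	write_field(0xff00UL, 8, 0x34);
	printf("reg=%#lx\n", reg);	/* 0x3412 */
	return 0;
}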
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 3c8bba9f1114a8..be1dd4c1a91744 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
+ void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+
+ struct list_head *ghes_get_devices(void);
++
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
+ #else
+ static inline struct list_head *ghes_get_devices(void) { return NULL; }
++
++static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
+ #endif
+
+ int ghes_estatus_pool_init(unsigned int num_ghes);
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 961f4d88f9ef78..1985c22d90ca47 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -296,5 +296,13 @@ do { \
+ #define io_stop_wc() do { } while (0)
+ #endif
+
++/*
++ * Architectures that guarantee an implicit smp_mb() in switch_mm()
++ * can override smp_mb__after_switch_mm.
++ */
++#ifndef smp_mb__after_switch_mm
++# define smp_mb__after_switch_mm() smp_mb()
++#endif
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __ASM_GENERIC_BARRIER_H */
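The #ifndef dance above lets an architecture whose switch_mm() already implies a full barrier define the macro to a no-op, while everyone else inherits smp_mb(). A compilable stand-alone model of that override pattern (all names here are local stand-ins):

#include <stdio.h>

#define smp_mb()	printf("full barrier\n")

/* "arch header": uncomment to model an arch with an implicit barrier */
/* #define smp_mb__after_switch_mm()	do { } while (0) */

/* "generic header" fallback, as in asm-generic/barrier.h */
#ifndef smp_mb__after_switch_mm
# define smp_mb__after_switch_mm()	smp_mb()
#endif

int main(void)
{
	smp_mb__after_switch_mm();
	return 0;
}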
+diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
+index 84ec53ccc45029..7ee8a179d1036e 100644
+--- a/include/asm-generic/cacheflush.h
++++ b/include/asm-generic/cacheflush.h
+@@ -91,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+ }
+ #endif
+
++#ifndef flush_cache_vmap_early
++static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
++{
++}
++#endif
++
+ #ifndef flush_cache_vunmap
+ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+ {
+diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
+index 3df9f59a544e48..f27d66fdc00a28 100644
+--- a/include/asm-generic/cmpxchg-local.h
++++ b/include/asm-generic/cmpxchg-local.h
+@@ -34,7 +34,7 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
+ *(u16 *)ptr = (new & 0xffffu);
+ break;
+ case 4: prev = *(u32 *)ptr;
+- if (prev == (old & 0xffffffffffu))
++ if (prev == (old & 0xffffffffu))
+ *(u32 *)ptr = (new & 0xffffffffu);
+ break;
+ case 8: prev = *(u64 *)ptr;
+diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
+index 1a3ad6d2983308..c32e0cf23c9096 100644
+--- a/include/asm-generic/numa.h
++++ b/include/asm-generic/numa.h
+@@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+ void __init numa_set_distance(int from, int to, int distance);
+ void __init numa_free_distance(void);
+ void __init early_map_cpu_to_node(unsigned int cpu, int nid);
++int __init early_cpu_to_node(int cpu);
+ void numa_store_cpu_info(unsigned int cpu);
+ void numa_add_cpu(unsigned int cpu);
+ void numa_remove_cpu(unsigned int cpu);
+@@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { }
+ static inline void numa_remove_cpu(unsigned int cpu) { }
+ static inline void arch_numa_init(void) { }
+ static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
++static inline int early_cpu_to_node(int cpu) { return 0; }
+
+ #endif /* CONFIG_NUMA */
+
+diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
+index 995513fa26904a..0655aa5b57b290 100644
+--- a/include/asm-generic/qspinlock.h
++++ b/include/asm-generic/qspinlock.h
+@@ -70,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+ */
+ static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+ {
+- return !atomic_read(&lock.val);
++ return !lock.val.counter;
+ }
+
+ /**
+diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
+index 699650f819706d..a84c64e5f11ece 100644
+--- a/include/asm-generic/unaligned.h
++++ b/include/asm-generic/unaligned.h
+@@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p)
+
+ static inline void __put_unaligned_be24(const u32 val, u8 *p)
+ {
+- *p++ = val >> 16;
+- *p++ = val >> 8;
+- *p++ = val;
++ *p++ = (val >> 16) & 0xff;
++ *p++ = (val >> 8) & 0xff;
++ *p++ = val & 0xff;
+ }
+
+ static inline void put_unaligned_be24(const u32 val, void *p)
+@@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
+
+ static inline void __put_unaligned_le24(const u32 val, u8 *p)
+ {
+- *p++ = val;
+- *p++ = val >> 8;
+- *p++ = val >> 16;
++ *p++ = val & 0xff;
++ *p++ = (val >> 8) & 0xff;
++ *p++ = (val >> 16) & 0xff;
+ }
+
+ static inline void put_unaligned_le24(const u32 val, void *p)
+@@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
+
+ static inline void __put_unaligned_be48(const u64 val, u8 *p)
+ {
+- *p++ = val >> 40;
+- *p++ = val >> 32;
+- *p++ = val >> 24;
+- *p++ = val >> 16;
+- *p++ = val >> 8;
+- *p++ = val;
++ *p++ = (val >> 40) & 0xff;
++ *p++ = (val >> 32) & 0xff;
++ *p++ = (val >> 24) & 0xff;
++ *p++ = (val >> 16) & 0xff;
++ *p++ = (val >> 8) & 0xff;
++ *p++ = val & 0xff;
+ }
+
+ static inline void put_unaligned_be48(const u64 val, void *p)
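The added "& 0xff" masks change no behaviour — the stores already truncated to u8 — they only make the truncation explicit. A self-contained round-trip check of the 24-bit variant:

#include <stdio.h>
#include <stdint.h>

static void put_be24(uint32_t val, uint8_t *p)
{
	*p++ = (val >> 16) & 0xff;
	*p++ = (val >> 8) & 0xff;
	*p++ = val & 0xff;
}

static uint32_t get_be24(const uint8_t *p)
{
	return (uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 | p[2];
}

int main(void)
{
	uint8_t buf[3];

	put_be24(0xfeadbeef, buf);		/* high byte dropped */
	printf("%06x\n", get_be24(buf));	/* prints adbeef */
	return 0;
}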
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 67d8dd2f1bdec1..63029bc7c9dd01 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -101,7 +101,7 @@
+ #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+ #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+ #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
+-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
+ #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
+ #else
+ #define TEXT_MAIN .text
+@@ -356,7 +356,6 @@
+ *(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
+ MEM_KEEP(init.data*) \
+- MEM_KEEP(exit.data*) \
+ *(.data.unlikely) \
+ __start_once = .; \
+ *(.data.once) \
+@@ -521,7 +520,6 @@
+ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+ *(.ref.rodata) \
+ MEM_KEEP(init.rodata) \
+- MEM_KEEP(exit.rodata) \
+ } \
+ \
+ /* Built-in module parameters. */ \
+@@ -574,7 +572,6 @@
+ *(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
+ MEM_KEEP(init.text*) \
+- MEM_KEEP(exit.text*) \
+
+
+ /* sched.text is aligned to function alignment to ensure we have the same
+@@ -714,13 +711,10 @@
+ *(.exit.data .exit.data.*) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
+- MEM_DISCARD(exit.data*) \
+- MEM_DISCARD(exit.rodata*)
+
+ #define EXIT_TEXT \
+ *(.exit.text) \
+ *(.text.exit) \
+- MEM_DISCARD(exit.text)
+
+ #define EXIT_CALL \
+ *(.exitcall.exit)
+diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
+index c0f56fe6d22aed..d116f18de899c3 100644
+--- a/include/clocksource/timer-xilinx.h
++++ b/include/clocksource/timer-xilinx.h
+@@ -41,7 +41,7 @@ struct regmap;
+ struct xilinx_timer_priv {
+ struct regmap *map;
+ struct clk *clk;
+- u32 max;
++ u64 max;
+ };
+
+ /**
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index ef8ce86b1f7887..08b803a4fcde4c 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -136,6 +136,7 @@ struct af_alg_async_req {
+ * recvmsg is invoked.
+ * @init: True if metadata has been sent.
+ * @len: Length of memory allocated for this data structure.
++ * @inflight: Non-zero when AIO requests are in flight.
+ */
+ struct af_alg_ctx {
+ struct list_head tsgl_list;
+@@ -154,6 +155,8 @@ struct af_alg_ctx {
+ bool init;
+
+ unsigned int len;
++
++ unsigned int inflight;
+ };
+
+ int af_alg_register_type(const struct af_alg_type *type);
+diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
+index d2316242a98843..be97b97a75dd2d 100644
+--- a/include/crypto/internal/simd.h
++++ b/include/crypto/internal/simd.h
+@@ -14,11 +14,10 @@
+ struct simd_skcipher_alg;
+ struct skcipher_alg;
+
+-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
++struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
++ const char *algname,
+ const char *drvname,
+ const char *basename);
+-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+- const char *basename);
+ void simd_skcipher_free(struct simd_skcipher_alg *alg);
+
+ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+@@ -32,13 +31,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+ struct simd_aead_alg;
+ struct aead_alg;
+
+-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+- const char *drvname,
+- const char *basename);
+-struct simd_aead_alg *simd_aead_create(const char *algname,
+- const char *basename);
+-void simd_aead_free(struct simd_aead_alg *alg);
+-
+ int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
+index 05100e91ecb96c..6fc9bb2979e451 100644
+--- a/include/drm/bridge/samsung-dsim.h
++++ b/include/drm/bridge/samsung-dsim.h
+@@ -53,6 +53,7 @@ struct samsung_dsim_driver_data {
+ unsigned int plltmr_reg;
+ unsigned int has_freqband:1;
+ unsigned int has_clklane_stop:1;
++ unsigned int has_broken_fifoctrl_emptyhdr:1;
+ unsigned int num_clks;
+ unsigned int min_freq;
+ unsigned int max_freq;
+diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
+index 86f24a759268ae..65d76f9e84305d 100644
+--- a/include/drm/display/drm_dp_helper.h
++++ b/include/drm/display/drm_dp_helper.h
+@@ -449,9 +449,15 @@ struct drm_dp_aux {
+ * @is_remote: Is this AUX CH actually using sideband messaging.
+ */
+ bool is_remote;
++
++ /**
++ * @powered_down: If true then the remote endpoint is powered down.
++ */
++ bool powered_down;
+ };
+
+ int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
++void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index ed5c9660563c49..8eeb6730ac6ded 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -832,7 +832,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
+ int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count);
+
+-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
++int drm_dp_calc_pbn_mode(int clock, int bpp);
+
+ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
+
+diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
+index d4955062c77e39..f93e23985f4e4b 100644
+--- a/include/drm/drm_accel.h
++++ b/include/drm/drm_accel.h
+@@ -51,11 +51,10 @@
+
+ #if IS_ENABLED(CONFIG_DRM_ACCEL)
+
++extern struct xarray accel_minors_xa;
++
+ void accel_core_exit(void);
+ int accel_core_init(void);
+-void accel_minor_remove(int index);
+-int accel_minor_alloc(void);
+-void accel_minor_replace(struct drm_minor *minor, int index);
+ void accel_set_device_instance_params(struct device *kdev, int index);
+ int accel_open(struct inode *inode, struct file *filp);
+ void accel_debugfs_init(struct drm_minor *minor, int minor_id);
+@@ -72,19 +71,6 @@ static inline int __init accel_core_init(void)
+ return 0;
+ }
+
+-static inline void accel_minor_remove(int index)
+-{
+-}
+-
+-static inline int accel_minor_alloc(void)
+-{
+- return -EOPNOTSUPP;
+-}
+-
+-static inline void accel_minor_replace(struct drm_minor *minor, int index)
+-{
+-}
+-
+ static inline void accel_set_device_instance_params(struct device *kdev, int index)
+ {
+ }
+diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
+index 536a0b0091c3a9..006b5c977ad772 100644
+--- a/include/drm/drm_atomic_helper.h
++++ b/include/drm/drm_atomic_helper.h
+@@ -97,6 +97,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+
+ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
++void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
++ struct drm_atomic_state *state);
+
+ #define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
+ #define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index c339fc85fd076d..b20cfe5bf30b3f 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -192,7 +192,7 @@ struct drm_bridge_funcs {
+ * or &drm_encoder_helper_funcs.dpms hook.
+ *
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+- * singals) feeding it is no longer running when this callback is
++ * signals) feeding it is no longer running when this callback is
+ * called.
+ *
+ * The @post_disable callback is optional.
+@@ -555,6 +555,37 @@ struct drm_bridge_funcs {
+ int (*get_modes)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
++ /**
++ * @edid_read:
++ *
++ * Read the EDID data of the connected display.
++ *
++ * The @edid_read callback is the preferred way of reporting mode
++ * information for a display connected to the bridge output. Bridges
++ * that support reading EDID shall implement this callback and leave
++ * the @get_modes callback unimplemented.
++ *
++ * The caller of this operation shall first verify the output
++ * connection status and refrain from reading EDID from a disconnected
++ * output.
++ *
++ * This callback is optional. Bridges that implement it shall set the
++ * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
++ *
++ * The connector parameter shall be used for the sole purpose of EDID
++ * retrieval, and shall not be stored internally by bridge drivers for
++ * future usage.
++ *
++ * RETURNS:
++ *
++ * An edid structure newly allocated with drm_edid_alloc() or returned
++ * from drm_edid_read() family of functions on success, or NULL
++ * otherwise. The caller is responsible for freeing the returned edid
++ * structure with drm_edid_free().
++ */
++ const struct drm_edid *(*edid_read)(struct drm_bridge *bridge,
++ struct drm_connector *connector);
++
+ /**
+ * @get_edid:
+ *
+@@ -888,6 +919,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+ int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector);
++const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
++ struct drm_connector *connector);
+ struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+ void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
+index 81c298488b0c87..6b5eec10c3db37 100644
+--- a/include/drm/drm_color_mgmt.h
++++ b/include/drm/drm_color_mgmt.h
+@@ -24,6 +24,7 @@
+ #define __DRM_COLOR_MGMT_H__
+
+ #include <linux/ctype.h>
++#include <linux/math64.h>
+ #include <drm/drm_property.h>
+
+ struct drm_crtc;
+diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
+index 566497eeb3b812..bc1f6b378195fd 100644
+--- a/include/drm/drm_displayid.h
++++ b/include/drm/drm_displayid.h
+@@ -30,7 +30,6 @@ struct drm_edid;
+ #define VESA_IEEE_OUI 0x3a0292
+
+ /* DisplayID Structure versions */
+-#define DISPLAY_ID_STRUCTURE_VER_12 0x12
+ #define DISPLAY_ID_STRUCTURE_VER_20 0x20
+
+ /* DisplayID Structure v1r2 Data Blocks */
+diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
+index 9813fa759b75d4..0a72b13781f139 100644
+--- a/include/drm/drm_drv.h
++++ b/include/drm/drm_drv.h
+@@ -110,6 +110,15 @@ enum drm_driver_feature {
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
++ /**
++ * @DRIVER_CURSOR_HOTSPOT:
++ *
++ * Driver supports and requires cursor hotspot information in the
++ * cursor plane (e.g. cursor plane has to actually track the mouse
++ * cursor and the clients are required to set hotspot in order for
++ * the cursor planes to work correctly).
++ */
++ DRIVER_CURSOR_HOTSPOT = BIT(9),
+
+ /* IMPORTANT: Below are all the legacy flags, add new ones above. */
+
+diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
+index 010239392adfb9..cc61f6a2b2ad6e 100644
+--- a/include/drm/drm_file.h
++++ b/include/drm/drm_file.h
+@@ -45,6 +45,8 @@ struct drm_printer;
+ struct device;
+ struct file;
+
++extern struct xarray drm_minors_xa;
++
+ /*
+ * FIXME: Not sure we want to have drm_minor here in the end, but to avoid
+ * header include loops we need it here for now.
+@@ -228,6 +230,18 @@ struct drm_file {
+ */
+ bool is_master;
+
++ /**
++ * @supports_virtualized_cursor_plane:
++ *
++ * This client is capable of handling the cursor plane with the
++ * restrictions imposed on it by the virtualized drivers.
++ *
++ * This implies that the cursor plane has to behave like a cursor
++ * i.e. track cursor movement. It also requires setting of the
++ * hotspot properties by the client on the cursor plane.
++ */
++ bool supports_virtualized_cursor_plane;
++
+ /**
+ * @master:
+ *
+@@ -256,8 +270,15 @@ struct drm_file {
+ /** @master_lookup_lock: Serializes @master. */
+ spinlock_t master_lookup_lock;
+
+- /** @pid: Process that opened this file. */
+- struct pid *pid;
++ /**
++ * @pid: Process that is using this file.
++ *
++ * Must only be dereferenced under a rcu_read_lock or equivalent.
++ *
++ * Updates are guarded with dev->filelist_mutex and reference must be
++ * dropped after a RCU grace period to accommodate lockless readers.
++ */
++ struct pid __rcu *pid;
+
+ /** @client_id: A unique id for fdinfo */
+ u64 client_id;
+@@ -420,6 +441,11 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
+ return file_priv->minor->type == DRM_MINOR_ACCEL;
+ }
+
++void drm_file_update_pid(struct drm_file *);
++
++struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
++void drm_minor_release(struct drm_minor *minor);
++
+ int drm_open(struct inode *inode, struct file *filp);
+ int drm_open_helper(struct file *filp, struct drm_minor *minor);
+ ssize_t drm_read(struct file *filp, char __user *buffer,
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index 6ea339d5de0884..81572d32db0c2b 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -71,7 +71,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ }
+
+ #define DRM_FIXED_POINT 32
+-#define DRM_FIXED_POINT_HALF 16
+ #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
+@@ -90,12 +89,12 @@ static inline int drm_fixp2int(s64 a)
+
+ static inline int drm_fixp2int_round(s64 a)
+ {
+- return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
++ return drm_fixp2int(a + DRM_FIXED_ONE / 2);
+ }
+
+ static inline int drm_fixp2int_ceil(s64 a)
+ {
+- if (a > 0)
++ if (a >= 0)
+ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+ else
+ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+index bc9f6aa2f3fec3..7c2ec139c464ad 100644
+--- a/include/drm/drm_gem.h
++++ b/include/drm/drm_gem.h
+@@ -544,6 +544,19 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+
+ int drm_gem_evict(struct drm_gem_object *obj);
+
++/**
++ * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
++ *
++ * This helper should only be used for fdinfo shared memory stats to determine
++ * if a GEM object is shared.
++ *
++ * @obj: obj in question
++ */
++static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
++{
++ return (obj->handle_count > 1) || obj->dma_buf;
++}
++
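A hedged sketch of how an fdinfo memory-stats walker might use the new helper; struct drm_memory_stats exists in drm_print.h, but the accounting below is simplified, so treat the field usage as illustrative:

static void account_obj(struct drm_gem_object *obj,
			struct drm_memory_stats *status)
{
	if (drm_gem_object_is_shared_for_memory_stats(obj))
		status->shared += obj->size;
	else
		status->private += obj->size;
}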
+ #ifdef CONFIG_LOCKDEP
+ /**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
+index ba483c87f0e7bd..3ae19892229db2 100644
+--- a/include/drm/drm_kunit_helpers.h
++++ b/include/drm/drm_kunit_helpers.h
+@@ -3,6 +3,8 @@
+ #ifndef DRM_KUNIT_HELPERS_H_
+ #define DRM_KUNIT_HELPERS_H_
+
++#include <drm/drm_drv.h>
++
+ #include <linux/device.h>
+
+ #include <kunit/test.h>
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index c9df0407980c94..900262f4c2349e 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -168,6 +168,7 @@ struct mipi_dsi_device_info {
+ * struct mipi_dsi_device - DSI peripheral device
+ * @host: DSI host for this peripheral
+ * @dev: driver model device node for this peripheral
++ * @attached: the DSI device has been successfully attached
+ * @name: DSI peripheral chip type
+ * @channel: virtual channel assigned to the peripheral
+ * @format: pixel format for video mode
+@@ -184,6 +185,7 @@ struct mipi_dsi_device_info {
+ struct mipi_dsi_device {
+ struct mipi_dsi_host *host;
+ struct device dev;
++ bool attached;
+
+ char name[DSI_DEV_NAME_SIZE];
+ unsigned int channel;
+@@ -239,9 +241,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
+ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
+ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value);
+-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+- const struct drm_dsc_picture_parameter_set *pps);
++int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
++int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
++ const struct drm_dsc_picture_parameter_set *pps);
+
+ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size);
+@@ -303,17 +305,17 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ * @dsi: DSI peripheral device
+ * @seq: buffer containing the payload
+ */
+-#define mipi_dsi_generic_write_seq(dsi, seq...) \
+- do { \
+- static const u8 d[] = { seq }; \
+- struct device *dev = &dsi->dev; \
+- int ret; \
+- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+- if (ret < 0) { \
+- dev_err_ratelimited(dev, "transmit data failed: %d\n", \
+- ret); \
+- return ret; \
+- } \
++#define mipi_dsi_generic_write_seq(dsi, seq...) \
++ do { \
++ static const u8 d[] = { seq }; \
++ struct device *dev = &dsi->dev; \
++ ssize_t ret; \
++ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
++ if (ret < 0) { \
++ dev_err_ratelimited(dev, "transmit data failed: %zd\n", \
++ ret); \
++ return ret; \
++ } \
+ } while (0)
+
+ /**
+@@ -322,18 +324,18 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
+- do { \
+- static const u8 d[] = { cmd, seq }; \
+- struct device *dev = &dsi->dev; \
+- int ret; \
+- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+- if (ret < 0) { \
+- dev_err_ratelimited( \
+- dev, "sending command %#02x failed: %d\n", \
+- cmd, ret); \
+- return ret; \
+- } \
++#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
++ do { \
++ static const u8 d[] = { cmd, seq }; \
++ struct device *dev = &dsi->dev; \
++ ssize_t ret; \
++ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
++ if (ret < 0) { \
++ dev_err_ratelimited( \
++ dev, "sending command %#02x failed: %zd\n", \
++ cmd, ret); \
++ return ret; \
++ } \
+ } while (0)
+
+ /**
+diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
+index e3c3ac61590947..159213786e6e1c 100644
+--- a/include/drm/drm_modeset_helper_vtables.h
++++ b/include/drm/drm_modeset_helper_vtables.h
+@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
+ *
+ * RETURNS:
+ *
+- * The number of modes added by calling drm_mode_probed_add().
++ * The number of modes added by calling drm_mode_probed_add(). Return 0
++ * on failures (no modes) instead of negative error codes.
+ */
+ int (*get_modes)(struct drm_connector *connector);
+
+diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
+index 79d62856defbf7..fef775200a81fa 100644
+--- a/include/drm/drm_plane.h
++++ b/include/drm/drm_plane.h
+@@ -190,6 +190,16 @@ struct drm_plane_state {
+ */
+ struct drm_property_blob *fb_damage_clips;
+
++ /**
++ * @ignore_damage_clips:
++ *
++ * Set by drivers to indicate the drm_atomic_helper_damage_iter_init()
++ * helper that the @fb_damage_clips blob property should be ignored.
++ *
++ * See :ref:`damage_tracking_properties` for more information.
++ */
++ bool ignore_damage_clips;
++
+ /**
+ * @src:
+ *
+diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
+index a7abf9f3e69729..2a1d01e5b56b8c 100644
+--- a/include/drm/drm_prime.h
++++ b/include/drm/drm_prime.h
+@@ -60,12 +60,19 @@ enum dma_data_direction;
+
+ struct drm_device;
+ struct drm_gem_object;
++struct drm_file;
+
+ /* core prime functions */
+ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+ struct dma_buf_export_info *exp_info);
+ void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+
++int drm_gem_prime_fd_to_handle(struct drm_device *dev,
++ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
++int drm_gem_prime_handle_to_fd(struct drm_device *dev,
++ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
++ int *prime_fd);
++
+ /* helper functions for exporting */
+ int drm_gem_map_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach);
+diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
+index a93a387f8a1a15..2ad9c9f9e90ffd 100644
+--- a/include/drm/drm_print.h
++++ b/include/drm/drm_print.h
+@@ -122,7 +122,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
+
+ /**
+ * struct drm_print_iterator - local struct used with drm_coredump_printer
+- * @data: Pointer to the devcoredump output buffer
++ * @data: Pointer to the devcoredump output buffer, can be NULL if using
++ * drm_coredump_printer to determine size of devcoredump
+ * @start: The offset within the buffer to start writing
+ * @remain: The number of bytes to write for this iteration
+ */
+@@ -167,6 +168,57 @@ struct drm_print_iterator {
+ * coredump_read, ...)
+ * }
+ *
++ * The above example has a time complexity of O(N^2), where N is the size of the
++ * devcoredump. This is acceptable for small devcoredumps but scales poorly for
++ * larger ones.
++ *
++ * Another use case for drm_coredump_printer is to capture the devcoredump into
++ * a saved buffer before the dev_coredump() callback. This involves two passes:
++ * one to determine the size of the devcoredump and another to print it to a
++ * buffer. Then, in dev_coredump(), copy from the saved buffer into the
++ * devcoredump read buffer.
++ *
++ * For example::
++ *
++ * char *devcoredump_saved_buffer;
++ *
++ * ssize_t __coredump_print(char *buffer, ssize_t count, ...)
++ * {
++ * struct drm_print_iterator iter;
++ * struct drm_printer p;
++ *
++ * iter.data = buffer;
++ * iter.start = 0;
++ * iter.remain = count;
++ *
++ * p = drm_coredump_printer(&iter);
++ *
++ * drm_printf(p, "foo=%d\n", foo);
++ * ...
++ * return count - iter.remain;
++ * }
++ *
++ * void coredump_print(...)
++ * {
++ * ssize_t count;
++ *
++ * count = __coredump_print(NULL, INT_MAX, ...);
++ * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
++ * __coredump_print(devcoredump_saved_buffer, count, ...);
++ * }
++ *
++ * void coredump_read(char *buffer, loff_t offset, size_t count,
++ * void *data, size_t datalen)
++ * {
++ * ...
++ * memcpy(buffer, devcoredump_saved_buffer + offset, count);
++ * ...
++ * }
++ *
++ * The above example has a time complexity of O(2N) (two linear passes),
++ * where N is the size of the devcoredump. This scales better than the
++ * previous example for larger devcoredumps.
++ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
+index 30a347e5aa1149..4490d43c63e33b 100644
+--- a/include/drm/ttm/ttm_pool.h
++++ b/include/drm/ttm/ttm_pool.h
+@@ -74,7 +74,7 @@ struct ttm_pool {
+ bool use_dma32;
+
+ struct {
+- struct ttm_pool_type orders[MAX_ORDER + 1];
++ struct ttm_pool_type orders[NR_PAGE_ORDERS];
+ } caching[TTM_NUM_CACHING_TYPES];
+ };
+
+diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
+index a4eff85b1f4498..2b9d856ff388d8 100644
+--- a/include/drm/ttm/ttm_tt.h
++++ b/include/drm/ttm/ttm_tt.h
+@@ -79,6 +79,12 @@ struct ttm_tt {
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
++ * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as
++ * not encrypted. The framework will try to match what the dma layer
++ * is doing, but note that it is a little fragile because ttm page
++ * fault handling abuses the DMA api a bit and dma_map_attrs can't be
++ * used to assure pgprot always matches.
++ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
+@@ -87,8 +93,9 @@ struct ttm_tt {
+ #define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
+ #define TTM_TT_FLAG_EXTERNAL BIT(2)
+ #define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
++#define TTM_TT_FLAG_DECRYPTED BIT(4)
+
+-#define TTM_TT_FLAG_PRIV_POPULATED BIT(4)
++#define TTM_TT_FLAG_PRIV_POPULATED BIT(5)
+ uint32_t page_flags;
+ /** @num_pages: Number of pages in the page array. */
+ uint32_t num_pages;
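A driver that knows its pages must be mapped as not encrypted would opt in by setting the new bit when constructing its ttm_tt. A minimal sketch; the surrounding driver code and the tt variable are assumed for illustration:

	/* assumed: tt was just initialised in the driver's ttm_tt_create hook */
	tt->page_flags |= TTM_TT_FLAG_DECRYPTED;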
+diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
+index 255e3aa9432373..54cfccff85086a 100644
+--- a/include/dt-bindings/clock/exynos7885.h
++++ b/include/dt-bindings/clock/exynos7885.h
+@@ -136,12 +136,12 @@
+ #define CLK_MOUT_FSYS_MMC_CARD_USER 2
+ #define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+ #define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+-#define CLK_MOUT_FSYS_USB30DRD_USER 4
+ #define CLK_GOUT_MMC_CARD_ACLK 5
+ #define CLK_GOUT_MMC_CARD_SDCLKIN 6
+ #define CLK_GOUT_MMC_EMBD_ACLK 7
+ #define CLK_GOUT_MMC_EMBD_SDCLKIN 8
+ #define CLK_GOUT_MMC_SDIO_ACLK 9
+ #define CLK_GOUT_MMC_SDIO_SDCLKIN 10
++#define CLK_MOUT_FSYS_USB30DRD_USER 11
+
+ #endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
+diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+index e893415ae13d0f..2569f874fe13c6 100644
+--- a/include/dt-bindings/clock/qcom,gcc-sc8180x.h
++++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+@@ -246,6 +246,9 @@
+ #define GCC_PCIE_3_CLKREF_CLK 236
+ #define GCC_USB3_PRIM_CLKREF_CLK 237
+ #define GCC_USB3_SEC_CLKREF_CLK 238
++#define GCC_UFS_MEM_CLKREF_EN 239
++#define GCC_UFS_CARD_CLKREF_EN 240
++#define GPLL9 241
+
+ #define GCC_EMAC_BCR 0
+ #define GCC_GPU_BCR 1
+diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
+index e24ee840cfdb80..c557b78dc572f5 100644
+--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
++++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
+@@ -16,6 +16,10 @@
+
+ /* VIDEO_CC Resets */
+ #define VIDEO_CC_MVSC_CORE_CLK_BCR 0
++#define VIDEO_CC_INTERFACE_BCR 1
++#define VIDEO_CC_MVS0_BCR 2
++#define VIDEO_CC_MVS1_BCR 3
++#define VIDEO_CC_MVSC_BCR 4
+
+ /* VIDEO_CC GDSCRs */
+ #define VENUS_GDSC 0
+diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
+index 754c54a6eb06a4..7850cdc62e2854 100644
+--- a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
++++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
+@@ -86,5 +86,6 @@
+ #define R8A779G0_CLK_CPEX 74
+ #define R8A779G0_CLK_CBFUSA 75
+ #define R8A779G0_CLK_R 76
++#define R8A779G0_CLK_CP 77
+
+ #endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
+diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
+new file mode 100644
+index 00000000000000..fd11478cfe9cc2
+--- /dev/null
++++ b/include/dt-bindings/dma/fsl-edma.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
++
++#ifndef _FSL_EDMA_DT_BINDING_H_
++#define _FSL_EDMA_DT_BINDING_H_
++
++/* Receive Channel */
++#define FSL_EDMA_RX 0x1
++
++/* iMX8 audio remote DMA */
++#define FSL_EDMA_REMOTE 0x2
++
++/* FIFO is a contiguous memory region */
++#define FSL_EDMA_MULTI_FIFO 0x4
++
++/* Channel must use an even channel number */
++#define FSL_EDMA_EVEN_CH 0x8
++
++/* Channel must use an odd channel number */
++#define FSL_EDMA_ODD_CH 0x10
++
++#endif
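These are OR-able bit flags carried in a consumer's DMA specifier. A minimal C sketch of testing one on the driver side, assuming the flags have already been extracted from the phandle arguments (the parameter name is illustrative):

	#include <dt-bindings/dma/fsl-edma.h>

	static bool edma_needs_even_channel(u32 spec_flags)
	{
		/* plain mask tests work because the values are distinct bits */
		return spec_flags & FSL_EDMA_EVEN_CH;
	}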
+diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
+index 31029f4f7be851..c4aabbf002f7cf 100644
+--- a/include/kvm/arm_pmu.h
++++ b/include/kvm/arm_pmu.h
+@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
+ */
+ #define kvm_pmu_update_vcpu_events(vcpu) \
+ do { \
+- if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
++ if (!has_vhe() && kvm_arm_support_pmu_v3()) \
+ vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
+ } while (0)
+
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index afd94c9b8b8afd..1b76d2f83eac6a 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -571,8 +571,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+ #define OSC_SB_PCLPI_SUPPORT 0x00000080
+ #define OSC_SB_OSLPI_SUPPORT 0x00000100
+ #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
+-#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+ #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
++#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
+ #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+ #define OSC_SB_PRM_SUPPORT 0x00200000
+ #define OSC_SB_FFH_OPR_SUPPORT 0x00400000
+diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
+index 446394f8460647..68fc1bd8d851e9 100644
+--- a/include/linux/amd-pstate.h
++++ b/include/linux/amd-pstate.h
+@@ -52,6 +52,9 @@ struct amd_aperf_mperf {
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value
+ * @boost_supported: check whether the Processor or SBIOS supports boost mode
++ * @hw_prefcore: check whether HW supports the preferred core feature.
++ *		The AMD P-State driver supports the preferred core feature
++ *		only when both hw_prefcore and the early prefcore parameter are true.
+ * @epp_policy: Last saved policy used to set energy-performance preference
+ * @epp_cached: Cached CPPC energy-performance preference value
+ * @policy: Cpufreq policy value
+@@ -70,6 +73,10 @@ struct amd_cpudata {
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
++ u32 min_limit_perf;
++ u32 max_limit_perf;
++ u32 min_limit_freq;
++ u32 max_limit_freq;
+
+ u32 max_freq;
+ u32 min_freq;
+@@ -81,6 +88,7 @@ struct amd_cpudata {
+
+ u64 freq;
+ bool boost_supported;
++ bool hw_prefcore;
+
+ /* EPP feature related attributes*/
+ s16 epp_policy;
+diff --git a/include/linux/async.h b/include/linux/async.h
+index cce4ad31e8fcf0..33c9ff4afb492f 100644
+--- a/include/linux/async.h
++++ b/include/linux/async.h
+@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
+ return async_schedule_node(func, dev, dev_to_node(dev));
+ }
+
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
++
+ /**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
+index b83ef19da13de2..313a765710194e 100644
+--- a/include/linux/atomic/atomic-arch-fallback.h
++++ b/include/linux/atomic/atomic-arch-fallback.h
+@@ -2221,7 +2221,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+
+ /**
+ * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: int value to add
++ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+@@ -4333,7 +4333,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+
+ /**
+ * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: s64 value to add
++ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+@@ -4649,4 +4649,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
+ }
+
+ #endif /* _LINUX_ATOMIC_FALLBACK_H */
+-// 2fdd6702823fa842f9cea57a002e6e4476ae780c
++// f8888b25626bea006e7f11f7add7cecc33d0fa2e
+diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
+index d401b406ef7c44..ce1af59e1c68d0 100644
+--- a/include/linux/atomic/atomic-instrumented.h
++++ b/include/linux/atomic/atomic-instrumented.h
+@@ -1341,7 +1341,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+
+ /**
+ * atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: int value to add
++ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+@@ -2905,7 +2905,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+
+ /**
+ * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: s64 value to add
++ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+@@ -4469,7 +4469,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+
+ /**
+ * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: long value to add
++ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+@@ -5000,4 +5000,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
+
+
+ #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+-// 1568f875fef72097413caab8339120c065a39aa4
++// 5f7bb165838dcca35625e7d4b42540b790abd19b
+diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
+index c82947170ddc8a..aa4a5c09660fdd 100644
+--- a/include/linux/atomic/atomic-long.h
++++ b/include/linux/atomic/atomic-long.h
+@@ -1527,7 +1527,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+
+ /**
+ * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: long value to add
++ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+@@ -1795,4 +1795,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
+ }
+
+ #endif /* _LINUX_ATOMIC_LONG_H */
+-// 4ef23f98c73cff96d239896175fd26b10b88899e
++// f8204cfa718c04a01e3c7a15257ac85bbef54c23
+diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
+index d0807ad43f933d..6e950594215a0f 100644
+--- a/include/linux/avf/virtchnl.h
++++ b/include/linux/avf/virtchnl.h
+@@ -4,6 +4,11 @@
+ #ifndef _VIRTCHNL_H_
+ #define _VIRTCHNL_H_
+
++#include <linux/bitops.h>
++#include <linux/bits.h>
++#include <linux/overflow.h>
++#include <uapi/linux/if_ether.h>
++
+ /* Description:
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
+index ae12696ec492c6..2ad261082bba5f 100644
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -141,8 +141,6 @@ struct bdi_writeback {
+ struct delayed_work dwork; /* work item used for writeback */
+ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
+
+- unsigned long dirty_sleep; /* last wait */
+-
+ struct list_head bdi_node; /* anchored at bdi->wb_list */
+
+ #ifdef CONFIG_CGROUP_WRITEBACK
+@@ -179,6 +177,11 @@ struct backing_dev_info {
+ * any dirty wbs, which is depended upon by bdi_has_dirty().
+ */
+ atomic_long_t tot_write_bandwidth;
++ /*
++	 * Jiffies when the last process was dirty throttled on this bdi. Used by
++ * blk-wbt.
++ */
++ unsigned long last_bdp_sleep;
+
+ struct bdi_writeback wb; /* the root writeback info for this bdi */
+ struct list_head wb_list; /* list of all wbs */
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 41d417ee134997..0286bada25ce72 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -286,6 +286,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ {
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+
++ if (unlikely(i >= bio->bi_vcnt)) {
++ fi->folio = NULL;
++ return;
++ }
++
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+@@ -303,10 +308,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+- } else if (fi->_i + 1 < bio->bi_vcnt) {
+- bio_first_folio(fi, bio, fi->_i + 1);
+ } else {
+- fi->folio = NULL;
++ bio_first_folio(fi, bio, fi->_i + 1);
+ }
+ }
+
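With the bounds check moved into bio_first_folio(), the iteration helpers terminate by producing a NULL folio once past the last bvec. A minimal consumer sketch using the existing bio_for_each_folio_all() macro from linux/bio.h:

	#include <linux/bio.h>

	static size_t total_folio_bytes(struct bio *bio)
	{
		struct folio_iter fi;
		size_t bytes = 0;

		/* the loop ends when bio_first_folio() sets fi.folio to NULL */
		bio_for_each_folio_all(fi, bio)
			bytes += fi.length;
		return bytes;
	}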
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 03644237e1efb5..729ec2453149f9 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -77,6 +77,10 @@ struct device;
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
+ * bitmap_get_value8(map, start) Get 8bit value from map at start
+ * bitmap_set_value8(map, value, start) Set 8bit value to map at start
++ * bitmap_read(map, start, nbits) Read an nbits-sized value from
++ * map at start
++ * bitmap_write(map, value, start, nbits) Write an nbits-sized value to
++ * map at start
+ *
+ * Note, bitmap_zero() and bitmap_fill() operate over the region of
+ * unsigned longs, that is, bits behind bitmap till the unsigned long
+@@ -237,9 +241,11 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+ #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+
++#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = 0;
+@@ -249,7 +255,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+
+ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+ {
+- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+@@ -260,7 +266,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits)
+ {
+- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = *src;
+@@ -279,6 +285,18 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
+ dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+ }
+
++static inline void bitmap_copy_and_extend(unsigned long *to,
++ const unsigned long *from,
++ unsigned int count, unsigned int size)
++{
++ unsigned int copy = BITS_TO_LONGS(count);
++
++ memcpy(to, from, copy * sizeof(long));
++ if (count % BITS_PER_LONG)
++ to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
++ memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
++}
++
+ /*
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of hi and lo parts of numbers match the bitmap structure.
+@@ -599,6 +617,79 @@ static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
+ map[index] |= value << offset;
+ }
+
++/**
++ * bitmap_read - read a value of n-bits from the memory region
++ * @map: address to the bitmap memory region
++ * @start: bit offset of the n-bit value
++ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
++ *
++ * Returns: value of @nbits bits located at the @start bit offset within the
++ * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
++ * value is undefined.
++ */
++static inline unsigned long bitmap_read(const unsigned long *map,
++ unsigned long start,
++ unsigned long nbits)
++{
++ size_t index = BIT_WORD(start);
++ unsigned long offset = start % BITS_PER_LONG;
++ unsigned long space = BITS_PER_LONG - offset;
++ unsigned long value_low, value_high;
++
++ if (unlikely(!nbits || nbits > BITS_PER_LONG))
++ return 0;
++
++ if (space >= nbits)
++ return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
++
++ value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
++ value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
++ return (value_low >> offset) | (value_high << space);
++}
++
++/**
++ * bitmap_write - write n-bit value within a memory region
++ * @map: address to the bitmap memory region
++ * @value: value to write, clamped to nbits
++ * @start: bit offset of the n-bit value
++ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
++ *
++ * bitmap_write() behaves as if implemented as @nbits calls of __assign_bit(),
++ * i.e. bits beyond @nbits are ignored:
++ *
++ * for (bit = 0; bit < nbits; bit++)
++ *	__assign_bit(start + bit, map, value & BIT(bit));
++ *
++ * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
++ */
++static inline void bitmap_write(unsigned long *map, unsigned long value,
++ unsigned long start, unsigned long nbits)
++{
++ size_t index;
++ unsigned long offset;
++ unsigned long space;
++ unsigned long mask;
++ bool fit;
++
++ if (unlikely(!nbits || nbits > BITS_PER_LONG))
++ return;
++
++ mask = BITMAP_LAST_WORD_MASK(nbits);
++ value &= mask;
++ offset = start % BITS_PER_LONG;
++ space = BITS_PER_LONG - offset;
++ fit = space >= nbits;
++ index = BIT_WORD(start);
++
++ map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
++ map[index] |= value << offset;
++ if (fit)
++ return;
++
++ map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
++ map[index + 1] |= (value >> space);
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __LINUX_BITMAP_H */
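A short usage sketch for the two helpers above, packing two 12-bit fields into one bitmap and reading them back (the sizes and values are purely illustrative):

	#include <linux/bitmap.h>

	static void bitmap_rw_example(void)
	{
		DECLARE_BITMAP(map, 64);

		bitmap_zero(map, 64);
		bitmap_write(map, 0xabc, 0, 12);	/* occupies bits 0..11 */
		bitmap_write(map, 0x123, 12, 12);	/* occupies bits 12..23 */

		/* bitmap_read(map, 0, 12) now returns 0xabc,
		 * bitmap_read(map, 12, 12) returns 0x123 */
	}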
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 2ba557e067fe69..f7f5a783da2aa8 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -80,6 +80,7 @@ __check_bitop_pr(__test_and_set_bit);
+ __check_bitop_pr(__test_and_clear_bit);
+ __check_bitop_pr(__test_and_change_bit);
+ __check_bitop_pr(test_bit);
++__check_bitop_pr(test_bit_acquire);
+
+ #undef __check_bitop_pr
+
+diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
+index 378b2459efe2da..f7cc8080672cc2 100644
+--- a/include/linux/blk-integrity.h
++++ b/include/linux/blk-integrity.h
+@@ -105,14 +105,13 @@ static inline bool blk_integrity_rq(struct request *rq)
+ }
+
+ /*
+- * Return the first bvec that contains integrity data. Only drivers that are
+- * limited to a single integrity segment should use this helper.
++ * Return the current bvec that contains the integrity data. bip_iter may be
++ * advanced to iterate over the integrity data.
+ */
+-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
++static inline struct bio_vec rq_integrity_vec(struct request *rq)
+ {
+- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+- return NULL;
+- return rq->bio->bi_integrity->bip_vec;
++ return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
++ rq->bio->bi_integrity->bip_iter);
+ }
+ #else /* CONFIG_BLK_DEV_INTEGRITY */
+ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+@@ -176,9 +175,10 @@ static inline int blk_integrity_rq(struct request *rq)
+ return 0;
+ }
+
+-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
++static inline struct bio_vec rq_integrity_vec(struct request *rq)
+ {
+- return NULL;
++ /* the optimizer will remove all calls to this function */
++ return (struct bio_vec){ };
+ }
+ #endif /* CONFIG_BLK_DEV_INTEGRITY */
+ #endif /* _LINUX_BLK_INTEGRITY_H */
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index d5c5e59ddbd25a..92c8997b193816 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -69,6 +69,7 @@ struct block_device {
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+ bool bd_make_it_fail;
+ #endif
++ bool bd_ro_warned;
+ /*
+ * keep this out-of-line as it's both big and not needed in the fast
+ * path
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index eef450f259828d..a7b65d4ab616e2 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -229,6 +229,19 @@ static inline unsigned int disk_openers(struct gendisk *disk)
+ return atomic_read(&disk->part0->bd_openers);
+ }
+
++/**
++ * disk_has_partscan - return %true if partition scanning is enabled on a disk
++ * @disk: disk to check
++ *
++ * Returns %true if partition scanning is enabled for @disk, or %false if
++ * partition scanning is disabled either permanently or temporarily.
++ */
++static inline bool disk_has_partscan(struct gendisk *disk)
++{
++ return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
++ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
++}
++
+ /*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+@@ -538,7 +551,7 @@ struct request_queue {
+ #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
+ #define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
+ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
+-#define QUEUE_FLAG_HW_WC 18 /* Write back caching supported */
++#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
+ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+ #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
+ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
+@@ -1479,14 +1492,24 @@ extern const struct blk_holder_ops fs_holder_ops;
+ #define sb_open_mode(flags) \
+ (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+
++struct bdev_handle {
++ struct block_device *bdev;
++ void *holder;
++};
++
+ struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+ struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
++struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
++ const struct blk_holder_ops *hops);
++struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
++ void *holder, const struct blk_holder_ops *hops);
+ int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+ const struct blk_holder_ops *hops);
+ void bd_abort_claiming(struct block_device *bdev, void *holder);
+ void blkdev_put(struct block_device *bdev, void *holder);
++void bdev_release(struct bdev_handle *handle);
+
+ /* just for blk-cgroup, don't use elsewhere */
+ struct block_device *blkdev_get_no_open(dev_t dev);
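A hedged sketch of the new handle-based open/release pair declared above; the path, mode, and holder are illustrative:

	struct bdev_handle *h;

	h = bdev_open_by_path("/dev/vda", BLK_OPEN_READ, my_holder, NULL);
	if (IS_ERR(h))
		return PTR_ERR(h);
	/* ... use h->bdev; h->holder is remembered for bdev_release() ... */
	bdev_release(h);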
+diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
+index ca73940e26df83..4195444ec45d15 100644
+--- a/include/linux/bootconfig.h
++++ b/include/linux/bootconfig.h
+@@ -287,7 +287,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+ int __init xbc_get_info(int *node_size, size_t *data_size);
+
+ /* XBC cleanup data structures */
+-void __init xbc_exit(void);
++void __init _xbc_exit(bool early);
++
++static inline void xbc_exit(void)
++{
++ _xbc_exit(false);
++}
+
+ /* XBC embedded bootconfig data in kernel */
+ #ifdef CONFIG_BOOT_CONFIG_EMBED
+diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
+index 8506690dbb9ca4..d4f2c8706042cd 100644
+--- a/include/linux/bpf-cgroup.h
++++ b/include/linux/bpf-cgroup.h
+@@ -120,6 +120,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
+
+ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
++ int *uaddrlen,
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags);
+@@ -137,11 +138,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
+ enum cgroup_bpf_attach_type atype);
+
+ int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
+- int *optname, char __user *optval,
++ int *optname, sockptr_t optval,
+ int *optlen, char **kernel_optval);
++
+ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+- int optname, char __user *optval,
+- int __user *optlen, int max_optlen,
++ int optname, sockptr_t optval,
++ sockptr_t optlen, int max_optlen,
+ int retval);
+
+ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+@@ -230,22 +232,22 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
+
+-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
++#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
+ ({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) \
+- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+- NULL, NULL); \
++ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
++ atype, NULL, NULL); \
+ __ret; \
+ })
+
+-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
++#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
+ ({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+- t_ctx, NULL); \
++ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
++ atype, t_ctx, NULL); \
+ release_sock(sk); \
+ } \
+ __ret; \
+@@ -256,14 +258,14 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
+ * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
++#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
+ ({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+- NULL, &__flags); \
++ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
++ atype, NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+@@ -276,29 +278,29 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
+
+-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
+- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
++ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)
+
+-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
+- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
++ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)
+
+-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
+- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)
+
+-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
+- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)
+
+-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
+- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
++#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)
+
+-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
+- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
++#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)
+
+-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
+- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
++#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)
+
+-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
+- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
++#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
+
+ /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
+ * fullsock and its parent fullsock cannot be traced by
+@@ -373,14 +375,6 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ __ret; \
+ })
+
+-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
+-({ \
+- int __ret = 0; \
+- if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+- get_user(__ret, optlen); \
+- __ret; \
+-})
+-
+ #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
+ max_optlen, retval) \
+ ({ \
+@@ -477,28 +471,27 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ }
+
+ #define cgroup_bpf_enabled(atype) (0)
+-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
+-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
++#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
++#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
+ #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
+ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
++#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
+-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
+ #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
+ optlen, max_optlen, retval) ({ retval; })
+ #define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 49f8b691496c45..1e05cc80e0485f 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -106,7 +106,11 @@ struct bpf_map_ops {
+ /* funcs called by prog_array and perf_event_array map */
+ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+ int fd);
+- void (*map_fd_put_ptr)(void *ptr);
++ /* If need_defer is true, the implementation should guarantee that
++ * the to-be-put element is still alive before the bpf program, which
++ * may manipulate it, exists.
++	 * may manipulate it, exits.
++ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
+ int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ u32 (*map_fd_sys_lookup_elem)(void *ptr);
+ void (*map_seq_show_elem)(struct bpf_map *map, void *key,
+@@ -271,7 +275,11 @@ struct bpf_map {
+ */
+ atomic64_t refcnt ____cacheline_aligned;
+ atomic64_t usercnt;
+- struct work_struct work;
++ /* rcu is used before freeing and work is only used during freeing */
++ union {
++ struct work_struct work;
++ struct rcu_head rcu;
++ };
+ struct mutex freeze_mutex;
+ atomic64_t writecnt;
+ /* 'Ownership' of program-containing map is claimed by the first program
+@@ -280,6 +288,7 @@ struct bpf_map {
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+ struct {
++ const struct btf_type *attach_func_proto;
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+@@ -287,6 +296,9 @@ struct bpf_map {
+ } owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
++ bool free_after_mult_rcu_gp;
++ bool free_after_rcu_gp;
++ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
+ };
+
+@@ -664,6 +676,11 @@ enum bpf_type_flag {
+ /* DYNPTR points to xdp_buff */
+ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+
++ /* Memory must be aligned on some architectures, used in combination with
++ * MEM_FIXED_SIZE.
++ */
++ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
++
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+ };
+@@ -700,8 +717,6 @@ enum bpf_arg_type {
+ ARG_ANYTHING, /* any (initialized) argument is ok */
+ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
+ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
+- ARG_PTR_TO_INT, /* pointer to int */
+- ARG_PTR_TO_LONG, /* pointer to long */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
+ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
+@@ -903,10 +918,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
+ aux->ctx_field_size = size;
+ }
+
++static bool bpf_is_ldimm64(const struct bpf_insn *insn)
++{
++ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
++}
++
+ static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+ {
+- return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+- insn->src_reg == BPF_PSEUDO_FUNC;
++ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+ }
+
+ struct bpf_prog_ops {
+@@ -1029,6 +1048,11 @@ struct btf_func_model {
+ */
+ #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
++/* Indicate that current trampoline is in a tail call context. Then, it has to
++ * cache and restore tail_call_cnt to avoid infinite tail call loop.
++ */
++#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
++
+ /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+ * bytes on x86.
+ */
+@@ -1506,12 +1530,26 @@ struct bpf_link {
+ enum bpf_link_type type;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+- struct work_struct work;
++	/* rcu is used before freeing; work can be used to schedule that
++	 * RCU-based freeing beforehand, so the two never overlap
++ */
++ union {
++ struct rcu_head rcu;
++ struct work_struct work;
++ };
+ };
+
+ struct bpf_link_ops {
+ void (*release)(struct bpf_link *link);
++ /* deallocate link resources callback, called without RCU grace period
++ * waiting
++ */
+ void (*dealloc)(struct bpf_link *link);
++ /* deallocate link resources callback, called after RCU grace period;
++ * if underlying BPF program is sleepable we go through tasks trace
++ * RCU GP and then "classic" RCU GP
++ */
++ void (*dealloc_deferred)(struct bpf_link *link);
+ int (*detach)(struct bpf_link *link);
+ int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog);
+@@ -3134,6 +3172,9 @@ enum bpf_text_poke_type {
+ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2);
+
++void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++ struct bpf_prog *new, struct bpf_prog *old);
++
+ void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+ int bpf_arch_text_invalidate(void *dst, size_t len);
+
+diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
+index d644bbb298af4a..bb1223b2130877 100644
+--- a/include/linux/bpf_mem_alloc.h
++++ b/include/linux/bpf_mem_alloc.h
+@@ -11,6 +11,7 @@ struct bpf_mem_caches;
+ struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
++ bool percpu;
+ struct work_struct work;
+ };
+
+diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
+index fc0d6f32c68760..dfaae3e3ec1537 100644
+--- a/include/linux/bpf_types.h
++++ b/include/linux/bpf_types.h
+@@ -142,9 +142,12 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
+ #ifdef CONFIG_NET
+ BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
++BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
++BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+ #endif
+ #ifdef CONFIG_PERF_EVENTS
+ BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
+ #endif
+ BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+ BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
++BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index b6e58dab8e2756..92919d52f7e1b2 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -300,6 +300,17 @@ struct bpf_func_state {
+ bool in_callback_fn;
+ struct tnum callback_ret_range;
+ bool in_async_callback_fn;
++	/* For callback-calling functions that limit the number of possible
++	 * callback executions (e.g. bpf_loop), keeps track of the current
++	 * simulated iteration number.
++ * Value in frame N refers to number of times callback with frame
++ * N+1 was simulated, e.g. for the following call:
++ *
++ * bpf_loop(..., fn, ...); | suppose current frame is N
++ * | fn would be simulated in frame N+1
++ * | number of simulations is tracked in frame N
++ */
++ u32 callback_depth;
+
+ /* The following fields should be last. See copy_func_state() */
+ int acquired_refs;
+@@ -372,10 +383,25 @@ struct bpf_verifier_state {
+ struct bpf_active_lock active_lock;
+ bool speculative;
+ bool active_rcu_lock;
++	/* If this state was ever pointed to by another state's loop_entry field,
++	 * this flag is set to true. Used to avoid freeing such states
++ * while they are still in use.
++ */
++ bool used_as_loop_entry;
+
+ /* first and last insn idx of this verifier state */
+ u32 first_insn_idx;
+ u32 last_insn_idx;
++ /* If this state is a part of states loop this field points to some
++ * parent of this state such that:
++ * - it is also a member of the same states loop;
++ * - DFS states traversal starting from initial state visits loop_entry
++ * state before this state.
++ * Used to compute topmost loop entry for state loops.
++ * State loops might appear because of open coded iterators logic.
++ * See get_loop_entry() for more information.
++ */
++ struct bpf_verifier_state *loop_entry;
+ /* jmp history recorded from first to last.
+ * backtracking is using it to go from last to first.
+ * For most states jmp_history_cnt is [0-3].
+@@ -383,6 +409,8 @@ struct bpf_verifier_state {
+ */
+ struct bpf_idx_pair *jmp_history;
+ u32 jmp_history_cnt;
++ u32 dfs_depth;
++ u32 callback_unroll_depth;
+ };
+
+ #define bpf_get_spilled_reg(slot, frame) \
+@@ -490,6 +518,10 @@ struct bpf_insn_aux_data {
+ * this instruction, regardless of any heuristics
+ */
+ bool force_checkpoint;
++ /* true if instruction is a call to a helper function that
++ * accepts callback function as a parameter.
++ */
++ bool calls_callback;
+ };
+
+ #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+@@ -728,8 +760,8 @@ static inline u32 type_flag(u32 type)
+ /* only use after check_attach_btf_id() */
+ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+ {
+- return prog->type == BPF_PROG_TYPE_EXT ?
+- prog->aux->dst_prog->type : prog->type;
++ return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
++ prog->aux->saved_dst_prog_type : prog->type;
+ }
+
+ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+diff --git a/include/linux/bvec.h b/include/linux/bvec.h
+index 555aae5448ae4e..bd1e361b351c5a 100644
+--- a/include/linux/bvec.h
++++ b/include/linux/bvec.h
+@@ -83,7 +83,7 @@ struct bvec_iter {
+
+ unsigned int bi_bvec_done; /* number of bytes completed in
+ current bvec */
+-} __packed;
++} __packed __aligned(4);
+
+ struct bvec_iter_all {
+ struct bio_vec bv;
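__packed alone would permit byte alignment for struct bvec_iter; adding __aligned(4) keeps the packed layout while restoring the natural 4-byte alignment. An illustrative compile-time check, not part of the patch itself:

	static_assert(__alignof__(struct bvec_iter) == 4,
		      "bvec_iter must stay 4-byte aligned");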
+diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
+index 98c6fd0b39b634..fdfb61ccf55aef 100644
+--- a/include/linux/cdrom.h
++++ b/include/linux/cdrom.h
+@@ -77,7 +77,7 @@ struct cdrom_device_ops {
+ unsigned int clearing, int slot);
+ int (*tray_move) (struct cdrom_device_info *, int);
+ int (*lock_door) (struct cdrom_device_info *, int);
+- int (*select_speed) (struct cdrom_device_info *, int);
++ int (*select_speed) (struct cdrom_device_info *, unsigned long);
+ int (*get_last_session) (struct cdrom_device_info *,
+ struct cdrom_multisession *);
+ int (*get_mcn) (struct cdrom_device_info *,
+diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
+index 4c3e0648dc2775..1f2171dd01bfa3 100644
+--- a/include/linux/ceph/mdsmap.h
++++ b/include/linux/ceph/mdsmap.h
+@@ -5,6 +5,8 @@
+ #include <linux/bug.h>
+ #include <linux/ceph/types.h>
+
++struct ceph_mds_client;
++
+ /*
+ * mds map - describe servers in the mds cluster.
+ *
+@@ -25,7 +27,11 @@ struct ceph_mdsmap {
+ u32 m_session_timeout; /* seconds */
+ u32 m_session_autoclose; /* seconds */
+ u64 m_max_file_size;
+- u64 m_max_xattr_size; /* maximum size for xattrs blob */
++ /*
++ * maximum size for xattrs blob.
++ * Zeroed by default to force the usage of the (sync) SETXATTR Op.
++ */
++ u64 m_max_xattr_size;
+ u32 m_max_mds; /* expected up:active mds number */
+ u32 m_num_active_mds; /* actual up:active mds number */
+ u32 possible_max_rank; /* possible max rank index */
+@@ -65,7 +71,8 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+ }
+
+ extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
++struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
++ void *end, bool msgr2);
+ extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+ extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
+
+diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
+index 2eaaabbe98cb64..1717cc57cdacd3 100644
+--- a/include/linux/ceph/messenger.h
++++ b/include/linux/ceph/messenger.h
+@@ -283,7 +283,7 @@ struct ceph_msg {
+ struct kref kref;
+ bool more_to_follow;
+ bool needs_out_seq;
+- bool sparse_read;
++ u64 sparse_read_total;
+ int front_alloc_len;
+
+ struct ceph_msgpool *pool;
+diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
+index bf9823956758c4..f703fb8030de26 100644
+--- a/include/linux/ceph/osd_client.h
++++ b/include/linux/ceph/osd_client.h
+@@ -45,6 +45,7 @@ enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
++ CEPH_SPARSE_READ_DATA_PRE,
+ CEPH_SPARSE_READ_DATA,
+ };
+
+@@ -64,7 +65,7 @@ struct ceph_sparse_read {
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+- __le32 sr_datalen; /* length of actual data */
++ u32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 265da00a1a8b1b..6eefe5153a6ff7 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -543,6 +543,10 @@ struct cgroup_root {
+ /* Unique id for this hierarchy. */
+ int hierarchy_id;
+
++ /* A list running through the active hierarchies */
++ struct list_head root_list;
++ struct rcu_head rcu; /* Must be near the top */
++
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. cgrp->ancestors[0] will be used overflowing into the
+@@ -556,9 +560,6 @@ struct cgroup_root {
+ /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+ atomic_t nr_cgrps;
+
+- /* A list running through the active hierarchies */
+- struct list_head root_list;
+-
+ /* Hierarchy-specific flags */
+ unsigned int flags;
+
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index ec32ec58c59f72..1293c38ddb7f73 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
+ unsigned long parent_rate);
+
+ /**
+- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
++ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+@@ -129,7 +129,7 @@ struct clk_duty {
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
+- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
++ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
+ * parent rate is an input parameter. It is up to the caller to
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+@@ -448,15 +448,15 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ */
+ #define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
+ parent_hw, flags, fixed_rate, fixed_accuracy) \
+- __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
+- NULL, NULL, (flags), (fixed_rate), \
++ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
++ NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
+ /**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+@@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+- * @parent_name: name of clock's parent
++ * @parent_data: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+@@ -649,7 +649,7 @@ struct clk_div_table {
+ * Clock with an adjustable divider affecting its output frequency. Implements
+ * .recalc_rate, .set_rate and .round_rate
+ *
+- * Flags:
++ * @flags:
+ * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+ * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
+ * the raw value read from the register, with the value of zero considered
+@@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ * @mwidth: width of the numerator bit field
+ * @nshift: shift to the denominator bit field
+ * @nwidth: width of the denominator bit field
++ * @approximation: clk driver's callback for calculating the divider clock
+ * @lock: register lock
+ *
+ * Clock with adjustable fractional divider affecting its output frequency.
+ *
+- * Flags:
++ * @flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+@@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+- * Flags:
++ * @flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ * from the register, with 0 being a valid value effectively
+ * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index 1d42d4b1732717..0ad8b550bb4b46 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -291,7 +291,19 @@ static inline void timer_probe(void) {}
+ #define TIMER_ACPI_DECLARE(name, table_id, fn) \
+ ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+
+-extern ulong max_cswd_read_retries;
++static inline unsigned int clocksource_get_max_watchdog_retry(void)
++{
++ /*
++ * When system is in the boot phase or under heavy workload, there
++	 * When the system is in the boot phase or under heavy workload, there
++	 * can be sporadic large latencies during the clocksource/watchdog
++	 * read, so allow retries to filter out this noise. As both the
++	 * frequency and the maximum value of the latency go up with the
++	 * number of CPUs, scale the number of retries with the number of
++	 * online CPUs.
++ return (ilog2(num_online_cpus()) / 2) + 1;
++}
++
+ void clocksource_verify_percpu(struct clocksource *cs);
+
+ #endif /* _LINUX_CLOCKSOURCE_H */
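Worked values for the formula above: 1 online CPU gives ilog2(1)/2 + 1 = 1 retry, 8 CPUs give 3/2 + 1 = 2, 64 CPUs give 6/2 + 1 = 4, and 1024 CPUs give 10/2 + 1 = 6, so the retry budget grows only logarithmically with the CPU count.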
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 1cfa4f0f490aa2..5981d3eadaee1e 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -609,7 +609,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user *buf);
+ asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
++asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
+ /* No generic prototype for truncate64, ftruncate64, fallocate */
+ asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ int flags, umode_t mode);
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 7af9e34ec261bf..8c9a095c17571a 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -66,6 +66,26 @@
+ __builtin_unreachable(); \
+ } while (0)
+
++/*
++ * GCC 'asm goto' with outputs miscompiles certain code sequences:
++ *
++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
++ *
++ * Work around it via the same compiler barrier quirk that we used
++ * to use for the old 'asm goto' workaround.
++ *
++ * Also, always mark such 'asm goto' statements as volatile: all
++ * asm goto statements are supposed to be volatile as per the
++ * documentation, but some versions of gcc didn't actually do
++ * that for asms with outputs:
++ *
++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
++ */
++#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND
++#define asm_goto_output(x...) \
++ do { asm volatile goto(x); asm (""); } while (0)
++#endif
++
+ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+ #define __HAVE_BUILTIN_BSWAP32__
+ #define __HAVE_BUILTIN_BSWAP64__
+diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
+index 28566624f008f4..f5859b8c68b420 100644
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -333,6 +333,18 @@
+ */
+ #define __section(section) __attribute__((__section__(section)))
+
++/*
++ * Optional: only supported since gcc >= 12
++ *
++ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
++ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized
++ */
++#if __has_attribute(__uninitialized__)
++# define __uninitialized __attribute__((__uninitialized__))
++#else
++# define __uninitialized
++#endif
++
+ /*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index c523c6683789d8..0a182f088c897e 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -280,6 +280,17 @@ struct ftrace_likely_data {
+ # define __no_kcsan
+ #endif
+
++#ifdef __SANITIZE_MEMORY__
++/*
++ * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
++ * functions, therefore disabling KMSAN checks also requires disabling inlining.
++ *
++ * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
++ * within the function and marks all its outputs as initialized.
++ */
++# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
++#endif
++
+ #ifndef __no_sanitize_or_inline
+ #define __no_sanitize_or_inline __always_inline
+ #endif
+@@ -352,8 +363,15 @@ struct ftrace_likely_data {
+ # define __realloc_size(x, ...)
+ #endif
+
+-#ifndef asm_volatile_goto
+-#define asm_volatile_goto(x...) asm goto(x)
++/*
++ * Some versions of gcc do not mark 'asm goto' volatile:
++ *
++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
++ *
++ * We do it here by hand, because it doesn't hurt.
++ */
++#ifndef asm_goto_output
++#define asm_goto_output(x...) asm volatile goto(x)
+ #endif
+
+ #ifdef CONFIG_CC_HAS_ASM_INLINE
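A minimal sketch of asm_goto_output() in use, with x86-flavoured operands that are purely illustrative:

	static inline int nonzero_or_fail(int val, int *out)
	{
		asm_goto_output("testl %1, %1\n\t"
				"jz %l[fault]\n\t"
				"movl %1, %0"
				: "=r" (*out)
				: "r" (val)
				: "cc"
				: fault);
		return 0;
	fault:
		return -EINVAL;
	}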
+diff --git a/include/linux/counter.h b/include/linux/counter.h
+index 702e9108bbb44e..b767b5c821f58e 100644
+--- a/include/linux/counter.h
++++ b/include/linux/counter.h
+@@ -359,7 +359,6 @@ struct counter_ops {
+ * @num_counts: number of Counts specified in @counts
+ * @ext: optional array of Counter device extensions
+ * @num_ext: number of Counter device extensions specified in @ext
+- * @priv: optional private data supplied by driver
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of current watching Counter events
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index eb768a866fe31e..a7d91a167a8b64 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -75,6 +75,8 @@ extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+@@ -155,6 +157,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+ static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
+ #endif /* !CONFIG_HOTPLUG_CPU */
+
++DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
++
+ #ifdef CONFIG_PM_SLEEP_SMP
+ extern int freeze_secondary_cpus(int primary);
+ extern void thaw_secondary_cpus(void);
+@@ -210,7 +214,18 @@ void cpuhp_report_idle_dead(void);
+ static inline void cpuhp_report_idle_dead(void) { }
+ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
++#ifdef CONFIG_CPU_MITIGATIONS
+ extern bool cpu_mitigations_off(void);
+ extern bool cpu_mitigations_auto_nosmt(void);
++#else
++static inline bool cpu_mitigations_off(void)
++{
++ return true;
++}
++static inline bool cpu_mitigations_auto_nosmt(void)
++{
++ return false;
++}
++#endif
+
+ #endif /* _LINUX_CPU_H_ */
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 71d186d6933a5e..9ca4211c063f39 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -1021,6 +1021,18 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ efficiencies);
+ }
+
++static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
++{
++ unsigned int freq;
++
++ if (idx < 0)
++ return false;
++
++ freq = policy->freq_table[idx].frequency;
++
++ return freq == clamp_val(freq, policy->min, policy->max);
++}
++
+ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+@@ -1054,7 +1066,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ return 0;
+ }
+
+- if (idx < 0 && efficiencies) {
++ /* Limit frequency index to honor policy->min/max */
++ if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
+@@ -1111,10 +1124,9 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name,
+ struct of_phandle_args *args)
+ {
+- struct device_node *cpu_np;
+ int ret;
+
+- cpu_np = of_cpu_device_node_get(cpu);
++ struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+@@ -1122,9 +1134,6 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
+ args);
+ if (ret < 0)
+ return ret;
+-
+- of_node_put(cpu_np);
+-
+ return 0;
+ }
+
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 068f7738be22ab..624d4a38c358a0 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -189,10 +189,12 @@ enum cpuhp_state {
+ /* Must be the last timer callback */
+ CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_ARM_XEN_STARTING,
++ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ CPUHP_AP_ARM64_ISNDEP_STARTING,
+ CPUHP_AP_SMPCFD_DYING,
++ CPUHP_AP_HRTIMERS_DYING,
+ CPUHP_AP_X86_TBOOT_DYING,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
+ CPUHP_AP_ONLINE,
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index f10fb87d49dbe3..dbdbf1451cadd7 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -821,7 +821,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+ */
+ static inline unsigned int cpumask_size(void)
+ {
+- return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long);
++ return bitmap_size(large_cpumask_bits);
+ }
+
+ /*
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index f923528d5cc43a..bb55703e116641 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -108,14 +108,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+ * same context as task->real_cred.
+ */
+ struct cred {
+- atomic_t usage;
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- atomic_t subscribers; /* number of processes subscribed */
+- void *put_addr;
+- unsigned magic;
+-#define CRED_MAGIC 0x43736564
+-#define CRED_MAGIC_DEAD 0x44656144
+-#endif
++ atomic_long_t usage;
+ kuid_t uid; /* real UID of the task */
+ kgid_t gid; /* real GID of the task */
+ kuid_t suid; /* saved UID of the task */
+@@ -171,46 +164,6 @@ extern int cred_fscmp(const struct cred *, const struct cred *);
+ extern void __init cred_init(void);
+ extern int set_cred_ucounts(struct cred *);
+
+-/*
+- * check for validity of credentials
+- */
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+-extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
+-extern void __validate_process_creds(struct task_struct *,
+- const char *, unsigned);
+-
+-extern bool creds_are_invalid(const struct cred *cred);
+-
+-static inline void __validate_creds(const struct cred *cred,
+- const char *file, unsigned line)
+-{
+- if (unlikely(creds_are_invalid(cred)))
+- __invalid_creds(cred, file, line);
+-}
+-
+-#define validate_creds(cred) \
+-do { \
+- __validate_creds((cred), __FILE__, __LINE__); \
+-} while(0)
+-
+-#define validate_process_creds() \
+-do { \
+- __validate_process_creds(current, __FILE__, __LINE__); \
+-} while(0)
+-
+-extern void validate_creds_for_do_exit(struct task_struct *);
+-#else
+-static inline void validate_creds(const struct cred *cred)
+-{
+-}
+-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+-{
+-}
+-static inline void validate_process_creds(void)
+-{
+-}
+-#endif
+-
+ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+ {
+ return cap_issubset(cred->cap_ambient,
+@@ -227,7 +180,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+ */
+ static inline struct cred *get_new_cred(struct cred *cred)
+ {
+- atomic_inc(&cred->usage);
++ atomic_long_inc(&cred->usage);
+ return cred;
+ }
+
+@@ -249,7 +202,6 @@ static inline const struct cred *get_cred(const struct cred *cred)
+ struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return cred;
+- validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
+ return get_new_cred(nonconst_cred);
+ }
+@@ -259,9 +211,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
+ struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return NULL;
+- if (!atomic_inc_not_zero(&nonconst_cred->usage))
++ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
+ return NULL;
+- validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
+ return cred;
+ }
+@@ -282,8 +233,7 @@ static inline void put_cred(const struct cred *_cred)
+ struct cred *cred = (struct cred *) _cred;
+
+ if (cred) {
+- validate_creds(cred);
+- if (atomic_dec_and_test(&(cred)->usage))
++ if (atomic_long_dec_and_test(&(cred)->usage))
+ __put_cred(cred);
+ }
+ }
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index ae2664d1d5f1de..a953d7083cd593 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -522,8 +522,20 @@ struct damon_ctx {
+ struct damon_attrs attrs;
+
+ /* private: internal use only */
+- struct timespec64 last_aggregation;
+- struct timespec64 last_ops_update;
++ /* number of sample intervals that passed since this context started */
++ unsigned long passed_sample_intervals;
++ /*
++ * number of sample intervals that should be passed before next
++ * aggregation
++ */
++ unsigned long next_aggregation_sis;
++ /*
++ * number of sample intervals that should be passed before next ops
++ * update
++ */
++ unsigned long next_ops_update_sis;
++ /* for waiting until the execution of the kdamond_fn is started */
++ struct completion kdamond_started;
+
+ /* public: */
+ struct task_struct *kdamond;
+@@ -642,6 +654,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+ }
+
++static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
++{
++ /* {aggr,sample}_interval are unsigned long, hence could overflow */
++ return min(attrs->aggr_interval / attrs->sample_interval,
++ (unsigned long)UINT_MAX);
++}
++
+
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
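For a sense of why the clamp in damon_max_nr_accesses() is needed, a worked
sketch with hypothetical interval values:

/* Both intervals are unsigned long, so the ratio can exceed UINT_MAX on
 * 64-bit systems while the nr_accesses counters are only unsigned int.
 */
unsigned long aggr_interval = 1UL << 40;	/* hypothetical, in usecs */
unsigned long sample_interval = 1;		/* hypothetical, in usecs */

/* 2^40 samples per aggregation would wrap an unsigned int; the
 * min(..., (unsigned long)UINT_MAX) caps the result at UINT_MAX.
 */
unsigned int max_nr = min(aggr_interval / sample_interval,
			  (unsigned long)UINT_MAX);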
+diff --git a/include/linux/dax.h b/include/linux/dax.h
+index 22cd9902345d74..b463502b16e17f 100644
+--- a/include/linux/dax.h
++++ b/include/linux/dax.h
+@@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
+
+ struct page *dax_layout_busy_page(struct address_space *mapping);
+ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
+-dax_entry_t dax_lock_page(struct page *page);
+-void dax_unlock_page(struct page *page, dax_entry_t cookie);
++dax_entry_t dax_lock_folio(struct folio *folio);
++void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
+ dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+ void dax_unlock_mapping_entry(struct address_space *mapping,
+@@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
+ return -EOPNOTSUPP;
+ }
+
+-static inline dax_entry_t dax_lock_page(struct page *page)
++static inline dax_entry_t dax_lock_folio(struct folio *folio)
+ {
+- if (IS_DAX(page->mapping->host))
++ if (IS_DAX(folio->mapping->host))
+ return ~0UL;
+ return 0;
+ }
+
+-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
++static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
+ {
+ }
+
+diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
+index 6bfe70decc9fb3..ae80a303c216be 100644
+--- a/include/linux/dev_printk.h
++++ b/include/linux/dev_printk.h
+@@ -129,6 +129,16 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
++/*
++ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
++ * gcc's format checking.
++ */
++#define dev_no_printk(level, dev, fmt, ...) \
++ ({ \
++ if (0) \
++ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
++ })
++
+ /*
+ * #defines for all the dev_<level> macros to prefix with whatever
+ * possible use of #define dev_fmt(fmt) ...
+@@ -158,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #else
+ #define dev_dbg(dev, fmt, ...) \
+-({ \
+- if (0) \
+- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-})
++ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+
+ #ifdef CONFIG_PRINTK
+@@ -247,20 +254,14 @@ do { \
+ } while (0)
+ #else
+ #define dev_dbg_ratelimited(dev, fmt, ...) \
+-do { \
+- if (0) \
+- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-} while (0)
++ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+
+ #ifdef VERBOSE_DEBUG
+ #define dev_vdbg dev_dbg
+ #else
+ #define dev_vdbg(dev, fmt, ...) \
+-({ \
+- if (0) \
+- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-})
++ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+
+ /*
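The `if (0)` body of dev_no_printk() is what keeps format checking alive
while compiling the call away; a sketch of the effect:

/* With debugging disabled, dev_dbg(dev, "%d\n", ptr) expands roughly to:
 *
 *	if (0)
 *		_dev_printk(KERN_DEBUG, dev, "%d\n", ptr);
 *
 * The dead branch is eliminated entirely, but because _dev_printk() is
 * declared __printf, the mismatched "%d" vs. pointer argument still
 * triggers -Wformat exactly as a real call would.
 */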
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 56d93a1ffb7b6a..a070160fbcb8e0 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -1007,6 +1007,8 @@ static inline void device_unlock(struct device *dev)
+ mutex_unlock(&dev->mutex);
+ }
+
++DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
++
+ static inline void device_lock_assert(struct device *dev)
+ {
+ lockdep_assert_held(&dev->mutex);
+@@ -1248,6 +1250,7 @@ void device_link_del(struct device_link *link);
+ void device_link_remove(void *consumer, struct device *supplier);
+ void device_links_supplier_sync_state_pause(void);
+ void device_links_supplier_sync_state_resume(void);
++void device_link_wait_removal(void);
+
+ /* Create alias, so I can be autoloaded. */
+ #define MODULE_ALIAS_CHARDEV(major,minor) \
+diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
+index 7595142f3fc57d..7b2968612b7e69 100644
+--- a/include/linux/dm-io.h
++++ b/include/linux/dm-io.h
+@@ -80,7 +80,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
+ * error occurred doing io to the corresponding region.
+ */
+ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+- struct dm_io_region *region, unsigned int long *sync_error_bits);
++ struct dm_io_region *region, unsigned int long *sync_error_bits,
++ unsigned short ioprio);
+
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_DM_IO_H */
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index ebe78bd3d121dd..b3772edca2e6e0 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -498,6 +498,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
+ return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ }
+
++/**
++ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
++ * @f1: the first fence from the same context
++ * @f2: the second fence from the same context
++ *
++ * Returns true if f1 is chronologically later than f2 or the same fence. Both
++ * fences must be from the same context, since a seqno is not re-used across
++ * contexts.
++ */
++static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
++ struct dma_fence *f2)
++{
++ return f1 == f2 || dma_fence_is_later(f1, f2);
++}
++
+ /**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: the first fence from the same context
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index c3656e59021310..cff3dba6582094 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -955,7 +955,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
+
+ static inline bool is_slave_direction(enum dma_transfer_direction direction)
+ {
+- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
++ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
++ (direction == DMA_DEV_TO_DEV);
+ }
+
+ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
+index dca2969015d802..6fbfbde68a37c3 100644
+--- a/include/linux/dsa/ocelot.h
++++ b/include/linux/dsa/ocelot.h
+@@ -5,6 +5,8 @@
+ #ifndef _NET_DSA_TAG_OCELOT_H
+ #define _NET_DSA_TAG_OCELOT_H
+
++#include <linux/if_bridge.h>
++#include <linux/if_vlan.h>
+ #include <linux/kthread.h>
+ #include <linux/packing.h>
+ #include <linux/skbuff.h>
+@@ -273,4 +275,49 @@ static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+ return rew_op;
+ }
+
++/**
++ * ocelot_xmit_get_vlan_info: Determine VLAN_TCI and TAG_TYPE for injected frame
++ * @skb: Pointer to socket buffer
++ * @br: Pointer to bridge device that the port is under, if any
++ * @vlan_tci:
++ * @tag_type:
++ *
++ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
++ * payload and move it into the DSA tag, which will make the switch classify
++ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
++ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
++ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
++ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
++ * and source address learning is not performed for packets injected from the
++ * CPU anyway, so it doesn't matter that the VID is "wrong".
++ */
++static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
++ struct net_device *br,
++ u64 *vlan_tci, u64 *tag_type)
++{
++ struct vlan_ethhdr *hdr;
++ u16 proto, tci;
++
++ if (!br || !br_vlan_enabled(br)) {
++ *vlan_tci = 0;
++ *tag_type = IFH_TAG_TYPE_C;
++ return;
++ }
++
++ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
++ br_vlan_get_proto(br, &proto);
++
++ if (ntohs(hdr->h_vlan_proto) == proto) {
++ vlan_remove_tag(skb, &tci);
++ *vlan_tci = tci;
++ } else {
++ rcu_read_lock();
++ br_vlan_get_pvid_rcu(br, &tci);
++ rcu_read_unlock();
++ *vlan_tci = tci;
++ }
++
++ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
++}
++
+ #endif
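A minimal sketch of a caller of ocelot_xmit_get_vlan_info(); the surrounding
tagger code is hypothetical (skb is the frame being sent, br the bridge the
port is under, or NULL), only the helper call itself comes from this patch:

/* Hypothetical tagger xmit path: classify the frame's VLAN before
 * building the injection frame header (IFH).
 */
u64 vlan_tci, tag_type;

ocelot_xmit_get_vlan_info(skb, br, &vlan_tci, &tag_type);
/* The tagger would then encode vlan_tci and tag_type into the IFH
 * fields of the frame before handing the skb to the hardware.
 */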
+diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
+index 224645f17c333b..297231854ada51 100644
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -607,6 +607,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ eth_hw_addr_set(dev, addr);
+ }
+
++/**
++ * eth_skb_pkt_type - Assign packet type if destination address does not match
++ * @skb: Buffer that is assigned a packet type if its destination does not match @dev
++ * @dev: Network device used to compare packet address against
++ *
++ * If the destination MAC address of the packet does not match the network
++ * device address, assign an appropriate packet type.
++ */
++static inline void eth_skb_pkt_type(struct sk_buff *skb,
++ const struct net_device *dev)
++{
++ const struct ethhdr *eth = eth_hdr(skb);
++
++ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
++ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
++ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
++ skb->pkt_type = PACKET_BROADCAST;
++ else
++ skb->pkt_type = PACKET_MULTICAST;
++ } else {
++ skb->pkt_type = PACKET_OTHERHOST;
++ }
++ }
++}
++
+ /**
+ * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+ * @skb: Buffer to pad
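A sketch of the new eth_skb_pkt_type() helper in a driver RX path; the
function and its callers here are hypothetical:

/* Hypothetical RX handler: pkt_type defaults to PACKET_HOST, so only a
 * mismatching destination address needs reclassification.
 */
static void demo_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb_reset_mac_header(skb);	/* eth_hdr(skb) must be valid */
	eth_skb_pkt_type(skb, dev);
	/* ... set skb->protocol and hand off to netif_receive_skb() ... */
}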
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 62b61527bcc4f6..1b523fd48586f6 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+
+ /**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+- * @data: Pointer to start of string to update
++ * @data: Pointer to a pointer to the start of string to update
+ * @fmt: Format of string to write
+ *
+- * Write formatted string to data. Update data to point at start of
++ * Write formatted string to *data. Update *data to point at start of
+ * next string.
+ */
+ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+diff --git a/include/linux/evm.h b/include/linux/evm.h
+index 01fc495a83e278..36ec884320d9f5 100644
+--- a/include/linux/evm.h
++++ b/include/linux/evm.h
+@@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
++extern int evm_inode_copy_up_xattr(const char *name);
+ extern int evm_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *xattr_name);
+ extern void evm_inode_post_removexattr(struct dentry *dentry,
+@@ -117,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ return;
+ }
+
++static inline int evm_inode_copy_up_xattr(const char *name)
++{
++ return 0;
++}
++
+ static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *xattr_name)
+diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
+index 45fca09b231943..5280194777340d 100644
+--- a/include/linux/export-internal.h
++++ b/include/linux/export-internal.h
+@@ -16,10 +16,13 @@
+ * and eliminates the need for absolute relocations that require runtime
+ * processing on relocatable kernels.
+ */
++#define __KSYM_ALIGN ".balign 4"
+ #define __KSYM_REF(sym) ".long " #sym "- ."
+ #elif defined(CONFIG_64BIT)
++#define __KSYM_ALIGN ".balign 8"
+ #define __KSYM_REF(sym) ".quad " #sym
+ #else
++#define __KSYM_ALIGN ".balign 4"
+ #define __KSYM_REF(sym) ".long " #sym
+ #endif
+
+@@ -42,7 +45,7 @@
+ " .asciz \"" ns "\"" "\n" \
+ " .previous" "\n" \
+ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+- " .balign 4" "\n" \
++ __KSYM_ALIGN "\n" \
+ "__ksymtab_" #name ":" "\n" \
+ __KSYM_REF(sym) "\n" \
+ __KSYM_REF(__kstrtab_ ##name) "\n" \
+@@ -63,6 +66,7 @@
+
+ #define SYMBOL_CRC(sym, crc, sec) \
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
++ ".balign 4" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index a82a4bb6ce68bf..b9affa64b7fa22 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -27,6 +27,7 @@
+
+ #define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
+ #define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
++#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+
+ /* 0, 1(node nid), 2(meta nid) are reserved node id */
+ #define F2FS_RESERVED_NODE_NUM 3
+@@ -40,12 +41,6 @@
+
+ #define F2FS_ENC_UTF8_12_1 1
+
+-#define F2FS_IO_SIZE(sbi) BIT(F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+-#define F2FS_IO_SIZE_KB(sbi) BIT(F2FS_OPTION(sbi).write_io_size_bits + 2) /* KB */
+-#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
+-#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
+-#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1)
+-
+ /* This flag is used by node and meta inodes, and by recovery */
+ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
+
+@@ -81,6 +76,7 @@ enum stop_cp_reason {
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
++ STOP_CP_REASON_NO_SEGMENT,
+ STOP_CP_REASON_MAX,
+ };
+
+@@ -104,6 +100,7 @@ enum f2fs_error {
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
++ ERROR_INCONSISTENT_NAT,
+ ERROR_MAX,
+ };
+
+@@ -265,7 +262,7 @@ struct f2fs_extent {
+ #define F2FS_INLINE_DATA 0x02 /* file inline data flag */
+ #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
+ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
+-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
++#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
+ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+ #define F2FS_PIN_FILE 0x40 /* file should not be gced */
+ #define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index c14576458228a3..322b4d20afa558 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -690,6 +690,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
++#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
++ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
++ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
++
+ /*
+ * Initializes struct fb_ops for deferred I/O.
+ */
+diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
+index e066816f3519fb..fcff696860b8fe 100644
+--- a/include/linux/fdtable.h
++++ b/include/linux/fdtable.h
+@@ -22,7 +22,6 @@
+ * as this is the granularity returned by copy_fdset().
+ */
+ #define NR_OPEN_DEFAULT BITS_PER_LONG
+-#define NR_OPEN_MAX ~0U
+
+ struct fdtable {
+ unsigned int max_fds;
+@@ -117,7 +116,10 @@ struct task_struct;
+
+ void put_files_struct(struct files_struct *fs);
+ int unshare_files(void);
+-struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
++struct fd_range {
++ unsigned int from, to;
++};
++struct files_struct *dup_fd(struct files_struct *, struct fd_range *) __latent_entropy;
+ void do_close_on_exec(struct files_struct *);
+ int iterate_fd(struct files_struct *, unsigned,
+ int (*)(const void *, struct file *, unsigned),
+@@ -126,8 +128,6 @@ int iterate_fd(struct files_struct *, unsigned,
+ extern int close_fd(unsigned int fd);
+ extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
+ extern struct file *close_fd_get_file(unsigned int fd);
+-extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
+- struct files_struct **new_fdp);
+
+ extern struct kmem_cache *files_cachep;
+
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 761af6b3cf2bce..5090e940ba3e46 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -505,24 +505,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
+ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
+ u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+-#define BPF_CALL_x(x, name, ...) \
++#define BPF_CALL_x(x, attr, name, ...) \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
++ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
++ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ { \
+ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ } \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
+-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
+-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
+-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
+-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
+-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
++#define __NOATTR
++#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
++#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
++
++#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
+
+ #define bpf_ctx_range(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+@@ -1285,6 +1288,7 @@ struct bpf_sock_addr_kern {
+ */
+ u64 tmp_reg;
+ void *t_ctx; /* Attach type specific context. */
++ u32 uaddrlen;
+ };
+
+ struct bpf_sock_ops_kern {
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index da51a83b28293c..f7e1895367fa1d 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -31,17 +31,30 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
+ __ret; \
+ })
+
+-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
++#if defined(__SANITIZE_ADDRESS__)
++
++#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
++#elif defined(CONFIG_KASAN_GENERIC)
++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
++#else /* CONFIG_KASAN_SW_TAGS */
++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
++#endif
++
+ extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+ extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+ extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+ extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+ extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+ extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
++
+ #else
+
+ #if defined(__SANITIZE_MEMORY__)
+@@ -66,6 +79,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
+ #define __underlying_strlen __builtin_strlen
+ #define __underlying_strncat __builtin_strncat
+ #define __underlying_strncpy __builtin_strncpy
++
+ #endif
+
+ /**
+diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
+index 223da48a6d18b5..94c4edd047e54f 100644
+--- a/include/linux/fpga/fpga-bridge.h
++++ b/include/linux/fpga/fpga-bridge.h
+@@ -45,6 +45,7 @@ struct fpga_bridge_info {
+ * @dev: FPGA bridge device
+ * @mutex: enforces exclusive reference to bridge
+ * @br_ops: pointer to struct of FPGA bridge ops
++ * @br_ops_owner: module containing the br_ops
+ * @info: fpga image specific information
+ * @node: FPGA bridge list node
+ * @priv: low level driver private date
+@@ -54,6 +55,7 @@ struct fpga_bridge {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to bridge */
+ const struct fpga_bridge_ops *br_ops;
++ struct module *br_ops_owner;
+ struct fpga_image_info *info;
+ struct list_head node;
+ void *priv;
+@@ -79,10 +81,12 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list);
+
++#define fpga_bridge_register(parent, name, br_ops, priv) \
++ __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE)
+ struct fpga_bridge *
+-fpga_bridge_register(struct device *parent, const char *name,
+- const struct fpga_bridge_ops *br_ops,
+- void *priv);
++__fpga_bridge_register(struct device *parent, const char *name,
++ const struct fpga_bridge_ops *br_ops, void *priv,
++ struct module *owner);
+ void fpga_bridge_unregister(struct fpga_bridge *br);
+
+ #endif /* _LINUX_FPGA_BRIDGE_H */
+diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
+index 54f63459efd6e2..0d4fe068f3d8af 100644
+--- a/include/linux/fpga/fpga-mgr.h
++++ b/include/linux/fpga/fpga-mgr.h
+@@ -201,6 +201,7 @@ struct fpga_manager_ops {
+ * @state: state of fpga manager
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to struct of fpga manager ops
++ * @mops_owner: module containing the mops
+ * @priv: low level driver private date
+ */
+ struct fpga_manager {
+@@ -210,6 +211,7 @@ struct fpga_manager {
+ enum fpga_mgr_states state;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
++ struct module *mops_owner;
+ void *priv;
+ };
+
+@@ -230,18 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
+
+ void fpga_mgr_put(struct fpga_manager *mgr);
+
++#define fpga_mgr_register_full(parent, info) \
++ __fpga_mgr_register_full(parent, info, THIS_MODULE)
+ struct fpga_manager *
+-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
++__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++ struct module *owner);
+
++#define fpga_mgr_register(parent, name, mops, priv) \
++ __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+ struct fpga_manager *
+-fpga_mgr_register(struct device *parent, const char *name,
+- const struct fpga_manager_ops *mops, void *priv);
++__fpga_mgr_register(struct device *parent, const char *name,
++ const struct fpga_manager_ops *mops, void *priv, struct module *owner);
++
+ void fpga_mgr_unregister(struct fpga_manager *mgr);
+
++#define devm_fpga_mgr_register_full(parent, info) \
++ __devm_fpga_mgr_register_full(parent, info, THIS_MODULE)
+ struct fpga_manager *
+-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
++__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++ struct module *owner);
++#define devm_fpga_mgr_register(parent, name, mops, priv) \
++ __devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+ struct fpga_manager *
+-devm_fpga_mgr_register(struct device *parent, const char *name,
+- const struct fpga_manager_ops *mops, void *priv);
++__devm_fpga_mgr_register(struct device *parent, const char *name,
++ const struct fpga_manager_ops *mops, void *priv,
++ struct module *owner);
+
+ #endif /*_LINUX_FPGA_MGR_H */
+diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
+index 9d4d32909340ab..5fbc05fe70a6b7 100644
+--- a/include/linux/fpga/fpga-region.h
++++ b/include/linux/fpga/fpga-region.h
+@@ -36,6 +36,7 @@ struct fpga_region_info {
+ * @mgr: FPGA manager
+ * @info: FPGA image info
+ * @compat_id: FPGA region id for compatibility check.
++ * @ops_owner: module containing the get_bridges function
+ * @priv: private data
+ * @get_bridges: optional function to get bridges to a list
+ */
+@@ -46,6 +47,7 @@ struct fpga_region {
+ struct fpga_manager *mgr;
+ struct fpga_image_info *info;
+ struct fpga_compat_id *compat_id;
++ struct module *ops_owner;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+ };
+@@ -58,12 +60,17 @@ fpga_region_class_find(struct device *start, const void *data,
+
+ int fpga_region_program_fpga(struct fpga_region *region);
+
++#define fpga_region_register_full(parent, info) \
++ __fpga_region_register_full(parent, info, THIS_MODULE)
+ struct fpga_region *
+-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
++__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
++ struct module *owner);
+
++#define fpga_region_register(parent, mgr, get_bridges) \
++ __fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)
+ struct fpga_region *
+-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+- int (*get_bridges)(struct fpga_region *));
++__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
++ int (*get_bridges)(struct fpga_region *), struct module *owner);
+ void fpga_region_unregister(struct fpga_region *region);
+
+ #endif /* _FPGA_REGION_H */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 4a40823c3c6784..6c3d86532e3f91 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -352,6 +352,8 @@ enum rw_hint {
+ * unrelated IO (like cache flushing, new IO generation, etc).
+ */
+ #define IOCB_DIO_CALLER_COMP (1 << 22)
++/* kiocb is a read or write operation submitted by fs/aio.c. */
++#define IOCB_AIO_RW (1 << 23)
+
+ /* for use in trace events */
+ #define TRACE_IOCB_STRINGS \
+@@ -640,6 +642,7 @@ struct inode {
+ umode_t i_mode;
+ unsigned short i_opflags;
+ kuid_t i_uid;
++ struct list_head i_lru; /* inode LRU list */
+ kgid_t i_gid;
+ unsigned int i_flags;
+
+@@ -701,7 +704,6 @@ struct inode {
+ u16 i_wb_frn_avg_time;
+ u16 i_wb_frn_history;
+ #endif
+- struct list_head i_lru; /* inode LRU list */
+ struct list_head i_sb_list;
+ struct list_head i_wb_list; /* backing dev writeback list */
+ union {
+@@ -1034,7 +1036,7 @@ struct file_handle {
+ __u32 handle_bytes;
+ int handle_type;
+ /* file identifier */
+- unsigned char f_handle[];
++ unsigned char f_handle[] __counted_by(handle_bytes);
+ };
+
+ static inline struct file *get_file(struct file *f)
+@@ -1221,6 +1223,7 @@ struct super_block {
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
+ struct list_head s_mounts; /* list of mounts; _not_ for fs use */
+ struct block_device *s_bdev;
++ struct bdev_handle *s_bdev_handle;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct hlist_node s_instances;
+@@ -1511,24 +1514,81 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ struct timespec64 current_time(struct inode *inode);
+ struct timespec64 inode_set_ctime_current(struct inode *inode);
+
+-/**
+- * inode_get_ctime - fetch the current ctime from the inode
+- * @inode: inode from which to fetch ctime
+- *
+- * Grab the current ctime from the inode and return it.
+- */
++static inline time64_t inode_get_atime_sec(const struct inode *inode)
++{
++ return inode->i_atime.tv_sec;
++}
++
++static inline long inode_get_atime_nsec(const struct inode *inode)
++{
++ return inode->i_atime.tv_nsec;
++}
++
++static inline struct timespec64 inode_get_atime(const struct inode *inode)
++{
++ return inode->i_atime;
++}
++
++static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
++ struct timespec64 ts)
++{
++ inode->i_atime = ts;
++ return ts;
++}
++
++static inline struct timespec64 inode_set_atime(struct inode *inode,
++ time64_t sec, long nsec)
++{
++ struct timespec64 ts = { .tv_sec = sec,
++ .tv_nsec = nsec };
++ return inode_set_atime_to_ts(inode, ts);
++}
++
++static inline time64_t inode_get_mtime_sec(const struct inode *inode)
++{
++ return inode->i_mtime.tv_sec;
++}
++
++static inline long inode_get_mtime_nsec(const struct inode *inode)
++{
++ return inode->i_mtime.tv_nsec;
++}
++
++static inline struct timespec64 inode_get_mtime(const struct inode *inode)
++{
++ return inode->i_mtime;
++}
++
++static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
++ struct timespec64 ts)
++{
++ inode->i_mtime = ts;
++ return ts;
++}
++
++static inline struct timespec64 inode_set_mtime(struct inode *inode,
++ time64_t sec, long nsec)
++{
++ struct timespec64 ts = { .tv_sec = sec,
++ .tv_nsec = nsec };
++ return inode_set_mtime_to_ts(inode, ts);
++}
++
++static inline time64_t inode_get_ctime_sec(const struct inode *inode)
++{
++ return inode->__i_ctime.tv_sec;
++}
++
++static inline long inode_get_ctime_nsec(const struct inode *inode)
++{
++ return inode->__i_ctime.tv_nsec;
++}
++
+ static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+ {
+ return inode->__i_ctime;
+ }
+
+-/**
+- * inode_set_ctime_to_ts - set the ctime in the inode
+- * @inode: inode in which to set the ctime
+- * @ts: value to set in the ctime field
+- *
+- * Set the ctime in @inode to @ts
+- */
+ static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+ {
+@@ -1553,6 +1613,8 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
+ return inode_set_ctime_to_ts(inode, ts);
+ }
+
++struct timespec64 simple_inode_init_ts(struct inode *inode);
++
+ /*
+ * Snapshotting support.
+ */
+@@ -2018,7 +2080,7 @@ struct super_operations {
+ #ifdef CONFIG_QUOTA
+ ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+ ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+- struct dquot **(*get_dquots)(struct inode *);
++ struct dquot __rcu **(*get_dquots)(struct inode *);
+ #endif
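A worked example of the new limit check, using a hypothetical frequency table:

/* Table: {800000, 1400000, 2000000} kHz; policy->min = 1000000,
 * policy->max = 1800000. If the efficiency-aware lookup returns idx 2:
 *
 *	clamp_val(2000000, 1000000, 1800000) == 1800000 != 2000000
 *
 * so cpufreq_is_in_limits() is false and the caller retries with
 * efficiencies == false rather than returning an out-of-limits index.
 * A negative idx is likewise rejected before the table is dereferenced.
 */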
+ long (*nr_cached_objects)(struct super_block *,
+ struct shrink_control *);
+@@ -2203,6 +2265,9 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
++ * I_LRU_ISOLATING Inode is pinned being isolated from LRU without holding
++ * i_count.
++ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ */
+ #define I_DIRTY_SYNC (1 << 0)
+@@ -2226,6 +2291,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ #define I_DONTCACHE (1 << 16)
+ #define I_SYNC_QUEUED (1 << 17)
+ #define I_PINNING_FSCACHE_WB (1 << 18)
++#define __I_LRU_ISOLATING 19
++#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING)
+
+ #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+ #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+@@ -2777,6 +2844,17 @@ extern bool path_is_under(const struct path *, const struct path *);
+
+ extern char *file_path(struct file *, char *, int);
+
++/**
++ * is_dot_dotdot - returns true only if @name is "." or ".."
++ * @name: file name to check
++ * @len: length of file name, in bytes
++ */
++static inline bool is_dot_dotdot(const char *name, size_t len)
++{
++ return len && unlikely(name[0] == '.') &&
++ (len == 1 || (len == 2 && name[1] == '.'));
++}
++
+ #include <linux/err.h>
+
+ /* needed for stackable file system support */
+@@ -3134,6 +3212,15 @@ extern int generic_check_addressable(unsigned, u64);
+
+ extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
+
++static inline bool sb_has_encoding(const struct super_block *sb)
++{
++#if IS_ENABLED(CONFIG_UNICODE)
++ return !!sb->s_encoding;
++#else
++ return false;
++#endif
++}
++
+ int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ unsigned int ia_valid);
+ int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
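The behaviour of the new is_dot_dotdot() helper, spelled out on a few sample
inputs:

/* is_dot_dotdot(".", 1)   -> true
 * is_dot_dotdot("..", 2)  -> true
 * is_dot_dotdot(".a", 2)  -> false (second byte is not '.')
 * is_dot_dotdot("a.", 2)  -> false (first byte is not '.')
 * is_dot_dotdot("", 0)    -> false (len == 0 short-circuits)
 */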
+diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
+index a174cedf4d9072..35e86d2f2887bb 100644
+--- a/include/linux/fscache-cache.h
++++ b/include/linux/fscache-cache.h
+@@ -19,6 +19,7 @@
+ enum fscache_cache_trace;
+ enum fscache_cookie_trace;
+ enum fscache_access_trace;
++enum fscache_volume_trace;
+
+ enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+@@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
+
+ extern void fscache_io_error(struct fscache_cache *cache);
+
++extern struct fscache_volume *
++fscache_try_get_volume(struct fscache_volume *volume,
++ enum fscache_volume_trace where);
++extern void fscache_put_volume(struct fscache_volume *volume,
++ enum fscache_volume_trace where);
+ extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
+index df25fffdc0ae71..623ccfcbf39c35 100644
+--- a/include/linux/fsl/enetc_mdio.h
++++ b/include/linux/fsl/enetc_mdio.h
+@@ -59,7 +59,8 @@ static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
+ { return -EINVAL; }
+-struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
++static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
++ void __iomem *port_regs)
+ { return ERR_PTR(-EINVAL); }
+
+ #endif
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index ed48e4f1e755fb..0dea8d0fdb0b8c 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -93,7 +93,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
+ {
+ const struct path *path;
+
+- if (file->f_mode & FMODE_NONOTIFY)
++ /*
++ * FMODE_NONOTIFY fds are generated by fanotify itself and should not
++ * generate new events. We also don't want to generate events for
++ * FMODE_PATH fds (their open & close events are just handle
++ * creation / destruction events, not "real" file events).
++ */
++ if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
+ return 0;
+
+ /* Overlayfs internal files have fake f_path */
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index c0892d75ce3339..575415b5134972 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -563,12 +563,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+
+ static inline int fsnotify_inode_watches_children(struct inode *inode)
+ {
++ __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
++
+ /* FS_EVENT_ON_CHILD is set if the inode may care */
+- if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
++ if (!(parent_mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /* this inode might care about child events, does it care about the
+ * specific set of events that can happen on a child? */
+- return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
++ return parent_mask & FS_EVENTS_POSS_ON_CHILD;
+ }
+
+ /*
+@@ -582,7 +584,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
+ /*
+ * Serialisation of setting PARENT_WATCHED on the dentries is provided
+ * by d_lock. If inotify_inode_watched changes after we have taken
+- * d_lock, the following __fsnotify_update_child_dentry_flags call will
++ * d_lock, the following fsnotify_set_children_dentry_flags call will
+ * find our entry, so it will spin until we complete here, and update
+ * us with the new state.
+ */
+diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
+index 107613f7d79202..f6cd0f909d9fb9 100644
+--- a/include/linux/generic-radix-tree.h
++++ b/include/linux/generic-radix-tree.h
+@@ -38,6 +38,7 @@
+
+ #include <asm/page.h>
+ #include <linux/bug.h>
++#include <linux/limits.h>
+ #include <linux/log2.h>
+ #include <linux/math.h>
+ #include <linux/types.h>
+@@ -184,6 +185,12 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+ static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ size_t obj_size)
+ {
++ if (iter->offset + obj_size < iter->offset) {
++ iter->offset = SIZE_MAX;
++ iter->pos = SIZE_MAX;
++ return;
++ }
++
+ iter->offset += obj_size;
+
+ if (!is_power_of_2(obj_size) &&
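Why the saturation guard added above matters, as a worked wrap-around case:

/* With obj_size == 16 and iter->offset == SIZE_MAX - 7, the old
 *
 *	iter->offset += obj_size;
 *
 * wrapped around to 8, silently restarting the iteration near the start
 * of the tree. Pinning offset and pos to SIZE_MAX instead makes the
 * iterator terminate cleanly at the end.
 */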
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 665f06675c834e..a0803ed4b469a5 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -343,6 +343,15 @@ static inline bool gfp_has_io_fs(gfp_t gfp)
+ return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+ }
+
++/*
++ * Check if the gfp flags allow compaction - GFP_NOIO is a really
++ * tricky context because the migration might require IO.
++ */
++static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
++{
++ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
++}
++
+ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+
+ #ifdef CONFIG_CONTIG_ALLOC
+diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
+index 6583a58670c571..dfde1e1e321c38 100644
+--- a/include/linux/gfp_types.h
++++ b/include/linux/gfp_types.h
+@@ -2,6 +2,8 @@
+ #ifndef __LINUX_GFP_TYPES_H
+ #define __LINUX_GFP_TYPES_H
+
++#include <linux/bits.h>
++
+ /* The typedef is in types.h but we want the documentation here */
+ #if 0
+ /**
+diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
+index 4f0c5d62c8f38b..d6e38a500833f5 100644
+--- a/include/linux/gpio/driver.h
++++ b/include/linux/gpio/driver.h
+@@ -607,6 +607,12 @@ extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip
+ extern struct gpio_chip *gpiochip_find(void *data,
+ int (*match)(struct gpio_chip *gc, void *data));
+
++struct gpio_device *gpio_device_find(void *data,
++ int (*match)(struct gpio_chip *gc, void *data));
++
++struct gpio_device *gpio_device_get(struct gpio_device *gdev);
++void gpio_device_put(struct gpio_device *gdev);
++
+ bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
+ int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
+ void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
+diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
+index 6c75c8bd44a0bb..1a14e239221f7e 100644
+--- a/include/linux/gpio/property.h
++++ b/include/linux/gpio/property.h
+@@ -2,7 +2,6 @@
+ #ifndef __LINUX_GPIO_PROPERTY_H
+ #define __LINUX_GPIO_PROPERTY_H
+
+-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
+ #include <linux/property.h>
+
+ #define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 964ca1f15e3f6a..3b08a295722983 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -679,6 +679,7 @@ struct hid_device { /* device report descriptor */
+ struct list_head debug_list;
+ spinlock_t debug_list_lock;
+ wait_queue_head_t debug_wait;
++ struct kref ref;
+
+ unsigned int id; /* system unique id */
+
+@@ -687,6 +688,8 @@ struct hid_device { /* device report descriptor */
+ #endif /* CONFIG_BPF */
+ };
+
++void hiddev_free(struct kref *ref);
++
+ #define to_hid_device(pdev) \
+ container_of(pdev, struct hid_device, dev)
+
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 99c474de800ddc..75607d4ba26cb7 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -454,7 +454,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+- from += chunk;
++ to += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
+index 39fbfb4be944bb..5c4b3a68053f51 100644
+--- a/include/linux/hisi_acc_qm.h
++++ b/include/linux/hisi_acc_qm.h
+@@ -144,6 +144,13 @@ enum qm_vf_state {
+ QM_NOT_READY,
+ };
+
++enum qm_misc_ctl_bits {
++ QM_DRIVER_REMOVING = 0x0,
++ QM_RST_SCHED,
++ QM_RESETTING,
++ QM_MODULE_PARAM,
++};
++
+ enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+@@ -153,6 +160,11 @@ enum qm_cap_bits {
+ QM_SUPPORT_RPM,
+ };
+
++struct qm_dev_alg {
++ u64 alg_msk;
++ const char *alg;
++};
++
+ struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+@@ -258,6 +270,16 @@ struct hisi_qm_cap_info {
+ u32 v3_val;
+ };
+
++struct hisi_qm_cap_record {
++ u32 type;
++ u32 cap_val;
++};
++
++struct hisi_qm_cap_tables {
++ struct hisi_qm_cap_record *qm_cap_table;
++ struct hisi_qm_cap_record *dev_cap_table;
++};
++
+ struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+@@ -269,6 +291,7 @@ struct hisi_qm_poll_data {
+ struct hisi_qm *qm;
+ struct work_struct work;
+ u16 *qp_finish_id;
++ u16 eqe_num;
+ };
+
+ /**
+@@ -344,7 +367,6 @@ struct hisi_qm {
+ struct work_struct rst_work;
+ struct work_struct cmd_process;
+
+- const char *algs;
+ bool use_sva;
+
+ resource_size_t phys_base;
+@@ -355,6 +377,8 @@ struct hisi_qm {
+ u32 mb_qos;
+ u32 type_rate;
+ struct qm_err_isolate isolate_data;
++
++ struct hisi_qm_cap_tables cap_tables;
+ };
+
+ struct hisi_qp_status {
+@@ -528,6 +552,8 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+ u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
++int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
++ u32 dev_algs_size);
+
+ /* Used by VFIO ACC live migration driver */
+ struct pci_driver *hisi_sec_get_pf_driver(void);
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 0ee140176f102f..254d4a898179c0 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -197,6 +197,7 @@ enum hrtimer_base_type {
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
+ * expired
++ * @online: CPU is online from an hrtimers point of view
+ * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
+ * callback to finish.
+ * @expires_next: absolute time of the next event, is required for remote
+@@ -219,7 +220,8 @@ struct hrtimer_cpu_base {
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+- softirq_activated : 1;
++ softirq_activated : 1,
++ online : 1;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned short nr_retries;
+@@ -531,9 +533,9 @@ extern void sysrq_timer_list_show(void);
+
+ int hrtimers_prepare_cpu(unsigned int cpu);
+ #ifdef CONFIG_HOTPLUG_CPU
+-int hrtimers_dead_cpu(unsigned int cpu);
++int hrtimers_cpu_dying(unsigned int cpu);
+ #else
+-#define hrtimers_dead_cpu NULL
++#define hrtimers_cpu_dying NULL
+ #endif
+
+ #endif
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 47d25a5e1933d0..0c50c4fceb95dd 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -713,6 +713,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+ /* Defines one hugetlb page size */
+ struct hstate {
+ struct mutex resize_lock;
++ struct lock_class_key resize_key;
+ int next_nid_to_alloc;
+ int next_nid_to_free;
+ unsigned int order;
+@@ -1265,10 +1266,7 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+ return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+ }
+
+-static inline bool __vma_private_lock(struct vm_area_struct *vma)
+-{
+- return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+-}
++bool __vma_private_lock(struct vm_area_struct *vma);
+
+ /*
+ * Safe version of huge_pte_offset() to check the locks. See comments
+diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
+index 8a3115516a1ba9..136e9842120e88 100644
+--- a/include/linux/hw_random.h
++++ b/include/linux/hw_random.h
+@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
+ extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+
+ extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
++extern long hwrng_yield(struct hwrng *rng);
+
+ #endif /* LINUX_HWRANDOM_H_ */
+diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
+index bfe7c1f1ac6d1c..f0231dbc477710 100644
+--- a/include/linux/hwspinlock.h
++++ b/include/linux/hwspinlock.h
+@@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
+ int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
+ void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+ int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+ struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
+ struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+@@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ {
+ }
+
++static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++ return 0;
++}
++
+ static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+ {
+ return 0;
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 2b00faf98017cc..96ceb4095425eb 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -164,8 +164,28 @@ struct hv_ring_buffer {
+ u8 buffer[];
+ } __packed;
+
++
++/*
++ * If the requested ring buffer size is at least 8 times the size of the
++ * header, steal space from the ring buffer for the header. Otherwise, add
++ * space for the header so that it doesn't take too much of the ring buffer
++ * space.
++ *
++ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
++ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
++ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
++ * large allocation that will be almost half wasted. As a contrasting example,
++ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
++ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
++ * In this latter case, we must add 64 Kbytes for the header and not worry
++ * about what's wasted.
++ */
++#define VMBUS_HEADER_ADJ(payload_sz) \
++ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
++ 0 : sizeof(struct hv_ring_buffer))
++
+ /* Calculate the proper size of a ringbuffer, it must be page-aligned */
+-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
++#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
+ (payload_sz))
+
+ struct hv_ring_buffer_info {
+@@ -812,6 +832,7 @@ struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
++ bool decrypted;
+ };
+
+ struct vmbus_channel {
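The header-adjustment rule above, worked through for two payload sizes
(assuming a 4 KiB struct hv_ring_buffer, i.e. x86 with 4 KiB pages):

/* payload_sz = 128 KiB: 128 KiB >= 8 * 4 KiB, so VMBUS_HEADER_ADJ() is 0
 * and VMBUS_RING_SIZE() = PAGE_ALIGN(128 KiB) = 128 KiB; the header is
 * carved out of the ring allocation itself.
 *
 * payload_sz = 16 KiB: 16 KiB < 8 * 4 KiB, so the full 4 KiB header is
 * added on top and VMBUS_RING_SIZE() = PAGE_ALIGN(4 KiB + 16 KiB) = 20 KiB.
 */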
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 0dae9db275380b..a3166100f0cce2 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -746,6 +746,11 @@ struct i2c_adapter {
+
+ struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
++
++ struct dentry *debugfs;
++
++ /* 7bit address space */
++ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
+ };
+ #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
+
+@@ -1033,7 +1038,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
+ struct acpi_resource;
+ struct acpi_resource_i2c_serialbus;
+
+-#if IS_ENABLED(CONFIG_ACPI)
++#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
+ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c);
+ int i2c_acpi_client_count(struct acpi_device *adev);
+diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
+index 90fa83464f0039..ef6217da8253bb 100644
+--- a/include/linux/i3c/device.h
++++ b/include/linux/i3c/device.h
+@@ -54,6 +54,7 @@ enum i3c_hdr_mode {
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes of the transfer
++ * @actual_len: actual number of bytes transferred by the controller
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+@@ -62,6 +63,7 @@ enum i3c_hdr_mode {
+ struct i3c_priv_xfer {
+ u8 rnw;
+ u16 len;
++ u16 actual_len;
+ union {
+ void *in;
+ const void *out;
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index a0dce14090a9e1..da5f5fa4a3a6ae 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
+ */
+ #define idr_for_each_entry_ul(idr, entry, tmp, id) \
+ for (tmp = 0, id = 0; \
+- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
+ /**
+@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
+ * @id: Entry ID.
+ *
+ * Continue to iterate over entries, continuing after the current position.
++ * After normal termination @entry is left with the value NULL. This
++ * is convenient for a "not found" value.
+ */
+ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
+ for (tmp = id; \
+- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
++ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
+ tmp = id, ++id)
+
+ /*
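The rewritten loop conditions above fold the idr_get_next_ul() call into the controlling expression so that, once iteration is exhausted, @entry is guaranteed to be NULL. A hedged sketch of the "not found" idiom this enables; struct thing, my_idr and my_match() are hypothetical:

struct thing *found;
unsigned long tmp, id;

idr_for_each_entry_ul(&my_idr, found, tmp, id) {
        if (my_match(found))
                break;          /* 'found' still points at the match */
}
if (!found)                     /* normal termination leaves NULL */
        pr_debug("no matching entry\n");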
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index b24fb80782c5a1..5f1e5a16d7b2c3 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -951,17 +951,24 @@ struct ieee80211_wide_bw_chansw_ie {
+ * @dtim_count: DTIM Count
+ * @dtim_period: DTIM Period
+ * @bitmap_ctrl: Bitmap Control
++ * @required_octet: "Syntactic sugar" to force the struct size to the
++ * minimum valid size when carried in a non-S1G PPDU
+ * @virtual_map: Partial Virtual Bitmap
+ *
+ * This structure represents the payload of the "TIM element" as
+- * described in IEEE Std 802.11-2020 section 9.4.2.5.
++ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
++ * definition is only applicable when the element is carried in a
++ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
++ * Control and Partial Virtual Bitmap may not be present.
+ */
+ struct ieee80211_tim_ie {
+ u8 dtim_count;
+ u8 dtim_period;
+ u8 bitmap_ctrl;
+- /* variable size: 1 - 251 bytes */
+- u8 virtual_map[1];
++ union {
++ u8 required_octet;
++ DECLARE_FLEX_ARRAY(u8, virtual_map);
++ };
+ } __packed;
+
+ /**
+@@ -2790,12 +2797,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
+ static inline const struct ieee80211_he_6ghz_oper *
+ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+ {
+- const u8 *ret = (const void *)&he_oper->optional;
++ const u8 *ret;
+ u32 he_oper_params;
+
+ if (!he_oper)
+ return NULL;
+
++ ret = (const void *)&he_oper->optional;
++
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+
+ if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+@@ -4381,7 +4390,8 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+- action != WLAN_PUB_ACTION_FILS_DISCOVERY;
++ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
++ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
+ }
+
+ /**
+@@ -4942,7 +4952,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+ bool check_common_len = false;
+ u16 control;
+
+- if (len < fixed)
++ if (!data || len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+@@ -5078,7 +5088,7 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ info_len += 1;
+
+ return prof->sta_info_len >= info_len &&
+- fixed + prof->sta_info_len <= len;
++ fixed + prof->sta_info_len - 1 <= len;
+ }
+
+ /**
+diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
+index 7852f6c9a714c6..719cf9cc6e1ac4 100644
+--- a/include/linux/iio/adc/ad_sigma_delta.h
++++ b/include/linux/iio/adc/ad_sigma_delta.h
+@@ -8,6 +8,8 @@
+ #ifndef __AD_SIGMA_DELTA_H__
+ #define __AD_SIGMA_DELTA_H__
+
++#include <linux/iio/iio.h>
++
+ enum ad_sigma_delta_mode {
+ AD_SD_MODE_CONTINUOUS = 0,
+ AD_SD_MODE_SINGLE = 1,
+@@ -99,7 +101,7 @@ struct ad_sigma_delta {
+ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ * rounded to 16 bytes to take into account padding.
+ */
+- uint8_t tx_buf[4] ____cacheline_aligned;
++ uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN);
+ uint8_t rx_buf[16] __aligned(8);
+ };
+
+diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
+deleted file mode 100644
+index 52620e5b80522e..00000000000000
+--- a/include/linux/iio/adc/adi-axi-adc.h
++++ /dev/null
+@@ -1,64 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Analog Devices Generic AXI ADC IP core driver/library
+- * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+- *
+- * Copyright 2012-2020 Analog Devices Inc.
+- */
+-#ifndef __ADI_AXI_ADC_H__
+-#define __ADI_AXI_ADC_H__
+-
+-struct device;
+-struct iio_chan_spec;
+-
+-/**
+- * struct adi_axi_adc_chip_info - Chip specific information
+- * @name Chip name
+- * @id Chip ID (usually product ID)
+- * @channels Channel specifications of type @struct iio_chan_spec
+- * @num_channels Number of @channels
+- * @scale_table Supported scales by the chip; tuples of 2 ints
+- * @num_scales Number of scales in the table
+- * @max_rate Maximum sampling rate supported by the device
+- */
+-struct adi_axi_adc_chip_info {
+- const char *name;
+- unsigned int id;
+-
+- const struct iio_chan_spec *channels;
+- unsigned int num_channels;
+-
+- const unsigned int (*scale_table)[2];
+- int num_scales;
+-
+- unsigned long max_rate;
+-};
+-
+-/**
+- * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
+- * @chip_info chip info details for the client ADC
+- * @preenable_setup op to run in the client before enabling the AXI ADC
+- * @reg_access IIO debugfs_reg_access hook for the client ADC
+- * @read_raw IIO read_raw hook for the client ADC
+- * @write_raw IIO write_raw hook for the client ADC
+- */
+-struct adi_axi_adc_conv {
+- const struct adi_axi_adc_chip_info *chip_info;
+-
+- int (*preenable_setup)(struct adi_axi_adc_conv *conv);
+- int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
+- unsigned int writeval, unsigned int *readval);
+- int (*read_raw)(struct adi_axi_adc_conv *conv,
+- struct iio_chan_spec const *chan,
+- int *val, int *val2, long mask);
+- int (*write_raw)(struct adi_axi_adc_conv *conv,
+- struct iio_chan_spec const *chan,
+- int val, int val2, long mask);
+-};
+-
+-struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+- size_t sizeof_priv);
+-
+-void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
+-
+-#endif
+diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
+new file mode 100644
+index 00000000000000..a6d79381866ece
+--- /dev/null
++++ b/include/linux/iio/backend.h
+@@ -0,0 +1,72 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef _IIO_BACKEND_H_
++#define _IIO_BACKEND_H_
++
++#include <linux/types.h>
++
++struct fwnode_handle;
++struct iio_backend;
++struct device;
++struct iio_dev;
++
++enum iio_backend_data_type {
++ IIO_BACKEND_TWOS_COMPLEMENT,
++ IIO_BACKEND_OFFSET_BINARY,
++ IIO_BACKEND_DATA_TYPE_MAX
++};
++
++/**
++ * struct iio_backend_data_fmt - Backend data format
++ * @type: Data type.
++ * @sign_extend: Bool to tell if the data is sign extended.
++ * @enable: Enable/Disable the data format module. If disabled,
++ *		no formatting will happen.
++ */
++struct iio_backend_data_fmt {
++ enum iio_backend_data_type type;
++ bool sign_extend;
++ bool enable;
++};
++
++/**
++ * struct iio_backend_ops - operations structure for an iio_backend
++ * @enable: Enable backend.
++ * @disable: Disable backend.
++ * @chan_enable: Enable one channel.
++ * @chan_disable: Disable one channel.
++ * @data_format_set: Configure the data format for a specific channel.
++ * @request_buffer: Request an IIO buffer.
++ * @free_buffer: Free an IIO buffer.
++ **/
++struct iio_backend_ops {
++ int (*enable)(struct iio_backend *back);
++ void (*disable)(struct iio_backend *back);
++ int (*chan_enable)(struct iio_backend *back, unsigned int chan);
++ int (*chan_disable)(struct iio_backend *back, unsigned int chan);
++ int (*data_format_set)(struct iio_backend *back, unsigned int chan,
++ const struct iio_backend_data_fmt *data);
++ struct iio_buffer *(*request_buffer)(struct iio_backend *back,
++ struct iio_dev *indio_dev);
++ void (*free_buffer)(struct iio_backend *back,
++ struct iio_buffer *buffer);
++};
++
++int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
++int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
++int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
++int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
++ const struct iio_backend_data_fmt *data);
++int devm_iio_backend_request_buffer(struct device *dev,
++ struct iio_backend *back,
++ struct iio_dev *indio_dev);
++
++void *iio_backend_get_priv(const struct iio_backend *conv);
++struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
++struct iio_backend *
++__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
++ struct fwnode_handle *fwnode);
++
++int devm_iio_backend_register(struct device *dev,
++ const struct iio_backend_ops *ops, void *priv);
++
++#endif
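As a usage illustration, here is a minimal sketch of a provider built on this new header, assuming a hypothetical memory-mapped "axi_foo" device; the register layout, state structure and callbacks are invented for this example and are not part of the API above.

#define AXI_FOO_CHAN_CTRL(c)    (0x400 + (c) * 0x40)    /* hypothetical layout */
#define AXI_FOO_CHAN_EN         BIT(0)

struct axi_foo_state {
        struct regmap *regmap;
};

static int axi_foo_chan_enable(struct iio_backend *back, unsigned int chan)
{
        struct axi_foo_state *st = iio_backend_get_priv(back);

        return regmap_set_bits(st->regmap, AXI_FOO_CHAN_CTRL(chan),
                               AXI_FOO_CHAN_EN);
}

static const struct iio_backend_ops axi_foo_ops = {
        .chan_enable = axi_foo_chan_enable,
        /* .chan_disable, .data_format_set, ... filled in as supported */
};

static int axi_foo_probe(struct platform_device *pdev)
{
        struct axi_foo_state *st;

        st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;
        /* ... obtain st->regmap, clocks, etc. ... */
        return devm_iio_backend_register(&pdev->dev, &axi_foo_ops, st);
}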
+diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
+index 5c355be8981493..cbb8ba957fade3 100644
+--- a/include/linux/iio/buffer-dmaengine.h
++++ b/include/linux/iio/buffer-dmaengine.h
+@@ -10,6 +10,9 @@
+ struct iio_dev;
+ struct device;
+
++struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
++ const char *channel);
++void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+ int devm_iio_dmaengine_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel);
+diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
+index 607c3a89a6471d..f9ae5cdd884f5b 100644
+--- a/include/linux/iio/common/st_sensors.h
++++ b/include/linux/iio/common/st_sensors.h
+@@ -258,9 +258,9 @@ struct st_sensor_data {
+ bool hw_irq_trigger;
+ s64 hw_timestamp;
+
+- char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+-
+ struct mutex odr_lock;
++
++ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
+ };
+
+ #ifdef CONFIG_IIO_BUFFER
+diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
+index dc9ea299e0885c..8898966bc0f08c 100644
+--- a/include/linux/iio/imu/adis.h
++++ b/include/linux/iio/imu/adis.h
+@@ -11,6 +11,7 @@
+
+ #include <linux/spi/spi.h>
+ #include <linux/interrupt.h>
++#include <linux/iio/iio.h>
+ #include <linux/iio/types.h>
+
+ #define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
+@@ -131,7 +132,7 @@ struct adis {
+ unsigned long irq_flag;
+ void *buffer;
+
+- u8 tx[10] ____cacheline_aligned;
++ u8 tx[10] __aligned(IIO_DMA_MINALIGN);
+ u8 rx[4];
+ };
+
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 266c3e1640d47a..01b52c9c75268f 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -89,9 +89,6 @@
+ __latent_entropy
+ #define __meminitdata __section(".meminit.data")
+ #define __meminitconst __section(".meminit.rodata")
+-#define __memexit __section(".memexit.text") __exitused __cold notrace
+-#define __memexitdata __section(".memexit.data")
+-#define __memexitconst __section(".memexit.rodata")
+
+ /* For assembly routines */
+ #define __HEAD .section ".head.text","ax"
+diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
+index 33f21bd85dbf27..f3196f82fd8a14 100644
+--- a/include/linux/intel_rapl.h
++++ b/include/linux/intel_rapl.h
+@@ -178,6 +178,12 @@ struct rapl_package {
+ struct rapl_if_priv *priv;
+ };
+
++struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
++ bool id_is_cpu);
++struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv,
++ bool id_is_cpu);
++void rapl_remove_package_cpuslocked(struct rapl_package *rp);
++
+ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+ void rapl_remove_package(struct rapl_package *rp);
+diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
+index f422612c28d6b4..8ff8eabb4a987c 100644
+--- a/include/linux/intel_tcc.h
++++ b/include/linux/intel_tcc.h
+@@ -13,6 +13,6 @@
+ int intel_tcc_get_tjmax(int cpu);
+ int intel_tcc_get_offset(int cpu);
+ int intel_tcc_set_offset(int cpu, int offset);
+-int intel_tcc_get_temp(int cpu, bool pkg);
++int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
+
+ #endif /* __INTEL_TCC_H__ */
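The intel_tcc_get_temp() change moves the temperature into an out-parameter, so negative readings can no longer be confused with negative error codes. A hedged sketch of an adapted caller; the surrounding driver context is hypothetical:

int temp, err;

err = intel_tcc_get_temp(cpu, &temp, true);     /* package-level reading */
if (err)
        return err;     /* errors and temperatures no longer overlap */
pr_debug("package temperature: %d degC\n", temp);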
+diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
+index 04d937ad4dc4f5..ee07393445f9f2 100644
+--- a/include/linux/intel_tpmi.h
++++ b/include/linux/intel_tpmi.h
+@@ -6,6 +6,12 @@
+ #ifndef _INTEL_TPMI_H_
+ #define _INTEL_TPMI_H_
+
++#include <linux/bitfield.h>
++
++#define TPMI_VERSION_INVALID 0xff
++#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
++#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
++
+ /**
+ * struct intel_tpmi_plat_info - Platform information for a TPMI device instance
+ * @package_id: CPU Package id
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+index 106cdc55ff3bd6..f99ff6de926cbf 100644
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -46,7 +46,6 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
+ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags);
+-struct sock *io_uring_get_socket(struct file *file);
+ void __io_uring_cancel(bool cancel_all);
+ void __io_uring_free(struct task_struct *tsk);
+ void io_uring_unreg_ringfd(void);
+@@ -82,6 +81,7 @@ static inline void io_uring_free(struct task_struct *tsk)
+ __io_uring_free(tsk);
+ }
+ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
++bool io_is_uring_fops(struct file *file);
+ #else
+ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+@@ -100,10 +100,6 @@ static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ }
+-static inline struct sock *io_uring_get_socket(struct file *file)
+-{
+- return NULL;
+-}
+ static inline void io_uring_task_cancel(void)
+ {
+ }
+@@ -122,6 +118,10 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ {
+ return -EOPNOTSUPP;
+ }
++static inline bool io_is_uring_fops(struct file *file)
++{
++ return false;
++}
+ #endif
+
+ #endif
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index 13d19b9be9f4a7..8215e193178aa8 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -250,7 +250,6 @@ struct io_ring_ctx {
+
+ struct io_submit_state submit_state;
+
+- struct io_buffer_list *io_bl;
+ struct xarray io_bl_xa;
+
+ struct io_hash_table cancel_table_locked;
+@@ -327,6 +326,9 @@ struct io_ring_ctx {
+
+ struct list_head io_buffers_cache;
+
++ /* deferred free list, protected by ->uring_lock */
++ struct hlist_head io_buf_list;
++
+ /* Keep this last, we don't need it for the fast path */
+ struct wait_queue_head poll_wq;
+ struct io_restriction restrictions;
+@@ -344,9 +346,6 @@ struct io_ring_ctx {
+
+ struct list_head io_buffers_pages;
+
+- #if defined(CONFIG_UNIX)
+- struct socket *ring_sock;
+- #endif
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index c50a769d569a60..b6ef263e85c061 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -703,6 +703,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+ dev->iommu->priv = priv;
+ }
+
++extern struct mutex iommu_probe_device_lock;
+ int iommu_probe_device(struct device *dev);
+
+ int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
+@@ -1198,7 +1199,7 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+ static inline struct iommu_sva *
+ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+ {
+- return NULL;
++ return ERR_PTR(-ENODEV);
+ }
+
+ static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index d8a6fdce93738c..90081afa10ce52 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -215,8 +215,6 @@ struct irq_data {
+ * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
+ * IRQD_CAN_RESERVE - Can use reservation mode
+- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+- * required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+@@ -247,11 +245,10 @@ enum {
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+- IRQD_MSI_NOMASK_QUIRK = BIT(27),
+- IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28),
+- IRQD_AFFINITY_ON_ACTIVATE = BIT(29),
+- IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30),
+- IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31),
++ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
++ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
++ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
++ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
+ };
+
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+ }
+
+-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+-{
+- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+ static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+ {
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index 8cd11a22326055..136f2980cba303 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -66,6 +66,9 @@ void irq_work_sync(struct irq_work *work);
+ void irq_work_run(void);
+ bool irq_work_needs_cpu(void);
+ void irq_work_single(void *arg);
++
++void arch_irq_work_raise(void);
++
+ #else
+ static inline bool irq_work_needs_cpu(void) { return false; }
+ static inline void irq_work_run(void) { }
+diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
+index 2c63375bbd43f4..bf9e0640288d1d 100644
+--- a/include/linux/irqchip/arm-gic-v4.h
++++ b/include/linux/irqchip/arm-gic-v4.h
+@@ -58,10 +58,12 @@ struct its_vpe {
+ bool enabled;
+ bool group;
+ } sgi_config[16];
+- atomic_t vmapp_count;
+ };
+ };
+
++ /* Track the VPE being mapped */
++ atomic_t vmapp_count;
++
+ /*
+ * Ensures mutual exclusion between affinity setting of the
+ * vPE and vLPI operations using vpe->col_idx.
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 2b665c32f5fe66..2e09c269bf9d8e 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -126,7 +126,7 @@ do { \
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+ # define lockdep_hrtimer_enter(__hrtimer) false
+-# define lockdep_hrtimer_exit(__context) do { } while (0)
++# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
+ # define lockdep_posixtimer_enter() do { } while (0)
+ # define lockdep_posixtimer_exit() do { } while (0)
+ # define lockdep_irq_work_enter(__work) do { } while (0)
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 52772c826c8682..f0bc9aa5aed3f6 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1083,6 +1083,13 @@ struct journal_s
+ */
+ int j_revoke_records_per_block;
+
++ /**
++ * @j_transaction_overhead_buffers:
++ *
++ * Number of blocks each transaction needs for its own bookkeeping
++ */
++ int j_transaction_overhead_buffers;
++
+ /**
+ * @j_commit_interval:
+ *
+@@ -1374,6 +1381,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
+ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
+ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
+
++/* Journal high priority write IO operation flags */
++#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE)
++
+ /*
+ * Journal flag definitions
+ */
+@@ -1663,11 +1673,6 @@ int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
+ int jbd2_fc_release_bufs(journal_t *journal);
+
+-static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+-{
+- return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+-}
+-
+ /*
+ * is_journal_abort
+ *
+diff --git a/include/linux/kcov.h b/include/linux/kcov.h
+index b851ba415e03fd..3b479a3d235a97 100644
+--- a/include/linux/kcov.h
++++ b/include/linux/kcov.h
+@@ -21,6 +21,8 @@ enum kcov_mode {
+ KCOV_MODE_TRACE_PC = 2,
+ /* Collecting comparison operands mode. */
+ KCOV_MODE_TRACE_CMP = 3,
++ /* The process owns a KCOV remote reference. */
++ KCOV_MODE_REMOTE = 4,
+ };
+
+ #define KCOV_IN_CTXSW (1 << 30)
+diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
+index 2a36f3218b5106..5a952d00ea159e 100644
+--- a/include/linux/kernfs.h
++++ b/include/linux/kernfs.h
+@@ -223,6 +223,8 @@ struct kernfs_node {
+ unsigned short flags;
+ umode_t mode;
+ struct kernfs_iattrs *iattr;
++
++ struct rcu_head rcu;
+ };
+
+ /*
+diff --git a/include/linux/key-type.h b/include/linux/key-type.h
+index 7d985a1dfe4af9..5caf3ce823733a 100644
+--- a/include/linux/key-type.h
++++ b/include/linux/key-type.h
+@@ -73,6 +73,7 @@ struct key_type {
+
+ unsigned int flags;
+ #define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
++#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
+
+ /* vet a description */
+ int (*vet_description)(const char *description);
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
+index 85a64cb95d755c..45d5b0a76b0bd5 100644
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -140,7 +140,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
+ *
+ */
+ struct kretprobe_holder {
+- struct kretprobe *rp;
++ struct kretprobe __rcu *rp;
+ refcount_t ref;
+ };
+
+@@ -202,10 +202,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p);
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+ {
+- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+- "Kretprobe is accessed from instance under preemptive context");
+-
+- return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
++	/* rethook::data is a non-changing field, so it can be accessed freely. */
++ return (struct kretprobe *)ri->node.rethook->data;
+ }
+ static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+ {
+@@ -250,10 +248,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+
+ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+ {
+- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+- "Kretprobe is accessed from instance under preemptive context");
+-
+- return READ_ONCE(ri->rph->rp);
++ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
+ }
+
+ static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+@@ -388,11 +383,15 @@ static inline void wait_for_kprobe_optimizer(void) { }
+ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs);
+ extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
++/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
++extern bool kprobe_ftrace_disabled __read_mostly;
++extern void kprobe_ftrace_kill(void);
+ #else
+ static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+ {
+ return -EINVAL;
+ }
++static inline void kprobe_ftrace_kill(void) {}
+ #endif /* CONFIG_KPROBES_ON_FTRACE */
+
+ /* Get the kprobe at this addr (if any) - called with preemption disabled */
+@@ -501,6 +500,9 @@ static inline void kprobe_flush_task(struct task_struct *tk)
+ static inline void kprobe_free_init_mem(void)
+ {
+ }
++static inline void kprobe_ftrace_kill(void)
++{
++}
+ static inline int disable_kprobe(struct kprobe *kp)
+ {
+ return -EOPNOTSUPP;
+diff --git a/include/linux/ksm.h b/include/linux/ksm.h
+index c2dd786a30e1f7..b9cdeba03668ae 100644
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
+ */
+ #define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+-extern unsigned long ksm_zero_pages;
++extern atomic_long_t ksm_zero_pages;
++
++static inline void ksm_map_zero_page(struct mm_struct *mm)
++{
++ atomic_long_inc(&ksm_zero_pages);
++ atomic_long_inc(&mm->ksm_zero_pages);
++}
+
+ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+ {
+ if (is_ksm_zero_pte(pte)) {
+- ksm_zero_pages--;
+- mm->ksm_zero_pages--;
++ atomic_long_dec(&ksm_zero_pages);
++ atomic_long_dec(&mm->ksm_zero_pages);
+ }
+ }
+
++static inline long mm_ksm_zero_pages(struct mm_struct *mm)
++{
++ return atomic_long_read(&mm->ksm_zero_pages);
++}
++
+ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+ int ret;
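The counter type changes from a plain unsigned long to atomic_long_t because the zero-page count is adjusted from fault and unmap paths on different CPUs with no common lock, making the old "ksm_zero_pages--" a racy read-modify-write. A sketch of the resulting pattern; 'mm' stands in for any mm_struct pointer:

atomic_long_inc(&ksm_zero_pages);       /* fault path maps a KSM zero page */
atomic_long_dec(&ksm_zero_pages);       /* unmap path, as in the helper above */
long n = mm_ksm_zero_pages(mm);         /* readers take a consistent snapshot */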
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 2c30ade43bc87a..b11f53c1ba2e6f 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -86,6 +86,7 @@ void free_kthread_struct(struct task_struct *k);
+ void kthread_bind(struct task_struct *k, unsigned int cpu);
+ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
+ int kthread_stop(struct task_struct *k);
++int kthread_stop_put(struct task_struct *k);
+ bool kthread_should_stop(void);
+ bool kthread_should_park(void);
+ bool kthread_should_stop_or_park(void);
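Judging by its name and placement next to kthread_stop(), the new kthread_stop_put() appears to combine stopping a thread with dropping the caller's task reference. A hedged usage sketch; worker_fn and the surrounding code are hypothetical:

struct task_struct *worker;
int ret;

worker = kthread_run(worker_fn, NULL, "worker");
if (IS_ERR(worker))
        return PTR_ERR(worker);
get_task_struct(worker);        /* keep a reference across async use */
/* ... */
ret = kthread_stop_put(worker); /* kthread_stop() plus put_task_struct() */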
+diff --git a/include/linux/leds.h b/include/linux/leds.h
+index aa16dc2a8230fa..d3056bc6f0a1a3 100644
+--- a/include/linux/leds.h
++++ b/include/linux/leds.h
+@@ -474,6 +474,9 @@ struct led_trigger {
+ int (*activate)(struct led_classdev *led_cdev);
+ void (*deactivate)(struct led_classdev *led_cdev);
+
++ /* Brightness set by led_trigger_event */
++ enum led_brightness brightness;
++
+ /* LED-private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
+
+@@ -527,22 +530,11 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+ return led_cdev->trigger_data;
+ }
+
+-/**
+- * led_trigger_rename_static - rename a trigger
+- * @name: the new trigger name
+- * @trig: the LED trigger to rename
+- *
+- * Change a LED trigger name by copying the string passed in
+- * name into current trigger name, which MUST be large
+- * enough for the new string.
+- *
+- * Note that name must NOT point to the same string used
+- * during LED registration, as that could lead to races.
+- *
+- * This is meant to be used on triggers with statically
+- * allocated name.
+- */
+-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
++static inline enum led_brightness
++led_trigger_get_brightness(const struct led_trigger *trigger)
++{
++ return trigger ? trigger->brightness : LED_OFF;
++}
+
+ #define module_led_trigger(__led_trigger) \
+ module_driver(__led_trigger, led_trigger_register, \
+@@ -580,6 +572,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+ return NULL;
+ }
+
++static inline enum led_brightness
++led_trigger_get_brightness(const struct led_trigger *trigger)
++{
++ return LED_OFF;
++}
++
+ #endif /* CONFIG_LEDS_TRIGGERS */
+
+ /* Trigger specific enum */
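led_trigger_get_brightness() lets an LED driver recover the level last set through a trigger, with LED_OFF as the safe fallback when triggers are compiled out or none is active. A minimal sketch, assuming 'led_cdev' is any registered classdev:

enum led_brightness b;

b = led_trigger_get_brightness(led_cdev->trigger);
led_set_brightness(led_cdev, b);        /* LED_OFF when no trigger is set */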
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 2a7d2af0ed80a8..91c4e11cb6abb4 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -107,6 +107,7 @@ enum {
+
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
+ ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits is enabled */
++ ATA_DFLAG_RESUMING = (1 << 22), /* Device is resuming */
+ ATA_DFLAG_DETACH = (1 << 24),
+ ATA_DFLAG_DETACHED = (1 << 25),
+ ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
+@@ -1241,6 +1242,7 @@ extern int ata_slave_link_init(struct ata_port *ap);
+ extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+ struct ata_port_info *, struct Scsi_Host *);
+ extern void ata_port_probe(struct ata_port *ap);
++extern void ata_port_free(struct ata_port *ap);
+ extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+ extern void ata_sas_tport_delete(struct ata_port *ap);
+ extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index ac962c4cb44b10..2923754c13bce6 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+ LSM_HOOK(int, 0, syslog, int type)
+ LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
++LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+ LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+@@ -171,6 +171,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+ LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+ LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ unsigned long arg)
++LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
++ unsigned long arg)
+ LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+ LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+@@ -273,7 +275,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+ LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+ LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
++LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
+ u32 *ctxlen)
+
+ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+@@ -309,9 +311,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
+ LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
+ LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
+ LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
+-LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
++LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
+ sockptr_t optval, sockptr_t optlen, unsigned int len)
+-LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
++LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+ LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
+ LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
+@@ -388,7 +390,7 @@ LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
+
+ #ifdef CONFIG_AUDIT
+ LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+- void **lsmrule)
++ void **lsmrule, gfp_t gfp)
+ LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+ LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
+index b0da04fe087bb8..34dfcc77f505aa 100644
+--- a/include/linux/mc146818rtc.h
++++ b/include/linux/mc146818rtc.h
+@@ -126,10 +126,11 @@ struct cmos_rtc_board_info {
+ #endif /* ARCH_RTC_LOCATION */
+
+ bool mc146818_does_rtc_work(void);
+-int mc146818_get_time(struct rtc_time *time);
++int mc146818_get_time(struct rtc_time *time, int timeout);
+ int mc146818_set_time(struct rtc_time *time);
+
+ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++ int timeout,
+ void *param);
+
+ #endif /* _MC146818RTC_H */
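The extra parameter bounds how long the helpers wait for the RTC's update-in-progress (UIP) flag to clear. A hedged caller sketch; the millisecond unit and the value 1000 are assumptions for illustration only:

struct rtc_time tm;
int err;

err = mc146818_get_time(&tm, 1000);     /* allow up to ~1s for UIP to clear */
if (err)
        return err;                     /* RTC never became readable in time */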
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 1c1072e3ca0635..ed57c23f80ac2b 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -118,6 +118,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
+ int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+ #endif
+ void memblock_trim_memory(phys_addr_t align);
++unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
++ phys_addr_t base2, phys_addr_t size2);
+ bool memblock_overlaps_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size);
+ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
+diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
+index 47e7a3a61ce694..e8bcad641d8c20 100644
+--- a/include/linux/mfd/core.h
++++ b/include/linux/mfd/core.h
+@@ -92,7 +92,7 @@ struct mfd_cell {
+ * (above) when matching OF nodes with devices that have identical
+ * compatible strings
+ */
+- const u64 of_reg;
++ u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
+diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
+index f198a8ac7ee72c..11bf3212f7822d 100644
+--- a/include/linux/mhi_ep.h
++++ b/include/linux/mhi_ep.h
+@@ -49,6 +49,27 @@ struct mhi_ep_db_info {
+ u32 status;
+ };
+
++/**
++ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
++ * @mhi_dev: MHI device associated with this buffer
++ * @dev_addr: Address of the buffer in endpoint
++ * @host_addr: Address of the buffer in the host
++ * @size: Size of the buffer
++ * @code: Transfer completion code
++ * @cb: Callback to be executed by controller drivers after transfer completion (async)
++ * @cb_buf: Opaque buffer to be passed to the callback
++ */
++struct mhi_ep_buf_info {
++ struct mhi_ep_device *mhi_dev;
++ void *dev_addr;
++ u64 host_addr;
++ size_t size;
++ int code;
++
++ void (*cb)(struct mhi_ep_buf_info *buf_info);
++ void *cb_buf;
++};
++
+ /**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+@@ -82,8 +103,10 @@ struct mhi_ep_db_info {
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+- * @read_from_host: CB function for reading from host memory from endpoint
+- * @write_to_host: CB function for writing to host memory from endpoint
++ * @read_sync: CB function for reading from host memory synchronously
++ * @write_sync: CB function for writing to host memory synchronously
++ * @read_async: CB function for reading from host memory asynchronously
++ * @write_async: CB function for writing to host memory asynchronously
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+@@ -128,14 +151,19 @@ struct mhi_ep_cntrl {
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
++ struct kmem_cache *ring_item_cache;
++ struct kmem_cache *ev_ring_el_cache;
++ struct kmem_cache *tre_buf_cache;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
+- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
++ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
++ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
++ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
++ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+
+ enum mhi_state mhi_state;
+
+diff --git a/include/linux/minmax.h b/include/linux/minmax.h
+index 83aebc244cbaad..2ec559284a9f6c 100644
+--- a/include/linux/minmax.h
++++ b/include/linux/minmax.h
+@@ -2,60 +2,77 @@
+ #ifndef _LINUX_MINMAX_H
+ #define _LINUX_MINMAX_H
+
++#include <linux/build_bug.h>
++#include <linux/compiler.h>
+ #include <linux/const.h>
+ #include <linux/types.h>
+
+ /*
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+- * - avoid multiple evaluations of the arguments (so side-effects like
++ * - Avoid multiple evaluations of the arguments (so side-effects like
+ * "x++" happen only once) when non-constant.
+- * - perform strict type-checking (to generate warnings instead of
+- * nasty runtime surprises). See the "unnecessary" pointer comparison
+- * in __typecheck().
+- * - retain result as a constant expressions when called with only
++ * - Retain result as a constant expression when called with only
+ * constant expressions (to avoid tripping VLA warnings in stack
+ * allocation usage).
++ * - Perform signed vs unsigned type-checking (to generate compile
++ * errors instead of nasty runtime surprises).
++ * - Unsigned char/short are always promoted to signed int and can be
++ * compared against signed or unsigned arguments.
++ * - Unsigned arguments can be compared against non-negative signed constants.
++ * - Comparison of a signed argument against an unsigned constant fails
++ * even if the constant is below __INT_MAX__ and could be cast to int.
+ */
+ #define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+-#define __no_side_effects(x, y) \
+- (__is_constexpr(x) && __is_constexpr(y))
++/* is_signed_type() isn't a constexpr for pointer types */
++#define __is_signed(x) \
++ __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
++ is_signed_type(typeof(x)), 0)
+
+-#define __safe_cmp(x, y) \
+- (__typecheck(x, y) && __no_side_effects(x, y))
++/* True for a non-negative signed int constant */
++#define __is_noneg_int(x) \
++ (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
+
+-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
++#define __types_ok(x, y) \
++ (__is_signed(x) == __is_signed(y) || \
++ __is_signed((x) + 0) == __is_signed((y) + 0) || \
++ __is_noneg_int(x) || __is_noneg_int(y))
+
+-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
+- typeof(x) unique_x = (x); \
+- typeof(y) unique_y = (y); \
+- __cmp(unique_x, unique_y, op); })
++#define __cmp_op_min <
++#define __cmp_op_max >
+
+-#define __careful_cmp(x, y, op) \
+- __builtin_choose_expr(__safe_cmp(x, y), \
+- __cmp(x, y, op), \
+- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
++#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
++
++#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
++ typeof(x) unique_x = (x); \
++ typeof(y) unique_y = (y); \
++ static_assert(__types_ok(x, y), \
++ #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
++ __cmp(op, unique_x, unique_y); })
++
++#define __careful_cmp(op, x, y) \
++ __builtin_choose_expr(__is_constexpr((x) - (y)), \
++ __cmp(op, x, y), \
++ __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
+
+ #define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+-#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
+- typeof(val) unique_val = (val); \
+- typeof(lo) unique_lo = (lo); \
+- typeof(hi) unique_hi = (hi); \
+- __clamp(unique_val, unique_lo, unique_hi); })
+-
+-#define __clamp_input_check(lo, hi) \
+- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+- __is_constexpr((lo) > (hi)), (lo) > (hi), false)))
++#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
++ typeof(val) unique_val = (val); \
++ typeof(lo) unique_lo = (lo); \
++ typeof(hi) unique_hi = (hi); \
++ static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
++ (lo) <= (hi), true), \
++ "clamp() low limit " #lo " greater than high limit " #hi); \
++ static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
++ static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
++ __clamp(unique_val, unique_lo, unique_hi); })
+
+ #define __careful_clamp(val, lo, hi) ({ \
+- __clamp_input_check(lo, hi) + \
+- __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \
+- __typecheck(hi, lo) && __is_constexpr(val) && \
+- __is_constexpr(lo) && __is_constexpr(hi), \
++ __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
+ __clamp(val, lo, hi), \
+ __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
+ __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+@@ -65,14 +82,31 @@
+ * @x: first value
+ * @y: second value
+ */
+-#define min(x, y) __careful_cmp(x, y, <)
++#define min(x, y) __careful_cmp(min, x, y)
+
+ /**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+-#define max(x, y) __careful_cmp(x, y, >)
++#define max(x, y) __careful_cmp(max, x, y)
++
++/**
++ * umin - return minimum of two non-negative values
++ * Signed types are zero extended to match a larger unsigned type.
++ * @x: first value
++ * @y: second value
++ */
++#define umin(x, y) \
++ __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
++
++/**
++ * umax - return maximum of two non-negative values
++ * @x: first value
++ * @y: second value
++ */
++#define umax(x, y) \
++ __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+ /**
+ * min3 - return minimum of three values
+@@ -124,7 +158,7 @@
+ * @x: first value
+ * @y: second value
+ */
+-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
++#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))
+
+ /**
+ * max_t - return maximum of two values, using the specified type
+@@ -132,28 +166,7 @@
+ * @x: first value
+ * @y: second value
+ */
+-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
+-
+-/*
+- * Remove a const qualifier from integer types
+- * _Generic(foo, type-name: association, ..., default: association) performs a
+- * comparison against the foo type (not the qualified type).
+- * Do not use the const keyword in the type-name as it will not match the
+- * unqualified type of foo.
+- */
+-#define __unconst_integer_type_cases(type) \
+- unsigned type: (unsigned type)0, \
+- signed type: (signed type)0
+-
+-#define __unconst_integer_typeof(x) typeof( \
+- _Generic((x), \
+- char: (char)0, \
+- __unconst_integer_type_cases(char), \
+- __unconst_integer_type_cases(short), \
+- __unconst_integer_type_cases(int), \
+- __unconst_integer_type_cases(long), \
+- __unconst_integer_type_cases(long long), \
+- default: (x)))
++#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
+
+ /*
+ * Do not check the array parameter using __must_be_array().
+@@ -169,13 +182,13 @@
+ * 'int *buff' and 'int buff[N]' types.
+ *
+ * The array can be an array of const items.
+- * typeof() keeps the const qualifier. Use __unconst_integer_typeof() in order
++ * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
+ * to discard the const qualifier for the __element variable.
+ */
+ #define __minmax_array(op, array, len) ({ \
+ typeof(&(array)[0]) __array = (array); \
+ typeof(len) __len = (len); \
+- __unconst_integer_typeof(__array[0]) __element = __array[--__len]; \
++ __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
+ while (__len--) \
+ __element = op(__element, __array[__len]); \
+ __element; })
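A few illustrative expressions under the new signedness rules described in the header comment above; these are hypothetical examples, not taken from kernel callers:

int s = 2;
unsigned int u = 1;

int a = min(s, 2);              /* ok: both operands signed */
unsigned int b = min(u, 2);     /* ok: unsigned vs non-negative signed constant */
/* int c = min(s, u);              now fails to build: mixed signedness trips
 *                                 the "signedness error" static_assert in
 *                                 __cmp_once() */
unsigned long long d = umin(s, u);      /* ok: both widened to unsigned
                                         * (s must be known non-negative) */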
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 4d5be378fa8ccc..26333d602a5051 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -366,6 +366,8 @@ enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
++ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
++ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ };
+
+ enum {
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 3033bbaeac81c3..ffb98bc43b2db2 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -852,6 +852,7 @@ struct mlx5_cmd_work_ent {
+ void *context;
+ int idx;
+ struct completion handling;
++ struct completion slotted;
+ struct completion done;
+ struct mlx5_cmd *cmd;
+ struct work_struct work;
+@@ -1027,6 +1028,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+ void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+
++void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
++
+ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+ int mlx5_health_init(struct mlx5_core_dev *dev);
+ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index 1e00c2436377fc..3fb428ce7d1c7c 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -67,6 +67,7 @@ enum {
+ MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+ MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
++ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
+ };
+
+ #define LEFTOVERS_RULE_NUM 2
+@@ -131,6 +132,7 @@ struct mlx5_flow_handle;
+
+ enum {
+ FLOW_CONTEXT_HAS_TAG = BIT(0),
++ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
+ };
+
+ struct mlx5_flow_context {
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index fc3db401f8a287..9106771bb92f01 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -620,7 +620,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
+
+ u8 reserved_at_140[0x8];
+ u8 bth_dst_qp[0x18];
+- u8 reserved_at_160[0x20];
++ u8 inner_esp_spi[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
+ };
+@@ -1010,7 +1010,8 @@ struct mlx5_ifc_qos_cap_bits {
+
+ u8 max_tsar_bw_share[0x20];
+
+- u8 reserved_at_100[0x20];
++ u8 nic_element_type[0x10];
++ u8 nic_tsar_type[0x10];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+@@ -1102,7 +1103,7 @@ struct mlx5_ifc_roce_cap_bits {
+ u8 sw_r_roce_src_udp_port[0x1];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 fl_rc_qp_when_roce_enabled[0x1];
+- u8 reserved_at_7[0x1];
++ u8 roce_cc_general[0x1];
+ u8 qp_ooo_transmit_default[0x1];
+ u8 reserved_at_9[0x15];
+ u8 qp_ts_format[0x2];
+@@ -3536,7 +3537,7 @@ struct mlx5_ifc_flow_context_bits {
+ u8 action[0x10];
+
+ u8 extended_destination[0x1];
+- u8 reserved_at_81[0x1];
++ u8 uplink_hairpin_en[0x1];
+ u8 flow_source[0x2];
+ u8 encrypt_decrypt_type[0x4];
+ u8 destination_list_size[0x18];
+@@ -3843,10 +3844,11 @@ enum {
+ };
+
+ enum {
+- ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
++ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
++ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ };
+
+ struct mlx5_ifc_scheduling_context_bits {
+@@ -4546,6 +4548,12 @@ enum {
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ };
+
++enum {
++ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
++ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
++ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
++};
++
+ struct mlx5_ifc_tsar_element_bits {
+ u8 reserved_at_0[0x8];
+ u8 tsar_type[0x8];
+@@ -10154,11 +10162,13 @@ struct mlx5_ifc_mcam_access_reg_bits {
+
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+- u8 regs_44_to_32[0xd];
++ u8 regs_44_to_41[0x4];
++ u8 mfrl[0x1];
++ u8 regs_39_to_32[0x8];
+
+- u8 regs_31_to_10[0x16];
++ u8 regs_31_to_11[0x15];
+ u8 mtmp[0x1];
+- u8 regs_8_to_0[0x9];
++ u8 regs_9_to_0[0xa];
+ };
+
+ struct mlx5_ifc_mcam_access_reg_bits1 {
+@@ -11936,6 +11946,13 @@ enum {
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+ };
+
++enum {
++ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
++ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
++ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
++ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
++};
++
+ struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
+index 98b2e1e149f93c..5cc34216f23c32 100644
+--- a/include/linux/mlx5/port.h
++++ b/include/linux/mlx5/port.h
+@@ -115,7 +115,7 @@ enum mlx5e_ext_link_mode {
+ MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
+ MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
+- MLX5E_400GAUI_8 = 15,
++ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
+ MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_EXT_LINK_MODES_NUMBER,
+ };
+diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
+index bd53cf4be7bdcb..ad1ce650146cb7 100644
+--- a/include/linux/mlx5/qp.h
++++ b/include/linux/mlx5/qp.h
+@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
+ union {
+ struct {
+ __be16 sz;
+- u8 start[2];
++ union {
++ u8 start[2];
++ DECLARE_FLEX_ARRAY(u8, data);
++ };
+ } inline_hdr;
+ struct {
+ __be16 type;
+@@ -573,9 +576,12 @@ static inline const char *mlx5_qp_state_str(int state)
+
+ static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+ {
+- return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+- MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+- MLX5_TIMESTAMP_FORMAT_DEFAULT;
++ u8 supported_ts_cap = mlx5_get_roce_state(dev) ?
++ MLX5_CAP_ROCE(dev, qp_ts_format) :
++ MLX5_CAP_GEN(dev, sq_ts_format);
++
++ return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT :
++ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+
+ #endif /* MLX5_QP_H */
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index bf5d0b1b16f434..b6a4d6471b4a72 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -95,6 +95,10 @@ extern const int mmap_rnd_compat_bits_max;
+ extern int mmap_rnd_compat_bits __read_mostly;
+ #endif
+
++#ifndef PHYSMEM_END
++# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
++#endif
++
+ #include <asm/page.h>
+ #include <asm/processor.h>
+
+@@ -1184,14 +1188,16 @@ static inline void page_mapcount_reset(struct page *page)
+ * a large folio, it includes the number of times this page is mapped
+ * as part of that folio.
+ *
+- * The result is undefined for pages which cannot be mapped into userspace.
+- * For example SLAB or special types of pages. See function page_has_type().
+- * They use this field in struct page differently.
++ * Will report 0 for pages which cannot be mapped into userspace, e.g.
++ * slab, page tables and similar.
+ */
+ static inline int page_mapcount(struct page *page)
+ {
+ int mapcount = atomic_read(&page->_mapcount) + 1;
+
++ /* Handle page_has_type() pages */
++ if (mapcount < 0)
++ mapcount = 0;
+ if (unlikely(PageCompound(page)))
+ mapcount += folio_entire_mapcount(page_folio(page));
+
+@@ -1726,8 +1732,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+ unsigned int pid_bit;
+
+ pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
+- if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) {
+- __set_bit(pid_bit, &vma->numab_state->access_pids[1]);
++ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
++ __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
+ }
+ }
+ #else /* !CONFIG_NUMA_BALANCING */
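With the clamp above, page_mapcount() now returns 0 instead of an undefined (negative) value for typed pages such as slab or page tables, so callers no longer need a page_has_type() guard first. A hedged sketch; page_is_shared() is a hypothetical helper:

static bool page_is_shared(struct page *page)
{
        /* Safe on any page: typed pages simply report a mapcount of 0. */
        return page_mapcount(page) > 1;
}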
+diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
+index 8148b30a9df108..96b1c157554c08 100644
+--- a/include/linux/mm_inline.h
++++ b/include/linux/mm_inline.h
+@@ -231,22 +231,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+- * There are three common cases for this page:
+- * 1. If it's hot, e.g., freshly faulted in or previously hot and
+- * migrated, add it to the youngest generation.
+- * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+- * not in swapcache or a dirty page pending writeback, add it to the
+- * second oldest generation.
+- * 3. Everything else (clean, cold) is added to the oldest generation.
++ * There are four common cases for this page:
++ * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
++ * generation, and it's protected over the rest below.
++ * 2. If it can't be evicted immediately, i.e., a dirty page pending
++ * writeback, add it to the second youngest generation.
++ * 3. If it should be evicted first, e.g., cold and clean from
++ * folio_rotate_reclaimable(), add it to the oldest generation.
++ * 4. Everything else falls between 2 & 3 above and is added to the
++ * second oldest generation if it's considered inactive, or the
++ * oldest generation otherwise. See lru_gen_is_active().
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+- seq = lrugen->min_seq[type] + 1;
+- else
++ seq = lrugen->max_seq - 1;
++ else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
+ seq = lrugen->min_seq[type];
++ else
++ seq = lrugen->min_seq[type] + 1;
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 36c5b43999e608..43c19d85dfe7fe 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -551,9 +551,36 @@ struct vma_lock {
+ };
+
+ struct vma_numab_state {
++ /*
++	 * Initialised as the time in 'jiffies' after which the VMA
++	 * should be scanned. Delays the first scan of a new VMA by at
++ * least sysctl_numa_balancing_scan_delay:
++ */
+ unsigned long next_scan;
+- unsigned long next_pid_reset;
+- unsigned long access_pids[2];
++
++ /*
++ * Time in jiffies when pids_active[] is reset to
++ * detect phase change behaviour:
++ */
++ unsigned long pids_active_reset;
++
++ /*
++ * Approximate tracking of PIDs that trapped a NUMA hinting
++ * fault. May produce false positives due to hash collisions.
++ *
++ * [0] Previous PID tracking
++ * [1] Current PID tracking
++ *
++	 * Window moves after pids_active_reset has expired approximately
++ * every VMA_PID_RESET_PERIOD jiffies:
++ */
++ unsigned long pids_active[2];
++
++ /*
++ * MM scan sequence ID when the VMA was last completely scanned.
++ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
++ */
++ int prev_scan_seq;
+ };
+
+ /*
+@@ -899,7 +926,7 @@ struct mm_struct {
+ * Represent how many empty pages are merged with kernel zero
+ * pages when enabling KSM use_zero_pages.
+ */
+- unsigned long ksm_zero_pages;
++ atomic_long_t ksm_zero_pages;
+ #endif /* CONFIG_KSM */
+ #ifdef CONFIG_LRU_GEN
+ struct {
+diff --git a/include/linux/mman.h b/include/linux/mman.h
+index 40d94411d49204..db4741007bef05 100644
+--- a/include/linux/mman.h
++++ b/include/linux/mman.h
+@@ -161,6 +161,14 @@ calc_vm_flag_bits(unsigned long flags)
+
+ unsigned long vm_commit_limit(void);
+
++#ifndef arch_memory_deny_write_exec_supported
++static inline bool arch_memory_deny_write_exec_supported(void)
++{
++ return true;
++}
++#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
++#endif
++
+ /*
+ * Denies creating a writable executable mapping or gaining executable permissions.
+ *
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index daa2f40d9ce65f..7b12eebc5586dc 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -295,7 +295,9 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
++#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+
++ bool written_flag; /* Indicates eMMC has been written since power on */
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
+ unsigned int erase_size; /* erase size in sectors */
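
Taken together, the new quirk bit and written_flag let the core skip cache flushes on affected eMMCs until the first write after power-up. A hedged sketch of that check — the surrounding flush logic is assumed, not quoted from the driver:

/* Sketch: only flush the cache once something was actually written. */
static bool cache_flush_needed(const struct mmc_card *card)
{
        if ((card->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH) &&
            !card->written_flag)
                return false;
        return true;
}
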
+diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
+index 5d3d15e97868a9..66272fdce43d8c 100644
+--- a/include/linux/mmc/slot-gpio.h
++++ b/include/linux/mmc/slot-gpio.h
+@@ -21,6 +21,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ unsigned int debounce);
+ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, unsigned int debounce);
++int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
+ void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id));
+ int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 4106fbc5b4b324..05092c37a430c2 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -34,6 +34,8 @@
+
+ #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+
++#define NR_PAGE_ORDERS (MAX_ORDER + 1)
++
+ /*
+ * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
+ * costly to service. That is between allocation orders which should
+@@ -95,7 +97,7 @@ static inline bool migratetype_is_mergeable(int mt)
+ }
+
+ #define for_each_migratetype_order(order, type) \
+- for (order = 0; order <= MAX_ORDER; order++) \
++ for (order = 0; order < NR_PAGE_ORDERS; order++) \
+ for (type = 0; type < MIGRATE_TYPES; type++)
+
+ extern int page_group_by_mobility_disabled;
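
NR_PAGE_ORDERS makes the off-by-one explicit: orders run 0..MAX_ORDER inclusive, so there are MAX_ORDER + 1 of them, and `order < NR_PAGE_ORDERS` is equivalent to the old `order <= MAX_ORDER` bound. A runnable toy with an illustrative MAX_ORDER:

#include <stdio.h>

#define MAX_ORDER      10              /* illustrative value */
#define NR_PAGE_ORDERS (MAX_ORDER + 1)

int main(void)
{
        /* Visits orders 0..MAX_ORDER inclusive, same as order <= MAX_ORDER. */
        for (int order = 0; order < NR_PAGE_ORDERS; order++)
                printf("order %d\n", order);
        return 0;
}
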
+@@ -505,33 +507,37 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+ * the old generation, is incremented when all its bins become empty.
+ *
+ * There are four operations:
+- * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its
++ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
+ * current generation (old or young) and updates its "seg" to "head";
+- * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its
++ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
+ * current generation (old or young) and updates its "seg" to "tail";
+- * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old
++ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
+ * generation, updates its "gen" to "old" and resets its "seg" to "default";
+- * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the
++ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
+ * young generation, updates its "gen" to "young" and resets its "seg" to
+ * "default".
+ *
+ * The events that trigger the above operations are:
+ * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
+- * 2. The first attempt to reclaim an memcg below low, which triggers
++ * 2. The first attempt to reclaim a memcg below low, which triggers
+ * MEMCG_LRU_TAIL;
+- * 3. The first attempt to reclaim an memcg below reclaimable size threshold,
+- * which triggers MEMCG_LRU_TAIL;
+- * 4. The second attempt to reclaim an memcg below reclaimable size threshold,
+- * which triggers MEMCG_LRU_YOUNG;
+- * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG;
++ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
++ * threshold, which triggers MEMCG_LRU_TAIL;
++ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
++ * threshold, which triggers MEMCG_LRU_YOUNG;
++ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
+- * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD.
++ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
+ *
+- * Note that memcg LRU only applies to global reclaim, and the round-robin
+- * incrementing of their max_seq counters ensures the eventual fairness to all
+- * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
++ * Notes:
++ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
++ * of their max_seq counters ensures the eventual fairness to all eligible
++ * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
++ * 2. There are only two valid generations: old (seq) and young (seq+1).
++ * MEMCG_NR_GENS is set to three so that when reading the generation counter
++ * locklessly, a stale value (seq-1) does not wrap around to young.
+ */
+-#define MEMCG_NR_GENS 2
++#define MEMCG_NR_GENS 3
+ #define MEMCG_NR_BINS 8
+
+ struct lru_gen_memcg {
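
Note 2 above is modular arithmetic: generation numbers are stored mod MEMCG_NR_GENS, so with only two slots a reader racing an increment that sees the stale seq - 1 would alias the young slot. A toy demonstration of the aliasing (pure arithmetic, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long seq = 4;  /* current old generation */

        /* With 2 slots, a stale seq - 1 collides with young (seq + 1). */
        printf("2 gens: stale=%lu young=%lu\n", (seq - 1) % 2, (seq + 1) % 2);
        /* With 3 slots, the stale value stays distinguishable. */
        printf("3 gens: stale=%lu young=%lu\n", (seq - 1) % 3, (seq + 1) % 3);
        return 0;
}
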
+@@ -658,13 +664,12 @@ enum zone_watermarks {
+ };
+
+ /*
+- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+- * it should not contribute to serious fragmentation causing THP allocation
+- * failures.
++ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
++ * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
++ * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
+ */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define NR_PCP_THP 1
++#define NR_PCP_THP 2
+ #else
+ #define NR_PCP_THP 0
+ #endif
+@@ -925,7 +930,7 @@ struct zone {
+ CACHELINE_PADDING(_pad1_);
+
+ /* free areas of different sizes */
+- struct free_area free_area[MAX_ORDER + 1];
++ struct free_area free_area[NR_PAGE_ORDERS];
+
+ #ifdef CONFIG_UNACCEPTED_MEMORY
+ /* Pages to be accepted. All pages on the list are MAX_ORDER */
+@@ -1770,6 +1775,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
+ #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
+
+ struct mem_section_usage {
++ struct rcu_head rcu;
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+ #endif
+@@ -1962,8 +1968,9 @@ static inline int subsection_map_index(unsigned long pfn)
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ {
+ int idx = subsection_map_index(pfn);
++ struct mem_section_usage *usage = READ_ONCE(ms->usage);
+
+- return test_bit(idx, ms->usage->subsection_map);
++ return usage ? test_bit(idx, usage->subsection_map) : 0;
+ }
+ #else
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+@@ -1987,6 +1994,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ static inline int pfn_valid(unsigned long pfn)
+ {
+ struct mem_section *ms;
++ int ret;
+
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+@@ -2000,13 +2008,19 @@ static inline int pfn_valid(unsigned long pfn)
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+ ms = __pfn_to_section(pfn);
+- if (!valid_section(ms))
++ rcu_read_lock_sched();
++ if (!valid_section(ms)) {
++ rcu_read_unlock_sched();
+ return 0;
++ }
+ /*
+ * Traditionally early sections always returned pfn_valid() for
+ * the entire section-sized span.
+ */
+- return early_section(ms) || pfn_section_valid(ms, pfn);
++ ret = early_section(ms) || pfn_section_valid(ms, pfn);
++ rcu_read_unlock_sched();
++
++ return ret;
+ }
+ #endif
+
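
The pfn_valid() and pfn_section_valid() changes are one instance of a standard pattern: the writer frees ms->usage via RCU (hence the new rcu_head in mem_section_usage), and readers sample the pointer exactly once inside a read-side critical section, then validate before dereferencing. The shape of that pattern, reduced to its essentials with placeholder types (C11 atomics stand in for the kernel's READ_ONCE; deferred reclamation is assumed):

#include <stdatomic.h>
#include <stdbool.h>

struct usage { unsigned long map; };
struct section { _Atomic(struct usage *) usage; };

/* Reader: sample the pointer once, then validate before use. In the
 * kernel the writer frees 'usage' via RCU, so the load stays safe for
 * the whole read-side critical section. */
static bool section_bit(struct section *s, unsigned int idx)
{
        struct usage *u = atomic_load(&s->usage);

        return u ? (u->map >> idx) & 1 : false;
}
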
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index b0678b093cb271..0f51bc24ae5952 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -690,6 +690,8 @@ struct x86_cpu_id {
+ __u16 model;
+ __u16 steppings;
+ __u16 feature; /* bit index */
++ /* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
++ __u16 flags;
+ kernel_ulong_t driver_data;
+ };
+
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 001b2ce83832ed..89b1e0ed981144 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -115,6 +115,14 @@ int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod);
+
++#ifdef CONFIG_MODULES
++void flush_module_init_free_work(void);
++#else
++static inline void flush_module_init_free_work(void)
++{
++}
++#endif
++
+ /* Any cleanup needed when module leaves. */
+ void module_arch_cleanup(struct module *mod);
+
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index a50ea79522f85a..ddace8c34dcf95 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -547,12 +547,6 @@ enum {
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+- /*
+- * Quirk to handle MSI implementations which do not provide
+- * masking. Currently known to affect x86, but has to be partially
+- * handled in the core MSI code.
+- */
+- MSI_FLAG_NOMASK_QUIRK = (1 << 7),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
+index c29ace15a053a6..9d0fc5109af664 100644
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -1265,6 +1265,7 @@ struct nand_secure_region {
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
++ * @cont_read.pause_page: End of the current sequential cache read operation
+ * @cont_read.last_page: End of the continuous read operation
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+@@ -1321,6 +1322,7 @@ struct nand_chip {
+ struct {
+ bool ongoing;
+ unsigned int first_page;
++ unsigned int pause_page;
+ unsigned int last_page;
+ } cont_read;
+
+diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
+index 3e285c09d16d95..5c2ccc6494529e 100644
+--- a/include/linux/mtd/spinand.h
++++ b/include/linux/mtd/spinand.h
+@@ -169,7 +169,7 @@
+ struct spinand_op;
+ struct spinand_device;
+
+-#define SPINAND_MAX_ID_LEN 4
++#define SPINAND_MAX_ID_LEN 5
+ /*
+ * For erase, write and read operation, we got the following timings :
+ * tBERS (erase) 1ms to 4ms
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index a33aa9eb9fc3b0..5b5630e58407a5 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -21,6 +21,8 @@
+ #include <linux/debug_locks.h>
+ #include <linux/cleanup.h>
+
++struct device;
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { \
+@@ -171,6 +173,31 @@ do { \
+ } while (0)
+ #endif /* CONFIG_PREEMPT_RT */
+
++#ifdef CONFIG_DEBUG_MUTEXES
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock);
++
++#else
++
++static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++ /*
++ * When CONFIG_DEBUG_MUTEXES is off, mutex_destroy() is just a no-op,
++ * so there is no real need to register it in the devm subsystem.
++ */
++ return 0;
++}
++
++#endif
++
++#define devm_mutex_init(dev, mutex) \
++({ \
++ typeof(mutex) mutex_ = (mutex); \
++ \
++ mutex_init(mutex_); \
++ __devm_mutex_init(dev, mutex_); \
++})
++
+ /*
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/locking/mutex-design.rst.
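
devm_mutex_init() pairs the init with automatic destruction on driver detach (a no-op unless CONFIG_DEBUG_MUTEXES adds state worth tearing down). A hypothetical probe using it — the foo_* names are invented for illustration:

struct foo_priv {
        struct mutex lock;
};

static int foo_probe(struct device *dev)
{
        struct foo_priv *priv;
        int ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        ret = devm_mutex_init(dev, &priv->lock);
        if (ret)
                return ret;     /* destroyed automatically on unbind */

        return 0;
}
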
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 0896aaa91dd7bf..8f5ac20b4c03d9 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1774,6 +1774,13 @@ enum netdev_ml_priv_type {
+ ML_PRIV_CAN,
+ };
+
++enum netdev_stat_type {
++ NETDEV_PCPU_STAT_NONE,
++ NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
++ NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
++ NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
++};
++
+ /**
+ * struct net_device - The DEVICE structure.
+ *
+@@ -1968,10 +1975,14 @@ enum netdev_ml_priv_type {
+ *
+ * @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
+- * @lstats: Loopback statistics
+- * @tstats: Tunnel statistics
+- * @dstats: Dummy statistics
+- * @vstats: Virtual ethernet statistics
++ *
++ * @pcpu_stat_type: Type of device statistics which the core should
++ * allocate/free: none, lstats, tstats, dstats. none
++ * means the driver is handling statistics allocation/
++ * freeing internally.
++ * @lstats: Loopback statistics: packets, bytes
++ * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
++ * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
+ *
+ * @garp_port: GARP
+ * @mrp_port: MRP
+@@ -2328,6 +2339,7 @@ struct net_device {
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
++ enum netdev_stat_type pcpu_stat_type:8;
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+@@ -2725,6 +2737,16 @@ struct pcpu_sw_netstats {
+ struct u64_stats_sync syncp;
+ } __aligned(4 * sizeof(u64));
+
++struct pcpu_dstats {
++ u64 rx_packets;
++ u64 rx_bytes;
++ u64 rx_drops;
++ u64 tx_packets;
++ u64 tx_bytes;
++ u64 tx_drops;
++ struct u64_stats_sync syncp;
++} __aligned(8 * sizeof(u64));
++
+ struct pcpu_lstats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+@@ -5007,6 +5029,24 @@ void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
+ void netif_inherit_tso_max(struct net_device *to,
+ const struct net_device *from);
+
++static inline unsigned int
++netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
++{
++ /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
++ return skb->protocol == htons(ETH_P_IPV6) ?
++ READ_ONCE(dev->gro_max_size) :
++ READ_ONCE(dev->gro_ipv4_max_size);
++}
++
++static inline unsigned int
++netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
++{
++ /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
++ return skb->protocol == htons(ETH_P_IPV6) ?
++ READ_ONCE(dev->gso_max_size) :
++ READ_ONCE(dev->gso_ipv4_max_size);
++}
++
+ static inline bool netif_is_macsec(const struct net_device *dev)
+ {
+ return dev->priv_flags & IFF_MACSEC;
+@@ -5214,5 +5254,6 @@ extern struct net_device *blackhole_netdev;
+ #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+ #define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+
+ #endif /* _LINUX_NETDEVICE_H */
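
With pcpu_stat_type set to NETDEV_PCPU_STAT_DSTATS the core allocates dev->dstats, and a driver then updates its per-CPU slot under the u64_stats seqcount. A sketch of the hot-path increment, assuming the usual helpers (this_cpu_ptr, u64_stats_update_begin/end) rather than quoting any specific driver:

/* Sketch: count one received packet of 'len' bytes. */
static void foo_count_rx(struct net_device *dev, unsigned int len)
{
        struct pcpu_dstats *s = this_cpu_ptr(dev->dstats);

        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
}
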
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index d68644b7c299eb..cc5a2a220af8e3 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -464,6 +464,7 @@ struct nf_ct_hook {
+ const struct sk_buff *);
+ void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+ void (*set_closing)(struct nf_conntrack *nfct);
++ int (*confirm)(struct sk_buff *skb);
+ };
+ extern const struct nf_ct_hook __rcu *nf_ct_hook;
+
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index e8c350a3ade153..e9f4f845d760af 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -186,6 +186,8 @@ struct ip_set_type_variant {
+ /* Return true if "b" set is the same as "a"
+ * according to the create set parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
++ /* Cancel ongoing garbage collectors before destroying the set */
++ void (*cancel_gc)(struct ip_set *set);
+ /* Region-locking is used */
+ bool region_lock;
+ };
+@@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+ /* A generic IP set */
+ struct ip_set {
++ /* For call_rcu() in destroy */
++ struct rcu_head rcu;
+ /* The name of the set */
+ char name[IPSET_MAXNAMELEN];
+ /* Lock protecting the set data */
+diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
+index f980edfdd2783e..743475ca7e9d51 100644
+--- a/include/linux/netfilter_bridge.h
++++ b/include/linux/netfilter_bridge.h
+@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
+ if (!nf_bridge)
+ return 0;
+
+- return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
++ return nf_bridge->physinif;
+ }
+
+ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
+@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
+ }
+
+ static inline struct net_device *
+-nf_bridge_get_physindev(const struct sk_buff *skb)
++nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
+ {
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+- return nf_bridge ? nf_bridge->physindev : NULL;
++ return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
+ }
+
+ static inline struct net_device *
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index 75d7de34c90874..e8d713a37d1769 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -289,6 +289,7 @@ struct netlink_callback {
+ u16 answer_flags;
+ u32 min_dump_alloc;
+ unsigned int prev_seq, seq;
++ int flags;
+ bool strict_check;
+ union {
+ u8 ctx[48];
+@@ -321,6 +322,7 @@ struct netlink_dump_control {
+ void *data;
+ struct module *module;
+ u32 min_dump_alloc;
++ int flags;
+ };
+
+ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 279262057a9258..832b7e354b4e36 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -612,6 +612,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
+ extern int nfs_commit_inode(struct inode *, int);
+ extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+ extern void nfs_commit_free(struct nfs_commit_data *data);
++void nfs_commit_begin(struct nfs_mds_commit_info *cinfo);
+ bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
+
+ static inline bool nfs_have_writebacks(const struct inode *inode)
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index cd628c4b011e54..86d96e00c2e3de 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -238,6 +238,7 @@ struct nfs_server {
+ struct list_head layouts;
+ struct list_head delegations;
+ struct list_head ss_copies;
++ struct list_head ss_src_copies;
+
+ unsigned long mig_gen;
+ unsigned long mig_status;
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 59df211d051fa8..1d43371fafd2fc 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #ifndef _LINUX_NUMA_H
+ #define _LINUX_NUMA_H
++#include <linux/init.h>
+ #include <linux/types.h>
+
+ #ifdef CONFIG_NODES_SHIFT
+@@ -12,6 +13,7 @@
+ #define MAX_NUMNODES (1 << NODES_SHIFT)
+
+ #define NUMA_NO_NODE (-1)
++#define NUMA_NO_MEMBLK (-1)
+
+ /* optionally keep NUMA memory info available post init */
+ #ifdef CONFIG_NUMA_KEEP_MEMINFO
+@@ -21,33 +23,27 @@
+ #endif
+
+ #ifdef CONFIG_NUMA
+-#include <linux/printk.h>
+ #include <asm/sparsemem.h>
+
+ /* Generic implementation available */
+-int numa_map_to_online_node(int node);
++int numa_nearest_node(int node, unsigned int state);
+
+ #ifndef memory_add_physaddr_to_nid
+-static inline int memory_add_physaddr_to_nid(u64 start)
+-{
+- pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+- start);
+- return 0;
+-}
++int memory_add_physaddr_to_nid(u64 start);
+ #endif
++
+ #ifndef phys_to_target_node
+-static inline int phys_to_target_node(u64 start)
+-{
+- pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+- start);
+- return 0;
+-}
++int phys_to_target_node(u64 start);
+ #endif
++
++int numa_fill_memblks(u64 start, u64 end);
++
+ #else /* !CONFIG_NUMA */
+-static inline int numa_map_to_online_node(int node)
++static inline int numa_nearest_node(int node, unsigned int state)
+ {
+ return NUMA_NO_NODE;
+ }
++
+ static inline int memory_add_physaddr_to_nid(u64 start)
+ {
+ return 0;
+@@ -58,6 +54,8 @@ static inline int phys_to_target_node(u64 start)
+ }
+ #endif
+
++#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
++
+ #ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+ extern const struct attribute_group arch_node_dev_group;
+ #endif
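
numa_nearest_node() generalises the old helper: it returns the closest node that is in the requested node state, and numa_map_to_online_node() becomes just the N_ONLINE case. An illustrative call site (the surrounding function is hypothetical):

/* Fall back to the nearest node with memory when 'nid' has none. */
static int pick_memory_node(int nid)
{
        int target = numa_nearest_node(nid, N_MEMORY);

        return target == NUMA_NO_NODE ? 0 : target;
}
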
+diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
+index 57ebe1267f7fbe..e07e8978d691b7 100644
+--- a/include/linux/nvme-tcp.h
++++ b/include/linux/nvme-tcp.h
+@@ -18,6 +18,12 @@ enum nvme_tcp_pfv {
+ NVME_TCP_PFV_1_0 = 0x0,
+ };
+
++enum nvme_tcp_tls_cipher {
++ NVME_TCP_TLS_CIPHER_INVALID = 0,
++ NVME_TCP_TLS_CIPHER_SHA256 = 1,
++ NVME_TCP_TLS_CIPHER_SHA384 = 2,
++};
++
+ enum nvme_tcp_fatal_error_status {
+ NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
+ NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index 26dd3f859d9d7e..b61038de139e55 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -90,8 +90,8 @@ enum {
+ NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
+ };
+
+-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
+- * RDMA_QPTYPE field
++/* RDMA Provider Type codes for Discovery Log Page entry TSAS
++ * RDMA_PRTYPE field
+ */
+ enum {
+ NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
+diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
+index 4523e4e8331970..526025561df199 100644
+--- a/include/linux/nvmem-consumer.h
++++ b/include/linux/nvmem-consumer.h
+@@ -81,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+
+ const char *nvmem_dev_name(struct nvmem_device *nvmem);
++size_t nvmem_dev_size(struct nvmem_device *nvmem);
+
+ void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
+index dae26295e6bedf..1b81adebdb8beb 100644
+--- a/include/linux/nvmem-provider.h
++++ b/include/linux/nvmem-provider.h
+@@ -82,6 +82,7 @@ struct nvmem_cell_info {
+ * @owner: Pointer to exporter module. Used for refcounting.
+ * @cells: Optional array of pre-defined NVMEM cells.
+ * @ncells: Number of elements in cells.
++ * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
+ * @keepout: Optional array of keepout ranges (sorted ascending by start).
+ * @nkeepout: Number of elements in the keepout array.
+ * @type: Type of the nvmem storage
+@@ -112,6 +113,7 @@ struct nvmem_config {
+ struct module *owner;
+ const struct nvmem_cell_info *cells;
+ int ncells;
++ bool add_legacy_fixed_of_cells;
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
+ enum nvmem_type type;
+diff --git a/include/linux/objagg.h b/include/linux/objagg.h
+index 78021777df4626..6df5b887dc547c 100644
+--- a/include/linux/objagg.h
++++ b/include/linux/objagg.h
+@@ -8,7 +8,6 @@ struct objagg_ops {
+ size_t obj_size;
+ bool (*delta_check)(void *priv, const void *parent_obj,
+ const void *obj);
+- int (*hints_obj_cmp)(const void *obj1, const void *obj2);
+ void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+ void (*delta_destroy)(void *priv, void *delta_priv);
+ void * (*root_create)(void *priv, void *obj, unsigned int root_id);
+diff --git a/include/linux/objtool.h b/include/linux/objtool.h
+index 03f82c2c2ebf6f..33212e93f4a631 100644
+--- a/include/linux/objtool.h
++++ b/include/linux/objtool.h
+@@ -48,13 +48,13 @@
+ #define ANNOTATE_NOENDBR \
+ "986: \n\t" \
+ ".pushsection .discard.noendbr\n\t" \
+- ".long 986b - .\n\t" \
++ ".long 986b\n\t" \
+ ".popsection\n\t"
+
+ #define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+- ".long 998b - .\n\t" \
++ ".long 998b\n\t" \
+ ".popsection\n\t"
+
+ #else /* __ASSEMBLY__ */
+@@ -66,7 +66,7 @@
+ #define ANNOTATE_INTRA_FUNCTION_CALL \
+ 999: \
+ .pushsection .discard.intra_function_calls; \
+- .long 999b - .; \
++ .long 999b; \
+ .popsection;
+
+ /*
+@@ -118,7 +118,7 @@
+ .macro ANNOTATE_NOENDBR
+ .Lhere_\@:
+ .pushsection .discard.noendbr
+- .long .Lhere_\@ - .
++ .long .Lhere_\@
+ .popsection
+ .endm
+
+@@ -130,7 +130,8 @@
+ * it will be ignored.
+ */
+ .macro VALIDATE_UNRET_BEGIN
+-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
++#if defined(CONFIG_NOINSTR_VALIDATION) && \
++ (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
+ .Lhere_\@:
+ .pushsection .discard.validate_unret
+ .long .Lhere_\@ - .
+@@ -141,7 +142,7 @@
+ .macro REACHABLE
+ .Lhere_\@:
+ .pushsection .discard.reachable
+- .long .Lhere_\@ - .
++ .long .Lhere_\@
+ .popsection
+ .endm
+
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 6a9ddf20e79abd..024dda54b9c776 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -13,6 +13,7 @@
+ */
+ #include <linux/types.h>
+ #include <linux/bitops.h>
++#include <linux/cleanup.h>
+ #include <linux/errno.h>
+ #include <linux/kobject.h>
+ #include <linux/mod_devicetable.h>
+@@ -134,6 +135,7 @@ static inline struct device_node *of_node_get(struct device_node *node)
+ }
+ static inline void of_node_put(struct device_node *node) { }
+ #endif /* !CONFIG_OF_DYNAMIC */
++DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T))
+
+ /* Pointer for first entry in chain of all nodes. */
+ extern struct device_node *of_root;
+@@ -1428,10 +1430,23 @@ static inline int of_property_read_s32(const struct device_node *np,
+ #define for_each_child_of_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
++
++#define for_each_child_of_node_scoped(parent, child) \
++ for (struct device_node *child __free(device_node) = \
++ of_get_next_child(parent, NULL); \
++ child != NULL; \
++ child = of_get_next_child(parent, child))
++
+ #define for_each_available_child_of_node(parent, child) \
+ for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ child = of_get_next_available_child(parent, child))
+
++#define for_each_available_child_of_node_scoped(parent, child) \
++ for (struct device_node *child __free(device_node) = \
++ of_get_next_available_child(parent, NULL); \
++ child != NULL; \
++ child = of_get_next_available_child(parent, child))
++
+ #define for_each_of_cpu_node(cpu) \
+ for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
+ cpu = of_get_next_cpu_node(cpu))
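
The _scoped variants bind the child to a cleanup-annotated local, so of_node_put() runs automatically when the loop exits — including on break or early return, a classic refcount-leak source with the plain iterator. A hypothetical consumer; the property name is invented:

/* No explicit of_node_put(): __free(device_node) drops each ref. */
static int count_enabled_children(struct device_node *parent)
{
        int n = 0;

        for_each_available_child_of_node_scoped(parent, child) {
                if (of_property_read_bool(child, "some-vendor,enable"))
                        n++;
        }
        return n;
}
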
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index f9b60313eaea2c..e04f6794764737 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -31,8 +31,10 @@
+ * credit to Christian Biere.
+ */
+ #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+-#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+-#define type_min(T) ((T)((T)-type_max(T)-(T)1))
++#define __type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
++#define type_max(t) __type_max(typeof(t))
++#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
++#define type_min(t) __type_min(typeof(t))
+
+ /*
+ * Avoids triggering -Wtype-limits compilation warning,
+@@ -130,10 +132,10 @@ static inline bool __must_check __must_check_overflow(bool overflow)
+
+ #define __overflows_type_constexpr(x, T) ( \
+ is_unsigned_type(typeof(x)) ? \
+- (x) > type_max(typeof(T)) : \
++ (x) > type_max(T) : \
+ is_unsigned_type(typeof(T)) ? \
+- (x) < 0 || (x) > type_max(typeof(T)) : \
+- (x) < type_min(typeof(T)) || (x) > type_max(typeof(T)))
++ (x) < 0 || (x) > type_max(T) : \
++ (x) < type_min(T) || (x) > type_max(T))
+
+ #define __overflows_type(x, T) ({ \
+ typeof(T) v = 0; \
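
The split into __type_max()/__type_min() lets the public macros take an expression rather than a type: type_max(t) expands to __type_max(typeof(t)), and since GNU typeof() is idempotent when handed a type name, passing a type keeps working too. Both usages in a runnable toy that re-derives the macros from the hunk:

#include <stdio.h>

#define is_signed_type(type)  (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define __type_max(T)         ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_max(t)           __type_max(typeof(t))

int main(void)
{
        unsigned short v = 0;

        /* Works on an expression and (via typeof idempotence) on a type. */
        printf("%u %d\n", type_max(v), type_max(int));      /* 65535 2147483647 */
        return 0;
}
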
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index 5c02720c53a584..a77f3a7d21d12f 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -190,7 +190,6 @@ enum pageflags {
+
+ /* At least one page in this folio has the hwpoison flag set */
+ PG_has_hwpoisoned = PG_error,
+- PG_hugetlb = PG_active,
+ PG_large_rmappable = PG_workingset, /* anon or file-backed */
+ };
+
+@@ -432,30 +431,51 @@ static __always_inline int TestClearPage##uname(struct page *page) \
+ TESTSETFLAG(uname, lname, policy) \
+ TESTCLEARFLAG(uname, lname, policy)
+
++#define FOLIO_TEST_FLAG_FALSE(name) \
++static inline bool folio_test_##name(const struct folio *folio) \
++{ return false; }
++#define FOLIO_SET_FLAG_NOOP(name) \
++static inline void folio_set_##name(struct folio *folio) { }
++#define FOLIO_CLEAR_FLAG_NOOP(name) \
++static inline void folio_clear_##name(struct folio *folio) { }
++#define __FOLIO_SET_FLAG_NOOP(name) \
++static inline void __folio_set_##name(struct folio *folio) { }
++#define __FOLIO_CLEAR_FLAG_NOOP(name) \
++static inline void __folio_clear_##name(struct folio *folio) { }
++#define FOLIO_TEST_SET_FLAG_FALSE(name) \
++static inline bool folio_test_set_##name(struct folio *folio) \
++{ return false; }
++#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
++static inline bool folio_test_clear_##name(struct folio *folio) \
++{ return false; }
++
++#define FOLIO_FLAG_FALSE(name) \
++FOLIO_TEST_FLAG_FALSE(name) \
++FOLIO_SET_FLAG_NOOP(name) \
++FOLIO_CLEAR_FLAG_NOOP(name)
++
+ #define TESTPAGEFLAG_FALSE(uname, lname) \
+-static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
++FOLIO_TEST_FLAG_FALSE(lname) \
+ static inline int Page##uname(const struct page *page) { return 0; }
+
+ #define SETPAGEFLAG_NOOP(uname, lname) \
+-static inline void folio_set_##lname(struct folio *folio) { } \
++FOLIO_SET_FLAG_NOOP(lname) \
+ static inline void SetPage##uname(struct page *page) { }
+
+ #define CLEARPAGEFLAG_NOOP(uname, lname) \
+-static inline void folio_clear_##lname(struct folio *folio) { } \
++FOLIO_CLEAR_FLAG_NOOP(lname) \
+ static inline void ClearPage##uname(struct page *page) { }
+
+ #define __CLEARPAGEFLAG_NOOP(uname, lname) \
+-static inline void __folio_clear_##lname(struct folio *folio) { } \
++__FOLIO_CLEAR_FLAG_NOOP(lname) \
+ static inline void __ClearPage##uname(struct page *page) { }
+
+ #define TESTSETFLAG_FALSE(uname, lname) \
+-static inline bool folio_test_set_##lname(struct folio *folio) \
+-{ return 0; } \
++FOLIO_TEST_SET_FLAG_FALSE(lname) \
+ static inline int TestSetPage##uname(struct page *page) { return 0; }
+
+ #define TESTCLEARFLAG_FALSE(uname, lname) \
+-static inline bool folio_test_clear_##lname(struct folio *folio) \
+-{ return 0; } \
++FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
+ static inline int TestClearPage##uname(struct page *page) { return 0; }
+
+ #define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
+@@ -815,29 +835,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
+
+ #define PG_head_mask ((1UL << PG_head))
+
+-#ifdef CONFIG_HUGETLB_PAGE
+-int PageHuge(struct page *page);
+-SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+-CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+-
+-/**
+- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
+- * @folio: The folio to test.
+- *
+- * Context: Any context. Caller should have a reference on the folio to
+- * prevent it from being turned into a tail page.
+- * Return: True for hugetlbfs folios, false for anon folios or folios
+- * belonging to other filesystems.
+- */
+-static inline bool folio_test_hugetlb(struct folio *folio)
+-{
+- return folio_test_large(folio) &&
+- test_bit(PG_hugetlb, folio_flags(folio, 1));
+-}
+-#else
+-TESTPAGEFLAG_FALSE(Huge, hugetlb)
+-#endif
+-
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /*
+ * PageHuge() only returns true for hugetlbfs pages, but not for
+@@ -893,34 +890,23 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+ TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+ #endif
+
+-/*
+- * Check if a page is currently marked HWPoisoned. Note that this check is
+- * best effort only and inherently racy: there is no way to synchronize with
+- * failing hardware.
+- */
+-static inline bool is_page_hwpoison(struct page *page)
+-{
+- if (PageHWPoison(page))
+- return true;
+- return PageHuge(page) && PageHWPoison(compound_head(page));
+-}
+-
+ /*
+ * For pages that are never mapped to userspace (and aren't PageSlab),
+ * page_type may be used. Because it is initialised to -1, we invert the
+ * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
+ * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
+- * low bits so that an underflow or overflow of page_mapcount() won't be
++ * low bits so that an underflow or overflow of _mapcount won't be
+ * mistaken for a page type value.
+ */
+
+ #define PAGE_TYPE_BASE 0xf0000000
+-/* Reserve 0x0000007f to catch underflows of page_mapcount */
++/* Reserve 0x0000007f to catch underflows of _mapcount */
+ #define PAGE_MAPCOUNT_RESERVE -128
+ #define PG_buddy 0x00000080
+ #define PG_offline 0x00000100
+ #define PG_table 0x00000200
+ #define PG_guard 0x00000400
++#define PG_hugetlb 0x00000800
+
+ #define PageType(page, flag) \
+ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+@@ -937,35 +923,38 @@ static inline int page_has_type(struct page *page)
+ return page_type_has_type(page->page_type);
+ }
+
++#define FOLIO_TYPE_OPS(lname, fname) \
++static __always_inline bool folio_test_##fname(const struct folio *folio)\
++{ \
++ return folio_test_type(folio, PG_##lname); \
++} \
++static __always_inline void __folio_set_##fname(struct folio *folio) \
++{ \
++ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
++ folio->page.page_type &= ~PG_##lname; \
++} \
++static __always_inline void __folio_clear_##fname(struct folio *folio) \
++{ \
++ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
++ folio->page.page_type |= PG_##lname; \
++}
++
+ #define PAGE_TYPE_OPS(uname, lname, fname) \
++FOLIO_TYPE_OPS(lname, fname) \
+ static __always_inline int Page##uname(const struct page *page) \
+ { \
+ return PageType(page, PG_##lname); \
+ } \
+-static __always_inline int folio_test_##fname(const struct folio *folio)\
+-{ \
+- return folio_test_type(folio, PG_##lname); \
+-} \
+ static __always_inline void __SetPage##uname(struct page *page) \
+ { \
+ VM_BUG_ON_PAGE(!PageType(page, 0), page); \
+ page->page_type &= ~PG_##lname; \
+ } \
+-static __always_inline void __folio_set_##fname(struct folio *folio) \
+-{ \
+- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+- folio->page.page_type &= ~PG_##lname; \
+-} \
+ static __always_inline void __ClearPage##uname(struct page *page) \
+ { \
+ VM_BUG_ON_PAGE(!Page##uname(page), page); \
+ page->page_type |= PG_##lname; \
+-} \
+-static __always_inline void __folio_clear_##fname(struct folio *folio) \
+-{ \
+- VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+- folio->page.page_type |= PG_##lname; \
+-} \
++}
+
+ /*
+ * PageBuddy() indicates that the page is free and in the buddy system
+@@ -1012,6 +1001,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
+ */
+ PAGE_TYPE_OPS(Guard, guard, guard)
+
++#ifdef CONFIG_HUGETLB_PAGE
++FOLIO_TYPE_OPS(hugetlb, hugetlb)
++#else
++FOLIO_TEST_FLAG_FALSE(hugetlb)
++#endif
++
++/**
++ * PageHuge - Determine if the page belongs to hugetlbfs
++ * @page: The page to test.
++ *
++ * Context: Any context.
++ * Return: True for hugetlbfs pages, false for anon pages or pages
++ * belonging to other filesystems.
++ */
++static inline bool PageHuge(const struct page *page)
++{
++ return folio_test_hugetlb(page_folio(page));
++}
++
++/*
++ * Check if a page is currently marked HWPoisoned. Note that this check is
++ * best effort only and inherently racy: there is no way to synchronize with
++ * failing hardware.
++ */
++static inline bool is_page_hwpoison(struct page *page)
++{
++ if (PageHWPoison(page))
++ return true;
++ return PageHuge(page) && PageHWPoison(compound_head(page));
++}
++
+ extern bool is_free_buddy_page(struct page *page);
+
+ PAGEFLAG(Isolated, isolated, PF_ANY);
+@@ -1078,7 +1098,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
+ */
+ #define PAGE_FLAGS_SECOND \
+ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
+- 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
++ 1UL << PG_large_rmappable)
+
+ #define PAGE_FLAGS_PRIVATE \
+ (1UL << PG_private | 1UL << PG_private_2)
+diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
+index d7c2d33baa7f8e..fdd2a75adb0379 100644
+--- a/include/linux/page_ref.h
++++ b/include/linux/page_ref.h
+@@ -263,54 +263,9 @@ static inline bool folio_try_get(struct folio *folio)
+ return folio_ref_add_unless(folio, 1, 0);
+ }
+
+-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
+-{
+-#ifdef CONFIG_TINY_RCU
+- /*
+- * The caller guarantees the folio will not be freed from interrupt
+- * context, so (on !SMP) we only need preemption to be disabled
+- * and TINY_RCU does that for us.
+- */
+-# ifdef CONFIG_PREEMPT_COUNT
+- VM_BUG_ON(!in_atomic() && !irqs_disabled());
+-# endif
+- VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+- folio_ref_add(folio, count);
+-#else
+- if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
+- /* Either the folio has been freed, or will be freed. */
+- return false;
+- }
+-#endif
+- return true;
+-}
+-
+-/**
+- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
+- * @folio: The folio.
+- *
+- * This is a version of folio_try_get() optimised for non-SMP kernels.
+- * If you are still holding the rcu_read_lock() after looking up the
+- * page and know that the page cannot have its refcount decreased to
+- * zero in interrupt context, you can use this instead of folio_try_get().
+- *
+- * Example users include get_user_pages_fast() (as pages are not unmapped
+- * from interrupt context) and the page cache lookups (as pages are not
+- * truncated from interrupt context). We also know that pages are not
+- * frozen in interrupt context for the purposes of splitting or migration.
+- *
+- * You can also use this function if you're holding a lock that prevents
+- * pages being frozen & removed; eg the i_pages lock for the page cache
+- * or the mmap_lock or page table lock for page tables. In this case,
+- * it will always succeed, and you could have used a plain folio_get(),
+- * but it's sometimes more convenient to have a common function called
+- * from both locked and RCU-protected contexts.
+- *
+- * Return: True if the reference count was successfully incremented.
+- */
+-static inline bool folio_try_get_rcu(struct folio *folio)
++static inline bool folio_ref_try_add(struct folio *folio, int count)
+ {
+- return folio_ref_try_add_rcu(folio, 1);
++ return folio_ref_add_unless(folio, count, 0);
+ }
+
+ static inline int page_ref_freeze(struct page *page, int count)
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 351c3b7f93a14e..15793a4af9d445 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -204,6 +204,8 @@ enum mapping_flags {
+ AS_NO_WRITEBACK_TAGS = 5,
+ AS_LARGE_FOLIO_SUPPORT = 6,
+ AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
++ AS_STABLE_WRITES, /* must wait for writeback before modifying
++ folio contents */
+ };
+
+ /**
+@@ -289,6 +291,21 @@ static inline void mapping_clear_release_always(struct address_space *mapping)
+ clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+ }
+
++static inline bool mapping_stable_writes(const struct address_space *mapping)
++{
++ return test_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
++static inline void mapping_set_stable_writes(struct address_space *mapping)
++{
++ set_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
++static inline void mapping_clear_stable_writes(struct address_space *mapping)
++{
++ clear_bit(AS_STABLE_WRITES, &mapping->flags);
++}
++
+ static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+ {
+ return mapping->gfp_mask;
+@@ -310,6 +327,26 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+ m->gfp_mask = mask;
+ }
+
++/*
++ * There are some parts of the kernel which assume that PMD entries
++ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
++ * limit the maximum allocation order to PMD size. I'm not aware of any
++ * assumptions about maximum order if THP are disabled, but 8 seems like
++ * a good order (that's 1MB if you're using 4kB pages)
++ */
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
++#else
++#define PREFERRED_MAX_PAGECACHE_ORDER 8
++#endif
++
++/*
++ * xas_split_alloc() does not support arbitrary orders. This implies no
++ * 512MB THP on ARM64 with 64KB base page size.
++ */
++#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
++#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
++
+ /**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+@@ -336,6 +373,14 @@ static inline bool mapping_large_folio_support(struct address_space *mapping)
+ test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ }
+
++/* Return the maximum folio size for this pagecache mapping, in bytes. */
++static inline size_t mapping_max_folio_size(struct address_space *mapping)
++{
++ if (mapping_large_folio_support(mapping))
++ return PAGE_SIZE << MAX_PAGECACHE_ORDER;
++ return PAGE_SIZE;
++}
++
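
mapping_max_folio_size() gives write paths a single cap to size their batches against. For example, a buffered-write loop can bound each iteration's copy length like this (a sketch; the enclosing loop is assumed):

/* Clamp one write chunk to the mapping's largest possible folio. */
static size_t write_chunk(struct file *file, size_t bytes_left)
{
        size_t max = mapping_max_folio_size(file->f_mapping);

        return bytes_left < max ? bytes_left : max;
}
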
+ static inline int filemap_nr_thps(struct address_space *mapping)
+ {
+ #ifdef CONFIG_READ_ONLY_THP_FOR_FS
+@@ -494,19 +539,6 @@ static inline void *detach_page_private(struct page *page)
+ return folio_detach_private(page_folio(page));
+ }
+
+-/*
+- * There are some parts of the kernel which assume that PMD entries
+- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+- * limit the maximum allocation order to PMD size. I'm not aware of any
+- * assumptions about maximum order if THP are disabled, but 8 seems like
+- * a good order (that's 1MB if you're using 4kB pages)
+- */
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+-#else
+-#define MAX_PAGECACHE_ORDER 8
+-#endif
+-
+ #ifdef CONFIG_NUMA
+ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
+ #else
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 8c7c2c3c6c6524..2b7e45bae94089 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1146,6 +1146,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
+ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
+ struct pci_dev *pci_dev_get(struct pci_dev *dev);
+ void pci_dev_put(struct pci_dev *dev);
++DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ void pci_remove_bus(struct pci_bus *b);
+ void pci_stop_and_remove_bus_device(struct pci_dev *dev);
+ void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
+@@ -1181,6 +1182,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
+ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+ unsigned int devfn);
+ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
++struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
++
+ int pci_dev_present(const struct pci_device_id *ids);
+
+ int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
+@@ -1391,6 +1394,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
+ struct pci_saved_state **state);
+ int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
+ int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
++int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
+ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
+ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
+ void pci_pme_active(struct pci_dev *dev, bool enable);
+@@ -1594,6 +1598,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+
+ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata);
++void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
++ void *userdata);
+ int pci_cfg_space_size(struct pci_dev *dev);
+ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
+ void pci_setup_bridge(struct pci_bus *bus);
+@@ -1777,6 +1783,7 @@ extern bool pcie_ports_native;
+ int pci_disable_link_state(struct pci_dev *pdev, int state);
+ int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+ int pci_enable_link_state(struct pci_dev *pdev, int state);
++int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
+ void pcie_no_aspm(void);
+ bool pcie_aspm_support_enabled(void);
+ bool pcie_aspm_enabled(struct pci_dev *pdev);
+@@ -1787,6 +1794,8 @@ static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+ { return 0; }
+ static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
+ { return 0; }
++static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
++{ return 0; }
+ static inline void pcie_no_aspm(void) { }
+ static inline bool pcie_aspm_support_enabled(void) { return false; }
+ static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
+@@ -1819,6 +1828,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
+ void pci_dev_lock(struct pci_dev *dev);
+ int pci_dev_trylock(struct pci_dev *dev);
+ void pci_dev_unlock(struct pci_dev *dev);
++DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+
+ /*
+ * PCI domain support. Sometimes called PCI segment (eg by ACPI),
+@@ -1924,6 +1934,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
+ struct pci_dev *from)
+ { return NULL; }
+
++static inline struct pci_dev *pci_get_base_class(unsigned int class,
++ struct pci_dev *from)
++{ return NULL; }
+
+ static inline int pci_dev_present(const struct pci_device_id *ids)
+ { return 0; }
+@@ -1961,6 +1974,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
+ static inline void pci_restore_state(struct pci_dev *dev) { }
+ static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+ { return 0; }
++static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
++{ return 0; }
+ static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+ { return 0; }
+ static inline pci_power_t pci_choose_state(struct pci_dev *dev,
+@@ -2072,14 +2087,14 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+ (pci_resource_end((dev), (bar)) ? \
+ resource_size(pci_resource_n((dev), (bar))) : 0)
+
+-#define __pci_dev_for_each_res0(dev, res, ...) \
+- for (unsigned int __b = 0; \
+- res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
++#define __pci_dev_for_each_res0(dev, res, ...) \
++ for (unsigned int __b = 0; \
++ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
+ __b++)
+
+-#define __pci_dev_for_each_res1(dev, res, __b) \
+- for (__b = 0; \
+- res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
++#define __pci_dev_for_each_res1(dev, res, __b) \
++ for (__b = 0; \
++ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
+ __b++)
+
+ #define pci_dev_for_each_resource(dev, res, ...) \
+@@ -2448,6 +2463,16 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+ return NULL;
+ }
+
++static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
++{
++ /*
++ * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
++ * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
++ * the value (e.g. inside the loop in pci_dev_wait()).
++ */
++ return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 5fb3d4c393a9ec..3dce2be622e745 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -180,6 +180,8 @@
+ #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
+ #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+
++#define PCI_VENDOR_ID_ITTIM 0x0b48
++
+ #define PCI_VENDOR_ID_COMPAQ 0x0e11
+ #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
+ #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
+@@ -578,7 +580,10 @@
+ #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+ #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
++#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3 0x124b
++#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
+ #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
++#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE 0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
+@@ -2121,6 +2126,8 @@
+
+ #define PCI_VENDOR_ID_CHELSIO 0x1425
+
++#define PCI_VENDOR_ID_EDIMAX 0x1432
++
+ #define PCI_VENDOR_ID_ADLINK 0x144a
+
+ #define PCI_VENDOR_ID_SAMSUNG 0x144d
+@@ -2648,6 +2655,8 @@
+ #define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
+ #define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+
++#define PCI_VENDOR_ID_GLENFLY 0x6766
++
+ #define PCI_VENDOR_ID_INTEL 0x8086
+ #define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+ #define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
+@@ -2680,8 +2689,10 @@
+ #define PCI_DEVICE_ID_INTEL_I960 0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+ #define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
++#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+ #define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
++#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
+ #define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
+ #define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
+ #define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28
+@@ -3061,6 +3072,7 @@
+ #define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
+ #define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
+ #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
++#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728
+ #define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50
+ #define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0
+ #define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 7b5406e3288d98..7a5563ffe61b53 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -786,6 +786,7 @@ struct perf_event {
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+ unsigned int pending_work;
++ struct rcuwait pending_work_wait;
+
+ atomic_t event_limit;
+
+@@ -843,11 +844,11 @@ struct perf_event {
+ };
+
+ /*
+- * ,-----------------------[1:n]----------------------.
+- * V V
+- * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
+- * ^ ^ | |
+- * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
++ * ,-----------------------[1:n]------------------------.
++ * V V
++ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
++ * | |
++ * `--[n:1]-> pmu <-[1:n]--'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+@@ -865,6 +866,9 @@ struct perf_event {
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore it's
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
++ *
++ * perf_event holds a refcount on perf_event_context
++ * perf_event holds a refcount on perf_event_pmu_context
+ */
+ struct perf_event_pmu_context {
+ struct pmu *pmu;
+@@ -879,6 +883,7 @@ struct perf_event_pmu_context {
+ unsigned int embedded : 1;
+
+ unsigned int nr_events;
++ unsigned int nr_cgroups;
+
+ atomic_t refcount; /* event <-> epc */
+ struct rcu_head rcu_head;
+@@ -1594,13 +1599,7 @@ static inline int perf_is_paranoid(void)
+ return sysctl_perf_event_paranoid > -1;
+ }
+
+-static inline int perf_allow_kernel(struct perf_event_attr *attr)
+-{
+- if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
+- return -EACCES;
+-
+- return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
+-}
++int perf_allow_kernel(struct perf_event_attr *attr);
+
+ static inline int perf_allow_cpu(struct perf_event_attr *attr)
+ {
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index af7639c3b0a3a4..8b7daccd11bef2 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -292,6 +292,27 @@ static inline pmd_t pmdp_get(pmd_t *pmdp)
+ }
+ #endif
+
++#ifndef pudp_get
++static inline pud_t pudp_get(pud_t *pudp)
++{
++ return READ_ONCE(*pudp);
++}
++#endif
++
++#ifndef p4dp_get
++static inline p4d_t p4dp_get(p4d_t *p4dp)
++{
++ return READ_ONCE(*p4dp);
++}
++#endif
++
++#ifndef pgdp_get
++static inline pgd_t pgdp_get(pgd_t *pgdp)
++{
++ return READ_ONCE(*pgdp);
++}
++#endif
++
+ #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 1351b802ffcff1..5aa30ee9981047 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1099,7 +1099,7 @@ struct phy_driver {
+ u8 index, enum led_brightness value);
+
+ /**
+- * @led_blink_set: Set a PHY LED brightness. Index indicates
++ * @led_blink_set: Set a PHY LED blinking. Index indicates
+ * which of the PHYs led should be configured to blink. Delays
+ * are in milliseconds and if both are zero then a sensible
+ * default should be chosen. The call should adjust the
+diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
+index 70998e6dd6fdc9..6ca51e0080ec09 100644
+--- a/include/linux/phy/tegra/xusb.h
++++ b/include/linux/phy/tegra/xusb.h
+@@ -26,6 +26,7 @@ void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
+ int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
+ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
++int tegra_xusb_padctl_get_port_number(struct phy *phy);
+ int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+ int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 608a9eb86bff8d..288a8081a9db6f 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -124,6 +124,22 @@ struct pipe_buf_operations {
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+
++/**
++ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
++ * i.e. it was created with O_NOTIFICATION_PIPE
++ * @pipe: The pipe to check
++ *
++ * Return: true if pipe is a watch queue, false otherwise.
++ */
++static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
++{
++#ifdef CONFIG_WATCH_QUEUE
++ return pipe->watch_queue != NULL;
++#else
++ return false;
++#endif
++}
++
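
Several pipe operations are meaningless or unsafe on notification pipes, and the helper centralises the #ifdef so callers can bail out uniformly. A representative guard — the function around it is illustrative:

/* Illustrative: refuse an operation on O_NOTIFICATION_PIPE pipes. */
static long foo_pipe_op(struct pipe_inode_info *pipe)
{
        if (pipe_has_watch_queue(pipe))
                return -EXDEV;  /* the error splice uses for this case */

        /* ... normal handling ... */
        return 0;
}
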
+ /**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 1400c37b29c757..629c1633bbd00d 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+ }
+
+-#ifdef CONFIG_PM
+-#define _EXPORT_DEV_PM_OPS(name, license, ns) \
++#define _EXPORT_PM_OPS(name, license, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, license, ns); \
+ const struct dev_pm_ops name
+-#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+-#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
+-#else
+-#define _EXPORT_DEV_PM_OPS(name, license, ns) \
++
++#define _DISCARD_PM_OPS(name, license, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
++
++#ifdef CONFIG_PM
++#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
++#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
++#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
++#else
++#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+ #define EXPORT_PM_FN_GPL(name)
+ #define EXPORT_PM_FN_NS_GPL(name, ns)
+ #endif
+
+-#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+-#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+-#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+-#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
++#ifdef CONFIG_PM_SLEEP
++#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
++#else
++#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
++#endif
++
++#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
++#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
++#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
++#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
++
++#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
++#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
++#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
++#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
+
+ /*
+ * Use this if you want to use the same suspend and resume callbacks for suspend
+@@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+ #define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+- EXPORT_DEV_PM_OPS(name) = { \
++ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+ #define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+- EXPORT_GPL_DEV_PM_OPS(name) = { \
++ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+ #define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+- EXPORT_NS_DEV_PM_OPS(name, ns) = { \
++ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+ #define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+- EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
++ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+
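As a hedged illustration of the sleep-only export macros introduced above: a driver can now export dev_pm_ops built purely from system-sleep callbacks and have the whole object discarded when CONFIG_PM_SLEEP is off. All my_* names below are hypothetical.

static int my_suspend(struct device *dev)
{
        /* Quiesce the hypothetical device. */
        return 0;
}

static int my_resume(struct device *dev)
{
        return 0;
}

/* Exported under CONFIG_PM_SLEEP, compiled away otherwise. */
EXPORT_GPL_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);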
+diff --git a/include/linux/poll.h b/include/linux/poll.h
+index a9e0e1c2d1f2ff..d1ea4f3714a848 100644
+--- a/include/linux/poll.h
++++ b/include/linux/poll.h
+@@ -14,11 +14,7 @@
+
+ /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
+ additional memory. */
+-#ifdef __clang__
+-#define MAX_STACK_ALLOC 768
+-#else
+ #define MAX_STACK_ALLOC 832
+-#endif
+ #define FRONTEND_STACK_ALLOC 256
+ #define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
+ #define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
+diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
+index a427f13c757f4c..85b86768c0b913 100644
+--- a/include/linux/power_supply.h
++++ b/include/linux/power_supply.h
+@@ -767,7 +767,7 @@ struct power_supply_battery_info {
+ int bti_resistance_tolerance;
+ };
+
+-extern struct atomic_notifier_head power_supply_notifier;
++extern struct blocking_notifier_head power_supply_notifier;
+ extern int power_supply_reg_notifier(struct notifier_block *nb);
+ extern void power_supply_unreg_notifier(struct notifier_block *nb);
+ #if IS_ENABLED(CONFIG_POWER_SUPPLY)
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 1424670df161de..9aa6358a1a16b4 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
+ return level;
+ }
+
++/*
++ * These macro definitions avoid redundant invocations of preempt_count()
++ * because such invocations would result in redundant loads given that
++ * preempt_count() is commonly implemented with READ_ONCE().
++ */
++
+ #define nmi_count() (preempt_count() & NMI_MASK)
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+ #ifdef CONFIG_PREEMPT_RT
+ # define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
++# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
+ #else
+ # define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
+ #endif
+-#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
+
+ /*
+ * Macros to retrieve the current execution context:
+@@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
+ #define in_nmi() (nmi_count())
+ #define in_hardirq() (hardirq_count())
+ #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+-#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
++#ifdef CONFIG_PREEMPT_RT
++# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
++#else
++# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
++#endif
+
+ /*
+ * The following macros are deprecated and should not be used in new code:
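A small sketch of the point of the consolidated masks: the rewritten in_task() and irq_count() read preempt_count() once instead of combining three separate reads. Call sites are unchanged; example_work() is hypothetical.

static void example_work(void)
{
        /* Sleeping is legal only in plain task context. */
        if (in_task())
                might_sleep();
}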
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 8ef499ab3c1ed2..e4878bb58f6633 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -126,7 +126,7 @@ struct va_format {
+ #define no_printk(fmt, ...) \
+ ({ \
+ if (0) \
+- printk(fmt, ##__VA_ARGS__); \
++ _printk(fmt, ##__VA_ARGS__); \
+ 0; \
+ })
+
+diff --git a/include/linux/profile.h b/include/linux/profile.h
+index 11db1ec516e272..12da750a88a04d 100644
+--- a/include/linux/profile.h
++++ b/include/linux/profile.h
+@@ -11,7 +11,6 @@
+
+ #define CPU_PROFILING 1
+ #define SCHED_PROFILING 2
+-#define SLEEP_PROFILING 3
+ #define KVM_PROFILING 4
+
+ struct proc_dir_entry;
+diff --git a/include/linux/property.h b/include/linux/property.h
+index 8c3c6685a2ae37..d32b8052e0863b 100644
+--- a/include/linux/property.h
++++ b/include/linux/property.h
+@@ -11,6 +11,7 @@
+ #define _LINUX_PROPERTY_H_
+
+ #include <linux/bits.h>
++#include <linux/cleanup.h>
+ #include <linux/fwnode.h>
+ #include <linux/stddef.h>
+ #include <linux/types.h>
+@@ -79,12 +80,38 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode,
+
+ bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+
++static inline bool fwnode_device_is_big_endian(const struct fwnode_handle *fwnode)
++{
++ if (fwnode_property_present(fwnode, "big-endian"))
++ return true;
++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
++ fwnode_property_present(fwnode, "native-endian"))
++ return true;
++ return false;
++}
++
+ static inline
+ bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat)
+ {
+ return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
+ }
+
++/**
++ * device_is_big_endian - check if a device has BE registers
++ * @dev: Pointer to the struct device
++ *
++ * Returns: true if the device has a "big-endian" property, or if the kernel
++ * was compiled for BE *and* the device has a "native-endian" property.
++ * Returns false otherwise.
++ *
++ * Callers would nominally use ioread32be/iowrite32be if
++ * device_is_big_endian() == true, or readl/writel otherwise.
++ */
++static inline bool device_is_big_endian(const struct device *dev)
++{
++ return fwnode_device_is_big_endian(dev_fwnode(dev));
++}
++
+ /**
+ * device_is_compatible - match 'compatible' property of the device with a given string
+ * @dev: Pointer to the struct device
+@@ -141,6 +168,11 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev,
+ for (child = device_get_next_child_node(dev, NULL); child; \
+ child = device_get_next_child_node(dev, child))
+
++#define device_for_each_child_node_scoped(dev, child) \
++ for (struct fwnode_handle *child __free(fwnode_handle) = \
++ device_get_next_child_node(dev, NULL); \
++ child; child = device_get_next_child_node(dev, child))
++
+ struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname);
+ struct fwnode_handle *device_get_named_child_node(const struct device *dev,
+@@ -149,6 +181,8 @@ struct fwnode_handle *device_get_named_child_node(const struct device *dev,
+ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
+ void fwnode_handle_put(struct fwnode_handle *fwnode);
+
++DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T))
++
+ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
+
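A hedged sketch combining two of the additions above: the endianness query steering MMIO accessors, and the scoped child-node iterator whose references are dropped automatically via __free(fwnode_handle). The example_* functions and their parameters are illustrative only.

static u32 example_read_reg(struct device *dev, void __iomem *reg)
{
        /* Per the kernel-doc: use the BE accessor when the property says so. */
        if (device_is_big_endian(dev))
                return ioread32be(reg);
        return readl(reg);
}

static void example_walk_children(struct device *dev)
{
        device_for_each_child_node_scoped(dev, child) {
                /* No fwnode_handle_put() needed, even on early exit. */
                if (!fwnode_device_is_available(child))
                        continue;
        }
}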
+diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
+index fb724c65c77bcf..5ce0cd76956e0d 100644
+--- a/include/linux/pse-pd/pse.h
++++ b/include/linux/pse-pd/pse.h
+@@ -114,14 +114,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+ {
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ }
+
+ static inline int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+ {
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ }
+
+ #endif
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index eaaef3ffec221b..90507d4afcd6de 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -393,6 +393,10 @@ static inline void user_single_step_report(struct pt_regs *regs)
+ #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
+ #endif
+
++#ifndef exception_ip
++#define exception_ip(x) instruction_pointer(x)
++#endif
++
+ extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
+
+ extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index d2f9f690a9c145..63426d8255e4ae 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -41,8 +41,8 @@ struct pwm_args {
+ };
+
+ enum {
+- PWMF_REQUESTED = 1 << 0,
+- PWMF_EXPORTED = 1 << 1,
++ PWMF_REQUESTED = 0,
++ PWMF_EXPORTED = 1,
+ };
+
+ /*
+@@ -95,8 +95,8 @@ struct pwm_device {
+ * @state: state to fill with the current PWM state
+ *
+ * The returned PWM state represents the state that was applied by a previous call to
+- * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
+- * hardware. If pwm_apply_state() was never called, this returns either the current hardware
++ * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to
++ * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware
+ * state (if supported) or the default settings.
+ */
+ static inline void pwm_get_state(const struct pwm_device *pwm,
+@@ -160,20 +160,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
+ }
+
+ /**
+- * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
++ * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep()
+ * @pwm: PWM device
+ * @state: state to fill with the prepared PWM state
+ *
+ * This function prepares a state that can later be tweaked and applied
+- * to the PWM device with pwm_apply_state(). This is a convenient function
++ * to the PWM device with pwm_apply_might_sleep(). This is a convenient function
+ * that first retrieves the current PWM state and then replaces the period
+ * and polarity fields with the reference values defined in pwm->args.
+ * Once the function returns, you can adjust the ->enabled and ->duty_cycle
+- * fields according to your needs before calling pwm_apply_state().
++ * fields according to your needs before calling pwm_apply_might_sleep().
+ *
+ * ->duty_cycle is initially set to zero to avoid cases where the current
+ * ->duty_cycle value exceeds the pwm_args->period one, which would trigger
+- * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
++ * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle
+ * first.
+ */
+ static inline void pwm_init_state(const struct pwm_device *pwm,
+@@ -229,7 +229,7 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
+ *
+ * pwm_init_state(pwm, &state);
+ * pwm_set_relative_duty_cycle(&state, 50, 100);
+- * pwm_apply_state(pwm, &state);
++ * pwm_apply_might_sleep(pwm, &state);
+ *
+ * This function returns -EINVAL if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale).
+@@ -309,7 +309,7 @@ struct pwm_chip {
+
+ #if IS_ENABLED(CONFIG_PWM)
+ /* PWM user APIs */
+-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
++int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
+ int pwm_adjust_config(struct pwm_device *pwm);
+
+ /**
+@@ -337,7 +337,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
+
+ state.duty_cycle = duty_ns;
+ state.period = period_ns;
+- return pwm_apply_state(pwm, &state);
++ return pwm_apply_might_sleep(pwm, &state);
+ }
+
+ /**
+@@ -358,7 +358,7 @@ static inline int pwm_enable(struct pwm_device *pwm)
+ return 0;
+
+ state.enabled = true;
+- return pwm_apply_state(pwm, &state);
++ return pwm_apply_might_sleep(pwm, &state);
+ }
+
+ /**
+@@ -377,7 +377,7 @@ static inline void pwm_disable(struct pwm_device *pwm)
+ return;
+
+ state.enabled = false;
+- pwm_apply_state(pwm, &state);
++ pwm_apply_might_sleep(pwm, &state);
+ }
+
+ /* PWM provider APIs */
+@@ -408,8 +408,8 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id);
+ #else
+-static inline int pwm_apply_state(struct pwm_device *pwm,
+- const struct pwm_state *state)
++static inline int pwm_apply_might_sleep(struct pwm_device *pwm,
++ const struct pwm_state *state)
+ {
+ might_sleep();
+ return -ENOTSUPP;
+@@ -536,7 +536,7 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
+ state.period = pwm->args.period;
+ state.usage_power = false;
+
+- pwm_apply_state(pwm, &state);
++ pwm_apply_might_sleep(pwm, &state);
+ }
+
+ struct pwm_lookup {
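The renamed API in use, following the pattern the kernel-doc comment above already shows; a sketch assuming a driver that wants a 50% duty cycle. Everything except the pwm_* calls is hypothetical.

static int example_set_half_duty(struct pwm_device *pwm)
{
        struct pwm_state state;

        pwm_init_state(pwm, &state);
        pwm_set_relative_duty_cycle(&state, 50, 100);
        state.enabled = true;
        /* May sleep, hence the new name. */
        return pwm_apply_might_sleep(pwm, &state);
}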
+diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
+index 5d868505a94e43..6d92b68efbf6c3 100644
+--- a/include/linux/randomize_kstack.h
++++ b/include/linux/randomize_kstack.h
+@@ -80,7 +80,7 @@ DECLARE_PER_CPU(u32, kstack_offset);
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+- offset ^= (rand); \
++ offset = ror32(offset, 5) ^ (rand); \
+ raw_cpu_write(kstack_offset, offset); \
+ } \
+ } while (0)
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 5e5f920ade9094..6466c2f792923c 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -189,9 +189,9 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+ do { \
+ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
+ \
+- if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
++ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
+ likely(!___rttq_nesting)) { \
+- rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
++ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
+ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
+ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
+ rcu_tasks_trace_qs_blkd(t); \
+@@ -252,6 +252,37 @@ do { \
+ cond_resched(); \
+ } while (0)
+
++/**
++ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
++ * @old_ts: jiffies at start of processing.
++ *
++ * This helper is for long-running softirq handlers, such as NAPI threads in
++ * networking. The caller should initialize the variable passed in as @old_ts
++ * at the beginning of the softirq handler. When invoked frequently, this macro
++ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
++ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
++ * modifies its old_ts argument.
++ *
++ * Because regions of code that have disabled softirq act as RCU read-side
++ * critical sections, this macro should be invoked with softirq (and
++ * preemption) enabled.
++ *
++ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would
++ * have more chances to invoke schedule() and provide the necessary quiescent
++ * states. By contrast, calling cond_resched() alone won't achieve the same
++ * effect, because cond_resched() does not provide RCU-Tasks quiescent states.
++ */
++#define rcu_softirq_qs_periodic(old_ts) \
++do { \
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
++ time_after(jiffies, (old_ts) + HZ / 10)) { \
++ preempt_disable(); \
++ rcu_softirq_qs(); \
++ preempt_enable(); \
++ (old_ts) = jiffies; \
++ } \
++} while (0)
++
+ /*
+ * Infrastructure to implement the synchronize_() primitives in
+ * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
+@@ -303,6 +334,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
+ lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+ }
+
++static inline void rcu_try_lock_acquire(struct lockdep_map *map)
++{
++ lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
++}
++
+ static inline void rcu_lock_release(struct lockdep_map *map)
+ {
+ lock_release(map, _THIS_IP_);
+@@ -317,6 +353,7 @@ int rcu_read_lock_any_held(void);
+ #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+ # define rcu_lock_acquire(a) do { } while (0)
++# define rcu_try_lock_acquire(a) do { } while (0)
+ # define rcu_lock_release(a) do { } while (0)
+
+ static inline int rcu_read_lock_held(void)
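A sketch of the usage pattern the rcu_softirq_qs_periodic() kernel-doc prescribes for a long-running softirq-style loop; example_poll_once() is a stand-in for the real per-iteration work.

static bool example_poll_once(void);

static void example_napi_like_loop(void)
{
        unsigned long old_ts = jiffies;

        while (example_poll_once()) {
                /* Reports RCU and RCU-Tasks quiescent states every ~100 ms. */
                rcu_softirq_qs_periodic(old_ts);
        }
}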
+diff --git a/include/linux/regmap.h b/include/linux/regmap.h
+index c9182a47736ef8..113261287af288 100644
+--- a/include/linux/regmap.h
++++ b/include/linux/regmap.h
+@@ -1225,6 +1225,7 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
+ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
++int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);
+ int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
+ int regmap_noinc_read(struct regmap *map, unsigned int reg,
+@@ -1734,6 +1735,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg,
+ return -EINVAL;
+ }
+
++static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg,
++ unsigned int *val)
++{
++ WARN_ONCE(1, "regmap API is disabled");
++ return -EINVAL;
++}
++
+ static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+ {
+diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
+index 39b666b40ea61e..25d0684d37b3e3 100644
+--- a/include/linux/regulator/consumer.h
++++ b/include/linux/regulator/consumer.h
+@@ -365,13 +365,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
+
+ static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+ {
+- return -ENODEV;
++ return 0;
+ }
+
+ static inline int devm_regulator_get_enable_optional(struct device *dev,
+ const char *id)
+ {
+- return -ENODEV;
++ return 0;
+ }
+
+ static inline struct regulator *__must_check
+@@ -489,6 +489,14 @@ static inline int of_regulator_bulk_get_all(struct device *dev, struct device_no
+ return 0;
+ }
+
++static inline int devm_regulator_bulk_get_const(
++ struct device *dev, int num_consumers,
++ const struct regulator_bulk_data *in_consumers,
++ struct regulator_bulk_data **out_consumers)
++{
++ return 0;
++}
++
+ static inline int regulator_bulk_enable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+ {
+diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
+index 4b7eceb3828b0a..2dbf87233f85a4 100644
+--- a/include/linux/regulator/driver.h
++++ b/include/linux/regulator/driver.h
+@@ -304,6 +304,8 @@ enum regulator_type {
+ * @vsel_range_reg: Register for range selector when using pickable ranges
+ * and ``regulator_map_*_voltage_*_pickable`` functions.
+ * @vsel_range_mask: Mask for register bitfield used for range selector
++ * @range_applied_by_vsel: A flag to indicate that changes to vsel_range_reg
++ * are only effective after vsel_reg is written
+ * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
+ * @vsel_mask: Mask for register bitfield used for selector
+ * @vsel_step: Specify the resolution of selector stepping when setting
+@@ -394,6 +396,7 @@ struct regulator_desc {
+
+ unsigned int vsel_range_reg;
+ unsigned int vsel_range_mask;
++ bool range_applied_by_vsel;
+ unsigned int vsel_reg;
+ unsigned int vsel_mask;
+ unsigned int vsel_step;
+diff --git a/include/linux/rethook.h b/include/linux/rethook.h
+index 26b6f3c81a7638..544e1bbfad2841 100644
+--- a/include/linux/rethook.h
++++ b/include/linux/rethook.h
+@@ -29,7 +29,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long,
+ */
+ struct rethook {
+ void *data;
+- rethook_handler_t handler;
++ /*
++ * To avoid sparse warnings, this uses a raw function pointer with
++ * __rcu, instead of rethook_handler_t. But this must be the same as
++ * rethook_handler_t.
++ */
++ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
+ struct freelist_head pool;
+ refcount_t ref;
+ struct rcu_head rcu;
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 782e14f62201f7..ded528d23f855b 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -98,6 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
+ __ring_buffer_alloc((size), (flags), &__key); \
+ })
+
++typedef bool (*ring_buffer_cond_fn)(void *data);
+ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table, int full);
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 51cc21ebb568b0..b1fb58b435a98d 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -261,8 +261,8 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
+ * guarantee the pinned page won't be randomly replaced in the
+ * future on write faults.
+ */
+- if (likely(!is_device_private_page(page) &&
+- unlikely(page_needs_cow_for_dma(vma, page))))
++ if (likely(!is_device_private_page(page)) &&
++ unlikely(page_needs_cow_for_dma(vma, page)))
+ return -EBUSY;
+
+ ClearPageAnonExclusive(page);
+diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
+index d662cf136021d6..189140bf11fc40 100644
+--- a/include/linux/sbitmap.h
++++ b/include/linux/sbitmap.h
+@@ -36,6 +36,11 @@ struct sbitmap_word {
+ * @cleared: word holding cleared bits
+ */
+ unsigned long cleared ____cacheline_aligned_in_smp;
++
++ /**
++ * @swap_lock: serializes simultaneous updates of ->word and ->cleared
++ */
++ raw_spinlock_t swap_lock;
+ } ____cacheline_aligned_in_smp;
+
+ /**
+diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
+index 0ee96ea7a0e908..1b37fa8fc723da 100644
+--- a/include/linux/sched/coredump.h
++++ b/include/linux/sched/coredump.h
+@@ -91,4 +91,14 @@ static inline int get_dumpable(struct mm_struct *mm)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
+
+ #define MMF_VM_MERGE_ANY 29
++#define MMF_HAS_MDWE_NO_INHERIT 30
++
++static inline unsigned long mmf_init_flags(unsigned long flags)
++{
++ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
++ flags &= ~((1UL << MMF_HAS_MDWE) |
++ (1UL << MMF_HAS_MDWE_NO_INHERIT));
++ return flags & MMF_INIT_MASK;
++}
++
+ #endif /* _LINUX_SCHED_COREDUMP_H */
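How fork-time mm setup would plausibly consume the new helper; this mirrors the dup-mm path, but the function below is only a sketch.

static unsigned long example_fork_mm_flags(const struct mm_struct *oldmm)
{
        /* Drops MDWE (and the NO_INHERIT marker) when inheritance was
         * disabled via prctl(), then masks to the inheritable set. */
        return mmf_init_flags(oldmm->flags);
}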
+diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
+index 3988762efe15c0..b69afb8630db4a 100644
+--- a/include/linux/sched/numa_balancing.h
++++ b/include/linux/sched/numa_balancing.h
+@@ -15,6 +15,16 @@
+ #define TNF_FAULT_LOCAL 0x08
+ #define TNF_MIGRATE_FAIL 0x10
+
++enum numa_vmaskip_reason {
++ NUMAB_SKIP_UNSUITABLE,
++ NUMAB_SKIP_SHARED_RO,
++ NUMAB_SKIP_INACCESSIBLE,
++ NUMAB_SKIP_SCAN_DELAY,
++ NUMAB_SKIP_PID_INACTIVE,
++ NUMAB_SKIP_IGNORE_PID,
++ NUMAB_SKIP_SEQ_COMPLETED,
++};
++
+ #ifdef CONFIG_NUMA_BALANCING
+ extern void task_numa_fault(int last_node, int node, int pages, int flags);
+ extern pid_t task_numa_group_id(struct task_struct *p);
+diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
+index 837a23624a66a3..3612de6ea1c5d4 100644
+--- a/include/linux/sched/vhost_task.h
++++ b/include/linux/sched/vhost_task.h
+@@ -5,7 +5,8 @@
+
+ struct vhost_task;
+
+-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
++struct vhost_task *vhost_task_create(bool (*fn)(void *),
++ void (*handle_kill)(void *), void *arg,
+ const char *name);
+ void vhost_task_start(struct vhost_task *vtsk);
+ void vhost_task_stop(struct vhost_task *vtsk);
+diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
+index e6fe4f73ffe62b..71923ae63b014b 100644
+--- a/include/linux/scmi_protocol.h
++++ b/include/linux/scmi_protocol.h
+@@ -97,10 +97,17 @@ struct scmi_clk_proto_ops {
+ u32 clk_id);
+ };
+
++struct scmi_perf_domain_info {
++ char name[SCMI_MAX_STR_SIZE];
++ bool set_perf;
++};
++
+ /**
+ * struct scmi_perf_proto_ops - represents the various operations provided
+ * by SCMI Performance Protocol
+ *
++ * @num_domains_get: gets the number of supported performance domains
++ * @info_get: gets the information of a performance domain
+ * @limits_set: sets limits on the performance level of a domain
+ * @limits_get: gets limits on the performance level of a domain
+ * @level_set: sets the performance level of a domain
+@@ -120,6 +127,9 @@ struct scmi_clk_proto_ops {
+ * or in some other (abstract) scale
+ */
+ struct scmi_perf_proto_ops {
++ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
++ const struct scmi_perf_domain_info __must_check *(*info_get)
++ (const struct scmi_protocol_handle *ph, u32 domain);
+ int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 max_perf, u32 min_perf);
+ int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
+diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
+index eab7081392d502..6a4a3cec4638be 100644
+--- a/include/linux/screen_info.h
++++ b/include/linux/screen_info.h
+@@ -4,6 +4,142 @@
+
+ #include <uapi/linux/screen_info.h>
+
++#include <linux/bits.h>
++
++/**
++ * SCREEN_INFO_MAX_RESOURCES - maximum number of resources per screen_info
++ */
++#define SCREEN_INFO_MAX_RESOURCES 3
++
++struct pci_dev;
++struct resource;
++
++static inline bool __screen_info_has_lfb(unsigned int type)
++{
++ return (type == VIDEO_TYPE_VLFB) || (type == VIDEO_TYPE_EFI);
++}
++
++static inline u64 __screen_info_lfb_base(const struct screen_info *si)
++{
++ u64 lfb_base = si->lfb_base;
++
++ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
++ lfb_base |= (u64)si->ext_lfb_base << 32;
++
++ return lfb_base;
++}
++
++static inline void __screen_info_set_lfb_base(struct screen_info *si, u64 lfb_base)
++{
++ si->lfb_base = lfb_base & GENMASK_ULL(31, 0);
++ si->ext_lfb_base = (lfb_base & GENMASK_ULL(63, 32)) >> 32;
++
++ if (si->ext_lfb_base)
++ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
++ else
++ si->capabilities &= ~VIDEO_CAPABILITY_64BIT_BASE;
++}
++
++static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned int type)
++{
++ u64 lfb_size = si->lfb_size;
++
++ if (type == VIDEO_TYPE_VLFB)
++ lfb_size <<= 16;
++ return lfb_size;
++}
++
++static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si)
++{
++ /*
++ * VESA modes typically run on VGA hardware. A set bit 5 signals that this
++ * is not the case, and drivers then cannot make use of VGA resources. See
++ * Sec 4.4 of the VBE 2.0 spec.
++ */
++ return si->vesa_attributes & BIT(5);
++}
++
++static inline unsigned int __screen_info_video_type(unsigned int type)
++{
++ switch (type) {
++ case VIDEO_TYPE_MDA:
++ case VIDEO_TYPE_CGA:
++ case VIDEO_TYPE_EGAM:
++ case VIDEO_TYPE_EGAC:
++ case VIDEO_TYPE_VGAC:
++ case VIDEO_TYPE_VLFB:
++ case VIDEO_TYPE_PICA_S3:
++ case VIDEO_TYPE_MIPS_G364:
++ case VIDEO_TYPE_SGI:
++ case VIDEO_TYPE_TGAC:
++ case VIDEO_TYPE_SUN:
++ case VIDEO_TYPE_SUNPCI:
++ case VIDEO_TYPE_PMAC:
++ case VIDEO_TYPE_EFI:
++ return type;
++ default:
++ return 0;
++ }
++}
++
++/**
++ * screen_info_video_type() - Decodes the video type from struct screen_info
++ * @si: an instance of struct screen_info
++ *
++ * Returns:
++ * A VIDEO_TYPE_ constant representing si's type of video display, or 0 otherwise.
++ */
++static inline unsigned int screen_info_video_type(const struct screen_info *si)
++{
++ unsigned int type;
++
++ // check if display output is on
++ if (!si->orig_video_isVGA)
++ return 0;
++
++ // check for a known VIDEO_TYPE_ constant
++ type = __screen_info_video_type(si->orig_video_isVGA);
++ if (type)
++ return si->orig_video_isVGA;
++
++ // check if text mode has been initialized
++ if (!si->orig_video_lines || !si->orig_video_cols)
++ return 0;
++
++ // 80x25 text, mono
++ if (si->orig_video_mode == 0x07) {
++ if ((si->orig_video_ega_bx & 0xff) != 0x10)
++ return VIDEO_TYPE_EGAM;
++ else
++ return VIDEO_TYPE_MDA;
++ }
++
++ // EGA/VGA, 16 colors
++ if ((si->orig_video_ega_bx & 0xff) != 0x10) {
++ if (si->orig_video_isVGA)
++ return VIDEO_TYPE_VGAC;
++ else
++ return VIDEO_TYPE_EGAC;
++ }
++
++ // the rest...
++ return VIDEO_TYPE_CGA;
++}
++
++ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
++
++#if defined(CONFIG_PCI)
++void screen_info_apply_fixups(void);
++struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
++#else
++static inline void screen_info_apply_fixups(void)
++{ }
++static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
++{
++ return NULL;
++}
++#endif
++
+ extern struct screen_info screen_info;
+
+ #endif /* _SCREEN_INFO_H */
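A hedged sketch of a firmware-framebuffer consumer driving the new screen_info helpers; the logging is illustrative.

static void example_describe_screen(const struct screen_info *si)
{
        unsigned int type = screen_info_video_type(si);

        if (__screen_info_has_lfb(type))
                pr_info("lfb at %#llx, size %#llx\n",
                        (unsigned long long)__screen_info_lfb_base(si),
                        (unsigned long long)__screen_info_lfb_size(si, type));
}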
+diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
+index 35f3a4a8ceb1e3..acf7e1a3f3def9 100644
+--- a/include/linux/secretmem.h
++++ b/include/linux/secretmem.h
+@@ -13,10 +13,10 @@ static inline bool folio_is_secretmem(struct folio *folio)
+ /*
+ * Using folio_mapping() is quite slow because of the actual call
+ * instruction.
+- * We know that secretmem pages are not compound and LRU so we can
++ * We know that secretmem pages are not compound, so we can
+ * save a couple of cycles here.
+ */
+- if (folio_test_large(folio) || !folio_test_lru(folio))
++ if (folio_test_large(folio))
+ return false;
+
+ mapping = (struct address_space *)
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 5f16eecde00bc7..4bd0f6fc553e7b 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -389,6 +389,8 @@ int security_file_permission(struct file *file, int mask);
+ int security_file_alloc(struct file *file);
+ void security_file_free(struct file *file);
+ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++int security_file_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg);
+ int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags);
+ int security_mmap_addr(unsigned long addr);
+@@ -987,6 +989,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
+ return 0;
+ }
+
++static inline int security_file_ioctl_compat(struct file *file,
++ unsigned int cmd,
++ unsigned long arg)
++{
++ return 0;
++}
++
+ static inline int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags)
+ {
+@@ -1944,7 +1953,8 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
+
+ #ifdef CONFIG_AUDIT
+ #ifdef CONFIG_SECURITY
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++ gfp_t gfp);
+ int security_audit_rule_known(struct audit_krule *krule);
+ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+ void security_audit_rule_free(void *lsmrule);
+@@ -1952,7 +1962,7 @@ void security_audit_rule_free(void *lsmrule);
+ #else
+
+ static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
+- void **lsmrule)
++ void **lsmrule, gfp_t gfp)
+ {
+ return 0;
+ }
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index bb6f073bc15915..052df85dfd597a 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -470,6 +470,7 @@ struct uart_port {
+ unsigned char iotype; /* io access style */
+ unsigned char quirks; /* internal quirks */
+
++#define UPIO_UNKNOWN ((unsigned char)~0U) /* UCHAR_MAX */
+ #define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
+ #define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
+ #define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */
+@@ -588,6 +589,85 @@ struct uart_port {
+ void *private_data; /* generic platform data pointer */
+ };
+
++/**
++ * uart_port_lock - Lock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock(struct uart_port *up)
++{
++ spin_lock(&up->lock);
++}
++
++/**
++ * uart_port_lock_irq - Lock the UART port and disable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock_irq(struct uart_port *up)
++{
++ spin_lock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ */
++static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ spin_lock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_trylock - Try to lock the UART port
++ * @up: Pointer to UART port structure
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock(struct uart_port *up)
++{
++ return spin_trylock(&up->lock);
++}
++
++/**
++ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ return spin_trylock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_unlock - Unlock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock(struct uart_port *up)
++{
++ spin_unlock(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock_irq(struct uart_port *up)
++{
++ spin_unlock_irq(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
++ * @up: Pointer to UART port structure
++ * @flags: The saved interrupt flags for restore
++ */
++static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ return up->serial_in(up, offset);
+@@ -669,8 +749,17 @@ struct uart_driver {
+
+ void uart_write_wakeup(struct uart_port *port);
+
+-#define __uart_port_tx(uport, ch, tx_ready, put_char, tx_done, for_test, \
+- for_post) \
++/**
++ * enum UART_TX_FLAGS -- flags for uart_port_tx_flags()
++ *
++ * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer
++ */
++enum UART_TX_FLAGS {
++ UART_TX_NOSTOP = BIT(0),
++};
++
++#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \
++ for_test, for_post) \
+ ({ \
+ struct uart_port *__port = (uport); \
+ struct circ_buf *xmit = &__port->state->xmit; \
+@@ -698,7 +787,7 @@ void uart_write_wakeup(struct uart_port *port);
+ if (pending < WAKEUP_CHARS) { \
+ uart_write_wakeup(__port); \
+ \
+- if (pending == 0) \
++ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
+ __port->ops->stop_tx(__port); \
+ } \
+ \
+@@ -733,10 +822,28 @@ void uart_write_wakeup(struct uart_port *port);
+ */
+ #define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+- __uart_port_tx(port, ch, tx_ready, put_char, tx_done, __count, \
++ __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+ })
+
++/**
++ * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting and flags
++ * @port: uart port
++ * @ch: variable to store a character to be written to the HW
++ * @flags: %UART_TX_NOSTOP or similar
++ * @count: a limit of characters to send
++ * @tx_ready: can HW accept more data function
++ * @put_char: function to write a character
++ * @tx_done: function to call after the loop is done
++ *
++ * See uart_port_tx_limited() for more details.
++ */
++#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
++ unsigned int __count = (count); \
++ __uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \
++ __count--); \
++})
++
+ /**
+ * uart_port_tx -- transmit helper for uart_port
+ * @port: uart port
+@@ -747,8 +854,21 @@ void uart_write_wakeup(struct uart_port *port);
+ * See uart_port_tx_limited() for more details.
+ */
+ #define uart_port_tx(port, ch, tx_ready, put_char) \
+- __uart_port_tx(port, ch, tx_ready, put_char, ({}), true, ({}))
++ __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({}))
++
+
++/**
++ * uart_port_tx_flags -- transmit helper for uart_port with flags
++ * @port: uart port
++ * @ch: variable to store a character to be written to the HW
++ * @flags: %UART_TX_NOSTOP or similar
++ * @tx_ready: can HW accept more data function
++ * @put_char: function to write a character
++ *
++ * See uart_port_tx_limited() for more details.
++ */
++#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \
++ __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({}))
+ /*
+ * Baud rate helpers.
+ */
+@@ -858,6 +978,8 @@ int uart_register_driver(struct uart_driver *uart);
+ void uart_unregister_driver(struct uart_driver *uart);
+ int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
+ void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
++int uart_read_port_properties(struct uart_port *port);
++int uart_read_and_validate_port_properties(struct uart_port *port);
+ bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2);
+
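The new lock wrappers in a typical IRQ-safe critical section; a sketch only, with the register programming elided.

static void example_uart_touch(struct uart_port *port)
{
        unsigned long flags;

        uart_port_lock_irqsave(port, &flags);
        /* ... program port registers ... */
        uart_port_unlock_irqrestore(port, flags);
}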
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 6b0c626620f5c8..134c686c8676cf 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -32,7 +32,7 @@ struct shmem_inode_info {
+ struct timespec64 i_crtime; /* file creation time */
+ unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
+ #ifdef CONFIG_TMPFS_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
++ struct dquot __rcu *i_dquot[MAXQUOTAS];
+ #endif
+ struct offset_ctx dir_offsets; /* stable entry offsets */
+ struct inode vfs_inode;
+@@ -110,8 +110,17 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+ int shmem_unuse(unsigned int type);
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags);
++#else
++static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
++ struct mm_struct *mm, unsigned long vm_flags)
++{
++ return false;
++}
++#endif
++
+ #ifdef CONFIG_SHMEM
+ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+ #else
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 97bfef071255f3..5f11f987334190 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -295,7 +295,7 @@ struct nf_bridge_info {
+ u8 bridged_dnat:1;
+ u8 sabotage_in_done:1;
+ __u16 frag_max_size;
+- struct net_device *physindev;
++ int physinif;
+
+ /* always valid & non-NULL from FORWARD on, for physdev match */
+ struct net_device *physoutdev;
+@@ -736,8 +736,6 @@ typedef unsigned char *sk_buff_data_t;
+ * @list: queue head
+ * @ll_node: anchor in an llist (eg socket defer_list)
+ * @sk: Socket we are owned by
+- * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+- * fragmentation management
+ * @dev: Device we arrived on/are leaving by
+ * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
+ * @cb: Control buffer. Free for use by every layer. Put private vars here
+@@ -860,10 +858,7 @@ struct sk_buff {
+ struct llist_node ll_node;
+ };
+
+- union {
+- struct sock *sk;
+- int ip_defrag_offset;
+- };
++ struct sock *sk;
+
+ union {
+ ktime_t tstamp;
+@@ -2632,6 +2627,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
+ void *skb_push(struct sk_buff *skb, unsigned int len);
+ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
+ {
++ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
++
+ skb->data -= len;
+ skb->len += len;
+ return skb->data;
+@@ -2640,6 +2637,8 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
+ void *skb_pull(struct sk_buff *skb, unsigned int len);
+ static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
+ {
++ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
++
+ skb->len -= len;
+ if (unlikely(skb->len < skb->data_len)) {
+ #if defined(CONFIG_DEBUG_NET)
+@@ -2664,6 +2663,8 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
+ static inline enum skb_drop_reason
+ pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
+ {
++ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
++
+ if (likely(len <= skb_headlen(skb)))
+ return SKB_NOT_DROPPED_YET;
+
+@@ -2836,6 +2837,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
+ skb->inner_network_header += offset;
+ }
+
++static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
++{
++ return skb->inner_network_header > 0;
++}
++
+ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
+ {
+ return skb->head + skb->inner_mac_header;
+@@ -2956,6 +2962,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb)
+ }
+ }
+
++/* Move the full mac header up to current network_header.
++ * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
++ * The caller must provide the complete mac header length.
++ */
++static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len)
++{
++ if (skb_mac_header_was_set(skb)) {
++ const unsigned char *old_mac = skb_mac_header(skb);
++
++ skb_set_mac_header(skb, -full_mac_len);
++ memmove(skb_mac_header(skb), old_mac, full_mac_len);
++ __skb_push(skb, full_mac_len - skb->mac_len);
++ }
++}
++
+ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+ {
+ return skb->csum_start - skb_headroom(skb);
+@@ -3438,6 +3459,16 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
+
+ bool napi_pp_put_page(struct page *page, bool napi_safe);
+
++static inline void
++skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe)
++{
++#ifdef CONFIG_PAGE_POOL
++ if (skb->pp_recycle && napi_pp_put_page(page, napi_safe))
++ return;
++#endif
++ put_page(page);
++}
++
+ static inline void
+ napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
+ {
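A sketch of the new skb_page_unref() helper, which centralizes page-pool-aware fragment releases; example_drop_frag() is hypothetical.

static void example_drop_frag(struct sk_buff *skb, const skb_frag_t *frag)
{
        /* Recycles to the page pool when eligible, else put_page(). */
        skb_page_unref(skb, skb_frag_page(frag), false);
}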
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index c1637515a8a416..062fe440f5d095 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -106,6 +106,7 @@ struct sk_psock {
+ struct mutex work_mutex;
+ struct sk_psock_work_state work_state;
+ struct delayed_work work;
++ struct sock *sk_pair;
+ struct rcu_work rwork;
+ };
+
+@@ -455,10 +456,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
+
+ static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
+ {
++ read_lock_bh(&sk->sk_callback_lock);
+ if (psock->saved_data_ready)
+ psock->saved_data_ready(sk);
+ else
+ sk->sk_data_ready(sk);
++ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ static inline void psock_set_prog(struct bpf_prog **pprog,
+@@ -499,12 +502,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
+ return !!psock->saved_data_ready;
+ }
+
+-static inline bool sk_is_udp(const struct sock *sk)
+-{
+- return sk->sk_type == SOCK_DGRAM &&
+- sk->sk_protocol == IPPROTO_UDP;
+-}
+-
+ #if IS_ENABLED(CONFIG_NET_SOCK_MSG)
+
+ #define BPF_F_STRPARSER (1UL << 1)
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 8228d1276a2f63..a761cc3559f295 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -228,7 +228,7 @@ void kfree(const void *objp);
+ void kfree_sensitive(const void *objp);
+ size_t __ksize(const void *objp);
+
+-DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
++DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+
+ /**
+ * ksize - Report actual allocation size of associated object
+@@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ size_t ksize(const void *objp);
+
+ #ifdef CONFIG_PRINTK
+-bool kmem_valid_obj(void *object);
+-void kmem_dump_obj(void *object);
++bool kmem_dump_obj(void *object);
++#else
++static inline bool kmem_dump_obj(void *object) { return false; }
+ #endif
+
+ /*
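Why the DEFINE_FREE(kfree, ...) guard changed: scope-based cleanup pointers can now safely hold ERR_PTR() values, not just NULL. A minimal sketch:

static int example_scoped_alloc(size_t n)
{
        void *buf __free(kfree) = kmalloc(n, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        /* ... use buf; kfree() runs automatically at scope exit ... */
        return 0;
}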
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 91ea4a67f8ca2a..2e3f605c346bdf 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -218,6 +218,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
+ static inline void kick_all_cpus_sync(void) { }
+ static inline void wake_up_all_idle_cpus(void) { }
+
++#define setup_max_cpus 0
++
+ #ifdef CONFIG_UP_LATE_INIT
+ extern void __init up_late_init(void);
+ static inline void smp_init(void) { up_late_init(); }
+diff --git a/include/linux/soc/andes/irq.h b/include/linux/soc/andes/irq.h
+new file mode 100644
+index 00000000000000..edc3182d6e661e
+--- /dev/null
++++ b/include/linux/soc/andes/irq.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2023 Andes Technology Corporation
++ */
++#ifndef __ANDES_IRQ_H
++#define __ANDES_IRQ_H
++
++/* Andes PMU irq number */
++#define ANDES_RV_IRQ_PMOVI 18
++#define ANDES_RV_IRQ_LAST ANDES_RV_IRQ_PMOVI
++#define ANDES_SLI_CAUSE_BASE 256
++
++/* Andes PMU related registers */
++#define ANDES_CSR_SLIE 0x9c4
++#define ANDES_CSR_SLIP 0x9c5
++#define ANDES_CSR_SCOUNTEROF 0x9d4
++
++#endif /* __ANDES_IRQ_H */
+diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
+index fd124aa18c81a4..7cddf10277528e 100644
+--- a/include/linux/soc/qcom/pmic_glink.h
++++ b/include/linux/soc/qcom/pmic_glink.h
+@@ -23,10 +23,11 @@ struct pmic_glink_hdr {
+
+ int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
+
+-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+- unsigned int id,
+- void (*cb)(const void *, size_t, void *),
+- void (*pdr)(void *, int),
+- void *priv);
++struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
++ unsigned int id,
++ void (*cb)(const void *, size_t, void *),
++ void (*pdr)(void *, int),
++ void *priv);
++void pmic_glink_client_register(struct pmic_glink_client *client);
+
+ #endif
+diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
+index a36a3b9d4929e5..03187bc9585182 100644
+--- a/include/linux/soc/qcom/smem.h
++++ b/include/linux/soc/qcom/smem.h
+@@ -14,4 +14,6 @@ phys_addr_t qcom_smem_virt_to_phys(void *p);
+
+ int qcom_smem_get_soc_id(u32 *id);
+
++int qcom_smem_bust_hwspin_lock_by_host(unsigned int host);
++
+ #endif
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 39b74d83c7c4a7..cfcb7e2c3813f2 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -383,6 +383,7 @@ struct ucred {
+ #define SOL_MPTCP 284
+ #define SOL_MCTP 285
+ #define SOL_SMC 286
++#define SOL_VSOCK 287
+
+ /* IPX options */
+ #define IPX_TYPE 1
+diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
+index bae5e2369b4f7a..1c1a5d926b1713 100644
+--- a/include/linux/sockptr.h
++++ b/include/linux/sockptr.h
+@@ -50,11 +50,36 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
+ return 0;
+ }
+
++/* Deprecated.
++ * This is unsafe unless the caller has checked the user-provided optlen.
++ * Prefer copy_safe_from_sockptr() instead.
++ */
+ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
+ {
+ return copy_from_sockptr_offset(dst, src, 0, size);
+ }
+
++/**
++ * copy_safe_from_sockptr: copy a struct from sockptr
++ * @dst: Destination address, in kernel space. This buffer must be @ksize
++ * bytes long.
++ * @ksize: Size of @dst struct.
++ * @optval: Source address. (in user or kernel space)
++ * @optlen: Size of @optval data.
++ *
++ * Returns:
++ * * -EINVAL: @optlen < @ksize
++ * * -EFAULT: access to userspace failed.
++ * * 0 : @ksize bytes were copied
++ */
++static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
++ sockptr_t optval, unsigned int optlen)
++{
++ if (optlen < ksize)
++ return -EINVAL;
++ return copy_from_sockptr(dst, optval, ksize);
++}
++
+ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
+ const void *src, size_t size)
+ {
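A sketch of a setsockopt-style handler adopting the safer helper per the deprecation note above; struct example_opts is hypothetical.

struct example_opts {
        u32 mode;
};

static int example_setsockopt(sockptr_t optval, unsigned int optlen)
{
        struct example_opts opts;
        int err;

        /* Rejects short user buffers with -EINVAL before copying. */
        err = copy_safe_from_sockptr(&opts, sizeof(opts), optval, optlen);
        if (err)
                return err;
        return 0;
}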
+diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
+index 4f3d14bb15385a..c383579a008ba9 100644
+--- a/include/linux/soundwire/sdw.h
++++ b/include/linux/soundwire/sdw.h
+@@ -886,7 +886,8 @@ struct sdw_master_ops {
+ * struct sdw_bus - SoundWire bus
+ * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
+ * @md: Master device
+- * @link_id: Link id number, can be 0 to N, unique for each Master
++ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
++ * @link_id: Link id number, can be 0 to N, unique for each Controller
+ * @id: bus system-wide unique id
+ * @slaves: list of Slaves on this bus
+ * @assigned: Bitmap for Slave device numbers.
+@@ -918,6 +919,7 @@ struct sdw_master_ops {
+ struct sdw_bus {
+ struct device *dev;
+ struct sdw_master_device *md;
++ int controller_id;
+ unsigned int link_id;
+ int id;
+ struct list_head slaves;
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 7f8b478fdeb3d6..e5baf43bcfbb65 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -566,6 +566,7 @@ struct spi_controller {
+ #define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
+ #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
+ #define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
++#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+
+ /* Flag indicating if the allocation of this struct is devres-managed */
+ bool devm_allocated;
+@@ -1048,12 +1049,13 @@ struct spi_transfer {
+ unsigned dummy_data:1;
+ unsigned cs_off:1;
+ unsigned cs_change:1;
+- unsigned tx_nbits:3;
+- unsigned rx_nbits:3;
++ unsigned tx_nbits:4;
++ unsigned rx_nbits:4;
+ unsigned timestamped:1;
+ #define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
+ #define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
+ #define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
++#define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */
+ u8 bits_per_word;
+ struct spi_delay delay;
+ struct spi_delay cs_change_delay;
+diff --git a/include/linux/srcu.h b/include/linux/srcu.h
+index 127ef3b2e6073b..236610e4a8fa5d 100644
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -229,7 +229,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
+
+ srcu_check_nmi_safety(ssp, true);
+ retval = __srcu_read_lock_nmisafe(ssp);
+- rcu_lock_acquire(&ssp->dep_map);
++ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+ }
+
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index ce89cc3e491354..42ff5a4de8ee76 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -117,7 +117,6 @@ struct stmmac_axi {
+
+ #define EST_GCL 1024
+ struct stmmac_est {
+- struct mutex lock;
+ int enable;
+ u32 btr_reserve[2];
+ u32 btr_offset[2];
+@@ -139,6 +138,7 @@ struct stmmac_rxq_cfg {
+
+ struct stmmac_txq_cfg {
+ u32 weight;
++ bool coe_unsupported;
+ u8 mode_to_use;
+ /* Credit Base Shaper parameters */
+ u32 send_slope;
+@@ -174,6 +174,7 @@ struct stmmac_fpe_cfg {
+ bool hs_enable; /* FPE handshake enable */
+ enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
+ enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
++ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+ };
+
+ struct stmmac_safety_feature_cfg {
+diff --git a/include/linux/string.h b/include/linux/string.h
+index dbfc66400050f7..5077776e995e01 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -5,7 +5,9 @@
+ #include <linux/compiler.h> /* for inline */
+ #include <linux/types.h> /* for size_t */
+ #include <linux/stddef.h> /* for NULL */
++#include <linux/err.h> /* for ERR_PTR() */
+ #include <linux/errno.h> /* for E2BIG */
++#include <linux/overflow.h> /* for check_mul_overflow() */
+ #include <linux/stdarg.h>
+ #include <uapi/linux/string.h>
+
+@@ -14,6 +16,44 @@ extern void *memdup_user(const void __user *, size_t);
+ extern void *vmemdup_user(const void __user *, size_t);
+ extern void *memdup_user_nul(const void __user *, size_t);
+
++/**
++ * memdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result is physically
++ * contiguous, to be freed by kfree().
++ */
++static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
++{
++ size_t nbytes;
++
++ if (check_mul_overflow(n, size, &nbytes))
++ return ERR_PTR(-EOVERFLOW);
++
++ return memdup_user(src, nbytes);
++}
++
++/**
++ * vmemdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result may be not
++ * physically contiguous. Use kvfree() to free.
++ */
++static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
++{
++ size_t nbytes;
++
++ if (check_mul_overflow(n, size, &nbytes))
++ return ERR_PTR(-EOVERFLOW);
++
++ return vmemdup_user(src, nbytes);
++}
++
+ /*
+ * Include machine specific inline routines
+ */
+@@ -277,10 +317,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ */
+ #define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
++ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+- memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
++ memcpy_and_pad(dest, _dest_len, src, \
++ strnlen(src, min(_src_len, _dest_len)), pad); \
+ } while (0)
+
+ /**
+@@ -298,10 +340,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ */
+ #define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
++ const size_t _src_len = __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+- memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
++ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
+ } while (0)
+
+ /**
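The overflow-checked array duplicators in use; a sketch where nmemb comes from untrusted userspace and struct example_rec is hypothetical.

struct example_rec {
        u64 key;
        u64 val;
};

static struct example_rec *example_fetch(const void __user *uptr, size_t nmemb)
{
        /* n * size is validated with check_mul_overflow() internally; the
         * result is an ERR_PTR() on failure and is kfree()d by the caller. */
        return memdup_array_user(uptr, nmemb, sizeof(struct example_rec));
}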
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index af7358277f1c34..17d84b3ee8a018 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -92,6 +92,7 @@ struct rpc_clnt {
+ };
+ const struct cred *cl_cred;
+ unsigned int cl_max_connect; /* max number of transports not to the same IP */
++ struct super_block *pipefs_sb;
+ };
+
+ /*
+@@ -138,6 +139,7 @@ struct rpc_create_args {
+ const char *servername;
+ const char *nodename;
+ const struct rpc_program *program;
++ struct rpc_stat *stats;
+ u32 prognumber; /* overrides program->number */
+ u32 version;
+ rpc_authflavor_t authflavor;
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index 8ada7dc802d305..8f9bee0e21c3bc 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -186,7 +186,7 @@ struct rpc_wait_queue {
+ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
+ unsigned char priority; /* current priority */
+ unsigned char nr; /* # tasks remaining for cookie */
+- unsigned short qlen; /* total # tasks waiting in queue */
++ unsigned int qlen; /* total # tasks waiting in queue */
+ struct rpc_timer timer_list;
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
+ const char * name;
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index dbf5b21feafe48..3d8b215f32d5b0 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -336,7 +336,6 @@ struct svc_program {
+ const struct svc_version **pg_vers; /* version array */
+ char * pg_name; /* service name */
+ char * pg_class; /* class name: services sharing authentication */
+- struct svc_stat * pg_stats; /* rpc statistics */
+ enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp);
+ __be32 (*pg_init_request)(struct svc_rqst *,
+ const struct svc_program *,
+@@ -408,7 +407,9 @@ bool svc_rqst_replace_page(struct svc_rqst *rqstp,
+ void svc_rqst_release_pages(struct svc_rqst *rqstp);
+ void svc_rqst_free(struct svc_rqst *);
+ void svc_exit_thread(struct svc_rqst *);
+-struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
++struct svc_serv * svc_create_pooled(struct svc_program *prog,
++ struct svc_stat *stats,
++ unsigned int bufsize,
+ int (*threadfn)(void *data));
+ int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+ int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 493487ed7c388b..cb25db2a93dd1b 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -552,6 +552,11 @@ static inline int swap_duplicate(swp_entry_t swp)
+ return 0;
+ }
+
++static inline int swapcache_prepare(swp_entry_t swp)
++{
++ return 0;
++}
++
+ static inline void swap_free(swp_entry_t swp)
+ {
+ }
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index bff1e8d97de0e0..925c84653af5e9 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -390,6 +390,35 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
+ }
+ #endif /* CONFIG_MIGRATION */
+
++#ifdef CONFIG_MEMORY_FAILURE
++
++/*
++ * Support for hardware poisoned pages
++ */
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++ BUG_ON(!PageLocked(page));
++ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
++}
++
++static inline int is_hwpoison_entry(swp_entry_t entry)
++{
++ return swp_type(entry) == SWP_HWPOISON;
++}
++
++#else
++
++static inline swp_entry_t make_hwpoison_entry(struct page *page)
++{
++ return swp_entry(0, 0);
++}
++
++static inline int is_hwpoison_entry(swp_entry_t swp)
++{
++ return 0;
++}
++#endif
++
+ typedef unsigned long pte_marker;
+
+ #define PTE_MARKER_UFFD_WP BIT(0)
+@@ -470,8 +499,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+
+ /*
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+- * in the swap offset. They are used to represent unaddressable device memory
+- * and to restrict access to a page undergoing migration.
++ * in the swap offset. They can either be used to represent unaddressable device
++ * memory, to restrict access to a page undergoing migration or to represent a
++ * pfn which has been hwpoisoned and unmapped.
+ */
+ static inline bool is_pfn_swap_entry(swp_entry_t entry)
+ {
+@@ -479,7 +509,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ return is_migration_entry(entry) || is_device_private_entry(entry) ||
+- is_device_exclusive_entry(entry);
++ is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
+ }
+
+ struct page_vma_mapped_walk;
+@@ -548,35 +578,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
+ }
+ #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+-#ifdef CONFIG_MEMORY_FAILURE
+-
+-/*
+- * Support for hardware poisoned pages
+- */
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+- BUG_ON(!PageLocked(page));
+- return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t entry)
+-{
+- return swp_type(entry) == SWP_HWPOISON;
+-}
+-
+-#else
+-
+-static inline swp_entry_t make_hwpoison_entry(struct page *page)
+-{
+- return swp_entry(0, 0);
+-}
+-
+-static inline int is_hwpoison_entry(swp_entry_t swp)
+-{
+- return 0;
+-}
+-#endif
+-
+ static inline int non_swap_entry(swp_entry_t entry)
+ {
+ return swp_type(entry) >= MAX_SWAPFILES;
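The hwpoison-entry helpers are moved, unchanged, above is_pfn_swap_entry() so that the latter can classify hwpoisoned pfns as pfn swap entries. A sketch of the helpers' contract (demo_poison_pte is hypothetical):

    static pte_t demo_poison_pte(struct page *page)
    {
            /* encodes SWP_HWPOISON + the page's pfn; page must be locked */
            swp_entry_t entry = make_hwpoison_entry(page);

            return swp_entry_to_pte(entry);
    }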
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 22bc6bc147f899..36c592e43d6520 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -125,6 +125,7 @@ struct cachestat;
+ #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
+ #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
+ #define __SC_CAST(t, a) (__force t) a
++#define __SC_TYPE(t, a) t
+ #define __SC_ARGS(t, a) a
+ #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
+
+@@ -409,7 +410,7 @@ asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
+ asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
+ struct statfs64 __user *buf);
+ asmlinkage long sys_truncate(const char __user *path, long length);
+-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
++asmlinkage long sys_ftruncate(unsigned int fd, off_t length);
+ #if BITS_PER_LONG == 32
+ asmlinkage long sys_truncate64(const char __user *path, loff_t length);
+ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
+@@ -839,9 +840,15 @@ asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
+ const struct rlimit64 __user *new_rlim,
+ struct rlimit64 __user *old_rlim);
+ asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
++#if defined(CONFIG_ARCH_SPLIT_ARG64)
++asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
++ unsigned int mask_1, unsigned int mask_2,
++ int dfd, const char __user * pathname);
++#else
+ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u64 mask, int fd,
+ const char __user *pathname);
++#endif
+ asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
+ struct file_handle __user *handle,
+ int __user *mnt_id, int flag);
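On 32-bit ABIs that pass 64-bit arguments in register pairs (CONFIG_ARCH_SPLIT_ARG64), the fanotify mask arrives as two u32 halves. A sketch of the reassembly the syscall glue has to perform (demo_merge_arg64 is a hypothetical helper; which half is high depends on the ABI's endianness):

    static inline u64 demo_merge_arg64(unsigned int lo, unsigned int hi)
    {
            return ((u64)hi << 32) | lo;
    }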
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 09d7429d67c0ee..698a71422a14b6 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -205,7 +205,6 @@ struct ctl_table_root {
+ struct ctl_table_set default_set;
+ struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
+ void (*set_ownership)(struct ctl_table_header *head,
+- struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid);
+ int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
+ };
+@@ -242,6 +241,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+
+ void do_sysctl_args(void);
++bool sysctl_is_alias(char *param);
+ int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+@@ -287,6 +287,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
+ static inline void do_sysctl_args(void)
+ {
+ }
++
++static inline bool sysctl_is_alias(char *param)
++{
++ return false;
++}
+ #endif /* CONFIG_SYSCTL */
+
+ int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+diff --git a/include/linux/task_work.h b/include/linux/task_work.h
+index 795ef5a6842946..26b8a47f41fcac 100644
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -30,7 +30,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork,
+
+ struct callback_head *task_work_cancel_match(struct task_struct *task,
+ bool (*match)(struct callback_head *, void *data), void *data);
+-struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
++struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
++bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
+ void task_work_run(void);
+
+ static inline void exit_task_work(struct task_struct *task)
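task_work_cancel() now cancels one specific callback_head and returns a bool, while the old match-by-function behaviour lives on as task_work_cancel_func(). A usage sketch (demo_cancel is illustrative):

    static void demo_cancel(struct task_struct *task,
                            struct callback_head *cb, task_work_func_t fn)
    {
            if (task_work_cancel(task, cb))         /* exact item */
                    return;

            task_work_cancel_func(task, fn);        /* first match by func */
    }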
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 3c5efeeb024f65..9b371aa7c79623 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -377,6 +377,14 @@ struct tcp_sock {
+ * Total data bytes retransmitted
+ */
+ u32 total_retrans; /* Total retransmits for entire connection */
++ u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */
++ u16 total_rto; /* Total number of RTO timeouts, including
++ * SYN/SYN-ACK and recurring timeouts.
++ */
++ u16 total_rto_recoveries; /* Total number of RTO recoveries,
++ * including any unfinished recovery.
++ */
++ u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */
+
+ u32 urg_seq; /* Seq of received urgent pointer */
+ unsigned int keepalive_time; /* time before keep alive takes place */
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index a5ae4af955ff92..4012f440bfdcc2 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -150,6 +150,7 @@ struct thermal_cooling_device {
+ * @node: node in thermal_tz_list (in thermal_core.c)
+ * @poll_queue: delayed work for polling
+ * @notify_event: Last notification event
++ * @suspended: thermal zone suspend indicator
+ */
+ struct thermal_zone_device {
+ int id;
+@@ -183,6 +184,7 @@ struct thermal_zone_device {
+ struct list_head node;
+ struct delayed_work poll_queue;
+ enum thermal_notify_event notify_event;
++ bool suspended;
+ };
+
+ /**
+diff --git a/include/linux/topology.h b/include/linux/topology.h
+index fea32377f7c773..52f5850730b3e5 100644
+--- a/include/linux/topology.h
++++ b/include/linux/topology.h
+@@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int
+ #else
+ static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ {
+- return cpumask_nth(cpu, cpus);
++ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
+ }
+
+ static inline const struct cpumask *
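The !CONFIG_NUMA fallback for sched_numa_find_nth_cpu() now intersects with cpu_online_mask, so it can no longer hand back an offline CPU. An illustration, assuming cpus contains CPUs 0-3 and CPU 1 is offline:

    cpumask_nth(1, cpus);                       /* -> 1, possibly offline */
    cpumask_nth_and(1, cpus, cpu_online_mask);  /* -> 2, online only      */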
+diff --git a/include/linux/torture.h b/include/linux/torture.h
+index bb466eec01e426..017f0f710815a8 100644
+--- a/include/linux/torture.h
++++ b/include/linux/torture.h
+@@ -81,7 +81,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
+ }
+
+ /* Definitions for high-resolution-timer sleeps. */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++ struct torture_random_state *trsp);
+ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 21ae37e49319a1..cb8bd759e80057 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -492,6 +492,7 @@ enum {
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_PID_FILTER_BIT,
+ EVENT_FILE_FL_WAS_ENABLED_BIT,
++ EVENT_FILE_FL_FREED_BIT,
+ };
+
+ extern struct trace_event_file *trace_get_event_file(const char *instance,
+@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+ * TRIGGER_COND - When set, one or more triggers has an associated filter
+ * PID_FILTER - When set, the event is filtered based on pid
+ * WAS_ENABLED - Set when enabled to know to clear trace on module removal
++ * FREED - File descriptor is freed, all fields should be considered invalid
+ */
+ enum {
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+@@ -643,13 +645,14 @@ enum {
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+ EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
++ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
+ };
+
+ struct trace_event_file {
+ struct list_head list;
+ struct trace_event_call *event_call;
+ struct event_filter __rcu *filter;
+- struct eventfs_file *ef;
++ struct eventfs_inode *ei;
+ struct trace_array *tr;
+ struct trace_subsystem_dir *system;
+ struct list_head triggers;
+@@ -671,6 +674,7 @@ struct trace_event_file {
+ * caching and such. Which is mostly OK ;-)
+ */
+ unsigned long flags;
++ atomic_t ref; /* ref count for opened files */
+ atomic_t sm_ref; /* soft-mode reference counter */
+ atomic_t tm_ref; /* trigger-mode reference counter */
+ };
+@@ -865,7 +869,6 @@ do { \
+ struct perf_event;
+
+ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+-DECLARE_PER_CPU(int, bpf_kprobe_override);
+
+ extern int perf_trace_init(struct perf_event *event);
+ extern void perf_trace_destroy(struct perf_event *event);
+diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
+index 009072792fa36a..d03f746587167e 100644
+--- a/include/linux/tracefs.h
++++ b/include/linux/tracefs.h
+@@ -23,26 +23,72 @@ struct file_operations;
+
+ struct eventfs_file;
+
+-struct dentry *eventfs_create_events_dir(const char *name,
+- struct dentry *parent);
++/**
++ * eventfs_callback - A callback function to create dynamic files in eventfs
++ * @name: The name of the file that is to be created
++ * @mode: return the file mode for the file (RW access, etc)
++ * @data: data to pass to the created file ops
++ * @fops: the file operations of the created file
++ *
++ * The eventfs files are dynamically created. The struct eventfs_entry array
++ * is passed to eventfs_create_dir() or eventfs_create_events_dir() and will
++ * be used to create the files within those directories. When a lookup
++ * or access to a file within the directory is made, the struct eventfs_entry
++ * array is used to find a callback() with the matching name that is being
++ * referenced (for lookups, the entire array is iterated and each callback
++ * will be called).
++ *
++ * The callback will be called with @name for the name of the file to create.
++ * The callback can return less than 1 to indicate that no file should be
++ * created.
++ *
++ * If a file is to be created, then @mode should be populated with the file
++ * mode (permissions) with which the file is created. This would be
++ * used to set the created inode i_mode field.
++ *
++ * The @data should be set to the data passed to the other file operations
++ * (read, write, etc). Note, @data will also point to the data passed in
++ * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback
++ * can replace the data if it chooses to. Otherwise, the original data
++ * will be used for the file operation functions.
++ *
++ * The @fops should be set to the file operations that will be used to create
++ * the inode.
++ *
++ * NB. This callback is called while holding internal locks of the eventfs
++ * system. The callback must not call any code that might also call into
++ * the tracefs or eventfs system or it will risk creating a deadlock.
++ */
++typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
++ const struct file_operations **fops);
+
+-struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
+- struct dentry *parent);
++typedef void (*eventfs_release)(const char *name, void *data);
+
+-struct eventfs_file *eventfs_add_dir(const char *name,
+- struct eventfs_file *ef_parent);
++/**
++ * struct eventfs_entry - dynamically created eventfs file callback handler
++ * @name: The name of the dynamic file in an eventfs directory
++ * @callback: The callback to get the fops of the file when it is created
++ * @release: Optional callback invoked when the entry's @data is released
++ *
++ * See eventfs_callback() typedef for how to set up @callback.
++ */
++struct eventfs_entry {
++ const char *name;
++ eventfs_callback callback;
++ eventfs_release release;
++};
+
+-int eventfs_add_file(const char *name, umode_t mode,
+- struct eventfs_file *ef_parent, void *data,
+- const struct file_operations *fops);
++struct eventfs_inode;
+
+-int eventfs_add_events_file(const char *name, umode_t mode,
+- struct dentry *parent, void *data,
+- const struct file_operations *fops);
++struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
++ const struct eventfs_entry *entries,
++ int size, void *data);
+
+-void eventfs_remove(struct eventfs_file *ef);
++struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
++ const struct eventfs_entry *entries,
++ int size, void *data);
+
+-void eventfs_remove_events_dir(struct dentry *dentry);
++void eventfs_remove_events_dir(struct eventfs_inode *ei);
++void eventfs_remove_dir(struct eventfs_inode *ei);
+
+ struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
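A minimal sketch of the new entry-array API described in the kernel-doc above, assuming a single dynamically created "enable" file (the demo_* names and demo_enable_fops are illustrative, not part of the patch):

    static const struct file_operations demo_enable_fops; /* assumed */

    static int demo_enable_callback(const char *name, umode_t *mode,
                                    void **data,
                                    const struct file_operations **fops)
    {
            if (strcmp(name, "enable") != 0)
                    return 0;               /* <= 0: do not create   */

            *mode = 0644;                   /* inode i_mode          */
            *fops = &demo_enable_fops;
            return 1;                       /* create this file      */
    }

    static const struct eventfs_entry demo_entries[] = {
            { .name = "enable", .callback = demo_enable_callback },
    };

    static struct eventfs_inode *demo_mkdir(struct eventfs_inode *parent,
                                            void *data)
    {
            /* files appear lazily, on lookup, via the callbacks */
            return eventfs_create_dir("demo", parent, demo_entries,
                                      ARRAY_SIZE(demo_entries), data);
    }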
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index 18beff0cec1abb..b4f99f6a5385ab 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -155,6 +155,13 @@ struct serial_struct;
+ *
+ * Optional. Called under the @tty->termios_rwsem. May sleep.
+ *
++ * @ldisc_ok: ``int ()(struct tty_struct *tty, int ldisc)``
++ *
++ * This routine allows the @tty driver to decide if it can deal
++ * with a particular @ldisc.
++ *
++ * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
++ *
+ * @set_ldisc: ``void ()(struct tty_struct *tty)``
+ *
+ * This routine allows the @tty driver to be notified when the device's
+@@ -373,6 +380,7 @@ struct tty_operations {
+ void (*hangup)(struct tty_struct *tty);
+ int (*break_ctl)(struct tty_struct *tty, int state);
+ void (*flush_buffer)(struct tty_struct *tty);
++ int (*ldisc_ok)(struct tty_struct *tty, int ldisc);
+ void (*set_ldisc)(struct tty_struct *tty);
+ void (*wait_until_sent)(struct tty_struct *tty, int timeout);
+ void (*send_xchar)(struct tty_struct *tty, char ch);
+diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
+index ffe48e69b3f3ae..457879938fc198 100644
+--- a/include/linux/u64_stats_sync.h
++++ b/include/linux/u64_stats_sync.h
+@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
+ p->v++;
+ }
+
+-static inline void u64_stats_init(struct u64_stats_sync *syncp)
+-{
+- seqcount_init(&syncp->seq);
+-}
++#define u64_stats_init(syncp) \
++ do { \
++ struct u64_stats_sync *__s = (syncp); \
++ seqcount_init(&__s->seq); \
++ } while (0)
+
+ static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
+ {
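u64_stats_init() becomes a macro, presumably so that the embedded seqcount_init() expands at each call site and every user gets its own lockdep class instead of all sharing the one inside an inline function. Usage is unchanged (demo_stats is illustrative):

    struct demo_stats {
            u64_stats_t             packets;
            struct u64_stats_sync   syncp;
    };

    static void demo_stats_setup(struct demo_stats *s)
    {
            u64_stats_init(&s->syncp);  /* distinct lock class per site */
    }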
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index 43c1fb2d2c21af..00790bb5cbde66 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
+ return (num + net_hash_mix(net)) & mask;
+ }
+
++enum {
++ UDP_FLAGS_CORK, /* Cork is required */
++ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
++ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
++ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
++ UDP_FLAGS_ACCEPT_FRAGLIST,
++ UDP_FLAGS_ACCEPT_L4,
++ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
++ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
++ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
++};
++
+ struct udp_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+ #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
+ #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
+ #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
++
++ unsigned long udp_flags;
++
+ int pending; /* Any pending frames ? */
+- unsigned int corkflag; /* Cork is required */
+ __u8 encap_type; /* Is this an Encapsulation socket? */
+- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
+- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+- encap_enabled:1, /* This socket enabled encap
+- * processing; UDP tunnels and
+- * different encapsulation layer set
+- * this
+- */
+- gro_enabled:1, /* Request GRO aggregation */
+- accept_udp_l4:1,
+- accept_udp_fraglist:1;
++
+ /*
+ * Following member retains the information to create a UDP header
+ * when the socket is uncorked.
+@@ -62,12 +67,6 @@ struct udp_sock {
+ */
+ __u16 pcslen;
+ __u16 pcrlen;
+-/* indicator bits used by pcflag: */
+-#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
+-#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
+-#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
+- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
+- __u8 unused[3];
+ /*
+ * For encapsulation sockets.
+ */
+@@ -95,28 +94,39 @@ struct udp_sock {
+ int forward_threshold;
+ };
+
+-#define UDP_MAX_SEGMENTS (1 << 6UL)
++#define udp_test_bit(nr, sk) \
++ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_set_bit(nr, sk) \
++ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_test_and_set_bit(nr, sk) \
++ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_clear_bit(nr, sk) \
++ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
++#define udp_assign_bit(nr, sk, val) \
++ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
++
++#define UDP_MAX_SEGMENTS (1 << 7UL)
+
+ #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
+
+ static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
+ {
+- udp_sk(sk)->no_check6_tx = val;
++ udp_assign_bit(NO_CHECK6_TX, sk, val);
+ }
+
+ static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
+ {
+- udp_sk(sk)->no_check6_rx = val;
++ udp_assign_bit(NO_CHECK6_RX, sk, val);
+ }
+
+-static inline bool udp_get_no_check6_tx(struct sock *sk)
++static inline bool udp_get_no_check6_tx(const struct sock *sk)
+ {
+- return udp_sk(sk)->no_check6_tx;
++ return udp_test_bit(NO_CHECK6_TX, sk);
+ }
+
+-static inline bool udp_get_no_check6_rx(struct sock *sk)
++static inline bool udp_get_no_check6_rx(const struct sock *sk)
+ {
+- return udp_sk(sk)->no_check6_rx;
++ return udp_test_bit(NO_CHECK6_RX, sk);
+ }
+
+ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+@@ -130,15 +140,45 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ }
+ }
+
++DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
++#if IS_ENABLED(CONFIG_IPV6)
++DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
++#endif
++
++static inline bool udp_encap_needed(void)
++{
++ if (static_branch_unlikely(&udp_encap_needed_key))
++ return true;
++
++#if IS_ENABLED(CONFIG_IPV6)
++ if (static_branch_unlikely(&udpv6_encap_needed_key))
++ return true;
++#endif
++
++ return false;
++}
++
+ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ {
+ if (!skb_is_gso(skb))
+ return false;
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ !udp_test_bit(ACCEPT_L4, sk))
++ return true;
++
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
++ !udp_test_bit(ACCEPT_FRAGLIST, sk))
+ return true;
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
++ /* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
++ * land in a tunnel as the socket check in udp_gro_receive cannot be
++ * foolproof.
++ */
++ if (udp_encap_needed() &&
++ READ_ONCE(udp_sk(sk)->encap_rcv) &&
++ !(skb_shinfo(skb)->gso_type &
++ (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
+ return true;
+
+ return false;
+@@ -146,8 +186,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+
+ static inline void udp_allow_gso(struct sock *sk)
+ {
+- udp_sk(sk)->accept_udp_l4 = 1;
+- udp_sk(sk)->accept_udp_fraglist = 1;
++ udp_set_bit(ACCEPT_L4, sk);
++ udp_set_bit(ACCEPT_FRAGLIST, sk);
+ }
+
+ #define udp_portaddr_for_each_entry(__sk, list) \
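With the socket flags consolidated into one unsigned long, the accessors above take the UDP_FLAGS_* suffix and update the word atomically. A sketch (demo_udp_setup is illustrative):

    static void demo_udp_setup(struct sock *sk, bool cork)
    {
            udp_assign_bit(CORK, sk, cork);         /* set/clear from bool */

            if (udp_test_bit(GRO_ENABLED, sk))      /* atomic test         */
                    udp_set_bit(ACCEPT_L4, sk);
    }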
+diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
+index f46e0ca0169c72..d91e32aff5a134 100644
+--- a/include/linux/uprobes.h
++++ b/include/linux/uprobes.h
+@@ -76,6 +76,8 @@ struct uprobe_task {
+ struct uprobe *active_uprobe;
+ unsigned long xol_vaddr;
+
++ struct arch_uprobe *auprobe;
++
+ struct return_instance *return_instances;
+ unsigned int depth;
+ };
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 61d4f0b793dcdc..d0e19ac3ba6ce4 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -372,8 +372,9 @@ struct hc_driver {
+ * or bandwidth constraints.
+ */
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+- /* Returns the hardware-chosen device address */
+- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
++ /* Set the hardware-chosen device address */
++ int (*address_device)(struct usb_hcd *, struct usb_device *udev,
++ unsigned int timeout_ms);
+ /* prepares the hardware to send commands to the device */
+ int (*enable_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Notifies the HCD after a hub descriptor is fetched.
+diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
+index b513749582d775..e4de6bc1f69b62 100644
+--- a/include/linux/usb/phy.h
++++ b/include/linux/usb/phy.h
+@@ -144,10 +144,6 @@ struct usb_phy {
+ */
+ int (*set_wakeup)(struct usb_phy *x, bool enabled);
+
+- /* notify phy port status change */
+- int (*notify_port_status)(struct usb_phy *x, int port,
+- u16 portstatus, u16 portchange);
+-
+ /* notify phy connect status change */
+ int (*notify_connect)(struct usb_phy *x,
+ enum usb_device_speed speed);
+@@ -320,15 +316,6 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
+ return 0;
+ }
+
+-static inline int
+-usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange)
+-{
+- if (x && x->notify_port_status)
+- return x->notify_port_status(x, port, portstatus, portchange);
+- else
+- return 0;
+-}
+-
+ static inline int
+ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
+ {
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index eeb7c2157c72fb..59409c1fc3dee7 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -72,4 +72,7 @@
+ /* device has endpoints that should be ignored */
+ #define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
+
++/* short SET_ADDRESS request timeout */
++#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
+index 287e9d83fb8bc3..33a4c146dc19c4 100644
+--- a/include/linux/usb/r8152.h
++++ b/include/linux/usb/r8152.h
+@@ -30,6 +30,7 @@
+ #define VENDOR_ID_NVIDIA 0x0955
+ #define VENDOR_ID_TPLINK 0x2357
+ #define VENDOR_ID_DLINK 0x2001
++#define VENDOR_ID_ASUS 0x0b05
+
+ #if IS_REACHABLE(CONFIG_USB_RTL8152)
+ extern u8 rtl8152_get_version(struct usb_interface *intf);
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 9f08a584d70785..0b9f1e598e3a6b 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -76,8 +76,23 @@ struct usbnet {
+ # define EVENT_LINK_CHANGE 11
+ # define EVENT_SET_RX_MODE 12
+ # define EVENT_NO_IP_ALIGN 13
++/* This one is special, as it indicates that the device is going away;
++ * there are cyclic dependencies between tasklet, timer and bh
++ * that must be broken.
++ */
++# define EVENT_UNPLUG 31
+ };
+
++static inline bool usbnet_going_away(struct usbnet *ubn)
++{
++ return test_bit(EVENT_UNPLUG, &ubn->flags);
++}
++
++static inline void usbnet_mark_going_away(struct usbnet *ubn)
++{
++ set_bit(EVENT_UNPLUG, &ubn->flags);
++}
++
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
+ {
+ return to_usb_driver(intf->dev.driver);
+diff --git a/include/linux/verification.h b/include/linux/verification.h
+index f34e50ebcf60a4..cb2d47f2809103 100644
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -8,6 +8,7 @@
+ #ifndef _LINUX_VERIFICATION_H
+ #define _LINUX_VERIFICATION_H
+
++#include <linux/errno.h>
+ #include <linux/types.h>
+
+ /*
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index 454e9295970c49..5ac5f182ce0bb6 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ /*
+ * External user API
+ */
+-#if IS_ENABLED(CONFIG_VFIO_GROUP)
+ struct iommu_group *vfio_file_iommu_group(struct file *file);
++
++#if IS_ENABLED(CONFIG_VFIO_GROUP)
+ bool vfio_file_is_group(struct file *file);
+ bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
+ #else
+-static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
+-{
+- return NULL;
+-}
+-
+ static inline bool vfio_file_is_group(struct file *file)
+ {
+ return false;
+@@ -353,6 +349,7 @@ struct virqfd {
+ wait_queue_entry_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
++ struct work_struct flush_inject;
+ struct virqfd **pvirqfd;
+ };
+
+@@ -360,5 +357,6 @@ int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
+ void (*thread)(void *, void *), void *data,
+ struct virqfd **pvirqfd, int fd);
+ void vfio_virqfd_disable(struct virqfd **pvirqfd);
++void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
+
+ #endif /* VFIO_H */
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 27cc1d4643219a..02a9f4dc594d02 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -3,6 +3,8 @@
+ #define _LINUX_VIRTIO_NET_H
+
+ #include <linux/if_vlan.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
+ #include <linux/udp.h>
+ #include <uapi/linux/tcp.h>
+ #include <uapi/linux/virtio_net.h>
+@@ -49,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian)
+ {
++ unsigned int nh_min_len = sizeof(struct iphdr);
+ unsigned int gso_type = 0;
+ unsigned int thlen = 0;
+ unsigned int p_off = 0;
+@@ -65,6 +68,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ gso_type = SKB_GSO_TCPV6;
+ ip_proto = IPPROTO_TCP;
+ thlen = sizeof(struct tcphdr);
++ nh_min_len = sizeof(struct ipv6hdr);
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+@@ -99,8 +103,11 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+
+ if (!skb_partial_csum_set(skb, start, off))
+ return -EINVAL;
++ if (skb_transport_offset(skb) < nh_min_len)
++ return -EINVAL;
+
+- p_off = skb_transport_offset(skb) + thlen;
++ nh_min_len = skb_transport_offset(skb);
++ p_off = nh_min_len + thlen;
+ if (!pskb_may_pull(skb, p_off))
+ return -EINVAL;
+ } else {
+@@ -140,7 +147,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+
+ skb_set_transport_header(skb, keys.control.thoff);
+ } else if (gso_type) {
+- p_off = thlen;
++ p_off = nh_min_len + thlen;
+ if (!pskb_may_pull(skb, p_off))
+ return -EINVAL;
+ }
+@@ -166,6 +173,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ if (gso_type != SKB_GSO_UDP_L4)
+ return -EINVAL;
+ break;
++ case SKB_GSO_TCPV4:
++ case SKB_GSO_TCPV6:
++ if (skb->ip_summed == CHECKSUM_PARTIAL &&
++ skb->csum_offset != offsetof(struct tcphdr, check))
++ return -EINVAL;
++ break;
+ }
+
+ /* Kernel has a special handling for GSO_BY_FRAGS. */
+diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
+index c58453699ee987..fbf30721bac9e5 100644
+--- a/include/linux/virtio_vsock.h
++++ b/include/linux/virtio_vsock.h
+@@ -246,4 +246,5 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+ void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
+ int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
+ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
++int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
+ #endif /* _LINUX_VIRTIO_VSOCK_H */
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 1c1d06804d4509..52c6dd6d80ac09 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -84,7 +84,7 @@ enum {
+ WORK_BUSY_RUNNING = 1 << 1,
+
+ /* maximum string length for set_worker_desc() */
+- WORKER_DESC_LEN = 24,
++ WORKER_DESC_LEN = 32,
+ };
+
+ /* Convenience constants - of type 'unsigned long', not 'enum'! */
+@@ -274,18 +274,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ * to generate better code.
+ */
+ #ifdef CONFIG_LOCKDEP
+-#define __INIT_WORK(_work, _func, _onstack) \
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
+ do { \
+- static struct lock_class_key __key; \
+- \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
++ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->func = (_func); \
+ } while (0)
+ #else
+-#define __INIT_WORK(_work, _func, _onstack) \
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
+ do { \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+@@ -294,12 +292,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ } while (0)
+ #endif
+
++#define __INIT_WORK(_work, _func, _onstack) \
++ do { \
++ static __maybe_unused struct lock_class_key __key; \
++ \
++ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
++ } while (0)
++
+ #define INIT_WORK(_work, _func) \
+ __INIT_WORK((_work), (_func), 0)
+
+ #define INIT_WORK_ONSTACK(_work, _func) \
+ __INIT_WORK((_work), (_func), 1)
+
++#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
++ __INIT_WORK_KEY((_work), (_func), 1, _key)
++
+ #define __INIT_DELAYED_WORK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
+@@ -693,8 +701,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+ return fn(arg);
+ }
+ #else
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key);
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu(_cpu, _fn, _arg) \
++({ \
++ static struct lock_class_key __key; \
++ \
++ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
++})
++
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key);
++
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu_safe(_cpu, _fn, _arg) \
++({ \
++ static struct lock_class_key __key; \
++ \
++ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
++})
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
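The new *_KEY variants let the caller supply the lockdep key, so each call site of a shared helper gets its own class; INIT_WORK_ONSTACK_KEY() pairs with destroy_work_on_stack() as usual. A sketch (the demo_* names are illustrative):

    static void demo_fn(struct work_struct *work) { }

    static void demo_run(void)
    {
            static struct lock_class_key key;   /* one class per site */
            struct work_struct work;

            INIT_WORK_ONSTACK_KEY(&work, demo_fn, &key);
            schedule_work(&work);
            flush_work(&work);
            destroy_work_on_stack(&work);
    }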
+diff --git a/include/linux/xarray.h b/include/linux/xarray.h
+index cb571dfcf4b167..d9d479334c9e65 100644
+--- a/include/linux/xarray.h
++++ b/include/linux/xarray.h
+@@ -1548,6 +1548,7 @@ void xas_create_range(struct xa_state *);
+
+ #ifdef CONFIG_XARRAY_MULTI
+ int xa_get_order(struct xarray *, unsigned long index);
++int xas_get_order(struct xa_state *xas);
+ void xas_split(struct xa_state *, void *entry, unsigned int order);
+ void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+ #else
+@@ -1556,6 +1557,11 @@ static inline int xa_get_order(struct xarray *xa, unsigned long index)
+ return 0;
+ }
+
++static inline int xas_get_order(struct xa_state *xas)
++{
++ return 0;
++}
++
+ static inline void xas_split(struct xa_state *xas, void *entry,
+ unsigned int order)
+ {
+diff --git a/include/media/cec.h b/include/media/cec.h
+index 9c007f83569aaf..ffd17371302ca9 100644
+--- a/include/media/cec.h
++++ b/include/media/cec.h
+@@ -247,6 +247,7 @@ struct cec_adapter {
+ u16 phys_addr;
+ bool needs_hpd;
+ bool is_enabled;
++ bool is_claiming_log_addrs;
+ bool is_configuring;
+ bool must_reconfigure;
+ bool is_configured;
+diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
+index bdc654a455216b..783bda6d5cc3fd 100644
+--- a/include/media/ipu-bridge.h
++++ b/include/media/ipu-bridge.h
+@@ -108,7 +108,7 @@ struct ipu_node_names {
+ char ivsc_sensor_port[7];
+ char ivsc_ipu_port[7];
+ char endpoint[11];
+- char remote_port[7];
++ char remote_port[9];
+ char vcm[16];
+ };
+
+diff --git a/include/media/media-entity.h b/include/media/media-entity.h
+index 2b6cd343ee9e01..4d95893c898460 100644
+--- a/include/media/media-entity.h
++++ b/include/media/media-entity.h
+@@ -225,6 +225,7 @@ enum media_pad_signal_type {
+ * @graph_obj: Embedded structure containing the media object common data
+ * @entity: Entity this pad belongs to
+ * @index: Pad index in the entity pads array, numbered from 0 to n
++ * @num_links: Number of links connected to this pad
+ * @sig_type: Type of the signal inside a media pad
+ * @flags: Pad flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+@@ -236,6 +237,7 @@ struct media_pad {
+ struct media_gobj graph_obj; /* must be first field in struct */
+ struct media_entity *entity;
+ u16 index;
++ u16 num_links;
+ enum media_pad_signal_type sig_type;
+ unsigned long flags;
+
+diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
+index 0f6803e4b17e97..8b0b361b464ccc 100644
+--- a/include/media/v4l2-cci.h
++++ b/include/media/v4l2-cci.h
+@@ -7,6 +7,8 @@
+ #ifndef _V4L2_CCI_H
+ #define _V4L2_CCI_H
+
++#include <linux/bitfield.h>
++#include <linux/bits.h>
+ #include <linux/types.h>
+
+ struct i2c_client;
+@@ -33,11 +35,20 @@ struct cci_reg_sequence {
+ #define CCI_REG_WIDTH_SHIFT 16
+ #define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+
++#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
++#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
++#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x)
++#define CCI_REG_LE BIT(20)
++
+ #define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+ #define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x))
++#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x))
+
+ /**
+ * cci_read() - Read a value from a single CCI register
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index d9fca929c10b53..ab2a7ef61d420f 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -446,7 +446,9 @@ enum v4l2_subdev_pre_streamon_flags {
+ * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the
+ * sub-device. Failure on stop will remove any resources acquired in
+ * streaming start, while the error code is still returned by the driver.
+- * Also see call_s_stream wrapper in v4l2-subdev.c.
++ * The caller shall track the subdev state, and shall not start or stop an
++ * already started or stopped subdev. Also see call_s_stream wrapper in
++ * v4l2-subdev.c.
+ *
+ * @g_pixelaspect: callback to return the pixelaspect ratio.
+ *
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 82da55101b5a30..facb7a469efad6 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -31,17 +31,22 @@ struct prefix_info {
+ __u8 length;
+ __u8 prefix_len;
+
++ union __packed {
++ __u8 flags;
++ struct __packed {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+- __u8 onlink : 1,
++ __u8 onlink : 1,
+ autoconf : 1,
+ reserved : 6;
+ #elif defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 reserved : 6,
++ __u8 reserved : 6,
+ autoconf : 1,
+ onlink : 1;
+ #else
+ #error "Please fix <asm/byteorder.h>"
+ #endif
++ };
++ };
+ __be32 valid;
+ __be32 prefered;
+ __be32 reserved2;
+@@ -49,6 +54,9 @@ struct prefix_info {
+ struct in6_addr prefix;
+ };
+
++/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
++static_assert(sizeof(struct prefix_info) == 32);
++
+ #include <linux/ipv6.h>
+ #include <linux/netdevice.h>
+ #include <net/if_inet6.h>
+@@ -429,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
+ refcount_inc(&ifp->refcnt);
+ }
+
++static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
++{
++ return refcount_inc_not_zero(&ifp->refcnt);
++}
+
+ /*
+ * compute link-local solicited-node multicast address
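Wrapping the PIO bitfields in an anonymous union exposes the whole flags octet as one byte, and the static_assert pins the on-wire size. A sketch (demo_pio_flags is illustrative):

    static u8 demo_pio_flags(const struct prefix_info *pinfo)
    {
            /* same storage as the onlink/autoconf/reserved bitfields */
            return pinfo->flags;
    }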
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 824c258143a3ab..77bf30203d3cf6 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -46,12 +46,6 @@ struct scm_stat {
+
+ #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
+
+-#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
+-#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
+-#define unix_state_lock_nested(s) \
+- spin_lock_nested(&unix_sk(s)->lock, \
+- SINGLE_DEPTH_NESTING)
+-
+ /* The AF_UNIX socket */
+ struct unix_sock {
+ /* WARNING: sk has to be the first member */
+@@ -61,7 +55,7 @@ struct unix_sock {
+ struct mutex iolock, bindlock;
+ struct sock *peer;
+ struct list_head link;
+- atomic_long_t inflight;
++ unsigned long inflight;
+ spinlock_t lock;
+ unsigned long gc_flags;
+ #define UNIX_GC_CANDIDATE 0
+@@ -75,6 +69,24 @@ struct unix_sock {
+ };
+
+ #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
++#define unix_peer(sk) (unix_sk(sk)->peer)
++
++#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
++#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
++enum unix_socket_lock_class {
++ U_LOCK_NORMAL,
++ U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
++ U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
++ U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
++ * candidates to close a small race window.
++ */
++};
++
++static inline void unix_state_lock_nested(struct sock *sk,
++ enum unix_socket_lock_class subclass)
++{
++ spin_lock_nested(&unix_sk(sk)->lock, subclass);
++}
+
+ #define peer_wait peer_wq.wait
+
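The nested-lock helper now takes an explicit subclass instead of hard-coding SINGLE_DEPTH_NESTING, so lockdep can tell the double-lock, diag and GC cases apart. A double-lock sketch (the sk1/sk2 ordering is assumed to be established by the caller):

    static void demo_double_lock(struct sock *sk1, struct sock *sk2)
    {
            unix_state_lock(sk1);
            unix_state_lock_nested(sk2, U_LOCK_SECOND);
            /* ... */
            unix_state_unlock(sk2);
            unix_state_unlock(sk1);
    }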
+diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
+index b01cf9ac243715..f8b09a82f62e1d 100644
+--- a/include/net/af_vsock.h
++++ b/include/net/af_vsock.h
+@@ -137,7 +137,6 @@ struct vsock_transport {
+ u64 (*stream_rcvhiwat)(struct vsock_sock *);
+ bool (*stream_is_active)(struct vsock_sock *);
+ bool (*stream_allow)(u32 cid, u32 port);
+- int (*set_rcvlowat)(struct vsock_sock *vsk, int val);
+
+ /* SEQ_PACKET. */
+ ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+@@ -168,6 +167,7 @@ struct vsock_transport {
+ struct vsock_transport_send_notify_data *);
+ /* sk_lock held by the caller */
+ void (*notify_buffer_size)(struct vsock_sock *, u64 *);
++ int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);
+
+ /* Shutdown. */
+ int (*shutdown)(struct vsock_sock *, int);
+@@ -227,8 +227,12 @@ struct vsock_tap {
+ int vsock_add_tap(struct vsock_tap *vt);
+ int vsock_remove_tap(struct vsock_tap *vt);
+ void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
++int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
++ int flags);
+ int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
++int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
++ size_t len, int flags);
+ int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
+
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index 0d939e5aee4eca..c2a85fd3f5ea40 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -216,7 +216,7 @@ typedef struct {
+ struct ctl_table;
+
+ typedef struct ax25_dev {
+- struct ax25_dev *next;
++ struct list_head list;
+
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+@@ -330,7 +330,6 @@ int ax25_addr_size(const ax25_digi *);
+ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+
+ /* ax25_dev.c */
+-extern ax25_dev *ax25_dev_list;
+ extern spinlock_t ax25_dev_lock;
+
+ #if IS_ENABLED(CONFIG_AX25)
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index aa90adc3b2a4d7..e4a6831133f818 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -164,6 +164,8 @@ struct bt_voice {
+ #define BT_ISO_QOS_BIG_UNSET 0xff
+ #define BT_ISO_QOS_BIS_UNSET 0xff
+
++#define BT_ISO_SYNC_TIMEOUT 0x07d0 /* 20 secs */
++
+ struct bt_iso_io_qos {
+ __u32 interval;
+ __u16 latency;
+@@ -583,6 +585,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
+ return skb;
+ }
+
++static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
++ sockptr_t src, size_t src_size)
++{
++ if (dst_size > src_size)
++ return -EINVAL;
++
++ return copy_from_sockptr(dst, src, dst_size);
++}
++
+ int bt_to_errno(u16 code);
+ __u8 bt_status(int err);
+
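bt_copy_from_sockptr() rejects user buffers shorter than the destination before copying, closing a class of short-optlen setsockopt bugs. A sketch (demo_setsockopt is illustrative):

    static int demo_setsockopt(sockptr_t optval, unsigned int optlen)
    {
            u32 opt;
            int err;

            err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
            if (err)
                    return err;

            /* ... use opt ... */
            return 0;
    }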
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 87d92accc26ea9..2129d071c3725f 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1,6 +1,7 @@
+ /*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
++ Copyright 2023 NXP
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+@@ -32,9 +33,6 @@
+ #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
+
+ #define HCI_LINK_KEY_SIZE 16
+-#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
+-
+-#define HCI_MAX_AMP_ASSOC_SIZE 672
+
+ #define HCI_MAX_CPB_DATA_SIZE 252
+
+@@ -70,26 +68,6 @@
+ #define HCI_SMD 9
+ #define HCI_VIRTIO 10
+
+-/* HCI controller types */
+-#define HCI_PRIMARY 0x00
+-#define HCI_AMP 0x01
+-
+-/* First BR/EDR Controller shall have ID = 0 */
+-#define AMP_ID_BREDR 0x00
+-
+-/* AMP controller types */
+-#define AMP_TYPE_BREDR 0x00
+-#define AMP_TYPE_80211 0x01
+-
+-/* AMP controller status */
+-#define AMP_STATUS_POWERED_DOWN 0x00
+-#define AMP_STATUS_BLUETOOTH_ONLY 0x01
+-#define AMP_STATUS_NO_CAPACITY 0x02
+-#define AMP_STATUS_LOW_CAPACITY 0x03
+-#define AMP_STATUS_MEDIUM_CAPACITY 0x04
+-#define AMP_STATUS_HIGH_CAPACITY 0x05
+-#define AMP_STATUS_FULL_CAPACITY 0x06
+-
+ /* HCI device quirks */
+ enum {
+ /* When this quirk is set, the HCI Reset command is send when
+@@ -175,6 +153,15 @@ enum {
+ */
+ HCI_QUIRK_USE_BDADDR_PROPERTY,
+
++ /* When this quirk is set, the Bluetooth Device Address provided by
++ * the 'local-bd-address' fwnode property is incorrectly specified in
++ * big-endian order.
++ *
++ * This quirk can be set before hci_register_dev is called or
++ * during the hdev->setup vendor callback.
++ */
++ HCI_QUIRK_BDADDR_PROPERTY_BROKEN,
++
+ /* When this quirk is set, the duplicate filtering during
+ * scanning is based on Bluetooth devices addresses. To allow
+ * RSSI based updates, restart scanning if needed.
+@@ -329,6 +316,14 @@ enum {
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_LE_CODED,
++
++ /*
++ * When this quirk is set, the HCI_OP_READ_ENC_KEY_SIZE command is
++ * skipped during an HCI_EV_ENCRYPT_CHANGE event. This is required
++ * for Actions Semiconductor ATS2851 based controllers, which erroneously
++ * claim to support it.
++ */
++ HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
+ };
+
+ /* HCI device flags */
+@@ -392,7 +387,6 @@ enum {
+ HCI_LIMITED_PRIVACY,
+ HCI_RPA_EXPIRED,
+ HCI_RPA_RESOLVING,
+- HCI_HS_ENABLED,
+ HCI_LE_ENABLED,
+ HCI_ADVERTISING,
+ HCI_ADVERTISING_CONNECTABLE,
+@@ -436,7 +430,7 @@ enum {
+ #define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
+ #define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
+ #define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+-#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
++#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
+ #define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
+ #define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
+
+@@ -510,7 +504,6 @@ enum {
+ #define ESCO_LINK 0x02
+ /* Low Energy links do not have defined link type. Use invented one */
+ #define LE_LINK 0x80
+-#define AMP_LINK 0x81
+ #define ISO_LINK 0x82
+ #define INVALID_LINK 0xff
+
+@@ -652,6 +645,7 @@ enum {
+ #define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
+ #define HCI_ERROR_MEMORY_EXCEEDED 0x07
+ #define HCI_ERROR_CONNECTION_TIMEOUT 0x08
++#define HCI_ERROR_COMMAND_DISALLOWED 0x0c
+ #define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
+ #define HCI_ERROR_REJ_BAD_ADDR 0x0f
+ #define HCI_ERROR_INVALID_PARAMETERS 0x12
+@@ -660,6 +654,7 @@ enum {
+ #define HCI_ERROR_REMOTE_POWER_OFF 0x15
+ #define HCI_ERROR_LOCAL_HOST_TERM 0x16
+ #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
++#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
+ #define HCI_ERROR_INVALID_LL_PARAMS 0x1e
+ #define HCI_ERROR_UNSPECIFIED 0x1f
+ #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+@@ -673,6 +668,8 @@ enum {
+ #define HCI_TX_POWER_INVALID 127
+ #define HCI_RSSI_INVALID 127
+
++#define HCI_SYNC_HANDLE_INVALID 0xffff
++
+ #define HCI_ROLE_MASTER 0x00
+ #define HCI_ROLE_SLAVE 0x01
+
+@@ -922,56 +919,6 @@ struct hci_cp_io_capability_neg_reply {
+ __u8 reason;
+ } __packed;
+
+-#define HCI_OP_CREATE_PHY_LINK 0x0435
+-struct hci_cp_create_phy_link {
+- __u8 phy_handle;
+- __u8 key_len;
+- __u8 key_type;
+- __u8 key[HCI_AMP_LINK_KEY_SIZE];
+-} __packed;
+-
+-#define HCI_OP_ACCEPT_PHY_LINK 0x0436
+-struct hci_cp_accept_phy_link {
+- __u8 phy_handle;
+- __u8 key_len;
+- __u8 key_type;
+- __u8 key[HCI_AMP_LINK_KEY_SIZE];
+-} __packed;
+-
+-#define HCI_OP_DISCONN_PHY_LINK 0x0437
+-struct hci_cp_disconn_phy_link {
+- __u8 phy_handle;
+- __u8 reason;
+-} __packed;
+-
+-struct ext_flow_spec {
+- __u8 id;
+- __u8 stype;
+- __le16 msdu;
+- __le32 sdu_itime;
+- __le32 acc_lat;
+- __le32 flush_to;
+-} __packed;
+-
+-#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
+-#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
+-struct hci_cp_create_accept_logical_link {
+- __u8 phy_handle;
+- struct ext_flow_spec tx_flow_spec;
+- struct ext_flow_spec rx_flow_spec;
+-} __packed;
+-
+-#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
+-struct hci_cp_disconn_logical_link {
+- __le16 log_handle;
+-} __packed;
+-
+-#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
+-struct hci_cp_logical_link_cancel {
+- __u8 phy_handle;
+- __u8 flow_spec_id;
+-} __packed;
+-
+ #define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
+ struct hci_coding_format {
+ __u8 id;
+@@ -1593,46 +1540,6 @@ struct hci_rp_read_enc_key_size {
+ __u8 key_size;
+ } __packed;
+
+-#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
+-struct hci_rp_read_local_amp_info {
+- __u8 status;
+- __u8 amp_status;
+- __le32 total_bw;
+- __le32 max_bw;
+- __le32 min_latency;
+- __le32 max_pdu;
+- __u8 amp_type;
+- __le16 pal_cap;
+- __le16 max_assoc_size;
+- __le32 max_flush_to;
+- __le32 be_flush_to;
+-} __packed;
+-
+-#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
+-struct hci_cp_read_local_amp_assoc {
+- __u8 phy_handle;
+- __le16 len_so_far;
+- __le16 max_len;
+-} __packed;
+-struct hci_rp_read_local_amp_assoc {
+- __u8 status;
+- __u8 phy_handle;
+- __le16 rem_len;
+- __u8 frag[];
+-} __packed;
+-
+-#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
+-struct hci_cp_write_remote_amp_assoc {
+- __u8 phy_handle;
+- __le16 len_so_far;
+- __le16 rem_len;
+- __u8 frag[];
+-} __packed;
+-struct hci_rp_write_remote_amp_assoc {
+- __u8 status;
+- __u8 phy_handle;
+-} __packed;
+-
+ #define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
+
+ #define HCI_OP_ENABLE_DUT_MODE 0x1803
+@@ -1644,6 +1551,15 @@ struct hci_cp_le_set_event_mask {
+ __u8 mask[8];
+ } __packed;
+
++/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E
++ * 7.8.2 LE Read Buffer Size command
++ * MAX_LE_MTU is 0xffff.
++ * 0 is also valid: it means that no dedicated LE buffer exists, in
++ * which case the HCI_Read_Buffer_Size command should be used and the
++ * MTU is shared between BR/EDR and LE.
++ */
++#define HCI_MIN_LE_MTU 0x001b
++
+ #define HCI_OP_LE_READ_BUFFER_SIZE 0x2002
+ struct hci_rp_le_read_buffer_size {
+ __u8 status;
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c33348ba1657e3..0f50c0cefcb7dc 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -126,7 +126,6 @@ enum suspended_state {
+ struct hci_conn_hash {
+ struct list_head list;
+ unsigned int acl_num;
+- unsigned int amp_num;
+ unsigned int sco_num;
+ unsigned int iso_num;
+ unsigned int le_num;
+@@ -336,25 +335,18 @@ struct adv_monitor {
+ /* Default authenticated payload timeout 30s */
+ #define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8
+
+-struct amp_assoc {
+- __u16 len;
+- __u16 offset;
+- __u16 rem_len;
+- __u16 len_so_far;
+- __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
+-};
+-
+ #define HCI_MAX_PAGES 3
+
+ struct hci_dev {
+ struct list_head list;
+ struct mutex lock;
+
++ struct ida unset_handle_ida;
++
+ const char *name;
+ unsigned long flags;
+ __u16 id;
+ __u8 bus;
+- __u8 dev_type;
+ bdaddr_t bdaddr;
+ bdaddr_t setup_addr;
+ bdaddr_t public_addr;
+@@ -460,21 +452,6 @@ struct hci_dev {
+ __u16 sniff_min_interval;
+ __u16 sniff_max_interval;
+
+- __u8 amp_status;
+- __u32 amp_total_bw;
+- __u32 amp_max_bw;
+- __u32 amp_min_latency;
+- __u32 amp_max_pdu;
+- __u8 amp_type;
+- __u16 amp_pal_cap;
+- __u16 amp_assoc_size;
+- __u32 amp_max_flush_to;
+- __u32 amp_be_flush_to;
+-
+- struct amp_assoc loc_assoc;
+-
+- __u8 flow_ctl_mode;
+-
+ unsigned int auto_accept_delay;
+
+ unsigned long quirks;
+@@ -494,11 +471,6 @@ struct hci_dev {
+ unsigned int le_pkts;
+ unsigned int iso_pkts;
+
+- __u16 block_len;
+- __u16 block_mtu;
+- __u16 num_blocks;
+- __u16 block_cnt;
+-
+ unsigned long acl_last_tx;
+ unsigned long sco_last_tx;
+ unsigned long le_last_tx;
+@@ -546,6 +518,7 @@ struct hci_dev {
+ __u32 req_status;
+ __u32 req_result;
+ struct sk_buff *req_skb;
++ struct sk_buff *req_rsp;
+
+ void *smp_data;
+ void *smp_bredr_data;
+@@ -699,6 +672,7 @@ struct hci_conn {
+ __u16 handle;
+ __u16 sync_handle;
+ __u16 state;
++ __u16 mtu;
+ __u8 mode;
+ __u8 type;
+ __u8 role;
+@@ -767,7 +741,6 @@ struct hci_conn {
+ void *l2cap_data;
+ void *sco_data;
+ void *iso_data;
+- struct amp_mgr *amp_mgr;
+
+ struct list_head link_list;
+ struct hci_conn *parent;
+@@ -794,7 +767,6 @@ struct hci_chan {
+ struct sk_buff_head data_q;
+ unsigned int sent;
+ __u8 state;
+- bool amp;
+ };
+
+ struct hci_conn_params {
+@@ -950,7 +922,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
+ /* ----- HCI Connections ----- */
+ enum {
+ HCI_CONN_AUTH_PEND,
+- HCI_CONN_REAUTH_PEND,
+ HCI_CONN_ENCRYPT_PEND,
+ HCI_CONN_RSWITCH_PEND,
+ HCI_CONN_MODE_CHANGE_PEND,
+@@ -1004,9 +975,6 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+ case ACL_LINK:
+ h->acl_num++;
+ break;
+- case AMP_LINK:
+- h->amp_num++;
+- break;
+ case LE_LINK:
+ h->le_num++;
+ if (c->role == HCI_ROLE_SLAVE)
+@@ -1033,9 +1001,6 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+ case ACL_LINK:
+ h->acl_num--;
+ break;
+- case AMP_LINK:
+- h->amp_num--;
+- break;
+ case LE_LINK:
+ h->le_num--;
+ if (c->role == HCI_ROLE_SLAVE)
+@@ -1057,8 +1022,6 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+ switch (type) {
+ case ACL_LINK:
+ return h->acl_num;
+- case AMP_LINK:
+- return h->amp_num;
+ case LE_LINK:
+ return h->le_num;
+ case SCO_LINK:
+@@ -1075,7 +1038,25 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+ {
+ struct hci_conn_hash *c = &hdev->conn_hash;
+
+- return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
++ return c->acl_num + c->sco_num + c->le_num + c->iso_num;
++}
++
++static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
++{
++ struct hci_conn_hash *h = &hdev->conn_hash;
++ struct hci_conn *c;
++
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(c, &h->list, list) {
++ if (c == conn) {
++ rcu_read_unlock();
++ return true;
++ }
++ }
++ rcu_read_unlock();
++
++ return false;
+ }
+
+ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
+@@ -1225,11 +1206,11 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
+ continue;
+
+ /* Match CIG ID if set */
+- if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig)
++ if (cig != c->iso_qos.ucast.cig)
+ continue;
+
+ /* Match CIS ID if set */
+- if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis)
++ if (id != c->iso_qos.ucast.cis)
+ continue;
+
+ /* Match destination address if set */
+@@ -1314,7 +1295,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *
+ }
+
+ static inline struct hci_conn *
+-hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
++hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
+ {
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+@@ -1336,6 +1317,29 @@ hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
+ return NULL;
+ }
+
++static inline struct hci_conn *
++hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
++{
++ struct hci_conn_hash *h = &hdev->conn_hash;
++ struct hci_conn *c;
++
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(c, &h->list, list) {
++ if (c->type != ISO_LINK ||
++ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
++ continue;
++
++ if (c->sync_handle == sync_handle) {
++ rcu_read_unlock();
++ return c;
++ }
++ }
++ rcu_read_unlock();
++
++ return NULL;
++}
++
+ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
+ __u8 type, __u16 state)
+ {
+@@ -1426,10 +1430,11 @@ int hci_le_create_cis_pending(struct hci_dev *hdev);
+ int hci_conn_check_create_cis(struct hci_conn *conn);
+
+ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+- u8 role);
++ u8 role, u16 handle);
++struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
++ bdaddr_t *dst, u8 role);
+ void hci_conn_del(struct hci_conn *conn);
+ void hci_conn_hash_flush(struct hci_dev *hdev);
+-void hci_conn_check_pending(struct hci_dev *hdev);
+
+ struct hci_chan *hci_chan_create(struct hci_conn *conn);
+ void hci_chan_del(struct hci_chan *chan);
+@@ -1443,6 +1448,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role);
++void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
+ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type,
+ enum conn_reasons conn_reason);
+@@ -1536,10 +1542,6 @@ static inline void hci_conn_drop(struct hci_conn *conn)
+ }
+ break;
+
+- case AMP_LINK:
+- timeo = conn->disc_timeout;
+- break;
+-
+ default:
+ timeo = 0;
+ break;
+@@ -1833,6 +1835,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ #define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
+ (hdev->commands[39] & 0x04))
+
++#define read_key_size_capable(dev) \
++ ((dev)->commands[20] & 0x10 && \
++ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
++
+ /* Use enhanced synchronous connection if command is supported and its quirk
+ * has not been set.
+ */
+@@ -2078,18 +2084,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
+ {
+ u16 max_latency;
+
+- if (min > max || min < 6 || max > 3200)
++ if (min > max) {
++ BT_WARN("min %d > max %d", min, max);
+ return -EINVAL;
++ }
++
++ if (min < 6) {
++ BT_WARN("min %d < 6", min);
++ return -EINVAL;
++ }
++
++ if (max > 3200) {
++ BT_WARN("max %d > 3200", max);
++ return -EINVAL;
++ }
++
++ if (to_multiplier < 10) {
++ BT_WARN("to_multiplier %d < 10", to_multiplier);
++ return -EINVAL;
++ }
+
+- if (to_multiplier < 10 || to_multiplier > 3200)
++ if (to_multiplier > 3200) {
++ BT_WARN("to_multiplier %d > 3200", to_multiplier);
+ return -EINVAL;
++ }
+
+- if (max >= to_multiplier * 8)
++ if (max >= to_multiplier * 8) {
++ BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
+ return -EINVAL;
++ }
+
+ max_latency = (to_multiplier * 4 / max) - 1;
+- if (latency > 499 || latency > max_latency)
++ if (latency > 499) {
++ BT_WARN("latency %d > 499", latency);
+ return -EINVAL;
++ }
++
++ if (latency > max_latency) {
++ BT_WARN("latency %d > max_latency %d", latency, max_latency);
++ return -EINVAL;
++ }
+
+ return 0;
+ }
+@@ -2190,8 +2224,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ bool mgmt_connected);
+ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status);
+-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+- u8 addr_type, u8 status);
++void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
++ u8 status);
+ void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index 57eeb07aeb2511..3cb2d10cac930b 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -38,16 +38,34 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
++int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
++ const void *param, u32 timeout);
+
+ void hci_cmd_sync_init(struct hci_dev *hdev);
+ void hci_cmd_sync_clear(struct hci_dev *hdev);
+ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
++void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
+
+ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
++int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
++int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
++int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
++struct hci_cmd_sync_work_entry *
++hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
++void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
++ struct hci_cmd_sync_work_entry *entry);
++bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy);
++bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
++ hci_cmd_sync_work_func_t func, void *data,
++ hci_cmd_sync_work_destroy_t destroy);
+
+ int hci_update_eir_sync(struct hci_dev *hdev);
+ int hci_update_class_sync(struct hci_dev *hdev);
+@@ -80,6 +98,8 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
+ u8 *data, u32 flags, u16 min_interval,
+ u16 max_interval, u16 sync_interval);
+
++int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance);
++
+ int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+ int hci_disable_advertising_sync(struct hci_dev *hdev);
+@@ -125,8 +145,6 @@ struct hci_conn;
+
+ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
+
+-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
+-
+ int hci_le_create_cis_sync(struct hci_dev *hdev);
+
+ int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
+@@ -136,3 +154,9 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
+ int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
+
+ int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
++
++int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
++
++int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
++
++int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index cf393e72d6ed67..d2a1154121d0d5 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -59,8 +59,6 @@
+ #define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
+ #define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
+
+-#define L2CAP_A2MP_DEFAULT_MTU 670
+-
+ /* L2CAP socket address */
+ struct sockaddr_l2 {
+ sa_family_t l2_family;
+@@ -109,12 +107,6 @@ struct l2cap_conninfo {
+ #define L2CAP_ECHO_RSP 0x09
+ #define L2CAP_INFO_REQ 0x0a
+ #define L2CAP_INFO_RSP 0x0b
+-#define L2CAP_CREATE_CHAN_REQ 0x0c
+-#define L2CAP_CREATE_CHAN_RSP 0x0d
+-#define L2CAP_MOVE_CHAN_REQ 0x0e
+-#define L2CAP_MOVE_CHAN_RSP 0x0f
+-#define L2CAP_MOVE_CHAN_CFM 0x10
+-#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
+ #define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
+ #define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
+ #define L2CAP_LE_CONN_REQ 0x14
+@@ -144,7 +136,6 @@ struct l2cap_conninfo {
+ /* L2CAP fixed channels */
+ #define L2CAP_FC_SIG_BREDR 0x02
+ #define L2CAP_FC_CONNLESS 0x04
+-#define L2CAP_FC_A2MP 0x08
+ #define L2CAP_FC_ATT 0x10
+ #define L2CAP_FC_SIG_LE 0x20
+ #define L2CAP_FC_SMP_LE 0x40
+@@ -267,7 +258,6 @@ struct l2cap_conn_rsp {
+ /* channel identifier */
+ #define L2CAP_CID_SIGNALING 0x0001
+ #define L2CAP_CID_CONN_LESS 0x0002
+-#define L2CAP_CID_A2MP 0x0003
+ #define L2CAP_CID_ATT 0x0004
+ #define L2CAP_CID_LE_SIGNALING 0x0005
+ #define L2CAP_CID_SMP 0x0006
+@@ -282,7 +272,6 @@ struct l2cap_conn_rsp {
+ #define L2CAP_CR_BAD_PSM 0x0002
+ #define L2CAP_CR_SEC_BLOCK 0x0003
+ #define L2CAP_CR_NO_MEM 0x0004
+-#define L2CAP_CR_BAD_AMP 0x0005
+ #define L2CAP_CR_INVALID_SCID 0x0006
+ #define L2CAP_CR_SCID_IN_USE 0x0007
+
+@@ -404,29 +393,6 @@ struct l2cap_info_rsp {
+ __u8 data[];
+ } __packed;
+
+-struct l2cap_create_chan_req {
+- __le16 psm;
+- __le16 scid;
+- __u8 amp_id;
+-} __packed;
+-
+-struct l2cap_create_chan_rsp {
+- __le16 dcid;
+- __le16 scid;
+- __le16 result;
+- __le16 status;
+-} __packed;
+-
+-struct l2cap_move_chan_req {
+- __le16 icid;
+- __u8 dest_amp_id;
+-} __packed;
+-
+-struct l2cap_move_chan_rsp {
+- __le16 icid;
+- __le16 result;
+-} __packed;
+-
+ #define L2CAP_MR_SUCCESS 0x0000
+ #define L2CAP_MR_PEND 0x0001
+ #define L2CAP_MR_BAD_ID 0x0002
+@@ -539,8 +505,6 @@ struct l2cap_seq_list {
+
+ struct l2cap_chan {
+ struct l2cap_conn *conn;
+- struct hci_conn *hs_hcon;
+- struct hci_chan *hs_hchan;
+ struct kref kref;
+ atomic_t nesting;
+
+@@ -584,6 +548,9 @@ struct l2cap_chan {
+ __u16 tx_credits;
+ __u16 rx_credits;
+
++ /* estimated available receive buffer space or -1 if unknown */
++ ssize_t rx_avail;
++
+ __u8 tx_state;
+ __u8 rx_state;
+
+@@ -591,12 +558,6 @@ struct l2cap_chan {
+ unsigned long conn_state;
+ unsigned long flags;
+
+- __u8 remote_amp_id;
+- __u8 local_amp_id;
+- __u8 move_id;
+- __u8 move_state;
+- __u8 move_role;
+-
+ __u16 next_tx_seq;
+ __u16 expected_ack_seq;
+ __u16 expected_tx_seq;
+@@ -724,10 +685,15 @@ struct l2cap_user {
+ /* ----- L2CAP socket info ----- */
+ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+
++struct l2cap_rx_busy {
++ struct list_head list;
++ struct sk_buff *skb;
++};
++
+ struct l2cap_pinfo {
+ struct bt_sock bt;
+ struct l2cap_chan *chan;
+- struct sk_buff *rx_busy_skb;
++ struct list_head rx_busy;
+ };
+
+ enum {
+@@ -985,6 +951,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
+ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+ void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
++void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail);
+ int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
+ void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+ int l2cap_ertm_init(struct l2cap_chan *chan);
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 5b8b1b644a2dbf..94594026a5c554 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -258,7 +258,7 @@ struct bonding {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ struct list_head ipsec_list;
+ /* protecting ipsec_list */
+- spinlock_t ipsec_lock;
++ struct mutex ipsec_lock;
+ #endif /* CONFIG_XFRM_OFFLOAD */
+ struct bpf_prog *xdp_prog;
+ };
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index 4dabeb6c76d31d..9f2ce4d05c2656 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -64,7 +64,7 @@ static inline bool sk_can_busy_loop(struct sock *sk)
+ static inline unsigned long busy_loop_current_time(void)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- return (unsigned long)(local_clock() >> 10);
++ return (unsigned long)(ktime_get_ns() >> 10);
+ #else
+ return 0;
+ #endif
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 7192346e4a22df..802ea3080d0b36 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -4816,6 +4816,7 @@ struct cfg80211_ops {
+ * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
+ * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
+ * NL80211_REGDOM_SET_BY_DRIVER.
++ * @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device
+ */
+ enum wiphy_flags {
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
+@@ -4827,6 +4828,7 @@ enum wiphy_flags {
+ WIPHY_FLAG_4ADDR_STATION = BIT(6),
+ WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
+ WIPHY_FLAG_IBSS_RSN = BIT(8),
++ WIPHY_FLAG_DISABLE_WEXT = BIT(9),
+ WIPHY_FLAG_MESH_AUTH = BIT(10),
+ WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
+ /* use hole at 12 */
+@@ -5826,6 +5828,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+ */
+ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
++/**
++ * wiphy_work_flush - flush previously queued work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush, this can be %NULL to flush all work
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
++
+ struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+@@ -5869,6 +5881,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
++/**
++ * wiphy_delayed_work_flush - flush previously queued delayed work
++ * @wiphy: the wiphy, for debug purposes
++ * @work: the work to flush
++ *
++ * Flush the work (i.e. run it if pending). This must be called
++ * under the wiphy mutex acquired by wiphy_lock().
++ */
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++ struct wiphy_delayed_work *dwork);
++
+ /**
+ * struct wireless_dev - wireless device state
+ *
+diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
+index f79ce133e51a7c..519d23941b541e 100644
+--- a/include/net/cfg802154.h
++++ b/include/net/cfg802154.h
+@@ -378,6 +378,7 @@ struct ieee802154_llsec_key {
+
+ struct ieee802154_llsec_key_entry {
+ struct list_head list;
++ struct rcu_head rcu;
+
+ struct ieee802154_llsec_key_id id;
+ struct ieee802154_llsec_key *key;
+diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
+index 6d1c8541183dbe..3a9001a042a5c3 100644
+--- a/include/net/dst_ops.h
++++ b/include/net/dst_ops.h
+@@ -24,7 +24,7 @@ struct dst_ops {
+ void (*destroy)(struct dst_entry *);
+ void (*ifdown)(struct dst_entry *,
+ struct net_device *dev);
+- struct dst_entry * (*negative_advice)(struct dst_entry *);
++ void (*negative_advice)(struct sock *sk, struct dst_entry *);
+ void (*link_failure)(struct sk_buff *);
+ void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu,
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 7f0adda3bf2fed..335bbc52171c10 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -40,8 +40,8 @@ struct flowi_common {
+ #define FLOWI_FLAG_KNOWN_NH 0x02
+ __u32 flowic_secid;
+ kuid_t flowic_uid;
+- struct flowi_tunnel flowic_tun_key;
+ __u32 flowic_multipath_hash;
++ struct flowi_tunnel flowic_tun_key;
+ };
+
+ union flowi_uli {
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index e18a4c0d69eedc..c53244f2043704 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -12,10 +12,12 @@
+ * struct genl_multicast_group - generic netlink multicast group
+ * @name: name of the multicast group, names are per-family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
++ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
+ */
+ struct genl_multicast_group {
+ char name[GENL_NAMSIZ];
+ u8 flags;
++ u8 cap_sys_admin:1;
+ };
+
+ struct genl_split_ops;
+diff --git a/include/net/gro.h b/include/net/gro.h
+index 88644b3ca6600c..018343254c90a6 100644
+--- a/include/net/gro.h
++++ b/include/net/gro.h
+@@ -86,6 +86,15 @@ struct napi_gro_cb {
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
++
++ /* L3 offsets */
++ union {
++ struct {
++ u16 network_offset;
++ u16 inner_network_offset;
++ };
++ u16 network_offsets[2];
++ };
+ };
+
+ #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
+index c8490729b4aea7..31bf475eca762a 100644
+--- a/include/net/if_inet6.h
++++ b/include/net/if_inet6.h
+@@ -22,10 +22,6 @@
+ #define IF_RS_SENT 0x10
+ #define IF_READY 0x80000000
+
+-/* prefix flags */
+-#define IF_PREFIX_ONLINK 0x01
+-#define IF_PREFIX_AUTOCONF 0x02
+-
+ enum {
+ INET6_IFADDR_STATE_PREDAD,
+ INET6_IFADDR_STATE_DAD,
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 5d2fcc137b8814..fee1e565055100 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -173,6 +173,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
+ void (*delack_handler)(struct timer_list *),
+ void (*keepalive_handler)(struct timer_list *));
+ void inet_csk_clear_xmit_timers(struct sock *sk);
++void inet_csk_clear_xmit_timers_sync(struct sock *sk);
+
+ static inline void inet_csk_schedule_ack(struct sock *sk)
+ {
+@@ -263,7 +264,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ struct request_sock *req,
+ struct sock *child);
+-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
++bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ unsigned long timeout);
+ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ struct request_sock *req,
+@@ -347,4 +348,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
+ return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
+ }
+
++static inline void inet_init_csk_locks(struct sock *sk)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++
++ spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
++ spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
++}
++
+ #endif /* _INET_CONNECTION_SOCK_H */
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index 2de0e4d4a02788..2790ba58ffe5ca 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -301,11 +301,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
+ #define inet_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
+-static inline bool sk_is_inet(struct sock *sk)
+-{
+- return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
+-}
+-
+ /**
+ * sk_to_full_sk - Access to a full socket
+ * @sk: pointer to a socket
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 4a8e578405cb37..9365e5af8d6da8 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -114,7 +114,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo
+
+ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
+
+-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
++void inet_twsk_purge(struct inet_hashinfo *hashinfo);
+
+ static inline
+ struct net *twsk_net(const struct inet_timewait_sock *twsk)
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 3489a1cca5e7bc..7db5912e0c5f63 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -497,8 +497,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+ return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+ }
+
+-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+- int fc_mx_len,
++struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
+ struct netlink_ext_ack *extack);
+ static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
+ {
+@@ -758,7 +757,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+ * Functions provided by ip_sockglue.c
+ */
+
+-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
++void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
+ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, int tlen, int offset);
+ int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
+@@ -787,6 +786,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+ }
+
+ bool icmp_global_allow(void);
++void icmp_global_consume(void);
++
+ extern int sysctl_icmp_msgs_per_sec;
+ extern int sysctl_icmp_msgs_burst;
+
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 1ba9f4ddf2f6db..9ba6413fd2e3ea 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -179,9 +179,6 @@ struct fib6_info {
+
+ refcount_t fib6_ref;
+ unsigned long expires;
+-
+- struct hlist_node gc_link;
+-
+ struct dst_metrics *fib6_metrics;
+ #define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
+
+@@ -250,6 +247,19 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
+ return rt->fib6_src.plen > 0;
+ }
+
++static inline void fib6_clean_expires(struct fib6_info *f6i)
++{
++ f6i->fib6_flags &= ~RTF_EXPIRES;
++ f6i->expires = 0;
++}
++
++static inline void fib6_set_expires(struct fib6_info *f6i,
++ unsigned long expires)
++{
++ f6i->expires = expires;
++ f6i->fib6_flags |= RTF_EXPIRES;
++}
++
+ static inline bool fib6_check_expired(const struct fib6_info *f6i)
+ {
+ if (f6i->fib6_flags & RTF_EXPIRES)
+@@ -257,11 +267,6 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
+ return false;
+ }
+
+-static inline bool fib6_has_expires(const struct fib6_info *f6i)
+-{
+- return f6i->fib6_flags & RTF_EXPIRES;
+-}
+-
+ /* Function to safely get fn->fn_sernum for passed in rt
+ * and store result in passed in cookie.
+ * Return true if we can get cookie safely
+@@ -383,7 +388,6 @@ struct fib6_table {
+ struct inet_peer_base tb6_peers;
+ unsigned int flags;
+ unsigned int fib_seq;
+- struct hlist_head tb6_gc_hlist; /* GC candidates */
+ #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
+ };
+
+@@ -500,48 +504,6 @@ void fib6_gc_cleanup(void);
+
+ int fib6_init(void);
+
+-/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
+- * NULL.
+- */
+-static inline void fib6_set_expires_locked(struct fib6_info *f6i,
+- unsigned long expires)
+-{
+- struct fib6_table *tb6;
+-
+- tb6 = f6i->fib6_table;
+- f6i->expires = expires;
+- if (tb6 && !fib6_has_expires(f6i))
+- hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist);
+- f6i->fib6_flags |= RTF_EXPIRES;
+-}
+-
+-/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
+- * NULL. If fib6_table is NULL, the fib6_info will not be inserted into the
+- * list of GC candidates until it is inserted into a table.
+- */
+-static inline void fib6_set_expires(struct fib6_info *f6i,
+- unsigned long expires)
+-{
+- spin_lock_bh(&f6i->fib6_table->tb6_lock);
+- fib6_set_expires_locked(f6i, expires);
+- spin_unlock_bh(&f6i->fib6_table->tb6_lock);
+-}
+-
+-static inline void fib6_clean_expires_locked(struct fib6_info *f6i)
+-{
+- if (fib6_has_expires(f6i))
+- hlist_del_init(&f6i->gc_link);
+- f6i->fib6_flags &= ~RTF_EXPIRES;
+- f6i->expires = 0;
+-}
+-
+-static inline void fib6_clean_expires(struct fib6_info *f6i)
+-{
+- spin_lock_bh(&f6i->fib6_table->tb6_lock);
+- fib6_clean_expires_locked(f6i);
+- spin_unlock_bh(&f6i->fib6_table->tb6_lock);
+-}
+-
+ struct ipv6_route_iter {
+ struct seq_net_private p;
+ struct fib6_walker w;
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index b32539bb0fb05c..61cfc8891f8204 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -128,18 +128,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
+
+ static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
+ const struct in6_addr *daddr,
+- unsigned int prefs,
++ unsigned int prefs, int l3mdev_index,
+ struct in6_addr *saddr)
+ {
++ struct net_device *l3mdev;
++ struct net_device *dev;
++ bool same_vrf;
+ int err = 0;
+
+- if (f6i && f6i->fib6_prefsrc.plen) {
++ rcu_read_lock();
++
++ l3mdev = dev_get_by_index_rcu(net, l3mdev_index);
++ if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev)
++ dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
++ same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev;
++ if (f6i && f6i->fib6_prefsrc.plen && same_vrf)
+ *saddr = f6i->fib6_prefsrc.addr;
+- } else {
+- struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
++ else
++ err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr);
+
+- err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
+- }
++ rcu_read_unlock();
+
+ return err;
+ }
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 15de07d3654052..ca1700c2a5733b 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -173,6 +173,7 @@ struct fib_result {
+ unsigned char type;
+ unsigned char scope;
+ u32 tclassid;
++ dscp_t dscp;
+ struct fib_nh_common *nhc;
+ struct fib_info *fi;
+ struct fib_table *table;
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index f346b4efbc307b..4e69f52a51177f 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -360,6 +360,40 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ return pskb_network_may_pull(skb, nhlen);
+ }
+
++/* Variant of pskb_inet_may_pull().
++ */
++static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
++ bool inner_proto_inherit)
++{
++ int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
++ __be16 type = skb->protocol;
++
++ /* Essentially this is skb_protocol(skb, true)
++ * And we get MAC len.
++ */
++ if (eth_type_vlan(type))
++ type = __vlan_get_protocol(skb, type, &maclen);
++
++ switch (type) {
++#if IS_ENABLED(CONFIG_IPV6)
++ case htons(ETH_P_IPV6):
++ nhlen = sizeof(struct ipv6hdr);
++ break;
++#endif
++ case htons(ETH_P_IP):
++ nhlen = sizeof(struct iphdr);
++ break;
++ }
++ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
++ * a base network header in skb->head.
++ */
++ if (!pskb_may_pull(skb, maclen + nhlen))
++ return false;
++
++ skb_set_network_header(skb, maclen);
++ return true;
++}
++
+ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+ {
+ const struct ip_tunnel_encap_ops *ops;
+diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
+index c48186bf473727..21da31e1dff5d3 100644
+--- a/include/net/ipv6_stubs.h
++++ b/include/net/ipv6_stubs.h
+@@ -85,6 +85,11 @@ struct ipv6_bpf_stub {
+ sockptr_t optval, unsigned int optlen);
+ int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
++ int (*ipv6_dev_get_saddr)(struct net *net,
++ const struct net_device *dst_dev,
++ const struct in6_addr *daddr,
++ unsigned int prefs,
++ struct in6_addr *saddr);
+ };
+ extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
+
+diff --git a/include/net/kcm.h b/include/net/kcm.h
+index 90279e5e09a5c0..441e993be634ce 100644
+--- a/include/net/kcm.h
++++ b/include/net/kcm.h
+@@ -70,6 +70,7 @@ struct kcm_sock {
+ struct work_struct tx_work;
+ struct list_head wait_psock_list;
+ struct sk_buff *seq_skb;
++ struct mutex tx_mutex;
+ u32 tx_stopped : 1;
+
+ /* Don't use bit fields here, these are set under different locks */
+diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
+index 7e73f8e5e4970d..1d55ba7c45be16 100644
+--- a/include/net/llc_pdu.h
++++ b/include/net/llc_pdu.h
+@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
+ */
+ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+ {
+- if (skb->protocol == htons(ETH_P_802_2))
+- memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
++ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ }
+
+ /**
+@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
+ */
+ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
+ {
+- if (skb->protocol == htons(ETH_P_802_2))
+- memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
++ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ }
+
+ /**
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 7c707358d15c81..47ade676565dbc 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -936,6 +936,9 @@ enum mac80211_tx_info_flags {
+ * of their QoS TID or other priority field values.
+ * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
+ * for sequence number assignment
++ * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame
++ * which is transmitted due to scanning or offchannel TX, not in normal
++ * operation on the interface.
+ * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
+ * frame should be transmitted on the specific link. This really is
+ * only relevant for frames that do not have data present, and is
+@@ -956,6 +959,7 @@ enum mac80211_tx_control_flags {
+ IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
+ IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
+ IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
++ IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10),
+ IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
+ };
+
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index ebf9bc54036a5f..75340c3e0c8b54 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -303,6 +303,7 @@ struct macsec_ops {
+ int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
++ bool rx_uses_md_dst;
+ };
+
+ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
+index 88b6ef7ce1a6ef..3965343fdee0cd 100644
+--- a/include/net/mana/gdma.h
++++ b/include/net/mana/gdma.h
+@@ -222,7 +222,15 @@ struct gdma_dev {
+ struct auxiliary_device *adev;
+ };
+
+-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
++/* MANA_PAGE_SIZE is the DMA unit */
++#define MANA_PAGE_SHIFT 12
++#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
++#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
++#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
++#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
++
++/* Required by HW */
++#define MANA_MIN_QSIZE MANA_PAGE_SIZE
+
+ #define GDMA_CQE_SIZE 64
+ #define GDMA_EQE_SIZE 16
+diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
+index 4d43adf186064c..7892b79854f62e 100644
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -39,11 +39,11 @@ enum TRI_STATE {
+ #define COMP_ENTRY_SIZE 64
+
+ #define RX_BUFFERS_PER_QUEUE 512
+-#define MANA_RX_DATA_ALIGN 64
+
+ #define MAX_SEND_BUFFERS_PER_QUEUE 256
+
+-#define EQ_SIZE (8 * PAGE_SIZE)
++#define EQ_SIZE (8 * MANA_PAGE_SIZE)
++
+ #define LOG2_EQ_THROTTLE 3
+
+ #define MAX_PORTS_IN_MANA_DEV 256
+@@ -98,6 +98,8 @@ struct mana_txq {
+
+ atomic_t pending_sends;
+
++ bool napi_initialized;
++
+ struct mana_stats_tx stats;
+ };
+
+@@ -275,6 +277,7 @@ struct mana_cq {
+ /* NAPI data */
+ struct napi_struct napi;
+ int work_done;
++ int work_done_since_doorbell;
+ int budget;
+ };
+
+diff --git a/include/net/mctp.h b/include/net/mctp.h
+index da86e106c91d57..1eb1b4393e46bd 100644
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -249,6 +249,7 @@ struct mctp_route {
+ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr);
+
++/* always takes ownership of skb */
+ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
+
+@@ -292,7 +293,7 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev);
+ int mctp_routes_init(void);
+ void mctp_routes_exit(void);
+
+-void mctp_device_init(void);
++int mctp_device_init(void);
+ void mctp_device_exit(void);
+
+ #endif /* __NET_MCTP_H */
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 07022bb0d44d4b..0d28172193fa63 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -162,7 +162,7 @@ struct neighbour {
+ struct rcu_head rcu;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+- u8 primary_key[0];
++ u8 primary_key[];
+ } __randomize_layout;
+
+ struct neigh_ops {
+diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
+index d68b0a48343150..8b8ed4e13d74df 100644
+--- a/include/net/netdev_queues.h
++++ b/include/net/netdev_queues.h
+@@ -128,7 +128,7 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
+ netdev_txq_completed_mb(txq, pkts, bytes); \
+ \
+ _res = -1; \
+- if (pkts && likely(get_desc > start_thrs)) { \
++ if (pkts && likely(get_desc >= start_thrs)) { \
+ _res = 1; \
+ if (unlikely(netif_tx_queue_stopped(txq)) && \
+ !(down_cond)) { \
+diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
+index 078d3c52c03f98..e5f2f0b73a9a0d 100644
+--- a/include/net/netfilter/nf_conntrack_act_ct.h
++++ b/include/net/netfilter/nf_conntrack_act_ct.h
+@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
+ #endif
+ }
+
+-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
++static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
++ enum ip_conntrack_info ctinfo)
++{
++#if IS_ENABLED(CONFIG_NET_ACT_CT)
++ struct nf_conn_act_ct_ext *act_ct_ext;
++
++ act_ct_ext = nf_conn_act_ct_ext_find(ct);
++ if (dev_net(skb->dev) == &init_net && act_ct_ext)
++ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
++#endif
++}
++
++static inline struct
++nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
++ struct nf_conn *ct,
++ enum ip_conntrack_info ctinfo)
+ {
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
+@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
+ return act_ct;
+
+ act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
++ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+ return act_ct;
+ #else
+ return NULL;
+ #endif
+ }
+
+-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+- enum ip_conntrack_info ctinfo)
+-{
+-#if IS_ENABLED(CONFIG_NET_ACT_CT)
+- struct nf_conn_act_ct_ext *act_ct_ext;
+-
+- act_ct_ext = nf_conn_act_ct_ext_find(ct);
+- if (dev_net(skb->dev) == &init_net && act_ct_ext)
+- act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+-#endif
+-}
+-
+ #endif /* _NF_CONNTRACK_ACT_CT_H */
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index fe1507c1db828b..df7775afb92b93 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -62,6 +62,8 @@ struct nf_flowtable_type {
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+ void (*free)(struct nf_flowtable *ft);
++ void (*get)(struct nf_flowtable *ft);
++ void (*put)(struct nf_flowtable *ft);
+ nf_hookfn *hook;
+ struct module *owner;
+ };
+@@ -240,6 +242,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ }
+
+ list_add_tail(&block_cb->list, &block->cb_list);
++ up_write(&flow_table->flow_block_lock);
++
++ if (flow_table->type->get)
++ flow_table->type->get(flow_table);
++ return 0;
+
+ unlock:
+ up_write(&flow_table->flow_block_lock);
+@@ -262,10 +269,13 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ WARN_ON(true);
+ }
+ up_write(&flow_table->flow_block_lock);
++
++ if (flow_table->type->put)
++ flow_table->type->put(flow_table);
+ }
+
+ void flow_offload_route_init(struct flow_offload *flow,
+- const struct nf_flow_route *route);
++ struct nf_flow_route *route);
+
+ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+@@ -325,7 +335,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
+ int nf_flow_table_offload_init(void);
+ void nf_flow_table_offload_exit(void);
+
+-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
++static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
+ {
+ __be16 proto;
+
+@@ -341,6 +351,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+ return 0;
+ }
+
++static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
++{
++ if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
++ return false;
++
++ *inner_proto = __nf_flow_pppoe_proto(skb);
++
++ return true;
++}
++
+ #define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
+ #define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
+ #define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 7c816359d5a988..1b95c34a4e3d11 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
+ return *(__force __be32 *)sreg;
+ }
+
+-static inline void nft_reg_store64(u32 *dreg, u64 val)
++static inline void nft_reg_store64(u64 *dreg, u64 val)
+ {
+- put_unaligned(val, (u64 *)dreg);
++ put_unaligned(val, dreg);
+ }
+
+ static inline u64 nft_reg_load64(const u32 *sreg)
+@@ -297,9 +297,22 @@ struct nft_set_elem {
+ void *priv;
+ };
+
++/**
++ * enum nft_iter_type - nftables set iterator type
++ *
++ * @NFT_ITER_READ: read-only iteration over set elements
++ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
++ */
++enum nft_iter_type {
++ NFT_ITER_UNSPEC,
++ NFT_ITER_READ,
++ NFT_ITER_UPDATE,
++};
++
+ struct nft_set;
+ struct nft_set_iter {
+ u8 genmask;
++ enum nft_iter_type type:8;
+ unsigned int count;
+ unsigned int skip;
+ int err;
+@@ -587,6 +600,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
+ return (void *)set->data;
+ }
+
++static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
++{
++ return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
++}
++
+ static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+ {
+ return refcount_read(&s->refs) != 1;
+@@ -1307,6 +1325,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
+ * @type: stateful object numeric type
+ * @owner: module owner
+ * @maxattr: maximum netlink attribute
++ * @family: address family for AF-specific object types
+ * @policy: netlink attribute policy
+ */
+ struct nft_object_type {
+@@ -1316,6 +1335,7 @@ struct nft_object_type {
+ struct list_head list;
+ u32 type;
+ unsigned int maxattr;
++ u8 family;
+ struct module *owner;
+ const struct nla_policy *policy;
+ };
+diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
+index 947973623dc77e..fcf967286e37cb 100644
+--- a/include/net/netfilter/nf_tables_ipv4.h
++++ b/include/net/netfilter/nf_tables_ipv4.h
+@@ -19,7 +19,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
+ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+ {
+ struct iphdr *iph, _iph;
+- u32 len, thoff;
++ u32 len, thoff, skb_len;
+
+ iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*iph), &_iph);
+@@ -31,7 +31,9 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+
+ len = iph_totlen(pkt->skb, iph);
+ thoff = iph->ihl * 4;
+- if (pkt->skb->len < len)
++ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
++
++ if (skb_len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+@@ -40,7 +42,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
+
+ pkt->flags = NFT_PKTINFO_L4PROTO;
+ pkt->tprot = iph->protocol;
+- pkt->thoff = thoff;
++ pkt->thoff = skb_network_offset(pkt->skb) + thoff;
+ pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
+index 467d59b9e5334c..a0633eeaec977b 100644
+--- a/include/net/netfilter/nf_tables_ipv6.h
++++ b/include/net/netfilter/nf_tables_ipv6.h
+@@ -31,8 +31,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
++ u32 pkt_len, skb_len;
+ int protohdr;
+- u32 pkt_len;
+
+ ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
+ sizeof(*ip6h), &_ip6h);
+@@ -43,7 +43,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+- if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
++ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
++ if (pkt_len + sizeof(*ip6h) > skb_len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
+index 02bbdc577f8e28..a6a0bf4a247e51 100644
+--- a/include/net/netns/netfilter.h
++++ b/include/net/netns/netfilter.h
+@@ -15,6 +15,9 @@ struct netns_nf {
+ const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
+ #ifdef CONFIG_SYSCTL
+ struct ctl_table_header *nf_log_dir_header;
++#ifdef CONFIG_LWTUNNEL
++ struct ctl_table_header *nf_lwtnl_dir_header;
++#endif
+ #endif
+ struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+ struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 6506221c5fe31f..c1fa6fee0acfa7 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -12,6 +12,7 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+ enum rtnl_link_flags {
+ RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+ RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
++ RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
+ };
+
+ enum rtnl_kinds {
+@@ -27,6 +28,15 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
+ return msgtype & RTNL_KIND_MASK;
+ }
+
++struct rtnl_msg_handler {
++ struct module *owner;
++ int protocol;
++ int msgtype;
++ rtnl_doit_func doit;
++ rtnl_dumpit_func dumpit;
++ int flags;
++};
++
+ void rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+ int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+@@ -34,6 +44,14 @@ int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+ int rtnl_unregister(int protocol, int msgtype);
+ void rtnl_unregister_all(int protocol);
+
++int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n);
++void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n);
++
++#define rtnl_register_many(handlers) \
++ __rtnl_register_many(handlers, ARRAY_SIZE(handlers))
++#define rtnl_unregister_many(handlers) \
++ __rtnl_unregister_many(handlers, ARRAY_SIZE(handlers))
++
+ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
+ {
+ if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index f232512505f896..326d3a322c109e 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -126,6 +126,7 @@ struct Qdisc {
+
+ struct rcu_head rcu;
+ netdevice_tracker dev_tracker;
++ struct lock_class_key root_lock_key;
+ /* private data */
+ long privdata[] ____cacheline_aligned;
+ };
+@@ -376,6 +377,10 @@ struct tcf_proto_ops {
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack);
+ void (*tmplt_destroy)(void *tmplt_priv);
++ void (*tmplt_reoffload)(struct tcf_chain *chain,
++ bool add,
++ flow_setup_cb_t *cb,
++ void *cb_priv);
+ struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
+ u32 handle);
+
+@@ -840,7 +845,6 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+ {
+- qdisc_calculate_pkt_len(skb, sch);
+ return sch->enqueue(skb, sch, to_free);
+ }
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 92f7ea62a9159c..c3961050b8e394 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -902,6 +902,8 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
+ hlist_for_each_entry(__sk, list, sk_bind_node)
+ #define sk_for_each_bound_bhash2(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind2_node)
++#define sk_for_each_bound_safe(__sk, tmp, list) \
++ hlist_for_each_entry_safe(__sk, tmp, list, sk_bind_node)
+
+ /**
+ * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
+@@ -1458,33 +1460,36 @@ sk_memory_allocated(const struct sock *sk)
+
+ /* 1 MB per cpu, in page units */
+ #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
++extern int sysctl_mem_pcpu_rsv;
++
++static inline void proto_memory_pcpu_drain(struct proto *proto)
++{
++ int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
++
++ if (val)
++ atomic_long_add(val, proto->memory_allocated);
++}
+
+ static inline void
+-sk_memory_allocated_add(struct sock *sk, int amt)
++sk_memory_allocated_add(const struct sock *sk, int val)
+ {
+- int local_reserve;
++ struct proto *proto = sk->sk_prot;
+
+- preempt_disable();
+- local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+- if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+- }
+- preempt_enable();
++ val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
++
++ if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
++ proto_memory_pcpu_drain(proto);
+ }
+
+ static inline void
+-sk_memory_allocated_sub(struct sock *sk, int amt)
++sk_memory_allocated_sub(const struct sock *sk, int val)
+ {
+- int local_reserve;
++ struct proto *proto = sk->sk_prot;
+
+- preempt_disable();
+- local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+- if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+- }
+- preempt_enable();
++ val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
++
++ if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
++ proto_memory_pcpu_drain(proto);
+ }
+
+ #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
+@@ -1808,6 +1813,13 @@ static inline void sock_owned_by_me(const struct sock *sk)
+ #endif
+ }
+
++static inline void sock_not_owned_by_me(const struct sock *sk)
++{
++#ifdef CONFIG_LOCKDEP
++ WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
++#endif
++}
++
+ static inline bool sock_owned_by_user(const struct sock *sk)
+ {
+ sock_owned_by_me(sk);
+@@ -1865,11 +1877,13 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+ int sock_setsockopt(struct socket *sock, int level, int op,
+ sockptr_t optval, unsigned int optlen);
++int do_sock_setsockopt(struct socket *sock, bool compat, int level,
++ int optname, sockptr_t optval, int optlen);
++int do_sock_getsockopt(struct socket *sock, bool compat, int level,
++ int optname, sockptr_t optval, sockptr_t optlen);
+
+ int sk_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
+-int sock_getsockopt(struct socket *sock, int level, int op,
+- char __user *optval, int __user *optlen);
+ int sock_gettstamp(struct socket *sock, void __user *userstamp,
+ bool timeval, bool time32);
+ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+@@ -2006,21 +2020,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+ /* sk_tx_queue_mapping accept only upto a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
+- sk->sk_tx_queue_mapping = tx_queue;
++ /* Paired with READ_ONCE() in sk_tx_queue_get() and
++	 * other WRITE_ONCE() because the socket lock might not be held.
++ */
++ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ }
+
+ #define NO_QUEUE_MAPPING USHRT_MAX
+
+ static inline void sk_tx_queue_clear(struct sock *sk)
+ {
+- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
++ /* Paired with READ_ONCE() in sk_tx_queue_get() and
++	 * other WRITE_ONCE() because the socket lock might not be held.
++ */
++ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
+ }
+
+ static inline int sk_tx_queue_get(const struct sock *sk)
+ {
+- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+- return sk->sk_tx_queue_mapping;
++ if (sk) {
++ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
++ * and sk_tx_queue_set().
++ */
++ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+
++ if (val != NO_QUEUE_MAPPING)
++ return val;
++ }
+ return -1;
+ }
+
+@@ -2140,14 +2166,14 @@ static inline bool sk_rethink_txhash(struct sock *sk)
+ }
+
+ static inline struct dst_entry *
+-__sk_dst_get(struct sock *sk)
++__sk_dst_get(const struct sock *sk)
+ {
+ return rcu_dereference_check(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
+ }
+
+ static inline struct dst_entry *
+-sk_dst_get(struct sock *sk)
++sk_dst_get(const struct sock *sk)
+ {
+ struct dst_entry *dst;
+
+@@ -2161,17 +2187,10 @@ sk_dst_get(struct sock *sk)
+
+ static inline void __dst_negative_advice(struct sock *sk)
+ {
+- struct dst_entry *ndst, *dst = __sk_dst_get(sk);
++ struct dst_entry *dst = __sk_dst_get(sk);
+
+- if (dst && dst->ops->negative_advice) {
+- ndst = dst->ops->negative_advice(dst);
+-
+- if (ndst != dst) {
+- rcu_assign_pointer(sk->sk_dst_cache, ndst);
+- sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
+- }
+- }
++ if (dst && dst->ops->negative_advice)
++ dst->ops->negative_advice(sk, dst);
+ }
+
+ static inline void dst_negative_advice(struct sock *sk)
+@@ -2186,7 +2205,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
+ rcu_assign_pointer(sk->sk_dst_cache, dst);
+@@ -2199,7 +2218,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
+ }
+@@ -2781,9 +2800,30 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+ &skb_shinfo(skb)->tskey);
+ }
+
++static inline bool sk_is_inet(const struct sock *sk)
++{
++ int family = READ_ONCE(sk->sk_family);
++
++ return family == AF_INET || family == AF_INET6;
++}
++
+ static inline bool sk_is_tcp(const struct sock *sk)
+ {
+- return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
++ return sk_is_inet(sk) &&
++ sk->sk_type == SOCK_STREAM &&
++ sk->sk_protocol == IPPROTO_TCP;
++}
++
++static inline bool sk_is_udp(const struct sock *sk)
++{
++ return sk_is_inet(sk) &&
++ sk->sk_type == SOCK_DGRAM &&
++ sk->sk_protocol == IPPROTO_UDP;
++}
++
++static inline bool sk_is_stream_unix(const struct sock *sk)
++{
++ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+ }
+
+ /**
+diff --git a/include/net/switchdev.h b/include/net/switchdev.h
+index a43062d4c734bb..8346b0d29542c3 100644
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -308,6 +308,9 @@ void switchdev_deferred_process(void);
+ int switchdev_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack);
++bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
++ enum switchdev_notifier_type nt,
++ const struct switchdev_obj *obj);
+ int switchdev_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
+diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
+index b24ea2d9400bab..1dc2f827d0bcfb 100644
+--- a/include/net/tc_act/tc_ct.h
++++ b/include/net/tc_act/tc_ct.h
+@@ -57,6 +57,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ return to_ct_params(a)->nf_ft;
+ }
+
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++ return to_ct_params(a)->helper;
++}
++
+ #else
+ static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
+ static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+@@ -64,6 +69,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ {
+ return NULL;
+ }
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++ return NULL;
++}
+ #endif /* CONFIG_NF_CONNTRACK */
+
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4b03ca7cb8a5eb..b3917af309e0f1 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -344,7 +344,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
+ void tcp_rcv_space_adjust(struct sock *sk);
+ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+ void tcp_twsk_destructor(struct sock *sk);
+-void tcp_twsk_purge(struct list_head *net_exit_list, int family);
++void tcp_twsk_purge(struct list_head *net_exit_list);
+ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+@@ -624,6 +624,7 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+ /* tcp_input.c */
+ void tcp_rearm_rto(struct sock *sk);
+ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
++void tcp_done_with_error(struct sock *sk, int err);
+ void tcp_reset(struct sock *sk, struct sk_buff *skb);
+ void tcp_fin(struct sock *sk);
+ void tcp_check_space(struct sock *sk);
+@@ -723,6 +724,8 @@ static inline void tcp_fast_path_check(struct sock *sk)
+ tcp_fast_path_on(tp);
+ }
+
++u32 tcp_delack_max(const struct sock *sk);
++
+ /* Compute the actual rto_min value */
+ static inline u32 tcp_rto_min(struct sock *sk)
+ {
+@@ -801,7 +804,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+ }
+
+ /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+-static inline u32 tcp_ns_to_ts(u64 ns)
++static inline u64 tcp_ns_to_ts(u64 ns)
+ {
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ }
+@@ -1137,7 +1140,7 @@ extern struct tcp_congestion_ops tcp_reno;
+
+ struct tcp_congestion_ops *tcp_ca_find(const char *name);
+ struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
++u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
+ #ifdef CONFIG_INET
+ char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+ #else
+@@ -1458,13 +1461,14 @@ static inline int tcp_space_from_win(const struct sock *sk, int win)
+ return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
+ }
+
++/* Assume a 50% default for skb->len/skb->truesize ratio.
++ * This may be adjusted later in tcp_measure_rcv_mss().
++ */
++#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
++
+ static inline void tcp_scaling_ratio_init(struct sock *sk)
+ {
+- /* Assume a conservative default of 1200 bytes of payload per 4K page.
+- * This may be adjusted later in tcp_measure_rcv_mss().
+- */
+- tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) /
+- SKB_TRUESIZE(4096);
++ tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
+ }
+
+ /* Note: caller must be prepared to deal with negative returns */
+@@ -1480,17 +1484,22 @@ static inline int tcp_full_space(const struct sock *sk)
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
+ }
+
+-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
++static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
+ {
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
++ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+ }
+
++static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
++{
++ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
++}
++
+ void tcp_cleanup_rbuf(struct sock *sk, int copied);
+ void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+
+@@ -2221,9 +2230,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
+ {
+ const struct sk_buff *skb = tcp_rtx_queue_head(sk);
+ u32 rto = inet_csk(sk)->icsk_rto;
+- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
++ if (likely(skb)) {
++ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
++
++ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
++ } else {
++ WARN_ONCE(1,
++			  "rtx queue empty: "
++ "out:%u sacked:%u lost:%u retrans:%u "
++ "tlp_high_seq:%u sk_state:%u ca_state:%u "
++ "advmss:%u mss_cache:%u pmtu:%u\n",
++ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
++ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
++ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
++ inet_csk(sk)->icsk_ca_state,
++ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
++ inet_csk(sk)->icsk_pmtu_cookie);
++ return jiffies_to_usecs(rto);
++ }
++
+ }
+
+ /*
+@@ -2336,7 +2362,7 @@ struct tcp_ulp_ops {
+ /* cleanup ulp */
+ void (*release)(struct sock *sk);
+ /* diagnostic */
+- int (*get_info)(const struct sock *sk, struct sk_buff *skb);
++ int (*get_info)(struct sock *sk, struct sk_buff *skb);
+ size_t (*get_info_size)(const struct sock *sk);
+ /* clone ulp */
+ void (*clone)(const struct request_sock *req, struct sock *newsk,
+diff --git a/include/net/tcx.h b/include/net/tcx.h
+index 264f147953bae9..a0f78fd5cb2879 100644
+--- a/include/net/tcx.h
++++ b/include/net/tcx.h
+@@ -13,7 +13,7 @@ struct mini_Qdisc;
+ struct tcx_entry {
+ struct mini_Qdisc __rcu *miniq;
+ struct bpf_mprog_bundle bundle;
+- bool miniq_active;
++ u32 miniq_active;
+ struct rcu_head rcu;
+ };
+
+@@ -129,11 +129,16 @@ static inline void tcx_skeys_dec(bool ingress)
+ tcx_dec();
+ }
+
+-static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
+- const bool active)
++static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry)
+ {
+ ASSERT_RTNL();
+- tcx_entry(entry)->miniq_active = active;
++ tcx_entry(entry)->miniq_active++;
++}
++
++static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry)
++{
++ ASSERT_RTNL();
++ tcx_entry(entry)->miniq_active--;
+ }
+
+ static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
+diff --git a/include/net/tls.h b/include/net/tls.h
+index a2b44578dcb753..2ad28545b15f01 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -96,9 +96,6 @@ struct tls_sw_context_tx {
+ struct tls_rec *open_rec;
+ struct list_head tx_list;
+ atomic_t encrypt_pending;
+- /* protect crypto_wait with encrypt_pending */
+- spinlock_t encrypt_compl_lock;
+- int async_notify;
+ u8 async_capable:1;
+
+ #define BIT_TX_SCHEDULED 0
+@@ -113,7 +110,8 @@ struct tls_strparser {
+ u32 stopped : 1;
+ u32 copy_mode : 1;
+ u32 mixed_decrypted : 1;
+- u32 msg_ready : 1;
++
++ bool msg_ready;
+
+ struct strp_msg stm;
+
+@@ -135,8 +133,6 @@ struct tls_sw_context_rx {
+ struct tls_strparser strp;
+
+ atomic_t decrypt_pending;
+- /* protect crypto_wait with decrypt_pending*/
+- spinlock_t decrypt_compl_lock;
+ struct sk_buff_head async_hold;
+ struct wait_queue_head wq;
+ };
+diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
+index 0ca9b7a11baf5c..29251c3519cf0c 100644
+--- a/include/net/udp_tunnel.h
++++ b/include/net/udp_tunnel.h
+@@ -174,16 +174,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
+ }
+ #endif
+
+-static inline void udp_tunnel_encap_enable(struct socket *sock)
++static inline void udp_tunnel_encap_enable(struct sock *sk)
+ {
+- struct udp_sock *up = udp_sk(sock->sk);
+-
+- if (up->encap_enabled)
++ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
+ return;
+
+- up->encap_enabled = 1;
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (sock->sk->sk_family == PF_INET6)
++ if (READ_ONCE(sk->sk_family) == PF_INET6)
+ ipv6_stub->udpv6_encap_enable();
+ #endif
+ udp_encap_enable();
+diff --git a/include/net/udplite.h b/include/net/udplite.h
+index bd33ff2b8f426d..786919d29f8de7 100644
+--- a/include/net/udplite.h
++++ b/include/net/udplite.h
+@@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+ /* Fast-path computation of checksum. Socket may not be locked. */
+ static inline __wsum udplite_csum(struct sk_buff *skb)
+ {
+- const struct udp_sock *up = udp_sk(skb->sk);
+ const int off = skb_transport_offset(skb);
++ const struct sock *sk = skb->sk;
+ int len = skb->len - off;
+
+- if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+- if (0 < up->pcslen)
+- len = up->pcslen;
+- udp_hdr(skb)->len = htons(up->pcslen);
++ if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
++ u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
++
++ if (pcslen < len) {
++ if (pcslen > 0)
++ len = pcslen;
++ udp_hdr(skb)->len = htons(pcslen);
++ }
+ }
+ skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
+
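The pcslen read above (now lockless, hence the READ_ONCE()) is the per-socket send checksum coverage that userspace configures. A hedged sketch of the configuring side, using the standard UDP-Lite socket options (error handling elided):

    #include <sys/socket.h>
    #include <netinet/in.h>        /* IPPROTO_UDPLITE */
    #include <linux/udp.h>         /* UDPLITE_SEND_CSCOV */

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
        int cov = 20;              /* cover the first 20 bytes of each datagram, header included */

        /* this value is what udp_sk(sk)->pcslen holds on the kernel side */
        setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
        return 0;
    }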
+diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
+index 1617af3801620f..69b472604b86f5 100644
+--- a/include/net/xdp_sock.h
++++ b/include/net/xdp_sock.h
+@@ -14,6 +14,8 @@
+ #include <linux/mm.h>
+ #include <net/sock.h>
+
++#define XDP_UMEM_SG_FLAG (1 << 1)
++
+ struct net_device;
+ struct xsk_queue;
+ struct xdp_buff;
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 1f6fc8c7a84c6c..5425f7ad5ebdec 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -147,11 +147,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ return ret;
+ }
+
++static inline void xsk_buff_del_tail(struct xdp_buff *tail)
++{
++ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
++
++ list_del(&xskb->xskb_list_node);
++}
++
++static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
++{
++ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
++ struct xdp_buff_xsk *frag;
++
++ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
++ xskb_list_node);
++ return &frag->xdp;
++}
++
+ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+ {
+ xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+ xdp->data_meta = xdp->data;
+ xdp->data_end = xdp->data + size;
++ xdp->flags = 0;
+ }
+
+ static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+@@ -309,6 +327,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ return NULL;
+ }
+
++static inline void xsk_buff_del_tail(struct xdp_buff *tail)
++{
++}
++
++static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
++{
++ return NULL;
++}
++
+ static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
+ {
+ }
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 363c7d5105542e..b280e7c4601160 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -176,7 +176,10 @@ struct xfrm_state {
+ struct hlist_node gclist;
+ struct hlist_node bydst;
+ };
+- struct hlist_node bysrc;
++ union {
++ struct hlist_node dev_gclist;
++ struct hlist_node bysrc;
++ };
+ struct hlist_node byspi;
+ struct hlist_node byseq;
+
+@@ -1047,6 +1050,9 @@ struct xfrm_offload {
+ #define CRYPTO_INVALID_PACKET_SYNTAX 64
+ #define CRYPTO_INVALID_PROTOCOL 128
+
++	/* Used to keep the whole L2 header for transport mode GRO */
++ __u32 orig_mac_len;
++
+ __u8 proto;
+ __u8 inner_ipproto;
+ };
+@@ -1581,7 +1587,7 @@ int xfrm_state_check_expire(struct xfrm_state *x);
+ static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+ {
+ struct xfrm_dev_offload *xdo = &x->xso;
+- struct net_device *dev = xdo->dev;
++ struct net_device *dev = READ_ONCE(xdo->dev);
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+@@ -1940,13 +1946,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+ struct xfrm_user_offload *xuo, u8 dir,
+ struct netlink_ext_ack *extack);
+ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
++void xfrm_dev_state_delete(struct xfrm_state *x);
++void xfrm_dev_state_free(struct xfrm_state *x);
+
+ static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+ {
+ struct xfrm_dev_offload *xso = &x->xso;
++ struct net_device *dev = READ_ONCE(xso->dev);
+
+- if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+- xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
++ if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
++ dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+ }
+
+ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
+@@ -1967,28 +1976,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
+ return false;
+ }
+
+-static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+-{
+- struct xfrm_dev_offload *xso = &x->xso;
+-
+- if (xso->dev)
+- xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+-}
+-
+-static inline void xfrm_dev_state_free(struct xfrm_state *x)
+-{
+- struct xfrm_dev_offload *xso = &x->xso;
+- struct net_device *dev = xso->dev;
+-
+- if (dev && dev->xfrmdev_ops) {
+- if (dev->xfrmdev_ops->xdo_dev_state_free)
+- dev->xfrmdev_ops->xdo_dev_state_free(x);
+- xso->dev = NULL;
+- xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+- netdev_put(dev, &xso->dev_tracker);
+- }
+-}
+-
+ static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
+ {
+ struct xfrm_dev_offload *xdo = &x->xdo;
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 95896472a82bfb..565a850445414d 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ {
+ __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.nents, pgsz);
++ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
++ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
++}
++
++static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
++{
++ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
+ }
+
+ /**
+@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ */
+ #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
+ for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
+- __rdma_block_iter_next(biter);)
++ __rdma_umem_block_iter_next(biter);)
+
+ #ifdef CONFIG_INFINIBAND_USER_MEM
+
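For context, a typical consumer walks user-memory DMA blocks from an MR registration path; the added __sg_numblocks bound keeps the iteration from overrunning ib_umem_num_dma_blocks(). A hedged sketch, where my_mr and my_map_page() are hypothetical stand-ins for driver-specific code:

    /* sketch: program a device page table in 64 KiB DMA blocks */
    static void map_umem_blocks(struct my_mr *mr, struct ib_umem *umem)
    {
        struct ib_block_iter biter;

        rdma_umem_for_each_dma_block(umem, &biter, SZ_64K)
            my_map_page(mr, rdma_block_iter_dma_address(&biter));
    }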
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 533ab92684d81e..62f9d126a71ad1 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2846,6 +2846,7 @@ struct ib_block_iter {
+ /* internal states */
+ struct scatterlist *__sg; /* sg holding the current aligned block */
+ dma_addr_t __dma_addr; /* unaligned DMA address of this block */
++ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
+ unsigned int __sg_nents; /* number of SG entries */
+ unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
+ unsigned int __pg_bit; /* alignment of current block */
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
+index 526def14e7fb75..6ae00983a6126e 100644
+--- a/include/scsi/scsi_cmnd.h
++++ b/include/scsi/scsi_cmnd.h
+@@ -234,7 +234,7 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+
+ static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
+ {
+- unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
++ unsigned int shift = ilog2(scmd->device->sector_size);
+
+ return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
+ }
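A worked check of the fix: the function counts logical blocks, so the byte count must be shifted by ilog2(sector_size) itself. With 4096-byte sectors that shift is 12, so a 32 KiB request yields 32768 >> 12 = 8 blocks; the old "- SECTOR_SHIFT" variant shifted by 12 - 9 = 3 and reported 4096. In plain C, with SECTOR_SHIFT assumed to be 9 as in the kernel:

    #include <stdio.h>

    #define SECTOR_SHIFT 9          /* 512-byte units, as in the kernel */

    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int bytes = 32768, sector_size = 4096;

        printf("fixed: %u\n", bytes >> ilog2_u(sector_size));                   /* 8 */
        printf("old:   %u\n", bytes >> (ilog2_u(sector_size) - SECTOR_SHIFT));  /* 4096 */
        return 0;
    }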
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 65e49fae8da7a0..c38f4fe5e64cf4 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -100,10 +100,6 @@ struct scsi_vpd {
+ unsigned char data[];
+ };
+
+-enum scsi_vpd_parameters {
+- SCSI_VPD_HEADER_SIZE = 4,
+-};
+-
+ struct scsi_device {
+ struct Scsi_Host *host;
+ struct request_queue *request_queue;
+@@ -167,19 +163,25 @@ struct scsi_device {
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+- bool manage_system_start_stop;
++ unsigned manage_system_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+	 * power state for runtime device suspend and resume operations.
+ */
+- bool manage_runtime_start_stop;
++ unsigned manage_runtime_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+- bool manage_shutdown;
++ unsigned manage_shutdown:1;
++
++ /*
++ * If set and if the device is runtime suspended, ask the high-level
++ * device driver (sd) to force a runtime resume of the device.
++ */
++ unsigned force_runtime_start_on_system_start:1;
+
+ unsigned removable:1;
+ unsigned changed:1; /* Data invalid due to media change */
+@@ -202,6 +204,7 @@ struct scsi_device {
+ unsigned use_10_for_rw:1; /* first try 10-byte read / write */
+ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
++ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */
+ unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
+ unsigned no_write_same:1; /* no WRITE SAME command */
+ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
+@@ -213,7 +216,6 @@ struct scsi_device {
+ unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
+ unsigned no_start_on_add:1; /* do not issue start on add */
+ unsigned allow_restart:1; /* issue START_UNIT in error handler */
+- unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */
+ unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
+ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
+ unsigned select_no_atn:1;
+diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
+index 4ce1988b2ba01c..f40915d2eceef4 100644
+--- a/include/scsi/scsi_driver.h
++++ b/include/scsi/scsi_driver.h
+@@ -12,6 +12,7 @@ struct request;
+ struct scsi_driver {
+ struct device_driver gendrv;
+
++ int (*resume)(struct device *);
+ void (*rescan)(struct device *);
+ blk_status_t (*init_command)(struct scsi_cmnd *);
+ void (*uninit_command)(struct scsi_cmnd *);
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 4c2dc8150c6d71..f9d5ce6170a706 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -764,6 +764,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
+ #define scsi_template_proc_dir(sht) NULL
+ #endif
+ extern void scsi_scan_host(struct Scsi_Host *);
++extern int scsi_resume_device(struct scsi_device *sdev);
+ extern int scsi_rescan_device(struct scsi_device *sdev);
+ extern void scsi_remove_host(struct Scsi_Host *);
+ extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
+index 0e75b9277c8c6a..e3b6ce3cbf8835 100644
+--- a/include/scsi/scsi_transport_sas.h
++++ b/include/scsi/scsi_transport_sas.h
+@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
+ void sas_disable_tlr(struct scsi_device *);
+ void sas_enable_tlr(struct scsi_device *);
+
++bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
++
+ extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
+ extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
+ void sas_rphy_free(struct sas_rphy *);
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index 1e1b40f4e664e3..846132ca5503d7 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -813,6 +813,9 @@ struct ocelot {
+ const u32 *const *map;
+ struct list_head stats_regions;
+
++ spinlock_t inj_lock;
++ spinlock_t xtr_lock;
++
+ u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
+ int packet_buffer_size;
+ int num_frame_refs;
+@@ -966,10 +969,17 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset);
+
+ /* Packet I/O */
++void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp);
++void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp);
++void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp);
++void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp);
++void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp);
++void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp);
+ bool ocelot_can_inject(struct ocelot *ocelot, int grp);
+ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ u32 rew_op, struct sk_buff *skb);
+-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag);
++void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
++ u32 rew_op, struct sk_buff *skb);
+ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
+ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
+ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
+index c8bb56e6852a8c..47a6cab75e630c 100644
+--- a/include/soc/qcom/cmd-db.h
++++ b/include/soc/qcom/cmd-db.h
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
++/*
++ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
+
+ #ifndef __QCOM_COMMAND_DB_H__
+ #define __QCOM_COMMAND_DB_H__
+@@ -21,6 +24,8 @@ u32 cmd_db_read_addr(const char *resource_id);
+
+ const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
+
++bool cmd_db_match_resource_addr(u32 addr1, u32 addr2);
++
+ enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
+
+ int cmd_db_ready(void);
+@@ -31,6 +36,9 @@ static inline u32 cmd_db_read_addr(const char *resource_id)
+ static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
+ { return ERR_PTR(-ENODEV); }
+
++static inline bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
++{ return false; }
++
+ static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
+ { return -ENODEV; }
+
+diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
+index c47cc71a999ec9..fdd462b295927d 100644
+--- a/include/soc/qcom/qcom-spmi-pmic.h
++++ b/include/soc/qcom/qcom-spmi-pmic.h
+@@ -48,7 +48,7 @@
+ #define PMK8350_SUBTYPE 0x2f
+ #define PMR735B_SUBTYPE 0x34
+ #define PM6350_SUBTYPE 0x36
+-#define PM2250_SUBTYPE 0x37
++#define PM4125_SUBTYPE 0x37
+
+ #define PMI8998_FAB_ID_SMIC 0x11
+ #define PMI8998_FAB_ID_GF 0x30
+diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
+index 5842e38bb2880d..f5e4ac5b8cce89 100644
+--- a/include/soc/tegra/bpmp.h
++++ b/include/soc/tegra/bpmp.h
+@@ -102,8 +102,12 @@ struct tegra_bpmp {
+ #ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_mirror;
+ #endif
++
++ bool suspended;
+ };
+
++#define TEGRA_BPMP_MESSAGE_RESET BIT(0)
++
+ struct tegra_bpmp_message {
+ unsigned int mrq;
+
+@@ -117,6 +121,8 @@ struct tegra_bpmp_message {
+ size_t size;
+ int ret;
+ } rx;
++
++ unsigned long flags;
+ };
+
+ #if IS_ENABLED(CONFIG_TEGRA_BPMP)
+diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
+index 1bf757901d0246..2fe8c6b0d4cf38 100644
+--- a/include/sound/cs35l41.h
++++ b/include/sound/cs35l41.h
+@@ -11,7 +11,6 @@
+ #define __CS35L41_H
+
+ #include <linux/regmap.h>
+-#include <linux/completion.h>
+ #include <linux/firmware/cirrus/cs_dsp.h>
+
+ #define CS35L41_FIRSTREG 0x00000000
+@@ -902,7 +901,8 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
+ int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
+ struct cs35l41_hw_cfg *hw_cfg);
+ bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
++int cs35l41_mdsync_up(struct regmap *regmap);
+ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+- int enable, struct completion *pll_lock, bool firmware_running);
++ int enable, bool firmware_running);
+
+ #endif /* __CS35L41_H */
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index 3950322bf3cbbc..c0f2135968fec1 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -273,6 +273,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
+ extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
+
+ int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
++int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
+ int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
+ int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
+ int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
+@@ -286,6 +287,7 @@ int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
+ int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+ void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
++int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
+ int cs35l56_get_bclk_freq_id(unsigned int freq);
+ void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
+
+diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
+index d70c55f17df7ca..94dbb23580f2f1 100644
+--- a/include/sound/dmaengine_pcm.h
++++ b/include/sound/dmaengine_pcm.h
+@@ -36,6 +36,7 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
+ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ struct dma_chan *chan);
+ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
++int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
+
+ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ dma_filter_fn filter_fn, void *filter_data);
+diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
+index 1af9e68193920d..9cc10fab01a8ce 100644
+--- a/include/sound/emu10k1.h
++++ b/include/sound/emu10k1.h
+@@ -1684,8 +1684,7 @@ struct snd_emu1010 {
+ unsigned int clock_fallback;
+ unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
+ unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
+- struct work_struct firmware_work;
+- struct work_struct clock_work;
++ struct work_struct work;
+ };
+
+ struct snd_emu10k1 {
+diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
+index e49b97d9e3ff20..845e7608ac375e 100644
+--- a/include/sound/soc-acpi-intel-match.h
++++ b/include/sound/soc-acpi-intel-match.h
+@@ -32,6 +32,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[];
++extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[];
+
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
+@@ -42,6 +43,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
+ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[];
++extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[];
+
+ /*
+ * generic table used for HDA codec-based platforms, possibly with
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index 6d31d535e8f6dd..23d6d6bfb07364 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -68,6 +68,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
+ * @i2s_link_mask: I2S/TDM links enabled on the board
+ * @num_dai_drivers: number of elements in @dai_drivers
+ * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
++ * @subsystem_vendor: optional PCI SSID vendor value
++ * @subsystem_device: optional PCI SSID device value
++ * @subsystem_id_set: true if a value has been written to
++ * subsystem_vendor and subsystem_device.
+ */
+ struct snd_soc_acpi_mach_params {
+ u32 acpi_ipc_irq_index;
+@@ -80,6 +84,9 @@ struct snd_soc_acpi_mach_params {
+ u32 i2s_link_mask;
+ u32 num_dai_drivers;
+ struct snd_soc_dai_driver *dai_drivers;
++ unsigned short subsystem_vendor;
++ unsigned short subsystem_device;
++ bool subsystem_id_set;
+ };
+
+ /**
+diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
+index fc94dfb0021fd3..1f4c39922d8250 100644
+--- a/include/sound/soc-card.h
++++ b/include/sound/soc-card.h
+@@ -30,6 +30,8 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
+
+ struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name);
++struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
++ const char *name);
+ int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack);
+ int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
+@@ -59,6 +61,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+
++#ifdef CONFIG_PCI
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++ unsigned short vendor,
++ unsigned short device)
++{
++ card->pci_subsystem_vendor = vendor;
++ card->pci_subsystem_device = device;
++ card->pci_subsystem_set = true;
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++ unsigned short *vendor,
++ unsigned short *device)
++{
++ if (!card->pci_subsystem_set)
++ return -ENOENT;
++
++ *vendor = card->pci_subsystem_vendor;
++ *device = card->pci_subsystem_device;
++
++ return 0;
++}
++#else /* !CONFIG_PCI */
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++ unsigned short vendor,
++ unsigned short device)
++{
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++ unsigned short *vendor,
++ unsigned short *device)
++{
++ return -ENOENT;
++}
++#endif /* CONFIG_PCI */
++
+ /* device driver data */
+ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ void *data)
+@@ -78,8 +117,8 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd;
+
+ for_each_card_rtds(card, rtd) {
+- if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name))
+- return asoc_rtd_to_codec(rtd, 0);
++ if (!strcmp(snd_soc_rtd_to_codec(rtd, 0)->name, dai_name))
++ return snd_soc_rtd_to_codec(rtd, 0);
+ }
+
+ return NULL;
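The SSID setter is meant to be called once from a bus-level probe so that machine drivers and components can query the board identity later. A hedged sketch of a call site; build_card() is a hypothetical helper, the pci_dev fields are standard:

    /* sketch: record the board's PCI SSID while probing */
    static int my_audio_probe(struct pci_dev *pci, const struct pci_device_id *id)
    {
        struct snd_soc_card *card = build_card(pci);    /* hypothetical */

        snd_soc_card_set_pci_ssid(card, pci->subsystem_vendor,
                                  pci->subsystem_device);
        return devm_snd_soc_register_card(&pci->dev, card);
    }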
+diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
+index 5fcfba47d98cca..adcd8719d3435a 100644
+--- a/include/sound/soc-dai.h
++++ b/include/sound/soc-dai.h
+@@ -370,6 +370,7 @@ struct snd_soc_dai_ops {
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
++ unsigned int mute_unmute_on_trigger:1;
+ };
+
+ struct snd_soc_cdai_ops {
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 37f9d3fe302a60..c1acc46529b9db 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -774,37 +774,42 @@ struct snd_soc_dai_link {
+ #endif
+ };
+
++/* REMOVE ME */
++#define asoc_link_to_cpu snd_soc_link_to_cpu
++#define asoc_link_to_codec snd_soc_link_to_codec
++#define asoc_link_to_platform snd_soc_link_to_platform
++
+ static inline struct snd_soc_dai_link_component*
+-asoc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
++snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
+ return &(link)->cpus[n];
+ }
+
+ static inline struct snd_soc_dai_link_component*
+-asoc_link_to_codec(struct snd_soc_dai_link *link, int n) {
++snd_soc_link_to_codec(struct snd_soc_dai_link *link, int n) {
+ return &(link)->codecs[n];
+ }
+
+ static inline struct snd_soc_dai_link_component*
+-asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
++snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
+ return &(link)->platforms[n];
+ }
+
+ #define for_each_link_codecs(link, i, codec) \
+ for ((i) = 0; \
+ ((i) < link->num_codecs) && \
+- ((codec) = asoc_link_to_codec(link, i)); \
++ ((codec) = snd_soc_link_to_codec(link, i)); \
+ (i)++)
+
+ #define for_each_link_platforms(link, i, platform) \
+ for ((i) = 0; \
+ ((i) < link->num_platforms) && \
+- ((platform) = asoc_link_to_platform(link, i)); \
++ ((platform) = snd_soc_link_to_platform(link, i)); \
+ (i)++)
+
+ #define for_each_link_cpus(link, i, cpu) \
+ for ((i) = 0; \
+ ((i) < link->num_cpus) && \
+- ((cpu) = asoc_link_to_cpu(link, i)); \
++ ((cpu) = snd_soc_link_to_cpu(link, i)); \
+ (i)++)
+
+ /*
+@@ -894,8 +899,11 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
+ #define COMP_CODEC_CONF(_name) { .name = _name }
+ #define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }
+
++/* REMOVE ME */
++#define asoc_dummy_dlc snd_soc_dummy_dlc
++
+ extern struct snd_soc_dai_link_component null_dailink_component[0];
+-extern struct snd_soc_dai_link_component asoc_dummy_dlc;
++extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
+
+
+ struct snd_soc_codec_conf {
+@@ -932,6 +940,17 @@ struct snd_soc_card {
+ #ifdef CONFIG_DMI
+ char dmi_longname[80];
+ #endif /* CONFIG_DMI */
++
++#ifdef CONFIG_PCI
++ /*
++ * PCI does not define 0 as invalid, so pci_subsystem_set indicates
++ * whether a value has been written to these fields.
++ */
++ unsigned short pci_subsystem_vendor;
++ unsigned short pci_subsystem_device;
++ bool pci_subsystem_set;
++#endif /* CONFIG_PCI */
++
+ char topology_shortname[32];
+
+ struct device *dev;
+@@ -1102,8 +1121,8 @@ struct snd_soc_pcm_runtime {
+ * dais = cpu_dai + codec_dai
+ * see
+ * soc_new_pcm_runtime()
+- * asoc_rtd_to_cpu()
+- * asoc_rtd_to_codec()
++ * snd_soc_rtd_to_cpu()
++ * snd_soc_rtd_to_codec()
+ */
+ struct snd_soc_dai **dais;
+
+@@ -1131,10 +1150,16 @@ struct snd_soc_pcm_runtime {
+ int num_components;
+ struct snd_soc_component *components[]; /* CPU/Codec/Platform */
+ };
++
++/* REMOVE ME */
++#define asoc_rtd_to_cpu snd_soc_rtd_to_cpu
++#define asoc_rtd_to_codec snd_soc_rtd_to_codec
++#define asoc_substream_to_rtd snd_soc_substream_to_rtd
++
+ /* see soc_new_pcm_runtime() */
+-#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
+-#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
+-#define asoc_substream_to_rtd(substream) \
++#define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
++#define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
++#define snd_soc_substream_to_rtd(substream) \
+ (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
+
+ #define for_each_rtd_components(rtd, i, component) \
+@@ -1143,11 +1168,11 @@ struct snd_soc_pcm_runtime {
+ (i)++)
+ #define for_each_rtd_cpu_dais(rtd, i, dai) \
+ for ((i) = 0; \
+- ((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
++ ((i) < rtd->dai_link->num_cpus) && ((dai) = snd_soc_rtd_to_cpu(rtd, i)); \
+ (i)++)
+ #define for_each_rtd_codec_dais(rtd, i, dai) \
+ for ((i) = 0; \
+- ((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
++ ((i) < rtd->dai_link->num_codecs) && ((dai) = snd_soc_rtd_to_codec(rtd, i)); \
+ (i)++)
+ #define for_each_rtd_dais(rtd, i, dai) \
+ for ((i) = 0; \
+diff --git a/include/sound/sof.h b/include/sound/sof.h
+index d3c41f87ac3191..31121c6df02721 100644
+--- a/include/sound/sof.h
++++ b/include/sound/sof.h
+@@ -52,11 +52,14 @@ enum sof_dsp_power_states {
+
+ /* Definitions for multiple IPCs */
+ enum sof_ipc_type {
+- SOF_IPC,
+- SOF_INTEL_IPC4,
++ SOF_IPC_TYPE_3,
++ SOF_IPC_TYPE_4,
+ SOF_IPC_TYPE_COUNT
+ };
+
++#define SOF_IPC SOF_IPC_TYPE_3
++#define SOF_INTEL_IPC4 SOF_IPC_TYPE_4
++
+ /*
+ * SOF Platform data.
+ */
+@@ -64,6 +67,14 @@ struct snd_sof_pdata {
+ const char *name;
+ const char *platform;
+
++ /*
++ * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
++ * flag indicates that a value has been written to these members.
++ */
++ unsigned short subsystem_vendor;
++ unsigned short subsystem_device;
++ bool subsystem_id_set;
++
+ struct device *dev;
+
+ /*
+diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
+index bd1b72bf47a5e4..af3319dab230ac 100644
+--- a/include/sound/tas2781-dsp.h
++++ b/include/sound/tas2781-dsp.h
+@@ -2,7 +2,7 @@
+ //
+ // ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+ //
+-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
++// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+ // https://www.ti.com
+ //
+ // The TAS2781 driver implements a flexible and configurable
+@@ -13,8 +13,8 @@
+ // Author: Kevin Lu <kevin-lu@ti.com>
+ //
+
+-#ifndef __TASDEVICE_DSP_H__
+-#define __TASDEVICE_DSP_H__
++#ifndef __TAS2781_DSP_H__
++#define __TAS2781_DSP_H__
+
+ #define MAIN_ALL_DEVICES 0x0d
+ #define MAIN_DEVICE_A 0x01
+@@ -112,10 +112,17 @@ struct tasdevice_fw {
+ struct device *dev;
+ };
+
+-enum tasdevice_dsp_fw_state {
+- TASDEVICE_DSP_FW_NONE = 0,
++enum tasdevice_fw_state {
++	/* Driver is in startup mode; no firmware has been loaded yet. */
+ TASDEVICE_DSP_FW_PENDING,
++	/* DSP firmware is present in the system, but it failed to parse. */
+ TASDEVICE_DSP_FW_FAIL,
++	/*
++	 * Only the RCA (Reconfigurable Architecture) firmware loaded
++	 * successfully.
++	 */
++ TASDEVICE_RCA_FW_OK,
++	/* Both the RCA and DSP firmware loaded successfully. */
+ TASDEVICE_DSP_FW_ALL_OK,
+ };
+
+@@ -175,7 +182,6 @@ void tasdevice_calbin_remove(void *context);
+ int tasdevice_select_tuningprm_cfg(void *context, int prm,
+ int cfg_no, int rca_conf_no);
+ int tasdevice_prmg_load(void *context, int prm_no);
+-int tasdevice_prmg_calibdata_load(void *context, int prm_no);
+ void tasdevice_tuning_switch(void *context, int state);
+ int tas2781_load_calibration(void *context, char *file_name,
+ unsigned short i);
+diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
+index 4038dd421150a3..1dc59005d241fb 100644
+--- a/include/sound/tas2781-tlv.h
++++ b/include/sound/tas2781-tlv.h
+@@ -15,7 +15,7 @@
+ #ifndef __TAS2781_TLV_H__
+ #define __TAS2781_TLV_H__
+
+-static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
++static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
+ static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+
+ #endif
+diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
+index a6c808b223183a..f97f386e5a55a3 100644
+--- a/include/sound/tas2781.h
++++ b/include/sound/tas2781.h
+@@ -78,11 +78,6 @@ struct tasdevice {
+ bool is_loaderr;
+ };
+
+-struct tasdevice_irqinfo {
+- int irq_gpio;
+- int irq;
+-};
+-
+ struct calidata {
+ unsigned char *data;
+ unsigned long total_sz;
+@@ -90,7 +85,6 @@ struct calidata {
+
+ struct tasdevice_priv {
+ struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
+- struct tasdevice_irqinfo irq_info;
+ struct tasdevice_rca rcabin;
+ struct calidata cali_data;
+ struct tasdevice_fw *fmw;
+@@ -101,7 +95,6 @@ struct tasdevice_priv {
+ struct tm tm;
+
+ enum device_catlog_id catlog_id;
+- const char *acpi_subsystem_id;
+ unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64];
+ unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE];
+ unsigned char coef_binaryname[64];
+@@ -112,6 +105,7 @@ struct tasdevice_priv {
+ unsigned int chip_id;
+ unsigned int sysclk;
+
++ int irq;
+ int cur_prog;
+ int cur_conf;
+ int fw_state;
+@@ -131,14 +125,20 @@ struct tasdevice_priv {
+ const struct firmware *fmw, int offset);
+ int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block);
++
++ int (*save_calibration)(struct tasdevice_priv *tas_priv);
++ void (*apply_calibration)(struct tasdevice_priv *tas_priv);
+ };
+
+ void tas2781_reset(struct tasdevice_priv *tas_dev);
+ int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
++ struct module *module,
+ void (*cont)(const struct firmware *fw, void *context));
+ struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
+ int tasdevice_init(struct tasdevice_priv *tas_priv);
+ void tasdevice_remove(struct tasdevice_priv *tas_priv);
++int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
++void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
+ int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int *value);
+ int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
+diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
+index 28c364c63245d0..d099ae27f8491a 100644
+--- a/include/sound/ump_convert.h
++++ b/include/sound/ump_convert.h
+@@ -13,6 +13,7 @@ struct ump_cvt_to_ump_bank {
+ unsigned char cc_nrpn_msb, cc_nrpn_lsb;
+ unsigned char cc_data_msb, cc_data_lsb;
+ unsigned char cc_bank_msb, cc_bank_lsb;
++ bool cc_data_msb_set, cc_data_lsb_set;
+ };
+
+ /* context for converting from MIDI1 byte stream to UMP packet */
+diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
+index 4dfa6d7f83baa9..cd104a1343e2d6 100644
+--- a/include/trace/events/9p.h
++++ b/include/trace/events/9p.h
+@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u16, tag )
+- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
++ __dynamic_array(unsigned char, line,
++ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = pdu->id;
+ __entry->tag = pdu->tag;
+- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
++ memcpy(__get_dynamic_array(line), pdu->sdata,
++ __get_dynamic_array_len(line));
+ ),
+- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
++ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
+ (unsigned long)__entry->clnt, show_9p_op(__entry->type),
+- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
++ __entry->tag, __get_dynamic_array_len(line),
++ __get_dynamic_array(line))
+ );
+
+
+diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
+index 4d8ef71090af1c..97a434d0213564 100644
+--- a/include/trace/events/asoc.h
++++ b/include/trace/events/asoc.h
+@@ -12,6 +12,8 @@
+ #define DAPM_DIRECT "(direct)"
+ #define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
+
++TRACE_DEFINE_ENUM(SND_SOC_DAPM_DIR_OUT);
++
+ struct snd_soc_jack;
+ struct snd_soc_card;
+ struct snd_soc_dapm_widget;
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index b2db2c2f1c577d..3c4d5ef6d44636 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -2430,6 +2430,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
+ TP_ARGS(fs_info, sinfo, old, diff)
+ );
+
++DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
++
++ TP_PROTO(const struct btrfs_fs_info *fs_info,
++ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
++
++ TP_ARGS(fs_info, sinfo, old, diff)
++);
++
+ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
+
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
+diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
+index cf4b98b9a9edc7..7d931db02b9346 100644
+--- a/include/trace/events/cachefiles.h
++++ b/include/trace/events/cachefiles.h
+@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
+ cachefiles_obj_see_withdrawal,
+ cachefiles_obj_get_ondemand_fd,
+ cachefiles_obj_put_ondemand_fd,
++ cachefiles_obj_get_read_req,
++ cachefiles_obj_put_read_req,
+ };
+
+ enum fscache_why_object_killed {
+@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
+ EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
+ EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
+ EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
+- E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
++ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
++ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
++ EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
++ EM(cachefiles_obj_get_read_req, "GET read_req") \
++ E_(cachefiles_obj_put_read_req, "PUT read_req")
+
+ #define cachefiles_coherency_traces \
+ EM(cachefiles_coherency_check_aux, "BAD aux ") \
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index 793f82cc1515a8..b6ffae01a8cd86 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
+ { CP_NODE_NEED_CP, "node needs cp" }, \
+ { CP_FASTBOOT_MODE, "fastboot mode" }, \
+ { CP_SPEC_LOG_NUM, "log type is 2" }, \
+- { CP_RECOVER_DIR, "dir needs recovery" })
++ { CP_RECOVER_DIR, "dir needs recovery" }, \
++ { CP_XATTR_DIR, "dir's xattr updated" })
+
+ #define show_shutdown_mode(type) \
+ __print_symbolic(type, \
+diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
+index a6190aa1b4060f..f1a73aa83fbbfb 100644
+--- a/include/trace/events/fscache.h
++++ b/include/trace/events/fscache.h
+@@ -35,12 +35,14 @@ enum fscache_volume_trace {
+ fscache_volume_get_cookie,
+ fscache_volume_get_create_work,
+ fscache_volume_get_hash_collision,
++ fscache_volume_get_withdraw,
+ fscache_volume_free,
+ fscache_volume_new_acquire,
+ fscache_volume_put_cookie,
+ fscache_volume_put_create_work,
+ fscache_volume_put_hash_collision,
+ fscache_volume_put_relinquish,
++ fscache_volume_put_withdraw,
+ fscache_volume_see_create_work,
+ fscache_volume_see_hash_wake,
+ fscache_volume_wait_create_work,
+@@ -120,12 +122,14 @@ enum fscache_access_trace {
+ EM(fscache_volume_get_cookie, "GET cook ") \
+ EM(fscache_volume_get_create_work, "GET creat") \
+ EM(fscache_volume_get_hash_collision, "GET hcoll") \
++ EM(fscache_volume_get_withdraw, "GET withd") \
+ EM(fscache_volume_free, "FREE ") \
+ EM(fscache_volume_new_acquire, "NEW acq ") \
+ EM(fscache_volume_put_cookie, "PUT cook ") \
+ EM(fscache_volume_put_create_work, "PUT creat") \
+ EM(fscache_volume_put_hash_collision, "PUT hcoll") \
+ EM(fscache_volume_put_relinquish, "PUT relnq") \
++ EM(fscache_volume_put_withdraw, "PUT withd") \
+ EM(fscache_volume_see_create_work, "SEE creat") \
+ EM(fscache_volume_see_hash_wake, "SEE hwake") \
+ E_(fscache_volume_wait_create_work, "WAIT crea")
+diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
+index d7353024016cce..af0af3f1d9b7c0 100644
+--- a/include/trace/events/intel_ifs.h
++++ b/include/trace/events/intel_ifs.h
+@@ -10,25 +10,25 @@
+
+ TRACE_EVENT(ifs_status,
+
+- TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status),
++ TP_PROTO(int cpu, int start, int stop, u64 status),
+
+- TP_ARGS(cpu, activate, status),
++ TP_ARGS(cpu, start, stop, status),
+
+ TP_STRUCT__entry(
+ __field( u64, status )
+ __field( int, cpu )
+- __field( u8, start )
+- __field( u8, stop )
++ __field( u16, start )
++ __field( u16, stop )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+- __entry->start = activate.start;
+- __entry->stop = activate.stop;
+- __entry->status = status.data;
++ __entry->start = start;
++ __entry->stop = stop;
++ __entry->status = status;
+ ),
+
+- TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx",
++ TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx",
+ __entry->cpu,
+ __entry->start,
+ __entry->stop,
+diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
+index 1478b9dd05fae4..e010618f93264f 100644
+--- a/include/trace/events/mmflags.h
++++ b/include/trace/events/mmflags.h
+@@ -135,6 +135,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
+ #define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
+
+ #define __def_pagetype_names \
++ DEF_PAGETYPE_NAME(hugetlb), \
+ DEF_PAGETYPE_NAME(offline), \
+ DEF_PAGETYPE_NAME(guard), \
+ DEF_PAGETYPE_NAME(table), \
+diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
+index 563e48617374d3..54e8fb5a229cdd 100644
+--- a/include/trace/events/mptcp.h
++++ b/include/trace/events/mptcp.h
+@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
+ struct sock *ssk;
+
+ __entry->active = mptcp_subflow_active(subflow);
+- __entry->backup = subflow->backup;
++ __entry->backup = subflow->backup || subflow->request_bkup;
+
+ if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
+ __entry->free = sk_stream_memory_free(subflow->tcp_sock);
+diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
+index a3995925cb0570..061fd496030354 100644
+--- a/include/trace/events/qdisc.h
++++ b/include/trace/events/qdisc.h
+@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+- __string( dev, qdisc_dev(q) )
+- __string( kind, q->ops->id )
+- __field( u32, parent )
+- __field( u32, handle )
++ __string( dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)" )
++ __string( kind, q->ops->id )
++ __field( u32, parent )
++ __field( u32, handle )
+ ),
+
+ TP_fast_assign(
+- __assign_str(dev, qdisc_dev(q));
++ __assign_str(dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)");
+ __assign_str(kind, q->ops->id);
+ __entry->parent = q->parent;
+ __entry->handle = q->handle;
+@@ -106,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+- __string( dev, qdisc_dev(q) )
+- __string( kind, q->ops->id )
+- __field( u32, parent )
+- __field( u32, handle )
++ __string( dev, qdisc_dev(q)->name )
++ __string( kind, q->ops->id )
++ __field( u32, parent )
++ __field( u32, handle )
+ ),
+
+ TP_fast_assign(
+- __assign_str(dev, qdisc_dev(q));
++ __assign_str(dev, qdisc_dev(q)->name);
+ __assign_str(kind, q->ops->id);
+ __entry->parent = q->parent;
+ __entry->handle = q->handle;
+diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
+index ba2d96a1bc2f94..78704f1209d3e4 100644
+--- a/include/trace/events/rpcgss.h
++++ b/include/trace/events/rpcgss.h
+@@ -54,7 +54,7 @@ TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN);
+ TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN);
+
+ #define show_gss_status(x) \
+- __print_flags(x, "|", \
++ __print_symbolic(x, \
+ { GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \
+ { GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \
+ { GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \
+@@ -609,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
+ __field(unsigned int, timeout)
+ __field(u32, window_size)
+ __field(int, len)
+- __string(acceptor, data)
++ __string_len(acceptor, data, len)
+ ),
+
+ TP_fast_assign(
+@@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
+ __entry->timeout = timeout;
+ __entry->window_size = window_size;
+ __entry->len = len;
+- strncpy(__get_str(acceptor), data, len);
++ __assign_str(acceptor, data);
+ ),
+
+ TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index 4c53a5ef6257b8..3322fb93a260be 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -83,7 +83,7 @@
+ EM(rxrpc_badmsg_bad_abort, "bad-abort") \
+ EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \
+ EM(rxrpc_badmsg_short_ack, "short-ack") \
+- EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \
++ EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \
+ EM(rxrpc_badmsg_short_hdr, "short-hdr") \
+ EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \
+ EM(rxrpc_badmsg_zero_call, "zero-call") \
+@@ -128,6 +128,7 @@
+ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
+ EM(rxrpc_skb_get_conn_work, "GET conn-work") \
++ EM(rxrpc_skb_get_last_nack, "GET last-nack") \
+ EM(rxrpc_skb_get_local_work, "GET locl-work") \
+ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
+ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
+@@ -141,6 +142,7 @@
+ EM(rxrpc_skb_put_error_report, "PUT error-rep") \
+ EM(rxrpc_skb_put_input, "PUT input ") \
+ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
++ EM(rxrpc_skb_put_last_nack, "PUT last-nack") \
+ EM(rxrpc_skb_put_purge, "PUT purge ") \
+ EM(rxrpc_skb_put_rotate, "PUT rotate ") \
+ EM(rxrpc_skb_put_unknown, "PUT unknown ") \
+@@ -328,7 +330,7 @@
+ E_(rxrpc_rtt_tx_ping, "PING")
+
+ #define rxrpc_rtt_rx_traces \
+- EM(rxrpc_rtt_rx_cancel, "CNCL") \
++ EM(rxrpc_rtt_rx_other_ack, "OACK") \
+ EM(rxrpc_rtt_rx_obsolete, "OBSL") \
+ EM(rxrpc_rtt_rx_lost, "LOST") \
+ EM(rxrpc_rtt_rx_ping_response, "PONG") \
+@@ -1549,7 +1551,7 @@ TRACE_EVENT(rxrpc_congest,
+ memcpy(&__entry->sum, summary, sizeof(__entry->sum));
+ ),
+
+- TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
++ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s",
+ __entry->call,
+ __entry->ack_serial,
+ __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
+@@ -1557,9 +1559,9 @@ TRACE_EVENT(rxrpc_congest,
+ __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
+ __entry->sum.cwnd,
+ __entry->sum.ssthresh,
+- __entry->sum.nr_acks, __entry->sum.saw_nacks,
++ __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
+ __entry->sum.nr_new_acks,
+- __entry->sum.nr_rot_new_acks,
++ __entry->sum.nr_new_nacks,
+ __entry->top - __entry->hard_ack,
+ __entry->sum.cumulative_acks,
+ __entry->sum.dup_acks,
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index fbb99a61f714cb..010ba1b7cb0eac 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -664,6 +664,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+ );
+
++#ifdef CONFIG_NUMA_BALANCING
++#define NUMAB_SKIP_REASON \
++ EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
++ EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
++ EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \
++ EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \
++ EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \
++ EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \
++ EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" )
++
++/* Redefine for export. */
++#undef EM
++#undef EMe
++#define EM(a, b) TRACE_DEFINE_ENUM(a);
++#define EMe(a, b) TRACE_DEFINE_ENUM(a);
++
++NUMAB_SKIP_REASON
++
++/* Redefine for symbolic printing. */
++#undef EM
++#undef EMe
++#define EM(a, b) { a, b },
++#define EMe(a, b) { a, b }
++
++TRACE_EVENT(sched_skip_vma_numa,
++
++ TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
++ enum numa_vmaskip_reason reason),
++
++ TP_ARGS(mm, vma, reason),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, numa_scan_offset)
++ __field(unsigned long, vm_start)
++ __field(unsigned long, vm_end)
++ __field(enum numa_vmaskip_reason, reason)
++ ),
++
++ TP_fast_assign(
++ __entry->numa_scan_offset = mm->numa_scan_offset;
++ __entry->vm_start = vma->vm_start;
++ __entry->vm_end = vma->vm_end;
++ __entry->reason = reason;
++ ),
++
++ TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
++ __entry->numa_scan_offset,
++ __entry->vm_start,
++ __entry->vm_end,
++ __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
++);
++#endif /* CONFIG_NUMA_BALANCING */
+
+ /*
+ * Tracepoint for waking a polling cpu without an IPI.
+diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
+index abe087c53b4b04..05c412c5823995 100644
+--- a/include/uapi/asm-generic/unistd.h
++++ b/include/uapi/asm-generic/unistd.h
+@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+ #define __NR_ppoll_time64 414
+ __SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+ #define __NR_io_pgetevents_time64 416
+-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
++__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
+ #define __NR_recvmmsg_time64 417
+ __SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+ #define __NR_mq_timedsend_time64 418
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index 8db7fd3f743e4f..5eed091d4c291f 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -1474,6 +1474,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ #define AMD_FMT_MOD_TILE_VER_GFX10 2
+ #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+ #define AMD_FMT_MOD_TILE_VER_GFX11 4
++#define AMD_FMT_MOD_TILE_VER_GFX12 5
+
+ /*
+ * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
+@@ -1484,6 +1485,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ /*
+ * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
+ * GFX9 as canonical version.
++ *
++ * 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
+ */
+ #define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+ #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
+@@ -1491,6 +1494,21 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+ #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+
++/* Gfx12 swizzle modes:
++ * 0 - LINEAR
++ * 1 - 256B_2D - 2D block dimensions
++ * 2 - 4KB_2D
++ * 3 - 64KB_2D
++ * 4 - 256KB_2D
++ * 5 - 4KB_3D - 3D block dimensions
++ * 6 - 64KB_3D
++ * 7 - 256KB_3D
++ */
++#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1
++#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2
++#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3
++#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4
++
+ #define AMD_FMT_MOD_DCC_BLOCK_64B 0
+ #define AMD_FMT_MOD_DCC_BLOCK_128B 1
+ #define AMD_FMT_MOD_DCC_BLOCK_256B 2
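A GFX12 modifier is composed with the same AMD_FMT_MOD_SET() helpers as the GFX9-GFX11 ones; a hedged sketch:

    /* sketch: a 64K_2D swizzle modifier for GFX12 */
    __u64 mod = AMD_FMT_MOD |
                AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
                AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);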
+diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
+index 0bade1592f34f2..c3d8dc7512971f 100644
+--- a/include/uapi/drm/nouveau_drm.h
++++ b/include/uapi/drm/nouveau_drm.h
+@@ -54,6 +54,27 @@ extern "C" {
+ */
+ #define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+
++/*
++ * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
++ *
++ * Query the VRAM BAR size.
++ */
++#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
++
++/*
++ * NOUVEAU_GETPARAM_VRAM_USED
++ *
++ * Get the amount of VRAM currently in use.
++ */
++#define NOUVEAU_GETPARAM_VRAM_USED 19
++
++/*
++ * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
++ *
++ * Query whether tile mode and PTE kind are accepted with VM allocs or not.
++ */
++#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
++
+ struct drm_nouveau_getparam {
+ __u64 param;
+ __u64 value;
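Userspace reaches these parameters through the long-standing getparam ioctl; a hedged sketch using libdrm's drmCommandWriteRead() (error handling elided):

    /* sketch: query how much VRAM is in use */
    struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_VRAM_USED };

    if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)) == 0)
        printf("VRAM in use: %llu bytes\n", (unsigned long long)gp.value);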
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 0448700890f77d..ba6e346c8d669a 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -77,12 +77,29 @@ struct bpf_insn {
+ __s32 imm; /* signed immediate constant */
+ };
+
+-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
++/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
++ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
++ * the trailing flexible array member) instead.
++ */
+ struct bpf_lpm_trie_key {
+ __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
+ __u8 data[0]; /* Arbitrary size */
+ };
+
++/* Header for bpf_lpm_trie_key structs */
++struct bpf_lpm_trie_key_hdr {
++ __u32 prefixlen;
++};
++
++/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
++struct bpf_lpm_trie_key_u8 {
++ union {
++ struct bpf_lpm_trie_key_hdr hdr;
++ __u32 prefixlen;
++ };
++ __u8 data[]; /* Arbitrary size */
++};
++
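Splitting the header from the flexible array lets callers declare a fully typed key instead of flexing the old zero-length data[] member. A hedged sketch of an IPv4 key for a BPF LPM-trie map; lpm_map and the parsed iph pointer are assumed to exist elsewhere in the program:

    /* sketch: typed LPM-trie key built on the new header struct */
    struct ipv4_lpm_key {
        struct bpf_lpm_trie_key_hdr hdr;
        __u32 addr;                        /* network byte order */
    };

    struct ipv4_lpm_key key = {
        .hdr.prefixlen = 32,               /* longest-prefix match up to /32 */
        .addr = iph->daddr,
    };
    void *value = bpf_map_lookup_elem(&lpm_map, &key);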
+ struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id; /* cgroup inode id */
+ __u32 attach_type; /* program attach type (enum bpf_attach_type) */
+@@ -3257,6 +3274,11 @@ union bpf_attr {
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
++ * **BPF_FIB_LOOKUP_SRC**
++ * Derive and set source IP addr in *params*->ipv{4,6}_src
++ * for the nexthop. If the src addr cannot be derived,
++ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
++ * case, *params*->dmac and *params*->smac are not set either.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+@@ -4490,6 +4512,8 @@ union bpf_attr {
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
++ * Note: the user stack will only be populated if the *task* is
++ * the current task; all other tasks will return -EOPNOTSUPP.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+@@ -4501,6 +4525,7 @@ union bpf_attr {
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
++ * The *task* must be the current task.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+@@ -6953,6 +6978,7 @@ enum {
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
++ BPF_FIB_LOOKUP_SRC = (1U << 4),
+ };
+
+ enum {
+@@ -6965,6 +6991,7 @@ enum {
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
++ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
+ };
+
+ struct bpf_fib_lookup {
+@@ -6984,7 +7011,7 @@ struct bpf_fib_lookup {
+
+ /* output: MTU value */
+ __u16 mtu_result;
+- };
++ } __attribute__((packed, aligned(2)));
+ /* input: L3 device index for lookup
+ * output: device index from FIB lookup
+ */
+@@ -6999,6 +7026,9 @@ struct bpf_fib_lookup {
+ __u32 rt_metric;
+ };
+
++ /* input: source address to consider for lookup
++ * output: source address result from lookup
++ */
+ union {
+ __be32 ipv4_src;
+ __u32 ipv6_src[4]; /* in6_addr; network order */
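
Together, the flag and the new return code let a program ask the kernel to pick a source address for the nexthop. A sketch of the call from an XDP program (field setup elided; assumes libbpf's helper headers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __always_inline int fib_lookup_with_src(struct xdp_md *ctx,
					       struct bpf_fib_lookup *params)
{
	int rc = bpf_fib_lookup(ctx, params, sizeof(*params),
				BPF_FIB_LOOKUP_SRC);

	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
		return 0;	/* params->ipv{4,6}_src now holds the source */
	return rc;		/* NO_SRC_ADDR also leaves dmac/smac unset */
}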
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index dbb8b96da50d30..6f776faaa791c1 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -612,6 +612,9 @@ struct btrfs_ioctl_clone_range_args {
+ */
+ #define BTRFS_DEFRAG_RANGE_COMPRESS 1
+ #define BTRFS_DEFRAG_RANGE_START_IO 2
++#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
++ BTRFS_DEFRAG_RANGE_START_IO)
++
+ struct btrfs_ioctl_defrag_range_args {
+ /* start of the defrag operation */
+ __u64 start;
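
A supported-flags mask exists so the defrag ioctl can reject unknown bits instead of silently ignoring them; sketched from this patch's intent, the consuming check is essentially:

#include <errno.h>
#include <linux/btrfs.h>

static int check_defrag_flags(const struct btrfs_ioctl_defrag_range_args *range)
{
	if (range->flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP)
		return -EOPNOTSUPP;	/* unknown flag bits */
	return 0;
}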
+diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
+index b8e071abaea5ac..3eba3934512e60 100644
+--- a/include/uapi/linux/cec.h
++++ b/include/uapi/linux/cec.h
+@@ -132,6 +132,8 @@ static inline void cec_msg_init(struct cec_msg *msg,
+ * Set the msg destination to the orig initiator and the msg initiator to the
+ * orig destination. Note that msg and orig may be the same pointer, in which
+ * case the change is done in place.
++ *
++ * It also zeroes the reply, timeout and flags fields.
+ */
+ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+ struct cec_msg *orig)
+@@ -139,7 +141,9 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+ /* The destination becomes the initiator and vice versa */
+ msg->msg[0] = (cec_msg_destination(orig) << 4) |
+ cec_msg_initiator(orig);
+- msg->reply = msg->timeout = 0;
++ msg->reply = 0;
++ msg->timeout = 0;
++ msg->flags = 0;
+ }
+
+ /**
+diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
+index f2afb7cc4926cd..18e3745b86cd48 100644
+--- a/include/uapi/linux/cn_proc.h
++++ b/include/uapi/linux/cn_proc.h
+@@ -69,8 +69,7 @@ struct proc_input {
+
+ static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
+ {
+- ev_type &= PROC_EVENT_ALL;
+- return ev_type;
++ return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
+ }
+
+ /*
+diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
+index 6c80f96049bd07..282e90aeb163c0 100644
+--- a/include/uapi/linux/fcntl.h
++++ b/include/uapi/linux/fcntl.h
+@@ -116,5 +116,8 @@
+ #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
+ compare object identity and may not
+ be usable to open_by_handle_at(2) */
++#if defined(__KERNEL__)
++#define AT_GETATTR_NOSEC 0x80000000
++#endif
+
+ #endif /* _UAPI_LINUX_FCNTL_H */
+diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
+index db92a7202b342b..e7418d15fe3906 100644
+--- a/include/uapi/linux/fuse.h
++++ b/include/uapi/linux/fuse.h
+@@ -209,7 +209,7 @@
+ * - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+- * - add FUSE_DIRECT_IO_RELAX
++ * - add FUSE_DIRECT_IO_ALLOW_MMAP
+ * - add FUSE_STATX and related structures
+ */
+
+@@ -409,8 +409,7 @@ struct fuse_file_lock {
+ * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
+ * symlink and mknod (single group that matches parent)
+ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+- * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now
+- * allow shared mmap
++ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
+ */
+ #define FUSE_ASYNC_READ (1 << 0)
+ #define FUSE_POSIX_LOCKS (1 << 1)
+@@ -449,7 +448,10 @@ struct fuse_file_lock {
+ #define FUSE_HAS_INODE_DAX (1ULL << 33)
+ #define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
+ #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+-#define FUSE_DIRECT_IO_RELAX (1ULL << 36)
++#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
++
++/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
++#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
+
+ /**
+ * CUSE INIT request/reply flags
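
Since the alias resolves to the same bit, sources written against the old name keep compiling with an unchanged ABI; this can be checked statically:

#include <linux/fuse.h>

_Static_assert(FUSE_DIRECT_IO_RELAX == FUSE_DIRECT_IO_ALLOW_MMAP,
	       "compat alias must keep the old bit value");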
+diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
+index c4c53a9ab9595b..ff8d21f9e95b77 100644
+--- a/include/uapi/linux/in6.h
++++ b/include/uapi/linux/in6.h
+@@ -145,7 +145,7 @@ struct in6_flowlabel_req {
+ #define IPV6_TLV_PADN 1
+ #define IPV6_TLV_ROUTERALERT 5
+ #define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
+-#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */
++#define IPV6_TLV_IOAM 49 /* RFC 9486 */
+ #define IPV6_TLV_JUMBO 194
+ #define IPV6_TLV_HAO 201 /* home address option */
+
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index 022a520e31fc23..a4206723f50333 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -602,6 +602,7 @@
+
+ #define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
+ #define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
++#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
+
+ #define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
+ #define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
+@@ -617,6 +618,8 @@
+ #define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */
+ #define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */
+ #define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */
++#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */
++#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94) */
+
+ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
+ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
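
Both codes slot into the existing consumer-control block; a userspace sketch of catching the new "Do Not Disturb" press on an evdev node (fd setup omitted):

#include <unistd.h>
#include <linux/input.h>

static void wait_for_dnd_press(int evdev_fd)
{
	struct input_event ev;

	while (read(evdev_fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_KEY && ev.code == KEY_DO_NOT_DISTURB &&
		    ev.value == 1)
			break;		/* key-down seen */
	}
}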
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index eeb2fdcbdcb708..cd924c959d7327 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -909,14 +909,25 @@ enum kfd_dbg_trap_exception_code {
+ KFD_EC_MASK(EC_DEVICE_NEW))
+ #define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
+ KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
++#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
++ KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
+
+ /* Checks for exception code types for KFD search */
++#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
+ #define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
+- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
++ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+ #define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
+- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
++ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+ #define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
+- (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
++ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
++#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \
++ (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
+
+
+ /* Runtime enable states */
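
The validity guard matters because KFD_EC_MASK() shifts by ecode - 1, so feeding EC_NONE (0) to the old predicates produced a negative shift. With the guard in front, classification is safe for arbitrary input; a sketch:

#include <linux/kfd_ioctl.h>

/* Safe even for ecode == EC_NONE or out-of-range values. */
static const char *classify_exception(int ecode)
{
	if (KFD_DBG_EC_TYPE_IS_PACKET(ecode))
		return "packet";
	if (KFD_DBG_EC_TYPE_IS_QUEUE(ecode))
		return "queue";
	if (KFD_DBG_EC_TYPE_IS_DEVICE(ecode))
		return "device";
	if (KFD_DBG_EC_TYPE_IS_PROCESS(ecode))
		return "process";
	return "invalid";
}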
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index ca30232b7bc8af..9c29015d09c10f 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -285,9 +285,11 @@ enum nft_rule_attributes {
+ /**
+ * enum nft_rule_compat_flags - nf_tables rule compat flags
+ *
++ * @NFT_RULE_COMPAT_F_UNUSED: unused
+ * @NFT_RULE_COMPAT_F_INV: invert the check result
+ */
+ enum nft_rule_compat_flags {
++ NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
+ NFT_RULE_COMPAT_F_INV = (1 << 1),
+ NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
+ };
+@@ -1370,7 +1372,7 @@ enum nft_secmark_attributes {
+ #define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1)
+
+ /* Max security context length */
+-#define NFT_SECMARK_CTX_MAXLEN 256
++#define NFT_SECMARK_CTX_MAXLEN 4096
+
+ /**
+ * enum nft_reject_types - nf_tables reject expression reject types
+@@ -1688,7 +1690,7 @@ enum nft_flowtable_flags {
+ *
+ * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
+ * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
+- * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
++ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_NESTED)
+ * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
+ * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index e5f558d9649396..ade8dabf621085 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -1045,6 +1045,7 @@
+ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
+ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
+ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
++#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
+
+ #define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
+
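PCI_EXP_DPC_RP_PIO_FEP masks bits 12:8 of the DPC status word, the index of the first RP PIO error logged. A kernel-side sketch (cap being the DPC capability offset; the shift follows from the mask):

#include <linux/pci.h>

/* Sketch: read the RP PIO First Error Pointer from DPC status. */
static u8 dpc_first_error(struct pci_dev *pdev, u16 cap)
{
	u16 status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	return (status & PCI_EXP_DPC_RP_PIO_FEP) >> 8;
}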
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index 3c36aeade991e9..370ed14b1ae092 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -283,7 +283,8 @@ struct prctl_mm_map {
+
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE 65
+-# define PR_MDWE_REFUSE_EXEC_GAIN 1
++# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
++# define PR_MDWE_NO_INHERIT (1UL << 1)
+
+ #define PR_GET_MDWE 66
+
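With the second bit, MDWE can be confined to the calling process, so children start with a clean state. A userspace sketch (assumes headers from a 6.6+ kernel; the kernel rejects NO_INHERIT unless REFUSE_EXEC_GAIN is also set):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Refuse future W+X mappings in this process only. */
	if (prctl(PR_SET_MDWE,
		  PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, 0L, 0L, 0L))
		perror("PR_SET_MDWE");
	return 0;
}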
+diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
+index 26f33a4c253d75..b2b72886cb6d1d 100644
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -24,7 +24,7 @@ enum
+ IPSTATS_MIB_INOCTETS, /* InOctets */
+ IPSTATS_MIB_INDELIVERS, /* InDelivers */
+ IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
+- IPSTATS_MIB_OUTPKTS, /* OutRequests */
++ IPSTATS_MIB_OUTREQUESTS, /* OutRequests */
+ IPSTATS_MIB_OUTOCTETS, /* OutOctets */
+ /* other fields */
+ IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
+@@ -57,6 +57,7 @@ enum
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
++ IPSTATS_MIB_OUTPKTS, /* OutTransmits */
+ __IPSTATS_MIB_MAX
+ };
+
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 5c6c4269f7efe4..2ec6f35cda32e9 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -27,7 +27,7 @@
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+- }
++ } ATTRS
+
+ #ifdef __cplusplus
+ /* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
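
Carrying ATTRS onto the union is what makes layout-changing attributes effective: previously __struct_group() packed the two inner structs but left the wrapping union naturally aligned, so sizeof() and alignment could disagree with the tag. A sketch of an affected shape (names hypothetical):

#include <linux/stddef.h>
#include <linux/types.h>

struct msg_hdr {
	__u8 type;
	/* with the fix, the union itself is packed too, so the group
	 * and its tag agree on size and alignment */
	__struct_group(msg_body, body, __attribute__((__packed__)),
		__be16 len;
		__u8 payload[3];
	);
};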
+diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
+index 879eeb0a084b4d..d1d08da6331ab5 100644
+--- a/include/uapi/linux/tcp.h
++++ b/include/uapi/linux/tcp.h
+@@ -289,6 +289,18 @@ struct tcp_info {
+ */
+
+ __u32 tcpi_rehash; /* PLB or timeout triggered rehash attempts */
++
++ __u16 tcpi_total_rto; /* Total number of RTO timeouts, including
++ * SYN/SYN-ACK and recurring timeouts.
++ */
++ __u16 tcpi_total_rto_recoveries; /* Total number of RTO
++ * recoveries, including any
++ * unfinished recovery.
++ */
++ __u32 tcpi_total_rto_time; /* Total time spent in RTO recoveries
++ * in milliseconds, including any
++ * unfinished recovery.
++ */
+ };
+
+ /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
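
New fields only grow the struct, and TCP_INFO copies at most the caller-supplied length, so older binaries are unaffected. A userspace sketch (assumes headers recent enough to carry the new fields):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static void print_rto_stats(int sock)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("RTOs %u, recoveries %u, %u ms in recovery\n",
		       ti.tcpi_total_rto, ti.tcpi_total_rto_recoveries,
		       ti.tcpi_total_rto_time);
}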
+diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
+index b9cfc5c9626823..3830b428ecf19d 100644
+--- a/include/uapi/linux/ublk_cmd.h
++++ b/include/uapi/linux/ublk_cmd.h
+@@ -173,7 +173,13 @@
+ /* use ioctl encoding for uring command */
+ #define UBLK_F_CMD_IOCTL_ENCODE (1UL << 6)
+
+-/* Copy between request and user buffer by pread()/pwrite() */
++/*
++ * Copy between request and user buffer by pread()/pwrite()
++ *
++ * Not available for UBLK_F_UNPRIVILEGED_DEV: otherwise userspace could
++ * deceive the kernel by never filling the request buffer, leaking
++ * uninitialized kernel data.
++ */
+ #define UBLK_F_USER_COPY (1UL << 7)
+
+ /*
+diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h
+index 2984aae4a2b4fb..f74f3aedd49ce9 100644
+--- a/include/uapi/linux/user_events.h
++++ b/include/uapi/linux/user_events.h
+@@ -17,6 +17,15 @@
+ /* Create dynamic location entry within a 32-bit value */
+ #define DYN_LOC(offset, size) ((size) << 16 | (offset))
+
++/* List of supported registration flags */
++enum user_reg_flag {
++ /* Event will not delete upon last reference closing */
++ USER_EVENT_REG_PERSIST = 1U << 0,
++
++ /* This value or above is currently non-ABI */
++ USER_EVENT_REG_MAX = 1U << 1,
++};
++
+ /*
+ * Describes an event registration and stores the results of the registration.
+ * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
+@@ -33,7 +42,7 @@ struct user_reg {
+ /* Input: Enable size in bytes at address */
+ __u8 enable_size;
+
+- /* Input: Flags for future use, set to 0 */
++ /* Input: Flags to use, if any */
+ __u16 flags;
+
+ /* Input: Address to update when enabled */
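
A sketch of requesting persistence at registration time; the ioctl name and field layout follow the kernel's user_events documentation, and the event description string is illustrative — treat the details as assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/user_events.h>

static int register_persistent(int data_fd, __u32 *enable_word)
{
	struct user_reg reg;

	memset(&reg, 0, sizeof(reg));
	reg.size = sizeof(reg);
	reg.enable_bit = 0;			/* watch bit 0 of the word */
	reg.enable_size = sizeof(*enable_word);
	reg.flags = USER_EVENT_REG_PERSIST;	/* outlive the last close */
	reg.enable_addr = (__u64)(unsigned long)enable_word;
	reg.name_args = (__u64)(unsigned long)"my_event u32 value";

	return ioctl(data_fd, DIAG_IOCSREG, &reg);
}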
+diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
+index 4a195b68f28f6d..b383c2fe0cf354 100644
+--- a/include/uapi/linux/v4l2-subdev.h
++++ b/include/uapi/linux/v4l2-subdev.h
+@@ -239,7 +239,7 @@ struct v4l2_subdev_routing {
+ * set (which is the default), the 'stream' fields will be forced to 0 by the
+ * kernel.
+ */
+- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0)
++ #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+ /**
+ * struct v4l2_subdev_client_capability - Capabilities of the client accessing
+diff --git a/include/uapi/linux/virtio_bt.h b/include/uapi/linux/virtio_bt.h
+index af798f4c968042..3cc7d633456b6b 100644
+--- a/include/uapi/linux/virtio_bt.h
++++ b/include/uapi/linux/virtio_bt.h
+@@ -13,7 +13,6 @@
+
+ enum virtio_bt_config_type {
+ VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
+- VIRTIO_BT_CONFIG_TYPE_AMP = 1,
+ };
+
+ enum virtio_bt_config_vendor {
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index c60ca33eac594b..ed07181d4eff91 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -191,4 +191,21 @@ struct sockaddr_vm {
+
+ #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+
++/* MSG_ZEROCOPY notifications are encoded in the standard error format,
++ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
++ * kernel source tree for more details.
++ */
++
++/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define SOL_VSOCK 287
++
++/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define VSOCK_RECVERR 1
++
+ #endif /* _UAPI_VM_SOCKETS_H */
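
With these two constants, vsock zerocopy completions are parsed just like the AF_INET flavour, only with a different cmsg level/type pair. A sketch of draining one notification:

#include <string.h>
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <linux/vm_sockets.h>

/* Sketch: reap one MSG_ZEROCOPY completion from a vsock's error queue. */
static void reap_zerocopy(int vsock_fd)
{
	char control[128];
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	if (recvmsg(vsock_fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_VSOCK &&
		    cm->cmsg_type == VSOCK_RECVERR) {
			struct sock_extended_err *serr =
				(struct sock_extended_err *)CMSG_DATA(cm);
			/* serr->ee_info..ee_data: completed send range */
			(void)serr;
		}
	}
}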
+diff --git a/include/uapi/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h
+index 6e574d7b7d79cd..393f2ee9c04228 100644
+--- a/include/uapi/linux/zorro_ids.h
++++ b/include/uapi/linux/zorro_ids.h
+@@ -449,6 +449,9 @@
+ #define ZORRO_PROD_VMC_ISDN_BLASTER_Z2 ZORRO_ID(VMC, 0x01, 0)
+ #define ZORRO_PROD_VMC_HYPERCOM_4 ZORRO_ID(VMC, 0x02, 0)
+
++#define ZORRO_MANUF_CSLAB 0x1400
++#define ZORRO_PROD_CSLAB_WARP_1260 ZORRO_ID(CSLAB, 0x65, 0)
++
+ #define ZORRO_MANUF_INFORMATION 0x157C
+ #define ZORRO_PROD_INFORMATION_ISDN_ENGINE_I ZORRO_ID(INFORMATION, 0x64, 0)
+
+diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
+index 6e7c67a0cca3a8..3342276aeac137 100644
+--- a/include/uapi/rdma/bnxt_re-abi.h
++++ b/include/uapi/rdma/bnxt_re-abi.h
+@@ -54,6 +54,8 @@ enum {
+ BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
+ BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
+ BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
++ BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
++ BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40,
+ };
+
+ enum bnxt_re_wqe_mode {
+@@ -62,6 +64,14 @@ enum bnxt_re_wqe_mode {
+ BNXT_QPLIB_WQE_MODE_INVALID = 0x02,
+ };
+
++enum {
++ BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
++};
++
++struct bnxt_re_uctx_req {
++ __aligned_u64 comp_mask;
++};
++
+ struct bnxt_re_uctx_resp {
+ __u32 dev_id;
+ __u32 max_qp;
+diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
+index 907d345f04f93b..353183e863e47a 100644
+--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
++++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
+@@ -382,7 +382,7 @@ struct mpi3mr_bsg_in_reply_buf {
+ __u8 mpi_reply_type;
+ __u8 rsvd1;
+ __u16 rsvd2;
+- __u8 reply_buf[1];
++ __u8 reply_buf[];
+ };
+
+ /**
+diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
+index 375718ba4ab620..e145bca5105c59 100644
+--- a/include/uapi/xen/privcmd.h
++++ b/include/uapi/xen/privcmd.h
+@@ -102,7 +102,7 @@ struct privcmd_mmap_resource {
+ #define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+ struct privcmd_irqfd {
+- void __user *dm_op;
++ __u64 dm_op;
+ __u32 size; /* Size of structure pointed by dm_op */
+ __u32 fd;
+ __u32 flags;
+@@ -138,6 +138,6 @@ struct privcmd_irqfd {
+ #define IOCTL_PRIVCMD_MMAP_RESOURCE \
+ _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
+ #define IOCTL_PRIVCMD_IRQFD \
+- _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd))
++ _IOW('P', 8, struct privcmd_irqfd)
+
+ #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
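
Storing the pointer as __u64 gives the structure one layout on 32- and 64-bit userspace (and the ioctl number now encodes the write direction). Callers cast explicitly; a sketch with hypothetical buffer and eventfd arguments:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int assign_irqfd(int xen_fd, int efd, void *dm_op_buf, __u32 len)
{
	struct privcmd_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.dm_op = (__u64)(uintptr_t)dm_op_buf;	/* same layout everywhere */
	irqfd.size = len;
	irqfd.fd = efd;					/* eventfd to trigger */
	return ioctl(xen_fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
}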
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 7d07b256e906b9..2a7d6f269d9e30 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -1064,6 +1064,7 @@ struct ufs_hba {
+ bool ext_iid_sup;
+ bool scsi_host_added;
+ bool mcq_sup;
++ bool lsdb_sup;
+ bool mcq_enabled;
+ struct ufshcd_res_info res[RES_MAX];
+ void __iomem *mcq_base;
+@@ -1117,6 +1118,12 @@ static inline bool is_mcq_enabled(struct ufs_hba *hba)
+ return hba->mcq_enabled;
+ }
+
++static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
++ enum ufshcd_mcq_opr opr, int idx)
++{
++ return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx;
++}
++
+ #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+ {
+diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
+index d5accacae6bca7..ae93b30d25893e 100644
+--- a/include/ufs/ufshci.h
++++ b/include/ufs/ufshci.h
+@@ -75,6 +75,7 @@ enum {
+ MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+ MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+ MASK_CRYPTO_SUPPORT = 0x10000000,
++ MASK_LSDB_SUPPORT = 0x20000000,
+ MASK_MCQ_SUPPORT = 0x40000000,
+ };
+
+diff --git a/include/video/sticore.h b/include/video/sticore.h
+index 945ad60463a189..012b5b46ad7d0b 100644
+--- a/include/video/sticore.h
++++ b/include/video/sticore.h
+@@ -232,7 +232,7 @@ struct sti_rom_font {
+ u8 height;
+ u8 font_type; /* language type */
+ u8 bytes_per_char;
+- u32 next_font;
++ s32 next_font; /* note: signed int */
+ u8 underline_height;
+ u8 underline_pos;
+ u8 res008[2];
+diff --git a/include/xen/events.h b/include/xen/events.h
+index 23932b0673dc74..7488cd51fbf4f2 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -101,8 +101,8 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
+
+ /* Determine the IRQ which is bound to an event channel */
+ unsigned int irq_from_evtchn(evtchn_port_t evtchn);
+-int irq_from_virq(unsigned int cpu, unsigned int virq);
+-evtchn_port_t evtchn_from_irq(unsigned irq);
++int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
++ evtchn_port_t *evtchn);
+
+ int xen_set_callback_via(uint64_t via);
+ int xen_evtchn_do_upcall(void);
+diff --git a/init/Kconfig b/init/Kconfig
+index 6d35728b94b2b3..6054ba684c5396 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -89,6 +89,15 @@ config CC_HAS_ASM_GOTO_TIED_OUTPUT
+ # Detect buggy gcc and clang, fixed in gcc-11 clang-14.
+ def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+
++config GCC_ASM_GOTO_OUTPUT_WORKAROUND
++ bool
++ depends on CC_IS_GCC && CC_HAS_ASM_GOTO_OUTPUT
++ # Fixed in GCC 14, 13.3, 12.4 and 11.5
++ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
++ default y if GCC_VERSION < 110500
++ default y if GCC_VERSION >= 120000 && GCC_VERSION < 120400
++ default y if GCC_VERSION >= 130000 && GCC_VERSION < 130300
++
+ config TOOLS_SUPPORT_RELR
+ def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
+
+@@ -867,14 +876,14 @@ config CC_IMPLICIT_FALLTHROUGH
+ default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+
+-# Currently, disable gcc-11+ array-bounds globally.
++# Currently, disable gcc-10+ array-bounds globally.
+ # It's still broken in gcc-13, so no upper bound yet.
+-config GCC11_NO_ARRAY_BOUNDS
++config GCC10_NO_ARRAY_BOUNDS
+ def_bool y
+
+ config CC_NO_ARRAY_BOUNDS
+ bool
+- default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
++ default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
+
+ #
+ # For architectures that know their GCC __int128 support is sound
+@@ -1885,11 +1894,12 @@ config RUST
+ bool "Rust support"
+ depends on HAVE_RUST
+ depends on RUST_IS_AVAILABLE
++ depends on !CFI_CLANG
+ depends on !MODVERSIONS
+ depends on !GCC_PLUGINS
+ depends on !RANDSTRUCT
++ depends on !SHADOW_CALL_STACK
+ depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+- select CONSTRUCTORS
+ help
+ Enables Rust support in the kernel.
+
+@@ -1906,12 +1916,15 @@ config RUST
+ config RUSTC_VERSION_TEXT
+ string
+ depends on RUST
+- default $(shell,command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n)
++ default "$(shell,$(RUSTC) --version 2>/dev/null)"
+
+ config BINDGEN_VERSION_TEXT
+ string
+ depends on RUST
+- default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n)
++ # The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
++ # (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
++ # the minimum version is upgraded past that (0.69.1 already fixed the issue).
++ default "$(shell,$(BINDGEN) --version workaround-for-0.69.0 2>/dev/null)"
+
+ #
+ # Place an empty function call at each tracepoint site. Can be
+diff --git a/init/Makefile b/init/Makefile
+index ec557ada3c12ef..cbac576c57d63f 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -60,4 +60,5 @@ include/generated/utsversion.h: FORCE
+ $(obj)/version-timestamp.o: include/generated/utsversion.h
+ CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+ KASAN_SANITIZE_version-timestamp.o := n
++KCSAN_SANITIZE_version-timestamp.o := n
+ GCOV_PROFILE_version-timestamp.o := n
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 5dfd30b13f4857..21d065a55ad884 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -510,7 +510,10 @@ struct file_system_type rootfs_fs_type = {
+
+ void __init init_rootfs(void)
+ {
+- if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
+- (!root_fs_names || strstr(root_fs_names, "tmpfs")))
+- is_tmpfs = true;
++ if (IS_ENABLED(CONFIG_TMPFS)) {
++ if (!saved_root_name[0] && !root_fs_names)
++ is_tmpfs = true;
++ else if (root_fs_names && !!strstr(root_fs_names, "tmpfs"))
++ is_tmpfs = true;
++ }
+ }
+diff --git a/init/initramfs.c b/init/initramfs.c
+index 8d0fd946cdd2b3..efc477b905a482 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -673,7 +673,7 @@ static void __init populate_initrd_image(char *err)
+
+ printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
+ err);
+- file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
++ file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
+ if (IS_ERR(file))
+ return;
+
+diff --git a/init/main.c b/init/main.c
+index 436d73261810bd..c787e94cc8982b 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -88,6 +88,7 @@
+ #include <linux/sched/task_stack.h>
+ #include <linux/context_tracking.h>
+ #include <linux/random.h>
++#include <linux/moduleloader.h>
+ #include <linux/list.h>
+ #include <linux/integrity.h>
+ #include <linux/proc_ns.h>
+@@ -530,6 +531,10 @@ static int __init unknown_bootoption(char *param, char *val,
+ {
+ size_t len = strlen(param);
+
++ /* Handle params aliased to sysctls */
++ if (sysctl_is_alias(param))
++ return 0;
++
+ repair_env_string(param, val);
+
+ /* Handle obsolete-style parameters */
+@@ -599,7 +604,6 @@ static int __init rdinit_setup(char *str)
+ __setup("rdinit=", rdinit_setup);
+
+ #ifndef CONFIG_SMP
+-static const unsigned int setup_max_cpus = NR_CPUS;
+ static inline void setup_nr_cpu_ids(void) { }
+ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+ #endif
+@@ -625,6 +629,8 @@ static void __init setup_command_line(char *command_line)
+ if (!saved_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+
++ len = xlen + strlen(command_line) + 1;
++
+ static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!static_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+@@ -1398,11 +1404,11 @@ static void mark_readonly(void)
+ if (rodata_enabled) {
+ /*
+ * load_module() results in W+X mappings, which are cleaned
+- * up with call_rcu(). Let's make sure that queued work is
++ * up with init_free_wq. Let's make sure that queued work is
+ * flushed so that we don't hit false positives looking for
+ * insecure pages which are W+X.
+ */
+- rcu_barrier();
++ flush_module_init_free_work();
+ mark_rodata_ro();
+ rodata_test();
+ } else
+diff --git a/io_uring/cancel.c b/io_uring/cancel.c
+index 7b23607cf4afd9..a5d51471feebb1 100644
+--- a/io_uring/cancel.c
++++ b/io_uring/cancel.c
+@@ -263,7 +263,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+ };
+ ktime_t timeout = KTIME_MAX;
+ struct io_uring_sync_cancel_reg sc;
+- struct fd f = { };
++ struct file *file = NULL;
+ DEFINE_WAIT(wait);
+ int ret, i;
+
+@@ -285,10 +285,10 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+ /* we can grab a normal file descriptor upfront */
+ if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
+ !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+- f = fdget(sc.fd);
+- if (!f.file)
++ file = fget(sc.fd);
++ if (!file)
+ return -EBADF;
+- cd.file = f.file;
++ cd.file = file;
+ }
+
+ ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+@@ -338,6 +338,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+ if (ret == -ENOENT || ret > 0)
+ ret = 0;
+ out:
+- fdput(f);
++ if (file)
++ fput(file);
+ return ret;
+ }
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index f04a43044d917c..976e9500f6518c 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -145,13 +145,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
+ if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+ struct io_sq_data *sq = ctx->sq_data;
+
+- if (mutex_trylock(&sq->lock)) {
+- if (sq->thread) {
+- sq_pid = task_pid_nr(sq->thread);
+- sq_cpu = task_cpu(sq->thread);
+- }
+- mutex_unlock(&sq->lock);
+- }
++ sq_pid = sq->task_pid;
++ sq_cpu = sq->sq_cpu;
+ }
+
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+diff --git a/io_uring/filetable.c b/io_uring/filetable.c
+index e7d749991de426..6e86e6188dbeeb 100644
+--- a/io_uring/filetable.c
++++ b/io_uring/filetable.c
+@@ -87,13 +87,10 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
+ io_file_bitmap_clear(&ctx->file_table, slot_index);
+ }
+
+- ret = io_scm_file_account(ctx, file);
+- if (!ret) {
+- *io_get_tag_slot(ctx->file_data, slot_index) = 0;
+- io_fixed_file_set(file_slot, file);
+- io_file_bitmap_set(&ctx->file_table, slot_index);
+- }
+- return ret;
++ *io_get_tag_slot(ctx->file_data, slot_index) = 0;
++ io_fixed_file_set(file_slot, file);
++ io_file_bitmap_set(&ctx->file_table, slot_index);
++ return 0;
+ }
+
+ int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
+diff --git a/io_uring/fs.c b/io_uring/fs.c
+index 08e3b175469c68..eccea851dd5a28 100644
+--- a/io_uring/fs.c
++++ b/io_uring/fs.c
+@@ -254,7 +254,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ lnk->flags = READ_ONCE(sqe->hardlink_flags);
+
+- lnk->oldpath = getname(oldf);
++ lnk->oldpath = getname_uflags(oldf, lnk->flags);
+ if (IS_ERR(lnk->oldpath))
+ return PTR_ERR(lnk->oldpath);
+
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 522196dfb0ff5a..a1e31723c9ed69 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -13,6 +13,7 @@
+ #include <linux/slab.h>
+ #include <linux/rculist_nulls.h>
+ #include <linux/cpu.h>
++#include <linux/cpuset.h>
+ #include <linux/task_work.h>
+ #include <linux/audit.h>
+ #include <linux/mmu_context.h>
+@@ -23,12 +24,13 @@
+ #include "io_uring.h"
+
+ #define WORKER_IDLE_TIMEOUT (5 * HZ)
++#define WORKER_INIT_LIMIT 3
+
+ enum {
+- IO_WORKER_F_UP = 1, /* up and active */
+- IO_WORKER_F_RUNNING = 2, /* account as running */
+- IO_WORKER_F_FREE = 4, /* worker on free list */
+- IO_WORKER_F_BOUND = 8, /* is doing bounded work */
++ IO_WORKER_F_UP = 0, /* up and active */
++ IO_WORKER_F_RUNNING = 1, /* account as running */
++ IO_WORKER_F_FREE = 2, /* worker on free list */
++ IO_WORKER_F_BOUND = 3, /* is doing bounded work */
+ };
+
+ enum {
+@@ -44,7 +46,8 @@ enum {
+ */
+ struct io_worker {
+ refcount_t ref;
+- unsigned flags;
++ int create_index;
++ unsigned long flags;
+ struct hlist_nulls_node nulls_node;
+ struct list_head all_list;
+ struct task_struct *task;
+@@ -58,7 +61,7 @@ struct io_worker {
+
+ unsigned long create_state;
+ struct callback_head create_work;
+- int create_index;
++ int init_retries;
+
+ union {
+ struct rcu_head rcu;
+@@ -165,7 +168,7 @@ static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
+
+ static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
+ {
+- return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
++ return io_get_acct(worker->wq, test_bit(IO_WORKER_F_BOUND, &worker->flags));
+ }
+
+ static void io_worker_ref_put(struct io_wq *wq)
+@@ -225,7 +228,7 @@ static void io_worker_exit(struct io_worker *worker)
+ wait_for_completion(&worker->ref_done);
+
+ raw_spin_lock(&wq->lock);
+- if (worker->flags & IO_WORKER_F_FREE)
++ if (test_bit(IO_WORKER_F_FREE, &worker->flags))
+ hlist_nulls_del_rcu(&worker->nulls_node);
+ list_del_rcu(&worker->all_list);
+ raw_spin_unlock(&wq->lock);
+@@ -410,7 +413,7 @@ static void io_wq_dec_running(struct io_worker *worker)
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
+ struct io_wq *wq = worker->wq;
+
+- if (!(worker->flags & IO_WORKER_F_UP))
++ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
+ return;
+
+ if (!atomic_dec_and_test(&acct->nr_running))
+@@ -430,8 +433,8 @@ static void io_wq_dec_running(struct io_worker *worker)
+ */
+ static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
+ {
+- if (worker->flags & IO_WORKER_F_FREE) {
+- worker->flags &= ~IO_WORKER_F_FREE;
++ if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
++ clear_bit(IO_WORKER_F_FREE, &worker->flags);
+ raw_spin_lock(&wq->lock);
+ hlist_nulls_del_init_rcu(&worker->nulls_node);
+ raw_spin_unlock(&wq->lock);
+@@ -444,8 +447,8 @@ static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
+ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
+ __must_hold(wq->lock)
+ {
+- if (!(worker->flags & IO_WORKER_F_FREE)) {
+- worker->flags |= IO_WORKER_F_FREE;
++ if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
++ set_bit(IO_WORKER_F_FREE, &worker->flags);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
+ }
+ }
+@@ -564,10 +567,7 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
+ * clear the stalled flag.
+ */
+ work = io_get_next_work(acct, worker);
+- raw_spin_unlock(&acct->lock);
+ if (work) {
+- __io_worker_busy(wq, worker);
+-
+ /*
+ * Make sure cancelation can find this, even before
+ * it becomes the active work. That avoids a window
+@@ -578,9 +578,15 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
+ raw_spin_lock(&worker->lock);
+ worker->next_work = work;
+ raw_spin_unlock(&worker->lock);
+- } else {
+- break;
+ }
++
++ raw_spin_unlock(&acct->lock);
++
++ if (!work)
++ break;
++
++ __io_worker_busy(wq, worker);
++
+ io_assign_current_work(worker, work);
+ __set_current_state(TASK_RUNNING);
+
+@@ -631,7 +637,8 @@ static int io_wq_worker(void *data)
+ bool exit_mask = false, last_timeout = false;
+ char buf[TASK_COMM_LEN];
+
+- worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
++ set_mask_bits(&worker->flags, 0,
++ BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));
+
+ snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
+ set_task_comm(current, buf);
+@@ -695,11 +702,11 @@ void io_wq_worker_running(struct task_struct *tsk)
+
+ if (!worker)
+ return;
+- if (!(worker->flags & IO_WORKER_F_UP))
++ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
+ return;
+- if (worker->flags & IO_WORKER_F_RUNNING)
++ if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
+ return;
+- worker->flags |= IO_WORKER_F_RUNNING;
++ set_bit(IO_WORKER_F_RUNNING, &worker->flags);
+ io_wq_inc_running(worker);
+ }
+
+@@ -713,12 +720,12 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
+
+ if (!worker)
+ return;
+- if (!(worker->flags & IO_WORKER_F_UP))
++ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
+ return;
+- if (!(worker->flags & IO_WORKER_F_RUNNING))
++ if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
+ return;
+
+- worker->flags &= ~IO_WORKER_F_RUNNING;
++ clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
+ io_wq_dec_running(worker);
+ }
+
+@@ -732,7 +739,7 @@ static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
+ raw_spin_lock(&wq->lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
+ list_add_tail_rcu(&worker->all_list, &wq->all_list);
+- worker->flags |= IO_WORKER_F_FREE;
++ set_bit(IO_WORKER_F_FREE, &worker->flags);
+ raw_spin_unlock(&wq->lock);
+ wake_up_new_task(tsk);
+ }
+@@ -742,7 +749,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+ return true;
+ }
+
+-static inline bool io_should_retry_thread(long err)
++static inline bool io_should_retry_thread(struct io_worker *worker, long err)
+ {
+ /*
+ * Prevent perpetual task_work retry, if the task (or its group) is
+@@ -750,6 +757,8 @@ static inline bool io_should_retry_thread(long err)
+ */
+ if (fatal_signal_pending(current))
+ return false;
++ if (worker->init_retries++ >= WORKER_INIT_LIMIT)
++ return false;
+
+ switch (err) {
+ case -EAGAIN:
+@@ -776,7 +785,7 @@ static void create_worker_cont(struct callback_head *cb)
+ io_init_new_worker(wq, worker, tsk);
+ io_worker_release(worker);
+ return;
+- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
++ } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
+
+ atomic_dec(&acct->nr_running);
+@@ -838,12 +847,12 @@ static bool create_io_worker(struct io_wq *wq, int index)
+ init_completion(&worker->ref_done);
+
+ if (index == IO_WQ_ACCT_BOUND)
+- worker->flags |= IO_WORKER_F_BOUND;
++ set_bit(IO_WORKER_F_BOUND, &worker->flags);
+
+ tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
+ if (!IS_ERR(tsk)) {
+ io_init_new_worker(wq, worker, tsk);
+- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
++ } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
+ kfree(worker);
+ goto fail;
+ } else {
+@@ -924,8 +933,12 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
+ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+ {
+ struct io_wq_acct *acct = io_work_get_acct(wq, work);
+- struct io_cb_cancel_data match;
+- unsigned work_flags = work->flags;
++ unsigned long work_flags = work->flags;
++ struct io_cb_cancel_data match = {
++ .fn = io_wq_work_match_item,
++ .data = work,
++ .cancel_all = false,
++ };
+ bool do_create;
+
+ /*
+@@ -963,10 +976,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+ raw_spin_unlock(&wq->lock);
+
+ /* fatal condition, failed to create the first worker */
+- match.fn = io_wq_work_match_item,
+- match.data = work,
+- match.cancel_all = false,
+-
+ io_acct_cancel_pending_work(wq, acct, &match);
+ }
+ }
+@@ -1161,7 +1170,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+
+ if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
+ goto err;
+- cpumask_copy(wq->cpu_mask, cpu_possible_mask);
++ cpuset_cpus_allowed(data->task, wq->cpu_mask);
+ wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+ wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+ task_rlimit(current, RLIMIT_NPROC);
+@@ -1316,17 +1325,29 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
+
+ int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
+ {
++ cpumask_var_t allowed_mask;
++ int ret = 0;
++
+ if (!tctx || !tctx->io_wq)
+ return -EINVAL;
+
++ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
++ return -ENOMEM;
++
+ rcu_read_lock();
+- if (mask)
+- cpumask_copy(tctx->io_wq->cpu_mask, mask);
+- else
+- cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
++ cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
++ if (mask) {
++ if (cpumask_subset(mask, allowed_mask))
++ cpumask_copy(tctx->io_wq->cpu_mask, mask);
++ else
++ ret = -EINVAL;
++ } else {
++ cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask);
++ }
+ rcu_read_unlock();
+
+- return 0;
++ free_cpumask_var(allowed_mask);
++ return ret;
+ }
+
+ /*
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 8d1bc6cdfe712e..39d8d1fc5c2bc0 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -60,7 +60,6 @@
+ #include <linux/net.h>
+ #include <net/sock.h>
+ #include <net/af_unix.h>
+-#include <net/scm.h>
+ #include <linux/anon_inodes.h>
+ #include <linux/sched/mm.h>
+ #include <linux/uaccess.h>
+@@ -149,6 +148,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ static void io_queue_sqe(struct io_kiocb *req);
+
+ struct kmem_cache *req_cachep;
++static struct workqueue_struct *iou_wq __ro_after_init;
+
+ static int __read_mostly sysctl_io_uring_disabled;
+ static int __read_mostly sysctl_io_uring_group = -1;
+@@ -175,19 +175,6 @@ static struct ctl_table kernel_io_uring_disabled_table[] = {
+ };
+ #endif
+
+-struct sock *io_uring_get_socket(struct file *file)
+-{
+-#if defined(CONFIG_UNIX)
+- if (io_is_uring_fops(file)) {
+- struct io_ring_ctx *ctx = file->private_data;
+-
+- return ctx->ring_sock->sk;
+- }
+-#endif
+- return NULL;
+-}
+-EXPORT_SYMBOL(io_uring_get_socket);
+-
+ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
+ {
+ if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
+@@ -269,6 +256,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
+ struct io_kiocb *req, *tmp;
+ struct io_tw_state ts = { .locked = true, };
+
++ percpu_ref_get(&ctx->refs);
+ mutex_lock(&ctx->uring_lock);
+ llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
+ req->io_task_work.func(req, &ts);
+@@ -276,6 +264,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
+ return;
+ io_submit_flush_completions(ctx);
+ mutex_unlock(&ctx->uring_lock);
++ percpu_ref_put(&ctx->refs);
+ }
+
+ static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
+@@ -323,6 +312,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ INIT_LIST_HEAD(&ctx->sqd_list);
+ INIT_LIST_HEAD(&ctx->cq_overflow_list);
+ INIT_LIST_HEAD(&ctx->io_buffers_cache);
++ INIT_HLIST_HEAD(&ctx->io_buf_list);
+ io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
+ sizeof(struct io_rsrc_node));
+ io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
+@@ -354,7 +344,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ err:
+ kfree(ctx->cancel_table.hbs);
+ kfree(ctx->cancel_table_locked.hbs);
+- kfree(ctx->io_bl);
+ xa_destroy(&ctx->io_bl_xa);
+ kfree(ctx);
+ return NULL;
+@@ -712,6 +701,21 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+ memcpy(cqe, &ocqe->cqe, cqe_size);
+ list_del(&ocqe->list);
+ kfree(ocqe);
++
++ /*
++ * For silly syzbot cases that deliberately overflow by huge
++ * amounts, check if we need to resched and drop and
++ * reacquire the locks if so. Nothing real would ever hit this.
++ * Ideally we'd have a non-posting unlock for this, but hard
++ * to care for a non-real case.
++ */
++ if (need_resched()) {
++ io_cq_unlock_post(ctx);
++ mutex_unlock(&ctx->uring_lock);
++ cond_resched();
++ mutex_lock(&ctx->uring_lock);
++ io_cq_lock(ctx);
++ }
+ }
+
+ if (list_empty(&ctx->cq_overflow_list)) {
+@@ -1178,12 +1182,11 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+
+ static unsigned int handle_tw_list(struct llist_node *node,
+ struct io_ring_ctx **ctx,
+- struct io_tw_state *ts,
+- struct llist_node *last)
++ struct io_tw_state *ts)
+ {
+ unsigned int count = 0;
+
+- while (node && node != last) {
++ do {
+ struct llist_node *next = node->next;
+ struct io_kiocb *req = container_of(node, struct io_kiocb,
+ io_task_work.node);
+@@ -1207,7 +1210,7 @@ static unsigned int handle_tw_list(struct llist_node *node,
+ *ctx = NULL;
+ cond_resched();
+ }
+- }
++ } while (node);
+
+ return count;
+ }
+@@ -1226,22 +1229,6 @@ static inline struct llist_node *io_llist_xchg(struct llist_head *head,
+ return xchg(&head->first, new);
+ }
+
+-/**
+- * io_llist_cmpxchg - possibly swap all entries in a lock-less list
+- * @head: the head of lock-less list to delete all entries
+- * @old: expected old value of the first entry of the list
+- * @new: new entry as the head of the list
+- *
+- * perform a cmpxchg on the first entry of the list.
+- */
+-
+-static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
+- struct llist_node *old,
+- struct llist_node *new)
+-{
+- return cmpxchg(&head->first, old, new);
+-}
+-
+ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+ {
+ struct llist_node *node = llist_del_all(&tctx->task_list);
+@@ -1276,9 +1263,7 @@ void tctx_task_work(struct callback_head *cb)
+ struct io_ring_ctx *ctx = NULL;
+ struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
+ task_work);
+- struct llist_node fake = {};
+ struct llist_node *node;
+- unsigned int loops = 0;
+ unsigned int count = 0;
+
+ if (unlikely(current->flags & PF_EXITING)) {
+@@ -1286,21 +1271,9 @@ void tctx_task_work(struct callback_head *cb)
+ return;
+ }
+
+- do {
+- loops++;
+- node = io_llist_xchg(&tctx->task_list, &fake);
+- count += handle_tw_list(node, &ctx, &ts, &fake);
+-
+- /* skip expensive cmpxchg if there are items in the list */
+- if (READ_ONCE(tctx->task_list.first) != &fake)
+- continue;
+- if (ts.locked && !wq_list_empty(&ctx->submit_state.compl_reqs)) {
+- io_submit_flush_completions(ctx);
+- if (READ_ONCE(tctx->task_list.first) != &fake)
+- continue;
+- }
+- node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
+- } while (node != &fake);
++ node = llist_del_all(&tctx->task_list);
++ if (node)
++ count = handle_tw_list(node, &ctx, &ts);
+
+ ctx_flush_and_put(ctx, &ts);
+
+@@ -1308,7 +1281,7 @@ void tctx_task_work(struct callback_head *cb)
+ if (unlikely(atomic_read(&tctx->in_cancel)))
+ io_uring_drop_tctx_refs(current);
+
+- trace_io_uring_task_work_run(tctx, count, loops);
++ trace_io_uring_task_work_run(tctx, count, 1);
+ }
+
+ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+@@ -1336,7 +1309,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+ nr_tw = nr_tw_prev + 1;
+ /* Large enough to fail the nr_wait comparison below */
+ if (!(flags & IOU_F_TWQ_LAZY_WAKE))
+- nr_tw = -1U;
++ nr_tw = INT_MAX;
+
+ req->nr_tw = nr_tw;
+ req->io_task_work.node.next = first;
+@@ -1405,7 +1378,20 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
+ }
+ }
+
+-static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts)
++static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
++ int min_events)
++{
++ if (llist_empty(&ctx->work_llist))
++ return false;
++ if (events < min_events)
++ return true;
++ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
++ atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
++ return false;
++}
++
++static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
++ int min_events)
+ {
+ struct llist_node *node;
+ unsigned int loops = 0;
+@@ -1434,18 +1420,20 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+ }
+ loops++;
+
+- if (!llist_empty(&ctx->work_llist))
++ if (io_run_local_work_continue(ctx, ret, min_events))
+ goto again;
+ if (ts->locked) {
+ io_submit_flush_completions(ctx);
+- if (!llist_empty(&ctx->work_llist))
++ if (io_run_local_work_continue(ctx, ret, min_events))
+ goto again;
+ }
++
+ trace_io_uring_local_work_run(ctx, ret, loops);
+ return ret;
+ }
+
+-static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
++static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
++ int min_events)
+ {
+ struct io_tw_state ts = { .locked = true, };
+ int ret;
+@@ -1453,20 +1441,20 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
+ if (llist_empty(&ctx->work_llist))
+ return 0;
+
+- ret = __io_run_local_work(ctx, &ts);
++ ret = __io_run_local_work(ctx, &ts, min_events);
+ /* shouldn't happen! */
+ if (WARN_ON_ONCE(!ts.locked))
+ mutex_lock(&ctx->uring_lock);
+ return ret;
+ }
+
+-static int io_run_local_work(struct io_ring_ctx *ctx)
++static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
+ {
+ struct io_tw_state ts = {};
+ int ret;
+
+ ts.locked = mutex_trylock(&ctx->uring_lock);
+- ret = __io_run_local_work(ctx, &ts);
++ ret = __io_run_local_work(ctx, &ts, min_events);
+ if (ts.locked)
+ mutex_unlock(&ctx->uring_lock);
+
+@@ -1662,7 +1650,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+ io_task_work_pending(ctx)) {
+ u32 tail = ctx->cached_cq_tail;
+
+- (void) io_run_local_work_locked(ctx);
++ (void) io_run_local_work_locked(ctx, min);
+
+ if (task_work_pending(current) ||
+ wq_list_empty(&ctx->iopoll_list)) {
+@@ -1888,7 +1876,11 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ io_req_complete_defer(req);
+ else
+ io_req_complete_post(req, issue_flags);
+- } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
++
++ return 0;
++ }
++
++ if (ret != IOU_ISSUE_SKIP_COMPLETE)
+ return ret;
+
+ /* If the op doesn't have a file, we're not polling for it */
+@@ -2140,6 +2132,13 @@ static void io_init_req_drain(struct io_kiocb *req)
+ }
+ }
+
++static __cold int io_init_fail_req(struct io_kiocb *req, int err)
++{
++ /* ensure per-opcode data is cleared if we fail before prep */
++ memset(&req->cmd.data, 0, sizeof(req->cmd.data));
++ return err;
++}
++
+ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+ __must_hold(&ctx->uring_lock)
+@@ -2160,29 +2159,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+
+ if (unlikely(opcode >= IORING_OP_LAST)) {
+ req->opcode = 0;
+- return -EINVAL;
++ return io_init_fail_req(req, -EINVAL);
+ }
+ def = &io_issue_defs[opcode];
+ if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
+ /* enforce forwards compatibility on users */
+ if (sqe_flags & ~SQE_VALID_FLAGS)
+- return -EINVAL;
++ return io_init_fail_req(req, -EINVAL);
+ if (sqe_flags & IOSQE_BUFFER_SELECT) {
+ if (!def->buffer_select)
+- return -EOPNOTSUPP;
++ return io_init_fail_req(req, -EOPNOTSUPP);
+ req->buf_index = READ_ONCE(sqe->buf_group);
+ }
+ if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
+ ctx->drain_disabled = true;
+ if (sqe_flags & IOSQE_IO_DRAIN) {
+ if (ctx->drain_disabled)
+- return -EOPNOTSUPP;
++ return io_init_fail_req(req, -EOPNOTSUPP);
+ io_init_req_drain(req);
+ }
+ }
+ if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
+ if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
+- return -EACCES;
++ return io_init_fail_req(req, -EACCES);
+ /* knock it to the slow queue path, will be drained there */
+ if (ctx->drain_active)
+ req->flags |= REQ_F_FORCE_ASYNC;
+@@ -2195,9 +2194,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ }
+
+ if (!def->ioprio && sqe->ioprio)
+- return -EINVAL;
++ return io_init_fail_req(req, -EINVAL);
+ if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
+- return -EINVAL;
++ return io_init_fail_req(req, -EINVAL);
+
+ if (def->needs_file) {
+ struct io_submit_state *state = &ctx->submit_state;
+@@ -2221,12 +2220,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+
+ req->creds = xa_load(&ctx->personalities, personality);
+ if (!req->creds)
+- return -EINVAL;
++ return io_init_fail_req(req, -EINVAL);
+ get_cred(req->creds);
+ ret = security_uring_override_creds(req->creds);
+ if (ret) {
+ put_cred(req->creds);
+- return ret;
++ return io_init_fail_req(req, ret);
+ }
+ req->flags |= REQ_F_CREDS;
+ }
+@@ -2501,7 +2500,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
+ {
+ if (!llist_empty(&ctx->work_llist)) {
+ __set_current_state(TASK_RUNNING);
+- if (io_run_local_work(ctx) > 0)
++ if (io_run_local_work(ctx, INT_MAX) > 0)
+ return 0;
+ }
+ if (io_run_task_work() > 0)
+@@ -2524,13 +2523,13 @@ static bool current_pending_io(void)
+ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq)
+ {
+- int io_wait, ret;
++ int ret;
+
+ if (unlikely(READ_ONCE(ctx->check_cq)))
+ return 1;
+ if (unlikely(!llist_empty(&ctx->work_llist)))
+ return 1;
+- if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
++ if (unlikely(task_work_pending(current)))
+ return 1;
+ if (unlikely(task_sigpending(current)))
+ return -EINTR;
+@@ -2542,7 +2541,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ * can take into account that the task is waiting for IO - turns out
+ * to be important for low QD IO.
+ */
+- io_wait = current->in_iowait;
+ if (current_pending_io())
+ current->in_iowait = 1;
+ ret = 0;
+@@ -2550,7 +2548,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ schedule();
+ else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
+ ret = -ETIME;
+- current->in_iowait = io_wait;
++ current->in_iowait = 0;
+ return ret;
+ }
+
+@@ -2569,26 +2567,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ if (!io_allowed_run_tw(ctx))
+ return -EEXIST;
+ if (!llist_empty(&ctx->work_llist))
+- io_run_local_work(ctx);
++ io_run_local_work(ctx, min_events);
+ io_run_task_work();
+ io_cqring_overflow_flush(ctx);
+ /* if user messes with these they will just get an early return */
+ if (__io_cqring_events_user(ctx) >= min_events)
+ return 0;
+
+- if (sig) {
+-#ifdef CONFIG_COMPAT
+- if (in_compat_syscall())
+- ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+- sigsz);
+- else
+-#endif
+- ret = set_user_sigmask(sig, sigsz);
+-
+- if (ret)
+- return ret;
+- }
+-
+ init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+ iowq.wq.private = current;
+ INIT_LIST_HEAD(&iowq.wq.entry);
+@@ -2605,13 +2590,25 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+ }
+
++ if (sig) {
++#ifdef CONFIG_COMPAT
++ if (in_compat_syscall())
++ ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
++ sigsz);
++ else
++#endif
++ ret = set_user_sigmask(sig, sigsz);
++
++ if (ret)
++ return ret;
++ }
++
+ trace_io_uring_cqring_wait(ctx, min_events);
+ do {
++ int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
+ unsigned long check_cq;
+
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+- int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
+-
+ atomic_set(&ctx->cq_wait_nr, nr_wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ } else {
+@@ -2623,16 +2620,26 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ __set_current_state(TASK_RUNNING);
+ atomic_set(&ctx->cq_wait_nr, 0);
+
+- if (ret < 0)
+- break;
+ /*
+ * Run task_work after scheduling and before io_should_wake().
+ * If we got woken because of task_work being processed, run it
+ * now rather than let the caller do another wait loop.
+ */
+- io_run_task_work();
+ if (!llist_empty(&ctx->work_llist))
+- io_run_local_work(ctx);
++ io_run_local_work(ctx, nr_wait);
++ io_run_task_work();
++
++ /*
++ * Non-local task_work will be run on exit to userspace, but
++ * if we're using DEFER_TASKRUN, then we could have waited
++ * with a timeout for a number of requests. If the timeout
++ * hits, we could have some requests ready to process. Ensure
++ * this break is _after_ we have run task_work, to avoid
++ * deferring running potentially pending requests until the
++ * next time we wait for events.
++ */
++ if (ret < 0)
++ break;
+
+ check_cq = READ_ONCE(ctx->check_cq);
+ if (unlikely(check_cq)) {
+@@ -2659,7 +2666,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
+ }
+
+-static void io_mem_free(void *ptr)
++void io_mem_free(void *ptr)
+ {
+ if (!ptr)
+ return;
+@@ -2690,7 +2697,8 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
+ {
+ struct page **page_array;
+ unsigned int nr_pages;
+- int ret, i;
++ void *page_addr;
++ int ret, i, pinned;
+
+ *npages = 0;
+
+@@ -2704,39 +2712,45 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
+ if (!page_array)
+ return ERR_PTR(-ENOMEM);
+
+- ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+- page_array);
+- if (ret != nr_pages) {
+-err:
+- io_pages_free(&page_array, ret > 0 ? ret : 0);
+- return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
++
++ pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
++ page_array);
++ if (pinned != nr_pages) {
++ ret = (pinned < 0) ? pinned : -EFAULT;
++ goto free_pages;
+ }
+- /*
+- * Should be a single page. If the ring is small enough that we can
+- * use a normal page, that is fine. If we need multiple pages, then
+- * userspace should use a huge page. That's the only way to guarantee
+- * that we get contigious memory, outside of just being lucky or
+- * (currently) having low memory fragmentation.
+- */
+- if (page_array[0] != page_array[ret - 1])
+- goto err;
+
+- /*
+- * Can't support mapping user allocated ring memory on 32-bit archs
+- * where it could potentially reside in highmem. Just fail those with
+- * -EINVAL, just like we did on kernels that didn't support this
+- * feature.
+- */
++ page_addr = page_address(page_array[0]);
+ for (i = 0; i < nr_pages; i++) {
+- if (PageHighMem(page_array[i])) {
+- ret = -EINVAL;
+- goto err;
+- }
++ ret = -EINVAL;
++
++ /*
++ * Can't support mapping user allocated ring memory on 32-bit
++ * archs where it could potentially reside in highmem. Just
++ * fail those with -EINVAL, just like we did on kernels that
++ * didn't support this feature.
++ */
++ if (PageHighMem(page_array[i]))
++ goto free_pages;
++
++ /*
++ * No support for discontig pages for now, should either be a
++ * single normal page, or a huge page. Later on we can add
++ * support for remapping discontig pages, for now we will
++ * just fail them with EINVAL.
++ */
++ if (page_address(page_array[i]) != page_addr)
++ goto free_pages;
++ page_addr += PAGE_SIZE;
+ }
+
+ *pages = page_array;
+ *npages = nr_pages;
+ return page_to_virt(page_array[0]);
++
++free_pages:
++ io_pages_free(&page_array, pinned > 0 ? pinned : 0);
++ return ERR_PTR(ret);
+ }
+
+ static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
+@@ -2758,17 +2772,18 @@ static void io_rings_free(struct io_ring_ctx *ctx)
+ if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
+ io_mem_free(ctx->rings);
+ io_mem_free(ctx->sq_sqes);
+- ctx->rings = NULL;
+- ctx->sq_sqes = NULL;
+ } else {
+ io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
+ ctx->n_ring_pages = 0;
+ io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
+ ctx->n_sqe_pages = 0;
+ }
++
++ ctx->rings = NULL;
++ ctx->sq_sqes = NULL;
+ }
+
+-static void *io_mem_alloc(size_t size)
++void *io_mem_alloc(size_t size)
+ {
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
+ void *ret;
+@@ -2924,13 +2939,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ io_rsrc_node_destroy(ctx, ctx->rsrc_node);
+
+ WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
+-
+-#if defined(CONFIG_UNIX)
+- if (ctx->ring_sock) {
+- ctx->ring_sock->file = NULL; /* so that iput() is called */
+- sock_release(ctx->ring_sock);
+- }
+-#endif
+ WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
+
+ io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
+@@ -2939,6 +2947,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ ctx->mm_account = NULL;
+ }
+ io_rings_free(ctx);
++ io_kbuf_mmap_list_free(ctx);
+
+ percpu_ref_exit(&ctx->refs);
+ free_uid(ctx->user);
+@@ -2947,7 +2956,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ io_wq_put_hash(ctx->hash_map);
+ kfree(ctx->cancel_table.hbs);
+ kfree(ctx->cancel_table_locked.hbs);
+- kfree(ctx->io_bl);
+ xa_destroy(&ctx->io_bl_xa);
+ kfree(ctx);
+ }
+@@ -3133,12 +3141,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
+ init_completion(&exit.completion);
+ init_task_work(&exit.task_work, io_tctx_exit_cb);
+ exit.ctx = ctx;
+- /*
+- * Some may use context even when all refs and requests have been put,
+- * and they are free to do so while still holding uring_lock or
+- * completion_lock, see io_req_task_submit(). Apart from other work,
+- * this lock/unlock section also waits them to finish.
+- */
++
+ mutex_lock(&ctx->uring_lock);
+ while (!list_empty(&ctx->tctx_list)) {
+ WARN_ON_ONCE(time_after(jiffies, timeout));
+@@ -3200,7 +3203,7 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
+ * noise and overhead, there's no discernable change in runtime
+ * over using system_wq.
+ */
+- queue_work(system_unbound_wq, &ctx->exit_work);
++ queue_work(iou_wq, &ctx->exit_work);
+ }
+
+ static int io_uring_release(struct inode *inode, struct file *file)
+@@ -3319,7 +3322,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+
+ if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
+ io_allowed_defer_tw_run(ctx))
+- ret |= io_run_local_work(ctx) > 0;
++ ret |= io_run_local_work(ctx, INT_MAX) > 0;
+ ret |= io_cancel_defer_files(ctx, task, cancel_all);
+ mutex_lock(&ctx->uring_lock);
+ ret |= io_poll_remove_all(ctx, task, cancel_all);
+@@ -3362,8 +3365,11 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+ bool loop = false;
+
+ io_uring_drop_tctx_refs(current);
++ if (!tctx_inflight(tctx, !cancel_all))
++ break;
++
+ /* read completions before cancelations */
+- inflight = tctx_inflight(tctx, !cancel_all);
++ inflight = tctx_inflight(tctx, false);
+ if (!inflight)
+ break;
+
+@@ -3433,27 +3439,30 @@ static void *io_uring_validate_mmap_request(struct file *file,
+ struct page *page;
+ void *ptr;
+
+- /* Don't allow mmap if the ring was setup without it */
+- if (ctx->flags & IORING_SETUP_NO_MMAP)
+- return ERR_PTR(-EINVAL);
+-
+ switch (offset & IORING_OFF_MMAP_MASK) {
+ case IORING_OFF_SQ_RING:
+ case IORING_OFF_CQ_RING:
++ /* Don't allow mmap if the ring was setup without it */
++ if (ctx->flags & IORING_SETUP_NO_MMAP)
++ return ERR_PTR(-EINVAL);
+ ptr = ctx->rings;
+ break;
+ case IORING_OFF_SQES:
++ /* Don't allow mmap if the ring was setup without it */
++ if (ctx->flags & IORING_SETUP_NO_MMAP)
++ return ERR_PTR(-EINVAL);
+ ptr = ctx->sq_sqes;
+ break;
+ case IORING_OFF_PBUF_RING: {
++ struct io_buffer_list *bl;
+ unsigned int bgid;
+
+ bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
+- mutex_lock(&ctx->uring_lock);
+- ptr = io_pbuf_get_address(ctx, bgid);
+- mutex_unlock(&ctx->uring_lock);
+- if (!ptr)
+- return ERR_PTR(-EINVAL);
++ bl = io_pbuf_get_bl(ctx, bgid);
++ if (IS_ERR(bl))
++ return bl;
++ ptr = bl->buf_ring;
++ io_put_bl(ctx, bl);
+ break;
+ }
+ default:
+@@ -3603,7 +3612,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ size_t, argsz)
+ {
+ struct io_ring_ctx *ctx;
+- struct fd f;
++ struct file *file;
+ long ret;
+
+ if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
+@@ -3621,20 +3630,19 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
+ return -EINVAL;
+ fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
+- f.file = tctx->registered_rings[fd];
+- f.flags = 0;
+- if (unlikely(!f.file))
++ file = tctx->registered_rings[fd];
++ if (unlikely(!file))
+ return -EBADF;
+ } else {
+- f = fdget(fd);
+- if (unlikely(!f.file))
++ file = fget(fd);
++ if (unlikely(!file))
+ return -EBADF;
+ ret = -EOPNOTSUPP;
+- if (unlikely(!io_is_uring_fops(f.file)))
++ if (unlikely(!io_is_uring_fops(file)))
+ goto out;
+ }
+
+- ctx = f.file->private_data;
++ ctx = file->private_data;
+ ret = -EBADFD;
+ if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
+ goto out;
+@@ -3677,7 +3685,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ * it should handle ownership problems if any.
+ */
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+- (void)io_run_local_work_locked(ctx);
++ (void)io_run_local_work_locked(ctx, min_complete);
+ }
+ mutex_unlock(&ctx->uring_lock);
+ }
+@@ -3728,7 +3736,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ }
+ }
+ out:
+- fdput(f);
++ if (!(flags & IORING_ENTER_REGISTERED_RING))
++ fput(file);
+ return ret;
+ }
+
+@@ -3820,32 +3829,12 @@ static int io_uring_install_fd(struct file *file)
+ /*
+ * Allocate an anonymous fd, this is what constitutes the application
+ * visible backing of an io_uring instance. The application mmaps this
+- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
+- * we have to tie this fd to a socket for file garbage collection purposes.
++ * fd to gain access to the SQ/CQ ring details.
+ */
+ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
+ {
+- struct file *file;
+-#if defined(CONFIG_UNIX)
+- int ret;
+-
+- ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
+- &ctx->ring_sock);
+- if (ret)
+- return ERR_PTR(ret);
+-#endif
+-
+- file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
++ return anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
+ O_RDWR | O_CLOEXEC, NULL);
+-#if defined(CONFIG_UNIX)
+- if (IS_ERR(file)) {
+- sock_release(ctx->ring_sock);
+- ctx->ring_sock = NULL;
+- } else {
+- ctx->ring_sock->file = file;
+- }
+-#endif
+- return file;
+ }
+
+ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
+@@ -4569,7 +4558,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
+ {
+ struct io_ring_ctx *ctx;
+ long ret = -EBADF;
+- struct fd f;
++ struct file *file;
+ bool use_registered_ring;
+
+ use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
+@@ -4588,27 +4577,27 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
+ if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
+ return -EINVAL;
+ fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
+- f.file = tctx->registered_rings[fd];
+- f.flags = 0;
+- if (unlikely(!f.file))
++ file = tctx->registered_rings[fd];
++ if (unlikely(!file))
+ return -EBADF;
+ } else {
+- f = fdget(fd);
+- if (unlikely(!f.file))
++ file = fget(fd);
++ if (unlikely(!file))
+ return -EBADF;
+ ret = -EOPNOTSUPP;
+- if (!io_is_uring_fops(f.file))
++ if (!io_is_uring_fops(file))
+ goto out_fput;
+ }
+
+- ctx = f.file->private_data;
++ ctx = file->private_data;
+
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_uring_register(ctx, opcode, arg, nr_args);
+ mutex_unlock(&ctx->uring_lock);
+ trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
+ out_fput:
+- fdput(f);
++ if (!use_registered_ring)
++ fput(file);
+ return ret;
+ }
+
+@@ -4702,6 +4691,8 @@ static int __init io_uring_init(void)
+ offsetof(struct io_kiocb, cmd.data),
+ sizeof_field(struct io_kiocb, cmd.data), NULL);
+
++ iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
++
+ #ifdef CONFIG_SYSCTL
+ register_sysctl_init("kernel", kernel_io_uring_disabled_table);
+ #endif
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 0bc145614a6e66..8242820742eef3 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -30,6 +30,13 @@ enum {
+ IOU_OK = 0,
+ IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
+
++ /*
++ * Requeue the task_work to restart operations on this request. The
++ * actual value isn't important, should just be not an otherwise
++ * valid error code, yet less than -MAX_ERRNO and valid internally.
++ */
++ IOU_REQUEUE = -3072,
++
+ /*
+ * Intended only when both IO_URING_F_MULTISHOT is passed
+ * to indicate to the poll runner that multishot should be
+@@ -54,7 +61,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+ unsigned issue_flags);
+
+ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
+-bool io_is_uring_fops(struct file *file);
+ bool io_alloc_async_data(struct io_kiocb *req);
+ void io_req_task_queue(struct io_kiocb *req);
+ void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
+@@ -86,6 +92,9 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
+ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all);
+
++void *io_mem_alloc(size_t size);
++void io_mem_free(void *ptr);
++
+ #if defined(CONFIG_PROVE_LOCKING)
+ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
+ {
+@@ -253,7 +262,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+ {
+ struct io_rings *r = ctx->rings;
+
+- return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
++ /*
++ * SQPOLL must use the actual sqring head, as using the cached_sq_head
++ * is race prone if the SQPOLL thread has grabbed entries but not yet
++ * committed them to the ring. For !SQPOLL, this doesn't matter, but
++ * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
++ * just read the actual sqring head unconditionally.
++ */
++ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
+ }
+
+ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+@@ -295,7 +311,7 @@ static inline int io_run_task_work(void)
+
+ static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+ {
+- return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
++ return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+ }
+
+ static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 9123138aa9f48b..702c08c26cd4fa 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -17,33 +17,49 @@
+
+ #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
+
+-#define BGID_ARRAY 64
++/* BIDs are addressed by a 16-bit field in a CQE */
++#define MAX_BIDS_PER_BGID (1 << 16)
+
+ struct io_provide_buf {
+ struct file *file;
+ __u64 addr;
+ __u32 len;
+ __u32 bgid;
+- __u16 nbufs;
++ __u32 nbufs;
+ __u16 bid;
+ };
+
++static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
++ unsigned int bgid)
++{
++ return xa_load(&ctx->io_bl_xa, bgid);
++}
++
++struct io_buf_free {
++ struct hlist_node list;
++ void *mem;
++ size_t size;
++ int inuse;
++};
++
+ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
+ unsigned int bgid)
+ {
+- if (ctx->io_bl && bgid < BGID_ARRAY)
+- return &ctx->io_bl[bgid];
++ lockdep_assert_held(&ctx->uring_lock);
+
+- return xa_load(&ctx->io_bl_xa, bgid);
++ return __io_buffer_get_list(ctx, bgid);
+ }
+
+ static int io_buffer_add_list(struct io_ring_ctx *ctx,
+ struct io_buffer_list *bl, unsigned int bgid)
+ {
++ /*
++ * Store buffer group ID and finally mark the list as visible.
++ * The normal lookup doesn't care about the visibility as we're
++ * always under the ->uring_lock, but the RCU lookup from mmap does.
++ */
+ bl->bgid = bgid;
+- if (bgid < BGID_ARRAY)
+- return 0;
+-
++ atomic_set(&bl->refs, 1);
+ return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+ }
+
+@@ -152,7 +168,8 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ req->buf_list = bl;
+ req->buf_index = buf->bid;
+
+- if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
++ if (issue_flags & IO_URING_F_UNLOCKED ||
++ (req->file && !file_can_poll(req->file))) {
+ /*
+ * If we came in unlocked, we have no choice but to consume the
+ * buffer here, otherwise nothing ensures that the buffer won't
+@@ -189,21 +206,22 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
+ return ret;
+ }
+
+-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
++/*
++ * Mark the given mapped range as free for reuse
++ */
++static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+ {
+- int i;
+-
+- ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
+- GFP_KERNEL);
+- if (!ctx->io_bl)
+- return -ENOMEM;
++ struct io_buf_free *ibf;
+
+- for (i = 0; i < BGID_ARRAY; i++) {
+- INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
+- ctx->io_bl[i].bgid = i;
++ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
++ if (bl->buf_ring == ibf->mem) {
++ ibf->inuse = 0;
++ return;
++ }
+ }
+
+- return 0;
++ /* can't happen... */
++ WARN_ON_ONCE(1);
+ }
+
+ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+@@ -218,7 +236,11 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+ if (bl->is_mapped) {
+ i = bl->buf_ring->tail - bl->head;
+ if (bl->is_mmap) {
+- folio_put(virt_to_folio(bl->buf_ring));
++ /*
++ * io_kbuf_list_free() will free the page(s) at
++ * ->release() time.
++ */
++ io_kbuf_mark_free(ctx, bl);
+ bl->buf_ring = NULL;
+ bl->is_mmap = 0;
+ } else if (bl->buf_nr_pages) {
+@@ -252,22 +274,22 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+ return i;
+ }
+
++void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++{
++ if (atomic_dec_and_test(&bl->refs)) {
++ __io_remove_buffers(ctx, bl, -1U);
++ kfree_rcu(bl, rcu);
++ }
++}
++
+ void io_destroy_buffers(struct io_ring_ctx *ctx)
+ {
+ struct io_buffer_list *bl;
+ unsigned long index;
+- int i;
+-
+- for (i = 0; i < BGID_ARRAY; i++) {
+- if (!ctx->io_bl)
+- break;
+- __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
+- }
+
+ xa_for_each(&ctx->io_bl_xa, index, bl) {
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+- __io_remove_buffers(ctx, bl, -1U);
+- kfree(bl);
++ io_put_bl(ctx, bl);
+ }
+
+ while (!list_empty(&ctx->io_buffers_pages)) {
+@@ -289,7 +311,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ return -EINVAL;
+
+ tmp = READ_ONCE(sqe->fd);
+- if (!tmp || tmp > USHRT_MAX)
++ if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ return -EINVAL;
+
+ memset(p, 0, sizeof(*p));
+@@ -332,7 +354,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ return -EINVAL;
+
+ tmp = READ_ONCE(sqe->fd);
+- if (!tmp || tmp > USHRT_MAX)
++ if (!tmp || tmp > MAX_BIDS_PER_BGID)
+ return -E2BIG;
+ p->nbufs = tmp;
+ p->addr = READ_ONCE(sqe->addr);
+@@ -352,7 +374,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ tmp = READ_ONCE(sqe->off);
+ if (tmp > USHRT_MAX)
+ return -E2BIG;
+- if (tmp + p->nbufs >= USHRT_MAX)
++ if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
+ return -EINVAL;
+ p->bid = tmp;
+ return 0;
+@@ -436,12 +458,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
+
+ io_ring_submit_lock(ctx, issue_flags);
+
+- if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
+- ret = io_init_bl_list(ctx);
+- if (ret)
+- goto err;
+- }
+-
+ bl = io_buffer_get_list(ctx, p->bgid);
+ if (unlikely(!bl)) {
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+@@ -452,7 +468,11 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
+ INIT_LIST_HEAD(&bl->buf_list);
+ ret = io_buffer_add_list(ctx, bl, p->bgid);
+ if (ret) {
+- kfree(bl);
++ /*
++ * Doesn't need rcu free as it was never visible, but
++ * let's keep it consistent throughout.
++ */
++ kfree_rcu(bl, rcu);
+ goto err;
+ }
+ }
+@@ -523,19 +543,63 @@ static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
+ return -EINVAL;
+ }
+
+-static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
++/*
++ * See if we have a suitable region that we can reuse, rather than allocate
++ * both a new io_buf_free and mem region again. We leave it on the list as
++ * even a reused entry will need freeing at ring release.
++ */
++static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
++ size_t ring_size)
++{
++ struct io_buf_free *ibf, *best = NULL;
++ size_t best_dist;
++
++ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
++ size_t dist;
++
++ if (ibf->inuse || ibf->size < ring_size)
++ continue;
++ dist = ibf->size - ring_size;
++ if (!best || dist < best_dist) {
++ best = ibf;
++ if (!dist)
++ break;
++ best_dist = dist;
++ }
++ }
++
++ return best;
++}
++
++static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
++ struct io_uring_buf_reg *reg,
+ struct io_buffer_list *bl)
+ {
+- gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
++ struct io_buf_free *ibf;
+ size_t ring_size;
+ void *ptr;
+
+ ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
+- ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
+- if (!ptr)
+- return -ENOMEM;
+
+- bl->buf_ring = ptr;
++ /* Reuse existing entry, if we can */
++ ibf = io_lookup_buf_free_entry(ctx, ring_size);
++ if (!ibf) {
++ ptr = io_mem_alloc(ring_size);
++ if (IS_ERR(ptr))
++ return PTR_ERR(ptr);
++
++ /* Allocate and store deferred free entry */
++ ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
++ if (!ibf) {
++ io_mem_free(ptr);
++ return -ENOMEM;
++ }
++ ibf->mem = ptr;
++ ibf->size = ring_size;
++ hlist_add_head(&ibf->list, &ctx->io_buf_list);
++ }
++ ibf->inuse = 1;
++ bl->buf_ring = ibf->mem;
+ bl->is_mapped = 1;
+ bl->is_mmap = 1;
+ return 0;
+@@ -547,6 +611,8 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ struct io_buffer_list *bl, *free_bl = NULL;
+ int ret;
+
++ lockdep_assert_held(&ctx->uring_lock);
++
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+
+@@ -571,12 +637,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ if (reg.ring_entries >= 65536)
+ return -EINVAL;
+
+- if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
+- int ret = io_init_bl_list(ctx);
+- if (ret)
+- return ret;
+- }
+-
+ bl = io_buffer_get_list(ctx, reg.bgid);
+ if (bl) {
+ /* if mapped buffer ring OR classic exists, don't allow */
+@@ -591,7 +651,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ if (!(reg.flags & IOU_PBUF_RING_MMAP))
+ ret = io_pin_pbuf_ring(&reg, bl);
+ else
+- ret = io_alloc_pbuf_ring(&reg, bl);
++ ret = io_alloc_pbuf_ring(ctx, &reg, bl);
+
+ if (!ret) {
+ bl->nr_entries = reg.ring_entries;
+@@ -601,7 +661,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ return 0;
+ }
+
+- kfree(free_bl);
++ kfree_rcu(free_bl, rcu);
+ return ret;
+ }
+
+@@ -610,6 +670,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ struct io_uring_buf_reg reg;
+ struct io_buffer_list *bl;
+
++ lockdep_assert_held(&ctx->uring_lock);
++
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+ if (reg.resv[0] || reg.resv[1] || reg.resv[2])
+@@ -623,21 +685,54 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ if (!bl->is_mapped)
+ return -EINVAL;
+
+- __io_remove_buffers(ctx, bl, -1U);
+- if (bl->bgid >= BGID_ARRAY) {
+- xa_erase(&ctx->io_bl_xa, bl->bgid);
+- kfree(bl);
+- }
++ xa_erase(&ctx->io_bl_xa, bl->bgid);
++ io_put_bl(ctx, bl);
+ return 0;
+ }
+
+-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
++struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
++ unsigned long bgid)
+ {
+ struct io_buffer_list *bl;
++ bool ret;
+
+- bl = io_buffer_get_list(ctx, bgid);
+- if (!bl || !bl->is_mmap)
+- return NULL;
++ /*
++ * We have to be a bit careful here - we're inside mmap and cannot grab
++ * the uring_lock. This means the buffer_list could be simultaneously
++ * going away, if someone is trying to be sneaky. Look it up under rcu
++ * so we know it's not going away, and attempt to grab a reference to
++ * it. If the ref is already zero, then fail the mapping. If successful,
++ * the caller will call io_put_bl() to drop the reference at the
++ * end. This may then safely free the buffer_list (and drop the pages)
++ * at that point, vm_insert_pages() would've already grabbed the
++ * necessary vma references.
++ */
++ rcu_read_lock();
++ bl = xa_load(&ctx->io_bl_xa, bgid);
++ /* must be a mmap'able buffer ring and have pages */
++ ret = false;
++ if (bl && bl->is_mmap)
++ ret = atomic_inc_not_zero(&bl->refs);
++ rcu_read_unlock();
++
++ if (ret)
++ return bl;
++
++ return ERR_PTR(-EINVAL);
++}
+
+- return bl->buf_ring;
++/*
++ * Called at or after ->release(), free the mmap'ed buffers that we used
++ * for memory mapped provided buffer rings.
++ */
++void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
++{
++ struct io_buf_free *ibf;
++ struct hlist_node *tmp;
++
++ hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
++ hlist_del(&ibf->list);
++ io_mem_free(ibf->mem);
++ kfree(ibf);
++ }
+ }
+diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
+index d14345ef61fc8d..8d7929369501d2 100644
+--- a/io_uring/kbuf.h
++++ b/io_uring/kbuf.h
+@@ -15,6 +15,7 @@ struct io_buffer_list {
+ struct page **buf_pages;
+ struct io_uring_buf_ring *buf_ring;
+ };
++ struct rcu_head rcu;
+ };
+ __u16 bgid;
+
+@@ -24,6 +25,8 @@ struct io_buffer_list {
+ __u16 head;
+ __u16 mask;
+
++ atomic_t refs;
++
+ /* ring mapped provided buffers */
+ __u8 is_mapped;
+ /* ring mapped provided buffers, but mmap'ed by application */
+@@ -51,11 +54,15 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
+ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+
++void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
++
+ unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+
+ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+
+-void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
++void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
++struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
++ unsigned long bgid);
+
+ static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+ {
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 7a8e298af81b3b..7412904387bfa0 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -60,6 +60,7 @@ struct io_sr_msg {
+ unsigned len;
+ unsigned done_io;
+ unsigned msg_flags;
++ unsigned nr_multishot_loops;
+ u16 flags;
+ /* initialised and used only by !msg send variants */
+ u16 addr_len;
+@@ -70,6 +71,13 @@ struct io_sr_msg {
+ struct io_kiocb *notif;
+ };
+
++/*
++ * Number of times we'll try and do receives if there's more data. If we
++ * exceed this limit, then add us to the back of the queue and retry from
++ * there. This helps fairness between flooding clients.
++ */
++#define MULTISHOT_MAX_RETRY 32
++
+ static inline bool io_check_multishot(struct io_kiocb *req,
+ unsigned int issue_flags)
+ {
+@@ -79,7 +87,7 @@ static inline bool io_check_multishot(struct io_kiocb *req,
+ * generic paths but multipoll may decide to post extra cqes.
+ */
+ return !(issue_flags & IO_URING_F_IOWQ) ||
+- !(issue_flags & IO_URING_F_MULTISHOT) ||
++ !(req->flags & REQ_F_APOLL_MULTISHOT) ||
+ !req->ctx->task_complete;
+ }
+
+@@ -196,16 +204,115 @@ static int io_setup_async_msg(struct io_kiocb *req,
+ return -EAGAIN;
+ }
+
++#ifdef CONFIG_COMPAT
++static int io_compat_msg_copy_hdr(struct io_kiocb *req,
++ struct io_async_msghdr *iomsg,
++ struct compat_msghdr *msg, int ddir)
++{
++ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++ struct compat_iovec __user *uiov;
++ int ret;
++
++ if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
++ return -EFAULT;
++
++ uiov = compat_ptr(msg->msg_iov);
++ if (req->flags & REQ_F_BUFFER_SELECT) {
++ compat_ssize_t clen;
++
++ iomsg->free_iov = NULL;
++ if (msg->msg_iovlen == 0) {
++ sr->len = 0;
++ } else if (msg->msg_iovlen > 1) {
++ return -EINVAL;
++ } else {
++ if (!access_ok(uiov, sizeof(*uiov)))
++ return -EFAULT;
++ if (__get_user(clen, &uiov->iov_len))
++ return -EFAULT;
++ if (clen < 0)
++ return -EINVAL;
++ sr->len = clen;
++ }
++
++ return 0;
++ }
++
++ iomsg->free_iov = iomsg->fast_iov;
++ ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
++ UIO_FASTIOV, &iomsg->free_iov,
++ &iomsg->msg.msg_iter, true);
++ if (unlikely(ret < 0))
++ return ret;
++
++ return 0;
++}
++#endif
++
++static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
++ struct user_msghdr *msg, int ddir)
++{
++ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++ int ret;
++
++ if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
++ return -EFAULT;
++
++ if (req->flags & REQ_F_BUFFER_SELECT) {
++ if (msg->msg_iovlen == 0) {
++ sr->len = iomsg->fast_iov[0].iov_len = 0;
++ iomsg->fast_iov[0].iov_base = NULL;
++ iomsg->free_iov = NULL;
++ } else if (msg->msg_iovlen > 1) {
++ return -EINVAL;
++ } else {
++ if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
++ sizeof(*msg->msg_iov)))
++ return -EFAULT;
++ sr->len = iomsg->fast_iov[0].iov_len;
++ iomsg->free_iov = NULL;
++ }
++
++ return 0;
++ }
++
++ iomsg->free_iov = iomsg->fast_iov;
++ ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
++ &iomsg->free_iov, &iomsg->msg.msg_iter, false);
++ if (unlikely(ret < 0))
++ return ret;
++
++ return 0;
++}
++
+ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
+ {
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++ struct user_msghdr msg;
+ int ret;
+
+ iomsg->msg.msg_name = &iomsg->addr;
+- iomsg->free_iov = iomsg->fast_iov;
+- ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
+- &iomsg->free_iov);
++ iomsg->msg.msg_iter.nr_segs = 0;
++
++#ifdef CONFIG_COMPAT
++ if (unlikely(req->ctx->compat)) {
++ struct compat_msghdr cmsg;
++
++ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
++ if (unlikely(ret))
++ return ret;
++
++ return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
++ }
++#endif
++
++ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
++ if (unlikely(ret))
++ return ret;
++
++ ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
++
+ /* save msg_control as sys_sendmsg() overwrites it */
+ sr->msg_control = iomsg->msg.msg_control_user;
+ return ret;
+@@ -427,142 +534,77 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
+ return IOU_OK;
+ }
+
+-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
++static int io_recvmsg_mshot_prep(struct io_kiocb *req,
++ struct io_async_msghdr *iomsg,
++ int namelen, size_t controllen)
+ {
+- int hdr;
+-
+- if (iomsg->namelen < 0)
+- return true;
+- if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
+- iomsg->namelen, &hdr))
+- return true;
+- if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
+- return true;
++ if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
++ (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
++ int hdr;
++
++ if (unlikely(namelen < 0))
++ return -EOVERFLOW;
++ if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
++ namelen, &hdr))
++ return -EOVERFLOW;
++ if (check_add_overflow(hdr, controllen, &hdr))
++ return -EOVERFLOW;
++
++ iomsg->namelen = namelen;
++ iomsg->controllen = controllen;
++ return 0;
++ }
+
+- return false;
++ return 0;
+ }
+
+-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
+- struct io_async_msghdr *iomsg)
++static int io_recvmsg_copy_hdr(struct io_kiocb *req,
++ struct io_async_msghdr *iomsg)
+ {
+- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct user_msghdr msg;
+ int ret;
+
+- if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
+- return -EFAULT;
+-
+- ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+- if (ret)
+- return ret;
+-
+- if (req->flags & REQ_F_BUFFER_SELECT) {
+- if (msg.msg_iovlen == 0) {
+- sr->len = iomsg->fast_iov[0].iov_len = 0;
+- iomsg->fast_iov[0].iov_base = NULL;
+- iomsg->free_iov = NULL;
+- } else if (msg.msg_iovlen > 1) {
+- return -EINVAL;
+- } else {
+- if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
+- return -EFAULT;
+- sr->len = iomsg->fast_iov[0].iov_len;
+- iomsg->free_iov = NULL;
+- }
+-
+- if (req->flags & REQ_F_APOLL_MULTISHOT) {
+- iomsg->namelen = msg.msg_namelen;
+- iomsg->controllen = msg.msg_controllen;
+- if (io_recvmsg_multishot_overflow(iomsg))
+- return -EOVERFLOW;
+- }
+- } else {
+- iomsg->free_iov = iomsg->fast_iov;
+- ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+- &iomsg->free_iov, &iomsg->msg.msg_iter,
+- false);
+- if (ret > 0)
+- ret = 0;
+- }
+-
+- return ret;
+-}
++ iomsg->msg.msg_name = &iomsg->addr;
++ iomsg->msg.msg_iter.nr_segs = 0;
+
+ #ifdef CONFIG_COMPAT
+-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
+- struct io_async_msghdr *iomsg)
+-{
+- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+- struct compat_msghdr msg;
+- struct compat_iovec __user *uiov;
+- int ret;
+-
+- if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
+- return -EFAULT;
++ if (unlikely(req->ctx->compat)) {
++ struct compat_msghdr cmsg;
+
+- ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+- if (ret)
+- return ret;
+-
+- uiov = compat_ptr(msg.msg_iov);
+- if (req->flags & REQ_F_BUFFER_SELECT) {
+- compat_ssize_t clen;
+-
+- iomsg->free_iov = NULL;
+- if (msg.msg_iovlen == 0) {
+- sr->len = 0;
+- } else if (msg.msg_iovlen > 1) {
+- return -EINVAL;
+- } else {
+- if (!access_ok(uiov, sizeof(*uiov)))
+- return -EFAULT;
+- if (__get_user(clen, &uiov->iov_len))
+- return -EFAULT;
+- if (clen < 0)
+- return -EINVAL;
+- sr->len = clen;
+- }
++ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
++ if (unlikely(ret))
++ return ret;
+
+- if (req->flags & REQ_F_APOLL_MULTISHOT) {
+- iomsg->namelen = msg.msg_namelen;
+- iomsg->controllen = msg.msg_controllen;
+- if (io_recvmsg_multishot_overflow(iomsg))
+- return -EOVERFLOW;
+- }
+- } else {
+- iomsg->free_iov = iomsg->fast_iov;
+- ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
+- UIO_FASTIOV, &iomsg->free_iov,
+- &iomsg->msg.msg_iter, true);
+- if (ret < 0)
++ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
++ if (unlikely(ret))
+ return ret;
+- }
+
+- return 0;
+-}
++ return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
++ cmsg.msg_controllen);
++ }
+ #endif
+
+-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+- struct io_async_msghdr *iomsg)
+-{
+- iomsg->msg.msg_name = &iomsg->addr;
+- iomsg->msg.msg_iter.nr_segs = 0;
++ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
++ if (unlikely(ret))
++ return ret;
+
+-#ifdef CONFIG_COMPAT
+- if (req->ctx->compat)
+- return __io_compat_recvmsg_copy_hdr(req, iomsg);
+-#endif
++ ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
++ if (unlikely(ret))
++ return ret;
+
+- return __io_recvmsg_copy_hdr(req, iomsg);
++ return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
++ msg.msg_controllen);
+ }
+
+ int io_recvmsg_prep_async(struct io_kiocb *req)
+ {
++ struct io_async_msghdr *iomsg;
+ int ret;
+
+ if (!io_msg_alloc_async_prep(req))
+ return -ENOMEM;
+- ret = io_recvmsg_copy_hdr(req, req->async_data);
++ iomsg = req->async_data;
++ ret = io_recvmsg_copy_hdr(req, iomsg);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
+@@ -611,6 +653,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ sr->msg_flags |= MSG_CMSG_COMPAT;
+ #endif
+ sr->done_io = 0;
++ sr->nr_multishot_loops = 0;
+ return 0;
+ }
+
+@@ -645,23 +688,35 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
+ return true;
+ }
+
+- if (!mshot_finished) {
+- if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+- *ret, cflags | IORING_CQE_F_MORE)) {
+- io_recv_prep_retry(req);
+- /* Known not-empty or unknown state, retry */
+- if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
+- msg->msg_inq == -1)
++ if (mshot_finished)
++ goto finish;
++
++ /*
++ * Fill CQE for this receive and see if we should keep trying to
++ * receive from this socket.
++ */
++ if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
++ *ret, cflags | IORING_CQE_F_MORE)) {
++ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
++ int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
++
++ io_recv_prep_retry(req);
++ /* Known not-empty or unknown state, retry */
++ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
++ if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
+ return false;
+- if (issue_flags & IO_URING_F_MULTISHOT)
+- *ret = IOU_ISSUE_SKIP_COMPLETE;
+- else
+- *ret = -EAGAIN;
+- return true;
++ /* mshot retries exceeded, force a requeue */
++ sr->nr_multishot_loops = 0;
++ mshot_retry_ret = IOU_REQUEUE;
+ }
+- /* Otherwise stop multishot but use the current result. */
++ if (issue_flags & IO_URING_F_MULTISHOT)
++ *ret = mshot_retry_ret;
++ else
++ *ret = -EAGAIN;
++ return true;
+ }
+-
++ /* Otherwise stop multishot but use the current result. */
++finish:
+ io_req_set_res(req, *ret, cflags);
+
+ if (issue_flags & IO_URING_F_MULTISHOT)
+@@ -860,7 +915,8 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
+ kfree(kmsg->free_iov);
+ io_netmsg_recycle(req, issue_flags);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+- }
++ } else if (ret == -EAGAIN)
++ return io_setup_async_msg(req, kmsg, issue_flags);
+
+ return ret;
+ }
+@@ -874,6 +930,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
+ int ret, min_ret = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ size_t len = sr->len;
++ bool mshot_finished;
+
+ if (!(req->flags & REQ_F_POLLED) &&
+ (sr->flags & IORING_RECVSEND_POLL_FIRST))
+@@ -902,6 +959,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
+ if (!buf)
+ return -ENOBUFS;
+ sr->buf = buf;
++ sr->len = len;
+ }
+
+ ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
+@@ -942,6 +1000,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
+ req_set_fail(req);
+ }
+
++ mshot_finished = ret <= 0;
+ if (ret > 0)
+ ret += sr->done_io;
+ else if (sr->done_io)
+@@ -949,7 +1008,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
+ else
+ io_kbuf_recycle(req, issue_flags);
+
+- if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
++ if (!io_recv_finish(req, &ret, &msg, mshot_finished, issue_flags))
+ goto retry_multishot;
+
+ return ret;
+@@ -1217,6 +1276,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
+
+ if (req_has_async_data(req)) {
+ kmsg = req->async_data;
++ kmsg->msg.msg_control_user = sr->msg_control;
+ } else {
+ ret = io_sendmsg_copy_hdr(req, &iomsg);
+ if (ret)
+@@ -1350,7 +1410,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
+ * has already been done
+ */
+ if (issue_flags & IO_URING_F_MULTISHOT)
+- ret = IOU_ISSUE_SKIP_COMPLETE;
++ return IOU_ISSUE_SKIP_COMPLETE;
+ return ret;
+ }
+ if (ret == -ERESTARTSYS)
+@@ -1375,7 +1435,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
+ ret, IORING_CQE_F_MORE))
+ goto retry;
+
+- return -ECANCELED;
++ io_req_set_res(req, ret, 0);
++ return IOU_STOP_MULTISHOT;
+ }
+
+ int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+@@ -1461,16 +1522,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+
+- if (connect->in_progress) {
+- struct socket *socket;
+-
+- ret = -ENOTSOCK;
+- socket = sock_from_file(req->file);
+- if (socket)
+- ret = sock_error(socket->sk);
+- goto out;
+- }
+-
+ if (req_has_async_data(req)) {
+ io = req->async_data;
+ } else {
+@@ -1490,9 +1541,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ && force_nonblock) {
+ if (ret == -EINPROGRESS) {
+ connect->in_progress = true;
+- return -EAGAIN;
+- }
+- if (ret == -ECONNABORTED) {
++ } else if (ret == -ECONNABORTED) {
+ if (connect->seen_econnaborted)
+ goto out;
+ connect->seen_econnaborted = true;
+@@ -1506,6 +1555,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ memcpy(req->async_data, &__io, sizeof(__io));
+ return -EAGAIN;
+ }
++ if (connect->in_progress) {
++ /*
++ * At least bluetooth will return -EBADFD on a re-connect
++ * attempt, and it's (supposedly) also valid to get -EISCONN
++ * which means the previous result is good. For both of these,
++ * grab the sock_error() and use that for the completion.
++ */
++ if (ret == -EBADFD || ret == -EISCONN)
++ ret = sock_error(sock_from_file(req->file)->sk);
++ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ out:
+diff --git a/io_uring/nop.c b/io_uring/nop.c
+index d956599a3c1b8f..1a4e312dfe510a 100644
+--- a/io_uring/nop.c
++++ b/io_uring/nop.c
+@@ -12,6 +12,8 @@
+
+ int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
++ if (READ_ONCE(sqe->rw_flags))
++ return -EINVAL;
+ return 0;
+ }
+
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 4c360ba8793a50..5cf4fffe8b6c81 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -226,8 +226,24 @@ enum {
+ IOU_POLL_NO_ACTION = 1,
+ IOU_POLL_REMOVE_POLL_USE_RES = 2,
+ IOU_POLL_REISSUE = 3,
++ IOU_POLL_REQUEUE = 4,
+ };
+
++static void __io_poll_execute(struct io_kiocb *req, int mask)
++{
++ io_req_set_res(req, mask, 0);
++ req->io_task_work.func = io_poll_task_func;
++
++ trace_io_uring_task_add(req, mask);
++ io_req_task_work_add(req);
++}
++
++static inline void io_poll_execute(struct io_kiocb *req, int res)
++{
++ if (io_poll_get_ownership(req))
++ __io_poll_execute(req, res);
++}
++
+ /*
+ * All poll tw should go through this. Checks for poll events, manages
+ * references, does rewait, etc.
+@@ -309,6 +325,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ int ret = io_poll_issue(req, ts);
+ if (ret == IOU_STOP_MULTISHOT)
+ return IOU_POLL_REMOVE_POLL_USE_RES;
++ else if (ret == IOU_REQUEUE)
++ return IOU_POLL_REQUEUE;
+ if (ret < 0)
+ return ret;
+ }
+@@ -331,8 +349,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
+ int ret;
+
+ ret = io_poll_check_events(req, ts);
+- if (ret == IOU_POLL_NO_ACTION)
++ if (ret == IOU_POLL_NO_ACTION) {
++ return;
++ } else if (ret == IOU_POLL_REQUEUE) {
++ __io_poll_execute(req, 0);
+ return;
++ }
+ io_poll_remove_entries(req);
+ io_poll_tw_hash_eject(req, ts);
+
+@@ -364,21 +386,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
+ }
+ }
+
+-static void __io_poll_execute(struct io_kiocb *req, int mask)
+-{
+- io_req_set_res(req, mask, 0);
+- req->io_task_work.func = io_poll_task_func;
+-
+- trace_io_uring_task_add(req, mask);
+- io_req_task_work_add(req);
+-}
+-
+-static inline void io_poll_execute(struct io_kiocb *req, int res)
+-{
+- if (io_poll_get_ownership(req))
+- __io_poll_execute(req, res);
+-}
+-
+ static void io_poll_cancel_req(struct io_kiocb *req)
+ {
+ io_poll_mark_cancelled(req);
+@@ -974,7 +981,6 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
+ struct io_hash_bucket *bucket;
+ struct io_kiocb *preq;
+ int ret2, ret = 0;
+- struct io_tw_state ts = { .locked = true };
+
+ io_ring_submit_lock(ctx, issue_flags);
+ preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
+@@ -1023,7 +1029,8 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
+
+ req_set_fail(preq);
+ io_req_set_res(preq, -ECANCELED, 0);
+- io_req_task_complete(preq, &ts);
++ preq->io_task_work.func = io_req_task_complete;
++ io_req_task_work_add(preq);
+ out:
+ io_ring_submit_unlock(ctx, issue_flags);
+ if (ret < 0) {
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index d9c853d1058780..0f9dcde72ebffa 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -24,7 +24,6 @@ struct io_rsrc_update {
+ };
+
+ static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
+-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
+ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
+ struct io_mapped_ubuf **pimu,
+ struct page **last_hpage);
+@@ -157,7 +156,7 @@ static void io_rsrc_put_work(struct io_rsrc_node *node)
+
+ switch (node->type) {
+ case IORING_RSRC_FILE:
+- io_rsrc_file_put(node->ctx, prsrc);
++ fput(prsrc->file);
+ break;
+ case IORING_RSRC_BUFFER:
+ io_rsrc_buf_put(node->ctx, prsrc);
+@@ -251,6 +250,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
+
+ ret = io_run_task_work_sig(ctx);
+ if (ret < 0) {
++ __set_current_state(TASK_RUNNING);
+ mutex_lock(&ctx->uring_lock);
+ if (list_empty(&ctx->rsrc_ref_list))
+ ret = 0;
+@@ -402,23 +402,13 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ break;
+ }
+ /*
+- * Don't allow io_uring instances to be registered. If
+- * UNIX isn't enabled, then this causes a reference
+- * cycle and this instance can never get freed. If UNIX
+- * is enabled we'll handle it just fine, but there's
+- * still no point in allowing a ring fd as it doesn't
+- * support regular read/write anyway.
++ * Don't allow io_uring instances to be registered.
+ */
+ if (io_is_uring_fops(file)) {
+ fput(file);
+ err = -EBADF;
+ break;
+ }
+- err = io_scm_file_account(ctx, file);
+- if (err) {
+- fput(file);
+- break;
+- }
+ *io_get_tag_slot(data, i) = tag;
+ io_fixed_file_set(file_slot, file);
+ io_file_bitmap_set(&ctx->file_table, i);
+@@ -675,22 +665,12 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct file *file = io_file_from_index(&ctx->file_table, i);
+
+- /* skip scm accounted files, they'll be freed by ->ring_sock */
+- if (!file || io_file_need_scm(file))
++ if (!file)
+ continue;
+ io_file_bitmap_clear(&ctx->file_table, i);
+ fput(file);
+ }
+
+-#if defined(CONFIG_UNIX)
+- if (ctx->ring_sock) {
+- struct sock *sock = ctx->ring_sock->sk;
+- struct sk_buff *skb;
+-
+- while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
+- kfree_skb(skb);
+- }
+-#endif
+ io_free_file_tables(&ctx->file_table);
+ io_file_table_set_alloc_range(ctx, 0, 0);
+ io_rsrc_data_free(ctx->file_data);
+@@ -718,137 +698,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ return ret;
+ }
+
+-/*
+- * Ensure the UNIX gc is aware of our file set, so we are certain that
+- * the io_uring can be safely unregistered on process exit, even if we have
+- * loops in the file referencing. We account only files that can hold other
+- * files because otherwise they can't form a loop and so are not interesting
+- * for GC.
+- */
+-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
+-{
+-#if defined(CONFIG_UNIX)
+- struct sock *sk = ctx->ring_sock->sk;
+- struct sk_buff_head *head = &sk->sk_receive_queue;
+- struct scm_fp_list *fpl;
+- struct sk_buff *skb;
+-
+- if (likely(!io_file_need_scm(file)))
+- return 0;
+-
+- /*
+- * See if we can merge this file into an existing skb SCM_RIGHTS
+- * file set. If there's no room, fall back to allocating a new skb
+- * and filling it in.
+- */
+- spin_lock_irq(&head->lock);
+- skb = skb_peek(head);
+- if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
+- __skb_unlink(skb, head);
+- else
+- skb = NULL;
+- spin_unlock_irq(&head->lock);
+-
+- if (!skb) {
+- fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
+- if (!fpl)
+- return -ENOMEM;
+-
+- skb = alloc_skb(0, GFP_KERNEL);
+- if (!skb) {
+- kfree(fpl);
+- return -ENOMEM;
+- }
+-
+- fpl->user = get_uid(current_user());
+- fpl->max = SCM_MAX_FD;
+- fpl->count = 0;
+-
+- UNIXCB(skb).fp = fpl;
+- skb->sk = sk;
+- skb->destructor = io_uring_destruct_scm;
+- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+- }
+-
+- fpl = UNIXCB(skb).fp;
+- fpl->fp[fpl->count++] = get_file(file);
+- unix_inflight(fpl->user, file);
+- skb_queue_head(head, skb);
+- fput(file);
+-#endif
+- return 0;
+-}
+-
+-static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
+-{
+-#if defined(CONFIG_UNIX)
+- struct sock *sock = ctx->ring_sock->sk;
+- struct sk_buff_head list, *head = &sock->sk_receive_queue;
+- struct sk_buff *skb;
+- int i;
+-
+- __skb_queue_head_init(&list);
+-
+- /*
+- * Find the skb that holds this file in its SCM_RIGHTS. When found,
+- * remove this entry and rearrange the file array.
+- */
+- skb = skb_dequeue(head);
+- while (skb) {
+- struct scm_fp_list *fp;
+-
+- fp = UNIXCB(skb).fp;
+- for (i = 0; i < fp->count; i++) {
+- int left;
+-
+- if (fp->fp[i] != file)
+- continue;
+-
+- unix_notinflight(fp->user, fp->fp[i]);
+- left = fp->count - 1 - i;
+- if (left) {
+- memmove(&fp->fp[i], &fp->fp[i + 1],
+- left * sizeof(struct file *));
+- }
+- fp->count--;
+- if (!fp->count) {
+- kfree_skb(skb);
+- skb = NULL;
+- } else {
+- __skb_queue_tail(&list, skb);
+- }
+- fput(file);
+- file = NULL;
+- break;
+- }
+-
+- if (!file)
+- break;
+-
+- __skb_queue_tail(&list, skb);
+-
+- skb = skb_dequeue(head);
+- }
+-
+- if (skb_peek(&list)) {
+- spin_lock_irq(&head->lock);
+- while ((skb = __skb_dequeue(&list)) != NULL)
+- __skb_queue_tail(head, skb);
+- spin_unlock_irq(&head->lock);
+- }
+-#endif
+-}
+-
+-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
+-{
+- struct file *file = prsrc->file;
+-
+- if (likely(!io_file_need_scm(file)))
+- fput(file);
+- else
+- io_rsrc_file_scm_put(ctx, file);
+-}
+-
+ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args, u64 __user *tags)
+ {
+@@ -897,21 +746,12 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ goto fail;
+
+ /*
+- * Don't allow io_uring instances to be registered. If UNIX
+- * isn't enabled, then this causes a reference cycle and this
+- * instance can never get freed. If UNIX is enabled we'll
+- * handle it just fine, but there's still no point in allowing
+- * a ring fd as it doesn't support regular read/write anyway.
++ * Don't allow io_uring instances to be registered.
+ */
+ if (io_is_uring_fops(file)) {
+ fput(file);
+ goto fail;
+ }
+- ret = io_scm_file_account(ctx, file);
+- if (ret) {
+- fput(file);
+- goto fail;
+- }
+ file_slot = io_fixed_file_slot(&ctx->file_table, i);
+ io_fixed_file_set(file_slot, file);
+ io_file_bitmap_set(&ctx->file_table, i);
+@@ -1261,14 +1101,13 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+- if (offset <= bvec->bv_len) {
++ if (offset < bvec->bv_len) {
+ /*
+ * Note, huge pages buffers consists of one large
+ * bvec entry and should always go this way. The other
+ * branch doesn't expect non PAGE_SIZE'd chunks.
+ */
+ iter->bvec = bvec;
+- iter->nr_segs = bvec->bv_len;
+ iter->count -= offset;
+ iter->iov_offset = offset;
+ } else {
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index 8625181fb87acf..7238b9cfe33b60 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -75,28 +75,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx);
+ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args, u64 __user *tags);
+
+-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
+-
+-#if defined(CONFIG_UNIX)
+-static inline bool io_file_need_scm(struct file *filp)
+-{
+- return !!unix_get_socket(filp);
+-}
+-#else
+-static inline bool io_file_need_scm(struct file *filp)
+-{
+- return false;
+-}
+-#endif
+-
+-static inline int io_scm_file_account(struct io_ring_ctx *ctx,
+- struct file *file)
+-{
+- if (likely(!io_file_need_scm(file)))
+- return 0;
+- return __io_scm_file_account(ctx, file);
+-}
+-
+ int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args);
+ int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 8f68d5ad4564fe..0a0c1c9db0f905 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -549,15 +549,19 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
+ struct iovec *iov;
+ int ret;
+
++ iorw->bytes_done = 0;
++ iorw->free_iovec = NULL;
++
+ /* submission path, ->uring_lock should already be taken */
+ ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
+ if (unlikely(ret < 0))
+ return ret;
+
+- iorw->bytes_done = 0;
+- iorw->free_iovec = iov;
+- if (iov)
++ if (iov) {
++ iorw->free_iovec = iov;
+ req->flags |= REQ_F_NEED_CLEANUP;
++ }
++
+ return 0;
+ }
+
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index bd6c2c7959a5bf..cdf8b567cb9443 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/audit.h>
+ #include <linux/security.h>
++#include <linux/cpuset.h>
+ #include <linux/io_uring.h>
+
+ #include <uapi/linux/io_uring.h>
+@@ -214,6 +215,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
+ did_sig = get_signal(&ksig);
+ cond_resched();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ }
+@@ -229,10 +231,23 @@ static int io_sq_thread(void *data)
+ snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
+ set_task_comm(current, buf);
+
+- if (sqd->sq_cpu != -1)
++ /* reset to our pid after we've set task_comm, for fdinfo */
++ sqd->task_pid = current->pid;
++
++ if (sqd->sq_cpu != -1) {
+ set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+- else
++ } else {
+ set_cpus_allowed_ptr(current, cpu_online_mask);
++ sqd->sq_cpu = raw_smp_processor_id();
++ }
++
++ /*
++ * Force audit context to get setup, in case we do prep side async
++ * operations that would trigger an audit call before any issue side
++ * audit has been done.
++ */
++ audit_uring_entry(IORING_OP_NOP);
++ audit_uring_exit(true, 0);
+
+ mutex_lock(&sqd->lock);
+ while (1) {
+@@ -261,6 +276,7 @@ static int io_sq_thread(void *data)
+ mutex_unlock(&sqd->lock);
+ cond_resched();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ continue;
+ }
+@@ -294,6 +310,7 @@ static int io_sq_thread(void *data)
+ mutex_unlock(&sqd->lock);
+ schedule();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ atomic_andnot(IORING_SQ_NEED_WAKEUP,
+@@ -385,11 +402,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ return 0;
+
+ if (p->flags & IORING_SETUP_SQ_AFF) {
++ cpumask_var_t allowed_mask;
+ int cpu = p->sq_thread_cpu;
+
+ ret = -EINVAL;
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ goto err_sqpoll;
++ ret = -ENOMEM;
++ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
++ goto err_sqpoll;
++ ret = -EINVAL;
++ cpuset_cpus_allowed(current, allowed_mask);
++ if (!cpumask_test_cpu(cpu, allowed_mask)) {
++ free_cpumask_var(allowed_mask);
++ goto err_sqpoll;
++ }
++ free_cpumask_var(allowed_mask);
+ sqd->sq_cpu = cpu;
+ } else {
+ sqd->sq_cpu = -1;
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 7fd7dbb211d642..4f1f710197d623 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -644,7 +644,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
+
+ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all)
+- __must_hold(&req->ctx->timeout_lock)
++ __must_hold(&head->ctx->timeout_lock)
+ {
+ struct io_kiocb *req;
+
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 537795fddc87d9..5fa19861cda546 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -7,7 +7,7 @@
+ #include <linux/nospec.h>
+
+ #include <uapi/linux/io_uring.h>
+-#include <uapi/asm-generic/ioctls.h>
++#include <asm/ioctls.h>
+
+ #include "io_uring.h"
+ #include "rsrc.h"
+diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
+index 8c62e443f78b3c..b2f39a86f47341 100644
+--- a/ipc/ipc_sysctl.c
++++ b/ipc/ipc_sysctl.c
+@@ -14,6 +14,7 @@
+ #include <linux/ipc_namespace.h>
+ #include <linux/msg.h>
+ #include <linux/slab.h>
++#include <linux/cred.h>
+ #include "util.h"
+
+ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
+@@ -190,25 +191,56 @@ static int set_is_seen(struct ctl_table_set *set)
+ return &current->nsproxy->ipc_ns->ipc_set == set;
+ }
+
++static void ipc_set_ownership(struct ctl_table_header *head,
++ kuid_t *uid, kgid_t *gid)
++{
++ struct ipc_namespace *ns =
++ container_of(head->set, struct ipc_namespace, ipc_set);
++
++ kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
++ kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
++
++ *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
++ *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
++}
++
+ static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
+ {
+ int mode = table->mode;
+
+ #ifdef CONFIG_CHECKPOINT_RESTORE
+- struct ipc_namespace *ns = current->nsproxy->ipc_ns;
++ struct ipc_namespace *ns =
++ container_of(head->set, struct ipc_namespace, ipc_set);
+
+ if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
+ (table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
+ (table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
+ checkpoint_restore_ns_capable(ns->user_ns))
+ mode = 0666;
++ else
+ #endif
+- return mode;
++ {
++ kuid_t ns_root_uid;
++ kgid_t ns_root_gid;
++
++ ipc_set_ownership(head, &ns_root_uid, &ns_root_gid);
++
++ if (uid_eq(current_euid(), ns_root_uid))
++ mode >>= 6;
++
++ else if (in_egroup_p(ns_root_gid))
++ mode >>= 3;
++ }
++
++ mode &= 7;
++
++ return (mode << 6) | (mode << 3) | mode;
+ }
+
+ static struct ctl_table_root set_root = {
+ .lookup = set_lookup,
+ .permissions = ipc_permissions,
++ .set_ownership = ipc_set_ownership,
+ };
+
+ bool setup_ipc_sysctls(struct ipc_namespace *ns)
+diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
+index ebb5ed81c151a8..6bb1c5397c69b9 100644
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -12,6 +12,7 @@
+ #include <linux/stat.h>
+ #include <linux/capability.h>
+ #include <linux/slab.h>
++#include <linux/cred.h>
+
+ static int msg_max_limit_min = MIN_MSGMAX;
+ static int msg_max_limit_max = HARD_MSGMAX;
+@@ -76,8 +77,42 @@ static int set_is_seen(struct ctl_table_set *set)
+ return &current->nsproxy->ipc_ns->mq_set == set;
+ }
+
++static void mq_set_ownership(struct ctl_table_header *head,
++ kuid_t *uid, kgid_t *gid)
++{
++ struct ipc_namespace *ns =
++ container_of(head->set, struct ipc_namespace, mq_set);
++
++ kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
++ kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
++
++ *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
++ *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
++}
++
++static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table)
++{
++ int mode = table->mode;
++ kuid_t ns_root_uid;
++ kgid_t ns_root_gid;
++
++ mq_set_ownership(head, &ns_root_uid, &ns_root_gid);
++
++ if (uid_eq(current_euid(), ns_root_uid))
++ mode >>= 6;
++
++ else if (in_egroup_p(ns_root_gid))
++ mode >>= 3;
++
++ mode &= 7;
++
++ return (mode << 6) | (mode << 3) | mode;
++}
++
+ static struct ctl_table_root set_root = {
+ .lookup = set_lookup,
++ .permissions = mq_permissions,
++ .set_ownership = mq_set_ownership,
+ };
+
+ bool setup_mq_sysctls(struct ipc_namespace *ns)
+diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
+index 9bfe68fe967624..37e488d5b4fc0a 100644
+--- a/kernel/Kconfig.kexec
++++ b/kernel/Kconfig.kexec
+@@ -36,6 +36,8 @@ config KEXEC
+ config KEXEC_FILE
+ bool "Enable kexec file based system call"
+ depends on ARCH_SUPPORTS_KEXEC_FILE
++ select CRYPTO
++ select CRYPTO_SHA256
+ select KEXEC_CORE
+ help
+ This is new version of kexec system call. This system call is
+@@ -94,10 +96,8 @@ config KEXEC_JUMP
+ config CRASH_DUMP
+ bool "kernel crash dumps"
+ depends on ARCH_SUPPORTS_CRASH_DUMP
+- depends on ARCH_SUPPORTS_KEXEC
+ select CRASH_CORE
+ select KEXEC_CORE
+- select KEXEC
+ help
+ Generate crash dump after being started by kexec.
+ This should be normally only set in special crash dump kernels
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 3947122d618bf1..ce105a5558fcfa 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -114,6 +114,7 @@ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+ obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
+ obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
+ obj-$(CONFIG_CFI_CLANG) += cfi.o
++obj-$(CONFIG_NUMA) += numa.o
+
+ obj-$(CONFIG_PERF_EVENTS) += events/
+
+diff --git a/kernel/async.c b/kernel/async.c
+index b2c4ba5686ee49..673bba6bdf3a0b 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
+ wake_up(&async_done);
+ }
+
++static async_cookie_t __async_schedule_node_domain(async_func_t func,
++ void *data, int node,
++ struct async_domain *domain,
++ struct async_entry *entry)
++{
++ async_cookie_t newcookie;
++ unsigned long flags;
++
++ INIT_LIST_HEAD(&entry->domain_list);
++ INIT_LIST_HEAD(&entry->global_list);
++ INIT_WORK(&entry->work, async_run_entry_fn);
++ entry->func = func;
++ entry->data = data;
++ entry->domain = domain;
++
++ spin_lock_irqsave(&async_lock, flags);
++
++ /* allocate cookie and queue */
++ newcookie = entry->cookie = next_cookie++;
++
++ list_add_tail(&entry->domain_list, &domain->pending);
++ if (domain->registered)
++ list_add_tail(&entry->global_list, &async_global_pending);
++
++ atomic_inc(&entry_count);
++ spin_unlock_irqrestore(&async_lock, flags);
++
++ /* schedule for execution */
++ queue_work_node(node, system_unbound_wq, &entry->work);
++
++ return newcookie;
++}
++
+ /**
+ * async_schedule_node_domain - NUMA specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ func(data, newcookie);
+ return newcookie;
+ }
+- INIT_LIST_HEAD(&entry->domain_list);
+- INIT_LIST_HEAD(&entry->global_list);
+- INIT_WORK(&entry->work, async_run_entry_fn);
+- entry->func = func;
+- entry->data = data;
+- entry->domain = domain;
+-
+- spin_lock_irqsave(&async_lock, flags);
+-
+- /* allocate cookie and queue */
+- newcookie = entry->cookie = next_cookie++;
+-
+- list_add_tail(&entry->domain_list, &domain->pending);
+- if (domain->registered)
+- list_add_tail(&entry->global_list, &async_global_pending);
+-
+- atomic_inc(&entry_count);
+- spin_unlock_irqrestore(&async_lock, flags);
+-
+- /* schedule for execution */
+- queue_work_node(node, system_unbound_wq, &entry->work);
+
+- return newcookie;
++ return __async_schedule_node_domain(func, data, node, domain, entry);
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node_domain);
+
+@@ -231,6 +243,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node);
+
++/**
++ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
++ * @func: function to execute asynchronously
++ * @dev: device argument to be passed to function
++ *
++ * @dev is used as both the argument for the function and to provide NUMA
++ * context for where to run the function.
++ *
++ * If the asynchronous execution of @func is scheduled successfully, return
++ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
++ * that will run the function synchronously then.
++ */
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
++{
++ struct async_entry *entry;
++
++ entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
++
++ /* Give up if there is no memory or too much work. */
++ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
++ kfree(entry);
++ return false;
++ }
++
++ __async_schedule_node_domain(func, dev, dev_to_node(dev),
++ &async_dfl_domain, entry);
++ return true;
++}
++
+ /**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
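async_schedule_dev_nocall() differs from async_schedule_dev() in its failure mode: when no async entry can be allocated, or too much async work is already pending, it queues nothing and returns false instead of running the function synchronously on the caller's behalf. A hedged sketch of the calling pattern this enables (my_resume_fn/my_resume are made-up names, not kernel code):

    #include <linux/async.h>
    #include <linux/device.h>

    /* Illustrative caller: run the work asynchronously when possible,
     * fall back to a direct call when nothing was queued.
     */
    static void my_resume_fn(void *data, async_cookie_t cookie)
    {
        struct device *dev = data;
        /* ... device-specific resume work ... */
    }

    static void my_resume(struct device *dev)
    {
        /* false means no entry was queued, so the caller must do the
         * work itself (here, synchronously with a dummy cookie).
         */
        if (!async_schedule_dev_nocall(my_resume_fn, dev))
            my_resume_fn(dev, 0);
    }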
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 16205dd29843b7..9c8e5f732c4c79 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -487,15 +487,19 @@ static void auditd_conn_free(struct rcu_head *rcu)
+ * @pid: auditd PID
+ * @portid: auditd netlink portid
+ * @net: auditd network namespace pointer
++ * @skb: the netlink command from the audit daemon
++ * @ack: netlink ack flag, cleared if ack'd here
+ *
+ * Description:
+ * This function will obtain and drop network namespace references as
+ * necessary. Returns zero on success, negative values on failure.
+ */
+-static int auditd_set(struct pid *pid, u32 portid, struct net *net)
++static int auditd_set(struct pid *pid, u32 portid, struct net *net,
++ struct sk_buff *skb, bool *ack)
+ {
+ unsigned long flags;
+ struct auditd_connection *ac_old, *ac_new;
++ struct nlmsghdr *nlh;
+
+ if (!pid || !net)
+ return -EINVAL;
+@@ -507,6 +511,13 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net)
+ ac_new->portid = portid;
+ ac_new->net = get_net(net);
+
++ /* send the ack now to avoid a race with the queue backlog */
++ if (*ack) {
++ nlh = nlmsg_hdr(skb);
++ netlink_ack(skb, nlh, 0, NULL);
++ *ack = false;
++ }
++
+ spin_lock_irqsave(&auditd_conn_lock, flags);
+ ac_old = rcu_dereference_protected(auditd_conn,
+ lockdep_is_held(&auditd_conn_lock));
+@@ -1200,7 +1211,8 @@ static int audit_replace(struct pid *pid)
+ return auditd_send_unicast_skb(skb);
+ }
+
+-static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
++static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
++ bool *ack)
+ {
+ u32 seq;
+ void *data;
+@@ -1293,7 +1305,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ /* register a new auditd connection */
+ err = auditd_set(req_pid,
+ NETLINK_CB(skb).portid,
+- sock_net(NETLINK_CB(skb).sk));
++ sock_net(NETLINK_CB(skb).sk),
++ skb, ack);
+ if (audit_enabled != AUDIT_OFF)
+ audit_log_config_change("audit_pid",
+ new_pid,
+@@ -1538,9 +1551,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Parse the provided skb and deal with any messages that may be present,
+ * malformed skbs are discarded.
+ */
+-static void audit_receive(struct sk_buff  *skb)
++static void audit_receive(struct sk_buff *skb)
+ {
+ struct nlmsghdr *nlh;
++ bool ack;
+ /*
+ * len MUST be signed for nlmsg_next to be able to dec it below 0
+ * if the nlmsg_len was not aligned
+@@ -1553,9 +1567,12 @@ static void audit_receive(struct sk_buff *skb)
+
+ audit_ctl_lock();
+ while (nlmsg_ok(nlh, len)) {
+- err = audit_receive_msg(skb, nlh);
+- /* if err or if this message says it wants a response */
+- if (err || (nlh->nlmsg_flags & NLM_F_ACK))
++ ack = nlh->nlmsg_flags & NLM_F_ACK;
++ err = audit_receive_msg(skb, nlh, &ack);
++
++ /* send an ack if the user asked for one and audit_receive_msg
++ * didn't already do it, or if there was an error. */
++ if (ack || err)
+ netlink_ack(skb, nlh, err, NULL);
+
+ nlh = nlmsg_next(nlh, &len);
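The audit change works by letting auditd_set() deliver the netlink ack before the new connection is published, then clearing *ack so audit_receive() knows the message has already been answered; otherwise the daemon could observe backlogged records arriving ahead of its ack. The claim-the-ack handshake, reduced to plain C (all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static void send_ack(const char *who)
    {
        printf("ack sent by %s\n", who);
    }

    /* The handler may ack early; clearing *ack records that it did,
     * just as auditd_set() does after calling netlink_ack().
     */
    static int handle_cmd(bool *ack)
    {
        if (*ack) {
            send_ack("handler, before publishing state");
            *ack = false;
        }
        /* ... publish the new connection here ... */
        return 0;
    }

    int main(void)
    {
        bool ack = true;        /* message carried NLM_F_ACK */
        int err = handle_cmd(&ack);

        /* The caller acks only if the handler did not, or on error,
         * so no message is ever acked twice.
         */
        if (ack || err)
            send_ack("caller");
        return 0;
    }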
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 65075f1e4ac8c8..7a98cd176a127d 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -527,11 +527,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+ unsigned long ino;
+ dev_t dev;
+
+- exe_file = get_task_exe_file(tsk);
++ /* only do exe filtering if we are recording @current events/records */
++ if (tsk != current)
++ return 0;
++
++ if (!current->mm)
++ return 0;
++ exe_file = get_mm_exe_file(current->mm);
+ if (!exe_file)
+ return 0;
+ ino = file_inode(exe_file)->i_ino;
+ dev = file_inode(exe_file)->i_sb->s_dev;
+ fput(exe_file);
++
+ return audit_mark_compare(mark, ino, dev);
+ }
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index 8317a37dea0bbd..685bccb20b6f05 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ entry->rule.buflen += f_val;
+ f->lsm_str = str;
+ err = security_audit_rule_init(f->type, f->op, str,
+- (void **)&f->lsm_rule);
++ (void **)&f->lsm_rule,
++ GFP_KERNEL);
+ /* Keep currently invalid fields around in case they
+ * become valid after a policy reload. */
+ if (err == -EINVAL) {
+@@ -799,7 +800,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
+
+ /* our own (refreshed) copy of lsm_rule */
+ ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
+- (void **)&df->lsm_rule);
++ (void **)&df->lsm_rule, GFP_KERNEL);
+ /* Keep currently invalid fields around in case they
+ * become valid after a policy reload. */
+ if (ret == -EINVAL) {
+diff --git a/kernel/bounds.c b/kernel/bounds.c
+index b529182e8b04fc..29b2cd00df2ccf 100644
+--- a/kernel/bounds.c
++++ b/kernel/bounds.c
+@@ -19,7 +19,7 @@ int main(void)
+ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ #ifdef CONFIG_SMP
+- DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
++ DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
+ #endif
+ DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+ #ifdef CONFIG_LRU_GEN
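NR_CPUS_BITS sizes bitfields that must hold any CPU number below CONFIG_NR_CPUS, so it needs the rounded-up log2. ilog2() rounds down and loses a bit whenever CONFIG_NR_CPUS is not a power of two; order_base_2() rounds up. A standalone check with plain-C stand-ins for the kernel helpers:

    #include <stdio.h>

    static unsigned int ilog2_(unsigned long n)        /* floor(log2) */
    {
        unsigned int r = 0;

        while (n >>= 1)
            r++;
        return r;
    }

    static unsigned int order_base_2_(unsigned long n) /* ceil(log2) */
    {
        return n <= 1 ? 0 : ilog2_(n - 1) + 1;
    }

    int main(void)
    {
        /* With NR_CPUS=6, ilog2 yields 2 bits, which cannot encode
         * CPUs 4 and 5; order_base_2 yields the required 3 bits.
         */
        printf("%u %u\n", ilog2_(6), order_base_2_(6));   /* 2 3 */
        printf("%u %u\n", ilog2_(8), order_base_2_(8));   /* 3 3 */
        return 0;
    }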
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 2058e89b5ddd00..1811efcfbd6e3b 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -73,6 +73,9 @@ int array_map_alloc_check(union bpf_attr *attr)
+ /* avoid overflow on round_up(map->value_size) */
+ if (attr->value_size > INT_MAX)
+ return -E2BIG;
++ /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
++ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
++ return -E2BIG;
+
+ return 0;
+ }
+@@ -867,11 +870,11 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+ }
+
+ if (old_ptr)
+- map->ops->map_fd_put_ptr(old_ptr);
++ map->ops->map_fd_put_ptr(map, old_ptr, true);
+ return 0;
+ }
+
+-static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
++static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
+ {
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ void *old_ptr;
+@@ -890,13 +893,18 @@ static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
+ }
+
+ if (old_ptr) {
+- map->ops->map_fd_put_ptr(old_ptr);
++ map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
+ return 0;
+ } else {
+ return -ENOENT;
+ }
+ }
+
++static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
++{
++ return __fd_array_map_delete_elem(map, key, true);
++}
++
+ static void *prog_fd_array_get_ptr(struct bpf_map *map,
+ struct file *map_file, int fd)
+ {
+@@ -913,8 +921,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
+ return prog;
+ }
+
+-static void prog_fd_array_put_ptr(void *ptr)
++static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
++ /* bpf_prog is freed after one RCU or tasks trace grace period */
+ bpf_prog_put(ptr);
+ }
+
+@@ -924,13 +933,13 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
+ }
+
+ /* decrement refcnt of all bpf_progs that are stored in this map */
+-static void bpf_fd_array_map_clear(struct bpf_map *map)
++static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
+ {
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ int i;
+
+ for (i = 0; i < array->map.max_entries; i++)
+- fd_array_map_delete_elem(map, &i);
++ __fd_array_map_delete_elem(map, &i, need_defer);
+ }
+
+ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
+@@ -1012,11 +1021,16 @@ static void prog_array_map_poke_untrack(struct bpf_map *map,
+ mutex_unlock(&aux->poke_mutex);
+ }
+
++void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
++ struct bpf_prog *new, struct bpf_prog *old)
++{
++ WARN_ON_ONCE(1);
++}
++
+ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ struct bpf_prog *old,
+ struct bpf_prog *new)
+ {
+- u8 *old_addr, *new_addr, *old_bypass_addr;
+ struct prog_poke_elem *elem;
+ struct bpf_array_aux *aux;
+
+@@ -1025,7 +1039,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+
+ list_for_each_entry(elem, &aux->poke_progs, list) {
+ struct bpf_jit_poke_descriptor *poke;
+- int i, ret;
++ int i;
+
+ for (i = 0; i < elem->aux->size_poke_tab; i++) {
+ poke = &elem->aux->poke_tab[i];
+@@ -1044,21 +1058,10 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ * activated, so tail call updates can arrive from here
+ * while JIT is still finishing its final fixup for
+ * non-activated poke entries.
+- * 3) On program teardown, the program's kallsym entry gets
+- * removed out of RCU callback, but we can only untrack
+- * from sleepable context, therefore bpf_arch_text_poke()
+- * might not see that this is in BPF text section and
+- * bails out with -EINVAL. As these are unreachable since
+- * RCU grace period already passed, we simply skip them.
+- * 4) Also programs reaching refcount of zero while patching
++ * 3) Also programs reaching refcount of zero while patching
+ * is in progress is okay since we're protected under
+ * poke_mutex and untrack the programs before the JIT
+- * buffer is freed. When we're still in the middle of
+- * patching and suddenly kallsyms entry of the program
+- * gets evicted, we just skip the rest which is fine due
+- * to point 3).
+- * 5) Any other error happening below from bpf_arch_text_poke()
+- * is a unexpected bug.
++ * buffer is freed.
+ */
+ if (!READ_ONCE(poke->tailcall_target_stable))
+ continue;
+@@ -1068,39 +1071,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
+ poke->tail_call.key != key)
+ continue;
+
+- old_bypass_addr = old ? NULL : poke->bypass_addr;
+- old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+- new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+-
+- if (new) {
+- ret = bpf_arch_text_poke(poke->tailcall_target,
+- BPF_MOD_JUMP,
+- old_addr, new_addr);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- if (!old) {
+- ret = bpf_arch_text_poke(poke->tailcall_bypass,
+- BPF_MOD_JUMP,
+- poke->bypass_addr,
+- NULL);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- }
+- } else {
+- ret = bpf_arch_text_poke(poke->tailcall_bypass,
+- BPF_MOD_JUMP,
+- old_bypass_addr,
+- poke->bypass_addr);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- /* let other CPUs finish the execution of program
+- * so that it will not possible to expose them
+- * to invalid nop, stack unwind, nop state
+- */
+- if (!ret)
+- synchronize_rcu();
+- ret = bpf_arch_text_poke(poke->tailcall_target,
+- BPF_MOD_JUMP,
+- old_addr, NULL);
+- BUG_ON(ret < 0 && ret != -EINVAL);
+- }
++ bpf_arch_poke_desc_update(poke, new, old);
+ }
+ }
+ }
+@@ -1109,7 +1080,7 @@ static void prog_array_map_clear_deferred(struct work_struct *work)
+ {
+ struct bpf_map *map = container_of(work, struct bpf_array_aux,
+ work)->map;
+- bpf_fd_array_map_clear(map);
++ bpf_fd_array_map_clear(map, true);
+ bpf_map_put(map);
+ }
+
+@@ -1239,8 +1210,9 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
+ return ee;
+ }
+
+-static void perf_event_fd_array_put_ptr(void *ptr)
++static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
++ /* bpf_perf_event is freed after one RCU grace period */
+ bpf_event_entry_free_rcu(ptr);
+ }
+
+@@ -1258,7 +1230,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
+ for (i = 0; i < array->map.max_entries; i++) {
+ ee = READ_ONCE(array->ptrs[i]);
+ if (ee && ee->map_file == map_file)
+- fd_array_map_delete_elem(map, &i);
++ __fd_array_map_delete_elem(map, &i, true);
+ }
+ rcu_read_unlock();
+ }
+@@ -1266,7 +1238,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
+ static void perf_event_fd_array_map_free(struct bpf_map *map)
+ {
+ if (map->map_flags & BPF_F_PRESERVE_ELEMS)
+- bpf_fd_array_map_clear(map);
++ bpf_fd_array_map_clear(map, false);
+ fd_array_map_free(map);
+ }
+
+@@ -1294,7 +1266,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
+ return cgroup_get_from_fd(fd);
+ }
+
+-static void cgroup_fd_array_put_ptr(void *ptr)
++static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
+ /* cgroup_put free cgrp after a rcu grace period */
+ cgroup_put(ptr);
+@@ -1302,7 +1274,7 @@ static void cgroup_fd_array_put_ptr(void *ptr)
+
+ static void cgroup_fd_array_free(struct bpf_map *map)
+ {
+- bpf_fd_array_map_clear(map);
++ bpf_fd_array_map_clear(map, false);
+ fd_array_map_free(map);
+ }
+
+@@ -1347,7 +1319,7 @@ static void array_of_map_free(struct bpf_map *map)
+ * is protected by fdget/fdput.
+ */
+ bpf_map_meta_free(map->inner_map_meta);
+- bpf_fd_array_map_clear(map);
++ bpf_fd_array_map_clear(map, false);
+ fd_array_map_free(map);
+ }
+
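The tail-call patching moved behind bpf_arch_poke_desc_update() relies on weak linkage: the generic kernel supplies a WARN-only stub marked __weak, and an architecture that implements text poking provides a strong definition that the linker prefers. The mechanism in miniature (illustrative symbol name; GCC/Clang attribute syntax):

    #include <stdio.h>

    /* Weak default, like the kernel's __weak stub: it is used only
     * when no strong definition of the same symbol is linked in.
     */
    void __attribute__((weak)) poke_update(void)
    {
        printf("generic stub: arch did not override\n");
    }

    /* A separate object file would provide the strong definition:
     *
     *     void poke_update(void) { ... real text poking ... }
     *
     * and the linker silently picks it over the weak stub.
     */
    int main(void)
    {
        poke_update();
        return 0;
    }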
+diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
+index addf3dd57b59b5..35e1ddca74d210 100644
+--- a/kernel/bpf/bloom_filter.c
++++ b/kernel/bpf/bloom_filter.c
+@@ -80,6 +80,18 @@ static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key
+ return -EOPNOTSUPP;
+ }
+
++/* Called from syscall */
++static int bloom_map_alloc_check(union bpf_attr *attr)
++{
++ if (attr->value_size > KMALLOC_MAX_SIZE)
++ /* if value_size is bigger, the user space won't be able to
++ * access the elements.
++ */
++ return -E2BIG;
++
++ return 0;
++}
++
+ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
+ {
+ u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
+@@ -191,6 +203,7 @@ static u64 bloom_map_mem_usage(const struct bpf_map *map)
+ BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
+ const struct bpf_map_ops bloom_filter_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
++ .map_alloc_check = bloom_map_alloc_check,
+ .map_alloc = bloom_map_alloc,
+ .map_free = bloom_map_free,
+ .map_get_next_key = bloom_map_get_next_key,
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index 146824cc96893c..e8d02212da7039 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -808,8 +808,8 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
+ nbuckets = max_t(u32, 2, nbuckets);
+ smap->bucket_log = ilog2(nbuckets);
+
+- smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+- nbuckets, GFP_USER | __GFP_NOWARN);
++ smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
++ sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
+ if (!smap->buckets) {
+ err = -ENOMEM;
+ goto free_smap;
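The bpf_local_storage fix transposes the first two size arguments into the calloc() convention: element count first, element size second. Both orders compile and compute the same byte count here, which is how the slip went unnoticed, but the (n, size) order is what kvcalloc-style APIs document and what static checkers verify. The convention with plain calloc():

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t nbuckets = 32;
        struct bucket { void *head; } *buckets;

        /* calloc(n, size): the count comes first, then the element
         * size, exactly as in the corrected bpf_map_kvcalloc() call.
         */
        buckets = calloc(nbuckets, sizeof(*buckets));
        if (!buckets)
            return 1;

        printf("allocated %zu buckets\n", nbuckets);
        free(buckets);
        return 0;
    }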
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 8090d7fb11ef68..e0e4d4f490e87c 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -405,7 +405,7 @@ const char *btf_type_str(const struct btf_type *t)
+ struct btf_show {
+ u64 flags;
+ void *target; /* target of show operation (seq file, buffer) */
+- void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
++ __printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
+ const struct btf *btf;
+ /* below are used during iteration */
+ struct {
+@@ -7070,8 +7070,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
+ btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
+ }
+
+-static void btf_seq_show(struct btf_show *show, const char *fmt,
+- va_list args)
++__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
++ va_list args)
+ {
+ seq_vprintf((struct seq_file *)show->target, fmt, args);
+ }
+@@ -7104,8 +7104,8 @@ struct btf_show_snprintf {
+ int len; /* length we would have written */
+ };
+
+-static void btf_snprintf_show(struct btf_show *show, const char *fmt,
+- va_list args)
++__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
++ va_list args)
+ {
+ struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
+ int len;
+@@ -7832,6 +7832,7 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
+ case BPF_PROG_TYPE_SYSCALL:
+ return BTF_KFUNC_HOOK_SYSCALL;
+ case BPF_PROG_TYPE_CGROUP_SKB:
++ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ return BTF_KFUNC_HOOK_CGROUP_SKB;
+ case BPF_PROG_TYPE_SCHED_ACT:
+ return BTF_KFUNC_HOOK_SCHED_ACT;
+@@ -8420,6 +8421,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ struct bpf_core_cand_list cands = {};
+ struct bpf_core_relo_res targ_res;
+ struct bpf_core_spec *specs;
++ const struct btf_type *type;
+ int err;
+
+ /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
+@@ -8429,6 +8431,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ if (!specs)
+ return -ENOMEM;
+
++ type = btf_type_by_id(ctx->btf, relo->type_id);
++ if (!type) {
++ bpf_log(ctx->log, "relo #%u: bad type id %u\n",
++ relo_idx, relo->type_id);
++ return -EINVAL;
++ }
++
+ if (need_cands) {
+ struct bpf_cand_cache *cc;
+ int i;
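The __printf(2, 0) annotations mark functions whose second parameter is a printf-style format consumed via a va_list (the trailing 0 means no directly visible variadic arguments), letting the compiler type-check format strings all the way through the show callbacks. The same attribute on a standalone vsnprintf wrapper:

    #include <stdarg.h>
    #include <stdio.h>

    /* format(printf, 2, 0): argument 2 is the format; 0 marks the
     * variadic arguments as arriving through a va_list.
     */
    __attribute__((format(printf, 2, 0)))
    static void show(char *buf, const char *fmt, va_list args)
    {
        vsnprintf(buf, 128, fmt, args);
    }

    __attribute__((format(printf, 2, 3)))
    static void showf(char *buf, const char *fmt, ...)
    {
        va_list args;

        va_start(args, fmt);
        show(buf, fmt, args);
        va_end(args);
    }

    int main(void)
    {
        char buf[128];

        showf(buf, "%d widgets\n", 3); /* "%s" here would now warn */
        printf("%s", buf);
        return 0;
    }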
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index 03b3d4492980d0..913a6a7e62ca67 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -1450,6 +1450,9 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
+ * provided by user sockaddr
+ * @sk: sock struct that will use sockaddr
+ * @uaddr: sockaddr struct provided by user
++ * @uaddrlen: Pointer to the size of the sockaddr struct provided by user. It is
++ * read-only for AF_INET[6] uaddr but can be modified for AF_UNIX
++ * uaddr.
+ * @atype: The type of program to be executed
+ * @t_ctx: Pointer to attach type specific context
+ * @flags: Pointer to u32 which contains higher bits of BPF program
+@@ -1462,6 +1465,7 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
+ */
+ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
++ int *uaddrlen,
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags)
+@@ -1473,6 +1477,7 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ };
+ struct sockaddr_storage unspec;
+ struct cgroup *cgrp;
++ int ret;
+
+ /* Check socket family since not all sockets represent network
+ * endpoint (e.g. AF_UNIX).
+@@ -1483,11 +1488,19 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ if (!ctx.uaddr) {
+ memset(&unspec, 0, sizeof(unspec));
+ ctx.uaddr = (struct sockaddr *)&unspec;
++ ctx.uaddrlen = 0;
++ } else {
++ ctx.uaddrlen = *uaddrlen;
+ }
+
+ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+- return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
+- 0, flags);
++ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
++ 0, flags);
++
++ if (!ret && uaddr)
++ *uaddrlen = ctx.uaddrlen;
++
++ return ret;
+ }
+ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
+
+@@ -1786,7 +1799,7 @@ static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
+ }
+
+ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+- int *optname, char __user *optval,
++ int *optname, sockptr_t optval,
+ int *optlen, char **kernel_optval)
+ {
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+@@ -1809,7 +1822,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+
+ ctx.optlen = *optlen;
+
+- if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
++ if (copy_from_sockptr(ctx.optval, optval,
++ min(*optlen, max_optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1876,8 +1890,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+ }
+
+ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+- int optname, char __user *optval,
+- int __user *optlen, int max_optlen,
++ int optname, sockptr_t optval,
++ sockptr_t optlen, int max_optlen,
+ int retval)
+ {
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+@@ -1904,8 +1918,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ * one that kernel returned as well to let
+ * BPF programs inspect the value.
+ */
+-
+- if (get_user(ctx.optlen, optlen)) {
++ if (copy_from_sockptr(&ctx.optlen, optlen,
++ sizeof(ctx.optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1916,8 +1930,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ }
+ orig_optlen = ctx.optlen;
+
+- if (copy_from_user(ctx.optval, optval,
+- min(ctx.optlen, max_optlen)) != 0) {
++ if (copy_from_sockptr(ctx.optval, optval,
++ min(ctx.optlen, max_optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1931,7 +1945,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ if (ret < 0)
+ goto out;
+
+- if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
++ if (!sockptr_is_null(optval) &&
++ (ctx.optlen > max_optlen || ctx.optlen < 0)) {
+ if (orig_optlen > PAGE_SIZE && ctx.optlen >= 0) {
+ pr_info_once("bpf getsockopt: ignoring program buffer with optlen=%d (max_optlen=%d)\n",
+ ctx.optlen, max_optlen);
+@@ -1943,11 +1958,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ }
+
+ if (ctx.optlen != 0) {
+- if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
++ if (!sockptr_is_null(optval) &&
++ copy_to_sockptr(optval, ctx.optval, ctx.optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+- if (put_user(ctx.optlen, optlen)) {
++ if (copy_to_sockptr(optlen, &ctx.optlen, sizeof(ctx.optlen))) {
+ ret = -EFAULT;
+ goto out;
+ }
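The sockopt hooks switch from raw `char __user *` arguments to sockptr_t, the kernel's tagged pointer that remembers whether it refers to user or kernel memory, so user and in-kernel callers share one code path. A reduced userspace model of the idea (a sketch of the concept, not the kernel's actual sockptr.h):

    #include <stdio.h>
    #include <string.h>

    /* Tagged pointer: the copy helper dispatches on the address
     * space. In the kernel, the user arm is copy_from_user() (which
     * can fault) and the kernel arm is a plain memcpy().
     */
    typedef struct {
        void *p;
        int is_kernel;
    } sockptr;

    static int copy_from_sockptr_(void *dst, sockptr src, size_t n)
    {
        /* Userspace model: both arms degrade to memcpy. */
        memcpy(dst, src.p, n);
        return 0;
    }

    int main(void)
    {
        int val = 42, out = 0;
        sockptr sp = { &val, 1 };

        if (copy_from_sockptr_(&out, sp, sizeof(out)) == 0)
            printf("copied %d\n", out);
        return 0;
    }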
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 4e3ce0542e31f6..58ee17f429a33a 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -371,14 +371,18 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
+ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
+ s32 end_new, s32 curr, const bool probe_pass)
+ {
+- const s32 off_min = S16_MIN, off_max = S16_MAX;
++ s64 off_min, off_max, off;
+ s32 delta = end_new - end_old;
+- s32 off;
+
+- if (insn->code == (BPF_JMP32 | BPF_JA))
++ if (insn->code == (BPF_JMP32 | BPF_JA)) {
+ off = insn->imm;
+- else
++ off_min = S32_MIN;
++ off_max = S32_MAX;
++ } else {
+ off = insn->off;
++ off_min = S16_MIN;
++ off_max = S16_MAX;
++ }
+
+ if (curr < pos && curr + off + 1 >= end_old)
+ off += delta;
+@@ -623,7 +627,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+
+ if (val < ksym->start)
+ return -1;
+- if (val >= ksym->end)
++ /* Ensure that we detect return addresses as part of the program, when
++ * the final instruction is a call for a program part of the stack
++ * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
++ */
++ if (val > ksym->end)
+ return 1;
+
+ return 0;
+@@ -855,7 +863,12 @@ static LIST_HEAD(pack_list);
+ * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
+ */
+ #ifdef PMD_SIZE
+-#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
++/* PMD_SIZE is really big for some archs. It doesn't make sense to
++ * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
++ * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
++ * greater than or equal to 2MB.
++ */
++#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
+ #else
+ #define BPF_PROG_PACK_SIZE PAGE_SIZE
+ #endif
+@@ -2161,6 +2174,7 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
+ u64 stack[stack_size / sizeof(u64)]; \
+ u64 regs[MAX_BPF_EXT_REG] = {}; \
+ \
++ kmsan_unpoison_memory(stack, sizeof(stack)); \
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ ARG1 = (u64) (unsigned long) ctx; \
+ return ___bpf_prog_run(regs, insn); \
+@@ -2174,6 +2188,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+ u64 stack[stack_size / sizeof(u64)]; \
+ u64 regs[MAX_BPF_EXT_REG]; \
+ \
++ kmsan_unpoison_memory(stack, sizeof(stack)); \
+ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ BPF_R1 = r1; \
+ BPF_R2 = r2; \
+@@ -2244,6 +2259,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ {
+ enum bpf_prog_type prog_type = resolve_prog_type(fp);
+ bool ret;
++ struct bpf_prog_aux *aux = fp->aux;
+
+ if (fp->kprobe_override)
+ return false;
+@@ -2253,7 +2269,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ * in the case of devmap and cpumap). Until device checks
+ * are implemented, prohibit adding dev-bound programs to program maps.
+ */
+- if (bpf_prog_is_dev_bound(fp->aux))
++ if (bpf_prog_is_dev_bound(aux))
+ return false;
+
+ spin_lock(&map->owner.lock);
+@@ -2263,12 +2279,26 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
+ */
+ map->owner.type = prog_type;
+ map->owner.jited = fp->jited;
+- map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
++ map->owner.xdp_has_frags = aux->xdp_has_frags;
++ map->owner.attach_func_proto = aux->attach_func_proto;
+ ret = true;
+ } else {
+ ret = map->owner.type == prog_type &&
+ map->owner.jited == fp->jited &&
+- map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
++ map->owner.xdp_has_frags == aux->xdp_has_frags;
++ if (ret &&
++ map->owner.attach_func_proto != aux->attach_func_proto) {
++ switch (prog_type) {
++ case BPF_PROG_TYPE_TRACING:
++ case BPF_PROG_TYPE_LSM:
++ case BPF_PROG_TYPE_EXT:
++ case BPF_PROG_TYPE_STRUCT_OPS:
++ ret = false;
++ break;
++ default:
++ break;
++ }
++ }
+ }
+ spin_unlock(&map->owner.lock);
+
+@@ -2660,12 +2690,16 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len)
+ {
+ struct bpf_map *map;
++ bool sleepable;
+ u32 i;
+
++ sleepable = aux->sleepable;
+ for (i = 0; i < len; i++) {
+ map = used_maps[i];
+ if (map->ops->map_poke_untrack)
+ map->ops->map_poke_untrack(map, aux);
++ if (sleepable)
++ atomic64_dec(&map->sleepable_refcnt);
+ bpf_map_put(map);
+ }
+ }
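bpf_adj_delta_to_off() now does its bounds math in s64 because BPF has two jump encodings with different reach: the 16-bit off field, and a 32-bit offset carried in imm for BPF_JMP32|BPF_JA. After instructions are inserted, the adjusted offset must be checked against the right field width. The check in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 0 if the adjusted branch offset still fits its encoding,
     * -1 if patching pushed it out of range for the field width.
     */
    static int check_adjusted_off(int64_t off, int64_t delta, int wide)
    {
        int64_t min = wide ? INT32_MIN : INT16_MIN;
        int64_t max = wide ? INT32_MAX : INT16_MAX;
        int64_t adj = off + delta;

        return (adj < min || adj > max) ? -1 : 0;
    }

    int main(void)
    {
        /* A 16-bit branch near its limit overflows after 100 inserted
         * instructions; the 32-bit form still has plenty of room.
         */
        printf("%d\n", check_adjusted_off(32700, 100, 0)); /* -1 */
        printf("%d\n", check_adjusted_off(32700, 100, 1)); /*  0 */
        return 0;
    }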
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index e42a1bdb7f5365..df03e66a687c1c 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -178,7 +178,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
+ void **frames, int n,
+ struct xdp_cpumap_stats *stats)
+ {
+- struct xdp_rxq_info rxq;
++ struct xdp_rxq_info rxq = {};
+ struct xdp_buff xdp;
+ int i, nframes = 0;
+
+@@ -262,6 +262,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
+ static int cpu_map_kthread_run(void *data)
+ {
+ struct bpf_cpu_map_entry *rcpu = data;
++ unsigned long last_qs = jiffies;
+
+ complete(&rcpu->kthread_running);
+ set_current_state(TASK_INTERRUPTIBLE);
+@@ -287,10 +288,12 @@ static int cpu_map_kthread_run(void *data)
+ if (__ptr_ring_empty(rcpu->queue)) {
+ schedule();
+ sched = 1;
++ last_qs = jiffies;
+ } else {
+ __set_current_state(TASK_RUNNING);
+ }
+ } else {
++ rcu_softirq_qs_periodic(last_qs);
+ sched = cond_resched();
+ }
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 4d42f6ed6c11ae..69e78dc4bb18e8 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -130,13 +130,14 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
+ bpf_map_init_from_attr(&dtab->map, attr);
+
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+- dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
+-
+- if (!dtab->n_buckets) /* Overflow check */
++ /* hash table size must be power of 2; roundup_pow_of_two() can
++ * overflow into UB on 32-bit arches, so check that first
++ */
++ if (dtab->map.max_entries > 1UL << 31)
+ return -EINVAL;
+- }
+
+- if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
++ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
++
+ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+ dtab->map.numa_node);
+ if (!dtab->dev_index_head)
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index a8c7e1c5abfac5..7c64ad4f3732be 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
+ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+
+ preempt_disable();
++ local_irq_save(flags);
+ if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
+ __this_cpu_dec(*(htab->map_locked[hash]));
++ local_irq_restore(flags);
+ preempt_enable();
+ return -EBUSY;
+ }
+
+- raw_spin_lock_irqsave(&b->raw_lock, flags);
++ raw_spin_lock(&b->raw_lock);
+ *pflags = flags;
+
+ return 0;
+@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ unsigned long flags)
+ {
+ hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
+- raw_spin_unlock_irqrestore(&b->raw_lock, flags);
++ raw_spin_unlock(&b->raw_lock);
+ __this_cpu_dec(*(htab->map_locked[hash]));
++ local_irq_restore(flags);
+ preempt_enable();
+ }
+
+@@ -455,6 +458,9 @@ static int htab_map_alloc_check(union bpf_attr *attr)
+ * kmalloc-able later in htab_map_update_elem()
+ */
+ return -E2BIG;
++ /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
++ if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
++ return -E2BIG;
+
+ return 0;
+ }
+@@ -495,7 +501,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+ num_possible_cpus());
+ }
+
+- /* hash table size must be power of 2 */
++ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
++ * into UB on 32-bit arches, so check that first
++ */
++ err = -E2BIG;
++ if (htab->map.max_entries > 1UL << 31)
++ goto free_htab;
++
+ htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
+
+ htab->elem_size = sizeof(struct htab_elem) +
+@@ -505,10 +517,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+ else
+ htab->elem_size += round_up(htab->map.value_size, 8);
+
+- err = -E2BIG;
+- /* prevent zero size kmalloc and check for u32 overflow */
+- if (htab->n_buckets == 0 ||
+- htab->n_buckets > U32_MAX / sizeof(struct bucket))
++ /* check for u32 overflow */
++ if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
+ goto free_htab;
+
+ err = bpf_map_init_elem_count(&htab->map);
+@@ -894,7 +904,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
+
+ if (map->ops->map_fd_put_ptr) {
+ ptr = fd_htab_map_get_ptr(map, l);
+- map->ops->map_fd_put_ptr(ptr);
++ map->ops->map_fd_put_ptr(map, ptr, true);
+ }
+ }
+
+@@ -2481,7 +2491,7 @@ static void fd_htab_map_free(struct bpf_map *map)
+ hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+ void *ptr = fd_htab_map_get_ptr(map, l);
+
+- map->ops->map_fd_put_ptr(ptr);
++ map->ops->map_fd_put_ptr(map, ptr, false);
+ }
+ }
+
+@@ -2522,7 +2532,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+
+ ret = htab_map_update_elem(map, key, &ptr, map_flags);
+ if (ret)
+- map->ops->map_fd_put_ptr(ptr);
++ map->ops->map_fd_put_ptr(map, ptr, false);
+
+ return ret;
+ }
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 8bd3812fb8df44..3dba5bb294d8e4 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -31,12 +31,13 @@
+ *
+ * Different map implementations will rely on rcu in map methods
+ * lookup/update/delete, therefore eBPF programs must run under rcu lock
+- * if program is allowed to access maps, so check rcu_read_lock_held in
+- * all three functions.
++ * if program is allowed to access maps, so check rcu_read_lock_held() or
++ * rcu_read_lock_trace_held() in all three functions.
+ */
+ BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
+ {
+- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++ !rcu_read_lock_bh_held());
+ return (unsigned long) map->ops->map_lookup_elem(map, key);
+ }
+
+@@ -52,7 +53,8 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
+ BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
+ void *, value, u64, flags)
+ {
+- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++ !rcu_read_lock_bh_held());
+ return map->ops->map_update_elem(map, key, value, flags);
+ }
+
+@@ -69,7 +71,8 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
+
+ BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
+ {
+- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
++ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
++ !rcu_read_lock_bh_held());
+ return map->ops->map_delete_elem(map, key);
+ }
+
+@@ -330,7 +333,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
+ __this_cpu_write(irqsave_flags, flags);
+ }
+
+-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
++NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+ {
+ __bpf_spin_lock_irqsave(lock);
+ return 0;
+@@ -353,7 +356,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
+ local_irq_restore(flags);
+ }
+
+-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
++NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+ {
+ __bpf_spin_unlock_irqrestore(lock);
+ return 0;
+@@ -513,11 +516,12 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
+ }
+
+ BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
+- long *, res)
++ s64 *, res)
+ {
+ long long _res;
+ int err;
+
++ *res = 0;
+ err = __bpf_strtoll(buf, buf_len, flags, &_res);
+ if (err < 0)
+ return err;
+@@ -534,16 +538,18 @@ const struct bpf_func_proto bpf_strtol_proto = {
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_LONG,
++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg4_size = sizeof(s64),
+ };
+
+ BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
+- unsigned long *, res)
++ u64 *, res)
+ {
+ unsigned long long _res;
+ bool is_negative;
+ int err;
+
++ *res = 0;
+ err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
+ if (err < 0)
+ return err;
+@@ -562,7 +568,8 @@ const struct bpf_func_proto bpf_strtoul_proto = {
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_LONG,
++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg4_size = sizeof(u64),
+ };
+
+ BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
+@@ -1075,11 +1082,20 @@ const struct bpf_func_proto bpf_snprintf_proto = {
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+ };
+
++struct bpf_async_cb {
++ struct bpf_map *map;
++ struct bpf_prog *prog;
++ void __rcu *callback_fn;
++ void *value;
++ struct rcu_head rcu;
++ u64 flags;
++};
++
+ /* BPF map elements can contain 'struct bpf_timer'.
+ * Such map owns all of its BPF timers.
+ * 'struct bpf_timer' is allocated as part of map element allocation
+ * and it's zero initialized.
+- * That space is used to keep 'struct bpf_timer_kern'.
++ * That space is used to keep 'struct bpf_async_kern'.
+ * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
+ * remembers 'struct bpf_map *' pointer it's part of.
+ * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn.
+@@ -1092,16 +1108,17 @@ const struct bpf_func_proto bpf_snprintf_proto = {
+ * freeing the timers when inner map is replaced or deleted by user space.
+ */
+ struct bpf_hrtimer {
++ struct bpf_async_cb cb;
+ struct hrtimer timer;
+- struct bpf_map *map;
+- struct bpf_prog *prog;
+- void __rcu *callback_fn;
+- void *value;
++ atomic_t cancelling;
+ };
+
+ /* the actual struct hidden inside uapi struct bpf_timer */
+-struct bpf_timer_kern {
+- struct bpf_hrtimer *timer;
++struct bpf_async_kern {
++ union {
++ struct bpf_async_cb *cb;
++ struct bpf_hrtimer *timer;
++ };
+ /* bpf_spin_lock is used here instead of spinlock_t to make
+ * sure that it always fits into space reserved by struct bpf_timer
+ * regardless of LOCKDEP and spinlock debug flags.
+@@ -1109,19 +1126,23 @@ struct bpf_timer_kern {
+ struct bpf_spin_lock lock;
+ } __attribute__((aligned(8)));
+
++enum bpf_async_type {
++ BPF_ASYNC_TYPE_TIMER = 0,
++};
++
+ static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
+
+ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
+ {
+ struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
+- struct bpf_map *map = t->map;
+- void *value = t->value;
++ struct bpf_map *map = t->cb.map;
++ void *value = t->cb.value;
+ bpf_callback_t callback_fn;
+ void *key;
+ u32 idx;
+
+ BTF_TYPE_EMIT(struct bpf_timer);
+- callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
++ callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
+ if (!callback_fn)
+ goto out;
+
+@@ -1150,57 +1171,93 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
+ return HRTIMER_NORESTART;
+ }
+
+-BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
+- u64, flags)
++static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
++ enum bpf_async_type type)
+ {
+- clockid_t clockid = flags & (MAX_CLOCKS - 1);
++ struct bpf_async_cb *cb;
+ struct bpf_hrtimer *t;
++ clockid_t clockid;
++ size_t size;
+ int ret = 0;
+
+- BUILD_BUG_ON(MAX_CLOCKS != 16);
+- BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
+- BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));
+-
+ if (in_nmi())
+ return -EOPNOTSUPP;
+
+- if (flags >= MAX_CLOCKS ||
+- /* similar to timerfd except _ALARM variants are not supported */
+- (clockid != CLOCK_MONOTONIC &&
+- clockid != CLOCK_REALTIME &&
+- clockid != CLOCK_BOOTTIME))
++ switch (type) {
++ case BPF_ASYNC_TYPE_TIMER:
++ size = sizeof(struct bpf_hrtimer);
++ break;
++ default:
+ return -EINVAL;
+- __bpf_spin_lock_irqsave(&timer->lock);
+- t = timer->timer;
++ }
++
++ __bpf_spin_lock_irqsave(&async->lock);
++ t = async->timer;
+ if (t) {
+ ret = -EBUSY;
+ goto out;
+ }
++
++ /* allocate hrtimer via map_kmalloc to use memcg accounting */
++ cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
++ if (!cb) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ if (type == BPF_ASYNC_TYPE_TIMER) {
++ clockid = flags & (MAX_CLOCKS - 1);
++ t = (struct bpf_hrtimer *)cb;
++
++ atomic_set(&t->cancelling, 0);
++ hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
++ t->timer.function = bpf_timer_cb;
++ cb->value = (void *)async - map->record->timer_off;
++ }
++ cb->map = map;
++ cb->prog = NULL;
++ cb->flags = flags;
++ rcu_assign_pointer(cb->callback_fn, NULL);
++
++ WRITE_ONCE(async->cb, cb);
++ /* Guarantee the order between async->cb and map->usercnt. So
++ * when there are concurrent uref release and bpf timer init, either
++ * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
++ * timer or atomic64_read() below returns a zero usercnt.
++ */
++ smp_mb();
+ if (!atomic64_read(&map->usercnt)) {
+ /* maps with timers must be either held by user space
+ * or pinned in bpffs.
+ */
++ WRITE_ONCE(async->cb, NULL);
++ kfree(cb);
+ ret = -EPERM;
+- goto out;
+- }
+- /* allocate hrtimer via map_kmalloc to use memcg accounting */
+- t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
+- if (!t) {
+- ret = -ENOMEM;
+- goto out;
+ }
+- t->value = (void *)timer - map->record->timer_off;
+- t->map = map;
+- t->prog = NULL;
+- rcu_assign_pointer(t->callback_fn, NULL);
+- hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+- t->timer.function = bpf_timer_cb;
+- timer->timer = t;
+ out:
+- __bpf_spin_unlock_irqrestore(&timer->lock);
++ __bpf_spin_unlock_irqrestore(&async->lock);
+ return ret;
+ }
+
++BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
++ u64, flags)
++{
++ clock_t clockid = flags & (MAX_CLOCKS - 1);
++
++ BUILD_BUG_ON(MAX_CLOCKS != 16);
++ BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
++ BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
++
++ if (flags >= MAX_CLOCKS ||
++ /* similar to timerfd except _ALARM variants are not supported */
++ (clockid != CLOCK_MONOTONIC &&
++ clockid != CLOCK_REALTIME &&
++ clockid != CLOCK_BOOTTIME))
++ return -EINVAL;
++
++ return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
++}
++
+ static const struct bpf_func_proto bpf_timer_init_proto = {
+ .func = bpf_timer_init,
+ .gpl_only = true,
+@@ -1210,7 +1267,7 @@ static const struct bpf_func_proto bpf_timer_init_proto = {
+ .arg3_type = ARG_ANYTHING,
+ };
+
+-BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
++BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
+ struct bpf_prog_aux *, aux)
+ {
+ struct bpf_prog *prev, *prog = aux->prog;
+@@ -1225,7 +1282,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
+ ret = -EINVAL;
+ goto out;
+ }
+- if (!atomic64_read(&t->map->usercnt)) {
++ if (!atomic64_read(&t->cb.map->usercnt)) {
+ /* maps with timers must be either held by user space
+ * or pinned in bpffs. Otherwise timer might still be
+ * running even when bpf prog is detached and user space
+@@ -1234,7 +1291,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
+ ret = -EPERM;
+ goto out;
+ }
+- prev = t->prog;
++ prev = t->cb.prog;
+ if (prev != prog) {
+ /* Bump prog refcnt once. Every bpf_timer_set_callback()
+ * can pick different callback_fn-s within the same prog.
+@@ -1247,9 +1304,9 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
+ if (prev)
+ /* Drop prev prog refcnt when swapping with new prog */
+ bpf_prog_put(prev);
+- t->prog = prog;
++ t->cb.prog = prog;
+ }
+- rcu_assign_pointer(t->callback_fn, callback_fn);
++ rcu_assign_pointer(t->cb.callback_fn, callback_fn);
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ return ret;
+@@ -1263,7 +1320,7 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = {
+ .arg2_type = ARG_PTR_TO_FUNC,
+ };
+
+-BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
++BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
+ {
+ struct bpf_hrtimer *t;
+ int ret = 0;
+@@ -1275,7 +1332,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla
+ return -EINVAL;
+ __bpf_spin_lock_irqsave(&timer->lock);
+ t = timer->timer;
+- if (!t || !t->prog) {
++ if (!t || !t->cb.prog) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1300,45 +1357,77 @@ static const struct bpf_func_proto bpf_timer_start_proto = {
+ .arg3_type = ARG_ANYTHING,
+ };
+
+-static void drop_prog_refcnt(struct bpf_hrtimer *t)
++static void drop_prog_refcnt(struct bpf_async_cb *async)
+ {
+- struct bpf_prog *prog = t->prog;
++ struct bpf_prog *prog = async->prog;
+
+ if (prog) {
+ bpf_prog_put(prog);
+- t->prog = NULL;
+- rcu_assign_pointer(t->callback_fn, NULL);
++ async->prog = NULL;
++ rcu_assign_pointer(async->callback_fn, NULL);
+ }
+ }
+
+-BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
++BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
+ {
+- struct bpf_hrtimer *t;
++ struct bpf_hrtimer *t, *cur_t;
++ bool inc = false;
+ int ret = 0;
+
+ if (in_nmi())
+ return -EOPNOTSUPP;
++ rcu_read_lock();
+ __bpf_spin_lock_irqsave(&timer->lock);
+ t = timer->timer;
+ if (!t) {
+ ret = -EINVAL;
+ goto out;
+ }
+- if (this_cpu_read(hrtimer_running) == t) {
++
++ cur_t = this_cpu_read(hrtimer_running);
++ if (cur_t == t) {
+ /* If bpf callback_fn is trying to bpf_timer_cancel()
+ * its own timer the hrtimer_cancel() will deadlock
+- * since it waits for callback_fn to finish
++ * since it waits for callback_fn to finish.
+ */
+ ret = -EDEADLK;
+ goto out;
+ }
+- drop_prog_refcnt(t);
++
++ /* Only account in-flight cancellations when invoked from a timer
++ * callback, since we want to avoid waiting only if other _callbacks_
++ * are waiting on us, to avoid introducing lockups. Non-callback paths
++ * are ok, since nobody would synchronously wait for their completion.
++ */
++ if (!cur_t)
++ goto drop;
++ atomic_inc(&t->cancelling);
++ /* Need full barrier after relaxed atomic_inc */
++ smp_mb__after_atomic();
++ inc = true;
++ if (atomic_read(&cur_t->cancelling)) {
++ /* We're cancelling timer t, while some other timer callback is
++ * attempting to cancel us. In such a case, it might be possible
++ * that timer t belongs to the other callback, or some other
++ * callback waiting upon it (creating transitive dependencies
++ * upon us), and we will enter a deadlock if we continue
++ * cancelling and waiting for it synchronously, since it might
++ * do the same. Bail!
++ */
++ ret = -EDEADLK;
++ goto out;
++ }
++drop:
++ drop_prog_refcnt(&t->cb);
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ /* Cancel the timer and wait for associated callback to finish
+ * if it was running.
+ */
+ ret = ret ?: hrtimer_cancel(&t->timer);
++ if (inc)
++ atomic_dec(&t->cancelling);
++ rcu_read_unlock();
+ return ret;
+ }
+
+@@ -1354,7 +1443,7 @@ static const struct bpf_func_proto bpf_timer_cancel_proto = {
+ */
+ void bpf_timer_cancel_and_free(void *val)
+ {
+- struct bpf_timer_kern *timer = val;
++ struct bpf_async_kern *timer = val;
+ struct bpf_hrtimer *t;
+
+ /* Performance optimization: read timer->timer without lock first. */
+@@ -1366,11 +1455,11 @@ void bpf_timer_cancel_and_free(void *val)
+ t = timer->timer;
+ if (!t)
+ goto out;
+- drop_prog_refcnt(t);
++ drop_prog_refcnt(&t->cb);
+ /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
+ * this timer, since it won't be initialized.
+ */
+- timer->timer = NULL;
++ WRITE_ONCE(timer->timer, NULL);
+ out:
+ __bpf_spin_unlock_irqrestore(&timer->lock);
+ if (!t)
+@@ -1393,7 +1482,7 @@ void bpf_timer_cancel_and_free(void *val)
+ */
+ if (this_cpu_read(hrtimer_running) != t)
+ hrtimer_cancel(&t->timer);
+- kfree(t);
++ kfree_rcu(t, cb.rcu);
+ }
+
+ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+@@ -2197,7 +2286,12 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
+ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
+ struct cgroup *ancestor)
+ {
+- return task_under_cgroup_hierarchy(task, ancestor);
++ long ret;
++
++ rcu_read_lock();
++ ret = task_under_cgroup_hierarchy(task, ancestor);
++ rcu_read_unlock();
++ return ret;
+ }
+ #endif /* CONFIG_CGROUPS */
+
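Most of the helpers.c churn is the bpf_timer_kern to bpf_async_kern generalisation: the timer-agnostic bookkeeping (map, prog, callback_fn, value, flags) moves into struct bpf_async_cb, which struct bpf_hrtimer embeds as its first member so that a pointer to either view converts with a cast, as in `t = (struct bpf_hrtimer *)cb` above. The embedding idiom on its own (illustrative types):

    #include <stdio.h>
    #include <stdlib.h>

    struct async_cb { const char *name; };

    struct hrtimer_wrap {
        struct async_cb cb;    /* must stay first for the casts */
        long expires;
    };

    int main(void)
    {
        /* Allocate the wrapper, but hand out the common view. */
        struct async_cb *cb = malloc(sizeof(struct hrtimer_wrap));
        struct hrtimer_wrap *t;

        if (!cb)
            return 1;
        cb->name = "demo";

        /* Valid because cb is the first member: the address of a
         * struct and the address of its first member coincide.
         */
        t = (struct hrtimer_wrap *)cb;
        t->expires = 100;

        printf("%s expires at %ld\n", t->cb.name, t->expires);
        free(t);
        return 0;
    }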
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index 17c7e7782a1f7f..d0febf07051edf 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -164,13 +164,13 @@ static inline int extract_bit(const u8 *data, size_t index)
+ */
+ static size_t longest_prefix_match(const struct lpm_trie *trie,
+ const struct lpm_trie_node *node,
+- const struct bpf_lpm_trie_key *key)
++ const struct bpf_lpm_trie_key_u8 *key)
+ {
+ u32 limit = min(node->prefixlen, key->prefixlen);
+ u32 prefixlen = 0, i = 0;
+
+ BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));
+- BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32));
++ BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key_u8, data) % sizeof(u32));
+
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT)
+
+@@ -229,7 +229,10 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
+ {
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct lpm_trie_node *node, *found = NULL;
+- struct bpf_lpm_trie_key *key = _key;
++ struct bpf_lpm_trie_key_u8 *key = _key;
++
++ if (key->prefixlen > trie->max_prefixlen)
++ return NULL;
+
+ /* Start walking the trie from the root node ... */
+
+@@ -305,8 +308,9 @@ static long trie_update_elem(struct bpf_map *map,
+ {
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
++ struct lpm_trie_node *free_node = NULL;
+ struct lpm_trie_node __rcu **slot;
+- struct bpf_lpm_trie_key *key = _key;
++ struct bpf_lpm_trie_key_u8 *key = _key;
+ unsigned long irq_flags;
+ unsigned int next_bit;
+ size_t matchlen = 0;
+@@ -379,7 +383,7 @@ static long trie_update_elem(struct bpf_map *map,
+ trie->n_entries--;
+
+ rcu_assign_pointer(*slot, new_node);
+- kfree_rcu(node, rcu);
++ free_node = node;
+
+ goto out;
+ }
+@@ -426,6 +430,7 @@ static long trie_update_elem(struct bpf_map *map,
+ }
+
+ spin_unlock_irqrestore(&trie->lock, irq_flags);
++ kfree_rcu(free_node, rcu);
+
+ return ret;
+ }
+@@ -434,7 +439,8 @@ static long trie_update_elem(struct bpf_map *map,
+ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ {
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+- struct bpf_lpm_trie_key *key = _key;
++ struct lpm_trie_node *free_node = NULL, *free_parent = NULL;
++ struct bpf_lpm_trie_key_u8 *key = _key;
+ struct lpm_trie_node __rcu **trim, **trim2;
+ struct lpm_trie_node *node, *parent;
+ unsigned long irq_flags;
+@@ -503,8 +509,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ else
+ rcu_assign_pointer(
+ *trim2, rcu_access_pointer(parent->child[0]));
+- kfree_rcu(parent, rcu);
+- kfree_rcu(node, rcu);
++ free_parent = parent;
++ free_node = node;
+ goto out;
+ }
+
+@@ -518,10 +524,12 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
+ else
+ RCU_INIT_POINTER(*trim, NULL);
+- kfree_rcu(node, rcu);
++ free_node = node;
+
+ out:
+ spin_unlock_irqrestore(&trie->lock, irq_flags);
++ kfree_rcu(free_parent, rcu);
++ kfree_rcu(free_node, rcu);
+
+ return ret;
+ }
+@@ -533,7 +541,7 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ sizeof(struct lpm_trie_node))
+ #define LPM_VAL_SIZE_MIN 1
+
+-#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key) + (X))
++#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key_u8) + (X))
+ #define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
+ #define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
+
+@@ -562,7 +570,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
+ /* copy mandatory map attributes */
+ bpf_map_init_from_attr(&trie->map, attr);
+ trie->data_size = attr->key_size -
+- offsetof(struct bpf_lpm_trie_key, data);
++ offsetof(struct bpf_lpm_trie_key_u8, data);
+ trie->max_prefixlen = trie->data_size * 8;
+
+ spin_lock_init(&trie->lock);
+@@ -613,7 +621,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
+ {
+ struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+- struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
++ struct bpf_lpm_trie_key_u8 *key = _key, *next_key = _next_key;
+ struct lpm_trie_node **node_stack = NULL;
+ int err = 0, stack_ptr = -1;
+ unsigned int next_bit;
+@@ -700,7 +708,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
+ }
+ do_copy:
+ next_key->prefixlen = next_node->prefixlen;
+- memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
++ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key_u8, data),
+ next_node->data, trie->data_size);
+ free_stack:
+ kfree(node_stack);
+@@ -712,7 +720,7 @@ static int trie_check_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+ {
+- /* Keys must have struct bpf_lpm_trie_key embedded. */
++ /* Keys must have struct bpf_lpm_trie_key_u8 embedded. */
+ return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
+ -EINVAL : 0;
+ }
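The LPM trie hunks all follow one pattern: nodes are unlinked only while trie->lock is held, collected into free_node/free_parent, and handed to kfree_rcu() after the unlock, so the RCU freeing machinery is never entered from inside the trie's spinlock section. The same collect-then-free shape in a self-contained sketch (a pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int key; struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void delete_key(int key)
    {
        struct node **pp, *free_node = NULL;

        /* Unlink under the lock... */
        pthread_mutex_lock(&lock);
        for (pp = &head; *pp; pp = &(*pp)->next) {
            if ((*pp)->key == key) {
                free_node = *pp;
                *pp = free_node->next;
                break;
            }
        }
        pthread_mutex_unlock(&lock);

        /* ...free after it is dropped. free(NULL) is a no-op, just
         * as kfree_rcu() tolerates a NULL free_node above.
         */
        free(free_node);
    }

    int main(void)
    {
        struct node *n = malloc(sizeof(*n));

        if (!n)
            return 1;
        n->key = 1;
        n->next = NULL;
        head = n;

        delete_key(1);
        printf("head is %s\n", head ? "non-empty" : "empty");
        return 0;
    }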
+diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
+index cd5eafaba97e22..8ef269e66ba502 100644
+--- a/kernel/bpf/map_in_map.c
++++ b/kernel/bpf/map_in_map.c
+@@ -127,12 +127,21 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
+ return inner_map;
+ }
+
+-void bpf_map_fd_put_ptr(void *ptr)
++void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
+- /* ptr->ops->map_free() has to go through one
+- * rcu grace period by itself.
++ struct bpf_map *inner_map = ptr;
++
++ /* Defer the freeing of inner map according to the sleepable attribute
++ * of bpf program which owns the outer map, so unnecessary waiting for
++ * RCU tasks trace grace period can be avoided.
+ */
+- bpf_map_put(ptr);
++ if (need_defer) {
++ if (atomic64_read(&map->sleepable_refcnt))
++ WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
++ else
++ WRITE_ONCE(inner_map->free_after_rcu_gp, true);
++ }
++ bpf_map_put(inner_map);
+ }
+
+ u32 bpf_map_fd_sys_lookup_elem(void *ptr)
+diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
+index bcb7534afb3c0d..7d61602354de80 100644
+--- a/kernel/bpf/map_in_map.h
++++ b/kernel/bpf/map_in_map.h
+@@ -13,7 +13,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
+ void bpf_map_meta_free(struct bpf_map *map_meta);
+ void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
+ int ufd);
+-void bpf_map_fd_put_ptr(void *ptr);
++void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
+ u32 bpf_map_fd_sys_lookup_elem(void *ptr);
+
+ #endif
+diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
+index d93ddac283d401..85f9501ff6e66a 100644
+--- a/kernel/bpf/memalloc.c
++++ b/kernel/bpf/memalloc.c
+@@ -486,31 +486,6 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
+ alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
+ }
+
+-static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
+-{
+- struct llist_node *first;
+- unsigned int obj_size;
+-
+- /* For per-cpu allocator, the size of free objects in free list doesn't
+- * match with unit_size and now there is no way to get the size of
+- * per-cpu pointer saved in free object, so just skip the checking.
+- */
+- if (c->percpu_size)
+- return 0;
+-
+- first = c->free_llist.first;
+- if (!first)
+- return 0;
+-
+- obj_size = ksize(first);
+- if (obj_size != c->unit_size) {
+- WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
+- idx, obj_size, c->unit_size);
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+ /* When size != 0 bpf_mem_cache for each cpu.
+ * This is typical bpf hash map use case when all elements have equal size.
+ *
+@@ -521,10 +496,12 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
+ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
+ {
+ static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+- int cpu, i, err, unit_size, percpu_size = 0;
+ struct bpf_mem_caches *cc, __percpu *pcc;
+ struct bpf_mem_cache *c, __percpu *pc;
+ struct obj_cgroup *objcg = NULL;
++ int cpu, i, unit_size, percpu_size = 0;
++
++ ma->percpu = percpu;
+
+ if (size) {
+ pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
+@@ -562,7 +539,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
+ pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
+ if (!pcc)
+ return -ENOMEM;
+- err = 0;
+ #ifdef CONFIG_MEMCG_KMEM
+ objcg = get_obj_cgroup_from_current();
+ #endif
+@@ -575,28 +551,12 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
+ c->tgt = c;
+
+ init_refill_work(c);
+- /* Another bpf_mem_cache will be used when allocating
+- * c->unit_size in bpf_mem_alloc(), so doesn't prefill
+- * for the bpf_mem_cache because these free objects will
+- * never be used.
+- */
+- if (i != bpf_mem_cache_idx(c->unit_size))
+- continue;
+ prefill_mem_cache(c, cpu);
+- err = check_obj_size(c, i);
+- if (err)
+- goto out;
+ }
+ }
+
+-out:
+ ma->caches = pcc;
+- /* refill_work is either zeroed or initialized, so it is safe to
+- * call irq_work_sync().
+- */
+- if (err)
+- bpf_mem_alloc_destroy(ma);
+- return err;
++ return 0;
+ }
+
+ static void drain_mem_cache(struct bpf_mem_cache *c)
+@@ -860,7 +820,7 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
+ void *ret;
+
+ if (!size)
+- return ZERO_SIZE_PTR;
++ return NULL;
+
+ idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
+ if (idx < 0)
+@@ -872,13 +832,15 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
+
+ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
+ {
++ struct bpf_mem_cache *c;
+ int idx;
+
+ if (!ptr)
+ return;
+
+- idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
+- if (idx < 0)
++ c = *(void **)(ptr - LLIST_NODE_SZ);
++ idx = bpf_mem_cache_idx(c->unit_size);
++ if (WARN_ON_ONCE(idx < 0))
+ return;
+
+ unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
+@@ -886,13 +848,15 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
+
+ void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
+ {
++ struct bpf_mem_cache *c;
+ int idx;
+
+ if (!ptr)
+ return;
+
+- idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
+- if (idx < 0)
++ c = *(void **)(ptr - LLIST_NODE_SZ);
++ idx = bpf_mem_cache_idx(c->unit_size);
++ if (WARN_ON_ONCE(idx < 0))
+ return;
+
+ unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
+@@ -958,41 +922,11 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
+ memcg = get_memcg(c);
+ old_memcg = set_active_memcg(memcg);
+ ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
++ if (ret)
++ *(struct bpf_mem_cache **)ret = c;
+ set_active_memcg(old_memcg);
+ mem_cgroup_put(memcg);
+ }
+
+ return !ret ? NULL : ret + LLIST_NODE_SZ;
+ }
+-
+-static __init int bpf_mem_cache_adjust_size(void)
+-{
+- unsigned int size;
+-
+- /* Adjusting the indexes in size_index() according to the object_size
+- * of underlying slab cache, so bpf_mem_alloc() will select a
+- * bpf_mem_cache with unit_size equal to the object_size of
+- * the underlying slab cache.
+- *
+- * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
+- * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
+- */
+- for (size = 192; size >= 8; size -= 8) {
+- unsigned int kmalloc_size, index;
+-
+- kmalloc_size = kmalloc_size_roundup(size);
+- if (kmalloc_size == size)
+- continue;
+-
+- if (kmalloc_size <= 192)
+- index = size_index[(kmalloc_size - 1) / 8];
+- else
+- index = fls(kmalloc_size - 1) - 1;
+- /* Only overwrite if necessary */
+- if (size_index[(size - 1) / 8] != index)
+- size_index[(size - 1) / 8] = index;
+- }
+-
+- return 0;
+-}
+-subsys_initcall(bpf_mem_cache_adjust_size);
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index f045fde632e5f1..238d9b206bbdec 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -51,7 +51,8 @@ struct bpf_ringbuf {
+ * This prevents a user-space application from modifying the
+ * position and ruining in-kernel tracking. The permissions of the
+ * pages depend on who is producing samples: user-space or the
+- * kernel.
++ * kernel. Note that the pending counter is placed in the same
++ * page as the producer, so that it shares the same cache line.
+ *
+ * Kernel-producer
+ * ---------------
+@@ -70,6 +71,7 @@ struct bpf_ringbuf {
+ */
+ unsigned long consumer_pos __aligned(PAGE_SIZE);
+ unsigned long producer_pos __aligned(PAGE_SIZE);
++ unsigned long pending_pos;
+ char data[] __aligned(PAGE_SIZE);
+ };
+
+@@ -179,6 +181,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
+ rb->mask = data_sz - 1;
+ rb->consumer_pos = 0;
+ rb->producer_pos = 0;
++ rb->pending_pos = 0;
+
+ return rb;
+ }
+@@ -404,9 +407,9 @@ bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
+
+ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ {
+- unsigned long cons_pos, prod_pos, new_prod_pos, flags;
+- u32 len, pg_off;
++ unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags;
+ struct bpf_ringbuf_hdr *hdr;
++ u32 len, pg_off, tmp_size, hdr_len;
+
+ if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
+ return NULL;
+@@ -424,13 +427,29 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ spin_lock_irqsave(&rb->spinlock, flags);
+ }
+
++ pend_pos = rb->pending_pos;
+ prod_pos = rb->producer_pos;
+ new_prod_pos = prod_pos + len;
+
+- /* check for out of ringbuf space by ensuring producer position
+- * doesn't advance more than (ringbuf_size - 1) ahead
++ while (pend_pos < prod_pos) {
++ hdr = (void *)rb->data + (pend_pos & rb->mask);
++ hdr_len = READ_ONCE(hdr->len);
++ if (hdr_len & BPF_RINGBUF_BUSY_BIT)
++ break;
++ tmp_size = hdr_len & ~BPF_RINGBUF_DISCARD_BIT;
++ tmp_size = round_up(tmp_size + BPF_RINGBUF_HDR_SZ, 8);
++ pend_pos += tmp_size;
++ }
++ rb->pending_pos = pend_pos;
++
++ /* check for out of ringbuf space:
++ * - by ensuring producer position doesn't advance more than
++ * (ringbuf_size - 1) ahead
+	 *   - by ensuring the span from the oldest not-yet-committed record
+	 *     to the newest record does not exceed (ringbuf_size - 1)
+ */
+- if (new_prod_pos - cons_pos > rb->mask) {
++ if (new_prod_pos - cons_pos > rb->mask ||
++ new_prod_pos - pend_pos > rb->mask) {
+ spin_unlock_irqrestore(&rb->spinlock, flags);
+ return NULL;
+ }
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 458bb80b14d574..a330f38ae7335e 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -91,11 +91,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
+ } else if (value_size / 8 > sysctl_perf_event_max_stack)
+ return ERR_PTR(-EINVAL);
+
+- /* hash table size must be power of 2 */
+- n_buckets = roundup_pow_of_two(attr->max_entries);
+- if (!n_buckets)
++ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
++ * into UB on 32-bit arches, so check that first
++ */
++ if (attr->max_entries > 1UL << 31)
+ return ERR_PTR(-E2BIG);
+
++ n_buckets = roundup_pow_of_two(attr->max_entries);
++
+ cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+ smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
+ if (!smap)
+@@ -388,6 +391,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ {
+ u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
+ bool user_build_id = flags & BPF_F_USER_BUILD_ID;
++ bool crosstask = task && task != current;
+ u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+ bool user = flags & BPF_F_USER_STACK;
+ struct perf_callchain_entry *trace;
+@@ -410,6 +414,14 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ if (task && user && !user_mode(regs))
+ goto err_fault;
+
++ /* get_perf_callchain does not support crosstask user stack walking
++ * but returns an empty stack instead of NULL.
++ */
++ if (crosstask && user) {
++ err = -EOPNOTSUPP;
++ goto clear;
++ }
++
+ num_elem = size / elem_size;
+ max_depth = num_elem + skip;
+ if (sysctl_perf_event_max_stack < max_depth)
+@@ -421,7 +433,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ trace = get_callchain_entry_for_task(task, max_depth);
+ else
+ trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
+- false, false);
++ crosstask, false);
+ if (unlikely(!trace))
+ goto err_fault;
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index d77b2f8b93641b..b1933d074f0519 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -692,6 +692,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
+ {
+ struct bpf_map *map = container_of(work, struct bpf_map, work);
+ struct btf_record *rec = map->record;
++ struct btf *btf = map->btf;
+
+ security_bpf_map_free(map);
+ bpf_map_release_memcg(map);
+@@ -707,6 +708,10 @@ static void bpf_map_free_deferred(struct work_struct *work)
+ * template bpf_map struct used during verification.
+ */
+ btf_record_free(rec);
++ /* Delay freeing of btf for maps, as map_free callback may need
++ * struct_meta info which will be freed with btf_put().
++ */
++ btf_put(btf);
+ }
+
+ static void bpf_map_put_uref(struct bpf_map *map)
+@@ -717,6 +722,28 @@ static void bpf_map_put_uref(struct bpf_map *map)
+ }
+ }
+
++static void bpf_map_free_in_work(struct bpf_map *map)
++{
++ INIT_WORK(&map->work, bpf_map_free_deferred);
++ /* Avoid spawning kworkers, since they all might contend
++ * for the same mutex like slab_mutex.
++ */
++ queue_work(system_unbound_wq, &map->work);
++}
++
++static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
++{
++ bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
++}
++
++static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
++{
++ if (rcu_trace_implies_rcu_gp())
++ bpf_map_free_rcu_gp(rcu);
++ else
++ call_rcu(rcu, bpf_map_free_rcu_gp);
++}
++
+ /* decrement map refcnt and schedule it for freeing via workqueue
+ * (underlying map implementation ops->map_free() might sleep)
+ */
+@@ -725,12 +752,14 @@ void bpf_map_put(struct bpf_map *map)
+ if (atomic64_dec_and_test(&map->refcnt)) {
+ /* bpf_map_free_id() must be called first */
+ bpf_map_free_id(map);
+- btf_put(map->btf);
+- INIT_WORK(&map->work, bpf_map_free_deferred);
+- /* Avoid spawning kworkers, since they all might contend
+- * for the same mutex like slab_mutex.
+- */
+- queue_work(system_unbound_wq, &map->work);
++
++ WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
++ if (READ_ONCE(map->free_after_mult_rcu_gp))
++ call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
++ else if (READ_ONCE(map->free_after_rcu_gp))
++ call_rcu(&map->rcu, bpf_map_free_rcu_gp);
++ else
++ bpf_map_free_in_work(map);
+ }
+ }
+ EXPORT_SYMBOL_GPL(bpf_map_put);
+@@ -1673,6 +1702,9 @@ int generic_map_delete_batch(struct bpf_map *map,
+ if (!max_count)
+ return 0;
+
++ if (put_user(0, &uattr->batch.count))
++ return -EFAULT;
++
+ key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+ if (!key)
+ return -ENOMEM;
+@@ -1730,6 +1762,9 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
+ if (!max_count)
+ return 0;
+
++ if (put_user(0, &uattr->batch.count))
++ return -EFAULT;
++
+ key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+ if (!key)
+ return -ENOMEM;
+@@ -2795,6 +2830,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
+ void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog)
+ {
++ WARN_ON(ops->dealloc && ops->dealloc_deferred);
+ atomic64_set(&link->refcnt, 1);
+ link->type = type;
+ link->id = 0;
+@@ -2834,17 +2870,46 @@ void bpf_link_inc(struct bpf_link *link)
+ atomic64_inc(&link->refcnt);
+ }
+
++static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
++{
++ struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);
++
++ /* free bpf_link and its containing memory */
++ link->ops->dealloc_deferred(link);
++}
++
++static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
++{
++ if (rcu_trace_implies_rcu_gp())
++ bpf_link_defer_dealloc_rcu_gp(rcu);
++ else
++ call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
++}
++
+ /* bpf_link_free is guaranteed to be called from process context */
+ static void bpf_link_free(struct bpf_link *link)
+ {
++ const struct bpf_link_ops *ops = link->ops;
++ bool sleepable = false;
++
+ bpf_link_free_id(link->id);
+ if (link->prog) {
++ sleepable = link->prog->aux->sleepable;
+ /* detach BPF program, clean up used resources */
+- link->ops->release(link);
++ ops->release(link);
+ bpf_prog_put(link->prog);
+ }
+- /* free bpf_link and its containing memory */
+- link->ops->dealloc(link);
++ if (ops->dealloc_deferred) {
++ /* schedule BPF link deallocation; if underlying BPF program
++ * is sleepable, we need to first wait for RCU tasks trace
++ * sync, then go through "classic" RCU grace period
++ */
++ if (sleepable)
++ call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
++ else
++ call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
++ } else if (ops->dealloc)
++ ops->dealloc(link);
+ }
+
+ static void bpf_link_put_deferred(struct work_struct *work)
+@@ -3171,6 +3236,10 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ *
+ * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
+ * was detached and is going for re-attachment.
++ *
++ * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
++ * are NULL, then program was already attached and user did not provide
++ * tgt_prog_fd so we have no way to find out or create trampoline
+ */
+ if (!prog->aux->dst_trampoline && !tgt_prog) {
+ /*
+@@ -3184,6 +3253,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ err = -EINVAL;
+ goto out_unlock;
+ }
++ /* We can allow re-attach only if we have valid attach_btf. */
++ if (!prog->aux->attach_btf) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
+ btf_id = prog->aux->attach_btf_id;
+ key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
+ }
+@@ -3340,7 +3414,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
+
+ static const struct bpf_link_ops bpf_raw_tp_link_lops = {
+ .release = bpf_raw_tp_link_release,
+- .dealloc = bpf_raw_tp_link_dealloc,
++ .dealloc_deferred = bpf_raw_tp_link_dealloc,
+ .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
+ .fill_link_info = bpf_raw_tp_link_fill_link_info,
+ };
+@@ -3739,6 +3813,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ * check permissions at attach time.
+ */
+ return -EPERM;
++
++ ptype = attach_type_to_prog_type(attach_type);
++ if (prog->type != ptype)
++ return -EINVAL;
++
+ return prog->enforce_expected_attach_type &&
+ prog->expected_attach_type != attach_type ?
+ -EINVAL : 0;
+@@ -5283,6 +5362,11 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
+ goto out_unlock;
+ }
+
++ /* The bpf program will not access the bpf map, but for the sake of
++ * simplicity, increase sleepable_refcnt for sleepable program as well.
++ */
++ if (prog->aux->sleepable)
++ atomic64_inc(&map->sleepable_refcnt);
+ memcpy(used_maps_new, used_maps_old,
+ sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
+ used_maps_new[prog->aux->used_map_cnt] = map;
+@@ -5563,6 +5647,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
+
+ BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
+ {
++ *res = 0;
+ if (flags)
+ return -EINVAL;
+
+@@ -5583,7 +5668,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
+ .arg1_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_LONG,
++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg4_size = sizeof(u64),
+ };
+
+ static const struct bpf_func_proto *
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 53ff50cac61eaa..e97aeda3a86b55 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
+ goto out;
+ }
+
+- /* clear all bits except SHARE_IPMODIFY */
+- tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
++ /* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
++ tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
+
+ if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
+ tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 873ade146f3deb..3032a464d31bbf 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -542,12 +542,11 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
+ return func_id == BPF_FUNC_dynptr_data;
+ }
+
+-static bool is_callback_calling_kfunc(u32 btf_id);
++static bool is_sync_callback_calling_kfunc(u32 btf_id);
+
+-static bool is_callback_calling_function(enum bpf_func_id func_id)
++static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
+ {
+ return func_id == BPF_FUNC_for_each_map_elem ||
+- func_id == BPF_FUNC_timer_set_callback ||
+ func_id == BPF_FUNC_find_vma ||
+ func_id == BPF_FUNC_loop ||
+ func_id == BPF_FUNC_user_ringbuf_drain;
+@@ -558,6 +557,18 @@ static bool is_async_callback_calling_function(enum bpf_func_id func_id)
+ return func_id == BPF_FUNC_timer_set_callback;
+ }
+
++static bool is_callback_calling_function(enum bpf_func_id func_id)
++{
++ return is_sync_callback_calling_function(func_id) ||
++ is_async_callback_calling_function(func_id);
++}
++
++static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
++{
++ return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) ||
++ (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
++}
++
+ static bool is_storage_get_function(enum bpf_func_id func_id)
+ {
+ return func_id == BPF_FUNC_sk_storage_get ||
+@@ -1515,7 +1526,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
+ if (state->in_async_callback_fn)
+ verbose(env, " async_cb");
+ verbose(env, "\n");
+- mark_verifier_state_clean(env);
++ if (!print_all)
++ mark_verifier_state_clean(env);
+ }
+
+ static inline u32 vlog_alignment(u32 pos)
+@@ -1631,7 +1643,10 @@ static int resize_reference_state(struct bpf_func_state *state, size_t n)
+ return 0;
+ }
+
+-static int grow_stack_state(struct bpf_func_state *state, int size)
++/* Possibly update state->allocated_stack to be at least size bytes. Also
++ * possibly update the function's high-water mark in its bpf_subprog_info.
++ */
++static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
+ {
+ size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
+
+@@ -1643,6 +1658,11 @@ static int grow_stack_state(struct bpf_func_state *state, int size)
+ return -ENOMEM;
+
+ state->allocated_stack = size;
++
++ /* update known max for given subprogram */
++ if (env->subprog_info[state->subprogno].stack_depth < size)
++ env->subprog_info[state->subprogno].stack_depth = size;
++
+ return 0;
+ }
+
+@@ -1762,6 +1782,9 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ dst_state->parent = src->parent;
+ dst_state->first_insn_idx = src->first_insn_idx;
+ dst_state->last_insn_idx = src->last_insn_idx;
++ dst_state->dfs_depth = src->dfs_depth;
++ dst_state->callback_unroll_depth = src->callback_unroll_depth;
++ dst_state->used_as_loop_entry = src->used_as_loop_entry;
+ for (i = 0; i <= src->curframe; i++) {
+ dst = dst_state->frame[i];
+ if (!dst) {
+@@ -1777,11 +1800,203 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ return 0;
+ }
+
++static u32 state_htab_size(struct bpf_verifier_env *env)
++{
++ return env->prog->len;
++}
++
++static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
++{
++ struct bpf_verifier_state *cur = env->cur_state;
++ struct bpf_func_state *state = cur->frame[cur->curframe];
++
++ return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
++}
++
++static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
++{
++ int fr;
++
++ if (a->curframe != b->curframe)
++ return false;
++
++ for (fr = a->curframe; fr >= 0; fr--)
++ if (a->frame[fr]->callsite != b->frame[fr]->callsite)
++ return false;
++
++ return true;
++}
++
++/* Open coded iterators allow back-edges in the state graph in order to
+ * check unbounded loops that use iterators.
++ *
++ * In is_state_visited() it is necessary to know if explored states are
++ * part of some loops in order to decide whether non-exact states
++ * comparison could be used:
++ * - non-exact states comparison establishes sub-state relation and uses
++ * read and precision marks to do so, these marks are propagated from
++ * children states and thus are not guaranteed to be final in a loop;
++ * - exact states comparison just checks if current and explored states
++ * are identical (and thus form a back-edge).
++ *
++ * Paper "A New Algorithm for Identifying Loops in Decompilation"
++ * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient
++ * algorithm for loop structure detection and gives an overview of
++ * relevant terminology. It also has helpful illustrations.
++ *
++ * [1] https://api.semanticscholar.org/CorpusID:15784067
++ *
+ * We use a similar algorithm, but because loop nesting structure is
+ * irrelevant for the verifier, ours is significantly simpler and resembles
+ * the strongly connected components algorithm from Sedgewick's textbook.
++ *
++ * Define topmost loop entry as a first node of the loop traversed in a
++ * depth first search starting from initial state. The goal of the loop
++ * tracking algorithm is to associate topmost loop entries with states
++ * derived from these entries.
++ *
+ * For each step in the DFS states traversal, the algorithm needs to identify
++ * the following situations:
++ *
++ * initial initial initial
++ * | | |
++ * V V V
++ * ... ... .---------> hdr
++ * | | | |
++ * V V | V
++ * cur .-> succ | .------...
++ * | | | | | |
++ * V | V | V V
++ * succ '-- cur | ... ...
++ * | | |
++ * | V V
++ * | succ <- cur
++ * | |
++ * | V
++ * | ...
++ * | |
++ * '----'
++ *
+ * (A) successor state of cur (B) successor state of cur or its entry
++ * not yet traversed are in current DFS path, thus cur and succ
++ * are members of the same outermost loop
++ *
++ * initial initial
++ * | |
++ * V V
++ * ... ...
++ * | |
++ * V V
++ * .------... .------...
++ * | | | |
++ * V V V V
++ * .-> hdr ... ... ...
++ * | | | | |
++ * | V V V V
++ * | succ <- cur succ <- cur
++ * | | |
++ * | V V
++ * | ... ...
++ * | | |
++ * '----' exit
++ *
++ * (C) successor state of cur is a part of some loop but this loop
++ * does not include cur or successor state is not in a loop at all.
++ *
++ * Algorithm could be described as the following python code:
++ *
++ * traversed = set() # Set of traversed nodes
++ * entries = {} # Mapping from node to loop entry
++ * depths = {} # Depth level assigned to graph node
++ * path = set() # Current DFS path
++ *
++ * # Find outermost loop entry known for n
++ * def get_loop_entry(n):
++ * h = entries.get(n, None)
++ * while h in entries and entries[h] != h:
++ * h = entries[h]
++ * return h
++ *
++ * # Update n's loop entry if h's outermost entry comes
++ * # before n's outermost entry in current DFS path.
++ * def update_loop_entry(n, h):
++ * n1 = get_loop_entry(n) or n
++ * h1 = get_loop_entry(h) or h
++ * if h1 in path and depths[h1] <= depths[n1]:
++ * entries[n] = h1
++ *
++ * def dfs(n, depth):
++ * traversed.add(n)
++ * path.add(n)
++ * depths[n] = depth
++ * for succ in G.successors(n):
++ * if succ not in traversed:
++ * # Case A: explore succ and update cur's loop entry
++ * # only if succ's entry is in current DFS path.
++ * dfs(succ, depth + 1)
++ * h = get_loop_entry(succ)
++ * update_loop_entry(n, h)
++ * else:
++ * # Case B or C depending on `h1 in path` check in update_loop_entry().
++ * update_loop_entry(n, succ)
++ * path.remove(n)
++ *
++ * To adapt this algorithm for use with verifier:
++ * - use st->branch == 0 as a signal that DFS of succ had been finished
++ * and cur's loop entry has to be updated (case A), handle this in
++ * update_branch_counts();
++ * - use st->branch > 0 as a signal that st is in the current DFS path;
++ * - handle cases B and C in is_state_visited();
++ * - update topmost loop entry for intermediate states in get_loop_entry().
++ */
++static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st)
++{
++ struct bpf_verifier_state *topmost = st->loop_entry, *old;
++
++ while (topmost && topmost->loop_entry && topmost != topmost->loop_entry)
++ topmost = topmost->loop_entry;
++ /* Update loop entries for intermediate states to avoid this
++ * traversal in future get_loop_entry() calls.
++ */
++ while (st && st->loop_entry != topmost) {
++ old = st->loop_entry;
++ st->loop_entry = topmost;
++ st = old;
++ }
++ return topmost;
++}
++
++static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
++{
++ struct bpf_verifier_state *cur1, *hdr1;
++
++ cur1 = get_loop_entry(cur) ?: cur;
++ hdr1 = get_loop_entry(hdr) ?: hdr;
+	/* The hdr1->branches check decides between cases B and C in
+	 * comment for get_loop_entry(). If hdr1->branches == 0 then
+	 * hdr's topmost loop entry is not in current DFS path,
++ * hence 'cur' and 'hdr' are not in the same loop and there is
++ * no need to update cur->loop_entry.
++ */
++ if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) {
++ cur->loop_entry = hdr;
++ hdr->used_as_loop_entry = true;
++ }
++}
++
+ static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
+ {
+ while (st) {
+ u32 br = --st->branches;
+
++ /* br == 0 signals that DFS exploration for 'st' is finished,
++ * thus it is necessary to update parent's loop entry if it
++ * turned out that st is a part of some loop.
++ * This is a part of 'case A' in get_loop_entry() comment.
++ */
++ if (br == 0 && st->parent && st->loop_entry)
++ update_loop_entry(st->parent, st->loop_entry);
++
+ /* WARN_ON(br > 1) technically makes sense here,
+ * but see comment in push_stack(), hence:
+ */
+@@ -2324,6 +2539,8 @@ static void mark_btf_ld_reg(struct bpf_verifier_env *env,
+ regs[regno].type = PTR_TO_BTF_ID | flag;
+ regs[regno].btf = btf;
+ regs[regno].btf_id = btf_id;
++ if (type_may_be_null(flag))
++ regs[regno].id = ++env->id_gen;
+ }
+
+ #define DEF_NOT_SUBREG (0)
+@@ -2847,8 +3064,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
+
+ if (code == (BPF_JMP | BPF_CALL) &&
+ insn[i].src_reg == 0 &&
+- insn[i].imm == BPF_FUNC_tail_call)
++ insn[i].imm == BPF_FUNC_tail_call) {
+ subprog[cur_subprog].has_tail_call = true;
++ subprog[cur_subprog].tail_call_reachable = true;
++ }
+ if (BPF_CLASS(code) == BPF_LD &&
+ (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
+ subprog[cur_subprog].has_ld_abs = true;
+@@ -3118,13 +3337,11 @@ static void mark_insn_zext(struct bpf_verifier_env *env,
+ reg->subreg_def = DEF_NOT_SUBREG;
+ }
+
+-static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
+- enum reg_arg_type t)
++static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
++ enum reg_arg_type t)
+ {
+- struct bpf_verifier_state *vstate = env->cur_state;
+- struct bpf_func_state *state = vstate->frame[vstate->curframe];
+ struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
+- struct bpf_reg_state *reg, *regs = state->regs;
++ struct bpf_reg_state *reg;
+ bool rw64;
+
+ if (regno >= MAX_BPF_REG) {
+@@ -3165,6 +3382,15 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
+ return 0;
+ }
+
++static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
++ enum reg_arg_type t)
++{
++ struct bpf_verifier_state *vstate = env->cur_state;
++ struct bpf_func_state *state = vstate->frame[vstate->curframe];
++
++ return __check_reg_arg(env, state->regs, regno, t);
++}
++
+ static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+ {
+ env->insn_aux_data[idx].jmp_point = true;
+@@ -3200,12 +3426,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
++ * Return -ENOENT if we exhausted all instructions within given state.
++ *
++ * It's legal to have a bit of a looping with the same starting and ending
++ * insn index within the same state, e.g.: 3->4->5->3, so just because current
++ * instruction index is the same as state's first_idx doesn't mean we are
++ * done. If there is still some jump history left, we should keep going. We
++ * need to take into account that we might have a jump history between given
++ * state's parent and itself, due to checkpointing. In this case, we'll have
++ * history entry recording a jump from last instruction of parent state and
++ * first instruction of given state.
+ */
+ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ u32 *history)
+ {
+ u32 cnt = *history;
+
++ if (i == st->first_insn_idx) {
++ if (cnt == 0)
++ return -ENOENT;
++ if (cnt == 1 && st->jmp_history[0].idx == i)
++ return -ENOENT;
++ }
++
+ if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ i = st->jmp_history[cnt - 1].prev_idx;
+ (*history)--;
+@@ -3386,6 +3629,8 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
+ }
+ }
+
++static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
++
+ /* For given verifier state backtrack_insn() is called from the last insn to
+ * the first insn. Its purpose is to compute a bitmask of registers and
+ * stack slots that needs precision in the parent verifier state.
+@@ -3426,14 +3671,20 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (!bt_is_reg_set(bt, dreg))
+ return 0;
+- if (opcode == BPF_MOV) {
++ if (opcode == BPF_END || opcode == BPF_NEG) {
++ /* sreg is reserved and unused
+ * dreg still needs precision before this insn
++ */
++ return 0;
++ } else if (opcode == BPF_MOV) {
+ if (BPF_SRC(insn->code) == BPF_X) {
+ /* dreg = sreg or dreg = (s8, s16, s32)sreg
+ * dreg needs precision after this insn
+ * sreg needs precision before this insn
+ */
+ bt_clear_reg(bt, dreg);
+- bt_set_reg(bt, sreg);
++ if (sreg != BPF_REG_FP)
++ bt_set_reg(bt, sreg);
+ } else {
+ /* dreg = K
+ * dreg needs precision after this insn.
+@@ -3449,7 +3700,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ * both dreg and sreg need precision
+ * before this insn
+ */
+- bt_set_reg(bt, sreg);
++ if (sreg != BPF_REG_FP)
++ bt_set_reg(bt, sreg);
+ } /* else dreg += K
+ * dreg still needs precision before this insn
+ */
+@@ -3556,16 +3808,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ return -EFAULT;
+ return 0;
+ }
+- } else if ((bpf_helper_call(insn) &&
+- is_callback_calling_function(insn->imm) &&
+- !is_async_callback_calling_function(insn->imm)) ||
+- (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
+- /* callback-calling helper or kfunc call, which means
+- * we are exiting from subprog, but unlike the subprog
+- * call handling above, we shouldn't propagate
+- * precision of r1-r5 (if any requested), as they are
+- * not actually arguments passed directly to callback
+- * subprogs
++ } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) {
++ /* exit from callback subprog to callback-calling helper or
++ * kfunc call. Use idx/subseq_idx check to discern it from
++ * straight line code backtracking.
++ * Unlike the subprog call handling above, we shouldn't
++ * propagate precision of r1-r5 (if any requested), as they are
++ * not actually arguments passed directly to callback subprogs
+ */
+ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
+@@ -3600,10 +3849,18 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ } else if (opcode == BPF_EXIT) {
+ bool r0_precise;
+
++ /* Backtracking to a nested function call, 'idx' is a part of
+ * the inner frame, 'subseq_idx' is a part of the outer frame.
++ * In case of a regular function call, instructions giving
++ * precision to registers R1-R5 should have been found already.
++ * In case of a callback, it is ok to have R1-R5 marked for
++ * backtracking, as these registers are set by the function
++ * invoking callback.
++ */
++ if (subseq_idx >= 0 && calls_callback(env, subseq_idx))
++ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
++ bt_clear_reg(bt, i);
+ if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
+- /* if backtracing was looking for registers R1-R5
+- * they should have been found already.
+- */
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+@@ -4080,10 +4337,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ * Nothing to be tracked further in the parent state.
+ */
+ return 0;
+- if (i == first_idx)
+- break;
+ subseq_idx = i;
+ i = get_prev_insn_idx(st, i, &history);
++ if (i == -ENOENT)
++ break;
+ if (i >= env->prog->len) {
+ /* This can happen if backtracking reached insn 0
+ * and there are still reg_mask or stack_mask
+@@ -4300,14 +4557,11 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg = NULL;
+ u32 dst_reg = insn->dst_reg;
+
+- err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
+- if (err)
+- return err;
+ /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+ * so it's aligned access and [off, off + size) are within stack limits
+ */
+ if (!env->allow_ptr_leaks &&
+- state->stack[spi].slot_type[0] == STACK_SPILL &&
++ is_spilled_reg(&state->stack[spi]) &&
+ size != BPF_REG_SIZE) {
+ verbose(env, "attempt to corrupt spilled pointer on stack\n");
+ return -EACCES;
+@@ -4358,7 +4612,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ insn->imm != 0 && env->bpf_capable) {
+ struct bpf_reg_state fake_reg = {};
+
+- __mark_reg_known(&fake_reg, (u32)insn->imm);
++ __mark_reg_known(&fake_reg, insn->imm);
+ fake_reg.type = SCALAR_VALUE;
+ save_register_state(state, spi, &fake_reg, size);
+ } else if (reg && is_spillable_regtype(reg->type)) {
+@@ -4458,10 +4712,6 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
+ (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
+ writing_zero = true;
+
+- err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
+- if (err)
+- return err;
+-
+ for (i = min_off; i < max_off; i++) {
+ int spi;
+
+@@ -5127,8 +5377,6 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
+ rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ?
+ PTR_MAYBE_NULL | MEM_RCU :
+ PTR_MAYBE_NULL | PTR_UNTRUSTED);
+- /* For mark_ptr_or_null_reg */
+- val_reg->id = ++env->id_gen;
+ } else if (class == BPF_STX) {
+ val_reg = reg_state(env, value_regno);
+ if (!register_is_null(val_reg) &&
+@@ -5438,7 +5686,8 @@ static bool is_trusted_reg(const struct bpf_reg_state *reg)
+ return true;
+
+ /* Types listed in the reg2btf_ids are always trusted */
+- if (reg2btf_ids[base_type(reg->type)])
++ if (reg2btf_ids[base_type(reg->type)] &&
++ !bpf_type_has_unsafe_modifiers(reg->type))
+ return true;
+
+ /* If a register is not referenced, it is trusted if it has the
+@@ -5576,20 +5825,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
+ strict);
+ }
+
+-static int update_stack_depth(struct bpf_verifier_env *env,
+- const struct bpf_func_state *func,
+- int off)
+-{
+- u16 stack = env->subprog_info[func->subprogno].stack_depth;
+-
+- if (stack >= -off)
+- return 0;
+-
+- /* update known max for given subprogram */
+- env->subprog_info[func->subprogno].stack_depth = -off;
+- return 0;
+-}
+-
+ /* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+@@ -5926,6 +6161,7 @@ static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
+ }
+ reg->u32_min_value = 0;
+ reg->u32_max_value = U32_MAX;
++ reg->var_off = tnum_subreg(tnum_unknown);
+ }
+
+ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
+@@ -5970,6 +6206,7 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
+ reg->s32_max_value = s32_max;
+ reg->u32_min_value = (u32)s32_min;
+ reg->u32_max_value = (u32)s32_max;
++ reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
+ return;
+ }
+
+@@ -6031,6 +6268,7 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
+ #define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu)
+ #define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null)
+ #define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted)
++#define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type) __PASTE(__type, __safe_trusted_or_null)
+
+ /*
+ * Allow list few fields as RCU trusted or full trusted.
+@@ -6094,7 +6332,7 @@ BTF_TYPE_SAFE_TRUSTED(struct dentry) {
+ struct inode *d_inode;
+ };
+
+-BTF_TYPE_SAFE_TRUSTED(struct socket) {
++BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) {
+ struct sock *sk;
+ };
+
+@@ -6129,11 +6367,20 @@ static bool type_is_trusted(struct bpf_verifier_env *env,
+ BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
+ BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
+ BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
+- BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
+
+ return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
+ }
+
++static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
++ struct bpf_reg_state *reg,
++ const char *field_name, u32 btf_id)
++{
++ BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket));
++
++ return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id,
++ "__safe_trusted_or_null");
++}
++
+ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs,
+ int regno, int off, int size,
+@@ -6242,6 +6489,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ */
+ if (type_is_trusted(env, reg, field_name, btf_id)) {
+ flag |= PTR_TRUSTED;
++ } else if (type_is_trusted_or_null(env, reg, field_name, btf_id)) {
++ flag |= PTR_TRUSTED | PTR_MAYBE_NULL;
+ } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
+ if (type_is_rcu(env, reg, field_name, btf_id)) {
+ /* ignore __rcu tag and mark it MEM_RCU */
+@@ -6348,13 +6597,14 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
+ * The minimum valid offset is -MAX_BPF_STACK for writes, and
+ * -state->allocated_stack for reads.
+ */
+-static int check_stack_slot_within_bounds(int off,
+- struct bpf_func_state *state,
+- enum bpf_access_type t)
++static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
++ s64 off,
++ struct bpf_func_state *state,
++ enum bpf_access_type t)
+ {
+ int min_valid_off;
+
+- if (t == BPF_WRITE)
++ if (t == BPF_WRITE || env->allow_uninit_stack)
+ min_valid_off = -MAX_BPF_STACK;
+ else
+ min_valid_off = -state->allocated_stack;
+@@ -6377,7 +6627,7 @@ static int check_stack_access_within_bounds(
+ struct bpf_reg_state *regs = cur_regs(env);
+ struct bpf_reg_state *reg = regs + regno;
+ struct bpf_func_state *state = func(env, reg);
+- int min_off, max_off;
++ s64 min_off, max_off;
+ int err;
+ char *err_extra;
+
+@@ -6390,11 +6640,8 @@ static int check_stack_access_within_bounds(
+ err_extra = " write to";
+
+ if (tnum_is_const(reg->var_off)) {
+- min_off = reg->var_off.value + off;
+- if (access_size > 0)
+- max_off = min_off + access_size - 1;
+- else
+- max_off = min_off;
++ min_off = (s64)reg->var_off.value + off;
++ max_off = min_off + access_size;
+ } else {
+ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+ reg->smin_value <= -BPF_MAX_VAR_OFF) {
+@@ -6403,15 +6650,17 @@ static int check_stack_access_within_bounds(
+ return -EACCES;
+ }
+ min_off = reg->smin_value + off;
+- if (access_size > 0)
+- max_off = reg->smax_value + off + access_size - 1;
+- else
+- max_off = min_off;
++ max_off = reg->smax_value + off + access_size;
+ }
+
+- err = check_stack_slot_within_bounds(min_off, state, type);
+- if (!err)
+- err = check_stack_slot_within_bounds(max_off, state, type);
++ err = check_stack_slot_within_bounds(env, min_off, state, type);
++ if (!err && max_off > 0)
++ err = -EINVAL; /* out of stack access into non-negative offsets */
++ if (!err && access_size < 0)
+		/* access_size should not be negative (or overflow an int); other checks
++ * along the way should have prevented such an access.
++ */
++ err = -EFAULT; /* invalid negative access size; integer overflow? */
+
+ if (err) {
+ if (tnum_is_const(reg->var_off)) {
+@@ -6424,8 +6673,10 @@ static int check_stack_access_within_bounds(
+ verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
+ err_extra, regno, tn_buf, access_size);
+ }
++ return err;
+ }
+- return err;
++
++ return grow_stack_state(env, state, round_up(-min_off, BPF_REG_SIZE));
+ }
+
+ /* check whether memory at (regno + off) is accessible for t = (read | write)
+@@ -6440,7 +6691,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ {
+ struct bpf_reg_state *regs = cur_regs(env);
+ struct bpf_reg_state *reg = regs + regno;
+- struct bpf_func_state *state;
+ int size, err = 0;
+
+ size = bpf_size_to_bytes(bpf_size);
+@@ -6583,11 +6833,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ if (err)
+ return err;
+
+- state = func(env, reg);
+- err = update_stack_depth(env, state, off);
+- if (err)
+- return err;
+-
+ if (t == BPF_READ)
+ err = check_stack_read(env, regno, off, size,
+ value_regno);
+@@ -6782,7 +7027,8 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
+
+ /* When register 'regno' is used to read the stack (either directly or through
+ * a helper function) make sure that it's within stack boundary and, depending
+- * on the access type, that all elements of the stack are initialized.
++ * on the access type and privileges, that all elements of the stack are
++ * initialized.
+ *
+ * 'off' includes 'regno->off', but not its dynamic part (if any).
+ *
+@@ -6890,8 +7136,11 @@ static int check_stack_range_initialized(
+
+ slot = -i - 1;
+ spi = slot / BPF_REG_SIZE;
+- if (state->allocated_stack <= slot)
+- goto err;
++ if (state->allocated_stack <= slot) {
++ verbose(env, "verifier bug: allocated_stack too small");
++ return -EFAULT;
++ }
++
+ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+ if (*stype == STACK_MISC)
+ goto mark;
+@@ -6915,7 +7164,6 @@ static int check_stack_range_initialized(
+ goto mark;
+ }
+
+-err:
+ if (tnum_is_const(reg->var_off)) {
+ verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
+ err_extra, regno, min_off, i - min_off, access_size);
+@@ -6940,7 +7188,7 @@ static int check_stack_range_initialized(
+ * helper may write to the entire memory range.
+ */
+ }
+- return update_stack_depth(env, state, min_off);
++ return 0;
+ }
+
+ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+@@ -7523,6 +7771,90 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
+ return 0;
+ }
+
++/* Look for a previous loop entry at insn_idx: nearest parent state
++ * stopped at insn_idx with callsites matching those in cur->frame.
++ */
++static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
++ struct bpf_verifier_state *cur,
++ int insn_idx)
++{
++ struct bpf_verifier_state_list *sl;
++ struct bpf_verifier_state *st;
++
++ /* Explored states are pushed in stack order, most recent states come first */
++ sl = *explored_state(env, insn_idx);
++ for (; sl; sl = sl->next) {
++ /* If st->branches != 0 state is a part of current DFS verification path,
+		 * hence cur & st form a loop.
++ */
++ st = &sl->state;
++ if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) &&
++ st->dfs_depth < cur->dfs_depth)
++ return st;
++ }
++
++ return NULL;
++}
++
++static void reset_idmap_scratch(struct bpf_verifier_env *env);
++static bool regs_exact(const struct bpf_reg_state *rold,
++ const struct bpf_reg_state *rcur,
++ struct bpf_idmap *idmap);
++
++static void maybe_widen_reg(struct bpf_verifier_env *env,
++ struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
++ struct bpf_idmap *idmap)
++{
++ if (rold->type != SCALAR_VALUE)
++ return;
++ if (rold->type != rcur->type)
++ return;
++ if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap))
++ return;
++ __mark_reg_unknown(env, rcur);
++}
++
++static int widen_imprecise_scalars(struct bpf_verifier_env *env,
++ struct bpf_verifier_state *old,
++ struct bpf_verifier_state *cur)
++{
++ struct bpf_func_state *fold, *fcur;
++ int i, fr;
++
++ reset_idmap_scratch(env);
++ for (fr = old->curframe; fr >= 0; fr--) {
++ fold = old->frame[fr];
++ fcur = cur->frame[fr];
++
++ for (i = 0; i < MAX_BPF_REG; i++)
++ maybe_widen_reg(env,
++ &fold->regs[i],
++ &fcur->regs[i],
++ &env->idmap_scratch);
++
++ for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) {
++ if (!is_spilled_reg(&fold->stack[i]) ||
++ !is_spilled_reg(&fcur->stack[i]))
++ continue;
++
++ maybe_widen_reg(env,
++ &fold->stack[i].spilled_ptr,
++ &fcur->stack[i].spilled_ptr,
++ &env->idmap_scratch);
++ }
++ }
++ return 0;
++}
++
++static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st,
++ struct bpf_kfunc_call_arg_meta *meta)
++{
++ int iter_frameno = meta->iter.frameno;
++ int iter_spi = meta->iter.spi;
++
++ return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
++}
++
+ /* process_iter_next_call() is called when verifier gets to iterator's next
+ * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
+ * to it as just "iter_next()" in comments below.
+@@ -7564,33 +7896,53 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
+ * is some statically known limit on number of iterations (e.g., if there is
+ * an explicit `if n > 100 then break;` statement somewhere in the loop).
+ *
+- * One very subtle but very important aspect is that we *always* simulate NULL
+- * condition first (as the current state) before we simulate non-NULL case.
+- * This has to do with intricacies of scalar precision tracking. By simulating
+- * "exit condition" of iter_next() returning NULL first, we make sure all the
+- * relevant precision marks *that will be set **after** we exit iterator loop*
+- * are propagated backwards to common parent state of NULL and non-NULL
+- * branches. Thanks to that, state equivalence checks done later in forked
+- * state, when reaching iter_next() for ACTIVE iterator, can assume that
+- * precision marks are finalized and won't change. Because simulating another
+- * ACTIVE iterator iteration won't change them (because given same input
+- * states we'll end up with exactly same output states which we are currently
+- * comparing; and verification after the loop already propagated back what
+- * needs to be **additionally** tracked as precise). It's subtle, grok
+- * precision tracking for more intuitive understanding.
++ * Iteration convergence logic in is_state_visited() relies on exact
++ * states comparison, which ignores read and precision marks.
++ * This is necessary because read and precision marks are not finalized
++ * while in the loop. Exact comparison might preclude convergence for
++ * simple programs like below:
++ *
++ * i = 0;
++ * while(iter_next(&it))
++ * i++;
++ *
++ * At each iteration step i++ would produce a new distinct state and
++ * eventually instruction processing limit would be reached.
++ *
++ * To avoid such behavior speculatively forget (widen) range for
++ * imprecise scalar registers, if those registers were not precise at the
++ * end of the previous iteration and do not match exactly.
++ *
+ * This is a conservative heuristic that allows a wide range of programs to
+ * be verified; however, it precludes verification of programs that conjure an
++ * imprecise value on the first loop iteration and use it as precise on a second.
++ * For example, the following safe program would fail to verify:
++ *
++ * struct bpf_num_iter it;
++ * int arr[10];
++ * int i = 0, a = 0;
++ * bpf_iter_num_new(&it, 0, 10);
++ * while (bpf_iter_num_next(&it)) {
++ * if (a == 0) {
++ * a = 1;
+ * i = 7; // Because i changed, the verifier would forget
+ * // its range on second loop entry.
++ * } else {
++ * arr[i] = 42; // This would fail to verify.
++ * }
++ * }
++ * bpf_iter_num_destroy(&it);
+ */
+ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
+ struct bpf_kfunc_call_arg_meta *meta)
+ {
+- struct bpf_verifier_state *cur_st = env->cur_state, *queued_st;
++ struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
+ struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
+ struct bpf_reg_state *cur_iter, *queued_iter;
+- int iter_frameno = meta->iter.frameno;
+- int iter_spi = meta->iter.spi;
+
+ BTF_TYPE_EMIT(struct bpf_iter);
+
+- cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
++ cur_iter = get_iter_from_state(cur_st, meta);
+
+ if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
+ cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
+@@ -7600,14 +7952,29 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
+ }
+
+ if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
+		/* Because iter_next() call is a checkpoint, is_state_visited()
+		 * should guarantee a parent state with the same call sites and insn_idx.
++ */
++ if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx ||
++ !same_callsites(cur_st->parent, cur_st)) {
++ verbose(env, "bug: bad parent state for iter next call");
++ return -EFAULT;
++ }
+		/* Note cur_st->parent in the call below; it is necessary to skip
++ * checkpoint created for cur_st by is_state_visited()
++ * right at this instruction.
++ */
++ prev_st = find_prev_entry(env, cur_st->parent, insn_idx);
+ /* branch out active iter state */
+ queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
+ if (!queued_st)
+ return -ENOMEM;
+
+- queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
++ queued_iter = get_iter_from_state(queued_st, meta);
+ queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
+ queued_iter->iter.depth++;
++ if (prev_st)
++ widen_imprecise_scalars(env, prev_st, queued_st);
+
+ queued_fr = queued_st->frame[queued_st->curframe];
+ mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
+@@ -7627,6 +7994,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
+ type == ARG_CONST_SIZE_OR_ZERO;
+ }
+
++static bool arg_type_is_raw_mem(enum bpf_arg_type type)
++{
++ return base_type(type) == ARG_PTR_TO_MEM &&
++ type & MEM_UNINIT;
++}
++
+ static bool arg_type_is_release(enum bpf_arg_type type)
+ {
+ return type & OBJ_RELEASE;
+@@ -7637,16 +8010,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type)
+ return base_type(type) == ARG_PTR_TO_DYNPTR;
+ }
+
+-static int int_ptr_type_to_size(enum bpf_arg_type type)
+-{
+- if (type == ARG_PTR_TO_INT)
+- return sizeof(u32);
+- else if (type == ARG_PTR_TO_LONG)
+- return sizeof(u64);
+-
+- return -EINVAL;
+-}
+-
+ static int resolve_map_arg_type(struct bpf_verifier_env *env,
+ const struct bpf_call_arg_meta *meta,
+ enum bpf_arg_type *arg_type)
+@@ -7719,16 +8082,6 @@ static const struct bpf_reg_types mem_types = {
+ },
+ };
+
+-static const struct bpf_reg_types int_ptr_types = {
+- .types = {
+- PTR_TO_STACK,
+- PTR_TO_PACKET,
+- PTR_TO_PACKET_META,
+- PTR_TO_MAP_KEY,
+- PTR_TO_MAP_VALUE,
+- },
+-};
+-
+ static const struct bpf_reg_types spin_lock_types = {
+ .types = {
+ PTR_TO_MAP_VALUE,
+@@ -7783,8 +8136,6 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
+ [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
+ [ARG_PTR_TO_MEM] = &mem_types,
+ [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types,
+- [ARG_PTR_TO_INT] = &int_ptr_types,
+- [ARG_PTR_TO_LONG] = &int_ptr_types,
+ [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
+ [ARG_PTR_TO_FUNC] = &func_ptr_types,
+ [ARG_PTR_TO_STACK] = &stack_ptr_types,
+@@ -8291,9 +8642,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ */
+ meta->raw_mode = arg_type & MEM_UNINIT;
+ if (arg_type & MEM_FIXED_SIZE) {
+- err = check_helper_mem_access(env, regno,
+- fn->arg_size[arg], false,
+- meta);
++ err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta);
++ if (err)
++ return err;
++ if (arg_type & MEM_ALIGNED)
++ err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true);
+ }
+ break;
+ case ARG_CONST_SIZE:
+@@ -8318,17 +8671,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ if (err)
+ return err;
+ break;
+- case ARG_PTR_TO_INT:
+- case ARG_PTR_TO_LONG:
+- {
+- int size = int_ptr_type_to_size(arg_type);
+-
+- err = check_helper_mem_access(env, regno, size, false, meta);
+- if (err)
+- return err;
+- err = check_ptr_alignment(env, reg, 0, size, true);
+- break;
+- }
+ case ARG_PTR_TO_CONST_STR:
+ {
+ struct bpf_map *map = reg->map_ptr;
+@@ -8386,7 +8728,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ enum bpf_attach_type eatype = env->prog->expected_attach_type;
+ enum bpf_prog_type type = resolve_prog_type(env->prog);
+
+- if (func_id != BPF_FUNC_map_update_elem)
++ if (func_id != BPF_FUNC_map_update_elem &&
++ func_id != BPF_FUNC_map_delete_elem)
+ return false;
+
+ /* It's not possible to get access to a locked struct sock in these
+@@ -8397,6 +8740,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ if (eatype == BPF_TRACE_ITER)
+ return true;
+ break;
++ case BPF_PROG_TYPE_SOCK_OPS:
++ /* map_update allowed only via dedicated helpers with event type checks */
++ if (func_id == BPF_FUNC_map_delete_elem)
++ return true;
++ break;
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+@@ -8492,7 +8840,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ case BPF_MAP_TYPE_SOCKMAP:
+ if (func_id != BPF_FUNC_sk_redirect_map &&
+ func_id != BPF_FUNC_sock_map_update &&
+- func_id != BPF_FUNC_map_delete_elem &&
+ func_id != BPF_FUNC_msg_redirect_map &&
+ func_id != BPF_FUNC_sk_select_reuseport &&
+ func_id != BPF_FUNC_map_lookup_elem &&
+@@ -8502,7 +8849,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ case BPF_MAP_TYPE_SOCKHASH:
+ if (func_id != BPF_FUNC_sk_redirect_hash &&
+ func_id != BPF_FUNC_sock_hash_update &&
+- func_id != BPF_FUNC_map_delete_elem &&
+ func_id != BPF_FUNC_msg_redirect_hash &&
+ func_id != BPF_FUNC_sk_select_reuseport &&
+ func_id != BPF_FUNC_map_lookup_elem &&
+@@ -8676,15 +9022,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
+ {
+ int count = 0;
+
+- if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg1_type))
+ count++;
+- if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg2_type))
+ count++;
+- if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg3_type))
+ count++;
+- if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg4_type))
+ count++;
+- if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
++ if (arg_type_is_raw_mem(fn->arg5_type))
+ count++;
+
+ /* We only support one arg being in raw mode at the moment,
+@@ -8837,7 +9183,7 @@ static void clear_caller_saved_regs(struct bpf_verifier_env *env,
+ /* after the call registers r0 - r5 were scratched */
+ for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ mark_reg_not_init(env, regs, caller_saved[i]);
+- check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
++ __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK);
+ }
+ }
+
+@@ -8850,11 +9196,10 @@ static int set_callee_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee, int insn_idx);
+
+-static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+- int *insn_idx, int subprog,
+- set_callee_state_fn set_callee_state_cb)
++static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
++ set_callee_state_fn set_callee_state_cb,
++ struct bpf_verifier_state *state)
+ {
+- struct bpf_verifier_state *state = env->cur_state;
+ struct bpf_func_state *caller, *callee;
+ int err;
+
+@@ -8864,53 +9209,71 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ return -E2BIG;
+ }
+
+- caller = state->frame[state->curframe];
+ if (state->frame[state->curframe + 1]) {
+ verbose(env, "verifier bug. Frame %d already allocated\n",
+ state->curframe + 1);
+ return -EFAULT;
+ }
+
++ caller = state->frame[state->curframe];
++ callee = kzalloc(sizeof(*callee), GFP_KERNEL);
++ if (!callee)
++ return -ENOMEM;
++ state->frame[state->curframe + 1] = callee;
++
++ /* callee cannot access r0, r6 - r9 for reading and has to write
++ * into its own stack before reading from it.
++ * callee can read/write into caller's stack
++ */
++ init_func_state(env, callee,
++ /* remember the callsite, it will be used by bpf_exit */
++ callsite,
++ state->curframe + 1 /* frameno within this callchain */,
++ subprog /* subprog number within this prog */);
++ /* Transfer references to the callee */
++ err = copy_reference_state(callee, caller);
++ err = err ?: set_callee_state_cb(env, caller, callee, callsite);
++ if (err)
++ goto err_out;
++
++ /* only increment it after check_reg_arg() finished */
++ state->curframe++;
++
++ return 0;
++
++err_out:
++ free_func_state(callee);
++ state->frame[state->curframe + 1] = NULL;
++ return err;
++}
++
++static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
++ int insn_idx, int subprog,
++ set_callee_state_fn set_callee_state_cb)
++{
++ struct bpf_verifier_state *state = env->cur_state, *callback_state;
++ struct bpf_func_state *caller, *callee;
++ int err;
++
++ caller = state->frame[state->curframe];
+ err = btf_check_subprog_call(env, subprog, caller->regs);
+ if (err == -EFAULT)
+ return err;
+- if (subprog_is_global(env, subprog)) {
+- if (err) {
+- verbose(env, "Caller passes invalid args into func#%d\n",
+- subprog);
+- return err;
+- } else {
+- if (env->log.level & BPF_LOG_LEVEL)
+- verbose(env,
+- "Func#%d is global and valid. Skipping.\n",
+- subprog);
+- clear_caller_saved_regs(env, caller->regs);
+-
+- /* All global functions return a 64-bit SCALAR_VALUE */
+- mark_reg_unknown(env, caller->regs, BPF_REG_0);
+- caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
+-
+- /* continue with next insn after call */
+- return 0;
+- }
+- }
+
+ /* set_callee_state is used for direct subprog calls, but we are
+ * interested in validating only BPF helpers that can call subprogs as
+ * callbacks
+ */
+- if (set_callee_state_cb != set_callee_state) {
+- if (bpf_pseudo_kfunc_call(insn) &&
+- !is_callback_calling_kfunc(insn->imm)) {
+- verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
+- func_id_name(insn->imm), insn->imm);
+- return -EFAULT;
+- } else if (!bpf_pseudo_kfunc_call(insn) &&
+- !is_callback_calling_function(insn->imm)) { /* helper */
+- verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
+- func_id_name(insn->imm), insn->imm);
+- return -EFAULT;
+- }
++ if (bpf_pseudo_kfunc_call(insn) &&
++ !is_sync_callback_calling_kfunc(insn->imm)) {
++ verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
++ func_id_name(insn->imm), insn->imm);
++ return -EFAULT;
++ } else if (!bpf_pseudo_kfunc_call(insn) &&
++ !is_callback_calling_function(insn->imm)) { /* helper */
++ verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
++ func_id_name(insn->imm), insn->imm);
++ return -EFAULT;
+ }
+
+ if (insn->code == (BPF_JMP | BPF_CALL) &&
+@@ -8921,53 +9284,83 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ /* there is no real recursion here. timer callbacks are async */
+ env->subprog_info[subprog].is_async_cb = true;
+ async_cb = push_async_cb(env, env->subprog_info[subprog].start,
+- *insn_idx, subprog);
++ insn_idx, subprog);
+ if (!async_cb)
+ return -EFAULT;
+ callee = async_cb->frame[0];
+ callee->async_entry_cnt = caller->async_entry_cnt + 1;
+
+ /* Convert bpf_timer_set_callback() args into timer callback args */
+- err = set_callee_state_cb(env, caller, callee, *insn_idx);
++ err = set_callee_state_cb(env, caller, callee, insn_idx);
+ if (err)
+ return err;
+
++ return 0;
++ }
++
++	/* for callback functions, enqueue an entry for the callback and
++	 * proceed with the next instruction within the current frame.
++	 */
++ callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
++ if (!callback_state)
++ return -ENOMEM;
++
++ err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
++ callback_state);
++ if (err)
++ return err;
++
++ callback_state->callback_unroll_depth++;
++ callback_state->frame[callback_state->curframe - 1]->callback_depth++;
++ caller->callback_depth = 0;
++ return 0;
++}
++
++static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
++ int *insn_idx)
++{
++ struct bpf_verifier_state *state = env->cur_state;
++ struct bpf_func_state *caller;
++ int err, subprog, target_insn;
++
++ target_insn = *insn_idx + insn->imm + 1;
++ subprog = find_subprog(env, target_insn);
++ if (subprog < 0) {
++ verbose(env, "verifier bug. No program starts at insn %d\n", target_insn);
++ return -EFAULT;
++ }
++
++ caller = state->frame[state->curframe];
++ err = btf_check_subprog_call(env, subprog, caller->regs);
++ if (err == -EFAULT)
++ return err;
++ if (subprog_is_global(env, subprog)) {
++ if (err) {
++ verbose(env, "Caller passes invalid args into func#%d\n", subprog);
++ return err;
++ }
++
++ if (env->log.level & BPF_LOG_LEVEL)
++ verbose(env, "Func#%d is global and valid. Skipping.\n", subprog);
+ clear_caller_saved_regs(env, caller->regs);
++
++ /* All global functions return a 64-bit SCALAR_VALUE */
+ mark_reg_unknown(env, caller->regs, BPF_REG_0);
+ caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
++
+ /* continue with next insn after call */
+ return 0;
+ }
+
+- callee = kzalloc(sizeof(*callee), GFP_KERNEL);
+- if (!callee)
+- return -ENOMEM;
+- state->frame[state->curframe + 1] = callee;
+-
+- /* callee cannot access r0, r6 - r9 for reading and has to write
+- * into its own stack before reading from it.
+- * callee can read/write into caller's stack
++	/* for a regular function call, set up a new frame and continue
++	 * from that frame.
+ */
+- init_func_state(env, callee,
+- /* remember the callsite, it will be used by bpf_exit */
+- *insn_idx /* callsite */,
+- state->curframe + 1 /* frameno within this callchain */,
+- subprog /* subprog number within this prog */);
+-
+- /* Transfer references to the callee */
+- err = copy_reference_state(callee, caller);
++ err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state);
+ if (err)
+- goto err_out;
+-
+- err = set_callee_state_cb(env, caller, callee, *insn_idx);
+- if (err)
+- goto err_out;
++ return err;
+
+ clear_caller_saved_regs(env, caller->regs);
+
+- /* only increment it after check_reg_arg() finished */
+- state->curframe++;
+-
+ /* and go analyze first insn of the callee */
+ *insn_idx = env->subprog_info[subprog].start - 1;
+
+@@ -8975,14 +9368,10 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ verbose(env, "caller:\n");
+ print_verifier_state(env, caller, true);
+ verbose(env, "callee:\n");
+- print_verifier_state(env, callee, true);
++ print_verifier_state(env, state->frame[state->curframe], true);
+ }
+- return 0;
+
+-err_out:
+- free_func_state(callee);
+- state->frame[state->curframe + 1] = NULL;
+- return err;
++ return 0;
+ }
+
+ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+@@ -9026,22 +9415,6 @@ static int set_callee_state(struct bpf_verifier_env *env,
+ return 0;
+ }
+
+-static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+- int *insn_idx)
+-{
+- int subprog, target_insn;
+-
+- target_insn = *insn_idx + insn->imm + 1;
+- subprog = find_subprog(env, target_insn);
+- if (subprog < 0) {
+- verbose(env, "verifier bug. No program starts at insn %d\n",
+- target_insn);
+- return -EFAULT;
+- }
+-
+- return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
+-}
+-
+ static int set_map_elem_callback_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee,
+@@ -9234,9 +9607,10 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
+
+ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ {
+- struct bpf_verifier_state *state = env->cur_state;
++ struct bpf_verifier_state *state = env->cur_state, *prev_st;
+ struct bpf_func_state *caller, *callee;
+ struct bpf_reg_state *r0;
++ bool in_callback_fn;
+ int err;
+
+ callee = state->frame[state->curframe];
+@@ -9261,10 +9635,22 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ verbose(env, "R0 not a scalar value\n");
+ return -EACCES;
+ }
++
++ /* we are going to rely on register's precise value */
++ err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
++ err = err ?: mark_chain_precision(env, BPF_REG_0);
++ if (err)
++ return err;
++
+ if (!tnum_in(range, r0->var_off)) {
+ verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
+ return -EINVAL;
+ }
++ if (!calls_callback(env, callee->callsite)) {
++ verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n",
++ *insn_idx, callee->callsite);
++ return -EFAULT;
++ }
+ } else {
+ /* return to the caller whatever r0 had in the callee */
+ caller->regs[BPF_REG_0] = *r0;
+@@ -9282,7 +9668,16 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ return err;
+ }
+
+- *insn_idx = callee->callsite + 1;
++	/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to the
++	 * callsite, where the function call logic will reschedule the callback
++	 * visit. If the iteration converges, is_state_visited() will eventually
++	 * prune that visit.
++	 */
++ in_callback_fn = callee->in_callback_fn;
++ if (in_callback_fn)
++ *insn_idx = callee->callsite;
++ else
++ *insn_idx = callee->callsite + 1;
++
+ if (env->log.level & BPF_LOG_LEVEL) {
+ verbose(env, "returning from callee:\n");
+ print_verifier_state(env, callee, true);
+@@ -9292,6 +9687,24 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ /* clear everything in the callee */
+ free_func_state(callee);
+ state->frame[state->curframe--] = NULL;
++
++ /* for callbacks widen imprecise scalars to make programs like below verify:
++ *
++	 *   struct ctx { int i; };
++	 *   void cb(int idx, struct ctx *ctx) { ctx->i++; ... }
++	 *   ...
++	 *   struct ctx ctx = { .i = 0 };
++	 *   bpf_loop(100, cb, &ctx, 0);
++ *
++ * This is similar to what is done in process_iter_next_call() for open
++ * coded iterators.
++ */
++ prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL;
++ if (prev_st) {
++ err = widen_imprecise_scalars(env, prev_st, state);
++ if (err)
++ return err;
++ }
+ return 0;
+ }
+
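
[Editor's note: not part of the patch.] A compilable variant of the bpf_loop() pattern sketched in the comment above; widening keeps ctx.i an imprecise scalar so the looping states can converge. Section and symbol names are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct loop_ctx { int i; };

    static long cb(__u32 idx, void *data)
    {
            struct loop_ctx *lc = data;

            lc->i++;        /* scalar changes every iteration */
            return 0;       /* 0 == keep looping */
    }

    SEC("tc")
    int widen_demo(struct __sk_buff *skb)
    {
            struct loop_ctx lc = { .i = 0 };

            bpf_loop(100, cb, &lc, 0);
            return lc.i & 1; /* use the result so it is not optimized out */
    }

    char _license[] SEC("license") = "GPL";
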
+@@ -9673,24 +10086,37 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ }
+ break;
+ case BPF_FUNC_for_each_map_elem:
+- err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+- set_map_elem_callback_state);
++ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
++ set_map_elem_callback_state);
+ break;
+ case BPF_FUNC_timer_set_callback:
+- err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+- set_timer_callback_state);
++ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
++ set_timer_callback_state);
+ break;
+ case BPF_FUNC_find_vma:
+- err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+- set_find_vma_callback_state);
++ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
++ set_find_vma_callback_state);
+ break;
+ case BPF_FUNC_snprintf:
+ err = check_bpf_snprintf_call(env, regs);
+ break;
+ case BPF_FUNC_loop:
+ update_loop_inline_state(env, meta.subprogno);
+- err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+- set_loop_callback_state);
++		/* The verifier relies on the R1 value to determine whether the
++		 * bpf_loop() iteration is finished, thus mark it precise.
++		 */
++ err = mark_chain_precision(env, BPF_REG_1);
++ if (err)
++ return err;
++ if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) {
++ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
++ set_loop_callback_state);
++ } else {
++ cur_func(env)->callback_depth = 0;
++ if (env->log.level & BPF_LOG_LEVEL2)
++ verbose(env, "frame%d bpf_loop iteration limit reached\n",
++ env->cur_state->curframe);
++ }
+ break;
+ case BPF_FUNC_dynptr_from_mem:
+ if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
+@@ -9769,8 +10195,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ break;
+ }
+ case BPF_FUNC_user_ringbuf_drain:
+- err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+- set_user_ringbuf_callback_state);
++ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
++ set_user_ringbuf_callback_state);
+ break;
+ }
+
+@@ -10620,7 +11046,7 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
+ btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
+ }
+
+-static bool is_callback_calling_kfunc(u32 btf_id)
++static bool is_sync_callback_calling_kfunc(u32 btf_id)
+ {
+ return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
+ }
+@@ -11202,6 +11628,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ break;
+ }
+ case KF_ARG_PTR_TO_CALLBACK:
++ if (reg->type != PTR_TO_FUNC) {
++ verbose(env, "arg%d expected pointer to func\n", i);
++ return -EINVAL;
++ }
+ meta->subprogno = reg->subprogno;
+ break;
+ case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
+@@ -11320,6 +11750,21 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ return -EACCES;
+ }
+
++ /* Check the arguments */
++ err = check_kfunc_args(env, &meta, insn_idx);
++ if (err < 0)
++ return err;
++
++ if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
++ err = push_callback_call(env, insn, insn_idx, meta.subprogno,
++ set_rbtree_add_callback_state);
++ if (err) {
++ verbose(env, "kfunc %s#%d failed callback verification\n",
++ func_name, meta.func_id);
++ return err;
++ }
++ }
++
+ rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
+ rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
+
+@@ -11354,10 +11799,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ return -EINVAL;
+ }
+
+- /* Check the arguments */
+- err = check_kfunc_args(env, &meta, insn_idx);
+- if (err < 0)
+- return err;
+ /* In case of release function, we get register number of refcounted
+ * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
+ */
+@@ -11391,16 +11832,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ }
+ }
+
+- if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+- err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+- set_rbtree_add_callback_state);
+- if (err) {
+- verbose(env, "kfunc %s#%d failed callback verification\n",
+- func_name, meta.func_id);
+- return err;
+- }
+- }
+-
+ for (i = 0; i < CALLER_SAVED_REGS; i++)
+ mark_reg_not_init(env, regs, caller_saved[i]);
+
+@@ -11571,6 +12002,17 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ regs[BPF_REG_0].btf = desc_btf;
+ regs[BPF_REG_0].type = PTR_TO_BTF_ID;
+ regs[BPF_REG_0].btf_id = ptr_type_id;
++
++ if (is_iter_next_kfunc(&meta)) {
++ struct bpf_reg_state *cur_iter;
++
++ cur_iter = get_iter_from_state(env->cur_state, &meta);
++
++ if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */
++ regs[BPF_REG_0].type |= MEM_RCU;
++ else
++ regs[BPF_REG_0].type |= PTR_TRUSTED;
++ }
+ }
+
+ if (is_kfunc_ret_null(&meta)) {
+@@ -12072,6 +12514,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ }
+
+ switch (base_type(ptr_reg->type)) {
++ case PTR_TO_FLOW_KEYS:
++ if (known)
++ break;
++ fallthrough;
+ case CONST_PTR_TO_MAP:
+ /* smin_val represents the known value */
+ if (known && smin_val == 0 && opcode == BPF_ADD)
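
[Editor's note: not part of the patch.] The PTR_TO_FLOW_KEYS case added above allows pointer arithmetic on a flow dissector's keys pointer only when the offset is a known constant; a variable offset now falls through to the rejection paths. An illustrative sketch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("flow_dissector")
    int dissect(struct __sk_buff *skb)
    {
            struct bpf_flow_keys *keys = skb->flow_keys;
            char *p = (char *)keys;

            p += offsetof(struct bpf_flow_keys, thoff); /* constant offset: accepted */
            /* p += skb->len & 0x7;  -- variable offset: now rejected */
            if (*(__u16 *)p > 1500)
                    return BPF_DROP;
            return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";
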
+@@ -14135,6 +14581,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ *insn_idx))
+ return -EFAULT;
++ if (env->log.level & BPF_LOG_LEVEL)
++ print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ *insn_idx += insn->off;
+ return 0;
+ } else if (pred == 0) {
+@@ -14147,6 +14595,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ *insn_idx + insn->off + 1,
+ *insn_idx))
+ return -EFAULT;
++ if (env->log.level & BPF_LOG_LEVEL)
++ print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ return 0;
+ }
+
+@@ -14679,21 +15129,6 @@ enum {
+ BRANCH = 2,
+ };
+
+-static u32 state_htab_size(struct bpf_verifier_env *env)
+-{
+- return env->prog->len;
+-}
+-
+-static struct bpf_verifier_state_list **explored_state(
+- struct bpf_verifier_env *env,
+- int idx)
+-{
+- struct bpf_verifier_state *cur = env->cur_state;
+- struct bpf_func_state *state = cur->frame[cur->curframe];
+-
+- return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
+-}
+-
+ static void mark_prune_point(struct bpf_verifier_env *env, int idx)
+ {
+ env->insn_aux_data[idx].prune_point = true;
+@@ -14714,6 +15149,15 @@ static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
+ return env->insn_aux_data[insn_idx].force_checkpoint;
+ }
+
++static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
++{
++ env->insn_aux_data[idx].calls_callback = true;
++}
++
++static bool calls_callback(struct bpf_verifier_env *env, int insn_idx)
++{
++ return env->insn_aux_data[insn_idx].calls_callback;
++}
+
+ enum {
+ DONE_EXPLORING = 0,
+@@ -14725,8 +15169,7 @@ enum {
+ * w - next instruction
+ * e - edge
+ */
+-static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+- bool loop_ok)
++static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
+ {
+ int *insn_stack = env->cfg.insn_stack;
+ int *insn_state = env->cfg.insn_state;
+@@ -14758,7 +15201,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
+ insn_stack[env->cfg.cur_stack++] = w;
+ return KEEP_EXPLORING;
+ } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
+- if (loop_ok && env->bpf_capable)
++ if (env->bpf_capable)
+ return DONE_EXPLORING;
+ verbose_linfo(env, t, "%d: ", t);
+ verbose_linfo(env, w, "%d: ", w);
+@@ -14778,24 +15221,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ struct bpf_verifier_env *env,
+ bool visit_callee)
+ {
+- int ret;
++ int ret, insn_sz;
+
+- ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
++ insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
++ ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
+ if (ret)
+ return ret;
+
+- mark_prune_point(env, t + 1);
++ mark_prune_point(env, t + insn_sz);
+ /* when we exit from subprog, we need to record non-linear history */
+- mark_jmp_point(env, t + 1);
++ mark_jmp_point(env, t + insn_sz);
+
+ if (visit_callee) {
+ mark_prune_point(env, t);
+- ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
+- /* It's ok to allow recursion from CFG point of
+- * view. __check_func_call() will do the actual
+- * check.
+- */
+- bpf_pseudo_func(insns + t));
++ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+ }
+ return ret;
+ }
+@@ -14808,15 +15247,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ static int visit_insn(int t, struct bpf_verifier_env *env)
+ {
+ struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
+- int ret, off;
++ int ret, off, insn_sz;
+
+ if (bpf_pseudo_func(insn))
+ return visit_func_call_insn(t, insns, env, true);
+
+ /* All non-branch instructions have a single fall-through edge. */
+ if (BPF_CLASS(insn->code) != BPF_JMP &&
+- BPF_CLASS(insn->code) != BPF_JMP32)
+- return push_insn(t, t + 1, FALLTHROUGH, env, false);
++ BPF_CLASS(insn->code) != BPF_JMP32) {
++ insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
++ return push_insn(t, t + insn_sz, FALLTHROUGH, env);
++ }
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+@@ -14830,6 +15271,21 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
+ * async state will be pushed for further exploration.
+ */
+ mark_prune_point(env, t);
++		/* For functions that invoke callbacks it is not known how many times
++		 * the callback will be called. The verifier models callback-calling
++		 * functions by repeatedly visiting callback bodies and returning to
++		 * the originating call instruction.
++		 * To stop such iteration the verifier needs to identify when a state
++		 * identical to some state from a previous iteration is reached.
++		 * The check below forces creation of a checkpoint before a
++		 * callback-calling instruction to allow a search for such identical
++		 * states.
++		 */
++ if (is_sync_callback_calling_insn(insn)) {
++ mark_calls_callback(env, t);
++ mark_force_checkpoint(env, t);
++ mark_prune_point(env, t);
++ mark_jmp_point(env, t);
++ }
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ struct bpf_kfunc_call_arg_meta meta;
+
+@@ -14862,8 +15318,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
+ off = insn->imm;
+
+ /* unconditional jump with single edge */
+- ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
+- true);
++ ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
+ if (ret)
+ return ret;
+
+@@ -14876,11 +15331,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
+ /* conditional jump with two edges */
+ mark_prune_point(env, t);
+
+- ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
++ ret = push_insn(t, t + 1, FALLTHROUGH, env);
+ if (ret)
+ return ret;
+
+- return push_insn(t, t + insn->off + 1, BRANCH, env, true);
++ return push_insn(t, t + insn->off + 1, BRANCH, env);
+ }
+ }
+
+@@ -14935,11 +15390,21 @@ static int check_cfg(struct bpf_verifier_env *env)
+ }
+
+ for (i = 0; i < insn_cnt; i++) {
++ struct bpf_insn *insn = &env->prog->insnsi[i];
++
+ if (insn_state[i] != EXPLORED) {
+ verbose(env, "unreachable insn %d\n", i);
+ ret = -EINVAL;
+ goto err_free;
+ }
++ if (bpf_is_ldimm64(insn)) {
++ if (insn_state[i + 1] != 0) {
++ verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
++ ret = -EINVAL;
++ goto err_free;
++ }
++ i++; /* skip second half of ldimm64 */
++ }
+ }
+ ret = 0; /* cfg looks good */
+
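
[Editor's note: not part of the patch.] BPF_LD_IMM64 is the only two-slot eBPF instruction: its 64-bit immediate is split across two struct bpf_insn entries. The CFG walk above therefore steps by insn_sz and rejects jumps landing on the second (pseudo) half. A sketch of the encoding:

    #include <linux/bpf.h>

    /* r1 = 0x5566778811223344, occupying insns[i] and insns[i + 1] */
    struct bpf_insn ld64[2] = {
            { .code = BPF_LD | BPF_DW | BPF_IMM,
              .dst_reg = BPF_REG_1,
              .imm = 0x11223344 },   /* low 32 bits */
            { .imm = 0x55667788 },   /* high 32 bits; jumping here is invalid */
    };
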
+@@ -15494,18 +15959,14 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
+ struct bpf_verifier_state *cur)
+ {
+ struct bpf_verifier_state_list *sl;
+- int i;
+
+ sl = *explored_state(env, insn);
+ while (sl) {
+ if (sl->state.branches)
+ goto next;
+ if (sl->state.insn_idx != insn ||
+- sl->state.curframe != cur->curframe)
++ !same_callsites(&sl->state, cur))
+ goto next;
+- for (i = 0; i <= cur->curframe; i++)
+- if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
+- goto next;
+ clean_verifier_state(env, &sl->state);
+ next:
+ sl = sl->next;
+@@ -15523,8 +15984,11 @@ static bool regs_exact(const struct bpf_reg_state *rold,
+
+ /* Returns true if (rold safe implies rcur safe) */
+ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+- struct bpf_reg_state *rcur, struct bpf_idmap *idmap)
++ struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact)
+ {
++ if (exact)
++ return regs_exact(rold, rcur, idmap);
++
+ if (!(rold->live & REG_LIVE_READ))
+ /* explored state didn't use this */
+ return true;
+@@ -15641,7 +16105,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+ }
+
+ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+- struct bpf_func_state *cur, struct bpf_idmap *idmap)
++ struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
+ {
+ int i, spi;
+
+@@ -15654,7 +16118,13 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+
+ spi = i / BPF_REG_SIZE;
+
+- if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
++ if (exact &&
++ (i >= cur->allocated_stack ||
++ old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
++ cur->stack[spi].slot_type[i % BPF_REG_SIZE]))
++ return false;
++
++ if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) {
+ i += BPF_REG_SIZE - 1;
+ /* explored state didn't use this */
+ continue;
+@@ -15704,7 +16174,7 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+ * return false to continue verification of this path
+ */
+ if (!regsafe(env, &old->stack[spi].spilled_ptr,
+- &cur->stack[spi].spilled_ptr, idmap))
++ &cur->stack[spi].spilled_ptr, idmap, exact))
+ return false;
+ break;
+ case STACK_DYNPTR:
+@@ -15786,16 +16256,19 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
+ * the current state will reach 'bpf_exit' instruction safely
+ */
+ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
+- struct bpf_func_state *cur)
++ struct bpf_func_state *cur, bool exact)
+ {
+ int i;
+
++ if (old->callback_depth > cur->callback_depth)
++ return false;
++
+ for (i = 0; i < MAX_BPF_REG; i++)
+ if (!regsafe(env, &old->regs[i], &cur->regs[i],
+- &env->idmap_scratch))
++ &env->idmap_scratch, exact))
+ return false;
+
+- if (!stacksafe(env, old, cur, &env->idmap_scratch))
++ if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
+ return false;
+
+ if (!refsafe(old, cur, &env->idmap_scratch))
+@@ -15804,17 +16277,23 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
+ return true;
+ }
+
++static void reset_idmap_scratch(struct bpf_verifier_env *env)
++{
++ env->idmap_scratch.tmp_id_gen = env->id_gen;
++ memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map));
++}
++
+ static bool states_equal(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *old,
+- struct bpf_verifier_state *cur)
++ struct bpf_verifier_state *cur,
++ bool exact)
+ {
+ int i;
+
+ if (old->curframe != cur->curframe)
+ return false;
+
+- env->idmap_scratch.tmp_id_gen = env->id_gen;
+- memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map));
++ reset_idmap_scratch(env);
+
+ /* Verification state from speculative execution simulation
+ * must never prune a non-speculative execution one.
+@@ -15844,7 +16323,7 @@ static bool states_equal(struct bpf_verifier_env *env,
+ for (i = 0; i <= old->curframe; i++) {
+ if (old->frame[i]->callsite != cur->frame[i]->callsite)
+ return false;
+- if (!func_states_equal(env, old->frame[i], cur->frame[i]))
++ if (!func_states_equal(env, old->frame[i], cur->frame[i], exact))
+ return false;
+ }
+ return true;
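
[Editor's note: not part of the patch.] With exact set, regsafe() above short-circuits to regs_exact(). For reference, that helper is recalled to read roughly as follows; field and helper names may differ slightly in this tree:

    static bool regs_exact(const struct bpf_reg_state *rold,
                           const struct bpf_reg_state *rcur,
                           struct bpf_idmap *idmap)
    {
            /* byte-identical up to the liveness bookkeeping, with ids mapped */
            return memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0 &&
                   check_ids(rold->id, rcur->id, idmap) &&
                   check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
    }
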
+@@ -16098,10 +16577,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ {
+ struct bpf_verifier_state_list *new_sl;
+ struct bpf_verifier_state_list *sl, **pprev;
+- struct bpf_verifier_state *cur = env->cur_state, *new;
+- int i, j, err, states_cnt = 0;
++ struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry;
++ int i, j, n, err, states_cnt = 0;
+ bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
+ bool add_new_state = force_new_state;
++ bool force_exact;
+
+ /* bpf progs typically have pruning point every 4 instructions
+ * http://vger.kernel.org/bpfconf2019.html#session-1
+@@ -16154,9 +16634,33 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ * It's safe to assume that iterator loop will finish, taking into
+ * account iter_next() contract of eventually returning
+ * sticky NULL result.
++ *
++ * Note, that states have to be compared exactly in this case because
++ * read and precision marks might not be finalized inside the loop.
++ * E.g. as in the program below:
++ *
++ * 1. r7 = -16
++ * 2. r6 = bpf_get_prandom_u32()
++ * 3. while (bpf_iter_num_next(&fp[-8])) {
++ * 4. if (r6 != 42) {
++ * 5. r7 = -32
++ * 6. r6 = bpf_get_prandom_u32()
++ * 7. continue
++ * 8. }
++ * 9. r0 = r10
++ * 10. r0 += r7
++ * 11. r8 = *(u64 *)(r0 + 0)
++ * 12. r6 = bpf_get_prandom_u32()
++ * 13. }
++ *
++	 * Here the verifier would first visit path 1-3, create a checkpoint at 3
++	 * with r7=-16, then continue to 4-7,3. The existing checkpoint at 3 does
++	 * not yet have a read or precision mark for r7, so an inexact state
++	 * comparison would discard the current state with r7=-32
++	 * => the unsafe memory access at 11 would not be caught.
+ */
+ if (is_iter_next_insn(env, insn_idx)) {
+- if (states_equal(env, &sl->state, cur)) {
++ if (states_equal(env, &sl->state, cur, true)) {
+ struct bpf_func_state *cur_frame;
+ struct bpf_reg_state *iter_state, *iter_reg;
+ int spi;
+@@ -16172,17 +16676,29 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ */
+ spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
+ iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
+- if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE)
++ if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
++ update_loop_entry(cur, &sl->state);
+ goto hit;
++ }
+ }
+ goto skip_inf_loop_check;
+ }
++ if (calls_callback(env, insn_idx)) {
++ if (states_equal(env, &sl->state, cur, true))
++ goto hit;
++ goto skip_inf_loop_check;
++ }
+ /* attempt to detect infinite loop to avoid unnecessary doomed work */
+ if (states_maybe_looping(&sl->state, cur) &&
+- states_equal(env, &sl->state, cur) &&
+- !iter_active_depths_differ(&sl->state, cur)) {
++ states_equal(env, &sl->state, cur, false) &&
++ !iter_active_depths_differ(&sl->state, cur) &&
++ sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
+ verbose_linfo(env, insn_idx, "; ");
+ verbose(env, "infinite loop detected at insn %d\n", insn_idx);
++ verbose(env, "cur state:");
++ print_verifier_state(env, cur->frame[cur->curframe], true);
++ verbose(env, "old state:");
++ print_verifier_state(env, sl->state.frame[cur->curframe], true);
+ return -EINVAL;
+ }
+ /* if the verifier is processing a loop, avoid adding new state
+@@ -16204,7 +16720,36 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ add_new_state = false;
+ goto miss;
+ }
+- if (states_equal(env, &sl->state, cur)) {
++ /* If sl->state is a part of a loop and this loop's entry is a part of
++ * current verification path then states have to be compared exactly.
++ * 'force_exact' is needed to catch the following case:
++ *
++ * initial Here state 'succ' was processed first,
++ * | it was eventually tracked to produce a
++ * V state identical to 'hdr'.
++ * .---------> hdr All branches from 'succ' had been explored
++ * | | and thus 'succ' has its .branches == 0.
++ * | V
++ * | .------... Suppose states 'cur' and 'succ' correspond
++ * | | | to the same instruction + callsites.
++ * | V V In such case it is necessary to check
++ * | ... ... if 'succ' and 'cur' are states_equal().
++ * | | | If 'succ' and 'cur' are a part of the
++ * | V V same loop exact flag has to be set.
++ * | succ <- cur To check if that is the case, verify
++ * | | if loop entry of 'succ' is in current
++ * | V DFS path.
++ * | ...
++ * | |
++ * '----'
++ *
++ * Additional details are in the comment before get_loop_entry().
++ */
++ loop_entry = get_loop_entry(&sl->state);
++ force_exact = loop_entry && loop_entry->branches > 0;
++ if (states_equal(env, &sl->state, cur, force_exact)) {
++ if (force_exact)
++ update_loop_entry(cur, loop_entry);
+ hit:
+ sl->hit_cnt++;
+ /* reached equivalent register/stack state,
+@@ -16243,13 +16788,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ * to keep checking from state equivalence point of view.
+ * Higher numbers increase max_states_per_insn and verification time,
+ * but do not meaningfully decrease insn_processed.
++ * 'n' controls how many times state could miss before eviction.
++ * Use bigger 'n' for checkpoints because evicting checkpoint states
++ * too early would hinder iterator convergence.
+ */
+- if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
++ n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3;
++ if (sl->miss_cnt > sl->hit_cnt * n + n) {
+ /* the state is unlikely to be useful. Remove it to
+ * speed up verification
+ */
+ *pprev = sl->next;
+- if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
++ if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE &&
++ !sl->state.used_as_loop_entry) {
+ u32 br = sl->state.branches;
+
+ WARN_ONCE(br,
+@@ -16318,6 +16868,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+
+ cur->parent = new;
+ cur->first_insn_idx = insn_idx;
++ cur->dfs_depth = new->dfs_depth + 1;
+ clear_jmp_history(cur);
+ new_sl->next = *explored_state(env, insn_idx);
+ *explored_state(env, insn_idx) = new_sl;
+@@ -17121,8 +17672,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
+ f = fdget(fd);
+ map = __bpf_map_get(f);
+ if (IS_ERR(map)) {
+- verbose(env, "fd %d is not pointing to valid bpf_map\n",
+- insn[0].imm);
++ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+ return PTR_ERR(map);
+ }
+
+@@ -17180,10 +17730,12 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
+ return -E2BIG;
+ }
+
++ if (env->prog->aux->sleepable)
++ atomic64_inc(&map->sleepable_refcnt);
+ /* hold the map. If the program is rejected by verifier,
+ * the map will be released by release_maps() or it
+ * will be used by the valid program until it's unloaded
+- * and all maps are released in free_used_maps()
++ * and all maps are released in bpf_free_used_maps()
+ */
+ bpf_map_inc(map);
+
+@@ -19641,6 +20193,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ if (!tr)
+ return -ENOMEM;
+
++ if (tgt_prog && tgt_prog->aux->tail_call_reachable)
++ tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
++
+ prog->aux->dst_trampoline = tr;
+ return 0;
+ }
+diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
+index c56071f150f2ae..5e17f01ced9fd2 100644
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -170,7 +170,8 @@ extern struct list_head cgroup_roots;
+
+ /* iterate across the hierarchies */
+ #define for_each_root(root) \
+- list_for_each_entry((root), &cgroup_roots, root_list)
++ list_for_each_entry_rcu((root), &cgroup_roots, root_list, \
++ lockdep_is_held(&cgroup_mutex))
+
+ /**
+ * for_each_subsys - iterate all enabled cgroup subsystems
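
[Editor's note: not part of the patch.] for_each_root() above becomes RCU-aware: walkers must now hold either cgroup_mutex or an RCU read lock. A sketch of the expected lockless-reader shape, with an illustrative function name:

    static struct cgroup_root *find_root_rcu(int hierarchy_id)
    {
            struct cgroup_root *root, *found = NULL;

            rcu_read_lock();
            for_each_root(root) {
                    if (root->hierarchy_id == hierarchy_id) {
                            found = root;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }
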
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 76db6c67e39a92..9cb00ebe9ac6d0 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -802,7 +802,7 @@ void cgroup1_release_agent(struct work_struct *work)
+ goto out_free;
+
+ ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+- if (ret < 0 || ret >= PATH_MAX)
++ if (ret < 0)
+ goto out_free;
+
+ argv[0] = agentbuf;
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 1fb7f562289d53..660817c125e73d 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1313,7 +1313,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)
+
+ void cgroup_free_root(struct cgroup_root *root)
+ {
+- kfree(root);
++ kfree_rcu(root, rcu);
+ }
+
+ static void cgroup_destroy_root(struct cgroup_root *root)
+@@ -1346,7 +1346,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
+ spin_unlock_irq(&css_set_lock);
+
+ if (!list_empty(&root->root_list)) {
+- list_del(&root->root_list);
++ list_del_rcu(&root->root_list);
+ cgroup_root_count--;
+ }
+
+@@ -1386,7 +1386,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
+ }
+ }
+
+- BUG_ON(!res_cgroup);
++ /*
++ * If cgroup_mutex is not held, the cgrp_cset_link will be freed
++ * before we remove the cgroup root from the root_list. Consequently,
++ * when accessing a cgroup root, the cset_link may have already been
++ * freed, resulting in a NULL res_cgroup. However, by holding the
++ * cgroup_mutex, we ensure that res_cgroup can't be NULL.
++ * If we don't hold cgroup_mutex in the caller, we must do the NULL
++ * check.
++ */
+ return res_cgroup;
+ }
+
+@@ -1445,7 +1453,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
+ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
+ struct cgroup_root *root)
+ {
+- lockdep_assert_held(&cgroup_mutex);
+ lockdep_assert_held(&css_set_lock);
+
+ return __cset_cgroup_from_root(cset, root);
+@@ -1453,7 +1460,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
+
+ /*
+ * Return the cgroup for "task" from the given hierarchy. Must be
+- * called with cgroup_mutex and css_set_lock held.
++ * called with css_set_lock held to prevent task's groups from being modified.
++ * Must be called with either cgroup_mutex or rcu read lock to prevent the
++ * cgroup root from being destroyed.
+ */
+ struct cgroup *task_cgroup_from_root(struct task_struct *task,
+ struct cgroup_root *root)
+@@ -1719,13 +1728,13 @@ static int css_populate_dir(struct cgroup_subsys_state *css)
+
+ if (!css->ss) {
+ if (cgroup_on_dfl(cgrp)) {
+- ret = cgroup_addrm_files(&cgrp->self, cgrp,
++ ret = cgroup_addrm_files(css, cgrp,
+ cgroup_base_files, true);
+ if (ret < 0)
+ return ret;
+
+ if (cgroup_psi_enabled()) {
+- ret = cgroup_addrm_files(&cgrp->self, cgrp,
++ ret = cgroup_addrm_files(css, cgrp,
+ cgroup_psi_files, true);
+ if (ret < 0)
+ return ret;
+@@ -1820,9 +1829,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+ rcu_assign_pointer(dcgrp->subsys[ssid], css);
+ ss->root = dst_root;
+- css->cgroup = dcgrp;
+
+ spin_lock_irq(&css_set_lock);
++ css->cgroup = dcgrp;
+ WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+ list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+ e_cset_node[ss->id]) {
+@@ -1887,7 +1896,7 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+ len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+ spin_unlock_irq(&css_set_lock);
+
+- if (len >= PATH_MAX)
++ if (len == -E2BIG)
+ len = -ERANGE;
+ else if (len > 0) {
+ seq_escape(sf, buf, " \t\n\\");
+@@ -2014,7 +2023,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
+ struct cgroup_root *root = ctx->root;
+ struct cgroup *cgrp = &root->cgrp;
+
+- INIT_LIST_HEAD(&root->root_list);
++ INIT_LIST_HEAD_RCU(&root->root_list);
+ atomic_set(&root->nr_cgrps, 1);
+ cgrp->root = root;
+ init_cgroup_housekeeping(cgrp);
+@@ -2097,7 +2106,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
+ * care of subsystems' refcounts, which are explicitly dropped in
+ * the failure exit path.
+ */
+- list_add(&root->root_list, &cgroup_roots);
++ list_add_rcu(&root->root_list, &cgroup_roots);
+ cgroup_root_count++;
+
+ /*
+@@ -3867,14 +3876,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+ return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
+ }
+
+-static int cgroup_pressure_open(struct kernfs_open_file *of)
+-{
+- if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
+- return -EPERM;
+-
+- return 0;
+-}
+-
+ static void cgroup_pressure_release(struct kernfs_open_file *of)
+ {
+ struct cgroup_file_ctx *ctx = of->priv;
+@@ -5275,7 +5276,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "io.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_io_pressure_show,
+ .write = cgroup_io_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5284,7 +5284,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "memory.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_memory_pressure_show,
+ .write = cgroup_memory_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5293,7 +5292,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "cpu.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_cpu_pressure_show,
+ .write = cgroup_cpu_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5303,7 +5301,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "irq.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_irq_pressure_show,
+ .write = cgroup_irq_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -6289,7 +6286,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
+ retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
+ current->nsproxy->cgroup_ns);
+- if (retval >= PATH_MAX)
++ if (retval == -E2BIG)
+ retval = -ENAMETOOLONG;
+ if (retval < 0)
+ goto out_unlock;
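
[Editor's note: not part of the patch.] The retval checks in this file change from 'retval >= PATH_MAX' to 'retval == -E2BIG', assuming kernfs_path_from_node() (and the cgroup_path_ns*() wrappers over it) now report buffer overflow as an errno instead of returning an over-long length. A sketch of the new calling convention, with an illustrative helper name:

    static int path_or_err(struct kernfs_node *kn, char *buf, size_t sz)
    {
            int len = kernfs_path_from_node(kn, NULL, buf, sz);

            if (len == -E2BIG)              /* overflow is now an errno */
                    return -ENAMETOOLONG;
            return len < 0 ? len : 0;       /* other errors pass through */
    }
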
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 58ec88efa4f82c..3646426c69e253 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -21,6 +21,7 @@
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
++#include "cgroup-internal.h"
+
+ #include <linux/cpu.h>
+ #include <linux/cpumask.h>
+@@ -1304,13 +1305,23 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
+ *
+ * Changing load balance flag will automatically call
+ * rebuild_sched_domains_locked().
++ * This function is for cgroup v2 only.
+ */
+ static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
+ {
+ int new_prs = cs->partition_root_state;
+- bool new_lb = (new_prs != PRS_ISOLATED);
+ bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
++ bool new_lb;
+
++ /*
++ * If cs is not a valid partition root, the load balance state
++ * will follow its parent.
++ */
++ if (new_prs > 0) {
++ new_lb = (new_prs != PRS_ISOLATED);
++ } else {
++ new_lb = is_sched_load_balance(parent_cs(cs));
++ }
+ if (new_lb != !!is_sched_load_balance(cs)) {
+ rebuild_domains = true;
+ if (new_lb)
+@@ -1938,7 +1949,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ }
+ out_free:
+ free_cpumasks(NULL, &tmp);
+- return 0;
++ return retval;
+ }
+
+ /*
+@@ -2219,7 +2230,7 @@ bool current_cpuset_is_being_rebound(void)
+ static int update_relax_domain_level(struct cpuset *cs, s64 val)
+ {
+ #ifdef CONFIG_SMP
+- if (val < -1 || val >= sched_domain_level_max)
++ if (val < -1 || val > sched_domain_level_max + 1)
+ return -EINVAL;
+ #endif
+
+@@ -4283,11 +4294,15 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ if (!buf)
+ goto out;
+
+- css = task_get_css(tsk, cpuset_cgrp_id);
+- retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
+- current->nsproxy->cgroup_ns);
+- css_put(css);
+- if (retval >= PATH_MAX)
++ rcu_read_lock();
++ spin_lock_irq(&css_set_lock);
++ css = task_css(tsk, cpuset_cgrp_id);
++ retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
++ current->nsproxy->cgroup_ns);
++ spin_unlock_irq(&css_set_lock);
++ rcu_read_unlock();
++
++ if (retval == -E2BIG)
+ retval = -ENAMETOOLONG;
+ if (retval < 0)
+ goto out_free;
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 122dacb3a44390..66d1708042a72b 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -66,9 +66,15 @@ static struct freezer *parent_freezer(struct freezer *freezer)
+ bool cgroup_freezing(struct task_struct *task)
+ {
+ bool ret;
++ unsigned int state;
+
+ rcu_read_lock();
+- ret = task_freezer(task)->state & CGROUP_FREEZING;
++ /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
++ * !FROZEN check is required, because the FREEZING bit is not cleared
++ * when the state FROZEN is reached.
++ */
++ state = task_freezer(task)->state;
++ ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
+ rcu_read_unlock();
+
+ return ret;
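
[Editor's note: not part of the patch.] The FREEZING-and-not-FROZEN test above relies on the legacy freezer's state bits, recalled below; FREEZING stays set after FROZEN is reached, which is why FREEZING alone does not mean "still freezing":

    enum freezer_state_flags {
            CGROUP_FREEZER_ONLINE   = (1 << 0), /* freezer is fully online */
            CGROUP_FREEZING_SELF    = (1 << 1), /* this freezer is freezing */
            CGROUP_FREEZING_PARENT  = (1 << 2), /* the parent freezer is freezing */
            CGROUP_FROZEN           = (1 << 3), /* this and its descendants frozen */

            /* mask for all FREEZING flags */
            CGROUP_FREEZING         = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
    };
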
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 6de7c6bb74eeea..0c72b94ed076a3 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -659,11 +659,19 @@ static inline bool cpu_smt_thread_allowed(unsigned int cpu)
+ #endif
+ }
+
+-static inline bool cpu_smt_allowed(unsigned int cpu)
++static inline bool cpu_bootable(unsigned int cpu)
+ {
+ if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
+ return true;
+
++ /* All CPUs are bootable if controls are not configured */
++ if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
++ return true;
++
++ /* All CPUs are bootable if CPU is not SMT capable */
++ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return true;
++
+ if (topology_is_primary_thread(cpu))
+ return true;
+
+@@ -685,7 +693,7 @@ bool cpu_smt_possible(void)
+ EXPORT_SYMBOL_GPL(cpu_smt_possible);
+
+ #else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++static inline bool cpu_bootable(unsigned int cpu) { return true; }
+ #endif
+
+ static inline enum cpuhp_state
+@@ -788,10 +796,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu)
+ * SMT soft disabling on X86 requires to bring the CPU out of the
+ * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
+ * CPU marked itself as booted_once in notify_cpu_starting() so the
+- * cpu_smt_allowed() check will now return false if this is not the
++ * cpu_bootable() check will now return false if this is not the
+ * primary sibling.
+ */
+- if (!cpu_smt_allowed(cpu))
++ if (!cpu_bootable(cpu))
+ return -ECANCELED;
+ return 0;
+ }
+@@ -1515,11 +1523,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ /*
+ * Ensure that the control task does not run on the to be offlined
+ * CPU to prevent a deadlock against cfs_b->period_timer.
++	 * Also keep at least one housekeeping CPU online to avoid generating
++ * an empty sched_domain span.
+ */
+- cpu = cpumask_any_but(cpu_online_mask, cpu);
+- if (cpu >= nr_cpu_ids)
+- return -EBUSY;
+- return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++ for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
++ if (cpu != work.cpu)
++ return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++ }
++ return -EBUSY;
+ }
+
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+@@ -1741,7 +1752,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
+ err = -EBUSY;
+ goto out;
+ }
+- if (!cpu_smt_allowed(cpu)) {
++ if (!cpu_bootable(cpu)) {
+ err = -EPERM;
+ goto out;
+ }
+@@ -1896,6 +1907,9 @@ static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return fals
+
+ void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
+ {
++ if (!setup_max_cpus)
++ return;
++
+ /* Try parallel bringup optimization if enabled */
+ if (cpuhp_bringup_cpus_parallel(setup_max_cpus))
+ return;
+@@ -2098,7 +2112,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ [CPUHP_HRTIMERS_PREPARE] = {
+ .name = "hrtimers:prepare",
+ .startup.single = hrtimers_prepare_cpu,
+- .teardown.single = hrtimers_dead_cpu,
++ .teardown.single = NULL,
+ },
+ [CPUHP_SMPCFD_PREPARE] = {
+ .name = "smpcfd:prepare",
+@@ -2190,6 +2204,12 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ .startup.single = NULL,
+ .teardown.single = smpcfd_dying_cpu,
+ },
++ [CPUHP_AP_HRTIMERS_DYING] = {
++ .name = "hrtimers:dying",
++ .startup.single = NULL,
++ .teardown.single = hrtimers_cpu_dying,
++ },
++
+ /* Entry state on starting. Interrupts enabled from here on. Transient
+ * state for synchronsization */
+ [CPUHP_AP_ONLINE] = {
+@@ -2478,7 +2498,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+ * The caller needs to hold cpus read locked while calling this function.
+ * Return:
+ * On success:
+- * Positive state number if @state is CPUHP_AP_ONLINE_DYN;
++ * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
+ * 0 for all other states
+ * On failure: proper (negative) error code
+ */
+@@ -2501,7 +2521,7 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+ ret = cpuhp_store_callbacks(state, name, startup, teardown,
+ multi_instance);
+
+- dynstate = state == CPUHP_AP_ONLINE_DYN;
++ dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
+ if (ret > 0 && dynstate) {
+ state = ret;
+ ret = 0;
+@@ -2532,8 +2552,8 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+ out:
+ mutex_unlock(&cpuhp_state_mutex);
+ /*
+- * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+- * dynamically allocated state in case of success.
++ * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
++ * return the dynamically allocated state in case of success.
+ */
+ if (!ret && dynstate)
+ return state;
+@@ -2708,6 +2728,16 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ return ret;
+ }
+
++/*
++ * Check if the core a CPU belongs to is online.
++ */
++#if !defined(topology_is_core_online)
++static inline bool topology_is_core_online(unsigned int cpu)
++{
++ return true;
++}
++#endif
++
+ int cpuhp_smt_enable(void)
+ {
+ int cpu, ret = 0;
+@@ -2718,7 +2748,7 @@ int cpuhp_smt_enable(void)
+ /* Skip online CPUs and CPUs on offline nodes */
+ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+ continue;
+- if (!cpu_smt_thread_allowed(cpu))
++ if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
+ continue;
+ ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+ if (ret)
+@@ -3180,6 +3210,7 @@ void __init boot_cpu_hotplug_init(void)
+ this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
+ }
+
++#ifdef CONFIG_CPU_MITIGATIONS
+ /*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+@@ -3190,8 +3221,7 @@ enum cpu_mitigations {
+ CPU_MITIGATIONS_AUTO_NOSMT,
+ };
+
+-static enum cpu_mitigations cpu_mitigations __ro_after_init =
+- CPU_MITIGATIONS_AUTO;
++static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+
+ static int __init mitigations_parse_cmdline(char *arg)
+ {
+@@ -3207,7 +3237,6 @@ static int __init mitigations_parse_cmdline(char *arg)
+
+ return 0;
+ }
+-early_param("mitigations", mitigations_parse_cmdline);
+
+ /* mitigations=off */
+ bool cpu_mitigations_off(void)
+@@ -3222,3 +3251,11 @@ bool cpu_mitigations_auto_nosmt(void)
+ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+ }
+ EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
++#else
++static int __init mitigations_parse_cmdline(char *arg)
++{
++ pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
++ return 0;
++}
++#endif
++early_param("mitigations", mitigations_parse_cmdline);
+diff --git a/kernel/crash_core.c b/kernel/crash_core.c
+index 2f675ef045d40d..cef8e07bc52850 100644
+--- a/kernel/crash_core.c
++++ b/kernel/crash_core.c
+@@ -660,7 +660,7 @@ static int __init crash_save_vmcoreinfo_init(void)
+ VMCOREINFO_OFFSET(list_head, prev);
+ VMCOREINFO_OFFSET(vmap_area, va_start);
+ VMCOREINFO_OFFSET(vmap_area, list);
+- VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER + 1);
++ VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
+ log_buf_vmcoreinfo_setup();
+ VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
+ VMCOREINFO_NUMBER(NR_FREE_PAGES);
+@@ -675,11 +675,10 @@ static int __init crash_save_vmcoreinfo_init(void)
+ VMCOREINFO_NUMBER(PG_head_mask);
+ #define PAGE_BUDDY_MAPCOUNT_VALUE (~PG_buddy)
+ VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+-#ifdef CONFIG_HUGETLB_PAGE
+- VMCOREINFO_NUMBER(PG_hugetlb);
++#define PAGE_HUGETLB_MAPCOUNT_VALUE (~PG_hugetlb)
++ VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
+ #define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline)
+ VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
+-#endif
+
+ #ifdef CONFIG_KALLSYMS
+ VMCOREINFO_SYMBOL(kallsyms_names);
+diff --git a/kernel/cred.c b/kernel/cred.c
+index 98cb4eca23fb2f..64404d51c05278 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -43,10 +43,6 @@ static struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+ */
+ struct cred init_cred = {
+ .usage = ATOMIC_INIT(4),
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- .subscribers = ATOMIC_INIT(2),
+- .magic = CRED_MAGIC,
+-#endif
+ .uid = GLOBAL_ROOT_UID,
+ .gid = GLOBAL_ROOT_GID,
+ .suid = GLOBAL_ROOT_UID,
+@@ -66,31 +62,6 @@ struct cred init_cred = {
+ .ucounts = &init_ucounts,
+ };
+
+-static inline void set_cred_subscribers(struct cred *cred, int n)
+-{
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- atomic_set(&cred->subscribers, n);
+-#endif
+-}
+-
+-static inline int read_cred_subscribers(const struct cred *cred)
+-{
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- return atomic_read(&cred->subscribers);
+-#else
+- return 0;
+-#endif
+-}
+-
+-static inline void alter_cred_subscribers(const struct cred *_cred, int n)
+-{
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- struct cred *cred = (struct cred *) _cred;
+-
+- atomic_add(n, &cred->subscribers);
+-#endif
+-}
+-
+ /*
+ * The RCU callback to actually dispose of a set of credentials
+ */
+@@ -100,20 +71,9 @@ static void put_cred_rcu(struct rcu_head *rcu)
+
+ kdebug("put_cred_rcu(%p)", cred);
+
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- if (cred->magic != CRED_MAGIC_DEAD ||
+- atomic_read(&cred->usage) != 0 ||
+- read_cred_subscribers(cred) != 0)
+- panic("CRED: put_cred_rcu() sees %p with"
+- " mag %x, put %p, usage %d, subscr %d\n",
+- cred, cred->magic, cred->put_addr,
+- atomic_read(&cred->usage),
+- read_cred_subscribers(cred));
+-#else
+- if (atomic_read(&cred->usage) != 0)
+- panic("CRED: put_cred_rcu() sees %p with usage %d\n",
+- cred, atomic_read(&cred->usage));
+-#endif
++ if (atomic_long_read(&cred->usage) != 0)
++ panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
++ cred, atomic_long_read(&cred->usage));
+
+ security_cred_free(cred);
+ key_put(cred->session_keyring);
+@@ -137,16 +97,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
+ */
+ void __put_cred(struct cred *cred)
+ {
+- kdebug("__put_cred(%p{%d,%d})", cred,
+- atomic_read(&cred->usage),
+- read_cred_subscribers(cred));
+-
+- BUG_ON(atomic_read(&cred->usage) != 0);
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- BUG_ON(read_cred_subscribers(cred) != 0);
+- cred->magic = CRED_MAGIC_DEAD;
+- cred->put_addr = __builtin_return_address(0);
+-#endif
++ kdebug("__put_cred(%p{%ld})", cred,
++ atomic_long_read(&cred->usage));
++
++ BUG_ON(atomic_long_read(&cred->usage) != 0);
+ BUG_ON(cred == current->cred);
+ BUG_ON(cred == current->real_cred);
+
+@@ -164,20 +118,15 @@ void exit_creds(struct task_struct *tsk)
+ {
+ struct cred *cred;
+
+- kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
+- atomic_read(&tsk->cred->usage),
+- read_cred_subscribers(tsk->cred));
++ kdebug("exit_creds(%u,%p,%p,{%ld})", tsk->pid, tsk->real_cred, tsk->cred,
++ atomic_long_read(&tsk->cred->usage));
+
+ cred = (struct cred *) tsk->real_cred;
+ tsk->real_cred = NULL;
+- validate_creds(cred);
+- alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+
+ cred = (struct cred *) tsk->cred;
+ tsk->cred = NULL;
+- validate_creds(cred);
+- alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+
+ #ifdef CONFIG_KEYS_REQUEST_CACHE
+@@ -224,10 +173,7 @@ struct cred *cred_alloc_blank(void)
+ if (!new)
+ return NULL;
+
+- atomic_set(&new->usage, 1);
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- new->magic = CRED_MAGIC;
+-#endif
++ atomic_long_set(&new->usage, 1);
+ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
+ goto error;
+
+@@ -258,8 +204,6 @@ struct cred *prepare_creds(void)
+ const struct cred *old;
+ struct cred *new;
+
+- validate_process_creds();
+-
+ new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+@@ -270,8 +214,7 @@ struct cred *prepare_creds(void)
+ memcpy(new, old, sizeof(struct cred));
+
+ new->non_rcu = 0;
+- atomic_set(&new->usage, 1);
+- set_cred_subscribers(new, 0);
++ atomic_long_set(&new->usage, 1);
+ get_group_info(new->group_info);
+ get_uid(new->user);
+ get_user_ns(new->user_ns);
+@@ -294,7 +237,6 @@ struct cred *prepare_creds(void)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+ goto error;
+
+- validate_creds(new);
+ return new;
+
+ error:
+@@ -357,10 +299,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
+ ) {
+ p->real_cred = get_cred(p->cred);
+ get_cred(p->cred);
+- alter_cred_subscribers(p->cred, 2);
+- kdebug("share_creds(%p{%d,%d})",
+- p->cred, atomic_read(&p->cred->usage),
+- read_cred_subscribers(p->cred));
++ kdebug("share_creds(%p{%ld})",
++ p->cred, atomic_long_read(&p->cred->usage));
+ inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
+ return 0;
+ }
+@@ -399,8 +339,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
+
+ p->cred = p->real_cred = get_cred(new);
+ inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
+- alter_cred_subscribers(new, 2);
+- validate_creds(new);
+ return 0;
+
+ error_put:
+@@ -452,17 +390,11 @@ int commit_creds(struct cred *new)
+ struct task_struct *task = current;
+ const struct cred *old = task->real_cred;
+
+- kdebug("commit_creds(%p{%d,%d})", new,
+- atomic_read(&new->usage),
+- read_cred_subscribers(new));
++ kdebug("commit_creds(%p{%ld})", new,
++ atomic_long_read(&new->usage));
+
+ BUG_ON(task->cred != old);
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- BUG_ON(read_cred_subscribers(old) < 2);
+- validate_creds(old);
+- validate_creds(new);
+-#endif
+- BUG_ON(atomic_read(&new->usage) < 1);
++ BUG_ON(atomic_long_read(&new->usage) < 1);
+
+ get_cred(new); /* we will require a ref for the subj creds too */
+
+@@ -497,14 +429,12 @@ int commit_creds(struct cred *new)
+ * RLIMIT_NPROC limits on user->processes have already been checked
+ * in set_user().
+ */
+- alter_cred_subscribers(new, 2);
+ if (new->user != old->user || new->user_ns != old->user_ns)
+ inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
+ rcu_assign_pointer(task->real_cred, new);
+ rcu_assign_pointer(task->cred, new);
+ if (new->user != old->user || new->user_ns != old->user_ns)
+ dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
+- alter_cred_subscribers(old, -2);
+
+ /* send notifications */
+ if (!uid_eq(new->uid, old->uid) ||
+@@ -535,14 +465,10 @@ EXPORT_SYMBOL(commit_creds);
+ */
+ void abort_creds(struct cred *new)
+ {
+- kdebug("abort_creds(%p{%d,%d})", new,
+- atomic_read(&new->usage),
+- read_cred_subscribers(new));
++ kdebug("abort_creds(%p{%ld})", new,
++ atomic_long_read(&new->usage));
+
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- BUG_ON(read_cred_subscribers(new) != 0);
+-#endif
+- BUG_ON(atomic_read(&new->usage) < 1);
++ BUG_ON(atomic_long_read(&new->usage) < 1);
+ put_cred(new);
+ }
+ EXPORT_SYMBOL(abort_creds);
+@@ -558,12 +484,8 @@ const struct cred *override_creds(const struct cred *new)
+ {
+ const struct cred *old = current->cred;
+
+- kdebug("override_creds(%p{%d,%d})", new,
+- atomic_read(&new->usage),
+- read_cred_subscribers(new));
+-
+- validate_creds(old);
+- validate_creds(new);
++ kdebug("override_creds(%p{%ld})", new,
++ atomic_long_read(&new->usage));
+
+ /*
+ * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+@@ -572,18 +494,12 @@ const struct cred *override_creds(const struct cred *new)
+ * we are only installing the cred into the thread-synchronous
+ * '->cred' pointer, not the '->real_cred' pointer that is
+ * visible to other threads under RCU.
+- *
+- * Also note that we did validate_creds() manually, not depending
+- * on the validation in 'get_cred()'.
+ */
+ get_new_cred((struct cred *)new);
+- alter_cred_subscribers(new, 1);
+ rcu_assign_pointer(current->cred, new);
+- alter_cred_subscribers(old, -1);
+
+- kdebug("override_creds() = %p{%d,%d}", old,
+- atomic_read(&old->usage),
+- read_cred_subscribers(old));
++ kdebug("override_creds() = %p{%ld}", old,
++ atomic_long_read(&old->usage));
+ return old;
+ }
+ EXPORT_SYMBOL(override_creds);
+@@ -599,15 +515,10 @@ void revert_creds(const struct cred *old)
+ {
+ const struct cred *override = current->cred;
+
+- kdebug("revert_creds(%p{%d,%d})", old,
+- atomic_read(&old->usage),
+- read_cred_subscribers(old));
++ kdebug("revert_creds(%p{%ld})", old,
++ atomic_long_read(&old->usage));
+
+- validate_creds(old);
+- validate_creds(override);
+- alter_cred_subscribers(old, 1);
+ rcu_assign_pointer(current->cred, old);
+- alter_cred_subscribers(override, -1);
+ put_cred(override);
+ }
+ EXPORT_SYMBOL(revert_creds);
+@@ -727,12 +638,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+ kdebug("prepare_kernel_cred() alloc %p", new);
+
+ old = get_task_cred(daemon);
+- validate_creds(old);
+
+ *new = *old;
+ new->non_rcu = 0;
+- atomic_set(&new->usage, 1);
+- set_cred_subscribers(new, 0);
++ atomic_long_set(&new->usage, 1);
+ get_uid(new->user);
+ get_user_ns(new->user_ns);
+ get_group_info(new->group_info);
+@@ -756,7 +665,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+ goto error;
+
+ put_cred(old);
+- validate_creds(new);
+ return new;
+
+ error:
+@@ -821,109 +729,3 @@ int set_create_files_as(struct cred *new, struct inode *inode)
+ return security_kernel_create_files_as(new, inode);
+ }
+ EXPORT_SYMBOL(set_create_files_as);
+-
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+-
+-bool creds_are_invalid(const struct cred *cred)
+-{
+- if (cred->magic != CRED_MAGIC)
+- return true;
+- return false;
+-}
+-EXPORT_SYMBOL(creds_are_invalid);
+-
+-/*
+- * dump invalid credentials
+- */
+-static void dump_invalid_creds(const struct cred *cred, const char *label,
+- const struct task_struct *tsk)
+-{
+- pr_err("%s credentials: %p %s%s%s\n",
+- label, cred,
+- cred == &init_cred ? "[init]" : "",
+- cred == tsk->real_cred ? "[real]" : "",
+- cred == tsk->cred ? "[eff]" : "");
+- pr_err("->magic=%x, put_addr=%p\n",
+- cred->magic, cred->put_addr);
+- pr_err("->usage=%d, subscr=%d\n",
+- atomic_read(&cred->usage),
+- read_cred_subscribers(cred));
+- pr_err("->*uid = { %d,%d,%d,%d }\n",
+- from_kuid_munged(&init_user_ns, cred->uid),
+- from_kuid_munged(&init_user_ns, cred->euid),
+- from_kuid_munged(&init_user_ns, cred->suid),
+- from_kuid_munged(&init_user_ns, cred->fsuid));
+- pr_err("->*gid = { %d,%d,%d,%d }\n",
+- from_kgid_munged(&init_user_ns, cred->gid),
+- from_kgid_munged(&init_user_ns, cred->egid),
+- from_kgid_munged(&init_user_ns, cred->sgid),
+- from_kgid_munged(&init_user_ns, cred->fsgid));
+-#ifdef CONFIG_SECURITY
+- pr_err("->security is %p\n", cred->security);
+- if ((unsigned long) cred->security >= PAGE_SIZE &&
+- (((unsigned long) cred->security & 0xffffff00) !=
+- (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)))
+- pr_err("->security {%x, %x}\n",
+- ((u32*)cred->security)[0],
+- ((u32*)cred->security)[1]);
+-#endif
+-}
+-
+-/*
+- * report use of invalid credentials
+- */
+-void __noreturn __invalid_creds(const struct cred *cred, const char *file, unsigned line)
+-{
+- pr_err("Invalid credentials\n");
+- pr_err("At %s:%u\n", file, line);
+- dump_invalid_creds(cred, "Specified", current);
+- BUG();
+-}
+-EXPORT_SYMBOL(__invalid_creds);
+-
+-/*
+- * check the credentials on a process
+- */
+-void __validate_process_creds(struct task_struct *tsk,
+- const char *file, unsigned line)
+-{
+- if (tsk->cred == tsk->real_cred) {
+- if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
+- creds_are_invalid(tsk->cred)))
+- goto invalid_creds;
+- } else {
+- if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
+- read_cred_subscribers(tsk->cred) < 1 ||
+- creds_are_invalid(tsk->real_cred) ||
+- creds_are_invalid(tsk->cred)))
+- goto invalid_creds;
+- }
+- return;
+-
+-invalid_creds:
+- pr_err("Invalid process credentials\n");
+- pr_err("At %s:%u\n", file, line);
+-
+- dump_invalid_creds(tsk->real_cred, "Real", tsk);
+- if (tsk->cred != tsk->real_cred)
+- dump_invalid_creds(tsk->cred, "Effective", tsk);
+- else
+- pr_err("Effective creds == Real creds\n");
+- BUG();
+-}
+-EXPORT_SYMBOL(__validate_process_creds);
+-
+-/*
+- * check creds for do_exit()
+- */
+-void validate_creds_for_do_exit(struct task_struct *tsk)
+-{
+- kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
+- tsk->real_cred, tsk->cred,
+- atomic_read(&tsk->cred->usage),
+- read_cred_subscribers(tsk->cred));
+-
+- __validate_process_creds(tsk, __FILE__, __LINE__);
+-}
+-
+-#endif /* CONFIG_DEBUG_CREDENTIALS */
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 621037a0aa870e..ce1bb2301c061d 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
+ if (panic_timeout)
+ return;
+
++ debug_locks_off();
++ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ if (dbg_kdb_mode)
+ kdb_printf("PANIC: %s\n", msg);
+
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 9443bc63c5a245..4799f6250bb269 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -184,6 +184,33 @@ char kdb_getchar(void)
+ unreachable();
+ }
+
++/**
++ * kdb_position_cursor() - Place cursor in the correct horizontal position
++ * @prompt: Nil-terminated string containing the prompt string
++ * @buffer: Nil-terminated string containing the entire command line
++ * @cp: Cursor position, pointer to the character in buffer where the cursor
++ * should be positioned.
++ *
++ * The cursor is positioned by sending a carriage-return and then printing
++ * the content of the line until we reach the correct cursor position.
++ *
++ * There is some additional fine detail here.
++ *
++ * Firstly, even though kdb_printf() will correctly format zero-width fields
++ * we want the second call to kdb_printf() to be conditional. That keeps things
++ * a little cleaner when LOGGING=1.
++ *
++ * Secondly, we can't combine everything into one call to kdb_printf() since
++ * that renders into a fixed length buffer and the combined print could result
++ * in unwanted truncation.
++ */
++static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
++{
++ kdb_printf("\r%s", prompt);
++ if (cp > buffer)
++ kdb_printf("%.*s", (int)(cp - buffer), buffer);
++}
++
+ /*
+ * kdb_read
+ *
+@@ -212,7 +239,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ * and null byte */
+ char *lastchar;
+ char *p_tmp;
+- char tmp;
+ static char tmpbuffer[CMD_BUFLEN];
+ int len = strlen(buffer);
+ int len_tmp;
+@@ -249,12 +275,8 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ }
+ *(--lastchar) = '\0';
+ --cp;
+- kdb_printf("\b%s \r", cp);
+- tmp = *cp;
+- *cp = '\0';
+- kdb_printf(kdb_prompt_str);
+- kdb_printf("%s", buffer);
+- *cp = tmp;
++ kdb_printf("\b%s ", cp);
++ kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ }
+ break;
+ case 10: /* linefeed */
+@@ -272,19 +294,14 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+ memcpy(cp, tmpbuffer, lastchar - cp - 1);
+ *(--lastchar) = '\0';
+- kdb_printf("%s \r", cp);
+- tmp = *cp;
+- *cp = '\0';
+- kdb_printf(kdb_prompt_str);
+- kdb_printf("%s", buffer);
+- *cp = tmp;
++ kdb_printf("%s ", cp);
++ kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ }
+ break;
+ case 1: /* Home */
+ if (cp > buffer) {
+- kdb_printf("\r");
+- kdb_printf(kdb_prompt_str);
+ cp = buffer;
++ kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ }
+ break;
+ case 5: /* End */
+@@ -300,11 +317,10 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ }
+ break;
+ case 14: /* Down */
+- memset(tmpbuffer, ' ',
+- strlen(kdb_prompt_str) + (lastchar-buffer));
+- *(tmpbuffer+strlen(kdb_prompt_str) +
+- (lastchar-buffer)) = '\0';
+- kdb_printf("\r%s\r", tmpbuffer);
++ case 16: /* Up */
++ kdb_printf("\r%*c\r",
++ (int)(strlen(kdb_prompt_str) + (lastchar - buffer)),
++ ' ');
+ *lastchar = (char)key;
+ *(lastchar+1) = '\0';
+ return lastchar;
+@@ -314,15 +330,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ ++cp;
+ }
+ break;
+- case 16: /* Up */
+- memset(tmpbuffer, ' ',
+- strlen(kdb_prompt_str) + (lastchar-buffer));
+- *(tmpbuffer+strlen(kdb_prompt_str) +
+- (lastchar-buffer)) = '\0';
+- kdb_printf("\r%s\r", tmpbuffer);
+- *lastchar = (char)key;
+- *(lastchar+1) = '\0';
+- return lastchar;
+ case 9: /* Tab */
+ if (tab < 2)
+ ++tab;
+@@ -364,17 +371,27 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ if (i >= dtab_count)
+ kdb_printf("...");
+ kdb_printf("\n");
+- kdb_printf(kdb_prompt_str);
++ kdb_printf("%s", kdb_prompt_str);
+ kdb_printf("%s", buffer);
++ if (cp != lastchar)
++ kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ } else if (tab != 2 && count > 0) {
+- len_tmp = strlen(p_tmp);
+- strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+- len_tmp = strlen(p_tmp);
+- strncpy(cp, p_tmp+len, len_tmp-len + 1);
+- len = len_tmp - len;
+- kdb_printf("%s", cp);
+- cp += len;
+- lastchar += len;
++ /* How many new characters do we want from tmpbuffer? */
++ len_tmp = strlen(p_tmp) - len;
++ if (lastchar + len_tmp >= bufend)
++ len_tmp = bufend - lastchar;
++
++ if (len_tmp) {
++ /* + 1 ensures the '\0' is memmove'd */
++ memmove(cp+len_tmp, cp, (lastchar-cp) + 1);
++ memcpy(cp, p_tmp+len, len_tmp);
++ kdb_printf("%s", cp);
++ cp += len_tmp;
++ lastchar += len_tmp;
++ if (cp != lastchar)
++ kdb_position_cursor(kdb_prompt_str,
++ buffer, cp);
++ }
+ }
+ kdb_nextline = 1; /* reset output line number */
+ break;
+@@ -385,13 +402,9 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ memcpy(cp+1, tmpbuffer, lastchar - cp);
+ *++lastchar = '\0';
+ *cp = key;
+- kdb_printf("%s\r", cp);
++ kdb_printf("%s", cp);
+ ++cp;
+- tmp = *cp;
+- *cp = '\0';
+- kdb_printf(kdb_prompt_str);
+- kdb_printf("%s", buffer);
+- *cp = tmp;
++ kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ } else {
+ *++lastchar = '\0';
+ *cp++ = key;
+@@ -450,7 +463,7 @@ char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
+ {
+ if (prompt && kdb_prompt_str != prompt)
+ strscpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+- kdb_printf(kdb_prompt_str);
++ kdb_printf("%s", kdb_prompt_str);
+ kdb_nextline = 1; /* Prompt and input resets line number */
+ return kdb_read(buffer, bufsize);
+ }
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 438b868cbfa922..35aa2e98a92a96 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1349,8 +1349,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ /* PROMPT can only be set if we have MEM_READ permission. */
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ raw_smp_processor_id());
+- if (defcmd_in_progress)
+- strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+ /*
+ * Fetch command from keyboard
+diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
+index c21abc77c53e9a..ff5683a57f7712 100644
+--- a/kernel/dma/coherent.c
++++ b/kernel/dma/coherent.c
+@@ -132,8 +132,10 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+
+ void dma_release_coherent_memory(struct device *dev)
+ {
+- if (dev)
++ if (dev) {
+ _dma_release_coherent_memory(dev->dma_mem);
++ dev->dma_mem = NULL;
++ }
+ }
+
+ static void *__dma_alloc_from_coherent(struct device *dev,
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 06366acd27b082..e472cc37d7de46 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -415,8 +415,11 @@ static unsigned long long phys_addr(struct dma_debug_entry *entry)
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
++ *
++ * Use __GFP_NOWARN because the printk from an OOM, if routed to netconsole,
++ * could end up right back in the DMA debugging code, leading to a deadlock.
+ */
+-static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
++static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
+ static DEFINE_SPINLOCK(radix_lock);
+ #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+ #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index 9596ae1aa0dacf..fc2d10b2aca6fc 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -295,7 +295,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
+ } else {
+ ret = page_address(page);
+ if (dma_set_decrypted(dev, ret, size))
+- goto out_free_pages;
++ goto out_leak_pages;
+ }
+
+ memset(ret, 0, size);
+@@ -316,6 +316,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
+ out_free_pages:
+ __dma_direct_free_pages(dev, page, size);
+ return NULL;
++out_leak_pages:
++ return NULL;
+ }
+
+ void dma_direct_free(struct device *dev, size_t size,
+@@ -378,12 +380,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+
+ ret = page_address(page);
+ if (dma_set_decrypted(dev, ret, size))
+- goto out_free_pages;
++ goto out_leak_pages;
+ memset(ret, 0, size);
+ *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ return page;
+-out_free_pages:
+- __dma_direct_free_pages(dev, page, size);
++out_leak_pages:
+ return NULL;
+ }
+
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 02205ab53b7e93..cc19a3efea8960 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -89,6 +89,22 @@ static int map_benchmark_thread(void *data)
+ atomic64_add(map_sq, &map->sum_sq_map);
+ atomic64_add(unmap_sq, &map->sum_sq_unmap);
+ atomic64_inc(&map->loops);
++
++ /*
++		 * We may test for a long time, so periodically check whether
++		 * we need to schedule to avoid starving the others. Otherwise
++		 * we may hang the kernel in a non-preemptible kernel: when the
++		 * number of test kthreads is >= the number of CPUs, the test
++		 * kthreads will run endlessly on every CPU since the thread
++		 * responsible for notifying the kthreads to stop (in
++		 * do_map_benchmark()) could not be scheduled.
++ *
++ * Note this may degrade the test concurrency since the test
++ * threads may need to share the CPU time with other load
++ * in the system. So it's recommended to run this benchmark
++ * on an idle system.
++ */
++ cond_resched();
+ }
+
+ out:
+@@ -101,7 +117,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ struct task_struct **tsk;
+ int threads = map->bparam.threads;
+ int node = map->bparam.node;
+- const cpumask_t *cpu_mask = cpumask_of_node(node);
+ u64 loops;
+ int ret = 0;
+ int i;
+@@ -118,11 +133,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ if (IS_ERR(tsk[i])) {
+ pr_err("create dma_map thread failed\n");
+ ret = PTR_ERR(tsk[i]);
++ while (--i >= 0)
++ kthread_stop(tsk[i]);
+ goto out;
+ }
+
+ if (node != NUMA_NO_NODE)
+- kthread_bind_mask(tsk[i], cpu_mask);
++ kthread_bind_mask(tsk[i], cpumask_of_node(node));
+ }
+
+ /* clear the old value in the previous benchmark */
+@@ -139,13 +156,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+
+ msleep_interruptible(map->bparam.seconds * 1000);
+
+- /* wait for the completion of benchmark threads */
++ /* wait for the completion of all started benchmark threads */
+ for (i = 0; i < threads; i++) {
+- ret = kthread_stop(tsk[i]);
+- if (ret)
+- goto out;
++ int kthread_ret = kthread_stop_put(tsk[i]);
++
++ if (kthread_ret)
++ ret = kthread_ret;
+ }
+
++ if (ret)
++ goto out;
++
+ loops = atomic64_read(&map->loops);
+ if (likely(loops > 0)) {
+ u64 map_variance, unmap_variance;
+@@ -170,8 +191,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ }
+
+ out:
+- for (i = 0; i < threads; i++)
+- put_task_struct(tsk[i]);
+ put_device(map->dev);
+ kfree(tsk);
+ return ret;
+@@ -208,7 +227,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ }
+
+ if (map->bparam.node != NUMA_NO_NODE &&
+- !node_possible(map->bparam.node)) {
++ (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
++ !node_possible(map->bparam.node))) {
+ pr_err("invalid numa node\n");
+ return -EINVAL;
+ }
+@@ -252,6 +272,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ * dma_mask changed by benchmark
+ */
+ dma_set_mask(map->dev, old_dma_mask);
++
++ if (ret)
++ return ret;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index e323ca48f7f2a4..f1d9f01b283d7d 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -67,8 +67,8 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+ {
+ struct dma_devres match_data = { size, vaddr, dma_handle };
+
+- dma_free_coherent(dev, size, vaddr, dma_handle);
+ WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
++ dma_free_coherent(dev, size, vaddr, dma_handle);
+ }
+ EXPORT_SYMBOL(dmam_free_coherent);
+
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index dff067bd56b1e2..e7c3fbd0737ec0 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -69,11 +69,14 @@
+ * @alloc_size: Size of the allocated buffer.
+ * @list: The free list describing the number of free entries available
+ * from each index.
++ * @pad_slots: Number of preceding padding slots. Valid only in the first
++ * allocated non-padding slot.
+ */
+ struct io_tlb_slot {
+ phys_addr_t orig_addr;
+ size_t alloc_size;
+- unsigned int list;
++ unsigned short list;
++ unsigned short pad_slots;
+ };
+
+ static bool swiotlb_force_bounce;
+@@ -283,9 +286,11 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
+ }
+
+ for (i = 0; i < mem->nslabs; i++) {
+- mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
++ mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
++ mem->nslabs - i);
+ mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
+ mem->slots[i].alloc_size = 0;
++ mem->slots[i].pad_slots = 0;
+ }
+
+ memset(vaddr, 0, bytes);
+@@ -558,29 +563,40 @@ void __init swiotlb_exit(void)
+ * alloc_dma_pages() - allocate pages to be used for DMA
+ * @gfp: GFP flags for the allocation.
+ * @bytes: Size of the buffer.
++ * @phys_limit: Maximum allowed physical address of the buffer.
+ *
+ * Allocate pages from the buddy allocator. If successful, make the allocated
+ * pages decrypted so that they can be used for DMA.
+ *
+- * Return: Decrypted pages, or %NULL on failure.
++ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
++ * if the allocated physical address was above @phys_limit.
+ */
+-static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
++static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
+ {
+ unsigned int order = get_order(bytes);
+ struct page *page;
++ phys_addr_t paddr;
+ void *vaddr;
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ return NULL;
+
+- vaddr = page_address(page);
++ paddr = page_to_phys(page);
++ if (paddr + bytes - 1 > phys_limit) {
++ __free_pages(page, order);
++ return ERR_PTR(-EAGAIN);
++ }
++
++ vaddr = phys_to_virt(paddr);
+ if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
+ goto error;
+ return page;
+
+ error:
+- __free_pages(page, order);
++ /* Intentional leak if pages cannot be encrypted again. */
++ if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
++ __free_pages(page, order);
+ return NULL;
+ }
+
+@@ -618,11 +634,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
+ else if (phys_limit <= DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+
+- while ((page = alloc_dma_pages(gfp, bytes)) &&
+- page_to_phys(page) + bytes - 1 > phys_limit) {
+- /* allocated, but too high */
+- __free_pages(page, get_order(bytes));
+-
++ while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+ phys_limit < DMA_BIT_MASK(64) &&
+ !(gfp & (__GFP_DMA32 | __GFP_DMA)))
+@@ -813,12 +825,30 @@ void swiotlb_dev_init(struct device *dev)
+ #endif
+ }
+
+-/*
+- * Return the offset into a iotlb slot required to keep the device happy.
++/**
++ * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
++ * @dev: Owning device.
++ * @align_mask: Allocation alignment mask.
++ * @addr: DMA address.
++ *
++ * Return the minimum offset from the start of an IO TLB allocation which is
++ * required for a given buffer address and allocation alignment to keep the
++ * device happy.
++ *
++ * First, the address bits covered by min_align_mask must be identical in the
++ * original address and the bounce buffer address. High bits are preserved by
++ * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
++ * padding bytes before the bounce buffer.
++ *
++ * Second, @align_mask specifies which bits of the first allocated slot must
++ * be zero. This may require allocating additional padding slots, and then the
++ * offset (in bytes) from the first such padding slot is returned.
+ */
+-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
++static unsigned int swiotlb_align_offset(struct device *dev,
++ unsigned int align_mask, u64 addr)
+ {
+- return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
++ return addr & dma_get_min_align_mask(dev) &
++ (align_mask | (IO_TLB_SIZE - 1));
+ }
+
+ /*
+@@ -839,7 +869,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
+ return;
+
+ tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
+- orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
++ orig_addr_offset = swiotlb_align_offset(dev, 0, orig_addr);
+ if (tlb_offset < orig_addr_offset) {
+ dev_WARN_ONCE(dev, 1,
+ "Access before mapping start detected. orig offset %u, requested offset %u.\n",
+@@ -973,10 +1003,9 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ dma_addr_t tbl_dma_addr =
+ phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
+ unsigned long max_slots = get_max_slots(boundary_mask);
+- unsigned int iotlb_align_mask =
+- dma_get_min_align_mask(dev) | alloc_align_mask;
++ unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
+ unsigned int nslots = nr_slots(alloc_size), stride;
+- unsigned int offset = swiotlb_align_offset(dev, orig_addr);
++ unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
+ unsigned int index, slots_checked, count = 0, i;
+ unsigned long flags;
+ unsigned int slot_base;
+@@ -986,18 +1015,29 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ BUG_ON(area_index >= pool->nareas);
+
+ /*
+- * For allocations of PAGE_SIZE or larger only look for page aligned
+- * allocations.
++ * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
++ * page-aligned in the absence of any other alignment requirements.
++ * 'alloc_align_mask' was later introduced to specify the alignment
++	 * explicitly; however, this is passed as zero for streaming mappings
++ * and so we preserve the old behaviour there in case any drivers are
++ * relying on it.
++ */
++ if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
++ alloc_align_mask = PAGE_SIZE - 1;
++
++ /*
++ * Ensure that the allocation is at least slot-aligned and update
++ * 'iotlb_align_mask' to ignore bits that will be preserved when
++ * offsetting into the allocation.
+ */
+- if (alloc_size >= PAGE_SIZE)
+- iotlb_align_mask |= ~PAGE_MASK;
+- iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
++ alloc_align_mask |= (IO_TLB_SIZE - 1);
++ iotlb_align_mask &= ~alloc_align_mask;
+
+ /*
+ * For mappings with an alignment requirement don't bother looping to
+ * unaligned slots once we found an aligned one.
+ */
+- stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
++ stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
+
+ spin_lock_irqsave(&area->lock, flags);
+ if (unlikely(nslots > pool->area_nslabs - area->used))
+@@ -1007,11 +1047,14 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
+ index = area->index;
+
+ for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
++ phys_addr_t tlb_addr;
++
+ slot_index = slot_base + index;
++ tlb_addr = slot_addr(tbl_dma_addr, slot_index);
+
+- if (orig_addr &&
+- (slot_addr(tbl_dma_addr, slot_index) &
+- iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
++ if ((tlb_addr & alloc_align_mask) ||
++ (orig_addr && (tlb_addr & iotlb_align_mask) !=
++ (orig_addr & iotlb_align_mask))) {
+ index = wrap_area_index(pool, index + 1);
+ slots_checked++;
+ continue;
+@@ -1261,11 +1304,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
+ unsigned long attrs)
+ {
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+- unsigned int offset = swiotlb_align_offset(dev, orig_addr);
++ unsigned int offset;
+ struct io_tlb_pool *pool;
+ unsigned int i;
+ int index;
+ phys_addr_t tlb_addr;
++ unsigned short pad_slots;
+
+ if (!mem || !mem->nslabs) {
+ dev_warn_ratelimited(dev,
+@@ -1282,6 +1326,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
++ offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
+ index = swiotlb_find_slots(dev, orig_addr,
+ alloc_size + offset, alloc_align_mask, &pool);
+ if (index == -1) {
+@@ -1297,6 +1342,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
+ * This is needed when we sync the memory. Then we sync the buffer if
+ * needed.
+ */
++ pad_slots = offset >> IO_TLB_SHIFT;
++ offset &= (IO_TLB_SIZE - 1);
++ index += pad_slots;
++ pool->slots[index].pad_slots = pad_slots;
+ for (i = 0; i < nr_slots(alloc_size + offset); i++)
+ pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
+ tlb_addr = slot_addr(pool->start, index) + offset;
+@@ -1315,13 +1364,17 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
+ {
+ struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
+ unsigned long flags;
+- unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
+- int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
+- int nslots = nr_slots(mem->slots[index].alloc_size + offset);
+- int aindex = index / mem->area_nslabs;
+- struct io_tlb_area *area = &mem->areas[aindex];
++ unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
++ int index, nslots, aindex;
++ struct io_tlb_area *area;
+ int count, i;
+
++ index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
++ index -= mem->slots[index].pad_slots;
++ nslots = nr_slots(mem->slots[index].alloc_size + offset);
++ aindex = index / mem->area_nslabs;
++ area = &mem->areas[aindex];
++
+ /*
+ * Return the buffer to the free list by setting the corresponding
+ * entries to indicate the number of contiguous entries available.
+@@ -1344,6 +1397,7 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
+ mem->slots[i].list = ++count;
+ mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
+ mem->slots[i].alloc_size = 0;
++ mem->slots[i].pad_slots = 0;
+ }
+
+ /*
+@@ -1598,16 +1652,24 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
+ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+ struct io_tlb_pool *pool;
+ phys_addr_t tlb_addr;
++ unsigned int align;
+ int index;
+
+ if (!mem)
+ return NULL;
+
+- index = swiotlb_find_slots(dev, 0, size, 0, &pool);
++ align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
++ index = swiotlb_find_slots(dev, 0, size, align, &pool);
+ if (index == -1)
+ return NULL;
+
+ tlb_addr = slot_addr(pool->start, index);
++ if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
++ dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
++ &tlb_addr);
++ swiotlb_release_slots(dev, tlb_addr);
++ return NULL;
++ }
+
+ return pfn_to_page(PFN_DOWN(tlb_addr));
+ }
+@@ -1673,6 +1735,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+ mem->for_alloc = true;
+ #ifdef CONFIG_SWIOTLB_DYNAMIC
+ spin_lock_init(&mem->lock);
++ INIT_LIST_HEAD_RCU(&mem->pools);
+ #endif
+ add_mem_pool(mem, pool);
+
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index d7ee4bc3f2ba3e..5ff4f1cd364455 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -77,8 +77,14 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
+- if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
++ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
+ trace_sys_enter(regs, syscall);
++ /*
++ * Probes or BPF hooks in the tracepoint may have changed the
++ * system call number as well.
++ */
++ syscall = syscall_get_nr(current, regs);
++ }
+
+ syscall_enter_audit(regs, syscall);
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index a2f2a9525d72ea..ec0fae49a0dd9a 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -264,6 +264,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
+ {
+ struct perf_event_context *ctx = event->ctx;
+ struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
++ struct perf_cpu_context *cpuctx;
+ struct event_function_struct efs = {
+ .event = event,
+ .func = func,
+@@ -291,22 +292,25 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
+ if (!task_function_call(task, event_function, &efs))
+ return;
+
+- raw_spin_lock_irq(&ctx->lock);
++ local_irq_disable();
++ cpuctx = this_cpu_ptr(&perf_cpu_context);
++ perf_ctx_lock(cpuctx, ctx);
+ /*
+ * Reload the task pointer, it might have been changed by
+ * a concurrent perf_event_context_sched_out().
+ */
+ task = ctx->task;
+- if (task == TASK_TOMBSTONE) {
+- raw_spin_unlock_irq(&ctx->lock);
+- return;
+- }
++ if (task == TASK_TOMBSTONE)
++ goto unlock;
+ if (ctx->is_active) {
+- raw_spin_unlock_irq(&ctx->lock);
++ perf_ctx_unlock(cpuctx, ctx);
++ local_irq_enable();
+ goto again;
+ }
+ func(event, NULL, ctx, data);
+- raw_spin_unlock_irq(&ctx->lock);
++unlock:
++ perf_ctx_unlock(cpuctx, ctx);
++ local_irq_enable();
+ }
+
+ /*
+@@ -375,6 +379,7 @@ enum event_type_t {
+ EVENT_TIME = 0x4,
+ /* see ctx_resched() for details */
+ EVENT_CPU = 0x8,
++ EVENT_CGROUP = 0x10,
+ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+ };
+
+@@ -684,20 +689,26 @@ do { \
+ ___p; \
+ })
+
+-static void perf_ctx_disable(struct perf_event_context *ctx)
++static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
+ {
+ struct perf_event_pmu_context *pmu_ctx;
+
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
+ perf_pmu_disable(pmu_ctx->pmu);
++ }
+ }
+
+-static void perf_ctx_enable(struct perf_event_context *ctx)
++static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
+ {
+ struct perf_event_pmu_context *pmu_ctx;
+
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
+ perf_pmu_enable(pmu_ctx->pmu);
++ }
+ }
+
+ static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
+@@ -856,9 +867,9 @@ static void perf_cgroup_switch(struct task_struct *task)
+ return;
+
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+- perf_ctx_disable(&cpuctx->ctx);
++ perf_ctx_disable(&cpuctx->ctx, true);
+
+- ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
++ ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+ /*
+ * must not be done before ctxswout due
+ * to update_cgrp_time_from_cpuctx() in
+@@ -870,9 +881,9 @@ static void perf_cgroup_switch(struct task_struct *task)
+ * perf_cgroup_set_timestamp() in ctx_sched_in()
+ * to not have to pass task around
+ */
+- ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
++ ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
+
+- perf_ctx_enable(&cpuctx->ctx);
++ perf_ctx_enable(&cpuctx->ctx, true);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+
+@@ -965,6 +976,8 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
+ if (!is_cgroup_event(event))
+ return;
+
++ event->pmu_ctx->nr_cgroups++;
++
+ /*
+ * Because cgroup events are always per-cpu events,
+ * @ctx == &cpuctx->ctx.
+@@ -985,6 +998,8 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
+ if (!is_cgroup_event(event))
+ return;
+
++ event->pmu_ctx->nr_cgroups--;
++
+ /*
+ * Because cgroup events are always per-cpu events,
+ * @ctx == &cpuctx->ctx.
+@@ -1244,8 +1259,9 @@ static void put_ctx(struct perf_event_context *ctx)
+ * perf_event_context::mutex
+ * perf_event::child_mutex;
+ * perf_event_context::lock
+- * perf_event::mmap_mutex
+ * mmap_lock
++ * perf_event::mmap_mutex
++ * perf_buffer::aux_mutex
+ * perf_addr_filters_head::lock
+ *
+ * cpu_hotplug_lock
+@@ -1803,31 +1819,34 @@ static inline void perf_event__state_init(struct perf_event *event)
+ PERF_EVENT_STATE_INACTIVE;
+ }
+
+-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
++static int __perf_event_read_size(u64 read_format, int nr_siblings)
+ {
+ int entry = sizeof(u64); /* value */
+ int size = 0;
+ int nr = 1;
+
+- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ size += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ size += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_ID)
++ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_LOST)
++ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_GROUP) {
++ if (read_format & PERF_FORMAT_GROUP) {
+ nr += nr_siblings;
+ size += sizeof(u64);
+ }
+
+- size += entry * nr;
+- event->read_size = size;
++ /*
++ * Since perf_event_validate_size() limits this to 16k and inhibits
++ * adding more siblings, this will never overflow.
++ */
++ return size + nr * entry;
+ }
+
+ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+@@ -1877,8 +1896,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+ */
+ static void perf_event__header_size(struct perf_event *event)
+ {
+- __perf_event_read_size(event,
+- event->group_leader->nr_siblings);
++ event->read_size =
++ __perf_event_read_size(event->attr.read_format,
++ event->group_leader->nr_siblings);
+ __perf_event_header_size(event, event->attr.sample_type);
+ }
+
+@@ -1909,23 +1929,44 @@ static void perf_event__id_header_size(struct perf_event *event)
+ event->id_header_size = size;
+ }
+
++/*
++ * Check that adding an event to the group does not result in anybody
++ * overflowing the 64k event limit imposed by the output buffer.
++ *
++ * Specifically, check that the read_size for the event does not exceed 16k,
++ * read_size being the one term that grows with group size. Since read_size
++ * depends on per-event read_format, also (re)check the existing events.
++ *
++ * This leaves 48k for the constant size fields and things like callchains,
++ * branch stacks and register sets.
++ */
+ static bool perf_event_validate_size(struct perf_event *event)
+ {
+- /*
+- * The values computed here will be over-written when we actually
+- * attach the event.
+- */
+- __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+- __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+- perf_event__id_header_size(event);
++ struct perf_event *sibling, *group_leader = event->group_leader;
++
++ if (__perf_event_read_size(event->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
++
++ if (__perf_event_read_size(group_leader->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
+
+ /*
+- * Sum the lot; should not exceed the 64k limit we have on records.
+- * Conservative limit to allow for callchains and other variable fields.
++ * When creating a new group leader, group_leader->ctx is initialized
++ * after the size has been validated, but we cannot safely use
++ * for_each_sibling_event() until group_leader->ctx is set. A new group
++ * leader cannot have any siblings yet, so we can safely skip checking
++ * the non-existent siblings.
+ */
+- if (event->read_size + event->header_size +
+- event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+- return false;
++ if (event == group_leader)
++ return true;
++
++ for_each_sibling_event(sibling, group_leader) {
++ if (__perf_event_read_size(sibling->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
++ }
+
+ return true;
+ }
+@@ -2248,18 +2289,14 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
+ }
+
+ if (event->pending_sigtrap) {
+- bool dec = true;
+-
+ event->pending_sigtrap = 0;
+ if (state != PERF_EVENT_STATE_OFF &&
+- !event->pending_work) {
++ !event->pending_work &&
++ !task_work_add(current, &event->pending_task, TWA_RESUME)) {
+ event->pending_work = 1;
+- dec = false;
+- WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
+- task_work_add(current, &event->pending_task, TWA_RESUME);
+- }
+- if (dec)
++ } else {
+ local_dec(&event->ctx->nr_pending);
++ }
+ }
+
+ perf_event_set_state(event, state);
+@@ -2679,9 +2716,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
+
+ event_type &= EVENT_ALL;
+
+- perf_ctx_disable(&cpuctx->ctx);
++ perf_ctx_disable(&cpuctx->ctx, false);
+ if (task_ctx) {
+- perf_ctx_disable(task_ctx);
++ perf_ctx_disable(task_ctx, false);
+ task_ctx_sched_out(task_ctx, event_type);
+ }
+
+@@ -2699,9 +2736,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
+
+ perf_event_sched_in(cpuctx, task_ctx);
+
+- perf_ctx_enable(&cpuctx->ctx);
++ perf_ctx_enable(&cpuctx->ctx, false);
+ if (task_ctx)
+- perf_ctx_enable(task_ctx);
++ perf_ctx_enable(task_ctx, false);
+ }
+
+ void perf_pmu_resched(struct pmu *pmu)
+@@ -3246,6 +3283,9 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ struct perf_event_pmu_context *pmu_ctx;
+ int is_active = ctx->is_active;
++ bool cgroup = event_type & EVENT_CGROUP;
++
++ event_type &= ~EVENT_CGROUP;
+
+ lockdep_assert_held(&ctx->lock);
+
+@@ -3292,8 +3332,11 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
+
+ is_active ^= ctx->is_active; /* changed bits */
+
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
+ __pmu_ctx_sched_out(pmu_ctx, is_active);
++ }
+ }
+
+ /*
+@@ -3484,7 +3527,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ if (context_equiv(ctx, next_ctx)) {
+
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+
+ /* PMIs are disabled; ctx->nr_pending is stable. */
+ if (local_read(&ctx->nr_pending) ||
+@@ -3504,7 +3547,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+ perf_ctx_sched_task_cb(ctx, false);
+ perf_event_swap_task_ctx_data(ctx, next_ctx);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+
+ /*
+ * RCU_INIT_POINTER here is safe because we've not
+@@ -3528,13 +3571,13 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
+
+ if (do_switch) {
+ raw_spin_lock(&ctx->lock);
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+
+ inside_switch:
+ perf_ctx_sched_task_cb(ctx, false);
+ task_ctx_sched_out(ctx, EVENT_ALL);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+ raw_spin_unlock(&ctx->lock);
+ }
+ }
+@@ -3820,47 +3863,32 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ return 0;
+ }
+
+-static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void pmu_groups_sched_in(struct perf_event_context *ctx,
++ struct perf_event_groups *groups,
++ struct pmu *pmu)
+ {
+- struct perf_event_pmu_context *pmu_ctx;
+ int can_add_hw = 1;
+-
+- if (pmu) {
+- visit_groups_merge(ctx, &ctx->pinned_groups,
+- smp_processor_id(), pmu,
+- merge_sched_in, &can_add_hw);
+- } else {
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+- can_add_hw = 1;
+- visit_groups_merge(ctx, &ctx->pinned_groups,
+- smp_processor_id(), pmu_ctx->pmu,
+- merge_sched_in, &can_add_hw);
+- }
+- }
++ visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
++ merge_sched_in, &can_add_hw);
+ }
+
+-static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void ctx_groups_sched_in(struct perf_event_context *ctx,
++ struct perf_event_groups *groups,
++ bool cgroup)
+ {
+ struct perf_event_pmu_context *pmu_ctx;
+- int can_add_hw = 1;
+
+- if (pmu) {
+- visit_groups_merge(ctx, &ctx->flexible_groups,
+- smp_processor_id(), pmu,
+- merge_sched_in, &can_add_hw);
+- } else {
+- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
+- can_add_hw = 1;
+- visit_groups_merge(ctx, &ctx->flexible_groups,
+- smp_processor_id(), pmu_ctx->pmu,
+- merge_sched_in, &can_add_hw);
+- }
++ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
++ if (cgroup && !pmu_ctx->nr_cgroups)
++ continue;
++ pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
+ }
+ }
+
+-static void __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
++static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
++ struct pmu *pmu)
+ {
+- ctx_flexible_sched_in(ctx, pmu);
++ pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
+ }
+
+ static void
+@@ -3868,6 +3896,9 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ {
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+ int is_active = ctx->is_active;
++ bool cgroup = event_type & EVENT_CGROUP;
++
++ event_type &= ~EVENT_CGROUP;
+
+ lockdep_assert_held(&ctx->lock);
+
+@@ -3900,11 +3931,11 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ * in order to give them the best chance of going on.
+ */
+ if (is_active & EVENT_PINNED)
+- ctx_pinned_sched_in(ctx, NULL);
++ ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
+
+ /* Then walk through the lower prio flexible groups */
+ if (is_active & EVENT_FLEXIBLE)
+- ctx_flexible_sched_in(ctx, NULL);
++ ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
+ }
+
+ static void perf_event_context_sched_in(struct task_struct *task)
+@@ -3919,11 +3950,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
+
+ if (cpuctx->task_ctx == ctx) {
+ perf_ctx_lock(cpuctx, ctx);
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+
+ perf_ctx_sched_task_cb(ctx, true);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+ perf_ctx_unlock(cpuctx, ctx);
+ goto rcu_unlock;
+ }
+@@ -3936,7 +3967,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ if (!ctx->nr_events)
+ goto unlock;
+
+- perf_ctx_disable(ctx);
++ perf_ctx_disable(ctx, false);
+ /*
+ * We want to keep the following priority order:
+ * cpu pinned (that don't need to move), task pinned,
+@@ -3946,7 +3977,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ * events, no need to flip the cpuctx's events around.
+ */
+ if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
+- perf_ctx_disable(&cpuctx->ctx);
++ perf_ctx_disable(&cpuctx->ctx, false);
+ ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
+ }
+
+@@ -3955,9 +3986,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
+ perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
+
+ if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
+- perf_ctx_enable(&cpuctx->ctx);
++ perf_ctx_enable(&cpuctx->ctx, false);
+
+- perf_ctx_enable(ctx);
++ perf_ctx_enable(ctx, false);
+
+ unlock:
+ perf_ctx_unlock(cpuctx, ctx);
+@@ -4073,7 +4104,11 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
+ period = perf_calculate_period(event, nsec, count);
+
+ delta = (s64)(period - hwc->sample_period);
+- delta = (delta + 7) / 8; /* low pass filter */
++ if (delta >= 0)
++ delta += 7;
++ else
++ delta -= 7;
++ delta /= 8; /* low pass filter */
+
+ sample_period = hwc->sample_period + delta;
+
+@@ -4811,6 +4846,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ void *task_ctx_data = NULL;
+
+ if (!ctx->task) {
++ /*
++ * perf_pmu_migrate_context() / __perf_pmu_install_event()
++ * relies on the fact that find_get_pmu_context() cannot fail
++ * for CPU contexts.
++ */
+ struct perf_cpu_pmu_context *cpc;
+
+ cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
+@@ -5140,9 +5180,35 @@ static bool exclusive_event_installable(struct perf_event *event,
+ static void perf_addr_filters_splice(struct perf_event *event,
+ struct list_head *head);
+
++static void perf_pending_task_sync(struct perf_event *event)
++{
++ struct callback_head *head = &event->pending_task;
++
++ if (!event->pending_work)
++ return;
++ /*
++ * If the task is queued to the current task's queue, we
++ * obviously can't wait for it to complete. Simply cancel it.
++ */
++ if (task_work_cancel(current, head)) {
++ event->pending_work = 0;
++ local_dec(&event->ctx->nr_pending);
++ return;
++ }
++
++ /*
++ * All accesses related to the event are within the same
++ * non-preemptible section in perf_pending_task(). The RCU
++ * grace period before the event is freed will make sure all
++ * those accesses are complete by then.
++ */
++ rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
++}
++
+ static void _free_event(struct perf_event *event)
+ {
+ irq_work_sync(&event->pending_irq);
++ perf_pending_task_sync(event);
+
+ unaccount_event(event);
+
+@@ -5318,6 +5384,7 @@ int perf_event_release_kernel(struct perf_event *event)
+ again:
+ mutex_lock(&event->child_mutex);
+ list_for_each_entry(child, &event->child_list, child_list) {
++ void *var = NULL;
+
+ /*
+ * Cannot change, child events are not migrated, see the
+@@ -5358,11 +5425,23 @@ int perf_event_release_kernel(struct perf_event *event)
+ * this can't be the last reference.
+ */
+ put_event(event);
++ } else {
++ var = &ctx->refcount;
+ }
+
+ mutex_unlock(&event->child_mutex);
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
++
++ if (var) {
++ /*
++ * If perf_event_free_task() has deleted all events from the
++ * ctx while the child_mutex got released above, make sure to
++ * notify about the preceding put_ctx().
++ */
++ smp_mb(); /* pairs with wait_var_event() */
++ wake_up_var(var);
++ }
+ goto again;
+ }
+ mutex_unlock(&event->child_mutex);
+@@ -6282,12 +6361,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ event->pmu->event_unmapped(event, vma->vm_mm);
+
+ /*
+- * rb->aux_mmap_count will always drop before rb->mmap_count and
+- * event->mmap_count, so it is ok to use event->mmap_mutex to
+- * serialize with perf_mmap here.
++	 * The AUX buffer is strictly a sub-buffer; serialize using aux_mutex
++ * to avoid complications.
+ */
+ if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
+- atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
++ atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
+ /*
+ * Stop all AUX events that are writing to this buffer,
+ * so that we can free its AUX pages and corresponding PMU
+@@ -6304,7 +6382,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ rb_free_aux(rb);
+ WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
+
+- mutex_unlock(&event->mmap_mutex);
++ mutex_unlock(&rb->aux_mutex);
+ }
+
+ if (atomic_dec_and_test(&rb->mmap_count))
+@@ -6392,6 +6470,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ struct perf_event *event = file->private_data;
+ unsigned long user_locked, user_lock_limit;
+ struct user_struct *user = current_user();
++ struct mutex *aux_mutex = NULL;
+ struct perf_buffer *rb = NULL;
+ unsigned long locked, lock_limit;
+ unsigned long vma_size;
+@@ -6430,6 +6509,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ return -EINVAL;
+
+ nr_pages = vma_size / PAGE_SIZE;
++ if (nr_pages > INT_MAX)
++ return -ENOMEM;
+
+ mutex_lock(&event->mmap_mutex);
+ ret = -EINVAL;
+@@ -6438,6 +6519,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ if (!rb)
+ goto aux_unlock;
+
++ aux_mutex = &rb->aux_mutex;
++ mutex_lock(aux_mutex);
++
+ aux_offset = READ_ONCE(rb->user_page->aux_offset);
+ aux_size = READ_ONCE(rb->user_page->aux_size);
+
+@@ -6588,6 +6672,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ atomic_dec(&rb->mmap_count);
+ }
+ aux_unlock:
++ if (aux_mutex)
++ mutex_unlock(aux_mutex);
+ mutex_unlock(&event->mmap_mutex);
+
+ /*
+@@ -6760,24 +6846,28 @@ static void perf_pending_task(struct callback_head *head)
+ struct perf_event *event = container_of(head, struct perf_event, pending_task);
+ int rctx;
+
++ /*
++ * All accesses to the event must belong to the same implicit RCU read-side
++ * critical section as the ->pending_work reset. See comment in
++ * perf_pending_task_sync().
++ */
++ preempt_disable_notrace();
+ /*
+ * If we 'fail' here, that's OK, it means recursion is already disabled
+ * and we won't recurse 'further'.
+ */
+- preempt_disable_notrace();
+ rctx = perf_swevent_get_recursion_context();
+
+ if (event->pending_work) {
+ event->pending_work = 0;
+ perf_sigtrap(event);
+ local_dec(&event->ctx->nr_pending);
++ rcuwait_wake_up(&event->pending_work_wait);
+ }
+
+ if (rctx >= 0)
+ perf_swevent_put_recursion_context(rctx);
+ preempt_enable_notrace();
+-
+- put_event(event);
+ }
+
+ #ifdef CONFIG_GUEST_PERF_EVENTS
+@@ -9223,21 +9313,19 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
+ bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
+ int i;
+
+- if (prog->aux->func_cnt == 0) {
+- perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
+- (u64)(unsigned long)prog->bpf_func,
+- prog->jited_len, unregister,
+- prog->aux->ksym.name);
+- } else {
+- for (i = 0; i < prog->aux->func_cnt; i++) {
+- struct bpf_prog *subprog = prog->aux->func[i];
+-
+- perf_event_ksymbol(
+- PERF_RECORD_KSYMBOL_TYPE_BPF,
+- (u64)(unsigned long)subprog->bpf_func,
+- subprog->jited_len, unregister,
+- subprog->aux->ksym.name);
+- }
++ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
++ (u64)(unsigned long)prog->bpf_func,
++ prog->jited_len, unregister,
++ prog->aux->ksym.name);
++
++ for (i = 1; i < prog->aux->func_cnt; i++) {
++ struct bpf_prog *subprog = prog->aux->func[i];
++
++ perf_event_ksymbol(
++ PERF_RECORD_KSYMBOL_TYPE_BPF,
++ (u64)(unsigned long)subprog->bpf_func,
++ subprog->jited_len, unregister,
++ subprog->aux->ksym.name);
+ }
+ }
+
+@@ -11378,9 +11466,30 @@ static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
+ static struct attribute *pmu_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_perf_event_mux_interval_ms.attr,
++ &dev_attr_nr_addr_filters.attr,
++ NULL,
++};
++
++static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct pmu *pmu = dev_get_drvdata(dev);
++
++ if (n == 2 && !pmu->nr_addr_filters)
++ return 0;
++
++ return a->mode;
++}
++
++static struct attribute_group pmu_dev_attr_group = {
++ .is_visible = pmu_dev_is_visible,
++ .attrs = pmu_dev_attrs,
++};
++
++static const struct attribute_group *pmu_dev_groups[] = {
++ &pmu_dev_attr_group,
+ NULL,
+ };
+-ATTRIBUTE_GROUPS(pmu_dev);
+
+ static int pmu_bus_running;
+ static struct bus_type pmu_bus = {
+@@ -11417,18 +11526,11 @@ static int pmu_dev_alloc(struct pmu *pmu)
+ if (ret)
+ goto free_dev;
+
+- /* For PMUs with address filters, throw in an extra attribute: */
+- if (pmu->nr_addr_filters)
+- ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
+-
+- if (ret)
+- goto del_dev;
+-
+- if (pmu->attr_update)
++ if (pmu->attr_update) {
+ ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
+-
+- if (ret)
+- goto del_dev;
++ if (ret)
++ goto del_dev;
++ }
+
+ out:
+ return ret;
+@@ -11867,6 +11969,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ init_waitqueue_head(&event->waitq);
+ init_irq_work(&event->pending_irq, perf_pending_irq);
+ init_task_work(&event->pending_task, perf_pending_task);
++ rcuwait_init(&event->pending_work_wait);
+
+ mutex_init(&event->mmap_mutex);
+ raw_spin_lock_init(&event->addr_filters.lock);
+@@ -12872,6 +12975,9 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ int cpu, struct perf_event *event)
+ {
+ struct perf_event_pmu_context *epc;
++ struct perf_event_context *old_ctx = event->ctx;
++
++ get_ctx(ctx); /* normally find_get_context() */
+
+ event->cpu = cpu;
+ epc = find_get_pmu_context(pmu, ctx, event);
+@@ -12880,6 +12986,11 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ if (event->state >= PERF_EVENT_STATE_OFF)
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ perf_install_in_context(ctx, event, cpu);
++
++ /*
++ * Now that event->ctx is updated and visible, put the old ctx.
++ */
++ put_ctx(old_ctx);
+ }
+
+ static void __perf_pmu_install(struct perf_event_context *ctx,
+@@ -12918,6 +13029,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ struct perf_event_context *src_ctx, *dst_ctx;
+ LIST_HEAD(events);
+
++ /*
++ * Since per-cpu context is persistent, no need to grab an extra
++ * reference.
++ */
+ src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
+ dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
+
+@@ -13223,6 +13338,15 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+ return &event->attr;
+ }
+
++int perf_allow_kernel(struct perf_event_attr *attr)
++{
++ if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
++ return -EACCES;
++
++ return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
++}
++EXPORT_SYMBOL_GPL(perf_allow_kernel);
++
+ /*
+ * Inherit an event from parent task to child task.
+ *
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 5150d5f84c033e..f376b057320ce8 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -40,6 +40,7 @@ struct perf_buffer {
+ struct user_struct *mmap_user;
+
+ /* AUX area */
++ struct mutex aux_mutex;
+ long aux_head;
+ unsigned int aux_nest;
+ long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */
+@@ -128,7 +129,7 @@ static inline unsigned long perf_data_size(struct perf_buffer *rb)
+
+ static inline unsigned long perf_aux_size(struct perf_buffer *rb)
+ {
+- return rb->aux_nr_pages << PAGE_SHIFT;
++ return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
+ }
+
+ #define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...) \
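The internal.h hunk widens rb->aux_nr_pages before the shift: aux_nr_pages is a 32-bit int, so without the cast the shift is performed in 32-bit arithmetic and truncates for AUX buffers of 4 GiB and up. A minimal userspace sketch of the failure mode (plain C, PAGE_SHIFT = 12 assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumption: 4 KiB pages */

    int main(void)
    {
        unsigned int nr_pages = 1u << 20; /* 1Mi pages = 4 GiB of AUX space */

        /* Buggy: the shift happens in 32-bit arithmetic and wraps to 0
         * before the result is widened to unsigned long. */
        unsigned long bad = nr_pages << PAGE_SHIFT;

        /* Fixed: widen first, then shift, as perf_aux_size() now does. */
        unsigned long good = (unsigned long)nr_pages << PAGE_SHIFT;

        printf("bad=%lu good=%lu\n", bad, good); /* bad=0 good=4294967296 */
        return 0;
    }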
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index fb1e180b5f0af7..b0930b41855276 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -333,6 +333,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
+ */
+ if (!rb->nr_pages)
+ rb->paused = 1;
++
++ mutex_init(&rb->aux_mutex);
+ }
+
+ void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
+@@ -684,7 +686,9 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ * max_order, to aid PMU drivers in double buffering.
+ */
+ if (!watermark)
+- watermark = nr_pages << (PAGE_SHIFT - 1);
++ watermark = min_t(unsigned long,
++ U32_MAX,
++ (unsigned long)nr_pages << (PAGE_SHIFT - 1));
+
+ /*
+ * Use aux_watermark as the basis for chunking to
+@@ -700,6 +704,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ watermark = 0;
+ }
+
++ /*
++ * kcalloc_node() cannot allocate a buffer larger than
++ * PAGE_SIZE << MAX_ORDER, so bail out directly in this case.
++ */
++ if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
++ return -ENOMEM;
+ rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ node);
+ if (!rb->aux_pages)
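The rb_alloc_aux() hunks bound both values derived from nr_pages: the default watermark is clamped with min_t() to U32_MAX before landing in the 32-bit aux_watermark field, and the page-array allocation bails out early once it would exceed the allocator's PAGE_SIZE << MAX_ORDER limit. A userspace sketch of the early bail-out (get_order() re-implemented for illustration; MAX_ORDER = 10 is an assumption matching current kernels):

    #include <stdio.h>
    #include <errno.h>

    #define PAGE_SIZE 4096UL /* assumption */
    #define MAX_ORDER 10     /* assumption: largest buddy order */

    /* Minimal stand-in for the kernel's get_order(): smallest 'order'
     * such that (PAGE_SIZE << order) >= size. */
    static int get_order(unsigned long size)
    {
        int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    static int check_aux_array(long nr_pages)
    {
        /* Mirrors the new guard: kcalloc() cannot satisfy requests above
         * PAGE_SIZE << MAX_ORDER, so fail early with -ENOMEM. */
        if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
            return -ENOMEM;
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_aux_array(512), check_aux_array(1L << 30));
        return 0; /* prints "0 -12" */
    }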
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 3048589e2e8516..6dac0b5798213b 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1480,7 +1480,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+ uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+ struct xol_area *area;
+
+- area = kmalloc(sizeof(*area), GFP_KERNEL);
++ area = kzalloc(sizeof(*area), GFP_KERNEL);
+ if (unlikely(!area))
+ goto out;
+
+@@ -1490,9 +1490,8 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+ goto free_area;
+
+ area->xol_mapping.name = "[uprobes]";
+- area->xol_mapping.fault = NULL;
+ area->xol_mapping.pages = area->pages;
+- area->pages[0] = alloc_page(GFP_HIGHUSER);
++ area->pages[0] = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+ if (!area->pages[0])
+ goto free_bitmap;
+ area->pages[1] = NULL;
+@@ -2072,6 +2071,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+ bool need_prep = false; /* prepare return uprobe, when needed */
+
+ down_read(&uprobe->register_rwsem);
++ current->utask->auprobe = &uprobe->arch;
+ for (uc = uprobe->consumers; uc; uc = uc->next) {
+ int rc = 0;
+
+@@ -2086,6 +2086,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+
+ remove &= rc;
+ }
++ current->utask->auprobe = NULL;
+
+ if (need_prep && !remove)
+ prepare_uretprobe(uprobe, regs); /* put bp at return */
+diff --git a/kernel/exit.c b/kernel/exit.c
+index edb50b4c99728e..3540b2c9b1b6a0 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -485,6 +485,8 @@ void mm_update_next_owner(struct mm_struct *mm)
+ * Search through everything else, we should not get here often.
+ */
+ for_each_process(g) {
++ if (atomic_read(&mm->mm_users) <= 1)
++ break;
+ if (g->flags & PF_KTHREAD)
+ continue;
+ for_each_thread(g, c) {
+@@ -824,8 +826,6 @@ void __noreturn do_exit(long code)
+ ptrace_event(PTRACE_EVENT_EXIT, code);
+ user_events_exit(tsk);
+
+- validate_creds_for_do_exit(tsk);
+-
+ io_uring_files_cancel();
+ exit_signals(tsk); /* sets PF_EXITING */
+
+@@ -912,7 +912,6 @@ void __noreturn do_exit(long code)
+ if (tsk->task_frag.page)
+ put_page(tsk->task_frag.page);
+
+- validate_creds_for_do_exit(tsk);
+ exit_task_stack_account(tsk);
+
+ check_stack_usage();
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 3b6d20dfb9a85e..32ffbc1c96bae3 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1288,7 +1288,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ hugetlb_count_init(mm);
+
+ if (current->mm) {
+- mm->flags = current->mm->flags & MMF_INIT_MASK;
++ mm->flags = mmf_init_flags(current->mm->flags);
+ mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
+ } else {
+ mm->flags = default_dump_filter;
+@@ -1767,33 +1767,30 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
+ int no_files)
+ {
+ struct files_struct *oldf, *newf;
+- int error = 0;
+
+ /*
+ * A background process may not have any files ...
+ */
+ oldf = current->files;
+ if (!oldf)
+- goto out;
++ return 0;
+
+ if (no_files) {
+ tsk->files = NULL;
+- goto out;
++ return 0;
+ }
+
+ if (clone_flags & CLONE_FILES) {
+ atomic_inc(&oldf->count);
+- goto out;
++ return 0;
+ }
+
+- newf = dup_fd(oldf, NR_OPEN_MAX, &error);
+- if (!newf)
+- goto out;
++ newf = dup_fd(oldf, NULL);
++ if (IS_ERR(newf))
++ return PTR_ERR(newf);
+
+ tsk->files = newf;
+- error = 0;
+-out:
+- return error;
++ return 0;
+ }
+
+ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
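The copy_files() rewrite switches dup_fd() to the usual kernel error-pointer convention: the function returns either a valid pointer or an error encoded with ERR_PTR(), so the extra out-parameter and goto bookkeeping disappear. A self-contained sketch of the convention (simplified userspace re-creation of the ERR_PTR()/IS_ERR()/PTR_ERR() macros; the real definitions live in include/linux/err.h):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Errno values are encoded as pointers into the top page of the
     * address space, so one return value carries pointer-or-error. */
    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static void *dup_fd_sketch(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM); /* error path */
        return malloc(64);           /* success path */
    }

    int main(void)
    {
        void *p = dup_fd_sketch(1);

        if (IS_ERR(p)) {
            printf("dup_fd failed: %ld\n", PTR_ERR(p)); /* -12 */
            return 1;
        }
        free(p);
        return 0;
    }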
+@@ -3358,17 +3355,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+ /*
+ * Unshare file descriptor table if it is being shared
+ */
+-int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
+- struct files_struct **new_fdp)
++static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
+ {
+ struct files_struct *fd = current->files;
+- int error = 0;
+
+ if ((unshare_flags & CLONE_FILES) &&
+ (fd && atomic_read(&fd->count) > 1)) {
+- *new_fdp = dup_fd(fd, max_fds, &error);
+- if (!*new_fdp)
+- return error;
++ fd = dup_fd(fd, NULL);
++ if (IS_ERR(fd))
++ return PTR_ERR(fd);
++ *new_fdp = fd;
+ }
+
+ return 0;
+@@ -3426,7 +3422,7 @@ int ksys_unshare(unsigned long unshare_flags)
+ err = unshare_fs(unshare_flags, &new_fs);
+ if (err)
+ goto bad_unshare_out;
+- err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
++ err = unshare_fd(unshare_flags, &new_fd);
+ if (err)
+ goto bad_unshare_cleanup_fs;
+ err = unshare_userns(unshare_flags, &new_cred);
+@@ -3518,7 +3514,7 @@ int unshare_files(void)
+ struct files_struct *old, *copy = NULL;
+ int error;
+
+- error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
++ error = unshare_fd(CLONE_FILES, &copy);
+ if (error || !copy)
+ return error;
+
+diff --git a/kernel/futex/core.c b/kernel/futex/core.c
+index f10587d1d48170..f30a93e50f65e8 100644
+--- a/kernel/futex/core.c
++++ b/kernel/futex/core.c
+@@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
+ * but access_ok() should be faster than find_vma()
+ */
+ if (!fshared) {
+- key->private.mm = mm;
++ /*
++ * On no-MMU, shared futexes are treated as private, therefore
++ * we must not include the current process in the key. Since
++ * there is only one address space, the address is a unique key
++ * on its own.
++ */
++ if (IS_ENABLED(CONFIG_MMU))
++ key->private.mm = mm;
++ else
++ key->private.mm = NULL;
++
+ key->private.address = address;
+ return 0;
+ }
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 74a4ef1da9ad77..fd75b4a484d76a 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -18,7 +18,9 @@
+ #include <linux/mm.h>
+ #include "gcov.h"
+
+-#if (__GNUC__ >= 10)
++#if (__GNUC__ >= 14)
++#define GCOV_COUNTERS 9
++#elif (__GNUC__ >= 10)
+ #define GCOV_COUNTERS 8
+ #elif (__GNUC__ >= 7)
+ #define GCOV_COUNTERS 9
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 6d443ea22bb732..383fd43ac61222 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -14,7 +14,12 @@ include/
+ arch/$SRCARCH/include/
+ "
+
+-type cpio > /dev/null
++if ! command -v cpio >/dev/null; then
++ echo >&2 "***"
++ echo >&2 "*** 'cpio' could not be found."
++ echo >&2 "***"
++ exit 1
++fi
+
+ # Support incremental builds by skipping archive generation
+ # if timestamps of files being archived are not changed.
+@@ -84,7 +89,7 @@ find $cpio_dir -type f -print0 |
+
+ # Create archive and try to normalize metadata for reproducibility.
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+- --owner=0 --group=0 --sort=name --numeric-owner \
++ --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
+ -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
+
+ echo $headers_md5 > kernel/kheaders.md5
+diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
+index 1ed2b1739363b8..eb86283901565b 100644
+--- a/kernel/irq/cpuhotplug.c
++++ b/kernel/irq/cpuhotplug.c
+@@ -69,6 +69,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ return false;
+ }
+
++ /*
++ * Complete an eventually pending irq move cleanup. If this
++ * interrupt was moved in hard irq context, then the vectors need
++ * to be cleaned up. It can't wait until this interrupt actually
++ * happens and this CPU was involved.
++ */
++ irq_force_complete_move(desc);
++
+ /*
+ * No move required, if:
+ * - Interrupt is per cpu
+@@ -87,14 +95,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ return false;
+ }
+
+- /*
+- * Complete an eventually pending irq move cleanup. If this
+- * interrupt was moved in hard irq context, then the vectors need
+- * to be cleaned up. It can't wait until this interrupt actually
+- * happens and this CPU was involved.
+- */
+- irq_force_complete_move(desc);
+-
+ /*
+ * If there is a setaffinity pending, then try to reuse the pending
+ * mask, so the last change of the affinity does not get lost. If
+@@ -130,6 +130,22 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ * CPU.
+ */
+ err = irq_do_set_affinity(d, affinity, false);
++
++ /*
++ * If there are online CPUs in the affinity mask, but they have no
++ * vectors left to make the migration work, try to break the
++ * affinity by migrating to any online CPU.
++ */
++ if (err == -ENOSPC && !irqd_affinity_is_managed(d) && affinity != cpu_online_mask) {
++ pr_debug("IRQ%u: set affinity failed for %*pbl, re-try with online CPUs\n",
++ d->irq, cpumask_pr_args(affinity));
++
++ affinity = cpu_online_mask;
++ brokeaff = true;
++
++ err = irq_do_set_affinity(d, affinity, false);
++ }
++
+ if (err) {
+ pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+ d->irq, err);
+@@ -195,10 +211,15 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
+ !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
+ return;
+
+- if (irqd_is_managed_and_shutdown(data)) {
+- irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
++ /*
++ * Don't restore suspended interrupts here when a system comes back
++ * from S3. They are reenabled via resume_device_irqs().
++ */
++ if (desc->istate & IRQS_SUSPENDED)
+ return;
+- }
++
++ if (irqd_is_managed_and_shutdown(data))
++ irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+
+ /*
+ * If the interrupt can only be directed to a single target
+diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
+index 5971a66be03472..aae0402507ed74 100644
+--- a/kernel/irq/debugfs.c
++++ b/kernel/irq/debugfs.c
+@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
+ BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
+ BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+ BIT_MASK_DESCR(IRQD_CAN_RESERVE),
+- BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
+
+ BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index c653cd31548d03..5a452b94b64348 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -544,21 +544,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
+ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ unsigned int clr, unsigned int set)
+ {
+- unsigned int i = gc->irq_base;
++ unsigned int i, virq;
+
+ raw_spin_lock(&gc_lock);
+ list_del(&gc->list);
+ raw_spin_unlock(&gc_lock);
+
+- for (; msk; msk >>= 1, i++) {
++ for (i = 0; msk; msk >>= 1, i++) {
+ if (!(msk & 0x01))
+ continue;
+
++ /*
++ * Interrupt domain based chips store the base hardware
++ * interrupt number in gc::irq_base. Otherwise gc::irq_base
++ * contains the base Linux interrupt number.
++ */
++ if (gc->domain) {
++ virq = irq_find_mapping(gc->domain, gc->irq_base + i);
++ if (!virq)
++ continue;
++ } else {
++ virq = gc->irq_base + i;
++ }
++
+ /* Remove handler first. That will mask the irq line */
+- irq_set_handler(i, NULL);
+- irq_set_chip(i, &no_irq_chip);
+- irq_set_chip_data(i, NULL);
+- irq_modify_status(i, clr, set);
++ irq_set_handler(virq, NULL);
++ irq_set_chip(virq, &no_irq_chip);
++ irq_set_chip_data(virq, NULL);
++ irq_modify_status(virq, clr, set);
+ }
+ }
+ EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 27ca1c866f298b..46094f0c9fcdad 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -148,7 +148,10 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
+ static unsigned int irq_find_at_or_after(unsigned int offset)
+ {
+ unsigned long index = offset;
+- struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
++ struct irq_desc *desc;
++
++ guard(rcu)();
++ desc = mt_find(&sparse_irqs, &index, nr_irqs);
+
+ return desc ? irq_desc_get_irq(desc) : nr_irqs;
+ }
+@@ -514,6 +517,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ flags = IRQD_AFFINITY_MANAGED |
+ IRQD_MANAGED_SHUTDOWN;
+ }
++ flags |= IRQD_AFFINITY_SET;
+ mask = &affinity->mask;
+ node = cpu_to_node(cpumask_first(mask));
+ affinity++;
+@@ -600,7 +604,7 @@ int __init early_irq_init(void)
+ mutex_init(&desc[i].request_mutex);
+ init_waitqueue_head(&desc[i].wait_for_threads);
+ desc_set_defaults(i, &desc[i], node, NULL, NULL);
+- irq_resend_init(desc);
++ irq_resend_init(&desc[i]);
+ }
+ return arch_early_irq_init();
+ }
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 0bdef4fe925bf5..ddaaccdc09faef 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -154,7 +154,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ switch (fwid->type) {
+ case IRQCHIP_FWNODE_NAMED:
+ case IRQCHIP_FWNODE_NAMED_ID:
+- domain->fwnode = fwnode;
+ domain->name = kstrdup(fwid->name, GFP_KERNEL);
+ if (!domain->name) {
+ kfree(domain);
+@@ -163,7 +162,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ break;
+ default:
+- domain->fwnode = fwnode;
+ domain->name = fwid->name;
+ break;
+ }
+@@ -183,7 +181,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ }
+
+ domain->name = strreplace(name, '/', ':');
+- domain->fwnode = fwnode;
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ }
+
+@@ -199,8 +196,8 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
+ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+ }
+
+- fwnode_handle_get(fwnode);
+- fwnode_dev_initialized(fwnode, true);
++ domain->fwnode = fwnode_handle_get(fwnode);
++ fwnode_dev_initialized(domain->fwnode, true);
+
+ /* Fill structure */
+ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index d309ba84e08a9d..8a936c1ffad390 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -796,10 +796,14 @@ void __enable_irq(struct irq_desc *desc)
+ irq_settings_set_noprobe(desc);
+ /*
+ * Call irq_startup() not irq_enable() here because the
+- * interrupt might be marked NOAUTOEN. So irq_startup()
+- * needs to be invoked when it gets enabled the first
+- * time. If it was already started up, then irq_startup()
+- * will invoke irq_enable() under the hood.
++ * interrupt might be marked NOAUTOEN so irq_startup()
++ * needs to be invoked when it gets enabled the first time.
++ * This is also required when __enable_irq() is invoked for
++ * a managed and shutdown interrupt from the S3 resume
++ * path.
++ *
++ * If it was already started up, then irq_startup() will
++ * invoke irq_enable() under the hood.
+ */
+ irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
+ break;
+@@ -1332,7 +1336,7 @@ static int irq_thread(void *data)
+ * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+ * oneshot mask bit can be set.
+ */
+- task_work_cancel(current, irq_thread_dtor);
++ task_work_cancel_func(current, irq_thread_dtor);
+ return 0;
+ }
+
+@@ -1852,15 +1856,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ struct task_struct *t = new->thread;
+
+ new->thread = NULL;
+- kthread_stop(t);
+- put_task_struct(t);
++ kthread_stop_put(t);
+ }
+ if (new->secondary && new->secondary->thread) {
+ struct task_struct *t = new->secondary->thread;
+
+ new->secondary->thread = NULL;
+- kthread_stop(t);
+- put_task_struct(t);
++ kthread_stop_put(t);
+ }
+ out_mput:
+ module_put(desc->owner);
+@@ -1971,12 +1973,9 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+ * the same bit to a newly requested action.
+ */
+ if (action->thread) {
+- kthread_stop(action->thread);
+- put_task_struct(action->thread);
+- if (action->secondary && action->secondary->thread) {
+- kthread_stop(action->secondary->thread);
+- put_task_struct(action->secondary->thread);
+- }
++ kthread_stop_put(action->thread);
++ if (action->secondary && action->secondary->thread)
++ kthread_stop_put(action->secondary->thread);
+ }
+
+ /* Last action releases resources */
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 1698e77645acf7..75d0ae490e29cd 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
+ }
+
+ /**
+- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
++ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
+ * @m: Pointer to the matrix to search
+ *
+- * This returns number of allocated irqs
++ * This returns number of allocated non-managed interrupts.
+ */
+ unsigned int irq_matrix_allocated(struct irq_matrix *m)
+ {
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+- return cm->allocated;
++ return cm->allocated - cm->managed_allocated;
+ }
+
+ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index b4c31a5c11473c..79b4a58ba9c3f2 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -1204,7 +1204,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+
+ #define VIRQ_CAN_RESERVE 0x01
+ #define VIRQ_ACTIVATE 0x02
+-#define VIRQ_NOMASK_QUIRK 0x04
+
+ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+ {
+@@ -1213,8 +1212,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
+
+ if (!(vflags & VIRQ_CAN_RESERVE)) {
+ irqd_clr_can_reserve(irqd);
+- if (vflags & VIRQ_NOMASK_QUIRK)
+- irqd_set_msi_nomask_quirk(irqd);
+
+ /*
+ * If the interrupt is managed but no CPU is available to
+@@ -1275,15 +1272,8 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain
+ * Interrupt can use a reserved vector and will not occupy
+ * a real device vector until the interrupt is requested.
+ */
+- if (msi_check_reservation_mode(domain, info, dev)) {
++ if (msi_check_reservation_mode(domain, info, dev))
+ vflags |= VIRQ_CAN_RESERVE;
+- /*
+- * MSI affinity setting requires a special quirk (X86) when
+- * reservation mode is active.
+- */
+- if (info->flags & MSI_FLAG_NOMASK_QUIRK)
+- vflags |= VIRQ_NOMASK_QUIRK;
+- }
+
+ xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
+ if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index d9c822bbffb8d3..554e04b25b13a8 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -131,7 +131,7 @@ bool static_key_fast_inc_not_disabled(struct static_key *key)
+ STATIC_KEY_CHECK_USE(key);
+ /*
+ * Negative key->enabled has a special meaning: it sends
+- * static_key_slow_inc() down the slow path, and it is non-zero
++ * static_key_slow_inc/dec() down the slow path, and it is non-zero
+ * so it counts as "enabled" in jump_label_update(). Note that
+ * atomic_inc_unless_negative() checks >= 0, so roll our own.
+ */
+@@ -150,7 +150,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
+ lockdep_assert_cpus_held();
+
+ /*
+- * Careful if we get concurrent static_key_slow_inc() calls;
++ * Careful if we get concurrent static_key_slow_inc/dec() calls;
+ * later calls must wait for the first one to _finish_ the
+ * jump_label_update() process. At the same time, however,
+ * the jump_label_update() call below wants to see
+@@ -159,22 +159,24 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
+ if (static_key_fast_inc_not_disabled(key))
+ return true;
+
+- jump_label_lock();
+- if (atomic_read(&key->enabled) == 0) {
+- atomic_set(&key->enabled, -1);
++ guard(mutex)(&jump_label_mutex);
++ /* Try to mark it as 'enabling in progress'. */
++ if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
+ jump_label_update(key);
+ /*
+- * Ensure that if the above cmpxchg loop observes our positive
+- * value, it must also observe all the text changes.
++ * Ensure that when static_key_fast_inc_not_disabled() or
++ * static_key_dec_not_one() observe the positive value,
++ * they must also observe all the text changes.
+ */
+ atomic_set_release(&key->enabled, 1);
+ } else {
+- if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
+- jump_label_unlock();
++ /*
++ * While holding the mutex this should never observe
++ * anything other than a value >= 1 and should succeed.
++ */
++ if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
+ return false;
+- }
+ }
+- jump_label_unlock();
+ return true;
+ }
+
+@@ -231,7 +233,7 @@ void static_key_disable_cpuslocked(struct static_key *key)
+ }
+
+ jump_label_lock();
+- if (atomic_cmpxchg(&key->enabled, 1, 0))
++ if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ jump_label_update(key);
+ jump_label_unlock();
+ }
+@@ -245,36 +247,69 @@ void static_key_disable(struct static_key *key)
+ }
+ EXPORT_SYMBOL_GPL(static_key_disable);
+
+-static bool static_key_slow_try_dec(struct static_key *key)
++static bool static_key_dec_not_one(struct static_key *key)
+ {
+- int val;
+-
+- val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+- if (val == 1)
+- return false;
++ int v;
+
+ /*
+- * The negative count check is valid even when a negative
+- * key->enabled is in use by static_key_slow_inc(); a
+- * __static_key_slow_dec() before the first static_key_slow_inc()
+- * returns is unbalanced, because all other static_key_slow_inc()
+- * instances block while the update is in progress.
++ * Go into the slow path if key::enabled is less than or equal to
++ * one. One is valid to shut down the key, anything less than one
++ * is an imbalance, which is handled at the call site.
++ *
++ * That includes the special case of '-1' which is set in
++ * static_key_slow_inc_cpuslocked(), but that's harmless as it is
++ * fully serialized in the slow path below. By the time this task
++ * acquires the jump label lock the value is back to one and the
++ * retry under the lock must succeed.
+ */
+- WARN(val < 0, "jump label: negative count!\n");
++ v = atomic_read(&key->enabled);
++ do {
++ /*
++ * Warn about the '-1' case though, since that means a
++ * decrement is concurrent with a first (0->1) increment. IOW
++ * people are trying to disable something that wasn't yet fully
++ * enabled. This suggests an ordering problem on the user side.
++ */
++ WARN_ON_ONCE(v < 0);
++
++ /*
++ * Warn about underflow, and lie about success in an attempt to
++ * not make things worse.
++ */
++ if (WARN_ON_ONCE(v == 0))
++ return true;
++
++ if (v <= 1)
++ return false;
++ } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
++
+ return true;
+ }
+
+ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+ {
+ lockdep_assert_cpus_held();
++ int val;
+
+- if (static_key_slow_try_dec(key))
++ if (static_key_dec_not_one(key))
++ return;
++
++ guard(mutex)(&jump_label_mutex);
++ val = atomic_read(&key->enabled);
++ /*
++ * It should be impossible to observe -1 with jump_label_mutex held,
++ * see static_key_slow_inc_cpuslocked().
++ */
++ if (WARN_ON_ONCE(val == -1))
++ return;
++ /*
++ * Cannot already be 0, something went sideways.
++ */
++ if (WARN_ON_ONCE(val == 0))
+ return;
+
+- jump_label_lock();
+ if (atomic_dec_and_test(&key->enabled))
+ jump_label_update(key);
+- jump_label_unlock();
+ }
+
+ static void __static_key_slow_dec(struct static_key *key)
+@@ -311,7 +346,7 @@ void __static_key_slow_dec_deferred(struct static_key *key,
+ {
+ STATIC_KEY_CHECK_USE(key);
+
+- if (static_key_slow_try_dec(key))
++ if (static_key_dec_not_one(key))
+ return;
+
+ schedule_delayed_work(work, timeout);
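static_key_dec_not_one() is a lock-free fast path: it only decrements key->enabled while the counter is greater than one, and falls back to the mutex-protected slow path for the final 1 -> 0 transition, which must also patch the code. The shape of that atomic_try_cmpxchg() loop, sketched with C11 atomics:

    #include <stdio.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Decrement 'v' unless it is <= 1; returns false when the caller
     * must take the slow path for the final transition. Mirrors the
     * structure of static_key_dec_not_one(). */
    static bool dec_not_one(atomic_int *v)
    {
        int cur = atomic_load(v);

        do {
            if (cur <= 1)
                return false; /* slow path handles 1 -> 0 */
        } while (!atomic_compare_exchange_weak(v, &cur, cur - 1));

        return true;
    }

    int main(void)
    {
        atomic_int enabled = 3;

        while (dec_not_one(&enabled))
            ;
        printf("left for slow path: %d\n", atomic_load(&enabled)); /* 1 */
        return 0;
    }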
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index f9ac2e9e460fc8..72d9aa6fb50c3e 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -161,6 +161,15 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
+ kmsan_unpoison_memory(&area->list, sizeof(area->list));
+ }
+
++/*
++ * Unlike in_serving_softirq(), this function returns false when called during
++ * a hardirq or an NMI that happened in the softirq context.
++ */
++static inline bool in_softirq_really(void)
++{
++ return in_serving_softirq() && !in_hardirq() && !in_nmi();
++}
++
+ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+ {
+ unsigned int mode;
+@@ -170,7 +179,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
+ * so we ignore code executed in interrupts, unless we are in a remote
+ * coverage collection section in a softirq.
+ */
+- if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
++ if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
+ return false;
+ mode = READ_ONCE(t->kcov_mode);
+ /*
+@@ -631,6 +640,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
+ return -EINVAL;
+ kcov->mode = mode;
+ t->kcov = kcov;
++ t->kcov_mode = KCOV_MODE_REMOTE;
+ kcov->t = t;
+ kcov->remote = true;
+ kcov->remote_size = remote_arg->area_size;
+@@ -847,7 +857,7 @@ void kcov_remote_start(u64 handle)
+
+ if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
+ return;
+- if (!in_task() && !in_serving_softirq())
++ if (!in_task() && !in_softirq_really())
+ return;
+
+ local_lock_irqsave(&kcov_percpu_data.lock, flags);
+@@ -989,7 +999,7 @@ void kcov_remote_stop(void)
+ int sequence;
+ unsigned long flags;
+
+- if (!in_task() && !in_serving_softirq())
++ if (!in_task() && !in_softirq_really())
+ return;
+
+ local_lock_irqsave(&kcov_percpu_data.lock, flags);
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 107f355eac1012..8f35a5a42af852 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -247,7 +247,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
+ return -EINVAL;
+
+- ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
++ ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
+ if (IS_ERR(ksegments))
+ return PTR_ERR(ksegments);
+
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index 9dc728982d79a4..b7246b7171b73b 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -1271,6 +1271,7 @@ int kernel_kexec(void)
+ kexec_in_progress = true;
+ kernel_restart_prepare("kexec reboot");
+ migrate_to_reboot_cpu();
++ syscore_shutdown();
+
+ /*
+ * migrate_to_reboot_cpu() disables CPU hotplug assuming that
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index f9a419cd22d4c7..830344627e9f20 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -728,7 +728,7 @@ static int kexec_calculate_store_digests(struct kimage *image)
+
+ #ifdef CONFIG_CRASH_HOTPLUG
+ /* Exclude elfcorehdr segment to allow future changes via hotplug */
+- if (j == image->elfcorehdr_index)
++ if (i == image->elfcorehdr_index)
+ continue;
+ #endif
+
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 0c6185aefaef57..c10954bd84448b 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1068,6 +1068,7 @@ static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
+
+ static int kprobe_ipmodify_enabled;
+ static int kprobe_ftrace_enabled;
++bool kprobe_ftrace_disabled;
+
+ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
+ int *cnt)
+@@ -1136,6 +1137,11 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
+ ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
+ ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
+ }
++
++void kprobe_ftrace_kill(void)
++{
++ kprobe_ftrace_disabled = true;
++}
+ #else /* !CONFIG_KPROBES_ON_FTRACE */
+ static inline int arm_kprobe_ftrace(struct kprobe *p)
+ {
+@@ -1552,8 +1558,8 @@ static bool is_cfi_preamble_symbol(unsigned long addr)
+ if (lookup_symbol_name(addr, symbuf))
+ return false;
+
+- return str_has_prefix("__cfi_", symbuf) ||
+- str_has_prefix("__pfx_", symbuf);
++ return str_has_prefix(symbuf, "__cfi_") ||
++ str_has_prefix(symbuf, "__pfx_");
+ }
+
+ static int check_kprobe_address_safe(struct kprobe *p,
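The is_cfi_preamble_symbol() fix is a pure argument-order bug: str_has_prefix(str, prefix) tests whether str begins with prefix, so the old calls asked whether the literal "__cfi_" begins with the looked-up symbol name and almost always returned false. A tiny demonstration (str_has_prefix() re-implemented here; in the kernel it lives in include/linux/string.h):

    #include <stdio.h>
    #include <string.h>

    /* Same contract as the kernel helper: returns the prefix length
     * when 'str' starts with 'prefix', 0 otherwise. */
    static size_t str_has_prefix(const char *str, const char *prefix)
    {
        size_t len = strlen(prefix);

        return strncmp(str, prefix, len) == 0 ? len : 0;
    }

    int main(void)
    {
        const char *symbuf = "__cfi_do_exit";

        /* Buggy order: "does '__cfi_' start with the symbol?" -> 0 */
        printf("swapped: %zu\n", str_has_prefix("__cfi_", symbuf));

        /* Fixed order: "does the symbol start with '__cfi_'?" -> 6 */
        printf("fixed:   %zu\n", str_has_prefix(symbuf, "__cfi_"));
        return 0;
    }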
+@@ -1567,10 +1573,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ jump_label_lock();
+ preempt_disable();
+
+- /* Ensure it is not in reserved area nor out of text */
+- if (!(core_kernel_text((unsigned long) p->addr) ||
+- is_module_text_address((unsigned long) p->addr)) ||
+- in_gate_area_no_mm((unsigned long) p->addr) ||
++ /* Ensure the address is in a text area, and find a module if exists. */
++ *probed_mod = NULL;
++ if (!core_kernel_text((unsigned long) p->addr)) {
++ *probed_mod = __module_text_address((unsigned long) p->addr);
++ if (!(*probed_mod)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ }
++ /* Ensure it is not in reserved area. */
++ if (in_gate_area_no_mm((unsigned long) p->addr) ||
+ within_kprobe_blacklist((unsigned long) p->addr) ||
+ jump_label_text_reserved(p->addr, p->addr) ||
+ static_call_text_reserved(p->addr, p->addr) ||
+@@ -1580,8 +1593,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ goto out;
+ }
+
+- /* Check if 'p' is probing a module. */
+- *probed_mod = __module_text_address((unsigned long) p->addr);
++ /* Get module refcount and reject __init functions for loaded modules. */
+ if (*probed_mod) {
+ /*
+ * We must hold a refcount of the probed module while updating
+@@ -2253,7 +2265,7 @@ int register_kretprobe(struct kretprobe *rp)
+ if (!rp->rph)
+ return -ENOMEM;
+
+- rp->rph->rp = rp;
++ rcu_assign_pointer(rp->rph->rp, rp);
+ for (i = 0; i < rp->maxactive; i++) {
+ inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL);
+ if (inst == NULL) {
+@@ -2313,7 +2325,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ rethook_free(rps[i]->rh);
+ #else
+- rps[i]->rph->rp = NULL;
++ rcu_assign_pointer(rps[i]->rph->rp, NULL);
+ #endif
+ }
+ mutex_unlock(&kprobe_mutex);
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 1eea53050babcd..980e6b325b7dc7 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -622,6 +622,8 @@ void kthread_unpark(struct task_struct *k)
+ {
+ struct kthread *kthread = to_kthread(k);
+
++ if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
++ return;
+ /*
+ * Newly created kthread was parked when the CPU was offline.
+ * The binding was lost and we need to set it again.
+@@ -715,6 +717,24 @@ int kthread_stop(struct task_struct *k)
+ }
+ EXPORT_SYMBOL(kthread_stop);
+
++/**
++ * kthread_stop_put - stop a thread and put its task struct
++ * @k: thread created by kthread_create().
++ *
++ * Stops a thread created by kthread_create() and put its task_struct.
++ * Only use when holding an extra task struct reference obtained by
++ * calling get_task_struct().
++ */
++int kthread_stop_put(struct task_struct *k)
++{
++ int ret;
++
++ ret = kthread_stop(k);
++ put_task_struct(k);
++ return ret;
++}
++EXPORT_SYMBOL(kthread_stop_put);
++
+ int kthreadd(void *unused)
+ {
+ struct task_struct *tsk = current;
+@@ -826,8 +846,16 @@ int kthread_worker_fn(void *worker_ptr)
+ * event only cares about the address.
+ */
+ trace_sched_kthread_work_execute_end(work, func);
+- } else if (!freezing(current))
++ } else if (!freezing(current)) {
+ schedule();
++ } else {
++ /*
++ * Handle the case where the current remains
++ * TASK_INTERRUPTIBLE. try_to_freeze() expects
++ * the current to be TASK_RUNNING.
++ */
++ __set_current_state(TASK_RUNNING);
++ }
+
+ try_to_freeze();
+ cond_resched();
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 61328328c474c0..ecbc9b6aba3a10 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
+ * symbols are exported and normal relas can be used instead.
+ */
+ if (!sec_vmlinux && sym_vmlinux) {
+- pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
++ pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
+ sym_name);
+ return -EINVAL;
+ }
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index e85b5ad3e20698..3468d8230e5f75 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3497,7 +3497,8 @@ static int alloc_chain_hlocks(int req)
+ size = chain_block_size(curr);
+ if (likely(size >= req)) {
+ del_chain_block(0, size, chain_block_next(curr));
+- add_chain_block(curr + req, size - req);
++ if (size > req)
++ add_chain_block(curr + req, size - req);
+ return curr;
+ }
+ }
+@@ -6183,25 +6184,27 @@ static struct pending_free *get_pending_free(void)
+ static void free_zapped_rcu(struct rcu_head *cb);
+
+ /*
+- * Schedule an RCU callback if no RCU callback is pending. Must be called with
+- * the graph lock held.
+- */
+-static void call_rcu_zapped(struct pending_free *pf)
++ * See if we need to queue an RCU callback; must be called with
++ * the lockdep lock held. Returns false if either we don't have
++ * any pending free or the callback is already scheduled.
++ * Otherwise, a call_rcu() must follow this function call.
++ */
++static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ {
+ WARN_ON_ONCE(inside_selftest());
+
+ if (list_empty(&pf->zapped))
+- return;
++ return false;
+
+ if (delayed_free.scheduled)
+- return;
++ return false;
+
+ delayed_free.scheduled = true;
+
+ WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+ delayed_free.index ^= 1;
+
+- call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++ return true;
+ }
+
+ /* The caller must hold the graph lock. May be called from RCU context. */
+@@ -6227,6 +6230,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
+ {
+ struct pending_free *pf;
+ unsigned long flags;
++ bool need_callback;
+
+ if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+ return;
+@@ -6238,14 +6242,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
+ pf = delayed_free.pf + (delayed_free.index ^ 1);
+ __free_zapped_classes(pf);
+ delayed_free.scheduled = false;
++ need_callback =
++ prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
++ lockdep_unlock();
++ raw_local_irq_restore(flags);
+
+ /*
+- * If there's anything on the open list, close and start a new callback.
+- */
+- call_rcu_zapped(delayed_free.pf + delayed_free.index);
++ * If there's a pending free and its callback has not been scheduled,
++ * queue an RCU callback.
++ */
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
+- lockdep_unlock();
+- raw_local_irq_restore(flags);
+ }
+
+ /*
+@@ -6285,6 +6293,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
+ {
+ struct pending_free *pf;
+ unsigned long flags;
++ bool need_callback;
+
+ init_data_structures_once();
+
+@@ -6292,10 +6301,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
+ lockdep_lock();
+ pf = get_pending_free();
+ __lockdep_free_key_range(pf, start, size);
+- call_rcu_zapped(pf);
++ need_callback = prepare_call_rcu_zapped(pf);
+ lockdep_unlock();
+ raw_local_irq_restore(flags);
+-
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ /*
+ * Wait for any possible iterators from look_up_lock_class() to pass
+ * before continuing to free the memory they refer to.
+@@ -6389,6 +6399,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
+ struct pending_free *pf;
+ unsigned long flags;
+ int locked;
++ bool need_callback = false;
+
+ raw_local_irq_save(flags);
+ locked = graph_lock();
+@@ -6397,11 +6408,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
+
+ pf = get_pending_free();
+ __lockdep_reset_lock(pf, lock);
+- call_rcu_zapped(pf);
++ need_callback = prepare_call_rcu_zapped(pf);
+
+ graph_unlock();
+ out_irq:
+ raw_local_irq_restore(flags);
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ }
+
+ /*
+@@ -6445,6 +6458,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
+ struct pending_free *pf;
+ unsigned long flags;
+ bool found = false;
++ bool need_callback = false;
+
+ might_sleep();
+
+@@ -6465,11 +6479,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
+ if (found) {
+ pf = get_pending_free();
+ __lockdep_free_key_range(pf, key, 1);
+- call_rcu_zapped(pf);
++ need_callback = prepare_call_rcu_zapped(pf);
+ }
+ lockdep_unlock();
+ raw_local_irq_restore(flags);
+
++ if (need_callback)
++ call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++
+ /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+ synchronize_rcu();
+ }
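The lockdep rework converts call_rcu_zapped() into prepare_call_rcu_zapped(): the decision and the delayed_free.scheduled bookkeeping stay under the graph lock, but the call_rcu() itself moves after the unlock, since queueing the callback while holding lockdep's own lock can recurse back into lockdep and deadlock. The pattern, reduced to a pthread sketch (hypothetical names; queue_callback() stands in for call_rcu()):

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool scheduled;
    static int pending;

    /* Decide under the lock, but do NOT act yet: the caller invokes the
     * side effect only after the lock is dropped, mirroring
     * prepare_call_rcu_zapped(). */
    static bool prepare_callback(void)
    {
        if (!pending)
            return false;
        if (scheduled)
            return false;
        scheduled = true;
        return true;
    }

    static void queue_callback(void) /* stand-in for call_rcu() */
    {
        puts("callback queued outside the lock");
    }

    int main(void)
    {
        bool need_callback;

        pthread_mutex_lock(&state_lock);
        pending = 1;
        need_callback = prepare_callback();
        pthread_mutex_unlock(&state_lock);

        if (need_callback)
            queue_callback();
        return 0;
    }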
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index bc8abb8549d20d..6e6f6071cfa279 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -12,6 +12,7 @@
+ */
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
++#include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/poison.h>
+ #include <linux/sched.h>
+@@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name,
+ lock->magic = lock;
+ }
+
++static void devm_mutex_release(void *res)
++{
++ mutex_destroy(res);
++}
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++ return devm_add_action_or_reset(dev, devm_mutex_release, lock);
++}
++EXPORT_SYMBOL_GPL(__devm_mutex_init);
++
+ /***
+ * mutex_destroy - mark a mutex unusable
+ * @lock: the mutex to be destroyed
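The new __devm_mutex_init() hooks mutex_destroy() into devres teardown through devm_add_action_or_reset(), so a driver that initialises a mutex in probe no longer needs a matching destroy in every error and remove path. The underlying mechanism, reduced to a userspace analogue (hypothetical names; a linked list of actions stands in for the devres list):

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct action {
        void (*fn)(void *);
        void *data;
        struct action *next;
    };

    static struct action *actions;

    /* Analogue of devm_add_action_or_reset(): remember a cleanup action;
     * on allocation failure, run it immediately (the "_or_reset" part). */
    static int devm_add_action(void (*fn)(void *), void *data)
    {
        struct action *a = malloc(sizeof(*a));

        if (!a) {
            fn(data);
            return -1;
        }
        a->fn = fn;
        a->data = data;
        a->next = actions;
        actions = a;
        return 0;
    }

    static void teardown(void) /* runs at "device" release */
    {
        while (actions) {
            struct action *a = actions;

            actions = a->next;
            a->fn(a->data);
            free(a);
        }
    }

    static void mutex_release(void *res)
    {
        pthread_mutex_destroy(res);
        puts("mutex destroyed at teardown");
    }

    int main(void)
    {
        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        devm_add_action(mutex_release, &lock); /* ~ devm_mutex_init() */
        teardown();
        return 0;
    }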
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 21db0df0eb0007..bf3a28ee7d8f47 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1624,6 +1624,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+ }
+
+ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
++ struct rt_mutex_base *lock,
+ struct rt_mutex_waiter *w)
+ {
+ /*
+@@ -1636,10 +1637,10 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ if (build_ww_mutex() && w->ww_ctx)
+ return;
+
+- /*
+- * Yell loudly and stop the task right here.
+- */
++ raw_spin_unlock_irq(&lock->wait_lock);
++
+ WARN(1, "rtmutex deadlock detected\n");
++
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+@@ -1693,7 +1694,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+ } else {
+ __set_current_state(TASK_RUNNING);
+ remove_waiter(lock, waiter);
+- rt_mutex_handle_deadlock(ret, chwalk, waiter);
++ rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+ }
+
+ /*
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 9eabd585ce7afa..11ed7ce6579e81 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ /*
+ * lock for writing
+ */
+-static inline int __down_write_common(struct rw_semaphore *sem, int state)
++static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
+ {
+ int ret = 0;
+
+@@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state)
+ return ret;
+ }
+
+-static inline void __down_write(struct rw_semaphore *sem)
++static __always_inline void __down_write(struct rw_semaphore *sem)
+ {
+ __down_write_common(sem, TASK_UNINTERRUPTIBLE);
+ }
+
+-static inline int __down_write_killable(struct rw_semaphore *sem)
++static __always_inline int __down_write_killable(struct rw_semaphore *sem)
+ {
+ return __down_write_common(sem, TASK_KILLABLE);
+ }
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 93cca6e698600a..7c5a8f05497f2c 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -466,7 +466,6 @@ static void stress_inorder_work(struct work_struct *work)
+ } while (!time_after(jiffies, stress->timeout));
+
+ kfree(order);
+- kfree(stress);
+ }
+
+ struct reorder_lock {
+@@ -531,7 +530,6 @@ static void stress_reorder_work(struct work_struct *work)
+ list_for_each_entry_safe(ll, ln, &locks, link)
+ kfree(ll);
+ kfree(order);
+- kfree(stress);
+ }
+
+ static void stress_one_work(struct work_struct *work)
+@@ -552,8 +550,6 @@ static void stress_one_work(struct work_struct *work)
+ break;
+ }
+ } while (!time_after(jiffies, stress->timeout));
+-
+- kfree(stress);
+ }
+
+ #define STRESS_INORDER BIT(0)
+@@ -564,15 +560,24 @@ static void stress_one_work(struct work_struct *work)
+ static int stress(int nlocks, int nthreads, unsigned int flags)
+ {
+ struct ww_mutex *locks;
+- int n;
++ struct stress *stress_array;
++ int n, count;
+
+ locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+ if (!locks)
+ return -ENOMEM;
+
++ stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
++ GFP_KERNEL);
++ if (!stress_array) {
++ kfree(locks);
++ return -ENOMEM;
++ }
++
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_init(&locks[n], &ww_class);
+
++ count = 0;
+ for (n = 0; nthreads; n++) {
+ struct stress *stress;
+ void (*fn)(struct work_struct *work);
+@@ -596,9 +601,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ if (!fn)
+ continue;
+
+- stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+- if (!stress)
+- break;
++ stress = &stress_array[count++];
+
+ INIT_WORK(&stress->work, fn);
+ stress->locks = locks;
+@@ -613,6 +616,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_destroy(&locks[n]);
++ kfree(stress_array);
+ kfree(locks);
+
+ return 0;
+diff --git a/kernel/module/Makefile b/kernel/module/Makefile
+index a10b2b9a6fdfc6..50ffcc413b5450 100644
+--- a/kernel/module/Makefile
++++ b/kernel/module/Makefile
+@@ -5,7 +5,7 @@
+
+ # These are called from save_stack_trace() on slub debug path,
+ # and produce insane amounts of uninteresting coverage.
+-KCOV_INSTRUMENT_module.o := n
++KCOV_INSTRUMENT_main.o := n
+
+ obj-y += main.o
+ obj-y += strict_rwx.o
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index 87440f714c0ca2..474e68f0f06349 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ s.next_in = buf + gzip_hdr_len;
+ s.avail_in = size - gzip_hdr_len;
+
+- s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
++ s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!s.workspace)
+ return -ENOMEM;
+
+@@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
+ out_inflate_end:
+ zlib_inflateEnd(&s);
+ out:
+- kfree(s.workspace);
++ kvfree(s.workspace);
+ return retval;
+ }
+ #elif defined(CONFIG_MODULE_COMPRESS_XZ)
+@@ -241,7 +241,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ }
+
+ wksp_size = zstd_dstream_workspace_bound(header.windowSize);
+- wksp = vmalloc(wksp_size);
++ wksp = kvmalloc(wksp_size, GFP_KERNEL);
+ if (!wksp) {
+ retval = -ENOMEM;
+ goto out;
+@@ -284,7 +284,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ retval = new_size;
+
+ out:
+- vfree(wksp);
++ kvfree(wksp);
+ return retval;
+ }
+ #else
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 98fedfdb8db52f..b00e31721a73e3 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2486,6 +2486,11 @@ static void do_free_init(struct work_struct *w)
+ }
+ }
+
++void flush_module_init_free_work(void)
++{
++ flush_work(&init_free_wq);
++}
++
+ #undef MODULE_PARAM_PREFIX
+ #define MODULE_PARAM_PREFIX "module."
+ /* Default value for module->async_probe_requested */
+@@ -2590,8 +2595,8 @@ static noinline int do_init_module(struct module *mod)
+ * Note that module_alloc() on most architectures creates W+X page
+ * mappings which won't be cleaned up until do_free_init() runs. Any
+ * code such as mark_rodata_ro() which depends on those mappings to
+- * be cleaned up needs to sync with the queued work - ie
+- * rcu_barrier()
++ * be cleaned up needs to sync with the queued work by invoking
++ * flush_module_init_free_work().
+ */
+ if (llist_add(&freeinit->node, &init_free_list))
+ schedule_work(&init_free_wq);
+@@ -3076,7 +3081,7 @@ static bool idempotent(struct idempotent *u, const void *cookie)
+ struct idempotent *existing;
+ bool first;
+
+- u->ret = 0;
++ u->ret = -EINTR;
+ u->cookie = cookie;
+ init_completion(&u->complete);
+
+@@ -3112,7 +3117,7 @@ static int idempotent_complete(struct idempotent *u, int ret)
+ hlist_for_each_entry_safe(pos, next, head, entry) {
+ if (pos->cookie != cookie)
+ continue;
+- hlist_del(&pos->entry);
++ hlist_del_init(&pos->entry);
+ pos->ret = ret;
+ complete(&pos->complete);
+ }
+@@ -3120,6 +3125,28 @@ static int idempotent_complete(struct idempotent *u, int ret)
+ return ret;
+ }
+
++/*
++ * Wait for the idempotent worker.
++ *
++ * If we get interrupted, we need to remove ourselves from the
++ * idempotent list, and the completion may still come in.
++ *
++ * The 'idem_lock' protects against the race, and 'idem.ret' was
++ * initialized to -EINTR and is thus always the right return
++ * value even if the idempotent work then completes between
++ * the wait_for_completion and the cleanup.
++ */
++static int idempotent_wait_for_completion(struct idempotent *u)
++{
++ if (wait_for_completion_interruptible(&u->complete)) {
++ spin_lock(&idem_lock);
++ if (!hlist_unhashed(&u->entry))
++ hlist_del(&u->entry);
++ spin_unlock(&idem_lock);
++ }
++ return u->ret;
++}
++
+ static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
+ {
+ struct load_info info = { };
+@@ -3155,15 +3182,16 @@ static int idempotent_init_module(struct file *f, const char __user * uargs, int
+ if (!f || !(f->f_mode & FMODE_READ))
+ return -EBADF;
+
+- /* See if somebody else is doing the operation? */
+- if (idempotent(&idem, file_inode(f))) {
+- wait_for_completion(&idem.complete);
+- return idem.ret;
++ /* Are we the winners of the race and get to do this? */
++ if (!idempotent(&idem, file_inode(f))) {
++ int ret = init_module_from_file(f, uargs, flags);
++ return idempotent_complete(&idem, ret);
+ }
+
+- /* Otherwise, we'll do it and complete others */
+- return idempotent_complete(&idem,
+- init_module_from_file(f, uargs, flags));
++ /*
++ * Somebody else won the race and is loading the module.
++ */
++ return idempotent_wait_for_completion(&idem);
+ }
+
+ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
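The finit_module() changes invert the idempotent() test so the race winner runs init_module_from_file() and everyone else sleeps in idempotent_wait_for_completion(); pre-initialising ret to -EINTR is what makes an interrupted wait return the right value even when the completion fires concurrently. A compressed pthread sketch of the winner/waiter shape (single key instead of a hash table, condition variable instead of struct completion):

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t idem_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t idem_done = PTHREAD_COND_INITIALIZER;
    static bool in_flight, complete;
    static int shared_ret = -4; /* -EINTR: default if the wait aborts */

    /* Returns true for waiters, false for the single winner. */
    static bool idempotent(void)
    {
        bool waiter;

        pthread_mutex_lock(&idem_lock);
        waiter = in_flight;
        in_flight = true;
        pthread_mutex_unlock(&idem_lock);
        return waiter;
    }

    static void *load(void *arg)
    {
        int ret;

        if (!idempotent()) { /* winner does the real work */
            pthread_mutex_lock(&idem_lock);
            shared_ret = 0;  /* the init_module_from_file() result */
            complete = true;
            pthread_cond_broadcast(&idem_done);
            pthread_mutex_unlock(&idem_lock);
            ret = 0;
        } else {             /* everyone else just waits */
            pthread_mutex_lock(&idem_lock);
            while (!complete)
                pthread_cond_wait(&idem_done, &idem_lock);
            ret = shared_ret;
            pthread_mutex_unlock(&idem_lock);
        }
        printf("ret=%d\n", ret);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, load, NULL);
        pthread_create(&b, NULL, load, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }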
+diff --git a/kernel/numa.c b/kernel/numa.c
+new file mode 100644
+index 00000000000000..67ca6b8585c06f
+--- /dev/null
++++ b/kernel/numa.c
+@@ -0,0 +1,26 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/printk.h>
++#include <linux/numa.h>
++
++/* Stub functions: */
++
++#ifndef memory_add_physaddr_to_nid
++int memory_add_physaddr_to_nid(u64 start)
++{
++ pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
++ start);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
++#endif
++
++#ifndef phys_to_target_node
++int phys_to_target_node(u64 start)
++{
++ pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
++ start);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(phys_to_target_node);
++#endif
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 222d60195de66f..9bf77b58ee08d4 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -106,7 +106,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
+ {
+ int i;
+
+- spin_lock(&padata_works_lock);
++ spin_lock_bh(&padata_works_lock);
+ /* Start at 1 because the current task participates in the job. */
+ for (i = 1; i < nworks; ++i) {
+ struct padata_work *pw = padata_work_alloc();
+@@ -116,7 +116,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
+ padata_work_init(pw, padata_mt_helper, data, 0);
+ list_add(&pw->pw_list, head);
+ }
+- spin_unlock(&padata_works_lock);
++ spin_unlock_bh(&padata_works_lock);
+
+ return i;
+ }
+@@ -134,12 +134,12 @@ static void __init padata_works_free(struct list_head *works)
+ if (list_empty(works))
+ return;
+
+- spin_lock(&padata_works_lock);
++ spin_lock_bh(&padata_works_lock);
+ list_for_each_entry_safe(cur, next, works, pw_list) {
+ list_del(&cur->pw_list);
+ padata_work_free(cur);
+ }
+- spin_unlock(&padata_works_lock);
++ spin_unlock_bh(&padata_works_lock);
+ }
+
+ static void padata_parallel_worker(struct work_struct *parallel_work)
+@@ -202,7 +202,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ *cb_cpu = cpu;
+ }
+
+- err = -EBUSY;
++ err = -EBUSY;
+ if ((pinst->flags & PADATA_RESET))
+ goto out;
+
+@@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata)
+ /* Sort in ascending order of sequence number. */
+ list_for_each_prev(pos, &reorder->list) {
+ cur = list_entry(pos, struct padata_priv, list);
+- if (cur->seq_nr < padata->seq_nr)
++ /* Compare by difference to consider integer wrap around */
++ if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
+ break;
+ }
+ list_add(&padata->list, pos);
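padata_do_serial() now compares sequence numbers by signed difference rather than with a plain '<', the standard trick for monotonically increasing counters that wrap: after seq_nr overflows, the plain comparison inverts the ordering, while the difference cast to a signed type stays correct as long as the two counters are within 2^31 of each other. Demonstrated:

    #include <stdio.h>
    #include <stdbool.h>

    /* true if 'a' is logically before 'b', tolerating wraparound. */
    static bool seq_before(unsigned int a, unsigned int b)
    {
        return (int)(a - b) < 0;
    }

    int main(void)
    {
        unsigned int before_wrap = 0xfffffffeu; /* just before wrap */
        unsigned int after_wrap = 2;            /* just after wrap */

        printf("plain <    : %d\n", before_wrap < after_wrap);          /* 0: wrong */
        printf("signed diff: %d\n", seq_before(before_wrap, after_wrap)); /* 1: right */
        return 0;
    }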
+@@ -511,11 +512,21 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
+ * thread function. Load balance large jobs between threads by
+ * increasing the number of chunks, guarantee at least the minimum
+ * chunk size from the caller, and honor the caller's alignment.
++ * Ensure chunk_size is at least 1 to prevent divide-by-0
++ * panic in padata_mt_helper().
+ */
+ ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
+ ps.chunk_size = max(ps.chunk_size, job->min_chunk);
++ ps.chunk_size = max(ps.chunk_size, 1ul);
+ ps.chunk_size = roundup(ps.chunk_size, job->align);
+
++ /*
++ * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
++ * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().
++ */
++ if (!ps.chunk_size)
++ ps.chunk_size = 1U;
++
+ list_for_each_entry(pw, &works, pw_list)
+ queue_work(system_unbound_wq, &pw->pw_work);
+
+@@ -1102,12 +1113,16 @@ EXPORT_SYMBOL(padata_alloc_shell);
+ */
+ void padata_free_shell(struct padata_shell *ps)
+ {
++ struct parallel_data *pd;
++
+ if (!ps)
+ return;
+
+ mutex_lock(&ps->pinst->lock);
+ list_del(&ps->list);
+- padata_free_pd(rcu_dereference_protected(ps->pd, 1));
++ pd = rcu_dereference_protected(ps->pd, 1);
++ if (refcount_dec_and_test(&pd->refcnt))
++ padata_free_pd(pd);
+ mutex_unlock(&ps->pinst->lock);
+
+ kfree(ps);
+diff --git a/kernel/panic.c b/kernel/panic.c
+index ffa037fa777d5f..ef9f9a4e928de6 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -442,6 +442,14 @@ void panic(const char *fmt, ...)
+
+ /* Do not scroll important messages printed above */
+ suppress_printk = 1;
++
++ /*
++ * The final messages may not have been printed if in a context that
++ * defers printing (such as NMI) and irq_work is not available.
++ * Explicitly flush the kernel log buffer one last time.
++ */
++ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ local_irq_enable();
+ for (i = 0; ; i += PANIC_TIMER_STEP) {
+ touch_softlockup_watchdog();
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index 619972c78774f7..e9b2bb260ee6c8 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -217,6 +217,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
+ */
+ do {
+ clear_thread_flag(TIF_SIGPENDING);
++ clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ rc = kernel_wait4(-1, NULL, __WALL, NULL);
+ } while (rc != -ECHILD);
+
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 0f12e0a97e432e..50a15408c3fcad 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2545,8 +2545,9 @@ static void *get_highmem_page_buffer(struct page *page,
+ pbe->copy_page = tmp;
+ } else {
+ /* Copy of the page will be stored in normal memory */
+- kaddr = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ kaddr = __get_safe_page(ca->gfp_mask);
++ if (!kaddr)
++ return ERR_PTR(-ENOMEM);
+ pbe->copy_page = virt_to_page(kaddr);
+ }
+ pbe->next = highmem_pblist;
+@@ -2750,8 +2751,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
+ return ERR_PTR(-ENOMEM);
+ }
+ pbe->orig_address = page_address(page);
+- pbe->address = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ pbe->address = __get_safe_page(ca->gfp_mask);
++ if (!pbe->address)
++ return ERR_PTR(-ENOMEM);
+ pbe->next = restore_pblist;
+ restore_pblist = pbe;
+ return pbe->address;
+@@ -2783,8 +2785,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
+ return 0;
+
+- handle->sync_read = 1;
+-
+ if (!handle->cur) {
+ if (!buffer)
+ /* This makes the buffer be freed by swsusp_free() */
+@@ -2827,7 +2827,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ memory_bm_position_reset(&zero_bm);
+ restore_pblist = NULL;
+ handle->buffer = get_buffer(&orig_bm, &ca);
+- handle->sync_read = 0;
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+ }
+@@ -2837,9 +2836,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+- if (handle->buffer != buffer)
+- handle->sync_read = 0;
+ }
++ handle->sync_read = (handle->buffer == buffer);
+ handle->cur++;
+
+ /* Zero pages were not included in the image, memset it and move on. */
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index fa3bf161d13f79..3aae526cc4aaca 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -106,6 +106,12 @@ static void s2idle_enter(void)
+ swait_event_exclusive(s2idle_wait_head,
+ s2idle_state == S2IDLE_STATE_WAKE);
+
++ /*
++ * Kick all CPUs to ensure that they resume their timers and restore
++ * consistent system state.
++ */
++ wake_up_all_idle_cpus();
++
+ cpus_read_unlock();
+
+ raw_spin_lock_irq(&s2idle_lock);
+@@ -192,6 +198,7 @@ static int __init mem_sleep_default_setup(char *str)
+ if (mem_sleep_labels[state] &&
+ !strcmp(str, mem_sleep_labels[state])) {
+ mem_sleep_default = state;
++ mem_sleep_current = state;
+ break;
+ }
+
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 74edbce2320bae..d71c590550d282 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -605,11 +605,11 @@ static int crc32_threadfn(void *data)
+ unsigned i;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -618,7 +618,7 @@ static int crc32_threadfn(void *data)
+ for (i = 0; i < d->run_threads; i++)
+ *d->crc32 = crc32_le(*d->crc32,
+ d->unc[i], *d->unc_len[i]);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -648,12 +648,12 @@ static int lzo_compress_threadfn(void *data)
+ struct cmp_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -662,7 +662,7 @@ static int lzo_compress_threadfn(void *data)
+ d->ret = lzo1x_1_compress(d->unc, d->unc_len,
+ d->cmp + LZO_HEADER, &d->cmp_len,
+ d->wrk);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -797,7 +797,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+
+ data[thr].unc_len = off;
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -805,12 +805,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ break;
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -849,7 +849,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ }
+ }
+
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+
+@@ -1131,12 +1131,12 @@ static int lzo_decompress_threadfn(void *data)
+ struct dec_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -1149,7 +1149,7 @@ static int lzo_decompress_threadfn(void *data)
+ flush_icache_range((unsigned long)d->unc,
+ (unsigned long)d->unc + d->unc_len);
+
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -1334,7 +1334,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ crc->run_threads = 0;
+ }
+@@ -1370,7 +1370,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ pg = 0;
+ }
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -1389,7 +1389,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -1420,7 +1420,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0) {
+ crc->run_threads = thr + 1;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ goto out_finish;
+ }
+@@ -1428,13 +1428,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ }
+
+ out_finish:
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+ stop = ktime_get();
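The swap.c hunks above pair atomic_set_release() on the signalling side with atomic_read_acquire() on the waiting side, so everything written before the flag (compressed lengths, CRC inputs) is guaranteed visible to the thread that observes the flag. A minimal userspace sketch of the same acquire/release hand-off in C11 atomics (illustrative only, not taken from the patch):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static int payload;          /* data handed off under the flag */
    static atomic_int ready;     /* the hand-off flag itself */

    static void *producer(void *arg)
    {
        payload = 42;                                            /* plain write */
        atomic_store_explicit(&ready, 1, memory_order_release);  /* publish */
        return NULL;
    }

    static void *consumer(void *arg)
    {
        while (!atomic_load_explicit(&ready, memory_order_acquire))
            ;                            /* spin until the flag is published */
        printf("%d\n", payload);         /* guaranteed to read 42 */
        return NULL;
    }

    int main(void)
    {
        pthread_t p, c;
        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }

wait_event()/wake_up() add the sleeping and waking on top; the acquire/release pair is what orders the payload against the flag.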
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 0b3af1529778c0..0fca282c0a2547 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1850,10 +1850,23 @@ static bool console_waiter;
+ */
+ static void console_lock_spinning_enable(void)
+ {
++ /*
++ * Do not use spinning in panic(). The panic CPU wants to keep the lock.
++ * Non-panic CPUs abandon the flush anyway.
++ *
++ * Just keep the lockdep annotation. The panic-CPU should avoid
++ * taking console_owner_lock because it might cause a deadlock.
++	 * This looks like the easiest way to prevent false lockdep
++	 * reports without handling races in a lockless way.
++ */
++ if (panic_in_progress())
++ goto lockdep;
++
+ raw_spin_lock(&console_owner_lock);
+ console_owner = current;
+ raw_spin_unlock(&console_owner_lock);
+
++lockdep:
+ /* The waiter may spin on us after setting console_owner */
+ spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+ }
+@@ -1878,6 +1891,22 @@ static int console_lock_spinning_disable_and_check(int cookie)
+ {
+ int waiter;
+
++ /*
++ * Ignore spinning waiters during panic() because they might get stopped
++	 * or blocked at any time.
++ *
++ * It is safe because nobody is allowed to start spinning during panic
++	 * in the first place. If there has been a waiter, then non-panic CPUs
++ * might stay spinning. They would get stopped anyway. The panic context
++ * will never start spinning and an interrupted spin on panic CPU will
++ * never continue.
++ */
++ if (panic_in_progress()) {
++ /* Keep lockdep happy. */
++ spin_release(&console_owner_dep_map, _THIS_IP_);
++ return 0;
++ }
++
+ raw_spin_lock(&console_owner_lock);
+ waiter = READ_ONCE(console_waiter);
+ console_owner = NULL;
+@@ -1978,6 +2007,12 @@ static int console_trylock_spinning(void)
+ */
+ mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+
++ /*
++ * Update @console_may_schedule for trylock because the previous
++ * owner may have been schedulable.
++ */
++ console_may_schedule = 0;
++
+ return 1;
+ }
+
+@@ -2274,8 +2309,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+ if (unlikely(suppress_printk))
+ return 0;
+
+- if (unlikely(suppress_panic_printk) &&
+- atomic_read(&panic_cpu) != raw_smp_processor_id())
++ if (unlikely(suppress_panic_printk) && other_cpu_in_panic())
+ return 0;
+
+ if (level == LOGLEVEL_SCHED) {
+@@ -3255,6 +3289,21 @@ static int __init keep_bootcon_setup(char *str)
+
+ early_param("keep_bootcon", keep_bootcon_setup);
+
++static int console_call_setup(struct console *newcon, char *options)
++{
++ int err;
++
++ if (!newcon->setup)
++ return 0;
++
++ /* Synchronize with possible boot console. */
++ console_lock();
++ err = newcon->setup(newcon, options);
++ console_unlock();
++
++ return err;
++}
++
+ /*
+ * This is called by register_console() to try to match
+ * the newly registered console with any of the ones selected
+@@ -3290,8 +3339,8 @@ static int try_enable_preferred_console(struct console *newcon,
+ if (_braille_register_console(newcon, c))
+ return 0;
+
+- if (newcon->setup &&
+- (err = newcon->setup(newcon, c->options)) != 0)
++ err = console_call_setup(newcon, c->options);
++ if (err)
+ return err;
+ }
+ newcon->flags |= CON_ENABLED;
+@@ -3317,7 +3366,7 @@ static void try_enable_default_console(struct console *newcon)
+ if (newcon->index < 0)
+ newcon->index = 0;
+
+- if (newcon->setup && newcon->setup(newcon, NULL) != 0)
++ if (console_call_setup(newcon, NULL) != 0)
+ return;
+
+ newcon->flags |= CON_ENABLED;
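console_call_setup() replaces the two open-coded `newcon->setup && newcon->setup(...)` call sites and serialises the callback against a possible boot console by taking console_lock around it. The shape, an optional hook invoked under a lock, sketched in userspace C (struct and lock names invented for illustration):

    #include <pthread.h>

    struct con {
        int (*setup)(struct con *c, char *options);   /* may be NULL */
    };

    static pthread_mutex_t con_lock = PTHREAD_MUTEX_INITIALIZER;

    static int con_call_setup(struct con *c, char *options)
    {
        int err;

        if (!c->setup)
            return 0;                    /* no hook: treat as success */

        pthread_mutex_lock(&con_lock);   /* serialise with other users */
        err = c->setup(c, options);
        pthread_mutex_unlock(&con_lock);
        return err;
    }

Folding the NULL check into the helper also removes the assignment-inside-condition at the old call site.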
+diff --git a/kernel/profile.c b/kernel/profile.c
+index 8a77769bc4b4cb..984f819b701c9d 100644
+--- a/kernel/profile.c
++++ b/kernel/profile.c
+@@ -57,20 +57,11 @@ static DEFINE_MUTEX(profile_flip_mutex);
+ int profile_setup(char *str)
+ {
+ static const char schedstr[] = "schedule";
+- static const char sleepstr[] = "sleep";
+ static const char kvmstr[] = "kvm";
+ const char *select = NULL;
+ int par;
+
+- if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+-#ifdef CONFIG_SCHEDSTATS
+- force_schedstat_enabled();
+- prof_on = SLEEP_PROFILING;
+- select = sleepstr;
+-#else
+- pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
+-#endif /* CONFIG_SCHEDSTATS */
+- } else if (!strncmp(str, schedstr, strlen(schedstr))) {
++ if (!strncmp(str, schedstr, strlen(schedstr))) {
+ prof_on = SCHED_PROFILING;
+ select = schedstr;
+ } else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index 98e13be411afd3..de0afabfbd4409 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -10,6 +10,7 @@
+ #ifndef __LINUX_RCU_H
+ #define __LINUX_RCU_H
+
++#include <linux/slab.h>
+ #include <trace/events/rcu.h>
+
+ /*
+@@ -248,6 +249,12 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
+ }
+ #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
++static inline void debug_rcu_head_callback(struct rcu_head *rhp)
++{
++ if (unlikely(!rhp->func))
++ kmem_dump_obj(rhp);
++}
++
+ extern int rcu_cpu_stall_suppress_at_boot;
+
+ static inline bool rcu_stall_is_suppressed_at_boot(void)
+@@ -493,6 +500,7 @@ static inline void rcu_expedite_gp(void) { }
+ static inline void rcu_unexpedite_gp(void) { }
+ static inline void rcu_async_hurry(void) { }
+ static inline void rcu_async_relax(void) { }
++static inline bool rcu_cpu_online(int cpu) { return true; }
+ #else /* #ifdef CONFIG_TINY_RCU */
+ bool rcu_gp_is_normal(void); /* Internal RCU use. */
+ bool rcu_gp_is_expedited(void); /* Internal RCU use. */
+@@ -502,6 +510,7 @@ void rcu_unexpedite_gp(void);
+ void rcu_async_hurry(void);
+ void rcu_async_relax(void);
+ void rcupdate_announce_bootup_oddness(void);
++bool rcu_cpu_online(int cpu);
+ #ifdef CONFIG_TASKS_RCU_GENERIC
+ void show_rcu_tasks_gp_kthreads(void);
+ #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
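debug_rcu_head_callback() leans on the invokers (rcu_do_batch(), rcu_reclaim_tiny()) clearing ->func before calling it: a NULL ->func at invocation time therefore suggests the same rcu_head was queued twice, and kmem_dump_obj() reports where the enclosing object was allocated. A rough userspace analogue of the check (names invented):

    #include <stdio.h>

    struct cb {
        void (*func)(struct cb *cb);
    };

    /* Stand-in for kmem_dump_obj(): report the suspect object. */
    static void dump_obj(void *obj)
    {
        fprintf(stderr, "double-queued callback at %p\n", obj);
    }

    static void invoke_cb(struct cb *cb)
    {
        void (*f)(struct cb *) = cb->func;

        if (!f) {              /* cleared by a previous invocation */
            dump_obj(cb);
            return;
        }
        cb->func = NULL;       /* poison so a second pass is caught */
        f(cb);
    }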
+diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
+index ffdb30495e3cc3..ed46d9e8c0e434 100644
+--- a/kernel/rcu/rcuscale.c
++++ b/kernel/rcu/rcuscale.c
+@@ -498,7 +498,7 @@ rcu_scale_writer(void *arg)
+ schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
+ wdp = &wdpp[i];
+ *wdp = ktime_get_mono_fast_ns();
+- if (gp_async) {
++ if (gp_async && !WARN_ON_ONCE(!cur_ops->async)) {
+ retry:
+ if (!rhp)
+ rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+@@ -554,7 +554,7 @@ rcu_scale_writer(void *arg)
+ i++;
+ rcu_scale_wait_shutdown();
+ } while (!torture_must_stop());
+- if (gp_async) {
++ if (gp_async && cur_ops->async) {
+ cur_ops->gp_barrier();
+ }
+ writer_n_durations[me] = i_max + 1;
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index ade42d6a9d9b64..46612fb15fc6d9 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -1992,7 +1992,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
+ preempt_disable();
+ pipe_count = READ_ONCE(p->rtort_pipe_count);
+ if (pipe_count > RCU_TORTURE_PIPE_LEN) {
+- /* Should not happen, but... */
++			// Should not happen in a correct RCU implementation,
++			// but happens quite often for torture_type=busted.
+ pipe_count = RCU_TORTURE_PIPE_LEN;
+ }
+ completed = cur_ops->get_gp_seq();
+@@ -2463,8 +2464,8 @@ static int rcu_torture_stall(void *args)
+ preempt_disable();
+ pr_alert("%s start on CPU %d.\n",
+ __func__, raw_smp_processor_id());
+- while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+- stop_at))
++ while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
++ !kthread_should_stop())
+ if (stall_cpu_block) {
+ #ifdef CONFIG_PREEMPTION
+ preempt_schedule();
+@@ -2591,7 +2592,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
+ spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
+ rfcpp = rfp->rcu_fwd_cb_tail;
+ rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
+- WRITE_ONCE(*rfcpp, rfcp);
++ smp_store_release(rfcpp, rfcp);
+ WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
+ i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
+ if (i >= ARRAY_SIZE(rfp->n_launders_hist))
+@@ -3012,11 +3013,12 @@ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
+ }
+
+ /* IPI handler to get callback posted on desired CPU, if online. */
+-static void rcu_torture_barrier1cb(void *rcu_void)
++static int rcu_torture_barrier1cb(void *rcu_void)
+ {
+ struct rcu_head *rhp = rcu_void;
+
+ cur_ops->call(rhp, rcu_torture_barrier_cbf);
++ return 0;
+ }
+
+ /* kthread function to register callbacks used to test RCU barriers. */
+@@ -3042,11 +3044,9 @@ static int rcu_torture_barrier_cbs(void *arg)
+ * The above smp_load_acquire() ensures barrier_phase load
+ * is ordered before the following ->call().
+ */
+- if (smp_call_function_single(myid, rcu_torture_barrier1cb,
+- &rcu, 1)) {
+- // IPI failed, so use direct call from current CPU.
++ if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
+ cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+- }
++
+ if (atomic_dec_and_test(&barrier_cbs_count))
+ wake_up(&barrier_wq);
+ } while (!torture_must_stop());
+diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
+index 336af24e0fe358..c38e5933a5d693 100644
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -138,6 +138,7 @@ void srcu_drive_gp(struct work_struct *wp)
+ while (lh) {
+ rhp = lh;
+ lh = lh->next;
++ debug_rcu_head_callback(rhp);
+ local_bh_disable();
+ rhp->func(rhp);
+ local_bh_enable();
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 20d7a238d675a8..2f770a9a2a13a6 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
+ snp->grplo = cpu;
+ snp->grphi = cpu;
+ }
+- sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
++ sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
+ }
+ smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
+ return true;
+@@ -782,8 +782,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
++ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
+ WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
+ WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
+@@ -833,7 +832,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
+ int cpu;
+
+ for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+- if (!(mask & (1 << (cpu - snp->grplo))))
++ if (!(mask & (1UL << (cpu - snp->grplo))))
+ continue;
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
+ }
+@@ -1242,10 +1241,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ spin_lock_irqsave_sdp_contention(sdp, &flags);
+ if (rhp)
+ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
++ /*
++ * The snapshot for acceleration must be taken _before_ the read of the
++ * current gp sequence used for advancing, otherwise advancing may fail
++ * and acceleration may then fail too.
++ *
++ * This could happen if:
++ *
++ * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
++ * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
++ *
++ * 2) The grace period for RCU_WAIT_TAIL is seen as started but not
++ * completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
++ *
++ * 3) This value is passed to rcu_segcblist_advance() which can't move
++ * any segment forward and fails.
++ *
++ * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
++ * But then the call to rcu_seq_snap() observes the grace period for the
++ * RCU_WAIT_TAIL segment as completed and the subsequent one for the
++	 * RCU_NEXT_READY_TAIL segment as started (i.e., X + 4 + SRCU_STATE_SCAN1)
++ * so it returns a snapshot of the next grace period, which is X + 12.
++ *
++ * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
++ * freshly enqueued callback in RCU_NEXT_TAIL can't move to
++ * RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
++ * period (gp_num = X + 8). So acceleration fails.
++ */
++ s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+- s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
++ WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
+ if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ sdp->srcu_gp_seq_needed = s;
+ needgp = true;
+@@ -1692,6 +1718,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ ssp = sdp->ssp;
+ rcu_cblist_init(&ready_cbs);
+ spin_lock_irq_rcu_node(sdp);
++ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+ if (sdp->srcu_cblist_invoking ||
+@@ -1708,6 +1735,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ rhp = rcu_cblist_dequeue(&ready_cbs);
+ for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
+ debug_rcu_head_unqueue(rhp);
++ debug_rcu_head_callback(rhp);
+ local_bh_disable();
+ rhp->func(rhp);
+ local_bh_enable();
+@@ -1720,8 +1748,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ */
+ spin_lock_irq_rcu_node(sdp);
+ rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
+ sdp->srcu_cblist_invoking = false;
+ more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
+ spin_unlock_irq_rcu_node(sdp);
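The grpmask fixes replace `1 << (cpu - grplo)` with `1UL << (cpu - grplo)`: the mask is an unsigned long, and once a leaf node spans more than 32 CPUs the shift count exceeds what a 32-bit int allows, which is undefined behaviour. A short demonstration (assumes a 64-bit unsigned long):

    #include <stdio.h>

    int main(void)
    {
        int n = 40;                 /* e.g. cpu - grplo on a large machine */

        /* "1 << n" shifts an int; for n >= 31 that is undefined behaviour
         * and typically yields a truncated mask.  Promoting the constant
         * to unsigned long first keeps the full width. */
        unsigned long mask = 1UL << n;

        printf("%#lx\n", mask);     /* 0x10000000000 where long is 64-bit */
        return 0;
    }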
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 8d65f7d576a341..df81506cf2bde7 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -538,6 +538,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
+ raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+ len = rcl.len;
+ for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
++ debug_rcu_head_callback(rhp);
+ local_bh_disable();
+ rhp->func(rhp);
+ local_bh_enable();
+@@ -892,10 +893,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop)
+ synchronize_rcu();
+ }
+
++/* Check for quiescent states since the pregp's synchronize_rcu() */
++static bool rcu_tasks_is_holdout(struct task_struct *t)
++{
++ int cpu;
++
++ /* Has the task been seen voluntarily sleeping? */
++ if (!READ_ONCE(t->on_rq))
++ return false;
++
++ /*
++ * Idle tasks (or idle injection) within the idle loop are RCU-tasks
++ * quiescent states. But CPU boot code performed by the idle task
++ * isn't a quiescent state.
++ */
++ if (is_idle_task(t))
++ return false;
++
++ cpu = task_cpu(t);
++
++ /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
++ if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
++ return false;
++
++ return true;
++}
++
+ /* Per-task initial processing. */
+ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
+ {
+- if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
++ if (t != current && rcu_tasks_is_holdout(t)) {
+ get_task_struct(t);
+ t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+ WRITE_ONCE(t->rcu_tasks_holdout, true);
+@@ -944,7 +971,7 @@ static void check_holdout_task(struct task_struct *t,
+
+ if (!READ_ONCE(t->rcu_tasks_holdout) ||
+ t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
+- !READ_ONCE(t->on_rq) ||
++ !rcu_tasks_is_holdout(t) ||
+ (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
+ !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+ WRITE_ONCE(t->rcu_tasks_holdout, false);
+@@ -1522,7 +1549,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
+ } else {
+ // The task is not running, so C-language access is safe.
+ nesting = t->trc_reader_nesting;
+- WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
++ WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
+ n_heavy_reader_ofl_updates++;
+ }
+@@ -1649,6 +1676,16 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop)
+ // allow safe access to the hop list.
+ for_each_online_cpu(cpu) {
+ rcu_read_lock();
++ // Note that cpu_curr_snapshot() picks up the target
++ // CPU's current task while its runqueue is locked with
++ // an smp_mb__after_spinlock(). This ensures that either
++ // the grace-period kthread will see that task's read-side
++ // critical section or the task will see the updater's pre-GP
++ // accesses. The trailing smp_mb() in cpu_curr_snapshot()
++	// does not currently play a role other than to simplify
++ // that function's ordering semantics. If these simplified
++ // ordering semantics continue to be redundant, that smp_mb()
++ // might be removed.
+ t = cpu_curr_snapshot(cpu);
+ if (rcu_tasks_trace_pertask_prep(t, true))
+ trc_add_holdout(t, hop);
+@@ -1912,7 +1949,7 @@ void show_rcu_tasks_trace_gp_kthread(void)
+ {
+ char buf[64];
+
+- sprintf(buf, "N%lu h:%lu/%lu/%lu",
++ snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
+ data_race(n_trc_holdouts),
+ data_race(n_heavy_reader_ofl_updates),
+ data_race(n_heavy_reader_updates),
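The sprintf() → snprintf() conversion above (and the matching one in tree_stall.h further down) bounds the write to the destination, so an unexpectedly long formatted string truncates instead of overrunning the 64-byte stack buffer. A tiny illustration:

    #include <stdio.h>

    int main(void)
    {
        char buf[8];

        /* snprintf() never writes more than sizeof(buf) bytes, including
         * the terminating NUL; overlong output is truncated rather than
         * overflowing the stack buffer. */
        snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu", 1UL, 2UL, 3UL, 4UL);
        printf("%s\n", buf);   /* prints only the first 7 characters */
        return 0;
    }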
+diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
+index 42f7589e51e09e..fec804b7908032 100644
+--- a/kernel/rcu/tiny.c
++++ b/kernel/rcu/tiny.c
+@@ -97,6 +97,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head)
+
+ trace_rcu_invoke_callback("", head);
+ f = head->func;
++ debug_rcu_head_callback(head);
+ WRITE_ONCE(head->func, (rcu_callback_t)0L);
+ f(head);
+ rcu_lock_release(&rcu_callback_map);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index cb1caefa8bd070..3d7b119f6e2a36 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+ #include <linux/completion.h>
++#include <linux/kmemleak.h>
+ #include <linux/moduleparam.h>
+ #include <linux/panic.h>
+ #include <linux/panic_notifier.h>
+@@ -754,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
+ }
+
+ /*
+- * Return true if the specified CPU has passed through a quiescent
+- * state by virtue of being in or having passed through an dynticks
+- * idle state since the last call to dyntick_save_progress_counter()
+- * for this same CPU, or by virtue of having been offline.
++ * Returns positive if the specified CPU has passed through a quiescent state
++ * by virtue of being in or having passed through a dynticks idle state since
++ * the last call to dyntick_save_progress_counter() for this same CPU, or by
++ * virtue of having been offline.
++ *
++ * Returns negative if the specified CPU needs a force resched.
++ *
++ * Returns zero otherwise.
+ */
+ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+ {
+ unsigned long jtsq;
++ int ret = 0;
+ struct rcu_node *rnp = rdp->mynode;
+
+ /*
+@@ -847,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+ (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
+ rcu_state.cbovld)) {
+ WRITE_ONCE(rdp->rcu_urgent_qs, true);
+- resched_cpu(rdp->cpu);
+ WRITE_ONCE(rdp->last_fqs_resched, jiffies);
++ ret = -1;
+ }
+
+ /*
+@@ -861,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+ if (time_after(jiffies, rcu_state.jiffies_resched)) {
+ if (time_after(jiffies,
+ READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
+- resched_cpu(rdp->cpu);
+ WRITE_ONCE(rdp->last_fqs_resched, jiffies);
++ ret = -1;
+ }
+ if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+ !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
+@@ -891,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+ }
+ }
+
+- return 0;
++ return ret;
+ }
+
+ /* Trace-event wrapper function for trace_rcu_future_grace_period. */
+@@ -1007,6 +1013,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
+ return needmore;
+ }
+
++static void swake_up_one_online_ipi(void *arg)
++{
++ struct swait_queue_head *wqh = arg;
++
++ swake_up_one(wqh);
++}
++
++static void swake_up_one_online(struct swait_queue_head *wqh)
++{
++ int cpu = get_cpu();
++
++ /*
++	 * If called from rcutree_report_cpu_starting(), a wake-up
++	 * is dangerous that late in the CPU-down hotplug process. The
++	 * scheduler might queue an ignored hrtimer. Defer the wake-up
++	 * to an online CPU instead.
++ */
++ if (unlikely(cpu_is_offline(cpu))) {
++ int target;
++
++ target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
++ cpu_online_mask);
++
++ smp_call_function_single(target, swake_up_one_online_ipi,
++ wqh, 0);
++ put_cpu();
++ } else {
++ put_cpu();
++ swake_up_one(wqh);
++ }
++}
++
+ /*
+ * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
+ * interrupt or softirq handler, in which case we just might immediately
+@@ -1031,7 +1069,7 @@ static void rcu_gp_kthread_wake(void)
+ return;
+ WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
+ WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
+- swake_up_one(&rcu_state.gp_wq);
++ swake_up_one_online(&rcu_state.gp_wq);
+ }
+
+ /*
+@@ -1260,7 +1298,7 @@ EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
+ /* Unregister a counter, with NULL for not caring which. */
+ void rcu_gp_slow_unregister(atomic_t *rgssp)
+ {
+- WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
++ WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
+
+ WRITE_ONCE(rcu_gp_slow_suppress, NULL);
+ }
+@@ -1556,10 +1594,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
+ */
+ static void rcu_gp_fqs(bool first_time)
+ {
++ int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
+ struct rcu_node *rnp = rcu_get_root();
+
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
++
++ WARN_ON_ONCE(nr_fqs > 3);
++	/* Only count down nr_fqs for stall purposes if jiffies moves. */
++ if (nr_fqs) {
++ if (nr_fqs == 1) {
++ WRITE_ONCE(rcu_state.jiffies_stall,
++ jiffies + rcu_jiffies_till_stall_check());
++ }
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
++ }
++
+ if (first_time) {
+ /* Collect dyntick-idle snapshots. */
+ force_qs_rnp(dyntick_save_progress_counter);
+@@ -2135,6 +2185,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
+ trace_rcu_invoke_callback(rcu_state.name, rhp);
+
+ f = rhp->func;
++ debug_rcu_head_callback(rhp);
+ WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+ f(rhp);
+
+@@ -2257,15 +2308,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
+ {
+ int cpu;
+ unsigned long flags;
+- unsigned long mask;
+- struct rcu_data *rdp;
+ struct rcu_node *rnp;
+
+ rcu_state.cbovld = rcu_state.cbovldnext;
+ rcu_state.cbovldnext = false;
+ rcu_for_each_leaf_node(rnp) {
++ unsigned long mask = 0;
++ unsigned long rsmask = 0;
++
+ cond_resched_tasks_rcu_qs();
+- mask = 0;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rcu_state.cbovldnext |= !!rnp->cbovldmask;
+ if (rnp->qsmask == 0) {
+@@ -2283,11 +2334,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
+ continue;
+ }
+ for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
++ struct rcu_data *rdp;
++ int ret;
++
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+- if (f(rdp)) {
++ ret = f(rdp);
++ if (ret > 0) {
+ mask |= rdp->grpmask;
+ rcu_disable_urgency_upon_qs(rdp);
+ }
++ if (ret < 0)
++ rsmask |= rdp->grpmask;
+ }
+ if (mask != 0) {
+ /* Idle/offline CPUs, report (releases rnp->lock). */
+@@ -2296,6 +2353,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
+ /* Nothing to do here, so just drop the lock. */
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
++
++ for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
++ resched_cpu(cpu);
+ }
+ }
+
+@@ -2667,8 +2727,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
+ }
+
+ check_cb_ovld(rdp);
+- if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
++ if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
++ local_irq_restore(flags);
+ return; // Enqueued onto ->nocb_bypass, so just leave.
++ }
+ // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
+ rcu_segcblist_enqueue(&rdp->cblist, head);
+ if (__is_kvfree_rcu_offset((unsigned long)func))
+@@ -2686,8 +2748,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
+ __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
+ } else {
+ __call_rcu_core(rdp, head, flags);
+- local_irq_restore(flags);
+ }
++ local_irq_restore(flags);
+ }
+
+ #ifdef CONFIG_RCU_LAZY
+@@ -3388,6 +3450,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+ success = true;
+ }
+
++ /*
++ * The kvfree_rcu() caller considers the pointer freed at this point
++ * and likely removes any references to it. Since the actual slab
++ * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++ * this object (no scanning or false positives reporting).
++	 * this object (no scanning or false-positive reporting).
++ kmemleak_ignore(ptr);
++
+ // Set timer to drain after KFREE_DRAIN_JIFFIES.
+ if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
+ schedule_delayed_monitor_work(krcp);
+@@ -4104,6 +4174,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
+ return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
+ }
+
++bool rcu_cpu_online(int cpu)
++{
++ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
++
++ return rcu_rdp_cpu_online(rdp);
++}
++
+ #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
+
+ /*
+@@ -4521,11 +4598,15 @@ void rcutree_migrate_callbacks(int cpu)
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ bool needwake;
+
+- if (rcu_rdp_is_offloaded(rdp) ||
+- rcu_segcblist_empty(&rdp->cblist))
+- return; /* No callbacks to migrate. */
++ if (rcu_rdp_is_offloaded(rdp))
++ return;
+
+ raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
++ if (rcu_segcblist_empty(&rdp->cblist)) {
++ raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
++ return; /* No callbacks to migrate. */
++ }
++
+ WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
+ rcu_barrier_entrain(rdp);
+ my_rdp = this_cpu_ptr(&rcu_data);
+@@ -4547,8 +4628,9 @@ void rcutree_migrate_callbacks(int cpu)
+ __call_rcu_nocb_wake(my_rdp, true, flags);
+ } else {
+ rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
+- raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
++ raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
+ }
++ local_irq_restore(flags);
+ if (needwake)
+ rcu_gp_kthread_wake();
+ lockdep_assert_irqs_enabled();
+@@ -4597,13 +4679,16 @@ static void __init rcu_start_exp_gp_kworkers(void)
+ rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
+ if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
+ pr_err("Failed to create %s!\n", gp_kworker_name);
++ rcu_exp_gp_kworker = NULL;
+ return;
+ }
+
+ rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
+ if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
+ pr_err("Failed to create %s!\n", par_gp_kworker_name);
++ rcu_exp_par_gp_kworker = NULL;
+ kthread_destroy_worker(rcu_exp_gp_kworker);
++ rcu_exp_gp_kworker = NULL;
+ return;
+ }
+
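rcu_implicit_dynticks_qs() now returns a tri-state: positive when a quiescent state can be reported, negative when the CPU merely needs a resched kick, zero otherwise. force_qs_rnp() collects the kick candidates in rsmask while holding the rnp lock and only calls resched_cpu() after dropping it, keeping the resched path out of the lock's shadow. The "collect under the lock, act after unlock" shape, as a hedged userspace sketch (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    #define NCPUS 64

    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Toy stand-ins: <0 means "kick", >0 means "report", 0 means nothing. */
    static int  check_cpu(int cpu)            { return cpu % 3 - 1; }
    static void report_qs(unsigned long mask) { printf("qs %#lx\n", mask); }
    static void kick_cpu(int cpu)             { printf("kick %d\n", cpu); }

    static void scan_node(void)
    {
        unsigned long qsmask = 0, kickmask = 0;
        int cpu;

        pthread_mutex_lock(&node_lock);
        for (cpu = 0; cpu < NCPUS; cpu++) {
            int ret = check_cpu(cpu);

            if (ret > 0)
                qsmask |= 1UL << cpu;     /* reported while still locked */
            else if (ret < 0)
                kickmask |= 1UL << cpu;   /* deferred: only remembered here */
        }
        if (qsmask)
            report_qs(qsmask);
        pthread_mutex_unlock(&node_lock);

        for (cpu = 0; cpu < NCPUS; cpu++)  /* kicks after the lock is gone */
            if (kickmask & (1UL << cpu))
                kick_cpu(cpu);
    }

    int main(void) { scan_node(); return 0; }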
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 192536916f9a60..9eb43b501ff5c8 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -224,7 +224,6 @@ struct rcu_data {
+ struct swait_queue_head nocb_state_wq; /* For offloading state changes */
+ struct task_struct *nocb_gp_kthread;
+ raw_spinlock_t nocb_lock; /* Guard following pair of fields. */
+- atomic_t nocb_lock_contended; /* Contention experienced. */
+ int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
+ struct timer_list nocb_timer; /* Enforce finite deferral. */
+ unsigned long nocb_gp_adv_time; /* Last call_rcu() CB adv (jiffies). */
+@@ -386,6 +385,10 @@ struct rcu_state {
+ /* in jiffies. */
+ unsigned long jiffies_stall; /* Time at which to check */
+ /* for CPU stalls. */
++	int nr_fqs_jiffies_stall;	/* Number of fqs loops after
++					 * which jiffies is read and
++					 * jiffies_stall is set. Stall
++					 * warnings disabled if !0. */
+ unsigned long jiffies_resched; /* Time at which to resched */
+ /* a reluctant CPU. */
+ unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 8239b39d945bdf..733b18077f5a01 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -173,7 +173,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
+ return ret;
+ }
+
+-
+ /*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+@@ -201,7 +200,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ if (wake) {
+ smp_mb(); /* EGP done before wake_up(). */
+- swake_up_one(&rcu_state.expedited_wq);
++ swake_up_one_online(&rcu_state.expedited_wq);
+ }
+ break;
+ }
+@@ -429,7 +428,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
+ __sync_rcu_exp_select_node_cpus(rewp);
+ }
+
+-static inline bool rcu_gp_par_worker_started(void)
++static inline bool rcu_exp_worker_started(void)
++{
++ return !!READ_ONCE(rcu_exp_gp_kworker);
++}
++
++static inline bool rcu_exp_par_worker_started(void)
+ {
+ return !!READ_ONCE(rcu_exp_par_gp_kworker);
+ }
+@@ -479,7 +483,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
+ __sync_rcu_exp_select_node_cpus(rewp);
+ }
+
+-static inline bool rcu_gp_par_worker_started(void)
++static inline bool rcu_exp_worker_started(void)
++{
++ return !!READ_ONCE(rcu_gp_wq);
++}
++
++static inline bool rcu_exp_par_worker_started(void)
+ {
+ return !!READ_ONCE(rcu_par_gp_wq);
+ }
+@@ -542,7 +551,7 @@ static void sync_rcu_exp_select_cpus(void)
+ rnp->exp_need_flush = false;
+ if (!READ_ONCE(rnp->expmask))
+ continue; /* Avoid early boot non-existent wq. */
+- if (!rcu_gp_par_worker_started() ||
++ if (!rcu_exp_par_worker_started() ||
+ rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+ rcu_is_last_leaf_node(rnp)) {
+ /* No worker started yet or last leaf, do direct call. */
+@@ -953,7 +962,7 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
+ */
+ void synchronize_rcu_expedited(void)
+ {
+- bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
++ bool use_worker;
+ unsigned long flags;
+ struct rcu_exp_work rew;
+ struct rcu_node *rnp;
+@@ -964,6 +973,9 @@ void synchronize_rcu_expedited(void)
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
+
++ use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
++ rcu_exp_worker_started();
++
+ /* Is the state is such that the call is a grace period? */
+ if (rcu_blocking_is_gp()) {
+ // Note well that this code runs with !PREEMPT && !SMP.
+@@ -993,7 +1005,7 @@ void synchronize_rcu_expedited(void)
+ return; /* Someone else did our work for us. */
+
+ /* Ensure that load happens before action based on it. */
+- if (unlikely(boottime)) {
++ if (unlikely(!use_worker)) {
+ /* Direct call during scheduler init and early_initcalls(). */
+ rcu_exp_sel_wait_wake(s);
+ } else {
+@@ -1011,7 +1023,7 @@ void synchronize_rcu_expedited(void)
+ /* Let the next expedited grace period start. */
+ mutex_unlock(&rcu_state.exp_mutex);
+
+- if (likely(!boottime))
++ if (likely(use_worker))
+ synchronize_rcu_expedited_destroy_work(&rew);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
+index 5598212d1f2742..8993b2322be2b0 100644
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -91,8 +91,7 @@ module_param(nocb_nobypass_lim_per_jiffy, int, 0);
+
+ /*
+ * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
+- * lock isn't immediately available, increment ->nocb_lock_contended to
+- * flag the contention.
++ * lock isn't immediately available, perform a minimal sanity check.
+ */
+ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ __acquires(&rdp->nocb_bypass_lock)
+@@ -100,29 +99,12 @@ static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+ lockdep_assert_irqs_disabled();
+ if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+ return;
+- atomic_inc(&rdp->nocb_lock_contended);
++ /*
++	 * Contention is expected only when a local enqueue collides with
++	 * a remote flush from a kthread.
++ */
+ WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+- smp_mb__after_atomic(); /* atomic_inc() before lock. */
+ raw_spin_lock(&rdp->nocb_bypass_lock);
+- smp_mb__before_atomic(); /* atomic_dec() after lock. */
+- atomic_dec(&rdp->nocb_lock_contended);
+-}
+-
+-/*
+- * Spinwait until the specified rcu_data structure's ->nocb_lock is
+- * not contended. Please note that this is extremely special-purpose,
+- * relying on the fact that at most two kthreads and one CPU contend for
+- * this lock, and also that the two kthreads are guaranteed to have frequent
+- * grace-period-duration time intervals between successive acquisitions
+- * of the lock. This allows us to use an extremely simple throttling
+- * mechanism, and further to apply it only to the CPU doing floods of
+- * call_rcu() invocations. Don't try this at home!
+- */
+-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
+-{
+- WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+- while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
+- cpu_relax();
+ }
+
+ /*
+@@ -238,7 +220,10 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+ if (needwake) {
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
+- wake_up_process(rdp_gp->nocb_gp_kthread);
++ if (cpu_is_offline(raw_smp_processor_id()))
++ swake_up_one_online(&rdp_gp->nocb_gp_wq);
++ else
++ wake_up_process(rdp_gp->nocb_gp_kthread);
+ }
+
+ return needwake;
+@@ -510,7 +495,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ }
+
+ // We need to use the bypass.
+- rcu_nocb_wait_contended(rdp);
+ rcu_nocb_bypass_lock(rdp);
+ ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+ rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+@@ -532,9 +516,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ // 2. Both of these conditions are met:
+ // a. The bypass list previously had only lazy CBs, and:
+ // b. The new CB is non-lazy.
+- if (ncbs && (!bypass_is_lazy || lazy)) {
+- local_irq_restore(flags);
+- } else {
++ if (!ncbs || (bypass_is_lazy && !lazy)) {
+ // No-CBs GP kthread might be indefinitely asleep, if so, wake.
+ rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
+ if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
+@@ -544,7 +526,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ } else {
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+ TPS("FirstBQnoWake"));
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ }
+ }
+ return true; // Callback already enqueued.
+@@ -570,7 +552,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+ // If we are being polled or there is no kthread, just leave.
+ t = READ_ONCE(rdp->nocb_gp_kthread);
+ if (rcu_nocb_poll || !t) {
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+ TPS("WakeNotPoll"));
+ return;
+@@ -583,17 +565,23 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+ rdp->qlen_last_fqs_check = len;
+ // Only lazy CBs in bypass list
+ if (lazy_len && bypass_len == lazy_len) {
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
+ TPS("WakeLazy"));
+- } else if (!irqs_disabled_flags(flags)) {
++ } else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
+ /* ... if queue was empty ... */
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ wake_nocb_gp(rdp, false);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+ TPS("WakeEmpty"));
+ } else {
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ /*
++ * Don't do the wake-up upfront on fragile paths.
++		 * Also, offline CPUs can't call swake_up_one_online() from
++		 * (soft-)IRQs. Rely on the final deferred wake-up from
++		 * rcutree_report_cpu_dead().
++ */
++ rcu_nocb_unlock(rdp);
+ wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
+ TPS("WakeEmptyIsDeferred"));
+ }
+@@ -611,15 +599,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+ if ((rdp->nocb_cb_sleep ||
+ !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
+ !timer_pending(&rdp->nocb_timer)) {
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
+ TPS("WakeOvfIsDeferred"));
+ } else {
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+ }
+ } else {
+- rcu_nocb_unlock_irqrestore(rdp, flags);
++ rcu_nocb_unlock(rdp);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+ }
+ }
+@@ -1383,7 +1371,7 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ continue;
+ }
+- WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
++ rcu_nocb_try_flush_bypass(rdp, jiffies);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ wake_nocb_gp(rdp, false);
+ sc->nr_to_scan -= _count;
+@@ -1668,12 +1656,11 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
+
+ sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+ sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+- pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
++ pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+ rdp->cpu, rdp->nocb_gp_rdp->cpu,
+ nocb_next_rdp ? nocb_next_rdp->cpu : -1,
+ "kK"[!!rdp->nocb_cb_kthread],
+ "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+- "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+ "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+ "sS"[!!rdp->nocb_cb_sleep],
+ ".W"[swait_active(&rdp->nocb_cb_wq)],
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index 6f06dc12904adb..11a1fac3a58985 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -149,12 +149,17 @@ static void panic_on_rcu_stall(void)
+ /**
+ * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
+ *
++ * To perform the reset request from the caller, disable stall detection until
++ * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
++ * loaded. It should be safe to do this from the fqs loop, as enough timer
++ * interrupts and context switches should have passed.
++ *
+ * The caller must disable hard irqs.
+ */
+ void rcu_cpu_stall_reset(void)
+ {
+- WRITE_ONCE(rcu_state.jiffies_stall,
+- jiffies + rcu_jiffies_till_stall_check());
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
++ WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+@@ -170,6 +175,7 @@ static void record_gp_stall_check_time(void)
+ WRITE_ONCE(rcu_state.gp_start, j);
+ j1 = rcu_jiffies_till_stall_check();
+ smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
+ WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
+ rcu_state.jiffies_resched = j + j1 / 2;
+ rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+@@ -497,7 +503,8 @@ static void print_cpu_stall_info(int cpu)
+ rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
+ rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
+ if (rcuc_starved)
+- sprintf(buf, " rcuc=%ld jiffies(starved)", j);
++ // Print signed value, as negative values indicate a probable bug.
++ snprintf(buf, sizeof(buf), " rcuc=%ld jiffies(starved)", j);
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
+ cpu,
+ "O."[!!cpu_online(cpu)],
+@@ -725,6 +732,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
+ !rcu_gp_in_progress())
+ return;
+ rcu_stall_kick_kthreads();
++
++ /*
++ * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
++ * loop has to set jiffies to ensure a non-stale jiffies value. This
++	 * is required to have a good jiffies value after coming out of long
++ * breaks of jiffies updates. Not doing so can cause false positives.
++ */
++ if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
++ return;
++
+ j = jiffies;
+
+ /*
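rcu_cpu_stall_reset() can run while jiffies has not advanced for a long stretch (for instance across a guest pause), so re-arming the stall deadline from the current jiffies would arm it with a stale value and provoke false positives. The patch instead parks jiffies_stall at ULONG_MAX and lets the FQS loop count down three passes before reading jiffies again. A toy model of that deferral (all names invented):

    #include <limits.h>

    static unsigned long jiffies_now;      /* stand-in for the real jiffies */
    static unsigned long stall_deadline = ULONG_MAX;
    static int defer_count;

    static void stall_reset(void)
    {
        defer_count = 3;                   /* wait for 3 loop passes */
        stall_deadline = ULONG_MAX;        /* disable checking meanwhile */
    }

    static void fqs_loop_pass(unsigned long timeout)
    {
        if (defer_count) {
            if (defer_count == 1)          /* clock is fresh again: re-arm */
                stall_deadline = jiffies_now + timeout;
            defer_count--;
        }
    }

    static int stall_check(void)
    {
        if (defer_count)                   /* reset pending: stay quiet */
            return 0;
        return jiffies_now >= stall_deadline;
    }

    int main(void)
    {
        stall_reset();
        fqs_loop_pass(100);
        fqs_loop_pass(100);
        fqs_loop_pass(100);                /* deadline re-armed on this pass */
        return stall_check();
    }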
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index 3bba88c7ffc6be..6ebef11c887601 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -74,6 +74,7 @@ void __weak (*pm_power_off)(void);
+ void emergency_restart(void)
+ {
+ kmsg_dump(KMSG_DUMP_EMERG);
++ system_state = SYSTEM_RESTART;
+ machine_emergency_restart();
+ }
+ EXPORT_SYMBOL_GPL(emergency_restart);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index b1763b2fd7ef3e..635e858db0fe81 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -493,20 +493,62 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
+ size_t size, unsigned long flags,
+ unsigned long desc)
+ {
+- struct resource res;
++ resource_size_t ostart, oend;
+ int type = 0; int other = 0;
+- struct resource *p;
++ struct resource *p, *dp;
++ bool is_type, covered;
++ struct resource res;
+
+ res.start = start;
+ res.end = start + size - 1;
+
+ for (p = parent->child; p ; p = p->sibling) {
+- bool is_type = (((p->flags & flags) == flags) &&
+- ((desc == IORES_DESC_NONE) ||
+- (desc == p->desc)));
+-
+- if (resource_overlaps(p, &res))
+- is_type ? type++ : other++;
++ if (!resource_overlaps(p, &res))
++ continue;
++ is_type = (p->flags & flags) == flags &&
++ (desc == IORES_DESC_NONE || desc == p->desc);
++ if (is_type) {
++ type++;
++ continue;
++ }
++ /*
++ * Continue to search in descendant resources as if the
++ * matched descendant resources cover some ranges of 'p'.
++ *
++ * |------------- "CXL Window 0" ------------|
++ * |-- "System RAM" --|
++ *
++		 * will behave similarly to the following fake resource
++ * tree when searching "System RAM".
++ *
++ * |-- "System RAM" --||-- "CXL Window 0a" --|
++ */
++ covered = false;
++ ostart = max(res.start, p->start);
++ oend = min(res.end, p->end);
++ for_each_resource(p, dp, false) {
++ if (!resource_overlaps(dp, &res))
++ continue;
++ is_type = (dp->flags & flags) == flags &&
++ (desc == IORES_DESC_NONE || desc == dp->desc);
++ if (is_type) {
++ type++;
++ /*
++ * Range from 'ostart' to 'dp->start'
++				 * The range from 'ostart' to 'dp->start'
++				 * isn't covered by a matched resource.
++ if (dp->start > ostart)
++ break;
++ if (dp->end >= oend) {
++ covered = true;
++ break;
++ }
++ /* Remove covered range */
++ ostart = max(ostart, dp->end + 1);
++ }
++ }
++ if (!covered)
++ other++;
+ }
+
+ if (type == 0)
+@@ -1778,8 +1820,7 @@ static resource_size_t gfr_start(struct resource *base, resource_size_t size,
+ if (flags & GFR_DESCENDING) {
+ resource_size_t end;
+
+- end = min_t(resource_size_t, base->end,
+- (1ULL << MAX_PHYSMEM_BITS) - 1);
++ end = min_t(resource_size_t, base->end, PHYSMEM_END);
+ return end - size + 1;
+ }
+
+@@ -1796,8 +1837,7 @@ static bool gfr_continue(struct resource *base, resource_size_t addr,
+ * @size did not wrap 0.
+ */
+ return addr > addr - size &&
+- addr <= min_t(resource_size_t, base->end,
+- (1ULL << MAX_PHYSMEM_BITS) - 1);
++ addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
+ }
+
+ static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
+@@ -1847,8 +1887,8 @@ get_free_mem_region(struct device *dev, struct resource *base,
+
+ write_lock(&resource_lock);
+ for (addr = gfr_start(base, size, align, flags);
+- gfr_continue(base, addr, size, flags);
+- addr = gfr_next(addr, size, flags)) {
++ gfr_continue(base, addr, align, flags);
++ addr = gfr_next(addr, align, flags)) {
+ if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
+ REGION_DISJOINT)
+ continue;
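The new __region_intersects() walk handles a non-matching resource whose matched descendants may still cover the queried range: it sweeps ostart forward across each matched piece and declares the overlap covered only if no gap remains. That sweep is the standard interval-coverage test; a standalone sketch, under the assumption that pieces arrive sorted by start (names invented):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct range { unsigned long lo, hi; };

    /* Do the [lo,hi] pieces, sorted by start, fully cover [start,end]? */
    static bool covered(unsigned long start, unsigned long end,
                        const struct range *r, size_t n)
    {
        unsigned long ostart = start;

        for (size_t i = 0; i < n; i++) {
            if (r[i].hi < ostart || r[i].lo > end)
                continue;                 /* no overlap with what's left */
            if (r[i].lo > ostart)
                return false;             /* gap before this piece */
            if (r[i].hi >= end)
                return true;              /* reaches the end: covered */
            ostart = r[i].hi + 1;         /* consume the covered prefix */
        }
        return false;
    }

    int main(void)
    {
        struct range pieces[] = { { 0, 9 }, { 10, 19 } };

        printf("%d\n", covered(5, 15, pieces, 2));  /* 1: fully covered */
        printf("%d\n", covered(5, 25, pieces, 2));  /* 0: gap after 19 */
        return 0;
    }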
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 802551e0009bf1..9b406d9886541b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -722,7 +722,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+
+ rq->prev_irq_time += irq_delta;
+ delta -= irq_delta;
+- psi_account_irqtime(rq->curr, irq_delta);
+ delayacct_irq(rq->curr, irq_delta);
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+@@ -1305,27 +1304,24 @@ int tg_nop(struct task_group *tg, void *data)
+ static void set_load_weight(struct task_struct *p, bool update_load)
+ {
+ int prio = p->static_prio - MAX_RT_PRIO;
+- struct load_weight *load = &p->se.load;
++ struct load_weight lw;
+
+- /*
+- * SCHED_IDLE tasks get minimal weight:
+- */
+ if (task_has_idle_policy(p)) {
+- load->weight = scale_load(WEIGHT_IDLEPRIO);
+- load->inv_weight = WMULT_IDLEPRIO;
+- return;
++ lw.weight = scale_load(WEIGHT_IDLEPRIO);
++ lw.inv_weight = WMULT_IDLEPRIO;
++ } else {
++ lw.weight = scale_load(sched_prio_to_weight[prio]);
++ lw.inv_weight = sched_prio_to_wmult[prio];
+ }
+
+ /*
+ * SCHED_OTHER tasks have to update their load when changing their
+ * weight
+ */
+- if (update_load && p->sched_class == &fair_sched_class) {
+- reweight_task(p, prio);
+- } else {
+- load->weight = scale_load(sched_prio_to_weight[prio]);
+- load->inv_weight = sched_prio_to_wmult[prio];
+- }
++ if (update_load && p->sched_class == &fair_sched_class)
++ reweight_task(p, &lw);
++ else
++ p->se.load = lw;
+ }
+
+ #ifdef CONFIG_UCLAMP_TASK
+@@ -2664,9 +2660,11 @@ static int migration_cpu_stop(void *data)
+ * it.
+ */
+ WARN_ON_ONCE(!pending->stop_pending);
++ preempt_disable();
+ task_rq_unlock(rq, p, &rf);
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
++ preempt_enable();
+ return 0;
+ }
+ out:
+@@ -2986,12 +2984,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ complete = true;
+ }
+
++ preempt_disable();
+ task_rq_unlock(rq, p, rf);
+-
+ if (push_task) {
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ p, &rq->push_work);
+ }
++ preempt_enable();
+
+ if (complete)
+ complete_all(&pending->done);
+@@ -3057,12 +3056,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ if (flags & SCA_MIGRATE_ENABLE)
+ p->migration_flags &= ~MDF_PUSH;
+
++ preempt_disable();
+ task_rq_unlock(rq, p, rf);
+-
+ if (!stop_pending) {
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+ }
++ preempt_enable();
+
+ if (flags & SCA_MIGRATE_ENABLE)
+ return 0;
+@@ -4435,12 +4435,7 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
+ * @cpu: The CPU on which to snapshot the task.
+ *
+ * Returns the task_struct pointer of the task "currently" running on
+- * the specified CPU. If the same task is running on that CPU throughout,
+- * the return value will be a pointer to that task's task_struct structure.
+- * If the CPU did any context switches even vaguely concurrently with the
+- * execution of this function, the return value will be a pointer to the
+- * task_struct structure of a randomly chosen task that was running on
+- * that CPU somewhere around the time that this function was executing.
++ * the specified CPU.
+ *
+ * If the specified CPU was offline, the return value is whatever it
+ * is, perhaps a pointer to the task_struct structure of that CPU's idle
+@@ -4454,11 +4449,16 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg)
+ */
+ struct task_struct *cpu_curr_snapshot(int cpu)
+ {
++ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *t;
++ struct rq_flags rf;
+
+- smp_mb(); /* Pairing determined by caller's synchronization design. */
++ rq_lock_irqsave(rq, &rf);
++ smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
+ t = rcu_dereference(cpu_curr(cpu));
++ rq_unlock_irqrestore(rq, &rf);
+ smp_mb(); /* Pairing determined by caller's synchronization design. */
++
+ return t;
+ }
+
+@@ -5374,8 +5374,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ /* switch_mm_cid() requires the memory barriers above. */
+ switch_mm_cid(rq, prev, next);
+
+- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ prepare_lock_switch(rq, next, rf);
+
+ /* Here we just switch the register state and the stack. */
+@@ -5639,7 +5637,7 @@ void scheduler_tick(void)
+ {
+ int cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+- struct task_struct *curr = rq->curr;
++ struct task_struct *curr;
+ struct rq_flags rf;
+ unsigned long thermal_pressure;
+ u64 resched_latency;
+@@ -5651,6 +5649,9 @@ void scheduler_tick(void)
+
+ rq_lock(rq, &rf);
+
++ curr = rq->curr;
++ psi_account_irqtime(rq, curr, NULL);
++
+ update_rq_clock(rq);
+ thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
+ update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
+@@ -6615,6 +6616,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ /* Promote REQ to ACT */
+ rq->clock_update_flags <<= 1;
+ update_rq_clock(rq);
++ rq->clock_update_flags = RQCF_UPDATED;
+
+ switch_count = &prev->nivcsw;
+
+@@ -6677,8 +6679,9 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ *
+ * Here are the schemes providing that barrier on the
+ * various architectures:
+- * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
+- * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
++ * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
++ * on PowerPC and on RISC-V.
+ * - finish_lock_switch() for weakly-ordered
+ * architectures where spin_unlock is a full barrier,
+ * - switch_to() for arm64 (weakly-ordered, spin_unlock
+@@ -6687,6 +6690,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ ++*switch_count;
+
+ migrate_disable_switch(rq, prev);
++ psi_account_irqtime(rq, prev, next);
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+
+ trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+@@ -6694,8 +6698,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ /* Also unlocks the rq: */
+ rq = context_switch(rq, prev, next, &rf);
+ } else {
+- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ rq_unpin_lock(rq, &rf);
+ __balance_callbacks(rq);
+ raw_spin_rq_unlock_irq(rq);
+@@ -9505,9 +9507,11 @@ static void balance_push(struct rq *rq)
+ * Temporarily drop rq->lock such that we can wake-up the stop task.
+ * Both preemption and IRQs are still disabled.
+ */
++ preempt_disable();
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ this_cpu_ptr(&push_work));
++ preempt_enable();
+ /*
+ * At this point need_resched() is true and we'll take the loop in
+ * schedule(). The next pick is obviously going to be the stop task
+@@ -9593,6 +9597,30 @@ void set_rq_offline(struct rq *rq)
+ }
+ }
+
++static inline void sched_set_rq_online(struct rq *rq, int cpu)
++{
++ struct rq_flags rf;
++
++ rq_lock_irqsave(rq, &rf);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_online(rq);
++ }
++ rq_unlock_irqrestore(rq, &rf);
++}
++
++static inline void sched_set_rq_offline(struct rq *rq, int cpu)
++{
++ struct rq_flags rf;
++
++ rq_lock_irqsave(rq, &rf);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ rq_unlock_irqrestore(rq, &rf);
++}
++
+ /*
+ * used to mark begin/end of suspend/resume:
+ */
+@@ -9643,10 +9671,25 @@ static int cpuset_cpu_inactive(unsigned int cpu)
+ return 0;
+ }
+
++static inline void sched_smt_present_inc(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++}
++
++static inline void sched_smt_present_dec(int cpu)
++{
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_dec_cpuslocked(&sched_smt_present);
++#endif
++}
++
+ int sched_cpu_activate(unsigned int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+- struct rq_flags rf;
+
+ /*
+ * Clear the balance_push callback and prepare to schedule
+@@ -9654,13 +9697,10 @@ int sched_cpu_activate(unsigned int cpu)
+ */
+ balance_push_set(cpu, false);
+
+-#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going up, increment the number of cores with SMT present.
+ */
+- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+- static_branch_inc_cpuslocked(&sched_smt_present);
+-#endif
++ sched_smt_present_inc(cpu);
+ set_cpu_active(cpu, true);
+
+ if (sched_smp_initialized) {
+@@ -9678,12 +9718,7 @@ int sched_cpu_activate(unsigned int cpu)
+ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+ * domains.
+ */
+- rq_lock_irqsave(rq, &rf);
+- if (rq->rd) {
+- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+- set_rq_online(rq);
+- }
+- rq_unlock_irqrestore(rq, &rf);
++ sched_set_rq_online(rq, cpu);
+
+ return 0;
+ }
+@@ -9691,7 +9726,6 @@ int sched_cpu_activate(unsigned int cpu)
+ int sched_cpu_deactivate(unsigned int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+- struct rq_flags rf;
+ int ret;
+
+ /*
+@@ -9722,20 +9756,14 @@ int sched_cpu_deactivate(unsigned int cpu)
+ */
+ synchronize_rcu();
+
+- rq_lock_irqsave(rq, &rf);
+- if (rq->rd) {
+- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+- set_rq_offline(rq);
+- }
+- rq_unlock_irqrestore(rq, &rf);
++ sched_set_rq_offline(rq, cpu);
+
+-#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+- static_branch_dec_cpuslocked(&sched_smt_present);
++ sched_smt_present_dec(cpu);
+
++#ifdef CONFIG_SCHED_SMT
+ sched_core_cpu_deactivate(cpu);
+ #endif
+
+@@ -9745,6 +9773,8 @@ int sched_cpu_deactivate(unsigned int cpu)
+ sched_update_numa(cpu, false);
+ ret = cpuset_cpu_inactive(cpu);
+ if (ret) {
++ sched_smt_present_inc(cpu);
++ sched_set_rq_online(rq, cpu);
+ balance_push_set(cpu, false);
+ set_cpu_active(cpu, true);
+ sched_update_numa(cpu, true);
+@@ -10865,11 +10895,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
+ * Prevent race between setting of cfs_rq->runtime_enabled and
+ * unthrottle_offline_cfs_rqs().
+ */
+- cpus_read_lock();
+- mutex_lock(&cfs_constraints_mutex);
++ guard(cpus_read_lock)();
++ guard(mutex)(&cfs_constraints_mutex);
++
+ ret = __cfs_schedulable(tg, period, quota);
+ if (ret)
+- goto out_unlock;
++ return ret;
+
+ runtime_enabled = quota != RUNTIME_INF;
+ runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
+@@ -10879,39 +10910,38 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
+ */
+ if (runtime_enabled && !runtime_was_enabled)
+ cfs_bandwidth_usage_inc();
+- raw_spin_lock_irq(&cfs_b->lock);
+- cfs_b->period = ns_to_ktime(period);
+- cfs_b->quota = quota;
+- cfs_b->burst = burst;
+
+- __refill_cfs_bandwidth_runtime(cfs_b);
++ scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
++ cfs_b->period = ns_to_ktime(period);
++ cfs_b->quota = quota;
++ cfs_b->burst = burst;
+
+- /* Restart the period timer (if active) to handle new period expiry: */
+- if (runtime_enabled)
+- start_cfs_bandwidth(cfs_b);
++ __refill_cfs_bandwidth_runtime(cfs_b);
+
+- raw_spin_unlock_irq(&cfs_b->lock);
++ /*
++ * Restart the period timer (if active) to handle new
++ * period expiry:
++ */
++ if (runtime_enabled)
++ start_cfs_bandwidth(cfs_b);
++ }
+
+ for_each_online_cpu(i) {
+ struct cfs_rq *cfs_rq = tg->cfs_rq[i];
+ struct rq *rq = cfs_rq->rq;
+- struct rq_flags rf;
+
+- rq_lock_irq(rq, &rf);
++ guard(rq_lock_irq)(rq);
+ cfs_rq->runtime_enabled = runtime_enabled;
+ cfs_rq->runtime_remaining = 0;
+
+ if (cfs_rq->throttled)
+ unthrottle_cfs_rq(cfs_rq);
+- rq_unlock_irq(rq, &rf);
+ }
++
+ if (runtime_was_enabled && !runtime_enabled)
+ cfs_bandwidth_usage_dec();
+-out_unlock:
+- mutex_unlock(&cfs_constraints_mutex);
+- cpus_read_unlock();
+
+- return ret;
++ return 0;
+ }
+
+ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
+@@ -11426,7 +11456,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
+ {
+ struct task_group *tg = css_tg(of_css(of));
+ u64 period = tg_get_cfs_period(tg);
+- u64 burst = tg_get_cfs_burst(tg);
++ u64 burst = tg->cfs_bandwidth.burst;
+ u64 quota;
+ int ret;
+
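The tg_set_cfs_bandwidth() hunks above convert explicit lock/unlock pairs (and the out_unlock label) to scope-based guards. Outside the kernel the same idea can be sketched with the GCC/Clang cleanup attribute; the MUTEX_GUARD helper below is illustrative only, not the kernel's guard()/scoped_guard() machinery from include/linux/cleanup.h:

    /* Sketch of scope-based lock guards via __attribute__((cleanup)).
     * Illustrative: the helper names here are made up for the demo. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void mutex_unlock_cleanup(pthread_mutex_t **p)
    {
            pthread_mutex_unlock(*p);        /* runs on every scope exit */
    }

    /* "guard": lock now, unlock automatically when the scope ends */
    #define MUTEX_GUARD(var, lock) \
            pthread_mutex_t *var __attribute__((cleanup(mutex_unlock_cleanup))) = \
                    (pthread_mutex_lock(lock), (lock))

    static int set_bandwidth(int quota)
    {
            MUTEX_GUARD(g, &m);

            if (quota < 0)
                    return -1;      /* early return: unlock still happens */

            printf("quota set to %d\n", quota);
            return 0;               /* normal return: unlock still happens */
    }

    int main(void)
    {
            set_bandwidth(10);
            set_bandwidth(-1);
            return 0;
    }

The payoff, as in the hunk above, is that every exit path releases the locks, so the error path can simply return instead of jumping to a label.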
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index af7952f12e6cf1..b453f8a6a7c764 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -595,6 +595,12 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ }
+
+ stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
++ /*
++	 * Because mul_u64_u64_div_u64() can approximate on some
++	 * architectures, enforce the constraint that: a*b/(b+c) <= a.
++ */
++ if (unlikely(stime > rtime))
++ stime = rtime;
+
+ update:
+ /*
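The cputime hunk above guards against mul_u64_u64_div_u64() overshooting: mathematically stime*rtime/(stime+utime) can never exceed rtime, but approximating implementations can break that. A minimal user-space sketch of the invariant and the clamp (plain C with a 128-bit intermediate, not the kernel helper):

    /* stime' = stime * rtime / (stime + utime) must never exceed rtime,
     * since a*b/(b+c) <= a for non-negative values. Uses unsigned
     * __int128 for an exact product, which the kernel helper avoids. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t scale_stime(uint64_t stime, uint64_t utime, uint64_t rtime)
    {
            /* exact a*b/c via 128-bit intermediate */
            uint64_t scaled = (uint64_t)(((unsigned __int128)stime * rtime) /
                                         (stime + utime));

            /* defensive clamp, mirroring the kernel fix */
            if (scaled > rtime)
                    scaled = rtime;
            return scaled;
    }

    int main(void)
    {
            uint64_t s = scale_stime(3000, 1000, 8000);
            printf("scaled stime = %" PRIu64 "\n", s); /* 3000*8000/4000 = 6000 */
            return 0;
    }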
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 58b542bf289343..d78f2e8769fb4c 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2449,9 +2449,11 @@ static void pull_dl_task(struct rq *this_rq)
+ double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(this_rq);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(this_rq);
+ }
+ }
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index df348aa55d3c7e..5eb4807bad209c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -533,7 +533,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
+
+ static int se_is_idle(struct sched_entity *se)
+ {
+- return 0;
++ return task_has_idle_policy(task_of(se));
+ }
+
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+@@ -707,15 +707,21 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ *
+ * XXX could add max_slice to the augmented data to track this.
+ */
+-static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
++static s64 entity_lag(u64 avruntime, struct sched_entity *se)
+ {
+- s64 lag, limit;
++ s64 vlag, limit;
++
++ vlag = avruntime - se->vruntime;
++ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
++
++ return clamp(vlag, -limit, limit);
++}
+
++static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
++{
+ SCHED_WARN_ON(!se->on_rq);
+- lag = avg_vruntime(cfs_rq) - se->vruntime;
+
+- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+- se->vlag = clamp(lag, -limit, limit);
++ se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
+ }
+
+ /*
+@@ -3182,7 +3188,7 @@ static void reset_ptenuma_scan(struct task_struct *p)
+ p->mm->numa_scan_offset = 0;
+ }
+
+-static bool vma_is_accessed(struct vm_area_struct *vma)
++static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma)
+ {
+ unsigned long pids;
+ /*
+@@ -3194,8 +3200,29 @@ static bool vma_is_accessed(struct vm_area_struct *vma)
+ if (READ_ONCE(current->mm->numa_scan_seq) < 2)
+ return true;
+
+- pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1];
+- return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids);
++ pids = vma->numab_state->pids_active[0] | vma->numab_state->pids_active[1];
++ if (test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids))
++ return true;
++
++ /*
++ * Complete a scan that has already started regardless of PID access, or
++ * some VMAs may never be scanned in multi-threaded applications:
++ */
++ if (mm->numa_scan_offset > vma->vm_start) {
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_IGNORE_PID);
++ return true;
++ }
++
++ /*
++	 * This vma has not been accessed for a while; if the number of
++	 * threads in the same process is low, no other thread can help
++	 * scan it, so force a vma scan.
++ */
++ if (READ_ONCE(mm->numa_scan_seq) >
++ (vma->numab_state->prev_scan_seq + get_nr_threads(current)))
++ return true;
++
++ return false;
+ }
+
+ #define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay)
+@@ -3215,6 +3242,8 @@ static void task_numa_work(struct callback_head *work)
+ unsigned long nr_pte_updates = 0;
+ long pages, virtpages;
+ struct vma_iterator vmi;
++ bool vma_pids_skipped;
++ bool vma_pids_forced = false;
+
+ SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
+
+@@ -3257,7 +3286,6 @@ static void task_numa_work(struct callback_head *work)
+ */
+ p->node_stamp += 2 * TICK_NSEC;
+
+- start = mm->numa_scan_offset;
+ pages = sysctl_numa_balancing_scan_size;
+ pages <<= 20 - PAGE_SHIFT; /* MB in pages */
+ virtpages = pages * 8; /* Scan up to this much virtual space */
+@@ -3267,6 +3295,16 @@ static void task_numa_work(struct callback_head *work)
+
+ if (!mmap_read_trylock(mm))
+ return;
++
++ /*
++ * VMAs are skipped if the current PID has not trapped a fault within
++ * the VMA recently. Allow scanning to be forced if there is no
++ * suitable VMA remaining.
++ */
++ vma_pids_skipped = false;
++
++retry_pids:
++ start = mm->numa_scan_offset;
+ vma_iter_init(&vmi, mm, start);
+ vma = vma_next(&vmi);
+ if (!vma) {
+@@ -3279,6 +3317,7 @@ static void task_numa_work(struct callback_head *work)
+ do {
+ if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
+ is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_UNSUITABLE);
+ continue;
+ }
+
+@@ -3289,15 +3328,19 @@ static void task_numa_work(struct callback_head *work)
+ * as migrating the pages will be of marginal benefit.
+ */
+ if (!vma->vm_mm ||
+- (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
++ (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) {
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SHARED_RO);
+ continue;
++ }
+
+ /*
+ * Skip inaccessible VMAs to avoid any confusion between
+ * PROT_NONE and NUMA hinting ptes
+ */
+- if (!vma_is_accessible(vma))
++ if (!vma_is_accessible(vma)) {
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_INACCESSIBLE);
+ continue;
++ }
+
+ /* Initialise new per-VMA NUMAB state. */
+ if (!vma->numab_state) {
+@@ -3310,8 +3353,15 @@ static void task_numa_work(struct callback_head *work)
+ msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+
+ /* Reset happens after 4 times scan delay of scan start */
+- vma->numab_state->next_pid_reset = vma->numab_state->next_scan +
++ vma->numab_state->pids_active_reset = vma->numab_state->next_scan +
+ msecs_to_jiffies(VMA_PID_RESET_PERIOD);
++
++ /*
++ * Ensure prev_scan_seq does not match numa_scan_seq,
++ * to prevent VMAs being skipped prematurely on the
++ * first scan:
++ */
++ vma->numab_state->prev_scan_seq = mm->numa_scan_seq - 1;
+ }
+
+ /*
+@@ -3319,23 +3369,35 @@ static void task_numa_work(struct callback_head *work)
+ * delay the scan for new VMAs.
+ */
+ if (mm->numa_scan_seq && time_before(jiffies,
+- vma->numab_state->next_scan))
++ vma->numab_state->next_scan)) {
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SCAN_DELAY);
+ continue;
++ }
+
+- /* Do not scan the VMA if task has not accessed */
+- if (!vma_is_accessed(vma))
++ /* RESET access PIDs regularly for old VMAs. */
++ if (mm->numa_scan_seq &&
++ time_after(jiffies, vma->numab_state->pids_active_reset)) {
++ vma->numab_state->pids_active_reset = vma->numab_state->pids_active_reset +
++ msecs_to_jiffies(VMA_PID_RESET_PERIOD);
++ vma->numab_state->pids_active[0] = READ_ONCE(vma->numab_state->pids_active[1]);
++ vma->numab_state->pids_active[1] = 0;
++ }
++
++ /* Do not rescan VMAs twice within the same sequence. */
++ if (vma->numab_state->prev_scan_seq == mm->numa_scan_seq) {
++ mm->numa_scan_offset = vma->vm_end;
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SEQ_COMPLETED);
+ continue;
++ }
+
+ /*
+- * RESET access PIDs regularly for old VMAs. Resetting after checking
+- * vma for recent access to avoid clearing PID info before access..
++	 * Do not scan the VMA if the task has not accessed it, unless no
++	 * other VMA candidate exists.
+ */
+- if (mm->numa_scan_seq &&
+- time_after(jiffies, vma->numab_state->next_pid_reset)) {
+- vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset +
+- msecs_to_jiffies(VMA_PID_RESET_PERIOD);
+- vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]);
+- vma->numab_state->access_pids[1] = 0;
++ if (!vma_pids_forced && !vma_is_accessed(mm, vma)) {
++ vma_pids_skipped = true;
++ trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_PID_INACTIVE);
++ continue;
+ }
+
+ do {
+@@ -3362,8 +3424,28 @@ static void task_numa_work(struct callback_head *work)
+
+ cond_resched();
+ } while (end != vma->vm_end);
++
++ /* VMA scan is complete, do not scan until next sequence. */
++ vma->numab_state->prev_scan_seq = mm->numa_scan_seq;
++
++ /*
++ * Only force scan within one VMA at a time, to limit the
++ * cost of scanning a potentially uninteresting VMA.
++ */
++ if (vma_pids_forced)
++ break;
+ } for_each_vma(vmi, vma);
+
++ /*
++ * If no VMAs are remaining and VMAs were skipped due to the PID
++ * not accessing the VMA previously, then force a scan to ensure
++ * forward progress:
++ */
++ if (!vma && !vma_pids_forced && vma_pids_skipped) {
++ vma_pids_forced = true;
++ goto retry_pids;
++ }
++
+ out:
+ /*
+ * It is possible to reach the end of the VMA list but the last few
+@@ -3626,41 +3708,140 @@ static inline void
+ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+ #endif
+
++static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
++ unsigned long weight)
++{
++ unsigned long old_weight = se->load.weight;
++ s64 vlag, vslice;
++
++ /*
++ * VRUNTIME
++ * ========
++ *
++ * COROLLARY #1: The virtual runtime of the entity needs to be
++	 * adjusted if re-weighted at a !0-lag point.
++ *
++ * Proof: For contradiction assume this is not true, so we can
++ * re-weight without changing vruntime at !0-lag point.
++ *
++ * Weight VRuntime Avg-VRuntime
++ * before w v V
++ * after w' v' V'
++ *
++ * Since lag needs to be preserved through re-weight:
++ *
++ * lag = (V - v)*w = (V'- v')*w', where v = v'
++ * ==> V' = (V - v)*w/w' + v (1)
++ *
++ * Let W be the total weight of the entities before reweight,
++ * since V' is the new weighted average of entities:
++ *
++ * V' = (WV + w'v - wv) / (W + w' - w) (2)
++ *
++ * by using (1) & (2) we obtain:
++ *
++ * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
++ * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
++ * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
++ * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
++ *
++	 * Since we are re-weighting at a !0-lag point, which means
++	 * V != v, we can simplify (3):
++ *
++ * ==> W / (W + w' - w) = w / w'
++ * ==> Ww' = Ww + ww' - ww
++ * ==> W * (w' - w) = w * (w' - w)
++ * ==> W = w (re-weight indicates w' != w)
++ *
++ * So the cfs_rq contains only one entity, hence vruntime of
++	 * the entity @v should always equal the cfs_rq's weighted
++	 * average vruntime @V, which means we would always re-weight
++	 * at the 0-lag point, breaching the assumption. Proof completed.
++ *
++ *
++ * COROLLARY #2: Re-weight does NOT affect weighted average
++ * vruntime of all the entities.
++ *
++ * Proof: According to corollary #1, Eq. (1) should be:
++ *
++ * (V - v)*w = (V' - v')*w'
++ * ==> v' = V' - (V - v)*w/w' (4)
++ *
++ * According to the weighted average formula, we have:
++ *
++ * V' = (WV - wv + w'v') / (W - w + w')
++ * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
++ * = (WV - wv + w'V' - Vw + wv) / (W - w + w')
++ * = (WV + w'V' - Vw) / (W - w + w')
++ *
++ * ==> V'*(W - w + w') = WV + w'V' - Vw
++ * ==> V' * (W - w) = (W - w) * V (5)
++ *
++ * If the entity is the only one in the cfs_rq, then reweight
++	 * always occurs at the 0-lag point, so V won't change. Otherwise
++	 * there are other entities, hence W != w, and Eq. (5) reduces
++	 * to V' = V. So V won't change in either case, proof done.
++ *
++ *
++	 * So according to corollaries #1 & #2, the effect of re-weight
++ * on vruntime should be:
++ *
++ * v' = V' - (V - v) * w / w' (4)
++ * = V - (V - v) * w / w'
++ * = V - vl * w / w'
++ * = V - vl'
++ */
++ if (avruntime != se->vruntime) {
++ vlag = entity_lag(avruntime, se);
++ vlag = div_s64(vlag * old_weight, weight);
++ se->vruntime = avruntime - vlag;
++ }
++
++ /*
++ * DEADLINE
++ * ========
++ *
++ * When the weight changes, the virtual time slope changes and
++ * we should adjust the relative virtual deadline accordingly.
++ *
++ * d' = v' + (d - v)*w/w'
++ * = V' - (V - v)*w/w' + (d - v)*w/w'
++ * = V - (V - v)*w/w' + (d - v)*w/w'
++ * = V + (d - V)*w/w'
++ */
++ vslice = (s64)(se->deadline - avruntime);
++ vslice = div_s64(vslice * old_weight, weight);
++ se->deadline = avruntime + vslice;
++}
++
+ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long weight)
+ {
+- unsigned long old_weight = se->load.weight;
++ bool curr = cfs_rq->curr == se;
++ u64 avruntime;
+
+ if (se->on_rq) {
+ /* commit outstanding execution time */
+- if (cfs_rq->curr == se)
+- update_curr(cfs_rq);
+- else
+- avg_vruntime_sub(cfs_rq, se);
++ update_curr(cfs_rq);
++ avruntime = avg_vruntime(cfs_rq);
++ if (!curr)
++ __dequeue_entity(cfs_rq, se);
+ update_load_sub(&cfs_rq->load, se->load.weight);
+ }
+ dequeue_load_avg(cfs_rq, se);
+
+- update_load_set(&se->load, weight);
+-
+- if (!se->on_rq) {
++ if (se->on_rq) {
++ reweight_eevdf(se, avruntime, weight);
++ } else {
+ /*
+ * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
+ * we need to scale se->vlag when w_i changes.
+ */
+- se->vlag = div_s64(se->vlag * old_weight, weight);
+- } else {
+- s64 deadline = se->deadline - se->vruntime;
+- /*
+- * When the weight changes, the virtual time slope changes and
+- * we should adjust the relative virtual deadline accordingly.
+- */
+- deadline = div_s64(deadline * old_weight, weight);
+- se->deadline = se->vruntime + deadline;
+- if (se != cfs_rq->curr)
+- min_deadline_cb_propagate(&se->run_node, NULL);
++ se->vlag = div_s64(se->vlag * se->load.weight, weight);
+ }
+
++ update_load_set(&se->load, weight);
++
+ #ifdef CONFIG_SMP
+ do {
+ u32 divider = get_pelt_divider(&se->avg);
+@@ -3672,20 +3853,28 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ enqueue_load_avg(cfs_rq, se);
+ if (se->on_rq) {
+ update_load_add(&cfs_rq->load, se->load.weight);
+- if (cfs_rq->curr != se)
+- avg_vruntime_add(cfs_rq, se);
++ if (!curr)
++ __enqueue_entity(cfs_rq, se);
++
++ /*
++ * The entity's vruntime has been adjusted, so let's check
++		 * whether the rq-wide min_vruntime needs updating too. Since
++		 * the calculations above require a stable min_vruntime rather
++		 * than an up-to-date one, we do the update at the end of the
++ * reweight process.
++ */
++ update_min_vruntime(cfs_rq);
+ }
+ }
+
+-void reweight_task(struct task_struct *p, int prio)
++void reweight_task(struct task_struct *p, const struct load_weight *lw)
+ {
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct load_weight *load = &se->load;
+- unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+- reweight_entity(cfs_rq, se, weight);
+- load->inv_weight = sched_prio_to_wmult[prio];
++ reweight_entity(cfs_rq, se, lw->weight);
++ load->inv_weight = lw->inv_weight;
+ }
+
+ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
+@@ -3817,14 +4006,11 @@ static void update_cfs_group(struct sched_entity *se)
+
+ #ifndef CONFIG_SMP
+ shares = READ_ONCE(gcfs_rq->tg->shares);
+-
+- if (likely(se->load.weight == shares))
+- return;
+ #else
+- shares = calc_group_shares(gcfs_rq);
++ shares = calc_group_shares(gcfs_rq);
+ #endif
+-
+- reweight_entity(cfs_rq_of(se), se, shares);
++ if (unlikely(se->load.weight != shares))
++ reweight_entity(cfs_rq_of(se), se, shares);
+ }
+
+ #else /* CONFIG_FAIR_GROUP_SCHED */
+@@ -4626,22 +4812,6 @@ static inline unsigned long task_util_est(struct task_struct *p)
+ return max(task_util(p), _task_util_est(p));
+ }
+
+-#ifdef CONFIG_UCLAMP_TASK
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+- unsigned long uclamp_min,
+- unsigned long uclamp_max)
+-{
+- return clamp(task_util_est(p), uclamp_min, uclamp_max);
+-}
+-#else
+-static inline unsigned long uclamp_task_util(struct task_struct *p,
+- unsigned long uclamp_min,
+- unsigned long uclamp_max)
+-{
+- return task_util_est(p);
+-}
+-#endif
+-
+ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ struct task_struct *p)
+ {
+@@ -4932,7 +5102,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+
+ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+ {
+- return true;
++ return !cfs_rq->nr_running;
+ }
+
+ #define UPDATE_TG 0x0
+@@ -6469,22 +6639,42 @@ static inline void hrtick_update(struct rq *rq)
+ #ifdef CONFIG_SMP
+ static inline bool cpu_overutilized(int cpu)
+ {
+- unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+- unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++ unsigned long rq_util_min, rq_util_max;
++
++ if (!sched_energy_enabled())
++ return false;
++
++ rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++ rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+
+ /* Return true only if the utilization doesn't fit CPU's capacity */
+ return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+
+-static inline void update_overutilized_status(struct rq *rq)
++static inline void set_rd_overutilized_status(struct root_domain *rd,
++ unsigned int status)
+ {
+- if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
+- WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
+- trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
+- }
++ if (!sched_energy_enabled())
++ return;
++
++ WRITE_ONCE(rd->overutilized, status);
++ trace_sched_overutilized_tp(rd, !!status);
++}
++
++static inline void check_update_overutilized_status(struct rq *rq)
++{
++ /*
++	 * The overutilized field is used for load-balancing decisions
++	 * only if the energy-aware scheduler is in use.
++ */
++ if (!sched_energy_enabled())
++ return;
++
++ if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
++ set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
+ }
+ #else
+-static inline void update_overutilized_status(struct rq *rq) { }
++static inline void check_update_overutilized_status(struct rq *rq) { }
+ #endif
+
+ /* Runqueue only has SCHED_IDLE tasks enqueued */
+@@ -6585,7 +6775,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ * and the following generally works well enough in practice.
+ */
+ if (!task_new)
+- update_overutilized_status(rq);
++ check_update_overutilized_status(rq);
+
+ enqueue_throttle:
+ assert_list_leaf_cfs_rq(rq);
+@@ -7095,7 +7285,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ if (!available_idle_cpu(cpu)) {
+ idle = false;
+ if (*idle_cpu == -1) {
+- if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
++ if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
+ *idle_cpu = cpu;
+ break;
+ }
+@@ -7103,7 +7293,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ }
+ break;
+ }
+- if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
++ if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
+ *idle_cpu = cpu;
+ }
+
+@@ -7117,13 +7307,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
+ /*
+ * Scan the local SMT mask for idle CPUs.
+ */
+-static int select_idle_smt(struct task_struct *p, int target)
++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ int cpu;
+
+ for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
+ if (cpu == target)
+ continue;
++ /*
++ * Check if the CPU is in the LLC scheduling domain of @target.
++ * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
++ */
++ if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
++ continue;
+ if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
+ return cpu;
+ }
+@@ -7147,7 +7343,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
+ return __select_idle_cpu(core, p);
+ }
+
+-static inline int select_idle_smt(struct task_struct *p, int target)
++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ return -1;
+ }
+@@ -7409,7 +7605,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ has_idle_core = test_idle_cores(target);
+
+ if (!has_idle_core && cpus_share_cache(prev, target)) {
+- i = select_idle_smt(p, prev);
++ i = select_idle_smt(p, sd, prev);
+ if ((unsigned int)i < nr_cpumask_bits)
+ return i;
+ }
+@@ -7756,7 +7952,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ target = prev_cpu;
+
+ sync_entity_load_avg(&p->se);
+- if (!uclamp_task_util(p, p_util_min, p_util_max))
++ if (!task_util_est(p) && p_util_min == 0)
+ goto unlock;
+
+ eenv_task_busy_time(&eenv, p, prev_cpu);
+@@ -7764,11 +7960,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ for (; pd; pd = pd->next) {
+ unsigned long util_min = p_util_min, util_max = p_util_max;
+ unsigned long cpu_cap, cpu_thermal_cap, util;
+- unsigned long cur_delta, max_spare_cap = 0;
++ long prev_spare_cap = -1, max_spare_cap = -1;
+ unsigned long rq_util_min, rq_util_max;
+- unsigned long prev_spare_cap = 0;
++ unsigned long cur_delta, base_energy;
+ int max_spare_cap_cpu = -1;
+- unsigned long base_energy;
+ int fits, max_fits = -1;
+
+ cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+@@ -7831,7 +8026,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ prev_spare_cap = cpu_cap;
+ prev_fits = fits;
+ } else if ((fits > max_fits) ||
+- ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
++ ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
+ /*
+ * Find the CPU with the maximum spare capacity
+ * among the remaining CPUs in the performance
+@@ -7843,7 +8038,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ }
+ }
+
+- if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
++ if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
+ continue;
+
+ eenv_pd_busy_time(&eenv, cpus, p);
+@@ -7851,7 +8046,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ base_energy = compute_energy(&eenv, pd, cpus, p, -1);
+
+ /* Evaluate the energy impact of using prev_cpu. */
+- if (prev_spare_cap > 0) {
++ if (prev_spare_cap > -1) {
+ prev_delta = compute_energy(&eenv, pd, cpus, p,
+ prev_cpu);
+ /* CPU utilization has changed */
+@@ -8090,16 +8285,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ if (test_tsk_need_resched(curr))
+ return;
+
+- /* Idle tasks are by definition preempted by non-idle tasks. */
+- if (unlikely(task_has_idle_policy(curr)) &&
+- likely(!task_has_idle_policy(p)))
+- goto preempt;
+-
+- /*
+- * Batch and idle tasks do not preempt non-idle tasks (their preemption
+- * is driven by the tick):
+- */
+- if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
++ if (!sched_feat(WAKEUP_PREEMPTION))
+ return;
+
+ find_matching_se(&se, &pse);
+@@ -8109,7 +8295,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ pse_is_idle = se_is_idle(pse);
+
+ /*
+- * Preempt an idle group in favor of a non-idle group (and don't preempt
++ * Preempt an idle entity in favor of a non-idle entity (and don't preempt
+ * in the inverse case).
+ */
+ if (cse_is_idle && !pse_is_idle)
+@@ -8117,9 +8303,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ if (cse_is_idle != pse_is_idle)
+ return;
+
++ /*
++ * BATCH and IDLE tasks do not preempt others.
++ */
++ if (unlikely(p->policy != SCHED_NORMAL))
++ return;
++
+ cfs_rq = cfs_rq_of(se);
+ update_curr(cfs_rq);
+-
+ /*
+ * XXX pick_eevdf(cfs_rq) != se ?
+ */
+@@ -8857,12 +9048,8 @@ static int detach_tasks(struct lb_env *env)
+ break;
+
+ env->loop++;
+- /*
+- * We've more or less seen every task there is, call it quits
+- * unless we haven't found any movable task yet.
+- */
+- if (env->loop > env->loop_max &&
+- !(env->flags & LBF_ALL_PINNED))
++ /* We've more or less seen every task there is, call it quits */
++ if (env->loop > env->loop_max)
+ break;
+
+ /* take a breather every nr_migrate tasks */
+@@ -10400,19 +10587,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
+ if (!env->sd->parent) {
+- struct root_domain *rd = env->dst_rq->rd;
+-
+ /* update overload indicator if we are at root domain */
+- WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
++ WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
+
+ /* Update over-utilization (tipping point, U >= 0) indicator */
+- WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
+- trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
++ set_rd_overutilized_status(env->dst_rq->rd,
++ sg_status & SG_OVERUTILIZED);
+ } else if (sg_status & SG_OVERUTILIZED) {
+- struct root_domain *rd = env->dst_rq->rd;
+-
+- WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
+- trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
++ set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
+ }
+
+ update_idle_cpu_scan(env, sum_util);
+@@ -11033,12 +11215,16 @@ static int should_we_balance(struct lb_env *env)
+ continue;
+ }
+
+- /* Are we the first idle CPU? */
++ /*
++ * Are we the first idle core in a non-SMT domain or higher,
++	 * or the first idle CPU in an SMT domain?
++ */
+ return cpu == env->dst_cpu;
+ }
+
+- if (idle_smt == env->dst_cpu)
+- return true;
++ /* Are we the first idle CPU with busy siblings? */
++ if (idle_smt != -1)
++ return idle_smt == env->dst_cpu;
+
+ /* Are we the first CPU of this group ? */
+ return group_balance_cpu(sg) == env->dst_cpu;
+@@ -11140,9 +11326,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+
+ if (env.flags & LBF_NEED_BREAK) {
+ env.flags &= ~LBF_NEED_BREAK;
+- /* Stop if we tried all running tasks */
+- if (env.loop < busiest->nr_running)
+- goto more_balance;
++ goto more_balance;
+ }
+
+ /*
+@@ -11251,13 +11435,15 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ busiest->push_cpu = this_cpu;
+ active_balance = 1;
+ }
+- raw_spin_rq_unlock_irqrestore(busiest, flags);
+
++ preempt_disable();
++ raw_spin_rq_unlock_irqrestore(busiest, flags);
+ if (active_balance) {
+ stop_one_cpu_nowait(cpu_of(busiest),
+ active_load_balance_cpu_stop, busiest,
+ &busiest->active_balance_work);
+ }
++ preempt_enable();
+ }
+ } else {
+ sd->nr_balance_failed = 0;
+@@ -12397,7 +12583,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+ task_tick_numa(rq, curr);
+
+ update_misfit_status(curr, rq);
+- update_overutilized_status(task_rq(curr));
++ check_update_overutilized_status(task_rq(curr));
+
+ task_tick_core(rq, curr);
+ }
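reweight_eevdf() above rescales an entity's lag and relative virtual deadline by old_weight/new_weight so that the weighted lag and the virtual-time slope are preserved across a re-weight. A self-contained sketch of that arithmetic with made-up numbers (the kernel works on fixed-point load weights):

    /* v' = V - vlag*w/w' and d' = V + (d - V)*w/w', as derived in the
     * reweight_eevdf() comment block above. Values are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    static int64_t div_s64(int64_t a, int64_t b) { return a / b; }

    int main(void)
    {
            int64_t V = 1000;        /* cfs_rq average vruntime */
            int64_t v = 400;         /* entity vruntime */
            int64_t d = 1600;        /* entity virtual deadline */
            int64_t w_old = 1024, w_new = 2048;

            int64_t vlag = V - v;                       /* 600 */
            vlag = div_s64(vlag * w_old, w_new);        /* 300: lag halves as weight doubles */
            int64_t v_new = V - vlag;                   /* 700 */

            int64_t vslice = d - V;                     /* 600 */
            vslice = div_s64(vslice * w_old, w_new);    /* 300 */
            int64_t d_new = V + vslice;                 /* 1300 */

            printf("v: %lld -> %lld, d: %lld -> %lld\n",
                   (long long)v, (long long)v_new, (long long)d, (long long)d_new);
            return 0;
    }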
+diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
+index 373d42c707bc5d..82e2f7fc7c267d 100644
+--- a/kernel/sched/isolation.c
++++ b/kernel/sched/isolation.c
+@@ -109,6 +109,7 @@ static void __init housekeeping_setup_type(enum hk_type type,
+ static int __init housekeeping_setup(char *str, unsigned long flags)
+ {
+ cpumask_var_t non_housekeeping_mask, housekeeping_staging;
++ unsigned int first_cpu;
+ int err = 0;
+
+ if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) {
+@@ -129,7 +130,8 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
+ cpumask_andnot(housekeeping_staging,
+ cpu_possible_mask, non_housekeeping_mask);
+
+- if (!cpumask_intersects(cpu_present_mask, housekeeping_staging)) {
++ first_cpu = cpumask_first_and(cpu_present_mask, housekeeping_staging);
++ if (first_cpu >= nr_cpu_ids || first_cpu >= setup_max_cpus) {
+ __cpumask_set_cpu(smp_processor_id(), housekeeping_staging);
+ __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
+ if (!housekeeping.flags) {
+@@ -138,6 +140,9 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
+ }
+ }
+
++ if (cpumask_empty(non_housekeeping_mask))
++ goto free_housekeeping_staging;
++
+ if (!housekeeping.flags) {
+ /* First setup call ("nohz_full=" or "isolcpus=") */
+ enum hk_type type;
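housekeeping_setup() above now looks up the first present CPU in the candidate housekeeping mask and falls back to the boot CPU when nothing usable remains, or when everything present lies beyond setup_max_cpus. A toy version with 64-bit words standing in for cpumasks; names and values are illustrative:

    /* Sketch of the housekeeping mask check, with cpumasks modelled as
     * plain 64-bit words (bit i == CPU i, <= 64 CPUs assumed). */
    #include <stdint.h>
    #include <stdio.h>

    static int first_set_bit(uint64_t m)
    {
            return m ? __builtin_ctzll(m) : 64;    /* 64 ~ "none", like nr_cpu_ids */
    }

    int main(void)
    {
            uint64_t possible  = 0x0F;   /* CPUs 0-3 possible */
            uint64_t present   = 0x0F;   /* CPUs 0-3 present */
            uint64_t non_hk    = 0x0F;   /* isolcpus=0-3: nothing left over */
            int setup_max_cpus = 4;
            int boot_cpu       = 0;

            uint64_t staging = possible & ~non_hk;       /* candidate housekeeping CPUs */
            int first_cpu = first_set_bit(present & staging);

            if (first_cpu >= 64 || first_cpu >= setup_max_cpus) {
                    /* no usable housekeeping CPU: force the boot CPU back in */
                    staging |=  (1ULL << boot_cpu);
                    non_hk  &= ~(1ULL << boot_cpu);
                    printf("fell back to boot CPU %d\n", boot_cpu);
            } else {
                    printf("first housekeeping CPU: %d\n", first_cpu);
            }
            return 0;
    }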
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index 2ad881d07752c1..4e715b9b278e7f 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -162,6 +162,9 @@
+ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
+ | MEMBARRIER_CMD_GET_REGISTRATIONS)
+
++static DEFINE_MUTEX(membarrier_ipi_mutex);
++#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
++
+ static void ipi_mb(void *info)
+ {
+ smp_mb(); /* IPIs should be serializing but paranoid. */
+@@ -259,6 +262,7 @@ static int membarrier_global_expedited(void)
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
++ SERIALIZE_IPI();
+ cpus_read_lock();
+ rcu_read_lock();
+ for_each_online_cpu(cpu) {
+@@ -347,6 +351,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
+ if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
++ SERIALIZE_IPI();
+ cpus_read_lock();
+
+ if (cpu_id >= 0) {
+@@ -460,6 +465,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
+ * between threads which are users of @mm has its membarrier state
+ * updated.
+ */
++ SERIALIZE_IPI();
+ cpus_read_lock();
+ rcu_read_lock();
+ for_each_online_cpu(cpu) {
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 1d0f634725a6e3..f97e1473389ff1 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -776,14 +776,16 @@ static void record_times(struct psi_group_cpu *groupc, u64 now)
+ }
+
+ static void psi_group_change(struct psi_group *group, int cpu,
+- unsigned int clear, unsigned int set, u64 now,
++ unsigned int clear, unsigned int set,
+ bool wake_clock)
+ {
+ struct psi_group_cpu *groupc;
+ unsigned int t, m;
+ enum psi_states s;
+ u32 state_mask;
++ u64 now;
+
++ lockdep_assert_rq_held(cpu_rq(cpu));
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+
+ /*
+@@ -796,6 +798,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ * SOME and FULL time these may have resulted in.
+ */
+ write_seqcount_begin(&groupc->seq);
++ now = cpu_clock(cpu);
+
+ /*
+ * Start with TSK_ONCPU, which doesn't have a corresponding
+@@ -909,18 +912,15 @@ void psi_task_change(struct task_struct *task, int clear, int set)
+ {
+ int cpu = task_cpu(task);
+ struct psi_group *group;
+- u64 now;
+
+ if (!task->pid)
+ return;
+
+ psi_flags_change(task, clear, set);
+
+- now = cpu_clock(cpu);
+-
+ group = task_psi_group(task);
+ do {
+- psi_group_change(group, cpu, clear, set, now, true);
++ psi_group_change(group, cpu, clear, set, true);
+ } while ((group = group->parent));
+ }
+
+@@ -929,7 +929,6 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ {
+ struct psi_group *group, *common = NULL;
+ int cpu = task_cpu(prev);
+- u64 now = cpu_clock(cpu);
+
+ if (next->pid) {
+ psi_flags_change(next, 0, TSK_ONCPU);
+@@ -946,7 +945,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ break;
+ }
+
+- psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
++ psi_group_change(group, cpu, 0, TSK_ONCPU, true);
+ } while ((group = group->parent));
+ }
+
+@@ -984,7 +983,7 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ do {
+ if (group == common)
+ break;
+- psi_group_change(group, cpu, clear, set, now, wake_clock);
++ psi_group_change(group, cpu, clear, set, wake_clock);
+ } while ((group = group->parent));
+
+ /*
+@@ -996,32 +995,44 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
+ clear &= ~TSK_ONCPU;
+ for (; group; group = group->parent)
+- psi_group_change(group, cpu, clear, set, now, wake_clock);
++ psi_group_change(group, cpu, clear, set, wake_clock);
+ }
+ }
+ }
+
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+-void psi_account_irqtime(struct task_struct *task, u32 delta)
++void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
+ {
+- int cpu = task_cpu(task);
++ int cpu = task_cpu(curr);
+ struct psi_group *group;
+ struct psi_group_cpu *groupc;
+- u64 now;
++ s64 delta;
++ u64 irq;
+
+- if (!task->pid)
++ if (!curr->pid)
+ return;
+
+- now = cpu_clock(cpu);
++ lockdep_assert_rq_held(rq);
++ group = task_psi_group(curr);
++ if (prev && task_psi_group(prev) == group)
++ return;
++
++ irq = irq_time_read(cpu);
++ delta = (s64)(irq - rq->psi_irq_time);
++ if (delta < 0)
++ return;
++ rq->psi_irq_time = irq;
+
+- group = task_psi_group(task);
+ do {
++ u64 now;
++
+ if (!group->enabled)
+ continue;
+
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+
+ write_seqcount_begin(&groupc->seq);
++ now = cpu_clock(cpu);
+
+ record_times(groupc, now);
+ groupc->times[PSI_IRQ_FULL] += delta;
+@@ -1220,11 +1231,9 @@ void psi_cgroup_restart(struct psi_group *group)
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+- u64 now;
+
+ rq_lock_irq(rq, &rf);
+- now = cpu_clock(cpu);
+- psi_group_change(group, cpu, 0, 0, now, true);
++ psi_group_change(group, cpu, 0, 0, true);
+ rq_unlock_irq(rq, &rf);
+ }
+ }
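psi_account_irqtime() above switches from a caller-supplied delta to tracking the last IRQ-time snapshot per runqueue and accounting the signed increment, discarding negative deltas. A stripped-down sketch of that bookkeeping (single-threaded, illustrative names):

    /* Keep the last-seen IRQ time and account only the (signed)
     * increment; a negative delta means the snapshot ran ahead and
     * must be skipped rather than wrapped into a huge value. */
    #include <stdint.h>
    #include <stdio.h>

    struct rq_demo {
            uint64_t psi_irq_time;   /* last IRQ-time snapshot we accounted */
    };

    static uint64_t irq_total;       /* stands in for irq_time_read(cpu) */

    static void account_irqtime(struct rq_demo *rq, uint64_t *psi_irq_full)
    {
            uint64_t irq = irq_total;
            int64_t delta = (int64_t)(irq - rq->psi_irq_time);

            if (delta < 0)           /* snapshot ran ahead: skip, don't wrap */
                    return;
            rq->psi_irq_time = irq;
            *psi_irq_full += (uint64_t)delta;
    }

    int main(void)
    {
            struct rq_demo rq = { 0 };
            uint64_t full = 0;

            irq_total = 150; account_irqtime(&rq, &full);
            irq_total = 400; account_irqtime(&rq, &full);
            printf("PSI_IRQ_FULL accumulated: %llu\n", (unsigned long long)full); /* 400 */
            return 0;
    }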
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 0597ba0f85ff30..4ac36eb4cdee58 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -37,6 +37,8 @@ static struct ctl_table sched_rt_sysctls[] = {
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_rt_handler,
++ .extra1 = SYSCTL_ONE,
++ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "sched_rt_runtime_us",
+@@ -44,6 +46,8 @@ static struct ctl_table sched_rt_sysctls[] = {
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = sched_rt_handler,
++ .extra1 = SYSCTL_NEG_ONE,
++ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "sched_rr_timeslice_ms",
+@@ -2109,9 +2113,11 @@ static int push_rt_task(struct rq *rq, bool pull)
+ */
+ push_task = get_push_task(rq);
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+ push_task, &rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(rq);
+ }
+
+@@ -2448,9 +2454,11 @@ static void pull_rt_task(struct rq *this_rq)
+ double_unlock_balance(this_rq, src_rq);
+
+ if (push_task) {
++ preempt_disable();
+ raw_spin_rq_unlock(this_rq);
+ stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+ push_task, &src_rq->push_work);
++ preempt_enable();
+ raw_spin_rq_lock(this_rq);
+ }
+ }
+@@ -2985,9 +2993,6 @@ static int sched_rt_global_constraints(void)
+ #ifdef CONFIG_SYSCTL
+ static int sched_rt_global_validate(void)
+ {
+- if (sysctl_sched_rt_period <= 0)
+- return -EINVAL;
+-
+ if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+ ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
+ ((u64)sysctl_sched_rt_runtime *
+@@ -3018,7 +3023,7 @@ static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+ old_period = sysctl_sched_rt_period;
+ old_runtime = sysctl_sched_rt_runtime;
+
+- ret = proc_dointvec(table, write, buffer, lenp, ppos);
++ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!ret && write) {
+ ret = sched_rt_global_validate();
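The rt.c sysctl hunks above move range checking into extra1/extra2 bounds enforced by proc_dointvec_minmax(), leaving only the cross-field runtime <= period constraint to sched_rt_global_validate(). A compact sketch of the resulting split, with error codes loosely mirroring -EINVAL (the kernel validator additionally guards a u64 multiplication overflow, omitted here):

    #include <limits.h>
    #include <stdio.h>

    #define RUNTIME_INF (-1)

    static int validate_range(int val, int lo, int hi)
    {
            return val >= lo && val <= hi;   /* what proc_dointvec_minmax enforces */
    }

    static int rt_global_validate(int period_us, int runtime_us)
    {
            if (runtime_us != RUNTIME_INF && runtime_us > period_us)
                    return -1;               /* -EINVAL in the kernel */
            return 0;
    }

    int main(void)
    {
            int period = 1000000, runtime = 950000;

            if (!validate_range(period, 1, INT_MAX) ||
                !validate_range(runtime, RUNTIME_INF, INT_MAX))
                    return 1;
            printf("valid: %s\n", rt_global_validate(period, runtime) ? "no" : "yes");
            return 0;
    }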
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 04846272409cc0..8cbbbea7fdbbd6 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -88,6 +88,8 @@
+ # include <asm/paravirt_api_clock.h>
+ #endif
+
++#include <asm/barrier.h>
++
+ #include "cpupri.h"
+ #include "cpudeadline.h"
+
+@@ -1092,6 +1094,7 @@ struct rq {
+
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
++ u64 psi_irq_time;
+ #endif
+ #ifdef CONFIG_PARAVIRT
+ u64 prev_steal_time;
+@@ -2432,7 +2435,7 @@ extern void init_sched_dl_class(void);
+ extern void init_sched_rt_class(void);
+ extern void init_sched_fair_class(void);
+
+-extern void reweight_task(struct task_struct *p, int prio);
++extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
+
+ extern void resched_curr(struct rq *rq);
+ extern void resched_cpu(int cpu);
+@@ -3500,13 +3503,19 @@ static inline void switch_mm_cid(struct rq *rq,
+ * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
+ * Provide it here.
+ */
+- if (!prev->mm) // from kernel
++ if (!prev->mm) { // from kernel
+ smp_mb();
+- /*
+- * user -> user transition guarantees a memory barrier through
+- * switch_mm() when current->mm changes. If current->mm is
+- * unchanged, no barrier is needed.
+- */
++ } else { // from user
++ /*
++ * user->user transition relies on an implicit
++ * memory barrier in switch_mm() when
++ * current->mm changes. If the architecture
++ * switch_mm() does not have an implicit memory
++ * barrier, it is emitted here. If current->mm
++ * is unchanged, no barrier is needed.
++ */
++ smp_mb__after_switch_mm();
++ }
+ }
+ if (prev->mm_cid_active) {
+ mm_cid_snapshot_time(rq, prev->mm);
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cbed..966f4eacfe51d6 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -92,16 +92,6 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
+
+ trace_sched_stat_blocked(p, delta);
+
+- /*
+- * Blocking time is in units of nanosecs, so shift by
+- * 20 to get a milliseconds-range estimation of the
+- * amount of time that the task spent sleeping:
+- */
+- if (unlikely(prof_on == SLEEP_PROFILING)) {
+- profile_hits(SLEEP_PROFILING,
+- (void *)get_wchan(p),
+- delta >> 20);
+- }
+ account_scheduler_latency(p, delta >> 10, 0);
+ }
+ }
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 38f3698f5e5b31..b02dfc32295100 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -110,8 +110,12 @@ __schedstats_from_se(struct sched_entity *se)
+ void psi_task_change(struct task_struct *task, int clear, int set);
+ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ bool sleep);
+-void psi_account_irqtime(struct task_struct *task, u32 delta);
+-
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
++#else
++static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
++ struct task_struct *prev) {}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+ /*
+ * PSI tracks state that persists across sleeps, such as iowaits and
+ * memory stalls. As a result, it has to distinguish between sleeps,
+@@ -192,7 +196,8 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+ static inline void psi_sched_switch(struct task_struct *prev,
+ struct task_struct *next,
+ bool sleep) {}
+-static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
++static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
++ struct task_struct *prev) {}
+ #endif /* CONFIG_PSI */
+
+ #ifdef CONFIG_SCHED_INFO
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 05a5bc678c0894..3a13cecf177402 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -1452,7 +1452,7 @@ static void set_domain_attribute(struct sched_domain *sd,
+ } else
+ request = attr->relax_domain_level;
+
+- if (sd->level > request) {
++ if (sd->level >= request) {
+ /* Turn off idle balance on this domain: */
+ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+ }
+@@ -2122,12 +2122,19 @@ static int hop_cmp(const void *a, const void *b)
+ */
+ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+ {
+- struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
++ struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
+ struct cpumask ***hop_masks;
+ int hop, ret = nr_cpu_ids;
+
++ if (node == NUMA_NO_NODE)
++ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
++
+ rcu_read_lock();
+
++ /* CPU-less node entries are uninitialized in sched_domains_numa_masks */
++ node = numa_nearest_node(node, N_CPU);
++ k.node = node;
++
+ k.masks = rcu_dereference(sched_domains_numa_masks);
+ if (!k.masks)
+ goto unlock;
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 09019017d6690a..21903f524ef86f 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2587,6 +2587,14 @@ static void do_freezer_trap(void)
+ spin_unlock_irq(&current->sighand->siglock);
+ cgroup_enter_frozen();
+ schedule();
++
++ /*
++	 * We could've been woken by task_work; run it to clear
++ * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
++ */
++ clear_notify_signal();
++ if (unlikely(task_work_pending(current)))
++ task_work_run();
+ }
+
+ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 8455a53465af8c..3eeffeaf5450c6 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -170,6 +170,8 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
+
+ static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
+ module_param(csd_lock_timeout, ulong, 0444);
++static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
++module_param(panic_on_ipistall, int, 0444);
+
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+
+@@ -230,6 +232,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ }
+
+ ts2 = sched_clock();
++	/* How long since we last checked for a stuck CSD lock. */
+ ts_delta = ts2 - *ts1;
+ if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
+ return false;
+@@ -243,9 +246,17 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ else
+ cpux = cpu;
+ cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
++ /* How long since this CSD lock was stuck. */
++ ts_delta = ts2 - ts0;
+ pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
+- firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
++ firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
+ cpu, csd->func, csd->info);
++ /*
++ * If the CSD lock is still stuck after 5 minutes, it is unlikely
++ * to become unstuck. Use a signed comparison to avoid triggering
++ * on underflows when the TSC is out of sync between sockets.
++ */
++ BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
+ if (cpu_cur_csd && csd != cpu_cur_csd) {
+ pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
+ *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
+@@ -1108,6 +1119,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
+
+ queue_work_on(cpu, system_wq, &sscs.work);
+ wait_for_completion(&sscs.done);
++ destroy_work_on_stack(&sscs.work);
+
+ return sscs.ret;
+ }
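The panic_on_ipistall check above deliberately compares the stall duration as a signed value, so a clock that appears to run backwards (e.g. TSC skew between sockets making ts2 < ts0) cannot wrap into a huge unsigned delta and panic spuriously. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000LL

    static int should_panic(uint64_t ts0, uint64_t ts2, int panic_on_ipistall)
    {
            int64_t ts_delta = (int64_t)(ts2 - ts0);  /* signed: skew goes negative */

            return panic_on_ipistall > 0 &&
                   ts_delta > (int64_t)panic_on_ipistall * NSEC_PER_MSEC;
    }

    int main(void)
    {
            /* clock went "backwards": unsigned subtraction would look huge */
            printf("skewed: %d\n", should_panic(1000, 500, 300000));          /* 0 */
            /* genuine 6-minute stall against a 5-minute limit */
            printf("stuck:  %d\n", should_panic(0, 360000000000ULL, 300000)); /* 1 */
            return 0;
    }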
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index f47d8f375946bd..1992b62e980b76 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -272,8 +272,7 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
+ struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
+
+ if (tsk) {
+- kthread_stop(tsk);
+- put_task_struct(tsk);
++ kthread_stop_put(tsk);
+ *per_cpu_ptr(ht->store, cpu) = NULL;
+ }
+ }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 210cf5f8d92c2c..bd9716d7bb6383 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -507,7 +507,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
+ static inline void lockdep_softirq_end(bool in_hardirq) { }
+ #endif
+
+-asmlinkage __visible void __softirq_entry __do_softirq(void)
++static void handle_softirqs(bool ksirqd)
+ {
+ unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+ unsigned long old_flags = current->flags;
+@@ -562,8 +562,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ pending >>= softirq_bit;
+ }
+
+- if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
+- __this_cpu_read(ksoftirqd) == current)
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
+ rcu_softirq_qs();
+
+ local_irq_disable();
+@@ -583,6 +582,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ current_restore_flags(old_flags, PF_MEMALLOC);
+ }
+
++asmlinkage __visible void __softirq_entry __do_softirq(void)
++{
++ handle_softirqs(false);
++}
++
+ /**
+ * irq_enter_rcu - Enter an interrupt context with RCU watching
+ */
+@@ -918,7 +922,7 @@ static void run_ksoftirqd(unsigned int cpu)
+ * We can safely run softirq on inline stack, as we are not deep
+ * in the task stack here.
+ */
+- __do_softirq();
++ handle_softirqs(true);
+ ksoftirqd_run_end();
+ cond_resched();
+ return;
+diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
+index 639397b5491ca0..5259cda486d058 100644
+--- a/kernel/static_call_inline.c
++++ b/kernel/static_call_inline.c
+@@ -411,6 +411,17 @@ static void static_call_del_module(struct module *mod)
+
+ for (site = start; site < stop; site++) {
+ key = static_call_key(site);
++
++ /*
++ * If the key was not updated due to a memory allocation
++ * failure in __static_call_init() then treating key::sites
++ * as key::mods in the code below would cause random memory
++ * access and #GP. In that case all subsequent sites have
++ * not been touched either, so stop iterating.
++ */
++ if (!static_call_key_has_mods(key))
++ break;
++
+ if (key == prev_key)
+ continue;
+
+@@ -442,7 +453,7 @@ static int static_call_module_notify(struct notifier_block *nb,
+ case MODULE_STATE_COMING:
+ ret = static_call_add_module(mod);
+ if (ret) {
+- WARN(1, "Failed to allocate memory for static calls");
++ pr_warn("Failed to allocate memory for static calls\n");
+ static_call_del_module(mod);
+ }
+ break;
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 2410e3999ebe5c..44b5759903332b 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -2368,19 +2368,48 @@ static int prctl_set_vma(unsigned long opt, unsigned long start,
+ }
+ #endif /* CONFIG_ANON_VMA_NAME */
+
++static inline unsigned long get_current_mdwe(void)
++{
++ unsigned long ret = 0;
++
++ if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
++ ret |= PR_MDWE_REFUSE_EXEC_GAIN;
++ if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags))
++ ret |= PR_MDWE_NO_INHERIT;
++
++ return ret;
++}
++
+ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+ {
++ unsigned long current_bits;
++
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+
+- if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN))
++ if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
+ return -EINVAL;
+
++ /* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */
++ if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
++ return -EINVAL;
++
++ /*
++ * EOPNOTSUPP might be more appropriate here in principle, but
++ * existing userspace depends on EINVAL specifically.
++ */
++ if (!arch_memory_deny_write_exec_supported())
++ return -EINVAL;
++
++ current_bits = get_current_mdwe();
++ if (current_bits && current_bits != bits)
++ return -EPERM; /* Cannot unset the flags */
++
++ if (bits & PR_MDWE_NO_INHERIT)
++ set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags);
+ if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
+ set_bit(MMF_HAS_MDWE, &current->mm->flags);
+- else if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
+- return -EPERM; /* Cannot unset the flag */
+
+ return 0;
+ }
+@@ -2390,9 +2419,7 @@ static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
+ {
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
+-
+- return test_bit(MMF_HAS_MDWE, &current->mm->flags) ?
+- PR_MDWE_REFUSE_EXEC_GAIN : 0;
++ return get_current_mdwe();
+ }
+
+ static int prctl_get_auxv(void __user *addr, unsigned long len)
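prctl_set_mdwe() above grows two rules: PR_MDWE_NO_INHERIT is only accepted together with PR_MDWE_REFUSE_EXEC_GAIN, and once any MDWE bits are set they can only be re-asserted identically, never changed. A user-space sketch of just that state machine (the constants match the uapi values; the mm flags are modelled as a plain variable):

    #include <stdio.h>

    #define PR_MDWE_REFUSE_EXEC_GAIN 1UL
    #define PR_MDWE_NO_INHERIT       2UL

    static unsigned long mdwe_state;   /* stands in for the MMF_HAS_MDWE* bits */

    static int set_mdwe(unsigned long bits)
    {
            if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
                    return -22;                         /* -EINVAL */
            if ((bits & PR_MDWE_NO_INHERIT) && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
                    return -22;                         /* NO_INHERIT needs EXEC_GAIN */
            if (mdwe_state && mdwe_state != bits)
                    return -1;                          /* -EPERM: cannot change */
            mdwe_state = bits;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", set_mdwe(PR_MDWE_NO_INHERIT));                            /* -22 */
            printf("%d\n", set_mdwe(PR_MDWE_REFUSE_EXEC_GAIN));                      /*   0 */
            printf("%d\n", set_mdwe(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT)); /*  -1 */
            return 0;
    }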
+diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
+index e137c1385c569e..e8e11778737704 100644
+--- a/kernel/sys_ni.c
++++ b/kernel/sys_ni.c
+@@ -46,8 +46,8 @@ COND_SYSCALL(io_getevents_time32);
+ COND_SYSCALL(io_getevents);
+ COND_SYSCALL(io_pgetevents_time32);
+ COND_SYSCALL(io_pgetevents);
+-COND_SYSCALL_COMPAT(io_pgetevents_time32);
+ COND_SYSCALL_COMPAT(io_pgetevents);
++COND_SYSCALL_COMPAT(io_pgetevents_time64);
+ COND_SYSCALL(io_uring_setup);
+ COND_SYSCALL(io_uring_enter);
+ COND_SYSCALL(io_uring_register);
+@@ -200,6 +200,20 @@ COND_SYSCALL(recvmmsg_time32);
+ COND_SYSCALL_COMPAT(recvmmsg_time32);
+ COND_SYSCALL_COMPAT(recvmmsg_time64);
+
++/* POSIX timer syscalls may be configured out */
++COND_SYSCALL(timer_create);
++COND_SYSCALL(timer_gettime);
++COND_SYSCALL(timer_getoverrun);
++COND_SYSCALL(timer_settime);
++COND_SYSCALL(timer_delete);
++COND_SYSCALL(clock_adjtime);
++COND_SYSCALL(getitimer);
++COND_SYSCALL(setitimer);
++COND_SYSCALL(alarm);
++COND_SYSCALL_COMPAT(timer_create);
++COND_SYSCALL_COMPAT(getitimer);
++COND_SYSCALL_COMPAT(setitimer);
++
+ /*
+ * Architecture specific syscalls: see further below
+ */
+diff --git a/kernel/task_work.c b/kernel/task_work.c
+index 95a7e1b7f1dab2..2134ac8057a94e 100644
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -120,9 +120,9 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
+ }
+
+ /**
+- * task_work_cancel - cancel a pending work added by task_work_add()
+- * @task: the task which should execute the work
+- * @func: identifies the work to remove
++ * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
++ * @task: the task which should execute the matching work
++ * @func: the function identifying the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue.
+@@ -131,11 +131,35 @@ static bool task_work_func_match(struct callback_head *cb, void *data)
+ * The found work or NULL if not found.
+ */
+ struct callback_head *
+-task_work_cancel(struct task_struct *task, task_work_func_t func)
++task_work_cancel_func(struct task_struct *task, task_work_func_t func)
+ {
+ return task_work_cancel_match(task, task_work_func_match, func);
+ }
+
++static bool task_work_match(struct callback_head *cb, void *data)
++{
++ return cb == data;
++}
++
++/**
++ * task_work_cancel - cancel a pending work added by task_work_add()
++ * @task: the task which should execute the work
++ * @cb: the callback to remove if queued
++ *
++ * Remove a callback from a task's queue if queued.
++ *
++ * RETURNS:
++ * True if the callback was queued and got cancelled, false otherwise.
++ */
++bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
++{
++ struct callback_head *ret;
++
++ ret = task_work_cancel_match(task, task_work_match, cb);
++
++ return ret == cb;
++}
++
+ /**
+ * task_work_run - execute the works added by task_work_add()
+ *
+@@ -168,7 +192,7 @@ void task_work_run(void)
+ if (!work)
+ break;
+ /*
+- * Synchronize with task_work_cancel(). It can not remove
++ * Synchronize with task_work_cancel_match(). It can not remove
+ * the first entry == work, cmpxchg(task_works) must fail.
+ * But it can remove another entry from the ->next list.
+ */
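task_work_cancel() above now cancels by callback_head pointer instead of by function, so two queued works sharing one function can be cancelled individually. A toy single-threaded version of pointer-exact removal (the kernel operates on a lockless cmpxchg stack, not a plain list):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct callback_head {
            struct callback_head *next;
            void (*func)(struct callback_head *);
    };

    static bool cancel_exact(struct callback_head **head, struct callback_head *cb)
    {
            for (struct callback_head **p = head; *p; p = &(*p)->next) {
                    if (*p == cb) {          /* match the node, not just ->func */
                            *p = cb->next;
                            return true;
                    }
            }
            return false;
    }

    static void f(struct callback_head *cb) { (void)cb; }

    int main(void)
    {
            struct callback_head a = { NULL, f }, b = { &a, f };
            struct callback_head *head = &b;     /* queue: b -> a, same func */

            printf("cancel a: %d\n", cancel_exact(&head, &a));  /* 1, b stays queued */
            printf("head is b: %d\n", head == &b);              /* 1 */
            return 0;
    }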
+diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c
+index df922f49d171ba..d06185e054ea21 100644
+--- a/kernel/time/clocksource-wdtest.c
++++ b/kernel/time/clocksource-wdtest.c
+@@ -104,8 +104,8 @@ static void wdtest_ktime_clocksource_reset(void)
+ static int wdtest_func(void *arg)
+ {
+ unsigned long j1, j2;
++ int i, max_retries;
+ char *s;
+- int i;
+
+ schedule_timeout_uninterruptible(holdoff * HZ);
+
+@@ -139,18 +139,19 @@ static int wdtest_func(void *arg)
+ WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC));
+
+ /* Verify tsc-like stability with various numbers of errors injected. */
+- for (i = 0; i <= max_cswd_read_retries + 1; i++) {
+- if (i <= 1 && i < max_cswd_read_retries)
++ max_retries = clocksource_get_max_watchdog_retry();
++ for (i = 0; i <= max_retries + 1; i++) {
++ if (i <= 1 && i < max_retries)
+ s = "";
+- else if (i <= max_cswd_read_retries)
++ else if (i <= max_retries)
+ s = ", expect message";
+ else
+ s = ", expect clock skew";
+- pr_info("--- Watchdog with %dx error injection, %lu retries%s.\n", i, max_cswd_read_retries, s);
++ pr_info("--- Watchdog with %dx error injection, %d retries%s.\n", i, max_retries, s);
+ WRITE_ONCE(wdtest_ktime_read_ndelays, i);
+ schedule_timeout_uninterruptible(2 * HZ);
+ WARN_ON_ONCE(READ_ONCE(wdtest_ktime_read_ndelays));
+- WARN_ON_ONCE((i <= max_cswd_read_retries) !=
++ WARN_ON_ONCE((i <= max_retries) !=
+ !(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE));
+ wdtest_ktime_clocksource_reset();
+ }
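The clocksource.c hunks below introduce cycles_to_nsec_safe(), which falls back to a 128-bit multiply once the cycle delta exceeds cs->max_cycles, because delta * mult can overflow 64 bits. A demonstration of the overflow and the safe path (unsigned __int128 stands in for the kernel's mul_u64_u32_shr()):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cyc2ns_fast(uint64_t delta, uint32_t mult, uint32_t shift)
    {
            return (delta * mult) >> shift;           /* ok only for small deltas */
    }

    static uint64_t cyc2ns_safe(uint64_t delta, uint32_t mult, uint32_t shift)
    {
            return (uint64_t)(((unsigned __int128)delta * mult) >> shift);
    }

    int main(void)
    {
            uint32_t mult = 4194304, shift = 22;      /* ~1 ns per cycle */
            uint64_t big = 1ULL << 44;                /* big * mult overflows u64 */

            printf("fast: %" PRIu64 "\n", cyc2ns_fast(big, mult, shift)); /* wraps to 0 */
            printf("safe: %" PRIu64 "\n", cyc2ns_safe(big, mult, shift)); /* 2^44 */
            return 0;
    }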
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index c108ed8a9804ad..aa864999dc21be 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -20,6 +20,16 @@
+ #include "tick-internal.h"
+ #include "timekeeping_internal.h"
+
++static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
++{
++ u64 delta = clocksource_delta(end, start, cs->mask);
++
++ if (likely(delta < cs->max_cycles))
++ return clocksource_cyc2ns(delta, cs->mult, cs->shift);
++
++ return mul_u64_u32_shr(delta, cs->mult, cs->shift);
++}
++
+ /**
+ * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+ * @mult: pointer to mult variable
+@@ -99,6 +109,7 @@ static u64 suspend_start;
+ * Interval: 0.5sec.
+ */
+ #define WATCHDOG_INTERVAL (HZ >> 1)
++#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
+
+ /*
+ * Threshold: 0.0312s, when doubled: 0.0625s.
+@@ -134,6 +145,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
+ static atomic_t watchdog_reset_pending;
++static int64_t watchdog_max_interval;
+
+ static inline void clocksource_watchdog_lock(unsigned long *flags)
+ {
+@@ -208,9 +220,6 @@ void clocksource_mark_unstable(struct clocksource *cs)
+ spin_unlock_irqrestore(&watchdog_lock, flags);
+ }
+
+-ulong max_cswd_read_retries = 2;
+-module_param(max_cswd_read_retries, ulong, 0644);
+-EXPORT_SYMBOL_GPL(max_cswd_read_retries);
+ static int verify_n_cpus = 8;
+ module_param(verify_n_cpus, int, 0644);
+
+@@ -222,11 +231,12 @@ enum wd_read_status {
+
+ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
+ {
+- unsigned int nretries;
+- u64 wd_end, wd_end2, wd_delta;
++ unsigned int nretries, max_retries;
+ int64_t wd_delay, wd_seq_delay;
++ u64 wd_end, wd_end2;
+
+- for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) {
++ max_retries = clocksource_get_max_watchdog_retry();
++ for (nretries = 0; nretries <= max_retries; nretries++) {
+ local_irq_disable();
+ *wdnow = watchdog->read(watchdog);
+ *csnow = cs->read(cs);
+@@ -234,11 +244,9 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ wd_end2 = watchdog->read(watchdog);
+ local_irq_enable();
+
+- wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
+- wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
+- watchdog->shift);
++ wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
+ if (wd_delay <= WATCHDOG_MAX_SKEW) {
+- if (nretries > 1 || nretries >= max_cswd_read_retries) {
++ if (nretries > 1 && nretries >= max_retries) {
+ pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
+ smp_processor_id(), watchdog->name, nretries);
+ }
+@@ -254,8 +262,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ * report system busy, reinit the watchdog and skip the current
+ * watchdog test.
+ */
+- wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
+- wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
++ wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
+ if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
+ goto skip_test;
+ }
+@@ -366,8 +373,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ delta = (csnow_end - csnow_mid) & cs->mask;
+ if (delta < 0)
+ cpumask_set_cpu(cpu, &cpus_ahead);
+- delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
+- cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++ cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
+ if (cs_nsec > cs_nsec_max)
+ cs_nsec_max = cs_nsec;
+ if (cs_nsec < cs_nsec_min)
+@@ -398,9 +404,9 @@ static inline void clocksource_reset_watchdog(void)
+
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+- u64 csnow, wdnow, cslast, wdlast, delta;
++ int64_t wd_nsec, cs_nsec, interval;
++ u64 csnow, wdnow, cslast, wdlast;
+ int next_cpu, reset_pending;
+- int64_t wd_nsec, cs_nsec;
+ struct clocksource *cs;
+ enum wd_read_status read_ret;
+ unsigned long extra_wait = 0;
+@@ -456,12 +462,8 @@ static void clocksource_watchdog(struct timer_list *unused)
+ continue;
+ }
+
+- delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
+- wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
+- watchdog->shift);
+-
+- delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
+- cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++ wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
++ cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
+ wdlast = cs->wd_last; /* save these in case we print them */
+ cslast = cs->cs_last;
+ cs->cs_last = csnow;
+@@ -470,6 +472,27 @@ static void clocksource_watchdog(struct timer_list *unused)
+ if (atomic_read(&watchdog_reset_pending))
+ continue;
+
++ /*
++ * The processing of timer softirqs can get delayed (usually
++ * on account of ksoftirqd not getting to run in a timely
++ * manner), which causes the watchdog interval to stretch.
++ * Skew detection may fail for longer watchdog intervals
++ * on account of fixed margins being used.
++ * Some clocksources, e.g. acpi_pm, cannot tolerate
++ * watchdog intervals longer than a few seconds.
++ */
++ interval = max(cs_nsec, wd_nsec);
++ if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
++ if (system_state > SYSTEM_SCHEDULING &&
++ interval > 2 * watchdog_max_interval) {
++ watchdog_max_interval = interval;
++ pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
++ cs_nsec, wd_nsec);
++ }
++ watchdog_timer.expires = jiffies;
++ continue;
++ }
++
+ /* Check the deviation from the watchdog clocksource. */
+ md = cs->uncertainty_margin + watchdog->uncertainty_margin;
+ if (abs(cs_nsec - wd_nsec) > md) {
+@@ -811,7 +834,7 @@ void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+ */
+ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+ {
+- u64 now, delta, nsec = 0;
++ u64 now, nsec = 0;
+
+ if (!suspend_clocksource)
+ return 0;
+@@ -826,12 +849,8 @@ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+ else
+ now = suspend_clocksource->read(suspend_clocksource);
+
+- if (now > suspend_start) {
+- delta = clocksource_delta(now, suspend_start,
+- suspend_clocksource->mask);
+- nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+- suspend_clocksource->shift);
+- }
++ if (now > suspend_start)
++ nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);
+
+ /*
+ * Disable the suspend timer to save power if current clocksource is
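
A rough sketch of why the cycles_to_nsec_safe() helper above falls back to mul_u64_u32_shr(): the fast path, like clocksource_cyc2ns(), computes (delta * mult) >> shift in plain 64-bit arithmetic, which silently wraps once delta grows past cs->max_cycles. The userspace sketch below uses hypothetical mult/shift values (not taken from any real clocksource) and gcc/clang's __uint128_t in place of the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's mul_u64_u32_shr(): widen to 128 bits */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((__uint128_t)a * mul) >> shift);
}

int main(void)
{
	uint64_t delta = 1ULL << 45;	/* a "long readout interval" in cycles */
	uint32_t mult  = 1U << 22;	/* hypothetical clocksource mult */
	unsigned int shift = 22;	/* hypothetical clocksource shift */

	uint64_t narrow = (delta * mult) >> shift;	/* wraps mod 2^64 */
	uint64_t wide   = mul_u64_u32_shr(delta, mult, shift);

	printf("narrow=%llu wide=%llu\n",
	       (unsigned long long)narrow, (unsigned long long)wide);
	return 0;
}

With these values delta * mult is 2^67, so the 64-bit product wraps to zero while the widened multiply yields the expected 2^45 ns.
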
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 238262e4aba7e2..57e5cb36f1bc93 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -38,6 +38,7 @@
+ #include <linux/sched/deadline.h>
+ #include <linux/sched/nohz.h>
+ #include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
+ #include <linux/timer.h>
+ #include <linux/freezer.h>
+ #include <linux/compat.h>
+@@ -1085,6 +1086,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
+ enum hrtimer_mode mode)
+ {
+ debug_activate(timer, mode);
++ WARN_ON_ONCE(!base->cpu_base->online);
+
+ base->cpu_base->active_bases |= 1 << base->index;
+
+@@ -1286,6 +1288,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ struct hrtimer_clock_base *base;
+ unsigned long flags;
+
++ if (WARN_ON_ONCE(!timer->function))
++ return;
+ /*
+ * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
+ * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
+@@ -2183,6 +2187,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+ cpu_base->softirq_next_timer = NULL;
+ cpu_base->expires_next = KTIME_MAX;
+ cpu_base->softirq_expires_next = KTIME_MAX;
++ cpu_base->online = 1;
+ hrtimer_cpu_base_init_expiry_lock(cpu_base);
+ return 0;
+ }
+@@ -2219,29 +2224,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ }
+ }
+
+-int hrtimers_dead_cpu(unsigned int scpu)
++int hrtimers_cpu_dying(unsigned int dying_cpu)
+ {
++ int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER));
+ struct hrtimer_cpu_base *old_base, *new_base;
+- int i;
+
+- BUG_ON(cpu_online(scpu));
+- tick_cancel_sched_timer(scpu);
++ tick_cancel_sched_timer(dying_cpu);
++
++ old_base = this_cpu_ptr(&hrtimer_bases);
++ new_base = &per_cpu(hrtimer_bases, ncpu);
+
+- /*
+- * this BH disable ensures that raise_softirq_irqoff() does
+- * not wakeup ksoftirqd (and acquire the pi-lock) while
+- * holding the cpu_base lock
+- */
+- local_bh_disable();
+- local_irq_disable();
+- old_base = &per_cpu(hrtimer_bases, scpu);
+- new_base = this_cpu_ptr(&hrtimer_bases);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+- raw_spin_lock(&new_base->lock);
+- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock(&old_base->lock);
++ raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
+
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ migrate_hrtimer_list(&old_base->clock_base[i],
+@@ -2252,15 +2250,14 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ * The migration might have changed the first expiring softirq
+ * timer on this CPU. Update it.
+ */
+- hrtimer_update_softirq_timer(new_base, false);
++ __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
++ /* Tell the other CPU to retrigger the next event */
++ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+
+- raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
++ old_base->online = 0;
++ raw_spin_unlock(&old_base->lock);
+
+- /* Check, if we got expired work to do */
+- __hrtimer_peek_ahead_timers();
+- local_irq_enable();
+- local_bh_enable();
+ return 0;
+ }
+
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 406dccb79c2b6b..8d2dd214ec6822 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -727,17 +727,16 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
+ }
+
+ if (txc->modes & ADJ_MAXERROR)
+- time_maxerror = txc->maxerror;
++ time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);
+
+ if (txc->modes & ADJ_ESTERROR)
+- time_esterror = txc->esterror;
++ time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);
+
+ if (txc->modes & ADJ_TIMECONST) {
+- time_constant = txc->constant;
++ time_constant = clamp(txc->constant, 0, MAXTC);
+ if (!(time_status & STA_NANO))
+ time_constant += 4;
+- time_constant = min(time_constant, (long)MAXTC);
+- time_constant = max(time_constant, 0l);
++ time_constant = clamp(time_constant, 0, MAXTC);
+ }
+
+ if (txc->modes & ADJ_TAI &&
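
The clamp() conversions above are a behaviour-preserving cleanup for ADJ_TIMECONST (the old min()/max() pair already pinned time_constant into [0, MAXTC]) and new bounding for ADJ_MAXERROR and ADJ_ESTERROR, which previously took the user-supplied value unchecked. A minimal sketch of the equivalence, using MAXTC's kernel value of 10 for illustration:

#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	const long MAXTC = 10;
	long samples[] = { -3, 4, 99 };

	for (int i = 0; i < 3; i++) {
		long old_way = samples[i];

		/* old style: two statements, easy to reorder wrongly */
		if (old_way > MAXTC)
			old_way = MAXTC;
		if (old_way < 0)
			old_way = 0;

		printf("%3ld -> clamp %2ld, min/max %2ld\n", samples[i],
		       clamp_long(samples[i], 0, MAXTC), old_way);
	}
	return 0;
}
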
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index 77c0c2370b6d1d..8127673bfc45e6 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -299,6 +299,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
+ goto out;
+ }
+
++ if (!timespec64_valid_strict(ts))
++ return -EINVAL;
++
+ if (cd.clk->ops.clock_settime)
+ err = cd.clk->ops.clock_settime(cd.clk, ts);
+ else
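
The new check in pc_clock_settime() above rejects malformed timespecs before they reach a driver's clock_settime() callback. A sketch of what strict validation means, assuming it mirrors the kernel's timespec64_valid_strict() checks (non-negative seconds, nanoseconds below NSEC_PER_SEC, and seconds below the point where a ktime_t would overflow):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000L
#define KTIME_SEC_MAX	(INT64_MAX / NSEC_PER_SEC)

struct timespec64 { int64_t tv_sec; long tv_nsec; };

static bool timespec64_valid_strict(const struct timespec64 *ts)
{
	if (ts->tv_sec < 0)
		return false;			/* dates before the epoch */
	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return false;			/* non-normalized nanoseconds */
	if ((uint64_t)ts->tv_sec >= (uint64_t)KTIME_SEC_MAX)
		return false;			/* would overflow ktime_t */
	return true;
}

int main(void)
{
	struct timespec64 ok   = { 1700000000, 500 };
	struct timespec64 neg  = { -1, 0 };
	struct timespec64 wild = { 0, NSEC_PER_SEC };	/* out of range */

	assert(timespec64_valid_strict(&ok));
	assert(!timespec64_valid_strict(&neg));
	assert(!timespec64_valid_strict(&wild));
	return 0;
}
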
+diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
+index 828aeecbd1e8a9..9b6fcb8d85e78d 100644
+--- a/kernel/time/posix-stubs.c
++++ b/kernel/time/posix-stubs.c
+@@ -17,40 +17,6 @@
+ #include <linux/time_namespace.h>
+ #include <linux/compat.h>
+
+-#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+-/* Architectures may override SYS_NI and COMPAT_SYS_NI */
+-#include <asm/syscall_wrapper.h>
+-#endif
+-
+-asmlinkage long sys_ni_posix_timers(void)
+-{
+- pr_err_once("process %d (%s) attempted a POSIX timer syscall "
+- "while CONFIG_POSIX_TIMERS is not set\n",
+- current->pid, current->comm);
+- return -ENOSYS;
+-}
+-
+-#ifndef SYS_NI
+-#define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers)
+-#endif
+-
+-#ifndef COMPAT_SYS_NI
+-#define COMPAT_SYS_NI(name) SYSCALL_ALIAS(compat_sys_##name, sys_ni_posix_timers)
+-#endif
+-
+-SYS_NI(timer_create);
+-SYS_NI(timer_gettime);
+-SYS_NI(timer_getoverrun);
+-SYS_NI(timer_settime);
+-SYS_NI(timer_delete);
+-SYS_NI(clock_adjtime);
+-SYS_NI(getitimer);
+-SYS_NI(setitimer);
+-SYS_NI(clock_adjtime32);
+-#ifdef __ARCH_WANT_SYS_ALARM
+-SYS_NI(alarm);
+-#endif
+-
+ /*
+ * We preserve minimal support for CLOCK_REALTIME and CLOCK_MONOTONIC
+ * as it is easy to remain compatible with little code. CLOCK_BOOTTIME
+@@ -158,18 +124,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ which_clock);
+ }
+
+-#ifdef CONFIG_COMPAT
+-COMPAT_SYS_NI(timer_create);
+-#endif
+-
+-#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
+-COMPAT_SYS_NI(getitimer);
+-COMPAT_SYS_NI(setitimer);
+-#endif
+-
+ #ifdef CONFIG_COMPAT_32BIT_TIME
+-SYS_NI(timer_settime32);
+-SYS_NI(timer_gettime32);
+
+ SYSCALL_DEFINE2(clock_settime32, const clockid_t, which_clock,
+ struct old_timespec32 __user *, tp)
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index 771d1e040303b5..ed58eebb4e8f42 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -1148,6 +1148,30 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
+ bc = tick_broadcast_device.evtdev;
+
+ if (bc && broadcast_needs_cpu(bc, deadcpu)) {
++ /*
++ * If the broadcast force bit of the current CPU is set,
++ * then the current CPU has not yet reprogrammed the local
++ * timer device to avoid a ping-pong race. See
++ * ___tick_broadcast_oneshot_control().
++ *
++ * If the broadcast device is hrtimer based then
++ * programming the broadcast event below does not have any
++ * effect because the local clockevent device is not
++ * running and not programmed because the broadcast event
++ * is not earlier than the pending event of the local clock
++ * event device. As a consequence all CPUs waiting for a
++ * broadcast event are stuck forever.
++ *
++ * Detect this condition and reprogram the cpu local timer
++ * device to avoid the starvation.
++ */
++ if (tick_check_broadcast_expired()) {
++ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
++
++ cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
++ tick_program_event(td->evtdev->next_event, 1);
++ }
++
+ /* This moves the broadcast assignment to this CPU: */
+ clockevents_program_event(bc, bc->next_event, 1);
+ }
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index e9138cd7a0f52f..7f2b17fc8ce403 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -179,26 +179,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
+ }
+ }
+
+-#ifdef CONFIG_NO_HZ_FULL
+-static void giveup_do_timer(void *info)
+-{
+- int cpu = *(unsigned int *)info;
+-
+- WARN_ON(tick_do_timer_cpu != smp_processor_id());
+-
+- tick_do_timer_cpu = cpu;
+-}
+-
+-static void tick_take_do_timer_from_boot(void)
+-{
+- int cpu = smp_processor_id();
+- int from = tick_do_timer_boot_cpu;
+-
+- if (from >= 0 && from != cpu)
+- smp_call_function_single(from, giveup_do_timer, &cpu, 1);
+-}
+-#endif
+-
+ /*
+ * Setup the tick device
+ */
+@@ -222,19 +202,25 @@ static void tick_setup_device(struct tick_device *td,
+ tick_next_period = ktime_get();
+ #ifdef CONFIG_NO_HZ_FULL
+ /*
+- * The boot CPU may be nohz_full, in which case set
+- * tick_do_timer_boot_cpu so the first housekeeping
+- * secondary that comes up will take do_timer from
+- * us.
++ * The boot CPU may be nohz_full, in which case the
++ * first housekeeping secondary will take do_timer()
++ * from it.
+ */
+ if (tick_nohz_full_cpu(cpu))
+ tick_do_timer_boot_cpu = cpu;
+
+- } else if (tick_do_timer_boot_cpu != -1 &&
+- !tick_nohz_full_cpu(cpu)) {
+- tick_take_do_timer_from_boot();
++ } else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
+ tick_do_timer_boot_cpu = -1;
+- WARN_ON(tick_do_timer_cpu != cpu);
++ /*
++ * The boot CPU will stay in periodic (NOHZ disabled)
++ * mode until clocksource_done_booting() called after
++ * smp_init() selects a high resolution clocksource and
++ * timekeeping_notify() kicks the NOHZ stuff alive.
++ *
++ * So this WRITE_ONCE can only race with the READ_ONCE
++ * check in tick_periodic() but this race is harmless.
++ */
++ WRITE_ONCE(tick_do_timer_cpu, cpu);
+ #endif
+ }
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 87015e9deacc99..55cbc49f70d14c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1547,13 +1547,23 @@ void tick_setup_sched_timer(void)
+ void tick_cancel_sched_timer(int cpu)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++ ktime_t idle_sleeptime, iowait_sleeptime;
++ unsigned long idle_calls, idle_sleeps;
+
+ # ifdef CONFIG_HIGH_RES_TIMERS
+ if (ts->sched_timer.base)
+ hrtimer_cancel(&ts->sched_timer);
+ # endif
+
++ idle_sleeptime = ts->idle_sleeptime;
++ iowait_sleeptime = ts->iowait_sleeptime;
++ idle_calls = ts->idle_calls;
++ idle_sleeps = ts->idle_sleeps;
+ memset(ts, 0, sizeof(*ts));
++ ts->idle_sleeptime = idle_sleeptime;
++ ts->iowait_sleeptime = iowait_sleeptime;
++ ts->idle_calls = idle_calls;
++ ts->idle_sleeps = idle_sleeps;
+ }
+ #endif
+
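
The tick_cancel_sched_timer() change above saves the idle/iowait accumulators around the memset() so a CPU's idle accounting totals do not jump backwards when the CPU is torn down. The same save/wipe/restore pattern in a self-contained sketch (struct and field names are illustrative, not kernel API):

#include <assert.h>
#include <string.h>

struct stats { unsigned long calls, sleeps; int scratch[8]; };

static void stats_reset_keep_totals(struct stats *s)
{
	unsigned long calls = s->calls;		/* save monotonic counters */
	unsigned long sleeps = s->sleeps;

	memset(s, 0, sizeof(*s));		/* wipe all transient state */

	s->calls = calls;			/* restore: totals must not regress */
	s->sleeps = sleeps;
}

int main(void)
{
	struct stats s = { .calls = 7, .sleeps = 3, .scratch = { 1, 2 } };

	stats_reset_keep_totals(&s);
	assert(s.calls == 7 && s.sleeps == 3 && s.scratch[0] == 0);
	return 0;
}
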
+diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
+index 5ed5a9d41d5a7a..03a586e12cf893 100644
+--- a/kernel/time/tick-sched.h
++++ b/kernel/time/tick-sched.h
+@@ -61,7 +61,6 @@ struct tick_sched {
+ unsigned int tick_stopped : 1;
+ unsigned int idle_active : 1;
+ unsigned int do_timer_last : 1;
+- unsigned int got_idle_tick : 1;
+
+ /* Tick handling: jiffies stall check */
+ unsigned int stalled_jiffies;
+@@ -73,6 +72,7 @@ struct tick_sched {
+ ktime_t next_tick;
+ unsigned long idle_jiffies;
+ ktime_t idle_waketime;
++ unsigned int got_idle_tick;
+
+ /* Idle entry */
+ seqcount_t idle_sleeptime_seq;
+diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
+index ca058c8af6bafd..3e5d422dd15cbf 100644
+--- a/kernel/time/time_test.c
++++ b/kernel/time/time_test.c
+@@ -73,7 +73,7 @@ static void time64_to_tm_test_date_range(struct kunit *test)
+
+ days = div_s64(secs, 86400);
+
+- #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \
++ #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \
+ year, month, mdday, yday, days
+
+ KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
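
The FAIL_MSG fix above is a portability point: days comes from div_s64() and is 64-bit, so "%ld" is wrong wherever long is 32 bits. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t days = INT64_C(5000000000);	/* does not fit in 32 bits */

	/*
	 * "%ld" would mismatch (undefined behaviour) on 32-bit targets;
	 * "%lld" with a long long cast is correct everywhere.
	 */
	printf("days = %lld\n", (long long)days);
	return 0;
}
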
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 266d02809dbb1d..11b7000d5e1d42 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1180,13 +1180,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
+ }
+
+ /*
+- * cycle_between - true if test occurs chronologically between before and after
++ * timestamp_in_interval - true if ts is chronologically in [start, end]
++ *
++ * True if ts occurs chronologically at or after start, and before or at end.
+ */
+-static bool cycle_between(u64 before, u64 test, u64 after)
++static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
+ {
+- if (test > before && test < after)
++ if (ts >= start && ts <= end)
+ return true;
+- if (test < before && before > after)
++ if (start > end && (ts >= start || ts <= end))
+ return true;
+ return false;
+ }
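
timestamp_in_interval() tightens cycle_between() in two ways: the endpoints are now inclusive, and the wrap-around case is handled explicitly for a counter interval that crosses the cyclic maximum. The same logic as a self-contained sketch over a u64 counter:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool timestamp_in_interval(uint64_t start, uint64_t end, uint64_t ts)
{
	if (ts >= start && ts <= end)
		return true;
	if (start > end && (ts >= start || ts <= end))
		return true;			/* interval wraps past the max */
	return false;
}

int main(void)
{
	/* plain interval [100, 200]: endpoints now count as inside */
	assert(timestamp_in_interval(100, 200, 100));
	assert(timestamp_in_interval(100, 200, 200));
	assert(!timestamp_in_interval(100, 200, 201));

	/* wrapped interval [UINT64_MAX - 5, 10] */
	assert(timestamp_in_interval(UINT64_MAX - 5, 10, UINT64_MAX));
	assert(timestamp_in_interval(UINT64_MAX - 5, 10, 3));
	assert(!timestamp_in_interval(UINT64_MAX - 5, 10, 50));
	return 0;
}
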
+@@ -1246,7 +1248,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ */
+ now = tk_clock_read(&tk->tkr_mono);
+ interval_start = tk->tkr_mono.cycle_last;
+- if (!cycle_between(interval_start, cycles, now)) {
++ if (!timestamp_in_interval(interval_start, now, cycles)) {
+ clock_was_set_seq = tk->clock_was_set_seq;
+ cs_was_changed_seq = tk->cs_was_changed_seq;
+ cycles = interval_start;
+@@ -1259,10 +1261,8 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ tk_core.timekeeper.offs_real);
+ base_raw = tk->tkr_raw.base;
+
+- nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
+- system_counterval.cycles);
+- nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
+- system_counterval.cycles);
++ nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
++ nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
+ } while (read_seqcount_retry(&tk_core.seq, seq));
+
+ xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
+@@ -1277,13 +1277,13 @@ int get_device_system_crosststamp(int (*get_time_fn)
+ bool discontinuity;
+
+ /*
+- * Check that the counter value occurs after the provided
++ * Check that the counter value is not before the provided
+ * history reference and that the history doesn't cross a
+ * clocksource change
+ */
+ if (!history_begin ||
+- !cycle_between(history_begin->cycles,
+- system_counterval.cycles, cycles) ||
++ !timestamp_in_interval(history_begin->cycles,
++ cycles, system_counterval.cycles) ||
+ history_begin->cs_was_changed_seq != cs_was_changed_seq)
+ return -EINVAL;
+ partial_history_cycles = cycles - system_counterval.cycles;
+@@ -2476,7 +2476,7 @@ int do_adjtimex(struct __kernel_timex *txc)
+ clock_set |= timekeeping_advance(TK_ADV_FREQ);
+
+ if (clock_set)
+- clock_was_set(CLOCK_REALTIME);
++ clock_was_set(CLOCK_SET_WALL);
+
+ ntp_notify_cmos_timer();
+
+diff --git a/kernel/torture.c b/kernel/torture.c
+index b28b05bbef0270..c7b475883b9a8d 100644
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -87,14 +87,15 @@ EXPORT_SYMBOL_GPL(verbose_torout_sleep);
+ * nanosecond random fuzz. This function and its friends desynchronize
+ * testing from the timer wheel.
+ */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++ struct torture_random_state *trsp)
+ {
+ ktime_t hto = baset_ns;
+
+ if (trsp)
+ hto += torture_random(trsp) % fuzzt_ns;
+ set_current_state(TASK_IDLE);
+- return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
++ return schedule_hrtimeout(&hto, mode);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
+
+@@ -106,7 +107,7 @@ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state
+ {
+ ktime_t baset_ns = baset_us * NSEC_PER_USEC;
+
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
+
+@@ -123,7 +124,7 @@ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
+
+@@ -136,7 +137,7 @@ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
+ {
+ ktime_t baset_ns = jiffies_to_nsecs(baset_j);
+
+- return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
++ return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
+
+@@ -153,7 +154,7 @@ int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
+
+@@ -720,7 +721,7 @@ static void torture_shutdown_cleanup(void)
+ * suddenly applied to or removed from the system.
+ */
+ static struct task_struct *stutter_task;
+-static int stutter_pause_test;
++static ktime_t stutter_till_abs_time;
+ static int stutter;
+ static int stutter_gap;
+
+@@ -730,30 +731,16 @@ static int stutter_gap;
+ */
+ bool stutter_wait(const char *title)
+ {
+- unsigned int i = 0;
+ bool ret = false;
+- int spt;
++ ktime_t till_ns;
+
+ cond_resched_tasks_rcu_qs();
+- spt = READ_ONCE(stutter_pause_test);
+- for (; spt; spt = READ_ONCE(stutter_pause_test)) {
+- if (!ret && !rt_task(current)) {
+- sched_set_normal(current, MAX_NICE);
+- ret = true;
+- }
+- if (spt == 1) {
+- torture_hrtimeout_jiffies(1, NULL);
+- } else if (spt == 2) {
+- while (READ_ONCE(stutter_pause_test)) {
+- if (!(i++ & 0xffff))
+- torture_hrtimeout_us(10, 0, NULL);
+- cond_resched();
+- }
+- } else {
+- torture_hrtimeout_jiffies(round_jiffies_relative(HZ), NULL);
+- }
+- torture_shutdown_absorb(title);
++ till_ns = READ_ONCE(stutter_till_abs_time);
++ if (till_ns && ktime_before(ktime_get(), till_ns)) {
++ torture_hrtimeout_ns(till_ns, 0, HRTIMER_MODE_ABS, NULL);
++ ret = true;
+ }
++ torture_shutdown_absorb(title);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(stutter_wait);
+@@ -764,23 +751,16 @@ EXPORT_SYMBOL_GPL(stutter_wait);
+ */
+ static int torture_stutter(void *arg)
+ {
+- DEFINE_TORTURE_RANDOM(rand);
+- int wtime;
++ ktime_t till_ns;
+
+ VERBOSE_TOROUT_STRING("torture_stutter task started");
+ do {
+ if (!torture_must_stop() && stutter > 1) {
+- wtime = stutter;
+- if (stutter > 2) {
+- WRITE_ONCE(stutter_pause_test, 1);
+- wtime = stutter - 3;
+- torture_hrtimeout_jiffies(wtime, &rand);
+- wtime = 2;
+- }
+- WRITE_ONCE(stutter_pause_test, 2);
+- torture_hrtimeout_jiffies(wtime, NULL);
++ till_ns = ktime_add_ns(ktime_get(),
++ jiffies_to_nsecs(stutter));
++ WRITE_ONCE(stutter_till_abs_time, till_ns);
++ torture_hrtimeout_jiffies(stutter - 1, NULL);
+ }
+- WRITE_ONCE(stutter_pause_test, 0);
+ if (!torture_must_stop())
+ torture_hrtimeout_jiffies(stutter_gap, NULL);
+ torture_shutdown_absorb("torture_stutter");
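
The rewritten stutter logic swaps the old tri-state polling flag for one published absolute deadline, so every waiter sleeps exactly once instead of spinning on stutter_pause_test. A userspace analogue, with clock_nanosleep(TIMER_ABSTIME) standing in for torture_hrtimeout_ns(..., HRTIMER_MODE_ABS, ...):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
	int64_t till_ns = now_ns() + 100 * 1000000LL;	/* deadline: now + 100ms */
	struct timespec t;

	t.tv_sec  = till_ns / 1000000000;
	t.tv_nsec = till_ns % 1000000000;

	if (now_ns() < till_ns)		/* one absolute-mode sleep, no polling */
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t, NULL);

	printf("overshoot: %lld ns\n", (long long)(now_ns() - till_ns));
	return 0;
}

An absolute deadline also makes late arrivals behave for free: a task that checks in after the deadline has passed simply does not sleep.
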
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index 61c541c36596d9..bcc97f1bd3833b 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -1123,7 +1123,7 @@ config PREEMPTIRQ_DELAY_TEST
+
+ config SYNTH_EVENT_GEN_TEST
+ tristate "Test module for in-kernel synthetic event generation"
+- depends on SYNTH_EVENTS
++ depends on SYNTH_EVENTS && m
+ help
+ This option creates a test module to check the base
+ functionality of in-kernel synthetic event definition and
+@@ -1136,7 +1136,7 @@ config SYNTH_EVENT_GEN_TEST
+
+ config KPROBE_EVENT_GEN_TEST
+ tristate "Test module for in-kernel kprobe event generation"
+- depends on KPROBE_EVENTS
++ depends on KPROBE_EVENTS && m
+ help
+ This option creates a test module to check the base
+ functionality of in-kernel kprobe event definition.
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 868008f56fec24..eca858bde80470 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -41,6 +41,9 @@
+ #define bpf_event_rcu_dereference(p) \
+ rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
+
++#define MAX_UPROBE_MULTI_CNT (1U << 20)
++#define MAX_KPROBE_MULTI_CNT (1U << 20)
++
+ #ifdef CONFIG_MODULES
+ struct bpf_trace_module {
+ struct module *module;
+@@ -1217,7 +1220,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = {
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+- .arg3_type = ARG_PTR_TO_LONG,
++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg3_size = sizeof(u64),
+ };
+
+ BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
+@@ -1233,7 +1237,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = {
+ .func = get_func_ret,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+- .arg2_type = ARG_PTR_TO_LONG,
++ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg2_size = sizeof(u64),
+ };
+
+ BPF_CALL_1(get_func_arg_cnt, void *, ctx)
+@@ -2636,7 +2641,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
+
+ static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
+ .release = bpf_kprobe_multi_link_release,
+- .dealloc = bpf_kprobe_multi_link_dealloc,
++ .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
+ .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
+ };
+
+@@ -2895,6 +2900,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ cnt = attr->link_create.kprobe_multi.cnt;
+ if (!cnt)
+ return -EINVAL;
++ if (cnt > MAX_KPROBE_MULTI_CNT)
++ return -E2BIG;
+
+ size = cnt * sizeof(*addrs);
+ addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
+@@ -3025,6 +3032,7 @@ struct bpf_uprobe_multi_link;
+ struct bpf_uprobe {
+ struct bpf_uprobe_multi_link *link;
+ loff_t offset;
++ unsigned long ref_ctr_offset;
+ u64 cookie;
+ struct uprobe_consumer consumer;
+ };
+@@ -3060,6 +3068,9 @@ static void bpf_uprobe_multi_link_release(struct bpf_link *link)
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
++ if (umulti_link->task)
++ put_task_struct(umulti_link->task);
++ path_put(&umulti_link->path);
+ }
+
+ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
+@@ -3067,16 +3078,13 @@ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
+ struct bpf_uprobe_multi_link *umulti_link;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+- if (umulti_link->task)
+- put_task_struct(umulti_link->task);
+- path_put(&umulti_link->path);
+ kvfree(umulti_link->uprobes);
+ kfree(umulti_link);
+ }
+
+ static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
+ .release = bpf_uprobe_multi_link_release,
+- .dealloc = bpf_uprobe_multi_link_dealloc,
++ .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
+ };
+
+ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+@@ -3093,7 +3101,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ struct bpf_run_ctx *old_run_ctx;
+ int err = 0;
+
+- if (link->task && current != link->task)
++ if (link->task && current->mm != link->task->mm)
+ return 0;
+
+ if (sleepable)
+@@ -3164,7 +3172,6 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ {
+ struct bpf_uprobe_multi_link *link = NULL;
+ unsigned long __user *uref_ctr_offsets;
+- unsigned long *ref_ctr_offsets = NULL;
+ struct bpf_link_primer link_primer;
+ struct bpf_uprobe *uprobes = NULL;
+ struct task_struct *task = NULL;
+@@ -3195,9 +3202,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
+ uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
+ cnt = attr->link_create.uprobe_multi.cnt;
++ pid = attr->link_create.uprobe_multi.pid;
+
+- if (!upath || !uoffsets || !cnt)
++ if (!upath || !uoffsets || !cnt || pid < 0)
+ return -EINVAL;
++ if (cnt > MAX_UPROBE_MULTI_CNT)
++ return -E2BIG;
+
+ uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
+ ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
+@@ -3218,10 +3228,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ goto error_path_put;
+ }
+
+- pid = attr->link_create.uprobe_multi.pid;
+ if (pid) {
+ rcu_read_lock();
+- task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
++ task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
+ rcu_read_unlock();
+ if (!task) {
+ err = -ESRCH;
+@@ -3237,18 +3246,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ if (!uprobes || !link)
+ goto error_free;
+
+- if (uref_ctr_offsets) {
+- ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
+- if (!ref_ctr_offsets)
+- goto error_free;
+- }
+-
+ for (i = 0; i < cnt; i++) {
+ if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+- if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
++ if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+@@ -3279,23 +3282,24 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ for (i = 0; i < cnt; i++) {
+ err = uprobe_register_refctr(d_real_inode(link->path.dentry),
+ uprobes[i].offset,
+- ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
++ uprobes[i].ref_ctr_offset,
+ &uprobes[i].consumer);
+ if (err) {
+- bpf_uprobe_unregister(&path, uprobes, i);
+- goto error_free;
++ link->cnt = i;
++ goto error_unregister;
+ }
+ }
+
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+- goto error_free;
++ goto error_unregister;
+
+- kvfree(ref_ctr_offsets);
+ return bpf_link_settle(&link_primer);
+
++error_unregister:
++ bpf_uprobe_unregister(&path, uprobes, link->cnt);
++
+ error_free:
+- kvfree(ref_ctr_offsets);
+ kvfree(uprobes);
+ kfree(link);
+ if (task)
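
The MAX_UPROBE_MULTI_CNT / MAX_KPROBE_MULTI_CNT guards above bound a user-controlled element count before it sizes an allocation. The pattern in isolation (names are illustrative; the 8 MiB figure assumes 8-byte elements at the 2^20 cap):

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_MULTI_CNT (1U << 20)	/* the cap the patch picks */

static void *alloc_addrs(uint32_t cnt, int *err)
{
	if (!cnt) {
		*err = -EINVAL;		/* zero was already rejected */
		return NULL;
	}
	if (cnt > MAX_MULTI_CNT) {
		*err = -E2BIG;		/* the new guard */
		return NULL;
	}
	*err = 0;
	return calloc(cnt, sizeof(uint64_t));	/* now at most 8 MiB */
}

int main(void)
{
	int err;

	assert(!alloc_addrs(UINT32_MAX, &err) && err == -E2BIG);
	free(alloc_addrs(16, &err));
	assert(err == 0);
	return 0;
}
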
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 8de8bec5f36640..175eba24f5629a 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1183,18 +1183,19 @@ static void __add_hash_entry(struct ftrace_hash *hash,
+ hash->count++;
+ }
+
+-static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
++static struct ftrace_func_entry *
++add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+ {
+ struct ftrace_func_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+- return -ENOMEM;
++ return NULL;
+
+ entry->ip = ip;
+ __add_hash_entry(hash, entry);
+
+- return 0;
++ return entry;
+ }
+
+ static void
+@@ -1349,7 +1350,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *new_hash;
+ int size;
+- int ret;
+ int i;
+
+ new_hash = alloc_ftrace_hash(size_bits);
+@@ -1366,8 +1366,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+- ret = add_hash_entry(new_hash, entry->ip);
+- if (ret < 0)
++ if (add_hash_entry(new_hash, entry->ip) == NULL)
+ goto free_hash;
+ }
+ }
+@@ -1596,12 +1595,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
+ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+ {
+ struct dyn_ftrace *rec;
++ unsigned long ip = 0;
+
++ rcu_read_lock();
+ rec = lookup_rec(start, end);
+ if (rec)
+- return rec->ip;
++ ip = rec->ip;
++ rcu_read_unlock();
+
+- return 0;
++ return ip;
+ }
+
+ /**
+@@ -1614,25 +1616,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+ */
+ unsigned long ftrace_location(unsigned long ip)
+ {
+- struct dyn_ftrace *rec;
++ unsigned long loc;
+ unsigned long offset;
+ unsigned long size;
+
+- rec = lookup_rec(ip, ip);
+- if (!rec) {
++ loc = ftrace_location_range(ip, ip);
++ if (!loc) {
+ if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+ goto out;
+
+ /* map sym+0 to __fentry__ */
+ if (!offset)
+- rec = lookup_rec(ip, ip + size - 1);
++ loc = ftrace_location_range(ip, ip + size - 1);
+ }
+
+- if (rec)
+- return rec->ip;
+-
+ out:
+- return 0;
++ return loc;
+ }
+
+ /**
+@@ -2536,7 +2535,7 @@ ftrace_find_unique_ops(struct dyn_ftrace *rec)
+
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /* Protected by rcu_tasks for reading, and direct_mutex for writing */
+-static struct ftrace_hash *direct_functions = EMPTY_HASH;
++static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
+ static DEFINE_MUTEX(direct_mutex);
+ int ftrace_direct_func_count;
+
+@@ -2555,39 +2554,6 @@ unsigned long ftrace_find_rec_direct(unsigned long ip)
+ return entry->direct;
+ }
+
+-static struct ftrace_func_entry*
+-ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
+- struct ftrace_hash **free_hash)
+-{
+- struct ftrace_func_entry *entry;
+-
+- if (ftrace_hash_empty(direct_functions) ||
+- direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
+- struct ftrace_hash *new_hash;
+- int size = ftrace_hash_empty(direct_functions) ? 0 :
+- direct_functions->count + 1;
+-
+- if (size < 32)
+- size = 32;
+-
+- new_hash = dup_hash(direct_functions, size);
+- if (!new_hash)
+- return NULL;
+-
+- *free_hash = direct_functions;
+- direct_functions = new_hash;
+- }
+-
+- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+- if (!entry)
+- return NULL;
+-
+- entry->ip = ip;
+- entry->direct = addr;
+- __add_hash_entry(direct_functions, entry);
+- return entry;
+-}
+-
+ static void call_direct_funcs(unsigned long ip, unsigned long pip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+ {
+@@ -4223,8 +4189,8 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
+ /* Do nothing if it exists */
+ if (entry)
+ return 0;
+-
+- ret = add_hash_entry(hash, rec->ip);
++ if (add_hash_entry(hash, rec->ip) == NULL)
++ ret = -ENOMEM;
+ }
+ return ret;
+ }
+@@ -5266,7 +5232,8 @@ __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+ return 0;
+ }
+
+- return add_hash_entry(hash, ip);
++ entry = add_hash_entry(hash, ip);
++ return entry ? 0 : -ENOMEM;
+ }
+
+ static int
+@@ -5358,7 +5325,17 @@ static LIST_HEAD(ftrace_direct_funcs);
+
+ static int register_ftrace_function_nolock(struct ftrace_ops *ops);
+
++/*
++ * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
++ * direct call is jumped to from ftrace_regs_caller. Only if the architecture
++ * supports direct calls but not ftrace_regs_caller, use SAVE_ARGS so that the
++ * jump comes from ftrace_caller for multiple ftrace_ops.
++ */
++#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
+ #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
++#else
++#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
++#endif
+
+ static int check_direct_multi(struct ftrace_ops *ops)
+ {
+@@ -5410,7 +5387,7 @@ static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long
+ */
+ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+ {
+- struct ftrace_hash *hash, *free_hash = NULL;
++ struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
+ struct ftrace_func_entry *entry, *new;
+ int err = -EBUSY, size, i;
+
+@@ -5436,17 +5413,44 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+ }
+ }
+
+- /* ... and insert them to direct_functions hash. */
+ err = -ENOMEM;
++
++ /* Make a copy hash to place the new and the old entries in */
++ size = hash->count + direct_functions->count;
++ if (size > 32)
++ size = 32;
++ new_hash = alloc_ftrace_hash(fls(size));
++ if (!new_hash)
++ goto out_unlock;
++
++ /* Now copy over the existing direct entries */
++ size = 1 << direct_functions->size_bits;
++ for (i = 0; i < size; i++) {
++ hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
++ new = add_hash_entry(new_hash, entry->ip);
++ if (!new)
++ goto out_unlock;
++ new->direct = entry->direct;
++ }
++ }
++
++ /* ... and add the new entries */
++ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+- new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
++ new = add_hash_entry(new_hash, entry->ip);
+ if (!new)
+- goto out_remove;
++ goto out_unlock;
++ /* Update both the copy and the hash entry */
++ new->direct = addr;
+ entry->direct = addr;
+ }
+ }
+
++ free_hash = direct_functions;
++ rcu_assign_pointer(direct_functions, new_hash);
++ new_hash = NULL;
++
+ ops->func = call_direct_funcs;
+ ops->flags = MULTI_FLAGS;
+ ops->trampoline = FTRACE_REGS_ADDR;
+@@ -5454,17 +5458,17 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+
+ err = register_ftrace_function_nolock(ops);
+
+- out_remove:
+- if (err)
+- remove_direct_functions_hash(hash, addr);
+-
+ out_unlock:
+ mutex_unlock(&direct_mutex);
+
+- if (free_hash) {
++ if (free_hash && free_hash != EMPTY_HASH) {
+ synchronize_rcu_tasks();
+ free_ftrace_hash(free_hash);
+ }
++
++ if (new_hash)
++ free_ftrace_hash(new_hash);
++
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(register_ftrace_direct);
+@@ -6309,7 +6313,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
+
+ if (entry)
+ continue;
+- if (add_hash_entry(hash, rec->ip) < 0)
++ if (add_hash_entry(hash, rec->ip) == NULL)
+ goto out;
+ } else {
+ if (entry) {
+@@ -6589,6 +6593,8 @@ static int ftrace_process_locs(struct module *mod,
+ /* We should have used all pages unless we skipped some */
+ if (pg_unuse) {
+ WARN_ON(!skipped);
++ /* Need to synchronize with ftrace_location_range() */
++ synchronize_rcu();
+ ftrace_free_pages(pg_unuse);
+ }
+ return ret;
+@@ -6802,6 +6808,9 @@ void ftrace_release_mod(struct module *mod)
+ out_unlock:
+ mutex_unlock(&ftrace_lock);
+
++ /* Need to synchronize with ftrace_location_range() */
++ if (tmp_page)
++ synchronize_rcu();
+ for (pg = tmp_page; pg; pg = tmp_page) {
+
+ /* Needs to be called outside of ftrace_lock */
+@@ -7135,6 +7144,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ unsigned long start = (unsigned long)(start_ptr);
+ unsigned long end = (unsigned long)(end_ptr);
+ struct ftrace_page **last_pg = &ftrace_pages_start;
++ struct ftrace_page *tmp_page = NULL;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ struct dyn_ftrace key;
+@@ -7176,12 +7186,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ ftrace_update_tot_cnt--;
+ if (!pg->index) {
+ *last_pg = pg->next;
+- if (pg->records) {
+- free_pages((unsigned long)pg->records, pg->order);
+- ftrace_number_of_pages -= 1 << pg->order;
+- }
+- ftrace_number_of_groups--;
+- kfree(pg);
++ pg->next = tmp_page;
++ tmp_page = pg;
+ pg = container_of(last_pg, struct ftrace_page, next);
+ if (!(*last_pg))
+ ftrace_pages = pg;
+@@ -7198,6 +7204,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ clear_func_from_hashes(func);
+ kfree(func);
+ }
++ /* Need to synchronize with ftrace_location_range() */
++ if (tmp_page) {
++ synchronize_rcu();
++ ftrace_free_pages(tmp_page);
++ }
+ }
+
+ void __init ftrace_free_init_mem(void)
+@@ -7888,6 +7899,7 @@ void ftrace_kill(void)
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+ ftrace_trace_function = ftrace_stub;
++ kprobe_ftrace_kill();
+ }
+
+ /**
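
add_hash_entry() now returns the new entry (or NULL) instead of 0/-ENOMEM, which is what lets register_ftrace_direct() above fill in ->direct on each copied entry without a second lookup. The shape of that API change in a toy chained hash (illustrative, not the kernel structures):

#include <assert.h>
#include <stdlib.h>

struct entry { unsigned long ip, direct; struct entry *next; };
struct hash  { struct entry *head; };

static struct entry *add_hash_entry(struct hash *h, unsigned long ip)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return NULL;		/* the old API returned -ENOMEM here */
	e->ip = ip;
	e->direct = 0;
	e->next = h->head;
	h->head = e;
	return e;			/* caller can fill per-entry fields */
}

int main(void)
{
	struct hash h = { 0 };
	struct entry *e = add_hash_entry(&h, 0xdeadbeefUL);

	assert(e);
	e->direct = 0xc0ffee;		/* one step, no second lookup */
	free(e);
	return 0;
}
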
+diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c
+index 95106d02b32d82..85de221c0b6f22 100644
+--- a/kernel/trace/pid_list.c
++++ b/kernel/trace/pid_list.c
+@@ -354,7 +354,7 @@ static void pid_list_refill_irq(struct irq_work *iwork)
+ while (upper_count-- > 0) {
+ union upper_chunk *chunk;
+
+- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
++ chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
+ if (!chunk)
+ break;
+ *upper_next = chunk;
+@@ -365,7 +365,7 @@ static void pid_list_refill_irq(struct irq_work *iwork)
+ while (lower_count-- > 0) {
+ union lower_chunk *chunk;
+
+- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
++ chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
+ if (!chunk)
+ break;
+ *lower_next = chunk;
+diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
+index 8c4ffd07616244..cb0871fbdb07f0 100644
+--- a/kernel/trace/preemptirq_delay_test.c
++++ b/kernel/trace/preemptirq_delay_test.c
+@@ -215,4 +215,5 @@ static void __exit preemptirq_delay_exit(void)
+
+ module_init(preemptirq_delay_init)
+ module_exit(preemptirq_delay_exit)
++MODULE_DESCRIPTION("Preempt / IRQ disable delay thread to test latency tracers");
+ MODULE_LICENSE("GPL v2");
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
+index 5eb9b598f4e9c2..3cebcbaf35a442 100644
+--- a/kernel/trace/rethook.c
++++ b/kernel/trace/rethook.c
+@@ -63,7 +63,7 @@ static void rethook_free_rcu(struct rcu_head *head)
+ */
+ void rethook_stop(struct rethook *rh)
+ {
+- WRITE_ONCE(rh->handler, NULL);
++ rcu_assign_pointer(rh->handler, NULL);
+ }
+
+ /**
+@@ -78,11 +78,17 @@ void rethook_stop(struct rethook *rh)
+ */
+ void rethook_free(struct rethook *rh)
+ {
+- WRITE_ONCE(rh->handler, NULL);
++ rethook_stop(rh);
+
+ call_rcu(&rh->rcu, rethook_free_rcu);
+ }
+
++static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
++{
++ return (rethook_handler_t)rcu_dereference_check(rh->handler,
++ rcu_read_lock_any_held());
++}
++
+ /**
+ * rethook_alloc() - Allocate struct rethook.
+ * @data: a data to pass the @handler when hooking the return.
+@@ -102,7 +108,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
+ }
+
+ rh->data = data;
+- rh->handler = handler;
++ rcu_assign_pointer(rh->handler, handler);
+ rh->pool.head = NULL;
+ refcount_set(&rh->ref, 1);
+
+@@ -142,9 +148,10 @@ static void free_rethook_node_rcu(struct rcu_head *head)
+ */
+ void rethook_recycle(struct rethook_node *node)
+ {
+- lockdep_assert_preemption_disabled();
++ rethook_handler_t handler;
+
+- if (likely(READ_ONCE(node->rethook->handler)))
++ handler = rethook_get_handler(node->rethook);
++ if (likely(handler))
+ freelist_add(&node->freelist, &node->rethook->pool);
+ else
+ call_rcu(&node->rcu, free_rethook_node_rcu);
+@@ -160,11 +167,9 @@ NOKPROBE_SYMBOL(rethook_recycle);
+ */
+ struct rethook_node *rethook_try_get(struct rethook *rh)
+ {
+- rethook_handler_t handler = READ_ONCE(rh->handler);
++ rethook_handler_t handler = rethook_get_handler(rh);
+ struct freelist_node *fn;
+
+- lockdep_assert_preemption_disabled();
+-
+ /* Check whether @rh is going to be freed. */
+ if (unlikely(!handler))
+ return NULL;
+@@ -312,7 +317,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ rhn = container_of(first, struct rethook_node, llist);
+ if (WARN_ON_ONCE(rhn->frame != frame))
+ break;
+- handler = READ_ONCE(rhn->rethook->handler);
++ handler = rethook_get_handler(rhn->rethook);
+ if (handler)
+ handler(rhn, rhn->rethook->data,
+ correct_ret_addr, regs);
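
The rethook conversion above trades plain WRITE_ONCE()/READ_ONCE() on ->handler for rcu_assign_pointer()/rcu_dereference_check(), so publishing and tearing down the handler get RCU's ordering guarantees. A loose userspace analogue with C11 release/acquire in place of RCU; this is a simplification, since real RCU also defers frees past a grace period:

#include <stdatomic.h>
#include <stdio.h>

typedef void (*handler_t)(void);

static _Atomic(handler_t) handler;

static void say_hi(void) { puts("hi"); }

static void set_handler(handler_t h)	/* rcu_assign_pointer() analogue */
{
	atomic_store_explicit(&handler, h, memory_order_release);
}

static void invoke(void)		/* rcu_dereference() analogue */
{
	handler_t h = atomic_load_explicit(&handler, memory_order_acquire);

	if (h)		/* a racing set_handler(NULL) is fine: the loaded */
		h();	/* value stays callable for this invocation */
}

int main(void)
{
	set_handler(say_hi);
	invoke();
	set_handler(NULL);	/* rethook_stop() analogue */
	invoke();		/* silently does nothing */
	return 0;
}
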
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 515cafdb18d98a..61caff3d4091f7 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -412,7 +412,6 @@ struct rb_irq_work {
+ struct irq_work work;
+ wait_queue_head_t waiters;
+ wait_queue_head_t full_waiters;
+- long wait_index;
+ bool waiters_pending;
+ bool full_waiters_pending;
+ bool wakeup_full;
+@@ -644,8 +643,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
+
+ *cnt = rb_time_cnt(top);
+
+- /* If top and bottom counts don't match, this interrupted a write */
+- if (*cnt != rb_time_cnt(bottom))
++ /* If top, msb or bottom counts don't match, this interrupted a write */
++ if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
+ return false;
+
+ /* The shift to msb will lose its cnt bits */
+@@ -700,44 +699,6 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
+ return local_try_cmpxchg(l, &expect, set);
+ }
+
+-static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+-{
+- unsigned long cnt, top, bottom, msb;
+- unsigned long cnt2, top2, bottom2, msb2;
+- u64 val;
+-
+- /* The cmpxchg always fails if it interrupted an update */
+- if (!__rb_time_read(t, &val, &cnt2))
+- return false;
+-
+- if (val != expect)
+- return false;
+-
+- cnt = local_read(&t->cnt);
+- if ((cnt & 3) != cnt2)
+- return false;
+-
+- cnt2 = cnt + 1;
+-
+- rb_time_split(val, &top, &bottom, &msb);
+- top = rb_time_val_cnt(top, cnt);
+- bottom = rb_time_val_cnt(bottom, cnt);
+-
+- rb_time_split(set, &top2, &bottom2, &msb2);
+- top2 = rb_time_val_cnt(top2, cnt2);
+- bottom2 = rb_time_val_cnt(bottom2, cnt2);
+-
+- if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
+- return false;
+- if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
+- return false;
+- if (!rb_time_read_cmpxchg(&t->top, top, top2))
+- return false;
+- if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
+- return false;
+- return true;
+-}
+-
+ #else /* 64 bits */
+
+ /* local64_t always succeeds */
+@@ -751,11 +712,6 @@ static void rb_time_set(rb_time_t *t, u64 val)
+ {
+ local64_set(&t->time, val);
+ }
+-
+-static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+-{
+- return local64_try_cmpxchg(&t->time, &expect, set);
+-}
+ #endif
+
+ /*
+@@ -924,9 +880,14 @@ static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int f
+ if (!nr_pages || !full)
+ return true;
+
+- dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
++ /*
++ * Add one as dirty will never equal nr_pages, as the sub-buffer
++ * that the writer is on is not counted as dirty.
++ * This is needed if "buffer_percent" is set to 100.
++ */
++ dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
+
+- return (dirty * 100) > (full * nr_pages);
++ return (dirty * 100) >= (full * nr_pages);
+ }
+
+ /*
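
Worked numbers for the full_hit() change above: with buffer_percent set to 100 the old strict test could never pass, because the sub-buffer the writer sits on is never counted as dirty.

#include <stdbool.h>
#include <stdio.h>

static bool full_hit_old(long dirty, long full, long nr_pages)
{
	return (dirty * 100) > (full * nr_pages);
}

static bool full_hit_new(long dirty, long full, long nr_pages)
{
	return ((dirty + 1) * 100) >= (full * nr_pages);
}

int main(void)
{
	long nr_pages = 8, full = 100;
	long dirty = nr_pages - 1;	/* everything dirty but the writer's page */

	printf("old: %d new: %d\n",
	       full_hit_old(dirty, full, nr_pages),	/* 700 > 800  -> 0 */
	       full_hit_new(dirty, full, nr_pages));	/* 800 >= 800 -> 1 */
	return 0;
}
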
+@@ -941,8 +902,19 @@ static void rb_wake_up_waiters(struct irq_work *work)
+
+ wake_up_all(&rbwork->waiters);
+ if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
++ /* Only cpu_buffer sets the above flags */
++ struct ring_buffer_per_cpu *cpu_buffer =
++ container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
++
++ /* Called from interrupt context */
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ rbwork->wakeup_full = false;
+ rbwork->full_waiters_pending = false;
++
++ /* Waking up all waiters, they will reset the shortest full */
++ cpu_buffer->shortest_full = 0;
++ raw_spin_unlock(&cpu_buffer->reader_lock);
++
+ wake_up_all(&rbwork->full_waiters);
+ }
+ }
+@@ -983,11 +955,93 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ rbwork = &cpu_buffer->irq_work;
+ }
+
+- rbwork->wait_index++;
+- /* make sure the waiters see the new index */
+- smp_wmb();
++ /* This can be called in any context */
++ irq_work_queue(&rbwork->work);
++}
++
++static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
++{
++ struct ring_buffer_per_cpu *cpu_buffer;
++ bool ret = false;
++
++	/* Reads of all CPUs always wait for any data */
++ if (cpu == RING_BUFFER_ALL_CPUS)
++ return !ring_buffer_empty(buffer);
+
+- rb_wake_up_waiters(&rbwork->work);
++ cpu_buffer = buffer->buffers[cpu];
++
++ if (!ring_buffer_empty_cpu(buffer, cpu)) {
++ unsigned long flags;
++ bool pagebusy;
++
++ if (!full)
++ return true;
++
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
++ ret = !pagebusy && full_hit(buffer, cpu, full);
++
++ if (!ret && (!cpu_buffer->shortest_full ||
++ cpu_buffer->shortest_full > full)) {
++ cpu_buffer->shortest_full = full;
++ }
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ }
++ return ret;
++}
++
++static inline bool
++rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
++ int cpu, int full, ring_buffer_cond_fn cond, void *data)
++{
++ if (rb_watermark_hit(buffer, cpu, full))
++ return true;
++
++ if (cond(data))
++ return true;
++
++ /*
++ * The events can happen in critical sections where
++ * checking a work queue can cause deadlocks.
++ * After adding a task to the queue, this flag is set
++ * only to notify events to try to wake up the queue
++ * using irq_work.
++ *
++ * We don't clear it even if the buffer is no longer
++ * empty. The flag only causes the next event to run
++	 * irq_work to do the work queue wake up. The worst
++ * that can happen if we race with !trace_empty() is that
++ * an event will cause an irq_work to try to wake up
++ * an empty queue.
++ *
++ * There's no reason to protect this flag either, as
++ * the work queue and irq_work logic will do the necessary
++ * synchronization for the wake ups. The only thing
++ * that is necessary is that the wake up happens after
++ * a task has been queued. It's OK for spurious wake ups.
++ */
++ if (full)
++ rbwork->full_waiters_pending = true;
++ else
++ rbwork->waiters_pending = true;
++
++ return false;
++}
++
++/*
++ * The default wait condition for ring_buffer_wait() is just to exit the
++ * wait loop the first time it is woken up.
++ */
++static bool rb_wait_once(void *data)
++{
++ long *once = data;
++
++	/* wait_event() actually calls this twice before scheduling */
++ if (*once > 1)
++ return true;
++
++ (*once)++;
++ return false;
+ }
+
+ /**
+@@ -1003,101 +1057,39 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+- DEFINE_WAIT(wait);
+- struct rb_irq_work *work;
+- long wait_index;
++ struct wait_queue_head *waitq;
++ ring_buffer_cond_fn cond;
++ struct rb_irq_work *rbwork;
++ void *data;
++ long once = 0;
+ int ret = 0;
+
++ cond = rb_wait_once;
++ data = &once;
++
+ /*
+ * Depending on what the caller is waiting for, either any
+ * data in any cpu buffer, or a specific buffer, put the
+ * caller on the appropriate wait queue.
+ */
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+- work = &buffer->irq_work;
++ rbwork = &buffer->irq_work;
+ /* Full only makes sense on per cpu reads */
+ full = 0;
+ } else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -ENODEV;
+ cpu_buffer = buffer->buffers[cpu];
+- work = &cpu_buffer->irq_work;
+- }
+-
+- wait_index = READ_ONCE(work->wait_index);
+-
+- while (true) {
+- if (full)
+- prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+- else
+- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+-
+- /*
+- * The events can happen in critical sections where
+- * checking a work queue can cause deadlocks.
+- * After adding a task to the queue, this flag is set
+- * only to notify events to try to wake up the queue
+- * using irq_work.
+- *
+- * We don't clear it even if the buffer is no longer
+- * empty. The flag only causes the next event to run
+- * irq_work to do the work queue wake up. The worse
+- * that can happen if we race with !trace_empty() is that
+- * an event will cause an irq_work to try to wake up
+- * an empty queue.
+- *
+- * There's no reason to protect this flag either, as
+- * the work queue and irq_work logic will do the necessary
+- * synchronization for the wake ups. The only thing
+- * that is necessary is that the wake up happens after
+- * a task has been queued. It's OK for spurious wake ups.
+- */
+- if (full)
+- work->full_waiters_pending = true;
+- else
+- work->waiters_pending = true;
+-
+- if (signal_pending(current)) {
+- ret = -EINTR;
+- break;
+- }
+-
+- if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
+- break;
+-
+- if (cpu != RING_BUFFER_ALL_CPUS &&
+- !ring_buffer_empty_cpu(buffer, cpu)) {
+- unsigned long flags;
+- bool pagebusy;
+- bool done;
+-
+- if (!full)
+- break;
+-
+- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+- pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+- done = !pagebusy && full_hit(buffer, cpu, full);
+-
+- if (!cpu_buffer->shortest_full ||
+- cpu_buffer->shortest_full > full)
+- cpu_buffer->shortest_full = full;
+- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+- if (done)
+- break;
+- }
+-
+- schedule();
+-
+- /* Make sure to see the new wait index */
+- smp_rmb();
+- if (wait_index != work->wait_index)
+- break;
++ rbwork = &cpu_buffer->irq_work;
+ }
+
+ if (full)
+- finish_wait(&work->full_waiters, &wait);
++ waitq = &rbwork->full_waiters;
+ else
+- finish_wait(&work->waiters, &wait);
++ waitq = &rbwork->waiters;
++
++ ret = wait_event_interruptible((*waitq),
++ rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
+
+ return ret;
+ }
+@@ -1121,30 +1113,51 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table, int full)
+ {
+ struct ring_buffer_per_cpu *cpu_buffer;
+- struct rb_irq_work *work;
++ struct rb_irq_work *rbwork;
+
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+- work = &buffer->irq_work;
++ rbwork = &buffer->irq_work;
+ full = 0;
+ } else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+- return -EINVAL;
++ return EPOLLERR;
+
+ cpu_buffer = buffer->buffers[cpu];
+- work = &cpu_buffer->irq_work;
++ rbwork = &cpu_buffer->irq_work;
+ }
+
+ if (full) {
+- poll_wait(filp, &work->full_waiters, poll_table);
+- work->full_waiters_pending = true;
++ unsigned long flags;
++
++ poll_wait(filp, &rbwork->full_waiters, poll_table);
++
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+- } else {
+- poll_wait(filp, &work->waiters, poll_table);
+- work->waiters_pending = true;
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ if (full_hit(buffer, cpu, full))
++ return EPOLLIN | EPOLLRDNORM;
++ /*
++ * Only allow full_waiters_pending update to be seen after
++ * the shortest_full is set. If the writer sees the
++ * full_waiters_pending flag set, it will compare the
++ * amount in the ring buffer to shortest_full. If the amount
++ * in the ring buffer is greater than the shortest_full
++ * percent, it will call the irq_work handler to wake up
++ * this list. The irq_handler will reset shortest_full
++ * back to zero. That's done under the reader_lock, but
++ * the below smp_mb() makes sure that the update to
++ * full_waiters_pending doesn't leak up into the above.
++ */
++ smp_mb();
++ rbwork->full_waiters_pending = true;
++ return 0;
+ }
+
++ poll_wait(filp, &rbwork->waiters, poll_table);
++ rbwork->waiters_pending = true;
++
+ /*
+ * There's a tight race between setting the waiters_pending and
+ * checking if the ring buffer is empty. Once the waiters_pending bit
+@@ -1160,9 +1173,6 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ */
+ smp_mb();
+
+- if (full)
+- return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+-
+ if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+ (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+ return EPOLLIN | EPOLLRDNORM;
+@@ -1526,7 +1536,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+ old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+
+- local_inc(&cpu_buffer->pages_touched);
+ /*
+ * Just make sure we have seen our old_write and synchronize
+ * with any interrupts that come in.
+@@ -1563,8 +1572,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ */
+ local_set(&next_page->page->commit, 0);
+
+- /* Again, either we update tail_page or an interrupt does */
+- (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
++ /* Either we update tail_page or an interrupt does */
++ if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
++ local_inc(&cpu_buffer->pages_touched);
+ }
+ }
+
+@@ -1582,6 +1592,11 @@ static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+ *
+ * As a safety measure we check to make sure the data pages have not
+ * been corrupted.
++ *
++ * Callers of this function need to guarantee that the list of pages doesn't get
++ * modified during the check. In particular, if it's possible that the function
++ * is invoked with concurrent readers which can swap in a new reader page, then
++ * the caller should take cpu_buffer->reader_lock.
+ */
+ static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+@@ -1787,6 +1802,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+ free_buffer_page(bpage);
+ }
+
++ free_page((unsigned long)cpu_buffer->free_page);
++
+ kfree(cpu_buffer);
+ }
+
+@@ -2319,8 +2336,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ */
+ synchronize_rcu();
+ for_each_buffer_cpu(buffer, cpu) {
++ unsigned long flags;
++
+ cpu_buffer = buffer->buffers[cpu];
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ rb_check_pages(cpu_buffer);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ atomic_dec(&buffer->record_disabled);
+ }
+@@ -2407,7 +2428,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
+ */
+ barrier();
+
+- if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
++ if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
+ /* Writer corrupted the read? */
+ goto reset;
+
+@@ -2981,25 +3002,6 @@ static unsigned rb_calculate_event_length(unsigned length)
+ return length;
+ }
+
+-static u64 rb_time_delta(struct ring_buffer_event *event)
+-{
+- switch (event->type_len) {
+- case RINGBUF_TYPE_PADDING:
+- return 0;
+-
+- case RINGBUF_TYPE_TIME_EXTEND:
+- return rb_event_time_stamp(event);
+-
+- case RINGBUF_TYPE_TIME_STAMP:
+- return 0;
+-
+- case RINGBUF_TYPE_DATA:
+- return event->time_delta;
+- default:
+- return 0;
+- }
+-}
+-
+ static inline bool
+ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event)
+@@ -3007,8 +3009,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long new_index, old_index;
+ struct buffer_page *bpage;
+ unsigned long addr;
+- u64 write_stamp;
+- u64 delta;
+
+ new_index = rb_event_index(event);
+ old_index = new_index + rb_event_ts_length(event);
+@@ -3017,41 +3017,34 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+
+ bpage = READ_ONCE(cpu_buffer->tail_page);
+
+- delta = rb_time_delta(event);
+-
+- if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
+- return false;
+-
+- /* Make sure the write stamp is read before testing the location */
+- barrier();
+-
++ /*
++ * Make sure the tail_page is still the same and
++ * the next write location is the end of this event
++ */
+ if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+ unsigned long write_mask =
+ local_read(&bpage->write) & ~RB_WRITE_MASK;
+ unsigned long event_length = rb_event_length(event);
+
+- /* Something came in, can't discard */
+- if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
+- write_stamp, write_stamp - delta))
+- return false;
+-
+ /*
+- * It's possible that the event time delta is zero
+- * (has the same time stamp as the previous event)
+- * in which case write_stamp and before_stamp could
+- * be the same. In such a case, force before_stamp
+- * to be different than write_stamp. It doesn't
+- * matter what it is, as long as its different.
++ * Make the before_stamp different than the write_stamp
++ * so that the next event adds an absolute value and does
++ * not rely on the saved write stamp, which is now going
++ * to be bogus.
++ *
++ * By setting the before_stamp to zero, the next event
++ * is not going to use the write_stamp and will instead
++ * create an absolute timestamp. This means there's no
++ * reason to update the write_stamp!
+ */
+- if (!delta)
+- rb_time_set(&cpu_buffer->before_stamp, 0);
++ rb_time_set(&cpu_buffer->before_stamp, 0);
+
+ /*
+ * If an event were to come in now, it would see that the
+ * write_stamp and the before_stamp are different, and assume
+ * that this event just added itself before updating
+ * the write stamp. The interrupting event will fix the
+- * write stamp for us, and use the before stamp as its delta.
++ * write stamp for us, and use an absolute timestamp.
+ */
+
+ /*
+@@ -3488,7 +3481,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
+ return;
+
+ /*
+- * If this interrupted another event,
++ * If this interrupted another event,
+ */
+ if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
+ goto out;
+@@ -3582,7 +3575,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ * absolute timestamp.
+ * Don't bother if this is the start of a new page (w == 0).
+ */
+- if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
++ if (!w) {
++ /* Use the sub-buffer timestamp */
++ info->delta = 0;
++ } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
+ info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
+ info->length += RB_LEN_TIME_EXTEND;
+ } else {
+@@ -3605,26 +3601,19 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+
+ /* See if we shot pass the end of this buffer page */
+ if (unlikely(write > BUF_PAGE_SIZE)) {
+- /* before and after may now different, fix it up*/
+- b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+- if (a_ok && b_ok && info->before != info->after)
+- (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+- info->before, info->after);
+- if (a_ok && b_ok)
+- check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
++ check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
+ return rb_move_tail(cpu_buffer, tail, info);
+ }
+
+ if (likely(tail == w)) {
+- u64 save_before;
+- bool s_ok;
+-
+ /* Nothing interrupted us between A and C */
+ /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
+- barrier();
+- /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
+- RB_WARN_ON(cpu_buffer, !s_ok);
++ /*
++ * If something came in between C and D, the write stamp
++ * may now not be in sync. But that's fine as the before_stamp
++ * will be different and the next event will just be forced
++ * to use an absolute timestamp.
++ */
+ if (likely(!(info->add_timestamp &
+ (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
+ /* This did not interrupt any time update */
+@@ -3632,41 +3621,40 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ else
+ /* Just use full timestamp for interrupting event */
+ info->delta = info->ts;
+- barrier();
+ check_buffer(cpu_buffer, info, tail);
+- if (unlikely(info->ts != save_before)) {
+- /* SLOW PATH - Interrupted between C and E */
+-
+- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+- RB_WARN_ON(cpu_buffer, !a_ok);
+-
+- /* Write stamp must only go forward */
+- if (save_before > info->after) {
+- /*
+- * We do not care about the result, only that
+- * it gets updated atomically.
+- */
+- (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
+- info->after, save_before);
+- }
+- }
+ } else {
+ u64 ts;
+ /* SLOW PATH - Interrupted between A and C */
+- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+- /* Was interrupted before here, write_stamp must be valid */
++
++ /* Save the old before_stamp */
++ a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+ RB_WARN_ON(cpu_buffer, !a_ok);
++
++ /*
++ * Read a new timestamp and update the before_stamp so that
++ * the next event after this one is forced to use an absolute
++ * timestamp. This is in case an interrupt were to come in
++ * between E and F.
++ */
+ ts = rb_time_stamp(cpu_buffer->buffer);
++ rb_time_set(&cpu_buffer->before_stamp, ts);
++
++ barrier();
++ /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
++ /* Was interrupted before here, write_stamp must be valid */
++ RB_WARN_ON(cpu_buffer, !a_ok);
+ barrier();
+- /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
+- info->after < ts &&
+- rb_time_cmpxchg(&cpu_buffer->write_stamp,
+- info->after, ts)) {
+- /* Nothing came after this event between C and E */
++ /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
++ info->after == info->before && info->after < ts) {
++ /*
++ * Nothing came after this event between C and F, it is
++ * safe to use info->after for the delta as it
++ * matched info->before and is still valid.
++ */
+ info->delta = ts - info->after;
+ } else {
+ /*
+- * Interrupted between C and E:
++ * Interrupted between C and F:
+ * Lost the previous events time stamp. Just set the
+ * delta to zero, and this will be the same time as
+ * the event this event interrupted. And the events that
+@@ -3717,6 +3705,12 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ int nr_loops = 0;
+ int add_ts_default;
+
++ /* ring buffer does cmpxchg, make sure it is safe in NMI context */
++ if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
++ (unlikely(in_nmi()))) {
++ return NULL;
++ }
++
+ rb_start_commit(cpu_buffer);
+ /* The commit page can not change after this */
+
+@@ -3740,6 +3734,8 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
+ add_ts_default = RB_ADD_STAMP_ABSOLUTE;
+ info.length += RB_LEN_TIME_EXTEND;
++ if (info.length > BUF_MAX_DATA_SIZE)
++ goto out_fail;
+ } else {
+ add_ts_default = RB_ADD_STAMP_NONE;
+ }
+@@ -4449,7 +4445,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
+ cpu_buffer = iter->cpu_buffer;
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+- commit_page = cpu_buffer->commit_page;
++ commit_page = READ_ONCE(cpu_buffer->commit_page);
+ commit_ts = commit_page->page->time_stamp;
+
+ /*
+@@ -5121,7 +5117,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
+ if (!iter)
+ return NULL;
+
+- iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
++ /* Holds the entire event: data and meta data */
++ iter->event = kmalloc(BUF_PAGE_SIZE, flags);
+ if (!iter->event) {
+ kfree(iter);
+ return NULL;
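The full_waiters_pending handling in the poll hunk above follows the classic
store-buffer pattern: the poller checks fullness, issues a full barrier, then
publishes its flag, while the writer publishes new data before reading the
flag. A minimal userspace C11 sketch of that pairing (the names, the
percentage stand-ins, and the seq_cst fences are illustrative assumptions,
not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int  fill_percent;         /* stand-in for full_hit() */
	static atomic_bool full_waiters_pending; /* stand-in for rbwork flag */

	/* Poller side: mirrors the ring_buffer_poll_wait() hunk above. */
	static bool poller_should_sleep(int full)
	{
		if (atomic_load_explicit(&fill_percent,
					 memory_order_relaxed) >= full)
			return false;	/* buffer full enough: report it */
		/*
		 * Pairs with the writer's fence: the store to
		 * full_waiters_pending must not be reordered before the
		 * fullness check above, or a wakeup could be missed.
		 */
		atomic_thread_fence(memory_order_seq_cst);
		atomic_store_explicit(&full_waiters_pending, true,
				      memory_order_relaxed);
		return true;		/* caller goes to sleep */
	}

	/* Writer side: publish the new fill level, then look for sleepers. */
	static bool writer_needs_wakeup(int new_percent)
	{
		atomic_store_explicit(&fill_percent, new_percent,
				      memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_load_explicit(&full_waiters_pending,
					    memory_order_relaxed);
	}

	int main(void)
	{
		/* Writer fills to 60%, poller waits for 50%: no sleep. */
		writer_needs_wakeup(60);
		return poller_should_sleep(50) ? 1 : 0;
	}

With both fences in place at least one side must observe the other: either
the poller sees the new fill level and never sleeps, or the writer sees
full_waiters_pending and wakes it.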
+diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
+index 2f68e93fff0bc9..df0745a42a3f35 100644
+--- a/kernel/trace/rv/rv.c
++++ b/kernel/trace/rv/rv.c
+@@ -245,6 +245,7 @@ static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
+
+ /**
+ * rv_disable_monitor - disable a given runtime monitor
++ * @mdef: Pointer to the monitor definition structure.
+ *
+ * Returns 0 on success.
+ */
+@@ -256,6 +257,7 @@ int rv_disable_monitor(struct rv_monitor_def *mdef)
+
+ /**
+ * rv_enable_monitor - enable a given runtime monitor
++ * @mdef: Pointer to the monitor definition structure.
+ *
+ * Returns 0 on success, error otherwise.
+ */
+diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
+index 8dfe85499d4a2b..354c2117be43f7 100644
+--- a/kernel/trace/synth_event_gen_test.c
++++ b/kernel/trace/synth_event_gen_test.c
+@@ -477,6 +477,17 @@ static int __init synth_event_gen_test_init(void)
+
+ ret = test_trace_synth_event();
+ WARN_ON(ret);
++
++ /* Disable when done */
++ trace_array_set_clr_event(gen_synth_test->tr,
++ "synthetic",
++ "gen_synth_test", false);
++ trace_array_set_clr_event(empty_synth_test->tr,
++ "synthetic",
++ "empty_synth_test", false);
++ trace_array_set_clr_event(create_synth_test->tr,
++ "synthetic",
++ "create_synth_test", false);
+ out:
+ return ret;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index abaaf516fcae9b..4f93d57cc02990 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -39,6 +39,7 @@
+ #include <linux/ctype.h>
+ #include <linux/init.h>
+ #include <linux/panic_notifier.h>
++#include <linux/kmemleak.h>
+ #include <linux/poll.h>
+ #include <linux/nmi.h>
+ #include <linux/fs.h>
+@@ -1893,6 +1894,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+ __update_max_tr(tr, tsk, cpu);
+
+ arch_spin_unlock(&tr->max_lock);
++
++ /* Any waiters on the old snapshot buffer need to wake up */
++ ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
+ }
+
+ /**
+@@ -1944,12 +1948,23 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+
+ static int wait_on_pipe(struct trace_iterator *iter, int full)
+ {
++ int ret;
++
+ /* Iterators are static, they should be filled or empty */
+ if (trace_buffer_iter(iter, iter->cpu_file))
+ return 0;
+
+- return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
+- full);
++ ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++ /*
++ * Make sure this is still the snapshot buffer, as if a snapshot were
++ * to happen, this would now be the main buffer.
++ */
++ if (iter->snapshot)
++ iter->array_buffer = &iter->tr->max_buffer;
++#endif
++ return ret;
+ }
+
+ #ifdef CONFIG_FTRACE_STARTUP_TEST
+@@ -2297,10 +2312,14 @@ struct saved_cmdlines_buffer {
+ unsigned *map_cmdline_to_pid;
+ unsigned cmdline_num;
+ int cmdline_idx;
+- char *saved_cmdlines;
++ char saved_cmdlines[];
+ };
+ static struct saved_cmdlines_buffer *savedcmd;
+
++/* Holds the size of a cmdline and pid element */
++#define SAVED_CMDLINE_MAP_ELEMENT_SIZE(s) \
++ (TASK_COMM_LEN + sizeof((s)->map_cmdline_to_pid[0]))
++
+ static inline char *get_saved_cmdlines(int idx)
+ {
+ return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
+@@ -2311,47 +2330,54 @@ static inline void set_cmdline(int idx, const char *cmdline)
+ strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+ }
+
+-static int allocate_cmdlines_buffer(unsigned int val,
+- struct saved_cmdlines_buffer *s)
++static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+ {
+- s->map_cmdline_to_pid = kmalloc_array(val,
+- sizeof(*s->map_cmdline_to_pid),
+- GFP_KERNEL);
+- if (!s->map_cmdline_to_pid)
+- return -ENOMEM;
++ int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
+
+- s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
+- if (!s->saved_cmdlines) {
+- kfree(s->map_cmdline_to_pid);
+- return -ENOMEM;
+- }
++ kmemleak_free(s);
++ free_pages((unsigned long)s, order);
++}
+
+- s->cmdline_idx = 0;
++static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
++{
++ struct saved_cmdlines_buffer *s;
++ struct page *page;
++ int orig_size, size;
++ int order;
++
++ /* Figure out how much is needed to hold the given number of cmdlines */
++ orig_size = sizeof(*s) + val * SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);
++ order = get_order(orig_size);
++ size = 1 << (order + PAGE_SHIFT);
++ page = alloc_pages(GFP_KERNEL, order);
++ if (!page)
++ return NULL;
++
++ s = page_address(page);
++ kmemleak_alloc(s, size, 1, GFP_KERNEL);
++ memset(s, 0, sizeof(*s));
++
++ /* Round up to actual allocation */
++ val = (size - sizeof(*s)) / SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);
+ s->cmdline_num = val;
++
++ /* Place map_cmdline_to_pid array right after saved_cmdlines */
++ s->map_cmdline_to_pid = (unsigned *)&s->saved_cmdlines[val * TASK_COMM_LEN];
++
++ s->cmdline_idx = 0;
+ memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
+ sizeof(s->map_pid_to_cmdline));
+ memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
+ val * sizeof(*s->map_cmdline_to_pid));
+
+- return 0;
++ return s;
+ }
+
+ static int trace_create_savedcmd(void)
+ {
+- int ret;
++ savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
+
+- savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
+- if (!savedcmd)
+- return -ENOMEM;
+-
+- ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
+- if (ret < 0) {
+- kfree(savedcmd);
+- savedcmd = NULL;
+- return -ENOMEM;
+- }
+-
+- return 0;
++ return savedcmd ? 0 : -ENOMEM;
+ }
+
+ int is_tracing_stopped(void)
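The saved_cmdlines rework above makes one page-order allocation and then
recomputes cmdline_num from what that allocation can actually hold. A rough
userspace model of the sizing arithmetic (the header size, the 4 KiB
PAGE_SHIFT, and the 128-entry default are assumptions for illustration, not
values taken from this kernel):

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assume 4 KiB pages */
	#define TASK_COMM_LEN	16
	/* one cmdline slot plus its map_cmdline_to_pid entry */
	#define ELEM_SIZE	(TASK_COMM_LEN + sizeof(unsigned int))

	/* Smallest page order whose allocation covers size bytes. */
	static int get_order(size_t size)
	{
		int order = 0;

		while (((size_t)1 << (order + PAGE_SHIFT)) < size)
			order++;
		return order;
	}

	int main(void)
	{
		size_t header = 64;	/* stand-in for sizeof(*s) */
		unsigned int val = 128;	/* assumed default entry count */
		size_t orig = header + val * ELEM_SIZE;
		int order = get_order(orig);
		size_t size = (size_t)1 << (order + PAGE_SHIFT);

		/* Round up to what the page allocation actually holds. */
		val = (size - header) / ELEM_SIZE;
		printf("order %d: %zu bytes, %u cmdlines\n",
		       order, size, val);
		return 0;
	}

Under these assumptions the 128-entry request lands in a single 4 KiB page
that can actually hold 201 entries, which is why the patch rewrites val after
the allocation instead of keeping the requested count.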
+@@ -2359,13 +2385,7 @@ int is_tracing_stopped(void)
+ return global_trace.stop_count;
+ }
+
+-/**
+- * tracing_start - quick start of the tracer
+- *
+- * If tracing is enabled but was stopped by tracing_stop,
+- * this will start the tracer back up.
+- */
+-void tracing_start(void)
++static void tracing_start_tr(struct trace_array *tr)
+ {
+ struct trace_buffer *buffer;
+ unsigned long flags;
+@@ -2373,119 +2393,83 @@ void tracing_start(void)
+ if (tracing_disabled)
+ return;
+
+- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+- if (--global_trace.stop_count) {
+- if (global_trace.stop_count < 0) {
++ raw_spin_lock_irqsave(&tr->start_lock, flags);
++ if (--tr->stop_count) {
++ if (WARN_ON_ONCE(tr->stop_count < 0)) {
+ /* Someone screwed up their debugging */
+- WARN_ON_ONCE(1);
+- global_trace.stop_count = 0;
++ tr->stop_count = 0;
+ }
+ goto out;
+ }
+
+ /* Prevent the buffers from switching */
+- arch_spin_lock(&global_trace.max_lock);
++ arch_spin_lock(&tr->max_lock);
+
+- buffer = global_trace.array_buffer.buffer;
++ buffer = tr->array_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_enable(buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- buffer = global_trace.max_buffer.buffer;
++ buffer = tr->max_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_enable(buffer);
+ #endif
+
+- arch_spin_unlock(&global_trace.max_lock);
+-
+- out:
+- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+-}
+-
+-static void tracing_start_tr(struct trace_array *tr)
+-{
+- struct trace_buffer *buffer;
+- unsigned long flags;
+-
+- if (tracing_disabled)
+- return;
+-
+- /* If global, we need to also start the max tracer */
+- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+- return tracing_start();
+-
+- raw_spin_lock_irqsave(&tr->start_lock, flags);
+-
+- if (--tr->stop_count) {
+- if (tr->stop_count < 0) {
+- /* Someone screwed up their debugging */
+- WARN_ON_ONCE(1);
+- tr->stop_count = 0;
+- }
+- goto out;
+- }
+-
+- buffer = tr->array_buffer.buffer;
+- if (buffer)
+- ring_buffer_record_enable(buffer);
++ arch_spin_unlock(&tr->max_lock);
+
+ out:
+ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ }
+
+ /**
+- * tracing_stop - quick stop of the tracer
++ * tracing_start - quick start of the tracer
+ *
+- * Light weight way to stop tracing. Use in conjunction with
+- * tracing_start.
++ * If tracing is enabled but was stopped by tracing_stop,
++ * this will start the tracer back up.
+ */
+-void tracing_stop(void)
++void tracing_start(void)
++
++{
++ return tracing_start_tr(&global_trace);
++}
++
++static void tracing_stop_tr(struct trace_array *tr)
+ {
+ struct trace_buffer *buffer;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+- if (global_trace.stop_count++)
++ raw_spin_lock_irqsave(&tr->start_lock, flags);
++ if (tr->stop_count++)
+ goto out;
+
+ /* Prevent the buffers from switching */
+- arch_spin_lock(&global_trace.max_lock);
++ arch_spin_lock(&tr->max_lock);
+
+- buffer = global_trace.array_buffer.buffer;
++ buffer = tr->array_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_disable(buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- buffer = global_trace.max_buffer.buffer;
++ buffer = tr->max_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_disable(buffer);
+ #endif
+
+- arch_spin_unlock(&global_trace.max_lock);
++ arch_spin_unlock(&tr->max_lock);
+
+ out:
+- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
++ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ }
+
+-static void tracing_stop_tr(struct trace_array *tr)
++/**
++ * tracing_stop - quick stop of the tracer
++ *
++ * Light weight way to stop tracing. Use in conjunction with
++ * tracing_start.
++ */
++void tracing_stop(void)
+ {
+- struct trace_buffer *buffer;
+- unsigned long flags;
+-
+- /* If global, we need to also stop the max tracer */
+- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+- return tracing_stop();
+-
+- raw_spin_lock_irqsave(&tr->start_lock, flags);
+- if (tr->stop_count++)
+- goto out;
+-
+- buffer = tr->array_buffer.buffer;
+- if (buffer)
+- ring_buffer_record_disable(buffer);
+-
+- out:
+- raw_spin_unlock_irqrestore(&tr->start_lock, flags);
++ return tracing_stop_tr(&global_trace);
+ }
+
+ static int trace_save_cmdline(struct task_struct *tsk)
+@@ -2769,8 +2753,11 @@ void trace_buffered_event_enable(void)
+ for_each_tracing_cpu(cpu) {
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+- if (!page)
+- goto failed;
++ /* This is just an optimization and can handle failures */
++ if (!page) {
++ pr_err("Failed to allocate event buffer\n");
++ break;
++ }
+
+ event = page_address(page);
+ memset(event, 0, sizeof(*event));
+@@ -2784,10 +2771,6 @@ void trace_buffered_event_enable(void)
+ WARN_ON_ONCE(1);
+ preempt_enable();
+ }
+-
+- return;
+- failed:
+- trace_buffered_event_disable();
+ }
+
+ static void enable_trace_buffered_event(void *data)
+@@ -2822,11 +2805,9 @@ void trace_buffered_event_disable(void)
+ if (--trace_buffered_event_ref)
+ return;
+
+- preempt_disable();
+ /* For each CPU, set the buffer as used. */
+- smp_call_function_many(tracing_buffer_mask,
+- disable_trace_buffered_event, NULL, 1);
+- preempt_enable();
++ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
++ NULL, true);
+
+ /* Wait for all current users to finish */
+ synchronize_rcu();
+@@ -2835,17 +2816,19 @@ void trace_buffered_event_disable(void)
+ free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
+ per_cpu(trace_buffered_event, cpu) = NULL;
+ }
++
+ /*
+- * Make sure trace_buffered_event is NULL before clearing
+- * trace_buffered_event_cnt.
++ * Wait for all CPUs that potentially started checking if they can use
++ * their event buffer only after the previous synchronize_rcu() call,
++ * but still read a valid pointer from trace_buffered_event. They must
++ * not see a cleared trace_buffered_event_cnt, or they could wrongly
++ * decide to use the pointed-to buffer which is now freed.
+ */
+- smp_wmb();
++ synchronize_rcu();
+
+- preempt_disable();
+- /* Do the work on each cpu */
+- smp_call_function_many(tracing_buffer_mask,
+- enable_trace_buffered_event, NULL, 1);
+- preempt_enable();
++ /* For each CPU, relinquish the buffer */
++ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
++ true);
+ }
+
+ static struct trace_buffer *temp_buffer;
+@@ -4171,6 +4154,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+ break;
+ entries++;
+ ring_buffer_iter_advance(buf_iter);
++ /* This could be a big loop */
++ cond_resched();
+ }
+
+ per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
+@@ -4773,7 +4758,11 @@ static int s_show(struct seq_file *m, void *v)
+ iter->leftover = ret;
+
+ } else {
+- print_trace_line(iter);
++ ret = print_trace_line(iter);
++ if (ret == TRACE_TYPE_PARTIAL_LINE) {
++ iter->seq.full = 0;
++ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
++ }
+ ret = trace_print_seq(m, &iter->seq);
+ /*
+ * If we overflow the seq_file buffer, then it will
+@@ -4986,6 +4975,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
+ if (ret)
+ return ret;
+
++ mutex_lock(&event_mutex);
++
++ /* Fail if the file is marked for removal */
++ if (file->flags & EVENT_FILE_FL_FREED) {
++ trace_array_put(file->tr);
++ ret = -ENODEV;
++ } else {
++ event_file_get(file);
++ }
++
++ mutex_unlock(&event_mutex);
++ if (ret)
++ return ret;
++
+ filp->private_data = inode->i_private;
+
+ return 0;
+@@ -4996,10 +4999,17 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
++ event_file_put(file);
+
+ return 0;
+ }
+
++int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
++{
++ tracing_release_file_tr(inode, filp);
++ return single_release(inode, filp);
++}
++
+ static int tracing_mark_open(struct inode *inode, struct file *filp)
+ {
+ stream_open(inode, filp);
+@@ -6060,26 +6070,14 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ }
+
+-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+-{
+- kfree(s->saved_cmdlines);
+- kfree(s->map_cmdline_to_pid);
+- kfree(s);
+-}
+-
+ static int tracing_resize_saved_cmdlines(unsigned int val)
+ {
+ struct saved_cmdlines_buffer *s, *savedcmd_temp;
+
+- s = kmalloc(sizeof(*s), GFP_KERNEL);
++ s = allocate_cmdlines_buffer(val);
+ if (!s)
+ return -ENOMEM;
+
+- if (allocate_cmdlines_buffer(val, s) < 0) {
+- kfree(s);
+- return -ENOMEM;
+- }
+-
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+ savedcmd_temp = savedcmd;
+@@ -6380,13 +6378,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ if (!tr->array_buffer.buffer)
+ return 0;
+
++ /* Do not allow tracing while resizing ring buffer */
++ tracing_stop_tr(tr);
++
+ ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
+ if (ret < 0)
+- return ret;
++ goto out_start;
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
+- !tr->current_trace->use_max_tr)
++ if (!tr->allocated_snapshot)
+ goto out;
+
+ ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
+@@ -6411,7 +6411,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ WARN_ON(1);
+ tracing_disabled = 1;
+ }
+- return ret;
++ goto out_start;
+ }
+
+ update_buffer_entries(&tr->max_buffer, cpu);
+@@ -6420,7 +6420,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+
+ update_buffer_entries(&tr->array_buffer, cpu);
+-
++ out_start:
++ tracing_start_tr(tr);
+ return ret;
+ }
+
+@@ -8358,6 +8359,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ return size;
+ }
+
++static int tracing_buffers_flush(struct file *file, fl_owner_t id)
++{
++ struct ftrace_buffer_info *info = file->private_data;
++ struct trace_iterator *iter = &info->iter;
++
++ iter->wait_index++;
++ /* Make sure the waiters see the new wait_index */
++ smp_wmb();
++
++ ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
++
++ return 0;
++}
++
+ static int tracing_buffers_release(struct inode *inode, struct file *file)
+ {
+ struct ftrace_buffer_info *info = file->private_data;
+@@ -8369,12 +8384,6 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
+
+ __trace_array_put(iter->tr);
+
+- iter->wait_index++;
+- /* Make sure the waiters see the new wait_index */
+- smp_wmb();
+-
+- ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
+-
+ if (info->spare)
+ ring_buffer_free_read_page(iter->array_buffer->buffer,
+ info->spare_cpu, info->spare);
+@@ -8539,7 +8548,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+
+ wait_index = READ_ONCE(iter->wait_index);
+
+- ret = wait_on_pipe(iter, iter->tr->buffer_percent);
++ ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
+ if (ret)
+ goto out;
+
+@@ -8588,6 +8597,7 @@ static const struct file_operations tracing_buffers_fops = {
+ .read = tracing_buffers_read,
+ .poll = tracing_buffers_poll,
+ .release = tracing_buffers_release,
++ .flush = tracing_buffers_flush,
+ .splice_read = tracing_buffers_splice_read,
+ .unlocked_ioctl = tracing_buffers_ioctl,
+ .llseek = no_llseek,
+@@ -9759,7 +9769,6 @@ static __init void create_trace_instances(struct dentry *d_tracer)
+ static void
+ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
+ {
+- struct trace_event_file *file;
+ int cpu;
+
+ trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
+@@ -9792,11 +9801,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
+ trace_create_file("trace_marker", 0220, d_tracer,
+ tr, &tracing_mark_fops);
+
+- file = __find_event_file(tr, "ftrace", "print");
+- if (file && file->ef)
+- eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
+- file, &event_trigger_fops);
+- tr->trace_marker_file = file;
++ tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
+
+ trace_create_file("trace_marker_raw", 0220, d_tracer,
+ tr, &tracing_mark_raw_fops);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 77debe53f07cf5..3db42bae73f8e0 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -381,7 +381,7 @@ struct trace_array {
+ struct dentry *dir;
+ struct dentry *options;
+ struct dentry *percpu_dir;
+- struct dentry *event_dir;
++ struct eventfs_inode *event_dir;
+ struct trace_options *topts;
+ struct list_head systems;
+ struct list_head events;
+@@ -612,6 +612,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
+ int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+ int tracing_open_file_tr(struct inode *inode, struct file *filp);
+ int tracing_release_file_tr(struct inode *inode, struct file *filp);
++int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
+ bool tracing_is_disabled(void);
+ bool tracer_tracing_is_on(struct trace_array *tr);
+ void tracer_tracing_on(struct trace_array *tr);
+@@ -1344,7 +1345,7 @@ struct trace_subsystem_dir {
+ struct list_head list;
+ struct event_subsystem *subsystem;
+ struct trace_array *tr;
+- struct eventfs_file *ef;
++ struct eventfs_inode *ei;
+ int ref_count;
+ int nr_events;
+ };
+@@ -1554,6 +1555,29 @@ static inline void *event_file_data(struct file *filp)
+ extern struct mutex event_mutex;
+ extern struct list_head ftrace_events;
+
++/*
++ * When the trace_event_file is the filp->i_private pointer,
++ * it must be taken under the event_mutex lock, and then checked
++ * if the EVENT_FILE_FL_FREED flag is set. If it is, then the
++ * data pointed to by the trace_event_file can not be trusted.
++ *
++ * Use event_file_file() to access the trace_event_file from
++ * the filp the first time under the event_mutex and check for
++ * NULL. If it needs to be retrieved again while the event_mutex
++ * is still held, event_file_data() can be used and is
++ * guaranteed to be valid.
++ */
++static inline struct trace_event_file *event_file_file(struct file *filp)
++{
++ struct trace_event_file *file;
++
++ lockdep_assert_held(&event_mutex);
++ file = READ_ONCE(file_inode(filp)->i_private);
++ if (!file || file->flags & EVENT_FILE_FL_FREED)
++ return NULL;
++ return file;
++}
++
+ extern const struct file_operations event_trigger_fops;
+ extern const struct file_operations event_hist_fops;
+ extern const struct file_operations event_hist_debug_fops;
+@@ -1664,6 +1688,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
+ char *glob,
+ struct event_trigger_data *trigger_data);
+
++extern void event_file_get(struct trace_event_file *file);
++extern void event_file_put(struct trace_event_file *file);
++
+ /**
+ * struct event_trigger_ops - callbacks for trace event triggers
+ *
+diff --git a/kernel/trace/trace_btf.c b/kernel/trace/trace_btf.c
+index ca224d53bfdcd0..5bbdbcbbde3cd2 100644
+--- a/kernel/trace/trace_btf.c
++++ b/kernel/trace/trace_btf.c
+@@ -91,8 +91,8 @@ const struct btf_member *btf_find_struct_member(struct btf *btf,
+ for_each_member(i, type, member) {
+ if (!member->name_off) {
+ /* Anonymous union/struct: push it for later use */
+- type = btf_type_skip_modifiers(btf, member->type, &tid);
+- if (type && top < BTF_ANON_STACK_MAX) {
++ if (btf_type_skip_modifiers(btf, member->type, &tid) &&
++ top < BTF_ANON_STACK_MAX) {
+ anon_stack[top].tid = tid;
+ anon_stack[top++].offset =
+ cur_offset + member->offset;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index f49d6ddb634255..c68dc50c8becfb 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -984,19 +984,41 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
+ return;
+
+ if (!--dir->nr_events) {
+- eventfs_remove(dir->ef);
++ eventfs_remove_dir(dir->ei);
+ list_del(&dir->list);
+ __put_system_dir(dir);
+ }
+ }
+
++void event_file_get(struct trace_event_file *file)
++{
++ atomic_inc(&file->ref);
++}
++
++void event_file_put(struct trace_event_file *file)
++{
++ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
++ if (file->flags & EVENT_FILE_FL_FREED)
++ kmem_cache_free(file_cachep, file);
++ return;
++ }
++
++ if (atomic_dec_and_test(&file->ref)) {
++ /* Count should only go to zero when it is freed */
++ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
++ return;
++ kmem_cache_free(file_cachep, file);
++ }
++}
++
+ static void remove_event_file_dir(struct trace_event_file *file)
+ {
+- eventfs_remove(file->ef);
++ eventfs_remove_dir(file->ei);
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ free_event_filter(file->filter);
+- kmem_cache_free(file_cachep, file);
++ file->flags |= EVENT_FILE_FL_FREED;
++ event_file_put(file);
+ }
+
+ /*
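The get/put pair above ties freeing of a trace_event_file to two conditions:
the last reference is gone and the file was already marked
EVENT_FILE_FL_FREED. A toy userspace model of that lifetime rule (the struct,
names, and stdio reporting are invented; the kernel uses atomic_t, a
kmem_cache, and WARN_ON_ONCE instead):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct file_obj {
		atomic_int  ref;
		atomic_bool freed;	/* models EVENT_FILE_FL_FREED */
	};

	static void obj_get(struct file_obj *f)
	{
		atomic_fetch_add(&f->ref, 1);
	}

	static void obj_put(struct file_obj *f)
	{
		/* Only the last put frees, and only if the owner already
		 * marked the object freed, as event_file_put() checks. */
		if (atomic_fetch_sub(&f->ref, 1) == 1) {
			if (!atomic_load(&f->freed)) {
				fprintf(stderr, "zero refs before removal!\n");
				return;
			}
			free(f);
			puts("freed");
		}
	}

	int main(void)
	{
		struct file_obj *f = calloc(1, sizeof(*f));

		if (!f)
			return 1;
		obj_get(f);			/* creation reference */
		obj_get(f);			/* an opened "enable" file */
		atomic_store(&f->freed, true);	/* remove_event_file_dir() */
		obj_put(f);			/* drops the creation ref */
		obj_put(f);			/* last close: prints "freed" */
		return 0;
	}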
+@@ -1364,7 +1386,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ char buf[4] = "0";
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
++ file = event_file_file(filp);
+ if (likely(file))
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+@@ -1406,7 +1428,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ case 1:
+ ret = -ENODEV;
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
++ file = event_file_file(filp);
+ if (likely(file))
+ ret = ftrace_event_enable_disable(file, val);
+ mutex_unlock(&event_mutex);
+@@ -1516,7 +1538,8 @@ enum {
+
+ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+- struct trace_event_call *call = event_file_data(m->private);
++ struct trace_event_file *file = event_file_data(m->private);
++ struct trace_event_call *call = file->event_call;
+ struct list_head *common_head = &ftrace_common_fields;
+ struct list_head *head = trace_get_fields(call);
+ struct list_head *node = v;
+@@ -1548,7 +1571,8 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
+
+ static int f_show(struct seq_file *m, void *v)
+ {
+- struct trace_event_call *call = event_file_data(m->private);
++ struct trace_event_file *file = event_file_data(m->private);
++ struct trace_event_call *call = file->event_call;
+ struct ftrace_event_field *field;
+ const char *array_descriptor;
+
+@@ -1603,12 +1627,14 @@ static int f_show(struct seq_file *m, void *v)
+
+ static void *f_start(struct seq_file *m, loff_t *pos)
+ {
++ struct trace_event_file *file;
+ void *p = (void *)FORMAT_HEADER;
+ loff_t l = 0;
+
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+- if (!event_file_data(m->private))
++ file = event_file_file(m->private);
++ if (!file)
+ return ERR_PTR(-ENODEV);
+
+ while (l < *pos && p)
+@@ -1646,6 +1672,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
+ return 0;
+ }
+
++#ifdef CONFIG_PERF_EVENTS
+ static ssize_t
+ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+ {
+@@ -1660,6 +1687,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+ }
++#endif
+
+ static ssize_t
+ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+@@ -1680,7 +1708,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ trace_seq_init(s);
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
++ file = event_file_file(filp);
+ if (file)
+ print_event_filter(file, s);
+ mutex_unlock(&event_mutex);
+@@ -1710,9 +1738,13 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ return PTR_ERR(buf);
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
+- if (file)
+- err = apply_event_filter(file, buf);
++ file = event_file_file(filp);
++ if (file) {
++ if (file->flags & EVENT_FILE_FL_FREED)
++ err = -ENODEV;
++ else
++ err = apply_event_filter(file, buf);
++ }
+ mutex_unlock(&event_mutex);
+
+ kfree(buf);
+@@ -2104,10 +2136,12 @@ static const struct file_operations ftrace_event_format_fops = {
+ .release = seq_release,
+ };
+
++#ifdef CONFIG_PERF_EVENTS
+ static const struct file_operations ftrace_event_id_fops = {
+ .read = event_id_read,
+ .llseek = default_llseek,
+ };
++#endif
+
+ static const struct file_operations ftrace_event_filter_fops = {
+ .open = tracing_open_file_tr,
+@@ -2280,14 +2314,40 @@ create_new_subsystem(const char *name)
+ return NULL;
+ }
+
+-static struct eventfs_file *
++static int system_callback(const char *name, umode_t *mode, void **data,
++ const struct file_operations **fops)
++{
++ if (strcmp(name, "filter") == 0)
++ *fops = &ftrace_subsystem_filter_fops;
++
++ else if (strcmp(name, "enable") == 0)
++ *fops = &ftrace_system_enable_fops;
++
++ else
++ return 0;
++
++ *mode = TRACE_MODE_WRITE;
++ return 1;
++}
++
++static struct eventfs_inode *
+ event_subsystem_dir(struct trace_array *tr, const char *name,
+- struct trace_event_file *file, struct dentry *parent)
++ struct trace_event_file *file, struct eventfs_inode *parent)
+ {
+ struct event_subsystem *system, *iter;
+ struct trace_subsystem_dir *dir;
+- struct eventfs_file *ef;
+- int res;
++ struct eventfs_inode *ei;
++ int nr_entries;
++ static struct eventfs_entry system_entries[] = {
++ {
++ .name = "filter",
++ .callback = system_callback,
++ },
++ {
++ .name = "enable",
++ .callback = system_callback,
++ }
++ };
+
+ /* First see if we did not already create this dir */
+ list_for_each_entry(dir, &tr->systems, list) {
+@@ -2295,7 +2355,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
+ if (strcmp(system->name, name) == 0) {
+ dir->nr_events++;
+ file->system = dir;
+- return dir->ef;
++ return dir->ei;
+ }
+ }
+
+@@ -2319,39 +2379,29 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
+ } else
+ __get_system(system);
+
+- ef = eventfs_add_subsystem_dir(name, parent);
+- if (IS_ERR(ef)) {
++ /* ftrace only has directories, no files */
++ if (strcmp(name, "ftrace") == 0)
++ nr_entries = 0;
++ else
++ nr_entries = ARRAY_SIZE(system_entries);
++
++ ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
++ if (IS_ERR(ei)) {
+ pr_warn("Failed to create system directory %s\n", name);
+ __put_system(system);
+ goto out_free;
+ }
+
+- dir->ef = ef;
++ dir->ei = ei;
+ dir->tr = tr;
+ dir->ref_count = 1;
+ dir->nr_events = 1;
+ dir->subsystem = system;
+ file->system = dir;
+
+- /* the ftrace system is special, do not create enable or filter files */
+- if (strcmp(name, "ftrace") != 0) {
+-
+- res = eventfs_add_file("filter", TRACE_MODE_WRITE,
+- dir->ef, dir,
+- &ftrace_subsystem_filter_fops);
+- if (res) {
+- kfree(system->filter);
+- system->filter = NULL;
+- pr_warn("Could not create tracefs '%s/filter' entry\n", name);
+- }
+-
+- eventfs_add_file("enable", TRACE_MODE_WRITE, dir->ef, dir,
+- &ftrace_system_enable_fops);
+- }
+-
+ list_add(&dir->list, &tr->systems);
+
+- return dir->ef;
++ return dir->ei;
+
+ out_free:
+ kfree(dir);
+@@ -2400,15 +2450,142 @@ event_define_fields(struct trace_event_call *call)
+ return ret;
+ }
+
++static int event_callback(const char *name, umode_t *mode, void **data,
++ const struct file_operations **fops)
++{
++ struct trace_event_file *file = *data;
++ struct trace_event_call *call = file->event_call;
++
++ if (strcmp(name, "format") == 0) {
++ *mode = TRACE_MODE_READ;
++ *fops = &ftrace_event_format_fops;
++ return 1;
++ }
++
++ /*
++ * Only event directories that can be enabled should have
++ * triggers or filters, with the exception of the "print"
++ * event that can have a "trigger" file.
++ */
++ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
++ if (call->class->reg && strcmp(name, "enable") == 0) {
++ *mode = TRACE_MODE_WRITE;
++ *fops = &ftrace_enable_fops;
++ return 1;
++ }
++
++ if (strcmp(name, "filter") == 0) {
++ *mode = TRACE_MODE_WRITE;
++ *fops = &ftrace_event_filter_fops;
++ return 1;
++ }
++ }
++
++ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
++ strcmp(trace_event_name(call), "print") == 0) {
++ if (strcmp(name, "trigger") == 0) {
++ *mode = TRACE_MODE_WRITE;
++ *fops = &event_trigger_fops;
++ return 1;
++ }
++ }
++
++#ifdef CONFIG_PERF_EVENTS
++ if (call->event.type && call->class->reg &&
++ strcmp(name, "id") == 0) {
++ *mode = TRACE_MODE_READ;
++ *data = (void *)(long)call->event.type;
++ *fops = &ftrace_event_id_fops;
++ return 1;
++ }
++#endif
++
++#ifdef CONFIG_HIST_TRIGGERS
++ if (strcmp(name, "hist") == 0) {
++ *mode = TRACE_MODE_READ;
++ *fops = &event_hist_fops;
++ return 1;
++ }
++#endif
++#ifdef CONFIG_HIST_TRIGGERS_DEBUG
++ if (strcmp(name, "hist_debug") == 0) {
++ *mode = TRACE_MODE_READ;
++ *fops = &event_hist_debug_fops;
++ return 1;
++ }
++#endif
++#ifdef CONFIG_TRACE_EVENT_INJECT
++ if (call->event.type && call->class->reg &&
++ strcmp(name, "inject") == 0) {
++ *mode = 0200;
++ *fops = &event_inject_fops;
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++/* The file's refcount is incremented on creation; freeing the "enable" file decrements it */
++static void event_release(const char *name, void *data)
++{
++ struct trace_event_file *file = data;
++
++ event_file_put(file);
++}
++
+ static int
+-event_create_dir(struct dentry *parent, struct trace_event_file *file)
++event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file)
+ {
+ struct trace_event_call *call = file->event_call;
+- struct eventfs_file *ef_subsystem = NULL;
+ struct trace_array *tr = file->tr;
+- struct eventfs_file *ef;
++ struct eventfs_inode *e_events;
++ struct eventfs_inode *ei;
+ const char *name;
++ int nr_entries;
+ int ret;
++ static struct eventfs_entry event_entries[] = {
++ {
++ .name = "enable",
++ .callback = event_callback,
++ .release = event_release,
++ },
++ {
++ .name = "filter",
++ .callback = event_callback,
++ },
++ {
++ .name = "trigger",
++ .callback = event_callback,
++ },
++ {
++ .name = "format",
++ .callback = event_callback,
++ },
++#ifdef CONFIG_PERF_EVENTS
++ {
++ .name = "id",
++ .callback = event_callback,
++ },
++#endif
++#ifdef CONFIG_HIST_TRIGGERS
++ {
++ .name = "hist",
++ .callback = event_callback,
++ },
++#endif
++#ifdef CONFIG_HIST_TRIGGERS_DEBUG
++ {
++ .name = "hist_debug",
++ .callback = event_callback,
++ },
++#endif
++#ifdef CONFIG_TRACE_EVENT_INJECT
++ {
++ .name = "inject",
++ .callback = event_callback,
++ },
++#endif
++ };
+
+ /*
+ * If the trace point header did not define TRACE_SYSTEM
+@@ -2418,29 +2595,20 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
+ if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
+ return -ENODEV;
+
+- ef_subsystem = event_subsystem_dir(tr, call->class->system, file, parent);
+- if (!ef_subsystem)
++ e_events = event_subsystem_dir(tr, call->class->system, file, parent);
++ if (!e_events)
+ return -ENOMEM;
+
++ nr_entries = ARRAY_SIZE(event_entries);
++
+ name = trace_event_name(call);
+- ef = eventfs_add_dir(name, ef_subsystem);
+- if (IS_ERR(ef)) {
++ ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
++ if (IS_ERR(ei)) {
+ pr_warn("Could not create tracefs '%s' directory\n", name);
+ return -1;
+ }
+
+- file->ef = ef;
+-
+- if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+- eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
+- &ftrace_enable_fops);
+-
+-#ifdef CONFIG_PERF_EVENTS
+- if (call->event.type && call->class->reg)
+- eventfs_add_file("id", TRACE_MODE_READ, file->ef,
+- (void *)(long)call->event.type,
+- &ftrace_event_id_fops);
+-#endif
++ file->ei = ei;
+
+ ret = event_define_fields(call);
+ if (ret < 0) {
+@@ -2448,34 +2616,8 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
+ return ret;
+ }
+
+- /*
+- * Only event directories that can be enabled should have
+- * triggers or filters.
+- */
+- if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
+- eventfs_add_file("filter", TRACE_MODE_WRITE, file->ef,
+- file, &ftrace_event_filter_fops);
+-
+- eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
+- file, &event_trigger_fops);
+- }
+-
+-#ifdef CONFIG_HIST_TRIGGERS
+- eventfs_add_file("hist", TRACE_MODE_READ, file->ef, file,
+- &event_hist_fops);
+-#endif
+-#ifdef CONFIG_HIST_TRIGGERS_DEBUG
+- eventfs_add_file("hist_debug", TRACE_MODE_READ, file->ef, file,
+- &event_hist_debug_fops);
+-#endif
+- eventfs_add_file("format", TRACE_MODE_READ, file->ef, call,
+- &ftrace_event_format_fops);
+-
+-#ifdef CONFIG_TRACE_EVENT_INJECT
+- if (call->event.type && call->class->reg)
+- eventfs_add_file("inject", 0200, file->ef, file,
+- &event_inject_fops);
+-#endif
++ /* Gets decremented on freeing of the "enable" file */
++ event_file_get(file);
+
+ return 0;
+ }
+@@ -2803,6 +2945,7 @@ trace_create_new_event(struct trace_event_call *call,
+ atomic_set(&file->tm_ref, 0);
+ INIT_LIST_HEAD(&file->triggers);
+ list_add(&file->list, &tr->events);
++ event_file_get(file);
+
+ return file;
+ }
+@@ -3621,30 +3764,65 @@ static __init int setup_trace_event(char *str)
+ }
+ __setup("trace_event=", setup_trace_event);
+
++static int events_callback(const char *name, umode_t *mode, void **data,
++ const struct file_operations **fops)
++{
++ if (strcmp(name, "enable") == 0) {
++ *mode = TRACE_MODE_WRITE;
++ *fops = &ftrace_tr_enable_fops;
++ return 1;
++ }
++
++ if (strcmp(name, "header_page") == 0)
++ *data = ring_buffer_print_page_header;
++
++ else if (strcmp(name, "header_event") == 0)
++ *data = ring_buffer_print_entry_header;
++
++ else
++ return 0;
++
++ *mode = TRACE_MODE_READ;
++ *fops = &ftrace_show_header_fops;
++ return 1;
++}
++
+ /* Expects to have event_mutex held when called */
+ static int
+ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
+ {
+- struct dentry *d_events;
++ struct eventfs_inode *e_events;
+ struct dentry *entry;
+- int error = 0;
++ int nr_entries;
++ static struct eventfs_entry events_entries[] = {
++ {
++ .name = "enable",
++ .callback = events_callback,
++ },
++ {
++ .name = "header_page",
++ .callback = events_callback,
++ },
++ {
++ .name = "header_event",
++ .callback = events_callback,
++ },
++ };
+
+ entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
+ tr, &ftrace_set_event_fops);
+ if (!entry)
+ return -ENOMEM;
+
+- d_events = eventfs_create_events_dir("events", parent);
+- if (IS_ERR(d_events)) {
++ nr_entries = ARRAY_SIZE(events_entries);
++
++ e_events = eventfs_create_events_dir("events", parent, events_entries,
++ nr_entries, tr);
++ if (IS_ERR(e_events)) {
+ pr_warn("Could not create tracefs 'events' directory\n");
+ return -ENOMEM;
+ }
+
+- error = eventfs_add_events_file("enable", TRACE_MODE_WRITE, d_events,
+- tr, &ftrace_tr_enable_fops);
+- if (error)
+- return -ENOMEM;
+-
+ /* There are not as crucial, just warn if they are not created */
+
+ trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
+@@ -3654,16 +3832,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
+ TRACE_MODE_WRITE, parent, tr,
+ &ftrace_set_event_notrace_pid_fops);
+
+- /* ring buffer internal formats */
+- eventfs_add_events_file("header_page", TRACE_MODE_READ, d_events,
+- ring_buffer_print_page_header,
+- &ftrace_show_header_fops);
+-
+- eventfs_add_events_file("header_event", TRACE_MODE_READ, d_events,
+- ring_buffer_print_entry_header,
+- &ftrace_show_header_fops);
+-
+- tr->event_dir = d_events;
++ tr->event_dir = e_events;
+
+ return 0;
+ }
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 33264e510d161f..0c611b281a5b5f 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -2349,6 +2349,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
+ struct event_filter *filter = NULL;
+ int err;
+
++ if (file->flags & EVENT_FILE_FL_FREED)
++ return -ENODEV;
++
+ if (!strcmp(strstrip(filter_string), "0")) {
+ filter_disable(file);
+ filter = event_filter(file);
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index d06938ae071740..dd16faf0d1500c 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -5609,7 +5609,7 @@ static int hist_show(struct seq_file *m, void *v)
+
+ mutex_lock(&event_mutex);
+
+- event_file = event_file_data(m->private);
++ event_file = event_file_file(m->private);
+ if (unlikely(!event_file)) {
+ ret = -ENODEV;
+ goto out_unlock;
+@@ -5630,10 +5630,12 @@ static int event_hist_open(struct inode *inode, struct file *file)
+ {
+ int ret;
+
+- ret = security_locked_down(LOCKDOWN_TRACEFS);
++ ret = tracing_open_file_tr(inode, file);
+ if (ret)
+ return ret;
+
++ /* Clear private_data to avoid warning in single_open() */
++ file->private_data = NULL;
+ return single_open(file, hist_show, file);
+ }
+
+@@ -5641,7 +5643,7 @@ const struct file_operations event_hist_fops = {
+ .open = event_hist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = tracing_single_release_file_tr,
+ };
+
+ #ifdef CONFIG_HIST_TRIGGERS_DEBUG
+@@ -5886,7 +5888,7 @@ static int hist_debug_show(struct seq_file *m, void *v)
+
+ mutex_lock(&event_mutex);
+
+- event_file = event_file_data(m->private);
++ event_file = event_file_file(m->private);
+ if (unlikely(!event_file)) {
+ ret = -ENODEV;
+ goto out_unlock;
+@@ -5907,10 +5909,12 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
+ {
+ int ret;
+
+- ret = security_locked_down(LOCKDOWN_TRACEFS);
++ ret = tracing_open_file_tr(inode, file);
+ if (ret)
+ return ret;
+
++ /* Clear private_data to avoid warning in single_open() */
++ file->private_data = NULL;
+ return single_open(file, hist_debug_show, file);
+ }
+
+@@ -5918,7 +5922,7 @@ const struct file_operations event_hist_debug_fops = {
+ .open = event_hist_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = tracing_single_release_file_tr,
+ };
+ #endif
+
+diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
+index 8650562bdaa988..a8f076809db4d5 100644
+--- a/kernel/trace/trace_events_inject.c
++++ b/kernel/trace/trace_events_inject.c
+@@ -299,7 +299,7 @@ event_inject_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ strim(buf);
+
+ mutex_lock(&event_mutex);
+- file = event_file_data(filp);
++ file = event_file_file(filp);
+ if (file) {
+ call = file->event_call;
+ size = parse_entry(buf, call, &entry);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 14cb275a0bab01..624e0867316d02 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -441,8 +441,9 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+ if (is_dynamic) {
+ union trace_synth_field *data = &entry->fields[*n_u64];
+
++ len = fetch_store_strlen((unsigned long)str_val);
+ data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
+- data->as_dynamic.len = fetch_store_strlen((unsigned long)str_val);
++ data->as_dynamic.len = len;
+
+ ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
+
+@@ -452,7 +453,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)str_val < TASK_SIZE)
+- ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
++ ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
+ else
+ #endif
+ ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 46439e3bcec4d2..76abc9a45f971a 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -159,7 +159,7 @@ static void *trigger_start(struct seq_file *m, loff_t *pos)
+
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+- event_file = event_file_data(m->private);
++ event_file = event_file_file(m->private);
+ if (unlikely(!event_file))
+ return ERR_PTR(-ENODEV);
+
+@@ -213,7 +213,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
+
+ mutex_lock(&event_mutex);
+
+- if (unlikely(!event_file_data(file))) {
++ if (unlikely(!event_file_file(file))) {
+ mutex_unlock(&event_mutex);
+ return -ENODEV;
+ }
+@@ -293,7 +293,7 @@ static ssize_t event_trigger_regex_write(struct file *file,
+ strim(buf);
+
+ mutex_lock(&event_mutex);
+- event_file = event_file_data(file);
++ event_file = event_file_file(file);
+ if (unlikely(!event_file)) {
+ mutex_unlock(&event_mutex);
+ kfree(buf);
+@@ -1470,8 +1470,10 @@ register_snapshot_trigger(char *glob,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+ {
+- if (tracing_alloc_snapshot_instance(file->tr) != 0)
+- return 0;
++ int ret = tracing_alloc_snapshot_instance(file->tr);
++
++ if (ret < 0)
++ return ret;
+
+ return register_trigger(glob, data, file);
+ }
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index b87f41187c6a92..2461786b1e4d22 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -49,18 +49,6 @@
+ #define EVENT_STATUS_PERF BIT(1)
+ #define EVENT_STATUS_OTHER BIT(7)
+
+-/*
+- * User register flags are not allowed yet, keep them here until we are
+- * ready to expose them out to the user ABI.
+- */
+-enum user_reg_flag {
+- /* Event will not delete upon last reference closing */
+- USER_EVENT_REG_PERSIST = 1U << 0,
+-
+- /* This value or above is currently non-ABI */
+- USER_EVENT_REG_MAX = 1U << 1,
+-};
+-
+ /*
+ * Stores the system name, tables, and locks for a group of events. This
+ * allows isolation for events by various means.
+@@ -214,12 +202,25 @@ static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
+ static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
+ static void user_event_mm_put(struct user_event_mm *mm);
+ static int destroy_user_event(struct user_event *user);
++static bool user_fields_match(struct user_event *user, int argc,
++ const char **argv);
+
+ static u32 user_event_key(char *name)
+ {
+ return jhash(name, strlen(name), 0);
+ }
+
++static bool user_event_capable(u16 reg_flags)
++{
++ /* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
++ if (reg_flags & USER_EVENT_REG_PERSIST) {
++ if (!perfmon_capable())
++ return false;
++ }
++
++ return true;
++}
++
+ static struct user_event *user_event_get(struct user_event *user)
+ {
+ refcount_inc(&user->refcnt);
+@@ -1494,17 +1495,24 @@ static int destroy_user_event(struct user_event *user)
+ }
+
+ static struct user_event *find_user_event(struct user_event_group *group,
+- char *name, u32 *outkey)
++ char *name, int argc, const char **argv,
++ u32 flags, u32 *outkey)
+ {
+ struct user_event *user;
+ u32 key = user_event_key(name);
+
+ *outkey = key;
+
+- hash_for_each_possible(group->register_table, user, node, key)
+- if (!strcmp(EVENT_NAME(user), name))
++ hash_for_each_possible(group->register_table, user, node, key) {
++ if (strcmp(EVENT_NAME(user), name))
++ continue;
++
++ if (user_fields_match(user, argc, argv))
+ return user_event_get(user);
+
++ return ERR_PTR(-EADDRINUSE);
++ }
++
+ return NULL;
+ }
+
+@@ -1811,6 +1819,9 @@ static int user_event_free(struct dyn_event *ev)
+ if (!user_event_last_ref(user))
+ return -EBUSY;
+
++ if (!user_event_capable(user->reg_flags))
++ return -EPERM;
++
+ return destroy_user_event(user);
+ }
+
+@@ -1858,6 +1869,9 @@ static bool user_fields_match(struct user_event *user, int argc,
+ struct list_head *head = &user->fields;
+ int i = 0;
+
++ if (argc == 0)
++ return list_empty(head);
++
+ list_for_each_entry_reverse(field, head, link) {
+ if (!user_field_match(field, argc, argv, &i))
+ return false;
+@@ -1878,10 +1892,8 @@ static bool user_event_match(const char *system, const char *event,
+ match = strcmp(EVENT_NAME(user), event) == 0 &&
+ (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
+
+- if (match && argc > 0)
++ if (match)
+ match = user_fields_match(user, argc, argv);
+- else if (match && argc == 0)
+- match = list_empty(&user->fields);
+
+ return match;
+ }
+@@ -1911,6 +1923,80 @@ static int user_event_trace_register(struct user_event *user)
+ return ret;
+ }
+
++/*
++ * Counts how many ';' in the args lack a trailing space.
++ */
++static int count_semis_no_space(char *args)
++{
++ int count = 0;
++
++ while ((args = strchr(args, ';'))) {
++ args++;
++
++ if (!isspace(*args))
++ count++;
++ }
++
++ return count;
++}
++
++/*
++ * Copies the arguments while ensuring all ';' have a trailing space.
++ */
++static char *insert_space_after_semis(char *args, int count)
++{
++ char *fixed, *pos;
++ int len;
++
++ len = strlen(args) + count;
++ fixed = kmalloc(len + 1, GFP_KERNEL);
++
++ if (!fixed)
++ return NULL;
++
++ pos = fixed;
++
++ /* Insert a space after ';' if there is no trailing space. */
++ while (*args) {
++ *pos = *args++;
++
++ if (*pos++ == ';' && !isspace(*args))
++ *pos++ = ' ';
++ }
++
++ *pos = '\0';
++
++ return fixed;
++}
++
++static char **user_event_argv_split(char *args, int *argc)
++{
++ char **split;
++ char *fixed;
++ int count;
++
++ /* Count how many ';' without a trailing space */
++ count = count_semis_no_space(args);
++
++ /* No fixup is required */
++ if (!count)
++ return argv_split(GFP_KERNEL, args, argc);
++
++ /* We must fixup 'field;field' to 'field; field' */
++ fixed = insert_space_after_semis(args, count);
++
++ if (!fixed)
++ return NULL;
++
++ /* We do a normal split afterwards */
++ split = argv_split(GFP_KERNEL, fixed, argc);
++
++ /* We can free since argv_split makes a copy */
++ kfree(fixed);
++
++ return split;
++}
++
+ /*
+ * Parses the event name, arguments and flags then registers if successful.
+ * The name buffer lifetime is owned by this method for success cases only.
+@@ -1920,51 +2006,47 @@ static int user_event_parse(struct user_event_group *group, char *name,
+ char *args, char *flags,
+ struct user_event **newuser, int reg_flags)
+ {
+- int ret;
+- u32 key;
+ struct user_event *user;
++ char **argv = NULL;
+ int argc = 0;
+- char **argv;
++ int ret;
++ u32 key;
+
+- /* User register flags are not ready yet */
+- if (reg_flags != 0 || flags != NULL)
++ /* Currently don't support any text based flags */
++ if (flags != NULL)
+ return -EINVAL;
+
++ if (!user_event_capable(reg_flags))
++ return -EPERM;
++
++ if (args) {
++ argv = user_event_argv_split(args, &argc);
++
++ if (!argv)
++ return -ENOMEM;
++ }
++
+ /* Prevent dyn_event from racing */
+ mutex_lock(&event_mutex);
+- user = find_user_event(group, name, &key);
++ user = find_user_event(group, name, argc, (const char **)argv,
++ reg_flags, &key);
+ mutex_unlock(&event_mutex);
+
+- if (user) {
+- if (args) {
+- argv = argv_split(GFP_KERNEL, args, &argc);
+- if (!argv) {
+- ret = -ENOMEM;
+- goto error;
+- }
++ if (argv)
++ argv_free(argv);
+
+- ret = user_fields_match(user, argc, (const char **)argv);
+- argv_free(argv);
+-
+- } else
+- ret = list_empty(&user->fields);
+-
+- if (ret) {
+- *newuser = user;
+- /*
+- * Name is allocated by caller, free it since it already exists.
+- * Caller only worries about failure cases for freeing.
+- */
+- kfree(name);
+- } else {
+- ret = -EADDRINUSE;
+- goto error;
+- }
++ if (IS_ERR(user))
++ return PTR_ERR(user);
++
++ if (user) {
++ *newuser = user;
++ /*
++ * Name is allocated by caller, free it since it already exists.
++ * Caller only worries about failure cases for freeing.
++ */
++ kfree(name);
+
+ return 0;
+-error:
+- user_event_put(user, false);
+- return ret;
+ }
+
+ user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
+@@ -2047,22 +2129,33 @@ static int user_event_parse(struct user_event_group *group, char *name,
+ }
+
+ /*
+- * Deletes a previously created event if it is no longer being used.
++ * Deletes previously created events if they are no longer being used.
+ */
+ static int delete_user_event(struct user_event_group *group, char *name)
+ {
+- u32 key;
+- struct user_event *user = find_user_event(group, name, &key);
++ struct user_event *user;
++ struct hlist_node *tmp;
++ u32 key = user_event_key(name);
++ int ret = -ENOENT;
+
+- if (!user)
+- return -ENOENT;
++ /* Attempt to delete all event(s) with the name passed in */
++ hash_for_each_possible_safe(group->register_table, user, tmp, node, key) {
++ if (strcmp(EVENT_NAME(user), name))
++ continue;
+
+- user_event_put(user, true);
++ if (!user_event_last_ref(user))
++ return -EBUSY;
+
+- if (!user_event_last_ref(user))
+- return -EBUSY;
++ if (!user_event_capable(user->reg_flags))
++ return -EPERM;
+
+- return destroy_user_event(user);
++ ret = destroy_user_event(user);
++
++ if (ret)
++ goto out;
++ }
++out:
++ return ret;
+ }
+
+ /*
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index 8bfe23af9c739a..7d2ddbcfa377cf 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -927,11 +927,12 @@ static int parse_symbol_and_return(int argc, const char *argv[],
+ for (i = 2; i < argc; i++) {
+ tmp = strstr(argv[i], "$retval");
+ if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
++ if (is_tracepoint) {
++ trace_probe_log_set_index(i);
++ trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
++ return -EINVAL;
++ }
+ *is_return = true;
+- /*
+- * NOTE: Don't check is_tracepoint here, because it will
+- * be checked when the argument is parsed.
+- */
+ break;
+ }
+ }
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index b791524a6536ac..3bd6071441ade9 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -520,6 +520,8 @@ static void hwlat_hotplug_workfn(struct work_struct *dummy)
+ if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
+ goto out_unlock;
+
++ if (!cpu_online(cpu))
++ goto out_unlock;
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
+ goto out_unlock;
+
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index e834f149695b75..47812aa16bb574 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1020,9 +1020,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
+ /**
+ * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
++ * @kretprobe: Is this a return probe?
+ * @name: The name of the kprobe event
+ * @loc: The location of the kprobe event
+- * @kretprobe: Is this a return probe?
+ * @...: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index bd0d01d00fb9d5..3e2bc029fa8c83 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -228,6 +228,11 @@ static inline struct osnoise_variables *this_cpu_osn_var(void)
+ return this_cpu_ptr(&per_cpu_osnoise_var);
+ }
+
++/*
++ * Protect the interface.
++ */
++static struct mutex interface_lock;
++
+ #ifdef CONFIG_TIMERLAT_TRACER
+ /*
+ * Runtime information for the timer mode.
+@@ -259,14 +264,20 @@ static inline void tlat_var_reset(void)
+ {
+ struct timerlat_variables *tlat_var;
+ int cpu;
++
++ /* Synchronize with the timerlat interfaces */
++ mutex_lock(&interface_lock);
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
++ if (tlat_var->kthread)
++ hrtimer_cancel(&tlat_var->timer);
+ memset(tlat_var, 0, sizeof(*tlat_var));
+ }
++ mutex_unlock(&interface_lock);
+ }
+ #else /* CONFIG_TIMERLAT_TRACER */
+ #define tlat_var_reset() do {} while (0)
+@@ -331,11 +342,6 @@ struct timerlat_sample {
+ };
+ #endif
+
+-/*
+- * Protect the interface.
+- */
+-static struct mutex interface_lock;
+-
+ /*
+ * Tracer data.
+ */
+@@ -1612,6 +1618,7 @@ static int run_osnoise(void)
+
+ static struct cpumask osnoise_cpumask;
+ static struct cpumask save_cpumask;
++static struct cpumask kthread_cpumask;
+
+ /*
+ * osnoise_sleep - sleep until the next period
+@@ -1675,6 +1682,7 @@ static inline int osnoise_migration_pending(void)
+ */
+ mutex_lock(&interface_lock);
+ this_cpu_osn_var()->kthread = NULL;
++ cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask);
+ mutex_unlock(&interface_lock);
+
+ return 1;
+@@ -1945,11 +1953,12 @@ static void stop_kthread(unsigned int cpu)
+ {
+ struct task_struct *kthread;
+
+- kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
++ kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
+ if (kthread) {
+- if (test_bit(OSN_WORKLOAD, &osnoise_options)) {
++ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) &&
++ !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) {
+ kthread_stop(kthread);
+- } else {
++ } else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) {
+ /*
+ * This is a user thread waiting on the timerlat_fd. We need
+ * to close all users, and the best way to guarantee this is
+@@ -1958,7 +1967,6 @@ static void stop_kthread(unsigned int cpu)
+ kill_pid(kthread->thread_pid, SIGKILL, 1);
+ put_task_struct(kthread);
+ }
+- per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+ } else {
+ /* if no workload, just return */
+ if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
+@@ -1967,7 +1975,6 @@ static void stop_kthread(unsigned int cpu)
+ */
+ per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
+ barrier();
+- return;
+ }
+ }
+ }
+@@ -1999,6 +2006,10 @@ static int start_kthread(unsigned int cpu)
+ void *main = osnoise_main;
+ char comm[24];
+
++ /* Do not start a new thread if it is already running */
++ if (per_cpu(per_cpu_osnoise_var, cpu).kthread)
++ return 0;
++
+ if (timerlat_enabled()) {
+ snprintf(comm, 24, "timerlat/%d", cpu);
+ main = timerlat_main;
+@@ -2021,6 +2032,7 @@ static int start_kthread(unsigned int cpu)
+ }
+
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
++ cpumask_set_cpu(cpu, &kthread_cpumask);
+
+ return 0;
+ }
+@@ -2048,8 +2060,15 @@ static int start_per_cpu_kthreads(void)
+ */
+ cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask);
+
+- for_each_possible_cpu(cpu)
+- per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
++ for_each_possible_cpu(cpu) {
++ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) {
++ struct task_struct *kthread;
++
++ kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
++ if (!WARN_ON(!kthread))
++ kthread_stop(kthread);
++ }
++ }
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_kthread(cpu);
+@@ -2078,6 +2097,8 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
+ mutex_lock(&interface_lock);
+ cpus_read_lock();
+
++ if (!cpu_online(cpu))
++ goto out_unlock;
+ if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
+ goto out_unlock;
+
+@@ -2444,6 +2465,9 @@ static int timerlat_fd_open(struct inode *inode, struct file *file)
+ tlat = this_cpu_tmr_var();
+ tlat->count = 0;
+
++ hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
++ tlat->timer.function = timerlat_irq;
++
+ migrate_enable();
+ return 0;
+ };
+@@ -2526,9 +2550,6 @@ timerlat_fd_read(struct file *file, char __user *ubuf, size_t count,
+ tlat->tracing_thread = false;
+ tlat->kthread = current;
+
+- hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+- tlat->timer.function = timerlat_irq;
+-
+ /* Annotate now to drift new period */
+ tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
+
+@@ -2579,7 +2600,8 @@ static int timerlat_fd_release(struct inode *inode, struct file *file)
+ osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
+ tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
+
+- hrtimer_cancel(&tlat_var->timer);
++ if (tlat_var->kthread)
++ hrtimer_cancel(&tlat_var->timer);
+ memset(tlat_var, 0, sizeof(*tlat_var));
+
+ osn_var->sampling = 0;
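The stop_kthread() change above claims the per-CPU kthread pointer with
xchg_relaxed(), so that when two teardown paths race, only the caller that
observes the non-NULL pointer actually stops the thread. A rough userspace
analogue of that claim-by-exchange pattern, using C11 atomics and pthreads
(all names here are hypothetical, not kernel APIs):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_t worker;
static _Atomic(pthread_t *) worker_slot;

static void *worker_fn(void *arg)
{
	(void)arg;
	return NULL;
}

/* Whoever exchanges the slot to NULL first owns the teardown; a racing
 * caller sees NULL and returns without touching the thread at all. */
static void *stop_worker(void *arg)
{
	pthread_t *w = atomic_exchange_explicit(&worker_slot, NULL,
						memory_order_relaxed);

	(void)arg;
	if (w) {
		pthread_join(*w, NULL);
		printf("exactly one stopper joined the worker\n");
	}
	return NULL;
}

int main(void)
{
	pthread_t s1, s2;

	pthread_create(&worker, NULL, worker_fn, NULL);
	atomic_store(&worker_slot, &worker);

	pthread_create(&s1, NULL, stop_worker, NULL);
	pthread_create(&s2, NULL, stop_worker, NULL);
	pthread_join(s1, NULL);
	pthread_join(s2, NULL);
	return 0;
}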
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 4dc74d73fc1df5..ae162ba36a4803 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -553,6 +553,10 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
+ anon_offs = 0;
+ field = btf_find_struct_member(ctx->btf, type, fieldname,
+ &anon_offs);
++ if (IS_ERR(field)) {
++ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
++ return PTR_ERR(field);
++ }
+ if (!field) {
+ trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
+ return -ENOENT;
+@@ -1159,9 +1163,12 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ if (!(ctx->flags & TPARG_FL_TEVENT) &&
+ (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
+ strncmp(arg, "\\\"", 2) == 0)) {
+- /* The type of $comm must be "string", and not an array. */
+- if (parg->count || (t && strcmp(t, "string")))
++ /* The type of $comm must be "string", and not an array type. */
++ if (parg->count || (t && strcmp(t, "string"))) {
++ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
++ NEED_STRING_TYPE);
+ goto out;
++ }
+ parg->type = find_fetch_type("string", ctx->flags);
+ } else
+ parg->type = find_fetch_type(t, ctx->flags);
+@@ -1169,18 +1176,6 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0), BAD_TYPE);
+ goto out;
+ }
+- parg->offset = *size;
+- *size += parg->type->size * (parg->count ?: 1);
+-
+- ret = -ENOMEM;
+- if (parg->count) {
+- len = strlen(parg->type->fmttype) + 6;
+- parg->fmt = kmalloc(len, GFP_KERNEL);
+- if (!parg->fmt)
+- goto out;
+- snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
+- parg->count);
+- }
+
+ code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
+ if (!code)
+@@ -1204,6 +1199,19 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ goto fail;
+ }
+ }
++ parg->offset = *size;
++ *size += parg->type->size * (parg->count ?: 1);
++
++ if (parg->count) {
++ len = strlen(parg->type->fmttype) + 6;
++ parg->fmt = kmalloc(len, GFP_KERNEL);
++ if (!parg->fmt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
++ parg->count);
++ }
+
+ ret = -EINVAL;
+ /* Store operation */
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 850d9ecb6765a8..c1877d0182691c 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -515,7 +515,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ C(BAD_HYPHEN, "Failed to parse single hyphen. Forgot '>'?"), \
+ C(NO_BTF_FIELD, "This field is not found."), \
+ C(BAD_BTF_TID, "Failed to get BTF type info."),\
+- C(BAD_TYPE4STR, "This type does not fit for string."),
++ C(BAD_TYPE4STR, "This type does not fit for string."),\
++	C(NEED_STRING_TYPE,	"$comm and immediate-string only accept string type"),
+
+ #undef C
+ #define C(a, b) TP_ERR_##a
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index c774e560f2f957..3a56e7c8aa4f67 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
+ struct tracing_map_elt *elt = NULL;
+ int idx;
+
+- idx = atomic_inc_return(&map->next_elt);
++ idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
+ if (idx < map->max_elts) {
+ elt = *(TRACING_MAP_ELT(map->elts, idx));
+ if (map->ops && map->ops->elt_init)
+@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
+ }
+
+ memcpy(elt->key, key, map->key_size);
+- entry->val = elt;
++ /*
++ * Ensure the initialization is visible and
++ * publish the elt.
++ */
++ smp_wmb();
++ WRITE_ONCE(entry->val, elt);
+ atomic64_inc(&map->hits);
+
+ return entry->val;
+@@ -694,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map)
+ {
+ unsigned int i;
+
+- atomic_set(&map->next_elt, -1);
++ atomic_set(&map->next_elt, 0);
+ atomic64_set(&map->hits, 0);
+ atomic64_set(&map->drops, 0);
+
+@@ -778,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits,
+
+ map->map_bits = map_bits;
+ map->max_elts = (1 << map_bits);
+- atomic_set(&map->next_elt, -1);
++ atomic_set(&map->next_elt, 0);
+
+ map->map_size = (1 << (map_bits + 1));
+ map->ops = ops;
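Two of the tracing_map hunks above concern lock-free visibility:
get_free_elt() now bounds the index with atomic_fetch_add_unless() so it can
no longer run past max_elts, and __tracing_map_insert() publishes the element
with smp_wmb() + WRITE_ONCE() so a concurrent lockless reader never
dereferences a half-initialised element. The publish side can be approximated
in userspace with a C11 release/acquire pair; note this is only a sketch, and
the kernel reader actually relies on READ_ONCE() plus an address dependency
rather than an acquire load:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct elt {
	int key;
	int val;
};

static _Atomic(struct elt *) slot;	/* the lock-free "hash slot" */
static struct elt storage;

static void *writer(void *arg)
{
	(void)arg;
	storage.key = 1;
	storage.val = 42;
	/* Release store: all initialisation above becomes visible
	 * before the pointer itself does. */
	atomic_store_explicit(&slot, &storage, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	struct elt *e;

	(void)arg;
	/* Acquire load pairs with the release store. */
	while (!(e = atomic_load_explicit(&slot, memory_order_acquire)))
		;
	printf("key=%d val=%d\n", e->key, e->val);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}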
+diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
+index da35e5b7f04738..8800f5acc00717 100644
+--- a/kernel/vhost_task.c
++++ b/kernel/vhost_task.c
+@@ -10,38 +10,32 @@
+
+ enum vhost_task_flags {
+ VHOST_TASK_FLAGS_STOP,
++ VHOST_TASK_FLAGS_KILLED,
+ };
+
+ struct vhost_task {
+ bool (*fn)(void *data);
++ void (*handle_sigkill)(void *data);
+ void *data;
+ struct completion exited;
+ unsigned long flags;
+ struct task_struct *task;
++ /* serialize SIGKILL and vhost_task_stop calls */
++ struct mutex exit_mutex;
+ };
+
+ static int vhost_task_fn(void *data)
+ {
+ struct vhost_task *vtsk = data;
+- bool dead = false;
+
+ for (;;) {
+ bool did_work;
+
+- if (!dead && signal_pending(current)) {
++ if (signal_pending(current)) {
+ struct ksignal ksig;
+- /*
+- * Calling get_signal will block in SIGSTOP,
+- * or clear fatal_signal_pending, but remember
+- * what was set.
+- *
+- * This thread won't actually exit until all
+- * of the file descriptors are closed, and
+- * the release function is called.
+- */
+- dead = get_signal(&ksig);
+- if (dead)
+- clear_thread_flag(TIF_SIGPENDING);
++
++ if (get_signal(&ksig))
++ break;
+ }
+
+ /* mb paired w/ vhost_task_stop */
+@@ -57,7 +51,19 @@ static int vhost_task_fn(void *data)
+ schedule();
+ }
+
++ mutex_lock(&vtsk->exit_mutex);
++ /*
++ * If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL.
++ * When the vhost layer has called vhost_task_stop it's already stopped
++ * new work and flushed.
++ */
++ if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
++ set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
++ vtsk->handle_sigkill(vtsk->data);
++ }
++ mutex_unlock(&vtsk->exit_mutex);
+ complete(&vtsk->exited);
++
+ do_exit(0);
+ }
+
+@@ -78,12 +84,17 @@ EXPORT_SYMBOL_GPL(vhost_task_wake);
+ * @vtsk: vhost_task to stop
+ *
+ * vhost_task_fn ensures the worker thread exits after
+- * VHOST_TASK_FLAGS_SOP becomes true.
++ * VHOST_TASK_FLAGS_STOP becomes true.
+ */
+ void vhost_task_stop(struct vhost_task *vtsk)
+ {
+- set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
+- vhost_task_wake(vtsk);
++ mutex_lock(&vtsk->exit_mutex);
++ if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
++ set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
++ vhost_task_wake(vtsk);
++ }
++ mutex_unlock(&vtsk->exit_mutex);
++
+ /*
+ * Make sure vhost_task_fn is no longer accessing the vhost_task before
+ * freeing it below.
+@@ -96,14 +107,16 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
+ /**
+ * vhost_task_create - create a copy of a task to be used by the kernel
+ * @fn: vhost worker function
+- * @arg: data to be passed to fn
++ * @handle_sigkill: vhost function called when the task is killed
++ * @arg: data to be passed to fn and handle_sigkill
+ * @name: the thread's name
+ *
+ * This returns a specialized task for use by the vhost layer or NULL on
+ * failure. The returned task is inactive, and the caller must fire it up
+ * through vhost_task_start().
+ */
+-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
++struct vhost_task *vhost_task_create(bool (*fn)(void *),
++ void (*handle_sigkill)(void *), void *arg,
+ const char *name)
+ {
+ struct kernel_clone_args args = {
+@@ -122,8 +135,10 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
+ if (!vtsk)
+ return NULL;
+ init_completion(&vtsk->exited);
++ mutex_init(&vtsk->exit_mutex);
+ vtsk->data = arg;
+ vtsk->fn = fn;
++ vtsk->handle_sigkill = handle_sigkill;
+
+ args.fn_arg = vtsk;
+
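The new exit_mutex serialises the two ways a vhost task can go away: the
worker thread taking SIGKILL, and the owner calling vhost_task_stop(). Each
path checks the other's flag under the mutex, so exactly one side runs the
teardown. Reduced to its skeleton, the handshake looks roughly like the
sketch below (flag and function names are invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool stopped, killed;

/* Runs in the worker when it dequeues a fatal signal. */
static void on_kill(void)
{
	pthread_mutex_lock(&exit_mutex);
	if (!stopped) {
		killed = true;
		printf("kill path wins: run the sigkill handler\n");
	}
	pthread_mutex_unlock(&exit_mutex);
}

/* Runs in the owner on an orderly shutdown. */
static void on_stop(void)
{
	pthread_mutex_lock(&exit_mutex);
	if (!killed) {
		stopped = true;
		printf("stop path wins: wake and join the worker\n");
	}
	pthread_mutex_unlock(&exit_mutex);
}

int main(void)
{
	on_kill();	/* whichever path enters first claims the exit */
	on_stop();	/* the other sees the flag and backs off */
	return 0;
}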
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index d0b6b390ee4233..778b4056700ff5 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -331,7 +331,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
+ filter.__reserved != 0)
+ return -EINVAL;
+
+- tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
++ tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
+ if (IS_ERR(tf))
+ return PTR_ERR(tf);
+
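memdup_array_user() differs from the open-coded memdup_user() call it
replaces in that the element count and element size are multiplied with
overflow checking before anything is allocated. Below is a userspace
approximation using the GCC/Clang __builtin_mul_overflow() intrinsic;
memdup_array() here is a hypothetical stand-in, not a kernel or libc API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Refuse the copy when n * size overflows size_t, instead of silently
 * allocating a short buffer and copying past its end. */
static void *memdup_array(const void *src, size_t n, size_t size)
{
	size_t bytes;
	void *p;

	if (__builtin_mul_overflow(n, size, &bytes)) {
		errno = EOVERFLOW;
		return NULL;
	}

	p = malloc(bytes);
	if (p)
		memcpy(p, src, bytes);
	return p;
}

int main(void)
{
	int filters[4] = { 1, 2, 3, 4 };
	int *copy = memdup_array(filters, 4, sizeof(*filters));

	if (copy)
		printf("last element: %d\n", copy[3]);
	free(copy);
	return 0;
}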
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index d145305d95fe81..5cd6d4e2691579 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -283,6 +283,13 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
+ static unsigned long soft_lockup_nmi_warn;
+
++static int __init softlockup_panic_setup(char *str)
++{
++ softlockup_panic = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("softlockup_panic=", softlockup_panic_setup);
++
+ static int __init nowatchdog_setup(char *str)
+ {
+ watchdog_user_enabled = 0;
+diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c
+index 8ea00c4a24b2d9..0052afe18b7fc5 100644
+--- a/kernel/watchdog_perf.c
++++ b/kernel/watchdog_perf.c
+@@ -75,11 +75,15 @@ static bool watchdog_check_timestamp(void)
+ __this_cpu_write(last_timestamp, now);
+ return true;
+ }
+-#else
+-static inline bool watchdog_check_timestamp(void)
++
++static void watchdog_init_timestamp(void)
+ {
+- return true;
++ __this_cpu_write(nmi_rearmed, 0);
++ __this_cpu_write(last_timestamp, ktime_get_mono_fast_ns());
+ }
++#else
++static inline bool watchdog_check_timestamp(void) { return true; }
++static inline void watchdog_init_timestamp(void) { }
+ #endif
+
+ static struct perf_event_attr wd_hw_attr = {
+@@ -147,6 +151,7 @@ void watchdog_hardlockup_enable(unsigned int cpu)
+ if (!atomic_fetch_inc(&watchdog_cpus))
+ pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
+
++ watchdog_init_timestamp();
+ perf_event_enable(this_cpu_read(watchdog_ev));
+ }
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a3522b70218d3a..7fa1c7c9151aef 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1135,8 +1135,12 @@ static bool kick_pool(struct worker_pool *pool)
+ !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
+ struct work_struct *work = list_first_entry(&pool->worklist,
+ struct work_struct, entry);
+- p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
+- get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
++ int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
++ cpu_online_mask);
++ if (wake_cpu < nr_cpu_ids) {
++ p->wake_cpu = wake_cpu;
++ get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
++ }
+ }
+ #endif
+ wake_up_process(p);
+@@ -1684,9 +1688,6 @@ static int wq_select_unbound_cpu(int cpu)
+ pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
+ }
+
+- if (cpumask_empty(wq_unbound_cpumask))
+- return cpu;
+-
+ new_cpu = __this_cpu_read(wq_rr_cpu_last);
+ new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+ if (unlikely(new_cpu >= nr_cpu_ids)) {
+@@ -5622,50 +5623,54 @@ static void work_for_cpu_fn(struct work_struct *work)
+ }
+
+ /**
+- * work_on_cpu - run a function in thread context on a particular cpu
++ * work_on_cpu_key - run a function in thread context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
++ * @key: The lock class key for lock debugging purposes
+ *
+ * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
+ */
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key)
+ {
+ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+
+- INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++ INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
+ schedule_work_on(cpu, &wfc.work);
+ flush_work(&wfc.work);
+ destroy_work_on_stack(&wfc.work);
+ return wfc.ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu);
++EXPORT_SYMBOL_GPL(work_on_cpu_key);
+
+ /**
+- * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function argument
++ * @key: The lock class key for lock debugging purposes
+ *
+ * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
+ * any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
+ */
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key)
+ {
+ long ret = -ENODEV;
+
+ cpus_read_lock();
+ if (cpu_online(cpu))
+- ret = work_on_cpu(cpu, fn, arg);
++ ret = work_on_cpu_key(cpu, fn, arg, key);
+ cpus_read_unlock();
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu_safe);
++EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
+@@ -5792,13 +5797,9 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
+ list_for_each_entry(wq, &workqueues, list) {
+ if (!(wq->flags & WQ_UNBOUND))
+ continue;
+-
+ /* creating multiple pwqs breaks ordering guarantee */
+- if (!list_empty(&wq->pwqs)) {
+- if (wq->flags & __WQ_ORDERED_EXPLICIT)
+- continue;
+- wq->flags &= ~__WQ_ORDERED;
+- }
++ if (wq->flags & __WQ_ORDERED)
++ continue;
+
+ ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
+ if (IS_ERR(ctx)) {
+@@ -6455,10 +6456,18 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
+
+ notrace void wq_watchdog_touch(int cpu)
+ {
++ unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
++ unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
++ unsigned long now = jiffies;
++
+ if (cpu >= 0)
+- per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
++ per_cpu(wq_watchdog_touched_cpu, cpu) = now;
++ else
++ WARN_ONCE(1, "%s should be called with valid CPU", __func__);
+
+- wq_watchdog_touched = jiffies;
++ /* Don't unnecessarily store to global cacheline */
++ if (time_after(now, touch_ts + thresh / 4))
++ WRITE_ONCE(wq_watchdog_touched, jiffies);
+ }
+
+ static void wq_watchdog_set_thresh(unsigned long thresh)
+@@ -6511,6 +6520,17 @@ static inline void wq_watchdog_init(void) { }
+
+ #endif /* CONFIG_WQ_WATCHDOG */
+
++static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
++{
++ if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
++ pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
++ cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
++ return;
++ }
++
++ cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
++}
++
+ /**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+@@ -6530,11 +6550,11 @@ void __init workqueue_init_early(void)
+ BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+
+ BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
+- cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
+- cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+-
++ cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
++ restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
++ restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
+ if (!cpumask_empty(&wq_cmdline_cpumask))
+- cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
++ restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
+
+ pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
+
+diff --git a/lib/Kconfig b/lib/Kconfig
+index c686f4adc1246a..ee365b7402f193 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -539,13 +539,7 @@ config CPUMASK_OFFSTACK
+ stack overflow.
+
+ config FORCE_NR_CPUS
+- bool "Set number of CPUs at compile time"
+- depends on SMP && EXPERT && !COMPILE_TEST
+- help
+- Say Yes if you have NR_CPUS set to an actual number of possible
+- CPUs in your system, not to a default value. This forces the core
+- code to rely on compile-time value and optimize kernel routines
+- better.
++ def_bool !SMP
+
+ config CPU_RMAP
+ bool
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index fa307f93fa2e20..da5513cfc12588 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -373,7 +373,7 @@ config DEBUG_INFO_SPLIT
+ Incompatible with older versions of ccache.
+
+ config DEBUG_INFO_BTF
+- bool "Generate BTF typeinfo"
++ bool "Generate BTF type information"
+ depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+ depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+ depends on BPF_SYSCALL
+@@ -404,7 +404,8 @@ config PAHOLE_HAS_LANG_EXCLUDE
+ using DEBUG_INFO_BTF_MODULES.
+
+ config DEBUG_INFO_BTF_MODULES
+- def_bool y
++ bool "Generate BTF type information for kernel modules"
++ default y
+ depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+ help
+ Generate compact split BTF type information for kernel modules.
+@@ -1730,21 +1731,6 @@ config DEBUG_MAPLE_TREE
+
+ endmenu
+
+-config DEBUG_CREDENTIALS
+- bool "Debug credential management"
+- depends on DEBUG_KERNEL
+- help
+- Enable this to turn on some debug checking for credential
+- management. The additional code keeps track of the number of
+- pointers from task_structs to any given cred struct, and checks to
+- see that this number never exceeds the usage count of the cred
+- struct.
+-
+- Furthermore, if SELinux is enabled, this also checks that the
+- security pointer in the cred struct is never seen to be invalid.
+-
+- If unsure, say N.
+-
+ source "kernel/rcu/Kconfig.debug"
+
+ config DEBUG_WQ_FORCE_RR_CPU
+@@ -2240,6 +2226,7 @@ config TEST_DIV64
+ config TEST_IOV_ITER
+ tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
++ depends on MMU
+ default KUNIT_ALL_TESTS
+ help
+ Enable this to turn on testing of the operation of the I/O iterator
+diff --git a/lib/bootconfig.c b/lib/bootconfig.c
+index c59d26068a6401..97f8911ea339e6 100644
+--- a/lib/bootconfig.c
++++ b/lib/bootconfig.c
+@@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size)
+ return memblock_alloc(size, SMP_CACHE_BYTES);
+ }
+
+-static inline void __init xbc_free_mem(void *addr, size_t size)
++static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
+ {
+- memblock_free(addr, size);
++ if (early)
++ memblock_free(addr, size);
++ else if (addr)
++ memblock_free_late(__pa(addr), size);
+ }
+
+ #else /* !__KERNEL__ */
+@@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size)
+ return malloc(size);
+ }
+
+-static inline void xbc_free_mem(void *addr, size_t size)
++static inline void xbc_free_mem(void *addr, size_t size, bool early)
+ {
+ free(addr);
+ }
+@@ -898,19 +901,20 @@ static int __init xbc_parse_tree(void)
+ }
+
+ /**
+- * xbc_exit() - Clean up all parsed bootconfig
++ * _xbc_exit() - Clean up all parsed bootconfig
++ * @early: Set true if this is called before the buddy system is initialized.
+ *
+ * This clears all data structures of parsed bootconfig on memory.
+ * If you need to reuse xbc_init() with new boot config, you can
+ * use this.
+ */
+-void __init xbc_exit(void)
++void __init _xbc_exit(bool early)
+ {
+- xbc_free_mem(xbc_data, xbc_data_size);
++ xbc_free_mem(xbc_data, xbc_data_size, early);
+ xbc_data = NULL;
+ xbc_data_size = 0;
+ xbc_node_num = 0;
+- xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
++ xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early);
+ xbc_nodes = NULL;
+ brace_index = 0;
+ }
+@@ -963,7 +967,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
+ if (!xbc_nodes) {
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig nodes";
+- xbc_exit();
++ _xbc_exit(true);
+ return -ENOMEM;
+ }
+ memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
+@@ -977,7 +981,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
+ *epos = xbc_err_pos;
+ if (emsg)
+ *emsg = xbc_err_msg;
+- xbc_exit();
++ _xbc_exit(true);
+ } else
+ ret = xbc_node_num;
+
+diff --git a/lib/build_OID_registry b/lib/build_OID_registry
+index d7fc32ea8ac224..8267e8d71338b0 100755
+--- a/lib/build_OID_registry
++++ b/lib/build_OID_registry
+@@ -8,6 +8,7 @@
+ #
+
+ use strict;
++use Cwd qw(abs_path);
+
+ my @names = ();
+ my @oids = ();
+@@ -17,6 +18,8 @@ if ($#ARGV != 1) {
+ exit(2);
+ }
+
++my $abs_srctree = abs_path($ENV{'srctree'});
++
+ #
+ # Open the file to read from
+ #
+@@ -35,7 +38,9 @@ close IN_FILE || die;
+ #
+ open C_FILE, ">$ARGV[1]" or die;
+ print C_FILE "/*\n";
+-print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
++my $scriptname = $0;
++$scriptname =~ s#^\Q$abs_srctree/\E##;
++print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n";
+ print C_FILE " */\n";
+
+ #
+diff --git a/lib/buildid.c b/lib/buildid.c
+index e3a7acdeef0ed4..d3bc3d0528d5c8 100644
+--- a/lib/buildid.c
++++ b/lib/buildid.c
+@@ -18,31 +18,37 @@ static int parse_build_id_buf(unsigned char *build_id,
+ const void *note_start,
+ Elf32_Word note_size)
+ {
+- Elf32_Word note_offs = 0, new_offs;
+-
+- while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+- Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
++ const char note_name[] = "GNU";
++ const size_t note_name_sz = sizeof(note_name);
++ u64 note_off = 0, new_off, name_sz, desc_sz;
++ const char *data;
++
++ while (note_off + sizeof(Elf32_Nhdr) < note_size &&
++ note_off + sizeof(Elf32_Nhdr) > note_off /* overflow */) {
++ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_off);
++
++ name_sz = READ_ONCE(nhdr->n_namesz);
++ desc_sz = READ_ONCE(nhdr->n_descsz);
++
++ new_off = note_off + sizeof(Elf32_Nhdr);
++ if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
++ check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
++ new_off > note_size)
++ break;
+
+ if (nhdr->n_type == BUILD_ID &&
+- nhdr->n_namesz == sizeof("GNU") &&
+- !strcmp((char *)(nhdr + 1), "GNU") &&
+- nhdr->n_descsz > 0 &&
+- nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
+- memcpy(build_id,
+- note_start + note_offs +
+- ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+- nhdr->n_descsz);
+- memset(build_id + nhdr->n_descsz, 0,
+- BUILD_ID_SIZE_MAX - nhdr->n_descsz);
++ name_sz == note_name_sz &&
++ memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
++ desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
++ data = note_start + note_off + ALIGN(note_name_sz, 4);
++ memcpy(build_id, data, desc_sz);
++ memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
+ if (size)
+- *size = nhdr->n_descsz;
++ *size = desc_sz;
+ return 0;
+ }
+- new_offs = note_offs + sizeof(Elf32_Nhdr) +
+- ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+- if (new_offs <= note_offs) /* overflow */
+- break;
+- note_offs = new_offs;
++
++ note_off = new_off;
+ }
+
+ return -EINVAL;
+@@ -71,20 +77,28 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
+ {
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+- int i;
++ __u32 i, phnum;
++
++ /*
++ * FIXME
++	 * Neither the ELF spec nor the ELF loader requires that program
++	 * headers start immediately after the ELF header.
++ */
++ if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
++ return -EINVAL;
+
++ phnum = READ_ONCE(ehdr->e_phnum);
+ /* only supports phdr that fits in one page */
+- if (ehdr->e_phnum >
+- (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
++ if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+- for (i = 0; i < ehdr->e_phnum; ++i) {
++ for (i = 0; i < phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+- page_addr + phdr[i].p_offset,
+- phdr[i].p_filesz))
++ page_addr + READ_ONCE(phdr[i].p_offset),
++ READ_ONCE(phdr[i].p_filesz)))
+ return 0;
+ }
+ return -EINVAL;
+@@ -96,20 +110,28 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
+ {
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+- int i;
++ __u32 i, phnum;
++
++ /*
++ * FIXME
++	 * Neither the ELF spec nor the ELF loader requires that program
++	 * headers start immediately after the ELF header.
++ */
++ if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
++ return -EINVAL;
+
++ phnum = READ_ONCE(ehdr->e_phnum);
+ /* only supports phdr that fits in one page */
+- if (ehdr->e_phnum >
+- (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
++ if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+- for (i = 0; i < ehdr->e_phnum; ++i) {
++ for (i = 0; i < phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+- page_addr + phdr[i].p_offset,
+- phdr[i].p_filesz))
++ page_addr + READ_ONCE(phdr[i].p_offset),
++ READ_ONCE(phdr[i].p_filesz)))
+ return 0;
+ }
+ return -EINVAL;
+@@ -138,6 +160,10 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
++ if (!PageUptodate(page)) {
++ put_page(page);
++ return -EFAULT;
++ }
+
+ ret = -EINVAL;
+ page_addr = kmap_atomic(page);
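The rewritten note parser walks untrusted ELF data with a 64-bit cursor and
check_add_overflow(), so hostile n_namesz/n_descsz values can no longer wrap
the offset back into already-visited memory. The bounded advance reduces to a
few lines; this userspace sketch uses __builtin_add_overflow(), advance() is
a hypothetical helper, and 12 is sizeof(Elf32_Nhdr):

#include <stdint.h>
#include <stdio.h>

/* Sizes come from 32-bit ELF note fields, so the 4-byte alignment below
 * cannot itself wrap once the values are widened to 64 bits. */
static int advance(uint64_t off, uint32_t name_sz, uint32_t desc_sz,
		   uint64_t region, uint64_t *next)
{
	uint64_t o;

	if (__builtin_add_overflow(off, 12u, &o) ||
	    __builtin_add_overflow(o, ((uint64_t)name_sz + 3) & ~3ULL, &o) ||
	    __builtin_add_overflow(o, ((uint64_t)desc_sz + 3) & ~3ULL, &o) ||
	    o > region)
		return -1;	/* malformed or truncated note */

	*next = o;
	return 0;
}

int main(void)
{
	uint64_t next;

	/* A name size that would run far past a 4 KiB note region. */
	if (advance(0, 0xffffffffu, 16, 4096, &next))
		printf("rejected malformed note\n");
	return 0;
}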
+diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
+index d4572dbc914539..705b82736be089 100644
+--- a/lib/cmdline_kunit.c
++++ b/lib/cmdline_kunit.c
+@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ n, e[0], r[0]);
+
+ p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+- KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
++ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r);
+ }
+
+ static void cmdline_test_range(struct kunit *test)
+diff --git a/lib/cpumask.c b/lib/cpumask.c
+index a7fd02b5ae264c..34335c1e72653d 100644
+--- a/lib/cpumask.c
++++ b/lib/cpumask.c
+@@ -146,9 +146,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
+ /* Wrap: we always want a cpu. */
+ i %= num_online_cpus();
+
+- cpu = (node == NUMA_NO_NODE) ?
+- cpumask_nth(i, cpu_online_mask) :
+- sched_numa_find_nth_cpu(cpu_online_mask, i, node);
++ cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);
+
+ WARN_ON(cpu >= nr_cpu_ids);
+ return cpu;
+diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
+index 40f5908e57a4f0..e16dca1e23d520 100644
+--- a/lib/crypto/mpi/ec.c
++++ b/lib/crypto/mpi/ec.c
+@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ ctx->a = mpi_copy(a);
+ ctx->b = mpi_copy(b);
+
++ ctx->d = NULL;
++ ctx->t.two_inv_p = NULL;
++
+ ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+
+ mpi_ec_get_reset(ctx);
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index a517256a270b71..9d401355d560d2 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -141,13 +141,14 @@ static void fill_pool(void)
+ * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+ * sections.
+ */
+- while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
++ while (READ_ONCE(obj_nr_tofree) &&
++ READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ /*
+ * Recheck with the lock held as the worker thread might have
+ * won the race and freed the global free list already.
+ */
+- while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
++ while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
+@@ -620,9 +621,8 @@ static void debug_objects_fill_pool(void)
+ static void
+ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
+ {
+- enum debug_obj_state state;
++ struct debug_obj *obj, o;
+ struct debug_bucket *db;
+- struct debug_obj *obj;
+ unsigned long flags;
+
+ debug_objects_fill_pool();
+@@ -643,24 +643,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_INIT;
+- break;
+-
+- case ODEBUG_STATE_ACTIVE:
+- state = obj->state;
+- raw_spin_unlock_irqrestore(&db->lock, flags);
+- debug_print_object(obj, "init");
+- debug_object_fixup(descr->fixup_init, addr, state);
+- return;
+-
+- case ODEBUG_STATE_DESTROYED:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+- debug_print_object(obj, "init");
+ return;
+ default:
+ break;
+ }
+
++ o = *obj;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
++ debug_print_object(&o, "init");
++
++ if (o.state == ODEBUG_STATE_ACTIVE)
++ debug_object_fixup(descr->fixup_init, addr, o.state);
+ }
+
+ /**
+@@ -701,11 +695,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
+ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ {
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+- enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+- int ret;
+
+ if (!debug_objects_enabled)
+ return 0;
+@@ -717,49 +709,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+- if (likely(!IS_ERR_OR_NULL(obj))) {
+- bool print_object = false;
+-
++ if (unlikely(!obj)) {
++ raw_spin_unlock_irqrestore(&db->lock, flags);
++ debug_objects_oom();
++ return 0;
++ } else if (likely(!IS_ERR(obj))) {
+ switch (obj->state) {
+- case ODEBUG_STATE_INIT:
+- case ODEBUG_STATE_INACTIVE:
+- obj->state = ODEBUG_STATE_ACTIVE;
+- ret = 0;
+- break;
+-
+ case ODEBUG_STATE_ACTIVE:
+- state = obj->state;
+- raw_spin_unlock_irqrestore(&db->lock, flags);
+- debug_print_object(obj, "activate");
+- ret = debug_object_fixup(descr->fixup_activate, addr, state);
+- return ret ? 0 : -EINVAL;
+-
+ case ODEBUG_STATE_DESTROYED:
+- print_object = true;
+- ret = -EINVAL;
++ o = *obj;
+ break;
++ case ODEBUG_STATE_INIT:
++ case ODEBUG_STATE_INACTIVE:
++ obj->state = ODEBUG_STATE_ACTIVE;
++ fallthrough;
+ default:
+- ret = 0;
+- break;
++ raw_spin_unlock_irqrestore(&db->lock, flags);
++ return 0;
+ }
+- raw_spin_unlock_irqrestore(&db->lock, flags);
+- if (print_object)
+- debug_print_object(obj, "activate");
+- return ret;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
++ debug_print_object(&o, "activate");
+
+- /* If NULL the allocation has hit OOM */
+- if (!obj) {
+- debug_objects_oom();
+- return 0;
++ switch (o.state) {
++ case ODEBUG_STATE_ACTIVE:
++ case ODEBUG_STATE_NOTAVAILABLE:
++ if (debug_object_fixup(descr->fixup_activate, addr, o.state))
++ return 0;
++ fallthrough;
++ default:
++ return -EINVAL;
+ }
+-
+- /* Object is neither static nor tracked. It's not initialized */
+- debug_print_object(&o, "activate");
+- ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+- return ret ? 0 : -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(debug_object_activate);
+
+@@ -770,10 +751,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
+ */
+ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
+ {
++ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+- bool print_object = false;
+
+ if (!debug_objects_enabled)
+ return;
+@@ -785,33 +766,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
++ case ODEBUG_STATE_DESTROYED:
++ break;
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ case ODEBUG_STATE_ACTIVE:
+- if (!obj->astate)
+- obj->state = ODEBUG_STATE_INACTIVE;
+- else
+- print_object = true;
+- break;
+-
+- case ODEBUG_STATE_DESTROYED:
+- print_object = true;
+- break;
++ if (obj->astate)
++ break;
++ obj->state = ODEBUG_STATE_INACTIVE;
++ fallthrough;
+ default:
+- break;
++ raw_spin_unlock_irqrestore(&db->lock, flags);
++ return;
+ }
++ o = *obj;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+- if (!obj) {
+- struct debug_obj o = { .object = addr,
+- .state = ODEBUG_STATE_NOTAVAILABLE,
+- .descr = descr };
+-
+- debug_print_object(&o, "deactivate");
+- } else if (print_object) {
+- debug_print_object(obj, "deactivate");
+- }
++ debug_print_object(&o, "deactivate");
+ }
+ EXPORT_SYMBOL_GPL(debug_object_deactivate);
+
+@@ -822,11 +794,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
+ */
+ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
+ {
+- enum debug_obj_state state;
++ struct debug_obj *obj, o;
+ struct debug_bucket *db;
+- struct debug_obj *obj;
+ unsigned long flags;
+- bool print_object = false;
+
+ if (!debug_objects_enabled)
+ return;
+@@ -836,32 +806,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+- if (!obj)
+- goto out_unlock;
++ if (!obj) {
++ raw_spin_unlock_irqrestore(&db->lock, flags);
++ return;
++ }
+
+ switch (obj->state) {
++ case ODEBUG_STATE_ACTIVE:
++ case ODEBUG_STATE_DESTROYED:
++ break;
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_DESTROYED;
+- break;
+- case ODEBUG_STATE_ACTIVE:
+- state = obj->state;
++ fallthrough;
++ default:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+- debug_print_object(obj, "destroy");
+- debug_object_fixup(descr->fixup_destroy, addr, state);
+ return;
+-
+- case ODEBUG_STATE_DESTROYED:
+- print_object = true;
+- break;
+- default:
+- break;
+ }
+-out_unlock:
++
++ o = *obj;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+- if (print_object)
+- debug_print_object(obj, "destroy");
++ debug_print_object(&o, "destroy");
++
++ if (o.state == ODEBUG_STATE_ACTIVE)
++ debug_object_fixup(descr->fixup_destroy, addr, o.state);
+ }
+ EXPORT_SYMBOL_GPL(debug_object_destroy);
+
+@@ -872,9 +841,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
+ */
+ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
+ {
+- enum debug_obj_state state;
++ struct debug_obj *obj, o;
+ struct debug_bucket *db;
+- struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+@@ -885,24 +853,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+- if (!obj)
+- goto out_unlock;
++ if (!obj) {
++ raw_spin_unlock_irqrestore(&db->lock, flags);
++ return;
++ }
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+- state = obj->state;
+- raw_spin_unlock_irqrestore(&db->lock, flags);
+- debug_print_object(obj, "free");
+- debug_object_fixup(descr->fixup_free, addr, state);
+- return;
++ break;
+ default:
+ hlist_del(&obj->node);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ free_object(obj);
+ return;
+ }
+-out_unlock:
++
++ o = *obj;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
++ debug_print_object(&o, "free");
++
++ debug_object_fixup(descr->fixup_free, addr, o.state);
+ }
+ EXPORT_SYMBOL_GPL(debug_object_free);
+
+@@ -954,10 +924,10 @@ void
+ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
+ unsigned int expect, unsigned int next)
+ {
++ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+- bool print_object = false;
+
+ if (!debug_objects_enabled)
+ return;
+@@ -970,28 +940,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+- if (obj->astate == expect)
+- obj->astate = next;
+- else
+- print_object = true;
+- break;
+-
++ if (obj->astate != expect)
++ break;
++ obj->astate = next;
++ raw_spin_unlock_irqrestore(&db->lock, flags);
++ return;
+ default:
+- print_object = true;
+ break;
+ }
++ o = *obj;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+- if (!obj) {
+- struct debug_obj o = { .object = addr,
+- .state = ODEBUG_STATE_NOTAVAILABLE,
+- .descr = descr };
+-
+- debug_print_object(&o, "active_state");
+- } else if (print_object) {
+- debug_print_object(obj, "active_state");
+- }
++ debug_print_object(&o, "active_state");
+ }
+ EXPORT_SYMBOL_GPL(debug_object_active_state);
+
+@@ -999,12 +960,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
+ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+ {
+ unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+- const struct debug_obj_descr *descr;
+- enum debug_obj_state state;
++ int cnt, objs_checked = 0;
++ struct debug_obj *obj, o;
+ struct debug_bucket *db;
+ struct hlist_node *tmp;
+- struct debug_obj *obj;
+- int cnt, objs_checked = 0;
+
+ saddr = (unsigned long) address;
+ eaddr = saddr + size;
+@@ -1026,12 +985,10 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+- descr = obj->descr;
+- state = obj->state;
++ o = *obj;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+- debug_print_object(obj, "free");
+- debug_object_fixup(descr->fixup_free,
+- (void *) oaddr, state);
++ debug_print_object(&o, "free");
++ debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
+ goto repeat;
+ default:
+ hlist_del(&obj->node);
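Nearly every debugobjects hunk above applies one pattern: copy the tracked
object into an on-stack struct debug_obj while the bucket lock is held, drop
the lock, then print or fix up from the snapshot, because the live object may
be freed or mutated the instant the lock is released. The pattern in
miniature, with pthreads standing in for the raw spinlock (all names are
hypothetical):

#include <pthread.h>
#include <stdio.h>

struct tracked {
	int state;
	const char *name;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracked shared = { 1, "timer" };

static void report(void)
{
	struct tracked o;

	/* Snapshot under the lock... */
	pthread_mutex_lock(&lock);
	o = shared;
	pthread_mutex_unlock(&lock);

	/* ...then do the slow reporting outside it: the live object may
	 * change or vanish, but the stack copy stays consistent. */
	printf("object %s in state %d\n", o.name, o.state);
}

int main(void)
{
	report();
	return 0;
}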
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 3518e7394eca8e..ca736166f10009 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
+ RUNB) */
+ symCount = symTotal+2;
+ for (j = 0; j < groupCount; j++) {
+- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
++ unsigned char length[MAX_SYMBOLS];
++ unsigned short temp[MAX_HUFCODE_BITS+1];
+ int minLen, maxLen, pp;
+ /* Read Huffman code lengths for each symbol. They're
+ stored in a way similar to mtf; record a starting
+diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
+index 6fba6423cc10b5..a5a687e1c91926 100644
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -302,7 +302,11 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
+ } else {
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+- BUG_ON(end == buf);
++ if (end == buf) {
++ pr_err("parse err after word:%d=%s\n", nwords,
++ nwords ? words[nwords - 1] : "<none>");
++ return -EINVAL;
++ }
+ }
+
+ /* `buf' is start of word, `end' is one past its end */
+diff --git a/lib/errname.c b/lib/errname.c
+index 67739b174a8cc4..0c336b0f12f604 100644
+--- a/lib/errname.c
++++ b/lib/errname.c
+@@ -111,9 +111,6 @@ static const char *names_0[] = {
+ E(ENOSPC),
+ E(ENOSR),
+ E(ENOSTR),
+-#ifdef ENOSYM
+- E(ENOSYM),
+-#endif
+ E(ENOSYS),
+ E(ENOTBLK),
+ E(ENOTCONN),
+@@ -144,9 +141,6 @@ static const char *names_0[] = {
+ #endif
+ E(EREMOTE),
+ E(EREMOTEIO),
+-#ifdef EREMOTERELEASE
+- E(EREMOTERELEASE),
+-#endif
+ E(ERESTART),
+ E(ERFKILL),
+ E(EROFS),
+diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
+index c8c33cbaae9ec9..24f8d6fda2b3bb 100644
+--- a/lib/fortify_kunit.c
++++ b/lib/fortify_kunit.c
+@@ -228,28 +228,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+ \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
+- vfree(p)); \
++ kvfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
+- vfree(p)); \
++ kvfree(p)); \
+ \
+ prev_size = (expected_pages) * PAGE_SIZE; \
+ orig = kvmalloc(prev_size, gfp); \
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index f25eb111c0516f..78f081d695d0b7 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ v = new_root;
+ new_node = NULL;
++ } else {
++ new_node->children[0] = NULL;
+ }
+ }
+
+@@ -166,6 +168,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
++
++ if (iter->offset == SIZE_MAX)
++ return NULL;
++
+ restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+@@ -184,10 +190,17 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
++ size_t objs_per_ptr = genradix_depth_size(level);
++
++ if (iter->offset + objs_per_ptr < iter->offset) {
++ iter->offset = SIZE_MAX;
++ iter->pos = SIZE_MAX;
++ return NULL;
++ }
++
+ i++;
+- iter->offset = round_down(iter->offset +
+- genradix_depth_size(level),
+- genradix_depth_size(level));
++ iter->offset = round_down(iter->offset + objs_per_ptr,
++ objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) *
+ objs_per_page;
+ if (i == GENRADIX_ARY)
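The iterator fix above parks iter->offset at SIZE_MAX once an advance would
wrap, and the new early check in __genradix_iter_peek() then turns every
later call into "end of iteration" instead of silently restarting near offset
zero. In isolation the guard is just an unsigned wrap test plus a sentinel,
roughly:

#include <stdint.h>
#include <stdio.h>

/* Advance an unsigned cursor; on wraparound, park it at SIZE_MAX so all
 * subsequent peeks report the end rather than a bogus small offset. */
static size_t advance_offset(size_t offset, size_t step)
{
	if (offset + step < offset)	/* unsigned overflow check */
		return SIZE_MAX;
	return offset + step;
}

int main(void)
{
	size_t off = SIZE_MAX - 16;

	off = advance_offset(off, 4096);
	if (off == SIZE_MAX)
		printf("iterator terminated instead of wrapping\n");
	return 0;
}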
+diff --git a/lib/group_cpus.c b/lib/group_cpus.c
+index aa3f6815bb1240..ee272c4cefcc13 100644
+--- a/lib/group_cpus.c
++++ b/lib/group_cpus.c
+@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
+ if (!masks)
+ goto fail_node_to_cpumask;
+
+- /* Stabilize the cpumasks */
+- cpus_read_lock();
+ build_node_to_cpumask(node_to_cpumask);
+
++ /*
++	 * Make a local cache of 'cpu_present_mask' so that the two-stage
++	 * spread observes a consistent 'cpu_present_mask' without holding
++	 * the CPU hotplug lock, reducing the risk of deadlock against the
++	 * CPU hotplug code.
++	 *
++	 * CPU hotplug may still occur while 'cpu_present_mask' is being
++	 * read, but we can live with that: it only affects whether a
++	 * hotplugged CPU is handled in the 1st or the 2nd stage, and either
++	 * way is correct from the API user's viewpoint, since the 2-stage
++	 * spread is merely an optimization.
++ */
++ cpumask_copy(npresmsk, data_race(cpu_present_mask));
++
+ /* grouping present CPUs first */
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+- cpu_present_mask, nmsk, masks);
++ npresmsk, nmsk, masks);
+ if (ret < 0)
+ goto fail_build_affinity;
+ nr_present = ret;
+@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
+ curgrp = 0;
+ else
+ curgrp = nr_present;
+- cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
++ cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (ret >= 0)
+ nr_others = ret;
+
+ fail_build_affinity:
+- cpus_read_unlock();
+-
+ if (ret >= 0)
+ WARN_ON(nr_present + nr_others < numgrps);
+
+diff --git a/lib/idr.c b/lib/idr.c
+index 13f2758c237735..da36054c3ca020 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
+ goto delete;
+ xas_store(&xas, xa_mk_value(v));
+ } else {
+- if (!test_bit(bit, bitmap->bitmap))
++ if (!bitmap || !test_bit(bit, bitmap->bitmap))
+ goto err;
+ __clear_bit(bit, bitmap->bitmap);
+ xas_set_mark(&xas, XA_FREE_MARK);
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 59dbcbdb1c916d..72fa20f405f152 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -74,10 +74,12 @@ static int create_dir(struct kobject *kobj)
+ if (error)
+ return error;
+
+- error = sysfs_create_groups(kobj, ktype->default_groups);
+- if (error) {
+- sysfs_remove_dir(kobj);
+- return error;
++ if (ktype) {
++ error = sysfs_create_groups(kobj, ktype->default_groups);
++ if (error) {
++ sysfs_remove_dir(kobj);
++ return error;
++ }
+ }
+
+ /*
+@@ -589,7 +591,8 @@ static void __kobject_del(struct kobject *kobj)
+ sd = kobj->sd;
+ ktype = get_ktype(kobj);
+
+- sysfs_remove_groups(kobj, ktype->default_groups);
++ if (ktype)
++ sysfs_remove_groups(kobj, ktype->default_groups);
+
+ /* send "remove" if the caller did not do it but sent "add" */
+ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+@@ -666,6 +669,10 @@ static void kobject_cleanup(struct kobject *kobj)
+ pr_debug("'%s' (%p): %s, parent %p\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent);
+
++ if (t && !t->release)
++ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
++ kobject_name(kobj), kobj);
++
+ /* remove from sysfs if the caller did not do it */
+ if (kobj->state_in_sysfs) {
+ pr_debug("'%s' (%p): auto cleanup kobject_del\n",
+@@ -676,13 +683,10 @@ static void kobject_cleanup(struct kobject *kobj)
+ parent = NULL;
+ }
+
+- if (t->release) {
++ if (t && t->release) {
+ pr_debug("'%s' (%p): calling ktype release\n",
+ kobject_name(kobj), kobj);
+ t->release(kobj);
+- } else {
+- pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+- kobject_name(kobj), kobj);
+ }
+
+ /* free name if we allocated it */
+@@ -1056,7 +1060,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa
+ {
+ const struct kobj_ns_type_operations *ops = NULL;
+
+- if (parent && parent->ktype->child_ns_type)
++ if (parent && parent->ktype && parent->ktype->child_ns_type)
+ ops = parent->ktype->child_ns_type(parent);
+
+ return ops;
+diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
+index 7c44b7ae4c5c34..d397b1ad5ccf01 100644
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -432,8 +432,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
+ len = strlen(env->envp[i]) + 1;
+
+ if (i != env->envp_idx - 1) {
++ /* @env->envp[] contains pointers to @env->buf[]
++ * with @env->buflen chars, and we are removing
++ * variable MODALIAS here pointed by @env->envp[i]
++ * with length @len as shown below:
++ *
++ * 0 @env->buf[] @env->buflen
++ * ---------------------------------------------
++ * ^ ^ ^ ^
++ * | |-> @len <-| target block |
++ * @env->envp[0] @env->envp[i] @env->envp[i + 1]
++ *
++ * so the "target block" indicated above is moved
++		 * backward by @len, and the correct size to move is
++ * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
++ */
+ memmove(env->envp[i], env->envp[i + 1],
+- env->buflen - len);
++ env->buflen - (env->envp[i + 1] - env->envp[0]));
+
+ for (j = i; j < env->envp_idx - 1; j++)
+ env->envp[j] = env->envp[j + 1] - len;
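The memmove() fix matters because the old length, @env->buflen - len,
measured from the start of the buffer rather than from the entry being
removed, so the copy overran the environment block. The corrected length is
derived from pointer offsets. A small userspace sketch of removing one entry
from a packed, NUL-separated buffer (the contents are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Packed block, like @env->buf[]: entries separated by NULs. */
	char buf[] = "A=1\0MODALIAS=x\0B=2\0";
	char *entry = buf + 4;			/* "MODALIAS=x" */
	char *next = entry + strlen(entry) + 1;	/* "B=2" */
	size_t used = sizeof(buf) - 1;		/* bytes in use */

	/* Move the tail back over the removed entry; the length is
	 * used - (next - buf), exactly the bytes that follow it. */
	memmove(entry, next, used - (size_t)(next - buf));

	printf("%s %s\n", buf, buf + 4);	/* prints "A=1 B=2" */
	return 0;
}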
+diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
+index 22c5c496a68f54..35ddb329dad61c 100644
+--- a/lib/kunit/debugfs.c
++++ b/lib/kunit/debugfs.c
+@@ -53,12 +53,14 @@ static void debugfs_print_result(struct seq_file *seq,
+ static int debugfs_print_results(struct seq_file *seq, void *v)
+ {
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+- enum kunit_status success = kunit_suite_has_succeeded(suite);
++ enum kunit_status success;
+ struct kunit_case *test_case;
+
+ if (!suite)
+ return 0;
+
++ success = kunit_suite_has_succeeded(suite);
++
+ /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
+ seq_puts(seq, "KTAP version 1\n");
+ seq_puts(seq, "1..1\n");
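The debugfs hunk is a use-before-NULL-check fix: kunit_suite_has_succeeded(suite) was called in the initializer, before the if (!suite) test. A minimal sketch of the safe ordering, with hypothetical suite types:

#include <stddef.h>
#include <stdio.h>

struct suite { int passed; };

static int suite_has_succeeded(const struct suite *s)
{
	return s->passed;	/* dereferences s: caller must check NULL */
}

static int print_results(const struct suite *s)
{
	int success;	/* deliberately NOT initialized from s here */

	if (!s)
		return 0;

	success = suite_has_succeeded(s);	/* s known non-NULL now */
	printf("suite %s\n", success ? "passed" : "failed");
	return success;
}

int main(void)
{
	struct suite ok = { .passed = 1 };

	print_results(NULL);	/* safe: no dereference before the check */
	return print_results(&ok) ? 0 : 1;
}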
+diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
+index a6348489d45fe8..51013feba58b13 100644
+--- a/lib/kunit/executor.c
++++ b/lib/kunit/executor.c
+@@ -137,11 +137,17 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
+ {
+ struct kunit_suite * const *suites;
+
+- for (suites = suite_set.start; suites < suite_set.end; suites++)
++ for (suites = suite_set.start; suites < suite_set.end; suites++) {
++ kfree((*suites)->test_cases);
+ kfree(*suites);
++ }
+ kfree(suite_set.start);
+ }
+
++/*
++ * Filter and reallocate test suites. Must return the filtered test suites set
++ * allocated at a valid virtual address or NULL in case of error.
++ */
+ struct kunit_suite_set
+ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+@@ -155,10 +161,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ struct kunit_suite_set filtered = {NULL, NULL};
+ struct kunit_glob_filter parsed_glob;
+ struct kunit_attr_filter *parsed_filters = NULL;
++ struct kunit_suite * const *suites;
+
+ const size_t max = suite_set->end - suite_set->start;
+
+- copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
++ copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL);
+ if (!copy) { /* won't be able to run anything, return an empty set */
+ return filtered;
+ }
+@@ -193,7 +200,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ parsed_glob.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+- goto free_parsed_filters;
++ goto free_filtered_suite;
+ }
+ }
+ if (filter_count > 0 && parsed_filters != NULL) {
+@@ -210,11 +217,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ filtered_suite = new_filtered_suite;
+
+ if (*err)
+- goto free_parsed_filters;
++ goto free_filtered_suite;
+
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+- goto free_parsed_filters;
++ goto free_filtered_suite;
+ }
+ if (!filtered_suite)
+ break;
+@@ -229,6 +236,14 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ filtered.start = copy_start;
+ filtered.end = copy;
+
++free_filtered_suite:
++ if (*err) {
++ for (suites = copy_start; suites < copy; suites++) {
++ kfree((*suites)->test_cases);
++ kfree(*suites);
++ }
++ }
++
+ free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+@@ -241,7 +256,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
+
+ free_copy:
+ if (*err)
+- kfree(copy);
++ kfree(copy_start);
+
+ return filtered;
+ }
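The executor hunks change ownership and unwinding: the copied array is now zero-initialized with kcalloc() so a partially built set can be walked safely, each copied suite's test_cases array is freed together with the suite, and error paths jump to a label that tears down everything built so far. A userspace sketch of that allocate/unwind shape, with hypothetical names:

#include <stdlib.h>

struct suite { int *cases; };

static struct suite **copy_suites(size_t n, int *err)
{
	/* calloc, not malloc: unused slots stay NULL for the unwinder */
	struct suite **copy = calloc(n, sizeof(*copy));
	size_t i;

	*err = 0;
	if (!copy)
		goto fail;

	for (i = 0; i < n; i++) {
		copy[i] = calloc(1, sizeof(**copy));
		if (!copy[i])
			goto free_partial;
		copy[i]->cases = calloc(4, sizeof(int));
		if (!copy[i]->cases)
			goto free_partial;
	}
	return copy;

free_partial:
	for (i = 0; i < n && copy[i]; i++) {
		free(copy[i]->cases);	/* free the owned sub-allocation too */
		free(copy[i]);
	}
	free(copy);
fail:
	*err = -1;
	return NULL;
}

int main(void)
{
	int err;
	struct suite **s = copy_suites(3, &err);

	if (s) {
		for (size_t i = 0; i < 3; i++) {
			free(s[i]->cases);
			free(s[i]);
		}
		free(s);
	}
	return err ? 1 : 0;
}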
+diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
+index b4f6f96b28445f..3f7f967e3688ee 100644
+--- a/lib/kunit/executor_test.c
++++ b/lib/kunit/executor_test.c
+@@ -9,7 +9,7 @@
+ #include <kunit/test.h>
+ #include <kunit/attributes.h>
+
+-static void kfree_at_end(struct kunit *test, const void *to_free);
++static void free_suite_set_at_end(struct kunit *test, const void *to_free);
+ static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name,
+ struct kunit_case *test_cases);
+@@ -56,7 +56,7 @@ static void filter_suites_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+@@ -82,7 +82,7 @@ static void filter_suites_test_glob_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+@@ -109,7 +109,7 @@ static void filter_suites_to_empty_test(struct kunit *test)
+
+ got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start); /* just in case */
++ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+@@ -129,7 +129,7 @@ static void parse_filter_attr_test(struct kunit *test)
+ GFP_KERNEL);
+ for (j = 0; j < filter_count; j++) {
+ parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
+- KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
++ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
+ }
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
+@@ -172,7 +172,7 @@ static void filter_attr_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have normal_suite */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+@@ -200,7 +200,7 @@ static void filter_attr_empty_test(struct kunit *test)
+
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start); /* just in case */
++ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+@@ -222,7 +222,7 @@ static void filter_attr_skip_test(struct kunit *test)
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+- kfree_at_end(test, got.start);
++ free_suite_set_at_end(test, &got);
+
+ /* Validate we have both the slow and normal test */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+@@ -256,18 +256,26 @@ kunit_test_suites(&executor_test_suite);
+
+ /* Test helpers */
+
+-/* Use the resource API to register a call to kfree(to_free).
++static void free_suite_set(void *suite_set)
++{
++ kunit_free_suite_set(*(struct kunit_suite_set *)suite_set);
++ kfree(suite_set);
++}
++
++/* Use the resource API to register a call to free_suite_set.
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+-static void kfree_at_end(struct kunit *test, const void *to_free)
++static void free_suite_set_at_end(struct kunit *test, const void *to_free)
+ {
+- /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
+- if (IS_ERR_OR_NULL(to_free))
++ struct kunit_suite_set *free;
++
++ if (!((struct kunit_suite_set *)to_free)->start)
+ return;
+
+- kunit_add_action(test,
+- (kunit_action_t *)kfree,
+- (void *)to_free);
++ free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
++ *free = *(struct kunit_suite_set *)to_free;
++
++ kunit_add_action(test, free_suite_set, (void *)free);
+ }
+
+ static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index 421f1398141230..1d151f6dc1cde1 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -16,6 +16,7 @@
+ #include <linux/panic.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched.h>
++#include <linux/mm.h>
+
+ #include "debugfs.h"
+ #include "hooks-impl.h"
+@@ -372,6 +373,36 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
+ }
+ EXPORT_SYMBOL_GPL(kunit_init_test);
+
++/* Only warn when a test takes more than twice the threshold */
++#define KUNIT_SPEED_WARNING_MULTIPLIER 2
++
++/* Slow tests are defined as taking more than 1s */
++#define KUNIT_SPEED_SLOW_THRESHOLD_S 1
++
++#define KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S \
++ (KUNIT_SPEED_WARNING_MULTIPLIER * KUNIT_SPEED_SLOW_THRESHOLD_S)
++
++#define s_to_timespec64(s) ns_to_timespec64((s) * NSEC_PER_SEC)
++
++static void kunit_run_case_check_speed(struct kunit *test,
++ struct kunit_case *test_case,
++ struct timespec64 duration)
++{
++ struct timespec64 slow_thr =
++ s_to_timespec64(KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S);
++ enum kunit_speed speed = test_case->attr.speed;
++
++ if (timespec64_compare(&duration, &slow_thr) < 0)
++ return;
++
++ if (speed == KUNIT_SPEED_VERY_SLOW || speed == KUNIT_SPEED_SLOW)
++ return;
++
++ kunit_warn(test,
++ "Test should be marked slow (runtime: %lld.%09lds)",
++ duration.tv_sec, duration.tv_nsec);
++}
++
+ /*
+ * Initializes and runs test case. Does not clean up or do post validations.
+ */
+@@ -379,6 +410,8 @@ static void kunit_run_case_internal(struct kunit *test,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+ {
++ struct timespec64 start, end;
++
+ if (suite->init) {
+ int ret;
+
+@@ -390,7 +423,13 @@ static void kunit_run_case_internal(struct kunit *test,
+ }
+ }
+
++ ktime_get_ts64(&start);
++
+ test_case->run_case(test);
++
++ ktime_get_ts64(&end);
++
++ kunit_run_case_check_speed(test, test_case, timespec64_sub(end, start));
+ }
+
+ static void kunit_case_internal_cleanup(struct kunit *test)
+@@ -702,6 +741,8 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
+ return 0;
+ }
+
++ kunit_suite_counter = 1;
++
+ static_branch_inc(&kunit_running);
+
+ for (i = 0; i < num_suites; i++) {
+@@ -728,8 +769,6 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
+
+ for (i = 0; i < num_suites; i++)
+ kunit_exit_suite(suites[i]);
+-
+- kunit_suite_counter = 1;
+ }
+ EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+
+@@ -769,12 +808,19 @@ static void kunit_module_exit(struct module *mod)
+ };
+ const char *action = kunit_action();
+
++ /*
++ * Check if the start address is a valid virtual address to detect
++ * if the module load sequence has failed and the suite set has not
++ * been initialized and filtered.
++ */
++ if (!suite_set.start || !virt_addr_valid(suite_set.start))
++ return;
++
+ if (!action)
+ __kunit_test_suites_exit(mod->kunit_suites,
+ mod->num_kunit_suites);
+
+- if (suite_set.start)
+- kunit_free_suite_set(suite_set);
++ kunit_free_suite_set(suite_set);
+ }
+
+ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+@@ -784,12 +830,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+
+ switch (val) {
+ case MODULE_STATE_LIVE:
++ kunit_module_init(mod);
+ break;
+ case MODULE_STATE_GOING:
+ kunit_module_exit(mod);
+ break;
+ case MODULE_STATE_COMING:
+- kunit_module_init(mod);
+ break;
+ case MODULE_STATE_UNFORMED:
+ break;
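The test.c hunks add a runtime check around run_case(): the case is timed with ktime_get_ts64(), and a warning fires when an unannotated test exceeds twice the 1-second slow threshold. A userspace analog using clock_gettime(), keeping the same thresholds but with otherwise illustrative names:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define SPEED_WARNING_MULTIPLIER 2
#define SPEED_SLOW_THRESHOLD_S 1

static void check_speed(const char *name, int marked_slow,
			struct timespec start, struct timespec end)
{
	long long ns = (end.tv_sec - start.tv_sec) * 1000000000LL +
		       (end.tv_nsec - start.tv_nsec);
	long long warn_ns = (long long)SPEED_WARNING_MULTIPLIER *
			    SPEED_SLOW_THRESHOLD_S * 1000000000LL;

	if (ns >= warn_ns && !marked_slow)
		fprintf(stderr, "%s should be marked slow (%lld.%09llds)\n",
			name, ns / 1000000000LL, ns % 1000000000LL);
}

int main(void)
{
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	sleep(3);				/* stand-in for run_case() */
	clock_gettime(CLOCK_MONOTONIC, &end);
	check_speed("demo_test", 0, start, end);
	return 0;
}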
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index f7825991d576ab..9c9e4dcf06d961 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -11,6 +11,7 @@
+ #include <linux/completion.h>
+ #include <linux/kernel.h>
+ #include <linux/kthread.h>
++#include <linux/sched/task.h>
+
+ #include "try-catch-impl.h"
+
+@@ -65,22 +66,23 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ try_catch->context = context;
+ try_catch->try_completion = &try_completion;
+ try_catch->try_result = 0;
+- task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
+- try_catch,
+- "kunit_try_catch_thread");
++ task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
++ try_catch, "kunit_try_catch_thread");
+ if (IS_ERR(task_struct)) {
+ try_catch->catch(try_catch->context);
+ return;
+ }
++ get_task_struct(task_struct);
++ wake_up_process(task_struct);
+
+ time_remaining = wait_for_completion_timeout(&try_completion,
+ kunit_test_timeout());
+ if (time_remaining == 0) {
+- kunit_err(test, "try timed out\n");
+ try_catch->try_result = -ETIMEDOUT;
+ kthread_stop(task_struct);
+ }
+
++ put_task_struct(task_struct);
+ exit_code = try_catch->try_result;
+
+ if (!exit_code)
+@@ -90,6 +92,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ try_catch->try_result = 0;
+ else if (exit_code == -EINTR)
+ kunit_err(test, "wake_up_process() was never called\n");
++ else if (exit_code == -ETIMEDOUT)
++ kunit_err(test, "try timed out\n");
+ else if (exit_code)
+ kunit_err(test, "Unknown error: %d\n", exit_code);
+
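The try-catch hunk closes a use-after-free window: kthread_run() lets the new thread run and exit before the parent takes a reference, so the later kthread_stop() could touch a freed task_struct. Creating the thread stopped, pinning it with get_task_struct(), and only then waking it keeps the task valid until the final put. A kernel-style sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>

static DECLARE_COMPLETION(demo_done);

static int demo_threadfn(void *data)
{
	/* ... run the guarded body ... */
	complete(&demo_done);
	return 0;
}

static int demo_run(void *ctx)
{
	struct task_struct *task;

	task = kthread_create(demo_threadfn, ctx, "demo_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);

	get_task_struct(task);		/* pin it before it can run and exit */
	wake_up_process(task);

	if (!wait_for_completion_timeout(&demo_done, HZ))
		kthread_stop(task);	/* timed out: stop the stuck thread */

	put_task_struct(task);		/* safe: reference held throughout */
	return 0;
}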
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index bb24d84a4922f0..4e05511c8d1eba 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -2228,6 +2228,8 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+
+ /*
+ * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
++ * If @mas->index cannot be found within the containing
++ * node, we traverse to the last entry in the node.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+@@ -3643,7 +3645,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
+ return true;
+ }
+
+-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
++static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
+ {
+ struct ma_state *mas = wr_mas->mas;
+
+@@ -3652,11 +3654,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+- return true;
++ return;
+ mas_wr_walk_traverse(wr_mas);
+-
+ }
+- return true;
+ }
+ /*
+ * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
+@@ -3892,8 +3892,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ /* Copy l_mas and store the value in b_node. */
+ mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
+- /* Copy r_mas into b_node. */
+- if (r_mas.offset <= r_wr_mas.node_end)
++ /* Copy r_mas into b_node if there is anything to copy. */
++ if (r_mas.max > r_mas.last)
+ mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
+ &b_node, b_node.b_end + 1);
+ else
+@@ -5085,18 +5085,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ if (size == 0 || max - min < size - 1)
+ return -EINVAL;
+
+- if (mas_is_start(mas)) {
++ if (mas_is_start(mas))
+ mas_start(mas);
+- mas->offset = mas_data_end(mas);
+- } else if (mas->offset >= 2) {
+- mas->offset -= 2;
+- } else if (!mas_rewind_node(mas)) {
++ else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
+ return -EBUSY;
+- }
+
+- /* Empty set. */
+- if (mas_is_none(mas) || mas_is_ptr(mas))
++ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
+ return mas_sparse_area(mas, min, max, size, false);
++ else if (mas->offset >= 2)
++ mas->offset -= 2;
++ else
++ mas->offset = mas_data_end(mas);
++
+
+ /* The start of the window can only be within these values. */
+ mas->index = min;
+@@ -5501,6 +5501,17 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+
+ mas_wr_end_piv(&wr_mas);
+ node_size = mas_wr_new_end(&wr_mas);
++
++ /* Slot store, does not require additional nodes */
++ if (node_size == wr_mas.node_end) {
++ /* reuse node */
++ if (!mt_in_rcu(mas->tree))
++ return 0;
++ /* shifting boundary */
++ if (wr_mas.offset_end - mas->offset == 1)
++ return 0;
++ }
++
+ if (node_size >= mt_slots[wr_mas.type]) {
+ /* Split, worst case for now. */
+ request = 1 + mas_mt_height(mas) * 2;
+diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
+index d42cebf7407fc4..d3b64b10da1c5e 100644
+--- a/lib/math/prime_numbers.c
++++ b/lib/math/prime_numbers.c
+@@ -6,8 +6,6 @@
+ #include <linux/prime_numbers.h>
+ #include <linux/slab.h>
+
+-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+-
+ struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
+index 440aee705cccab..30e00ef0bf2e0f 100644
+--- a/lib/memcpy_kunit.c
++++ b/lib/memcpy_kunit.c
+@@ -32,7 +32,7 @@ struct some_bytes {
+ BUILD_BUG_ON(sizeof(instance.data) != 32); \
+ for (size_t i = 0; i < sizeof(instance.data); i++) { \
+ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
+- "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
++ "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
+ __LINE__, #instance, v, i, instance.data[i]); \
+ } \
+ } while (0)
+@@ -41,7 +41,7 @@ struct some_bytes {
+ BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
+ for (size_t i = 0; i < sizeof(one); i++) { \
+ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
+- "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
++ "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
+ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
+ } \
+ kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 7a2b6c38fd597f..ba698a097fc810 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
+ [NLA_S16] = sizeof(s16),
+ [NLA_S32] = sizeof(s32),
+ [NLA_S64] = sizeof(s64),
++ [NLA_BE16] = sizeof(__be16),
++ [NLA_BE32] = sizeof(__be32),
+ };
+
+ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+@@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+ [NLA_S16] = sizeof(s16),
+ [NLA_S32] = sizeof(s32),
+ [NLA_S64] = sizeof(s64),
++ [NLA_BE16] = sizeof(__be16),
++ [NLA_BE32] = sizeof(__be32),
+ };
+
+ /*
+diff --git a/lib/objagg.c b/lib/objagg.c
+index 1e248629ed6431..1608895b009c8b 100644
+--- a/lib/objagg.c
++++ b/lib/objagg.c
+@@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
+ {
+ void *delta_priv;
+
++ if (WARN_ON(!objagg_obj_is_root(parent)))
++ return -EINVAL;
++
+ delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+ objagg_obj->obj);
+ if (IS_ERR(delta_priv))
+@@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = {
+ [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
+ };
+
+-static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
+- const void *obj)
+-{
+- struct rhashtable *ht = arg->ht;
+- struct objagg_hints *objagg_hints =
+- container_of(ht, struct objagg_hints, node_ht);
+- const struct objagg_ops *ops = objagg_hints->ops;
+- const char *ptr = obj;
+-
+- ptr += ht->p.key_offset;
+- return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
+- memcmp(ptr, arg->key, ht->p.key_len);
+-}
+-
+ /**
+ * objagg_hints_get - obtains hints instance
+ * @objagg: objagg instance
+@@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ offsetof(struct objagg_hints_node, obj);
+ objagg_hints->ht_params.head_offset =
+ offsetof(struct objagg_hints_node, ht_node);
+- objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
+
+ err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
+ if (err)
+diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
+index ce39ce9f3526ea..2829ddb0e316b4 100644
+--- a/lib/pci_iomap.c
++++ b/lib/pci_iomap.c
+@@ -170,8 +170,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+- iounmap(p);
+ #endif
++ iounmap(p);
+ }
+ EXPORT_SYMBOL(pci_iounmap);
+
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index d0a5081dfd122e..1d5e1574869225 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb,
+ /*
+ * See if we have deferred clears that we can batch move
+ */
+-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
++static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
++ unsigned int depth, unsigned int alloc_hint, bool wrap)
+ {
+- unsigned long mask;
++ unsigned long mask, word_mask;
+
+- if (!READ_ONCE(map->cleared))
+- return false;
++ guard(raw_spinlock_irqsave)(&map->swap_lock);
++
++ if (!map->cleared) {
++ if (depth == 0)
++ return false;
++
++ word_mask = (~0UL) >> (BITS_PER_LONG - depth);
++ /*
++ * The current behavior is to always retry after moving
++ * ->cleared to word, and we change it to retry in case
++ * of any free bits. To avoid an infinite loop, we need
++ * to take wrap & alloc_hint into account, otherwise a
++ * soft lockup may occur.
++ */
++ if (!wrap && alloc_hint)
++ word_mask &= ~((1UL << alloc_hint) - 1);
++
++ return (READ_ONCE(map->word) & word_mask) != word_mask;
++ }
+
+ /*
+ * First get a stable cleared mask, setting the old mask to 0.
+@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ bool alloc_hint)
+ {
+ unsigned int bits_per_word;
++ int i;
+
+ if (shift < 0)
+ shift = sbitmap_calculate_shift(depth);
+@@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ return -ENOMEM;
+ }
+
++ for (i = 0; i < sb->map_nr; i++)
++ raw_spin_lock_init(&sb->map[i].swap_lock);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_init_node);
+@@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++)
+- sbitmap_deferred_clear(&sb->map[i]);
++ sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
+
+ sb->depth = depth;
+ sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+@@ -179,7 +201,7 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
+ alloc_hint, wrap);
+ if (nr != -1)
+ break;
+- if (!sbitmap_deferred_clear(map))
++ if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
+ break;
+ } while (1);
+
+@@ -499,18 +521,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ struct sbitmap_word *map = &sb->map[index];
+ unsigned long get_mask;
+ unsigned int map_depth = __map_depth(sb, index);
++ unsigned long val;
+
+- sbitmap_deferred_clear(map);
+- if (map->word == (1UL << (map_depth - 1)) - 1)
++ sbitmap_deferred_clear(map, 0, 0, 0);
++ val = READ_ONCE(map->word);
++ if (val == (1UL << (map_depth - 1)) - 1)
+ goto next;
+
+- nr = find_first_zero_bit(&map->word, map_depth);
++ nr = find_first_zero_bit(&val, map_depth);
+ if (nr + nr_tags <= map_depth) {
+ atomic_long_t *ptr = (atomic_long_t *) &map->word;
+- unsigned long val;
+
+ get_mask = ((1UL << nr_tags) - 1) << nr;
+- val = READ_ONCE(map->word);
+ while (!atomic_long_try_cmpxchg(ptr, &val,
+ get_mask | val))
+ ;
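The sbitmap rewrite serializes deferred clearing with a per-word raw spinlock and uses the scoped guard() helper from <linux/cleanup.h>, so every early return drops the lock automatically. A minimal kernel-style sketch of that locking shape, with hypothetical names:

#include <linux/atomic.h>
#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_word {
	unsigned long word;
	unsigned long cleared;
	raw_spinlock_t swap_lock;
};

static bool demo_deferred_clear(struct demo_word *map)
{
	unsigned long mask;

	/* held for the rest of the scope, released on every return path */
	guard(raw_spinlock_irqsave)(&map->swap_lock);

	if (!map->cleared)
		return false;		/* unlock happens automatically */

	mask = xchg(&map->cleared, 0);
	map->word &= ~mask;		/* still under the lock here */
	return true;
}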
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 68b45c82c37a69..7bc2220fea8058 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
+ do {
+ res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
+ extraction_flags, &off);
+- if (res < 0)
++ if (res <= 0)
+ goto failed;
+
+ len = res;
+diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
+index d4a3730b08fa7e..4ce9604388069d 100644
+--- a/lib/slub_kunit.c
++++ b/lib/slub_kunit.c
+@@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test)
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+- p[s->offset] = 0x12;
++ p[s->offset] = ~p[s->offset];
+
+ /*
+ * Expecting three errors.
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index 2f5aa851834ebb..15a055865d109a 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -402,10 +402,10 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ /*
+ * Zero out zone modifiers, as we don't have specific zone
+ * requirements. Keep the flags related to allocation in atomic
+- * contexts and I/O.
++ * contexts, I/O, nolockdep.
+ */
+ alloc_flags &= ~GFP_ZONEMASK;
+- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
++ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
+ alloc_flags |= __GFP_NOWARN;
+ page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+ if (page)
+diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
+index 4c40580a99a364..f247089d63c085 100644
+--- a/lib/test_blackhole_dev.c
++++ b/lib/test_blackhole_dev.c
+@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
+ {
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+- struct ethhdr *ethh;
+ struct udphdr *uh;
+ int data_len;
+ int ret;
+@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
+ ip6h->saddr = in6addr_loopback;
+ ip6h->daddr = in6addr_loopback;
+ /* Ether */
+- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
++ skb_push(skb, sizeof(struct ethhdr));
+ skb_set_mac_header(skb, 0);
+
+ skb->protocol = htons(ETH_P_IPV6);
+diff --git a/lib/test_hmm.c b/lib/test_hmm.c
+index 717dcb83012733..b823ba7cb6a156 100644
+--- a/lib/test_hmm.c
++++ b/lib/test_hmm.c
+@@ -1226,8 +1226,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+
+- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
++ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+@@ -1250,8 +1250,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+- kfree(src_pfns);
+- kfree(dst_pfns);
++ kvfree(src_pfns);
++ kvfree(dst_pfns);
+ }
+
+ /* Removes free pages from the free list so they can't be re-allocated */
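The test_hmm hunk swaps kcalloc() for kvcalloc() with __GFP_NOFAIL: the pfn arrays can be too large for a physically contiguous allocation, kvcalloc() may fall back to vmalloc, and __GFP_NOFAIL keeps the eviction path from ever seeing a NULL array. A sketch of the pairing, with hypothetical names; kvfree() handles both backing allocators:

#include <linux/mm.h>
#include <linux/slab.h>

static void demo_evict(unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns),
			    GFP_KERNEL | __GFP_NOFAIL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns),
			    GFP_KERNEL | __GFP_NOFAIL);

	/* ... migrate using the pfn arrays ... */

	kvfree(src_pfns);	/* correct for kmalloc- or vmalloc-backed */
	kvfree(dst_pfns);
}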
+diff --git a/lib/test_ida.c b/lib/test_ida.c
+index b0688062596150..55105baa19da9a 100644
+--- a/lib/test_ida.c
++++ b/lib/test_ida.c
+@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+
++/*
++ * Check various situations where we attempt to free an ID we don't own.
++ */
++static void ida_check_bad_free(struct ida *ida)
++{
++ unsigned long i;
++
++ printk("vvv Ignore \"not allocated\" warnings\n");
++ /* IDA is empty; all of these will fail */
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++
++ /* IDA contains a single value entry */
++ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++
++ /* IDA contains a single bitmap */
++ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++
++ /* IDA contains a tree */
++ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
++ ida_free(ida, 0);
++ for (i = 0; i < 31; i++)
++ ida_free(ida, 1 << i);
++ printk("^^^ \"not allocated\" warnings over\n");
++
++ ida_free(ida, 3);
++ ida_free(ida, 1023);
++ ida_free(ida, (1 << 20) - 1);
++
++ IDA_BUG_ON(ida, !ida_is_empty(ida));
++}
++
+ static DEFINE_IDA(ida);
+
+ static int ida_checks(void)
+@@ -162,6 +201,7 @@ static int ida_checks(void)
+ ida_check_leaf(&ida, 1024 * 64);
+ ida_check_max(&ida);
+ ida_check_conv(&ida);
++ ida_check_bad_free(&ida);
+
+ printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run != tests_passed) ? 0 : -EINVAL;
+diff --git a/lib/test_meminit.c b/lib/test_meminit.c
+index 0ae35223d77335..0dc173849a5420 100644
+--- a/lib/test_meminit.c
++++ b/lib/test_meminit.c
+@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
+ int failures = 0, num_tests = 0;
+ int i;
+
+- for (i = 0; i <= MAX_ORDER; i++)
++ for (i = 0; i < NR_PAGE_ORDERS; i++)
+ num_tests += do_alloc_pages_order(i, &failures);
+
+ REPORT_FAILURES_IN_FN();
+diff --git a/lib/test_xarray.c b/lib/test_xarray.c
+index e77d4856442c3f..542926da61a3ed 100644
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -1756,6 +1756,97 @@ static noinline void check_get_order(struct xarray *xa)
+ }
+ }
+
++static noinline void check_xas_get_order(struct xarray *xa)
++{
++ XA_STATE(xas, xa, 0);
++
++ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
++ unsigned int order;
++ unsigned long i, j;
++
++ for (order = 0; order < max_order; order++) {
++ for (i = 0; i < 10; i++) {
++ xas_set_order(&xas, i << order, order);
++ do {
++ xas_lock(&xas);
++ xas_store(&xas, xa_mk_value(i));
++ xas_unlock(&xas);
++ } while (xas_nomem(&xas, GFP_KERNEL));
++
++ for (j = i << order; j < (i + 1) << order; j++) {
++ xas_set_order(&xas, j, 0);
++ rcu_read_lock();
++ xas_load(&xas);
++ XA_BUG_ON(xa, xas_get_order(&xas) != order);
++ rcu_read_unlock();
++ }
++
++ xas_lock(&xas);
++ xas_set_order(&xas, i << order, order);
++ xas_store(&xas, NULL);
++ xas_unlock(&xas);
++ }
++ }
++}
++
++static noinline void check_xas_conflict_get_order(struct xarray *xa)
++{
++ XA_STATE(xas, xa, 0);
++
++ void *entry;
++ int only_once;
++ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
++ unsigned int order;
++ unsigned long i, j, k;
++
++ for (order = 0; order < max_order; order++) {
++ for (i = 0; i < 10; i++) {
++ xas_set_order(&xas, i << order, order);
++ do {
++ xas_lock(&xas);
++ xas_store(&xas, xa_mk_value(i));
++ xas_unlock(&xas);
++ } while (xas_nomem(&xas, GFP_KERNEL));
++
++ /*
++ * Ensure xas_get_order works with xas_for_each_conflict.
++ */
++ j = i << order;
++ for (k = 0; k < order; k++) {
++ only_once = 0;
++ xas_set_order(&xas, j + (1 << k), k);
++ xas_lock(&xas);
++ xas_for_each_conflict(&xas, entry) {
++ XA_BUG_ON(xa, entry != xa_mk_value(i));
++ XA_BUG_ON(xa, xas_get_order(&xas) != order);
++ only_once++;
++ }
++ XA_BUG_ON(xa, only_once != 1);
++ xas_unlock(&xas);
++ }
++
++ if (order < max_order - 1) {
++ only_once = 0;
++ xas_set_order(&xas, (i & ~1UL) << order, order + 1);
++ xas_lock(&xas);
++ xas_for_each_conflict(&xas, entry) {
++ XA_BUG_ON(xa, entry != xa_mk_value(i));
++ XA_BUG_ON(xa, xas_get_order(&xas) != order);
++ only_once++;
++ }
++ XA_BUG_ON(xa, only_once != 1);
++ xas_unlock(&xas);
++ }
++
++ xas_set_order(&xas, i << order, order);
++ xas_lock(&xas);
++ xas_store(&xas, NULL);
++ xas_unlock(&xas);
++ }
++ }
++}
++
++
+ static noinline void check_destroy(struct xarray *xa)
+ {
+ unsigned long index;
+@@ -1805,6 +1896,8 @@ static int xarray_checks(void)
+ check_reserve(&xa0);
+ check_multi_store(&array);
+ check_get_order(&array);
++ check_xas_get_order(&array);
++ check_xas_conflict_get_order(&array);
+ check_xa_alloc();
+ check_find(&array);
+ check_find_entry(&array);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index afb88b24fa7482..2aa408441cd3e7 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -2110,15 +2110,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+
+ /* Loop starting from the root node to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+- struct fwnode_handle *__fwnode =
+- fwnode_get_nth_parent(fwnode, depth);
++ /*
++ * Only get a reference for other nodes (i.e. parent nodes).
++ * fwnode refcount may be 0 here.
++ */
++ struct fwnode_handle *__fwnode = depth ?
++ fwnode_get_nth_parent(fwnode, depth) : fwnode;
+
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
+ default_str_spec);
+
+- fwnode_handle_put(__fwnode);
++ if (depth)
++ fwnode_handle_put(__fwnode);
+ }
+
+ return buf;
+diff --git a/lib/xarray.c b/lib/xarray.c
+index 39f07bfc4dccac..da79128ad754fc 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1750,39 +1750,52 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
+ EXPORT_SYMBOL(xa_store_range);
+
+ /**
+- * xa_get_order() - Get the order of an entry.
+- * @xa: XArray.
+- * @index: Index of the entry.
++ * xas_get_order() - Get the order of an entry.
++ * @xas: XArray operation state.
++ *
++ * Called after xas_load, the xas should not be in an error state.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+-int xa_get_order(struct xarray *xa, unsigned long index)
++int xas_get_order(struct xa_state *xas)
+ {
+- XA_STATE(xas, xa, index);
+- void *entry;
+ int order = 0;
+
+- rcu_read_lock();
+- entry = xas_load(&xas);
+-
+- if (!entry)
+- goto unlock;
+-
+- if (!xas.xa_node)
+- goto unlock;
++ if (!xas->xa_node)
++ return 0;
+
+ for (;;) {
+- unsigned int slot = xas.xa_offset + (1 << order);
++ unsigned int slot = xas->xa_offset + (1 << order);
+
+ if (slot >= XA_CHUNK_SIZE)
+ break;
+- if (!xa_is_sibling(xas.xa_node->slots[slot]))
++ if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
+ break;
+ order++;
+ }
+
+- order += xas.xa_node->shift;
+-unlock:
++ order += xas->xa_node->shift;
++ return order;
++}
++EXPORT_SYMBOL_GPL(xas_get_order);
++
++/**
++ * xa_get_order() - Get the order of an entry.
++ * @xa: XArray.
++ * @index: Index of the entry.
++ *
++ * Return: A number between 0 and 63 indicating the order of the entry.
++ */
++int xa_get_order(struct xarray *xa, unsigned long index)
++{
++ XA_STATE(xas, xa, index);
++ int order = 0;
++ void *entry;
++
++ rcu_read_lock();
++ entry = xas_load(&xas);
++ if (entry)
++ order = xas_get_order(&xas);
+ rcu_read_unlock();
+
+ return order;
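The xarray hunks factor the order computation out of xa_get_order() into xas_get_order(), so callers that already hold a positioned xa_state (for example inside xas_for_each_conflict()) can query the order without a second tree walk. A usage sketch under RCU, mirroring the refactored xa_get_order():

#include <linux/xarray.h>

static int demo_order_of(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;
	int order = 0;

	rcu_read_lock();
	entry = xas_load(&xas);		/* position the state first */
	if (entry)
		order = xas_get_order(&xas);
	rcu_read_unlock();

	return order;			/* 0 for small or absent entries */
}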
+diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
+index 88a2c35e1b5971..5627b00fca296e 100644
+--- a/lib/xz/xz_crc32.c
++++ b/lib/xz/xz_crc32.c
+@@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
+
+ XZ_EXTERN void xz_crc32_init(void)
+ {
+- const uint32_t poly = CRC32_POLY_LE;
++ const uint32_t poly = 0xEDB88320;
+
+ uint32_t i;
+ uint32_t j;
+diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
+index bf1e94ec7873cf..d9fd49b45fd758 100644
+--- a/lib/xz/xz_private.h
++++ b/lib/xz/xz_private.h
+@@ -105,10 +105,6 @@
+ # endif
+ #endif
+
+-#ifndef CRC32_POLY_LE
+-#define CRC32_POLY_LE 0xedb88320
+-#endif
+-
+ /*
+ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
+ * before calling xz_dec_lzma2_run().
+diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
+index a0d06095be83de..8dcb8ca39767c8 100644
+--- a/lib/zstd/common/fse_decompress.c
++++ b/lib/zstd/common/fse_decompress.c
+@@ -312,7 +312,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
+
+ typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+- FSE_DTable dtable[1]; /* Dynamically sized */
++ FSE_DTable dtable[]; /* Dynamically sized */
+ } FSE_DecompressWksp;
+
+
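The zstd hunk converts a one-element array used as a variable-length tail into a C99 flexible array member, so sizeof() covers only the fixed header and bounds checkers such as -Warray-bounds can reason about the tail. A standalone sketch of the allocation arithmetic, with hypothetical field sizes:

#include <stdlib.h>

struct wksp {
	short ncount[16];
	unsigned dtable[];	/* flexible array member, excluded by sizeof() */
};

static struct wksp *wksp_alloc(size_t dtable_entries)
{
	/* header plus exactly the requested number of tail entries */
	return malloc(sizeof(struct wksp) +
		      dtable_entries * sizeof(unsigned));
}

int main(void)
{
	struct wksp *w = wksp_alloc(128);

	if (!w)
		return 1;
	w->dtable[127] = 0;	/* last valid entry */
	free(w);
	return 0;
}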
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 264a2df5ecf5b9..c11cd01169e8d1 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -147,12 +147,15 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD
+ help
+ Use the zbud allocator as the default allocator.
+
+-config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
+- bool "z3fold"
+- select Z3FOLD
++config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
++	bool "z3fold (DEPRECATED)"
++ select Z3FOLD_DEPRECATED
+ help
+ Use the z3fold allocator as the default allocator.
+
++ Deprecated and scheduled for removal in a few cycles,
++ see CONFIG_Z3FOLD_DEPRECATED.
++
+ config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+ bool "zsmalloc"
+ select ZSMALLOC
+@@ -164,7 +167,7 @@ config ZSWAP_ZPOOL_DEFAULT
+ string
+ depends on ZSWAP
+ default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
+- default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
++ default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
+ default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
+ default ""
+
+@@ -178,15 +181,25 @@ config ZBUD
+ deterministic reclaim properties that make it preferable to a higher
+ density approach when reclaim will be used.
+
+-config Z3FOLD
+- tristate "3:1 compression allocator (z3fold)"
++config Z3FOLD_DEPRECATED
++ tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
+ depends on ZSWAP
+ help
++ Deprecated and scheduled for removal in a few cycles. If you have
++ a good reason for using Z3FOLD over ZSMALLOC, please contact
++ linux-mm@kvack.org and the zswap maintainers.
++
+ A special purpose allocator for storing compressed pages.
+ It is designed to store up to three compressed pages per physical
+ page. It is a ZBUD derivative so the simplicity and determinism are
+ still there.
+
++config Z3FOLD
++ tristate
++ default y if Z3FOLD_DEPRECATED=y
++ default m if Z3FOLD_DEPRECATED=m
++ depends on Z3FOLD_DEPRECATED
++
+ config ZSMALLOC
+ tristate
+ prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
+@@ -704,6 +717,17 @@ config HUGETLB_PAGE_SIZE_VARIABLE
+ config CONTIG_ALLOC
+ def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
+
++config PCP_BATCH_SCALE_MAX
++ int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
++ default 5
++ range 0 6
++ help
++ In page allocator, PCP (Per-CPU pageset) is refilled and drained in
++ batches. The batch number is scaled automatically to improve page
++	  allocation/free throughput. But a scale factor that is too large
++	  may hurt latency. This option sets the upper limit of the scale
++	  factor to bound the maximum latency.
++
+ config PHYS_ADDR_T_64BIT
+ def_bool 64BIT
+
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 1e3447bccdb14d..e039d05304dd9c 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -436,7 +436,6 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
+ INIT_LIST_HEAD(&wb->work_list);
+ INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
+ INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
+- wb->dirty_sleep = jiffies;
+
+ err = fprop_local_init_percpu(&wb->completions, gfp);
+ if (err)
+@@ -921,6 +920,7 @@ int bdi_init(struct backing_dev_info *bdi)
+ INIT_LIST_HEAD(&bdi->bdi_list);
+ INIT_LIST_HEAD(&bdi->wb_list);
+ init_waitqueue_head(&bdi->wb_waitq);
++ bdi->last_bdp_sleep = jiffies;
+
+ return cgwb_bdi_init(bdi);
+ }
+diff --git a/mm/cma.c b/mm/cma.c
+index da2967c6a22389..ac363f16d3923c 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -187,10 +187,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ if (!size || !memblock_is_region_reserved(base, size))
+ return -EINVAL;
+
+- /* alignment should be aligned with order_per_bit */
+- if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
+- return -EINVAL;
+-
+ /* ensure minimal alignment required by mm core */
+ if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
+ return -EINVAL;
+@@ -505,7 +501,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
+ */
+ if (page) {
+ for (i = 0; i < count; i++)
+- page_kasan_tag_reset(page + i);
++ page_kasan_tag_reset(nth_page(page, i));
+ }
+
+ if (ret && !no_warn) {
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 38c8d216c6a3bf..61c741f11e9bb3 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -2225,7 +2225,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
+
+ /* Direct compactor: Is a suitable page free? */
+ ret = COMPACT_NO_SUITABLE_PAGE;
+- for (order = cc->order; order <= MAX_ORDER; order++) {
++ for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
+ struct free_area *area = &cc->zone->free_area[order];
+ bool can_steal;
+
+@@ -2684,16 +2684,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+ unsigned int alloc_flags, const struct alloc_context *ac,
+ enum compact_priority prio, struct page **capture)
+ {
+- int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
+ struct zoneref *z;
+ struct zone *zone;
+ enum compact_result rc = COMPACT_SKIPPED;
+
+- /*
+- * Check if the GFP flags allow compaction - GFP_NOIO is really
+- * tricky context because the migration might require IO
+- */
+- if (!may_perform_io)
++ if (!gfp_compaction_allowed(gfp_mask))
+ return COMPACT_SKIPPED;
+
+ trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index bcd2bd9d6c104f..ae55f20835b069 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -423,12 +423,16 @@ struct damon_ctx *damon_new_ctx(void)
+ if (!ctx)
+ return NULL;
+
++ init_completion(&ctx->kdamond_started);
++
+ ctx->attrs.sample_interval = 5 * 1000;
+ ctx->attrs.aggr_interval = 100 * 1000;
+ ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
+
+- ktime_get_coarse_ts64(&ctx->last_aggregation);
+- ctx->last_ops_update = ctx->last_aggregation;
++ ctx->passed_sample_intervals = 0;
++ /* These will be set from kdamond_init_intervals_sis() */
++ ctx->next_aggregation_sis = 0;
++ ctx->next_ops_update_sis = 0;
+
+ mutex_init(&ctx->kdamond_lock);
+
+@@ -476,20 +480,14 @@ static unsigned int damon_age_for_new_attrs(unsigned int age,
+ static unsigned int damon_accesses_bp_to_nr_accesses(
+ unsigned int accesses_bp, struct damon_attrs *attrs)
+ {
+- unsigned int max_nr_accesses =
+- attrs->aggr_interval / attrs->sample_interval;
+-
+- return accesses_bp * max_nr_accesses / 10000;
++ return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
+ }
+
+ /* convert nr_accesses to access ratio in bp (per 10,000) */
+ static unsigned int damon_nr_accesses_to_accesses_bp(
+ unsigned int nr_accesses, struct damon_attrs *attrs)
+ {
+- unsigned int max_nr_accesses =
+- attrs->aggr_interval / attrs->sample_interval;
+-
+- return nr_accesses * 10000 / max_nr_accesses;
++ return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
+ }
+
+ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
+@@ -548,6 +546,9 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx,
+ */
+ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
+ {
++ unsigned long sample_interval = attrs->sample_interval ?
++ attrs->sample_interval : 1;
++
+ if (attrs->min_nr_regions < 3)
+ return -EINVAL;
+ if (attrs->min_nr_regions > attrs->max_nr_regions)
+@@ -555,6 +556,11 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
+ if (attrs->sample_interval > attrs->aggr_interval)
+ return -EINVAL;
+
++ ctx->next_aggregation_sis = ctx->passed_sample_intervals +
++ attrs->aggr_interval / sample_interval;
++ ctx->next_ops_update_sis = ctx->passed_sample_intervals +
++ attrs->ops_update_interval / sample_interval;
++
+ damon_update_monitoring_results(ctx, attrs);
+ ctx->attrs = *attrs;
+ return 0;
+@@ -632,11 +638,14 @@ static int __damon_start(struct damon_ctx *ctx)
+ mutex_lock(&ctx->kdamond_lock);
+ if (!ctx->kdamond) {
+ err = 0;
++ reinit_completion(&ctx->kdamond_started);
+ ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
+ nr_running_ctxs);
+ if (IS_ERR(ctx->kdamond)) {
+ err = PTR_ERR(ctx->kdamond);
+ ctx->kdamond = NULL;
++ } else {
++ wait_for_completion(&ctx->kdamond_started);
+ }
+ }
+ mutex_unlock(&ctx->kdamond_lock);
+@@ -699,8 +708,7 @@ static int __damon_stop(struct damon_ctx *ctx)
+ if (tsk) {
+ get_task_struct(tsk);
+ mutex_unlock(&ctx->kdamond_lock);
+- kthread_stop(tsk);
+- put_task_struct(tsk);
++ kthread_stop_put(tsk);
+ return 0;
+ }
+ mutex_unlock(&ctx->kdamond_lock);
+@@ -728,38 +736,6 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
+ return err;
+ }
+
+-/*
+- * damon_check_reset_time_interval() - Check if a time interval is elapsed.
+- * @baseline: the time to check whether the interval has elapsed since
+- * @interval: the time interval (microseconds)
+- *
+- * See whether the given time interval has passed since the given baseline
+- * time. If so, it also updates the baseline to current time for next check.
+- *
+- * Return: true if the time interval has passed, or false otherwise.
+- */
+-static bool damon_check_reset_time_interval(struct timespec64 *baseline,
+- unsigned long interval)
+-{
+- struct timespec64 now;
+-
+- ktime_get_coarse_ts64(&now);
+- if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
+- interval * 1000)
+- return false;
+- *baseline = now;
+- return true;
+-}
+-
+-/*
+- * Check whether it is time to flush the aggregated information
+- */
+-static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
+-{
+- return damon_check_reset_time_interval(&ctx->last_aggregation,
+- ctx->attrs.aggr_interval);
+-}
+-
+ /*
+ * Reset the aggregated monitoring results ('nr_accesses' of each region).
+ */
+@@ -920,7 +896,7 @@ static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
+ matched = true;
+ break;
+ default:
+- break;
++ return false;
+ }
+
+ return matched == filter->matching;
+@@ -1145,14 +1121,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
+ * access frequencies are similar. This is for minimizing the monitoring
+ * overhead under the dynamically changeable access pattern. If a merge was
+ * unnecessarily made, later 'kdamond_split_regions()' will revert it.
++ *
++ * The total number of regions could be higher than the user-defined limit,
++ * max_nr_regions, in some cases. For example, the user can update
++ * max_nr_regions to a number lower than the current number of regions
++ * while DAMON is running. For such a case, repeat merging until the limit
++ * is met while increasing @threshold up to the possible maximum level.
+ */
+ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
+ unsigned long sz_limit)
+ {
+ struct damon_target *t;
+-
+- damon_for_each_target(t, c)
+- damon_merge_regions_of(t, threshold, sz_limit);
++ unsigned int nr_regions;
++ unsigned int max_thres;
++
++ max_thres = c->attrs.aggr_interval /
++ (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
++ do {
++ nr_regions = 0;
++ damon_for_each_target(t, c) {
++ damon_merge_regions_of(t, threshold, sz_limit);
++ nr_regions += damon_nr_regions(t);
++ }
++ threshold = max(1, threshold * 2);
++ } while (nr_regions > c->attrs.max_nr_regions &&
++ threshold / 2 < max_thres);
+ }
+
+ /*
+@@ -1240,18 +1233,6 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
+ last_nr_regions = nr_regions;
+ }
+
+-/*
+- * Check whether it is time to check and apply the operations-related data
+- * structures.
+- *
+- * Returns true if it is.
+- */
+-static bool kdamond_need_update_operations(struct damon_ctx *ctx)
+-{
+- return damon_check_reset_time_interval(&ctx->last_ops_update,
+- ctx->attrs.ops_update_interval);
+-}
+-
+ /*
+ * Check whether current monitoring should be stopped
+ *
+@@ -1363,6 +1344,17 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
+ return -EBUSY;
+ }
+
++static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
++{
++ unsigned long sample_interval = ctx->attrs.sample_interval ?
++ ctx->attrs.sample_interval : 1;
++
++ ctx->passed_sample_intervals = 0;
++ ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
++ ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
++ sample_interval;
++}
++
+ /*
+ * The monitoring daemon that runs as a kernel thread
+ */
+@@ -1376,6 +1368,9 @@ static int kdamond_fn(void *data)
+
+ pr_debug("kdamond (%d) starts\n", current->pid);
+
++ complete(&ctx->kdamond_started);
++ kdamond_init_intervals_sis(ctx);
++
+ if (ctx->ops.init)
+ ctx->ops.init(ctx);
+ if (ctx->callback.before_start && ctx->callback.before_start(ctx))
+@@ -1384,6 +1379,17 @@ static int kdamond_fn(void *data)
+ sz_limit = damon_region_sz_limit(ctx);
+
+ while (!kdamond_need_stop(ctx)) {
++ /*
++ * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
++ * be changed from after_wmarks_check() or after_aggregation()
++ * callbacks. Read the values here, and use those for this
++		 * iteration. That is, new values updated by damon_set_attrs()
++		 * are respected from the next iteration.
++ */
++ unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
++ unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
++ unsigned long sample_interval = ctx->attrs.sample_interval;
++
+ if (kdamond_wait_activation(ctx))
+ break;
+
+@@ -1393,12 +1399,17 @@ static int kdamond_fn(void *data)
+ ctx->callback.after_sampling(ctx))
+ break;
+
+- kdamond_usleep(ctx->attrs.sample_interval);
++ kdamond_usleep(sample_interval);
++ ctx->passed_sample_intervals++;
+
+ if (ctx->ops.check_accesses)
+ max_nr_accesses = ctx->ops.check_accesses(ctx);
+
+- if (kdamond_aggregate_interval_passed(ctx)) {
++ sample_interval = ctx->attrs.sample_interval ?
++ ctx->attrs.sample_interval : 1;
++ if (ctx->passed_sample_intervals == next_aggregation_sis) {
++ ctx->next_aggregation_sis = next_aggregation_sis +
++ ctx->attrs.aggr_interval / sample_interval;
+ kdamond_merge_regions(ctx,
+ max_nr_accesses / 10,
+ sz_limit);
+@@ -1413,7 +1424,10 @@ static int kdamond_fn(void *data)
+ ctx->ops.reset_aggregated(ctx);
+ }
+
+- if (kdamond_need_update_operations(ctx)) {
++ if (ctx->passed_sample_intervals == next_ops_update_sis) {
++ ctx->next_ops_update_sis = next_ops_update_sis +
++ ctx->attrs.ops_update_interval /
++ sample_interval;
+ if (ctx->ops.update)
+ ctx->ops.update(ctx);
+ sz_limit = damon_region_sz_limit(ctx);
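The DAMON core hunks replace wall-clock interval checks (kdamond_aggregate_interval_passed() and friends) with sample-interval counting: kdamond increments passed_sample_intervals once per sleep and fires aggregation or ops updates when the counter reaches a precomputed target, which also makes attribute updates take effect at a well-defined iteration boundary. A userspace reduction of that loop shape, with hypothetical interval values:

#include <stdio.h>

int main(void)
{
	unsigned long sample_us = 5 * 1000;	/* sample interval */
	unsigned long aggr_us = 100 * 1000;	/* aggregation interval */
	unsigned long passed = 0;
	unsigned long next_aggr = aggr_us / sample_us;	/* in samples */

	for (int step = 0; step < 60; step++) {
		/* usleep(sample_us); check_accesses(); */
		passed++;

		if (passed == next_aggr) {
			next_aggr += aggr_us / sample_us;
			printf("aggregate at sample %lu\n", passed);
		}
	}
	return 0;	/* aggregates at samples 20, 40 and 60 */
}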
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 7b8fce2f67a8d6..e84495ab92cf3b 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -183,9 +183,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
+ return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
+ }
+
++static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
++ struct damos_quota *src)
++{
++ dst->total_charged_sz = src->total_charged_sz;
++ dst->total_charged_ns = src->total_charged_ns;
++ dst->charged_sz = src->charged_sz;
++ dst->charged_from = src->charged_from;
++ dst->charge_target_from = src->charge_target_from;
++ dst->charge_addr_from = src->charge_addr_from;
++}
++
+ static int damon_lru_sort_apply_parameters(void)
+ {
+- struct damos *scheme;
++ struct damos *scheme, *hot_scheme, *cold_scheme;
++ struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
+ unsigned int hot_thres, cold_thres;
+ int err = 0;
+
+@@ -193,20 +205,35 @@ static int damon_lru_sort_apply_parameters(void)
+ if (err)
+ return err;
+
+- /* aggr_interval / sample_interval is the maximum nr_accesses */
+- hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+- damon_lru_sort_mon_attrs.sample_interval *
++ damon_for_each_scheme(scheme, ctx) {
++ if (!old_hot_scheme) {
++ old_hot_scheme = scheme;
++ continue;
++ }
++ old_cold_scheme = scheme;
++ }
++
++ hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ hot_thres_access_freq / 1000;
+- scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+- if (!scheme)
++ hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
++ if (!hot_scheme)
+ return -ENOMEM;
+- damon_set_schemes(ctx, &scheme, 1);
++ if (old_hot_scheme)
++ damon_lru_sort_copy_quota_status(&hot_scheme->quota,
++ &old_hot_scheme->quota);
+
+ cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
+- scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+- if (!scheme)
++ cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
++ if (!cold_scheme) {
++ damon_destroy_scheme(hot_scheme);
+ return -ENOMEM;
+- damon_add_scheme(ctx, scheme);
++ }
++ if (old_cold_scheme)
++ damon_lru_sort_copy_quota_status(&cold_scheme->quota,
++ &old_cold_scheme->quota);
++
++ damon_set_schemes(ctx, &hot_scheme, 1);
++ damon_add_scheme(ctx, cold_scheme);
+
+ return damon_set_region_biggest_system_ram_default(target,
+ &monitor_region_start,
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index ac1c3fa80f9847..d25d99cb5f2bb9 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -73,7 +73,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
+ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ struct damos *s)
+ {
+- unsigned int max_nr_accesses;
+ int freq_subscore;
+ unsigned int age_in_sec;
+ int age_in_log, age_subscore;
+@@ -81,8 +80,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ unsigned int age_weight = s->quota.weight_age;
+ int hotness;
+
+- max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
+- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
++ freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
++ damon_max_nr_accesses(&c->attrs);
+
+ age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
+ for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index 648d2a85523abe..eca9d000ecc53d 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -148,9 +148,20 @@ static struct damos *damon_reclaim_new_scheme(void)
+ &damon_reclaim_wmarks);
+ }
+
++static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
++ struct damos_quota *src)
++{
++ dst->total_charged_sz = src->total_charged_sz;
++ dst->total_charged_ns = src->total_charged_ns;
++ dst->charged_sz = src->charged_sz;
++ dst->charged_from = src->charged_from;
++ dst->charge_target_from = src->charge_target_from;
++ dst->charge_addr_from = src->charge_addr_from;
++}
++
+ static int damon_reclaim_apply_parameters(void)
+ {
+- struct damos *scheme;
++ struct damos *scheme, *old_scheme;
+ struct damos_filter *filter;
+ int err = 0;
+
+@@ -162,6 +173,11 @@ static int damon_reclaim_apply_parameters(void)
+ scheme = damon_reclaim_new_scheme();
+ if (!scheme)
+ return -ENOMEM;
++ if (!list_empty(&ctx->schemes)) {
++ damon_for_each_scheme(old_scheme, ctx)
++ damon_reclaim_copy_quota_status(&scheme->quota,
++ &old_scheme->quota);
++ }
+ if (skip_anon) {
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+ if (!filter) {
+diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
+index 527e7d17eb3b23..36dcd881a19c06 100644
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -126,6 +126,9 @@ damon_sysfs_scheme_regions_alloc(void)
+ struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
+ GFP_KERNEL);
+
++ if (!regions)
++ return NULL;
++
+ regions->kobj = (struct kobject){};
+ INIT_LIST_HEAD(&regions->regions_list);
+ regions->nr_regions = 0;
+@@ -1752,6 +1755,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
+ return 0;
+
+ region = damon_sysfs_scheme_region_alloc(r);
++ if (!region)
++ return 0;
+ list_add_tail(&region->list, &sysfs_regions->regions_list);
+ sysfs_regions->nr_regions++;
+ if (kobject_init_and_add(&region->kobj,
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index f60e56150feb69..b317f51dcc9876 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1150,58 +1150,75 @@ static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
+ return err;
+ }
+
+-/*
+- * Search a target in a context that corresponds to the sysfs target input.
+- *
+- * Return: pointer to the target if found, NULL if not found, or negative
+- * error code if the search failed.
+- */
+-static struct damon_target *damon_sysfs_existing_target(
+- struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
++static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
+ {
+- struct pid *pid;
+- struct damon_target *t;
++ struct pid *pid_new;
+
+- if (!damon_target_has_pid(ctx)) {
+- /* Up to only one target for paddr could exist */
+- damon_for_each_target(t, ctx)
+- return t;
+- return NULL;
++ pid_new = find_get_pid(pid);
++ if (!pid_new)
++ return -EINVAL;
++
++ if (pid_new == target->pid) {
++ put_pid(pid_new);
++ return 0;
+ }
+
+- /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
+- pid = find_get_pid(sys_target->pid);
+- if (!pid)
+- return ERR_PTR(-EINVAL);
+- damon_for_each_target(t, ctx) {
+- if (t->pid == pid) {
+- put_pid(pid);
+- return t;
+- }
++ put_pid(target->pid);
++ target->pid = pid_new;
++ return 0;
++}
++
++static int damon_sysfs_update_target(struct damon_target *target,
++ struct damon_ctx *ctx,
++ struct damon_sysfs_target *sys_target)
++{
++ int err = 0;
++
++ if (damon_target_has_pid(ctx)) {
++ err = damon_sysfs_update_target_pid(target, sys_target->pid);
++ if (err)
++ return err;
+ }
+- put_pid(pid);
+- return NULL;
++
++ /*
++ * Do monitoring target region boundary update only if one or more
++ * regions are set by the user. This is for keeping current monitoring
++ * target results and range easier, especially for dynamic monitoring
++ * target regions update ops like 'vaddr'.
++ */
++ if (sys_target->regions->nr)
++ err = damon_sysfs_set_regions(target, sys_target->regions);
++ return err;
+ }
+
+ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
+ struct damon_sysfs_targets *sysfs_targets)
+ {
+- int i, err;
++ struct damon_target *t, *next;
++ int i = 0, err;
+
+ /* Multiple physical address space monitoring targets makes no sense */
+ if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
+ return -EINVAL;
+
+- for (i = 0; i < sysfs_targets->nr; i++) {
++ damon_for_each_target_safe(t, next, ctx) {
++ if (i < sysfs_targets->nr) {
++ err = damon_sysfs_update_target(t, ctx,
++ sysfs_targets->targets_arr[i]);
++ if (err)
++ return err;
++ } else {
++ if (damon_target_has_pid(ctx))
++ put_pid(t->pid);
++ damon_destroy_target(t);
++ }
++ i++;
++ }
++
++ for (; i < sysfs_targets->nr; i++) {
+ struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
+- struct damon_target *t = damon_sysfs_existing_target(st, ctx);
+-
+- if (IS_ERR(t))
+- return PTR_ERR(t);
+- if (!t)
+- err = damon_sysfs_add_target(st, ctx);
+- else
+- err = damon_sysfs_set_regions(t, st->regions);
++
++ err = damon_sysfs_add_target(st, ctx);
+ if (err)
+ return err;
+ }
+diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
+index cf8a9fc5c9d1a6..530f01fedd3554 100644
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
+ * If this is too slow, it can be optimised to examine the maple
+ * tree gaps.
+ */
++ rcu_read_lock();
+ for_each_vma(vmi, vma) {
+ unsigned long gap;
+
+@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
+ next:
+ prev = vma;
+ }
++ rcu_read_unlock();
+
+ if (!sz_range(&second_gap) || !sz_range(&first_gap))
+ return -EINVAL;
+diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
+index 48e329ea5ba37c..68af76ca8bc992 100644
+--- a/mm/debug_vm_pgtable.c
++++ b/mm/debug_vm_pgtable.c
+@@ -39,22 +39,7 @@
+ * Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics
+ * expectations that are being validated here. All future changes in here
+ * or the documentation need to be in sync.
+- *
+- * On s390 platform, the lower 4 bits are used to identify given page table
+- * entry type. But these bits might affect the ability to clear entries with
+- * pxx_clear() because of how dynamic page table folding works on s390. So
+- * while loading up the entries do not change the lower 4 bits. It does not
+- * have affect any other platform. Also avoid the 62nd bit on ppc64 that is
+- * used to mark a pte entry.
+ */
+-#define S390_SKIP_MASK GENMASK(3, 0)
+-#if __BITS_PER_LONG == 64
+-#define PPC64_SKIP_MASK GENMASK(62, 62)
+-#else
+-#define PPC64_SKIP_MASK 0x0
+-#endif
+-#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
+-#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
+ #define RANDOM_NZVALUE GENMASK(7, 0)
+
+ struct pgtable_debug_args {
+@@ -362,6 +347,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
+ vaddr &= HPAGE_PUD_MASK;
+
+ pud = pfn_pud(args->pud_pfn, args->page_prot);
++ /*
++ * Some architectures have debug checks to make sure
++	 * huge pud mappings are only found with devmap entries.
++	 * For now, test with only devmap entries.
++ */
++ pud = pud_mkdevmap(pud);
+ set_pud_at(args->mm, vaddr, args->pudp, pud);
+ flush_dcache_page(page);
+ pudp_set_wrprotect(args->mm, vaddr, args->pudp);
+@@ -374,6 +365,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
+ WARN_ON(!pud_none(pud));
+ #endif /* __PAGETABLE_PMD_FOLDED */
+ pud = pfn_pud(args->pud_pfn, args->page_prot);
++ pud = pud_mkdevmap(pud);
+ pud = pud_wrprotect(pud);
+ pud = pud_mkclean(pud);
+ set_pud_at(args->mm, vaddr, args->pudp, pud);
+@@ -391,6 +383,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
+ #endif /* __PAGETABLE_PMD_FOLDED */
+
+ pud = pfn_pud(args->pud_pfn, args->page_prot);
++ pud = pud_mkdevmap(pud);
+ pud = pud_mkyoung(pud);
+ set_pud_at(args->mm, vaddr, args->pudp, pud);
+ flush_dcache_page(page);
+@@ -502,8 +495,7 @@ static void __init pud_clear_tests(struct pgtable_debug_args *args)
+ return;
+
+ pr_debug("Validating PUD clear\n");
+- pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
+- WRITE_ONCE(*args->pudp, pud);
++ WARN_ON(pud_none(pud));
+ pud_clear(args->pudp);
+ pud = READ_ONCE(*args->pudp);
+ WARN_ON(!pud_none(pud));
+@@ -539,8 +531,7 @@ static void __init p4d_clear_tests(struct pgtable_debug_args *args)
+ return;
+
+ pr_debug("Validating P4D clear\n");
+- p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
+- WRITE_ONCE(*args->p4dp, p4d);
++ WARN_ON(p4d_none(p4d));
+ p4d_clear(args->p4dp);
+ p4d = READ_ONCE(*args->p4dp);
+ WARN_ON(!p4d_none(p4d));
+@@ -573,8 +564,7 @@ static void __init pgd_clear_tests(struct pgtable_debug_args *args)
+ return;
+
+ pr_debug("Validating PGD clear\n");
+- pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
+- WRITE_ONCE(*args->pgdp, pgd);
++ WARN_ON(pgd_none(pgd));
+ pgd_clear(args->pgdp);
+ pgd = READ_ONCE(*args->pgdp);
+ WARN_ON(!pgd_none(pgd));
+@@ -625,10 +615,8 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args)
+ if (WARN_ON(!args->ptep))
+ return;
+
+-#ifndef CONFIG_RISCV
+- pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
+-#endif
+ set_pte_at(args->mm, args->vaddr, args->ptep, pte);
++ WARN_ON(pte_none(pte));
+ flush_dcache_page(page);
+ barrier();
+ ptep_clear(args->mm, args->vaddr, args->ptep);
+@@ -641,8 +629,7 @@ static void __init pmd_clear_tests(struct pgtable_debug_args *args)
+ pmd_t pmd = READ_ONCE(*args->pmdp);
+
+ pr_debug("Validating PMD clear\n");
+- pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
+- WRITE_ONCE(*args->pmdp, pmd);
++ WARN_ON(pmd_none(pmd));
+ pmd_clear(args->pmdp);
+ pmd = READ_ONCE(*args->pmdp);
+ WARN_ON(!pmd_none(pmd));
+diff --git a/mm/filemap.c b/mm/filemap.c
+index f0a15ce1bd1ba1..e6c112f3a211fe 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -846,6 +846,8 @@ noinline int __filemap_add_folio(struct address_space *mapping,
+ {
+ XA_STATE(xas, &mapping->i_pages, index);
+ int huge = folio_test_hugetlb(folio);
++ void *alloced_shadow = NULL;
++ int alloced_order = 0;
+ bool charged = false;
+ long nr = 1;
+
+@@ -868,13 +870,10 @@ noinline int __filemap_add_folio(struct address_space *mapping,
+ folio->mapping = mapping;
+ folio->index = xas.xa_index;
+
+- do {
+- unsigned int order = xa_get_order(xas.xa, xas.xa_index);
++ for (;;) {
++ int order = -1, split_order = 0;
+ void *entry, *old = NULL;
+
+- if (order > folio_order(folio))
+- xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+- order, gfp);
+ xas_lock_irq(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ old = entry;
+@@ -882,19 +881,33 @@ noinline int __filemap_add_folio(struct address_space *mapping,
+ xas_set_err(&xas, -EEXIST);
+ goto unlock;
+ }
++ /*
++ * If a larger entry exists,
++ * it will be the first and only entry iterated.
++ */
++ if (order == -1)
++ order = xas_get_order(&xas);
++ }
++
++ /* entry may have changed before we re-acquire the lock */
++ if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
++ xas_destroy(&xas);
++ alloced_order = 0;
+ }
+
+ if (old) {
+- if (shadowp)
+- *shadowp = old;
+- /* entry may have been split before we acquired lock */
+- order = xa_get_order(xas.xa, xas.xa_index);
+- if (order > folio_order(folio)) {
++ if (order > 0 && order > folio_order(folio)) {
+ /* How to handle large swap entries? */
+ BUG_ON(shmem_mapping(mapping));
++ if (!alloced_order) {
++ split_order = order;
++ goto unlock;
++ }
+ xas_split(&xas, old, order);
+ xas_reset(&xas);
+ }
++ if (shadowp)
++ *shadowp = old;
+ }
+
+ xas_store(&xas, folio);
+@@ -910,9 +923,24 @@ noinline int __filemap_add_folio(struct address_space *mapping,
+ __lruvec_stat_mod_folio(folio,
+ NR_FILE_THPS, nr);
+ }
++
+ unlock:
+ xas_unlock_irq(&xas);
+- } while (xas_nomem(&xas, gfp));
++
++ /* split needed, alloc here and retry. */
++ if (split_order) {
++ xas_split_alloc(&xas, old, split_order, gfp);
++ if (xas_error(&xas))
++ goto error;
++ alloced_shadow = old;
++ alloced_order = split_order;
++ xas_reset(&xas);
++ continue;
++ }
++
++ if (!xas_nomem(&xas, gfp))
++ break;
++ }
+
+ if (xas_error(&xas))
+ goto error;
+@@ -1831,7 +1859,7 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
+ if (!folio || xa_is_value(folio))
+ goto out;
+
+- if (!folio_try_get_rcu(folio))
++ if (!folio_try_get(folio))
+ goto repeat;
+
+ if (unlikely(folio != xas_reload(&xas))) {
+@@ -1987,7 +2015,7 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
+ if (!folio || xa_is_value(folio))
+ return folio;
+
+- if (!folio_try_get_rcu(folio))
++ if (!folio_try_get(folio))
+ goto reset;
+
+ if (unlikely(folio != xas_reload(xas))) {
+@@ -2205,7 +2233,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
+ if (xa_is_value(folio))
+ goto update_start;
+
+- if (!folio_try_get_rcu(folio))
++ if (!folio_try_get(folio))
+ goto retry;
+
+ if (unlikely(folio != xas_reload(&xas)))
+@@ -2340,7 +2368,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
+ break;
+ if (xa_is_sibling(folio))
+ break;
+- if (!folio_try_get_rcu(folio))
++ if (!folio_try_get(folio))
+ goto retry;
+
+ if (unlikely(folio != xas_reload(&xas)))
+@@ -2666,6 +2694,15 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
+ goto put_folios;
+ end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
+
++ /*
++ * Pairs with a barrier in
++ * block_write_end()->mark_buffer_dirty() or other page
++ * dirtying routines like iomap_write_end() to ensure
++ * changes to page contents are visible before we see
++ * increased inode size.
++ */
++ smp_rmb();
++
+ /*
+ * Once we start copying data, we don't want to be touching any
+ * cachelines that might be contended:
+@@ -3148,7 +3185,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* Use the readahead code, even if readahead is disabled */
+- if (vm_flags & VM_HUGEPAGE) {
++ if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
+ ra->size = HPAGE_PMD_NR;
+@@ -3422,7 +3459,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
+ }
+ }
+
+- if (pmd_none(*vmf->pmd))
++ if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
+ pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
+
+ return false;
+@@ -3443,7 +3480,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
+ continue;
+ if (folio_test_locked(folio))
+ continue;
+- if (!folio_try_get_rcu(folio))
++ if (!folio_try_get(folio))
+ continue;
+ /* Has the page moved or been split? */
+ if (unlikely(folio != xas_reload(xas)))
+@@ -4150,28 +4187,40 @@ static void filemap_cachestat(struct address_space *mapping,
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, last_index) {
++ int order;
+ unsigned long nr_pages;
+ pgoff_t folio_first_index, folio_last_index;
+
++ /*
++ * Don't deref the folio. It is not pinned, and might
++ * get freed (and reused) underneath us.
++ *
++ * We *could* pin it, but that would be expensive for
++ * what should be a fast and lightweight syscall.
++ *
++ * Instead, derive all information of interest from
++ * the rcu-protected xarray.
++ */
++
+ if (xas_retry(&xas, folio))
+ continue;
+
++ order = xa_get_order(xas.xa, xas.xa_index);
++ nr_pages = 1 << order;
++ folio_first_index = round_down(xas.xa_index, 1 << order);
++ folio_last_index = folio_first_index + nr_pages - 1;
++
++ /* Folios might straddle the range boundaries, only count covered pages */
++ if (folio_first_index < first_index)
++ nr_pages -= first_index - folio_first_index;
++
++ if (folio_last_index > last_index)
++ nr_pages -= folio_last_index - last_index;
++
+ if (xa_is_value(folio)) {
+ /* page is evicted */
+ void *shadow = (void *)folio;
+ bool workingset; /* not used */
+- int order = xa_get_order(xas.xa, xas.xa_index);
+-
+- nr_pages = 1 << order;
+- folio_first_index = round_down(xas.xa_index, 1 << order);
+- folio_last_index = folio_first_index + nr_pages - 1;
+-
+- /* Folios might straddle the range boundaries, only count covered pages */
+- if (folio_first_index < first_index)
+- nr_pages -= first_index - folio_first_index;
+-
+- if (folio_last_index > last_index)
+- nr_pages -= folio_last_index - last_index;
+
+ cs->nr_evicted += nr_pages;
+
+@@ -4180,7 +4229,23 @@ static void filemap_cachestat(struct address_space *mapping,
+ /* shmem file - in swap cache */
+ swp_entry_t swp = radix_to_swp_entry(folio);
+
++ /* swapin error results in poisoned entry */
++ if (non_swap_entry(swp))
++ goto resched;
++
++ /*
++ * Getting a swap entry from the shmem
++ * inode means we beat
++ * shmem_unuse(). rcu_read_lock()
++ * ensures swapoff waits for us before
++ * freeing the swapper space. However,
++ * we can race with swapping and
++ * invalidation, so there might not be
++ * a shadow in the swapcache (yet).
++ */
+ shadow = get_shadow_from_swap_cache(swp);
++ if (!shadow)
++ goto resched;
+ }
+ #endif
+ if (workingset_test_recent(shadow, true, &workingset))
+@@ -4189,24 +4254,13 @@ static void filemap_cachestat(struct address_space *mapping,
+ goto resched;
+ }
+
+- nr_pages = folio_nr_pages(folio);
+- folio_first_index = folio_pgoff(folio);
+- folio_last_index = folio_first_index + nr_pages - 1;
+-
+- /* Folios might straddle the range boundaries, only count covered pages */
+- if (folio_first_index < first_index)
+- nr_pages -= first_index - folio_first_index;
+-
+- if (folio_last_index > last_index)
+- nr_pages -= folio_last_index - last_index;
+-
+ /* page is in cache */
+ cs->nr_cache += nr_pages;
+
+- if (folio_test_dirty(folio))
++ if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
+ cs->nr_dirty += nr_pages;
+
+- if (folio_test_writeback(folio))
++ if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
+ cs->nr_writeback += nr_pages;
+
+ resched:
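
The __filemap_add_folio() loop above moves the split allocation out from under the xarray lock: detect the need while locked, drop the lock to allocate, then revalidate on the next pass because the entry may have changed meanwhile. A hedged userspace sketch of that shape, with a mutex standing in for the xa lock and invented state/needs_scratch helpers:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;				/* guarded by lock */

static bool needs_scratch(int s) { return s != 0; }

static int update(int value)
{
	void *scratch = NULL;
	int seen = 0;				/* state the scratch was sized for */

	for (;;) {
		pthread_mutex_lock(&lock);
		/* state may have changed while we slept in malloc() */
		if (scratch && state != seen) {
			free(scratch);
			scratch = NULL;
		}
		if (needs_scratch(state) && !scratch) {
			seen = state;
			pthread_mutex_unlock(&lock);
			scratch = malloc(64);	/* may sleep: do it unlocked */
			if (!scratch)
				return -1;
			continue;		/* retry, allocation in hand */
		}
		state = value;			/* the actual update */
		pthread_mutex_unlock(&lock);
		free(scratch);
		return 0;
	}
}

int main(void)
{
	state = 1;
	return update(0);
}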
+diff --git a/mm/gup.c b/mm/gup.c
+index 2f8a2d89fde19d..fdd75384160d8d 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -76,7 +76,7 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
+ folio = page_folio(page);
+ if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
+ return NULL;
+- if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
++ if (unlikely(!folio_ref_try_add(folio, refs)))
+ return NULL;
+
+ /*
+@@ -97,95 +97,6 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
+ return folio;
+ }
+
+-/**
+- * try_grab_folio() - Attempt to get or pin a folio.
+- * @page: pointer to page to be grabbed
+- * @refs: the value to (effectively) add to the folio's refcount
+- * @flags: gup flags: these are the FOLL_* flag values.
+- *
+- * "grab" names in this file mean, "look at flags to decide whether to use
+- * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
+- *
+- * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
+- * same time. (That's true throughout the get_user_pages*() and
+- * pin_user_pages*() APIs.) Cases:
+- *
+- * FOLL_GET: folio's refcount will be incremented by @refs.
+- *
+- * FOLL_PIN on large folios: folio's refcount will be incremented by
+- * @refs, and its pincount will be incremented by @refs.
+- *
+- * FOLL_PIN on single-page folios: folio's refcount will be incremented by
+- * @refs * GUP_PIN_COUNTING_BIAS.
+- *
+- * Return: The folio containing @page (with refcount appropriately
+- * incremented) for success, or NULL upon failure. If neither FOLL_GET
+- * nor FOLL_PIN was set, that's considered failure, and furthermore,
+- * a likely bug in the caller, so a warning is also emitted.
+- */
+-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
+-{
+- struct folio *folio;
+-
+- if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
+- return NULL;
+-
+- if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
+- return NULL;
+-
+- if (flags & FOLL_GET)
+- return try_get_folio(page, refs);
+-
+- /* FOLL_PIN is set */
+-
+- /*
+- * Don't take a pin on the zero page - it's not going anywhere
+- * and it is used in a *lot* of places.
+- */
+- if (is_zero_page(page))
+- return page_folio(page);
+-
+- folio = try_get_folio(page, refs);
+- if (!folio)
+- return NULL;
+-
+- /*
+- * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
+- * right zone, so fail and let the caller fall back to the slow
+- * path.
+- */
+- if (unlikely((flags & FOLL_LONGTERM) &&
+- !folio_is_longterm_pinnable(folio))) {
+- if (!put_devmap_managed_page_refs(&folio->page, refs))
+- folio_put_refs(folio, refs);
+- return NULL;
+- }
+-
+- /*
+- * When pinning a large folio, use an exact count to track it.
+- *
+- * However, be sure to *also* increment the normal folio
+- * refcount field at least once, so that the folio really
+- * is pinned. That's why the refcount from the earlier
+- * try_get_folio() is left intact.
+- */
+- if (folio_test_large(folio))
+- atomic_add(refs, &folio->_pincount);
+- else
+- folio_ref_add(folio,
+- refs * (GUP_PIN_COUNTING_BIAS - 1));
+- /*
+- * Adjust the pincount before re-checking the PTE for changes.
+- * This is essentially a smp_mb() and is paired with a memory
+- * barrier in page_try_share_anon_rmap().
+- */
+- smp_mb__after_atomic();
+-
+- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
+-
+- return folio;
+-}
+-
+ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
+ {
+ if (flags & FOLL_PIN) {
+@@ -203,58 +114,59 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
+ }
+
+ /**
+- * try_grab_page() - elevate a page's refcount by a flag-dependent amount
+- * @page: pointer to page to be grabbed
+- * @flags: gup flags: these are the FOLL_* flag values.
++ * try_grab_folio() - add a folio's refcount by a flag-dependent amount
++ * @folio: pointer to folio to be grabbed
++ * @refs: the value to (effectively) add to the folio's refcount
++ * @flags: gup flags: these are the FOLL_* flag values
+ *
+ * This might not do anything at all, depending on the flags argument.
+ *
+ * "grab" names in this file mean, "look at flags to decide whether to use
+- * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
++ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
+ *
+ * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
+- * time. Cases: please see the try_grab_folio() documentation, with
+- * "refs=1".
++ * time.
+ *
+ * Return: 0 for success, or if no action was required (if neither FOLL_PIN
+ * nor FOLL_GET was set, nothing is done). A negative error code for failure:
+ *
+- * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not
++ * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not
+ * be grabbed.
++ *
++ * It is called when we have a stable reference on the folio, typically in
++ * the GUP slow path.
+ */
+-int __must_check try_grab_page(struct page *page, unsigned int flags)
++int __must_check try_grab_folio(struct folio *folio, int refs,
++ unsigned int flags)
+ {
+- struct folio *folio = page_folio(page);
+-
+ if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
+ return -ENOMEM;
+
+- if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
++ if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
+ return -EREMOTEIO;
+
+ if (flags & FOLL_GET)
+- folio_ref_inc(folio);
++ folio_ref_add(folio, refs);
+ else if (flags & FOLL_PIN) {
+ /*
+ * Don't take a pin on the zero page - it's not going anywhere
+ * and it is used in a *lot* of places.
+ */
+- if (is_zero_page(page))
++ if (is_zero_folio(folio))
+ return 0;
+
+ /*
+- * Similar to try_grab_folio(): be sure to *also*
+- * increment the normal page refcount field at least once,
++ * Increment the normal page refcount field at least once,
+ * so that the page really is pinned.
+ */
+ if (folio_test_large(folio)) {
+- folio_ref_add(folio, 1);
+- atomic_add(1, &folio->_pincount);
++ folio_ref_add(folio, refs);
++ atomic_add(refs, &folio->_pincount);
+ } else {
+- folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
++ folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
+ }
+
+- node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
++ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
+ }
+
+ return 0;
+@@ -647,8 +559,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
+ VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+ !PageAnonExclusive(page), page);
+
+- /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
+- ret = try_grab_page(page, flags);
++ /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
++ ret = try_grab_folio(page_folio(page), 1, flags);
+ if (unlikely(ret)) {
+ page = ERR_PTR(ret);
+ goto out;
+@@ -899,7 +811,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
+ goto unmap;
+ *page = pte_page(entry);
+ }
+- ret = try_grab_page(*page, gup_flags);
++ ret = try_grab_folio(page_folio(*page), 1, gup_flags);
+ if (unlikely(ret))
+ goto unmap;
+ out:
+@@ -1204,6 +1116,22 @@ static long __get_user_pages(struct mm_struct *mm,
+
+ /* first iteration or cross vma bound */
+ if (!vma || start >= vma->vm_end) {
++ /*
++ * MADV_POPULATE_(READ|WRITE) wants to handle VMA
++ * lookups+error reporting differently.
++ */
++ if (gup_flags & FOLL_MADV_POPULATE) {
++ vma = vma_lookup(mm, start);
++ if (!vma) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ if (check_vma_flags(vma, gup_flags)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ goto retry;
++ }
+ vma = gup_vma_lookup(mm, start);
+ if (!vma && in_gate_area(mm, start)) {
+ ret = get_gate_page(mm, start & PAGE_MASK,
+@@ -1286,20 +1214,19 @@ static long __get_user_pages(struct mm_struct *mm,
+ * pages.
+ */
+ if (page_increm > 1) {
+- struct folio *folio;
++ struct folio *folio = page_folio(page);
+
+ /*
+ * Since we already hold refcount on the
+ * large folio, this should never fail.
+ */
+- folio = try_grab_folio(page, page_increm - 1,
+- foll_flags);
+- if (WARN_ON_ONCE(!folio)) {
++ if (try_grab_folio(folio, page_increm - 1,
++ foll_flags)) {
+ /*
+ * Release the 1st page ref if the
+ * folio is problematic, fail hard.
+ */
+- gup_put_folio(page_folio(page), 1,
++ gup_put_folio(folio, 1,
+ foll_flags);
+ ret = -EFAULT;
+ goto out;
+@@ -1670,35 +1597,35 @@ long populate_vma_page_range(struct vm_area_struct *vma,
+ }
+
+ /*
+- * faultin_vma_page_range() - populate (prefault) page tables inside the
+- * given VMA range readable/writable
++ * faultin_page_range() - populate (prefault) page tables inside the
++ * given range readable/writable
+ *
+ * This takes care of mlocking the pages, too, if VM_LOCKED is set.
+ *
+- * @vma: target vma
++ * @mm: the mm to populate page tables in
+ * @start: start address
+ * @end: end address
+ * @write: whether to prefault readable or writable
+ * @locked: whether the mmap_lock is still held
+ *
+- * Returns either number of processed pages in the vma, or a negative error
+- * code on error (see __get_user_pages()).
++ * Returns either the number of processed pages in the MM, or a negative error
++ * code on error (see __get_user_pages()). Note that this function reports
++ * errors related to VMAs, such as incompatible mappings, as expected by
++ * MADV_POPULATE_(READ|WRITE).
+ *
+- * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
+- * covered by the VMA. If it's released, *@locked will be set to 0.
++ * The range must be page-aligned.
++ *
++ * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
+ */
+-long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
+- unsigned long end, bool write, int *locked)
++long faultin_page_range(struct mm_struct *mm, unsigned long start,
++ unsigned long end, bool write, int *locked)
+ {
+- struct mm_struct *mm = vma->vm_mm;
+ unsigned long nr_pages = (end - start) / PAGE_SIZE;
+ int gup_flags;
+ long ret;
+
+ VM_BUG_ON(!PAGE_ALIGNED(start));
+ VM_BUG_ON(!PAGE_ALIGNED(end));
+- VM_BUG_ON_VMA(start < vma->vm_start, vma);
+- VM_BUG_ON_VMA(end > vma->vm_end, vma);
+ mmap_assert_locked(mm);
+
+ /*
+@@ -1710,19 +1637,13 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
+ * a poisoned page.
+ * !FOLL_FORCE: Require proper access permissions.
+ */
+- gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
++ gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
++ FOLL_MADV_POPULATE;
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
+- /*
+- * We want to report -EINVAL instead of -EFAULT for any permission
+- * problems or incompatible mappings.
+- */
+- if (check_vma_flags(vma, gup_flags))
+- return -EINVAL;
+-
+- ret = __get_user_pages(mm, start, nr_pages, gup_flags,
+- NULL, locked);
++ ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
++ gup_flags);
+ lru_add_drain();
+ return ret;
+ }
+@@ -2227,12 +2148,11 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
+ /*
+ * These flags not allowed to be specified externally to the gup
+ * interfaces:
+- * - FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
++ * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
+ * - FOLL_REMOTE is internal only and used on follow_page()
+ * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
+ */
+- if (WARN_ON_ONCE(gup_flags & (FOLL_PIN | FOLL_TRIED | FOLL_UNLOCKABLE |
+- FOLL_REMOTE | FOLL_FAST_ONLY)))
++ if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
+ return false;
+
+ gup_flags |= to_set;
+@@ -2532,6 +2452,102 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+ }
+ }
+
++/**
++ * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
++ * @page: pointer to page to be grabbed
++ * @refs: the value to (effectively) add to the folio's refcount
++ * @flags: gup flags: these are the FOLL_* flag values.
++ *
++ * "grab" names in this file mean, "look at flags to decide whether to use
++ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
++ *
++ * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
++ * same time. (That's true throughout the get_user_pages*() and
++ * pin_user_pages*() APIs.) Cases:
++ *
++ * FOLL_GET: folio's refcount will be incremented by @refs.
++ *
++ * FOLL_PIN on large folios: folio's refcount will be incremented by
++ * @refs, and its pincount will be incremented by @refs.
++ *
++ * FOLL_PIN on single-page folios: folio's refcount will be incremented by
++ * @refs * GUP_PIN_COUNTING_BIAS.
++ *
++ * Return: The folio containing @page (with refcount appropriately
++ * incremented) for success, or NULL upon failure. If neither FOLL_GET
++ * nor FOLL_PIN was set, that's considered failure, and furthermore,
++ * a likely bug in the caller, so a warning is also emitted.
++ *
++ * It elevates the folio refcount with an add-unless-zero operation and must
++ * be called in the fast path only.
++ */
++static struct folio *try_grab_folio_fast(struct page *page, int refs,
++ unsigned int flags)
++{
++ struct folio *folio;
++
++	/* Warn if this is not called from the fast GUP path */
++ VM_WARN_ON_ONCE(!irqs_disabled());
++
++ if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
++ return NULL;
++
++ if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
++ return NULL;
++
++ if (flags & FOLL_GET)
++ return try_get_folio(page, refs);
++
++ /* FOLL_PIN is set */
++
++ /*
++ * Don't take a pin on the zero page - it's not going anywhere
++ * and it is used in a *lot* of places.
++ */
++ if (is_zero_page(page))
++ return page_folio(page);
++
++ folio = try_get_folio(page, refs);
++ if (!folio)
++ return NULL;
++
++ /*
++ * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
++ * right zone, so fail and let the caller fall back to the slow
++ * path.
++ */
++ if (unlikely((flags & FOLL_LONGTERM) &&
++ !folio_is_longterm_pinnable(folio))) {
++ if (!put_devmap_managed_page_refs(&folio->page, refs))
++ folio_put_refs(folio, refs);
++ return NULL;
++ }
++
++ /*
++ * When pinning a large folio, use an exact count to track it.
++ *
++ * However, be sure to *also* increment the normal folio
++ * refcount field at least once, so that the folio really
++ * is pinned. That's why the refcount from the earlier
++ * try_get_folio() is left intact.
++ */
++ if (folio_test_large(folio))
++ atomic_add(refs, &folio->_pincount);
++ else
++ folio_ref_add(folio,
++ refs * (GUP_PIN_COUNTING_BIAS - 1));
++ /*
++ * Adjust the pincount before re-checking the PTE for changes.
++ * This is essentially a smp_mb() and is paired with a memory
++ * barrier in folio_try_share_anon_rmap_*().
++ */
++ smp_mb__after_atomic();
++
++ node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
++
++ return folio;
++}
++
+ #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
+ /*
+ * Fast-gup relies on pte change detection to avoid concurrent pgtable
+@@ -2596,7 +2612,7 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+
+- folio = try_grab_folio(page, 1, flags);
++ folio = try_grab_folio_fast(page, 1, flags);
+ if (!folio)
+ goto pte_unmap;
+
+@@ -2690,7 +2706,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
+
+ SetPageReferenced(page);
+ pages[*nr] = page;
+- if (unlikely(try_grab_page(page, flags))) {
++ if (unlikely(try_grab_folio(page_folio(page), 1, flags))) {
+ undo_dev_pagemap(nr, nr_start, flags, pages);
+ break;
+ }
+@@ -2799,7 +2815,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
+ refs = record_subpages(page, addr, end, pages + *nr);
+
+- folio = try_grab_folio(page, refs, flags);
++ folio = try_grab_folio_fast(page, refs, flags);
+ if (!folio)
+ return 0;
+
+@@ -2870,7 +2886,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
+ page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
+ refs = record_subpages(page, addr, end, pages + *nr);
+
+- folio = try_grab_folio(page, refs, flags);
++ folio = try_grab_folio_fast(page, refs, flags);
+ if (!folio)
+ return 0;
+
+@@ -2914,7 +2930,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
+ page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
+ refs = record_subpages(page, addr, end, pages + *nr);
+
+- folio = try_grab_folio(page, refs, flags);
++ folio = try_grab_folio_fast(page, refs, flags);
+ if (!folio)
+ return 0;
+
+@@ -2954,7 +2970,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
+ page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ refs = record_subpages(page, addr, end, pages + *nr);
+
+- folio = try_grab_folio(page, refs, flags);
++ folio = try_grab_folio_fast(page, refs, flags);
+ if (!folio)
+ return 0;
+
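
The try_grab_folio()/try_grab_folio_fast() split above keeps the existing pin accounting rules: a small folio encodes pins in its refcount via GUP_PIN_COUNTING_BIAS (1024 in the kernel), while a large folio keeps an exact _pincount beside ordinary references. A sketch of that arithmetic over an invented toy_folio struct:

#include <assert.h>
#include <stdbool.h>

#define GUP_PIN_COUNTING_BIAS 1024

struct toy_folio {
	int refcount;
	int pincount;		/* tracked separately only for large folios */
	bool large;
};

static void pin(struct toy_folio *f, int refs)
{
	if (f->large) {
		/* large folio: exact pin count, plus normal references */
		f->refcount += refs;
		f->pincount += refs;
	} else {
		/* single page: pins are encoded in the refcount itself */
		f->refcount += refs * GUP_PIN_COUNTING_BIAS;
	}
}

static bool maybe_dma_pinned(const struct toy_folio *f)
{
	return f->large ? f->pincount > 0
			: f->refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
	struct toy_folio small = { .refcount = 1 };

	pin(&small, 1);
	assert(small.refcount == 1 + GUP_PIN_COUNTING_BIAS);
	assert(maybe_dma_pinned(&small));
	return 0;
}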
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 064fbd90822b49..9aea11b1477c82 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -37,6 +37,7 @@
+ #include <linux/page_owner.h>
+ #include <linux/sched/sysctl.h>
+ #include <linux/memory-tiers.h>
++#include <linux/compat.h>
+
+ #include <asm/tlb.h>
+ #include <asm/pgalloc.h>
+@@ -601,6 +602,9 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
+ loff_t off_align = round_up(off, size);
+ unsigned long len_pad, ret;
+
++ if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
++ return 0;
++
+ if (off_end <= off_align || (off_end - off_align) < size)
+ return 0;
+
+@@ -1052,7 +1056,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+ if (!*pgmap)
+ return ERR_PTR(-EFAULT);
+ page = pfn_to_page(pfn);
+- ret = try_grab_page(page, flags);
++ ret = try_grab_folio(page_folio(page), 1, flags);
+ if (ret)
+ page = ERR_PTR(ret);
+
+@@ -1210,7 +1214,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+ return ERR_PTR(-EFAULT);
+ page = pfn_to_page(pfn);
+
+- ret = try_grab_page(page, flags);
++ ret = try_grab_folio(page_folio(page), 1, flags);
+ if (ret)
+ page = ERR_PTR(ret);
+
+@@ -1471,7 +1475,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+ !PageAnonExclusive(page), page);
+
+- ret = try_grab_page(page, flags);
++ ret = try_grab_folio(page_folio(page), 1, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+@@ -1500,7 +1504,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ spin_unlock(vmf->ptl);
+- goto out;
++ return 0;
+ }
+
+ pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+@@ -1544,23 +1548,16 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ if (migrated) {
+ flags |= TNF_MIGRATED;
+ page_nid = target_nid;
+- } else {
+- flags |= TNF_MIGRATE_FAIL;
+- vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+- spin_unlock(vmf->ptl);
+- goto out;
+- }
+- goto out_map;
++ task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
++ return 0;
+ }
+
+-out:
+- if (page_nid != NUMA_NO_NODE)
+- task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
+- flags);
+-
+- return 0;
+-
++ flags |= TNF_MIGRATE_FAIL;
++ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
++ if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
++ spin_unlock(vmf->ptl);
++ return 0;
++ }
+ out_map:
+ /* Restore the PMD */
+ pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+@@ -1570,7 +1567,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+ update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+ spin_unlock(vmf->ptl);
+- goto out;
++
++ if (page_nid != NUMA_NO_NODE)
++ task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
++ return 0;
+ }
+
+ /*
+@@ -2125,32 +2125,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ return __split_huge_zero_page_pmd(vma, haddr, pmd);
+ }
+
+- /*
+- * Up to this point the pmd is present and huge and userland has the
+- * whole access to the hugepage during the split (which happens in
+- * place). If we overwrite the pmd with the not-huge version pointing
+- * to the pte here (which of course we could if all CPUs were bug
+- * free), userland could trigger a small page size TLB miss on the
+- * small sized TLB while the hugepage TLB entry is still established in
+- * the huge TLB. Some CPU doesn't like that.
+- * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
+- * 383 on page 105. Intel should be safe but is also warns that it's
+- * only safe if the permission and cache attributes of the two entries
+- * loaded in the two TLB is identical (which should be the case here).
+- * But it is generally safer to never allow small and huge TLB entries
+- * for the same virtual address to be loaded simultaneously. So instead
+- * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
+- * current pmd notpresent (atomically because here the pmd_trans_huge
+- * must remain set at all times on the pmd until the split is complete
+- * for this pmd), then we flush the SMP TLB and finally we write the
+- * non-huge version of the pmd entry with pmd_populate.
+- */
+- old_pmd = pmdp_invalidate(vma, haddr, pmd);
+-
+- pmd_migration = is_pmd_migration_entry(old_pmd);
++ pmd_migration = is_pmd_migration_entry(*pmd);
+ if (unlikely(pmd_migration)) {
+ swp_entry_t entry;
+
++ old_pmd = *pmd;
+ entry = pmd_to_swp_entry(old_pmd);
+ page = pfn_swap_entry_to_page(entry);
+ write = is_writable_migration_entry(entry);
+@@ -2161,6 +2140,30 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ uffd_wp = pmd_swp_uffd_wp(old_pmd);
+ } else {
++ /*
++ * Up to this point the pmd is present and huge and userland has
++ * the whole access to the hugepage during the split (which
++ * happens in place). If we overwrite the pmd with the not-huge
++ * version pointing to the pte here (which of course we could if
++ * all CPUs were bug free), userland could trigger a small page
++ * size TLB miss on the small sized TLB while the hugepage TLB
++ * entry is still established in the huge TLB. Some CPU doesn't
++	 * entry is still established in the huge TLB. Some CPUs don't
++	 * like that. See
++	 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
++	 * 383 on page 105. Intel should be safe but also warns that
++	 * it's only safe if the permission and cache attributes of the
++	 * two entries loaded in the two TLBs are identical (which should
++ * small and huge TLB entries for the same virtual address to be
++ * loaded simultaneously. So instead of doing "pmd_populate();
++ * flush_pmd_tlb_range();" we first mark the current pmd
++ * notpresent (atomically because here the pmd_trans_huge must
++ * remain set at all times on the pmd until the split is
++ * complete for this pmd), then we flush the SMP TLB and finally
++ * we write the non-huge version of the pmd entry with
++ * pmd_populate.
++ */
++ old_pmd = pmdp_invalidate(vma, haddr, pmd);
+ page = pmd_page(old_pmd);
+ if (pmd_dirty(old_pmd)) {
+ dirty = true;
+@@ -2737,13 +2740,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ int nr = folio_nr_pages(folio);
+
+ xas_split(&xas, folio, folio_order(folio));
+- if (folio_test_swapbacked(folio)) {
+- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
+- -nr);
+- } else {
+- __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
+- -nr);
+- filemap_nr_thps_dec(mapping);
++ if (folio_test_pmd_mappable(folio)) {
++ if (folio_test_swapbacked(folio)) {
++ __lruvec_stat_mod_folio(folio,
++ NR_SHMEM_THPS, -nr);
++ } else {
++ __lruvec_stat_mod_folio(folio,
++ NR_FILE_THPS, -nr);
++ filemap_nr_thps_dec(mapping);
++ }
+ }
+ }
+
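
The in_compat_syscall() bail-out added to __thp_get_unmapped_area() above guards an alignment computation: padding is only worth asking for when a size-aligned window fits inside [off, off + len). A sketch of that check, with round_up written as the kernel defines it for power-of-two sizes:

#include <stdio.h>

#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

/* Can a size-aligned window fit inside [off, off + len)? */
static int fits_aligned(unsigned long off, unsigned long len,
			unsigned long size)
{
	unsigned long off_end = off + len;
	unsigned long off_align = round_up(off, size);

	return off_end > off_align && (off_end - off_align) >= size;
}

int main(void)
{
	/* 2 MiB huge pages */
	printf("%d\n", fits_aligned(0x1000, 4ul << 20, 2ul << 20)); /* 1 */
	printf("%d\n", fits_aligned(0x1000, 1ul << 20, 2ul << 20)); /* 0 */
	return 0;
}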
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 1301ba7b2c9a90..0acb04c3e95291 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1189,6 +1189,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
+ return (get_vma_private_data(vma) & flag) != 0;
+ }
+
++bool __vma_private_lock(struct vm_area_struct *vma)
++{
++ return !(vma->vm_flags & VM_MAYSHARE) &&
++ get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
++ is_vma_resv_set(vma, HPAGE_RESV_OWNER);
++}
++
+ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+ {
+ VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+@@ -1623,7 +1630,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
+ {
+ lockdep_assert_held(&hugetlb_lock);
+
+- folio_clear_hugetlb(folio);
++ __folio_clear_hugetlb(folio);
+ }
+
+ /*
+@@ -1710,7 +1717,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
+ h->surplus_huge_pages_node[nid]++;
+ }
+
+- folio_set_hugetlb(folio);
++ __folio_set_hugetlb(folio);
+ folio_change_private(folio, NULL);
+ /*
+ * We have to set hugetlb_vmemmap_optimized again as above
+@@ -1740,8 +1747,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
+ static void __update_and_free_hugetlb_folio(struct hstate *h,
+ struct folio *folio)
+ {
+- bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
+-
+ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ return;
+
+@@ -1764,23 +1769,23 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
+ return;
+ }
+
+- /*
+- * Move PageHWPoison flag from head page to the raw error pages,
+- * which makes any healthy subpages reusable.
+- */
+- if (unlikely(folio_test_hwpoison(folio)))
+- folio_clear_hugetlb_hwpoison(folio);
+-
+ /*
+ * If vmemmap pages were allocated above, then we need to clear the
+ * hugetlb destructor under the hugetlb lock.
+ */
+- if (clear_dtor) {
++ if (folio_test_hugetlb(folio)) {
+ spin_lock_irq(&hugetlb_lock);
+ __clear_hugetlb_destructor(h, folio);
+ spin_unlock_irq(&hugetlb_lock);
+ }
+
++ /*
++ * Move PageHWPoison flag from head page to the raw error pages,
++ * which makes any healthy subpages reusable.
++ */
++ if (unlikely(folio_test_hwpoison(folio)))
++ folio_clear_hugetlb_hwpoison(folio);
++
+ /*
+ * Non-gigantic pages demoted from CMA allocated gigantic pages
+ * need to be given back to CMA in free_gigantic_folio.
+@@ -1964,7 +1969,7 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
+ {
+ hugetlb_vmemmap_optimize(h, &folio->page);
+ INIT_LIST_HEAD(&folio->lru);
+- folio_set_hugetlb(folio);
++ __folio_set_hugetlb(folio);
+ hugetlb_set_folio_subpool(folio, NULL);
+ set_hugetlb_cgroup(folio, NULL);
+ set_hugetlb_cgroup_rsvd(folio, NULL);
+@@ -2067,22 +2072,6 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
+ return __prep_compound_gigantic_folio(folio, order, true);
+ }
+
+-/*
+- * PageHuge() only returns true for hugetlbfs pages, but not for normal or
+- * transparent huge pages. See the PageTransHuge() documentation for more
+- * details.
+- */
+-int PageHuge(struct page *page)
+-{
+- struct folio *folio;
+-
+- if (!PageCompound(page))
+- return 0;
+- folio = page_folio(page);
+- return folio_test_hugetlb(folio);
+-}
+-EXPORT_SYMBOL_GPL(PageHuge);
+-
+ /*
+ * Find and lock address space (mapping) in write mode.
+ *
+@@ -2529,6 +2518,23 @@ struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *v
+ return folio;
+ }
+
++static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
++{
++#ifdef CONFIG_NUMA
++ struct mempolicy *mpol = get_task_policy(current);
++
++ /*
++ * Only enforce MPOL_BIND policy which overlaps with cpuset policy
++ * (from policy_nodemask) specifically for hugetlb case
++ */
++ if (mpol->mode == MPOL_BIND &&
++ (apply_policy_zone(mpol, gfp_zone(gfp)) &&
++ cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
++ return &mpol->nodes;
++#endif
++ return NULL;
++}
++
+ /*
+ * Increase the hugetlb pool such that it can accommodate a reservation
+ * of size 'delta'.
+@@ -2542,6 +2548,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
+ long i;
+ long needed, allocated;
+ bool alloc_ok = true;
++ int node;
++ nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
+
+ lockdep_assert_held(&hugetlb_lock);
+ needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
+@@ -2556,8 +2564,15 @@ static int gather_surplus_pages(struct hstate *h, long delta)
+ retry:
+ spin_unlock_irq(&hugetlb_lock);
+ for (i = 0; i < needed; i++) {
+- folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
+- NUMA_NO_NODE, NULL);
++ folio = NULL;
++ for_each_node_mask(node, cpuset_current_mems_allowed) {
++ if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) {
++ folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
++ node, NULL);
++ if (folio)
++ break;
++ }
++ }
+ if (!folio) {
+ alloc_ok = false;
+ break;
+@@ -3146,9 +3161,12 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+
+ rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+ hugetlb_acct_memory(h, -rsv_adjust);
+- if (deferred_reserve)
++ if (deferred_reserve) {
++ spin_lock_irq(&hugetlb_lock);
+ hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+ pages_per_huge_page(h), folio);
++ spin_unlock_irq(&hugetlb_lock);
++ }
+ }
+ return folio;
+
+@@ -4316,7 +4334,7 @@ void __init hugetlb_add_hstate(unsigned int order)
+ BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
+ BUG_ON(order == 0);
+ h = &hstates[hugetlb_max_hstate++];
+- mutex_init(&h->resize_lock);
++ __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
+ h->order = order;
+ h->mask = ~(huge_page_size(h) - 1);
+ for (i = 0; i < MAX_NUMNODES; ++i)
+@@ -4539,23 +4557,6 @@ static int __init default_hugepagesz_setup(char *s)
+ }
+ __setup("default_hugepagesz=", default_hugepagesz_setup);
+
+-static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
+-{
+-#ifdef CONFIG_NUMA
+- struct mempolicy *mpol = get_task_policy(current);
+-
+- /*
+- * Only enforce MPOL_BIND policy which overlaps with cpuset policy
+- * (from policy_nodemask) specifically for hugetlb case
+- */
+- if (mpol->mode == MPOL_BIND &&
+- (apply_policy_zone(mpol, gfp_zone(gfp)) &&
+- cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
+- return &mpol->nodes;
+-#endif
+- return NULL;
+-}
+-
+ static unsigned int allowed_mems_nr(struct hstate *h)
+ {
+ int node;
+@@ -6520,7 +6521,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ }
+ }
+
+- page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
++ page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
+
+ /*
+ * Note that page may be a sub-page, and with vmemmap
+@@ -6531,7 +6532,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ * try_grab_page() should always be able to get the page here,
+ * because we hold the ptl lock and have verified pte_present().
+ */
+- ret = try_grab_page(page, flags);
++ ret = try_grab_folio(page_folio(page), 1, flags);
+
+ if (WARN_ON_ONCE(ret)) {
+ page = ERR_PTR(ret);
+@@ -6646,9 +6647,13 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
+ if (!pte_same(pte, newpte))
+ set_huge_pte_at(mm, address, ptep, newpte, psize);
+ } else if (unlikely(is_pte_marker(pte))) {
+- /* No other markers apply for now. */
+- WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
+- if (uffd_wp_resolve)
++ /*
++ * Do nothing on a poison marker; page is
++	 * corrupted, permissions do not apply. Here
++	 * pte_marker_uffd_wp()==true implies !poison
++	 * because they're mutually exclusive.
++ */
++ if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
+ /* Safe to modify directly (non-present->none). */
+ huge_pte_clear(mm, address, ptep, psize);
+ } else if (!huge_pte_none(pte)) {
+@@ -7468,9 +7473,9 @@ void __init hugetlb_cma_reserve(int order)
+ * huge page demotion.
+ */
+ res = cma_declare_contiguous_nid(0, size, 0,
+- PAGE_SIZE << HUGETLB_PAGE_ORDER,
+- 0, false, name,
+- &hugetlb_cma[nid], nid);
++ PAGE_SIZE << HUGETLB_PAGE_ORDER,
++ HUGETLB_PAGE_ORDER, false, name,
++ &hugetlb_cma[nid], nid);
+ if (res) {
+ pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
+ res, nid);
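
gather_surplus_pages() above now walks the allowed nodes itself instead of passing NUMA_NO_NODE, skipping nodes excluded by an MPOL_BIND mask. The shape of that fallback loop, sketched over plain bitmasks; alloc_on_node and both masks are invented for illustration:

#include <stdio.h>

#define MAX_NODES 8

static void *alloc_on_node(int node)
{
	return node == 2 ? "page" : NULL;	/* pretend only node 2 has memory */
}

static void *alloc_with_fallback(unsigned int allowed, unsigned int *mbind)
{
	void *page = NULL;

	for (int node = 0; node < MAX_NODES; node++) {
		if (!(allowed & (1u << node)))
			continue;
		/* honour the MPOL_BIND mask when one is in force */
		if (mbind && !(*mbind & (1u << node)))
			continue;
		page = alloc_on_node(node);
		if (page)
			break;
	}
	return page;
}

int main(void)
{
	unsigned int allowed = 0x0f, bind = 0x04;

	printf("%s\n", (char *)alloc_with_fallback(allowed, &bind));
	return 0;
}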
+diff --git a/mm/internal.h b/mm/internal.h
+index 30cf724ddbce33..ef8d787a510c5c 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -581,9 +581,8 @@ struct anon_vma *folio_anon_vma(struct folio *folio);
+ void unmap_mapping_folio(struct folio *folio);
+ extern long populate_vma_page_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, int *locked);
+-extern long faultin_vma_page_range(struct vm_area_struct *vma,
+- unsigned long start, unsigned long end,
+- bool write, int *locked);
++extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
++ unsigned long end, bool write, int *locked);
+ extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
+ unsigned long bytes);
+ /*
+@@ -939,8 +938,8 @@ int migrate_device_coherent_page(struct page *page);
+ /*
+ * mm/gup.c
+ */
+-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
+-int __must_check try_grab_page(struct page *page, unsigned int flags);
++int __must_check try_grab_folio(struct folio *folio, int refs,
++ unsigned int flags);
+
+ /*
+ * mm/huge_memory.c
+@@ -962,8 +961,14 @@ enum {
+ FOLL_FAST_ONLY = 1 << 20,
+ /* allow unlocking the mmap lock */
+ FOLL_UNLOCKABLE = 1 << 21,
++ /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
++ FOLL_MADV_POPULATE = 1 << 22,
+ };
+
++#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
++ FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
++ FOLL_MADV_POPULATE)
++
+ /*
+ * Indicates for which pages that are write-protected in the page table,
+ * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
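
INTERNAL_GUP_FLAGS above folds the open-coded list of internal-only bits into one mask so is_valid_gup_args() can reject callers with a single test. The idiom, sketched with invented flag names:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_A (1u << 0)	/* caller-visible */
#define FLAG_B (1u << 1)	/* internal only */
#define FLAG_C (1u << 2)	/* internal only */
#define INTERNAL_FLAGS (FLAG_B | FLAG_C)

static bool flags_valid(unsigned int flags)
{
	/* reject any caller that tries to smuggle in internal bits */
	return (flags & INTERNAL_FLAGS) == 0;
}

int main(void)
{
	printf("%d %d\n", flags_valid(FLAG_A), flags_valid(FLAG_A | FLAG_B));
	return 0;
}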
+diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
+index b61cc6a42541ad..0119075d2e58e8 100644
+--- a/mm/kasan/kasan_test.c
++++ b/mm/kasan/kasan_test.c
+@@ -450,7 +450,8 @@ static void kmalloc_oob_16(struct kunit *test)
+ /* This test is specifically crafted for the generic mode. */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+
+- ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
++ /* RELOC_HIDE to prevent gcc from warning about short alloc */
++ ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
+ ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
+index 3adb4c1d3b1937..38a3bff23e8d00 100644
+--- a/mm/kmsan/core.c
++++ b/mm/kmsan/core.c
+@@ -262,8 +262,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ u32 origin, bool checked)
+ {
+ u64 address = (u64)addr;
+- void *shadow_start;
+- u32 *origin_start;
++ u32 *shadow_start, *origin_start;
+ size_t pad = 0;
+
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+@@ -291,8 +290,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ origin_start =
+ (u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
+
+- for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
+- origin_start[i] = origin;
++ /*
++ * If the new origin is non-zero, assume that the shadow byte is also non-zero,
++ * and unconditionally overwrite the old origin slot.
++ * If the new origin is zero, overwrite the old origin slot iff the
++ * corresponding shadow slot is zero.
++ */
++ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
++ if (origin || !shadow_start[i])
++ origin_start[i] = origin;
++ }
+ }
+
+ struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
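
The kmsan_internal_set_shadow_origin() change above makes the origin write conditional: a non-zero origin may always be written, but a zero origin must not clobber the origin of bytes whose shadow is still non-zero. The same rule over plain arrays; the buffers are illustrative stand-ins for the kernel's per-page metadata:

#include <stdint.h>

static void set_origin(const uint32_t *shadow, uint32_t *origin, int n,
		       uint32_t new_origin)
{
	for (int i = 0; i < n; i++) {
		/* never wipe the origin of still-poisoned (non-zero) shadow */
		if (new_origin || !shadow[i])
			origin[i] = new_origin;
	}
}

int main(void)
{
	uint32_t shadow[2] = { 0, 0xff };
	uint32_t origin[2] = { 7, 7 };

	set_origin(shadow, origin, 2, 0);
	return (origin[0] == 0 && origin[1] == 7) ? 0 : 1;
}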
+diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
+index ffedf4dbc49d77..103e2e88ea033f 100644
+--- a/mm/kmsan/init.c
++++ b/mm/kmsan/init.c
+@@ -96,7 +96,7 @@ void __init kmsan_init_shadow(void)
+ struct metadata_page_pair {
+ struct page *shadow, *origin;
+ };
+-static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
++static struct metadata_page_pair held_back[NR_PAGE_ORDERS] __initdata;
+
+ /*
+ * Eager metadata allocation. When the memblock allocator is freeing pages to
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 981af9c72e7a3e..2e4cd681622def 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -282,7 +282,7 @@ static unsigned int zero_checksum __read_mostly;
+ static bool ksm_use_zero_pages __read_mostly;
+
+ /* The number of zero pages which is placed by KSM */
+-unsigned long ksm_zero_pages;
++atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
+
+ #ifdef CONFIG_NUMA
+ /* Zeroed when merging across nodes is not allowed */
+@@ -1242,8 +1242,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
+ * the dirty bit in zero page's PTE is set.
+ */
+ newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
+- ksm_zero_pages++;
+- mm->ksm_zero_pages++;
++ ksm_map_zero_page(mm);
+ /*
+ * We're replacing an anonymous page with a zero page, which is
+ * not anonymous. We need to do proper accounting otherwise we
+@@ -2486,18 +2485,16 @@ static void ksm_do_scan(unsigned int scan_npages)
+ {
+ struct ksm_rmap_item *rmap_item;
+ struct page *page;
+- unsigned int npages = scan_npages;
+
+- while (npages-- && likely(!freezing(current))) {
++ while (scan_npages-- && likely(!freezing(current))) {
+ cond_resched();
+ rmap_item = scan_get_next_rmap_item(&page);
+ if (!rmap_item)
+ return;
+ cmp_and_merge_page(page, rmap_item);
+ put_page(page);
++ ksm_pages_scanned++;
+ }
+-
+- ksm_pages_scanned += scan_npages - npages;
+ }
+
+ static int ksmd_should_run(void)
+@@ -3107,7 +3104,7 @@ static void wait_while_offlining(void)
+ #ifdef CONFIG_PROC_FS
+ long ksm_process_profit(struct mm_struct *mm)
+ {
+- return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
++ return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
+ mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
+ }
+ #endif /* CONFIG_PROC_FS */
+@@ -3386,7 +3383,7 @@ KSM_ATTR_RO(pages_volatile);
+ static ssize_t ksm_zero_pages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+ {
+- return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
++ return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
+ }
+ KSM_ATTR_RO(ksm_zero_pages);
+
+@@ -3395,7 +3392,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
+ {
+ long general_profit;
+
+- general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
++ general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
+ ksm_rmap_items * sizeof(struct ksm_rmap_item);
+
+ return sysfs_emit(buf, "%ld\n", general_profit);
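
The ksm hunks above convert ksm_zero_pages to atomic_long_t because the counter is now updated from more than one context, where a plain long would race. The same change in userspace terms, using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long zero_pages = 0;	/* was: unsigned long zero_pages; */

static void map_zero_page(void)
{
	/* safe from any context; a plain ++ on a long would race */
	atomic_fetch_add_explicit(&zero_pages, 1, memory_order_relaxed);
}

static long zero_pages_read(void)
{
	return atomic_load_explicit(&zero_pages, memory_order_relaxed);
}

int main(void)
{
	map_zero_page();
	printf("%ld\n", zero_pages_read());
	return 0;
}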
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 4dded5d27e7eaa..98fdb9288a68a8 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -917,27 +917,14 @@ static long madvise_populate(struct vm_area_struct *vma,
+ {
+ const bool write = behavior == MADV_POPULATE_WRITE;
+ struct mm_struct *mm = vma->vm_mm;
+- unsigned long tmp_end;
+ int locked = 1;
+ long pages;
+
+ *prev = vma;
+
+ while (start < end) {
+- /*
+- * We might have temporarily dropped the lock. For example,
+- * our VMA might have been split.
+- */
+- if (!vma || start >= vma->vm_end) {
+- vma = vma_lookup(mm, start);
+- if (!vma)
+- return -ENOMEM;
+- }
+-
+- tmp_end = min_t(unsigned long, end, vma->vm_end);
+ /* Populate (prefault) page tables readable/writable. */
+- pages = faultin_vma_page_range(vma, start, tmp_end, write,
+- &locked);
++ pages = faultin_page_range(mm, start, end, write, &locked);
+ if (!locked) {
+ mmap_read_lock(mm);
+ locked = 1;
+@@ -958,7 +945,7 @@ static long madvise_populate(struct vm_area_struct *vma,
+ pr_warn_once("%s: unhandled return value: %ld\n",
+ __func__, pages);
+ fallthrough;
+- case -ENOMEM:
++ case -ENOMEM: /* No VMA or out of memory. */
+ return -ENOMEM;
+ }
+ }
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 913b2520a9a002..d630f5c2bdb90e 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -180,8 +180,9 @@ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
+ /*
+ * Address comparison utilities
+ */
+-static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+- phys_addr_t base2, phys_addr_t size2)
++unsigned long __init_memblock
++memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
++ phys_addr_t size2)
+ {
+ return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+ }
+@@ -2119,6 +2120,9 @@ static void __init memmap_init_reserved_pages(void)
+ start = region->base;
+ end = start + region->size;
+
++ if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
++ nid = early_pfn_to_nid(PFN_DOWN(start));
++
+ reserve_bootmem_region(start, end, nid);
+ }
+ }
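
memblock_addrs_overlap(), given external linkage above, is the standard half-open interval overlap test. A sketch with assertions:

#include <assert.h>

static int ranges_overlap(unsigned long b1, unsigned long s1,
			  unsigned long b2, unsigned long s2)
{
	return b1 < b2 + s2 && b2 < b1 + s1;
}

int main(void)
{
	assert(ranges_overlap(0, 8, 4, 8));	/* [0,8) and [4,12) */
	assert(!ranges_overlap(0, 4, 4, 4));	/* touching, not overlapping */
	return 0;
}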
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 5b009b233ab892..110afda740a18a 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2864,7 +2864,8 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+ * Moreover, it should not come from DMA buffer and is not readily
+ * reclaimable. So those GFP bits should be masked off.
+ */
+-#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
++#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
++ __GFP_ACCOUNT | __GFP_NOFAIL)
+
+ /*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+@@ -4879,9 +4880,12 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+ buf = endp + 1;
+
+ cfd = simple_strtoul(buf, &endp, 10);
+- if ((*endp != ' ') && (*endp != '\0'))
++ if (*endp == '\0')
++ buf = endp;
++ else if (*endp == ' ')
++ buf = endp + 1;
++ else
+ return -EINVAL;
+- buf = endp + 1;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+@@ -5166,11 +5170,28 @@ static struct cftype mem_cgroup_legacy_files[] = {
+
+ #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
+ static DEFINE_IDR(mem_cgroup_idr);
++static DEFINE_SPINLOCK(memcg_idr_lock);
++
++static int mem_cgroup_alloc_id(void)
++{
++ int ret;
++
++ idr_preload(GFP_KERNEL);
++ spin_lock(&memcg_idr_lock);
++ ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
++ GFP_NOWAIT);
++ spin_unlock(&memcg_idr_lock);
++ idr_preload_end();
++ return ret;
++}
+
+ static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+ {
+ if (memcg->id.id > 0) {
++ spin_lock(&memcg_idr_lock);
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
++ spin_unlock(&memcg_idr_lock);
++
+ memcg->id.id = 0;
+ }
+ }
+@@ -5293,8 +5314,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ if (!memcg)
+ return ERR_PTR(error);
+
+- memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+- 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
++ memcg->id.id = mem_cgroup_alloc_id();
+ if (memcg->id.id < 0) {
+ error = memcg->id.id;
+ goto fail;
+@@ -5429,7 +5449,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
+ * publish it here at the end of onlining. This matches the
+ * regular ID destruction during offlining.
+ */
++ spin_lock(&memcg_idr_lock);
+ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
++ spin_unlock(&memcg_idr_lock);
+
+ return 0;
+ offline_kmem:
+@@ -7612,9 +7634,13 @@ bool mem_cgroup_swap_full(struct folio *folio)
+
+ static int __init setup_swap_account(char *s)
+ {
+- pr_warn_once("The swapaccount= commandline option is deprecated. "
+- "Please report your usecase to linux-mm@kvack.org if you "
+- "depend on this functionality.\n");
++ bool res;
++
++ if (!kstrtobool(s, &res) && !res)
++ pr_warn_once("The swapaccount=0 commandline option is deprecated "
++ "in favor of configuring swap control via cgroupfs. "
++ "Please report your usecase to linux-mm@kvack.org if you "
++ "depend on this functionality.\n");
+ return 1;
+ }
+ __setup("swapaccount=", setup_swap_account);
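
The memcontrol hunks above wrap mem_cgroup_idr in a spinlock and use idr_preload() so the allocation that may sleep happens before the lock is taken. A userspace analogue of that preload pattern; the toy id list and pthread mutex stand in for the IDR and spinlock:

#include <pthread.h>
#include <stdlib.h>

struct id_node { int id; struct id_node *next; };

static pthread_mutex_t idr_lock = PTHREAD_MUTEX_INITIALIZER;
static struct id_node *ids;		/* leaked here; a sketch only */
static int next_id = 1;

static int alloc_id(void)
{
	/* "preload": the blocking allocation, done before locking */
	struct id_node *node = malloc(sizeof(*node));
	int id;

	if (!node)
		return -1;
	pthread_mutex_lock(&idr_lock);	/* no allocation under the lock */
	node->id = id = next_id++;
	node->next = ids;
	ids = node;
	pthread_mutex_unlock(&idr_lock);
	return id;
}

int main(void)
{
	return alloc_id() == 1 ? 0 : 1;
}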
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 4d6e43c88489a0..9018a1162efc9d 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -154,11 +154,23 @@ static int __page_handle_poison(struct page *page)
+ {
+ int ret;
+
+- zone_pcp_disable(page_zone(page));
++ /*
++ * zone_pcp_disable() can't be used here. It will
++ * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
++ * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
++	 * optimization is enabled. This breaks the current lock dependency
++	 * chain and leads to deadlock.
++ * Disabling pcp before dissolving the page was a deterministic
++ * approach because we made sure that those pages cannot end up in any
++ * PCP list. Draining PCP lists expels those pages to the buddy system,
++ * but nothing guarantees that those pages do not get back to a PCP
++ * queue if we need to refill those.
++ */
+ ret = dissolve_free_huge_page(page);
+- if (!ret)
++ if (!ret) {
++ drain_all_pages(page_zone(page));
+ ret = take_page_off_buddy(page);
+- zone_pcp_enable(page_zone(page));
++ }
+
+ return ret;
+ }
+@@ -595,10 +607,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
+ /*
+ * Collect processes when the error hit an anonymous page.
+ */
+-static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+- int force_early)
++static void collect_procs_anon(struct folio *folio, struct page *page,
++ struct list_head *to_kill, int force_early)
+ {
+- struct folio *folio = page_folio(page);
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct anon_vma *av;
+@@ -633,12 +644,12 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+ /*
+ * Collect processes when the error hit a file mapped page.
+ */
+-static void collect_procs_file(struct page *page, struct list_head *to_kill,
+- int force_early)
++static void collect_procs_file(struct folio *folio, struct page *page,
++ struct list_head *to_kill, int force_early)
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+- struct address_space *mapping = page->mapping;
++ struct address_space *mapping = folio->mapping;
+ pgoff_t pgoff;
+
+ i_mmap_lock_read(mapping);
+@@ -704,17 +715,17 @@ static void collect_procs_fsdax(struct page *page,
+ /*
+ * Collect the processes who have the corrupted page mapped to kill.
+ */
+-static void collect_procs(struct page *page, struct list_head *tokill,
+- int force_early)
++static void collect_procs(struct folio *folio, struct page *page,
++ struct list_head *tokill, int force_early)
+ {
+- if (!page->mapping)
++ if (!folio->mapping)
+ return;
+ if (unlikely(PageKsm(page)))
+ collect_procs_ksm(page, tokill, force_early);
+ else if (PageAnon(page))
+- collect_procs_anon(page, tokill, force_early);
++ collect_procs_anon(folio, page, tokill, force_early);
+ else
+- collect_procs_file(page, tokill, force_early);
++ collect_procs_file(folio, page, tokill, force_early);
+ }
+
+ struct hwpoison_walk {
+@@ -1182,26 +1193,26 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
+ */
+ static int me_huge_page(struct page_state *ps, struct page *p)
+ {
++ struct folio *folio = page_folio(p);
+ int res;
+- struct page *hpage = compound_head(p);
+ struct address_space *mapping;
+ bool extra_pins = false;
+
+- mapping = page_mapping(hpage);
++ mapping = folio_mapping(folio);
+ if (mapping) {
+- res = truncate_error_page(hpage, page_to_pfn(p), mapping);
++ res = truncate_error_page(&folio->page, page_to_pfn(p), mapping);
+ /* The page is kept in page cache. */
+ extra_pins = true;
+- unlock_page(hpage);
++ folio_unlock(folio);
+ } else {
+- unlock_page(hpage);
++ folio_unlock(folio);
+ /*
+ * migration entry prevents later access on error hugepage,
+ * so we can free and dissolve it into buddy to save healthy
+ * subpages.
+ */
+- put_page(hpage);
+- if (__page_handle_poison(p) >= 0) {
++ folio_put(folio);
++ if (__page_handle_poison(p) > 0) {
+ page_ref_inc(p);
+ res = MF_RECOVERED;
+ } else {
+@@ -1571,7 +1582,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * This check implies we don't kill processes if their pages
+ * are in the swap cache early. Those are always late kills.
+ */
+- if (!page_mapped(hpage))
++ if (!page_mapped(p))
+ return true;
+
+ if (PageSwapCache(p)) {
+@@ -1602,7 +1613,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * mapped in dirty form. This has to be done before try_to_unmap,
+ * because ttu takes the rmap data structures down.
+ */
+- collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
++ collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
+
+ if (PageHuge(hpage) && !PageAnon(hpage)) {
+ /*
+@@ -1622,10 +1633,10 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ try_to_unmap(folio, ttu);
+ }
+
+- unmap_success = !page_mapped(hpage);
++ unmap_success = !page_mapped(p);
+ if (!unmap_success)
+ pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
+- pfn, page_mapcount(hpage));
++ pfn, page_mapcount(p));
+
+ /*
+ * try_to_unmap() might put mlocked page in lru cache, so call
+@@ -1705,7 +1716,7 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
+ * mapping being torn down is communicated in siginfo, see
+ * kill_proc()
+ */
+- loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
++ loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
+
+ unmap_mapping_range(mapping, start, size, 0);
+ }
+@@ -1713,20 +1724,23 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
+ kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
+ }
+
++/*
++ * Only dev_pagemap pages get here, such as fsdax when the filesystem
++ * either does not claim or fails to claim a hwpoison event, or devdax.
++ * The fsdax pages are initialized per base page, and the devdax pages
++ * could be initialized either as base pages, or as compound pages with
++ * vmemmap optimization enabled. Devdax is simplistic in its dealing with
++ * hwpoison: if a subpage of a compound page is poisoned, simply
++ * marking the compound head page is sufficient.
++ */
+ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+ struct dev_pagemap *pgmap)
+ {
+- struct page *page = pfn_to_page(pfn);
++ struct folio *folio = pfn_folio(pfn);
+ LIST_HEAD(to_kill);
+ dax_entry_t cookie;
+ int rc = 0;
+
+- /*
+- * Pages instantiated by device-dax (not filesystem-dax)
+- * may be compound pages.
+- */
+- page = compound_head(page);
+-
+ /*
+ * Prevent the inode from being freed while we are interrogating
+ * the address_space, typically this would be handled by
+@@ -1734,11 +1748,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+ * also prevents changes to the mapping of this pfn until
+ * poison signaling is complete.
+ */
+- cookie = dax_lock_page(page);
++ cookie = dax_lock_folio(folio);
+ if (!cookie)
+ return -EBUSY;
+
+- if (hwpoison_filter(page)) {
++ if (hwpoison_filter(&folio->page)) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+@@ -1760,7 +1774,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+ * Use this flag as an indication that the dax page has been
+ * remapped UC to prevent speculative consumption of poison.
+ */
+- SetPageHWPoison(page);
++ SetPageHWPoison(&folio->page);
+
+ /*
+ * Unlike System-RAM there is no possibility to swap in a
+@@ -1769,11 +1783,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
+ * SIGBUS (i.e. MF_MUST_KILL)
+ */
+ flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+- collect_procs(page, &to_kill, true);
++ collect_procs(folio, &folio->page, &to_kill, true);
+
+- unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
++ unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
+ unlock:
+- dax_unlock_page(page, cookie);
++ dax_unlock_folio(folio, cookie);
+ return rc;
+ }
+
+@@ -2068,7 +2082,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
+ */
+ if (res == 0) {
+ folio_unlock(folio);
+- if (__page_handle_poison(p) >= 0) {
++ if (__page_handle_poison(p) > 0) {
+ page_ref_inc(p);
+ res = MF_RECOVERED;
+ } else {
+@@ -2381,7 +2395,7 @@ struct memory_failure_entry {
+ struct memory_failure_cpu {
+ DECLARE_KFIFO(fifo, struct memory_failure_entry,
+ MEMORY_FAILURE_FIFO_SIZE);
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct work_struct work;
+ };
+
+@@ -2407,20 +2421,22 @@ void memory_failure_queue(unsigned long pfn, int flags)
+ {
+ struct memory_failure_cpu *mf_cpu;
+ unsigned long proc_flags;
++ bool buffer_overflow;
+ struct memory_failure_entry entry = {
+ .pfn = pfn,
+ .flags = flags,
+ };
+
+ mf_cpu = &get_cpu_var(memory_failure_cpu);
+- spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+- if (kfifo_put(&mf_cpu->fifo, entry))
++ raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
++ buffer_overflow = !kfifo_put(&mf_cpu->fifo, entry);
++ if (!buffer_overflow)
+ schedule_work_on(smp_processor_id(), &mf_cpu->work);
+- else
++ raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
++ put_cpu_var(memory_failure_cpu);
++ if (buffer_overflow)
+ pr_err("buffer overflow when queuing memory failure at %#lx\n",
+ pfn);
+- spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+- put_cpu_var(memory_failure_cpu);
+ }
+ EXPORT_SYMBOL_GPL(memory_failure_queue);
+
+@@ -2433,9 +2449,9 @@ static void memory_failure_work_func(struct work_struct *work)
+
+ mf_cpu = container_of(work, struct memory_failure_cpu, work);
+ for (;;) {
+- spin_lock_irqsave(&mf_cpu->lock, proc_flags);
++ raw_spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+ gotten = kfifo_get(&mf_cpu->fifo, &entry);
+- spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
++ raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+ if (!gotten)
+ break;
+ if (entry.flags & MF_SOFT_OFFLINE)
+@@ -2465,7 +2481,7 @@ static int __init memory_failure_init(void)
+
+ for_each_possible_cpu(cpu) {
+ mf_cpu = &per_cpu(memory_failure_cpu, cpu);
+- spin_lock_init(&mf_cpu->lock);
++ raw_spin_lock_init(&mf_cpu->lock);
+ INIT_KFIFO(mf_cpu->fifo);
+ INIT_WORK(&mf_cpu->work, memory_failure_work_func);
+ }
+@@ -2521,6 +2537,13 @@ int unpoison_memory(unsigned long pfn)
+ goto unlock_mutex;
+ }
+
++ if (is_huge_zero_page(&folio->page)) {
++ unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
++ pfn, &unpoison_rs);
++ ret = -EOPNOTSUPP;
++ goto unlock_mutex;
++ }
++
+ if (!PageHWPoison(p)) {
+ unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
+ pfn, &unpoison_rs);
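
The unmap_and_kill() fix above widens the page index before shifting:
pgoff_t is an unsigned long, so on 32-bit kernels index << PAGE_SHIFT
can silently truncate the byte offset. A standalone demonstration of
the difference (illustrative values, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t index = 0x80000000u; /* page index of an 8 TiB offset */

        uint32_t narrow = index << PAGE_SHIFT;         /* wraps mod 2^32 */
        uint64_t wide = (uint64_t)index << PAGE_SHIFT; /* widen first */

        printf("narrow: 0x%x\n", narrow);                     /* 0x0 */
        printf("wide:   0x%llx\n", (unsigned long long)wide); /* 0x80000000000 */
        return 0;
    }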
+diff --git a/mm/memory.c b/mm/memory.c
+index 517221f0130353..b6ddfe22c5d5c0 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2424,11 +2424,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+ return 0;
+ }
+
+-/*
+- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+- * must have pre-validated the caching bits of the pgprot_t.
+- */
+-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+ {
+ pgd_t *pgd;
+@@ -2481,6 +2477,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ return 0;
+ }
+
++/*
++ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
++ * must have pre-validated the caching bits of the pgprot_t.
++ */
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
++
++ if (!error)
++ return 0;
++
++ /*
++ * A partial pfn range mapping is dangerous: it does not
++ * maintain page reference counts, and callers may free
++ * pages due to the error. So zap it early.
++ */
++ zap_page_range_single(vma, addr, size, NULL);
++ return error;
++}
++
+ /**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+@@ -3565,8 +3582,8 @@ EXPORT_SYMBOL_GPL(unmap_mapping_pages);
+ void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows)
+ {
+- pgoff_t hba = holebegin >> PAGE_SHIFT;
+- pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
++ pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* Check for overflow. */
+ if (sizeof(holelen) > sizeof(hlen)) {
+@@ -3726,6 +3743,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ struct page *page;
+ struct swap_info_struct *si = NULL;
+ rmap_t rmap_flags = RMAP_NONE;
++ bool need_clear_cache = false;
+ bool exclusive = false;
+ swp_entry_t entry;
+ pte_t pte;
+@@ -3794,6 +3812,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ if (!folio) {
+ if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
+ __swap_count(entry) == 1) {
++ /*
++ * Prevent parallel swapin from proceeding with
++ * the cache flag. Otherwise, another thread may
++ * finish swapin first, free the entry, and swapout
++ * reusing the same entry. It's undetectable as
++ * pte_same() returns true due to entry reuse.
++ */
++ if (swapcache_prepare(entry)) {
++ /* Relax a bit to prevent rapid repeated page faults */
++ schedule_timeout_uninterruptible(1);
++ goto out;
++ }
++ need_clear_cache = true;
++
+ /* skip swapcache */
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
+ vma, vmf->address, false);
+@@ -4040,6 +4072,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ if (vmf->pte)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ out:
++ /* Clear the swap cache pin for direct swapin after PTL unlock */
++ if (need_clear_cache)
++ swapcache_clear(si, entry);
+ if (si)
+ put_swap_device(si);
+ return ret;
+@@ -4054,6 +4089,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ folio_unlock(swapcache);
+ folio_put(swapcache);
+ }
++ if (need_clear_cache)
++ swapcache_clear(si, entry);
+ if (si)
+ put_swap_device(si);
+ return ret;
+@@ -4333,7 +4370,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct vm_area_struct *vma = vmf->vma;
+ bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
+- bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
++ bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
+ pte_t entry;
+
+ flush_icache_pages(vma, page, nr);
+@@ -4755,7 +4792,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ spin_lock(vmf->ptl);
+ if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+- goto out;
++ return 0;
+ }
+
+ /* Get the normal PTE */
+@@ -4820,23 +4857,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ if (migrate_misplaced_page(page, vma, target_nid)) {
+ page_nid = target_nid;
+ flags |= TNF_MIGRATED;
+- } else {
+- flags |= TNF_MIGRATE_FAIL;
+- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+- vmf->address, &vmf->ptl);
+- if (unlikely(!vmf->pte))
+- goto out;
+- if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
+- pte_unmap_unlock(vmf->pte, vmf->ptl);
+- goto out;
+- }
+- goto out_map;
++ task_numa_fault(last_cpupid, page_nid, 1, flags);
++ return 0;
+ }
+
+-out:
+- if (page_nid != NUMA_NO_NODE)
+- task_numa_fault(last_cpupid, page_nid, 1, flags);
+- return 0;
++ flags |= TNF_MIGRATE_FAIL;
++ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
++ vmf->address, &vmf->ptl);
++ if (unlikely(!vmf->pte))
++ return 0;
++ if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
++ pte_unmap_unlock(vmf->pte, vmf->ptl);
++ return 0;
++ }
+ out_map:
+ /*
+ * Make it present again, depending on how arch implements
+@@ -4850,7 +4883,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+- goto out;
++
++ if (page_nid != NUMA_NO_NODE)
++ task_numa_fault(last_cpupid, page_nid, 1, flags);
++ return 0;
+ }
+
+ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
+@@ -5315,7 +5351,7 @@ static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs
+ return true;
+
+ if (regs && !user_mode(regs)) {
+- unsigned long ip = instruction_pointer(regs);
++ unsigned long ip = exception_ip(regs);
+ if (!search_exception_tables(ip))
+ return false;
+ }
+@@ -5340,7 +5376,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r
+ {
+ mmap_read_unlock(mm);
+ if (regs && !user_mode(regs)) {
+- unsigned long ip = instruction_pointer(regs);
++ unsigned long ip = exception_ip(regs);
+ if (!search_exception_tables(ip))
+ return false;
+ }
+@@ -5654,6 +5690,10 @@ int follow_phys(struct vm_area_struct *vma,
+ goto out;
+ pte = ptep_get(ptep);
+
++ /* Never return PFNs of anon folios in COW mappings. */
++ if (vm_normal_folio(vma, address, pte))
++ goto unlock;
++
+ if ((flags & FOLL_WRITE) && !pte_write(pte))
+ goto unlock;
+
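
The swapcache_prepare() guard added to do_swap_page() above is a
claim-before-use pattern: the first faulting thread pins the swap
entry, and racing threads back off instead of starting a second
synchronous swapin of the same entry. A userspace sketch of the same
idea with C11 atomics; the names mirror the kernel's but are stand-ins,
and this sketch retries in a loop where the kernel returns from the
fault and lets it be retaken:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    static atomic_bool entry_pinned;

    /* Returns false on success, true if another thread holds the pin,
     * mirroring the kernel's "non-zero means busy" convention. */
    static bool swapcache_prepare(void)
    {
        bool expected = false;

        return !atomic_compare_exchange_strong(&entry_pinned, &expected, true);
    }

    static void swapcache_clear(void)
    {
        atomic_store(&entry_pinned, false);
    }

    static void swap_in_page(void)
    {
        while (swapcache_prepare()) {
            /* Relax a bit to avoid rapid repeated retries. */
            usleep(1000);
        }
        /* ... read the page from swap and install the PTE ... */
        swapcache_clear();
    }

    int main(void)
    {
        swap_in_page();
        return 0;
    }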
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 1b03f4ec6fd21b..9beed7c71a8e91 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -101,9 +101,11 @@ static int set_memmap_mode(const char *val, const struct kernel_param *kp)
+
+ static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
+ {
+- if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE)
+- return sprintf(buffer, "force\n");
+- return param_get_bool(buffer, kp);
++ int mode = *((int *)kp->arg);
++
++ if (mode == MEMMAP_ON_MEMORY_FORCE)
++ return sprintf(buffer, "force\n");
++ return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
+ }
+
+ static const struct kernel_param_ops memmap_mode_ops = {
+@@ -1129,6 +1131,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
+ kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
+ }
+
++/*
++ * Must be called with mem_hotplug_lock in write mode.
++ */
+ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
+ {
+@@ -1149,7 +1154,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
+ return -EINVAL;
+
+- mem_hotplug_begin();
+
+ /* associate pfn range with the zone */
+ move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
+@@ -1208,7 +1212,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ writeback_set_ratelimit();
+
+ memory_notify(MEM_ONLINE, &arg);
+- mem_hotplug_done();
+ return 0;
+
+ failed_addition:
+@@ -1217,7 +1220,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
+ memory_notify(MEM_CANCEL_ONLINE, &arg);
+ remove_pfn_range_from_zone(zone, pfn, nr_pages);
+- mem_hotplug_done();
+ return ret;
+ }
+
+@@ -1458,7 +1460,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(start, size, params.altmap, group);
+ if (ret) {
+- arch_remove_memory(start, size, NULL);
++ arch_remove_memory(start, size, params.altmap);
+ goto error_free;
+ }
+
+@@ -1608,7 +1610,7 @@ struct range __weak arch_get_mappable_range(void)
+
+ struct range mhp_get_pluggable_range(bool need_mapping)
+ {
+- const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
++ const u64 max_phys = PHYSMEM_END;
+ struct range mhp_range;
+
+ if (need_mapping) {
+@@ -1689,7 +1691,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
+ */
+ if (HPageMigratable(head))
+ goto found;
+- skip = compound_nr(head) - (page - head);
++ skip = compound_nr(head) - (pfn - page_to_pfn(head));
+ pfn += skip - 1;
+ }
+ return -ENOENT;
+@@ -1863,6 +1865,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
+ return 0;
+ }
+
++/*
++ * Must be called with mem_hotplug_lock in write mode.
++ */
+ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
+ {
+@@ -1885,8 +1890,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
+ return -EINVAL;
+
+- mem_hotplug_begin();
+-
+ /*
+ * Don't allow to offline memory blocks that contain holes.
+ * Consequently, memory blocks with holes can never get onlined
+@@ -2027,7 +2030,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+
+ memory_notify(MEM_OFFLINE, &arg);
+ remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
+- mem_hotplug_done();
+ return 0;
+
+ failed_removal_isolated:
+@@ -2042,7 +2044,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ (unsigned long long) start_pfn << PAGE_SHIFT,
+ ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
+ reason);
+- mem_hotplug_done();
+ return ret;
+ }
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 29ebf1e7898cf0..4cae854c0f28d1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -131,22 +131,26 @@ static struct mempolicy default_policy = {
+ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+
+ /**
+- * numa_map_to_online_node - Find closest online node
++ * numa_nearest_node - Find nearest node by state
+ * @node: Node id to start the search
++ * @state: State to filter the search
+ *
+- * Lookup the next closest node by distance if @nid is not online.
++ * Lookup the closest node by distance if @nid is not in state.
+ *
+- * Return: this @node if it is online, otherwise the closest node by distance
++ * Return: this @node if it is in state, otherwise the closest node by distance
+ */
+-int numa_map_to_online_node(int node)
++int numa_nearest_node(int node, unsigned int state)
+ {
+ int min_dist = INT_MAX, dist, n, min_node;
+
+- if (node == NUMA_NO_NODE || node_online(node))
++ if (state >= NR_NODE_STATES)
++ return -EINVAL;
++
++ if (node == NUMA_NO_NODE || node_state(node, state))
+ return node;
+
+ min_node = node;
+- for_each_online_node(n) {
++ for_each_node_state(n, state) {
+ dist = node_distance(node, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+@@ -156,7 +160,7 @@ int numa_map_to_online_node(int node)
+
+ return min_node;
+ }
+-EXPORT_SYMBOL_GPL(numa_map_to_online_node);
++EXPORT_SYMBOL_GPL(numa_nearest_node);
+
+ struct mempolicy *get_task_policy(struct task_struct *p)
+ {
+@@ -3130,8 +3134,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
+ * @pol: pointer to mempolicy to be formatted
+ *
+ * Convert @pol into a string. If @buffer is too short, truncate the string.
+- * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
+- * longest flag, "relative", and to display at least a few node ids.
++ * Recommend a @maxlen of at least 51 for the longest mode, "weighted
++ * interleave", plus the longest flag flags, "relative|balancing", and to
++ * display at least a few node ids.
+ */
+ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ {
+@@ -3140,7 +3145,10 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ unsigned short mode = MPOL_DEFAULT;
+ unsigned short flags = 0;
+
+- if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
++ if (pol &&
++ pol != &default_policy &&
++ !(pol >= &preferred_node_policy[0] &&
++ pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
+ mode = pol->mode;
+ flags = pol->flags;
+ }
+@@ -3167,12 +3175,18 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+ p += snprintf(p, buffer + maxlen - p, "=");
+
+ /*
+- * Currently, the only defined flags are mutually exclusive
++ * Static and relative are mutually exclusive.
+ */
+ if (flags & MPOL_F_STATIC_NODES)
+ p += snprintf(p, buffer + maxlen - p, "static");
+ else if (flags & MPOL_F_RELATIVE_NODES)
+ p += snprintf(p, buffer + maxlen - p, "relative");
++
++ if (flags & MPOL_F_NUMA_BALANCING) {
++ if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
++ p += snprintf(p, buffer + maxlen - p, "|");
++ p += snprintf(p, buffer + maxlen - p, "balancing");
++ }
+ }
+
+ if (!nodes_empty(nodes))
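
numa_nearest_node() above generalises the old online-only search to any
node state: if the start node is not in the wanted state, it scans the
nodes that are and keeps the one at the smallest distance. A compact
sketch with a made-up distance table (the real one comes from firmware):

    #include <stdio.h>
    #include <limits.h>
    #include <stdbool.h>

    #define MAX_NODES 4

    /* Hypothetical SLIT-style distances; 10 means "local". */
    static const int node_distance[MAX_NODES][MAX_NODES] = {
        { 10, 20, 40, 40 },
        { 20, 10, 40, 40 },
        { 40, 40, 10, 20 },
        { 40, 40, 20, 10 },
    };

    static int nearest_node(int node, const bool in_state[MAX_NODES])
    {
        int min_dist = INT_MAX, min_node = node;

        if (in_state[node])
            return node;

        for (int n = 0; n < MAX_NODES; n++) {
            if (!in_state[n])
                continue;
            if (node_distance[node][n] < min_dist) {
                min_dist = node_distance[node][n];
                min_node = n;
            }
        }
        return min_node;
    }

    int main(void)
    {
        bool has_memory[MAX_NODES] = { true, false, true, false };

        /* Node 1 has no memory; node 0 (distance 20) beats node 2 (40). */
        printf("%d\n", nearest_node(1, has_memory));
        return 0;
    }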
+diff --git a/mm/memtest.c b/mm/memtest.c
+index 32f3e9dda8370f..c2c609c3911994 100644
+--- a/mm/memtest.c
++++ b/mm/memtest.c
+@@ -51,10 +51,10 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size
+ last_bad = 0;
+
+ for (p = start; p < end; p++)
+- *p = pattern;
++ WRITE_ONCE(*p, pattern);
+
+ for (p = start; p < end; p++, start_phys_aligned += incr) {
+- if (*p == pattern)
++ if (READ_ONCE(*p) == pattern)
+ continue;
+ if (start_phys_aligned == last_bad + incr) {
+ last_bad += incr;
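
The memtest change above forces real memory accesses: without them the
compiler may keep the pattern in a register and "verify" the value it
just wrote without ever reading RAM back. The kernel's READ_ONCE() and
WRITE_ONCE() boil down, roughly, to volatile accesses (GNU C, as in the
kernel):

    #include <stdio.h>
    #include <stdint.h>

    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        static uint64_t ram[1024];
        const uint64_t pattern = 0xaaaaaaaaaaaaaaaaULL;
        size_t bad = 0;

        for (size_t i = 0; i < 1024; i++)
            WRITE_ONCE(ram[i], pattern);

        for (size_t i = 0; i < 1024; i++)
            if (READ_ONCE(ram[i]) != pattern)
                bad++;

        printf("%zu bad words\n", bad);
        return 0;
    }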
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 06086dc9da288f..5d7d39b1c06991 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -405,6 +405,7 @@ int folio_migrate_mapping(struct address_space *mapping,
+ int dirty;
+ int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+ long nr = folio_nr_pages(folio);
++ long entries, i;
+
+ if (!mapping) {
+ /* Anonymous page without mapping */
+@@ -442,8 +443,10 @@ int folio_migrate_mapping(struct address_space *mapping,
+ folio_set_swapcache(newfolio);
+ newfolio->private = folio_get_private(folio);
+ }
++ entries = nr;
+ } else {
+ VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
++ entries = 1;
+ }
+
+ /* Move dirty while page refs frozen and newpage not yet exposed */
+@@ -453,7 +456,11 @@ int folio_migrate_mapping(struct address_space *mapping,
+ folio_set_dirty(newfolio);
+ }
+
+- xas_store(&xas, newfolio);
++ /* Swap cache still stores N entries instead of a high-order entry */
++ for (i = 0; i < entries; i++) {
++ xas_store(&xas, newfolio);
++ xas_next(&xas);
++ }
+
+ /*
+ * Drop cache reference from old page by unfreezing
+@@ -1019,32 +1026,31 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
+ }
+
+ /*
+- * To record some information during migration, we use some unused
+- * fields (mapping and private) of struct folio of the newly allocated
+- * destination folio. This is safe because nobody is using them
+- * except us.
++ * To record some information during migration, we use the unused private
++ * field of struct folio of the newly allocated destination folio.
++ * This is safe because nobody is using it except us.
+ */
+-union migration_ptr {
+- struct anon_vma *anon_vma;
+- struct address_space *mapping;
++enum {
++ PAGE_WAS_MAPPED = BIT(0),
++ PAGE_WAS_MLOCKED = BIT(1),
++ PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
+ };
++
+ static void __migrate_folio_record(struct folio *dst,
+- unsigned long page_was_mapped,
++ int old_page_state,
+ struct anon_vma *anon_vma)
+ {
+- union migration_ptr ptr = { .anon_vma = anon_vma };
+- dst->mapping = ptr.mapping;
+- dst->private = (void *)page_was_mapped;
++ dst->private = (void *)anon_vma + old_page_state;
+ }
+
+ static void __migrate_folio_extract(struct folio *dst,
+- int *page_was_mappedp,
++ int *old_page_state,
+ struct anon_vma **anon_vmap)
+ {
+- union migration_ptr ptr = { .mapping = dst->mapping };
+- *anon_vmap = ptr.anon_vma;
+- *page_was_mappedp = (unsigned long)dst->private;
+- dst->mapping = NULL;
++ unsigned long private = (unsigned long)dst->private;
++
++ *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
++ *old_page_state = private & PAGE_OLD_STATES;
+ dst->private = NULL;
+ }
+
+@@ -1104,7 +1110,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ {
+ struct folio *dst;
+ int rc = -EAGAIN;
+- int page_was_mapped = 0;
++ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(&src->page);
+ bool locked = false;
+@@ -1158,6 +1164,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ folio_lock(src);
+ }
+ locked = true;
++ if (folio_test_mlocked(src))
++ old_page_state |= PAGE_WAS_MLOCKED;
+
+ if (folio_test_writeback(src)) {
+ /*
+@@ -1207,7 +1215,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ dst_locked = true;
+
+ if (unlikely(!is_lru)) {
+- __migrate_folio_record(dst, page_was_mapped, anon_vma);
++ __migrate_folio_record(dst, old_page_state, anon_vma);
+ return MIGRATEPAGE_UNMAP;
+ }
+
+@@ -1233,11 +1241,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ VM_BUG_ON_FOLIO(folio_test_anon(src) &&
+ !folio_test_ksm(src) && !anon_vma, src);
+ try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
+- page_was_mapped = 1;
++ old_page_state |= PAGE_WAS_MAPPED;
+ }
+
+ if (!folio_mapped(src)) {
+- __migrate_folio_record(dst, page_was_mapped, anon_vma);
++ __migrate_folio_record(dst, old_page_state, anon_vma);
+ return MIGRATEPAGE_UNMAP;
+ }
+
+@@ -1249,7 +1257,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
+ if (rc == -EAGAIN)
+ ret = NULL;
+
+- migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
++ migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
++ anon_vma, locked, ret);
+ migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
+
+ return rc;
+@@ -1262,12 +1271,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ struct list_head *ret)
+ {
+ int rc;
+- int page_was_mapped = 0;
++ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(&src->page);
+ struct list_head *prev;
+
+- __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
++ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+ prev = dst->lru.prev;
+ list_del(&dst->lru);
+
+@@ -1288,10 +1297,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ * isolated from the unevictable LRU: but this case is the easiest.
+ */
+ folio_add_lru(dst);
+- if (page_was_mapped)
++ if (old_page_state & PAGE_WAS_MLOCKED)
+ lru_add_drain();
+
+- if (page_was_mapped)
++ if (old_page_state & PAGE_WAS_MAPPED)
+ remove_migration_ptes(src, dst, false);
+
+ out_unlock_both:
+@@ -1323,11 +1332,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+ */
+ if (rc == -EAGAIN) {
+ list_add(&dst->lru, prev);
+- __migrate_folio_record(dst, page_was_mapped, anon_vma);
++ __migrate_folio_record(dst, old_page_state, anon_vma);
+ return rc;
+ }
+
+- migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
++ migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
++ anon_vma, true, ret);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
+
+ return rc;
+@@ -1795,12 +1805,12 @@ static int migrate_pages_batch(struct list_head *from,
+ dst = list_first_entry(&dst_folios, struct folio, lru);
+ dst2 = list_next_entry(dst, lru);
+ list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
+- int page_was_mapped = 0;
++ int old_page_state = 0;
+ struct anon_vma *anon_vma = NULL;
+
+- __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+- migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
+- true, ret_folios);
++ __migrate_folio_extract(dst, &old_page_state, &anon_vma);
++ migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
++ anon_vma, true, ret_folios);
+ list_del(&dst->lru);
+ migrate_folio_undo_dst(dst, true, put_new_folio, private);
+ dst = dst2;
+@@ -2512,6 +2522,14 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+ if (managed_zone(pgdat->node_zones + z))
+ break;
+ }
++
++ /*
++ * If there are no managed zones, it should not proceed
++ * further.
++ */
++ if (z < 0)
++ return 0;
++
+ wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
+ return 0;
+ }
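
The migration bookkeeping rework above is classic pointer tagging:
anon_vma pointers are word-aligned, so their two low bits are free to
carry the PAGE_WAS_MAPPED/PAGE_WAS_MLOCKED flags, and a single private
field replaces the old mapping/private pair. A self-contained sketch of
the encoding (stand-in types, not the kernel's):

    #include <assert.h>
    #include <stdint.h>

    enum {
        PAGE_WAS_MAPPED  = 1 << 0,
        PAGE_WAS_MLOCKED = 1 << 1,
        PAGE_OLD_STATES  = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
    };

    struct anon_vma { int dummy; };

    static void *record(struct anon_vma *av, int state)
    {
        /* Alignment guarantees the low bits are zero and thus free. */
        assert(((uintptr_t)av & PAGE_OLD_STATES) == 0);
        return (void *)((uintptr_t)av | (uintptr_t)state);
    }

    static struct anon_vma *extract(void *private, int *state)
    {
        uintptr_t v = (uintptr_t)private;

        *state = (int)(v & PAGE_OLD_STATES);
        return (struct anon_vma *)(v & ~(uintptr_t)PAGE_OLD_STATES);
    }

    int main(void)
    {
        static struct anon_vma av;
        int state;

        void *packed = record(&av, PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED);
        assert(extract(packed, &state) == &av);
        assert(state == (PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED));
        return 0;
    }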
+diff --git a/mm/mm_init.c b/mm/mm_init.c
+index 50f2f34745afa9..77fd04c83d046d 100644
+--- a/mm/mm_init.c
++++ b/mm/mm_init.c
+@@ -26,6 +26,7 @@
+ #include <linux/pgtable.h>
+ #include <linux/swap.h>
+ #include <linux/cma.h>
++#include <linux/crash_dump.h>
+ #include "internal.h"
+ #include "slab.h"
+ #include "shuffle.h"
+@@ -381,6 +382,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
+ goto out;
+ }
+
++ if (is_kdump_kernel()) {
++ pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
++ goto out;
++ }
++
+ for_each_mem_region(r) {
+ if (memblock_is_mirror(r))
+ continue;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 9e018d8dd7d693..6530e9cac45875 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -949,13 +949,21 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
+ } else if (merge_prev) { /* case 2 */
+ if (curr) {
+ vma_start_write(curr);
+- err = dup_anon_vma(prev, curr, &anon_dup);
+ if (end == curr->vm_end) { /* case 7 */
++ /*
++ * can_vma_merge_after() assumed we would not be
++ * removing prev vma, so it skipped the check
++ * for vm_ops->close, but we are removing curr
++ */
++ if (curr->vm_ops && curr->vm_ops->close)
++ err = -EINVAL;
+ remove = curr;
+ } else { /* case 5 */
+ adjust = curr;
+ adj_start = (end - curr->vm_start);
+ }
++ if (!err)
++ err = dup_anon_vma(prev, curr, &anon_dup);
+ }
+ } else { /* merge_next */
+ vma_start_write(next);
+@@ -3017,8 +3025,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ flags |= MAP_LOCKED;
+
+ file = get_file(vma->vm_file);
++ ret = security_mmap_file(vma->vm_file, prot, flags);
++ if (ret)
++ goto out_fput;
+ ret = do_mmap(vma->vm_file, start, size,
+ prot, flags, 0, pgoff, &populate, NULL);
++out_fput:
+ fput(file);
+ out:
+ mmap_write_unlock(mm);
+diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
+index 1854850b4b897f..368b840e75082c 100644
+--- a/mm/mmap_lock.c
++++ b/mm/mmap_lock.c
+@@ -19,14 +19,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
+
+ #ifdef CONFIG_MEMCG
+
+-/*
+- * Our various events all share the same buffer (because we don't want or need
+- * to allocate a set of buffers *per event type*), so we need to protect against
+- * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
+- * been made.
+- */
+-static DEFINE_MUTEX(reg_lock);
+-static int reg_refcount; /* Protected by reg_lock. */
++static atomic_t reg_refcount;
+
+ /*
+ * Size of the buffer for memcg path names. Ignoring stack trace support,
+@@ -34,136 +27,22 @@ static int reg_refcount; /* Protected by reg_lock. */
+ */
+ #define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
+
+-/*
+- * How many contexts our trace events might be called in: normal, softirq, irq,
+- * and NMI.
+- */
+-#define CONTEXT_COUNT 4
+-
+-struct memcg_path {
+- local_lock_t lock;
+- char __rcu *buf;
+- local_t buf_idx;
+-};
+-static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
+- .lock = INIT_LOCAL_LOCK(lock),
+- .buf_idx = LOCAL_INIT(0),
+-};
+-
+-static char **tmp_bufs;
+-
+-/* Called with reg_lock held. */
+-static void free_memcg_path_bufs(void)
+-{
+- struct memcg_path *memcg_path;
+- int cpu;
+- char **old = tmp_bufs;
+-
+- for_each_possible_cpu(cpu) {
+- memcg_path = per_cpu_ptr(&memcg_paths, cpu);
+- *(old++) = rcu_dereference_protected(memcg_path->buf,
+- lockdep_is_held(&reg_lock));
+- rcu_assign_pointer(memcg_path->buf, NULL);
+- }
+-
+- /* Wait for inflight memcg_path_buf users to finish. */
+- synchronize_rcu();
+-
+- old = tmp_bufs;
+- for_each_possible_cpu(cpu) {
+- kfree(*(old++));
+- }
+-
+- kfree(tmp_bufs);
+- tmp_bufs = NULL;
+-}
+-
+ int trace_mmap_lock_reg(void)
+ {
+- int cpu;
+- char *new;
+-
+- mutex_lock(&reg_lock);
+-
+- /* If the refcount is going 0->1, proceed with allocating buffers. */
+- if (reg_refcount++)
+- goto out;
+-
+- tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
+- GFP_KERNEL);
+- if (tmp_bufs == NULL)
+- goto out_fail;
+-
+- for_each_possible_cpu(cpu) {
+- new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
+- if (new == NULL)
+- goto out_fail_free;
+- rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
+- /* Don't need to wait for inflights, they'd have gotten NULL. */
+- }
+-
+-out:
+- mutex_unlock(&reg_lock);
++ atomic_inc(&reg_refcount);
+ return 0;
+-
+-out_fail_free:
+- free_memcg_path_bufs();
+-out_fail:
+- /* Since we failed, undo the earlier ref increment. */
+- --reg_refcount;
+-
+- mutex_unlock(&reg_lock);
+- return -ENOMEM;
+ }
+
+ void trace_mmap_lock_unreg(void)
+ {
+- mutex_lock(&reg_lock);
+-
+- /* If the refcount is going 1->0, proceed with freeing buffers. */
+- if (--reg_refcount)
+- goto out;
+-
+- free_memcg_path_bufs();
+-
+-out:
+- mutex_unlock(&reg_lock);
+-}
+-
+-static inline char *get_memcg_path_buf(void)
+-{
+- struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
+- char *buf;
+- int idx;
+-
+- rcu_read_lock();
+- buf = rcu_dereference(memcg_path->buf);
+- if (buf == NULL) {
+- rcu_read_unlock();
+- return NULL;
+- }
+- idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
+- MEMCG_PATH_BUF_SIZE;
+- return &buf[idx];
++ atomic_dec(&reg_refcount);
+ }
+
+-static inline void put_memcg_path_buf(void)
+-{
+- local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
+- rcu_read_unlock();
+-}
+-
+-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
+- do { \
+- const char *memcg_path; \
+- local_lock(&memcg_paths.lock); \
+- memcg_path = get_mm_memcg_path(mm); \
+- trace_mmap_lock_##type(mm, \
+- memcg_path != NULL ? memcg_path : "", \
+- ##__VA_ARGS__); \
+- if (likely(memcg_path != NULL)) \
+- put_memcg_path_buf(); \
+- local_unlock(&memcg_paths.lock); \
++#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
++ do { \
++ char buf[MEMCG_PATH_BUF_SIZE]; \
++ get_mm_memcg_path(mm, buf, sizeof(buf)); \
++ trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
+ } while (0)
+
+ #else /* !CONFIG_MEMCG */
+@@ -185,37 +64,23 @@ void trace_mmap_lock_unreg(void)
+ #ifdef CONFIG_TRACING
+ #ifdef CONFIG_MEMCG
+ /*
+- * Write the given mm_struct's memcg path to a percpu buffer, and return a
+- * pointer to it. If the path cannot be determined, or no buffer was available
+- * (because the trace event is being unregistered), NULL is returned.
+- *
+- * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
+- * disabled by the caller before calling us, and re-enabled only after the
+- * caller is done with the pointer.
+- *
+- * The caller must call put_memcg_path_buf() once the buffer is no longer
+- * needed. This must be done while preemption is still disabled.
++ * Write the given mm_struct's memcg path to a buffer. If the path cannot be
++ * determined or the trace event is being unregistered, an empty string is written.
+ */
+-static const char *get_mm_memcg_path(struct mm_struct *mm)
++static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
+ {
+- char *buf = NULL;
+- struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
++ struct mem_cgroup *memcg;
+
++ buf[0] = '\0';
++ /* No need to get path if no trace event is registered. */
++ if (!atomic_read(&reg_refcount))
++ return;
++ memcg = get_mem_cgroup_from_mm(mm);
+ if (memcg == NULL)
+- goto out;
+- if (unlikely(memcg->css.cgroup == NULL))
+- goto out_put;
+-
+- buf = get_memcg_path_buf();
+- if (buf == NULL)
+- goto out_put;
+-
+- cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);
+-
+-out_put:
++ return;
++ if (memcg->css.cgroup)
++ cgroup_path(memcg->css.cgroup, buf, buflen);
+ css_put(&memcg->css);
+-out:
+- return buf;
+ }
+
+ #endif /* CONFIG_MEMCG */
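
The mmap_lock tracing rework above drops the mutex-protected per-CPU
buffers entirely: a bare atomic counter records whether any event is
registered, and each call site formats the cgroup path into an on-stack
buffer, skipping the lookup when nobody is listening. A userspace
sketch of that shape (the path lookup is a stand-in):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PATH_BUF_SIZE 256

    static atomic_int reg_refcount;

    static void trace_reg(void)   { atomic_fetch_add(&reg_refcount, 1); }
    static void trace_unreg(void) { atomic_fetch_sub(&reg_refcount, 1); }

    static void get_path(char *buf, size_t buflen)
    {
        buf[0] = '\0';
        /* Skip the lookup entirely if no event is registered. */
        if (!atomic_load(&reg_refcount))
            return;
        snprintf(buf, buflen, "/sys/fs/cgroup/demo"); /* stand-in lookup */
    }

    static void trace_event(void)
    {
        char buf[PATH_BUF_SIZE];

        get_path(buf, sizeof(buf));
        printf("event: '%s'\n", buf);
    }

    int main(void)
    {
        trace_event(); /* empty path: not registered */
        trace_reg();
        trace_event(); /* path filled in */
        trace_unreg();
        return 0;
    }

The on-stack buffer trades a little stack space for the removal of all
the locking, RCU, and per-CPU index bookkeeping the old scheme needed.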
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 382e81c33fc437..df71010baabe7e 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -238,6 +238,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ {
+ spinlock_t *old_ptl, *new_ptl;
+ struct mm_struct *mm = vma->vm_mm;
++ bool res = false;
+ pmd_t pmd;
+
+ if (!arch_supports_page_table_move())
+@@ -277,19 +278,25 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+- /* Clear the pmd */
+ pmd = *old_pmd;
++
++ /* Racing with collapse? */
++ if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
++ goto out_unlock;
++ /* Clear the pmd */
+ pmd_clear(old_pmd);
++ res = true;
+
+ VM_BUG_ON(!pmd_none(*new_pmd));
+
+ pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
+ flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
++out_unlock:
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
+
+- return true;
++ return res;
+ }
+ #else
+ static inline bool move_normal_pmd(struct vm_area_struct *vma,
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index b8d3d7040a506a..e632ec9b642109 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+ else
+ bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
+
+- if (bg_thresh >= thresh)
+- bg_thresh = thresh / 2;
+ tsk = current;
+ if (rt_task(tsk)) {
+ bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
+ thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
+ }
++ /*
++ * Dirty throttling logic assumes the limits in page units fit into
++ * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++ */
++ if (thresh > UINT_MAX)
++ thresh = UINT_MAX;
++ /* This makes sure bg_thresh is within 32-bits as well */
++ if (bg_thresh >= thresh)
++ bg_thresh = thresh / 2;
+ dtc->thresh = thresh;
+ dtc->bg_thresh = bg_thresh;
+
+@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
+ if (rt_task(tsk))
+ dirty += dirty / 4;
+
+- return dirty;
++ /*
++ * Dirty throttling logic assumes the limits in page units fit into
++ * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++ */
++ return min_t(unsigned long, dirty, UINT_MAX);
+ }
+
+ /**
+@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+ {
+ int ret;
++ unsigned long old_bytes = dirty_background_bytes;
+
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+- if (ret == 0 && write)
++ if (ret == 0 && write) {
++ if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
++ UINT_MAX) {
++ dirty_background_bytes = old_bytes;
++ return -ERANGE;
++ }
+ dirty_background_ratio = 0;
++ }
+ return ret;
+ }
+
+@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
+
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
++ if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
++ vm_dirty_bytes = old_bytes;
++ return -ERANGE;
++ }
+ writeback_set_ratelimit();
+ vm_dirty_ratio = 0;
+ }
+@@ -1921,7 +1943,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
+ break;
+ }
+ __set_current_state(TASK_KILLABLE);
+- wb->dirty_sleep = now;
++ bdi->last_bdp_sleep = jiffies;
+ io_schedule_timeout(pause);
+
+ current->dirty_paused_when = now + pause;
+@@ -3110,7 +3132,7 @@ EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
+ */
+ void folio_wait_stable(struct folio *folio)
+ {
+- if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
++ if (mapping_stable_writes(folio_mapping(folio)))
+ folio_wait_writeback(folio);
+ }
+ EXPORT_SYMBOL_GPL(folio_wait_stable);
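
The UINT_MAX clamps above follow from simple arithmetic: with 4 KiB
pages, a limit of 2^32 pages is 2^44 bytes, the "16TB dirty limits max"
mentioned in the comments. A one-line check of that claim:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t max_pages = UINT32_MAX;        /* limits kept in 32 bits */
        uint64_t bytes = (max_pages + 1) << 12; /* 4 KiB per page */

        printf("%llu TiB\n", (unsigned long long)(bytes >> 40)); /* 16 */
        return 0;
    }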
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 85741403948f55..edb32635037f47 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -302,7 +302,7 @@ EXPORT_SYMBOL(nr_online_nodes);
+
+ static bool page_contains_unaccepted(struct page *page, unsigned int order);
+ static void accept_page(struct page *page, unsigned int order);
+-static bool try_to_accept_memory(struct zone *zone, unsigned int order);
++static bool cond_accept_memory(struct zone *zone, unsigned int order);
+ static inline bool has_unaccepted_memory(void);
+ static bool __free_unaccepted(struct page *page);
+
+@@ -519,10 +519,15 @@ static void bad_page(struct page *page, const char *reason)
+
+ static inline unsigned int order_to_pindex(int migratetype, int order)
+ {
++ bool __maybe_unused movable;
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ VM_BUG_ON(order != pageblock_order);
+- return NR_LOWORDER_PCP_LISTS;
++
++ movable = migratetype == MIGRATE_MOVABLE;
++
++ return NR_LOWORDER_PCP_LISTS + movable;
+ }
+ #else
+ VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+@@ -536,7 +541,7 @@ static inline int pindex_to_order(unsigned int pindex)
+ int order = pindex / MIGRATE_PCPTYPES;
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+- if (pindex == NR_LOWORDER_PCP_LISTS)
++ if (pindex >= NR_LOWORDER_PCP_LISTS)
+ order = pageblock_order;
+ #else
+ VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+@@ -1570,7 +1575,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+ struct page *page;
+
+ /* Find a page of the appropriate size in the preferred list */
+- for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
++ for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
+ area = &(zone->free_area[current_order]);
+ page = get_page_from_free_area(area, migratetype);
+ if (!page)
+@@ -1940,7 +1945,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+ continue;
+
+ spin_lock_irqsave(&zone->lock, flags);
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct free_area *area = &(zone->free_area[order]);
+
+ page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
+@@ -2050,8 +2055,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
+ return false;
+
+ find_smallest:
+- for (current_order = order; current_order <= MAX_ORDER;
+- current_order++) {
++ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
+ area = &(zone->free_area[current_order]);
+ fallback_mt = find_suitable_fallback(area, current_order,
+ start_migratetype, false, &can_steal);
+@@ -2181,14 +2185,21 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ */
+ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ {
+- struct per_cpu_pages *pcp;
++ struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
++ int count;
+
+- pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
+- if (pcp->count) {
++ do {
+ spin_lock(&pcp->lock);
+- free_pcppages_bulk(zone, pcp->count, pcp, 0);
++ count = pcp->count;
++ if (count) {
++ int to_drain = min(count,
++ pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
++
++ free_pcppages_bulk(zone, to_drain, pcp, 0);
++ count -= to_drain;
++ }
+ spin_unlock(&pcp->lock);
+- }
++ } while (count);
+ }
+
+ /*
+@@ -2339,7 +2350,7 @@ static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
+ * freeing of pages without any allocation.
+ */
+ batch <<= pcp->free_factor;
+- if (batch < max_nr_free)
++ if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX)
+ pcp->free_factor++;
+ batch = clamp(batch, min_nr_free, max_nr_free);
+
+@@ -2819,9 +2830,6 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
+ if (!(alloc_flags & ALLOC_CMA))
+ unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
+ #endif
+-#ifdef CONFIG_UNACCEPTED_MEMORY
+- unusable_free += zone_page_state(z, NR_UNACCEPTED);
+-#endif
+
+ return unusable_free;
+ }
+@@ -2884,7 +2892,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+ return true;
+
+ /* For a high-order request, check at least one suitable page is free */
+- for (o = order; o <= MAX_ORDER; o++) {
++ for (o = order; o < NR_PAGE_ORDERS; o++) {
+ struct free_area *area = &z->free_area[o];
+ int mt;
+
+@@ -3115,16 +3123,16 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
+ }
+ }
+
++ cond_accept_memory(zone, order);
++
+ mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+ if (!zone_watermark_fast(zone, order, mark,
+ ac->highest_zoneidx, alloc_flags,
+ gfp_mask)) {
+ int ret;
+
+- if (has_unaccepted_memory()) {
+- if (try_to_accept_memory(zone, order))
+- goto try_this_zone;
+- }
++ if (cond_accept_memory(zone, order))
++ goto try_this_zone;
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ /*
+@@ -3178,10 +3186,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
+
+ return page;
+ } else {
+- if (has_unaccepted_memory()) {
+- if (try_to_accept_memory(zone, order))
+- goto try_this_zone;
+- }
++ if (cond_accept_memory(zone, order))
++ goto try_this_zone;
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ /* Try again if zone has deferred pages */
+@@ -3809,14 +3815,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+ else
+ (*no_progress_loops)++;
+
+- /*
+- * Make sure we converge to OOM if we cannot make any progress
+- * several times in the row.
+- */
+- if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+- /* Before OOM, exhaust highatomic_reserve */
+- return unreserve_highatomic_pageblock(ac, true);
+- }
++ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
++ goto out;
++
+
+ /*
+ * Keep reclaiming pages while there is a chance this will lead
+@@ -3859,6 +3860,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+ schedule_timeout_uninterruptible(1);
+ else
+ cond_resched();
++out:
++ /* Before OOM, exhaust highatomic_reserve */
++ if (!ret)
++ return unreserve_highatomic_pageblock(ac, true);
++
+ return ret;
+ }
+
+@@ -3900,6 +3906,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ struct alloc_context *ac)
+ {
+ bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
++ bool can_compact = gfp_compaction_allowed(gfp_mask);
+ const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
+ struct page *page = NULL;
+ unsigned int alloc_flags;
+@@ -3970,7 +3977,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ * Don't try this for allocations that are allowed to ignore
+ * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
+ */
+- if (can_direct_reclaim &&
++ if (can_direct_reclaim && can_compact &&
+ (costly_order ||
+ (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
+ && !gfp_pfmemalloc_allowed(gfp_mask)) {
+@@ -4068,9 +4075,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+
+ /*
+ * Do not retry costly high order allocations unless they are
+- * __GFP_RETRY_MAYFAIL
++ * __GFP_RETRY_MAYFAIL and we can compact
+ */
+- if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
++ if (costly_order && (!can_compact ||
++ !(gfp_mask & __GFP_RETRY_MAYFAIL)))
+ goto nopage;
+
+ if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
+@@ -4083,7 +4091,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ * implementation of the compaction depends on the sufficient amount
+ * of free memory (see __compaction_suitable)
+ */
+- if (did_some_progress > 0 &&
++ if (did_some_progress > 0 && can_compact &&
+ should_compact_retry(ac, order, alloc_flags,
+ compact_result, &compact_priority,
+ &compaction_retries))
+@@ -6440,7 +6448,7 @@ bool is_free_buddy_page(struct page *page)
+ unsigned long pfn = page_to_pfn(page);
+ unsigned int order;
+
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+ if (PageBuddy(page_head) &&
+@@ -6499,7 +6507,7 @@ bool take_page_off_buddy(struct page *page)
+ bool ret = false;
+
+ spin_lock_irqsave(&zone->lock, flags);
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct page *page_head = page - (pfn & ((1 << order) - 1));
+ int page_order = buddy_order(page_head);
+
+@@ -6606,9 +6614,6 @@ static bool try_to_accept_memory_one(struct zone *zone)
+ struct page *page;
+ bool last;
+
+- if (list_empty(&zone->unaccepted_pages))
+- return false;
+-
+ spin_lock_irqsave(&zone->lock, flags);
+ page = list_first_entry_or_null(&zone->unaccepted_pages,
+ struct page, lru);
+@@ -6634,23 +6639,29 @@ static bool try_to_accept_memory_one(struct zone *zone)
+ return true;
+ }
+
+-static bool try_to_accept_memory(struct zone *zone, unsigned int order)
++static bool cond_accept_memory(struct zone *zone, unsigned int order)
+ {
+ long to_accept;
+- int ret = false;
++ bool ret = false;
++
++ if (!has_unaccepted_memory())
++ return false;
++
++ if (list_empty(&zone->unaccepted_pages))
++ return false;
+
+ /* How much to accept to get to high watermark? */
+ to_accept = high_wmark_pages(zone) -
+ (zone_page_state(zone, NR_FREE_PAGES) -
+- __zone_watermark_unusable_free(zone, order, 0));
++ __zone_watermark_unusable_free(zone, order, 0) -
++ zone_page_state(zone, NR_UNACCEPTED));
+
+- /* Accept at least one page */
+- do {
++ while (to_accept > 0) {
+ if (!try_to_accept_memory_one(zone))
+ break;
+ ret = true;
+ to_accept -= MAX_ORDER_NR_PAGES;
+- } while (to_accept > 0);
++ }
+
+ return ret;
+ }
+@@ -6693,7 +6704,7 @@ static void accept_page(struct page *page, unsigned int order)
+ {
+ }
+
+-static bool try_to_accept_memory(struct zone *zone, unsigned int order)
++static bool cond_accept_memory(struct zone *zone, unsigned int order)
+ {
+ return false;
+ }
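
The drain_pages_zone() rework above bounds how long the pcp lock is
held: rather than freeing the whole list in one critical section, it
frees at most batch << CONFIG_PCP_BATCH_SCALE_MAX pages per lock hold
and loops until the list is empty. A userspace sketch of the batching
shape, with a mutex standing in for the spinlock and stub types:

    #include <pthread.h>
    #include <stdio.h>

    #define BATCH_SCALE_MAX 5

    struct pcp {
        pthread_mutex_t lock;
        int count;
        int batch;
    };

    static void free_bulk(struct pcp *pcp, int n)
    {
        pcp->count -= n; /* stand-in for free_pcppages_bulk() */
    }

    static void drain(struct pcp *pcp)
    {
        int count;

        do {
            pthread_mutex_lock(&pcp->lock);
            count = pcp->count;
            if (count) {
                int to_drain = pcp->batch << BATCH_SCALE_MAX;

                if (to_drain > count)
                    to_drain = count;
                free_bulk(pcp, to_drain);
                count -= to_drain;
            }
            pthread_mutex_unlock(&pcp->lock);
        } while (count);
    }

    int main(void)
    {
        struct pcp pcp = { PTHREAD_MUTEX_INITIALIZER, 1000, 16 };

        drain(&pcp); /* two passes: 512 pages, then 488 */
        printf("left: %d\n", pcp.count);
        return 0;
    }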
+diff --git a/mm/page_reporting.c b/mm/page_reporting.c
+index b021f482a4cb36..66369cc5279bf4 100644
+--- a/mm/page_reporting.c
++++ b/mm/page_reporting.c
+@@ -276,7 +276,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
+ return err;
+
+ /* Process each free list starting from lowest order/mt */
+- for (order = page_reporting_order; order <= MAX_ORDER; order++) {
++ for (order = page_reporting_order; order < NR_PAGE_ORDERS; order++) {
+ for (mt = 0; mt < MIGRATE_TYPES; mt++) {
+ /* We do not pull pages from the isolate free list */
+ if (is_migrate_isolate(mt))
+diff --git a/mm/page_table_check.c b/mm/page_table_check.c
+index af69c3c8f7c2d5..509c6ef8de400e 100644
+--- a/mm/page_table_check.c
++++ b/mm/page_table_check.c
+@@ -7,6 +7,8 @@
+ #include <linux/kstrtox.h>
+ #include <linux/mm.h>
+ #include <linux/page_table_check.h>
++#include <linux/swap.h>
++#include <linux/swapops.h>
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "page_table_check: " fmt
+@@ -71,6 +73,9 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
+ page = pfn_to_page(pfn);
+ page_ext = page_ext_get(page);
+
++ if (!page_ext)
++ return;
++
+ BUG_ON(PageSlab(page));
+ anon = PageAnon(page);
+
+@@ -108,6 +113,9 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
+ page = pfn_to_page(pfn);
+ page_ext = page_ext_get(page);
+
++ if (!page_ext)
++ return;
++
+ BUG_ON(PageSlab(page));
+ anon = PageAnon(page);
+
+@@ -138,7 +146,10 @@ void __page_table_check_zero(struct page *page, unsigned int order)
+ BUG_ON(PageSlab(page));
+
+ page_ext = page_ext_get(page);
+- BUG_ON(!page_ext);
++
++ if (!page_ext)
++ return;
++
+ for (i = 0; i < (1ul << order); i++) {
+ struct page_table_check *ptc = get_page_table_check(page_ext);
+
+@@ -182,6 +193,22 @@ void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+ }
+ EXPORT_SYMBOL(__page_table_check_pud_clear);
+
++/* Whether the swap entry cached writable information */
++static inline bool swap_cached_writable(swp_entry_t entry)
++{
++ return is_writable_device_exclusive_entry(entry) ||
++ is_writable_device_private_entry(entry) ||
++ is_writable_migration_entry(entry);
++}
++
++static inline void page_table_check_pte_flags(pte_t pte)
++{
++ if (pte_present(pte) && pte_uffd_wp(pte))
++ WARN_ON_ONCE(pte_write(pte));
++ else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
++ WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
++}
++
+ void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ unsigned int nr)
+ {
+@@ -190,6 +217,8 @@ void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ if (&init_mm == mm)
+ return;
+
++ page_table_check_pte_flags(pte);
++
+ for (i = 0; i < nr; i++)
+ __page_table_check_pte_clear(mm, ptep_get(ptep + i));
+ if (pte_user_accessible_page(pte))
+@@ -197,11 +226,21 @@ void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ }
+ EXPORT_SYMBOL(__page_table_check_ptes_set);
+
++static inline void page_table_check_pmd_flags(pmd_t pmd)
++{
++ if (pmd_present(pmd) && pmd_uffd_wp(pmd))
++ WARN_ON_ONCE(pmd_write(pmd));
++ else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
++ WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
++}
++
+ void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
+ {
+ if (&init_mm == mm)
+ return;
+
++ page_table_check_pmd_flags(pmd);
++
+ __page_table_check_pmd_clear(mm, *pmdp);
+ if (pmd_user_accessible_page(pmd)) {
+ page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
+diff --git a/mm/percpu.c b/mm/percpu.c
+index a7665de8485fd9..d287cebd58caa3 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -3306,13 +3306,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
+ if (rc < 0)
+ panic("failed to map percpu area, err=%d\n", rc);
+
+- /*
+- * FIXME: Archs with virtual cache should flush local
+- * cache for the linear mapping here - something
+- * equivalent to flush_cache_vmap() on the local cpu.
+- * flush_cache_vmap() can't be used as most supporting
+- * data structures are not set up yet.
+- */
++ flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
+
+ /* copy static data */
+ memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index 4fcd959dcc4d02..a78a4adf711ac2 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -198,6 +198,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+ {
++ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ return old;
+@@ -208,6 +209,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+ {
++ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ return pmdp_invalidate(vma, address, pmdp);
+ }
+ #endif
+diff --git a/mm/readahead.c b/mm/readahead.c
+index e815c114de21e3..7c0449f8bec7f4 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
+
+ if (!folio)
+ return -ENOMEM;
+- mark = round_up(mark, 1UL << order);
++ mark = round_down(mark, 1UL << order);
+ if (index == mark)
+ folio_set_readahead(folio);
+ err = filemap_add_folio(ractl->mapping, folio, index, gfp);
+@@ -490,6 +490,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ pgoff_t index = readahead_index(ractl);
+ pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
+ pgoff_t mark = index + ra->size - ra->async_size;
++ unsigned int nofs;
+ int err = 0;
+ gfp_t gfp = readahead_gfp_mask(mapping);
+
+@@ -506,6 +507,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ new_order--;
+ }
+
++ /* See comment in page_cache_ra_unbounded() */
++ nofs = memalloc_nofs_save();
+ filemap_invalidate_lock_shared(mapping);
+ while (index <= limit) {
+ unsigned int order = new_order;
+@@ -534,6 +537,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
+
+ read_pages(ractl);
+ filemap_invalidate_unlock_shared(mapping);
++ memalloc_nofs_restore(nofs);
+
+ /*
+ * If there were already pages in the page cache, then we may have
+@@ -577,7 +581,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
+ * It's the expected callback index, assume sequential access.
+ * Ramp up sizes, and push forward the readahead window.
+ */
+- expected = round_up(ra->start + ra->size - ra->async_size,
++ expected = round_down(ra->start + ra->size - ra->async_size,
+ 1UL << order);
+ if (index == expected || index == (ra->start + ra->size)) {
+ ra->start += ra->size;
+@@ -735,7 +739,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
+ */
+ ret = -EINVAL;
+ if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+- !S_ISREG(file_inode(f.file)->i_mode))
++ (!S_ISREG(file_inode(f.file)->i_mode) &&
++ !S_ISBLK(file_inode(f.file)->i_mode)))
+ goto out;
+
+ ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+diff --git a/mm/secretmem.c b/mm/secretmem.c
+index 3afb5ad701e14a..399552814fd0ff 100644
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -238,7 +238,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
+ /* make sure local flags do not confict with global fcntl.h */
+ BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);
+
+- if (!secretmem_enable)
++ if (!secretmem_enable || !can_set_direct_map())
+ return -ENOSYS;
+
+ if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
+@@ -280,7 +280,7 @@ static struct file_system_type secretmem_fs = {
+
+ static int __init secretmem_init(void)
+ {
+- if (!secretmem_enable)
++ if (!secretmem_enable || !can_set_direct_map())
+ return 0;
+
+ secretmem_mnt = kern_mount(&secretmem_fs);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 69595d3418829f..3d721d5591dd72 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -311,7 +311,7 @@ static void shmem_disable_quotas(struct super_block *sb)
+ dquot_quota_off(sb, type);
+ }
+
+-static struct dquot **shmem_get_dquots(struct inode *inode)
++static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
+ {
+ return SHMEM_I(inode)->i_dquot;
+ }
+@@ -535,8 +535,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
+
+ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+
+-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags)
++static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
+ {
+ loff_t i_size;
+
+@@ -567,6 +568,16 @@ bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ }
+ }
+
++bool shmem_is_huge(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
++{
++ if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
++ return false;
++
++ return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
++}
++
+ #if defined(CONFIG_SYSFS)
+ static int shmem_parse_huge(const char *str)
+ {
+@@ -742,12 +753,6 @@ static long shmem_unused_huge_count(struct super_block *sb,
+
+ #define shmem_huge SHMEM_HUGE_DENY
+
+-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags)
+-{
+- return false;
+-}
+-
+ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
+ struct shrink_control *sc, unsigned long nr_to_split)
+ {
+@@ -1098,7 +1103,24 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ }
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio),
+ folio);
+- truncate_inode_folio(mapping, folio);
++
++ if (!folio_test_large(folio)) {
++ truncate_inode_folio(mapping, folio);
++ } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
++ /*
++ * If we split a page, reset the loop so
++ * that we pick up the new sub pages.
++ * Otherwise the THP was entirely
++ * dropped or the target range was
++ * zeroed, so just continue the loop as
++ * is.
++ */
++ if (!folio_test_large(folio)) {
++ folio_unlock(folio);
++ index = start;
++ break;
++ }
++ }
+ }
+ folio_unlock(folio);
+ }
+diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
+index 062d1c1097ae35..ce514e700d2f65 100644
+--- a/mm/shmem_quota.c
++++ b/mm/shmem_quota.c
+@@ -116,7 +116,7 @@ static int shmem_free_file_info(struct super_block *sb, int type)
+ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
+ {
+ struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
+- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
++ struct rb_node *node;
+ qid_t id = from_kqid(&init_user_ns, *qid);
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_id *entry = NULL;
+@@ -126,6 +126,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
+ return -ESRCH;
+
+ down_read(&dqopt->dqio_sem);
++ node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ while (node) {
+ entry = rb_entry(node, struct quota_id, node);
+
+@@ -165,7 +166,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
+ static int shmem_acquire_dquot(struct dquot *dquot)
+ {
+ struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
+- struct rb_node **n = &((struct rb_root *)info->dqi_priv)->rb_node;
++ struct rb_node **n;
+ struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
+ struct rb_node *parent = NULL, *new_node = NULL;
+ struct quota_id *new_entry, *entry;
+@@ -176,6 +177,8 @@ static int shmem_acquire_dquot(struct dquot *dquot)
+ mutex_lock(&dquot->dq_lock);
+
+ down_write(&dqopt->dqio_sem);
++ n = &((struct rb_root *)info->dqi_priv)->rb_node;
++
+ while (*n) {
+ parent = *n;
+ entry = rb_entry(parent, struct quota_id, node);
+@@ -264,7 +267,7 @@ static bool shmem_is_empty_dquot(struct dquot *dquot)
+ static int shmem_release_dquot(struct dquot *dquot)
+ {
+ struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
+- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
++ struct rb_node *node;
+ qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ struct quota_id *entry = NULL;
+@@ -275,6 +278,7 @@ static int shmem_release_dquot(struct dquot *dquot)
+ goto out_dqlock;
+
+ down_write(&dqopt->dqio_sem);
++ node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ while (node) {
+ entry = rb_entry(node, struct quota_id, node);
+
+diff --git a/mm/show_mem.c b/mm/show_mem.c
+index 4b888b18bddea9..b896e54e3a26cf 100644
+--- a/mm/show_mem.c
++++ b/mm/show_mem.c
+@@ -355,8 +355,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
+
+ for_each_populated_zone(zone) {
+ unsigned int order;
+- unsigned long nr[MAX_ORDER + 1], flags, total = 0;
+- unsigned char types[MAX_ORDER + 1];
++ unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
++ unsigned char types[NR_PAGE_ORDERS];
+
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
+@@ -366,7 +366,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
+ printk(KERN_CONT "%s: ", zone->name);
+
+ spin_lock_irqsave(&zone->lock, flags);
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct free_area *area = &zone->free_area[order];
+ int type;
+
+@@ -380,7 +380,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ printk(KERN_CONT "%lu*%lukB ",
+ nr[order], K(1UL) << order);
+ if (nr[order])
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 9bbffe82d65af1..ef971fcdaa0708 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -528,26 +528,6 @@ bool slab_is_available(void)
+ }
+
+ #ifdef CONFIG_PRINTK
+-/**
+- * kmem_valid_obj - does the pointer reference a valid slab object?
+- * @object: pointer to query.
+- *
+- * Return: %true if the pointer is to a not-yet-freed object from
+- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+- * is to an already-freed object, and %false otherwise.
+- */
+-bool kmem_valid_obj(void *object)
+-{
+- struct folio *folio;
+-
+- /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+- if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+- return false;
+- folio = virt_to_folio(object);
+- return folio_test_slab(folio);
+-}
+-EXPORT_SYMBOL_GPL(kmem_valid_obj);
+-
+ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ if (__kfence_obj_info(kpp, object, slab))
+@@ -566,11 +546,11 @@ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *
+ * and, if available, the slab name, return address, and stack trace from
+ * the allocation and last free path of that object.
+ *
+- * This function will splat if passed a pointer to a non-slab object.
+- * If you are not sure what type of object you have, you should instead
+- * use mem_dump_obj().
++ * Return: %true if the pointer is to a not-yet-freed object from
++ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
++ * is to an already-freed object, and %false otherwise.
+ */
+-void kmem_dump_obj(void *object)
++bool kmem_dump_obj(void *object)
+ {
+ char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
+ int i;
+@@ -578,13 +558,13 @@ void kmem_dump_obj(void *object)
+ unsigned long ptroffset;
+ struct kmem_obj_info kp = { };
+
+- if (WARN_ON_ONCE(!virt_addr_valid(object)))
+- return;
++ /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
++ if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
++ return false;
+ slab = virt_to_slab(object);
+- if (WARN_ON_ONCE(!slab)) {
+- pr_cont(" non-slab memory.\n");
+- return;
+- }
++ if (!slab)
++ return false;
++
+ kmem_obj_info(&kp, object, slab);
+ if (kp.kp_slab_cache)
+ pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+@@ -621,6 +601,7 @@ void kmem_dump_obj(void *object)
+ pr_info(" %pS\n", kp.kp_free_stack[i]);
+ }
+
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(kmem_dump_obj);
+ #endif
+@@ -1407,6 +1388,13 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
+
+ /* If the object still fits, repoison it precisely. */
+ if (ks >= new_size) {
++ /* Zero out spare memory. */
++ if (want_init_on_alloc(flags)) {
++ kasan_disable_current();
++ memset((void *)p + new_size, 0, ks - new_size);
++ kasan_enable_current();
++ }
++
+ p = kasan_krealloc((void *)p, new_size, flags);
+ return (void *)p;
+ }
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 77d91e565045ca..0706113c4c8433 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section)
+ static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
+ unsigned long *end_pfn)
+ {
+- unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
++ unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT;
+
+ /*
+ * Sanity checks - do not allow an architecture to pass
+@@ -791,6 +791,13 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ if (empty) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+
++ /*
++		 * Mark the section invalid so that valid_section()
++		 * returns false. This prevents code from dereferencing
++		 * the ms->usage array.
++ */
++ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
++
+ /*
+ * When removing an early section, the usage map is kept (as the
+ * usage maps of other sections fall into the same page). It
+@@ -799,16 +806,10 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
+ * was allocated during boot.
+ */
+ if (!PageReserved(virt_to_page(ms->usage))) {
+- kfree(ms->usage);
+- ms->usage = NULL;
++ kfree_rcu(ms->usage, rcu);
++ WRITE_ONCE(ms->usage, NULL);
+ }
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+- /*
+- * Mark the section invalid so that valid_section()
+- * return false. This prevents code from dereferencing
+- * ms->usage array.
+- */
+- ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+ }
+
+ /*
+diff --git a/mm/swap.h b/mm/swap.h
+index 8a3c7a0ace4f0c..693d1b2815598f 100644
+--- a/mm/swap.h
++++ b/mm/swap.h
+@@ -38,6 +38,7 @@ void __delete_from_swap_cache(struct folio *folio,
+ void delete_from_swap_cache(struct folio *folio);
+ void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end);
++void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
+ struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr);
+ struct folio *filemap_get_incore_folio(struct address_space *mapping,
+@@ -96,6 +97,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+ return 0;
+ }
+
++static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
++{
++}
++
+ static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
+ {
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index e52f486834ebf7..c856d6bb2daf3c 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1226,6 +1226,11 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+ * with get_swap_device() and put_swap_device(), unless the swap
+ * functions call get/put_swap_device() by themselves.
+ *
++ * Note that when only holding the PTL, swapoff might succeed immediately
++ * after freeing a swap entry. Therefore, immediately after
++ * __swap_entry_free(), the swap info might become stale and should not
++ * be touched without a prior get_swap_device().
++ *
+ * Check whether swap entry is valid in the swap device. If so,
+ * return pointer to swap_info_struct, and keep the swap entry valid
+ * via preventing the swap device from being swapoff, until
+@@ -1603,13 +1608,19 @@ int free_swap_and_cache(swp_entry_t entry)
+ if (non_swap_entry(entry))
+ return 1;
+
+- p = _swap_info_get(entry);
++ p = get_swap_device(entry);
+ if (p) {
++ if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) {
++ put_swap_device(p);
++ return 0;
++ }
++
+ count = __swap_entry_free(p, entry);
+ if (count == SWAP_HAS_CACHE &&
+ !swap_page_trans_huge_swapped(p, entry))
+ __try_to_reclaim_swap(p, swp_offset(entry),
+ TTRS_UNMAPPED | TTRS_FULL);
++ put_swap_device(p);
+ }
+ return p != NULL;
+ }
+@@ -1992,7 +2003,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type)
+
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma) {
+- if (vma->anon_vma) {
++ if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
+ ret = unuse_vma(vma, type);
+ if (ret)
+ break;
+@@ -3362,6 +3373,19 @@ int swapcache_prepare(swp_entry_t entry)
+ return __swap_duplicate(entry, SWAP_HAS_CACHE);
+ }
+
++void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
++{
++ struct swap_cluster_info *ci;
++ unsigned long offset = swp_offset(entry);
++ unsigned char usage;
++
++ ci = lock_cluster_or_swap_info(si, offset);
++ usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
++ unlock_cluster_or_swap_info(si, ci);
++ if (!usage)
++ free_swap_slot(entry);
++}
++
+ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+ {
+ return swap_type_to_swap_info(swp_type(entry));
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 8e3aa9e8618ed8..70c09213bb9200 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -174,7 +174,7 @@ static void truncate_cleanup_folio(struct folio *folio)
+ if (folio_mapped(folio))
+ unmap_mapping_folio(folio);
+
+- if (folio_has_private(folio))
++ if (folio_needs_release(folio))
+ folio_invalidate(folio, 0, folio_size(folio));
+
+ /*
+@@ -235,7 +235,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
+ */
+ folio_zero_range(folio, offset, length);
+
+- if (folio_has_private(folio))
++ if (folio_needs_release(folio))
+ folio_invalidate(folio, offset, length);
+ if (!folio_test_large(folio))
+ return true;
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 96d9eae5c7cc8e..92fe2a76f4b512 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -213,6 +213,38 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
+ goto out;
+ }
+
++static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
++ struct vm_area_struct *dst_vma,
++ unsigned long dst_addr)
++{
++ struct folio *folio;
++ int ret = -ENOMEM;
++
++ folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
++ if (!folio)
++ return ret;
++
++ if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
++ goto out_put;
++
++ /*
++ * The memory barrier inside __folio_mark_uptodate makes sure that
++ * zeroing out the folio become visible before mapping the page
++	 * zeroing out the folio becomes visible before mapping the page
++ */
++ __folio_mark_uptodate(folio);
++
++ ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
++ &folio->page, true, 0);
++ if (ret)
++ goto out_put;
++
++ return 0;
++out_put:
++ folio_put(folio);
++ return ret;
++}
++
+ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr)
+@@ -221,6 +253,9 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
+ spinlock_t *ptl;
+ int ret;
+
++ if (mm_forbids_zeropage(dst_vma->vm_mm))
++ return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
++
+ _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+ dst_vma->vm_page_prot));
+ ret = -EAGAIN;
+@@ -357,6 +392,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
+ unsigned long dst_start,
+ unsigned long src_start,
+ unsigned long len,
++ atomic_t *mmap_changing,
+ uffd_flags_t flags)
+ {
+ struct mm_struct *dst_mm = dst_vma->vm_mm;
+@@ -472,6 +508,15 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
+ goto out;
+ }
+ mmap_read_lock(dst_mm);
++ /*
++		 * If memory mappings are changing because of a non-cooperative
++		 * operation (e.g. mremap) running in parallel, bail out and
++		 * request the user to retry later.
++ */
++ if (mmap_changing && atomic_read(mmap_changing)) {
++ err = -EAGAIN;
++ break;
++ }
+
+ dst_vma = NULL;
+ goto retry;
+@@ -506,6 +551,7 @@ extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
+ unsigned long dst_start,
+ unsigned long src_start,
+ unsigned long len,
++ atomic_t *mmap_changing,
+ uffd_flags_t flags);
+ #endif /* CONFIG_HUGETLB_PAGE */
+
+@@ -622,8 +668,8 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
+ * If this is a HUGETLB vma, pass off to appropriate routine
+ */
+ if (is_vm_hugetlb_page(dst_vma))
+- return mfill_atomic_hugetlb(dst_vma, dst_start,
+- src_start, len, flags);
++ return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
++ len, mmap_changing, flags);
+
+ if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+ goto out_unlock;
+@@ -653,27 +699,30 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
+ }
+
+ dst_pmdval = pmdp_get_lockless(dst_pmd);
+- /*
+- * If the dst_pmd is mapped as THP don't
+- * override it and just be strict.
+- */
+- if (unlikely(pmd_trans_huge(dst_pmdval))) {
+- err = -EEXIST;
+- break;
+- }
+ if (unlikely(pmd_none(dst_pmdval)) &&
+ unlikely(__pte_alloc(dst_mm, dst_pmd))) {
+ err = -ENOMEM;
+ break;
+ }
+- /* If an huge pmd materialized from under us fail */
+- if (unlikely(pmd_trans_huge(*dst_pmd))) {
++ dst_pmdval = pmdp_get_lockless(dst_pmd);
++ /*
++		 * If the dst_pmd is THP, don't override it and just be strict.
++ * (This includes the case where the PMD used to be THP and
++ * changed back to none after __pte_alloc().)
++ */
++ if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
++ pmd_devmap(dst_pmdval))) {
++ err = -EEXIST;
++ break;
++ }
++ if (unlikely(pmd_bad(dst_pmdval))) {
+ err = -EFAULT;
+ break;
+ }
+-
+- BUG_ON(pmd_none(*dst_pmd));
+- BUG_ON(pmd_trans_huge(*dst_pmd));
++ /*
++ * For shmem mappings, khugepaged is allowed to remove page
++ * tables under us; pte_offset_map_lock() will deal with that.
++ */
+
+ err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
+ src_addr, flags, &folio);
+diff --git a/mm/util.c b/mm/util.c
+index 8cbbfd3a3d5984..08d49489655221 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -414,6 +414,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
++#ifdef CONFIG_STACK_GROWSUP
++ /*
++ * For an upwards growing stack the calculation is much simpler.
++ * Memory for the maximum stack size is reserved at the top of the
++ * task. mmap_base starts directly below the stack and grows
++ * downwards.
++ */
++ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
++#else
+ unsigned long gap = rlim_stack->rlim_cur;
+ unsigned long pad = stack_guard_gap;
+
+@@ -425,12 +434,13 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ if (gap + pad > gap)
+ gap += pad;
+
+- if (gap < MIN_GAP)
++ if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(STACK_TOP - gap - rnd);
++#endif
+ }
+
+ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+@@ -1060,10 +1070,8 @@ void mem_dump_obj(void *object)
+ {
+ const char *type;
+
+- if (kmem_valid_obj(object)) {
+- kmem_dump_obj(object);
++ if (kmem_dump_obj(object))
+ return;
+- }
+
+ if (vmalloc_dump_obj(object))
+ return;
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index a3fedb3ee0dbd4..0148be0814af73 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1939,6 +1939,7 @@ struct vmap_block {
+ struct list_head free_list;
+ struct rcu_head rcu_head;
+ struct list_head purge;
++ unsigned int cpu;
+ };
+
+ /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
+@@ -1983,7 +1984,15 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
+ static struct xarray *
+ addr_to_vb_xa(unsigned long addr)
+ {
+- int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
++ int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
++
++ /*
++	 * Note that bit nr_cpu_ids - 1 is always set in
++	 * cpu_possible_mask, so we never invoke cpumask_next()
++	 * with an index pointing at it.
++ */
++ if (!cpu_possible(index))
++ index = cpumask_next(index, cpu_possible_mask);
+
+ return &per_cpu(vmap_block_queue, index).vmap_blocks;
+ }
+@@ -2057,6 +2066,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ vb->dirty_max = 0;
+ bitmap_set(vb->used_map, 0, (1UL << order));
+ INIT_LIST_HEAD(&vb->free_list);
++ vb->cpu = raw_smp_processor_id();
+
+ xa = addr_to_vb_xa(va->va_start);
+ vb_idx = addr_to_vb_idx(va->va_start);
+@@ -2066,8 +2076,14 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ free_vmap_area(va);
+ return ERR_PTR(err);
+ }
+-
+- vbq = raw_cpu_ptr(&vmap_block_queue);
++ /*
++	 * list_add_tail_rcu can happen on a core other than
++	 * vb->cpu due to task migration, which is safe as
++	 * list_add_tail_rcu will ensure the list's integrity
++	 * together with list_for_each_rcu from the read side.
++ */
++ vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
+ spin_lock(&vbq->lock);
+ list_add_tail_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+@@ -2093,9 +2109,10 @@ static void free_vmap_block(struct vmap_block *vb)
+ }
+
+ static bool purge_fragmented_block(struct vmap_block *vb,
+- struct vmap_block_queue *vbq, struct list_head *purge_list,
+- bool force_purge)
++ struct list_head *purge_list, bool force_purge)
+ {
++ struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
++
+ if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
+ vb->dirty == VMAP_BBMAP_BITS)
+ return false;
+@@ -2143,7 +2160,7 @@ static void purge_fragmented_blocks(int cpu)
+ continue;
+
+ spin_lock(&vb->lock);
+- purge_fragmented_block(vb, vbq, &purge, true);
++ purge_fragmented_block(vb, &purge, true);
+ spin_unlock(&vb->lock);
+ }
+ rcu_read_unlock();
+@@ -2280,7 +2297,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
+ * not purgeable, check whether there is dirty
+ * space to be flushed.
+ */
+- if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
++ if (!purge_fragmented_block(vb, &purge_list, false) &&
+ vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
+ unsigned long va_start = vb->va->va_start;
+ unsigned long s, e;
+@@ -2994,7 +3011,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ {
+ unsigned int nr_allocated = 0;
+ gfp_t alloc_gfp = gfp;
+- bool nofail = false;
++ bool nofail = gfp & __GFP_NOFAIL;
+ struct page *page;
+ int i;
+
+@@ -3051,27 +3068,19 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ * and compaction etc.
+ */
+ alloc_gfp &= ~__GFP_NOFAIL;
+- nofail = true;
+ }
+
+ /* High-order pages or fallback path if "bulk" fails. */
+ while (nr_allocated < nr_pages) {
+- if (fatal_signal_pending(current))
++ if (!nofail && fatal_signal_pending(current))
+ break;
+
+ if (nid == NUMA_NO_NODE)
+ page = alloc_pages(alloc_gfp, order);
+ else
+ page = alloc_pages_node(nid, alloc_gfp, order);
+- if (unlikely(!page)) {
+- if (!nofail)
+- break;
+-
+- /* fall back to the zero order allocations */
+- alloc_gfp |= __GFP_NOFAIL;
+- order = 0;
+- continue;
+- }
++ if (unlikely(!page))
++ break;
+
+ /*
+ * Higher order allocations must be able to be treated as
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 6f13394b112eae..3c91b86d59e935 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2261,25 +2261,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
+
+ }
+
+-#ifdef CONFIG_CMA
+-/*
+- * It is waste of effort to scan and reclaim CMA pages if it is not available
+- * for current allocation context. Kswapd can not be enrolled as it can not
+- * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
+- */
+-static bool skip_cma(struct folio *folio, struct scan_control *sc)
+-{
+- return !current_is_kswapd() &&
+- gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
+- get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
+-}
+-#else
+-static bool skip_cma(struct folio *folio, struct scan_control *sc)
+-{
+- return false;
+-}
+-#endif
+-
+ /*
+ * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
+ *
+@@ -2326,8 +2307,7 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
+ nr_pages = folio_nr_pages(folio);
+ total_scan += nr_pages;
+
+- if (folio_zonenum(folio) > sc->reclaim_idx ||
+- skip_cma(folio, sc)) {
++ if (folio_zonenum(folio) > sc->reclaim_idx) {
+ nr_skipped[folio_zonenum(folio)] += nr_pages;
+ move_to = &folios_skipped;
+ goto move;
+@@ -4546,6 +4526,32 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+ * working set protection
+ ******************************************************************************/
+
++static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
++{
++ int priority;
++ unsigned long reclaimable;
++
++ if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
++ return;
++ /*
++ * Determine the initial priority based on
++ * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim,
++ * where reclaimed_to_scanned_ratio = inactive / total.
++ */
++ reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
++ if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
++ reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
++
++ /* round down reclaimable and round up sc->nr_to_reclaim */
++ priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
++
++ /*
++ * The estimation is based on LRU pages only, so cap it to prevent
++ * overshoots of shrinker objects by large margins.
++ */
++ sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
++}
++
+ static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
+ {
+ int gen, type, zone;
+@@ -4579,19 +4585,17 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+- /* see the comment on lru_gen_folio */
+- gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
+- birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+-
+- if (time_is_after_jiffies(birth + min_ttl))
++ if (mem_cgroup_below_min(NULL, memcg))
+ return false;
+
+ if (!lruvec_is_sizable(lruvec, sc))
+ return false;
+
+- mem_cgroup_calculate_protection(NULL, memcg);
++ /* see the comment on lru_gen_folio */
++ gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
++ birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+- return !mem_cgroup_below_min(NULL, memcg);
++ return time_is_before_jiffies(birth + min_ttl);
+ }
+
+ /* to protect the working set of the last N jiffies */
+@@ -4601,23 +4605,20 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
+ {
+ struct mem_cgroup *memcg;
+ unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
++ bool reclaimable = !min_ttl;
+
+ VM_WARN_ON_ONCE(!current_is_kswapd());
+
+- /* check the order to exclude compaction-induced reclaim */
+- if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
+- return;
++ set_initial_priority(pgdat, sc);
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+
+- if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) {
+- mem_cgroup_iter_break(NULL, memcg);
+- return;
+- }
++ mem_cgroup_calculate_protection(NULL, memcg);
+
+- cond_resched();
++ if (!reclaimable)
++ reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl);
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+ /*
+@@ -4625,7 +4626,7 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
+ * younger than min_ttl. However, another possibility is all memcgs are
+ * either too small or below min.
+ */
+- if (mutex_trylock(&oom_lock)) {
++ if (!reclaimable && mutex_trylock(&oom_lock)) {
+ struct oom_control oc = {
+ .gfp_mask = sc->gfp_mask,
+ };
+@@ -4656,6 +4657,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+ int young = 0;
+ pte_t *pte = pvmw->pte;
+ unsigned long addr = pvmw->address;
++ struct vm_area_struct *vma = pvmw->vma;
+ struct folio *folio = pfn_folio(pvmw->pfn);
+ bool can_swap = !folio_is_file_lru(folio);
+ struct mem_cgroup *memcg = folio_memcg(folio);
+@@ -4670,11 +4672,15 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+ if (spin_is_contended(pvmw->ptl))
+ return;
+
++ /* exclude special VMAs containing anon pages from COW */
++ if (vma->vm_flags & VM_SPECIAL)
++ return;
++
+ /* avoid taking the LRU lock under the PTL when possible */
+ walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
+
+- start = max(addr & PMD_MASK, pvmw->vma->vm_start);
+- end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
++ start = max(addr & PMD_MASK, vma->vm_start);
++ end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
+
+ if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
+ if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+@@ -4699,7 +4705,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+ unsigned long pfn;
+ pte_t ptent = ptep_get(pte + i);
+
+- pfn = get_pte_pfn(ptent, pvmw->vma, addr);
++ pfn = get_pte_pfn(ptent, vma, addr);
+ if (pfn == -1)
+ continue;
+
+@@ -4710,7 +4716,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+ if (!folio)
+ continue;
+
+- if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
++ if (!ptep_test_and_clear_young(vma, addr, pte + i))
+ VM_WARN_ON_ONCE(true);
+
+ young++;
+@@ -4790,6 +4796,9 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
+ else
+ VM_WARN_ON_ONCE(true);
+
++ WRITE_ONCE(lruvec->lrugen.seg, seg);
++ WRITE_ONCE(lruvec->lrugen.gen, new);
++
+ hlist_nulls_del_rcu(&lruvec->lrugen.list);
+
+ if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD)
+@@ -4800,9 +4809,6 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
+ pgdat->memcg_lru.nr_memcgs[old]--;
+ pgdat->memcg_lru.nr_memcgs[new]++;
+
+- lruvec->lrugen.gen = new;
+- WRITE_ONCE(lruvec->lrugen.seg, seg);
+-
+ if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
+ WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
+
+@@ -4825,11 +4831,11 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
+
+ gen = get_memcg_gen(pgdat->memcg_lru.seq);
+
++ lruvec->lrugen.gen = gen;
++
+ hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
+ pgdat->memcg_lru.nr_memcgs[gen]++;
+
+- lruvec->lrugen.gen = gen;
+-
+ spin_unlock_irq(&pgdat->memcg_lru.lock);
+ }
+ }
+@@ -4933,7 +4939,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
+ }
+
+ /* protected */
+- if (tier > tier_idx) {
++ if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) {
+ int hist = lru_hist_from_seq(lrugen->min_seq[type]);
+
+ gen = folio_inc_gen(lruvec, folio, false);
+@@ -4945,7 +4951,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
+ }
+
+ /* ineligible */
+- if (zone > sc->reclaim_idx || skip_cma(folio, sc)) {
++ if (zone > sc->reclaim_idx) {
+ gen = folio_inc_gen(lruvec, folio, false);
+ list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ return true;
+@@ -5221,7 +5227,6 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
+
+ /* retry folios that may have missed folio_rotate_reclaimable() */
+ list_move(&folio->lru, &clean);
+- sc->nr_scanned -= folio_nr_pages(folio);
+ }
+
+ spin_lock_irq(&lruvec->lru_lock);
+@@ -5291,7 +5296,12 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+ }
+
+ /* try to scrape all its memory if this memcg was deleted */
+- *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
++ if (!mem_cgroup_online(memcg)) {
++ *nr_to_scan = total;
++ return false;
++ }
++
++ *nr_to_scan = total >> sc->priority;
+
+ /*
+ * The aging tries to be lazy to reduce the overhead, while the eviction
+@@ -5328,7 +5338,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
+ DEFINE_MAX_SEQ(lruvec);
+
+ if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
+- return 0;
++ return -1;
+
+ if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
+ return nr_to_scan;
+@@ -5341,20 +5351,41 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool
+ return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
+ }
+
+-static unsigned long get_nr_to_reclaim(struct scan_control *sc)
++static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
+ {
++ int i;
++ enum zone_watermarks mark;
++
+ /* don't abort memcg reclaim to ensure fairness */
+ if (!root_reclaim(sc))
+- return -1;
++ return false;
++
++ if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
++ return true;
++
++ /* check the order to exclude compaction-induced reclaim */
++ if (!current_is_kswapd() || sc->order)
++ return false;
++
++ mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
++ WMARK_PROMO : WMARK_HIGH;
+
+- return max(sc->nr_to_reclaim, compact_gap(sc->order));
++ for (i = 0; i <= sc->reclaim_idx; i++) {
++ struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
++ unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
++
++ if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
++ return false;
++ }
++
++ /* kswapd should abort if all eligible zones are safe */
++ return true;
+ }
+
+ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ {
+ long nr_to_scan;
+ unsigned long scanned = 0;
+- unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
+ int swappiness = get_swappiness(lruvec, sc);
+
+ /* clean file folios are more likely to exist */
+@@ -5376,13 +5407,13 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ if (scanned >= nr_to_scan)
+ break;
+
+- if (sc->nr_reclaimed >= nr_to_reclaim)
++ if (should_abort_scan(lruvec, sc))
+ break;
+
+ cond_resched();
+ }
+
+- /* whether try_to_inc_max_seq() was successful */
++ /* whether this lruvec should be rotated */
+ return nr_to_scan < 0;
+ }
+
+@@ -5391,22 +5422,16 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
+ bool success;
+ unsigned long scanned = sc->nr_scanned;
+ unsigned long reclaimed = sc->nr_reclaimed;
+- int seg = lru_gen_memcg_seg(lruvec);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+- /* see the comment on MEMCG_NR_GENS */
+- if (!lruvec_is_sizable(lruvec, sc))
+- return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
+-
+- mem_cgroup_calculate_protection(NULL, memcg);
+-
++ /* lru_gen_age_node() called mem_cgroup_calculate_protection() */
+ if (mem_cgroup_below_min(NULL, memcg))
+ return MEMCG_LRU_YOUNG;
+
+ if (mem_cgroup_below_low(NULL, memcg)) {
+ /* see the comment on MEMCG_NR_GENS */
+- if (seg != MEMCG_LRU_TAIL)
++ if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
+ return MEMCG_LRU_TAIL;
+
+ memcg_memory_event(memcg, MEMCG_LOW);
+@@ -5422,7 +5447,15 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
+
+ flush_reclaim_state(sc);
+
+- return success ? MEMCG_LRU_YOUNG : 0;
++ if (success && mem_cgroup_online(memcg))
++ return MEMCG_LRU_YOUNG;
++
++ if (!success && lruvec_is_sizable(lruvec, sc))
++ return 0;
++
++ /* one retry if offlined or too small */
++ return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
++ MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
+ }
+
+ #ifdef CONFIG_MEMCG
+@@ -5436,14 +5469,13 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+ struct lruvec *lruvec;
+ struct lru_gen_folio *lrugen;
+ struct mem_cgroup *memcg;
+- const struct hlist_nulls_node *pos;
+- unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
++ struct hlist_nulls_node *pos;
+
++ gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
+ bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
+ restart:
+ op = 0;
+ memcg = NULL;
+- gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
+
+ rcu_read_lock();
+
+@@ -5454,6 +5486,10 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+ }
+
+ mem_cgroup_put(memcg);
++ memcg = NULL;
++
++ if (gen != READ_ONCE(lrugen->gen))
++ continue;
+
+ lruvec = container_of(lrugen, struct lruvec, lrugen);
+ memcg = lruvec_memcg(lruvec);
+@@ -5470,7 +5506,7 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+
+ rcu_read_lock();
+
+- if (sc->nr_reclaimed >= nr_to_reclaim)
++ if (should_abort_scan(lruvec, sc))
+ break;
+ }
+
+@@ -5481,7 +5517,7 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+
+ mem_cgroup_put(memcg);
+
+- if (sc->nr_reclaimed >= nr_to_reclaim)
++ if (!is_a_nulls(pos))
+ return;
+
+ /* restart if raced with lru_gen_rotate_memcg() */
+@@ -5529,31 +5565,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
+
+ #endif
+
+-static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
+-{
+- int priority;
+- unsigned long reclaimable;
+- struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
+-
+- if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH)
+- return;
+- /*
+- * Determine the initial priority based on ((total / MEMCG_NR_GENS) >>
+- * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the
+- * estimated reclaimed_to_scanned_ratio = inactive / total.
+- */
+- reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
+- if (get_swappiness(lruvec, sc))
+- reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
+-
+- reclaimable /= MEMCG_NR_GENS;
+-
+- /* round down reclaimable and round up sc->nr_to_reclaim */
+- priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);
+-
+- sc->priority = clamp(priority, 0, DEF_PRIORITY);
+-}
+-
+ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
+ {
+ struct blk_plug plug;
+@@ -5592,8 +5603,8 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *
+
+ blk_finish_plug(&plug);
+ done:
+- /* kswapd should never fail */
+- pgdat->kswapd_failures = 0;
++ if (sc->nr_reclaimed > reclaimed)
++ pgdat->kswapd_failures = 0;
+ }
+
+ /******************************************************************************
+@@ -6391,7 +6402,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ /* Use reclaim/compaction for costly allocs or under memory pressure */
+ static bool in_reclaim_compaction(struct scan_control *sc)
+ {
+- if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
++ if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
+ (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+ sc->priority < DEF_PRIORITY - 2))
+ return true;
+@@ -6636,6 +6647,9 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
+ {
+ unsigned long watermark;
+
++ if (!gfp_compaction_allowed(sc->gfp_mask))
++ return false;
++
+ /* Allocation can already succeed, nothing to do */
+ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
+ sc->reclaim_idx, 0))
+@@ -7313,6 +7327,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
+ {
+ struct zone *zone;
+ int z;
++ unsigned long nr_reclaimed = sc->nr_reclaimed;
+
+ /* Reclaim a number of pages proportional to the number of zones */
+ sc->nr_to_reclaim = 0;
+@@ -7340,7 +7355,8 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
+ if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
+ sc->order = 0;
+
+- return sc->nr_scanned >= sc->nr_to_reclaim;
++ /* account for progress from mm_account_reclaimed_pages() */
++ return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim;
+ }
+
+ /* Page allocator PCP high watermark is lowered if reclaim is active. */
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 00e81e99c6ee24..e9616c4ca12db8 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1055,7 +1055,7 @@ static void fill_contig_page_info(struct zone *zone,
+ info->free_blocks_total = 0;
+ info->free_blocks_suitable = 0;
+
+- for (order = 0; order <= MAX_ORDER; order++) {
++ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ unsigned long blocks;
+
+ /*
+@@ -1471,7 +1471,7 @@ static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
+ int order;
+
+ seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+- for (order = 0; order <= MAX_ORDER; ++order)
++ for (order = 0; order < NR_PAGE_ORDERS; ++order)
+ /*
+ * Access to nr_free is lockless as nr_free is used only for
+ * printing purposes. Use data_race to avoid KCSAN warning.
+@@ -1500,7 +1500,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
+ pgdat->node_id,
+ zone->name,
+ migratetype_names[mtype]);
+- for (order = 0; order <= MAX_ORDER; ++order) {
++ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
+ unsigned long freecount = 0;
+ struct free_area *area;
+ struct list_head *curr;
+@@ -1540,7 +1540,7 @@ static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
+
+ /* Print header */
+ seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
+- for (order = 0; order <= MAX_ORDER; ++order)
++ for (order = 0; order < NR_PAGE_ORDERS; ++order)
+ seq_printf(m, "%6d ", order);
+ seq_putc(m, '\n');
+
+@@ -2176,7 +2176,7 @@ static void unusable_show_print(struct seq_file *m,
+ seq_printf(m, "Node %d, zone %8s ",
+ pgdat->node_id,
+ zone->name);
+- for (order = 0; order <= MAX_ORDER; ++order) {
++ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
+ fill_contig_page_info(zone, order, &info);
+ index = unusable_free_index(order, &info);
+ seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
+@@ -2228,7 +2228,7 @@ static void extfrag_show_print(struct seq_file *m,
+ seq_printf(m, "Node %d, zone %8s ",
+ pgdat->node_id,
+ zone->name);
+- for (order = 0; order <= MAX_ORDER; ++order) {
++ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
+ fill_contig_page_info(zone, order, &info);
+ index = __fragmentation_index(order, &info);
+ seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
+diff --git a/mm/workingset.c b/mm/workingset.c
+index da58a26d0d4d76..9110957bec5b30 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -313,10 +313,10 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
+ * 1. For pages accessed through page tables, hotter pages pushed out
+ * hot pages which refaulted immediately.
+ * 2. For pages accessed multiple times through file descriptors,
+- * numbers of accesses might have been out of the range.
++ * they would have been protected by sort_folio().
+ */
+- if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
+- folio_set_workingset(folio);
++ if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) {
++ set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset));
+ mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
+ }
+ unlock:
+@@ -664,7 +664,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ struct lruvec *lruvec;
+ int i;
+
+- mem_cgroup_flush_stats();
++ mem_cgroup_flush_stats_ratelimited();
+ lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
+ for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
+ pages += lruvec_page_state_local(lruvec,
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 37d2b1cb2ecb46..69681b9173fdcb 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -1100,6 +1100,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
+ if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
+ spin_unlock(&tree->lock);
+ delete_from_swap_cache(page_folio(page));
++ unlock_page(page);
++ put_page(page);
+ ret = -ENOMEM;
+ goto fail;
+ }
+@@ -1215,7 +1217,7 @@ bool zswap_store(struct folio *folio)
+ if (folio_test_large(folio))
+ return false;
+
+- if (!zswap_enabled || !tree)
++ if (!tree)
+ return false;
+
+ /*
+@@ -1231,6 +1233,9 @@ bool zswap_store(struct folio *folio)
+ }
+ spin_unlock(&tree->lock);
+
++ if (!zswap_enabled)
++ return false;
++
+ /*
+ * XXX: zswap reclaim does not work with cgroups yet. Without a
+ * cgroup-aware entry LRU, we will push out entries system-wide based on
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 0beb44f2fe1f0d..9404dd551dfd28 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -407,6 +407,8 @@ int vlan_vids_add_by_dev(struct net_device *dev,
+ return 0;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
++ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++ continue;
+ err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
+ if (err)
+ goto unwind;
+@@ -417,6 +419,8 @@ int vlan_vids_add_by_dev(struct net_device *dev,
+ list_for_each_entry_continue_reverse(vid_info,
+ &vlan_info->vid_list,
+ list) {
++ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++ continue;
+ vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ }
+
+@@ -436,8 +440,11 @@ void vlan_vids_del_by_dev(struct net_device *dev,
+ if (!vlan_info)
+ return;
+
+- list_for_each_entry(vid_info, &vlan_info->vid_list, list)
++ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
++ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
++ continue;
+ vlan_vid_del(dev, vid_info->proto, vid_info->vid);
++ }
+ }
+ EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+@@ -471,6 +478,8 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
+ if (unlikely(!vhdr))
+ goto out;
+
++ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
++
+ type = vhdr->h_vlan_encapsulated_proto;
+
+ ptype = gro_find_receive_by_type(type);
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 214532173536b7..a3b68243fd4b18 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ }
+ if (data[IFLA_VLAN_INGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
++ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
++ continue;
+ m = nla_data(attr);
+ vlan_dev_set_ingress_priority(dev, m->to, m->from);
+ }
+ }
+ if (data[IFLA_VLAN_EGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
++ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
++ continue;
+ m = nla_data(attr);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 86bbc7147fc148..b05f73c291b4b9 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -235,6 +235,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
+ if (!fc->sdata)
+ return -ENOMEM;
+ fc->capacity = alloc_msize;
++ fc->id = 0;
++ fc->tag = P9_NOTAG;
+ return 0;
+ }
+
+@@ -540,12 +542,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
+ return 0;
+
+ if (!p9_is_proto_dotl(c)) {
+- char *ename;
++ char *ename = NULL;
+
+ err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+- if (err)
++ if (err) {
++ kfree(ename);
+ goto out_err;
++ }
+
+ if (p9_is_proto_dotu(c) && ecode < 512)
+ err = -ecode;
+@@ -1581,7 +1585,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ received = rsize;
+ }
+
+- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
++ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
+
+ if (non_zc) {
+ int n = copy_to_iter(dataptr, received, to);
+@@ -1607,9 +1611,6 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ int total = 0;
+ *err = 0;
+
+- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+- fid->fid, offset, iov_iter_count(from));
+-
+ while (iov_iter_count(from)) {
+ int count = iov_iter_count(from);
+ int rsize = fid->iounit;
+@@ -1621,6 +1622,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ if (count < rsize)
+ rsize = count;
+
++ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
++ fid->fid, offset, rsize, count);
++
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
+@@ -1648,7 +1652,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ written = rsize;
+ }
+
+- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
++ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
+
+ p9_req_put(clnt, req);
+ iov_iter_revert(from, count - written - iov_iter_count(from));
+@@ -1979,7 +1983,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ goto error;
+ }
+ p9_debug(P9_DEBUG_9P,
+- ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
++ ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
+ file_fid->fid, attr_fid->fid, attr_name);
+
+ req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index 4e3a2a1ffcb3fa..0e6603b1ec906a 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -394,6 +394,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ uint16_t *nwname = va_arg(ap, uint16_t *);
+ char ***wnames = va_arg(ap, char ***);
+
++ *wnames = NULL;
++
+ errcode = p9pdu_readf(pdu, proto_version,
+ "w", nwname);
+ if (!errcode) {
+@@ -403,6 +405,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ GFP_NOFS);
+ if (!*wnames)
+ errcode = -ENOMEM;
++ else
++ (*wnames)[0] = NULL;
+ }
+
+ if (!errcode) {
+@@ -414,8 +418,10 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ proto_version,
+ "s",
+ &(*wnames)[i]);
+- if (errcode)
++ if (errcode) {
++ (*wnames)[i] = NULL;
+ break;
++ }
+ }
+ }
+
+@@ -423,11 +429,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
+ if (*wnames) {
+ int i;
+
+- for (i = 0; i < *nwname; i++)
++ for (i = 0; i < *nwname; i++) {
++ if (!(*wnames)[i])
++ break;
+ kfree((*wnames)[i]);
++ }
++ kfree(*wnames);
++ *wnames = NULL;
+ }
+- kfree(*wnames);
+- *wnames = NULL;
+ }
+ }
+ break;
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index c4015f30f9fa79..d0eb03ada704dc 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -832,14 +832,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
+ goto out_free_ts;
+ if (!(ts->rd->f_mode & FMODE_READ))
+ goto out_put_rd;
+- /* prevent workers from hanging on IO when fd is a pipe */
+- ts->rd->f_flags |= O_NONBLOCK;
++ /* Prevent workers from hanging on IO when fd is a pipe.
++ * It's technically possible for userspace or concurrent mounts to
++ * modify this flag concurrently, which will likely result in a
++ * broken filesystem. However, just having bad flags here should
++ * not crash the kernel or cause any other sort of bug, so mark this
++ * particular data race as intentional so that tooling (like KCSAN)
++ * can allow it and detect further problems.
++ */
++ data_race(ts->rd->f_flags |= O_NONBLOCK);
+ ts->wr = fget(wfd);
+ if (!ts->wr)
+ goto out_put_rd;
+ if (!(ts->wr->f_mode & FMODE_WRITE))
+ goto out_put_wr;
+- ts->wr->f_flags |= O_NONBLOCK;
++ data_race(ts->wr->f_flags |= O_NONBLOCK);
+
+ client->trans = ts;
+ client->status = Connected;
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 8978fb6212ffb6..b070a89912000a 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1811,15 +1811,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ break;
+ }
+ case TIOCINQ: {
+- /*
+- * These two are safe on a single CPU system as only
+- * user tasks fiddle here
+- */
+- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
++ struct sk_buff *skb;
+ long amount = 0;
+
++ spin_lock_irq(&sk->sk_receive_queue.lock);
++ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ amount = skb->len - sizeof(struct ddpehdr);
++ spin_unlock_irq(&sk->sk_receive_queue.lock);
+ rc = put_user(amount, (int __user *)argp);
+ break;
+ }
+diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
+index 838ebf0cabbfb7..f81f8d56f5c0c5 100644
+--- a/net/atm/ioctl.c
++++ b/net/atm/ioctl.c
+@@ -73,14 +73,17 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
+ case SIOCINQ:
+ {
+ struct sk_buff *skb;
++ int amount;
+
+ if (sock->state != SS_CONNECTED) {
+ error = -EINVAL;
+ goto done;
+ }
++ spin_lock_irq(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+- error = put_user(skb ? skb->len : 0,
+- (int __user *)argp) ? -EFAULT : 0;
++ amount = skb ? skb->len : 0;
++ spin_unlock_irq(&sk->sk_receive_queue.lock);
++ error = put_user(amount, (int __user *)argp) ? -EFAULT : 0;
+ goto done;
+ }
+ case ATM_SETSC:
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 5db805d5f74d73..26a3095bec4620 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -103,7 +103,7 @@ static void ax25_kill_by_device(struct net_device *dev)
+ s->ax25_dev = NULL;
+ if (sk->sk_socket) {
+ netdev_put(ax25_dev->dev,
+- &ax25_dev->dev_tracker);
++ &s->dev_tracker);
+ ax25_dev_put(ax25_dev);
+ }
+ ax25_cb_del(s);
+@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ {
+ struct sk_buff *skb;
+ struct sock *newsk;
++ ax25_dev *ax25_dev;
+ DEFINE_WAIT(wait);
+ struct sock *sk;
++ ax25_cb *ax25;
+ int err = 0;
+
+ if (sock->state != SS_UNCONNECTED)
+@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ kfree_skb(skb);
+ sk_acceptq_removed(sk);
+ newsock->state = SS_CONNECTED;
++ ax25 = sk_to_ax25(newsk);
++ ax25_dev = ax25->ax25_dev;
++ netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
++ ax25_dev_hold(ax25_dev);
+
+ out:
+ release_sock(sk);
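
The ax25_accept() hunk gives every accepted socket its own tracked reference on the underlying device; this pairs with the ax25_kill_by_device() change above, which now releases the per-socket &s->dev_tracker rather than the device-wide tracker. Under CONFIG_NET_DEV_REFCNT_TRACKER, each holder passing its own netdevice_tracker lets the kernel pinpoint imbalanced hold/put pairs. Sketch of the pairing, with hypothetical names:

	#include <linux/netdevice.h>

	struct my_binding {				/* hypothetical holder */
		struct net_device *dev;
		netdevice_tracker dev_tracker;		/* identifies this holder */
	};

	static void binding_attach(struct my_binding *b, struct net_device *dev)
	{
		b->dev = dev;
		netdev_hold(dev, &b->dev_tracker, GFP_ATOMIC);
	}

	static void binding_detach(struct my_binding *b)
	{
		/* must release with the same tracker that took the reference */
		netdev_put(b->dev, &b->dev_tracker);
	}
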
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index c5462486dbca10..67ae6b8c52989b 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -22,11 +22,12 @@
+ #include <net/sock.h>
+ #include <linux/uaccess.h>
+ #include <linux/fcntl.h>
++#include <linux/list.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+
+-ax25_dev *ax25_dev_list;
++static LIST_HEAD(ax25_dev_list);
+ DEFINE_SPINLOCK(ax25_dev_lock);
+
+ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
+@@ -34,10 +35,11 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
+ ax25_dev *ax25_dev, *res = NULL;
+
+ spin_lock_bh(&ax25_dev_lock);
+- for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
++ list_for_each_entry(ax25_dev, &ax25_dev_list, list)
+ if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
+ res = ax25_dev;
+ ax25_dev_hold(ax25_dev);
++ break;
+ }
+ spin_unlock_bh(&ax25_dev_lock);
+
+@@ -59,7 +61,6 @@ void ax25_dev_device_up(struct net_device *dev)
+ }
+
+ refcount_set(&ax25_dev->refcount, 1);
+- dev->ax25_ptr = ax25_dev;
+ ax25_dev->dev = dev;
+ netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
+ ax25_dev->forward = NULL;
+@@ -85,10 +86,9 @@ void ax25_dev_device_up(struct net_device *dev)
+ #endif
+
+ spin_lock_bh(&ax25_dev_lock);
+- ax25_dev->next = ax25_dev_list;
+- ax25_dev_list = ax25_dev;
++ list_add(&ax25_dev->list, &ax25_dev_list);
++ dev->ax25_ptr = ax25_dev;
+ spin_unlock_bh(&ax25_dev_lock);
+- ax25_dev_hold(ax25_dev);
+
+ ax25_register_dev_sysctl(ax25_dev);
+ }
+@@ -105,38 +105,25 @@ void ax25_dev_device_down(struct net_device *dev)
+ spin_lock_bh(&ax25_dev_lock);
+
+ #ifdef CONFIG_AX25_DAMA_SLAVE
+- ax25_ds_del_timer(ax25_dev);
++ timer_shutdown_sync(&ax25_dev->dama.slave_timer);
+ #endif
+
+ /*
+ * Remove any packet forwarding that points to this device.
+ */
+- for (s = ax25_dev_list; s != NULL; s = s->next)
++ list_for_each_entry(s, &ax25_dev_list, list)
+ if (s->forward == dev)
+ s->forward = NULL;
+
+- if ((s = ax25_dev_list) == ax25_dev) {
+- ax25_dev_list = s->next;
+- goto unlock_put;
+- }
+-
+- while (s != NULL && s->next != NULL) {
+- if (s->next == ax25_dev) {
+- s->next = ax25_dev->next;
+- goto unlock_put;
++ list_for_each_entry(s, &ax25_dev_list, list) {
++ if (s == ax25_dev) {
++ list_del(&s->list);
++ break;
+ }
+-
+- s = s->next;
+ }
+- spin_unlock_bh(&ax25_dev_lock);
+- dev->ax25_ptr = NULL;
+- ax25_dev_put(ax25_dev);
+- return;
+
+-unlock_put:
+- spin_unlock_bh(&ax25_dev_lock);
+- ax25_dev_put(ax25_dev);
+ dev->ax25_ptr = NULL;
++ spin_unlock_bh(&ax25_dev_lock);
+ netdev_put(dev, &ax25_dev->dev_tracker);
+ ax25_dev_put(ax25_dev);
+ }
+@@ -200,16 +187,13 @@ struct net_device *ax25_fwd_dev(struct net_device *dev)
+ */
+ void __exit ax25_dev_free(void)
+ {
+- ax25_dev *s, *ax25_dev;
++ ax25_dev *s, *n;
+
+ spin_lock_bh(&ax25_dev_lock);
+- ax25_dev = ax25_dev_list;
+- while (ax25_dev != NULL) {
+- s = ax25_dev;
+- netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
+- ax25_dev = ax25_dev->next;
+- kfree(s);
++ list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
++ netdev_put(s->dev, &s->dev_tracker);
++ list_del(&s->list);
++ ax25_dev_put(s);
+ }
+- ax25_dev_list = NULL;
+ spin_unlock_bh(&ax25_dev_lock);
+ }
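
ax25_dev.c drops its hand-rolled singly-linked list (->next pointers with special-cased head removal) for a standard list_head, so removal is a single list_del() wherever the entry sits and teardown can free entries mid-walk with the _safe iterator. The teardown idiom, sketched:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head list;
	};

	static LIST_HEAD(item_list);

	static void free_all_items(void)
	{
		struct item *cur, *tmp;

		/* _safe variant: cur may be freed, tmp keeps the walk alive */
		list_for_each_entry_safe(cur, tmp, &item_list, list) {
			list_del(&cur->list);
			kfree(cur);
		}
	}
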
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 34903df4fe9362..7388d2ad7b5d84 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -12,6 +12,7 @@
+ #include <linux/errno.h>
+ #include <linux/etherdevice.h>
+ #include <linux/gfp.h>
++#include <linux/if_vlan.h>
+ #include <linux/jiffies.h>
+ #include <linux/kref.h>
+ #include <linux/list.h>
+@@ -131,6 +132,29 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ return vlan;
+ }
+
++/**
++ * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding
++ * @vid: the VLAN identifier
++ *
++ * Return: true when either no vlan is set or if VLAN is in correct range,
++ * false otherwise
++ */
++static bool batadv_vlan_id_valid(unsigned short vid)
++{
++ unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);
++
++ if (vid == 0)
++ return true;
++
++ if (!(vid & BATADV_VLAN_HAS_TAG))
++ return false;
++
++ if (non_vlan)
++ return false;
++
++ return true;
++}
++
+ /**
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
+ * object
+@@ -149,6 +173,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ {
+ struct batadv_orig_node_vlan *vlan;
+
++ if (!batadv_vlan_id_valid(vid))
++ return NULL;
++
+ spin_lock_bh(&orig_node->vlan_list_lock);
+
+ /* first look if an object for this vid already exists */
+@@ -1238,6 +1265,8 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
+ /* for all origins... */
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
++ if (hlist_empty(head))
++ continue;
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
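
batman-adv encodes a VLAN as BATADV_VLAN_HAS_TAG ORed with the 12-bit VID; batadv_vlan_id_valid() now rejects out-of-spec values before batadv_orig_node_vlan_new() allocates state for them, and batadv_purge_orig_ref() skips empty hash buckets rather than taking their locks. Example inputs under that encoding (assuming BATADV_VLAN_HAS_TAG is BIT(15), as in batadv_packet.h):

	/* with BATADV_VLAN_HAS_TAG == BIT(15) and VLAN_VID_MASK == 0x0fff */
	batadv_vlan_id_valid(0);				/* true:  untagged        */
	batadv_vlan_id_valid(BATADV_VLAN_HAS_TAG | 42);		/* true:  tagged VID 42   */
	batadv_vlan_id_valid(42);				/* false: VID w/o tag bit */
	batadv_vlan_id_valid(BATADV_VLAN_HAS_TAG | BIT(13));	/* false: stray bit set   */
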
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index b95c36765d045c..2243cec18ecc86 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -3948,7 +3948,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+- while (true) {
++ while (timeout) {
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ if (packet_size_max >= table_size)
+ break;
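
Replacing `while (true)` with `while (timeout)` bounds the shrink loop; `timeout` is presumably a countdown initialised and decremented elsewhere in batadv_tt_local_resize_to_mtu() (outside this hunk), so the function can no longer spin forever when the local table cannot be shrunk under the packet size limit. The bounded-retry shape, with hypothetical helpers:

	int timeout = 32;	/* hypothetical iteration bound */

	while (timeout) {
		if (table_fits(bat_priv))	/* hypothetical helper */
			break;
		shrink_table_once(bat_priv);	/* hypothetical helper */
		timeout--;
	}
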
+diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
+index da7cac0a1b716b..6b2b65a667008b 100644
+--- a/net/bluetooth/Kconfig
++++ b/net/bluetooth/Kconfig
+@@ -62,14 +62,6 @@ source "net/bluetooth/cmtp/Kconfig"
+
+ source "net/bluetooth/hidp/Kconfig"
+
+-config BT_HS
+- bool "Bluetooth High Speed (HS) features"
+- depends on BT_BREDR
+- help
+- Bluetooth High Speed includes support for off-loading
+- Bluetooth connections via 802.11 (wifi) physical layer
+- available with Bluetooth version 3.0 or later.
+-
+ config BT_LE
+ bool "Bluetooth Low Energy (LE) features"
+ depends on BT
+diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
+index 141ac1fda0bfa5..628d448d78be3a 100644
+--- a/net/bluetooth/Makefile
++++ b/net/bluetooth/Makefile
+@@ -21,7 +21,6 @@ bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o
+
+ bluetooth-$(CONFIG_BT_BREDR) += sco.o
+ bluetooth-$(CONFIG_BT_LE) += iso.o
+-bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
+ bluetooth-$(CONFIG_BT_LEDS) += leds.o
+ bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o
+ bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o
+diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
+deleted file mode 100644
+index e7adb8a98cf90f..00000000000000
+--- a/net/bluetooth/a2mp.c
++++ /dev/null
+@@ -1,1054 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+- Copyright (c) 2011,2012 Intel Corp.
+-
+-*/
+-
+-#include <net/bluetooth/bluetooth.h>
+-#include <net/bluetooth/hci_core.h>
+-#include <net/bluetooth/l2cap.h>
+-
+-#include "hci_request.h"
+-#include "a2mp.h"
+-#include "amp.h"
+-
+-#define A2MP_FEAT_EXT 0x8000
+-
+-/* Global AMP Manager list */
+-static LIST_HEAD(amp_mgr_list);
+-static DEFINE_MUTEX(amp_mgr_list_lock);
+-
+-/* A2MP build & send command helper functions */
+-static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
+-{
+- struct a2mp_cmd *cmd;
+- int plen;
+-
+- plen = sizeof(*cmd) + len;
+- cmd = kzalloc(plen, GFP_KERNEL);
+- if (!cmd)
+- return NULL;
+-
+- cmd->code = code;
+- cmd->ident = ident;
+- cmd->len = cpu_to_le16(len);
+-
+- memcpy(cmd->data, data, len);
+-
+- return cmd;
+-}
+-
+-static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
+-{
+- struct l2cap_chan *chan = mgr->a2mp_chan;
+- struct a2mp_cmd *cmd;
+- u16 total_len = len + sizeof(*cmd);
+- struct kvec iv;
+- struct msghdr msg;
+-
+- cmd = __a2mp_build(code, ident, len, data);
+- if (!cmd)
+- return;
+-
+- iv.iov_base = cmd;
+- iv.iov_len = total_len;
+-
+- memset(&msg, 0, sizeof(msg));
+-
+- iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
+-
+- l2cap_chan_send(chan, &msg, total_len);
+-
+- kfree(cmd);
+-}
+-
+-static u8 __next_ident(struct amp_mgr *mgr)
+-{
+- if (++mgr->ident == 0)
+- mgr->ident = 1;
+-
+- return mgr->ident;
+-}
+-
+-static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
+-{
+- struct amp_mgr *mgr;
+-
+- mutex_lock(&amp_mgr_list_lock);
+- list_for_each_entry(mgr, &amp_mgr_list, list) {
+- if (test_and_clear_bit(state, &mgr->state)) {
+- amp_mgr_get(mgr);
+- mutex_unlock(&amp_mgr_list_lock);
+- return mgr;
+- }
+- }
+- mutex_unlock(&amp_mgr_list_lock);
+-
+- return NULL;
+-}
+-
+-/* hci_dev_list shall be locked */
+-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
+-{
+- struct hci_dev *hdev;
+- int i = 1;
+-
+- cl[0].id = AMP_ID_BREDR;
+- cl[0].type = AMP_TYPE_BREDR;
+- cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
+-
+- list_for_each_entry(hdev, &hci_dev_list, list) {
+- if (hdev->dev_type == HCI_AMP) {
+- cl[i].id = hdev->id;
+- cl[i].type = hdev->amp_type;
+- if (test_bit(HCI_UP, &hdev->flags))
+- cl[i].status = hdev->amp_status;
+- else
+- cl[i].status = AMP_STATUS_POWERED_DOWN;
+- i++;
+- }
+- }
+-}
+-
+-/* Processing A2MP messages */
+-static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_cmd_rej *rej = (void *) skb->data;
+-
+- if (le16_to_cpu(hdr->len) < sizeof(*rej))
+- return -EINVAL;
+-
+- BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason));
+-
+- skb_pull(skb, sizeof(*rej));
+-
+- return 0;
+-}
+-
+-static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_discov_req *req = (void *) skb->data;
+- u16 len = le16_to_cpu(hdr->len);
+- struct a2mp_discov_rsp *rsp;
+- u16 ext_feat;
+- u8 num_ctrl;
+- struct hci_dev *hdev;
+-
+- if (len < sizeof(*req))
+- return -EINVAL;
+-
+- skb_pull(skb, sizeof(*req));
+-
+- ext_feat = le16_to_cpu(req->ext_feat);
+-
+- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
+-
+- /* check that packet is not broken for now */
+- while (ext_feat & A2MP_FEAT_EXT) {
+- if (len < sizeof(ext_feat))
+- return -EINVAL;
+-
+- ext_feat = get_unaligned_le16(skb->data);
+- BT_DBG("efm 0x%4.4x", ext_feat);
+- len -= sizeof(ext_feat);
+- skb_pull(skb, sizeof(ext_feat));
+- }
+-
+- read_lock(&hci_dev_list_lock);
+-
+- /* at minimum the BR/EDR needs to be listed */
+- num_ctrl = 1;
+-
+- list_for_each_entry(hdev, &hci_dev_list, list) {
+- if (hdev->dev_type == HCI_AMP)
+- num_ctrl++;
+- }
+-
+- len = struct_size(rsp, cl, num_ctrl);
+- rsp = kmalloc(len, GFP_ATOMIC);
+- if (!rsp) {
+- read_unlock(&hci_dev_list_lock);
+- return -ENOMEM;
+- }
+-
+- rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+- rsp->ext_feat = 0;
+-
+- __a2mp_add_cl(mgr, rsp->cl);
+-
+- read_unlock(&hci_dev_list_lock);
+-
+- a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
+-
+- kfree(rsp);
+- return 0;
+-}
+-
+-static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_discov_rsp *rsp = (void *) skb->data;
+- u16 len = le16_to_cpu(hdr->len);
+- struct a2mp_cl *cl;
+- u16 ext_feat;
+- bool found = false;
+-
+- if (len < sizeof(*rsp))
+- return -EINVAL;
+-
+- len -= sizeof(*rsp);
+- skb_pull(skb, sizeof(*rsp));
+-
+- ext_feat = le16_to_cpu(rsp->ext_feat);
+-
+- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
+-
+- /* check that packet is not broken for now */
+- while (ext_feat & A2MP_FEAT_EXT) {
+- if (len < sizeof(ext_feat))
+- return -EINVAL;
+-
+- ext_feat = get_unaligned_le16(skb->data);
+- BT_DBG("efm 0x%4.4x", ext_feat);
+- len -= sizeof(ext_feat);
+- skb_pull(skb, sizeof(ext_feat));
+- }
+-
+- cl = (void *) skb->data;
+- while (len >= sizeof(*cl)) {
+- BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type,
+- cl->status);
+-
+- if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
+- struct a2mp_info_req req;
+-
+- found = true;
+-
+- memset(&req, 0, sizeof(req));
+-
+- req.id = cl->id;
+- a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
+- sizeof(req), &req);
+- }
+-
+- len -= sizeof(*cl);
+- cl = skb_pull(skb, sizeof(*cl));
+- }
+-
+- /* Fall back to L2CAP init sequence */
+- if (!found) {
+- struct l2cap_conn *conn = mgr->l2cap_conn;
+- struct l2cap_chan *chan;
+-
+- mutex_lock(&conn->chan_lock);
+-
+- list_for_each_entry(chan, &conn->chan_l, list) {
+-
+- BT_DBG("chan %p state %s", chan,
+- state_to_string(chan->state));
+-
+- if (chan->scid == L2CAP_CID_A2MP)
+- continue;
+-
+- l2cap_chan_lock(chan);
+-
+- if (chan->state == BT_CONNECT)
+- l2cap_send_conn_req(chan);
+-
+- l2cap_chan_unlock(chan);
+- }
+-
+- mutex_unlock(&conn->chan_lock);
+- }
+-
+- return 0;
+-}
+-
+-static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_cl *cl = (void *) skb->data;
+-
+- while (skb->len >= sizeof(*cl)) {
+- BT_DBG("Controller id %u type %u status %u", cl->id, cl->type,
+- cl->status);
+- cl = skb_pull(skb, sizeof(*cl));
+- }
+-
+- /* TODO send A2MP_CHANGE_RSP */
+-
+- return 0;
+-}
+-
+-static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+- u16 opcode)
+-{
+- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+-
+- a2mp_send_getinfo_rsp(hdev);
+-}
+-
+-static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_info_req *req = (void *) skb->data;
+- struct hci_dev *hdev;
+- struct hci_request hreq;
+- int err = 0;
+-
+- if (le16_to_cpu(hdr->len) < sizeof(*req))
+- return -EINVAL;
+-
+- BT_DBG("id %u", req->id);
+-
+- hdev = hci_dev_get(req->id);
+- if (!hdev || hdev->dev_type != HCI_AMP) {
+- struct a2mp_info_rsp rsp;
+-
+- memset(&rsp, 0, sizeof(rsp));
+-
+- rsp.id = req->id;
+- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+-
+- a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
+- &rsp);
+-
+- goto done;
+- }
+-
+- set_bit(READ_LOC_AMP_INFO, &mgr->state);
+- hci_req_init(&hreq, hdev);
+- hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+- err = hci_req_run(&hreq, read_local_amp_info_complete);
+- if (err < 0)
+- a2mp_send_getinfo_rsp(hdev);
+-
+-done:
+- if (hdev)
+- hci_dev_put(hdev);
+-
+- skb_pull(skb, sizeof(*req));
+- return 0;
+-}
+-
+-static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
+- struct a2mp_amp_assoc_req req;
+- struct amp_ctrl *ctrl;
+-
+- if (le16_to_cpu(hdr->len) < sizeof(*rsp))
+- return -EINVAL;
+-
+- BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status);
+-
+- if (rsp->status)
+- return -EINVAL;
+-
+- ctrl = amp_ctrl_add(mgr, rsp->id);
+- if (!ctrl)
+- return -ENOMEM;
+-
+- memset(&req, 0, sizeof(req));
+-
+- req.id = rsp->id;
+- a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
+- &req);
+-
+- skb_pull(skb, sizeof(*rsp));
+- return 0;
+-}
+-
+-static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_amp_assoc_req *req = (void *) skb->data;
+- struct hci_dev *hdev;
+- struct amp_mgr *tmp;
+-
+- if (le16_to_cpu(hdr->len) < sizeof(*req))
+- return -EINVAL;
+-
+- BT_DBG("id %u", req->id);
+-
+- /* Make sure that other request is not processed */
+- tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+-
+- hdev = hci_dev_get(req->id);
+- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
+- struct a2mp_amp_assoc_rsp rsp;
+-
+- memset(&rsp, 0, sizeof(rsp));
+- rsp.id = req->id;
+-
+- if (tmp) {
+- rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+- amp_mgr_put(tmp);
+- } else {
+- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+- }
+-
+- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
+- &rsp);
+-
+- goto done;
+- }
+-
+- amp_read_loc_assoc(hdev, mgr);
+-
+-done:
+- if (hdev)
+- hci_dev_put(hdev);
+-
+- skb_pull(skb, sizeof(*req));
+- return 0;
+-}
+-
+-static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
+- u16 len = le16_to_cpu(hdr->len);
+- struct hci_dev *hdev;
+- struct amp_ctrl *ctrl;
+- struct hci_conn *hcon;
+- size_t assoc_len;
+-
+- if (len < sizeof(*rsp))
+- return -EINVAL;
+-
+- assoc_len = len - sizeof(*rsp);
+-
+- BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
+- assoc_len);
+-
+- if (rsp->status)
+- return -EINVAL;
+-
+- /* Save remote ASSOC data */
+- ctrl = amp_ctrl_lookup(mgr, rsp->id);
+- if (ctrl) {
+- u8 *assoc;
+-
+- assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
+- if (!assoc) {
+- amp_ctrl_put(ctrl);
+- return -ENOMEM;
+- }
+-
+- ctrl->assoc = assoc;
+- ctrl->assoc_len = assoc_len;
+- ctrl->assoc_rem_len = assoc_len;
+- ctrl->assoc_len_so_far = 0;
+-
+- amp_ctrl_put(ctrl);
+- }
+-
+- /* Create Phys Link */
+- hdev = hci_dev_get(rsp->id);
+- if (!hdev)
+- return -EINVAL;
+-
+- hcon = phylink_add(hdev, mgr, rsp->id, true);
+- if (!hcon)
+- goto done;
+-
+- BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id);
+-
+- mgr->bredr_chan->remote_amp_id = rsp->id;
+-
+- amp_create_phylink(hdev, mgr, hcon);
+-
+-done:
+- hci_dev_put(hdev);
+- skb_pull(skb, len);
+- return 0;
+-}
+-
+-static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_physlink_req *req = (void *) skb->data;
+- struct a2mp_physlink_rsp rsp;
+- struct hci_dev *hdev;
+- struct hci_conn *hcon;
+- struct amp_ctrl *ctrl;
+-
+- if (le16_to_cpu(hdr->len) < sizeof(*req))
+- return -EINVAL;
+-
+- BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id);
+-
+- memset(&rsp, 0, sizeof(rsp));
+-
+- rsp.local_id = req->remote_id;
+- rsp.remote_id = req->local_id;
+-
+- hdev = hci_dev_get(req->remote_id);
+- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
+- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+- goto send_rsp;
+- }
+-
+- ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
+- if (!ctrl) {
+- ctrl = amp_ctrl_add(mgr, rsp.remote_id);
+- if (ctrl) {
+- amp_ctrl_get(ctrl);
+- } else {
+- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+- goto send_rsp;
+- }
+- }
+-
+- if (ctrl) {
+- size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
+- u8 *assoc;
+-
+- assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
+- if (!assoc) {
+- amp_ctrl_put(ctrl);
+- hci_dev_put(hdev);
+- return -ENOMEM;
+- }
+-
+- ctrl->assoc = assoc;
+- ctrl->assoc_len = assoc_len;
+- ctrl->assoc_rem_len = assoc_len;
+- ctrl->assoc_len_so_far = 0;
+-
+- amp_ctrl_put(ctrl);
+- }
+-
+- hcon = phylink_add(hdev, mgr, req->local_id, false);
+- if (hcon) {
+- amp_accept_phylink(hdev, mgr, hcon);
+- rsp.status = A2MP_STATUS_SUCCESS;
+- } else {
+- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+- }
+-
+-send_rsp:
+- if (hdev)
+- hci_dev_put(hdev);
+-
+- /* Reply error now and success after HCI Write Remote AMP Assoc
+- command complete with success status
+- */
+- if (rsp.status != A2MP_STATUS_SUCCESS) {
+- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
+- sizeof(rsp), &rsp);
+- } else {
+- set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
+- mgr->ident = hdr->ident;
+- }
+-
+- skb_pull(skb, le16_to_cpu(hdr->len));
+- return 0;
+-}
+-
+-static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- struct a2mp_physlink_req *req = (void *) skb->data;
+- struct a2mp_physlink_rsp rsp;
+- struct hci_dev *hdev;
+- struct hci_conn *hcon;
+-
+- if (le16_to_cpu(hdr->len) < sizeof(*req))
+- return -EINVAL;
+-
+- BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id);
+-
+- memset(&rsp, 0, sizeof(rsp));
+-
+- rsp.local_id = req->remote_id;
+- rsp.remote_id = req->local_id;
+- rsp.status = A2MP_STATUS_SUCCESS;
+-
+- hdev = hci_dev_get(req->remote_id);
+- if (!hdev) {
+- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+- goto send_rsp;
+- }
+-
+- hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+- &mgr->l2cap_conn->hcon->dst);
+- if (!hcon) {
+- bt_dev_err(hdev, "no phys link exist");
+- rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
+- goto clean;
+- }
+-
+- /* TODO Disconnect Phys Link here */
+-
+-clean:
+- hci_dev_put(hdev);
+-
+-send_rsp:
+- a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
+-
+- skb_pull(skb, sizeof(*req));
+- return 0;
+-}
+-
+-static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+- struct a2mp_cmd *hdr)
+-{
+- BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code);
+-
+- skb_pull(skb, le16_to_cpu(hdr->len));
+- return 0;
+-}
+-
+-/* Handle A2MP signalling */
+-static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+-{
+- struct a2mp_cmd *hdr;
+- struct amp_mgr *mgr = chan->data;
+- int err = 0;
+-
+- amp_mgr_get(mgr);
+-
+- while (skb->len >= sizeof(*hdr)) {
+- u16 len;
+-
+- hdr = (void *) skb->data;
+- len = le16_to_cpu(hdr->len);
+-
+- BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len);
+-
+- skb_pull(skb, sizeof(*hdr));
+-
+- if (len > skb->len || !hdr->ident) {
+- err = -EINVAL;
+- break;
+- }
+-
+- mgr->ident = hdr->ident;
+-
+- switch (hdr->code) {
+- case A2MP_COMMAND_REJ:
+- a2mp_command_rej(mgr, skb, hdr);
+- break;
+-
+- case A2MP_DISCOVER_REQ:
+- err = a2mp_discover_req(mgr, skb, hdr);
+- break;
+-
+- case A2MP_CHANGE_NOTIFY:
+- err = a2mp_change_notify(mgr, skb, hdr);
+- break;
+-
+- case A2MP_GETINFO_REQ:
+- err = a2mp_getinfo_req(mgr, skb, hdr);
+- break;
+-
+- case A2MP_GETAMPASSOC_REQ:
+- err = a2mp_getampassoc_req(mgr, skb, hdr);
+- break;
+-
+- case A2MP_CREATEPHYSLINK_REQ:
+- err = a2mp_createphyslink_req(mgr, skb, hdr);
+- break;
+-
+- case A2MP_DISCONNPHYSLINK_REQ:
+- err = a2mp_discphyslink_req(mgr, skb, hdr);
+- break;
+-
+- case A2MP_DISCOVER_RSP:
+- err = a2mp_discover_rsp(mgr, skb, hdr);
+- break;
+-
+- case A2MP_GETINFO_RSP:
+- err = a2mp_getinfo_rsp(mgr, skb, hdr);
+- break;
+-
+- case A2MP_GETAMPASSOC_RSP:
+- err = a2mp_getampassoc_rsp(mgr, skb, hdr);
+- break;
+-
+- case A2MP_CHANGE_RSP:
+- case A2MP_CREATEPHYSLINK_RSP:
+- case A2MP_DISCONNPHYSLINK_RSP:
+- err = a2mp_cmd_rsp(mgr, skb, hdr);
+- break;
+-
+- default:
+- BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
+- err = -EINVAL;
+- break;
+- }
+- }
+-
+- if (err) {
+- struct a2mp_cmd_rej rej;
+-
+- memset(&rej, 0, sizeof(rej));
+-
+- rej.reason = cpu_to_le16(0);
+- hdr = (void *) skb->data;
+-
+- BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
+-
+- a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
+- &rej);
+- }
+-
+- /* Always free skb and return success error code to prevent
+- from sending L2CAP Disconnect over A2MP channel */
+- kfree_skb(skb);
+-
+- amp_mgr_put(mgr);
+-
+- return 0;
+-}
+-
+-static void a2mp_chan_close_cb(struct l2cap_chan *chan)
+-{
+- l2cap_chan_put(chan);
+-}
+-
+-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
+- int err)
+-{
+- struct amp_mgr *mgr = chan->data;
+-
+- if (!mgr)
+- return;
+-
+- BT_DBG("chan %p state %s", chan, state_to_string(state));
+-
+- chan->state = state;
+-
+- switch (state) {
+- case BT_CLOSED:
+- if (mgr)
+- amp_mgr_put(mgr);
+- break;
+- }
+-}
+-
+-static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+- unsigned long hdr_len,
+- unsigned long len, int nb)
+-{
+- struct sk_buff *skb;
+-
+- skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
+- if (!skb)
+- return ERR_PTR(-ENOMEM);
+-
+- return skb;
+-}
+-
+-static const struct l2cap_ops a2mp_chan_ops = {
+- .name = "L2CAP A2MP channel",
+- .recv = a2mp_chan_recv_cb,
+- .close = a2mp_chan_close_cb,
+- .state_change = a2mp_chan_state_change_cb,
+- .alloc_skb = a2mp_chan_alloc_skb_cb,
+-
+- /* Not implemented for A2MP */
+- .new_connection = l2cap_chan_no_new_connection,
+- .teardown = l2cap_chan_no_teardown,
+- .ready = l2cap_chan_no_ready,
+- .defer = l2cap_chan_no_defer,
+- .resume = l2cap_chan_no_resume,
+- .set_shutdown = l2cap_chan_no_set_shutdown,
+- .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
+-};
+-
+-static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
+-{
+- struct l2cap_chan *chan;
+- int err;
+-
+- chan = l2cap_chan_create();
+- if (!chan)
+- return NULL;
+-
+- BT_DBG("chan %p", chan);
+-
+- chan->chan_type = L2CAP_CHAN_FIXED;
+- chan->scid = L2CAP_CID_A2MP;
+- chan->dcid = L2CAP_CID_A2MP;
+- chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+- chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+-
+- chan->ops = &a2mp_chan_ops;
+-
+- l2cap_chan_set_defaults(chan);
+- chan->remote_max_tx = chan->max_tx;
+- chan->remote_tx_win = chan->tx_win;
+-
+- chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+- chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+-
+- skb_queue_head_init(&chan->tx_q);
+-
+- chan->mode = L2CAP_MODE_ERTM;
+-
+- err = l2cap_ertm_init(chan);
+- if (err < 0) {
+- l2cap_chan_del(chan, 0);
+- return NULL;
+- }
+-
+- chan->conf_state = 0;
+-
+- if (locked)
+- __l2cap_chan_add(conn, chan);
+- else
+- l2cap_chan_add(conn, chan);
+-
+- chan->remote_mps = chan->omtu;
+- chan->mps = chan->omtu;
+-
+- chan->state = BT_CONNECTED;
+-
+- return chan;
+-}
+-
+-/* AMP Manager functions */
+-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
+-{
+- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
+-
+- kref_get(&mgr->kref);
+-
+- return mgr;
+-}
+-
+-static void amp_mgr_destroy(struct kref *kref)
+-{
+- struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
+-
+- BT_DBG("mgr %p", mgr);
+-
+- mutex_lock(&amp_mgr_list_lock);
+- list_del(&mgr->list);
+- mutex_unlock(&amp_mgr_list_lock);
+-
+- amp_ctrl_list_flush(mgr);
+- kfree(mgr);
+-}
+-
+-int amp_mgr_put(struct amp_mgr *mgr)
+-{
+- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
+-
+- return kref_put(&mgr->kref, &amp_mgr_destroy);
+-}
+-
+-static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
+-{
+- struct amp_mgr *mgr;
+- struct l2cap_chan *chan;
+-
+- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+- if (!mgr)
+- return NULL;
+-
+- BT_DBG("conn %p mgr %p", conn, mgr);
+-
+- mgr->l2cap_conn = conn;
+-
+- chan = a2mp_chan_open(conn, locked);
+- if (!chan) {
+- kfree(mgr);
+- return NULL;
+- }
+-
+- mgr->a2mp_chan = chan;
+- chan->data = mgr;
+-
+- conn->hcon->amp_mgr = mgr;
+-
+- kref_init(&mgr->kref);
+-
+- /* Remote AMP ctrl list initialization */
+- INIT_LIST_HEAD(&mgr->amp_ctrls);
+- mutex_init(&mgr->amp_ctrls_lock);
+-
+- mutex_lock(&amp_mgr_list_lock);
+- list_add(&mgr->list, &amp_mgr_list);
+- mutex_unlock(&amp_mgr_list_lock);
+-
+- return mgr;
+-}
+-
+-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+- struct sk_buff *skb)
+-{
+- struct amp_mgr *mgr;
+-
+- if (conn->hcon->type != ACL_LINK)
+- return NULL;
+-
+- mgr = amp_mgr_create(conn, false);
+- if (!mgr) {
+- BT_ERR("Could not create AMP manager");
+- return NULL;
+- }
+-
+- BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
+-
+- return mgr->a2mp_chan;
+-}
+-
+-void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
+-{
+- struct amp_mgr *mgr;
+- struct a2mp_info_rsp rsp;
+-
+- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
+- if (!mgr)
+- return;
+-
+- BT_DBG("%s mgr %p", hdev->name, mgr);
+-
+- memset(&rsp, 0, sizeof(rsp));
+-
+- rsp.id = hdev->id;
+- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+-
+- if (hdev->amp_type != AMP_TYPE_BREDR) {
+- rsp.status = 0;
+- rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+- rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+- rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+- rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+- rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+- }
+-
+- a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
+- amp_mgr_put(mgr);
+-}
+-
+-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
+-{
+- struct amp_mgr *mgr;
+- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+- struct a2mp_amp_assoc_rsp *rsp;
+- size_t len;
+-
+- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+- if (!mgr)
+- return;
+-
+- BT_DBG("%s mgr %p", hdev->name, mgr);
+-
+- len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
+- rsp = kzalloc(len, GFP_KERNEL);
+- if (!rsp) {
+- amp_mgr_put(mgr);
+- return;
+- }
+-
+- rsp->id = hdev->id;
+-
+- if (status) {
+- rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
+- } else {
+- rsp->status = A2MP_STATUS_SUCCESS;
+- memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
+- }
+-
+- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
+- amp_mgr_put(mgr);
+- kfree(rsp);
+-}
+-
+-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
+-{
+- struct amp_mgr *mgr;
+- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+- struct a2mp_physlink_req *req;
+- struct l2cap_chan *bredr_chan;
+- size_t len;
+-
+- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
+- if (!mgr)
+- return;
+-
+- len = sizeof(*req) + loc_assoc->len;
+-
+- BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
+-
+- req = kzalloc(len, GFP_KERNEL);
+- if (!req) {
+- amp_mgr_put(mgr);
+- return;
+- }
+-
+- bredr_chan = mgr->bredr_chan;
+- if (!bredr_chan)
+- goto clean;
+-
+- req->local_id = hdev->id;
+- req->remote_id = bredr_chan->remote_amp_id;
+- memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
+-
+- a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
+-
+-clean:
+- amp_mgr_put(mgr);
+- kfree(req);
+-}
+-
+-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
+-{
+- struct amp_mgr *mgr;
+- struct a2mp_physlink_rsp rsp;
+- struct hci_conn *hs_hcon;
+-
+- mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
+- if (!mgr)
+- return;
+-
+- memset(&rsp, 0, sizeof(rsp));
+-
+- hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
+- if (!hs_hcon) {
+- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+- } else {
+- rsp.remote_id = hs_hcon->remote_id;
+- rsp.status = A2MP_STATUS_SUCCESS;
+- }
+-
+- BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
+- status);
+-
+- rsp.local_id = hdev->id;
+- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
+- amp_mgr_put(mgr);
+-}
+-
+-void a2mp_discover_amp(struct l2cap_chan *chan)
+-{
+- struct l2cap_conn *conn = chan->conn;
+- struct amp_mgr *mgr = conn->hcon->amp_mgr;
+- struct a2mp_discov_req req;
+-
+- BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
+-
+- if (!mgr) {
+- mgr = amp_mgr_create(conn, true);
+- if (!mgr)
+- return;
+- }
+-
+- mgr->bredr_chan = chan;
+-
+- memset(&req, 0, sizeof(req));
+-
+- req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+- req.ext_feat = 0;
+- a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
+-}
+diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
+deleted file mode 100644
+index 2fd253a61a2a16..00000000000000
+--- a/net/bluetooth/a2mp.h
++++ /dev/null
+@@ -1,154 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+- Copyright (c) 2011,2012 Intel Corp.
+-
+-*/
+-
+-#ifndef __A2MP_H
+-#define __A2MP_H
+-
+-#include <net/bluetooth/l2cap.h>
+-
+-enum amp_mgr_state {
+- READ_LOC_AMP_INFO,
+- READ_LOC_AMP_ASSOC,
+- READ_LOC_AMP_ASSOC_FINAL,
+- WRITE_REMOTE_AMP_ASSOC,
+-};
+-
+-struct amp_mgr {
+- struct list_head list;
+- struct l2cap_conn *l2cap_conn;
+- struct l2cap_chan *a2mp_chan;
+- struct l2cap_chan *bredr_chan;
+- struct kref kref;
+- __u8 ident;
+- __u8 handle;
+- unsigned long state;
+- unsigned long flags;
+-
+- struct list_head amp_ctrls;
+- struct mutex amp_ctrls_lock;
+-};
+-
+-struct a2mp_cmd {
+- __u8 code;
+- __u8 ident;
+- __le16 len;
+- __u8 data[];
+-} __packed;
+-
+-/* A2MP command codes */
+-#define A2MP_COMMAND_REJ 0x01
+-struct a2mp_cmd_rej {
+- __le16 reason;
+- __u8 data[];
+-} __packed;
+-
+-#define A2MP_DISCOVER_REQ 0x02
+-struct a2mp_discov_req {
+- __le16 mtu;
+- __le16 ext_feat;
+-} __packed;
+-
+-struct a2mp_cl {
+- __u8 id;
+- __u8 type;
+- __u8 status;
+-} __packed;
+-
+-#define A2MP_DISCOVER_RSP 0x03
+-struct a2mp_discov_rsp {
+- __le16 mtu;
+- __le16 ext_feat;
+- struct a2mp_cl cl[];
+-} __packed;
+-
+-#define A2MP_CHANGE_NOTIFY 0x04
+-#define A2MP_CHANGE_RSP 0x05
+-
+-#define A2MP_GETINFO_REQ 0x06
+-struct a2mp_info_req {
+- __u8 id;
+-} __packed;
+-
+-#define A2MP_GETINFO_RSP 0x07
+-struct a2mp_info_rsp {
+- __u8 id;
+- __u8 status;
+- __le32 total_bw;
+- __le32 max_bw;
+- __le32 min_latency;
+- __le16 pal_cap;
+- __le16 assoc_size;
+-} __packed;
+-
+-#define A2MP_GETAMPASSOC_REQ 0x08
+-struct a2mp_amp_assoc_req {
+- __u8 id;
+-} __packed;
+-
+-#define A2MP_GETAMPASSOC_RSP 0x09
+-struct a2mp_amp_assoc_rsp {
+- __u8 id;
+- __u8 status;
+- __u8 amp_assoc[];
+-} __packed;
+-
+-#define A2MP_CREATEPHYSLINK_REQ 0x0A
+-#define A2MP_DISCONNPHYSLINK_REQ 0x0C
+-struct a2mp_physlink_req {
+- __u8 local_id;
+- __u8 remote_id;
+- __u8 amp_assoc[];
+-} __packed;
+-
+-#define A2MP_CREATEPHYSLINK_RSP 0x0B
+-#define A2MP_DISCONNPHYSLINK_RSP 0x0D
+-struct a2mp_physlink_rsp {
+- __u8 local_id;
+- __u8 remote_id;
+- __u8 status;
+-} __packed;
+-
+-/* A2MP response status */
+-#define A2MP_STATUS_SUCCESS 0x00
+-#define A2MP_STATUS_INVALID_CTRL_ID 0x01
+-#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
+-#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
+-#define A2MP_STATUS_COLLISION_OCCURED 0x03
+-#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
+-#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
+-#define A2MP_STATUS_SECURITY_VIOLATION 0x06
+-
+-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+-
+-#if IS_ENABLED(CONFIG_BT_HS)
+-int amp_mgr_put(struct amp_mgr *mgr);
+-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+- struct sk_buff *skb);
+-void a2mp_discover_amp(struct l2cap_chan *chan);
+-#else
+-static inline int amp_mgr_put(struct amp_mgr *mgr)
+-{
+- return 0;
+-}
+-
+-static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+- struct sk_buff *skb)
+-{
+- return NULL;
+-}
+-
+-static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+-{
+-}
+-#endif
+-
+-void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
+-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
+-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
+-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
+-
+-#endif /* __A2MP_H */
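
The deleted a2mp.h showed the usual pattern for compile-time optional subsystems: real declarations under IS_ENABLED(CONFIG_BT_HS), static inline no-op stubs otherwise, so callers stay free of #ifdefs. With Bluetooth High Speed removed outright, both halves disappear along with every caller. The pattern in general, using a hypothetical option:

	#if IS_ENABLED(CONFIG_MY_FEATURE)	/* hypothetical option */
	int my_feature_start(struct my_dev *dev);
	#else
	static inline int my_feature_start(struct my_dev *dev)
	{
		return 0;	/* compiled out: succeed as a no-op */
	}
	#endif
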
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 336a7616545468..e39fba5565c5d4 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -312,7 +312,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ skb = skb_recv_datagram(sk, flags, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+- return 0;
++ err = 0;
+
+ return err;
+ }
+@@ -565,10 +565,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ if (sk->sk_state == BT_LISTEN)
+ return -EINVAL;
+
+- lock_sock(sk);
++ spin_lock(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ amount = skb ? skb->len : 0;
+- release_sock(sk);
++ spin_unlock(&sk->sk_receive_queue.lock);
++
+ err = put_user(amount, (int __user *)arg);
+ break;
+
+@@ -824,11 +825,14 @@ static int __init bt_init(void)
+ bt_sysfs_cleanup();
+ cleanup_led:
+ bt_leds_cleanup();
++ debugfs_remove_recursive(bt_debugfs);
+ return err;
+ }
+
+ static void __exit bt_exit(void)
+ {
++ iso_exit();
++
+ mgmt_exit();
+
+ sco_exit();
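
The bt_init() hunk extends the deepest error label so the unwind also removes the debugfs root created at the start of the function, keeping teardown a mirror image of setup; bt_exit() gains the matching iso_exit(). The standard goto-unwind shape, with hypothetical steps:

	static int __init my_init(void)	/* hypothetical steps throughout */
	{
		int err;

		err = step_a();
		if (err)
			return err;

		err = step_b();
		if (err)
			goto undo_a;

		err = step_c();
		if (err)
			goto undo_b;

		return 0;

	undo_b:
		undo_step_b();
	undo_a:
		undo_step_a();	/* unwind in exact reverse order of setup */
		return err;
	}
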
+diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
+deleted file mode 100644
+index 2134f92bd7ac21..00000000000000
+--- a/net/bluetooth/amp.c
++++ /dev/null
+@@ -1,591 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- Copyright (c) 2011,2012 Intel Corp.
+-
+-*/
+-
+-#include <net/bluetooth/bluetooth.h>
+-#include <net/bluetooth/hci.h>
+-#include <net/bluetooth/hci_core.h>
+-#include <crypto/hash.h>
+-
+-#include "hci_request.h"
+-#include "a2mp.h"
+-#include "amp.h"
+-
+-/* Remote AMP Controllers interface */
+-void amp_ctrl_get(struct amp_ctrl *ctrl)
+-{
+- BT_DBG("ctrl %p orig refcnt %d", ctrl,
+- kref_read(&ctrl->kref));
+-
+- kref_get(&ctrl->kref);
+-}
+-
+-static void amp_ctrl_destroy(struct kref *kref)
+-{
+- struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
+-
+- BT_DBG("ctrl %p", ctrl);
+-
+- kfree(ctrl->assoc);
+- kfree(ctrl);
+-}
+-
+-int amp_ctrl_put(struct amp_ctrl *ctrl)
+-{
+- BT_DBG("ctrl %p orig refcnt %d", ctrl,
+- kref_read(&ctrl->kref));
+-
+- return kref_put(&ctrl->kref, &amp_ctrl_destroy);
+-}
+-
+-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
+-{
+- struct amp_ctrl *ctrl;
+-
+- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+- if (!ctrl)
+- return NULL;
+-
+- kref_init(&ctrl->kref);
+- ctrl->id = id;
+-
+- mutex_lock(&mgr->amp_ctrls_lock);
+- list_add(&ctrl->list, &mgr->amp_ctrls);
+- mutex_unlock(&mgr->amp_ctrls_lock);
+-
+- BT_DBG("mgr %p ctrl %p", mgr, ctrl);
+-
+- return ctrl;
+-}
+-
+-void amp_ctrl_list_flush(struct amp_mgr *mgr)
+-{
+- struct amp_ctrl *ctrl, *n;
+-
+- BT_DBG("mgr %p", mgr);
+-
+- mutex_lock(&mgr->amp_ctrls_lock);
+- list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
+- list_del(&ctrl->list);
+- amp_ctrl_put(ctrl);
+- }
+- mutex_unlock(&mgr->amp_ctrls_lock);
+-}
+-
+-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
+-{
+- struct amp_ctrl *ctrl;
+-
+- BT_DBG("mgr %p id %u", mgr, id);
+-
+- mutex_lock(&mgr->amp_ctrls_lock);
+- list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
+- if (ctrl->id == id) {
+- amp_ctrl_get(ctrl);
+- mutex_unlock(&mgr->amp_ctrls_lock);
+- return ctrl;
+- }
+- }
+- mutex_unlock(&mgr->amp_ctrls_lock);
+-
+- return NULL;
+-}
+-
+-/* Physical Link interface */
+-static u8 __next_handle(struct amp_mgr *mgr)
+-{
+- if (++mgr->handle == 0)
+- mgr->handle = 1;
+-
+- return mgr->handle;
+-}
+-
+-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+- u8 remote_id, bool out)
+-{
+- bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
+- struct hci_conn *hcon;
+- u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
+-
+- hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
+- if (!hcon)
+- return NULL;
+-
+- BT_DBG("hcon %p dst %pMR", hcon, dst);
+-
+- hcon->state = BT_CONNECT;
+- hcon->attempt++;
+- hcon->handle = __next_handle(mgr);
+- hcon->remote_id = remote_id;
+- hcon->amp_mgr = amp_mgr_get(mgr);
+-
+- return hcon;
+-}
+-
+-/* AMP crypto key generation interface */
+-static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
+-{
+- struct crypto_shash *tfm;
+- struct shash_desc *shash;
+- int ret;
+-
+- if (!ksize)
+- return -EINVAL;
+-
+- tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+- if (IS_ERR(tfm)) {
+- BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
+- return PTR_ERR(tfm);
+- }
+-
+- ret = crypto_shash_setkey(tfm, key, ksize);
+- if (ret) {
+- BT_DBG("crypto_ahash_setkey failed: err %d", ret);
+- goto failed;
+- }
+-
+- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+- GFP_KERNEL);
+- if (!shash) {
+- ret = -ENOMEM;
+- goto failed;
+- }
+-
+- shash->tfm = tfm;
+-
+- ret = crypto_shash_digest(shash, plaintext, psize, output);
+-
+- kfree(shash);
+-
+-failed:
+- crypto_free_shash(tfm);
+- return ret;
+-}
+-
+-int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
+-{
+- struct hci_dev *hdev = conn->hdev;
+- struct link_key *key;
+- u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
+- u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
+- int err;
+-
+- if (!hci_conn_check_link_mode(conn))
+- return -EACCES;
+-
+- BT_DBG("conn %p key_type %d", conn, conn->key_type);
+-
+- /* Legacy key */
+- if (conn->key_type < 3) {
+- bt_dev_err(hdev, "legacy key type %u", conn->key_type);
+- return -EACCES;
+- }
+-
+- *type = conn->key_type;
+- *len = HCI_AMP_LINK_KEY_SIZE;
+-
+- key = hci_find_link_key(hdev, &conn->dst);
+- if (!key) {
+- BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
+- return -EACCES;
+- }
+-
+- /* BR/EDR Link Key concatenated together with itself */
+- memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
+- memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
+-
+- /* Derive Generic AMP Link Key (gamp) */
+- err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
+- if (err) {
+- bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err);
+- return err;
+- }
+-
+- if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
+- BT_DBG("Use Generic AMP Key (gamp)");
+- memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
+- return err;
+- }
+-
+- /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
+- return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
+-}
+-
+-static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+- u16 opcode, struct sk_buff *skb)
+-{
+- struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+- struct amp_assoc *assoc = &hdev->loc_assoc;
+- size_t rem_len, frag_len;
+-
+- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+-
+- if (rp->status)
+- goto send_rsp;
+-
+- frag_len = skb->len - sizeof(*rp);
+- rem_len = __le16_to_cpu(rp->rem_len);
+-
+- if (rem_len > frag_len) {
+- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+-
+- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+- assoc->offset += frag_len;
+-
+- /* Read other fragments */
+- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+-
+- return;
+- }
+-
+- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+- assoc->len = assoc->offset + rem_len;
+- assoc->offset = 0;
+-
+-send_rsp:
+- /* Send A2MP Rsp when all fragments are received */
+- a2mp_send_getampassoc_rsp(hdev, rp->status);
+- a2mp_send_create_phy_link_req(hdev, rp->status);
+-}
+-
+-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
+-{
+- struct hci_cp_read_local_amp_assoc cp;
+- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+- struct hci_request req;
+- int err;
+-
+- BT_DBG("%s handle %u", hdev->name, phy_handle);
+-
+- cp.phy_handle = phy_handle;
+- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+- cp.len_so_far = cpu_to_le16(loc_assoc->offset);
+-
+- hci_req_init(&req, hdev);
+- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+- if (err < 0)
+- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+-}
+-
+-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
+-{
+- struct hci_cp_read_local_amp_assoc cp;
+- struct hci_request req;
+- int err;
+-
+- memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
+- memset(&cp, 0, sizeof(cp));
+-
+- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+-
+- set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
+- hci_req_init(&req, hdev);
+- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+- if (err < 0)
+- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+-}
+-
+-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+- struct hci_conn *hcon)
+-{
+- struct hci_cp_read_local_amp_assoc cp;
+- struct amp_mgr *mgr = hcon->amp_mgr;
+- struct hci_request req;
+- int err;
+-
+- if (!mgr)
+- return;
+-
+- cp.phy_handle = hcon->handle;
+- cp.len_so_far = cpu_to_le16(0);
+- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+-
+- set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
+-
+- /* Read Local AMP Assoc final link information data */
+- hci_req_init(&req, hdev);
+- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+- if (err < 0)
+- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+-}
+-
+-static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+- u16 opcode, struct sk_buff *skb)
+-{
+- struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+-
+- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+- hdev->name, rp->status, rp->phy_handle);
+-
+- if (rp->status)
+- return;
+-
+- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
+-}
+-
+-/* Write AMP Assoc data fragments, returns true with last fragment written*/
+-static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
+- struct hci_conn *hcon)
+-{
+- struct hci_cp_write_remote_amp_assoc *cp;
+- struct amp_mgr *mgr = hcon->amp_mgr;
+- struct amp_ctrl *ctrl;
+- struct hci_request req;
+- u16 frag_len, len;
+-
+- ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
+- if (!ctrl)
+- return false;
+-
+- if (!ctrl->assoc_rem_len) {
+- BT_DBG("all fragments are written");
+- ctrl->assoc_rem_len = ctrl->assoc_len;
+- ctrl->assoc_len_so_far = 0;
+-
+- amp_ctrl_put(ctrl);
+- return true;
+- }
+-
+- frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
+- len = frag_len + sizeof(*cp);
+-
+- cp = kzalloc(len, GFP_KERNEL);
+- if (!cp) {
+- amp_ctrl_put(ctrl);
+- return false;
+- }
+-
+- BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
+- hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
+-
+- cp->phy_handle = hcon->handle;
+- cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
+- cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
+- memcpy(cp->frag, ctrl->assoc, frag_len);
+-
+- ctrl->assoc_len_so_far += frag_len;
+- ctrl->assoc_rem_len -= frag_len;
+-
+- amp_ctrl_put(ctrl);
+-
+- hci_req_init(&req, hdev);
+- hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+- hci_req_run_skb(&req, write_remote_amp_assoc_complete);
+-
+- kfree(cp);
+-
+- return false;
+-}
+-
+-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
+-{
+- struct hci_conn *hcon;
+-
+- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, handle);
+- if (!hcon)
+- return;
+-
+- /* Send A2MP create phylink rsp when all fragments are written */
+- if (amp_write_rem_assoc_frag(hdev, hcon))
+- a2mp_send_create_phy_link_rsp(hdev, 0);
+-}
+-
+-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
+-{
+- struct hci_conn *hcon;
+-
+- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, handle);
+- if (!hcon)
+- return;
+-
+- BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
+-
+- amp_write_rem_assoc_frag(hdev, hcon);
+-}
+-
+-static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+- u16 opcode)
+-{
+- struct hci_cp_create_phy_link *cp;
+-
+- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+-
+- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+- if (!cp)
+- return;
+-
+- hci_dev_lock(hdev);
+-
+- if (status) {
+- struct hci_conn *hcon;
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+- if (hcon)
+- hci_conn_del(hcon);
+- } else {
+- amp_write_remote_assoc(hdev, cp->phy_handle);
+- }
+-
+- hci_dev_unlock(hdev);
+-}
+-
+-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+- struct hci_conn *hcon)
+-{
+- struct hci_cp_create_phy_link cp;
+- struct hci_request req;
+-
+- cp.phy_handle = hcon->handle;
+-
+- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+- hcon->handle);
+-
+- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+- &cp.key_type)) {
+- BT_DBG("Cannot create link key");
+- return;
+- }
+-
+- hci_req_init(&req, hdev);
+- hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+- hci_req_run(&req, create_phylink_complete);
+-}
+-
+-static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+- u16 opcode)
+-{
+- struct hci_cp_accept_phy_link *cp;
+-
+- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+-
+- if (status)
+- return;
+-
+- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+- if (!cp)
+- return;
+-
+- amp_write_remote_assoc(hdev, cp->phy_handle);
+-}
+-
+-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+- struct hci_conn *hcon)
+-{
+- struct hci_cp_accept_phy_link cp;
+- struct hci_request req;
+-
+- cp.phy_handle = hcon->handle;
+-
+- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+- hcon->handle);
+-
+- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+- &cp.key_type)) {
+- BT_DBG("Cannot create link key");
+- return;
+- }
+-
+- hci_req_init(&req, hdev);
+- hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+- hci_req_run(&req, accept_phylink_complete);
+-}
+-
+-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
+-{
+- struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
+- struct amp_mgr *mgr = hs_hcon->amp_mgr;
+- struct l2cap_chan *bredr_chan;
+-
+- BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
+-
+- if (!bredr_hdev || !mgr || !mgr->bredr_chan)
+- return;
+-
+- bredr_chan = mgr->bredr_chan;
+-
+- l2cap_chan_lock(bredr_chan);
+-
+- set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
+- bredr_chan->remote_amp_id = hs_hcon->remote_id;
+- bredr_chan->local_amp_id = hs_hcon->hdev->id;
+- bredr_chan->hs_hcon = hs_hcon;
+- bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
+-
+- __l2cap_physical_cfm(bredr_chan, 0);
+-
+- l2cap_chan_unlock(bredr_chan);
+-
+- hci_dev_put(bredr_hdev);
+-}
+-
+-void amp_create_logical_link(struct l2cap_chan *chan)
+-{
+- struct hci_conn *hs_hcon = chan->hs_hcon;
+- struct hci_cp_create_accept_logical_link cp;
+- struct hci_dev *hdev;
+-
+- BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
+- &chan->conn->hcon->dst);
+-
+- if (!hs_hcon)
+- return;
+-
+- hdev = hci_dev_hold(chan->hs_hcon->hdev);
+- if (!hdev)
+- return;
+-
+- cp.phy_handle = hs_hcon->handle;
+-
+- cp.tx_flow_spec.id = chan->local_id;
+- cp.tx_flow_spec.stype = chan->local_stype;
+- cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
+- cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+- cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
+- cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
+-
+- cp.rx_flow_spec.id = chan->remote_id;
+- cp.rx_flow_spec.stype = chan->remote_stype;
+- cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
+- cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
+- cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
+- cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
+-
+- if (hs_hcon->out)
+- hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
+- &cp);
+- else
+- hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
+- &cp);
+-
+- hci_dev_put(hdev);
+-}
+-
+-void amp_disconnect_logical_link(struct hci_chan *hchan)
+-{
+- struct hci_conn *hcon = hchan->conn;
+- struct hci_cp_disconn_logical_link cp;
+-
+- if (hcon->state != BT_CONNECTED) {
+- BT_DBG("hchan %p not connected", hchan);
+- return;
+- }
+-
+- cp.log_handle = cpu_to_le16(hchan->handle);
+- hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
+-}
+-
+-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
+-{
+- BT_DBG("hchan %p", hchan);
+-
+- hci_chan_del(hchan);
+-}
+diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
+deleted file mode 100644
+index 97c87abd129f64..00000000000000
+--- a/net/bluetooth/amp.h
++++ /dev/null
+@@ -1,60 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- Copyright (c) 2011,2012 Intel Corp.
+-
+-*/
+-
+-#ifndef __AMP_H
+-#define __AMP_H
+-
+-struct amp_ctrl {
+- struct list_head list;
+- struct kref kref;
+- __u8 id;
+- __u16 assoc_len_so_far;
+- __u16 assoc_rem_len;
+- __u16 assoc_len;
+- __u8 *assoc;
+-};
+-
+-int amp_ctrl_put(struct amp_ctrl *ctrl);
+-void amp_ctrl_get(struct amp_ctrl *ctrl);
+-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
+-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
+-void amp_ctrl_list_flush(struct amp_mgr *mgr);
+-
+-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+- u8 remote_id, bool out);
+-
+-int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
+-
+-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
+-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
+-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+- struct hci_conn *hcon);
+-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+- struct hci_conn *hcon);
+-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+- struct hci_conn *hcon);
+-
+-#if IS_ENABLED(CONFIG_BT_HS)
+-void amp_create_logical_link(struct l2cap_chan *chan);
+-void amp_disconnect_logical_link(struct hci_chan *hchan);
+-#else
+-static inline void amp_create_logical_link(struct l2cap_chan *chan)
+-{
+-}
+-
+-static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+-{
+-}
+-#endif
+-
+-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
+-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
+-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
+-void amp_create_logical_link(struct l2cap_chan *chan);
+-void amp_disconnect_logical_link(struct hci_chan *hchan);
+-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
+-
+-#endif /* __AMP_H */
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index 5a6a49885ab66d..a660c428e2207c 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+
+ case BNEP_COMPRESSED_DST_ONLY:
+ __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
+- __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
++ __skb_put_data(nskb, s->eh.h_source, ETH_ALEN);
++ put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2));
+ break;
+
+ case BNEP_GENERAL:
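
The old BNEP line copied ETH_ALEN + 2 bytes starting at s->eh.h_source, deliberately reading past the source MAC into h_proto; that relies on struct ethhdr layout and trips field-spanning memcpy hardening (FORTIFY_SOURCE). The fix copies the MAC exactly and stores the 2-byte protocol with put_unaligned(), which is also safe on architectures that fault on misaligned stores. Sketch of the store:

	#include <asm/unaligned.h>
	#include <linux/skbuff.h>

	static void emit_proto(struct sk_buff *nskb, __be16 proto)
	{
		/* __skb_put() may return an odd address inside the frame;
		 * put_unaligned() stores the 2 bytes safely regardless.
		 */
		put_unaligned(proto, (__be16 *)__skb_put(nskb, 2));
	}
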
+diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
+index 9214189279e80e..1bc51e2b05a347 100644
+--- a/net/bluetooth/eir.c
++++ b/net/bluetooth/eir.c
+@@ -13,48 +13,33 @@
+
+ #define PNP_INFO_SVCLASS_ID 0x1200
+
+-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
+-{
+- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+-
+- /* If data is already NULL terminated just pass it directly */
+- if (data[data_len - 1] == '\0')
+- return eir_append_data(eir, eir_len, type, data, data_len);
+-
+- memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
+- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+-
+- return eir_append_data(eir, eir_len, type, name, sizeof(name));
+-}
+-
+ u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+ {
+ size_t short_len;
+ size_t complete_len;
+
+- /* no space left for name (+ NULL + type + len) */
+- if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
++ /* no space left for name (+ type + len) */
++ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
+ return ad_len;
+
+ /* use complete name if present and fits */
+ complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
+ if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
+- return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
+- hdev->dev_name, complete_len + 1);
++ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
++ hdev->dev_name, complete_len);
+
+ /* use short name if present */
+ short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
+ if (short_len)
+- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
++ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+ hdev->short_name,
+- short_len == HCI_MAX_SHORT_NAME_LENGTH ?
+- short_len : short_len + 1);
++ short_len);
+
+ /* use shortened full name if present, we already know that name
+ * is longer then HCI_MAX_SHORT_NAME_LENGTH
+ */
+ if (complete_len)
+- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
++ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
+ hdev->dev_name,
+ HCI_MAX_SHORT_NAME_LENGTH);
+
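
EIR/AD fields are {length, type, payload} triplets and the payload is not NUL-terminated, so the removed eir_append_name() wrapper both wasted a byte of the tight advertising budget and appended a fixed-size buffer even for shorter names; appending exactly strnlen() bytes via eir_append_data() matches the wire format, and the free-space check shrinks from +3 to +2 accordingly. A sketch of an appender of that shape (hypothetical helper mirroring eir_append_data()):

	#include <linux/string.h>
	#include <linux/types.h>

	/* hypothetical appender: emits one {len, type, data} triplet */
	static u8 ad_append(u8 *buf, u8 off, u8 type, const void *data, u8 data_len)
	{
		buf[off++] = data_len + 1;	/* length covers type + payload, no NUL */
		buf[off++] = type;
		memcpy(&buf[off], data, data_len);
		return off + data_len;
	}
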
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 73470cc3518a71..35d739988ce3e4 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -36,7 +36,6 @@
+
+ #include "hci_request.h"
+ #include "smp.h"
+-#include "a2mp.h"
+ #include "eir.h"
+
+ struct sco_param {
+@@ -69,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
+ };
+
+ /* This function requires the caller holds hdev->lock */
+-static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
++void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ {
+ struct hci_conn_params *params;
+ struct hci_dev *hdev = conn->hdev;
+@@ -108,8 +107,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ * where a timeout + cancel does indicate an actual failure.
+ */
+ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
+- mgmt_connect_failed(hdev, &conn->dst, conn->type,
+- conn->dst_type, status);
++ mgmt_connect_failed(hdev, conn, status);
+
+ /* The connection attempt was doing scan for new RPA, and is
+ * in scan phase. If params are not associated with any other
+@@ -153,6 +151,9 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+
+ hci_conn_hash_del(hdev, conn);
+
++ if (HCI_CONN_HANDLE_UNSET(conn->handle))
++ ida_free(&hdev->unset_handle_ida, conn->handle);
++
+ if (conn->cleanup)
+ conn->cleanup(conn);
+
+@@ -169,71 +170,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+ }
+
+- hci_conn_del_sysfs(conn);
+-
+ debugfs_remove_recursive(conn->debugfs);
+
+- hci_dev_put(hdev);
+-
+- hci_conn_put(conn);
+-}
+-
+-static void hci_acl_create_connection(struct hci_conn *conn)
+-{
+- struct hci_dev *hdev = conn->hdev;
+- struct inquiry_entry *ie;
+- struct hci_cp_create_conn cp;
+-
+- BT_DBG("hcon %p", conn);
+-
+- /* Many controllers disallow HCI Create Connection while it is doing
+- * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
+- * Connection. This may cause the MGMT discovering state to become false
+- * without user space's request but it is okay since the MGMT Discovery
+- * APIs do not promise that discovery should be done forever. Instead,
+- * the user space monitors the status of MGMT discovering and it may
+- * request for discovery again when this flag becomes false.
+- */
+- if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+- /* Put this connection to "pending" state so that it will be
+- * executed after the inquiry cancel command complete event.
+- */
+- conn->state = BT_CONNECT2;
+- hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+- return;
+- }
+-
+- conn->state = BT_CONNECT;
+- conn->out = true;
+- conn->role = HCI_ROLE_MASTER;
+-
+- conn->attempt++;
+-
+- conn->link_policy = hdev->link_policy;
+-
+- memset(&cp, 0, sizeof(cp));
+- bacpy(&cp.bdaddr, &conn->dst);
+- cp.pscan_rep_mode = 0x02;
+-
+- ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+- if (ie) {
+- if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
+- cp.pscan_rep_mode = ie->data.pscan_rep_mode;
+- cp.pscan_mode = ie->data.pscan_mode;
+- cp.clock_offset = ie->data.clock_offset |
+- cpu_to_le16(0x8000);
+- }
+-
+- memcpy(conn->dev_class, ie->data.dev_class, 3);
+- }
+-
+- cp.pkt_type = cpu_to_le16(conn->pkt_type);
+- if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
+- cp.role_switch = 0x01;
+- else
+- cp.role_switch = 0x00;
++ hci_conn_del_sysfs(conn);
+
+- hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
++ hci_dev_put(hdev);
+ }
+
+ int hci_disconnect(struct hci_conn *conn, __u8 reason)
+@@ -299,6 +240,13 @@ static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
+ __u8 vnd_len, *vnd_data = NULL;
+ struct hci_op_configure_data_path *cmd = NULL;
+
++ if (!codec->data_path || !hdev->get_codec_config_data)
++ return 0;
++
++ /* Do not take me as error */
++ if (!hdev->get_codec_config_data)
++ return 0;
++
+ err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
+ &vnd_data);
+ if (err < 0)
+@@ -342,11 +290,12 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+
+ kfree(conn_handle);
+
++ if (!hci_conn_valid(hdev, conn))
++ return -ECANCELED;
++
+ bt_dev_dbg(hdev, "hcon %p", conn);
+
+- /* for offload use case, codec needs to configured before opening SCO */
+- if (conn->codec.data_path)
+- configure_datapath_sync(hdev, &conn->codec);
++ configure_datapath_sync(hdev, &conn->codec);
+
+ conn->state = BT_CONNECT;
+ conn->out = true;
+@@ -759,6 +708,7 @@ static int terminate_big_sync(struct hci_dev *hdev, void *data)
+
+ bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
+
++ hci_disable_per_advertising_sync(hdev, d->bis);
+ hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
+
+ /* Only terminate BIG if it has been created */
+@@ -928,39 +878,52 @@ static void cis_cleanup(struct hci_conn *conn)
+ hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
+ }
+
+-static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
++static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+ {
+- struct hci_conn_hash *h = &hdev->conn_hash;
+- struct hci_conn *c;
+- u16 handle = HCI_CONN_HANDLE_MAX + 1;
+-
+- rcu_read_lock();
+-
+- list_for_each_entry_rcu(c, &h->list, list) {
+- /* Find the first unused handle */
+- if (handle == 0xffff || c->handle != handle)
+- break;
+- handle++;
+- }
+- rcu_read_unlock();
+-
+- return handle;
++ return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
++ U16_MAX, GFP_ATOMIC);
+ }
+
+-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+- u8 role)
++static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
++ u8 role, u16 handle)
+ {
+ struct hci_conn *conn;
+
+- BT_DBG("%s dst %pMR", hdev->name, dst);
++ switch (type) {
++ case ACL_LINK:
++ if (!hdev->acl_mtu)
++ return ERR_PTR(-ECONNREFUSED);
++ break;
++ case ISO_LINK:
++ if (hdev->iso_mtu)
++ /* Dedicated ISO Buffer exists */
++ break;
++ fallthrough;
++ case LE_LINK:
++ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
++ return ERR_PTR(-ECONNREFUSED);
++ if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
++ return ERR_PTR(-ECONNREFUSED);
++ break;
++ case SCO_LINK:
++ case ESCO_LINK:
++ if (!hdev->sco_pkts)
++ /* Controller does not support SCO or eSCO over HCI */
++ return ERR_PTR(-ECONNREFUSED);
++ break;
++ default:
++ return ERR_PTR(-ECONNREFUSED);
++ }
++
++ bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ bacpy(&conn->dst, dst);
+ bacpy(&conn->src, &hdev->bdaddr);
+- conn->handle = hci_conn_hash_alloc_unset(hdev);
++ conn->handle = handle;
+ conn->hdev = hdev;
+ conn->type = type;
+ conn->role = role;
+@@ -973,6 +936,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ conn->rssi = HCI_RSSI_INVALID;
+ conn->tx_power = HCI_TX_POWER_INVALID;
+ conn->max_tx_power = HCI_TX_POWER_INVALID;
++ conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
+
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+@@ -986,10 +950,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ switch (type) {
+ case ACL_LINK:
+ conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
++ conn->mtu = hdev->acl_mtu;
+ break;
+ case LE_LINK:
+ /* conn->src should reflect the local identity address */
+ hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
++ conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
+ break;
+ case ISO_LINK:
+ /* conn->src should reflect the local identity address */
+@@ -1001,6 +967,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ else if (conn->role == HCI_ROLE_MASTER)
+ conn->cleanup = cis_cleanup;
+
++ conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
++ hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
+ break;
+ case SCO_LINK:
+ if (lmp_esco_capable(hdev))
+@@ -1008,9 +976,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ (hdev->esco_type & EDR_ESCO_MASK);
+ else
+ conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
++
++ conn->mtu = hdev->sco_mtu;
+ break;
+ case ESCO_LINK:
+ conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
++ conn->mtu = hdev->sco_mtu;
+ break;
+ }
+
+@@ -1044,6 +1015,29 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ return conn;
+ }
+
++struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
++ bdaddr_t *dst, u8 role)
++{
++ int handle;
++
++ bt_dev_dbg(hdev, "dst %pMR", dst);
++
++ handle = hci_conn_hash_alloc_unset(hdev);
++ if (unlikely(handle < 0))
++ return ERR_PTR(-ECONNREFUSED);
++
++ return __hci_conn_add(hdev, type, dst, role, handle);
++}
++
++struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
++ u8 role, u16 handle)
++{
++ if (handle > HCI_CONN_HANDLE_MAX)
++ return ERR_PTR(-EINVAL);
++
++ return __hci_conn_add(hdev, type, dst, role, handle);
++}
++
+ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
+ {
+ if (!reason)
+@@ -1143,9 +1137,6 @@ void hci_conn_del(struct hci_conn *conn)
+ }
+ }
+
+- if (conn->amp_mgr)
+- amp_mgr_put(conn->amp_mgr);
+-
+ skb_queue_purge(&conn->data_q);
+
+ /* Remove the connection from the list and cleanup its remaining
+@@ -1154,6 +1145,9 @@ void hci_conn_del(struct hci_conn *conn)
+ * rest of hci_conn_del.
+ */
+ hci_conn_cleanup(conn);
++
++ /* Dequeue callbacks using connection pointer as data */
++ hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
+ }
+
+ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
+@@ -1167,8 +1161,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (!test_bit(HCI_UP, &d->flags) ||
+- hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
+- d->dev_type != HCI_PRIMARY)
++ hci_dev_test_flag(d, HCI_USER_CHANNEL))
+ continue;
+
+ /* Simple routing:
+@@ -1242,11 +1235,16 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
+ hci_le_conn_failed(conn, status);
+ break;
+ case ACL_LINK:
+- mgmt_connect_failed(hdev, &conn->dst, conn->type,
+- conn->dst_type, status);
++ mgmt_connect_failed(hdev, conn, status);
+ break;
+ }
+
++ /* In case of BIG/PA sync failed, clear conn flags so that
++ * the conns will be correctly cleaned up by ISO layer
++ */
++ test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
++ test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
++
+ conn->state = BT_CLOSED;
+ hci_connect_cfm(conn, status);
+ hci_conn_del(conn);
+@@ -1274,58 +1272,14 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
+ if (conn->abort_reason)
+ return conn->abort_reason;
+
++ if (HCI_CONN_HANDLE_UNSET(conn->handle))
++ ida_free(&hdev->unset_handle_ida, conn->handle);
++
+ conn->handle = handle;
+
+ return 0;
+ }
+
+-static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+-{
+- struct hci_conn *conn;
+- u16 handle = PTR_UINT(data);
+-
+- conn = hci_conn_hash_lookup_handle(hdev, handle);
+- if (!conn)
+- return;
+-
+- bt_dev_dbg(hdev, "err %d", err);
+-
+- hci_dev_lock(hdev);
+-
+- if (!err) {
+- hci_connect_le_scan_cleanup(conn, 0x00);
+- goto done;
+- }
+-
+- /* Check if connection is still pending */
+- if (conn != hci_lookup_le_connect(hdev))
+- goto done;
+-
+- /* Flush to make sure we send create conn cancel command if needed */
+- flush_delayed_work(&conn->le_conn_timeout);
+- hci_conn_failed(conn, bt_status(err));
+-
+-done:
+- hci_dev_unlock(hdev);
+-}
+-
+-static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
+-{
+- struct hci_conn *conn;
+- u16 handle = PTR_UINT(data);
+-
+- conn = hci_conn_hash_lookup_handle(hdev, handle);
+- if (!conn)
+- return 0;
+-
+- bt_dev_dbg(hdev, "conn %p", conn);
+-
+- clear_bit(HCI_CONN_SCANNING, &conn->flags);
+- conn->state = BT_CONNECT;
+-
+- return hci_le_create_conn_sync(hdev, conn);
+-}
+-
+ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, bool dst_resolved, u8 sec_level,
+ u16 conn_timeout, u8 role)
+@@ -1381,9 +1335,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ if (conn) {
+ bacpy(&conn->dst, dst);
+ } else {
+- conn = hci_conn_add(hdev, LE_LINK, dst, role);
+- if (!conn)
+- return ERR_PTR(-ENOMEM);
++ conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
++ if (IS_ERR(conn))
++ return conn;
+ hci_conn_hold(conn);
+ conn->pending_sec_level = sec_level;
+ }
+@@ -1392,9 +1346,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ conn->sec_level = BT_SECURITY_LOW;
+ conn->conn_timeout = conn_timeout;
+
+- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
+- UINT_PTR(conn->handle),
+- create_le_conn_complete);
++ err = hci_connect_le_sync(hdev, conn);
+ if (err) {
+ hci_conn_del(conn);
+ return ERR_PTR(err);
+@@ -1546,9 +1498,9 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ memcmp(conn->le_per_adv_data, base, base_len)))
+ return ERR_PTR(-EADDRINUSE);
+
+- conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+- if (!conn)
+- return ERR_PTR(-ENOMEM);
++ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
++ if (IS_ERR(conn))
++ return conn;
+
+ conn->state = BT_CONNECT;
+
+@@ -1590,9 +1542,9 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+
+ BT_DBG("requesting refresh of dst_addr");
+
+- conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
+- if (!conn)
+- return ERR_PTR(-ENOMEM);
++ conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
++ if (IS_ERR(conn))
++ return conn;
+
+ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
+ hci_conn_del(conn);
+@@ -1638,19 +1590,26 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+
+ acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (!acl) {
+- acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+- if (!acl)
+- return ERR_PTR(-ENOMEM);
++ acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
++ if (IS_ERR(acl))
++ return acl;
+ }
+
+ hci_conn_hold(acl);
+
+ acl->conn_reason = conn_reason;
+ if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
++ int err;
++
+ acl->sec_level = BT_SECURITY_LOW;
+ acl->pending_sec_level = sec_level;
+ acl->auth_type = auth_type;
+- hci_acl_create_connection(acl);
++
++ err = hci_connect_acl_sync(hdev, acl);
++ if (err) {
++ hci_conn_del(acl);
++ return ERR_PTR(err);
++ }
+ }
+
+ return acl;
+@@ -1698,10 +1657,10 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+
+ sco = hci_conn_hash_lookup_ba(hdev, type, dst);
+ if (!sco) {
+- sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
+- if (!sco) {
++ sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
++ if (IS_ERR(sco)) {
+ hci_conn_drop(acl);
+- return ERR_PTR(-ENOMEM);
++ return sco;
+ }
+ }
+
+@@ -1890,9 +1849,9 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
+ qos->ucast.cis);
+ if (!cis) {
+- cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+- if (!cis)
+- return ERR_PTR(-ENOMEM);
++ cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
++ if (IS_ERR(cis))
++ return cis;
+ cis->cleanup = cis_cleanup;
+ cis->dst_type = dst_type;
+ cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
+@@ -2027,14 +1986,8 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
+ struct bt_iso_io_qos *qos, __u8 phy)
+ {
+ /* Only set MTU if PHY is enabled */
+- if (!qos->sdu && qos->phy) {
+- if (hdev->iso_mtu > 0)
+- qos->sdu = hdev->iso_mtu;
+- else if (hdev->le_mtu > 0)
+- qos->sdu = hdev->le_mtu;
+- else
+- qos->sdu = hdev->acl_mtu;
+- }
++ if (!qos->sdu && qos->phy)
++ qos->sdu = conn->mtu;
+
+ /* Use the same PHY as ACL if set to any */
+ if (qos->phy == BT_ISO_PHY_ANY)
+@@ -2374,12 +2327,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+
+- /* If we're already encrypted set the REAUTH_PEND flag,
+- * otherwise set the ENCRYPT_PEND.
++ /* Set the ENCRYPT_PEND to trigger encryption after
++ * authentication.
+ */
+- if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+- set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+- else
++ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ }
+
+@@ -2569,22 +2520,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
+ }
+ }
+
+-/* Check pending connect attempts */
+-void hci_conn_check_pending(struct hci_dev *hdev)
+-{
+- struct hci_conn *conn;
+-
+- BT_DBG("hdev %s", hdev->name);
+-
+- hci_dev_lock(hdev);
+-
+- conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
+- if (conn)
+- hci_acl_create_connection(conn);
+-
+- hci_dev_unlock(hdev);
+-}
+-
+ static u32 get_link_mode(struct hci_conn *conn)
+ {
+ u32 link_mode = 0;
+@@ -2900,12 +2835,10 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+
+ static int abort_conn_sync(struct hci_dev *hdev, void *data)
+ {
+- struct hci_conn *conn;
+- u16 handle = PTR_UINT(data);
++ struct hci_conn *conn = data;
+
+- conn = hci_conn_hash_lookup_handle(hdev, handle);
+- if (!conn)
+- return 0;
++ if (!hci_conn_valid(hdev, conn))
++ return -ECANCELED;
+
+ return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
+ }
+@@ -2933,14 +2866,21 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ */
+ if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+ switch (hci_skb_event(hdev->sent_cmd)) {
++ case HCI_EV_CONN_COMPLETE:
+ case HCI_EV_LE_CONN_COMPLETE:
+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
+ case HCI_EVT_LE_CIS_ESTABLISHED:
+- hci_cmd_sync_cancel(hdev, -ECANCELED);
++ hci_cmd_sync_cancel(hdev, ECANCELED);
+ break;
+ }
++ /* Cancel connect attempt if still queued/pending */
++ } else if (!hci_cancel_connect_sync(hdev, conn)) {
++ return 0;
+ }
+
+- return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
+- NULL);
++ /* Run immediately if on cmd_sync_work since this may be called
++ * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does
++ * already queue its callback on cmd_sync_work.
++ */
++ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
+ }
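/*
 * Illustrative sketch, not part of the patch: hci_conn.c above stops
 * scanning the connection list for a free "unset" handle and instead
 * draws one from an IDA strictly above the valid HCI handle range, freeing
 * it again in hci_conn_cleanup()/hci_conn_set_handle(). Reduced standalone
 * shape (the constant mirrors the kernel's HCI_CONN_HANDLE_MAX; the rest
 * of the names are hypothetical):
 */
#include <linux/idr.h>
#include <linux/limits.h>

#define EXAMPLE_HANDLE_MAX 0x0eff	/* mirrors HCI_CONN_HANDLE_MAX */

static DEFINE_IDA(example_unset_ida);

static int example_alloc_unset_handle(void)
{
	/* first free value strictly above the spec-valid handle range */
	return ida_alloc_range(&example_unset_ida, EXAMPLE_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}

static void example_free_unset_handle(int handle)
{
	ida_free(&example_unset_ida, handle);
}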
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 195aea2198a963..d4e607bf35baff 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -63,50 +63,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+
+-static int hci_scan_req(struct hci_request *req, unsigned long opt)
+-{
+- __u8 scan = opt;
+-
+- BT_DBG("%s %x", req->hdev->name, scan);
+-
+- /* Inquiry and Page scans */
+- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+- return 0;
+-}
+-
+-static int hci_auth_req(struct hci_request *req, unsigned long opt)
+-{
+- __u8 auth = opt;
+-
+- BT_DBG("%s %x", req->hdev->name, auth);
+-
+- /* Authentication */
+- hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+- return 0;
+-}
+-
+-static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
+-{
+- __u8 encrypt = opt;
+-
+- BT_DBG("%s %x", req->hdev->name, encrypt);
+-
+- /* Encryption */
+- hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+- return 0;
+-}
+-
+-static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
+-{
+- __le16 policy = cpu_to_le16(opt);
+-
+- BT_DBG("%s %x", req->hdev->name, policy);
+-
+- /* Default link policy */
+- hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+- return 0;
+-}
+-
+ /* Get HCI device by index.
+ * Device is held on return. */
+ struct hci_dev *hci_dev_get(int index)
+@@ -395,11 +351,6 @@ int hci_inquiry(void __user *arg)
+ goto done;
+ }
+
+- if (hdev->dev_type != HCI_PRIMARY) {
+- err = -EOPNOTSUPP;
+- goto done;
+- }
+-
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ err = -EOPNOTSUPP;
+ goto done;
+@@ -733,6 +684,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ {
+ struct hci_dev *hdev;
+ struct hci_dev_req dr;
++ __le16 policy;
+ int err = 0;
+
+ if (copy_from_user(&dr, arg, sizeof(dr)))
+@@ -752,11 +704,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ goto done;
+ }
+
+- if (hdev->dev_type != HCI_PRIMARY) {
+- err = -EOPNOTSUPP;
+- goto done;
+- }
+-
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ err = -EOPNOTSUPP;
+ goto done;
+@@ -764,8 +711,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+
+ switch (cmd) {
+ case HCISETAUTH:
+- err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+- HCI_INIT_TIMEOUT, NULL);
++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
++ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
+ break;
+
+ case HCISETENCRYPT:
+@@ -776,19 +723,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+
+ if (!test_bit(HCI_AUTH, &hdev->flags)) {
+ /* Auth must be enabled first */
+- err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+- HCI_INIT_TIMEOUT, NULL);
++ err = hci_cmd_sync_status(hdev,
++ HCI_OP_WRITE_AUTH_ENABLE,
++ 1, &dr.dev_opt,
++ HCI_CMD_TIMEOUT);
+ if (err)
+ break;
+ }
+
+- err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
+- HCI_INIT_TIMEOUT, NULL);
++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
++ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
+ break;
+
+ case HCISETSCAN:
+- err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
+- HCI_INIT_TIMEOUT, NULL);
++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
++ 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
+
+ /* Ensure that the connectable and discoverable states
+ * get correctly modified as this was a non-mgmt change.
+@@ -798,8 +747,10 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ break;
+
+ case HCISETLINKPOL:
+- err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
+- HCI_INIT_TIMEOUT, NULL);
++ policy = cpu_to_le16(dr.dev_opt);
++
++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
++ 2, &policy, HCI_CMD_TIMEOUT);
+ break;
+
+ case HCISETLINKMODE:
+@@ -908,9 +859,9 @@ int hci_get_dev_info(void __user *arg)
+ else
+ flags = hdev->flags;
+
+- strcpy(di.name, hdev->name);
++ strscpy(di.name, hdev->name, sizeof(di.name));
+ di.bdaddr = hdev->bdaddr;
+- di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
++ di.type = (hdev->bus & 0x0f);
+ di.flags = flags;
+ di.pkt_type = hdev->pkt_type;
+ if (lmp_bredr_capable(hdev)) {
+@@ -995,8 +946,7 @@ static void hci_power_on(struct work_struct *work)
+ */
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+- (hdev->dev_type == HCI_PRIMARY &&
+- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
++ (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+ hci_dev_do_close(hdev);
+@@ -1049,6 +999,7 @@ static void hci_error_reset(struct work_struct *work)
+ {
+ struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+
++ hci_dev_hold(hdev);
+ BT_DBG("%s", hdev->name);
+
+ if (hdev->hw_error)
+@@ -1056,10 +1007,10 @@ static void hci_error_reset(struct work_struct *work)
+ else
+ bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
+
+- if (hci_dev_do_close(hdev))
+- return;
++ if (!hci_dev_do_close(hdev))
++ hci_dev_do_open(hdev);
+
+- hci_dev_do_open(hdev);
++ hci_dev_put(hdev);
+ }
+
+ void hci_uuids_clear(struct hci_dev *hdev)
+@@ -1490,11 +1441,12 @@ static void hci_cmd_timeout(struct work_struct *work)
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ cmd_timer.work);
+
+- if (hdev->sent_cmd) {
+- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+- u16 opcode = __le16_to_cpu(sent->opcode);
++ if (hdev->req_skb) {
++ u16 opcode = hci_skb_opcode(hdev->req_skb);
+
+ bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
++
++ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
+ } else {
+ bt_dev_err(hdev, "command tx timeout");
+ }
+@@ -2439,10 +2391,16 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ /* To avoid a potential race with hci_unregister_dev. */
+ hci_dev_hold(hdev);
+
+- if (action == PM_SUSPEND_PREPARE)
++ switch (action) {
++ case PM_HIBERNATION_PREPARE:
++ case PM_SUSPEND_PREPARE:
+ ret = hci_suspend_dev(hdev);
+- else if (action == PM_POST_SUSPEND)
++ break;
++ case PM_POST_HIBERNATION:
++ case PM_POST_SUSPEND:
+ ret = hci_resume_dev(hdev);
++ break;
++ }
+
+ if (ret)
+ bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
+@@ -2535,6 +2493,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
++ ida_init(&hdev->unset_handle_ida);
++
+ INIT_LIST_HEAD(&hdev->mesh_pending);
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->reject_list);
+@@ -2600,20 +2560,7 @@ int hci_register_dev(struct hci_dev *hdev)
+ if (!hdev->open || !hdev->close || !hdev->send)
+ return -EINVAL;
+
+- /* Do not allow HCI_AMP devices to register at index 0,
+- * so the index can be used as the AMP controller ID.
+- */
+- switch (hdev->dev_type) {
+- case HCI_PRIMARY:
+- id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
+- break;
+- case HCI_AMP:
+- id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
+- break;
+- default:
+- return -EINVAL;
+- }
+-
++ id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+@@ -2665,12 +2612,10 @@ int hci_register_dev(struct hci_dev *hdev)
+ hci_dev_set_flag(hdev, HCI_SETUP);
+ hci_dev_set_flag(hdev, HCI_AUTO_OFF);
+
+- if (hdev->dev_type == HCI_PRIMARY) {
+- /* Assume BR/EDR support until proven otherwise (such as
+- * through reading supported features during init.
+- */
+- hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
+- }
++ /* Assume BR/EDR support until proven otherwise (such as
++ * through reading supported features during init.
++ */
++ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
+
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
+@@ -2707,7 +2652,7 @@ int hci_register_dev(struct hci_dev *hdev)
+ destroy_workqueue(hdev->workqueue);
+ destroy_workqueue(hdev->req_workqueue);
+ err:
+- ida_simple_remove(&hci_index_ida, hdev->id);
++ ida_free(&hci_index_ida, hdev->id);
+
+ return error;
+ }
+@@ -2726,14 +2671,16 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ list_del(&hdev->list);
+ write_unlock(&hci_dev_list_lock);
+
++ cancel_work_sync(&hdev->rx_work);
++ cancel_work_sync(&hdev->cmd_work);
++ cancel_work_sync(&hdev->tx_work);
+ cancel_work_sync(&hdev->power_on);
++ cancel_work_sync(&hdev->error_reset);
+
+ hci_cmd_sync_clear(hdev);
+
+ hci_unregister_suspend_notifier(hdev);
+
+- msft_unregister(hdev);
+-
+ hci_dev_do_close(hdev);
+
+ if (!test_bit(HCI_INIT, &hdev->flags) &&
+@@ -2787,10 +2734,13 @@ void hci_release_dev(struct hci_dev *hdev)
+ hci_discovery_filter_clear(hdev);
+ hci_blocked_keys_clear(hdev);
+ hci_codec_list_clear(&hdev->local_codecs);
++ msft_release(hdev);
+ hci_dev_unlock(hdev);
+
+- ida_simple_remove(&hci_index_ida, hdev->id);
++ ida_destroy(&hdev->unset_handle_ida);
++ ida_free(&hci_index_ida, hdev->id);
+ kfree_skb(hdev->sent_cmd);
++ kfree_skb(hdev->req_skb);
+ kfree_skb(hdev->recv_event);
+ kfree(hdev);
+ }
+@@ -2822,6 +2772,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
+ return ret;
+ }
+
++/* Cancel ongoing command synchronously:
++ *
++ * - Cancel command timer
++ * - Reset command counter
++ * - Cancel command request
++ */
++static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
++{
++ bt_dev_dbg(hdev, "err 0x%2.2x", err);
++
++ cancel_delayed_work_sync(&hdev->cmd_timer);
++ cancel_delayed_work_sync(&hdev->ncmd_timer);
++ atomic_set(&hdev->cmd_cnt, 1);
++
++ hci_cmd_sync_cancel_sync(hdev, err);
++}
++
+ /* Suspend HCI device */
+ int hci_suspend_dev(struct hci_dev *hdev)
+ {
+@@ -2839,7 +2806,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
+ return 0;
+
+ /* Cancel potentially blocking sync operation before suspend */
+- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
++ hci_cancel_cmd_sync(hdev, EHOSTDOWN);
+
+ hci_req_sync_lock(hdev);
+ ret = hci_suspend_sync(hdev);
+@@ -3103,21 +3070,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
+ EXPORT_SYMBOL(__hci_cmd_send);
+
+ /* Get data from the previously sent command */
+-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
++static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
+ {
+ struct hci_command_hdr *hdr;
+
+- if (!hdev->sent_cmd)
++ if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
+ return NULL;
+
+- hdr = (void *) hdev->sent_cmd->data;
++ hdr = (void *)skb->data;
+
+ if (hdr->opcode != cpu_to_le16(opcode))
+ return NULL;
+
+- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
++ return skb->data + HCI_COMMAND_HDR_SIZE;
++}
++
++/* Get data from the previously sent command */
++void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
++{
++ void *data;
+
+- return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
++ /* Check if opcode matches last sent command */
++ data = hci_cmd_data(hdev->sent_cmd, opcode);
++ if (!data)
++ /* Check if opcode matches last request */
++ data = hci_cmd_data(hdev->req_skb, opcode);
++
++ return data;
+ }
+
+ /* Get data from last received event */
+@@ -3176,17 +3155,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
+
+ hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
+
+- switch (hdev->dev_type) {
+- case HCI_PRIMARY:
+- hci_add_acl_hdr(skb, conn->handle, flags);
+- break;
+- case HCI_AMP:
+- hci_add_acl_hdr(skb, chan->handle, flags);
+- break;
+- default:
+- bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
+- return;
+- }
++ hci_add_acl_hdr(skb, conn->handle, flags);
+
+ list = skb_shinfo(skb)->frag_list;
+ if (!list) {
+@@ -3346,9 +3315,6 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+- case AMP_LINK:
+- cnt = hdev->block_cnt;
+- break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+@@ -3546,12 +3512,6 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+
+ }
+
+-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+-{
+- /* Calculate count of blocks used by this packet */
+- return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+-}
+-
+ static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
+ {
+ unsigned long last_tx;
+@@ -3665,100 +3625,34 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
+ hci_prio_recalculate(hdev, ACL_LINK);
+ }
+
+-static void hci_sched_acl_blk(struct hci_dev *hdev)
+-{
+- unsigned int cnt = hdev->block_cnt;
+- struct hci_chan *chan;
+- struct sk_buff *skb;
+- int quote;
+- u8 type;
+-
+- BT_DBG("%s", hdev->name);
+-
+- if (hdev->dev_type == HCI_AMP)
+- type = AMP_LINK;
+- else
+- type = ACL_LINK;
+-
+- __check_timeout(hdev, cnt, type);
+-
+- while (hdev->block_cnt > 0 &&
+- (chan = hci_chan_sent(hdev, type, &quote))) {
+- u32 priority = (skb_peek(&chan->data_q))->priority;
+- while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+- int blocks;
+-
+- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+- skb->len, skb->priority);
+-
+- /* Stop if priority has changed */
+- if (skb->priority < priority)
+- break;
+-
+- skb = skb_dequeue(&chan->data_q);
+-
+- blocks = __get_blocks(hdev, skb);
+- if (blocks > hdev->block_cnt)
+- return;
+-
+- hci_conn_enter_active_mode(chan->conn,
+- bt_cb(skb)->force_active);
+-
+- hci_send_frame(hdev, skb);
+- hdev->acl_last_tx = jiffies;
+-
+- hdev->block_cnt -= blocks;
+- quote -= blocks;
+-
+- chan->sent += blocks;
+- chan->conn->sent += blocks;
+- }
+- }
+-
+- if (cnt != hdev->block_cnt)
+- hci_prio_recalculate(hdev, type);
+-}
+-
+ static void hci_sched_acl(struct hci_dev *hdev)
+ {
+ BT_DBG("%s", hdev->name);
+
+ /* No ACL link over BR/EDR controller */
+- if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
+- return;
+-
+- /* No AMP link over AMP controller */
+- if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
++ if (!hci_conn_num(hdev, ACL_LINK))
+ return;
+
+- switch (hdev->flow_ctl_mode) {
+- case HCI_FLOW_CTL_MODE_PACKET_BASED:
+- hci_sched_acl_pkt(hdev);
+- break;
+-
+- case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+- hci_sched_acl_blk(hdev);
+- break;
+- }
++ hci_sched_acl_pkt(hdev);
+ }
+
+ static void hci_sched_le(struct hci_dev *hdev)
+ {
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+- int quote, cnt, tmp;
++ int quote, *cnt, tmp;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_conn_num(hdev, LE_LINK))
+ return;
+
+- cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
++ cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
+
+- __check_timeout(hdev, cnt, LE_LINK);
++ __check_timeout(hdev, *cnt, LE_LINK);
+
+- tmp = cnt;
+- while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
++ tmp = *cnt;
++ while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+@@ -3773,7 +3667,7 @@ static void hci_sched_le(struct hci_dev *hdev)
+ hci_send_frame(hdev, skb);
+ hdev->le_last_tx = jiffies;
+
+- cnt--;
++ (*cnt)--;
+ chan->sent++;
+ chan->conn->sent++;
+
+@@ -3783,12 +3677,7 @@ static void hci_sched_le(struct hci_dev *hdev)
+ }
+ }
+
+- if (hdev->le_pkts)
+- hdev->le_cnt = cnt;
+- else
+- hdev->acl_cnt = cnt;
+-
+- if (cnt != tmp)
++ if (*cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
+ }
+
+@@ -3863,6 +3752,8 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
++ if (conn && hci_dev_test_flag(hdev, HCI_MGMT))
++ mgmt_device_connected(hdev, conn, NULL, 0);
+ hci_dev_unlock(hdev);
+
+ if (conn) {
+@@ -4018,17 +3909,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ if (!status && !hci_req_is_complete(hdev))
+ return;
+
++ skb = hdev->req_skb;
++
+ /* If this was the last command in a request the complete
+- * callback would be found in hdev->sent_cmd instead of the
++ * callback would be found in hdev->req_skb instead of the
+ * command queue (hdev->cmd_q).
+ */
+- if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
+- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
++ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
++ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ return;
+ }
+
+- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
+- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
++ if (skb && bt_cb(skb)->hci.req_complete) {
++ *req_complete = bt_cb(skb)->hci.req_complete;
+ return;
+ }
+
+@@ -4124,6 +4017,36 @@ static void hci_rx_work(struct work_struct *work)
+ }
+ }
+
++static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
++{
++ int err;
++
++ bt_dev_dbg(hdev, "skb %p", skb);
++
++ kfree_skb(hdev->sent_cmd);
++
++ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
++ if (!hdev->sent_cmd) {
++ skb_queue_head(&hdev->cmd_q, skb);
++ queue_work(hdev->workqueue, &hdev->cmd_work);
++ return;
++ }
++
++ err = hci_send_frame(hdev, skb);
++ if (err < 0) {
++ hci_cmd_sync_cancel_sync(hdev, -err);
++ return;
++ }
++
++ if (hci_req_status_pend(hdev) &&
++ !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
++ kfree_skb(hdev->req_skb);
++ hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
++ }
++
++ atomic_dec(&hdev->cmd_cnt);
++}
++
+ static void hci_cmd_work(struct work_struct *work)
+ {
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
+@@ -4138,30 +4061,15 @@ static void hci_cmd_work(struct work_struct *work)
+ if (!skb)
+ return;
+
+- kfree_skb(hdev->sent_cmd);
+-
+- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+- if (hdev->sent_cmd) {
+- int res;
+- if (hci_req_status_pend(hdev))
+- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
+- atomic_dec(&hdev->cmd_cnt);
+-
+- res = hci_send_frame(hdev, skb);
+- if (res < 0)
+- __hci_cmd_sync_cancel(hdev, -res);
++ hci_send_cmd_sync(hdev, skb);
+
+- rcu_read_lock();
+- if (test_bit(HCI_RESET, &hdev->flags) ||
+- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+- cancel_delayed_work(&hdev->cmd_timer);
+- else
+- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+- HCI_CMD_TIMEOUT);
+- rcu_read_unlock();
+- } else {
+- skb_queue_head(&hdev->cmd_q, skb);
+- queue_work(hdev->workqueue, &hdev->cmd_work);
+- }
++ rcu_read_lock();
++ if (test_bit(HCI_RESET, &hdev->flags) ||
++ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
++ cancel_delayed_work(&hdev->cmd_timer);
++ else
++ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
++ HCI_CMD_TIMEOUT);
++ rcu_read_unlock();
+ }
+ }
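/*
 * Illustrative sketch, not part of the patch: hci_sched_le() above now
 * takes the packet budget through a pointer, so each decrement lands
 * directly in hdev->le_cnt or hdev->acl_cnt instead of being copied back
 * after the loop (which could clobber counters updated concurrently by
 * completion events). Reduced to a standalone shape, names hypothetical:
 */
static int le_budget;			/* 0: no dedicated LE buffers */
static int acl_budget = 5;

static void drain_one(int *cnt)
{
	while (*cnt > 0) {
		/* ... send one queued frame ... */
		(*cnt)--;		/* shared counter updated in place */
	}
}

static void sched_example(void)
{
	/* fall back to the ACL budget when no LE budget exists */
	drain_one(le_budget ? &le_budget : &acl_budget);
}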
+diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
+index 6b7741f6e95b25..ce3ff2fa72e58a 100644
+--- a/net/bluetooth/hci_debugfs.c
++++ b/net/bluetooth/hci_debugfs.c
+@@ -218,10 +218,12 @@ static int conn_info_min_age_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val == 0 || val > hdev->conn_info_max_age)
++ hci_dev_lock(hdev);
++ if (val == 0 || val > hdev->conn_info_max_age) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->conn_info_min_age = val;
+ hci_dev_unlock(hdev);
+
+@@ -246,10 +248,12 @@ static int conn_info_max_age_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val == 0 || val < hdev->conn_info_min_age)
++ hci_dev_lock(hdev);
++ if (val == 0 || val < hdev->conn_info_min_age) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->conn_info_max_age = val;
+ hci_dev_unlock(hdev);
+
+@@ -567,10 +571,12 @@ static int sniff_min_interval_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
++ hci_dev_lock(hdev);
++ if (val == 0 || val % 2 || val > hdev->sniff_max_interval) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->sniff_min_interval = val;
+ hci_dev_unlock(hdev);
+
+@@ -595,10 +601,12 @@ static int sniff_max_interval_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
++ hci_dev_lock(hdev);
++ if (val == 0 || val % 2 || val < hdev->sniff_min_interval) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->sniff_max_interval = val;
+ hci_dev_unlock(hdev);
+
+@@ -850,10 +858,12 @@ static int conn_min_interval_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
++ hci_dev_lock(hdev);
++ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_conn_min_interval = val;
+ hci_dev_unlock(hdev);
+
+@@ -878,10 +888,12 @@ static int conn_max_interval_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
++ hci_dev_lock(hdev);
++ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_conn_max_interval = val;
+ hci_dev_unlock(hdev);
+
+@@ -990,10 +1002,12 @@ static int adv_min_interval_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
++ hci_dev_lock(hdev);
++ if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_adv_min_interval = val;
+ hci_dev_unlock(hdev);
+
+@@ -1018,10 +1032,12 @@ static int adv_max_interval_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
++ hci_dev_lock(hdev);
++ if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_adv_max_interval = val;
+ hci_dev_unlock(hdev);
+
+@@ -1046,10 +1062,12 @@ static int min_key_size_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE)
++ hci_dev_lock(hdev);
++ if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_min_key_size = val;
+ hci_dev_unlock(hdev);
+
+@@ -1074,10 +1092,12 @@ static int max_key_size_set(void *data, u64 val)
+ {
+ struct hci_dev *hdev = data;
+
+- if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size)
++ hci_dev_lock(hdev);
++ if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) {
++ hci_dev_unlock(hdev);
+ return -EINVAL;
++ }
+
+- hci_dev_lock(hdev);
+ hdev->le_max_key_size = val;
+ hci_dev_unlock(hdev);
+
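/*
 * Illustrative sketch, not part of the patch: every debugfs setter above
 * is converted from check-then-lock to lock-then-check, so the paired
 * bound (e.g. conn_info_max_age when setting conn_info_min_age) is read
 * under the same critical section that performs the write, closing the
 * race between validation and update. The shape of the fix with a generic
 * mutex and hypothetical names:
 */
#include <linux/mutex.h>
#include <linux/types.h>

struct limits_example {
	struct mutex lock;
	u64 min_age, max_age;
};

static int set_min_age(struct limits_example *l, u64 val)
{
	mutex_lock(&l->lock);
	/* max_age is read under the lock that also guards the write */
	if (val == 0 || val > l->max_age) {
		mutex_unlock(&l->lock);
		return -EINVAL;
	}
	l->min_age = val;
	mutex_unlock(&l->lock);
	return 0;
}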
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 1e1c9147356c3c..da056cca3edbcf 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -36,8 +36,6 @@
+ #include "hci_request.h"
+ #include "hci_debugfs.h"
+ #include "hci_codec.h"
+-#include "a2mp.h"
+-#include "amp.h"
+ #include "smp.h"
+ #include "msft.h"
+ #include "eir.h"
+@@ -95,11 +93,11 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
+ /* It is possible that we receive Inquiry Complete event right
+ * before we receive Inquiry Cancel Command Complete event, in
+ * which case the latter event should have status of Command
+- * Disallowed (0x0c). This should not be treated as error, since
++ * Disallowed. This should not be treated as error, since
+ * we actually achieve what Inquiry Cancel wants to achieve,
+ * which is to end the last Inquiry session.
+ */
+- if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
++ if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
+ rp->status = 0x00;
+ }
+@@ -120,8 +118,6 @@ static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+
+- hci_conn_check_pending(hdev);
+-
+ return rp->status;
+ }
+
+@@ -152,8 +148,6 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
+
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
+
+- hci_conn_check_pending(hdev);
+-
+ return rp->status;
+ }
+
+@@ -516,6 +510,9 @@ static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
+ {
+ struct hci_rp_read_class_of_dev *rp = data;
+
++ if (WARN_ON(!hdev))
++ return HCI_ERROR_UNSPECIFIED;
++
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ if (rp->status)
+@@ -747,9 +744,23 @@ static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
+ } else {
+ conn->enc_key_size = rp->key_size;
+ status = 0;
++
++ if (conn->enc_key_size < hdev->min_enc_key_size) {
++ /* As slave role, the conn->state has been set to
++ * BT_CONNECTED and l2cap conn req might not be received
++ * yet, at this moment the l2cap layer almost does
++ * nothing with the non-zero status.
++ * So we also clear encrypt related bits, and then the
++ * handler of l2cap conn req will get the right secure
++ * state at a later time.
++ */
++ status = HCI_ERROR_AUTH_FAILURE;
++ clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
++ clear_bit(HCI_CONN_AES_CCM, &conn->flags);
++ }
+ }
+
+- hci_encrypt_cfm(conn, 0);
++ hci_encrypt_cfm(conn, status);
+
+ done:
+ hci_dev_unlock(hdev);
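/*
 * Illustrative sketch, not part of the patch: the hci_cc_read_enc_key_size
 * hunk above enforces a minimum encryption key size once the controller
 * reports the negotiated key, clearing HCI_CONN_ENCRYPT/HCI_CONN_AES_CCM
 * and signalling an auth failure so L2CAP re-evaluates security later.
 * Reduced decision logic, names hypothetical:
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_AUTH_FAILURE 0x05	/* mirrors HCI_ERROR_AUTH_FAILURE */

static uint8_t check_enc_key_size(uint8_t key_size, uint8_t min_key_size,
				  bool *encrypted)
{
	if (key_size < min_key_size) {
		/* weak key: drop the encrypted marking and report the
		 * link as if authentication had failed */
		*encrypted = false;
		return EXAMPLE_AUTH_FAILURE;
	}
	return 0;	/* key is acceptable; link stays encrypted */
}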
+@@ -820,8 +831,6 @@ static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
+ if (!rp->status)
+ conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
+
+- hci_encrypt_cfm(conn, 0);
+-
+ unlock:
+ hci_dev_unlock(hdev);
+
+@@ -904,21 +913,6 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
+ return rp->status;
+ }
+
+-static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_rp_read_flow_control_mode *rp = data;
+-
+- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+- if (rp->status)
+- return rp->status;
+-
+- hdev->flow_ctl_mode = rp->mode;
+-
+- return rp->status;
+-}
+-
+ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+ {
+@@ -945,6 +939,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+ BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
+ hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
+
++ if (!hdev->acl_mtu || !hdev->acl_pkts)
++ return HCI_ERROR_INVALID_PARAMETERS;
++
+ return rp->status;
+ }
+
+@@ -1059,28 +1056,6 @@ static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
+ return rp->status;
+ }
+
+-static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_rp_read_data_block_size *rp = data;
+-
+- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+- if (rp->status)
+- return rp->status;
+-
+- hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+- hdev->block_len = __le16_to_cpu(rp->block_len);
+- hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+-
+- hdev->block_cnt = hdev->num_blocks;
+-
+- BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+- hdev->block_cnt, hdev->block_len);
+-
+- return rp->status;
+-}
+-
+ static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+ {
+@@ -1115,30 +1090,6 @@ static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
+ return rp->status;
+ }
+
+-static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_rp_read_local_amp_info *rp = data;
+-
+- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+- if (rp->status)
+- return rp->status;
+-
+- hdev->amp_status = rp->amp_status;
+- hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+- hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+- hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+- hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+- hdev->amp_type = rp->amp_type;
+- hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+- hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+- hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+- hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+-
+- return rp->status;
+-}
+-
+ static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+ {
+@@ -1254,6 +1205,9 @@ static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
+
+ BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
++ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
++ return HCI_ERROR_INVALID_PARAMETERS;
++
+ return rp->status;
+ }
+
+@@ -2299,12 +2253,11 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+ {
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+- if (status) {
+- hci_conn_check_pending(hdev);
++ if (status)
+ return;
+- }
+
+- set_bit(HCI_INQUIRY, &hdev->flags);
++ if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
++ set_bit(HCI_INQUIRY, &hdev->flags);
+ }
+
+ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+@@ -2326,19 +2279,16 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+
+ if (status) {
+ if (conn && conn->state == BT_CONNECT) {
+- if (status != 0x0c || conn->attempt > 2) {
+- conn->state = BT_CLOSED;
+- hci_connect_cfm(conn, status);
+- hci_conn_del(conn);
+- } else
+- conn->state = BT_CONNECT2;
++ conn->state = BT_CLOSED;
++ hci_connect_cfm(conn, status);
++ hci_conn_del(conn);
+ }
+ } else {
+ if (!conn) {
+- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
+- HCI_ROLE_MASTER);
+- if (!conn)
+- bt_dev_err(hdev, "no memory for new connection");
++ conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
++ HCI_ROLE_MASTER);
++ if (IS_ERR(conn))
++ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ }
+ }
+
+@@ -2510,9 +2460,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
+ * Only those in BT_CONFIG or BT_CONNECTED states can be
+ * considered connected.
+ */
+- if (conn &&
+- (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
+- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
++ if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
+ mgmt_device_connected(hdev, conn, name, name_len);
+
+ if (discov->state == DISCOVERY_STOPPED)
+@@ -3023,8 +2971,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+
+- hci_conn_check_pending(hdev);
+-
+ if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ return;
+
+@@ -3151,10 +3097,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
+ &ev->bdaddr,
+ BDADDR_BREDR)) {
+- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+- HCI_ROLE_SLAVE);
+- if (!conn) {
+- bt_dev_err(hdev, "no memory for new conn");
++ conn = hci_conn_add_unset(hdev, ev->link_type,
++ &ev->bdaddr, HCI_ROLE_SLAVE);
++ if (IS_ERR(conn)) {
++ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ goto unlock;
+ }
+ } else {
+@@ -3207,6 +3153,31 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ if (test_bit(HCI_ENCRYPT, &hdev->flags))
+ set_bit(HCI_CONN_ENCRYPT, &conn->flags);
+
++ /* "Link key request" completed ahead of "connect request" completes */
++ if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
++ ev->link_type == ACL_LINK) {
++ struct link_key *key;
++ struct hci_cp_read_enc_key_size cp;
++
++ key = hci_find_link_key(hdev, &ev->bdaddr);
++ if (key) {
++ set_bit(HCI_CONN_ENCRYPT, &conn->flags);
++
++ if (!read_key_size_capable(hdev)) {
++ conn->enc_key_size = HCI_LINK_KEY_SIZE;
++ } else {
++ cp.handle = cpu_to_le16(conn->handle);
++ if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
++ sizeof(cp), &cp)) {
++ bt_dev_err(hdev, "sending read key size failed");
++ conn->enc_key_size = HCI_LINK_KEY_SIZE;
++ }
++ }
++
++ hci_encrypt_cfm(conn, ev->status);
++ }
++ }
++
+ /* Get remote features */
+ if (conn->type == ACL_LINK) {
+ struct hci_cp_read_remote_features cp;
+@@ -3246,8 +3217,6 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+
+ unlock:
+ hci_dev_unlock(hdev);
+-
+- hci_conn_check_pending(hdev);
+ }
+
+ static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
+@@ -3317,10 +3286,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ &ev->bdaddr);
+ if (!conn) {
+- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+- HCI_ROLE_SLAVE);
+- if (!conn) {
+- bt_dev_err(hdev, "no memory for new connection");
++ conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
++ HCI_ROLE_SLAVE);
++ if (IS_ERR(conn)) {
++ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ goto unlock;
+ }
+ }
+@@ -3484,14 +3453,8 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
+
+ if (!ev->status) {
+ clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+-
+- if (!hci_conn_ssp_enabled(conn) &&
+- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+- bt_dev_info(hdev, "re-auth of legacy device is not possible.");
+- } else {
+- set_bit(HCI_CONN_AUTH, &conn->flags);
+- conn->sec_level = conn->pending_sec_level;
+- }
++ set_bit(HCI_CONN_AUTH, &conn->flags);
++ conn->sec_level = conn->pending_sec_level;
+ } else {
+ if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
+ set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
+@@ -3500,7 +3463,6 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
+ }
+
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+- clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hci_conn_ssp_enabled(conn)) {
+@@ -3547,8 +3509,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+
+- hci_conn_check_pending(hdev);
+-
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+@@ -3651,7 +3611,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
+ * controller really supports it. If it doesn't, assume
+ * the default size (16).
+ */
+- if (!(hdev->commands[20] & 0x10)) {
++ if (!read_key_size_capable(hdev)) {
+ conn->enc_key_size = HCI_LINK_KEY_SIZE;
+ goto notify;
+ }
+@@ -3683,12 +3643,8 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
+ if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
+- sizeof(cp), &cp)) {
++ sizeof(cp), &cp))
+ bt_dev_err(hdev, "write auth payload timeout failed");
+- goto notify;
+- }
+-
+- goto unlock;
+ }
+
+ notify:
+@@ -3751,14 +3707,15 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
+ goto unlock;
+ }
+
+- if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
++ if (!ev->status) {
+ struct hci_cp_remote_name_req cp;
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+ hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
+- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
++ } else {
+ mgmt_device_connected(hdev, conn, NULL, 0);
++ }
+
+ if (!hci_outgoing_auth_needed(hdev, conn)) {
+ conn->state = BT_CONNECTED;
+@@ -3809,6 +3766,9 @@ static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
+ BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
+ hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
+
++ if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
++ return HCI_ERROR_INVALID_PARAMETERS;
++
+ return rp->status;
+ }
+
+@@ -3931,6 +3891,11 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
+ * last.
+ */
+ hci_connect_cfm(conn, rp->status);
++
++ /* Notify device connected in case it is a BIG Sync */
++ if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
++ mgmt_device_connected(hdev, conn, NULL, 0);
++
+ break;
+ }
+
+@@ -4095,12 +4060,6 @@ static const struct hci_cc {
+ HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
+ sizeof(struct hci_rp_read_page_scan_type)),
+ HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
+- HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
+- sizeof(struct hci_rp_read_data_block_size)),
+- HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
+- sizeof(struct hci_rp_read_flow_control_mode)),
+- HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
+- sizeof(struct hci_rp_read_local_amp_info)),
+ HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
+ sizeof(struct hci_rp_read_clock)),
+ HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
+@@ -4376,7 +4335,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
+ * (since for this kind of commands there will not be a command
+ * complete event).
+ */
+- if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
++ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
+ hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
+ req_complete_skb);
+ if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
+@@ -4435,11 +4394,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+ flex_array_size(ev, handles, ev->num)))
+ return;
+
+- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+- bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
+- return;
+- }
+-
+ bt_dev_dbg(hdev, "num %d", ev->num);
+
+ for (i = 0; i < ev->num; i++) {
+@@ -4507,78 +4461,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+ queue_work(hdev->workqueue, &hdev->tx_work);
+ }
+
+-static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
+- __u16 handle)
+-{
+- struct hci_chan *chan;
+-
+- switch (hdev->dev_type) {
+- case HCI_PRIMARY:
+- return hci_conn_hash_lookup_handle(hdev, handle);
+- case HCI_AMP:
+- chan = hci_chan_lookup_handle(hdev, handle);
+- if (chan)
+- return chan->conn;
+- break;
+- default:
+- bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
+- break;
+- }
+-
+- return NULL;
+-}
+-
+-static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_ev_num_comp_blocks *ev = data;
+- int i;
+-
+- if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
+- flex_array_size(ev, handles, ev->num_hndl)))
+- return;
+-
+- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
+- bt_dev_err(hdev, "wrong event for mode %d",
+- hdev->flow_ctl_mode);
+- return;
+- }
+-
+- bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
+- ev->num_hndl);
+-
+- for (i = 0; i < ev->num_hndl; i++) {
+- struct hci_comp_blocks_info *info = &ev->handles[i];
+- struct hci_conn *conn = NULL;
+- __u16 handle, block_count;
+-
+- handle = __le16_to_cpu(info->handle);
+- block_count = __le16_to_cpu(info->blocks);
+-
+- conn = __hci_conn_lookup_handle(hdev, handle);
+- if (!conn)
+- continue;
+-
+- conn->sent -= block_count;
+-
+- switch (conn->type) {
+- case ACL_LINK:
+- case AMP_LINK:
+- hdev->block_cnt += block_count;
+- if (hdev->block_cnt > hdev->num_blocks)
+- hdev->block_cnt = hdev->num_blocks;
+- break;
+-
+- default:
+- bt_dev_err(hdev, "unknown type %d conn %p",
+- conn->type, conn);
+- break;
+- }
+- }
+-
+- queue_work(hdev->workqueue, &hdev->tx_work);
+-}
+-
+ static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+ {
+@@ -5005,8 +4887,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+ hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
+- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
++ } else {
+ mgmt_device_connected(hdev, conn, NULL, 0);
++ }
+
+ if (!hci_outgoing_auth_needed(hdev, conn)) {
+ conn->state = BT_CONNECTED;
+@@ -5324,9 +5207,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+- if (!conn || !hci_conn_ssp_enabled(conn))
++ if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ goto unlock;
+
++ /* Assume remote supports SSP since it has triggered this event */
++ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
++
+ hci_conn_hold(conn);
+
+ if (!hci_dev_test_flag(hdev, HCI_MGMT))
+@@ -5438,19 +5324,16 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
+ goto unlock;
+ }
+
+- /* If no side requires MITM protection; auto-accept */
++ /* If no side requires MITM protection, use JUST_CFM method */
+ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
+ (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
+
+- /* If we're not the initiators request authorization to
+- * proceed from user space (mgmt_user_confirm with
+- * confirm_hint set to 1). The exception is if neither
+- * side had MITM or if the local IO capability is
+- * NoInputNoOutput, in which case we do auto-accept
++ /* If we're not the initiator and the local IO capability is not
++ * NoInputNoOutput, use the JUST_WORKS method (mgmt_user_confirm
++ * with confirm_hint set to 1).
+ */
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+- conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
+- (loc_mitm || rem_mitm)) {
++ conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) {
+ bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
+ confirm_hint = 1;
+ goto confirm;
+@@ -5667,150 +5550,6 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
+ hci_dev_unlock(hdev);
+ }
+
+-#if IS_ENABLED(CONFIG_BT_HS)
+-static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_ev_channel_selected *ev = data;
+- struct hci_conn *hcon;
+-
+- bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+- if (!hcon)
+- return;
+-
+- amp_read_loc_assoc_final_data(hdev, hcon);
+-}
+-
+-static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_ev_phy_link_complete *ev = data;
+- struct hci_conn *hcon, *bredr_hcon;
+-
+- bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
+- ev->status);
+-
+- hci_dev_lock(hdev);
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+- if (!hcon)
+- goto unlock;
+-
+- if (!hcon->amp_mgr)
+- goto unlock;
+-
+- if (ev->status) {
+- hci_conn_del(hcon);
+- goto unlock;
+- }
+-
+- bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
+-
+- hcon->state = BT_CONNECTED;
+- bacpy(&hcon->dst, &bredr_hcon->dst);
+-
+- hci_conn_hold(hcon);
+- hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+- hci_conn_drop(hcon);
+-
+- hci_debugfs_create_conn(hcon);
+- hci_conn_add_sysfs(hcon);
+-
+- amp_physical_cfm(bredr_hcon, hcon);
+-
+-unlock:
+- hci_dev_unlock(hdev);
+-}
+-
+-static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_ev_logical_link_complete *ev = data;
+- struct hci_conn *hcon;
+- struct hci_chan *hchan;
+- struct amp_mgr *mgr;
+-
+- bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
+- le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+- if (!hcon)
+- return;
+-
+- /* Create AMP hchan */
+- hchan = hci_chan_create(hcon);
+- if (!hchan)
+- return;
+-
+- hchan->handle = le16_to_cpu(ev->handle);
+- hchan->amp = true;
+-
+- BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+-
+- mgr = hcon->amp_mgr;
+- if (mgr && mgr->bredr_chan) {
+- struct l2cap_chan *bredr_chan = mgr->bredr_chan;
+-
+- l2cap_chan_lock(bredr_chan);
+-
+- bredr_chan->conn->mtu = hdev->block_mtu;
+- l2cap_logical_cfm(bredr_chan, hchan, 0);
+- hci_conn_hold(hcon);
+-
+- l2cap_chan_unlock(bredr_chan);
+- }
+-}
+-
+-static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_ev_disconn_logical_link_complete *ev = data;
+- struct hci_chan *hchan;
+-
+- bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
+- le16_to_cpu(ev->handle), ev->status);
+-
+- if (ev->status)
+- return;
+-
+- hci_dev_lock(hdev);
+-
+- hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+- if (!hchan || !hchan->amp)
+- goto unlock;
+-
+- amp_destroy_logical_link(hchan, ev->reason);
+-
+-unlock:
+- hci_dev_unlock(hdev);
+-}
+-
+-static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
+- struct sk_buff *skb)
+-{
+- struct hci_ev_disconn_phy_link_complete *ev = data;
+- struct hci_conn *hcon;
+-
+- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+-
+- if (ev->status)
+- return;
+-
+- hci_dev_lock(hdev);
+-
+- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+- if (hcon && hcon->type == AMP_LINK) {
+- hcon->state = BT_CLOSED;
+- hci_disconn_cfm(hcon, ev->reason);
+- hci_conn_del(hcon);
+- }
+-
+- hci_dev_unlock(hdev);
+-}
+-#endif
+-
+ static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
+ u8 bdaddr_type, bdaddr_t *local_rpa)
+ {
+@@ -5890,9 +5629,9 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ if (status)
+ goto unlock;
+
+- conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
+- if (!conn) {
+- bt_dev_err(hdev, "no memory for new connection");
++ conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
++ if (IS_ERR(conn)) {
++ bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ goto unlock;
+ }
+
+@@ -5952,17 +5691,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+
+ conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
+
+- if (handle > HCI_CONN_HANDLE_MAX) {
+- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+- HCI_CONN_HANDLE_MAX);
+- status = HCI_ERROR_INVALID_PARAMETERS;
+- }
+-
+ /* All connection failure handling is taken care of by the
+ * hci_conn_failed function which is triggered by the HCI
+ * request completion callbacks used for connecting.
+ */
+- if (status)
++ if (status || hci_conn_set_handle(conn, handle))
+ goto unlock;
+
+ /* Drop the connection if it has been aborted */
+@@ -5982,11 +5715,9 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ goto unlock;
+ }
+
+- if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+- mgmt_device_connected(hdev, conn, NULL, 0);
++ mgmt_device_connected(hdev, conn, NULL, 0);
+
+ conn->sec_level = BT_SECURITY_LOW;
+- conn->handle = handle;
+ conn->state = BT_CONFIG;
+
+ /* Store current advertising instance as connection advertising instance
+@@ -6603,7 +6334,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ struct hci_ev_le_pa_sync_established *ev = data;
+ int mask = hdev->link_mode;
+ __u8 flags = 0;
+- struct hci_conn *bis;
++ struct hci_conn *pa_sync;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+
+@@ -6620,20 +6351,19 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ if (!(flags & HCI_PROTO_DEFER))
+ goto unlock;
+
+- /* Add connection to indicate the PA sync event */
+- bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+- HCI_ROLE_SLAVE);
++ if (ev->status) {
++ /* Add connection to indicate the failed PA sync event */
++ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++ HCI_ROLE_SLAVE);
+
+- if (!bis)
+- goto unlock;
++ if (!pa_sync)
++ goto unlock;
+
+- if (ev->status)
+- set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags);
+- else
+- set_bit(HCI_CONN_PA_SYNC, &bis->flags);
++ set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
+
+- /* Notify connection to iso layer */
+- hci_connect_cfm(bis, ev->status);
++ /* Notify iso layer */
++ hci_connect_cfm(pa_sync, ev->status);
++ }
+
+ unlock:
+ hci_dev_unlock(hdev);
+@@ -6684,7 +6414,7 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
+ * transition into connected state and mark it as
+ * successful.
+ */
+- if (!conn->out && ev->status == 0x1a &&
++ if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
+ (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
+ status = 0x00;
+ else
+@@ -6797,6 +6527,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_UNKNOWN_CONN_ID);
+
++ if (max > hcon->le_conn_max_interval)
++ return send_conn_param_neg_reply(hdev, handle,
++ HCI_ERROR_INVALID_LL_PARAMS);
++
+ if (hci_check_conn_params(min, max, latency, timeout))
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_INVALID_LL_PARAMS);
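The new guard above rejects any request to widen the connection interval beyond the locally negotiated maximum before the generic validity check runs. For reference, below is a minimal sketch, not the kernel's exact helper, of the Core-spec rule that hci_check_conn_params() enforces; the function name and return convention here are illustrative.

    #include <stdint.h>

    /* Sketch only: validate LE connection parameters. Intervals are in
     * 1.25 ms units (6..3200 = 7.5 ms..4 s), the timeout in 10 ms units
     * (10..3200 = 100 ms..32 s), latency capped at 499 events.
     */
    static int check_conn_params(uint16_t min, uint16_t max,
                                 uint16_t latency, uint16_t timeout)
    {
        if (min > max || min < 6 || max > 3200)
            return -1;
        if (timeout < 10 || timeout > 3200)
            return -1;
        if (latency > 499)
            return -1;
        /* The supervision timeout must cover two full (1 + latency)
         * intervals: timeout * 10 ms >= (1 + latency) * max * 1.25 ms * 2,
         * which reduces to timeout * 4 >= (1 + latency) * max.
         */
        if ((uint32_t)timeout * 4 < (uint32_t)(1 + latency) * max)
            return -1;
        return 0;
    }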
+@@ -6896,6 +6630,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ struct bt_iso_qos *qos;
+ bool pending = false;
+ u16 handle = __le16_to_cpu(ev->handle);
++ u32 c_sdu_interval, p_sdu_interval;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+
+@@ -6920,12 +6655,25 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+
+ pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
+
+- /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
+- qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
+- qos->ucast.out.interval = qos->ucast.in.interval;
++ /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
++ * page 3075:
++ * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) x
++ * ISO_Interval + SDU_Interval_C_To_P
++ * ...
++ * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
++ * Transport_Latency
++ */
++ c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
++ (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
++ get_unaligned_le24(ev->c_latency);
++ p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
++ (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
++ get_unaligned_le24(ev->p_latency);
+
+ switch (conn->role) {
+ case HCI_ROLE_SLAVE:
++ qos->ucast.in.interval = c_sdu_interval;
++ qos->ucast.out.interval = p_sdu_interval;
+ /* Convert Transport Latency (us) to Latency (msec) */
+ qos->ucast.in.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+@@ -6939,6 +6687,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.out.phy = ev->p_phy;
+ break;
+ case HCI_ROLE_MASTER:
++ qos->ucast.in.interval = p_sdu_interval;
++ qos->ucast.out.interval = c_sdu_interval;
+ /* Convert Transport Latency (us) to Latency (msec) */
+ qos->ucast.out.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
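A worked example of the derivation quoted above, with made-up values: an ISO_Interval of 8 slots (8 x 1.25 ms = 10 ms), a flush timeout of 2 events, a CIG_Sync_Delay of 4000 us and a transport latency of 14000 us give a 10 ms SDU interval.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative numbers only; mirrors the arithmetic in the hunk
     * above: SDU_Interval = CIG_Sync_Delay + FT * ISO_Interval * 1250
     *                       - Transport_Latency (all in us).
     */
    int main(void)
    {
        uint32_t cig_sync_delay = 4000;        /* us */
        uint8_t  ft = 2;                       /* flush timeout, events */
        uint16_t iso_interval = 8;             /* 8 * 1.25 ms = 10 ms */
        uint32_t transport_latency = 14000;    /* us */

        uint32_t sdu_interval = cig_sync_delay +
                                ft * iso_interval * 1250 -
                                transport_latency;

        printf("SDU interval: %u us\n", sdu_interval); /* 10000 us */
        return 0;
    }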
+@@ -7020,12 +6770,12 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
+
+ cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
+ if (!cis) {
+- cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
+- if (!cis) {
++ cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
++ cis_handle);
++ if (IS_ERR(cis)) {
+ hci_le_reject_cis(hdev, ev->cis_handle);
+ goto unlock;
+ }
+- cis->handle = cis_handle;
+ }
+
+ cis->iso_qos.ucast.cig = ev->cig_id;
+@@ -7125,7 +6875,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ hci_dev_lock(hdev);
+
+ if (!ev->status) {
+- pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle);
++ pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
+ if (pa_sync)
+ /* Also mark the BIG sync established event on the
+ * associated PA sync hcon
+@@ -7139,11 +6889,14 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+
+ bis = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!bis) {
++ if (handle > HCI_CONN_HANDLE_MAX) {
++ bt_dev_dbg(hdev, "ignore too large handle %u", handle);
++ continue;
++ }
+ bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+- HCI_ROLE_SLAVE);
+- if (!bis)
++ HCI_ROLE_SLAVE, handle);
++ if (IS_ERR(bis))
+ continue;
+- bis->handle = handle;
+ }
+
+ if (ev->status != 0x42)
+@@ -7172,6 +6925,8 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ u16 handle = le16_to_cpu(ev->bis[i]);
+
+ bis = hci_conn_hash_lookup_handle(hdev, handle);
++ if (!bis)
++ continue;
+
+ set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
+ hci_connect_cfm(bis, ev->status);
+@@ -7186,15 +6941,45 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+ struct hci_evt_le_big_info_adv_report *ev = data;
+ int mask = hdev->link_mode;
+ __u8 flags = 0;
++ struct hci_conn *pa_sync;
+
+ bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
+
+ hci_dev_lock(hdev);
+
+ mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
+- if (!(mask & HCI_LM_ACCEPT))
++ if (!(mask & HCI_LM_ACCEPT)) {
+ hci_le_pa_term_sync(hdev, ev->sync_handle);
++ goto unlock;
++ }
++
++ if (!(flags & HCI_PROTO_DEFER))
++ goto unlock;
++
++ pa_sync = hci_conn_hash_lookup_pa_sync_handle
++ (hdev,
++ le16_to_cpu(ev->sync_handle));
++
++ if (pa_sync)
++ goto unlock;
++
++ /* Add connection to indicate the PA sync event */
++ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++ HCI_ROLE_SLAVE);
++
++ if (IS_ERR(pa_sync))
++ goto unlock;
+
++ pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
++ set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
++
++ /* Notify iso layer */
++ hci_connect_cfm(pa_sync, 0x00);
++
++ /* Notify MGMT layer */
++ mgmt_device_connected(hdev, pa_sync, NULL, 0);
++
++unlock:
+ hci_dev_unlock(hdev);
+ }
+
+@@ -7304,10 +7089,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
+ bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
+
+ /* Only match event if command OGF is for LE */
+- if (hdev->sent_cmd &&
+- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
+- hci_skb_event(hdev->sent_cmd) == ev->subevent) {
+- *opcode = hci_skb_opcode(hdev->sent_cmd);
++ if (hdev->req_skb &&
++ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
++ hci_skb_event(hdev->req_skb) == ev->subevent) {
++ *opcode = hci_skb_opcode(hdev->req_skb);
+ hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
+ req_complete_skb);
+ }
+@@ -7407,10 +7192,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
+ * keep track of the bdaddr of the connection event that woke us up.
+ */
+ if (event == HCI_EV_CONN_REQUEST) {
+- bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
++ bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+ hdev->wake_addr_type = BDADDR_BREDR;
+ } else if (event == HCI_EV_CONN_COMPLETE) {
+- bacpy(&hdev->wake_addr, &conn_request->bdaddr);
++ bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+ hdev->wake_addr_type = BDADDR_BREDR;
+ } else if (event == HCI_EV_LE_META) {
+ struct hci_ev_le_meta *le_ev = (void *)skb->data;
+@@ -7606,28 +7391,6 @@ static const struct hci_ev {
+ /* [0x3e = HCI_EV_LE_META] */
+ HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
+ sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
+-#if IS_ENABLED(CONFIG_BT_HS)
+- /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
+- HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
+- sizeof(struct hci_ev_phy_link_complete)),
+- /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
+- HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
+- sizeof(struct hci_ev_channel_selected)),
+- /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
+- HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
+- hci_disconn_loglink_complete_evt,
+- sizeof(struct hci_ev_disconn_logical_link_complete)),
+- /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
+- HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
+- sizeof(struct hci_ev_logical_link_complete)),
+- /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
+- HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
+- hci_disconn_phylink_complete_evt,
+- sizeof(struct hci_ev_disconn_phy_link_complete)),
+-#endif
+- /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
+- HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
+- sizeof(struct hci_ev_num_comp_blocks)),
+ /* [0xff = HCI_EV_VENDOR] */
+ HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
+ };
+@@ -7694,10 +7457,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ }
+
+ /* Only match event if command OGF is not for LE */
+- if (hdev->sent_cmd &&
+- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
+- hci_skb_event(hdev->sent_cmd) == event) {
+- hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
++ if (hdev->req_skb &&
++ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
++ hci_skb_event(hdev->req_skb) == event) {
++ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
+ status, &req_complete, &req_complete_skb);
+ req_evt = event;
+ }
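Both of the matchers switched to req_skb above key off the opcode layout: the upper 6 bits of an HCI opcode are the OGF, the lower 10 bits the OCF, and OGF 0x08 is the LE Controller command group. A small self-check mirroring the hci_opcode_*() macros from hci.h; the macro names below are local stand-ins, not the kernel's.

    #include <stdint.h>
    #include <assert.h>

    #define OPCODE_PACK(ogf, ocf) ((uint16_t)(((ocf) & 0x03ff) | ((ogf) << 10)))
    #define OPCODE_OGF(op)        ((op) >> 10)

    int main(void)
    {
        /* LE Set Scan Enable: OGF 0x08, OCF 0x000c */
        uint16_t le_set_scan_enable = OPCODE_PACK(0x08, 0x000c);

        assert(le_set_scan_enable == 0x200c);
        assert(OPCODE_OGF(le_set_scan_enable) == 0x08);
        return 0;
    }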
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 6e023b0104b039..efea25eb56ce03 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -105,8 +105,10 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+- if (skb)
++ if (skb) {
++ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_get(skb);
++ }
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+ }
+@@ -895,7 +897,7 @@ void hci_request_setup(struct hci_dev *hdev)
+
+ void hci_request_cancel_all(struct hci_dev *hdev)
+ {
+- __hci_cmd_sync_cancel(hdev, ENODEV);
++ hci_cmd_sync_cancel_sync(hdev, ENODEV);
+
+ cancel_interleave_scan(hdev);
+ }
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 3e7cd330d731ac..69c2ba1e843eb4 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -101,7 +101,7 @@ static bool hci_sock_gen_cookie(struct sock *sk)
+ int id = hci_pi(sk)->cookie;
+
+ if (!id) {
+- id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
++ id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
+ if (id < 0)
+ id = 0xffffffff;
+
+@@ -119,7 +119,7 @@ static void hci_sock_free_cookie(struct sock *sk)
+
+ if (id) {
+ hci_pi(sk)->cookie = 0xffffffff;
+- ida_simple_remove(&sock_cookie_ida, id);
++ ida_free(&sock_cookie_ida, id);
+ }
+ }
+
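These two hunks are a mechanical migration off the deprecated ida_simple_*() wrappers. A sketch of the equivalence, using a standalone IDA (the example_* names are illustrative; the IDA calls themselves are the real interfaces from linux/idr.h):

    #include <linux/idr.h>

    /* ida_simple_get(ida, start, end, gfp) with end == 0 meant "no upper
     * bound", which ida_alloc_min(ida, start, gfp) expresses directly;
     * ida_simple_remove() likewise maps onto ida_free().
     */
    static DEFINE_IDA(example_ida);

    static int example_get_cookie(void)
    {
        /* Allocate the smallest free id >= 1 */
        return ida_alloc_min(&example_ida, 1, GFP_KERNEL);
    }

    static void example_put_cookie(int id)
    {
        ida_free(&example_ida, id);
    }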
+@@ -485,7 +485,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+ return NULL;
+
+ ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+- ni->type = hdev->dev_type;
++ ni->type = 0x00; /* Old hdev->dev_type */
+ ni->bus = hdev->bus;
+ bacpy(&ni->bdaddr, &hdev->bdaddr);
+ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+@@ -1007,9 +1007,6 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ return -EOPNOTSUPP;
+
+- if (hdev->dev_type != HCI_PRIMARY)
+- return -EOPNOTSUPP;
+-
+ switch (cmd) {
+ case HCISETRAW:
+ if (!capable(CAP_NET_ADMIN))
+@@ -1946,10 +1943,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+
+ switch (optname) {
+ case HCI_DATA_DIR:
+- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
+@@ -1958,10 +1954,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ break;
+
+ case HCI_TIME_STAMP:
+- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
+@@ -1979,11 +1974,9 @@ static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
+ uf.event_mask[1] = *((u32 *) f->event_mask + 1);
+ }
+
+- len = min_t(unsigned int, len, sizeof(uf));
+- if (copy_from_sockptr(&uf, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&uf, sizeof(uf), optval, len);
++ if (err)
+ break;
+- }
+
+ if (!capable(CAP_NET_RAW)) {
+ uf.type_mask &= hci_sec_filter.type_mask;
+@@ -2042,10 +2035,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ goto done;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, len);
++ if (err)
+ break;
+- }
+
+ hci_pi(sk)->mtu = opt;
+ break;
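The repeated setsockopt conversions in this file (and in iso.c further down) all follow one pattern: the open-coded copy_from_sockptr() plus -EFAULT dance is replaced by bt_copy_from_sockptr(), which also rejects user buffers shorter than the destination instead of silently copying fewer bytes. A sketch matching the helper as the series defines it elsewhere (in include/net/bluetooth/bluetooth.h):

    /* Returns -EINVAL for a short user buffer, -EFAULT on copy failure,
     * 0 on success.
     */
    static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
                                           sockptr_t src, size_t src_size)
    {
        if (dst_size > src_size)
            return -EINVAL;

        return copy_from_sockptr(dst, src, dst_size);
    }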
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index a15ab0b874a9d5..75515a1d2923aa 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -32,6 +32,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+
++ /* Free the request command so it is not used as response */
++ kfree_skb(hdev->req_skb);
++ hdev->req_skb = NULL;
++
+ if (skb) {
+ struct sock *sk = hci_skb_sk(skb);
+
+@@ -39,7 +43,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ if (sk)
+ sock_put(sk);
+
+- hdev->req_skb = skb_get(skb);
++ hdev->req_rsp = skb_get(skb);
+ }
+
+ wake_up_interruptible(&hdev->req_wait_q);
+@@ -110,7 +114,7 @@ static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
+ skb_queue_tail(&req->cmd_q, skb);
+ }
+
+-static int hci_cmd_sync_run(struct hci_request *req)
++static int hci_req_sync_run(struct hci_request *req)
+ {
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+@@ -152,7 +156,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ struct sk_buff *skb;
+ int err = 0;
+
+- bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
++ bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
+
+ hci_req_init(&req, hdev);
+
+@@ -160,7 +164,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+
+ hdev->req_status = HCI_REQ_PEND;
+
+- err = hci_cmd_sync_run(&req);
++ err = hci_req_sync_run(&req);
+ if (err < 0)
+ return ERR_PTR(err);
+
+@@ -187,8 +191,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+
+ hdev->req_status = 0;
+ hdev->req_result = 0;
+- skb = hdev->req_skb;
+- hdev->req_skb = NULL;
++ skb = hdev->req_rsp;
++ hdev->req_rsp = NULL;
+
+ bt_dev_dbg(hdev, "end: err %d", err);
+
+@@ -248,7 +252,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ if (IS_ERR(skb)) {
+ if (!event)
+- bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+@@ -276,6 +280,19 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ }
+ EXPORT_SYMBOL(__hci_cmd_sync_status);
+
++int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
++ const void *param, u32 timeout)
++{
++ int err;
++
++ hci_req_sync_lock(hdev);
++ err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
++ hci_req_sync_unlock(hdev);
++
++ return err;
++}
++EXPORT_SYMBOL(hci_cmd_sync_status);
++
+ static void hci_cmd_sync_work(struct work_struct *work)
+ {
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+@@ -634,6 +651,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
+ INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
+ }
+
++static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
++ struct hci_cmd_sync_work_entry *entry,
++ int err)
++{
++ if (entry->destroy)
++ entry->destroy(hdev, entry->data, err);
++
++ list_del(&entry->list);
++ kfree(entry);
++}
++
+ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ {
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+@@ -642,17 +670,12 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ cancel_work_sync(&hdev->reenable_adv_work);
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+- list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+- if (entry->destroy)
+- entry->destroy(hdev, entry->data, -ECANCELED);
+-
+- list_del(&entry->list);
+- kfree(entry);
+- }
++ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+ }
+
+-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
++void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+ {
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+@@ -660,26 +683,31 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+ hdev->req_result = err;
+ hdev->req_status = HCI_REQ_CANCELED;
+
+- cancel_delayed_work_sync(&hdev->cmd_timer);
+- cancel_delayed_work_sync(&hdev->ncmd_timer);
+- atomic_set(&hdev->cmd_cnt, 1);
+-
+- wake_up_interruptible(&hdev->req_wait_q);
++ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ }
+ }
++EXPORT_SYMBOL(hci_cmd_sync_cancel);
+
+-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
++/* Cancel ongoing command request synchronously:
++ *
++ * - Set result and mark status to HCI_REQ_CANCELED
++ * - Wake up the command sync thread
++ */
++void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
+ {
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ if (hdev->req_status == HCI_REQ_PEND) {
+- hdev->req_result = err;
++ /* req_result is __u32 so error must be positive to be properly
++ * propagated.
++ */
++ hdev->req_result = err < 0 ? -err : err;
+ hdev->req_status = HCI_REQ_CANCELED;
+
+- queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
++ wake_up_interruptible(&hdev->req_wait_q);
+ }
+ }
+-EXPORT_SYMBOL(hci_cmd_sync_cancel);
++EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
+
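The sign flip in hci_cmd_sync_cancel_sync() matters because req_result is a __u32: storing a negative errno raw would wrap to a huge positive value that callers could no longer negate back into an errno. A small userspace demonstration of the effect; the variable names are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int err = -19;               /* -ENODEV */
        uint32_t req_result = err;   /* wraps to 4294967277 */

        printf("raw store:   %u\n", req_result);

        req_result = err < 0 ? -err : err;   /* store the magnitude */
        printf("fixed store: %u, propagated as %d\n",
               req_result, -(int)req_result);
        return 0;
    }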
+ /* Submit HCI command to be run as cmd_sync_work:
+ *
+@@ -735,6 +763,153 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ }
+ EXPORT_SYMBOL(hci_cmd_sync_queue);
+
++static struct hci_cmd_sync_work_entry *
++_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry, *tmp;
++
++ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
++ if (func && entry->func != func)
++ continue;
++
++ if (data && entry->data != data)
++ continue;
++
++ if (destroy && entry->destroy != destroy)
++ continue;
++
++ return entry;
++ }
++
++ return NULL;
++}
++
++/* Queue HCI command entry once:
++ *
++ * - Look up whether an entry already exists and only if it doesn't create a
++ * new entry and queue it.
++ */
++int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
++ return 0;
++
++ return hci_cmd_sync_queue(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_queue_once);
++
++/* Run HCI command:
++ *
++ * - hdev must be running
++ * - If on cmd_sync_work then run immediately, otherwise queue.
++ */
++int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ /* Only queue command if hdev is running which means it had been opened
++ * and is either on init phase or is already up.
++ */
++ if (!test_bit(HCI_RUNNING, &hdev->flags))
++ return -ENETDOWN;
++
++ /* If on cmd_sync_work then run immediately otherwise queue */
++ if (current_work() == &hdev->cmd_sync_work)
++ return func(hdev, data);
++
++ return hci_cmd_sync_submit(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_run);
++
++/* Run HCI command entry once:
++ *
++ * - Look up whether an entry already exists and only if it doesn't create a
++ * new entry and run it.
++ * - If on cmd_sync_work then run immediately, otherwise queue.
++ */
++int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
++ return 0;
++
++ return hci_cmd_sync_run(hdev, func, data, destroy);
++}
++EXPORT_SYMBOL(hci_cmd_sync_run_once);
++
++/* Lookup HCI command entry:
++ *
++ * - Return the first entry that matches by function callback, data or
++ * destroy callback.
++ */
++struct hci_cmd_sync_work_entry *
++hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry;
++
++ mutex_lock(&hdev->cmd_sync_work_lock);
++ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
++ mutex_unlock(&hdev->cmd_sync_work_lock);
++
++ return entry;
++}
++EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
++
++/* Cancel HCI command entry */
++void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
++ struct hci_cmd_sync_work_entry *entry)
++{
++ mutex_lock(&hdev->cmd_sync_work_lock);
++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
++ mutex_unlock(&hdev->cmd_sync_work_lock);
++}
++EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
++
++/* Dequeue one HCI command entry:
++ *
++ * - Look up and cancel the first entry that matches.
++ */
++bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
++ hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry;
++
++ entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
++ if (!entry)
++ return false;
++
++ hci_cmd_sync_cancel_entry(hdev, entry);
++
++ return true;
++}
++EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
++
++/* Dequeue HCI command entry:
++ *
++ * - Look up and cancel any entry that matches by function callback, data or
++ * destroy callback.
++ */
++bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
++ void *data, hci_cmd_sync_work_destroy_t destroy)
++{
++ struct hci_cmd_sync_work_entry *entry;
++ bool ret = false;
++
++ mutex_lock(&hdev->cmd_sync_work_lock);
++ while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
++ destroy))) {
++ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
++ ret = true;
++ }
++ mutex_unlock(&hdev->cmd_sync_work_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(hci_cmd_sync_dequeue);
++
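A hedged usage sketch of the queue-once/dequeue pattern added above. my_rescan_sync() is a hypothetical work function, not part of the patch; entries are matched by whichever of (func, data, destroy) is non-NULL, so a second queue_once while one is pending is a no-op, and a dequeue cancels a pending one.

    static int my_rescan_sync(struct hci_dev *hdev, void *data)
    {
        return hci_update_passive_scan_sync(hdev);
    }

    static void example(struct hci_dev *hdev)
    {
        /* Queued at most once, no matter how often this runs */
        hci_cmd_sync_queue_once(hdev, my_rescan_sync, NULL, NULL);

        /* Later: cancel it if it has not run yet */
        hci_cmd_sync_dequeue_once(hdev, my_rescan_sync, NULL, NULL);
    }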
+ int hci_update_eir_sync(struct hci_dev *hdev)
+ {
+ struct hci_cp_write_eir cp;
+@@ -1312,7 +1487,7 @@ int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
+ return hci_enable_ext_advertising_sync(hdev, instance);
+ }
+
+-static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
++int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
+ {
+ struct hci_cp_le_set_per_adv_enable cp;
+ struct adv_info *adv = NULL;
+@@ -2274,8 +2449,11 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+
+ /* During suspend, only wakeable devices can be in acceptlist */
+ if (hdev->suspended &&
+- !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
++ !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
++ hci_le_del_accept_list_sync(hdev, &params->addr,
++ params->addr_type);
+ return 0;
++ }
+
+ /* Select filter policy to accept all advertising */
+ if (*num_entries >= hdev->le_accept_list_size)
+@@ -2667,6 +2845,14 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ return filter_policy;
+ }
+
++static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
++ u8 type, u16 interval, u16 window)
++{
++ cp->type = type;
++ cp->interval = cpu_to_le16(interval);
++ cp->window = cpu_to_le16(window);
++}
++
+ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+@@ -2674,7 +2860,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ struct hci_cp_le_set_ext_scan_params *cp;
+ struct hci_cp_le_scan_phy_params *phy;
+ u8 data[sizeof(*cp) + sizeof(*phy) * 2];
+- u8 num_phy = 0;
++ u8 num_phy = 0x00;
+
+ cp = (void *)data;
+ phy = (void *)cp->data;
+@@ -2684,28 +2870,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ cp->own_addr_type = own_addr_type;
+ cp->filter_policy = filter_policy;
+
++ /* If PA Sync is in progress then select the PHY based on the
++ * hci_conn.iso_qos.
++ */
++ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
++ struct hci_cp_le_add_to_accept_list *sent;
++
++ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
++ if (sent) {
++ struct hci_conn *conn;
++
++ conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
++ &sent->bdaddr);
++ if (conn) {
++ struct bt_iso_qos *qos = &conn->iso_qos;
++
++ if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
++ qos->bcast.in.phy & BT_ISO_PHY_2M) {
++ cp->scanning_phys |= LE_SCAN_PHY_1M;
++ hci_le_scan_phy_params(phy, type,
++ interval,
++ window);
++ num_phy++;
++ phy++;
++ }
++
++ if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
++ cp->scanning_phys |= LE_SCAN_PHY_CODED;
++ hci_le_scan_phy_params(phy, type,
++ interval * 3,
++ window * 3);
++ num_phy++;
++ phy++;
++ }
++
++ if (num_phy)
++ goto done;
++ }
++ }
++ }
++
+ if (scan_1m(hdev) || scan_2m(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+-
+- phy->type = type;
+- phy->interval = cpu_to_le16(interval);
+- phy->window = cpu_to_le16(window);
+-
++ hci_le_scan_phy_params(phy, type, interval, window);
+ num_phy++;
+ phy++;
+ }
+
+ if (scan_coded(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+-
+- phy->type = type;
+- phy->interval = cpu_to_le16(interval);
+- phy->window = cpu_to_le16(window);
+-
++ hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
+ num_phy++;
+ phy++;
+ }
+
++done:
++ if (!num_phy)
++ return -EINVAL;
++
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ sizeof(*cp) + sizeof(*phy) * num_phy,
+ data, HCI_CMD_TIMEOUT);
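A hedged sketch of the command layout this function builds: one hci_cp_le_scan_phy_params slot per bit set in scanning_phys, in PHY order, with the coded-PHY slot given the tripled timing this patch chooses (presumably to account for the longer airtime of coded-PHY advertising). The 0x0060/0x0030 interval/window values are examples in 0.625 ms units, i.e. 60 ms/30 ms.

    static void example_fill_scan_params(void)
    {
        u8 data[sizeof(struct hci_cp_le_set_ext_scan_params) +
                2 * sizeof(struct hci_cp_le_scan_phy_params)] = {};
        struct hci_cp_le_set_ext_scan_params *cp = (void *)data;
        struct hci_cp_le_scan_phy_params *phy = (void *)cp->data;

        cp->scanning_phys = LE_SCAN_PHY_1M | LE_SCAN_PHY_CODED;
        /* 1M PHY slot, then coded PHY slot at triple the timing */
        hci_le_scan_phy_params(phy++, LE_SCAN_PASSIVE, 0x0060, 0x0030);
        hci_le_scan_phy_params(phy, LE_SCAN_PASSIVE,
                               0x0060 * 3, 0x0030 * 3);
    }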
+@@ -2793,6 +3015,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
+ */
+ filter_policy = hci_update_accept_list_sync(hdev);
+
++ /* If suspended and filter_policy set to 0x00 (no acceptlist) then
++ * passive scanning cannot be started since that would require the host
++ * to be woken up to process the reports.
++ */
++ if (hdev->suspended && !filter_policy) {
++ /* If the accept list is empty there is no need to scan
++ * while suspended.
++ */
++ if (list_empty(&hdev->le_accept_list))
++ return 0;
++
++ /* If there are devices in the accept_list that means some
++ * devices could not be programmed, which in the non-suspended
++ * case means filter_policy needs to be set to 0x00 so the host
++ * filters. But since we are treating the suspended case, we can
++ * ignore devices needing the host to filter, to allow devices
++ * in the acceptlist to be able to wake up the system.
++ */
++ filter_policy = 0x01;
++ }
++
+ /* When the controller is using random resolvable addresses and
+ * with that having LE privacy enabled, then controllers with
+ * Extended Scanner Filter Policies support can now enable support
+@@ -2815,6 +3058,20 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
+ } else if (hci_is_adv_monitoring(hdev)) {
+ window = hdev->le_scan_window_adv_monitor;
+ interval = hdev->le_scan_int_adv_monitor;
++
++ /* Disable duplicates filter when scanning for advertisement
++ * monitor for the following reasons.
++ *
++ * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
++ * controllers ignore RSSI_Sampling_Period when the duplicates
++ * filter is enabled.
++ *
++ * For SW pattern filtering, when we're not doing interleaved
++ * scanning, it is necessary to disable duplicates filter,
++ * otherwise hosts can only receive one advertisement and it's
++ * impossible to know if a peer is still in range.
++ */
++ filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
+ } else {
+ window = hdev->le_scan_window;
+ interval = hdev->le_scan_interval;
+@@ -2944,7 +3201,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+- return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
++ return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
++ NULL);
+ }
+
+ int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
+@@ -3280,7 +3538,10 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
+ if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
+ return;
+
+- bacpy(&hdev->public_addr, &ba);
++ if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
++ baswap(&hdev->public_addr, &ba);
++ else
++ bacpy(&hdev->public_addr, &ba);
+ }
+
+ struct hci_init_stage {
+@@ -3381,10 +3642,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev)
+ /* Read Local Supported Features. */
+ static int hci_read_local_features_sync(struct hci_dev *hdev)
+ {
+- /* Not all AMP controllers support this command */
+- if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
+- return 0;
+-
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+ }
+@@ -3419,51 +3676,6 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev)
+ return 0;
+ }
+
+-/* Read Local AMP Info */
+-static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
+-{
+- return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
+- 0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* Read Data Blk size */
+-static int hci_read_data_block_size_sync(struct hci_dev *hdev)
+-{
+- return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
+- 0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* Read Flow Control Mode */
+-static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
+-{
+- return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
+- 0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* Read Location Data */
+-static int hci_read_location_data_sync(struct hci_dev *hdev)
+-{
+- return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
+- 0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* AMP Controller init stage 1 command sequence */
+-static const struct hci_init_stage amp_init1[] = {
+- /* HCI_OP_READ_LOCAL_VERSION */
+- HCI_INIT(hci_read_local_version_sync),
+- /* HCI_OP_READ_LOCAL_COMMANDS */
+- HCI_INIT(hci_read_local_cmds_sync),
+- /* HCI_OP_READ_LOCAL_AMP_INFO */
+- HCI_INIT(hci_read_local_amp_info_sync),
+- /* HCI_OP_READ_DATA_BLOCK_SIZE */
+- HCI_INIT(hci_read_data_block_size_sync),
+- /* HCI_OP_READ_FLOW_CONTROL_MODE */
+- HCI_INIT(hci_read_flow_control_mode_sync),
+- /* HCI_OP_READ_LOCATION_DATA */
+- HCI_INIT(hci_read_location_data_sync),
+- {}
+-};
+-
+ static int hci_init1_sync(struct hci_dev *hdev)
+ {
+ int err;
+@@ -3477,28 +3689,9 @@ static int hci_init1_sync(struct hci_dev *hdev)
+ return err;
+ }
+
+- switch (hdev->dev_type) {
+- case HCI_PRIMARY:
+- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
+- return hci_init_stage_sync(hdev, br_init1);
+- case HCI_AMP:
+- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+- return hci_init_stage_sync(hdev, amp_init1);
+- default:
+- bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
+- break;
+- }
+-
+- return 0;
++ return hci_init_stage_sync(hdev, br_init1);
+ }
+
+-/* AMP Controller init stage 2 command sequence */
+-static const struct hci_init_stage amp_init2[] = {
+- /* HCI_OP_READ_LOCAL_FEATURES */
+- HCI_INIT(hci_read_local_features_sync),
+- {}
+-};
+-
+ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+ static int hci_read_buffer_size_sync(struct hci_dev *hdev)
+ {
+@@ -3756,9 +3949,6 @@ static int hci_init2_sync(struct hci_dev *hdev)
+
+ bt_dev_dbg(hdev, "");
+
+- if (hdev->dev_type == HCI_AMP)
+- return hci_init_stage_sync(hdev, amp_init2);
+-
+ err = hci_init_stage_sync(hdev, hci_init2);
+ if (err)
+ return err;
+@@ -3800,12 +3990,14 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev)
+ if (lmp_bredr_capable(hdev)) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+
+- /* Don't set Disconnect Complete when suspended as that
+- * would wakeup the host when disconnecting due to
+- * suspend.
++ /* Don't set Disconnect Complete and mode change when
++ * suspended as that would wakeup the host when disconnecting
++ * due to suspend.
+ */
+- if (hdev->suspended)
++ if (hdev->suspended) {
+ events[0] &= 0xef;
++ events[2] &= 0xf7;
++ }
+ } else {
+ /* Use a different default for LE-only devices */
+ memset(events, 0, sizeof(events));
+@@ -4594,13 +4786,6 @@ static int hci_init_sync(struct hci_dev *hdev)
+ if (err < 0)
+ return err;
+
+- /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
+- * BR/EDR/LE type controllers. AMP controllers only need the
+- * first two stages of init.
+- */
+- if (hdev->dev_type != HCI_PRIMARY)
+- return 0;
+-
+ err = hci_init3_sync(hdev);
+ if (err < 0)
+ return err;
+@@ -4829,12 +5014,8 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ * In case of user channel usage, it is not important
+ * if a public address or static random address is
+ * available.
+- *
+- * This check is only valid for BR/EDR controllers
+- * since AMP controllers do not have an address.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+- hdev->dev_type == HCI_PRIMARY &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+ ret = -EADDRNOTAVAIL;
+@@ -4869,8 +5050,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ !hci_dev_test_flag(hdev, HCI_CONFIG) &&
+ !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+- hci_dev_test_flag(hdev, HCI_MGMT) &&
+- hdev->dev_type == HCI_PRIMARY) {
++ hci_dev_test_flag(hdev, HCI_MGMT)) {
+ ret = hci_powered_update_sync(hdev);
+ mgmt_power_on(hdev, ret);
+ }
+@@ -4897,6 +5077,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ hdev->sent_cmd = NULL;
+ }
+
++ if (hdev->req_skb) {
++ kfree_skb(hdev->req_skb);
++ hdev->req_skb = NULL;
++ }
++
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
+@@ -5011,8 +5196,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+
+ auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
+
+- if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
+- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
++ if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_MGMT))
+ __mgmt_power_off(hdev);
+
+@@ -5058,6 +5242,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ hdev->sent_cmd = NULL;
+ }
+
++ /* Drop last request */
++ if (hdev->req_skb) {
++ kfree_skb(hdev->req_skb);
++ hdev->req_skb = NULL;
++ }
++
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
+@@ -5068,9 +5258,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ hdev->flags &= BIT(HCI_RAW);
+ hci_dev_clear_volatile_flags(hdev);
+
+- /* Controller radio is available but is currently powered down */
+- hdev->amp_status = AMP_STATUS_POWERED_DOWN;
+-
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ bacpy(&hdev->random_addr, BDADDR_ANY);
+@@ -5107,8 +5294,7 @@ static int hci_power_on_sync(struct hci_dev *hdev)
+ */
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+- (hdev->dev_type == HCI_PRIMARY &&
+- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
++ (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+ hci_dev_close_sync(hdev);
+@@ -5205,32 +5391,30 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
+ if (!e)
+ return 0;
+
+- return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
++ /* Ignore cancel errors since it should interfere with stopping
++ * of the discovery.
++ */
++ hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
+ }
+
+ return 0;
+ }
+
+-static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
+- u8 reason)
+-{
+- struct hci_cp_disconn_phy_link cp;
+-
+- memset(&cp, 0, sizeof(cp));
+- cp.phy_handle = HCI_PHY_HANDLE(handle);
+- cp.reason = reason;
+-
+- return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
+- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+-}
+-
+ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+ {
+ struct hci_cp_disconnect cp;
+
+- if (conn->type == AMP_LINK)
+- return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
++ if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
++ /* This is a BIS connection, hci_conn_del will
++ * do the necessary cleanup.
++ */
++ hci_dev_lock(hdev);
++ hci_conn_failed(conn, reason);
++ hci_dev_unlock(hdev);
++
++ return 0;
++ }
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+@@ -5384,21 +5568,6 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+ err = hci_reject_conn_sync(hdev, conn, reason);
+ break;
+ case BT_OPEN:
+- hci_dev_lock(hdev);
+-
+- /* Cleanup bis or pa sync connections */
+- if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags) ||
+- test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags)) {
+- hci_conn_failed(conn, reason);
+- } else if (test_bit(HCI_CONN_PA_SYNC, &conn->flags) ||
+- test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
+- conn->state = BT_CLOSED;
+- hci_disconn_cfm(conn, reason);
+- hci_conn_del(conn);
+- }
+-
+- hci_dev_unlock(hdev);
+- return 0;
+ case BT_BOUND:
+ break;
+ default:
+@@ -5631,7 +5800,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+
+ bt_dev_dbg(hdev, "");
+
+- if (hci_dev_test_flag(hdev, HCI_INQUIRY))
++ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ return 0;
+
+ hci_dev_lock(hdev);
+@@ -6242,12 +6411,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
+ conn->conn_timeout, NULL);
+ }
+
+-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
++static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
+ {
+ struct hci_cp_le_create_conn cp;
+ struct hci_conn_params *params;
+ u8 own_addr_type;
+ int err;
++ struct hci_conn *conn = data;
++
++ if (!hci_conn_valid(hdev, conn))
++ return -ECANCELED;
++
++ bt_dev_dbg(hdev, "conn %p", conn);
++
++ clear_bit(HCI_CONN_SCANNING, &conn->flags);
++ conn->state = BT_CONNECT;
+
+ /* If requested to connect as peripheral use directed advertising */
+ if (conn->role == HCI_ROLE_SLAVE) {
+@@ -6565,3 +6743,125 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
+ return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
+ UINT_PTR(instance), NULL);
+ }
++
++static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
++{
++ struct hci_conn *conn = data;
++ struct inquiry_entry *ie;
++ struct hci_cp_create_conn cp;
++ int err;
++
++ if (!hci_conn_valid(hdev, conn))
++ return -ECANCELED;
++
++ /* Many controllers disallow HCI Create Connection while they are doing
++ * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
++ * Connection. This may cause the MGMT discovering state to become false
++ * without user space's request but it is okay since the MGMT Discovery
++ * APIs do not promise that discovery should be done forever. Instead,
++ * the user space monitors the status of MGMT discovering and it may
++ * request for discovery again when this flag becomes false.
++ */
++ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
++ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
++ NULL, HCI_CMD_TIMEOUT);
++ if (err)
++ bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
++ }
++
++ conn->state = BT_CONNECT;
++ conn->out = true;
++ conn->role = HCI_ROLE_MASTER;
++
++ conn->attempt++;
++
++ conn->link_policy = hdev->link_policy;
++
++ memset(&cp, 0, sizeof(cp));
++ bacpy(&cp.bdaddr, &conn->dst);
++ cp.pscan_rep_mode = 0x02;
++
++ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
++ if (ie) {
++ if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
++ cp.pscan_rep_mode = ie->data.pscan_rep_mode;
++ cp.pscan_mode = ie->data.pscan_mode;
++ cp.clock_offset = ie->data.clock_offset |
++ cpu_to_le16(0x8000);
++ }
++
++ memcpy(conn->dev_class, ie->data.dev_class, 3);
++ }
++
++ cp.pkt_type = cpu_to_le16(conn->pkt_type);
++ if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
++ cp.role_switch = 0x01;
++ else
++ cp.role_switch = 0x00;
++
++ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
++ sizeof(cp), &cp,
++ HCI_EV_CONN_COMPLETE,
++ conn->conn_timeout, NULL);
++}
++
++int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
++{
++ return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
++ NULL);
++}
++
++static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
++{
++ struct hci_conn *conn = data;
++
++ bt_dev_dbg(hdev, "err %d", err);
++
++ if (err == -ECANCELED)
++ return;
++
++ hci_dev_lock(hdev);
++
++ if (!hci_conn_valid(hdev, conn))
++ goto done;
++
++ if (!err) {
++ hci_connect_le_scan_cleanup(conn, 0x00);
++ goto done;
++ }
++
++ /* Check if connection is still pending */
++ if (conn != hci_lookup_le_connect(hdev))
++ goto done;
++
++ /* Flush to make sure we send create conn cancel command if needed */
++ flush_delayed_work(&conn->le_conn_timeout);
++ hci_conn_failed(conn, bt_status(err));
++
++done:
++ hci_dev_unlock(hdev);
++}
++
++int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
++{
++ return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
++ create_le_conn_complete);
++}
++
++int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
++{
++ if (conn->state != BT_OPEN)
++ return -EINVAL;
++
++ switch (conn->type) {
++ case ACL_LINK:
++ return !hci_cmd_sync_dequeue_once(hdev,
++ hci_acl_create_conn_sync,
++ conn, NULL);
++ case LE_LINK:
++ return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
++ conn, create_le_conn_complete);
++ }
++
++ return -ENOENT;
++}
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 15b33579007cb6..367e32fe30eb84 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -35,7 +35,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- BT_DBG("conn %p", conn);
++ bt_dev_dbg(hdev, "conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = &bt_class;
+@@ -48,27 +48,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- BT_DBG("conn %p", conn);
++ bt_dev_dbg(hdev, "conn %p", conn);
+
+ if (device_is_registered(&conn->dev))
+ return;
+
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+- if (device_add(&conn->dev) < 0) {
++ if (device_add(&conn->dev) < 0)
+ bt_dev_err(hdev, "failed to register connection device");
+- return;
+- }
+-
+- hci_dev_hold(hdev);
+ }
+
+ void hci_conn_del_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- if (!device_is_registered(&conn->dev))
++ bt_dev_dbg(hdev, "conn %p", conn);
++
++ if (!device_is_registered(&conn->dev)) {
++ /* If device_add() has *not* succeeded, use *only* put_device()
++ * to drop the reference count.
++ */
++ put_device(&conn->dev);
+ return;
++ }
+
+ while (1) {
+ struct device *dev;
+@@ -80,9 +83,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
+ put_device(dev);
+ }
+
+- device_del(&conn->dev);
+-
+- hci_dev_put(hdev);
++ device_unregister(&conn->dev);
+ }
+
+ static void bt_host_release(struct device *dev)
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 71248163ce9a5c..9b365fb44fac6d 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -52,6 +52,7 @@ static void iso_sock_kill(struct sock *sk);
+ enum {
+ BT_SK_BIG_SYNC,
+ BT_SK_PA_SYNC,
++ BT_SK_PA_SYNC_TERM,
+ };
+
+ struct iso_pinfo {
+@@ -77,8 +78,14 @@ static struct bt_iso_qos default_qos;
+ static bool check_ucast_qos(struct bt_iso_qos *qos);
+ static bool check_bcast_qos(struct bt_iso_qos *qos);
+ static bool iso_match_sid(struct sock *sk, void *data);
++static bool iso_match_sync_handle(struct sock *sk, void *data);
+ static void iso_sock_disconn(struct sock *sk);
+
++typedef bool (*iso_sock_match_t)(struct sock *sk, void *data);
++
++static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
++ iso_sock_match_t match, void *data);
++
+ /* ---- ISO timers ---- */
+ #define ISO_CONN_TIMEOUT (HZ * 40)
+ #define ISO_DISCONN_TIMEOUT (HZ * 2)
+@@ -187,10 +194,21 @@ static void iso_chan_del(struct sock *sk, int err)
+ sock_set_flag(sk, SOCK_ZAPPED);
+ }
+
++static bool iso_match_conn_sync_handle(struct sock *sk, void *data)
++{
++ struct hci_conn *hcon = data;
++
++ if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags))
++ return false;
++
++ return hcon->sync_handle == iso_pi(sk)->sync_handle;
++}
++
+ static void iso_conn_del(struct hci_conn *hcon, int err)
+ {
+ struct iso_conn *conn = hcon->iso_data;
+ struct sock *sk;
++ struct sock *parent;
+
+ if (!conn)
+ return;
+@@ -206,6 +224,25 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
+
+ if (sk) {
+ lock_sock(sk);
++
++ /* While a PA sync hcon is in the process of closing,
++ * mark parent socket with a flag, so that any residual
++ * BIGInfo adv reports that arrive before PA sync is
++ * terminated are not processed anymore.
++ */
++ if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
++ parent = iso_get_sock_listen(&hcon->src,
++ &hcon->dst,
++ iso_match_conn_sync_handle,
++ hcon);
++
++ if (parent) {
++ set_bit(BT_SK_PA_SYNC_TERM,
++ &iso_pi(parent)->flags);
++ sock_put(parent);
++ }
++ }
++
+ iso_sock_clear_timer(sk);
+ iso_chan_del(sk, err);
+ release_sock(sk);
+@@ -542,8 +579,6 @@ static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc,
+ return NULL;
+ }
+
+-typedef bool (*iso_sock_match_t)(struct sock *sk, void *data);
+-
+ /* Find socket listening:
+ * source bdaddr (Unicast)
+ * destination bdaddr (Broadcast only)
+@@ -729,10 +764,10 @@ static struct bt_iso_qos default_qos = {
+ .bcode = {0x00},
+ .options = 0x00,
+ .skip = 0x0000,
+- .sync_timeout = 0x4000,
++ .sync_timeout = BT_ISO_SYNC_TIMEOUT,
+ .sync_cte_type = 0x00,
+ .mse = 0x00,
+- .timeout = 0x4000,
++ .timeout = BT_ISO_SYNC_TIMEOUT,
+ },
+ };
+
+@@ -1100,7 +1135,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ return -ENOTCONN;
+ }
+
+- mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu;
++ mtu = iso_pi(sk)->conn->hcon->mtu;
+
+ release_sock(sk);
+
+@@ -1198,11 +1233,9 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ lock_sock(sk);
+ switch (sk->sk_state) {
+ case BT_CONNECT2:
+- if (pi->conn->hcon &&
+- test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
++ if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
+ iso_conn_big_sync(sk);
+ sk->sk_state = BT_LISTEN;
+- set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
+ } else {
+ iso_conn_defer_accept(pi->conn->hcon);
+ sk->sk_state = BT_CONFIG;
+@@ -1267,8 +1300,8 @@ static bool check_ucast_qos(struct bt_iso_qos *qos)
+
+ static bool check_bcast_qos(struct bt_iso_qos *qos)
+ {
+- if (qos->bcast.sync_factor == 0x00)
+- return false;
++ if (!qos->bcast.sync_factor)
++ qos->bcast.sync_factor = 0x01;
+
+ if (qos->bcast.packing > 0x01)
+ return false;
+@@ -1291,6 +1324,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
+ if (qos->bcast.skip > 0x01f3)
+ return false;
+
++ if (!qos->bcast.sync_timeout)
++ qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT;
++
+ if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000)
+ return false;
+
+@@ -1300,6 +1336,9 @@ static bool check_bcast_qos(struct bt_iso_qos *qos)
+ if (qos->bcast.mse > 0x1f)
+ return false;
+
++ if (!qos->bcast.timeout)
++ qos->bcast.timeout = BT_ISO_SYNC_TIMEOUT;
++
+ if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000)
+ return false;
+
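Both broadcast timeout fields here are in 10 ms units, with a valid range of 0x000a to 0x4000 (100 ms to about 164 s). Assuming BT_ISO_SYNC_TIMEOUT is 0x07d0 as the series defines it, the default these hunks fall back to works out as follows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int units = 0x07d0;                    /* assumed value */
        printf("%u units * 10 ms = %u ms\n",
               units, units * 10);                      /* 20000 ms = 20 s */
        return 0;
    }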
+@@ -1310,7 +1349,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+ {
+ struct sock *sk = sock->sk;
+- int len, err = 0;
++ int err = 0;
+ struct bt_iso_qos qos = default_qos;
+ u32 opt;
+
+@@ -1325,10 +1364,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -1337,10 +1375,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_PKT_STATUS:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
+@@ -1355,17 +1392,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- len = min_t(unsigned int, sizeof(qos), optlen);
+-
+- if (copy_from_sockptr(&qos, optval, len)) {
+- err = -EFAULT;
+- break;
+- }
+-
+- if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) {
+- err = -EINVAL;
++ err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen);
++ if (err)
+ break;
+- }
+
+ iso_pi(sk)->qos = qos;
+ iso_pi(sk)->qos_user_set = true;
+@@ -1380,18 +1409,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ }
+
+ if (optlen > sizeof(iso_pi(sk)->base)) {
+- err = -EOVERFLOW;
++ err = -EINVAL;
+ break;
+ }
+
+- len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen);
+-
+- if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval,
++ optlen);
++ if (err)
+ break;
+- }
+
+- iso_pi(sk)->base_len = len;
++ iso_pi(sk)->base_len = optlen;
+
+ break;
+
+@@ -1579,6 +1606,7 @@ static void iso_conn_ready(struct iso_conn *conn)
+ struct sock *sk = conn->sk;
+ struct hci_ev_le_big_sync_estabilished *ev = NULL;
+ struct hci_ev_le_pa_sync_established *ev2 = NULL;
++ struct hci_evt_le_big_info_adv_report *ev3 = NULL;
+ struct hci_conn *hcon;
+
+ BT_DBG("conn %p", conn);
+@@ -1603,14 +1631,20 @@ static void iso_conn_ready(struct iso_conn *conn)
+ parent = iso_get_sock_listen(&hcon->src,
+ &hcon->dst,
+ iso_match_big, ev);
+- } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) ||
+- test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
++ } else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
+ ev2 = hci_recv_event_data(hcon->hdev,
+ HCI_EV_LE_PA_SYNC_ESTABLISHED);
+ if (ev2)
+ parent = iso_get_sock_listen(&hcon->src,
+ &hcon->dst,
+ iso_match_sid, ev2);
++ } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
++ ev3 = hci_recv_event_data(hcon->hdev,
++ HCI_EVT_LE_BIG_INFO_ADV_REPORT);
++ if (ev3)
++ parent = iso_get_sock_listen(&hcon->src,
++ &hcon->dst,
++ iso_match_sync_handle, ev3);
+ }
+
+ if (!parent)
+@@ -1650,11 +1684,13 @@ static void iso_conn_ready(struct iso_conn *conn)
+ hcon->sync_handle = iso_pi(parent)->sync_handle;
+ }
+
+- if (ev2 && !ev2->status) {
+- iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
++ if (ev3) {
+ iso_pi(sk)->qos = iso_pi(parent)->qos;
++ iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
++ hcon->iso_qos = iso_pi(sk)->qos;
+ iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
+ memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
++ set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
+ }
+
+ bacpy(&iso_pi(sk)->dst, &hcon->dst);
+@@ -1747,9 +1783,20 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ /* Try to get PA sync listening socket, if it exists */
+ sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
+ iso_match_pa_sync_flag, NULL);
+- if (!sk)
++
++ if (!sk) {
+ sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
+ iso_match_sync_handle, ev2);
++
++		/* If PA Sync is in the process of terminating,
++ * do not handle any more BIGInfo adv reports.
++ */
++
++ if (sk && test_bit(BT_SK_PA_SYNC_TERM,
++ &iso_pi(sk)->flags))
++ return lm;
++ }
++
+ if (sk) {
+ int err;
+
+@@ -2065,13 +2112,9 @@ int iso_init(void)
+
+ hci_register_cb(&iso_cb);
+
+- if (IS_ERR_OR_NULL(bt_debugfs))
+- return 0;
+-
+- if (!iso_debugfs) {
++ if (!IS_ERR_OR_NULL(bt_debugfs))
+ iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs,
+ NULL, &iso_debugfs_fops);
+- }
+
+ iso_inited = true;
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 17ca13e8c044cb..93651c421767a0 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -39,8 +39,6 @@
+ #include <net/bluetooth/l2cap.h>
+
+ #include "smp.h"
+-#include "a2mp.h"
+-#include "amp.h"
+
+ #define LE_FLOWCTL_MAX_CREDITS 65535
+
+@@ -167,24 +165,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+ return NULL;
+ }
+
+-static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+- u8 ident)
+-{
+- struct l2cap_chan *c;
+-
+- mutex_lock(&conn->chan_lock);
+- c = __l2cap_get_chan_by_ident(conn, ident);
+- if (c) {
+- /* Only lock if chan reference is not 0 */
+- c = l2cap_chan_hold_unless_zero(c);
+- if (c)
+- l2cap_chan_lock(c);
+- }
+- mutex_unlock(&conn->chan_lock);
+-
+- return c;
+-}
+-
+ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
+ u8 src_type)
+ {
+@@ -435,6 +415,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
+
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
++ if (!conn)
++ return;
++
+ mutex_lock(&conn->chan_lock);
+ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
+ * this work. No need to call l2cap_chan_hold(chan) here again.
+@@ -474,6 +457,9 @@ struct l2cap_chan *l2cap_chan_create(void)
+ /* Set default lock nesting level */
+ atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+
++ /* Available receive buffer space is initially unknown */
++ chan->rx_avail = -1;
++
+ write_lock(&chan_list_lock);
+ list_add(&chan->global_l, &chan_list);
+ write_unlock(&chan_list_lock);
+@@ -555,6 +541,28 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+ }
+ EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
+
++static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
++{
++ size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
++
++ if (chan->mps == 0)
++ return 0;
++
++ /* If we don't know the available space in the receiver buffer, give
++ * enough credits for a full packet.
++ */
++ if (chan->rx_avail == -1)
++ return (chan->imtu / chan->mps) + 1;
++
++ /* If we know how much space is available in the receive buffer, give
++ * out as many credits as would fill the buffer.
++ */
++ if (chan->rx_avail <= sdu_len)
++ return 0;
++
++ return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
++}
++
+ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
+ {
+ chan->sdu = NULL;
+@@ -563,8 +571,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
+ chan->tx_credits = tx_credits;
+ /* Derive MPS from connection MTU to stop HCI fragmentation */
+ chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
+- /* Give enough credits for a full packet */
+- chan->rx_credits = (chan->imtu / chan->mps) + 1;
++ chan->rx_credits = l2cap_le_rx_credits(chan);
+
+ skb_queue_head_init(&chan->tx_q);
+ }
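With l2cap_le_rx_credits() above, the initial credit grant tracks the actual receive buffer space instead of always assuming room for one full MTU. Worked examples under assumed values imtu = 512 and mps = 64:

	/* rx_avail == -1 (unknown): (512 / 64) + 1 = 9 credits, the old behaviour.
	 * rx_avail == 1000, no partial SDU: DIV_ROUND_UP(1000 - 0, 64) = 16 credits.
	 * rx_avail == 40 with a 100-byte partial SDU queued: 40 <= 100,
	 * so 0 credits until the reader drains the socket.
	 */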
+@@ -576,7 +583,7 @@ static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
+ /* L2CAP implementations shall support a minimum MPS of 64 octets */
+ if (chan->mps < L2CAP_ECRED_MIN_MPS) {
+ chan->mps = L2CAP_ECRED_MIN_MPS;
+- chan->rx_credits = (chan->imtu / chan->mps) + 1;
++ chan->rx_credits = l2cap_le_rx_credits(chan);
+ }
+ }
+
+@@ -651,7 +658,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
+ chan->ops->teardown(chan, err);
+
+ if (conn) {
+- struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ /* Delete from channel list */
+ list_del(&chan->list);
+
+@@ -666,16 +672,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
+ if (chan->chan_type != L2CAP_CHAN_FIXED ||
+ test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
+ hci_conn_drop(conn->hcon);
+-
+- if (mgr && mgr->bredr_chan == chan)
+- mgr->bredr_chan = NULL;
+- }
+-
+- if (chan->hs_hchan) {
+- struct hci_chan *hs_hchan = chan->hs_hchan;
+-
+- BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
+- amp_disconnect_logical_link(hs_hchan);
+ }
+
+ if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
+@@ -977,12 +973,6 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ hci_send_acl(conn->hchan, skb, flags);
+ }
+
+-static bool __chan_is_moving(struct l2cap_chan *chan)
+-{
+- return chan->move_state != L2CAP_MOVE_STABLE &&
+- chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
+-}
+-
+ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+ struct hci_conn *hcon = chan->conn->hcon;
+@@ -991,15 +981,6 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+ BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ skb->priority);
+
+- if (chan->hs_hcon && !__chan_is_moving(chan)) {
+- if (chan->hs_hchan)
+- hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
+- else
+- kfree_skb(skb);
+-
+- return;
+- }
+-
+ /* Use NO_FLUSH for LE links (where this is the only option) or
+ * if the BR/EDR link supports it and flushing has not been
+ * explicitly requested (through FLAG_FLUSHABLE).
+@@ -1180,9 +1161,6 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
+ if (!control->sframe)
+ return;
+
+- if (__chan_is_moving(chan))
+- return;
+-
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
+ !control->poll)
+ control->final = 1;
+@@ -1237,40 +1215,6 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
+ return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ }
+
+-static bool __amp_capable(struct l2cap_chan *chan)
+-{
+- struct l2cap_conn *conn = chan->conn;
+- struct hci_dev *hdev;
+- bool amp_available = false;
+-
+- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
+- return false;
+-
+- if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
+- return false;
+-
+- read_lock(&hci_dev_list_lock);
+- list_for_each_entry(hdev, &hci_dev_list, list) {
+- if (hdev->amp_type != AMP_TYPE_BREDR &&
+- test_bit(HCI_UP, &hdev->flags)) {
+- amp_available = true;
+- break;
+- }
+- }
+- read_unlock(&hci_dev_list_lock);
+-
+- if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
+- return amp_available;
+-
+- return false;
+-}
+-
+-static bool l2cap_check_efs(struct l2cap_chan *chan)
+-{
+- /* Check EFS parameters */
+- return true;
+-}
+-
+ void l2cap_send_conn_req(struct l2cap_chan *chan)
+ {
+ struct l2cap_conn *conn = chan->conn;
+@@ -1286,76 +1230,6 @@ void l2cap_send_conn_req(struct l2cap_chan *chan)
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
+ }
+
+-static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
+-{
+- struct l2cap_create_chan_req req;
+- req.scid = cpu_to_le16(chan->scid);
+- req.psm = chan->psm;
+- req.amp_id = amp_id;
+-
+- chan->ident = l2cap_get_ident(chan->conn);
+-
+- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
+- sizeof(req), &req);
+-}
+-
+-static void l2cap_move_setup(struct l2cap_chan *chan)
+-{
+- struct sk_buff *skb;
+-
+- BT_DBG("chan %p", chan);
+-
+- if (chan->mode != L2CAP_MODE_ERTM)
+- return;
+-
+- __clear_retrans_timer(chan);
+- __clear_monitor_timer(chan);
+- __clear_ack_timer(chan);
+-
+- chan->retry_count = 0;
+- skb_queue_walk(&chan->tx_q, skb) {
+- if (bt_cb(skb)->l2cap.retries)
+- bt_cb(skb)->l2cap.retries = 1;
+- else
+- break;
+- }
+-
+- chan->expected_tx_seq = chan->buffer_seq;
+-
+- clear_bit(CONN_REJ_ACT, &chan->conn_state);
+- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+- l2cap_seq_list_clear(&chan->retrans_list);
+- l2cap_seq_list_clear(&chan->srej_list);
+- skb_queue_purge(&chan->srej_q);
+-
+- chan->tx_state = L2CAP_TX_STATE_XMIT;
+- chan->rx_state = L2CAP_RX_STATE_MOVE;
+-
+- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+-}
+-
+-static void l2cap_move_done(struct l2cap_chan *chan)
+-{
+- u8 move_role = chan->move_role;
+- BT_DBG("chan %p", chan);
+-
+- chan->move_state = L2CAP_MOVE_STABLE;
+- chan->move_role = L2CAP_MOVE_ROLE_NONE;
+-
+- if (chan->mode != L2CAP_MODE_ERTM)
+- return;
+-
+- switch (move_role) {
+- case L2CAP_MOVE_ROLE_INITIATOR:
+- l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
+- chan->rx_state = L2CAP_RX_STATE_WAIT_F;
+- break;
+- case L2CAP_MOVE_ROLE_RESPONDER:
+- chan->rx_state = L2CAP_RX_STATE_WAIT_P;
+- break;
+- }
+-}
+-
+ static void l2cap_chan_ready(struct l2cap_chan *chan)
+ {
+ /* The channel may have already been flagged as connected in
+@@ -1505,10 +1379,7 @@ static void l2cap_le_start(struct l2cap_chan *chan)
+
+ static void l2cap_start_connection(struct l2cap_chan *chan)
+ {
+- if (__amp_capable(chan)) {
+- BT_DBG("chan %p AMP capable: discover AMPs", chan);
+- a2mp_discover_amp(chan);
+- } else if (chan->conn->hcon->type == LE_LINK) {
++ if (chan->conn->hcon->type == LE_LINK) {
+ l2cap_le_start(chan);
+ } else {
+ l2cap_send_conn_req(chan);
+@@ -1611,11 +1482,6 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
+ __clear_ack_timer(chan);
+ }
+
+- if (chan->scid == L2CAP_CID_A2MP) {
+- l2cap_state_change(chan, BT_DISCONN);
+- return;
+- }
+-
+ req.dcid = cpu_to_le16(chan->dcid);
+ req.scid = cpu_to_le16(chan->scid);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
+@@ -1754,11 +1620,6 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
+
+ l2cap_chan_lock(chan);
+
+- if (chan->scid == L2CAP_CID_A2MP) {
+- l2cap_chan_unlock(chan);
+- continue;
+- }
+-
+ if (hcon->type == LE_LINK) {
+ l2cap_le_start(chan);
+ } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+@@ -2067,9 +1928,6 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
+
+ BT_DBG("chan %p, skbs %p", chan, skbs);
+
+- if (__chan_is_moving(chan))
+- return;
+-
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+
+ while (!skb_queue_empty(&chan->tx_q)) {
+@@ -2112,9 +1970,6 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
+
+- if (__chan_is_moving(chan))
+- return 0;
+-
+ while (chan->tx_send_head &&
+ chan->unacked_frames < chan->remote_tx_win &&
+ chan->tx_state == L2CAP_TX_STATE_XMIT) {
+@@ -2180,9 +2035,6 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return;
+
+- if (__chan_is_moving(chan))
+- return;
+-
+ while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&chan->retrans_list);
+
+@@ -2522,8 +2374,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ pdu_len = chan->conn->mtu;
+
+ /* Constrain PDU size for BR/EDR connections */
+- if (!chan->hs_hcon)
+- pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
++ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ if (chan->fcs)
+@@ -3287,11 +3138,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
+
+ skb_queue_head_init(&chan->tx_q);
+
+- chan->local_amp_id = AMP_ID_BREDR;
+- chan->move_id = AMP_ID_BREDR;
+- chan->move_state = L2CAP_MOVE_STABLE;
+- chan->move_role = L2CAP_MOVE_ROLE_NONE;
+-
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+@@ -3326,52 +3172,19 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
+
+ static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
+ {
+- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
+- (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
++ return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
+ }
+
+ static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
+ {
+- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
+- (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
++ return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
+ }
+
+ static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
+ struct l2cap_conf_rfc *rfc)
+ {
+- if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
+- u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
+-
+- /* Class 1 devices have must have ERTM timeouts
+- * exceeding the Link Supervision Timeout. The
+- * default Link Supervision Timeout for AMP
+- * controllers is 10 seconds.
+- *
+- * Class 1 devices use 0xffffffff for their
+- * best-effort flush timeout, so the clamping logic
+- * will result in a timeout that meets the above
+- * requirement. ERTM timeouts are 16-bit values, so
+- * the maximum timeout is 65.535 seconds.
+- */
+-
+- /* Convert timeout to milliseconds and round */
+- ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
+-
+- /* This is the recommended formula for class 2 devices
+- * that start ERTM timers when packets are sent to the
+- * controller.
+- */
+- ertm_to = 3 * ertm_to + 500;
+-
+- if (ertm_to > 0xffff)
+- ertm_to = 0xffff;
+-
+- rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
+- rfc->monitor_timeout = rfc->retrans_timeout;
+- } else {
+- rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+- rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+- }
++ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
++ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ }
+
+ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+@@ -3623,13 +3436,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
+ case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
+- if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
+- return -ECONNREFUSED;
+- set_bit(FLAG_EXT_CTRL, &chan->flags);
+- set_bit(CONF_EWS_RECV, &chan->conf_state);
+- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+- chan->remote_tx_win = val;
+- break;
++ return -ECONNREFUSED;
+
+ default:
+ if (hint)
+@@ -4027,11 +3834,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+-
+- if (chan->hs_hcon)
+- rsp_code = L2CAP_CREATE_CHAN_RSP;
+- else
+- rsp_code = L2CAP_CONN_RSP;
++ rsp_code = L2CAP_CONN_RSP;
+
+ BT_DBG("chan %p rsp_code %u", chan, rsp_code);
+
+@@ -4126,13 +3929,12 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn,
+ return 0;
+ }
+
+-static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u8 *data, u8 rsp_code, u8 amp_id)
++static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
++ u8 *data, u8 rsp_code)
+ {
+ struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+ struct l2cap_conn_rsp rsp;
+- struct l2cap_chan *chan = NULL, *pchan;
++ struct l2cap_chan *chan = NULL, *pchan = NULL;
+ int result, status = L2CAP_CS_NO_INFO;
+
+ u16 dcid = 0, scid = __le16_to_cpu(req->scid);
+@@ -4145,7 +3947,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ &conn->hcon->dst, ACL_LINK);
+ if (!pchan) {
+ result = L2CAP_CR_BAD_PSM;
+- goto sendresp;
++ goto response;
+ }
+
+ mutex_lock(&conn->chan_lock);
+@@ -4190,7 +3992,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ chan->dst_type = bdaddr_dst_type(conn->hcon);
+ chan->psm = psm;
+ chan->dcid = scid;
+- chan->local_amp_id = amp_id;
+
+ __l2cap_chan_add(conn, chan);
+
+@@ -4208,17 +4009,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ status = L2CAP_CS_AUTHOR_PEND;
+ chan->ops->defer(chan);
+ } else {
+- /* Force pending result for AMP controllers.
+- * The connection will succeed after the
+- * physical link is up.
+- */
+- if (amp_id == AMP_ID_BREDR) {
+- l2cap_state_change(chan, BT_CONFIG);
+- result = L2CAP_CR_SUCCESS;
+- } else {
+- l2cap_state_change(chan, BT_CONNECT2);
+- result = L2CAP_CR_PEND;
+- }
++ l2cap_state_change(chan, BT_CONFIG);
++ result = L2CAP_CR_SUCCESS;
+ status = L2CAP_CS_NO_INFO;
+ }
+ } else {
+@@ -4233,17 +4025,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ }
+
+ response:
+- l2cap_chan_unlock(pchan);
+- mutex_unlock(&conn->chan_lock);
+- l2cap_chan_put(pchan);
+-
+-sendresp:
+ rsp.scid = cpu_to_le16(scid);
+ rsp.dcid = cpu_to_le16(dcid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(status);
+ l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
+
++ if (!pchan)
++ return;
++
+ if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
+ struct l2cap_info_req info;
+ info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+@@ -4266,25 +4056,18 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ chan->num_conf_req++;
+ }
+
+- return chan;
++ l2cap_chan_unlock(pchan);
++ mutex_unlock(&conn->chan_lock);
++ l2cap_chan_put(pchan);
+ }
+
+ static int l2cap_connect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+ {
+- struct hci_dev *hdev = conn->hcon->hdev;
+- struct hci_conn *hcon = conn->hcon;
+-
+ if (cmd_len < sizeof(struct l2cap_conn_req))
+ return -EPROTO;
+
+- hci_dev_lock(hdev);
+- if (hci_dev_test_flag(hdev, HCI_MGMT) &&
+- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+- mgmt_device_connected(hdev, hcon, NULL, 0);
+- hci_dev_unlock(hdev);
+-
+- l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
++ l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
+ return 0;
+ }
+
+@@ -4516,10 +4299,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ /* check compatibility */
+
+ /* Send rsp for BR/EDR channel */
+- if (!chan->hs_hcon)
+- l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
+- else
+- chan->ident = cmd->ident;
++ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
+ }
+
+ unlock:
+@@ -4571,15 +4351,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ goto done;
+ }
+
+- if (!chan->hs_hcon) {
+- l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
+- 0);
+- } else {
+- if (l2cap_check_efs(chan)) {
+- amp_create_logical_link(chan);
+- chan->ident = cmd->ident;
+- }
+- }
++ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
+ }
+ goto done;
+
+@@ -4750,9 +4522,6 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
+ if (!disable_ertm)
+ feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
+ | L2CAP_FEAT_FCS;
+- if (conn->local_fixed_chan & L2CAP_FC_A2MP)
+- feat_mask |= L2CAP_FEAT_EXT_FLOW
+- | L2CAP_FEAT_EXT_WINDOW;
+
+ put_unaligned_le32(feat_mask, rsp->data);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+@@ -4841,846 +4610,101 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+ return 0;
+ }
+
+-static int l2cap_create_channel_req(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u16 cmd_len, void *data)
++static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd,
++ u16 cmd_len, u8 *data)
+ {
+- struct l2cap_create_chan_req *req = data;
+- struct l2cap_create_chan_rsp rsp;
+- struct l2cap_chan *chan;
+- struct hci_dev *hdev;
+- u16 psm, scid;
+-
+- if (cmd_len != sizeof(*req))
+- return -EPROTO;
++ struct hci_conn *hcon = conn->hcon;
++ struct l2cap_conn_param_update_req *req;
++ struct l2cap_conn_param_update_rsp rsp;
++ u16 min, max, latency, to_multiplier;
++ int err;
+
+- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
++ if (hcon->role != HCI_ROLE_MASTER)
+ return -EINVAL;
+
+- psm = le16_to_cpu(req->psm);
+- scid = le16_to_cpu(req->scid);
+-
+- BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
+-
+- /* For controller id 0 make BR/EDR connection */
+- if (req->amp_id == AMP_ID_BREDR) {
+- l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+- req->amp_id);
+- return 0;
+- }
+-
+- /* Validate AMP controller id */
+- hdev = hci_dev_get(req->amp_id);
+- if (!hdev)
+- goto error;
++ if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
++ return -EPROTO;
+
+- if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
+- hci_dev_put(hdev);
+- goto error;
+- }
++ req = (struct l2cap_conn_param_update_req *) data;
++ min = __le16_to_cpu(req->min);
++ max = __le16_to_cpu(req->max);
++ latency = __le16_to_cpu(req->latency);
++ to_multiplier = __le16_to_cpu(req->to_multiplier);
+
+- chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+- req->amp_id);
+- if (chan) {
+- struct amp_mgr *mgr = conn->hcon->amp_mgr;
+- struct hci_conn *hs_hcon;
+-
+- hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+- &conn->hcon->dst);
+- if (!hs_hcon) {
+- hci_dev_put(hdev);
+- cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+- chan->dcid);
+- return 0;
+- }
++ BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
++ min, max, latency, to_multiplier);
+
+- BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
++ memset(&rsp, 0, sizeof(rsp));
+
+- mgr->bredr_chan = chan;
+- chan->hs_hcon = hs_hcon;
+- chan->fcs = L2CAP_FCS_NONE;
+- conn->mtu = hdev->block_mtu;
+- }
++ err = hci_check_conn_params(min, max, latency, to_multiplier);
++ if (err)
++ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
++ else
++ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+
+- hci_dev_put(hdev);
++ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
++ sizeof(rsp), &rsp);
+
+- return 0;
++ if (!err) {
++ u8 store_hint;
+
+-error:
+- rsp.dcid = 0;
+- rsp.scid = cpu_to_le16(scid);
+- rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
+- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
++ store_hint = hci_le_conn_update(hcon, min, max, latency,
++ to_multiplier);
++ mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
++ store_hint, min, max, latency,
++ to_multiplier);
+
+- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+- sizeof(rsp), &rsp);
++ }
+
+ return 0;
+ }
+
+-static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
++static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+- struct l2cap_move_chan_req req;
+- u8 ident;
+-
+- BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
++ struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
++ struct hci_conn *hcon = conn->hcon;
++ u16 dcid, mtu, mps, credits, result;
++ struct l2cap_chan *chan;
++ int err, sec_level;
+
+- ident = l2cap_get_ident(chan->conn);
+- chan->ident = ident;
++ if (cmd_len < sizeof(*rsp))
++ return -EPROTO;
+
+- req.icid = cpu_to_le16(chan->scid);
+- req.dest_amp_id = dest_amp_id;
++ dcid = __le16_to_cpu(rsp->dcid);
++ mtu = __le16_to_cpu(rsp->mtu);
++ mps = __le16_to_cpu(rsp->mps);
++ credits = __le16_to_cpu(rsp->credits);
++ result = __le16_to_cpu(rsp->result);
+
+- l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
+- &req);
++ if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
++ dcid < L2CAP_CID_DYN_START ||
++ dcid > L2CAP_CID_LE_DYN_END))
++ return -EPROTO;
+
+- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
+-}
++ BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
++ dcid, mtu, mps, credits, result);
+
+-static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
+-{
+- struct l2cap_move_chan_rsp rsp;
++ mutex_lock(&conn->chan_lock);
+
+- BT_DBG("chan %p, result 0x%4.4x", chan, result);
++ chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
++ if (!chan) {
++ err = -EBADSLT;
++ goto unlock;
++ }
+
+- rsp.icid = cpu_to_le16(chan->dcid);
+- rsp.result = cpu_to_le16(result);
++ err = 0;
+
+- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
+- sizeof(rsp), &rsp);
+-}
++ l2cap_chan_lock(chan);
+
+-static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
+-{
+- struct l2cap_move_chan_cfm cfm;
+-
+- BT_DBG("chan %p, result 0x%4.4x", chan, result);
+-
+- chan->ident = l2cap_get_ident(chan->conn);
+-
+- cfm.icid = cpu_to_le16(chan->scid);
+- cfm.result = cpu_to_le16(result);
+-
+- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
+- sizeof(cfm), &cfm);
+-
+- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
+-}
+-
+-static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
+-{
+- struct l2cap_move_chan_cfm cfm;
+-
+- BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
+-
+- cfm.icid = cpu_to_le16(icid);
+- cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
+-
+- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
+- sizeof(cfm), &cfm);
+-}
+-
+-static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+- u16 icid)
+-{
+- struct l2cap_move_chan_cfm_rsp rsp;
+-
+- BT_DBG("icid 0x%4.4x", icid);
+-
+- rsp.icid = cpu_to_le16(icid);
+- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+-}
+-
+-static void __release_logical_link(struct l2cap_chan *chan)
+-{
+- chan->hs_hchan = NULL;
+- chan->hs_hcon = NULL;
+-
+- /* Placeholder - release the logical link */
+-}
+-
+-static void l2cap_logical_fail(struct l2cap_chan *chan)
+-{
+- /* Logical link setup failed */
+- if (chan->state != BT_CONNECTED) {
+- /* Create channel failure, disconnect */
+- l2cap_send_disconn_req(chan, ECONNRESET);
+- return;
+- }
+-
+- switch (chan->move_role) {
+- case L2CAP_MOVE_ROLE_RESPONDER:
+- l2cap_move_done(chan);
+- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
+- break;
+- case L2CAP_MOVE_ROLE_INITIATOR:
+- if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
+- chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
+- /* Remote has only sent pending or
+- * success responses, clean up
+- */
+- l2cap_move_done(chan);
+- }
+-
+- /* Other amp move states imply that the move
+- * has already aborted
+- */
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+- break;
+- }
+-}
+-
+-static void l2cap_logical_finish_create(struct l2cap_chan *chan,
+- struct hci_chan *hchan)
+-{
+- struct l2cap_conf_rsp rsp;
+-
+- chan->hs_hchan = hchan;
+- chan->hs_hcon->l2cap_data = chan->conn;
+-
+- l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
+-
+- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
+- int err;
+-
+- set_default_fcs(chan);
+-
+- err = l2cap_ertm_init(chan);
+- if (err < 0)
+- l2cap_send_disconn_req(chan, -err);
+- else
+- l2cap_chan_ready(chan);
+- }
+-}
+-
+-static void l2cap_logical_finish_move(struct l2cap_chan *chan,
+- struct hci_chan *hchan)
+-{
+- chan->hs_hcon = hchan->conn;
+- chan->hs_hcon->l2cap_data = chan->conn;
+-
+- BT_DBG("move_state %d", chan->move_state);
+-
+- switch (chan->move_state) {
+- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+- /* Move confirm will be sent after a success
+- * response is received
+- */
+- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+- break;
+- case L2CAP_MOVE_WAIT_LOGICAL_CFM:
+- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+- } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+- } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+- }
+- break;
+- default:
+- /* Move was not in expected state, free the channel */
+- __release_logical_link(chan);
+-
+- chan->move_state = L2CAP_MOVE_STABLE;
+- }
+-}
+-
+-/* Call with chan locked */
+-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+- u8 status)
+-{
+- BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
+-
+- if (status) {
+- l2cap_logical_fail(chan);
+- __release_logical_link(chan);
+- return;
+- }
+-
+- if (chan->state != BT_CONNECTED) {
+- /* Ignore logical link if channel is on BR/EDR */
+- if (chan->local_amp_id != AMP_ID_BREDR)
+- l2cap_logical_finish_create(chan, hchan);
+- } else {
+- l2cap_logical_finish_move(chan, hchan);
+- }
+-}
+-
+-void l2cap_move_start(struct l2cap_chan *chan)
+-{
+- BT_DBG("chan %p", chan);
+-
+- if (chan->local_amp_id == AMP_ID_BREDR) {
+- if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
+- return;
+- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+- /* Placeholder - start physical link setup */
+- } else {
+- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+- chan->move_id = 0;
+- l2cap_move_setup(chan);
+- l2cap_send_move_chan_req(chan, 0);
+- }
+-}
+-
+-static void l2cap_do_create(struct l2cap_chan *chan, int result,
+- u8 local_amp_id, u8 remote_amp_id)
+-{
+- BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
+- local_amp_id, remote_amp_id);
+-
+- chan->fcs = L2CAP_FCS_NONE;
+-
+- /* Outgoing channel on AMP */
+- if (chan->state == BT_CONNECT) {
+- if (result == L2CAP_CR_SUCCESS) {
+- chan->local_amp_id = local_amp_id;
+- l2cap_send_create_chan_req(chan, remote_amp_id);
+- } else {
+- /* Revert to BR/EDR connect */
+- l2cap_send_conn_req(chan);
+- }
+-
+- return;
+- }
+-
+- /* Incoming channel on AMP */
+- if (__l2cap_no_conn_pending(chan)) {
+- struct l2cap_conn_rsp rsp;
+- char buf[128];
+- rsp.scid = cpu_to_le16(chan->dcid);
+- rsp.dcid = cpu_to_le16(chan->scid);
+-
+- if (result == L2CAP_CR_SUCCESS) {
+- /* Send successful response */
+- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+- } else {
+- /* Send negative response */
+- rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
+- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+- }
+-
+- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
+- sizeof(rsp), &rsp);
+-
+- if (result == L2CAP_CR_SUCCESS) {
+- l2cap_state_change(chan, BT_CONFIG);
+- set_bit(CONF_REQ_SENT, &chan->conf_state);
+- l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+- L2CAP_CONF_REQ,
+- l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+- chan->num_conf_req++;
+- }
+- }
+-}
+-
+-static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
+- u8 remote_amp_id)
+-{
+- l2cap_move_setup(chan);
+- chan->move_id = local_amp_id;
+- chan->move_state = L2CAP_MOVE_WAIT_RSP;
+-
+- l2cap_send_move_chan_req(chan, remote_amp_id);
+-}
+-
+-static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
+-{
+- struct hci_chan *hchan = NULL;
+-
+- /* Placeholder - get hci_chan for logical link */
+-
+- if (hchan) {
+- if (hchan->state == BT_CONNECTED) {
+- /* Logical link is ready to go */
+- chan->hs_hcon = hchan->conn;
+- chan->hs_hcon->l2cap_data = chan->conn;
+- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+-
+- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+- } else {
+- /* Wait for logical link to be ready */
+- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+- }
+- } else {
+- /* Logical link not available */
+- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
+- }
+-}
+-
+-static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
+-{
+- if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+- u8 rsp_result;
+- if (result == -EINVAL)
+- rsp_result = L2CAP_MR_BAD_ID;
+- else
+- rsp_result = L2CAP_MR_NOT_ALLOWED;
+-
+- l2cap_send_move_chan_rsp(chan, rsp_result);
+- }
+-
+- chan->move_role = L2CAP_MOVE_ROLE_NONE;
+- chan->move_state = L2CAP_MOVE_STABLE;
+-
+- /* Restart data transmission */
+- l2cap_ertm_send(chan);
+-}
+-
+-/* Invoke with locked chan */
+-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
+-{
+- u8 local_amp_id = chan->local_amp_id;
+- u8 remote_amp_id = chan->remote_amp_id;
+-
+- BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
+- chan, result, local_amp_id, remote_amp_id);
+-
+- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
+- return;
+-
+- if (chan->state != BT_CONNECTED) {
+- l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
+- } else if (result != L2CAP_MR_SUCCESS) {
+- l2cap_do_move_cancel(chan, result);
+- } else {
+- switch (chan->move_role) {
+- case L2CAP_MOVE_ROLE_INITIATOR:
+- l2cap_do_move_initiate(chan, local_amp_id,
+- remote_amp_id);
+- break;
+- case L2CAP_MOVE_ROLE_RESPONDER:
+- l2cap_do_move_respond(chan, result);
+- break;
+- default:
+- l2cap_do_move_cancel(chan, result);
+- break;
+- }
+- }
+-}
+-
+-static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u16 cmd_len, void *data)
+-{
+- struct l2cap_move_chan_req *req = data;
+- struct l2cap_move_chan_rsp rsp;
+- struct l2cap_chan *chan;
+- u16 icid = 0;
+- u16 result = L2CAP_MR_NOT_ALLOWED;
+-
+- if (cmd_len != sizeof(*req))
+- return -EPROTO;
+-
+- icid = le16_to_cpu(req->icid);
+-
+- BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
+-
+- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
+- return -EINVAL;
+-
+- chan = l2cap_get_chan_by_dcid(conn, icid);
+- if (!chan) {
+- rsp.icid = cpu_to_le16(icid);
+- rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
+- l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
+- sizeof(rsp), &rsp);
+- return 0;
+- }
+-
+- chan->ident = cmd->ident;
+-
+- if (chan->scid < L2CAP_CID_DYN_START ||
+- chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
+- (chan->mode != L2CAP_MODE_ERTM &&
+- chan->mode != L2CAP_MODE_STREAMING)) {
+- result = L2CAP_MR_NOT_ALLOWED;
+- goto send_move_response;
+- }
+-
+- if (chan->local_amp_id == req->dest_amp_id) {
+- result = L2CAP_MR_SAME_ID;
+- goto send_move_response;
+- }
+-
+- if (req->dest_amp_id != AMP_ID_BREDR) {
+- struct hci_dev *hdev;
+- hdev = hci_dev_get(req->dest_amp_id);
+- if (!hdev || hdev->dev_type != HCI_AMP ||
+- !test_bit(HCI_UP, &hdev->flags)) {
+- if (hdev)
+- hci_dev_put(hdev);
+-
+- result = L2CAP_MR_BAD_ID;
+- goto send_move_response;
+- }
+- hci_dev_put(hdev);
+- }
+-
+- /* Detect a move collision. Only send a collision response
+- * if this side has "lost", otherwise proceed with the move.
+- * The winner has the larger bd_addr.
+- */
+- if ((__chan_is_moving(chan) ||
+- chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
+- bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
+- result = L2CAP_MR_COLLISION;
+- goto send_move_response;
+- }
+-
+- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+- l2cap_move_setup(chan);
+- chan->move_id = req->dest_amp_id;
+-
+- if (req->dest_amp_id == AMP_ID_BREDR) {
+- /* Moving to BR/EDR */
+- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+- result = L2CAP_MR_PEND;
+- } else {
+- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+- result = L2CAP_MR_SUCCESS;
+- }
+- } else {
+- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+- /* Placeholder - uncomment when amp functions are available */
+- /*amp_accept_physical(chan, req->dest_amp_id);*/
+- result = L2CAP_MR_PEND;
+- }
+-
+-send_move_response:
+- l2cap_send_move_chan_rsp(chan, result);
+-
+- l2cap_chan_unlock(chan);
+- l2cap_chan_put(chan);
+-
+- return 0;
+-}
+-
+-static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
+-{
+- struct l2cap_chan *chan;
+- struct hci_chan *hchan = NULL;
+-
+- chan = l2cap_get_chan_by_scid(conn, icid);
+- if (!chan) {
+- l2cap_send_move_chan_cfm_icid(conn, icid);
+- return;
+- }
+-
+- __clear_chan_timer(chan);
+- if (result == L2CAP_MR_PEND)
+- __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
+-
+- switch (chan->move_state) {
+- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+- /* Move confirm will be sent when logical link
+- * is complete.
+- */
+- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+- break;
+- case L2CAP_MOVE_WAIT_RSP_SUCCESS:
+- if (result == L2CAP_MR_PEND) {
+- break;
+- } else if (test_bit(CONN_LOCAL_BUSY,
+- &chan->conn_state)) {
+- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+- } else {
+- /* Logical link is up or moving to BR/EDR,
+- * proceed with move
+- */
+- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+- }
+- break;
+- case L2CAP_MOVE_WAIT_RSP:
+- /* Moving to AMP */
+- if (result == L2CAP_MR_SUCCESS) {
+- /* Remote is ready, send confirm immediately
+- * after logical link is ready
+- */
+- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+- } else {
+- /* Both logical link and move success
+- * are required to confirm
+- */
+- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
+- }
+-
+- /* Placeholder - get hci_chan for logical link */
+- if (!hchan) {
+- /* Logical link not available */
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+- break;
+- }
+-
+- /* If the logical link is not yet connected, do not
+- * send confirmation.
+- */
+- if (hchan->state != BT_CONNECTED)
+- break;
+-
+- /* Logical link is already ready to go */
+-
+- chan->hs_hcon = hchan->conn;
+- chan->hs_hcon->l2cap_data = chan->conn;
+-
+- if (result == L2CAP_MR_SUCCESS) {
+- /* Can confirm now */
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+- } else {
+- /* Now only need move success
+- * to confirm
+- */
+- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+- }
+-
+- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+- break;
+- default:
+- /* Any other amp move state means the move failed. */
+- chan->move_id = chan->local_amp_id;
+- l2cap_move_done(chan);
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+- }
+-
+- l2cap_chan_unlock(chan);
+- l2cap_chan_put(chan);
+-}
+-
+-static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+- u16 result)
+-{
+- struct l2cap_chan *chan;
+-
+- chan = l2cap_get_chan_by_ident(conn, ident);
+- if (!chan) {
+- /* Could not locate channel, icid is best guess */
+- l2cap_send_move_chan_cfm_icid(conn, icid);
+- return;
+- }
+-
+- __clear_chan_timer(chan);
+-
+- if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+- if (result == L2CAP_MR_COLLISION) {
+- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+- } else {
+- /* Cleanup - cancel move */
+- chan->move_id = chan->local_amp_id;
+- l2cap_move_done(chan);
+- }
+- }
+-
+- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+-
+- l2cap_chan_unlock(chan);
+- l2cap_chan_put(chan);
+-}
+-
+-static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u16 cmd_len, void *data)
+-{
+- struct l2cap_move_chan_rsp *rsp = data;
+- u16 icid, result;
+-
+- if (cmd_len != sizeof(*rsp))
+- return -EPROTO;
+-
+- icid = le16_to_cpu(rsp->icid);
+- result = le16_to_cpu(rsp->result);
+-
+- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
+-
+- if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
+- l2cap_move_continue(conn, icid, result);
+- else
+- l2cap_move_fail(conn, cmd->ident, icid, result);
+-
+- return 0;
+-}
+-
+-static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u16 cmd_len, void *data)
+-{
+- struct l2cap_move_chan_cfm *cfm = data;
+- struct l2cap_chan *chan;
+- u16 icid, result;
+-
+- if (cmd_len != sizeof(*cfm))
+- return -EPROTO;
+-
+- icid = le16_to_cpu(cfm->icid);
+- result = le16_to_cpu(cfm->result);
+-
+- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
+-
+- chan = l2cap_get_chan_by_dcid(conn, icid);
+- if (!chan) {
+- /* Spec requires a response even if the icid was not found */
+- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+- return 0;
+- }
+-
+- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
+- if (result == L2CAP_MC_CONFIRMED) {
+- chan->local_amp_id = chan->move_id;
+- if (chan->local_amp_id == AMP_ID_BREDR)
+- __release_logical_link(chan);
+- } else {
+- chan->move_id = chan->local_amp_id;
+- }
+-
+- l2cap_move_done(chan);
+- }
+-
+- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+-
+- l2cap_chan_unlock(chan);
+- l2cap_chan_put(chan);
+-
+- return 0;
+-}
+-
+-static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u16 cmd_len, void *data)
+-{
+- struct l2cap_move_chan_cfm_rsp *rsp = data;
+- struct l2cap_chan *chan;
+- u16 icid;
+-
+- if (cmd_len != sizeof(*rsp))
+- return -EPROTO;
+-
+- icid = le16_to_cpu(rsp->icid);
+-
+- BT_DBG("icid 0x%4.4x", icid);
+-
+- chan = l2cap_get_chan_by_scid(conn, icid);
+- if (!chan)
+- return 0;
+-
+- __clear_chan_timer(chan);
+-
+- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
+- chan->local_amp_id = chan->move_id;
+-
+- if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
+- __release_logical_link(chan);
+-
+- l2cap_move_done(chan);
+- }
+-
+- l2cap_chan_unlock(chan);
+- l2cap_chan_put(chan);
+-
+- return 0;
+-}
+-
+-static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd,
+- u16 cmd_len, u8 *data)
+-{
+- struct hci_conn *hcon = conn->hcon;
+- struct l2cap_conn_param_update_req *req;
+- struct l2cap_conn_param_update_rsp rsp;
+- u16 min, max, latency, to_multiplier;
+- int err;
+-
+- if (hcon->role != HCI_ROLE_MASTER)
+- return -EINVAL;
+-
+- if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
+- return -EPROTO;
+-
+- req = (struct l2cap_conn_param_update_req *) data;
+- min = __le16_to_cpu(req->min);
+- max = __le16_to_cpu(req->max);
+- latency = __le16_to_cpu(req->latency);
+- to_multiplier = __le16_to_cpu(req->to_multiplier);
+-
+- BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+- min, max, latency, to_multiplier);
+-
+- memset(&rsp, 0, sizeof(rsp));
+-
+- err = hci_check_conn_params(min, max, latency, to_multiplier);
+- if (err)
+- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+- else
+- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+-
+- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
+- sizeof(rsp), &rsp);
+-
+- if (!err) {
+- u8 store_hint;
+-
+- store_hint = hci_le_conn_update(hcon, min, max, latency,
+- to_multiplier);
+- mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
+- store_hint, min, max, latency,
+- to_multiplier);
+-
+- }
+-
+- return 0;
+-}
+-
+-static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
+- struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+- u8 *data)
+-{
+- struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
+- struct hci_conn *hcon = conn->hcon;
+- u16 dcid, mtu, mps, credits, result;
+- struct l2cap_chan *chan;
+- int err, sec_level;
+-
+- if (cmd_len < sizeof(*rsp))
+- return -EPROTO;
+-
+- dcid = __le16_to_cpu(rsp->dcid);
+- mtu = __le16_to_cpu(rsp->mtu);
+- mps = __le16_to_cpu(rsp->mps);
+- credits = __le16_to_cpu(rsp->credits);
+- result = __le16_to_cpu(rsp->result);
+-
+- if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
+- dcid < L2CAP_CID_DYN_START ||
+- dcid > L2CAP_CID_LE_DYN_END))
+- return -EPROTO;
+-
+- BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
+- dcid, mtu, mps, credits, result);
+-
+- mutex_lock(&conn->chan_lock);
+-
+- chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+- if (!chan) {
+- err = -EBADSLT;
+- goto unlock;
+- }
+-
+- err = 0;
+-
+- l2cap_chan_lock(chan);
+-
+- switch (result) {
+- case L2CAP_CR_LE_SUCCESS:
+- if (__l2cap_get_chan_by_dcid(conn, dcid)) {
+- err = -EBADSLT;
+- break;
+- }
++ switch (result) {
++ case L2CAP_CR_LE_SUCCESS:
++ if (__l2cap_get_chan_by_dcid(conn, dcid)) {
++ err = -EBADSLT;
++ break;
++ }
+
+ chan->ident = 0;
+ chan->dcid = dcid;
+@@ -5739,7 +4763,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ break;
+
+ case L2CAP_CONN_RSP:
+- case L2CAP_CREATE_CHAN_RSP:
+ l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
+ break;
+
+@@ -5774,26 +4797,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ l2cap_information_rsp(conn, cmd, cmd_len, data);
+ break;
+
+- case L2CAP_CREATE_CHAN_REQ:
+- err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+- break;
+-
+- case L2CAP_MOVE_CHAN_REQ:
+- err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+- break;
+-
+- case L2CAP_MOVE_CHAN_RSP:
+- l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+- break;
+-
+- case L2CAP_MOVE_CHAN_CFM:
+- err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+- break;
+-
+- case L2CAP_MOVE_CHAN_CFM_RSP:
+- l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+- break;
+-
+ default:
+ BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
+ err = -EINVAL;
+@@ -6492,6 +5495,14 @@ static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
+ kfree_skb(skb);
+ }
+
++static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
++{
++ struct l2cap_cmd_rej_unk rej;
++
++ rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
++ l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
++}
++
+ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+ {
+@@ -6517,23 +5528,25 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+
+ if (len > skb->len || !cmd->ident) {
+ BT_DBG("corrupted command");
+- break;
++ l2cap_sig_send_rej(conn, cmd->ident);
++ skb_pull(skb, len > skb->len ? skb->len : len);
++ continue;
+ }
+
+ err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
+ if (err) {
+- struct l2cap_cmd_rej_unk rej;
+-
+ BT_ERR("Wrong link type (%d)", err);
+-
+- rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+- l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+- sizeof(rej), &rej);
++ l2cap_sig_send_rej(conn, cmd->ident);
+ }
+
+ skb_pull(skb, len);
+ }
+
++ if (skb->len > 0) {
++ BT_DBG("corrupted command");
++ l2cap_sig_send_rej(conn, 0);
++ }
++
+ drop:
+ kfree_skb(skb);
+ }
+@@ -7035,8 +6048,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
+- !__chan_is_moving(chan)) {
++ if (!test_and_clear_bit(CONN_REJ_ACT,
++ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
+@@ -7229,11 +6242,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan)
+ BT_DBG("chan %p", chan);
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+-
+- if (chan->hs_hcon)
+- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+- else
+- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
++ chan->conn->mtu = chan->conn->hcon->mtu;
+
+ return l2cap_resegment(chan);
+ }
+@@ -7300,11 +6309,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
+ */
+ chan->next_tx_seq = control->reqseq;
+ chan->unacked_frames = 0;
+-
+- if (chan->hs_hcon)
+- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+- else
+- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
++ chan->conn->mtu = chan->conn->hcon->mtu;
+
+ err = l2cap_resegment(chan);
+
+@@ -7509,9 +6514,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+ {
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_le_credits pkt;
+- u16 return_credits;
+-
+- return_credits = (chan->imtu / chan->mps) + 1;
++ u16 return_credits = l2cap_le_rx_credits(chan);
+
+ if (chan->rx_credits >= return_credits)
+ return;
+@@ -7530,6 +6533,19 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+ l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
+ }
+
++void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
++{
++ if (chan->rx_avail == rx_avail)
++ return;
++
++ BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
++
++ chan->rx_avail = rx_avail;
++
++ if (chan->state == BT_CONNECTED)
++ l2cap_chan_le_send_credits(chan);
++}
++
+ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+ int err;
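l2cap_chan_rx_avail() is the new entry point through which a channel owner (the socket layer, later in this patch) reports free receive buffer space; credits are recomputed immediately once the channel is connected. Hypothetical calls, assuming a valid chan pointer:

	l2cap_chan_rx_avail(chan, 4096);	/* 4 KiB known free after a read */
	l2cap_chan_rx_avail(chan, -1);		/* space unknown: fall back to MTU-based credits */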
+@@ -7539,6 +6555,12 @@ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+ /* Wait recv to confirm reception before updating the credits */
+ err = chan->ops->recv(chan, skb);
+
++ if (err < 0 && chan->rx_avail != -1) {
++ BT_ERR("Queueing received LE L2CAP data failed");
++ l2cap_send_disconn_req(chan, ECONNRESET);
++ return err;
++ }
++
+ /* Update credits whenever an SDU is received */
+ l2cap_chan_le_send_credits(chan);
+
+@@ -7561,7 +6583,8 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+ }
+
+ chan->rx_credits--;
+- BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
++ BT_DBG("chan %p: rx_credits %u -> %u",
++ chan, chan->rx_credits + 1, chan->rx_credits);
+
+ /* Update if remote had run out of credits, this should only happens
+ * if the remote is not using the entire MPS.
+@@ -7656,21 +6679,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+
+ chan = l2cap_get_chan_by_scid(conn, cid);
+ if (!chan) {
+- if (cid == L2CAP_CID_A2MP) {
+- chan = a2mp_channel_create(conn, skb);
+- if (!chan) {
+- kfree_skb(skb);
+- return;
+- }
+-
+- l2cap_chan_hold(chan);
+- l2cap_chan_lock(chan);
+- } else {
+- BT_DBG("unknown cid 0x%4.4x", cid);
+- /* Drop packet and return */
+- kfree_skb(skb);
+- return;
+- }
++ BT_DBG("unknown cid 0x%4.4x", cid);
++ /* Drop packet and return */
++ kfree_skb(skb);
++ return;
+ }
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+@@ -7742,6 +6754,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
++ l2cap_chan_lock(chan);
++
+ if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ goto drop;
+
+@@ -7753,11 +6767,13 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ bt_cb(skb)->l2cap.psm = psm;
+
+ if (!chan->ops->recv(chan, skb)) {
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+
+ drop:
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ free_skb:
+ kfree_skb(skb);
+@@ -7855,26 +6871,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+
+ BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+
+- switch (hcon->type) {
+- case LE_LINK:
+- if (hcon->hdev->le_mtu) {
+- conn->mtu = hcon->hdev->le_mtu;
+- break;
+- }
+- fallthrough;
+- default:
+- conn->mtu = hcon->hdev->acl_mtu;
+- break;
+- }
+-
++ conn->mtu = hcon->mtu;
+ conn->feat_mask = 0;
+
+ conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
+
+- if (hcon->type == ACL_LINK &&
+- hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
+- conn->local_fixed_chan |= L2CAP_FC_A2MP;
+-
+ if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
+ (bredr_sc_enabled(hcon->hdev) ||
+ hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
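This hunk, like the iso_sock_sendmsg and l2cap_finish_move changes earlier, replaces per-transport MTU lookups with a single hcon->mtu field, presumably populated once when the HCI connection is set up. A sketch of the assumed selection logic (the helper name and fallbacks are illustrative, not taken from the patch):

	static u16 hci_conn_mtu_sketch(struct hci_dev *hdev, u8 type)
	{
		switch (type) {
		case LE_LINK:	/* prefer the LE MTU, fall back to ACL */
			return hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		case ISO_LINK:	/* prefer the ISO MTU, fall back to ACL */
			return hdev->iso_mtu ? hdev->iso_mtu : hdev->acl_mtu;
		default:
			return hdev->acl_mtu;
		}
	}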
+@@ -8339,11 +7340,6 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+ state_to_string(chan->state));
+
+- if (chan->scid == L2CAP_CID_A2MP) {
+- l2cap_chan_unlock(chan);
+- continue;
+- }
+-
+ if (!status && encrypt)
+ chan->sec_level = hcon->sec_level;
+
+@@ -8482,10 +7478,6 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ int len;
+
+- /* For AMP controller do not create l2cap conn */
+- if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
+- goto drop;
+-
+ if (!conn)
+ conn = l2cap_conn_add(hcon);
+
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 3bdfc3f1e73d0f..f04ce84267988f 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -438,7 +438,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+ struct l2cap_conninfo cinfo;
+- int len, err = 0;
++ int err = 0;
++ size_t len;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -485,7 +486,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+
+ BT_DBG("mode 0x%2.2x", chan->mode);
+
+- len = min_t(unsigned int, len, sizeof(opts));
++ len = min(len, sizeof(opts));
+ if (copy_to_user(optval, (char *) &opts, len))
+ err = -EFAULT;
+
+@@ -535,7 +536,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ cinfo.hci_handle = chan->conn->hcon->handle;
+ memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
+
+- len = min_t(unsigned int, len, sizeof(cinfo));
++ len = min(len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *) &cinfo, len))
+ err = -EFAULT;
+
+@@ -726,7 +727,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+- int len, err = 0;
++ int err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -753,11 +754,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = chan->tx_win;
+
+- len = min_t(unsigned int, sizeof(opts), optlen);
+- if (copy_from_sockptr(&opts, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
+ err = -EINVAL;
+@@ -800,10 +799,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ break;
+
+ case L2CAP_LM:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt & L2CAP_LM_FIPS) {
+ err = -EINVAL;
+@@ -884,7 +882,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ struct bt_security sec;
+ struct bt_power pwr;
+ struct l2cap_conn *conn;
+- int len, err = 0;
++ int err = 0;
+ u32 opt;
+ u16 mtu;
+ u8 mode;
+@@ -910,11 +908,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ sec.level = BT_SECURITY_LOW;
+
+- len = min_t(unsigned int, sizeof(sec), optlen);
+- if (copy_from_sockptr(&sec, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (sec.level < BT_SECURITY_LOW ||
+ sec.level > BT_SECURITY_FIPS) {
+@@ -959,10 +955,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt) {
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -974,10 +969,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_FLUSHABLE:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt > BT_FLUSHABLE_ON) {
+ err = -EINVAL;
+@@ -1009,11 +1003,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+- len = min_t(unsigned int, sizeof(pwr), optlen);
+- if (copy_from_sockptr(&pwr, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+@@ -1022,28 +1014,11 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_CHANNEL_POLICY:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
+- break;
+- }
+-
+- if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+- err = -EINVAL;
+- break;
+- }
+-
+- if (chan->mode != L2CAP_MODE_ERTM &&
+- chan->mode != L2CAP_MODE_STREAMING) {
+- err = -EOPNOTSUPP;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+-
+- chan->chan_policy = (u8) opt;
+-
+- if (sk->sk_state == BT_CONNECTED &&
+- chan->move_role == L2CAP_MOVE_ROLE_NONE)
+- l2cap_move_start(chan);
+
++ err = -EOPNOTSUPP;
+ break;
+
+ case BT_SNDMTU:
+@@ -1070,10 +1045,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
+ sk->sk_state == BT_CONNECTED)
+@@ -1101,10 +1075,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
++ if (err)
+ break;
+- }
+
+ BT_DBG("mode %u", mode);
+
+@@ -1157,6 +1130,34 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ return err;
+ }
+
++static void l2cap_publish_rx_avail(struct l2cap_chan *chan)
++{
++ struct sock *sk = chan->data;
++ ssize_t avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc);
++ int expected_skbs, skb_overhead;
++
++ if (avail <= 0) {
++ l2cap_chan_rx_avail(chan, 0);
++ return;
++ }
++
++ if (!chan->mps) {
++ l2cap_chan_rx_avail(chan, -1);
++ return;
++ }
++
++ /* Correct available memory by estimated sk_buff overhead.
++ * This is significant due to small transfer sizes. However, accept
++ * at least one full packet if receive space is non-zero.
++ */
++ expected_skbs = DIV_ROUND_UP(avail, chan->mps);
++ skb_overhead = expected_skbs * sizeof(struct sk_buff);
++ if (skb_overhead < avail)
++ l2cap_chan_rx_avail(chan, avail - skb_overhead);
++ else
++ l2cap_chan_rx_avail(chan, -1);
++}
++
+ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
+ {
+@@ -1193,28 +1194,33 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ else
+ err = bt_sock_recvmsg(sock, msg, len, flags);
+
+- if (pi->chan->mode != L2CAP_MODE_ERTM)
++ if (pi->chan->mode != L2CAP_MODE_ERTM &&
++ pi->chan->mode != L2CAP_MODE_LE_FLOWCTL &&
++ pi->chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ return err;
+
+- /* Attempt to put pending rx data in the socket buffer */
+-
+ lock_sock(sk);
+
+- if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+- goto done;
++ l2cap_publish_rx_avail(pi->chan);
+
+- if (pi->rx_busy_skb) {
+- if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+- pi->rx_busy_skb = NULL;
+- else
++ /* Attempt to put pending rx data in the socket buffer */
++ while (!list_empty(&pi->rx_busy)) {
++ struct l2cap_rx_busy *rx_busy =
++ list_first_entry(&pi->rx_busy,
++ struct l2cap_rx_busy,
++ list);
++ if (__sock_queue_rcv_skb(sk, rx_busy->skb) < 0)
+ goto done;
++ list_del(&rx_busy->list);
++ kfree(rx_busy);
+ }
+
+ /* Restore data flow when half of the receive buffer is
+ * available. This avoids resending large numbers of
+ * frames.
+ */
+- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
++ if (test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state) &&
++ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ l2cap_chan_busy(pi->chan, 0);
+
+ done:
+@@ -1232,6 +1238,10 @@ static void l2cap_sock_kill(struct sock *sk)
+
+ BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+
++ /* Sock is dead, so set chan data to NULL, avoid other task use invalid
++ * sock pointer.
++ */
++ l2cap_pi(sk)->chan->data = NULL;
+ /* Kill poor orphan */
+
+ l2cap_chan_put(l2cap_pi(sk)->chan);
+@@ -1474,18 +1484,25 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+
+ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+- struct sock *sk = chan->data;
++ struct sock *sk;
++ struct l2cap_pinfo *pi;
+ int err;
+
+- lock_sock(sk);
++ sk = chan->data;
++ if (!sk)
++ return -ENXIO;
+
+- if (l2cap_pi(sk)->rx_busy_skb) {
++ pi = l2cap_pi(sk);
++ lock_sock(sk);
++ if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (chan->mode != L2CAP_MODE_ERTM &&
+- chan->mode != L2CAP_MODE_STREAMING) {
++ chan->mode != L2CAP_MODE_STREAMING &&
++ chan->mode != L2CAP_MODE_LE_FLOWCTL &&
++ chan->mode != L2CAP_MODE_EXT_FLOWCTL) {
+ /* Even if no filter is attached, we could potentially
+ * get errors from security modules, etc.
+ */
+@@ -1496,7 +1513,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+
+ err = __sock_queue_rcv_skb(sk, skb);
+
+- /* For ERTM, handle one skb that doesn't fit into the recv
++ l2cap_publish_rx_avail(chan);
++
++ /* For ERTM and LE, handle a skb that doesn't fit into the recv
+ * buffer. This is important to do because the data frames
+ * have already been acked, so the skb cannot be discarded.
+ *
+@@ -1505,8 +1524,18 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ * acked and reassembled until there is buffer space
+ * available.
+ */
+- if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+- l2cap_pi(sk)->rx_busy_skb = skb;
++ if (err < 0 &&
++ (chan->mode == L2CAP_MODE_ERTM ||
++ chan->mode == L2CAP_MODE_LE_FLOWCTL ||
++ chan->mode == L2CAP_MODE_EXT_FLOWCTL)) {
++ struct l2cap_rx_busy *rx_busy =
++ kmalloc(sizeof(*rx_busy), GFP_KERNEL);
++ if (!rx_busy) {
++ err = -ENOMEM;
++ goto done;
++ }
++ rx_busy->skb = skb;
++ list_add_tail(&rx_busy->list, &pi->rx_busy);
+ l2cap_chan_busy(chan, 1);
+ err = 0;
+ }
+@@ -1732,6 +1761,8 @@ static const struct l2cap_ops l2cap_chan_ops = {
+
+ static void l2cap_sock_destruct(struct sock *sk)
+ {
++ struct l2cap_rx_busy *rx_busy, *next;
++
+ BT_DBG("sk %p", sk);
+
+ if (l2cap_pi(sk)->chan) {
+@@ -1739,9 +1770,10 @@ static void l2cap_sock_destruct(struct sock *sk)
+ l2cap_chan_put(l2cap_pi(sk)->chan);
+ }
+
+- if (l2cap_pi(sk)->rx_busy_skb) {
+- kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+- l2cap_pi(sk)->rx_busy_skb = NULL;
++ list_for_each_entry_safe(rx_busy, next, &l2cap_pi(sk)->rx_busy, list) {
++ kfree_skb(rx_busy->skb);
++ list_del(&rx_busy->list);
++ kfree(rx_busy);
+ }
+
+ skb_queue_purge(&sk->sk_receive_queue);
+@@ -1825,6 +1857,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+
+ chan->data = sk;
+ chan->ops = &l2cap_chan_ops;
++
++ l2cap_publish_rx_avail(chan);
+ }
+
+ static struct proto l2cap_proto = {
+@@ -1846,6 +1880,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ sk->sk_destruct = l2cap_sock_destruct;
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+
++ INIT_LIST_HEAD(&l2cap_pi(sk)->rx_busy);
++
+ chan = l2cap_chan_create();
+ if (!chan) {
+ sk_free(sk);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index ba2e00646e8e82..1f3a39c20a9114 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -443,8 +443,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+
+ count = 0;
+ list_for_each_entry(d, &hci_dev_list, list) {
+- if (d->dev_type == HCI_PRIMARY &&
+- !hci_dev_test_flag(d, HCI_UNCONFIGURED))
++ if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ count++;
+ }
+
+@@ -468,8 +467,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ continue;
+
+- if (d->dev_type == HCI_PRIMARY &&
+- !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
++ if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ rp->index[count++] = cpu_to_le16(d->id);
+ bt_dev_dbg(hdev, "Added hci%u", d->id);
+ }
+@@ -503,8 +501,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
+
+ count = 0;
+ list_for_each_entry(d, &hci_dev_list, list) {
+- if (d->dev_type == HCI_PRIMARY &&
+- hci_dev_test_flag(d, HCI_UNCONFIGURED))
++ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ count++;
+ }
+
+@@ -528,8 +525,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ continue;
+
+- if (d->dev_type == HCI_PRIMARY &&
+- hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
++ if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ rp->index[count++] = cpu_to_le16(d->id);
+ bt_dev_dbg(hdev, "Added hci%u", d->id);
+ }
+@@ -561,10 +557,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
+ read_lock(&hci_dev_list_lock);
+
+ count = 0;
+- list_for_each_entry(d, &hci_dev_list, list) {
+- if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
+- count++;
+- }
++ list_for_each_entry(d, &hci_dev_list, list)
++ count++;
+
+ rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
+ if (!rp) {
+@@ -585,16 +579,10 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ continue;
+
+- if (d->dev_type == HCI_PRIMARY) {
+- if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+- rp->entry[count].type = 0x01;
+- else
+- rp->entry[count].type = 0x00;
+- } else if (d->dev_type == HCI_AMP) {
+- rp->entry[count].type = 0x02;
+- } else {
+- continue;
+- }
++ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
++ rp->entry[count].type = 0x01;
++ else
++ rp->entry[count].type = 0x00;
+
+ rp->entry[count].bus = d->bus;
+ rp->entry[count++].index = cpu_to_le16(d->id);
+@@ -835,8 +823,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
+
+ if (lmp_ssp_capable(hdev)) {
+ settings |= MGMT_SETTING_SSP;
+- if (IS_ENABLED(CONFIG_BT_HS))
+- settings |= MGMT_SETTING_HS;
+ }
+
+ if (lmp_sc_capable(hdev))
+@@ -901,9 +887,6 @@ static u32 get_current_settings(struct hci_dev *hdev)
+ if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ settings |= MGMT_SETTING_SSP;
+
+- if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
+- settings |= MGMT_SETTING_HS;
+-
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ settings |= MGMT_SETTING_ADVERTISING;
+
+@@ -1045,6 +1028,8 @@ static void rpa_expired(struct work_struct *work)
+ hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
+ }
+
++static int set_discoverable_sync(struct hci_dev *hdev, void *data);
++
+ static void discov_off(struct work_struct *work)
+ {
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+@@ -1063,7 +1048,7 @@ static void discov_off(struct work_struct *work)
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hdev->discov_timeout = 0;
+
+- hci_update_discoverable(hdev);
++ hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
+
+ mgmt_new_settings(hdev);
+
+@@ -1407,7 +1392,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
+
+ /* Cancel potentially blocking sync operation before power off */
+ if (cp->val == 0x00) {
+- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
++ hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
+ err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
+ mgmt_set_powered_complete);
+ } else {
+@@ -1461,10 +1446,15 @@ static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
+
+ static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ {
+- if (cmd->cmd_complete) {
+- u8 *status = data;
++ struct cmd_lookup *match = data;
++
++ /* dequeue cmd_sync entries using cmd as data as that is about to be
++ * removed/freed.
++ */
++ hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);
+
+- cmd->cmd_complete(cmd, *status);
++ if (cmd->cmd_complete) {
++ cmd->cmd_complete(cmd, match->mgmt_status);
+ mgmt_pending_remove(cmd);
+
+ return;
+@@ -1928,7 +1918,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+
+ if (enable && hci_dev_test_and_clear_flag(hdev,
+ HCI_SSP_ENABLED)) {
+- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ new_settings(hdev, NULL);
+ }
+
+@@ -1941,12 +1930,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
+ } else {
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
+-
+- if (!changed)
+- changed = hci_dev_test_and_clear_flag(hdev,
+- HCI_HS_ENABLED);
+- else
+- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
+@@ -2010,11 +1993,6 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ } else {
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_SSP_ENABLED);
+- if (!changed)
+- changed = hci_dev_test_and_clear_flag(hdev,
+- HCI_HS_ENABLED);
+- else
+- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+@@ -2060,63 +2038,10 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+
+ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ {
+- struct mgmt_mode *cp = data;
+- bool changed;
+- u8 status;
+- int err;
+-
+ bt_dev_dbg(hdev, "sock %p", sk);
+
+- if (!IS_ENABLED(CONFIG_BT_HS))
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
+-
+- status = mgmt_bredr_support(hdev);
+- if (status)
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+-
+- if (!lmp_ssp_capable(hdev))
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+- MGMT_STATUS_NOT_SUPPORTED);
+-
+- if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+- MGMT_STATUS_REJECTED);
+-
+- if (cp->val != 0x00 && cp->val != 0x01)
+- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+- MGMT_STATUS_INVALID_PARAMS);
+-
+- hci_dev_lock(hdev);
+-
+- if (pending_find(MGMT_OP_SET_SSP, hdev)) {
+- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+- MGMT_STATUS_BUSY);
+- goto unlock;
+- }
+-
+- if (cp->val) {
+- changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
+- } else {
+- if (hdev_is_powered(hdev)) {
+- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+- MGMT_STATUS_REJECTED);
+- goto unlock;
+- }
+-
+- changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
+- }
+-
+- err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+- if (err < 0)
+- goto unlock;
+-
+- if (changed)
+- err = new_settings(hdev, sk);
+-
+-unlock:
+- hci_dev_unlock(hdev);
+- return err;
+ }
+
+ static void set_le_complete(struct hci_dev *hdev, void *data, int err)
+@@ -2684,7 +2609,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ goto failed;
+ }
+
+- err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
++ /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
++ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
++ */
++ err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
++ mgmt_class_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+@@ -2778,8 +2707,11 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
+ goto unlock;
+ }
+
+- err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
+- mgmt_class_complete);
++ /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
++ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
++ */
++ err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
++ mgmt_class_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
+
+@@ -2845,8 +2777,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
+ goto unlock;
+ }
+
+- err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
+- mgmt_class_complete);
++ /* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
++ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
++ */
++ err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
++ mgmt_class_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
+
+@@ -2894,15 +2829,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
+ key_count);
+
+- for (i = 0; i < key_count; i++) {
+- struct mgmt_link_key_info *key = &cp->keys[i];
+-
+- if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
+- return mgmt_cmd_status(sk, hdev->id,
+- MGMT_OP_LOAD_LINK_KEYS,
+- MGMT_STATUS_INVALID_PARAMS);
+- }
+-
+ hci_dev_lock(hdev);
+
+ hci_link_keys_clear(hdev);
+@@ -2927,6 +2853,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ continue;
+ }
+
++ if (key->addr.type != BDADDR_BREDR) {
++ bt_dev_warn(hdev,
++ "Invalid link address type %u for %pMR",
++ key->addr.type, &key->addr.bdaddr);
++ continue;
++ }
++
++ if (key->type > 0x08) {
++ bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
++ key->type, &key->addr.bdaddr);
++ continue;
++ }
++
+ /* Always ignore debug keys and require a new pairing if
+ * the user wants to use them.
+ */
+@@ -2984,7 +2923,12 @@ static int unpair_device_sync(struct hci_dev *hdev, void *data)
+ if (!conn)
+ return 0;
+
+- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
++ /* Disregard any possible error since the likes of hci_abort_conn_sync
++ * will clean up the connection no matter the error.
++ */
++ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
++
++ return 0;
+ }
+
+ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+@@ -3116,13 +3060,44 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ return err;
+ }
+
++static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
++{
++ struct mgmt_pending_cmd *cmd = data;
++
++ cmd->cmd_complete(cmd, mgmt_status(err));
++ mgmt_pending_free(cmd);
++}
++
++static int disconnect_sync(struct hci_dev *hdev, void *data)
++{
++ struct mgmt_pending_cmd *cmd = data;
++ struct mgmt_cp_disconnect *cp = cmd->param;
++ struct hci_conn *conn;
++
++ if (cp->addr.type == BDADDR_BREDR)
++ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
++ &cp->addr.bdaddr);
++ else
++ conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
++ le_addr_type(cp->addr.type));
++
++ if (!conn)
++ return -ENOTCONN;
++
++ /* Disregard any possible error since the likes of hci_abort_conn_sync
++ * will clean up the connection no matter the error.
++ */
++ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
++
++ return 0;
++}
++
+ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+ {
+ struct mgmt_cp_disconnect *cp = data;
+ struct mgmt_rp_disconnect rp;
+ struct mgmt_pending_cmd *cmd;
+- struct hci_conn *conn;
+ int err;
+
+ bt_dev_dbg(hdev, "sock %p", sk);
+@@ -3145,27 +3120,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ goto failed;
+ }
+
+- if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
+- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+- MGMT_STATUS_BUSY, &rp, sizeof(rp));
+- goto failed;
+- }
+-
+- if (cp->addr.type == BDADDR_BREDR)
+- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+- &cp->addr.bdaddr);
+- else
+- conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+- le_addr_type(cp->addr.type));
+-
+- if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
+- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+- MGMT_STATUS_NOT_CONNECTED, &rp,
+- sizeof(rp));
+- goto failed;
+- }
+-
+- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
++ cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+@@ -3173,9 +3128,10 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+
+ cmd->cmd_complete = generic_cmd_complete;
+
+- err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
++ err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
++ disconnect_complete);
+ if (err < 0)
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+
+ failed:
+ hci_dev_unlock(hdev);
+@@ -3185,6 +3141,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
+ {
+ switch (link_type) {
++ case ISO_LINK:
+ case LE_LINK:
+ switch (addr_type) {
+ case ADDR_LE_DEV_PUBLIC:
+@@ -3517,6 +3474,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ * will be kept and this function does nothing.
+ */
+ p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
++ if (!p) {
++ err = -EIO;
++ goto unlock;
++ }
+
+ if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+ p->auto_connect = HCI_AUTO_CONN_DISABLED;
+@@ -5533,8 +5494,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
+ goto unlock;
+ }
+
+- err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
+- mgmt_remove_adv_monitor_complete);
++ err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
++ mgmt_remove_adv_monitor_complete);
+
+ if (err) {
+ mgmt_pending_remove(cmd);
+@@ -6763,7 +6724,6 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
+ hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
+ hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
+- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ }
+
+ hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
+@@ -7205,15 +7165,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+
+ bt_dev_dbg(hdev, "key_count %u", key_count);
+
+- for (i = 0; i < key_count; i++) {
+- struct mgmt_ltk_info *key = &cp->keys[i];
+-
+- if (!ltk_is_valid(key))
+- return mgmt_cmd_status(sk, hdev->id,
+- MGMT_OP_LOAD_LONG_TERM_KEYS,
+- MGMT_STATUS_INVALID_PARAMS);
+- }
+-
+ hci_dev_lock(hdev);
+
+ hci_smp_ltks_clear(hdev);
+@@ -7230,6 +7181,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ continue;
+ }
+
++ if (!ltk_is_valid(key)) {
++ bt_dev_warn(hdev, "Invalid LTK for %pMR",
++ &key->addr.bdaddr);
++ continue;
++ }
++
+ switch (key->type) {
+ case MGMT_LTK_UNAUTHENTICATED:
+ authenticated = 0x00;
+@@ -8457,7 +8414,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
+
+ static u8 calculate_name_len(struct hci_dev *hdev)
+ {
+- u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
++ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
+
+ return eir_append_local_name(hdev, buf, 0);
+ }
+@@ -9371,23 +9328,14 @@ void mgmt_index_added(struct hci_dev *hdev)
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ return;
+
+- switch (hdev->dev_type) {
+- case HCI_PRIMARY:
+- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+- mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
+- NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+- ev.type = 0x01;
+- } else {
+- mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
+- HCI_MGMT_INDEX_EVENTS);
+- ev.type = 0x00;
+- }
+- break;
+- case HCI_AMP:
+- ev.type = 0x02;
+- break;
+- default:
+- return;
++ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
++ mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
++ HCI_MGMT_UNCONF_INDEX_EVENTS);
++ ev.type = 0x01;
++ } else {
++ mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
++ HCI_MGMT_INDEX_EVENTS);
++ ev.type = 0x00;
+ }
+
+ ev.bus = hdev->bus;
+@@ -9399,30 +9347,21 @@ void mgmt_index_added(struct hci_dev *hdev)
+ void mgmt_index_removed(struct hci_dev *hdev)
+ {
+ struct mgmt_ev_ext_index ev;
+- u8 status = MGMT_STATUS_INVALID_INDEX;
++ struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
+
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ return;
+
+- switch (hdev->dev_type) {
+- case HCI_PRIMARY:
+- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
++ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
+
+- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+- mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
+- NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+- ev.type = 0x01;
+- } else {
+- mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
+- HCI_MGMT_INDEX_EVENTS);
+- ev.type = 0x00;
+- }
+- break;
+- case HCI_AMP:
+- ev.type = 0x02;
+- break;
+- default:
+- return;
++ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
++ mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
++ HCI_MGMT_UNCONF_INDEX_EVENTS);
++ ev.type = 0x01;
++ } else {
++ mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
++ HCI_MGMT_INDEX_EVENTS);
++ ev.type = 0x00;
+ }
+
+ ev.bus = hdev->bus;
+@@ -9464,7 +9403,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err)
+ void __mgmt_power_off(struct hci_dev *hdev)
+ {
+ struct cmd_lookup match = { NULL, hdev };
+- u8 status, zero_cod[] = { 0, 0, 0 };
++ u8 zero_cod[] = { 0, 0, 0 };
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
+
+@@ -9476,11 +9415,11 @@ void __mgmt_power_off(struct hci_dev *hdev)
+ * status responses.
+ */
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+- status = MGMT_STATUS_INVALID_INDEX;
++ match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
+ else
+- status = MGMT_STATUS_NOT_POWERED;
++ match.mgmt_status = MGMT_STATUS_NOT_POWERED;
+
+- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
++ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
+
+ if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
+ mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+@@ -9668,6 +9607,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ u16 eir_len = 0;
+ u32 flags = 0;
+
++ if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
++ return;
++
+ /* allocate buff for LE or BR/EDR adv */
+ if (conn->le_adv_data_len > 0)
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
+@@ -9707,18 +9649,6 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
+ mgmt_event_skb(skb, NULL);
+ }
+
+-static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
+-{
+- struct sock **sk = data;
+-
+- cmd->cmd_complete(cmd, 0);
+-
+- *sk = cmd->sk;
+- sock_hold(*sk);
+-
+- mgmt_pending_remove(cmd);
+-}
+-
+ static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
+ {
+ struct hci_dev *hdev = data;
+@@ -9753,22 +9683,12 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ struct mgmt_ev_device_disconnected ev;
+ struct sock *sk = NULL;
+
+- /* The connection is still in hci_conn_hash so test for 1
+- * instead of 0 to know if this is the last one.
+- */
+- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
+- cancel_delayed_work(&hdev->power_off);
+- queue_work(hdev->req_workqueue, &hdev->power_off.work);
+- }
+-
+ if (!mgmt_connected)
+ return;
+
+ if (link_type != ACL_LINK && link_type != LE_LINK)
+ return;
+
+- mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
+-
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.reason = reason;
+@@ -9781,9 +9701,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+
+ if (sk)
+ sock_put(sk);
+-
+- mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+- hdev);
+ }
+
+ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+@@ -9812,21 +9729,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ mgmt_pending_remove(cmd);
+ }
+
+-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+- u8 addr_type, u8 status)
++void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
+ {
+ struct mgmt_ev_connect_failed ev;
+
+- /* The connection is still in hci_conn_hash so test for 1
+- * instead of 0 to know if this is the last one.
+- */
+- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
+- cancel_delayed_work(&hdev->power_off);
+- queue_work(hdev->req_workqueue, &hdev->power_off.work);
++ if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
++ mgmt_device_disconnected(hdev, &conn->dst, conn->type,
++ conn->dst_type, status, true);
++ return;
+ }
+
+- bacpy(&ev.addr.bdaddr, bdaddr);
+- ev.addr.type = link_to_bdaddr(link_type, addr_type);
++ bacpy(&ev.addr.bdaddr, &conn->dst);
++ ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
+ ev.status = mgmt_status(status);
+
+ mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
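
The load_link_keys() and load_long_term_keys() hunks above move validation from an up-front reject-everything pass into the load loop, warning about and skipping bad entries so that one malformed key no longer discards the whole batch. A sketch of that pattern, with key_is_valid() standing in for the real per-key checks:

#include <stdbool.h>
#include <stdio.h>

struct key_info { int addr_type; int type; };

static bool key_is_valid(const struct key_info *k)
{
	return k->addr_type == 0 && k->type <= 0x08;	/* illustrative checks */
}

static unsigned int load_keys(const struct key_info *keys, unsigned int count)
{
	unsigned int loaded = 0;

	for (unsigned int i = 0; i < count; i++) {
		if (!key_is_valid(&keys[i])) {
			fprintf(stderr, "skipping invalid key %u\n", i);
			continue;	/* skip the entry, don't abort the batch */
		}
		loaded++;	/* real code would store the key here */
	}
	return loaded;
}

int main(void)
{
	struct key_info keys[] = { { 0, 4 }, { 1, 4 }, { 0, 9 } };

	printf("loaded %u of 3\n", load_keys(keys, 3));	/* loaded 1 of 3 */
	return 0;
}
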
+diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
+index abbafa6194ca1c..d039683d3bdd41 100644
+--- a/net/bluetooth/msft.c
++++ b/net/bluetooth/msft.c
+@@ -150,10 +150,7 @@ static bool read_supported_features(struct hci_dev *hdev,
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+- if (IS_ERR_OR_NULL(skb)) {
+- if (!skb)
+- skb = ERR_PTR(-EIO);
+-
++ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to read MSFT supported features (%ld)",
+ PTR_ERR(skb));
+ return false;
+@@ -353,7 +350,7 @@ static void msft_remove_addr_filters_sync(struct hci_dev *hdev, u8 handle)
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+- if (IS_ERR_OR_NULL(skb)) {
++ if (IS_ERR(skb)) {
+ kfree(address_filter);
+ continue;
+ }
+@@ -442,11 +439,8 @@ static int msft_remove_monitor_sync(struct hci_dev *hdev,
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+- if (IS_ERR_OR_NULL(skb)) {
+- if (!skb)
+- return -EIO;
++ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+- }
+
+ return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode,
+ monitor, skb);
+@@ -559,7 +553,7 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
+ HCI_CMD_TIMEOUT);
+
+- if (IS_ERR_OR_NULL(skb)) {
++ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto out_free;
+ }
+@@ -740,10 +734,10 @@ static int msft_cancel_address_filter_sync(struct hci_dev *hdev, void *data)
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+- if (IS_ERR_OR_NULL(skb)) {
++ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "MSFT: Failed to cancel address (%pMR) filter",
+ &address_filter->bdaddr);
+- err = -EIO;
++ err = PTR_ERR(skb);
+ goto done;
+ }
+ kfree_skb(skb);
+@@ -775,7 +769,7 @@ void msft_register(struct hci_dev *hdev)
+ mutex_init(&msft->filter_lock);
+ }
+
+-void msft_unregister(struct hci_dev *hdev)
++void msft_release(struct hci_dev *hdev)
+ {
+ struct msft_data *msft = hdev->msft_data;
+
+@@ -881,6 +875,7 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
+ remove = true;
+ goto done;
+ }
++
+ cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
+ cp->rssi_high = address_filter->rssi_high;
+ cp->rssi_low = address_filter->rssi_low;
+@@ -893,7 +888,9 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
+ HCI_CMD_TIMEOUT);
+- if (IS_ERR_OR_NULL(skb)) {
++ kfree(cp);
++
++ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to enable address %pMR filter",
+ &address_filter->bdaddr);
+ skb = NULL;
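
The msft.c hunks tighten IS_ERR_OR_NULL() to IS_ERR(), since __hci_cmd_sync() no longer returns NULL in this tree, and propagate PTR_ERR(skb) instead of flattening every failure to -EIO. The pointer-encoded-error idiom, re-derived in userspace for illustration (the kernel's versions live in linux/err.h):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

static inline int IS_ERR(const void *ptr)
{
	/* errnos occupy the top 4095 addresses, which no valid
	 * allocation can ever use */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *skb = ERR_PTR(-110);	/* say, -ETIMEDOUT from the command */

	if (IS_ERR(skb))
		printf("propagating %ld rather than a blanket -EIO\n",
		       PTR_ERR(skb));
	return 0;
}
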
+diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
+index 2a63205b377b70..fe538e9c91c019 100644
+--- a/net/bluetooth/msft.h
++++ b/net/bluetooth/msft.h
+@@ -14,7 +14,7 @@
+
+ bool msft_monitor_supported(struct hci_dev *hdev);
+ void msft_register(struct hci_dev *hdev);
+-void msft_unregister(struct hci_dev *hdev);
++void msft_release(struct hci_dev *hdev);
+ void msft_do_open(struct hci_dev *hdev);
+ void msft_do_close(struct hci_dev *hdev);
+ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
+@@ -35,7 +35,7 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev)
+ }
+
+ static inline void msft_register(struct hci_dev *hdev) {}
+-static inline void msft_unregister(struct hci_dev *hdev) {}
++static inline void msft_release(struct hci_dev *hdev) {}
+ static inline void msft_do_open(struct hci_dev *hdev) {}
+ static inline void msft_do_close(struct hci_dev *hdev) {}
+ static inline void msft_vendor_evt(struct hci_dev *hdev, void *data,
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 053ef8f25fae47..1d34d849703329 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -1941,7 +1941,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
+ /* Get data directly from socket receive queue without copying it. */
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+ skb_orphan(skb);
+- if (!skb_linearize(skb)) {
++ if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
+ s = rfcomm_recv_frame(s, skb);
+ if (!s)
+ break;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index b54e8a530f55a1..cbff37b3273407 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -629,7 +629,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
+
+ switch (optname) {
+ case RFCOMM_LM:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
++ if (bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen)) {
+ err = -EFAULT;
+ break;
+ }
+@@ -664,7 +664,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int err = 0;
+- size_t len;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -686,11 +685,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ sec.level = BT_SECURITY_LOW;
+
+- len = min_t(unsigned int, sizeof(sec), optlen);
+- if (copy_from_sockptr(&sec, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (sec.level > BT_SECURITY_HIGH) {
+ err = -EINVAL;
+@@ -706,10 +703,9 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -869,9 +865,7 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
+
+ if (err == -ENOIOCTLCMD) {
+ #ifdef CONFIG_BT_RFCOMM_TTY
+- lock_sock(sk);
+ err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
+- release_sock(sk);
+ #else
+ err = -EOPNOTSUPP;
+ #endif
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index c736186aba26be..3c3650902c8396 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -83,6 +83,10 @@ static void sco_sock_timeout(struct work_struct *work)
+ struct sock *sk;
+
+ sco_conn_lock(conn);
++ if (!conn->hcon) {
++ sco_conn_unlock(conn);
++ return;
++ }
+ sk = conn->sk;
+ if (sk)
+ sock_hold(sk);
+@@ -122,7 +126,6 @@ static void sco_sock_clear_timer(struct sock *sk)
+ /* ---- SCO connections ---- */
+ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
+ {
+- struct hci_dev *hdev = hcon->hdev;
+ struct sco_conn *conn = hcon->sco_data;
+
+ if (conn) {
+@@ -140,9 +143,10 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
+
+ hcon->sco_data = conn;
+ conn->hcon = hcon;
++ conn->mtu = hcon->mtu;
+
+- if (hdev->sco_mtu > 0)
+- conn->mtu = hdev->sco_mtu;
++ if (hcon->mtu > 0)
++ conn->mtu = hcon->mtu;
+ else
+ conn->mtu = 60;
+
+@@ -823,7 +827,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+ {
+ struct sock *sk = sock->sk;
+- int len, err = 0;
++ int err = 0;
+ struct bt_voice voice;
+ u32 opt;
+ struct bt_codecs *codecs;
+@@ -842,10 +846,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -862,11 +865,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ voice.setting = sco_pi(sk)->setting;
+
+- len = min_t(unsigned int, sizeof(voice), optlen);
+- if (copy_from_sockptr(&voice, optval, len)) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
++ optlen);
++ if (err)
+ break;
+- }
+
+ /* Explicitly check for these values */
+ if (voice.setting != BT_VOICE_TRANSPARENT &&
+@@ -889,10 +891,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+
+ case BT_PKT_STATUS:
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt)
+ set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
+@@ -933,9 +934,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(buffer, optval, optlen)) {
++ err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
++ if (err) {
+ hci_dev_put(hdev);
+- err = -EFAULT;
+ break;
+ }
+
+@@ -966,7 +967,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ struct sock *sk = sock->sk;
+ struct sco_options opts;
+ struct sco_conninfo cinfo;
+- int len, err = 0;
++ int err = 0;
++ size_t len;
+
+ BT_DBG("sk %p", sk);
+
+@@ -988,7 +990,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+
+ BT_DBG("mtu %u", opts.mtu);
+
+- len = min_t(unsigned int, len, sizeof(opts));
++ len = min(len, sizeof(opts));
+ if (copy_to_user(optval, (char *)&opts, len))
+ err = -EFAULT;
+
+@@ -1006,7 +1008,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+ cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
+ memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
+
+- len = min_t(unsigned int, len, sizeof(cinfo));
++ len = min(len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *)&cinfo, len))
+ err = -EFAULT;
+
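
The sco.c getsockopt hunks, mirroring the l2cap ones earlier, switch len from int to size_t so that the plain, type-checked min() can replace min_t(unsigned int, ...). Mixing a signed length with size_t operands is the classic hazard the stricter typing avoids, as this standalone snippet shows:

#include <stdio.h>

int main(void)
{
	int slen = -1;		/* a corrupt or hostile length */
	size_t limit = 16;	/* think sizeof(opts) */

	/* The usual arithmetic conversions turn -1 into SIZE_MAX in
	 * this comparison, so the bounds check silently passes
	 * (compilers flag it with -Wsign-compare). */
	if (slen < limit)
		puts("unreachable");
	else
		puts("-1 compared as SIZE_MAX: the check is defeated");

	size_t len = 8;		/* same type end to end: no conversion,
				 * and a plain min-style clamp is safe */
	size_t n = len < limit ? len : limit;

	printf("clamped to %zu\n", n);
	return 0;
}
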
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index f1a9fc0012f098..56f7f041c9a604 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -915,7 +915,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+ * Confirms and the responder Enters the passkey.
+ */
+ if (smp->method == OVERLAP) {
+- if (hcon->role == HCI_ROLE_MASTER)
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ smp->method = CFM_PASSKEY;
+ else
+ smp->method = REQ_PASSKEY;
+@@ -965,7 +965,7 @@ static u8 smp_confirm(struct smp_chan *smp)
+
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+
+- if (conn->hcon->out)
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ else
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+@@ -981,7 +981,8 @@ static u8 smp_random(struct smp_chan *smp)
+ int ret;
+
+ bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn,
+- conn->hcon->out ? "initiator" : "responder");
++ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
++ "responder");
+
+ ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ hcon->init_addr_type, &hcon->init_addr,
+@@ -995,7 +996,7 @@ static u8 smp_random(struct smp_chan *smp)
+ return SMP_CONFIRM_FAILED;
+ }
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ u8 stk[16];
+ __le64 rand = 0;
+ __le16 ediv = 0;
+@@ -1250,14 +1251,15 @@ static void smp_distribute_keys(struct smp_chan *smp)
+ rsp = (void *) &smp->prsp[1];
+
+ /* The responder sends its keys first */
+- if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags) &&
++ (smp->remote_key_dist & KEY_DIST_MASK)) {
+ smp_allow_key_dist(smp);
+ return;
+ }
+
+ req = (void *) &smp->preq[1];
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ keydist = &rsp->init_key_dist;
+ *keydist &= req->init_key_dist;
+ } else {
+@@ -1426,7 +1428,7 @@ static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16])
+ struct hci_conn *hcon = smp->conn->hcon;
+ u8 *na, *nb, a[7], b[7];
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ na = smp->prnd;
+ nb = smp->rrnd;
+ } else {
+@@ -1454,7 +1456,7 @@ static void sc_dhkey_check(struct smp_chan *smp)
+ a[6] = hcon->init_addr_type;
+ b[6] = hcon->resp_addr_type;
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ local_addr = a;
+ remote_addr = b;
+ memcpy(io_cap, &smp->preq[1], 3);
+@@ -1533,7 +1535,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+ /* The round is only complete when the initiator
+ * receives pairing random.
+ */
+- if (!hcon->out) {
++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+ if (smp->passkey_round == 20)
+@@ -1561,7 +1563,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+ return 0;
+@@ -1572,7 +1574,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+ case SMP_CMD_PUBLIC_KEY:
+ default:
+ /* Initiating device starts the round */
+- if (!hcon->out)
++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ return 0;
+
+ bt_dev_dbg(hdev, "Starting passkey round %u",
+@@ -1617,7 +1619,7 @@ static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey)
+ }
+
+ /* Initiator sends DHKey check first */
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ sc_dhkey_check(smp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ } else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) {
+@@ -1740,7 +1742,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ struct l2cap_chan *chan = conn->smp;
+ struct hci_dev *hdev = conn->hcon->hdev;
+- struct smp_chan *smp;
++ struct smp_chan *smp = chan->data;
+ u8 key_size, auth, sec_level;
+ int ret;
+
+@@ -1749,16 +1751,14 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ if (skb->len < sizeof(*req))
+ return SMP_INVALID_PARAMS;
+
+- if (conn->hcon->role != HCI_ROLE_SLAVE)
++ if (smp && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ return SMP_CMD_NOTSUPP;
+
+- if (!chan->data)
++ if (!smp) {
+ smp = smp_chan_create(conn);
+- else
+- smp = chan->data;
+-
+- if (!smp)
+- return SMP_UNSPECIFIED;
++ if (!smp)
++ return SMP_UNSPECIFIED;
++ }
+
+ /* We didn't start the pairing, so match remote */
+ auth = req->auth_req & AUTH_REQ_MASK(hdev);
+@@ -1940,7 +1940,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
+ if (skb->len < sizeof(*rsp))
+ return SMP_INVALID_PARAMS;
+
+- if (conn->hcon->role != HCI_ROLE_MASTER)
++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ return SMP_CMD_NOTSUPP;
+
+ skb_pull(skb, sizeof(*rsp));
+@@ -2035,7 +2035,7 @@ static u8 sc_check_confirm(struct smp_chan *smp)
+ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM);
+
+- if (conn->hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ smp->prnd);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+@@ -2057,7 +2057,7 @@ static int fixup_sc_false_positive(struct smp_chan *smp)
+ u8 auth;
+
+ /* The issue is only observed when we're in responder role */
+- if (hcon->out)
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ return SMP_UNSPECIFIED;
+
+ if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
+@@ -2093,7 +2093,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+ struct hci_dev *hdev = hcon->hdev;
+
+ bt_dev_dbg(hdev, "conn %p %s", conn,
+- hcon->out ? "initiator" : "responder");
++ test_bit(SMP_FLAG_INITIATOR, &smp->flags) ? "initiator" :
++ "responder");
+
+ if (skb->len < sizeof(smp->pcnf))
+ return SMP_INVALID_PARAMS;
+@@ -2115,7 +2116,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+ return ret;
+ }
+
+- if (conn->hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ smp->prnd);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+@@ -2150,7 +2151,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ if (!test_bit(SMP_FLAG_SC, &smp->flags))
+ return smp_random(smp);
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ pkax = smp->local_pk;
+ pkbx = smp->remote_pk;
+ na = smp->prnd;
+@@ -2163,7 +2164,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ }
+
+ if (smp->method == REQ_OOB) {
+- if (!hcon->out)
++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+@@ -2174,7 +2175,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY)
+ return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM);
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ u8 cfm[16];
+
+ err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk,
+@@ -2215,7 +2216,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ return SMP_UNSPECIFIED;
+
+ if (smp->method == REQ_OOB) {
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ sc_dhkey_check(smp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+ }
+@@ -2289,10 +2290,27 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
+ return false;
+ }
+
++static void smp_send_pairing_req(struct smp_chan *smp, __u8 auth)
++{
++ struct smp_cmd_pairing cp;
++
++ if (smp->conn->hcon->type == ACL_LINK)
++ build_bredr_pairing_cmd(smp, &cp, NULL);
++ else
++ build_pairing_cmd(smp->conn, &cp, NULL, auth);
++
++ smp->preq[0] = SMP_CMD_PAIRING_REQ;
++ memcpy(&smp->preq[1], &cp, sizeof(cp));
++
++ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
++ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
++
++ set_bit(SMP_FLAG_INITIATOR, &smp->flags);
++}
++
+ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ {
+ struct smp_cmd_security_req *rp = (void *) skb->data;
+- struct smp_cmd_pairing cp;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct smp_chan *smp;
+@@ -2341,16 +2359,20 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+
+ skb_pull(skb, sizeof(*rp));
+
+- memset(&cp, 0, sizeof(cp));
+- build_pairing_cmd(conn, &cp, NULL, auth);
++ smp_send_pairing_req(smp, auth);
+
+- smp->preq[0] = SMP_CMD_PAIRING_REQ;
+- memcpy(&smp->preq[1], &cp, sizeof(cp));
++ return 0;
++}
+
+- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
++static void smp_send_security_req(struct smp_chan *smp, __u8 auth)
++{
++ struct smp_cmd_security_req cp;
+
+- return 0;
++ cp.auth_req = auth;
++ smp_send_cmd(smp->conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
++ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
++
++ clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
+ }
+
+ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+@@ -2421,23 +2443,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ authreq |= SMP_AUTH_MITM;
+ }
+
+- if (hcon->role == HCI_ROLE_MASTER) {
+- struct smp_cmd_pairing cp;
+-
+- build_pairing_cmd(conn, &cp, NULL, authreq);
+- smp->preq[0] = SMP_CMD_PAIRING_REQ;
+- memcpy(&smp->preq[1], &cp, sizeof(cp));
+-
+- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
+- } else {
+- struct smp_cmd_security_req cp;
+- cp.auth_req = authreq;
+- smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
+- }
++ if (hcon->role == HCI_ROLE_MASTER)
++ smp_send_pairing_req(smp, authreq);
++ else
++ smp_send_security_req(smp, authreq);
+
+- set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+ ret = 0;
+
+ unlock:
+@@ -2688,8 +2698,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
+
+ static u8 sc_select_method(struct smp_chan *smp)
+ {
+- struct l2cap_conn *conn = smp->conn;
+- struct hci_conn *hcon = conn->hcon;
+ struct smp_cmd_pairing *local, *remote;
+ u8 local_mitm, remote_mitm, local_io, remote_io, method;
+
+@@ -2702,7 +2710,7 @@ static u8 sc_select_method(struct smp_chan *smp)
+ * the "struct smp_cmd_pairing" from them we need to skip the
+ * first byte which contains the opcode.
+ */
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ local = (void *) &smp->preq[1];
+ remote = (void *) &smp->prsp[1];
+ } else {
+@@ -2771,7 +2779,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ /* Non-initiating device sends its public key after receiving
+ * the key from the initiating device.
+ */
+- if (!hcon->out) {
++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ err = sc_send_public_key(smp);
+ if (err)
+ return err;
+@@ -2833,7 +2841,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ }
+
+ if (smp->method == REQ_OOB) {
+- if (hcon->out)
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM,
+ sizeof(smp->prnd), smp->prnd);
+
+@@ -2842,7 +2850,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ return 0;
+ }
+
+- if (hcon->out)
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+
+ if (smp->method == REQ_PASSKEY) {
+@@ -2857,7 +2865,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ /* The Initiating device waits for the non-initiating device to
+ * send the confirm value.
+ */
+- if (conn->hcon->out)
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ return 0;
+
+ err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd,
+@@ -2891,7 +2899,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+ a[6] = hcon->init_addr_type;
+ b[6] = hcon->resp_addr_type;
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ local_addr = a;
+ remote_addr = b;
+ memcpy(io_cap, &smp->prsp[1], 3);
+@@ -2916,7 +2924,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+ if (crypto_memneq(check->e, e, 16))
+ return SMP_DHKEY_CHECK_FAILED;
+
+- if (!hcon->out) {
++ if (!test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) {
+ set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags);
+ return 0;
+@@ -2928,7 +2936,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+
+ sc_add_ltk(smp);
+
+- if (hcon->out) {
++ if (test_bit(SMP_FLAG_INITIATOR, &smp->flags)) {
+ hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
+ hcon->enc_key_size = smp->enc_key_size;
+ }
+@@ -3077,7 +3085,6 @@ static void bredr_pairing(struct l2cap_chan *chan)
+ struct l2cap_conn *conn = chan->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+- struct smp_cmd_pairing req;
+ struct smp_chan *smp;
+
+ bt_dev_dbg(hdev, "chan %p", chan);
+@@ -3129,14 +3136,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
+
+ bt_dev_dbg(hdev, "starting SMP over BR/EDR");
+
+- /* Prepare and send the BR/EDR SMP Pairing Request */
+- build_bredr_pairing_cmd(smp, &req, NULL);
+-
+- smp->preq[0] = SMP_CMD_PAIRING_REQ;
+- memcpy(&smp->preq[1], &req, sizeof(req));
+-
+- smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req);
+- SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
++ smp_send_pairing_req(smp, 0x00);
+ }
+
+ static void smp_resume_cb(struct l2cap_chan *chan)
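
The smp.c hunks above stop inferring the pairing role from hcon->out or HCI_ROLE_MASTER and instead record it in SMP_FLAG_INITIATOR at the point where a Pairing Request or Security Request is actually sent: smp_send_pairing_req() sets the flag and smp_send_security_req() clears it, so every later branch agrees on who initiated. A toy sketch of recording role at the action rather than deriving it from link state; the types and names here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct smp_ctx {
	bool initiator;	/* plays the part of SMP_FLAG_INITIATOR */
};

static void send_pairing_req(struct smp_ctx *smp)
{
	/* ... build and transmit the Pairing Request ... */
	smp->initiator = true;	/* set once, at the only place we initiate */
}

static void send_security_req(struct smp_ctx *smp)
{
	/* ... transmit the Security Request; the peer will initiate ... */
	smp->initiator = false;
}

static const char *role(const struct smp_ctx *smp)
{
	/* later protocol branches consult the recorded role, never
	 * the link-level state that might disagree with it */
	return smp->initiator ? "initiator" : "responder";
}

int main(void)
{
	struct smp_ctx smp = { 0 };

	send_security_req(&smp);
	printf("after security req: %s\n", role(&smp));
	send_pairing_req(&smp);
	printf("after pairing req:  %s\n", role(&smp));
	return 0;
}
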
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 0841f8d824198d..12a2934b28ffbf 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -543,7 +543,7 @@ struct bpf_fentry_test_t {
+
+ int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+ {
+- asm volatile ("");
++ asm volatile ("": "+r"(arg));
+ return (long)arg;
+ }
+
+@@ -707,10 +707,16 @@ static void
+ __bpf_prog_test_run_raw_tp(void *data)
+ {
+ struct bpf_raw_tp_test_run_info *info = data;
++ struct bpf_trace_run_ctx run_ctx = {};
++ struct bpf_run_ctx *old_run_ctx;
++
++ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+
+ rcu_read_lock();
+ info->retval = bpf_prog_run(info->prog, info->ctx);
+ rcu_read_unlock();
++
++ bpf_reset_run_ctx(old_run_ctx);
+ }
+
+ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
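
The one-line change to bpf_fentry_test7() upgrades the empty asm into an optimisation barrier on arg: the "+r" constraint declares the value both read and possibly rewritten by the asm, so the compiler must keep the argument live instead of folding it away before the traced call. The idiom in isolation (GCC/Clang extended asm):

#include <stdio.h>

/* An empty asm with a "+r" constraint emits no instructions, but
 * forces the compiler to treat v as read and potentially modified,
 * defeating constant propagation across this point. */
static inline long launder_long(long v)
{
	asm volatile("" : "+r"(v));
	return v;
}

int main(void)
{
	printf("%ld\n", launder_long(42));	/* 42, computed at run time */
	return 0;
}
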
+diff --git a/net/bridge/br_cfm_netlink.c b/net/bridge/br_cfm_netlink.c
+index 5c4c369f8536e9..2faab44652e7c0 100644
+--- a/net/bridge/br_cfm_netlink.c
++++ b/net/bridge/br_cfm_netlink.c
+@@ -362,7 +362,7 @@ static int br_cc_ccm_tx_parse(struct net_bridge *br, struct nlattr *attr,
+
+ memset(&tx_info, 0, sizeof(tx_info));
+
+- instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_RDI_INSTANCE]);
++ instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_CC_CCM_TX_INSTANCE]);
+ nla_memcpy(&tx_info.dmac.addr,
+ tb[IFLA_BRIDGE_CFM_CC_CCM_TX_DMAC],
+ sizeof(tx_info.dmac.addr));
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 9a5ea06236bd7c..42d4c3727bf76d 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -27,6 +27,7 @@ EXPORT_SYMBOL_GPL(nf_br_ops);
+ /* net device transmit always called with BH disabled */
+ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
++ enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
+ struct net_bridge_mcast_port *pmctx_null = NULL;
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_mcast *brmctx = &br->multicast_ctx;
+@@ -38,6 +39,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ const unsigned char *dest;
+ u16 vid = 0;
+
++ if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
++ kfree_skb_reason(skb, reason);
++ return NETDEV_TX_OK;
++ }
++
+ memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
+ br_tc_skb_miss_set(skb, false);
+
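
br_dev_xmit() above now verifies that a full Ethernet header is actually present before anything dereferences it, and frees undersized frames with an attributable drop reason rather than a bare free. The shape of that guard, with toy types standing in for the kernel's sk_buff and drop-reason machinery:

#include <stddef.h>
#include <stdio.h>

#define ETH_HLEN 14

enum drop_reason { NOT_DROPPED_YET, PKT_TOO_SMALL };

struct frame { size_t len; /* bytes available in the buffer */ };

static enum drop_reason may_pull(const struct frame *f, size_t need)
{
	return f->len >= need ? NOT_DROPPED_YET : PKT_TOO_SMALL;
}

static int xmit(struct frame *f)
{
	enum drop_reason reason = may_pull(f, ETH_HLEN);

	if (reason != NOT_DROPPED_YET) {
		/* the drop is counted with a reason instead of the
		 * header being read past the end of the buffer */
		printf("dropped, reason=%d\n", reason);
		return 0;	/* frame consumed either way */
	}
	puts("header safe to read");
	return 0;
}

int main(void)
{
	struct frame runt = { 6 }, ok = { 64 };

	xmit(&runt);
	xmit(&ok);
	return 0;
}
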
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index e69a872bfc1d70..a6d8cd9a58078f 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -1425,12 +1425,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+ modified = true;
+ }
+
+- if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
++ if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ /* Refresh entry */
+ fdb->used = jiffies;
+- } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+- /* Take over SW learned entry */
+- set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
++ } else {
+ modified = true;
+ }
+
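
The br_fdb.c hunk folds a test_bit() followed by set_bit() into a single atomic test_and_set_bit(), closing the window in which two CPUs could both observe the flag clear and both take the take-over path. The same read-modify-write expressed with C11 atomics (the kernel uses its own bitops, not stdatomic):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns the previous state of the bit; setting the bit and reading
 * its old value happen as one indivisible operation. */
static bool test_and_set(atomic_ulong *word, unsigned int bit)
{
	unsigned long mask = 1UL << bit;

	return atomic_fetch_or(word, mask) & mask;
}

int main(void)
{
	atomic_ulong flags = 0;

	printf("first caller saw %d\n", test_and_set(&flags, 3));	/* 0 */
	printf("second caller saw %d\n", test_and_set(&flags, 3));	/* 1 */
	return 0;
}
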
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 7431f89e897b95..e19b583ff2c6d0 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p,
+
+ vg = nbp_vlan_group_rcu(p);
+ return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+- p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
+- nbp_switchdev_allowed_egress(p, skb) &&
++ (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&
++ br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&
+ !br_skb_isolated(p, skb);
+ }
+
+@@ -258,6 +258,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ {
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+ const unsigned char *src = eth_hdr(skb)->h_source;
++ struct sk_buff *nskb;
+
+ if (!should_deliver(p, skb))
+ return;
+@@ -266,12 +267,16 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ if (skb->dev == p->dev && ether_addr_equal(src, addr))
+ return;
+
+- skb = skb_copy(skb, GFP_ATOMIC);
+- if (!skb) {
++ __skb_push(skb, ETH_HLEN);
++ nskb = pskb_copy(skb, GFP_ATOMIC);
++ __skb_pull(skb, ETH_HLEN);
++ if (!nskb) {
+ DEV_STATS_INC(dev, tx_dropped);
+ return;
+ }
+
++ skb = nskb;
++ __skb_pull(skb, ETH_HLEN);
+ if (!is_broadcast_ether_addr(addr))
+ memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
+
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index c729528b5e85f3..e09000e38d071d 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
+ return netif_receive_skb(skb);
+ }
+
+-static int br_pass_frame_up(struct sk_buff *skb)
++static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
+ {
+ struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
+ struct net_bridge *br = netdev_priv(brdev);
+@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
+ br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
+ BR_MCAST_DIR_TX);
+
++ BR_INPUT_SKB_CB(skb)->promisc = promisc;
++
+ return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+ dev_net(indev), NULL, skb, indev, NULL,
+ br_netif_receive_skb);
+@@ -82,6 +84,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ struct net_bridge_mcast *brmctx;
+ struct net_bridge_vlan *vlan;
+ struct net_bridge *br;
++ bool promisc;
+ u16 vid = 0;
+ u8 state;
+
+@@ -137,7 +140,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ if (p->flags & BR_LEARNING)
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
+
+- local_rcv = !!(br->dev->flags & IFF_PROMISC);
++ promisc = !!(br->dev->flags & IFF_PROMISC);
++ local_rcv = promisc;
++
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+ /* by definition the broadcast is also a multicast address */
+ if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
+@@ -200,7 +205,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ unsigned long now = jiffies;
+
+ if (test_bit(BR_FDB_LOCAL, &dst->flags))
+- return br_pass_frame_up(skb);
++ return br_pass_frame_up(skb, false);
+
+ if (now != dst->used)
+ dst->used = now;
+@@ -213,7 +218,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ }
+
+ if (local_rcv)
+- return br_pass_frame_up(skb);
++ return br_pass_frame_up(skb, promisc);
+
+ out:
+ return 0;
+@@ -386,6 +391,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
+ goto forward;
+ }
+
++ BR_INPUT_SKB_CB(skb)->promisc = false;
++
+ /* The else clause should be hit when nf_hook():
+ * - returns < 0 (drop/error)
+ * - returns = 0 (stolen/nf_queue)
+diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
+index ee680adcee1796..1820f09ff59ceb 100644
+--- a/net/bridge/br_mst.c
++++ b/net/bridge/br_mst.c
+@@ -73,12 +73,11 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
+ }
+ EXPORT_SYMBOL_GPL(br_mst_get_state);
+
+-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
++static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
++ struct net_bridge_vlan *v,
+ u8 state)
+ {
+- struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+-
+- if (v->state == state)
++ if (br_vlan_get_state(v) == state)
+ return;
+
+ br_vlan_set_state(v, state);
+@@ -100,11 +99,12 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ };
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *v;
+- int err;
++ int err = 0;
+
+- vg = nbp_vlan_group(p);
++ rcu_read_lock();
++ vg = nbp_vlan_group_rcu(p);
+ if (!vg)
+- return 0;
++ goto out;
+
+ /* MSTI 0 (CST) state changes are notified via the regular
+ * SWITCHDEV_ATTR_ID_PORT_STP_STATE.
+@@ -112,17 +112,20 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ if (msti) {
+ err = switchdev_port_attr_set(p->dev, &attr, extack);
+ if (err && err != -EOPNOTSUPP)
+- return err;
++ goto out;
+ }
+
+- list_for_each_entry(v, &vg->vlan_list, vlist) {
++ err = 0;
++ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+ if (v->brvlan->msti != msti)
+ continue;
+
+- br_mst_vlan_set_state(p, v, state);
++ br_mst_vlan_set_state(vg, v, state);
+ }
+
+- return 0;
++out:
++ rcu_read_unlock();
++ return err;
+ }
+
+ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+@@ -136,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+ * it.
+ */
+ if (v != pv && v->brvlan->msti == msti) {
+- br_mst_vlan_set_state(pv->port, pv, v->state);
++ br_mst_vlan_set_state(vg, pv, v->state);
+ return;
+ }
+ }
+
+ /* Otherwise, start out in a new MSTI with all ports disabled. */
+- return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
++ return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
+ }
+
+ int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
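
br_mst_set_state() previously assumed rtnl protection via nbp_vlan_group(); it now takes rcu_read_lock() and walks the list with nbp_vlan_group_rcu()/list_for_each_entry_rcu(), since the state can change outside rtnl. A minimal sketch of that read-side pattern (struct and field names hypothetical):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct vlan_entry {
    u16 msti;
    struct list_head vlist;
};

/* Sketch (kernel context assumed): walk an RCU-protected list without
 * holding rtnl, as br_mst_set_state() now does. */
static void walk_vlans(struct list_head *vlan_list, u16 msti)
{
    struct vlan_entry *v;

    rcu_read_lock();
    list_for_each_entry_rcu(v, vlan_list, vlist) {
        if (v->msti != msti)
            continue;
        /* act on v; sleeping is not allowed inside the section */
    }
    rcu_read_unlock();
}
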
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 96d1fc78dd3964..c38244d60ff86e 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1761,6 +1761,10 @@ static void br_ip6_multicast_querier_expired(struct timer_list *t)
+ }
+ #endif
+
++static void br_multicast_query_delay_expired(struct timer_list *t)
++{
++}
++
+ static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
+ struct br_ip *ip,
+ struct sk_buff *skb)
+@@ -2040,16 +2044,14 @@ void br_multicast_del_port(struct net_bridge_port *port)
+ {
+ struct net_bridge *br = port->br;
+ struct net_bridge_port_group *pg;
+- HLIST_HEAD(deleted_head);
+ struct hlist_node *n;
+
+ /* Take care of the remaining groups, only perm ones should be left */
+ spin_lock_bh(&br->multicast_lock);
+ hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+ br_multicast_find_del_pg(br, pg);
+- hlist_move_list(&br->mcast_gc_list, &deleted_head);
+ spin_unlock_bh(&br->multicast_lock);
+- br_multicast_gc(&deleted_head);
++ flush_work(&br->mcast_gc_work);
+ br_multicast_port_ctx_deinit(&port->multicast_ctx);
+ free_percpu(port->mcast_stats);
+ }
+@@ -3197,7 +3199,7 @@ br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
+ unsigned long max_delay)
+ {
+ if (!timer_pending(&query->timer))
+- query->delay_time = jiffies + max_delay;
++ mod_timer(&query->delay_timer, jiffies + max_delay);
+
+ mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
+ }
+@@ -4040,13 +4042,11 @@ void br_multicast_ctx_init(struct net_bridge *br,
+ brmctx->multicast_querier_interval = 255 * HZ;
+ brmctx->multicast_membership_interval = 260 * HZ;
+
+- brmctx->ip4_other_query.delay_time = 0;
+ brmctx->ip4_querier.port_ifidx = 0;
+ seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
+ brmctx->multicast_igmp_version = 2;
+ #if IS_ENABLED(CONFIG_IPV6)
+ brmctx->multicast_mld_version = 1;
+- brmctx->ip6_other_query.delay_time = 0;
+ brmctx->ip6_querier.port_ifidx = 0;
+ seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
+ #endif
+@@ -4055,6 +4055,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
+ br_ip4_multicast_local_router_expired, 0);
+ timer_setup(&brmctx->ip4_other_query.timer,
+ br_ip4_multicast_querier_expired, 0);
++ timer_setup(&brmctx->ip4_other_query.delay_timer,
++ br_multicast_query_delay_expired, 0);
+ timer_setup(&brmctx->ip4_own_query.timer,
+ br_ip4_multicast_query_expired, 0);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -4062,6 +4064,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
+ br_ip6_multicast_local_router_expired, 0);
+ timer_setup(&brmctx->ip6_other_query.timer,
+ br_ip6_multicast_querier_expired, 0);
++ timer_setup(&brmctx->ip6_other_query.delay_timer,
++ br_multicast_query_delay_expired, 0);
+ timer_setup(&brmctx->ip6_own_query.timer,
+ br_ip6_multicast_query_expired, 0);
+ #endif
+@@ -4196,10 +4200,12 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
+ {
+ del_timer_sync(&brmctx->ip4_mc_router_timer);
+ del_timer_sync(&brmctx->ip4_other_query.timer);
++ del_timer_sync(&brmctx->ip4_other_query.delay_timer);
+ del_timer_sync(&brmctx->ip4_own_query.timer);
+ #if IS_ENABLED(CONFIG_IPV6)
+ del_timer_sync(&brmctx->ip6_mc_router_timer);
+ del_timer_sync(&brmctx->ip6_other_query.timer);
++ del_timer_sync(&brmctx->ip6_other_query.delay_timer);
+ del_timer_sync(&brmctx->ip6_own_query.timer);
+ #endif
+ }
+@@ -4642,13 +4648,15 @@ int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
+ max_delay = brmctx->multicast_query_response_interval;
+
+ if (!timer_pending(&brmctx->ip4_other_query.timer))
+- brmctx->ip4_other_query.delay_time = jiffies + max_delay;
++ mod_timer(&brmctx->ip4_other_query.delay_timer,
++ jiffies + max_delay);
+
+ br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (!timer_pending(&brmctx->ip6_other_query.timer))
+- brmctx->ip6_other_query.delay_time = jiffies + max_delay;
++ mod_timer(&brmctx->ip6_other_query.delay_timer,
++ jiffies + max_delay);
+
+ br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
+ #endif
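
The multicast hunks replace the open-coded delay_time jiffies field with a dedicated delay_timer: "is the delay still running?" becomes timer_pending(), arming it becomes mod_timer(), and teardown gains a del_timer_sync(). A sketch of that timer lifecycle, assuming kernel context (names hypothetical):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct other_query {
    struct timer_list timer;        /* querier present */
    struct timer_list delay_timer;  /* startup delay */
};

/* Nothing to do on expiry: timer_pending() turning false is the event. */
static void query_delay_expired(struct timer_list *t)
{
}

static void query_init(struct other_query *q)
{
    timer_setup(&q->delay_timer, query_delay_expired, 0);
}

static void query_start_delay(struct other_query *q, unsigned long max_delay)
{
    if (!timer_pending(&q->timer))
        mod_timer(&q->delay_timer, jiffies + max_delay);
}

static void query_stop(struct other_query *q)
{
    del_timer_sync(&q->delay_timer);  /* also waits out a running handler */
}
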
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 033034d68f1f05..a1cfa75bbadb97 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -33,6 +33,7 @@
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+ #include <net/addrconf.h>
++#include <net/dst_metadata.h>
+ #include <net/route.h>
+ #include <net/netfilter/br_netfilter.h>
+ #include <net/netns/generic.h>
+@@ -43,6 +44,10 @@
+ #include <linux/sysctl.h>
+ #endif
+
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++#include <net/netfilter/nf_conntrack_core.h>
++#endif
++
+ static unsigned int brnf_net_id __read_mostly;
+
+ struct brnf_net {
+@@ -279,8 +284,17 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+
+ if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
+ READ_ONCE(neigh->hh.hh_len)) {
++ struct net_device *br_indev;
++
++ br_indev = nf_bridge_get_physindev(skb, net);
++ if (!br_indev) {
++ neigh_release(neigh);
++ goto free_skb;
++ }
++
+ neigh_hh_bridge(&neigh->hh, skb);
+- skb->dev = nf_bridge->physindev;
++ skb->dev = br_indev;
++
+ ret = br_handle_frame_finish(net, sk, skb);
+ } else {
+ /* the neighbour function below overwrites the complete
+@@ -352,12 +366,18 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
+ */
+ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+- struct net_device *dev = skb->dev;
++ struct net_device *dev = skb->dev, *br_indev;
+ struct iphdr *iph = ip_hdr(skb);
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ struct rtable *rt;
+ int err;
+
++ br_indev = nf_bridge_get_physindev(skb, net);
++ if (!br_indev) {
++ kfree_skb(skb);
++ return 0;
++ }
++
+ nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
+
+ if (nf_bridge->pkt_otherhost) {
+@@ -397,7 +417,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ } else {
+ if (skb_dst(skb)->dev == dev) {
+ bridged_dnat:
+- skb->dev = nf_bridge->physindev;
++ skb->dev = br_indev;
+ nf_bridge_update_protocol(skb);
+ nf_bridge_push_encap_header(skb);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING,
+@@ -410,7 +430,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ skb->pkt_type = PACKET_HOST;
+ }
+ } else {
+- rt = bridge_parent_rtable(nf_bridge->physindev);
++ rt = bridge_parent_rtable(br_indev);
+ if (!rt) {
+ kfree_skb(skb);
+ return 0;
+@@ -419,7 +439,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
+ skb_dst_set_noref(skb, &rt->dst);
+ }
+
+- skb->dev = nf_bridge->physindev;
++ skb->dev = br_indev;
+ nf_bridge_update_protocol(skb);
+ nf_bridge_push_encap_header(skb);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
+@@ -456,7 +476,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
+ }
+
+ nf_bridge->in_prerouting = 1;
+- nf_bridge->physindev = skb->dev;
++ nf_bridge->physinif = skb->dev->ifindex;
+ skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+@@ -538,6 +558,100 @@ static unsigned int br_nf_pre_routing(void *priv,
+ return NF_STOLEN;
+ }
+
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++/* conntracks' nf_confirm logic cannot handle cloned skbs referencing
++ * the same nf_conn entry, which will happen for multicast (broadcast)
++ * frames on bridges.
++ *
++ * Example:
++ * macvlan0
++ * br0
++ * ethX ethY
++ *
++ * ethX (or Y) receives multicast or broadcast packet containing
++ * an IP packet, not yet in conntrack table.
++ *
++ * 1. skb passes through bridge and fake-ip (br_netfilter) Prerouting.
++ * -> skb->_nfct now references an unconfirmed entry
++ * 2. skb is a broad/mcast packet. The bridge now passes clones out on each bridge
++ * interface.
++ * 3. skb gets passed up the stack.
++ * 4. In macvlan case, macvlan driver retains clone(s) of the mcast skb
++ * and schedules a work queue to send them out on the lower devices.
++ *
++ * The clone's skb->_nfct is not a copy; it is the same entry as the
++ * original skb. The macvlan rx handler then returns RX_HANDLER_PASS.
++ * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
++ *
++ * The macvlan broadcast worker and the normal confirm path will race.
++ *
++ * This race will not happen if step 2 already confirmed a clone. In that
++ * case later steps perform skb_clone() with skb->_nfct already confirmed (in
++ * hash table). This works fine.
++ *
++ * But such confirmation won't happen when eb/ip/nftables rules drop the
++ * packets before they reach the nf_confirm step in postrouting.
++ *
++ * Work around this problem by explicitly confirming the entry at
++ * LOCAL_IN time, before the upper layer has a chance to clone the unconfirmed
++ * entry.
++ *
++ */
++static unsigned int br_nf_local_in(void *priv,
++ struct sk_buff *skb,
++ const struct nf_hook_state *state)
++{
++ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
++ struct nf_conntrack *nfct = skb_nfct(skb);
++ const struct nf_ct_hook *ct_hook;
++ struct nf_conn *ct;
++ int ret;
++
++ if (promisc) {
++ nf_reset_ct(skb);
++ return NF_ACCEPT;
++ }
++
++ if (!nfct || skb->pkt_type == PACKET_HOST)
++ return NF_ACCEPT;
++
++ ct = container_of(nfct, struct nf_conn, ct_general);
++ if (likely(nf_ct_is_confirmed(ct)))
++ return NF_ACCEPT;
++
++ if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
++ nf_reset_ct(skb);
++ return NF_ACCEPT;
++ }
++
++ WARN_ON_ONCE(skb_shared(skb));
++
++ /* We can't call nf_confirm here, it would create a dependency
++ * on nf_conntrack module.
++ */
++ ct_hook = rcu_dereference(nf_ct_hook);
++ if (!ct_hook) {
++ skb->_nfct = 0ul;
++ nf_conntrack_put(nfct);
++ return NF_ACCEPT;
++ }
++
++ nf_bridge_pull_encap_header(skb);
++ ret = ct_hook->confirm(skb);
++ switch (ret & NF_VERDICT_MASK) {
++ case NF_STOLEN:
++ return NF_STOLEN;
++ default:
++ nf_bridge_push_encap_header(skb);
++ break;
++ }
++
++ ct = container_of(nfct, struct nf_conn, ct_general);
++ WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
++
++ return ret;
++}
++#endif
+
+ /* PF_BRIDGE/FORWARD *************************************************/
+ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+@@ -553,7 +667,11 @@ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff
+ if (skb->protocol == htons(ETH_P_IPV6))
+ nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
+
+- in = nf_bridge->physindev;
++ in = nf_bridge_get_physindev(skb, net);
++ if (!in) {
++ kfree_skb(skb);
++ return 0;
++ }
+ if (nf_bridge->pkt_otherhost) {
+ skb->pkt_type = PACKET_OTHERHOST;
+ nf_bridge->pkt_otherhost = false;
+@@ -754,6 +872,10 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
+ return br_dev_queue_push_xmit(net, sk, skb);
+ }
+
++ /* Fragmentation on metadata/template dst is not supported */
++ if (unlikely(!skb_valid_dst(skb)))
++ goto drop;
++
+ /* This is wrong! We should preserve the original fragment
+ * boundaries by preserving frag_list rather than refragmenting.
+ */
+@@ -897,6 +1019,13 @@ static unsigned int ip_sabotage_in(void *priv,
+ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+ {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
++ struct net_device *br_indev;
++
++ br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
++ if (!br_indev) {
++ kfree_skb(skb);
++ return;
++ }
+
+ skb_pull(skb, ETH_HLEN);
+ nf_bridge->bridged_dnat = 0;
+@@ -906,7 +1035,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+ skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
+ nf_bridge->neigh_header,
+ ETH_HLEN - ETH_ALEN);
+- skb->dev = nf_bridge->physindev;
++ skb->dev = br_indev;
+
+ nf_bridge->physoutdev = NULL;
+ br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
+@@ -936,6 +1065,14 @@ static const struct nf_hook_ops br_nf_ops[] = {
+ .hooknum = NF_BR_PRE_ROUTING,
+ .priority = NF_BR_PRI_BRNF,
+ },
++#if IS_ENABLED(CONFIG_NF_CONNTRACK)
++ {
++ .hook = br_nf_local_in,
++ .pf = NFPROTO_BRIDGE,
++ .hooknum = NF_BR_LOCAL_IN,
++ .priority = NF_BR_PRI_LAST,
++ },
++#endif
+ {
+ .hook = br_nf_forward_ip,
+ .pf = NFPROTO_BRIDGE,
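
The new br_nf_local_in entry is registered at NF_BR_PRI_LAST on NF_BR_LOCAL_IN, so it runs after every other bridge LOCAL_IN hook and only confirms whatever conntrack entry survived them. A sketch of how such an ops table is typically wired up, assuming kernel context (the hook body is a stub; names hypothetical):

#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

static unsigned int my_local_in(void *priv, struct sk_buff *skb,
                                const struct nf_hook_state *state)
{
    return NF_ACCEPT;   /* stub */
}

static const struct nf_hook_ops my_ops[] = {
    {
        .hook     = my_local_in,
        .pf       = NFPROTO_BRIDGE,
        .hooknum  = NF_BR_LOCAL_IN,
        .priority = NF_BR_PRI_LAST, /* after all other LOCAL_IN hooks */
    },
};

static int my_hooks_register(struct net *net)
{
    return nf_register_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
}
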
+diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
+index 550039dfc31a9c..ad268bd19d5b0c 100644
+--- a/net/bridge/br_netfilter_ipv6.c
++++ b/net/bridge/br_netfilter_ipv6.c
+@@ -102,9 +102,15 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ struct rtable *rt;
+- struct net_device *dev = skb->dev;
++ struct net_device *dev = skb->dev, *br_indev;
+ const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+
++ br_indev = nf_bridge_get_physindev(skb, net);
++ if (!br_indev) {
++ kfree_skb(skb);
++ return 0;
++ }
++
+ nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
+
+ if (nf_bridge->pkt_otherhost) {
+@@ -122,7 +128,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ }
+
+ if (skb_dst(skb)->dev == dev) {
+- skb->dev = nf_bridge->physindev;
++ skb->dev = br_indev;
+ nf_bridge_update_protocol(skb);
+ nf_bridge_push_encap_header(skb);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING,
+@@ -133,7 +139,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
+ skb->pkt_type = PACKET_HOST;
+ } else {
+- rt = bridge_parent_rtable(nf_bridge->physindev);
++ rt = bridge_parent_rtable(br_indev);
+ if (!rt) {
+ kfree_skb(skb);
+ return 0;
+@@ -142,7 +148,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
+ skb_dst_set_noref(skb, &rt->dst);
+ }
+
+- skb->dev = nf_bridge->physindev;
++ skb->dev = br_indev;
+ nf_bridge_update_protocol(skb);
+ nf_bridge_push_encap_header(skb);
+ br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index 10f0d33d8ccf2e..4b80ec5ae57003 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
+ u32 filter_mask, const struct net_device *dev,
+ bool getlink)
+ {
+- u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
++ u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
++ IF_OPER_DOWN;
+ struct nlattr *af = NULL;
+ struct net_bridge *br;
+ struct ifinfomsg *hdr;
+@@ -666,7 +667,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
+ {
+ u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
+
+- return br_info_notify(event, br, port, filter);
++ br_info_notify(event, br, port, filter);
+ }
+
+ /*
+@@ -1904,7 +1905,10 @@ int __init br_netlink_init(void)
+ {
+ int err;
+
+- br_vlan_rtnl_init();
++ err = br_vlan_rtnl_init();
++ if (err)
++ goto out;
++
+ rtnl_af_register(&br_af_ops);
+
+ err = rtnl_link_register(&br_link_ops);
+@@ -1915,6 +1919,7 @@ int __init br_netlink_init(void)
+
+ out_af:
+ rtnl_af_unregister(&br_af_ops);
++out:
+ return err;
+ }
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index a1f4acfa699499..72d80fd943a8a2 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -78,7 +78,7 @@ struct bridge_mcast_own_query {
+ /* other querier */
+ struct bridge_mcast_other_query {
+ struct timer_list timer;
+- unsigned long delay_time;
++ struct timer_list delay_timer;
+ };
+
+ /* selected querier */
+@@ -583,6 +583,7 @@ struct br_input_skb_cb {
+ #endif
+ u8 proxyarp_replied:1;
+ u8 src_port_isolated:1;
++ u8 promisc:1;
+ #ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ u8 vlan_filtered:1;
+ #endif
+@@ -1149,7 +1150,7 @@ __br_multicast_querier_exists(struct net_bridge_mcast *brmctx,
+ own_querier_enabled = false;
+ }
+
+- return time_is_before_jiffies(querier->delay_time) &&
++ return !timer_pending(&querier->delay_timer) &&
+ (own_querier_enabled || timer_pending(&querier->timer));
+ }
+
+@@ -1546,7 +1547,7 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
+ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
+ int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+ void *ptr);
+-void br_vlan_rtnl_init(void);
++int br_vlan_rtnl_init(void);
+ void br_vlan_rtnl_uninit(void);
+ void br_vlan_notify(const struct net_bridge *br,
+ const struct net_bridge_port *p,
+@@ -1777,8 +1778,9 @@ static inline int br_vlan_bridge_event(struct net_device *dev,
+ return 0;
+ }
+
+-static inline void br_vlan_rtnl_init(void)
++static inline int br_vlan_rtnl_init(void)
+ {
++ return 0;
+ }
+
+ static inline void br_vlan_rtnl_uninit(void)
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index ee84e783e1dff5..7b41ee8740cbba 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -595,21 +595,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
+ }
+
+ static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
++ struct net_device *dev,
++ unsigned long action,
+ enum switchdev_obj_id id,
+ const struct net_bridge_mdb_entry *mp,
+ struct net_device *orig_dev)
+ {
+- struct switchdev_obj_port_mdb *mdb;
++ struct switchdev_obj_port_mdb mdb = {
++ .obj = {
++ .id = id,
++ .orig_dev = orig_dev,
++ },
++ };
++ struct switchdev_obj_port_mdb *pmdb;
+
+- mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
+- if (!mdb)
+- return -ENOMEM;
++ br_switchdev_mdb_populate(&mdb, mp);
++
++ if (action == SWITCHDEV_PORT_OBJ_ADD &&
++ switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
++ /* This event is already in the deferred queue of
++ * events, so this replay must be elided, lest the
++ * driver receives duplicate events for it. This can
++ * only happen when replaying additions, since
++ * modifications are always immediately visible in
++ * br->mdb_list, whereas actual event delivery may be
++ * delayed.
++ */
++ return 0;
++ }
+
+- mdb->obj.id = id;
+- mdb->obj.orig_dev = orig_dev;
+- br_switchdev_mdb_populate(mdb, mp);
+- list_add_tail(&mdb->obj.list, mdb_list);
++ pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
++ if (!pmdb)
++ return -ENOMEM;
+
++ list_add_tail(&pmdb->obj.list, mdb_list);
+ return 0;
+ }
+
+@@ -677,51 +696,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ return 0;
+
+- /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
+- * because the write-side protection is br->multicast_lock. But we
+- * need to emulate the [ blocking ] calling context of a regular
+- * switchdev event, so since both br->multicast_lock and RCU read side
+- * critical sections are atomic, we have no choice but to pick the RCU
+- * read side lock, queue up all our events, leave the critical section
+- * and notify switchdev from blocking context.
++ if (adding)
++ action = SWITCHDEV_PORT_OBJ_ADD;
++ else
++ action = SWITCHDEV_PORT_OBJ_DEL;
++
++ /* br_switchdev_mdb_queue_one() will take care to not queue a
++ * replay of an event that is already pending in the switchdev
++ * deferred queue. In order to safely determine that, there
++ * must be no new deferred MDB notifications enqueued for the
++ * duration of the MDB scan. Therefore, grab the write-side
++ * lock to avoid racing with any concurrent IGMP/MLD snooping.
+ */
+- rcu_read_lock();
++ spin_lock_bh(&br->multicast_lock);
+
+- hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
++ hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
+ struct net_bridge_port_group __rcu * const *pp;
+ const struct net_bridge_port_group *p;
+
+ if (mp->host_joined) {
+- err = br_switchdev_mdb_queue_one(&mdb_list,
++ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
+ SWITCHDEV_OBJ_ID_HOST_MDB,
+ mp, br_dev);
+ if (err) {
+- rcu_read_unlock();
++ spin_unlock_bh(&br->multicast_lock);
+ goto out_free_mdb;
+ }
+ }
+
+- for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
++ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+ pp = &p->next) {
+ if (p->key.port->dev != dev)
+ continue;
+
+- err = br_switchdev_mdb_queue_one(&mdb_list,
++ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
+ SWITCHDEV_OBJ_ID_PORT_MDB,
+ mp, dev);
+ if (err) {
+- rcu_read_unlock();
++ spin_unlock_bh(&br->multicast_lock);
+ goto out_free_mdb;
+ }
+ }
+ }
+
+- rcu_read_unlock();
+-
+- if (adding)
+- action = SWITCHDEV_PORT_OBJ_ADD;
+- else
+- action = SWITCHDEV_PORT_OBJ_DEL;
++ spin_unlock_bh(&br->multicast_lock);
+
+ list_for_each_entry(obj, &mdb_list, list) {
+ err = br_switchdev_mdb_replay_one(nb, dev,
+@@ -786,6 +804,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
+ br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
+
+ br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
++
++ /* Make sure that the device leaving this bridge has seen all
++ * relevant events before it is disassociated. In the normal
++ * case, when the device is directly attached to the bridge,
++ * this is covered by del_nbp(). If the association was indirect
++ * however, e.g. via a team or bond, and the device is leaving
++ * that intermediate device, then the bridge port remains in
++ * place.
++ */
++ switchdev_deferred_process();
+ }
+
+ /* Let the bridge know that this port is offloaded, so that it can assign a
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 15f44d026e75a8..be714b4d7b4307 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -2296,19 +2296,18 @@ static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
+ return err;
+ }
+
+-void br_vlan_rtnl_init(void)
++static const struct rtnl_msg_handler br_vlan_rtnl_msg_handlers[] = {
++ {THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN, br_vlan_rtm_process, NULL, 0},
++ {THIS_MODULE, PF_BRIDGE, RTM_DELVLAN, br_vlan_rtm_process, NULL, 0},
++ {THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL, br_vlan_rtm_dump, 0},
++};
++
++int br_vlan_rtnl_init(void)
+ {
+- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
+- br_vlan_rtm_dump, 0);
+- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
+- br_vlan_rtm_process, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
+- br_vlan_rtm_process, NULL, 0);
++ return rtnl_register_many(br_vlan_rtnl_msg_handlers);
+ }
+
+ void br_vlan_rtnl_uninit(void)
+ {
+- rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
+- rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
+- rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
++ rtnl_unregister_many(br_vlan_rtnl_msg_handlers);
+ }
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index aa23479b20b2ae..ed62c1026fe93e 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1111,6 +1111,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ struct ebt_table_info *newinfo;
+ struct ebt_replace tmp;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -1423,6 +1425,8 @@ static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
+ {
+ struct ebt_replace hlp;
+
++ if (len < sizeof(hlp))
++ return -EINVAL;
+ if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
+ return -EFAULT;
+
+@@ -2352,6 +2356,8 @@ static int compat_update_counters(struct net *net, sockptr_t arg,
+ {
+ struct compat_ebt_replace hlp;
+
++ if (len < sizeof(hlp))
++ return -EINVAL;
+ if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
+ return -EFAULT;
+
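
All three ebtables hunks add the same guard: reject the request when the user-supplied len cannot even cover the fixed-size header that copy_from_sockptr() is about to read. A userspace sketch of the guard (names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct replace_hdr {
    unsigned int num_entries;
    unsigned int total_size;
};

/* Copy the fixed header out of a caller-supplied buffer, but only after
 * checking that the claimed length actually covers it. */
static int parse_replace(const void *arg, size_t len, struct replace_hdr *out)
{
    if (len < sizeof(*out))
        return -EINVAL;  /* short buffer: the copy would read past its end */
    memcpy(out, arg, sizeof(*out));
    return 0;
}

int main(void)
{
    struct replace_hdr hdr = { 2, 128 }, out;

    printf("full:  %d\n", parse_replace(&hdr, sizeof(hdr), &out)); /* 0 */
    printf("short: %d\n", parse_replace(&hdr, 4, &out));           /* -EINVAL */
    return 0;
}
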
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 71056ee847736b..6ef04f9fe481be 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -37,7 +37,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
+ ktime_t tstamp = skb->tstamp;
+ struct ip_frag_state state;
+ struct iphdr *iph;
+- int err;
++ int err = 0;
+
+ /* for offloaded checksums cleanup checksum before fragmentation */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+@@ -291,6 +291,36 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
+ return nf_conntrack_in(skb, &bridge_state);
+ }
+
++static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
++ const struct nf_hook_state *state)
++{
++ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
++ struct nf_conntrack *nfct = skb_nfct(skb);
++ struct nf_conn *ct;
++
++ if (promisc) {
++ nf_reset_ct(skb);
++ return NF_ACCEPT;
++ }
++
++ if (!nfct || skb->pkt_type == PACKET_HOST)
++ return NF_ACCEPT;
++
++ /* nf_conntrack_confirm() cannot handle concurrent clones;
++ * this happens for broad/multicast frames with e.g. macvlan on top
++ * of the bridge device.
++ */
++ ct = container_of(nfct, struct nf_conn, ct_general);
++ if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
++ return NF_ACCEPT;
++
++ /* let inet prerouting call conntrack again */
++ skb->_nfct = 0;
++ nf_ct_put(ct);
++
++ return NF_ACCEPT;
++}
++
+ static void nf_ct_bridge_frag_save(struct sk_buff *skb,
+ struct nf_bridge_frag_data *data)
+ {
+@@ -385,6 +415,12 @@ static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = {
+ .hooknum = NF_BR_PRE_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK,
+ },
++ {
++ .hook = nf_ct_bridge_in,
++ .pf = NFPROTO_BRIDGE,
++ .hooknum = NF_BR_LOCAL_IN,
++ .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
++ },
+ {
+ .hook = nf_ct_bridge_post,
+ .pf = NFPROTO_BRIDGE,
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 9168114fc87f7b..a1f5db0fd5d4fd 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1428,6 +1428,12 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+
+ /* remove device reference, if this is our bound device */
+ if (bo->bound && bo->ifindex == dev->ifindex) {
++#if IS_ENABLED(CONFIG_PROC_FS)
++ if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
++ remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
++ bo->bcm_proc_read = NULL;
++ }
++#endif
+ bo->bound = 0;
+ bo->ifindex = 0;
+ notify_enodev = 1;
+diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
+index 16af1a7f80f60e..31a93cae5111b5 100644
+--- a/net/can/j1939/j1939-priv.h
++++ b/net/can/j1939/j1939-priv.h
+@@ -86,7 +86,7 @@ struct j1939_priv {
+ unsigned int tp_max_packet_size;
+
+ /* lock for j1939_socks list */
+- spinlock_t j1939_socks_lock;
++ rwlock_t j1939_socks_lock;
+ struct list_head j1939_socks;
+
+ struct kref rx_kref;
+@@ -301,6 +301,7 @@ struct j1939_sock {
+
+ int ifindex;
+ struct j1939_addr addr;
++ spinlock_t filters_lock;
+ struct j1939_filter *filters;
+ int nfilters;
+ pgn_t pgn_rx_filter;
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index ecff1c947d683b..7e8a20f2fc42b5 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
+ /* CAN_HDR: #bytes before can_frame data part */
+ #define J1939_CAN_HDR (offsetof(struct can_frame, data))
+
+-/* CAN_FTR: #bytes beyond data part */
+-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
+- sizeof(((struct can_frame *)0)->data))
+-
+ /* lowest layer */
+ static void j1939_can_recv(struct sk_buff *iskb, void *data)
+ {
+@@ -274,7 +270,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
+ return ERR_PTR(-ENOMEM);
+
+ j1939_tp_init(priv);
+- spin_lock_init(&priv->j1939_socks_lock);
++ rwlock_init(&priv->j1939_socks_lock);
+ INIT_LIST_HEAD(&priv->j1939_socks);
+
+ mutex_lock(&j1939_netdev_lock);
+@@ -342,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
+ memset(cf, 0, J1939_CAN_HDR);
+
+ /* make it a full can frame again */
+- skb_put(skb, J1939_CAN_FTR + (8 - dlc));
++ skb_put_zero(skb, 8 - dlc);
+
+ canid = CAN_EFF_FLAG |
+ (skcb->priority << 26) |
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index b28c976f52a0a1..1f49d6164ea1da 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -80,16 +80,16 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
+ jsk->state |= J1939_SOCK_BOUND;
+ j1939_priv_get(priv);
+
+- spin_lock_bh(&priv->j1939_socks_lock);
++ write_lock_bh(&priv->j1939_socks_lock);
+ list_add_tail(&jsk->list, &priv->j1939_socks);
+- spin_unlock_bh(&priv->j1939_socks_lock);
++ write_unlock_bh(&priv->j1939_socks_lock);
+ }
+
+ static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
+ {
+- spin_lock_bh(&priv->j1939_socks_lock);
++ write_lock_bh(&priv->j1939_socks_lock);
+ list_del_init(&jsk->list);
+- spin_unlock_bh(&priv->j1939_socks_lock);
++ write_unlock_bh(&priv->j1939_socks_lock);
+
+ j1939_priv_put(priv);
+ jsk->state &= ~J1939_SOCK_BOUND;
+@@ -262,12 +262,17 @@ static bool j1939_sk_match_dst(struct j1939_sock *jsk,
+ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
+ const struct j1939_sk_buff_cb *skcb)
+ {
+- const struct j1939_filter *f = jsk->filters;
+- int nfilter = jsk->nfilters;
++ const struct j1939_filter *f;
++ int nfilter;
++
++ spin_lock_bh(&jsk->filters_lock);
++
++ f = jsk->filters;
++ nfilter = jsk->nfilters;
+
+ if (!nfilter)
+ /* receive all when no filters are assigned */
+- return true;
++ goto filter_match_found;
+
+ for (; nfilter; ++f, --nfilter) {
+ if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
+@@ -276,9 +281,15 @@ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
+ continue;
+ if ((skcb->addr.src_name & f->name_mask) != f->name)
+ continue;
+- return true;
++ goto filter_match_found;
+ }
++
++ spin_unlock_bh(&jsk->filters_lock);
+ return false;
++
++filter_match_found:
++ spin_unlock_bh(&jsk->filters_lock);
++ return true;
+ }
+
+ static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
+@@ -329,13 +340,13 @@ bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
+ struct j1939_sock *jsk;
+ bool match = false;
+
+- spin_lock_bh(&priv->j1939_socks_lock);
++ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ match = j1939_sk_recv_match_one(jsk, skcb);
+ if (match)
+ break;
+ }
+- spin_unlock_bh(&priv->j1939_socks_lock);
++ read_unlock_bh(&priv->j1939_socks_lock);
+
+ return match;
+ }
+@@ -344,11 +355,11 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
+ {
+ struct j1939_sock *jsk;
+
+- spin_lock_bh(&priv->j1939_socks_lock);
++ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ j1939_sk_recv_one(jsk, skb);
+ }
+- spin_unlock_bh(&priv->j1939_socks_lock);
++ read_unlock_bh(&priv->j1939_socks_lock);
+ }
+
+ static void j1939_sk_sock_destruct(struct sock *sk)
+@@ -401,6 +412,7 @@ static int j1939_sk_init(struct sock *sk)
+ atomic_set(&jsk->skb_pending, 0);
+ spin_lock_init(&jsk->sk_session_queue_lock);
+ INIT_LIST_HEAD(&jsk->sk_session_queue);
++ spin_lock_init(&jsk->filters_lock);
+
+ /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
+ sock_set_flag(sk, SOCK_RCU_FREE);
+@@ -703,9 +715,11 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
+ }
+
+ lock_sock(&jsk->sk);
++ spin_lock_bh(&jsk->filters_lock);
+ ofilters = jsk->filters;
+ jsk->filters = filters;
+ jsk->nfilters = count;
++ spin_unlock_bh(&jsk->filters_lock);
+ release_sock(&jsk->sk);
+ kfree(ofilters);
+ return 0;
+@@ -1080,12 +1094,12 @@ void j1939_sk_errqueue(struct j1939_session *session,
+ }
+
+ /* spread RX notifications to all sockets subscribed to this session */
+- spin_lock_bh(&priv->j1939_socks_lock);
++ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ if (j1939_sk_recv_match_one(jsk, &session->skcb))
+ __j1939_sk_errqueue(session, &jsk->sk, type);
+ }
+- spin_unlock_bh(&priv->j1939_socks_lock);
++ read_unlock_bh(&priv->j1939_socks_lock);
+ };
+
+ void j1939_sk_send_loop_abort(struct sock *sk, int err)
+@@ -1273,7 +1287,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
+ struct j1939_sock *jsk;
+ int error_code = ENETDOWN;
+
+- spin_lock_bh(&priv->j1939_socks_lock);
++ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ jsk->sk.sk_err = error_code;
+ if (!sock_flag(&jsk->sk, SOCK_DEAD))
+@@ -1281,7 +1295,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
+
+ j1939_sk_queue_drop_all(priv, jsk, error_code);
+ }
+- spin_unlock_bh(&priv->j1939_socks_lock);
++ read_unlock_bh(&priv->j1939_socks_lock);
+ }
+
+ static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
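
j1939_socks_lock becomes a rwlock_t: receive-side matching only reads the socket list and may now proceed on several CPUs at once, while the rare bind/unbind paths take the write side. A userspace pthread sketch of the same reader/writer split (names hypothetical; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t socks_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nsocks;

static void add_sock(void)          /* rare: writer */
{
    pthread_rwlock_wrlock(&socks_lock);
    nsocks++;
    pthread_rwlock_unlock(&socks_lock);
}

static int match_sock(void)         /* hot path: many concurrent readers */
{
    int n;

    pthread_rwlock_rdlock(&socks_lock);
    n = nsocks;                     /* the real code walks the list here */
    pthread_rwlock_unlock(&socks_lock);
    return n;
}

int main(void)
{
    add_sock();
    printf("%d\n", match_sock());
    return 0;
}
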
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index fe3df23a259578..319f47df33300c 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1179,10 +1179,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
+ break;
+ case -ENETDOWN:
+ /* In this case we should get a netdev_event(), all active
+- * sessions will be cleared by
+- * j1939_cancel_all_active_sessions(). So handle this as an
+- * error, but let j1939_cancel_all_active_sessions() do the
+- * cleanup including propagation of the error to user space.
++ * sessions will be cleared by j1939_cancel_active_session().
++ * So handle this as an error, but let
++ * j1939_cancel_active_session() do the cleanup including
++ * propagation of the error to user space.
+ */
+ break;
+ case -EOVERFLOW:
+@@ -1593,8 +1593,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
+ struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
+ struct j1939_session *session;
+ const u8 *dat;
++ int len, ret;
+ pgn_t pgn;
+- int len;
+
+ netdev_dbg(priv->ndev, "%s\n", __func__);
+
+@@ -1653,7 +1653,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
+ session->tskey = priv->rx_tskey++;
+ j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
+
+- WARN_ON_ONCE(j1939_session_activate(session));
++ ret = j1939_session_activate(session);
++ if (ret) {
++ /* Entering this scope indicates an issue with the J1939 bus.
++ * Possible scenarios include:
++ * - A time lapse occurred, and a new session was initiated
++ * due to another packet being sent correctly. This could
++ * have been caused by an overly long interrupt, a debugger, or being
++ * out-scheduled by another task.
++ * - The bus is receiving numerous erroneous packets, either
++ * from a malfunctioning device or during a test scenario.
++ */
++ netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
++ __func__, session, skcb.addr.sa, skcb.addr.da);
++ j1939_session_put(session);
++ return NULL;
++ }
+
+ return session;
+ }
+@@ -1681,6 +1696,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
+
+ j1939_session_timers_cancel(session);
+ j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
++ if (session->transmission)
++ j1939_session_deactivate_activate_next(session);
+
+ return -EBUSY;
+ }
+diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
+index f9a50d7f0d2046..0cb61c76b9b87d 100644
+--- a/net/ceph/messenger_v1.c
++++ b/net/ceph/messenger_v1.c
+@@ -160,8 +160,9 @@ static size_t sizeof_footer(struct ceph_connection *con)
+ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
+ {
+ /* Initialize data cursor if it's not a sparse read */
+- if (!msg->sparse_read)
+- ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
++ u64 len = msg->sparse_read_total ? : data_len;
++
++ ceph_msg_data_cursor_init(&msg->cursor, msg, len);
+ }
+
+ /*
+@@ -991,7 +992,7 @@ static inline int read_partial_message_section(struct ceph_connection *con,
+ return read_partial_message_chunk(con, section, sec_len, crc);
+ }
+
+-static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
++static int read_partial_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
+ {
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
+ bool do_bounce = ceph_test_opt(from_msgr(con->msgr), RXBOUNCE);
+@@ -1026,7 +1027,7 @@ static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
+ return 1;
+ }
+
+-static int read_sparse_msg_data(struct ceph_connection *con)
++static int read_partial_sparse_msg_data(struct ceph_connection *con)
+ {
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
+ bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
+@@ -1036,31 +1037,31 @@ static int read_sparse_msg_data(struct ceph_connection *con)
+ if (do_datacrc)
+ crc = con->in_data_crc;
+
+- do {
++ while (cursor->total_resid) {
+ if (con->v1.in_sr_kvec.iov_base)
+ ret = read_partial_message_chunk(con,
+ &con->v1.in_sr_kvec,
+ con->v1.in_sr_len,
+ &crc);
+ else if (cursor->sr_resid > 0)
+- ret = read_sparse_msg_extent(con, &crc);
+-
+- if (ret <= 0) {
+- if (do_datacrc)
+- con->in_data_crc = crc;
+- return ret;
+- }
++ ret = read_partial_sparse_msg_extent(con, &crc);
++ if (ret <= 0)
++ break;
+
+ memset(&con->v1.in_sr_kvec, 0, sizeof(con->v1.in_sr_kvec));
+ ret = con->ops->sparse_read(con, cursor,
+ (char **)&con->v1.in_sr_kvec.iov_base);
++ if (ret <= 0) {
++ ret = ret ? ret : 1; /* must return > 0 to indicate success */
++ break;
++ }
+ con->v1.in_sr_len = ret;
+- } while (ret > 0);
++ }
+
+ if (do_datacrc)
+ con->in_data_crc = crc;
+
+- return ret < 0 ? ret : 1; /* must return > 0 to indicate success */
++ return ret;
+ }
+
+ static int read_partial_msg_data(struct ceph_connection *con)
+@@ -1253,8 +1254,8 @@ static int read_partial_message(struct ceph_connection *con)
+ if (!m->num_data_items)
+ return -EIO;
+
+- if (m->sparse_read)
+- ret = read_sparse_msg_data(con);
++ if (m->sparse_read_total)
++ ret = read_partial_sparse_msg_data(con);
+ else if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
+ ret = read_partial_msg_data_bounce(con);
+ else
+diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
+index d09a39ff2cf041..f9ed6bf6c4776a 100644
+--- a/net/ceph/messenger_v2.c
++++ b/net/ceph/messenger_v2.c
+@@ -1132,7 +1132,7 @@ static int decrypt_tail(struct ceph_connection *con)
+ struct sg_table enc_sgt = {};
+ struct sg_table sgt = {};
+ struct page **pages = NULL;
+- bool sparse = con->in_msg->sparse_read;
++ bool sparse = !!con->in_msg->sparse_read_total;
+ int dpos = 0;
+ int tail_len;
+ int ret;
+@@ -2038,6 +2038,9 @@ static int prepare_sparse_read_data(struct ceph_connection *con)
+ if (!con_secure(con))
+ con->in_data_crc = -1;
+
++ ceph_msg_data_cursor_init(&con->v2.in_cursor, msg,
++ msg->sparse_read_total);
++
+ reset_in_kvecs(con);
+ con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
+ con->v2.data_len_remain = data_len(msg);
+@@ -2064,7 +2067,7 @@ static int prepare_read_tail_plain(struct ceph_connection *con)
+ }
+
+ if (data_len(msg)) {
+- if (msg->sparse_read)
++ if (msg->sparse_read_total)
+ con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
+ else
+ con->v2.in_state = IN_S_PREPARE_READ_DATA;
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index faabad6603db29..68f9552931776f 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work)
+ struct ceph_mon_client *monc =
+ container_of(work, struct ceph_mon_client, delayed_work.work);
+
+- dout("monc delayed_work\n");
+ mutex_lock(&monc->mutex);
++ dout("%s mon%d\n", __func__, monc->cur_mon);
++ if (monc->cur_mon < 0) {
++ goto out;
++ }
++
+ if (monc->hunting) {
+ dout("%s continuing hunt\n", __func__);
+ reopen_session(monc);
+ } else {
+ int is_auth = ceph_auth_is_authenticated(monc->auth);
++
++ dout("%s is_authed %d\n", __func__, is_auth);
+ if (ceph_con_keepalive_expired(&monc->con,
+ CEPH_MONC_PING_TIMEOUT)) {
+ dout("monc keepalive timeout\n");
+@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work)
+ }
+ }
+ __schedule_delayed(monc);
++
++out:
+ mutex_unlock(&monc->mutex);
+ }
+
+@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
+ void ceph_monc_stop(struct ceph_mon_client *monc)
+ {
+ dout("stop\n");
+- cancel_delayed_work_sync(&monc->delayed_work);
+
+ mutex_lock(&monc->mutex);
+ __close_session(monc);
++ monc->hunting = false;
+ monc->cur_mon = -1;
+ mutex_unlock(&monc->mutex);
+
++ cancel_delayed_work_sync(&monc->delayed_work);
++
+ /*
+ * flush msgr queue before we destroy ourselves to ensure that:
+ * - any work that references our embedded con is finished.
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index d3a759e052c81f..3babcd5e65e16d 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -5510,7 +5510,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
+ }
+
+ m = ceph_msg_get(req->r_reply);
+- m->sparse_read = (bool)srlen;
++ m->sparse_read_total = srlen;
+
+ dout("get_reply tid %lld %p\n", tid, m);
+
+@@ -5777,11 +5777,8 @@ static int prep_next_sparse_read(struct ceph_connection *con,
+ }
+
+ if (o->o_sparse_op_idx < 0) {
+- u64 srlen = sparse_data_requested(req);
+-
+- dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
+- __func__, o->o_osd, srlen);
+- ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
++ dout("%s: [%d] starting new sparse read req\n",
++ __func__, o->o_osd);
+ } else {
+ u64 end;
+
+@@ -5859,8 +5856,8 @@ static int osd_sparse_read(struct ceph_connection *con,
+ struct ceph_osd *o = con->private;
+ struct ceph_sparse_read *sr = &o->o_sparse_read;
+ u32 count = sr->sr_count;
+- u64 eoff, elen;
+- int ret;
++ u64 eoff, elen, len = 0;
++ int i, ret;
+
+ switch (sr->sr_state) {
+ case CEPH_SPARSE_READ_HDR:
+@@ -5912,8 +5909,20 @@ static int osd_sparse_read(struct ceph_connection *con,
+ convert_extent_map(sr);
+ ret = sizeof(sr->sr_datalen);
+ *pbuf = (char *)&sr->sr_datalen;
+- sr->sr_state = CEPH_SPARSE_READ_DATA;
++ sr->sr_state = CEPH_SPARSE_READ_DATA_PRE;
+ break;
++ case CEPH_SPARSE_READ_DATA_PRE:
++ /* Convert sr_datalen to host-endian */
++ sr->sr_datalen = le32_to_cpu((__force __le32)sr->sr_datalen);
++ for (i = 0; i < count; i++)
++ len += sr->sr_extent[i].len;
++ if (sr->sr_datalen != len) {
++ pr_warn_ratelimited("data len %u != extent len %llu\n",
++ sr->sr_datalen, len);
++ return -EREMOTEIO;
++ }
++ sr->sr_state = CEPH_SPARSE_READ_DATA;
++ fallthrough;
+ case CEPH_SPARSE_READ_DATA:
+ if (sr->sr_index >= count) {
+ sr->sr_state = CEPH_SPARSE_READ_HDR;
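
The new CEPH_SPARSE_READ_DATA_PRE state converts the on-wire (little-endian) data length to host order and cross-checks it against the sum of the advertised extent lengths before any payload is trusted. A userspace sketch of that validation (names hypothetical; the real code returns -EREMOTEIO):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct extent {
    uint64_t off, len;
};

/* Returns 0 when the little-endian wire length matches the extents,
 * -1 otherwise. */
static int check_sparse_len(uint32_t wire_datalen_le,
                            const struct extent *ext, unsigned int count)
{
    uint32_t datalen = le32toh(wire_datalen_le);
    uint64_t len = 0;
    unsigned int i;

    for (i = 0; i < count; i++)
        len += ext[i].len;

    if (datalen != len) {
        fprintf(stderr, "data len %u != extent len %llu\n",
                datalen, (unsigned long long)len);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct extent ext[] = { { 0, 4096 }, { 8192, 512 } };

    printf("%d\n", check_sparse_len(htole32(4608), ext, 2)); /* 0: lengths agree */
    return 0;
}
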
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 176eb58347461b..ef4e9e423d393b 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -434,15 +434,23 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+
+ end = start + skb_frag_size(frag);
+ if ((copy = end - offset) > 0) {
+- struct page *page = skb_frag_page(frag);
+- u8 *vaddr = kmap(page);
++ u32 p_off, p_len, copied;
++ struct page *p;
++ u8 *vaddr;
+
+ if (copy > len)
+ copy = len;
+- n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+- vaddr + skb_frag_off(frag) + offset - start,
+- copy, data, to);
+- kunmap(page);
++
++ n = 0;
++ skb_frag_foreach_page(frag,
++ skb_frag_off(frag) + offset - start,
++ copy, p, p_off, p_len, copied) {
++ vaddr = kmap_local_page(p);
++ n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
++ vaddr + p_off, p_len, data, to);
++ kunmap_local(vaddr);
++ }
++
+ offset += n;
+ if (n != copy)
+ goto short_copy;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9f3f8930c69147..70f757707f1a2f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2290,7 +2290,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+ rcu_read_lock();
+ again:
+ list_for_each_entry_rcu(ptype, ptype_list, list) {
+- if (ptype->ignore_outgoing)
++ if (READ_ONCE(ptype->ignore_outgoing))
+ continue;
+
+ /* Never send packets back to the socket
+@@ -3500,6 +3500,9 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ if (gso_segs > READ_ONCE(dev->gso_max_segs))
+ return features & ~NETIF_F_GSO_MASK;
+
++ if (unlikely(skb->len >= netif_get_gso_max_size(dev, skb)))
++ return features & ~NETIF_F_GSO_MASK;
++
+ if (!skb_shinfo(skb)->gso_type) {
+ skb_warn_bad_offload(skb);
+ return features & ~NETIF_F_GSO_MASK;
+@@ -3743,7 +3746,7 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
+ sizeof(_tcphdr), &_tcphdr);
+ if (likely(th))
+ hdr_len += __tcp_hdrlen(th);
+- } else {
++ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+ struct udphdr _udphdr;
+
+ if (skb_header_pointer(skb, hdr_len,
+@@ -3751,10 +3754,14 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
+ hdr_len += sizeof(struct udphdr);
+ }
+
+- if (shinfo->gso_type & SKB_GSO_DODGY)
+- gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+- shinfo->gso_size);
++ if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
++ int payload = skb->len - hdr_len;
+
++ /* Malicious packet. */
++ if (payload <= 0)
++ return;
++ gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
++ }
+ qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
+ }
+ }
+@@ -6686,6 +6693,8 @@ static int napi_threaded_poll(void *data)
+ void *have;
+
+ while (!napi_thread_wait(napi)) {
++ unsigned long last_qs = jiffies;
++
+ for (;;) {
+ bool repoll = false;
+
+@@ -6710,6 +6719,7 @@ static int napi_threaded_poll(void *data)
+ if (!repoll)
+ break;
+
++ rcu_softirq_qs_periodic(last_qs);
+ cond_resched();
+ }
+ }
+@@ -10050,6 +10060,54 @@ void netif_tx_stop_all_queues(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_tx_stop_all_queues);
+
++static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
++{
++ void __percpu *v;
++
++ /* Drivers implementing ndo_get_peer_dev must support tstat
++ * accounting, so that skb_do_redirect() can bump the dev's
++ * RX stats upon network namespace switch.
++ */
++ if (dev->netdev_ops->ndo_get_peer_dev &&
++ dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
++ return -EOPNOTSUPP;
++
++ switch (dev->pcpu_stat_type) {
++ case NETDEV_PCPU_STAT_NONE:
++ return 0;
++ case NETDEV_PCPU_STAT_LSTATS:
++ v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
++ break;
++ case NETDEV_PCPU_STAT_TSTATS:
++ v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
++ break;
++ case NETDEV_PCPU_STAT_DSTATS:
++ v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return v ? 0 : -ENOMEM;
++}
++
++static void netdev_do_free_pcpu_stats(struct net_device *dev)
++{
++ switch (dev->pcpu_stat_type) {
++ case NETDEV_PCPU_STAT_NONE:
++ return;
++ case NETDEV_PCPU_STAT_LSTATS:
++ free_percpu(dev->lstats);
++ break;
++ case NETDEV_PCPU_STAT_TSTATS:
++ free_percpu(dev->tstats);
++ break;
++ case NETDEV_PCPU_STAT_DSTATS:
++ free_percpu(dev->dstats);
++ break;
++ }
++}
++
+ /**
+ * register_netdevice() - register a network device
+ * @dev: device to register
+@@ -10110,9 +10168,13 @@ int register_netdevice(struct net_device *dev)
+ goto err_uninit;
+ }
+
++ ret = netdev_do_alloc_pcpu_stats(dev);
++ if (ret)
++ goto err_uninit;
++
+ ret = dev_index_reserve(net, dev->ifindex);
+ if (ret < 0)
+- goto err_uninit;
++ goto err_free_pcpu;
+ dev->ifindex = ret;
+
+ /* Transfer changeable features to wanted_features and enable
+@@ -10218,6 +10280,8 @@ int register_netdevice(struct net_device *dev)
+ call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
+ err_ifindex_release:
+ dev_index_release(net, dev->ifindex);
++err_free_pcpu:
++ netdev_do_free_pcpu_stats(dev);
+ err_uninit:
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+@@ -10370,8 +10434,9 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
+ rebroadcast_time = jiffies;
+ }
+
++ rcu_barrier();
++
+ if (!wait) {
+- rcu_barrier();
+ wait = WAIT_REFS_MIN_MSECS;
+ } else {
+ msleep(wait);
+@@ -10470,6 +10535,7 @@ void netdev_run_todo(void)
+ WARN_ON(rcu_access_pointer(dev->ip_ptr));
+ WARN_ON(rcu_access_pointer(dev->ip6_ptr));
+
++ netdev_do_free_pcpu_stats(dev);
+ if (dev->priv_destructor)
+ dev->priv_destructor(dev);
+ if (dev->needs_free_netdev)
+@@ -11433,6 +11499,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
+
+ static void __net_exit default_device_exit_net(struct net *net)
+ {
++ struct netdev_name_node *name_node, *tmp;
+ struct net_device *dev, *aux;
+ /*
+ * Push all migratable network devices back to the
+@@ -11455,6 +11522,14 @@ static void __net_exit default_device_exit_net(struct net *net)
+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ if (netdev_name_in_use(&init_net, fb_name))
+ snprintf(fb_name, IFNAMSIZ, "dev%%d");
++
++ netdev_for_each_altname_safe(dev, name_node, tmp)
++ if (netdev_name_in_use(&init_net, name_node->name)) {
++ netdev_name_node_del(name_node);
++ synchronize_rcu();
++ __netdev_name_node_alt_destroy(name_node);
++ }
++
+ err = dev_change_net_namespace(dev, &init_net, fb_name);
+ if (err) {
+ pr_emerg("%s: failed to move %s to init_net: %d\n",
+diff --git a/net/core/dev.h b/net/core/dev.h
+index fa2e9c5c412242..f2037d402144f4 100644
+--- a/net/core/dev.h
++++ b/net/core/dev.h
+@@ -64,6 +64,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
+
+ #define netdev_for_each_altname(dev, namenode) \
+ list_for_each_entry((namenode), &(dev)->name_node->list, list)
++#define netdev_for_each_altname_safe(dev, namenode, next) \
++ list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
++ list)
+
+ int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index aff31cd944c29d..58843a52bad0e7 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -74,7 +74,7 @@ struct net_dm_hw_entries {
+ };
+
+ struct per_cpu_dm_data {
+- spinlock_t lock; /* Protects 'skb', 'hw_entries' and
++ raw_spinlock_t lock; /* Protects 'skb', 'hw_entries' and
+ * 'send_timer'
+ */
+ union {
+@@ -168,9 +168,9 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ err:
+ mod_timer(&data->send_timer, jiffies + HZ / 10);
+ out:
+- spin_lock_irqsave(&data->lock, flags);
++ raw_spin_lock_irqsave(&data->lock, flags);
+ swap(data->skb, skb);
+- spin_unlock_irqrestore(&data->lock, flags);
++ raw_spin_unlock_irqrestore(&data->lock, flags);
+
+ if (skb) {
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+@@ -183,7 +183,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ }
+
+ static const struct genl_multicast_group dropmon_mcgrps[] = {
+- { .name = "events", },
++ { .name = "events", .cap_sys_admin = 1 },
+ };
+
+ static void send_dm_alert(struct work_struct *work)
+@@ -225,7 +225,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+
+ local_irq_save(flags);
+ data = this_cpu_ptr(&dm_cpu_data);
+- spin_lock(&data->lock);
++ raw_spin_lock(&data->lock);
+ dskb = data->skb;
+
+ if (!dskb)
+@@ -259,7 +259,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ }
+
+ out:
+- spin_unlock_irqrestore(&data->lock, flags);
++ raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
+@@ -314,9 +314,9 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
+ mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
+ }
+
+- spin_lock_irqsave(&hw_data->lock, flags);
++ raw_spin_lock_irqsave(&hw_data->lock, flags);
+ swap(hw_data->hw_entries, hw_entries);
+- spin_unlock_irqrestore(&hw_data->lock, flags);
++ raw_spin_unlock_irqrestore(&hw_data->lock, flags);
+
+ return hw_entries;
+ }
+@@ -448,7 +448,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ return;
+
+ hw_data = this_cpu_ptr(&dm_hw_cpu_data);
+- spin_lock_irqsave(&hw_data->lock, flags);
++ raw_spin_lock_irqsave(&hw_data->lock, flags);
+ hw_entries = hw_data->hw_entries;
+
+ if (!hw_entries)
+@@ -477,7 +477,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ }
+
+ out:
+- spin_unlock_irqrestore(&hw_data->lock, flags);
++ raw_spin_unlock_irqrestore(&hw_data->lock, flags);
+ }
+
+ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
+@@ -1619,11 +1619,13 @@ static const struct genl_small_ops dropmon_ops[] = {
+ .cmd = NET_DM_CMD_START,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = net_dm_cmd_trace,
++ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NET_DM_CMD_STOP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = net_dm_cmd_trace,
++ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NET_DM_CMD_CONFIG_GET,
+@@ -1671,7 +1673,7 @@ static struct notifier_block dropmon_net_notifier = {
+
+ static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
+ {
+- spin_lock_init(&data->lock);
++ raw_spin_lock_init(&data->lock);
+ skb_queue_head_init(&data->drop_queue);
+ u64_stats_init(&data->stats.syncp);
+ }
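
drop_monitor's per-CPU lock becomes a raw_spinlock_t because the drop probes can fire from contexts where a regular spinlock (a sleeping lock on PREEMPT_RT) must not be taken. The raw API mirrors spinlock_t one-for-one; a sketch, assuming kernel context (names hypothetical):

#include <linux/spinlock.h>

struct per_cpu_data {
    raw_spinlock_t lock;
};

static void data_init(struct per_cpu_data *d)
{
    raw_spin_lock_init(&d->lock);
}

static void data_swap(struct per_cpu_data *d)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&d->lock, flags);   /* never sleeps, even on RT */
    /* ... swap the per-CPU buffer here ... */
    raw_spin_unlock_irqrestore(&d->lock, flags);
}
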
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 980e2fd2f013b3..137b8d1c72203a 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -109,9 +109,6 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
+ child = xdst->child;
+ }
+ #endif
+- if (!(dst->flags & DST_NOCOUNT))
+- dst_entries_add(dst->ops, -1);
+-
+ if (dst->ops->destroy)
+ dst->ops->destroy(dst);
+ netdev_put(dst->dev, &dst->dev_tracker);
+@@ -161,17 +158,27 @@ void dst_dev_put(struct dst_entry *dst)
+ }
+ EXPORT_SYMBOL(dst_dev_put);
+
++static void dst_count_dec(struct dst_entry *dst)
++{
++ if (!(dst->flags & DST_NOCOUNT))
++ dst_entries_add(dst->ops, -1);
++}
++
+ void dst_release(struct dst_entry *dst)
+ {
+- if (dst && rcuref_put(&dst->__rcuref))
++ if (dst && rcuref_put(&dst->__rcuref)) {
++ dst_count_dec(dst);
+ call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu);
++ }
+ }
+ EXPORT_SYMBOL(dst_release);
+
+ void dst_release_immediate(struct dst_entry *dst)
+ {
+- if (dst && rcuref_put(&dst->__rcuref))
++ if (dst && rcuref_put(&dst->__rcuref)) {
++ dst_count_dec(dst);
+ dst_destroy(dst);
++ }
+ }
+ EXPORT_SYMBOL(dst_release_immediate);
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index a094694899c99b..8bfd46a070c167 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -81,6 +81,8 @@
+ #include <net/xdp.h>
+ #include <net/mptcp.h>
+ #include <net/netfilter/nf_conntrack_bpf.h>
++#include <linux/un.h>
++#include <net/xdp_sock_drv.h>
+
+ static const struct bpf_func_proto *
+ bpf_sk_base_func_proto(enum bpf_func_id func_id);
+@@ -1655,6 +1657,11 @@ static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
+ static inline int __bpf_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+ {
++#ifdef CONFIG_DEBUG_NET
++ /* Avoid a splat in pskb_may_pull_reason() */
++ if (write_len > INT_MAX)
++ return -EINVAL;
++#endif
+ return skb_ensure_writable(skb, write_len);
+ }
+
+@@ -2264,12 +2271,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
+
+ err = bpf_out_neigh_v6(net, skb, dev, nh);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out_xmit;
+ out_drop:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ out_xmit:
+ return ret;
+@@ -2371,12 +2378,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
+
+ err = bpf_out_neigh_v4(net, skb, dev, nh);
+ if (unlikely(net_xmit_eval(err)))
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out_xmit;
+ out_drop:
+- dev->stats.tx_errors++;
++ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ out_xmit:
+ return ret;
+@@ -2489,6 +2496,7 @@ int skb_do_redirect(struct sk_buff *skb)
+ net_eq(net, dev_net(dev))))
+ goto out_drop;
+ skb->dev = dev;
++ dev_sw_netstats_rx_add(dev, skb->len);
+ return -EAGAIN;
+ }
+ return flags & BPF_F_NEIGH ?
+@@ -2590,6 +2598,22 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
+ return 0;
+ }
+
++static void sk_msg_reset_curr(struct sk_msg *msg)
++{
++ u32 i = msg->sg.start;
++ u32 len = 0;
++
++ do {
++ len += sk_msg_elem(msg, i)->length;
++ sk_msg_iter_var_next(i);
++ if (len >= msg->sg.size)
++ break;
++ } while (i != msg->sg.end);
++
++ msg->sg.curr = i;
++ msg->sg.copybreak = 0;
++}
++
+ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
+ .func = bpf_msg_cork_bytes,
+ .gpl_only = false,
+@@ -2709,6 +2733,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
+ msg->sg.end - shift + NR_MSG_FRAG_IDS :
+ msg->sg.end - shift;
+ out:
++ sk_msg_reset_curr(msg);
+ msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
+ msg->data_end = msg->data + bytes;
+ return 0;
+@@ -2845,6 +2870,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ msg->sg.data[new] = rsge;
+ }
+
++ sk_msg_reset_curr(msg);
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+ }
+@@ -3013,6 +3039,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+
+ sk_mem_uncharge(msg->sk, len - pop);
+ msg->sg.size -= (len - pop);
++ sk_msg_reset_curr(msg);
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+ }
+@@ -3503,13 +3530,20 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+- /* Due to header grow, MSS needs to be downgraded. */
+- if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+- skb_decrease_gso_size(shinfo, len_diff);
+-
+ /* Header must be checked, and gso_segs recomputed. */
+ shinfo->gso_type |= gso_type;
+ shinfo->gso_segs = 0;
++
++ /* Due to header growth, MSS needs to be downgraded.
++ * There is a BUG_ON() when segmenting the frag_list with
++ * head_frag true, so linearize the skb after downgrading
++ * the MSS.
++ */
++ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
++ skb_decrease_gso_size(shinfo, len_diff);
++ if (shinfo->frag_list)
++ return skb_linearize(skb);
++ }
+ }
+
+ return 0;
+@@ -4059,10 +4093,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
+ skb_frag_size_add(frag, offset);
+ sinfo->xdp_frags_size += offset;
++ if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
++ xsk_buff_get_tail(xdp)->data_end += offset;
+
+ return 0;
+ }
+
++static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
++ struct xdp_mem_info *mem_info, bool release)
++{
++ struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
++
++ if (release) {
++ xsk_buff_del_tail(zc_frag);
++ __xdp_return(NULL, mem_info, false, zc_frag);
++ } else {
++ zc_frag->data_end -= shrink;
++ }
++}
++
++static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
++ int shrink)
++{
++ struct xdp_mem_info *mem_info = &xdp->rxq->mem;
++ bool release = skb_frag_size(frag) == shrink;
++
++ if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
++ bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
++ goto out;
++ }
++
++ if (release) {
++ struct page *page = skb_frag_page(frag);
++
++ __xdp_return(page_address(page), mem_info, false, NULL);
++ }
++
++out:
++ return release;
++}
++
+ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
+ {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+@@ -4077,12 +4147,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
+
+ len_free += shrink;
+ offset -= shrink;
+-
+- if (skb_frag_size(frag) == shrink) {
+- struct page *page = skb_frag_page(frag);
+-
+- __xdp_return(page_address(page), &xdp->rxq->mem,
+- false, NULL);
++ if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
+ n_frags_free++;
+ } else {
+ skb_frag_size_sub(frag, shrink);
+@@ -4281,10 +4346,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+ enum bpf_map_type map_type = ri->map_type;
+ void *fwd = ri->tgt_value;
+ u32 map_id = ri->map_id;
++ u32 flags = ri->flags;
+ struct bpf_map *map;
+ int err;
+
+ ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++ ri->flags = 0;
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+
+ if (unlikely(!xdpf)) {
+@@ -4296,11 +4363,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+ case BPF_MAP_TYPE_DEVMAP:
+ fallthrough;
+ case BPF_MAP_TYPE_DEVMAP_HASH:
+- map = READ_ONCE(ri->map);
+- if (unlikely(map)) {
++ if (unlikely(flags & BPF_F_BROADCAST)) {
++ map = READ_ONCE(ri->map);
++
++ /* The map pointer is cleared when the map is being torn
++ * down by bpf_clear_redirect_map()
++ */
++ if (unlikely(!map)) {
++ err = -ENOENT;
++ break;
++ }
++
+ WRITE_ONCE(ri->map, NULL);
+ err = dev_map_enqueue_multi(xdpf, dev, map,
+- ri->flags & BPF_F_EXCLUDE_INGRESS);
++ flags & BPF_F_EXCLUDE_INGRESS);
+ } else {
+ err = dev_map_enqueue(fwd, xdpf, dev);
+ }
+@@ -4363,9 +4439,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
+ static int xdp_do_generic_redirect_map(struct net_device *dev,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp,
+- struct bpf_prog *xdp_prog,
+- void *fwd,
+- enum bpf_map_type map_type, u32 map_id)
++ struct bpf_prog *xdp_prog, void *fwd,
++ enum bpf_map_type map_type, u32 map_id,
++ u32 flags)
+ {
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_map *map;
+@@ -4375,11 +4451,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
+ case BPF_MAP_TYPE_DEVMAP:
+ fallthrough;
+ case BPF_MAP_TYPE_DEVMAP_HASH:
+- map = READ_ONCE(ri->map);
+- if (unlikely(map)) {
++ if (unlikely(flags & BPF_F_BROADCAST)) {
++ map = READ_ONCE(ri->map);
++
++ /* The map pointer is cleared when the map is being torn
++ * down by bpf_clear_redirect_map()
++ */
++ if (unlikely(!map)) {
++ err = -ENOENT;
++ break;
++ }
++
+ WRITE_ONCE(ri->map, NULL);
+ err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
+- ri->flags & BPF_F_EXCLUDE_INGRESS);
++ flags & BPF_F_EXCLUDE_INGRESS);
+ } else {
+ err = dev_map_generic_redirect(fwd, skb, xdp_prog);
+ }
+@@ -4416,9 +4501,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ enum bpf_map_type map_type = ri->map_type;
+ void *fwd = ri->tgt_value;
+ u32 map_id = ri->map_id;
++ u32 flags = ri->flags;
+ int err;
+
+ ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
++ ri->flags = 0;
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+
+ if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+@@ -4438,7 +4525,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ return 0;
+ }
+
+- return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
++ return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
+ err:
+ _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
+ return err;
+@@ -5850,6 +5937,9 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ params->rt_metric = res.fi->fib_priority;
+ params->ifindex = dev->ifindex;
+
++ if (flags & BPF_FIB_LOOKUP_SRC)
++ params->ipv4_src = fib_result_prefsrc(net, &res);
++
+ /* xdp and cls_bpf programs are run in RCU-bh so
+ * rcu_read_lock_bh is not needed here
+ */
+@@ -5992,6 +6082,18 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ params->rt_metric = res.f6i->fib6_metric;
+ params->ifindex = dev->ifindex;
+
++ if (flags & BPF_FIB_LOOKUP_SRC) {
++ if (res.f6i->fib6_prefsrc.plen) {
++ *src = res.f6i->fib6_prefsrc.addr;
++ } else {
++ err = ipv6_bpf_stub->ipv6_dev_get_saddr(net, dev,
++ &fl6.daddr, 0,
++ src);
++ if (err)
++ return BPF_FIB_LKUP_RET_NO_SRC_ADDR;
++ }
++ }
++
+ if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
+ goto set_fwd_params;
+
+@@ -6010,7 +6112,8 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ #endif
+
+ #define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
+- BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID)
++ BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID | \
++ BPF_FIB_LOOKUP_SRC)
+
+ BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
+ struct bpf_fib_lookup *, params, int, plen, u32, flags)
+@@ -6119,20 +6222,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
+ int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
+ struct net_device *dev = skb->dev;
+ int skb_len, dev_len;
+- int mtu;
++ int mtu = 0;
+
+- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
+- return -EINVAL;
++ if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
+- return -EINVAL;
++ if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ dev = __dev_via_ifindex(dev, ifindex);
+- if (unlikely(!dev))
+- return -ENODEV;
++ if (unlikely(!dev)) {
++ ret = -ENODEV;
++ goto out;
++ }
+
+ mtu = READ_ONCE(dev->mtu);
+-
+ dev_len = mtu + dev->hard_header_len;
+
+ /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+@@ -6150,15 +6258,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
+ */
+ if (skb_is_gso(skb)) {
+ ret = BPF_MTU_CHK_RET_SUCCESS;
+-
+ if (flags & BPF_MTU_CHK_SEGS &&
+ !skb_gso_validate_network_len(skb, mtu))
+ ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
+ }
+ out:
+- /* BPF verifier guarantees valid pointer */
+ *mtu_len = mtu;
+-
+ return ret;
+ }
+
+@@ -6168,19 +6273,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
+ struct net_device *dev = xdp->rxq->dev;
+ int xdp_len = xdp->data_end - xdp->data;
+ int ret = BPF_MTU_CHK_RET_SUCCESS;
+- int mtu, dev_len;
++ int mtu = 0, dev_len;
+
+ /* XDP variant doesn't support multi-buffer segment check (yet) */
+- if (unlikely(flags))
+- return -EINVAL;
++ if (unlikely(flags)) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ dev = __dev_via_ifindex(dev, ifindex);
+- if (unlikely(!dev))
+- return -ENODEV;
++ if (unlikely(!dev)) {
++ ret = -ENODEV;
++ goto out;
++ }
+
+ mtu = READ_ONCE(dev->mtu);
+-
+- /* Add L2-header as dev MTU is L3 size */
+ dev_len = mtu + dev->hard_header_len;
+
+ /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+@@ -6190,10 +6297,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
+ xdp_len += len_diff; /* minus result pass check */
+ if (xdp_len > dev_len)
+ ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
+-
+- /* BPF verifier guarantees valid pointer */
++out:
+ *mtu_len = mtu;
+-
+ return ret;
+ }
+
+@@ -6203,7 +6308,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+- .arg3_type = ARG_PTR_TO_INT,
++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg3_size = sizeof(u32),
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+ };
+@@ -6214,7 +6320,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+- .arg3_type = ARG_PTR_TO_INT,
++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg3_size = sizeof(u32),
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+ };
+@@ -11752,6 +11859,27 @@ __bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags,
+
+ return 0;
+ }
++
++__bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
++ const u8 *sun_path, u32 sun_path__sz)
++{
++ struct sockaddr_un *un;
++
++ if (sa_kern->sk->sk_family != AF_UNIX)
++ return -EINVAL;
++
++ /* We do not allow changing the address to unnamed or larger than the
++ * maximum allowed address size for a unix sockaddr.
++ */
++ if (sun_path__sz == 0 || sun_path__sz > UNIX_PATH_MAX)
++ return -EINVAL;
++
++ un = (struct sockaddr_un *)sa_kern->uaddr;
++ memcpy(un->sun_path, sun_path, sun_path__sz);
++ sa_kern->uaddrlen = offsetof(struct sockaddr_un, sun_path) + sun_path__sz;
++
++ return 0;
++}
+ __diag_pop();
+
+ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
+@@ -11776,6 +11904,10 @@ BTF_SET8_START(bpf_kfunc_check_set_xdp)
+ BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
+ BTF_SET8_END(bpf_kfunc_check_set_xdp)
+
++BTF_SET8_START(bpf_kfunc_check_set_sock_addr)
++BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path)
++BTF_SET8_END(bpf_kfunc_check_set_sock_addr)
++
+ static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_skb,
+@@ -11786,6 +11918,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_xdp = {
+ .set = &bpf_kfunc_check_set_xdp,
+ };
+
++static const struct btf_kfunc_id_set bpf_kfunc_set_sock_addr = {
++ .owner = THIS_MODULE,
++ .set = &bpf_kfunc_check_set_sock_addr,
++};
++
+ static int __init bpf_kfunc_init(void)
+ {
+ int ret;
+@@ -11800,7 +11937,9 @@ static int __init bpf_kfunc_init(void)
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
+- return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
++ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
++ return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
++ &bpf_kfunc_set_sock_addr);
+ }
+ late_initcall(bpf_kfunc_init);
+
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 272f09251343da..b22d20cc417b21 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1093,7 +1093,7 @@ bool __skb_flow_dissect(const struct net *net,
+ }
+ }
+
+- WARN_ON_ONCE(!net);
++ DEBUG_NET_WARN_ON_ONCE(!net);
+ if (net) {
+ enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+ struct bpf_prog_array *run_array;
+diff --git a/net/core/gro.c b/net/core/gro.c
+index 0759277dc14ee6..85d3f686ba539b 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -100,7 +100,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+ unsigned int headlen = skb_headlen(skb);
+ unsigned int len = skb_gro_len(skb);
+ unsigned int delta_truesize;
+- unsigned int gro_max_size;
+ unsigned int new_truesize;
+ struct sk_buff *lp;
+ int segs;
+@@ -114,12 +113,8 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+ if (p->pp_recycle != skb->pp_recycle)
+ return -ETOOMANYREFS;
+
+- /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
+- gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
+- READ_ONCE(p->dev->gro_max_size) :
+- READ_ONCE(p->dev->gro_ipv4_max_size);
+-
+- if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
++ if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
++ NAPI_GRO_CB(skb)->flush))
+ return -E2BIG;
+
+ if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
+@@ -195,8 +190,9 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+ }
+
+ merge:
+- /* sk owenrship - if any - completely transferred to the aggregated packet */
++ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
++ skb->sk = NULL;
+ delta_truesize = skb->truesize;
+ if (offset > headlen) {
+ unsigned int eat = offset - headlen;
+@@ -372,6 +368,7 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
+ const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ const skb_frag_t *frag0 = &pinfo->frags[0];
+
++ NAPI_GRO_CB(skb)->network_offset = 0;
+ NAPI_GRO_CB(skb)->data_offset = 0;
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+diff --git a/net/core/link_watch.c b/net/core/link_watch.c
+index c469d1c4db5d7a..cf867f6e38bf17 100644
+--- a/net/core/link_watch.c
++++ b/net/core/link_watch.c
+@@ -67,7 +67,7 @@ static void rfc2863_policy(struct net_device *dev)
+ {
+ unsigned char operstate = default_operstate(dev);
+
+- if (operstate == dev->operstate)
++ if (operstate == READ_ONCE(dev->operstate))
+ return;
+
+ write_lock(&dev_base_lock);
+@@ -87,7 +87,7 @@ static void rfc2863_policy(struct net_device *dev)
+ break;
+ }
+
+- dev->operstate = operstate;
++ WRITE_ONCE(dev->operstate, operstate);
+
+ write_unlock(&dev_base_lock);
+ }
+@@ -153,9 +153,9 @@ static void linkwatch_schedule_work(int urgent)
+ * override the existing timer.
+ */
+ if (test_bit(LW_URGENT, &linkwatch_flags))
+- mod_delayed_work(system_wq, &linkwatch_work, 0);
++ mod_delayed_work(system_unbound_wq, &linkwatch_work, 0);
+ else
+- schedule_delayed_work(&linkwatch_work, delay);
++ queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);
+ }
+
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index df81c1f0a57047..552719c3bbc3d7 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -253,9 +253,11 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ {
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
++ u64 tmax = ktime_get_ns() + NSEC_PER_MSEC;
+ unsigned long tref = jiffies - 5 * HZ;
+ struct neighbour *n, *tmp;
+ int shrunk = 0;
++ int loop = 0;
+
+ NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
+
+@@ -278,11 +280,16 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ shrunk++;
+ if (shrunk >= max_clean)
+ break;
++ if (++loop == 16) {
++ if (ktime_get_ns() > tmax)
++ goto unlock;
++ loop = 0;
++ }
+ }
+ }
+
+ WRITE_ONCE(tbl->last_flush, jiffies);
+-
++unlock:
+ write_unlock_bh(&tbl->lock);
+
+ return shrunk;
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index fccaa5bac0ed0a..f7404bc679746f 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -216,7 +216,7 @@ static ssize_t speed_show(struct device *dev,
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+- if (netif_running(netdev) && netif_device_present(netdev)) {
++ if (netif_running(netdev)) {
+ struct ethtool_link_ksettings cmd;
+
+ if (!__ethtool_get_link_ksettings(netdev, &cmd))
+@@ -307,11 +307,9 @@ static ssize_t operstate_show(struct device *dev,
+ const struct net_device *netdev = to_net_dev(dev);
+ unsigned char operstate;
+
+- read_lock(&dev_base_lock);
+- operstate = netdev->operstate;
++ operstate = READ_ONCE(netdev->operstate);
+ if (!netif_running(netdev))
+ operstate = IF_OPER_DOWN;
+- read_unlock(&dev_base_lock);
+
+ if (operstate >= ARRAY_SIZE(operstates))
+ return -EINVAL; /* should not happen */
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index f4183c4c1ec82f..018e213185a17f 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -69,12 +69,15 @@ DEFINE_COOKIE(net_cookie);
+
+ static struct net_generic *net_alloc_generic(void)
+ {
++ unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
++ unsigned int generic_size;
+ struct net_generic *ng;
+- unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
++
++ generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);
+
+ ng = kzalloc(generic_size, GFP_KERNEL);
+ if (ng)
+- ng->s.len = max_gen_ptrs;
++ ng->s.len = gen_ptrs;
+
+ return ng;
+ }
+@@ -667,11 +670,16 @@ EXPORT_SYMBOL_GPL(__put_net);
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+- * Returns the net's common namespace.
++ * Returns the net's common namespace or ERR_PTR() if ref is zero.
+ */
+ struct ns_common *get_net_ns(struct ns_common *ns)
+ {
+- return &get_net(container_of(ns, struct net, ns))->ns;
++ struct net *net;
++
++ net = maybe_get_net(container_of(ns, struct net, ns));
++ if (net)
++ return &net->ns;
++ return ERR_PTR(-EINVAL);
+ }
+ EXPORT_SYMBOL_GPL(get_net_ns);
+
+@@ -1229,7 +1237,11 @@ static int register_pernet_operations(struct list_head *list,
+ if (error < 0)
+ return error;
+ *ops->id = error;
+- max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
++ /* This does not require READ_ONCE as writers already hold
++ * pernet_ops_rwsem. But WRITE_ONCE is needed to protect
++ * net_alloc_generic.
++ */
++ WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
+ }
+ error = __register_pernet_operations(list, ops);
+ if (error) {
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 543007f159f997..e0821390040937 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -316,7 +316,7 @@ static int netpoll_owner_active(struct net_device *dev)
+ struct napi_struct *napi;
+
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
+- if (napi->poll_owner == smp_processor_id())
++ if (READ_ONCE(napi->poll_owner) == smp_processor_id())
+ return 1;
+ }
+ return 0;
+@@ -626,12 +626,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+ const struct net_device_ops *ops;
+ int err;
+
+- np->dev = ndev;
+- strscpy(np->dev_name, ndev->name, IFNAMSIZ);
+-
+ if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
+ np_err(np, "%s doesn't support polling, aborting\n",
+- np->dev_name);
++ ndev->name);
+ err = -ENOTSUPP;
+ goto out;
+ }
+@@ -649,7 +646,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+
+ refcount_set(&npinfo->refcnt, 1);
+
+- ops = np->dev->netdev_ops;
++ ops = ndev->netdev_ops;
+ if (ops->ndo_netpoll_setup) {
+ err = ops->ndo_netpoll_setup(ndev, npinfo);
+ if (err)
+@@ -660,6 +657,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+ refcount_inc(&npinfo->refcnt);
+ }
+
++ np->dev = ndev;
++ strscpy(np->dev_name, ndev->name, IFNAMSIZ);
+ npinfo->netpoll = np;
+
+ /* last thing to do is link it to the net device structure */
+@@ -677,6 +676,7 @@ EXPORT_SYMBOL_GPL(__netpoll_setup);
+ int netpoll_setup(struct netpoll *np)
+ {
+ struct net_device *ndev = NULL;
++ bool ip_overwritten = false;
+ struct in_device *in_dev;
+ int err;
+
+@@ -741,6 +741,7 @@ int netpoll_setup(struct netpoll *np)
+ }
+
+ np->local_ip.ip = ifa->ifa_local;
++ ip_overwritten = true;
+ np_info(np, "local IP %pI4\n", &np->local_ip.ip);
+ } else {
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -757,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
+ continue;
+ np->local_ip.in6 = ifp->addr;
++ ip_overwritten = true;
+ err = 0;
+ break;
+ }
+@@ -787,6 +789,9 @@ int netpoll_setup(struct netpoll *np)
+ return 0;
+
+ put:
++ DEBUG_NET_WARN_ON_ONCE(np->dev);
++ if (ip_overwritten)
++ memset(&np->local_ip, 0, sizeof(np->local_ip));
+ netdev_put(ndev, &np->dev_tracker);
+ unlock:
+ rtnl_unlock();
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index 77cb75e63aca18..31f923e7b5c40c 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -221,8 +221,12 @@ static int page_pool_init(struct page_pool *pool,
+ return -ENOMEM;
+ #endif
+
+- if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
++ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
++#ifdef CONFIG_PAGE_POOL_STATS
++ free_percpu(pool->recycle_stats);
++#endif
+ return -ENOMEM;
++ }
+
+ atomic_set(&pool->pages_state_release_cnt, 0);
+
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 4d1696677c48c8..0e472f6fab8538 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3982,8 +3982,7 @@ static void __net_exit pg_net_exit(struct net *net)
+ list_for_each_safe(q, n, &list) {
+ t = list_entry(q, struct pktgen_thread, th_list);
+ list_del(&t->th_list);
+- kthread_stop(t->tsk);
+- put_task_struct(t->tsk);
++ kthread_stop_put(t->tsk);
+ kfree(t);
+ }
+
+diff --git a/net/core/request_sock.c b/net/core/request_sock.c
+index f35c2e9984062b..63de5c635842b6 100644
+--- a/net/core/request_sock.c
++++ b/net/core/request_sock.c
+@@ -33,9 +33,6 @@
+
+ void reqsk_queue_alloc(struct request_sock_queue *queue)
+ {
+- spin_lock_init(&queue->rskq_lock);
+-
+- spin_lock_init(&queue->fastopenq.lock);
+ queue->fastopenq.rskq_rst_head = NULL;
+ queue->fastopenq.rskq_rst_tail = NULL;
+ queue->fastopenq.qlen = 0;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 53c377d054f036..c76c54879fdddb 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -389,6 +389,35 @@ void rtnl_unregister_all(int protocol)
+ }
+ EXPORT_SYMBOL_GPL(rtnl_unregister_all);
+
++int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
++{
++ const struct rtnl_msg_handler *handler;
++ int i, err;
++
++ for (i = 0, handler = handlers; i < n; i++, handler++) {
++ err = rtnl_register_internal(handler->owner, handler->protocol,
++ handler->msgtype, handler->doit,
++ handler->dumpit, handler->flags);
++ if (err) {
++ __rtnl_unregister_many(handlers, i);
++ break;
++ }
++ }
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(__rtnl_register_many);
++
++void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
++{
++ const struct rtnl_msg_handler *handler;
++ int i;
++
++ for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
++ rtnl_unregister(handler->protocol, handler->msgtype);
++}
++EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
++
+ static LIST_HEAD(link_ops);
+
+ static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+@@ -880,9 +909,9 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
+ break;
+ }
+
+- if (dev->operstate != operstate) {
++ if (READ_ONCE(dev->operstate) != operstate) {
+ write_lock(&dev_base_lock);
+- dev->operstate = operstate;
++ WRITE_ONCE(dev->operstate, operstate);
+ write_unlock(&dev_base_lock);
+ netdev_state_change(dev);
+ }
+@@ -2519,7 +2548,7 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
+
+ nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
+ if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
+- nla_len(attr) < NLA_HDRLEN) {
++ nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
+ return -EINVAL;
+ }
+ if (len >= MAX_VLAN_LIST_LEN)
+@@ -2869,13 +2898,6 @@ static int do_setlink(const struct sk_buff *skb,
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ }
+
+- if (tb[IFLA_MASTER]) {
+- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+- if (err)
+- goto errout;
+- status |= DO_SETLINK_MODIFIED;
+- }
+-
+ if (ifm->ifi_flags || ifm->ifi_change) {
+ err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
+ extack);
+@@ -2883,6 +2905,13 @@ static int do_setlink(const struct sk_buff *skb,
+ goto errout;
+ }
+
++ if (tb[IFLA_MASTER]) {
++ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
++ if (err)
++ goto errout;
++ status |= DO_SETLINK_MODIFIED;
++ }
++
+ if (tb[IFLA_CARRIER]) {
+ err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
+ if (err)
+@@ -3263,7 +3292,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+- dev = rtnl_dev_get(net, tb);
++ dev = rtnl_dev_get(tgt_net, tb);
+ else if (tb[IFLA_GROUP])
+ err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
+ else
+@@ -5135,10 +5164,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct net *net = sock_net(skb->sk);
+ struct ifinfomsg *ifm;
+ struct net_device *dev;
+- struct nlattr *br_spec, *attr = NULL;
++ struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
+ int rem, err = -EOPNOTSUPP;
+ u16 flags = 0;
+- bool have_flags = false;
+
+ if (nlmsg_len(nlh) < sizeof(*ifm))
+ return -EINVAL;
+@@ -5156,11 +5184,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (br_spec) {
+ nla_for_each_nested(attr, br_spec, rem) {
+- if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
++ if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
+ if (nla_len(attr) < sizeof(flags))
+ return -EINVAL;
+
+- have_flags = true;
++ br_flags_attr = attr;
+ flags = nla_get_u16(attr);
+ }
+
+@@ -5204,8 +5232,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+ }
+
+- if (have_flags)
+- memcpy(nla_data(attr), &flags, sizeof(flags));
++ if (br_flags_attr)
++ memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
+ out:
+ return err;
+ }
+@@ -6377,6 +6405,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+ owner = link->owner;
+ dumpit = link->dumpit;
++ flags = link->flags;
+
+ if (type == RTM_GETLINK - RTM_BASE)
+ min_dump_alloc = rtnl_calcit(skb, nlh);
+@@ -6394,6 +6423,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ .dump = dumpit,
+ .min_dump_alloc = min_dump_alloc,
+ .module = owner,
++ .flags = flags,
+ };
+ err = netlink_dump_start(rtnl, skb, nlh, &c);
+ /* netlink_dump_start() will keep a reference on
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 880027ecf51650..737917c7ac6276 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -26,6 +26,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/slab.h>
+ #include <linux/errqueue.h>
++#include <linux/io_uring.h>
+
+ #include <linux/uaccess.h>
+
+@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+
+ if (fd < 0 || !(file = fget_raw(fd)))
+ return -EBADF;
++ /* don't allow io_uring files */
++ if (io_is_uring_fops(file)) {
++ fput(file);
++ return -EINVAL;
++ }
+ *fpp++ = file;
+ fpl->count++;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 4eaf7ed0d1f44e..f0a9ef1aeaa298 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1971,11 +1971,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+- int headerlen = skb_headroom(skb);
+- unsigned int size = skb_end_offset(skb) + skb->data_len;
+- struct sk_buff *n = __alloc_skb(size, gfp_mask,
+- skb_alloc_rx_flag(skb), NUMA_NO_NODE);
++ struct sk_buff *n;
++ unsigned int size;
++ int headerlen;
++
++ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++ return NULL;
+
++ headerlen = skb_headroom(skb);
++ size = skb_end_offset(skb) + skb->data_len;
++ n = __alloc_skb(size, gfp_mask,
++ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ if (!n)
+ return NULL;
+
+@@ -2303,12 +2309,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+ /*
+ * Allocate the copy buffer
+ */
+- struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+- gfp_mask, skb_alloc_rx_flag(skb),
+- NUMA_NO_NODE);
+- int oldheadroom = skb_headroom(skb);
+ int head_copy_len, head_copy_off;
++ struct sk_buff *n;
++ int oldheadroom;
++
++ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
++ return NULL;
+
++ oldheadroom = skb_headroom(skb);
++ n = __alloc_skb(newheadroom + skb->len + newtailroom,
++ gfp_mask, skb_alloc_rx_flag(skb),
++ NUMA_NO_NODE);
+ if (!n)
+ return NULL;
+
+@@ -4254,6 +4265,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
+ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct ts_config *config)
+ {
++ unsigned int patlen = config->ops->get_pattern_len(config);
+ struct ts_state state;
+ unsigned int ret;
+
+@@ -4265,7 +4277,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
+
+ ret = textsearch_find(config, &state);
+- return (ret <= to - from ? ret : UINT_MAX);
++ return (ret + patlen <= to - from ? ret : UINT_MAX);
+ }
+ EXPORT_SYMBOL(skb_find_text);
+
+@@ -4507,8 +4519,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ /* GSO partial only requires that we trim off any excess that
+ * doesn't fit into an MSS sized block, so take care of that
+ * now.
++ * Cap len to not accidentally hit GSO_BY_FRAGS.
+ */
+- partial_segs = len / mss;
++ partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
+ if (partial_segs > 1)
+ mss *= partial_segs;
+ else
+@@ -4809,7 +4822,9 @@ static __always_inline unsigned int skb_ext_total_length(void)
+ static void skb_extensions_init(void)
+ {
+ BUILD_BUG_ON(SKB_EXT_NUM >= 8);
++#if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
+ BUILD_BUG_ON(skb_ext_total_length() > 255);
++#endif
+
+ skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
+ SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
+@@ -6655,6 +6670,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
+ for (i = 0; i < sp->len; i++)
+ xfrm_state_hold(sp->xvec[i]);
+ }
++#endif
++#ifdef CONFIG_MCTP_FLOWS
++ if (old_active & (1 << SKB_EXT_MCTP)) {
++ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
++
++ if (flow->key)
++ refcount_inc(&flow->key->refs);
++ }
+ #endif
+ __skb_ext_put(old);
+ return new;
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 6c31eefbd77786..bbf40b99971382 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -434,7 +434,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ page = sg_page(sge);
+ if (copied + copy > len)
+ copy = len - copied;
+- copy = copy_page_to_iter(page, sge->offset, copy, iter);
++ if (copy)
++ copy = copy_page_to_iter(page, sge->offset, copy, iter);
+ if (!copy) {
+ copied = copied ? copied : -EFAULT;
+ goto out;
+@@ -826,6 +827,8 @@ static void sk_psock_destroy(struct work_struct *work)
+
+ if (psock->sk_redir)
+ sock_put(psock->sk_redir);
++ if (psock->sk_pair)
++ sock_put(psock->sk_pair);
+ sock_put(psock->sk);
+ kfree(psock);
+ }
+@@ -1225,7 +1228,7 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock)
+- psock->saved_data_ready(sk);
++ sk_psock_data_ready(sk, psock);
+ rcu_read_unlock();
+ }
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 16584e2dd6481a..bc2a4e38dcea8e 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -107,6 +107,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/poll.h>
+ #include <linux/tcp.h>
++#include <linux/udp.h>
+ #include <linux/init.h>
+ #include <linux/highmem.h>
+ #include <linux/user_namespace.h>
+@@ -282,6 +283,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+ EXPORT_SYMBOL(sysctl_rmem_max);
+ __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
+ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
++int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
+
+ /* Maximal space eaten by iovec or ancillary data plus some space */
+ int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
+@@ -484,7 +486,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ unsigned long flags;
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+
+- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
++ if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
+ atomic_inc(&sk->sk_drops);
+ trace_sock_rcvqueue_full(sk, skb);
+ return -ENOMEM;
+@@ -554,7 +556,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+
+ skb->dev = NULL;
+
+- if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
++ if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+@@ -600,7 +602,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+ INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+ dst, cookie) == NULL) {
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
+ dst_release(dst);
+ return NULL;
+@@ -1718,9 +1720,16 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case SO_TIMESTAMPING_OLD:
++ case SO_TIMESTAMPING_NEW:
+ lv = sizeof(v.timestamping);
+- v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
+- v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
++ /* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
++ * returning the flags when they were set through the same option.
++ * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
++ */
++ if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
++ v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
++ v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
++ }
+ break;
+
+ case SO_RCVTIMEO_OLD:
+@@ -2010,14 +2019,6 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+ return 0;
+ }
+
+-int sock_getsockopt(struct socket *sock, int level, int optname,
+- char __user *optval, int __user *optlen)
+-{
+- return sk_getsockopt(sock->sk, level, optname,
+- USER_SOCKPTR(optval),
+- USER_SOCKPTR(optlen));
+-}
+-
+ /*
+ * Initialize an sk_lock.
+ *
+@@ -2821,6 +2822,7 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
+ sockc->mark = *(u32 *)CMSG_DATA(cmsg);
+ break;
+ case SO_TIMESTAMPING_OLD:
++ case SO_TIMESTAMPING_NEW:
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+
+@@ -3715,6 +3717,9 @@ void sk_common_release(struct sock *sk)
+
+ sk->sk_prot->unhash(sk);
+
++ if (sk->sk_socket)
++ sk->sk_socket->sk = NULL;
++
+ /*
+ * In this point socket cannot receive new packets, but it is possible
+ * that some packets are in flight because some CPU runs receiver and
+@@ -4128,8 +4133,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
+ {
+ struct sock *sk = p;
+
+- return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+- sk_busy_loop_timeout(sk, start_time);
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
++ return true;
++
++ if (sk_is_udp(sk) &&
++ !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
++ return true;
++
++ return sk_busy_loop_timeout(sk, start_time);
+ }
+ EXPORT_SYMBOL(sk_busy_loop_end);
+ #endif /* CONFIG_NET_RX_BUSY_POLL */
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index b1e29e18d1d60c..c53b731f2d6728 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -193,7 +193,7 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
+ if (sock_diag_handlers[hndl->family])
+ err = -EBUSY;
+ else
+- sock_diag_handlers[hndl->family] = hndl;
++ WRITE_ONCE(sock_diag_handlers[hndl->family], hndl);
+ mutex_unlock(&sock_diag_table_mutex);
+
+ return err;
+@@ -209,7 +209,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
+
+ mutex_lock(&sock_diag_table_mutex);
+ BUG_ON(sock_diag_handlers[family] != hnld);
+- sock_diag_handlers[family] = NULL;
++ WRITE_ONCE(sock_diag_handlers[family], NULL);
+ mutex_unlock(&sock_diag_table_mutex);
+ }
+ EXPORT_SYMBOL_GPL(sock_diag_unregister);
+@@ -227,7 +227,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
+ return -EINVAL;
+ req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
+
+- if (sock_diag_handlers[req->sdiag_family] == NULL)
++ if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL)
+ sock_load_diag_module(req->sdiag_family, 0);
+
+ mutex_lock(&sock_diag_table_mutex);
+@@ -286,12 +286,12 @@ static int sock_diag_bind(struct net *net, int group)
+ switch (group) {
+ case SKNLGRP_INET_TCP_DESTROY:
+ case SKNLGRP_INET_UDP_DESTROY:
+- if (!sock_diag_handlers[AF_INET])
++ if (!READ_ONCE(sock_diag_handlers[AF_INET]))
+ sock_load_diag_module(AF_INET, 0);
+ break;
+ case SKNLGRP_INET6_TCP_DESTROY:
+ case SKNLGRP_INET6_UDP_DESTROY:
+- if (!sock_diag_handlers[AF_INET6])
++ if (!READ_ONCE(sock_diag_handlers[AF_INET6]))
+ sock_load_diag_module(AF_INET6, 0);
+ break;
+ }
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 4292c2ed182866..2afac40bb83ca1 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -536,6 +536,8 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
+ {
+ if (sk_is_tcp(sk))
+ return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
++ if (sk_is_stream_unix(sk))
++ return (1 << sk->sk_state) & TCPF_ESTABLISHED;
+ return true;
+ }
+
+@@ -1169,6 +1171,7 @@ static void sock_hash_free(struct bpf_map *map)
+ sock_put(elem->sk);
+ sock_hash_free_elem(htab, elem);
+ }
++ cond_resched();
+ }
+
+ /* wait for psock readers accessing its map link */
+@@ -1631,19 +1634,23 @@ void sock_map_close(struct sock *sk, long timeout)
+
+ lock_sock(sk);
+ rcu_read_lock();
+- psock = sk_psock_get(sk);
+- if (unlikely(!psock)) {
+- rcu_read_unlock();
+- release_sock(sk);
+- saved_close = READ_ONCE(sk->sk_prot)->close;
+- } else {
++ psock = sk_psock(sk);
++ if (likely(psock)) {
+ saved_close = psock->saved_close;
+ sock_map_remove_links(sk, psock);
++ psock = sk_psock_get(sk);
++ if (unlikely(!psock))
++ goto no_psock;
+ rcu_read_unlock();
+ sk_psock_stop(psock);
+ release_sock(sk);
+ cancel_delayed_work_sync(&psock->work);
+ sk_psock_put(sk, psock);
++ } else {
++ saved_close = READ_ONCE(sk->sk_prot)->close;
++no_psock:
++ rcu_read_unlock();
++ release_sock(sk);
+ }
+
+ /* Make sure we do not recurse. This is a bug.
+diff --git a/net/core/stream.c b/net/core/stream.c
+index 96fbcb9bbb30a5..b16dfa568a2d5b 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -79,7 +79,7 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
+ remove_wait_queue(sk_sleep(sk), &wait);
+ sk->sk_write_pending--;
+ } while (!done);
+- return 0;
++ return done < 0 ? done : 0;
+ }
+ EXPORT_SYMBOL(sk_stream_wait_connect);
+
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 03f1edb948d7df..373b5b2231c492 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -30,6 +30,7 @@ static int int_3600 = 3600;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ static int max_skb_frags = MAX_SKB_FRAGS;
++static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
+
+ static int net_msg_warn; /* Unused, but still a sysctl */
+
+@@ -407,6 +408,14 @@ static struct ctl_table net_core_table[] = {
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_rcvbuf,
+ },
++ {
++ .procname = "mem_pcpu_rsv",
++ .data = &sysctl_mem_pcpu_rsv,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &min_mem_pcpu_rsv,
++ },
+ {
+ .procname = "dev_weight",
+ .data = &weight_p,
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index a70670fe9a2dc8..5ee3f8f165e5aa 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -126,10 +126,8 @@ void xdp_unreg_mem_model(struct xdp_mem_info *mem)
+ return;
+
+ if (type == MEM_TYPE_PAGE_POOL) {
+- rcu_read_lock();
+- xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
++ xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
+ page_pool_destroy(xa->page_pool);
+- rcu_read_unlock();
+ }
+ }
+ EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
+@@ -294,10 +292,8 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
+ mutex_lock(&mem_id_lock);
+ ret = __mem_id_init_hash_table();
+ mutex_unlock(&mem_id_lock);
+- if (ret < 0) {
+- WARN_ON(1);
++ if (ret < 0)
+ return ERR_PTR(ret);
+- }
+ }
+
+ xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 69453b936bd557..ca31c3b096bbfc 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -629,9 +629,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_parse_options(sk, dreq, skb))
+ goto drop_and_free;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto drop_and_free;
+-
+ ireq = inet_rsk(req);
+ sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+@@ -639,6 +636,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ ireq->ireq_family = AF_INET;
+ ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
+
++ if (security_inet_conn_request(sk, skb, req))
++ goto drop_and_free;
++
+ /*
+ * Step 3: Process LISTEN state
+ *
+@@ -655,8 +655,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_v4_send_response(sk, req))
+ goto drop_and_free;
+
+- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+- reqsk_put(req);
++ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
++ reqsk_free(req);
++ else
++ reqsk_put(req);
++
+ return 0;
+
+ drop_and_free:
+@@ -1039,7 +1042,7 @@ static void __net_exit dccp_v4_exit_net(struct net *net)
+
+ static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
+ {
+- inet_twsk_purge(&dccp_hashinfo, AF_INET);
++ inet_twsk_purge(&dccp_hashinfo);
+ }
+
+ static struct pernet_operations dccp_v4_ops = {
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index c693a570682fba..d25e962b18a53e 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_parse_options(sk, dreq, skb))
+ goto drop_and_free;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto drop_and_free;
+-
+ ireq = inet_rsk(req);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
+
++ if (security_inet_conn_request(sk, skb, req))
++ goto drop_and_free;
++
+ if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+@@ -398,8 +398,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (dccp_v6_send_response(sk, req))
+ goto drop_and_free;
+
+- inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+- reqsk_put(req);
++ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
++ reqsk_free(req);
++ else
++ reqsk_put(req);
++
+ return 0;
+
+ drop_and_free:
+@@ -1119,15 +1122,9 @@ static void __net_exit dccp_v6_exit_net(struct net *net)
+ inet_ctl_sock_destroy(pn->v6_ctl_sk);
+ }
+
+-static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
+-{
+- inet_twsk_purge(&dccp_hashinfo, AF_INET6);
+-}
+-
+ static struct pernet_operations dccp_v6_ops = {
+ .init = dccp_v6_init_net,
+ .exit = dccp_v6_exit_net,
+- .exit_batch = dccp_v6_exit_batch,
+ .id = &dccp_v6_pernet_id,
+ .size = sizeof(struct dccp_v6_pernet),
+ };
+diff --git a/net/devlink/core.c b/net/devlink/core.c
+index 6cec4afb01fbd8..451f2bc141a052 100644
+--- a/net/devlink/core.c
++++ b/net/devlink/core.c
+@@ -308,14 +308,20 @@ static int __init devlink_init(void)
+ {
+ int err;
+
+- err = genl_register_family(&devlink_nl_family);
+- if (err)
+- goto out;
+ err = register_pernet_subsys(&devlink_pernet_ops);
+ if (err)
+ goto out;
++ err = genl_register_family(&devlink_nl_family);
++ if (err)
++ goto out_unreg_pernet_subsys;
+ err = register_netdevice_notifier(&devlink_port_netdevice_nb);
++ if (!err)
++ return 0;
++
++ genl_unregister_family(&devlink_nl_family);
+
++out_unreg_pernet_subsys:
++ unregister_pernet_subsys(&devlink_pernet_ops);
+ out:
+ WARN_ON(err);
+ return err;
+diff --git a/net/devlink/port.c b/net/devlink/port.c
+index 4763b42885fbd2..4d49c21997e618 100644
+--- a/net/devlink/port.c
++++ b/net/devlink/port.c
+@@ -574,7 +574,7 @@ devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+
+ xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
+ err = devlink_nl_port_fill(msg, devlink_port,
+- DEVLINK_CMD_NEW,
++ DEVLINK_CMD_PORT_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags,
+ cb->extack);
+@@ -665,7 +665,7 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port,
+ return -EOPNOTSUPP;
+ }
+ if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) {
+- NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR],
++ NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FN_ATTR_STATE],
+ "Function does not support state setting");
+ return -EOPNOTSUPP;
+ }
+@@ -881,7 +881,7 @@ int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct genl_info *info)
+ err = -ENOMEM;
+ goto err_out_port_del;
+ }
+- err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
++ err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
+ info->snd_portid, info->snd_seq, 0, NULL);
+ if (WARN_ON_ONCE(err))
+ goto err_out_msg_free;
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 01e54b46ae0b97..c42ddd85ff1f9c 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -91,7 +91,6 @@ const struct cred *dns_resolver_cache;
+ static int
+ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ {
+- const struct dns_payload_header *bin;
+ struct user_key_payload *upayload;
+ unsigned long derrno;
+ int ret;
+@@ -102,26 +101,34 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ return -EINVAL;
+
+ if (data[0] == 0) {
++ const struct dns_server_list_v1_header *v1;
++
+ /* It may be a server list. */
+- if (datalen <= sizeof(*bin))
++ if (datalen < sizeof(*v1))
+ return -EINVAL;
+
+- bin = (const struct dns_payload_header *)data;
+- kenter("[%u,%u],%u", bin->content, bin->version, datalen);
+- if (bin->content != DNS_PAYLOAD_IS_SERVER_LIST) {
++ v1 = (const struct dns_server_list_v1_header *)data;
++ kenter("[%u,%u],%u", v1->hdr.content, v1->hdr.version, datalen);
++ if (v1->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST) {
+ pr_warn_ratelimited(
+ "dns_resolver: Unsupported content type (%u)\n",
+- bin->content);
++ v1->hdr.content);
+ return -EINVAL;
+ }
+
+- if (bin->version != 1) {
++ if (v1->hdr.version != 1) {
+ pr_warn_ratelimited(
+ "dns_resolver: Unsupported server list version (%u)\n",
+- bin->version);
++ v1->hdr.version);
+ return -EINVAL;
+ }
+
++ if (v1->status != DNS_LOOKUP_GOOD &&
++ v1->status != DNS_LOOKUP_GOOD_WITH_BAD) {
++ if (prep->expiry == TIME64_MAX)
++ prep->expiry = ktime_get_real_seconds() + 1;
++ }
++
+ result_len = datalen;
+ goto store_result;
+ }
+@@ -314,7 +321,7 @@ static long dns_resolver_read(const struct key *key,
+
+ struct key_type key_type_dns_resolver = {
+ .name = "dns_resolver",
+- .flags = KEY_TYPE_NET_DOMAIN,
++ .flags = KEY_TYPE_NET_DOMAIN | KEY_TYPE_INSTANT_REAP,
+ .preparse = dns_resolver_preparse,
+ .free_preparse = dns_resolver_free_preparse,
+ .instantiate = generic_key_instantiate,
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 48db91b33390bb..9328ca004fd900 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -2822,13 +2822,14 @@ EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
+ static int dsa_slave_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+ {
+- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct netlink_ext_ack *extack;
+ int err = NOTIFY_DONE;
++ struct dsa_port *dp;
+
+ if (!dsa_slave_dev_check(dev))
+ return err;
+
++ dp = dsa_slave_to_port(dev);
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+@@ -2881,11 +2882,13 @@ static int dsa_slave_changeupper(struct net_device *dev,
+ static int dsa_slave_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+ {
+- struct dsa_port *dp = dsa_slave_to_port(dev);
++ struct dsa_port *dp;
+
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
++ dp = dsa_slave_to_port(dev);
++
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ dsa_port_pre_bridge_leave(dp, info->upper_dev);
+ else if (netif_is_lag_master(info->upper_dev) && !info->linking)
+diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
+index 20bf7074d5a679..ff0ae3f6be5660 100644
+--- a/net/dsa/tag_ocelot.c
++++ b/net/dsa/tag_ocelot.c
+@@ -8,40 +8,6 @@
+ #define OCELOT_NAME "ocelot"
+ #define SEVILLE_NAME "seville"
+
+-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
+- * payload and move it into the DSA tag, which will make the switch classify
+- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+- * which is the pvid of standalone and VLAN-unaware bridge ports.
+- */
+-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
+- u64 *vlan_tci, u64 *tag_type)
+-{
+- struct net_device *br = dsa_port_bridge_dev_get(dp);
+- struct vlan_ethhdr *hdr;
+- u16 proto, tci;
+-
+- if (!br || !br_vlan_enabled(br)) {
+- *vlan_tci = 0;
+- *tag_type = IFH_TAG_TYPE_C;
+- return;
+- }
+-
+- hdr = skb_vlan_eth_hdr(skb);
+- br_vlan_get_proto(br, &proto);
+-
+- if (ntohs(hdr->h_vlan_proto) == proto) {
+- vlan_remove_tag(skb, &tci);
+- *vlan_tci = tci;
+- } else {
+- rcu_read_lock();
+- br_vlan_get_pvid_rcu(br, &tci);
+- rcu_read_unlock();
+- *vlan_tci = tci;
+- }
+-
+- *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+-}
+-
+ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
+ __be32 ifh_prefix, void **ifh)
+ {
+@@ -53,7 +19,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
+ u32 rew_op = 0;
+ u64 qos_class;
+
+- ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
++ ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci,
++ &tag_type);
+
+ qos_class = netdev_get_num_tc(netdev) ?
+ netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 2edc8b796a4e73..049c3adeb85044 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+ eth = (struct ethhdr *)skb->data;
+ skb_pull_inline(skb, ETH_HLEN);
+
+- if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+- dev->dev_addr))) {
+- if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+- if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+- skb->pkt_type = PACKET_BROADCAST;
+- else
+- skb->pkt_type = PACKET_MULTICAST;
+- } else {
+- skb->pkt_type = PACKET_OTHERHOST;
+- }
+- }
++ eth_skb_pkt_type(skb, dev);
+
+ /*
+ * Some variants of DSA tagging don't have an ethertype field
+diff --git a/net/ethtool/features.c b/net/ethtool/features.c
+index a79af8c25a0712..b6cb101d7f19ef 100644
+--- a/net/ethtool/features.c
++++ b/net/ethtool/features.c
+@@ -234,17 +234,20 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ dev = req_info.dev;
+
+ rtnl_lock();
++ ret = ethnl_ops_begin(dev);
++ if (ret < 0)
++ goto out_rtnl;
+ ethnl_features_to_bitmap(old_active, dev->features);
+ ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
+ ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
+ tb[ETHTOOL_A_FEATURES_WANTED],
+ netdev_features_strings, info->extack);
+ if (ret < 0)
+- goto out_rtnl;
++ goto out_ops;
+ if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
+ GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
+ ret = -EINVAL;
+- goto out_rtnl;
++ goto out_ops;
+ }
+
+ /* set req_wanted bits not in req_mask from old_wanted */
+@@ -281,6 +284,8 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ if (mod)
+ netdev_features_change(dev);
+
++out_ops:
++ ethnl_ops_complete(dev);
+ out_rtnl:
+ rtnl_unlock();
+ ethnl_parse_header_dev_put(&req_info);
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 0b0ce4f81c017c..4486cbe2faf0c5 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -438,6 +438,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev,
+ if (!dev->ethtool_ops->get_link_ksettings)
+ return -EOPNOTSUPP;
+
++ if (!netif_device_present(dev))
++ return -ENODEV;
++
+ memset(link_ksettings, 0, sizeof(*link_ksettings));
+ return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
+ }
+@@ -2134,7 +2137,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ int n_stats, ret;
+
+- if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
++ if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
+ return -EOPNOTSUPP;
+
+ n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
+index b2de2108b356ab..34d76e87847d08 100644
+--- a/net/ethtool/linkstate.c
++++ b/net/ethtool/linkstate.c
+@@ -37,6 +37,8 @@ static int linkstate_get_sqi(struct net_device *dev)
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv || !phydev->drv->get_sqi)
+ ret = -EOPNOTSUPP;
++ else if (!phydev->link)
++ ret = -ENETDOWN;
+ else
+ ret = phydev->drv->get_sqi(phydev);
+ mutex_unlock(&phydev->lock);
+@@ -55,6 +57,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv || !phydev->drv->get_sqi_max)
+ ret = -EOPNOTSUPP;
++ else if (!phydev->link)
++ ret = -ENETDOWN;
+ else
+ ret = phydev->drv->get_sqi_max(phydev);
+ mutex_unlock(&phydev->lock);
+@@ -62,6 +66,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ return ret;
+ };
+
++static bool linkstate_sqi_critical_error(int sqi)
++{
++ return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
++}
++
++static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
++{
++ return data->sqi >= 0 && data->sqi_max >= 0 &&
++ data->sqi <= data->sqi_max;
++}
++
+ static int linkstate_get_link_ext_state(struct net_device *dev,
+ struct linkstate_reply_data *data)
+ {
+@@ -93,12 +108,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
+ data->link = __ethtool_get_link(dev);
+
+ ret = linkstate_get_sqi(dev);
+- if (ret < 0 && ret != -EOPNOTSUPP)
++ if (linkstate_sqi_critical_error(ret))
+ goto out;
+ data->sqi = ret;
+
+ ret = linkstate_get_sqi_max(dev);
+- if (ret < 0 && ret != -EOPNOTSUPP)
++ if (linkstate_sqi_critical_error(ret))
+ goto out;
+ data->sqi_max = ret;
+
+@@ -136,11 +151,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
+ len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
+ + 0;
+
+- if (data->sqi != -EOPNOTSUPP)
+- len += nla_total_size(sizeof(u32));
+-
+- if (data->sqi_max != -EOPNOTSUPP)
+- len += nla_total_size(sizeof(u32));
++ if (linkstate_sqi_valid(data)) {
++ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
++ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
++ }
+
+ if (data->link_ext_state_provided)
+ len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
+@@ -164,13 +178,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
+ nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
+ return -EMSGSIZE;
+
+- if (data->sqi != -EOPNOTSUPP &&
+- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+- return -EMSGSIZE;
++ if (linkstate_sqi_valid(data)) {
++ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
++ return -EMSGSIZE;
+
+- if (data->sqi_max != -EOPNOTSUPP &&
+- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
+- return -EMSGSIZE;
++ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
++ data->sqi_max))
++ return -EMSGSIZE;
++ }
+
+ if (data->link_ext_state_provided) {
+ if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 3bbd5afb7b31cf..fe3553f60bf39e 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -505,6 +505,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
+ ret = skb->len;
+ break;
+ }
++ ret = 0;
+ }
+ rtnl_unlock();
+
+diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c
+index 16ed7bfd29e4fb..34fd1d9b2db861 100644
+--- a/net/handshake/handshake-test.c
++++ b/net/handshake/handshake-test.c
+@@ -471,7 +471,10 @@ static void handshake_req_destroy_test1(struct kunit *test)
+ handshake_req_cancel(sock->sk);
+
+ /* Act */
+- fput(filp);
++ /* Ensure the close/release/put process has run to
++ * completion before checking the result.
++ */
++ __fput_sync(filp);
+
+ /* Assert */
+ KUNIT_EXPECT_PTR_EQ(test, handshake_req_destroy_test, req);
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 306f942c3b28af..c5f7bd01379ce3 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -31,8 +31,8 @@ static bool is_slave_up(struct net_device *dev)
+ static void __hsr_set_operstate(struct net_device *dev, int transition)
+ {
+ write_lock(&dev_base_lock);
+- if (dev->operstate != transition) {
+- dev->operstate = transition;
++ if (READ_ONCE(dev->operstate) != transition) {
++ WRITE_ONCE(dev->operstate, transition);
+ write_unlock(&dev_base_lock);
+ netdev_state_change(dev);
+ } else {
+@@ -71,39 +71,36 @@ static bool hsr_check_carrier(struct hsr_port *master)
+ return false;
+ }
+
+-static void hsr_check_announce(struct net_device *hsr_dev,
+- unsigned char old_operstate)
++static void hsr_check_announce(struct net_device *hsr_dev)
+ {
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(hsr_dev);
+-
+- if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
+- /* Went up */
+- hsr->announce_count = 0;
+- mod_timer(&hsr->announce_timer,
+- jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
++ if (netif_running(hsr_dev) && netif_oper_up(hsr_dev)) {
++ /* Enable announce timer and start sending supervisory frames */
++ if (!timer_pending(&hsr->announce_timer)) {
++ hsr->announce_count = 0;
++ mod_timer(&hsr->announce_timer, jiffies +
++ msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
++ }
++ } else {
++ /* Deactivate the announce timer */
++ timer_delete(&hsr->announce_timer);
+ }
+-
+- if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
+- /* Went down */
+- del_timer(&hsr->announce_timer);
+ }
+
+ void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
+ {
+ struct hsr_port *master;
+- unsigned char old_operstate;
+ bool has_carrier;
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ /* netif_stacked_transfer_operstate() cannot be used here since
+ * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+ */
+- old_operstate = master->dev->operstate;
+ has_carrier = hsr_check_carrier(master);
+ hsr_set_operstate(master, has_carrier);
+- hsr_check_announce(master->dev, old_operstate);
++ hsr_check_announce(master->dev);
+ }
+
+ int hsr_get_max_mtu(struct hsr_priv *hsr)
+@@ -291,7 +288,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+
+ skb = hsr_init_skb(master);
+ if (!skb) {
+- WARN_ONCE(1, "HSR: Could not send supervision frame\n");
++ netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
+ return;
+ }
+
+@@ -338,7 +335,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
+
+ skb = hsr_init_skb(master);
+ if (!skb) {
+- WARN_ONCE(1, "PRP: Could not send supervision frame\n");
++ netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
+ return;
+ }
+
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index b71dab630a8732..0323ab5023c690 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -83,7 +83,7 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
+ return false;
+
+ /* Get next tlv */
+- total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
++ total_length += hsr_sup_tag->tlv.HSR_TLV_length;
+ if (!pskb_may_pull(skb, total_length))
+ return false;
+ skb_pull(skb, total_length);
+@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ skb = skb_copy_expand(frame->skb_std, 0,
+ skb_tailroom(frame->skb_std) + HSR_HLEN,
+ GFP_ATOMIC);
+- prp_fill_rct(skb, frame, port);
+-
+- return skb;
++ return prp_fill_rct(skb, frame, port);
+ }
+
+ static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 6d14d935ee828d..26329db09210bb 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -228,6 +228,10 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ */
+ if (ethhdr->h_proto == htons(ETH_P_PRP) ||
+ ethhdr->h_proto == htons(ETH_P_HSR)) {
++ /* Check if skb contains hsr_ethhdr */
++ if (skb->mac_len < sizeof(struct hsr_ethhdr))
++ return NULL;
++
+ /* Use the existing sequence_nr from the tag as starting point
+ * for filtering duplicate frames.
+ */
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index b099c315015096..257b50124cee5e 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -148,14 +148,21 @@ static struct notifier_block hsr_nb = {
+
+ static int __init hsr_init(void)
+ {
+- int res;
++ int err;
+
+ BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
+
+- register_netdevice_notifier(&hsr_nb);
+- res = hsr_netlink_init();
++ err = register_netdevice_notifier(&hsr_nb);
++ if (err)
++ return err;
++
++ err = hsr_netlink_init();
++ if (err) {
++ unregister_netdevice_notifier(&hsr_nb);
++ return err;
++ }
+
+- return res;
++ return 0;
+ }
+
+ static void __exit hsr_exit(void)
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index e5742f2a2d522a..1b6457f357bdb2 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -220,7 +220,8 @@ void hsr_del_port(struct hsr_port *port)
+ netdev_update_features(master->dev);
+ dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+ netdev_rx_handler_unregister(port->dev);
+- dev_set_promiscuity(port->dev, -1);
++ if (!port->hsr->fwd_offloaded)
++ dev_set_promiscuity(port->dev, -1);
+ netdev_upper_dev_unlink(port->dev, master->dev);
+ }
+
+diff --git a/net/ife/ife.c b/net/ife/ife.c
+index 13bbf8cb6a3961..be05b690b9ef29 100644
+--- a/net/ife/ife.c
++++ b/net/ife/ife.c
+@@ -82,6 +82,7 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
+ if (unlikely(!pskb_may_pull(skb, total_pull)))
+ return NULL;
+
++ ifehdr = (struct ifeheadr *)(skb->data + skb->dev->hard_header_len);
+ skb_set_mac_header(skb, total_pull);
+ __skb_pull(skb, total_pull);
+ *metalen = ifehdrln - IFE_METAHDRLEN;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 2713c9b06c4c0f..3feff7f738a487 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -330,6 +330,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
+ if (INET_PROTOSW_REUSE & answer_flags)
+ sk->sk_reuse = SK_CAN_REUSE;
+
++ if (INET_PROTOSW_ICSK & answer_flags)
++ inet_init_csk_locks(sk);
++
+ inet = inet_sk(sk);
+ inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
+
+@@ -452,7 +455,7 @@ int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ /* BPF prog is run before any checks are done so that if the prog
+ * changes context in a wrong way it will be caught.
+ */
+- err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
++ err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
+ CGROUP_INET4_BIND, &flags);
+ if (err)
+ return err;
+@@ -754,7 +757,9 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
+ sock_rps_record_flow(newsk);
+ WARN_ON(!((1 << newsk->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+- TCPF_CLOSE_WAIT | TCPF_CLOSE)));
++ TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
++ TCPF_CLOSING | TCPF_CLOSE_WAIT |
++ TCPF_CLOSE)));
+
+ if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
+ set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
+@@ -794,6 +799,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
+ DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
++ int sin_addr_len = sizeof(*sin);
+
+ sin->sin_family = AF_INET;
+ lock_sock(sk);
+@@ -806,7 +812,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
+ }
+ sin->sin_port = inet->inet_dport;
+ sin->sin_addr.s_addr = inet->inet_daddr;
+- BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
++ BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
+ CGROUP_INET4_GETPEERNAME);
+ } else {
+ __be32 addr = inet->inet_rcv_saddr;
+@@ -814,12 +820,12 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
+ addr = inet->inet_saddr;
+ sin->sin_port = inet->inet_sport;
+ sin->sin_addr.s_addr = addr;
+- BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
++ BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
+ CGROUP_INET4_GETSOCKNAME);
+ }
+ release_sock(sk);
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+- return sizeof(*sin);
++ return sin_addr_len;
+ }
+ EXPORT_SYMBOL(inet_getname);
+
+@@ -1567,6 +1573,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
+ /* The above will be needed by the transport layer if there is one
+ * immediately following this IP hdr.
+ */
++ NAPI_GRO_CB(skb)->inner_network_offset = off;
+
+ /* Note : No need to call skb_gro_postpull_rcsum() here,
+ * as we already checked checksum over ipv4 header was 0
+@@ -1624,14 +1631,17 @@ EXPORT_SYMBOL(inet_current_timestamp);
+
+ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+- if (sk->sk_family == AF_INET)
++ unsigned int family = READ_ONCE(sk->sk_family);
++
++ if (family == AF_INET)
+ return ip_recv_error(sk, msg, len, addr_len);
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (sk->sk_family == AF_INET6)
++ if (family == AF_INET6)
+ return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+ #endif
+ return -EINVAL;
+ }
++EXPORT_SYMBOL(inet_recv_error);
+
+ int inet_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 9456f5bb35e5d9..0d0d725b46ad0c 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1125,7 +1125,8 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
+ if (neigh) {
+ if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
+ read_lock_bh(&neigh->lock);
+- memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
++ memcpy(r->arp_ha.sa_data, neigh->ha,
++ min(dev->addr_len, sizeof(r->arp_ha.sa_data_min)));
+ r->arp_flags = arp_state_to_flags(neigh);
+ read_unlock_bh(&neigh->lock);
+ r->arp_ha.sa_family = dev->type;
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index d048aa83329386..685474ef11c400 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -2015,12 +2015,16 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
+ * from there we can determine the new total option length */
+ iter = 0;
+ optlen_new = 0;
+- while (iter < opt->opt.optlen)
+- if (opt->opt.__data[iter] != IPOPT_NOP) {
++ while (iter < opt->opt.optlen) {
++ if (opt->opt.__data[iter] == IPOPT_END) {
++ break;
++ } else if (opt->opt.__data[iter] == IPOPT_NOP) {
++ iter++;
++ } else {
+ iter += opt->opt.__data[iter + 1];
+ optlen_new = iter;
+- } else
+- iter++;
++ }
++ }
+ hdr_delta = opt->opt.optlen;
+ opt->opt.optlen = (optlen_new + 3) & ~3;
+ hdr_delta -= opt->opt.optlen;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index ca0ff15dc8fa35..cb0c80328eebf3 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -569,10 +569,6 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
+
+ ASSERT_RTNL();
+
+- if (!in_dev) {
+- inet_free_ifa(ifa);
+- return -ENOBUFS;
+- }
+ ipv4_devconf_setall(in_dev);
+ neigh_parms_data_state_setall(in_dev->arp_parms);
+ if (ifa->ifa_dev != in_dev) {
+@@ -1174,6 +1170,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
+
+ if (!ifa) {
+ ret = -ENOBUFS;
++ if (!in_dev)
++ break;
+ ifa = inet_alloc_ifa();
+ if (!ifa)
+ break;
+@@ -1825,6 +1823,21 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
+ return err;
+ }
+
++/* Combine dev_addr_genid and dev_base_seq to detect changes.
++ */
++static u32 inet_base_seq(const struct net *net)
++{
++ u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
++ net->dev_base_seq;
++
++ /* Must not return 0 (see nl_dump_check_consistent()).
++ * Chose a value far away from 0.
++ */
++ if (!res)
++ res = 0x80000000;
++ return res;
++}
++
+ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ const struct nlmsghdr *nlh = cb->nlh;
+@@ -1876,8 +1889,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ idx = 0;
+ head = &tgt_net->dev_index_head[h];
+ rcu_read_lock();
+- cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
+- tgt_net->dev_base_seq;
++ cb->seq = inet_base_seq(tgt_net);
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+@@ -2278,8 +2290,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
+ idx = 0;
+ head = &net->dev_index_head[h];
+ rcu_read_lock();
+- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+- net->dev_base_seq;
++ cb->seq = inet_base_seq(net);
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 4ccfc104f13a51..eeace9b509cec7 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -95,7 +95,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+ __alignof__(struct scatterlist));
+ }
+
+-static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
++static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
+ {
+ struct crypto_aead *aead = x->data;
+ int extralen = 0;
+@@ -114,7 +114,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+ */
+ if (req->src != req->dst)
+ for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+- put_page(sg_page(sg));
++ skb_page_unref(skb, sg_page(sg), false);
+ }
+
+ #ifdef CONFIG_INET_ESPINTCP
+@@ -238,8 +238,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ #else
+ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ {
+- kfree_skb(skb);
+-
++ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+ #endif
+@@ -260,7 +259,7 @@ static void esp_output_done(void *data, int err)
+ }
+
+ tmp = ESP_SKB_CB(skb)->tmp;
+- esp_ssg_unref(x, tmp);
++ esp_ssg_unref(x, tmp, skb);
+ kfree(tmp);
+
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+@@ -639,7 +638,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
+ }
+
+ if (sg != dsg)
+- esp_ssg_unref(x, tmp);
++ esp_ssg_unref(x, tmp, skb);
+
+ if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
+ err = esp_output_tail_tcp(x, skb);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 390f4be7f7bec2..90ce87ffed4617 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1343,7 +1343,7 @@ static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
+ struct flowi4 fl4 = {
+ .flowi4_mark = frn->fl_mark,
+ .daddr = frn->fl_addr,
+- .flowi4_tos = frn->fl_tos,
++ .flowi4_tos = frn->fl_tos & IPTOS_RT_MASK,
+ .flowi4_scope = frn->fl_scope,
+ };
+ struct fib_table *tb;
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 5eb1b8d302bbd1..233d9d0437c278 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1030,7 +1030,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ bool ecn_ca = false;
+
+ nla_strscpy(tmp, nla, sizeof(tmp));
+- val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
++ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ } else {
+ if (nla_len(nla) != sizeof(u32))
+ return false;
+@@ -1459,8 +1459,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
+ fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
+ if (!fi)
+ goto failure;
+- fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
+- cfg->fc_mx_len, extack);
++ fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack);
+ if (IS_ERR(fi->fib_metrics)) {
+ err = PTR_ERR(fi->fib_metrics);
+ kfree(fi);
+@@ -2270,6 +2269,15 @@ void fib_select_path(struct net *net, struct fib_result *res,
+ fib_select_default(fl4, res);
+
+ check_saddr:
+- if (!fl4->saddr)
+- fl4->saddr = fib_result_prefsrc(net, res);
++ if (!fl4->saddr) {
++ struct net_device *l3mdev;
++
++ l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev);
++
++ if (!l3mdev ||
++ l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev)
++ fl4->saddr = fib_result_prefsrc(net, res);
++ else
++ fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK);
++ }
+ }
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 9bdfdab906fe00..77b97c48da5ea8 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1628,6 +1628,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+ res->nhc = nhc;
+ res->type = fa->fa_type;
+ res->scope = fi->fib_scope;
++ res->dscp = fa->fa_dscp;
+ res->fi = fi;
+ res->table = tb;
+ res->fa_head = &n->leaf;
+diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
+index 0c41076e31edad..4e0a7d038e219c 100644
+--- a/net/ipv4/fou_core.c
++++ b/net/ipv4/fou_core.c
+@@ -50,7 +50,7 @@ struct fou_net {
+
+ static inline struct fou *fou_from_sock(struct sock *sk)
+ {
+- return sk->sk_user_data;
++ return rcu_dereference_sk_user_data(sk);
+ }
+
+ static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
+@@ -233,9 +233,15 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
+ struct sk_buff *skb)
+ {
+ const struct net_offload __rcu **offloads;
+- u8 proto = fou_from_sock(sk)->protocol;
++ struct fou *fou = fou_from_sock(sk);
+ const struct net_offload *ops;
+ struct sk_buff *pp = NULL;
++ u8 proto;
++
++ if (!fou)
++ goto out;
++
++ proto = fou->protocol;
+
+ /* We can clear the encap_mark for FOU as we are essentially doing
+ * one of two possible things. We are either adding an L4 tunnel
+@@ -263,14 +269,24 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+ int nhoff)
+ {
+ const struct net_offload __rcu **offloads;
+- u8 proto = fou_from_sock(sk)->protocol;
++ struct fou *fou = fou_from_sock(sk);
+ const struct net_offload *ops;
+- int err = -ENOSYS;
++ u8 proto;
++ int err;
++
++ if (!fou) {
++ err = -ENOENT;
++ goto out;
++ }
++
++ proto = fou->protocol;
+
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+- if (WARN_ON(!ops || !ops->callbacks.gro_complete))
++ if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
++ err = -ENOSYS;
+ goto out;
++ }
+
+ err = ops->callbacks.gro_complete(skb, nhoff);
+
+@@ -322,6 +338,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+
+ skb_gro_remcsum_init(&grc);
+
++ if (!fou)
++ goto out;
++
+ off = skb_gro_offset(skb);
+ len = off + sizeof(*guehdr);
+
+@@ -433,7 +452,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+- if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
++ if (!ops || !ops->callbacks.gro_receive)
+ goto out;
+
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index b8607763d113a5..9dffdd876fef50 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -92,6 +92,7 @@
+ #include <net/inet_common.h>
+ #include <net/ip_fib.h>
+ #include <net/l3mdev.h>
++#include <net/addrconf.h>
+
+ /*
+ * Build xmit assembly blocks
+@@ -221,57 +222,59 @@ int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
+ int sysctl_icmp_msgs_burst __read_mostly = 50;
+
+ static struct {
+- spinlock_t lock;
+- u32 credit;
++ atomic_t credit;
+ u32 stamp;
+-} icmp_global = {
+- .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock),
+-};
++} icmp_global;
+
+ /**
+ * icmp_global_allow - Are we allowed to send one more ICMP message ?
+ *
+ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
+ * Returns false if we reached the limit and can not send another packet.
+- * Note: called with BH disabled
++ * Works in tandem with icmp_global_consume().
+ */
+ bool icmp_global_allow(void)
+ {
+- u32 credit, delta, incr = 0, now = (u32)jiffies;
+- bool rc = false;
++ u32 delta, now, oldstamp;
++ int incr, new, old;
+
+- /* Check if token bucket is empty and cannot be refilled
+- * without taking the spinlock. The READ_ONCE() are paired
+- * with the following WRITE_ONCE() in this same function.
++ /* Note: many cpus could find this condition true.
++ * Then later icmp_global_consume() could consume more credits,
++ * this is an acceptable race.
+ */
+- if (!READ_ONCE(icmp_global.credit)) {
+- delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
+- if (delta < HZ / 50)
+- return false;
+- }
++ if (atomic_read(&icmp_global.credit) > 0)
++ return true;
+
+- spin_lock(&icmp_global.lock);
+- delta = min_t(u32, now - icmp_global.stamp, HZ);
+- if (delta >= HZ / 50) {
+- incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
+- if (incr)
+- WRITE_ONCE(icmp_global.stamp, now);
+- }
+- credit = min_t(u32, icmp_global.credit + incr,
+- READ_ONCE(sysctl_icmp_msgs_burst));
+- if (credit) {
+- /* We want to use a credit of one in average, but need to randomize
+- * it for security reasons.
+- */
+- credit = max_t(int, credit - get_random_u32_below(3), 0);
+- rc = true;
++ now = jiffies;
++ oldstamp = READ_ONCE(icmp_global.stamp);
++ delta = min_t(u32, now - oldstamp, HZ);
++ if (delta < HZ / 50)
++ return false;
++
++ incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
++ if (!incr)
++ return false;
++
++ if (cmpxchg(&icmp_global.stamp, oldstamp, now) == oldstamp) {
++ old = atomic_read(&icmp_global.credit);
++ do {
++ new = min(old + incr, READ_ONCE(sysctl_icmp_msgs_burst));
++ } while (!atomic_try_cmpxchg(&icmp_global.credit, &old, new));
+ }
+- WRITE_ONCE(icmp_global.credit, credit);
+- spin_unlock(&icmp_global.lock);
+- return rc;
++ return true;
+ }
+ EXPORT_SYMBOL(icmp_global_allow);
+
++void icmp_global_consume(void)
++{
++ int credits = get_random_u32_below(3);
++
++ /* Note: this might make icmp_global.credit negative. */
++ if (credits)
++ atomic_sub(credits, &icmp_global.credit);
++}
++EXPORT_SYMBOL(icmp_global_consume);
++
+ static bool icmpv4_mask_allow(struct net *net, int type, int code)
+ {
+ if (type > NR_ICMP_TYPES)
+@@ -288,14 +291,16 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code)
+ return false;
+ }
+
+-static bool icmpv4_global_allow(struct net *net, int type, int code)
++static bool icmpv4_global_allow(struct net *net, int type, int code,
++ bool *apply_ratelimit)
+ {
+ if (icmpv4_mask_allow(net, type, code))
+ return true;
+
+- if (icmp_global_allow())
++ if (icmp_global_allow()) {
++ *apply_ratelimit = true;
+ return true;
+-
++ }
+ __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
+ return false;
+ }
+@@ -305,15 +310,16 @@ static bool icmpv4_global_allow(struct net *net, int type, int code)
+ */
+
+ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+- struct flowi4 *fl4, int type, int code)
++ struct flowi4 *fl4, int type, int code,
++ bool apply_ratelimit)
+ {
+ struct dst_entry *dst = &rt->dst;
+ struct inet_peer *peer;
+ bool rc = true;
+ int vif;
+
+- if (icmpv4_mask_allow(net, type, code))
+- goto out;
++ if (!apply_ratelimit)
++ return true;
+
+ /* No rate limit on loopback */
+ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
+@@ -328,6 +334,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ out:
+ if (!rc)
+ __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
++ else
++ icmp_global_consume();
+ return rc;
+ }
+
+@@ -399,6 +407,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
+ struct ipcm_cookie ipc;
+ struct rtable *rt = skb_rtable(skb);
+ struct net *net = dev_net(rt->dst.dev);
++ bool apply_ratelimit = false;
+ struct flowi4 fl4;
+ struct sock *sk;
+ struct inet_sock *inet;
+@@ -410,11 +419,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
+ if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
+ return;
+
+- /* Needed by both icmp_global_allow and icmp_xmit_lock */
++ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */
+ local_bh_disable();
+
+- /* global icmp_msgs_per_sec */
+- if (!icmpv4_global_allow(net, type, code))
++ /* is global icmp_msgs_per_sec exhausted ? */
++ if (!icmpv4_global_allow(net, type, code, &apply_ratelimit))
+ goto out_bh_enable;
+
+ sk = icmp_xmit_lock(net);
+@@ -447,7 +456,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ goto out_unlock;
+- if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
++ if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
+ icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
+ ip_rt_put(rt);
+ out_unlock:
+@@ -591,6 +600,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ int room;
+ struct icmp_bxm icmp_param;
+ struct rtable *rt = skb_rtable(skb_in);
++ bool apply_ratelimit = false;
+ struct ipcm_cookie ipc;
+ struct flowi4 fl4;
+ __be32 saddr;
+@@ -672,7 +682,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ }
+ }
+
+- /* Needed by both icmp_global_allow and icmp_xmit_lock */
++ /* Needed by both icmpv4_global_allow and icmp_xmit_lock */
+ local_bh_disable();
+
+ /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+@@ -680,7 +690,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
+ */
+ if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+- !icmpv4_global_allow(net, type, code))
++ !icmpv4_global_allow(net, type, code, &apply_ratelimit))
+ goto out_bh_enable;
+
+ sk = icmp_xmit_lock(net);
+@@ -739,7 +749,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ goto out_unlock;
+
+ /* peer icmp_ratelimit */
+- if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
++ if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
+ goto ende;
+
+ /* RFC says return as much as we can without exceeding 576 bytes. */
+@@ -1032,6 +1042,8 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+ struct icmp_ext_hdr *ext_hdr, _ext_hdr;
+ struct icmp_ext_echo_iio *iio, _iio;
+ struct net *net = dev_net(skb->dev);
++ struct inet6_dev *in6_dev;
++ struct in_device *in_dev;
+ struct net_device *dev;
+ char buff[IFNAMSIZ];
+ u16 ident_len;
+@@ -1115,10 +1127,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+ /* Fill bits in reply message */
+ if (dev->flags & IFF_UP)
+ status |= ICMP_EXT_ECHOREPLY_ACTIVE;
+- if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
++
++ in_dev = __in_dev_get_rcu(dev);
++ if (in_dev && rcu_access_pointer(in_dev->ifa_list))
+ status |= ICMP_EXT_ECHOREPLY_IPV4;
+- if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
++
++ in6_dev = __in6_dev_get(dev);
++ if (in6_dev && !list_empty(&in6_dev->addr_list))
+ status |= ICMP_EXT_ECHOREPLY_IPV6;
++
+ dev_put(dev);
+ icmphdr->un.echo.sequence |= htons(status);
+ return true;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 418e5fb58fd3f2..d515881d02a6f7 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
+ int tv = get_random_u32_below(max_delay);
+
+ im->tm_running = 1;
+- if (!mod_timer(&im->timer, jiffies+tv+2))
+- refcount_inc(&im->refcnt);
++ if (refcount_inc_not_zero(&im->refcnt)) {
++ if (mod_timer(&im->timer, jiffies + tv + 2))
++ ip_ma_put(im);
++ }
+ }
+
+ static void igmp_gq_start_timer(struct in_device *in_dev)
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 394a498c282322..8720f3840b6985 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -289,6 +289,7 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
+ struct sock_reuseport *reuseport_cb;
+ struct inet_bind_hashbucket *head2;
+ struct inet_bind2_bucket *tb2;
++ bool conflict = false;
+ bool reuseport_cb_ok;
+
+ rcu_read_lock();
+@@ -301,18 +302,20 @@ static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l
+
+ spin_lock(&head2->lock);
+
+- inet_bind_bucket_for_each(tb2, &head2->chain)
+- if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
+- break;
++ inet_bind_bucket_for_each(tb2, &head2->chain) {
++ if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
++ continue;
+
+- if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
+- reuseport_ok)) {
+- spin_unlock(&head2->lock);
+- return true;
++ if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
++ continue;
++
++ conflict = true;
++ break;
+ }
+
+ spin_unlock(&head2->lock);
+- return false;
++
++ return conflict;
+ }
+
+ /*
+@@ -730,6 +733,10 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
+ }
+ if (req)
+ reqsk_put(req);
++
++ if (newsk)
++ inet_init_csk_locks(newsk);
++
+ return newsk;
+ out_err:
+ newsk = NULL;
+@@ -770,6 +777,20 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
+
++void inet_csk_clear_xmit_timers_sync(struct sock *sk)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++
++ /* ongoing timer handlers need to acquire socket lock. */
++ sock_not_owned_by_me(sk);
++
++ icsk->icsk_pending = icsk->icsk_ack.pending = 0;
++
++ sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
++ sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
++ sk_stop_timer_sync(sk, &sk->sk_timer);
++}
++
+ void inet_csk_delete_keepalive_timer(struct sock *sk)
+ {
+ sk_stop_timer(sk, &sk->sk_timer);
+@@ -1095,25 +1116,34 @@ static void reqsk_timer_handler(struct timer_list *t)
+ inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
+ }
+
+-static void reqsk_queue_hash_req(struct request_sock *req,
++static bool reqsk_queue_hash_req(struct request_sock *req,
+ unsigned long timeout)
+ {
++ bool found_dup_sk = false;
++
++ if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
++ return false;
++
++ /* The timer needs to be setup after a successful insertion. */
+ timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
+ mod_timer(&req->rsk_timer, jiffies + timeout);
+
+- inet_ehash_insert(req_to_sk(req), NULL, NULL);
+ /* before letting lookups find us, make sure all req fields
+ * are committed to memory and refcnt initialized.
+ */
+ smp_wmb();
+ refcount_set(&req->rsk_refcnt, 2 + 1);
++ return true;
+ }
+
+-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
++bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ unsigned long timeout)
+ {
+- reqsk_queue_hash_req(req, timeout);
++ if (!reqsk_queue_hash_req(req, timeout))
++ return false;
++
+ inet_csk_reqsk_queue_added(sk);
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
+
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index e13a84433413ed..87ecefea72398d 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -57,7 +57,7 @@ static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
+ return ERR_PTR(-ENOENT);
+ }
+
+- if (!inet_diag_table[proto])
++ if (!READ_ONCE(inet_diag_table[proto]))
+ sock_load_diag_module(AF_INET, proto);
+
+ mutex_lock(&inet_diag_table_mutex);
+@@ -1281,6 +1281,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
+ req.sdiag_family = AF_UNSPEC; /* compatibility */
+ req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+ req.idiag_ext = rc->idiag_ext;
++ req.pad = 0;
+ req.idiag_states = rc->idiag_states;
+ req.id = rc->id;
+
+@@ -1296,6 +1297,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+ req.sdiag_family = rc->idiag_family;
+ req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+ req.idiag_ext = rc->idiag_ext;
++ req.pad = 0;
+ req.idiag_states = rc->idiag_states;
+ req.id = rc->id;
+
+@@ -1419,7 +1421,7 @@ int inet_diag_register(const struct inet_diag_handler *h)
+ mutex_lock(&inet_diag_table_mutex);
+ err = -EEXIST;
+ if (!inet_diag_table[type]) {
+- inet_diag_table[type] = h;
++ WRITE_ONCE(inet_diag_table[type], h);
+ err = 0;
+ }
+ mutex_unlock(&inet_diag_table_mutex);
+@@ -1436,7 +1438,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
+ return;
+
+ mutex_lock(&inet_diag_table_mutex);
+- inet_diag_table[type] = NULL;
++ WRITE_ONCE(inet_diag_table[type], NULL);
+ mutex_unlock(&inet_diag_table_mutex);
+ }
+ EXPORT_SYMBOL_GPL(inet_diag_unregister);
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 7072fc0783ef56..c88c9034d63004 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -24,6 +24,8 @@
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+
++#include "../core/sock_destructor.h"
++
+ /* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+@@ -39,6 +41,7 @@ struct ipfrag_skb_cb {
+ };
+ struct sk_buff *next_frag;
+ int frag_run_len;
++ int ip_defrag_offset;
+ };
+
+ #define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+@@ -396,12 +399,12 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ */
+ if (!last)
+ fragrun_create(q, skb); /* First fragment. */
+- else if (last->ip_defrag_offset + last->len < end) {
++ else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+- if (offset < last->ip_defrag_offset + last->len)
++ if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
+ return IPFRAG_OVERLAP;
+- if (offset == last->ip_defrag_offset + last->len)
++ if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
+ fragrun_append_to_last(q, skb);
+ else
+ fragrun_create(q, skb);
+@@ -418,13 +421,13 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+
+ parent = *rbn;
+ curr = rb_to_skb(parent);
+- curr_run_end = curr->ip_defrag_offset +
++ curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
+ FRAG_CB(curr)->frag_run_len;
+- if (end <= curr->ip_defrag_offset)
++ if (end <= FRAG_CB(curr)->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= curr_run_end)
+ rbn = &parent->rb_right;
+- else if (offset >= curr->ip_defrag_offset &&
++ else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
+ end <= curr_run_end)
+ return IPFRAG_DUP;
+ else
+@@ -438,7 +441,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+ }
+
+- skb->ip_defrag_offset = offset;
++ FRAG_CB(skb)->ip_defrag_offset = offset;
+
+ return IPFRAG_OK;
+ }
+@@ -448,13 +451,28 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent)
+ {
+ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+- struct sk_buff **nextp;
++ void (*destructor)(struct sk_buff *);
++ unsigned int orig_truesize = 0;
++ struct sk_buff **nextp = NULL;
++ struct sock *sk = skb->sk;
+ int delta;
+
++ if (sk && is_skb_wmem(skb)) {
++ /* TX: skb->sk might have been passed as argument to
++ * dst->output and must remain valid until tx completes.
++ *
++ * Move sk to reassembled skb and fix up wmem accounting.
++ */
++ orig_truesize = skb->truesize;
++ destructor = skb->destructor;
++ }
++
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
+- if (!fp)
+- return NULL;
++ if (!fp) {
++ head = skb;
++ goto out_restore_sk;
++ }
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(parent)->next_frag = fp;
+@@ -463,6 +481,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ &q->rb_fragments);
+ if (q->fragments_tail == skb)
+ q->fragments_tail = fp;
++
++ if (orig_truesize) {
++ /* prevent skb_morph from releasing sk */
++ skb->sk = NULL;
++ skb->destructor = NULL;
++ }
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+@@ -470,13 +494,13 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ consume_skb(head);
+ head = skb;
+ }
+- WARN_ON(head->ip_defrag_offset != 0);
++ WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);
+
+ delta = -head->truesize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+- return NULL;
++ goto out_restore_sk;
+
+ delta += head->truesize;
+ if (delta)
+@@ -492,7 +516,7 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+- return NULL;
++ goto out_restore_sk;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+@@ -509,6 +533,21 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ nextp = &skb_shinfo(head)->frag_list;
+ }
+
++out_restore_sk:
++ if (orig_truesize) {
++ int ts_delta = head->truesize - orig_truesize;
++
++ /* if this reassembled skb is fragmented later,
++ * fraglist skbs will get skb->sk assigned from head->sk,
++ * and each frag skb will be released via sock_wfree.
++ *
++ * Update sk_wmem_alloc.
++ */
++ head->sk = sk;
++ head->destructor = destructor;
++ refcount_add(ts_delta, &sk->sk_wmem_alloc);
++ }
++
+ return nextp;
+ }
+ EXPORT_SYMBOL(inet_frag_reasm_prepare);
+@@ -516,6 +555,8 @@ EXPORT_SYMBOL(inet_frag_reasm_prepare);
+ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data, bool try_coalesce)
+ {
++ struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
++ const unsigned int head_truesize = head->truesize;
+ struct sk_buff **nextp = reasm_data;
+ struct rb_node *rbn;
+ struct sk_buff *fp;
+@@ -579,6 +620,9 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ head->prev = NULL;
+ head->tstamp = q->stamp;
+ head->mono_delivery_time = q->mono_delivery_time;
++
++ if (sk)
++ refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
+ }
+ EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 598c1b114d2c22..7967ff7e02f794 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -751,12 +751,12 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ if (err)
+ goto unlock;
+ }
++ sock_set_flag(sk, SOCK_RCU_FREE);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
+ else
+ __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
+- sock_set_flag(sk, SOCK_RCU_FREE);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ unlock:
+ spin_unlock(&ilb2->lock);
+@@ -1131,10 +1131,33 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ return 0;
+
+ error:
++ if (sk_hashed(sk)) {
++ spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
++
++ sock_prot_inuse_add(net, sk->sk_prot, -1);
++
++ spin_lock(lock);
++ __sk_nulls_del_node_init_rcu(sk);
++ spin_unlock(lock);
++
++ sk->sk_hash = 0;
++ inet_sk(sk)->inet_sport = 0;
++ inet_sk(sk)->inet_num = 0;
++
++ if (tw)
++ inet_twsk_bind_unhash(tw, hinfo);
++ }
++
+ spin_unlock(&head2->lock);
+ if (tb_created)
+ inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
+- spin_unlock_bh(&head->lock);
++ spin_unlock(&head->lock);
++
++ if (tw)
++ inet_twsk_deschedule_put(tw);
++
++ local_bh_enable();
++
+ return -ENOMEM;
+ }
+
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index dd37a5bf688111..fff53144250c51 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -278,52 +278,51 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
+ }
+ EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+
+-void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
++/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
++void inet_twsk_purge(struct inet_hashinfo *hashinfo)
+ {
+- struct inet_timewait_sock *tw;
+- struct sock *sk;
++ struct inet_ehash_bucket *head = &hashinfo->ehash[0];
++ unsigned int ehash_mask = hashinfo->ehash_mask;
+ struct hlist_nulls_node *node;
+ unsigned int slot;
++ struct sock *sk;
++
++ for (slot = 0; slot <= ehash_mask; slot++, head++) {
++ if (hlist_nulls_empty(&head->chain))
++ continue;
+
+- for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+- struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+ restart_rcu:
+ cond_resched();
+ rcu_read_lock();
+ restart:
+ sk_nulls_for_each_rcu(sk, node, &head->chain) {
+- if (sk->sk_state != TCP_TIME_WAIT) {
+- /* A kernel listener socket might not hold refcnt for net,
+- * so reqsk_timer_handler() could be fired after net is
+- * freed. Userspace listener and reqsk never exist here.
+- */
+- if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
+- hashinfo->pernet)) {
+- struct request_sock *req = inet_reqsk(sk);
+-
+- inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
+- }
++ int state = inet_sk_state_load(sk);
+
++ if ((1 << state) & ~(TCPF_TIME_WAIT |
++ TCPF_NEW_SYN_RECV))
+ continue;
+- }
+
+- tw = inet_twsk(sk);
+- if ((tw->tw_family != family) ||
+- refcount_read(&twsk_net(tw)->ns.count))
++ if (refcount_read(&sock_net(sk)->ns.count))
+ continue;
+
+- if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
++ if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ continue;
+
+- if (unlikely((tw->tw_family != family) ||
+- refcount_read(&twsk_net(tw)->ns.count))) {
+- inet_twsk_put(tw);
++ if (refcount_read(&sock_net(sk)->ns.count)) {
++ sock_gen_put(sk);
+ goto restart;
+ }
+
+ rcu_read_unlock();
+ local_bh_disable();
+- inet_twsk_deschedule_put(tw);
++ if (state == TCP_TIME_WAIT) {
++ inet_twsk_deschedule_put(inet_twsk(sk));
++ } else {
++ struct request_sock *req = inet_reqsk(sk);
++
++ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
++ req);
++ }
+ local_bh_enable();
+ goto restart_rcu;
+ }
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index a4941f53b52372..fb947d1613fe2b 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -384,6 +384,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ }
+
+ skb_dst_drop(skb);
++ skb_orphan(skb);
+ return -EINPROGRESS;
+
+ insert_error:
+@@ -487,7 +488,6 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
+ struct ipq *qp;
+
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
+- skb_orphan(skb);
+
+ /* Lookup (or create) queue header */
+ qp = ip_find(net, ip_hdr(skb), user, vif);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 22a26d1d29a09d..890c15510b4210 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -280,8 +280,13 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ tpi->flags | TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
+ } else {
++ if (unlikely(!pskb_may_pull(skb,
++ gre_hdr_len + sizeof(*ershdr))))
++ return PACKET_REJECT;
++
+ ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+ ver = ershdr->ver;
++ iph = ip_hdr(skb);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
+ tpi->flags | TUNNEL_KEY,
+ iph->saddr, iph->daddr, tpi->key);
+@@ -635,15 +640,18 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ }
+
+ if (dev->header_ops) {
++ int pull_len = tunnel->hlen + sizeof(struct iphdr);
++
+ if (skb_cow_head(skb, 0))
+ goto free_skb;
+
++ if (!pskb_may_pull(skb, pull_len))
++ goto free_skb;
++
+ tnl_params = (const struct iphdr *)skb->data;
+
+- /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
+- * to gre header.
+- */
+- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++ /* ip_tunnel_xmit() needs skb->data pointing to gre header. */
++ skb_pull(skb, pull_len);
+ skb_reset_mac_header(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 4ab877cf6d35f2..2458461e24874e 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -101,6 +101,8 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ struct iphdr *iph = ip_hdr(skb);
+
++ IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);
++
+ iph_set_totlen(iph, skb->len);
+ ip_send_check(iph);
+
+@@ -1285,6 +1287,12 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+ if (unlikely(!rt))
+ return -EFAULT;
+
++ cork->fragsize = ip_sk_use_pmtu(sk) ?
++ dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
++
++ if (!inetdev_valid_mtu(cork->fragsize))
++ return -ENETUNREACH;
++
+ /*
+ * setup for corking.
+ */
+@@ -1301,12 +1309,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+ cork->addr = ipc->addr;
+ }
+
+- cork->fragsize = ip_sk_use_pmtu(sk) ?
+- dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+-
+- if (!inetdev_valid_mtu(cork->fragsize))
+- return -ENETUNREACH;
+-
+ cork->gso_size = ipc->gso_size;
+
+ cork->dst = &rt->dst;
+@@ -1467,7 +1469,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ * by icmp_hdr(skb)->type.
+ */
+ if (sk->sk_type == SOCK_RAW &&
+- !inet_test_bit(HDRINCL, sk))
++ !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
+ icmp_type = fl4->fl4_icmp_type;
+ else
+ icmp_type = icmp_hdr(skb)->type;
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index cce9cb25f3b31c..1a6952921e07b7 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1369,12 +1369,13 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
+ * ipv4_pktinfo_prepare - transfer some info from rtable to skb
+ * @sk: socket
+ * @skb: buffer
++ * @drop_dst: if true, drops skb dst
+ *
+ * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
+ * destination in skb->cb[] before dst drop.
+ * This way, receiver doesn't make cache line misses to read rtable.
+ */
+-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
++void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
+ {
+ struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+ bool prepare = inet_test_bit(PKTINFO, sk) ||
+@@ -1403,7 +1404,8 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+ pktinfo->ipi_ifindex = 0;
+ pktinfo->ipi_spec_dst.s_addr = 0;
+ }
+- skb_dst_drop(skb);
++ if (drop_dst)
++ skb_dst_drop(skb);
+ }
+
+ int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index beeae624c412d7..acf93f34a8213d 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -378,7 +378,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ bool log_ecn_error)
+ {
+ const struct iphdr *iph = ip_hdr(skb);
+- int err;
++ int nh, err;
+
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(iph->daddr)) {
+@@ -404,8 +404,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+ tunnel->i_seqno = ntohl(tpi->seq) + 1;
+ }
+
++ /* Save offset of outer header relative to skb->head,
++ * because we are going to reset the network header to the inner header
++ * and might change skb->head.
++ */
++ nh = skb_network_header(skb) - skb->head;
++
+ skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+
++ if (!pskb_inet_may_pull(skb)) {
++ DEV_STATS_INC(tunnel->dev, rx_length_errors);
++ DEV_STATS_INC(tunnel->dev, rx_errors);
++ goto drop;
++ }
++ iph = (struct iphdr *)(skb->head + nh);
++
+ err = IP_ECN_decapsulate(iph, skb);
+ if (unlikely(err)) {
+ if (log_ecn_error)
+@@ -554,6 +567,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ return 0;
+ }
+
++static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
++{
++ /* we must cap headroom to some upperlimit, else pskb_expand_head
++ * will overflow header offsets in skb_headers_offset_update().
++ */
++ static const unsigned int max_allowed = 512;
++
++ if (headroom > max_allowed)
++ headroom = max_allowed;
++
++ if (headroom > READ_ONCE(dev->needed_headroom))
++ WRITE_ONCE(dev->needed_headroom, headroom);
++}
++
+ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ u8 proto, int tunnel_hlen)
+ {
+@@ -632,13 +659,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ }
+
+ headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+- if (headroom > READ_ONCE(dev->needed_headroom))
+- WRITE_ONCE(dev->needed_headroom, headroom);
+-
+- if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
++ if (skb_cow_head(skb, headroom)) {
+ ip_rt_put(rt);
+ goto tx_dropped;
+ }
++
++ ip_tunnel_adj_headroom(dev, headroom);
++
+ iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
+ df, !net_eq(tunnel->net, dev_net(dev)));
+ return;
+@@ -818,16 +845,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+
+ max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+- if (max_headroom > READ_ONCE(dev->needed_headroom))
+- WRITE_ONCE(dev->needed_headroom, max_headroom);
+
+- if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
++ if (skb_cow_head(skb, max_headroom)) {
+ ip_rt_put(rt);
+ DEV_STATS_INC(dev, tx_dropped);
+ kfree_skb(skb);
+ return;
+ }
+
++ ip_tunnel_adj_headroom(dev, max_headroom);
++
+ iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
+ df, !net_eq(tunnel->net, dev_net(dev)));
+ return;
+@@ -1271,6 +1298,7 @@ int ip_tunnel_init(struct net_device *dev)
+
+ if (tunnel->collect_md)
+ netif_keep_dst(dev);
++ netdev_lockdep_set_classes(dev);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(ip_tunnel_init);
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 586b1b3e35b805..80ccd6661aa32f 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -332,7 +332,7 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ };
+ skb_reset_network_header(skb);
+
+- csum = csum_partial(icmp6h, len, 0);
++ csum = skb_checksum(skb, skb_transport_offset(skb), len, 0);
+ icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
+ IPPROTO_ICMPV6, csum);
+
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 9e222a57bc2b47..66eade3fb629f1 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1025,6 +1025,10 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ struct sk_buff *skb;
+ int ret;
+
++ mroute_sk = rcu_dereference(mrt->mroute_sk);
++ if (!mroute_sk)
++ return -EINVAL;
++
+ if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
+ skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
+ else
+@@ -1069,7 +1073,8 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ msg = (struct igmpmsg *)skb_network_header(skb);
+ msg->im_vif = vifi;
+ msg->im_vif_hi = vifi >> 8;
+- skb_dst_set(skb, dst_clone(skb_dst(pkt)));
++ ipv4_pktinfo_prepare(mroute_sk, pkt, false);
++ memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
+ /* Add our header */
+ igmp = skb_put(skb, sizeof(struct igmphdr));
+ igmp->type = assert;
+@@ -1079,12 +1084,6 @@ static int ipmr_cache_report(const struct mr_table *mrt,
+ skb->transport_header = skb->network_header;
+ }
+
+- mroute_sk = rcu_dereference(mrt->mroute_sk);
+- if (!mroute_sk) {
+- kfree_skb(skb);
+- return -EINVAL;
+- }
+-
+ igmpmsg_netlink_event(mrt, skb);
+
+ /* Deliver to mrouted */
+@@ -1604,9 +1603,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
+
+ if (copy_from_sockptr(&olr, optlen, sizeof(int)))
+ return -EFAULT;
+- olr = min_t(unsigned int, olr, sizeof(int));
+ if (olr < 0)
+ return -EINVAL;
++
++ olr = min_t(unsigned int, olr, sizeof(int));
++
+ if (copy_to_sockptr(optlen, &olr, sizeof(int)))
+ return -EFAULT;
+ if (copy_to_sockptr(optval, &val, olr))
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 0e3ee1532848c8..8ddac1f595ed8c 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -7,7 +7,7 @@
+ #include <net/net_namespace.h>
+ #include <net/tcp.h>
+
+-static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
++static int ip_metrics_convert(struct nlattr *fc_mx,
+ int fc_mx_len, u32 *metrics,
+ struct netlink_ext_ack *extack)
+ {
+@@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ char tmp[TCP_CA_NAME_MAX];
+
+ nla_strscpy(tmp, nla, sizeof(tmp));
+- val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
++ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ if (val == TCP_CA_UNSPEC) {
+ NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
+ return -EINVAL;
+@@ -63,7 +63,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ return 0;
+ }
+
+-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
++struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx,
+ int fc_mx_len,
+ struct netlink_ext_ack *extack)
+ {
+@@ -77,7 +77,7 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+ if (unlikely(!fib_metrics))
+ return ERR_PTR(-ENOMEM);
+
+- err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics,
++ err = ip_metrics_convert(fc_mx, fc_mx_len, fib_metrics->metrics,
+ extack);
+ if (!err) {
+ refcount_set(&fib_metrics->refcnt, 1);
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 2407066b0fec11..14365b20f1c5c0 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -956,6 +956,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ void *loc_cpu_entry;
+ struct arpt_entry *iter;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -964,6 +966,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1254,6 +1258,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ void *loc_cpu_entry;
+ struct arpt_entry *iter;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -1262,6 +1268,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 7da1df4997d057..fe89a056eb06c4 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1108,6 +1108,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ void *loc_cpu_entry;
+ struct ipt_entry *iter;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -1116,6 +1118,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1492,6 +1496,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ void *loc_cpu_entry;
+ struct ipt_entry *iter;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -1500,6 +1506,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
+index 56f6ecc43451ec..12ca666d6e2c16 100644
+--- a/net/ipv4/netfilter/iptable_nat.c
++++ b/net/ipv4/netfilter/iptable_nat.c
+@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
+
+ static int __init iptable_nat_init(void)
+ {
+- int ret = xt_register_template(&nf_nat_ipv4_table,
+- iptable_nat_table_init);
++ int ret;
+
++ /* net->gen->ptr[iptable_nat_net_id] must be allocated
++ * before calling iptable_nat_table_init().
++ */
++ ret = register_pernet_subsys(&iptable_nat_net_ops);
+ if (ret < 0)
+ return ret;
+
+- ret = register_pernet_subsys(&iptable_nat_net_ops);
+- if (ret < 0) {
+- xt_unregister_template(&nf_nat_ipv4_table);
+- return ret;
+- }
++ ret = xt_register_template(&nf_nat_ipv4_table,
++ iptable_nat_table_init);
++ if (ret < 0)
++ unregister_pernet_subsys(&iptable_nat_net_ops);
+
+ return ret;
+ }
+
+ static void __exit iptable_nat_exit(void)
+ {
+- unregister_pernet_subsys(&iptable_nat_net_ops);
+ xt_unregister_template(&nf_nat_ipv4_table);
++ unregister_pernet_subsys(&iptable_nat_net_ops);
+ }
+
+ module_init(iptable_nat_init);
+diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
+index 6cc5743c553a02..9a21175693db58 100644
+--- a/net/ipv4/netfilter/nf_dup_ipv4.c
++++ b/net/ipv4/netfilter/nf_dup_ipv4.c
+@@ -52,8 +52,9 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ {
+ struct iphdr *iph;
+
++ local_bh_disable();
+ if (this_cpu_read(nf_skb_duplicated))
+- return;
++ goto out;
+ /*
+ * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+ * the original skb, which should continue on its way as if nothing has
+@@ -61,7 +62,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ */
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+- return;
++ goto out;
+
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ /* Avoid counting cloned packets towards the original connection. */
+@@ -90,6 +91,8 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ } else {
+ kfree_skb(skb);
+ }
++out:
++ local_bh_enable();
+ }
+ EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index f33aeab9424f75..675b5bbed638e4 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -239,10 +239,8 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
+ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook)
+ {
+- struct net_device *br_indev __maybe_unused;
+- struct sk_buff *nskb;
+- struct iphdr *niph;
+ const struct tcphdr *oth;
++ struct sk_buff *nskb;
+ struct tcphdr _oth;
+
+ oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+@@ -267,14 +265,12 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+- ip4_dst_hoplimit(skb_dst(nskb)));
++ nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
++ ip4_dst_hoplimit(skb_dst(nskb)));
+ nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+ if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
+ goto free_nskb;
+
+- niph = ip_hdr(nskb);
+-
+ /* "Never happens" */
+ if (nskb->len > dst_mtu(skb_dst(nskb)))
+ goto free_nskb;
+@@ -289,9 +285,14 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+- br_indev = nf_bridge_get_physindev(oldskb);
+- if (br_indev) {
++ if (nf_bridge_info_exists(oldskb)) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
++ struct iphdr *niph = ip_hdr(nskb);
++ struct net_device *br_indev;
++
++ br_indev = nf_bridge_get_physindev(oldskb, net);
++ if (!br_indev)
++ goto free_nskb;
+
+ nskb->dev = br_indev;
+ niph->tot_len = htons(nskb->len);
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index 69e33179960430..73e66a088e25eb 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
+
+ laddr = 0;
+ indev = __in_dev_get_rcu(skb->dev);
++ if (!indev)
++ return daddr;
+
+ in_dev_for_each_ifa_rcu(ifa, indev) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
+index 9eee535c64dd48..ba233fdd81886b 100644
+--- a/net/ipv4/netfilter/nft_fib_ipv4.c
++++ b/net/ipv4/netfilter/nft_fib_ipv4.c
+@@ -66,6 +66,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ .flowi4_scope = RT_SCOPE_UNIVERSE,
+ .flowi4_iif = LOOPBACK_IFINDEX,
+ .flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
++ .flowi4_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
+ };
+ const struct net_device *oif;
+ const struct net_device *found;
+@@ -84,9 +85,6 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ else
+ oif = NULL;
+
+- if (priv->flags & NFTA_FIB_F_IIF)
+- fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(oif);
+-
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+ nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+ nft_fib_store_result(dest, priv, nft_in(pkt));
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index bbff68b5b5d4a1..8d41b039421976 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -676,9 +676,10 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+
+ p = nla_data(nla);
+ for (i = 0; i < nhg->num_nh; ++i) {
+- p->id = nhg->nh_entries[i].nh->id;
+- p->weight = nhg->nh_entries[i].weight - 1;
+- p += 1;
++ *p++ = (struct nexthop_grp) {
++ .id = nhg->nh_entries[i].nh->id,
++ .weight = nhg->nh_entries[i].weight - 1,
++ };
+ }
+
+ if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 75e0aee35eb787..4cb0c896caf978 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -301,7 +301,7 @@ static int ping_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+ if (addr_len < sizeof(struct sockaddr_in))
+ return -EINVAL;
+
+- return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
++ return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len);
+ }
+
+ /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
+index eaf1d3113b62f7..a85b0aba36462d 100644
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -83,7 +83,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
+ SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
+ SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
+ SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
+- SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTPKTS),
++ SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS),
+ SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
+ SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
+ SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
+@@ -93,6 +93,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
+ SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS),
+ SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS),
+ SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES),
++ SNMP_MIB_ITEM("OutTransmits", IPSTATS_MIB_OUTPKTS),
+ SNMP_MIB_SENTINEL
+ };
+
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 4b5db5d1edc279..39834b95ee59a6 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
+
+ /* Charge it to the socket. */
+
+- ipv4_pktinfo_prepare(sk, skb);
++ ipv4_pktinfo_prepare(sk, skb, true);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
+ kfree_skb_reason(skb, reason);
+ return NET_RX_DROP;
+@@ -350,6 +350,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ goto error;
+ skb_reserve(skb, hlen);
+
++ skb->protocol = htons(ETH_P_IP);
+ skb->priority = READ_ONCE(sk->sk_priority);
+ skb->mark = sockc->mark;
+ skb->tstamp = sockc->transmit_time;
+@@ -603,6 +604,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+ daddr, saddr, 0, 0, sk->sk_uid);
+
++ fl4.fl4_icmp_type = 0;
++ fl4.fl4_icmp_code = 0;
++
+ if (!hdrincl) {
+ rfv.msg = msg;
+ rfv.hlen = 0;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index b214b5a2e045fe..285482060082f8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -132,7 +132,8 @@ struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
+ static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
+ INDIRECT_CALLABLE_SCOPE
+ unsigned int ipv4_mtu(const struct dst_entry *dst);
+-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
++static void ipv4_negative_advice(struct sock *sk,
++ struct dst_entry *dst);
+ static void ipv4_link_failure(struct sk_buff *skb);
+ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu,
+@@ -780,7 +781,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ goto reject_redirect;
+ }
+
+- n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
++ n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
+ if (!n)
+ n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
+ if (!IS_ERR(n)) {
+@@ -837,22 +838,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
+ __ip_do_redirect(rt, skb, &fl4, true);
+ }
+
+-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
++static void ipv4_negative_advice(struct sock *sk,
++ struct dst_entry *dst)
+ {
+ struct rtable *rt = (struct rtable *)dst;
+- struct dst_entry *ret = dst;
+
+- if (rt) {
+- if (dst->obsolete > 0) {
+- ip_rt_put(rt);
+- ret = NULL;
+- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+- rt->dst.expires) {
+- ip_rt_put(rt);
+- ret = NULL;
+- }
+- }
+- return ret;
++ if ((dst->obsolete > 0) ||
++ (rt->rt_flags & RTCF_REDIRECTED) ||
++ rt->dst.expires)
++ sk_dst_reset(sk);
+ }
+
+ /*
+@@ -926,13 +920,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+ ++peer->n_redirects;
+-#ifdef CONFIG_IP_ROUTE_VERBOSE
+- if (log_martians &&
++ if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
+ peer->n_redirects == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &gw);
+-#endif
+ }
+ out_put_peer:
+ inet_putpeer(peer);
+@@ -1283,7 +1275,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
+ struct flowi4 fl4 = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+- .flowi4_tos = RT_TOS(iph->tos),
++ .flowi4_tos = iph->tos & IPTOS_RT_MASK,
+ .flowi4_oif = rt->dst.dev->ifindex,
+ .flowi4_iif = skb->dev->ifindex,
+ .flowi4_mark = skb->mark,
+@@ -2168,6 +2160,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ int err = -EINVAL;
+ u32 tag = 0;
+
++ if (!in_dev)
++ return -EINVAL;
++
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
+ goto martian_source;
+
+@@ -2935,9 +2930,9 @@ EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
+
+ /* called with rcu_read_lock held */
+ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+- struct rtable *rt, u32 table_id, struct flowi4 *fl4,
+- struct sk_buff *skb, u32 portid, u32 seq,
+- unsigned int flags)
++ struct rtable *rt, u32 table_id, dscp_t dscp,
++ struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
++ u32 seq, unsigned int flags)
+ {
+ struct rtmsg *r;
+ struct nlmsghdr *nlh;
+@@ -2953,7 +2948,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ r->rtm_family = AF_INET;
+ r->rtm_dst_len = 32;
+ r->rtm_src_len = 0;
+- r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
++ r->rtm_tos = inet_dscp_to_dsfield(dscp);
+ r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
+ if (nla_put_u32(skb, RTA_TABLE, table_id))
+ goto nla_put_failure;
+@@ -3103,7 +3098,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
+ goto next;
+
+ err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
+- table_id, NULL, skb,
++ table_id, 0, NULL, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err)
+@@ -3399,7 +3394,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ fri.tb_id = table_id;
+ fri.dst = res.prefix;
+ fri.dst_len = res.prefixlen;
+- fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
++ fri.dscp = res.dscp;
+ fri.type = rt->rt_type;
+ fri.offload = 0;
+ fri.trap = 0;
+@@ -3426,8 +3421,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
+ } else {
+- err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
+- NETLINK_CB(in_skb).portid,
++ err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4,
++ skb, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ }
+ if (err < 0)
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index dc478a0574cbe7..e1435620779584 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
+ * requested/supported by the syn/synack exchange.
+ */
+ #define TSBITS 6
+-#define TSMASK (((__u32)1 << TSBITS) - 1)
+
+ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ u32 count, int c)
+@@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
+ */
+ u64 cookie_init_timestamp(struct request_sock *req, u64 now)
+ {
+- struct inet_request_sock *ireq;
+- u32 ts, ts_now = tcp_ns_to_ts(now);
++ const struct inet_request_sock *ireq = inet_rsk(req);
++ u64 ts, ts_now = tcp_ns_to_ts(now);
+ u32 options = 0;
+
+- ireq = inet_rsk(req);
+-
+ options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
+ if (ireq->sack_ok)
+ options |= TS_OPT_SACK;
+ if (ireq->ecn_ok)
+ options |= TS_OPT_ECN;
+
+- ts = ts_now & ~TSMASK;
++ ts = (ts_now >> TSBITS) << TSBITS;
+ ts |= options;
+- if (ts > ts_now) {
+- ts >>= TSBITS;
+- ts--;
+- ts <<= TSBITS;
+- ts |= options;
+- }
+- return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
++ if (ts > ts_now)
++ ts -= (1UL << TSBITS);
++
++ return ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ }
+
+
+@@ -430,7 +424,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+ }
+
+ /* Try to redo what tcp_v4_send_synack did. */
+- req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
++ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :
++ dst_metric(&rt->dst, RTAX_WINDOW);
+ /* limit the window selection if the user enforce a smaller rx buffer */
+ full_space = tcp_full_space(sk);
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 3d3a24f795734e..75371928d94f6e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -591,7 +591,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+ */
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+- /* This barrier is coupled with smp_wmb() in tcp_reset() */
++ /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
+ smp_rmb();
+ if (READ_ONCE(sk->sk_err) ||
+ !skb_queue_empty_lockless(&sk->sk_error_queue))
+@@ -722,6 +722,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
+ if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
+ set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
++ smp_mb__after_atomic();
+ }
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED.
+@@ -1157,6 +1158,9 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+
+ process_backlog++;
+
++#ifdef CONFIG_SKB_DECRYPTED
++ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
++#endif
+ tcp_skb_entail(sk, skb);
+ copy = size_goal;
+
+@@ -1719,7 +1723,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
+ space = tcp_space_from_win(sk, val);
+ if (space > sk->sk_rcvbuf) {
+ WRITE_ONCE(sk->sk_rcvbuf, space);
+- tcp_sk(sk)->window_clamp = val;
++ WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
+ }
+ return 0;
+ }
+@@ -1785,7 +1789,17 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
+
+ static bool can_map_frag(const skb_frag_t *frag)
+ {
+- return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
++ struct page *page;
++
++ if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
++ return false;
++
++ page = skb_frag_page(frag);
++
++ if (PageCompound(page) || page->mapping)
++ return false;
++
++ return true;
+ }
+
+ static int find_next_mappable_frag(const skb_frag_t *frag,
+@@ -2626,6 +2640,10 @@ void tcp_set_state(struct sock *sk, int state)
+ if (oldstate != TCP_ESTABLISHED)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ break;
++ case TCP_CLOSE_WAIT:
++ if (oldstate == TCP_SYN_RECV)
++ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
++ break;
+
+ case TCP_CLOSE:
+ if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
+@@ -2637,7 +2655,7 @@ void tcp_set_state(struct sock *sk, int state)
+ inet_put_port(sk);
+ fallthrough;
+ default:
+- if (oldstate == TCP_ESTABLISHED)
++ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
+ TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ }
+
+@@ -2699,7 +2717,7 @@ void tcp_shutdown(struct sock *sk, int how)
+ /* If we've already sent a FIN, or it's a closed state, skip this. */
+ if ((1 << sk->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_SYN_SENT |
+- TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
++ TCPF_CLOSE_WAIT)) {
+ /* Clear out any half completed packets. FIN if needed. */
+ if (tcp_close_state(sk))
+ tcp_send_fin(sk);
+@@ -2808,7 +2826,7 @@ void __tcp_close(struct sock *sk, long timeout)
+ * machine. State transitions:
+ *
+ * TCP_ESTABLISHED -> TCP_FIN_WAIT1
+- * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
++ * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult)
+ * TCP_CLOSE_WAIT -> TCP_LAST_ACK
+ *
+ * are legal only when FIN has been sent (i.e. in window),
+@@ -2920,6 +2938,8 @@ void tcp_close(struct sock *sk, long timeout)
+ lock_sock(sk);
+ __tcp_close(sk, timeout);
+ release_sock(sk);
++ if (!sk->sk_net_refcnt)
++ inet_csk_clear_xmit_timers_sync(sk);
+ sock_put(sk);
+ }
+ EXPORT_SYMBOL(tcp_close);
+@@ -3366,11 +3386,27 @@ int tcp_set_window_clamp(struct sock *sk, int val)
+ if (!val) {
+ if (sk->sk_state != TCP_CLOSE)
+ return -EINVAL;
+- tp->window_clamp = 0;
++ WRITE_ONCE(tp->window_clamp, 0);
+ } else {
+- tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
+- SOCK_MIN_RCVBUF / 2 : val;
+- tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
++ u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
++ u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
++ SOCK_MIN_RCVBUF / 2 : val;
++
++ if (new_window_clamp == old_window_clamp)
++ return 0;
++
++ WRITE_ONCE(tp->window_clamp, new_window_clamp);
++ if (new_window_clamp < old_window_clamp) {
++			/* Apply the reserved memory provisioning only
++			 * when shrinking the window clamp.
++			 */
++ __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
++
++ } else {
++ new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
++ tp->rcv_ssthresh = max(new_rcv_ssthresh,
++ tp->rcv_ssthresh);
++ }
+ }
+ return 0;
+ }
+@@ -3758,7 +3794,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ info->tcpi_options |= TCPI_OPT_SYN_DATA;
+
+ info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
+- info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
++ info->tcpi_ato = jiffies_to_usecs(min(icsk->icsk_ack.ato,
++ tcp_delack_max(sk)));
+ info->tcpi_snd_mss = tp->mss_cache;
+ info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
+
+@@ -3814,6 +3851,15 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ info->tcpi_rcv_wnd = tp->rcv_wnd;
+ info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
+ info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
++
++ info->tcpi_total_rto = tp->total_rto;
++ info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
++ info->tcpi_total_rto_time = tp->total_rto_time;
++ if (tp->rto_stamp) {
++ info->tcpi_total_rto_time += tcp_time_stamp_raw() -
++ tp->rto_stamp;
++ }
++
+ unlock_sock_fast(sk, slow);
+ }
+ EXPORT_SYMBOL_GPL(tcp_get_info);
+@@ -3939,11 +3985,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ if (copy_from_sockptr(&len, optlen, sizeof(int)))
+ return -EFAULT;
+
+- len = min_t(unsigned int, len, sizeof(int));
+-
+ if (len < 0)
+ return -EINVAL;
+
++ len = min_t(unsigned int, len, sizeof(int));
++
+ switch (optname) {
+ case TCP_MAXSEG:
+ val = tp->mss_cache;
+@@ -3983,7 +4029,7 @@ int do_tcp_getsockopt(struct sock *sk, int level,
+ TCP_RTO_MAX / HZ);
+ break;
+ case TCP_WINDOW_CLAMP:
+- val = tp->window_clamp;
++ val = READ_ONCE(tp->window_clamp);
+ break;
+ case TCP_INFO: {
+ struct tcp_info info;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 53b0d62fd2c2db..fe6178715ba05f 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -577,7 +577,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ err = sk_stream_error(sk, msg->msg_flags, err);
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+- return copied ? copied : err;
++ return copied > 0 ? copied : err;
+ }
+
+ enum {
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 1b34050a7538be..95dbb2799be463 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -46,8 +46,7 @@ void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
+ }
+
+ /* Must be called with rcu lock held */
+-static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
+- const char *name)
++static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
+ {
+ struct tcp_congestion_ops *ca = tcp_ca_find(name);
+
+@@ -182,7 +181,7 @@ int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_cong
+ return ret;
+ }
+
+-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
++u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
+ {
+ const struct tcp_congestion_ops *ca;
+ u32 key = TCP_CA_UNSPEC;
+@@ -190,7 +189,7 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
+ might_sleep();
+
+ rcu_read_lock();
+- ca = tcp_ca_find_autoload(net, name);
++ ca = tcp_ca_find_autoload(name);
+ if (ca) {
+ key = ca->key;
+ *ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
+@@ -287,7 +286,7 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
+ int ret;
+
+ rcu_read_lock();
+- ca = tcp_ca_find_autoload(net, name);
++ ca = tcp_ca_find_autoload(name);
+ if (!ca) {
+ ret = -ENOENT;
+ } else if (!bpf_try_module_get(ca, ca->owner)) {
+@@ -425,7 +424,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ if (!load)
+ ca = tcp_ca_find(name);
+ else
+- ca = tcp_ca_find_autoload(sock_net(sk), name);
++ ca = tcp_ca_find_autoload(name);
+
+ /* No change asking for existing value */
+ if (ca == icsk->icsk_ca_ops) {
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index bb23bb5b387a0c..8ad62713b0ba2b 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -58,7 +58,18 @@ struct dctcp {
+ };
+
+ static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
+-module_param(dctcp_shift_g, uint, 0644);
++
++static int dctcp_shift_g_set(const char *val, const struct kernel_param *kp)
++{
++ return param_set_uint_minmax(val, kp, 0, 10);
++}
++
++static const struct kernel_param_ops dctcp_shift_g_ops = {
++ .set = dctcp_shift_g_set,
++ .get = param_get_uint,
++};
++
++module_param_cb(dctcp_shift_g, &dctcp_shift_g_ops, &dctcp_shift_g, 0644);
+ MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");
+
+ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 804821d6bd4d47..fb053942dba2a1 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -243,9 +243,14 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
+ */
+ if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
+ u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
++ u8 old_ratio = tcp_sk(sk)->scaling_ratio;
+
+ do_div(val, skb->truesize);
+ tcp_sk(sk)->scaling_ratio = val ? val : 1;
++
++ if (old_ratio != tcp_sk(sk)->scaling_ratio)
++ WRITE_ONCE(tcp_sk(sk)->window_clamp,
++ tcp_win_from_space(sk, sk->sk_rcvbuf));
+ }
+ icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
+ tcp_sk(sk)->advmss);
+@@ -570,19 +575,20 @@ static void tcp_init_buffer_space(struct sock *sk)
+ maxwin = tcp_full_space(sk);
+
+ if (tp->window_clamp >= maxwin) {
+- tp->window_clamp = maxwin;
++ WRITE_ONCE(tp->window_clamp, maxwin);
+
+ if (tcp_app_win && maxwin > 4 * tp->advmss)
+- tp->window_clamp = max(maxwin -
+- (maxwin >> tcp_app_win),
+- 4 * tp->advmss);
++ WRITE_ONCE(tp->window_clamp,
++ max(maxwin - (maxwin >> tcp_app_win),
++ 4 * tp->advmss));
+ }
+
+ /* Force reservation of one segment. */
+ if (tcp_app_win &&
+ tp->window_clamp > 2 * tp->advmss &&
+ tp->window_clamp + tp->advmss > maxwin)
+- tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
++ WRITE_ONCE(tp->window_clamp,
++ max(2 * tp->advmss, maxwin - tp->advmss));
+
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+@@ -768,7 +774,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+
+ /* Make the window clamp follow along. */
+- tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
++ WRITE_ONCE(tp->window_clamp,
++ tcp_win_from_space(sk, rcvbuf));
+ }
+ }
+ tp->rcvq_space.space = copied;
+@@ -2101,13 +2108,25 @@ void tcp_clear_retrans(struct tcp_sock *tp)
+ tp->undo_marker = 0;
+ tp->undo_retrans = -1;
+ tp->sacked_out = 0;
++ tp->rto_stamp = 0;
++ tp->total_rto = 0;
++ tp->total_rto_recoveries = 0;
++ tp->total_rto_time = 0;
+ }
+
+ static inline void tcp_init_undo(struct tcp_sock *tp)
+ {
+ tp->undo_marker = tp->snd_una;
++
+ /* Retransmission still in flight may cause DSACKs later. */
+- tp->undo_retrans = tp->retrans_out ? : -1;
++ /* First, account for regular retransmits in flight: */
++ tp->undo_retrans = tp->retrans_out;
++ /* Next, account for TLP retransmits in flight: */
++ if (tp->tlp_high_seq && tp->tlp_retrans)
++ tp->undo_retrans++;
++ /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
++ if (!tp->undo_retrans)
++ tp->undo_retrans = -1;
+ }
+
+ static bool tcp_is_rack(const struct sock *sk)
+@@ -2186,6 +2205,7 @@ void tcp_enter_loss(struct sock *sk)
+
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ tp->high_seq = tp->snd_nxt;
++ tp->tlp_high_seq = 0;
+ tcp_ecn_queue_cwr(tp);
+
+ /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+@@ -2436,8 +2456,22 @@ static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
+ */
+ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
+ {
+- return tp->retrans_stamp &&
+- tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
++ const struct sock *sk = (const struct sock *)tp;
++
++ if (tp->retrans_stamp &&
++ tcp_tsopt_ecr_before(tp, tp->retrans_stamp))
++ return true; /* got echoed TS before first retransmission */
++
++ /* Check if nothing was retransmitted (retrans_stamp==0), which may
++ * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp
++ * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear
++ * retrans_stamp even if we had retransmitted the SYN.
++ */
++ if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */
++ sk->sk_state != TCP_SYN_SENT) /* not the FLAG_SYN_ACKED case? */
++ return true; /* nothing was retransmitted */
++
++ return false;
+ }
+
+ /* Undo procedures. */
+@@ -2471,6 +2505,16 @@ static bool tcp_any_retrans_done(const struct sock *sk)
+ return false;
+ }
+
++/* If loss recovery is finished and there are no retransmits out in the
++ * network, then we clear retrans_stamp so that upon the next loss recovery
++ * retransmits_timed_out() and timestamp-undo are using the correct value.
++ */
++static void tcp_retrans_stamp_cleanup(struct sock *sk)
++{
++ if (!tcp_any_retrans_done(sk))
++ tcp_sk(sk)->retrans_stamp = 0;
++}
++
+ static void DBGUNDO(struct sock *sk, const char *msg)
+ {
+ #if FASTRETRANS_DEBUG > 1
+@@ -2759,13 +2803,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+ }
+
++/* Sometimes we deduce that packets have been dropped due to reasons other than
++ * congestion, like path MTU reductions or failed client TFO attempts. In these
++ * cases we call this function to retransmit as many packets as cwnd allows,
++ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
++ * non-zero value (and may do so in a later calling context due to TSQ), we
++ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
++ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
++ * are using the correct retrans_stamp and don't declare ETIMEDOUT
++ * prematurely).
++ */
++static void tcp_non_congestion_loss_retransmit(struct sock *sk)
++{
++ const struct inet_connection_sock *icsk = inet_csk(sk);
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (icsk->icsk_ca_state != TCP_CA_Loss) {
++ tp->high_seq = tp->snd_nxt;
++ tp->snd_ssthresh = tcp_current_ssthresh(sk);
++ tp->prior_ssthresh = 0;
++ tp->undo_marker = 0;
++ tcp_set_ca_state(sk, TCP_CA_Loss);
++ }
++ tcp_xmit_retransmit_queue(sk);
++}
++
+ /* Do a simple retransmit without using the backoff mechanisms in
+ * tcp_timer. This is used for path mtu discovery.
+ * The socket is already locked here.
+ */
+ void tcp_simple_retransmit(struct sock *sk)
+ {
+- const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ int mss;
+@@ -2805,14 +2873,7 @@ void tcp_simple_retransmit(struct sock *sk)
+ * in network, but units changed and effective
+ * cwnd/ssthresh really reduced now.
+ */
+- if (icsk->icsk_ca_state != TCP_CA_Loss) {
+- tp->high_seq = tp->snd_nxt;
+- tp->snd_ssthresh = tcp_current_ssthresh(sk);
+- tp->prior_ssthresh = 0;
+- tp->undo_marker = 0;
+- tcp_set_ca_state(sk, TCP_CA_Loss);
+- }
+- tcp_xmit_retransmit_queue(sk);
++ tcp_non_congestion_loss_retransmit(sk);
+ }
+ EXPORT_SYMBOL(tcp_simple_retransmit);
+
+@@ -2821,6 +2882,9 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+ struct tcp_sock *tp = tcp_sk(sk);
+ int mib_idx;
+
++ /* Start the clock with our fast retransmit, for undo and ETIMEDOUT. */
++ tcp_retrans_stamp_cleanup(sk);
++
+ if (tcp_is_reno(tp))
+ mib_idx = LINUX_MIB_TCPRENORECOVERY;
+ else
+@@ -2839,6 +2903,14 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+ tcp_set_ca_state(sk, TCP_CA_Recovery);
+ }
+
++static void tcp_update_rto_time(struct tcp_sock *tp)
++{
++ if (tp->rto_stamp) {
++ tp->total_rto_time += tcp_time_stamp(tp) - tp->rto_stamp;
++ tp->rto_stamp = 0;
++ }
++}
++
+ /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
+ * recovered or spurious. Otherwise retransmits more on partial ACKs.
+ */
+@@ -3029,7 +3101,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ return;
+
+ if (tcp_try_undo_dsack(sk))
+- tcp_try_keep_open(sk);
++ tcp_try_to_open(sk, flag);
+
+ tcp_identify_packet_loss(sk, ack_flag);
+ if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+@@ -3043,6 +3115,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ break;
+ case TCP_CA_Loss:
+ tcp_process_loss(sk, flag, num_dupack, rexmit);
++ if (icsk->icsk_ca_state != TCP_CA_Loss)
++ tcp_update_rto_time(tp);
+ tcp_identify_packet_loss(sk, ack_flag);
+ if (!(icsk->icsk_ca_state == TCP_CA_Open ||
+ (*ack_flag & FLAG_LOST_RETRANS)))
+@@ -3809,8 +3883,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ * then we can probably ignore it.
+ */
+ if (before(ack, prior_snd_una)) {
++ u32 max_window;
++
++		/* do not accept an ACK for bytes we never sent. */
++ max_window = min_t(u64, tp->max_window, tp->bytes_acked);
+ /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
+- if (before(ack, prior_snd_una - tp->max_window)) {
++ if (before(ack, prior_snd_una - max_window)) {
+ if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ tcp_send_challenge_ack(sk);
+ return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
+@@ -4337,9 +4415,26 @@ static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
+ return SKB_NOT_DROPPED_YET;
+ }
+
++
++void tcp_done_with_error(struct sock *sk, int err)
++{
++ /* This barrier is coupled with smp_rmb() in tcp_poll() */
++ WRITE_ONCE(sk->sk_err, err);
++ smp_wmb();
++
++ tcp_write_queue_purge(sk);
++ tcp_done(sk);
++
++ if (!sock_flag(sk, SOCK_DEAD))
++ sk_error_report(sk);
++}
++EXPORT_SYMBOL(tcp_done_with_error);
++
+ /* When we get a reset we do this. */
+ void tcp_reset(struct sock *sk, struct sk_buff *skb)
+ {
++ int err;
++
+ trace_tcp_receive_reset(sk);
+
+ /* mptcp can't tell us to ignore reset pkts,
+@@ -4351,24 +4446,17 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
+ /* We want the right error as BSD sees it (and indeed as we do). */
+ switch (sk->sk_state) {
+ case TCP_SYN_SENT:
+- WRITE_ONCE(sk->sk_err, ECONNREFUSED);
++ err = ECONNREFUSED;
+ break;
+ case TCP_CLOSE_WAIT:
+- WRITE_ONCE(sk->sk_err, EPIPE);
++ err = EPIPE;
+ break;
+ case TCP_CLOSE:
+ return;
+ default:
+- WRITE_ONCE(sk->sk_err, ECONNRESET);
++ err = ECONNRESET;
+ }
+- /* This barrier is coupled with smp_rmb() in tcp_poll() */
+- smp_wmb();
+-
+- tcp_write_queue_purge(sk);
+- tcp_done(sk);
+-
+- if (!sock_flag(sk, SOCK_DEAD))
+- sk_error_report(sk);
++ tcp_done_with_error(sk, err);
+ }
+
+ /*
+@@ -5833,6 +5921,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ * RFC 5961 4.2 : Send a challenge ack
+ */
+ if (th->syn) {
++ if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
++ TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
++ TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
++ TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
++ goto pass;
+ syn_challenge:
+ if (syn_inerr)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+@@ -5842,6 +5935,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+ }
+
++pass:
+ bpf_skops_parse_hdr(sk, skb);
+
+ return true;
+@@ -6171,7 +6265,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
+ skb_rbtree_walk_from(data)
+ tcp_mark_skb_lost(sk, data);
+- tcp_xmit_retransmit_queue(sk);
++ tcp_non_congestion_loss_retransmit(sk);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ return true;
+@@ -6307,7 +6401,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+
+ if (!tp->rx_opt.wscale_ok) {
+ tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
+- tp->window_clamp = min(tp->window_clamp, 65535U);
++ WRITE_ONCE(tp->window_clamp,
++ min(tp->window_clamp, 65535U));
+ }
+
+ if (tp->rx_opt.saw_tstamp) {
+@@ -6450,22 +6545,31 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+
+ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+ {
++ struct tcp_sock *tp = tcp_sk(sk);
+ struct request_sock *req;
+
+ /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
+ * undo. If peer SACKs triggered fast recovery, we can't undo here.
+ */
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
+- tcp_try_undo_loss(sk, false);
++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
++ tcp_try_undo_recovery(sk);
+
+- /* Reset rtx states to prevent spurious retransmits_timed_out() */
+- tcp_sk(sk)->retrans_stamp = 0;
++ tcp_update_rto_time(tp);
+ inet_csk(sk)->icsk_retransmits = 0;
++ /* In tcp_fastopen_synack_timer() on the first SYNACK RTO we set
++ * retrans_stamp but don't enter CA_Loss, so in case that happened we
++ * need to zero retrans_stamp here to prevent spurious
++ * retransmits_timed_out(). However, if the ACK of our SYNACK caused us
++ * to enter CA_Recovery then we need to leave retrans_stamp as it was
++ * set entering CA_Recovery, for correct retransmits_timed_out() and
++ * undo behavior.
++ */
++ tcp_retrans_stamp_cleanup(sk);
+
+ /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ * we no longer need req so release it.
+ */
+- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ reqsk_fastopen_remove(sk, req, false);
+
+@@ -6622,6 +6726,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+
+ tcp_initialize_rcv_mss(sk);
+ tcp_fast_path_on(tp);
++ if (sk->sk_shutdown & SEND_SHUTDOWN)
++ tcp_shutdown(sk, SEND_SHUTDOWN);
+ break;
+
+ case TCP_FIN_WAIT1: {
+@@ -7083,7 +7189,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ tcp_rsk(req)->tfo_listener = false;
+ if (!want_cookie) {
+ req->timeout = tcp_timeout_init((struct sock *)req);
+- inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
++ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
++ req->timeout))) {
++ reqsk_free(req);
++ return 0;
++ }
++
+ }
+ af_ops->send_synack(sk, dst, &fl, req, &foc,
+ !want_cookie ? TCP_SYNACK_NORMAL :
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 4167e8a48b60a1..df3ddf31f8e673 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -94,6 +94,8 @@ EXPORT_SYMBOL(tcp_hashinfo);
+
+ static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
+
++static DEFINE_MUTEX(tcp_exit_batch_mutex);
++
+ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+ return secure_tcp_seq(ip_hdr(skb)->daddr,
+@@ -114,6 +116,9 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+ const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
+ struct tcp_sock *tp = tcp_sk(sk);
+
++ if (tw->tw_substate == TCP_FIN_WAIT2)
++ reuse = 0;
++
+ if (reuse == 2) {
+ /* Still does not detect *everything* that goes through
+ * lo, since we require a loopback src or dst address
+@@ -154,6 +159,12 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+ if (tcptw->tw_ts_recent_stamp &&
+ (!twp || (reuse && time_after32(ktime_get_seconds(),
+ tcptw->tw_ts_recent_stamp)))) {
++ /* inet_twsk_hashdance() sets sk_refcnt after putting twsk
++ * and releasing the bucket lock.
++ */
++ if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt)))
++ return 0;
++
+ /* In case of repair and re-using TIME-WAIT sockets we still
+ * want to be sure that it is safe as above but honor the
+ * sequence numbers and time stamps set as part of the repair
+@@ -174,7 +185,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+ tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+ tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ }
+- sock_hold(sktw);
++
+ return 1;
+ }
+
+@@ -194,7 +205,7 @@ static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+
+ sock_owned_by_me(sk);
+
+- return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
++ return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, &addr_len);
+ }
+
+ /* This will initiate an outgoing connection. */
+@@ -596,15 +607,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
+
+ ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
+
+- if (!sock_owned_by_user(sk)) {
+- WRITE_ONCE(sk->sk_err, err);
+-
+- sk_error_report(sk);
+-
+- tcp_done(sk);
+- } else {
++ if (!sock_owned_by_user(sk))
++ tcp_done_with_error(sk, err);
++ else
+ WRITE_ONCE(sk->sk_err_soft, err);
+- }
+ goto out;
+ }
+
+@@ -1816,7 +1822,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
+ {
+- u32 limit, tail_gso_size, tail_gso_segs;
++ u32 tail_gso_size, tail_gso_segs;
+ struct skb_shared_info *shinfo;
+ const struct tcphdr *th;
+ struct tcphdr *thtail;
+@@ -1825,6 +1831,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ bool fragstolen;
+ u32 gso_segs;
+ u32 gso_size;
++ u64 limit;
+ int delta;
+
+ /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+@@ -1922,7 +1929,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ __skb_push(skb, hdrlen);
+
+ no_coalesce:
+- limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
++ /* sk->sk_backlog.len is reset only at the end of __release_sock().
++ * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach
++ * sk_rcvbuf in normal conditions.
++ */
++ limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
++
++ limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
+
+ /* Only socket owner can try to collapse/prune rx queues
+ * to reduce memory overhead, so add a little headroom here.
+@@ -1930,6 +1943,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ */
+ limit += 64 * 1024;
+
++ limit = min_t(u64, limit, UINT_MAX);
++
+ if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ bh_unlock_sock(sk);
+ *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
+@@ -3294,13 +3309,25 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+ {
+ struct net *net;
+
+- tcp_twsk_purge(net_exit_list, AF_INET);
++	/* Make sure concurrent calls to tcp_sk_exit_batch() from net_cleanup_work
++	 * and from the failed setup_net() error-unwinding path are serialized.
++	 *
++	 * tcp_twsk_purge() handles twsk in any dead netns, not just those on
++	 * net_exit_list; the thread that dismantles a particular twsk must
++	 * do so without another thread progressing to refcount_dec_and_test() of
++	 * tcp_death_row.tw_refcount.
++	 */
++ mutex_lock(&tcp_exit_batch_mutex);
++
++ tcp_twsk_purge(net_exit_list);
+
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
+ WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
+ tcp_fastopen_ctx_destroy(net);
+ }
++
++ mutex_unlock(&tcp_exit_batch_mutex);
+ }
+
+ static struct pernet_operations __net_initdata tcp_sk_ops = {
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index c196759f1d3bd8..e0883ba709b0bf 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
+ u32 val, crtt = 0; /* cached RTT scaled by 8 */
+
+ sk_dst_confirm(sk);
++	/* ssthresh may have been reduced unnecessarily during
++	 * the 3WHS. Restore it to its initial default.
++	 */
++ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ if (!dst)
+ goto reset;
+
+ rcu_read_lock();
+- tm = tcp_get_metrics(sk, dst, true);
++ tm = tcp_get_metrics(sk, dst, false);
+ if (!tm) {
+ rcu_read_unlock();
+ goto reset;
+@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
+ tp->snd_ssthresh = val;
+ if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ tp->snd_ssthresh = tp->snd_cwnd_clamp;
+- } else {
+- /* ssthresh may have been reduced unnecessarily during.
+- * 3WHS. Restore it back to its initial default.
+- */
+- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ }
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val && tp->reordering != val)
+@@ -620,6 +619,7 @@ static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] =
+ [TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, },
+ [TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr), },
++ [TCP_METRICS_ATTR_SADDR_IPV4] = { .type = NLA_U32, },
+ /* Following attributes are not received for GET/DEL,
+ * we keep them for reference
+ */
+@@ -899,22 +899,25 @@ static void tcp_metrics_flush_all(struct net *net)
+ unsigned int row;
+
+ for (row = 0; row < max_rows; row++, hb++) {
+- struct tcp_metrics_block __rcu **pp;
++ struct tcp_metrics_block __rcu **pp = &hb->chain;
+ bool match;
+
++ if (!rcu_access_pointer(*pp))
++ continue;
++
+ spin_lock_bh(&tcp_metrics_lock);
+- pp = &hb->chain;
+ for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
+ match = net ? net_eq(tm_net(tm), net) :
+ !refcount_read(&tm_net(tm)->ns.count);
+ if (match) {
+- *pp = tm->tcpm_next;
++ rcu_assign_pointer(*pp, tm->tcpm_next);
+ kfree_rcu(tm, rcu_head);
+ } else {
+ pp = &tm->tcpm_next;
+ }
+ }
+ spin_unlock_bh(&tcp_metrics_lock);
++ cond_resched();
+ }
+ }
+
+@@ -949,7 +952,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+ if (addr_same(&tm->tcpm_daddr, &daddr) &&
+ (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
+ net_eq(tm_net(tm), net)) {
+- *pp = tm->tcpm_next;
++ rcu_assign_pointer(*pp, tm->tcpm_next);
+ kfree_rcu(tm, rcu_head);
+ found = true;
+ } else {
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index b98d476f1594bd..cc2b608b1a8e78 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -363,7 +363,7 @@ void tcp_twsk_destructor(struct sock *sk)
+ }
+ EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
+-void tcp_twsk_purge(struct list_head *net_exit_list, int family)
++void tcp_twsk_purge(struct list_head *net_exit_list)
+ {
+ bool purged_once = false;
+ struct net *net;
+@@ -371,18 +371,13 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ if (net->ipv4.tcp_death_row.hashinfo->pernet) {
+ /* Even if tw_refcount == 1, we must clean up kernel reqsk */
+- inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
++ inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
+ } else if (!purged_once) {
+- /* The last refcount is decremented in tcp_sk_exit_batch() */
+- if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
+- continue;
+-
+- inet_twsk_purge(&tcp_hashinfo, family);
++ inet_twsk_purge(&tcp_hashinfo);
+ purged_once = true;
+ }
+ }
+ }
+-EXPORT_SYMBOL_GPL(tcp_twsk_purge);
+
+ /* Warning : This function is called without sk_listener being locked.
+ * Be sure to read socket fields once, as their value could change under us.
+@@ -565,6 +560,10 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
+ newtp->undo_marker = treq->snt_isn;
+ newtp->retrans_stamp = div_u64(treq->snt_synack,
+ USEC_PER_SEC / TCP_TS_HZ);
++ newtp->total_rto = req->num_timeout;
++ newtp->total_rto_recoveries = 1;
++ newtp->total_rto_time = tcp_time_stamp_raw() -
++ newtp->retrans_stamp;
+ }
+ newtp->tsoffset = treq->ts_off;
+ #ifdef CONFIG_TCP_MD5SIG
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 8311c38267b55b..69e6012ae82fbd 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -73,6 +73,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ if (thlen < sizeof(*th))
+ goto out;
+
++ if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
++ goto out;
++
+ if (!pskb_may_pull(skb, thlen))
+ goto out;
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index f0723460753c5d..328640d9b60762 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -203,16 +203,17 @@ static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
+ * This MUST be enforced by all callers.
+ */
+ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
+- __u32 *rcv_wnd, __u32 *window_clamp,
++ __u32 *rcv_wnd, __u32 *__window_clamp,
+ int wscale_ok, __u8 *rcv_wscale,
+ __u32 init_rcv_wnd)
+ {
+ unsigned int space = (__space < 0 ? 0 : __space);
++ u32 window_clamp = READ_ONCE(*__window_clamp);
+
+ /* If no clamp set the clamp to the max possible scaled window */
+- if (*window_clamp == 0)
+- (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
+- space = min(*window_clamp, space);
++ if (window_clamp == 0)
++ window_clamp = (U16_MAX << TCP_MAX_WSCALE);
++ space = min(window_clamp, space);
+
+ /* Quantize space offering to a multiple of mss if possible. */
+ if (space > mss)
+@@ -239,12 +240,13 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
+ /* Set window scaling on max possible window */
+ space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
+ space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
+- space = min_t(u32, space, *window_clamp);
++ space = min_t(u32, space, window_clamp);
+ *rcv_wscale = clamp_t(int, ilog2(space) - 15,
+ 0, TCP_MAX_WSCALE);
+ }
+ /* Set the clamp no higher than max representable value */
+- (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
++ WRITE_ONCE(*__window_clamp,
++ min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp));
+ }
+ EXPORT_SYMBOL(tcp_select_initial_window);
+
+@@ -1331,7 +1333,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+
+- skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
++ skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
+
+ /* Build TCP header and checksum it. */
+ th = (struct tcphdr *)skb->data;
+@@ -2312,9 +2314,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+ if (len <= skb->len)
+ break;
+
+- if (unlikely(TCP_SKB_CB(skb)->eor) ||
+- tcp_has_tx_tstamp(skb) ||
+- !skb_pure_zcopy_same(skb, next))
++ if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next))
+ return false;
+
+ len -= skb->len;
+@@ -3263,7 +3263,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
+ if (skb_still_in_host_queue(sk, skb))
+ return -EBUSY;
+
++start:
+ if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
++ if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
++ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
++ TCP_SKB_CB(skb)->seq++;
++ goto start;
++ }
+ if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+@@ -3527,7 +3533,9 @@ void tcp_send_fin(struct sock *sk)
+ return;
+ }
+ } else {
+- skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ skb = alloc_skb_fclone(MAX_TCP_HEADER,
++ sk_gfp_mask(sk, GFP_ATOMIC |
++ __GFP_NOWARN));
+ if (unlikely(!skb))
+ return;
+
+@@ -3779,7 +3787,7 @@ static void tcp_connect_init(struct sock *sk)
+ tcp_ca_dst_init(sk, dst);
+
+ if (!tp->window_clamp)
+- tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
++ WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW));
+ tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
+
+ tcp_initialize_rcv_mss(sk);
+@@ -3787,7 +3795,7 @@ static void tcp_connect_init(struct sock *sk)
+ /* limit the window selection if the user enforce a smaller rx buffer */
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
+- tp->window_clamp = tcp_full_space(sk);
++ WRITE_ONCE(tp->window_clamp, tcp_full_space(sk));
+
+ rcv_wnd = tcp_rwnd_init_bpf(sk);
+ if (rcv_wnd == 0)
+@@ -3997,6 +4005,20 @@ int tcp_connect(struct sock *sk)
+ }
+ EXPORT_SYMBOL(tcp_connect);
+
++u32 tcp_delack_max(const struct sock *sk)
++{
++ const struct dst_entry *dst = __sk_dst_get(sk);
++ u32 delack_max = inet_csk(sk)->icsk_delack_max;
++
++ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) {
++ u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
++ u32 delack_from_rto_min = max_t(int, 1, rto_min - 1);
++
++ delack_max = min_t(u32, delack_max, delack_from_rto_min);
++ }
++ return delack_max;
++}
++
+ /* Send out a delayed ack, the caller does the policy checking
+ * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
+ * for details.
+@@ -4032,7 +4054,7 @@ void tcp_send_delayed_ack(struct sock *sk)
+ ato = min(ato, max_ato);
+ }
+
+- ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
++ ato = min_t(u32, ato, tcp_delack_max(sk));
+
+ /* Stay within the limit we were given */
+ timeout = jiffies + ato;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 984ab4a0421ed5..b65cd417b0f7ca 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -69,11 +69,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+
+ static void tcp_write_err(struct sock *sk)
+ {
+- WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
+- sk_error_report(sk);
+-
+- tcp_write_queue_purge(sk);
+- tcp_done(sk);
++ tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+ }
+
+@@ -415,6 +411,19 @@ abort: tcp_write_err(sk);
+ }
+ }
+
++static void tcp_update_rto_stats(struct sock *sk)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (!icsk->icsk_retransmits) {
++ tp->total_rto_recoveries++;
++ tp->rto_stamp = tcp_time_stamp(tp);
++ }
++ icsk->icsk_retransmits++;
++ tp->total_rto++;
++}
++
+ /*
+ * Timer for Fast Open socket to retransmit SYNACK. Note that the
+ * sk here is the child socket, not the parent (listener) socket.
+@@ -447,7 +456,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ */
+ inet_rtx_syn_ack(sk, req);
+ req->num_timeout++;
+- icsk->icsk_retransmits++;
++ tcp_update_rto_stats(sk);
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = tcp_time_stamp(tp);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+@@ -457,17 +466,34 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
+ const struct sk_buff *skb)
+ {
++ const struct inet_connection_sock *icsk = inet_csk(sk);
++ u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
+ const struct tcp_sock *tp = tcp_sk(sk);
+- const int timeout = TCP_RTO_MAX * 2;
+- u32 rcv_delta, rtx_delta;
+-
+- rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+- if (rcv_delta <= timeout)
+- return false;
++ int timeout = TCP_RTO_MAX * 2;
++ u32 rtx_delta;
++ s32 rcv_delta;
+
+ rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
+ (tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
+
++ if (user_timeout) {
++		/* If the user application specified a TCP_USER_TIMEOUT,
++ * it does not want win 0 packets to 'reset the timer'
++ * while retransmits are not making progress.
++ */
++ if (rtx_delta > user_timeout)
++ return true;
++ timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
++ }
++
++ /* Note: timer interrupt might have been delayed by at least one jiffy,
++ * and tp->rcv_tstamp might very well have been written recently.
++ * rcv_delta can thus be negative.
++ */
++ rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
++ if (rcv_delta <= timeout)
++ return false;
++
+ return rtx_delta > timeout;
+ }
+
+@@ -509,8 +535,6 @@ void tcp_retransmit_timer(struct sock *sk)
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+- tp->tlp_high_seq = 0;
+-
+ if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
+ !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
+ /* Receiver dastardly shrinks window. Our retransmits
+@@ -575,7 +599,7 @@ void tcp_retransmit_timer(struct sock *sk)
+
+ tcp_enter_loss(sk);
+
+- icsk->icsk_retransmits++;
++ tcp_update_rto_stats(sk);
+ if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
+ /* Retransmission failed because of local congestion,
+ * Let senders fight for local resources conservatively.
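+
+The s32 rcv_delta conversion above is the heart of that hunk: the timer
+can fire a jiffy or two after rcv_tstamp was refreshed, and with u32
+arithmetic the small negative difference wraps to a huge value, skipping
+the early "return false". A small userspace illustration of the two
+comparisons (toy numbers, not kernel code):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        unsigned long icsk_timeout = 1000; /* when the timer was armed */
+        unsigned long rcv_tstamp = 1002;   /* refreshed just before firing */
+        int timeout = 480;                 /* stand-in for TCP_RTO_MAX * 2 */
+
+        unsigned int u_delta = icsk_timeout - rcv_tstamp; /* wraps to ~4e9 */
+        int s_delta = icsk_timeout - rcv_tstamp;          /* -2 */
+
+        /* u32: huge value, "<= timeout" is false, the bail-out is skipped */
+        printf("u32 delta: %u -> %d\n", u_delta, u_delta <= (unsigned int)timeout);
+        /* s32: -2 <= 480, the not-timed-out path is taken as intended */
+        printf("s32 delta: %d -> %d\n", s_delta, s_delta <= timeout);
+        return 0;
+    }
+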
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f39b9c8445808d..73fb814460b6b7 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -326,6 +326,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ goto fail_unlock;
+ }
+
++ sock_set_flag(sk, SOCK_RCU_FREE);
++
+ sk_add_node_rcu(sk, &hslot->head);
+ hslot->count++;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -342,7 +344,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ hslot2->count++;
+ spin_unlock(&hslot2->lock);
+ }
+- sock_set_flag(sk, SOCK_RCU_FREE);
++
+ error = 0;
+ fail_unlock:
+ spin_unlock_bh(&hslot->lock);
+@@ -429,15 +431,21 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ {
+ struct sock *sk, *result;
+ int score, badness;
++ bool need_rescore;
+
+ result = NULL;
+ badness = 0;
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+- score = compute_score(sk, net, saddr, sport,
+- daddr, hnum, dif, sdif);
++ need_rescore = false;
++rescore:
++ score = compute_score(need_rescore ? result : sk, net, saddr,
++ sport, daddr, hnum, dif, sdif);
+ if (score > badness) {
+ badness = score;
+
++ if (need_rescore)
++ continue;
++
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ result = sk;
+ continue;
+@@ -458,9 +466,14 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ if (IS_ERR(result))
+ continue;
+
+- badness = compute_score(result, net, saddr, sport,
+- daddr, hnum, dif, sdif);
+-
++			/* compute_score() is too long a function to be
++			 * inlined, and calling it again here yields
++			 * measurable overhead for some workloads. Work
++			 * around it by jumping backwards to rescore
++			 * 'result'.
++			 */
++ need_rescore = true;
++ goto rescore;
+ }
+ }
+ return result;
+@@ -534,7 +547,8 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
+ struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
+ __be16 sport, __be16 dport)
+ {
+- const struct iphdr *iph = ip_hdr(skb);
++ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
++ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
+ struct net *net = dev_net(skb->dev);
+ int iif, sdif;
+
+@@ -584,6 +598,13 @@ static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
+ }
+
+ DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
++EXPORT_SYMBOL(udp_encap_needed_key);
++
++#if IS_ENABLED(CONFIG_IPV6)
++DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
++EXPORT_SYMBOL(udpv6_encap_needed_key);
++#endif
++
+ void udp_encap_enable(void)
+ {
+ static_branch_inc(&udp_encap_needed_key);
+@@ -714,7 +735,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+ iph->saddr, uh->source, skb->dev->ifindex,
+ inet_sdif(skb), udptable, NULL);
+
+- if (!sk || udp_sk(sk)->encap_type) {
++ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ /* No socket for error: try tunnels before discarding */
+ if (static_branch_unlikely(&udp_encap_needed_key)) {
+ sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
+@@ -805,7 +826,7 @@ void udp_flush_pending_frames(struct sock *sk)
+
+ if (up->pending) {
+ up->len = 0;
+- up->pending = 0;
++ WRITE_ONCE(up->pending, 0);
+ ip_flush_pending_frames(sk);
+ }
+ }
+@@ -993,7 +1014,7 @@ int udp_push_pending_frames(struct sock *sk)
+
+ out:
+ up->len = 0;
+- up->pending = 0;
++ WRITE_ONCE(up->pending, 0);
+ return err;
+ }
+ EXPORT_SYMBOL(udp_push_pending_frames);
+@@ -1051,7 +1072,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ u8 tos, scope;
+ __be16 dport;
+ int err, is_udplite = IS_UDPLITE(sk);
+- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+ struct sk_buff *skb;
+ struct ip_options_data opt_copy;
+@@ -1069,7 +1090,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+
+ fl4 = &inet->cork.fl.u.ip4;
+- if (up->pending) {
++ if (READ_ONCE(up->pending)) {
+ /*
+ * There are pending frames.
+ * The socket lock must be held while it's corked.
+@@ -1117,16 +1138,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ if (msg->msg_controllen) {
+ err = udp_cmsg_send(sk, msg, &ipc.gso_size);
+- if (err > 0)
++ if (err > 0) {
+ err = ip_cmsg_send(sk, msg, &ipc,
+ sk->sk_family == AF_INET6);
++ connected = 0;
++ }
+ if (unlikely(err < 0)) {
+ kfree(ipc.opt);
+ return err;
+ }
+ if (ipc.opt)
+ free = 1;
+- connected = 0;
+ }
+ if (!ipc.opt) {
+ struct ip_options_rcu *inet_opt;
+@@ -1143,7 +1165,9 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
+ err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
+- (struct sockaddr *)usin, &ipc.addr);
++ (struct sockaddr *)usin,
++ &msg->msg_namelen,
++ &ipc.addr);
+ if (err)
+ goto out_free;
+ if (usin) {
+@@ -1265,7 +1289,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ fl4->saddr = saddr;
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = inet->inet_sport;
+- up->pending = AF_INET;
++ WRITE_ONCE(up->pending, AF_INET);
+
+ do_append_data:
+ up->len += ulen;
+@@ -1277,7 +1301,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ else if (!corkreq)
+ err = udp_push_pending_frames(sk);
+ else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
+- up->pending = 0;
++ WRITE_ONCE(up->pending, 0);
+ release_sock(sk);
+
+ out:
+@@ -1315,11 +1339,11 @@ void udp_splice_eof(struct socket *sock)
+ struct sock *sk = sock->sk;
+ struct udp_sock *up = udp_sk(sk);
+
+- if (!up->pending || READ_ONCE(up->corkflag))
++ if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
+ return;
+
+ lock_sock(sk);
+- if (up->pending && !READ_ONCE(up->corkflag))
++ if (up->pending && !udp_test_bit(CORK, sk))
+ udp_push_pending_frames(sk);
+ release_sock(sk);
+ }
+@@ -1865,10 +1889,11 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
+ *addr_len = sizeof(*sin);
+
+ BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
+- (struct sockaddr *)sin);
++ (struct sockaddr *)sin,
++ addr_len);
+ }
+
+- if (udp_sk(sk)->gro_enabled)
++ if (udp_test_bit(GRO_ENABLED, sk))
+ udp_cmsg_recv(msg, sk, skb);
+
+ if (inet_cmsg_flags(inet))
+@@ -1904,7 +1929,7 @@ int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (addr_len < sizeof(struct sockaddr_in))
+ return -EINVAL;
+
+- return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
++ return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len);
+ }
+ EXPORT_SYMBOL(udp_pre_connect);
+
+@@ -2081,7 +2106,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ nf_reset_ct(skb);
+
+- if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
++ if (static_branch_unlikely(&udp_encap_needed_key) &&
++ READ_ONCE(up->encap_type)) {
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
+ /*
+@@ -2119,7 +2145,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets
+ */
+- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
++ u16 pcrlen = READ_ONCE(up->pcrlen);
+
+ /*
+ * MIB statistics other than incrementing the error count are
+@@ -2132,7 +2159,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ * delivery of packets with coverage values less than a value
+ * provided by the application."
+ */
+- if (up->pcrlen == 0) { /* full coverage was set */
++ if (pcrlen == 0) { /* full coverage was set */
+ net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
+ goto drop;
+@@ -2143,9 +2170,9 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ * that it wants x while sender emits packets of smaller size y.
+ * Therefore the above ...()->partial_cov statement is essential.
+ */
+- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
++ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
+ net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
+- UDP_SKB_CB(skb)->cscov, up->pcrlen);
++ UDP_SKB_CB(skb)->cscov, pcrlen);
+ goto drop;
+ }
+ }
+@@ -2162,7 +2189,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+
+ udp_csum_pull_header(skb);
+
+- ipv4_pktinfo_prepare(sk, skb);
++ ipv4_pktinfo_prepare(sk, skb, true);
+ return __udp_queue_rcv_skb(sk, skb);
+
+ csum_error:
+@@ -2618,7 +2645,7 @@ void udp_destroy_sock(struct sock *sk)
+ if (encap_destroy)
+ encap_destroy(sk);
+ }
+- if (up->encap_enabled)
++ if (udp_test_bit(ENCAP_ENABLED, sk))
+ static_branch_dec(&udp_encap_needed_key);
+ }
+ }
+@@ -2658,9 +2685,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ switch (optname) {
+ case UDP_CORK:
+ if (val != 0) {
+- WRITE_ONCE(up->corkflag, 1);
++ udp_set_bit(CORK, sk);
+ } else {
+- WRITE_ONCE(up->corkflag, 0);
++ udp_clear_bit(CORK, sk);
+ lock_sock(sk);
+ push_pending_frames(sk);
+ release_sock(sk);
+@@ -2675,17 +2702,17 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ case UDP_ENCAP_ESPINUDP_NON_IKE:
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+- up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
++ WRITE_ONCE(up->encap_rcv,
++ ipv6_stub->xfrm6_udp_encap_rcv);
+ else
+ #endif
+- up->encap_rcv = xfrm4_udp_encap_rcv;
++ WRITE_ONCE(up->encap_rcv,
++ xfrm4_udp_encap_rcv);
+ #endif
+ fallthrough;
+ case UDP_ENCAP_L2TPINUDP:
+- up->encap_type = val;
+- lock_sock(sk);
+- udp_tunnel_encap_enable(sk->sk_socket);
+- release_sock(sk);
++ WRITE_ONCE(up->encap_type, val);
++ udp_tunnel_encap_enable(sk);
+ break;
+ default:
+ err = -ENOPROTOOPT;
+@@ -2694,11 +2721,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case UDP_NO_CHECK6_TX:
+- up->no_check6_tx = valbool;
++ udp_set_no_check6_tx(sk, valbool);
+ break;
+
+ case UDP_NO_CHECK6_RX:
+- up->no_check6_rx = valbool;
++ udp_set_no_check6_rx(sk, valbool);
+ break;
+
+ case UDP_SEGMENT:
+@@ -2708,14 +2735,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case UDP_GRO:
+- lock_sock(sk);
+
+ /* when enabling GRO, accept the related GSO packet type */
+ if (valbool)
+- udp_tunnel_encap_enable(sk->sk_socket);
+- up->gro_enabled = valbool;
+- up->accept_udp_l4 = valbool;
+- release_sock(sk);
++ udp_tunnel_encap_enable(sk);
++ udp_assign_bit(GRO_ENABLED, sk, valbool);
++ udp_assign_bit(ACCEPT_L4, sk, valbool);
+ break;
+
+ /*
+@@ -2730,8 +2755,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ val = 8;
+ else if (val > USHRT_MAX)
+ val = USHRT_MAX;
+- up->pcslen = val;
+- up->pcflag |= UDPLITE_SEND_CC;
++ WRITE_ONCE(up->pcslen, val);
++ udp_set_bit(UDPLITE_SEND_CC, sk);
+ break;
+
+ /* The receiver specifies a minimum checksum coverage value. To make
+@@ -2744,8 +2769,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ val = 8;
+ else if (val > USHRT_MAX)
+ val = USHRT_MAX;
+- up->pcrlen = val;
+- up->pcflag |= UDPLITE_RECV_CC;
++ WRITE_ONCE(up->pcrlen, val);
++ udp_set_bit(UDPLITE_RECV_CC, sk);
+ break;
+
+ default:
+@@ -2776,26 +2801,26 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+- len = min_t(unsigned int, len, sizeof(int));
+-
+ if (len < 0)
+ return -EINVAL;
+
++ len = min_t(unsigned int, len, sizeof(int));
++
+ switch (optname) {
+ case UDP_CORK:
+- val = READ_ONCE(up->corkflag);
++ val = udp_test_bit(CORK, sk);
+ break;
+
+ case UDP_ENCAP:
+- val = up->encap_type;
++ val = READ_ONCE(up->encap_type);
+ break;
+
+ case UDP_NO_CHECK6_TX:
+- val = up->no_check6_tx;
++ val = udp_get_no_check6_tx(sk);
+ break;
+
+ case UDP_NO_CHECK6_RX:
+- val = up->no_check6_rx;
++ val = udp_get_no_check6_rx(sk);
+ break;
+
+ case UDP_SEGMENT:
+@@ -2803,17 +2828,17 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ break;
+
+ case UDP_GRO:
+- val = up->gro_enabled;
++ val = udp_test_bit(GRO_ENABLED, sk);
+ break;
+
+ /* The following two cannot be changed on UDP sockets, the return is
+ * always 0 (which corresponds to the full checksum coverage of UDP). */
+ case UDPLITE_SEND_CSCOV:
+- val = up->pcslen;
++ val = READ_ONCE(up->pcslen);
+ break;
+
+ case UDPLITE_RECV_CSCOV:
+- val = up->pcrlen;
++ val = READ_ONCE(up->pcrlen);
+ break;
+
+ default:
+@@ -3116,16 +3141,18 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
+ struct bpf_udp_iter_state *iter = seq->private;
+ struct udp_iter_state *state = &iter->state;
+ struct net *net = seq_file_net(seq);
++ int resume_bucket, resume_offset;
+ struct udp_table *udptable;
+ unsigned int batch_sks = 0;
+ bool resized = false;
+ struct sock *sk;
+
++ resume_bucket = state->bucket;
++ resume_offset = iter->offset;
++
+ /* The current batch is done, so advance the bucket. */
+- if (iter->st_bucket_done) {
++ if (iter->st_bucket_done)
+ state->bucket++;
+- iter->offset = 0;
+- }
+
+ udptable = udp_get_table_seq(seq, net);
+
+@@ -3145,19 +3172,19 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
+ for (; state->bucket <= udptable->mask; state->bucket++) {
+ struct udp_hslot *hslot2 = &udptable->hash2[state->bucket];
+
+- if (hlist_empty(&hslot2->head)) {
+- iter->offset = 0;
++ if (hlist_empty(&hslot2->head))
+ continue;
+- }
+
++ iter->offset = 0;
+ spin_lock_bh(&hslot2->lock);
+ udp_portaddr_for_each_entry(sk, &hslot2->head) {
+ if (seq_sk_match(seq, sk)) {
+ /* Resume from the last iterated socket at the
+ * offset in the bucket before iterator was stopped.
+ */
+- if (iter->offset) {
+- --iter->offset;
++ if (state->bucket == resume_bucket &&
++ iter->offset < resume_offset) {
++ ++iter->offset;
+ continue;
+ }
+ if (iter->end_sk < iter->max_sk) {
+@@ -3171,9 +3198,6 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
+
+ if (iter->end_sk)
+ break;
+-
+- /* Reset the current bucket's offset before moving to the next bucket. */
+- iter->offset = 0;
+ }
+
+ /* All done: no batch made. */
+@@ -3192,7 +3216,6 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
+ /* After allocating a larger batch, retry one more time to grab
+ * the whole bucket.
+ */
+- state->bucket--;
+ goto again;
+ }
+ done:
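+
+The rescore label above trades a second compute_score() call site for a
+backwards goto once reuseport hands back a different socket. A toy model
+of that control flow (score() and the faked reuseport pick are
+illustrative stand-ins, not the kernel logic):
+
+    #include <stdio.h>
+
+    static int score(int v) { return v % 7; } /* compute_score() stand-in */
+
+    int main(void)
+    {
+        int socks[] = { 3, 12, 20, 6 };
+        int result = -1, badness = -1;
+        int s, need_rescore;
+
+        for (int i = 0; i < 4; i++) {
+            need_rescore = 0;
+    rescore:
+            s = score(need_rescore ? result : socks[i]);
+            if (s > badness) {
+                badness = s;
+                if (need_rescore)
+                    continue; /* result already chosen, keep scanning */
+                result = socks[i] + 1; /* pretend reuseport picked a sibling */
+                need_rescore = 1;
+                goto rescore; /* score 'result' without a second call site */
+            }
+        }
+        printf("result=%d badness=%d\n", result, badness);
+        return 0;
+    }
+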
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 0f46b3c2e4ac54..a727eeafd0a96d 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -278,6 +278,11 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ if (gso_skb->len <= sizeof(*uh) + mss)
+ return ERR_PTR(-EINVAL);
+
++ if (unlikely(skb_checksum_start(gso_skb) !=
++ skb_transport_header(gso_skb) &&
++ !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
++ return ERR_PTR(-EINVAL);
++
+ if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
+ /* Packet is from an untrusted source, reset gso_segs. */
+ skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
+@@ -285,8 +290,26 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ return NULL;
+ }
+
+- if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
+- return __udp_gso_segment_list(gso_skb, features, is_ipv6);
++ if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) {
++ /* Detect modified geometry and pass those to skb_segment. */
++ if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
++ return __udp_gso_segment_list(gso_skb, features, is_ipv6);
++
++ /* Setup csum, as fraglist skips this in udp4_gro_receive. */
++ gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
++ gso_skb->csum_offset = offsetof(struct udphdr, check);
++ gso_skb->ip_summed = CHECKSUM_PARTIAL;
++
++ uh = udp_hdr(gso_skb);
++ if (is_ipv6)
++ uh->check = ~udp_v6_check(gso_skb->len,
++ &ipv6_hdr(gso_skb)->saddr,
++ &ipv6_hdr(gso_skb)->daddr, 0);
++ else
++ uh->check = ~udp_v4_check(gso_skb->len,
++ ip_hdr(gso_skb)->saddr,
++ ip_hdr(gso_skb)->daddr, 0);
++ }
+
+ skb_pull(gso_skb, sizeof(*uh));
+
+@@ -449,8 +472,9 @@ static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+- /* sk owenrship - if any - completely transferred to the aggregated packet */
++ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
++ skb->sk = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+@@ -470,6 +494,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ struct sk_buff *p;
+ unsigned int ulen;
+ int ret = 0;
++ int flush;
+
+ /* requires non zero csum, for symmetry with GSO */
+ if (!uh->check) {
+@@ -503,13 +528,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ return p;
+ }
+
++ flush = NAPI_GRO_CB(p)->flush;
++
++ if (NAPI_GRO_CB(p)->flush_id != 1 ||
++ NAPI_GRO_CB(p)->count != 1 ||
++ !NAPI_GRO_CB(p)->is_atomic)
++ flush |= NAPI_GRO_CB(p)->flush_id;
++ else
++ NAPI_GRO_CB(p)->is_atomic = false;
++
+ /* Terminate the flow on len mismatch or if it grow "too much".
+ * Under small packet flood GRO count could elsewhere grow a lot
+ * leading to excessive truesize values.
+ * On len mismatch merge the first packet shorter than gso_size,
+ * otherwise complete the GRO packet.
+ */
+- if (ulen > ntohs(uh2->len)) {
++ if (ulen > ntohs(uh2->len) || flush) {
+ pp = p;
+ } else {
+ if (NAPI_GRO_CB(skb)->is_flist) {
+@@ -551,16 +585,24 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ unsigned int off = skb_gro_offset(skb);
+ int flush = 1;
+
+- /* we can do L4 aggregation only if the packet can't land in a tunnel
+- * otherwise we could corrupt the inner stream
++	/* We can do L4 aggregation only if the packet can't land in a tunnel;
++ * otherwise we could corrupt the inner stream. Detecting such packets
++ * cannot be foolproof and the aggregation might still happen in some
++ * cases. Such packets should be caught in udp_unexpected_gso later.
+ */
+ NAPI_GRO_CB(skb)->is_flist = 0;
+ if (!sk || !udp_sk(sk)->gro_receive) {
++ /* If the packet was locally encapsulated in a UDP tunnel that
++ * wasn't detected above, do not GRO.
++ */
++ if (skb->encapsulation)
++ goto out;
++
+ if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
+- NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
++ NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
+
+ if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
+- (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
++ (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist)
+ return call_gro_receive(udp_gro_receive_segment, head, skb);
+
+ /* no GRO, be sure flush the current packet */
+@@ -709,7 +751,8 @@ EXPORT_SYMBOL(udp_gro_complete);
+
+ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+- const struct iphdr *iph = ip_hdr(skb);
++ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
++ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
+ struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+ /* do fraglist only if there is no outer UDP encap (or we already processed it) */
+@@ -719,13 +762,7 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+ skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+- if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+- skb->csum_level++;
+- } else {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- skb->csum_level = 0;
+- }
++ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
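+
+For fraglist skbs with modified geometry, the hunk above seeds uh->check
+with the ones'-complement sum of the pseudo-header and lets
+CHECKSUM_PARTIAL finish the job over the payload. A rough userspace
+equivalent of that seed for IPv4 (RFC 1071 folding; simplified, not the
+kernel's csum implementation):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    static uint16_t csum_fold(uint32_t sum)
+    {
+        while (sum >> 16)
+            sum = (sum & 0xffff) + (sum >> 16);
+        return (uint16_t)sum;
+    }
+
+    /* ~udp_v4_check(len, saddr, daddr, 0), roughly: the folded but
+     * NOT-yet-inverted pseudo-header sum, to be continued over the
+     * UDP header and payload by the offload path. */
+    static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr,
+                                    uint16_t len)
+    {
+        uint32_t sum = 0;
+
+        sum += (saddr >> 16) + (saddr & 0xffff);
+        sum += (daddr >> 16) + (daddr & 0xffff);
+        sum += 17;  /* IPPROTO_UDP */
+        sum += len; /* UDP length */
+        return csum_fold(sum);
+    }
+
+    int main(void)
+    {
+        printf("seed=0x%04x\n",
+               pseudo_hdr_seed(0xc0a80001, 0xc0a80002, 1472));
+        return 0;
+    }
+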
+diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
+index 9b18f371af0d49..1e7e4aecdc48a2 100644
+--- a/net/ipv4/udp_tunnel_core.c
++++ b/net/ipv4/udp_tunnel_core.c
+@@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ udp_sk(sk)->gro_receive = cfg->gro_receive;
+ udp_sk(sk)->gro_complete = cfg->gro_complete;
+
+- udp_tunnel_encap_enable(sock);
++ udp_tunnel_encap_enable(sk);
+ }
+ EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
+
+diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
+index 39ecdad1b50ce5..af37af3ab727bf 100644
+--- a/net/ipv4/udplite.c
++++ b/net/ipv4/udplite.c
+@@ -21,7 +21,6 @@ EXPORT_SYMBOL(udplite_table);
+ static int udplite_sk_init(struct sock *sk)
+ {
+ udp_init_sock(sk);
+- udp_sk(sk)->pcflag = UDPLITE_BIT;
+ pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
+ "please contact the netdev mailing list\n");
+ return 0;
+diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
+index eac206a290d059..f6e90ba50b639d 100644
+--- a/net/ipv4/xfrm4_input.c
++++ b/net/ipv4/xfrm4_input.c
+@@ -61,7 +61,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
+ ip_send_check(iph);
+
+ if (xo && (xo->flags & XFRM_GRO)) {
+- skb_mac_header_rebuild(skb);
++ /* The full l2 header needs to be preserved so that re-injecting the packet at l2
++ * works correctly in the presence of vlan tags.
++ */
++ skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
++ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ return 0;
+ }
+@@ -85,11 +89,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ struct udphdr *uh;
+ struct iphdr *iph;
+ int iphlen, len;
+-
+ __u8 *udpdata;
+ __be32 *udpdata32;
+- __u16 encap_type = up->encap_type;
++ u16 encap_type;
+
++ encap_type = READ_ONCE(up->encap_type);
+ /* if this is not encapsulated socket, then just return now */
+ if (!encap_type)
+ return 1;
+diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
+index 08d4b7132d4c45..1c9c686d9522f7 100644
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL
+ bool "IPv6: RPL Source Routing Header support"
+ depends on IPV6
+ select LWTUNNEL
++ select DST_CACHE
+ help
+ Support for RFC6554 RPL Source Routing Header using the lightweight
+ tunnels mechanism.
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 0b6ee962c84e27..a9358c796a8150 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -706,6 +706,22 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ return err;
+ }
+
++/* Combine dev_addr_genid and dev_base_seq to detect changes.
++ */
++static u32 inet6_base_seq(const struct net *net)
++{
++ u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
++ net->dev_base_seq;
++
++ /* Must not return 0 (see nl_dump_check_consistent()).
++	 * Choose a value far away from 0.
++ */
++ if (!res)
++ res = 0x80000000;
++ return res;
++}
++
++
+ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+ struct netlink_callback *cb)
+ {
+@@ -739,8 +755,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+ idx = 0;
+ head = &net->dev_index_head[h];
+ rcu_read_lock();
+- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
+- net->dev_base_seq;
++ cb->seq = inet6_base_seq(net);
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+@@ -1824,7 +1839,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
+ master, &dst,
+ scores, hiscore_idx);
+
+- if (scores[hiscore_idx].ifa)
++ if (scores[hiscore_idx].ifa &&
++ scores[hiscore_idx].scopedist >= 0)
+ goto out;
+ }
+
+@@ -2043,9 +2059,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
+ if (ipv6_addr_equal(&ifp->addr, addr)) {
+ if (!dev || ifp->idev->dev == dev ||
+ !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
+- result = ifp;
+- in6_ifa_hold(ifp);
+- break;
++ if (in6_ifa_hold_safe(ifp)) {
++ result = ifp;
++ break;
++ }
+ }
+ }
+ }
+@@ -4144,7 +4161,7 @@ static void addrconf_dad_work(struct work_struct *w)
+ if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
+ ipv6_addr_equal(&ifp->addr, &addr)) {
+ /* DAD failed for link-local based on MAC */
+- idev->cnf.disable_ipv6 = 1;
++ WRITE_ONCE(idev->cnf.disable_ipv6, 1);
+
+ pr_info("%s: IPv6 being disabled!\n",
+ ifp->idev->dev->name);
+@@ -5358,7 +5375,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+ }
+
+ rcu_read_lock();
+- cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
++ cb->seq = inet6_base_seq(tgt_net);
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &tgt_net->dev_index_head[h];
+@@ -5490,9 +5507,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ }
+
+ addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
+- if (!addr)
+- return -EINVAL;
+-
++ if (!addr) {
++ err = -EINVAL;
++ goto errout;
++ }
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifa_index)
+ dev = dev_get_by_index(tgt_net, ifm->ifa_index);
+@@ -5994,7 +6012,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
+ (dev->ifindex != dev_get_iflink(dev) &&
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ nla_put_u8(skb, IFLA_OPERSTATE,
+- netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
++ netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
+ goto nla_put_failure;
+ protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
+ if (!protoinfo)
+@@ -6137,11 +6155,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
+ pmsg->prefix_len = pinfo->prefix_len;
+ pmsg->prefix_type = pinfo->type;
+ pmsg->prefix_pad3 = 0;
+- pmsg->prefix_flags = 0;
+- if (pinfo->onlink)
+- pmsg->prefix_flags |= IF_PREFIX_ONLINK;
+- if (pinfo->autoconf)
+- pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
++ pmsg->prefix_flags = pinfo->flags;
+
+ if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
+ goto nla_put_failure;
+@@ -6308,7 +6322,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
+- idev->cnf.disable_ipv6 = newf;
++
++ WRITE_ONCE(idev->cnf.disable_ipv6, newf);
+ if (changed)
+ dev_disable_change(idev);
+ }
+@@ -6325,7 +6340,7 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+
+ net = (struct net *)table->extra2;
+ old = *p;
+- *p = newf;
++ WRITE_ONCE(*p, newf);
+
+ if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+ rtnl_unlock();
+@@ -6333,7 +6348,7 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+ }
+
+ if (p == &net->ipv6.devconf_all->disable_ipv6) {
+- net->ipv6.devconf_dflt->disable_ipv6 = newf;
++ WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
+ addrconf_disable_change(net, newf);
+ } else if ((!newf) ^ (!old))
+ dev_disable_change((struct inet6_dev *)table->extra1);
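+
+inet6_base_seq() above sums the address generation counter and
+dev_base_seq but refuses to return 0, because nl_dump_check_consistent()
+treats prev_seq == 0 as "nothing recorded yet"; a seq of 0 would quietly
+disable NLM_F_DUMP_INTR detection. A toy version of the guard
+(illustrative only):
+
+    #include <stdio.h>
+
+    static unsigned int base_seq(unsigned int genid, unsigned int dev_base_seq)
+    {
+        unsigned int res = genid + dev_base_seq;
+
+        return res ? res : 0x80000000; /* keep 0 reserved for "unchecked" */
+    }
+
+    int main(void)
+    {
+        printf("%#x\n", base_seq(0, 0));           /* remapped: 0x80000000 */
+        printf("%#x\n", base_seq(0xffffffffu, 1)); /* wraps to 0, remapped */
+        return 0;
+    }
+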
+diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
+index 507a8353a6bdb9..c008d21925d7f4 100644
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -220,19 +220,26 @@ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+ EXPORT_SYMBOL_GPL(ipv6_stub);
+
+ /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
+-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
++const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_LOOPBACK_INIT;
+ EXPORT_SYMBOL(in6addr_loopback);
+-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
++const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_ANY_INIT;
+ EXPORT_SYMBOL(in6addr_any);
+-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
++const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+ EXPORT_SYMBOL(in6addr_linklocal_allnodes);
+-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
++const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+ EXPORT_SYMBOL(in6addr_linklocal_allrouters);
+-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
++const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+ EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
+-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
++const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+ EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
+-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
++const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8)
++ = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+ EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
+
+ static void snmp6_free_dev(struct inet6_dev *idev)
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 368824fe9719f9..b9c50cceba568c 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -199,6 +199,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ if (INET_PROTOSW_REUSE & answer_flags)
+ sk->sk_reuse = SK_CAN_REUSE;
+
++ if (INET_PROTOSW_ICSK & answer_flags)
++ inet_init_csk_locks(sk);
++
+ inet = inet_sk(sk);
+ inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
+
+@@ -453,7 +456,7 @@ int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ /* BPF prog is run before any checks are done so that if the prog
+ * changes context in a wrong way it will be caught.
+ */
+- err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
++ err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
+ CGROUP_INET6_BIND, &flags);
+ if (err)
+ return err;
+@@ -519,6 +522,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+ {
+ struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr;
++ int sin_addr_len = sizeof(*sin);
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+@@ -538,7 +542,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
+ sin->sin6_addr = sk->sk_v6_daddr;
+ if (np->sndflow)
+ sin->sin6_flowinfo = np->flow_label;
+- BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
++ BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
+ CGROUP_INET6_GETPEERNAME);
+ } else {
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+@@ -546,13 +550,13 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
+ else
+ sin->sin6_addr = sk->sk_v6_rcv_saddr;
+ sin->sin6_port = inet->inet_sport;
+- BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
++ BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
+ CGROUP_INET6_GETSOCKNAME);
+ }
+ sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,
+ sk->sk_bound_dev_if);
+ release_sock(sk);
+- return sizeof(*sin);
++ return sin_addr_len;
+ }
+ EXPORT_SYMBOL(inet6_getname);
+
+@@ -1060,6 +1064,7 @@ static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
+ .udp6_lib_lookup = __udp6_lib_lookup,
+ .ipv6_setsockopt = do_ipv6_setsockopt,
+ .ipv6_getsockopt = do_ipv6_getsockopt,
++ .ipv6_dev_get_saddr = ipv6_dev_get_saddr,
+ };
+
+ static int __init inet6_init(void)
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 2cc1a45742d823..62bb9651133c4d 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -112,7 +112,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+ __alignof__(struct scatterlist));
+ }
+
+-static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
++static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
+ {
+ struct crypto_aead *aead = x->data;
+ int extralen = 0;
+@@ -131,7 +131,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+ */
+ if (req->src != req->dst)
+ for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+- put_page(sg_page(sg));
++ skb_page_unref(skb, sg_page(sg), false);
+ }
+
+ #ifdef CONFIG_INET6_ESPINTCP
+@@ -255,8 +255,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ #else
+ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
+ {
+- kfree_skb(skb);
+-
++ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+ #endif
+@@ -294,7 +293,7 @@ static void esp_output_done(void *data, int err)
+ }
+
+ tmp = ESP_SKB_CB(skb)->tmp;
+- esp_ssg_unref(x, tmp);
++ esp_ssg_unref(x, tmp, skb);
+ kfree(tmp);
+
+ esp_output_encap_csum(skb);
+@@ -677,7 +676,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
+ }
+
+ if (sg != dsg)
+- esp_ssg_unref(x, tmp);
++ esp_ssg_unref(x, tmp, skb);
+
+ if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
+ err = esp_output_tail_tcp(x, skb);
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 4952ae79245057..02e9ffb63af197 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -177,6 +177,8 @@ static bool ip6_parse_tlv(bool hopbyhop,
+ case IPV6_TLV_IOAM:
+ if (!ipv6_hop_ioam(skb, off))
+ return false;
++
++ nh = skb_network_header(skb);
+ break;
+ case IPV6_TLV_JUMBO:
+ if (!ipv6_hop_jumbo(skb, off))
+@@ -943,6 +945,14 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
+ if (!skb_valid_dst(skb))
+ ip6_route_input(skb);
+
++ /* About to mangle packet header */
++ if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
++ goto drop;
++
++ /* Trace pointer may have changed */
++ trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
++ + optoff + sizeof(*hdr));
++
+ ioam6_fill_trace_data(skb, ns, trace, true);
+ break;
+ default:
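+
+The exthdrs.c fix above is the classic "pointer into a buffer that may
+move" pattern: skb_ensure_writable() can reallocate the packet data, so
+the trace pointer must be derived again afterwards. The userspace
+analogue with realloc() (illustrative only):
+
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <string.h>
+
+    int main(void)
+    {
+        char *buf = malloc(16);
+        char *opt;
+
+        if (!buf)
+            return 1;
+        strcpy(buf, "hdr:ioam6-trace");
+        opt = buf + 4;             /* pointer into the buffer */
+
+        buf = realloc(buf, 65536); /* may move the whole allocation... */
+        if (!buf)
+            return 1;
+        opt = buf + 4;             /* ...so recompute the pointer */
+
+        printf("%s\n", opt);
+        free(buf);
+        return 0;
+    }
+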
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index 7c20038330104e..6eeab21512ba98 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -233,8 +233,12 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
+ rt = pol_lookup_func(lookup,
+ net, table, flp6, arg->lookup_data, flags);
+ if (rt != net->ipv6.ip6_null_entry) {
++ struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
++
++ if (!idev)
++ goto again;
+ err = fib6_rule_saddr(net, rule, flags, flp6,
+- ip6_dst_idev(&rt->dst)->dev);
++ idev->dev);
+
+ if (err == -EAGAIN)
+ goto again;
+@@ -449,6 +453,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ + nla_total_size(16); /* src */
+ }
+
++static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
++{
++ rt_genid_bump_ipv6(ops->fro_net);
++}
++
+ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
+ .family = AF_INET6,
+ .rule_size = sizeof(struct fib6_rule),
+@@ -461,6 +470,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
+ .compare = fib6_rule_compare,
+ .fill = fib6_rule_fill,
+ .nlmsg_payload = fib6_rule_nlmsg_payload,
++ .flush_cache = fib6_rule_flush_cache,
+ .nlgroup = RTNLGRP_IPV6_RULE,
+ .owner = THIS_MODULE,
+ .fro_net = &init_net,
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 93a594a901d12b..a790294d310484 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -175,14 +175,16 @@ static bool icmpv6_mask_allow(struct net *net, int type)
+ return false;
+ }
+
+-static bool icmpv6_global_allow(struct net *net, int type)
++static bool icmpv6_global_allow(struct net *net, int type,
++ bool *apply_ratelimit)
+ {
+ if (icmpv6_mask_allow(net, type))
+ return true;
+
+- if (icmp_global_allow())
++ if (icmp_global_allow()) {
++ *apply_ratelimit = true;
+ return true;
+-
++ }
+ __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
+ return false;
+ }
+@@ -191,13 +193,13 @@ static bool icmpv6_global_allow(struct net *net, int type)
+ * Check the ICMP output rate limit
+ */
+ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+- struct flowi6 *fl6)
++ struct flowi6 *fl6, bool apply_ratelimit)
+ {
+ struct net *net = sock_net(sk);
+ struct dst_entry *dst;
+ bool res = false;
+
+- if (icmpv6_mask_allow(net, type))
++ if (!apply_ratelimit)
+ return true;
+
+ /*
+@@ -228,6 +230,8 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ if (!res)
+ __ICMP6_INC_STATS(net, ip6_dst_idev(dst),
+ ICMP6_MIB_RATELIMITHOST);
++ else
++ icmp_global_consume();
+ dst_release(dst);
+ return res;
+ }
+@@ -452,6 +456,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ struct net *net;
+ struct ipv6_pinfo *np;
+ const struct in6_addr *saddr = NULL;
++ bool apply_ratelimit = false;
+ struct dst_entry *dst;
+ struct icmp6hdr tmp_hdr;
+ struct flowi6 fl6;
+@@ -533,11 +538,12 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ return;
+ }
+
+- /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
++ /* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
+ local_bh_disable();
+
+ /* Check global sysctl_icmp_msgs_per_sec ratelimit */
+- if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
++ if (!(skb->dev->flags & IFF_LOOPBACK) &&
++ !icmpv6_global_allow(net, type, &apply_ratelimit))
+ goto out_bh_enable;
+
+ mip6_addr_swap(skb, parm);
+@@ -575,7 +581,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+
+ np = inet6_sk(sk);
+
+- if (!icmpv6_xrlim_allow(sk, type, &fl6))
++ if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
+ goto out;
+
+ tmp_hdr.icmp6_type = type;
+@@ -717,6 +723,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
+ struct ipv6_pinfo *np;
+ const struct in6_addr *saddr = NULL;
+ struct icmp6hdr *icmph = icmp6_hdr(skb);
++ bool apply_ratelimit = false;
+ struct icmp6hdr tmp_hdr;
+ struct flowi6 fl6;
+ struct icmpv6_msg msg;
+@@ -781,8 +788,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
+ goto out;
+
+ /* Check the ratelimit */
+- if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) ||
+- !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6))
++ if ((!(skb->dev->flags & IFF_LOOPBACK) &&
++ !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) ||
++ !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit))
+ goto out_dst_release;
+
+ idev = __in6_dev_get(skb->dev);
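+
+The apply_ratelimit plumbing above splits "may I send?" from "I sent":
+the global ICMPv6 limiter grants permission first, and its credit is only
+consumed once the per-destination check also passes, so refused replies
+no longer drain the global budget. A toy model of the flow (the names
+and single-credit bucket are illustrative, not the kernel's token
+bucket):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    static int credits = 1;
+
+    static bool global_allow(bool *apply_ratelimit)
+    {
+        if (credits > 0) {
+            *apply_ratelimit = true;
+            return true;
+        }
+        return false;
+    }
+
+    static void global_consume(void)
+    {
+        if (credits > 0)
+            credits--;
+    }
+
+    static bool send_reply(bool peer_ok)
+    {
+        bool apply_ratelimit = false;
+
+        if (!global_allow(&apply_ratelimit))
+            return false;
+        if (apply_ratelimit && !peer_ok)
+            return false;     /* refused: the credit stays untouched */
+        if (apply_ratelimit)
+            global_consume(); /* really sending: spend it now */
+        return true;
+    }
+
+    int main(void)
+    {
+        int a = send_reply(false); /* refused, credit kept */
+        int b = send_reply(true);  /* sent, credit spent */
+        int c = send_reply(true);  /* bucket empty */
+
+        printf("%d %d %d\n", a, b, c); /* 0 1 0 */
+        return 0;
+    }
+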
+diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
+index ad5f6f6ba33302..85b92917849bff 100644
+--- a/net/ipv6/ila/ila.h
++++ b/net/ipv6/ila/ila.h
+@@ -108,6 +108,7 @@ int ila_lwt_init(void);
+ void ila_lwt_fini(void);
+
+ int ila_xlat_init_net(struct net *net);
++void ila_xlat_pre_exit_net(struct net *net);
+ void ila_xlat_exit_net(struct net *net);
+
+ int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index 8c1ce78956bae2..9d37f7164e732e 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ return orig_dst->lwtstate->orig_output(net, sk, skb);
+ }
+
++ local_bh_disable();
+ dst = dst_cache_get(&ilwt->dst_cache);
++ local_bh_enable();
+ if (unlikely(!dst)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct flowi6 fl6;
+@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
+- if (ilwt->connected)
++ if (ilwt->connected) {
++ local_bh_disable();
+ dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
++ local_bh_enable();
++ }
+ }
+
+ skb_dst_set(skb, dst);
+diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
+index 69caed07315f0c..976c78efbae170 100644
+--- a/net/ipv6/ila/ila_main.c
++++ b/net/ipv6/ila/ila_main.c
+@@ -71,6 +71,11 @@ static __net_init int ila_init_net(struct net *net)
+ return err;
+ }
+
++static __net_exit void ila_pre_exit_net(struct net *net)
++{
++ ila_xlat_pre_exit_net(net);
++}
++
+ static __net_exit void ila_exit_net(struct net *net)
+ {
+ ila_xlat_exit_net(net);
+@@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net)
+
+ static struct pernet_operations ila_net_ops = {
+ .init = ila_init_net,
++ .pre_exit = ila_pre_exit_net,
+ .exit = ila_exit_net,
+ .id = &ila_net_id,
+ .size = sizeof(struct ila_net),
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 67e8c9440977a4..534a4498e280d7 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -619,6 +619,15 @@ int ila_xlat_init_net(struct net *net)
+ return 0;
+ }
+
++void ila_xlat_pre_exit_net(struct net *net)
++{
++ struct ila_net *ilan = net_generic(net, ila_net_id);
++
++ if (ilan->xlat.hooks_registered)
++ nf_unregister_net_hooks(net, ila_nf_hook_ops,
++ ARRAY_SIZE(ila_nf_hook_ops));
++}
++
+ void ila_xlat_exit_net(struct net *net)
+ {
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+@@ -626,10 +635,6 @@ void ila_xlat_exit_net(struct net *net)
+ rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
+
+ free_bucket_spinlocks(ilan->xlat.locks);
+-
+- if (ilan->xlat.hooks_registered)
+- nf_unregister_net_hooks(net, ila_nf_hook_ops,
+- ARRAY_SIZE(ila_nf_hook_ops));
+ }
+
+ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index f6f5b83dd954db..a5cfc5b0b206bb 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -351,9 +351,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ goto drop;
+
+ if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+- preempt_disable();
++ local_bh_disable();
+ dst = dst_cache_get(&ilwt->cache);
+- preempt_enable();
++ local_bh_enable();
+
+ if (unlikely(!dst)) {
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -373,9 +373,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
+- preempt_disable();
++ local_bh_disable();
+ dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
+- preempt_enable();
++ local_bh_enable();
+ }
+
+ skb_dst_drop(skb);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 28b01a068412ab..4356806b52bd51 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -160,8 +160,6 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
+ INIT_LIST_HEAD(&f6i->fib6_siblings);
+ refcount_set(&f6i->fib6_ref, 1);
+
+- INIT_HLIST_NODE(&f6i->gc_link);
+-
+ return f6i;
+ }
+
+@@ -248,7 +246,6 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
+ net->ipv6.fib6_null_entry);
+ table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&table->tb6_peers);
+- INIT_HLIST_HEAD(&table->tb6_gc_hlist);
+ }
+
+ return table;
+@@ -648,19 +645,19 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
+ if (!w) {
+ /* New dump:
+ *
+- * 1. hook callback destructor.
+- */
+- cb->args[3] = (long)cb->done;
+- cb->done = fib6_dump_done;
+-
+- /*
+- * 2. allocate and initialize walker.
++ * 1. allocate and initialize walker.
+ */
+ w = kzalloc(sizeof(*w), GFP_ATOMIC);
+ if (!w)
+ return -ENOMEM;
+ w->func = fib6_dump_node;
+ cb->args[2] = (long)w;
++
++ /* 2. hook callback destructor.
++ */
++ cb->args[3] = (long)cb->done;
++ cb->done = fib6_dump_done;
++
+ }
+
+ arg.skb = skb;
+@@ -964,6 +961,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ if (!fib6_nh->rt6i_pcpu)
+ return;
+
++ rcu_read_lock();
+ /* release the reference to this fib entry from
+ * all of its cached pcpu routes
+ */
+@@ -972,7 +970,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ struct rt6_info *pcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+- pcpu_rt = *ppcpu_rt;
++
++ /* Paired with xchg() in rt6_get_pcpu_route() */
++ pcpu_rt = READ_ONCE(*ppcpu_rt);
+
+ /* only dropping the 'from' reference if the cached route
+ * is using 'match'. The cached pcpu_rt->from only changes
+@@ -986,6 +986,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ fib6_info_release(from);
+ }
+ }
++ rcu_read_unlock();
+ }
+
+ struct fib6_nh_pcpu_arg {
+@@ -1060,8 +1061,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
+ lockdep_is_held(&table->tb6_lock));
+ }
+ }
+-
+- fib6_clean_expires_locked(rt);
+ }
+
+ /*
+@@ -1123,10 +1122,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ if (!(iter->fib6_flags & RTF_EXPIRES))
+ return -EEXIST;
+ if (!(rt->fib6_flags & RTF_EXPIRES))
+- fib6_clean_expires_locked(iter);
++ fib6_clean_expires(iter);
+ else
+- fib6_set_expires_locked(iter,
+- rt->expires);
++ fib6_set_expires(iter, rt->expires);
+
+ if (rt->fib6_pmtu)
+ fib6_metric_set(iter, RTAX_MTU,
+@@ -1381,7 +1379,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ struct nl_info *info, struct netlink_ext_ack *extack)
+ {
+ struct fib6_table *table = rt->fib6_table;
+- struct fib6_node *fn, *pn = NULL;
++ struct fib6_node *fn;
++#ifdef CONFIG_IPV6_SUBTREES
++ struct fib6_node *pn = NULL;
++#endif
+ int err = -ENOMEM;
+ int allow_create = 1;
+ int replace_required = 0;
+@@ -1405,9 +1406,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ goto out;
+ }
+
++#ifdef CONFIG_IPV6_SUBTREES
+ pn = fn;
+
+-#ifdef CONFIG_IPV6_SUBTREES
+ if (rt->fib6_src.plen) {
+ struct fib6_node *sn;
+
+@@ -1485,10 +1486,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ if (rt->nh)
+ list_add(&rt->nh_list, &rt->nh->f6i_list);
+ __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+-
+- if (fib6_has_expires(rt))
+- hlist_add_head(&rt->gc_link, &table->tb6_gc_hlist);
+-
+ fib6_start_gc(info->nl_net, rt);
+ }
+
+@@ -1511,13 +1508,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
+ if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
+ pn_leaf = fib6_find_prefix(info->nl_net, table,
+ pn);
+-#if RT6_DEBUG >= 2
+- if (!pn_leaf) {
+- WARN_ON(!pn_leaf);
++ if (!pn_leaf)
+ pn_leaf =
+ info->nl_net->ipv6.fib6_null_entry;
+- }
+-#endif
+ fib6_info_hold(pn_leaf);
+ rcu_assign_pointer(pn->leaf, pn_leaf);
+ }
+@@ -2295,8 +2288,9 @@ static void fib6_flush_trees(struct net *net)
+ * Garbage collection
+ */
+
+-static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
++static int fib6_age(struct fib6_info *rt, void *arg)
+ {
++ struct fib6_gc_args *gc_args = arg;
+ unsigned long now = jiffies;
+
+ /*
+@@ -2304,7 +2298,7 @@ static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
+ * Routes are expired even if they are in use.
+ */
+
+- if (fib6_has_expires(rt) && rt->expires) {
++ if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
+ if (time_after(now, rt->expires)) {
+ RT6_TRACE("expiring %p\n", rt);
+ return -1;
+@@ -2321,40 +2315,6 @@ static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
+ return 0;
+ }
+
+-static void fib6_gc_table(struct net *net,
+- struct fib6_table *tb6,
+- struct fib6_gc_args *gc_args)
+-{
+- struct fib6_info *rt;
+- struct hlist_node *n;
+- struct nl_info info = {
+- .nl_net = net,
+- .skip_notify = false,
+- };
+-
+- hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
+- if (fib6_age(rt, gc_args) == -1)
+- fib6_del(rt, &info);
+-}
+-
+-static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
+-{
+- struct fib6_table *table;
+- struct hlist_head *head;
+- unsigned int h;
+-
+- rcu_read_lock();
+- for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+- head = &net->ipv6.fib_table_hash[h];
+- hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+- spin_lock_bh(&table->tb6_lock);
+- fib6_gc_table(net, table, gc_args);
+- spin_unlock_bh(&table->tb6_lock);
+- }
+- }
+- rcu_read_unlock();
+-}
+-
+ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
+ {
+ struct fib6_gc_args gc_args;
+@@ -2370,7 +2330,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
+ net->ipv6.sysctl.ip6_rt_gc_interval;
+ gc_args.more = 0;
+
+- fib6_gc_all(net, &gc_args);
++ fib6_clean_all(net, fib6_age, &gc_args);
+ now = jiffies;
+ net->ipv6.ip6_rt_last_gc = now;
+
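+
+fib6_age() above keys expiry off time_after(), whose wrap-safe trick is
+worth spelling out; a simplified form of the jiffies.h macro (the real
+one also type-checks its arguments):
+
+    #include <stdio.h>
+
+    #define time_after(a, b) ((long)((b) - (a)) < 0)
+
+    int main(void)
+    {
+        unsigned long now = 10;                    /* jiffies wrapped past 0 */
+        unsigned long expires = (unsigned long)-5; /* armed before the wrap */
+
+        printf("naive now > expires:      %d\n", now > expires); /* 0, wrong */
+        printf("time_after(now, expires): %d\n",
+               time_after(now, expires));                        /* 1, right */
+        return 0;
+    }
+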
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 070d87abf7c028..289b83347d9d57 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -528,6 +528,9 @@ static int ip6erspan_rcv(struct sk_buff *skb,
+ struct ip6_tnl *tunnel;
+ u8 ver;
+
++ if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
++ return PACKET_REJECT;
++
+ ipv6h = ipv6_hdr(skb);
+ ershdr = (struct erspan_base_hdr *)skb->data;
+ ver = ershdr->ver;
+@@ -1511,6 +1514,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
+ ip6gre_tnl_init_features(dev);
+
+ netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
++ netdev_lockdep_set_classes(dev);
+ return 0;
+
+ cleanup_dst_cache_init:
+@@ -1903,6 +1907,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
+ ip6erspan_tnl_link_config(tunnel, 1);
+
+ netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
++ netdev_lockdep_set_classes(dev);
+ return 0;
+
+ cleanup_dst_cache_init:
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index b8378814532cea..1ba97933c74fbd 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+
+ SKB_DR_SET(reason, NOT_SPECIFIED);
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
+- !idev || unlikely(idev->cnf.disable_ipv6)) {
++ !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
+ __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+- if (idev && unlikely(idev->cnf.disable_ipv6))
++ if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
+ SKB_DR_SET(reason, IPV6DISABLED);
+ goto drop;
+ }
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index d6314287338da1..7f014a8969fb25 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -37,6 +37,40 @@
+ INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
+ })
+
++static int ipv6_gro_pull_exthdrs(struct sk_buff *skb, int off, int proto)
++{
++ const struct net_offload *ops = NULL;
++ struct ipv6_opt_hdr *opth;
++
++ for (;;) {
++ int len;
++
++ ops = rcu_dereference(inet6_offloads[proto]);
++
++ if (unlikely(!ops))
++ break;
++
++ if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
++ break;
++
++ opth = skb_gro_header(skb, off + sizeof(*opth), off);
++ if (unlikely(!opth))
++ break;
++
++ len = ipv6_optlen(opth);
++
++ opth = skb_gro_header(skb, off + len, off);
++ if (unlikely(!opth))
++ break;
++ proto = opth->nexthdr;
++
++ off += len;
++ }
++
++ skb_gro_pull(skb, off - skb_network_offset(skb));
++ return proto;
++}
++
+ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
+ {
+ const struct net_offload *ops = NULL;
+@@ -206,28 +240,26 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
+ goto out;
+
+ skb_set_network_header(skb, off);
+- skb_gro_pull(skb, sizeof(*iph));
+- skb_set_transport_header(skb, skb_gro_offset(skb));
++ NAPI_GRO_CB(skb)->inner_network_offset = off;
+
+- flush += ntohs(iph->payload_len) != skb_gro_len(skb);
++ flush += ntohs(iph->payload_len) != skb->len - hlen;
+
+ proto = iph->nexthdr;
+ ops = rcu_dereference(inet6_offloads[proto]);
+ if (!ops || !ops->callbacks.gro_receive) {
+- pskb_pull(skb, skb_gro_offset(skb));
+- skb_gro_frag0_invalidate(skb);
+- proto = ipv6_gso_pull_exthdrs(skb, proto);
+- skb_gro_pull(skb, -skb_transport_offset(skb));
+- skb_reset_transport_header(skb);
+- __skb_push(skb, skb_gro_offset(skb));
++ proto = ipv6_gro_pull_exthdrs(skb, hlen, proto);
+
+ ops = rcu_dereference(inet6_offloads[proto]);
+ if (!ops || !ops->callbacks.gro_receive)
+ goto out;
+
+- iph = ipv6_hdr(skb);
++ iph = skb_gro_network_header(skb);
++ } else {
++ skb_gro_pull(skb, sizeof(*iph));
+ }
+
++ skb_set_transport_header(skb, skb_gro_offset(skb));
++
+ NAPI_GRO_CB(skb)->proto = proto;
+
+ flush--;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 54fc4c711f2c54..5d8d86c159dc30 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -70,11 +70,15 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+
+ /* Be paranoid, rather than too clever. */
+ if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
++ /* Make sure idev stays alive */
++ rcu_read_lock();
+ skb = skb_expand_head(skb, hh_len);
+ if (!skb) {
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ rcu_read_unlock();
+ return -ENOMEM;
+ }
++ rcu_read_unlock();
+ }
+
+ hdr = ipv6_hdr(skb);
+@@ -117,6 +121,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ return res;
+ }
+
++ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
++
+ rcu_read_lock();
+ nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
+ neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
+@@ -162,7 +168,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+ int err;
+
+ skb_mark_not_on_list(segs);
+- err = ip6_fragment(net, sk, segs, ip6_finish_output2);
++ /* Last GSO segment can be smaller than gso_size (and MTU).
++ * Adding a fragment header would produce an "atomic fragment",
++ * which is considered harmful (RFC-8021). Avoid that.
++ */
++ err = segs->len > mtu ?
++ ip6_fragment(net, sk, segs, ip6_finish_output2) :
++ ip6_finish_output2(net, sk, segs);
+ if (err && ret == 0)
+ ret = err;
+ }
+@@ -219,7 +231,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->dev = dev;
+
+- if (unlikely(idev->cnf.disable_ipv6)) {
++ if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
+ return 0;
+@@ -269,11 +281,15 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ head_room += opt->opt_nflen + opt->opt_flen;
+
+ if (unlikely(head_room > skb_headroom(skb))) {
++ /* Make sure idev stays alive */
++ rcu_read_lock();
+ skb = skb_expand_head(skb, head_room);
+ if (!skb) {
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ rcu_read_unlock();
+ return -ENOBUFS;
+ }
++ rcu_read_unlock();
+ }
+
+ if (opt) {
+@@ -329,7 +345,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+
+ mtu = dst_mtu(dst);
+ if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
+- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+@@ -1114,6 +1130,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ from = rt ? rcu_dereference(rt->from) : NULL;
+ err = ip6_route_get_saddr(net, from, &fl6->daddr,
+ sk ? inet6_sk(sk)->srcprefs : 0,
++ fl6->flowi6_l3mdev,
+ &fl6->saddr);
+ rcu_read_unlock();
+
+@@ -1989,13 +2006,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
+ skb->tstamp = cork->base.transmit_time;
+
+ ip6_cork_steal_dst(skb, cork);
+- IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
++ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
+ if (proto == IPPROTO_ICMPV6) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ u8 icmp6_type;
+
+ if (sk->sk_socket->type == SOCK_RAW &&
+- !inet_test_bit(HDRINCL, sk))
++ !(fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH))
+ icmp6_type = fl6->fl6_icmp_type;
+ else
+ icmp6_type = icmp6_hdr(skb)->icmp6_type;
+@@ -2014,6 +2031,7 @@ int ip6_send_skb(struct sk_buff *skb)
+ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ int err;
+
++ rcu_read_lock();
+ err = ip6_local_out(net, skb->sk, skb);
+ if (err) {
+ if (err > 0)
+@@ -2023,6 +2041,7 @@ int ip6_send_skb(struct sk_buff *skb)
+ IPSTATS_MIB_OUTDISCARDS);
+ }
+
++ rcu_read_unlock();
+ return err;
+ }
+
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 5e80e517f07101..97905d4174eca5 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -399,7 +399,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+- u8 next, nexthdr = ipv6h->nexthdr;
++ u8 nexthdr = ipv6h->nexthdr;
+
+ while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+ struct ipv6_opt_hdr *hdr;
+@@ -410,25 +410,25 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+- struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+- if (frag_hdr->frag_off)
+- break;
+ optlen = 8;
+ } else if (nexthdr == NEXTHDR_AUTH) {
+ optlen = ipv6_authlen(hdr);
+ } else {
+ optlen = ipv6_optlen(hdr);
+ }
+- /* cache hdr->nexthdr, since pskb_may_pull() might
+- * invalidate hdr
+- */
+- next = hdr->nexthdr;
+- if (nexthdr == NEXTHDR_DEST) {
+- u16 i = 2;
+
+- /* Remember : hdr is no longer valid at this point. */
+- if (!pskb_may_pull(skb, off + optlen))
++ if (!pskb_may_pull(skb, off + optlen))
++ break;
++
++ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
++ if (nexthdr == NEXTHDR_FRAGMENT) {
++ struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;
++
++ if (frag_hdr->frag_off)
+ break;
++ }
++ if (nexthdr == NEXTHDR_DEST) {
++ u16 i = 2;
+
+ while (1) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+@@ -449,7 +449,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ i++;
+ }
+ }
+- nexthdr = next;
++ nexthdr = hdr->nexthdr;
+ off += optlen;
+ }
+ return 0;
+@@ -796,8 +796,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ struct sk_buff *skb),
+ bool log_ecn_err)
+ {
+- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+- int err;
++ const struct ipv6hdr *ipv6h;
++ int nh, err;
+
+ if ((!(tpi->flags & TUNNEL_CSUM) &&
+ (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
+@@ -829,7 +829,6 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ goto drop;
+ }
+
+- ipv6h = ipv6_hdr(skb);
+ skb->protocol = eth_type_trans(skb, tunnel->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ } else {
+@@ -837,7 +836,23 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ skb_reset_mac_header(skb);
+ }
+
++ /* Save offset of outer header relative to skb->head,
++ * because we are going to reset the network header to the inner header
++ * and might change skb->head.
++ */
++ nh = skb_network_header(skb) - skb->head;
++
+ skb_reset_network_header(skb);
++
++ if (!pskb_inet_may_pull(skb)) {
++ DEV_STATS_INC(tunnel->dev, rx_length_errors);
++ DEV_STATS_INC(tunnel->dev, rx_errors);
++ goto drop;
++ }
++
++ /* Get the outer header. */
++ ipv6h = (struct ipv6hdr *)(skb->head + nh);
++
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+@@ -1493,7 +1508,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+ tdev = __dev_get_by_index(t->net, p->link);
+
+ if (tdev) {
+- dev->hard_header_len = tdev->hard_header_len + t_hlen;
++ dev->needed_headroom = tdev->hard_header_len +
++ tdev->needed_headroom + t_hlen;
+ mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
+
+ mtu = mtu - t_hlen;
+@@ -1717,7 +1733,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ {
+ struct ip6_tnl *tnl = netdev_priv(dev);
++ int t_hlen;
+
++ t_hlen = tnl->hlen + sizeof(struct ipv6hdr);
+ if (tnl->parms.proto == IPPROTO_IPV6) {
+ if (new_mtu < IPV6_MIN_MTU)
+ return -EINVAL;
+@@ -1726,10 +1744,10 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ return -EINVAL;
+ }
+ if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
+- if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
++ if (new_mtu > IP6_MAX_MTU - dev->hard_header_len - t_hlen)
+ return -EINVAL;
+ } else {
+- if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
++ if (new_mtu > IP_MAX_MTU - dev->hard_header_len - t_hlen)
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+@@ -1875,14 +1893,14 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ t_hlen = t->hlen + sizeof(struct ipv6hdr);
+
+ dev->type = ARPHRD_TUNNEL6;
+- dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ dev->mtu = ETH_DATA_LEN - t_hlen;
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ dev->mtu -= 8;
+ dev->min_mtu = ETH_MIN_MTU;
+- dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
++ dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len - t_hlen;
+
+ netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
++ netdev_lockdep_set_classes(dev);
+ return 0;
+
+ destroy_dst:
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 73c85d4e0e9cd5..04e4368fe46557 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -935,6 +935,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
+ if (!dev->tstats)
+ return -ENOMEM;
+ netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
++ netdev_lockdep_set_classes(dev);
+ return 0;
+ }
+
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 5ce25bcb9974de..6e2f77a95a657a 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1789,7 +1789,7 @@ static void mld_sendpack(struct sk_buff *skb)
+
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
+- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+
+ payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
+ sizeof(*pip6);
+@@ -2147,8 +2147,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ full_len = sizeof(struct ipv6hdr) + payload_len;
+
+ rcu_read_lock();
+- IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
+- IPSTATS_MIB_OUT, full_len);
++ IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
+ rcu_read_unlock();
+
+ skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
+@@ -2720,11 +2719,14 @@ void ipv6_mc_down(struct inet6_dev *idev)
+ /* Should stop work after group drop, or we will
+ * start work again in mld_ifc_event()
+ */
+- synchronize_net();
+ mld_query_stop_work(idev);
+ mld_report_stop_work(idev);
++
++ mutex_lock(&idev->mc_lock);
+ mld_ifc_stop_work(idev);
+ mld_gq_stop_work(idev);
++ mutex_unlock(&idev->mc_lock);
++
+ mld_dad_stop_work(idev);
+ }
+
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 553c8664e0a7a3..2062ab94721e37 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ return NULL;
+ memset(ndopts, 0, sizeof(*ndopts));
+ while (opt_len) {
++ bool unknown = false;
+ int l;
+ if (opt_len < sizeof(struct nd_opt_hdr))
+ return NULL;
+@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
+ break;
+ #endif
+ default:
+- if (ndisc_is_useropt(dev, nd_opt)) {
+- ndopts->nd_useropts_end = nd_opt;
+- if (!ndopts->nd_useropts)
+- ndopts->nd_useropts = nd_opt;
+- } else {
+- /*
+- * Unknown options must be silently ignored,
+- * to accommodate future extension to the
+- * protocol.
+- */
+- ND_PRINTK(2, notice,
+- "%s: ignored unsupported option; type=%d, len=%d\n",
+- __func__,
+- nd_opt->nd_opt_type,
+- nd_opt->nd_opt_len);
+- }
++ unknown = true;
++ }
++ if (ndisc_is_useropt(dev, nd_opt)) {
++ ndopts->nd_useropts_end = nd_opt;
++ if (!ndopts->nd_useropts)
++ ndopts->nd_useropts = nd_opt;
++ } else if (unknown) {
++ /*
++ * Unknown options must be silently ignored,
++ * to accommodate future extension to the
++ * protocol.
++ */
++ ND_PRINTK(2, notice,
++ "%s: ignored unsupported option; type=%d, len=%d\n",
++ __func__,
++ nd_opt->nd_opt_type,
++ nd_opt->nd_opt_len);
+ }
+ next_opt:
+ opt_len -= l;
+@@ -504,7 +506,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
+- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+ net, sk, skb, NULL, dst->dev,
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index fd9f049d6d41e7..131f7bb2110d3a 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1125,6 +1125,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ void *loc_cpu_entry;
+ struct ip6t_entry *iter;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -1133,6 +1135,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+@@ -1501,6 +1505,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ void *loc_cpu_entry;
+ struct ip6t_entry *iter;
+
++ if (len < sizeof(tmp))
++ return -EINVAL;
+ if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+@@ -1509,6 +1515,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
+ return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
++ if ((u64)len < (u64)tmp.size + sizeof(tmp))
++ return -EINVAL;
+
+ tmp.name[sizeof(tmp.name)-1] = 0;
+
+diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
+index bf3cb3a13600cd..52d597b16b658b 100644
+--- a/net/ipv6/netfilter/ip6table_nat.c
++++ b/net/ipv6/netfilter/ip6table_nat.c
+@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
+
+ static int __init ip6table_nat_init(void)
+ {
+- int ret = xt_register_template(&nf_nat_ipv6_table,
+- ip6table_nat_table_init);
++ int ret;
+
++ /* net->gen->ptr[ip6table_nat_net_id] must be allocated
++ * before calling ip6t_nat_register_lookups().
++ */
++ ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ if (ret < 0)
+ return ret;
+
+- ret = register_pernet_subsys(&ip6table_nat_net_ops);
++ ret = xt_register_template(&nf_nat_ipv6_table,
++ ip6table_nat_table_init);
+ if (ret)
+- xt_unregister_template(&nf_nat_ipv6_table);
++ unregister_pernet_subsys(&ip6table_nat_net_ops);
+
+ return ret;
+ }
+
+ static void __exit ip6table_nat_exit(void)
+ {
+- unregister_pernet_subsys(&ip6table_nat_net_ops);
+ xt_unregister_template(&nf_nat_ipv6_table);
++ unregister_pernet_subsys(&ip6table_nat_net_ops);
+ }
+
+ module_init(ip6table_nat_init);
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index b2dd48911c8d62..c78b13ea5b196a 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -155,6 +155,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+ };
+ struct inet_frag_queue *q;
+
++ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
++ IPV6_ADDR_LINKLOCAL)))
++ key.iif = 0;
++
+ q = inet_frag_find(nf_frag->fqdir, &key);
+ if (!q)
+ return NULL;
+@@ -294,6 +298,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
+ }
+
+ skb_dst_drop(skb);
++ skb_orphan(skb);
+ return -EINPROGRESS;
+
+ insert_error:
+@@ -469,7 +474,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+ hdr = ipv6_hdr(skb);
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
+- skb_orphan(skb);
+ fq = fq_find(net, fhdr->identification, user, hdr,
+ skb->dev ? skb->dev->ifindex : 0);
+ if (fq == NULL) {
+diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
+index a0a2de30be3e7b..0c39c77fe8a8a4 100644
+--- a/net/ipv6/netfilter/nf_dup_ipv6.c
++++ b/net/ipv6/netfilter/nf_dup_ipv6.c
+@@ -47,11 +47,12 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
+ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif)
+ {
++ local_bh_disable();
+ if (this_cpu_read(nf_skb_duplicated))
+- return;
++ goto out;
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+- return;
++ goto out;
+
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_reset_ct(skb);
+@@ -69,6 +70,8 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
+ } else {
+ kfree_skb(skb);
+ }
++out:
++ local_bh_enable();
+ }
+ EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 58ccdb08c0fd18..4e0976534648c4 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+ const struct tcphdr *oth, unsigned int otcplen)
+ {
+ struct tcphdr *tcph;
+- int needs_ack;
+
+ skb_reset_transport_header(nskb);
+- tcph = skb_put(nskb, sizeof(struct tcphdr));
++ tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
+ /* Truncate to length (no data) */
+ tcph->doff = sizeof(struct tcphdr)/4;
+ tcph->source = oth->dest;
+ tcph->dest = oth->source;
+
+ if (oth->ack) {
+- needs_ack = 0;
+ tcph->seq = oth->ack_seq;
+- tcph->ack_seq = 0;
+ } else {
+- needs_ack = 1;
+ tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+ otcplen - (oth->doff<<2));
+- tcph->seq = 0;
++ tcph->ack = 1;
+ }
+
+- /* Reset flags */
+- ((u_int8_t *)tcph)[13] = 0;
+ tcph->rst = 1;
+- tcph->ack = needs_ack;
+- tcph->window = 0;
+- tcph->urg_ptr = 0;
+- tcph->check = 0;
+
+ /* Adjust TCP checksum */
+ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
+@@ -278,13 +268,11 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
+ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ int hook)
+ {
+- struct net_device *br_indev __maybe_unused;
+ struct sk_buff *nskb;
+ struct tcphdr _otcph;
+ const struct tcphdr *otcph;
+ unsigned int otcplen, hh_len;
+ const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+- struct ipv6hdr *ip6h;
+ struct dst_entry *dst = NULL;
+ struct flowi6 fl6;
+
+@@ -340,8 +328,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ nskb->mark = fl6.flowi6_mark;
+
+ skb_reserve(nskb, hh_len + dst->header_len);
+- ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+- ip6_dst_hoplimit(dst));
++ nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, ip6_dst_hoplimit(dst));
+ nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
+
+ nf_ct_attach(nskb, oldskb);
+@@ -354,9 +341,16 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+- br_indev = nf_bridge_get_physindev(oldskb);
+- if (br_indev) {
++ if (nf_bridge_info_exists(oldskb)) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
++ struct ipv6hdr *ip6h = ipv6_hdr(nskb);
++ struct net_device *br_indev;
++
++ br_indev = nf_bridge_get_physindev(oldskb, net);
++ if (!br_indev) {
++ kfree_skb(nskb);
++ return;
++ }
+
+ nskb->dev = br_indev;
+ nskb->protocol = htons(ETH_P_IPV6);
+diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
+index 36dc14b34388c8..c9f1634b3838ae 100644
+--- a/net/ipv6/netfilter/nft_fib_ipv6.c
++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
+@@ -41,8 +41,6 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
+ if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) {
+ lookup_flags |= RT6_LOOKUP_F_IFACE;
+ fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
+- } else if (priv->flags & NFTA_FIB_F_IIF) {
+- fl6->flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev);
+ }
+
+ if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST)
+@@ -75,6 +73,8 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
+ else if (priv->flags & NFTA_FIB_F_OIF)
+ dev = nft_out(pkt);
+
++ fl6.flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev);
++
+ nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
+
+ if (dev && nf_ipv6_chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
+@@ -165,6 +165,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ .flowi6_iif = LOOPBACK_IFINDEX,
+ .flowi6_proto = pkt->tprot,
+ .flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
++ .flowi6_l3mdev = l3mdev_master_ifindex_rcu(nft_in(pkt)),
+ };
+ struct rt6_info *rt;
+ int lookup_flags;
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 5831aaa53d75ea..25243737fbc425 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -56,7 +56,7 @@ static int ping_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+- return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
++ return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
+ }
+
+ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
+index e20b3705c2d2ac..6d1d9221649d52 100644
+--- a/net/ipv6/proc.c
++++ b/net/ipv6/proc.c
+@@ -61,7 +61,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
+ SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS),
+ SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS),
+ SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
+- SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTPKTS),
++ SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS),
+ SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS),
+ SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
+ SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
+@@ -84,6 +84,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
+ SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
++ SNMP_MIB_ITEM("Ip6OutTransmits", IPSTATS_MIB_OUTPKTS),
+ SNMP_MIB_SENTINEL
+ };
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 42fcec3ecf5e17..0a3e12502b05a6 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -651,7 +651,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+ * have been queued for deletion.
+ */
+ rcu_read_lock();
+- IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
++ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
+ err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
+ NULL, rt->dst.dev, dst_output);
+ if (err > 0)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 5ebc47da1000c2..2af98edef87ee0 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -369,7 +369,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ * the source of the fragment, with the Pointer field set to zero.
+ */
+ nexthdr = hdr->nexthdr;
+- if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
++ if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
+ __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
+ IPSTATS_MIB_INHDRERRORS);
+ icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 9c687b357e6a41..a9104c4c1c02d9 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -87,7 +87,8 @@ struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
+ static unsigned int ip6_default_advmss(const struct dst_entry *dst);
+ INDIRECT_CALLABLE_SCOPE
+ unsigned int ip6_mtu(const struct dst_entry *dst);
+-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
++static void ip6_negative_advice(struct sock *sk,
++ struct dst_entry *dst);
+ static void ip6_dst_destroy(struct dst_entry *);
+ static void ip6_dst_ifdown(struct dst_entry *,
+ struct net_device *dev);
+@@ -174,7 +175,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
+ struct net_device *rt_dev = rt->dst.dev;
+ bool handled = false;
+
+- if (rt_idev->dev == dev) {
++ if (rt_idev && rt_idev->dev == dev) {
+ rt->rt6i_idev = in6_dev_get(blackhole_netdev);
+ in6_dev_put(rt_idev);
+ handled = true;
+@@ -637,6 +638,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
+ rcu_read_lock();
+ last_probe = READ_ONCE(fib6_nh->last_probe);
+ idev = __in6_dev_get(dev);
++ if (!idev)
++ goto out;
+ neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
+ if (neigh) {
+ if (READ_ONCE(neigh->nud_state) & NUD_VALID)
+@@ -1398,6 +1401,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
+ struct rt6_info *prev, **p;
+
+ p = this_cpu_ptr(res->nh->rt6i_pcpu);
++ /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
+ prev = xchg(p, NULL);
+ if (prev) {
+ dst_dev_put(&prev->dst);
+@@ -2760,24 +2764,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
+ }
+ EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
+
+-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
++static void ip6_negative_advice(struct sock *sk,
++ struct dst_entry *dst)
+ {
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+- if (rt) {
+- if (rt->rt6i_flags & RTF_CACHE) {
+- rcu_read_lock();
+- if (rt6_check_expired(rt)) {
+- rt6_remove_exception_rt(rt);
+- dst = NULL;
+- }
+- rcu_read_unlock();
+- } else {
+- dst_release(dst);
+- dst = NULL;
++ if (rt->rt6i_flags & RTF_CACHE) {
++ rcu_read_lock();
++ if (rt6_check_expired(rt)) {
++ /* counteract the dst_release() in sk_dst_reset() */
++ dst_hold(dst);
++ sk_dst_reset(sk);
++
++ rt6_remove_exception_rt(rt);
+ }
++ rcu_read_unlock();
++ return;
+ }
+- return dst;
++ sk_dst_reset(sk);
+ }
+
+ static void ip6_link_failure(struct sk_buff *skb)
+@@ -3591,7 +3595,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ if (!dev)
+ goto out;
+
+- if (idev->cnf.disable_ipv6) {
++ if (!idev || idev->cnf.disable_ipv6) {
+ NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
+ err = -EACCES;
+ goto out;
+@@ -3750,7 +3754,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ if (!rt)
+ goto out;
+
+- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
++ rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
+ extack);
+ if (IS_ERR(rt->fib6_metrics)) {
+ err = PTR_ERR(rt->fib6_metrics);
+@@ -3763,10 +3767,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ rt->dst_nocount = true;
+
+ if (cfg->fc_flags & RTF_EXPIRES)
+- fib6_set_expires_locked(rt, jiffies +
+- clock_t_to_jiffies(cfg->fc_expires));
++ fib6_set_expires(rt, jiffies +
++ clock_t_to_jiffies(cfg->fc_expires));
+ else
+- fib6_clean_expires_locked(rt);
++ fib6_clean_expires(rt);
+
+ if (cfg->fc_protocol == RTPROT_UNSPEC)
+ cfg->fc_protocol = RTPROT_BOOT;
+@@ -4434,7 +4438,7 @@ static void rtmsg_to_fib6_config(struct net *net,
+ .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+ : RT6_TABLE_MAIN,
+ .fc_ifindex = rtmsg->rtmsg_ifindex,
+- .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
++ .fc_metric = rtmsg->rtmsg_metric,
+ .fc_expires = rtmsg->rtmsg_info,
+ .fc_dst_len = rtmsg->rtmsg_dst_len,
+ .fc_src_len = rtmsg->rtmsg_src_len,
+@@ -4464,6 +4468,9 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
+ rtnl_lock();
+ switch (cmd) {
+ case SIOCADDRT:
++ /* Only do the default setting of fc_metric in route adding */
++ if (cfg.fc_metric == 0)
++ cfg.fc_metric = IP6_RT_PRIO_USER;
+ err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
+ break;
+ case SIOCDELRT:
+@@ -5332,19 +5339,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ err_nh = NULL;
+ list_for_each_entry(nh, &rt6_nh_list, next) {
+ err = __ip6_ins_rt(nh->fib6_info, info, extack);
+- fib6_info_release(nh->fib6_info);
+-
+- if (!err) {
+- /* save reference to last route successfully inserted */
+- rt_last = nh->fib6_info;
+
+- /* save reference to first route for notification */
+- if (!rt_notif)
+- rt_notif = nh->fib6_info;
+- }
+-
+- /* nh->fib6_info is used or freed at this point, reset to NULL*/
+- nh->fib6_info = NULL;
+ if (err) {
+ if (replace && nhn)
+ NL_SET_ERR_MSG_MOD(extack,
+@@ -5352,6 +5347,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ err_nh = nh;
+ goto add_errout;
+ }
++ /* save reference to last route successfully inserted */
++ rt_last = nh->fib6_info;
++
++ /* save reference to first route for notification */
++ if (!rt_notif)
++ rt_notif = nh->fib6_info;
+
+ /* Because each route is added like a single route we remove
+ * these flags after the first nexthop: if there is a collision,
+@@ -5412,8 +5413,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+
+ cleanup:
+ list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
+- if (nh->fib6_info)
+- fib6_info_release(nh->fib6_info);
++ fib6_info_release(nh->fib6_info);
+ list_del(&nh->next);
+ kfree(nh);
+ }
+@@ -5678,7 +5678,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ goto nla_put_failure;
+ } else if (dest) {
+ struct in6_addr saddr_buf;
+- if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
++ if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
+ nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
+ goto nla_put_failure;
+ }
+@@ -6334,12 +6334,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
+ if (!write)
+ return -EINVAL;
+
+- net = (struct net *)ctl->extra1;
+- delay = net->ipv6.sysctl.flush_delay;
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+
++ net = (struct net *)ctl->extra1;
++ delay = net->ipv6.sysctl.flush_delay;
+ fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
+ return 0;
+ }
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index a013b92cbb860a..db3c19a42e1ca7 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ if (unlikely(err))
+ goto drop;
+
+- preempt_disable();
++ local_bh_disable();
+ dst = dst_cache_get(&rlwt->cache);
+- preempt_enable();
++ local_bh_enable();
+
+ if (unlikely(!dst)) {
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
+- preempt_disable();
++ local_bh_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+- preempt_enable();
++ local_bh_enable();
+ }
+
+ skb_dst_drop(skb);
+@@ -263,34 +263,34 @@ static int rpl_input(struct sk_buff *skb)
+ rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+
+ err = rpl_do_srh(skb, rlwt);
+- if (unlikely(err)) {
+- kfree_skb(skb);
+- return err;
+- }
++ if (unlikely(err))
++ goto drop;
+
+- preempt_disable();
++ local_bh_disable();
+ dst = dst_cache_get(&rlwt->cache);
+- preempt_enable();
+
+ if (!dst) {
+ ip6_route_input(skb);
+ dst = skb_dst(skb);
+ if (!dst->error) {
+- preempt_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst,
+ &ipv6_hdr(skb)->saddr);
+- preempt_enable();
+ }
+ } else {
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+ }
++ local_bh_enable();
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+- return err;
++ goto drop;
+
+ return dst_input(skb);
++
++drop:
++ kfree_skb(skb);
++ return err;
+ }
+
+ static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 29346a6eec9ffe..a31521e270f785 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -512,22 +512,24 @@ int __init seg6_init(void)
+ {
+ int err;
+
+- err = genl_register_family(&seg6_genl_family);
++ err = register_pernet_subsys(&ip6_segments_ops);
+ if (err)
+ goto out;
+
+- err = register_pernet_subsys(&ip6_segments_ops);
++ err = genl_register_family(&seg6_genl_family);
+ if (err)
+- goto out_unregister_genl;
++ goto out_unregister_pernet;
+
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ err = seg6_iptunnel_init();
+ if (err)
+- goto out_unregister_pernet;
++ goto out_unregister_genl;
+
+ err = seg6_local_init();
+- if (err)
+- goto out_unregister_pernet;
++ if (err) {
++ seg6_iptunnel_exit();
++ goto out_unregister_genl;
++ }
+ #endif
+
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+@@ -548,11 +550,13 @@ int __init seg6_init(void)
+ #endif
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+-out_unregister_pernet:
+- unregister_pernet_subsys(&ip6_segments_ops);
+-#endif
+ out_unregister_genl:
++#endif
++#if IS_ENABLED(CONFIG_IPV6_SEG6_LWTUNNEL) || IS_ENABLED(CONFIG_IPV6_SEG6_HMAC)
+ genl_unregister_family(&seg6_genl_family);
++#endif
++out_unregister_pernet:
++ unregister_pernet_subsys(&ip6_segments_ops);
+ goto out;
+ }
+
+@@ -562,8 +566,9 @@ void seg6_exit(void)
+ seg6_hmac_exit();
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
++ seg6_local_exit();
+ seg6_iptunnel_exit();
+ #endif
+- unregister_pernet_subsys(&ip6_segments_ops);
+ genl_unregister_family(&seg6_genl_family);
++ unregister_pernet_subsys(&ip6_segments_ops);
+ }
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index d43c50a7310d64..3c3800223e0e0d 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -354,6 +354,7 @@ static int seg6_hmac_init_algo(void)
+ struct crypto_shash *tfm;
+ struct shash_desc *shash;
+ int i, alg_count, cpu;
++ int ret = -ENOMEM;
+
+ alg_count = ARRAY_SIZE(hmac_algos);
+
+@@ -364,12 +365,14 @@ static int seg6_hmac_init_algo(void)
+ algo = &hmac_algos[i];
+ algo->tfms = alloc_percpu(struct crypto_shash *);
+ if (!algo->tfms)
+- return -ENOMEM;
++ goto error_out;
+
+ for_each_possible_cpu(cpu) {
+ tfm = crypto_alloc_shash(algo->name, 0, 0);
+- if (IS_ERR(tfm))
+- return PTR_ERR(tfm);
++ if (IS_ERR(tfm)) {
++ ret = PTR_ERR(tfm);
++ goto error_out;
++ }
+ p_tfm = per_cpu_ptr(algo->tfms, cpu);
+ *p_tfm = tfm;
+ }
+@@ -381,18 +384,22 @@ static int seg6_hmac_init_algo(void)
+
+ algo->shashs = alloc_percpu(struct shash_desc *);
+ if (!algo->shashs)
+- return -ENOMEM;
++ goto error_out;
+
+ for_each_possible_cpu(cpu) {
+ shash = kzalloc_node(shsize, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!shash)
+- return -ENOMEM;
++ goto error_out;
+ *per_cpu_ptr(algo->shashs, cpu) = shash;
+ }
+ }
+
+ return 0;
++
++error_out:
++ seg6_hmac_exit();
++ return ret;
+ }
+
+ int __init seg6_hmac_init(void)
+@@ -410,22 +417,29 @@ int __net_init seg6_hmac_net_init(struct net *net)
+ void seg6_hmac_exit(void)
+ {
+ struct seg6_hmac_algo *algo = NULL;
++ struct crypto_shash *tfm;
++ struct shash_desc *shash;
+ int i, alg_count, cpu;
+
+ alg_count = ARRAY_SIZE(hmac_algos);
+ for (i = 0; i < alg_count; i++) {
+ algo = &hmac_algos[i];
+- for_each_possible_cpu(cpu) {
+- struct crypto_shash *tfm;
+- struct shash_desc *shash;
+
+- shash = *per_cpu_ptr(algo->shashs, cpu);
+- kfree(shash);
+- tfm = *per_cpu_ptr(algo->tfms, cpu);
+- crypto_free_shash(tfm);
++ if (algo->shashs) {
++ for_each_possible_cpu(cpu) {
++ shash = *per_cpu_ptr(algo->shashs, cpu);
++ kfree(shash);
++ }
++ free_percpu(algo->shashs);
++ }
++
++ if (algo->tfms) {
++ for_each_possible_cpu(cpu) {
++ tfm = *per_cpu_ptr(algo->tfms, cpu);
++ crypto_free_shash(tfm);
++ }
++ free_percpu(algo->tfms);
+ }
+- free_percpu(algo->tfms);
+- free_percpu(algo->shashs);
+ }
+ }
+ EXPORT_SYMBOL(seg6_hmac_exit);
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 03b877ff45588b..098632adc9b5af 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -459,34 +459,30 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ int err;
+
+ err = seg6_do_srh(skb);
+- if (unlikely(err)) {
+- kfree_skb(skb);
+- return err;
+- }
++ if (unlikely(err))
++ goto drop;
+
+ slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+
+- preempt_disable();
++ local_bh_disable();
+ dst = dst_cache_get(&slwt->cache);
+- preempt_enable();
+
+ if (!dst) {
+ ip6_route_input(skb);
+ dst = skb_dst(skb);
+ if (!dst->error) {
+- preempt_disable();
+ dst_cache_set_ip6(&slwt->cache, dst,
+ &ipv6_hdr(skb)->saddr);
+- preempt_enable();
+ }
+ } else {
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+ }
++ local_bh_enable();
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+- return err;
++ goto drop;
+
+ if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+@@ -494,6 +490,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ skb_dst(skb)->dev, seg6_input_finish);
+
+ return seg6_input_finish(dev_net(skb->dev), NULL, skb);
++drop:
++ kfree_skb(skb);
++ return err;
+ }
+
+ static int seg6_input_nf(struct sk_buff *skb)
+@@ -535,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+
+ slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+
+- preempt_disable();
++ local_bh_disable();
+ dst = dst_cache_get(&slwt->cache);
+- preempt_enable();
++ local_bh_enable();
+
+ if (unlikely(!dst)) {
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -557,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ goto drop;
+ }
+
+- preempt_disable();
++ local_bh_disable();
+ dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+- preempt_enable();
++ local_bh_enable();
+ }
+
+ skb_dst_drop(skb);
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index 24e2b4b494cb08..c434940131b1d0 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -941,8 +941,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
+
+ if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+- dev_net(skb->dev), NULL, skb, NULL,
+- skb_dst(skb)->dev, input_action_end_dx6_finish);
++ dev_net(skb->dev), NULL, skb, skb->dev,
++ NULL, input_action_end_dx6_finish);
+
+ return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+@@ -991,8 +991,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
+
+ if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+- dev_net(skb->dev), NULL, skb, NULL,
+- skb_dst(skb)->dev, input_action_end_dx4_finish);
++ dev_net(skb->dev), NULL, skb, skb->dev,
++ NULL, input_action_end_dx4_finish);
+
+ return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index cc24cefdb85c09..eb4c8e2a2b12e0 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1460,6 +1460,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ return err;
+ }
+ netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
++ netdev_lockdep_set_classes(dev);
+ return 0;
+ }
+
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5014aa66345276..593ead8a45d791 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ treq = tcp_rsk(req);
+ treq->tfo_listener = false;
+
+- if (security_inet_conn_request(sk, skb, req))
+- goto out_free;
+-
+ req->mss = mss;
+ ireq->ir_rmt_port = th->source;
+ ireq->ir_num = ntohs(th->dest);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
++
++ if (security_inet_conn_request(sk, skb, req))
++ goto out_free;
++
+ if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
+@@ -242,7 +243,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ goto out_free;
+ }
+
+- req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
++ req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :dst_metric(dst, RTAX_WINDOW);
+ /* limit the window selection if the user enforce a smaller rx buffer */
+ full_space = tcp_full_space(sk);
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 44b6949d72b221..83b48dc2b3ee26 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -135,7 +135,7 @@ static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+
+ sock_owned_by_me(sk);
+
+- return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
++ return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len);
+ }
+
+ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+@@ -488,14 +488,10 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+
+ ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
+
+- if (!sock_owned_by_user(sk)) {
+- WRITE_ONCE(sk->sk_err, err);
+- sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
+-
+- tcp_done(sk);
+- } else {
++ if (!sock_owned_by_user(sk))
++ tcp_done_with_error(sk, err);
++ else
+ WRITE_ONCE(sk->sk_err_soft, err);
+- }
+ goto out;
+ case TCP_LISTEN:
+ break;
+@@ -1287,7 +1283,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ */
+
+ newsk->sk_gso_type = SKB_GSO_TCPV6;
+- ip6_dst_store(newsk, dst, NULL, NULL);
+ inet6_sk_rx_dst_set(newsk, skb);
+
+ inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
+@@ -1298,6 +1293,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+
+ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+
++ ip6_dst_store(newsk, dst, NULL, NULL);
++
+ newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+ newnp->saddr = ireq->ir_v6_loc_addr;
+ newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+@@ -2216,15 +2213,9 @@ static void __net_exit tcpv6_net_exit(struct net *net)
+ inet_ctl_sock_destroy(net->ipv6.tcp_sk);
+ }
+
+-static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
+-{
+- tcp_twsk_purge(net_exit_list, AF_INET6);
+-}
+-
+ static struct pernet_operations tcpv6_net_ops = {
+ .init = tcpv6_net_init,
+ .exit = tcpv6_net_exit,
+- .exit_batch = tcpv6_net_exit_batch,
+ };
+
+ int __init tcpv6_init(void)
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 86b5d509a4688c..c77ee9a3cde24c 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -171,15 +171,21 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ {
+ struct sock *sk, *result;
+ int score, badness;
++ bool need_rescore;
+
+ result = NULL;
+ badness = -1;
+ udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+- score = compute_score(sk, net, saddr, sport,
+- daddr, hnum, dif, sdif);
++ need_rescore = false;
++rescore:
++ score = compute_score(need_rescore ? result : sk, net, saddr,
++ sport, daddr, hnum, dif, sdif);
+ if (score > badness) {
+ badness = score;
+
++ if (need_rescore)
++ continue;
++
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ result = sk;
+ continue;
+@@ -200,8 +206,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ if (IS_ERR(result))
+ continue;
+
+- badness = compute_score(sk, net, saddr, sport,
+- daddr, hnum, dif, sdif);
++ /* compute_score is too long of a function to be
++ * inlined, and calling it again here yields
+ * measurable overhead for some
++ * workloads. Work around it by jumping
++ * backwards to rescore 'result'.
++ */
++ need_rescore = true;
++ goto rescore;
+ }
+ }
+ return result;
+@@ -275,7 +287,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
+ struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
+ __be16 sport, __be16 dport)
+ {
+- const struct ipv6hdr *iph = ipv6_hdr(skb);
++ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
++ const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
+ struct net *net = dev_net(skb->dev);
+ int iif, sdif;
+
+@@ -410,10 +423,11 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ *addr_len = sizeof(*sin6);
+
+ BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
+- (struct sockaddr *)sin6);
++ (struct sockaddr *)sin6,
++ addr_len);
+ }
+
+- if (udp_sk(sk)->gro_enabled)
++ if (udp_test_bit(GRO_ENABLED, sk))
+ udp_cmsg_recv(msg, sk, skb);
+
+ if (np->rxopt.all)
+@@ -449,7 +463,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ goto try_again;
+ }
+
+-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
++DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+ void udpv6_encap_enable(void)
+ {
+ static_branch_inc(&udpv6_encap_needed_key);
+@@ -571,7 +585,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
+ inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+
+- if (!sk || udp_sk(sk)->encap_type) {
++ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
+ /* No socket for error: try tunnels before discarding */
+ if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+ sk = __udp6_lib_err_encap(net, hdr, offset, uh,
+@@ -688,7 +702,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ nf_reset_ct(skb);
+
+- if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
++ if (static_branch_unlikely(&udpv6_encap_needed_key) &&
++ READ_ONCE(up->encap_type)) {
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
+ /*
+@@ -726,16 +741,17 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ /*
+ * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+ */
+- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
++ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
++ u16 pcrlen = READ_ONCE(up->pcrlen);
+
+- if (up->pcrlen == 0) { /* full coverage was set */
++ if (pcrlen == 0) { /* full coverage was set */
+ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
+ UDP_SKB_CB(skb)->cscov, skb->len);
+ goto drop;
+ }
+- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
++ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
+ net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
+- UDP_SKB_CB(skb)->cscov, up->pcrlen);
++ UDP_SKB_CB(skb)->cscov, pcrlen);
+ goto drop;
+ }
+ }
+@@ -858,7 +874,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
+ /* If zero checksum and no_check is not on for
+ * the socket then skip it.
+ */
+- if (!uh->check && !udp_sk(sk)->no_check6_rx)
++ if (!uh->check && !udp_get_no_check6_rx(sk))
+ continue;
+ if (!first) {
+ first = sk;
+@@ -980,7 +996,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
+ udp6_sk_rx_dst_set(sk, dst);
+
+- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
++ if (!uh->check && !udp_get_no_check6_rx(sk)) {
+ if (refcounted)
+ sock_put(sk);
+ goto report_csum_error;
+@@ -1002,7 +1018,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ /* Unicast */
+ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ if (sk) {
+- if (!uh->check && !udp_sk(sk)->no_check6_rx)
++ if (!uh->check && !udp_get_no_check6_rx(sk))
+ goto report_csum_error;
+ return udp6_unicast_rcv_skb(sk, skb, uh);
+ }
+@@ -1132,7 +1148,7 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
+ udp_flush_pending_frames(sk);
+ else if (up->pending) {
+ up->len = 0;
+- up->pending = 0;
++ WRITE_ONCE(up->pending, 0);
+ ip6_flush_pending_frames(sk);
+ }
+ }
+@@ -1155,7 +1171,7 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+- return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
++ return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
+ }
+
+ /**
+@@ -1241,7 +1257,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+- if (udp_sk(sk)->no_check6_tx) {
++ if (udp_get_no_check6_tx(sk)) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1262,7 +1278,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+
+ if (is_udplite)
+ csum = udplite_csum(skb);
+- else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
++ else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
+ skb->ip_summed = CHECKSUM_NONE;
+ goto send;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+@@ -1310,7 +1326,7 @@ static int udp_v6_push_pending_frames(struct sock *sk)
+ &inet_sk(sk)->cork.base);
+ out:
+ up->len = 0;
+- up->pending = 0;
++ WRITE_ONCE(up->pending, 0);
+ return err;
+ }
+
+@@ -1332,7 +1348,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ int addr_len = msg->msg_namelen;
+ bool connected = false;
+ int ulen = len;
+- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
++ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
+ int err;
+ int is_udplite = IS_UDPLITE(sk);
+ int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+@@ -1367,7 +1383,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ default:
+ return -EINVAL;
+ }
+- } else if (!up->pending) {
++ } else if (!READ_ONCE(up->pending)) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
+ daddr = &sk->sk_v6_daddr;
+@@ -1398,8 +1414,8 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ return -EMSGSIZE;
+
+ getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
+- if (up->pending) {
+- if (up->pending == AF_INET)
++ if (READ_ONCE(up->pending)) {
++ if (READ_ONCE(up->pending) == AF_INET)
+ return udp_sendmsg(sk, msg, len);
+ /*
+ * There are pending frames.
+@@ -1473,9 +1489,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ ipc6.opt = opt;
+
+ err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
+- if (err > 0)
++ if (err > 0) {
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
+ &ipc6);
++ connected = false;
++ }
+ if (err < 0) {
+ fl6_sock_release(flowlabel);
+ return err;
+@@ -1487,7 +1505,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ }
+ if (!(opt->opt_nflen|opt->opt_flen))
+ opt = NULL;
+- connected = false;
+ }
+ if (!opt) {
+ opt = txopt_get(np);
+@@ -1508,6 +1525,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
+ err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
+ (struct sockaddr *)sin6,
++ &addr_len,
+ &fl6->saddr);
+ if (err)
+ goto out_no_dst;
+@@ -1589,7 +1607,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ goto out;
+ }
+
+- up->pending = AF_INET6;
++ WRITE_ONCE(up->pending, AF_INET6);
+
+ do_append_data:
+ if (ipc6.dontfrag < 0)
+@@ -1603,7 +1621,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ else if (!corkreq)
+ err = udp_v6_push_pending_frames(sk);
+ else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
+- up->pending = 0;
++ WRITE_ONCE(up->pending, 0);
+
+ if (err > 0)
+ err = np->recverr ? net_xmit_errno(err) : 0;
+@@ -1644,11 +1662,11 @@ static void udpv6_splice_eof(struct socket *sock)
+ struct sock *sk = sock->sk;
+ struct udp_sock *up = udp_sk(sk);
+
+- if (!up->pending || READ_ONCE(up->corkflag))
++ if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
+ return;
+
+ lock_sock(sk);
+- if (up->pending && !READ_ONCE(up->corkflag))
++ if (up->pending && !udp_test_bit(CORK, sk))
+ udp_v6_push_pending_frames(sk);
+ release_sock(sk);
+ }
+@@ -1670,7 +1688,7 @@ void udpv6_destroy_sock(struct sock *sk)
+ if (encap_destroy)
+ encap_destroy(sk);
+ }
+- if (up->encap_enabled) {
++ if (udp_test_bit(ENCAP_ENABLED, sk)) {
+ static_branch_dec(&udpv6_encap_needed_key);
+ udp_encap_disable();
+ }
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 6b95ba241ebe2a..639a4b506f9b5a 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -164,7 +164,8 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+
+ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
++ const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset);
+ struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+ /* do fraglist only if there is no outer UDP encap (or we already processed it) */
+@@ -174,13 +175,7 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+ skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+- if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+- skb->csum_level++;
+- } else {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- skb->csum_level = 0;
+- }
++ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 267d491e970753..a60bec9b14f14a 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -17,7 +17,6 @@
+ static int udplitev6_sk_init(struct sock *sk)
+ {
+ udpv6_init_sock(sk);
+- udp_sk(sk)->pcflag = UDPLITE_BIT;
+ pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
+ "please contact the netdev mailing list\n");
+ return 0;
+diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
+index 4907ab241d6bed..8432b50d9ce4ca 100644
+--- a/net/ipv6/xfrm6_input.c
++++ b/net/ipv6/xfrm6_input.c
+@@ -56,7 +56,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
+ skb_postpush_rcsum(skb, skb_network_header(skb), nhlen);
+
+ if (xo && (xo->flags & XFRM_GRO)) {
+- skb_mac_header_rebuild(skb);
++ /* The full l2 header needs to be preserved so that re-injecting the packet at l2
++ * works correctly in the presence of vlan tags.
++ */
++ skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
++ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ return 0;
+ }
+@@ -81,14 +85,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
+ struct ipv6hdr *ip6h;
+ int len;
+ int ip6hlen = sizeof(struct ipv6hdr);
+-
+ __u8 *udpdata;
+ __be32 *udpdata32;
+- __u16 encap_type = up->encap_type;
++ u16 encap_type;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return xfrm4_udp_encap_rcv(sk, skb);
+
++ encap_type = READ_ONCE(up->encap_type);
+ /* if this is not encapsulated socket, then just return now */
+ if (!encap_type)
+ return 1;
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 42fb6996b0777a..444b0b4469a49e 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -56,12 +56,18 @@ static int xfrm6_get_saddr(struct net *net, int oif,
+ {
+ struct dst_entry *dst;
+ struct net_device *dev;
++ struct inet6_dev *idev;
+
+ dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
+ if (IS_ERR(dst))
+ return -EHOSTUNREACH;
+
+- dev = ip6_dst_idev(dst)->dev;
++ idev = ip6_dst_idev(dst);
++ if (!idev) {
++ dst_release(dst);
++ return -EHOSTUNREACH;
++ }
++ dev = idev->dev;
+ ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
+ dst_release(dst);
+ return 0;
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 498a0c35b7bb20..815b1df0b2d194 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct iucv_path *path = iucv->path;
+
+- if (iucv->path) {
+- iucv->path = NULL;
++ /* Whoever resets the path pointer must sever and free it. */
++ if (xchg(&iucv->path, NULL)) {
+ if (with_user_data) {
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index fc3fddeb6f36d4..038e1ba9aec270 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -156,7 +156,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
+ static LIST_HEAD(iucv_handler_list);
+
+ /*
+- * iucv_path_table: an array of iucv_path structures.
++ * iucv_path_table: array of pointers to iucv_path structures.
+ */
+ static struct iucv_path **iucv_path_table;
+ static unsigned long iucv_max_pathid;
+@@ -519,7 +519,7 @@ static void iucv_setmask_mp(void)
+ */
+ static void iucv_setmask_up(void)
+ {
+- cpumask_t cpumask;
++ static cpumask_t cpumask;
+ int cpu;
+
+ /* Disable all cpu but the first in cpu_irq_cpumask. */
+@@ -544,7 +544,7 @@ static int iucv_enable(void)
+
+ cpus_read_lock();
+ rc = -ENOMEM;
+- alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
++ alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
+ iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
+ if (!iucv_path_table)
+ goto out;
+@@ -627,23 +627,33 @@ static int iucv_cpu_online(unsigned int cpu)
+
+ static int iucv_cpu_down_prep(unsigned int cpu)
+ {
+- cpumask_t cpumask;
++ cpumask_var_t cpumask;
++ int ret = 0;
+
+ if (!iucv_path_table)
+ return 0;
+
+- cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+- cpumask_clear_cpu(cpu, &cpumask);
+- if (cpumask_empty(&cpumask))
++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++ return -ENOMEM;
++
++ cpumask_copy(cpumask, &iucv_buffer_cpumask);
++ cpumask_clear_cpu(cpu, cpumask);
++ if (cpumask_empty(cpumask)) {
+ /* Can't offline last IUCV enabled cpu. */
+- return -EINVAL;
++ ret = -EINVAL;
++ goto __free_cpumask;
++ }
+
+ iucv_retrieve_cpu(NULL);
+ if (!cpumask_empty(&iucv_irq_cpumask))
+- return 0;
++ goto __free_cpumask;
++
+ smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
+ iucv_allow_cpu, NULL, 1);
+- return 0;
++
++__free_cpumask:
++ free_cpumask_var(cpumask);
++ return ret;
+ }
+
+ /**
+@@ -1080,8 +1090,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path,
+ size = (size < 8) ? size : 8;
+ for (array = buffer; size > 0; array++) {
+ copy = min_t(size_t, size, array->length);
+- memcpy((u8 *)(addr_t) array->address,
+- rmmsg, copy);
++ memcpy(phys_to_virt(array->address), rmmsg, copy);
+ rmmsg += copy;
+ size -= copy;
+ }
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index dd1d8ffd5f5941..829eb67240a998 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -634,7 +634,7 @@ static int kcm_write_msgs(struct kcm_sock *kcm)
+
+ msize = 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+- msize += skb_shinfo(skb)->frags[i].bv_len;
++ msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
+ skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
+@@ -754,6 +754,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
+ int err = -EPIPE;
+
++ mutex_lock(&kcm->tx_mutex);
+ lock_sock(sk);
+
+ /* Per tcp_sendmsg this should be in poll */
+@@ -925,6 +926,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
+
+ release_sock(sk);
++ mutex_unlock(&kcm->tx_mutex);
+ return copied;
+
+ out_error:
+@@ -950,6 +952,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ sk->sk_write_space(sk);
+
+ release_sock(sk);
++ mutex_unlock(&kcm->tx_mutex);
+ return err;
+ }
+
+@@ -1152,10 +1155,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+- len = min_t(unsigned int, len, sizeof(int));
+ if (len < 0)
+ return -EINVAL;
+
++ len = min_t(unsigned int, len, sizeof(int));
++
+ switch (optname) {
+ case KCM_RECV_DISABLE:
+ val = kcm->rx_disabled;
+@@ -1202,6 +1206,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
+ spin_unlock_bh(&mux->lock);
+
+ INIT_WORK(&kcm->tx_work, kcm_tx_work);
++ mutex_init(&kcm->tx_mutex);
+
+ spin_lock_bh(&mux->rx_lock);
+ kcm_rcv_ready(kcm);
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 03608d3ded4b83..70da78ab952027 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -88,6 +88,11 @@
+ /* Default trace flags */
+ #define L2TP_DEFAULT_DEBUG_FLAGS 0
+
++#define L2TP_DEPTH_NESTING 2
++#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
++#error "L2TP requires its own lockdep subclass"
++#endif
++
+ /* Private data stored for received packets in the skb.
+ */
+ struct l2tp_skb_cb {
+@@ -1041,7 +1046,13 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ nf_reset_ct(skb);
+
+- bh_lock_sock_nested(sk);
++ /* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
++ * nested socket calls on the same lockdep socket class. This can
++ * happen when data from a user socket is routed over l2tp, which uses
++ * another userspace socket.
++ */
++ spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
++
+ if (sock_owned_by_user(sk)) {
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
+@@ -1093,7 +1104,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
+ ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
+
+ out_unlock:
+- bh_unlock_sock(sk);
++ spin_unlock(&sk->sk_lock.slock);
+
+ return ret;
+ }
+@@ -1139,9 +1150,9 @@ static void l2tp_tunnel_destruct(struct sock *sk)
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+- (udp_sk(sk))->encap_type = 0;
+- (udp_sk(sk))->encap_rcv = NULL;
+- (udp_sk(sk))->encap_destroy = NULL;
++ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
++ udp_sk(sk)->encap_rcv = NULL;
++ udp_sk(sk)->encap_destroy = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
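
The L2TP_DEPTH_NESTING guard above is a compile-time assertion: if the constant ever drifts back onto SINGLE_DEPTH_NESTING, the build fails instead of lockdep silently mis-classifying the nested sk_lock.slock acquisition at runtime. A standalone sketch of the same pattern; the constant values are assumed for illustration, not taken from the kernel headers:

    #include <stdio.h>

    #define SINGLE_DEPTH_NESTING 1   /* assumed value, for illustration */
    #define L2TP_DEPTH_NESTING   2

    #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
    #error "L2TP requires its own lockdep subclass"
    #endif

    int main(void)
    {
        printf("subclass %d stays distinct from %d\n",
               L2TP_DEPTH_NESTING, SINGLE_DEPTH_NESTING);
        return 0;
    }
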
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index f2ae03c404736d..1f41d2f3b8c4e0 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -136,6 +136,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
+ /* checksums verified by L2TP */
+ skb->ip_summed = CHECKSUM_NONE;
+
++ /* drop outer flow-hash */
++ skb_clear_hash(skb);
++
+ skb_dst_drop(skb);
+ nf_reset_ct(skb);
+
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 11f3d375cec000..db4971d52802b9 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -627,7 +627,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ back_from_confirm:
+ lock_sock(sk);
+- ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
++ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ ulen, transhdrlen, &ipc6,
+ &fl6, (struct rt6_info *)dst,
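
The l2tp_ip6_sendmsg() fix above is pure operator precedence: '?:' binds looser than '+', so without parentheses the whole sum becomes the condition. A sketch with made-up values:

    #include <stdio.h>

    int main(void)
    {
        size_t len = 100, transhdrlen = 8;
        int queue_empty = 1;

        /* parses as (len + queue_empty) ? transhdrlen : 0  ==> 8 */
        size_t buggy = len + queue_empty ? transhdrlen : 0;

        /* the patched form: 100 + 8 ==> 108 */
        size_t fixed = len + (queue_empty ? transhdrlen : 0);

        printf("buggy ulen=%zu, fixed ulen=%zu\n", buggy, fixed);
        return 0;
    }
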
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index f011af6601c9cd..6146e4e67bbb54 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1356,11 +1356,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+- len = min_t(unsigned int, len, sizeof(int));
+-
+ if (len < 0)
+ return -EINVAL;
+
++ len = min_t(unsigned int, len, sizeof(int));
++
+ err = -ENOTCONN;
+ if (!sk->sk_user_data)
+ goto end;
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 9b06c380866b53..fde1140d899efc 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -226,6 +226,8 @@ static int llc_ui_release(struct socket *sock)
+ }
+ netdev_put(llc->dev, &llc->dev_tracker);
+ sock_put(sk);
++ sock_orphan(sk);
++ sock->sk = NULL;
+ llc_sk_free(sk);
+ out:
+ return 0;
+@@ -928,14 +930,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ */
+ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
++ DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ struct sock *sk = sock->sk;
+ struct llc_sock *llc = llc_sk(sk);
+- DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ int flags = msg->msg_flags;
+ int noblock = flags & MSG_DONTWAIT;
++ int rc = -EINVAL, copied = 0, hdrlen, hh_len;
+ struct sk_buff *skb = NULL;
++ struct net_device *dev;
+ size_t size = 0;
+- int rc = -EINVAL, copied = 0, hdrlen;
+
+ dprintk("%s: sending from %02X to %02X\n", __func__,
+ llc->laddr.lsap, llc->daddr.lsap);
+@@ -955,22 +958,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ if (rc)
+ goto out;
+ }
+- hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
++ dev = llc->dev;
++ hh_len = LL_RESERVED_SPACE(dev);
++ hdrlen = llc_ui_header_len(sk, addr);
+ size = hdrlen + len;
+- if (size > llc->dev->mtu)
+- size = llc->dev->mtu;
++ size = min_t(size_t, size, READ_ONCE(dev->mtu));
+ copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+ goto out;
+ release_sock(sk);
+- skb = sock_alloc_send_skb(sk, size, noblock, &rc);
++ skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
+ lock_sock(sk);
+ if (!skb)
+ goto out;
+- skb->dev = llc->dev;
++ if (sock_flag(sk, SOCK_ZAPPED) ||
++ llc->dev != dev ||
++ hdrlen != llc_ui_header_len(sk, addr) ||
++ hh_len != LL_RESERVED_SPACE(dev) ||
++ size > READ_ONCE(dev->mtu))
++ goto out;
++ skb->dev = dev;
+ skb->protocol = llc_proto_type(addr->sllc_arphrd);
+- skb_reserve(skb, hdrlen);
++ skb_reserve(skb, hh_len + hdrlen);
+ rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
+ if (rc)
+ goto out;
+diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
+index 6e387aadffcecb..4f16d9c88350b4 100644
+--- a/net/llc/llc_core.c
++++ b/net/llc/llc_core.c
+@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
+ .func = llc_rcv,
+ };
+
+-static struct packet_type llc_tr_packet_type __read_mostly = {
+- .type = cpu_to_be16(ETH_P_TR_802_2),
+- .func = llc_rcv,
+-};
+-
+ static int __init llc_init(void)
+ {
+ dev_add_pack(&llc_packet_type);
+- dev_add_pack(&llc_tr_packet_type);
+ return 0;
+ }
+
+ static void __exit llc_exit(void)
+ {
+ dev_remove_pack(&llc_packet_type);
+- dev_remove_pack(&llc_tr_packet_type);
+ }
+
+ module_init(llc_init);
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 7cac441862e216..51bccfb00a9cd9 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ skb->transport_header += llc_len;
+ skb_pull(skb, llc_len);
+ if (skb->protocol == htons(ETH_P_802_2)) {
+- __be16 pdulen = eth_hdr(skb)->h_proto;
+- s32 data_size = ntohs(pdulen) - llc_len;
++ __be16 pdulen;
++ s32 data_size;
++
++ if (skb->mac_len < ETH_HLEN)
++ return 0;
++
++ pdulen = eth_hdr(skb)->h_proto;
++ data_size = ntohs(pdulen) - llc_len;
+
+ if (data_size < 0 ||
+ !pskb_may_pull(skb, data_size))
+diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
+index 79d1cef8f15a92..06fb8e6944b06a 100644
+--- a/net/llc/llc_s_ac.c
++++ b/net/llc/llc_s_ac.c
+@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+ int rc = 1;
+ u32 data_size;
+
++ if (skb->mac_len < ETH_HLEN)
++ return 1;
++
+ llc_pdu_decode_sa(skb, mac_da);
+ llc_pdu_decode_da(skb, mac_sa);
+ llc_pdu_decode_ssap(skb, &dsap);
+diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
+index 05c6ae0920534b..f5065429251095 100644
+--- a/net/llc/llc_station.c
++++ b/net/llc/llc_station.c
+@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
+ u32 data_size;
+ struct sk_buff *nskb;
+
++ if (skb->mac_len < ETH_HLEN)
++ goto out;
++
+ /* The test request command is type U (llc_len = 3) */
+ data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
+ nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index b6b7726858815f..0a69e47f1c55f7 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -497,7 +497,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ {
+ struct tid_ampdu_tx *tid_tx;
+ struct ieee80211_local *local = sta->local;
+- struct ieee80211_sub_if_data *sdata;
++ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_TX_START,
+@@ -525,7 +525,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ */
+ synchronize_net();
+
+- sdata = sta->sdata;
+ params.ssn = sta->tid_seq[tid] >> 4;
+ ret = drv_ampdu_action(local, sdata, &params);
+ tid_tx->ssn = params.ssn;
+@@ -539,9 +538,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ */
+ set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
+ } else if (ret) {
+- if (!sdata)
+- return;
+-
+ ht_dbg(sdata,
+ "BA request denied - HW unavailable for %pM tid %d\n",
+ sta->sta.addr, tid);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 0e3a1753a51c6d..ca5b111f20e5bd 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1806,10 +1806,10 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ lockdep_is_held(&local->sta_mtx));
+
+ /*
+- * If there are no changes, then accept a link that doesn't exist,
++ * If there are no changes, then accept a link that exists,
+ * unless it's a new link.
+ */
+- if (params->link_id < 0 && !new_link &&
++ if (params->link_id >= 0 && !new_link &&
+ !params->link_mac && !params->txpwr_set &&
+ !params->supported_rates_len &&
+ !params->ht_capa && !params->vht_capa &&
+@@ -1887,6 +1887,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ sband->band);
+ }
+
++ ieee80211_sta_init_nss(link_sta);
++
+ return ret;
+ }
+
+@@ -2185,15 +2187,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
+ }
+
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+- sta->sdata->u.vlan.sta) {
+- ieee80211_clear_fast_rx(sta);
++ sta->sdata->u.vlan.sta)
+ RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+- }
+
+ if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ ieee80211_vif_dec_num_mcast(sta->sdata);
+
+ sta->sdata = vlansdata;
++ ieee80211_check_fast_rx(sta);
+ ieee80211_check_fast_xmit(sta);
+
+ if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
+@@ -2952,8 +2953,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
+ memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+ sizeof(int) * NUM_NL80211_BANDS);
+
+- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+- BSS_CHANGED_MCAST_RATE);
++ if (ieee80211_sdata_running(sdata))
++ ieee80211_link_info_change_notify(sdata, &sdata->deflink,
++ BSS_CHANGED_MCAST_RATE);
+
+ return 0;
+ }
+@@ -3121,6 +3123,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
+ else
+ *dbm = sdata->vif.bss_conf.txpower;
+
++ /* INT_MIN indicates no power level was set yet */
++ if (*dbm == INT_MIN)
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
+index 68952752b5990f..c09aed6a3cfcc8 100644
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -245,7 +245,9 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
+ struct sta_info *sta;
+
+- list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
++ lockdep_assert_wiphy(sdata->local->hw.wiphy);
++
++ list_for_each_entry(sta, &sdata->local->sta_list, list) {
+ if (sdata != sta->sdata &&
+ !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
+ continue;
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index 30cd0c905a24f6..d6478fd00badf3 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+ * Copyright 2015 Intel Deutschland GmbH
+- * Copyright (C) 2022 Intel Corporation
++ * Copyright (C) 2022-2023 Intel Corporation
+ */
+ #include <net/mac80211.h>
+ #include "ieee80211_i.h"
+@@ -393,9 +393,6 @@ int drv_ampdu_action(struct ieee80211_local *local,
+
+ might_sleep();
+
+- if (!sdata)
+- return -EIO;
+-
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+@@ -510,10 +507,13 @@ int drv_change_vif_links(struct ieee80211_local *local,
+ if (ret)
+ return ret;
+
+- for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+- link = rcu_access_pointer(sdata->link[link_id]);
++ if (!local->in_reconfig) {
++ for_each_set_bit(link_id, &links_to_add,
++ IEEE80211_MLD_MAX_NUM_LINKS) {
++ link = rcu_access_pointer(sdata->link[link_id]);
+
+- ieee80211_link_debugfs_drv_add(link);
++ ieee80211_link_debugfs_drv_add(link);
++ }
+ }
+
+ return 0;
+@@ -561,6 +561,10 @@ int drv_change_sta_links(struct ieee80211_local *local,
+ if (ret)
+ return ret;
+
++ /* during reconfig don't add it to debugfs again */
++ if (local->in_reconfig)
++ return 0;
++
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_sta = rcu_dereference_protected(info->link[link_id],
+ lockdep_is_held(&local->sta_mtx));
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index c4505593ba7a6f..2bc2fbe58f944b 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -23,7 +23,7 @@
+ static inline struct ieee80211_sub_if_data *
+ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+ u.ap);
+
+@@ -638,10 +638,13 @@ static inline void drv_flush(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u32 queues, bool drop)
+ {
+- struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
++ struct ieee80211_vif *vif;
+
+ might_sleep();
+
++ sdata = get_bss_sdata(sdata);
++ vif = sdata ? &sdata->vif : NULL;
++
+ if (sdata && !check_sdata_in_driver(sdata))
+ return;
+
+@@ -657,6 +660,8 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
+ {
+ might_sleep();
+
++ sdata = get_bss_sdata(sdata);
++
+ if (sdata && !check_sdata_in_driver(sdata))
+ return;
+
+diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h
+index 49dc809cab290c..1570fac8411f4f 100644
+--- a/net/mac80211/drop.h
++++ b/net/mac80211/drop.h
+@@ -53,4 +53,7 @@ enum mac80211_drop_reason {
+ #undef DEF
+ };
+
++#define RX_RES_IS_UNUSABLE(result) \
++ (((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE)
++
+ #endif /* MAC80211_DROP_H */
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 9f5ffdc9db284a..ecbb042dd0433e 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
+
+ if (!he_spr_ie_elem)
+ return;
++
++ he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
+ data = he_spr_ie_elem->optional;
+
+ if (he_spr_ie_elem->he_sr_control &
+ IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+- data++;
++ he_obss_pd->non_srg_max_offset = *data++;
++
+ if (he_spr_ie_elem->he_sr_control &
+ IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+- he_obss_pd->max_offset = *data++;
+ he_obss_pd->min_offset = *data++;
++ he_obss_pd->max_offset = *data++;
++ memcpy(he_obss_pd->bss_color_bitmap, data, 8);
++ data += 8;
++ memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
+ he_obss_pd->enable = true;
+ }
+ }
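
The SPR fix above is a cursor-parsing correction: each optional field that the control byte declares present has to be consumed in wire order, and the non-SRG offset stored rather than skipped. A userspace sketch of the pattern, using illustrative flag values rather than the 802.11 definitions:

    #include <stdio.h>

    #define NON_SRG_OFFSET_PRESENT 0x1   /* illustrative bit values */
    #define SRG_INFO_PRESENT       0x2

    int main(void)
    {
        /* control byte, then non-SRG offset, then SRG min/max offsets */
        const unsigned char ie[] = { NON_SRG_OFFSET_PRESENT | SRG_INFO_PRESENT,
                                     21, 5, 29 };
        const unsigned char *data = ie + 1;
        unsigned char ctrl = ie[0], non_srg = 0, min_off = 0, max_off = 0;

        if (ctrl & NON_SRG_OFFSET_PRESENT)
            non_srg = *data++;    /* the old code advanced without storing */
        if (ctrl & SRG_INFO_PRESENT) {
            min_off = *data++;    /* min precedes max on the air */
            max_off = *data++;
        }
        printf("non_srg=%u min=%u max=%u\n", non_srg, min_off, max_off);
        return 0;
    }
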
+diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
+index 33729870ad8a37..b3371872895cca 100644
+--- a/net/mac80211/ht.c
++++ b/net/mac80211/ht.c
+@@ -271,6 +271,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
++ case NL80211_CHAN_WIDTH_320:
+ bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
+ break;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 98ef1fe1226e72..daea061d0fc136 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -122,7 +122,7 @@ struct ieee80211_bss {
+ };
+
+ /**
+- * enum ieee80211_corrupt_data_flags - BSS data corruption flags
++ * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags
+ * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
+ * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
+ *
+@@ -135,7 +135,7 @@ enum ieee80211_bss_corrupt_data_flags {
+ };
+
+ /**
+- * enum ieee80211_valid_data_flags - BSS valid data flags
++ * enum ieee80211_bss_valid_data_flags - BSS valid data flags
+ * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
+ * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
+ * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
+@@ -1406,7 +1406,7 @@ struct ieee80211_local {
+ /* wowlan is enabled -- don't reconfig on resume */
+ bool wowlan;
+
+- struct work_struct radar_detected_work;
++ struct wiphy_work radar_detected_work;
+
+ /* number of RX chains the hardware has */
+ u8 rx_chains;
+@@ -1483,14 +1483,14 @@ struct ieee80211_local {
+ int hw_scan_ies_bufsize;
+ struct cfg80211_scan_info scan_info;
+
+- struct work_struct sched_scan_stopped_work;
++ struct wiphy_work sched_scan_stopped_work;
+ struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
+ struct cfg80211_sched_scan_request __rcu *sched_scan_req;
+ u8 scan_addr[ETH_ALEN];
+
+ unsigned long leave_oper_channel_time;
+ enum mac80211_scan_state next_scan_state;
+- struct delayed_work scan_work;
++ struct wiphy_delayed_work scan_work;
+ struct ieee80211_sub_if_data __rcu *scan_sdata;
+ /* For backward compatibility only -- do not use */
+ struct cfg80211_chan_def _oper_chandef;
+@@ -1583,9 +1583,9 @@ struct ieee80211_local {
+ /*
+ * Remain-on-channel support
+ */
+- struct delayed_work roc_work;
++ struct wiphy_delayed_work roc_work;
+ struct list_head roc_list;
+- struct work_struct hw_roc_start, hw_roc_done;
++ struct wiphy_work hw_roc_start, hw_roc_done;
+ unsigned long hw_roc_start_time;
+ u64 roc_cookie_counter;
+
+@@ -1846,6 +1846,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ void ieee80211_configure_filter(struct ieee80211_local *local);
+ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+
++void ieee80211_handle_queued_frames(struct ieee80211_local *local);
++
+ u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
+ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
+ u64 *cookie, gfp_t gfp);
+@@ -1929,7 +1931,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata,
+ u64 *changed);
+
+ /* scan/BSS handling */
+-void ieee80211_scan_work(struct work_struct *work);
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
+ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+ const u8 *ssid, u8 ssid_len,
+ struct ieee80211_channel **channels,
+@@ -1962,7 +1964,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_sched_scan_request *req);
+ int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
+ void ieee80211_sched_scan_end(struct ieee80211_local *local);
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work);
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++ struct wiphy_work *work);
+
+ /* off-channel/mgmt-tx */
+ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+@@ -2147,7 +2150,7 @@ enum ieee80211_sta_rx_bandwidth
+ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
+ enum ieee80211_sta_rx_bandwidth
+ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
+-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
++void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
+ enum ieee80211_sta_rx_bandwidth
+ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+ enum nl80211_chan_width
+@@ -2566,7 +2569,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local);
+
+ void ieee80211_dfs_cac_timer_work(struct work_struct *work);
+ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work);
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++ struct wiphy_work *work);
+ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_csa_settings *csa_settings);
+
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index be586bc0b5b7d7..fae701248f0580 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -251,9 +251,9 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata
+ return ret;
+ }
+
+-static int ieee80211_change_mac(struct net_device *dev, void *addr)
++static int _ieee80211_change_mac(struct ieee80211_sub_if_data *sdata,
++ void *addr)
+ {
+- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sockaddr *sa = addr;
+ bool check_dup = true;
+@@ -278,7 +278,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
+
+ if (live)
+ drv_remove_interface(local, sdata);
+- ret = eth_mac_addr(dev, sa);
++ ret = eth_mac_addr(sdata->dev, sa);
+
+ if (ret == 0) {
+ memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
+@@ -294,6 +294,27 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
+ return ret;
+ }
+
++static int ieee80211_change_mac(struct net_device *dev, void *addr)
++{
++ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
++ struct ieee80211_local *local = sdata->local;
++ int ret;
++
++ /*
++ * This happens during unregistration if there's a bond device
++ * active (maybe other cases?) and we must get removed from it.
++ * But we really don't care anymore if it's not registered now.
++ */
++ if (!dev->ieee80211_ptr->registered)
++ return 0;
++
++ wiphy_lock(local->hw.wiphy);
++ ret = _ieee80211_change_mac(sdata, addr);
++ wiphy_unlock(local->hw.wiphy);
++
++ return ret;
++}
++
+ static inline int identical_mac_addr_allowed(int type1, int type2)
+ {
+ return type1 == NL80211_IFTYPE_MONITOR ||
+@@ -445,6 +466,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ {
+ struct ieee80211_local *local = sdata->local;
+ unsigned long flags;
++ struct sk_buff_head freeq;
+ struct sk_buff *skb, *tmp;
+ u32 hw_reconf_flags = 0;
+ int i, flushed;
+@@ -631,18 +653,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ skb_queue_purge(&sdata->status_queue);
+ }
+
++ /*
++ * Since ieee80211_free_txskb() may issue __dev_queue_xmit()
++ * which should be called with interrupts enabled, reclamation
++ * is done in two phases:
++ */
++ __skb_queue_head_init(&freeq);
++
++ /* unlink from local queues... */
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
+ skb_queue_walk_safe(&local->pending[i], skb, tmp) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ if (info->control.vif == &sdata->vif) {
+ __skb_unlink(skb, &local->pending[i]);
+- ieee80211_free_txskb(&local->hw, skb);
++ __skb_queue_tail(&freeq, skb);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
++ /* ... and perform actual reclamation with interrupts enabled. */
++ skb_queue_walk_safe(&freeq, skb, tmp) {
++ __skb_unlink(skb, &freeq);
++ ieee80211_free_txskb(&local->hw, skb);
++ }
++
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ ieee80211_txq_remove_vlan(local, sdata);
+
+@@ -691,7 +727,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ ieee80211_recalc_ps(local);
+
+ if (cancel_scan)
+- flush_delayed_work(&local->scan_work);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+
+ if (local->open_count == 0) {
+ ieee80211_stop_device(local);
+@@ -2294,6 +2330,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
+ list_for_each_entry_safe(sdata, tmp, &unreg_list, list) {
+ bool netdev = sdata->dev;
+
++ /*
++ * Remove IP addresses explicitly, since the notifier will
++ * skip the callbacks if wdev->registered is false, because
++ * we can't acquire the wiphy_lock() again there if already
++ * inside this locked section.
++ */
++ sdata_lock(sdata);
++ sdata->vif.cfg.arp_addr_cnt = 0;
++ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
++ sdata->u.mgd.associated)
++ ieee80211_vif_cfg_change_notify(sdata,
++ BSS_CHANGED_ARP_FILTER);
++ sdata_unlock(sdata);
++
+ list_del(&sdata->list);
+ cfg80211_unregister_wdev(&sdata->wdev);
+
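
The freeq logic added to ieee80211_do_stop() above is the classic two-phase reclamation pattern: unlink matching entries onto a private list while the irq-disabled lock is held, then free them after the lock is dropped, since freeing may need interrupts enabled. A userspace sketch of the shape of that pattern; all names here are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int vif; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending;

    static void purge_vif(int vif)
    {
        struct node *freeq = NULL, **pp, *n;

        pthread_mutex_lock(&lock);          /* phase 1: unlink under lock */
        for (pp = &pending; (n = *pp); ) {
            if (n->vif == vif) {
                *pp = n->next;
                n->next = freeq;
                freeq = n;
            } else {
                pp = &n->next;
            }
        }
        pthread_mutex_unlock(&lock);

        while ((n = freeq)) {               /* phase 2: free outside lock */
            freeq = n->next;
            printf("freeing entry for vif %d\n", n->vif);
            free(n);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct node *n = malloc(sizeof(*n));
            n->vif = i % 2;
            n->next = pending;
            pending = n;
        }
        purge_vif(1);
        return 0;
    }
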
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 6148208b320e3b..16cbaea93fc32d 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -195,7 +195,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
+
+ memset(to_free, 0, sizeof(links));
+
+- if (old_links == new_links)
++ if (old_links == new_links && dormant_links == sdata->vif.dormant_links)
+ return 0;
+
+ /* if there were no old links, need to clear the pointers to deflink */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 24315d7b31263e..71d60f57a886ce 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -215,6 +215,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+
+ might_sleep();
+
++ WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif));
++
+ if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ return;
+
+@@ -247,7 +249,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) {
+ u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS;
+
+- /* FIXME: should be for each link */
+ trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf,
+ changed);
+ if (local->ops->link_info_changed)
+@@ -301,9 +302,9 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
+ BSS_CHANGED_ERP_SLOT;
+ }
+
+-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
++/* context: requires softirqs disabled */
++void ieee80211_handle_queued_frames(struct ieee80211_local *local)
+ {
+- struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&local->skb_queue)) ||
+@@ -328,6 +329,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+ }
+ }
+
++static void ieee80211_tasklet_handler(struct tasklet_struct *t)
++{
++ struct ieee80211_local *local = from_tasklet(local, t, tasklet);
++
++ ieee80211_handle_queued_frames(local);
++}
++
+ static void ieee80211_restart_work(struct work_struct *work)
+ {
+ struct ieee80211_local *local =
+@@ -335,10 +343,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+- /* wait for scan work complete */
+ flush_workqueue(local->workqueue);
+- flush_work(&local->sched_scan_stopped_work);
+- flush_work(&local->radar_detected_work);
+
+ rtnl_lock();
+ /* we might do interface manipulations, so need both */
+@@ -379,8 +384,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ ieee80211_scan_cancel(local);
+
+ /* make sure any new ROC will consider local->in_reconfig */
+- flush_delayed_work(&local->roc_work);
+- flush_work(&local->hw_roc_done);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
++ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
+
+ /* wait for all packet processing to be done */
+ synchronize_net();
+@@ -439,7 +444,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
+ if (!wdev)
+ return NOTIFY_DONE;
+
+- if (wdev->wiphy != local->hw.wiphy)
++ if (wdev->wiphy != local->hw.wiphy || !wdev->registered)
+ return NOTIFY_DONE;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+@@ -454,6 +459,25 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
+ return NOTIFY_DONE;
+
+ ifmgd = &sdata->u.mgd;
++
++ /*
++ * The nesting here is needed to convince lockdep that this is
++ * all OK. Yes, we lock the wiphy mutex here while we already
++ * hold the notifier rwsem, that's the normal case. And yes,
++ * we also acquire the notifier rwsem again when unregistering
++ * a netdev while we already hold the wiphy mutex, so it does
++ * look like a typical ABBA deadlock.
++ *
++ * However, both of these things happen with the RTNL held
++ * already. Therefore, they can't actually happen, since the
++ * lock orders really are ABC and ACB, which is fine due to
++ * the RTNL (A).
++ *
++ * We still need to prevent recursion, which is accomplished
++ * by the !wdev->registered check above.
++ */
++ mutex_lock_nested(&local->hw.wiphy->mtx, 1);
++ __acquire(&local->hw.wiphy->mtx);
+ sdata_lock(sdata);
+
+ /* Copy the addresses to the vif config list */
+@@ -472,6 +496,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
+ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER);
+
+ sdata_unlock(sdata);
++ wiphy_unlock(local->hw.wiphy);
+
+ return NOTIFY_OK;
+ }
+@@ -809,12 +834,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ INIT_LIST_HEAD(&local->chanctx_list);
+ mutex_init(&local->chanctx_mtx);
+
+- INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
++ wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
+
+ INIT_WORK(&local->restart_work, ieee80211_restart_work);
+
+- INIT_WORK(&local->radar_detected_work,
+- ieee80211_dfs_radar_detected_work);
++ wiphy_work_init(&local->radar_detected_work,
++ ieee80211_dfs_radar_detected_work);
+
+ INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ local->smps_mode = IEEE80211_SMPS_OFF;
+@@ -825,8 +850,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ ieee80211_dynamic_ps_disable_work);
+ timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
+
+- INIT_WORK(&local->sched_scan_stopped_work,
+- ieee80211_sched_scan_stopped_work);
++ wiphy_work_init(&local->sched_scan_stopped_work,
++ ieee80211_sched_scan_stopped_work);
+
+ spin_lock_init(&local->ack_status_lock);
+ idr_init(&local->ack_status_frames);
+@@ -1482,13 +1507,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+ */
+ ieee80211_remove_interfaces(local);
+
++ wiphy_lock(local->hw.wiphy);
++ wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
++ wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
++ wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
++ wiphy_unlock(local->hw.wiphy);
+ rtnl_unlock();
+
+- cancel_delayed_work_sync(&local->roc_work);
+ cancel_work_sync(&local->restart_work);
+ cancel_work_sync(&local->reconfig_filter);
+- flush_work(&local->sched_scan_stopped_work);
+- flush_work(&local->radar_detected_work);
+
+ ieee80211_clear_tx_pending(local);
+ rate_control_deinitialize(local);
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index e31c312c124a1a..25223184d6e5b0 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -765,6 +765,9 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 ctrl_flags)
+ {
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++ struct ieee80211_mesh_fast_tx_key key = {
++ .type = MESH_FAST_TX_TYPE_LOCAL
++ };
+ struct ieee80211_mesh_fast_tx *entry;
+ struct ieee80211s_hdr *meshhdr;
+ u8 sa[ETH_ALEN] __aligned(2);
+@@ -800,7 +803,10 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
+ return false;
+ }
+
+- entry = mesh_fast_tx_get(sdata, skb->data);
++ ether_addr_copy(key.addr, skb->data);
++ if (!ether_addr_equal(skb->data + ETH_ALEN, sdata->vif.addr))
++ key.type = MESH_FAST_TX_TYPE_PROXIED;
++ entry = mesh_fast_tx_get(sdata, &key);
+ if (!entry)
+ return false;
+
+@@ -1786,6 +1792,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ ifmsh->last_preq = jiffies;
+ ifmsh->next_perr = jiffies;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
++ ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ /* Allocate all mesh structures when creating the first mesh interface. */
+ if (!mesh_allocated)
+ ieee80211s_init();
+diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
+index ad8469293d7125..58c619874ca6af 100644
+--- a/net/mac80211/mesh.h
++++ b/net/mac80211/mesh.h
+@@ -133,10 +133,39 @@ struct mesh_path {
+ #define MESH_FAST_TX_CACHE_THRESHOLD_SIZE 384
+ #define MESH_FAST_TX_CACHE_TIMEOUT 8000 /* msecs */
+
++/**
++ * enum ieee80211_mesh_fast_tx_type - cached mesh fast tx entry type
++ *
++ * @MESH_FAST_TX_TYPE_LOCAL: tx from the local vif address as SA
++ * @MESH_FAST_TX_TYPE_PROXIED: local tx with a different SA (e.g. bridged)
++ * @MESH_FAST_TX_TYPE_FORWARDED: forwarded from a different mesh point
++ * @NUM_MESH_FAST_TX_TYPE: number of entry types
++ */
++enum ieee80211_mesh_fast_tx_type {
++ MESH_FAST_TX_TYPE_LOCAL,
++ MESH_FAST_TX_TYPE_PROXIED,
++ MESH_FAST_TX_TYPE_FORWARDED,
++
++ /* must be last */
++ NUM_MESH_FAST_TX_TYPE
++};
++
++
++/**
++ * struct ieee80211_mesh_fast_tx_key - cached mesh fast tx entry key
++ *
++ * @addr: The Ethernet DA for this entry
++ * @type: cache entry type
++ */
++struct ieee80211_mesh_fast_tx_key {
++ u8 addr[ETH_ALEN] __aligned(2);
++ u16 type;
++};
++
+ /**
+ * struct ieee80211_mesh_fast_tx - cached mesh fast tx entry
+ * @rhash: rhashtable pointer
+- * @addr_key: The Ethernet DA which is the key for this entry
++ * @key: the lookup key for this cache entry
+ * @fast_tx: base fast_tx data
+ * @hdr: cached mesh and rfc1042 headers
+ * @hdrlen: length of mesh + rfc1042
+@@ -147,7 +176,7 @@ struct mesh_path {
+ */
+ struct ieee80211_mesh_fast_tx {
+ struct rhash_head rhash;
+- u8 addr_key[ETH_ALEN] __aligned(2);
++ struct ieee80211_mesh_fast_tx_key key;
+
+ struct ieee80211_fast_tx fast_tx;
+ u8 hdr[sizeof(struct ieee80211s_hdr) + sizeof(rfc1042_header)];
+@@ -333,7 +362,8 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
+
+ bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
+ struct ieee80211_mesh_fast_tx *
+-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr);
++mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
++ struct ieee80211_mesh_fast_tx_key *key);
+ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 ctrl_flags);
+ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index d32e304eeb4ba4..530581ba812b43 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -36,8 +36,8 @@ static const struct rhashtable_params mesh_rht_params = {
+ static const struct rhashtable_params fast_tx_rht_params = {
+ .nelem_hint = 10,
+ .automatic_shrinking = true,
+- .key_len = ETH_ALEN,
+- .key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
++ .key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
++ .key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
+ .head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
+ .hashfn = mesh_table_hash,
+ };
+@@ -426,20 +426,21 @@ static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
+ }
+
+ struct ieee80211_mesh_fast_tx *
+-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
++mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
++ struct ieee80211_mesh_fast_tx_key *key)
+ {
+ struct ieee80211_mesh_fast_tx *entry;
+ struct mesh_tx_cache *cache;
+
+ cache = &sdata->u.mesh.tx_cache;
+- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
++ entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
+ if (!entry)
+ return NULL;
+
+ if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
+ mpath_expired(entry->mpath)) {
+ spin_lock_bh(&cache->walk_lock);
+- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
++ entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
+ if (entry)
+ mesh_fast_tx_entry_free(cache, entry);
+ spin_unlock_bh(&cache->walk_lock);
+@@ -484,18 +485,24 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
+ if (!sta)
+ return;
+
++ build.key.type = MESH_FAST_TX_TYPE_LOCAL;
+ if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
+ /* This is required to keep the mppath alive */
+ mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
+ if (!mppath)
+ return;
+ build.mppath = mppath;
++ if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
++ build.key.type = MESH_FAST_TX_TYPE_PROXIED;
+ } else if (ieee80211_has_a4(hdr->frame_control)) {
+ mppath = mpath;
+ } else {
+ return;
+ }
+
++ if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
++ build.key.type = MESH_FAST_TX_TYPE_FORWARDED;
++
+ /* rate limit, in case fast xmit can't be enabled */
+ if (mppath->fast_tx_check == jiffies)
+ return;
+@@ -542,7 +549,7 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
+ }
+ }
+
+- memcpy(build.addr_key, mppath->dst, ETH_ALEN);
++ memcpy(build.key.addr, mppath->dst, ETH_ALEN);
+ build.timestamp = jiffies;
+ build.fast_tx.band = info->band;
+ build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
+@@ -595,11 +602,10 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
+ void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
+ {
+ unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
+- struct mesh_tx_cache *cache;
++ struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
+ struct ieee80211_mesh_fast_tx *entry;
+ struct hlist_node *n;
+
+- cache = &sdata->u.mesh.tx_cache;
+ if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
+ return;
+
+@@ -617,7 +623,6 @@ void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
+ struct ieee80211_mesh_fast_tx *entry;
+ struct hlist_node *n;
+
+- cache = &sdata->u.mesh.tx_cache;
+ spin_lock_bh(&cache->walk_lock);
+ hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
+ if (entry->mpath == mpath)
+@@ -632,7 +637,6 @@ void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mesh_fast_tx *entry;
+ struct hlist_node *n;
+
+- cache = &sdata->u.mesh.tx_cache;
+ spin_lock_bh(&cache->walk_lock);
+ hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
+ if (rcu_access_pointer(entry->mpath->next_hop) == sta)
+@@ -644,13 +648,18 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+ {
+ struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
++ struct ieee80211_mesh_fast_tx_key key = {};
+ struct ieee80211_mesh_fast_tx *entry;
++ int i;
+
+- cache = &sdata->u.mesh.tx_cache;
++ ether_addr_copy(key.addr, addr);
+ spin_lock_bh(&cache->walk_lock);
+- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+- if (entry)
+- mesh_fast_tx_entry_free(cache, entry);
++ for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
++ key.type = i;
++ entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
++ if (entry)
++ mesh_fast_tx_entry_free(cache, entry);
++ }
+ spin_unlock_bh(&cache->walk_lock);
+ }
+
+@@ -1002,10 +1011,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
+ */
+ void mesh_path_flush_pending(struct mesh_path *mpath)
+ {
++ struct ieee80211_sub_if_data *sdata = mpath->sdata;
++ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++ struct mesh_preq_queue *preq, *tmp;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
+ mesh_path_discard_frame(mpath->sdata, skb);
++
++ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
++ list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
++ if (ether_addr_equal(mpath->dst, preq->dst)) {
++ list_del(&preq->list);
++ kfree(preq);
++ --ifmsh->preq_queue_len;
++ }
++ }
++ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+ }
+
+ /**
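
Switching the rhashtable key above from a bare address to the full ieee80211_mesh_fast_tx_key struct means lookups hash and compare sizeof(key) raw bytes, so every key must be built with fully deterministic contents. A sketch of the byte-wise comparison this relies on; the struct layout mirrors the one added above and is 8 bytes with no padding on common ABIs:

    #include <stdio.h>
    #include <string.h>

    struct fast_tx_key {
        unsigned char addr[6];
        unsigned short type;     /* 6 + 2 = 8 bytes, no padding here */
    };

    int main(void)
    {
        /* zero-initialise so any padding bytes (none here) are deterministic */
        struct fast_tx_key a = {0}, b = {0};

        memcpy(a.addr, "\x02\x00\x00\x00\x00\x01", 6); a.type = 1;
        memcpy(b.addr, "\x02\x00\x00\x00\x00\x01", 6); b.type = 1;

        printf("byte-wise compare: %s\n",
               memcmp(&a, &b, sizeof(a)) ? "differ" : "match");
        return 0;
    }
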
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index a1e526419e9d25..cc62c2a01f54f8 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -1064,8 +1064,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ case WLAN_SP_MESH_PEERING_OPEN:
+ if (!matches_local)
+ event = OPN_RJCT;
+- if (!mesh_plink_free_count(sdata) ||
+- (sta->mesh->plid && sta->mesh->plid != plid))
++ else if (!mesh_plink_free_count(sdata) ||
++ (sta->mesh->plid && sta->mesh->plid != plid))
+ event = OPN_IGNR;
+ else
+ event = OPN_ACPT;
+@@ -1073,9 +1073,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ if (!matches_local)
+ event = CNF_RJCT;
+- if (!mesh_plink_free_count(sdata) ||
+- sta->mesh->llid != llid ||
+- (sta->mesh->plid && sta->mesh->plid != plid))
++ else if (!mesh_plink_free_count(sdata) ||
++ sta->mesh->llid != llid ||
++ (sta->mesh->plid && sta->mesh->plid != plid))
+ event = CNF_IGNR;
+ else
+ event = CNF_ACPT;
+@@ -1243,6 +1243,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
+ return;
+ }
+ elems = ieee802_11_parse_elems(baseaddr, len - baselen, true, NULL);
+- mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
+- kfree(elems);
++ if (elems) {
++ mesh_process_plink_frame(sdata, mgmt, elems, rx_status);
++ kfree(elems);
++ }
+ }
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0c9198997482bc..b14c809bcdea33 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -732,7 +732,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+ bool disable_mu_mimo = false;
+ struct ieee80211_sub_if_data *other;
+
+- list_for_each_entry_rcu(other, &local->interfaces, list) {
++ list_for_each_entry(other, &local->interfaces, list) {
+ if (other->vif.bss_conf.mu_mimo_owner) {
+ disable_mu_mimo = true;
+ break;
+@@ -5805,7 +5805,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
+ {
+ const struct ieee80211_multi_link_elem *ml;
+ const struct element *sub;
+- size_t ml_len;
++ ssize_t ml_len;
+ unsigned long removed_links = 0;
+ u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ u8 link_id;
+@@ -5821,6 +5821,8 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
+ elems->scratch + elems->scratch_len -
+ elems->scratch_pos,
+ WLAN_EID_FRAGMENT);
++ if (ml_len < 0)
++ return;
+
+ elems->ml_reconf = (const void *)elems->scratch_pos;
+ elems->ml_reconf_len = ml_len;
+@@ -5857,7 +5859,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
+ */
+ if (control &
+ IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+- link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
++ link_removal_timeout[link_id] = get_unaligned_le16(pos);
+ }
+
+ removed_links &= sdata->vif.valid_links;
+@@ -5882,8 +5884,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
+ continue;
+ }
+
+- link_delay = link_conf->beacon_int *
+- link_removal_timeout[link_id];
++ if (link_removal_timeout[link_id] < 1)
++ link_delay = 0;
++ else
++ link_delay = link_conf->beacon_int *
++ (link_removal_timeout[link_id] - 1);
+
+ if (!delay)
+ delay = link_delay;
+@@ -5974,7 +5979,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
+ link->u.mgd.dtim_period = elems->dtim_period;
+ link->u.mgd.have_beacon = true;
+ ifmgd->assoc_data->need_beacon = false;
+- if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
++ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
++ !ieee80211_is_s1g_beacon(hdr->frame_control)) {
+ link->conf->sync_tsf =
+ le64_to_cpu(mgmt->u.beacon.timestamp);
+ link->conf->sync_device_ts =
+@@ -7075,7 +7081,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+ sdata_info(sdata,
+ "failed to insert STA entry for the AP (error %d)\n",
+ err);
+- goto out_err;
++ goto out_release_chan;
+ }
+ } else
+ WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid));
+@@ -7086,8 +7092,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+
+ return 0;
+
++out_release_chan:
++ ieee80211_link_release_channel(link);
+ out_err:
+- ieee80211_link_release_channel(&sdata->deflink);
+ ieee80211_vif_set_links(sdata, 0, 0);
+ return err;
+ }
+@@ -7725,8 +7732,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+
+ rcu_read_lock();
+ beacon_ies = rcu_dereference(req->bss->beacon_ies);
+-
+- if (beacon_ies) {
++ if (!beacon_ies) {
+ /*
+ * Wait up to one beacon interval ...
+ * should this be more if we miss one?
+@@ -7799,6 +7805,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ req->reason_code, false);
++ drv_mgd_complete_tx(sdata->local, sdata, &info);
+ return 0;
+ }
+
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index cdf991e74ab990..2517a5521a5780 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
+ if (dur == LONG_MAX)
+ return false;
+
+- mod_delayed_work(local->workqueue, &local->roc_work, dur);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
+ return true;
+ }
+
+@@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
+ roc->notified = true;
+ }
+
+-static void ieee80211_hw_roc_start(struct work_struct *work)
++static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_start);
+@@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
+
+ trace_api_ready_on_channel(local);
+
+- ieee80211_queue_work(hw, &local->hw_roc_start);
++ wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
+
+@@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ tmp->started = true;
+ tmp->abort = true;
+ }
+- ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++ wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
+ return;
+ }
+
+@@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
+ ieee80211_hw_config(local, 0);
+ }
+
+- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+- msecs_to_jiffies(min_dur));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++ msecs_to_jiffies(min_dur));
+
+ /* tell userspace or send frame(s) */
+ list_for_each_entry(tmp, &local->roc_list, list) {
+@@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
+ _ieee80211_start_next_roc(local);
+ } else {
+ /* delay it a bit */
+- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
+- round_jiffies_relative(HZ/2));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
++ round_jiffies_relative(HZ / 2));
+ }
+ }
+
+@@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
+ }
+ }
+
+-static void ieee80211_roc_work(struct work_struct *work)
++static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, roc_work.work);
+@@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work)
+ mutex_unlock(&local->mtx);
+ }
+
+-static void ieee80211_hw_roc_done(struct work_struct *work)
++static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_done);
+@@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
+
+ trace_api_remain_on_channel_expired(local);
+
+- ieee80211_queue_work(hw, &local->hw_roc_done);
++ wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
+
+@@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ /* if not HW assist, just queue & schedule work */
+ if (!local->ops->remain_on_channel) {
+ list_add_tail(&roc->list, &local->roc_list);
+- ieee80211_queue_delayed_work(&local->hw,
+- &local->roc_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy,
++ &local->roc_work, 0);
+ } else {
+ /* otherwise actually kick it off here
+ * (for error handling)
+@@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ if (!cookie)
+ return -ENOENT;
+
+- flush_work(&local->hw_roc_start);
++ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+@@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ } else {
+ /* go through work struct to return to the operating channel */
+ found->abort = true;
+- mod_delayed_work(local->workqueue, &local->roc_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
+ }
+
+ out_unlock:
+@@ -940,6 +940,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ }
+
+ IEEE80211_SKB_CB(skb)->flags = flags;
++ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+
+ skb->dev = sdata->dev;
+
+@@ -994,9 +995,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+
+ void ieee80211_roc_setup(struct ieee80211_local *local)
+ {
+- INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
+- INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+- INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
++ wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
++ wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
++ wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
+ INIT_LIST_HEAD(&local->roc_list);
+ }
+
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index d5ea5f5bcf3a06..78e7ac6c0af0b0 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -37,7 +37,7 @@ void rate_control_rate_init(struct sta_info *sta)
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+- ieee80211_sta_set_rx_nss(&sta->deflink);
++ ieee80211_sta_init_nss(&sta->deflink);
+
+ if (!ref)
+ return;
+@@ -119,7 +119,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
+ rcu_read_unlock();
+ }
+
+- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
++ if (sta->uploaded)
++ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+ }
+
+ int ieee80211_rate_control_register(const struct rate_control_ops *ops)
+@@ -876,6 +877,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_supported_band *sband;
++ u32 mask = ~0;
+
+ rate_control_fill_sta_table(sta, info, dest, max_rates);
+
+@@ -888,9 +890,12 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
+ if (ieee80211_is_tx_data(skb))
+ rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
+
++ if (!(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
++ mask = sdata->rc_rateidx_mask[info->band];
++
+ if (dest[0].idx < 0)
+ __rate_control_send_low(&sdata->local->hw, sband, sta, info,
+- sdata->rc_rateidx_mask[info->band]);
++ mask);
+
+ if (sta)
+ rate_fixup_ratelist(vif, sband, info, dest, max_rates);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 8f6b6f56b65b43..604863cebc198a 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2112,7 +2112,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+ /* either the frame has been decrypted or will be dropped */
+ status->flag |= RX_FLAG_DECRYPTED;
+
+- if (unlikely(ieee80211_is_beacon(fc) && (result & RX_DROP_UNUSABLE) &&
++ if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
+ rx->sdata->dev))
+ cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
+ skb->data, skb->len);
+@@ -2726,7 +2726,10 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int hdrlen)
+ {
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+- struct ieee80211_mesh_fast_tx *entry = NULL;
++ struct ieee80211_mesh_fast_tx_key key = {
++ .type = MESH_FAST_TX_TYPE_FORWARDED
++ };
++ struct ieee80211_mesh_fast_tx *entry;
+ struct ieee80211s_hdr *mesh_hdr;
+ struct tid_ampdu_tx *tid_tx;
+ struct sta_info *sta;
+@@ -2735,9 +2738,13 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
+
+ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
+ if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
+- entry = mesh_fast_tx_get(sdata, mesh_hdr->eaddr1);
++ ether_addr_copy(key.addr, mesh_hdr->eaddr1);
+ else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
+- entry = mesh_fast_tx_get(sdata, skb->data);
++ ether_addr_copy(key.addr, skb->data);
++ else
++ return false;
++
++ entry = mesh_fast_tx_get(sdata, &key);
+ if (!entry)
+ return false;
+
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 0805aa8603c61c..d4a032f3457732 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -9,7 +9,7 @@
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2013-2015 Intel Mobile Communications GmbH
+ * Copyright 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2023 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+ */
+
+ #include <linux/if_arp.h>
+@@ -222,14 +222,18 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
+ }
+
+ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
++ struct ieee80211_channel *channel,
+ u32 scan_flags, const u8 *da)
+ {
+ if (!sdata)
+ return false;
+- /* accept broadcast for OCE */
+- if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP &&
+- is_broadcast_ether_addr(da))
++
++ /* accept broadcast on 6 GHz and for OCE */
++ if (is_broadcast_ether_addr(da) &&
++ (channel->band == NL80211_BAND_6GHZ ||
++ scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP))
+ return true;
++
+ if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ return true;
+ return ether_addr_equal(da, sdata->vif.addr);
+@@ -274,10 +278,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ * the beacon/proberesp rx gives us an opportunity to upgrade
+ * to active scan
+ */
+- set_bit(SCAN_BEACON_DONE, &local->scanning);
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ set_bit(SCAN_BEACON_DONE, &local->scanning);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+
++ channel = ieee80211_get_channel_khz(local->hw.wiphy,
++ ieee80211_rx_status_to_khz(rx_status));
++
++ if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
++ return;
++
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ struct cfg80211_scan_request *scan_req;
+ struct cfg80211_sched_scan_request *sched_scan_req;
+@@ -295,19 +305,15 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ /* ignore ProbeResp to foreign address or non-bcast (OCE)
+ * unless scanning with randomised address
+ */
+- if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags,
++ if (!ieee80211_scan_accept_presp(sdata1, channel,
++ scan_req_flags,
+ mgmt->da) &&
+- !ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags,
++ !ieee80211_scan_accept_presp(sdata2, channel,
++ sched_scan_req_flags,
+ mgmt->da))
+ return;
+ }
+
+- channel = ieee80211_get_channel_khz(local->hw.wiphy,
+- ieee80211_rx_status_to_khz(rx_status));
+-
+- if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+- return;
+-
+ bss = ieee80211_bss_info_update(local, rx_status,
+ mgmt, skb->len,
+ channel);
+@@ -340,7 +346,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
+ struct cfg80211_scan_request *req;
+ struct cfg80211_chan_def chandef;
+ u8 bands_used = 0;
+- int i, ielen, n_chans;
++ int i, ielen;
++ u32 *n_chans;
+ u32 flags = 0;
+
+ req = rcu_dereference_protected(local->scan_req,
+@@ -350,34 +357,34 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
+ return false;
+
+ if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
++ local->hw_scan_req->req.n_channels = req->n_channels;
++
+ for (i = 0; i < req->n_channels; i++) {
+ local->hw_scan_req->req.channels[i] = req->channels[i];
+ bands_used |= BIT(req->channels[i]->band);
+ }
+-
+- n_chans = req->n_channels;
+ } else {
+ do {
+ if (local->hw_scan_band == NUM_NL80211_BANDS)
+ return false;
+
+- n_chans = 0;
++ n_chans = &local->hw_scan_req->req.n_channels;
++ *n_chans = 0;
+
+ for (i = 0; i < req->n_channels; i++) {
+ if (req->channels[i]->band !=
+ local->hw_scan_band)
+ continue;
+- local->hw_scan_req->req.channels[n_chans] =
++ local->hw_scan_req->req.channels[(*n_chans)++] =
+ req->channels[i];
+- n_chans++;
++
+ bands_used |= BIT(req->channels[i]->band);
+ }
+
+ local->hw_scan_band++;
+- } while (!n_chans);
++ } while (!*n_chans);
+ }
+
+- local->hw_scan_req->req.n_channels = n_chans;
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+
+ if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+@@ -483,7 +490,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ * the scan was in progress; if there was none this will
+ * just be a no-op for the particular interface.
+ */
+- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (ieee80211_sdata_running(sdata))
+ wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
+ }
+@@ -505,7 +512,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
+
+ memcpy(&local->scan_info, info, sizeof(*info));
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+ EXPORT_SYMBOL(ieee80211_scan_completed);
+
+@@ -545,8 +552,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
+ /* We need to set power level at maximum rate for scanning. */
+ ieee80211_hw_config(local, 0);
+
+- ieee80211_queue_delayed_work(&local->hw,
+- &local->scan_work, 0);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+
+ return 0;
+ }
+@@ -603,8 +609,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+ lockdep_is_held(&local->mtx))))
+ return;
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+- round_jiffies_relative(0));
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ round_jiffies_relative(0));
+ }
+
+ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+@@ -631,6 +637,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+ cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
+ }
+ IEEE80211_SKB_CB(skb)->flags |= tx_flags;
++ IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+ ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
+ }
+ }
+@@ -716,15 +723,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ local->hw_scan_ies_bufsize *= n_bands;
+ }
+
+- local->hw_scan_req = kmalloc(
+- sizeof(*local->hw_scan_req) +
+- req->n_channels * sizeof(req->channels[0]) +
+- local->hw_scan_ies_bufsize, GFP_KERNEL);
++ local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
++ req.channels,
++ req->n_channels) +
++ local->hw_scan_ies_bufsize,
++ GFP_KERNEL);
+ if (!local->hw_scan_req)
+ return -ENOMEM;
+
+ local->hw_scan_req->req.ssids = req->ssids;
+ local->hw_scan_req->req.n_ssids = req->n_ssids;
++ /* None of the channels are actually set
++ * up but let UBSAN know the boundaries.
++ */
++ local->hw_scan_req->req.n_channels = req->n_channels;
++
+ ies = (u8 *)local->hw_scan_req +
+ sizeof(*local->hw_scan_req) +
+ req->n_channels * sizeof(req->channels[0]);
+@@ -795,8 +808,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ }
+
+ /* Now, just wait a bit and we are all done! */
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+- next_delay);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ next_delay);
+ return 0;
+ } else {
+ /* Do normal software scan */
+@@ -1043,7 +1056,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ local->next_scan_state = SCAN_SET_CHANNEL;
+ }
+
+-void ieee80211_scan_work(struct work_struct *work)
++void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, scan_work.work);
+@@ -1137,7 +1150,8 @@ void ieee80211_scan_work(struct work_struct *work)
+ }
+ } while (next_delay == 0);
+
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
++ next_delay);
+ goto out;
+
+ out_complete:
+@@ -1280,12 +1294,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ goto out;
+ }
+
+- /*
+- * If the work is currently running, it must be blocked on
+- * the mutex, but we'll set scan_sdata = NULL and it'll
+- * simply exit once it acquires the mutex.
+- */
+- cancel_delayed_work(&local->scan_work);
++ wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
+ /* and clean up */
+ memset(&local->scan_info, 0, sizeof(local->scan_info));
+ __ieee80211_scan_completed(&local->hw, true);
+@@ -1427,10 +1436,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
+
+ mutex_unlock(&local->mtx);
+
+- cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
++ cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
+ }
+
+-void ieee80211_sched_scan_stopped_work(struct work_struct *work)
++void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
++ struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local,
+@@ -1453,6 +1463,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
+ if (local->in_reconfig)
+ return;
+
+- schedule_work(&local->sched_scan_stopped_work);
++ wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
+ }
+ EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
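
The scan.c allocation hunk above replaces an open-coded "sizeof(*p) + n * sizeof(elem)" with the kernel's struct_size() helper, which sizes a structure ending in a flexible array member and saturates on overflow rather than under-allocating. A minimal userspace sketch of the same idea (the overflow check is hand-rolled here; the real macro lives in <linux/overflow.h>):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct chan { int freq; };

    struct scan_req {
        size_t n_channels;
        struct chan channels[];   /* flexible array member */
    };

    /* Rough stand-in for struct_size(): returns SIZE_MAX on overflow so
     * the subsequent malloc() fails instead of under-allocating. */
    static size_t req_size(size_t n)
    {
        if (n > (SIZE_MAX - sizeof(struct scan_req)) / sizeof(struct chan))
            return SIZE_MAX;
        return sizeof(struct scan_req) + n * sizeof(struct chan);
    }

    int main(void)
    {
        size_t n = 4;
        struct scan_req *req = malloc(req_size(n));

        if (!req)
            return 1;
        req->n_channels = n;   /* mirrors the UBSAN-bounds hint in the hunk */
        for (size_t i = 0; i < n; i++)
            req->channels[i].freq = 2412 + 5 * (int)i;
        printf("%zu channels, %zu bytes\n", req->n_channels, req_size(n));
        free(req);
        return 0;
    }

Setting req.n_channels immediately after the allocation, as the hunk does, is what lets UBSAN bounds-check later accesses into the flexible array.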
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 7751f8ba960eef..5d71e8d084c459 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -398,7 +398,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
+- if (!(sta->sta.valid_links & BIT(i)))
++ struct link_sta_info *link_sta;
++
++ link_sta = rcu_access_pointer(sta->link[i]);
++ if (!link_sta)
+ continue;
+
+ sta_remove_link(sta, i, false);
+@@ -911,6 +914,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_accept_plinks_update(sdata);
+
++ ieee80211_check_fast_xmit(sta);
++
+ return 0;
+ out_remove:
+ if (sta->sta.valid_links)
+@@ -1279,6 +1284,8 @@ static int _sta_info_move_state(struct sta_info *sta,
+ enum ieee80211_sta_state new_state,
+ bool recalc)
+ {
++ struct ieee80211_local *local = sta->local;
++
+ might_sleep();
+
+ if (sta->sta_state == new_state)
+@@ -1354,6 +1361,24 @@ static int _sta_info_move_state(struct sta_info *sta,
+ } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+ ieee80211_vif_dec_num_mcast(sta->sdata);
+ clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
++
++ /*
++ * If we have encryption offload, flush (station) queues
++ * (after ensuring concurrent TX completed) so we won't
++ * transmit anything later unencrypted if/when keys are
++ * also removed, which might otherwise happen depending
++ * on how the hardware offload works.
++ */
++ if (local->ops->set_key) {
++ synchronize_net();
++ if (local->ops->flush_sta)
++ drv_flush_sta(local, sta->sdata, sta);
++ else
++ ieee80211_flush_queues(local,
++ sta->sdata,
++ false);
++ }
++
+ ieee80211_clear_fast_xmit(sta);
+ ieee80211_clear_fast_rx(sta);
+ }
+@@ -1397,6 +1422,20 @@ static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc)
+ * after _part1 and before _part2!
+ */
+
++ /*
++ * There's a potential race in _part1 where we set WLAN_STA_BLOCK_BA
++ * but someone might have just gotten past a check, and not yet into
++ * queuing the work/creating the data/etc.
++ *
++ * Do another round of destruction so that the worker is certainly
++ * canceled before we later free the station.
++ *
++ * Since this is after synchronize_rcu()/synchronize_net() we're now
++ * certain that nobody can actually hold a reference to the STA and
++ * be calling e.g. ieee80211_start_tx_ba_session().
++ */
++ ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
++
+ might_sleep();
+ lockdep_assert_held(&local->sta_mtx);
+
+@@ -1405,18 +1444,6 @@ static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc)
+ WARN_ON_ONCE(ret);
+ }
+
+- /* Flush queues before removing keys, as that might remove them
+- * from hardware, and then depending on the offload method, any
+- * frames sitting on hardware queues might be sent out without
+- * any encryption at all.
+- */
+- if (local->ops->set_key) {
+- if (local->ops->flush_sta)
+- drv_flush_sta(local, sta->sdata, sta);
+- else
+- ieee80211_flush_queues(local, sta->sdata, false);
+- }
+-
+ /* now keys can no longer be reached */
+ ieee80211_free_sta_keys(local, sta);
+
+@@ -1704,7 +1731,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ skb_queue_head_init(&pending);
+
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+- spin_lock(&sta->ps_lock);
++ spin_lock_bh(&sta->ps_lock);
+ /* Send all buffered frames to the station */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+@@ -1733,7 +1760,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ */
+ clear_sta_flag(sta, WLAN_STA_PSPOLL);
+ clear_sta_flag(sta, WLAN_STA_UAPSD);
+- spin_unlock(&sta->ps_lock);
++ spin_unlock_bh(&sta->ps_lock);
+
+ atomic_dec(&ps->num_sta_ps);
+
+@@ -2990,7 +3017,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
+ WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
+
+ if (val)
+- sta->sta.max_amsdu_subframes = 4 << val;
++ sta->sta.max_amsdu_subframes = 4 << (4 - val);
+ }
+
+ #ifdef CONFIG_LOCKDEP
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 195b563132d6c5..f4af851f45cebe 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -3,7 +3,7 @@
+ * Copyright 2002-2005, Devicescape Software, Inc.
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+- * Copyright(c) 2020-2022 Intel Corporation
++ * Copyright(c) 2020-2024 Intel Corporation
+ */
+
+ #ifndef STA_INFO_H
+@@ -485,6 +485,8 @@ struct ieee80211_fragment_cache {
+ * same for non-MLD STA. This is used as key for searching link STA
+ * @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
+ * and set to the corresponding vif LinkId for MLD STA
++ * @op_mode_nss: NSS limit as set by operating mode notification, or 0
++ * @capa_nss: NSS limit as determined by local and peer capabilities
+ * @link_hash_node: hash node for rhashtable
+ * @sta: Points to the STA info
+ * @gtk: group keys negotiated with this station, if any
+@@ -521,6 +523,8 @@ struct link_sta_info {
+ u8 addr[ETH_ALEN];
+ u8 link_id;
+
++ u8 op_mode_nss, capa_nss;
++
+ struct rhlist_head link_hash_node;
+
+ struct sta_info *sta;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index d45d4be63dd877..45a093d3f1fa7f 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -5,7 +5,7 @@
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+- * Copyright (C) 2018-2022 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+ *
+ * Transmit and frame generation functions.
+ */
+@@ -705,11 +705,16 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
+ txrc.bss_conf = &tx->sdata->vif.bss_conf;
+ txrc.skb = tx->skb;
+ txrc.reported_rate.idx = -1;
+- txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
+
+- if (tx->sdata->rc_has_mcs_mask[info->band])
+- txrc.rate_idx_mcs_mask =
+- tx->sdata->rc_rateidx_mcs_mask[info->band];
++ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) {
++ txrc.rate_idx_mask = ~0;
++ } else {
++ txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
++
++ if (tx->sdata->rc_has_mcs_mask[info->band])
++ txrc.rate_idx_mcs_mask =
++ tx->sdata->rc_rateidx_mcs_mask[info->band];
++ }
+
+ txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+ tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+@@ -3034,7 +3039,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
+ sdata->vif.type == NL80211_IFTYPE_STATION)
+ goto out;
+
+- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
++ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
+ goto out;
+
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+@@ -3086,10 +3091,11 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
+ /* DA SA BSSID */
+ build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
++ rcu_read_lock();
+ link = rcu_dereference(sdata->link[tdls_link_id]);
+- if (WARN_ON_ONCE(!link))
+- break;
+- memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
++ if (!WARN_ON_ONCE(!link))
++ memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
++ rcu_read_unlock();
+ build.hdr_len = 24;
+ break;
+ }
+@@ -3912,6 +3918,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ goto begin;
+
+ skb = __skb_dequeue(&tx.skbs);
++ info = IEEE80211_SKB_CB(skb);
+
+ if (!skb_queue_empty(&tx.skbs)) {
+ spin_lock_bh(&fq->lock);
+@@ -3956,7 +3963,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ }
+
+ encap_out:
+- IEEE80211_SKB_CB(skb)->control.vif = vif;
++ info->control.vif = vif;
+
+ if (tx.sta &&
+ wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
+@@ -5304,8 +5311,10 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
+ if (beacon->tail)
+ skb_put_data(skb, beacon->tail, beacon->tail_len);
+
+- if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
++ if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) {
++ dev_kfree_skb(skb);
+ return NULL;
++ }
+
+ ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
+ chanctx_conf, csa_off_base);
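
The final tx.c hunk fixes a leak: ieee80211_beacon_get_ap() returned NULL on a beacon-protection failure without releasing the skb it had just built. The general shape of the fix — whoever constructs a buffer owns it until hand-off, so every early return must free it — in an illustrative userspace sketch (build_frame() and protect() are stand-ins, not the kernel functions):

    #include <stdlib.h>
    #include <string.h>

    /* protect() stands in for ieee80211_beacon_protect(). */
    static int protect(unsigned char *buf, size_t len)
    {
        (void)buf;
        return len ? 0 : -1;   /* pretend empty frames cannot be protected */
    }

    static unsigned char *build_frame(size_t len)
    {
        unsigned char *buf = malloc(len);

        if (!buf)
            return NULL;
        memset(buf, 0, len);

        if (protect(buf, len) < 0) {
            free(buf);        /* the fix: drop our reference on failure */
            return NULL;      /* previously the buffer leaked here */
        }
        return buf;           /* ownership passes to the caller */
    }

    int main(void)
    {
        unsigned char *f = build_frame(64);

        free(f);
        return 0;
    }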
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 8a6917cf63cf9d..02b5aaad2a155c 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -745,7 +745,9 @@ static void __iterate_interfaces(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata;
+ bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE;
+
+- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ list_for_each_entry_rcu(sdata, &local->interfaces, list,
++ lockdep_is_held(&local->iflist_mtx) ||
++ lockdep_is_held(&local->hw.wiphy->mtx)) {
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_MONITOR:
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
+@@ -2313,6 +2315,10 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
+
+ void ieee80211_stop_device(struct ieee80211_local *local)
+ {
++ local_bh_disable();
++ ieee80211_handle_queued_frames(local);
++ local_bh_enable();
++
+ ieee80211_led_radio(local, false);
+ ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
+
+@@ -2340,8 +2346,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+ */
+ if (aborted)
+ set_bit(SCAN_ABORTED, &local->scanning);
+- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+- flush_delayed_work(&local->scan_work);
++ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
++ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
+ }
+ }
+
+@@ -4356,7 +4362,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+ mutex_unlock(&local->mtx);
+ }
+
+-void ieee80211_dfs_radar_detected_work(struct work_struct *work)
++void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
++ struct wiphy_work *work)
+ {
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, radar_detected_work);
+@@ -4374,9 +4381,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
+ }
+ mutex_unlock(&local->chanctx_mtx);
+
+- wiphy_lock(local->hw.wiphy);
+ ieee80211_dfs_cac_cancel(local);
+- wiphy_unlock(local->hw.wiphy);
+
+ if (num_chanctx > 1)
+ /* XXX: multi-channel is not supported yet */
+@@ -4391,7 +4396,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
+
+ trace_api_radar_detected(local);
+
+- schedule_work(&local->radar_detected_work);
++ wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
+ }
+ EXPORT_SYMBOL(ieee80211_radar_detected);
+
+diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
+index b3a5c3e96a7205..bc13b1419981a9 100644
+--- a/net/mac80211/vht.c
++++ b/net/mac80211/vht.c
+@@ -4,7 +4,7 @@
+ *
+ * Portions of this file
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+- * Copyright (C) 2018 - 2023 Intel Corporation
++ * Copyright (C) 2018 - 2024 Intel Corporation
+ */
+
+ #include <linux/ieee80211.h>
+@@ -541,15 +541,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
+ return bw;
+ }
+
+-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
++void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
+ {
+ u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
+ bool support_160;
+
+- /* if we received a notification already don't overwrite it */
+- if (link_sta->pub->rx_nss)
+- return;
+-
+ if (link_sta->pub->eht_cap.has_eht) {
+ int i;
+ const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
+@@ -627,7 +623,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
+ rx_nss = max(vht_rx_nss, ht_rx_nss);
+ rx_nss = max(he_rx_nss, rx_nss);
+ rx_nss = max(eht_rx_nss, rx_nss);
+- link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
++ rx_nss = max_t(u8, 1, rx_nss);
++ link_sta->capa_nss = rx_nss;
++
++ /* that shouldn't be set yet, but we can handle it anyway */
++ if (link_sta->op_mode_nss)
++ link_sta->pub->rx_nss =
++ min_t(u8, rx_nss, link_sta->op_mode_nss);
++ else
++ link_sta->pub->rx_nss = rx_nss;
+ }
+
+ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+@@ -637,7 +641,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_sta_rx_bandwidth new_bw;
+ struct sta_opmode_info sta_opmode = {};
+ u32 changed = 0;
+- u8 nss, cur_nss;
++ u8 nss;
+
+ /* ignore - no support for BF yet */
+ if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
+@@ -647,23 +651,17 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
+ nss += 1;
+
+- if (link_sta->pub->rx_nss != nss) {
+- cur_nss = link_sta->pub->rx_nss;
+- /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which
+- * will set the same to max nss value calculated based on capability.
+- */
+- link_sta->pub->rx_nss = 0;
+- ieee80211_sta_set_rx_nss(link_sta);
+- /* Do not allow an nss change to rx_nss greater than max_nss
+- * negotiated and capped to APs capability during association.
+- */
+- if (nss <= link_sta->pub->rx_nss) {
+- link_sta->pub->rx_nss = nss;
+- sta_opmode.rx_nss = nss;
+- changed |= IEEE80211_RC_NSS_CHANGED;
+- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
++ if (link_sta->op_mode_nss != nss) {
++ if (nss <= link_sta->capa_nss) {
++ link_sta->op_mode_nss = nss;
++
++ if (nss != link_sta->pub->rx_nss) {
++ link_sta->pub->rx_nss = nss;
++ changed |= IEEE80211_RC_NSS_CHANGED;
++ sta_opmode.rx_nss = link_sta->pub->rx_nss;
++ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
++ }
+ } else {
+- link_sta->pub->rx_nss = cur_nss;
+ pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
+ link_sta->pub->addr, nss);
+ }
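
The vht.c changes split the receive NSS into two inputs: capa_nss, computed once from the HT/VHT/HE/EHT capabilities, and op_mode_nss, set by Operating Mode Notification frames. The effective rx_nss is the capability value clamped by a validated operating-mode value; an opmode value of zero means no notification was received. The arithmetic, as a small sketch:

    #include <stdio.h>

    typedef unsigned char u8;

    /* Mirrors the logic in ieee80211_sta_init_nss(): op_mode_nss == 0
     * means "no notification received", so the capability value wins;
     * otherwise the smaller of the two applies. */
    static u8 effective_rx_nss(u8 capa_nss, u8 op_mode_nss)
    {
        if (op_mode_nss)
            return op_mode_nss < capa_nss ? op_mode_nss : capa_nss;
        return capa_nss;
    }

    int main(void)
    {
        printf("%u\n", effective_rx_nss(4, 0));  /* 4: no opmode notification */
        printf("%u\n", effective_rx_nss(4, 2));  /* 2: peer limited itself */
        printf("%u\n", effective_rx_nss(2, 4));  /* 2: cannot exceed capability */
        return 0;
    }

In the __ieee80211_vht_handle_opmode() hunk, an opmode NSS above capa_nss is rejected with a rate-limited warning instead of being clamped, so the stored op_mode_nss is always valid.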
+diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
+index 8d2eabc71bbeb0..f13b07ebfb98a6 100644
+--- a/net/mac802154/llsec.c
++++ b/net/mac802154/llsec.c
+@@ -265,19 +265,27 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec,
+ return -ENOMEM;
+ }
+
++static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
++{
++ struct ieee802154_llsec_key_entry *pos;
++ struct mac802154_llsec_key *mkey;
++
++ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
++ mkey = container_of(pos->key, struct mac802154_llsec_key, key);
++
++ llsec_key_put(mkey);
++ kfree_sensitive(pos);
++}
++
+ int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+ const struct ieee802154_llsec_key_id *key)
+ {
+ struct ieee802154_llsec_key_entry *pos;
+
+ list_for_each_entry(pos, &sec->table.keys, list) {
+- struct mac802154_llsec_key *mkey;
+-
+- mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+-
+ if (llsec_key_id_equal(&pos->id, key)) {
+ list_del_rcu(&pos->list);
+- llsec_key_put(mkey);
++ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
+ return 0;
+ }
+ }
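
The llsec.c change converts a synchronous key free into an RCU-deferred one: list_del_rcu() unlinks the entry while readers may still hold it, and the refcount drop plus kfree_sensitive() happen in a callback after a grace period. A single-threaded mock of the pattern — the fake call_rcu() below invokes the callback immediately, whereas real RCU defers it past the grace period:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rcu_head {
        void (*func)(struct rcu_head *);
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct key_entry {
        int id;
        struct rcu_head rcu;
    };

    /* Mock: a real call_rcu() queues the callback until all pre-existing
     * RCU readers have finished; here we just run it inline. */
    static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
    {
        func(head);
    }

    static void key_del_rcu(struct rcu_head *rcu)
    {
        struct key_entry *e = container_of(rcu, struct key_entry, rcu);

        printf("freeing key %d after grace period\n", e->id);
        free(e);   /* the kernel uses kfree_sensitive() to scrub key material */
    }

    int main(void)
    {
        struct key_entry *e = malloc(sizeof(*e));

        if (!e)
            return 1;
        e->id = 7;
        /* unlink from the reader-visible list first (list_del_rcu), ... */
        call_rcu(&e->rcu, key_del_rcu);   /* ...then defer the free */
        return 0;
    }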
+diff --git a/net/mac802154/main.c b/net/mac802154/main.c
+index 357ece67432b1a..3054da2aa95803 100644
+--- a/net/mac802154/main.c
++++ b/net/mac802154/main.c
+@@ -159,8 +159,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy,
+ }
+
+ phy->symbol_duration = duration;
+- phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+- phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
++ phy->lifs_period =
++ (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
++ phy->sifs_period =
++ (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ EXPORT_SYMBOL(ieee802154_configure_durations);
+
+@@ -182,10 +184,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
+ * Should be done when all drivers sets this value.
+ */
+
+- wpan_phy->lifs_period =
+- (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+- wpan_phy->sifs_period =
+- (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
++ wpan_phy->lifs_period = (IEEE802154_LIFS_PERIOD *
++ wpan_phy->symbol_duration) / NSEC_PER_USEC;
++ wpan_phy->sifs_period = (IEEE802154_SIFS_PERIOD *
++ wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ }
+
+ int ieee802154_register_hw(struct ieee802154_hw *hw)
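
The main.c hunks are a straight unit-conversion fix: symbol_duration is held in nanoseconds while the interframe-spacing periods are consumed in microseconds, so the product must be divided by NSEC_PER_USEC (1000), not NSEC_PER_SEC, which truncated every realistic value to zero. Worked numbers, assuming the 16 µs O-QPSK symbol time and the standard 40-symbol LIFS period:

    #include <stdio.h>

    #define NSEC_PER_USEC 1000UL
    #define NSEC_PER_SEC  1000000000UL

    int main(void)
    {
        unsigned long symbol_duration_ns = 16000; /* 16 us, O-QPSK @ 2.4 GHz */
        unsigned long lifs_symbols = 40;          /* IEEE802154_LIFS_PERIOD */

        /* before the fix: 640000 / 1e9 == 0 us for every PHY */
        printf("broken: %lu us\n",
               lifs_symbols * symbol_duration_ns / NSEC_PER_SEC);
        /* after the fix: 640 us, the intended LIFS period */
        printf("fixed:  %lu us\n",
               lifs_symbols * symbol_duration_ns / NSEC_PER_USEC);
        return 0;
    }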
+diff --git a/net/mac802154/scan.c b/net/mac802154/scan.c
+index d9658f2c4ae6be..c9f72f271c4ea1 100644
+--- a/net/mac802154/scan.c
++++ b/net/mac802154/scan.c
+@@ -176,6 +176,7 @@ void mac802154_scan_worker(struct work_struct *work)
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, scan_work.work);
+ struct cfg802154_scan_request *scan_req;
++ enum nl802154_scan_types scan_req_type;
+ struct ieee802154_sub_if_data *sdata;
+ unsigned int scan_duration = 0;
+ struct wpan_phy *wpan_phy;
+@@ -209,6 +210,7 @@ void mac802154_scan_worker(struct work_struct *work)
+ }
+
+ wpan_phy = scan_req->wpan_phy;
++ scan_req_type = scan_req->type;
+ scan_req_duration = scan_req->duration;
+
+ /* Look for the next valid chan */
+@@ -246,7 +248,7 @@ void mac802154_scan_worker(struct work_struct *work)
+ goto end_scan;
+ }
+
+- if (scan_req->type == NL802154_SCAN_ACTIVE) {
++ if (scan_req_type == NL802154_SCAN_ACTIVE) {
+ ret = mac802154_transmit_beacon_req(local, sdata);
+ if (ret)
+ dev_err(&sdata->dev->dev,
+diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
+index 2a6f1ed763c9bd..6fbed5bb5c3e0d 100644
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -34,8 +34,8 @@ void ieee802154_xmit_sync_worker(struct work_struct *work)
+ if (res)
+ goto err_tx;
+
+- dev->stats.tx_packets++;
+- dev->stats.tx_bytes += skb->len;
++ DEV_STATS_INC(dev, tx_packets);
++ DEV_STATS_ADD(dev, tx_bytes, skb->len);
+
+ ieee802154_xmit_complete(&local->hw, skb, false);
+
+@@ -90,8 +90,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ if (ret)
+ goto err_wake_netif_queue;
+
+- dev->stats.tx_packets++;
+- dev->stats.tx_bytes += len;
++ DEV_STATS_INC(dev, tx_packets);
++ DEV_STATS_ADD(dev, tx_bytes, len);
+ } else {
+ local->tx_skb = skb;
+ queue_work(local->workqueue, &local->sync_tx_work);
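
The mac802154 tx.c hunks switch raw "dev->stats.tx_packets++" increments to DEV_STATS_INC()/DEV_STATS_ADD(). The plain increments are non-atomic read-modify-write operations, so concurrent transmit paths can lose counts; the macros use atomic adds instead. A rough C11 analogue:

    #include <stdatomic.h>
    #include <stdio.h>

    struct dev_stats {
        atomic_ulong tx_packets;
        atomic_ulong tx_bytes;
    };

    /* Rough analogues of the kernel's DEV_STATS_INC / DEV_STATS_ADD:
     * atomic RMW instead of a racy load-increment-store. */
    #define DEV_STATS_INC(s, f)    atomic_fetch_add(&(s)->f, 1)
    #define DEV_STATS_ADD(s, f, v) atomic_fetch_add(&(s)->f, (v))

    int main(void)
    {
        struct dev_stats stats = {0};

        DEV_STATS_INC(&stats, tx_packets);
        DEV_STATS_ADD(&stats, tx_bytes, 1500);
        printf("%lu packets, %lu bytes\n",
               atomic_load(&stats.tx_packets),
               atomic_load(&stats.tx_bytes));
        return 0;
    }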
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index f6be58b68c6f36..28be85d055330b 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -676,10 +676,14 @@ static __init int mctp_init(void)
+ if (rc)
+ goto err_unreg_routes;
+
+- mctp_device_init();
++ rc = mctp_device_init();
++ if (rc)
++ goto err_unreg_neigh;
+
+ return 0;
+
++err_unreg_neigh:
++ mctp_neigh_exit();
+ err_unreg_routes:
+ mctp_routes_exit();
+ err_unreg_proto:
+diff --git a/net/mctp/device.c b/net/mctp/device.c
+index acb97b25742896..85cc5f31f1e7c0 100644
+--- a/net/mctp/device.c
++++ b/net/mctp/device.c
+@@ -524,25 +524,31 @@ static struct notifier_block mctp_dev_nb = {
+ .priority = ADDRCONF_NOTIFY_PRIORITY,
+ };
+
+-void __init mctp_device_init(void)
++static const struct rtnl_msg_handler mctp_device_rtnl_msg_handlers[] = {
++ {THIS_MODULE, PF_MCTP, RTM_NEWADDR, mctp_rtm_newaddr, NULL, 0},
++ {THIS_MODULE, PF_MCTP, RTM_DELADDR, mctp_rtm_deladdr, NULL, 0},
++ {THIS_MODULE, PF_MCTP, RTM_GETADDR, NULL, mctp_dump_addrinfo, 0},
++};
++
++int __init mctp_device_init(void)
+ {
+- register_netdevice_notifier(&mctp_dev_nb);
++ int err;
+
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETADDR,
+- NULL, mctp_dump_addrinfo, 0);
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWADDR,
+- mctp_rtm_newaddr, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELADDR,
+- mctp_rtm_deladdr, NULL, 0);
++ register_netdevice_notifier(&mctp_dev_nb);
+ rtnl_af_register(&mctp_af_ops);
++
++ err = rtnl_register_many(mctp_device_rtnl_msg_handlers);
++ if (err) {
++ rtnl_af_unregister(&mctp_af_ops);
++ unregister_netdevice_notifier(&mctp_dev_nb);
++ }
++
++ return err;
+ }
+
+ void __exit mctp_device_exit(void)
+ {
++ rtnl_unregister_many(mctp_device_rtnl_msg_handlers);
+ rtnl_af_unregister(&mctp_af_ops);
+- rtnl_unregister(PF_MCTP, RTM_DELADDR);
+- rtnl_unregister(PF_MCTP, RTM_NEWADDR);
+- rtnl_unregister(PF_MCTP, RTM_GETADDR);
+-
+ unregister_netdevice_notifier(&mctp_dev_nb);
+ }
+diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c
+index ffa0f9e0983fba..590f642413e4ef 100644
+--- a/net/mctp/neigh.c
++++ b/net/mctp/neigh.c
+@@ -322,22 +322,29 @@ static struct pernet_operations mctp_net_ops = {
+ .exit = mctp_neigh_net_exit,
+ };
+
++static const struct rtnl_msg_handler mctp_neigh_rtnl_msg_handlers[] = {
++ {THIS_MODULE, PF_MCTP, RTM_NEWNEIGH, mctp_rtm_newneigh, NULL, 0},
++ {THIS_MODULE, PF_MCTP, RTM_DELNEIGH, mctp_rtm_delneigh, NULL, 0},
++ {THIS_MODULE, PF_MCTP, RTM_GETNEIGH, NULL, mctp_rtm_getneigh, 0},
++};
++
+ int __init mctp_neigh_init(void)
+ {
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWNEIGH,
+- mctp_rtm_newneigh, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELNEIGH,
+- mctp_rtm_delneigh, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETNEIGH,
+- NULL, mctp_rtm_getneigh, 0);
+-
+- return register_pernet_subsys(&mctp_net_ops);
++ int err;
++
++ err = register_pernet_subsys(&mctp_net_ops);
++ if (err)
++ return err;
++
++ err = rtnl_register_many(mctp_neigh_rtnl_msg_handlers);
++ if (err)
++ unregister_pernet_subsys(&mctp_net_ops);
++
++ return err;
+ }
+
+-void __exit mctp_neigh_exit(void)
++void mctp_neigh_exit(void)
+ {
++ rtnl_unregister_many(mctp_neigh_rtnl_msg_handlers);
+ unregister_pernet_subsys(&mctp_net_ops);
+- rtnl_unregister(PF_MCTP, RTM_GETNEIGH);
+- rtnl_unregister(PF_MCTP, RTM_DELNEIGH);
+- rtnl_unregister(PF_MCTP, RTM_NEWNEIGH);
+ }
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 7a47a58aa54b44..c6a815df9d358c 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -663,7 +663,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ if (!tagbits) {
+- kfree(key);
++ mctp_key_unref(key);
+ return ERR_PTR(-EBUSY);
+ }
+
+@@ -843,6 +843,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ /* copy message payload */
+ skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+
++ /* we need to copy the extensions, for MCTP flow data */
++ skb_ext_copy(skb2, skb);
++
+ /* do route */
+ rc = rt->output(rt, skb2);
+ if (rc)
+@@ -888,7 +891,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+- return rc;
++ goto out_free;
+ }
+ rt->dev = __mctp_dev_get(dev);
+ rcu_read_unlock();
+@@ -903,7 +906,8 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ rt->mtu = 0;
+
+ } else {
+- return -EINVAL;
++ rc = -EINVAL;
++ goto out_free;
+ }
+
+ spin_lock_irqsave(&rt->dev->addrs_lock, flags);
+@@ -966,12 +970,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ rc = mctp_do_fragment_route(rt, skb, mtu, tag);
+ }
+
++ /* route output functions consume the skb, even on error */
++ skb = NULL;
++
+ out_release:
+ if (!ext_rt)
+ mctp_route_release(rt);
+
+ mctp_dev_put(tmp_rt.dev);
+
++out_free:
++ kfree_skb(skb);
+ return rc;
+ }
+
+@@ -1401,26 +1410,39 @@ static struct pernet_operations mctp_net_ops = {
+ .exit = mctp_routes_net_exit,
+ };
+
++static const struct rtnl_msg_handler mctp_route_rtnl_msg_handlers[] = {
++ {THIS_MODULE, PF_MCTP, RTM_NEWROUTE, mctp_newroute, NULL, 0},
++ {THIS_MODULE, PF_MCTP, RTM_DELROUTE, mctp_delroute, NULL, 0},
++ {THIS_MODULE, PF_MCTP, RTM_GETROUTE, NULL, mctp_dump_rtinfo, 0},
++};
++
+ int __init mctp_routes_init(void)
+ {
++ int err;
++
+ dev_add_pack(&mctp_packet_type);
+
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETROUTE,
+- NULL, mctp_dump_rtinfo, 0);
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWROUTE,
+- mctp_newroute, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELROUTE,
+- mctp_delroute, NULL, 0);
++ err = register_pernet_subsys(&mctp_net_ops);
++ if (err)
++ goto err_pernet;
+
+- return register_pernet_subsys(&mctp_net_ops);
++ err = rtnl_register_many(mctp_route_rtnl_msg_handlers);
++ if (err)
++ goto err_rtnl;
++
++ return 0;
++
++err_rtnl:
++ unregister_pernet_subsys(&mctp_net_ops);
++err_pernet:
++ dev_remove_pack(&mctp_packet_type);
++ return err;
+ }
+
+ void mctp_routes_exit(void)
+ {
++ rtnl_unregister_many(mctp_route_rtnl_msg_handlers);
+ unregister_pernet_subsys(&mctp_net_ops);
+- rtnl_unregister(PF_MCTP, RTM_DELROUTE);
+- rtnl_unregister(PF_MCTP, RTM_NEWROUTE);
+- rtnl_unregister(PF_MCTP, RTM_GETROUTE);
+ dev_remove_pack(&mctp_packet_type);
+ }
+
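
The MCTP init paths above (device.c, neigh.c, route.c) all move to rtnl_register_many(), which registers a whole table of handlers and, unlike the old per-message rtnl_register_module() calls, reports failure; each init function then unwinds everything registered earlier, in reverse order, through a goto ladder. The shape of that pattern, with hypothetical hooks standing in for dev_add_pack(), register_pernet_subsys() and rtnl_register_many():

    #include <stdio.h>

    static int  reg_a(void)   { puts("a registered");   return 0; }
    static void unreg_a(void) { puts("a unregistered"); }
    static int  reg_b(void)   { puts("b registered");   return 0; }
    static void unreg_b(void) { puts("b unregistered"); }
    static int  reg_c(void)   { puts("c failed");       return -1; }

    static int subsys_init(void)
    {
        int err;

        err = reg_a();
        if (err)
            goto err_a;
        err = reg_b();
        if (err)
            goto err_b;
        err = reg_c();
        if (err)
            goto err_c;
        return 0;

    err_c:              /* unwind strictly in reverse registration order */
        unreg_b();
    err_b:
        unreg_a();
    err_a:
        return err;
    }

    int main(void)
    {
        return subsys_init() ? 1 : 0;
    }

The matching exit functions (mctp_device_exit() and friends) tear down in the same reverse order, which is why they read as mirror images of the init ladders.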
+diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
+index 92ea4158f7fc4d..a944490a724d3c 100644
+--- a/net/mctp/test/route-test.c
++++ b/net/mctp/test/route-test.c
+@@ -354,7 +354,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
+
+ skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
+- KUNIT_EXPECT_EQ(test, skb->len, 1);
++ KUNIT_EXPECT_EQ(test, skb2->len, 1);
+
+ skb_free_datagram(sock->sk, skb2);
+
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 1af29af6538858..43e8343df0db71 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1154,7 +1154,7 @@ static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
+
+ if ((all || type == NETCONFA_INPUT) &&
+ nla_put_s32(skb, NETCONFA_INPUT,
+- mdev->input_enabled) < 0)
++ READ_ONCE(mdev->input_enabled)) < 0)
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+@@ -1303,11 +1303,12 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
+ {
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct net *net = sock_net(skb->sk);
+- struct hlist_head *head;
++ struct {
++ unsigned long ifindex;
++ } *ctx = (void *)cb->ctx;
+ struct net_device *dev;
+ struct mpls_dev *mdev;
+- int idx, s_idx;
+- int h, s_h;
++ int err = 0;
+
+ if (cb->strict_check) {
+ struct netlink_ext_ack *extack = cb->extack;
+@@ -1324,40 +1325,23 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
+ }
+ }
+
+- s_h = cb->args[0];
+- s_idx = idx = cb->args[1];
+-
+- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+- idx = 0;
+- head = &net->dev_index_head[h];
+- rcu_read_lock();
+- cb->seq = net->dev_base_seq;
+- hlist_for_each_entry_rcu(dev, head, index_hlist) {
+- if (idx < s_idx)
+- goto cont;
+- mdev = mpls_dev_get(dev);
+- if (!mdev)
+- goto cont;
+- if (mpls_netconf_fill_devconf(skb, mdev,
+- NETLINK_CB(cb->skb).portid,
+- nlh->nlmsg_seq,
+- RTM_NEWNETCONF,
+- NLM_F_MULTI,
+- NETCONFA_ALL) < 0) {
+- rcu_read_unlock();
+- goto done;
+- }
+- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+-cont:
+- idx++;
+- }
+- rcu_read_unlock();
++ rcu_read_lock();
++ for_each_netdev_dump(net, dev, ctx->ifindex) {
++ mdev = mpls_dev_get(dev);
++ if (!mdev)
++ continue;
++ err = mpls_netconf_fill_devconf(skb, mdev,
++ NETLINK_CB(cb->skb).portid,
++ nlh->nlmsg_seq,
++ RTM_NEWNETCONF,
++ NLM_F_MULTI,
++ NETCONFA_ALL);
++ if (err < 0)
++ break;
+ }
+-done:
+- cb->args[0] = h;
+- cb->args[1] = idx;
++ rcu_read_unlock();
+
+- return skb->len;
++ return err;
+ }
+
+ #define MPLS_PERDEV_SYSCTL_OFFSET(field) \
+@@ -2745,6 +2729,15 @@ static struct rtnl_af_ops mpls_af_ops __read_mostly = {
+ .get_stats_af_size = mpls_get_stats_af_size,
+ };
+
++static const struct rtnl_msg_handler mpls_rtnl_msg_handlers[] __initdata_or_module = {
++ {THIS_MODULE, PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0},
++ {THIS_MODULE, PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0},
++ {THIS_MODULE, PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes, 0},
++ {THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
++ mpls_netconf_get_devconf, mpls_netconf_dump_devconf,
++ RTNL_FLAG_DUMP_UNLOCKED},
++};
++
+ static int __init mpls_init(void)
+ {
+ int err;
+@@ -2763,23 +2756,25 @@ static int __init mpls_init(void)
+
+ rtnl_af_register(&mpls_af_ops);
+
+- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
+- mpls_rtm_newroute, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
+- mpls_rtm_delroute, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
+- mpls_getroute, mpls_dump_routes, 0);
+- rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
+- mpls_netconf_get_devconf,
+- mpls_netconf_dump_devconf, 0);
+- err = ipgre_tunnel_encap_add_mpls_ops();
++ err = rtnl_register_many(mpls_rtnl_msg_handlers);
+ if (err)
++ goto out_unregister_rtnl_af;
++
++ err = ipgre_tunnel_encap_add_mpls_ops();
++ if (err) {
+ pr_err("Can't add mpls over gre tunnel ops\n");
++ goto out_unregister_rtnl;
++ }
+
+ err = 0;
+ out:
+ return err;
+
++out_unregister_rtnl:
++ rtnl_unregister_many(mpls_rtnl_msg_handlers);
++out_unregister_rtnl_af:
++ rtnl_af_unregister(&mpls_af_ops);
++ dev_remove_pack(&mpls_packet_type);
+ out_unregister_pernet:
+ unregister_pernet_subsys(&mpls_net_ops);
+ goto out;
+diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
+index 533d082f0701e5..45d1e6a157fc7d 100644
+--- a/net/mpls/mpls_gso.c
++++ b/net/mpls/mpls_gso.c
+@@ -27,6 +27,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
+ __be16 mpls_protocol;
+ unsigned int mpls_hlen;
+
++ if (!skb_inner_network_header_was_set(skb))
++ goto out;
++
+ skb_reset_network_header(skb);
+ mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
+ if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
+diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
+index e72b518c5d0266..de75df904a0034 100644
+--- a/net/mptcp/ctrl.c
++++ b/net/mptcp/ctrl.c
+@@ -87,6 +87,43 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
+ }
+
+ #ifdef CONFIG_SYSCTL
++static int mptcp_set_scheduler(const struct net *net, const char *name)
++{
++ struct mptcp_pernet *pernet = mptcp_get_pernet(net);
++ struct mptcp_sched_ops *sched;
++ int ret = 0;
++
++ rcu_read_lock();
++ sched = mptcp_sched_find(name);
++ if (sched)
++ strscpy(pernet->scheduler, name, MPTCP_SCHED_NAME_MAX);
++ else
++ ret = -ENOENT;
++ rcu_read_unlock();
++
++ return ret;
++}
++
++static int proc_scheduler(struct ctl_table *ctl, int write,
++ void *buffer, size_t *lenp, loff_t *ppos)
++{
++ const struct net *net = current->nsproxy->net_ns;
++ char val[MPTCP_SCHED_NAME_MAX];
++ struct ctl_table tbl = {
++ .data = val,
++ .maxlen = MPTCP_SCHED_NAME_MAX,
++ };
++ int ret;
++
++ strscpy(val, mptcp_get_scheduler(net), MPTCP_SCHED_NAME_MAX);
++
++ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
++ if (write && ret == 0)
++ ret = mptcp_set_scheduler(net, val);
++
++ return ret;
++}
++
+ static struct ctl_table mptcp_sysctl_table[] = {
+ {
+ .procname = "enabled",
+@@ -139,7 +176,7 @@ static struct ctl_table mptcp_sysctl_table[] = {
+ .procname = "scheduler",
+ .maxlen = MPTCP_SCHED_NAME_MAX,
+ .mode = 0644,
+- .proc_handler = proc_dostring,
++ .proc_handler = proc_scheduler,
+ },
+ {}
+ };
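
The ctrl.c hunk replaces proc_dostring with a custom handler so a write to the scheduler sysctl is staged in a local buffer, parsed, and only committed if mptcp_sched_find() recognises the name; a bare proc_dostring would have accepted any string. The validate-then-commit shape in plain C — the scheduler names here are invented for the sketch:

    #include <stdio.h>
    #include <string.h>

    #define NAME_MAX_LEN 16

    static char current_sched[NAME_MAX_LEN] = "default";
    static const char *known[] = { "default", "bpf_burst" };

    /* Stage the new value, validate it, and only then overwrite the
     * live setting -- mirroring proc_scheduler()'s tbl.data buffer. */
    static int set_scheduler(const char *val)
    {
        for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
            if (!strcmp(val, known[i])) {
                snprintf(current_sched, sizeof(current_sched), "%s", val);
                return 0;
            }
        }
        return -1;   /* -ENOENT in the kernel handler */
    }

    int main(void)
    {
        printf("set bogus: %d (still %s)\n",
               set_scheduler("bogus"), current_sched);
        printf("set valid: %d (now %s)\n",
               set_scheduler("bpf_burst"), current_sched);
        return 0;
    }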
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index a536586742f28c..b2199cc2823843 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -13,17 +13,22 @@
+ #include <uapi/linux/mptcp.h>
+ #include "protocol.h"
+
+-static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
++static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *sf;
+ struct nlattr *start;
+ u32 flags = 0;
++ bool slow;
+ int err;
+
++ if (inet_sk_state_load(sk) == TCP_LISTEN)
++ return 0;
++
+ start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
+ if (!start)
+ return -EMSGSIZE;
+
++ slow = lock_sock_fast(sk);
+ rcu_read_lock();
+ sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
+ if (!sf) {
+@@ -63,17 +68,19 @@ static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+ sf->map_data_len) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
+ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
+- nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
++ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) {
+ err = -EMSGSIZE;
+ goto nla_failure;
+ }
+
+ rcu_read_unlock();
++ unlock_sock_fast(sk, slow);
+ nla_nest_end(skb, start);
+ return 0;
+
+ nla_failure:
+ rcu_read_unlock();
++ unlock_sock_fast(sk, slow);
+ nla_nest_cancel(skb, start);
+ return err;
+ }
+@@ -88,7 +95,7 @@ static size_t subflow_get_info_size(const struct sock *sk)
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
+ nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
+- nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
++ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
+ nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */
+ nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index bceaab8dd8e460..a29ff901df7588 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -52,29 +52,28 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
+
+ mptcp_set_owner_r(skb, sk);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
++ mptcp_sk(sk)->bytes_received += skb->len;
+
+ sk->sk_data_ready(sk);
+
+ mptcp_data_unlock(sk);
+ }
+
+-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt)
++void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt)
+ {
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *skb;
+
+- mptcp_data_lock(sk);
+ skb = skb_peek_tail(&sk->sk_receive_queue);
+ if (skb) {
+ WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
+- pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,
++ pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
+ MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
+ MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
+ MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
+ MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
+ }
+
+- pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
+- mptcp_data_unlock(sk);
++ pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
+ }
+diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
+index a0990c365a2ea1..3dc49c3169f20e 100644
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -15,15 +15,20 @@ static const struct snmp_mib mptcp_snmp_list[] = {
+ SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
+ SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
+ SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
++ SNMP_MIB_ITEM("MPCapableEndpAttempt", MPTCP_MIB_MPCAPABLEENDPATTEMPT),
+ SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
+ SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
+ SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
+ SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
++ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
+ SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
++ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
+ SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
+ SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
+ SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+ SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
++ SNMP_MIB_ITEM("DSSCorruptionFallback", MPTCP_MIB_DSSCORRUPTIONFALLBACK),
++ SNMP_MIB_ITEM("DSSCorruptionReset", MPTCP_MIB_DSSCORRUPTIONRESET),
+ SNMP_MIB_ITEM("InfiniteMapTx", MPTCP_MIB_INFINITEMAPTX),
+ SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+ SNMP_MIB_ITEM("DSSNoMatchTCP", MPTCP_MIB_DSSTCPMISMATCH),
+@@ -66,6 +71,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
+ SNMP_MIB_ITEM("RcvWndShared", MPTCP_MIB_RCVWNDSHARED),
+ SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
+ SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
++ SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
+ SNMP_MIB_SENTINEL
+ };
+
+diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
+index cae71d9472529d..007d7a74947272 100644
+--- a/net/mptcp/mib.h
++++ b/net/mptcp/mib.h
+@@ -8,15 +8,20 @@ enum linux_mptcp_mib_field {
+ MPTCP_MIB_MPCAPABLEPASSIVEACK, /* Received third ACK with MP_CAPABLE */
+ MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
+ MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
++ MPTCP_MIB_MPCAPABLEENDPATTEMPT, /* Prohibited MPC to port-based endp */
+ MPTCP_MIB_TOKENFALLBACKINIT, /* Could not init/allocate token */
+ MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
+ MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
+ MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
++ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */
+ MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
++ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */
+ MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
+ MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
+ MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
++ MPTCP_MIB_DSSCORRUPTIONFALLBACK,/* DSS corruption detected, fallback */
++ MPTCP_MIB_DSSCORRUPTIONRESET, /* DSS corruption detected, MPJ subflow reset */
+ MPTCP_MIB_INFINITEMAPTX, /* Sent an infinite mapping */
+ MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
+ MPTCP_MIB_DSSTCPMISMATCH, /* DSS-mapping did not map with TCP's sequence numbers */
+@@ -65,6 +70,7 @@ enum linux_mptcp_mib_field {
+ * conflict with another subflow while updating msk rcv wnd
+ */
+ MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
++ MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
+ __MPTCP_MIB_MAX
+ };
+
+@@ -95,4 +101,11 @@ static inline void __MPTCP_INC_STATS(struct net *net,
+ __SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+ }
+
++static inline void MPTCP_DEC_STATS(struct net *net,
++ enum linux_mptcp_mib_field field)
++{
++ if (likely(net->mib.mptcp_statistics))
++ SNMP_DEC_STATS(net->mib.mptcp_statistics, field);
++}
++
+ bool mptcp_mib_alloc(struct net *net);
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index cd15ec73073e05..2ad9006a157aef 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -108,6 +108,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->suboptions |= OPTION_MPTCP_DSS;
+ mp_opt->use_map = 1;
+ mp_opt->mpc_map = 1;
++ mp_opt->use_ack = 0;
+ mp_opt->data_len = get_unaligned_be16(ptr);
+ ptr += 2;
+ }
+@@ -116,44 +117,44 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+ ptr += 2;
+ }
+- pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
++ pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
+ version, flags, opsize, mp_opt->sndr_key,
+ mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
+ break;
+
+ case MPTCPOPT_MP_JOIN:
+- mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
+ if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
++ mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYN;
+ mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ mp_opt->join_id = *ptr++;
+ mp_opt->token = get_unaligned_be32(ptr);
+ ptr += 4;
+ mp_opt->nonce = get_unaligned_be32(ptr);
+ ptr += 4;
+- pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
++ pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
+ mp_opt->backup, mp_opt->join_id,
+ mp_opt->token, mp_opt->nonce);
+ } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
++ mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYNACK;
+ mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ mp_opt->join_id = *ptr++;
+ mp_opt->thmac = get_unaligned_be64(ptr);
+ ptr += 8;
+ mp_opt->nonce = get_unaligned_be32(ptr);
+ ptr += 4;
+- pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
++ pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ mp_opt->backup, mp_opt->join_id,
+ mp_opt->thmac, mp_opt->nonce);
+ } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
++ mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
+ ptr += 2;
+ memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+- pr_debug("MP_JOIN hmac");
+- } else {
+- mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
++ pr_debug("MP_JOIN hmac\n");
+ }
+ break;
+
+ case MPTCPOPT_DSS:
+- pr_debug("DSS");
++ pr_debug("DSS\n");
+ ptr++;
+
+ /* we must clear 'mpc_map' be able to detect MP_CAPABLE
+@@ -168,7 +169,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
+ mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
+
+- pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
++ pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
+ mp_opt->data_fin, mp_opt->dsn64,
+ mp_opt->use_map, mp_opt->ack64,
+ mp_opt->use_ack);
+@@ -206,7 +207,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ ptr += 4;
+ }
+
+- pr_debug("data_ack=%llu", mp_opt->data_ack);
++ pr_debug("data_ack=%llu\n", mp_opt->data_ack);
+ }
+
+ if (mp_opt->use_map) {
+@@ -230,7 +231,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ ptr += 2;
+ }
+
+- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+ mp_opt->data_seq, mp_opt->subflow_seq,
+ mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
+ mp_opt->csum);
+@@ -292,7 +293,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->ahmac = get_unaligned_be64(ptr);
+ ptr += 8;
+ }
+- pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
++ pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
+ (mp_opt->addr.family == AF_INET6) ? "6" : "",
+ mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
+ break;
+@@ -308,7 +309,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
+ for (i = 0; i < mp_opt->rm_list.nr; i++)
+ mp_opt->rm_list.ids[i] = *ptr++;
+- pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
++ pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
+ break;
+
+ case MPTCPOPT_MP_PRIO:
+@@ -317,7 +318,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+
+ mp_opt->suboptions |= OPTION_MPTCP_PRIO;
+ mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
+- pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
++ pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
+ break;
+
+ case MPTCPOPT_MP_FASTCLOSE:
+@@ -328,7 +329,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ mp_opt->rcvr_key = get_unaligned_be64(ptr);
+ ptr += 8;
+ mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
+- pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
++ pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
+ break;
+
+ case MPTCPOPT_RST:
+@@ -342,7 +343,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ flags = *ptr++;
+ mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
+ mp_opt->reset_reason = *ptr;
+- pr_debug("MP_RST: transient=%u reason=%u",
++ pr_debug("MP_RST: transient=%u reason=%u\n",
+ mp_opt->reset_transient, mp_opt->reset_reason);
+ break;
+
+@@ -353,7 +354,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ ptr += 2;
+ mp_opt->suboptions |= OPTION_MPTCP_FAIL;
+ mp_opt->fail_seq = get_unaligned_be64(ptr);
+- pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
++ pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
+ break;
+
+ default:
+@@ -416,7 +417,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ *size = TCPOLEN_MPTCP_MPC_SYN;
+ return true;
+ } else if (subflow->request_join) {
+- pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
++ pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
+ subflow->local_nonce);
+ opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+ opts->join_id = subflow->local_id;
+@@ -499,7 +500,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ *size = TCPOLEN_MPTCP_MPC_ACK;
+ }
+
+- pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
++ pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
+ subflow, subflow->local_key, subflow->remote_key,
+ data_len);
+
+@@ -508,7 +509,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+ memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+ *size = TCPOLEN_MPTCP_MPJ_ACK;
+- pr_debug("subflow=%p", subflow);
++ pr_debug("subflow=%p\n", subflow);
+
+ /* we can use the full delegate action helper only from BH context
+ * If we are in process context - sk is flushing the backlog at
+@@ -674,7 +675,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+
+ *size = len;
+ if (drop_other_suboptions) {
+- pr_debug("drop other suboptions");
++ pr_debug("drop other suboptions\n");
+ opts->suboptions = 0;
+
+ /* note that e.g. DSS could have written into the memory
+@@ -694,7 +695,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ } else {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
+ }
+- pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
++ pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
+ opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
+
+ return true;
+@@ -725,7 +726,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
+ opts->rm_list = rm_list;
+
+ for (i = 0; i < opts->rm_list.nr; i++)
+- pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
++ pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
+ MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr);
+ return true;
+ }
+@@ -751,7 +752,7 @@ static bool mptcp_established_options_mp_prio(struct sock *sk,
+ opts->suboptions |= OPTION_MPTCP_PRIO;
+ opts->backup = subflow->request_bkup;
+
+- pr_debug("prio=%d", opts->backup);
++ pr_debug("prio=%d\n", opts->backup);
+
+ return true;
+ }
+@@ -793,7 +794,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
+ opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
+ opts->rcvr_key = msk->remote_key;
+
+- pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
++ pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
+ return true;
+ }
+@@ -815,7 +816,7 @@ static bool mptcp_established_options_mp_fail(struct sock *sk,
+ opts->suboptions |= OPTION_MPTCP_FAIL;
+ opts->fail_seq = subflow->map_seq;
+
+- pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
++ pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
+
+ return true;
+@@ -903,16 +904,16 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ opts->csum_reqd = subflow_req->csum_reqd;
+ opts->allow_join_id0 = subflow_req->allow_join_id0;
+ *size = TCPOLEN_MPTCP_MPC_SYNACK;
+- pr_debug("subflow_req=%p, local_key=%llu",
++ pr_debug("subflow_req=%p, local_key=%llu\n",
+ subflow_req, subflow_req->local_key);
+ return true;
+ } else if (subflow_req->mp_join) {
+ opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
+- opts->backup = subflow_req->backup;
++ opts->backup = subflow_req->request_bkup;
+ opts->join_id = subflow_req->local_id;
+ opts->thmac = subflow_req->thmac;
+ opts->nonce = subflow_req->local_nonce;
+- pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
++ pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ subflow_req, opts->backup, opts->join_id,
+ opts->thmac, opts->nonce);
+ *size = TCPOLEN_MPTCP_MPJ_SYNACK;
+@@ -957,13 +958,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+
+ if (subflow->remote_key_valid &&
+ (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
+- ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo))) {
++ ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) &&
++ (!mp_opt->echo || subflow->mp_join)))) {
+ /* subflows are fully established as soon as we get any
+ * additional ack, including ADD_ADDR.
+ */
+- subflow->fully_established = 1;
+- WRITE_ONCE(msk->fully_established, true);
+- goto check_notify;
++ goto set_fully_established;
+ }
+
+ /* If the first established packet does not contain MP_CAPABLE + data
+@@ -982,10 +982,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ if (mp_opt->deny_join_id0)
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+
+-set_fully_established:
+ if (unlikely(!READ_ONCE(msk->pm.server_side)))
+ pr_warn_once("bogus mpc option on established client sk");
+- mptcp_subflow_fully_established(subflow, mp_opt);
++
++set_fully_established:
++ mptcp_data_lock((struct sock *)msk);
++ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
++ mptcp_data_unlock((struct sock *)msk);
+
+ check_notify:
+ /* if the subflow is not already linked into the conn_list, we can't
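
The MP_JOIN hunk in options.c stops setting the catch-all OPTIONS_MPTCP_MPJ flag up front and instead tags exactly which variant was seen (SYN, SYN/ACK, or ACK), keyed off the option length, so a malformed length no longer leaves a half-set flag that must be cleared. Classifying a TLV by its length, using the RFC 8684 option sizes:

    #include <stdio.h>

    /* Option lengths from RFC 8684 for the three MP_JOIN variants. */
    #define TCPOLEN_MPTCP_MPJ_SYN    12
    #define TCPOLEN_MPTCP_MPJ_SYNACK 16
    #define TCPOLEN_MPTCP_MPJ_ACK    24

    enum mpj_kind { MPJ_NONE, MPJ_SYN, MPJ_SYNACK, MPJ_ACK };

    /* One flag per well-formed size; anything else is simply ignored,
     * as in the patched mptcp_parse_option(). */
    static enum mpj_kind classify_mp_join(unsigned int opsize)
    {
        switch (opsize) {
        case TCPOLEN_MPTCP_MPJ_SYN:    return MPJ_SYN;
        case TCPOLEN_MPTCP_MPJ_SYNACK: return MPJ_SYNACK;
        case TCPOLEN_MPTCP_MPJ_ACK:    return MPJ_ACK;
        default:                       return MPJ_NONE;
        }
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               classify_mp_join(12), classify_mp_join(16),
               classify_mp_join(24), classify_mp_join(13));
        return 0;
    }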
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index d8da5374d9e133..157a574fab0ccf 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -20,7 +20,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ {
+ u8 add_addr = READ_ONCE(msk->pm.addr_signal);
+
+- pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
++ pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
+
+ lockdep_assert_held(&msk->pm.lock);
+
+@@ -46,7 +46,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
+ {
+ u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
+
+- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
++ pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
+
+ if (rm_addr) {
+ MPTCP_ADD_STATS(sock_net((struct sock *)msk),
+@@ -61,23 +61,13 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
+ return 0;
+ }
+
+-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
+-{
+- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
+-
+- spin_lock_bh(&msk->pm.lock);
+- mptcp_pm_nl_rm_subflow_received(msk, rm_list);
+- spin_unlock_bh(&msk->pm.lock);
+- return 0;
+-}
+-
+ /* path manager event handlers */
+
+ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
+ {
+ struct mptcp_pm_data *pm = &msk->pm;
+
+- pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
++ pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);
+
+ WRITE_ONCE(pm->server_side, server_side);
+ mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
+@@ -101,7 +91,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+
+ subflows_max = mptcp_pm_get_subflows_max(msk);
+
+- pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
++ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
+ subflows_max, READ_ONCE(pm->accept_subflow));
+
+ /* try to avoid acquiring the lock below */
+@@ -125,7 +115,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+ enum mptcp_pm_status new_status)
+ {
+- pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
++ pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
+ BIT(new_status));
+ if (msk->pm.status & BIT(new_status))
+ return false;
+@@ -140,7 +130,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
+ struct mptcp_pm_data *pm = &msk->pm;
+ bool announce = false;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ spin_lock_bh(&pm->lock);
+
+@@ -164,14 +154,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
+
+ void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+ {
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+ }
+
+ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
+ {
+ struct mptcp_pm_data *pm = &msk->pm;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ if (!READ_ONCE(pm->work_pending))
+ return;
+@@ -223,7 +213,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ struct mptcp_pm_data *pm = &msk->pm;
+
+- pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
++ pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
+ READ_ONCE(pm->accept_addr));
+
+ mptcp_event_addr_announced(ssk, addr);
+@@ -237,7 +227,9 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ } else {
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ }
+- } else if (!READ_ONCE(pm->accept_addr)) {
++ /* id0 should not have a different address */
++ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
++ (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+ mptcp_pm_announce_addr(msk, addr, true);
+ mptcp_pm_add_addr_send_ack(msk);
+ } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+@@ -254,7 +246,7 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+ {
+ struct mptcp_pm_data *pm = &msk->pm;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ spin_lock_bh(&pm->lock);
+
+@@ -278,7 +270,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+ struct mptcp_pm_data *pm = &msk->pm;
+ u8 i;
+
+- pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
++ pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
+
+ for (i = 0; i < rm_list->nr; i++)
+ mptcp_event_addr_removed(msk, rm_list->ids[i]);
+@@ -310,19 +302,19 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+- pr_debug("fail_seq=%llu", fail_seq);
++ pr_debug("fail_seq=%llu\n", fail_seq);
+
+ if (!READ_ONCE(msk->allow_infinite_fallback))
+ return;
+
+ if (!subflow->fail_tout) {
+- pr_debug("send MP_FAIL response and infinite map");
++ pr_debug("send MP_FAIL response and infinite map\n");
+
+ subflow->send_mp_fail = 1;
+ subflow->send_infinite_map = 1;
+ tcp_send_ack(sk);
+ } else {
+- pr_debug("MP_FAIL response received");
++ pr_debug("MP_FAIL response received\n");
+ WRITE_ONCE(subflow->fail_tout, 0);
+ }
+ }
+@@ -427,15 +419,24 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ return mptcp_pm_nl_get_local_id(msk, &skc_local);
+ }
+
++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
++{
++ struct mptcp_addr_info skc_local;
++
++ mptcp_local_address((struct sock_common *)skc, &skc_local);
++
++ if (mptcp_pm_is_userspace(msk))
++ return mptcp_userspace_pm_is_backup(msk, &skc_local);
++
++ return mptcp_pm_nl_is_backup(msk, &skc_local);
++}
++
+ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
+ u8 *flags, int *ifindex)
+ {
+ *flags = 0;
+ *ifindex = 0;
+
+- if (!id)
+- return 0;
+-
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
+ return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 9661f38126826d..d8c47ca86de4fb 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -135,12 +135,15 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
+ {
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_addr_info cur;
+- struct sock_common *skc;
+
+ list_for_each_entry(subflow, list, node) {
+- skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++ if (!((1 << inet_sk_state_load(ssk)) &
++ (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
++ continue;
+
+- remote_address(skc, &cur);
++ remote_address((struct sock_common *)ssk, &cur);
+ if (mptcp_addresses_equal(&cur, daddr, daddr->port))
+ return true;
+ }
+@@ -148,11 +151,13 @@ static bool lookup_subflow_by_daddr(const struct list_head *list,
+ return false;
+ }
+
+-static struct mptcp_pm_addr_entry *
++static bool
+ select_local_address(const struct pm_nl_pernet *pernet,
+- const struct mptcp_sock *msk)
++ const struct mptcp_sock *msk,
++ struct mptcp_pm_addr_entry *new_entry)
+ {
+- struct mptcp_pm_addr_entry *entry, *ret = NULL;
++ struct mptcp_pm_addr_entry *entry;
++ bool found = false;
+
+ msk_owned_by_me(msk);
+
+@@ -164,17 +169,21 @@ select_local_address(const struct pm_nl_pernet *pernet,
+ if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
+ continue;
+
+- ret = entry;
++ *new_entry = *entry;
++ found = true;
+ break;
+ }
+ rcu_read_unlock();
+- return ret;
++
++ return found;
+ }
+
+-static struct mptcp_pm_addr_entry *
+-select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
++static bool
++select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
++ struct mptcp_pm_addr_entry *new_entry)
+ {
+- struct mptcp_pm_addr_entry *entry, *ret = NULL;
++ struct mptcp_pm_addr_entry *entry;
++ bool found = false;
+
+ rcu_read_lock();
+ /* do not keep any additional per socket state, just signal
+@@ -189,11 +198,13 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
+ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+ continue;
+
+- ret = entry;
++ *new_entry = *entry;
++ found = true;
+ break;
+ }
+ rcu_read_unlock();
+- return ret;
++
++ return found;
+ }
+
+ unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
+@@ -284,7 +295,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ struct mptcp_sock *msk = entry->sock;
+ struct sock *sk = (struct sock *)msk;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ if (!msk)
+ return;
+@@ -303,7 +314,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ spin_lock_bh(&msk->pm.lock);
+
+ if (!mptcp_pm_should_add_signal_addr(msk)) {
+- pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
++ pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
+ mptcp_pm_announce_addr(msk, &entry->addr, false);
+ mptcp_pm_add_addr_send_ack(msk);
+ entry->retrans_times++;
+@@ -328,15 +339,21 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ {
+ struct mptcp_pm_add_entry *entry;
+ struct sock *sk = (struct sock *)msk;
++ struct timer_list *add_timer = NULL;
+
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
+- if (entry && (!check_id || entry->addr.id == addr->id))
++ if (entry && (!check_id || entry->addr.id == addr->id)) {
+ entry->retrans_times = ADD_ADDR_RETRANS_MAX;
++ add_timer = &entry->add_timer;
++ }
++ if (!check_id && entry)
++ list_del(&entry->list);
+ spin_unlock_bh(&msk->pm.lock);
+
+- if (entry && (!check_id || entry->addr.id == addr->id))
+- sk_stop_timer_sync(sk, &entry->add_timer);
++ /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */
++ if (add_timer)
++ sk_stop_timer_sync(sk, add_timer);
+
+ return entry;
+ }
+@@ -353,7 +370,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
+
+ if (add_entry) {
+- if (mptcp_pm_is_kernel(msk))
++ if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
+ return false;
+
+ sk_reset_timer(sk, &add_entry->add_timer,
+@@ -384,7 +401,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ struct sock *sk = (struct sock *)msk;
+ LIST_HEAD(free_list);
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ spin_lock_bh(&msk->pm.lock);
+ list_splice_init(&msk->pm.anno_list, &free_list);
+@@ -396,19 +413,6 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ }
+ }
+
+-static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
+- const struct mptcp_addr_info *addr)
+-{
+- int i;
+-
+- for (i = 0; i < nr; i++) {
+- if (addrs[i].id == addr->id)
+- return true;
+- }
+-
+- return false;
+-}
+-
+ /* Fill all the remote addresses into the array addrs[],
+ * and return the array size.
+ */
+@@ -440,18 +444,34 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
+ msk->pm.subflows++;
+ addrs[i++] = remote;
+ } else {
++ DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
++
++ /* Forbid creation of new subflows matching existing
++ * ones, possibly already created by incoming ADD_ADDR
++ */
++ bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
++ mptcp_for_each_subflow(msk, subflow)
++ if (READ_ONCE(subflow->local_id) == local->id)
++ __set_bit(subflow->remote_id, unavail_id);
++
+ mptcp_for_each_subflow(msk, subflow) {
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ remote_address((struct sock_common *)ssk, &addrs[i]);
+- addrs[i].id = subflow->remote_id;
++ addrs[i].id = READ_ONCE(subflow->remote_id);
+ if (deny_id0 && !addrs[i].id)
+ continue;
+
++ if (test_bit(addrs[i].id, unavail_id))
++ continue;
++
+ if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
+ continue;
+
+- if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
+- msk->pm.subflows < subflows_max) {
++ if (msk->pm.subflows < subflows_max) {
++ /* forbid creating multiple address towards
++ * this id
++ */
++ __set_bit(addrs[i].id, unavail_id);
+ msk->pm.subflows++;
+ i++;
+ }
+@@ -467,13 +487,12 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
+
+- pr_debug("send ack for %s",
++ pr_debug("send ack for %s\n",
+ prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
+
+ slow = lock_sock_fast(ssk);
+ if (prio) {
+ subflow->send_mp_prio = 1;
+- subflow->backup = backup;
+ subflow->request_bkup = backup;
+ }
+
+@@ -519,8 +538,9 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
+ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ {
+ struct sock *sk = (struct sock *)msk;
+- struct mptcp_pm_addr_entry *local;
++ struct mptcp_pm_addr_entry local;
+ unsigned int add_addr_signal_max;
++ bool signal_and_subflow = false;
+ unsigned int local_addr_max;
+ struct pm_nl_pernet *pernet;
+ unsigned int subflows_max;
+@@ -561,8 +581,6 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+
+ /* check first for announce */
+ if (msk->pm.add_addr_signaled < add_addr_signal_max) {
+- local = select_signal_address(pernet, msk);
+-
+ /* due to racing events on both ends we can reach here while
+ * previous add address is still running: if we invoke now
+ * mptcp_pm_announce_addr(), that will fail and the
+@@ -573,16 +591,30 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
+ return;
+
+- if (local) {
+- if (mptcp_pm_alloc_anno_list(msk, &local->addr)) {
+- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+- msk->pm.add_addr_signaled++;
+- mptcp_pm_announce_addr(msk, &local->addr, false);
+- mptcp_pm_nl_addr_send_ack(msk);
+- }
+- }
++ if (!select_signal_address(pernet, msk, &local))
++ goto subflow;
++
++ /* If the alloc fails, we are on memory pressure, not worth
++ * continuing, and trying to create subflows.
++ */
++ if (!mptcp_pm_alloc_anno_list(msk, &local.addr))
++ return;
++
++ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
++ msk->pm.add_addr_signaled++;
++
++ /* Special case for ID0: set the correct ID */
++ if (local.addr.id == msk->mpc_endpoint_id)
++ local.addr.id = 0;
++
++ mptcp_pm_announce_addr(msk, &local.addr, false);
++ mptcp_pm_nl_addr_send_ack(msk);
++
++ if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
++ signal_and_subflow = true;
+ }
+
++subflow:
+ /* check if should create a new subflow */
+ while (msk->pm.local_addr_used < local_addr_max &&
+ msk->pm.subflows < subflows_max) {
+@@ -590,21 +622,28 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ bool fullmesh;
+ int i, nr;
+
+- local = select_local_address(pernet, msk);
+- if (!local)
++ if (signal_and_subflow)
++ signal_and_subflow = false;
++ else if (!select_local_address(pernet, msk, &local))
+ break;
+
+- fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
++ fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
++
++ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+
+- msk->pm.local_addr_used++;
+- __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+- nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
++ /* Special case for ID0: set the correct ID */
++ if (local.addr.id == msk->mpc_endpoint_id)
++ local.addr.id = 0;
++ else /* local_addr_used is not decr for ID 0 */
++ msk->pm.local_addr_used++;
++
++ nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
+ if (nr == 0)
+ continue;
+
+ spin_unlock_bh(&msk->pm.lock);
+ for (i = 0; i < nr; i++)
+- __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
++ __mptcp_subflow_connect(sk, &local.addr, &addrs[i]);
+ spin_lock_bh(&msk->pm.lock);
+ }
+ mptcp_pm_nl_check_work_pending(msk);
+@@ -629,6 +668,7 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ {
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_pm_addr_entry *entry;
++ struct mptcp_addr_info mpc_addr;
+ struct pm_nl_pernet *pernet;
+ unsigned int subflows_max;
+ int i = 0;
+@@ -636,6 +676,8 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ pernet = pm_nl_get_pernet_from_msk(msk);
+ subflows_max = mptcp_pm_get_subflows_max(msk);
+
++ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
+@@ -646,7 +688,13 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+
+ if (msk->pm.subflows < subflows_max) {
+ msk->pm.subflows++;
+- addrs[i++] = entry->addr;
++ addrs[i] = entry->addr;
++
++ /* Special case for ID0: set the correct ID */
++ if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port))
++ addrs[i].id = 0;
++
++ i++;
+ }
+ }
+ rcu_read_unlock();
+@@ -682,12 +730,13 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ unsigned int add_addr_accept_max;
+ struct mptcp_addr_info remote;
+ unsigned int subflows_max;
++ bool sf_created = false;
+ int i, nr;
+
+ add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+ subflows_max = mptcp_pm_get_subflows_max(msk);
+
+- pr_debug("accepted %d:%d remote family %d",
++ pr_debug("accepted %d:%d remote family %d\n",
+ msk->pm.add_addr_accepted, add_addr_accept_max,
+ msk->pm.remote.family);
+
+@@ -709,15 +758,29 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ if (nr == 0)
+ return;
+
+- msk->pm.add_addr_accepted++;
+- if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+- msk->pm.subflows >= subflows_max)
+- WRITE_ONCE(msk->pm.accept_addr, false);
+-
+ spin_unlock_bh(&msk->pm.lock);
+ for (i = 0; i < nr; i++)
+- __mptcp_subflow_connect(sk, &addrs[i], &remote);
++ if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
++ sf_created = true;
+ spin_lock_bh(&msk->pm.lock);
++
++ if (sf_created) {
++ /* add_addr_accepted is not decr for ID 0 */
++ if (remote.id)
++ msk->pm.add_addr_accepted++;
++ if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
++ msk->pm.subflows >= subflows_max)
++ WRITE_ONCE(msk->pm.accept_addr, false);
++ }
++}
++
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++ const struct mptcp_addr_info *remote)
++{
++ struct mptcp_addr_info mpc_remote;
++
++ remote_address((struct sock_common *)msk, &mpc_remote);
++ return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
+ }
+
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+@@ -731,9 +794,12 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ !mptcp_pm_should_rm_signal(msk))
+ return;
+
+- subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+- if (subflow)
+- mptcp_pm_send_ack(msk, subflow, false, false);
++ mptcp_for_each_subflow(msk, subflow) {
++ if (__mptcp_subflow_active(subflow)) {
++ mptcp_pm_send_ack(msk, subflow, false, false);
++ break;
++ }
++ }
+ }
+
+ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+@@ -743,7 +809,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ {
+ struct mptcp_subflow_context *subflow;
+
+- pr_debug("bkup=%d", bkup);
++ pr_debug("bkup=%d\n", bkup);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+@@ -766,11 +832,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ return -EINVAL;
+ }
+
+-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
+-{
+- return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
+-}
+-
+ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list,
+ enum linux_mptcp_mib_field rm_type)
+@@ -779,7 +840,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ struct sock *sk = (struct sock *)msk;
+ u8 i;
+
+- pr_debug("%s rm_list_nr %d",
++ pr_debug("%s rm_list_nr %d\n",
+ rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
+
+ msk_owned_by_me(msk);
+@@ -799,41 +860,49 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ u8 remote_id = READ_ONCE(subflow->remote_id);
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+- u8 id = subflow->local_id;
++ u8 id = subflow_get_local_id(subflow);
+
+- if (rm_type == MPTCP_MIB_RMADDR && subflow->remote_id != rm_id)
++ if ((1 << inet_sk_state_load(ssk)) &
++ (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE))
++ continue;
++ if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ continue;
+- if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
++ if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
+ continue;
+
+- pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
++ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
+ rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+- i, rm_id, subflow->local_id, subflow->remote_id,
+- msk->mpc_endpoint_id);
++ i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, how);
++ removed |= subflow->request_join;
+
+ /* the following takes care of updating the subflows counter */
+ mptcp_close_ssk(sk, ssk, subflow);
+ spin_lock_bh(&msk->pm.lock);
+
+- removed = true;
+- __MPTCP_INC_STATS(sock_net(sk), rm_type);
++ if (rm_type == MPTCP_MIB_RMSUBFLOW)
++ __MPTCP_INC_STATS(sock_net(sk), rm_type);
+ }
+- if (rm_type == MPTCP_MIB_RMSUBFLOW)
+- __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
++
++ if (rm_type == MPTCP_MIB_RMADDR)
++ __MPTCP_INC_STATS(sock_net(sk), rm_type);
++
+ if (!removed)
+ continue;
+
+ if (!mptcp_pm_is_kernel(msk))
+ continue;
+
+- if (rm_type == MPTCP_MIB_RMADDR) {
+- msk->pm.add_addr_accepted--;
+- WRITE_ONCE(msk->pm.accept_addr, true);
+- } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
+- msk->pm.local_addr_used--;
++ if (rm_type == MPTCP_MIB_RMADDR && rm_id &&
++ !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
++ /* Note: if the subflow has been closed before, this
++ * add_addr_accepted counter will not be decremented.
++ */
++ if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
++ WRITE_ONCE(msk->pm.accept_addr, true);
+ }
+ }
+ }
+@@ -843,8 +912,8 @@ static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+ mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
+ }
+
+-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+- const struct mptcp_rm_list *rm_list)
++static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
++ const struct mptcp_rm_list *rm_list)
+ {
+ mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
+ }
+@@ -860,7 +929,7 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
+
+ spin_lock_bh(&msk->pm.lock);
+
+- pr_debug("msk=%p status=%x", msk, pm->status);
++ pr_debug("msk=%p status=%x\n", msk, pm->status);
+ if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+ pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+ mptcp_pm_nl_add_addr_received(msk);
+@@ -901,7 +970,8 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
+ }
+
+ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+- struct mptcp_pm_addr_entry *entry)
++ struct mptcp_pm_addr_entry *entry,
++ bool needs_id)
+ {
+ struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
+ unsigned int addr_max;
+@@ -949,7 +1019,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ }
+ }
+
+- if (!entry->addr.id) {
++ if (!entry->addr.id && needs_id) {
+ find_next:
+ entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
+ MPTCP_PM_MAX_ADDR_ID + 1,
+@@ -960,7 +1030,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ }
+ }
+
+- if (!entry->addr.id)
++ if (!entry->addr.id && needs_id)
+ goto out;
+
+ __set_bit(entry->addr.id, pernet->id_bitmap);
+@@ -1048,8 +1118,14 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ if (err)
+ return err;
+
++ /* We don't use mptcp_set_state() here because it needs to be called
++ * under the msk socket lock. For the moment, that will not bring
++ * anything more than only calling inet_sk_state_store(), because the
++ * old status is known (TCP_CLOSE).
++ */
+ inet_sk_state_store(newsk, TCP_LISTEN);
+ lock_sock(ssk);
++ WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true);
+ err = __inet_listen_sk(ssk, backlog);
+ if (!err)
+ mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
+@@ -1087,13 +1163,31 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
+ entry->ifindex = 0;
+ entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
+ entry->lsk = NULL;
+- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
++ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
+ if (ret < 0)
+ kfree(entry);
+
+ return ret;
+ }
+
++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
++{
++ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
++ struct mptcp_pm_addr_entry *entry;
++ bool backup = false;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
++ if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
++ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ return backup;
++}
++
+ #define MPTCP_PM_CMD_GRP_OFFSET 0
+ #define MPTCP_PM_EV_GRP_OFFSET 1
+
+@@ -1277,20 +1371,27 @@ static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
+ return pm_nl_get_pernet(genl_info_net(info));
+ }
+
+-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
++static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
++ struct mptcp_addr_info *addr)
+ {
+ struct mptcp_sock *msk;
+ long s_slot = 0, s_num = 0;
+
+ while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ struct sock *sk = (struct sock *)msk;
++ struct mptcp_addr_info mpc_addr;
+
+ if (!READ_ONCE(msk->fully_established) ||
+ mptcp_pm_is_userspace(msk))
+ goto next;
+
++ /* if the endp linked to the init sf is re-added with a != ID */
++ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
++ if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
++ msk->mpc_endpoint_id = addr->id;
+ mptcp_pm_create_subflow_or_signal_addr(msk);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+@@ -1303,6 +1404,18 @@ static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
+ return 0;
+ }
+
++static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
++ struct genl_info *info)
++{
++ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
++
++ if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
++ mptcp_pm_addr_policy, info->extack) &&
++ tb[MPTCP_PM_ADDR_ATTR_ID])
++ return true;
++ return false;
++}
++
+ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+@@ -1314,8 +1427,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ if (ret < 0)
+ return ret;
+
+- if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
+- GENL_SET_ERR_MSG(info, "flags must have signal when using port");
++ if (addr.addr.port && !address_use_port(&addr)) {
++ GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port");
+ return -EINVAL;
+ }
+
+@@ -1344,13 +1457,14 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ goto out_free;
+ }
+ }
+- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
++ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
++ !mptcp_pm_has_addr_attr_id(attr, info));
+ if (ret < 0) {
+ GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
+ goto out_free;
+ }
+
+- mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
++ mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
+ return 0;
+
+ out_free:
+@@ -1365,6 +1479,10 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int
+ struct sock *sk = (struct sock *)msk;
+ struct net *net = sock_net(sk);
+
++ /* No entries with ID 0 */
++ if (id == 0)
++ return 0;
++
+ rcu_read_lock();
+ entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
+ if (entry) {
+@@ -1383,7 +1501,6 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+
+ entry = mptcp_pm_del_add_timer(msk, addr, false);
+ if (entry) {
+- list_del(&entry->list);
+ kfree(entry);
+ return true;
+ }
+@@ -1391,6 +1508,12 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ return false;
+ }
+
++static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
++ const struct mptcp_addr_info *addr)
++{
++ return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
++}
++
+ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr,
+ bool force)
+@@ -1398,28 +1521,38 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ struct mptcp_rm_list list = { .nr = 0 };
+ bool ret;
+
+- list.ids[list.nr++] = addr->id;
++ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+
+ ret = remove_anno_list_by_saddr(msk, addr);
+ if (ret || force) {
+ spin_lock_bh(&msk->pm.lock);
++ if (ret) {
++ __set_bit(addr->id, msk->pm.id_avail_bitmap);
++ msk->pm.add_addr_signaled--;
++ }
+ mptcp_pm_remove_addr(msk, &list);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+ return ret;
+ }
+
++static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id)
++{
++ /* If it was marked as used, and not ID 0, decrement local_addr_used */
++ if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) &&
++ id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0))
++ msk->pm.local_addr_used--;
++}
++
+ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ const struct mptcp_pm_addr_entry *entry)
+ {
+ const struct mptcp_addr_info *addr = &entry->addr;
+- struct mptcp_rm_list list = { .nr = 0 };
++ struct mptcp_rm_list list = { .nr = 1 };
+ long s_slot = 0, s_num = 0;
+ struct mptcp_sock *msk;
+
+- pr_debug("remove_id=%d", addr->id);
+-
+- list.ids[list.nr++] = addr->id;
++ pr_debug("remove_id=%d\n", addr->id);
+
+ while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ struct sock *sk = (struct sock *)msk;
+@@ -1437,8 +1570,22 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+ !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+- if (remove_subflow)
+- mptcp_pm_remove_subflow(msk, &list);
++
++ list.ids[0] = mptcp_endp_get_local_id(msk, addr);
++ if (remove_subflow) {
++ spin_lock_bh(&msk->pm.lock);
++ mptcp_pm_nl_rm_subflow_received(msk, &list);
++ spin_unlock_bh(&msk->pm.lock);
++ }
++
++ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
++ spin_lock_bh(&msk->pm.lock);
++ __mark_subflow_endp_available(msk, list.ids[0]);
++ spin_unlock_bh(&msk->pm.lock);
++ }
++
++ if (msk->mpc_endpoint_id == entry->addr.id)
++ msk->mpc_endpoint_id = 0;
+ release_sock(sk);
+
+ next:
+@@ -1473,6 +1620,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net,
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_remove_addr(msk, &list);
+ mptcp_pm_nl_rm_subflow_received(msk, &list);
++ __mark_subflow_endp_available(msk, 0);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+
+@@ -1532,47 +1680,63 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
+ return ret;
+ }
+
++/* Called from the userspace PM only */
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ {
+ struct mptcp_rm_list alist = { .nr = 0 };
+ struct mptcp_pm_addr_entry *entry;
++ int anno_nr = 0;
+
+ list_for_each_entry(entry, rm_list, list) {
+- remove_anno_list_by_saddr(msk, &entry->addr);
+- if (alist.nr < MPTCP_RM_IDS_MAX)
+- alist.ids[alist.nr++] = entry->addr.id;
++ if (alist.nr >= MPTCP_RM_IDS_MAX)
++ break;
++
++ /* only delete if either announced or matching a subflow */
++ if (remove_anno_list_by_saddr(msk, &entry->addr))
++ anno_nr++;
++ else if (!lookup_subflow_by_saddr(&msk->conn_list,
++ &entry->addr))
++ continue;
++
++ alist.ids[alist.nr++] = entry->addr.id;
+ }
+
+ if (alist.nr) {
+ spin_lock_bh(&msk->pm.lock);
++ msk->pm.add_addr_signaled -= anno_nr;
+ mptcp_pm_remove_addr(msk, &alist);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+ }
+
+-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+- struct list_head *rm_list)
++/* Called from the in-kernel PM only */
++static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
++ struct list_head *rm_list)
+ {
+ struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, rm_list, list) {
+- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
+- slist.nr < MPTCP_RM_IDS_MAX)
+- slist.ids[slist.nr++] = entry->addr.id;
++ if (slist.nr < MPTCP_RM_IDS_MAX &&
++ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
++ slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+
+- if (remove_anno_list_by_saddr(msk, &entry->addr) &&
+- alist.nr < MPTCP_RM_IDS_MAX)
+- alist.ids[alist.nr++] = entry->addr.id;
++ if (alist.nr < MPTCP_RM_IDS_MAX &&
++ remove_anno_list_by_saddr(msk, &entry->addr))
++ alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
+ }
+
++ spin_lock_bh(&msk->pm.lock);
+ if (alist.nr) {
+- spin_lock_bh(&msk->pm.lock);
++ msk->pm.add_addr_signaled -= alist.nr;
+ mptcp_pm_remove_addr(msk, &alist);
+- spin_unlock_bh(&msk->pm.lock);
+ }
+ if (slist.nr)
+- mptcp_pm_remove_subflow(msk, &slist);
++ mptcp_pm_nl_rm_subflow_received(msk, &slist);
++ /* Reset counters: maybe some subflows have been removed before */
++ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
++ msk->pm.local_addr_used = 0;
++ spin_unlock_bh(&msk->pm.lock);
+ }
+
+ static void mptcp_nl_remove_addrs_list(struct net *net,
+@@ -1847,10 +2011,11 @@ static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
+ {
+ struct mptcp_rm_list list = { .nr = 0 };
+
+- list.ids[list.nr++] = addr->id;
++ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
+
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_nl_rm_subflow_received(msk, &list);
++ __mark_subflow_endp_available(msk, list.ids[0]);
+ mptcp_pm_create_subflow_or_signal_addr(msk);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+@@ -1999,7 +2164,7 @@ static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
+ if (WARN_ON_ONCE(!sf))
+ return -EINVAL;
+
+- if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
++ if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index d042d32beb4df0..6738bad048cece 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -26,7 +26,8 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
+ }
+
+ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+- struct mptcp_pm_addr_entry *entry)
++ struct mptcp_pm_addr_entry *entry,
++ bool needs_id)
+ {
+ DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ struct mptcp_pm_addr_entry *match = NULL;
+@@ -41,7 +42,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
+ addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
+- if (addr_match && entry->addr.id == 0)
++ if (addr_match && entry->addr.id == 0 && needs_id)
+ entry->addr.id = e->addr.id;
+ id_match = (e->addr.id == entry->addr.id);
+ if (addr_match && id_match) {
+@@ -64,7 +65,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ }
+
+ *e = *entry;
+- if (!e->addr.id)
++ if (!e->addr.id && needs_id)
+ e->addr.id = find_next_zero_bit(id_bitmap,
+ MPTCP_PM_MAX_ADDR_ID + 1,
+ 1);
+@@ -130,10 +131,21 @@ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ struct mptcp_addr_info *skc)
+ {
+- struct mptcp_pm_addr_entry new_entry;
++ struct mptcp_pm_addr_entry *entry = NULL, *e, new_entry;
+ __be16 msk_sport = ((struct inet_sock *)
+ inet_sk((struct sock *)msk))->inet_sport;
+
++ spin_lock_bh(&msk->pm.lock);
++ list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
++ if (mptcp_addresses_equal(&e->addr, skc, false)) {
++ entry = e;
++ break;
++ }
++ }
++ spin_unlock_bh(&msk->pm.lock);
++ if (entry)
++ return entry->addr.id;
++
+ memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry));
+ new_entry.addr = *skc;
+ new_entry.addr.id = 0;
+@@ -142,7 +154,25 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ if (new_entry.addr.port == msk_sport)
+ new_entry.addr.port = 0;
+
+- return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
++ return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
++}
++
++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
++ struct mptcp_addr_info *skc)
++{
++ struct mptcp_pm_addr_entry *entry;
++ bool backup = false;
++
++ spin_lock_bh(&msk->pm.lock);
++ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
++ if (mptcp_addresses_equal(&entry->addr, skc, false)) {
++ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
++ break;
++ }
++ }
++ spin_unlock_bh(&msk->pm.lock);
++
++ return backup;
+ }
+
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+@@ -184,7 +214,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ goto announce_err;
+ }
+
+- err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
++ err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "did not match address and id");
+ goto announce_err;
+@@ -208,6 +238,40 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ return err;
+ }
+
++static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
++ struct genl_info *info)
++{
++ struct mptcp_rm_list list = { .nr = 0 };
++ struct mptcp_subflow_context *subflow;
++ struct sock *sk = (struct sock *)msk;
++ bool has_id_0 = false;
++ int err = -EINVAL;
++
++ lock_sock(sk);
++ mptcp_for_each_subflow(msk, subflow) {
++ if (subflow->local_id == 0) {
++ has_id_0 = true;
++ break;
++ }
++ }
++ if (!has_id_0) {
++ GENL_SET_ERR_MSG(info, "address with id 0 not found");
++ goto remove_err;
++ }
++
++ list.ids[list.nr++] = 0;
++
++ spin_lock_bh(&msk->pm.lock);
++ mptcp_pm_remove_addr(msk, &list);
++ spin_unlock_bh(&msk->pm.lock);
++
++ err = 0;
++
++remove_err:
++ release_sock(sk);
++ return err;
++}
++
+ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+@@ -239,6 +303,11 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ goto remove_err;
+ }
+
++ if (id_val == 0) {
++ err = mptcp_userspace_pm_remove_id_zero_address(msk, info);
++ goto remove_err;
++ }
++
+ lock_sock((struct sock *)msk);
+
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+@@ -322,7 +391,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ local.addr = addr_l;
+- err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
++ err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "did not match address and id");
+ goto create_err;
+@@ -436,6 +505,16 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
+ goto destroy_err;
+ }
+
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++ if (addr_l.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) {
++ ipv6_addr_set_v4mapped(addr_l.addr.s_addr, &addr_l.addr6);
++ addr_l.family = AF_INET6;
++ }
++ if (addr_r.family == AF_INET && ipv6_addr_v4mapped(&addr_l.addr6)) {
++ ipv6_addr_set_v4mapped(addr_r.addr.s_addr, &addr_r.addr6);
++ addr_r.family = AF_INET6;
++ }
++#endif
+ if (addr_l.family != addr_r.family) {
+ GENL_SET_ERR_MSG(info, "address families do not match");
+ err = -EINVAL;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 886ab689a8aea9..8cdd4ec152e7b5 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -55,28 +55,14 @@ static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
+ return READ_ONCE(msk->wnd_end);
+ }
+
+-static bool mptcp_is_tcpsk(struct sock *sk)
++static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
+ {
+- struct socket *sock = sk->sk_socket;
+-
+- if (unlikely(sk->sk_prot == &tcp_prot)) {
+- /* we are being invoked after mptcp_accept() has
+- * accepted a non-mp-capable flow: sk is a tcp_sk,
+- * not an mptcp one.
+- *
+- * Hand the socket over to tcp so all further socket ops
+- * bypass mptcp.
+- */
+- WRITE_ONCE(sock->ops, &inet_stream_ops);
+- return true;
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+- } else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
+- WRITE_ONCE(sock->ops, &inet6_stream_ops);
+- return true;
++ if (sk->sk_prot == &tcpv6_prot)
++ return &inet6_stream_ops;
+ #endif
+- }
+-
+- return false;
++ WARN_ON_ONCE(sk->sk_prot != &tcp_prot);
++ return &inet_stream_ops;
+ }
+
+ static int __mptcp_socket_create(struct mptcp_sock *msk)
+@@ -99,7 +85,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ subflow->subflow_id = msk->subflow_id++;
+
+ /* This is the first subflow, always with id 0 */
+- subflow->local_id_valid = 1;
++ WRITE_ONCE(subflow->local_id, 0);
+ mptcp_sock_graft(msk->first, sk->sk_socket);
+ iput(SOCK_INODE(ssock));
+
+@@ -155,7 +141,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ !skb_try_coalesce(to, from, &fragstolen, &delta))
+ return false;
+
+- pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
++ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
+ MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+ to->len, MPTCP_SKB_CB(from)->end_seq);
+ MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+@@ -233,7 +219,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
+ end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ max_seq = atomic64_read(&msk->rcv_wnd_sent);
+
+- pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
++ pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
+ RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ if (after64(end_seq, max_seq)) {
+ /* out of window */
+@@ -366,8 +352,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ skb_orphan(skb);
+
+ /* try to fetch required memory from subflow */
+- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
++ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ goto drop;
++ }
+
+ has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+
+@@ -445,11 +433,11 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
+
+ switch (sk->sk_state) {
+ case TCP_FIN_WAIT1:
+- inet_sk_state_store(sk, TCP_FIN_WAIT2);
++ mptcp_set_state(sk, TCP_FIN_WAIT2);
+ break;
+ case TCP_CLOSING:
+ case TCP_LAST_ACK:
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ break;
+ }
+
+@@ -610,13 +598,13 @@ static bool mptcp_check_data_fin(struct sock *sk)
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+- inet_sk_state_store(sk, TCP_CLOSE_WAIT);
++ mptcp_set_state(sk, TCP_CLOSE_WAIT);
+ break;
+ case TCP_FIN_WAIT1:
+- inet_sk_state_store(sk, TCP_CLOSING);
++ mptcp_set_state(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ break;
+ default:
+ /* Other states not expected */
+@@ -632,6 +620,18 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ return ret;
+ }
+
++static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
++{
++ if (READ_ONCE(msk->allow_infinite_fallback)) {
++ MPTCP_INC_STATS(sock_net(ssk),
++ MPTCP_MIB_DSSCORRUPTIONFALLBACK);
++ mptcp_do_fallback(ssk);
++ } else {
++ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
++ mptcp_subflow_reset(ssk);
++ }
++}
++
+ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ struct sock *ssk,
+ unsigned int *bytes)
+@@ -655,7 +655,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ }
+ }
+
+- pr_debug("msk=%p ssk=%p", msk, ssk);
++ pr_debug("msk=%p ssk=%p\n", msk, ssk);
+ tp = tcp_sk(ssk);
+ do {
+ u32 map_remaining, offset;
+@@ -704,10 +704,16 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ moved += len;
+ seq += len;
+
+- if (WARN_ON_ONCE(map_remaining < len))
+- break;
++ if (unlikely(map_remaining < len)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ mptcp_dss_corruption(msk, ssk);
++ }
+ } else {
+- WARN_ON_ONCE(!fin);
++ if (unlikely(!fin)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ mptcp_dss_corruption(msk, ssk);
++ }
++
+ sk_eat_skb(ssk, skb);
+ done = true;
+ }
+@@ -734,7 +740,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ u64 end_seq;
+
+ p = rb_first(&msk->out_of_order_queue);
+- pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
++ pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ while (p) {
+ skb = rb_to_skb(p);
+ if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+@@ -756,7 +762,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+
+ /* skip overlapping data, if any */
+- pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
++ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
+ MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+ delta);
+ MPTCP_SKB_CB(skb)->offset += delta;
+@@ -791,7 +797,7 @@ static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
+ */
+ ssk_state = inet_sk_state_load(ssk);
+ if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+- inet_sk_state_store(sk, ssk_state);
++ mptcp_set_state(sk, ssk_state);
+ WRITE_ONCE(sk->sk_err, -err);
+
+ /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+@@ -856,16 +862,13 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ sk_rbuf = ssk_rbuf;
+
+ /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
+- if (__mptcp_rmem(sk) > sk_rbuf) {
+- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
++ if (__mptcp_rmem(sk) > sk_rbuf)
+ return;
+- }
+
+ /* Wake-up the reader only for in-sequence data */
+ mptcp_data_lock(sk);
+- if (move_skbs_to_msk(msk, ssk))
++ if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
+ sk->sk_data_ready(sk);
+-
+ mptcp_data_unlock(sk);
+ }
+
+@@ -893,6 +896,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ mptcp_sockopt_sync_locked(msk, ssk);
+ mptcp_subflow_joined(msk, ssk);
+ mptcp_stop_tout_timer(sk);
++ __mptcp_propagate_sndbuf(sk, ssk);
+ return true;
+ }
+
+@@ -1079,15 +1083,16 @@ static void mptcp_enter_memory_pressure(struct sock *sk)
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ bool first = true;
+
+- sk_stream_moderate_sndbuf(sk);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (first)
+ tcp_enter_memory_pressure(ssk);
+ sk_stream_moderate_sndbuf(ssk);
++
+ first = false;
+ }
++ __mptcp_sync_sndbuf(sk);
+ }
+
+ /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
+@@ -1231,6 +1236,8 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ mptcp_do_fallback(ssk);
+ }
+
++#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
++
+ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ struct mptcp_data_frag *dfrag,
+ struct mptcp_sendmsg_info *info)
+@@ -1246,7 +1253,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ size_t copy;
+ int i;
+
+- pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
++ pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
+ msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+
+ if (WARN_ON_ONCE(info->sent > info->limit ||
+@@ -1257,6 +1264,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ return -EAGAIN;
+
+ /* compute send limit */
++ if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
++ ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
+ info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
+ copy = info->size_goal;
+
+@@ -1271,6 +1280,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+ if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
+ TCP_SKB_CB(skb)->eor = 1;
++ tcp_mark_push(tcp_sk(ssk), skb);
+ goto alloc_skb;
+ }
+
+@@ -1344,7 +1354,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ mpext->use_map = 1;
+ mpext->dsn64 = 1;
+
+- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
++ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
+ mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ mpext->dsn64);
+
+@@ -1425,13 +1435,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+ }
+
+ mptcp_for_each_subflow(msk, subflow) {
++ bool backup = subflow->backup || subflow->request_bkup;
++
+ trace_mptcp_subflow_get_send(subflow);
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ if (!mptcp_subflow_active(subflow))
+ continue;
+
+ tout = max(tout, mptcp_timeout_from_subflow(subflow));
+- nr_active += !subflow->backup;
++ nr_active += !backup;
+ pace = subflow->avg_pacing_rate;
+ if (unlikely(!pace)) {
+ /* init pacing rate from socket */
+@@ -1442,9 +1454,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+ }
+
+ linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
+- if (linger_time < send_info[subflow->backup].linger_time) {
+- send_info[subflow->backup].ssk = ssk;
+- send_info[subflow->backup].linger_time = linger_time;
++ if (linger_time < send_info[backup].linger_time) {
++ send_info[backup].ssk = ssk;
++ send_info[backup].linger_time = linger_time;
+ }
+ }
+ __mptcp_set_timeout(sk, tout);
+@@ -1516,8 +1528,11 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
+
+ void mptcp_check_and_set_pending(struct sock *sk)
+ {
+- if (mptcp_send_head(sk))
+- mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
++ if (mptcp_send_head(sk)) {
++ mptcp_data_lock(sk);
++ mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
++ mptcp_data_unlock(sk);
++ }
+ }
+
+ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
+@@ -1858,7 +1873,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ if (!msk->first_pending)
+ WRITE_ONCE(msk->first_pending, dfrag);
+ }
+- pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
++ pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
+ dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+ !dfrag_collapsed);
+
+@@ -1922,6 +1937,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ if (!(flags & MSG_PEEK)) {
+ MPTCP_SKB_CB(skb)->offset += count;
+ MPTCP_SKB_CB(skb)->map_seq += count;
++ msk->bytes_consumed += count;
+ }
+ break;
+ }
+@@ -1932,6 +1948,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
+ __skb_unlink(skb, &msk->receive_queue);
+ __kfree_skb(skb);
++ msk->bytes_consumed += count;
+ }
+
+ if (copied >= len)
+@@ -1958,6 +1975,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
+ if (copied <= 0)
+ return;
+
++ if (!msk->rcvspace_init)
++ mptcp_rcv_space_init(msk, msk->first);
++
+ msk->rcvq_space.copied += copied;
+
+ mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+@@ -2024,7 +2044,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ slow = lock_sock_fast(ssk);
+ WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
+- tcp_sk(ssk)->window_clamp = window_clamp;
++ WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
+ tcp_cleanup_rbuf(ssk, 1);
+ unlock_sock_fast(ssk, slow);
+ }
+@@ -2209,7 +2229,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+ }
+
+- pr_debug("block timeout %ld", timeo);
++ pr_debug("block timeout %ld\n", timeo);
+ sk_wait_data(sk, &timeo, NULL);
+ }
+
+@@ -2225,7 +2245,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+ }
+
+- pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
++ pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
+ msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+ skb_queue_empty(&msk->receive_queue), copied);
+ if (!(flags & MSG_PEEK))
+@@ -2287,7 +2307,7 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
+ continue;
+ }
+
+- if (subflow->backup) {
++ if (subflow->backup || subflow->request_bkup) {
+ if (!backup)
+ backup = ssk;
+ continue;
+@@ -2312,9 +2332,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
+ if (__mptcp_check_fallback(msk))
+ return false;
+
+- if (tcp_rtx_and_write_queues_empty(sk))
+- return false;
+-
+ /* the closing socket has some data untransmitted and/or unacked:
+ * some data in the mptcp rtx queue has not really xmitted yet.
+ * keep it simple and re-inject the whole mptcp level rtx queue
+@@ -2448,6 +2465,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ WRITE_ONCE(msk->first, NULL);
+
+ out:
++ __mptcp_sync_sndbuf(sk);
+ if (need_push)
+ __mptcp_push_pending(sk, 0);
+
+@@ -2460,7 +2478,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ inet_sk_state_load(msk->first) == TCP_CLOSE) {
+ if (sk->sk_state != TCP_ESTABLISHED ||
+ msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ mptcp_close_wake_up(sk);
+ } else {
+ mptcp_start_tout_timer(sk);
+@@ -2471,6 +2489,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow)
+ {
++ /* The first subflow can already be closed and still in the list */
++ if (subflow->close_event_done)
++ return;
++
++ subflow->close_event_done = true;
++
+ if (sk->sk_state == TCP_ESTABLISHED)
+ mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
+
+@@ -2496,8 +2520,11 @@ static void __mptcp_close_subflow(struct sock *sk)
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ int ssk_state = inet_sk_state_load(ssk);
+
+- if (inet_sk_state_load(ssk) != TCP_CLOSE)
++ if (ssk_state != TCP_CLOSE &&
++ (ssk_state != TCP_CLOSE_WAIT ||
++ inet_sk_state_load(sk) != TCP_ESTABLISHED))
+ continue;
+
+ /* 'subflow_data_ready' will re-sched once rx queue is empty */
+@@ -2555,7 +2582,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
+ }
+
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
+@@ -2677,7 +2704,7 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
+ if (!ssk)
+ return;
+
+- pr_debug("MP_FAIL doesn't respond, reset the subflow");
++ pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
+
+ slow = lock_sock_fast(ssk);
+ mptcp_subflow_reset(ssk);
+@@ -2690,7 +2717,7 @@ static void mptcp_do_fastclose(struct sock *sk)
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ mptcp_for_each_subflow_safe(msk, subflow, tmp)
+ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
+ subflow, MPTCP_CF_FASTCLOSE);
+@@ -2755,6 +2782,7 @@ static void __mptcp_init_sock(struct sock *sk)
+ msk->rmem_fwd_alloc = 0;
+ WRITE_ONCE(msk->rmem_released, 0);
+ msk->timer_ival = TCP_RTO_MIN;
++ msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
+
+ WRITE_ONCE(msk->first, NULL);
+ inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
+@@ -2846,7 +2874,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ break;
+ default:
+ if (__mptcp_check_fallback(mptcp_sk(sk))) {
+- pr_debug("Fallback");
++ pr_debug("Fallback\n");
+ ssk->sk_shutdown |= how;
+ tcp_shutdown(ssk, how);
+
+@@ -2856,7 +2884,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
+ mptcp_schedule_work(sk);
+ } else {
+- pr_debug("Sending DATA_FIN on subflow %p", ssk);
++ pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
+ tcp_send_ack(ssk);
+ if (!mptcp_rtx_timer_pending(sk))
+ mptcp_reset_rtx_timer(sk);
+@@ -2867,6 +2895,29 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ release_sock(ssk);
+ }
+
++void mptcp_set_state(struct sock *sk, int state)
++{
++ int oldstate = sk->sk_state;
++
++ switch (state) {
++ case TCP_ESTABLISHED:
++ if (oldstate != TCP_ESTABLISHED)
++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
++ break;
++ case TCP_CLOSE_WAIT:
++ /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
++ * MPTCP "accepted" sockets will be created later on. So no
++ * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
++ */
++ break;
++ default:
++ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
++ MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
++ }
++
++ inet_sk_state_store(sk, state);
++}
++
+ static const unsigned char new_state[16] = {
+ /* current state: new state: action: */
+ [0 /* (Invalid) */] = TCP_CLOSE,
+@@ -2889,7 +2940,7 @@ static int mptcp_close_state(struct sock *sk)
+ int next = (int)new_state[sk->sk_state];
+ int ns = next & TCP_STATE_MASK;
+
+- inet_sk_state_store(sk, ns);
++ mptcp_set_state(sk, ns);
+
+ return next & TCP_ACTION_FIN;
+ }
+@@ -2899,7 +2950,7 @@ static void mptcp_check_send_data_fin(struct sock *sk)
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+- pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
++ pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
+ msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
+ msk->snd_nxt, msk->write_seq);
+
+@@ -2923,7 +2974,7 @@ static void __mptcp_wr_shutdown(struct sock *sk)
+ {
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+- pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
++ pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
+ msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
+ !!mptcp_send_head(sk));
+
+@@ -2938,7 +2989,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ {
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ might_sleep();
+
+@@ -2964,16 +3015,9 @@ void __mptcp_unaccepted_force_close(struct sock *sk)
+ __mptcp_destroy_sock(sk);
+ }
+
+-static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
++static __poll_t mptcp_check_readable(struct sock *sk)
+ {
+- /* Concurrent splices from sk_receive_queue into receive_queue will
+- * always show at least one non-empty queue when checked in this order.
+- */
+- if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
+- skb_queue_empty_lockless(&msk->receive_queue))
+- return 0;
+-
+- return EPOLLIN | EPOLLRDNORM;
++ return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
+ }
+
+ static void mptcp_check_listen_stop(struct sock *sk)
+@@ -3007,11 +3051,11 @@ bool __mptcp_close(struct sock *sk, long timeout)
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
+ mptcp_check_listen_stop(sk);
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ goto cleanup;
+ }
+
+- if (mptcp_check_readable(msk) || timeout < 0) {
++ if (mptcp_data_avail(msk) || timeout < 0) {
+ /* If the msk has read data, or the caller explicitly ask it,
+ * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
+ */
+@@ -3050,10 +3094,10 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ * state, let's not keep resources busy for no reasons
+ */
+ if (subflows_alive == 0)
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+
+ sock_hold(sk);
+- pr_debug("msk=%p state=%d", sk, sk->sk_state);
++ pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
+ if (msk->token)
+ mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
+
+@@ -3116,7 +3160,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ return -EBUSY;
+
+ mptcp_check_listen_stop(sk);
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+
+ mptcp_stop_rtx_timer(sk);
+ mptcp_stop_tout_timer(sk);
+@@ -3130,7 +3174,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
+ WRITE_ONCE(msk->flags, 0);
+ msk->cb_flags = 0;
+- msk->push_pending = 0;
+ msk->recovery = false;
+ msk->can_ack = false;
+ msk->fully_established = false;
+@@ -3138,6 +3181,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ msk->snd_data_fin_enable = false;
+ msk->rcv_fastclose = false;
+ msk->use_64bit_ack = false;
++ msk->bytes_consumed = 0;
+ WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+ mptcp_pm_data_reset(msk);
+ mptcp_ca_reset(sk);
+@@ -3145,6 +3189,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ msk->bytes_received = 0;
+ msk->bytes_sent = 0;
+ msk->bytes_retrans = 0;
++ msk->rcvspace_init = 0;
+
+ WRITE_ONCE(sk->sk_shutdown, 0);
+ sk_error_report(sk);
+@@ -3158,8 +3203,50 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
+
+ return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+ }
++
++static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
++{
++ const struct ipv6_pinfo *np = inet6_sk(sk);
++ struct ipv6_txoptions *opt;
++ struct ipv6_pinfo *newnp;
++
++ newnp = inet6_sk(newsk);
++
++ rcu_read_lock();
++ opt = rcu_dereference(np->opt);
++ if (opt) {
++ opt = ipv6_dup_options(newsk, opt);
++ if (!opt)
++ net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
++ }
++ RCU_INIT_POINTER(newnp->opt, opt);
++ rcu_read_unlock();
++}
+ #endif
+
++static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
++{
++ struct ip_options_rcu *inet_opt, *newopt = NULL;
++ const struct inet_sock *inet = inet_sk(sk);
++ struct inet_sock *newinet;
++
++ newinet = inet_sk(newsk);
++
++ rcu_read_lock();
++ inet_opt = rcu_dereference(inet->inet_opt);
++ if (inet_opt) {
++ newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
++ inet_opt->opt.optlen, GFP_ATOMIC);
++ if (newopt)
++ memcpy(newopt, inet_opt, sizeof(*inet_opt) +
++ inet_opt->opt.optlen);
++ else
++ net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
++ }
++ RCU_INIT_POINTER(newinet->inet_opt, newopt);
++ rcu_read_unlock();
++}
++
+ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ const struct mptcp_options_received *mp_opt,
+ struct sock *ssk,
+@@ -3167,6 +3254,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ {
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
++ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk;
+
+ if (!nsk)
+@@ -3179,6 +3267,13 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+
+ __mptcp_init_sock(nsk);
+
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++ if (nsk->sk_family == AF_INET6)
++ mptcp_copy_ip6_options(nsk, sk);
++ else
++#endif
++ mptcp_copy_ip_options(nsk, sk);
++
+ msk = mptcp_sk(nsk);
+ msk->local_key = subflow_req->local_key;
+ msk->token = subflow_req->token;
+@@ -3190,7 +3285,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ msk->write_seq = subflow_req->idsn + 1;
+ msk->snd_nxt = msk->write_seq;
+ msk->snd_una = msk->write_seq;
+- msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
++ msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
+ msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
+ mptcp_init_sched(msk, mptcp_sk(sk)->sched);
+
+@@ -3203,11 +3298,12 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ /* this can't race with mptcp_close(), as the msk is
+ * not yet exposted to user-space
+ */
+- inet_sk_state_store(nsk, TCP_ESTABLISHED);
++ mptcp_set_state(nsk, TCP_ESTABLISHED);
+
+ /* The msk maintain a ref to each subflow in the connections list */
+ WRITE_ONCE(msk->first, ssk);
+- list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
++ subflow = mptcp_subflow_ctx(ssk);
++ list_add(&subflow->node, &msk->conn_list);
+ sock_hold(ssk);
+
+ /* new mpc subflow takes ownership of the newly
+@@ -3219,9 +3315,12 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ * uses the correct data
+ */
+ mptcp_copy_inaddrs(nsk, ssk);
+- mptcp_propagate_sndbuf(nsk, ssk);
++ __mptcp_propagate_sndbuf(nsk, ssk);
+
+ mptcp_rcv_space_init(msk, ssk);
++
++ if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
++ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ bh_unlock_sock(nsk);
+
+ /* note: the newly allocated socket refcount is 2 now */
+@@ -3232,6 +3331,7 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
+ {
+ const struct tcp_sock *tp = tcp_sk(ssk);
+
++ msk->rcvspace_init = 1;
+ msk->rcvq_space.copied = 0;
+ msk->rcvq_space.rtt_us = 0;
+
+@@ -3242,46 +3342,6 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
+ TCP_INIT_CWND * tp->advmss);
+ if (msk->rcvq_space.space == 0)
+ msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
+-
+- WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
+-}
+-
+-static struct sock *mptcp_accept(struct sock *ssk, int flags, int *err,
+- bool kern)
+-{
+- struct sock *newsk;
+-
+- pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
+- newsk = inet_csk_accept(ssk, flags, err, kern);
+- if (!newsk)
+- return NULL;
+-
+- pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
+- if (sk_is_mptcp(newsk)) {
+- struct mptcp_subflow_context *subflow;
+- struct sock *new_mptcp_sock;
+-
+- subflow = mptcp_subflow_ctx(newsk);
+- new_mptcp_sock = subflow->conn;
+-
+- /* is_mptcp should be false if subflow->conn is missing, see
+- * subflow_syn_recv_sock()
+- */
+- if (WARN_ON_ONCE(!new_mptcp_sock)) {
+- tcp_sk(newsk)->is_mptcp = 0;
+- goto out;
+- }
+-
+- newsk = new_mptcp_sock;
+- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+- } else {
+- MPTCP_INC_STATS(sock_net(ssk),
+- MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
+- }
+-
+-out:
+- newsk->sk_kern_sock = kern;
+- return newsk;
+ }
+
+ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
+@@ -3355,8 +3415,7 @@ static void mptcp_release_cb(struct sock *sk)
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ for (;;) {
+- unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
+- msk->push_pending;
++ unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
+ struct list_head join_list;
+
+ if (!flags)
+@@ -3372,7 +3431,6 @@ static void mptcp_release_cb(struct sock *sk)
+ * datapath acquires the msk socket spinlock while helding
+ * the subflow socket lock
+ */
+- msk->push_pending = 0;
+ msk->cb_flags &= ~flags;
+ spin_unlock_bh(&sk->sk_lock.slock);
+
+@@ -3390,13 +3448,16 @@ static void mptcp_release_cb(struct sock *sk)
+ if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
+ __mptcp_clean_una_wakeup(sk);
+ if (unlikely(msk->cb_flags)) {
+- /* be sure to set the current sk state before tacking actions
+- * depending on sk_state, that is processing MPTCP_ERROR_REPORT
++ /* be sure to sync the msk state before taking actions
++ * depending on sk_state (MPTCP_ERROR_REPORT)
++ * On sk release, avoid actions depending on the first subflow
+ */
+- if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
+- __mptcp_set_connected(sk);
++ if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
++ __mptcp_sync_state(sk, msk->pending_state);
+ if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
+ __mptcp_error_report(sk);
++ if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
++ __mptcp_sync_sndbuf(sk);
+ }
+
+ __mptcp_update_rmem(sk);
+@@ -3441,6 +3502,14 @@ void mptcp_subflow_process_delegated(struct sock *ssk, long status)
+ __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
+ mptcp_data_unlock(sk);
+ }
++ if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
++ mptcp_data_lock(sk);
++ if (!sock_owned_by_user(sk))
++ __mptcp_sync_sndbuf(sk);
++ else
++ __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
++ mptcp_data_unlock(sk);
++ }
+ if (status & BIT(MPTCP_DELEGATE_ACK))
+ schedule_3rdack_retransmission(ssk);
+ }
+@@ -3463,7 +3532,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
+ {
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+- pr_debug("msk=%p, ssk=%p", msk, msk->first);
++ pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
+ if (WARN_ON_ONCE(!msk->first))
+ return -EINVAL;
+
+@@ -3480,7 +3549,7 @@ void mptcp_finish_connect(struct sock *ssk)
+ sk = subflow->conn;
+ msk = mptcp_sk(sk);
+
+- pr_debug("msk=%p, token=%u", sk, subflow->token);
++ pr_debug("msk=%p, token=%u\n", sk, subflow->token);
+
+ subflow->map_seq = subflow->iasn;
+ subflow->map_subflow_seq = 1;
+@@ -3489,13 +3558,8 @@ void mptcp_finish_connect(struct sock *ssk)
+ * accessing the field below
+ */
+ WRITE_ONCE(msk->local_key, subflow->local_key);
+- WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
+- WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+- WRITE_ONCE(msk->snd_una, msk->write_seq);
+
+ mptcp_pm_new_connection(msk, ssk, 0);
+-
+- mptcp_rcv_space_init(msk, ssk);
+ }
+
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
+@@ -3514,7 +3578,7 @@ bool mptcp_finish_join(struct sock *ssk)
+ struct sock *parent = (void *)msk;
+ bool ret = true;
+
+- pr_debug("msk=%p, subflow=%p", msk, subflow);
++ pr_debug("msk=%p, subflow=%p\n", msk, subflow);
+
+ /* mptcp socket already closing? */
+ if (!mptcp_is_fully_established(parent)) {
+@@ -3525,6 +3589,7 @@ bool mptcp_finish_join(struct sock *ssk)
+ /* active subflow, already present inside the conn_list */
+ if (!list_empty(&subflow->node)) {
+ mptcp_subflow_joined(msk, ssk);
++ mptcp_propagate_sndbuf(parent, ssk);
+ return true;
+ }
+
+@@ -3559,7 +3624,7 @@ bool mptcp_finish_join(struct sock *ssk)
+
+ static void mptcp_shutdown(struct sock *sk, int how)
+ {
+- pr_debug("sk=%p, how=%d", sk, how);
++ pr_debug("sk=%p, how=%d\n", sk, how);
+
+ if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+ __mptcp_wr_shutdown(sk);
+@@ -3650,7 +3715,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(ssk))
+ return PTR_ERR(ssk);
+
+- inet_sk_state_store(sk, TCP_SYN_SENT);
++ mptcp_set_state(sk, TCP_SYN_SENT);
+ subflow = mptcp_subflow_ctx(ssk);
+ #ifdef CONFIG_TCP_MD5SIG
+ /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -3663,6 +3728,10 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
+ mptcp_subflow_early_fallback(msk, subflow);
+ }
++
++ WRITE_ONCE(msk->write_seq, subflow->idsn);
++ WRITE_ONCE(msk->snd_nxt, subflow->idsn);
++ WRITE_ONCE(msk->snd_una, subflow->idsn);
+ if (likely(!__mptcp_check_fallback(msk)))
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
+
+@@ -3700,7 +3769,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (unlikely(err)) {
+ /* avoid leaving a dangling token in an unconnected socket */
+ mptcp_token_destroy(msk);
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ return err;
+ }
+
+@@ -3715,7 +3784,6 @@ static struct proto mptcp_prot = {
+ .connect = mptcp_connect,
+ .disconnect = mptcp_disconnect,
+ .close = mptcp_close,
+- .accept = mptcp_accept,
+ .setsockopt = mptcp_setsockopt,
+ .getsockopt = mptcp_getsockopt,
+ .shutdown = mptcp_shutdown,
+@@ -3776,7 +3844,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ struct sock *ssk;
+ int err;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ lock_sock(sk);
+
+@@ -3790,13 +3858,13 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ goto unlock;
+ }
+
+- inet_sk_state_store(sk, TCP_LISTEN);
++ mptcp_set_state(sk, TCP_LISTEN);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
+ lock_sock(ssk);
+ err = __inet_listen_sk(ssk, backlog);
+ release_sock(ssk);
+- inet_sk_state_store(sk, inet_sk_state_load(ssk));
++ mptcp_set_state(sk, inet_sk_state_load(ssk));
+
+ if (!err) {
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -3816,7 +3884,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ struct sock *ssk, *newsk;
+ int err;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ /* Buggy applications can call accept on socket states other then LISTEN
+ * but no need to allocate the first subflow just to error out.
+@@ -3825,18 +3893,36 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ if (!ssk)
+ return -EINVAL;
+
+- newsk = mptcp_accept(ssk, flags, &err, kern);
++ pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
++ newsk = inet_csk_accept(ssk, flags, &err, kern);
+ if (!newsk)
+ return err;
+
+- lock_sock(newsk);
+-
+- __inet_accept(sock, newsock, newsk);
+- if (!mptcp_is_tcpsk(newsock->sk)) {
+- struct mptcp_sock *msk = mptcp_sk(newsk);
++ pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
++ if (sk_is_mptcp(newsk)) {
+ struct mptcp_subflow_context *subflow;
++ struct sock *new_mptcp_sock;
++
++ subflow = mptcp_subflow_ctx(newsk);
++ new_mptcp_sock = subflow->conn;
++
++ /* is_mptcp should be false if subflow->conn is missing, see
++ * subflow_syn_recv_sock()
++ */
++ if (WARN_ON_ONCE(!new_mptcp_sock)) {
++ tcp_sk(newsk)->is_mptcp = 0;
++ goto tcpfallback;
++ }
++
++ newsk = new_mptcp_sock;
++ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
++
++ newsk->sk_kern_sock = kern;
++ lock_sock(newsk);
++ __inet_accept(sock, newsock, newsk);
+
+ set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
++ msk = mptcp_sk(newsk);
+ msk->in_accept_queue = 0;
+
+ /* set ssk->sk_socket of accept()ed flows to mptcp socket.
+@@ -3856,8 +3942,21 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ __mptcp_close_ssk(newsk, msk->first,
+ mptcp_subflow_ctx(msk->first), 0);
+ if (unlikely(list_is_singular(&msk->conn_list)))
+- inet_sk_state_store(newsk, TCP_CLOSE);
++ mptcp_set_state(newsk, TCP_CLOSE);
+ }
++ } else {
++tcpfallback:
++ newsk->sk_kern_sock = kern;
++ lock_sock(newsk);
++ __inet_accept(sock, newsock, newsk);
++ /* we are being invoked after accepting a non-mp-capable
++ * flow: sk is a tcp_sk, not an mptcp one.
++ *
++ * Hand the socket over to tcp so all further socket ops
++ * bypass mptcp.
++ */
++ WRITE_ONCE(newsock->sk->sk_socket->ops,
++ mptcp_fallback_tcp_ops(newsock->sk));
+ }
+ release_sock(newsk);
+
+@@ -3892,7 +3991,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ sock_poll_wait(file, sock, wait);
+
+ state = inet_sk_state_load(sk);
+- pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
++ pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
+ if (state == TCP_LISTEN) {
+ struct sock *ssk = READ_ONCE(msk->first);
+
+@@ -3909,7 +4008,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+
+ if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
+- mask |= mptcp_check_readable(msk);
++ mask |= mptcp_check_readable(sk);
+ if (shutdown & SEND_SHUTDOWN)
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ else
+@@ -3947,6 +4046,7 @@ static const struct proto_ops mptcp_stream_ops = {
+ .sendmsg = inet_sendmsg,
+ .recvmsg = inet_recvmsg,
+ .mmap = sock_no_mmap,
++ .set_rcvlowat = mptcp_set_rcvlowat,
+ };
+
+ static struct inet_protosw mptcp_protosw = {
+@@ -4048,6 +4148,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = inet6_compat_ioctl,
+ #endif
++ .set_rcvlowat = mptcp_set_rcvlowat,
+ };
+
+ static struct proto mptcp_v6_prot;
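The protocol.c changes above also wire a `.set_rcvlowat = mptcp_set_rcvlowat` hook into both proto_ops tables, so SO_RCVLOWAT now gates EPOLLIN on the msk-wide byte count (bytes_received - bytes_consumed) instead of on per-subflow skb arrival. A hedged userspace sketch of that behaviour follows; it is not part of the patch, and the IPPROTO_MPTCP constant is an assumption about the target uapi headers:

    #include <poll.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262	/* assumed: current Linux uapi value */
    #endif

    /* Wait until at least `lowat` bytes are readable on an MPTCP socket
     * created with socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP). With
     * mptcp_set_rcvlowat in place, POLLIN honours the msk-level
     * threshold rather than firing on any queued skb. */
    static int wait_for_bulk(int fd, int lowat)
    {
    	struct pollfd pfd = { .fd = fd, .events = POLLIN };

    	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat)) < 0)
    		return -1;
    	return poll(&pfd, 1, -1);
    }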
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 3612545fa62e04..89d1c299ff2b9f 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -122,7 +122,8 @@
+ #define MPTCP_ERROR_REPORT 3
+ #define MPTCP_RETRANSMIT 4
+ #define MPTCP_FLUSH_JOIN_LIST 5
+-#define MPTCP_CONNECTED 6
++#define MPTCP_SYNC_STATE 6
++#define MPTCP_SYNC_SNDBUF 7
+
+ struct mptcp_skb_cb {
+ u64 map_seq;
+@@ -267,6 +268,7 @@ struct mptcp_sock {
+ atomic64_t rcv_wnd_sent;
+ u64 rcv_data_fin_seq;
+ u64 bytes_retrans;
++ u64 bytes_consumed;
+ int rmem_fwd_alloc;
+ int snd_burst;
+ int old_wspace;
+@@ -282,7 +284,6 @@ struct mptcp_sock {
+ int rmem_released;
+ unsigned long flags;
+ unsigned long cb_flags;
+- unsigned long push_pending;
+ bool recovery; /* closing subflow write queue reinjected */
+ bool can_ack;
+ bool fully_established;
+@@ -292,13 +293,20 @@ struct mptcp_sock {
+ bool use_64bit_ack; /* Set when we received a 64-bit DSN */
+ bool csum_enabled;
+ bool allow_infinite_fallback;
++ u8 pending_state; /* A subflow asked to set this sk_state,
++ * protected by the msk data lock
++ */
+ u8 mpc_endpoint_id;
+ u8 recvmsg_inq:1,
+ cork:1,
+ nodelay:1,
+ fastopening:1,
+ in_accept_queue:1,
+- free_first:1;
++ free_first:1,
++ rcvspace_init:1;
++ int keepalive_cnt;
++ int keepalive_idle;
++ int keepalive_intvl;
+ struct work_struct work;
+ struct sk_buff *ooo_last_skb;
+ struct rb_root out_of_order_queue;
+@@ -411,6 +419,7 @@ struct mptcp_subflow_request_sock {
+ u16 mp_capable : 1,
+ mp_join : 1,
+ backup : 1,
++ request_bkup : 1,
+ csum_reqd : 1,
+ allow_join_id0 : 1;
+ u8 local_id;
+@@ -447,6 +456,7 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+ #define MPTCP_DELEGATE_SCHEDULED 0
+ #define MPTCP_DELEGATE_SEND 1
+ #define MPTCP_DELEGATE_ACK 2
++#define MPTCP_DELEGATE_SNDBUF 3
+
+ #define MPTCP_DELEGATE_ACTIONS_MASK (~BIT(MPTCP_DELEGATE_SCHEDULED))
+ /* MPTCP subflow context */
+@@ -488,12 +498,13 @@ struct mptcp_subflow_context {
+ remote_key_valid : 1, /* received the peer key from */
+ disposable : 1, /* ctx can be free at ulp release time */
+ stale : 1, /* unable to snd/rcv data, do not use for xmit */
+- local_id_valid : 1, /* local_id is correctly initialized */
+ valid_csum_seen : 1, /* at least one csum validated */
+ is_mptfo : 1, /* subflow is doing TFO */
++ close_event_done : 1, /* has done the post-closed part */
+ __unused : 9;
+ enum mptcp_data_avail data_avail;
+ bool scheduled;
++ bool pm_listener; /* a listener managed by the kernel PM? */
+ u32 remote_nonce;
+ u64 thmac;
+ u32 local_nonce;
+@@ -502,7 +513,7 @@ struct mptcp_subflow_context {
+ u8 hmac[MPTCPOPT_HMAC_LEN]; /* MPJ subflow only */
+ u64 iasn; /* initial ack sequence number, MPC subflows only */
+ };
+- u8 local_id;
++ s16 local_id; /* if negative not initialized yet */
+ u8 remote_id;
+ u8 reset_seen:1;
+ u8 reset_transient:1;
+@@ -520,6 +531,9 @@ struct mptcp_subflow_context {
+
+ u32 setsockopt_seq;
+ u32 stale_rcv_tstamp;
++ int cached_sndbuf; /* sndbuf size when last synced with the msk sndbuf,
++ * protected by the msk socket lock
++ */
+
+ struct sock *tcp_sock; /* tcp sk backpointer */
+ struct sock *conn; /* parent mptcp_sock */
+@@ -550,6 +564,7 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
+ {
+ memset(&subflow->reset, 0, sizeof(subflow->reset));
+ subflow->request_mptcp = 1;
++ WRITE_ONCE(subflow->local_id, -1);
+ }
+
+ static inline u64
+@@ -615,8 +630,9 @@ int mptcp_allow_join_id0(const struct net *net);
+ unsigned int mptcp_stale_loss_cnt(const struct net *net);
+ int mptcp_get_pm_type(const struct net *net);
+ const char *mptcp_get_scheduler(const struct net *net);
+-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt);
++void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
++ struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt);
+ bool __mptcp_retransmit_pending_data(struct sock *sk);
+ void mptcp_check_and_set_pending(struct sock *sk);
+ void __mptcp_push_pending(struct sock *sk, unsigned int flags);
+@@ -634,6 +650,7 @@ bool __mptcp_close(struct sock *sk, long timeout);
+ void mptcp_cancel_work(struct sock *sk);
+ void __mptcp_unaccepted_force_close(struct sock *sk);
+ void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
++void mptcp_set_state(struct sock *sk, int state);
+
+ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ const struct mptcp_addr_info *b, bool use_port);
+@@ -661,6 +678,24 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
+ int mptcp_sched_get_send(struct mptcp_sock *msk);
+ int mptcp_sched_get_retrans(struct mptcp_sock *msk);
+
++static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
++{
++ return READ_ONCE(msk->bytes_received) - READ_ONCE(msk->bytes_consumed);
++}
++
++static inline bool mptcp_epollin_ready(const struct sock *sk)
++{
++ /* mptcp doesn't have to deal with small skbs in the receive queue,
++ * as it can always coalesce them
++ */
++ return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) ||
++ (mem_cgroup_sockets_enabled && sk->sk_memcg &&
++ mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
++ READ_ONCE(tcp_memory_pressure);
++}
++
++int mptcp_set_rcvlowat(struct sock *sk, int val);
++
+ static inline bool __tcp_can_send(const struct sock *ssk)
+ {
+ /* only send if our side has not closed yet */
+@@ -706,7 +741,7 @@ void mptcp_get_options(const struct sk_buff *skb,
+ struct mptcp_options_received *mp_opt);
+
+ void mptcp_finish_connect(struct sock *sk);
+-void __mptcp_set_connected(struct sock *sk);
++void __mptcp_sync_state(struct sock *sk, int state);
+ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
+
+ static inline void mptcp_stop_tout_timer(struct sock *sk)
+@@ -735,6 +770,7 @@ static inline bool mptcp_is_fully_established(struct sock *sk)
+ return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
+ READ_ONCE(mptcp_sk(sk)->fully_established);
+ }
++
+ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
+ void mptcp_data_ready(struct sock *sk, struct sock *ssk);
+ bool mptcp_finish_join(struct sock *sk);
+@@ -762,15 +798,6 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
+ READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
+ }
+
+-static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+-{
+- if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
+- return false;
+-
+- WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
+- return true;
+-}
+-
+ static inline void mptcp_write_space(struct sock *sk)
+ {
+ if (sk_stream_is_writeable(sk)) {
+@@ -781,6 +808,55 @@ static inline void mptcp_write_space(struct sock *sk)
+ }
+ }
+
++static inline void __mptcp_sync_sndbuf(struct sock *sk)
++{
++ struct mptcp_subflow_context *subflow;
++ int ssk_sndbuf, new_sndbuf;
++
++ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
++ return;
++
++ new_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[0];
++ mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
++ ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
++
++ subflow->cached_sndbuf = ssk_sndbuf;
++ new_sndbuf += ssk_sndbuf;
++ }
++
++ /* the msk max wmem limit is <nr_subflows> * tcp wmem[2] */
++ WRITE_ONCE(sk->sk_sndbuf, new_sndbuf);
++ mptcp_write_space(sk);
++}
++
++/* The caller holds both the msk socket and the subflow socket locks,
++ * possibly under BH
++ */
++static inline void __mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
++{
++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++
++ if (READ_ONCE(ssk->sk_sndbuf) != subflow->cached_sndbuf)
++ __mptcp_sync_sndbuf(sk);
++}
++
++/* the caller holds only the subflow socket lock, either in process or
++ * BH context. Additionally this can be called under the msk data lock,
++ * so we can't acquire that lock here: let the delegated action acquire
++ * the needed locks in a suitable order.
++ */
++static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
++{
++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++
++ if (likely(READ_ONCE(ssk->sk_sndbuf) == subflow->cached_sndbuf))
++ return;
++
++ local_bh_disable();
++ mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF);
++ local_bh_enable();
++}
++
+ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
+
+ #define MPTCP_TOKEN_MAX_RETRIES 4
+@@ -833,6 +909,8 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++ const struct mptcp_addr_info *remote);
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list);
+@@ -871,10 +949,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr,
+ bool echo);
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
+-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+- struct list_head *rm_list);
+
+ void mptcp_free_local_addr_list(struct mptcp_sock *msk);
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
+@@ -890,8 +965,8 @@ void mptcp_event_pm_listener(const struct sock *ssk,
+ enum mptcp_event_type event);
+ bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
+
+-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt);
++void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt);
+ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+ struct request_sock *req);
+
+@@ -958,11 +1033,21 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++
++static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
++{
++ int local_id = READ_ONCE(subflow->local_id);
++
++ if (local_id < 0)
++ return 0;
++ return local_id;
++}
+
+ void __init mptcp_pm_nl_init(void);
+ void mptcp_pm_nl_work(struct mptcp_sock *msk);
+-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+- const struct mptcp_rm_list *rm_list);
+ unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
+@@ -1008,7 +1093,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
+ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ {
+ if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
+- pr_debug("TCP fallback already done (msk=%p)", msk);
++ pr_debug("TCP fallback already done (msk=%p)\n", msk);
+ return;
+ }
+ set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+@@ -1035,7 +1120,7 @@ static inline void mptcp_do_fallback(struct sock *ssk)
+ }
+ }
+
+-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+
+ static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
+ {
+@@ -1057,7 +1142,8 @@ static inline bool subflow_simultaneous_connect(struct sock *sk)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+
+- return sk->sk_state == TCP_ESTABLISHED &&
++ return (1 << sk->sk_state) &
++ (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING) &&
+ is_active_ssk(subflow) &&
+ !subflow->conn_finished;
+ }
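The protocol.h hunks above drop the `local_id_valid` bitfield in favour of a signed `local_id` where -1 means "not assigned yet", read through the new `subflow_get_local_id()` helper. A minimal standalone sketch of that sentinel pattern, simplified from the kernel code and illustrative only:

    #include <stdint.h>

    struct subflow {
    	int16_t local_id;	/* negative means "not initialized yet" */
    };

    static void subflow_reset(struct subflow *sf)
    {
    	/* mirrors WRITE_ONCE(subflow->local_id, -1) at ctx reset */
    	sf->local_id = -1;
    }

    static uint8_t subflow_get_local_id(const struct subflow *sf)
    {
    	/* readers racing with assignment see 0, the MPC default id */
    	return sf->local_id < 0 ? 0 : (uint8_t)sf->local_id;
    }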
+diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
+index 4ab0693c069c0f..907986f5f04277 100644
+--- a/net/mptcp/sched.c
++++ b/net/mptcp/sched.c
+@@ -64,7 +64,7 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
+ list_add_tail_rcu(&sched->list, &mptcp_sched_list);
+ spin_unlock(&mptcp_sched_list_lock);
+
+- pr_debug("%s registered", sched->name);
++ pr_debug("%s registered\n", sched->name);
+ return 0;
+ }
+
+@@ -96,7 +96,7 @@ int mptcp_init_sched(struct mptcp_sock *msk,
+ if (msk->sched->init)
+ msk->sched->init(msk);
+
+- pr_debug("sched=%s", msk->sched->name);
++ pr_debug("sched=%s\n", msk->sched->name);
+
+ return 0;
+ }
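The sched.c hunks, like many throughout this patch, only append `\n` to `pr_debug()` format strings. Without a trailing newline, printk may hold the message as a potential continuation line and merge it with later, unrelated output, so concurrent debug traces can interleave. The mechanical fix, shown here on one of the lines above:

    /* before: message may be deferred or merged with unrelated output */
    pr_debug("sched=%s", msk->sched->name);
    /* after: each statement is flushed as its own log line */
    pr_debug("sched=%s\n", msk->sched->name);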
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 8260202c00669f..d0f73b9180c7c0 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -95,6 +95,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
+ case SO_SNDBUFFORCE:
+ ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
++ mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
+ break;
+ case SO_RCVBUF:
+ case SO_RCVBUFFORCE:
+@@ -180,8 +181,6 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
+
+ switch (optname) {
+ case SO_KEEPALIVE:
+- mptcp_sol_socket_sync_intval(msk, optname, val);
+- return 0;
+ case SO_DEBUG:
+ case SO_MARK:
+ case SO_PRIORITY:
+@@ -622,20 +621,36 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
+ return ret;
+ }
+
+-static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
+- unsigned int optlen)
++static int __mptcp_setsockopt_set_val(struct mptcp_sock *msk, int max,
++ int (*set_val)(struct sock *, int),
++ int *msk_val, int val)
+ {
+ struct mptcp_subflow_context *subflow;
+- struct sock *sk = (struct sock *)msk;
+- int val;
++ int err = 0;
+
+- if (optlen < sizeof(int))
+- return -EINVAL;
++ mptcp_for_each_subflow(msk, subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ int ret;
+
+- if (copy_from_sockptr(&val, optval, sizeof(val)))
+- return -EFAULT;
++ lock_sock(ssk);
++ ret = set_val(ssk, val);
++ err = err ? : ret;
++ release_sock(ssk);
++ }
++
++ if (!err) {
++ *msk_val = val;
++ sockopt_seq_inc(msk);
++ }
++
++ return err;
++}
++
++static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
++{
++ struct mptcp_subflow_context *subflow;
++ struct sock *sk = (struct sock *)msk;
+
+- lock_sock(sk);
+ sockopt_seq_inc(msk);
+ msk->cork = !!val;
+ mptcp_for_each_subflow(msk, subflow) {
+@@ -647,25 +662,15 @@ static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optva
+ }
+ if (!val)
+ mptcp_check_and_set_pending(sk);
+- release_sock(sk);
+
+ return 0;
+ }
+
+-static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
+- unsigned int optlen)
++static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val)
+ {
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+- int val;
+
+- if (optlen < sizeof(int))
+- return -EINVAL;
+-
+- if (copy_from_sockptr(&val, optval, sizeof(val)))
+- return -EFAULT;
+-
+- lock_sock(sk);
+ sockopt_seq_inc(msk);
+ msk->nodelay = !!val;
+ mptcp_for_each_subflow(msk, subflow) {
+@@ -677,8 +682,6 @@ static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t op
+ }
+ if (val)
+ mptcp_check_and_set_pending(sk);
+- release_sock(sk);
+-
+ return 0;
+ }
+
+@@ -737,8 +740,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ val = inet_sk(sk)->tos;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ bool slow;
+
++ slow = lock_sock_fast(ssk);
+ __ip_sock_set_tos(ssk, val);
++ unlock_sock_fast(ssk, slow);
+ }
+ release_sock(sk);
+
+@@ -788,25 +794,10 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ int ret, val;
+
+ switch (optname) {
+- case TCP_INQ:
+- ret = mptcp_get_int_option(msk, optval, optlen, &val);
+- if (ret)
+- return ret;
+- if (val < 0 || val > 1)
+- return -EINVAL;
+-
+- lock_sock(sk);
+- msk->recvmsg_inq = !!val;
+- release_sock(sk);
+- return 0;
+ case TCP_ULP:
+ return -EOPNOTSUPP;
+ case TCP_CONGESTION:
+ return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
+- case TCP_CORK:
+- return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
+- case TCP_NODELAY:
+- return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
+ case TCP_DEFER_ACCEPT:
+ /* See tcp.c: TCP_DEFER_ACCEPT does not fail */
+ mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen);
+@@ -819,7 +810,46 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ optval, optlen);
+ }
+
+- return -EOPNOTSUPP;
++ ret = mptcp_get_int_option(msk, optval, optlen, &val);
++ if (ret)
++ return ret;
++
++ lock_sock(sk);
++ switch (optname) {
++ case TCP_INQ:
++ if (val < 0 || val > 1)
++ ret = -EINVAL;
++ else
++ msk->recvmsg_inq = !!val;
++ break;
++ case TCP_CORK:
++ ret = __mptcp_setsockopt_sol_tcp_cork(msk, val);
++ break;
++ case TCP_NODELAY:
++ ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
++ break;
++ case TCP_KEEPIDLE:
++ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPIDLE,
++ &tcp_sock_set_keepidle_locked,
++ &msk->keepalive_idle, val);
++ break;
++ case TCP_KEEPINTVL:
++ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPINTVL,
++ &tcp_sock_set_keepintvl,
++ &msk->keepalive_intvl, val);
++ break;
++ case TCP_KEEPCNT:
++ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPCNT,
++ &tcp_sock_set_keepcnt,
++ &msk->keepalive_cnt,
++ val);
++ break;
++ default:
++ ret = -ENOPROTOOPT;
++ }
++
++ release_sock(sk);
++ return ret;
+ }
+
+ int mptcp_setsockopt(struct sock *sk, int level, int optname,
+@@ -828,7 +858,7 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sock *ssk;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ if (level == SOL_SOCKET)
+ return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+@@ -1314,6 +1344,8 @@ static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval,
+ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ char __user *optval, int __user *optlen)
+ {
++ struct sock *sk = (void *)msk;
++
+ switch (optname) {
+ case TCP_ULP:
+ case TCP_CONGESTION:
+@@ -1332,6 +1364,18 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ return mptcp_put_int_option(msk, optval, optlen, msk->cork);
+ case TCP_NODELAY:
+ return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
++ case TCP_KEEPIDLE:
++ return mptcp_put_int_option(msk, optval, optlen,
++ msk->keepalive_idle ? :
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_time) / HZ);
++ case TCP_KEEPINTVL:
++ return mptcp_put_int_option(msk, optval, optlen,
++ msk->keepalive_intvl ? :
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_intvl) / HZ);
++ case TCP_KEEPCNT:
++ return mptcp_put_int_option(msk, optval, optlen,
++ msk->keepalive_cnt ? :
++ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes));
+ }
+ return -EOPNOTSUPP;
+ }
+@@ -1372,7 +1416,7 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sock *ssk;
+
+- pr_debug("msk=%p", msk);
++ pr_debug("msk=%p\n", msk);
+
+ /* @@ the meaning of setsockopt() when the socket is connected and
+ * there are multiple subflows is not yet defined. It is up to the
+@@ -1415,8 +1459,10 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+
+ if (sk->sk_userlocks & tx_rx_locks) {
+ ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
+- if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
++ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+ WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
++ mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
++ }
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+ WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
+ }
+@@ -1439,6 +1485,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+ tcp_set_congestion_control(ssk, msk->ca_name, false, true);
+ __tcp_sock_set_cork(ssk, !!msk->cork);
+ __tcp_sock_set_nodelay(ssk, !!msk->nodelay);
++ tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
++ tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
++ tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
+
+ inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
+ inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
+@@ -1472,9 +1521,55 @@ void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
+
+ msk_owned_by_me(msk);
+
++ ssk->sk_rcvlowat = 0;
++
+ if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) {
+ sync_socket_options(msk, ssk);
+
+ subflow->setsockopt_seq = msk->setsockopt_seq;
+ }
+ }
++
++/* unfortunately this is different enough from the tcp version so
++ * that we can't factor it out
++ */
++int mptcp_set_rcvlowat(struct sock *sk, int val)
++{
++ struct mptcp_subflow_context *subflow;
++ int space, cap;
++
++ /* bpf can land here with a wrong sk type */
++ if (sk->sk_protocol == IPPROTO_TCP)
++ return -EINVAL;
++
++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
++ cap = sk->sk_rcvbuf >> 1;
++ else
++ cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
++ val = min(val, cap);
++ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
++
++ /* Check if we need to signal EPOLLIN right now */
++ if (mptcp_epollin_ready(sk))
++ sk->sk_data_ready(sk);
++
++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
++ return 0;
++
++ space = __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, val);
++ if (space <= sk->sk_rcvbuf)
++ return 0;
++
++ /* propagate the rcvbuf changes to all the subflows */
++ WRITE_ONCE(sk->sk_rcvbuf, space);
++ mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ bool slow;
++
++ slow = lock_sock_fast(ssk);
++ WRITE_ONCE(ssk->sk_rcvbuf, space);
++ WRITE_ONCE(tcp_sk(ssk)->window_clamp, val);
++ unlock_sock_fast(ssk, slow);
++ }
++ return 0;
++}
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 9c1f8d1d63d24a..282ecc8bf75e80 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -40,7 +40,7 @@ static void subflow_req_destructor(struct request_sock *req)
+ {
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+
+- pr_debug("subflow_req=%p", subflow_req);
++ pr_debug("subflow_req=%p\n", subflow_req);
+
+ if (subflow_req->msk)
+ sock_put((struct sock *)subflow_req->msk);
+@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
+ return NULL;
+ }
+ subflow_req->local_id = local_id;
++ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
+
+ return msk;
+ }
+@@ -131,6 +132,13 @@ static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
+ }
+ }
+
++static int subflow_reset_req_endp(struct request_sock *req, struct sk_buff *skb)
++{
++ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEENDPATTEMPT);
++ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
++ return -EPERM;
++}
++
+ /* Init mptcp request socket.
+ *
+ * Returns an error code if a JOIN has failed and a TCP reset
+@@ -145,7 +153,7 @@ static int subflow_check_req(struct request_sock *req,
+ struct mptcp_options_received mp_opt;
+ bool opt_mp_capable, opt_mp_join;
+
+- pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
++ pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
+
+ #ifdef CONFIG_TCP_MD5SIG
+ /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -157,15 +165,22 @@ static int subflow_check_req(struct request_sock *req,
+
+ mptcp_get_options(skb, &mp_opt);
+
+- opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
+- opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
++ opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
++ opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
+ if (opt_mp_capable) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+
++ if (unlikely(listener->pm_listener))
++ return subflow_reset_req_endp(req, skb);
+ if (opt_mp_join)
+ return 0;
+ } else if (opt_mp_join) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
++
++ if (mp_opt.backup)
++ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
++ } else if (unlikely(listener->pm_listener)) {
++ return subflow_reset_req_endp(req, skb);
+ }
+
+ if (opt_mp_capable && listener->request_mptcp) {
+@@ -215,7 +230,7 @@ static int subflow_check_req(struct request_sock *req,
+ }
+
+ if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
+- pr_debug("syn inet_sport=%d %d",
++ pr_debug("syn inet_sport=%d %d\n",
+ ntohs(inet_sk(sk_listener)->inet_sport),
+ ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
+ if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
+@@ -234,7 +249,7 @@ static int subflow_check_req(struct request_sock *req,
+ return -EPERM;
+ }
+
+- pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
++ pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
+ subflow_req->remote_nonce, subflow_req->msk);
+ }
+
+@@ -254,8 +269,8 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ subflow_init_req(req, sk_listener);
+ mptcp_get_options(skb, &mp_opt);
+
+- opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
+- opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
++ opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
++ opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
+ if (opt_mp_capable && opt_mp_join)
+ return -EINVAL;
+
+@@ -419,24 +434,28 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
+ return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
+ }
+
+-void __mptcp_set_connected(struct sock *sk)
++void __mptcp_sync_state(struct sock *sk, int state)
+ {
++ struct mptcp_subflow_context *subflow;
++ struct mptcp_sock *msk = mptcp_sk(sk);
++ struct sock *ssk = msk->first;
++
++ subflow = mptcp_subflow_ctx(ssk);
++ __mptcp_propagate_sndbuf(sk, ssk);
++ if (!msk->rcvspace_init)
++ mptcp_rcv_space_init(msk, ssk);
++
+ if (sk->sk_state == TCP_SYN_SENT) {
+- inet_sk_state_store(sk, TCP_ESTABLISHED);
++ /* subflow->idsn is always available in TCP_SYN_SENT state,
++ * even for the FASTOPEN scenarios
++ */
++ WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
++ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
++ mptcp_set_state(sk, state);
+ sk->sk_state_change(sk);
+ }
+ }
+
+-static void mptcp_set_connected(struct sock *sk)
+-{
+- mptcp_data_lock(sk);
+- if (!sock_owned_by_user(sk))
+- __mptcp_set_connected(sk);
+- else
+- __set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
+- mptcp_data_unlock(sk);
+-}
+-
+ static void subflow_set_remote_key(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt)
+@@ -458,6 +477,31 @@ static void subflow_set_remote_key(struct mptcp_sock *msk,
+ atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
+ }
+
++static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
++ struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt)
++{
++ struct mptcp_sock *msk = mptcp_sk(sk);
++
++ mptcp_data_lock(sk);
++ if (mp_opt) {
++ /* Options are available only in the non-fallback cases;
++ * avoid updating rx path fields otherwise
++ */
++ WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
++ WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
++ subflow_set_remote_key(msk, subflow, mp_opt);
++ }
++
++ if (!sock_owned_by_user(sk)) {
++ __mptcp_sync_state(sk, ssk->sk_state);
++ } else {
++ msk->pending_state = ssk->sk_state;
++ __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
++ }
++ mptcp_data_unlock(sk);
++}
++
+ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+@@ -472,15 +516,14 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ return;
+
+ msk = mptcp_sk(parent);
+- mptcp_propagate_sndbuf(parent, sk);
+ subflow->rel_write_seq = 1;
+ subflow->conn_finished = 1;
+ subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+- pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
++ pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
+
+ mptcp_get_options(skb, &mp_opt);
+ if (subflow->request_mptcp) {
+- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
++ if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
+ MPTCP_INC_STATS(sock_net(sk),
+ MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
+ mptcp_do_fallback(sk);
+@@ -493,14 +536,13 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ if (mp_opt.deny_join_id0)
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+ subflow->mp_capable = 1;
+- subflow_set_remote_key(msk, subflow, &mp_opt);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
+ mptcp_finish_connect(sk);
+- mptcp_set_connected(parent);
++ mptcp_propagate_state(parent, sk, subflow, &mp_opt);
+ } else if (subflow->request_join) {
+ u8 hmac[SHA256_DIGEST_SIZE];
+
+- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
++ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
+ subflow->reset_reason = MPTCP_RST_EMPTCP;
+ goto do_reset;
+ }
+@@ -508,8 +550,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ subflow->backup = mp_opt.backup;
+ subflow->thmac = mp_opt.thmac;
+ subflow->remote_nonce = mp_opt.nonce;
+- subflow->remote_id = mp_opt.join_id;
+- pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
++ WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
++ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
+ subflow, subflow->thmac, subflow->remote_nonce,
+ subflow->backup);
+
+@@ -531,16 +573,18 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ subflow->mp_join = 1;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+
++ if (subflow->backup)
++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
++
+ if (subflow_use_different_dport(msk, sk)) {
+- pr_debug("synack inet_dport=%d %d",
++ pr_debug("synack inet_dport=%d %d\n",
+ ntohs(inet_sk(sk)->inet_dport),
+ ntohs(inet_sk(parent)->inet_dport));
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
+ }
+ } else if (mptcp_check_fallback(sk)) {
+ fallback:
+- mptcp_rcv_space_init(msk, sk);
+- mptcp_set_connected(parent);
++ mptcp_propagate_state(parent, sk, subflow, NULL);
+ }
+ return;
+
+@@ -551,8 +595,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+
+ static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
+ {
+- subflow->local_id = local_id;
+- subflow->local_id_valid = 1;
++ WARN_ON_ONCE(local_id < 0 || local_id > 255);
++ WRITE_ONCE(subflow->local_id, local_id);
+ }
+
+ static int subflow_chk_local_id(struct sock *sk)
+@@ -561,7 +605,7 @@ static int subflow_chk_local_id(struct sock *sk)
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ int err;
+
+- if (likely(subflow->local_id_valid))
++ if (likely(subflow->local_id >= 0))
+ return 0;
+
+ err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
+@@ -569,6 +613,8 @@ static int subflow_chk_local_id(struct sock *sk)
+ return err;
+
+ subflow_set_local_id(subflow, err);
++ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
++
+ return 0;
+ }
+
+@@ -601,7 +647,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+
+- pr_debug("subflow=%p", subflow);
++ pr_debug("subflow=%p\n", subflow);
+
+ /* Never answer to SYNs sent to broadcast or multicast */
+ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -632,7 +678,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+
+- pr_debug("subflow=%p", subflow);
++ pr_debug("subflow=%p\n", subflow);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return subflow_v4_conn_request(sk, skb);
+@@ -725,17 +771,16 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
+ kfree_rcu(ctx, rcu);
+ }
+
+-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt)
++void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
++ struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt)
+ {
+- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+-
+ subflow_set_remote_key(msk, subflow, mp_opt);
+ subflow->fully_established = 1;
+ WRITE_ONCE(msk->fully_established, true);
+
+ if (subflow->is_mptfo)
+- mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
++ __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
+ }
+
+ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+@@ -752,7 +797,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ struct mptcp_sock *owner;
+ struct sock *child;
+
+- pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
++ pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
+
+ /* After child creation we must look for MPC even when options
+ * are not parsed
+@@ -777,12 +822,13 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ * options.
+ */
+ mptcp_get_options(skb, &mp_opt);
+- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
++ if (!(mp_opt.suboptions &
++ (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
+ fallback = true;
+
+ } else if (subflow_req->mp_join) {
+ mptcp_get_options(skb, &mp_opt);
+- if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
++ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
+ !subflow_hmac_valid(req, &mp_opt) ||
+ !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+@@ -827,7 +873,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ * mpc option
+ */
+ if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
+- mptcp_subflow_fully_established(ctx, &mp_opt);
+ mptcp_pm_fully_established(owner, child);
+ ctx->pm_notified = 1;
+ }
+@@ -843,7 +888,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ ctx->conn = (struct sock *)owner;
+
+ if (subflow_use_different_sport(owner, sk)) {
+- pr_debug("ack inet_sport=%d %d",
++ pr_debug("ack inet_sport=%d %d\n",
+ ntohs(inet_sk(sk)->inet_sport),
+ ntohs(inet_sk((struct sock *)owner)->inet_sport));
+ if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
+@@ -880,6 +925,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ return child;
+
+ fallback:
++ if (fallback)
++ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
+ mptcp_subflow_drop_ctx(child);
+ return child;
+ }
+@@ -898,7 +945,7 @@ enum mapping_status {
+
+ static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+- pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
+ ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+
+@@ -908,8 +955,10 @@ static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
+ unsigned int skb_consumed;
+
+ skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
+- if (WARN_ON_ONCE(skb_consumed >= skb->len))
++ if (unlikely(skb_consumed >= skb->len)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
+ return true;
++ }
+
+ return skb->len - skb_consumed <= subflow->map_data_len -
+ mptcp_subflow_get_map_offset(subflow);
+@@ -1058,7 +1107,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+
+ data_len = mpext->data_len;
+ if (data_len == 0) {
+- pr_debug("infinite mapping received");
++ pr_debug("infinite mapping received\n");
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+ subflow->map_data_len = 0;
+ return MAPPING_INVALID;
+@@ -1068,7 +1117,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ if (data_len == 1) {
+ bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
+ mpext->dsn64);
+- pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
++ pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
+ if (subflow->map_valid) {
+ /* A DATA_FIN might arrive in a DSS
+ * option before the previous mapping
+@@ -1093,7 +1142,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ data_fin_seq &= GENMASK_ULL(31, 0);
+
+ mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+- pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
++ pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+ data_fin_seq, mpext->dsn64);
+ }
+
+@@ -1140,7 +1189,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ if (unlikely(subflow->map_csum_reqd != csum_reqd))
+ return MAPPING_INVALID;
+
+- pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
++ pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
+ subflow->map_seq, subflow->map_subflow_seq,
+ subflow->map_data_len, subflow->map_csum_reqd,
+ subflow->map_data_csum);
+@@ -1165,14 +1214,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+- u32 incr;
++ struct tcp_sock *tp = tcp_sk(ssk);
++ u32 offset, incr, avail_len;
++
++ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
++ if (WARN_ON_ONCE(offset > skb->len))
++ goto out;
+
+- incr = limit >= skb->len ? skb->len + fin : limit;
++ avail_len = skb->len - offset;
++ incr = limit >= avail_len ? avail_len + fin : limit;
+
+- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
+- subflow->map_subflow_seq);
++ pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
++ offset, subflow->map_subflow_seq);
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ tcp_sk(ssk)->copied_seq += incr;
++
++out:
+ if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
+ sk_eat_skb(ssk, skb);
+ if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
+@@ -1182,12 +1239,16 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+- if (likely(ssk->sk_state != TCP_CLOSE))
++ struct sock *sk = (struct sock *)msk;
++
++ if (likely(ssk->sk_state != TCP_CLOSE &&
++ (ssk->sk_state != TCP_CLOSE_WAIT ||
++ inet_sk_state_load(sk) != TCP_ESTABLISHED)))
+ return;
+
+ if (skb_queue_empty(&ssk->sk_receive_queue) &&
+ !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+- mptcp_schedule_work((struct sock *)msk);
++ mptcp_schedule_work(sk);
+ }
+
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+@@ -1199,7 +1260,7 @@ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+ else if (READ_ONCE(msk->csum_enabled))
+ return !subflow->valid_csum_seen;
+ else
+- return !subflow->fully_established;
++ return READ_ONCE(msk->allow_infinite_fallback);
+ }
+
+ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+@@ -1264,7 +1325,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+
+ old_ack = READ_ONCE(msk->ack_seq);
+ ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+- pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
++ pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
+ ack_seq);
+ if (unlikely(before64(ack_seq, old_ack))) {
+ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+@@ -1336,7 +1397,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
+ subflow->map_valid = 0;
+ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+
+- pr_debug("Done with mapping: seq=%u data_len=%u",
++ pr_debug("Done with mapping: seq=%u data_len=%u\n",
+ subflow->map_subflow_seq,
+ subflow->map_data_len);
+ }
+@@ -1405,10 +1466,18 @@ static void subflow_data_ready(struct sock *sk)
+ WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+ !subflow->mp_join && !(state & TCPF_CLOSE));
+
+- if (mptcp_subflow_data_available(sk))
++ if (mptcp_subflow_data_available(sk)) {
+ mptcp_data_ready(parent, sk);
+- else if (unlikely(sk->sk_err))
++
++ /* subflow-level lowat tests are not relevant here;
++ * respect the msk-level threshold, possibly mandating an immediate ack
++ */
++ if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
++ (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
++ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
++ } else if (unlikely(sk->sk_err)) {
+ subflow_error_report(sk);
++ }
+ }
+
+ static void subflow_write_space(struct sock *ssk)
+@@ -1438,7 +1507,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
+
+ target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
+
+- pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
++ pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
+ subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
+
+ if (likely(icsk->icsk_af_ops == target))
+@@ -1533,10 +1602,10 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ goto failed;
+
+ mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+- pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
++ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
+ remote_token, local_id, remote_id);
+ subflow->remote_token = remote_token;
+- subflow->remote_id = remote_id;
++ WRITE_ONCE(subflow->remote_id, remote_id);
+ subflow->request_join = 1;
+ subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ subflow->subflow_id = msk->subflow_id++;
+@@ -1671,7 +1740,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+ SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+
+ subflow = mptcp_subflow_ctx(sf->sk);
+- pr_debug("subflow=%p", subflow);
++ pr_debug("subflow=%p\n", subflow);
+
+ *new_sock = sf;
+ sock_hold(sk);
+@@ -1695,9 +1764,10 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
+ INIT_LIST_HEAD(&ctx->node);
+ INIT_LIST_HEAD(&ctx->delegated_node);
+
+- pr_debug("subflow=%p", ctx);
++ pr_debug("subflow=%p\n", ctx);
+
+ ctx->tcp_sock = sk;
++ WRITE_ONCE(ctx->local_id, -1);
+
+ return ctx;
+ }
+@@ -1728,12 +1798,10 @@ static void subflow_state_change(struct sock *sk)
+
+ msk = mptcp_sk(parent);
+ if (subflow_simultaneous_connect(sk)) {
+- mptcp_propagate_sndbuf(parent, sk);
+ mptcp_do_fallback(sk);
+- mptcp_rcv_space_init(msk, sk);
+ pr_fallback(msk);
+ subflow->conn_finished = 1;
+- mptcp_set_connected(parent);
++ mptcp_propagate_state(parent, sk, subflow, NULL);
+ }
+
+ /* as recvmsg() does not acquire the subflow socket for ssk selection
+@@ -1847,7 +1915,7 @@ static int subflow_ulp_init(struct sock *sk)
+ goto out;
+ }
+
+- pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
++ pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
+
+ tp->is_mptcp = 1;
+ ctx->icsk_af_ops = icsk->icsk_af_ops;
+@@ -1935,14 +2003,15 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ new_ctx->idsn = subflow_req->idsn;
+
+ /* this is the first subflow, id is always 0 */
+- new_ctx->local_id_valid = 1;
++ subflow_set_local_id(new_ctx, 0);
+ } else if (subflow_req->mp_join) {
+ new_ctx->ssn_offset = subflow_req->ssn_offset;
+ new_ctx->mp_join = 1;
+ new_ctx->fully_established = 1;
+ new_ctx->remote_key_valid = 1;
+ new_ctx->backup = subflow_req->backup;
+- new_ctx->remote_id = subflow_req->remote_id;
++ new_ctx->request_bkup = subflow_req->request_bkup;
++ WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
+ new_ctx->token = subflow_req->token;
+ new_ctx->thmac = subflow_req->thmac;
+
+@@ -1969,6 +2038,17 @@ static void tcp_release_cb_override(struct sock *ssk)
+ tcp_release_cb(ssk);
+ }
+
++static int tcp_abort_override(struct sock *ssk, int err)
++{
++ /* closing a listener subflow requires a great deal of care.
++	 * keep it simple and just prevent such an operation
++ */
++ if (inet_sk_state_load(ssk) == TCP_LISTEN)
++ return -EINVAL;
++
++ return tcp_abort(ssk, err);
++}
++
+ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
+ .name = "mptcp",
+ .owner = THIS_MODULE,
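/* Editorial sketch, not part of the patch: the wrap-and-refuse pattern that
 * tcp_abort_override() plugs into diag_destroy above -- delegate to the
 * original operation except for the one state that would be unsafe to tear
 * down. Names and values are illustrative.
 */
#include <errno.h>

enum { TOY_TCP_LISTEN = 10 };

static int toy_tcp_abort(int err) { return err; }	/* stands in for tcp_abort() */

static int toy_abort_override(int state, int err)
{
	if (state == TOY_TCP_LISTEN)
		return -EINVAL;		/* closing listeners needs special care */
	return toy_tcp_abort(err);
}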
+@@ -2013,6 +2093,7 @@ void __init mptcp_subflow_init(void)
+
+ tcp_prot_override = tcp_prot;
+ tcp_prot_override.release_cb = tcp_release_cb_override;
++ tcp_prot_override.diag_destroy = tcp_abort_override;
+
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ /* In struct mptcp_subflow_request_sock, we assume the TCP request sock
+@@ -2049,6 +2130,7 @@ void __init mptcp_subflow_init(void)
+
+ tcpv6_prot_override = tcpv6_prot;
+ tcpv6_prot_override.release_cb = tcp_release_cb_override;
++ tcpv6_prot_override.diag_destroy = tcp_abort_override;
+ #endif
+
+ mptcp_diag_subflow_init(&subflow_ulp_ops);
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 03757e76bb6b9b..ef0f8f73826f53 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -105,8 +105,11 @@ enum {
+
+
+ struct ncsi_channel_version {
+- u32 version; /* Supported BCD encoded NCSI version */
+- u32 alpha2; /* Supported BCD encoded NCSI version */
++ u8 major; /* NCSI version major */
++ u8 minor; /* NCSI version minor */
++ u8 update; /* NCSI version update */
++ char alpha1; /* NCSI version alpha1 */
++ char alpha2; /* NCSI version alpha2 */
+ u8 fw_name[12]; /* Firmware name string */
+ u32 fw_version; /* Firmware version */
+ u16 pci_ids[4]; /* PCI identification */
+@@ -322,6 +325,7 @@ struct ncsi_dev_priv {
+ spinlock_t lock; /* Protect the NCSI device */
+ unsigned int package_probe_id;/* Current ID during probe */
+ unsigned int package_num; /* Number of packages */
++	unsigned int channel_probe_id;/* Current channel ID during probe */
+ struct list_head packages; /* List of packages */
+ struct ncsi_channel *hot_channel; /* Channel was ever active */
+ struct ncsi_request requests[256]; /* Request table */
+@@ -340,6 +344,7 @@ struct ncsi_dev_priv {
+ bool multi_package; /* Enable multiple packages */
+ bool mlx_multi_host; /* Enable multi host Mellanox */
+ u32 package_whitelist; /* Packages to configure */
++ unsigned char channel_count; /* Num of channels to probe */
+ };
+
+ struct ncsi_cmd_arg {
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index f8854bff286cbd..62fb1031763d14 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,11 +89,6 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ if ((had_link == has_link) || chained)
+ return 0;
+
+- if (had_link)
+- netif_carrier_off(ndp->ndev.dev);
+- else
+- netif_carrier_on(ndp->ndev.dev);
+-
+ if (!ndp->multi_package && !nc->package->multi_channel) {
+ if (had_link) {
+ ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index d9da942ad53dd9..90c6cf676221af 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
+
+ break;
+ case ncsi_dev_state_suspend_gls:
+- ndp->pending_req_num = np->channel_num;
++ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_GLS;
+ nca.package = np->id;
++ nca.channel = ndp->channel_probe_id;
++ ret = ncsi_xmit_cmd(&nca);
++ if (ret)
++ goto error;
++ ndp->channel_probe_id++;
+
+- nd->state = ncsi_dev_state_suspend_dcnt;
+- NCSI_FOR_EACH_CHANNEL(np, nc) {
+- nca.channel = nc->id;
+- ret = ncsi_xmit_cmd(&nca);
+- if (ret)
+- goto error;
++ if (ndp->channel_probe_id == ndp->channel_count) {
++ ndp->channel_probe_id = 0;
++ nd->state = ncsi_dev_state_suspend_dcnt;
+ }
+
+ break;
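/* Editorial sketch, not part of the patch: a toy model of the suspend/probe
 * rework above. Instead of transmitting one command per channel in a single
 * state-machine pass (pending_req_num = channel_num), the machine now sends
 * a single command per invocation, advances channel_probe_id, and only moves
 * to the next state once every channel has been visited. Names are
 * illustrative.
 */
#include <stdio.h>

struct toy_sm { unsigned int probe_id, channel_count; };

/* Returns 1 when the state machine may advance to its next state. */
static int toy_sm_step(struct toy_sm *sm)
{
	printf("xmit command to channel %u\n", sm->probe_id);
	if (++sm->probe_id == sm->channel_count) {
		sm->probe_id = 0;	/* reset cursor for the next state */
		return 1;
	}
	return 0;			/* re-enter this state for next channel */
}

int main(void)
{
	struct toy_sm sm = { 0, 3 };

	while (!toy_sm_step(&sm))
		;			/* channels 0, 1, 2, then advance */
	return 0;
}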
+@@ -689,8 +691,6 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
+ return 0;
+ }
+
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+-
+ static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+ {
+ unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
+@@ -716,10 +716,6 @@ static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+ return ret;
+ }
+
+-#endif
+-
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+-
+ /* NCSI OEM Command APIs */
+ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
+ {
+@@ -856,8 +852,6 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
+ return nch->handler(nca);
+ }
+
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-
+ /* Determine if a given channel from the channel_queue should be used for Tx */
+ static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
+ struct ncsi_channel *nc)
+@@ -1039,20 +1033,18 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
+ goto error;
+ }
+
+- nd->state = ncsi_dev_state_config_oem_gma;
++ nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
++ ? ncsi_dev_state_config_oem_gma
++ : ncsi_dev_state_config_clear_vids;
+ break;
+ case ncsi_dev_state_config_oem_gma:
+ nd->state = ncsi_dev_state_config_clear_vids;
+- ret = -1;
+
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+ nca.type = NCSI_PKT_CMD_OEM;
+ nca.package = np->id;
+ nca.channel = nc->id;
+ ndp->pending_req_num = 1;
+ ret = ncsi_gma_handler(&nca, nc->version.mf_id);
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+-
+ if (ret < 0)
+ schedule_work(&ndp->work);
+
+@@ -1350,7 +1342,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ {
+ struct ncsi_dev *nd = &ndp->ndev;
+ struct ncsi_package *np;
+- struct ncsi_channel *nc;
+ struct ncsi_cmd_arg nca;
+ unsigned char index;
+ int ret;
+@@ -1404,7 +1395,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+
+ schedule_work(&ndp->work);
+ break;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+ case ncsi_dev_state_probe_mlx_gma:
+ ndp->pending_req_num = 1;
+
+@@ -1429,25 +1419,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+
+ nd->state = ncsi_dev_state_probe_cis;
+ break;
+-#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+- case ncsi_dev_state_probe_cis:
+- ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
+-
+- /* Clear initial state */
+- nca.type = NCSI_PKT_CMD_CIS;
+- nca.package = ndp->active_package->id;
+- for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
+- nca.channel = index;
+- ret = ncsi_xmit_cmd(&nca);
+- if (ret)
+- goto error;
+- }
+-
+- nd->state = ncsi_dev_state_probe_gvi;
+- if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+- nd->state = ncsi_dev_state_probe_keep_phy;
+- break;
+-#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+ case ncsi_dev_state_probe_keep_phy:
+ ndp->pending_req_num = 1;
+
+@@ -1460,15 +1431,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+
+ nd->state = ncsi_dev_state_probe_gvi;
+ break;
+-#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
++ case ncsi_dev_state_probe_cis:
+ case ncsi_dev_state_probe_gvi:
+ case ncsi_dev_state_probe_gc:
+ case ncsi_dev_state_probe_gls:
+ np = ndp->active_package;
+- ndp->pending_req_num = np->channel_num;
++ ndp->pending_req_num = 1;
+
+- /* Retrieve version, capability or link status */
+- if (nd->state == ncsi_dev_state_probe_gvi)
++		/* Clear initial state; retrieve version, capability or link status */
++ if (nd->state == ncsi_dev_state_probe_cis)
++ nca.type = NCSI_PKT_CMD_CIS;
++ else if (nd->state == ncsi_dev_state_probe_gvi)
+ nca.type = NCSI_PKT_CMD_GVI;
+ else if (nd->state == ncsi_dev_state_probe_gc)
+ nca.type = NCSI_PKT_CMD_GC;
+@@ -1476,19 +1449,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ nca.type = NCSI_PKT_CMD_GLS;
+
+ nca.package = np->id;
+- NCSI_FOR_EACH_CHANNEL(np, nc) {
+- nca.channel = nc->id;
+- ret = ncsi_xmit_cmd(&nca);
+- if (ret)
+- goto error;
+- }
++ nca.channel = ndp->channel_probe_id;
++
++ ret = ncsi_xmit_cmd(&nca);
++ if (ret)
++ goto error;
+
+- if (nd->state == ncsi_dev_state_probe_gvi)
++ if (nd->state == ncsi_dev_state_probe_cis) {
++ nd->state = ncsi_dev_state_probe_gvi;
++ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
++ nd->state = ncsi_dev_state_probe_keep_phy;
++ } else if (nd->state == ncsi_dev_state_probe_gvi) {
+ nd->state = ncsi_dev_state_probe_gc;
+- else if (nd->state == ncsi_dev_state_probe_gc)
++ } else if (nd->state == ncsi_dev_state_probe_gc) {
+ nd->state = ncsi_dev_state_probe_gls;
+- else
++ } else {
++ nd->state = ncsi_dev_state_probe_cis;
++ ndp->channel_probe_id++;
++ }
++
++ if (ndp->channel_probe_id == ndp->channel_count) {
++ ndp->channel_probe_id = 0;
+ nd->state = ncsi_dev_state_probe_dp;
++ }
+ break;
+ case ncsi_dev_state_probe_dp:
+ ndp->pending_req_num = 1;
+@@ -1789,6 +1772,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+ ndp->requests[i].ndp = ndp;
+ timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
+ }
++ ndp->channel_count = NCSI_RESERVED_CHANNEL;
+
+ spin_lock_irqsave(&ncsi_dev_lock, flags);
+ list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
+@@ -1822,6 +1806,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
+
+ if (!(ndp->flags & NCSI_DEV_PROBED)) {
+ ndp->package_probe_id = 0;
++ ndp->channel_probe_id = 0;
+ nd->state = ncsi_dev_state_probe;
+ schedule_work(&ndp->work);
+ return 0;
+diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
+index a3a6753a1db762..2f872d064396df 100644
+--- a/net/ncsi/ncsi-netlink.c
++++ b/net/ncsi/ncsi-netlink.c
+@@ -71,8 +71,8 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
+ if (nc == nc->package->preferred_channel)
+ nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
+
+- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
+- nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2);
++ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.major);
++ nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.minor);
+ nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name);
+
+ vid_nest = nla_nest_start_noflag(skb, NCSI_CHANNEL_ATTR_VLAN_LIST);
+diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
+index ba66c7dc3a216d..c9d1da34dc4dc5 100644
+--- a/net/ncsi/ncsi-pkt.h
++++ b/net/ncsi/ncsi-pkt.h
+@@ -197,9 +197,12 @@ struct ncsi_rsp_gls_pkt {
+ /* Get Version ID */
+ struct ncsi_rsp_gvi_pkt {
+ struct ncsi_rsp_pkt_hdr rsp; /* Response header */
+- __be32 ncsi_version; /* NCSI version */
++ unsigned char major; /* NCSI version major */
++ unsigned char minor; /* NCSI version minor */
++ unsigned char update; /* NCSI version update */
++ unsigned char alpha1; /* NCSI version alpha1 */
+ unsigned char reserved[3]; /* Reserved */
+- unsigned char alpha2; /* NCSI version */
++ unsigned char alpha2; /* NCSI version alpha2 */
+ unsigned char fw_name[12]; /* f/w name string */
+ __be32 fw_version; /* f/w version */
+ __be16 pci_ids[4]; /* PCI IDs */
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 069c2659074bc2..f22d67cb04d371 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -19,6 +19,19 @@
+ #include "ncsi-pkt.h"
+ #include "ncsi-netlink.h"
+
++/* Nibbles in [0xA, 0xF] are not valid BCD digits and decode as zero,
++ * so optional fields (encoded as 0xFF) default to zero.
++ */
++static u8 decode_bcd_u8(u8 x)
++{
++ int lo = x & 0xF;
++ int hi = x >> 4;
++
++ lo = lo < 0xA ? lo : 0;
++ hi = hi < 0xA ? hi : 0;
++ return lo + hi * 10;
++}
++
+ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
+ unsigned short payload)
+ {
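/* Editorial sketch, not part of the patch: a quick userspace check of the
 * decode_bcd_u8() helper introduced above. Packed BCD 0x42 decodes to 42,
 * and nibbles outside [0x0, 0x9] -- notably the 0xFF "field not present"
 * encoding -- contribute zero.
 */
#include <assert.h>

typedef unsigned char u8;

static u8 decode_bcd_u8(u8 x)
{
	int lo = x & 0xF;
	int hi = x >> 4;

	lo = lo < 0xA ? lo : 0;
	hi = hi < 0xA ? hi : 0;
	return lo + hi * 10;
}

int main(void)
{
	assert(decode_bcd_u8(0x42) == 42);
	assert(decode_bcd_u8(0x09) == 9);
	assert(decode_bcd_u8(0xFF) == 0);	/* optional field defaults to 0 */
	return 0;
}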
+@@ -755,9 +768,18 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
+ if (!nc)
+ return -ENODEV;
+
+- /* Update to channel's version info */
++ /* Update channel's version info
++ *
++ * Major, minor, and update fields are supposed to be
++ * unsigned integers encoded as packed BCD.
++ *
++ * Alpha1 and alpha2 are ISO/IEC 8859-1 characters.
++ */
+ ncv = &nc->version;
+- ncv->version = ntohl(rsp->ncsi_version);
++ ncv->major = decode_bcd_u8(rsp->major);
++ ncv->minor = decode_bcd_u8(rsp->minor);
++ ncv->update = decode_bcd_u8(rsp->update);
++ ncv->alpha1 = rsp->alpha1;
+ ncv->alpha2 = rsp->alpha2;
+ memcpy(ncv->fw_name, rsp->fw_name, 12);
+ ncv->fw_version = ntohl(rsp->fw_version);
+@@ -773,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+ struct ncsi_rsp_gc_pkt *rsp;
+ struct ncsi_dev_priv *ndp = nr->ndp;
+ struct ncsi_channel *nc;
++ struct ncsi_package *np;
+ size_t size;
+
+ /* Find the channel */
+ rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
+ ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
+- NULL, &nc);
++ &np, &nc);
+ if (!nc)
+ return -ENODEV;
+
+@@ -813,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+ */
+ nc->vlan_filter.bitmap = U64_MAX;
+ nc->vlan_filter.n_vids = rsp->vlan_cnt;
++ np->ndp->channel_count = rsp->channel_cnt;
+
+ return 0;
+ }
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index ef4e76e5aef9f5..7bae43b00ebbe7 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -815,12 +815,21 @@ int __init netfilter_init(void)
+ if (ret < 0)
+ goto err;
+
++#ifdef CONFIG_LWTUNNEL
++ ret = netfilter_lwtunnel_init();
++ if (ret < 0)
++ goto err_lwtunnel_pernet;
++#endif
+ ret = netfilter_log_init();
+ if (ret < 0)
+- goto err_pernet;
++ goto err_log_pernet;
+
+ return 0;
+-err_pernet:
++err_log_pernet:
++#ifdef CONFIG_LWTUNNEL
++ netfilter_lwtunnel_fini();
++err_lwtunnel_pernet:
++#endif
+ unregister_pernet_subsys(&netfilter_net_ops);
+ err:
+ return ret;
+diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
+index 26ab0e9612d825..9523104a90da47 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
++++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
+@@ -28,6 +28,7 @@
+ #define mtype_del IPSET_TOKEN(MTYPE, _del)
+ #define mtype_list IPSET_TOKEN(MTYPE, _list)
+ #define mtype_gc IPSET_TOKEN(MTYPE, _gc)
++#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc)
+ #define mtype MTYPE
+
+ #define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
+@@ -57,9 +58,6 @@ mtype_destroy(struct ip_set *set)
+ {
+ struct mtype *map = set->data;
+
+- if (SET_WITH_TIMEOUT(set))
+- del_timer_sync(&map->gc);
+-
+ if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set);
+ ip_set_free(map->members);
+@@ -288,6 +286,15 @@ mtype_gc(struct timer_list *t)
+ add_timer(&map->gc);
+ }
+
++static void
++mtype_cancel_gc(struct ip_set *set)
++{
++ struct mtype *map = set->data;
++
++ if (SET_WITH_TIMEOUT(set))
++ del_timer_sync(&map->gc);
++}
++
+ static const struct ip_set_type_variant mtype = {
+ .kadt = mtype_kadt,
+ .uadt = mtype_uadt,
+@@ -301,6 +308,7 @@ static const struct ip_set_type_variant mtype = {
+ .head = mtype_head,
+ .list = mtype_list,
+ .same_set = mtype_same_set,
++ .cancel_gc = mtype_cancel_gc,
+ };
+
+ #endif /* __IP_SET_BITMAP_IP_GEN_H */
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 35d2f9c9ada025..61431690cbd5f1 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -53,14 +53,17 @@ MODULE_DESCRIPTION("core IP set support");
+ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+ /* When the nfnl mutex or ip_set_ref_lock is held: */
+-#define ip_set_dereference(p) \
+- rcu_dereference_protected(p, \
++#define ip_set_dereference(inst) \
++ rcu_dereference_protected((inst)->ip_set_list, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+- lockdep_is_held(&ip_set_ref_lock))
++ lockdep_is_held(&ip_set_ref_lock) || \
++ (inst)->is_deleted)
+ #define ip_set(inst, id) \
+- ip_set_dereference((inst)->ip_set_list)[id]
++ ip_set_dereference(inst)[id]
+ #define ip_set_ref_netlink(inst,id) \
+ rcu_dereference_raw((inst)->ip_set_list)[id]
++#define ip_set_dereference_nfnl(p) \
++ rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+
+ /* The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+@@ -708,15 +711,10 @@ __ip_set_put_netlink(struct ip_set *set)
+ static struct ip_set *
+ ip_set_rcu_get(struct net *net, ip_set_id_t index)
+ {
+- struct ip_set *set;
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+- rcu_read_lock();
+- /* ip_set_list itself needs to be protected */
+- set = rcu_dereference(inst->ip_set_list)[index];
+- rcu_read_unlock();
+-
+- return set;
++ /* ip_set_list and the set pointer need to be protected */
++ return ip_set_dereference_nfnl(inst->ip_set_list)[index];
+ }
+
+ static inline void
+@@ -1136,7 +1134,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
+ if (!list)
+ goto cleanup;
+ /* nfnl mutex is held, both lists are valid */
+- tmp = ip_set_dereference(inst->ip_set_list);
++ tmp = ip_set_dereference(inst);
+ memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+ rcu_assign_pointer(inst->ip_set_list, list);
+ /* Make sure all current packets have passed through */
+@@ -1157,6 +1155,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
+ return ret;
+
+ cleanup:
++ set->variant->cancel_gc(set);
+ set->variant->destroy(set);
+ put_out:
+ module_put(set->type->me);
+@@ -1174,17 +1173,52 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ .len = IPSET_MAXNAMELEN - 1 },
+ };
+
++/* In order to return quickly when destroying a single set, it is split
++ * into two stages:
++ * - Cancel garbage collector
++ * - Destroy the set itself via call_rcu()
++ */
++
+ static void
+-ip_set_destroy_set(struct ip_set *set)
++ip_set_destroy_set_rcu(struct rcu_head *head)
+ {
+- pr_debug("set: %s\n", set->name);
++ struct ip_set *set = container_of(head, struct ip_set, rcu);
+
+- /* Must call it without holding any lock */
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+ }
+
++static void
++_destroy_all_sets(struct ip_set_net *inst)
++{
++ struct ip_set *set;
++ ip_set_id_t i;
++ bool need_wait = false;
++
++ /* First cancel gc's: set:list sets are flushed as well */
++ for (i = 0; i < inst->ip_set_max; i++) {
++ set = ip_set(inst, i);
++ if (set) {
++ set->variant->cancel_gc(set);
++ if (set->type->features & IPSET_TYPE_NAME)
++ need_wait = true;
++ }
++ }
++ /* Must wait for flush to be really finished */
++ if (need_wait)
++ rcu_barrier();
++ for (i = 0; i < inst->ip_set_max; i++) {
++ set = ip_set(inst, i);
++ if (set) {
++ ip_set(inst, i) = NULL;
++ set->variant->destroy(set);
++ module_put(set->type->me);
++ kfree(set);
++ }
++ }
++}
++
+ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ const struct nlattr * const attr[])
+ {
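/* Editorial sketch, not part of the patch: a toy model (plain C, no RCU) of
 * the two-stage destroy introduced above. Stage one synchronously stops the
 * garbage collector so it can take no new references; stage two defers the
 * actual free, with a simple pending list standing in for call_rcu(). All
 * names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_set {
	const char *name;
	int gc_armed;
	struct toy_set *next_deferred;	/* stands in for struct rcu_head */
};

static struct toy_set *toy_deferred;	/* stands in for the RCU callback queue */

static void toy_cancel_gc(struct toy_set *s)
{
	s->gc_armed = 0;		/* stage 1: timer can no longer fire */
}

static void toy_defer_destroy(struct toy_set *s)
{
	s->next_deferred = toy_deferred;	/* stage 2: queue and return */
	toy_deferred = s;
}

static void toy_grace_period_end(void)	/* stands in for call_rcu() firing */
{
	while (toy_deferred) {
		struct toy_set *s = toy_deferred;

		toy_deferred = s->next_deferred;
		printf("freeing %s\n", s->name);
		free(s);
	}
}

int main(void)
{
	struct toy_set *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	s->name = "test";
	s->gc_armed = 1;
	toy_cancel_gc(s);
	toy_defer_destroy(s);		/* caller returns quickly */
	toy_grace_period_end();		/* prints "freeing test" */
	return 0;
}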
+@@ -1196,21 +1230,18 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ if (unlikely(protocol_min_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+- /* Must wait for flush to be really finished in list:set */
+- rcu_barrier();
+-
+ /* Commands are serialized and references are
+ * protected by the ip_set_ref_lock.
+ * External systems (i.e. xt_set) must call
+- * ip_set_put|get_nfnl_* functions, that way we
++ * ip_set_nfnl_get_* functions, that way we
+ * can safely check references here.
+ *
+ * list:set timer can only decrement the reference
+ * counter, so if it's already zero, we can proceed
+ * without holding the lock.
+ */
+- read_lock_bh(&ip_set_ref_lock);
+ if (!attr[IPSET_ATTR_SETNAME]) {
++ read_lock_bh(&ip_set_ref_lock);
+ for (i = 0; i < inst->ip_set_max; i++) {
+ s = ip_set(inst, i);
+ if (s && (s->ref || s->ref_netlink)) {
+@@ -1220,17 +1251,14 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ }
+ inst->is_destroyed = true;
+ read_unlock_bh(&ip_set_ref_lock);
+- for (i = 0; i < inst->ip_set_max; i++) {
+- s = ip_set(inst, i);
+- if (s) {
+- ip_set(inst, i) = NULL;
+- ip_set_destroy_set(s);
+- }
+- }
++ _destroy_all_sets(inst);
+ /* Modified by ip_set_destroy() only, which is serialized */
+ inst->is_destroyed = false;
+ } else {
+ u32 flags = flag_exist(info->nlh);
++ u16 features = 0;
++
++ read_lock_bh(&ip_set_ref_lock);
+ s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+ &i);
+ if (!s) {
+@@ -1241,10 +1269,16 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ ret = -IPSET_ERR_BUSY;
+ goto out;
+ }
++ features = s->type->features;
+ ip_set(inst, i) = NULL;
+ read_unlock_bh(&ip_set_ref_lock);
+-
+- ip_set_destroy_set(s);
++ /* Must cancel garbage collectors */
++ s->variant->cancel_gc(s);
++ if (features & IPSET_TYPE_NAME) {
++ /* Must wait for flush to be really finished */
++ rcu_barrier();
++ }
++ call_rcu(&s->rcu, ip_set_destroy_set_rcu);
+ }
+ return 0;
+ out:
+@@ -2348,29 +2382,25 @@ ip_set_net_init(struct net *net)
+ }
+
+ static void __net_exit
+-ip_set_net_exit(struct net *net)
++ip_set_net_pre_exit(struct net *net)
+ {
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+- struct ip_set *set = NULL;
+- ip_set_id_t i;
+-
+ inst->is_deleted = true; /* flag for ip_set_nfnl_put */
++}
+
+- nfnl_lock(NFNL_SUBSYS_IPSET);
+- for (i = 0; i < inst->ip_set_max; i++) {
+- set = ip_set(inst, i);
+- if (set) {
+- ip_set(inst, i) = NULL;
+- ip_set_destroy_set(set);
+- }
+- }
+- nfnl_unlock(NFNL_SUBSYS_IPSET);
++static void __net_exit
++ip_set_net_exit(struct net *net)
++{
++ struct ip_set_net *inst = ip_set_pernet(net);
++
++ _destroy_all_sets(inst);
+ kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
+ }
+
+ static struct pernet_operations ip_set_net_ops = {
+ .init = ip_set_net_init,
++ .pre_exit = ip_set_net_pre_exit,
+ .exit = ip_set_net_exit,
+ .id = &ip_set_net_id,
+ .size = sizeof(struct ip_set_net),
+@@ -2409,8 +2439,11 @@ ip_set_fini(void)
+ {
+ nf_unregister_sockopt(&so_set);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+-
+ unregister_pernet_subsys(&ip_set_net_ops);
++
++ /* Wait for call_rcu() in destroy */
++ rcu_barrier();
++
+ pr_debug("these are the famous last words\n");
+ }
+
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index 7c2399541771fc..20aad81fcad7e6 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -221,6 +221,7 @@ static const union nf_inet_addr zeromask = {};
+ #undef mtype_gc_do
+ #undef mtype_gc
+ #undef mtype_gc_init
++#undef mtype_cancel_gc
+ #undef mtype_variant
+ #undef mtype_data_match
+
+@@ -265,6 +266,7 @@ static const union nf_inet_addr zeromask = {};
+ #define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
+ #define mtype_gc IPSET_TOKEN(MTYPE, _gc)
+ #define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
++#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc)
+ #define mtype_variant IPSET_TOKEN(MTYPE, _variant)
+ #define mtype_data_match IPSET_TOKEN(MTYPE, _data_match)
+
+@@ -429,7 +431,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+- n = __ipset_dereference(hbucket(t, i));
++ n = (__force struct hbucket *)hbucket(t, i);
+ if (!n)
+ continue;
+ if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+@@ -449,10 +451,7 @@ mtype_destroy(struct ip_set *set)
+ struct htype *h = set->data;
+ struct list_head *l, *lt;
+
+- if (SET_WITH_TIMEOUT(set))
+- cancel_delayed_work_sync(&h->gc.dwork);
+-
+- mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
++ mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
+ list_for_each_safe(l, lt, &h->ad) {
+ list_del(l);
+ kfree(l);
+@@ -598,6 +597,15 @@ mtype_gc_init(struct htable_gc *gc)
+ queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
+ }
+
++static void
++mtype_cancel_gc(struct ip_set *set)
++{
++ struct htype *h = set->data;
++
++ if (SET_WITH_TIMEOUT(set))
++ cancel_delayed_work_sync(&h->gc.dwork);
++}
++
+ static int
+ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags);
+@@ -1440,6 +1448,7 @@ static const struct ip_set_type_variant mtype_variant = {
+ .uref = mtype_uref,
+ .resize = mtype_resize,
+ .same_set = mtype_same_set,
++ .cancel_gc = mtype_cancel_gc,
+ .region_lock = true,
+ };
+
+diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
+index 95aeb31c60e0d7..30a655e5c4fdcd 100644
+--- a/net/netfilter/ipset/ip_set_hash_netiface.c
++++ b/net/netfilter/ipset/ip_set_hash_netiface.c
+@@ -138,9 +138,9 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
+ #include "ip_set_hash_gen.h"
+
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+-static const char *get_physindev_name(const struct sk_buff *skb)
++static const char *get_physindev_name(const struct sk_buff *skb, struct net *net)
+ {
+- struct net_device *dev = nf_bridge_get_physindev(skb);
++ struct net_device *dev = nf_bridge_get_physindev(skb, net);
+
+ return dev ? dev->name : NULL;
+ }
+@@ -177,7 +177,7 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
+
+ if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+- const char *eiface = SRCDIR ? get_physindev_name(skb) :
++ const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
+ get_physoutdev_name(skb);
+
+ if (!eiface)
+@@ -395,7 +395,7 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
+
+ if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+- const char *eiface = SRCDIR ? get_physindev_name(skb) :
++ const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
+ get_physoutdev_name(skb);
+
+ if (!eiface)
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index e162636525cfb4..bfae7066936bb9 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
+ struct set_elem *e;
+ int ret;
+
+- list_for_each_entry(e, &map->members, list) {
++ list_for_each_entry_rcu(e, &map->members, list) {
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(e, set)))
+ continue;
+@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
+ struct set_elem *e;
+ int ret;
+
+- list_for_each_entry(e, &map->members, list) {
++ list_for_each_entry_rcu(e, &map->members, list) {
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(e, set)))
+ continue;
+@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ struct list_set *map = set->data;
+ struct set_adt_elem *d = value;
+ struct set_elem *e, *next, *prev = NULL;
+- int ret;
++ int ret = 0;
+
+- list_for_each_entry(e, &map->members, list) {
++ rcu_read_lock();
++ list_for_each_entry_rcu(e, &map->members, list) {
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(e, set)))
+ continue;
+@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+
+ if (d->before == 0) {
+ ret = 1;
++ goto out;
+ } else if (d->before > 0) {
+ next = list_next_entry(e, list);
+ ret = !list_is_last(&e->list, &map->members) &&
+@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ } else {
+ ret = prev && prev->id == d->refid;
+ }
+- return ret;
++ goto out;
+ }
+- return 0;
++out:
++ rcu_read_unlock();
++ return ret;
+ }
+
+ static void
+@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+
+ /* Find where to add the new entry */
+ n = prev = next = NULL;
+- list_for_each_entry(e, &map->members, list) {
++ list_for_each_entry_rcu(e, &map->members, list) {
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(e, set)))
+ continue;
+@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ {
+ struct list_set *map = set->data;
+ struct set_adt_elem *d = value;
+- struct set_elem *e, *next, *prev = NULL;
++ struct set_elem *e, *n, *next, *prev = NULL;
+
+- list_for_each_entry(e, &map->members, list) {
++ list_for_each_entry_safe(e, n, &map->members, list) {
+ if (SET_WITH_TIMEOUT(set) &&
+ ip_set_timeout_expired(ext_timeout(e, set)))
+ continue;
+@@ -424,17 +428,8 @@ static void
+ list_set_destroy(struct ip_set *set)
+ {
+ struct list_set *map = set->data;
+- struct set_elem *e, *n;
+
+- if (SET_WITH_TIMEOUT(set))
+- timer_shutdown_sync(&map->gc);
+-
+- list_for_each_entry_safe(e, n, &map->members, list) {
+- list_del(&e->list);
+- ip_set_put_byindex(map->net, e->id);
+- ip_set_ext_destroy(set, e);
+- kfree(e);
+- }
++ WARN_ON_ONCE(!list_empty(&map->members));
+ kfree(map);
+
+ set->data = NULL;
+@@ -545,6 +540,18 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+ a->extensions == b->extensions;
+ }
+
++static void
++list_set_cancel_gc(struct ip_set *set)
++{
++ struct list_set *map = set->data;
++
++ if (SET_WITH_TIMEOUT(set))
++ timer_shutdown_sync(&map->gc);
++
++ /* Flush list to drop references to other ipsets */
++ list_set_flush(set);
++}
++
+ static const struct ip_set_type_variant set_variant = {
+ .kadt = list_set_kadt,
+ .uadt = list_set_uadt,
+@@ -558,6 +565,7 @@ static const struct ip_set_type_variant set_variant = {
+ .head = list_set_head,
+ .list = list_set_list,
+ .same_set = list_set_same_set,
++ .cancel_gc = list_set_cancel_gc,
+ };
+
+ static void
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 143a341bbc0a4d..dec5309d9f1f59 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1459,18 +1459,18 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
+ if (ret < 0)
+ goto out_err;
+
+- /* Bind the ct retriever */
+- RCU_INIT_POINTER(svc->pe, pe);
+- pe = NULL;
+-
+ /* Update the virtual service counters */
+ if (svc->port == FTPPORT)
+ atomic_inc(&ipvs->ftpsvc_counter);
+ else if (svc->port == 0)
+ atomic_inc(&ipvs->nullsvc_counter);
+- if (svc->pe && svc->pe->conn_out)
++ if (pe && pe->conn_out)
+ atomic_inc(&ipvs->conn_out_counter);
+
++ /* Bind the ct retriever */
++ RCU_INIT_POINTER(svc->pe, pe);
++ pe = NULL;
++
+ /* Count only IPv4 services for old get/setsockopt interface */
+ if (svc->af == AF_INET)
+ ipvs->num_services++;
+diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+index a0921adc31a9ff..83e452916403d5 100644
+--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
++++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
+@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ if (sctph->source != cp->vport || payload_csum ||
+ skb->ip_summed == CHECKSUM_PARTIAL) {
+ sctph->source = cp->vport;
+- sctp_nat_csum(skb, sctph, sctphoff);
++ if (!skb_is_gso(skb))
++ sctp_nat_csum(skb, sctph, sctphoff);
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
+ sctph->dest = cp->dport;
+- sctp_nat_csum(skb, sctph, sctphoff);
++ if (!skb_is_gso(skb))
++ sctp_nat_csum(skb, sctph, sctphoff);
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 9193e109e6b38f..65e0259178da43 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ skb->dev = dst->dev;
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ ICMPV6_EXC_HOPLIMIT, 0);
+- __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+
+ return false;
+ }
+@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
+ {
+ if (ip_hdr(skb)->ttl <= 1) {
+ /* Tell the sender its packet died... */
+- __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
++ IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+ icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
+ return false;
+ }
+diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
+index e502ec00b2fe1e..0e4beae421f830 100644
+--- a/net/netfilter/nf_bpf_link.c
++++ b/net/netfilter/nf_bpf_link.c
+@@ -31,7 +31,7 @@ struct bpf_nf_link {
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ static const struct nf_defrag_hook *
+ get_proto_defrag_hook(struct bpf_nf_link *link,
+- const struct nf_defrag_hook __rcu *global_hook,
++ const struct nf_defrag_hook __rcu **ptr_global_hook,
+ const char *mod)
+ {
+ const struct nf_defrag_hook *hook;
+@@ -39,7 +39,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
+
+ /* RCU protects us from races against module unloading */
+ rcu_read_lock();
+- hook = rcu_dereference(global_hook);
++ hook = rcu_dereference(*ptr_global_hook);
+ if (!hook) {
+ rcu_read_unlock();
+ err = request_module(mod);
+@@ -47,7 +47,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
+ return ERR_PTR(err < 0 ? err : -EINVAL);
+
+ rcu_read_lock();
+- hook = rcu_dereference(global_hook);
++ hook = rcu_dereference(*ptr_global_hook);
+ }
+
+ if (hook && try_module_get(hook->owner)) {
+@@ -78,7 +78,7 @@ static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
+ switch (link->hook_ops.pf) {
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+ case NFPROTO_IPV4:
+- hook = get_proto_defrag_hook(link, nf_defrag_v4_hook, "nf_defrag_ipv4");
++ hook = get_proto_defrag_hook(link, &nf_defrag_v4_hook, "nf_defrag_ipv4");
+ if (IS_ERR(hook))
+ return PTR_ERR(hook);
+
+@@ -87,7 +87,7 @@ static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
+ #endif
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ case NFPROTO_IPV6:
+- hook = get_proto_defrag_hook(link, nf_defrag_v6_hook, "nf_defrag_ipv6");
++ hook = get_proto_defrag_hook(link, &nf_defrag_v6_hook, "nf_defrag_ipv6");
+ if (IS_ERR(hook))
+ return PTR_ERR(hook);
+
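/* Editorial sketch, not part of the patch: a userspace illustration of the
 * bug fixed above. Passing the hook pointer by value snapshots NULL before
 * the module loads, so the re-read after request_module() still sees the
 * stale copy; passing the address of the global lets the second dereference
 * observe the newly registered hook. Everything below is illustrative.
 */
#include <stdio.h>

static int (*toy_global_hook)(void);

static int toy_real_hook(void) { return 42; }

static void toy_request_module(void) { toy_global_hook = toy_real_hook; }

static int lookup_by_value(int (*hook)(void))
{
	if (!hook) {
		toy_request_module();
		/* any re-read of 'hook' here still yields the stale NULL */
	}
	return hook ? hook() : -1;
}

static int lookup_by_address(int (**hook_ptr)(void))
{
	if (!*hook_ptr) {
		toy_request_module();
		/* re-reading through the address sees the new hook */
	}
	return *hook_ptr ? (*hook_ptr)() : -1;
}

int main(void)
{
	toy_global_hook = NULL;
	printf("by value:   %d\n", lookup_by_value(toy_global_hook));	 /* -1, bug */
	toy_global_hook = NULL;
	printf("by address: %d\n", lookup_by_address(&toy_global_hook)); /* 42, fix */
	return 0;
}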
+diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
+index 5d8ed6c90b7ef4..5885810da412fa 100644
+--- a/net/netfilter/nf_conncount.c
++++ b/net/netfilter/nf_conncount.c
+@@ -321,7 +321,6 @@ insert_tree(struct net *net,
+ struct nf_conncount_rb *rbconn;
+ struct nf_conncount_tuple *conn;
+ unsigned int count = 0, gc_count = 0;
+- u8 keylen = data->keylen;
+ bool do_gc = true;
+
+ spin_lock_bh(&nf_conncount_locks[hash]);
+@@ -333,7 +332,7 @@ insert_tree(struct net *net,
+ rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+
+ parent = *rbnode;
+- diff = key_diff(key, rbconn->key, keylen);
++ diff = key_diff(key, rbconn->key, data->keylen);
+ if (diff < 0) {
+ rbnode = &((*rbnode)->rb_left);
+ } else if (diff > 0) {
+@@ -378,7 +377,7 @@ insert_tree(struct net *net,
+
+ conn->tuple = *tuple;
+ conn->zone = *zone;
+- memcpy(rbconn->key, key, sizeof(u32) * keylen);
++ memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
+
+ nf_conncount_list_init(&rbconn->list);
+ list_add(&conn->node, &rbconn->list.head);
+@@ -403,7 +402,6 @@ count_tree(struct net *net,
+ struct rb_node *parent;
+ struct nf_conncount_rb *rbconn;
+ unsigned int hash;
+- u8 keylen = data->keylen;
+
+ hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+ root = &data->root[hash];
+@@ -414,7 +412,7 @@ count_tree(struct net *net,
+
+ rbconn = rb_entry(parent, struct nf_conncount_rb, node);
+
+- diff = key_diff(key, rbconn->key, keylen);
++ diff = key_diff(key, rbconn->key, data->keylen);
+ if (diff < 0) {
+ parent = rcu_dereference_raw(parent->rb_left);
+ } else if (diff > 0) {
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 9f6f2e6435758e..e4ae2a08da6ac3 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2766,6 +2766,7 @@ static const struct nf_ct_hook nf_conntrack_hook = {
+ .get_tuple_skb = nf_conntrack_get_tuple_skb,
+ .attach = nf_conntrack_attach,
+ .set_closing = nf_conntrack_set_closing,
++ .confirm = __nf_conntrack_confirm,
+ };
+
+ void nf_conntrack_init_end(void)
+diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
+index e697a824b0018e..540d97715bd23d 100644
+--- a/net/netfilter/nf_conntrack_h323_asn1.c
++++ b/net/netfilter/nf_conntrack_h323_asn1.c
+@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
+ /* Get fields bitmap */
+ if (nf_h323_error_boundary(bs, 0, f->sz))
+ return H323_ERROR_BOUND;
++ if (f->sz > 32)
++ return H323_ERROR_RANGE;
+ bmp = get_bitmap(bs, f->sz);
+ if (base)
+ *(unsigned int *)base = bmp;
+@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
+ bmp2_len = get_bits(bs, 7) + 1;
+ if (nf_h323_error_boundary(bs, 0, bmp2_len))
+ return H323_ERROR_BOUND;
++ if (bmp2_len > 32)
++ return H323_ERROR_RANGE;
+ bmp2 = get_bitmap(bs, bmp2_len);
+ bmp |= bmp2 >> f->sz;
+ if (base)
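/* Editorial sketch, not part of the patch: a hypothetical demonstration of
 * why the new "> 32" guards matter. Extracting an n-bit field into a u32
 * with shifts is undefined behaviour for n > 32, so out-of-range field
 * sizes from the wire must be rejected before calling the bitmap helper.
 */
#include <stdint.h>
#include <stdio.h>

static int toy_get_bitmap_checked(uint64_t window, unsigned int sz, uint32_t *out)
{
	if (sz > 32)
		return -1;		/* maps to H323_ERROR_RANGE above */
	/* top 'sz' bits of the 64-bit window, left-aligned in the u32 */
	*out = sz ? (uint32_t)(window >> (64 - sz)) << (32 - sz) : 0;
	return 0;
}

int main(void)
{
	uint32_t bmp;

	printf("%d\n", toy_get_bitmap_checked(0xAABBCCDD00000000ull, 8, &bmp)); /* 0 */
	printf("%d\n", toy_get_bitmap_checked(0, 40, &bmp));			/* -1 */
	return 0;
}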
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 334db22199c1d6..282e9644f6fdd6 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -381,7 +381,7 @@ static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
+ #define ctnetlink_dump_secctx(a, b) (0)
+ #endif
+
+-#ifdef CONFIG_NF_CONNTRACK_LABELS
++#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ static inline int ctnetlink_label_size(const struct nf_conn *ct)
+ {
+ struct nf_conn_labels *labels = nf_ct_labels_find(ct);
+@@ -390,6 +390,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct)
+ return 0;
+ return nla_total_size(sizeof(labels->bits));
+ }
++#endif
+
+ static int
+ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
+@@ -410,10 +411,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
+
+ return 0;
+ }
+-#else
+-#define ctnetlink_dump_labels(a, b) (0)
+-#define ctnetlink_label_size(a) (0)
+-#endif
+
+ #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
+
+@@ -3411,7 +3408,8 @@ static int ctnetlink_del_expect(struct sk_buff *skb,
+
+ if (cda[CTA_EXPECT_ID]) {
+ __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
+- if (ntohl(id) != (u32)(unsigned long)exp) {
++
++ if (id != nf_expect_get_id(exp)) {
+ nf_ct_expect_put(exp);
+ return -ENOENT;
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index c6bd533983c1ff..4cc97f971264ed 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -283,7 +283,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ pr_debug("Setting vtag %x for secondary conntrack\n",
+ sh->vtag);
+ ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
+- } else {
++ } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
+ /* If it is a shutdown ack OOTB packet, we expect a return
+ shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
+ pr_debug("Setting vtag %x for new conn OOTB\n",
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 4018acb1d674e1..53d46ebcb5f769 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -457,7 +457,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct tcphdr *tcph,
+- u32 end, u32 win)
++ u32 end, u32 win,
++ enum ip_conntrack_dir dir)
+ {
+ /* SYN-ACK in reply to a SYN
+ * or SYN from reply direction in simultaneous open.
+@@ -471,7 +472,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+ * Both sides must send the Window Scale option
+ * to enable window scaling in either direction.
+ */
+- if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
++ if (dir == IP_CT_DIR_REPLY &&
++ !(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+ receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
+ sender->td_scale = 0;
+ receiver->td_scale = 0;
+@@ -542,7 +544,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
+ if (tcph->syn) {
+ tcp_init_sender(sender, receiver,
+ skb, dataoff, tcph,
+- end, win);
++ end, win, dir);
+ if (!tcph->ack)
+ /* Simultaneous open */
+ return NFCT_TCP_ACCEPT;
+@@ -585,7 +587,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
+ */
+ tcp_init_sender(sender, receiver,
+ skb, dataoff, tcph,
+- end, win);
++ end, win, dir);
+
+ if (dir == IP_CT_DIR_REPLY && !tcph->ack)
+ return NFCT_TCP_ACCEPT;
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 0ee98ce5b81655..559665467b04dd 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -22,9 +22,6 @@
+ #include <net/netfilter/nf_conntrack_acct.h>
+ #include <net/netfilter/nf_conntrack_zones.h>
+ #include <net/netfilter/nf_conntrack_timestamp.h>
+-#ifdef CONFIG_LWTUNNEL
+-#include <net/netfilter/nf_hooks_lwtunnel.h>
+-#endif
+ #include <linux/rculist_nulls.h>
+
+ static bool enable_hooks __read_mostly;
+@@ -612,9 +609,6 @@ enum nf_ct_sysctl_index {
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
+ #endif
+-#ifdef CONFIG_LWTUNNEL
+- NF_SYSCTL_CT_LWTUNNEL,
+-#endif
+
+ __NF_SYSCTL_CT_LAST_SYSCTL,
+ };
+@@ -947,15 +941,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+-#endif
+-#ifdef CONFIG_LWTUNNEL
+- [NF_SYSCTL_CT_LWTUNNEL] = {
+- .procname = "nf_hooks_lwtunnel",
+- .data = NULL,
+- .maxlen = sizeof(int),
+- .mode = 0644,
+- .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
+- },
+ #endif
+ {}
+ };
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 920a5a29ae1dce..a0571339239c40 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -87,12 +87,22 @@ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
+ return 0;
+ }
+
++static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
++ enum flow_offload_tuple_dir dir)
++{
++ struct dst_entry *dst = route->tuple[dir].dst;
++
++ route->tuple[dir].dst = NULL;
++
++ return dst;
++}
++
+ static int flow_offload_fill_route(struct flow_offload *flow,
+- const struct nf_flow_route *route,
++ struct nf_flow_route *route,
+ enum flow_offload_tuple_dir dir)
+ {
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+- struct dst_entry *dst = route->tuple[dir].dst;
++ struct dst_entry *dst = nft_route_dst_fetch(route, dir);
+ int i, j = 0;
+
+ switch (flow_tuple->l3proto) {
+@@ -122,6 +132,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
+ ETH_ALEN);
+ flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
+ flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
++ dst_release(dst);
+ break;
+ case FLOW_OFFLOAD_XMIT_XFRM:
+ case FLOW_OFFLOAD_XMIT_NEIGH:
+@@ -146,7 +157,7 @@ static void nft_flow_dst_release(struct flow_offload *flow,
+ }
+
+ void flow_offload_route_init(struct flow_offload *flow,
+- const struct nf_flow_route *route)
++ struct nf_flow_route *route)
+ {
+ flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
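/* Editorial sketch, not part of the patch: the fetch-and-clear ownership
 * transfer introduced above, shown standalone. Moving the dst pointer out
 * of the route leaves exactly one party responsible for releasing the
 * reference, which is the imbalance the hunk addresses. Illustrative code
 * only.
 */
#include <stdlib.h>

struct toy_dst { int refcnt; };
struct toy_route { struct toy_dst *dst; };

/* Move the dst out of the route: the caller now owns the reference. */
static struct toy_dst *toy_route_dst_fetch(struct toy_route *r)
{
	struct toy_dst *d = r->dst;

	r->dst = NULL;
	return d;
}

static void toy_dst_release(struct toy_dst *d)
{
	if (d && --d->refcnt == 0)
		free(d);
}

int main(void)
{
	struct toy_route r = { malloc(sizeof(struct toy_dst)) };

	if (!r.dst)
		return 1;
	r.dst->refcnt = 1;
	toy_dst_release(toy_route_dst_fetch(&r));	/* single release, no leak */
	toy_dst_release(toy_route_dst_fetch(&r));	/* fetch now yields NULL: safe */
	return 0;
}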
+diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
+index 9505f9d188ff25..b0f19917193241 100644
+--- a/net/netfilter/nf_flow_table_inet.c
++++ b/net/netfilter/nf_flow_table_inet.c
+@@ -17,11 +17,15 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
++ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
++ return NF_ACCEPT;
++
+ veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ proto = veth->h_vlan_encapsulated_proto;
+ break;
+ case htons(ETH_P_PPP_SES):
+- proto = nf_flow_pppoe_proto(skb);
++ if (!nf_flow_pppoe_proto(skb, &proto))
++ return NF_ACCEPT;
+ break;
+ default:
+ proto = skb->protocol;
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index e45fade7640961..846fa2ad7c8580 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -157,7 +157,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
+ tuple->encap[i].proto = skb->protocol;
+ break;
+ case htons(ETH_P_PPP_SES):
+- phdr = (struct pppoe_hdr *)skb_mac_header(skb);
++ phdr = (struct pppoe_hdr *)skb_network_header(skb);
+ tuple->encap[i].id = ntohs(phdr->sid);
+ tuple->encap[i].proto = skb->protocol;
+ break;
+@@ -273,13 +273,17 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ return NF_STOLEN;
+ }
+
+-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
++static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
+ u32 *offset)
+ {
+ struct vlan_ethhdr *veth;
++ __be16 inner_proto;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
++ if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth)))
++ return false;
++
+ veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ if (veth->h_vlan_encapsulated_proto == proto) {
+ *offset += VLAN_HLEN;
+@@ -287,7 +291,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+ }
+ break;
+ case htons(ETH_P_PPP_SES):
+- if (nf_flow_pppoe_proto(skb) == proto) {
++ if (nf_flow_pppoe_proto(skb, &inner_proto) &&
++ inner_proto == proto) {
+ *offset += PPPOE_SES_HLEN;
+ return true;
+ }
+@@ -316,7 +321,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
+ skb_reset_network_header(skb);
+ break;
+ case htons(ETH_P_PPP_SES):
+- skb->protocol = nf_flow_pppoe_proto(skb);
++ skb->protocol = __nf_flow_pppoe_proto(skb);
+ skb_pull(skb, PPPOE_SES_HLEN);
+ skb_reset_network_header(skb);
+ break;
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index a010b25076ca06..3d46372b538e56 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -841,8 +841,8 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
+ struct list_head *block_cb_list)
+ {
+ struct flow_cls_offload cls_flow = {};
++ struct netlink_ext_ack extack = {};
+ struct flow_block_cb *block_cb;
+- struct netlink_ext_ack extack;
+ __be16 proto = ETH_P_ALL;
+ int err, i = 0;
+
+diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
+index 00e89ffd78f692..d8ebebc9775d78 100644
+--- a/net/netfilter/nf_hooks_lwtunnel.c
++++ b/net/netfilter/nf_hooks_lwtunnel.c
+@@ -3,6 +3,9 @@
+ #include <linux/sysctl.h>
+ #include <net/lwtunnel.h>
+ #include <net/netfilter/nf_hooks_lwtunnel.h>
++#include <linux/netfilter.h>
++
++#include "nf_internals.h"
+
+ static inline int nf_hooks_lwtunnel_get(void)
+ {
+@@ -50,4 +53,71 @@ int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
++
++static struct ctl_table nf_lwtunnel_sysctl_table[] = {
++ {
++ .procname = "nf_hooks_lwtunnel",
++ .data = NULL,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
++ },
++};
++
++static int __net_init nf_lwtunnel_net_init(struct net *net)
++{
++ struct ctl_table_header *hdr;
++ struct ctl_table *table;
++
++ table = nf_lwtunnel_sysctl_table;
++ if (!net_eq(net, &init_net)) {
++ table = kmemdup(nf_lwtunnel_sysctl_table,
++ sizeof(nf_lwtunnel_sysctl_table),
++ GFP_KERNEL);
++ if (!table)
++ goto err_alloc;
++ }
++
++ hdr = register_net_sysctl_sz(net, "net/netfilter", table,
++ ARRAY_SIZE(nf_lwtunnel_sysctl_table));
++ if (!hdr)
++ goto err_reg;
++
++ net->nf.nf_lwtnl_dir_header = hdr;
++
++ return 0;
++err_reg:
++ if (!net_eq(net, &init_net))
++ kfree(table);
++err_alloc:
++ return -ENOMEM;
++}
++
++static void __net_exit nf_lwtunnel_net_exit(struct net *net)
++{
++ const struct ctl_table *table;
++
++ table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
++ unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header);
++ if (!net_eq(net, &init_net))
++ kfree(table);
++}
++
++static struct pernet_operations nf_lwtunnel_net_ops = {
++ .init = nf_lwtunnel_net_init,
++ .exit = nf_lwtunnel_net_exit,
++};
++
++int __init netfilter_lwtunnel_init(void)
++{
++ return register_pernet_subsys(&nf_lwtunnel_net_ops);
++}
++
++void netfilter_lwtunnel_fini(void)
++{
++ unregister_pernet_subsys(&nf_lwtunnel_net_ops);
++}
++#else
++int __init netfilter_lwtunnel_init(void) { return 0; }
++void netfilter_lwtunnel_fini(void) {}
+ #endif /* CONFIG_SYSCTL */
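/* Editorial sketch, not part of the patch: a toy model (no kernel APIs) of
 * the per-namespace registration above. The initial namespace uses the
 * static table directly; every other namespace gets its own duplicated copy
 * (kmemdup() in the hunk) so it can be freed independently on exit. Names
 * are illustrative.
 */
#include <stdlib.h>
#include <string.h>

struct toy_ctl { const char *procname; int value; };

static struct toy_ctl toy_template[] = { { "nf_hooks_lwtunnel", 0 } };

struct toy_netns { int is_init_net; struct toy_ctl *table; };

static int toy_net_init(struct toy_netns *net)
{
	net->table = toy_template;
	if (!net->is_init_net) {
		net->table = malloc(sizeof(toy_template));	/* kmemdup() */
		if (!net->table)
			return -1;
		memcpy(net->table, toy_template, sizeof(toy_template));
	}
	return 0;
}

static void toy_net_exit(struct toy_netns *net)
{
	if (!net->is_init_net)
		free(net->table);	/* init_net keeps the static table */
}

int main(void)
{
	struct toy_netns extra = { 0, NULL };

	if (toy_net_init(&extra) == 0)
		toy_net_exit(&extra);
	return 0;
}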
+diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
+index 832ae64179f0f2..25403023060b6d 100644
+--- a/net/netfilter/nf_internals.h
++++ b/net/netfilter/nf_internals.h
+@@ -29,6 +29,12 @@ void nf_queue_nf_hook_drop(struct net *net);
+ /* nf_log.c */
+ int __init netfilter_log_init(void);
+
++#ifdef CONFIG_LWTUNNEL
++/* nf_hooks_lwtunnel.c */
++int __init netfilter_lwtunnel_init(void);
++void netfilter_lwtunnel_fini(void);
++#endif
++
+ /* core.c */
+ void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
+ const struct nf_hook_ops *reg);
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 8cc52d2bd31be5..e16f158388bbe5 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -193,11 +193,12 @@ void nf_logger_put(int pf, enum nf_log_type type)
+ return;
+ }
+
+- BUG_ON(loggers[pf][type] == NULL);
+-
+ rcu_read_lock();
+ logger = rcu_dereference(loggers[pf][type]);
+- module_put(logger->me);
++ if (!logger)
++ WARN_ON_ONCE(1);
++ else
++ module_put(logger->me);
+ rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(nf_logger_put);
+diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c
+index c66689ad2b4919..58402226045e84 100644
+--- a/net/netfilter/nf_log_syslog.c
++++ b/net/netfilter/nf_log_syslog.c
+@@ -111,7 +111,8 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
+ unsigned int hooknum, const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+- const struct nf_loginfo *loginfo, const char *prefix)
++ const struct nf_loginfo *loginfo, const char *prefix,
++ struct net *net)
+ {
+ const struct net_device *physoutdev __maybe_unused;
+ const struct net_device *physindev __maybe_unused;
+@@ -121,7 +122,7 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
+ in ? in->name : "",
+ out ? out->name : "");
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+- physindev = nf_bridge_get_physindev(skb);
++ physindev = nf_bridge_get_physindev(skb, net);
+ if (physindev && in != physindev)
+ nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
+ physoutdev = nf_bridge_get_physoutdev(skb);
+@@ -148,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
+ loginfo = &default_loginfo;
+
+ nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+- prefix);
++ prefix, net);
+ dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));
+
+ nf_log_buf_close(m);
+@@ -845,7 +846,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
+ loginfo = &default_loginfo;
+
+ nf_log_dump_packet_common(m, pf, hooknum, skb, in,
+- out, loginfo, prefix);
++ out, loginfo, prefix, net);
+
+ if (in)
+ dump_mac_header(m, loginfo, skb);
+@@ -880,7 +881,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
+ loginfo = &default_loginfo;
+
+ nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
+- loginfo, prefix);
++ loginfo, prefix, net);
+
+ if (in)
+ dump_mac_header(m, loginfo, skb);
+@@ -916,7 +917,7 @@ static void nf_log_unknown_packet(struct net *net, u_int8_t pf,
+ loginfo = &default_loginfo;
+
+ nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+- prefix);
++ prefix, net);
+
+ dump_mac_header(m, loginfo, skb);
+
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index c4e0516a8dfab4..ccca6e3848bcc8 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -183,7 +183,35 @@ hash_by_src(const struct net *net,
+ return reciprocal_scale(hash, nf_nat_htable_size);
+ }
+
+-/* Is this tuple already taken? (not by us) */
++/**
++ * nf_nat_used_tuple - check if proposed nat tuple clashes with existing entry
++ * @tuple: proposed NAT binding
++ * @ignored_conntrack: our (unconfirmed) conntrack entry
++ *
++ * A conntrack entry can be inserted to the connection tracking table
++ * if there is no existing entry with an identical tuple in either direction.
++ *
++ * Example:
++ * INITIATOR -> NAT/PAT -> RESPONDER
++ *
++ * INITIATOR passes through NAT/PAT ("us") and SNAT is done (saddr rewrite).
++ * Then, later, NAT/PAT itself also connects to RESPONDER.
++ *
++ * This will not work if the SNAT done earlier has same IP:PORT source pair.
++ *
++ * Conntrack table has:
++ * ORIGINAL: $IP_INITIATOR:$SPORT -> $IP_RESPONDER:$DPORT
++ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
++ *
++ * and new locally originating connection wants:
++ * ORIGINAL: $IP_NAT:$SPORT -> $IP_RESPONDER:$DPORT
++ * REPLY: $IP_RESPONDER:$DPORT -> $IP_NAT:$SPORT
++ *
++ * ... which would mean incoming packets cannot be distinguished between
++ * the existing and the newly added entry (identical IP_CT_DIR_REPLY tuple).
++ *
++ * @return: true if the proposed NAT mapping collides with an existing entry.
++ */
+ static int
+ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack)
+@@ -200,6 +228,94 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+ return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
+ }
+
++static bool nf_nat_allow_clash(const struct nf_conn *ct)
++{
++ return nf_ct_l4proto_find(nf_ct_protonum(ct))->allow_clash;
++}
++
++/**
++ * nf_nat_used_tuple_new - check if to-be-inserted conntrack collides with existing entry
++ * @tuple: proposed NAT binding
++ * @ignored_ct: our (unconfirmed) conntrack entry
++ *
++ * Same as nf_nat_used_tuple, but also check for rare clash in reverse
++ * direction. Should be called only when @tuple has not been altered, i.e.
++ * @ignored_conntrack will not be subject to NAT.
++ *
++ * Return: true if the proposed NAT mapping collides with an existing entry.
++ */
++static noinline bool
++nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
++ const struct nf_conn *ignored_ct)
++{
++ static const unsigned long uses_nat = IPS_NAT_MASK | IPS_SEQ_ADJUST_BIT;
++ const struct nf_conntrack_tuple_hash *thash;
++ const struct nf_conntrack_zone *zone;
++ struct nf_conn *ct;
++ bool taken = true;
++ struct net *net;
++
++ if (!nf_nat_used_tuple(tuple, ignored_ct))
++ return false;
++
++ if (!nf_nat_allow_clash(ignored_ct))
++ return true;
++
++ /* Initial choice clashes with existing conntrack.
++ * Check for (rare) reverse collision.
++ *
++ * This can happen when new packets are received in both directions
++ * at the exact same time on different CPUs.
++ *
++	 * Without SMP, the first packet creates a new conntrack entry and
++	 * the second packet is resolved as an established reply packet.
++ *
++ * With parallel processing, both packets could be picked up as
++ * new and both get their own ct entry allocated.
++ *
++	 * If @ignored_ct and the colliding ct are not subject to NAT, pretend
++	 * the tuple is available and let later clash resolution handle this
++	 * at insertion time.
++	 *
++	 * Without it, the 'reply' packet has its source port rewritten
++	 * by the nat engine.
++ */
++ if (READ_ONCE(ignored_ct->status) & uses_nat)
++ return true;
++
++ net = nf_ct_net(ignored_ct);
++ zone = nf_ct_zone(ignored_ct);
++
++ thash = nf_conntrack_find_get(net, zone, tuple);
++ if (unlikely(!thash)) /* clashing entry went away */
++ return false;
++
++ ct = nf_ct_tuplehash_to_ctrack(thash);
++
++ /* NB: IP_CT_DIR_ORIGINAL should be impossible because
++ * nf_nat_used_tuple() handles origin collisions.
++ *
++	 * Handle the remote chance that another CPU confirmed its ct right after.
++ */
++ if (thash->tuple.dst.dir != IP_CT_DIR_REPLY)
++ goto out;
++
++ /* clashing connection subject to NAT? Retry with new tuple. */
++ if (READ_ONCE(ct->status) & uses_nat)
++ goto out;
++
++ if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
++ &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) &&
++ nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
++ &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) {
++ taken = false;
++ goto out;
++ }
++out:
++ nf_ct_put(ct);
++ return taken;
++}
++
+ static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
+ {
+ static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT |
+@@ -608,7 +724,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
+ !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
+ /* try the original tuple first */
+ if (nf_in_range(orig_tuple, range)) {
+- if (!nf_nat_used_tuple(orig_tuple, ct)) {
++ if (!nf_nat_used_tuple_new(orig_tuple, ct)) {
+ *tuple = *orig_tuple;
+ return;
+ }
+diff --git a/net/netfilter/nf_nat_ovs.c b/net/netfilter/nf_nat_ovs.c
+index 551abd2da6143c..0f9a559f620795 100644
+--- a/net/netfilter/nf_nat_ovs.c
++++ b/net/netfilter/nf_nat_ovs.c
+@@ -75,9 +75,10 @@ static int nf_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
+ }
+
+ err = nf_nat_packet(ct, ctinfo, hooknum, skb);
++out:
+ if (err == NF_ACCEPT)
+ *action |= BIT(maniptype);
+-out:
++
+ return err;
+ }
+
+diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
+index 6616ba5d0b0490..5b37487d9d11fa 100644
+--- a/net/netfilter/nf_nat_redirect.c
++++ b/net/netfilter/nf_nat_redirect.c
+@@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+
+ static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
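++/* An address qualifies as a REDIRECT target only if it is not v4-mapped,
++ * is not tentative (unless marked optimistic), and falls within the
++ * requested scope, if any.
++ */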
++static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
++{
++ unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
++
++ if (ifa_addr_type & IPV6_ADDR_MAPPED)
++ return false;
++
++ if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
++ return false;
++
++ if (scope) {
++ unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
++
++ if (!(scope & ifa_scope))
++ return false;
++ }
++
++ return true;
++}
++
+ unsigned int
+ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ unsigned int hooknum)
+@@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ if (hooknum == NF_INET_LOCAL_OUT) {
+ newdst.in6 = loopback_addr;
+ } else {
++ unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
+ struct inet6_dev *idev;
+- struct inet6_ifaddr *ifa;
+ bool addr = false;
+
+ idev = __in6_dev_get(skb->dev);
+ if (idev != NULL) {
++ const struct inet6_ifaddr *ifa;
++
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
++ if (!nf_nat_redirect_ipv6_usable(ifa, scope))
++ continue;
++
+ newdst.in6 = ifa->addr;
+ addr = true;
+ break;
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 63d1516816b1fd..e2f334f70281f8 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -82,11 +82,9 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
+ {
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ const struct sk_buff *skb = entry->skb;
+- struct nf_bridge_info *nf_bridge;
+
+- nf_bridge = nf_bridge_info_get(skb);
+- if (nf_bridge) {
+- entry->physin = nf_bridge_get_physindev(skb);
++ if (nf_bridge_info_exists(skb)) {
++ entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
+ entry->physout = nf_bridge_get_physoutdev(skb);
+ } else {
+ entry->physin = NULL;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 29c651804cb221..aacb0d7f82e9f8 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -24,6 +24,7 @@
+ #include <net/sock.h>
+
+ #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
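++/* Upper bound on the length of anonymous set name templates ("__set%d"). */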
++#define NFT_SET_MAX_ANONLEN 16
+
+ unsigned int nf_tables_net_id __read_mostly;
+
+@@ -593,6 +594,12 @@ static int nft_mapelem_deactivate(const struct nft_ctx *ctx,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+ {
++ struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++
++ if (!nft_set_elem_active(ext, iter->genmask))
++ return 0;
++
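++	/* Make the element inactive in the next generation before dropping
++	 * its data references, so that an abort can restore it.
++	 */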
++ nft_set_elem_change_active(ctx->net, set, ext);
+ nft_setelem_data_deactivate(ctx->net, set, elem);
+
+ return 0;
+@@ -618,6 +625,7 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
+ continue;
+
+ elem.priv = catchall->elem;
++ nft_set_elem_change_active(ctx->net, set, ext);
+ nft_setelem_data_deactivate(ctx->net, set, &elem);
+ break;
+ }
+@@ -627,6 +635,7 @@ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ struct nft_set_iter iter = {
+ .genmask = nft_genmask_next(ctx->net),
++ .type = NFT_ITER_UPDATE,
+ .fn = nft_mapelem_deactivate,
+ };
+
+@@ -685,15 +694,16 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
+ return err;
+ }
+
+-static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+- struct nft_flowtable *flowtable)
++static struct nft_trans *
++nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
++ struct nft_flowtable *flowtable)
+ {
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type,
+ sizeof(struct nft_trans_flowtable));
+ if (trans == NULL)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ if (msg_type == NFT_MSG_NEWFLOWTABLE)
+ nft_activate_next(ctx->net, flowtable);
+@@ -702,22 +712,22 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+ nft_trans_flowtable(trans) = flowtable;
+ nft_trans_commit_list_add_tail(ctx->net, trans);
+
+- return 0;
++ return trans;
+ }
+
+ static int nft_delflowtable(struct nft_ctx *ctx,
+ struct nft_flowtable *flowtable)
+ {
+- int err;
++ struct nft_trans *trans;
+
+- err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+- if (err < 0)
+- return err;
++ trans = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
++ if (IS_ERR(trans))
++ return PTR_ERR(trans);
+
+ nft_deactivate_next(ctx->net, flowtable);
+ nft_use_dec(&ctx->table->use);
+
+- return err;
++ return 0;
+ }
+
+ static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
+@@ -805,7 +815,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
+
+ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ const struct nlattr *nla,
+- u8 genmask, u32 nlpid)
++ int family, u8 genmask, u32 nlpid)
+ {
+ struct nftables_pernet *nft_net;
+ struct nft_table *table;
+@@ -813,6 +823,7 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ nft_net = nft_pernet(net);
+ list_for_each_entry(table, &nft_net->tables, list) {
+ if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
++ table->family == family &&
+ nft_active_genmask(table, genmask)) {
+ if (nft_table_has_owner(table) &&
+ nlpid && table->nlpid != nlpid)
+@@ -1197,6 +1208,26 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
+ #define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
+ __NFT_TABLE_F_WAS_AWAKEN)
+
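++/* A table update is also pending if this batch already queued a base chain
++ * hook update or deletion for the same table.
++ */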
++static bool nft_table_pending_update(const struct nft_ctx *ctx)
++{
++ struct nftables_pernet *nft_net = nft_pernet(ctx->net);
++ struct nft_trans *trans;
++
++ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++ return true;
++
++ list_for_each_entry(trans, &nft_net->commit_list, list) {
++ if (trans->ctx.table == ctx->table &&
++ ((trans->msg_type == NFT_MSG_NEWCHAIN &&
++ nft_trans_chain_update(trans)) ||
++ (trans->msg_type == NFT_MSG_DELCHAIN &&
++ nft_is_base_chain(trans->ctx.chain))))
++ return true;
++ }
++
++ return false;
++}
++
+ static int nf_tables_updtable(struct nft_ctx *ctx)
+ {
+ struct nft_trans *trans;
+@@ -1210,7 +1241,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ if (flags & ~NFT_TABLE_F_MASK)
+ return -EOPNOTSUPP;
+
+- if (flags == ctx->table->flags)
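++	/* Compare userspace-visible flags only; internal __NFT_TABLE_F_UPDATE
++	 * bits may be set on the table at this point.
++	 */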
++ if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
+ return 0;
+
+ if ((nft_table_has_owner(ctx->table) &&
+@@ -1220,7 +1251,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ return -EOPNOTSUPP;
+
+ /* No dormant off/on/off/on games in single transaction */
+- if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++ if (nft_table_pending_update(ctx))
+ return -EINVAL;
+
+ trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+@@ -1251,6 +1282,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ return 0;
+
+ err_register_hooks:
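++	/* Restore the dormant flag that was cleared for the failed wake-up. */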
++ ctx->table->flags |= NFT_TABLE_F_DORMANT;
+ nft_trans_destroy(trans);
+ return ret;
+ }
+@@ -1546,7 +1578,7 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
+
+ if (nla[NFTA_TABLE_HANDLE]) {
+ attr = nla[NFTA_TABLE_HANDLE];
+- table = nft_table_lookup_byhandle(net, attr, genmask,
++ table = nft_table_lookup_byhandle(net, attr, family, genmask,
+ NETLINK_CB(skb).portid);
+ } else {
+ attr = nla[NFTA_TABLE_NAME];
+@@ -1753,7 +1785,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
+ if (!hook_list)
+ hook_list = &basechain->hook_list;
+
+- list_for_each_entry(hook, hook_list, list) {
++ list_for_each_entry_rcu(hook, hook_list, list) {
+ if (!first)
+ first = hook;
+
+@@ -2080,7 +2112,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
+ struct nft_hook *hook;
+ int err;
+
+- hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
++ hook = kzalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
+ if (!hook) {
+ err = -ENOMEM;
+ goto err_hook_alloc;
+@@ -2262,7 +2294,16 @@ static int nft_chain_parse_hook(struct net *net,
+ return -EOPNOTSUPP;
+ }
+
+- type = basechain->type;
++ if (nla[NFTA_CHAIN_TYPE]) {
++ type = __nf_tables_chain_type_lookup(nla[NFTA_CHAIN_TYPE],
++ family);
++ if (!type) {
++ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
++ return -ENOENT;
++ }
++ } else {
++ type = basechain->type;
++ }
+ }
+
+ if (!try_module_get(type->owner)) {
+@@ -2407,6 +2448,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ struct nft_stats __percpu *stats = NULL;
+ struct nft_chain_hook hook = {};
+
++ if (table->flags & __NFT_TABLE_F_UPDATE)
++ return -EINVAL;
++
+ if (flags & NFT_CHAIN_BINDING)
+ return -EOPNOTSUPP;
+
+@@ -2494,19 +2538,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ RCU_INIT_POINTER(chain->blob_gen_0, blob);
+ RCU_INIT_POINTER(chain->blob_gen_1, blob);
+
+- err = nf_tables_register_hook(net, table, chain);
+- if (err < 0)
+- goto err_destroy_chain;
+-
+ if (!nft_use_inc(&table->use)) {
+ err = -EMFILE;
+- goto err_use;
++ goto err_destroy_chain;
+ }
+
+ trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+- goto err_unregister_hook;
++ goto err_trans;
+ }
+
+ nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
+@@ -2514,17 +2554,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ nft_trans_chain_policy(trans) = policy;
+
+ err = nft_chain_add(table, chain);
+- if (err < 0) {
+- nft_trans_destroy(trans);
+- goto err_unregister_hook;
+- }
++ if (err < 0)
++ goto err_chain_add;
++
++ /* This must be LAST to ensure no packets are walking over this chain. */
++ err = nf_tables_register_hook(net, table, chain);
++ if (err < 0)
++ goto err_register_hook;
+
+ return 0;
+
+-err_unregister_hook:
++err_register_hook:
++ nft_chain_del(chain);
++err_chain_add:
++ nft_trans_destroy(trans);
++err_trans:
+ nft_use_dec_restore(&table->use);
+-err_use:
+- nf_tables_unregister_hook(net, table, chain);
+ err_destroy_chain:
+ nf_tables_chain_destroy(ctx);
+
+@@ -2607,17 +2652,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
+ }
+ }
+
+- if (nla[NFTA_CHAIN_COUNTERS]) {
+- if (!nft_is_base_chain(chain)) {
+- err = -EOPNOTSUPP;
+- goto err_hooks;
+- }
+-
+- stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+- if (IS_ERR(stats)) {
+- err = PTR_ERR(stats);
+- goto err_hooks;
+- }
++ if (table->flags & __NFT_TABLE_F_UPDATE &&
++ !list_empty(&hook.list)) {
++ NL_SET_BAD_ATTR(extack, attr);
++ err = -EOPNOTSUPP;
++ goto err_hooks;
+ }
+
+ if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+@@ -2634,6 +2673,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
+ }
+
+ unregister = true;
++
++ if (nla[NFTA_CHAIN_COUNTERS]) {
++ if (!nft_is_base_chain(chain)) {
++ err = -EOPNOTSUPP;
++ goto err_hooks;
++ }
++
++ stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
++ if (IS_ERR(stats)) {
++ err = PTR_ERR(stats);
++ goto err_hooks;
++ }
++ }
++
+ err = -ENOMEM;
+ trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
+ sizeof(struct nft_trans_chain));
+@@ -2835,6 +2888,9 @@ static int nft_delchain_hook(struct nft_ctx *ctx,
+ struct nft_trans *trans;
+ int err;
+
++ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
++ return -EOPNOTSUPP;
++
+ err = nft_chain_parse_hook(ctx->net, basechain, nla, &chain_hook,
+ ctx->family, chain->flags, extack);
+ if (err < 0)
+@@ -2919,7 +2975,8 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);
+
+ if (nla[NFTA_CHAIN_HOOK]) {
+- if (chain->flags & NFT_CHAIN_HW_OFFLOAD)
++ if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYCHAIN ||
++ chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ return -EOPNOTSUPP;
+
+ if (nft_is_base_chain(chain)) {
+@@ -2998,7 +3055,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
+ {
+ const struct nft_expr_type *type, *candidate = NULL;
+
+- list_for_each_entry(type, &nf_tables_expressions, list) {
++ list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
+ if (!nla_strcmp(nla, type->name)) {
+ if (!type->family && !candidate)
+ candidate = type;
+@@ -3030,9 +3087,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
+ if (nla == NULL)
+ return ERR_PTR(-EINVAL);
+
++ rcu_read_lock();
+ type = __nft_expr_type_get(family, nla);
+- if (type != NULL && try_module_get(type->owner))
++ if (type != NULL && try_module_get(type->owner)) {
++ rcu_read_unlock();
+ return type;
++ }
++ rcu_read_unlock();
+
+ lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+@@ -3465,10 +3526,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
+ goto cont_skip;
+ if (*idx < s_idx)
+ goto cont;
+- if (*idx > s_idx) {
+- memset(&cb->args[1], 0,
+- sizeof(cb->args) - sizeof(cb->args[0]));
+- }
+ if (prule)
+ handle = prule->handle;
+ else
+@@ -3694,6 +3751,15 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
+ nf_tables_rule_destroy(ctx, rule);
+ }
+
++/**
++ * nft_chain_validate - loop detection and hook validation
++ * @ctx: context containing call depth and base chain
++ * @chain: chain to validate
++ *
++ * Walk through the rules of the given chain and chase all jumps/gotos
++ * and set lookups until either the jump limit is hit or all reachable
++ * chains have been validated.
++ */
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ {
+ struct nft_expr *expr, *last;
+@@ -3715,6 +3781,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ if (!expr->ops->validate)
+ continue;
+
++			/* This may call nft_chain_validate() recursively;
++			 * callers that do so must increment ctx->level.
++ */
+ err = expr->ops->validate(ctx, expr, &data);
+ if (err < 0)
+ return err;
+@@ -3758,6 +3827,9 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nft_data *data;
+ int err;
+
++ if (!nft_set_elem_active(ext, iter->genmask))
++ return 0;
++
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+ *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+ return 0;
+@@ -3781,19 +3853,22 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
+
+ int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+- u8 genmask = nft_genmask_next(ctx->net);
++ struct nft_set_iter dummy_iter = {
++ .genmask = nft_genmask_next(ctx->net),
++ };
+ struct nft_set_elem_catchall *catchall;
+ struct nft_set_elem elem;
+ struct nft_set_ext *ext;
+ int ret = 0;
+
+ list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ ext = nft_set_elem_ext(set, catchall->elem);
+- if (!nft_set_elem_active(ext, genmask))
++ if (!nft_set_elem_active(ext, dummy_iter.genmask))
+ continue;
+
+ elem.priv = catchall->elem;
+- ret = nft_setelem_validate(ctx, set, NULL, &elem);
++ ret = nft_setelem_validate(ctx, set, &dummy_iter, &elem);
+ if (ret < 0)
+ return ret;
+ }
+@@ -4345,6 +4420,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+ if (p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
++ if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
++ return -EINVAL;
++
+ inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (inuse == NULL)
+ return -ENOMEM;
+@@ -4397,7 +4475,7 @@ int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
+ return -ERANGE;
+
+ ms *= NSEC_PER_MSEC;
+- *result = nsecs_to_jiffies64(ms);
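++	/* Round a nonzero sub-jiffy timeout up to one jiffy instead of zero. */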
++ *result = nsecs_to_jiffies64(ms) ? : !!ms;
+ return 0;
+ }
+
+@@ -4747,8 +4825,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ const struct nlattr *nla)
+ {
++ u32 num_regs = 0, key_num_regs = 0;
+ struct nlattr *attr;
+- u32 num_regs = 0;
+ int rem, err, i;
+
+ nla_for_each_nested(attr, nla, rem) {
+@@ -4763,6 +4841,10 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ for (i = 0; i < desc->field_count; i++)
+ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
+
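++	/* The concatenated field lengths must add up to the full key length. */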
++ key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
++ if (key_num_regs != num_regs)
++ return -EINVAL;
++
+ if (num_regs > NFT_REG32_COUNT)
+ return -E2BIG;
+
+@@ -4924,6 +5006,12 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+ (NFT_SET_EVAL | NFT_SET_OBJECT))
+ return -EOPNOTSUPP;
++ if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
++ (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
++ return -EOPNOTSUPP;
++ if ((flags & (NFT_SET_CONSTANT | NFT_SET_TIMEOUT)) ==
++ (NFT_SET_CONSTANT | NFT_SET_TIMEOUT))
++ return -EOPNOTSUPP;
+ }
+
+ desc.dtype = 0;
+@@ -4984,16 +5072,28 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ }
+
+ desc.policy = NFT_SET_POL_PERFORMANCE;
+- if (nla[NFTA_SET_POLICY] != NULL)
++ if (nla[NFTA_SET_POLICY] != NULL) {
+ desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
++ switch (desc.policy) {
++ case NFT_SET_POL_PERFORMANCE:
++ case NFT_SET_POL_MEMORY:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++ }
+
+ if (nla[NFTA_SET_DESC] != NULL) {
+ err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
+ if (err < 0)
+ return err;
+
+- if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
++ if (desc.field_count > 1) {
++ if (!(flags & NFT_SET_CONCAT))
++ return -EINVAL;
++ } else if (flags & NFT_SET_CONCAT) {
+ return -EINVAL;
++ }
+ } else if (flags & NFT_SET_CONCAT) {
+ return -EINVAL;
+ }
+@@ -5260,6 +5360,11 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+ {
++ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++
++ if (!nft_set_elem_active(ext, iter->genmask))
++ return 0;
++
+ return nft_setelem_data_validate(ctx, set, elem);
+ }
+
+@@ -5306,6 +5411,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ }
+
+ iter.genmask = nft_genmask_next(ctx->net);
++ iter.type = NFT_ITER_UPDATE;
+ iter.skip = 0;
+ iter.count = 0;
+ iter.err = 0;
+@@ -5337,6 +5443,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+
+ if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
+ list_del_rcu(&set->list);
++ set->dead = 1;
+ if (event)
+ nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+ GFP_KERNEL);
+@@ -5352,6 +5459,13 @@ static int nft_mapelem_activate(const struct nft_ctx *ctx,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+ {
++ struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++
++	/* Called from the abort path: reverse the check to undo the changes. */
++ if (nft_set_elem_active(ext, iter->genmask))
++ return 0;
++
++ nft_clear(ctx->net, ext);
+ nft_setelem_data_activate(ctx->net, set, elem);
+
+ return 0;
+@@ -5370,6 +5484,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
+ if (!nft_set_elem_active(ext, genmask))
+ continue;
+
++ nft_clear(ctx->net, ext);
+ elem.priv = catchall->elem;
+ nft_setelem_data_activate(ctx->net, set, &elem);
+ break;
+@@ -5380,6 +5495,7 @@ static void nft_map_activate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ struct nft_set_iter iter = {
+ .genmask = nft_genmask_next(ctx->net),
++ .type = NFT_ITER_UPDATE,
+ .fn = nft_mapelem_activate,
+ };
+
+@@ -5573,8 +5689,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
+- set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+- set->dlen) < 0)
++ nft_set_datatype(set), set->dlen) < 0)
+ goto nla_put_failure;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
+@@ -5644,7 +5759,10 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ struct nft_set_dump_args *args;
+
+- if (nft_set_elem_expired(ext))
++ if (!nft_set_elem_active(ext, iter->genmask))
++ return 0;
++
++ if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
+ return 0;
+
+ args = container_of(iter, struct nft_set_dump_args, iter);
+@@ -5759,6 +5877,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+ args.skb = skb;
+ args.reset = reset;
+ args.iter.genmask = nft_genmask_cur(net);
++ args.iter.type = NFT_ITER_READ;
+ args.iter.skip = cb->args[0];
+ args.iter.count = 0;
+ args.iter.err = 0;
+@@ -6410,7 +6529,7 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set,
+ struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+
+ if (nft_setelem_is_catchall(set, elem)) {
+- nft_set_elem_change_active(net, set, ext);
++ nft_clear(net, ext);
+ } else {
+ set->ops->activate(net, set, elem);
+ }
+@@ -6425,7 +6544,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,
+
+ list_for_each_entry(catchall, &set->catchall_list, list) {
+ ext = nft_set_elem_ext(set, catchall->elem);
+- if (!nft_is_active(net, ext))
++ if (!nft_is_active_next(net, ext))
+ continue;
+
+ kfree(elem->priv);
+@@ -6468,6 +6587,12 @@ static int nft_setelem_deactivate(const struct net *net,
+ return ret;
+ }
+
++static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
++{
++ list_del_rcu(&catchall->list);
++ kfree_rcu(catchall, rcu);
++}
++
+ static void nft_setelem_catchall_remove(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+@@ -6476,8 +6601,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
+
+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ if (catchall->elem == elem->priv) {
+- list_del_rcu(&catchall->list);
+- kfree_rcu(catchall, rcu);
++ nft_setelem_catchall_destroy(catchall);
+ break;
+ }
+ }
+@@ -6596,17 +6720,23 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ return err;
+ } else if (set->flags & NFT_SET_TIMEOUT &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END)) {
+- timeout = READ_ONCE(set->timeout);
++ timeout = set->timeout;
+ }
+
+ expiration = 0;
+ if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) {
+ if (!(set->flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
++ if (timeout == 0)
++ return -EOPNOTSUPP;
++
+ err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION],
+ &expiration);
+ if (err)
+ return err;
++
++ if (expiration > timeout)
++ return -ERANGE;
+ }
+
+ if (nla[NFTA_SET_ELEM_EXPR]) {
+@@ -6697,7 +6827,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ if (err < 0)
+ goto err_parse_key_end;
+
+- if (timeout != READ_ONCE(set->timeout)) {
++ if (timeout != set->timeout) {
+ err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+ if (err < 0)
+ goto err_parse_key_end;
+@@ -6960,6 +7090,16 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+ }
+ }
+
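++/* Element liveness in the next generation, i.e. the state that a commit of
++ * the current transaction would make active.
++ */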
++static int nft_setelem_active_next(const struct net *net,
++ const struct nft_set *set,
++ struct nft_set_elem *elem)
++{
++ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++ u8 genmask = nft_genmask_next(net);
++
++ return nft_set_elem_active(ext, genmask);
++}
++
+ static void nft_setelem_data_activate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_set_elem *elem)
+@@ -7083,9 +7223,13 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+ {
++ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ struct nft_trans *trans;
+ int err;
+
++ if (!nft_set_elem_active(ext, iter->genmask))
++ return 0;
++
+ trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
+ sizeof(struct nft_trans_elem), GFP_ATOMIC);
+ if (!trans)
+@@ -7155,6 +7299,7 @@ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
+ {
+ struct nft_set_iter iter = {
+ .genmask = genmask,
++ .type = NFT_ITER_UPDATE,
+ .fn = nft_setelem_flush,
+ };
+
+@@ -7209,10 +7354,11 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, attr);
+- break;
++ return err;
+ }
+ }
+- return err;
++
++ return 0;
+ }
+
+ /*
+@@ -7383,11 +7529,15 @@ static int nft_object_dump(struct sk_buff *skb, unsigned int attr,
+ return -1;
+ }
+
+-static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
++static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
+ {
+ const struct nft_object_type *type;
+
+- list_for_each_entry(type, &nf_tables_objects, list) {
++ list_for_each_entry_rcu(type, &nf_tables_objects, list) {
++ if (type->family != NFPROTO_UNSPEC &&
++ type->family != family)
++ continue;
++
+ if (objtype == type->type)
+ return type;
+ }
+@@ -7395,13 +7545,17 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
+ }
+
+ static const struct nft_object_type *
+-nft_obj_type_get(struct net *net, u32 objtype)
++nft_obj_type_get(struct net *net, u32 objtype, u8 family)
+ {
+ const struct nft_object_type *type;
+
+- type = __nft_obj_type_get(objtype);
+- if (type != NULL && try_module_get(type->owner))
++ rcu_read_lock();
++ type = __nft_obj_type_get(objtype, family);
++ if (type != NULL && try_module_get(type->owner)) {
++ rcu_read_unlock();
+ return type;
++ }
++ rcu_read_unlock();
+
+ lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+@@ -7492,7 +7646,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+- type = __nft_obj_type_get(objtype);
++ type = __nft_obj_type_get(objtype, family);
+ if (WARN_ON_ONCE(!type))
+ return -ENOENT;
+
+@@ -7506,7 +7660,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ if (!nft_use_inc(&table->use))
+ return -EMFILE;
+
+- type = nft_obj_type_get(net, objtype);
++ type = nft_obj_type_get(net, objtype, family);
+ if (IS_ERR(type)) {
+ err = PTR_ERR(type);
+ goto err_type;
+@@ -7617,28 +7771,26 @@ static void audit_log_obj_reset(const struct nft_table *table,
+ kfree(buf);
+ }
+
+-struct nft_obj_filter {
++struct nft_obj_dump_ctx {
++ unsigned int s_idx;
+ char *table;
+ u32 type;
++ bool reset;
+ };
+
+ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+- const struct nft_table *table;
+- unsigned int idx = 0, s_idx = cb->args[0];
+- struct nft_obj_filter *filter = cb->data;
++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+ struct net *net = sock_net(skb->sk);
+ int family = nfmsg->nfgen_family;
+ struct nftables_pernet *nft_net;
++ const struct nft_table *table;
+ unsigned int entries = 0;
+ struct nft_object *obj;
+- bool reset = false;
++ unsigned int idx = 0;
+ int rc = 0;
+
+- if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
+- reset = true;
+-
+ rcu_read_lock();
+ nft_net = nft_pernet(net);
+ cb->seq = READ_ONCE(nft_net->base_seq);
+@@ -7651,17 +7803,12 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ list_for_each_entry_rcu(obj, &table->objects, list) {
+ if (!nft_is_active(net, obj))
+ goto cont;
+- if (idx < s_idx)
++ if (idx < ctx->s_idx)
+ goto cont;
+- if (idx > s_idx)
+- memset(&cb->args[1], 0,
+- sizeof(cb->args) - sizeof(cb->args[0]));
+- if (filter && filter->table &&
+- strcmp(filter->table, table->name))
++ if (ctx->table && strcmp(ctx->table, table->name))
+ goto cont;
+- if (filter &&
+- filter->type != NFT_OBJECT_UNSPEC &&
+- obj->ops->type->type != filter->type)
++ if (ctx->type != NFT_OBJECT_UNSPEC &&
++ obj->ops->type->type != ctx->type)
+ goto cont;
+
+ rc = nf_tables_fill_obj_info(skb, net,
+@@ -7670,7 +7817,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ NFT_MSG_NEWOBJ,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family, table,
+- obj, reset);
++ obj, ctx->reset);
+ if (rc < 0)
+ break;
+
+@@ -7679,58 +7826,71 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
+ cont:
+ idx++;
+ }
+- if (reset && entries)
++ if (ctx->reset && entries)
+ audit_log_obj_reset(table, nft_net->base_seq, entries);
+ if (rc < 0)
+ break;
+ }
+ rcu_read_unlock();
+
+- cb->args[0] = idx;
++ ctx->s_idx = idx;
+ return skb->len;
+ }
+
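++/* A reset dump modifies object state (counters), so serialize it against
++ * transaction commits.
++ */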
++static int nf_tables_dumpreset_obj(struct sk_buff *skb,
++ struct netlink_callback *cb)
++{
++ struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk));
++ int ret;
++
++ mutex_lock(&nft_net->commit_mutex);
++ ret = nf_tables_dump_obj(skb, cb);
++ mutex_unlock(&nft_net->commit_mutex);
++
++ return ret;
++}
++
+ static int nf_tables_dump_obj_start(struct netlink_callback *cb)
+ {
++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+ const struct nlattr * const *nla = cb->data;
+- struct nft_obj_filter *filter = NULL;
+
+- if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
+- filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+- if (!filter)
+- return -ENOMEM;
++ BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
+
+- if (nla[NFTA_OBJ_TABLE]) {
+- filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
+- if (!filter->table) {
+- kfree(filter);
+- return -ENOMEM;
+- }
+- }
+-
+- if (nla[NFTA_OBJ_TYPE])
+- filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
++ if (nla[NFTA_OBJ_TABLE]) {
++ ctx->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
++ if (!ctx->table)
++ return -ENOMEM;
+ }
+
+- cb->data = filter;
++ if (nla[NFTA_OBJ_TYPE])
++ ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
++
+ return 0;
+ }
+
++static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb)
++{
++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
++
++ ctx->reset = true;
++
++ return nf_tables_dump_obj_start(cb);
++}
++
+ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+ {
+- struct nft_obj_filter *filter = cb->data;
++ struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
+
+- if (filter) {
+- kfree(filter->table);
+- kfree(filter);
+- }
++ kfree(ctx->table);
+
+ return 0;
+ }
+
+ /* called with rcu_read_lock held */
+-static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+- const struct nlattr * const nla[])
++static struct sk_buff *
++nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
++ const struct nlattr * const nla[], bool reset)
+ {
+ struct netlink_ext_ack *extack = info->extack;
+ u8 genmask = nft_genmask_cur(info->net);
+@@ -7739,72 +7899,109 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
+ struct net *net = info->net;
+ struct nft_object *obj;
+ struct sk_buff *skb2;
+- bool reset = false;
+ u32 objtype;
+ int err;
+
+- if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
+- struct netlink_dump_control c = {
+- .start = nf_tables_dump_obj_start,
+- .dump = nf_tables_dump_obj,
+- .done = nf_tables_dump_obj_done,
+- .module = THIS_MODULE,
+- .data = (void *)nla,
+- };
+-
+- return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+- }
+-
+ if (!nla[NFTA_OBJ_NAME] ||
+ !nla[NFTA_OBJ_TYPE])
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+
+ table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
+ if (IS_ERR(table)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
+- return PTR_ERR(table);
++ return ERR_CAST(table);
+ }
+
+ objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
+ obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
+ if (IS_ERR(obj)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
+- return PTR_ERR(obj);
++ return ERR_CAST(obj);
+ }
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb2)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+- if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
+- reset = true;
++ err = nf_tables_fill_obj_info(skb2, net, portid,
++ info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
++ family, table, obj, reset);
++ if (err < 0) {
++ kfree_skb(skb2);
++ return ERR_PTR(err);
++ }
++
++ return skb2;
++}
++
++static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
++ const struct nlattr * const nla[])
++{
++ u32 portid = NETLINK_CB(skb).portid;
++ struct sk_buff *skb2;
+
+- if (reset) {
+- const struct nftables_pernet *nft_net;
+- char *buf;
++ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
++ struct netlink_dump_control c = {
++ .start = nf_tables_dump_obj_start,
++ .dump = nf_tables_dump_obj,
++ .done = nf_tables_dump_obj_done,
++ .module = THIS_MODULE,
++ .data = (void *)nla,
++ };
++
++ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
++ }
+
+- nft_net = nft_pernet(net);
+- buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
++ skb2 = nf_tables_getobj_single(portid, info, nla, false);
++ if (IS_ERR(skb2))
++ return PTR_ERR(skb2);
+
+- audit_log_nfcfg(buf,
+- family,
+- 1,
+- AUDIT_NFT_OP_OBJ_RESET,
+- GFP_ATOMIC);
+- kfree(buf);
++ return nfnetlink_unicast(skb2, info->net, portid);
++}
++
++static int nf_tables_getobj_reset(struct sk_buff *skb,
++ const struct nfnl_info *info,
++ const struct nlattr * const nla[])
++{
++ struct nftables_pernet *nft_net = nft_pernet(info->net);
++ u32 portid = NETLINK_CB(skb).portid;
++ struct net *net = info->net;
++ struct sk_buff *skb2;
++ char *buf;
++
++ if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
++ struct netlink_dump_control c = {
++ .start = nf_tables_dumpreset_obj_start,
++ .dump = nf_tables_dumpreset_obj,
++ .done = nf_tables_dump_obj_done,
++ .module = THIS_MODULE,
++ .data = (void *)nla,
++ };
++
++ return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
+ }
+
+- err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
+- info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
+- family, table, obj, reset);
+- if (err < 0)
+- goto err_fill_obj_info;
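++	/* The reset path sleeps on the commit mutex: leave the RCU read-side
++	 * section and pin the module so it cannot go away meanwhile.
++	 */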
++ if (!try_module_get(THIS_MODULE))
++ return -EINVAL;
++ rcu_read_unlock();
++ mutex_lock(&nft_net->commit_mutex);
++ skb2 = nf_tables_getobj_single(portid, info, nla, true);
++ mutex_unlock(&nft_net->commit_mutex);
++ rcu_read_lock();
++ module_put(THIS_MODULE);
+
+- return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
++ if (IS_ERR(skb2))
++ return PTR_ERR(skb2);
+
+-err_fill_obj_info:
+- kfree_skb(skb2);
+- return err;
++ buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
++ nla_len(nla[NFTA_OBJ_TABLE]),
++ (char *)nla_data(nla[NFTA_OBJ_TABLE]),
++ nft_net->base_seq);
++ audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
++ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
++ kfree(buf);
++
++ return nfnetlink_unicast(skb2, net, portid);
+ }
+
+ static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
+@@ -8087,11 +8284,12 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+ return err;
+ }
+
++/* call under rcu_read_lock */
+ static const struct nf_flowtable_type *__nft_flowtable_type_get(u8 family)
+ {
+ const struct nf_flowtable_type *type;
+
+- list_for_each_entry(type, &nf_tables_flowtables, list) {
++ list_for_each_entry_rcu(type, &nf_tables_flowtables, list) {
+ if (family == type->family)
+ return type;
+ }
+@@ -8103,9 +8301,13 @@ nft_flowtable_type_get(struct net *net, u8 family)
+ {
+ const struct nf_flowtable_type *type;
+
++ rcu_read_lock();
+ type = __nft_flowtable_type_get(family);
+- if (type != NULL && try_module_get(type->owner))
++ if (type != NULL && try_module_get(type->owner)) {
++ rcu_read_unlock();
+ return type;
++ }
++ rcu_read_unlock();
+
+ lockdep_nfnl_nft_mutex_not_held();
+ #ifdef CONFIG_MODULES
+@@ -8297,9 +8499,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ u8 family = info->nfmsg->nfgen_family;
+ const struct nf_flowtable_type *type;
+ struct nft_flowtable *flowtable;
+- struct nft_hook *hook, *next;
+ struct net *net = info->net;
+ struct nft_table *table;
++ struct nft_trans *trans;
+ struct nft_ctx ctx;
+ int err;
+
+@@ -8379,34 +8581,34 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
+ extack, true);
+ if (err < 0)
+- goto err4;
++ goto err_flowtable_parse_hooks;
+
+ list_splice(&flowtable_hook.list, &flowtable->hook_list);
+ flowtable->data.priority = flowtable_hook.priority;
+ flowtable->hooknum = flowtable_hook.num;
+
++ trans = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
++ if (IS_ERR(trans)) {
++ err = PTR_ERR(trans);
++ goto err_flowtable_trans;
++ }
++
++ /* This must be LAST to ensure no packets are walking over this flowtable. */
+ err = nft_register_flowtable_net_hooks(ctx.net, table,
+ &flowtable->hook_list,
+ flowtable);
+- if (err < 0) {
+- nft_hooks_destroy(&flowtable->hook_list);
+- goto err4;
+- }
+-
+- err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+ if (err < 0)
+- goto err5;
++ goto err_flowtable_hooks;
+
+ list_add_tail_rcu(&flowtable->list, &table->flowtables);
+
+ return 0;
+-err5:
+- list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+- nft_unregister_flowtable_hook(net, flowtable, hook);
+- list_del_rcu(&hook->list);
+- kfree_rcu(hook, rcu);
+- }
+-err4:
++
++err_flowtable_hooks:
++ nft_trans_destroy(trans);
++err_flowtable_trans:
++ nft_hooks_destroy(&flowtable->hook_list);
++err_flowtable_parse_hooks:
+ flowtable->data.type->free(&flowtable->data);
+ err3:
+ module_put(type->owner);
+@@ -8784,7 +8986,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
+ list_del_rcu(&hook->list);
+- kfree(hook);
++ kfree_rcu(hook, rcu);
+ }
+ kfree(flowtable->name);
+ module_put(flowtable->data.type->owner);
+@@ -9077,7 +9279,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
+ .policy = nft_obj_policy,
+ },
+ [NFT_MSG_GETOBJ_RESET] = {
+- .call = nf_tables_getobj,
++ .call = nf_tables_getobj_reset,
+ .type = NFNL_CB_RCU,
+ .attr_count = NFTA_OBJ_MAX,
+ .policy = nft_obj_policy,
+@@ -9638,9 +9840,8 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+ call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+
+-static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+- unsigned int gc_seq,
+- bool sync)
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++ unsigned int gc_seq)
+ {
+ struct nft_set_elem_catchall *catchall;
+ const struct nft_set *set = gc->set;
+@@ -9656,11 +9857,7 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+
+ nft_set_elem_dead(ext);
+ dead_elem:
+- if (sync)
+- gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+- else
+- gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+-
++ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (!gc)
+ return NULL;
+
+@@ -9670,15 +9867,34 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+ return gc;
+ }
+
+-struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+- unsigned int gc_seq)
+-{
+- return nft_trans_gc_catchall(gc, gc_seq, false);
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ {
+- return nft_trans_gc_catchall(gc, 0, true);
++ struct nft_set_elem_catchall *catchall, *next;
++ const struct nft_set *set = gc->set;
++ struct nft_set_elem elem;
++ struct nft_set_ext *ext;
++
++ WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
++
++ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
++ ext = nft_set_elem_ext(set, catchall->elem);
++
++ if (!nft_set_elem_expired(ext))
++ continue;
++
++ gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
++ if (!gc)
++ return NULL;
++
++ memset(&elem, 0, sizeof(elem));
++ elem.priv = catchall->elem;
++
++ nft_setelem_data_deactivate(gc->net, gc->set, &elem);
++ nft_setelem_catchall_destroy(catchall);
++ nft_trans_gc_elem_add(gc, elem.priv);
++ }
++
++ return gc;
+ }
+
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+@@ -9832,7 +10048,7 @@ static void nft_set_commit_update(struct list_head *set_update_list)
+ list_for_each_entry_safe(set, next, set_update_list, pending_update) {
+ list_del_init(&set->pending_update);
+
+- if (!set->ops->commit)
++ if (!set->ops->commit || set->dead)
+ continue;
+
+ set->ops->commit(set);
+@@ -9992,9 +10208,11 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ if (nft_trans_chain_update(trans)) {
+ nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
+ &nft_trans_chain_hooks(trans));
+- nft_netdev_unregister_hooks(net,
+- &nft_trans_chain_hooks(trans),
+- true);
++ if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {
++ nft_netdev_unregister_hooks(net,
++ &nft_trans_chain_hooks(trans),
++ true);
++ }
+ } else {
+ nft_chain_del(trans->ctx.chain);
+ nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,
+@@ -10233,10 +10451,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ struct nft_trans *trans, *next;
+ LIST_HEAD(set_update_list);
+ struct nft_trans_elem *te;
++ int err = 0;
+
+ if (action == NFNL_ABORT_VALIDATE &&
+ nf_tables_validate(net) < 0)
+- return -EAGAIN;
++ err = -EAGAIN;
+
+ list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
+ list) {
+@@ -10266,9 +10485,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ break;
+ case NFT_MSG_NEWCHAIN:
+ if (nft_trans_chain_update(trans)) {
+- nft_netdev_unregister_hooks(net,
+- &nft_trans_chain_hooks(trans),
+- true);
++ if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {
++ nft_netdev_unregister_hooks(net,
++ &nft_trans_chain_hooks(trans),
++ true);
++ }
+ free_percpu(nft_trans_chain_stats(trans));
+ kfree(nft_trans_chain_name(trans));
+ nft_trans_destroy(trans);
+@@ -10328,6 +10549,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ nft_trans_destroy(trans);
+ break;
+ }
++ nft_trans_set(trans)->dead = 1;
+ list_del_rcu(&nft_trans_set(trans)->list);
+ break;
+ case NFT_MSG_DELSET:
+@@ -10359,8 +10581,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ case NFT_MSG_DESTROYSETELEM:
+ te = (struct nft_trans_elem *)trans->data;
+
+- nft_setelem_data_activate(net, te->set, &te->elem);
+- nft_setelem_activate(net, te->set, &te->elem);
++ if (!nft_setelem_active_next(net, te->set, &te->elem)) {
++ nft_setelem_data_activate(net, te->set, &te->elem);
++ nft_setelem_activate(net, te->set, &te->elem);
++ }
+ if (!nft_setelem_is_catchall(te->set, &te->elem))
+ te->set->ndeact--;
+
+@@ -10421,12 +10645,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ nf_tables_abort_release(trans);
+ }
+
+- if (action == NFNL_ABORT_AUTOLOAD)
+- nf_tables_module_autoload(net);
+- else
+- nf_tables_module_autoload_cleanup(net);
+-
+- return 0;
++ return err;
+ }
+
+ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+@@ -10439,6 +10658,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+ gc_seq = nft_gc_seq_begin(nft_net);
+ ret = __nf_tables_abort(net, action);
+ nft_gc_seq_end(nft_net, gc_seq);
++
++ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
++
++ /* module autoload needs to happen after GC sequence update because it
++	 * temporarily releases and grabs the mutex again.
++ */
++ if (action == NFNL_ABORT_AUTOLOAD)
++ nf_tables_module_autoload(net);
++ else
++ nf_tables_module_autoload_cleanup(net);
++
+ mutex_unlock(&nft_net->commit_mutex);
+
+ return ret;
+@@ -10502,146 +10732,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
+ }
+ EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
+-/*
+- * Loop detection - walk through the ruleset beginning at the destination chain
+- * of a new jump until either the source chain is reached (loop) or all
+- * reachable chains have been traversed.
+- *
+- * The loop check is performed whenever a new jump verdict is added to an
+- * expression or verdict map or a verdict map is bound to a new chain.
+- */
+-
+-static int nf_tables_check_loops(const struct nft_ctx *ctx,
+- const struct nft_chain *chain);
+-
+-static int nft_check_loops(const struct nft_ctx *ctx,
+- const struct nft_set_ext *ext)
+-{
+- const struct nft_data *data;
+- int ret;
+-
+- data = nft_set_ext_data(ext);
+- switch (data->verdict.code) {
+- case NFT_JUMP:
+- case NFT_GOTO:
+- ret = nf_tables_check_loops(ctx, data->verdict.chain);
+- break;
+- default:
+- ret = 0;
+- break;
+- }
+-
+- return ret;
+-}
+-
+-static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+- struct nft_set *set,
+- const struct nft_set_iter *iter,
+- struct nft_set_elem *elem)
+-{
+- const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+-
+- if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+- *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+- return 0;
+-
+- return nft_check_loops(ctx, ext);
+-}
+-
+-static int nft_set_catchall_loops(const struct nft_ctx *ctx,
+- struct nft_set *set)
+-{
+- u8 genmask = nft_genmask_next(ctx->net);
+- struct nft_set_elem_catchall *catchall;
+- struct nft_set_ext *ext;
+- int ret = 0;
+-
+- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+- ext = nft_set_elem_ext(set, catchall->elem);
+- if (!nft_set_elem_active(ext, genmask))
+- continue;
+-
+- ret = nft_check_loops(ctx, ext);
+- if (ret < 0)
+- return ret;
+- }
+-
+- return ret;
+-}
+-
+-static int nf_tables_check_loops(const struct nft_ctx *ctx,
+- const struct nft_chain *chain)
+-{
+- const struct nft_rule *rule;
+- const struct nft_expr *expr, *last;
+- struct nft_set *set;
+- struct nft_set_binding *binding;
+- struct nft_set_iter iter;
+-
+- if (ctx->chain == chain)
+- return -ELOOP;
+-
+- if (fatal_signal_pending(current))
+- return -EINTR;
+-
+- list_for_each_entry(rule, &chain->rules, list) {
+- nft_rule_for_each_expr(expr, last, rule) {
+- struct nft_immediate_expr *priv;
+- const struct nft_data *data;
+- int err;
+-
+- if (strcmp(expr->ops->type->name, "immediate"))
+- continue;
+-
+- priv = nft_expr_priv(expr);
+- if (priv->dreg != NFT_REG_VERDICT)
+- continue;
+-
+- data = &priv->data;
+- switch (data->verdict.code) {
+- case NFT_JUMP:
+- case NFT_GOTO:
+- err = nf_tables_check_loops(ctx,
+- data->verdict.chain);
+- if (err < 0)
+- return err;
+- break;
+- default:
+- break;
+- }
+- }
+- }
+-
+- list_for_each_entry(set, &ctx->table->sets, list) {
+- if (!nft_is_active_next(ctx->net, set))
+- continue;
+- if (!(set->flags & NFT_SET_MAP) ||
+- set->dtype != NFT_DATA_VERDICT)
+- continue;
+-
+- list_for_each_entry(binding, &set->bindings, list) {
+- if (!(binding->flags & NFT_SET_MAP) ||
+- binding->chain != chain)
+- continue;
+-
+- iter.genmask = nft_genmask_next(ctx->net);
+- iter.skip = 0;
+- iter.count = 0;
+- iter.err = 0;
+- iter.fn = nf_tables_loop_check_setelem;
+-
+- set->ops->walk(ctx, set, &iter);
+- if (!iter.err)
+- iter.err = nft_set_catchall_loops(ctx, set);
+-
+- if (iter.err < 0)
+- return iter.err;
+- }
+- }
+-
+- return 0;
+-}
+-
+ /**
+ * nft_parse_u32_check - fetch u32 attribute and check for maximum value
+ *
+@@ -10754,13 +10844,16 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ if (data != NULL &&
+ (data->verdict.code == NFT_GOTO ||
+ data->verdict.code == NFT_JUMP)) {
+- err = nf_tables_check_loops(ctx, data->verdict.chain);
++ err = nft_chain_validate(ctx, data->verdict.chain);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+ default:
++ if (type != NFT_DATA_VALUE)
++ return -EINVAL;
++
+ if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
+ return -EINVAL;
+ if (len == 0)
+@@ -10769,8 +10862,6 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ sizeof_field(struct nft_regs, data))
+ return -ERANGE;
+
+- if (data != NULL && type != NFT_DATA_VALUE)
+- return -EINVAL;
+ return 0;
+ }
+ }
+@@ -10824,16 +10915,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+ switch (data->verdict.code) {
+- default:
+- switch (data->verdict.code & NF_VERDICT_MASK) {
+- case NF_ACCEPT:
+- case NF_DROP:
+- case NF_QUEUE:
+- break;
+- default:
+- return -EINVAL;
+- }
+- fallthrough;
++ case NF_ACCEPT:
++ case NF_DROP:
++ case NF_QUEUE:
++ break;
+ case NFT_CONTINUE:
+ case NFT_BREAK:
+ case NFT_RETURN:
+@@ -10868,6 +10953,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+
+ data->verdict.chain = chain;
+ break;
++ default:
++ return -EINVAL;
+ }
+
+ desc->len = sizeof(data->verdict);
+@@ -11175,8 +11262,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+- if (!list_empty(&nf_tables_destroy_list))
+- nf_tables_trans_destroy_flush_work();
++ nf_tables_trans_destroy_flush_work();
+ again:
+ list_for_each_entry(table, &nft_net->tables, list) {
+ if (nft_table_has_owner(table) &&
+@@ -11243,9 +11329,10 @@ static void __net_exit nf_tables_exit_net(struct net *net)
+
+ gc_seq = nft_gc_seq_begin(nft_net);
+
+- if (!list_empty(&nft_net->commit_list) ||
+- !list_empty(&nft_net->module_list))
+- __nf_tables_abort(net, NFNL_ABORT_NONE);
++ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
++
++ if (!list_empty(&nft_net->module_list))
++ nf_tables_module_autoload_cleanup(net);
+
+ __nft_release_tables(net);
+
+@@ -11337,6 +11424,7 @@ static void __exit nf_tables_module_exit(void)
+ unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
+ nft_chain_filter_fini();
+ nft_chain_route_fini();
++ nf_tables_trans_destroy_flush_work();
+ unregister_pernet_subsys(&nf_tables_net_ops);
+ cancel_work_sync(&trans_gc_work);
+ cancel_work_sync(&trans_destroy_work);
+diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
+index 4d0ce12221f66d..711c22ab701dde 100644
+--- a/net/netfilter/nf_tables_core.c
++++ b/net/netfilter/nf_tables_core.c
+@@ -158,7 +158,7 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr,
+ else {
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ return false;
+- ptr = skb_network_header(skb) + nft_thoff(pkt);
++ ptr = skb->data + nft_thoff(pkt);
+ }
+
+ ptr += priv->offset;
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index f03f4d4d7d8896..134e05d31061e4 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -508,7 +508,7 @@ __build_packet_message(struct nfnl_log_net *log,
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+ goto nla_put_failure;
+ } else {
+- struct net_device *physindev;
++ int physinif;
+
+ /* Case 2: indev is bridge group, we need to look for
+ * physical device (when called from ipv4) */
+@@ -516,10 +516,10 @@ __build_packet_message(struct nfnl_log_net *log,
+ htonl(indev->ifindex)))
+ goto nla_put_failure;
+
+- physindev = nf_bridge_get_physindev(skb);
+- if (physindev &&
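++			/* report the physical ifindex without going through
++			 * the net_device, which may be gone by now
++			 */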
++ physinif = nf_bridge_get_physinif(skb);
++ if (physinif &&
+ nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+- htonl(physindev->ifindex)))
++ htonl(physinif)))
+ goto nla_put_failure;
+ }
+ #endif
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 556bc902af00f8..09209b4952ad14 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
+ struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
+ rcu);
+
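++	/* hold the RCU read lock while the remaining queue entries are flushed */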
++ rcu_read_lock();
+ nfqnl_flush(inst, NULL, 0);
++ rcu_read_unlock();
+ kfree(inst);
+ module_put(THIS_MODULE);
+ }
+@@ -666,10 +668,41 @@ static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
+ {
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
+- const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
++ struct nf_conn *ct = (void *)skb_nfct(entry->skb);
++ unsigned long status;
++ unsigned int use;
++
++ if (!ct)
++ return false;
+
+- if (ct && ((ct->status & flags) == IPS_DYING))
++ status = READ_ONCE(ct->status);
++ if ((status & flags) == IPS_DYING)
+ return true;
++
++ if (status & IPS_CONFIRMED)
++ return false;
++
++	/* In some cases skb_clone() can occur after initial conntrack
++	 * pickup, but conntrack assumes exclusive skb->_nfct ownership for
++	 * unconfirmed entries.
++	 *
++	 * This happens for br_netfilter and with ip multicast routing.
++	 * It can't be solved with serialization here because one clone could
++	 * have been queued for local delivery.
++ */
++ use = refcount_read(&ct->ct_general.use);
++ if (likely(use == 1))
++ return false;
++
++ /* Can't decrement further? Exclusive ownership. */
++ if (!refcount_dec_not_one(&ct->ct_general.use))
++ return false;
++
++ skb_set_nfct(entry->skb, 0);
++ /* No nf_ct_put(): we already decremented .use and it cannot
++ * drop down to 0.
++ */
++ return true;
+ #endif
+ return false;
+ }
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index e596d1a842f702..f6e791a6810151 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -38,13 +38,14 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+
+ switch (priv->size) {
+ case 8: {
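++		/* view the u32 register array as u64 slots so that each
++		 * 64-bit store advances by a full eight bytes
++		 */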
++ u64 *dst64 = (void *)dst;
+ u64 src64;
+
+ switch (priv->op) {
+ case NFT_BYTEORDER_NTOH:
+ for (i = 0; i < priv->len / 8; i++) {
+ src64 = nft_reg_load64(&src[i]);
+- nft_reg_store64(&dst[i],
++ nft_reg_store64(&dst64[i],
+ be64_to_cpu((__force __be64)src64));
+ }
+ break;
+@@ -52,7 +53,7 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ for (i = 0; i < priv->len / 8; i++) {
+ src64 = (__force __u64)
+ cpu_to_be64(nft_reg_load64(&src[i]));
+- nft_reg_store64(&dst[i], src64);
++ nft_reg_store64(&dst64[i], src64);
+ }
+ break;
+ }
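
The cast to a u64 *dst64 view makes the 64-bit stores land on whole register pairs instead of being narrowed through the u32 array type. The conversion itself is an ordinary byte-swap loop; a self-contained sketch over a fake register file, using glibc's <endian.h> helpers in place of nft_reg_load64()/nft_reg_store64():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Convert an array of big-endian 64-bit values in place, going
     * through memcpy so the u32-sized register file needs no u64
     * alignment (hypothetical layout, not the kernel's). */
    static void ntoh64_array(uint32_t *regs, size_t nvals)
    {
        for (size_t i = 0; i < nvals; i++) {
            uint64_t v;

            memcpy(&v, &regs[i * 2], sizeof(v));
            v = be64toh(v);
            memcpy(&regs[i * 2], &v, sizeof(v));
        }
    }

    int main(void)
    {
        uint32_t regs[4];
        uint64_t in = htobe64(0x0123456789abcdefULL);
        uint64_t out;

        memcpy(regs, &in, sizeof(in));
        memcpy(regs + 2, &in, sizeof(in));
        ntoh64_array(regs, 2);

        memcpy(&out, regs, sizeof(out));
        printf("%016llx\n", (unsigned long long)out);
        return 0;
    }
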
+diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
+index 680fe557686e42..d170758a1eb5d0 100644
+--- a/net/netfilter/nft_chain_filter.c
++++ b/net/netfilter/nft_chain_filter.c
+@@ -338,7 +338,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
+ return;
+
+ if (n > 1) {
+- nf_unregister_net_hook(ctx->net, &found->ops);
++ if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
++ nf_unregister_net_hook(ctx->net, &found->ops);
++
+ list_del_rcu(&found->list);
+ kfree_rcu(found, rcu);
+ return;
+@@ -357,9 +359,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct nft_base_chain *basechain;
+ struct nftables_pernet *nft_net;
+- struct nft_table *table;
+ struct nft_chain *chain, *nr;
++ struct nft_table *table;
+ struct nft_ctx ctx = {
+ .net = dev_net(dev),
+ };
+@@ -371,7 +374,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ nft_net = nft_pernet(ctx.net);
+ mutex_lock(&nft_net->commit_mutex);
+ list_for_each_entry(table, &nft_net->tables, list) {
+- if (table->family != NFPROTO_NETDEV)
++ if (table->family != NFPROTO_NETDEV &&
++ table->family != NFPROTO_INET)
+ continue;
+
+ ctx.family = table->family;
+@@ -380,6 +384,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
+ if (!nft_is_base_chain(chain))
+ continue;
+
++ basechain = nft_base_chain(chain);
++ if (table->family == NFPROTO_INET &&
++ basechain->ops.hooknum != NF_INET_INGRESS)
++ continue;
++
+ ctx.chain = chain;
+ nft_netdev_event(event, dev, &ctx);
+ }
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 5284cd2ad53271..d3d11dede54507 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -135,7 +135,7 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
+
+ static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+ [NFTA_TARGET_NAME] = { .type = NLA_NUL_STRING },
+- [NFTA_TARGET_REV] = { .type = NLA_U32 },
++ [NFTA_TARGET_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_TARGET_INFO] = { .type = NLA_BINARY },
+ };
+
+@@ -200,6 +200,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
+ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ {
+ struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
++ u32 l4proto;
+ u32 flags;
+ int err;
+
+@@ -212,12 +213,18 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ return -EINVAL;
+
+ flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+- if (flags & ~NFT_RULE_COMPAT_F_MASK)
++ if (flags & NFT_RULE_COMPAT_F_UNUSED ||
++ flags & ~NFT_RULE_COMPAT_F_MASK)
+ return -EINVAL;
+ if (flags & NFT_RULE_COMPAT_F_INV)
+ *inv = true;
+
+- *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
++ l4proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
++ if (l4proto > U16_MAX)
++ return -EINVAL;
++
++ *proto = l4proto;
++
+ return 0;
+ }
+
+@@ -350,6 +357,22 @@ static int nft_target_validate(const struct nft_ctx *ctx,
+ unsigned int hook_mask = 0;
+ int ret;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET &&
++ ctx->family != NFPROTO_BRIDGE &&
++ ctx->family != NFPROTO_ARP)
++ return -EOPNOTSUPP;
++
++ ret = nft_chain_validate_hooks(ctx->chain,
++ (1 << NF_INET_PRE_ROUTING) |
++ (1 << NF_INET_LOCAL_IN) |
++ (1 << NF_INET_FORWARD) |
++ (1 << NF_INET_LOCAL_OUT) |
++ (1 << NF_INET_POST_ROUTING));
++ if (ret)
++ return ret;
++
+ if (nft_is_base_chain(ctx->chain)) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+@@ -413,7 +436,7 @@ static void nft_match_eval(const struct nft_expr *expr,
+
+ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+ [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
+- [NFTA_MATCH_REV] = { .type = NLA_U32 },
++ [NFTA_MATCH_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_MATCH_INFO] = { .type = NLA_BINARY },
+ };
+
+@@ -595,6 +618,22 @@ static int nft_match_validate(const struct nft_ctx *ctx,
+ unsigned int hook_mask = 0;
+ int ret;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET &&
++ ctx->family != NFPROTO_BRIDGE &&
++ ctx->family != NFPROTO_ARP)
++ return -EOPNOTSUPP;
++
++ ret = nft_chain_validate_hooks(ctx->chain,
++ (1 << NF_INET_PRE_ROUTING) |
++ (1 << NF_INET_LOCAL_IN) |
++ (1 << NF_INET_FORWARD) |
++ (1 << NF_INET_LOCAL_OUT) |
++ (1 << NF_INET_POST_ROUTING));
++ if (ret)
++ return ret;
++
+ if (nft_is_base_chain(ctx->chain)) {
+ const struct nft_base_chain *basechain =
+ nft_base_chain(ctx->chain);
+@@ -712,7 +751,7 @@ static int nfnl_compat_get_rcu(struct sk_buff *skb,
+ static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+ [NFTA_COMPAT_NAME] = { .type = NLA_NUL_STRING,
+ .len = NFT_COMPAT_NAME_MAX-1 },
+- [NFTA_COMPAT_REV] = { .type = NLA_U32 },
++ [NFTA_COMPAT_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_COMPAT_TYPE] = { .type = NLA_U32 },
+ };
+
+diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
+index dccc68a5135add..b7aa4d2c8c22f6 100644
+--- a/net/netfilter/nft_counter.c
++++ b/net/netfilter/nft_counter.c
+@@ -107,11 +107,16 @@ static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
+ struct nft_counter *total)
+ {
+ struct nft_counter *this_cpu;
++ seqcount_t *myseq;
+
+ local_bh_disable();
+ this_cpu = this_cpu_ptr(priv->counter);
++ myseq = this_cpu_ptr(&nft_counter_seq);
++
++ write_seqcount_begin(myseq);
+ this_cpu->packets -= total->packets;
+ this_cpu->bytes -= total->bytes;
++ write_seqcount_end(myseq);
+ local_bh_enable();
+ }
+
+@@ -265,7 +270,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
+ struct nft_counter *this_cpu;
+ seqcount_t *myseq;
+
+- preempt_disable();
++ local_bh_disable();
+ this_cpu = this_cpu_ptr(priv->counter);
+ myseq = this_cpu_ptr(&nft_counter_seq);
+
+@@ -273,7 +278,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
+ this_cpu->packets += stats->pkts;
+ this_cpu->bytes += stats->bytes;
+ write_seqcount_end(myseq);
+- preempt_enable();
++ local_bh_enable();
+ }
+
+ void nft_counter_init_seqcount(void)
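
Both counter fixes close the same hole: updates must happen inside the per-CPU seqcount write section (the reset path was missing it entirely), and the section must run with bottom halves disabled rather than just preemption, so a softirq on the same CPU cannot nest a second writer. A toy single-threaded model of the seqcount protocol itself; a real seqlock needs stronger memory barriers than shown here:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy seqcount: the writer bumps to odd, updates, bumps to even;
     * readers retry while the sequence is odd or has changed. */
    static atomic_uint seq;
    static uint64_t packets, bytes;

    static void counter_add(uint64_t p, uint64_t b)
    {
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd */
        packets += p;
        bytes += b;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even */
    }

    static void counter_read(uint64_t *p, uint64_t *b)
    {
        unsigned int s;

        do {
            s = atomic_load_explicit(&seq, memory_order_acquire);
            *p = packets;
            *b = bytes;
        } while ((s & 1) ||
                 s != atomic_load_explicit(&seq, memory_order_acquire));
    }

    int main(void)
    {
        uint64_t p, b;

        counter_add(3, 1500);
        counter_read(&p, &b);
        printf("%llu packets, %llu bytes\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
    }
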
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 86bb9d7797d9ee..255640013ab845 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -476,6 +476,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
+ break;
+ #endif
+ case NFT_CT_ID:
++ if (tb[NFTA_CT_DIRECTION])
++ return -EINVAL;
++
+ len = sizeof(u32);
+ break;
+ default:
+@@ -1250,7 +1253,30 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_CT_EXPECT_L3PROTO])
+ priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
+
++ switch (priv->l3num) {
++ case NFPROTO_IPV4:
++ case NFPROTO_IPV6:
++ if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
++ break;
++
++ return -EINVAL;
++ case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
++ default:
++ return -EAFNOSUPPORT;
++ }
++
+ priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
++ switch (priv->l4proto) {
++ case IPPROTO_TCP:
++ case IPPROTO_UDP:
++ case IPPROTO_UDPLITE:
++ case IPPROTO_DCCP:
++ case IPPROTO_SCTP:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
+ priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
+ priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
+ priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 5c5cc01c73c5a7..629a91a8c61419 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -279,10 +279,15 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ priv->expr_array[i] = dynset_expr;
+ priv->num_exprs++;
+
+- if (set->num_exprs &&
+- dynset_expr->ops != set->exprs[i]->ops) {
+- err = -EOPNOTSUPP;
+- goto err_expr_free;
++ if (set->num_exprs) {
++ if (i >= set->num_exprs) {
++ err = -EINVAL;
++ goto err_expr_free;
++ }
++ if (dynset_expr->ops != set->exprs[i]->ops) {
++ err = -EOPNOTSUPP;
++ goto err_expr_free;
++ }
+ }
+ i++;
+ }
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index 3fbaa7bf41f9c7..6eb571d0c3fdfc 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -214,7 +214,7 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
+
+ offset = i + priv->offset;
+ if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+- *dest = 1;
++ nft_reg_store8(dest, 1);
+ } else {
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+@@ -461,7 +461,7 @@ static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
+ type = bufp[0];
+
+ if (type == priv->type) {
+- *dest = 1;
++ nft_reg_store8(dest, 1);
+ return;
+ }
+
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 04b51f28533217..bf825f6cb974ea 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ switch (priv->result) {
+ case NFT_FIB_RESULT_OIF:
+ case NFT_FIB_RESULT_OIFNAME:
+- hooks = (1 << NF_INET_PRE_ROUTING);
+- if (priv->flags & NFTA_FIB_F_IIF) {
+- hooks |= (1 << NF_INET_LOCAL_IN) |
+- (1 << NF_INET_FORWARD);
+- }
++ hooks = (1 << NF_INET_PRE_ROUTING) |
++ (1 << NF_INET_LOCAL_IN) |
++ (1 << NF_INET_FORWARD);
+ break;
+ case NFT_FIB_RESULT_ADDRTYPE:
+ if (priv->flags & NFTA_FIB_F_IIF)
+@@ -145,11 +143,15 @@ void nft_fib_store_result(void *reg, const struct nft_fib *priv,
+ switch (priv->result) {
+ case NFT_FIB_RESULT_OIF:
+ index = dev ? dev->ifindex : 0;
+- *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
++ if (priv->flags & NFTA_FIB_F_PRESENT)
++ nft_reg_store8(dreg, !!index);
++ else
++ *dreg = index;
++
+ break;
+ case NFT_FIB_RESULT_OIFNAME:
+ if (priv->flags & NFTA_FIB_F_PRESENT)
+- *dreg = !!dev;
++ nft_reg_store8(dreg, !!dev);
+ else
+ strscpy_pad(reg, dev ? dev->name : "", IFNAMSIZ);
+ break;
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index ab3362c483b4a7..397351fa4d5f82 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -384,6 +384,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+ {
+ unsigned int hook_mask = (1 << NF_INET_FORWARD);
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain, hook_mask);
+ }
+
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index fccb3cf7749c1d..6475c7abc1fe35 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -78,7 +78,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
+ case NFT_GOTO:
+ err = nf_tables_bind_chain(ctx, chain);
+ if (err < 0)
+- return err;
++ goto err1;
+ break;
+ default:
+ break;
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 145dc62c624726..cefa25e0dbb0a2 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -58,16 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
+ static int nft_limit_init(struct nft_limit_priv *priv,
+ const struct nlattr * const tb[], bool pkts)
+ {
+- u64 unit, tokens;
++ u64 unit, tokens, rate_with_burst;
++ bool invert = false;
+
+ if (tb[NFTA_LIMIT_RATE] == NULL ||
+ tb[NFTA_LIMIT_UNIT] == NULL)
+ return -EINVAL;
+
+ priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
++ if (priv->rate == 0)
++ return -EINVAL;
++
+ unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+- priv->nsecs = unit * NSEC_PER_SEC;
+- if (priv->rate == 0 || priv->nsecs < unit)
++ if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
+ return -EOVERFLOW;
+
+ if (tb[NFTA_LIMIT_BURST])
+@@ -76,18 +79,35 @@ static int nft_limit_init(struct nft_limit_priv *priv,
+ if (pkts && priv->burst == 0)
+ priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
+
+- if (priv->rate + priv->burst < priv->rate)
++ if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
+ return -EOVERFLOW;
+
+ if (pkts) {
+- tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
++ u64 tmp = div64_u64(priv->nsecs, priv->rate);
++
++ if (check_mul_overflow(tmp, priv->burst, &tokens))
++ return -EOVERFLOW;
+ } else {
++ u64 tmp;
++
+ /* The token bucket size limits the number of tokens that can be
+ * accumulated. tokens_max specifies the bucket size.
+ * tokens_max = unit * (rate + burst) / rate.
+ */
+- tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
+- priv->rate);
++ if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
++ return -EOVERFLOW;
++
++ tokens = div64_u64(tmp, priv->rate);
++ }
++
++ if (tb[NFTA_LIMIT_FLAGS]) {
++ u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
++
++ if (flags & ~NFT_LIMIT_F_INV)
++ return -EOPNOTSUPP;
++
++ if (flags & NFT_LIMIT_F_INV)
++ invert = true;
+ }
+
+ priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
+@@ -96,13 +116,7 @@ static int nft_limit_init(struct nft_limit_priv *priv,
+
+ priv->limit->tokens = tokens;
+ priv->tokens_max = priv->limit->tokens;
+-
+- if (tb[NFTA_LIMIT_FLAGS]) {
+- u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
+-
+- if (flags & NFT_LIMIT_F_INV)
+- priv->invert = true;
+- }
++ priv->invert = invert;
+ priv->limit->last = ktime_get_ns();
+ spin_lock_init(&priv->limit->lock);
+
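
The rewritten init performs every multiplication and addition through checked helpers instead of multiplying first and testing for wraparound afterwards; the NFTA_LIMIT_FLAGS parsing also moves ahead of the kmalloc() so an unsupported flag no longer leaks the just-allocated limit. A compact userspace rendering of the same token arithmetic, using the GCC/Clang __builtin_*_overflow() builtins that back the kernel's check_mul_overflow()/check_add_overflow():

    #include <inttypes.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Overflow-checked version of the token maths above; returns -1 on
     * any intermediate wraparound, 0 with the bucket size otherwise. */
    static int limit_tokens(uint64_t rate, uint64_t unit, uint64_t burst,
                            uint64_t *tokens)
    {
        uint64_t nsecs, rate_with_burst, tmp;

        if (rate == 0)
            return -1;
        if (__builtin_mul_overflow(unit, NSEC_PER_SEC, &nsecs))
            return -1;
        if (__builtin_add_overflow(rate, burst, &rate_with_burst))
            return -1;
        /* tokens_max = unit * (rate + burst) / rate, step by step */
        if (__builtin_mul_overflow(nsecs, rate_with_burst, &tmp))
            return -1;
        *tokens = tmp / rate;
        return 0;
    }

    int main(void)
    {
        uint64_t tokens;

        if (limit_tokens(1024, 1, 5, &tokens) == 0)
            printf("bucket size: %" PRIu64 " ns-tokens\n", tokens);
        if (limit_tokens(1, UINT64_MAX, 0, &tokens) != 0)
            puts("rejected: unit * NSEC_PER_SEC overflows");
        return 0;
    }
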
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 870e5b113d13ec..1b9edf2b339373 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -132,7 +132,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ return -EINVAL;
+
+ err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
+- &priv->dreg, NULL, set->dtype,
++ &priv->dreg, NULL,
++ nft_set_datatype(set),
+ set->dlen);
+ if (err < 0)
+ return err;
+@@ -216,6 +217,7 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ return 0;
+
+ iter.genmask = nft_genmask_next(ctx->net);
++ iter.type = NFT_ITER_UPDATE;
+ iter.skip = 0;
+ iter.count = 0;
+ iter.err = 0;
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index f7da7c43333b5a..9139ce38ea7b9a 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -63,7 +63,7 @@ nft_meta_get_eval_time(enum nft_meta_keys key,
+ {
+ switch (key) {
+ case NFT_META_TIME_NS:
+- nft_reg_store64(dest, ktime_get_real_ns());
++ nft_reg_store64((u64 *)dest, ktime_get_real_ns());
+ break;
+ case NFT_META_TIME_DAY:
+ nft_reg_store8(dest, nft_meta_weekday());
+@@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
+ struct nft_meta *priv = nft_expr_priv(expr);
+ unsigned int len;
+
++ if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
++ return -EINVAL;
++
+ priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+ switch (priv->key) {
+ case NFT_META_PROTOCOL:
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 583885ce72328f..808f5802c2704a 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
+ struct nft_nat *priv = nft_expr_priv(expr);
+ int err;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+ if (err < 0)
+ return err;
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 0a689c8e0295df..50429cbd42da42 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+ int mac_off = skb_mac_header(skb) - skb->data;
+ u8 *vlanh, *dst_u8 = (u8 *) d;
+ struct vlan_ethhdr veth;
+- u8 vlan_hlen = 0;
+-
+- if ((skb->protocol == htons(ETH_P_8021AD) ||
+- skb->protocol == htons(ETH_P_8021Q)) &&
+- offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+- vlan_hlen += VLAN_HLEN;
+
+ vlanh = (u8 *) &veth;
+- if (offset < VLAN_ETH_HLEN + vlan_hlen) {
++ if (offset < VLAN_ETH_HLEN) {
+ u8 ethlen = len;
+
+- if (vlan_hlen &&
+- skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+- return false;
+- else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
++ if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ return false;
+
+- if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+- ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
++ if (offset + len > VLAN_ETH_HLEN)
++ ethlen -= offset + len - VLAN_ETH_HLEN;
+
+- memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
++ memcpy(dst_u8, vlanh + offset, ethlen);
+
+ len -= ethlen;
+ if (len == 0)
+ return true;
+
+ dst_u8 += ethlen;
+- offset = ETH_HLEN + vlan_hlen;
++ offset = ETH_HLEN;
+ } else {
+- offset -= VLAN_HLEN + vlan_hlen;
++ offset -= VLAN_HLEN;
+ }
+
+ return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+ return pkt->inneroff;
+ }
+
+-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
+ {
+- unsigned int len = priv->offset + priv->len;
++ unsigned int boundary = offset + len;
+
+ /* data past ether src/dst requested, copy needed */
+- if (len > offsetof(struct ethhdr, h_proto))
++ if (boundary > offsetof(struct ethhdr, h_proto))
+ return true;
+
+ return false;
+@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ goto err;
+
+ if (skb_vlan_tag_present(skb) &&
+- nft_payload_need_vlan_copy(priv)) {
++ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ if (!nft_payload_copy_vlan(dest, skb,
+ priv->offset, priv->len))
+ goto err;
+@@ -659,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
+ struct nft_payload *priv = nft_expr_priv(expr);
+ u32 base;
+
++ if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
++ !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
++ return -EINVAL;
++
+ base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ switch (base) {
+ case NFT_PAYLOAD_TUN_HEADER:
+@@ -810,21 +805,79 @@ struct nft_payload_set {
+ u8 csum_flags;
+ };
+
++/* This is not struct vlan_hdr. */
++struct nft_payload_vlan_hdr {
++ __be16 h_vlan_proto;
++ __be16 h_vlan_TCI;
++};
++
++static bool
++nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
++ int *vlan_hlen)
++{
++ struct nft_payload_vlan_hdr *vlanh;
++ __be16 vlan_proto;
++ u16 vlan_tci;
++
++ if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
++ *vlan_hlen = VLAN_HLEN;
++ return true;
++ }
++
++ switch (offset) {
++ case offsetof(struct vlan_ethhdr, h_vlan_proto):
++ if (len == 2) {
++ vlan_proto = nft_reg_load_be16(src);
++ skb->vlan_proto = vlan_proto;
++ } else if (len == 4) {
++ vlanh = (struct nft_payload_vlan_hdr *)src;
++ __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
++ ntohs(vlanh->h_vlan_TCI));
++ } else {
++ return false;
++ }
++ break;
++ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
++ if (len != 2)
++ return false;
++
++ vlan_tci = ntohs(nft_reg_load_be16(src));
++ skb->vlan_tci = vlan_tci;
++ break;
++ default:
++ return false;
++ }
++
++ return true;
++}
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+ {
+ const struct nft_payload_set *priv = nft_expr_priv(expr);
+- struct sk_buff *skb = pkt->skb;
+ const u32 *src = &regs->data[priv->sreg];
+- int offset, csum_offset;
++ int offset, csum_offset, vlan_hlen = 0;
++ struct sk_buff *skb = pkt->skb;
+ __wsum fsum, tsum;
+
+ switch (priv->base) {
+ case NFT_PAYLOAD_LL_HEADER:
+ if (!skb_mac_header_was_set(skb))
+ goto err;
+- offset = skb_mac_header(skb) - skb->data;
++
++ if (skb_vlan_tag_present(skb) &&
++ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
++ if (!nft_payload_set_vlan(src, skb,
++ priv->offset, priv->len,
++ &vlan_hlen))
++ goto err;
++
++ if (!vlan_hlen)
++ return;
++ }
++
++ offset = skb_mac_header(skb) - skb->data - vlan_hlen;
+ break;
+ case NFT_PAYLOAD_NETWORK_HEADER:
+ offset = skb_network_offset(skb);
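
nft_payload_need_vlan_adjust() now takes the offset/length pair directly so the read path and the new set path can share it; the test itself just asks whether the access reaches past the two MAC addresses, where the stripped tag would sit. A sketch with a stand-in layout struct whose offsets match the usual 14-byte Ethernet header:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ethhdr_layout {           /* stand-in for struct ethhdr */
        unsigned char h_dest[6];
        unsigned char h_source[6];
        unsigned short h_proto;
    };

    /* An access confined to the src/dst MACs needs no VLAN adjustment;
     * anything reaching h_proto or beyond does, because the tag that
     * was stripped into skb metadata sits just before h_proto. */
    static bool need_vlan_adjust(size_t offset, size_t len)
    {
        return offset + len > offsetof(struct ethhdr_layout, h_proto);
    }

    int main(void)
    {
        printf("dst MAC access:   %s\n",
               need_vlan_adjust(0, 6) ? "adjust" : "plain");
        printf("ethertype access: %s\n",
               need_vlan_adjust(12, 2) ? "adjust" : "plain");
        return 0;
    }
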
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 35a2c28caa60bb..24d97713857298 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
+ const struct nft_rt *priv = nft_expr_priv(expr);
+ unsigned int hooks;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ switch (priv->key) {
+ case NFT_RT_NEXTHOP4:
+ case NFT_RT_NEXTHOP6:
+diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
+index 1e5e7a181e0bc2..cbf7f7825f1b88 100644
+--- a/net/netfilter/nft_set_bitmap.c
++++ b/net/netfilter/nft_set_bitmap.c
+@@ -171,7 +171,7 @@ static void nft_bitmap_activate(const struct net *net,
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
+ /* Enter 11 state. */
+ priv->bitmap[idx] |= (genmask << off);
+- nft_set_elem_change_active(net, set, &be->ext);
++ nft_clear(net, &be->ext);
+ }
+
+ static bool nft_bitmap_flush(const struct net *net,
+@@ -223,8 +223,6 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
+ list_for_each_entry_rcu(be, &priv->list, head) {
+ if (iter->count < iter->skip)
+ goto cont;
+- if (!nft_set_elem_active(&be->ext, iter->genmask))
+- goto cont;
+
+ elem.priv = be;
+
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 2013de934cef09..3a96d4a77a228a 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -189,7 +189,7 @@ static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
+ {
+ struct nft_rhash_elem *he = elem->priv;
+
+- nft_set_elem_change_active(net, set, &he->ext);
++ nft_clear(net, &he->ext);
+ }
+
+ static bool nft_rhash_flush(const struct net *net,
+@@ -277,8 +277,6 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+
+ if (iter->count < iter->skip)
+ goto cont;
+- if (!nft_set_elem_active(&he->ext, iter->genmask))
+- goto cont;
+
+ elem.priv = he;
+
+@@ -587,7 +585,7 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set,
+ {
+ struct nft_hash_elem *he = elem->priv;
+
+- nft_set_elem_change_active(net, set, &he->ext);
++ nft_clear(net, &he->ext);
+ }
+
+ static bool nft_hash_flush(const struct net *net,
+@@ -641,8 +639,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ hlist_for_each_entry_rcu(he, &priv->table[i], node) {
+ if (iter->count < iter->skip)
+ goto cont;
+- if (!nft_set_elem_active(&he->ext, iter->genmask))
+- goto cont;
+
+ elem.priv = he;
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index c0dcc40de358fb..334958ef8d66c8 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -342,9 +342,6 @@
+ #include "nft_set_pipapo_avx2.h"
+ #include "nft_set_pipapo.h"
+
+-/* Current working bitmap index, toggled between field matches */
+-static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
+-
+ /**
+ * pipapo_refill() - For each set bit, set bits from selected mapping table item
+ * @map: Bitmap to be scanned for set bits
+@@ -363,7 +360,7 @@ static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
+ * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
+ */
+ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+- union nft_pipapo_map_bucket *mt, bool match_only)
++ const union nft_pipapo_map_bucket *mt, bool match_only)
+ {
+ unsigned long bitset;
+ int k, ret = -1;
+@@ -412,27 +409,30 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
++ struct nft_pipapo_scratch *scratch;
+ unsigned long *res_map, *fill_map;
+ u8 genmask = nft_genmask_cur(net);
++ const struct nft_pipapo_match *m;
++ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
+- struct nft_pipapo_match *m;
+- struct nft_pipapo_field *f;
+ bool map_index;
+ int i;
+
+ local_bh_disable();
+
+- map_index = raw_cpu_read(nft_pipapo_scratch_index);
+-
+ m = rcu_dereference(priv->match);
+
+ if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
+ goto out;
+
+- res_map = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
+- fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
++ scratch = *raw_cpu_ptr(m->scratch);
+
+- memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
++ map_index = scratch->map_index;
++
++ res_map = scratch->map + (map_index ? m->bsize_max : 0);
++ fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
++
++ pipapo_resmap_init(m, res_map);
+
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+@@ -460,7 +460,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+ last);
+ if (b < 0) {
+- raw_cpu_write(nft_pipapo_scratch_index, map_index);
++ scratch->map_index = map_index;
+ local_bh_enable();
+
+ return false;
+@@ -477,7 +477,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ * current inactive bitmap is clean and can be reused as
+ * *next* bitmap (not initial) for the next packet.
+ */
+- raw_cpu_write(nft_pipapo_scratch_index, map_index);
++ scratch->map_index = map_index;
+ local_bh_enable();
+
+ return true;
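
With the global per-CPU flag gone, each scratch area carries its own map_index selecting which half of map[] holds the current result and which is the clean fill target; the index flips once a lookup completes so the next packet starts from the unused half. A loose model of that double-buffer toggle, with a memcpy standing in for pipapo_refill():

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define BSIZE_MAX 4

    struct scratch {
        unsigned char map_index;           /* which half holds the result */
        unsigned long map[BSIZE_MAX * 2];  /* two bitmaps, back to back */
    };

    /* One lookup step: read from the current result half, write into
     * the other, then flip map_index so the next run gets a clean map. */
    static void lookup_step(struct scratch *s)
    {
        bool idx = s->map_index;
        unsigned long *res  = s->map + (idx ? BSIZE_MAX : 0);
        unsigned long *fill = s->map + (idx ? 0 : BSIZE_MAX);

        memcpy(fill, res, BSIZE_MAX * sizeof(*res)); /* "refill" */
        memset(res, 0, BSIZE_MAX * sizeof(*res));    /* leave it clean */
        s->map_index = !idx;
    }

    int main(void)
    {
        struct scratch s = { .map = { 0xff, 0, 0, 0 } };

        lookup_step(&s);
        printf("result now in half %u\n", (unsigned)s.map_index);
        return 0;
    }
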
+@@ -517,11 +517,13 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ {
+ struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
+ struct nft_pipapo *priv = nft_set_priv(set);
+- struct nft_pipapo_match *m = priv->clone;
+ unsigned long *res_map, *fill_map = NULL;
+- struct nft_pipapo_field *f;
++ const struct nft_pipapo_match *m;
++ const struct nft_pipapo_field *f;
+ int i;
+
++ m = priv->clone;
++
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ if (!res_map) {
+ ret = ERR_PTR(-ENOMEM);
+@@ -534,7 +536,7 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
+ goto out;
+ }
+
+- memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
++ pipapo_resmap_init(m, res_map);
+
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+@@ -1101,6 +1103,25 @@ static void pipapo_map(struct nft_pipapo_match *m,
+ f->mt[map[i].to + j].e = e;
+ }
+
++/**
++ * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
++ * @m: Matching data
++ * @cpu: CPU number
++ */
++static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
++{
++ struct nft_pipapo_scratch *s;
++ void *mem;
++
++ s = *per_cpu_ptr(m->scratch, cpu);
++ if (!s)
++ return;
++
++ mem = s;
++ mem -= s->align_off;
++ kfree(mem);
++}
++
+ /**
+ * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
+ * @clone: Copy of matching data with pending insertions and deletions
+@@ -1114,12 +1135,13 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ int i;
+
+ for_each_possible_cpu(i) {
+- unsigned long *scratch;
++ struct nft_pipapo_scratch *scratch;
+ #ifdef NFT_PIPAPO_ALIGN
+- unsigned long *scratch_aligned;
++ void *scratch_aligned;
++ u32 align_off;
+ #endif
+-
+- scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
++ scratch = kzalloc_node(struct_size(scratch, map,
++ bsize_max * 2) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+ GFP_KERNEL, cpu_to_node(i));
+ if (!scratch) {
+@@ -1133,14 +1155,25 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ return -ENOMEM;
+ }
+
+- kfree(*per_cpu_ptr(clone->scratch, i));
+-
+- *per_cpu_ptr(clone->scratch, i) = scratch;
++ pipapo_free_scratch(clone, i);
+
+ #ifdef NFT_PIPAPO_ALIGN
+- scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
+- *per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
++ /* Align &scratch->map (not the struct itself): the extra
++ * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
++ * above guarantee we can waste up to those bytes in order
++ * to align the map field regardless of its offset within
++ * the struct.
++ */
++ BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
++
++ scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
++ scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
++ align_off = scratch_aligned - (void *)scratch;
++
++ scratch = scratch_aligned;
++ scratch->align_off = align_off;
+ #endif
++ *per_cpu_ptr(clone->scratch, i) = scratch;
+ }
+
+ return 0;
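
Because the scratch struct now has a header in front of the bitmap, only the map[] field is aligned, and the number of bytes skipped is stored in align_off so pipapo_free_scratch() can walk back to the address the allocator actually returned. The same trick in plain C, with MAP_ALIGN as a stand-in for NFT_PIPAPO_ALIGN:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAP_ALIGN 64u

    struct scratch {
        uint32_t align_off;        /* distance back to the malloc() address */
        unsigned long map[];
    };

    /* Allocate with MAP_ALIGN - 1 bytes of headroom, align the map[]
     * field (not the struct itself), and record how far the struct
     * moved so free() can later be handed the original pointer. */
    static struct scratch *scratch_alloc(size_t map_longs)
    {
        size_t sz = sizeof(struct scratch) + map_longs * sizeof(unsigned long);
        unsigned char *mem = calloc(1, sz + MAP_ALIGN - 1);
        uintptr_t map_addr, aligned;
        struct scratch *s;

        if (!mem)
            return NULL;

        map_addr = (uintptr_t)mem + offsetof(struct scratch, map);
        aligned = (map_addr + MAP_ALIGN - 1) & ~(uintptr_t)(MAP_ALIGN - 1);
        s = (struct scratch *)(aligned - offsetof(struct scratch, map));
        s->align_off = (uint32_t)((uintptr_t)s - (uintptr_t)mem);
        return s;
    }

    static void scratch_free(struct scratch *s)
    {
        if (s)
            free((unsigned char *)s - s->align_off);
    }

    int main(void)
    {
        struct scratch *s = scratch_alloc(8);

        if (!s)
            return 1;
        printf("map %% %u == %lu, align_off = %u\n", MAP_ALIGN,
               (unsigned long)((uintptr_t)s->map % MAP_ALIGN),
               (unsigned)s->align_off);
        scratch_free(s);
        return 0;
    }
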
+@@ -1293,11 +1326,6 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ if (!new->scratch)
+ goto out_scratch;
+
+-#ifdef NFT_PIPAPO_ALIGN
+- new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
+- if (!new->scratch_aligned)
+- goto out_scratch;
+-#endif
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(new->scratch, i) = NULL;
+
+@@ -1349,10 +1377,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ }
+ out_scratch_realloc:
+ for_each_possible_cpu(i)
+- kfree(*per_cpu_ptr(new->scratch, i));
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(new->scratch_aligned);
+-#endif
++ pipapo_free_scratch(new, i);
+ out_scratch:
+ free_percpu(new->scratch);
+ kfree(new);
+@@ -1567,7 +1592,7 @@ static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+- struct nft_pipapo_field *f;
++ const struct nft_pipapo_field *f;
+ int i, start, rules_fx;
+
+ start = first_rule;
+@@ -1637,13 +1662,9 @@ static void pipapo_free_match(struct nft_pipapo_match *m)
+ int i;
+
+ for_each_possible_cpu(i)
+- kfree(*per_cpu_ptr(m->scratch, i));
++ pipapo_free_scratch(m, i);
+
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(m->scratch_aligned);
+-#endif
+ free_percpu(m->scratch);
+-
+ pipapo_free_fields(m);
+
+ kfree(m);
+@@ -1745,7 +1766,7 @@ static void nft_pipapo_activate(const struct net *net,
+ {
+ struct nft_pipapo_elem *e = elem->priv;
+
+- nft_set_elem_change_active(net, set, &e->ext);
++ nft_clear(net, &e->ext);
+ }
+
+ /**
+@@ -1974,6 +1995,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+ rules_fx = rules_f0;
+
+ nft_pipapo_for_each_field(f, i, m) {
++ bool last = i == m->field_count - 1;
++
+ if (!pipapo_match_field(f, start, rules_fx,
+ match_start, match_end))
+ break;
+@@ -1986,16 +2009,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+
+ match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+- }
+
+- if (i == m->field_count) {
+- priv->dirty = true;
+- pipapo_drop(m, rulemap);
+- return;
++ if (last && f->mt[rulemap[i].to].e == e) {
++ priv->dirty = true;
++ pipapo_drop(m, rulemap);
++ return;
++ }
+ }
+
+ first_rule += rules_f0;
+ }
++
++ WARN_ON_ONCE(1); /* elem_priv not found */
+ }
+
+ /**
+@@ -2012,13 +2037,15 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_iter *iter)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+- struct net *net = read_pnet(&set->net);
+- struct nft_pipapo_match *m;
+- struct nft_pipapo_field *f;
++ const struct nft_pipapo_match *m;
++ const struct nft_pipapo_field *f;
+ int i, r;
+
++ WARN_ON_ONCE(iter->type != NFT_ITER_READ &&
++ iter->type != NFT_ITER_UPDATE);
++
+ rcu_read_lock();
+- if (iter->genmask == nft_genmask_cur(net))
++ if (iter->type == NFT_ITER_READ)
+ m = rcu_dereference(priv->match);
+ else
+ m = priv->clone;
+@@ -2127,7 +2154,7 @@ static int nft_pipapo_init(const struct nft_set *set,
+ m->field_count = field_count;
+ m->bsize_max = 0;
+
+- m->scratch = alloc_percpu(unsigned long *);
++ m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
+ if (!m->scratch) {
+ err = -ENOMEM;
+ goto out_scratch;
+@@ -2135,16 +2162,6 @@ static int nft_pipapo_init(const struct nft_set *set,
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(m->scratch, i) = NULL;
+
+-#ifdef NFT_PIPAPO_ALIGN
+- m->scratch_aligned = alloc_percpu(unsigned long *);
+- if (!m->scratch_aligned) {
+- err = -ENOMEM;
+- goto out_free;
+- }
+- for_each_possible_cpu(i)
+- *per_cpu_ptr(m->scratch_aligned, i) = NULL;
+-#endif
+-
+ rcu_head_init(&m->rcu);
+
+ nft_pipapo_for_each_field(f, i, m) {
+@@ -2175,9 +2192,6 @@ static int nft_pipapo_init(const struct nft_set *set,
+ return 0;
+
+ out_free:
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(m->scratch_aligned);
+-#endif
+ free_percpu(m->scratch);
+ out_scratch:
+ kfree(m);
+@@ -2229,13 +2243,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ if (m) {
+ rcu_barrier();
+
+- nft_set_pipapo_match_destroy(ctx, set, m);
+-
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(m->scratch_aligned);
+-#endif
+ for_each_possible_cpu(cpu)
+- kfree(*per_cpu_ptr(m->scratch, cpu));
++ pipapo_free_scratch(m, cpu);
+ free_percpu(m->scratch);
+ pipapo_free_fields(m);
+ kfree(m);
+@@ -2245,14 +2254,10 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ if (priv->clone) {
+ m = priv->clone;
+
+- if (priv->dirty)
+- nft_set_pipapo_match_destroy(ctx, set, m);
++ nft_set_pipapo_match_destroy(ctx, set, m);
+
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(priv->clone->scratch_aligned);
+-#endif
+ for_each_possible_cpu(cpu)
+- kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
++ pipapo_free_scratch(priv->clone, cpu);
+ free_percpu(priv->clone->scratch);
+
+ pipapo_free_fields(priv->clone);
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index 2e164a319945f7..aad9130cc7635c 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -130,21 +130,29 @@ struct nft_pipapo_field {
+ union nft_pipapo_map_bucket *mt;
+ };
+
++/**
++ * struct nft_pipapo_scratch - percpu data used for lookup and matching
++ * @map_index: Current working bitmap index, toggled between field matches
++ * @align_off: Offset to get the originally allocated address
++ * @map: Store partial matching results during lookup
++ */
++struct nft_pipapo_scratch {
++ u8 map_index;
++ u32 align_off;
++ unsigned long map[];
++};
++
+ /**
+ * struct nft_pipapo_match - Data used for lookup and matching
+ * @field_count Amount of fields in set
+ * @scratch: Preallocated per-CPU maps for partial matching results
+- * @scratch_aligned: Version of @scratch aligned to NFT_PIPAPO_ALIGN bytes
+ * @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @rcu Matching data is swapped on commits
+ * @f: Fields, with lookup and mapping tables
+ */
+ struct nft_pipapo_match {
+ int field_count;
+-#ifdef NFT_PIPAPO_ALIGN
+- unsigned long * __percpu *scratch_aligned;
+-#endif
+- unsigned long * __percpu *scratch;
++ struct nft_pipapo_scratch * __percpu *scratch;
+ size_t bsize_max;
+ struct rcu_head rcu;
+ struct nft_pipapo_field f[] __counted_by(field_count);
+@@ -177,7 +185,7 @@ struct nft_pipapo_elem {
+ };
+
+ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+- union nft_pipapo_map_bucket *mt, bool match_only);
++ const union nft_pipapo_map_bucket *mt, bool match_only);
+
+ /**
+ * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
+@@ -185,7 +193,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+ * @dst: Area to store result
+ * @data: Input data selecting table buckets
+ */
+-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
++static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
+ unsigned long *dst,
+ const u8 *data)
+ {
+@@ -213,7 +221,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+ * @dst: Area to store result
+ * @data: Input data selecting table buckets
+ */
+-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
++static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
+ unsigned long *dst,
+ const u8 *data)
+ {
+@@ -277,4 +285,25 @@ static u64 pipapo_estimate_size(const struct nft_set_desc *desc)
+ return size;
+ }
+
++/**
++ * pipapo_resmap_init() - Initialise result map before first use
++ * @m: Matching data, including mapping table
++ * @res_map: Result map
++ *
++ * Initialise all bits covered by the first field to one, so that after
++ * the first step, only the matching bits of the first bit group remain.
++ *
++ * If other fields have a large bitmap, set remainder of res_map to 0.
++ */
++static inline void pipapo_resmap_init(const struct nft_pipapo_match *m, unsigned long *res_map)
++{
++ const struct nft_pipapo_field *f = m->f;
++ int i;
++
++ for (i = 0; i < f->bsize; i++)
++ res_map[i] = ULONG_MAX;
++
++ for (i = f->bsize; i < m->bsize_max; i++)
++ res_map[i] = 0ul;
++}
+ #endif /* _NFT_SET_PIPAPO_H */
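
In sketch form, the helper's effect on a result map of bsize_max longs, where only the first field's slice starts all-ones (the sizes below are made up):

    #include <limits.h>
    #include <stdio.h>

    /* Fill the first field's slice of the result map with ones and zero
     * the rest, mirroring what pipapo_resmap_init() does above. */
    static void resmap_init(unsigned long *res, size_t first_bsize,
                            size_t bsize_max)
    {
        size_t i;

        for (i = 0; i < first_bsize; i++)
            res[i] = ULONG_MAX;   /* every rule of field 0 still possible */
        for (; i < bsize_max; i++)
            res[i] = 0ul;         /* later fields start from a clean map */
    }

    int main(void)
    {
        unsigned long res[4];

        resmap_init(res, 2, 4);
        for (size_t i = 0; i < 4; i++)
            printf("res[%zu] = %lx\n", i, res[i]);
        return 0;
    }
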
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 52e0d026d30ad2..b8d3c3213efee5 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -57,7 +57,7 @@
+
+ /* Jump to label if @reg is zero */
+ #define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label) \
+- asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
++ asm goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
+ "je %l[" #label "]" : : : : label)
+
+ /* Store 256 bits from YMM register into memory. Contrary to bucket load
+@@ -71,9 +71,6 @@
+ #define NFT_PIPAPO_AVX2_ZERO(reg) \
+ asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
+
+-/* Current working bitmap index, toggled between field matches */
+-static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
+-
+ /**
+ * nft_pipapo_avx2_prepare() - Prepare before main algorithm body
+ *
+@@ -215,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
+@@ -277,8 +275,9 @@ static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
+@@ -353,8 +352,9 @@ static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
+ pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
+@@ -448,8 +448,9 @@ static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
+ pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
+@@ -537,8 +538,9 @@ static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
+ pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
+@@ -672,8 +674,9 @@ static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -729,8 +732,9 @@ static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -793,8 +797,9 @@ static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -868,8 +873,9 @@ static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -953,8 +959,9 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
+ * word index to be checked next (i.e. first filled word).
+ */
+ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+@@ -1029,6 +1036,7 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+
+ /**
+ * nft_pipapo_avx2_lookup_slow() - Fallback function for uncommon field sizes
++ * @mdata: Matching data, including mapping table
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+@@ -1044,15 +1052,17 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+-static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
+- struct nft_pipapo_field *f, int offset,
+- const u8 *pkt, bool first, bool last)
++static int nft_pipapo_avx2_lookup_slow(const struct nft_pipapo_match *mdata,
++ unsigned long *map, unsigned long *fill,
++ const struct nft_pipapo_field *f,
++ int offset, const u8 *pkt,
++ bool first, bool last)
+ {
+ unsigned long bsize = f->bsize;
+ int i, ret = -1, b;
+
+ if (first)
+- memset(map, 0xff, bsize * sizeof(*map));
++ pipapo_resmap_init(mdata, map);
+
+ for (i = offset; i < bsize; i++) {
+ if (f->bb == 8)
+@@ -1120,16 +1130,23 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+- unsigned long *res, *fill, *scratch;
++ struct nft_pipapo_scratch *scratch;
+ u8 genmask = nft_genmask_cur(net);
++ const struct nft_pipapo_match *m;
++ const struct nft_pipapo_field *f;
+ const u8 *rp = (const u8 *)key;
+- struct nft_pipapo_match *m;
+- struct nft_pipapo_field *f;
++ unsigned long *res, *fill;
+ bool map_index;
+ int i, ret = 0;
+
+- if (unlikely(!irq_fpu_usable()))
+- return nft_pipapo_lookup(net, set, key, ext);
++ local_bh_disable();
++
++ if (unlikely(!irq_fpu_usable())) {
++ bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
++
++ local_bh_enable();
++ return fallback_res;
++ }
+
+ m = rcu_dereference(priv->match);
+
+@@ -1141,15 +1158,17 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ */
+ kernel_fpu_begin_mask(0);
+
+- scratch = *raw_cpu_ptr(m->scratch_aligned);
++ scratch = *raw_cpu_ptr(m->scratch);
+ if (unlikely(!scratch)) {
+ kernel_fpu_end();
++ local_bh_enable();
+ return false;
+ }
+- map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
+
+- res = scratch + (map_index ? m->bsize_max : 0);
+- fill = scratch + (map_index ? 0 : m->bsize_max);
++ map_index = scratch->map_index;
++
++ res = scratch->map + (map_index ? m->bsize_max : 0);
++ fill = scratch->map + (map_index ? 0 : m->bsize_max);
+
+ /* Starting map doesn't need to be set for this implementation */
+
+@@ -1176,7 +1195,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ } else if (f->groups == 16) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16);
+ } else {
+- ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
++ ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
+ ret, rp,
+ first, last);
+ }
+@@ -1192,7 +1211,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ } else if (f->groups == 32) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32);
+ } else {
+- ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
++ ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
+ ret, rp,
+ first, last);
+ }
+@@ -1221,8 +1240,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+
+ out:
+ if (i % 2)
+- raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
++ scratch->map_index = !map_index;
+ kernel_fpu_end();
++ local_bh_enable();
+
+ return ret >= 0;
+ }
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index e34662f4a71e0f..afbda7e3fd0487 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -235,7 +235,7 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+
+ static const struct nft_rbtree_elem *
+ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+- struct nft_rbtree_elem *rbe, u8 genmask)
++ struct nft_rbtree_elem *rbe)
+ {
+ struct nft_set *set = (struct nft_set *)__set;
+ struct rb_node *prev = rb_prev(&rbe->node);
+@@ -254,7 +254,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+ while (prev) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ if (nft_rbtree_interval_end(rbe_prev) &&
+- nft_set_elem_active(&rbe_prev->ext, genmask))
++ nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
+ break;
+
+ prev = rb_prev(prev);
+@@ -365,7 +365,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ const struct nft_rbtree_elem *removed_end;
+
+- removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
++ removed_end = nft_rbtree_gc_elem(set, priv, rbe);
+ if (IS_ERR(removed_end))
+ return PTR_ERR(removed_end);
+
+@@ -527,7 +527,7 @@ static void nft_rbtree_activate(const struct net *net,
+ {
+ struct nft_rbtree_elem *rbe = elem->priv;
+
+- nft_set_elem_change_active(net, set, &rbe->ext);
++ nft_clear(net, &rbe->ext);
+ }
+
+ static bool nft_rbtree_flush(const struct net *net,
+@@ -596,8 +596,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
+
+ if (iter->count < iter->skip)
+ goto cont;
+- if (!nft_set_elem_active(&rbe->ext, iter->genmask))
+- goto cont;
+
+ elem.priv = rbe;
+
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 9ed85be79452d9..0a8883a93e8369 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -9,7 +9,8 @@
+
+ struct nft_socket {
+ enum nft_socket_keys key:8;
+- u8 level;
++ u8 level; /* cgroupv2 level to extract */
++ u8 level_user; /* cgroupv2 level provided by userspace */
+ u8 len;
+ union {
+ u8 dreg;
+@@ -53,6 +54,28 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
+ memcpy(dest, &cgid, sizeof(u64));
+ return true;
+ }
++
++/* process context only, uses current->nsproxy. */
++static noinline int nft_socket_cgroup_subtree_level(void)
++{
++ struct cgroup *cgrp = cgroup_get_from_path("/");
++ int level;
++
++ if (IS_ERR(cgrp))
++ return PTR_ERR(cgrp);
++
++ level = cgrp->level;
++
++ cgroup_put(cgrp);
++
++ if (WARN_ON_ONCE(level > 255))
++ return -ERANGE;
++
++ if (WARN_ON_ONCE(level < 0))
++ return -EINVAL;
++
++ return level;
++}
+ #endif
+
+ static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
+@@ -110,13 +133,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ *dest = READ_ONCE(sk->sk_mark);
+ } else {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ break;
+ case NFT_SOCKET_WILDCARD:
+ if (!sk_fullsock(sk)) {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ nft_socket_wildcard(pkt, regs, sk, dest);
+ break;
+@@ -124,7 +147,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ case NFT_SOCKET_CGROUPV2:
+ if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ break;
+ #endif
+@@ -133,6 +156,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ regs->verdict.code = NFT_BREAK;
+ }
+
++out_put_sk:
+ if (sk != skb->sk)
+ sock_gen_put(sk);
+ }
+@@ -173,9 +197,10 @@ static int nft_socket_init(const struct nft_ctx *ctx,
+ case NFT_SOCKET_MARK:
+ len = sizeof(u32);
+ break;
+-#ifdef CONFIG_CGROUPS
++#ifdef CONFIG_SOCK_CGROUP_DATA
+ case NFT_SOCKET_CGROUPV2: {
+ unsigned int level;
++ int err;
+
+ if (!tb[NFTA_SOCKET_LEVEL])
+ return -EINVAL;
+@@ -184,6 +209,17 @@ static int nft_socket_init(const struct nft_ctx *ctx,
+ if (level > 255)
+ return -EOPNOTSUPP;
+
++ err = nft_socket_cgroup_subtree_level();
++ if (err < 0)
++ return err;
++
++ priv->level_user = level;
++
++ level += err;
++ /* Implies a giant cgroup tree */
++ if (WARN_ON_ONCE(level > 255))
++ return -EOPNOTSUPP;
++
+ priv->level = level;
+ len = sizeof(u64);
+ break;
+@@ -208,7 +244,7 @@ static int nft_socket_dump(struct sk_buff *skb,
+ if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
+ return -1;
+ if (priv->key == NFT_SOCKET_CGROUPV2 &&
+- nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
++ nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level_user)))
+ return -1;
+ return 0;
+ }
+@@ -242,6 +278,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
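
The saved level_user keeps dumps symmetric with what userspace configured, while the matching level is rebased onto the depth of the current cgroup root so the expression also works inside containers. Roughly, the init-time computation reduces to the following, with subtree_level standing in for the result of nft_socket_cgroup_subtree_level():

    #include <stdio.h>

    /* Translate a user-supplied cgroup depth (relative to the local
     * root) into an absolute depth, rejecting anything past the u8
     * the expression stores. */
    static int resolve_cgroup_level(unsigned int user_level,
                                    int subtree_level)
    {
        if (subtree_level < 0)
            return subtree_level;    /* propagate lookup error */
        if (user_level > 255)
            return -1;
        if (user_level + (unsigned int)subtree_level > 255)
            return -1;               /* implies a giant cgroup tree */
        return (int)(user_level + (unsigned int)subtree_level);
    }

    int main(void)
    {
        printf("level 3 in a container rooted at depth 2 -> %d\n",
               resolve_cgroup_level(3, 2));
        printf("level 250 at root depth 10 -> %d\n",
               resolve_cgroup_level(250, 10));
        return 0;
    }
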
+diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
+index 13da882669a4ee..1d737f89dfc18c 100644
+--- a/net/netfilter/nft_synproxy.c
++++ b/net/netfilter/nft_synproxy.c
+@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
+ break;
+ #endif
+ case NFPROTO_INET:
+- case NFPROTO_BRIDGE:
+ err = nf_synproxy_ipv4_init(snet, ctx->net);
+ if (err)
+ goto nf_ct_failure;
+@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
+ break;
+ #endif
+ case NFPROTO_INET:
+- case NFPROTO_BRIDGE:
+ nf_synproxy_ipv4_fini(snet, ctx->net);
+ nf_synproxy_ipv6_fini(snet, ctx->net);
+ break;
+@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD));
+ }
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index ae15cd693f0ec2..71412adb73d414 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+ {
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
+ }
+
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 9f21953c7433ff..f735d79d8be577 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -713,6 +713,7 @@ static const struct nft_object_ops nft_tunnel_obj_ops = {
+
+ static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
+ .type = NFT_OBJECT_TUNNEL,
++ .family = NFPROTO_NETDEV,
+ .ops = &nft_tunnel_obj_ops,
+ .maxattr = NFTA_TUNNEL_KEY_MAX,
+ .policy = nft_tunnel_key_policy,
+diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
+index 452f8587addadc..1c866757db5524 100644
+--- a/net/netfilter/nft_xfrm.c
++++ b/net/netfilter/nft_xfrm.c
+@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
+ const struct nft_xfrm *priv = nft_expr_priv(expr);
+ unsigned int hooks;
+
++ if (ctx->family != NFPROTO_IPV4 &&
++ ctx->family != NFPROTO_IPV6 &&
++ ctx->family != NFPROTO_INET)
++ return -EOPNOTSUPP;
++
+ switch (priv->dir) {
+ case XFRM_POLICY_IN:
+ hooks = (1 << NF_INET_FORWARD) |
+diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
+index c8a639f5616841..9d99f5a3d1764b 100644
+--- a/net/netfilter/xt_CHECKSUM.c
++++ b/net/netfilter/xt_CHECKSUM.c
+@@ -63,24 +63,37 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
+ return 0;
+ }
+
+-static struct xt_target checksum_tg_reg __read_mostly = {
+- .name = "CHECKSUM",
+- .family = NFPROTO_UNSPEC,
+- .target = checksum_tg,
+- .targetsize = sizeof(struct xt_CHECKSUM_info),
+- .table = "mangle",
+- .checkentry = checksum_tg_check,
+- .me = THIS_MODULE,
++static struct xt_target checksum_tg_reg[] __read_mostly = {
++ {
++ .name = "CHECKSUM",
++ .family = NFPROTO_IPV4,
++ .target = checksum_tg,
++ .targetsize = sizeof(struct xt_CHECKSUM_info),
++ .table = "mangle",
++ .checkentry = checksum_tg_check,
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "CHECKSUM",
++ .family = NFPROTO_IPV6,
++ .target = checksum_tg,
++ .targetsize = sizeof(struct xt_CHECKSUM_info),
++ .table = "mangle",
++ .checkentry = checksum_tg_check,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init checksum_tg_init(void)
+ {
+- return xt_register_target(&checksum_tg_reg);
++ return xt_register_targets(checksum_tg_reg, ARRAY_SIZE(checksum_tg_reg));
+ }
+
+ static void __exit checksum_tg_exit(void)
+ {
+- xt_unregister_target(&checksum_tg_reg);
++ xt_unregister_targets(checksum_tg_reg, ARRAY_SIZE(checksum_tg_reg));
+ }
+
+ module_init(checksum_tg_init);
+diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
+index 0accac98dea784..0ae8d8a1216e19 100644
+--- a/net/netfilter/xt_CLASSIFY.c
++++ b/net/netfilter/xt_CLASSIFY.c
+@@ -38,9 +38,9 @@ static struct xt_target classify_tg_reg[] __read_mostly = {
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+- (1 << NF_INET_POST_ROUTING),
++ (1 << NF_INET_POST_ROUTING),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+@@ -54,6 +54,18 @@ static struct xt_target classify_tg_reg[] __read_mostly = {
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "CLASSIFY",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
++ (1 << NF_INET_POST_ROUTING),
++ .target = classify_tg,
++ .targetsize = sizeof(struct xt_classify_target_info),
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init classify_tg_init(void)
+diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
+index 76acecf3e757a0..1494b3ee30e11e 100644
+--- a/net/netfilter/xt_CONNSECMARK.c
++++ b/net/netfilter/xt_CONNSECMARK.c
+@@ -114,25 +114,39 @@ static void connsecmark_tg_destroy(const struct xt_tgdtor_param *par)
+ nf_ct_netns_put(par->net, par->family);
+ }
+
+-static struct xt_target connsecmark_tg_reg __read_mostly = {
+- .name = "CONNSECMARK",
+- .revision = 0,
+- .family = NFPROTO_UNSPEC,
+- .checkentry = connsecmark_tg_check,
+- .destroy = connsecmark_tg_destroy,
+- .target = connsecmark_tg,
+- .targetsize = sizeof(struct xt_connsecmark_target_info),
+- .me = THIS_MODULE,
++static struct xt_target connsecmark_tg_reg[] __read_mostly = {
++ {
++ .name = "CONNSECMARK",
++ .revision = 0,
++ .family = NFPROTO_IPV4,
++ .checkentry = connsecmark_tg_check,
++ .destroy = connsecmark_tg_destroy,
++ .target = connsecmark_tg,
++ .targetsize = sizeof(struct xt_connsecmark_target_info),
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "CONNSECMARK",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .checkentry = connsecmark_tg_check,
++ .destroy = connsecmark_tg_destroy,
++ .target = connsecmark_tg,
++ .targetsize = sizeof(struct xt_connsecmark_target_info),
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init connsecmark_tg_init(void)
+ {
+- return xt_register_target(&connsecmark_tg_reg);
++ return xt_register_targets(connsecmark_tg_reg, ARRAY_SIZE(connsecmark_tg_reg));
+ }
+
+ static void __exit connsecmark_tg_exit(void)
+ {
+- xt_unregister_target(&connsecmark_tg_reg);
++ xt_unregister_targets(connsecmark_tg_reg, ARRAY_SIZE(connsecmark_tg_reg));
+ }
+
+ module_init(connsecmark_tg_init);
+diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
+index 2be2f7a7b60f4e..3ba94c34297cf5 100644
+--- a/net/netfilter/xt_CT.c
++++ b/net/netfilter/xt_CT.c
+@@ -313,10 +313,30 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+ xt_ct_tg_destroy(par, par->targinfo);
+ }
+
++static unsigned int
++notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
++{
++ /* Previously seen (loopback)? Ignore. */
++ if (skb->_nfct != 0)
++ return XT_CONTINUE;
++
++ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
++
++ return XT_CONTINUE;
++}
++
+ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
++ {
++ .name = "NOTRACK",
++ .revision = 0,
++ .family = NFPROTO_IPV4,
++ .target = notrack_tg,
++ .table = "raw",
++ .me = THIS_MODULE,
++ },
+ {
+ .name = "CT",
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .targetsize = sizeof(struct xt_ct_target_info),
+ .usersize = offsetof(struct xt_ct_target_info, ct),
+ .checkentry = xt_ct_tg_check_v0,
+@@ -327,7 +347,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
+ },
+ {
+ .name = "CT",
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .revision = 1,
+ .targetsize = sizeof(struct xt_ct_target_info_v1),
+ .usersize = offsetof(struct xt_ct_target_info, ct),
+@@ -339,7 +359,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
+ },
+ {
+ .name = "CT",
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .revision = 2,
+ .targetsize = sizeof(struct xt_ct_target_info_v1),
+ .usersize = offsetof(struct xt_ct_target_info, ct),
+@@ -349,49 +369,61 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+-};
+-
+-static unsigned int
+-notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
+-{
+- /* Previously seen (loopback)? Ignore. */
+- if (skb->_nfct != 0)
+- return XT_CONTINUE;
+-
+- nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+-
+- return XT_CONTINUE;
+-}
+-
+-static struct xt_target notrack_tg_reg __read_mostly = {
+- .name = "NOTRACK",
+- .revision = 0,
+- .family = NFPROTO_UNSPEC,
+- .target = notrack_tg,
+- .table = "raw",
+- .me = THIS_MODULE,
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "NOTRACK",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .target = notrack_tg,
++ .table = "raw",
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "CT",
++ .family = NFPROTO_IPV6,
++ .targetsize = sizeof(struct xt_ct_target_info),
++ .usersize = offsetof(struct xt_ct_target_info, ct),
++ .checkentry = xt_ct_tg_check_v0,
++ .destroy = xt_ct_tg_destroy_v0,
++ .target = xt_ct_target_v0,
++ .table = "raw",
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "CT",
++ .family = NFPROTO_IPV6,
++ .revision = 1,
++ .targetsize = sizeof(struct xt_ct_target_info_v1),
++ .usersize = offsetof(struct xt_ct_target_info, ct),
++ .checkentry = xt_ct_tg_check_v1,
++ .destroy = xt_ct_tg_destroy_v1,
++ .target = xt_ct_target_v1,
++ .table = "raw",
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "CT",
++ .family = NFPROTO_IPV6,
++ .revision = 2,
++ .targetsize = sizeof(struct xt_ct_target_info_v1),
++ .usersize = offsetof(struct xt_ct_target_info, ct),
++ .checkentry = xt_ct_tg_check_v2,
++ .destroy = xt_ct_tg_destroy_v1,
++ .target = xt_ct_target_v1,
++ .table = "raw",
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init xt_ct_tg_init(void)
+ {
+- int ret;
+-
+- ret = xt_register_target(&notrack_tg_reg);
+- if (ret < 0)
+- return ret;
+-
+- ret = xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
+- if (ret < 0) {
+- xt_unregister_target(&notrack_tg_reg);
+- return ret;
+- }
+- return 0;
++ return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
+ }
+
+ static void __exit xt_ct_tg_exit(void)
+ {
+ xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
+- xt_unregister_target(&notrack_tg_reg);
+ }
+
+ module_init(xt_ct_tg_init);
+diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
+index db720efa811d58..f8b25b6f5da736 100644
+--- a/net/netfilter/xt_IDLETIMER.c
++++ b/net/netfilter/xt_IDLETIMER.c
+@@ -458,28 +458,49 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
+
+ static struct xt_target idletimer_tg[] __read_mostly = {
+ {
+- .name = "IDLETIMER",
+- .family = NFPROTO_UNSPEC,
+- .target = idletimer_tg_target,
+- .targetsize = sizeof(struct idletimer_tg_info),
+- .usersize = offsetof(struct idletimer_tg_info, timer),
+- .checkentry = idletimer_tg_checkentry,
+- .destroy = idletimer_tg_destroy,
+- .me = THIS_MODULE,
++ .name = "IDLETIMER",
++ .family = NFPROTO_IPV4,
++ .target = idletimer_tg_target,
++ .targetsize = sizeof(struct idletimer_tg_info),
++ .usersize = offsetof(struct idletimer_tg_info, timer),
++ .checkentry = idletimer_tg_checkentry,
++ .destroy = idletimer_tg_destroy,
++ .me = THIS_MODULE,
+ },
+ {
+- .name = "IDLETIMER",
+- .family = NFPROTO_UNSPEC,
+- .revision = 1,
+- .target = idletimer_tg_target_v1,
+- .targetsize = sizeof(struct idletimer_tg_info_v1),
+- .usersize = offsetof(struct idletimer_tg_info_v1, timer),
+- .checkentry = idletimer_tg_checkentry_v1,
+- .destroy = idletimer_tg_destroy_v1,
+- .me = THIS_MODULE,
++ .name = "IDLETIMER",
++ .family = NFPROTO_IPV4,
++ .revision = 1,
++ .target = idletimer_tg_target_v1,
++ .targetsize = sizeof(struct idletimer_tg_info_v1),
++ .usersize = offsetof(struct idletimer_tg_info_v1, timer),
++ .checkentry = idletimer_tg_checkentry_v1,
++ .destroy = idletimer_tg_destroy_v1,
++ .me = THIS_MODULE,
+ },
+-
+-
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "IDLETIMER",
++ .family = NFPROTO_IPV6,
++ .target = idletimer_tg_target,
++ .targetsize = sizeof(struct idletimer_tg_info),
++ .usersize = offsetof(struct idletimer_tg_info, timer),
++ .checkentry = idletimer_tg_checkentry,
++ .destroy = idletimer_tg_destroy,
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "IDLETIMER",
++ .family = NFPROTO_IPV6,
++ .revision = 1,
++ .target = idletimer_tg_target_v1,
++ .targetsize = sizeof(struct idletimer_tg_info_v1),
++ .usersize = offsetof(struct idletimer_tg_info_v1, timer),
++ .checkentry = idletimer_tg_checkentry_v1,
++ .destroy = idletimer_tg_destroy_v1,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static struct class *idletimer_tg_class;
+diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
+index 36c9720ad8d6d4..f7b0286d106ac1 100644
+--- a/net/netfilter/xt_LED.c
++++ b/net/netfilter/xt_LED.c
+@@ -175,26 +175,41 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
+ kfree(ledinternal);
+ }
+
+-static struct xt_target led_tg_reg __read_mostly = {
+- .name = "LED",
+- .revision = 0,
+- .family = NFPROTO_UNSPEC,
+- .target = led_tg,
+- .targetsize = sizeof(struct xt_led_info),
+- .usersize = offsetof(struct xt_led_info, internal_data),
+- .checkentry = led_tg_check,
+- .destroy = led_tg_destroy,
+- .me = THIS_MODULE,
++static struct xt_target led_tg_reg[] __read_mostly = {
++ {
++ .name = "LED",
++ .revision = 0,
++ .family = NFPROTO_IPV4,
++ .target = led_tg,
++ .targetsize = sizeof(struct xt_led_info),
++ .usersize = offsetof(struct xt_led_info, internal_data),
++ .checkentry = led_tg_check,
++ .destroy = led_tg_destroy,
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "LED",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .target = led_tg,
++ .targetsize = sizeof(struct xt_led_info),
++ .usersize = offsetof(struct xt_led_info, internal_data),
++ .checkentry = led_tg_check,
++ .destroy = led_tg_destroy,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init led_tg_init(void)
+ {
+- return xt_register_target(&led_tg_reg);
++ return xt_register_targets(led_tg_reg, ARRAY_SIZE(led_tg_reg));
+ }
+
+ static void __exit led_tg_exit(void)
+ {
+- xt_unregister_target(&led_tg_reg);
++ xt_unregister_targets(led_tg_reg, ARRAY_SIZE(led_tg_reg));
+ }
+
+ module_init(led_tg_init);
+diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
+index e660c3710a1096..d80abd6ccaf8f7 100644
+--- a/net/netfilter/xt_NFLOG.c
++++ b/net/netfilter/xt_NFLOG.c
+@@ -64,25 +64,39 @@ static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
+ nf_logger_put(par->family, NF_LOG_TYPE_ULOG);
+ }
+
+-static struct xt_target nflog_tg_reg __read_mostly = {
+- .name = "NFLOG",
+- .revision = 0,
+- .family = NFPROTO_UNSPEC,
+- .checkentry = nflog_tg_check,
+- .destroy = nflog_tg_destroy,
+- .target = nflog_tg,
+- .targetsize = sizeof(struct xt_nflog_info),
+- .me = THIS_MODULE,
++static struct xt_target nflog_tg_reg[] __read_mostly = {
++ {
++ .name = "NFLOG",
++ .revision = 0,
++ .family = NFPROTO_IPV4,
++ .checkentry = nflog_tg_check,
++ .destroy = nflog_tg_destroy,
++ .target = nflog_tg,
++ .targetsize = sizeof(struct xt_nflog_info),
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "NFLOG",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .checkentry = nflog_tg_check,
++ .destroy = nflog_tg_destroy,
++ .target = nflog_tg,
++ .targetsize = sizeof(struct xt_nflog_info),
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init nflog_tg_init(void)
+ {
+- return xt_register_target(&nflog_tg_reg);
++ return xt_register_targets(nflog_tg_reg, ARRAY_SIZE(nflog_tg_reg));
+ }
+
+ static void __exit nflog_tg_exit(void)
+ {
+- xt_unregister_target(&nflog_tg_reg);
++ xt_unregister_targets(nflog_tg_reg, ARRAY_SIZE(nflog_tg_reg));
+ }
+
+ module_init(nflog_tg_init);
+diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
+index 80f6624e23554b..4f49cfc2783120 100644
+--- a/net/netfilter/xt_RATEEST.c
++++ b/net/netfilter/xt_RATEEST.c
+@@ -179,16 +179,31 @@ static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
+ xt_rateest_put(par->net, info->est);
+ }
+
+-static struct xt_target xt_rateest_tg_reg __read_mostly = {
+- .name = "RATEEST",
+- .revision = 0,
+- .family = NFPROTO_UNSPEC,
+- .target = xt_rateest_tg,
+- .checkentry = xt_rateest_tg_checkentry,
+- .destroy = xt_rateest_tg_destroy,
+- .targetsize = sizeof(struct xt_rateest_target_info),
+- .usersize = offsetof(struct xt_rateest_target_info, est),
+- .me = THIS_MODULE,
++static struct xt_target xt_rateest_tg_reg[] __read_mostly = {
++ {
++ .name = "RATEEST",
++ .revision = 0,
++ .family = NFPROTO_IPV4,
++ .target = xt_rateest_tg,
++ .checkentry = xt_rateest_tg_checkentry,
++ .destroy = xt_rateest_tg_destroy,
++ .targetsize = sizeof(struct xt_rateest_target_info),
++ .usersize = offsetof(struct xt_rateest_target_info, est),
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "RATEEST",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .target = xt_rateest_tg,
++ .checkentry = xt_rateest_tg_checkentry,
++ .destroy = xt_rateest_tg_destroy,
++ .targetsize = sizeof(struct xt_rateest_target_info),
++ .usersize = offsetof(struct xt_rateest_target_info, est),
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static __net_init int xt_rateest_net_init(struct net *net)
+@@ -214,12 +229,12 @@ static int __init xt_rateest_tg_init(void)
+
+ if (err)
+ return err;
+- return xt_register_target(&xt_rateest_tg_reg);
++ return xt_register_targets(xt_rateest_tg_reg, ARRAY_SIZE(xt_rateest_tg_reg));
+ }
+
+ static void __exit xt_rateest_tg_fini(void)
+ {
+- xt_unregister_target(&xt_rateest_tg_reg);
++ xt_unregister_targets(xt_rateest_tg_reg, ARRAY_SIZE(xt_rateest_tg_reg));
+ unregister_pernet_subsys(&xt_rateest_net_ops);
+ }
+
+diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
+index 498a0bf6f0444a..5bc5ea505eb9e0 100644
+--- a/net/netfilter/xt_SECMARK.c
++++ b/net/netfilter/xt_SECMARK.c
+@@ -157,7 +157,7 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
+ {
+ .name = "SECMARK",
+ .revision = 0,
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .checkentry = secmark_tg_check_v0,
+ .destroy = secmark_tg_destroy,
+ .target = secmark_tg_v0,
+@@ -167,7 +167,7 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
+ {
+ .name = "SECMARK",
+ .revision = 1,
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .checkentry = secmark_tg_check_v1,
+ .destroy = secmark_tg_destroy,
+ .target = secmark_tg_v1,
+@@ -175,6 +175,29 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
+ .usersize = offsetof(struct xt_secmark_target_info_v1, secid),
+ .me = THIS_MODULE,
+ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "SECMARK",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .checkentry = secmark_tg_check_v0,
++ .destroy = secmark_tg_destroy,
++ .target = secmark_tg_v0,
++ .targetsize = sizeof(struct xt_secmark_target_info),
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "SECMARK",
++ .revision = 1,
++ .family = NFPROTO_IPV6,
++ .checkentry = secmark_tg_check_v1,
++ .destroy = secmark_tg_destroy,
++ .target = secmark_tg_v1,
++ .targetsize = sizeof(struct xt_secmark_target_info_v1),
++ .usersize = offsetof(struct xt_secmark_target_info_v1, secid),
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init secmark_tg_init(void)
+diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
+index 5582dce98cae7d..f3fa4f11348cd8 100644
+--- a/net/netfilter/xt_TRACE.c
++++ b/net/netfilter/xt_TRACE.c
+@@ -29,25 +29,39 @@ trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ return XT_CONTINUE;
+ }
+
+-static struct xt_target trace_tg_reg __read_mostly = {
+- .name = "TRACE",
+- .revision = 0,
+- .family = NFPROTO_UNSPEC,
+- .table = "raw",
+- .target = trace_tg,
+- .checkentry = trace_tg_check,
+- .destroy = trace_tg_destroy,
+- .me = THIS_MODULE,
++static struct xt_target trace_tg_reg[] __read_mostly = {
++ {
++ .name = "TRACE",
++ .revision = 0,
++ .family = NFPROTO_IPV4,
++ .table = "raw",
++ .target = trace_tg,
++ .checkentry = trace_tg_check,
++ .destroy = trace_tg_destroy,
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "TRACE",
++ .revision = 0,
++ .family = NFPROTO_IPV6,
++ .table = "raw",
++ .target = trace_tg,
++ .checkentry = trace_tg_check,
++ .destroy = trace_tg_destroy,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init trace_tg_init(void)
+ {
+- return xt_register_target(&trace_tg_reg);
++ return xt_register_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg));
+ }
+
+ static void __exit trace_tg_exit(void)
+ {
+- xt_unregister_target(&trace_tg_reg);
++ xt_unregister_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg));
+ }
+
+ module_init(trace_tg_init);
+diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
+index e9b2181e8c425f..a7708894310716 100644
+--- a/net/netfilter/xt_addrtype.c
++++ b/net/netfilter/xt_addrtype.c
+@@ -208,13 +208,24 @@ static struct xt_match addrtype_mt_reg[] __read_mostly = {
+ },
+ {
+ .name = "addrtype",
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .revision = 1,
+ .match = addrtype_mt_v1,
+ .checkentry = addrtype_mt_checkentry_v1,
+ .matchsize = sizeof(struct xt_addrtype_info_v1),
+ .me = THIS_MODULE
+- }
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "addrtype",
++ .family = NFPROTO_IPV6,
++ .revision = 1,
++ .match = addrtype_mt_v1,
++ .checkentry = addrtype_mt_checkentry_v1,
++ .matchsize = sizeof(struct xt_addrtype_info_v1),
++ .me = THIS_MODULE
++ },
++#endif
+ };
+
+ static int __init addrtype_mt_init(void)
+diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
+index a047a545371e18..908fd5f2c3c848 100644
+--- a/net/netfilter/xt_cluster.c
++++ b/net/netfilter/xt_cluster.c
+@@ -146,24 +146,37 @@ static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
+ nf_ct_netns_put(par->net, par->family);
+ }
+
+-static struct xt_match xt_cluster_match __read_mostly = {
+- .name = "cluster",
+- .family = NFPROTO_UNSPEC,
+- .match = xt_cluster_mt,
+- .checkentry = xt_cluster_mt_checkentry,
+- .matchsize = sizeof(struct xt_cluster_match_info),
+- .destroy = xt_cluster_mt_destroy,
+- .me = THIS_MODULE,
++static struct xt_match xt_cluster_match[] __read_mostly = {
++ {
++ .name = "cluster",
++ .family = NFPROTO_IPV4,
++ .match = xt_cluster_mt,
++ .checkentry = xt_cluster_mt_checkentry,
++ .matchsize = sizeof(struct xt_cluster_match_info),
++ .destroy = xt_cluster_mt_destroy,
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "cluster",
++ .family = NFPROTO_IPV6,
++ .match = xt_cluster_mt,
++ .checkentry = xt_cluster_mt_checkentry,
++ .matchsize = sizeof(struct xt_cluster_match_info),
++ .destroy = xt_cluster_mt_destroy,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init xt_cluster_mt_init(void)
+ {
+- return xt_register_match(&xt_cluster_match);
++ return xt_register_matches(xt_cluster_match, ARRAY_SIZE(xt_cluster_match));
+ }
+
+ static void __exit xt_cluster_mt_fini(void)
+ {
+- xt_unregister_match(&xt_cluster_match);
++ xt_unregister_matches(xt_cluster_match, ARRAY_SIZE(xt_cluster_match));
+ }
+
+ MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
+index 93cb018c3055f8..2aabdcea870723 100644
+--- a/net/netfilter/xt_connbytes.c
++++ b/net/netfilter/xt_connbytes.c
+@@ -111,9 +111,11 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
+ return -EINVAL;
+
+ ret = nf_ct_netns_get(par->net, par->family);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
++ return ret;
++ }
+
+ /*
+ * This filter cannot function correctly unless connection tracking
+diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
+index 5d04ef80a61dcf..d1d0fa6c8061e9 100644
+--- a/net/netfilter/xt_connlimit.c
++++ b/net/netfilter/xt_connlimit.c
+@@ -106,26 +106,41 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
+ nf_conncount_destroy(par->net, par->family, info->data);
+ }
+
+-static struct xt_match connlimit_mt_reg __read_mostly = {
+- .name = "connlimit",
+- .revision = 1,
+- .family = NFPROTO_UNSPEC,
+- .checkentry = connlimit_mt_check,
+- .match = connlimit_mt,
+- .matchsize = sizeof(struct xt_connlimit_info),
+- .usersize = offsetof(struct xt_connlimit_info, data),
+- .destroy = connlimit_mt_destroy,
+- .me = THIS_MODULE,
++static struct xt_match connlimit_mt_reg[] __read_mostly = {
++ {
++ .name = "connlimit",
++ .revision = 1,
++ .family = NFPROTO_IPV4,
++ .checkentry = connlimit_mt_check,
++ .match = connlimit_mt,
++ .matchsize = sizeof(struct xt_connlimit_info),
++ .usersize = offsetof(struct xt_connlimit_info, data),
++ .destroy = connlimit_mt_destroy,
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "connlimit",
++ .revision = 1,
++ .family = NFPROTO_IPV6,
++ .checkentry = connlimit_mt_check,
++ .match = connlimit_mt,
++ .matchsize = sizeof(struct xt_connlimit_info),
++ .usersize = offsetof(struct xt_connlimit_info, data),
++ .destroy = connlimit_mt_destroy,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static int __init connlimit_mt_init(void)
+ {
+- return xt_register_match(&connlimit_mt_reg);
++ return xt_register_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
+ }
+
+ static void __exit connlimit_mt_exit(void)
+ {
+- xt_unregister_match(&connlimit_mt_reg);
++ xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
+ }
+
+ module_init(connlimit_mt_init);
+diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
+index ad3c033db64e70..4277084de2e70c 100644
+--- a/net/netfilter/xt_connmark.c
++++ b/net/netfilter/xt_connmark.c
+@@ -151,7 +151,7 @@ static struct xt_target connmark_tg_reg[] __read_mostly = {
+ {
+ .name = "CONNMARK",
+ .revision = 1,
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg,
+ .targetsize = sizeof(struct xt_connmark_tginfo1),
+@@ -161,13 +161,35 @@ static struct xt_target connmark_tg_reg[] __read_mostly = {
+ {
+ .name = "CONNMARK",
+ .revision = 2,
+- .family = NFPROTO_UNSPEC,
++ .family = NFPROTO_IPV4,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg_v2,
+ .targetsize = sizeof(struct xt_connmark_tginfo2),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
+- }
++ },
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "CONNMARK",
++ .revision = 1,
++ .family = NFPROTO_IPV6,
++ .checkentry = connmark_tg_check,
++ .target = connmark_tg,
++ .targetsize = sizeof(struct xt_connmark_tginfo1),
++ .destroy = connmark_tg_destroy,
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "CONNMARK",
++ .revision = 2,
++ .family = NFPROTO_IPV6,
++ .checkentry = connmark_tg_check,
++ .target = connmark_tg_v2,
++ .targetsize = sizeof(struct xt_connmark_tginfo2),
++ .destroy = connmark_tg_destroy,
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static struct xt_match connmark_mt_reg __read_mostly = {
+diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
+index 1ad74b5920b533..f76fe04fc9a4e1 100644
+--- a/net/netfilter/xt_mark.c
++++ b/net/netfilter/xt_mark.c
+@@ -39,13 +39,35 @@ mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ return ((skb->mark & info->mask) == info->mark) ^ info->invert;
+ }
+
+-static struct xt_target mark_tg_reg __read_mostly = {
+- .name = "MARK",
+- .revision = 2,
+- .family = NFPROTO_UNSPEC,
+- .target = mark_tg,
+- .targetsize = sizeof(struct xt_mark_tginfo2),
+- .me = THIS_MODULE,
++static struct xt_target mark_tg_reg[] __read_mostly = {
++ {
++ .name = "MARK",
++ .revision = 2,
++ .family = NFPROTO_IPV4,
++ .target = mark_tg,
++ .targetsize = sizeof(struct xt_mark_tginfo2),
++ .me = THIS_MODULE,
++ },
++#if IS_ENABLED(CONFIG_IP_NF_ARPTABLES)
++ {
++ .name = "MARK",
++ .revision = 2,
++ .family = NFPROTO_ARP,
++ .target = mark_tg,
++ .targetsize = sizeof(struct xt_mark_tginfo2),
++ .me = THIS_MODULE,
++ },
++#endif
++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
++ {
++ .name = "MARK",
++ .revision = 2,
++ .family = NFPROTO_IPV6,
++ .target = mark_tg,
++ .targetsize = sizeof(struct xt_mark_tginfo2),
++ .me = THIS_MODULE,
++ },
++#endif
+ };
+
+ static struct xt_match mark_mt_reg __read_mostly = {
+@@ -61,12 +83,12 @@ static int __init mark_mt_init(void)
+ {
+ int ret;
+
+- ret = xt_register_target(&mark_tg_reg);
++ ret = xt_register_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
+ if (ret < 0)
+ return ret;
+ ret = xt_register_match(&mark_mt_reg);
+ if (ret < 0) {
+- xt_unregister_target(&mark_tg_reg);
++ xt_unregister_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
+ return ret;
+ }
+ return 0;
+@@ -75,7 +97,7 @@ static int __init mark_mt_init(void)
+ static void __exit mark_mt_exit(void)
+ {
+ xt_unregister_match(&mark_mt_reg);
+- xt_unregister_target(&mark_tg_reg);
++ xt_unregister_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
+ }
+
+ module_init(mark_mt_init);
+diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
+index e85ce69924aee9..50332888c8d233 100644
+--- a/net/netfilter/xt_owner.c
++++ b/net/netfilter/xt_owner.c
+@@ -76,18 +76,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ */
+ return false;
+
+- filp = sk->sk_socket->file;
+- if (filp == NULL)
++ read_lock_bh(&sk->sk_callback_lock);
++ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
++ if (filp == NULL) {
++ read_unlock_bh(&sk->sk_callback_lock);
+ return ((info->match ^ info->invert) &
+ (XT_OWNER_UID | XT_OWNER_GID)) == 0;
++ }
+
+ if (info->match & XT_OWNER_UID) {
+ kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
+ kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
+ if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+ uid_lte(filp->f_cred->fsuid, uid_max)) ^
+- !(info->invert & XT_OWNER_UID))
++ !(info->invert & XT_OWNER_UID)) {
++ read_unlock_bh(&sk->sk_callback_lock);
+ return false;
++ }
+ }
+
+ if (info->match & XT_OWNER_GID) {
+@@ -112,10 +117,13 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ }
+ }
+
+- if (match ^ !(info->invert & XT_OWNER_GID))
++ if (match ^ !(info->invert & XT_OWNER_GID)) {
++ read_unlock_bh(&sk->sk_callback_lock);
+ return false;
++ }
+ }
+
++ read_unlock_bh(&sk->sk_callback_lock);
+ return true;
+ }
+
+diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
+index ec6ed6fda96c59..343e65f377d442 100644
+--- a/net/netfilter/xt_physdev.c
++++ b/net/netfilter/xt_physdev.c
+@@ -59,7 +59,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ (!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+ return false;
+
+- physdev = nf_bridge_get_physindev(skb);
++ physdev = nf_bridge_get_physindev(skb, xt_net(par));
+ indev = physdev ? physdev->name : NULL;
+
+ if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 7ddb9a78e3fc88..ef93e0d3bee04c 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
+ {
+ struct recent_table *t = pde_data(file_inode(file));
+ struct recent_entry *e;
+- char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
++ char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
+ const char *c = buf;
+ union nf_inet_addr addr = {};
+ u_int16_t family;
+diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
+index f1d5b846521780..a07c2216d28b6c 100644
+--- a/net/netlabel/netlabel_calipso.c
++++ b/net/netlabel/netlabel_calipso.c
+@@ -54,6 +54,28 @@ static const struct nla_policy calipso_genl_policy[NLBL_CALIPSO_A_MAX + 1] = {
+ [NLBL_CALIPSO_A_MTYPE] = { .type = NLA_U32 },
+ };
+
++static const struct netlbl_calipso_ops *calipso_ops;
++
++/**
++ * netlbl_calipso_ops_register - Register the CALIPSO operations
++ * @ops: ops to register
++ *
++ * Description:
++ * Register the CALIPSO packet engine operations.
++ *
++ */
++const struct netlbl_calipso_ops *
++netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
++{
++ return xchg(&calipso_ops, ops);
++}
++EXPORT_SYMBOL(netlbl_calipso_ops_register);
++
++static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
++{
++ return READ_ONCE(calipso_ops);
++}
++
+ /* NetLabel Command Handlers
+ */
+ /**
+@@ -96,15 +118,18 @@ static int netlbl_calipso_add_pass(struct genl_info *info,
+ *
+ */
+ static int netlbl_calipso_add(struct sk_buff *skb, struct genl_info *info)
+-
+ {
+ int ret_val = -EINVAL;
+ struct netlbl_audit audit_info;
++ const struct netlbl_calipso_ops *ops = netlbl_calipso_ops_get();
+
+ if (!info->attrs[NLBL_CALIPSO_A_DOI] ||
+ !info->attrs[NLBL_CALIPSO_A_MTYPE])
+ return -EINVAL;
+
++ if (!ops)
++ return -EOPNOTSUPP;
++
+ netlbl_netlink_auditinfo(&audit_info);
+ switch (nla_get_u32(info->attrs[NLBL_CALIPSO_A_MTYPE])) {
+ case CALIPSO_MAP_PASS:
+@@ -363,28 +388,6 @@ int __init netlbl_calipso_genl_init(void)
+ return genl_register_family(&netlbl_calipso_gnl_family);
+ }
+
+-static const struct netlbl_calipso_ops *calipso_ops;
+-
+-/**
+- * netlbl_calipso_ops_register - Register the CALIPSO operations
+- * @ops: ops to register
+- *
+- * Description:
+- * Register the CALIPSO packet engine operations.
+- *
+- */
+-const struct netlbl_calipso_ops *
+-netlbl_calipso_ops_register(const struct netlbl_calipso_ops *ops)
+-{
+- return xchg(&calipso_ops, ops);
+-}
+-EXPORT_SYMBOL(netlbl_calipso_ops_register);
+-
+-static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
+-{
+- return READ_ONCE(calipso_ops);
+-}
+-
+ /**
+ * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine
+ * @doi_def: the DOI structure
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index eb086b06d60da4..50e13207a05aa5 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+ "nlk_cb_mutex-MAX_LINKS"
+ };
+
+-static int netlink_dump(struct sock *sk);
++static int netlink_dump(struct sock *sk, bool lock_taken);
+
+ /* nl_table locking explained:
+ * Lookup and traversal are protected with an RCU read-side lock. Insertion
+@@ -167,7 +167,7 @@ static inline u32 netlink_group_mask(u32 group)
+ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+ gfp_t gfp_mask)
+ {
+- unsigned int len = skb_end_offset(skb);
++ unsigned int len = skb->len;
+ struct sk_buff *new;
+
+ new = alloc_skb(len, gfp_mask);
+@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
+ if (is_vmalloc_addr(skb->head)) {
+ if (!skb->cloned ||
+ !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
+- vfree(skb->head);
++ vfree_atomic(skb->head);
+
+ skb->head = NULL;
+ }
+@@ -636,7 +636,7 @@ static struct proto netlink_proto = {
+ };
+
+ static int __netlink_create(struct net *net, struct socket *sock,
+- struct mutex *cb_mutex, int protocol,
++ struct mutex *dump_cb_mutex, int protocol,
+ int kern)
+ {
+ struct sock *sk;
+@@ -651,15 +651,11 @@ static int __netlink_create(struct net *net, struct socket *sock,
+ sock_init_data(sock, sk);
+
+ nlk = nlk_sk(sk);
+- if (cb_mutex) {
+- nlk->cb_mutex = cb_mutex;
+- } else {
+- nlk->cb_mutex = &nlk->cb_def_mutex;
+- mutex_init(nlk->cb_mutex);
+- lockdep_set_class_and_name(nlk->cb_mutex,
++ mutex_init(&nlk->nl_cb_mutex);
++ lockdep_set_class_and_name(&nlk->nl_cb_mutex,
+ nlk_cb_mutex_keys + protocol,
+ nlk_cb_mutex_key_strings[protocol]);
+- }
++ nlk->dump_cb_mutex = dump_cb_mutex;
+ init_waitqueue_head(&nlk->wait);
+
+ sk->sk_destruct = netlink_sock_destruct;
+@@ -1989,7 +1985,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+
+ if (READ_ONCE(nlk->cb_running) &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+- ret = netlink_dump(sk);
++ ret = netlink_dump(sk, false);
+ if (ret) {
+ WRITE_ONCE(sk->sk_err, -ret);
+ sk_error_report(sk);
+@@ -2147,8 +2143,9 @@ void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
+ {
+ struct sock *sk;
+ struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
++ struct hlist_node *tmp;
+
+- sk_for_each_bound(sk, &tbl->mc_list)
++ sk_for_each_bound_safe(sk, tmp, &tbl->mc_list)
+ netlink_update_socket_mc(nlk_sk(sk), group, 0);
+ }
+
+@@ -2198,7 +2195,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
+ return 0;
+ }
+
+-static int netlink_dump(struct sock *sk)
++static int netlink_dump(struct sock *sk, bool lock_taken)
+ {
+ struct netlink_sock *nlk = nlk_sk(sk);
+ struct netlink_ext_ack extack = {};
+@@ -2210,7 +2207,8 @@ static int netlink_dump(struct sock *sk)
+ int alloc_min_size;
+ int alloc_size;
+
+- mutex_lock(nlk->cb_mutex);
++ if (!lock_taken)
++ mutex_lock(&nlk->nl_cb_mutex);
+ if (!nlk->cb_running) {
+ err = -EINVAL;
+ goto errout_skb;
+@@ -2262,14 +2260,24 @@ static int netlink_dump(struct sock *sk)
+ netlink_skb_set_owner_r(skb, sk);
+
+ if (nlk->dump_done_errno > 0) {
++ struct mutex *extra_mutex = nlk->dump_cb_mutex;
++
+ cb->extack = &extack;
++
++ if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED)
++ extra_mutex = NULL;
++ if (extra_mutex)
++ mutex_lock(extra_mutex);
+ nlk->dump_done_errno = cb->dump(skb, cb);
++ if (extra_mutex)
++ mutex_unlock(extra_mutex);
++
+ cb->extack = NULL;
+ }
+
+ if (nlk->dump_done_errno > 0 ||
+ skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
+- mutex_unlock(nlk->cb_mutex);
++ mutex_unlock(&nlk->nl_cb_mutex);
+
+ if (sk_filter(sk, skb))
+ kfree_skb(skb);
+@@ -2303,13 +2311,13 @@ static int netlink_dump(struct sock *sk)
+ WRITE_ONCE(nlk->cb_running, false);
+ module = cb->module;
+ skb = cb->skb;
+- mutex_unlock(nlk->cb_mutex);
++ mutex_unlock(&nlk->nl_cb_mutex);
+ module_put(module);
+ consume_skb(skb);
+ return 0;
+
+ errout_skb:
+- mutex_unlock(nlk->cb_mutex);
++ mutex_unlock(&nlk->nl_cb_mutex);
+ kfree_skb(skb);
+ return err;
+ }
+@@ -2332,7 +2340,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ }
+
+ nlk = nlk_sk(sk);
+- mutex_lock(nlk->cb_mutex);
++ mutex_lock(&nlk->nl_cb_mutex);
+ /* A dump is in progress... */
+ if (nlk->cb_running) {
+ ret = -EBUSY;
+@@ -2352,6 +2360,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ cb->data = control->data;
+ cb->module = control->module;
+ cb->min_dump_alloc = control->min_dump_alloc;
++ cb->flags = control->flags;
+ cb->skb = skb;
+
+ cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
+@@ -2367,9 +2376,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ WRITE_ONCE(nlk->cb_running, true);
+ nlk->dump_done_errno = INT_MAX;
+
+- mutex_unlock(nlk->cb_mutex);
+-
+- ret = netlink_dump(sk);
++ ret = netlink_dump(sk, true);
+
+ sock_put(sk);
+
+@@ -2385,7 +2392,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ module_put(control->module);
+ error_unlock:
+ sock_put(sk);
+- mutex_unlock(nlk->cb_mutex);
++ mutex_unlock(&nlk->nl_cb_mutex);
+ error_free:
+ kfree_skb(skb);
+ return ret;
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 2145979b9986a0..9751e29d4bbb9a 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -39,8 +39,9 @@ struct netlink_sock {
+ bool cb_running;
+ int dump_done_errno;
+ struct netlink_callback cb;
+- struct mutex *cb_mutex;
+- struct mutex cb_def_mutex;
++ struct mutex nl_cb_mutex;
++
++ struct mutex *dump_cb_mutex;
+ void (*netlink_rcv)(struct sk_buff *skb);
+ int (*netlink_bind)(struct net *net, int group);
+ void (*netlink_unbind)(struct net *net, int group);
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 8315d31b53db42..d41c4a936ad0c9 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1690,6 +1690,9 @@ static int genl_bind(struct net *net, int group)
+ if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN))
+ ret = -EPERM;
++ if (grp->cap_sys_admin &&
++ !ns_capable(net->user_ns, CAP_SYS_ADMIN))
++ ret = -EPERM;
+
+ break;
+ }
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 96e91ab71573cf..f26dee48e03af1 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -453,16 +453,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
+ nr_init_timers(sk);
+
+ nr->t1 =
+- msecs_to_jiffies(sysctl_netrom_transport_timeout);
++ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
+ nr->t2 =
+- msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
++ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
+ nr->n2 =
+- msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
++ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
+ nr->t4 =
+- msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
++ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
+ nr->idle =
+- msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
+- nr->window = sysctl_netrom_transport_requested_window_size;
++ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
++ nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
+
+ nr->bpqext = 1;
+ nr->state = NR_STATE_0;
+@@ -954,7 +954,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+ * G8PZT's Xrouter which is sending packets with command type 7
+ * as an extension of the protocol.
+ */
+- if (sysctl_netrom_reset_circuit &&
++ if (READ_ONCE(sysctl_netrom_reset_circuit) &&
+ (frametype != NR_RESET || flags != 0))
+ nr_transmit_reset(skb, 1);
+
+diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
+index 3aaac4a22b3876..2c34389c3ce6f1 100644
+--- a/net/netrom/nr_dev.c
++++ b/net/netrom/nr_dev.c
+@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
+ buff[6] |= AX25_SSSID_SPARE;
+ buff += AX25_ADDR_LEN;
+
+- *buff++ = sysctl_netrom_network_ttl_initialiser;
++ *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+
+ *buff++ = NR_PROTO_IP;
+ *buff++ = NR_PROTO_IP;
+diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
+index 2f084b6f69d7e0..97944db6b5ac64 100644
+--- a/net/netrom/nr_in.c
++++ b/net/netrom/nr_in.c
+@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
+ break;
+
+ case NR_RESET:
+- if (sysctl_netrom_reset_circuit)
++ if (READ_ONCE(sysctl_netrom_reset_circuit))
+ nr_disconnect(sk, ECONNRESET);
+ break;
+
+@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
+ break;
+
+ case NR_RESET:
+- if (sysctl_netrom_reset_circuit)
++ if (READ_ONCE(sysctl_netrom_reset_circuit))
+ nr_disconnect(sk, ECONNRESET);
+ break;
+
+@@ -262,7 +262,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
+ break;
+
+ case NR_RESET:
+- if (sysctl_netrom_reset_circuit)
++ if (READ_ONCE(sysctl_netrom_reset_circuit))
+ nr_disconnect(sk, ECONNRESET);
+ break;
+
+diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
+index 44929657f5b717..5e531394a724b7 100644
+--- a/net/netrom/nr_out.c
++++ b/net/netrom/nr_out.c
+@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
+ dptr[6] |= AX25_SSSID_SPARE;
+ dptr += AX25_ADDR_LEN;
+
+- *dptr++ = sysctl_netrom_network_ttl_initialiser;
++ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+
+ if (!nr_route_frame(skb, NULL)) {
+ kfree_skb(skb);
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index baea3cbd76ca5b..bd2b17b219ae90 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+ nr_neigh->digipeat = NULL;
+ nr_neigh->ax25 = NULL;
+ nr_neigh->dev = dev;
+- nr_neigh->quality = sysctl_netrom_default_path_quality;
++ nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality);
+ nr_neigh->locked = 0;
+ nr_neigh->count = 0;
+ nr_neigh->number = nr_neigh_no++;
+@@ -285,22 +285,14 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+ return 0;
+ }
+
+-static inline void __nr_remove_node(struct nr_node *nr_node)
++static void nr_remove_node_locked(struct nr_node *nr_node)
+ {
++ lockdep_assert_held(&nr_node_list_lock);
++
+ hlist_del_init(&nr_node->node_node);
+ nr_node_put(nr_node);
+ }
+
+-#define nr_remove_node_locked(__node) \
+- __nr_remove_node(__node)
+-
+-static void nr_remove_node(struct nr_node *nr_node)
+-{
+- spin_lock_bh(&nr_node_list_lock);
+- __nr_remove_node(nr_node);
+- spin_unlock_bh(&nr_node_list_lock);
+-}
+-
+ static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
+ {
+ hlist_del_init(&nr_neigh->neigh_node);
+@@ -339,6 +331,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ return -EINVAL;
+ }
+
++ spin_lock_bh(&nr_node_list_lock);
+ nr_node_lock(nr_node);
+ for (i = 0; i < nr_node->count; i++) {
+ if (nr_node->routes[i].neighbour == nr_neigh) {
+@@ -352,7 +345,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ nr_node->count--;
+
+ if (nr_node->count == 0) {
+- nr_remove_node(nr_node);
++ nr_remove_node_locked(nr_node);
+ } else {
+ switch (i) {
+ case 0:
+@@ -367,12 +360,14 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ nr_node_put(nr_node);
+ }
+ nr_node_unlock(nr_node);
++ spin_unlock_bh(&nr_node_list_lock);
+
+ return 0;
+ }
+ }
+ nr_neigh_put(nr_neigh);
+ nr_node_unlock(nr_node);
++ spin_unlock_bh(&nr_node_list_lock);
+ nr_node_put(nr_node);
+
+ return -EINVAL;
+@@ -728,7 +723,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
+ nr_neigh->ax25 = NULL;
+ ax25_cb_put(ax25);
+
+- if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
++ if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
+ nr_neigh_put(nr_neigh);
+ return;
+ }
+@@ -766,7 +761,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ if (ax25 != NULL) {
+ ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
+ ax25->ax25_dev->dev, 0,
+- sysctl_netrom_obsolescence_count_initialiser);
++ READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
+ if (ret)
+ return ret;
+ }
+@@ -780,7 +775,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ return ret;
+ }
+
+- if (!sysctl_netrom_routing_control && ax25 != NULL)
++ if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
+ return 0;
+
+ /* Its Time-To-Live has expired */
+diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
+index e2d2af924cff4a..c3bbd5880850bb 100644
+--- a/net/netrom/nr_subr.c
++++ b/net/netrom/nr_subr.c
+@@ -182,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
+ *dptr++ = nr->my_id;
+ *dptr++ = frametype;
+ *dptr++ = nr->window;
+- if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
++ if (nr->bpqext)
++ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+ break;
+
+ case NR_DISCREQ:
+@@ -236,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
+ dptr[6] |= AX25_SSSID_SPARE;
+ dptr += AX25_ADDR_LEN;
+
+- *dptr++ = sysctl_netrom_network_ttl_initialiser;
++ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
+
+ if (mine) {
+ *dptr++ = 0;
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index 4e7c968cde2dcf..5e3ca068f04e04 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ is accepted() it isn't 'dead' so doesn't get removed. */
+ if (sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+- sock_hold(sk);
++ if (sk->sk_state == TCP_LISTEN)
++ sock_hold(sk);
+ bh_unlock_sock(sk);
+ nr_destroy_socket(sk);
+ goto out;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 1dac28136e6a35..18be13fb9b75a3 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -145,6 +145,13 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device,
+
+ static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
+ {
++ /* Since using nfc_llcp_local may result in usage of nfc_dev, whenever
++ * we hold a reference to local, we also need to hold a reference to
++ * the device to avoid UAF.
++ */
++ if (!nfc_get_device(local->dev->idx))
++ return NULL;
++
+ kref_get(&local->ref);
+
+ return local;
+@@ -177,10 +184,18 @@ static void local_release(struct kref *ref)
+
+ int nfc_llcp_local_put(struct nfc_llcp_local *local)
+ {
++ struct nfc_dev *dev;
++ int ret;
++
+ if (local == NULL)
+ return 0;
+
+- return kref_put(&local->ref, local_release);
++ dev = local->dev;
++
++ ret = kref_put(&local->ref, local_release);
++ nfc_put_device(dev);
++
++ return ret;
+ }
+
+ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+@@ -959,8 +974,17 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
+ }
+
+ new_sock = nfc_llcp_sock(new_sk);
+- new_sock->dev = local->dev;
++
+ new_sock->local = nfc_llcp_local_get(local);
++ if (!new_sock->local) {
++ reason = LLCP_DM_REJ;
++ sock_put(&new_sock->sk);
++ release_sock(&sock->sk);
++ sock_put(&sock->sk);
++ goto fail;
++ }
++
++ new_sock->dev = local->dev;
+ new_sock->rw = sock->rw;
+ new_sock->miux = sock->miux;
+ new_sock->nfc_protocol = sock->nfc_protocol;
+@@ -1597,7 +1621,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
+ if (local == NULL)
+ return -ENOMEM;
+
+- local->dev = ndev;
++ /* As we are going to initialize local's refcount, we need to get the
++ * nfc_dev to avoid UAF, otherwise there is no point in continuing.
++ * See nfc_llcp_local_get().
++ */
++ local->dev = nfc_get_device(ndev->idx);
++ if (!local->dev) {
++ kfree(local);
++ return -ENODEV;
++ }
++
+ INIT_LIST_HEAD(&local->list);
+ kref_init(&local->ref);
+ mutex_init(&local->sdp_lock);
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 645677f84dba25..d5344563e525c9 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = copy_safe_from_sockptr(&opt, sizeof(opt),
++ optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt > LLCP_MAX_RW) {
+ err = -EINVAL;
+@@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
+ break;
+ }
+
+- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+- err = -EFAULT;
++ err = copy_safe_from_sockptr(&opt, sizeof(opt),
++ optval, optlen);
++ if (err)
+ break;
+- }
+
+ if (opt > LLCP_MAX_MIUX) {
+ err = -EINVAL;
+@@ -796,6 +796,11 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ }
+
+ if (sk->sk_type == SOCK_DGRAM) {
++ if (sk->sk_state != LLCP_BOUND) {
++ release_sock(sk);
++ return -ENOTCONN;
++ }
++
+ DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr,
+ msg->msg_name);
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 6c9592d051206f..c4d2932c590322 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1208,6 +1208,10 @@ void nci_free_device(struct nci_dev *ndev)
+ {
+ nfc_free_device(ndev->nfc_dev);
+ nci_hci_deallocate(ndev);
++
++ /* drop partial rx data packet if present */
++ if (ndev->rx_data_reassembly)
++ kfree_skb(ndev->rx_data_reassembly);
+ kfree(ndev);
+ }
+ EXPORT_SYMBOL(nci_free_device);
+@@ -1459,6 +1463,19 @@ int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ ndev->ops->n_core_ops);
+ }
+
++static bool nci_valid_size(struct sk_buff *skb)
++{
++ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
++ BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
++
++ if (skb->len < hdr_size ||
++ !nci_plen(skb->data) ||
++ skb->len < hdr_size + nci_plen(skb->data)) {
++ return false;
++ }
++ return true;
++}
++
+ /* ---- NCI TX Data worker thread ---- */
+
+ static void nci_tx_work(struct work_struct *work)
+@@ -1512,6 +1529,11 @@ static void nci_rx_work(struct work_struct *work)
+ nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+ RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+
++ if (!nci_valid_size(skb)) {
++ kfree_skb(skb);
++ continue;
++ }
++
+ /* Process frame */
+ switch (nci_mt(skb->data)) {
+ case NCI_MT_RSP_PKT:
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index f4a38bd6a7e04f..bfb7758063f315 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -77,13 +77,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
+ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
++ unsigned int outer_hlen, mac_len, nsh_len;
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
+- unsigned int nsh_len, mac_len;
+- __be16 proto;
++ __be16 outer_proto, proto;
+
+ skb_reset_network_header(skb);
+
++ outer_proto = skb->protocol;
++ outer_hlen = skb_mac_header_len(skb);
+ mac_len = skb->mac_len;
+
+ if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+@@ -113,10 +115,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ }
+
+ for (skb = segs; skb; skb = skb->next) {
+- skb->protocol = htons(ETH_P_NSH);
+- __skb_push(skb, nsh_len);
+- skb->mac_header = mac_offset;
+- skb->network_header = skb->mac_header + mac_len;
++ skb->protocol = outer_proto;
++ __skb_push(skb, nsh_len + outer_hlen);
++ skb_reset_mac_header(skb);
++ skb_set_network_header(skb, outer_hlen);
+ skb->mac_len = mac_len;
+ }
+
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index fd66014d8a76ad..4f5cbcaa38386f 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -929,6 +929,12 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ pskb_trim(skb, ovs_mac_header_len(key));
+ }
+
++ /* Need to set the pkt_type to involve the routing layer. The
++ * packet movement through the OVS datapath doesn't generally
++ * use routing, but this is needed for tunnel cases.
++ */
++ skb->pkt_type = PACKET_OUTGOING;
++
+ if (likely(!mru ||
+ (skb->len <= mru + vport->dev->hard_header_len))) {
+ ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 0b9a785dea4595..3b980bf2770bb2 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -168,8 +168,13 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
+ static void ovs_ct_get_labels(const struct nf_conn *ct,
+ struct ovs_key_ct_labels *labels)
+ {
+- struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
++ struct nf_conn_labels *cl = NULL;
+
++ if (ct) {
++ if (ct->master && !nf_ct_is_confirmed(ct))
++ ct = ct->master;
++ cl = nf_ct_labels_find(ct);
++ }
+ if (cl)
+ memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
+ else
+@@ -985,7 +990,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+ if (err)
+ return err;
+
+- nf_conn_act_ct_ext_add(ct);
++ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&info->labels.mask)) {
+ err = ovs_ct_set_labels(ct, key, &info->labels.value,
+@@ -1380,8 +1385,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ if (ct_info.timeout[0]) {
+ if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
+ ct_info.timeout))
+- pr_info_ratelimited("Failed to associated timeout "
+- "policy `%s'\n", ct_info.timeout);
++ OVS_NLERR(log,
++ "Failed to associate timeout policy '%s'",
++ ct_info.timeout);
+ else
+ ct_info.nf_ct_timeout = rcu_dereference(
+ nf_ct_timeout_find(ct_info.ct)->timeout);
+@@ -1592,9 +1598,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
+ for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
+ struct hlist_head *head = &info->limits[i];
+ struct ovs_ct_limit *ct_limit;
++ struct hlist_node *next;
+
+- hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
+- lockdep_ovsl_is_held())
++ hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
+ kfree_rcu(ct_limit, rcu);
+ }
+ kfree(info->limits);
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 11c69415c60529..b7232142c13f83 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2707,7 +2707,7 @@ static struct pernet_operations ovs_net_ops = {
+ };
+
+ static const char * const ovs_drop_reasons[] = {
+-#define S(x) (#x),
++#define S(x) [(x) & ~SKB_DROP_REASON_SUBSYS_MASK] = (#x),
+ OVS_DROP_REASONS(S)
+ #undef S
+ };
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index 33b21a0c05481e..8a848ce72e2910 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -561,7 +561,6 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ */
+ key->tp.src = htons(icmp->icmp6_type);
+ key->tp.dst = htons(icmp->icmp6_code);
+- memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
+
+ if (icmp->icmp6_code == 0 &&
+ (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+@@ -570,6 +569,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ struct nd_msg *nd;
+ int offset;
+
++ memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
++
+ /* In order to process neighbor discovery options, we need the
+ * entire packet.
+ */
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 88965e2068ac65..ebc5728aab4eaf 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -48,6 +48,7 @@ struct ovs_len_tbl {
+
+ #define OVS_ATTR_NESTED -1
+ #define OVS_ATTR_VARIABLE -2
++#define OVS_COPY_ACTIONS_MAX_DEPTH 16
+
+ static bool actions_may_change_flow(const struct nlattr *actions)
+ {
+@@ -2545,13 +2546,15 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+- u32 mpls_label_count, bool log);
++ u32 mpls_label_count, bool log,
++ u32 depth);
+
+ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+- u32 mpls_label_count, bool log, bool last)
++ u32 mpls_label_count, bool log, bool last,
++ u32 depth)
+ {
+ const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+ const struct nlattr *probability, *actions;
+@@ -2602,7 +2605,8 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
+ return err;
+
+ err = __ovs_nla_copy_actions(net, actions, key, sfa,
+- eth_type, vlan_tci, mpls_label_count, log);
++ eth_type, vlan_tci, mpls_label_count, log,
++ depth + 1);
+
+ if (err)
+ return err;
+@@ -2617,7 +2621,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+- u32 mpls_label_count, bool log)
++ u32 mpls_label_count, bool log,
++ u32 depth)
+ {
+ const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1];
+ int start, action_start, err, rem;
+@@ -2660,7 +2665,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
+ return action_start;
+
+ err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
+- vlan_tci, mpls_label_count, log);
++ vlan_tci, mpls_label_count, log,
++ depth + 1);
+ if (err)
+ return err;
+
+@@ -2674,7 +2680,8 @@ static int validate_and_copy_clone(struct net *net,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+- u32 mpls_label_count, bool log, bool last)
++ u32 mpls_label_count, bool log, bool last,
++ u32 depth)
+ {
+ int start, err;
+ u32 exec;
+@@ -2694,7 +2701,8 @@ static int validate_and_copy_clone(struct net *net,
+ return err;
+
+ err = __ovs_nla_copy_actions(net, attr, key, sfa,
+- eth_type, vlan_tci, mpls_label_count, log);
++ eth_type, vlan_tci, mpls_label_count, log,
++ depth + 1);
+ if (err)
+ return err;
+
+@@ -3063,7 +3071,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count,
+- bool log, bool last)
++ bool log, bool last, u32 depth)
+ {
+ const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
+ struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
+@@ -3111,7 +3119,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+ return nested_acts_start;
+
+ err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
+- eth_type, vlan_tci, mpls_label_count, log);
++ eth_type, vlan_tci, mpls_label_count, log,
++ depth + 1);
+
+ if (err)
+ return err;
+@@ -3124,7 +3133,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
+ return nested_acts_start;
+
+ err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
+- eth_type, vlan_tci, mpls_label_count, log);
++ eth_type, vlan_tci, mpls_label_count, log,
++ depth + 1);
+
+ if (err)
+ return err;
+@@ -3152,12 +3162,16 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+- u32 mpls_label_count, bool log)
++ u32 mpls_label_count, bool log,
++ u32 depth)
+ {
+ u8 mac_proto = ovs_key_mac_proto(key);
+ const struct nlattr *a;
+ int rem, err;
+
++ if (depth > OVS_COPY_ACTIONS_MAX_DEPTH)
++ return -EOVERFLOW;
++
+ nla_for_each_nested(a, attr, rem) {
+ /* Expected argument lengths, (u32)-1 for variable length. */
+ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+@@ -3355,7 +3369,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ err = validate_and_copy_sample(net, a, key, sfa,
+ eth_type, vlan_tci,
+ mpls_label_count,
+- log, last);
++ log, last, depth);
+ if (err)
+ return err;
+ skip_copy = true;
+@@ -3426,7 +3440,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ err = validate_and_copy_clone(net, a, key, sfa,
+ eth_type, vlan_tci,
+ mpls_label_count,
+- log, last);
++ log, last, depth);
+ if (err)
+ return err;
+ skip_copy = true;
+@@ -3440,7 +3454,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ eth_type,
+ vlan_tci,
+ mpls_label_count,
+- log, last);
++ log, last,
++ depth);
+ if (err)
+ return err;
+ skip_copy = true;
+@@ -3450,7 +3465,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ case OVS_ACTION_ATTR_DEC_TTL:
+ err = validate_and_copy_dec_ttl(net, a, key, sfa,
+ eth_type, vlan_tci,
+- mpls_label_count, log);
++ mpls_label_count, log,
++ depth);
+ if (err)
+ return err;
+ skip_copy = true;
+@@ -3495,7 +3511,8 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+
+ (*sfa)->orig_len = nla_len(attr);
+ err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
+- key->eth.vlan.tci, mpls_label_count, log);
++ key->eth.vlan.tci, mpls_label_count, log,
++ 0);
+ if (err)
+ ovs_nla_free_flow_actions(*sfa);
+
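
The flow_netlink.c hunks above guard against stack exhaustion: the nested OVS actions (sample, clone, dec_ttl, check_pkt_len) all re-enter __ovs_nla_copy_actions(), so a crafted netlink message could previously nest actions deeply enough to overflow the kernel stack. Every validator now threads a depth counter through the recursion and refuses anything past OVS_COPY_ACTIONS_MAX_DEPTH with -EOVERFLOW. A minimal standalone sketch of the same guard; node and validate() are hypothetical stand-ins for the nested-attribute walk:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_DEPTH 16

    /* Hypothetical nested-node type standing in for nested netlink attrs. */
    struct node {
        struct node *child;
    };

    static int validate(const struct node *n, unsigned int depth)
    {
        if (depth > MAX_DEPTH)
            return -EOVERFLOW;  /* refuse rather than risk stack overflow */
        if (n->child)
            return validate(n->child, depth + 1); /* recurse, depth + 1 */
        return 0;
    }

    int main(void)
    {
        struct node chain[20] = { 0 };

        for (int i = 0; i < 19; i++)
            chain[i].child = &chain[i + 1];

        printf("%d\n", validate(&chain[0], 0)); /* -75: -EOVERFLOW, too deep */
        return 0;
    }
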
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a84e00b5904be0..3e5703537e4eb8 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -538,6 +538,61 @@ static void *packet_current_frame(struct packet_sock *po,
+ return packet_lookup_frame(po, rb, rb->head, status);
+ }
+
++static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
++{
++ u8 *skb_orig_data = skb->data;
++ int skb_orig_len = skb->len;
++ struct vlan_hdr vhdr, *vh;
++ unsigned int header_len;
++
++ if (!dev)
++ return 0;
++
++ /* In the SOCK_DGRAM scenario, skb data starts at the network
++ * protocol, which is after the VLAN headers. The outer VLAN
++ * header is at the hard_header_len offset in non-variable
++ * length link layer headers. If it's a VLAN device, the
++ * min_header_len should be used to exclude the VLAN header
++ * size.
++ */
++ if (dev->min_header_len == dev->hard_header_len)
++ header_len = dev->hard_header_len;
++ else if (is_vlan_dev(dev))
++ header_len = dev->min_header_len;
++ else
++ return 0;
++
++ skb_push(skb, skb->data - skb_mac_header(skb));
++ vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
++ if (skb_orig_data != skb->data) {
++ skb->data = skb_orig_data;
++ skb->len = skb_orig_len;
++ }
++ if (unlikely(!vh))
++ return 0;
++
++ return ntohs(vh->h_vlan_TCI);
++}
++
++static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
++{
++ __be16 proto = skb->protocol;
++
++ if (unlikely(eth_type_vlan(proto))) {
++ u8 *skb_orig_data = skb->data;
++ int skb_orig_len = skb->len;
++
++ skb_push(skb, skb->data - skb_mac_header(skb));
++ proto = __vlan_get_protocol(skb, proto, NULL);
++ if (skb_orig_data != skb->data) {
++ skb->data = skb_orig_data;
++ skb->len = skb_orig_len;
++ }
++ }
++
++ return proto;
++}
++
+ static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
+ {
+ del_timer_sync(&pkc->retire_blk_timer);
+@@ -1007,10 +1062,16 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
+ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+ {
++ struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
++
+ if (skb_vlan_tag_present(pkc->skb)) {
+ ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
+ ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
+ ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++ } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
++ ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
++ ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
++ ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else {
+ ppd->hv1.tp_vlan_tci = 0;
+ ppd->hv1.tp_vlan_tpid = 0;
+@@ -2431,6 +2492,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
+ h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
+ status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++ } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
++ h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
++ h.h2->tp_vlan_tpid = ntohs(skb->protocol);
++ status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
+ } else {
+ h.h2->tp_vlan_tci = 0;
+ h.h2->tp_vlan_tpid = 0;
+@@ -2460,7 +2525,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
+ sll->sll_family = AF_PACKET;
+ sll->sll_hatype = dev->type;
+- sll->sll_protocol = skb->protocol;
++ sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
++ vlan_get_protocol_dgram(skb) : skb->protocol;
+ sll->sll_pkttype = skb->pkt_type;
+ if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
+ sll->sll_ifindex = orig_dev->ifindex;
+@@ -2528,8 +2594,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ ts = __packet_set_timestamp(po, ph, skb);
+ __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
+
+- if (!packet_read_pending(&po->tx_ring))
+- complete(&po->skb_completion);
++ complete(&po->skb_completion);
+ }
+
+ sock_wfree(skb);
+@@ -3489,7 +3554,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ /* Original length was stored in sockaddr_ll fields */
+ origlen = PACKET_SKB_CB(skb)->sa.origlen;
+ sll->sll_family = AF_PACKET;
+- sll->sll_protocol = skb->protocol;
++ sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
++ vlan_get_protocol_dgram(skb) : skb->protocol;
+ }
+
+ sock_recv_cmsgs(msg, sk, skb);
+@@ -3546,6 +3612,21 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ aux.tp_vlan_tci = skb_vlan_tag_get(skb);
+ aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
+ aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++ } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
++ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
++ if (dev) {
++ aux.tp_vlan_tci = vlan_get_tci(skb, dev);
++ aux.tp_vlan_tpid = ntohs(skb->protocol);
++ aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
++ } else {
++ aux.tp_vlan_tci = 0;
++ aux.tp_vlan_tpid = 0;
++ }
++ rcu_read_unlock();
+ } else {
+ aux.tp_vlan_tci = 0;
+ aux.tp_vlan_tpid = 0;
+@@ -3806,28 +3887,30 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ case PACKET_TX_RING:
+ {
+ union tpacket_req_u req_u;
+- int len;
+
++ ret = -EINVAL;
+ lock_sock(sk);
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+- len = sizeof(req_u.req);
++ if (optlen < sizeof(req_u.req))
++ break;
++ ret = copy_from_sockptr(&req_u.req, optval,
++ sizeof(req_u.req)) ?
++ -EINVAL : 0;
+ break;
+ case TPACKET_V3:
+ default:
+- len = sizeof(req_u.req3);
++ if (optlen < sizeof(req_u.req3))
++ break;
++ ret = copy_from_sockptr(&req_u.req3, optval,
++ sizeof(req_u.req3)) ?
++ -EINVAL : 0;
+ break;
+ }
+- if (optlen < len) {
+- ret = -EINVAL;
+- } else {
+- if (copy_from_sockptr(&req_u.req, optval, len))
+- ret = -EFAULT;
+- else
+- ret = packet_set_ring(sk, &req_u, 0,
+- optname == PACKET_TX_RING);
+- }
++ if (!ret)
++ ret = packet_set_ring(sk, &req_u, 0,
++ optname == PACKET_TX_RING);
+ release_sock(sk);
+ return ret;
+ }
+@@ -4004,7 +4087,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+- po->prot_hook.ignore_outgoing = !!val;
++ WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
+ return 0;
+ }
+ case PACKET_TX_HAS_OFF:
+@@ -4135,7 +4218,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 0);
+ break;
+ case PACKET_IGNORE_OUTGOING:
+- val = po->prot_hook.ignore_outgoing;
++ val = READ_ONCE(po->prot_hook.ignore_outgoing);
+ break;
+ case PACKET_ROLLOVER_STATS:
+ if (!po->rollover)
+@@ -4300,7 +4383,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
+ struct sock *sk = sock->sk;
+
+ if (sk)
+- atomic_inc(&pkt_sk(sk)->mapped);
++ atomic_long_inc(&pkt_sk(sk)->mapped);
+ }
+
+ static void packet_mm_close(struct vm_area_struct *vma)
+@@ -4310,7 +4393,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
+ struct sock *sk = sock->sk;
+
+ if (sk)
+- atomic_dec(&pkt_sk(sk)->mapped);
++ atomic_long_dec(&pkt_sk(sk)->mapped);
+ }
+
+ static const struct vm_operations_struct packet_mmap_ops = {
+@@ -4405,7 +4488,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+
+ err = -EBUSY;
+ if (!closing) {
+- if (atomic_read(&po->mapped))
++ if (atomic_long_read(&po->mapped))
+ goto out;
+ if (packet_read_pending(rb))
+ goto out;
+@@ -4508,7 +4591,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+
+ err = -EBUSY;
+ mutex_lock(&po->pg_vec_lock);
+- if (closing || atomic_read(&po->mapped) == 0) {
++ if (closing || atomic_long_read(&po->mapped) == 0) {
+ err = 0;
+ spin_lock_bh(&rb_queue->lock);
+ swap(rb->pg_vec, pg_vec);
+@@ -4526,9 +4609,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ po->prot_hook.func = (po->rx_ring.pg_vec) ?
+ tpacket_rcv : packet_rcv;
+ skb_queue_purge(rb_queue);
+- if (atomic_read(&po->mapped))
+- pr_err("packet_mmap: vma is busy: %d\n",
+- atomic_read(&po->mapped));
++ if (atomic_long_read(&po->mapped))
++ pr_err("packet_mmap: vma is busy: %ld\n",
++ atomic_long_read(&po->mapped));
+ }
+ mutex_unlock(&po->pg_vec_lock);
+
+@@ -4606,7 +4689,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
+ }
+ }
+
+- atomic_inc(&po->mapped);
++ atomic_long_inc(&po->mapped);
+ vma->vm_ops = &packet_mmap_ops;
+ err = 0;
+
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 63f4865202c139..11ba8a78676abb 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -122,7 +122,7 @@ struct packet_sock {
+ __be16 num;
+ struct packet_rollover *rollover;
+ struct packet_mclist *mclist;
+- atomic_t mapped;
++ atomic_long_t mapped;
+ enum tpacket_versions tp_version;
+ unsigned int tp_hdrlen;
+ unsigned int tp_reserve;
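
Two of the af_packet changes above are worth spelling out. The PACKET_RX_RING/PACKET_TX_RING setsockopt path now checks optlen and copies exactly sizeof(req) or sizeof(req3) for the negotiated tp_version, making the per-version size handling explicit. And the mapped count in struct packet_sock widens from atomic_t to atomic_long_t: every packet_mmap() and VMA open bumps it, so a user who maps the ring enough times could wrap a 32-bit counter to zero and let packet_set_ring() free pages that are still mapped. A contrived userspace illustration of the wrap (slow by design: it really runs 2^32 iterations):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mapped32 = 0;
        uint64_t mapped64 = 0;

        /* 2^32 increments wrap the 32-bit counter back to zero. */
        for (uint64_t i = 0; i < (1ULL << 32); i++) {
            mapped32++;
            mapped64++;
        }
        printf("%u %llu\n", mapped32, (unsigned long long)mapped64);
        /* prints "0 4294967296": the narrow counter claims no mappings */
        return 0;
    }
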
+diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
+index 3aa50dc7535b77..976fe250b50955 100644
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -34,10 +34,10 @@ static int pn_ioctl(struct sock *sk, int cmd, int *karg)
+
+ switch (cmd) {
+ case SIOCINQ:
+- lock_sock(sk);
++ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ *karg = skb ? skb->len : 0;
+- release_sock(sk);
++ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ return 0;
+
+ case SIOCPNADDRESOURCE:
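
The SIOCINQ change above swaps lock_sock() for the receive queue's own spinlock: the ioctl only peeks at the head skb's length, and without any lock that skb could be dequeued and freed between skb_peek() and the skb->len read. The queue spinlock suffices for this and avoids a sleeping lock; the pep.c and rose hunks below apply the same pattern. As a kernel-style sketch (not a standalone program):

    /* Peek at the queue head under the queue's own spinlock; without it
     * the head skb could be freed mid-read. */
    static int queue_head_len(struct sock *sk)
    {
        struct sk_buff *skb;
        int len = 0;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        skb = skb_peek(&sk->sk_receive_queue);
        if (skb)
            len = skb->len;
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        return len;
    }
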
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index faba31f2eff290..3dd5f52bc1b58e 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -917,6 +917,37 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+ return 0;
+ }
+
++static unsigned int pep_first_packet_length(struct sock *sk)
++{
++ struct pep_sock *pn = pep_sk(sk);
++ struct sk_buff_head *q;
++ struct sk_buff *skb;
++ unsigned int len = 0;
++ bool found = false;
++
++ if (sock_flag(sk, SOCK_URGINLINE)) {
++ q = &pn->ctrlreq_queue;
++ spin_lock_bh(&q->lock);
++ skb = skb_peek(q);
++ if (skb) {
++ len = skb->len;
++ found = true;
++ }
++ spin_unlock_bh(&q->lock);
++ }
++
++ if (likely(!found)) {
++ q = &sk->sk_receive_queue;
++ spin_lock_bh(&q->lock);
++ skb = skb_peek(q);
++ if (skb)
++ len = skb->len;
++ spin_unlock_bh(&q->lock);
++ }
++
++ return len;
++}
++
+ static int pep_ioctl(struct sock *sk, int cmd, int *karg)
+ {
+ struct pep_sock *pn = pep_sk(sk);
+@@ -929,15 +960,7 @@ static int pep_ioctl(struct sock *sk, int cmd, int *karg)
+ break;
+ }
+
+- lock_sock(sk);
+- if (sock_flag(sk, SOCK_URGINLINE) &&
+- !skb_queue_empty(&pn->ctrlreq_queue))
+- *karg = skb_peek(&pn->ctrlreq_queue)->len;
+- else if (!skb_queue_empty(&sk->sk_receive_queue))
+- *karg = skb_peek(&sk->sk_receive_queue)->len;
+- else
+- *karg = 0;
+- release_sock(sk);
++ *karg = pep_first_packet_length(sk);
+ ret = 0;
+ break;
+
+diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
+index 59aebe29689077..894e5c72d6bfff 100644
+--- a/net/phonet/pn_netlink.c
++++ b/net/phonet/pn_netlink.c
+@@ -178,7 +178,7 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
+ rtm->rtm_type = RTN_UNICAST;
+ rtm->rtm_flags = 0;
+ if (nla_put_u8(skb, RTA_DST, dst) ||
+- nla_put_u32(skb, RTA_OIF, dev->ifindex))
++ nla_put_u32(skb, RTA_OIF, READ_ONCE(dev->ifindex)))
+ goto nla_put_failure;
+ nlmsg_end(skb, nlh);
+ return 0;
+@@ -193,7 +193,7 @@ void rtm_phonet_notify(int event, struct net_device *dev, u8 dst)
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+- skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
++ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct rtmsg)) +
+ nla_total_size(1) + nla_total_size(4), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+@@ -263,6 +263,7 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ struct net *net = sock_net(skb->sk);
++ int err = 0;
+ u8 addr;
+
+ rcu_read_lock();
+@@ -272,35 +273,29 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+ if (!dev)
+ continue;
+
+- if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
+- cb->nlh->nlmsg_seq, RTM_NEWROUTE) < 0)
+- goto out;
++ err = fill_route(skb, dev, addr << 2,
++ NETLINK_CB(cb->skb).portid,
++ cb->nlh->nlmsg_seq, RTM_NEWROUTE);
++ if (err < 0)
++ break;
+ }
+-
+-out:
+ rcu_read_unlock();
+ cb->args[0] = addr;
+
+- return skb->len;
++ return err;
+ }
+
++static const struct rtnl_msg_handler phonet_rtnl_msg_handlers[] __initdata_or_module = {
++ {THIS_MODULE, PF_PHONET, RTM_NEWADDR, addr_doit, NULL, 0},
++ {THIS_MODULE, PF_PHONET, RTM_DELADDR, addr_doit, NULL, 0},
++ {THIS_MODULE, PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, 0},
++ {THIS_MODULE, PF_PHONET, RTM_NEWROUTE, route_doit, NULL, 0},
++ {THIS_MODULE, PF_PHONET, RTM_DELROUTE, route_doit, NULL, 0},
++ {THIS_MODULE, PF_PHONET, RTM_GETROUTE, NULL, route_dumpit,
++ RTNL_FLAG_DUMP_UNLOCKED},
++};
++
+ int __init phonet_netlink_register(void)
+ {
+- int err = rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWADDR,
+- addr_doit, NULL, 0);
+- if (err)
+- return err;
+-
+- /* Further rtnl_register_module() cannot fail */
+- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELADDR,
+- addr_doit, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETADDR,
+- NULL, getaddr_dumpit, 0);
+- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWROUTE,
+- route_doit, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELROUTE,
+- route_doit, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETROUTE,
+- NULL, route_dumpit, 0);
+- return 0;
++ return rtnl_register_many(phonet_rtnl_msg_handlers);
+ }
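
The nlmsg_new() fix above corrects the allocation size: fill_route() emits a struct rtmsg header (12 bytes) plus RTA_DST (a u8) and RTA_OIF (a u32), but the message was sized from struct ifaddrmsg (8 bytes), under-reserving the payload. The dump path also now returns fill_route()'s error instead of skb->len, and the handlers move to a table consumed by rtnl_register_many(). A standalone re-computation of the corrected size; the alignment macros are userspace re-creations of the kernel helpers:

    #include <stdio.h>

    /* Userspace re-creations, for illustration only: netlink messages and
     * attributes are 4-byte aligned, each attribute has a 4-byte header. */
    #define NLMSG_ALIGNTO 4u
    #define NLMSG_ALIGN(len) (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
    #define NLA_HDRLEN 4u
    #define nla_total_size(payload) NLMSG_ALIGN(NLA_HDRLEN + (payload))

    struct rtmsg_sz { unsigned char b[12]; }; /* sizeof(struct rtmsg) == 12 */

    int main(void)
    {
        unsigned int size = NLMSG_ALIGN(sizeof(struct rtmsg_sz)) /* 12 */
                          + nla_total_size(1)  /* RTA_DST (u8):  8 */
                          + nla_total_size(4); /* RTA_OIF (u32): 8 */

        printf("%u\n", size); /* 28 bytes of payload for nlmsg_new() */
        return 0;
    }
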
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index 81a794e36f5358..c34e902855dbef 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -31,7 +31,8 @@ enum psample_nl_multicast_groups {
+
+ static const struct genl_multicast_group psample_nl_mcgrps[] = {
+ [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
+- [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
++ [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
++ .flags = GENL_UNS_ADMIN_PERM },
+ };
+
+ static struct genl_family psample_nl_family __ro_after_init;
+diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
+index 41ece61eb57ab7..00c51cf693f3d0 100644
+--- a/net/qrtr/af_qrtr.c
++++ b/net/qrtr/af_qrtr.c
+@@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+
+ mutex_lock(&qrtr_node_lock);
+ list_for_each_entry(node, &qrtr_all_nodes, item) {
+- skbn = skb_clone(skb, GFP_KERNEL);
++ skbn = pskb_copy(skb, GFP_KERNEL);
+ if (!skbn)
+ break;
+ skb_set_owner_w(skbn, skb->sk);
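
In qrtr_bcast_enqueue() above, skb_clone() becomes pskb_copy(). A clone shares the packet's data buffer with the original, so when each node's enqueue path writes into the payload or header area, the recipients scribble over one another; pskb_copy() duplicates the linear data so every node gets a private, writable copy. A kernel-style fragment; send_one() is a hypothetical stand-in for the per-node transmit path:

    list_for_each_entry(node, &all_nodes, item) {
        /* skb_clone() would hand every node the same writable data
         * buffer; pskb_copy() duplicates the linear data instead. */
        skbn = pskb_copy(skb, GFP_KERNEL);
        if (!skbn)
            break;
        skb_set_owner_w(skbn, skb->sk);
        send_one(node, skbn);
    }
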
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index b1db0b519179b6..654a3cc0d3479e 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -512,7 +512,9 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
+ if (!node)
+ return -ENOENT;
+
+- return server_del(node, port, true);
++ server_del(node, port, true);
++
++ return 0;
+ }
+
+ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
+@@ -723,6 +725,24 @@ int qrtr_ns_init(void)
+ if (ret < 0)
+ goto err_wq;
+
++ /* The qrtr ns socket's owner and creator are the same module, so we
++ * have to drop the qrtr module's reference count to keep it at zero
++ * after the ns socket is created; otherwise "rmmod" cannot remove
++ * the qrtr module even though it was inserted successfully.
++ *
++ * However, sock_create_kern() takes two references: one on the owner
++ * of the qrtr socket's proto_ops struct and one on the owner of the
++ * qrtr proto struct. We therefore drop the module reference count
++ * twice so that it stays at zero once the server's listening socket
++ * is created, and of course we must take both references back before
++ * the socket is closed.
++ */
++ module_put(qrtr_ns.sock->ops->owner);
++ module_put(qrtr_ns.sock->sk->sk_prot_creator->owner);
++
+ return 0;
+
+ err_wq:
+@@ -737,6 +757,15 @@ void qrtr_ns_remove(void)
+ {
+ cancel_work_sync(&qrtr_ns.work);
+ destroy_workqueue(qrtr_ns.workqueue);
++
++ /* sock_release() expects the two references that were put during
++ * qrtr_ns_init(). This function is only called during module remove,
++ * so try_stop_module() has already set the refcnt to 0. Use
++ * __module_get() instead of try_module_get() to successfully take two
++ * references.
++ */
++ __module_get(qrtr_ns.sock->ops->owner);
++ __module_get(qrtr_ns.sock->sk->sk_prot_creator->owner);
+ sock_release(qrtr_ns.sock);
+ }
+ EXPORT_SYMBOL_GPL(qrtr_ns_remove);
+diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
+index 01c4cdfef45df3..8435a20968ef51 100644
+--- a/net/rds/af_rds.c
++++ b/net/rds/af_rds.c
+@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
+
+ rs->rs_rx_traces = trace.rx_traces;
+ for (i = 0; i < rs->rs_rx_traces; i++) {
+- if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
++ if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
+ rs->rs_rx_traces = 0;
+ return -EFAULT;
+ }
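
The af_rds.c hunk above fixes an off-by-one: trace.rx_trace_pos[i] is later used as an index into arrays sized RDS_MSG_RX_DGRAM_TRACE_MAX, so the valid range is 0 to RDS_MSG_RX_DGRAM_TRACE_MAX - 1 and a value equal to the bound must already be rejected. A standalone illustration; TRACE_MAX is a stand-in for the kernel constant:

    #include <stdio.h>

    #define TRACE_MAX 4 /* stand-in for RDS_MSG_RX_DGRAM_TRACE_MAX */

    static int check(unsigned int pos)
    {
        int table[TRACE_MAX] = { 0 };

        /* Valid indices are 0..TRACE_MAX-1, so reject pos >= TRACE_MAX.
         * With the old "pos > TRACE_MAX" test, pos == 4 slipped through
         * and table[4] below would read one element past the end. */
        if (pos >= TRACE_MAX)
            return -1;
        return table[pos];
    }

    int main(void)
    {
        printf("%d\n", check(4)); /* -1: rejected, not an OOB read */
        return 0;
    }
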
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index fba82d36593add..00dbcd4d28e680 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -301,6 +301,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
+ kfree(sg);
+ }
+ ret = PTR_ERR(trans_private);
++ /* Trigger connection so that it's ready for the next retry */
++ if (ret == -ENODEV && cp)
++ rds_conn_connect_if_down(cp->cp_conn);
+ goto out;
+ }
+
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index c71b923764fd7c..5627f80013f8b1 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -425,6 +425,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ struct sock *sk = rds_rs_to_sk(rs);
+ int ret = 0;
+ unsigned long flags;
++ struct rds_incoming *to_drop = NULL;
+
+ write_lock_irqsave(&rs->rs_recv_lock, flags);
+ if (!list_empty(&inc->i_item)) {
+@@ -435,11 +436,14 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ -be32_to_cpu(inc->i_hdr.h_len),
+ inc->i_hdr.h_dport);
+ list_del_init(&inc->i_item);
+- rds_inc_put(inc);
++ to_drop = inc;
+ }
+ }
+ write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+
++ if (to_drop)
++ rds_inc_put(to_drop);
++
+ rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
+ return ret;
+ }
+@@ -758,16 +762,21 @@ void rds_clear_recv_queue(struct rds_sock *rs)
+ struct sock *sk = rds_rs_to_sk(rs);
+ struct rds_incoming *inc, *tmp;
+ unsigned long flags;
++ LIST_HEAD(to_drop);
+
+ write_lock_irqsave(&rs->rs_recv_lock, flags);
+ list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
+ rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
+ -be32_to_cpu(inc->i_hdr.h_len),
+ inc->i_hdr.h_dport);
++ list_move(&inc->i_item, &to_drop);
++ }
++ write_unlock_irqrestore(&rs->rs_recv_lock, flags);
++
++ list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
+ list_del_init(&inc->i_item);
+ rds_inc_put(inc);
+ }
+- write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ }
+
+ /*
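
Both recv.c hunks above move rds_inc_put() out from under rs_recv_lock. The put may drop the last reference and trigger teardown that should not run inside a write_lock_irqsave() section, so entries are unlinked (or list_move()'d onto a local list) while the lock is held and only released after it is dropped. The pattern, as a kernel-style fragment with the declarations as in the hunk above:

    LIST_HEAD(to_drop);
    unsigned long flags;

    write_lock_irqsave(&rs->rs_recv_lock, flags);
    list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item)
        list_move(&inc->i_item, &to_drop); /* unlink only, no freeing */
    write_unlock_irqrestore(&rs->rs_recv_lock, flags);

    list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
        list_del_init(&inc->i_item);
        rds_inc_put(inc); /* may drop the last ref; lock is released */
    }
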
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 5e57a1581dc605..09a28011065493 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset);
+
+ static int acquire_in_xmit(struct rds_conn_path *cp)
+ {
+- return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
++ return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
+ }
+
+ static void release_in_xmit(struct rds_conn_path *cp)
+ {
+- clear_bit(RDS_IN_XMIT, &cp->cp_flags);
+- smp_mb__after_atomic();
++ clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
+ /*
+ * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+ * hot path and finding waiters is very rare. We don't want to walk
+@@ -1313,12 +1312,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+
+ /* Parse any control messages the user may have included. */
+ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
+- if (ret) {
+- /* Trigger connection so that its ready for the next retry */
+- if (ret == -EAGAIN)
+- rds_conn_connect_if_down(conn);
++ if (ret)
+ goto out;
+- }
+
+ if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
+ printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
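
acquire_in_xmit()/release_in_xmit() in send.c form a hand-rolled one-bit lock around the transmit path, so they need lock and unlock memory ordering: stores made inside the critical section must be visible before the bit clears, and the successful bit test must happen before the section's own accesses. test_and_set_bit_lock() and clear_bit_unlock() provide exactly acquire and release semantics, whereas the old clear_bit() + smp_mb__after_atomic() only ordered the clear against *later* accesses. A kernel-style sketch of the idiom:

    #define XMIT_BIT 0

    static bool xmit_trylock(unsigned long *flags)
    {
        /* acquire: on success, later loads/stores cannot move before
         * the successful test-and-set */
        return test_and_set_bit_lock(XMIT_BIT, flags) == 0;
    }

    static void xmit_unlock(unsigned long *flags)
    {
        /* release: everything done while holding the bit is visible
         * before the bit reads clear to another CPU */
        clear_bit_unlock(XMIT_BIT, flags);
    }
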
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 5a81505fba9ac4..4e32d659524e0d 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -126,6 +126,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ ret = gpiod_direction_output(rfkill->reset_gpio, true);
++ if (ret)
++ return ret;
++
++ ret = gpiod_direction_output(rfkill->shutdown_gpio, true);
++ if (ret)
++ return ret;
++
+ rfkill->rfkill_dev = rfkill_alloc(rfkill->name, &pdev->dev,
+ rfkill->type, &rfkill_gpio_ops,
+ rfkill);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 49dafe9ac72f01..42e8b9e37516b2 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -182,21 +182,47 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
+ */
+ static void rose_kill_by_device(struct net_device *dev)
+ {
+- struct sock *s;
++ struct sock *sk, *array[16];
++ struct rose_sock *rose;
++ bool rescan;
++ int i, cnt;
+
++start:
++ rescan = false;
++ cnt = 0;
+ spin_lock_bh(&rose_list_lock);
+- sk_for_each(s, &rose_list) {
+- struct rose_sock *rose = rose_sk(s);
++ sk_for_each(sk, &rose_list) {
++ rose = rose_sk(sk);
++ if (rose->device == dev) {
++ if (cnt == ARRAY_SIZE(array)) {
++ rescan = true;
++ break;
++ }
++ sock_hold(sk);
++ array[cnt++] = sk;
++ }
++ }
++ spin_unlock_bh(&rose_list_lock);
+
++ for (i = 0; i < cnt; i++) {
++ sk = array[i];
++ rose = rose_sk(sk);
++ lock_sock(sk);
++ spin_lock_bh(&rose_list_lock);
+ if (rose->device == dev) {
+- rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
++ rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+ if (rose->neighbour)
+ rose->neighbour->use--;
+ netdev_put(rose->device, &rose->dev_tracker);
+ rose->device = NULL;
+ }
++ spin_unlock_bh(&rose_list_lock);
++ release_sock(sk);
++ sock_put(sk);
++ cond_resched();
+ }
+- spin_unlock_bh(&rose_list_lock);
++ if (rescan)
++ goto start;
+ }
+
+ /*
+@@ -656,7 +682,10 @@ static int rose_release(struct socket *sock)
+ break;
+ }
+
++ spin_lock_bh(&rose_list_lock);
+ netdev_put(rose->device, &rose->dev_tracker);
++ rose->device = NULL;
++ spin_unlock_bh(&rose_list_lock);
+ sock->sk = NULL;
+ release_sock(sk);
+ sock_put(sk);
+@@ -1315,9 +1344,11 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ case TIOCINQ: {
+ struct sk_buff *skb;
+ long amount = 0L;
+- /* These two are safe on a single CPU system as only user tasks fiddle here */
++
++ spin_lock_irq(&sk->sk_receive_queue.lock);
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
+ amount = skb->len;
++ spin_unlock_irq(&sk->sk_receive_queue.lock);
+ return put_user(amount, (unsigned int __user *) argp);
+ }
+
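
rose_kill_by_device() above is restructured around a snapshot: rose_disconnect() needs lock_sock(), a sleeping lock that must not be taken under the rose_list_lock spinlock, so the function now collects up to 16 matching sockets (with sock_hold()) under the spinlock, drops it, and processes each socket with lock_sock() while re-checking rose->device, rescanning from the top if more than 16 matched. The general shape, as a kernel-style sketch with hypothetical matches()/handle() helpers:

    static void kill_matching(void)
    {
        struct sock *sk, *array[16];
        bool rescan;
        int i, cnt;

    start:
        rescan = false;
        cnt = 0;
        spin_lock_bh(&list_lock);
        sk_for_each(sk, &the_list) {
            if (!matches(sk))
                continue;
            if (cnt == ARRAY_SIZE(array)) {
                rescan = true; /* more matches than slots */
                break;
            }
            sock_hold(sk); /* keep sk alive past the unlock */
            array[cnt++] = sk;
        }
        spin_unlock_bh(&list_lock);

        for (i = 0; i < cnt; i++) {
            sk = array[i]; /* note: array[i], not array[cnt] */
            lock_sock(sk); /* sleeping lock, only safe to take here */
            handle(sk); /* must re-check state: it may have changed */
            release_sock(sk);
            sock_put(sk);
            cond_resched();
        }
        if (rescan)
            goto start;
    }
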
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index e8e14c6f904d9e..66ad7dc10864e0 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -198,11 +198,19 @@ struct rxrpc_host_header {
+ */
+ struct rxrpc_skb_priv {
+ struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
+- u16 offset; /* Offset of data */
+- u16 len; /* Length of data */
+- u8 flags;
++ union {
++ struct {
++ u16 offset; /* Offset of data */
++ u16 len; /* Length of data */
++ u8 flags;
+ #define RXRPC_RX_VERIFIED 0x01
+-
++ };
++ struct {
++ rxrpc_seq_t first_ack; /* First packet in acks table */
++ u8 nr_acks; /* Number of acks+nacks */
++ u8 nr_nacks; /* Number of nacks */
++ };
++ };
+ struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
+ };
+
+@@ -506,7 +514,7 @@ struct rxrpc_connection {
+ enum rxrpc_call_completion completion; /* Completion condition */
+ s32 abort_code; /* Abort code of connection abort */
+ int debug_id; /* debug ID for printks */
+- atomic_t serial; /* packet serial number counter */
++ rxrpc_serial_t tx_serial; /* Outgoing packet serial number counter */
+ unsigned int hi_serial; /* highest serial number received */
+ u32 service_id; /* Service ID, possibly upgraded */
+ u32 security_level; /* Security level selected */
+@@ -680,7 +688,7 @@ struct rxrpc_call {
+ * packets) rather than bytes.
+ */
+ #define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
+-#define RXRPC_MIN_CWND (RXRPC_TX_SMSS > 2190 ? 2 : RXRPC_TX_SMSS > 1095 ? 3 : 4)
++#define RXRPC_MIN_CWND 4
+ u8 cong_cwnd; /* Congestion window size */
+ u8 cong_extra; /* Extra to send for congestion management */
+ u8 cong_ssthresh; /* Slow-start threshold */
+@@ -688,11 +696,11 @@ struct rxrpc_call {
+ u8 cong_dup_acks; /* Count of ACKs showing missing packets */
+ u8 cong_cumul_acks; /* Cumulative ACK count */
+ ktime_t cong_tstamp; /* Last time cwnd was changed */
++ struct sk_buff *cong_last_nack; /* Last ACK with nacks received */
+
+ /* Receive-phase ACK management (ACKs we send). */
+ u8 ackr_reason; /* reason to ACK */
+ u16 ackr_sack_base; /* Starting slot in SACK table ring */
+- rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
+ rxrpc_seq_t ackr_window; /* Base of SACK window */
+ rxrpc_seq_t ackr_wtop; /* Base of SACK window */
+ unsigned int ackr_nr_unacked; /* Number of unacked packets */
+@@ -726,7 +734,8 @@ struct rxrpc_call {
+ struct rxrpc_ack_summary {
+ u16 nr_acks; /* Number of ACKs in packet */
+ u16 nr_new_acks; /* Number of new ACKs in packet */
+- u16 nr_rot_new_acks; /* Number of rotated new ACKs */
++ u16 nr_new_nacks; /* Number of new nacks in packet */
++ u16 nr_retained_nacks; /* Number of nacks retained between ACKs */
+ u8 ack_reason;
+ bool saw_nacks; /* Saw NACKs in packet */
+ bool new_low_nack; /* T if new low NACK found */
+@@ -818,6 +827,20 @@ static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
+
+ #include <trace/events/rxrpc.h>
+
++/*
++ * Allocate the next serial number on a connection. 0 must be skipped.
++ */
++static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn)
++{
++ rxrpc_serial_t serial;
++
++ serial = conn->tx_serial;
++ if (serial == 0)
++ serial = 1;
++ conn->tx_serial = serial + 1;
++ return serial;
++}
++
+ /*
+ * af_rxrpc.c
+ */
+@@ -1043,7 +1066,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+ int rxrpc_io_thread(void *data);
+ static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
+ {
+- wake_up_process(local->io_thread);
++ wake_up_process(READ_ONCE(local->io_thread));
+ }
+
+ static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
+@@ -1076,6 +1099,7 @@ void rxrpc_send_version_request(struct rxrpc_local *local,
+ /*
+ * local_object.c
+ */
++void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set);
+ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
+ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace);
+ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace);
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index e363f21a20141b..0f78544d043be9 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -43,8 +43,6 @@ void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
+ unsigned long expiry = rxrpc_soft_ack_delay;
+ unsigned long now = jiffies, ack_at;
+
+- call->ackr_serial = serial;
+-
+ if (rxrpc_soft_ack_delay < expiry)
+ expiry = rxrpc_soft_ack_delay;
+ if (call->peer->srtt_us != 0)
+@@ -114,6 +112,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
+ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
+ {
+ struct rxrpc_ackpacket *ack = NULL;
++ struct rxrpc_skb_priv *sp;
+ struct rxrpc_txbuf *txb;
+ unsigned long resend_at;
+ rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
+@@ -141,14 +140,15 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
+ * explicitly NAK'd packets.
+ */
+ if (ack_skb) {
++ sp = rxrpc_skb(ack_skb);
+ ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
+
+- for (i = 0; i < ack->nAcks; i++) {
++ for (i = 0; i < sp->nr_acks; i++) {
+ rxrpc_seq_t seq;
+
+ if (ack->acks[i] & 1)
+ continue;
+- seq = ntohl(ack->firstPacket) + i;
++ seq = sp->first_ack + i;
+ if (after(txb->seq, transmitted))
+ break;
+ if (after(txb->seq, seq))
+@@ -373,7 +373,6 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
+ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+ {
+ unsigned long now, next, t;
+- rxrpc_serial_t ackr_serial;
+ bool resend = false, expired = false;
+ s32 abort_code;
+
+@@ -423,8 +422,7 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+ cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
+- ackr_serial = xchg(&call->ackr_serial, 0);
+- rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
++ rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
+ rxrpc_propose_ack_ping_for_lost_ack);
+ }
+
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index 773eecd1e9794d..29385908099efc 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -175,12 +175,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
+ call->rx_winsize = rxrpc_rx_window_size;
+ call->tx_winsize = 16;
+
+- if (RXRPC_TX_SMSS > 2190)
+- call->cong_cwnd = 2;
+- else if (RXRPC_TX_SMSS > 1095)
+- call->cong_cwnd = 3;
+- else
+- call->cong_cwnd = 4;
++ call->cong_cwnd = RXRPC_MIN_CWND;
+ call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
+
+ call->rxnet = rxnet;
+@@ -545,8 +540,8 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
+ */
+ static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+ {
+- skb_queue_purge(&call->recvmsg_queue);
+- skb_queue_purge(&call->rx_oos_queue);
++ rxrpc_purge_queue(&call->recvmsg_queue);
++ rxrpc_purge_queue(&call->rx_oos_queue);
+ }
+
+ /*
+@@ -685,6 +680,7 @@ static void rxrpc_destroy_call(struct work_struct *work)
+
+ del_timer_sync(&call->timer);
+
++ rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
+ rxrpc_cleanup_ring(call);
+ while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
+ struct rxrpc_txbuf, call_link))) {
+diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
+index 981ca5b98bcb90..1d95f8bc769fa2 100644
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -73,6 +73,7 @@ static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
+ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
+ gfp_t gfp)
+ {
++ static atomic_t rxrpc_bundle_id;
+ struct rxrpc_bundle *bundle;
+
+ bundle = kzalloc(sizeof(*bundle), gfp);
+@@ -85,6 +86,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
+ bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
+ bundle->service_id = call->dest_srx.srx_service;
+ bundle->security_level = call->security_level;
++ bundle->debug_id = atomic_inc_return(&rxrpc_bundle_id);
+ refcount_set(&bundle->ref, 1);
+ atomic_set(&bundle->active, 1);
+ INIT_LIST_HEAD(&bundle->waiting_calls);
+@@ -105,7 +107,8 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
+
+ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+ {
+- trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free);
++ trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
++ rxrpc_bundle_free);
+ rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
+ key_put(bundle->key);
+ kfree(bundle);
+@@ -239,7 +242,6 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
+ */
+ int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
+ {
+- static atomic_t rxrpc_bundle_id;
+ struct rxrpc_bundle *bundle, *candidate;
+ struct rxrpc_local *local = call->local;
+ struct rb_node *p, **pp, *parent;
+@@ -306,7 +308,6 @@ int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
+ }
+
+ _debug("new bundle");
+- candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
+ rb_link_node(&candidate->local_node, parent, pp);
+ rb_insert_color(&candidate->local_node, &local->client_bundles);
+ call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 95f4bc206b3dc9..598b4ee389fc1e 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -88,13 +88,21 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ struct rxrpc_ackpacket ack;
+ };
+ } __attribute__((packed)) pkt;
+- struct rxrpc_ackinfo ack_info;
++ struct rxrpc_acktrailer trailer;
+ size_t len;
+ int ret, ioc;
+ u32 serial, mtu, call_id, padding;
+
+ _enter("%d", conn->debug_id);
+
++ if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
++ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
++ &pkt.ack, sizeof(pkt.ack)) < 0)
++ return;
++ if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
++ return;
++ }
++
+ chan = &conn->channels[channel];
+
+ /* If the last call got moved on whilst we were waiting to run, just
+@@ -114,10 +122,10 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ iov[0].iov_len = sizeof(pkt.whdr);
+ iov[1].iov_base = &padding;
+ iov[1].iov_len = 3;
+- iov[2].iov_base = &ack_info;
+- iov[2].iov_len = sizeof(ack_info);
++ iov[2].iov_base = &trailer;
++ iov[2].iov_len = sizeof(trailer);
+
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+
+ pkt.whdr.epoch = htonl(conn->proto.epoch);
+ pkt.whdr.cid = htonl(conn->proto.cid | channel);
+@@ -150,14 +158,14 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+ pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
+ pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
+ pkt.ack.nAcks = 0;
+- ack_info.rxMTU = htonl(rxrpc_rx_mtu);
+- ack_info.maxMTU = htonl(mtu);
+- ack_info.rwind = htonl(rxrpc_rx_window_size);
+- ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
++ trailer.maxMTU = htonl(rxrpc_rx_mtu);
++ trailer.ifMTU = htonl(mtu);
++ trailer.rwind = htonl(rxrpc_rx_window_size);
++ trailer.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ pkt.whdr.flags |= RXRPC_SLOW_START_OK;
+ padding = 0;
+ iov[0].iov_len += sizeof(pkt.ack);
+- len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
++ len += sizeof(pkt.ack) + 3 + sizeof(trailer);
+ ioc = 3;
+
+ trace_rxrpc_tx_ack(chan->call_debug_id, serial,
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index ac85d4644a3c3a..7aa58129ae4550 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -118,18 +118,13 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
+ switch (srx->transport.family) {
+ case AF_INET:
+ if (peer->srx.transport.sin.sin_port !=
+- srx->transport.sin.sin_port ||
+- peer->srx.transport.sin.sin_addr.s_addr !=
+- srx->transport.sin.sin_addr.s_addr)
++ srx->transport.sin.sin_port)
+ goto not_found;
+ break;
+ #ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ if (peer->srx.transport.sin6.sin6_port !=
+- srx->transport.sin6.sin6_port ||
+- memcmp(&peer->srx.transport.sin6.sin6_addr,
+- &srx->transport.sin6.sin6_addr,
+- sizeof(struct in6_addr)) != 0)
++ srx->transport.sin6.sin6_port)
+ goto not_found;
+ break;
+ #endif
+@@ -212,7 +207,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
+ conn->idle_timestamp = jiffies;
+ if (atomic_dec_and_test(&conn->active))
+ rxrpc_set_service_reap_timer(conn->rxnet,
+- jiffies + rxrpc_connection_expiry);
++ jiffies + rxrpc_connection_expiry * HZ);
+ }
+
+ rxrpc_put_call(call, rxrpc_call_put_io_thread);
+diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
+index 89ac05a711a427..39c908a3ca6e89 100644
+--- a/net/rxrpc/conn_service.c
++++ b/net/rxrpc/conn_service.c
+@@ -25,7 +25,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
+ struct rxrpc_conn_proto k;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rb_node *p;
+- unsigned int seq = 0;
++ unsigned int seq = 1;
+
+ k.epoch = sp->hdr.epoch;
+ k.cid = sp->hdr.cid & RXRPC_CIDMASK;
+@@ -35,6 +35,7 @@ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
+ * under just the RCU read lock, so we have to check for
+ * changes.
+ */
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
+
+ p = rcu_dereference_raw(peer->service_conns.rb_node);
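
The conn_service.c change restores the read_seqbegin_or_lock() contract: the helper only attempts a lockless read when handed an even sequence number and takes the lock when it is odd. Starting seq at 1 and incrementing at the top of each pass makes the first attempt even (lockless) and every retry odd (locked); with the old seq = 0 the retry pass stayed even and could keep spinning locklessly. The canonical loop, as a kernel-style sketch where lookup_in_tree() is a hypothetical read-side body:

    static struct thing *lookup(struct my_table *t, unsigned int key)
    {
        struct thing *obj;
        int seq = 1;

        do {
            seq++; /* 2 (even, lockless) first, odd on any retry */
            read_seqbegin_or_lock(&t->lock, &seq);
            obj = lookup_in_tree(t, key);
        } while (need_seqretry(&t->lock, seq));
        done_seqretry(&t->lock, seq);
        return obj;
    }
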
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 030d64f282f370..5dfda1ac51dda7 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -9,6 +9,17 @@
+
+ #include "ar-internal.h"
+
++/* Override priority when generating ACKs for received DATA */
++static const u8 rxrpc_ack_priority[RXRPC_ACK__INVALID] = {
++ [RXRPC_ACK_IDLE] = 1,
++ [RXRPC_ACK_DELAY] = 2,
++ [RXRPC_ACK_REQUESTED] = 3,
++ [RXRPC_ACK_DUPLICATE] = 4,
++ [RXRPC_ACK_EXCEEDS_WINDOW] = 5,
++ [RXRPC_ACK_NOSPACE] = 6,
++ [RXRPC_ACK_OUT_OF_SEQUENCE] = 7,
++};
++
+ static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
+ enum rxrpc_abort_reason why)
+ {
+@@ -45,11 +56,9 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
+ }
+
+ cumulative_acks += summary->nr_new_acks;
+- cumulative_acks += summary->nr_rot_new_acks;
+ if (cumulative_acks > 255)
+ cumulative_acks = 255;
+
+- summary->mode = call->cong_mode;
+ summary->cwnd = call->cong_cwnd;
+ summary->ssthresh = call->cong_ssthresh;
+ summary->cumulative_acks = cumulative_acks;
+@@ -151,6 +160,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
+ cwnd = RXRPC_TX_MAX_WINDOW;
+ call->cong_cwnd = cwnd;
+ call->cong_cumul_acks = cumulative_acks;
++ summary->mode = call->cong_mode;
+ trace_rxrpc_congest(call, summary, acked_serial, change);
+ if (resend)
+ rxrpc_resend(call, skb);
+@@ -213,7 +223,6 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+ list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
+ if (before_eq(txb->seq, call->acks_hard_ack))
+ continue;
+- summary->nr_rot_new_acks++;
+ if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
+ set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ rot_last = true;
+@@ -254,6 +263,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
+ {
+ ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
+
++ if (unlikely(call->cong_last_nack)) {
++ rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
++ call->cong_last_nack = NULL;
++ }
++
+ switch (__rxrpc_call_state(call)) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+@@ -363,7 +377,7 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
+ * Process a DATA packet.
+ */
+ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
+- bool *_notify)
++ bool *_notify, rxrpc_serial_t *_ack_serial, int *_ack_reason)
+ {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct sk_buff *oos;
+@@ -416,8 +430,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
+ /* Send an immediate ACK if we fill in a hole */
+ else if (!skb_queue_empty(&call->rx_oos_queue))
+ ack_reason = RXRPC_ACK_DELAY;
+- else
+- call->ackr_nr_unacked++;
+
+ window++;
+ if (after(window, wtop)) {
+@@ -495,12 +507,16 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
+ }
+
+ send_ack:
+- if (ack_reason >= 0)
+- rxrpc_send_ACK(call, ack_reason, serial,
+- rxrpc_propose_ack_input_data);
+- else
+- rxrpc_propose_delay_ACK(call, serial,
+- rxrpc_propose_ack_input_data);
++ if (ack_reason >= 0) {
++ if (rxrpc_ack_priority[ack_reason] > rxrpc_ack_priority[*_ack_reason]) {
++ *_ack_serial = serial;
++ *_ack_reason = ack_reason;
++ } else if (rxrpc_ack_priority[ack_reason] == rxrpc_ack_priority[*_ack_reason] &&
++ ack_reason == RXRPC_ACK_REQUESTED) {
++ *_ack_serial = serial;
++ *_ack_reason = ack_reason;
++ }
++ }
+ }
+
+ /*
+@@ -511,9 +527,11 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
+ struct rxrpc_jumbo_header jhdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
+ struct sk_buff *jskb;
++ rxrpc_serial_t ack_serial = 0;
+ unsigned int offset = sizeof(struct rxrpc_wire_header);
+ unsigned int len = skb->len - offset;
+ bool notify = false;
++ int ack_reason = 0;
+
+ while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
+ if (len < RXRPC_JUMBO_SUBPKTLEN)
+@@ -533,7 +551,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
+ jsp = rxrpc_skb(jskb);
+ jsp->offset = offset;
+ jsp->len = RXRPC_JUMBO_DATALEN;
+- rxrpc_input_data_one(call, jskb, &notify);
++ rxrpc_input_data_one(call, jskb, &notify, &ack_serial, &ack_reason);
+ rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket);
+
+ sp->hdr.flags = jhdr.flags;
+@@ -546,7 +564,16 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
+
+ sp->offset = offset;
+ sp->len = len;
+- rxrpc_input_data_one(call, skb, &notify);
++ rxrpc_input_data_one(call, skb, &notify, &ack_serial, &ack_reason);
++
++ if (ack_reason > 0) {
++ rxrpc_send_ACK(call, ack_reason, ack_serial,
++ rxrpc_propose_ack_input_data);
++ } else {
++ call->ackr_nr_unacked++;
++ rxrpc_propose_delay_ACK(call, sp->hdr.serial,
++ rxrpc_propose_ack_input_data);
++ }
+ if (notify) {
+ trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
+ rxrpc_notify_socket(call);
+@@ -643,12 +670,8 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_mb(); /* Read data before setting avail bit */
+ set_bit(i, &call->rtt_avail);
+- if (type != rxrpc_rtt_rx_cancel)
+- rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+- sent_at, resp_time);
+- else
+- trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+- orig_serial, acked_serial, 0, 0);
++ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
++ sent_at, resp_time);
+ matched = true;
+ }
+
+@@ -671,14 +694,14 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ /*
+ * Process the extra information that may be appended to an ACK packet
+ */
+-static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+- struct rxrpc_ackinfo *ackinfo)
++static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
++ struct rxrpc_acktrailer *trailer)
+ {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_peer *peer;
+ unsigned int mtu;
+ bool wake = false;
+- u32 rwind = ntohl(ackinfo->rwind);
++ u32 rwind = ntohl(trailer->rwind);
+
+ if (rwind > RXRPC_TX_MAX_WINDOW)
+ rwind = RXRPC_TX_MAX_WINDOW;
+@@ -689,10 +712,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ call->tx_winsize = rwind;
+ }
+
+- if (call->cong_ssthresh > rwind)
+- call->cong_ssthresh = rwind;
+-
+- mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
++ mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
+
+ peer = call->peer;
+ if (mtu < peer->maxdata) {
+@@ -706,6 +726,43 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ wake_up(&call->waitq);
+ }
+
++/*
++ * Determine how many nacks from the previous ACK have now been satisfied.
++ */
++static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
++ struct rxrpc_ack_summary *summary,
++ rxrpc_seq_t seq)
++{
++ struct sk_buff *skb = call->cong_last_nack;
++ struct rxrpc_ackpacket ack;
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ unsigned int i, new_acks = 0, retained_nacks = 0;
++ rxrpc_seq_t old_seq = sp->first_ack;
++ u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack);
++
++ if (after_eq(seq, old_seq + sp->nr_acks)) {
++ summary->nr_new_acks += sp->nr_nacks;
++ summary->nr_new_acks += seq - (old_seq + sp->nr_acks);
++ summary->nr_retained_nacks = 0;
++ } else if (seq == old_seq) {
++ summary->nr_retained_nacks = sp->nr_nacks;
++ } else {
++ for (i = 0; i < sp->nr_acks; i++) {
++ if (acks[i] == RXRPC_ACK_TYPE_NACK) {
++ if (before(old_seq + i, seq))
++ new_acks++;
++ else
++ retained_nacks++;
++ }
++ }
++
++ summary->nr_new_acks += new_acks;
++ summary->nr_retained_nacks = retained_nacks;
++ }
++
++ return old_seq + sp->nr_acks;
++}
++
+ /*
+ * Process individual soft ACKs.
+ *
+@@ -715,25 +772,51 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ * the timer on the basis that the peer might just not have processed them at
+ * the time the ACK was sent.
+ */
+-static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
+- rxrpc_seq_t seq, int nr_acks,
+- struct rxrpc_ack_summary *summary)
++static void rxrpc_input_soft_acks(struct rxrpc_call *call,
++ struct rxrpc_ack_summary *summary,
++ struct sk_buff *skb,
++ rxrpc_seq_t seq,
++ rxrpc_seq_t since)
+ {
+- unsigned int i;
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ unsigned int i, old_nacks = 0;
++ rxrpc_seq_t lowest_nak = seq + sp->nr_acks;
++ u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
+
+- for (i = 0; i < nr_acks; i++) {
++ for (i = 0; i < sp->nr_acks; i++) {
+ if (acks[i] == RXRPC_ACK_TYPE_ACK) {
+ summary->nr_acks++;
+- summary->nr_new_acks++;
++ if (after_eq(seq, since))
++ summary->nr_new_acks++;
+ } else {
+- if (!summary->saw_nacks &&
+- call->acks_lowest_nak != seq + i) {
+- call->acks_lowest_nak = seq + i;
+- summary->new_low_nack = true;
+- }
+ summary->saw_nacks = true;
++ if (before(seq, since)) {
++ /* Overlap with previous ACK */
++ old_nacks++;
++ } else {
++ summary->nr_new_nacks++;
++ sp->nr_nacks++;
++ }
++
++ if (before(seq, lowest_nak))
++ lowest_nak = seq;
+ }
++ seq++;
+ }
++
++ if (lowest_nak != call->acks_lowest_nak) {
++ call->acks_lowest_nak = lowest_nak;
++ summary->new_low_nack = true;
++ }
++
++ /* We *can* have more nacks than we did - the peer is permitted to drop
++ * packets it has soft-acked and re-request them. Further, it is
++ * possible for the nack distribution to change whilst the number of
++ * nacks stays the same or goes down.
++ */
++ if (old_nacks < summary->nr_retained_nacks)
++ summary->nr_new_acks += summary->nr_retained_nacks - old_nacks;
++ summary->nr_retained_nacks = old_nacks;
+ }
+
+ /*
+@@ -775,9 +858,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ struct rxrpc_ack_summary summary = { 0 };
+ struct rxrpc_ackpacket ack;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+- struct rxrpc_ackinfo info;
++ struct rxrpc_acktrailer trailer;
+ rxrpc_serial_t ack_serial, acked_serial;
+- rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
++ rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
+ int nr_acks, offset, ioffset;
+
+ _enter("");
+@@ -793,6 +876,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ prev_pkt = ntohl(ack.previousPacket);
+ hard_ack = first_soft_ack - 1;
+ nr_acks = ack.nAcks;
++ sp->first_ack = first_soft_ack;
++ sp->nr_acks = nr_acks;
+ summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
+ ack.reason : RXRPC_ACK__INVALID);
+
+@@ -801,28 +886,21 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ summary.ack_reason, nr_acks);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
+
+- switch (ack.reason) {
+- case RXRPC_ACK_PING_RESPONSE:
+- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+- rxrpc_rtt_rx_ping_response);
+- break;
+- case RXRPC_ACK_REQUESTED:
+- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+- rxrpc_rtt_rx_requested_ack);
+- break;
+- default:
+- if (acked_serial != 0)
++ if (acked_serial != 0) {
++ switch (ack.reason) {
++ case RXRPC_ACK_PING_RESPONSE:
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+- rxrpc_rtt_rx_cancel);
+- break;
+- }
+-
+- if (ack.reason == RXRPC_ACK_PING) {
+- rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
+- rxrpc_propose_ack_respond_to_ping);
+- } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
+- rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
+- rxrpc_propose_ack_respond_to_ack);
++ rxrpc_rtt_rx_ping_response);
++ break;
++ case RXRPC_ACK_REQUESTED:
++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
++ rxrpc_rtt_rx_requested_ack);
++ break;
++ default:
++ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
++ rxrpc_rtt_rx_other_ack);
++ break;
++ }
+ }
+
+ /* If we get an EXCEEDS_WINDOW ACK from the server, it probably
+@@ -835,7 +913,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ rxrpc_is_client_call(call)) {
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ 0, -ENETRESET);
+- return;
++ goto send_response;
+ }
+
+ /* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
+@@ -849,7 +927,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ rxrpc_is_client_call(call)) {
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ 0, -ENETRESET);
+- return;
++ goto send_response;
+ }
+
+ /* Discard any out-of-order or duplicate ACKs (outside lock). */
+@@ -857,18 +935,28 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
+ first_soft_ack, call->acks_first_seq,
+ prev_pkt, call->acks_prev_seq);
+- return;
++ goto send_response;
+ }
+
+- info.rxMTU = 0;
++ trailer.maxMTU = 0;
+ ioffset = offset + nr_acks + 3;
+- if (skb->len >= ioffset + sizeof(info) &&
+- skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
+- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
++ if (skb->len >= ioffset + sizeof(trailer) &&
++ skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
++ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);
+
+ if (nr_acks > 0)
+ skb_condense(skb);
+
++ if (call->cong_last_nack) {
++ since = rxrpc_input_check_prev_ack(call, &summary, first_soft_ack);
++ rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
++ call->cong_last_nack = NULL;
++ } else {
++ summary.nr_new_acks = first_soft_ack - call->acks_first_seq;
++ call->acks_lowest_nak = first_soft_ack + nr_acks;
++ since = first_soft_ack;
++ }
++
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_first_seq = first_soft_ack;
+ call->acks_prev_seq = prev_pkt;
+@@ -877,14 +965,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ case RXRPC_ACK_PING:
+ break;
+ default:
+- if (after(acked_serial, call->acks_highest_serial))
++ if (acked_serial && after(acked_serial, call->acks_highest_serial))
+ call->acks_highest_serial = acked_serial;
+ break;
+ }
+
+ /* Parse rwind and mtu sizes if provided. */
+- if (info.rxMTU)
+- rxrpc_input_ackinfo(call, skb, &info);
++ if (trailer.maxMTU)
++ rxrpc_input_ack_trailer(call, skb, &trailer);
+
+ if (first_soft_ack == 0)
+ return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
+@@ -897,7 +985,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ case RXRPC_CALL_SERVER_AWAIT_ACK:
+ break;
+ default:
+- return;
++ goto send_response;
+ }
+
+ if (before(hard_ack, call->acks_hard_ack) ||
+@@ -909,15 +997,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ if (after(hard_ack, call->acks_hard_ack)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
+- return;
++ goto send_response;
+ }
+ }
+
+ if (nr_acks > 0) {
+ if (offset > (int)skb->len - nr_acks)
+ return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
+- rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
+- nr_acks, &summary);
++ rxrpc_input_soft_acks(call, &summary, skb, first_soft_ack, since);
++ rxrpc_get_skb(skb, rxrpc_skb_get_last_nack);
++ call->cong_last_nack = skb;
+ }
+
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
+@@ -927,6 +1016,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ rxrpc_propose_ack_ping_for_lost_reply);
+
+ rxrpc_congestion_management(call, skb, &summary, acked_serial);
++
++send_response:
++ if (ack.reason == RXRPC_ACK_PING)
++ rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
++ rxrpc_propose_ack_respond_to_ping);
++ else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
++ rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
++ rxrpc_propose_ack_respond_to_ack);
+ }
+
+ /*
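
The input.c changes above coalesce ACK generation across the subpackets of a jumbo DATA packet: rxrpc_input_data_one() no longer sends an ACK per subpacket but records the most urgent (reason, serial) pair using the rxrpc_ack_priority[] table, with RXRPC_ACK_REQUESTED additionally refreshing the serial on a priority tie; rxrpc_input_split_jumbo() then emits a single ACK, or proposes a delayed one if nothing urgent was seen. A standalone illustration of the selection rule, with hypothetical enum values and priorities:

    #include <stdio.h>

    /* Stand-in priority table: higher value = more urgent. */
    enum { ACK_IDLE, ACK_DELAY, ACK_REQUESTED, ACK_NOSPACE, ACK__COUNT };
    static const unsigned char prio[ACK__COUNT] = { 1, 2, 3, 6 };

    static void record_ack(int reason, unsigned int serial,
                           int *best_reason, unsigned int *best_serial)
    {
        /* Keep only the highest-priority reason seen so far. */
        if (prio[reason] > prio[*best_reason]) {
            *best_reason = reason;
            *best_serial = serial;
        }
    }

    int main(void)
    {
        int best = ACK_IDLE;
        unsigned int serial = 0;

        record_ack(ACK_DELAY, 101, &best, &serial);
        record_ack(ACK_NOSPACE, 102, &best, &serial);
        record_ack(ACK_REQUESTED, 103, &best, &serial);
        printf("%d %u\n", best, serial); /* "3 102": NOSPACE wins */
        return 0;
    }
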
+diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
+index 4a3a08a0e2cd04..d7e72bae4d2b2c 100644
+--- a/net/rxrpc/io_thread.c
++++ b/net/rxrpc/io_thread.c
+@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
+ {
+ struct sk_buff_head *rx_queue;
+ struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
++ struct task_struct *io_thread;
+
+ if (unlikely(!local)) {
+ kfree_skb(skb);
+ return 0;
+ }
++ io_thread = READ_ONCE(local->io_thread);
++ if (!io_thread) {
++ kfree_skb(skb);
++ return 0;
++ }
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
+
+@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
+ #endif
+
+ skb_queue_tail(rx_queue, skb);
+- rxrpc_wake_up_io_thread(local);
++ wake_up_process(io_thread);
+ return 0;
+ }
+
+@@ -554,7 +560,7 @@ int rxrpc_io_thread(void *data)
+ __set_current_state(TASK_RUNNING);
+ rxrpc_see_local(local, rxrpc_local_stop);
+ rxrpc_destroy_local(local);
+- local->io_thread = NULL;
++ WRITE_ONCE(local->io_thread, NULL);
+ rxrpc_see_local(local, rxrpc_local_stopped);
+ return 0;
+ }
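
The io_thread.c hunks close a startup/shutdown race on local->io_thread: the pointer is written by one thread and read locklessly from rxrpc_encap_rcv(), so the accesses become WRITE_ONCE()/READ_ONCE() pairs and the receive path snapshots the pointer once, using the same value for both the NULL check and the wakeup rather than re-reading it after the check. The load-once idiom, as a kernel-style fragment:

    struct task_struct *io_thread = READ_ONCE(local->io_thread);

    if (!io_thread) {
        kfree_skb(skb); /* thread not started or already gone */
        return 0;
    }
    skb_queue_tail(rx_queue, skb);
    wake_up_process(io_thread); /* same snapshot, no second read */
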
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index 7d910aee4f8cb2..8da6120f65330e 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -36,6 +36,17 @@ static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
+ return ipv6_icmp_error(sk, skb, err, port, info, payload);
+ }
+
++/*
++ * Set or clear the Don't Fragment flag on a socket.
++ */
++void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set)
++{
++ if (set)
++ ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO);
++ else
++ ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DONT);
++}
++
+ /*
+ * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
+ * same or greater than.
+@@ -87,7 +98,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+ struct rxrpc_local *local =
+ container_of(timer, struct rxrpc_local, client_conn_reap_timer);
+
+- if (local->kill_all_client_conns &&
++ if (!local->kill_all_client_conns &&
+ test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
+ rxrpc_wake_up_io_thread(local);
+ }
+@@ -203,7 +214,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
+ ip_sock_set_recverr(usk);
+
+ /* we want to set the don't fragment bit */
+- ip_sock_set_mtu_discover(usk, IP_PMTUDISC_DO);
++ rxrpc_local_dont_fragment(local, true);
+
+ /* We want receive timestamps. */
+ sock_enable_timestamps(usk);
+@@ -221,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
+ }
+
+ wait_for_completion(&local->io_thread_ready);
+- local->io_thread = io_thread;
++ WRITE_ONCE(local->io_thread, io_thread);
+ _leave(" = 0");
+ return 0;
+
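/*
 * Editorial note (not part of the patch): rxrpc_local_dont_fragment(),
 * added earlier in this file's hunks, is a thin wrapper that flips path-MTU
 * discovery -- and with it the IP Don't Fragment bit -- on the transport
 * socket.  The equivalent userspace knob is the IP_MTU_DISCOVER socket
 * option; a hedged sketch:
 */
#include <netinet/in.h>
#include <sys/socket.h>

static int sock_dont_fragment(int fd, int set)
{
	int val = set ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			  &val, sizeof(val));
}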
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 5e53429c692288..cad6a7d18e0405 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -83,7 +83,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+ struct rxrpc_txbuf *txb,
+ u16 *_rwind)
+ {
+- struct rxrpc_ackinfo ackinfo;
++ struct rxrpc_acktrailer trailer;
+ unsigned int qsize, sack, wrap, to;
+ rxrpc_seq_t window, wtop;
+ int rsize;
+@@ -126,16 +126,16 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+ qsize = (window - 1) - call->rx_consumed;
+ rsize = max_t(int, call->rx_winsize - qsize, 0);
+ *_rwind = rsize;
+- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
+- ackinfo.maxMTU = htonl(mtu);
+- ackinfo.rwind = htonl(rsize);
+- ackinfo.jumbo_max = htonl(jmax);
++ trailer.maxMTU = htonl(rxrpc_rx_mtu);
++ trailer.ifMTU = htonl(mtu);
++ trailer.rwind = htonl(rsize);
++ trailer.jumbo_max = htonl(jmax);
+
+ *ackp++ = 0;
+ *ackp++ = 0;
+ *ackp++ = 0;
+- memcpy(ackp, &ackinfo, sizeof(ackinfo));
+- return txb->ack.nAcks + 3 + sizeof(ackinfo);
++ memcpy(ackp, &trailer, sizeof(trailer));
++ return txb->ack.nAcks + 3 + sizeof(trailer);
+ }
+
+ /*
+@@ -216,7 +216,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+ iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
+ len = iov[0].iov_len;
+
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+ txb->wire.serial = htonl(serial);
+ trace_rxrpc_tx_ack(call->debug_id, serial,
+ ntohl(txb->ack.firstPacket),
+@@ -302,7 +302,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = sizeof(pkt);
+
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+ pkt.whdr.serial = htonl(serial);
+
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
+@@ -334,7 +334,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+ _enter("%x,{%d}", txb->seq, txb->len);
+
+ /* Each transmission of a Tx packet needs a new serial number */
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+ txb->wire.serial = htonl(serial);
+
+ if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
+@@ -494,14 +494,12 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+ switch (conn->local->srx.transport.family) {
+ case AF_INET6:
+ case AF_INET:
+- ip_sock_set_mtu_discover(conn->local->socket->sk,
+- IP_PMTUDISC_DONT);
++ rxrpc_local_dont_fragment(conn->local, false);
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
+ ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+ conn->peer->last_tx_at = ktime_get_seconds();
+
+- ip_sock_set_mtu_discover(conn->local->socket->sk,
+- IP_PMTUDISC_DO);
++ rxrpc_local_dont_fragment(conn->local, true);
+ break;
+
+ default:
+@@ -560,7 +558,7 @@ void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+ whdr.serial = htonl(serial);
+
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
+diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
+index 682636d3b060bb..208312c244f6b0 100644
+--- a/net/rxrpc/proc.c
++++ b/net/rxrpc/proc.c
+@@ -181,7 +181,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
+ atomic_read(&conn->active),
+ state,
+ key_serial(conn->key),
+- atomic_read(&conn->serial),
++ conn->tx_serial,
+ conn->hi_serial,
+ conn->channels[0].call_id,
+ conn->channels[1].call_id,
+diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
+index e8ee4af43ca89b..4fe6b4d20ada91 100644
+--- a/net/rxrpc/protocol.h
++++ b/net/rxrpc/protocol.h
+@@ -135,9 +135,9 @@ struct rxrpc_ackpacket {
+ /*
+ * ACK packets can have a further piece of information tagged on the end
+ */
+-struct rxrpc_ackinfo {
+- __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
+- __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
++struct rxrpc_acktrailer {
++ __be32 maxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
++ __be32 ifMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
+ __be32 rwind; /* Rx window size (packets) [AFS 3.4] */
+ __be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */
+ };
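/*
 * Editorial note (not part of the patch): the protocol.h hunk above renames
 * struct rxrpc_ackinfo to rxrpc_acktrailer and corrects the transposed
 * names of its first two fields; the wire layout itself is unchanged.  The
 * struct is a wire image: every field is big-endian (__be32) and is filled
 * through htonl(), then memcpy'd into the packet.  Userspace sketch of that
 * idiom (field values are illustrative only):
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct ack_trailer {		/* all fields big-endian on the wire */
	uint32_t max_mtu;
	uint32_t if_mtu;
	uint32_t rwind;
	uint32_t jumbo_max;
};

static size_t fill_trailer(uint8_t *buf, uint32_t mtu, uint32_t rwind)
{
	struct ack_trailer t = {
		.max_mtu   = htonl(5692),	/* example rx MTU */
		.if_mtu    = htonl(mtu),
		.rwind     = htonl(rwind),
		.jumbo_max = htonl(4),
	};

	memcpy(buf, &t, sizeof(t));	/* memcpy keeps it alignment-safe */
	return sizeof(t);
}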
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index 1bf571a66e020d..ad6c57a9f27c77 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -259,7 +259,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
+
+ _enter("");
+
+- check = txb->seq ^ ntohl(txb->wire.callNumber);
++ check = txb->seq ^ call->call_id;
+ hdr->data_size = htonl((u32)check << 16 | txb->len);
+
+ txb->len += sizeof(struct rxkad_level1_hdr);
+@@ -302,7 +302,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
+
+ _enter("");
+
+- check = txb->seq ^ ntohl(txb->wire.callNumber);
++ check = txb->seq ^ call->call_id;
+
+ rxkhdr->data_size = htonl(txb->len | (u32)check << 16);
+ rxkhdr->checksum = 0;
+@@ -362,9 +362,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+ memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
+
+ /* calculate the security checksum */
+- x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
++ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x |= txb->seq & 0x3fffffff;
+- crypto.buf[0] = txb->wire.callNumber;
++ crypto.buf[0] = htonl(call->call_id);
+ crypto.buf[1] = htonl(x);
+
+ sg_init_one(&sg, crypto.buf, 8);
+@@ -664,7 +664,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+ whdr.serial = htonl(serial);
+
+ ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
+@@ -721,10 +721,12 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
+
+ len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
+
+- serial = atomic_inc_return(&conn->serial);
++ serial = rxrpc_get_next_serial(conn);
+ whdr.serial = htonl(serial);
+
++ rxrpc_local_dont_fragment(conn->local, false);
+ ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
++ rxrpc_local_dont_fragment(conn->local, true);
+ if (ret < 0) {
+ trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+ rxrpc_tx_point_rxkad_response);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 8e0b94714e849f..24f765d243db1c 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -303,6 +303,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ reload:
++ txb = call->tx_pending;
++ call->tx_pending = NULL;
++ if (txb)
++ rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
++
+ ret = -EPIPE;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto maybe_error;
+@@ -329,11 +334,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
+ goto maybe_error;
+ }
+
+- txb = call->tx_pending;
+- call->tx_pending = NULL;
+- if (txb)
+- rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
+-
+ do {
+ if (!txb) {
+ size_t remain, bufsize, chunk, offset;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 9d3f26bf0440d9..2d6d58e1b278a1 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -816,6 +816,9 @@ EXPORT_SYMBOL(tcf_idr_cleanup);
+ * its reference and bind counters, and return 1. Otherwise insert temporary
+ * error pointer (to prevent concurrent users from inserting actions with same
+ * index) and return 0.
++ *
++ * May return -EAGAIN for binding actions in case of a parallel add/delete on
++ * the requested index.
+ */
+
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+@@ -824,43 +827,60 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tcf_idrinfo *idrinfo = tn->idrinfo;
+ struct tc_action *p;
+ int ret;
++ u32 max;
+
+-again:
+- mutex_lock(&idrinfo->lock);
+ if (*index) {
++ rcu_read_lock();
+ p = idr_find(&idrinfo->action_idr, *index);
++
+ if (IS_ERR(p)) {
+ /* This means that another process allocated
+ * index but did not assign the pointer yet.
+ */
+- mutex_unlock(&idrinfo->lock);
+- goto again;
++ rcu_read_unlock();
++ return -EAGAIN;
+ }
+
+- if (p) {
+- refcount_inc(&p->tcfa_refcnt);
+- if (bind)
+- atomic_inc(&p->tcfa_bindcnt);
+- *a = p;
+- ret = 1;
+- } else {
+- *a = NULL;
+- ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+- *index, GFP_KERNEL);
+- if (!ret)
+- idr_replace(&idrinfo->action_idr,
+- ERR_PTR(-EBUSY), *index);
++ if (!p) {
++ /* Empty slot, try to allocate it */
++ max = *index;
++ rcu_read_unlock();
++ goto new;
+ }
++
++ if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
++ /* Action was deleted in parallel */
++ rcu_read_unlock();
++ return -EAGAIN;
++ }
++
++ if (bind)
++ atomic_inc(&p->tcfa_bindcnt);
++ *a = p;
++
++ rcu_read_unlock();
++
++ return 1;
+ } else {
++ /* Find a slot */
+ *index = 1;
+- *a = NULL;
+- ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+- UINT_MAX, GFP_KERNEL);
+- if (!ret)
+- idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
+- *index);
++ max = UINT_MAX;
+ }
++
++new:
++ *a = NULL;
++
++ mutex_lock(&idrinfo->lock);
++ ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
++ GFP_KERNEL);
+ mutex_unlock(&idrinfo->lock);
++
++ /* N binds raced for action allocation;
++ * retry for all the ones that failed.
++ */
++ if (ret == -ENOSPC && *index == max)
++ ret = -EAGAIN;
++
+ return ret;
+ }
+ EXPORT_SYMBOL(tcf_idr_check_alloc);
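/*
 * Editorial note (not part of the patch): the tcf_idr_check_alloc() rewrite
 * above replaces a mutex-guarded lookup-and-retry loop with an RCU lookup
 * that takes a reference only via refcount_inc_not_zero(), bouncing racing
 * callers with -EAGAIN instead of spinning on the lock.  A userspace sketch
 * of the "take a reference only if the object is still live" half, with C11
 * atomics standing in for refcount_t:
 */
#include <stdatomic.h>
#include <stdbool.h>

struct action {
	atomic_uint refcnt;	/* 0 means the object is being destroyed */
};

static bool action_tryget(struct action *a)
{
	unsigned int old = atomic_load(&a->refcnt);

	do {
		if (old == 0)
			return false;	/* deleted in parallel: caller retries */
	} while (!atomic_compare_exchange_weak(&a->refcnt, &old, old + 1));

	return true;
}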
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index fb52d6f9aff939..50d24e240e8fba 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -41,21 +41,28 @@ static struct workqueue_struct *act_ct_wq;
+ static struct rhashtable zones_ht;
+ static DEFINE_MUTEX(zones_mutex);
+
++struct zones_ht_key {
++ struct net *net;
++ u16 zone;
+ /* Note: pad[] must be the last field. */
++ u8 pad[];
++};
++
+ struct tcf_ct_flow_table {
+ struct rhash_head node; /* In zones tables */
+
+ struct rcu_work rwork;
+ struct nf_flowtable nf_ft;
+ refcount_t ref;
+- u16 zone;
++ struct zones_ht_key key;
+
+ bool dying;
+ };
+
+ static const struct rhashtable_params zones_params = {
+ .head_offset = offsetof(struct tcf_ct_flow_table, node),
+- .key_offset = offsetof(struct tcf_ct_flow_table, zone),
+- .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
++ .key_offset = offsetof(struct tcf_ct_flow_table, key),
++ .key_len = offsetof(struct zones_ht_key, pad),
+ .automatic_shrinking = true,
+ };
+
+@@ -286,19 +293,42 @@ static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+ !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+ }
+
++static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);
++
++static void tcf_ct_nf_get(struct nf_flowtable *ft)
++{
++ struct tcf_ct_flow_table *ct_ft =
++ container_of(ft, struct tcf_ct_flow_table, nf_ft);
++
++ tcf_ct_flow_table_get_ref(ct_ft);
++}
++
++static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);
++
++static void tcf_ct_nf_put(struct nf_flowtable *ft)
++{
++ struct tcf_ct_flow_table *ct_ft =
++ container_of(ft, struct tcf_ct_flow_table, nf_ft);
++
++ tcf_ct_flow_table_put(ct_ft);
++}
++
+ static struct nf_flowtable_type flowtable_ct = {
+ .gc = tcf_ct_flow_is_outdated,
+ .action = tcf_ct_flow_table_fill_actions,
++ .get = tcf_ct_nf_get,
++ .put = tcf_ct_nf_put,
+ .owner = THIS_MODULE,
+ };
+
+ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ {
++ struct zones_ht_key key = { .net = net, .zone = params->zone };
+ struct tcf_ct_flow_table *ct_ft;
+ int err = -ENOMEM;
+
+ mutex_lock(&zones_mutex);
+- ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
++ ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
+ if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
+ goto out_unlock;
+
+@@ -307,7 +337,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ goto err_alloc;
+ refcount_set(&ct_ft->ref, 1);
+
+- ct_ft->zone = params->zone;
++ ct_ft->key = key;
+ err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
+ if (err)
+ goto err_insert;
+@@ -337,9 +367,13 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ return err;
+ }
+
++static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
++{
++ refcount_inc(&ct_ft->ref);
++}
++
+ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
+ {
+- struct flow_block_cb *block_cb, *tmp_cb;
+ struct tcf_ct_flow_table *ct_ft;
+ struct flow_block *block;
+
+@@ -347,13 +381,9 @@ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
+ rwork);
+ nf_flow_table_free(&ct_ft->nf_ft);
+
+- /* Remove any remaining callbacks before cleanup */
+ block = &ct_ft->nf_ft.flow_block;
+ down_write(&ct_ft->nf_ft.flow_block_lock);
+- list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
+- list_del(&block_cb->list);
+- flow_block_cb_free(block_cb);
+- }
++ WARN_ON(!list_empty(&block->cb_list));
+ up_write(&ct_ft->nf_ft.flow_block_lock);
+ kfree(ct_ft);
+
+@@ -376,6 +406,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+ entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+ }
+
++static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
++{
++ struct nf_conn_act_ct_ext *act_ct_ext;
++
++ act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
++ if (act_ct_ext) {
++ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
++ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
++ }
++}
++
+ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ bool tcp, bool bidirectional)
+@@ -671,6 +712,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ else
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
+
++ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
++ tcf_ct_flow_ct_ext_ifidx_update(flow);
+ flow_offload_refresh(nf_ft, flow, force_refresh);
+ if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+ /* Process this flow in SW to allow promoting to ASSURED */
+@@ -816,7 +859,6 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ if (err || !frag)
+ return err;
+
+- skb_get(skb);
+ err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
+ if (err)
+ return err;
+@@ -960,12 +1002,8 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ nh_ofs = skb_network_offset(skb);
+ skb_pull_rcsum(skb, nh_ofs);
+ err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
+- if (err == -EINPROGRESS) {
+- retval = TC_ACT_STOLEN;
+- goto out_clear;
+- }
+ if (err)
+- goto drop;
++ goto out_frag;
+
+ err = nf_ct_skb_network_trim(skb, family);
+ if (err)
+@@ -1030,13 +1068,21 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+
+ if (!nf_ct_is_confirmed(ct))
+- nf_conn_act_ct_ext_add(ct);
++ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
+
+ /* This will take care of sending queued events
+ * even if the connection is already confirmed.
+ */
+ if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ goto drop;
++
++ /* The ct may be dropped if a clash has been resolved,
++ * so it's necessary to retrieve it from skb again to
++ * prevent UAF.
++ */
++ ct = nf_ct_get(skb, &ctinfo);
++ if (!ct)
++ skip_add = true;
+ }
+
+ if (!skip_add)
+@@ -1052,6 +1098,11 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ return retval;
+
++out_frag:
++ if (err != -EINPROGRESS)
++ tcf_action_inc_drop_qstats(&c->common);
++ return TC_ACT_CONSUMED;
++
+ drop:
+ tcf_action_inc_drop_qstats(&c->common);
+ return TC_ACT_SHOT;
+@@ -1522,6 +1573,9 @@ static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
++ if (tcf_ct_helper(act))
++ return -EOPNOTSUPP;
++
+ entry->id = FLOW_ACTION_CT;
+ entry->ct.action = tcf_ct_action(act);
+ entry->ct.zone = tcf_ct_zone(act);
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 0a711c184c29bd..674f7ae356ca2c 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -206,18 +206,14 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
+ return err;
+ }
+
+-static bool is_mirred_nested(void)
+-{
+- return unlikely(__this_cpu_read(mirred_nest_level) > 1);
+-}
+-
+-static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
++static int
++tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
+ {
+ int err;
+
+ if (!want_ingress)
+ err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
+- else if (is_mirred_nested())
++ else if (!at_ingress)
+ err = netif_rx(skb);
+ else
+ err = netif_receive_skb(skb);
+@@ -225,110 +221,123 @@ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+ return err;
+ }
+
+-TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+- const struct tc_action *a,
+- struct tcf_result *res)
++static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
++ struct net_device *dev,
++ const bool m_mac_header_xmit, int m_eaction,
++ int retval)
+ {
+- struct tcf_mirred *m = to_mirred(a);
+- struct sk_buff *skb2 = skb;
+- bool m_mac_header_xmit;
+- struct net_device *dev;
+- unsigned int nest_level;
+- int retval, err = 0;
+- bool use_reinsert;
++ struct sk_buff *skb_to_send = skb;
+ bool want_ingress;
+ bool is_redirect;
+ bool expects_nh;
+ bool at_ingress;
+- int m_eaction;
++ bool dont_clone;
+ int mac_len;
+ bool at_nh;
++ int err;
+
+- nest_level = __this_cpu_inc_return(mirred_nest_level);
+- if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+- net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+- netdev_name(skb->dev));
+- __this_cpu_dec(mirred_nest_level);
+- return TC_ACT_SHOT;
+- }
+-
+- tcf_lastuse_update(&m->tcf_tm);
+- tcf_action_update_bstats(&m->common, skb);
+-
+- m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
+- m_eaction = READ_ONCE(m->tcfm_eaction);
+- retval = READ_ONCE(m->tcf_action);
+- dev = rcu_dereference_bh(m->tcfm_dev);
+- if (unlikely(!dev)) {
+- pr_notice_once("tc mirred: target device is gone\n");
+- goto out;
+- }
+-
++ is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
+ net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ dev->name);
+- goto out;
++ goto err_cant_do;
+ }
+
+ /* we could easily avoid the clone only if called by ingress and clsact;
+ * since we can't easily detect the clsact caller, skip clone only for
+ * ingress - that covers the TC S/W datapath.
+ */
+- is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+ at_ingress = skb_at_tc_ingress(skb);
+- use_reinsert = at_ingress && is_redirect &&
+- tcf_mirred_can_reinsert(retval);
+- if (!use_reinsert) {
+- skb2 = skb_clone(skb, GFP_ATOMIC);
+- if (!skb2)
+- goto out;
++ dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
++ tcf_mirred_can_reinsert(retval);
++ if (!dont_clone) {
++ skb_to_send = skb_clone(skb, GFP_ATOMIC);
++ if (!skb_to_send)
++ goto err_cant_do;
+ }
+
+ want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+
+ /* All mirred/redirected skbs should clear previous ct info */
+- nf_reset_ct(skb2);
++ nf_reset_ct(skb_to_send);
+ if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
+- skb_dst_drop(skb2);
++ skb_dst_drop(skb_to_send);
+
+ expects_nh = want_ingress || !m_mac_header_xmit;
+ at_nh = skb->data == skb_network_header(skb);
+ if (at_nh != expects_nh) {
+- mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
++ mac_len = at_ingress ? skb->mac_len :
+ skb_network_offset(skb);
+ if (expects_nh) {
+ /* target device/action expect data at nh */
+- skb_pull_rcsum(skb2, mac_len);
++ skb_pull_rcsum(skb_to_send, mac_len);
+ } else {
+ /* target device/action expect data at mac */
+- skb_push_rcsum(skb2, mac_len);
++ skb_push_rcsum(skb_to_send, mac_len);
+ }
+ }
+
+- skb2->skb_iif = skb->dev->ifindex;
+- skb2->dev = dev;
++ skb_to_send->skb_iif = skb->dev->ifindex;
++ skb_to_send->dev = dev;
+
+- /* mirror is always swallowed */
+ if (is_redirect) {
+- skb_set_redirected(skb2, skb2->tc_at_ingress);
+-
+- /* let's the caller reinsert the packet, if possible */
+- if (use_reinsert) {
+- err = tcf_mirred_forward(want_ingress, skb);
+- if (err)
+- tcf_action_inc_overlimit_qstats(&m->common);
+- __this_cpu_dec(mirred_nest_level);
+- return TC_ACT_CONSUMED;
+- }
++ if (skb == skb_to_send)
++ retval = TC_ACT_CONSUMED;
++
++ skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
++
++ err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
++ } else {
++ err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
+ }
++ if (err)
++ tcf_action_inc_overlimit_qstats(&m->common);
++
++ return retval;
++
++err_cant_do:
++ if (is_redirect)
++ retval = TC_ACT_SHOT;
++ tcf_action_inc_overlimit_qstats(&m->common);
++ return retval;
++}
++
++TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
++ const struct tc_action *a,
++ struct tcf_result *res)
++{
++ struct tcf_mirred *m = to_mirred(a);
++ int retval = READ_ONCE(m->tcf_action);
++ unsigned int nest_level;
++ bool m_mac_header_xmit;
++ struct net_device *dev;
++ int m_eaction;
+
+- err = tcf_mirred_forward(want_ingress, skb2);
+- if (err) {
+-out:
++ nest_level = __this_cpu_inc_return(mirred_nest_level);
++ if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
++ net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
++ netdev_name(skb->dev));
++ retval = TC_ACT_SHOT;
++ goto dec_nest_level;
++ }
++
++ tcf_lastuse_update(&m->tcf_tm);
++ tcf_action_update_bstats(&m->common, skb);
++
++ dev = rcu_dereference_bh(m->tcfm_dev);
++ if (unlikely(!dev)) {
++ pr_notice_once("tc mirred: target device is gone\n");
+ tcf_action_inc_overlimit_qstats(&m->common);
+- if (tcf_mirred_is_act_redirect(m_eaction))
+- retval = TC_ACT_SHOT;
++ goto dec_nest_level;
+ }
++
++ m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
++ m_eaction = READ_ONCE(m->tcfm_eaction);
++
++ retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
++ retval);
++
++dec_nest_level:
+ __this_cpu_dec(mirred_nest_level);
+
+ return retval;
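/*
 * Editorial note (not part of the patch): the refactored tcf_mirred_act()
 * above keeps the per-CPU mirred_nest_level guard but routes every exit
 * through a single dec_nest_level label, so the counter can no longer leak
 * on an error path.  Userspace sketch of the recursion-limit idiom with a
 * thread-local counter (MIRRED_NEST_LIMIT is 4 in the kernel source; the
 * rest of the names are hypothetical):
 */
#define NEST_LIMIT 4

static _Thread_local unsigned int nest_level;

static int forward_packet(void *skb)
{
	int ret = 0;

	if (++nest_level > NEST_LIMIT) {
		ret = -1;		/* drop: recursion limit exceeded */
		goto dec_nest_level;
	}

	/* ...redirect/mirror work that may re-enter forward_packet()... */

dec_nest_level:
	nest_level--;
	return ret;
}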
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index dffa990a9629f0..e34f1be1516459 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -241,13 +241,13 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+ struct tcf_skbmod *d = to_skbmod(a);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_skbmod_params *p;
+- struct tc_skbmod opt = {
+- .index = d->tcf_index,
+- .refcnt = refcount_read(&d->tcf_refcnt) - ref,
+- .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
+- };
++ struct tc_skbmod opt;
+ struct tcf_t t;
+
++ memset(&opt, 0, sizeof(opt));
++ opt.index = d->tcf_index;
++ opt.refcnt = refcount_read(&d->tcf_refcnt) - ref,
++ opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
+ spin_lock_bh(&d->tcf_lock);
+ opt.action = d->tcf_action;
+ p = rcu_dereference_protected(d->skbmod_p,
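/*
 * Editorial note (not part of the patch): the act_skbmod hunk above swaps a
 * designated initializer for an explicit memset().  A designated
 * initializer zeroes the named members but makes no promise about padding
 * bytes, and this struct is copied out to userspace via netlink, so stale
 * padding could leak kernel stack bytes.  Minimal illustration:
 */
#include <string.h>

struct tc_opt_example {		/* stand-in for struct tc_skbmod */
	unsigned int index;
	unsigned char flags;	/* padding bytes may follow */
	unsigned int refcnt;
};

static void fill_opt(struct tc_opt_example *opt, unsigned int index)
{
	memset(opt, 0, sizeof(*opt));	/* zeroes the padding too */
	opt->index = index;
	opt->refcnt = 1;
}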
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index a193cc7b32418c..84e18b5f72a305 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1536,6 +1536,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
+ chain_prev = chain,
+ chain = __tcf_get_next_chain(block, chain),
+ tcf_chain_put(chain_prev)) {
++ if (chain->tmplt_ops && add)
++ chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
++ cb_priv);
+ for (tp = __tcf_get_next_proto(chain, NULL); tp;
+ tp_prev = tp,
+ tp = __tcf_get_next_proto(chain, tp),
+@@ -1551,6 +1554,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
+ goto err_playback_remove;
+ }
+ }
++ if (chain->tmplt_ops && !add)
++ chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
++ cb_priv);
+ }
+
+ return 0;
+@@ -2950,7 +2956,8 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
+ ops = tcf_proto_lookup_ops(name, true, extack);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+- if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
++ if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
++ !ops->tmplt_reoffload) {
+ NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+ module_put(ops->owner);
+ return -EOPNOTSUPP;
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index e5314a31f75ae3..6ee7064c82fcc3 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2460,8 +2460,11 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ }
+
+ errout_idr:
+- if (!fold)
++ if (!fold) {
++ spin_lock(&tp->lock);
+ idr_remove(&head->handle_idr, fnew->handle);
++ spin_unlock(&tp->lock);
++ }
+ __fl_put(fnew);
+ errout_tb:
+ kfree(tb);
+@@ -2721,6 +2724,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
+ kfree(tmplt);
+ }
+
++static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
++ flow_setup_cb_t *cb, void *cb_priv)
++{
++ struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
++ struct flow_cls_offload cls_flower = {};
++
++ cls_flower.rule = flow_rule_alloc(0);
++ if (!cls_flower.rule)
++ return;
++
++ cls_flower.common.chain_index = chain->index;
++ cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
++ FLOW_CLS_TMPLT_DESTROY;
++ cls_flower.cookie = (unsigned long) tmplt;
++ cls_flower.rule->match.dissector = &tmplt->dissector;
++ cls_flower.rule->match.mask = &tmplt->mask;
++ cls_flower.rule->match.key = &tmplt->dummy_key;
++
++ cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
++ kfree(cls_flower.rule);
++}
++
+ static int fl_dump_key_val(struct sk_buff *skb,
+ void *val, int val_type,
+ void *mask, int mask_type, int len)
+@@ -3628,6 +3653,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
+ .bind_class = fl_bind_class,
+ .tmplt_create = fl_tmplt_create,
+ .tmplt_destroy = fl_tmplt_destroy,
++ .tmplt_reoffload = fl_tmplt_reoffload,
+ .tmplt_dump = fl_tmplt_dump,
+ .get_exts = fl_get_exts,
+ .owner = THIS_MODULE,
+diff --git a/net/sched/em_text.c b/net/sched/em_text.c
+index 6f3c1fb2fb44c4..f176afb70559eb 100644
+--- a/net/sched/em_text.c
++++ b/net/sched/em_text.c
+@@ -97,8 +97,10 @@ static int em_text_change(struct net *net, void *data, int len,
+
+ static void em_text_destroy(struct tcf_ematch *m)
+ {
+- if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
++ if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) {
+ textsearch_destroy(EM_TEXT_PRIV(m)->config);
++ kfree(EM_TEXT_PRIV(m));
++ }
+ }
+
+ static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index e9eaf637220e9c..1455892694c001 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -593,7 +593,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ pkt_len = 1;
+ qdisc_skb_cb(skb)->pkt_len = pkt_len;
+ }
+-EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
+
+ void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
+ {
+@@ -809,7 +808,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
+ notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
+ !qdisc_is_offloaded);
+ /* TODO: perform the search on a per txq basis */
+- sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
++ sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
+ if (sch == NULL) {
+ WARN_ON_ONCE(parentid != TC_H_ROOT);
+ break;
+@@ -1172,6 +1171,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ return -EINVAL;
+ }
+
++ if (new &&
++ !(parent->flags & TCQ_F_MQROOT) &&
++ rcu_access_pointer(new->stab)) {
++ NL_SET_ERR_MSG(extack, "STAB not supported on a non root");
++ return -EINVAL;
++ }
+ err = cops->graft(parent, cl, new, &old, extack);
+ if (err)
+ return err;
+@@ -1360,6 +1365,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
+ ops->destroy(sch);
+ qdisc_put_stab(rtnl_dereference(sch->stab));
+ err_out3:
++ lockdep_unregister_key(&sch->root_lock_key);
+ netdev_put(dev, &sch->dev_tracker);
+ qdisc_free(sch);
+ err_out2:
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 9cff99558694dc..30955dd45779e2 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -786,12 +786,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ * queue, accept the collision, update the host tags.
+ */
+ q->way_collisions++;
+- if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+- }
+ allocate_src = cake_dsrc(flow_mode);
+ allocate_dst = cake_ddst(flow_mode);
++
++ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
++ if (allocate_src)
++ q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
++ if (allocate_dst)
++ q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++ }
+ found:
+ /* reserve queue for future packets in same flow */
+ reduced_hash = outer_hash + k;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 5d7e23f4cc0ee4..6ab9359c1706f1 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -522,8 +522,9 @@ static void dev_watchdog(struct timer_list *t)
+
+ if (unlikely(timedout_ms)) {
+ trace_net_dev_xmit_timeout(dev, i);
+- WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out %u ms\n",
+- dev->name, netdev_drivername(dev), i, timedout_ms);
++ netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
++ raw_smp_processor_id(),
++ i, timedout_ms);
+ netif_freeze_queues(dev);
+ dev->netdev_ops->ndo_tx_timeout(dev, i);
+ netif_unfreeze_queues(dev);
+@@ -942,7 +943,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ __skb_queue_head_init(&sch->gso_skb);
+ __skb_queue_head_init(&sch->skb_bad_txq);
+ gnet_stats_basic_sync_init(&sch->bstats);
++ lockdep_register_key(&sch->root_lock_key);
+ spin_lock_init(&sch->q.lock);
++ lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
+
+ if (ops->static_flags & TCQ_F_CPUSTATS) {
+ sch->cpu_bstats =
+@@ -976,6 +979,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+
+ return sch;
+ errout1:
++ lockdep_unregister_key(&sch->root_lock_key);
+ kfree(sch);
+ errout:
+ return ERR_PTR(err);
+@@ -1062,6 +1066,7 @@ static void __qdisc_destroy(struct Qdisc *qdisc)
+ if (ops->destroy)
+ ops->destroy(qdisc);
+
++ lockdep_unregister_key(&qdisc->root_lock_key);
+ module_put(ops->owner);
+ netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 0d947414e61611..19035ef8387fed 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1039,13 +1039,6 @@ static void htb_work_func(struct work_struct *work)
+ rcu_read_unlock();
+ }
+
+-static void htb_set_lockdep_class_child(struct Qdisc *q)
+-{
+- static struct lock_class_key child_key;
+-
+- lockdep_set_class(qdisc_lock(q), &child_key);
+-}
+-
+ static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
+ {
+ return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
+@@ -1132,7 +1125,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+ return -ENOMEM;
+ }
+
+- htb_set_lockdep_class_child(qdisc);
+ q->direct_qdiscs[ntx] = qdisc;
+ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ }
+@@ -1468,7 +1460,6 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ }
+
+ if (q->offload) {
+- htb_set_lockdep_class_child(new);
+ /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
+ qdisc_refcount_inc(new);
+ old_q = htb_graft_helper(dev_queue, new);
+@@ -1733,11 +1724,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+ new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ cl->parent->common.classid,
+ NULL);
+- if (q->offload) {
+- if (new_q)
+- htb_set_lockdep_class_child(new_q);
++ if (q->offload)
+ htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+- }
+ }
+
+ sch_tree_lock(sch);
+@@ -1947,13 +1935,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ classid, NULL);
+ if (q->offload) {
+- if (new_q) {
+- htb_set_lockdep_class_child(new_q);
+- /* One ref for cl->leaf.q, the other for
+- * dev_queue->qdisc.
+- */
++ /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
++ if (new_q)
+ qdisc_refcount_inc(new_q);
+- }
+ old_q = htb_graft_helper(dev_queue, new_q);
+ /* No qdisc_put needed. */
+ WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
+diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
+index a463a63192c3c7..8dde3548dc11cb 100644
+--- a/net/sched/sch_ingress.c
++++ b/net/sched/sch_ingress.c
+@@ -91,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+ entry = tcx_entry_fetch_or_create(dev, true, &created);
+ if (!entry)
+ return -ENOMEM;
+- tcx_miniq_set_active(entry, true);
++ tcx_miniq_inc(entry);
+ mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
+ if (created)
+ tcx_entry_update(dev, entry, true);
+@@ -121,7 +121,7 @@ static void ingress_destroy(struct Qdisc *sch)
+ tcf_block_put_ext(q->block, sch, &q->block_info);
+
+ if (entry) {
+- tcx_miniq_set_active(entry, false);
++ tcx_miniq_dec(entry);
+ if (!tcx_entry_is_active(entry)) {
+ tcx_entry_update(dev, NULL, true);
+ tcx_entry_free(entry);
+@@ -256,7 +256,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ entry = tcx_entry_fetch_or_create(dev, true, &created);
+ if (!entry)
+ return -ENOMEM;
+- tcx_miniq_set_active(entry, true);
++ tcx_miniq_inc(entry);
+ mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
+ if (created)
+ tcx_entry_update(dev, entry, true);
+@@ -275,7 +275,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ entry = tcx_entry_fetch_or_create(dev, false, &created);
+ if (!entry)
+ return -ENOMEM;
+- tcx_miniq_set_active(entry, true);
++ tcx_miniq_inc(entry);
+ mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
+ if (created)
+ tcx_entry_update(dev, entry, false);
+@@ -301,7 +301,7 @@ static void clsact_destroy(struct Qdisc *sch)
+ tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
+
+ if (ingress_entry) {
+- tcx_miniq_set_active(ingress_entry, false);
++ tcx_miniq_dec(ingress_entry);
+ if (!tcx_entry_is_active(ingress_entry)) {
+ tcx_entry_update(dev, NULL, true);
+ tcx_entry_free(ingress_entry);
+@@ -309,7 +309,7 @@ static void clsact_destroy(struct Qdisc *sch)
+ }
+
+ if (egress_entry) {
+- tcx_miniq_set_active(egress_entry, false);
++ tcx_miniq_dec(egress_entry);
+ if (!tcx_entry_is_active(egress_entry)) {
+ tcx_entry_update(dev, NULL, false);
+ tcx_entry_free(egress_entry);
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 75c9c860182b40..0d6649d937c9fa 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+
+ qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
+
+- removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
++ removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
+ GFP_KERNEL);
+ if (!removed)
+ return -ENOMEM;
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 4ad39a4a3cf5bd..d36eeb7b050293 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -446,12 +446,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct netem_sched_data *q = qdisc_priv(sch);
+ /* We don't fill cb now as skb_unshare() may invalidate it */
+ struct netem_skb_cb *cb;
+- struct sk_buff *skb2;
++ struct sk_buff *skb2 = NULL;
+ struct sk_buff *segs = NULL;
+ unsigned int prev_len = qdisc_pkt_len(skb);
+ int count = 1;
+- int rc = NET_XMIT_SUCCESS;
+- int rc_drop = NET_XMIT_DROP;
+
+ /* Do not fool qdisc_drop_all() */
+ skb->prev = NULL;
+@@ -480,19 +478,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ skb_orphan_partial(skb);
+
+ /*
+- * If we need to duplicate packet, then re-insert at top of the
+- * qdisc tree, since parent queuer expects that only one
+- * skb will be queued.
++ * If we need to duplicate packet, then clone it before
++ * original is modified.
+ */
+- if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+- struct Qdisc *rootq = qdisc_root_bh(sch);
+- u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+-
+- q->duplicate = 0;
+- rootq->enqueue(skb2, rootq, to_free);
+- q->duplicate = dupsave;
+- rc_drop = NET_XMIT_SUCCESS;
+- }
++ if (count > 1)
++ skb2 = skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Randomized packet corruption.
+@@ -504,7 +494,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ if (skb_is_gso(skb)) {
+ skb = netem_segment(skb, sch, to_free);
+ if (!skb)
+- return rc_drop;
++ goto finish_segs;
++
+ segs = skb->next;
+ skb_mark_not_on_list(skb);
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+@@ -530,7 +521,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ /* re-link segs, so that qdisc_drop_all() frees them all */
+ skb->next = segs;
+ qdisc_drop_all(skb, sch, to_free);
+- return rc_drop;
++ if (skb2)
++ __qdisc_drop(skb2, to_free);
++ return NET_XMIT_DROP;
++ }
++
++ /*
++ * If doing duplication then re-insert at top of the
++ * qdisc tree, since parent queuer expects that only one
++ * skb will be queued.
++ */
++ if (skb2) {
++ struct Qdisc *rootq = qdisc_root_bh(sch);
++ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
++
++ q->duplicate = 0;
++ rootq->enqueue(skb2, rootq, to_free);
++ q->duplicate = dupsave;
++ skb2 = NULL;
+ }
+
+ qdisc_qstats_backlog_inc(sch, skb);
+@@ -601,9 +609,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+
+ finish_segs:
++ if (skb2)
++ __qdisc_drop(skb2, to_free);
++
+ if (segs) {
+ unsigned int len, last_len;
+- int nb;
++ int rc, nb;
+
+ len = skb ? skb->len : 0;
+ nb = skb ? 1 : 0;
+@@ -731,11 +742,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+
+ err = qdisc_enqueue(skb, q->qdisc, &to_free);
+ kfree_skb_list(to_free);
+- if (err != NET_XMIT_SUCCESS &&
+- net_xmit_drop_count(err)) {
+- qdisc_qstats_drop(sch);
+- qdisc_tree_reduce_backlog(sch, 1,
+- pkt_len);
++ if (err != NET_XMIT_SUCCESS) {
++ if (net_xmit_drop_count(err))
++ qdisc_qstats_drop(sch);
++ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ }
+ goto tfifo_dequeue;
+ }
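/*
 * Editorial note (not part of the patch): the netem_enqueue() rework above
 * clones the duplicate *before* the original skb can be corrupted or
 * segmented, and still zeroes q->duplicate around the root re-enqueue so a
 * duplicate cannot itself be duplicated.  Sketch of that save/clear/restore
 * idiom (names hypothetical):
 */
struct netem_state {
	unsigned int duplicate;	/* duplication probability */
};

static void enqueue_dup_at_root(struct netem_state *q, void *clone)
{
	unsigned int dupsave = q->duplicate;	/* prevent duplicating a dup */

	q->duplicate = 0;
	/* ...root->enqueue(clone, ...) may re-enter this qdisc... */
	q->duplicate = dupsave;
}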
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 1cb5e41c0ec724..87090d67903621 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1008,7 +1008,8 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
+ };
+
+ static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
+- [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
++ [TCA_TAPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
++ TC_QOPT_MAX_QUEUE),
+ [TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
+ [TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
+ TC_FP_EXPRESS,
+@@ -1160,11 +1161,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ list_for_each_entry(entry, &new->entries, list)
+ cycle = ktime_add_ns(cycle, entry->interval);
+
+- if (!cycle) {
+- NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
+- return -EINVAL;
+- }
+-
+ if (cycle < 0 || cycle > INT_MAX) {
+ NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
+ return -EINVAL;
+@@ -1173,6 +1169,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ new->cycle_time = cycle;
+ }
+
++ if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
++ NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
++ return -EINVAL;
++ }
++
+ taprio_calculate_gate_durations(q, new);
+
+ return 0;
+@@ -1185,16 +1186,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
+ {
+ bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
+
+- if (!qopt && !dev->num_tc) {
+- NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+- return -EINVAL;
+- }
+-
+- /* If num_tc is already set, it means that the user already
+- * configured the mqprio part
+- */
+- if (dev->num_tc)
++ if (!qopt) {
++ if (!dev->num_tc) {
++ NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
++ return -EINVAL;
++ }
+ return 0;
++ }
+
+ /* taprio imposes that traffic classes map 1:n to tx queues */
+ if (qopt->num_tc > dev->num_tx_queues) {
+@@ -1870,6 +1868,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+
+ q->flags = err;
+
++ /* Needed for length_to_duration() during netlink attribute parsing */
++ taprio_set_picos_per_byte(dev, q);
++
+ err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
+ if (err < 0)
+ return err;
+@@ -1929,7 +1930,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ if (err < 0)
+ goto free_sched;
+
+- taprio_set_picos_per_byte(dev, q);
+ taprio_update_queue_max_sdu(q, new_admin, stab);
+
+ if (FULL_OFFLOAD_IS_ENABLED(q->flags))
+@@ -1975,7 +1975,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ goto unlock;
+ }
+
+- rcu_assign_pointer(q->admin_sched, new_admin);
++ /* Not going to race against advance_sched(), but still */
++ admin = rcu_replace_pointer(q->admin_sched, new_admin,
++ lockdep_rtnl_is_held());
+ if (admin)
+ call_rcu(&admin->rcu, taprio_free_sched_cb);
+ } else {
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 17fcaa9b0df945..a8a254a5008e52 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -735,15 +735,19 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
+ struct sock *sk = ep->base.sk;
+ struct net *net = sock_net(sk);
+ struct sctp_hashbucket *head;
++ int err = 0;
+
+ ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
+ head = &sctp_ep_hashtable[ep->hashent];
+
++ write_lock(&head->lock);
+ if (sk->sk_reuseport) {
+ bool any = sctp_is_ep_boundall(sk);
+ struct sctp_endpoint *ep2;
+ struct list_head *list;
+- int cnt = 0, err = 1;
++ int cnt = 0;
++
++ err = 1;
+
+ list_for_each(list, &ep->base.bind_addr.address_list)
+ cnt++;
+@@ -761,24 +765,24 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
+ if (!err) {
+ err = reuseport_add_sock(sk, sk2, any);
+ if (err)
+- return err;
++ goto out;
+ break;
+ } else if (err < 0) {
+- return err;
++ goto out;
+ }
+ }
+
+ if (err) {
+ err = reuseport_alloc(sk, any);
+ if (err)
+- return err;
++ goto out;
+ }
+ }
+
+- write_lock(&head->lock);
+ hlist_add_head(&ep->node, &head->chain);
++out:
+ write_unlock(&head->lock);
+- return 0;
++ return err;
+ }
+
+ /* Add an endpoint to the hash. Local BH-safe. */
+@@ -803,10 +807,9 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
+
+ head = &sctp_ep_hashtable[ep->hashent];
+
++ write_lock(&head->lock);
+ if (rcu_access_pointer(sk->sk_reuseport_cb))
+ reuseport_detach_sock(sk);
+-
+- write_lock(&head->lock);
+ hlist_del_init(&ep->node);
+ write_unlock(&head->lock);
+ }
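/*
 * Editorial note (not part of the patch): the __sctp_hash_endpoint() change
 * above widens the head->lock critical section so the reuseport bookkeeping
 * and the hash insertion happen atomically, converting the early returns
 * into "goto out" so every path drops the lock exactly once.  The classic C
 * cleanup shape, sketched with pthreads (reuseport_setup() is a stub):
 */
#include <pthread.h>

static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;

static int reuseport_setup(void) { return 0; }	/* stub for the sketch */

static int hash_endpoint(void)
{
	int err;

	pthread_mutex_lock(&head_lock);
	err = reuseport_setup();
	if (err)
		goto out;	/* unlock on the failure path too */
	/* ...hlist_add_head(...): the insertion proper... */
out:
	pthread_mutex_unlock(&head_lock);
	return err;
}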
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 7182c5a450fb5b..5c165218180588 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -38,6 +38,14 @@ void sctp_inq_init(struct sctp_inq *queue)
+ INIT_WORK(&queue->immediate, NULL);
+ }
+
++/* Properly release the chunk which is being worked on. */
++static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
++{
++ if (chunk->head_skb)
++ chunk->skb = chunk->head_skb;
++ sctp_chunk_free(chunk);
++}
++
+ /* Release the memory associated with an SCTP inqueue. */
+ void sctp_inq_free(struct sctp_inq *queue)
+ {
+@@ -53,7 +61,7 @@ void sctp_inq_free(struct sctp_inq *queue)
+ * free it as well.
+ */
+ if (queue->in_progress) {
+- sctp_chunk_free(queue->in_progress);
++ sctp_inq_chunk_free(queue->in_progress);
+ queue->in_progress = NULL;
+ }
+ }
+@@ -130,9 +138,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ goto new_skb;
+ }
+
+- if (chunk->head_skb)
+- chunk->skb = chunk->head_skb;
+- sctp_chunk_free(chunk);
++ sctp_inq_chunk_free(chunk);
+ chunk = queue->in_progress = NULL;
+ } else {
+ /* Nothing to do. Next chunk in the packet, please. */
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 08fdf1251f46af..3649a4e1eb9de7 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2259,12 +2259,6 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ }
+ }
+
+- /* Update socket peer label if first association. */
+- if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+- sctp_association_free(new_asoc);
+- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+- }
+-
+ /* Set temp so that it won't be added into hashtable */
+ new_asoc->temp = 1;
+
+@@ -2273,6 +2267,22 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ */
+ action = sctp_tietags_compare(new_asoc, asoc);
+
++ /* In cases C and E the association doesn't enter the ESTABLISHED
++ * state, so there is no need to call security_sctp_assoc_request().
++ */
++ switch (action) {
++ case 'A': /* Association restart. */
++ case 'B': /* Collision case B. */
++ case 'D': /* Collision case D. */
++ /* Update socket peer label if first association. */
++ if (security_sctp_assoc_request((struct sctp_association *)asoc,
++ chunk->head_skb ?: chunk->skb)) {
++ sctp_association_free(new_asoc);
++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++ }
++ break;
++ }
++
+ switch (action) {
+ case 'A': /* Association restart. */
+ retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 7f89e43154c091..108a0745c0c3ca 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2099,6 +2099,13 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ pr_debug("%s: sk:%p, msghdr:%p, len:%zd, flags:0x%x, addr_len:%p)\n",
+ __func__, sk, msg, len, flags, addr_len);
+
++ if (unlikely(flags & MSG_ERRQUEUE))
++ return inet_recv_error(sk, msg, len, addr_len);
++
++ if (sk_can_busy_loop(sk) &&
++ skb_queue_empty_lockless(&sk->sk_receive_queue))
++ sk_busy_loop(sk, flags & MSG_DONTWAIT);
++
+ lock_sock(sk);
+
+ if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
+@@ -7111,6 +7118,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ struct sctp_assoc_ids *ids;
++ size_t ids_size;
+ u32 num = 0;
+
+ if (sctp_style(sk, TCP))
+@@ -7123,11 +7131,11 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ num++;
+ }
+
+- if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
++ ids_size = struct_size(ids, gaids_assoc_id, num);
++ if (len < ids_size)
+ return -EINVAL;
+
+- len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+-
++ len = ids_size;
+ ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
+ if (unlikely(!ids))
+ return -ENOMEM;
+@@ -8517,6 +8525,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
+ struct sctp_endpoint *ep = sp->ep;
+ struct crypto_shash *tfm = NULL;
+ char alg[32];
++ int err;
+
+ /* Allocate HMAC for generating cookie. */
+ if (!sp->hmac && sp->sctp_hmac_alg) {
+@@ -8543,17 +8552,26 @@ static int sctp_listen_start(struct sock *sk, int backlog)
+ */
+ inet_sk_set_state(sk, SCTP_SS_LISTENING);
+ if (!ep->base.bind_addr.port) {
+- if (sctp_autobind(sk))
+- return -EAGAIN;
++ if (sctp_autobind(sk)) {
++ err = -EAGAIN;
++ goto err;
++ }
+ } else {
+ if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
+- inet_sk_set_state(sk, SCTP_SS_CLOSED);
+- return -EADDRINUSE;
++ err = -EADDRINUSE;
++ goto err;
+ }
+ }
+
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
+- return sctp_hash_endpoint(ep);
++ err = sctp_hash_endpoint(ep);
++ if (err)
++ goto err;
++
++ return 0;
++err:
++ inet_sk_set_state(sk, SCTP_SS_CLOSED);
++ return err;
+ }
+
+ /*
+@@ -9043,12 +9061,6 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int *err)
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+
+- if (sk_can_busy_loop(sk)) {
+- sk_busy_loop(sk, flags & MSG_DONTWAIT);
+-
+- if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+- continue;
+- }
+
+ /* User doesn't want to wait. */
+ error = -EAGAIN;
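/*
 * Editorial note (not part of the patch): the sctp_getsockopt_assoc_ids()
 * hunk above replaces the open-coded "sizeof(hdr) + n * sizeof(elem)" with
 * struct_size(), which saturates instead of wrapping when n is
 * attacker-influenced.  Userspace equivalent using the GCC/Clang overflow
 * builtins (struct layout is a hypothetical stand-in):
 */
#include <stdint.h>
#include <stdlib.h>

struct assoc_ids {
	uint32_t num;
	int32_t  ids[];		/* flexible array member */
};

static struct assoc_ids *alloc_ids(size_t num)
{
	size_t bytes;

	/* struct_size()-style: fail cleanly on arithmetic overflow */
	if (__builtin_mul_overflow(num, sizeof(int32_t), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct assoc_ids), &bytes))
		return NULL;

	return malloc(bytes);
}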
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 35ddebae88941b..3158b94fd347a6 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc)
+
+ if (!smc->use_fallback) {
+ rc = smc_close_active(smc);
+- sock_set_flag(sk, SOCK_DEAD);
++ smc_sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ } else {
+ if (sk->sk_state != SMC_CLOSED) {
+@@ -460,29 +460,11 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
+ unsigned long mask)
+ {
+- struct net *nnet = sock_net(nsk);
+-
+ nsk->sk_userlocks = osk->sk_userlocks;
+- if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
++ if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ nsk->sk_sndbuf = osk->sk_sndbuf;
+- } else {
+- if (mask == SK_FLAGS_SMC_TO_CLC)
+- WRITE_ONCE(nsk->sk_sndbuf,
+- READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
+- else
+- WRITE_ONCE(nsk->sk_sndbuf,
+- 2 * READ_ONCE(nnet->smc.sysctl_wmem));
+- }
+- if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
++ if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
+ nsk->sk_rcvbuf = osk->sk_rcvbuf;
+- } else {
+- if (mask == SK_FLAGS_SMC_TO_CLC)
+- WRITE_ONCE(nsk->sk_rcvbuf,
+- READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
+- else
+- WRITE_ONCE(nsk->sk_rcvbuf,
+- 2 * READ_ONCE(nnet->smc.sysctl_rmem));
+- }
+ }
+
+ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+@@ -598,8 +580,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
+ struct smc_llc_qentry *qentry;
+ int rc;
+
+- /* receive CONFIRM LINK request from server over RoCE fabric */
+- qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
++ /* Receive CONFIRM LINK request from server over RoCE fabric.
++ * Waiting twice as long as the server's default timeout can
++ * temporarily avoid decline messages from the two sides
++ * crossing or colliding.
++ */
++ qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
+ SMC_LLC_CONFIRM_LINK);
+ if (!qentry) {
+ struct smc_clc_msg_decline dclc;
+@@ -719,7 +705,7 @@ static void smcd_conn_save_peer_info(struct smc_sock *smc,
+ int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
+
+ smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
+- smc->conn.peer_token = clc->d0.token;
++ smc->conn.peer_token = ntohll(clc->d0.token);
+ /* msg header takes up space in the buffer */
+ smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
+ atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
+@@ -1411,7 +1397,7 @@ static int smc_connect_ism(struct smc_sock *smc,
+ if (rc)
+ return rc;
+ }
+- ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
++ ini->ism_peer_gid[ini->ism_selected] = ntohll(aclc->d0.gid);
+
+ /* there is only one lgr role for SMC-D; use server lock */
+ mutex_lock(&smc_server_lgr_pending);
+@@ -1743,7 +1729,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+ if (new_clcsock)
+ sock_release(new_clcsock);
+ new_sk->sk_state = SMC_CLOSED;
+- sock_set_flag(new_sk, SOCK_DEAD);
++ smc_sock_set_flag(new_sk, SOCK_DEAD);
+ sock_put(new_sk); /* final */
+ *new_smc = NULL;
+ goto out;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 24745fde4ac264..e377980b84145d 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
+ int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+ int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+
++static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
++{
++ set_bit(flag, &sk->sk_flags);
++}
++
+ #endif /* __SMC_H */
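/*
 * Editorial note (not part of the patch): smc_sock_set_flag() above uses
 * the atomic set_bit() rather than the plain bit op behind the generic
 * sock_set_flag(), presumably so flag updates from racing contexts cannot
 * be lost.  C11 rendering of the difference:
 */
#include <stdatomic.h>

/* plain RMW: two racing callers can lose one another's bit */
static void set_flag_plain(unsigned long *flags, int bit)
{
	*flags |= 1UL << bit;
}

/* atomic RMW, as set_bit() provides in the kernel */
static void set_flag_atomic(atomic_ulong *flags, int bit)
{
	atomic_fetch_or(flags, 1UL << bit);
}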
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 89105e95b4523f..3c06625ceb200e 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+ {
+ struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+ struct smc_connection *conn = cdcpend->conn;
++ struct smc_buf_desc *sndbuf_desc;
+ struct smc_sock *smc;
+ int diff;
+
++ sndbuf_desc = conn->sndbuf_desc;
+ smc = container_of(conn, struct smc_sock, conn);
+ bh_lock_sock(&smc->sk);
+- if (!wc_status) {
+- diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
++ if (!wc_status && sndbuf_desc) {
++ diff = smc_curs_diff(sndbuf_desc->len,
+ &cdcpend->conn->tx_curs_fin,
+ &cdcpend->cursor);
+ /* sndbuf_space is decreased in smc_sendmsg */
+@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ union smc_host_cursor cfed;
+ int rc;
+
+- if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+- return -ENOBUFS;
+-
+ smc_cdc_add_pending_send(conn, pend);
+
+ conn->tx_cdc_seq++;
+@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
+ smc->sk.sk_shutdown |= RCV_SHUTDOWN;
+ if (smc->clcsock && smc->clcsock->sk)
+ smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+- sock_set_flag(&smc->sk, SOCK_DONE);
++ smc_sock_set_flag(&smc->sk, SOCK_DONE);
+ sock_hold(&smc->sk); /* sock_put in close_work */
+ if (!queue_work(smc_close_wq, &conn->close_work))
+ sock_put(&smc->sk);
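/*
 * Editorial note (not part of the patch): smc_cdc_tx_handler() above reads
 * conn->sndbuf_desc into a local once and NULL-checks that snapshot, so a
 * concurrent buffer teardown cannot slip in between a check on the shared
 * pointer and its use.  Sketch of the read-once idiom (names hypothetical):
 */
#include <stddef.h>

struct buf_desc { int len; };
struct conn_state { struct buf_desc *sndbuf_desc; };

static int tx_fin_len(struct conn_state *c)
{
	struct buf_desc *d = c->sndbuf_desc;	/* single read */

	return d ? d->len : 0;		/* never re-read c->sndbuf_desc */
}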
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 8deb46c28f1d55..1489a8421d7862 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -155,10 +155,12 @@ static int smc_clc_ueid_remove(char *ueid)
+ rc = 0;
+ }
+ }
++#if IS_ENABLED(CONFIG_S390)
+ if (!rc && !smc_clc_eid_table.ueid_cnt) {
+ smc_clc_eid_table.seid_enabled = 1;
+ rc = -EAGAIN; /* indicate success and enabling of seid */
+ }
++#endif
+ write_unlock(&smc_clc_eid_table.lock);
+ return rc;
+ }
+@@ -273,22 +275,30 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
+
+ int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info)
+ {
++#if IS_ENABLED(CONFIG_S390)
+ write_lock(&smc_clc_eid_table.lock);
+ smc_clc_eid_table.seid_enabled = 1;
+ write_unlock(&smc_clc_eid_table.lock);
+ return 0;
++#else
++ return -EOPNOTSUPP;
++#endif
+ }
+
+ int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info)
+ {
+ int rc = 0;
+
++#if IS_ENABLED(CONFIG_S390)
+ write_lock(&smc_clc_eid_table.lock);
+ if (!smc_clc_eid_table.ueid_cnt)
+ rc = -ENOENT;
+ else
+ smc_clc_eid_table.seid_enabled = 0;
+ write_unlock(&smc_clc_eid_table.lock);
++#else
++ rc = -EOPNOTSUPP;
++#endif
+ return rc;
+ }
+
+@@ -1004,6 +1014,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ {
+ struct smc_connection *conn = &smc->conn;
+ struct smc_clc_first_contact_ext_v2x fce;
++ struct smcd_dev *smcd = conn->lgr->smcd;
+ struct smc_clc_msg_accept_confirm *clc;
+ struct smc_clc_fce_gid_ext gle;
+ struct smc_clc_msg_trail trl;
+@@ -1021,17 +1032,15 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
+ sizeof(SMCD_EYECATCHER));
+ clc->hdr.typev1 = SMC_TYPE_D;
+- clc->d0.gid =
+- conn->lgr->smcd->ops->get_local_gid(conn->lgr->smcd);
+- clc->d0.token = conn->rmb_desc->token;
++ clc->d0.gid = htonll(smcd->ops->get_local_gid(smcd));
++ clc->d0.token = htonll(conn->rmb_desc->token);
+ clc->d0.dmbe_size = conn->rmbe_size_comp;
+ clc->d0.dmbe_idx = 0;
+ memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+ if (version == SMC_V1) {
+ clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+ } else {
+- clc_v2->d1.chid =
+- htons(smc_ism_get_chid(conn->lgr->smcd));
++ clc_v2->d1.chid = htons(smc_ism_get_chid(smcd));
+ if (eid && eid[0])
+ memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN);
+ len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
+@@ -1270,7 +1279,11 @@ void __init smc_clc_init(void)
+ INIT_LIST_HEAD(&smc_clc_eid_table.list);
+ rwlock_init(&smc_clc_eid_table.lock);
+ smc_clc_eid_table.ueid_cnt = 0;
++#if IS_ENABLED(CONFIG_S390)
+ smc_clc_eid_table.seid_enabled = 1;
++#else
++ smc_clc_eid_table.seid_enabled = 0;
++#endif
+ }
+
+ void smc_clc_exit(void)
+diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
+index c5c8e7db775a76..08155a96a02a17 100644
+--- a/net/smc/smc_clc.h
++++ b/net/smc/smc_clc.h
+@@ -204,8 +204,8 @@ struct smcr_clc_msg_accept_confirm { /* SMCR accept/confirm */
+ } __packed;
+
+ struct smcd_clc_msg_accept_confirm_common { /* SMCD accept/confirm */
+- u64 gid; /* Sender GID */
+- u64 token; /* DMB token */
++ __be64 gid; /* Sender GID */
++ __be64 token; /* DMB token */
+ u8 dmbe_idx; /* DMBE index */
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u8 dmbe_size : 4, /* buf size (compressed) */
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index dbdf03e8aa5b55..10219f55aad14d 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
+ struct sock *sk = &smc->sk;
+
+ release_sock(sk);
+- cancel_work_sync(&smc->conn.close_work);
++ if (cancel_work_sync(&smc->conn.close_work))
++ sock_put(sk);
+ cancel_delayed_work_sync(&smc->conn.tx_work);
+ lock_sock(sk);
+ }
+@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
+ break;
+ }
+
+- sock_set_flag(sk, SOCK_DEAD);
++ smc_sock_set_flag(sk, SOCK_DEAD);
+ sk->sk_state_change(sk);
+
+ if (release_clcsock) {
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index d520ee62c8ecd6..f99bb9d0adcc6d 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1974,7 +1974,6 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
+ */
+ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
+ {
+- const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
+ u8 compressed;
+
+ if (size <= SMC_BUF_MIN_SIZE)
+@@ -1984,9 +1983,11 @@ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
+ compressed = min_t(u8, ilog2(size) + 1,
+ is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
+
++#ifdef CONFIG_ARCH_NO_SG_CHAIN
+ if (!is_smcd && is_rmb)
+ /* RMBs are backed by & limited to max size of scatterlists */
+- compressed = min_t(u8, compressed, ilog2(max_scat >> 14));
++ compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14));
++#endif
+
+ return compressed;
+ }
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index 7ff2152971a5b8..37833b96b508ef 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -153,8 +153,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+ .lnk[0].link_id = link->link_id,
+ };
+
+- memcpy(linfo.lnk[0].ibname,
+- smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
++ memcpy(linfo.lnk[0].ibname, link->smcibdev->ibdev->name,
+ sizeof(link->smcibdev->ibdev->name));
+ smc_gid_be16_convert(linfo.lnk[0].gid, link->gid);
+ smc_gid_be16_convert(linfo.lnk[0].peer_gid, link->peer_gid);
+@@ -164,7 +163,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+ }
+ if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
+ (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
+- !list_empty(&smc->conn.lgr->list)) {
++ !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
+ struct smc_connection *conn = &smc->conn;
+ struct smcd_diag_dmbinfo dinfo;
+ struct smcd_dev *smcd = conn->lgr->smcd;
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index 89981dbe46c946..598ac9ead64b72 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -209,13 +209,18 @@ int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
+ if (IS_ERR(rt))
+ goto out;
+ if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
+- goto out;
+- neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+- if (neigh) {
+- memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
+- *uses_gateway = rt->rt_uses_gateway;
+- return 0;
+- }
++ goto out_rt;
++ neigh = dst_neigh_lookup(&rt->dst, &fl4.daddr);
++ if (!neigh)
++ goto out_rt;
++ memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
++ *uses_gateway = rt->rt_uses_gateway;
++ neigh_release(neigh);
++ ip_rt_put(rt);
++ return 0;
++
++out_rt:
++ ip_rt_put(rt);
+ out:
+ return -ENOENT;
+ }
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 11775401df6894..306b536fa89e98 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -806,6 +806,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
+ u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ struct net_device *dev;
+
++ /* Newly created netns do not have devices.
++ * Do not even acquire rtnl.
++ */
++ if (list_empty(&net->dev_base_head))
++ return;
++
++ /* Note: This might not be needed, because smc_pnet_netdev_event()
++	 * also calls smc_pnet_add_base_pnetid() when handling the
++ * NETDEV_UP event.
++ */
+ rtnl_lock();
+ for_each_netdev(net, dev)
+ smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
+diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
+index 9d32058db2b5d6..e19177ce409230 100644
+--- a/net/smc/smc_stats.h
++++ b/net/smc/smc_stats.h
+@@ -19,7 +19,7 @@
+
+ #include "smc_clc.h"
+
+-#define SMC_MAX_FBACK_RSN_CNT 30
++#define SMC_MAX_FBACK_RSN_CNT 36
+
+ enum {
+ SMC_BUF_8K,
+diff --git a/net/socket.c b/net/socket.c
+index c4a6f55329552d..bad58f23f30722 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -757,6 +757,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
+ {
+ struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
+ struct sockaddr_storage address;
++ int save_len = msg->msg_namelen;
+ int ret;
+
+ if (msg->msg_name) {
+@@ -766,6 +767,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
+
+ ret = __sock_sendmsg(sock, msg);
+ msg->msg_name = save_addr;
++ msg->msg_namelen = save_len;
+
+ return ret;
+ }
+@@ -1567,8 +1569,13 @@ int __sock_create(struct net *net, int family, int type, int protocol,
+ rcu_read_unlock();
+
+ err = pf->create(net, sock, protocol, kern);
+- if (err < 0)
++ if (err < 0) {
++		/* ->create should release the allocated sock->sk object on error,
++		 * but it may leave a dangling pointer behind
++ */
++ sock->sk = NULL;
+ goto out_module_put;
++ }
+
+ /*
+ * Now to bump the refcnt of the [loadable] module that owns this
+@@ -2279,33 +2286,23 @@ static bool sock_use_custom_sol_socket(const struct socket *sock)
+ return test_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
+ }
+
+-/*
+- * Set a socket option. Because we don't know the option lengths we have
+- * to pass the user mode parameter for the protocols to sort out.
+- */
+-int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
+- int optlen)
++int do_sock_setsockopt(struct socket *sock, bool compat, int level,
++ int optname, sockptr_t optval, int optlen)
+ {
+- sockptr_t optval = USER_SOCKPTR(user_optval);
+ const struct proto_ops *ops;
+ char *kernel_optval = NULL;
+- int err, fput_needed;
+- struct socket *sock;
++ int err;
+
+ if (optlen < 0)
+ return -EINVAL;
+
+- sock = sockfd_lookup_light(fd, &err, &fput_needed);
+- if (!sock)
+- return err;
+-
+ err = security_socket_setsockopt(sock, level, optname);
+ if (err)
+ goto out_put;
+
+- if (!in_compat_syscall())
++ if (!compat)
+ err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
+- user_optval, &optlen,
++ optval, &optlen,
+ &kernel_optval);
+ if (err < 0)
+ goto out_put;
+@@ -2326,6 +2323,27 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
+ optlen);
+ kfree(kernel_optval);
+ out_put:
++ return err;
++}
++EXPORT_SYMBOL(do_sock_setsockopt);
++
++/* Set a socket option. Because we don't know the option lengths we have
++ * to pass the user mode parameter for the protocols to sort out.
++ */
++int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
++ int optlen)
++{
++ sockptr_t optval = USER_SOCKPTR(user_optval);
++ bool compat = in_compat_syscall();
++ int err, fput_needed;
++ struct socket *sock;
++
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ if (!sock)
++ return err;
++
++ err = do_sock_setsockopt(sock, compat, level, optname, optval, optlen);
++
+ fput_light(sock->file, fput_needed);
+ return err;
+ }
+@@ -2339,6 +2357,43 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
+ int optname));
+
++int do_sock_getsockopt(struct socket *sock, bool compat, int level,
++ int optname, sockptr_t optval, sockptr_t optlen)
++{
++ int max_optlen __maybe_unused = 0;
++ const struct proto_ops *ops;
++ int err;
++
++ err = security_socket_getsockopt(sock, level, optname);
++ if (err)
++ return err;
++
++ if (!compat)
++ copy_from_sockptr(&max_optlen, optlen, sizeof(int));
++
++ ops = READ_ONCE(sock->ops);
++ if (level == SOL_SOCKET) {
++ err = sk_getsockopt(sock->sk, level, optname, optval, optlen);
++ } else if (unlikely(!ops->getsockopt)) {
++ err = -EOPNOTSUPP;
++ } else {
++ if (WARN_ONCE(optval.is_kernel || optlen.is_kernel,
++ "Invalid argument type"))
++ return -EOPNOTSUPP;
++
++ err = ops->getsockopt(sock, level, optname, optval.user,
++ optlen.user);
++ }
++
++ if (!compat)
++ err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
++ optval, optlen, max_optlen,
++ err);
++
++ return err;
++}
++EXPORT_SYMBOL(do_sock_getsockopt);
++
+ /*
+ * Get a socket option. Because we don't know the option lengths we have
+ * to pass a user mode parameter for the protocols to sort out.
+@@ -2346,36 +2401,18 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
+ int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
+ int __user *optlen)
+ {
+- int max_optlen __maybe_unused;
+- const struct proto_ops *ops;
+ int err, fput_needed;
+ struct socket *sock;
++ bool compat;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ return err;
+
+- err = security_socket_getsockopt(sock, level, optname);
+- if (err)
+- goto out_put;
+-
+- if (!in_compat_syscall())
+- max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
+-
+- ops = READ_ONCE(sock->ops);
+- if (level == SOL_SOCKET)
+- err = sock_getsockopt(sock, level, optname, optval, optlen);
+- else if (unlikely(!ops->getsockopt))
+- err = -EOPNOTSUPP;
+- else
+- err = ops->getsockopt(sock, level, optname, optval,
+- optlen);
++ compat = in_compat_syscall();
++ err = do_sock_getsockopt(sock, compat, level, optname,
++ USER_SOCKPTR(optval), USER_SOCKPTR(optlen));
+
+- if (!in_compat_syscall())
+- err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
+- optval, optlen, max_optlen,
+- err);
+-out_put:
+ fput_light(sock->file, fput_needed);
+ return err;
+ }
+diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
+index d435bffc619997..97ff11973c4937 100644
+--- a/net/sunrpc/addr.c
++++ b/net/sunrpc/addr.c
+@@ -284,10 +284,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
+ }
+
+ if (snprintf(portbuf, sizeof(portbuf),
+- ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
++ ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf))
+ return NULL;
+
+- if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
++ if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf))
+ return NULL;
+
+ return kstrdup(addrbuf, gfp_flags);
+diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
+index 814b0169f97230..ec41b26af76e22 100644
+--- a/net/sunrpc/auth.c
++++ b/net/sunrpc/auth.c
+@@ -40,9 +40,6 @@ static unsigned long number_cred_unused;
+
+ static struct cred machine_cred = {
+ .usage = ATOMIC_INIT(1),
+-#ifdef CONFIG_DEBUG_CREDENTIALS
+- .magic = CRED_MAGIC,
+-#endif
+ };
+
+ /*
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 1af71fbb0d8059..00753bc5f1b147 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
+ maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
+ /* slack space should prevent this ever happening: */
+- if (unlikely(snd_buf->len > snd_buf->buflen))
++ if (unlikely(snd_buf->len > snd_buf->buflen)) {
++ status = -EIO;
+ goto wrap_failed;
++ }
+ /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
+ * done anyway, so it's safe to put the request on the wire: */
+ if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
+index 06d8ee0db000fb..4eb19c3a54c70e 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
++++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
+@@ -168,7 +168,7 @@ static int krb5_DK(const struct gss_krb5_enctype *gk5e,
+ goto err_return;
+ blocksize = crypto_sync_skcipher_blocksize(cipher);
+ if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
+- goto err_return;
++ goto err_free_cipher;
+
+ ret = -ENOMEM;
+ inblockdata = kmalloc(blocksize, gfp_mask);
+diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
+index e31cfdf7eadcb9..f6fc80e1d658be 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
+@@ -398,6 +398,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
+ u64 seq_send64;
+ int keylen;
+ u32 time32;
++ int ret;
+
+ p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
+ if (IS_ERR(p))
+@@ -450,8 +451,16 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
+ }
+ ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
+
+- return gss_krb5_import_ctx_v2(ctx, gfp_mask);
++ ret = gss_krb5_import_ctx_v2(ctx, gfp_mask);
++ if (ret) {
++ p = ERR_PTR(ret);
++ goto out_free;
++ }
+
++ return 0;
++
++out_free:
++ kfree(ctx->mech_used.data);
+ out_err:
+ return PTR_ERR(p);
+ }
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index d79f12c2550ac3..cb32ab9a839521 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -250,8 +250,8 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+
+ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ if (!creds) {
+- kfree(oa->data);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto free_oa;
+ }
+
+ oa->data[0].option.data = CREDS_VALUE;
+@@ -265,29 +265,40 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+
+ /* option buffer */
+ p = xdr_inline_decode(xdr, 4);
+- if (unlikely(p == NULL))
+- return -ENOSPC;
++ if (unlikely(p == NULL)) {
++ err = -ENOSPC;
++ goto free_creds;
++ }
+
+ length = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, length);
+- if (unlikely(p == NULL))
+- return -ENOSPC;
++ if (unlikely(p == NULL)) {
++ err = -ENOSPC;
++ goto free_creds;
++ }
+
+ if (length == sizeof(CREDS_VALUE) &&
+ memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
+ /* We have creds here. parse them */
+ err = gssx_dec_linux_creds(xdr, creds);
+ if (err)
+- return err;
++ goto free_creds;
+ oa->data[0].value.len = 1; /* presence */
+ } else {
+ /* consume uninteresting buffer */
+ err = gssx_dec_buffer(xdr, &dummy);
+ if (err)
+- return err;
++ goto free_creds;
+ }
+ }
+ return 0;
++
++free_creds:
++ kfree(creds);
++free_oa:
++ kfree(oa->data);
++ oa->data = NULL;
++ return err;
+ }
+
+ static int gssx_dec_status(struct xdr_stream *xdr,
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 18734e70c5ddb1..cf30bd649e2704 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1043,17 +1043,11 @@ svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
+
+ static void gss_free_in_token_pages(struct gssp_in_token *in_token)
+ {
+- u32 inlen;
+ int i;
+
+ i = 0;
+- inlen = in_token->page_len;
+- while (inlen) {
+- if (in_token->pages[i])
+- put_page(in_token->pages[i]);
+- inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
+- }
+-
++ while (in_token->pages[i])
++ put_page(in_token->pages[i++]);
+ kfree(in_token->pages);
+ in_token->pages = NULL;
+ }
+@@ -1085,7 +1079,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ goto out_denied_free;
+
+ pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+- in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
++ in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (!in_token->pages)
+ goto out_denied_free;
+ in_token->page_base = 0;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 9c210273d06b7f..142ee6554848a6 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+- __rpc_clnt_remove_pipedir(clnt);
++ if (pipefs_sb == clnt->pipefs_sb)
++ __rpc_clnt_remove_pipedir(clnt);
+ rpc_put_sb_net(net);
+ }
+ }
+@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
+ {
+ struct dentry *dentry;
+
++ clnt->pipefs_sb = pipefs_sb;
++
+ if (clnt->cl_program->pipe_dir_name != NULL) {
+ dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
+ if (IS_ERR(dentry))
+@@ -396,7 +399,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
+ clnt->cl_maxproc = version->nrprocs;
+ clnt->cl_prog = args->prognumber ? : program->number;
+ clnt->cl_vers = version->number;
+- clnt->cl_stats = program->stats;
++ clnt->cl_stats = args->stats ? : program->stats;
+ clnt->cl_metrics = rpc_alloc_iostats(clnt);
+ rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
+ err = -ENOMEM;
+@@ -682,6 +685,7 @@ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
+ .version = clnt->cl_vers,
+ .authflavor = clnt->cl_auth->au_flavor,
+ .cred = clnt->cl_cred,
++ .stats = clnt->cl_stats,
+ };
+ return __rpc_clone_client(&args, clnt);
+ }
+@@ -704,6 +708,7 @@ rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
+ .version = clnt->cl_vers,
+ .authflavor = flavor,
+ .cred = clnt->cl_cred,
++ .stats = clnt->cl_stats,
+ };
+ return __rpc_clone_client(&args, clnt);
+ }
+@@ -1050,6 +1055,8 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
+ .version = vers,
+ .authflavor = old->cl_auth->au_flavor,
+ .cred = old->cl_cred,
++ .stats = old->cl_stats,
++ .timeout = old->cl_timeout,
+ };
+ struct rpc_clnt *clnt;
+ int err;
+@@ -2171,6 +2178,7 @@ call_connect_status(struct rpc_task *task)
+ task->tk_status = 0;
+ switch (status) {
+ case -ECONNREFUSED:
++ case -ECONNRESET:
+ /* A positive refusal suggests a rebind is needed. */
+ if (RPC_IS_SOFTCONN(task))
+ break;
+@@ -2179,7 +2187,6 @@ call_connect_status(struct rpc_task *task)
+ goto out_retry;
+ }
+ fallthrough;
+- case -ECONNRESET:
+ case -ECONNABORTED:
+ case -ENETDOWN:
+ case -ENETUNREACH:
+@@ -2303,12 +2310,13 @@ call_transmit_status(struct rpc_task *task)
+ task->tk_action = call_transmit;
+ task->tk_status = 0;
+ break;
+- case -ECONNREFUSED:
+ case -EHOSTDOWN:
+ case -ENETDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EPERM:
++ break;
++ case -ECONNREFUSED:
+ if (RPC_IS_SOFTCONN(task)) {
+ if (!task->tk_msg.rpc_proc->p_proc)
+ trace_xprt_ping(task->tk_xprt,
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 5988a5c5ff3f0c..102c3818bc54d4 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
+
+ child = rpcb_call_async(rpcb_clnt, map, proc);
+ rpc_release_client(rpcb_clnt);
++ if (IS_ERR(child)) {
++ /* rpcb_map_release() has freed the arguments */
++ return;
++ }
+
+ xprt->stat.bind_count++;
+ rpc_put_task(child);
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 6debf4fd42d4e8..cef623ea150609 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -369,8 +369,10 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
+ if (RPC_IS_ASYNC(task)) {
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
+ queue_work(wq, &task->u.tk_work);
+- } else
++ } else {
++ smp_mb__after_atomic();
+ wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
++ }
+ }
+
+ /*
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index 65fc1297c6dfa4..383860cb1d5b0f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(rpc_proc_unregister);
+ struct proc_dir_entry *
+ svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
+ {
+- return do_register(net, statp->program->pg_name, statp, proc_ops);
++ return do_register(net, statp->program->pg_name, net, proc_ops);
+ }
+ EXPORT_SYMBOL_GPL(svc_proc_register);
+
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 812fda9d45dd63..029c49065016ac 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -453,8 +453,8 @@ __svc_init_bc(struct svc_serv *serv)
+ * Create an RPC service
+ */
+ static struct svc_serv *
+-__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+- int (*threadfn)(void *data))
++__svc_create(struct svc_program *prog, struct svc_stat *stats,
++ unsigned int bufsize, int npools, int (*threadfn)(void *data))
+ {
+ struct svc_serv *serv;
+ unsigned int vers;
+@@ -466,7 +466,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+ serv->sv_name = prog->pg_name;
+ serv->sv_program = prog;
+ kref_init(&serv->sv_refcnt);
+- serv->sv_stats = prog->pg_stats;
++ serv->sv_stats = stats;
+ if (bufsize > RPCSVC_MAXPAYLOAD)
+ bufsize = RPCSVC_MAXPAYLOAD;
+ serv->sv_max_payload = bufsize? bufsize : 4096;
+@@ -532,26 +532,28 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
+ int (*threadfn)(void *data))
+ {
+- return __svc_create(prog, bufsize, 1, threadfn);
++ return __svc_create(prog, NULL, bufsize, 1, threadfn);
+ }
+ EXPORT_SYMBOL_GPL(svc_create);
+
+ /**
+ * svc_create_pooled - Create an RPC service with pooled threads
+ * @prog: the RPC program the new service will handle
++ * @stats: the stats struct if desired
+ * @bufsize: maximum message size for @prog
+ * @threadfn: a function to service RPC requests for @prog
+ *
+ * Returns an instantiated struct svc_serv object or NULL.
+ */
+ struct svc_serv *svc_create_pooled(struct svc_program *prog,
++ struct svc_stat *stats,
+ unsigned int bufsize,
+ int (*threadfn)(void *data))
+ {
+ struct svc_serv *serv;
+ unsigned int npools = svc_pool_map_get();
+
+- serv = __svc_create(prog, bufsize, npools, threadfn);
++ serv = __svc_create(prog, stats, bufsize, npools, threadfn);
+ if (!serv)
+ goto out_err;
+ return serv;
+@@ -1265,8 +1267,6 @@ svc_generic_init_request(struct svc_rqst *rqstp,
+ if (rqstp->rq_proc >= versp->vs_nproc)
+ goto err_bad_proc;
+ rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
+- if (!procp)
+- goto err_bad_proc;
+
+ /* Initialize storage for argp and resp */
+ memset(rqstp->rq_argp, 0, procp->pc_argzero);
+@@ -1379,7 +1379,8 @@ svc_process_common(struct svc_rqst *rqstp)
+ goto err_bad_proc;
+
+ /* Syntactic check complete */
+- serv->sv_stats->rpccnt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpccnt++;
+ trace_svc_process(rqstp, progp->pg_name);
+
+ aoffset = xdr_stream_pos(xdr);
+@@ -1431,7 +1432,8 @@ svc_process_common(struct svc_rqst *rqstp)
+ goto close_xprt;
+
+ err_bad_rpc:
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
+ xdr_stream_encode_u32(xdr, RPC_MISMATCH);
+ /* Only RPCv2 supported */
+@@ -1442,7 +1444,8 @@ svc_process_common(struct svc_rqst *rqstp)
+ err_bad_auth:
+ dprintk("svc: authentication failed (%d)\n",
+ be32_to_cpu(rqstp->rq_auth_stat));
+- serv->sv_stats->rpcbadauth++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadauth++;
+ /* Restore write pointer to location of reply status: */
+ xdr_truncate_encode(xdr, XDR_UNIT * 2);
+ xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
+@@ -1452,7 +1455,8 @@ svc_process_common(struct svc_rqst *rqstp)
+
+ err_bad_prog:
+ dprintk("svc: unknown program %d\n", rqstp->rq_prog);
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ *rqstp->rq_accept_statp = rpc_prog_unavail;
+ goto sendit;
+
+@@ -1460,7 +1464,8 @@ svc_process_common(struct svc_rqst *rqstp)
+ svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
+ rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
+
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ *rqstp->rq_accept_statp = rpc_prog_mismatch;
+
+ /*
+@@ -1474,19 +1479,22 @@ svc_process_common(struct svc_rqst *rqstp)
+ err_bad_proc:
+ svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
+
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ *rqstp->rq_accept_statp = rpc_proc_unavail;
+ goto sendit;
+
+ err_garbage_args:
+ svc_printk(rqstp, "failed to decode RPC header\n");
+
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ *rqstp->rq_accept_statp = rpc_garbage_args;
+ goto sendit;
+
+ err_system_err:
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ *rqstp->rq_accept_statp = rpc_system_err;
+ goto sendit;
+ }
+@@ -1538,7 +1546,8 @@ void svc_process(struct svc_rqst *rqstp)
+ out_baddir:
+ svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
+ be32_to_cpu(*p));
+- rqstp->rq_server->sv_stats->rpcbadfmt++;
++ if (rqstp->rq_server->sv_stats)
++ rqstp->rq_server->sv_stats->rpcbadfmt++;
+ out_drop:
+ svc_drop(rqstp);
+ }
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 4cfe9640df4814..5cfe5c7408b744 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -666,9 +666,8 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
+ }
+
+ for (filled = 0; filled < pages; filled = ret) {
+- ret = alloc_pages_bulk_array_node(GFP_KERNEL,
+- rqstp->rq_pool->sp_id,
+- pages, rqstp->rq_pages);
++ ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
++ rqstp->rq_pages);
+ if (ret > filled)
+ /* Made progress, don't sleep yet */
+ continue;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 998687421fa6af..933e12e3a55c75 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
+ ARRAY_SIZE(rqstp->rq_bvec), xdr);
+
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+- count, 0);
++ count, rqstp->rq_res.len);
+ err = sock_sendmsg(svsk->sk_sock, &msg);
+ if (err == -ECONNREFUSED) {
+ /* ICMP error on earlier request. */
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+- count, 0);
++ count, rqstp->rq_res.len);
+ err = sock_sendmsg(svsk->sk_sock, &msg);
+ }
+
+@@ -1216,15 +1216,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ * MSG_SPLICE_PAGES is used exclusively to reduce the number of
+ * copy operations in this path. Therefore the caller must ensure
+ * that the pages backing @xdr are unchanging.
+- *
+- * Note that the send is non-blocking. The caller has incremented
+- * the reference count on each page backing the RPC message, and
+- * the network layer will "put" these pages when transmission is
+- * complete.
+- *
+- * This is safe for our RPC services because the memory backing
+- * the head and tail components is never kmalloc'd. These always
+- * come from pages in the svc_rqst::rq_pages array.
+ */
+ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
+ rpc_fraghdr marker, unsigned int *sentp)
+@@ -1254,6 +1245,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ 1 + count, sizeof(marker) + rqstp->rq_res.len);
+ ret = sock_sendmsg(svsk->sk_sock, &msg);
++ page_frag_free(buf);
+ if (ret < 0)
+ return ret;
+ *sentp += ret;
+diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
+index 701250b305dba9..720d3ba742ec02 100644
+--- a/net/sunrpc/xprtmultipath.c
++++ b/net/sunrpc/xprtmultipath.c
+@@ -284,7 +284,7 @@ struct rpc_xprt *_xprt_switch_find_current_entry(struct list_head *head,
+ if (cur == pos)
+ found = true;
+ if (found && ((find_active && xprt_is_active(pos)) ||
+- (!find_active && xprt_is_active(pos))))
++ (!find_active && !xprt_is_active(pos))))
+ return pos;
+ }
+ return NULL;
+@@ -336,8 +336,9 @@ struct rpc_xprt *xprt_iter_current_entry_offline(struct rpc_xprt_iter *xpi)
+ xprt_switch_find_current_entry_offline);
+ }
+
+-bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+- const struct sockaddr *sap)
++static
++bool __rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
++ const struct sockaddr *sap)
+ {
+ struct list_head *head;
+ struct rpc_xprt *pos;
+@@ -356,6 +357,18 @@ bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+ return false;
+ }
+
++bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
++ const struct sockaddr *sap)
++{
++ bool res;
++
++ rcu_read_lock();
++ res = __rpc_xprt_switch_has_addr(xps, sap);
++ rcu_read_unlock();
++
++ return res;
++}
++
+ static
+ struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head,
+ const struct rpc_xprt *cur, bool check_active)
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index ffbf99894970e0..47f33bb7bff814 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -92,7 +92,8 @@ static void frwr_mr_put(struct rpcrdma_mr *mr)
+ rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
+ }
+
+-/* frwr_reset - Place MRs back on the free list
++/**
++ * frwr_reset - Place MRs back on @req's free list
+ * @req: request to reset
+ *
+ * Used after a failed marshal. For FRWR, this means the MRs
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 85c8bcaebb80f1..3b05f90a3e50dd 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -852,7 +852,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ if (ret == -EINVAL)
+ svc_rdma_send_error(rdma_xprt, ctxt, ret);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+- return ret;
++ svc_xprt_deferred_close(xprt);
++ return -ENOTCONN;
+
+ out_backchannel:
+ svc_rdma_handle_bc_reply(rqstp, ctxt);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 28c0771c4e8c34..cb909329a5039d 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -244,7 +244,11 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ pr_info("rpcrdma: removing device %s for %pISpc\n",
+ ep->re_id->device->name, sap);
+- fallthrough;
++ switch (xchg(&ep->re_connect_status, -ENODEV)) {
++ case 0: goto wake_connect_worker;
++ case 1: goto disconnected;
++ }
++ return 0;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ ep->re_connect_status = -ENODEV;
+ goto disconnected;
+@@ -893,6 +897,8 @@ static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
+
+ static void rpcrdma_req_reset(struct rpcrdma_req *req)
+ {
++ struct rpcrdma_mr *mr;
++
+ /* Credits are valid for only one connection */
+ req->rl_slot.rq_cong = 0;
+
+@@ -902,7 +908,19 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req)
+ rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
+ rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
+
+- frwr_reset(req);
++ /* The verbs consumer can't know the state of an MR on the
++ * req->rl_registered list unless a successful completion
++	 * has occurred, so such MRs cannot be re-used.
++ */
++ while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
++ struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
++
++ spin_lock(&buf->rb_lock);
++ list_del(&mr->mr_all);
++ spin_unlock(&buf->rb_lock);
++
++ frwr_mr_release(mr);
++ }
+ }
+
+ /* ASSUMPTION: the rb_allreqs list is stable for the duration,
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index a15bf2ede89bf5..c1fe2a6ea7976c 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2422,6 +2422,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ transport->srcport = 0;
+ status = -EAGAIN;
+ break;
++ case -EPERM:
++ /* Happens, for instance, if a BPF program is preventing
++ * the connect. Remap the error so upper layers can better
++ * deal with it.
++ */
++ status = -ECONNREFUSED;
++ fallthrough;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+ * local IPv6 address without a scope-id.
+@@ -2644,6 +2651,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
+ .xprtsec = {
+ .policy = RPC_XPRTSEC_NONE,
+ },
++ .stats = upper_clnt->cl_stats,
+ };
+ unsigned int pflags = current->flags;
+ struct rpc_clnt *lower_clnt;
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 5b045284849e03..c9189a970eec31 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -19,6 +19,35 @@
+ #include <linux/rtnetlink.h>
+ #include <net/switchdev.h>
+
++static bool switchdev_obj_eq(const struct switchdev_obj *a,
++ const struct switchdev_obj *b)
++{
++ const struct switchdev_obj_port_vlan *va, *vb;
++ const struct switchdev_obj_port_mdb *ma, *mb;
++
++ if (a->id != b->id || a->orig_dev != b->orig_dev)
++ return false;
++
++ switch (a->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ va = SWITCHDEV_OBJ_PORT_VLAN(a);
++ vb = SWITCHDEV_OBJ_PORT_VLAN(b);
++ return va->flags == vb->flags &&
++ va->vid == vb->vid &&
++ va->changed == vb->changed;
++ case SWITCHDEV_OBJ_ID_PORT_MDB:
++ case SWITCHDEV_OBJ_ID_HOST_MDB:
++ ma = SWITCHDEV_OBJ_PORT_MDB(a);
++ mb = SWITCHDEV_OBJ_PORT_MDB(b);
++ return ma->vid == mb->vid &&
++ ether_addr_equal(ma->addr, mb->addr);
++ default:
++ break;
++ }
++
++ BUG();
++}
++
+ static LIST_HEAD(deferred);
+ static DEFINE_SPINLOCK(deferred_lock);
+
+@@ -307,6 +336,50 @@ int switchdev_port_obj_del(struct net_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+
++/**
++ * switchdev_port_obj_act_is_deferred - Is object action pending?
++ *
++ * @dev: port device
++ * @nt: type of action; add or delete
++ * @obj: object to test
++ *
++ * Returns true if a deferred item is pending that is
++ * equivalent to performing the action @nt on the object @obj.
++ *
++ * rtnl_lock must be held.
++ */
++bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
++ enum switchdev_notifier_type nt,
++ const struct switchdev_obj *obj)
++{
++ struct switchdev_deferred_item *dfitem;
++ bool found = false;
++
++ ASSERT_RTNL();
++
++ spin_lock_bh(&deferred_lock);
++
++ list_for_each_entry(dfitem, &deferred, list) {
++ if (dfitem->dev != dev)
++ continue;
++
++ if ((dfitem->func == switchdev_port_obj_add_deferred &&
++ nt == SWITCHDEV_PORT_OBJ_ADD) ||
++ (dfitem->func == switchdev_port_obj_del_deferred &&
++ nt == SWITCHDEV_PORT_OBJ_DEL)) {
++ if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
++ found = true;
++ break;
++ }
++ }
++ }
++
++ spin_unlock_bh(&deferred_lock);
++
++ return found;
++}
++EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
++
+ static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+ static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
+
+diff --git a/net/sysctl_net.c b/net/sysctl_net.c
+index 051ed5f6fc9372..a0a7a79991f9ff 100644
+--- a/net/sysctl_net.c
++++ b/net/sysctl_net.c
+@@ -54,7 +54,6 @@ static int net_ctl_permissions(struct ctl_table_header *head,
+ }
+
+ static void net_ctl_set_ownership(struct ctl_table_header *head,
+- struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid)
+ {
+ struct net *net = container_of(head->set, struct net, sysctls);
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 593846d252143c..114fef65f92eab 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
+ {
+ struct tipc_msg *hdr, *_hdr;
+ struct sk_buff_head tmpq;
++ u16 cong_link_cnt = 0;
+ struct sk_buff *_skb;
+- u16 cong_link_cnt;
+ int rc = 0;
+
+ /* Is a cluster supporting with new capabilities ? */
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 2cde375477e381..fec638e494c9dd 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -163,8 +163,12 @@ static int bearer_name_validate(const char *name,
+
+ /* return bearer name components, if necessary */
+ if (name_parts) {
+- strcpy(name_parts->media_name, media_name);
+- strcpy(name_parts->if_name, if_name);
++ if (strscpy(name_parts->media_name, media_name,
++ TIPC_MAX_MEDIA_NAME) < 0)
++ return 0;
++ if (strscpy(name_parts->if_name, if_name,
++ TIPC_MAX_IF_NAME) < 0)
++ return 0;
+ }
+ return 1;
+ }
+@@ -1086,6 +1090,12 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
++ rtnl_unlock();
++ NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
++ return -EINVAL;
++ }
++
+ err = tipc_udp_nl_bearer_add(b,
+ attrs[TIPC_NLA_BEARER_UDP_OPTS]);
+ if (err) {
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index e33b4f29f77cf2..d0143823658d58 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ sz = ntohs(p->len);
+ /* Sanity check */
+- if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
++ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ /* Good, check if the desired type exists */
+ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ goto ok;
+@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+ __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+
+ /* Total len */
+- len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
++ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ ga->len = htons(len);
+ return len;
+ }
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 5c9fd4791c4ba1..76284fc538ebdd 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+- *buf = NULL;
+ if (skb_has_frag_list(frag) && __skb_linearize(frag))
+ goto err;
++ *buf = NULL;
+ frag = skb_unshare(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+@@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ if (!head)
+ goto err;
+
++	/* Either ownership of the input skb is transferred to headskb
++	 * or the input skb is freed; clear the reference to avoid
++	 * a bad access on the error path.
++ */
++ *buf = NULL;
+ if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
+ kfree_skb_partial(frag, headstolen);
+ } else {
+@@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ *headbuf = NULL;
+ return 1;
+ }
+- *buf = NULL;
+ return 0;
+ err:
+ kfree_skb(*buf);
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index e8fd257c0e6888..1a9a5bdaccf4fc 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
++ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_LINK_NAME },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+
+ const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+ [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
++ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_BEARER_NAME },
+ [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 5bc076f2fa74a2..c763008a8adbaa 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -102,6 +102,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ return -EMSGSIZE;
+
+ skb_put(skb, TLV_SPACE(len));
++ memset(tlv, 0, TLV_SPACE(len));
+ tlv->tlv_type = htons(type);
+ tlv->tlv_len = htons(TLV_LENGTH(len));
+ if (len && data)
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 3105abe97bb9cc..69053c03982528 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2107,6 +2107,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+ } else {
+ n = tipc_node_find_by_id(net, ehdr->id);
+ }
++ skb_dst_force(skb);
+ tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ if (!skb)
+ return;
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index f892b0903dbaf2..cdc8378261ec3f 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -135,8 +135,11 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
+ snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
+ else if (ntohs(ua->proto) == ETH_P_IPV6)
+ snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
+- else
++ else {
+ pr_err("Invalid UDP media address\n");
++ return 1;
++ }
++
+ return 0;
+ }
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 28a8c0e80e3c56..02038d0381b754 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -212,7 +212,7 @@ static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
+
+ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
+ {
+- return ctx->strp.msg_ready;
++ return READ_ONCE(ctx->strp.msg_ready);
+ }
+
+ static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 002483e60c190d..0a67b93a52ec25 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -814,9 +814,17 @@ struct tls_context *tls_ctx_create(struct sock *sk)
+ return NULL;
+
+ mutex_init(&ctx->tx_lock);
+- rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ ctx->sk = sk;
++ /* Release semantic of rcu_assign_pointer() ensures that
++ * ctx->sk_proto is visible before changing sk->sk_prot in
++	 * update_sk_prot(), and prevents reading an uninitialized value in
++	 * tls_{getsockopt,setsockopt}. Note that we do not need a
++ * read barrier in tls_{getsockopt,setsockopt} as there is an
++ * address dependency between sk->sk_proto->{getsockopt,setsockopt}
++ * and ctx->sk_proto.
++ */
++ rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ return ctx;
+ }
+
+@@ -1001,7 +1009,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx)
+ return 0;
+ }
+
+-static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
++static int tls_get_info(struct sock *sk, struct sk_buff *skb)
+ {
+ u16 version, cipher_type;
+ struct tls_context *ctx;
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index ca1e0e198ceb45..5df08d848b5c9c 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -360,7 +360,7 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+ if (strp->stm.full_len && strp->stm.full_len == skb->len) {
+ desc->count = 0;
+
+- strp->msg_ready = 1;
++ WRITE_ONCE(strp->msg_ready, 1);
+ tls_rx_msg_ready(strp);
+ }
+
+@@ -528,7 +528,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ if (!tls_strp_check_queue_ok(strp))
+ return tls_strp_read_copy(strp, false);
+
+- strp->msg_ready = 1;
++ WRITE_ONCE(strp->msg_ready, 1);
+ tls_rx_msg_ready(strp);
+
+ return 0;
+@@ -580,7 +580,7 @@ void tls_strp_msg_done(struct tls_strparser *strp)
+ else
+ tls_strp_flush_anchor_copy(strp);
+
+- strp->msg_ready = 0;
++ WRITE_ONCE(strp->msg_ready, 0);
+ memset(&strp->stm, 0, sizeof(strp->stm));
+
+ tls_strp_check_rcv(strp);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index e9d1e83a859d1f..df166f6afad823 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -52,6 +52,7 @@ struct tls_decrypt_arg {
+ struct_group(inargs,
+ bool zc;
+ bool async;
++ bool async_done;
+ u8 tail;
+ );
+
+@@ -63,6 +64,7 @@ struct tls_decrypt_ctx {
+ u8 iv[MAX_IV_SIZE];
+ u8 aad[TLS_MAX_AAD_SIZE];
+ u8 tail;
++ bool free_sgout;
+ struct scatterlist sg[];
+ };
+
+@@ -187,7 +189,6 @@ static void tls_decrypt_done(void *data, int err)
+ struct aead_request *aead_req = data;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct scatterlist *sgout = aead_req->dst;
+- struct scatterlist *sgin = aead_req->src;
+ struct tls_sw_context_rx *ctx;
+ struct tls_decrypt_ctx *dctx;
+ struct tls_context *tls_ctx;
+@@ -196,6 +197,17 @@ static void tls_decrypt_done(void *data, int err)
+ struct sock *sk;
+ int aead_size;
+
++ /* If requests get too backlogged crypto API returns -EBUSY and calls
++ * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
++ * to make waiting for backlog to flush with crypto_wait_req() easier.
++ * First wait converts -EBUSY -> -EINPROGRESS, and the second one
++ * -EINPROGRESS -> 0.
++	 * We have a single struct crypto_async_request per direction, so this
++	 * scheme doesn't help us; just ignore the first ->complete().
++ */
++ if (err == -EINPROGRESS)
++ return;
++
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
+ aead_size = ALIGN(aead_size, __alignof__(*dctx));
+ dctx = (void *)((u8 *)aead_req + aead_size);
+@@ -213,7 +225,7 @@ static void tls_decrypt_done(void *data, int err)
+ }
+
+ /* Free the destination pages if skb was not decrypted inplace */
+- if (sgout != sgin) {
++ if (dctx->free_sgout) {
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+@@ -224,10 +236,17 @@ static void tls_decrypt_done(void *data, int err)
+
+ kfree(aead_req);
+
+- spin_lock_bh(&ctx->decrypt_compl_lock);
+- if (!atomic_dec_return(&ctx->decrypt_pending))
++ if (atomic_dec_and_test(&ctx->decrypt_pending))
+ complete(&ctx->async_wait.completion);
+- spin_unlock_bh(&ctx->decrypt_compl_lock);
++}
++
++static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
++{
++ if (!atomic_dec_and_test(&ctx->decrypt_pending))
++ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++ atomic_inc(&ctx->decrypt_pending);
++
++ return ctx->async_wait.err;
+ }
+
+ static int tls_do_decryption(struct sock *sk,
+@@ -253,20 +272,33 @@ static int tls_do_decryption(struct sock *sk,
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tls_decrypt_done, aead_req);
++ DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
+ atomic_inc(&ctx->decrypt_pending);
+ } else {
++ DECLARE_CRYPTO_WAIT(wait);
++
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+- crypto_req_done, &ctx->async_wait);
++ crypto_req_done, &wait);
++ ret = crypto_aead_decrypt(aead_req);
++ if (ret == -EINPROGRESS || ret == -EBUSY)
++ ret = crypto_wait_req(ret, &wait);
++ return ret;
+ }
+
+ ret = crypto_aead_decrypt(aead_req);
+- if (ret == -EINPROGRESS) {
+- if (darg->async)
+- return 0;
++ if (ret == -EINPROGRESS)
++ return 0;
+
+- ret = crypto_wait_req(ret, &ctx->async_wait);
++ if (ret == -EBUSY) {
++ ret = tls_decrypt_async_wait(ctx);
++ darg->async_done = true;
++ /* all completions have run, we're not doing async anymore */
++ darg->async = false;
++ return ret;
+ }
++
++ atomic_dec(&ctx->decrypt_pending);
+ darg->async = false;
+
+ return ret;
+@@ -439,9 +471,10 @@ static void tls_encrypt_done(void *data, int err)
+ struct tls_rec *rec = data;
+ struct scatterlist *sge;
+ struct sk_msg *msg_en;
+- bool ready = false;
+ struct sock *sk;
+- int pending;
++
++ if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
++ return;
+
+ msg_en = &rec->msg_encrypted;
+
+@@ -476,23 +509,25 @@ static void tls_encrypt_done(void *data, int err)
+ /* If received record is at head of tx_list, schedule tx */
+ first_rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+- if (rec == first_rec)
+- ready = true;
++ if (rec == first_rec) {
++ /* Schedule the transmission */
++ if (!test_and_set_bit(BIT_TX_SCHEDULED,
++ &ctx->tx_bitmask))
++ schedule_delayed_work(&ctx->tx_work.work, 1);
++ }
+ }
+
+- spin_lock_bh(&ctx->encrypt_compl_lock);
+- pending = atomic_dec_return(&ctx->encrypt_pending);
+-
+- if (!pending && ctx->async_notify)
++ if (atomic_dec_and_test(&ctx->encrypt_pending))
+ complete(&ctx->async_wait.completion);
+- spin_unlock_bh(&ctx->encrypt_compl_lock);
++}
+
+- if (!ready)
+- return;
++static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
++{
++ if (!atomic_dec_and_test(&ctx->encrypt_pending))
++ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++ atomic_inc(&ctx->encrypt_pending);
+
+- /* Schedule the transmission */
+- if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+- schedule_delayed_work(&ctx->tx_work.work, 1);
++ return ctx->async_wait.err;
+ }
+
+ static int tls_do_encryption(struct sock *sk,
+@@ -541,9 +576,14 @@ static int tls_do_encryption(struct sock *sk,
+
+ /* Add the record in tx_list */
+ list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
++ DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
+ atomic_inc(&ctx->encrypt_pending);
+
+ rc = crypto_aead_encrypt(aead_req);
++ if (rc == -EBUSY) {
++ rc = tls_encrypt_async_wait(ctx);
++ rc = rc ?: -EINPROGRESS;
++ }
+ if (!rc || rc != -EINPROGRESS) {
+ atomic_dec(&ctx->encrypt_pending);
+ sge->offset -= prot->prepend_size;
+@@ -952,6 +992,8 @@ static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
+ }
+
+ sk_msg_page_add(msg_pl, page, part, off);
++ msg_pl->sg.copybreak = 0;
++ msg_pl->sg.curr = msg_pl->sg.end;
+ sk_mem_charge(sk, part);
+ *copied += part;
+ try_to_copy -= part;
+@@ -982,7 +1024,6 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ int num_zc = 0;
+ int orig_size;
+ int ret = 0;
+- int pending;
+
+ if (!eor && (msg->msg_flags & MSG_EOR))
+ return -EINVAL;
+@@ -1050,7 +1091,11 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ if (ret < 0)
+ goto send_end;
+ tls_ctx->pending_open_record_frags = true;
+- if (full_record || eor || sk_msg_full(msg_pl))
++
++ if (sk_msg_full(msg_pl))
++ full_record = true;
++
++ if (full_record || eor)
+ goto copied;
+ continue;
+ }
+@@ -1157,24 +1202,12 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ if (!num_async) {
+ goto send_end;
+ } else if (num_zc) {
+- /* Wait for pending encryptions to get completed */
+- spin_lock_bh(&ctx->encrypt_compl_lock);
+- ctx->async_notify = true;
+-
+- pending = atomic_read(&ctx->encrypt_pending);
+- spin_unlock_bh(&ctx->encrypt_compl_lock);
+- if (pending)
+- crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+- else
+- reinit_completion(&ctx->async_wait.completion);
+-
+- /* There can be no concurrent accesses, since we have no
+- * pending encrypt operations
+- */
+- WRITE_ONCE(ctx->async_notify, false);
++ int err;
+
+- if (ctx->async_wait.err) {
+- ret = ctx->async_wait.err;
++ /* Wait for pending encryptions to get completed */
++ err = tls_encrypt_async_wait(ctx);
++ if (err) {
++ ret = err;
+ copied = 0;
+ }
+ }
+@@ -1223,7 +1256,6 @@ void tls_sw_splice_eof(struct socket *sock)
+ ssize_t copied = 0;
+ bool retrying = false;
+ int ret = 0;
+- int pending;
+
+ if (!ctx->open_rec)
+ return;
+@@ -1232,11 +1264,14 @@ void tls_sw_splice_eof(struct socket *sock)
+ lock_sock(sk);
+
+ retry:
++ /* same checks as in tls_sw_push_pending_record() */
+ rec = ctx->open_rec;
+ if (!rec)
+ goto unlock;
+
+ msg_pl = &rec->msg_plaintext;
++ if (msg_pl->sg.size == 0)
++ goto unlock;
+
+ /* Check the BPF advisor and perform transmission. */
+ ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
+@@ -1255,22 +1290,7 @@ void tls_sw_splice_eof(struct socket *sock)
+ }
+
+ /* Wait for pending encryptions to get completed */
+- spin_lock_bh(&ctx->encrypt_compl_lock);
+- ctx->async_notify = true;
+-
+- pending = atomic_read(&ctx->encrypt_pending);
+- spin_unlock_bh(&ctx->encrypt_compl_lock);
+- if (pending)
+- crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+- else
+- reinit_completion(&ctx->async_wait.completion);
+-
+- /* There can be no concurrent accesses, since we have no pending
+- * encrypt operations
+- */
+- WRITE_ONCE(ctx->async_notify, false);
+-
+- if (ctx->async_wait.err)
++ if (tls_encrypt_async_wait(ctx))
+ goto unlock;
+
+ /* Transmit if any encryptions have completed */
+@@ -1491,7 +1511,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ */
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+ aead_size = ALIGN(aead_size, __alignof__(*dctx));
+- mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
++ mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
+ sk->sk_allocation);
+ if (!mem) {
+ err = -ENOMEM;
+@@ -1572,12 +1592,16 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ } else if (out_sg) {
+ memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
+ }
++ dctx->free_sgout = !!pages;
+
+ /* Prepare and submit AEAD request */
+ err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+ data_len + prot->tail_size, aead_req, darg);
+- if (err)
++ if (err) {
++ if (darg->async_done)
++ goto exit_free_skb;
+ goto exit_free_pages;
++ }
+
+ darg->skb = clear_skb ?: tls_strp_msg(ctx);
+ clear_skb = NULL;
+@@ -1589,6 +1613,9 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ return err;
+ }
+
++ if (unlikely(darg->async_done))
++ return 0;
++
+ if (prot->tail_size)
+ darg->tail = dctx->tail;
+
+@@ -1760,7 +1787,8 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+ u8 *control,
+ size_t skip,
+ size_t len,
+- bool is_peek)
++ bool is_peek,
++ bool *more)
+ {
+ struct sk_buff *skb = skb_peek(&ctx->rx_list);
+ struct tls_msg *tlm;
+@@ -1773,7 +1801,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+- goto out;
++ goto more;
+
+ if (skip < rxm->full_len)
+ break;
+@@ -1791,12 +1819,12 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+- goto out;
++ goto more;
+
+ err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+- goto out;
++ goto more;
+
+ len = len - chunk;
+ copied = copied + chunk;
+@@ -1832,6 +1860,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+
+ out:
+ return copied ? : err;
++more:
++ if (more)
++ *more = true;
++ goto out;
+ }
+
+ static bool
+@@ -1931,10 +1963,12 @@ int tls_sw_recvmsg(struct sock *sk,
+ struct strp_msg *rxm;
+ struct tls_msg *tlm;
+ ssize_t copied = 0;
++ ssize_t peeked = 0;
+ bool async = false;
+ int target, err;
+ bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ bool is_peek = flags & MSG_PEEK;
++ bool rx_more = false;
+ bool released = true;
+ bool bpf_strp_enabled;
+ bool zc_capable;
+@@ -1942,10 +1976,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
+
+- psock = sk_psock_get(sk);
+ err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+ if (err < 0)
+ return err;
++ psock = sk_psock_get(sk);
+ bpf_strp_enabled = sk_psock_strp_enabled(psock);
+
+ /* If crypto failed the connection is broken */
+@@ -1954,12 +1988,12 @@ int tls_sw_recvmsg(struct sock *sk,
+ goto end;
+
+ /* Process pending decrypted records. It must be non-zero-copy */
+- err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
++ err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
+ if (err < 0)
+ goto end;
+
+ copied = err;
+- if (len <= copied)
++ if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
+ goto end;
+
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+@@ -2052,6 +2086,8 @@ int tls_sw_recvmsg(struct sock *sk,
+ decrypted += chunk;
+ len -= chunk;
+ __skb_queue_tail(&ctx->rx_list, skb);
++ if (unlikely(control != TLS_RECORD_TYPE_DATA))
++ break;
+ continue;
+ }
+
+@@ -2075,8 +2111,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ if (err < 0)
+ goto put_on_rx_list_err;
+
+- if (is_peek)
++ if (is_peek) {
++ peeked += chunk;
+ goto put_on_rx_list;
++ }
+
+ if (partially_consumed) {
+ rxm->offset += chunk;
+@@ -2100,16 +2138,10 @@ int tls_sw_recvmsg(struct sock *sk,
+
+ recv_end:
+ if (async) {
+- int ret, pending;
++ int ret;
+
+ /* Wait for all previously submitted records to be decrypted */
+- spin_lock_bh(&ctx->decrypt_compl_lock);
+- reinit_completion(&ctx->async_wait.completion);
+- pending = atomic_read(&ctx->decrypt_pending);
+- spin_unlock_bh(&ctx->decrypt_compl_lock);
+- ret = 0;
+- if (pending)
+- ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++ ret = tls_decrypt_async_wait(ctx);
+ __skb_queue_purge(&ctx->async_hold);
+
+ if (ret) {
+@@ -2120,13 +2152,15 @@ int tls_sw_recvmsg(struct sock *sk,
+ }
+
+ /* Drain records from the rx_list & copy if required */
+- if (is_peek || is_kvec)
+- err = process_rx_list(ctx, msg, &control, copied,
+- decrypted, is_peek);
++ if (is_peek)
++ err = process_rx_list(ctx, msg, &control, copied + peeked,
++ decrypted - peeked, is_peek, NULL);
+ else
+ err = process_rx_list(ctx, msg, &control, 0,
+- async_copy_bytes, is_peek);
+- decrypted += max(err, 0);
++ async_copy_bytes, is_peek, NULL);
++
++ /* we could have copied less than we wanted, and possibly nothing */
++ decrypted += max(err, 0) - async_copy_bytes;
+ }
+
+ copied += decrypted;
+@@ -2426,16 +2460,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec, *tmp;
+- int pending;
+
+ /* Wait for any pending async encryptions to complete */
+- spin_lock_bh(&ctx->encrypt_compl_lock);
+- ctx->async_notify = true;
+- pending = atomic_read(&ctx->encrypt_pending);
+- spin_unlock_bh(&ctx->encrypt_compl_lock);
+-
+- if (pending)
+- crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
++ tls_encrypt_async_wait(ctx);
+
+ tls_tx_records(sk, -1);
+
+@@ -2588,6 +2615,48 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
+ tls_ctx->prot_info.version != TLS_1_3_VERSION;
+ }
+
++static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
++{
++ struct tls_sw_context_tx *sw_ctx_tx;
++
++ if (!ctx->priv_ctx_tx) {
++ sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
++ if (!sw_ctx_tx)
++ return NULL;
++ } else {
++ sw_ctx_tx = ctx->priv_ctx_tx;
++ }
++
++ crypto_init_wait(&sw_ctx_tx->async_wait);
++ atomic_set(&sw_ctx_tx->encrypt_pending, 1);
++ INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
++ INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
++ sw_ctx_tx->tx_work.sk = sk;
++
++ return sw_ctx_tx;
++}
++
++static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
++{
++ struct tls_sw_context_rx *sw_ctx_rx;
++
++ if (!ctx->priv_ctx_rx) {
++ sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
++ if (!sw_ctx_rx)
++ return NULL;
++ } else {
++ sw_ctx_rx = ctx->priv_ctx_rx;
++ }
++
++ crypto_init_wait(&sw_ctx_rx->async_wait);
++ atomic_set(&sw_ctx_rx->decrypt_pending, 1);
++ init_waitqueue_head(&sw_ctx_rx->wq);
++ skb_queue_head_init(&sw_ctx_rx->rx_list);
++ skb_queue_head_init(&sw_ctx_rx->async_hold);
++
++ return sw_ctx_rx;
++}
++
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ {
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+@@ -2609,48 +2678,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ }
+
+ if (tx) {
+- if (!ctx->priv_ctx_tx) {
+- sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+- if (!sw_ctx_tx) {
+- rc = -ENOMEM;
+- goto out;
+- }
+- ctx->priv_ctx_tx = sw_ctx_tx;
+- } else {
+- sw_ctx_tx =
+- (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
+- }
+- } else {
+- if (!ctx->priv_ctx_rx) {
+- sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+- if (!sw_ctx_rx) {
+- rc = -ENOMEM;
+- goto out;
+- }
+- ctx->priv_ctx_rx = sw_ctx_rx;
+- } else {
+- sw_ctx_rx =
+- (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
+- }
+- }
++ ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
++ if (!ctx->priv_ctx_tx)
++ return -ENOMEM;
+
+- if (tx) {
+- crypto_init_wait(&sw_ctx_tx->async_wait);
+- spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
++ sw_ctx_tx = ctx->priv_ctx_tx;
+ crypto_info = &ctx->crypto_send.info;
+ cctx = &ctx->tx;
+ aead = &sw_ctx_tx->aead_send;
+- INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+- INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+- sw_ctx_tx->tx_work.sk = sk;
+ } else {
+- crypto_init_wait(&sw_ctx_rx->async_wait);
+- spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+- init_waitqueue_head(&sw_ctx_rx->wq);
++ ctx->priv_ctx_rx = init_ctx_rx(ctx);
++ if (!ctx->priv_ctx_rx)
++ return -ENOMEM;
++
++ sw_ctx_rx = ctx->priv_ctx_rx;
+ crypto_info = &ctx->crypto_recv.info;
+ cctx = &ctx->rx;
+- skb_queue_head_init(&sw_ctx_rx->rx_list);
+- skb_queue_head_init(&sw_ctx_rx->async_hold);
+ aead = &sw_ctx_rx->aead_recv;
+ }
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 3e8a04a1366883..dca4429014db15 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -212,8 +212,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+ }
+ #endif /* CONFIG_SECURITY_NETWORK */
+
+-#define unix_peer(sk) (unix_sk(sk)->peer)
+-
+ static inline int unix_our_peer(struct sock *sk, struct sock *osk)
+ {
+ return unix_peer(osk) == sk;
+@@ -224,15 +222,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+ return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+
+-static inline int unix_recvq_full(const struct sock *sk)
+-{
+- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+-}
+-
+ static inline int unix_recvq_full_lockless(const struct sock *sk)
+ {
+- return skb_queue_len_lockless(&sk->sk_receive_queue) >
+- READ_ONCE(sk->sk_max_ack_backlog);
++ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+
+ struct sock *unix_peer_get(struct sock *s)
+@@ -533,10 +525,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+ return 0;
+ }
+
+-static int unix_writable(const struct sock *sk)
++static int unix_writable(const struct sock *sk, unsigned char state)
+ {
+- return sk->sk_state != TCP_LISTEN &&
+- (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
++ return state != TCP_LISTEN &&
++ (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
+ }
+
+ static void unix_write_space(struct sock *sk)
+@@ -544,7 +536,7 @@ static void unix_write_space(struct sock *sk)
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+- if (unix_writable(sk)) {
++ if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait,
+@@ -573,7 +565,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
+ sk_error_report(other);
+ }
+ }
+- other->sk_state = TCP_CLOSE;
+ }
+
+ static void unix_sock_destructor(struct sock *sk)
+@@ -620,7 +611,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
+ state = sk->sk_state;
+- sk->sk_state = TCP_CLOSE;
++ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+
+ skpair = unix_peer(sk);
+ unix_peer(sk) = NULL;
+@@ -641,7 +632,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ unix_state_lock(skpair);
+ /* No more writes */
+ WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+- if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
+ WRITE_ONCE(skpair->sk_err, ECONNRESET);
+ unix_state_unlock(skpair);
+ skpair->sk_state_change(skpair);
+@@ -702,9 +693,6 @@ static void init_peercred(struct sock *sk)
+
+ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ {
+- const struct cred *old_cred;
+- struct pid *old_pid;
+-
+ if (sk < peersk) {
+ spin_lock(&sk->sk_peer_lock);
+ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+@@ -712,16 +700,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ spin_lock(&peersk->sk_peer_lock);
+ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ }
+- old_pid = sk->sk_peer_pid;
+- old_cred = sk->sk_peer_cred;
++
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+ spin_unlock(&sk->sk_peer_lock);
+ spin_unlock(&peersk->sk_peer_lock);
+-
+- put_pid(old_pid);
+- put_cred(old_cred);
+ }
+
+ static int unix_listen(struct socket *sock, int backlog)
+@@ -734,7 +718,7 @@ static int unix_listen(struct socket *sock, int backlog)
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
+ goto out; /* Only stream/seqpacket sockets accept */
+ err = -EINVAL;
+- if (!u->addr)
++ if (!READ_ONCE(u->addr))
+ goto out; /* No listens on an unbound socket */
+ unix_state_lock(sk);
+ if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
+@@ -742,7 +726,8 @@ static int unix_listen(struct socket *sock, int backlog)
+ if (backlog > sk->sk_max_ack_backlog)
+ wake_up_interruptible_all(&u->peer_wait);
+ sk->sk_max_ack_backlog = backlog;
+- sk->sk_state = TCP_LISTEN;
++ WRITE_ONCE(sk->sk_state, TCP_LISTEN);
++
+ /* set credentials so connect can copy them */
+ init_peercred(sk);
+ err = 0;
+@@ -992,13 +977,13 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
+ sk->sk_hash = unix_unbound_hash(sk);
+ sk->sk_allocation = GFP_KERNEL_ACCOUNT;
+ sk->sk_write_space = unix_write_space;
+- sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
++ sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
+ sk->sk_destruct = unix_sock_destructor;
+- u = unix_sk(sk);
++ u = unix_sk(sk);
++ u->inflight = 0;
+ u->path.dentry = NULL;
+ u->path.mnt = NULL;
+ spin_lock_init(&u->lock);
+- atomic_long_set(&u->inflight, 0);
+ INIT_LIST_HEAD(&u->link);
+ mutex_init(&u->iolock); /* single task reading lock */
+ mutex_init(&u->bindlock); /* single task binding lock */
+@@ -1147,8 +1132,8 @@ static struct sock *unix_find_other(struct net *net,
+
+ static int unix_autobind(struct sock *sk)
+ {
+- unsigned int new_hash, old_hash = sk->sk_hash;
+ struct unix_sock *u = unix_sk(sk);
++ unsigned int new_hash, old_hash;
+ struct net *net = sock_net(sk);
+ struct unix_address *addr;
+ u32 lastnum, ordernum;
+@@ -1171,6 +1156,7 @@ static int unix_autobind(struct sock *sk)
+ addr->name->sun_family = AF_UNIX;
+ refcount_set(&addr->refcnt, 1);
+
++ old_hash = sk->sk_hash;
+ ordernum = get_random_u32();
+ lastnum = ordernum & 0xFFFFF;
+ retry:
+@@ -1211,8 +1197,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ {
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+- unsigned int new_hash, old_hash = sk->sk_hash;
+ struct unix_sock *u = unix_sk(sk);
++ unsigned int new_hash, old_hash;
+ struct net *net = sock_net(sk);
+ struct mnt_idmap *idmap;
+ struct unix_address *addr;
+@@ -1250,6 +1236,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ if (u->addr)
+ goto out_unlock;
+
++ old_hash = sk->sk_hash;
+ new_hash = unix_bsd_hash(d_backing_inode(dentry));
+ unix_table_double_lock(net, old_hash, new_hash);
+ u->path.mnt = mntget(parent.mnt);
+@@ -1277,8 +1264,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ int addr_len)
+ {
+- unsigned int new_hash, old_hash = sk->sk_hash;
+ struct unix_sock *u = unix_sk(sk);
++ unsigned int new_hash, old_hash;
+ struct net *net = sock_net(sk);
+ struct unix_address *addr;
+ int err;
+@@ -1296,6 +1283,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ goto out_mutex;
+ }
+
++ old_hash = sk->sk_hash;
+ new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ unix_table_double_lock(net, old_hash, new_hash);
+
+@@ -1345,13 +1333,11 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
+ unix_state_lock(sk1);
+ return;
+ }
+- if (sk1 < sk2) {
+- unix_state_lock(sk1);
+- unix_state_lock_nested(sk2);
+- } else {
+- unix_state_lock(sk2);
+- unix_state_lock_nested(sk1);
+- }
++ if (sk1 > sk2)
++ swap(sk1, sk2);
++
++ unix_state_lock(sk1);
++ unix_state_lock_nested(sk2, U_LOCK_SECOND);
+ }
+
+ static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
+@@ -1383,7 +1369,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+
+ if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+- !unix_sk(sk)->addr) {
++ !READ_ONCE(unix_sk(sk)->addr)) {
+ err = unix_autobind(sk);
+ if (err)
+ goto out;
+@@ -1413,7 +1399,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ if (err)
+ goto out_unlock;
+
+- sk->sk_state = other->sk_state = TCP_ESTABLISHED;
++ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
++ WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
+ } else {
+ /*
+ * 1003.1g breaking connected state with AF_UNSPEC
+@@ -1430,13 +1417,20 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+
+ unix_peer(sk) = other;
+ if (!other)
+- sk->sk_state = TCP_CLOSE;
++ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+
+ unix_state_double_unlock(sk, other);
+
+- if (other != old_peer)
++ if (other != old_peer) {
+ unix_dgram_disconnected(sk, old_peer);
++
++ unix_state_lock(old_peer);
++ if (!unix_peer(old_peer))
++ WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
++ unix_state_unlock(old_peer);
++ }
++
+ sock_put(old_peer);
+ } else {
+ unix_peer(sk) = other;
+@@ -1482,16 +1476,17 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ struct unix_sock *u = unix_sk(sk), *newu, *otheru;
+ struct net *net = sock_net(sk);
+ struct sk_buff *skb = NULL;
++ unsigned char state;
+ long timeo;
+ int err;
+- int st;
+
+ err = unix_validate_addr(sunaddr, addr_len);
+ if (err)
+ goto out;
+
+ if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
+- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
++ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
++ !READ_ONCE(u->addr)) {
+ err = unix_autobind(sk);
+ if (err)
+ goto out;
+@@ -1528,7 +1523,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ goto out;
+ }
+
+- /* Latch state of peer */
+ unix_state_lock(other);
+
+ /* Apparently VFS overslept socket death. Retry. */
+@@ -1544,7 +1538,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ if (other->sk_shutdown & RCV_SHUTDOWN)
+ goto out_unlock;
+
+- if (unix_recvq_full(other)) {
++ if (unix_recvq_full_lockless(other)) {
+ err = -EAGAIN;
+ if (!timeo)
+ goto out_unlock;
+@@ -1558,39 +1552,21 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ goto restart;
+ }
+
+- /* Latch our state.
+-
+- It is tricky place. We need to grab our state lock and cannot
+- drop lock on peer. It is dangerous because deadlock is
+- possible. Connect to self case and simultaneous
+- attempt to connect are eliminated by checking socket
+- state. other is TCP_LISTEN, if sk is TCP_LISTEN we
+- check this before attempt to grab lock.
+-
+- Well, and we have to recheck the state after socket locked.
++	/* Self-connect and simultaneous connect are eliminated
++	 * by rejecting a TCP_LISTEN socket to avoid deadlock.
++	 */
+- st = sk->sk_state;
+-
+- switch (st) {
+- case TCP_CLOSE:
+- /* This is ok... continue with connect */
+- break;
+- case TCP_ESTABLISHED:
+- /* Socket is already connected */
+- err = -EISCONN;
+- goto out_unlock;
+- default:
+- err = -EINVAL;
++ state = READ_ONCE(sk->sk_state);
++ if (unlikely(state != TCP_CLOSE)) {
++ err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
+ goto out_unlock;
+ }
+
+- unix_state_lock_nested(sk);
++ unix_state_lock_nested(sk, U_LOCK_SECOND);
+
+- if (sk->sk_state != st) {
++ if (unlikely(sk->sk_state != TCP_CLOSE)) {
++ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL;
+ unix_state_unlock(sk);
+- unix_state_unlock(other);
+- sock_put(other);
+- goto restart;
++ goto out_unlock;
+ }
+
+ err = security_unix_stream_connect(sk, other, newsk);
+@@ -1638,7 +1614,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ copy_peercred(sk, other);
+
+ sock->state = SS_CONNECTED;
+- sk->sk_state = TCP_ESTABLISHED;
++ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ sock_hold(newsk);
+
+ smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
+@@ -1711,7 +1687,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+ goto out;
+
+ err = -EINVAL;
+- if (sk->sk_state != TCP_LISTEN)
++ if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
+ goto out;
+
+ /* If socket state is TCP_LISTEN it cannot change (for now...),
+@@ -1931,14 +1907,15 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ }
+
+ if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
+- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
++ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
++ !READ_ONCE(u->addr)) {
+ err = unix_autobind(sk);
+ if (err)
+ goto out;
+ }
+
+ err = -EMSGSIZE;
+- if (len > sk->sk_sndbuf - 32)
++ if (len > READ_ONCE(sk->sk_sndbuf) - 32)
+ goto out;
+
+ if (len > SKB_MAX_ALLOC) {
+@@ -2020,7 +1997,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ unix_peer(sk) = NULL;
+ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+
+- sk->sk_state = TCP_CLOSE;
++ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ unix_state_unlock(sk);
+
+ unix_dgram_disconnected(sk, other);
+@@ -2151,13 +2128,15 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
+ maybe_add_creds(skb, sock, other);
+ skb_get(skb);
+
++ scm_stat_add(other, skb);
++
++ spin_lock(&other->sk_receive_queue.lock);
+ if (ousk->oob_skb)
+ consume_skb(ousk->oob_skb);
+-
+ WRITE_ONCE(ousk->oob_skb, skb);
++ __skb_queue_tail(&other->sk_receive_queue, skb);
++ spin_unlock(&other->sk_receive_queue.lock);
+
+- scm_stat_add(other, skb);
+- skb_queue_tail(&other->sk_receive_queue, skb);
+ sk_send_sigurg(other);
+ unix_state_unlock(other);
+ other->sk_data_ready(other);
+@@ -2194,7 +2173,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ }
+
+ if (msg->msg_namelen) {
+- err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
++ err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ goto out_err;
+ } else {
+ err = -ENOTCONN;
+@@ -2203,7 +2182,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ goto out_err;
+ }
+
+- if (sk->sk_shutdown & SEND_SHUTDOWN)
++ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ goto pipe_err;
+
+ while (sent < len) {
+@@ -2215,7 +2194,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ &err, 0);
+ } else {
+ /* Keep two messages in the pipe so it schedules better */
+- size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
++ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
+
+ /* allow fallback to order-0 allocations */
+ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
+@@ -2308,7 +2287,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
+ if (err)
+ return err;
+
+- if (sk->sk_state != TCP_ESTABLISHED)
++ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ if (msg->msg_namelen)
+@@ -2322,7 +2301,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+ struct sock *sk = sock->sk;
+
+- if (sk->sk_state != TCP_ESTABLISHED)
++ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ return unix_dgram_recvmsg(sock, msg, size, flags);
+@@ -2542,8 +2521,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+
+ mutex_lock(&u->iolock);
+ unix_state_lock(sk);
++ spin_lock(&sk->sk_receive_queue.lock);
+
+ if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
++ spin_unlock(&sk->sk_receive_queue.lock);
+ unix_state_unlock(sk);
+ mutex_unlock(&u->iolock);
+ return -EINVAL;
+@@ -2553,15 +2534,18 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+
+ if (!(state->flags & MSG_PEEK))
+ WRITE_ONCE(u->oob_skb, NULL);
++ else
++ skb_get(oob_skb);
+
++ spin_unlock(&sk->sk_receive_queue.lock);
+ unix_state_unlock(sk);
+
+ chunk = state->recv_actor(oob_skb, 0, chunk, state);
+
+- if (!(state->flags & MSG_PEEK)) {
++ if (!(state->flags & MSG_PEEK))
+ UNIXCB(oob_skb).consumed += 1;
+- kfree_skb(oob_skb);
+- }
++
++ consume_skb(oob_skb);
+
+ mutex_unlock(&u->iolock);
+
+@@ -2582,20 +2566,34 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ consume_skb(skb);
+ skb = NULL;
+ } else {
++ struct sk_buff *unlinked_skb = NULL;
++
++ spin_lock(&sk->sk_receive_queue.lock);
++
+ if (skb == u->oob_skb) {
+ if (copied) {
+ skb = NULL;
+- } else if (sock_flag(sk, SOCK_URGINLINE)) {
+- if (!(flags & MSG_PEEK)) {
++ } else if (!(flags & MSG_PEEK)) {
++ if (sock_flag(sk, SOCK_URGINLINE)) {
+ WRITE_ONCE(u->oob_skb, NULL);
+ consume_skb(skb);
++ } else {
++ __skb_unlink(skb, &sk->sk_receive_queue);
++ WRITE_ONCE(u->oob_skb, NULL);
++ unlinked_skb = skb;
++ skb = skb_peek(&sk->sk_receive_queue);
+ }
+- } else if (!(flags & MSG_PEEK)) {
+- skb_unlink(skb, &sk->sk_receive_queue);
+- consume_skb(skb);
+- skb = skb_peek(&sk->sk_receive_queue);
++ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
++ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ }
+ }
++
++ spin_unlock(&sk->sk_receive_queue.lock);
++
++ if (unlinked_skb) {
++ WARN_ON_ONCE(skb_unref(unlinked_skb));
++ kfree_skb(unlinked_skb);
++ }
+ }
+ return skb;
+ }
+@@ -2603,10 +2601,49 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+
+ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+- if (unlikely(sk->sk_state != TCP_ESTABLISHED))
++ struct unix_sock *u = unix_sk(sk);
++ struct sk_buff *skb;
++ int err;
++
++ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
+ return -ENOTCONN;
+
+- return unix_read_skb(sk, recv_actor);
++ mutex_lock(&u->iolock);
++ skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
++ mutex_unlock(&u->iolock);
++ if (!skb)
++ return err;
++
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++ if (unlikely(skb == READ_ONCE(u->oob_skb))) {
++ bool drop = false;
++
++ unix_state_lock(sk);
++
++ if (sock_flag(sk, SOCK_DEAD)) {
++ unix_state_unlock(sk);
++ kfree_skb(skb);
++ return -ECONNRESET;
++ }
++
++ spin_lock(&sk->sk_receive_queue.lock);
++ if (likely(skb == u->oob_skb)) {
++ WRITE_ONCE(u->oob_skb, NULL);
++ drop = true;
++ }
++ spin_unlock(&sk->sk_receive_queue.lock);
++
++ unix_state_unlock(sk);
++
++ if (drop) {
++ WARN_ON_ONCE(skb_unref(skb));
++ kfree_skb(skb);
++ return -EAGAIN;
++ }
++ }
++#endif
++
++ return recv_actor(sk, skb);
+ }
+
+ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+@@ -2627,7 +2664,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ size_t size = state->size;
+ unsigned int last_len;
+
+- if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
+ err = -EINVAL;
+ goto out;
+ }
+@@ -2666,18 +2703,16 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ last = skb = skb_peek(&sk->sk_receive_queue);
+ last_len = last ? last->len : 0;
+
++again:
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (skb) {
+ skb = manage_oob(skb, sk, flags, copied);
+- if (!skb) {
++ if (!skb && copied) {
+ unix_state_unlock(sk);
+- if (copied)
+- break;
+- goto redo;
++ break;
+ }
+ }
+ #endif
+-again:
+ if (skb == NULL) {
+ if (copied >= target)
+ goto unlock;
+@@ -2955,7 +2990,7 @@ long unix_inq_len(struct sock *sk)
+ struct sk_buff *skb;
+ long amount = 0;
+
+- if (sk->sk_state == TCP_LISTEN)
++ if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
+ return -EINVAL;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+@@ -3067,12 +3102,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
+ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+ {
+ struct sock *sk = sock->sk;
++ unsigned char state;
+ __poll_t mask;
+ u8 shutdown;
+
+ sock_poll_wait(file, sock, wait);
+ mask = 0;
+ shutdown = READ_ONCE(sk->sk_shutdown);
++ state = READ_ONCE(sk->sk_state);
+
+ /* exceptional events? */
+ if (READ_ONCE(sk->sk_err))
+@@ -3094,14 +3131,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
+
+ /* Connection-based need to check for termination and startup */
+ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+- sk->sk_state == TCP_CLOSE)
++ state == TCP_CLOSE)
+ mask |= EPOLLHUP;
+
+ /*
+ * we set writable also when the other side has shut down the
+ * connection. This prevents stuck sockets.
+ */
+- if (unix_writable(sk))
++ if (unix_writable(sk, state))
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+
+ return mask;
+@@ -3112,12 +3149,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ {
+ struct sock *sk = sock->sk, *other;
+ unsigned int writable;
++ unsigned char state;
+ __poll_t mask;
+ u8 shutdown;
+
+ sock_poll_wait(file, sock, wait);
+ mask = 0;
+ shutdown = READ_ONCE(sk->sk_shutdown);
++ state = READ_ONCE(sk->sk_state);
+
+ /* exceptional events? */
+ if (READ_ONCE(sk->sk_err) ||
+@@ -3137,19 +3176,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+- if (sk->sk_type == SOCK_SEQPACKET) {
+- if (sk->sk_state == TCP_CLOSE)
+- mask |= EPOLLHUP;
+- /* connection hasn't started yet? */
+- if (sk->sk_state == TCP_SYN_SENT)
+- return mask;
+- }
++ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
++ mask |= EPOLLHUP;
+
+ /* No write status requested, avoid expensive OUT tests. */
+ if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+ return mask;
+
+- writable = unix_writable(sk);
++ writable = unix_writable(sk, state);
+ if (writable) {
+ unix_state_lock(sk);
+
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 616b55c5b89080..1de7500b41b616 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+ u32 *buf;
+ int i;
+
+- if (sk->sk_state == TCP_LISTEN) {
++ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ spin_lock(&sk->sk_receive_queue.lock);
+
+ attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+@@ -84,7 +84,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+ * queue lock. With the other's queue locked it's
+ * OK to lock the state.
+ */
+- unix_state_lock_nested(req);
++ unix_state_lock_nested(req, U_LOCK_DIAG);
+ peer = unix_sk(req)->peer;
+ buf[i++] = (peer ? sock_i_ino(peer) : 0);
+ unix_state_unlock(req);
+@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+ {
+ struct unix_diag_rqlen rql;
+
+- if (sk->sk_state == TCP_LISTEN) {
+- rql.udiag_rqueue = sk->sk_receive_queue.qlen;
++ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
++ rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
+ rql.udiag_wqueue = sk->sk_max_ack_backlog;
+ } else {
+ rql.udiag_rqueue = (u32) unix_inq_len(sk);
+@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+ rep = nlmsg_data(nlh);
+ rep->udiag_family = AF_UNIX;
+ rep->udiag_type = sk->sk_type;
+- rep->udiag_state = sk->sk_state;
++ rep->udiag_state = READ_ONCE(sk->sk_state);
+ rep->pad = 0;
+ rep->udiag_ino = sk_ino;
+ sock_diag_save_cookie(sk, rep->udiag_cookie);
+@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+ sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+ goto out_nlmsg_trim;
+
+- if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
++ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
+ goto out_nlmsg_trim;
+
+ if ((req->udiag_show & UDIAG_SHOW_UID) &&
+@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ sk_for_each(sk, &net->unx.table.buckets[slot]) {
+ if (num < s_num)
+ goto next;
+- if (!(req->udiag_states & (1 << sk->sk_state)))
++ if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
+ goto next;
+ if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+ NETLINK_CB(cb->skb).portid,
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 2405f0f9af31c0..2a758531e10271 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -166,17 +166,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
+
+ static void dec_inflight(struct unix_sock *usk)
+ {
+- atomic_long_dec(&usk->inflight);
++ usk->inflight--;
+ }
+
+ static void inc_inflight(struct unix_sock *usk)
+ {
+- atomic_long_inc(&usk->inflight);
++ usk->inflight++;
+ }
+
+ static void inc_inflight_move_tail(struct unix_sock *u)
+ {
+- atomic_long_inc(&u->inflight);
++ u->inflight++;
++
+ /* If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
+@@ -198,7 +199,7 @@ void wait_for_unix_gc(void)
+ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ !READ_ONCE(gc_in_progress))
+ unix_gc();
+- wait_event(unix_gc_wait, gc_in_progress == false);
++ wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
+ }
+
+ /* The external entry point: unix_gc() */
+@@ -234,20 +235,34 @@ void unix_gc(void)
+ * receive queues. Other, non candidate sockets _can_ be
+ * added to queue, so we must make sure only to touch
+ * candidates.
++ *
++ * Embryos, though never candidates themselves, affect which
++ * candidates are reachable by the garbage collector. Before
++ * being added to a listener's queue, an embryo may already
++ * receive data carrying SCM_RIGHTS, potentially making the
++ * passed socket a candidate that is not yet reachable by the
++ * collector. It becomes reachable once the embryo is
++ * enqueued. Therefore, we must ensure that no SCM-laden
++ * embryo appears in a (candidate) listener's queue between
++ * consecutive scan_children() calls.
+ */
+ list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
++ struct sock *sk = &u->sk;
+ long total_refs;
+- long inflight_refs;
+
+- total_refs = file_count(u->sk.sk_socket->file);
+- inflight_refs = atomic_long_read(&u->inflight);
++ total_refs = file_count(sk->sk_socket->file);
+
+- BUG_ON(inflight_refs < 1);
+- BUG_ON(total_refs < inflight_refs);
+- if (total_refs == inflight_refs) {
++ BUG_ON(!u->inflight);
++ BUG_ON(total_refs < u->inflight);
++ if (total_refs == u->inflight) {
+ list_move_tail(&u->link, &gc_candidates);
+ __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
+ __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
++
++ if (sk->sk_state == TCP_LISTEN) {
++ unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
++ unix_state_unlock(sk);
++ }
+ }
+ }
+
+@@ -271,7 +286,7 @@ void unix_gc(void)
+ /* Move cursor to after the current position. */
+ list_move(&cursor, &u->link);
+
+- if (atomic_long_read(&u->inflight) > 0) {
++ if (u->inflight) {
+ list_move_tail(&u->link, &not_cycle_list);
+ __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+ scan_children(&u->sk, inc_inflight_move_tail, NULL);
+@@ -284,9 +299,17 @@ void unix_gc(void)
+ * which are creating the cycle(s).
+ */
+ skb_queue_head_init(&hitlist);
+- list_for_each_entry(u, &gc_candidates, link)
++ list_for_each_entry(u, &gc_candidates, link) {
+ scan_children(&u->sk, inc_inflight, &hitlist);
+
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++ if (u->oob_skb) {
++ kfree_skb(u->oob_skb);
++ u->oob_skb = NULL;
++ }
++#endif
++ }
++
+ /* not_cycle_list contains those sockets which do not make up a
+ * cycle. Restore these to the inflight list.
+ */
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index 6ff628f2349f57..e92f2fad64105d 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -35,10 +35,8 @@ struct sock *unix_get_socket(struct file *filp)
+ /* PF_UNIX ? */
+ if (s && ops && ops->family == PF_UNIX)
+ u_sock = s;
+- } else {
+- /* Could be an io_uring instance */
+- u_sock = io_uring_get_socket(filp);
+ }
++
+ return u_sock;
+ }
+ EXPORT_SYMBOL(unix_get_socket);
+@@ -55,12 +53,13 @@ void unix_inflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+- if (atomic_long_inc_return(&u->inflight) == 1) {
++ if (!u->inflight) {
+ BUG_ON(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ BUG_ON(list_empty(&u->link));
+ }
++ u->inflight++;
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ }
+@@ -77,10 +76,11 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+- BUG_ON(!atomic_long_read(&u->inflight));
++ BUG_ON(!u->inflight);
+ BUG_ON(list_empty(&u->link));
+
+- if (atomic_long_dec_and_test(&u->inflight))
++ u->inflight--;
++ if (!u->inflight)
+ list_del_init(&u->link);
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index 2f9d8271c6ec7d..bca2d86ba97d8d 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ struct sk_psock *psock;
+ int copied;
+
++ if (flags & MSG_OOB)
++ return -EOPNOTSUPP;
++
+ if (!len)
+ return 0;
+
+@@ -159,12 +162,32 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool re
+
+ int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ {
++ struct sock *sk_pair;
++
++ /* Restore does not decrement the sk_pair reference yet because we must
++ * keep a reference to the socket until after an RCU grace period
++ * and any pending sends have completed.
++ */
+ if (restore) {
+ sk->sk_write_space = psock->saved_write_space;
+ sock_replace_proto(sk, psock->sk_proto);
+ return 0;
+ }
+
++ /* psock_update_sk_prot can be called multiple times if psock is
++ * added to multiple maps and/or slots in the same map. There is
++ * also an edge case where replacing a psock with itself can trigger
++ * an extra psock_update_sk_prot during the insert process. So it
++ * must be safe to do multiple calls. Here we need to ensure we don't
++ * increment the refcnt through sock_hold many times. There will only
++ * be a single matching destroy operation.
++ */
++ if (!psock->sk_pair) {
++ sk_pair = unix_peer(sk);
++ sock_hold(sk_pair);
++ psock->sk_pair = sk_pair;
++ }
++
+ unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
+ sock_replace_proto(sk, &unix_stream_bpf_prot);
+ return 0;
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 020cf17ab7e47b..f5eb737a677d97 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -89,6 +89,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/cred.h>
++#include <linux/errqueue.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -110,6 +111,7 @@
+ #include <linux/workqueue.h>
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
++#include <uapi/linux/vm_sockets.h>
+
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+@@ -1268,25 +1270,28 @@ static int vsock_dgram_connect(struct socket *sock,
+ return err;
+ }
+
++int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
++ size_t len, int flags)
++{
++ struct sock *sk = sock->sk;
++ struct vsock_sock *vsk = vsock_sk(sk);
++
++ return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
++}
++
+ int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
+ {
+ #ifdef CONFIG_BPF_SYSCALL
++ struct sock *sk = sock->sk;
+ const struct proto *prot;
+-#endif
+- struct vsock_sock *vsk;
+- struct sock *sk;
+-
+- sk = sock->sk;
+- vsk = vsock_sk(sk);
+
+-#ifdef CONFIG_BPF_SYSCALL
+ prot = READ_ONCE(sk->sk_prot);
+ if (prot != &vsock_proto)
+ return prot->recvmsg(sk, msg, len, flags, NULL);
+ #endif
+
+- return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
++ return __vsock_dgram_recvmsg(sock, msg, len, flags);
+ }
+ EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);
+
+@@ -2122,18 +2127,19 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
+ }
+
+ int
+-vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+- int flags)
++__vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
++ int flags)
+ {
+ struct sock *sk;
+ struct vsock_sock *vsk;
+ const struct vsock_transport *transport;
+-#ifdef CONFIG_BPF_SYSCALL
+- const struct proto *prot;
+-#endif
+ int err;
+
+ sk = sock->sk;
++
++ if (unlikely(flags & MSG_ERRQUEUE))
++ return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
++
+ vsk = vsock_sk(sk);
+ err = 0;
+
+@@ -2177,14 +2183,6 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ goto out;
+ }
+
+-#ifdef CONFIG_BPF_SYSCALL
+- prot = READ_ONCE(sk->sk_prot);
+- if (prot != &vsock_proto) {
+- release_sock(sk);
+- return prot->recvmsg(sk, msg, len, flags, NULL);
+- }
+-#endif
+-
+ if (sk->sk_type == SOCK_STREAM)
+ err = __vsock_stream_recvmsg(sk, msg, len, flags);
+ else
+@@ -2194,6 +2192,22 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ release_sock(sk);
+ return err;
+ }
++
++int
++vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
++ int flags)
++{
++#ifdef CONFIG_BPF_SYSCALL
++ struct sock *sk = sock->sk;
++ const struct proto *prot;
++
++ prot = READ_ONCE(sk->sk_prot);
++ if (prot != &vsock_proto)
++ return prot->recvmsg(sk, msg, len, flags, NULL);
++#endif
++
++ return __vsock_connectible_recvmsg(sock, msg, len, flags);
++}
+ EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);
+
+ static int vsock_set_rcvlowat(struct sock *sk, int val)
+@@ -2208,8 +2222,13 @@ static int vsock_set_rcvlowat(struct sock *sk, int val)
+
+ transport = vsk->transport;
+
+- if (transport && transport->set_rcvlowat)
+- return transport->set_rcvlowat(vsk, val);
++ if (transport && transport->notify_set_rcvlowat) {
++ int err;
++
++ err = transport->notify_set_rcvlowat(vsk, val);
++ if (err)
++ return err;
++ }
+
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
+ return 0;
+diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
+index 7cb1a9d2cdb4f8..e2157e38721770 100644
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -816,7 +816,7 @@ int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
+ }
+
+ static
+-int hvs_set_rcvlowat(struct vsock_sock *vsk, int val)
++int hvs_notify_set_rcvlowat(struct vsock_sock *vsk, int val)
+ {
+ return -EOPNOTSUPP;
+ }
+@@ -856,7 +856,7 @@ static struct vsock_transport hvs_transport = {
+ .notify_send_pre_enqueue = hvs_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = hvs_notify_send_post_enqueue,
+
+- .set_rcvlowat = hvs_set_rcvlowat
++ .notify_set_rcvlowat = hvs_notify_set_rcvlowat
+ };
+
+ static bool hvs_check_transport(struct vsock_sock *vsk)
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index b80bf681327bd3..2925f5d27ad3fb 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -109,7 +109,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ if (!skb)
+ break;
+
+- virtio_transport_deliver_tap_pkt(skb);
+ reply = virtio_vsock_skb_reply(skb);
+
+ sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
+@@ -128,6 +127,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
+ break;
+ }
+
++ virtio_transport_deliver_tap_pkt(skb);
++
+ if (reply) {
+ struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+ int val;
+@@ -457,6 +458,7 @@ static struct virtio_transport virtio_transport = {
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
++ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+
+ .read_skb = virtio_transport_read_skb,
+ },
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 352d042b130b54..e87fd9480acdac 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -68,6 +68,8 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
+ hdr->dst_port = cpu_to_le32(dst_port);
+ hdr->flags = cpu_to_le32(info->flags);
+ hdr->len = cpu_to_le32(len);
++ hdr->buf_alloc = cpu_to_le32(0);
++ hdr->fwd_cnt = cpu_to_le32(0);
+
+ if (info->msg && len > 0) {
+ payload = skb_put(skb, len);
+@@ -396,6 +398,8 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ size_t bytes, total = 0;
+ struct sk_buff *skb;
++ u32 fwd_cnt_delta;
++ bool low_rx_bytes;
+ int err = -EFAULT;
+ u32 free_space;
+
+@@ -437,7 +441,10 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ }
+ }
+
+- free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
++ fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt;
++ free_space = vvs->buf_alloc - fwd_cnt_delta;
++ low_rx_bytes = (vvs->rx_bytes <
++ sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX));
+
+ spin_unlock_bh(&vvs->rx_lock);
+
+@@ -447,9 +454,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ * too high causes extra messages. Too low causes transmitter
+ * stalls. As stalls are in theory more expensive than extra
+ * messages, we set the limit to a high value. TODO: experiment
+- * with different values.
++ * with different values. Also send a credit update message when the
++ * number of bytes in the rx queue is not enough to wake up the reader.
+ */
+- if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
++ if (fwd_cnt_delta &&
++ (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes))
+ virtio_transport_send_credit_update(vsk);
+
+ return total;
+@@ -677,7 +686,7 @@ static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+- bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
++ bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ if (bytes < 0)
+ bytes = 0;
+
+@@ -1204,11 +1213,17 @@ virtio_transport_recv_connected(struct sock *sk,
+ vsk->peer_shutdown |= RCV_SHUTDOWN;
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+- if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+- vsock_stream_has_data(vsk) <= 0 &&
+- !sock_flag(sk, SOCK_DONE)) {
+- (void)virtio_transport_reset(vsk, NULL);
+- virtio_transport_do_close(vsk, true);
++ if (vsk->peer_shutdown == SHUTDOWN_MASK) {
++ if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
++ (void)virtio_transport_reset(vsk, NULL);
++ virtio_transport_do_close(vsk, true);
++ }
++ /* Remove this socket anyway because the remote peer sent
++ * the shutdown. This way a new connection will succeed
++ * if the remote peer uses the same source port,
++ * even if the old socket is still unreleased, but now disconnected.
++ */
++ vsock_remove_sock(vsk);
+ }
+ if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
+ sk->sk_state_change(sk);
+@@ -1511,6 +1526,36 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_read_skb);
+
++int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val)
++{
++ struct virtio_vsock_sock *vvs = vsk->trans;
++ bool send_update;
++
++ spin_lock_bh(&vvs->rx_lock);
++
++ /* If number of available bytes is less than new SO_RCVLOWAT value,
++ * kick sender to send more data, because sender may sleep in its
++ * 'send()' syscall waiting for enough space at our side. Also
++ * don't send credit update when peer already knows actual value -
++ * such transmission will be useless.
++ */
++ send_update = (vvs->rx_bytes < val) &&
++ (vvs->fwd_cnt != vvs->last_fwd_cnt);
++
++ spin_unlock_bh(&vvs->rx_lock);
++
++ if (send_update) {
++ int err;
++
++ err = virtio_transport_send_credit_update(vsk);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(virtio_transport_notify_set_rcvlowat);
++
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Asias He");
+ MODULE_DESCRIPTION("common code for virtio vsock");
+diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
+index a3c97546ab84a6..c42c5cc18f3241 100644
+--- a/net/vmw_vsock/vsock_bpf.c
++++ b/net/vmw_vsock/vsock_bpf.c
+@@ -64,9 +64,9 @@ static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int
+ int err;
+
+ if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
+- err = vsock_connectible_recvmsg(sock, msg, len, flags);
++ err = __vsock_connectible_recvmsg(sock, msg, len, flags);
+ else if (sk->sk_type == SOCK_DGRAM)
+- err = vsock_dgram_recvmsg(sock, msg, len, flags);
++ err = __vsock_dgram_recvmsg(sock, msg, len, flags);
+ else
+ err = -EPROTOTYPE;
+
+diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
+index 5c6360df1f3137..0ce65d0a4a44ff 100644
+--- a/net/vmw_vsock/vsock_loopback.c
++++ b/net/vmw_vsock/vsock_loopback.c
+@@ -90,6 +90,7 @@ static struct virtio_transport loopback_transport = {
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+ .notify_buffer_size = virtio_transport_notify_buffer_size,
++ .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+
+ .read_skb = virtio_transport_read_skb,
+ },
+diff --git a/net/wireless/certs/wens.hex b/net/wireless/certs/wens.hex
+new file mode 100644
+index 00000000000000..0d50369bede989
+--- /dev/null
++++ b/net/wireless/certs/wens.hex
+@@ -0,0 +1,87 @@
++/* Chen-Yu Tsai's regdb certificate */
++0x30, 0x82, 0x02, 0xa7, 0x30, 0x82, 0x01, 0x8f,
++0x02, 0x14, 0x61, 0xc0, 0x38, 0x65, 0x1a, 0xab,
++0xdc, 0xf9, 0x4b, 0xd0, 0xac, 0x7f, 0xf0, 0x6c,
++0x72, 0x48, 0xdb, 0x18, 0xc6, 0x00, 0x30, 0x0d,
++0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
++0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x0f, 0x31,
++0x0d, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x04, 0x03,
++0x0c, 0x04, 0x77, 0x65, 0x6e, 0x73, 0x30, 0x20,
++0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x31,
++0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a, 0x18,
++0x0f, 0x32, 0x31, 0x32, 0x33, 0x31, 0x31, 0x30,
++0x37, 0x30, 0x37, 0x34, 0x31, 0x31, 0x34, 0x5a,
++0x30, 0x0f, 0x31, 0x0d, 0x30, 0x0b, 0x06, 0x03,
++0x55, 0x04, 0x03, 0x0c, 0x04, 0x77, 0x65, 0x6e,
++0x73, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06,
++0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01,
++0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f,
++0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01,
++0x01, 0x00, 0xa9, 0x7a, 0x2c, 0x78, 0x4d, 0xa7,
++0x19, 0x2d, 0x32, 0x52, 0xa0, 0x2e, 0x6c, 0xef,
++0x88, 0x7f, 0x15, 0xc5, 0xb6, 0x69, 0x54, 0x16,
++0x43, 0x14, 0x79, 0x53, 0xb7, 0xae, 0x88, 0xfe,
++0xc0, 0xb7, 0x5d, 0x47, 0x8e, 0x1a, 0xe1, 0xef,
++0xb3, 0x90, 0x86, 0xda, 0xd3, 0x64, 0x81, 0x1f,
++0xce, 0x5d, 0x9e, 0x4b, 0x6e, 0x58, 0x02, 0x3e,
++0xb2, 0x6f, 0x5e, 0x42, 0x47, 0x41, 0xf4, 0x2c,
++0xb8, 0xa8, 0xd4, 0xaa, 0xc0, 0x0e, 0xe6, 0x48,
++0xf0, 0xa8, 0xce, 0xcb, 0x08, 0xae, 0x37, 0xaf,
++0xf6, 0x40, 0x39, 0xcb, 0x55, 0x6f, 0x5b, 0x4f,
++0x85, 0x34, 0xe6, 0x69, 0x10, 0x50, 0x72, 0x5e,
++0x4e, 0x9d, 0x4c, 0xba, 0x38, 0x36, 0x0d, 0xce,
++0x73, 0x38, 0xd7, 0x27, 0x02, 0x2a, 0x79, 0x03,
++0xe1, 0xac, 0xcf, 0xb0, 0x27, 0x85, 0x86, 0x93,
++0x17, 0xab, 0xec, 0x42, 0x77, 0x37, 0x65, 0x8a,
++0x44, 0xcb, 0xd6, 0x42, 0x93, 0x92, 0x13, 0xe3,
++0x39, 0x45, 0xc5, 0x6e, 0x00, 0x4a, 0x7f, 0xcb,
++0x42, 0x17, 0x2b, 0x25, 0x8c, 0xb8, 0x17, 0x3b,
++0x15, 0x36, 0x59, 0xde, 0x42, 0xce, 0x21, 0xe6,
++0xb6, 0xc7, 0x6e, 0x5e, 0x26, 0x1f, 0xf7, 0x8a,
++0x57, 0x9e, 0xa5, 0x96, 0x72, 0xb7, 0x02, 0x32,
++0xeb, 0x07, 0x2b, 0x73, 0xe2, 0x4f, 0x66, 0x58,
++0x9a, 0xeb, 0x0f, 0x07, 0xb6, 0xab, 0x50, 0x8b,
++0xc3, 0x8f, 0x17, 0xfa, 0x0a, 0x99, 0xc2, 0x16,
++0x25, 0xbf, 0x2d, 0x6b, 0x1a, 0xaa, 0xe6, 0x3e,
++0x5f, 0xeb, 0x6d, 0x9b, 0x5d, 0x4d, 0x42, 0x83,
++0x2d, 0x39, 0xb8, 0xc9, 0xac, 0xdb, 0x3a, 0x91,
++0x50, 0xdf, 0xbb, 0xb1, 0x76, 0x6d, 0x15, 0x73,
++0xfd, 0xc6, 0xe6, 0x6b, 0x71, 0x9e, 0x67, 0x36,
++0x22, 0x83, 0x79, 0xb1, 0xd6, 0xb8, 0x84, 0x52,
++0xaf, 0x96, 0x5b, 0xc3, 0x63, 0x02, 0x4e, 0x78,
++0x70, 0x57, 0x02, 0x03, 0x01, 0x00, 0x01, 0x30,
++0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
++0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82,
++0x01, 0x01, 0x00, 0x24, 0x28, 0xee, 0x22, 0x74,
++0x7f, 0x7c, 0xfa, 0x6c, 0x1f, 0xb3, 0x18, 0xd1,
++0xc2, 0x3d, 0x7d, 0x29, 0x42, 0x88, 0xad, 0x82,
++0xa5, 0xb1, 0x8a, 0x05, 0xd0, 0xec, 0x5c, 0x91,
++0x20, 0xf6, 0x82, 0xfd, 0xd5, 0x67, 0x60, 0x5f,
++0x31, 0xf5, 0xbd, 0x88, 0x91, 0x70, 0xbd, 0xb8,
++0xb9, 0x8c, 0x88, 0xfe, 0x53, 0xc9, 0x54, 0x9b,
++0x43, 0xc4, 0x7a, 0x43, 0x74, 0x6b, 0xdd, 0xb0,
++0xb1, 0x3b, 0x33, 0x45, 0x46, 0x78, 0xa3, 0x1c,
++0xef, 0x54, 0x68, 0xf7, 0x85, 0x9c, 0xe4, 0x51,
++0x6f, 0x06, 0xaf, 0x81, 0xdb, 0x2a, 0x7b, 0x7b,
++0x6f, 0xa8, 0x9c, 0x67, 0xd8, 0xcb, 0xc9, 0x91,
++0x40, 0x00, 0xae, 0xd9, 0xa1, 0x9f, 0xdd, 0xa6,
++0x43, 0x0e, 0x28, 0x7b, 0xaa, 0x1b, 0xe9, 0x84,
++0xdb, 0x76, 0x64, 0x42, 0x70, 0xc9, 0xc0, 0xeb,
++0xae, 0x84, 0x11, 0x16, 0x68, 0x4e, 0x84, 0x9e,
++0x7e, 0x92, 0x36, 0xee, 0x1c, 0x3b, 0x08, 0x63,
++0xeb, 0x79, 0x84, 0x15, 0x08, 0x9d, 0xaf, 0xc8,
++0x9a, 0xc7, 0x34, 0xd3, 0x94, 0x4b, 0xd1, 0x28,
++0x97, 0xbe, 0xd1, 0x45, 0x75, 0xdc, 0x35, 0x62,
++0xac, 0x1d, 0x1f, 0xb7, 0xb7, 0x15, 0x87, 0xc8,
++0x98, 0xc0, 0x24, 0x31, 0x56, 0x8d, 0xed, 0xdb,
++0x06, 0xc6, 0x46, 0xbf, 0x4b, 0x6d, 0xa6, 0xd5,
++0xab, 0xcc, 0x60, 0xfc, 0xe5, 0x37, 0xb6, 0x53,
++0x7d, 0x58, 0x95, 0xa9, 0x56, 0xc7, 0xf7, 0xee,
++0xc3, 0xa0, 0x76, 0xf7, 0x65, 0x4d, 0x53, 0xfa,
++0xff, 0x5f, 0x76, 0x33, 0x5a, 0x08, 0xfa, 0x86,
++0x92, 0x5a, 0x13, 0xfa, 0x1a, 0xfc, 0xf2, 0x1b,
++0x8c, 0x7f, 0x42, 0x6d, 0xb7, 0x7e, 0xb7, 0xb4,
++0xf0, 0xc7, 0x83, 0xbb, 0xa2, 0x81, 0x03, 0x2d,
++0xd4, 0x2a, 0x63, 0x3f, 0xf7, 0x31, 0x2e, 0x40,
++0x33, 0x5c, 0x46, 0xbc, 0x9b, 0xc1, 0x05, 0xa5,
++0x45, 0x4e, 0xc3,
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index acec41c1809a82..68aa8f0d70140d 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -221,7 +221,9 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
+ {
+ struct cfg80211_registered_device *rdev = data;
+
++ wiphy_lock(&rdev->wiphy);
+ rdev_rfkill_poll(rdev);
++ wiphy_unlock(&rdev->wiphy);
+ }
+
+ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+@@ -429,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
+ if (wk) {
+ list_del_init(&wk->entry);
+ if (!list_empty(&rdev->wiphy_work_list))
+- schedule_work(work);
++ queue_work(system_unbound_wq, work);
+ spin_unlock_irq(&rdev->wiphy_work_lock);
+
+ wk->func(&rdev->wiphy, wk);
+@@ -1049,7 +1051,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
+ }
+ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
+
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++ struct wiphy_work *end)
+ {
+ unsigned int runaway_limit = 100;
+ unsigned long flags;
+@@ -1068,6 +1071,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
+ wk->func(&rdev->wiphy, wk);
+
+ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++
++ if (wk == end)
++ break;
++
+ if (WARN_ON(--runaway_limit == 0))
+ INIT_LIST_HEAD(&rdev->wiphy_work_list);
+ }
+@@ -1118,7 +1125,7 @@ void wiphy_unregister(struct wiphy *wiphy)
+ #endif
+
+ /* surely nothing is reachable now, clean up work */
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ wiphy_unlock(&rdev->wiphy);
+ rtnl_unlock();
+
+@@ -1640,6 +1647,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
+ }
+ EXPORT_SYMBOL_GPL(wiphy_work_cancel);
+
++void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
++{
++ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
++ unsigned long flags;
++ bool run;
++
++ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
++ run = !work || !list_empty(&work->entry);
++ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
++
++ if (run)
++ cfg80211_process_wiphy_works(rdev, work);
++}
++EXPORT_SYMBOL_GPL(wiphy_work_flush);
++
+ void wiphy_delayed_work_timer(struct timer_list *t)
+ {
+ struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
+@@ -1653,6 +1675,7 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ unsigned long delay)
+ {
+ if (!delay) {
++ del_timer(&dwork->timer);
+ wiphy_work_queue(wiphy, &dwork->work);
+ return;
+ }
+@@ -1672,6 +1695,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
+
++void wiphy_delayed_work_flush(struct wiphy *wiphy,
++ struct wiphy_delayed_work *dwork)
++{
++ lockdep_assert_held(&wiphy->mtx);
++
++ del_timer_sync(&dwork->timer);
++ wiphy_work_flush(wiphy, &dwork->work);
++}
++EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
++
+ static int __init cfg80211_init(void)
+ {
+ int err;
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index ba9c7170afa44e..f0a3a231763854 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -299,6 +299,7 @@ struct cfg80211_cqm_config {
+ u32 rssi_hyst;
+ s32 last_rssi_event_value;
+ enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
++ bool use_range_api;
+ int n_rssi_thresholds;
+ s32 rssi_thresholds[] __counted_by(n_rssi_thresholds);
+ };
+@@ -464,7 +465,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+-void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
++ struct wiphy_work *end);
+ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+
+ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 931a03f4549c9f..9e74f249cb45f1 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -468,6 +468,10 @@ static struct netlink_range_validation nl80211_punct_bitmap_range = {
+ .max = 0xffff,
+ };
+
++static struct netlink_range_validation q_range = {
++ .max = INT_MAX,
++};
++
+ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
+ [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
+@@ -750,7 +754,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+
+ [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
+ [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
+- [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
++ [NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range),
+ [NL80211_ATTR_HE_CAPABILITY] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
+ NL80211_HE_MAX_CAPABILITY_LEN),
+@@ -3398,6 +3402,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
+ if (chandef.chan != cur_chan)
+ return -EBUSY;
+
++ /* only allow this for regular channel widths */
++ switch (wdev->links[link_id].ap.chandef.width) {
++ case NL80211_CHAN_WIDTH_20_NOHT:
++ case NL80211_CHAN_WIDTH_20:
++ case NL80211_CHAN_WIDTH_40:
++ case NL80211_CHAN_WIDTH_80:
++ case NL80211_CHAN_WIDTH_80P80:
++ case NL80211_CHAN_WIDTH_160:
++ case NL80211_CHAN_WIDTH_320:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ switch (chandef.width) {
++ case NL80211_CHAN_WIDTH_20_NOHT:
++ case NL80211_CHAN_WIDTH_20:
++ case NL80211_CHAN_WIDTH_40:
++ case NL80211_CHAN_WIDTH_80:
++ case NL80211_CHAN_WIDTH_80P80:
++ case NL80211_CHAN_WIDTH_160:
++ case NL80211_CHAN_WIDTH_320:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ result = rdev_set_ap_chanwidth(rdev, dev, link_id,
+ &chandef);
+ if (result)
+@@ -4012,6 +4043,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
+ if_idx++;
+ }
+
++ if_start = 0;
+ wp_idx++;
+ }
+ out:
+@@ -4188,6 +4220,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
+
+ if (ntype != NL80211_IFTYPE_MESH_POINT)
+ return -EINVAL;
++ if (otype != NL80211_IFTYPE_MESH_POINT)
++ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+
+@@ -4443,10 +4477,7 @@ static void get_key_callback(void *c, struct key_params *params)
+ struct nlattr *key;
+ struct get_key_cookie *cookie = c;
+
+- if ((params->key &&
+- nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
+- params->key_len, params->key)) ||
+- (params->seq &&
++ if ((params->seq &&
+ nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
+ params->seq_len, params->seq)) ||
+ (params->cipher &&
+@@ -4458,10 +4489,7 @@ static void get_key_callback(void *c, struct key_params *params)
+ if (!key)
+ goto nla_put_failure;
+
+- if ((params->key &&
+- nla_put(cookie->msg, NL80211_KEY_DATA,
+- params->key_len, params->key)) ||
+- (params->seq &&
++ if ((params->seq &&
+ nla_put(cookie->msg, NL80211_KEY_SEQ,
+ params->seq_len, params->seq)) ||
+ (params->cipher &&
+@@ -9150,6 +9178,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+ struct wiphy *wiphy;
+ int err, tmp, n_ssids = 0, n_channels, i;
+ size_t ie_len, size;
++ size_t ssids_offset, ie_offset;
+
+ wiphy = &rdev->wiphy;
+
+@@ -9195,21 +9224,20 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+ return -EINVAL;
+
+ size = struct_size(request, channels, n_channels);
++ ssids_offset = size;
+ size = size_add(size, array_size(sizeof(*request->ssids), n_ssids));
++ ie_offset = size;
+ size = size_add(size, ie_len);
+ request = kzalloc(size, GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
++ request->n_channels = n_channels;
+
+ if (n_ssids)
+- request->ssids = (void *)&request->channels[n_channels];
++ request->ssids = (void *)request + ssids_offset;
+ request->n_ssids = n_ssids;
+- if (ie_len) {
+- if (n_ssids)
+- request->ie = (void *)(request->ssids + n_ssids);
+- else
+- request->ie = (void *)(request->channels + n_channels);
+- }
++ if (ie_len)
++ request->ie = (void *)request + ie_offset;
+
+ i = 0;
+ if (scan_freqs) {
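
The trigger-scan hunk above records the offset of each trailing region while summing the allocation size, then slices the single kzalloc() by those offsets rather than deriving pointers from request->channels[n_channels]. A sketch under hypothetical names — struct demo_req stands in for cfg80211_scan_request:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ssid {
        u8 len;
        u8 ssid[32];
};

struct demo_req {
        struct demo_ssid *ssids;        /* points into the same allocation */
        const u8 *ie;                   /* likewise */
        int n_ssids;
        u32 n_channels;
        u32 channels[];                 /* flexible array must stay last */
};

static struct demo_req *demo_req_alloc(int n_channels, int n_ssids,
                                       size_t ie_len)
{
        size_t size, ssids_offset, ie_offset;
        struct demo_req *req;

        /* Sum the regions once, remembering where each one starts. */
        size = struct_size(req, channels, n_channels);
        ssids_offset = size;
        size = size_add(size, array_size(sizeof(*req->ssids), n_ssids));
        ie_offset = size;
        size = size_add(size, ie_len);

        req = kzalloc(size, GFP_KERNEL);
        if (!req)
                return NULL;

        req->n_channels = n_channels;
        if (n_ssids) {
                req->ssids = (void *)req + ssids_offset;
                req->n_ssids = n_ssids;
        }
        if (ie_len)
                req->ie = (void *)req + ie_offset;
        return req;
}
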
+@@ -9651,7 +9679,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
+ return ERR_PTR(-ENOMEM);
+
+ if (n_ssids)
+- request->ssids = (void *)&request->channels[n_channels];
++ request->ssids = (void *)request +
++ struct_size(request, channels, n_channels);
+ request->n_ssids = n_ssids;
+ if (ie_len) {
+ if (n_ssids)
+@@ -10019,7 +10048,20 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
+
+ err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms);
+ if (!err) {
+- wdev->links[0].ap.chandef = chandef;
++ switch (wdev->iftype) {
++ case NL80211_IFTYPE_AP:
++ case NL80211_IFTYPE_P2P_GO:
++ wdev->links[0].ap.chandef = chandef;
++ break;
++ case NL80211_IFTYPE_ADHOC:
++ wdev->u.ibss.chandef = chandef;
++ break;
++ case NL80211_IFTYPE_MESH_POINT:
++ wdev->u.mesh.chandef = chandef;
++ break;
++ default:
++ break;
++ }
+ wdev->cac_started = true;
+ wdev->cac_start_time = jiffies;
+ wdev->cac_time_ms = cac_time_ms;
+@@ -12824,10 +12866,6 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ int i, n, low_index;
+ int err;
+
+- /* RSSI reporting disabled? */
+- if (!cqm_config)
+- return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+-
+ /*
+ * Obtain current RSSI value if possible, if not and no RSSI threshold
+ * event has been received yet, we should receive an event after a
+@@ -12902,18 +12940,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+- if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+- if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+- return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+-
+- return rdev_set_cqm_rssi_config(rdev, dev,
+- thresholds[0], hysteresis);
+- }
+-
+- if (!wiphy_ext_feature_isset(&rdev->wiphy,
+- NL80211_EXT_FEATURE_CQM_RSSI_LIST))
+- return -EOPNOTSUPP;
+-
+ if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ n_thresholds = 0;
+
+@@ -12921,6 +12947,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ old = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+
++ /* if already disabled just succeed */
++ if (!n_thresholds && !old) {
++ err = 0;
++ goto unlock;
++ }
++
++ if (n_thresholds > 1) {
++ if (!wiphy_ext_feature_isset(&rdev->wiphy,
++ NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
++ !rdev->ops->set_cqm_rssi_range_config) {
++ err = -EOPNOTSUPP;
++ goto unlock;
++ }
++ } else {
++ if (!rdev->ops->set_cqm_rssi_config) {
++ err = -EOPNOTSUPP;
++ goto unlock;
++ }
++ }
++
+ if (n_thresholds) {
+ cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ n_thresholds),
+@@ -12935,13 +12981,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ memcpy(cqm_config->rssi_thresholds, thresholds,
+ flex_array_size(cqm_config, rssi_thresholds,
+ n_thresholds));
++ cqm_config->use_range_api = n_thresholds > 1 ||
++ !rdev->ops->set_cqm_rssi_config;
+
+ rcu_assign_pointer(wdev->cqm_config, cqm_config);
++
++ if (cqm_config->use_range_api)
++ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++ else
++ err = rdev_set_cqm_rssi_config(rdev, dev,
++ thresholds[0],
++ hysteresis);
+ } else {
+ RCU_INIT_POINTER(wdev->cqm_config, NULL);
++		/* if enabled via the range API, also disable via the range API */
++ if (old->use_range_api)
++ err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
++ else
++ err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+ }
+
+- err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ if (err) {
+ rcu_assign_pointer(wdev->cqm_config, old);
+ kfree_rcu(cqm_config, rcu_head);
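
The CQM rewrite above publishes the new config with rcu_assign_pointer() before calling into the driver, restores the old pointer on failure, and frees the loser only after a grace period. A self-contained sketch of that publish-then-roll-back shape, with illustrative names:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_cfg {
        int threshold;
        struct rcu_head rcu;
};

static struct demo_cfg __rcu *active_cfg;       /* illustrative global */
static DEFINE_MUTEX(cfg_lock);

/* Publish newc, then roll back to the old config if the apply step
 * fails; either way the losing config is freed only after a grace
 * period, so lockless readers never see freed memory. */
static int demo_cfg_update(struct demo_cfg *newc,
                           int (*apply)(struct demo_cfg *))
{
        struct demo_cfg *old;
        int err;

        mutex_lock(&cfg_lock);
        old = rcu_dereference_protected(active_cfg,
                                        lockdep_is_held(&cfg_lock));
        rcu_assign_pointer(active_cfg, newc);
        err = apply(newc);
        if (err) {
                rcu_assign_pointer(active_cfg, old);    /* roll back */
                kfree_rcu(newc, rcu);
        } else if (old) {
                kfree_rcu(old, rcu);
        }
        mutex_unlock(&cfg_lock);
        return err;
}
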
+@@ -14032,6 +14091,8 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+ error:
+ for (i = 0; i < new_coalesce.n_rules; i++) {
+ tmp_rule = &new_coalesce.rules[i];
++ if (!tmp_rule)
++ continue;
+ for (j = 0; j < tmp_rule->n_patterns; j++)
+ kfree(tmp_rule->patterns[j].mask);
+ kfree(tmp_rule->patterns);
+@@ -19131,10 +19192,11 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+ wdev_lock(wdev);
+ cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+- if (!wdev->cqm_config)
++ if (!cqm_config)
+ goto unlock;
+
+- cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
++ if (cqm_config->use_range_api)
++ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+
+ rssi_level = cqm_config->last_rssi_event_value;
+ rssi_event = cqm_config->last_rssi_event_type;
+diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
+index 9611aa0bd05133..841a4516793b19 100644
+--- a/net/wireless/pmsr.c
++++ b/net/wireless/pmsr.c
+@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ out->ftm.burst_period = 0;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
+ out->ftm.burst_period =
+- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
++ nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+
+ out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
+ if (out->ftm.asap && !capa->ftm.asap) {
+@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ out->ftm.num_bursts_exp = 0;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
+ out->ftm.num_bursts_exp =
+- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
++ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+
+ if (capa->ftm.max_bursts_exponent >= 0 &&
+ out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
+@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ out->ftm.burst_duration = 15;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
+ out->ftm.burst_duration =
+- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
++ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+
+ out->ftm.ftms_per_burst = 0;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
+@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ out->ftm.ftmr_retries = 3;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
+ out->ftm.ftmr_retries =
+- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
++ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+
+ out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
+ if (out->ftm.request_lci && !capa->ftm.request_lci) {
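
The pmsr fixes above align each nla_get_*() width with the attribute's declared policy type; nla_get_u32() on an NLA_U16 attribute reads two bytes past the payload. A minimal sketch with a hypothetical attribute:

#include <net/netlink.h>

enum {
        DEMO_ATTR_UNSPEC,
        DEMO_ATTR_PERIOD,
        __DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
        [DEMO_ATTR_PERIOD] = { .type = NLA_U16 },       /* 2-byte payload */
};

static u16 demo_get_period(struct nlattr **tb)
{
        /* The accessor width must match the policy: nla_get_u32()
         * here would read two bytes past the attribute payload. */
        return tb[DEMO_ATTR_PERIOD] ? nla_get_u16(tb[DEMO_ATTR_PERIOD]) : 0;
}
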
+diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
+index 90bb7ac4b930b0..f8f114a8dc02ac 100644
+--- a/net/wireless/rdev-ops.h
++++ b/net/wireless/rdev-ops.h
+@@ -2,7 +2,7 @@
+ /*
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018, 2021-2023 Intel Corporation
++ * Copyright (C) 2018, 2021-2024 Intel Corporation
+ */
+ #ifndef __CFG80211_RDEV_OPS
+ #define __CFG80211_RDEV_OPS
+@@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
+ struct cfg80211_scan_request *request)
+ {
+ int ret;
++
++ if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
++ return -EINVAL;
++
+ trace_rdev_scan(&rdev->wiphy, request);
+ ret = rdev->ops->scan(&rdev->wiphy, request);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 8210a6090ac161..4fc6279750ea15 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -810,6 +810,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ LIST_HEAD(coloc_ap_list);
+ bool need_scan_psc = true;
+ const struct ieee80211_sband_iftype_data *iftd;
++ size_t size, offs_ssids, offs_6ghz_params, offs_ies;
+
+ rdev_req->scan_6ghz = true;
+
+@@ -838,10 +839,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ spin_unlock_bh(&rdev->bss_lock);
+ }
+
+- request = kzalloc(struct_size(request, channels, n_channels) +
+- sizeof(*request->scan_6ghz_params) * count +
+- sizeof(*request->ssids) * rdev_req->n_ssids,
+- GFP_KERNEL);
++ size = struct_size(request, channels, n_channels);
++ offs_ssids = size;
++ size += sizeof(*request->ssids) * rdev_req->n_ssids;
++ offs_6ghz_params = size;
++ size += sizeof(*request->scan_6ghz_params) * count;
++ offs_ies = size;
++ size += rdev_req->ie_len;
++
++ request = kzalloc(size, GFP_KERNEL);
+ if (!request) {
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+ return -ENOMEM;
+@@ -849,8 +855,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+
+ *request = *rdev_req;
+ request->n_channels = 0;
+- request->scan_6ghz_params =
+- (void *)&request->channels[n_channels];
++ request->n_6ghz_params = 0;
++ if (rdev_req->n_ssids) {
++ /*
++ * Add the ssids from the parent scan request to the new
++ * scan request, so the driver would be able to use them
++ * in its probe requests to discover hidden APs on PSC
++ * channels.
++ */
++ request->ssids = (void *)request + offs_ssids;
++ memcpy(request->ssids, rdev_req->ssids,
++ sizeof(*request->ssids) * request->n_ssids);
++ }
++ request->scan_6ghz_params = (void *)request + offs_6ghz_params;
++
++ if (rdev_req->ie_len) {
++ void *ie = (void *)request + offs_ies;
++
++ memcpy(ie, rdev_req->ie, rdev_req->ie_len);
++ request->ie = ie;
++ }
+
+ /*
+ * PSC channels should not be scanned in case of direct scan with 1 SSID
+@@ -939,17 +963,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+
+ if (request->n_channels) {
+ struct cfg80211_scan_request *old = rdev->int_scan_req;
+- rdev->int_scan_req = request;
+
+- /*
+- * Add the ssids from the parent scan request to the new scan
+- * request, so the driver would be able to use them in its
+- * probe requests to discover hidden APs on PSC channels.
+- */
+- request->ssids = (void *)&request->channels[request->n_channels];
+- request->n_ssids = rdev_req->n_ssids;
+- memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
+- request->n_ssids);
++ rdev->int_scan_req = request;
+
+ /*
+ * If this scan follows a previous scan, save the scan start
+@@ -1547,7 +1562,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL(cfg80211_get_bss);
+
+-static void rb_insert_bss(struct cfg80211_registered_device *rdev,
++static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *bss)
+ {
+ struct rb_node **p = &rdev->bss_tree.rb_node;
+@@ -1563,7 +1578,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+
+ if (WARN_ON(!cmp)) {
+ /* will sort of leak this BSS */
+- return;
++ return false;
+ }
+
+ if (cmp < 0)
+@@ -1574,6 +1589,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+
+ rb_link_node(&bss->rbn, parent, p);
+ rb_insert_color(&bss->rbn, &rdev->bss_tree);
++ return true;
+ }
+
+ static struct cfg80211_internal_bss *
+@@ -1600,6 +1616,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
+ return NULL;
+ }
+
++static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
++ struct cfg80211_internal_bss *bss)
++{
++ lockdep_assert_held(&rdev->bss_lock);
++
++ if (!rb_insert_bss(rdev, bss))
++ return;
++ list_add_tail(&bss->list, &rdev->bss_list);
++ rdev->bss_entries++;
++}
++
++static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
++ struct cfg80211_internal_bss *bss)
++{
++ lockdep_assert_held(&rdev->bss_lock);
++
++ rb_erase(&bss->rbn, &rdev->bss_tree);
++ if (!rb_insert_bss(rdev, bss)) {
++ list_del(&bss->list);
++ if (!list_empty(&bss->hidden_list))
++ list_del_init(&bss->hidden_list);
++ if (!list_empty(&bss->pub.nontrans_list))
++ list_del_init(&bss->pub.nontrans_list);
++ rdev->bss_entries--;
++ }
++ rdev->bss_generation++;
++}
++
+ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *new)
+ {
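
The rb_insert_bss() change above makes the tree insert report duplicates so callers only link the entry into the list and bump counters when the insert actually happened. A generic sketch of that shape (struct demo_entry and the int key are illustrative):

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_entry {
        struct rb_node rbn;
        struct list_head list;
        int key;
};

/* Report duplicates instead of silently dropping the node. */
static bool demo_tree_insert(struct rb_root *root, struct demo_entry *e)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct demo_entry *cur = rb_entry(*p, struct demo_entry, rbn);

                parent = *p;
                if (e->key == cur->key)
                        return false;   /* caller must not link the entry */
                p = e->key < cur->key ? &(*p)->rb_left : &(*p)->rb_right;
        }
        rb_link_node(&e->rbn, parent, p);
        rb_insert_color(&e->rbn, root);
        return true;
}

static void demo_tree_add(struct rb_root *root, struct list_head *all,
                          struct demo_entry *e, unsigned int *count)
{
        /* List membership and the counter stay in sync with the tree. */
        if (!demo_tree_insert(root, e))
                return;
        list_add_tail(&e->list, all);
        (*count)++;
}
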
+@@ -1829,8 +1873,12 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ list_add(&new->hidden_list,
+ &hidden->hidden_list);
+ hidden->refcount++;
++
++ ies = (void *)rcu_access_pointer(new->pub.beacon_ies);
+ rcu_assign_pointer(new->pub.beacon_ies,
+ hidden->pub.beacon_ies);
++ if (ies)
++ kfree_rcu(ies, rcu_head);
+ }
+ } else {
+ /*
+@@ -1857,9 +1905,7 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss));
+ }
+
+- list_add_tail(&new->list, &rdev->bss_list);
+- rdev->bss_entries++;
+- rb_insert_bss(rdev, new);
++ cfg80211_insert_bss(rdev, new);
+ found = new;
+ }
+
+@@ -2358,8 +2404,8 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+
+ /* elem might be invalid after the memmove */
+ next = (void *)(elem->data + elem->datalen);
+-
+ elem_datalen = elem->datalen;
++
+ if (elem->id == WLAN_EID_EXTENSION) {
+ copied = elem->datalen - 1;
+ if (copied > data_len)
+@@ -2380,7 +2426,7 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+
+ for (elem = next;
+ elem->data < ies + ieslen &&
+- elem->data + elem->datalen < ies + ieslen;
++ elem->data + elem->datalen <= ies + ieslen;
+ elem = next) {
+ /* elem might be invalid after the memmove */
+ next = (void *)(elem->data + elem->datalen);
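
The defragmentation fix above turns the payload bound from '<' into '<=': an element whose data ends exactly at the end of the IE buffer is still valid. The same bound on a generic type/length buffer, as a self-contained sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Walk type/length-prefixed records; a record whose payload ends
 * exactly at the end of the buffer is valid, hence the '>' reject /
 * '==' accept split below. */
static bool tlv_walk_ok(const uint8_t *buf, size_t len)
{
        size_t off = 0;

        while (off + 2 <= len) {                /* type + length header */
                uint8_t dlen = buf[off + 1];

                if (off + 2 + dlen > len)       /* ending at len is fine */
                        return false;
                off += 2 + dlen;
        }
        return off == len;
}
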
+@@ -2569,10 +2615,12 @@ cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ return false;
+ }
+
+-static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+- struct cfg80211_inform_single_bss_data *tx_data,
+- struct cfg80211_bss *source_bss,
+- gfp_t gfp)
++static void
++cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
++ struct cfg80211_inform_single_bss_data *tx_data,
++ struct cfg80211_bss *source_bss,
++ const struct element *elem,
++ gfp_t gfp)
+ {
+ struct cfg80211_inform_single_bss_data data = {
+ .drv_data = tx_data->drv_data,
+@@ -2581,7 +2629,6 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+ .bss_source = BSS_SOURCE_STA_PROFILE,
+ };
+ struct ieee80211_multi_link_elem *ml_elem;
+- const struct element *elem;
+ struct cfg80211_mle *mle;
+ u16 control;
+ u8 *new_ie;
+@@ -2591,15 +2638,7 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+ const u8 *pos;
+ u8 i;
+
+- if (!source_bss)
+- return;
+-
+- if (tx_data->ftype != CFG80211_BSS_FTYPE_PRESP)
+- return;
+-
+- elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+- tx_data->ie, tx_data->ielen);
+- if (!elem || !ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
++ if (!ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
+ return;
+
+ ml_elem = (void *)elem->data + 1;
+@@ -2625,8 +2664,11 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+ /* MLD capabilities and operations */
+ pos += 2;
+
+- /* Not included when the (nontransmitted) AP is responding itself,
+- * but defined to zero then (Draft P802.11be_D3.0, 9.4.2.170.2)
++ /*
++ * The MLD ID of the reporting AP is always zero. It is set if the AP
++ * is part of an MBSSID set and will be non-zero for ML Elements
++ * relating to a nontransmitted BSS (matching the Multi-BSSID Index,
++ * Draft P802.11be_D3.2, 35.3.4.2)
+ */
+ if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
+ mld_id = *pos;
+@@ -2731,6 +2773,25 @@ static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
+ kfree(mle);
+ }
+
++static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy,
++ struct cfg80211_inform_single_bss_data *tx_data,
++ struct cfg80211_bss *source_bss,
++ gfp_t gfp)
++{
++ const struct element *elem;
++
++ if (!source_bss)
++ return;
++
++ if (tx_data->ftype != CFG80211_BSS_FTYPE_PRESP)
++ return;
++
++ for_each_element_extid(elem, WLAN_EID_EXT_EHT_MULTI_LINK,
++ tx_data->ie, tx_data->ielen)
++ cfg80211_parse_ml_elem_sta_data(wiphy, tx_data, source_bss,
++ elem, gfp);
++}
++
+ struct cfg80211_bss *
+ cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+@@ -3077,19 +3138,14 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
+ rdev->bss_generation++;
+ }
+-
+- rb_erase(&cbss->rbn, &rdev->bss_tree);
+- rb_insert_bss(rdev, cbss);
+- rdev->bss_generation++;
++ cfg80211_rehash_bss(rdev, cbss);
+
+ list_for_each_entry_safe(nontrans_bss, tmp,
+ &cbss->pub.nontrans_list,
+ nontrans_list) {
+ bss = bss_from_pub(nontrans_bss);
+ bss->pub.channel = chan;
+- rb_erase(&bss->rbn, &rdev->bss_tree);
+- rb_insert_bss(rdev, bss);
+- rdev->bss_generation++;
++ cfg80211_rehash_bss(rdev, bss);
+ }
+
+ done:
+@@ -3144,13 +3200,17 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ wiphy = &rdev->wiphy;
+
+ /* Determine number of channels, needed to allocate creq */
+- if (wreq && wreq->num_channels)
++ if (wreq && wreq->num_channels) {
++ /* Passed from userspace so should be checked */
++ if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES))
++ return -EINVAL;
+ n_channels = wreq->num_channels;
+- else
++ } else {
+ n_channels = ieee80211_get_num_supported_channels(wiphy);
++ }
+
+- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
+- n_channels * sizeof(void *),
++ creq = kzalloc(struct_size(creq, channels, n_channels) +
++ sizeof(struct cfg80211_ssid),
+ GFP_ATOMIC);
+ if (!creq)
+ return -ENOMEM;
+@@ -3158,7 +3218,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ creq->wiphy = wiphy;
+ creq->wdev = dev->ieee80211_ptr;
+ /* SSIDs come after channels */
+- creq->ssids = (void *)&creq->channels[n_channels];
++ creq->ssids = (void *)creq + struct_size(creq, channels, n_channels);
+ creq->n_channels = n_channels;
+ creq->n_ssids = 1;
+ creq->scan_start = jiffies;
+@@ -3221,8 +3281,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
+ creq->ssids[0].ssid_len = wreq->essid_len;
+ }
+- if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
++ if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
++ creq->ssids = NULL;
+ creq->n_ssids = 0;
++ }
+ }
+
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 9bba233b5a6ec8..591cda99d72f57 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -115,7 +115,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
+ n_channels = i;
+ }
+ request->n_channels = n_channels;
+- request->ssids = (void *)&request->channels[n_channels];
++ request->ssids = (void *)request +
++ struct_size(request, channels, n_channels);
+ request->n_ssids = 1;
+
+ memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
+@@ -1057,6 +1058,7 @@ void cfg80211_connect_done(struct net_device *dev,
+ cfg80211_hold_bss(
+ bss_from_pub(params->links[link].bss));
+ ev->cr.links[link].bss = params->links[link].bss;
++ ev->cr.links[link].status = params->links[link].status;
+
+ if (params->links[link].addr) {
+ ev->cr.links[link].addr = next;
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index c629bac3f2983d..62f26618f67474 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -5,7 +5,7 @@
+ *
+ * Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+- * Copyright (C) 2020-2021, 2023 Intel Corporation
++ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
+ */
+
+ #include <linux/device.h>
+@@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev)
+ cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+ }
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ if (rdev->ops->suspend)
+ ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
+ if (ret == 1) {
+		/* Driver refused to configure wowlan */
+ cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+- cfg80211_process_wiphy_works(rdev);
++ cfg80211_process_wiphy_works(rdev, NULL);
+ ret = rdev_suspend(rdev, NULL);
+ }
+ if (ret == 0)
+@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
+ if (rdev->wiphy.registered && rdev->ops->resume)
+ ret = rdev_resume(rdev);
+ rdev->suspended = false;
+- schedule_work(&rdev->wiphy_work);
++ queue_work(system_unbound_wq, &rdev->wiphy_work);
+ wiphy_unlock(&rdev->wiphy);
+
+ if (ret)
+diff --git a/net/wireless/trace.h b/net/wireless/trace.h
+index 617c0d0dfa963c..df92ee4d91d1d4 100644
+--- a/net/wireless/trace.h
++++ b/net/wireless/trace.h
+@@ -1015,7 +1015,7 @@ TRACE_EVENT(rdev_get_mpp,
+ TRACE_EVENT(rdev_dump_mpp,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
+ u8 *dst, u8 *mpp),
+- TP_ARGS(wiphy, netdev, _idx, mpp, dst),
++ TP_ARGS(wiphy, netdev, _idx, dst, mpp),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+@@ -1747,7 +1747,7 @@ TRACE_EVENT(rdev_return_void_tx_rx,
+
+ DECLARE_EVENT_CLASS(tx_rx_evt,
+ TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+- TP_ARGS(wiphy, rx, tx),
++ TP_ARGS(wiphy, tx, rx),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(u32, tx)
+@@ -1764,7 +1764,7 @@ DECLARE_EVENT_CLASS(tx_rx_evt,
+
+ DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
+ TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
+- TP_ARGS(wiphy, rx, tx)
++ TP_ARGS(wiphy, tx, rx)
+ );
+
+ DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 1783ab9d57a319..7acd8d0db61a76 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -797,15 +797,19 @@ ieee80211_amsdu_subframe_length(void *field, u8 mesh_flags, u8 hdr_type)
+
+ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
+ {
+- int offset = 0, remaining, subframe_len, padding;
++ int offset = 0, subframe_len, padding;
+
+ for (offset = 0; offset < skb->len; offset += subframe_len + padding) {
++ int remaining = skb->len - offset;
+ struct {
+ __be16 len;
+ u8 mesh_flags;
+ } hdr;
+ u16 len;
+
++ if (sizeof(hdr) > remaining)
++ return false;
++
+ if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0)
+ return false;
+
+@@ -813,7 +817,6 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
+ mesh_hdr);
+ subframe_len = sizeof(struct ethhdr) + len;
+ padding = (4 - subframe_len) & 0x3;
+- remaining = skb->len - offset;
+
+ if (subframe_len > remaining)
+ return false;
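
The A-MSDU fix above recomputes the remaining byte count at the top of each iteration and refuses to copy a subframe header that would not fit, instead of checking only after the copy. A simplified sketch of that validate-before-read loop; the real code parses an Ethernet-style subframe header with big-endian lengths, while this uses a bare host-order length field:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool subframes_valid(const uint8_t *buf, size_t len)
{
        size_t offset = 0;

        while (offset < len) {
                size_t remaining = len - offset;
                uint16_t sub_len;

                if (remaining < sizeof(sub_len))        /* header must fit first */
                        return false;
                memcpy(&sub_len, buf + offset, sizeof(sub_len));
                if (sizeof(sub_len) + sub_len > remaining)
                        return false;
                offset += sizeof(sub_len) + sub_len;
                if (offset < len)       /* the last subframe has no padding */
                        offset += (4 - ((sizeof(sub_len) + sub_len) & 3)) & 3;
        }
        return true;
}
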
+@@ -831,7 +834,7 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ {
+ unsigned int hlen = ALIGN(extra_headroom, 4);
+ struct sk_buff *frame = NULL;
+- int offset = 0, remaining;
++ int offset = 0;
+ struct {
+ struct ethhdr eth;
+ uint8_t flags;
+@@ -845,10 +848,14 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ copy_len = sizeof(hdr);
+
+ while (!last) {
++ int remaining = skb->len - offset;
+ unsigned int subframe_len;
+ int len, mesh_len = 0;
+ u8 padding;
+
++ if (copy_len > remaining)
++ goto purge;
++
+ skb_copy_bits(skb, offset, &hdr, copy_len);
+ if (iftype == NL80211_IFTYPE_MESH_POINT)
+ mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags);
+@@ -858,7 +865,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ padding = (4 - subframe_len) & 0x3;
+
+ /* the last MSDU has no padding */
+- remaining = skb->len - offset;
+ if (subframe_len > remaining)
+ goto purge;
+ /* mitigate A-MSDU aggregation injection attacks */
+@@ -1454,7 +1460,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
+ 5120, /* 0.833333... */
+ };
+ u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
+- u32 rates_969[3] = { 480388888, 453700000, 408333333 };
++ u32 rates_996[3] = { 480388888, 453700000, 408333333 };
+ u32 rates_484[3] = { 229411111, 216666666, 195000000 };
+ u32 rates_242[3] = { 114711111, 108333333, 97500000 };
+ u32 rates_106[3] = { 40000000, 37777777, 34000000 };
+@@ -1474,12 +1480,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
+ if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
+ return 0;
+
+- if (rate->bw == RATE_INFO_BW_160)
++ if (rate->bw == RATE_INFO_BW_160 ||
++ (rate->bw == RATE_INFO_BW_HE_RU &&
++ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996))
+ result = rates_160M[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_80 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996))
+- result = rates_969[rate->he_gi];
++ result = rates_996[rate->he_gi];
+ else if (rate->bw == RATE_INFO_BW_40 ||
+ (rate->bw == RATE_INFO_BW_HE_RU &&
+ rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484))
+@@ -2393,6 +2401,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ {
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
++ int ret;
+
+ wdev = dev->ieee80211_ptr;
+ if (!wdev)
+@@ -2404,7 +2413,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+
+ memset(sinfo, 0, sizeof(*sinfo));
+
+- return rdev_get_station(rdev, dev, mac_addr, sinfo);
++ wiphy_lock(&rdev->wiphy);
++ ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
++ wiphy_unlock(&rdev->wiphy);
++
++ return ret;
+ }
+ EXPORT_SYMBOL(cfg80211_get_station);
+
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index a161c64d1765e6..838ad6541a17d8 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -4,6 +4,7 @@
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
++ * Copyright (C) 2024 Intel Corporation
+ *
+ * (As all part of the Linux kernel, this file is GPL)
+ */
+@@ -662,7 +663,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
+ dev->ieee80211_ptr->wiphy->wext &&
+ dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) {
+ wireless_warn_cfg80211_wext();
+- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
++ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
++ WIPHY_FLAG_DISABLE_WEXT))
+ return NULL;
+ return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev);
+ }
+@@ -704,7 +706,8 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
+ #ifdef CONFIG_CFG80211_WEXT
+ if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) {
+ wireless_warn_cfg80211_wext();
+- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
++ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
++ WIPHY_FLAG_DISABLE_WEXT))
+ return NULL;
+ handlers = dev->ieee80211_ptr->wiphy->wext;
+ }
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 0fb5143bec7ac4..f15a4493eb0bf1 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
+ if (get_user(len, optlen))
+ goto out;
+
+- len = min_t(unsigned int, len, sizeof(int));
+-
+ rc = -EINVAL;
+ if (len < 0)
+ goto out;
+
++ len = min_t(unsigned int, len, sizeof(int));
++
+ rc = -EFAULT;
+ if (put_user(len, optlen))
+ goto out;
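
The x25 fix above checks for a negative length while it is still signed; min_t(unsigned int, ...) casts first, so a negative len would wrap to a huge positive value and sail through the clamp. A small userspace sketch of the corrected ordering (returning -1 stands in for -EINVAL):

/* Validate while the value is still signed; clamping first through an
 * unsigned min() would turn a negative length into a huge positive one. */
static int demo_clamp_optlen(int len)
{
        if (len < 0)
                return -1;
        if ((unsigned int)len > sizeof(int))
                len = sizeof(int);
        return len;
}
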
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 55f8b9b0e06d1f..93c802cfb9c6ab 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -166,8 +166,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ contd = XDP_PKT_CONTD;
+
+ err = __xsk_rcv_zc(xs, xskb, len, contd);
+- if (err || likely(!frags))
+- goto out;
++ if (err)
++ goto err;
++ if (likely(!frags))
++ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+@@ -176,11 +178,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+- return err;
++ goto err;
+ list_del(&pos->xskb_list_node);
+ }
+
+-out:
++ return 0;
++err:
++ xsk_buff_free(xdp);
+ return err;
+ }
+
+@@ -679,7 +683,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ memcpy(vaddr, buffer, len);
+ kunmap_local(vaddr);
+
+- skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
++ skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
++ refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
+ }
+ }
+
+@@ -919,7 +924,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+
+ rcu_read_lock();
+ if (xsk_check_common(xs))
+- goto skip_tx;
++ goto out;
+
+ pool = xs->pool;
+
+@@ -931,12 +936,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ xsk_generic_xmit(sk);
+ }
+
+-skip_tx:
+ if (xs->rx && !xskq_prod_is_empty(xs->rx))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (xs->tx && xsk_tx_writeable(xs))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+-
++out:
+ rcu_read_unlock();
+ return mask;
+ }
+@@ -1228,7 +1232,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+
+ xs->dev = dev;
+ xs->zc = xs->umem->zc;
+- xs->sg = !!(flags & XDP_USE_SG);
++ xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
+ xs->queue_id = qid;
+ xp_add_xsk(xs->pool, xs);
+
+@@ -1328,6 +1332,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
+ struct xsk_queue **q;
+ int entries;
+
++ if (optlen < sizeof(entries))
++ return -EINVAL;
+ if (copy_from_sockptr(&entries, optval, sizeof(entries)))
+ return -EFAULT;
+
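
The xsk_setsockopt() fix above rejects a short optlen before copying a fixed sizeof() from the user pointer, so the kernel never consumes bytes userspace did not actually supply. A sketch with a hypothetical option handler:

#include <linux/errno.h>
#include <linux/sockptr.h>

/* Reject a short optlen before copying a fixed-size object out of the
 * user-supplied buffer; demo_setsockopt() is illustrative only. */
static int demo_setsockopt(sockptr_t optval, unsigned int optlen)
{
        int entries;

        if (optlen < sizeof(entries))
                return -EINVAL;
        if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                return -EFAULT;
        return entries > 0 ? 0 : -EINVAL;
}
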
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index b3f7b310811edf..b0a611677865d2 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -170,6 +170,9 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
+ if (err)
+ return err;
+
++ if (flags & XDP_USE_SG)
++ pool->umem->flags |= XDP_UMEM_SG_FLAG;
++
+ if (flags & XDP_USE_NEED_WAKEUP)
+ pool->uses_need_wakeup = true;
+ /* Tx needs to be explicitly woken up the first time. Also
+@@ -538,6 +541,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
+
+ xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
+ xskb->xdp.data_meta = xskb->xdp.data;
++ xskb->xdp.flags = 0;
+
+ if (pool->dma_need_sync) {
+ dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
+index 3784534c918552..6346690d5c699d 100644
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -407,7 +407,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ struct net_device *dev = x->xso.dev;
+
+- if (!x->type_offload || x->encap)
++ if (!x->type_offload ||
++ (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
+ return false;
+
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index d5ee96789d4bf3..0c08bac3ed269d 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -388,11 +388,15 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
+ */
+ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
++ struct xfrm_offload *xo = xfrm_offload(skb);
+ int ihl = skb->data - skb_transport_header(skb);
+
+ if (skb->transport_header != skb->network_header) {
+ memmove(skb_transport_header(skb),
+ skb_network_header(skb), ihl);
++ if (xo)
++ xo->orig_mac_len =
++ skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
+ skb->network_header = skb->transport_header;
+ }
+ ip_hdr(skb)->tot_len = htons(skb->len + ihl);
+@@ -403,11 +407,15 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
++ struct xfrm_offload *xo = xfrm_offload(skb);
+ int ihl = skb->data - skb_transport_header(skb);
+
+ if (skb->transport_header != skb->network_header) {
+ memmove(skb_transport_header(skb),
+ skb_network_header(skb), ihl);
++ if (xo)
++ xo->orig_mac_len =
++ skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
+ skb->network_header = skb->transport_header;
+ }
+ ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index 662c83beb345ed..e5722c95b8bb38 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -704,9 +704,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
+ {
+ struct net *net = dev_net(skb_dst(skb)->dev);
+ struct xfrm_state *x = skb_dst(skb)->xfrm;
++ int family;
+ int err;
+
+- switch (x->outer_mode.family) {
++ family = (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) ? x->outer_mode.family
++ : skb_dst(skb)->ops->family;
++
++ switch (family) {
+ case AF_INET:
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index d24b4d4f620ea0..b699cc2ec35ac3 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -436,6 +436,8 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
+
+ static void xfrm_policy_kill(struct xfrm_policy *policy)
+ {
++ xfrm_dev_policy_delete(policy);
++
+ write_lock_bh(&policy->lock);
+ policy->walk.dead = 1;
+ write_unlock_bh(&policy->lock);
+@@ -1834,7 +1836,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
+
+ __xfrm_policy_unlink(pol, dir);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+- xfrm_dev_policy_delete(pol);
+ cnt++;
+ xfrm_audit_policy_delete(pol, 1, task_valid);
+ xfrm_policy_kill(pol);
+@@ -1875,7 +1876,6 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
+
+ __xfrm_policy_unlink(pol, dir);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+- xfrm_dev_policy_delete(pol);
+ cnt++;
+ xfrm_audit_policy_delete(pol, 1, task_valid);
+ xfrm_policy_kill(pol);
+@@ -2326,7 +2326,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
+ pol = __xfrm_policy_unlink(pol, dir);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ if (pol) {
+- xfrm_dev_policy_delete(pol);
+ xfrm_policy_kill(pol);
+ return 0;
+ }
+@@ -2679,7 +2678,9 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
+ if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
+ mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+
+- family = xfrm[i]->props.family;
++ if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
++ family = xfrm[i]->props.family;
++
+ oif = fl->flowi_oif ? : fl->flowi_l3mdev;
+ dst = xfrm_dst_lookup(xfrm[i], tos, oif,
+ &saddr, &daddr, family, mark);
+@@ -3851,15 +3852,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
+ /* Impossible. Such dst must be popped before reaches point of failure. */
+ }
+
+-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
++static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
+ {
+- if (dst) {
+- if (dst->obsolete) {
+- dst_release(dst);
+- dst = NULL;
+- }
+- }
+- return dst;
++ if (dst->obsolete)
++ sk_dst_reset(sk);
+ }
+
+ static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index bda5327bf34dff..8a6e8656d014f2 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -49,6 +49,7 @@ static struct kmem_cache *xfrm_state_cache __ro_after_init;
+
+ static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
+ static HLIST_HEAD(xfrm_state_gc_list);
++static HLIST_HEAD(xfrm_state_dev_gc_list);
+
+ static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
+ {
+@@ -214,6 +215,7 @@ static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
+ static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
+
+ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
++static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);
+
+ int __xfrm_state_delete(struct xfrm_state *x);
+
+@@ -683,6 +685,41 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
+ }
+ EXPORT_SYMBOL(xfrm_state_alloc);
+
++#ifdef CONFIG_XFRM_OFFLOAD
++void xfrm_dev_state_delete(struct xfrm_state *x)
++{
++ struct xfrm_dev_offload *xso = &x->xso;
++ struct net_device *dev = READ_ONCE(xso->dev);
++
++ if (dev) {
++ dev->xfrmdev_ops->xdo_dev_state_delete(x);
++ spin_lock_bh(&xfrm_state_dev_gc_lock);
++ hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
++ spin_unlock_bh(&xfrm_state_dev_gc_lock);
++ }
++}
++EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);
++
++void xfrm_dev_state_free(struct xfrm_state *x)
++{
++ struct xfrm_dev_offload *xso = &x->xso;
++ struct net_device *dev = READ_ONCE(xso->dev);
++
++ if (dev && dev->xfrmdev_ops) {
++ spin_lock_bh(&xfrm_state_dev_gc_lock);
++ if (!hlist_unhashed(&x->dev_gclist))
++ hlist_del(&x->dev_gclist);
++ spin_unlock_bh(&xfrm_state_dev_gc_lock);
++
++ if (dev->xfrmdev_ops->xdo_dev_state_free)
++ dev->xfrmdev_ops->xdo_dev_state_free(x);
++ WRITE_ONCE(xso->dev, NULL);
++ xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
++ netdev_put(dev, &xso->dev_tracker);
++ }
++}
++#endif
++
+ void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
+ {
+ WARN_ON(x->km.state != XFRM_STATE_DEAD);
+@@ -848,6 +885,9 @@ EXPORT_SYMBOL(xfrm_state_flush);
+
+ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
+ {
++ struct xfrm_state *x;
++ struct hlist_node *tmp;
++ struct xfrm_dev_offload *xso;
+ int i, err = 0, cnt = 0;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+@@ -857,8 +897,6 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
+
+ err = -ESRCH;
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+- struct xfrm_state *x;
+- struct xfrm_dev_offload *xso;
+ restart:
+ hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
+ xso = &x->xso;
+@@ -868,6 +906,8 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ err = xfrm_state_delete(x);
++ xfrm_dev_state_free(x);
++
+ xfrm_audit_state_delete(x, err ? 0 : 1,
+ task_valid);
+ xfrm_state_put(x);
+@@ -884,6 +924,24 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
+
+ out:
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
++
++ spin_lock_bh(&xfrm_state_dev_gc_lock);
++restart_gc:
++ hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
++ xso = &x->xso;
++
++ if (xso->dev == dev) {
++ spin_unlock_bh(&xfrm_state_dev_gc_lock);
++ xfrm_dev_state_free(x);
++ spin_lock_bh(&xfrm_state_dev_gc_lock);
++ goto restart_gc;
++ }
++
++ }
++ spin_unlock_bh(&xfrm_state_dev_gc_lock);
++
++ xfrm_flush_gc();
++
+ return err;
+ }
+ EXPORT_SYMBOL(xfrm_dev_state_flush);
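
The flush path above walks a dedicated GC list and drops the spinlock around each free, restarting the walk from the head afterwards because the list may have changed in the meantime. A generic sketch of that unlock/free/relock/restart loop, with illustrative types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_gc_entry {
        struct hlist_node node;
        void *owner;
};

static HLIST_HEAD(demo_gc_list);                /* illustrative GC list */
static DEFINE_SPINLOCK(demo_gc_lock);

/* free_entry() may sleep or take other locks, so the spinlock is
 * dropped around it; since the list can change while unlocked, the
 * walk restarts from the head after every free. */
static void demo_gc_flush(void *owner,
                          void (*free_entry)(struct demo_gc_entry *))
{
        struct demo_gc_entry *e;
        struct hlist_node *tmp;

        spin_lock_bh(&demo_gc_lock);
restart:
        hlist_for_each_entry_safe(e, tmp, &demo_gc_list, node) {
                if (e->owner != owner)
                        continue;
                spin_unlock_bh(&demo_gc_lock);
                free_entry(e);          /* unhashes e from demo_gc_list */
                spin_lock_bh(&demo_gc_lock);
                goto restart;
        }
        spin_unlock_bh(&demo_gc_lock);
}
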
+@@ -1273,8 +1331,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ xso->dev = xdo->dev;
+ xso->real_dev = xdo->real_dev;
+ xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
+- netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
+- GFP_ATOMIC);
++ netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
+ error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
+ if (error) {
+ xso->dir = 0;
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index ad01997c3aa9dd..979f23cded401a 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -2017,6 +2017,9 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+ if (xp->xfrm_nr == 0)
+ return 0;
+
++ if (xp->xfrm_nr > XFRM_MAX_DEPTH)
++ return -ENOBUFS;
++
+ for (i = 0; i < xp->xfrm_nr; i++) {
+ struct xfrm_user_tmpl *up = &vec[i];
+ struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+@@ -2345,7 +2348,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+ NETLINK_CB(skb).portid);
+ }
+ } else {
+- xfrm_dev_policy_delete(xp);
+ xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
+
+ if (err != 0)
+diff --git a/rust/Makefile b/rust/Makefile
+index 7dbf9abe0d0197..333b9a482473d6 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -173,7 +173,6 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
+ mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTDOC) --test $(rust_flags) \
+- @$(objtree)/include/generated/rustc_cfg \
+ -L$(objtree)/$(obj) --extern alloc --extern kernel \
+ --extern build_error --extern macros \
+ --extern bindings --extern uapi \
+@@ -364,9 +363,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+ quiet_cmd_exports = EXPORTS $@
+ cmd_exports = \
+ $(NM) -p --defined-only $< \
+- | grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
+- | xargs -Isymbol \
+- echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
++ | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+
+ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
+ $(call if_changed,exports)
+diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
+index 0b6bf5b6da4345..8cb4a31cf6e54d 100644
+--- a/rust/alloc/alloc.rs
++++ b/rust/alloc/alloc.rs
+@@ -6,9 +6,7 @@
+
+ #[cfg(not(test))]
+ use core::intrinsics;
+-use core::intrinsics::{min_align_of_val, size_of_val};
+
+-use core::ptr::Unique;
+ #[cfg(not(test))]
+ use core::ptr::{self, NonNull};
+
+@@ -40,7 +38,6 @@
+ #[rustc_nounwind]
+ fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+
+- #[cfg(not(bootstrap))]
+ static __rust_no_alloc_shim_is_unstable: u8;
+ }
+
+@@ -98,7 +95,6 @@ pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+ unsafe {
+ // Make sure we don't accidentally allow omitting the allocator shim in
+ // stable code until it is actually stabilized.
+- #[cfg(not(bootstrap))]
+ core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
+
+ __rust_alloc(layout.size(), layout.align())
+@@ -339,22 +335,6 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
+ }
+ }
+
+-#[cfg_attr(not(test), lang = "box_free")]
+-#[inline]
+-// This signature has to be the same as `Box`, otherwise an ICE will happen.
+-// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
+-// well.
+-// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
+-// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
+-pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
+- unsafe {
+- let size = size_of_val(ptr.as_ref());
+- let align = min_align_of_val(ptr.as_ref());
+- let layout = Layout::from_size_align_unchecked(size, align);
+- alloc.deallocate(From::from(ptr.cast()), layout)
+- }
+-}
+-
+ // # Allocation error handler
+
+ #[cfg(not(no_global_oom_handling))]
+@@ -414,7 +394,6 @@ pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
+ static __rust_alloc_error_handler_should_panic: u8;
+ }
+
+- #[allow(unused_unsafe)]
+ if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+ panic!("memory allocation of {size} bytes failed")
+ } else {
+diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
+index c8173cea831773..9620eba1726872 100644
+--- a/rust/alloc/boxed.rs
++++ b/rust/alloc/boxed.rs
+@@ -159,12 +159,12 @@
+ use core::iter::FusedIterator;
+ use core::marker::Tuple;
+ use core::marker::Unsize;
+-use core::mem;
++use core::mem::{self, SizedTypeProperties};
+ use core::ops::{
+ CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+ };
+ use core::pin::Pin;
+-use core::ptr::{self, Unique};
++use core::ptr::{self, NonNull, Unique};
+ use core::task::{Context, Poll};
+
+ #[cfg(not(no_global_oom_handling))]
+@@ -483,8 +483,12 @@ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocE
+ where
+ A: Allocator,
+ {
+- let layout = Layout::new::<mem::MaybeUninit<T>>();
+- let ptr = alloc.allocate(layout)?.cast();
++ let ptr = if T::IS_ZST {
++ NonNull::dangling()
++ } else {
++ let layout = Layout::new::<mem::MaybeUninit<T>>();
++ alloc.allocate(layout)?.cast()
++ };
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+@@ -553,8 +557,12 @@ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocE
+ where
+ A: Allocator,
+ {
+- let layout = Layout::new::<mem::MaybeUninit<T>>();
+- let ptr = alloc.allocate_zeroed(layout)?.cast();
++ let ptr = if T::IS_ZST {
++ NonNull::dangling()
++ } else {
++ let layout = Layout::new::<mem::MaybeUninit<T>>();
++ alloc.allocate_zeroed(layout)?.cast()
++ };
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+@@ -679,14 +687,16 @@ pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+- unsafe {
++ let ptr = if T::IS_ZST || len == 0 {
++ NonNull::dangling()
++ } else {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+- let ptr = Global.allocate(layout)?;
+- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+- }
++ Global.allocate(layout)?.cast()
++ };
++ unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+@@ -711,14 +721,16 @@ pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, Al
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+- unsafe {
++ let ptr = if T::IS_ZST || len == 0 {
++ NonNull::dangling()
++ } else {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+- let ptr = Global.allocate_zeroed(layout)?;
+- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+- }
++ Global.allocate_zeroed(layout)?.cast()
++ };
++ unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
+ }
+ }
+
+@@ -1215,8 +1227,18 @@ pub const fn into_pin(boxed: Self) -> Pin<Self>
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
++ #[inline]
+ fn drop(&mut self) {
+- // FIXME: Do nothing, drop is currently performed by compiler.
++ // the T in the Box is dropped by the compiler before the destructor is run
++
++ let ptr = self.0;
++
++ unsafe {
++ let layout = Layout::for_value_raw(ptr.as_ptr());
++ if layout.size() != 0 {
++ self.1.deallocate(From::from(ptr.cast()), layout);
++ }
++ }
+ }
+ }
+
+@@ -2165,7 +2187,7 @@ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn E
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send` marker.
+- mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
++ Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send))
+ })
+ }
+ }
+@@ -2179,7 +2201,7 @@ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send + Sync` marker.
+- mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
++ Box::from_raw(Box::into_raw(s) as *mut (dyn Error + Send + Sync))
+ })
+ }
+ }
+diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
+index 85e91356ecb308..73b9ffd845d952 100644
+--- a/rust/alloc/lib.rs
++++ b/rust/alloc/lib.rs
+@@ -58,6 +58,11 @@
+ //! [`Rc`]: rc
+ //! [`RefCell`]: core::cell
+
++// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
++// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
++// rustc itself never sets the feature, so this line has no effect there.
++#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
++//
+ #![allow(unused_attributes)]
+ #![stable(feature = "alloc", since = "1.36.0")]
+ #![doc(
+@@ -77,11 +82,6 @@
+ ))]
+ #![no_std]
+ #![needs_allocator]
+-// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
+-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+-// rustc itself never sets the feature, so this line has no affect there.
+-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+-//
+ // Lints:
+ #![deny(unsafe_op_in_unsafe_fn)]
+ #![deny(fuzzy_provenance_casts)]
+@@ -90,6 +90,8 @@
+ #![warn(missing_docs)]
+ #![allow(explicit_outlives_requirements)]
+ #![warn(multiple_supertrait_upcastable)]
++#![cfg_attr(not(bootstrap), allow(internal_features))]
++#![cfg_attr(not(bootstrap), allow(rustdoc::redundant_explicit_links))]
+ //
+ // Library features:
+ // tidy-alphabetical-start
+@@ -139,7 +141,6 @@
+ #![feature(maybe_uninit_uninit_array_transpose)]
+ #![feature(pattern)]
+ #![feature(pointer_byte_offsets)]
+-#![feature(provide_any)]
+ #![feature(ptr_internals)]
+ #![feature(ptr_metadata)]
+ #![feature(ptr_sub_ptr)]
+diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
+index 65d5ce15828e43..a7425582a323f1 100644
+--- a/rust/alloc/raw_vec.rs
++++ b/rust/alloc/raw_vec.rs
+@@ -471,16 +471,26 @@ fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
+ // See current_memory() why this assert is here
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
+- let ptr = unsafe {
+- // `Layout::array` cannot overflow here because it would have
+- // overflowed earlier when capacity was larger.
+- let new_size = mem::size_of::<T>().unchecked_mul(cap);
+- let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+- self.alloc
+- .shrink(ptr, layout, new_layout)
+- .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+- };
+- self.set_ptr_and_cap(ptr, cap);
++
++ // If shrinking to 0, deallocate the buffer. We don't reach this point
++ // for the T::IS_ZST case since current_memory() will have returned
++ // None.
++ if cap == 0 {
++ unsafe { self.alloc.deallocate(ptr, layout) };
++ self.ptr = Unique::dangling();
++ self.cap = 0;
++ } else {
++ let ptr = unsafe {
++ // `Layout::array` cannot overflow here because it would have
++ // overflowed earlier when capacity was larger.
++ let new_size = mem::size_of::<T>().unchecked_mul(cap);
++ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
++ self.alloc
++ .shrink(ptr, layout, new_layout)
++ .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
++ };
++ self.set_ptr_and_cap(ptr, cap);
++ }
+ Ok(())
+ }
+ }
+diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
+deleted file mode 100644
+index 09efff090e428c..00000000000000
+--- a/rust/alloc/vec/drain_filter.rs
++++ /dev/null
+@@ -1,199 +0,0 @@
+-// SPDX-License-Identifier: Apache-2.0 OR MIT
+-
+-use crate::alloc::{Allocator, Global};
+-use core::mem::{ManuallyDrop, SizedTypeProperties};
+-use core::ptr;
+-use core::slice;
+-
+-use super::Vec;
+-
+-/// An iterator which uses a closure to determine if an element should be removed.
+-///
+-/// This struct is created by [`Vec::drain_filter`].
+-/// See its documentation for more.
+-///
+-/// # Example
+-///
+-/// ```
+-/// #![feature(drain_filter)]
+-///
+-/// let mut v = vec![0, 1, 2];
+-/// let iter: std::vec::DrainFilter<'_, _, _> = v.drain_filter(|x| *x % 2 == 0);
+-/// ```
+-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+-#[derive(Debug)]
+-pub struct DrainFilter<
+- 'a,
+- T,
+- F,
+- #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+-> where
+- F: FnMut(&mut T) -> bool,
+-{
+- pub(super) vec: &'a mut Vec<T, A>,
+- /// The index of the item that will be inspected by the next call to `next`.
+- pub(super) idx: usize,
+- /// The number of items that have been drained (removed) thus far.
+- pub(super) del: usize,
+- /// The original length of `vec` prior to draining.
+- pub(super) old_len: usize,
+- /// The filter test predicate.
+- pub(super) pred: F,
+- /// A flag that indicates a panic has occurred in the filter test predicate.
+- /// This is used as a hint in the drop implementation to prevent consumption
+- /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+- /// backshifted in the `vec`, but no further items will be dropped or
+- /// tested by the filter predicate.
+- pub(super) panic_flag: bool,
+-}
+-
+-impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
+-where
+- F: FnMut(&mut T) -> bool,
+-{
+- /// Returns a reference to the underlying allocator.
+- #[unstable(feature = "allocator_api", issue = "32838")]
+- #[inline]
+- pub fn allocator(&self) -> &A {
+- self.vec.allocator()
+- }
+-
+- /// Keep unyielded elements in the source `Vec`.
+- ///
+- /// # Examples
+- ///
+- /// ```
+- /// #![feature(drain_filter)]
+- /// #![feature(drain_keep_rest)]
+- ///
+- /// let mut vec = vec!['a', 'b', 'c'];
+- /// let mut drain = vec.drain_filter(|_| true);
+- ///
+- /// assert_eq!(drain.next().unwrap(), 'a');
+- ///
+- /// // This call keeps 'b' and 'c' in the vec.
+- /// drain.keep_rest();
+- ///
+- /// // If we wouldn't call `keep_rest()`,
+- /// // `vec` would be empty.
+- /// assert_eq!(vec, ['b', 'c']);
+- /// ```
+- #[unstable(feature = "drain_keep_rest", issue = "101122")]
+- pub fn keep_rest(self) {
+- // At this moment layout looks like this:
+- //
+- // _____________________/-- old_len
+- // / \
+- // [kept] [yielded] [tail]
+- // \_______/ ^-- idx
+- // \-- del
+- //
+- // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
+- //
+- // 1. Move [tail] after [kept]
+- // 2. Update length of the original vec to `old_len - del`
+- // a. In case of ZST, this is the only thing we want to do
+- // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+- let mut this = ManuallyDrop::new(self);
+-
+- unsafe {
+- // ZSTs have no identity, so we don't need to move them around.
+- if !T::IS_ZST && this.idx < this.old_len && this.del > 0 {
+- let ptr = this.vec.as_mut_ptr();
+- let src = ptr.add(this.idx);
+- let dst = src.sub(this.del);
+- let tail_len = this.old_len - this.idx;
+- src.copy_to(dst, tail_len);
+- }
+-
+- let new_len = this.old_len - this.del;
+- this.vec.set_len(new_len);
+- }
+- }
+-}
+-
+-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+-impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
+-where
+- F: FnMut(&mut T) -> bool,
+-{
+- type Item = T;
+-
+- fn next(&mut self) -> Option<T> {
+- unsafe {
+- while self.idx < self.old_len {
+- let i = self.idx;
+- let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+- self.panic_flag = true;
+- let drained = (self.pred)(&mut v[i]);
+- self.panic_flag = false;
+- // Update the index *after* the predicate is called. If the index
+- // is updated prior and the predicate panics, the element at this
+- // index would be leaked.
+- self.idx += 1;
+- if drained {
+- self.del += 1;
+- return Some(ptr::read(&v[i]));
+- } else if self.del > 0 {
+- let del = self.del;
+- let src: *const T = &v[i];
+- let dst: *mut T = &mut v[i - del];
+- ptr::copy_nonoverlapping(src, dst, 1);
+- }
+- }
+- None
+- }
+- }
+-
+- fn size_hint(&self) -> (usize, Option<usize>) {
+- (0, Some(self.old_len - self.idx))
+- }
+-}
+-
+-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+-impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
+-where
+- F: FnMut(&mut T) -> bool,
+-{
+- fn drop(&mut self) {
+- struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
+- where
+- F: FnMut(&mut T) -> bool,
+- {
+- drain: &'b mut DrainFilter<'a, T, F, A>,
+- }
+-
+- impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
+- where
+- F: FnMut(&mut T) -> bool,
+- {
+- fn drop(&mut self) {
+- unsafe {
+- if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+- // This is a pretty messed up state, and there isn't really an
+- // obviously right thing to do. We don't want to keep trying
+- // to execute `pred`, so we just backshift all the unprocessed
+- // elements and tell the vec that they still exist. The backshift
+- // is required to prevent a double-drop of the last successfully
+- // drained item prior to a panic in the predicate.
+- let ptr = self.drain.vec.as_mut_ptr();
+- let src = ptr.add(self.drain.idx);
+- let dst = src.sub(self.drain.del);
+- let tail_len = self.drain.old_len - self.drain.idx;
+- src.copy_to(dst, tail_len);
+- }
+- self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+- }
+- }
+- }
+-
+- let backshift = BackshiftOnDrop { drain: self };
+-
+- // Attempt to consume any remaining elements if the filter predicate
+- // has not yet panicked. We'll backshift any remaining elements
+- // whether we've already panicked or if the consumption here panics.
+- if !backshift.drain.panic_flag {
+- backshift.drain.for_each(drop);
+- }
+- }
+-}
+diff --git a/rust/alloc/vec/extract_if.rs b/rust/alloc/vec/extract_if.rs
+new file mode 100644
+index 00000000000000..f314a51d4d3dbb
+--- /dev/null
++++ b/rust/alloc/vec/extract_if.rs
+@@ -0,0 +1,115 @@
++// SPDX-License-Identifier: Apache-2.0 OR MIT
++
++use crate::alloc::{Allocator, Global};
++use core::ptr;
++use core::slice;
++
++use super::Vec;
++
++/// An iterator which uses a closure to determine if an element should be removed.
++///
++/// This struct is created by [`Vec::extract_if`].
++/// See its documentation for more.
++///
++/// # Example
++///
++/// ```
++/// #![feature(extract_if)]
++///
++/// let mut v = vec![0, 1, 2];
++/// let iter: std::vec::ExtractIf<'_, _, _> = v.extract_if(|x| *x % 2 == 0);
++/// ```
++#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
++#[derive(Debug)]
++#[must_use = "iterators are lazy and do nothing unless consumed"]
++pub struct ExtractIf<
++ 'a,
++ T,
++ F,
++ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
++> where
++ F: FnMut(&mut T) -> bool,
++{
++ pub(super) vec: &'a mut Vec<T, A>,
++ /// The index of the item that will be inspected by the next call to `next`.
++ pub(super) idx: usize,
++ /// The number of items that have been drained (removed) thus far.
++ pub(super) del: usize,
++ /// The original length of `vec` prior to draining.
++ pub(super) old_len: usize,
++ /// The filter test predicate.
++ pub(super) pred: F,
++}
++
++impl<T, F, A: Allocator> ExtractIf<'_, T, F, A>
++where
++ F: FnMut(&mut T) -> bool,
++{
++ /// Returns a reference to the underlying allocator.
++ #[unstable(feature = "allocator_api", issue = "32838")]
++ #[inline]
++ pub fn allocator(&self) -> &A {
++ self.vec.allocator()
++ }
++}
++
++#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
++impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
++where
++ F: FnMut(&mut T) -> bool,
++{
++ type Item = T;
++
++ fn next(&mut self) -> Option<T> {
++ unsafe {
++ while self.idx < self.old_len {
++ let i = self.idx;
++ let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
++ let drained = (self.pred)(&mut v[i]);
++ // Update the index *after* the predicate is called. If the index
++ // is updated prior and the predicate panics, the element at this
++ // index would be leaked.
++ self.idx += 1;
++ if drained {
++ self.del += 1;
++ return Some(ptr::read(&v[i]));
++ } else if self.del > 0 {
++ let del = self.del;
++ let src: *const T = &v[i];
++ let dst: *mut T = &mut v[i - del];
++ ptr::copy_nonoverlapping(src, dst, 1);
++ }
++ }
++ None
++ }
++ }
++
++ fn size_hint(&self) -> (usize, Option<usize>) {
++ (0, Some(self.old_len - self.idx))
++ }
++}
++
++#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
++impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A>
++where
++ F: FnMut(&mut T) -> bool,
++{
++ fn drop(&mut self) {
++ unsafe {
++ if self.idx < self.old_len && self.del > 0 {
++ // This is a pretty messed up state, and there isn't really an
++ // obviously right thing to do. We don't want to keep trying
++ // to execute `pred`, so we just backshift all the unprocessed
++ // elements and tell the vec that they still exist. The backshift
++ // is required to prevent a double-drop of the last successfully
++ // drained item prior to a panic in the predicate.
++ let ptr = self.vec.as_mut_ptr();
++ let src = ptr.add(self.idx);
++ let dst = src.sub(self.del);
++ let tail_len = self.old_len - self.idx;
++ src.copy_to(dst, tail_len);
++ }
++ self.vec.set_len(self.old_len - self.del);
++ }
++ }
++}
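
The practical difference between the removed `DrainFilter` and this new `ExtractIf` is what happens on early drop: `ExtractIf::drop` only backshifts over the elements already inspected and retains the rest, which is why the `panic_flag` bookkeeping could go away. A minimal sketch of that semantics (nightly-only, and matching this vendored `alloc`, where `extract_if` takes just the predicate):

```rust
#![feature(extract_if)]

fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];

    // Pull out only the first even element, then drop the iterator early.
    let first_even = v.extract_if(|x| *x % 2 == 0).next();
    assert_eq!(first_even, Some(2));

    // Elements the iterator never inspected are retained; the old
    // `DrainFilter` would have kept draining them in its `Drop` impl.
    assert_eq!(v, vec![1, 3, 4, 5, 6]);
}
```
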
+diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
+index 05c70de0227ed3..209a88cfe598f1 100644
+--- a/rust/alloc/vec/mod.rs
++++ b/rust/alloc/vec/mod.rs
+@@ -74,10 +74,10 @@
+ use crate::collections::{TryReserveError, TryReserveErrorKind};
+ use crate::raw_vec::RawVec;
+
+-#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+-pub use self::drain_filter::DrainFilter;
++#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
++pub use self::extract_if::ExtractIf;
+
+-mod drain_filter;
++mod extract_if;
+
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_splice", since = "1.21.0")]
+@@ -216,7 +216,7 @@
+ ///
+ /// # Indexing
+ ///
+-/// The `Vec` type allows to access values by index, because it implements the
++/// The `Vec` type allows access to values by index, because it implements the
+ /// [`Index`] trait. An example will be more explicit:
+ ///
+ /// ```
+@@ -618,22 +618,20 @@ pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+- /// #![feature(allocator_api)]
+- ///
+- /// use std::alloc::{AllocError, Allocator, Global, Layout};
++ /// use std::alloc::{alloc, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ ///
+ /// let vec = unsafe {
+- /// let mem = match Global.allocate(layout) {
+- /// Ok(mem) => mem.cast::<u32>().as_ptr(),
+- /// Err(AllocError) => return,
+- /// };
++ /// let mem = alloc(layout).cast::<u32>();
++ /// if mem.is_null() {
++ /// return;
++ /// }
+ ///
+ /// mem.write(1_000_000);
+ ///
+- /// Vec::from_raw_parts_in(mem, 1, 16, Global)
++ /// Vec::from_raw_parts(mem, 1, 16)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+@@ -876,19 +874,22 @@ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserv
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+- /// use std::alloc::{alloc, Layout};
++ /// #![feature(allocator_api)]
++ ///
++ /// use std::alloc::{AllocError, Allocator, Global, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
++ ///
+ /// let vec = unsafe {
+- /// let mem = alloc(layout).cast::<u32>();
+- /// if mem.is_null() {
+- /// return;
+- /// }
++ /// let mem = match Global.allocate(layout) {
++ /// Ok(mem) => mem.cast::<u32>().as_ptr(),
++ /// Err(AllocError) => return,
++ /// };
+ ///
+ /// mem.write(1_000_000);
+ ///
+- /// Vec::from_raw_parts(mem, 1, 16)
++ /// Vec::from_raw_parts_in(mem, 1, 16, Global)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+@@ -2507,7 +2508,7 @@ pub fn resize(&mut self, new_len: usize, value: T) {
+ let len = self.len();
+
+ if new_len > len {
+- self.extend_with(new_len - len, ExtendElement(value))
++ self.extend_with(new_len - len, value)
+ } else {
+ self.truncate(new_len);
+ }
+@@ -2545,7 +2546,7 @@ pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveE
+ let len = self.len();
+
+ if new_len > len {
+- self.try_extend_with(new_len - len, ExtendElement(value))
++ self.try_extend_with(new_len - len, value)
+ } else {
+ self.truncate(new_len);
+ Ok(())
+@@ -2684,26 +2685,10 @@ pub fn into_flattened(self) -> Vec<T, A> {
+ }
+ }
+
+-// This code generalizes `extend_with_{element,default}`.
+-trait ExtendWith<T> {
+- fn next(&mut self) -> T;
+- fn last(self) -> T;
+-}
+-
+-struct ExtendElement<T>(T);
+-impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+- fn next(&mut self) -> T {
+- self.0.clone()
+- }
+- fn last(self) -> T {
+- self.0
+- }
+-}
+-
+-impl<T, A: Allocator> Vec<T, A> {
++impl<T: Clone, A: Allocator> Vec<T, A> {
+ #[cfg(not(no_global_oom_handling))]
+- /// Extend the vector by `n` values, using the given generator.
+- fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
++ /// Extend the vector by `n` clones of value.
++ fn extend_with(&mut self, n: usize, value: T) {
+ self.reserve(n);
+
+ unsafe {
+@@ -2715,15 +2700,15 @@ fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+
+ // Write all elements except the last one
+ for _ in 1..n {
+- ptr::write(ptr, value.next());
++ ptr::write(ptr, value.clone());
+ ptr = ptr.add(1);
+- // Increment the length in every step in case next() panics
++ // Increment the length in every step in case clone() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+- ptr::write(ptr, value.last());
++ ptr::write(ptr, value);
+ local_len.increment_len(1);
+ }
+
+@@ -2731,8 +2716,8 @@ fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+ }
+ }
+
+- /// Try to extend the vector by `n` values, using the given generator.
+- fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Result<(), TryReserveError> {
++ /// Try to extend the vector by `n` clones of value.
++ fn try_extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> {
+ self.try_reserve(n)?;
+
+ unsafe {
+@@ -2744,15 +2729,15 @@ fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Resul
+
+ // Write all elements except the last one
+ for _ in 1..n {
+- ptr::write(ptr, value.next());
++ ptr::write(ptr, value.clone());
+ ptr = ptr.add(1);
+- // Increment the length in every step in case next() panics
++ // Increment the length in every step in case clone() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+- ptr::write(ptr, value.last());
++ ptr::write(ptr, value);
+ local_len.increment_len(1);
+ }
+
+@@ -3210,6 +3195,12 @@ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoI
+ /// If the closure returns false, the element will remain in the vector and will not be yielded
+ /// by the iterator.
+ ///
++ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
++ /// or the iteration short-circuits, then the remaining elements will be retained.
++ /// Use [`retain`] with a negated predicate if you do not need the returned iterator.
++ ///
++ /// [`retain`]: Vec::retain
++ ///
+ /// Using this method is equivalent to the following code:
+ ///
+ /// ```
+@@ -3228,10 +3219,10 @@ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoI
+ /// # assert_eq!(vec, vec![1, 4, 5]);
+ /// ```
+ ///
+- /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
++ /// But `extract_if` is easier to use. `extract_if` is also more efficient,
+ /// because it can backshift the elements of the array in bulk.
+ ///
+- /// Note that `drain_filter` also lets you mutate every element in the filter closure,
++ /// Note that `extract_if` also lets you mutate every element in the filter closure,
+ /// regardless of whether you choose to keep or remove it.
+ ///
+ /// # Examples
+@@ -3239,17 +3230,17 @@ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoI
+ /// Splitting an array into evens and odds, reusing the original allocation:
+ ///
+ /// ```
+- /// #![feature(drain_filter)]
++ /// #![feature(extract_if)]
+ /// let mut numbers = vec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
+ ///
+- /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
++ /// let evens = numbers.extract_if(|x| *x % 2 == 0).collect::<Vec<_>>();
+ /// let odds = numbers;
+ ///
+ /// assert_eq!(evens, vec![2, 4, 6, 8, 14]);
+ /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]);
+ /// ```
+- #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+- pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
++ #[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
++ pub fn extract_if<F>(&mut self, filter: F) -> ExtractIf<'_, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+@@ -3260,7 +3251,7 @@ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
+ self.set_len(0);
+ }
+
+- DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
++ ExtractIf { vec: self, idx: 0, del: 0, old_len, pred: filter }
+ }
+ }
+
+@@ -3272,7 +3263,7 @@ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
+ /// [`copy_from_slice`]: slice::copy_from_slice
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "extend_ref", since = "1.2.0")]
+-impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
++impl<'a, T: Copy + 'a, A: Allocator> Extend<&'a T> for Vec<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.spec_extend(iter.into_iter())
+ }
+@@ -3290,9 +3281,14 @@ fn extend_reserve(&mut self, additional: usize) {
+
+ /// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
+ #[stable(feature = "rust1", since = "1.0.0")]
+-impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
++impl<T, A1, A2> PartialOrd<Vec<T, A2>> for Vec<T, A1>
++where
++ T: PartialOrd,
++ A1: Allocator,
++ A2: Allocator,
++{
+ #[inline]
+- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
++ fn partial_cmp(&self, other: &Vec<T, A2>) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+ }
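
With the `ExtendWith` machinery gone, `extend_with` is plain cloning: `value` is cloned `n - 1` times and moved into the final slot. A small self-contained sketch that makes the clone count observable; the `Tracked` type here is a hypothetical illustration, not anything from the patch:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static CLONES: AtomicUsize = AtomicUsize::new(0);

struct Tracked(u32);

impl Clone for Tracked {
    fn clone(&self) -> Self {
        CLONES.fetch_add(1, Ordering::Relaxed);
        Tracked(self.0)
    }
}

fn main() {
    let mut v: Vec<Tracked> = Vec::new();
    // Growing by three slots performs two clones plus one move of `value`.
    v.resize(3, Tracked(7));
    assert_eq!(v.len(), 3);
    assert_eq!(CLONES.load(Ordering::Relaxed), 2);
}
```
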
+diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
+index a6a735201e59bb..ada91953744608 100644
+--- a/rust/alloc/vec/spec_extend.rs
++++ b/rust/alloc/vec/spec_extend.rs
+@@ -77,7 +77,7 @@ fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserv
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+-impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
++impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for Vec<T, A>
+ where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+@@ -87,7 +87,7 @@ impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+ }
+ }
+
+-impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
++impl<'a, T: 'a, I, A: Allocator> TrySpecExtend<&'a T, I> for Vec<T, A>
+ where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+@@ -98,7 +98,7 @@ impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+-impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
++impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+ where
+ T: Copy,
+ {
+@@ -108,7 +108,7 @@ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ }
+ }
+
+-impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
++impl<'a, T: 'a, A: Allocator> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+ where
+ T: Copy,
+ {
+diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
+index 552d9a85925b99..a721d466bee4b2 100644
+--- a/rust/bindgen_parameters
++++ b/rust/bindgen_parameters
+@@ -20,3 +20,7 @@
+
+ # `seccomp`'s comment gets understood as a doctest
+ --no-doc-comments
++
++# These functions use the `__preserve_most` calling convention, which neither bindgen
++# nor Rust currently understands, and which Clang currently declares to be unstable.
++--blocklist-function __list_.*_report
+diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
+index fb8ac3f211de52..bba2922c6ef77f 100644
+--- a/rust/compiler_builtins.rs
++++ b/rust/compiler_builtins.rs
+@@ -19,6 +19,7 @@
+ //! [`compiler_builtins`]: https://github.com/rust-lang/compiler-builtins
+ //! [`compiler-rt`]: https://compiler-rt.llvm.org/
+
++#![allow(internal_features)]
+ #![feature(compiler_builtins)]
+ #![compiler_builtins]
+ #![no_builtins]
+diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
+index 4ebb6f23fc2ec0..0fe043c0eaacdc 100644
+--- a/rust/kernel/init.rs
++++ b/rust/kernel/init.rs
+@@ -1292,8 +1292,15 @@ macro_rules! impl_zeroable {
+ i8, i16, i32, i64, i128, isize,
+ f32, f64,
+
+- // SAFETY: These are ZSTs, there is nothing to zero.
+- {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, Infallible, (),
++ // Note: do not add uninhabited types (such as `!` or `core::convert::Infallible`) to this list;
++ // creating an instance of an uninhabited type is immediate undefined behavior. For more on
++ // uninhabited/empty types, consult The Rustonomicon:
++ // <https://doc.rust-lang.org/stable/nomicon/exotic-sizes.html#empty-types>. The Rust Reference
++ // also has information on undefined behavior:
++ // <https://doc.rust-lang.org/stable/reference/behavior-considered-undefined.html>.
++ //
++ // SAFETY: These are inhabited ZSTs; there is nothing to zero and a valid value exists.
++ {<T: ?Sized>} PhantomData<T>, core::marker::PhantomPinned, (),
+
+ // SAFETY: Type is allowed to take any value, including all zeros.
+ {<T>} MaybeUninit<T>,
+diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
+index e8811700239aaf..de54d5fede6f8d 100644
+--- a/rust/kernel/lib.rs
++++ b/rust/kernel/lib.rs
+@@ -60,7 +60,7 @@
+ /// The top level entrypoint to implementing a kernel module.
+ ///
+ /// For any teardown or cleanup operations, your type may implement [`Drop`].
+-pub trait Module: Sized + Sync {
++pub trait Module: Sized + Sync + Send {
+ /// Called at module initialization time.
+ ///
+ /// Use this method to perform whatever setup or registration your module
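
Requiring `Send` here reflects that module state may be created on one thread and dropped on another, so the holder of the module instance must be safe to move between them. A compile-time sketch of the tightened bound; `MyDriverState` is a hypothetical stand-in:

```rust
// Compile-time check mirroring the tightened trait bound.
fn assert_module_bounds<T: Sized + Sync + Send>() {}

struct MyDriverState {
    _open_count: u64,
}

fn main() {
    assert_module_bounds::<MyDriverState>();
}
```
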
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index 8009184bf6d768..f48926e3e9fe32 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -399,6 +399,7 @@ macro_rules! pr_debug (
+ /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+ /// `alloc::format!` for information about the formatting syntax.
+ ///
++/// [`pr_info!`]: crate::pr_info!
+ /// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+ /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+ ///
+diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
+index 3d496391a9bd86..7f04e4f00a2c74 100644
+--- a/rust/kernel/sync/arc.rs
++++ b/rust/kernel/sync/arc.rs
+@@ -302,7 +302,7 @@ fn drop(&mut self) {
+ // The count reached zero, we must free the memory.
+ //
+ // SAFETY: The pointer was initialised from the result of `Box::leak`.
+- unsafe { Box::from_raw(self.ptr.as_ptr()) };
++ unsafe { drop(Box::from_raw(self.ptr.as_ptr())) };
+ }
+ }
+ }
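
The `arc.rs` change is stylistic: the bare `unsafe { Box::from_raw(...) };` already freed the allocation when the temporary was dropped, and wrapping it in `drop(...)` merely states that intent explicitly. The same idiom in isolation:

```rust
fn main() {
    // Leak a box, handing ownership to a raw pointer.
    let ptr: *mut String = Box::into_raw(Box::new(String::from("payload")));

    // SAFETY: `ptr` came from `Box::into_raw` above and is not used again,
    // so reconstituting the box and dropping it frees the allocation once.
    unsafe { drop(Box::from_raw(ptr)) };
}
```
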
+diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
+index b17ee5cd98f3eb..48546ad37335b6 100644
+--- a/rust/kernel/sync/locked_by.rs
++++ b/rust/kernel/sync/locked_by.rs
+@@ -80,8 +80,12 @@ pub struct LockedBy<T: ?Sized, U: ?Sized> {
+ // SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
+ unsafe impl<T: ?Sized + Send, U: ?Sized> Send for LockedBy<T, U> {}
+
+-// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
+-// data it protects is `Send`.
++// SAFETY: If `T` is not `Sync`, then parallel shared access to this `LockedBy` allows you to use
++// `access_mut` to hand out `&mut T` on one thread at a time. The requirement that `T: Send` is
++// sufficient to allow that.
++//
++// If `T` is `Sync`, then the `access` method also becomes available, which allows you to obtain
++// several `&T` from several threads at once. However, this is okay as `T` is `Sync`.
+ unsafe impl<T: ?Sized + Send, U: ?Sized> Sync for LockedBy<T, U> {}
+
+ impl<T, U> LockedBy<T, U> {
+@@ -115,7 +119,10 @@ impl<T: ?Sized, U> LockedBy<T, U> {
+ ///
+ /// Panics if `owner` is different from the data protected by the lock used in
+ /// [`new`](LockedBy::new).
+- pub fn access<'a>(&'a self, owner: &'a U) -> &'a T {
++ pub fn access<'a>(&'a self, owner: &'a U) -> &'a T
++ where
++ T: Sync,
++ {
+ build_assert!(
+ size_of::<U>() > 0,
+ "`U` cannot be a ZST because `owner` wouldn't be unique"
+@@ -124,7 +131,10 @@ pub fn access<'a>(&'a self, owner: &'a U) -> &'a T {
+ panic!("mismatched owners");
+ }
+
+- // SAFETY: `owner` is evidence that the owner is locked.
++ // SAFETY: `owner` is evidence that there are only shared references to the owner for the
++ // duration of 'a, so it's not possible to use `Self::access_mut` to obtain a mutable
++ // reference to the inner value that aliases with this shared reference. The type is `Sync`
++ // so there are no other requirements.
+ unsafe { &*self.data.get() }
+ }
+
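
The new `T: Sync` bound on `access` encodes the usual `Send`/`Sync` split: a type can be safe to move between threads without being safe to share by reference. A quick illustration (assuming nothing beyond core `std`), using `Cell`, which is exactly such a type:

```rust
use std::cell::Cell;

fn assert_send<T: Send>() {}

fn main() {
    // `Cell<u32>` may move across threads...
    assert_send::<Cell<u32>>();
    // ...but two threads must never hold `&Cell<u32>` at once: the sibling
    // check `fn assert_sync<T: Sync>() {}` would not compile for `Cell`,
    // which is why `access` (handing out `&T`) now demands `T: Sync`.
}
```
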
+diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
+index 7eda15e5f1b372..b2299bc7ac1ff5 100644
+--- a/rust/kernel/task.rs
++++ b/rust/kernel/task.rs
+@@ -82,7 +82,7 @@ impl Task {
+ /// Returns a task reference for the currently executing task/thread.
+ ///
+ /// The recommended way to get the current task/thread is to use the
+- /// [`current`](crate::current) macro because it is safe.
++ /// [`current`] macro because it is safe.
+ ///
+ /// # Safety
+ ///
+diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
+index fdb778e65d79d3..e23e7827d756d7 100644
+--- a/rust/kernel/types.rs
++++ b/rust/kernel/types.rs
+@@ -248,7 +248,7 @@ pub fn ffi_init(init_func: impl FnOnce(*mut T)) -> impl PinInit<Self> {
+ }
+
+ /// Returns a raw pointer to the opaque data.
+- pub fn get(&self) -> *mut T {
++ pub const fn get(&self) -> *mut T {
+ UnsafeCell::get(&self.value).cast::<T>()
+ }
+
+diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
+index c42105c2ff9635..34ae73f5db068a 100644
+--- a/rust/macros/lib.rs
++++ b/rust/macros/lib.rs
+@@ -35,18 +35,6 @@
+ /// author: "Rust for Linux Contributors",
+ /// description: "My very own kernel module!",
+ /// license: "GPL",
+-/// params: {
+-/// my_i32: i32 {
+-/// default: 42,
+-/// permissions: 0o000,
+-/// description: "Example of i32",
+-/// },
+-/// writeable_i32: i32 {
+-/// default: 42,
+-/// permissions: 0o644,
+-/// description: "Example of i32",
+-/// },
+-/// },
+ /// }
+ ///
+ /// struct MyModule;
+diff --git a/rust/macros/module.rs b/rust/macros/module.rs
+index d62d8710d77ab0..7dee348ef0cc82 100644
+--- a/rust/macros/module.rs
++++ b/rust/macros/module.rs
+@@ -199,98 +199,147 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
+ /// Used by the printing macros, e.g. [`info!`].
+ const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
+
+- /// The \"Rust loadable module\" mark.
+- //
+- // This may be best done another way later on, e.g. as a new modinfo
+- // key or a new section. For the moment, keep it simple.
+- #[cfg(MODULE)]
+- #[doc(hidden)]
+- #[used]
+- static __IS_RUST_MODULE: () = ();
+-
+- static mut __MOD: Option<{type_}> = None;
+-
+ // SAFETY: `__this_module` is constructed by the kernel at load time and will not be
+ // freed until the module is unloaded.
+ #[cfg(MODULE)]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{
+- kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _)
++ extern \"C\" {{
++ static __this_module: kernel::types::Opaque<kernel::bindings::module>;
++ }}
++
++ kernel::ThisModule::from_ptr(__this_module.get())
+ }};
+ #[cfg(not(MODULE))]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{
+ kernel::ThisModule::from_ptr(core::ptr::null_mut())
+ }};
+
+- // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
+- #[cfg(MODULE)]
+- #[doc(hidden)]
+- #[no_mangle]
+- pub extern \"C\" fn init_module() -> core::ffi::c_int {{
+- __init()
+- }}
+-
+- #[cfg(MODULE)]
+- #[doc(hidden)]
+- #[no_mangle]
+- pub extern \"C\" fn cleanup_module() {{
+- __exit()
+- }}
++            // Doubly nested modules, so that nobody can access the public items inside.
++ mod __module_init {{
++ mod __module_init {{
++ use super::super::{type_};
++
++ /// The \"Rust loadable module\" mark.
++ //
++ // This may be best done another way later on, e.g. as a new modinfo
++ // key or a new section. For the moment, keep it simple.
++ #[cfg(MODULE)]
++ #[doc(hidden)]
++ #[used]
++ static __IS_RUST_MODULE: () = ();
++
++ static mut __MOD: Option<{type_}> = None;
++
++ // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
++ /// # Safety
++ ///
++ /// This function must not be called after module initialization, because it may be
++ /// freed after that completes.
++ #[cfg(MODULE)]
++ #[doc(hidden)]
++ #[no_mangle]
++ #[link_section = \".init.text\"]
++ pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{
++ // SAFETY: This function is inaccessible to the outside due to the double
++ // module wrapping it. It is called exactly once by the C side via its
++ // unique name.
++ unsafe {{ __init() }}
++ }}
+
+- // Built-in modules are initialized through an initcall pointer
+- // and the identifiers need to be unique.
+- #[cfg(not(MODULE))]
+- #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+- #[doc(hidden)]
+- #[link_section = \"{initcall_section}\"]
+- #[used]
+- pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
++ #[cfg(MODULE)]
++ #[doc(hidden)]
++ #[no_mangle]
++ pub extern \"C\" fn cleanup_module() {{
++ // SAFETY:
++ // - This function is inaccessible to the outside due to the double
++ // module wrapping it. It is called exactly once by the C side via its
++ // unique name,
++ // - furthermore it is only called after `init_module` has returned `0`
++ // (which delegates to `__init`).
++ unsafe {{ __exit() }}
++ }}
+
+- #[cfg(not(MODULE))]
+- #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+- core::arch::global_asm!(
+- r#\".section \"{initcall_section}\", \"a\"
+- __{name}_initcall:
+- .long __{name}_init - .
+- .previous
+- \"#
+- );
++ // Built-in modules are initialized through an initcall pointer
++ // and the identifiers need to be unique.
++ #[cfg(not(MODULE))]
++ #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
++ #[doc(hidden)]
++ #[link_section = \"{initcall_section}\"]
++ #[used]
++ pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
++
++ #[cfg(not(MODULE))]
++ #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
++ core::arch::global_asm!(
++ r#\".section \"{initcall_section}\", \"a\"
++ __{name}_initcall:
++ .long __{name}_init - .
++ .previous
++ \"#
++ );
++
++ #[cfg(not(MODULE))]
++ #[doc(hidden)]
++ #[no_mangle]
++ pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
++ // SAFETY: This function is inaccessible to the outside due to the double
++ // module wrapping it. It is called exactly once by the C side via its
++ // placement above in the initcall section.
++ unsafe {{ __init() }}
++ }}
+
+- #[cfg(not(MODULE))]
+- #[doc(hidden)]
+- #[no_mangle]
+- pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
+- __init()
+- }}
++ #[cfg(not(MODULE))]
++ #[doc(hidden)]
++ #[no_mangle]
++ pub extern \"C\" fn __{name}_exit() {{
++ // SAFETY:
++ // - This function is inaccessible to the outside due to the double
++ // module wrapping it. It is called exactly once by the C side via its
++ // unique name,
++ // - furthermore it is only called after `__{name}_init` has returned `0`
++ // (which delegates to `__init`).
++ unsafe {{ __exit() }}
++ }}
+
+- #[cfg(not(MODULE))]
+- #[doc(hidden)]
+- #[no_mangle]
+- pub extern \"C\" fn __{name}_exit() {{
+- __exit()
+- }}
++ /// # Safety
++ ///
++ /// This function must only be called once.
++ unsafe fn __init() -> core::ffi::c_int {{
++ match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{
++ Ok(m) => {{
++ // SAFETY: No data race, since `__MOD` can only be accessed by this
++ // module and there only `__init` and `__exit` access it. These
++ // functions are only called once and `__exit` cannot be called
++ // before or during `__init`.
++ unsafe {{
++ __MOD = Some(m);
++ }}
++ return 0;
++ }}
++ Err(e) => {{
++ return e.to_errno();
++ }}
++ }}
++ }}
+
+- fn __init() -> core::ffi::c_int {{
+- match <{type_} as kernel::Module>::init(&THIS_MODULE) {{
+- Ok(m) => {{
++ /// # Safety
++ ///
++ /// This function must
++ /// - only be called once,
++ /// - be called after `__init` has been called and returned `0`.
++ unsafe fn __exit() {{
++ // SAFETY: No data race, since `__MOD` can only be accessed by this module
++ // and there only `__init` and `__exit` access it. These functions are only
++ // called once and `__init` was already called.
+ unsafe {{
+- __MOD = Some(m);
++ // Invokes `drop()` on `__MOD`, which should be used for cleanup.
++ __MOD = None;
+ }}
+- return 0;
+- }}
+- Err(e) => {{
+- return e.to_errno();
+ }}
+- }}
+- }}
+
+- fn __exit() {{
+- unsafe {{
+- // Invokes `drop()` on `__MOD`, which should be used for cleanup.
+- __MOD = None;
++ {modinfo}
+ }}
+ }}
+-
+- {modinfo}
+ ",
+ type_ = info.type_,
+ name = info.name,
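
The doubly nested module trick works because `#[no_mangle]` exports a symbol by name to the linker even when the item is unreachable through any Rust path. A tiny standalone sketch with a hypothetical `my_module_init`:

```rust
mod outer {
    mod inner {
        // Exported to the linker by name, yet unreachable as a Rust path.
        #[no_mangle]
        pub extern "C" fn my_module_init() -> i32 {
            0
        }
    }
}

fn main() {
    // outer::inner::my_module_init(); // error: module `inner` is private
}
```
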
+diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
+index 4ccf4236031c1f..3fa16412db15ca 100644
+--- a/samples/bpf/Makefile
++++ b/samples/bpf/Makefile
+@@ -166,6 +166,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic
+ endif
+ endif
+
++ifeq ($(ARCH), x86)
++BPF_EXTRA_CFLAGS += -fcf-protection
++endif
++
+ TPROGS_CFLAGS += -Wall -O2
+ TPROGS_CFLAGS += -Wmissing-prototypes
+ TPROGS_CFLAGS += -Wstrict-prototypes
+@@ -394,7 +398,7 @@ $(obj)/%.o: $(src)/%.c
+ -Wno-gnu-variable-sized-type-not-at-end \
+ -Wno-address-of-packed-member -Wno-tautological-compare \
+ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+- -fno-asynchronous-unwind-tables -fcf-protection \
++ -fno-asynchronous-unwind-tables \
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
+ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
+ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
+diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
+index 7048bb3594d65b..634e81d83efd95 100644
+--- a/samples/bpf/asm_goto_workaround.h
++++ b/samples/bpf/asm_goto_workaround.h
+@@ -4,14 +4,14 @@
+ #define __ASM_GOTO_WORKAROUND_H
+
+ /*
+- * This will bring in asm_volatile_goto and asm_inline macro definitions
++ * This will bring in asm_goto_output and asm_inline macro definitions
+ * if enabled by compiler and config options.
+ */
+ #include <linux/types.h>
+
+-#ifdef asm_volatile_goto
+-#undef asm_volatile_goto
+-#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
++#ifdef asm_goto_output
++#undef asm_goto_output
++#define asm_goto_output(x...) asm volatile("invalid use of asm_goto_output")
+ #endif
+
+ /*
+diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
+index d2fbcf963cdf6d..07ff471ed6aee0 100644
+--- a/samples/bpf/map_perf_test_user.c
++++ b/samples/bpf/map_perf_test_user.c
+@@ -370,7 +370,7 @@ static void run_perf_test(int tasks)
+
+ static void fill_lpm_trie(void)
+ {
+- struct bpf_lpm_trie_key *key;
++ struct bpf_lpm_trie_key_u8 *key;
+ unsigned long value = 0;
+ unsigned int i;
+ int r;
+diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
+index 7a788bb837fc1a..7a09ac74fac07a 100644
+--- a/samples/bpf/syscall_tp_user.c
++++ b/samples/bpf/syscall_tp_user.c
+@@ -17,9 +17,9 @@
+
+ static void usage(const char *cmd)
+ {
+- printf("USAGE: %s [-i num_progs] [-h]\n", cmd);
+- printf(" -i num_progs # number of progs of the test\n");
+- printf(" -h # help\n");
++ printf("USAGE: %s [-i nr_tests] [-h]\n", cmd);
++	printf("  -i nr_tests   # number of test rounds to run\n");
++ printf(" -h # help\n");
+ }
+
+ static void verify_map(int map_id)
+@@ -45,14 +45,14 @@ static void verify_map(int map_id)
+ }
+ }
+
+-static int test(char *filename, int num_progs)
++static int test(char *filename, int nr_tests)
+ {
+- int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
+- struct bpf_link *links[num_progs * 4];
+- struct bpf_object *objs[num_progs];
++ int map0_fds[nr_tests], map1_fds[nr_tests], fd, i, j = 0;
++ struct bpf_link **links = NULL;
++ struct bpf_object *objs[nr_tests];
+ struct bpf_program *prog;
+
+- for (i = 0; i < num_progs; i++) {
++ for (i = 0; i < nr_tests; i++) {
+ objs[i] = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(objs[i])) {
+ fprintf(stderr, "opening BPF object file failed\n");
+@@ -60,6 +60,19 @@ static int test(char *filename, int num_progs)
+ goto cleanup;
+ }
+
++ /* One-time initialization */
++ if (!links) {
++ int nr_progs = 0;
++
++ bpf_object__for_each_program(prog, objs[i])
++ nr_progs += 1;
++
++ links = calloc(nr_progs * nr_tests, sizeof(struct bpf_link *));
++
++ if (!links)
++ goto cleanup;
++ }
++
+ /* load BPF program */
+ if (bpf_object__load(objs[i])) {
+ fprintf(stderr, "loading BPF object file failed\n");
+@@ -101,14 +114,18 @@ static int test(char *filename, int num_progs)
+ close(fd);
+
+ /* verify the map */
+- for (i = 0; i < num_progs; i++) {
++ for (i = 0; i < nr_tests; i++) {
+ verify_map(map0_fds[i]);
+ verify_map(map1_fds[i]);
+ }
+
+ cleanup:
+- for (j--; j >= 0; j--)
+- bpf_link__destroy(links[j]);
++ if (links) {
++ for (j--; j >= 0; j--)
++ bpf_link__destroy(links[j]);
++
++ free(links);
++ }
+
+ for (i--; i >= 0; i--)
+ bpf_object__close(objs[i]);
+@@ -117,13 +134,13 @@ static int test(char *filename, int num_progs)
+
+ int main(int argc, char **argv)
+ {
+- int opt, num_progs = 1;
++ int opt, nr_tests = 1;
+ char filename[256];
+
+ while ((opt = getopt(argc, argv, "i:h")) != -1) {
+ switch (opt) {
+ case 'i':
+- num_progs = atoi(optarg);
++ nr_tests = atoi(optarg);
+ break;
+ case 'h':
+ default:
+@@ -134,5 +151,5 @@ int main(int argc, char **argv)
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+- return test(filename, num_progs);
++ return test(filename, nr_tests);
+ }
+diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
+index 9d41db09c4800f..266fdd0b025dc6 100644
+--- a/samples/bpf/xdp_router_ipv4_user.c
++++ b/samples/bpf/xdp_router_ipv4_user.c
+@@ -91,7 +91,7 @@ static int recv_msg(struct sockaddr_nl sock_addr, int sock)
+ static void read_route(struct nlmsghdr *nh, int nll)
+ {
+ char dsts[24], gws[24], ifs[16], dsts_len[24], metrics[24];
+- struct bpf_lpm_trie_key *prefix_key;
++ struct bpf_lpm_trie_key_u8 *prefix_key;
+ struct rtattr *rt_attr;
+ struct rtmsg *rt_msg;
+ int rtm_family;
+diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
+index 5af00387c519e2..245db52bedf299 100644
+--- a/samples/vfio-mdev/mtty.c
++++ b/samples/vfio-mdev/mtty.c
+@@ -127,7 +127,6 @@ struct serial_port {
+ /* State of each mdev device */
+ struct mdev_state {
+ struct vfio_device vdev;
+- int irq_fd;
+ struct eventfd_ctx *intx_evtfd;
+ struct eventfd_ctx *msi_evtfd;
+ int irq_index;
+@@ -141,6 +140,7 @@ struct mdev_state {
+ struct mutex rxtx_lock;
+ struct vfio_device_info dev_info;
+ int nr_ports;
++ u8 intx_mask:1;
+ };
+
+ static struct mtty_type {
+@@ -166,10 +166,6 @@ static const struct file_operations vd_fops = {
+
+ static const struct vfio_device_ops mtty_dev_ops;
+
+-/* function prototypes */
+-
+-static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
+-
+ /* Helper functions */
+
+ static void dump_buffer(u8 *buf, uint32_t count)
+@@ -186,6 +182,36 @@ static void dump_buffer(u8 *buf, uint32_t count)
+ #endif
+ }
+
++static bool is_intx(struct mdev_state *mdev_state)
++{
++ return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX;
++}
++
++static bool is_msi(struct mdev_state *mdev_state)
++{
++ return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX;
++}
++
++static bool is_noirq(struct mdev_state *mdev_state)
++{
++ return !is_intx(mdev_state) && !is_msi(mdev_state);
++}
++
++static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
++{
++ lockdep_assert_held(&mdev_state->ops_lock);
++
++ if (is_msi(mdev_state)) {
++ if (mdev_state->msi_evtfd)
++ eventfd_signal(mdev_state->msi_evtfd, 1);
++ } else if (is_intx(mdev_state)) {
++ if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
++ eventfd_signal(mdev_state->intx_evtfd, 1);
++ mdev_state->intx_mask = true;
++ }
++ }
++}
++
+ static void mtty_create_config_space(struct mdev_state *mdev_state)
+ {
+ /* PCI dev ID */
+@@ -921,6 +947,25 @@ static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
+ return -EFAULT;
+ }
+
++static void mtty_disable_intx(struct mdev_state *mdev_state)
++{
++ if (mdev_state->intx_evtfd) {
++ eventfd_ctx_put(mdev_state->intx_evtfd);
++ mdev_state->intx_evtfd = NULL;
++ mdev_state->intx_mask = false;
++ mdev_state->irq_index = -1;
++ }
++}
++
++static void mtty_disable_msi(struct mdev_state *mdev_state)
++{
++ if (mdev_state->msi_evtfd) {
++ eventfd_ctx_put(mdev_state->msi_evtfd);
++ mdev_state->msi_evtfd = NULL;
++ mdev_state->irq_index = -1;
++ }
++}
++
+ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
+ unsigned int index, unsigned int start,
+ unsigned int count, void *data)
+@@ -932,59 +977,113 @@ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
++ if (!is_intx(mdev_state) || start != 0 || count != 1) {
++ ret = -EINVAL;
++ break;
++ }
++
++ if (flags & VFIO_IRQ_SET_DATA_NONE) {
++ mdev_state->intx_mask = true;
++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
++ uint8_t mask = *(uint8_t *)data;
++
++ if (mask)
++ mdev_state->intx_mask = true;
++ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++ ret = -ENOTTY; /* No support for mask fd */
++ }
++ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
++ if (!is_intx(mdev_state) || start != 0 || count != 1) {
++ ret = -EINVAL;
++ break;
++ }
++
++ if (flags & VFIO_IRQ_SET_DATA_NONE) {
++ mdev_state->intx_mask = false;
++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
++ uint8_t mask = *(uint8_t *)data;
++
++ if (mask)
++ mdev_state->intx_mask = false;
++ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
++ ret = -ENOTTY; /* No support for unmask fd */
++ }
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+- {
+- if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- pr_info("%s: disable INTx\n", __func__);
+- if (mdev_state->intx_evtfd)
+- eventfd_ctx_put(mdev_state->intx_evtfd);
++ if (is_intx(mdev_state) && !count &&
++ (flags & VFIO_IRQ_SET_DATA_NONE)) {
++ mtty_disable_intx(mdev_state);
++ break;
++ }
++
++ if (!(is_intx(mdev_state) || is_noirq(mdev_state)) ||
++ start != 0 || count != 1) {
++ ret = -EINVAL;
+ break;
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
++ struct eventfd_ctx *evt;
++
++ mtty_disable_intx(mdev_state);
++
++ if (fd < 0)
++ break;
+
+- if (fd > 0) {
+- struct eventfd_ctx *evt;
+-
+- evt = eventfd_ctx_fdget(fd);
+- if (IS_ERR(evt)) {
+- ret = PTR_ERR(evt);
+- break;
+- }
+- mdev_state->intx_evtfd = evt;
+- mdev_state->irq_fd = fd;
+- mdev_state->irq_index = index;
++ evt = eventfd_ctx_fdget(fd);
++ if (IS_ERR(evt)) {
++ ret = PTR_ERR(evt);
+ break;
+ }
++ mdev_state->intx_evtfd = evt;
++ mdev_state->irq_index = index;
++ break;
++ }
++
++ if (!is_intx(mdev_state)) {
++ ret = -EINVAL;
++ break;
++ }
++
++ if (flags & VFIO_IRQ_SET_DATA_NONE) {
++ mtty_trigger_interrupt(mdev_state);
++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
++ uint8_t trigger = *(uint8_t *)data;
++
++ if (trigger)
++ mtty_trigger_interrupt(mdev_state);
+ }
+ break;
+ }
+- }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
++ ret = -ENOTTY;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+- if (flags & VFIO_IRQ_SET_DATA_NONE) {
+- if (mdev_state->msi_evtfd)
+- eventfd_ctx_put(mdev_state->msi_evtfd);
+- pr_info("%s: disable MSI\n", __func__);
+- mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
++ if (is_msi(mdev_state) && !count &&
++ (flags & VFIO_IRQ_SET_DATA_NONE)) {
++ mtty_disable_msi(mdev_state);
+ break;
+ }
++
++ if (!(is_msi(mdev_state) || is_noirq(mdev_state)) ||
++ start != 0 || count != 1) {
++ ret = -EINVAL;
++ break;
++ }
++
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+ struct eventfd_ctx *evt;
+
+- if (fd <= 0)
+- break;
++ mtty_disable_msi(mdev_state);
+
+- if (mdev_state->msi_evtfd)
++ if (fd < 0)
+ break;
+
+ evt = eventfd_ctx_fdget(fd);
+@@ -993,20 +1092,37 @@ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
+ break;
+ }
+ mdev_state->msi_evtfd = evt;
+- mdev_state->irq_fd = fd;
+ mdev_state->irq_index = index;
++ break;
++ }
++
++ if (!is_msi(mdev_state)) {
++ ret = -EINVAL;
++ break;
++ }
++
++ if (flags & VFIO_IRQ_SET_DATA_NONE) {
++ mtty_trigger_interrupt(mdev_state);
++ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
++ uint8_t trigger = *(uint8_t *)data;
++
++ if (trigger)
++ mtty_trigger_interrupt(mdev_state);
+ }
+ break;
+- }
+- break;
++ }
++ break;
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+- pr_info("%s: MSIX_IRQ\n", __func__);
++ dev_dbg(mdev_state->vdev.dev, "%s: MSIX_IRQ\n", __func__);
++ ret = -ENOTTY;
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+- pr_info("%s: ERR_IRQ\n", __func__);
++ dev_dbg(mdev_state->vdev.dev, "%s: ERR_IRQ\n", __func__);
++ ret = -ENOTTY;
+ break;
+ case VFIO_PCI_REQ_IRQ_INDEX:
+- pr_info("%s: REQ_IRQ\n", __func__);
++ dev_dbg(mdev_state->vdev.dev, "%s: REQ_IRQ\n", __func__);
++ ret = -ENOTTY;
+ break;
+ }
+
+@@ -1014,33 +1130,6 @@ static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
+ return ret;
+ }
+
+-static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
+-{
+- int ret = -1;
+-
+- if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
+- (!mdev_state->msi_evtfd))
+- return -EINVAL;
+- else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
+- (!mdev_state->intx_evtfd)) {
+- pr_info("%s: Intr eventfd not found\n", __func__);
+- return -EINVAL;
+- }
+-
+- if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
+- ret = eventfd_signal(mdev_state->msi_evtfd, 1);
+- else
+- ret = eventfd_signal(mdev_state->intx_evtfd, 1);
+-
+-#if defined(DEBUG_INTR)
+- pr_info("Intx triggered\n");
+-#endif
+- if (ret != 1)
+- pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
+-
+- return ret;
+-}
+-
+ static int mtty_get_region_info(struct mdev_state *mdev_state,
+ struct vfio_region_info *region_info,
+ u16 *cap_type_id, void **cap_type)
+@@ -1084,22 +1173,16 @@ static int mtty_get_region_info(struct mdev_state *mdev_state,
+
+ static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
+ {
+- switch (irq_info->index) {
+- case VFIO_PCI_INTX_IRQ_INDEX:
+- case VFIO_PCI_MSI_IRQ_INDEX:
+- case VFIO_PCI_REQ_IRQ_INDEX:
+- break;
+-
+- default:
++ if (irq_info->index != VFIO_PCI_INTX_IRQ_INDEX &&
++ irq_info->index != VFIO_PCI_MSI_IRQ_INDEX)
+ return -EINVAL;
+- }
+
+ irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
+ irq_info->count = 1;
+
+ if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
+- irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
+- VFIO_IRQ_INFO_AUTOMASKED);
++ irq_info->flags |= VFIO_IRQ_INFO_MASKABLE |
++ VFIO_IRQ_INFO_AUTOMASKED;
+ else
+ irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
+
+@@ -1262,6 +1345,15 @@ static unsigned int mtty_get_available(struct mdev_type *mtype)
+ return atomic_read(&mdev_avail_ports) / type->nr_ports;
+ }
+
++static void mtty_close(struct vfio_device *vdev)
++{
++ struct mdev_state *mdev_state =
++ container_of(vdev, struct mdev_state, vdev);
++
++ mtty_disable_intx(mdev_state);
++ mtty_disable_msi(mdev_state);
++}
++
+ static const struct vfio_device_ops mtty_dev_ops = {
+ .name = "vfio-mtty",
+ .init = mtty_init_dev,
+@@ -1273,6 +1365,7 @@ static const struct vfio_device_ops mtty_dev_ops = {
+ .unbind_iommufd = vfio_iommufd_emulated_unbind,
+ .attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
++ .close_device = mtty_close,
+ };
+
+ static struct mdev_driver mtty_driver = {
+diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
+index 5a84b6443875c4..3500a3d62f0df2 100644
+--- a/scripts/Kconfig.include
++++ b/scripts/Kconfig.include
+@@ -33,7 +33,8 @@ ld-option = $(success,$(LD) -v $(1))
+
+ # $(as-instr,<instr>)
+ # Return y if the assembler supports <instr>, n otherwise
+-as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler-with-cpp -o /dev/null -)
++as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) $(2) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -)
++as-instr64 = $(as-instr,$(1),$(m64-flag))
+
+ # check if $(CC) and $(LD) exist
+ $(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found)
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 82e3fb19fdafc9..5c4e437f9d854d 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -272,7 +272,7 @@ rust_common_cmd = \
+ -Zallow-features=$(rust_allowed_features) \
+ -Zcrate-attr=no_std \
+ -Zcrate-attr='feature($(rust_allowed_features))' \
+- --extern alloc --extern kernel \
++ -Zunstable-options --extern force:alloc --extern kernel \
+ --crate-type rlib -L $(objtree)/rust/ \
+ --crate-name $(basename $(notdir $@)) \
+ --out-dir $(dir $@) --emit=dep-info=$(depfile)
+diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
+index 8fcb427405a6f1..92be0c9a13eeb5 100644
+--- a/scripts/Makefile.compiler
++++ b/scripts/Makefile.compiler
+@@ -38,7 +38,7 @@ as-option = $(call try-run,\
+ # Usage: aflags-y += $(call as-instr,instr,option1,option2)
+
+ as-instr = $(call try-run,\
+- printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
++ printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
+
+ # __cc-option
+ # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
+diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
+index 4405d5b67578d6..fa3ad33a19df8d 100644
+--- a/scripts/Makefile.dtbinst
++++ b/scripts/Makefile.dtbinst
+@@ -24,7 +24,7 @@ __dtbs_install: $(dtbs) $(subdirs)
+ @:
+
+ quiet_cmd_dtb_install = INSTALL $@
+- cmd_dtb_install = install -D $< $@
++ cmd_dtb_install = install -D -m 0644 $< $@
+
+ $(dst)/%.dtb: $(obj)/%.dtb
+ $(call cmd,dtb_install)
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 2fe6f2828d3769..16c750bb95fafd 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -143,6 +143,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
+ KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+ KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+ KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict)
++KBUILD_CFLAGS += -Wno-enum-compare-conditional
++KBUILD_CFLAGS += -Wno-enum-enum-conversion
+ endif
+
+ endif
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 68d0134bdbf9d1..e702552fb131af 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -395,8 +395,12 @@ cmd_dtc = $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ;
+ -d $(depfile).dtc.tmp $(dtc-tmp) ; \
+ cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
+
++# NOTE:
++# Do not replace $(filter %.dtb %.dtbo, $^) with $(real-prereqs). When a single
++# DTB is turned into a multi-blob DTB, $^ will contain header file dependencies
++# recorded in the .*.cmd file.
+ quiet_cmd_fdtoverlay = DTOVL $@
+- cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(real-prereqs)
++ cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^)
+
+ $(multi-dtb-y): FORCE
+ $(call if_changed,fdtoverlay)
+diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
+index b3a6aa8fbe8cb4..1979913aff6824 100644
+--- a/scripts/Makefile.modfinal
++++ b/scripts/Makefile.modfinal
+@@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=))
+ part-of-module = y
+
+ quiet_cmd_cc_o_c = CC [M] $@
+- cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
++ cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV) $(CFLAGS_KCSAN), $(c_flags)) -c -o $@ $<
+
+ %.mod.o: %.mod.c FORCE
+ $(call if_changed_dep,cc_o_c)
+diff --git a/scripts/Makefile.package b/scripts/Makefile.package
+index 2bcab02da9653c..a16d60a4b3fd7a 100644
+--- a/scripts/Makefile.package
++++ b/scripts/Makefile.package
+@@ -126,7 +126,7 @@ debian-orig: private version = $(shell dpkg-parsechangelog -S Version | sed 's/-
+ debian-orig: private orig-name = $(source)_$(version).orig.tar$(debian-orig-suffix)
+ debian-orig: mkdebian-opts = --need-source
+ debian-orig: linux.tar$(debian-orig-suffix) debian
+- $(Q)if [ "$(df --output=target .. 2>/dev/null)" = "$(df --output=target $< 2>/dev/null)" ]; then \
++ $(Q)if [ "$$(df --output=target .. 2>/dev/null)" = "$$(df --output=target $< 2>/dev/null)" ]; then \
+ ln -f $< ../$(orig-name); \
+ else \
+ cp $< ../$(orig-name); \
+diff --git a/scripts/Makefile.vdsoinst b/scripts/Makefile.vdsoinst
+new file mode 100644
+index 00000000000000..a81ca735003e4e
+--- /dev/null
++++ b/scripts/Makefile.vdsoinst
+@@ -0,0 +1,45 @@
++# SPDX-License-Identifier: GPL-2.0-only
++# ==========================================================================
++# Install unstripped copies of vDSO
++# ==========================================================================
++
++PHONY := __default
++__default:
++ @:
++
++include $(srctree)/scripts/Kbuild.include
++
++install-dir := $(MODLIB)/vdso
++
++define gen_install_rules
++
++src := $$(firstword $$(subst :,$(space),$(1)))
++dest := $(install-dir)/$$(or $$(word 2,$$(subst :,$(space),$(1))),$$(patsubst %.dbg,%,$$(notdir $(1))))
++
++__default: $$(dest)
++$$(dest): $$(src) FORCE
++ $$(call cmd,install)
++
++# Some architectures create .build-id symlinks
++ifneq ($(filter arm s390 sparc x86, $(SRCARCH)),)
++link := $(install-dir)/.build-id/$$(shell $(READELF) -n $$(src) | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p').debug
++
++__default: $$(link)
++$$(link): $$(dest) FORCE
++ $$(call cmd,symlink)
++endif
++
++endef
++
++$(foreach x, $(sort $(INSTALL_FILES)), $(eval $(call gen_install_rules,$(x))))
++
++quiet_cmd_install = INSTALL $@
++ cmd_install = mkdir -p $(dir $@); cp $< $@
++
++quiet_cmd_symlink = SYMLINK $@
++ cmd_symlink = mkdir -p $(dir $@); ln -sf --relative $< $@
++
++PHONY += FORCE
++FORCE:
++
++.PHONY: $(PHONY)
+diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
+index 3cd6ca15f390de..c9f3e03124d7f5 100644
+--- a/scripts/Makefile.vmlinux
++++ b/scripts/Makefile.vmlinux
+@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
+
+ ifdef CONFIG_MODULES
+ KASAN_SANITIZE_.vmlinux.export.o := n
++KCSAN_SANITIZE_.vmlinux.export.o := n
+ GCOV_PROFILE_.vmlinux.export.o := n
+ targets += .vmlinux.export.o
+ vmlinux: .vmlinux.export.o
+diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
+index 0edfdb40364b8c..25b3b587d37c00 100644
+--- a/scripts/Makefile.vmlinux_o
++++ b/scripts/Makefile.vmlinux_o
+@@ -37,7 +37,8 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
+
+ vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
+ vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
+-vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)
++vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
++ $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
+
+ objtool-args = $(vmlinux-objtool-args-y) --link
+
+diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test
+index d3760f7749d4e7..96615e50836b0a 100644
+--- a/scripts/atomic/kerneldoc/sub_and_test
++++ b/scripts/atomic/kerneldoc/sub_and_test
+@@ -1,7 +1,7 @@
+ cat <<EOF
+ /**
+ * ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
+- * @i: ${int} value to add
++ * @i: ${int} value to subtract
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v - @i) with ${desc_order} ordering.
+diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
+index 61b7dddedc461e..3f899cc7e99a94 100755
+--- a/scripts/bpf_doc.py
++++ b/scripts/bpf_doc.py
+@@ -414,8 +414,8 @@ class PrinterRST(Printer):
+ version = version.stdout.decode().rstrip()
+ except:
+ try:
+- version = subprocess.run(['make', 'kernelversion'], cwd=linuxRoot,
+- capture_output=True, check=True)
++ version = subprocess.run(['make', '-s', '--no-print-directory', 'kernelversion'],
++ cwd=linuxRoot, capture_output=True, check=True)
+ version = version.stdout.decode().rstrip()
+ except:
+ return 'Linux'
+@@ -513,7 +513,7 @@ eBPF programs can have an associated license, passed along with the bytecode
+ instructions to the kernel when the programs are loaded. The format for that
+ string is identical to the one in use for kernel modules (Dual licenses, such
+ as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
+-programs that are compatible with the GNU Privacy License (GPL).
++programs that are compatible with the GNU General Public License (GNU GPL).
+
+ In order to use such helpers, the eBPF program must be loaded with the correct
+ license string passed (via **attr**) to the **bpf**\\ () system call, and this
+diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
+index 84f5fb7f1cecc2..f27d552aec43f2 100755
+--- a/scripts/checkstack.pl
++++ b/scripts/checkstack.pl
+@@ -97,8 +97,7 @@ my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
+ # 11160: a7 fb ff 60 aghi %r15,-160
+ # or
+ # 100092: e3 f0 ff c8 ff 71 lay %r15,-56(%r15)
+- $re = qr/.*(?:lay|ag?hi).*\%r15,-(([0-9]{2}|[3-9])[0-9]{2})
+- (?:\(\%r15\))?$/ox;
++ $re = qr/.*(?:lay|ag?hi).*\%r15,-([0-9]+)(?:\(\%r15\))?$/o;
+ } elsif ($arch eq 'sparc' || $arch eq 'sparc64') {
+ # f0019d10: 9d e3 bf 90 save %sp, -112, %sp
+ $re = qr/.*save.*%sp, -(([0-9]{2}|[3-9])[0-9]{2}), %sp/o;
+@@ -139,15 +138,11 @@ $total_size = 0;
+ while (my $line = <STDIN>) {
+ if ($line =~ m/$funcre/) {
+ $func = $1;
+- next if $line !~ m/^($xs*)/;
++ next if $line !~ m/^($x*)/;
+ if ($total_size > $min_stack) {
+ push @stack, "$intro$total_size\n";
+ }
+-
+- $addr = $1;
+- $addr =~ s/ /0/g;
+- $addr = "0x$addr";
+-
++ $addr = "0x$1";
+ $intro = "$addr $func [$file]:";
+ my $padlen = 56 - length($intro);
+ while ($padlen > 0) {
+diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
+index a84cc5737c2c6c..bc005cac19441c 100755
+--- a/scripts/clang-tools/gen_compile_commands.py
++++ b/scripts/clang-tools/gen_compile_commands.py
+@@ -170,7 +170,7 @@ def process_line(root_directory, command_prefix, file_path):
+ # escape the pound sign '#', either as '\#' or '$(pound)' (depending on the
+ # kernel version). The compile_commands.json file is not interepreted
+ # by Make, so this code replaces the escaped version with '#'.
+- prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#')
++ prefix = command_prefix.replace(r'\#', '#').replace('$(pound)', '#')
+
+ # Use os.path.abspath() to normalize the path resolving '.' and '..' .
+ abs_path = os.path.abspath(os.path.join(root_directory, file_path))
+diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
+index 564c5632e1a243..bfe5a4082d8eaf 100755
+--- a/scripts/decode_stacktrace.sh
++++ b/scripts/decode_stacktrace.sh
+@@ -16,6 +16,21 @@ elif type c++filt >/dev/null 2>&1 ; then
+ cppfilt_opts=-i
+ fi
+
++UTIL_SUFFIX=
++if [[ -z ${LLVM:-} ]]; then
++ UTIL_PREFIX=${CROSS_COMPILE:-}
++else
++ UTIL_PREFIX=llvm-
++ if [[ ${LLVM} == */ ]]; then
++ UTIL_PREFIX=${LLVM}${UTIL_PREFIX}
++ elif [[ ${LLVM} == -* ]]; then
++ UTIL_SUFFIX=${LLVM}
++ fi
++fi
++
++READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
++ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
++
+ if [[ $1 == "-r" ]] ; then
+ vmlinux=""
+ basepath="auto"
+@@ -75,7 +90,7 @@ find_module() {
+
+ if [[ "$modpath" != "" ]] ; then
+ for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
+- if readelf -WS "$fn" | grep -qwF .debug_line ; then
++ if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
+ echo $fn
+ return
+ fi
+@@ -169,7 +184,7 @@ parse_symbol() {
+ if [[ $aarray_support == true && "${cache[$module,$address]+isset}" == "isset" ]]; then
+ local code=${cache[$module,$address]}
+ else
+- local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
++ local code=$(${ADDR2LINE} -i -e "$objfile" "$address" 2>/dev/null)
+ if [[ $aarray_support == true ]]; then
+ cache[$module,$address]=$code
+ fi
+diff --git a/scripts/dtc/dt-extract-compatibles b/scripts/dtc/dt-extract-compatibles
+index 9df9f1face832b..2f9d0eb59f5b70 100755
+--- a/scripts/dtc/dt-extract-compatibles
++++ b/scripts/dtc/dt-extract-compatibles
+@@ -1,8 +1,8 @@
+ #!/usr/bin/env python3
+ # SPDX-License-Identifier: GPL-2.0-only
+
++import fnmatch
+ import os
+-import glob
+ import re
+ import argparse
+
+@@ -49,6 +49,24 @@ def print_compat(filename, compatibles):
+ else:
+ print(*compatibles, sep='\n')
+
++def glob_without_symlinks(root, glob):
++ for path, dirs, files in os.walk(root):
++ # Ignore hidden directories
++ for d in dirs:
++ if fnmatch.fnmatch(d, ".*"):
++ dirs.remove(d)
++ for f in files:
++ if fnmatch.fnmatch(f, glob):
++ yield os.path.join(path, f)
++
++def files_to_parse(path_args):
++ for f in path_args:
++ if os.path.isdir(f):
++ for filename in glob_without_symlinks(f, "*.c"):
++ yield filename
++ else:
++ yield f
++
+ show_filename = False
+
+ if __name__ == "__main__":
+@@ -59,11 +77,6 @@ if __name__ == "__main__":
+
+ show_filename = args.with_filename
+
+- for f in args.cfile:
+- if os.path.isdir(f):
+- for filename in glob.iglob(f + "/**/*.c", recursive=True):
+- compat_list = parse_compatibles(filename)
+- print_compat(filename, compat_list)
+- else:
+- compat_list = parse_compatibles(f)
+- print_compat(f, compat_list)
++ for f in files_to_parse(args.cfile):
++ compat_list = parse_compatibles(f)
++ print_compat(f, compat_list)
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 951b74ba1b2423..746ff2d272f256 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
+
+ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+ {
+- unsigned long i, x;
++ unsigned long i, x, index;
+ struct partition_group size_group[length];
+ unsigned long num_groups = 0;
+ unsigned long randnum;
+
+ partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
++
++ /* FIXME: this group shuffle is currently a no-op. */
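++	/*
++	 * Swapping size_group[] entries only permutes the bookkeeping
++	 * structs; no field in newtree[] is moved, so the final layout
++	 * is unchanged by this loop.
++	 */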
+ for (i = num_groups - 1; i > 0; i--) {
+ struct partition_group tmp;
+ randnum = ranval(prng_state) % (i + 1);
+@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
+ }
+
+ for (x = 0; x < num_groups; x++) {
+- for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
++ for (index = size_group[x].length - 1; index > 0; index--) {
+ tree tmp;
++
++ i = size_group[x].start + index;
+ if (DECL_BIT_FIELD_TYPE(newtree[i]))
+ continue;
+- randnum = ranval(prng_state) % (i + 1);
++ randnum = ranval(prng_state) % (index + 1);
++ randnum += size_group[x].start;
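++			/* randnum is drawn group-relative and then rebased,
++			 * so the swap below never crosses group boundaries. */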
+ // we could handle this case differently if desired
+ if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
+ continue;
+@@ -273,8 +278,6 @@ static bool is_flexible_array(const_tree field)
+ {
+ const_tree fieldtype;
+ const_tree typesize;
+- const_tree elemtype;
+- const_tree elemsize;
+
+ fieldtype = TREE_TYPE(field);
+ typesize = TYPE_SIZE(fieldtype);
+@@ -282,20 +285,12 @@ static bool is_flexible_array(const_tree field)
+ if (TREE_CODE(fieldtype) != ARRAY_TYPE)
+ return false;
+
+- elemtype = TREE_TYPE(fieldtype);
+- elemsize = TYPE_SIZE(elemtype);
+-
+ /* size of type is represented in bits */
+
+ if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
+ TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
+ return true;
+
+- if (typesize != NULL_TREE &&
+- (TREE_CONSTANT(typesize) && (!tree_to_uhwi(typesize) ||
+- tree_to_uhwi(typesize) == tree_to_uhwi(elemsize))))
+- return true;
+-
+ return false;
+ }
+
+@@ -344,8 +339,7 @@ static int relayout_struct(tree type)
+
+ /*
+ * enforce that we don't randomize the layout of the last
+- * element of a struct if it's a 0 or 1-length array
+- * or a proper flexible array
++ * element of a struct if it's a proper flexible array
+ */
+ if (is_flexible_array(newtree[num_fields - 1])) {
+ has_flexarray = true;
+diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
+index c5c2ce113c9232..d20c47d21ad835 100644
+--- a/scripts/gcc-plugins/stackleak_plugin.c
++++ b/scripts/gcc-plugins/stackleak_plugin.c
+@@ -467,6 +467,8 @@ static bool stackleak_gate(void)
+ return false;
+ if (STRING_EQUAL(section, ".entry.text"))
+ return false;
++ if (STRING_EQUAL(section, ".head.text"))
++ return false;
+ }
+
+ return track_frame_size >= 0;
+diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
+index 825c75c5b71505..9459ca4f0f11fb 100755
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -5,4 +5,4 @@
+ # -mstack-protector-guard-reg, added by
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs"
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 75e4e22b986adc..f680bb01aeeb30 100755
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,4 +1,4 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index e3517d4ab8ec94..e810e0c27ff18d 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -66,10 +66,11 @@ LX_GDBPARSED(IRQD_LEVEL)
+ LX_GDBPARSED(IRQ_HIDDEN)
+
+ /* linux/module.h */
+-LX_GDBPARSED(MOD_TEXT)
+-LX_GDBPARSED(MOD_DATA)
+-LX_GDBPARSED(MOD_RODATA)
+-LX_GDBPARSED(MOD_RO_AFTER_INIT)
++if IS_BUILTIN(CONFIG_MODULES):
++ LX_GDBPARSED(MOD_TEXT)
++ LX_GDBPARSED(MOD_DATA)
++ LX_GDBPARSED(MOD_RODATA)
++ LX_GDBPARSED(MOD_RO_AFTER_INIT)
+
+ /* linux/mount.h */
+ LX_VALUE(MNT_NOSUID)
+@@ -157,3 +158,4 @@ LX_CONFIG(CONFIG_STACKDEPOT)
+ LX_CONFIG(CONFIG_PAGE_OWNER)
+ LX_CONFIG(CONFIG_SLUB_DEBUG)
+ LX_CONFIG(CONFIG_SLAB_FREELIST_HARDENED)
++LX_CONFIG(CONFIG_MMU)
+diff --git a/scripts/gdb/linux/device.py b/scripts/gdb/linux/device.py
+index 16376c5cfec641..0eabc5f4f8ca22 100644
+--- a/scripts/gdb/linux/device.py
++++ b/scripts/gdb/linux/device.py
+@@ -36,26 +36,26 @@ def for_each_bus():
+ for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')):
+ subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
+ subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
+- yield subsys_priv['bus']
++ yield subsys_priv
+
+
+ def for_each_class():
+ for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')):
+ subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
+ subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
+- yield subsys_priv['class']
++ yield subsys_priv
+
+
+ def get_bus_by_name(name):
+ for item in for_each_bus():
+- if item['name'].string() == name:
++ if item['bus']['name'].string() == name:
+ return item
+ raise gdb.GdbError("Can't find bus type {!r}".format(name))
+
+
+ def get_class_by_name(name):
+ for item in for_each_class():
+- if item['name'].string() == name:
++ if item['class']['name'].string() == name:
+ return item
+ raise gdb.GdbError("Can't find device class {!r}".format(name))
+
+@@ -70,13 +70,13 @@ def klist_for_each(klist):
+
+
+ def bus_for_each_device(bus):
+- for kn in klist_for_each(bus['p']['klist_devices']):
++ for kn in klist_for_each(bus['klist_devices']):
+ dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus')
+ yield dp['device']
+
+
+ def class_for_each_device(cls):
+- for kn in klist_for_each(cls['p']['klist_devices']):
++ for kn in klist_for_each(cls['klist_devices']):
+ dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class')
+ yield dp['device']
+
+@@ -103,7 +103,7 @@ class LxDeviceListBus(gdb.Command):
+ def invoke(self, arg, from_tty):
+ if not arg:
+ for bus in for_each_bus():
+- gdb.write('bus {}:\t{}\n'.format(bus['name'].string(), bus))
++ gdb.write('bus {}:\t{}\n'.format(bus['bus']['name'].string(), bus))
+ for dev in bus_for_each_device(bus):
+ _show_device(dev, level=1)
+ else:
+@@ -123,7 +123,7 @@ class LxDeviceListClass(gdb.Command):
+ def invoke(self, arg, from_tty):
+ if not arg:
+ for cls in for_each_class():
+- gdb.write("class {}:\t{}\n".format(cls['name'].string(), cls))
++ gdb.write("class {}:\t{}\n".format(cls['class']['name'].string(), cls))
+ for dev in class_for_each_device(cls):
+ _show_device(dev, level=1)
+ else:
+diff --git a/scripts/gdb/linux/vmalloc.py b/scripts/gdb/linux/vmalloc.py
+index 48e4a4fae7bbfd..d3c8a0274d1eda 100644
+--- a/scripts/gdb/linux/vmalloc.py
++++ b/scripts/gdb/linux/vmalloc.py
+@@ -10,8 +10,9 @@ import gdb
+ import re
+ from linux import lists, utils, stackdepot, constants, mm
+
+-vmap_area_type = utils.CachedType('struct vmap_area')
+-vmap_area_ptr_type = vmap_area_type.get_type().pointer()
++if constants.LX_CONFIG_MMU:
++ vmap_area_type = utils.CachedType('struct vmap_area')
++ vmap_area_ptr_type = vmap_area_type.get_type().pointer()
+
+ def is_vmalloc_addr(x):
+ pg_ops = mm.page_ops().ops
+@@ -25,6 +26,9 @@ class LxVmallocInfo(gdb.Command):
+ super(LxVmallocInfo, self).__init__("lx-vmallocinfo", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
++ if not constants.LX_CONFIG_MMU:
++ raise gdb.GdbError("Requires MMU support")
++
+ vmap_area_list = gdb.parse_and_eval('vmap_area_list')
+ for vmap_area in lists.list_for_each_entry(vmap_area_list, vmap_area_ptr_type, "list"):
+ if not vmap_area['vm']:
+diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl
+index 0ffd5531242aa7..408bfd0216da0b 100755
+--- a/scripts/get_abi.pl
++++ b/scripts/get_abi.pl
+@@ -98,7 +98,7 @@ sub parse_abi {
+ $name =~ s,.*/,,;
+
+ my $fn = $file;
+- $fn =~ s,Documentation/ABI/,,;
++ $fn =~ s,.*Documentation/ABI/,,;
+
+ my $nametag = "File $fn";
+ $data{$nametag}->{what} = "File $name";
+diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
+index 81ebf8108ca748..81dfdf4470f757 100644
+--- a/scripts/kconfig/expr.c
++++ b/scripts/kconfig/expr.c
+@@ -396,35 +396,6 @@ static struct expr *expr_eliminate_yn(struct expr *e)
+ return e;
+ }
+
+-/*
+- * bool FOO!=n => FOO
+- */
+-struct expr *expr_trans_bool(struct expr *e)
+-{
+- if (!e)
+- return NULL;
+- switch (e->type) {
+- case E_AND:
+- case E_OR:
+- case E_NOT:
+- e->left.expr = expr_trans_bool(e->left.expr);
+- e->right.expr = expr_trans_bool(e->right.expr);
+- break;
+- case E_UNEQUAL:
+- // FOO!=n -> FOO
+- if (e->left.sym->type == S_TRISTATE) {
+- if (e->right.sym == &symbol_no) {
+- e->type = E_SYMBOL;
+- e->right.sym = NULL;
+- }
+- }
+- break;
+- default:
+- ;
+- }
+- return e;
+-}
+-
+ /*
+ * e1 || e2 -> ?
+ */
+diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
+index 4a9a23b1b7e1f9..fa38f9f263f7e8 100644
+--- a/scripts/kconfig/expr.h
++++ b/scripts/kconfig/expr.h
+@@ -295,7 +295,6 @@ void expr_free(struct expr *e);
+ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
+ int expr_eq(struct expr *e1, struct expr *e2);
+ tristate expr_calc_value(struct expr *e);
+-struct expr *expr_trans_bool(struct expr *e);
+ struct expr *expr_eliminate_dups(struct expr *e);
+ struct expr *expr_transform(struct expr *e);
+ int expr_contains_symbol(struct expr *dep, struct symbol *sym);
+diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
+index 9709aca3a30fe9..9e52c7360e55b4 100644
+--- a/scripts/kconfig/gconf.c
++++ b/scripts/kconfig/gconf.c
+@@ -1478,7 +1478,6 @@ int main(int ac, char *av[])
+
+ conf_parse(name);
+ fixup_rootmenu(&rootmenu);
+- conf_read(NULL);
+
+ /* Load the interface and connect signals */
+ init_main_window(glade_file);
+@@ -1486,6 +1485,8 @@ int main(int ac, char *av[])
+ init_left_tree();
+ init_right_tree();
+
++ conf_read(NULL);
++
+ switch (view_mode) {
+ case SINGLE_VIEW:
+ display_tree_part();
+diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
+index cc386e44368346..2c2b3e6f248caf 100644
+--- a/scripts/kconfig/lexer.l
++++ b/scripts/kconfig/lexer.l
+@@ -302,8 +302,11 @@ static char *expand_token(const char *in, size_t n)
+ new_string();
+ append_string(in, n);
+
+- /* get the whole line because we do not know the end of token. */
+- while ((c = input()) != EOF) {
++ /*
++	 * get the whole line because we do not know the end of the token.
++	 * input() returns 0 (not EOF!) when it reaches the end of file.
++ */
++ while ((c = input()) != 0) {
+ if (c == '\n') {
+ unput(c);
+ break;
+diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
+index 61c442d84aef4a..69a77f308fdc15 100644
+--- a/scripts/kconfig/menu.c
++++ b/scripts/kconfig/menu.c
+@@ -380,8 +380,6 @@ void menu_finalize(struct menu *parent)
+ dep = expr_transform(dep);
+ dep = expr_alloc_and(expr_copy(basedep), dep);
+ dep = expr_eliminate_dups(dep);
+- if (menu->sym && menu->sym->type != S_TRISTATE)
+- dep = expr_trans_bool(dep);
+ prop->visible.expr = dep;
+
+ /*
+diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
+index 902eb429b9dbd9..0b7952471c18f6 100755
+--- a/scripts/kconfig/merge_config.sh
++++ b/scripts/kconfig/merge_config.sh
+@@ -167,6 +167,8 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do
+ sed -i "/$CFG[ =]/d" $MERGE_FILE
+ fi
+ done
++	# In case the previous file lacks a newline at the end
++ echo >> $TMP_FILE
+ cat $MERGE_FILE >> $TMP_FILE
+ done
+
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index 620a3527c767af..4f3ba3debc08e1 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -1174,7 +1174,7 @@ void ConfigInfoView::clicked(const QUrl &url)
+ {
+ QByteArray str = url.toEncoded();
+ const std::size_t count = str.size();
+- char *data = new char[count + 1];
++ char *data = new char[count + 2]; // '$' + '\0'
+ struct symbol **result;
+ struct menu *m = NULL;
+
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index 0572330bf8a78a..7b1df55b017679 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -13,18 +13,21 @@
+
+ struct symbol symbol_yes = {
+ .name = "y",
++ .type = S_TRISTATE,
+ .curr = { "y", yes },
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+
+ struct symbol symbol_mod = {
+ .name = "m",
++ .type = S_TRISTATE,
+ .curr = { "m", mod },
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+
+ struct symbol symbol_no = {
+ .name = "n",
++ .type = S_TRISTATE,
+ .curr = { "n", no },
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+@@ -122,9 +125,9 @@ static long long sym_get_range_val(struct symbol *sym, int base)
+ static void sym_validate_range(struct symbol *sym)
+ {
+ struct property *prop;
++ struct symbol *range_sym;
+ int base;
+ long long val, val2;
+- char str[64];
+
+ switch (sym->type) {
+ case S_INT:
+@@ -140,17 +143,15 @@ static void sym_validate_range(struct symbol *sym)
+ if (!prop)
+ return;
+ val = strtoll(sym->curr.val, NULL, base);
+- val2 = sym_get_range_val(prop->expr->left.sym, base);
++ range_sym = prop->expr->left.sym;
++ val2 = sym_get_range_val(range_sym, base);
+ if (val >= val2) {
+- val2 = sym_get_range_val(prop->expr->right.sym, base);
++ range_sym = prop->expr->right.sym;
++ val2 = sym_get_range_val(range_sym, base);
+ if (val <= val2)
+ return;
+ }
+- if (sym->type == S_INT)
+- sprintf(str, "%lld", val2);
+- else
+- sprintf(str, "0x%llx", val2);
+- sym->curr.val = xstrdup(str);
++ sym->curr.val = range_sym->curr.val;
+ }
+
+ static void sym_set_changed(struct symbol *sym)
+@@ -777,8 +778,7 @@ const char *sym_get_string_value(struct symbol *sym)
+ case no:
+ return "n";
+ case mod:
+- sym_calc_value(modules_sym);
+- return (modules_sym->curr.tri == no) ? "n" : "m";
++ return "m";
+ case yes:
+ return "y";
+ }
+diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
+index a78b804b680cf3..b9513d224476fc 100755
+--- a/scripts/ld-version.sh
++++ b/scripts/ld-version.sh
+@@ -57,9 +57,11 @@ else
+ fi
+ fi
+
+-# Some distributions append a package release number, as in 2.34-4.fc32
+-# Trim the hyphen and any characters that follow.
+-version=${version%-*}
++# There may be something after the version, such as a distribution's package
++# release number (like Fedora's "2.34-4.fc32") or punctuation (like LLD briefly
++# added before the "compatible with GNU linkers" string), so remove everything
++# after just numbers and periods.
++version=${version%%[!0-9.]*}
+
+ cversion=$(get_canonical_version $version)
+ min_cversion=$(get_canonical_version $min_version)
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index a432b171be826a..a9434a72cac4fb 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -135,8 +135,13 @@ gen_btf()
+ ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
+ --strip-all ${1} ${2} 2>/dev/null
+ # Change e_type to ET_REL so that it can be used to link final vmlinux.
+- # Unlike GNU ld, lld does not allow an ET_EXEC input.
+- printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
++ # GNU ld 2.35+ and lld do not allow an ET_EXEC input.
++ if is_enabled CONFIG_CPU_BIG_ENDIAN; then
++ et_rel='\0\1'
++ else
++ et_rel='\1\0'
++ fi
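++	# e_type is the 16-bit field at offset 16 of the ELF header, which
++	# is why seek=16 lands on it and why the byte order matters above.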
++ printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ }
+
+ # Create ${2} .S file with all symbols from the ${1} object file
+@@ -177,7 +182,7 @@ kallsyms_step()
+ mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms ${kallsymso_prev}
+ kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
+
+- info AS ${kallsyms_S}
++ info AS ${kallsymso}
+ ${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
+ ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ -c -o ${kallsymso} ${kallsyms_S}
+diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
+index d65ab8bfeaf4b5..fd5ffdb81bab70 100755
+--- a/scripts/min-tool-version.sh
++++ b/scripts/min-tool-version.sh
+@@ -31,7 +31,7 @@ llvm)
+ fi
+ ;;
+ rustc)
+- echo 1.71.1
++ echo 1.73.0
+ ;;
+ bindgen)
+ echo 0.65.1
+diff --git a/scripts/mksysmap b/scripts/mksysmap
+index 9ba1c9da0a40f2..57ff5656d566fb 100755
+--- a/scripts/mksysmap
++++ b/scripts/mksysmap
+@@ -48,17 +48,8 @@ ${NM} -n ${1} | sed >${2} -e "
+ / __kvm_nvhe_\\$/d
+ / __kvm_nvhe_\.L/d
+
+-# arm64 lld
+-/ __AArch64ADRPThunk_/d
+-
+-# arm lld
+-/ __ARMV5PILongThunk_/d
+-/ __ARMV7PILongThunk_/d
+-/ __ThumbV7PILongThunk_/d
+-
+-# mips lld
+-/ __LA25Thunk_/d
+-/ __microLA25Thunk_/d
++# lld arm/aarch64/mips thunks
++/ __[[:alnum:]]*Thunk_/d
+
+ # CFI type identifiers
+ / __kcfi_typeid_/d
+diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
+index c9e38ad937fd45..3c54125eb37334 100644
+--- a/scripts/mod/Makefile
++++ b/scripts/mod/Makefile
+@@ -5,7 +5,7 @@ CFLAGS_REMOVE_empty.o += $(CC_FLAGS_LTO)
+ hostprogs-always-y += modpost mk_elfconfig
+ always-y += empty.o
+
+-modpost-objs := modpost.o file2alias.o sumversion.o
++modpost-objs := modpost.o file2alias.o sumversion.o symsearch.o
+
+ devicetable-offsets-file := devicetable-offsets.h
+
+@@ -16,7 +16,7 @@ targets += $(devicetable-offsets-file) devicetable-offsets.s
+
+ # dependencies on generated files need to be listed explicitly
+
+-$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
++$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o $(obj)/symsearch.o: $(obj)/elfconfig.h
+ $(obj)/file2alias.o: $(obj)/$(devicetable-offsets-file)
+
+ quiet_cmd_elfconfig = MKELF $@
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 7056751c29b1fb..6583b36dbe6948 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: tee:uuid */
+ static int do_tee_entry(const char *filename, void *symval, char *alias)
+ {
+- DEF_FIELD(symval, tee_client_device_id, uuid);
++ DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
+
+ sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+- uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
+- uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
+- uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
+- uuid.b[15]);
++ uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
++ uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
++ uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
++ uuid->b[15]);
+
+ add_wildcard(alias);
+ return 1;
+@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+ /* Looks like: ishtp:{guid} */
+ static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+ {
+- DEF_FIELD(symval, ishtp_device_id, guid);
++ DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
+
+ strcpy(alias, ISHTP_MODULE_PREFIX "{");
+- add_guid(alias, guid);
++ add_guid(alias, *guid);
+ strcat(alias, "}");
+
+ return 1;
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index b3dee80497cb2b..828d5cc367169f 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -22,7 +22,6 @@
+ #include <errno.h>
+ #include "modpost.h"
+ #include "../../include/linux/license.h"
+-#include "../../include/linux/module_symbol.h"
+
+ static bool module_enabled;
+ /* Are we using CONFIG_MODVERSIONS? */
+@@ -577,11 +576,14 @@ static int parse_elf(struct elf_info *info, const char *filename)
+ *p = TO_NATIVE(*p);
+ }
+
++ symsearch_init(info);
++
+ return 1;
+ }
+
+ static void parse_elf_finish(struct elf_info *info)
+ {
++ symsearch_finish(info);
+ release_file(info->hdr, info->size);
+ }
+
+@@ -798,7 +800,7 @@ static void check_section(const char *modname, struct elf_info *elf,
+ #define ALL_INIT_TEXT_SECTIONS \
+ ".init.text", ".meminit.text"
+ #define ALL_EXIT_TEXT_SECTIONS \
+- ".exit.text", ".memexit.text"
++ ".exit.text"
+
+ #define ALL_PCI_INIT_SECTIONS \
+ ".pci_fixup_early", ".pci_fixup_header", ".pci_fixup_final", \
+@@ -806,14 +808,14 @@ static void check_section(const char *modname, struct elf_info *elf,
+ ".pci_fixup_resume_early", ".pci_fixup_suspend"
+
+ #define ALL_XXXINIT_SECTIONS MEM_INIT_SECTIONS
+-#define ALL_XXXEXIT_SECTIONS MEM_EXIT_SECTIONS
+
+ #define ALL_INIT_SECTIONS INIT_SECTIONS, ALL_XXXINIT_SECTIONS
+-#define ALL_EXIT_SECTIONS EXIT_SECTIONS, ALL_XXXEXIT_SECTIONS
++#define ALL_EXIT_SECTIONS EXIT_SECTIONS
+
+ #define DATA_SECTIONS ".data", ".data.rel"
+ #define TEXT_SECTIONS ".text", ".text.*", ".sched.text", \
+- ".kprobes.text", ".cpuidle.text", ".noinstr.text"
++ ".kprobes.text", ".cpuidle.text", ".noinstr.text", \
++ ".ltext", ".ltext.*"
+ #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
+ ".fixup", ".entry.text", ".exception.text", \
+ ".coldtext", ".softirqentry.text"
+@@ -822,7 +824,6 @@ static void check_section(const char *modname, struct elf_info *elf,
+ #define MEM_INIT_SECTIONS ".meminit.*"
+
+ #define EXIT_SECTIONS ".exit.*"
+-#define MEM_EXIT_SECTIONS ".memexit.*"
+
+ #define ALL_TEXT_SECTIONS ALL_INIT_TEXT_SECTIONS, ALL_EXIT_TEXT_SECTIONS, \
+ TEXT_SECTIONS, OTHER_TEXT_SECTIONS
+@@ -832,7 +833,6 @@ enum mismatch {
+ DATA_TO_ANY_INIT,
+ TEXTDATA_TO_ANY_EXIT,
+ XXXINIT_TO_SOME_INIT,
+- XXXEXIT_TO_SOME_EXIT,
+ ANY_INIT_TO_ANY_EXIT,
+ ANY_EXIT_TO_ANY_INIT,
+ EXTABLE_TO_NON_TEXT,
+@@ -883,12 +883,6 @@ static const struct sectioncheck sectioncheck[] = {
+ .bad_tosec = { INIT_SECTIONS, NULL },
+ .mismatch = XXXINIT_TO_SOME_INIT,
+ },
+-/* Do not reference exit code/data from memexit code/data */
+-{
+- .fromsec = { ALL_XXXEXIT_SECTIONS, NULL },
+- .bad_tosec = { EXIT_SECTIONS, NULL },
+- .mismatch = XXXEXIT_TO_SOME_EXIT,
+-},
+ /* Do not use exit code/data from init code */
+ {
+ .fromsec = { ALL_INIT_SECTIONS, NULL },
+@@ -1017,7 +1011,7 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
+
+ /* symbols in data sections that may refer to meminit sections */
+ if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
+- match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_XXXEXIT_SECTIONS)) &&
++ match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS)) &&
+ match(fromsym, PATTERNS("*driver")))
+ return 0;
+
+@@ -1050,75 +1044,16 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
+ return 1;
+ }
+
+-/*
+- * If there's no name there, ignore it; likewise, ignore it if it's
+- * one of the magic symbols emitted used by current tools.
+- *
+- * Otherwise if find_symbols_between() returns those symbols, they'll
+- * fail the whitelist tests and cause lots of false alarms ... fixable
+- * only by merging __exit and __init sections into __text, bloating
+- * the kernel (which is especially evil on embedded platforms).
+- */
+-static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
+-{
+- const char *name = elf->strtab + sym->st_name;
+-
+- if (!name || !strlen(name))
+- return 0;
+- return !is_mapping_symbol(name);
+-}
+-
+-/* Look up the nearest symbol based on the section and the address */
+-static Elf_Sym *find_nearest_sym(struct elf_info *elf, Elf_Addr addr,
+- unsigned int secndx, bool allow_negative,
+- Elf_Addr min_distance)
+-{
+- Elf_Sym *sym;
+- Elf_Sym *near = NULL;
+- Elf_Addr sym_addr, distance;
+- bool is_arm = (elf->hdr->e_machine == EM_ARM);
+-
+- for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
+- if (get_secindex(elf, sym) != secndx)
+- continue;
+- if (!is_valid_name(elf, sym))
+- continue;
+-
+- sym_addr = sym->st_value;
+-
+- /*
+- * For ARM Thumb instruction, the bit 0 of st_value is set
+- * if the symbol is STT_FUNC type. Mask it to get the address.
+- */
+- if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+- sym_addr &= ~1;
+-
+- if (addr >= sym_addr)
+- distance = addr - sym_addr;
+- else if (allow_negative)
+- distance = sym_addr - addr;
+- else
+- continue;
+-
+- if (distance <= min_distance) {
+- min_distance = distance;
+- near = sym;
+- }
+-
+- if (min_distance == 0)
+- break;
+- }
+- return near;
+-}
+-
+ static Elf_Sym *find_fromsym(struct elf_info *elf, Elf_Addr addr,
+ unsigned int secndx)
+ {
+- return find_nearest_sym(elf, addr, secndx, false, ~0);
++ return symsearch_find_nearest(elf, addr, secndx, false, ~0);
+ }
+
+ static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
+ {
++ Elf_Sym *new_sym;
++
+ /* If the supplied symbol has a valid name, return it */
+ if (is_valid_name(elf, sym))
+ return sym;
+@@ -1127,7 +1062,9 @@ static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)
+ * Strive to find a better symbol name, but the resulting name may not
+ * match the symbol referenced in the original code.
+ */
+- return find_nearest_sym(elf, addr, get_secindex(elf, sym), true, 20);
++ new_sym = symsearch_find_nearest(elf, addr, get_secindex(elf, sym),
++ true, 20);
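++	/* Fall back to the original nameless symbol rather than NULL so
++	 * callers always have something to report. */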
++ return new_sym ? new_sym : sym;
+ }
+
+ static bool is_executable_section(struct elf_info *elf, unsigned int secndx)
+@@ -1161,7 +1098,9 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
+ sec_mismatch_count++;
+
+ warn("%s: section mismatch in reference: %s+0x%x (section: %s) -> %s (section: %s)\n",
+- modname, fromsym, (unsigned int)(faddr - from->st_value), fromsec, tosym, tosec);
++ modname, fromsym,
++ (unsigned int)(faddr - (from ? from->st_value : 0)),
++ fromsec, tosym, tosec);
+
+ if (mismatch->mismatch == EXTABLE_TO_NON_TEXT) {
+ if (match(tosec, mismatch->bad_tosec))
+@@ -1496,13 +1435,15 @@ static void section_rela(struct module *mod, struct elf_info *elf,
+ return;
+
+ for (rela = start; rela < stop; rela++) {
++ Elf_Sym *tsym;
+ Elf_Addr taddr, r_offset;
+ unsigned int r_type, r_sym;
+
+ r_offset = TO_NATIVE(rela->r_offset);
+ get_rel_type_and_sym(elf, rela->r_info, &r_type, &r_sym);
+
+- taddr = TO_NATIVE(rela->r_addend);
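++		/* A RELA target address is sym->st_value + addend; the
++		 * addend alone is only correct for section symbols whose
++		 * st_value is 0. */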
++ tsym = elf->symtab_start + r_sym;
++ taddr = tsym->st_value + TO_NATIVE(rela->r_addend);
+
+ switch (elf->hdr->e_machine) {
+ case EM_RISCV:
+@@ -1517,7 +1458,7 @@ static void section_rela(struct module *mod, struct elf_info *elf,
+ break;
+ }
+
+- check_section_mismatch(mod, elf, elf->symtab_start + r_sym,
++ check_section_mismatch(mod, elf, tsym,
+ fsecndx, fromsec, r_offset, taddr);
+ }
+ }
+@@ -1743,10 +1684,11 @@ static void read_symbols(const char *modname)
+ namespace = get_next_modinfo(&info, "import_ns",
+ namespace);
+ }
++
++ if (extra_warn && !get_modinfo(&info, "description"))
++ warn("missing MODULE_DESCRIPTION() in %s\n", modname);
+ }
+
+- if (extra_warn && !get_modinfo(&info, "description"))
+- warn("missing MODULE_DESCRIPTION() in %s\n", modname);
+ for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+ symname = remove_dot(info.strtab + sym->st_name);
+
+diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
+index 5f94c2c9f2d950..6413f26fcb6b45 100644
+--- a/scripts/mod/modpost.h
++++ b/scripts/mod/modpost.h
+@@ -10,6 +10,7 @@
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <elf.h>
++#include "../../include/linux/module_symbol.h"
+
+ #include "list.h"
+ #include "elfconfig.h"
+@@ -128,6 +129,8 @@ struct elf_info {
+ * take shndx from symtab_shndx_start[N] instead */
+ Elf32_Word *symtab_shndx_start;
+ Elf32_Word *symtab_shndx_stop;
++
++ struct symsearch *symsearch;
+ };
+
+ /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
+@@ -154,6 +157,28 @@ static inline unsigned int get_secindex(const struct elf_info *info,
+ return index;
+ }
+
++/*
++ * If there's no name there, ignore it; likewise, ignore it if it's
++ * one of the magic symbols emitted by current tools.
++ *
++ * Internal symbols created by tools should be ignored by modpost.
++ */
++static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
++{
++ const char *name = elf->strtab + sym->st_name;
++
++ if (!name || !strlen(name))
++ return 0;
++ return !is_mapping_symbol(name);
++}
++
++/* symsearch.c */
++void symsearch_init(struct elf_info *elf);
++void symsearch_finish(struct elf_info *elf);
++Elf_Sym *symsearch_find_nearest(struct elf_info *elf, Elf_Addr addr,
++ unsigned int secndx, bool allow_negative,
++ Elf_Addr min_distance);
++
+ /* file2alias.c */
+ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ Elf_Sym *sym, const char *symname);
+diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
+index 31066bfdba04e3..dc4878502276ce 100644
+--- a/scripts/mod/sumversion.c
++++ b/scripts/mod/sumversion.c
+@@ -326,7 +326,12 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
+
+ /* Sum all files in the same dir or subdirs. */
+ while ((line = get_line(&pos))) {
+- char* p = line;
++ char* p;
++
++ /* trim the leading spaces away */
++ while (isspace(*line))
++ line++;
++ p = line;
+
+ if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
+ p = strrchr(line, ' ');
+diff --git a/scripts/mod/symsearch.c b/scripts/mod/symsearch.c
+new file mode 100644
+index 00000000000000..aa4ed51f9960cd
+--- /dev/null
++++ b/scripts/mod/symsearch.c
+@@ -0,0 +1,199 @@
++// SPDX-License-Identifier: GPL-2.0
++
++/*
++ * Helper functions for finding the symbol in an ELF which is "nearest"
++ * to a given address.
++ */
++
++#include "modpost.h"
++
++struct syminfo {
++ unsigned int symbol_index;
++ unsigned int section_index;
++ Elf_Addr addr;
++};
++
++/*
++ * Container used to hold an entire binary search table.
++ * Entries in table are ascending, sorted first by section_index,
++ * then by addr, and last by symbol_index. The sorting by
++ * symbol_index is used to ensure predictable behavior when
++ * multiple symbols are present with the same address; all
++ * symbols past the first are effectively ignored, by eliding
++ * them in symsearch_fixup().
++ */
++struct symsearch {
++ unsigned int table_size;
++ struct syminfo table[];
++};
++
++static int syminfo_compare(const void *s1, const void *s2)
++{
++ const struct syminfo *sym1 = s1;
++ const struct syminfo *sym2 = s2;
++
++ if (sym1->section_index > sym2->section_index)
++ return 1;
++ if (sym1->section_index < sym2->section_index)
++ return -1;
++ if (sym1->addr > sym2->addr)
++ return 1;
++ if (sym1->addr < sym2->addr)
++ return -1;
++ if (sym1->symbol_index > sym2->symbol_index)
++ return 1;
++ if (sym1->symbol_index < sym2->symbol_index)
++ return -1;
++ return 0;
++}
++
++static unsigned int symbol_count(struct elf_info *elf)
++{
++ unsigned int result = 0;
++
++ for (Elf_Sym *sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
++ if (is_valid_name(elf, sym))
++ result++;
++ }
++ return result;
++}
++
++/*
++ * Populate the search array that we just allocated.
++ * Be slightly paranoid here. The ELF file is mmap'd and could
++ * conceivably change between symbol_count() and symsearch_populate().
++ * If we notice any difference, bail out rather than potentially
++ * propagating errors or crashing.
++ */
++static void symsearch_populate(struct elf_info *elf,
++ struct syminfo *table,
++ unsigned int table_size)
++{
++ bool is_arm = (elf->hdr->e_machine == EM_ARM);
++
++ for (Elf_Sym *sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
++ if (is_valid_name(elf, sym)) {
++ if (table_size-- == 0)
++ fatal("%s: size mismatch\n", __func__);
++ table->symbol_index = sym - elf->symtab_start;
++ table->section_index = get_secindex(elf, sym);
++ table->addr = sym->st_value;
++
++ /*
++ * For ARM Thumb instruction, the bit 0 of st_value is
++ * set if the symbol is STT_FUNC type. Mask it to get
++ * the address.
++ */
++ if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC)
++ table->addr &= ~1;
++
++ table++;
++ }
++ }
++
++ if (table_size != 0)
++ fatal("%s: size mismatch\n", __func__);
++}
++
++/*
++ * Do any fixups on the table after sorting.
++ * For now, this just finds adjacent entries which have
++ * the same section_index and addr, and it propagates
++ * the first symbol_index over the subsequent entries,
++ * so that only one symbol_index is seen for any given
++ * section_index and addr. This ensures that, whether
++ * we're looking at an address from "above" or "below",
++ * we see the same symbol_index.
++ * This does leave some duplicate entries in the table;
++ * in practice, these are a small fraction of the
++ * total number of entries, and they are harmless to
++ * the binary search algorithm other than a few occasional
++ * unnecessary comparisons.
++ */
++static void symsearch_fixup(struct syminfo *table, unsigned int table_size)
++{
++ /* Don't look at index 0, it will never change. */
++ for (unsigned int i = 1; i < table_size; i++) {
++ if (table[i].addr == table[i - 1].addr &&
++ table[i].section_index == table[i - 1].section_index) {
++ table[i].symbol_index = table[i - 1].symbol_index;
++ }
++ }
++}
++
++void symsearch_init(struct elf_info *elf)
++{
++ unsigned int table_size = symbol_count(elf);
++
++ elf->symsearch = NOFAIL(malloc(sizeof(struct symsearch) +
++ sizeof(struct syminfo) * table_size));
++ elf->symsearch->table_size = table_size;
++
++ symsearch_populate(elf, elf->symsearch->table, table_size);
++ qsort(elf->symsearch->table, table_size,
++ sizeof(struct syminfo), syminfo_compare);
++
++ symsearch_fixup(elf->symsearch->table, table_size);
++}
++
++void symsearch_finish(struct elf_info *elf)
++{
++ free(elf->symsearch);
++ elf->symsearch = NULL;
++}
++
++/*
++ * Find the syminfo which is in secndx and "nearest" to addr.
++ * allow_negative: allow returning a symbol whose address is > addr.
++ * min_distance: ignore symbols which are further away than this.
++ *
++ * Returns a pointer into the symbol table on success.
++ * Returns NULL if no legal symbol is found within the requested range.
++ */
++Elf_Sym *symsearch_find_nearest(struct elf_info *elf, Elf_Addr addr,
++ unsigned int secndx, bool allow_negative,
++ Elf_Addr min_distance)
++{
++ unsigned int hi = elf->symsearch->table_size;
++ unsigned int lo = 0;
++ struct syminfo *table = elf->symsearch->table;
++ struct syminfo target;
++
++ target.addr = addr;
++ target.section_index = secndx;
++ target.symbol_index = ~0; /* compares greater than any actual index */
++ while (hi > lo) {
++ unsigned int mid = lo + (hi - lo) / 2; /* Avoids overflow */
++
++ if (syminfo_compare(&table[mid], &target) > 0)
++ hi = mid;
++ else
++ lo = mid + 1;
++ }
++
++ /*
++ * table[hi], if it exists, is the first entry in the array which
++ * lies beyond target. table[hi - 1], if it exists, is the last
++ * entry in the array which comes before target, including the
++ * case where it perfectly matches the section and the address.
++ *
++ * Note -- if the address we're looking up falls perfectly
++ * in the middle of two symbols, this is written to always
++ * prefer the symbol with the lower address.
++ */
++ Elf_Sym *result = NULL;
++
++ if (allow_negative &&
++ hi < elf->symsearch->table_size &&
++ table[hi].section_index == secndx &&
++ table[hi].addr - addr <= min_distance) {
++ min_distance = table[hi].addr - addr;
++ result = &elf->symtab_start[table[hi].symbol_index];
++ }
++ if (hi > 0 &&
++ table[hi - 1].section_index == secndx &&
++ addr - table[hi - 1].addr <= min_distance) {
++ result = &elf->symtab_start[table[hi - 1].symbol_index];
++ }
++ return result;
++}
+diff --git a/scripts/module.lds.S b/scripts/module.lds.S
+index bf5bcf2836d815..89ff01a22634f0 100644
+--- a/scripts/module.lds.S
++++ b/scripts/module.lds.S
+@@ -13,6 +13,7 @@ SECTIONS {
+ /DISCARD/ : {
+ *(.discard)
+ *(.discard.*)
++ *(.export_symbol)
+ }
+
+ __ksymtab 0 : { *(SORT(___ksymtab+*)) }
+diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
+index 117018946b577a..a6fdcf13e0e53e 100755
+--- a/scripts/rust_is_available.sh
++++ b/scripts/rust_is_available.sh
+@@ -129,8 +129,12 @@ fi
+ # Check that the Rust bindings generator is suitable.
+ #
+ # Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
++#
++# The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
++# (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
++# the minimum version is upgraded past that (0.69.1 already fixed the issue).
+ rust_bindings_generator_output=$( \
+- LC_ALL=C "$BINDGEN" --version 2>/dev/null
++ LC_ALL=C "$BINDGEN" --version workaround-for-0.69.0 2>/dev/null
+ ) || rust_bindings_generator_code=$?
+ if [ -n "$rust_bindings_generator_code" ]; then
+ echo >&2 "***"
+diff --git a/scripts/sign-file.c b/scripts/sign-file.c
+index 598ef5465f8256..3edb156ae52c30 100644
+--- a/scripts/sign-file.c
++++ b/scripts/sign-file.c
+@@ -322,7 +322,7 @@ int main(int argc, char **argv)
+ CMS_NOSMIMECAP | use_keyid |
+ use_signed_attrs),
+ "CMS_add1_signer");
+- ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
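++	/* CMS_final() and the i2d_*_bio() writers return 1 on success and
++	 * 0 on failure, so the old "< 0" tests never caught an error. */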
++ ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) != 1,
+ "CMS_final");
+
+ #else
+@@ -341,10 +341,10 @@ int main(int argc, char **argv)
+ b = BIO_new_file(sig_file_name, "wb");
+ ERR(!b, "%s", sig_file_name);
+ #ifndef USE_PKCS7
+- ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
++ ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) != 1,
+ "%s", sig_file_name);
+ #else
+- ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
++ ERR(i2d_PKCS7_bio(b, pkcs7) != 1,
+ "%s", sig_file_name);
+ #endif
+ BIO_free(b);
+@@ -374,9 +374,9 @@ int main(int argc, char **argv)
+
+ if (!raw_sig) {
+ #ifndef USE_PKCS7
+- ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
++ ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) != 1, "%s", dest_name);
+ #else
+- ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
++ ERR(i2d_PKCS7_bio(bd, pkcs7) != 1, "%s", dest_name);
+ #endif
+ } else {
+ BIO *b;
+@@ -396,7 +396,7 @@ int main(int argc, char **argv)
+ ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
+ ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
+
+- ERR(BIO_free(bd) < 0, "%s", dest_name);
++ ERR(BIO_free(bd) != 1, "%s", dest_name);
+
+ /* Finally, if we're signing in place, replace the original. */
+ if (replace_orig)
+diff --git a/security/Kconfig b/security/Kconfig
+index 52c9af08ad35d3..39af8b8696efb0 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -19,6 +19,38 @@ config SECURITY_DMESG_RESTRICT
+
+ If you are unsure how to answer this question, answer N.
+
++choice
++ prompt "Allow /proc/pid/mem access override"
++ default PROC_MEM_ALWAYS_FORCE
++ help
++	  Traditionally /proc/pid/mem allows users with ptrace capability
++	  to override memory mapping permissions, as ptrace itself does.
++
++	  This allows that override to be limited: either never allow it,
++	  or require an active ptrace attachment.
++
++	  Defaults to the traditional behavior (for now).
++
++config PROC_MEM_ALWAYS_FORCE
++ bool "Traditional /proc/pid/mem behavior"
++ help
++ This allows /proc/pid/mem accesses to override memory mapping
++ permissions if you have ptrace access rights.
++
++config PROC_MEM_FORCE_PTRACE
++ bool "Require active ptrace() use for access override"
++ help
++ This allows /proc/pid/mem accesses to override memory mapping
++ permissions for active ptracers like gdb.
++
++config PROC_MEM_NO_FORCE
++ bool "Never"
++ help
++ Never override memory mapping permissions
++
++endchoice
++
+ config SECURITY
+ bool "Enable different security models"
+ depends on SYSFS
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index bd6a910f65282a..23b2853ce3c428 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -423,7 +423,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
+ /* high level check about policy management - fine grained in
+ * below after unpack
+ */
+- error = aa_may_manage_policy(label, ns, mask);
++ error = aa_may_manage_policy(current_cred(), label, ns, mask);
+ if (error)
+ goto end_section;
+
+@@ -486,7 +486,8 @@ static ssize_t profile_remove(struct file *f, const char __user *buf,
+ /* high level check about policy management - fine grained in
+ * below after unpack
+ */
+- error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
++ error = aa_may_manage_policy(current_cred(), label, ns,
++ AA_MAY_REMOVE_POLICY);
+ if (error)
+ goto out;
+
+@@ -1697,6 +1698,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+ struct aa_profile *p;
+ p = aa_deref_parent(profile);
+ dent = prof_dir(p);
++ if (!dent) {
++ error = -ENOENT;
++ goto fail2;
++ }
+ /* adding to parent that previously didn't have children */
+ dent = aafs_create_dir("profiles", dent);
+ if (IS_ERR(dent))
+@@ -1805,7 +1810,8 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
+ int error;
+
+ label = begin_current_label_crit_section();
+- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
++ error = aa_may_manage_policy(current_cred(), label, NULL,
++ AA_MAY_LOAD_POLICY);
+ end_current_label_crit_section(label);
+ if (error)
+ return error;
+@@ -1854,7 +1860,8 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
+ int error;
+
+ label = begin_current_label_crit_section();
+- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
++ error = aa_may_manage_policy(current_cred(), label, NULL,
++ AA_MAY_LOAD_POLICY);
+ end_current_label_crit_section(label);
+ if (error)
+ return error;
+@@ -2361,6 +2368,7 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = {
+
+ static struct aa_sfs_entry aa_sfs_entry_mount[] = {
+ AA_SFS_FILE_STRING("mask", "mount umount pivot_root"),
++ AA_SFS_FILE_STRING("move_mount", "detached"),
+ { }
+ };
+
+diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
+index 5a7978aa4b19e7..fa2d48250a4f58 100644
+--- a/security/apparmor/audit.c
++++ b/security/apparmor/audit.c
+@@ -85,37 +85,36 @@ static const char *const aa_class_names[] = {
+ /**
+ * audit_pre() - core AppArmor function.
+ * @ab: audit buffer to fill (NOT NULL)
+- * @ca: audit structure containing data to audit (NOT NULL)
++ * @va: audit structure containing data to audit (NOT NULL)
+ *
+- * Record common AppArmor audit data from @sa
++ * Record common AppArmor audit data from @va
+ */
+-static void audit_pre(struct audit_buffer *ab, void *ca)
++static void audit_pre(struct audit_buffer *ab, void *va)
+ {
+- struct common_audit_data *sa = ca;
++ struct apparmor_audit_data *ad = aad_of_va(va);
+
+ if (aa_g_audit_header) {
+ audit_log_format(ab, "apparmor=\"%s\"",
+- aa_audit_type[aad(sa)->type]);
++ aa_audit_type[ad->type]);
+ }
+
+- if (aad(sa)->op) {
+- audit_log_format(ab, " operation=\"%s\"", aad(sa)->op);
+- }
++ if (ad->op)
++ audit_log_format(ab, " operation=\"%s\"", ad->op);
+
+- if (aad(sa)->class)
++ if (ad->class)
+ audit_log_format(ab, " class=\"%s\"",
+- aad(sa)->class <= AA_CLASS_LAST ?
+- aa_class_names[aad(sa)->class] :
++ ad->class <= AA_CLASS_LAST ?
++ aa_class_names[ad->class] :
+ "unknown");
+
+- if (aad(sa)->info) {
+- audit_log_format(ab, " info=\"%s\"", aad(sa)->info);
+- if (aad(sa)->error)
+- audit_log_format(ab, " error=%d", aad(sa)->error);
++ if (ad->info) {
++ audit_log_format(ab, " info=\"%s\"", ad->info);
++ if (ad->error)
++ audit_log_format(ab, " error=%d", ad->error);
+ }
+
+- if (aad(sa)->label) {
+- struct aa_label *label = aad(sa)->label;
++ if (ad->subj_label) {
++ struct aa_label *label = ad->subj_label;
+
+ if (label_isprofile(label)) {
+ struct aa_profile *profile = labels_profile(label);
+@@ -134,42 +133,44 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
+ }
+ }
+
+- if (aad(sa)->name) {
++ if (ad->name) {
+ audit_log_format(ab, " name=");
+- audit_log_untrustedstring(ab, aad(sa)->name);
++ audit_log_untrustedstring(ab, ad->name);
+ }
+ }
+
+ /**
+ * aa_audit_msg - Log a message to the audit subsystem
+- * @sa: audit event structure (NOT NULL)
++ * @type: audit type for the message
++ * @ad: audit event structure (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ */
+-void aa_audit_msg(int type, struct common_audit_data *sa,
++void aa_audit_msg(int type, struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *))
+ {
+- aad(sa)->type = type;
+- common_lsm_audit(sa, audit_pre, cb);
++ ad->type = type;
++ common_lsm_audit(&ad->common, audit_pre, cb);
+ }
+
+ /**
+ * aa_audit - Log a profile based audit event to the audit subsystem
+ * @type: audit type for the message
+ * @profile: profile to check against (NOT NULL)
+- * @sa: audit event (NOT NULL)
++ * @ad: audit event (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ *
+ * Handle default message switching based off of audit mode flags
+ *
+ * Returns: error on failure
+ */
+-int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
++int aa_audit(int type, struct aa_profile *profile,
++ struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *))
+ {
+ AA_BUG(!profile);
+
+ if (type == AUDIT_APPARMOR_AUTO) {
+- if (likely(!aad(sa)->error)) {
++ if (likely(!ad->error)) {
+ if (AUDIT_MODE(profile) != AUDIT_ALL)
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+@@ -181,24 +182,24 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
+ if (AUDIT_MODE(profile) == AUDIT_QUIET ||
+ (type == AUDIT_APPARMOR_DENIED &&
+ AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
+- return aad(sa)->error;
++ return ad->error;
+
+ if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
+ type = AUDIT_APPARMOR_KILL;
+
+- aad(sa)->label = &profile->label;
++ ad->subj_label = &profile->label;
+
+- aa_audit_msg(type, sa, cb);
++ aa_audit_msg(type, ad, cb);
+
+- if (aad(sa)->type == AUDIT_APPARMOR_KILL)
++ if (ad->type == AUDIT_APPARMOR_KILL)
+ (void)send_sig_info(SIGKILL, NULL,
+- sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
+- sa->u.tsk : current);
++ ad->common.type == LSM_AUDIT_DATA_TASK &&
++ ad->common.u.tsk ? ad->common.u.tsk : current);
+
+- if (aad(sa)->type == AUDIT_APPARMOR_ALLOWED)
+- return complain_error(aad(sa)->error);
++ if (ad->type == AUDIT_APPARMOR_ALLOWED)
++ return complain_error(ad->error);
+
+- return aad(sa)->error;
++ return ad->error;
+ }
+
+ struct aa_audit_rule {
+@@ -216,7 +217,7 @@ void aa_audit_rule_free(void *vrule)
+ }
+ }
+
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
+ {
+ struct aa_audit_rule *rule;
+
+@@ -229,14 +230,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+ return -EINVAL;
+ }
+
+- rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
++ rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
+
+ if (!rule)
+ return -ENOMEM;
+
+ /* Currently rules are treated as coming from the root ns */
+ rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
+- GFP_KERNEL, true, false);
++ gfp, true, false);
+ if (IS_ERR(rule->label)) {
+ int err = PTR_ERR(rule->label);
+ aa_audit_rule_free(rule);
+diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
+index 326a51838ef289..2fb6a2ea0b998c 100644
+--- a/security/apparmor/capability.c
++++ b/security/apparmor/capability.c
+@@ -51,7 +51,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+
+ /**
+ * audit_caps - audit a capability
+- * @sa: audit data
++ * @ad: audit data
+ * @profile: profile being tested for confinement (NOT NULL)
+ * @cap: capability tested
+ * @error: error code returned by test
+@@ -59,9 +59,9 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ * Do auditing of capability and handle, audit/complain/kill modes switching
+ * and duplicate message elimination.
+ *
+- * Returns: 0 or sa->error on success, error code on failure
++ * Returns: 0 or ad->error on success, error code on failure
+ */
+-static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
++static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile,
+ int cap, int error)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -69,7 +69,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ struct audit_cache *ent;
+ int type = AUDIT_APPARMOR_AUTO;
+
+- aad(sa)->error = error;
++ ad->error = error;
+
+ if (likely(!error)) {
+ /* test if auditing is being forced */
+@@ -101,7 +101,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ }
+ put_cpu_var(audit_cache);
+
+- return aa_audit(type, profile, sa, audit_cb);
++ return aa_audit(type, profile, ad, audit_cb);
+ }
+
+ /**
+@@ -109,12 +109,12 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+ * @profile: profile being enforced (NOT NULL, NOT unconfined)
+ * @cap: capability to test if allowed
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
+- * @sa: audit data (MAY BE NULL indicating no auditing)
++ * @ad: audit data (MAY BE NULL indicating no auditing)
+ *
+ * Returns: 0 if allowed else -EPERM
+ */
+ static int profile_capable(struct aa_profile *profile, int cap,
+- unsigned int opts, struct common_audit_data *sa)
++ unsigned int opts, struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+@@ -132,14 +132,15 @@ static int profile_capable(struct aa_profile *profile, int cap,
+ /* audit the cap request in complain mode but note that it
+ * should be optional.
+ */
+- aad(sa)->info = "optional: no audit";
++ ad->info = "optional: no audit";
+ }
+
+- return audit_caps(sa, profile, cap, error);
++ return audit_caps(ad, profile, cap, error);
+ }
+
+ /**
+ * aa_capable - test permission to use capability
++ * @subj_cred: cred we are testing capability against
+ * @label: label being tested for capability (NOT NULL)
+ * @cap: capability to be tested
+ * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
+@@ -148,15 +149,17 @@ static int profile_capable(struct aa_profile *profile, int cap,
+ *
+ * Returns: 0 on success, or else an error code.
+ */
+-int aa_capable(struct aa_label *label, int cap, unsigned int opts)
++int aa_capable(const struct cred *subj_cred, struct aa_label *label,
++ int cap, unsigned int opts)
+ {
+ struct aa_profile *profile;
+ int error = 0;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
+
+- sa.u.cap = cap;
++ ad.subj_cred = subj_cred;
++ ad.common.u.cap = cap;
+ error = fn_for_each_confined(label, profile,
+- profile_capable(profile, cap, opts, &sa));
++ profile_capable(profile, cap, opts, &ad));
+
+ return error;
+ }
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index f3715cda59c52b..543105cf7e334d 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -31,6 +31,7 @@
+
+ /**
+ * may_change_ptraced_domain - check if can change profile on ptraced task
++ * @to_cred: cred of task changing domain
+ * @to_label: profile to change to (NOT NULL)
+ * @info: message if there is an error
+ *
+@@ -39,28 +40,34 @@
+ *
+ * Returns: %0 or error if change not allowed
+ */
+-static int may_change_ptraced_domain(struct aa_label *to_label,
++static int may_change_ptraced_domain(const struct cred *to_cred,
++ struct aa_label *to_label,
+ const char **info)
+ {
+ struct task_struct *tracer;
+ struct aa_label *tracerl = NULL;
++ const struct cred *tracer_cred = NULL;
++
+ int error = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+- if (tracer)
++ if (tracer) {
+ /* released below */
+ tracerl = aa_get_task_label(tracer);
+-
++ tracer_cred = get_task_cred(tracer);
++ }
+ /* not ptraced */
+ if (!tracer || unconfined(tracerl))
+ goto out;
+
+- error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
++ error = aa_may_ptrace(tracer_cred, tracerl, to_cred, to_label,
++ PTRACE_MODE_ATTACH);
+
+ out:
+ rcu_read_unlock();
+ aa_put_label(tracerl);
++ put_cred(tracer_cred);
+
+ if (error)
+ *info = "ptrace prevents transition";
+@@ -619,7 +626,8 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
+ return new;
+ }
+
+-static struct aa_label *profile_transition(struct aa_profile *profile,
++static struct aa_label *profile_transition(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *secure_exec)
+@@ -709,7 +717,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
+ }
+
+ audit:
+- aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
++ aa_audit_file(subj_cred, profile, &perms, OP_EXEC, MAY_EXEC, name,
++ target, new,
+ cond->uid, info, error);
+ if (!new || nonewprivs) {
+ aa_put_label(new);
+@@ -719,7 +728,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
+ return new;
+ }
+
+-static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
++static int profile_onexec(const struct cred *subj_cred,
++ struct aa_profile *profile, struct aa_label *onexec,
+ bool stack, const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+ bool *secure_exec)
+@@ -787,13 +797,15 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
+ }
+
+ audit:
+- return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
++ return aa_audit_file(subj_cred, profile, &perms, OP_EXEC,
++ AA_MAY_ONEXEC, xname,
+ NULL, onexec, cond->uid, info, error);
+ }
+
+ /* ensure none ns domain transitions are correctly applied with onexec */
+
+-static struct aa_label *handle_onexec(struct aa_label *label,
++static struct aa_label *handle_onexec(const struct cred *subj_cred,
++ struct aa_label *label,
+ struct aa_label *onexec, bool stack,
+ const struct linux_binprm *bprm,
+ char *buffer, struct path_cond *cond,
+@@ -810,26 +822,28 @@ static struct aa_label *handle_onexec(struct aa_label *label,
+
+ if (!stack) {
+ error = fn_for_each_in_ns(label, profile,
+- profile_onexec(profile, onexec, stack,
++ profile_onexec(subj_cred, profile, onexec, stack,
+ bprm, buffer, cond, unsafe));
+ if (error)
+ return ERR_PTR(error);
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_get_newest_label(onexec),
+- profile_transition(profile, bprm, buffer,
++ profile_transition(subj_cred, profile, bprm,
++ buffer,
+ cond, unsafe));
+
+ } else {
+ /* TODO: determine how much we want to loosen this */
+ error = fn_for_each_in_ns(label, profile,
+- profile_onexec(profile, onexec, stack, bprm,
++ profile_onexec(subj_cred, profile, onexec, stack, bprm,
+ buffer, cond, unsafe));
+ if (error)
+ return ERR_PTR(error);
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_label_merge(&profile->label, onexec,
+ GFP_KERNEL),
+- profile_transition(profile, bprm, buffer,
++ profile_transition(subj_cred, profile, bprm,
++ buffer,
+ cond, unsafe));
+ }
+
+@@ -838,7 +852,8 @@ static struct aa_label *handle_onexec(struct aa_label *label,
+
+ /* TODO: get rid of GLOBAL_ROOT_UID */
+ error = fn_for_each_in_ns(label, profile,
+- aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
++ aa_audit_file(subj_cred, profile, &nullperms,
++ OP_CHANGE_ONEXEC,
+ AA_MAY_ONEXEC, bprm->filename, NULL,
+ onexec, GLOBAL_ROOT_UID,
+ "failed to build target label", -ENOMEM));
+@@ -857,6 +872,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+ {
+ struct aa_task_ctx *ctx;
+ struct aa_label *label, *new = NULL;
++ const struct cred *subj_cred;
+ struct aa_profile *profile;
+ char *buffer = NULL;
+ const char *info = NULL;
+@@ -869,6 +885,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+ file_inode(bprm->file)->i_mode
+ };
+
++ subj_cred = current_cred();
+ ctx = task_ctx(current);
+ AA_BUG(!cred_label(bprm->cred));
+ AA_BUG(!ctx);
+@@ -895,11 +912,12 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+
+ /* Test for onexec first as onexec override other x transitions. */
+ if (ctx->onexec)
+- new = handle_onexec(label, ctx->onexec, ctx->token,
++ new = handle_onexec(subj_cred, label, ctx->onexec, ctx->token,
+ bprm, buffer, &cond, &unsafe);
+ else
+ new = fn_label_build(label, profile, GFP_KERNEL,
+- profile_transition(profile, bprm, buffer,
++ profile_transition(subj_cred, profile, bprm,
++ buffer,
+ &cond, &unsafe));
+
+ AA_BUG(!new);
+@@ -934,7 +952,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+
+ if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
+ /* TODO: test needs to be profile of label to new */
+- error = may_change_ptraced_domain(new, &info);
++ error = may_change_ptraced_domain(bprm->cred, new, &info);
+ if (error)
+ goto audit;
+ }
+@@ -971,7 +989,8 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+
+ audit:
+ error = fn_for_each(label, profile,
+- aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
++ aa_audit_file(current_cred(), profile, &nullperms,
++ OP_EXEC, MAY_EXEC,
+ bprm->filename, NULL, new,
+ vfsuid_into_kuid(vfsuid), info, error));
+ aa_put_label(new);
+@@ -987,7 +1006,8 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
+ *
+ * Returns: label for hat transition OR ERR_PTR. Does NOT return NULL
+ */
+-static struct aa_label *build_change_hat(struct aa_profile *profile,
++static struct aa_label *build_change_hat(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const char *name, bool sibling)
+ {
+ struct aa_profile *root, *hat = NULL;
+@@ -1019,7 +1039,8 @@ static struct aa_label *build_change_hat(struct aa_profile *profile,
+ aa_put_profile(root);
+
+ audit:
+- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
++ aa_audit_file(subj_cred, profile, &nullperms, OP_CHANGE_HAT,
++ AA_MAY_CHANGEHAT,
+ name, hat ? hat->base.hname : NULL,
+ hat ? &hat->label : NULL, GLOBAL_ROOT_UID, info,
+ error);
+@@ -1035,7 +1056,8 @@ static struct aa_label *build_change_hat(struct aa_profile *profile,
+ *
+ * Returns: label for hat transition or ERR_PTR. Does not return NULL
+ */
+-static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
++static struct aa_label *change_hat(const struct cred *subj_cred,
++ struct aa_label *label, const char *hats[],
+ int count, int flags)
+ {
+ struct aa_profile *profile, *root, *hat = NULL;
+@@ -1111,7 +1133,8 @@ static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
+ */
+ /* TODO: get rid of GLOBAL_ROOT_UID */
+ if (count > 1 || COMPLAIN_MODE(profile)) {
+- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
++ aa_audit_file(subj_cred, profile, &nullperms,
++ OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT, name, NULL, NULL,
+ GLOBAL_ROOT_UID, info, error);
+ }
+@@ -1120,7 +1143,8 @@ static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
+
+ build:
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+- build_change_hat(profile, name, sibling),
++ build_change_hat(subj_cred, profile, name,
++ sibling),
+ aa_get_label(&profile->label));
+ if (!new) {
+ info = "label build failed";
+@@ -1150,7 +1174,7 @@ static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
+ */
+ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ {
+- const struct cred *cred;
++ const struct cred *subj_cred;
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct aa_label *label, *previous, *new = NULL, *target = NULL;
+ struct aa_profile *profile;
+@@ -1159,8 +1183,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ int error = 0;
+
+ /* released below */
+- cred = get_current_cred();
+- label = aa_get_newest_cred_label(cred);
++ subj_cred = get_current_cred();
++ label = aa_get_newest_cred_label(subj_cred);
+ previous = aa_get_newest_label(ctx->previous);
+
+ /*
+@@ -1180,7 +1204,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ }
+
+ if (count) {
+- new = change_hat(label, hats, count, flags);
++ new = change_hat(subj_cred, label, hats, count, flags);
+ AA_BUG(!new);
+ if (IS_ERR(new)) {
+ error = PTR_ERR(new);
+@@ -1189,7 +1213,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ goto out;
+ }
+
+- error = may_change_ptraced_domain(new, &info);
++	/* the target cred is the same as the current cred except for the new label */
++ error = may_change_ptraced_domain(subj_cred, new, &info);
+ if (error)
+ goto fail;
+
+@@ -1242,7 +1267,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+ aa_put_label(new);
+ aa_put_label(previous);
+ aa_put_label(label);
+- put_cred(cred);
++ put_cred(subj_cred);
+
+ return error;
+
+@@ -1252,7 +1277,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+
+ fail:
+ fn_for_each_in_ns(label, profile,
+- aa_audit_file(profile, &perms, OP_CHANGE_HAT,
++ aa_audit_file(subj_cred, profile, &perms, OP_CHANGE_HAT,
+ AA_MAY_CHANGEHAT, NULL, NULL, target,
+ GLOBAL_ROOT_UID, info, error));
+
+@@ -1261,6 +1286,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
+
+
+ static int change_profile_perms_wrapper(const char *op, const char *name,
++ const struct cred *subj_cred,
+ struct aa_profile *profile,
+ struct aa_label *target, bool stack,
+ u32 request, struct aa_perms *perms)
+@@ -1275,7 +1301,8 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
+ rules->file.start[AA_CLASS_FILE],
+ perms);
+ if (error)
+- error = aa_audit_file(profile, perms, op, request, name,
++ error = aa_audit_file(subj_cred, profile, perms, op, request,
++ name,
+ NULL, target, GLOBAL_ROOT_UID, info,
+ error);
+
+@@ -1304,6 +1331,7 @@ int aa_change_profile(const char *fqname, int flags)
+ const char *auditname = fqname; /* retain leading & if stack */
+ bool stack = flags & AA_CHANGE_STACK;
+ struct aa_task_ctx *ctx = task_ctx(current);
++ const struct cred *subj_cred = get_current_cred();
+ int error = 0;
+ char *op;
+ u32 request;
+@@ -1381,6 +1409,7 @@ int aa_change_profile(const char *fqname, int flags)
+ */
+ error = fn_for_each_in_ns(label, profile,
+ change_profile_perms_wrapper(op, auditname,
++ subj_cred,
+ profile, target, stack,
+ request, &perms));
+ if (error)
+@@ -1391,7 +1420,7 @@ int aa_change_profile(const char *fqname, int flags)
+
+ check:
+ /* check if tracing task is allowed to trace target domain */
+- error = may_change_ptraced_domain(target, &info);
++ error = may_change_ptraced_domain(subj_cred, target, &info);
+ if (error && !fn_for_each_in_ns(label, profile,
+ COMPLAIN_MODE(profile)))
+ goto audit;
+@@ -1451,7 +1480,8 @@ int aa_change_profile(const char *fqname, int flags)
+
+ audit:
+ error = fn_for_each_in_ns(label, profile,
+- aa_audit_file(profile, &perms, op, request, auditname,
++ aa_audit_file(subj_cred,
++ profile, &perms, op, request, auditname,
+ NULL, new ? new : target,
+ GLOBAL_ROOT_UID, info, error));
+
+@@ -1459,6 +1489,7 @@ int aa_change_profile(const char *fqname, int flags)
+ aa_put_label(new);
+ aa_put_label(target);
+ aa_put_label(label);
++ put_cred(subj_cred);
+
+ return error;
+ }
+diff --git a/security/apparmor/file.c b/security/apparmor/file.c
+index 698b124e649f6d..6fd21324a097f6 100644
+--- a/security/apparmor/file.c
++++ b/security/apparmor/file.c
+@@ -44,38 +44,40 @@ static u32 map_mask_to_chr_mask(u32 mask)
+ static void file_audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
+- kuid_t fsuid = current_fsuid();
++ struct apparmor_audit_data *ad = aad(sa);
++ kuid_t fsuid = ad->subj_cred ? ad->subj_cred->fsuid : current_fsuid();
+ char str[10];
+
+- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
++ if (ad->request & AA_AUDIT_FILE_MASK) {
+ aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
+- map_mask_to_chr_mask(aad(sa)->request));
++ map_mask_to_chr_mask(ad->request));
+ audit_log_format(ab, " requested_mask=\"%s\"", str);
+ }
+- if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
++ if (ad->denied & AA_AUDIT_FILE_MASK) {
+ aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
+- map_mask_to_chr_mask(aad(sa)->denied));
++ map_mask_to_chr_mask(ad->denied));
+ audit_log_format(ab, " denied_mask=\"%s\"", str);
+ }
+- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
++ if (ad->request & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " fsuid=%d",
+ from_kuid(&init_user_ns, fsuid));
+ audit_log_format(ab, " ouid=%d",
+- from_kuid(&init_user_ns, aad(sa)->fs.ouid));
++ from_kuid(&init_user_ns, ad->fs.ouid));
+ }
+
+- if (aad(sa)->peer) {
++ if (ad->peer) {
+ audit_log_format(ab, " target=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAG_VIEW_SUBNS, GFP_KERNEL);
+- } else if (aad(sa)->fs.target) {
++ } else if (ad->fs.target) {
+ audit_log_format(ab, " target=");
+- audit_log_untrustedstring(ab, aad(sa)->fs.target);
++ audit_log_untrustedstring(ab, ad->fs.target);
+ }
+ }
+
+ /**
+ * aa_audit_file - handle the auditing of file operations
++ * @subj_cred: cred of the subject
+ * @profile: the profile being enforced (NOT NULL)
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @op: operation being mediated
+@@ -89,59 +91,74 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
+ *
+ * Returns: %0 or error on failure
+ */
+-int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
++int aa_audit_file(const struct cred *subj_cred,
++ struct aa_profile *profile, struct aa_perms *perms,
+ const char *op, u32 request, const char *name,
+ const char *target, struct aa_label *tlabel,
+ kuid_t ouid, const char *info, int error)
+ {
+ int type = AUDIT_APPARMOR_AUTO;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
+-
+- sa.u.tsk = NULL;
+- aad(&sa)->request = request;
+- aad(&sa)->name = name;
+- aad(&sa)->fs.target = target;
+- aad(&sa)->peer = tlabel;
+- aad(&sa)->fs.ouid = ouid;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
+- sa.u.tsk = NULL;
+-
+- if (likely(!aad(&sa)->error)) {
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
++
++ ad.subj_cred = subj_cred;
++ ad.request = request;
++ ad.name = name;
++ ad.fs.target = target;
++ ad.peer = tlabel;
++ ad.fs.ouid = ouid;
++ ad.info = info;
++ ad.error = error;
++ ad.common.u.tsk = NULL;
++
++ if (likely(!ad.error)) {
+ u32 mask = perms->audit;
+
+ if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+ mask = 0xffff;
+
+ /* mask off perms that are not being force audited */
+- aad(&sa)->request &= mask;
++ ad.request &= mask;
+
+- if (likely(!aad(&sa)->request))
++ if (likely(!ad.request))
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else {
+ /* only report permissions that were denied */
+- aad(&sa)->request = aad(&sa)->request & ~perms->allow;
+- AA_BUG(!aad(&sa)->request);
++ ad.request = ad.request & ~perms->allow;
++ AA_BUG(!ad.request);
+
+- if (aad(&sa)->request & perms->kill)
++ if (ad.request & perms->kill)
+ type = AUDIT_APPARMOR_KILL;
+
+ /* quiet known rejects, assumes quiet and kill do not overlap */
+- if ((aad(&sa)->request & perms->quiet) &&
++ if ((ad.request & perms->quiet) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL)
+- aad(&sa)->request &= ~perms->quiet;
++ ad.request &= ~perms->quiet;
+
+- if (!aad(&sa)->request)
+- return aad(&sa)->error;
++ if (!ad.request)
++ return ad.error;
+ }
+
+- aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
+- return aa_audit(type, profile, &sa, file_audit_cb);
++ ad.denied = ad.request & ~perms->allow;
++ return aa_audit(type, profile, &ad, file_audit_cb);
+ }
+
+-static int path_name(const char *op, struct aa_label *label,
++/**
++ * is_deleted - test if a file has been completely unlinked
++ * @dentry: dentry of file to test for deletion (NOT NULL)
++ *
++ * Returns: true if deleted else false
++ */
++static inline bool is_deleted(struct dentry *dentry)
++{
++ if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
++ return true;
++ return false;
++}
++
++static int path_name(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ const struct path *path, int flags, char *buffer,
+ const char **name, struct path_cond *cond, u32 request)
+ {
+@@ -153,7 +170,8 @@ static int path_name(const char *op, struct aa_label *label,
+ labels_profile(label)->disconnected);
+ if (error) {
+ fn_for_each_confined(label, profile,
+- aa_audit_file(profile, &nullperms, op, request, *name,
++ aa_audit_file(subj_cred,
++ profile, &nullperms, op, request, *name,
+ NULL, NULL, cond->uid, info, error));
+ return error;
+ }
+@@ -207,9 +225,9 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
+ return state;
+ }
+
+-static int __aa_path_perm(const char *op, struct aa_profile *profile,
+- const char *name, u32 request,
+- struct path_cond *cond, int flags,
++static int __aa_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_profile *profile, const char *name,
++ u32 request, struct path_cond *cond, int flags,
+ struct aa_perms *perms)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -222,12 +240,14 @@ static int __aa_path_perm(const char *op, struct aa_profile *profile,
+ name, cond, perms);
+ if (request & ~perms->allow)
+ e = -EACCES;
+- return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
++ return aa_audit_file(subj_cred,
++ profile, perms, op, request, name, NULL, NULL,
+ cond->uid, NULL, e);
+ }
+
+
+-static int profile_path_perm(const char *op, struct aa_profile *profile,
++static int profile_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *path, char *buffer, u32 request,
+ struct path_cond *cond, int flags,
+ struct aa_perms *perms)
+@@ -238,18 +258,19 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
+ if (profile_unconfined(profile))
+ return 0;
+
+- error = path_name(op, &profile->label, path,
++ error = path_name(op, subj_cred, &profile->label, path,
+ flags | profile->path_flags, buffer, &name, cond,
+ request);
+ if (error)
+ return error;
+- return __aa_path_perm(op, profile, name, request, cond, flags,
+- perms);
++ return __aa_path_perm(op, subj_cred, profile, name, request, cond,
++ flags, perms);
+ }
+
+ /**
+ * aa_path_perm - do permissions check & audit for @path
+ * @op: operation being checked
++ * @subj_cred: subject cred
+ * @label: profile being enforced (NOT NULL)
+ * @path: path to check permissions of (NOT NULL)
+ * @flags: any additional path flags beyond what the profile specifies
+@@ -258,7 +279,8 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
+ *
+ * Returns: %0 else error if access denied or other error
+ */
+-int aa_path_perm(const char *op, struct aa_label *label,
++int aa_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ const struct path *path, int flags, u32 request,
+ struct path_cond *cond)
+ {
+@@ -273,8 +295,8 @@ int aa_path_perm(const char *op, struct aa_label *label,
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+- profile_path_perm(op, profile, path, buffer, request,
+- cond, flags, &perms));
++ profile_path_perm(op, subj_cred, profile, path, buffer,
++ request, cond, flags, &perms));
+
+ aa_put_buffer(buffer);
+
+@@ -301,7 +323,8 @@ static inline bool xindex_is_subset(u32 link, u32 target)
+ return true;
+ }
+
+-static int profile_path_link(struct aa_profile *profile,
++static int profile_path_link(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *link, char *buffer,
+ const struct path *target, char *buffer2,
+ struct path_cond *cond)
+@@ -315,13 +338,15 @@ static int profile_path_link(struct aa_profile *profile,
+ aa_state_t state;
+ int error;
+
+- error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
++ error = path_name(OP_LINK, subj_cred, &profile->label, link,
++ profile->path_flags,
+ buffer, &lname, cond, AA_MAY_LINK);
+ if (error)
+ goto audit;
+
+ /* buffer2 freed below, tname is pointer in buffer2 */
+- error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
++ error = path_name(OP_LINK, subj_cred, &profile->label, target,
++ profile->path_flags,
+ buffer2, &tname, cond, AA_MAY_LINK);
+ if (error)
+ goto audit;
+@@ -381,12 +406,14 @@ static int profile_path_link(struct aa_profile *profile,
+ error = 0;
+
+ audit:
+- return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
++ return aa_audit_file(subj_cred,
++ profile, &lperms, OP_LINK, request, lname, tname,
+ NULL, cond->uid, info, error);
+ }
+
+ /**
+ * aa_path_link - Handle hard link permission check
++ * @subj_cred: subject cred
+ * @label: the label being enforced (NOT NULL)
+ * @old_dentry: the target dentry (NOT NULL)
+ * @new_dir: directory the new link will be created in (NOT NULL)
+@@ -403,7 +430,8 @@ static int profile_path_link(struct aa_profile *profile,
+ *
+ * Returns: %0 if allowed else error
+ */
+-int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
++int aa_path_link(const struct cred *subj_cred,
++ struct aa_label *label, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+ {
+ struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+@@ -424,8 +452,8 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+ goto out;
+
+ error = fn_for_each_confined(label, profile,
+- profile_path_link(profile, &link, buffer, &target,
+- buffer2, &cond));
++ profile_path_link(subj_cred, profile, &link, buffer,
++ &target, buffer2, &cond));
+ out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(buffer2);
+@@ -453,7 +481,8 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
+ spin_unlock(&fctx->lock);
+ }
+
+-static int __file_path_perm(const char *op, struct aa_label *label,
++static int __file_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied, bool in_atomic)
+ {
+@@ -480,7 +509,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
+
+ /* check every profile in task label not in current cache */
+ error = fn_for_each_not_in_set(flabel, label, profile,
+- profile_path_perm(op, profile, &file->f_path, buffer,
++ profile_path_perm(op, subj_cred, profile,
++ &file->f_path, buffer,
+ request, &cond, flags, &perms));
+ if (denied && !error) {
+ /*
+@@ -493,12 +523,14 @@ static int __file_path_perm(const char *op, struct aa_label *label,
+ */
+ if (label == flabel)
+ error = fn_for_each(label, profile,
+- profile_path_perm(op, profile, &file->f_path,
++ profile_path_perm(op, subj_cred,
++ profile, &file->f_path,
+ buffer, request, &cond, flags,
+ &perms));
+ else
+ error = fn_for_each_not_in_set(label, flabel, profile,
+- profile_path_perm(op, profile, &file->f_path,
++ profile_path_perm(op, subj_cred,
++ profile, &file->f_path,
+ buffer, request, &cond, flags,
+ &perms));
+ }
+@@ -510,7 +542,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
+ return error;
+ }
+
+-static int __file_sock_perm(const char *op, struct aa_label *label,
++static int __file_sock_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied)
+ {
+@@ -524,11 +557,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
+ return 0;
+
+ /* TODO: improve to skip profiles cached in flabel */
+- error = aa_sock_file_perm(label, op, request, sock);
++ error = aa_sock_file_perm(subj_cred, label, op, request, sock);
+ if (denied) {
+ /* TODO: improve to skip profiles checked above */
+ /* check every profile in file label to is cached */
+- last_error(error, aa_sock_file_perm(flabel, op, request, sock));
++ last_error(error, aa_sock_file_perm(subj_cred, flabel, op,
++ request, sock));
+ }
+ if (!error)
+ update_file_ctx(file_ctx(file), label, request);
+@@ -539,6 +573,7 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
+ /**
+ * aa_file_perm - do permission revalidation check & audit for @file
+ * @op: operation being checked
++ * @subj_cred: subject cred
+ * @label: label being enforced (NOT NULL)
+ * @file: file to revalidate access permissions on (NOT NULL)
+ * @request: requested permissions
+@@ -546,7 +581,8 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
+ *
+ * Returns: %0 if access allowed else error
+ */
+-int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
++int aa_file_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label, struct file *file,
+ u32 request, bool in_atomic)
+ {
+ struct aa_file_ctx *fctx;
+@@ -582,19 +618,19 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
+ /* TODO: label cross check */
+
+ if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
+- error = __file_path_perm(op, label, flabel, file, request,
+- denied, in_atomic);
++ error = __file_path_perm(op, subj_cred, label, flabel, file,
++ request, denied, in_atomic);
+
+ else if (S_ISSOCK(file_inode(file)->i_mode))
+- error = __file_sock_perm(op, label, flabel, file, request,
+- denied);
++ error = __file_sock_perm(op, subj_cred, label, flabel, file,
++ request, denied);
+ aa_put_label(flabel);
+
+ done:
+ return error;
+ }
+
+-static void revalidate_tty(struct aa_label *label)
++static void revalidate_tty(const struct cred *subj_cred, struct aa_label *label)
+ {
+ struct tty_struct *tty;
+ int drop_tty = 0;
+@@ -612,8 +648,8 @@ static void revalidate_tty(struct aa_label *label)
+ struct tty_file_private, list);
+ file = file_priv->file;
+
+- if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
+- IN_ATOMIC))
++ if (aa_file_perm(OP_INHERIT, subj_cred, label, file,
++ MAY_READ | MAY_WRITE, IN_ATOMIC))
+ drop_tty = 1;
+ }
+ spin_unlock(&tty->files_lock);
+@@ -623,12 +659,17 @@ static void revalidate_tty(struct aa_label *label)
+ no_tty();
+ }
+
++struct cred_label {
++ const struct cred *cred;
++ struct aa_label *label;
++};
++
+ static int match_file(const void *p, struct file *file, unsigned int fd)
+ {
+- struct aa_label *label = (struct aa_label *)p;
++ struct cred_label *cl = (struct cred_label *)p;
+
+- if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
+- IN_ATOMIC))
++ if (aa_file_perm(OP_INHERIT, cl->cred, cl->label, file,
++ aa_map_file_to_perms(file), IN_ATOMIC))
+ return fd + 1;
+ return 0;
+ }
+@@ -638,13 +679,17 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
+ void aa_inherit_files(const struct cred *cred, struct files_struct *files)
+ {
+ struct aa_label *label = aa_get_newest_cred_label(cred);
++ struct cred_label cl = {
++ .cred = cred,
++ .label = label,
++ };
+ struct file *devnull = NULL;
+ unsigned int n;
+
+- revalidate_tty(label);
++ revalidate_tty(cred, label);
+
+ /* Revalidate access to inherited open files. */
+- n = iterate_fd(files, 0, match_file, label);
++ n = iterate_fd(files, 0, match_file, &cl);
+ if (!n) /* none found? */
+ goto out;
+
+@@ -654,7 +699,7 @@ void aa_inherit_files(const struct cred *cred, struct files_struct *files)
+ /* replace all the matching ones with this */
+ do {
+ replace_fd(n - 1, devnull, 0);
+- } while ((n = iterate_fd(files, n, match_file, label)) != 0);
++ } while ((n = iterate_fd(files, n, match_file, &cl)) != 0);
+ if (devnull)
+ fput(devnull);
+ out:
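
The security/apparmor/file.c hunks above thread an explicit subj_cred through every permission check; file_audit_cb() now prefers the credential recorded with the audit data and falls back to current_fsuid() only when none was captured. A minimal standalone sketch of that fallback pattern, using invented stand-in types rather than the kernel's struct cred API:

#include <stdio.h>

/* Illustrative stand-ins only; not the kernel's types or helpers. */
struct cred { unsigned int fsuid; };

static const struct cred ambient_cred = { .fsuid = 1000 };

/* Models current_fsuid(): the ambient caller credential. */
static unsigned int current_fsuid(void)
{
	return ambient_cred.fsuid;
}

/* Mirrors the shape of the patched file_audit_cb(): use the subject
 * credential captured with the request when present, otherwise fall
 * back to the ambient one. */
static unsigned int audit_fsuid(const struct cred *subj_cred)
{
	return subj_cred ? subj_cred->fsuid : current_fsuid();
}

int main(void)
{
	const struct cred recorded = { .fsuid = 0 };

	printf("%u\n", audit_fsuid(&recorded));	/* 0: recorded cred wins */
	printf("%u\n", audit_fsuid(NULL));	/* 1000: ambient fallback */
	return 0;
}
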
+diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
+index c328f07f11cd83..6e12ab5b30aed7 100644
+--- a/security/apparmor/include/audit.h
++++ b/security/apparmor/include/audit.h
+@@ -109,7 +109,8 @@ struct apparmor_audit_data {
+ int type;
+ u16 class;
+ const char *op;
+- struct aa_label *label;
++ const struct cred *subj_cred;
++ struct aa_label *subj_label;
+ const char *name;
+ const char *info;
+ u32 request;
+@@ -152,33 +153,35 @@ struct apparmor_audit_data {
+ unsigned long flags;
+ } mnt;
+ };
++
++ struct common_audit_data common;
+ };
+
+ /* macros for dealing with apparmor_audit_data structure */
+-#define aad(SA) ((SA)->apparmor_audit_data)
++#define aad(SA) (container_of(SA, struct apparmor_audit_data, common))
++#define aad_of_va(VA) aad((struct common_audit_data *)(VA))
++
+ #define DEFINE_AUDIT_DATA(NAME, T, C, X) \
+ /* TODO: cleanup audit init so we don't need _aad = {0,} */ \
+- struct apparmor_audit_data NAME ## _aad = { \
++ struct apparmor_audit_data NAME = { \
+ .class = (C), \
+ .op = (X), \
+- }; \
+- struct common_audit_data NAME = \
+- { \
+- .type = (T), \
+- .u.tsk = NULL, \
+- }; \
+- NAME.apparmor_audit_data = &(NAME ## _aad)
+-
+-void aa_audit_msg(int type, struct common_audit_data *sa,
++ .common.type = (T), \
++ .common.u.tsk = NULL, \
++ .common.apparmor_audit_data = &NAME, \
++ };
++
++void aa_audit_msg(int type, struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *));
+-int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
++int aa_audit(int type, struct aa_profile *profile,
++ struct apparmor_audit_data *ad,
+ void (*cb) (struct audit_buffer *, void *));
+
+-#define aa_audit_error(ERROR, SA, CB) \
++#define aa_audit_error(ERROR, AD, CB) \
+ ({ \
+- aad((SA))->error = (ERROR); \
+- aa_audit_msg(AUDIT_APPARMOR_ERROR, (SA), (CB)); \
+- aad((SA))->error; \
++ (AD)->error = (ERROR); \
++ aa_audit_msg(AUDIT_APPARMOR_ERROR, (AD), (CB)); \
++ (AD)->error; \
+ })
+
+
+@@ -190,7 +193,7 @@ static inline int complain_error(int error)
+ }
+
+ void aa_audit_rule_free(void *vrule);
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
+ int aa_audit_rule_known(struct audit_krule *rule);
+ int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
+
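The include/audit.h rework above stops carrying a common_audit_data alongside a separately declared apparmor_audit_data: the common struct is now embedded, and the aad() macro recovers the outer struct with container_of(). A minimal userspace sketch of that embedding pattern, with hypothetical struct names standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for common_audit_data/apparmor_audit_data. */
struct common_data {
	int type;
};

struct wrapper_data {
	const char *op;
	int error;
	struct common_data common;	/* embedded, as in the patched DEFINE_AUDIT_DATA */
};

/* Recover the wrapper from a pointer to its embedded member; this is
 * what the patched aad() macro does via container_of(). */
#define wrapper_of(SA) \
	((struct wrapper_data *)((char *)(SA) - offsetof(struct wrapper_data, common)))

int main(void)
{
	struct wrapper_data ad = { .op = "exec", .error = -13 };
	struct common_data *sa = &ad.common;	/* what a generic audit callback receives */

	printf("op=%s error=%d\n", wrapper_of(sa)->op, wrapper_of(sa)->error);
	return 0;
}

The benefit mirrors the patch: one object to define and pass, and a callback that only sees the embedded member can still reach the wrapper without a stored back-pointer.
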
+diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
+index d420e2d10b31bc..d6dcc604ec0cc2 100644
+--- a/security/apparmor/include/capability.h
++++ b/security/apparmor/include/capability.h
+@@ -36,7 +36,8 @@ struct aa_caps {
+
+ extern struct aa_sfs_entry aa_sfs_entry_caps[];
+
+-int aa_capable(struct aa_label *label, int cap, unsigned int opts);
++int aa_capable(const struct cred *subj_cred, struct aa_label *label,
++ int cap, unsigned int opts);
+
+ static inline void aa_free_cap_rules(struct aa_caps *caps)
+ {
+diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
+index 5be620af33ba0a..64dc6d1a7a05c0 100644
+--- a/security/apparmor/include/file.h
++++ b/security/apparmor/include/file.h
+@@ -108,7 +108,8 @@ struct path_cond {
+
+ #define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
+
+-int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
++int aa_audit_file(const struct cred *cred,
++ struct aa_profile *profile, struct aa_perms *perms,
+ const char *op, u32 request, const char *name,
+ const char *target, struct aa_label *tlabel, kuid_t ouid,
+ const char *info, int error);
+@@ -119,14 +120,16 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
+ const char *name, struct path_cond *cond,
+ struct aa_perms *perms);
+
+-int aa_path_perm(const char *op, struct aa_label *label,
+- const struct path *path, int flags, u32 request,
+- struct path_cond *cond);
++int aa_path_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
++ int flags, u32 request, struct path_cond *cond);
+
+-int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+- const struct path *new_dir, struct dentry *new_dentry);
++int aa_path_link(const struct cred *subj_cred, struct aa_label *label,
++ struct dentry *old_dentry, const struct path *new_dir,
++ struct dentry *new_dentry);
+
+-int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
++int aa_file_perm(const char *op, const struct cred *subj_cred,
++ struct aa_label *label, struct file *file,
+ u32 request, bool in_atomic);
+
+ void aa_inherit_files(const struct cred *cred, struct files_struct *files);
+diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
+index a1ac6ffb95e9c0..74d17052f76bcd 100644
+--- a/security/apparmor/include/ipc.h
++++ b/security/apparmor/include/ipc.h
+@@ -13,6 +13,8 @@
+
+ #include <linux/sched.h>
+
+-int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
++int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
++ const struct cred *target_cred, struct aa_label *target,
++ int sig);
+
+ #endif /* __AA_IPC_H */
+diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h
+index a710683b249651..46834f8281794d 100644
+--- a/security/apparmor/include/mount.h
++++ b/security/apparmor/include/mount.h
+@@ -25,26 +25,36 @@
+
+ #define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN)
+
+-int aa_remount(struct aa_label *label, const struct path *path,
++int aa_remount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags, void *data);
+
+-int aa_bind_mount(struct aa_label *label, const struct path *path,
++int aa_bind_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ const char *old_name, unsigned long flags);
+
+
+-int aa_mount_change_type(struct aa_label *label, const struct path *path,
++int aa_mount_change_type(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags);
+
+-int aa_move_mount(struct aa_label *label, const struct path *path,
+- const char *old_name);
++int aa_move_mount_old(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
++ const char *old_name);
++int aa_move_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *from_path,
++ const struct path *to_path);
+
+-int aa_new_mount(struct aa_label *label, const char *dev_name,
++int aa_new_mount(const struct cred *subj_cred,
++ struct aa_label *label, const char *dev_name,
+ const struct path *path, const char *type, unsigned long flags,
+ void *data);
+
+-int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags);
++int aa_umount(const struct cred *subj_cred,
++ struct aa_label *label, struct vfsmount *mnt, int flags);
+
+-int aa_pivotroot(struct aa_label *label, const struct path *old_path,
++int aa_pivotroot(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *old_path,
+ const struct path *new_path);
+
+ #endif /* __AA_MOUNT_H */
+diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
+index 6fa440b5daed8d..aa8515af677f0e 100644
+--- a/security/apparmor/include/net.h
++++ b/security/apparmor/include/net.h
+@@ -61,9 +61,9 @@ struct aa_sk_ctx {
+ LSM_AUDIT_DATA_NONE, \
+ AA_CLASS_NET, \
+ OP); \
+- NAME.u.net = &(NAME ## _net); \
+- aad(&NAME)->net.type = (T); \
+- aad(&NAME)->net.protocol = (P)
++ NAME.common.u.net = &(NAME ## _net); \
++ NAME.net.type = (T); \
++ NAME.net.protocol = (P)
+
+ #define DEFINE_AUDIT_SK(NAME, OP, SK) \
+ DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
+@@ -90,21 +90,24 @@ struct aa_secmark {
+ extern struct aa_sfs_entry aa_sfs_entry_network[];
+
+ void audit_net_cb(struct audit_buffer *ab, void *va);
+-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
++int aa_profile_af_perm(struct aa_profile *profile,
++ struct apparmor_audit_data *ad,
+ u32 request, u16 family, int type);
+-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
++int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request, u16 family,
+ int type, int protocol);
+ static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
+- struct common_audit_data *sa,
++ struct apparmor_audit_data *ad,
+ u32 request,
+ struct sock *sk)
+ {
+- return aa_profile_af_perm(profile, sa, request, sk->sk_family,
++ return aa_profile_af_perm(profile, ad, request, sk->sk_family,
+ sk->sk_type);
+ }
+ int aa_sk_perm(const char *op, u32 request, struct sock *sk);
+
+-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
++int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request,
+ struct socket *sock);
+
+ int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
+index 797a7a00644d21..83534df8939fdd 100644
+--- a/security/apparmor/include/perms.h
++++ b/security/apparmor/include/perms.h
+@@ -212,8 +212,8 @@ void aa_profile_match_label(struct aa_profile *profile,
+ int type, u32 request, struct aa_perms *perms);
+ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ u32 request, int type, u32 *deny,
+- struct common_audit_data *sa);
++ struct apparmor_audit_data *ad);
+ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+- u32 request, struct common_audit_data *sa,
++ u32 request, struct apparmor_audit_data *ad,
+ void (*cb)(struct audit_buffer *, void *));
+ #endif /* __AA_PERM_H */
+diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
+index 545f791cabdae2..fa15a5c7febb89 100644
+--- a/security/apparmor/include/policy.h
++++ b/security/apparmor/include/policy.h
+@@ -370,9 +370,12 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
+ return profile->audit;
+ }
+
+-bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns);
+-bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns);
+-int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
++bool aa_policy_view_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns);
++bool aa_policy_admin_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns);
++int aa_may_manage_policy(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns,
+ u32 mask);
+ bool aa_current_policy_view_capable(struct aa_ns *ns);
+ bool aa_current_policy_admin_capable(struct aa_ns *ns);
+diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
+index 961d85d328ea94..ad2c0da8e64fc1 100644
+--- a/security/apparmor/include/resource.h
++++ b/security/apparmor/include/resource.h
+@@ -33,7 +33,8 @@ struct aa_rlimit {
+ extern struct aa_sfs_entry aa_sfs_entry_rlimit[];
+
+ int aa_map_resource(int resource);
+-int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
++int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
++ struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim);
+
+ void __aa_transition_rlimits(struct aa_label *old, struct aa_label *new);
+diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
+index 13437d62c70f46..29ba55107b7d6e 100644
+--- a/security/apparmor/include/task.h
++++ b/security/apparmor/include/task.h
+@@ -91,7 +91,8 @@ static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
+ "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
+ "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
+
+-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
++int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
++ const struct cred *tracee_cred, struct aa_label *tracee,
+ u32 request);
+
+
+diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
+index 5acde746775f76..c0d0dbd7b4c4b3 100644
+--- a/security/apparmor/ipc.c
++++ b/security/apparmor/ipc.c
+@@ -52,31 +52,33 @@ static const char *audit_signal_mask(u32 mask)
+ static void audit_signal_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
++ if (ad->request & AA_SIGNAL_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+- audit_signal_mask(aad(sa)->request));
+- if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
++ audit_signal_mask(ad->request));
++ if (ad->denied & AA_SIGNAL_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+- audit_signal_mask(aad(sa)->denied));
++ audit_signal_mask(ad->denied));
+ }
+ }
+- if (aad(sa)->signal == SIGUNKNOWN)
++ if (ad->signal == SIGUNKNOWN)
+ audit_log_format(ab, "signal=unknown(%d)",
+- aad(sa)->unmappedsig);
+- else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
+- audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
++ ad->unmappedsig);
++ else if (ad->signal < MAXMAPPED_SIGNAME)
++ audit_log_format(ab, " signal=%s", sig_names[ad->signal]);
+ else
+ audit_log_format(ab, " signal=rtmin+%d",
+- aad(sa)->signal - SIGRT_BASE);
++ ad->signal - SIGRT_BASE);
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+
+-static int profile_signal_perm(struct aa_profile *profile,
++static int profile_signal_perm(const struct cred *cred,
++ struct aa_profile *profile,
+ struct aa_label *peer, u32 request,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+@@ -87,24 +89,29 @@ static int profile_signal_perm(struct aa_profile *profile,
+ !ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL))
+ return 0;
+
+- aad(sa)->peer = peer;
++ ad->subj_cred = cred;
++ ad->peer = peer;
+ /* TODO: secondary cache check <profile, profile, perm> */
+ state = aa_dfa_next(rules->policy.dfa,
+ rules->policy.start[AA_CLASS_SIGNAL],
+- aad(sa)->signal);
++ ad->signal);
+ aa_label_match(profile, rules, peer, state, false, request, &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+- return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_signal_cb);
+ }
+
+-int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
++int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
++ const struct cred *target_cred, struct aa_label *target,
++ int sig)
+ {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
+
+- aad(&sa)->signal = map_signal_num(sig);
+- aad(&sa)->unmappedsig = sig;
++ ad.signal = map_signal_num(sig);
++ ad.unmappedsig = sig;
+ return xcheck_labels(sender, target, profile,
+- profile_signal_perm(profile, target, MAY_WRITE, &sa),
+- profile_signal_perm(profile, sender, MAY_READ, &sa));
++ profile_signal_perm(subj_cred, profile, target,
++ MAY_WRITE, &ad),
++ profile_signal_perm(target_cred, profile, sender,
++ MAY_READ, &ad));
+ }
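
The reworked aa_may_signal() above is a symmetric cross-check: the sender's profiles need MAY_WRITE toward the target while the target's profiles need MAY_READ toward the sender, each direction evaluated against its own cred. A toy sketch of that bidirectional shape, with made-up labels and permission bits rather than AppArmor's real policy lookup:

#include <stdbool.h>
#include <stdio.h>

#define MAY_READ  0x1
#define MAY_WRITE 0x2

/* Invented stand-in for a label's policy toward its peer. */
struct label { unsigned int toward_peer; };

static bool allowed(const struct label *l, unsigned int request)
{
	return (l->toward_peer & request) == request;
}

/* Mirrors the shape of the patched aa_may_signal(): both directions
 * must pass for the signal to be delivered. */
static int may_signal(const struct label *sender, const struct label *target)
{
	if (!allowed(sender, MAY_WRITE))	/* sender -> target */
		return -13;			/* -EACCES */
	if (!allowed(target, MAY_READ))		/* target -> sender */
		return -13;
	return 0;
}

int main(void)
{
	struct label sender = { .toward_peer = MAY_WRITE };
	struct label target = { .toward_peer = MAY_READ };

	printf("%d\n", may_signal(&sender, &target));	/* 0: allowed */
	target.toward_peer = 0;
	printf("%d\n", may_signal(&sender, &target));	/* -13: denied */
	return 0;
}
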
+diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
+index a630c951bb3b80..7182a8b821fbdb 100644
+--- a/security/apparmor/lib.c
++++ b/security/apparmor/lib.c
+@@ -27,7 +27,7 @@ struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
+
+ /**
+ * aa_free_str_table - free entries str table
+- * @str: the string table to free (MAYBE NULL)
++ * @t: the string table to free (MAYBE NULL)
+ */
+ void aa_free_str_table(struct aa_str_table *t)
+ {
+@@ -41,6 +41,7 @@ void aa_free_str_table(struct aa_str_table *t)
+ kfree_sensitive(t->table[i]);
+ kfree_sensitive(t->table);
+ t->table = NULL;
++ t->size = 0;
+ }
+ }
+
+@@ -85,6 +86,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
+ /**
+ * skipn_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
++ * @n: maximum length of @str to parse; parsing stops at a \0 encountered before @n
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ * if all whitespace will return NULL
+@@ -143,10 +145,10 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
+ void aa_info_message(const char *str)
+ {
+ if (audit_enabled) {
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
+
+- aad(&sa)->info = str;
+- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
++ ad.info = str;
++ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, NULL);
+ }
+ printk(KERN_INFO "AppArmor: %s\n", str);
+ }
+@@ -281,21 +283,22 @@ void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
+ static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->request) {
++ if (ad->request) {
+ audit_log_format(ab, " requested_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
++ aa_audit_perm_mask(ab, ad->request, aa_file_perm_chrs,
+ PERMS_CHRS_MASK, aa_file_perm_names,
+ PERMS_NAMES_MASK);
+ }
+- if (aad(sa)->denied) {
++ if (ad->denied) {
+ audit_log_format(ab, "denied_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
++ aa_audit_perm_mask(ab, ad->denied, aa_file_perm_chrs,
+ PERMS_CHRS_MASK, aa_file_perm_names,
+ PERMS_NAMES_MASK);
+ }
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+
+@@ -349,21 +352,20 @@ void aa_profile_match_label(struct aa_profile *profile,
+ /* currently unused */
+ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ u32 request, int type, u32 *deny,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+ struct aa_perms perms;
+
+- aad(sa)->label = &profile->label;
+- aad(sa)->peer = &target->label;
+- aad(sa)->request = request;
++ ad->peer = &target->label;
++ ad->request = request;
+
+ aa_profile_match_label(profile, rules, &target->label, type, request,
+ &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+ *deny |= request & perms.deny;
+- return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
++ return aa_check_perms(profile, &perms, request, ad, aa_audit_perms_cb);
+ }
+
+ /**
+@@ -371,8 +373,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ * @profile: profile being checked
+ * @perms: perms computed for the request
+ * @request: requested perms
+- * @deny: Returns: explicit deny set
+- * @sa: initialized audit structure (MAY BE NULL if not auditing)
++ * @ad: initialized audit structure (MAY BE NULL if not auditing)
+ * @cb: callback fn for type specific fields (MAY BE NULL)
+ *
+ * Returns: 0 if permission else error code
+@@ -385,7 +386,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+ * with a positive value.
+ */
+ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+- u32 request, struct common_audit_data *sa,
++ u32 request, struct apparmor_audit_data *ad,
+ void (*cb)(struct audit_buffer *, void *))
+ {
+ int type, error;
+@@ -394,7 +395,7 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+ if (likely(!denied)) {
+ /* mask off perms that are not being force audited */
+ request &= perms->audit;
+- if (!request || !sa)
++ if (!request || !ad)
+ return 0;
+
+ type = AUDIT_APPARMOR_AUDIT;
+@@ -413,16 +414,16 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+ error = -ENOENT;
+
+ denied &= ~perms->quiet;
+- if (!sa || !denied)
++ if (!ad || !denied)
+ return error;
+ }
+
+- if (sa) {
+- aad(sa)->label = &profile->label;
+- aad(sa)->request = request;
+- aad(sa)->denied = denied;
+- aad(sa)->error = error;
+- aa_audit_msg(type, sa, cb);
++ if (ad) {
++ ad->subj_label = &profile->label;
++ ad->request = request;
++ ad->denied = denied;
++ ad->error = error;
++ aa_audit_msg(type, ad, cb);
+ }
+
+ if (type == AUDIT_APPARMOR_ALLOWED)
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 108eccc5ada584..5303a51eff9c10 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -116,15 +116,17 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+ {
+ struct aa_label *tracer, *tracee;
++ const struct cred *cred;
+ int error;
+
++ cred = get_task_cred(child);
++ tracee = cred_label(cred); /* ref count on cred */
+ tracer = __begin_current_label_crit_section();
+- tracee = aa_get_task_label(child);
+- error = aa_may_ptrace(tracer, tracee,
++ error = aa_may_ptrace(current_cred(), tracer, cred, tracee,
+ (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
+ : AA_PTRACE_TRACE);
+- aa_put_label(tracee);
+ __end_current_label_crit_section(tracer);
++ put_cred(cred);
+
+ return error;
+ }
+@@ -132,12 +134,15 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
+ static int apparmor_ptrace_traceme(struct task_struct *parent)
+ {
+ struct aa_label *tracer, *tracee;
++ const struct cred *cred;
+ int error;
+
+ tracee = __begin_current_label_crit_section();
+- tracer = aa_get_task_label(parent);
+- error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
+- aa_put_label(tracer);
++ cred = get_task_cred(parent);
++ tracer = cred_label(cred); /* ref count on cred */
++ error = aa_may_ptrace(cred, tracer, current_cred(), tracee,
++ AA_PTRACE_TRACE);
++ put_cred(cred);
+ __end_current_label_crit_section(tracee);
+
+ return error;
+@@ -188,7 +193,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
+
+ label = aa_get_newest_cred_label(cred);
+ if (!unconfined(label))
+- error = aa_capable(label, cap, opts);
++ error = aa_capable(cred, label, cap, opts);
+ aa_put_label(label);
+
+ return error;
+@@ -211,7 +216,8 @@ static int common_perm(const char *op, const struct path *path, u32 mask,
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+- error = aa_path_perm(op, label, path, 0, mask, cond);
++ error = aa_path_perm(op, current_cred(), label, path, 0, mask,
++ cond);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -357,7 +363,8 @@ static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label))
+- error = aa_path_link(label, old_dentry, new_dir, new_dentry);
++ error = aa_path_link(current_cred(), label, old_dentry, new_dir,
++ new_dentry);
+ end_current_label_crit_section(label);
+
+ return error;
+@@ -396,23 +403,27 @@ static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_d
+ vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
+ cond_exchange.uid = vfsuid_into_kuid(vfsuid);
+
+- error = aa_path_perm(OP_RENAME_SRC, label, &new_path, 0,
++ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
++ label, &new_path, 0,
+ MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+ AA_MAY_SETATTR | AA_MAY_DELETE,
+ &cond_exchange);
+ if (!error)
+- error = aa_path_perm(OP_RENAME_DEST, label, &old_path,
++ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
++ label, &old_path,
+ 0, MAY_WRITE | AA_MAY_SETATTR |
+ AA_MAY_CREATE, &cond_exchange);
+ }
+
+ if (!error)
+- error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
++ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
++ label, &old_path, 0,
+ MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+ AA_MAY_SETATTR | AA_MAY_DELETE,
+ &cond);
+ if (!error)
+- error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
++ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
++ label, &new_path,
+ 0, MAY_WRITE | AA_MAY_SETATTR |
+ AA_MAY_CREATE, &cond);
+
+@@ -467,7 +478,8 @@ static int apparmor_file_open(struct file *file)
+ vfsuid = i_uid_into_vfsuid(idmap, inode);
+ cond.uid = vfsuid_into_kuid(vfsuid);
+
+- error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
++ error = aa_path_perm(OP_OPEN, file->f_cred,
++ label, &file->f_path, 0,
+ aa_map_file_to_perms(file), &cond);
+ /* todo cache full allowed permissions set and state */
+ fctx->allow = aa_map_file_to_perms(file);
+@@ -507,7 +519,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask,
+ return -EACCES;
+
+ label = __begin_current_label_crit_section();
+- error = aa_file_perm(op, label, file, mask, in_atomic);
++ error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -585,23 +597,42 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ if (flags & MS_REMOUNT)
+- error = aa_remount(label, path, flags, data);
++ error = aa_remount(current_cred(), label, path, flags,
++ data);
+ else if (flags & MS_BIND)
+- error = aa_bind_mount(label, path, dev_name, flags);
++ error = aa_bind_mount(current_cred(), label, path,
++ dev_name, flags);
+ else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
+ MS_UNBINDABLE))
+- error = aa_mount_change_type(label, path, flags);
++ error = aa_mount_change_type(current_cred(), label,
++ path, flags);
+ else if (flags & MS_MOVE)
+- error = aa_move_mount(label, path, dev_name);
++ error = aa_move_mount_old(current_cred(), label, path,
++ dev_name);
+ else
+- error = aa_new_mount(label, dev_name, path, type,
+- flags, data);
++ error = aa_new_mount(current_cred(), label, dev_name,
++ path, type, flags, data);
+ }
+ __end_current_label_crit_section(label);
+
+ return error;
+ }
+
++static int apparmor_move_mount(const struct path *from_path,
++ const struct path *to_path)
++{
++ struct aa_label *label;
++ int error = 0;
++
++ label = __begin_current_label_crit_section();
++ if (!unconfined(label))
++ error = aa_move_mount(current_cred(), label, from_path,
++ to_path);
++ __end_current_label_crit_section(label);
++
++ return error;
++}
++
+ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
+ {
+ struct aa_label *label;
+@@ -609,7 +640,7 @@ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
+
+ label = __begin_current_label_crit_section();
+ if (!unconfined(label))
+- error = aa_umount(label, mnt, flags);
++ error = aa_umount(current_cred(), label, mnt, flags);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -623,7 +654,7 @@ static int apparmor_sb_pivotroot(const struct path *old_path,
+
+ label = aa_get_current_label();
+ if (!unconfined(label))
+- error = aa_pivotroot(label, old_path, new_path);
++ error = aa_pivotroot(current_cred(), label, old_path, new_path);
+ aa_put_label(label);
+
+ return error;
+@@ -662,7 +693,7 @@ static int apparmor_setprocattr(const char *name, void *value,
+ char *command, *largs = NULL, *args = value;
+ size_t arg_size;
+ int error;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
+ OP_SETPROCATTR);
+
+ if (size == 0)
+@@ -722,11 +753,11 @@ static int apparmor_setprocattr(const char *name, void *value,
+ return error;
+
+ fail:
+- aad(&sa)->label = begin_current_label_crit_section();
+- aad(&sa)->info = name;
+- aad(&sa)->error = error = -EINVAL;
+- aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+- end_current_label_crit_section(aad(&sa)->label);
++ ad.subj_label = begin_current_label_crit_section();
++ ad.info = name;
++ ad.error = error = -EINVAL;
++ aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, NULL);
++ end_current_label_crit_section(ad.subj_label);
+ goto out;
+ }
+
+@@ -785,7 +816,8 @@ static int apparmor_task_setrlimit(struct task_struct *task,
+ int error = 0;
+
+ if (!unconfined(label))
+- error = aa_task_setrlimit(label, task, resource, new_rlim);
++ error = aa_task_setrlimit(current_cred(), label, task,
++ resource, new_rlim);
+ __end_current_label_crit_section(label);
+
+ return error;
+@@ -794,26 +826,26 @@ static int apparmor_task_setrlimit(struct task_struct *task,
+ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+ {
++ const struct cred *tc;
+ struct aa_label *cl, *tl;
+ int error;
+
++ tc = get_task_cred(target);
++ tl = aa_get_newest_cred_label(tc);
+ if (cred) {
+ /*
+ * Dealing with USB IO specific behavior
+ */
+ cl = aa_get_newest_cred_label(cred);
+- tl = aa_get_task_label(target);
+- error = aa_may_signal(cl, tl, sig);
++ error = aa_may_signal(cred, cl, tc, tl, sig);
+ aa_put_label(cl);
+- aa_put_label(tl);
+- return error;
++ } else {
++ cl = __begin_current_label_crit_section();
++ error = aa_may_signal(current_cred(), cl, tc, tl, sig);
++ __end_current_label_crit_section(cl);
+ }
+-
+- cl = __begin_current_label_crit_section();
+- tl = aa_get_task_label(target);
+- error = aa_may_signal(cl, tl, sig);
+ aa_put_label(tl);
+- __end_current_label_crit_section(cl);
++ put_cred(tc);
+
+ return error;
+ }
+@@ -879,7 +911,8 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
+ if (!(kern || unconfined(label)))
+ error = af_select(family,
+ create_perm(label, family, type, protocol),
+- aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
++ aa_af_perm(current_cred(), label,
++ OP_CREATE, AA_MAY_CREATE,
+ family, type, protocol));
+ end_current_label_crit_section(label);
+
+@@ -1097,6 +1130,13 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ if (!skb->secmark)
+ return 0;
+
++	/*
++	 * If we reach here before the socket_post_create hook has been
++	 * called, the label is still NULL, so drop the packet.
++	 */
++ if (!ctx->label)
++ return -EACCES;
++
+ return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
+ skb->secmark, sk);
+ }
+@@ -1221,6 +1261,7 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(capget, apparmor_capget),
+ LSM_HOOK_INIT(capable, apparmor_capable),
+
++ LSM_HOOK_INIT(move_mount, apparmor_move_mount),
+ LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
+ LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
+ LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
+diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
+index cdfa430ae2161f..cb0fdbdb82d944 100644
+--- a/security/apparmor/mount.c
++++ b/security/apparmor/mount.c
+@@ -86,32 +86,34 @@ static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->mnt.type) {
++ if (ad->mnt.type) {
+ audit_log_format(ab, " fstype=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.type);
++ audit_log_untrustedstring(ab, ad->mnt.type);
+ }
+- if (aad(sa)->mnt.src_name) {
++ if (ad->mnt.src_name) {
+ audit_log_format(ab, " srcname=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
++ audit_log_untrustedstring(ab, ad->mnt.src_name);
+ }
+- if (aad(sa)->mnt.trans) {
++ if (ad->mnt.trans) {
+ audit_log_format(ab, " trans=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
++ audit_log_untrustedstring(ab, ad->mnt.trans);
+ }
+- if (aad(sa)->mnt.flags) {
++ if (ad->mnt.flags) {
+ audit_log_format(ab, " flags=\"");
+- audit_mnt_flags(ab, aad(sa)->mnt.flags);
++ audit_mnt_flags(ab, ad->mnt.flags);
+ audit_log_format(ab, "\"");
+ }
+- if (aad(sa)->mnt.data) {
++ if (ad->mnt.data) {
+ audit_log_format(ab, " options=");
+- audit_log_untrustedstring(ab, aad(sa)->mnt.data);
++ audit_log_untrustedstring(ab, ad->mnt.data);
+ }
+ }
+
+ /**
+ * audit_mount - handle the auditing of mount operations
++ * @subj_cred: cred of the subject
+ * @profile: the profile being enforced (NOT NULL)
+ * @op: operation being mediated (NOT NULL)
+ * @name: name of object being mediated (MAYBE NULL)
+@@ -127,14 +129,15 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ *
+ * Returns: %0 or error on failure
+ */
+-static int audit_mount(struct aa_profile *profile, const char *op,
++static int audit_mount(const struct cred *subj_cred,
++ struct aa_profile *profile, const char *op,
+ const char *name, const char *src_name,
+ const char *type, const char *trans,
+ unsigned long flags, const void *data, u32 request,
+ struct aa_perms *perms, const char *info, int error)
+ {
+ int audit_type = AUDIT_APPARMOR_AUTO;
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
+
+ if (likely(!error)) {
+ u32 mask = perms->audit;
+@@ -165,17 +168,18 @@ static int audit_mount(struct aa_profile *profile, const char *op,
+ return error;
+ }
+
+- aad(&sa)->name = name;
+- aad(&sa)->mnt.src_name = src_name;
+- aad(&sa)->mnt.type = type;
+- aad(&sa)->mnt.trans = trans;
+- aad(&sa)->mnt.flags = flags;
++ ad.subj_cred = subj_cred;
++ ad.name = name;
++ ad.mnt.src_name = src_name;
++ ad.mnt.type = type;
++ ad.mnt.trans = trans;
++ ad.mnt.flags = flags;
+ if (data && (perms->audit & AA_AUDIT_DATA))
+- aad(&sa)->mnt.data = data;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
++ ad.mnt.data = data;
++ ad.info = info;
++ ad.error = error;
+
+- return aa_audit(audit_type, profile, &sa, audit_cb);
++ return aa_audit(audit_type, profile, &ad, audit_cb);
+ }
+
+ /**
+@@ -283,6 +287,7 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
+
+ /**
+ * match_mnt_path_str - handle path matching for mount
++ * @subj_cred: cred of confined subject
+ * @profile: the confining profile
+ * @mntpath: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+@@ -295,7 +300,8 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
+ *
+ * Returns: 0 on success else error
+ */
+-static int match_mnt_path_str(struct aa_profile *profile,
++static int match_mnt_path_str(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *mntpath, char *buffer,
+ const char *devname, const char *type,
+ unsigned long flags, void *data, bool binary,
+@@ -336,12 +342,14 @@ static int match_mnt_path_str(struct aa_profile *profile,
+ error = 0;
+
+ audit:
+- return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
++ return audit_mount(subj_cred, profile, OP_MOUNT, mntpnt, devname,
++ type, NULL,
+ flags, data, AA_MAY_MOUNT, &perms, info, error);
+ }
+
+ /**
+ * match_mnt - handle path matching for mount
++ * @subj_cred: cred of the subject
+ * @profile: the confining profile
+ * @path: for the mntpnt (NOT NULL)
+ * @buffer: buffer to be used to lookup mntpath
+@@ -354,7 +362,8 @@ static int match_mnt_path_str(struct aa_profile *profile,
+ *
+ * Returns: 0 on success else error
+ */
+-static int match_mnt(struct aa_profile *profile, const struct path *path,
++static int match_mnt(const struct cred *subj_cred,
++ struct aa_profile *profile, const struct path *path,
+ char *buffer, const struct path *devpath, char *devbuffer,
+ const char *type, unsigned long flags, void *data,
+ bool binary)
+@@ -378,11 +387,12 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
+ devname = ERR_PTR(error);
+ }
+
+- return match_mnt_path_str(profile, path, buffer, devname, type, flags,
+- data, binary, info);
++ return match_mnt_path_str(subj_cred, profile, path, buffer, devname,
++ type, flags, data, binary, info);
+ }
+
+-int aa_remount(struct aa_label *label, const struct path *path,
++int aa_remount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags, void *data)
+ {
+ struct aa_profile *profile;
+@@ -399,14 +409,16 @@ int aa_remount(struct aa_label *label, const struct path *path,
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, NULL, NULL, NULL,
++ match_mnt(subj_cred, profile, path, buffer, NULL,
++ NULL, NULL,
+ flags, data, binary));
+ aa_put_buffer(buffer);
+
+ return error;
+ }
+
+-int aa_bind_mount(struct aa_label *label, const struct path *path,
++int aa_bind_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ const char *dev_name, unsigned long flags)
+ {
+ struct aa_profile *profile;
+@@ -433,8 +445,8 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
+ goto out;
+
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, &old_path, old_buffer,
+- NULL, flags, NULL, false));
++ match_mnt(subj_cred, profile, path, buffer, &old_path,
++ old_buffer, NULL, flags, NULL, false));
+ out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
+@@ -443,7 +455,8 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
+ return error;
+ }
+
+-int aa_mount_change_type(struct aa_label *label, const struct path *path,
++int aa_mount_change_type(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *path,
+ unsigned long flags)
+ {
+ struct aa_profile *profile;
+@@ -461,50 +474,67 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
+ if (!buffer)
+ return -ENOMEM;
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, NULL, NULL, NULL,
++ match_mnt(subj_cred, profile, path, buffer, NULL,
++ NULL, NULL,
+ flags, NULL, false));
+ aa_put_buffer(buffer);
+
+ return error;
+ }
+
+-int aa_move_mount(struct aa_label *label, const struct path *path,
+- const char *orig_name)
++int aa_move_mount(const struct cred *subj_cred,
++ struct aa_label *label, const struct path *from_path,
++ const struct path *to_path)
+ {
+ struct aa_profile *profile;
+- char *buffer = NULL, *old_buffer = NULL;
+- struct path old_path;
++ char *to_buffer = NULL, *from_buffer = NULL;
+ int error;
+
+ AA_BUG(!label);
+- AA_BUG(!path);
++ AA_BUG(!from_path);
++ AA_BUG(!to_path);
++
++ to_buffer = aa_get_buffer(false);
++ from_buffer = aa_get_buffer(false);
++ error = -ENOMEM;
++ if (!to_buffer || !from_buffer)
++ goto out;
++
++ if (!our_mnt(from_path->mnt))
++ /* moving a mount detached from the namespace */
++ from_path = NULL;
++ error = fn_for_each_confined(label, profile,
++ match_mnt(subj_cred, profile, to_path, to_buffer,
++ from_path, from_buffer,
++ NULL, MS_MOVE, NULL, false));
++out:
++ aa_put_buffer(to_buffer);
++ aa_put_buffer(from_buffer);
++
++ return error;
++}
++
++int aa_move_mount_old(const struct cred *subj_cred, struct aa_label *label,
++ const struct path *path, const char *orig_name)
++{
++ struct path old_path;
++ int error;
+
+ if (!orig_name || !*orig_name)
+ return -EINVAL;
+-
+ error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
+ if (error)
+ return error;
+
+- buffer = aa_get_buffer(false);
+- old_buffer = aa_get_buffer(false);
+- error = -ENOMEM;
+- if (!buffer || !old_buffer)
+- goto out;
+- error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, &old_path, old_buffer,
+- NULL, MS_MOVE, NULL, false));
+-out:
+- aa_put_buffer(buffer);
+- aa_put_buffer(old_buffer);
++ error = aa_move_mount(subj_cred, label, &old_path, path);
+ path_put(&old_path);
+
+ return error;
+ }
+
+-int aa_new_mount(struct aa_label *label, const char *dev_name,
+- const struct path *path, const char *type, unsigned long flags,
+- void *data)
++int aa_new_mount(const struct cred *subj_cred, struct aa_label *label,
++ const char *dev_name, const struct path *path,
++ const char *type, unsigned long flags, void *data)
+ {
+ struct aa_profile *profile;
+ char *buffer = NULL, *dev_buffer = NULL;
+@@ -549,12 +579,14 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
+ goto out;
+ }
+ error = fn_for_each_confined(label, profile,
+- match_mnt(profile, path, buffer, dev_path, dev_buffer,
++ match_mnt(subj_cred, profile, path, buffer,
++ dev_path, dev_buffer,
+ type, flags, data, binary));
+ } else {
+ error = fn_for_each_confined(label, profile,
+- match_mnt_path_str(profile, path, buffer, dev_name,
+- type, flags, data, binary, NULL));
++ match_mnt_path_str(subj_cred, profile, path,
++ buffer, dev_name,
++ type, flags, data, binary, NULL));
+ }
+
+ out:
+@@ -566,7 +598,8 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
+ return error;
+ }
+
+-static int profile_umount(struct aa_profile *profile, const struct path *path,
++static int profile_umount(const struct cred *subj_cred,
++ struct aa_profile *profile, const struct path *path,
+ char *buffer)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -595,11 +628,13 @@ static int profile_umount(struct aa_profile *profile, const struct path *path,
+ error = -EACCES;
+
+ audit:
+- return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
++ return audit_mount(subj_cred, profile, OP_UMOUNT, name, NULL, NULL,
++ NULL, 0, NULL,
+ AA_MAY_UMOUNT, &perms, info, error);
+ }
+
+-int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
++int aa_umount(const struct cred *subj_cred, struct aa_label *label,
++ struct vfsmount *mnt, int flags)
+ {
+ struct aa_profile *profile;
+ char *buffer = NULL;
+@@ -614,7 +649,7 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+ return -ENOMEM;
+
+ error = fn_for_each_confined(label, profile,
+- profile_umount(profile, &path, buffer));
++ profile_umount(subj_cred, profile, &path, buffer));
+ aa_put_buffer(buffer);
+
+ return error;
+@@ -624,7 +659,8 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
+ *
+ * Returns: label for transition or ERR_PTR. Does not return NULL
+ */
+-static struct aa_label *build_pivotroot(struct aa_profile *profile,
++static struct aa_label *build_pivotroot(const struct cred *subj_cred,
++ struct aa_profile *profile,
+ const struct path *new_path,
+ char *new_buffer,
+ const struct path *old_path,
+@@ -669,7 +705,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
+ error = 0;
+
+ audit:
+- error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
++ error = audit_mount(subj_cred, profile, OP_PIVOTROOT, new_name,
++ old_name,
+ NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
+ &perms, info, error);
+ if (error)
+@@ -678,7 +715,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
+ return aa_get_newest_label(&profile->label);
+ }
+
+-int aa_pivotroot(struct aa_label *label, const struct path *old_path,
++int aa_pivotroot(const struct cred *subj_cred, struct aa_label *label,
++ const struct path *old_path,
+ const struct path *new_path)
+ {
+ struct aa_profile *profile;
+@@ -696,7 +734,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+ if (!old_buffer || !new_buffer)
+ goto out;
+ target = fn_label_build(label, profile, GFP_KERNEL,
+- build_pivotroot(profile, new_path, new_buffer,
++ build_pivotroot(subj_cred, profile, new_path,
++ new_buffer,
+ old_path, old_buffer));
+ if (!target) {
+ info = "label build failed";
+@@ -722,7 +761,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
+ fail:
+ /* TODO: add back in auditing of new_name and old_name */
+ error = fn_for_each(label, profile,
+- audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
++ audit_mount(subj_cred, profile, OP_PIVOTROOT,
++ NULL /*new_name */,
+ NULL /* old_name */,
+ NULL, NULL,
+ 0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
+diff --git a/security/apparmor/net.c b/security/apparmor/net.c
+index 788be1609a865d..704c171232ab46 100644
+--- a/security/apparmor/net.c
++++ b/security/apparmor/net.c
+@@ -71,6 +71,7 @@ static const char * const net_mask_names[] = {
+ void audit_net_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+ if (address_family_names[sa->u.net->family])
+ audit_log_format(ab, " family=\"%s\"",
+@@ -78,35 +79,36 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
+ else
+ audit_log_format(ab, " family=\"unknown(%d)\"",
+ sa->u.net->family);
+- if (sock_type_names[aad(sa)->net.type])
++ if (sock_type_names[ad->net.type])
+ audit_log_format(ab, " sock_type=\"%s\"",
+- sock_type_names[aad(sa)->net.type]);
++ sock_type_names[ad->net.type]);
+ else
+ audit_log_format(ab, " sock_type=\"unknown(%d)\"",
+- aad(sa)->net.type);
+- audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
++ ad->net.type);
++ audit_log_format(ab, " protocol=%d", ad->net.protocol);
+
+- if (aad(sa)->request & NET_PERMS_MASK) {
++ if (ad->request & NET_PERMS_MASK) {
+ audit_log_format(ab, " requested_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
++ aa_audit_perm_mask(ab, ad->request, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+
+- if (aad(sa)->denied & NET_PERMS_MASK) {
++ if (ad->denied & NET_PERMS_MASK) {
+ audit_log_format(ab, " denied_mask=");
+- aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
++ aa_audit_perm_mask(ab, ad->denied, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+ }
+ }
+- if (aad(sa)->peer) {
++ if (ad->peer) {
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+ }
+
+ /* Generic af perm */
+-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+- u32 request, u16 family, int type)
++int aa_profile_af_perm(struct aa_profile *profile,
++ struct apparmor_audit_data *ad, u32 request, u16 family,
++ int type)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+@@ -130,21 +132,23 @@ int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+ perms = *aa_lookup_perms(&rules->policy, state);
+ aa_apply_modes_to_perms(profile, &perms);
+
+- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
+ }
+
+-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+- int type, int protocol)
++int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request, u16 family, int type, int protocol)
+ {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
++ DEFINE_AUDIT_NET(ad, op, NULL, family, type, protocol);
+
+ return fn_for_each_confined(label, profile,
+- aa_profile_af_perm(profile, &sa, request, family,
++ aa_profile_af_perm(profile, &ad, request, family,
+ type));
+ }
+
+-static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
++static int aa_label_sk_perm(const struct cred *subj_cred,
++ struct aa_label *label,
++ const char *op, u32 request,
+ struct sock *sk)
+ {
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+@@ -155,10 +159,11 @@ static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
+
+ if (ctx->label != kernel_t && !unconfined(label)) {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_SK(sa, op, sk);
++ DEFINE_AUDIT_SK(ad, op, sk);
+
++ ad.subj_cred = subj_cred;
+ error = fn_for_each_confined(label, profile,
+- aa_profile_af_sk_perm(profile, &sa, request, sk));
++ aa_profile_af_sk_perm(profile, &ad, request, sk));
+ }
+
+ return error;
+@@ -174,21 +179,21 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk)
+
+ /* TODO: switch to begin_current_label ???? */
+ label = begin_current_label_crit_section();
+- error = aa_label_sk_perm(label, op, request, sk);
++ error = aa_label_sk_perm(current_cred(), label, op, request, sk);
+ end_current_label_crit_section(label);
+
+ return error;
+ }
+
+
+-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+- struct socket *sock)
++int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
++ const char *op, u32 request, struct socket *sock)
+ {
+ AA_BUG(!label);
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+
+- return aa_label_sk_perm(label, op, request, sock->sk);
++ return aa_label_sk_perm(subj_cred, label, op, request, sock->sk);
+ }
+
+ #ifdef CONFIG_NETWORK_SECMARK
+@@ -214,7 +219,7 @@ static int apparmor_secmark_init(struct aa_secmark *secmark)
+ }
+
+ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ int i, ret;
+ struct aa_perms perms = { };
+@@ -245,17 +250,17 @@ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+
+ aa_apply_modes_to_perms(profile, &perms);
+
+- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
+ }
+
+ int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+ u32 secid, const struct sock *sk)
+ {
+ struct aa_profile *profile;
+- DEFINE_AUDIT_SK(sa, op, sk);
++ DEFINE_AUDIT_SK(ad, op, sk);
+
+ return fn_for_each_confined(label, profile,
+ aa_secmark_perm(profile, request, secid,
+- &sa));
++ &ad));
+ }
+ #endif
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index b38f7b2a5e1d5d..d9d3b3d776e11b 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -188,7 +188,7 @@ static void aa_free_data(void *ptr, void *arg)
+ {
+ struct aa_data *data = ptr;
+
+- kfree_sensitive(data->data);
++ kvfree_sensitive(data->data, data->size);
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+ }
+@@ -255,6 +255,7 @@ void aa_free_profile(struct aa_profile *profile)
+
+ aa_put_ns(profile->ns);
+ kfree_sensitive(profile->rename);
++ kfree_sensitive(profile->disconnected);
+
+ free_attachment(&profile->attach);
+
+@@ -285,6 +286,7 @@ void aa_free_profile(struct aa_profile *profile)
+ /**
+ * aa_alloc_profile - allocate, initialize and return a new profile
+ * @hname: name of the profile (NOT NULL)
++ * @proxy: proxy to use OR NULL to allocate a new one
+ * @gfp: allocation type
+ *
+ * Returns: refcount profile or NULL on failure
+@@ -721,16 +723,17 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->iface.ns) {
++ if (ad->iface.ns) {
+ audit_log_format(ab, " ns=");
+- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
++ audit_log_untrustedstring(ab, ad->iface.ns);
+ }
+ }
+
+ /**
+ * audit_policy - Do auditing of policy changes
+- * @label: label to check if it can manage policy
++ * @subj_label: label to check if it can manage policy
+ * @op: policy operation being performed
+ * @ns_name: name of namespace being manipulated
+ * @name: name of profile being manipulated (NOT NULL)
+@@ -739,19 +742,19 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ *
+ * Returns: the error to be returned after audit is done
+ */
+-static int audit_policy(struct aa_label *label, const char *op,
++static int audit_policy(struct aa_label *subj_label, const char *op,
+ const char *ns_name, const char *name,
+ const char *info, int error)
+ {
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
+
+- aad(&sa)->iface.ns = ns_name;
+- aad(&sa)->name = name;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
+- aad(&sa)->label = label;
++ ad.iface.ns = ns_name;
++ ad.name = name;
++ ad.info = info;
++ ad.error = error;
++ ad.subj_label = subj_label;
+
+- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
++ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, audit_cb);
+
+ return error;
+ }
+@@ -759,31 +762,35 @@ static int audit_policy(struct aa_label *label, const char *op,
+ /* don't call out to other LSMs in the stack for apparmor policy admin
+ * permissions
+ */
+-static int policy_ns_capable(struct aa_label *label,
++static int policy_ns_capable(const struct cred *subj_cred,
++ struct aa_label *label,
+ struct user_namespace *userns, int cap)
+ {
+ int err;
+
+ /* check for MAC_ADMIN cap in cred */
+- err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
++ err = cap_capable(subj_cred, userns, cap, CAP_OPT_NONE);
+ if (!err)
+- err = aa_capable(label, cap, CAP_OPT_NONE);
++ err = aa_capable(subj_cred, label, cap, CAP_OPT_NONE);
+
+ return err;
+ }
+
+ /**
+ * aa_policy_view_capable - check if viewing policy in at @ns is allowed
+- * label: label that is trying to view policy in ns
+- * ns: namespace being viewed by @label (may be NULL if @label's ns)
++ * @subj_cred: cred of subject
++ * @label: label that is trying to view policy in ns
++ * @ns: namespace being viewed by @label (may be NULL if @label's ns)
++ *
+ * Returns: true if viewing policy is allowed
+ *
+ * If @ns is NULL then the namespace being viewed is assumed to be the
+ * tasks current namespace.
+ */
+-bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
++bool aa_policy_view_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns)
+ {
+- struct user_namespace *user_ns = current_user_ns();
++ struct user_namespace *user_ns = subj_cred->user_ns;
+ struct aa_ns *view_ns = labels_view(label);
+ bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
+ in_egroup_p(make_kgid(user_ns, 0));
+@@ -800,15 +807,17 @@ bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
+ return response;
+ }
+
+-bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
++bool aa_policy_admin_capable(const struct cred *subj_cred,
++ struct aa_label *label, struct aa_ns *ns)
+ {
+- struct user_namespace *user_ns = current_user_ns();
+- bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
++ struct user_namespace *user_ns = subj_cred->user_ns;
++ bool capable = policy_ns_capable(subj_cred, label, user_ns,
++ CAP_MAC_ADMIN) == 0;
+
+ AA_DEBUG("cap_mac_admin? %d\n", capable);
+ AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
+
+- return aa_policy_view_capable(label, ns) && capable &&
++ return aa_policy_view_capable(subj_cred, label, ns) && capable &&
+ !aa_g_lock_policy;
+ }
+
+@@ -818,7 +827,7 @@ bool aa_current_policy_view_capable(struct aa_ns *ns)
+ bool res;
+
+ label = __begin_current_label_crit_section();
+- res = aa_policy_view_capable(label, ns);
++ res = aa_policy_view_capable(current_cred(), label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+@@ -830,7 +839,7 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
+ bool res;
+
+ label = __begin_current_label_crit_section();
+- res = aa_policy_admin_capable(label, ns);
++ res = aa_policy_admin_capable(current_cred(), label, ns);
+ __end_current_label_crit_section(label);
+
+ return res;
+@@ -838,12 +847,15 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
+
+ /**
+ * aa_may_manage_policy - can the current task manage policy
++ * @subj_cred: subject's cred
+ * @label: label to check if it can manage policy
++ * @ns: namespace being managed by @label (may be NULL if @label's ns)
+ * @mask: contains the policy manipulation operation being done
+ *
+ * Returns: 0 if the task is allowed to manipulate policy else error
+ */
+-int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
++int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label,
++ struct aa_ns *ns, u32 mask)
+ {
+ const char *op;
+
+@@ -859,7 +871,7 @@ int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
+ return audit_policy(label, op, NULL, NULL, "policy_locked",
+ -EACCES);
+
+- if (!aa_policy_admin_capable(label, ns))
++ if (!aa_policy_admin_capable(subj_cred, label, ns))
+ return audit_policy(label, op, NULL, NULL, "not policy admin",
+ -EACCES);
+
+@@ -950,11 +962,11 @@ static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
+
+ /**
+ * __lookup_replace - lookup replacement information for a profile
+- * @ns - namespace the lookup occurs in
+- * @hname - name of profile to lookup
+- * @noreplace - true if not replacing an existing profile
+- * @p - Returns: profile to be replaced
+- * @info - Returns: info string on why lookup failed
++ * @ns: namespace the lookup occurs in
++ * @hname: name of profile to lookup
++ * @noreplace: true if not replacing an existing profile
++ * @p: Returns - profile to be replaced
++ * @info: Returns - info string on why lookup failed
+ *
+ * Returns: profile to replace (no ref) on success else ptr error
+ */
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 8b8846073e142a..d752bfa9b3f37e 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -34,17 +34,18 @@
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->iface.ns) {
++ if (ad->iface.ns) {
+ audit_log_format(ab, " ns=");
+- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
++ audit_log_untrustedstring(ab, ad->iface.ns);
+ }
+- if (aad(sa)->name) {
++ if (ad->name) {
+ audit_log_format(ab, " name=");
+- audit_log_untrustedstring(ab, aad(sa)->name);
++ audit_log_untrustedstring(ab, ad->name);
+ }
+- if (aad(sa)->iface.pos)
+- audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
++ if (ad->iface.pos)
++ audit_log_format(ab, " offset=%ld", ad->iface.pos);
+ }
+
+ /**
+@@ -63,18 +64,18 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
+ int error)
+ {
+ struct aa_profile *profile = labels_profile(aa_current_raw_label());
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
+ if (e)
+- aad(&sa)->iface.pos = e->pos - e->start;
+- aad(&sa)->iface.ns = ns_name;
++ ad.iface.pos = e->pos - e->start;
++ ad.iface.ns = ns_name;
+ if (new)
+- aad(&sa)->name = new->base.hname;
++ ad.name = new->base.hname;
+ else
+- aad(&sa)->name = name;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
++ ad.name = name;
++ ad.info = info;
++ ad.error = error;
+
+- return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
++ return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
+ }
+
+ void __aa_loaddata_update(struct aa_loaddata *data, long revision)
+@@ -477,6 +478,8 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
+ if (!table)
+ goto fail;
+
++ strs->table = table;
++ strs->size = size;
+ for (i = 0; i < size; i++) {
+ char *str;
+ int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
+@@ -519,14 +522,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
+ goto fail;
+ if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+-
+- strs->table = table;
+- strs->size = size;
+ }
+ return true;
+
+ fail:
+- kfree_sensitive(table);
++ aa_free_str_table(strs);
+ e->pos = saved_pos;
+ return false;
+ }
+@@ -807,7 +807,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ const char *info = "failed to unpack profile";
+ size_t ns_len;
+ struct rhashtable_params params = { 0 };
+- char *key = NULL;
++ char *key = NULL, *disconnected = NULL;
+ struct aa_data *data;
+ int error = -EPROTO;
+ kernel_cap_t tmpcap;
+@@ -825,6 +825,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+
+ tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
+ if (tmpns) {
++ if (!tmpname) {
++ info = "empty profile name";
++ goto fail;
++ }
+ *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
+ if (!*ns_name) {
+ info = "out of memory";
+@@ -873,7 +877,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+ }
+
+ /* disconnected attachment string is optional */
+- (void) aa_unpack_str(e, &profile->disconnected, "disconnected");
++ (void) aa_unpack_strdup(e, &disconnected, "disconnected");
++ profile->disconnected = disconnected;
+
+ /* per profile debug flags (complain, audit) */
+ if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
+@@ -1076,6 +1081,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
+
+ if (rhashtable_insert_fast(profile->data, &data->head,
+ profile->data->p)) {
++ kvfree_sensitive(data->data, data->size);
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+ info = "failed to insert data to table";
+diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
+index 5c9bde25e56df9..2b8003eb4f463a 100644
+--- a/security/apparmor/policy_unpack_test.c
++++ b/security/apparmor/policy_unpack_test.c
+@@ -80,14 +80,14 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ *(buf + 1) = strlen(TEST_U32_NAME) + 1;
+ strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3));
+ *(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
+- *((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
++ *((__le32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = cpu_to_le32(TEST_U32_DATA);
+
+ buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_U64_NAME) + 1;
+ strscpy(buf + 3, TEST_U64_NAME, e->end - (void *)(buf + 3));
+ *(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
+- *((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
++ *((__le64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = cpu_to_le64(TEST_U64_DATA);
+
+ buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
+ *buf = AA_NAME;
+@@ -103,7 +103,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ *(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
+ strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3));
+ *(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
+- *((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
++ *((__le16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = cpu_to_le16(TEST_ARRAY_SIZE);
+
+ return e;
+ }
+diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
+index e8594816489626..dcc94c3153d511 100644
+--- a/security/apparmor/resource.c
++++ b/security/apparmor/resource.c
+@@ -30,18 +30,20 @@ struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
+ static void audit_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+ audit_log_format(ab, " rlimit=%s value=%lu",
+- rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
+- if (aad(sa)->peer) {
++ rlim_names[ad->rlim.rlim], ad->rlim.max);
++ if (ad->peer) {
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+ }
+
+ /**
+ * audit_resource - audit setting resource limit
++ * @subj_cred: cred setting the resource
+ * @profile: profile being enforced (NOT NULL)
+ * @resource: rlimit being auditing
+ * @value: value being set
+@@ -49,22 +51,24 @@ static void audit_cb(struct audit_buffer *ab, void *va)
+ * @info: info being auditing
+ * @error: error value
+ *
+- * Returns: 0 or sa->error else other error code on failure
++ * Returns: 0 or ad->error else other error code on failure
+ */
+-static int audit_resource(struct aa_profile *profile, unsigned int resource,
++static int audit_resource(const struct cred *subj_cred,
++ struct aa_profile *profile, unsigned int resource,
+ unsigned long value, struct aa_label *peer,
+ const char *info, int error)
+ {
+- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
++ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
+ OP_SETRLIMIT);
+
+- aad(&sa)->rlim.rlim = resource;
+- aad(&sa)->rlim.max = value;
+- aad(&sa)->peer = peer;
+- aad(&sa)->info = info;
+- aad(&sa)->error = error;
++ ad.subj_cred = subj_cred;
++ ad.rlim.rlim = resource;
++ ad.rlim.max = value;
++ ad.peer = peer;
++ ad.info = info;
++ ad.error = error;
+
+- return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
++ return aa_audit(AUDIT_APPARMOR_AUTO, profile, &ad, audit_cb);
+ }
+
+ /**
+@@ -81,7 +85,8 @@ int aa_map_resource(int resource)
+ return rlim_map[resource];
+ }
+
+-static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
++static int profile_setrlimit(const struct cred *subj_cred,
++ struct aa_profile *profile, unsigned int resource,
+ struct rlimit *new_rlim)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+@@ -91,22 +96,24 @@ static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
+ if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
+ rules->rlimits.limits[resource].rlim_max)
+ e = -EACCES;
+- return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
+- e);
++ return audit_resource(subj_cred, profile, resource, new_rlim->rlim_max,
++ NULL, NULL, e);
+ }
+
+ /**
+ * aa_task_setrlimit - test permission to set an rlimit
+- * @label - label confining the task (NOT NULL)
+- * @task - task the resource is being set on
+- * @resource - the resource being set
+- * @new_rlim - the new resource limit (NOT NULL)
++ * @subj_cred: cred setting the limit
++ * @label: label confining the task (NOT NULL)
++ * @task: task the resource is being set on
++ * @resource: the resource being set
++ * @new_rlim: the new resource limit (NOT NULL)
+ *
+ * Control raising the processes hard limit.
+ *
+ * Returns: 0 or error code if setting resource failed
+ */
+-int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
++int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
++ struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
+ {
+ struct aa_profile *profile;
+@@ -125,14 +132,15 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
+ */
+
+ if (label != peer &&
+- aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
++ aa_capable(subj_cred, label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
+ error = fn_for_each(label, profile,
+- audit_resource(profile, resource,
++ audit_resource(subj_cred, profile, resource,
+ new_rlim->rlim_max, peer,
+ "cap_sys_resource", -EACCES));
+ else
+ error = fn_for_each_confined(label, profile,
+- profile_setrlimit(profile, resource, new_rlim));
++ profile_setrlimit(subj_cred, profile, resource,
++ new_rlim));
+ aa_put_label(peer);
+
+ return error;
+diff --git a/security/apparmor/task.c b/security/apparmor/task.c
+index 84d16a29bfcbc3..0d7af707cccdd3 100644
+--- a/security/apparmor/task.c
++++ b/security/apparmor/task.c
+@@ -208,70 +208,75 @@ static const char *audit_ptrace_mask(u32 mask)
+ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
+ {
+ struct common_audit_data *sa = va;
++ struct apparmor_audit_data *ad = aad(sa);
+
+- if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
++ if (ad->request & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " requested_mask=\"%s\"",
+- audit_ptrace_mask(aad(sa)->request));
++ audit_ptrace_mask(ad->request));
+
+- if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
++ if (ad->denied & AA_PTRACE_PERM_MASK) {
+ audit_log_format(ab, " denied_mask=\"%s\"",
+- audit_ptrace_mask(aad(sa)->denied));
++ audit_ptrace_mask(ad->denied));
+ }
+ }
+ audit_log_format(ab, " peer=");
+- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
++ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+
+ /* assumes check for RULE_MEDIATES is already done */
+ /* TODO: conditionals */
+-static int profile_ptrace_perm(struct aa_profile *profile,
+- struct aa_label *peer, u32 request,
+- struct common_audit_data *sa)
++static int profile_ptrace_perm(const struct cred *cred,
++ struct aa_profile *profile,
++ struct aa_label *peer, u32 request,
++ struct apparmor_audit_data *ad)
+ {
+ struct aa_ruleset *rules = list_first_entry(&profile->rules,
+ typeof(*rules), list);
+ struct aa_perms perms = { };
+
+- aad(sa)->peer = peer;
++ ad->subj_cred = cred;
++ ad->peer = peer;
+ aa_profile_match_label(profile, rules, peer, AA_CLASS_PTRACE, request,
+ &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+- return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
++ return aa_check_perms(profile, &perms, request, ad, audit_ptrace_cb);
+ }
+
+-static int profile_tracee_perm(struct aa_profile *tracee,
++static int profile_tracee_perm(const struct cred *cred,
++ struct aa_profile *tracee,
+ struct aa_label *tracer, u32 request,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ if (profile_unconfined(tracee) || unconfined(tracer) ||
+ !ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE))
+ return 0;
+
+- return profile_ptrace_perm(tracee, tracer, request, sa);
++ return profile_ptrace_perm(cred, tracee, tracer, request, ad);
+ }
+
+-static int profile_tracer_perm(struct aa_profile *tracer,
++static int profile_tracer_perm(const struct cred *cred,
++ struct aa_profile *tracer,
+ struct aa_label *tracee, u32 request,
+- struct common_audit_data *sa)
++ struct apparmor_audit_data *ad)
+ {
+ if (profile_unconfined(tracer))
+ return 0;
+
+ if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE))
+- return profile_ptrace_perm(tracer, tracee, request, sa);
++ return profile_ptrace_perm(cred, tracer, tracee, request, ad);
+
+ /* profile uses the old style capability check for ptrace */
+ if (&tracer->label == tracee)
+ return 0;
+
+- aad(sa)->label = &tracer->label;
+- aad(sa)->peer = tracee;
+- aad(sa)->request = 0;
+- aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
+- CAP_OPT_NONE);
++ ad->subj_label = &tracer->label;
++ ad->peer = tracee;
++ ad->request = 0;
++ ad->error = aa_capable(cred, &tracer->label, CAP_SYS_PTRACE,
++ CAP_OPT_NONE);
+
+- return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
++ return aa_audit(AUDIT_APPARMOR_AUTO, tracer, ad, audit_ptrace_cb);
+ }
+
+ /**
+@@ -282,7 +287,8 @@ static int profile_tracer_perm(struct aa_profile *tracer,
+ *
+ * Returns: %0 else error code if permission denied or error
+ */
+-int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
++int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
++ const struct cred *tracee_cred, struct aa_label *tracee,
+ u32 request)
+ {
+ struct aa_profile *profile;
+@@ -290,6 +296,8 @@ int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+ DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_PTRACE, OP_PTRACE);
+
+ return xcheck_labels(tracer, tracee, profile,
+- profile_tracer_perm(profile, tracee, request, &sa),
+- profile_tracee_perm(profile, tracer, xrequest, &sa));
++ profile_tracer_perm(tracer_cred, profile, tracee,
++ request, &sa),
++ profile_tracee_perm(tracee_cred, profile, tracer,
++ xrequest, &sa));
+ }
+diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
+index cfaf1d0e6a5f51..35933ae53b92cf 100644
+--- a/security/bpf/hooks.c
++++ b/security/bpf/hooks.c
+@@ -24,7 +24,6 @@ static int __init bpf_lsm_init(void)
+
+ struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = {
+ .lbs_inode = sizeof(struct bpf_storage_blob),
+- .lbs_task = sizeof(struct bpf_storage_blob),
+ };
+
+ DEFINE_LSM(bpf) = {
+diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
+index 232191ee09e310..b6e074ac022730 100644
+--- a/security/integrity/Kconfig
++++ b/security/integrity/Kconfig
+@@ -68,8 +68,6 @@ config INTEGRITY_MACHINE_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ depends on SYSTEM_BLACKLIST_KEYRING
+ depends on LOAD_UEFI_KEYS || LOAD_PPC_KEYS
+- select INTEGRITY_CA_MACHINE_KEYRING if LOAD_PPC_KEYS
+- select INTEGRITY_CA_MACHINE_KEYRING_MAX if LOAD_PPC_KEYS
+ help
+ If set, provide a keyring to which Machine Owner Keys (MOK) may
+ be added. This keyring shall contain just MOK keys. Unlike keys
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index ff9a939dad8e42..2393230c03aa3a 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -864,6 +864,13 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+ }
+
++int evm_inode_copy_up_xattr(const char *name)
++{
++ if (strcmp(name, XATTR_NAME_EVM) == 0)
++ return 1; /* Discard */
++ return -EOPNOTSUPP;
++}
++
+ /*
+ * evm_inode_init_security - initializes security.evm HMAC value
+ */
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index a462df827de2da..27ea19fb1f54c7 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+ return iint;
+ }
+
+-static void iint_free(struct integrity_iint_cache *iint)
++#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
++
++/*
++ * It is not clear that IMA should be nested at all, but as long as it measures
++ * files both on overlayfs and on the underlying fs, we need to annotate the iint
++ * mutex to avoid lockdep false positives related to IMA + overlayfs.
++ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
++ */
++static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
++ struct inode *inode)
++{
++#ifdef CONFIG_LOCKDEP
++ static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
++
++ int depth = inode->i_sb->s_stack_depth;
++
++ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
++ depth = 0;
++
++ lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
++#endif
++}
++
++static void iint_init_always(struct integrity_iint_cache *iint,
++ struct inode *inode)
+ {
+- kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
+@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
++ mutex_init(&iint->mutex);
++ iint_lockdep_annotate(iint, inode);
++}
++
++static void iint_free(struct integrity_iint_cache *iint)
++{
++ kfree(iint->ima_hash);
++ mutex_destroy(&iint->mutex);
+ kmem_cache_free(iint_cache, iint);
+ }
+
+@@ -104,6 +135,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ if (!iint)
+ return NULL;
+
++ iint_init_always(iint, inode);
++
+ write_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+@@ -153,25 +186,18 @@ void integrity_inode_free(struct inode *inode)
+ iint_free(iint);
+ }
+
+-static void init_once(void *foo)
++static void iint_init_once(void *foo)
+ {
+ struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+
+ memset(iint, 0, sizeof(*iint));
+- iint->ima_file_status = INTEGRITY_UNKNOWN;
+- iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+- iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+- iint->ima_read_status = INTEGRITY_UNKNOWN;
+- iint->ima_creds_status = INTEGRITY_UNKNOWN;
+- iint->evm_status = INTEGRITY_UNKNOWN;
+- mutex_init(&iint->mutex);
+ }
+
+ static int __init integrity_iintcache_init(void)
+ {
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+- 0, SLAB_PANIC, init_once);
++ 0, SLAB_PANIC, iint_init_once);
+ return 0;
+ }
+ DEFINE_LSM(integrity) = {
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index c29db699c996e5..07a4586e129c80 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -430,7 +430,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
+ #else
+
+ static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+- void **lsmrule)
++ void **lsmrule, gfp_t gfp)
+ {
+ return -EINVAL;
+ }
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 452e80b541e544..44b8161746fec4 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -243,8 +243,9 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ {
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
+- const char *filename = file->f_path.dentry->d_name.name;
++ struct inode *real_inode = d_real_inode(file_dentry(file));
+ struct ima_max_digest_data hash;
++ struct name_snapshot filename;
+ struct kstat stat;
+ int result = 0;
+ int length;
+@@ -302,6 +303,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
++ if (real_inode != inode) {
++ iint->real_ino = real_inode->i_ino;
++ iint->real_dev = real_inode->i_sb->s_dev;
++ }
+
+ /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ if (!result)
+@@ -311,9 +316,13 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ if (file->f_flags & O_DIRECT)
+ audit_cause = "failed(directio)";
+
++ take_dentry_name_snapshot(&filename, file->f_path.dentry);
++
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+- filename, "collect_data", audit_cause,
+- result, 0);
++ filename.name.name, "collect_data",
++ audit_cause, result, 0);
++
++ release_dentry_name_snapshot(&filename);
+ }
+ return result;
+ }
+@@ -426,6 +435,7 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
+ */
+ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+ {
++ struct name_snapshot filename;
+ char *pathname = NULL;
+
+ *pathbuf = __getname();
+@@ -439,7 +449,10 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+ }
+
+ if (!pathname) {
+- strscpy(namebuf, path->dentry->d_name.name, NAME_MAX);
++ take_dentry_name_snapshot(&filename, path->dentry);
++ strscpy(namebuf, filename.name.name, NAME_MAX);
++ release_dentry_name_snapshot(&filename);
++
+ pathname = namebuf;
+ }
+
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 365db0e43d7c22..cc1217ac2c6faf 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -25,6 +25,7 @@
+ #include <linux/xattr.h>
+ #include <linux/ima.h>
+ #include <linux/fs.h>
++#include <linux/iversion.h>
+
+ #include "ima.h"
+
+@@ -207,7 +208,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+ {
+- struct inode *inode = file_inode(file);
++ struct inode *backing_inode, *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc = NULL;
+ char *pathbuf = NULL;
+@@ -284,6 +285,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ iint->measured_pcrs = 0;
+ }
+
++ /* Detect and re-evaluate changes made to the backing file. */
++ backing_inode = d_real_inode(file_dentry(file));
++ if (backing_inode != inode &&
++ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++ if (!IS_I_VERSION(backing_inode) ||
++ backing_inode->i_sb->s_dev != iint->real_dev ||
++ backing_inode->i_ino != iint->real_ino ||
++ !inode_eq_iversion(backing_inode, iint->version)) {
++ iint->flags &= ~IMA_DONE_MASK;
++ iint->measured_pcrs = 0;
++ }
++ }
++
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index f6906261775462..f3f46c6186c081 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -401,7 +401,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
+ kfree(entry);
+ }
+
+-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
++static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
++ gfp_t gfp)
+ {
+ struct ima_rule_entry *nentry;
+ int i;
+@@ -410,7 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ * Immutable elements are copied over as pointers and data; only
+ * lsm rules can change
+ */
+- nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
++ nentry = kmemdup(entry, sizeof(*nentry), gfp);
+ if (!nentry)
+ return NULL;
+
+@@ -425,7 +426,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+
+ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ nentry->lsm[i].args_p,
+- &nentry->lsm[i].rule);
++ &nentry->lsm[i].rule,
++ gfp);
+ if (!nentry->lsm[i].rule)
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ nentry->lsm[i].args_p);
+@@ -438,7 +440,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ int i;
+ struct ima_rule_entry *nentry;
+
+- nentry = ima_lsm_copy_rule(entry);
++ nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
+ if (!nentry)
+ return -ENOMEM;
+
+@@ -664,7 +666,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ }
+
+ if (rc == -ESTALE && !rule_reinitialized) {
+- lsm_rule = ima_lsm_copy_rule(rule);
++ lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
+ if (lsm_rule) {
+ rule_reinitialized = true;
+ goto retry;
+@@ -1140,7 +1142,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ entry->lsm[lsm_rule].type = audit_type;
+ result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+ entry->lsm[lsm_rule].args_p,
+- &entry->lsm[lsm_rule].rule);
++ &entry->lsm[lsm_rule].rule,
++ GFP_KERNEL);
+ if (!entry->lsm[lsm_rule].rule) {
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ entry->lsm[lsm_rule].args_p);
+diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
+index 6cd0add524cdc9..3b2cb8f1002e61 100644
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -483,7 +483,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ bool size_limit)
+ {
+ const char *cur_filename = NULL;
++ struct name_snapshot filename;
+ u32 cur_filename_len = 0;
++ bool snapshot = false;
++ int ret;
+
+ BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+
+@@ -496,7 +499,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ }
+
+ if (event_data->file) {
+- cur_filename = event_data->file->f_path.dentry->d_name.name;
++ take_dentry_name_snapshot(&filename,
++ event_data->file->f_path.dentry);
++ snapshot = true;
++ cur_filename = filename.name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+@@ -505,8 +511,13 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+ out:
+- return ima_write_template_field_data(cur_filename, cur_filename_len,
+- DATA_FMT_STRING, field_data);
++ ret = ima_write_template_field_data(cur_filename, cur_filename_len,
++ DATA_FMT_STRING, field_data);
++
++ if (snapshot)
++ release_dentry_name_snapshot(&filename);
++
++ return ret;
+ }
+
+ /*
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index d7553c93f5c0d0..9561db7cf6b42c 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -164,6 +164,8 @@ struct integrity_iint_cache {
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
++ unsigned long real_ino;
++ dev_t real_dev;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index 3c90807476eb0e..eaddaceda14eab 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -66,6 +66,19 @@ void key_schedule_gc(time64_t gc_at)
+ }
+ }
+
++/*
++ * Set the expiration time on a key.
++ */
++void key_set_expiry(struct key *key, time64_t expiry)
++{
++ key->expiry = expiry;
++ if (expiry != TIME64_MAX) {
++ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++ expiry += key_gc_delay;
++ key_schedule_gc(expiry);
++ }
++}
++
+ /*
+ * Schedule a dead links collection run.
+ */
+@@ -176,7 +189,6 @@ static void key_garbage_collector(struct work_struct *work)
+ static u8 gc_state; /* Internal persistent state */
+ #define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+ #define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+-#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
+ #define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+ #define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+ #define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+@@ -184,21 +196,17 @@ static void key_garbage_collector(struct work_struct *work)
+
+ struct rb_node *cursor;
+ struct key *key;
+- time64_t new_timer, limit;
++ time64_t new_timer, limit, expiry;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = ktime_get_real_seconds();
+- if (limit > key_gc_delay)
+- limit -= key_gc_delay;
+- else
+- limit = key_gc_delay;
+
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+- gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
++ gc_state |= KEY_GC_REAPING_LINKS;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+@@ -233,8 +241,11 @@ static void key_garbage_collector(struct work_struct *work)
+ }
+ }
+
+- if (gc_state & KEY_GC_SET_TIMER) {
+- if (key->expiry > limit && key->expiry < new_timer) {
++ expiry = key->expiry;
++ if (expiry != TIME64_MAX) {
++ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++ expiry += key_gc_delay;
++ if (expiry > limit && expiry < new_timer) {
+ kdebug("will expire %x in %lld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+@@ -276,7 +287,7 @@ static void key_garbage_collector(struct work_struct *work)
+ */
+ kdebug("pass complete");
+
+- if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
++ if (new_timer != TIME64_MAX) {
+ new_timer += key_gc_delay;
+ key_schedule_gc(new_timer);
+ }
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 3c1e7122076b9e..ec2ec335b61334 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -174,6 +174,7 @@ extern unsigned key_gc_delay;
+ extern void keyring_gc(struct key *keyring, time64_t limit);
+ extern void keyring_restriction_gc(struct key *keyring,
+ struct key_type *dead_type);
++void key_set_expiry(struct key *key, time64_t expiry);
+ extern void key_schedule_gc(time64_t gc_at);
+ extern void key_schedule_gc_links(void);
+ extern void key_gc_keytype(struct key_type *ktype);
+@@ -222,10 +223,18 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
+ */
+ static inline bool key_is_dead(const struct key *key, time64_t limit)
+ {
++ time64_t expiry = key->expiry;
++
++ if (expiry != TIME64_MAX) {
++ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
++ expiry += key_gc_delay;
++ if (expiry <= limit)
++ return true;
++ }
++
+ return
+ key->flags & ((1 << KEY_FLAG_DEAD) |
+ (1 << KEY_FLAG_INVALIDATED)) ||
+- (key->expiry > 0 && key->expiry <= limit) ||
+ key->domain_tag->removed;
+ }
+
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 5c0c7df833f8a9..35db23d05302e3 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -294,6 +294,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ key->uid = uid;
+ key->gid = gid;
+ key->perm = perm;
++ key->expiry = TIME64_MAX;
+ key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
+
+@@ -463,10 +464,8 @@ static int __key_instantiate_and_link(struct key *key,
+ if (authkey)
+ key_invalidate(authkey);
+
+- if (prep->expiry != TIME64_MAX) {
+- key->expiry = prep->expiry;
+- key_schedule_gc(prep->expiry + key_gc_delay);
+- }
++ if (prep->expiry != TIME64_MAX)
++ key_set_expiry(key, prep->expiry);
+ }
+ }
+
+@@ -606,8 +605,7 @@ int key_reject_and_link(struct key *key,
+ atomic_inc(&key->user->nikeys);
+ mark_key_instantiated(key, -error);
+ notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
+- key->expiry = ktime_get_real_seconds() + timeout;
+- key_schedule_gc(key->expiry + key_gc_delay);
++ key_set_expiry(key, ktime_get_real_seconds() + timeout);
+
+ if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ awaken = 1;
+@@ -722,16 +720,14 @@ struct key_type *key_type_lookup(const char *type)
+
+ void key_set_timeout(struct key *key, unsigned timeout)
+ {
+- time64_t expiry = 0;
++ time64_t expiry = TIME64_MAX;
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ if (timeout > 0)
+ expiry = ktime_get_real_seconds() + timeout;
+-
+- key->expiry = expiry;
+- key_schedule_gc(key->expiry + key_gc_delay);
++ key_set_expiry(key, expiry);
+
+ up_write(&key->sem);
+ }
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 19be69fa4d052a..aa1dc43b16ddf3 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1694,7 +1694,7 @@ long keyctl_session_to_parent(void)
+ goto unlock;
+
+ /* cancel an already pending keyring replacement */
+- oldwork = task_work_cancel(parent, key_change_session_keyring);
++ oldwork = task_work_cancel_func(parent, key_change_session_keyring);
+
+ /* the replacement session keyring is applied just prior to userspace
+ * restarting */
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index d0cde6685627f2..4f4e2c1824f18b 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -198,7 +198,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+
+ /* come up with a suitable timeout value */
+ expiry = READ_ONCE(key->expiry);
+- if (expiry == 0) {
++ if (expiry == TIME64_MAX) {
+ memcpy(xbuf, "perm", 5);
+ } else if (now >= expiry) {
+ memcpy(xbuf, "expd", 5);
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index 85fb5c22529a76..fee1ab2c734d32 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -358,17 +358,17 @@ static int __init init_trusted(void)
+ if (!get_random)
+ get_random = kernel_get_random;
+
+- static_call_update(trusted_key_seal,
+- trusted_key_sources[i].ops->seal);
+- static_call_update(trusted_key_unseal,
+- trusted_key_sources[i].ops->unseal);
+- static_call_update(trusted_key_get_random,
+- get_random);
+- trusted_key_exit = trusted_key_sources[i].ops->exit;
+- migratable = trusted_key_sources[i].ops->migratable;
+-
+ ret = trusted_key_sources[i].ops->init();
+- if (!ret)
++ if (!ret) {
++ static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
++ static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
++ static_call_update(trusted_key_get_random, get_random);
++
++ trusted_key_exit = trusted_key_sources[i].ops->exit;
++ migratable = trusted_key_sources[i].ops->migratable;
++ }
++
++ if (!ret || ret != -ENODEV)
+ break;
+ }
+
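
The reordering above also tightens the probe loop: static calls are wired up only after a source's init() succeeds, and only -ENODEV ("source absent") moves on to the next source, while success or any other error stops probing. The pattern in isolation, with wire_up() as a hypothetical stand-in for the static_call_update() block:

    /* Sketch of the probe pattern above; wire_up() is hypothetical. */
    for (i = 0; i < nr_sources; i++) {
            ret = sources[i].ops->init();
            if (!ret)
                    wire_up(&sources[i]);   /* configure only on success */
            if (!ret || ret != -ENODEV)
                    break;                  /* stop unless "absent" */
    }
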
+diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
+index ac3e270ade69be..aa3d477de6db54 100644
+--- a/security/keys/trusted-keys/trusted_tee.c
++++ b/security/keys/trusted-keys/trusted_tee.c
+@@ -65,24 +65,16 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+- p->key_len);
+- if (IS_ERR(reg_shm_in)) {
+- dev_err(pvt_data.dev, "key shm register failed\n");
+- return PTR_ERR(reg_shm_in);
+- }
+-
+- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+- sizeof(p->blob));
+- if (IS_ERR(reg_shm_out)) {
+- dev_err(pvt_data.dev, "blob shm register failed\n");
+- ret = PTR_ERR(reg_shm_out);
+- goto out;
++ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++ sizeof(p->key) + sizeof(p->blob));
++ if (IS_ERR(reg_shm)) {
++ dev_err(pvt_data.dev, "shm register failed\n");
++ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_SEAL;
+@@ -90,13 +82,13 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+- param[0].u.memref.shm = reg_shm_in;
++ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->key_len;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+- param[1].u.memref.shm = reg_shm_out;
++ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->blob);
+- param[1].u.memref.shm_offs = 0;
++ param[1].u.memref.shm_offs = sizeof(p->key);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+@@ -107,11 +99,7 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ p->blob_len = param[1].u.memref.size;
+ }
+
+-out:
+- if (reg_shm_out)
+- tee_shm_free(reg_shm_out);
+- if (reg_shm_in)
+- tee_shm_free(reg_shm_in);
++ tee_shm_free(reg_shm);
+
+ return ret;
+ }
+@@ -124,24 +112,16 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+- p->blob_len);
+- if (IS_ERR(reg_shm_in)) {
+- dev_err(pvt_data.dev, "blob shm register failed\n");
+- return PTR_ERR(reg_shm_in);
+- }
+-
+- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+- sizeof(p->key));
+- if (IS_ERR(reg_shm_out)) {
+- dev_err(pvt_data.dev, "key shm register failed\n");
+- ret = PTR_ERR(reg_shm_out);
+- goto out;
++ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++ sizeof(p->key) + sizeof(p->blob));
++ if (IS_ERR(reg_shm)) {
++ dev_err(pvt_data.dev, "shm register failed\n");
++ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_UNSEAL;
+@@ -149,11 +129,11 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+- param[0].u.memref.shm = reg_shm_in;
++ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->blob_len;
+- param[0].u.memref.shm_offs = 0;
++ param[0].u.memref.shm_offs = sizeof(p->key);
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+- param[1].u.memref.shm = reg_shm_out;
++ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->key);
+ param[1].u.memref.shm_offs = 0;
+
+@@ -166,11 +146,7 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ p->key_len = param[1].u.memref.size;
+ }
+
+-out:
+- if (reg_shm_out)
+- tee_shm_free(reg_shm_out);
+- if (reg_shm_in)
+- tee_shm_free(reg_shm_in);
++ tee_shm_free(reg_shm);
+
+ return ret;
+ }
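
Seal and unseal now register one shared-memory region instead of two: registration starts at p->key and spans sizeof(p->key) + sizeof(p->blob) bytes, and the blob side is addressed via shm_offs = sizeof(p->key). That only works because key[] and blob[] sit back to back in struct trusted_key_payload; schematically (a sketch with placeholder sizes, not the real definition from the trusted-keys headers):

    struct trusted_key_payload_sketch {
            unsigned int key_len;
            unsigned int blob_len;
            unsigned char key[128];     /* shm_offs 0; real size differs */
            unsigned char blob[512];    /* shm_offs sizeof(key) */
    };
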
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index bc700f85f80be7..ea277c55a38dba 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -38,6 +38,7 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ u8 *end_work = scratch + SCRATCH_SIZE;
+ u8 *priv, *pub;
+ u16 priv_len, pub_len;
++ int ret;
+
+ priv_len = get_unaligned_be16(src) + 2;
+ priv = src;
+@@ -57,8 +58,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ unsigned char bool[3], *w = bool;
+ /* tag 0 is emptyAuth */
+ w = asn1_encode_boolean(w, w + sizeof(bool), true);
+- if (WARN(IS_ERR(w), "BUG: Boolean failed to encode"))
+- return PTR_ERR(w);
++ if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) {
++ ret = PTR_ERR(w);
++ goto err;
++ }
+ work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+ }
+
+@@ -69,8 +72,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ * trigger, so if it does there's something nefarious going on
+ */
+ if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+- "BUG: scratch buffer is too small"))
+- return -EINVAL;
++ "BUG: scratch buffer is too small")) {
++ ret = -EINVAL;
++ goto err;
++ }
+
+ work = asn1_encode_integer(work, end_work, options->keyhandle);
+ work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+@@ -79,10 +84,18 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ work1 = payload->blob;
+ work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+ scratch, work - scratch);
+- if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+- return PTR_ERR(work1);
++ if (IS_ERR(work1)) {
++ ret = PTR_ERR(work1);
++ pr_err("BUG: ASN.1 encoder failed with %d\n", ret);
++ goto err;
++ }
+
++ kfree(scratch);
+ return work1 - payload->blob;
++
++err:
++ kfree(scratch);
++ return ret;
+ }
+
+ struct tpm2_key_context {
+diff --git a/security/landlock/cred.c b/security/landlock/cred.c
+index 13dff2a3154513..94f0d03bfd6432 100644
+--- a/security/landlock/cred.c
++++ b/security/landlock/cred.c
+@@ -14,8 +14,8 @@
+ #include "ruleset.h"
+ #include "setup.h"
+
+-static int hook_cred_prepare(struct cred *const new,
+- const struct cred *const old, const gfp_t gfp)
++static void hook_cred_transfer(struct cred *const new,
++ const struct cred *const old)
+ {
+ struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
+
+@@ -23,6 +23,12 @@ static int hook_cred_prepare(struct cred *const new,
+ landlock_get_ruleset(old_dom);
+ landlock_cred(new)->domain = old_dom;
+ }
++}
++
++static int hook_cred_prepare(struct cred *const new,
++ const struct cred *const old, const gfp_t gfp)
++{
++ hook_cred_transfer(new, old);
+ return 0;
+ }
+
+@@ -36,6 +42,7 @@ static void hook_cred_free(struct cred *const cred)
+
+ static struct security_hook_list landlock_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
++ LSM_HOOK_INIT(cred_transfer, hook_cred_transfer),
+ LSM_HOOK_INIT(cred_free, hook_cred_free),
+ };
+
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index 1c0c198f6fdb85..1bdd049e3d636a 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -820,8 +820,9 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ bool allow_parent1, allow_parent2;
+ access_mask_t access_request_parent1, access_request_parent2;
+ struct path mnt_dir;
+- layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
+- layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];
++ struct dentry *old_parent;
++ layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
++ layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
+
+ if (!dom)
+ return 0;
+@@ -867,9 +868,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ mnt_dir.mnt = new_dir->mnt;
+ mnt_dir.dentry = new_dir->mnt->mnt_root;
+
++ /*
++ * old_dentry may be the root of the common mount point and
++ * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
++ * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
++ * we keep a reference to old_dentry.
++ */
++ old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
++ old_dentry->d_parent;
++
+ /* new_dir->dentry is equal to new_dentry->d_parent */
+- allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
+- old_dentry->d_parent,
++ allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
+ &layer_masks_parent1);
+ allow_parent2 = collect_domain_accesses(
+ dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
+diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
+index 245cc650a4dc99..336bedaa3af689 100644
+--- a/security/landlock/syscalls.c
++++ b/security/landlock/syscalls.c
+@@ -32,6 +32,18 @@
+ #include "ruleset.h"
+ #include "setup.h"
+
++static bool is_initialized(void)
++{
++ if (likely(landlock_initialized))
++ return true;
++
++ pr_warn_once(
++ "Disabled but requested by user space. "
++ "You should enable Landlock at boot time: "
++ "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n");
++ return false;
++}
++
+ /**
+ * copy_min_struct_from_user - Safe future-proof argument copying
+ *
+@@ -165,7 +177,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
+ /* Build-time checks. */
+ build_check_abi();
+
+- if (!landlock_initialized)
++ if (!is_initialized())
+ return -EOPNOTSUPP;
+
+ if (flags) {
+@@ -311,7 +323,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
+ struct landlock_ruleset *ruleset;
+ int res, err;
+
+- if (!landlock_initialized)
++ if (!is_initialized())
+ return -EOPNOTSUPP;
+
+ /* No flag for now. */
+@@ -402,7 +414,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
+ struct landlock_cred_security *new_llcred;
+ int err;
+
+- if (!landlock_initialized)
++ if (!is_initialized())
+ return -EOPNOTSUPP;
+
+ /*
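
With the is_initialized() helper above, user space still sees EOPNOTSUPP from all three syscalls, but the kernel log now points at the boot-time configuration. A small stand-alone probe for this condition (plain C, using only the documented Landlock ABI-version query):

    #include <linux/landlock.h>
    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            long abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
                               LANDLOCK_CREATE_RULESET_VERSION);
            if (abi < 0 && errno == EOPNOTSUPP)
                    puts("Landlock is not enabled; see the boot-time "
                         "configuration documentation");
            else if (abi < 0)
                    perror("landlock_create_ruleset");
            else
                    printf("Landlock ABI version: %ld\n", abi);
            return 0;
    }
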
+diff --git a/security/security.c b/security/security.c
+index 23b129d482a7c8..b6144833c7a8ea 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -2539,7 +2539,7 @@ int security_inode_copy_up_xattr(const char *name)
+ return rc;
+ }
+
+- return LSM_RET_DEFAULT(inode_copy_up_xattr);
++ return evm_inode_copy_up_xattr(name);
+ }
+ EXPORT_SYMBOL(security_inode_copy_up_xattr);
+
+@@ -2648,6 +2648,24 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ EXPORT_SYMBOL_GPL(security_file_ioctl);
+
++/**
++ * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode
++ * @file: associated file
++ * @cmd: ioctl cmd
++ * @arg: ioctl arguments
++ *
++ * Compat version of security_file_ioctl() that correctly handles 32-bit
++ * processes running on 64-bit kernels.
++ *
++ * Return: Returns 0 if permission is granted.
++ */
++int security_file_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ return call_int_hook(file_ioctl_compat, 0, file, cmd, arg);
++}
++EXPORT_SYMBOL_GPL(security_file_ioctl_compat);
++
+ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+ {
+ /*
+@@ -4012,7 +4030,19 @@ EXPORT_SYMBOL(security_inode_setsecctx);
+ */
+ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+ {
+- return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
++ struct security_hook_list *hp;
++ int rc;
++
++ /*
++ * Only one module will provide a security context.
++ */
++ hlist_for_each_entry(hp, &security_hook_heads.inode_getsecctx, list) {
++ rc = hp->hook.inode_getsecctx(inode, ctx, ctxlen);
++ if (rc != LSM_RET_DEFAULT(inode_getsecctx))
++ return rc;
++ }
++
++ return LSM_RET_DEFAULT(inode_getsecctx);
+ }
+ EXPORT_SYMBOL(security_inode_getsecctx);
+
+@@ -4369,8 +4399,20 @@ EXPORT_SYMBOL(security_sock_rcv_skb);
+ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
+ sockptr_t optlen, unsigned int len)
+ {
+- return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
+- optval, optlen, len);
++ struct security_hook_list *hp;
++ int rc;
++
++ /*
++ * Only one module will provide a security context.
++ */
++ hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_stream,
++ list) {
++ rc = hp->hook.socket_getpeersec_stream(sock, optval, optlen,
++ len);
++ if (rc != LSM_RET_DEFAULT(socket_getpeersec_stream))
++ return rc;
++ }
++ return LSM_RET_DEFAULT(socket_getpeersec_stream);
+ }
+
+ /**
+@@ -4390,8 +4432,19 @@ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
+ int security_socket_getpeersec_dgram(struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+ {
+- return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
+- skb, secid);
++ struct security_hook_list *hp;
++ int rc;
++
++ /*
++ * Only one module will provide a security context.
++ */
++ hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_dgram,
++ list) {
++ rc = hp->hook.socket_getpeersec_dgram(sock, skb, secid);
++ if (rc != LSM_RET_DEFAULT(socket_getpeersec_dgram))
++ return rc;
++ }
++ return LSM_RET_DEFAULT(socket_getpeersec_dgram);
+ }
+ EXPORT_SYMBOL(security_socket_getpeersec_dgram);
+
+@@ -5063,15 +5116,17 @@ int security_key_getsecurity(struct key *key, char **buffer)
+ * @op: rule operator
+ * @rulestr: rule context
+ * @lsmrule: receive buffer for audit rule struct
++ * @gfp: GFP flag used for kmalloc
+ *
+ * Allocate and initialize an LSM audit rule structure.
+ *
+ * Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
+ * an invalid rule.
+ */
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++ gfp_t gfp)
+ {
+- return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
++ return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule, gfp);
+ }
+
+ /**
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index 32eb67fb3e42c0..b49c44869dc462 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -330,12 +330,12 @@ static int avc_add_xperms_decision(struct avc_node *node,
+ {
+ struct avc_xperms_decision_node *dest_xpd;
+
+- node->ae.xp_node->xp.len++;
+ dest_xpd = avc_xperms_decision_alloc(src->used);
+ if (!dest_xpd)
+ return -ENOMEM;
+ avc_copy_xperms_decision(&dest_xpd->xpd, src);
+ list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
++ node->ae.xp_node->xp.len++;
+ return 0;
+ }
+
+@@ -907,7 +907,11 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
+ node->ae.avd.auditdeny &= ~perms;
+ break;
+ case AVC_CALLBACK_ADD_XPERMS:
+- avc_add_xperms_decision(node, xpd);
++ rc = avc_add_xperms_decision(node, xpd);
++ if (rc) {
++ avc_node_kill(node);
++ goto out_unlock;
++ }
+ break;
+ }
+ avc_node_replace(node, orig);
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 2aa0e219d72177..d4a99d98ec7745 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1660,8 +1660,6 @@ static int inode_has_perm(const struct cred *cred,
+ struct inode_security_struct *isec;
+ u32 sid;
+
+- validate_creds(cred);
+-
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+@@ -3056,8 +3054,6 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ struct inode_security_struct *isec;
+ u32 sid;
+
+- validate_creds(cred);
+-
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
+ sid = cred_sid(cred);
+@@ -3101,8 +3097,6 @@ static int selinux_inode_permission(struct inode *inode, int mask)
+ if (!mask)
+ return 0;
+
+- validate_creds(cred);
+-
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+@@ -3731,6 +3725,33 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
+ return error;
+ }
+
++static int selinux_file_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ /*
++ * If we are in a 64-bit kernel running 32-bit userspace, we need to
++ * make sure we don't compare 32-bit flags to 64-bit flags.
++ */
++ switch (cmd) {
++ case FS_IOC32_GETFLAGS:
++ cmd = FS_IOC_GETFLAGS;
++ break;
++ case FS_IOC32_SETFLAGS:
++ cmd = FS_IOC_SETFLAGS;
++ break;
++ case FS_IOC32_GETVERSION:
++ cmd = FS_IOC_GETVERSION;
++ break;
++ case FS_IOC32_SETVERSION:
++ cmd = FS_IOC_SETVERSION;
++ break;
++ default:
++ break;
++ }
++
++ return selinux_file_ioctl(file, cmd, arg);
++}
++
+ static int default_noexec __ro_after_init;
+
+ static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
+@@ -3814,7 +3835,17 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
+ if (default_noexec &&
+ (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+ int rc = 0;
+- if (vma_is_initial_heap(vma)) {
++ /*
++ * We don't use the vma_is_initial_heap() helper as it has
++ * a history of problems and is currently broken on systems
++ * where there is no heap, e.g. brk == start_brk. Before
++ * replacing the conditional below with vma_is_initial_heap(),
++ * or something similar, please ensure that the logic is the
++ * same as what we have below or you have tested every possible
++ * corner case you can think to test.
++ */
++ if (vma->vm_start >= vma->vm_mm->start_brk &&
++ vma->vm_end <= vma->vm_mm->brk) {
+ rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+ PROCESS__EXECHEAP, NULL);
+ } else if (!vma->vm_file && (vma_is_initial_stack(vma) ||
+@@ -4667,6 +4698,13 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
+ return -EINVAL;
+ addr4 = (struct sockaddr_in *)address;
+ if (family_sa == AF_UNSPEC) {
++ if (family == PF_INET6) {
++ /* Length check from inet6_bind_sk() */
++ if (addrlen < SIN6_LEN_RFC2133)
++ return -EINVAL;
++ /* Family check from __inet6_bind() */
++ goto err_af;
++ }
+ /* see __inet_bind(), we only want to allow
+ * AF_UNSPEC if the address is INADDR_ANY
+ */
+@@ -6515,8 +6553,8 @@ static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen
+ */
+ static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+ {
+- return __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_SELINUX,
+- ctx, ctxlen, 0);
++ return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SELINUX,
++ ctx, ctxlen, 0, NULL);
+ }
+
+ static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+@@ -7036,6 +7074,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(file_permission, selinux_file_permission),
+ LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
+ LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
++ LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
+ LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
+ LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
+ LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
+diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
+index d5495134a5b9e8..1d3cf45d4550ed 100644
+--- a/security/selinux/include/audit.h
++++ b/security/selinux/include/audit.h
+@@ -21,12 +21,14 @@
+ * @op: the operator the rule uses
+ * @rulestr: the text "target" of the rule
+ * @rule: pointer to the new rule structure returned via this
++ * @gfp: GFP flag used for kmalloc
+ *
+ * Returns 0 if successful, -errno if not. On success, the rule structure
+ * will be allocated internally. The caller must free this structure with
+ * selinux_audit_rule_free() after use.
+ */
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
++ gfp_t gfp);
+
+ /**
+ * selinux_audit_rule_free - free a selinux audit rule structure.
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 6fa640263216fc..2c23a5a2860860 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -2135,7 +2135,6 @@ static struct file_system_type sel_fs_type = {
+ .kill_sb = sel_kill_sb,
+ };
+
+-static struct vfsmount *selinuxfs_mount __ro_after_init;
+ struct path selinux_null __ro_after_init;
+
+ static int __init init_sel_fs(void)
+@@ -2157,18 +2156,21 @@ static int __init init_sel_fs(void)
+ return err;
+ }
+
+- selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
+- if (IS_ERR(selinuxfs_mount)) {
++ selinux_null.mnt = kern_mount(&sel_fs_type);
++ if (IS_ERR(selinux_null.mnt)) {
+ pr_err("selinuxfs: could not mount!\n");
+- err = PTR_ERR(selinuxfs_mount);
+- selinuxfs_mount = NULL;
++ err = PTR_ERR(selinux_null.mnt);
++ selinux_null.mnt = NULL;
++ return err;
+ }
++
+ selinux_null.dentry = d_hash_and_lookup(selinux_null.mnt->mnt_root,
+ &null_name);
+ if (IS_ERR(selinux_null.dentry)) {
+ pr_err("selinuxfs: could not lookup null!\n");
+ err = PTR_ERR(selinux_null.dentry);
+ selinux_null.dentry = NULL;
++ return err;
+ }
+
+ return err;
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 1eeffc66ea7d7a..379ac7b5c7098c 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -3497,7 +3497,8 @@ void selinux_audit_rule_free(void *vrule)
+ }
+ }
+
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++ gfp_t gfp)
+ {
+ struct selinux_state *state = &selinux_state;
+ struct selinux_policy *policy;
+@@ -3538,7 +3539,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+ return -EINVAL;
+ }
+
+- tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
++ tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
+ if (!tmprule)
+ return -ENOMEM;
+ context_init(&tmprule->au_ctxt);
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 65130a791f5730..4625674f0e95b8 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -1312,7 +1312,8 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap,
+ check_star = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ check_priv = 1;
+- if (size != TRANS_TRUE_SIZE ||
++ if (!S_ISDIR(d_backing_inode(dentry)->i_mode) ||
++ size != TRANS_TRUE_SIZE ||
+ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+ rc = -EINVAL;
+ } else
+@@ -2853,6 +2854,15 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
+ if (value == NULL || size > SMK_LONGLABEL || size == 0)
+ return -EINVAL;
+
++ if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
++ if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE ||
++ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
++ return -EINVAL;
++
++ nsp->smk_flags |= SMK_INODE_TRANSMUTE;
++ return 0;
++ }
++
+ skp = smk_import_entry(value, size);
+ if (IS_ERR(skp))
+ return PTR_ERR(skp);
+@@ -3759,12 +3769,18 @@ static int smack_unix_stream_connect(struct sock *sock,
+ }
+ }
+
+- /*
+- * Cross reference the peer labels for SO_PEERSEC.
+- */
+ if (rc == 0) {
++ /*
++ * Cross reference the peer labels for SO_PEERSEC.
++ */
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
++
++ /*
++ * new/child/established socket must inherit listening socket labels
++ */
++ nsp->smk_out = osp->smk_out;
++ nsp->smk_in = osp->smk_in;
+ }
+
+ return rc;
+@@ -4344,7 +4360,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+ rcu_read_unlock();
+
+ if (hskp == NULL)
+- rc = netlbl_req_setattr(req, &skp->smk_netlabel);
++ rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel);
+ else
+ netlbl_req_delattr(req);
+
+@@ -4606,11 +4622,13 @@ static int smack_post_notification(const struct cred *w_cred,
+ * @op: required testing operator (=, !=, >, <, ...)
+ * @rulestr: smack label to be audited
+ * @vrule: pointer to save our own audit rule representation
++ * @gfp: type of the memory for the allocation
+ *
+ * Prepare to audit cases where (@field @op @rulestr) is true.
+ * The label to be audited is created if necessary.
+ */
+-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++ gfp_t gfp)
+ {
+ struct smack_known *skp;
+ char **rule = (char **)vrule;
+@@ -4760,8 +4778,8 @@ static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+
+ static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+ {
+- return __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK,
+- ctx, ctxlen, 0);
++ return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK,
++ ctx, ctxlen, 0, NULL);
+ }
+
+ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+@@ -4973,6 +4991,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
+
+ LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security),
+ LSM_HOOK_INIT(file_ioctl, smack_file_ioctl),
++ LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl),
+ LSM_HOOK_INIT(file_lock, smack_file_lock),
+ LSM_HOOK_INIT(file_fcntl, smack_file_fcntl),
+ LSM_HOOK_INIT(mmap_file, smack_mmap_file),
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index e22aad7604e8ac..5dd1e164f9b13d 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -932,7 +932,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ }
+ if (rc >= 0) {
+ old_cat = skp->smk_netlabel.attr.mls.cat;
+- skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
++ rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat);
+ skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
+ synchronize_rcu();
+ netlbl_catmap_free(old_cat);
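
The smk_set_cipso change publishes the new category map with rcu_assign_pointer() and frees the old one only after synchronize_rcu() — the standard replace-then-reclaim sequence. In general form (a sketch using the kernel primitives' names, not buildable outside the tree):

    static void rcu_replace_and_free(struct netlbl_lsm_catmap **slot,
                                     struct netlbl_lsm_catmap *new_cat)
    {
            struct netlbl_lsm_catmap *old_cat = *slot;

            rcu_assign_pointer(*slot, new_cat); /* readers now see new_cat */
            synchronize_rcu();                  /* wait out existing readers */
            netlbl_catmap_free(old_cat);        /* safe to reclaim */
    }
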
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index 57ee70ae50f24a..ea3140d510ecbf 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -2649,13 +2649,14 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ {
+ int error = buffer_len;
+ size_t avail_len = buffer_len;
+- char *cp0 = head->write_buf;
++ char *cp0;
+ int idx;
+
+ if (!head->write)
+ return -EINVAL;
+ if (mutex_lock_interruptible(&head->io_sem))
+ return -EINTR;
++ cp0 = head->write_buf;
+ head->read_user_buf_avail = 0;
+ idx = tomoyo_read_lock();
+ /* Read a line and dispatch it to the policy handler. */
+diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
+index 90b53500a236bd..aed9e3ef2c9ecb 100644
+--- a/security/tomoyo/domain.c
++++ b/security/tomoyo/domain.c
+@@ -723,10 +723,13 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
+ ee->r.obj = &ee->obj;
+ ee->obj.path1 = bprm->file->f_path;
+ /* Get symlink's pathname of program. */
+- retval = -ENOENT;
+ exename.name = tomoyo_realpath_nofollow(original_name);
+- if (!exename.name)
+- goto out;
++ if (!exename.name) {
++ /* Fall back to realpath if the symlink's pathname does not exist. */
++ exename.name = tomoyo_realpath_from_path(&bprm->file->f_path);
++ if (!exename.name)
++ goto out;
++ }
+ tomoyo_fill_path_info(&exename);
+ retry:
+ /* Check 'aggregator' directive. */
+diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
+index 25006fddc964b4..298d182759c2dc 100644
+--- a/security/tomoyo/tomoyo.c
++++ b/security/tomoyo/tomoyo.c
+@@ -568,6 +568,7 @@ static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
+ LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
+ LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
++ LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
+ LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
+ LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
+ LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
+diff --git a/sound/core/Makefile b/sound/core/Makefile
+index a6b444ee283264..f6526b33713756 100644
+--- a/sound/core/Makefile
++++ b/sound/core/Makefile
+@@ -32,7 +32,6 @@ snd-ump-objs := ump.o
+ snd-ump-$(CONFIG_SND_UMP_LEGACY_RAWMIDI) += ump_convert.o
+ snd-timer-objs := timer.o
+ snd-hrtimer-objs := hrtimer.o
+-snd-rtctimer-objs := rtctimer.o
+ snd-hwdep-objs := hwdep.o
+ snd-seq-device-objs := seq_device.o
+
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 59c8658966d4cb..dd4bdb39782cda 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1553,12 +1553,16 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
+ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- int change;
++ int err, change;
+ struct user_element *ue = kcontrol->private_data;
+ unsigned int size = ue->elem_data_size;
+ char *dst = ue->elem_data +
+ snd_ctl_get_ioff(kcontrol, &ucontrol->id) * size;
+
++ err = sanity_check_input_values(ue->card, ucontrol, &ue->info, false);
++ if (err < 0)
++ return err;
++
+ change = memcmp(&ucontrol->value, dst, size) != 0;
+ if (change)
+ memcpy(dst, &ucontrol->value, size);
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 0b2f04dcb58979..e2f302e55bbb20 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -56,7 +56,7 @@ struct snd_info_private_data {
+ };
+
+ static int snd_info_version_init(void);
+-static void snd_info_disconnect(struct snd_info_entry *entry);
++static void snd_info_clear_entries(struct snd_info_entry *entry);
+
+ /*
+
+@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
+ {
+ if (!card)
+ return;
+- mutex_lock(&info_mutex);
++
+ proc_remove(card->proc_root_link);
+- card->proc_root_link = NULL;
+ if (card->proc_root)
+- snd_info_disconnect(card->proc_root);
++ proc_remove(card->proc_root->p);
++
++ mutex_lock(&info_mutex);
++ if (card->proc_root)
++ snd_info_clear_entries(card->proc_root);
++ card->proc_root_link = NULL;
++ card->proc_root = NULL;
+ mutex_unlock(&info_mutex);
+ }
+
+@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
+ }
+ EXPORT_SYMBOL(snd_info_create_card_entry);
+
+-static void snd_info_disconnect(struct snd_info_entry *entry)
++static void snd_info_clear_entries(struct snd_info_entry *entry)
+ {
+ struct snd_info_entry *p;
+
+ if (!entry->p)
+ return;
+ list_for_each_entry(p, &entry->children, list)
+- snd_info_disconnect(p);
+- proc_remove(entry->p);
++ snd_info_clear_entries(p);
+ entry->p = NULL;
+ }
+
+@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
+ if (!entry)
+ return;
+ if (entry->p) {
++ proc_remove(entry->p);
+ mutex_lock(&info_mutex);
+- snd_info_disconnect(entry);
++ snd_info_clear_entries(entry);
+ mutex_unlock(&info_mutex);
+ }
+
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 22c0d217b8608f..81186af3ac625c 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -312,8 +312,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
+ card->number = idx;
+ #ifdef MODULE
+ WARN_ON(!module);
+- card->module = module;
+ #endif
++ card->module = module;
+ INIT_LIST_HEAD(&card->devices);
+ init_rwsem(&card->controls_rwsem);
+ rwlock_init(&card->ctl_files_rwlock);
+@@ -523,6 +523,14 @@ void snd_card_disconnect(struct snd_card *card)
+ }
+ spin_unlock(&card->files_lock);
+
++#ifdef CONFIG_PM
++ /* Wake up sleepers here, before the other callbacks, to avoid
++ * potential deadlocks with other locks (e.g. in kctls); this
++ * signals the shutdown so that sleepers abort immediately.
++ */
++ wake_up_all(&card->power_sleep);
++#endif
++
+ /* notify all connected devices about disconnection */
+ /* at this point, they cannot respond to any calls except release() */
+
+@@ -538,6 +546,11 @@ void snd_card_disconnect(struct snd_card *card)
+ synchronize_irq(card->sync_irq);
+
+ snd_info_card_disconnect(card);
++#ifdef CONFIG_SND_DEBUG
++ debugfs_remove(card->debugfs_root);
++ card->debugfs_root = NULL;
++#endif
++
+ if (card->registered) {
+ device_del(&card->card_dev);
+ card->registered = false;
+@@ -550,7 +563,6 @@ void snd_card_disconnect(struct snd_card *card)
+ mutex_unlock(&snd_card_mutex);
+
+ #ifdef CONFIG_PM
+- wake_up(&card->power_sleep);
+ snd_power_sync_ref(card);
+ #endif
+ }
+@@ -591,10 +603,6 @@ static int snd_card_do_free(struct snd_card *card)
+ dev_warn(card->dev, "unable to free card info\n");
+ /* Not fatal error */
+ }
+-#ifdef CONFIG_SND_DEBUG
+- debugfs_remove(card->debugfs_root);
+- card->debugfs_root = NULL;
+-#endif
+ if (card->release_completion)
+ complete(card->release_completion);
+ if (!card->managed)
+@@ -658,13 +666,19 @@ void snd_card_free(struct snd_card *card)
+ }
+ EXPORT_SYMBOL(snd_card_free);
+
++/* check if the character is in the valid ASCII range */
++static inline bool safe_ascii_char(char c)
++{
++ return isascii(c) && isalnum(c);
++}
++
+ /* retrieve the last word of shortname or longname */
+ static const char *retrieve_id_from_card_name(const char *name)
+ {
+ const char *spos = name;
+
+ while (*name) {
+- if (isspace(*name) && isalnum(name[1]))
++ if (isspace(*name) && safe_ascii_char(name[1]))
+ spos = name + 1;
+ name++;
+ }
+@@ -691,12 +705,12 @@ static void copy_valid_id_string(struct snd_card *card, const char *src,
+ {
+ char *id = card->id;
+
+- while (*nid && !isalnum(*nid))
++ while (*nid && !safe_ascii_char(*nid))
+ nid++;
+ if (isdigit(*nid))
+ *id++ = isalpha(*src) ? *src : 'D';
+ while (*nid && (size_t)(id - card->id) < sizeof(card->id) - 1) {
+- if (isalnum(*nid))
++ if (safe_ascii_char(*nid))
+ *id++ = *nid;
+ nid++;
+ }
+@@ -792,7 +806,7 @@ static ssize_t id_store(struct device *dev, struct device_attribute *attr,
+
+ for (idx = 0; idx < copy; idx++) {
+ c = buf[idx];
+- if (!isalnum(c) && c != '_' && c != '-')
++ if (!safe_ascii_char(c) && c != '_' && c != '-')
+ return -EINVAL;
+ }
+ memcpy(buf1, buf, copy);
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index e0f034e7275cd5..e4bcecdf89b7ec 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -37,16 +37,18 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
+ };
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+
++static void snd_jack_remove_debugfs(struct snd_jack *jack);
++
+ static int snd_jack_dev_disconnect(struct snd_device *device)
+ {
+-#ifdef CONFIG_SND_JACK_INPUT_DEV
+ struct snd_jack *jack = device->device_data;
+
+- mutex_lock(&jack->input_dev_lock);
+- if (!jack->input_dev) {
+- mutex_unlock(&jack->input_dev_lock);
++ snd_jack_remove_debugfs(jack);
++
++#ifdef CONFIG_SND_JACK_INPUT_DEV
++ guard(mutex)(&jack->input_dev_lock);
++ if (!jack->input_dev)
+ return 0;
+- }
+
+ /* If the input device is registered with the input subsystem
+ * then we need to use a different deallocator. */
+@@ -55,7 +57,6 @@ static int snd_jack_dev_disconnect(struct snd_device *device)
+ else
+ input_free_device(jack->input_dev);
+ jack->input_dev = NULL;
+- mutex_unlock(&jack->input_dev_lock);
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+ return 0;
+ }
+@@ -92,11 +93,9 @@ static int snd_jack_dev_register(struct snd_device *device)
+ snprintf(jack->name, sizeof(jack->name), "%s %s",
+ card->shortname, jack->id);
+
+- mutex_lock(&jack->input_dev_lock);
+- if (!jack->input_dev) {
+- mutex_unlock(&jack->input_dev_lock);
++ guard(mutex)(&jack->input_dev_lock);
++ if (!jack->input_dev)
+ return 0;
+- }
+
+ jack->input_dev->name = jack->name;
+
+@@ -121,7 +120,6 @@ static int snd_jack_dev_register(struct snd_device *device)
+ if (err == 0)
+ jack->registered = 1;
+
+- mutex_unlock(&jack->input_dev_lock);
+ return err;
+ }
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+@@ -387,10 +385,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+ return 0;
+ }
+
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+- debugfs_remove(jack_kctl->jack_debugfs_root);
+- jack_kctl->jack_debugfs_root = NULL;
++ struct snd_jack_kctl *jack_kctl;
++
++ list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
++ debugfs_remove(jack_kctl->jack_debugfs_root);
++ jack_kctl->jack_debugfs_root = NULL;
++ }
+ }
+ #else /* CONFIG_SND_JACK_INJECTION_DEBUG */
+ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+@@ -399,7 +401,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+ return 0;
+ }
+
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+ }
+ #endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
+@@ -410,7 +412,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
+
+ jack_kctl = kctl->private_data;
+ if (jack_kctl) {
+- snd_jack_debugfs_clear_inject_node(jack_kctl);
+ list_del(&jack_kctl->list);
+ kfree(jack_kctl);
+ }
+@@ -503,8 +504,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
+ .dev_free = snd_jack_dev_free,
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
+ .dev_register = snd_jack_dev_register,
+- .dev_disconnect = snd_jack_dev_disconnect,
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
++ .dev_disconnect = snd_jack_dev_disconnect,
+ };
+
+ if (initial_kctl) {
+@@ -586,14 +587,9 @@ EXPORT_SYMBOL(snd_jack_new);
+ void snd_jack_set_parent(struct snd_jack *jack, struct device *parent)
+ {
+ WARN_ON(jack->registered);
+- mutex_lock(&jack->input_dev_lock);
+- if (!jack->input_dev) {
+- mutex_unlock(&jack->input_dev_lock);
+- return;
+- }
+-
+- jack->input_dev->dev.parent = parent;
+- mutex_unlock(&jack->input_dev_lock);
++ guard(mutex)(&jack->input_dev_lock);
++ if (jack->input_dev)
++ jack->input_dev->dev.parent = parent;
+ }
+ EXPORT_SYMBOL(snd_jack_set_parent);
+
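
Several of the jack.c hunks above convert manual mutex_lock()/mutex_unlock() pairs to the scope-based guard() macro from <linux/cleanup.h>, which releases the lock automatically when the variable leaves scope and lets the early returns shed their unlock boilerplate. The idiom in isolation (a kernel-only sketch):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);
    static int example_state;

    static int read_state(void)
    {
            guard(mutex)(&example_lock);    /* unlocked on every return */
            if (example_state < 0)
                    return -EINVAL;         /* no explicit unlock needed */
            return example_state;
    }
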
+diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
+index dae2da3808351b..abc99ae3332824 100644
+--- a/sound/core/oss/mixer_oss.c
++++ b/sound/core/oss/mixer_oss.c
+@@ -967,8 +967,8 @@ static void snd_mixer_oss_slot_free(struct snd_mixer_oss_slot *chn)
+ struct slot *p = chn->private_data;
+ if (p) {
+ if (p->allocated && p->assigned) {
+- kfree_const(p->assigned->name);
+- kfree_const(p->assigned);
++ kfree(p->assigned->name);
++ kfree(p->assigned);
+ }
+ kfree(p);
+ }
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 20bb2d7c8d4bf6..6d0c9c37796c22 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -253,6 +253,7 @@ static const char * const snd_pcm_state_names[] = {
+ STATE(DRAINING),
+ STATE(PAUSED),
+ STATE(SUSPENDED),
++ STATE(DISCONNECTED),
+ };
+
+ static const char * const snd_pcm_access_names[] = {
+diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
+index 494ec0c207fad1..62489677f3947e 100644
+--- a/sound/core/pcm_dmaengine.c
++++ b/sound/core/pcm_dmaengine.c
+@@ -349,6 +349,20 @@ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ }
+ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+
++int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
++{
++ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++ struct dma_tx_state state;
++ enum dma_status status;
++
++ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++ if (status != DMA_PAUSED)
++ dmaengine_synchronize(prtd->dma_chan);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_sync_stop);
++
+ /**
+ * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+ * @substream: PCM substream
+@@ -358,6 +372,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+ {
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++ struct dma_tx_state state;
++ enum dma_status status;
++
++ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++ if (status == DMA_PAUSED)
++ dmaengine_terminate_async(prtd->dma_chan);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ kfree(prtd);
+@@ -378,6 +398,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+ {
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++ struct dma_tx_state state;
++ enum dma_status status;
++
++ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++ if (status == DMA_PAUSED)
++ dmaengine_terminate_async(prtd->dma_chan);
+
+ dmaengine_synchronize(prtd->dma_chan);
+ dma_release_channel(prtd->dma_chan);
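
Both close paths now terminate a channel left in DMA_PAUSED before synchronizing, since dmaengine_synchronize() on its own does not reap a paused transfer. The repeated logic, condensed into one hypothetical helper local to this file:

    static void dmaengine_pcm_drain_chan(struct dmaengine_pcm_runtime_data *prtd)
    {
            struct dma_tx_state state;

            /* a paused transfer must be terminated before we can sync */
            if (dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state) ==
                DMA_PAUSED)
                    dmaengine_terminate_async(prtd->dma_chan);
            dmaengine_synchronize(prtd->dma_chan);
    }
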
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index bd9ddf412b4655..cc21c483c4a579 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1783,6 +1783,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
+ snd_pcm_state_t state)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
++ if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
++ return -EBADFD;
+ if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
+ return -ENOSYS;
+ runtime->trigger_master = substream;
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 42a70514105018..e115fe18363495 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -537,6 +537,9 @@ static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
+ return NULL;
+ if (! dest->accept_input)
+ goto __not_avail;
++ if (snd_seq_ev_is_ump(event))
++ return dest; /* ok - no filter checks */
++
+ if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
+ ! test_bit(event->type, dest->event_filter))
+ goto __not_avail;
+diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
+index 18320a248aa7da..78dcb0ea155827 100644
+--- a/sound/core/seq/seq_midi.c
++++ b/sound/core/seq/seq_midi.c
+@@ -113,6 +113,12 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
+ return 0;
+ }
+
++/* callback for snd_seq_dump_var_event(), bridging to dump_midi() */
++static int __dump_midi(void *ptr, void *buf, int count)
++{
++ return dump_midi(ptr, buf, count);
++}
++
+ static int event_process_midi(struct snd_seq_event *ev, int direct,
+ void *private_data, int atomic, int hop)
+ {
+@@ -132,7 +138,7 @@ static int event_process_midi(struct snd_seq_event *ev, int direct,
+ pr_debug("ALSA: seq_midi: invalid sysex event flags = 0x%x\n", ev->flags);
+ return 0;
+ }
+- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream);
++ snd_seq_dump_var_event(ev, __dump_midi, substream);
+ snd_midi_event_reset_decode(msynth->parser);
+ } else {
+ if (msynth->parser == NULL)
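
The bridge function added here (and the matching one in seq_virmidi.c below) replaces a call through a cast function pointer, (snd_seq_dump_func_t)dump_midi; calling through an incompatible pointer type is undefined behaviour and trips kernel control-flow-integrity checks. The wrapper gives the callback the exact expected signature. The pattern in miniature (a generic C sketch, not the ALSA code):

    typedef int (*dump_func_t)(void *ptr, void *buf, int count);

    struct sink { int written; };

    static int sink_write(struct sink *s, const char *buf, int count)
    {
            s->written += count;
            return 0;
    }

    /* matching-signature wrapper: no function-pointer cast, so
     * indirect-call checking (e.g. kCFI) sees a valid target */
    static int sink_write_bridge(void *ptr, void *buf, int count)
    {
            return sink_write(ptr, buf, count);
    }
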
+diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
+index b111382f697aa6..9e36738c0dd049 100644
+--- a/sound/core/seq/seq_ports.h
++++ b/sound/core/seq/seq_ports.h
+@@ -7,6 +7,7 @@
+ #define __SND_SEQ_PORTS_H
+
+ #include <sound/seq_kernel.h>
++#include <sound/ump_convert.h>
+ #include "seq_lock.h"
+
+ /* list of 'exported' ports */
+@@ -42,17 +43,6 @@ struct snd_seq_port_subs_info {
+ int (*close)(void *private_data, struct snd_seq_port_subscribe *info);
+ };
+
+-/* context for converting from legacy control event to UMP packet */
+-struct snd_seq_ump_midi2_bank {
+- bool rpn_set;
+- bool nrpn_set;
+- bool bank_set;
+- unsigned char cc_rpn_msb, cc_rpn_lsb;
+- unsigned char cc_nrpn_msb, cc_nrpn_lsb;
+- unsigned char cc_data_msb, cc_data_lsb;
+- unsigned char cc_bank_msb, cc_bank_lsb;
+-};
+-
+ struct snd_seq_client_port {
+
+ struct snd_seq_addr addr; /* client/port number */
+@@ -88,7 +78,7 @@ struct snd_seq_client_port {
+ unsigned char ump_group;
+
+ #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
+- struct snd_seq_ump_midi2_bank midi2_bank[16]; /* per channel */
++ struct ump_cvt_to_ump_bank midi2_bank[16]; /* per channel */
+ #endif
+ };
+
+diff --git a/sound/core/seq/seq_ump_client.c b/sound/core/seq/seq_ump_client.c
+index 2db371d79930d0..eaf7181b9af5bb 100644
+--- a/sound/core/seq/seq_ump_client.c
++++ b/sound/core/seq/seq_ump_client.c
+@@ -28,6 +28,7 @@ struct seq_ump_group {
+ int group; /* group index (0-based) */
+ unsigned int dir_bits; /* directions */
+ bool active; /* activeness */
++ bool valid; /* valid group (referred by blocks) */
+ char name[64]; /* seq port name */
+ };
+
+@@ -213,6 +214,13 @@ static void fill_port_info(struct snd_seq_port_info *port,
+ sprintf(port->name, "Group %d", group->group + 1);
+ }
+
++/* skip non-existing group for static blocks */
++static bool skip_group(struct seq_ump_client *client, struct seq_ump_group *group)
++{
++ return !group->valid &&
++ (client->ump->info.flags & SNDRV_UMP_EP_INFO_STATIC_BLOCKS);
++}
++
+ /* create a new sequencer port per UMP group */
+ static int seq_ump_group_init(struct seq_ump_client *client, int group_index)
+ {
+@@ -221,6 +229,9 @@ static int seq_ump_group_init(struct seq_ump_client *client, int group_index)
+ struct snd_seq_port_callback pcallbacks;
+ int err;
+
++ if (skip_group(client, group))
++ return 0;
++
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+@@ -258,6 +269,9 @@ static void update_port_infos(struct seq_ump_client *client)
+ goto error;
+
+ for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++) {
++ if (skip_group(client, &client->groups[i]))
++ continue;
++
+ old->addr.client = client->seq_client;
+ old->addr.port = i;
+ err = snd_seq_kernel_client_ctl(client->seq_client,
+@@ -295,6 +309,7 @@ static void update_group_attrs(struct seq_ump_client *client)
+ group->dir_bits = 0;
+ group->active = 0;
+ group->group = i;
++ group->valid = false;
+ }
+
+ list_for_each_entry(fb, &client->ump->block_list, list) {
+@@ -302,6 +317,7 @@ static void update_group_attrs(struct seq_ump_client *client)
+ break;
+ group = &client->groups[fb->info.first_group];
+ for (i = 0; i < fb->info.num_groups; i++, group++) {
++ group->valid = true;
+ if (fb->info.active)
+ group->active = 1;
+ switch (fb->info.direction) {
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index b141024830ecc8..4dd540cbb1cbbc 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -157,7 +157,7 @@ static void ump_system_to_one_param_ev(const union snd_ump_midi1_msg *val,
+ static void ump_system_to_songpos_ev(const union snd_ump_midi1_msg *val,
+ struct snd_seq_event *ev)
+ {
+- ev->data.control.value = (val->system.parm1 << 7) | val->system.parm2;
++ ev->data.control.value = (val->system.parm2 << 7) | val->system.parm1;
+ }
+
+ /* Encoders for 0xf0 - 0xff */
+@@ -368,6 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
+ struct snd_seq_ump_event ev_cvt;
+ const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
+ union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
++ struct ump_cvt_to_ump_bank *cc;
+
+ ev_cvt = *event;
+ memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
+@@ -387,11 +388,29 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
+ midi2->paf.data = upscale_7_to_32bit(midi1->paf.data);
+ break;
+ case UMP_MSG_STATUS_CC:
++ cc = &dest_port->midi2_bank[midi1->note.channel];
++ switch (midi1->cc.index) {
++ case UMP_CC_BANK_SELECT:
++ cc->bank_set = 1;
++ cc->cc_bank_msb = midi1->cc.data;
++ return 0; // skip
++ case UMP_CC_BANK_SELECT_LSB:
++ cc->bank_set = 1;
++ cc->cc_bank_lsb = midi1->cc.data;
++ return 0; // skip
++ }
+ midi2->cc.index = midi1->cc.index;
+ midi2->cc.data = upscale_7_to_32bit(midi1->cc.data);
+ break;
+ case UMP_MSG_STATUS_PROGRAM:
+ midi2->pg.program = midi1->pg.program;
++ cc = &dest_port->midi2_bank[midi1->note.channel];
++ if (cc->bank_set) {
++ midi2->pg.bank_valid = 1;
++ midi2->pg.bank_msb = cc->cc_bank_msb;
++ midi2->pg.bank_lsb = cc->cc_bank_lsb;
++ cc->bank_set = 0;
++ }
+ break;
+ case UMP_MSG_STATUS_CHANNEL_PRESSURE:
+ midi2->caf.data = upscale_7_to_32bit(midi1->caf.data);
+@@ -419,6 +438,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
+ struct snd_seq_ump_event ev_cvt;
+ union snd_ump_midi1_msg *midi1 = (union snd_ump_midi1_msg *)ev_cvt.ump;
+ const union snd_ump_midi2_msg *midi2 = (const union snd_ump_midi2_msg *)event->ump;
++ int err;
+ u16 v;
+
+ ev_cvt = *event;
+@@ -428,7 +448,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
+ midi1->note.group = midi2->note.group;
+ midi1->note.status = midi2->note.status;
+ midi1->note.channel = midi2->note.channel;
+- switch (midi2->note.status << 4) {
++ switch (midi2->note.status) {
+ case UMP_MSG_STATUS_NOTE_ON:
+ case UMP_MSG_STATUS_NOTE_OFF:
+ midi1->note.note = midi2->note.note;
+@@ -443,6 +463,24 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
+ midi1->cc.data = downscale_32_to_7bit(midi2->cc.data);
+ break;
+ case UMP_MSG_STATUS_PROGRAM:
++ if (midi2->pg.bank_valid) {
++ midi1->cc.status = UMP_MSG_STATUS_CC;
++ midi1->cc.index = UMP_CC_BANK_SELECT;
++ midi1->cc.data = midi2->pg.bank_msb;
++ err = __snd_seq_deliver_single_event(dest, dest_port,
++ (struct snd_seq_event *)&ev_cvt,
++ atomic, hop);
++ if (err < 0)
++ return err;
++ midi1->cc.index = UMP_CC_BANK_SELECT_LSB;
++ midi1->cc.data = midi2->pg.bank_lsb;
++ err = __snd_seq_deliver_single_event(dest, dest_port,
++ (struct snd_seq_event *)&ev_cvt,
++ atomic, hop);
++ if (err < 0)
++ return err;
++ midi1->note.status = midi2->note.status;
++ }
+ midi1->pg.program = midi2->pg.program;
+ break;
+ case UMP_MSG_STATUS_CHANNEL_PRESSURE:
+@@ -691,6 +729,7 @@ static int system_ev_to_ump_midi1(const struct snd_seq_event *event,
+ union snd_ump_midi1_msg *data,
+ unsigned char status)
+ {
++ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
+ data->system.status = status;
+ return 1;
+ }
+@@ -701,6 +740,7 @@ static int system_1p_ev_to_ump_midi1(const struct snd_seq_event *event,
+ union snd_ump_midi1_msg *data,
+ unsigned char status)
+ {
++ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
+ data->system.status = status;
+ data->system.parm1 = event->data.control.value & 0x7f;
+ return 1;
+@@ -712,9 +752,10 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
+ union snd_ump_midi1_msg *data,
+ unsigned char status)
+ {
++ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
+ data->system.status = status;
+- data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
+- data->system.parm2 = event->data.control.value & 0x7f;
++ data->system.parm1 = event->data.control.value & 0x7f;
++ data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
+ return 1;
+ }
+
+@@ -748,26 +789,45 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event,
+ return 1;
+ }
+
++static void reset_rpn(struct ump_cvt_to_ump_bank *cc)
++{
++ cc->rpn_set = 0;
++ cc->nrpn_set = 0;
++ cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
++ cc->cc_data_msb = cc->cc_data_lsb = 0;
++ cc->cc_data_msb_set = cc->cc_data_lsb_set = 0;
++}
++
+ /* set up the MIDI2 RPN/NRPN packet data from the parsed info */
+-static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
+- union snd_ump_midi2_msg *data)
++static int fill_rpn(struct ump_cvt_to_ump_bank *cc,
++ union snd_ump_midi2_msg *data,
++ unsigned char channel,
++ bool flush)
+ {
++ if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set))
++ return 0; // skip
++ /* when not flushing, wait for complete data set */
++ if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set))
++ return 0; // skip
++
+ if (cc->rpn_set) {
+ data->rpn.status = UMP_MSG_STATUS_RPN;
+ data->rpn.bank = cc->cc_rpn_msb;
+ data->rpn.index = cc->cc_rpn_lsb;
+- cc->rpn_set = 0;
+- cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
+- } else {
++ } else if (cc->nrpn_set) {
+ data->rpn.status = UMP_MSG_STATUS_NRPN;
+ data->rpn.bank = cc->cc_nrpn_msb;
+ data->rpn.index = cc->cc_nrpn_lsb;
+- cc->nrpn_set = 0;
+- cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0;
++ } else {
++ return 0; // skip
+ }
++
+ data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
+ cc->cc_data_lsb);
+- cc->cc_data_msb = cc->cc_data_lsb = 0;
++ data->rpn.channel = channel;
++
++ reset_rpn(cc);
++ return 1;
+ }
+
+ /* convert CC event to MIDI 2.0 UMP */
+@@ -779,29 +839,39 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
+ unsigned char channel = event->data.control.channel & 0x0f;
+ unsigned char index = event->data.control.param & 0x7f;
+ unsigned char val = event->data.control.value & 0x7f;
+- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
++ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
++ int ret;
+
+ /* process special CC's (bank/rpn/nrpn) */
+ switch (index) {
+ case UMP_CC_RPN_MSB:
++ ret = fill_rpn(cc, data, channel, true);
+ cc->rpn_set = 1;
+ cc->cc_rpn_msb = val;
+- return 0; // skip
++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++ reset_rpn(cc);
++ return ret;
+ case UMP_CC_RPN_LSB:
++ ret = fill_rpn(cc, data, channel, true);
+ cc->rpn_set = 1;
+ cc->cc_rpn_lsb = val;
+- return 0; // skip
++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++ reset_rpn(cc);
++ return ret;
+ case UMP_CC_NRPN_MSB:
++ ret = fill_rpn(cc, data, channel, true);
+ cc->nrpn_set = 1;
+ cc->cc_nrpn_msb = val;
+- return 0; // skip
++ return ret;
+ case UMP_CC_NRPN_LSB:
++ ret = fill_rpn(cc, data, channel, true);
+ cc->nrpn_set = 1;
+ cc->cc_nrpn_lsb = val;
+- return 0; // skip
++ return ret;
+ case UMP_CC_DATA:
++ cc->cc_data_msb_set = 1;
+ cc->cc_data_msb = val;
+- return 0; // skip
++ return fill_rpn(cc, data, channel, false);
+ case UMP_CC_BANK_SELECT:
+ cc->bank_set = 1;
+ cc->cc_bank_msb = val;
+@@ -811,11 +881,9 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
+ cc->cc_bank_lsb = val;
+ return 0; // skip
+ case UMP_CC_DATA_LSB:
++ cc->cc_data_lsb_set = 1;
+ cc->cc_data_lsb = val;
+- if (!(cc->rpn_set || cc->nrpn_set))
+- return 0; // skip
+- fill_rpn(cc, data);
+- return 1;
++ return fill_rpn(cc, data, channel, false);
+ }
+
+ data->cc.status = status;
+@@ -844,7 +912,7 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
+ unsigned char status)
+ {
+ unsigned char channel = event->data.control.channel & 0x0f;
+- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
++ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
+
+ data->pg.status = status;
+ data->pg.channel = channel;
+@@ -854,7 +922,6 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
+ data->pg.bank_msb = cc->cc_bank_msb;
+ data->pg.bank_lsb = cc->cc_bank_lsb;
+ cc->bank_set = 0;
+- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
+ }
+ return 1;
+ }
+@@ -882,8 +949,9 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
+ {
+ unsigned char channel = event->data.control.channel & 0x0f;
+ unsigned char index = event->data.control.param & 0x7f;
+- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
++ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
+ unsigned char msb, lsb;
++ int ret;
+
+ msb = (event->data.control.value >> 7) & 0x7f;
+ lsb = event->data.control.value & 0x7f;
+@@ -897,28 +965,27 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
+ cc->cc_bank_lsb = lsb;
+ return 0; // skip
+ case UMP_CC_RPN_MSB:
+- cc->cc_rpn_msb = msb;
+- fallthrough;
+ case UMP_CC_RPN_LSB:
+- cc->rpn_set = 1;
++ ret = fill_rpn(cc, data, channel, true);
++ cc->cc_rpn_msb = msb;
+ cc->cc_rpn_lsb = lsb;
+- return 0; // skip
++ cc->rpn_set = 1;
++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++ reset_rpn(cc);
++ return ret;
+ case UMP_CC_NRPN_MSB:
+- cc->cc_nrpn_msb = msb;
+- fallthrough;
+ case UMP_CC_NRPN_LSB:
++ ret = fill_rpn(cc, data, channel, true);
++ cc->cc_nrpn_msb = msb;
+ cc->nrpn_set = 1;
+ cc->cc_nrpn_lsb = lsb;
+- return 0; // skip
++ return ret;
+ case UMP_CC_DATA:
+- cc->cc_data_msb = msb;
+- fallthrough;
+ case UMP_CC_DATA_LSB:
++ cc->cc_data_msb_set = cc->cc_data_lsb_set = 1;
++ cc->cc_data_msb = msb;
+ cc->cc_data_lsb = lsb;
+- if (!(cc->rpn_set || cc->nrpn_set))
+- return 0; // skip
+- fill_rpn(cc, data);
+- return 1;
++ return fill_rpn(cc, data, channel, false);
+ }
+
+ data->cc.status = UMP_MSG_STATUS_CC;
+@@ -978,7 +1045,7 @@ static int system_2p_ev_to_ump_midi2(const struct snd_seq_event *event,
+ union snd_ump_midi2_msg *data,
+ unsigned char status)
+ {
+- return system_1p_ev_to_ump_midi1(event, dest_port,
++ return system_2p_ev_to_ump_midi1(event, dest_port,
+ (union snd_ump_midi1_msg *)data,
+ status);
+ }
+@@ -1035,6 +1102,8 @@ static const struct seq_ev_to_ump seq_ev_ump_encoders[] = {
+ system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ { SNDRV_SEQ_EVENT_SENSING, UMP_SYSTEM_STATUS_ACTIVE_SENSING,
+ system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
++ { SNDRV_SEQ_EVENT_RESET, UMP_SYSTEM_STATUS_RESET,
++ system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ };
+
+ static const struct seq_ev_to_ump *find_ump_encoder(int type)
+@@ -1148,44 +1217,53 @@ static int cvt_sysex_to_ump(struct snd_seq_client *dest,
+ {
+ struct snd_seq_ump_event ev_cvt;
+ unsigned char status;
+- u8 buf[6], *xbuf;
++ u8 buf[8], *xbuf;
+ int offset = 0;
+ int len, err;
++ bool finished = false;
+
+ if (!snd_seq_ev_is_variable(event))
+ return 0;
+
+ setup_ump_event(&ev_cvt, event);
+- for (;;) {
++ while (!finished) {
+ len = snd_seq_expand_var_event_at(event, sizeof(buf), buf, offset);
+ if (len <= 0)
+ break;
+- if (WARN_ON(len > 6))
++ if (WARN_ON(len > sizeof(buf)))
+ break;
+- offset += len;
++
+ xbuf = buf;
++ status = UMP_SYSEX_STATUS_CONTINUE;
++ /* truncate the sysex start-marker */
+ if (*xbuf == UMP_MIDI1_MSG_SYSEX_START) {
+ status = UMP_SYSEX_STATUS_START;
+- xbuf++;
+ len--;
+- if (len > 0 && xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
++ offset++;
++ xbuf++;
++ }
++
++ /* if the last byte of this packet or the 1st byte of the next packet
++ * is the end-marker, finish the transfer with this packet
++ */
++ if (len > 0 && len < 8 &&
++ xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
++ if (status == UMP_SYSEX_STATUS_START)
+ status = UMP_SYSEX_STATUS_SINGLE;
+- len--;
+- }
+- } else {
+- if (xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
++ else
+ status = UMP_SYSEX_STATUS_END;
+- len--;
+- } else {
+- status = UMP_SYSEX_STATUS_CONTINUE;
+- }
++ len--;
++ finished = true;
+ }
++
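++ /* a UMP SysEx7 packet carries at most 6 data bytes */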
++ len = min(len, 6);
+ fill_sysex7_ump(dest_port, ev_cvt.ump, status, xbuf, len);
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
++ offset += len;
+ }
+ return 0;
+ }
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 1b9260108e4821..1678737f11be79 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -62,6 +62,13 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
+ /*
+ * decode input event and put to read buffer of each opened file
+ */
++
++/* callback for snd_seq_dump_var_event(), bridging to snd_rawmidi_receive() */
++static int dump_to_rawmidi(void *ptr, void *buf, int count)
++{
++ return snd_rawmidi_receive(ptr, buf, count);
++}
++
+ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+ struct snd_seq_event *ev,
+ bool atomic)
+@@ -80,7 +87,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+ if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
+ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ continue;
+- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
++ snd_seq_dump_var_event(ev, dump_to_rawmidi, vmidi->substream);
+ snd_midi_event_reset_decode(vmidi->parser);
+ } else {
+ len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index e6e551d4a29e01..230babace502d4 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -553,6 +553,16 @@ static int snd_timer_start1(struct snd_timer_instance *timeri,
+ goto unlock;
+ }
+
++ /* check the actual time for the start tick;
++ * bail out with an error if it's way too low (< 100us)
++ */
++ if (start && !(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
++ if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000) {
++ result = -EINVAL;
++ goto unlock;
++ }
++ }
++
+ if (start)
+ timeri->ticks = timeri->cticks = ticks;
+ else if (!timeri->cticks)
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 3bef1944e955ff..8a7ecec74b5d61 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -685,10 +685,17 @@ static void seq_notify_protocol(struct snd_ump_endpoint *ump)
+ */
+ int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol)
+ {
++ unsigned int type;
++
+ protocol &= ump->info.protocol_caps;
+ if (protocol == ump->info.protocol)
+ return 0;
+
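++ /* only MIDI 1.0 / MIDI 2.0 protocol types can be set; ignore anything else */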
++ type = protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK;
++ if (type != SNDRV_UMP_EP_INFO_PROTO_MIDI1 &&
++ type != SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++ return 0;
++
+ ump->info.protocol = protocol;
+ ump_dbg(ump, "New protocol = %x (caps = %x)\n",
+ protocol, ump->info.protocol_caps);
+@@ -726,6 +733,12 @@ static void fill_fb_info(struct snd_ump_endpoint *ump,
+ info->block_id, info->direction, info->active,
+ info->first_group, info->num_groups, info->midi_ci_version,
+ info->sysex8_streams, info->flags);
++
++ if ((info->flags & SNDRV_UMP_BLOCK_IS_MIDI1) && info->num_groups != 1) {
++ info->num_groups = 1;
++ ump_dbg(ump, "FB %d: corrected groups to 1 for MIDI1\n",
++ info->block_id);
++ }
+ }
+
+ /* check whether the FB info gets updated by the current message */
+@@ -799,6 +812,13 @@ static int ump_handle_fb_name_msg(struct snd_ump_endpoint *ump,
+ if (!fb)
+ return -ENODEV;
+
++ if (ump->parsed &&
++ (ump->info.flags & SNDRV_UMP_EP_INFO_STATIC_BLOCKS)) {
++ ump_dbg(ump, "Skipping static FB name update (blk#%d)\n",
++ fb->info.block_id);
++ return 0;
++ }
++
+ ret = ump_append_string(ump, fb->info.name, sizeof(fb->info.name),
+ buf->raw, 3);
+ /* notify the FB name update to sequencer, too */
+@@ -960,6 +980,14 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
+ if (err < 0)
+ ump_dbg(ump, "Unable to get UMP EP stream config\n");
+
++ /* If no protocol is set for some reason, assume a valid one */
++ if (!(ump->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK)) {
++ if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++ ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI2;
++ else if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
++ ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
++ }
++
+ /* Query and create blocks from Function Blocks */
+ for (blk = 0; blk < ump->info.num_blocks; blk++) {
+ err = create_block_from_fb_info(ump, blk);
+@@ -985,7 +1013,7 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
+ struct snd_ump_endpoint *ump = substream->rmidi->private_data;
+ int dir = substream->stream;
+ int group = ump->legacy_mapping[substream->number];
+- int err;
++ int err = 0;
+
+ mutex_lock(&ump->open_mutex);
+ if (ump->legacy_substreams[dir][group]) {
+@@ -1009,7 +1037,7 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
+ spin_unlock_irq(&ump->legacy_locks[dir]);
+ unlock:
+ mutex_unlock(&ump->open_mutex);
+- return 0;
++ return err;
+ }
+
+ static int snd_ump_legacy_close(struct snd_rawmidi_substream *substream)
+diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
+index de04799fdb69aa..0fe13d03165686 100644
+--- a/sound/core/ump_convert.c
++++ b/sound/core/ump_convert.c
+@@ -287,25 +287,42 @@ static int cvt_legacy_system_to_ump(struct ump_cvt_to_ump *cvt,
+ return 4;
+ }
+
+-static void fill_rpn(struct ump_cvt_to_ump_bank *cc,
+- union snd_ump_midi2_msg *midi2)
++static void reset_rpn(struct ump_cvt_to_ump_bank *cc)
+ {
++ cc->rpn_set = 0;
++ cc->nrpn_set = 0;
++ cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
++ cc->cc_data_msb = cc->cc_data_lsb = 0;
++ cc->cc_data_msb_set = cc->cc_data_lsb_set = 0;
++}
++
++static int fill_rpn(struct ump_cvt_to_ump_bank *cc,
++ union snd_ump_midi2_msg *midi2,
++ bool flush)
++{
++ if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set))
++ return 0; // skip
++ /* when not flushing, wait for complete data set */
++ if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set))
++ return 0; // skip
++
+ if (cc->rpn_set) {
+ midi2->rpn.status = UMP_MSG_STATUS_RPN;
+ midi2->rpn.bank = cc->cc_rpn_msb;
+ midi2->rpn.index = cc->cc_rpn_lsb;
+- cc->rpn_set = 0;
+- cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
+- } else {
++ } else if (cc->nrpn_set) {
+ midi2->rpn.status = UMP_MSG_STATUS_NRPN;
+ midi2->rpn.bank = cc->cc_nrpn_msb;
+ midi2->rpn.index = cc->cc_nrpn_lsb;
+- cc->nrpn_set = 0;
+- cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0;
++ } else {
++ return 0; // skip
+ }
++
+ midi2->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
+ cc->cc_data_lsb);
+- cc->cc_data_msb = cc->cc_data_lsb = 0;
++
++ reset_rpn(cc);
++ return 1;
+ }
+
+ /* convert to a MIDI 1.0 Channel Voice message */
+@@ -318,6 +335,7 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ struct ump_cvt_to_ump_bank *cc;
+ union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)data;
+ unsigned char status, channel;
++ int ret;
+
+ BUILD_BUG_ON(sizeof(union snd_ump_midi1_msg) != 4);
+ BUILD_BUG_ON(sizeof(union snd_ump_midi2_msg) != 8);
+@@ -358,24 +376,33 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ case UMP_MSG_STATUS_CC:
+ switch (buf[1]) {
+ case UMP_CC_RPN_MSB:
++ ret = fill_rpn(cc, midi2, true);
+ cc->rpn_set = 1;
+ cc->cc_rpn_msb = buf[2];
+- return 0; // skip
++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++ reset_rpn(cc);
++ return ret;
+ case UMP_CC_RPN_LSB:
++ ret = fill_rpn(cc, midi2, true);
+ cc->rpn_set = 1;
+ cc->cc_rpn_lsb = buf[2];
+- return 0; // skip
++ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
++ reset_rpn(cc);
++ return ret;
+ case UMP_CC_NRPN_MSB:
++ ret = fill_rpn(cc, midi2, true);
+ cc->nrpn_set = 1;
+ cc->cc_nrpn_msb = buf[2];
+- return 0; // skip
++ return ret;
+ case UMP_CC_NRPN_LSB:
++ ret = fill_rpn(cc, midi2, true);
+ cc->nrpn_set = 1;
+ cc->cc_nrpn_lsb = buf[2];
+- return 0; // skip
++ return ret;
+ case UMP_CC_DATA:
++ cc->cc_data_msb_set = 1;
+ cc->cc_data_msb = buf[2];
+- return 0; // skip
++ return fill_rpn(cc, midi2, false);
+ case UMP_CC_BANK_SELECT:
+ cc->bank_set = 1;
+ cc->cc_bank_msb = buf[2];
+@@ -385,12 +412,9 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ cc->cc_bank_lsb = buf[2];
+ return 0; // skip
+ case UMP_CC_DATA_LSB:
++ cc->cc_data_lsb_set = 1;
+ cc->cc_data_lsb = buf[2];
+- if (cc->rpn_set || cc->nrpn_set)
+- fill_rpn(cc, midi2);
+- else
+- return 0; // skip
+- break;
++ return fill_rpn(cc, midi2, false);
+ default:
+ midi2->cc.index = buf[1];
+ midi2->cc.data = upscale_7_to_32bit(buf[2]);
+@@ -404,7 +428,6 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ midi2->pg.bank_msb = cc->cc_bank_msb;
+ midi2->pg.bank_lsb = cc->cc_bank_lsb;
+ cc->bank_set = 0;
+- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
+ }
+ break;
+ case UMP_MSG_STATUS_CHANNEL_PRESSURE:
+diff --git a/sound/drivers/pcmtest.c b/sound/drivers/pcmtest.c
+index b59b78a0922409..b8bff5522bce20 100644
+--- a/sound/drivers/pcmtest.c
++++ b/sound/drivers/pcmtest.c
+@@ -397,7 +397,6 @@ static int snd_pcmtst_pcm_close(struct snd_pcm_substream *substream)
+ struct pcmtst_buf_iter *v_iter = substream->runtime->private_data;
+
+ timer_shutdown_sync(&v_iter->timer_instance);
+- v_iter->substream = NULL;
+ playback_capture_test = !v_iter->is_buf_corrupted;
+ kfree(v_iter);
+ return 0;
+@@ -435,6 +434,7 @@ static int snd_pcmtst_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ // We can't call timer_shutdown_sync here, as it is forbidden to sleep here
+ v_iter->suspend = true;
++ timer_delete(&v_iter->timer_instance);
+ break;
+ }
+
+@@ -512,12 +512,22 @@ static int snd_pcmtst_ioctl(struct snd_pcm_substream *substream, unsigned int cm
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+ }
+
++static int snd_pcmtst_sync_stop(struct snd_pcm_substream *substream)
++{
++ struct pcmtst_buf_iter *v_iter = substream->runtime->private_data;
++
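++ /* wait until a possibly pending timer callback has finished */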
++ timer_delete_sync(&v_iter->timer_instance);
++
++ return 0;
++}
++
+ static const struct snd_pcm_ops snd_pcmtst_playback_ops = {
+ .open = snd_pcmtst_pcm_open,
+ .close = snd_pcmtst_pcm_close,
+ .trigger = snd_pcmtst_pcm_trigger,
+ .hw_params = snd_pcmtst_pcm_hw_params,
+ .ioctl = snd_pcmtst_ioctl,
++ .sync_stop = snd_pcmtst_sync_stop,
+ .hw_free = snd_pcmtst_pcm_hw_free,
+ .prepare = snd_pcmtst_pcm_prepare,
+ .pointer = snd_pcmtst_pcm_pointer,
+@@ -530,6 +540,7 @@ static const struct snd_pcm_ops snd_pcmtst_capture_ops = {
+ .hw_params = snd_pcmtst_pcm_hw_params,
+ .hw_free = snd_pcmtst_pcm_hw_free,
+ .ioctl = snd_pcmtst_ioctl,
++ .sync_stop = snd_pcmtst_sync_stop,
+ .prepare = snd_pcmtst_pcm_prepare,
+ .pointer = snd_pcmtst_pcm_pointer,
+ };
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index a13c0b408aadfc..5f0f8d9c08d1e7 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -77,6 +77,8 @@
+ // overrun. Actual device can skip more, then this module stops the packet streaming.
+ #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
+
++static void pcm_period_work(struct work_struct *work);
++
+ /**
+ * amdtp_stream_init - initialize an AMDTP stream structure
+ * @s: the AMDTP stream to initialize
+@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
+ s->flags = flags;
+ s->context = ERR_PTR(-1);
+ mutex_init(&s->mutex);
++ INIT_WORK(&s->period_work, pcm_period_work);
+ s->packet_index = 0;
+
+ init_waitqueue_head(&s->ready_wait);
+@@ -347,6 +350,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload);
+ */
+ void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
+ {
++ cancel_work_sync(&s->period_work);
+ s->pcm_buffer_pointer = 0;
+ s->pcm_period_pointer = 0;
+ }
+@@ -611,19 +615,21 @@ static void update_pcm_pointers(struct amdtp_stream *s,
+ // The program in user process should periodically check the status of intermediate
+ // buffer associated to PCM substream to process PCM frames in the buffer, instead
+ // of receiving notification of period elapsed by poll wait.
+- if (!pcm->runtime->no_period_wakeup) {
+- if (in_softirq()) {
+- // In software IRQ context for 1394 OHCI.
+- snd_pcm_period_elapsed(pcm);
+- } else {
+- // In process context of ALSA PCM application under acquired lock of
+- // PCM substream.
+- snd_pcm_period_elapsed_under_stream_lock(pcm);
+- }
+- }
++ if (!pcm->runtime->no_period_wakeup)
++ queue_work(system_highpri_wq, &s->period_work);
+ }
+ }
+
++static void pcm_period_work(struct work_struct *work)
++{
++ struct amdtp_stream *s = container_of(work, struct amdtp_stream,
++ period_work);
++ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
++
++ if (pcm)
++ snd_pcm_period_elapsed(pcm);
++}
++
+ static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
+ bool sched_irq)
+ {
+@@ -773,10 +779,14 @@ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
+ } else {
+ unsigned int dbc_interval;
+
+- if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
+- dbc_interval = s->ctx_data.tx.dbc_interval;
+- else
+- dbc_interval = *data_blocks;
++ if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
++ if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
++ dbc_interval = s->ctx_data.tx.dbc_interval;
++ else
++ dbc_interval = *data_blocks;
++ } else {
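++ /* with this flag, the dbc field counts accumulated CIP payload quadlets */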
++ dbc_interval = payload_length / sizeof(__be32);
++ }
+
+ lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
+ }
+@@ -951,7 +961,7 @@ static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *des
+ // to the reason.
+ unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
+ IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
+- lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
++ lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
+ }
+ if (lost) {
+ dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
+@@ -1848,11 +1858,14 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ {
+ struct amdtp_stream *irq_target = d->irq_target;
+
+- // Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
+ if (irq_target && amdtp_stream_running(irq_target)) {
+- // In software IRQ context, the call causes dead-lock to disable the tasklet
+- // synchronously.
+- if (!in_softirq())
++ // use a workqueue to prevent an AB/BA deadlock on the
++ // substream lock:
++ // fw_iso_context_flush_completions() acquires the
++ // lock via ohci_flush_iso_completions(), while
++ // amdtp-stream process_rx_packets() attempts to
++ // acquire the same lock via snd_pcm_period_elapsed()
++ if (current_work() != &s->period_work)
+ fw_iso_context_flush_completions(irq_target->context);
+ }
+
+@@ -1908,6 +1921,7 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
+ return;
+ }
+
++ cancel_work_sync(&s->period_work);
+ fw_iso_context_stop(s->context);
+ fw_iso_context_destroy(s->context);
+ s->context = ERR_PTR(-1);
+diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
+index b7ff44751ab93b..775db3fc4959f5 100644
+--- a/sound/firewire/amdtp-stream.h
++++ b/sound/firewire/amdtp-stream.h
+@@ -37,6 +37,9 @@
+ * the value of current SYT_INTERVAL; e.g. initial value is not zero.
+ * @CIP_UNAWARE_SYT: For outgoing packet, the value in SYT field of CIP is 0xffff.
+ * For incoming packet, the value in SYT field of CIP is not handled.
++ * @CIP_DBC_IS_PAYLOAD_QUADLETS: Available for incoming packet, and only effective with
++ * CIP_DBC_IS_END_EVENT flag. The value of dbc field is the number of accumulated quadlets
++ * in CIP payload, instead of the number of accumulated data blocks.
+ */
+ enum cip_flags {
+ CIP_NONBLOCKING = 0x00,
+@@ -51,6 +54,7 @@ enum cip_flags {
+ CIP_NO_HEADER = 0x100,
+ CIP_UNALIGHED_DBC = 0x200,
+ CIP_UNAWARE_SYT = 0x400,
++ CIP_DBC_IS_PAYLOAD_QUADLETS = 0x800,
+ };
+
+ /**
+@@ -187,6 +191,7 @@ struct amdtp_stream {
+
+ /* For a PCM substream processing. */
+ struct snd_pcm_substream *pcm;
++ struct work_struct period_work;
+ snd_pcm_uframes_t pcm_buffer_pointer;
+ unsigned int pcm_period_pointer;
+ unsigned int pcm_frame_multiplier;
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index 2633a4bb1d85db..fe0958f9969c30 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -354,8 +354,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
+ struct hdac_stream *res = NULL;
+
+ /* make a non-zero unique key for the substream */
+- int key = (substream->pcm->device << 16) | (substream->number << 2) |
+- (substream->stream + 1);
++ int key = (substream->number << 2) | (substream->stream + 1);
++
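++ /* fold in the PCM device number only when a PCM is actually attached */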
++ if (substream->pcm)
++ key |= (substream->pcm->device << 16);
+
+ spin_lock_irq(&bus->reg_lock);
+ list_for_each_entry(azx_dev, &bus->stream_list, list) {
+@@ -658,17 +660,15 @@ void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
+ struct hdac_stream *s;
+ bool inited = false;
+ u64 cycle_last = 0;
+- int i = 0;
+
+ list_for_each_entry(s, &bus->stream_list, list) {
+- if (streams & (1 << i)) {
++ if ((streams & (1 << s->index))) {
+ azx_timecounter_init(s, inited, cycle_last);
+ if (!inited) {
+ inited = true;
+ cycle_last = s->tc.cycle_last;
+ }
+ }
+- i++;
+ }
+
+ snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
+@@ -713,14 +713,13 @@ void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
+ unsigned int streams)
+ {
+ struct hdac_bus *bus = azx_dev->bus;
+- int i, nwait, timeout;
++ int nwait, timeout;
+ struct hdac_stream *s;
+
+ for (timeout = 5000; timeout; timeout--) {
+ nwait = 0;
+- i = 0;
+ list_for_each_entry(s, &bus->stream_list, list) {
+- if (!(streams & (1 << i++)))
++ if (!(streams & (1 << s->index)))
+ continue;
+
+ if (start) {
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index 5d8e1d944b0afb..7b276047f85a7d 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ return 0;
+ }
+
++/* a simple sanity check for input values to chmap kcontrol */
++static int chmap_value_check(struct hdac_chmap *hchmap,
++ const struct snd_ctl_elem_value *ucontrol)
++{
++ int i;
++
++ for (i = 0; i < hchmap->channels_max; i++) {
++ if (ucontrol->value.integer.value[i] < 0 ||
++ ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
++ return -EINVAL;
++ }
++ return 0;
++}
++
+ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ unsigned char chmap[8], per_pin_chmap[8];
+ int i, err, ca, prepared = 0;
+
++ err = chmap_value_check(hchmap, ucontrol);
++ if (err < 0)
++ return err;
++
+ /* No monitor is connected in dyn_pcm_assign.
+ * It's invalid to setup the chmap
+ */
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 24a948baf1bc05..e7c2ef6c6b4cb0 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -16,7 +16,7 @@
+ static int dsp_driver;
+
+ module_param(dsp_driver, int, 0444);
+-MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
++MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)");
+
+ #define FLAG_SST BIT(0)
+ #define FLAG_SOF BIT(1)
+@@ -336,6 +336,12 @@ static const struct config_entry config_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
++ {
++ .ident = "Google firmware",
++ .matches = {
++ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++ }
++ },
+ {}
+ }
+ },
+@@ -515,6 +521,16 @@ static const struct config_entry config_table[] = {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = PCI_DEVICE_ID_INTEL_HDA_MTL,
+ },
++ /* ArrowLake-S */
++ {
++ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++ .device = PCI_DEVICE_ID_INTEL_HDA_ARL_S,
++ },
++ /* ArrowLake */
++ {
++ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++ .device = PCI_DEVICE_ID_INTEL_HDA_ARL,
++ },
+ #endif
+
+ /* Lunar Lake */
+@@ -541,9 +557,32 @@ static const struct config_entry *snd_intel_dsp_find_config
+ if (table->codec_hid) {
+ int i;
+
+- for (i = 0; i < table->codec_hid->num_codecs; i++)
+- if (acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
++ for (i = 0; i < table->codec_hid->num_codecs; i++) {
++ struct nhlt_acpi_table *nhlt;
++ bool ssp_found = false;
++
++ if (!acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
++ continue;
++
++ nhlt = intel_nhlt_init(&pci->dev);
++ if (!nhlt) {
++ dev_warn(&pci->dev, "%s: NHLT table not found, skipped HID %s\n",
++ __func__, table->codec_hid->codecs[i]);
++ continue;
++ }
++
++ if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP) &&
++ intel_nhlt_ssp_endpoint_mask(nhlt, NHLT_DEVICE_I2S))
++ ssp_found = true;
++
++ intel_nhlt_free(nhlt);
++
++ if (ssp_found)
+ break;
++
++ dev_warn(&pci->dev, "%s: no valid SSP found for HID %s, skipped\n",
++ __func__, table->codec_hid->codecs[i]);
++ }
+ if (i == table->codec_hid->num_codecs)
+ continue;
+ }
+diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
+index 2c4dfc0b7e342c..696a958d93e9c3 100644
+--- a/sound/hda/intel-nhlt.c
++++ b/sound/hda/intel-nhlt.c
+@@ -238,7 +238,7 @@ EXPORT_SYMBOL(intel_nhlt_ssp_mclk_mask);
+
+ static struct nhlt_specific_cfg *
+ nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
+- u32 rate, u8 vbps, u8 bps)
++ u32 rate, u8 vbps, u8 bps, bool ignore_vbps)
+ {
+ struct nhlt_fmt_cfg *cfg = fmt->fmt_config;
+ struct wav_fmt *wfmt;
+@@ -255,8 +255,12 @@ nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
+ dev_dbg(dev, "Endpoint format: ch=%d fmt=%d/%d rate=%d\n",
+ wfmt->channels, _vbps, _bps, wfmt->samples_per_sec);
+
++ /*
++ * When looking for an exact match of the configuration, ignore the
++ * vbps from the NHLT table when ignore_vbps is true
++ */
+ if (wfmt->channels == num_ch && wfmt->samples_per_sec == rate &&
+- vbps == _vbps && bps == _bps)
++ (ignore_vbps || vbps == _vbps) && bps == _bps)
+ return &cfg->config;
+
+ cfg = (struct nhlt_fmt_cfg *)(cfg->config.caps + cfg->config.size);
+@@ -289,6 +293,7 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ {
+ struct nhlt_specific_cfg *cfg;
+ struct nhlt_endpoint *epnt;
++ bool ignore_vbps = false;
+ struct nhlt_fmt *fmt;
+ int i;
+
+@@ -298,7 +303,26 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ dev_dbg(dev, "Looking for configuration:\n");
+ dev_dbg(dev, " vbus_id=%d link_type=%d dir=%d, dev_type=%d\n",
+ bus_id, link_type, dir, dev_type);
+- dev_dbg(dev, " ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
++ if (link_type == NHLT_LINK_DMIC && bps == 32 && (vbps == 24 || vbps == 32)) {
++ /*
++ * The DMIC hardware supports only one type of 32 bit sample
++ * size: 24 bit sampling on the MSB side, with bits[1:0]
++ * used to indicate the channel number.
++ * It has been observed that some NHLT tables specify the vbps
++ * as 32 while others use 24.
++ * The formats these variations describe are identical; the
++ * hardware is configured and behaves the same way.
++ * Note: when the samples are assumed to be vbps=32, the 'noise'
++ * introduced by the lower two bits (the channel number) has no
++ * real-life implication on audio quality.
++ */
++ dev_dbg(dev,
++ " ch=%d fmt=%d rate=%d (vbps is ignored for DMIC 32bit format)\n",
++ num_ch, bps, rate);
++ ignore_vbps = true;
++ } else {
++ dev_dbg(dev, " ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
++ }
+ dev_dbg(dev, "Endpoint count=%d\n", nhlt->endpoint_count);
+
+ epnt = (struct nhlt_endpoint *)nhlt->desc;
+@@ -307,7 +331,8 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ if (nhlt_check_ep_match(dev, epnt, bus_id, link_type, dir, dev_type)) {
+ fmt = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
+
+- cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate, vbps, bps);
++ cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate,
++ vbps, bps, ignore_vbps);
+ if (cfg)
+ return cfg;
+ }
+diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
+index b57d72ea4503fa..4e376994bf78b9 100644
+--- a/sound/hda/intel-sdw-acpi.c
++++ b/sound/hda/intel-sdw-acpi.c
+@@ -41,6 +41,8 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
+ "intel-quirk-mask",
+ &quirk_mask);
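++ /* done with the link node; release its reference */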
+
++ fwnode_handle_put(link);
++
+ if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
+ return false;
+
+diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
+index 850544725da796..d55c3dc229c0e8 100644
+--- a/sound/isa/gus/gus_pcm.c
++++ b/sound/isa/gus/gus_pcm.c
+@@ -378,7 +378,7 @@ static int snd_gf1_pcm_playback_copy(struct snd_pcm_substream *substream,
+
+ bpos = get_bpos(pcmp, voice, pos, len);
+ if (bpos < 0)
+- return pos;
++ return bpos;
+ if (copy_from_iter(runtime->dma_area + bpos, len, src) != len)
+ return -EFAULT;
+ return playback_copy_ack(substream, bpos, len);
+@@ -395,7 +395,7 @@ static int snd_gf1_pcm_playback_silence(struct snd_pcm_substream *substream,
+
+ bpos = get_bpos(pcmp, voice, pos, len);
+ if (bpos < 0)
+- return pos;
++ return bpos;
+ snd_pcm_format_set_silence(runtime->format, runtime->dma_area + bpos,
+ bytes_to_samples(runtime, count));
+ return playback_copy_ack(substream, bpos, len);
+diff --git a/sound/pci/asihpi/hpimsgx.c b/sound/pci/asihpi/hpimsgx.c
+index d0caef2994818e..b68e6bfbbfbab5 100644
+--- a/sound/pci/asihpi/hpimsgx.c
++++ b/sound/pci/asihpi/hpimsgx.c
+@@ -708,7 +708,7 @@ static u16 HPIMSGX__init(struct hpi_message *phm,
+ phr->error = HPI_ERROR_PROCESSING_MESSAGE;
+ return phr->error;
+ }
+- if (hr.error == 0) {
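++ /* also bounds-check the adapter index before indexing the table below */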
++ if (hr.error == 0 && hr.u.s.adapter_index < HPI_MAX_ADAPTERS) {
+ /* the adapter was created successfully
+ save the mapping for future use */
+ hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
+diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
+index fe72e7d7724127..dadeda7758ceeb 100644
+--- a/sound/pci/emu10k1/emu10k1.c
++++ b/sound/pci/emu10k1/emu10k1.c
+@@ -189,8 +189,7 @@ static int snd_emu10k1_suspend(struct device *dev)
+
+ emu->suspend = 1;
+
+- cancel_work_sync(&emu->emu1010.firmware_work);
+- cancel_work_sync(&emu->emu1010.clock_work);
++ cancel_work_sync(&emu->emu1010.work);
+
+ snd_ac97_suspend(emu->ac97);
+
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index d36234b88fb421..941bfbf812ed30 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -255,7 +255,7 @@ lookup_voices(struct snd_emux *emu, struct snd_emu10k1 *hw,
+ /* check if sample is finished playing (non-looping only) */
+ if (bp != best + V_OFF && bp != best + V_FREE &&
+ (vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_SINGLESHOT)) {
+- val = snd_emu10k1_ptr_read(hw, CCCA_CURRADDR, vp->ch) - 64;
++ val = snd_emu10k1_ptr_read(hw, CCCA_CURRADDR, vp->ch);
+ if (val >= vp->reg.loopstart)
+ bp = best + V_OFF;
+ }
+@@ -362,7 +362,7 @@ start_voice(struct snd_emux_voice *vp)
+
+ map = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
+
+- addr = vp->reg.start + 64;
++ addr = vp->reg.start;
+ temp = vp->reg.parm.filterQ;
+ ccca = (temp << 28) | addr;
+ if (vp->apitch < 0xe400)
+@@ -430,9 +430,6 @@ start_voice(struct snd_emux_voice *vp)
+ /* Q & current address (Q 4bit value, MSB) */
+ CCCA, ccca,
+
+- /* cache */
+- CCR, REG_VAL_PUT(CCR_CACHEINVALIDSIZE, 64),
+-
+ /* reset volume */
+ VTFT, vtarget | vp->ftarget,
+ CVCF, vtarget | CVCF_CURRENTFILTER_MASK,
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index de5c41e578e1f4..ade90c7ecd922b 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -732,69 +732,67 @@ static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, int dock,
+ return snd_emu1010_load_firmware_entry(emu, *fw);
+ }
+
+-static void emu1010_firmware_work(struct work_struct *work)
++static void snd_emu1010_load_dock_firmware(struct snd_emu10k1 *emu)
+ {
+- struct snd_emu10k1 *emu;
+- u32 tmp, tmp2, reg;
++ u32 tmp, tmp2;
+ int err;
+
+- emu = container_of(work, struct snd_emu10k1,
+- emu1010.firmware_work);
+- if (emu->card->shutdown)
++ // The docking events clearly arrive prematurely - while the
++ // Dock's FPGA seems to be successfully programmed, the Dock
++ // fails to initialize subsequently if we don't give it some
++ // time to "warm up" here.
++ msleep(200);
++
++ dev_info(emu->card->dev, "emu1010: Loading Audio Dock Firmware\n");
++ /* Return to Audio Dock programming mode */
++ snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
++ EMU_HANA_FPGA_CONFIG_AUDIODOCK);
++ err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw);
++ if (err < 0)
+ return;
+-#ifdef CONFIG_PM_SLEEP
+- if (emu->suspend)
++ snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
++
++ snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp);
++ dev_dbg(emu->card->dev, "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp);
++ if ((tmp & 0x1f) != 0x15) {
++ /* FPGA failed to be programmed */
++ dev_err(emu->card->dev,
++ "emu1010: Loading Audio Dock Firmware failed, reg = 0x%x\n",
++ tmp);
+ return;
+-#endif
++ }
++ dev_info(emu->card->dev, "emu1010: Audio Dock Firmware loaded\n");
++
++ snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
++ snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
++ dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2);
++
++ /* Allow DLL to settle, to sync clocking between 1010 and Dock */
++ msleep(10);
++}
++
++static void emu1010_dock_event(struct snd_emu10k1 *emu)
++{
++ u32 reg;
++
+ snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
+ if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
+ /* Audio Dock attached */
+- /* Return to Audio Dock programming mode */
+- dev_info(emu->card->dev,
+- "emu1010: Loading Audio Dock Firmware\n");
+- snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
+- EMU_HANA_FPGA_CONFIG_AUDIODOCK);
+- err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw);
+- if (err < 0)
+- return;
+- snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
+- snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp);
+- dev_info(emu->card->dev,
+- "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp);
+- if ((tmp & 0x1f) != 0x15) {
+- /* FPGA failed to be programmed */
+- dev_info(emu->card->dev,
+- "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
+- tmp);
+- return;
+- }
+- dev_info(emu->card->dev,
+- "emu1010: Audio Dock Firmware loaded\n");
+- snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
+- snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
+- dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2);
+- /* Sync clocking between 1010 and Dock */
+- /* Allow DLL to settle */
+- msleep(10);
++ snd_emu1010_load_dock_firmware(emu);
+ /* Unmute all. Default is muted after a firmware load */
+ snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
++ } else if (!(reg & EMU_HANA_OPTION_DOCK_ONLINE)) {
++ /* Audio Dock removed */
++ dev_info(emu->card->dev, "emu1010: Audio Dock detached\n");
++ /* The hardware auto-mutes all, so we unmute again */
++ snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+ }
+ }
+
+-static void emu1010_clock_work(struct work_struct *work)
++static void emu1010_clock_event(struct snd_emu10k1 *emu)
+ {
+- struct snd_emu10k1 *emu;
+ struct snd_ctl_elem_id id;
+
+- emu = container_of(work, struct snd_emu10k1,
+- emu1010.clock_work);
+- if (emu->card->shutdown)
+- return;
+-#ifdef CONFIG_PM_SLEEP
+- if (emu->suspend)
+- return;
+-#endif
+-
+ spin_lock_irq(&emu->reg_lock);
+ // This is the only thing that can actually happen.
+ emu->emu1010.clock_source = emu->emu1010.clock_fallback;
+@@ -805,21 +803,40 @@ static void emu1010_clock_work(struct work_struct *work)
+ snd_ctl_notify(emu->card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
+ }
+
+-static void emu1010_interrupt(struct snd_emu10k1 *emu)
++static void emu1010_work(struct work_struct *work)
+ {
++ struct snd_emu10k1 *emu;
+ u32 sts;
+
++ emu = container_of(work, struct snd_emu10k1, emu1010.work);
++ if (emu->card->shutdown)
++ return;
++#ifdef CONFIG_PM_SLEEP
++ if (emu->suspend)
++ return;
++#endif
++
+ snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &sts);
+- if (sts & EMU_HANA_IRQ_DOCK_LOST) {
+- /* Audio Dock removed */
+- dev_info(emu->card->dev, "emu1010: Audio Dock detached\n");
+- /* The hardware auto-mutes all, so we unmute again */
+- snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+- } else if (sts & EMU_HANA_IRQ_DOCK) {
+- schedule_work(&emu->emu1010.firmware_work);
+- }
++
++ // The distinction of the IRQ status bits is unreliable,
++ // so we dispatch later based on option card status.
++ if (sts & (EMU_HANA_IRQ_DOCK | EMU_HANA_IRQ_DOCK_LOST))
++ emu1010_dock_event(emu);
++
+ if (sts & EMU_HANA_IRQ_WCLK_CHANGED)
+- schedule_work(&emu->emu1010.clock_work);
++ emu1010_clock_event(emu);
++}
++
++static void emu1010_interrupt(struct snd_emu10k1 *emu)
++{
++ // We get an interrupt on each GPIO input pin change, but we
++ // care only about the ones triggered by the dedicated pin.
++ u16 sts = inw(emu->port + A_GPIO);
++ u16 bit = emu->card_capabilities->ca0108_chip ? 0x2000 : 0x8000;
++ if (!(sts & bit))
++ return;
++
++ schedule_work(&emu->emu1010.work);
+ }
+
+ /*
+@@ -889,7 +906,7 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
+ snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
+ dev_info(emu->card->dev, "emu1010: Card options = 0x%x\n", reg);
+ if (reg & EMU_HANA_OPTION_DOCK_OFFLINE)
+- schedule_work(&emu->emu1010.firmware_work);
++ snd_emu1010_load_dock_firmware(emu);
+ if (emu->card_capabilities->no_adat) {
+ emu->emu1010.optical_in = 0; /* IN_SPDIF */
+ emu->emu1010.optical_out = 0; /* OUT_SPDIF */
+@@ -960,8 +977,7 @@ static void snd_emu10k1_free(struct snd_card *card)
+ /* Disable 48Volt power to Audio Dock */
+ snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
+ }
+- cancel_work_sync(&emu->emu1010.firmware_work);
+- cancel_work_sync(&emu->emu1010.clock_work);
++ cancel_work_sync(&emu->emu1010.work);
+ release_firmware(emu->firmware);
+ release_firmware(emu->dock_fw);
+ snd_util_memhdr_free(emu->memhdr);
+@@ -1540,8 +1556,7 @@ int snd_emu10k1_create(struct snd_card *card,
+ emu->irq = -1;
+ emu->synth = NULL;
+ emu->get_synth_voice = NULL;
+- INIT_WORK(&emu->emu1010.firmware_work, emu1010_firmware_work);
+- INIT_WORK(&emu->emu1010.clock_work, emu1010_clock_work);
++ INIT_WORK(&emu->emu1010.work, emu1010_work);
+ /* read revision & serial */
+ emu->revision = pci->revision;
+ pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial);
+diff --git a/sound/pci/emu10k1/io.c b/sound/pci/emu10k1/io.c
+index 74df2330015f66..5cb8acf5b158c6 100644
+--- a/sound/pci/emu10k1/io.c
++++ b/sound/pci/emu10k1/io.c
+@@ -285,6 +285,7 @@ static void snd_emu1010_fpga_write_locked(struct snd_emu10k1 *emu, u32 reg, u32
+ outw(value, emu->port + A_GPIO);
+ udelay(10);
+ outw(value | 0x80 , emu->port + A_GPIO); /* High bit clocks the value into the fpga. */
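++ /* presumably the FPGA needs time to latch the value before the next access */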
++ udelay(10);
+ }
+
+ void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value)
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index 0d7502d6e06049..9698ebe3fbc2e7 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -140,7 +140,8 @@ config SND_HDA_SCODEC_CS35L56_I2C
+ depends on I2C
+ depends on ACPI || COMPILE_TEST
+ depends on SND_SOC
+- select CS_DSP
++ select FW_CS_DSP
++ imply SERIAL_MULTI_INSTANTIATE
+ select SND_HDA_GENERIC
+ select SND_SOC_CS35L56_SHARED
+ select SND_HDA_SCODEC_CS35L56
+@@ -154,7 +155,8 @@ config SND_HDA_SCODEC_CS35L56_SPI
+ depends on SPI_MASTER
+ depends on ACPI || COMPILE_TEST
+ depends on SND_SOC
+- select CS_DSP
++ select FW_CS_DSP
++ imply SERIAL_MULTI_INSTANTIATE
+ select SND_HDA_GENERIC
+ select SND_SOC_CS35L56_SHARED
+ select SND_HDA_SCODEC_CS35L56
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index c6031f74409964..b437beae9b5164 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -570,7 +570,7 @@ static void cs35l41_hda_play_done(struct device *dev)
+
+ dev_dbg(dev, "Play (Complete)\n");
+
+- cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1, NULL,
++ cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1,
+ cs35l41->firmware_running);
+ if (cs35l41->firmware_running) {
+ regmap_multi_reg_write(reg, cs35l41_hda_unmute_dsp,
+@@ -589,7 +589,7 @@ static void cs35l41_hda_pause_start(struct device *dev)
+ dev_dbg(dev, "Pause (Start)\n");
+
+ regmap_multi_reg_write(reg, cs35l41_hda_mute, ARRAY_SIZE(cs35l41_hda_mute));
+- cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0, NULL,
++ cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0,
+ cs35l41->firmware_running);
+ }
+
+@@ -1187,7 +1187,7 @@ static void cs35l41_hda_unbind(struct device *dev, struct device *master, void *
+ if (comps[cs35l41->index].dev == dev) {
+ memset(&comps[cs35l41->index], 0, sizeof(*comps));
+ sleep_flags = lock_system_sleep();
+- device_link_remove(&comps->codec->core.dev, cs35l41->dev);
++ device_link_remove(&cs35l41->codec->core.dev, cs35l41->dev);
+ unlock_system_sleep(sleep_flags);
+ }
+ }
+@@ -1668,8 +1668,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
+ if (ret) {
+ dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
+- pm_runtime_disable(cs35l41->dev);
+- goto err;
++ goto err_pm;
+ }
+
+ dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid);
+@@ -1677,6 +1676,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
+ return 0;
+
+ err_pm:
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
+@@ -1695,6 +1695,7 @@ void cs35l41_hda_remove(struct device *dev)
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(cs35l41->dev);
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
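++ /* a function block flagged as MIDI 1.0 can only span a single group */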
+ pm_runtime_disable(cs35l41->dev);
+
+ if (cs35l41->halo_initialized)
+diff --git a/sound/pci/hda/cs35l41_hda_spi.c b/sound/pci/hda/cs35l41_hda_spi.c
+index eb287aa5f78250..d95954ce55d81b 100644
+--- a/sound/pci/hda/cs35l41_hda_spi.c
++++ b/sound/pci/hda/cs35l41_hda_spi.c
+@@ -38,6 +38,7 @@ static const struct spi_device_id cs35l41_hda_spi_id[] = {
+ { "cs35l41-hda", 0 },
+ {}
+ };
++MODULE_DEVICE_TABLE(spi, cs35l41_hda_spi_id);
+
+ static const struct acpi_device_id cs35l41_acpi_hda_match[] = {
+ { "CSC3551", 0 },
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 7adc1d373d65c3..b84f3b3eb1409e 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -29,14 +29,23 @@
+ * ASP1_RX_WL = 24 bits per sample
+ * ASP1_TX_WL = 24 bits per sample
+ * ASP1_RXn_EN 1..3 and ASP1_TXn_EN 1..4 disabled
++ *
++ * Override any Windows-specific mixer settings applied by the firmware.
+ */
+ static const struct reg_sequence cs35l56_hda_dai_config[] = {
+ { CS35L56_ASP1_CONTROL1, 0x00000021 },
+ { CS35L56_ASP1_CONTROL2, 0x20200200 },
+ { CS35L56_ASP1_CONTROL3, 0x00000003 },
++ { CS35L56_ASP1_FRAME_CONTROL1, 0x03020100 },
++ { CS35L56_ASP1_FRAME_CONTROL5, 0x00020100 },
+ { CS35L56_ASP1_DATA_CONTROL5, 0x00000018 },
+ { CS35L56_ASP1_DATA_CONTROL1, 0x00000018 },
+ { CS35L56_ASP1_ENABLES1, 0x00000000 },
++ { CS35L56_ASP1TX1_INPUT, 0x00000018 },
++ { CS35L56_ASP1TX2_INPUT, 0x00000019 },
++ { CS35L56_ASP1TX3_INPUT, 0x00000020 },
++ { CS35L56_ASP1TX4_INPUT, 0x00000028 },
+ };
+
+ static void cs35l56_hda_play(struct cs35l56_hda *cs35l56)
+@@ -132,6 +141,10 @@ static int cs35l56_hda_runtime_resume(struct device *dev)
+ }
+ }
+
++ ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
++ if (ret)
++ goto err;
++
+ return 0;
+
+ err:
+@@ -603,6 +616,8 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
+ ret = cs35l56_wait_for_firmware_boot(&cs35l56->base);
+ if (ret)
+ goto err_powered_up;
++
++ regcache_cache_only(cs35l56->base.regmap, false);
+ }
+
+ /* Disable auto-hibernate so that runtime_pm has control */
+@@ -684,11 +699,11 @@ static void cs35l56_hda_unbind(struct device *dev, struct device *master, void *
+ if (cs35l56->base.fw_patched)
+ cs_dsp_power_down(&cs35l56->cs_dsp);
+
+- cs_dsp_remove(&cs35l56->cs_dsp);
+-
+ if (comps[cs35l56->index].dev == dev)
+ memset(&comps[cs35l56->index], 0, sizeof(*comps));
+
++ cs35l56->codec = NULL;
++
+ dev_dbg(cs35l56->base.dev, "Unbound\n");
+ }
+
+@@ -794,6 +809,9 @@ static int cs35l56_hda_system_resume(struct device *dev)
+
+ cs35l56->suspended = false;
+
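++ /* nothing more to do if the component isn't bound to a codec yet */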
++ if (!cs35l56->codec)
++ return 0;
++
+ ret = cs35l56_is_fw_reload_needed(&cs35l56->base);
+ dev_dbg(cs35l56->base.dev, "fw_reload_needed: %d\n", ret);
+ if (ret > 0) {
+@@ -942,6 +960,8 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int id)
+ if (ret)
+ goto err;
+
++ regcache_cache_only(cs35l56->base.regmap, false);
++
+ ret = cs35l56_set_patch(&cs35l56->base);
+ if (ret)
+ goto err;
+@@ -965,6 +985,9 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int id)
+
+ regmap_multi_reg_write(cs35l56->base.regmap, cs35l56_hda_dai_config,
+ ARRAY_SIZE(cs35l56_hda_dai_config));
++ ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
++ if (ret)
++ goto dsp_err;
+
+ /*
+ * By default only enable one ASP1TXn, where n=amplifier index,
+@@ -978,18 +1001,20 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int id)
+ pm_runtime_mark_last_busy(cs35l56->base.dev);
+ pm_runtime_enable(cs35l56->base.dev);
+
++ cs35l56->base.init_done = true;
++
+ ret = component_add(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+ if (ret) {
+ dev_err(cs35l56->base.dev, "Register component failed: %d\n", ret);
+ goto pm_err;
+ }
+
+- cs35l56->base.init_done = true;
+-
+ return 0;
+
+ pm_err:
+ pm_runtime_disable(cs35l56->base.dev);
++dsp_err:
++ cs_dsp_remove(&cs35l56->cs_dsp);
+ err:
+ gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
+
+@@ -1001,11 +1026,13 @@ void cs35l56_hda_remove(struct device *dev)
+ {
+ struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
+
++ component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
++
+ pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
+ pm_runtime_get_sync(cs35l56->base.dev);
+ pm_runtime_disable(cs35l56->base.dev);
+
+- component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
++ cs_dsp_remove(&cs35l56->cs_dsp);
+
+ kfree(cs35l56->system_name);
+ pm_runtime_put_noidle(cs35l56->base.dev);
+diff --git a/sound/pci/hda/cs35l56_hda_i2c.c b/sound/pci/hda/cs35l56_hda_i2c.c
+index 757a4d193e0fb5..c31f60b0421e54 100644
+--- a/sound/pci/hda/cs35l56_hda_i2c.c
++++ b/sound/pci/hda/cs35l56_hda_i2c.c
+@@ -49,10 +49,19 @@ static const struct i2c_device_id cs35l56_hda_i2c_id[] = {
+ {}
+ };
+
++static const struct acpi_device_id cs35l56_acpi_hda_match[] = {
++ { "CSC3554", 0 },
++ { "CSC3556", 0 },
++ { "CSC3557", 0 },
++ {}
++};
++MODULE_DEVICE_TABLE(acpi, cs35l56_acpi_hda_match);
++
+ static struct i2c_driver cs35l56_hda_i2c_driver = {
+ .driver = {
+- .name = "cs35l56-hda",
+- .pm = &cs35l56_hda_pm_ops,
++ .name = "cs35l56-hda",
++ .acpi_match_table = cs35l56_acpi_hda_match,
++ .pm = &cs35l56_hda_pm_ops,
+ },
+ .id_table = cs35l56_hda_i2c_id,
+ .probe = cs35l56_hda_i2c_probe,
+diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
+index 756aec342eab7f..52c9e04b3c55fa 100644
+--- a/sound/pci/hda/cs35l56_hda_spi.c
++++ b/sound/pci/hda/cs35l56_hda_spi.c
+@@ -49,10 +49,19 @@ static const struct spi_device_id cs35l56_hda_spi_id[] = {
+ {}
+ };
+
++static const struct acpi_device_id cs35l56_acpi_hda_match[] = {
++ { "CSC3554", 0 },
++ { "CSC3556", 0 },
++ { "CSC3557", 0 },
++ {}
++};
++MODULE_DEVICE_TABLE(acpi, cs35l56_acpi_hda_match);
++
+ static struct spi_driver cs35l56_hda_spi_driver = {
+ .driver = {
+- .name = "cs35l56-hda",
+- .pm = &cs35l56_hda_pm_ops,
++ .name = "cs35l56-hda",
++ .acpi_match_table = cs35l56_acpi_hda_match,
++ .pm = &cs35l56_hda_pm_ops,
+ },
+ .id_table = cs35l56_hda_spi_id,
+ .probe = cs35l56_hda_spi_probe,
+diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
+index 463ca06036bfe7..9db45d7c17e5f2 100644
+--- a/sound/pci/hda/hda_cs_dsp_ctl.c
++++ b/sound/pci/hda/hda_cs_dsp_ctl.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/module.h>
+ #include <sound/soc.h>
++#include <linux/cleanup.h>
+ #include <linux/firmware/cirrus/cs_dsp.h>
+ #include <linux/firmware/cirrus/wmfw.h>
+ #include "hda_cs_dsp_ctl.h"
+@@ -97,11 +98,23 @@ static unsigned int wmfw_convert_flags(unsigned int in)
+ return out;
+ }
+
+-static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
++static void hda_cs_dsp_free_kcontrol(struct snd_kcontrol *kctl)
+ {
++ struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
+ struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
++
++ /* NULL priv to prevent a double-free in hda_cs_dsp_control_remove() */
++ cs_ctl->priv = NULL;
++ kfree(ctl);
++}
++
++static void hda_cs_dsp_add_kcontrol(struct cs_dsp_coeff_ctl *cs_ctl,
++ const struct hda_cs_dsp_ctl_info *info,
++ const char *name)
++{
+ struct snd_kcontrol_new kcontrol = {0};
+ struct snd_kcontrol *kctl;
++ struct hda_cs_dsp_coeff_ctl *ctl __free(kfree) = NULL;
+ int ret = 0;
+
+ if (cs_ctl->len > ADSP_MAX_STD_CTRL_SIZE) {
+@@ -110,6 +123,13 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
+ return;
+ }
+
++ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
++ if (!ctl)
++ return;
++
++ ctl->cs_ctl = cs_ctl;
++ ctl->card = info->card;
++
+ kcontrol.name = name;
+ kcontrol.info = hda_cs_dsp_coeff_info;
+ kcontrol.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+@@ -117,20 +137,22 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
+ kcontrol.get = hda_cs_dsp_coeff_get;
+ kcontrol.put = hda_cs_dsp_coeff_put;
+
+- /* Save ctl inside private_data, ctl is owned by cs_dsp,
+- * and will be freed when cs_dsp removes the control */
+ kctl = snd_ctl_new1(&kcontrol, (void *)ctl);
+ if (!kctl)
+ return;
+
+- ret = snd_ctl_add(ctl->card, kctl);
++ kctl->private_free = hda_cs_dsp_free_kcontrol;
++ ctl->kctl = kctl;
++
++ /* snd_ctl_add() calls our private_free on error, which will kfree(ctl) */
++ cs_ctl->priv = no_free_ptr(ctl);
++ ret = snd_ctl_add(info->card, kctl);
+ if (ret) {
+ dev_err(cs_ctl->dsp->dev, "Failed to add KControl %s = %d\n", kcontrol.name, ret);
+ return;
+ }
+
+ dev_dbg(cs_ctl->dsp->dev, "Added KControl: %s\n", kcontrol.name);
+- ctl->kctl = kctl;
+ }
+
+ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+@@ -138,7 +160,6 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ {
+ struct cs_dsp *cs_dsp = cs_ctl->dsp;
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+- struct hda_cs_dsp_coeff_ctl *ctl;
+ const char *region_name;
+ int ret;
+
+@@ -163,15 +184,7 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ " %.*s", cs_ctl->subname_len - skip, cs_ctl->subname + skip);
+ }
+
+- ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+- if (!ctl)
+- return;
+-
+- ctl->cs_ctl = cs_ctl;
+- ctl->card = info->card;
+- cs_ctl->priv = ctl;
+-
+- hda_cs_dsp_add_kcontrol(ctl, name);
++ hda_cs_dsp_add_kcontrol(cs_ctl, info, name);
+ }
+
+ void hda_cs_dsp_add_controls(struct cs_dsp *dsp, const struct hda_cs_dsp_ctl_info *info)
+@@ -203,7 +216,9 @@ void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
+ {
+ struct hda_cs_dsp_coeff_ctl *ctl = cs_ctl->priv;
+
+- kfree(ctl);
++ /* ctl and kctl may already have been removed by ALSA private_free */
++ if (ctl && ctl->kctl)
++ snd_ctl_remove(ctl->card, ctl->kctl);
+ }
+ EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_remove, SND_HDA_CS_DSP_CONTROLS);
+
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index bf685d01259d30..8e8d4c667923c5 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -1383,7 +1383,7 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
+ struct nid_path *path;
+ hda_nid_t pin = pins[i];
+
+- if (!spec->obey_preferred_dacs) {
++ if (!spec->preferred_dacs) {
+ path = snd_hda_get_path_from_idx(codec, path_idx[i]);
+ if (path) {
+ badness += assign_out_path_ctls(codec, path);
+@@ -1395,7 +1395,7 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
+ if (dacs[i]) {
+ if (is_dac_already_used(codec, dacs[i]))
+ badness += bad->shared_primary;
+- } else if (spec->obey_preferred_dacs) {
++ } else if (spec->preferred_dacs) {
+ badness += BAD_NO_PRIMARY_DAC;
+ }
+
+@@ -4956,6 +4956,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
+
++/* forcibly mute the speaker output without caching; return true if updated */
++static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid)
++{
++ if (!nid)
++ return false;
++ if (!nid_has_mute(codec, nid, HDA_OUTPUT))
++ return false; /* no mute, skip */
++ if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
++ snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) &
++ HDA_AMP_MUTE)
++ return false; /* both channels already muted, skip */
++
++ /* direct amp update without caching */
++ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
++ AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT |
++ AC_AMP_SET_RIGHT | HDA_AMP_MUTE);
++ return true;
++}
++
++/**
++ * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs
++ * @codec: the HDA codec
++ *
++ * Forcibly mute the speaker outputs, to be called at suspend or shutdown.
++ *
++ * The mute state done by this function isn't cached, hence the original state
++ * will be restored at resume.
++ *
++ * Return true if the mute state has been changed.
++ */
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec)
++{
++ struct hda_gen_spec *spec = codec->spec;
++ const int *paths;
++ const struct nid_path *path;
++ int i, p, num_paths;
++ bool updated = false;
++
++ /* if already powered off, do nothing */
++ if (!snd_hdac_is_power_on(&codec->core))
++ return false;
++
++ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) {
++ paths = spec->out_paths;
++ num_paths = spec->autocfg.line_outs;
++ } else {
++ paths = spec->speaker_paths;
++ num_paths = spec->autocfg.speaker_outs;
++ }
++
++ for (i = 0; i < num_paths; i++) {
++ path = snd_hda_get_path_from_idx(codec, paths[i]);
++ if (!path)
++ continue;
++ for (p = 0; p < path->depth; p++)
++ if (force_mute_output_path(codec, path->path[p]))
++ updated = true;
++ }
++
++ return updated;
++}
++EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers);
++
+ /**
+ * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
+ * set up the hda_gen_spec
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index a8eea836762990..aed4381f7a619c 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -355,5 +355,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec,
+ int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec,
+ int (*callback)(struct led_classdev *,
+ enum led_brightness));
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec);
+
+ #endif /* __SOUND_HDA_GENERIC_H */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ca765ac4765f4a..134c6f6e0959ae 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1735,9 +1735,11 @@ static int default_bdl_pos_adj(struct azx *chip)
+ /* some exceptions: Atoms seem problematic with value 1 */
+ if (chip->pci->vendor == PCI_VENDOR_ID_INTEL) {
+ switch (chip->pci->device) {
+- case 0x0f04: /* Baytrail */
+- case 0x2284: /* Braswell */
++ case PCI_DEVICE_ID_INTEL_HDA_BYT:
++ case PCI_DEVICE_ID_INTEL_HDA_BSW:
+ return 32;
++ case PCI_DEVICE_ID_INTEL_HDA_APL:
++ return 64;
+ }
+ }
+
+@@ -2218,6 +2220,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ /* https://bugs.launchpad.net/bugs/1821663 */
+ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
++ /* KONTRON SinglePC may cause a stall at runtime resume */
++ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
+ {}
+ };
+ #endif /* CONFIG_PM */
+@@ -2502,6 +2506,8 @@ static const struct pci_device_id azx_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_LNL_P, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) },
+ /* Arrow Lake-S */
+ { PCI_DEVICE_DATA(INTEL, HDA_ARL_S, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) },
++ /* Arrow Lake */
++ { PCI_DEVICE_DATA(INTEL, HDA_ARL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE) },
+ /* Apollolake (Broxton-P) */
+ { PCI_DEVICE_DATA(INTEL, HDA_APL, AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON) },
+ /* Gemini-Lake */
+@@ -2676,7 +2682,7 @@ static const struct pci_device_id azx_ids[] = {
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
+ /* GLENFLY */
+- { PCI_DEVICE(0x6766, PCI_ANY_ID),
++ { PCI_DEVICE(PCI_VENDOR_ID_GLENFLY, PCI_ANY_ID),
+ .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ .class_mask = 0xffffff,
+ .driver_data = AZX_DRIVER_GFHDMI | AZX_DCAPS_POSFIX_LPIB |
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index a889cccdd607cf..5833623f6ffafd 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -42,7 +42,7 @@ struct conexant_spec {
+ unsigned int gpio_led;
+ unsigned int gpio_mute_led_mask;
+ unsigned int gpio_mic_led_mask;
+-
++ bool is_cx8070_sn6140;
+ };
+
+
+@@ -164,6 +164,27 @@ static void cxt_init_gpio_led(struct hda_codec *codec)
+ }
+ }
+
++static void cx_fixup_headset_recog(struct hda_codec *codec)
++{
++ unsigned int mic_present;
++
++ /* fix some headset type recognition failures, e.g. with EDIFIER headsets */
++ /* set micbiasd output current comparator threshold from 66% to 55%. */
++ snd_hda_codec_write(codec, 0x1c, 0, 0x320, 0x010);
++ /* set OFF voltage for DFET from -1.2V to -0.8V, set headset micbias resistor
++ * value adjustment trim from 2.2K ohms to 2.0K ohms.
++ */
++ snd_hda_codec_write(codec, 0x1c, 0, 0x3b0, 0xe10);
++ /* fix headset type recognition failing after reboot */
++ mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++ if (mic_present & AC_PINSENSE_PRESENCE)
++ /* enable headset mic VREF */
++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24);
++ else
++ /* disable headset mic VREF */
++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
++}
++
+ static int cx_auto_init(struct hda_codec *codec)
+ {
+ struct conexant_spec *spec = codec->spec;
+@@ -174,6 +195,9 @@ static int cx_auto_init(struct hda_codec *codec)
+ cxt_init_gpio_led(codec);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+
++ if (spec->is_cx8070_sn6140)
++ cx_fixup_headset_recog(codec);
++
+ return 0;
+ }
+
+@@ -181,6 +205,8 @@ static void cx_auto_shutdown(struct hda_codec *codec)
+ {
+ struct conexant_spec *spec = codec->spec;
+
++ snd_hda_gen_shutup_speakers(codec);
++
+ /* Turn the problematic codec into D3 to avoid spurious noises
+ from the internal speaker during (and after) reboot */
+ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+@@ -192,6 +218,48 @@ static void cx_auto_free(struct hda_codec *codec)
+ snd_hda_gen_free(codec);
+ }
+
++static void cx_process_headset_plugin(struct hda_codec *codec)
++{
++ unsigned int val;
++ unsigned int count = 0;
++
++	/* Wait for headset-type detection to complete. */
++ do {
++ val = snd_hda_codec_read(codec, 0x1c, 0, 0xca0, 0x0);
++ if (val & 0x080) {
++ codec_dbg(codec, "headset type detect done!\n");
++ break;
++ }
++ msleep(20);
++ count++;
++ } while (count < 3);
++ val = snd_hda_codec_read(codec, 0x1c, 0, 0xcb0, 0x0);
++ if (val & 0x800) {
++ codec_dbg(codec, "headset plugin, type is CTIA\n");
++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24);
++ } else if (val & 0x400) {
++ codec_dbg(codec, "headset plugin, type is OMTP\n");
++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24);
++ } else {
++ codec_dbg(codec, "headphone plugin\n");
++ }
++}
++
++static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event)
++{
++ unsigned int mic_present;
++
++	/* In cx8070 and sn6140, node 16 can only be configured as headphone or disabled,
++	 * and node 19 can only be configured as microphone or disabled.
++	 * Check the hp & mic tags to handle headset plug-in and plug-out.
++ */
++ mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++ if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */
++ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
++ else
++ cx_process_headset_plugin(codec);
++}
++
+ #ifdef CONFIG_PM
+ static int cx_auto_suspend(struct hda_codec *codec)
+ {
+@@ -239,9 +307,12 @@ enum {
+ CXT_FIXUP_HP_SPECTRE,
+ CXT_FIXUP_HP_GATE_MIC,
+ CXT_FIXUP_MUTE_LED_GPIO,
++ CXT_FIXUP_HP_ELITEONE_OUT_DIS,
+ CXT_FIXUP_HP_ZBOOK_MUTE_LED,
+ CXT_FIXUP_HEADSET_MIC,
+ CXT_FIXUP_HP_MIC_NO_PRESENCE,
++ CXT_PINCFG_SWS_JS201D,
++ CXT_PINCFG_TOP_SPEAKER,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -254,6 +325,19 @@ static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
+ spec->gen.inv_dmic_split = 1;
+ }
+
++/* fix widget control pin settings */
++static void cxt_fixup_update_pinctl(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_PROBE) {
++		/* Unset OUT_EN for this node pin, leaving only HP_EN.
++		 * This is the value stored in the codec register after
++		 * a correct initialization by a previous Windows boot.
++ */
++ snd_hda_set_pin_ctl_cache(codec, 0x1d, AC_PINCTL_HP_EN);
++ }
++}
++
+ static void cxt5066_increase_mic_boost(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -739,6 +823,34 @@ static const struct hda_pintbl cxt_pincfg_lemote[] = {
+ {}
+ };
+
++/* SuoWoSi/South-holding JS201D with sn6140 */
++static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
++ { 0x16, 0x03211040 }, /* hp out */
++ { 0x17, 0x91170110 }, /* SPK/Class_D */
++ { 0x18, 0x95a70130 }, /* Internal mic */
++ { 0x19, 0x03a11020 }, /* Headset Mic */
++ { 0x1a, 0x40f001f0 }, /* Not used */
++ { 0x21, 0x40f001f0 }, /* Not used */
++ {}
++};
++
++/* pincfg quirk for Tuxedo Sirius;
++ * unfortunately the (PCI) SSID conflicts with the System76 Pangolin pang14,
++ * which has an incompatible pin setup, so we check the codec SSID (luckily
++ * a different one!) and conditionally apply the quirk here
++ */
++static void cxt_fixup_sirius_top_speaker(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ /* ignore for incorrectly picked-up pang14 */
++ if (codec->core.subsystem_id == 0x278212b3)
++ return;
++ /* set up the top speaker pin */
++ if (action == HDA_FIXUP_ACT_PRE_PROBE)
++ snd_hda_codec_set_pincfg(codec, 0x1d, 0x82170111);
++}
++
+ static const struct hda_fixup cxt_fixups[] = {
+ [CXT_PINCFG_LENOVO_X200] = {
+ .type = HDA_FIXUP_PINS,
+@@ -877,6 +989,10 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_mute_led_gpio,
+ },
++ [CXT_FIXUP_HP_ELITEONE_OUT_DIS] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cxt_fixup_update_pinctl,
++ },
+ [CXT_FIXUP_HP_ZBOOK_MUTE_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_zbook_mute_led,
+@@ -894,6 +1010,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .chained = true,
+ .chain_id = CXT_FIXUP_HEADSET_MIC,
+ },
++ [CXT_PINCFG_SWS_JS201D] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = cxt_pincfg_sws_js201d,
++ },
++ [CXT_PINCFG_TOP_SPEAKER] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cxt_fixup_sirius_top_speaker,
++ },
+ };
+
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -959,6 +1083,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
++ SND_PCI_QUIRK(0x103c, 0x83e5, "HP EliteOne 1000 G2", CXT_FIXUP_HP_ELITEONE_OUT_DIS),
+ SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+@@ -967,6 +1092,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
++ SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D),
+ SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+@@ -989,6 +1115,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+ SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
++ SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
++ SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
+ {}
+ };
+
+@@ -1007,6 +1135,8 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ { .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
+ { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
++ { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
++ { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
+ {}
+ };
+
+@@ -1042,6 +1172,15 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ codec->spec = spec;
+ codec->patch_ops = cx_auto_patch_ops;
+
++	/* set the cx8070/sn6140 flag and register the headset-mic VREF jack callback */
++ switch (codec->core.vendor_id) {
++ case 0x14f11f86:
++ case 0x14f11f87:
++ spec->is_cx8070_sn6140 = true;
++ snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref);
++ break;
++ }
++
+ cx_auto_parse_eapd(codec);
+ spec->gen.own_eapd_ctl = 1;
+
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 627899959ffe8c..e41316e2e98338 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -1371,6 +1371,7 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
+ spec->scodecs[CS8409_CODEC1] = &dolphin_cs42l42_1;
+ spec->scodecs[CS8409_CODEC1]->codec = codec;
+ spec->num_scodecs = 2;
++ spec->gen.suppress_vmaster = 1;
+
+ codec->patch_ops = cs8409_dolphin_patch_ops;
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 1cde2a69bdb4ba..f030700cd60d75 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1989,11 +1989,16 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ }
+
+ static const struct snd_pci_quirk force_connect_list[] = {
++ SND_PCI_QUIRK(0x103c, 0x83e2, "HP EliteDesk 800 G4", 1),
++ SND_PCI_QUIRK(0x103c, 0x83ef, "HP MP9 G4 Retail System AMS", 1),
+ SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
++ SND_PCI_QUIRK(0x1043, 0x86ae, "ASUS", 1), /* Z170 PRO */
++ SND_PCI_QUIRK(0x1043, 0x86c7, "ASUS", 1), /* Z170M PLUS */
+ SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
++ SND_PCI_QUIRK(0x8086, 0x2060, "Intel NUC5CPYB", 1),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
+ {}
+ };
+@@ -2298,6 +2303,7 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
+ codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
+
+ for (idx = 0; idx < pcm_num; idx++) {
++ struct hdmi_spec_per_cvt *per_cvt;
+ struct hda_pcm *info;
+ struct hda_pcm_stream *pstr;
+
+@@ -2313,6 +2319,11 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
+ pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
+ pstr->substreams = 1;
+ pstr->ops = generic_ops;
++
++ per_cvt = get_cvt(spec, 0);
++ pstr->channels_min = per_cvt->channels_min;
++ pstr->channels_max = per_cvt->channels_max;
++
+ /* pcm number is less than pcm_rec array size */
+ if (spec->pcm_used >= ARRAY_SIZE(spec->pcm_rec))
+ break;
+@@ -4635,6 +4646,7 @@ HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
+ HDA_CODEC_ENTRY(0x8086281d, "Meteor Lake HDMI", patch_i915_adlp_hdmi),
+ HDA_CODEC_ENTRY(0x8086281f, "Raptor Lake P HDMI", patch_i915_adlp_hdmi),
+ HDA_CODEC_ENTRY(0x80862820, "Lunar Lake HDMI", patch_i915_adlp_hdmi),
++HDA_CODEC_ENTRY(0x80862822, "Panther Lake HDMI", patch_i915_adlp_hdmi),
+ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
+ HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9677c09cf7a98e..07e1547fff2e51 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -438,6 +438,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+ fallthrough;
+ case 0x10ec0215:
++ case 0x10ec0285:
++ case 0x10ec0289:
++ alc_update_coef_idx(codec, 0x36, 1<<13, 0);
++ fallthrough;
+ case 0x10ec0230:
+ case 0x10ec0233:
+ case 0x10ec0235:
+@@ -451,9 +455,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ case 0x10ec0283:
+ case 0x10ec0286:
+ case 0x10ec0288:
+- case 0x10ec0285:
+ case 0x10ec0298:
+- case 0x10ec0289:
+ case 0x10ec0300:
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ break;
+@@ -583,10 +585,14 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ switch (codec->core.vendor_id) {
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ case 0x10ec0283:
++ case 0x10ec0285:
+ case 0x10ec0286:
++ case 0x10ec0287:
+ case 0x10ec0288:
++ case 0x10ec0295:
+ case 0x10ec0298:
+ alc_headset_mic_no_shutup(codec);
+ break;
+@@ -1986,6 +1992,7 @@ enum {
+ ALC887_FIXUP_ASUS_AUDIO,
+ ALC887_FIXUP_ASUS_HMIC,
+ ALCS1200A_FIXUP_MIC_VREF,
++ ALC888VD_FIXUP_MIC_100VREF,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2539,6 +2546,13 @@ static const struct hda_fixup alc882_fixups[] = {
+ {}
+ }
+ },
++ [ALC888VD_FIXUP_MIC_100VREF] = {
++ .type = HDA_FIXUP_PINCTLS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, PIN_VREF100 }, /* headset mic */
++ {}
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2608,6 +2622,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
++ SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
+ SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+@@ -2634,6 +2649,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x66a2, "Clevo PE60RNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++ SND_PCI_QUIRK(0x1558, 0x66a6, "Clevo PE60SN[CDE]-[GS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -3255,6 +3271,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
+ case 0x10ec0230:
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_write_coef_idx(codec, 0x48, 0x0);
+ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+@@ -3284,6 +3301,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
+ case 0x10ec0230:
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+@@ -3670,6 +3688,7 @@ static void alc285_hp_init(struct hda_codec *codec)
+ int i, val;
+ int coef38, coef0d, coef36;
+
++ alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */
+ alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */
+ coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */
+ coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */
+@@ -4913,6 +4932,30 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ }
+ }
+
++static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
++{
++ if (delay <= 0)
++ delay = 75;
++ snd_hda_codec_write(codec, 0x21, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++ msleep(delay);
++ snd_hda_codec_write(codec, 0x21, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ msleep(delay);
++}
++
++static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay)
++{
++ if (delay <= 0)
++ delay = 75;
++ snd_hda_codec_write(codec, 0x21, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ msleep(delay);
++ snd_hda_codec_write(codec, 0x21, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++ msleep(delay);
++}
++
+ static const struct coef_fw alc225_pre_hsmode[] = {
+ UPDATE_COEF(0x4a, 1<<8, 0),
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+@@ -5014,6 +5057,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ case 0x10ec0236:
+ case 0x10ec0256:
+ case 0x19e58326:
++ alc_hp_mute_disable(codec, 75);
+ alc_process_coef_fw(codec, coef0256);
+ break;
+ case 0x10ec0234:
+@@ -5048,6 +5092,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ case 0x10ec0295:
+ case 0x10ec0289:
+ case 0x10ec0299:
++ alc_hp_mute_disable(codec, 75);
+ alc_process_coef_fw(codec, alc225_pre_hsmode);
+ alc_process_coef_fw(codec, coef0225);
+ break;
+@@ -5273,6 +5318,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ case 0x10ec0299:
+ alc_process_coef_fw(codec, alc225_pre_hsmode);
+ alc_process_coef_fw(codec, coef0225);
++ alc_hp_enable_unmute(codec, 75);
+ break;
+ case 0x10ec0255:
+ alc_process_coef_fw(codec, coef0255);
+@@ -5285,6 +5331,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ alc_write_coef_idx(codec, 0x45, 0xc089);
+ msleep(50);
+ alc_process_coef_fw(codec, coef0256);
++ alc_hp_enable_unmute(codec, 75);
+ break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+@@ -5382,6 +5429,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ case 0x10ec0256:
+ case 0x19e58326:
+ alc_process_coef_fw(codec, coef0256);
++ alc_hp_enable_unmute(codec, 75);
+ break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+@@ -5430,6 +5478,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ alc_process_coef_fw(codec, coef0225_2);
+ else
+ alc_process_coef_fw(codec, coef0225_1);
++ alc_hp_enable_unmute(codec, 75);
+ break;
+ case 0x10ec0867:
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+@@ -5497,6 +5546,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ case 0x10ec0256:
+ case 0x19e58326:
+ alc_process_coef_fw(codec, coef0256);
++ alc_hp_enable_unmute(codec, 75);
+ break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+@@ -5534,6 +5584,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ case 0x10ec0289:
+ case 0x10ec0299:
+ alc_process_coef_fw(codec, coef0225);
++ alc_hp_enable_unmute(codec, 75);
+ break;
+ }
+ codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
+@@ -5602,25 +5653,21 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ alc_write_coef_idx(codec, 0x06, 0x6104);
+ alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
+
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+- msleep(80);
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+-
+ alc_process_coef_fw(codec, coef0255);
+ msleep(300);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x0070) == 0x0070;
+-
++ if (!is_ctia) {
++ alc_write_coef_idx(codec, 0x45, 0xe089);
++ msleep(100);
++ val = alc_read_coef_idx(codec, 0x46);
++ if ((val & 0x0070) == 0x0070)
++ is_ctia = false;
++ else
++ is_ctia = true;
++ }
+ alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+-
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+- msleep(80);
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+@@ -5697,12 +5744,6 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ case 0x10ec0295:
+ case 0x10ec0289:
+ case 0x10ec0299:
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+- msleep(80);
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+-
+ alc_process_coef_fw(codec, alc225_pre_hsmode);
+ alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
+ val = alc_read_coef_idx(codec, 0x45);
+@@ -5719,15 +5760,19 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x00f0) == 0x00f0;
+ }
++ if (!is_ctia) {
++ alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10);
++ alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8);
++ msleep(100);
++ val = alc_read_coef_idx(codec, 0x46);
++ if ((val & 0x00f0) == 0x00f0)
++ is_ctia = false;
++ else
++ is_ctia = true;
++ }
+ alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
+ alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
+ alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+-
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+- msleep(80);
+- snd_hda_codec_write(codec, 0x21, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ break;
+ case 0x10ec0867:
+ is_ctia = true;
+@@ -6495,6 +6540,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
+ case 0x10ec0236:
+ case 0x10ec0255:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+@@ -6680,6 +6726,60 @@ static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
+ }
+ }
+
++static void alc285_fixup_hp_envy_x360(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ static const struct coef_fw coefs[] = {
++ WRITE_COEF(0x08, 0x6a0c), WRITE_COEF(0x0d, 0xa023),
++ WRITE_COEF(0x10, 0x0320), WRITE_COEF(0x1a, 0x8c03),
++ WRITE_COEF(0x25, 0x1800), WRITE_COEF(0x26, 0x003a),
++ WRITE_COEF(0x28, 0x1dfe), WRITE_COEF(0x29, 0xb014),
++ WRITE_COEF(0x2b, 0x1dfe), WRITE_COEF(0x37, 0xfe15),
++ WRITE_COEF(0x38, 0x7909), WRITE_COEF(0x45, 0xd489),
++ WRITE_COEF(0x46, 0x00f4), WRITE_COEF(0x4a, 0x21e0),
++ WRITE_COEF(0x66, 0x03f0), WRITE_COEF(0x67, 0x1000),
++ WRITE_COEF(0x6e, 0x1005), { }
++ };
++
++ static const struct hda_pintbl pincfgs[] = {
++	{ 0x12, 0xb7a60130 }, /* Internal microphone */
++ { 0x14, 0x90170150 }, /* B&O soundbar speakers */
++ { 0x17, 0x90170153 }, /* Side speakers */
++ { 0x19, 0x03a11040 }, /* Headset microphone */
++ { }
++ };
++
++ switch (action) {
++ case HDA_FIXUP_ACT_PRE_PROBE:
++ snd_hda_apply_pincfgs(codec, pincfgs);
++
++ /* Fixes volume control problem for side speakers */
++ alc295_fixup_disable_dac3(codec, fix, action);
++
++ /* Fixes no sound from headset speaker */
++ snd_hda_codec_amp_stereo(codec, 0x21, HDA_OUTPUT, 0, -1, 0);
++
++		/* Auto-enable headset mic when plugged in */
++ snd_hda_jack_set_gating_jack(codec, 0x19, 0x21);
++
++ /* Headset mic volume enhancement */
++ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREF50);
++ break;
++ case HDA_FIXUP_ACT_INIT:
++ alc_process_coef_fw(codec, coefs);
++ break;
++ case HDA_FIXUP_ACT_BUILD:
++ rename_ctl(codec, "Bass Speaker Playback Volume",
++ "B&O-Tuned Playback Volume");
++ rename_ctl(codec, "Front Playback Switch",
++ "B&O Soundbar Playback Switch");
++ rename_ctl(codec, "Bass Speaker Playback Switch",
++ "Side Speaker Playback Switch");
++ break;
++ }
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+
+@@ -7168,6 +7268,7 @@ enum {
+ ALC290_FIXUP_SUBWOOFER_HSJACK,
+ ALC269_FIXUP_THINKPAD_ACPI,
+ ALC269_FIXUP_DMIC_THINKPAD_ACPI,
++ ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
+ ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -7189,6 +7290,7 @@ enum {
+ ALC280_FIXUP_HP_9480M,
+ ALC245_FIXUP_HP_X360_AMP,
+ ALC285_FIXUP_HP_SPECTRE_X360_EB1,
++ ALC285_FIXUP_HP_ENVY_X360,
+ ALC288_FIXUP_DELL_HEADSET_MODE,
+ ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC288_FIXUP_DELL_XPS_13,
+@@ -7262,8 +7364,10 @@ enum {
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
++ ALC289_FIXUP_DELL_SPK1,
+ ALC289_FIXUP_DELL_SPK2,
+ ALC289_FIXUP_DUAL_SPK,
++ ALC289_FIXUP_RTK_AMP_DUAL_SPK,
+ ALC294_FIXUP_SPK2_TO_DAC1,
+ ALC294_FIXUP_ASUS_DUAL_SPK,
+ ALC285_FIXUP_THINKPAD_X1_GEN7,
+@@ -7289,6 +7393,7 @@ enum {
+ ALC236_FIXUP_HP_GPIO_LED,
+ ALC236_FIXUP_HP_MUTE_LED,
+ ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
++ ALC236_FIXUP_LENOVO_INV_DMIC,
+ ALC298_FIXUP_SAMSUNG_AMP,
+ ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+@@ -7335,6 +7440,7 @@ enum {
+ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+ ALC298_FIXUP_LENOVO_C940_DUET7,
++ ALC287_FIXUP_LENOVO_14IRP8_DUETITL,
+ ALC287_FIXUP_13S_GEN2_SPEAKERS,
+ ALC256_FIXUP_SET_COEF_DEFAULTS,
+ ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+@@ -7354,6 +7460,7 @@ enum {
+ ALC287_FIXUP_LEGION_16ITHG6,
+ ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
++ ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN,
+ ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
+ ALC236_FIXUP_DELL_DUAL_CODECS,
+ ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
+@@ -7363,6 +7470,8 @@ enum {
+ ALC287_FIXUP_THINKPAD_I2S_SPK,
+ ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ ALC2XX_FIXUP_HEADSET_MIC,
++ ALC289_FIXUP_DELL_CS35L41_SPI_2,
++ ALC294_FIXUP_CS35L41_I2C_2,
+ };
+
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -7382,6 +7491,26 @@ static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
+ __snd_hda_apply_fixup(codec, id, action, 0);
+ }
+
++/* A special fixup for Lenovo Slim/Yoga Pro 9 14IRP8 and Yoga DuetITL 2021;
++ * 14IRP8 PCI SSID will mistakenly be matched with the DuetITL codec SSID,
++ * so we need to apply a different fixup in this case. The only DuetITL codec
++ * SSID reported so far is the 17aa:3802 while the 14IRP8 has the 17aa:38be
++ * and 17aa:38bf. If it weren't for the PCI SSID, the 14IRP8 models would
++ * have matched correctly by their codecs.
++ */
++static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ int id;
++
++ if (codec->core.subsystem_id == 0x17aa3802)
++ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* DuetITL */
++ else
++ id = ALC287_FIXUP_TAS2781_I2C; /* 14IRP8 */
++ __snd_hda_apply_fixup(codec, id, action, 0);
++}
++
+ static const struct hda_fixup alc269_fixups[] = {
+ [ALC269_FIXUP_GPIO2] = {
+ .type = HDA_FIXUP_FUNC,
+@@ -7516,6 +7645,14 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+ },
++ [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, 0x03a19020 }, /* headset mic */
++ { 0x1b, 0x90170150 }, /* speaker */
++ { }
++ },
++ },
+ [ALC269_FIXUP_AMIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -8589,6 +8726,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
++ [ALC289_FIXUP_DELL_SPK1] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x90170140 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
++ },
+ [ALC289_FIXUP_DELL_SPK2] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -8604,6 +8750,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC289_FIXUP_DELL_SPK2
+ },
++ [ALC289_FIXUP_RTK_AMP_DUAL_SPK] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_speaker2_to_dac1,
++ .chained = true,
++ .chain_id = ALC289_FIXUP_DELL_SPK1
++ },
+ [ALC294_FIXUP_SPK2_TO_DAC1] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_speaker2_to_dac1,
+@@ -8798,6 +8950,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc236_fixup_hp_mute_led_micmute_vref,
+ },
++ [ALC236_FIXUP_LENOVO_INV_DMIC] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_inv_dmic,
++ .chained = true,
++ .chain_id = ALC283_FIXUP_INT_MIC,
++ },
+ [ALC298_FIXUP_SAMSUNG_AMP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_samsung_amp,
+@@ -9115,6 +9273,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_spectre_x360_eb1
+ },
++ [ALC285_FIXUP_HP_ENVY_X360] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_hp_envy_x360,
++ .chained = true,
++ .chain_id = ALC285_FIXUP_HP_GPIO_AMP_INIT,
++ },
+ [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_ideapad_s740_coef,
+@@ -9243,6 +9407,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_lenovo_c940_duet7,
+ },
++ [ALC287_FIXUP_LENOVO_14IRP8_DUETITL] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc287_fixup_lenovo_14irp8_duetitl,
++ },
+ [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -9423,6 +9591,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
+ },
++ [ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc287_fixup_yoga9_14iap7_bass_spk_pin,
++ .chained = true,
++ .chain_id = ALC287_FIXUP_CS35L41_I2C_2,
++ },
+ [ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc295_fixup_dell_inspiron_top_speakers,
+@@ -9439,13 +9613,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l41_fixup_i2c_two,
+ .chained = true,
+- .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++ .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ },
+ [ALC287_FIXUP_TAS2781_I2C] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = tas2781_fixup_i2c,
+ .chained = true,
+- .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++ .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ },
+ [ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = {
+ .type = HDA_FIXUP_FUNC,
+@@ -9460,6 +9634,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ [ALC287_FIXUP_THINKPAD_I2S_SPK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_bind_dacs,
++ .chained = true,
++ .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ },
+ [ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
+ .type = HDA_FIXUP_FUNC,
+@@ -9471,6 +9647,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mic,
+ },
++ [ALC289_FIXUP_DELL_CS35L41_SPI_2] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs35l41_fixup_spi_two,
++ .chained = true,
++ .chain_id = ALC289_FIXUP_DUAL_SPK
++ },
++ [ALC294_FIXUP_CS35L41_I2C_2] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs35l41_fixup_i2c_two,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -9486,6 +9672,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
++ SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
+@@ -9499,6 +9686,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1025, 0x126a, "Acer Swift SF114-32", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+@@ -9574,20 +9762,26 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
++ SND_PCI_QUIRK(0x1028, 0x0beb, "Dell XPS 15 9530 (2023)", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0c0b, "Dell Oasis 14 RPL-P", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x0c0d, "Dell Oasis", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x0c0e, "Dell Oasis 16", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+- SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -9661,12 +9855,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++ SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED),
++ SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
++ SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
+ SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+ SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+@@ -9680,6 +9879,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8780, "HP ZBook Fury 17 G7 Mobile Workstation",
+@@ -9689,7 +9889,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+@@ -9698,6 +9900,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++ SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++ SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+@@ -9719,7 +9923,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x88dd, "HP Pavilion 15z-ec200", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9728,6 +9934,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8973, "HP EliteBook 860 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8974, "HP EliteBook 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
+ SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -9745,6 +9952,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+ SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+@@ -9752,15 +9960,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ab9, "HP EliteBook 840 G8 (MB 8AB8)", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8b0f, "HP Elite mt645 G7 Mobile Thin Client U81", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++ SND_PCI_QUIRK(0x103c, 0x8b3f, "HP mt440 Mobile Thin Client U91", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b45, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b46, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+@@ -9788,12 +10001,34 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c7f, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c80, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c81, "HP EliteBook 665 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++ SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -9813,10 +10048,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++ SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+@@ -9824,6 +10061,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
+ SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1970, "ASUS UX550VE", ALC289_FIXUP_ASUS_GA401),
+@@ -9832,12 +10070,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+@@ -9846,21 +10089,23 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
++ SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
++ SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+- SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+@@ -9882,18 +10127,21 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++ SND_PCI_QUIRK(0x10ec, 0x119e, "Positivo SU C1400", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
++ SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+- SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
++ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP),
++ SND_PCI_QUIRK(0x144d, 0xc1a4, "Samsung Galaxy Book Pro 360 (NT935QBD)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+ SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
+@@ -9905,12 +10153,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
++ SND_PCI_QUIRK(0x152d, 0x1262, "Huawei NBLB-WAX9N", ALC2XX_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1558, 0x0353, "Clevo V35[05]SN[CDE]Q", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1325, "Clevo N15[01][CW]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1558, 0x2624, "Clevo L240TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -9975,6 +10226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -10008,6 +10260,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
++ SND_PCI_QUIRK(0x17aa, 0x2234, "Thinkpad ICE-1", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+@@ -10039,12 +10292,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
++ SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+- SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
++ SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8 / DuetITL 2021", ALC287_FIXUP_LENOVO_14IRP8_DUETITL),
+ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
+ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+- SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
++ SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+@@ -10055,12 +10309,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
++ SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3881, "YB9 dual power mode2 YC", ALC287_FIXUP_TAS2781_I2C),
++ SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
++ SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38ba, "Yoga S780-14.5 Air AMD quad YC", ALC287_FIXUP_TAS2781_I2C),
+@@ -10070,7 +10328,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
++ SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x38df, "Y990 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
++ SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
++ SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -10099,12 +10363,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x19e5, 0x3212, "Huawei KLV-WX9 ", ALC256_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
+ SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
+ SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++ SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
+ SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
+@@ -10115,16 +10381,23 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
++ SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++ SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
++ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -10305,11 +10578,13 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+ {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+ {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
++ {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"},
+ {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+ {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
+ {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
++ {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+@@ -10707,22 +10982,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x03211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+- {0x14, 0x90170110},
+- {0x21, 0x04211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+- {0x14, 0x90170110},
+- {0x21, 0x04211030}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS,
+- {0x17, 0x21014020},
+- {0x18, 0x21a19030}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS,
+- {0x17, 0x21014040},
+- {0x18, 0x21a19050}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS),
+ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_STANDARD_PINS,
+ {0x17, 0x90170110}),
+@@ -10763,9 +11022,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ * at most one tbl is allowed to define for the same vendor and same codec
+ */
+ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1025, "Acer", ALC2XX_FIXUP_HEADSET_MIC,
++ {0x19, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1b, 0x40000000}),
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++ {0x19, 0x40000000},
++ {0x1b, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
+@@ -11449,8 +11713,7 @@ static void alc897_hp_automute_hook(struct hda_codec *codec,
+
+ snd_hda_gen_hp_automute(codec, jack);
+ vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+- snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+- vref);
++ snd_hda_set_pin_ctl(codec, 0x1b, vref);
+ }
+
+ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+@@ -11459,6 +11722,10 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ struct alc_spec *spec = codec->spec;
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
++ spec->no_shutup_pins = 1;
++ }
++ if (action == HDA_FIXUP_ACT_PROBE) {
++ snd_hda_set_pin_ctl_cache(codec, 0x1a, PIN_IN | AC_PINCTL_VREF_100);
+ }
+ }
+
+@@ -11559,6 +11826,7 @@ enum {
+ ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ ALC897_FIXUP_HEADSET_MIC_PIN2,
+ ALC897_FIXUP_UNIS_H3C_X500S,
++ ALC897_FIXUP_HEADSET_MIC_PIN3,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -12005,10 +12273,18 @@ static const struct hda_fixup alc662_fixups[] = {
+ {}
+ },
+ },
++ [ALC897_FIXUP_HEADSET_MIC_PIN3] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11050 }, /* use as headset mic */
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
++ SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3),
+ SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+@@ -12063,6 +12339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
+index fb802802939e17..980e6104c2f3f2 100644
+--- a/sound/pci/hda/tas2781_hda_i2c.c
++++ b/sound/pci/hda/tas2781_hda_i2c.c
+@@ -2,10 +2,12 @@
+ //
+ // TAS2781 HDA I2C driver
+ //
+-// Copyright 2023 Texas Instruments, Inc.
++// Copyright 2023 - 2024 Texas Instruments, Inc.
+ //
+ // Author: Shenghao Ding <shenghao-ding@ti.com>
++// Current maintainer: Baojun Xu <baojun.xu@ti.com>
+
++#include <asm/unaligned.h>
+ #include <linux/acpi.h>
+ #include <linux/crc8.h>
+ #include <linux/crc32.h>
+@@ -65,6 +67,15 @@ enum calib_data {
+ CALIB_MAX
+ };
+
++struct tas2781_hda {
++ struct device *dev;
++ struct tasdevice_priv *priv;
++ struct snd_kcontrol *dsp_prog_ctl;
++ struct snd_kcontrol *dsp_conf_ctl;
++ struct snd_kcontrol *prof_ctl;
++ struct snd_kcontrol *snd_ctls[2];
++};
++
+ static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
+ {
+ struct tasdevice_priv *tas_priv = data;
+@@ -84,9 +95,7 @@ static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
+ static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid)
+ {
+ struct acpi_device *adev;
+- struct device *physdev;
+ LIST_HEAD(resources);
+- const char *sub;
+ int ret;
+
+ adev = acpi_dev_get_first_match_dev(hid, NULL, -1);
+@@ -102,18 +111,8 @@ static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid)
+
+ acpi_dev_free_resource_list(&resources);
+ strscpy(p->dev_name, hid, sizeof(p->dev_name));
+- physdev = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+
+- /* No side-effect to the playback even if subsystem_id is NULL*/
+- sub = acpi_get_subsystem_id(ACPI_HANDLE(physdev));
+- if (IS_ERR(sub))
+- sub = NULL;
+-
+- p->acpi_subsystem_id = sub;
+-
+- put_device(physdev);
+-
+ return 0;
+
+ err:
+@@ -125,26 +124,28 @@ static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid)
+
+ static void tas2781_hda_playback_hook(struct device *dev, int action)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+- dev_dbg(tas_priv->dev, "%s: action = %d\n", __func__, action);
++ dev_dbg(tas_hda->dev, "%s: action = %d\n", __func__, action);
+ switch (action) {
+ case HDA_GEN_PCM_ACT_OPEN:
+ pm_runtime_get_sync(dev);
+- mutex_lock(&tas_priv->codec_lock);
+- tasdevice_tuning_switch(tas_priv, 0);
+- mutex_unlock(&tas_priv->codec_lock);
++ mutex_lock(&tas_hda->priv->codec_lock);
++ tasdevice_tuning_switch(tas_hda->priv, 0);
++ tas_hda->priv->playback_started = true;
++ mutex_unlock(&tas_hda->priv->codec_lock);
+ break;
+ case HDA_GEN_PCM_ACT_CLOSE:
+- mutex_lock(&tas_priv->codec_lock);
+- tasdevice_tuning_switch(tas_priv, 1);
+- mutex_unlock(&tas_priv->codec_lock);
++ mutex_lock(&tas_hda->priv->codec_lock);
++ tasdevice_tuning_switch(tas_hda->priv, 1);
++ tas_hda->priv->playback_started = false;
++ mutex_unlock(&tas_hda->priv->codec_lock);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ break;
+ default:
+- dev_dbg(tas_priv->dev, "Playback action not supported: %d\n",
++ dev_dbg(tas_hda->dev, "Playback action not supported: %d\n",
+ action);
+ break;
+ }
+@@ -168,8 +169,12 @@ static int tasdevice_get_profile_id(struct snd_kcontrol *kcontrol,
+ {
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ ucontrol->value.integer.value[0] = tas_priv->rcabin.profile_cfg_id;
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return 0;
+ }
+
+@@ -183,11 +188,15 @@ static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
+
+ val = clamp(nr_profile, 0, max);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ if (tas_priv->rcabin.profile_cfg_id != val) {
+ tas_priv->rcabin.profile_cfg_id = val;
+ ret = 1;
+ }
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return ret;
+ }
+
+@@ -224,8 +233,12 @@ static int tasdevice_program_get(struct snd_kcontrol *kcontrol,
+ {
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ ucontrol->value.integer.value[0] = tas_priv->cur_prog;
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return 0;
+ }
+
+@@ -240,11 +253,15 @@ static int tasdevice_program_put(struct snd_kcontrol *kcontrol,
+
+ val = clamp(nr_program, 0, max);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ if (tas_priv->cur_prog != val) {
+ tas_priv->cur_prog = val;
+ ret = 1;
+ }
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return ret;
+ }
+
+@@ -253,8 +270,12 @@ static int tasdevice_config_get(struct snd_kcontrol *kcontrol,
+ {
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ ucontrol->value.integer.value[0] = tas_priv->cur_conf;
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return 0;
+ }
+
+@@ -269,33 +290,16 @@ static int tasdevice_config_put(struct snd_kcontrol *kcontrol,
+
+ val = clamp(nr_config, 0, max);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ if (tas_priv->cur_conf != val) {
+ tas_priv->cur_conf = val;
+ ret = 1;
+ }
+
+- return ret;
+-}
+-
+-/*
+- * tas2781_digital_getvol - get the volum control
+- * @kcontrol: control pointer
+- * @ucontrol: User data
+- * Customer Kcontrol for tas2781 is primarily for regmap booking, paging
+- * depends on internal regmap mechanism.
+- * tas2781 contains book and page two-level register map, especially
+- * book switching will set the register BXXP00R7F, after switching to the
+- * correct book, then leverage the mechanism for paging to access the
+- * register.
+- */
+-static int tas2781_digital_getvol(struct snd_kcontrol *kcontrol,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+- struct soc_mixer_control *mc =
+- (struct soc_mixer_control *)kcontrol->private_value;
++ mutex_unlock(&tas_priv->codec_lock);
+
+- return tasdevice_digital_getvol(tas_priv, ucontrol, mc);
++ return ret;
+ }
+
+ static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
+@@ -304,19 +308,15 @@ static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
++ int ret;
+
+- return tasdevice_amp_getvol(tas_priv, ucontrol, mc);
+-}
++ mutex_lock(&tas_priv->codec_lock);
+
+-static int tas2781_digital_putvol(struct snd_kcontrol *kcontrol,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+- struct soc_mixer_control *mc =
+- (struct soc_mixer_control *)kcontrol->private_value;
++ ret = tasdevice_amp_getvol(tas_priv, ucontrol, mc);
+
+- /* The check of the given value is in tasdevice_digital_putvol. */
+- return tasdevice_digital_putvol(tas_priv, ucontrol, mc);
++ mutex_unlock(&tas_priv->codec_lock);
++
++ return ret;
+ }
+
+ static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
+@@ -325,9 +325,16 @@ static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
++ int ret;
++
++ mutex_lock(&tas_priv->codec_lock);
+
+ /* The check of the given value is in tasdevice_amp_putvol. */
+- return tasdevice_amp_putvol(tas_priv, ucontrol, mc);
++ ret = tasdevice_amp_putvol(tas_priv, ucontrol, mc);
++
++ mutex_unlock(&tas_priv->codec_lock);
++
++ return ret;
+ }
+
+ static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
+@@ -335,10 +342,14 @@ static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
+ {
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ ucontrol->value.integer.value[0] = (int)tas_priv->force_fwload_status;
+ dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
+ tas_priv->force_fwload_status ? "ON" : "OFF");
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return 0;
+ }
+
+@@ -348,6 +359,8 @@ static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
+ struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ bool change, val = (bool)ucontrol->value.integer.value[0];
+
++ mutex_lock(&tas_priv->codec_lock);
++
+ if (tas_priv->force_fwload_status == val)
+ change = false;
+ else {
+@@ -357,6 +370,8 @@ static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
+ dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
+ tas_priv->force_fwload_status ? "ON" : "OFF");
+
++ mutex_unlock(&tas_priv->codec_lock);
++
+ return change;
+ }
+
+@@ -364,9 +379,6 @@ static const struct snd_kcontrol_new tas2781_snd_controls[] = {
+ ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL,
+ 1, 0, 20, 0, tas2781_amp_getvol,
+ tas2781_amp_putvol, amp_vol_tlv),
+- ACARD_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain", TAS2781_DVC_LVL,
+- 0, 0, 200, 1, tas2781_digital_getvol,
+- tas2781_digital_putvol, dvc_tlv),
+ ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0,
+ tas2781_force_fwload_get, tas2781_force_fwload_put),
+ };
+@@ -398,25 +410,27 @@ static const struct snd_kcontrol_new tas2781_dsp_conf_ctrl = {
+ static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
+ {
+ static const unsigned char page_array[CALIB_MAX] = {
+- 0x17, 0x18, 0x18, 0x0d, 0x18
++ 0x17, 0x18, 0x18, 0x13, 0x18,
+ };
+ static const unsigned char rgno_array[CALIB_MAX] = {
+- 0x74, 0x0c, 0x14, 0x3c, 0x7c
++ 0x74, 0x0c, 0x14, 0x70, 0x7c,
+ };
+- unsigned char *data;
++ int offset = 0;
+ int i, j, rc;
++ __be32 data;
+
+ for (i = 0; i < tas_priv->ndev; i++) {
+- data = tas_priv->cali_data.data +
+- i * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
+ for (j = 0; j < CALIB_MAX; j++) {
++ data = cpu_to_be32(
++ *(uint32_t *)&tas_priv->cali_data.data[offset]);
+ rc = tasdevice_dev_bulk_write(tas_priv, i,
+ TASDEVICE_REG(0, page_array[j], rgno_array[j]),
+- &(data[4 * j]), 4);
++ (unsigned char *)&data, 4);
+ if (rc < 0)
+ dev_err(tas_priv->dev,
+ "chn %d calib %d bulk_wr err = %d\n",
+ i, j, rc);
++ offset += 4;
+ }
+ }
+ }
+@@ -455,9 +469,9 @@ static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
+ status = efi.get_variable(efi_name, &efi_guid, &attr,
+ &tas_priv->cali_data.total_sz,
+ tas_priv->cali_data.data);
+- if (status != EFI_SUCCESS)
+- return -EINVAL;
+ }
++ if (status != EFI_SUCCESS)
++ return -EINVAL;
+
+ tmp_val = (unsigned int *)tas_priv->cali_data.data;
+
+@@ -470,16 +484,35 @@ static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
+ dev_dbg(tas_priv->dev, "%4ld-%2d-%2d, %2d:%2d:%2d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+- tas2781_apply_calib(tas_priv);
++ tasdevice_apply_calibration(tas_priv);
+ } else
+ tas_priv->cali_data.total_sz = 0;
+
+ return 0;
+ }
+
++static void tas2781_hda_remove_controls(struct tas2781_hda *tas_hda)
++{
++ struct hda_codec *codec = tas_hda->priv->codec;
++
++ if (tas_hda->dsp_prog_ctl)
++ snd_ctl_remove(codec->card, tas_hda->dsp_prog_ctl);
++
++ if (tas_hda->dsp_conf_ctl)
++ snd_ctl_remove(codec->card, tas_hda->dsp_conf_ctl);
++
++ for (int i = ARRAY_SIZE(tas_hda->snd_ctls) - 1; i >= 0; i--)
++ if (tas_hda->snd_ctls[i])
++ snd_ctl_remove(codec->card, tas_hda->snd_ctls[i]);
++
++ if (tas_hda->prof_ctl)
++ snd_ctl_remove(codec->card, tas_hda->prof_ctl);
++}
++
+ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+ {
+ struct tasdevice_priv *tas_priv = context;
++ struct tas2781_hda *tas_hda = dev_get_drvdata(tas_priv->dev);
+ struct hda_codec *codec = tas_priv->codec;
+ int i, ret;
+
+@@ -490,8 +523,8 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+ if (ret)
+ goto out;
+
+- ret = snd_ctl_add(codec->card,
+- snd_ctl_new1(&tas2781_prof_ctrl, tas_priv));
++ tas_hda->prof_ctl = snd_ctl_new1(&tas2781_prof_ctrl, tas_priv);
++ ret = snd_ctl_add(codec->card, tas_hda->prof_ctl);
+ if (ret) {
+ dev_err(tas_priv->dev,
+ "Failed to add KControl %s = %d\n",
+@@ -500,8 +533,9 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tas2781_snd_controls); i++) {
+- ret = snd_ctl_add(codec->card,
+- snd_ctl_new1(&tas2781_snd_controls[i], tas_priv));
++ tas_hda->snd_ctls[i] = snd_ctl_new1(&tas2781_snd_controls[i],
++ tas_priv);
++ ret = snd_ctl_add(codec->card, tas_hda->snd_ctls[i]);
+ if (ret) {
+ dev_err(tas_priv->dev,
+ "Failed to add KControl %s = %d\n",
+@@ -523,8 +557,9 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+ goto out;
+ }
+
+- ret = snd_ctl_add(codec->card,
+- snd_ctl_new1(&tas2781_dsp_prog_ctrl, tas_priv));
++ tas_hda->dsp_prog_ctl = snd_ctl_new1(&tas2781_dsp_prog_ctrl,
++ tas_priv);
++ ret = snd_ctl_add(codec->card, tas_hda->dsp_prog_ctl);
+ if (ret) {
+ dev_err(tas_priv->dev,
+ "Failed to add KControl %s = %d\n",
+@@ -532,8 +567,9 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+ goto out;
+ }
+
+- ret = snd_ctl_add(codec->card,
+- snd_ctl_new1(&tas2781_dsp_conf_ctrl, tas_priv));
++ tas_hda->dsp_conf_ctl = snd_ctl_new1(&tas2781_dsp_conf_ctrl,
++ tas_priv);
++ ret = snd_ctl_add(codec->card, tas_hda->dsp_conf_ctl);
+ if (ret) {
+ dev_err(tas_priv->dev,
+ "Failed to add KControl %s = %d\n",
+@@ -543,39 +579,41 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
+
+ tas_priv->fw_state = TASDEVICE_DSP_FW_ALL_OK;
+ tasdevice_prmg_load(tas_priv, 0);
++ if (tas_priv->fmw->nr_programs > 0)
++ tas_priv->cur_prog = 0;
++ if (tas_priv->fmw->nr_configurations > 0)
++ tas_priv->cur_conf = 0;
+
+ /* If calibrated data occurs error, dsp will still works with default
+ * calibrated data inside algo.
+ */
+- tas2781_save_calibration(tas_priv);
++ tasdevice_save_calibration(tas_priv);
++
++ tasdevice_tuning_switch(tas_hda->priv, 0);
++ tas_hda->priv->playback_started = true;
+
+ out:
+- if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
+- /*If DSP FW fail, kcontrol won't be created */
+- tasdevice_config_info_remove(tas_priv);
+- tasdevice_dsp_remove(tas_priv);
+- }
+- mutex_unlock(&tas_priv->codec_lock);
++ mutex_unlock(&tas_hda->priv->codec_lock);
+ if (fmw)
+ release_firmware(fmw);
+- pm_runtime_mark_last_busy(tas_priv->dev);
+- pm_runtime_put_autosuspend(tas_priv->dev);
++ pm_runtime_mark_last_busy(tas_hda->dev);
++ pm_runtime_put_autosuspend(tas_hda->dev);
+ }
+
+ static int tas2781_hda_bind(struct device *dev, struct device *master,
+ void *master_data)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ struct hda_component *comps = master_data;
+ struct hda_codec *codec;
+ unsigned int subid;
+ int ret;
+
+- if (!comps || tas_priv->index < 0 ||
+- tas_priv->index >= HDA_MAX_COMPONENTS)
++ if (!comps || tas_hda->priv->index < 0 ||
++ tas_hda->priv->index >= HDA_MAX_COMPONENTS)
+ return -EINVAL;
+
+- comps = &comps[tas_priv->index];
++ comps = &comps[tas_hda->priv->index];
+ if (comps->dev)
+ return -EBUSY;
+
+@@ -584,10 +622,10 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
+
+ switch (subid) {
+ case 0x17aa:
+- tas_priv->catlog_id = LENOVO;
++ tas_hda->priv->catlog_id = LENOVO;
+ break;
+ default:
+- tas_priv->catlog_id = OTHERS;
++ tas_hda->priv->catlog_id = OTHERS;
+ break;
+ }
+
+@@ -597,7 +635,7 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
+
+ strscpy(comps->name, dev_name(dev), sizeof(comps->name));
+
+- ret = tascodec_init(tas_priv, codec, tasdev_fw_ready);
++ ret = tascodec_init(tas_hda->priv, codec, THIS_MODULE, tasdev_fw_ready);
+ if (!ret)
+ comps->playback_hook = tas2781_hda_playback_hook;
+
+@@ -610,16 +648,22 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
+ static void tas2781_hda_unbind(struct device *dev,
+ struct device *master, void *master_data)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ struct hda_component *comps = master_data;
++ comps = &comps[tas_hda->priv->index];
+
+- if (comps[tas_priv->index].dev == dev)
+- memset(&comps[tas_priv->index], 0, sizeof(*comps));
++ if (comps->dev == dev) {
++ comps->dev = NULL;
++ memset(comps->name, 0, sizeof(comps->name));
++ comps->playback_hook = NULL;
++ }
+
+- tasdevice_config_info_remove(tas_priv);
+- tasdevice_dsp_remove(tas_priv);
++ tas2781_hda_remove_controls(tas_hda);
+
+- tas_priv->fw_state = TASDEVICE_DSP_FW_PENDING;
++ tasdevice_config_info_remove(tas_hda->priv);
++ tasdevice_dsp_remove(tas_hda->priv);
++
++ tas_hda->priv->fw_state = TASDEVICE_DSP_FW_PENDING;
+ }
+
+ static const struct component_ops tas2781_hda_comp_ops = {
+@@ -629,60 +673,70 @@ static const struct component_ops tas2781_hda_comp_ops = {
+
+ static void tas2781_hda_remove(struct device *dev)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+- pm_runtime_get_sync(tas_priv->dev);
+- pm_runtime_disable(tas_priv->dev);
++ component_del(tas_hda->dev, &tas2781_hda_comp_ops);
+
+- component_del(tas_priv->dev, &tas2781_hda_comp_ops);
++ pm_runtime_get_sync(tas_hda->dev);
++ pm_runtime_disable(tas_hda->dev);
+
+- pm_runtime_put_noidle(tas_priv->dev);
++ pm_runtime_put_noidle(tas_hda->dev);
+
+- tasdevice_remove(tas_priv);
++ tasdevice_remove(tas_hda->priv);
+ }
+
+ static int tas2781_hda_i2c_probe(struct i2c_client *clt)
+ {
+- struct tasdevice_priv *tas_priv;
++ struct tas2781_hda *tas_hda;
+ const char *device_name;
+ int ret;
+
+- if (strstr(dev_name(&clt->dev), "TIAS2781"))
+- device_name = "TIAS2781";
+- else
+- return -ENODEV;
+
+- tas_priv = tasdevice_kzalloc(clt);
+- if (!tas_priv)
++ tas_hda = devm_kzalloc(&clt->dev, sizeof(*tas_hda), GFP_KERNEL);
++ if (!tas_hda)
++ return -ENOMEM;
++
++ dev_set_drvdata(&clt->dev, tas_hda);
++ tas_hda->dev = &clt->dev;
++
++ tas_hda->priv = tasdevice_kzalloc(clt);
++ if (!tas_hda->priv)
+ return -ENOMEM;
+
+- tas_priv->irq_info.irq = clt->irq;
+- ret = tas2781_read_acpi(tas_priv, device_name);
++ if (strstr(dev_name(&clt->dev), "TIAS2781")) {
++ device_name = "TIAS2781";
++ tas_hda->priv->save_calibration = tas2781_save_calibration;
++ tas_hda->priv->apply_calibration = tas2781_apply_calib;
++ } else
++ return -ENODEV;
++
++ tas_hda->priv->irq = clt->irq;
++ ret = tas2781_read_acpi(tas_hda->priv, device_name);
+ if (ret)
+- return dev_err_probe(tas_priv->dev, ret,
++ return dev_err_probe(tas_hda->dev, ret,
+ "Platform not supported\n");
+
+- ret = tasdevice_init(tas_priv);
++ ret = tasdevice_init(tas_hda->priv);
+ if (ret)
+ goto err;
+
+- pm_runtime_set_autosuspend_delay(tas_priv->dev, 3000);
+- pm_runtime_use_autosuspend(tas_priv->dev);
+- pm_runtime_mark_last_busy(tas_priv->dev);
+- pm_runtime_set_active(tas_priv->dev);
+- pm_runtime_get_noresume(tas_priv->dev);
+- pm_runtime_enable(tas_priv->dev);
++ pm_runtime_set_autosuspend_delay(tas_hda->dev, 3000);
++ pm_runtime_use_autosuspend(tas_hda->dev);
++ pm_runtime_mark_last_busy(tas_hda->dev);
++ pm_runtime_set_active(tas_hda->dev);
++ pm_runtime_get_noresume(tas_hda->dev);
++ pm_runtime_enable(tas_hda->dev);
+
+- pm_runtime_put_autosuspend(tas_priv->dev);
++ pm_runtime_put_autosuspend(tas_hda->dev);
+
+- ret = component_add(tas_priv->dev, &tas2781_hda_comp_ops);
++ tas2781_reset(tas_hda->priv);
++
++ ret = component_add(tas_hda->dev, &tas2781_hda_comp_ops);
+ if (ret) {
+- dev_err(tas_priv->dev, "Register component failed: %d\n", ret);
+- pm_runtime_disable(tas_priv->dev);
+- goto err;
++ dev_err(tas_hda->dev, "Register component failed: %d\n", ret);
++ pm_runtime_disable(tas_hda->dev);
+ }
+
+- tas2781_reset(tas_priv);
+ err:
+ if (ret)
+ tas2781_hda_remove(&clt->dev);
+@@ -696,81 +750,58 @@ static void tas2781_hda_i2c_remove(struct i2c_client *clt)
+
+ static int tas2781_runtime_suspend(struct device *dev)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+- int i;
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+- dev_dbg(tas_priv->dev, "Runtime Suspend\n");
++ dev_dbg(tas_hda->dev, "Runtime Suspend\n");
+
+- mutex_lock(&tas_priv->codec_lock);
+-
+- if (tas_priv->playback_started) {
+- tasdevice_tuning_switch(tas_priv, 1);
+- tas_priv->playback_started = false;
+- }
++ mutex_lock(&tas_hda->priv->codec_lock);
+
+- for (i = 0; i < tas_priv->ndev; i++) {
+- tas_priv->tasdevice[i].cur_book = -1;
+- tas_priv->tasdevice[i].cur_prog = -1;
+- tas_priv->tasdevice[i].cur_conf = -1;
++ /* The driver powers up the amplifiers at module load time.
++ * Stop the playback if it's unused.
++ */
++ if (tas_hda->priv->playback_started) {
++ tasdevice_tuning_switch(tas_hda->priv, 1);
++ tas_hda->priv->playback_started = false;
+ }
+
+- regcache_cache_only(tas_priv->regmap, true);
+- regcache_mark_dirty(tas_priv->regmap);
+-
+- mutex_unlock(&tas_priv->codec_lock);
++ mutex_unlock(&tas_hda->priv->codec_lock);
+
+ return 0;
+ }
+
+ static int tas2781_runtime_resume(struct device *dev)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+- unsigned long calib_data_sz =
+- tas_priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
+- int ret;
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+- dev_dbg(tas_priv->dev, "Runtime Resume\n");
++ dev_dbg(tas_hda->dev, "Runtime Resume\n");
+
+- mutex_lock(&tas_priv->codec_lock);
++ mutex_lock(&tas_hda->priv->codec_lock);
+
+- regcache_cache_only(tas_priv->regmap, false);
+- ret = regcache_sync(tas_priv->regmap);
+- if (ret) {
+- dev_err(tas_priv->dev,
+- "Failed to restore register cache: %d\n", ret);
+- goto out;
+- }
+-
+- tasdevice_prmg_load(tas_priv, tas_priv->cur_prog);
++ tasdevice_prmg_load(tas_hda->priv, tas_hda->priv->cur_prog);
+
+ /* If calibrated data occurs error, dsp will still works with default
+ * calibrated data inside algo.
+ */
+- if (tas_priv->cali_data.total_sz > calib_data_sz)
+- tas2781_apply_calib(tas_priv);
++ tasdevice_apply_calibration(tas_hda->priv);
+
+-out:
+- mutex_unlock(&tas_priv->codec_lock);
++ mutex_unlock(&tas_hda->priv->codec_lock);
+
+- return ret;
++ return 0;
+ }
+
+ static int tas2781_system_suspend(struct device *dev)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+- int ret;
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+
+- dev_dbg(tas_priv->dev, "System Suspend\n");
++ dev_dbg(tas_hda->priv->dev, "System Suspend\n");
+
+- ret = pm_runtime_force_suspend(dev);
+- if (ret)
+- return ret;
++ mutex_lock(&tas_hda->priv->codec_lock);
+
+ /* Shutdown chip before system suspend */
+- regcache_cache_only(tas_priv->regmap, false);
+- tasdevice_tuning_switch(tas_priv, 1);
+- regcache_cache_only(tas_priv->regmap, true);
+- regcache_mark_dirty(tas_priv->regmap);
++ if (tas_hda->priv->playback_started)
++ tasdevice_tuning_switch(tas_hda->priv, 1);
++
++ mutex_unlock(&tas_hda->priv->codec_lock);
+
+ /*
+ * Reset GPIO may be shared, so cannot reset here.
+@@ -781,33 +812,30 @@ static int tas2781_system_suspend(struct device *dev)
+
+ static int tas2781_system_resume(struct device *dev)
+ {
+- struct tasdevice_priv *tas_priv = dev_get_drvdata(dev);
+- unsigned long calib_data_sz =
+- tas_priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
+- int i, ret;
+-
+- dev_dbg(tas_priv->dev, "System Resume\n");
++ struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
++ int i;
+
+- ret = pm_runtime_force_resume(dev);
+- if (ret)
+- return ret;
++ dev_dbg(tas_hda->priv->dev, "System Resume\n");
+
+- mutex_lock(&tas_priv->codec_lock);
++ mutex_lock(&tas_hda->priv->codec_lock);
+
+- for (i = 0; i < tas_priv->ndev; i++) {
+- tas_priv->tasdevice[i].cur_book = -1;
+- tas_priv->tasdevice[i].cur_prog = -1;
+- tas_priv->tasdevice[i].cur_conf = -1;
++ for (i = 0; i < tas_hda->priv->ndev; i++) {
++ tas_hda->priv->tasdevice[i].cur_book = -1;
++ tas_hda->priv->tasdevice[i].cur_prog = -1;
++ tas_hda->priv->tasdevice[i].cur_conf = -1;
+ }
+- tas2781_reset(tas_priv);
+- tasdevice_prmg_load(tas_priv, tas_priv->cur_prog);
++ tas2781_reset(tas_hda->priv);
++ tasdevice_prmg_load(tas_hda->priv, tas_hda->priv->cur_prog);
+
+ /* If calibrated data occurs error, dsp will still work with default
+ * calibrated data inside algo.
+ */
+- if (tas_priv->cali_data.total_sz > calib_data_sz)
+- tas2781_apply_calib(tas_priv);
+- mutex_unlock(&tas_priv->codec_lock);
++ tasdevice_apply_calibration(tas_hda->priv);
++
++ if (tas_hda->priv->playback_started)
++ tasdevice_tuning_switch(tas_hda->priv, 0);
++
++ mutex_unlock(&tas_hda->priv->codec_lock);
+
+ return 0;
+ }
+diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
+index 46705ec77b4810..eb3aca16359c58 100644
+--- a/sound/pci/oxygen/oxygen_mixer.c
++++ b/sound/pci/oxygen/oxygen_mixer.c
+@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
+ oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
+ newreg = oldreg & ~0x0707;
+ newreg = newreg | (value->value.integer.value[0] & 7);
+- newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
++ newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
+ change = newreg != oldreg;
+ if (change)
+ oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index e7d1b43471a291..713ca262a0e979 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -1298,8 +1298,10 @@ static int snd_hdsp_midi_output_possible (struct hdsp *hdsp, int id)
+
+ static void snd_hdsp_flush_midi_input (struct hdsp *hdsp, int id)
+ {
+- while (snd_hdsp_midi_input_available (hdsp, id))
+- snd_hdsp_midi_read_byte (hdsp, id);
++ int count = 256;
++
++ while (snd_hdsp_midi_input_available(hdsp, id) && --count)
++ snd_hdsp_midi_read_byte(hdsp, id);
+ }
+
+ static int snd_hdsp_midi_output_write (struct hdsp_midi *hmidi)
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 267c7848974aee..74215f57f4fc9d 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -1838,8 +1838,10 @@ static inline int snd_hdspm_midi_output_possible (struct hdspm *hdspm, int id)
+
+ static void snd_hdspm_flush_midi_input(struct hdspm *hdspm, int id)
+ {
+- while (snd_hdspm_midi_input_available (hdspm, id))
+- snd_hdspm_midi_read_byte (hdspm, id);
++ int count = 256;
++
++ while (snd_hdspm_midi_input_available(hdspm, id) && --count)
++ snd_hdspm_midi_read_byte(hdspm, id);
+ }
+
+ static int snd_hdspm_midi_output_write (struct hdspm_midi *hmidi)
+diff --git a/sound/sh/aica.c b/sound/sh/aica.c
+index 320ac792c7fe24..3182c634464d42 100644
+--- a/sound/sh/aica.c
++++ b/sound/sh/aica.c
+@@ -278,7 +278,8 @@ static void run_spu_dma(struct work_struct *work)
+ dreamcastcard->clicks++;
+ if (unlikely(dreamcastcard->clicks >= AICA_PERIOD_NUMBER))
+ dreamcastcard->clicks %= AICA_PERIOD_NUMBER;
+- mod_timer(&dreamcastcard->timer, jiffies + 1);
++ if (snd_pcm_running(dreamcastcard->substream))
++ mod_timer(&dreamcastcard->timer, jiffies + 1);
+ }
+ }
+
+@@ -290,6 +291,8 @@ static void aica_period_elapsed(struct timer_list *t)
+ /*timer function - so cannot sleep */
+ int play_period;
+ struct snd_pcm_runtime *runtime;
++ if (!snd_pcm_running(substream))
++ return;
+ runtime = substream->runtime;
+ dreamcastcard = substream->pcm->private_data;
+ /* Have we played out an additional period? */
+@@ -350,12 +353,19 @@ static int snd_aicapcm_pcm_open(struct snd_pcm_substream
+ return 0;
+ }
+
++static int snd_aicapcm_pcm_sync_stop(struct snd_pcm_substream *substream)
++{
++ struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
++
++ del_timer_sync(&dreamcastcard->timer);
++ cancel_work_sync(&dreamcastcard->spu_dma_work);
++ return 0;
++}
++
+ static int snd_aicapcm_pcm_close(struct snd_pcm_substream
+ *substream)
+ {
+ struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
+- flush_work(&(dreamcastcard->spu_dma_work));
+- del_timer(&dreamcastcard->timer);
+ dreamcastcard->substream = NULL;
+ kfree(dreamcastcard->channel);
+ spu_disable();
+@@ -401,6 +411,7 @@ static const struct snd_pcm_ops snd_aicapcm_playback_ops = {
+ .prepare = snd_aicapcm_pcm_prepare,
+ .trigger = snd_aicapcm_pcm_trigger,
+ .pointer = snd_aicapcm_pcm_pointer,
++ .sync_stop = snd_aicapcm_pcm_sync_stop,
+ };
+
+ /* TO DO: set up to handle more than one pcm instance */
+diff --git a/sound/soc/amd/acp-es8336.c b/sound/soc/amd/acp-es8336.c
+index 5e56d3a53be783..49bffc567e68d9 100644
+--- a/sound/soc/amd/acp-es8336.c
++++ b/sound/soc/amd/acp-es8336.c
+@@ -203,8 +203,10 @@ static int st_es8336_late_probe(struct snd_soc_card *card)
+
+ codec_dev = acpi_get_first_physical_node(adev);
+ acpi_dev_put(adev);
+- if (!codec_dev)
++ if (!codec_dev) {
+ dev_err(card->dev, "can not find codec dev\n");
++ return -ENODEV;
++ }
+
+ ret = devm_acpi_dev_add_driver_gpios(codec_dev, acpi_es8336_gpios);
+ if (ret)
+diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
+index df350014966a05..cf2fdde5aaa18d 100644
+--- a/sound/soc/amd/acp/acp-i2s.c
++++ b/sound/soc/amd/acp/acp-i2s.c
+@@ -543,20 +543,12 @@ static int acp_i2s_probe(struct snd_soc_dai *dai)
+ {
+ struct device *dev = dai->component->dev;
+ struct acp_dev_data *adata = dev_get_drvdata(dev);
+- struct acp_resource *rsrc = adata->rsrc;
+- unsigned int val;
+
+ if (!adata->acp_base) {
+ dev_err(dev, "I2S base is NULL\n");
+ return -EINVAL;
+ }
+
+- val = readl(adata->acp_base + rsrc->i2s_pin_cfg_offset);
+- if (val != rsrc->i2s_mode) {
+- dev_err(dev, "I2S Mode not supported val %x\n", val);
+- return -EINVAL;
+- }
+-
+ return 0;
+ }
+
+diff --git a/sound/soc/amd/acp/acp-legacy-mach.c b/sound/soc/amd/acp/acp-legacy-mach.c
+index 6d57d17ddfd770..6e820c2edd1d87 100644
+--- a/sound/soc/amd/acp/acp-legacy-mach.c
++++ b/sound/soc/amd/acp/acp-legacy-mach.c
+@@ -137,6 +137,8 @@ static const struct platform_device_id board_ids[] = {
+ },
+ { }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
++
+ static struct platform_driver acp_asoc_audio = {
+ .driver = {
+ .pm = &snd_soc_pm_ops,
+diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
+index a06af82b805656..fc4e91535578b4 100644
+--- a/sound/soc/amd/acp/acp-mach-common.c
++++ b/sound/soc/amd/acp/acp-mach-common.c
+@@ -1416,8 +1416,13 @@ int acp_sofdsp_dai_links_create(struct snd_soc_card *card)
+ if (drv_data->amp_cpu_id == I2S_SP) {
+ links[i].name = "acp-amp-codec";
+ links[i].id = AMP_BE_ID;
+- links[i].cpus = sof_sp_virtual;
+- links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
++ if (drv_data->platform == RENOIR) {
++ links[i].cpus = sof_sp;
++ links[i].num_cpus = ARRAY_SIZE(sof_sp);
++ } else {
++ links[i].cpus = sof_sp_virtual;
++ links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
++ }
+ links[i].platforms = sof_component;
+ links[i].num_platforms = ARRAY_SIZE(sof_component);
+ links[i].dpcm_playback = 1;
+diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
+index a32c14a109b77b..7de6446e6f7c18 100644
+--- a/sound/soc/amd/acp/acp-pci.c
++++ b/sound/soc/amd/acp/acp-pci.c
+@@ -107,7 +107,10 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
+ goto unregister_dmic_dev;
+ }
+
+- acp_init(chip);
++ ret = acp_init(chip);
++ if (ret)
++ goto unregister_dmic_dev;
++
+ res = devm_kcalloc(&pci->dev, num_res, sizeof(struct resource), GFP_KERNEL);
+ if (!res) {
+ ret = -ENOMEM;
+@@ -182,10 +185,12 @@ static int __maybe_unused snd_acp_resume(struct device *dev)
+ ret = acp_init(chip);
+ if (ret)
+ dev_err(dev, "ACP init failed\n");
+- child = chip->chip_pdev->dev;
+- adata = dev_get_drvdata(&child);
+- if (adata)
+- acp_enable_interrupts(adata);
++ if (chip->chip_pdev) {
++ child = chip->chip_pdev->dev;
++ adata = dev_get_drvdata(&child);
++ if (adata)
++ acp_enable_interrupts(adata);
++ }
+ return ret;
+ }
+
+diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
+index 5223033a122f86..0c5254c52b7945 100644
+--- a/sound/soc/amd/acp/acp-sof-mach.c
++++ b/sound/soc/amd/acp/acp-sof-mach.c
+@@ -120,16 +120,14 @@ static int acp_sof_probe(struct platform_device *pdev)
+ if (dmi_id && dmi_id->driver_data)
+ acp_card_drvdata->tdm_mode = dmi_id->driver_data;
+
+- acp_sofdsp_dai_links_create(card);
++ ret = acp_sofdsp_dai_links_create(card);
++ if (ret)
++ return dev_err_probe(&pdev->dev, ret, "Failed to create DAI links\n");
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+- if (ret) {
+- dev_err(&pdev->dev,
+- "devm_snd_soc_register_card(%s) failed: %d\n",
+- card->name, ret);
+- return ret;
+- }
+-
++ if (ret)
++ return dev_err_probe(&pdev->dev, ret,
++ "Failed to register card(%s)\n", card->name);
+ return 0;
+ }
+
+@@ -164,6 +162,8 @@ static const struct platform_device_id board_ids[] = {
+ },
+ { }
+ };
++MODULE_DEVICE_TABLE(platform, board_ids);
++
+ static struct platform_driver acp_asoc_audio = {
+ .driver = {
+ .name = "sof_mach",
+diff --git a/sound/soc/amd/vangogh/acp5x-mach.c b/sound/soc/amd/vangogh/acp5x-mach.c
+index eda464545866ca..2ccc95d5778342 100644
+--- a/sound/soc/amd/vangogh/acp5x-mach.c
++++ b/sound/soc/amd/vangogh/acp5x-mach.c
+@@ -439,7 +439,15 @@ static const struct dmi_system_id acp5x_vg_quirk_table[] = {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+- }
++ },
++ .driver_data = (void *)&acp5x_8821_35l41_card,
++ },
++ {
++ .matches = {
++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Valve"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++ },
++ .driver_data = (void *)&acp5x_8821_98388_card,
+ },
+ {}
+ };
+@@ -452,25 +460,15 @@ static int acp5x_probe(struct platform_device *pdev)
+ struct snd_soc_card *card;
+ int ret;
+
+- card = (struct snd_soc_card *)device_get_match_data(dev);
+- if (!card) {
+- /*
+- * This is normally the result of directly probing the driver
+- * in pci-acp5x through platform_device_register_full(), which
+- * is necessary for the CS35L41 variant, as it doesn't support
+- * ACPI probing and relies on DMI quirks.
+- */
+- dmi_id = dmi_first_match(acp5x_vg_quirk_table);
+- if (!dmi_id)
+- return -ENODEV;
+-
+- card = &acp5x_8821_35l41_card;
+- }
++ dmi_id = dmi_first_match(acp5x_vg_quirk_table);
++ if (!dmi_id || !dmi_id->driver_data)
++ return -ENODEV;
+
+ machine = devm_kzalloc(dev, sizeof(*machine), GFP_KERNEL);
+ if (!machine)
+ return -ENOMEM;
+
++ card = dmi_id->driver_data;
+ card->dev = dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+@@ -482,17 +480,10 @@ static int acp5x_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static const struct acpi_device_id acp5x_acpi_match[] = {
+- { "AMDI8821", (kernel_ulong_t)&acp5x_8821_98388_card },
+- {},
+-};
+-MODULE_DEVICE_TABLE(acpi, acp5x_acpi_match);
+-
+ static struct platform_driver acp5x_mach_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &snd_soc_pm_ops,
+- .acpi_match_table = acp5x_acpi_match,
+ },
+ .probe = acp5x_probe,
+ };
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 15a864dcd7bd3a..248e3bcbf386b0 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -199,6 +199,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "21HY"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "21J0"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -213,6 +220,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J6"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "21M3"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "21M5"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -234,6 +255,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UG"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "82UU"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -248,6 +276,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YM"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "83AS"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -262,6 +297,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M5602RA"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -283,6 +325,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "E1504FA"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M7600RE"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -290,6 +346,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -353,6 +423,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "8A43"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8A44"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -367,6 +444,27 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8B27"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8B2F"),
++ }
++ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8BD6"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -374,6 +472,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "MRID6"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "MDC"),
++ DMI_MATCH(DMI_BOARD_NAME, "Herbag_MDU"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+@@ -381,6 +486,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"),
++ }
++ },
+ {}
+ };
+
+diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
+index 4c1985711218dc..ba314b2799190e 100644
+--- a/sound/soc/atmel/atmel-classd.c
++++ b/sound/soc/atmel/atmel-classd.c
+@@ -118,7 +118,7 @@ static const struct snd_pcm_hardware atmel_classd_hw = {
+ static int atmel_classd_cpu_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
+ int err;
+
+@@ -141,7 +141,7 @@ atmel_classd_platform_configure_dma(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
+
+ if (params_physical_width(params) != 16) {
+@@ -338,7 +338,7 @@ atmel_classd_cpu_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = cpu_dai->component;
+ int fs;
+@@ -381,7 +381,7 @@ static void
+ atmel_classd_cpu_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
+
+ clk_disable_unprepare(dd->gclk);
+@@ -473,19 +473,22 @@ static int atmel_classd_asoc_card_init(struct device *dev,
+ if (!dai_link)
+ return -ENOMEM;
+
+- comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
++ comp = devm_kzalloc(dev, 2 * sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return -ENOMEM;
+
+- dai_link->cpus = comp;
+- dai_link->codecs = &asoc_dummy_dlc;
++ dai_link->cpus = &comp[0];
++ dai_link->codecs = &snd_soc_dummy_dlc;
++ dai_link->platforms = &comp[1];
+
+ dai_link->num_cpus = 1;
+ dai_link->num_codecs = 1;
++ dai_link->num_platforms = 1;
+
+ dai_link->name = "CLASSD";
+ dai_link->stream_name = "CLASSD PCM";
+ dai_link->cpus->dai_name = dev_name(dev);
++ dai_link->platforms->name = dev_name(dev);
+
+ card->dai_link = dai_link;
+ card->num_links = 1;
+diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
+index 96a8c7dba98ff2..7306e04da513b1 100644
+--- a/sound/soc/atmel/atmel-pcm-dma.c
++++ b/sound/soc/atmel/atmel-pcm-dma.c
+@@ -52,10 +52,10 @@ static const struct snd_pcm_hardware atmel_pcm_dma_hardware = {
+ static void atmel_pcm_dma_irq(u32 ssc_sr,
+ struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pcm_dma_params *prtd;
+
+- prtd = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
++ prtd = snd_soc_dai_get_dma_data(snd_soc_rtd_to_cpu(rtd, 0), substream);
+
+ if (ssc_sr & prtd->mask->ssc_error) {
+ if (snd_pcm_running(substream))
+@@ -77,12 +77,12 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
+ static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pcm_dma_params *prtd;
+ struct ssc_device *ssc;
+ int ret;
+
+- prtd = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
++ prtd = snd_soc_dai_get_dma_data(snd_soc_rtd_to_cpu(rtd, 0), substream);
+ ssc = prtd->ssc;
+
+ ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
+diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
+index 3e7ea2021b46b2..7db8df85c54f3b 100644
+--- a/sound/soc/atmel/atmel-pcm-pdc.c
++++ b/sound/soc/atmel/atmel-pcm-pdc.c
+@@ -140,12 +140,12 @@ static int atmel_pcm_hw_params(struct snd_soc_component *component,
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct atmel_runtime_data *prtd = runtime->private_data;
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+
+ /* this may get called several times by oss emulation
+ * with different params */
+
+- prtd->params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
++ prtd->params = snd_soc_dai_get_dma_data(snd_soc_rtd_to_cpu(rtd, 0), substream);
+ prtd->params->dma_intr_handler = atmel_pcm_dma_irq;
+
+ prtd->dma_buffer = runtime->dma_addr;
+diff --git a/sound/soc/atmel/atmel-pdmic.c b/sound/soc/atmel/atmel-pdmic.c
+index 0db7815d230c3c..fa29dd8ef20897 100644
+--- a/sound/soc/atmel/atmel-pdmic.c
++++ b/sound/soc/atmel/atmel-pdmic.c
+@@ -104,7 +104,7 @@ static struct atmel_pdmic_pdata *atmel_pdmic_dt_init(struct device *dev)
+ static int atmel_pdmic_cpu_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
+ int ret;
+
+@@ -132,7 +132,7 @@ static int atmel_pdmic_cpu_dai_startup(struct snd_pcm_substream *substream,
+ static void atmel_pdmic_cpu_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
+
+ /* Disable the overrun error interrupt */
+@@ -145,7 +145,7 @@ static void atmel_pdmic_cpu_dai_shutdown(struct snd_pcm_substream *substream,
+ static int atmel_pdmic_cpu_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = cpu_dai->component;
+ u32 val;
+@@ -191,7 +191,7 @@ atmel_pdmic_platform_configure_dma(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct dma_slave_config *slave_config)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
+ int ret;
+
+@@ -356,7 +356,7 @@ atmel_pdmic_cpu_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = cpu_dai->component;
+ unsigned int rate_min = substream->runtime->hw.rate_min;
+@@ -501,7 +501,7 @@ static int atmel_pdmic_asoc_card_init(struct device *dev,
+ return -ENOMEM;
+
+ dai_link->cpus = comp;
+- dai_link->codecs = &asoc_dummy_dlc;
++ dai_link->codecs = &snd_soc_dummy_dlc;
+
+ dai_link->num_cpus = 1;
+ dai_link->num_codecs = 1;
+diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
+index 00e98136bec25d..01e944fa11483b 100644
+--- a/sound/soc/atmel/atmel_wm8904.c
++++ b/sound/soc/atmel/atmel_wm8904.c
+@@ -26,8 +26,8 @@ static const struct snd_soc_dapm_widget atmel_asoc_wm8904_dapm_widgets[] = {
+ static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_pll(codec_dai, WM8904_FLL_MCLK, WM8904_FLL_MCLK,
+diff --git a/sound/soc/atmel/mchp-pdmc.c b/sound/soc/atmel/mchp-pdmc.c
+index dcc4e14b3dde27..206bbb5aaab5d9 100644
+--- a/sound/soc/atmel/mchp-pdmc.c
++++ b/sound/soc/atmel/mchp-pdmc.c
+@@ -285,6 +285,9 @@ static int mchp_pdmc_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ if (!substream)
+ return -ENODEV;
+
++ if (!substream->runtime)
++ return 0; /* nothing to do; avoids an error from alsactl restore */
++
+ map = mchp_pdmc_chmap_get(substream, info);
+ if (!map)
+ return -EINVAL;
+diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
+index 30c87c2c1b0bd1..18a8760443ae6a 100644
+--- a/sound/soc/atmel/mikroe-proto.c
++++ b/sound/soc/atmel/mikroe-proto.c
+@@ -21,7 +21,7 @@
+ static int snd_proto_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_card *card = rtd->card;
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+
+ /* Set proto sysclk */
+ int ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
+index 0405e9e49140e2..d3ec9826d505f4 100644
+--- a/sound/soc/atmel/sam9g20_wm8731.c
++++ b/sound/soc/atmel/sam9g20_wm8731.c
+@@ -66,7 +66,7 @@ static const struct snd_soc_dapm_route intercon[] = {
+ */
+ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct device *dev = rtd->dev;
+ int ret;
+
+diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
+index cd1d59a90e0218..d1c1f370a9cd5a 100644
+--- a/sound/soc/atmel/sam9x5_wm8731.c
++++ b/sound/soc/atmel/sam9x5_wm8731.c
+@@ -40,7 +40,7 @@ struct sam9x5_drvdata {
+ */
+ static int sam9x5_wm8731_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct device *dev = rtd->dev;
+ int ret;
+
+diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
+index 400eaf9f8b1407..f185711180cb46 100644
+--- a/sound/soc/au1x/db1200.c
++++ b/sound/soc/au1x/db1200.c
+@@ -44,6 +44,7 @@ static const struct platform_device_id db1200_pids[] = {
+ },
+ {},
+ };
++MODULE_DEVICE_TABLE(platform, db1200_pids);
+
+ /*------------------------- AC97 PART ---------------------------*/
+
+diff --git a/sound/soc/codecs/chv3-codec.c b/sound/soc/codecs/chv3-codec.c
+index ab99effa68748d..40020500b1fe89 100644
+--- a/sound/soc/codecs/chv3-codec.c
++++ b/sound/soc/codecs/chv3-codec.c
+@@ -26,6 +26,7 @@ static const struct of_device_id chv3_codec_of_match[] = {
+ { .compatible = "google,chv3-codec", },
+ { }
+ };
++MODULE_DEVICE_TABLE(of, chv3_codec_of_match);
+
+ static struct platform_driver chv3_codec_platform_driver = {
+ .driver = {
+diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
+index 9968c2e189e64b..d25455f395660f 100644
+--- a/sound/soc/codecs/cs35l33.c
++++ b/sound/soc/codecs/cs35l33.c
+@@ -22,13 +22,11 @@
+ #include <sound/soc-dapm.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/cs35l33.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regulator/machine.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
+@@ -1167,7 +1165,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client)
+
+ /* We could issue !RST or skip it based on AMP topology */
+ cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+- "reset-gpios", GPIOD_OUT_HIGH);
++ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cs35l33->reset_gpio)) {
+ dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
+ __func__);
+diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
+index 6974dd4614103b..04d9117b31ac7b 100644
+--- a/sound/soc/codecs/cs35l34.c
++++ b/sound/soc/codecs/cs35l34.c
+@@ -20,14 +20,12 @@
+ #include <linux/regulator/machine.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/of_device.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of_irq.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+@@ -1061,7 +1059,7 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client)
+ dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+ cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+- "reset-gpios", GPIOD_OUT_LOW);
++ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs35l34->reset_gpio)) {
+ ret = PTR_ERR(cs35l34->reset_gpio);
+ goto err_regulator;
+diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
+index 4ec306cd2f4766..2ec5fdc875b13f 100644
+--- a/sound/soc/codecs/cs35l41-lib.c
++++ b/sound/soc/codecs/cs35l41-lib.c
+@@ -1192,8 +1192,28 @@ bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type)
+ }
+ EXPORT_SYMBOL_GPL(cs35l41_safe_reset);
+
++/*
++ * Enabling the CS35L41_SHD_BOOST_ACTV and CS35L41_SHD_BOOST_PASS shared boosts
++ * also requires a call to cs35l41_mdsync_up(), but not before getting the
++ * PLL Lock signal.
++ *
++ * PLL Lock seems to be triggered soon after snd_pcm_start() is executed and
++ * SNDRV_PCM_TRIGGER_START command is processed, which happens (long) after the
++ * SND_SOC_DAPM_PRE_PMU event handler is invoked as part of snd_pcm_prepare().
++ *
++ * This event handler is where cs35l41_global_enable() is normally called from,
++ * but waiting for PLL Lock here will time out. Increasing the wait duration
++ * will not help, as the only consequence of it would be to add an unnecessary
++ * delay in the invocation of snd_pcm_start().
++ *
++ * Trying to move the wait into the SNDRV_PCM_TRIGGER_START callback is not a
++ * solution either, as the trigger is executed in an IRQ-off atomic context.
++ *
++ * The current approach is to invoke cs35l41_mdsync_up() right after receiving
++ * the PLL Lock interrupt, in the IRQ handler.
++ */
+ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+- int enable, struct completion *pll_lock, bool firmware_running)
++ int enable, bool firmware_running)
+ {
+ int ret;
+ unsigned int gpio1_func, pad_control, pwr_ctrl1, pwr_ctrl3, int_status, pup_pdn_mask;
+@@ -1203,11 +1223,6 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ {CS35L41_GPIO_PAD_CONTROL, 0},
+ {CS35L41_PWR_CTRL1, 0, 3000},
+ };
+- struct reg_sequence cs35l41_mdsync_up_seq[] = {
+- {CS35L41_PWR_CTRL3, 0},
+- {CS35L41_PWR_CTRL1, 0x00000000, 3000},
+- {CS35L41_PWR_CTRL1, 0x00000001, 3000},
+- };
+
+ pup_pdn_mask = enable ? CS35L41_PUP_DONE_MASK : CS35L41_PDN_DONE_MASK;
+
+@@ -1241,24 +1256,12 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ cs35l41_mdsync_down_seq[0].def = pwr_ctrl3;
+ cs35l41_mdsync_down_seq[1].def = pad_control;
+ cs35l41_mdsync_down_seq[2].def = pwr_ctrl1;
++
+ ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_down_seq,
+ ARRAY_SIZE(cs35l41_mdsync_down_seq));
+- if (!enable)
+- break;
+-
+- if (!pll_lock)
+- return -EINVAL;
+-
+- ret = wait_for_completion_timeout(pll_lock, msecs_to_jiffies(1000));
+- if (ret == 0) {
+- ret = -ETIMEDOUT;
+- } else {
+- regmap_read(regmap, CS35L41_PWR_CTRL3, &pwr_ctrl3);
+- pwr_ctrl3 |= CS35L41_SYNC_EN_MASK;
+- cs35l41_mdsync_up_seq[0].def = pwr_ctrl3;
+- ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_up_seq,
+- ARRAY_SIZE(cs35l41_mdsync_up_seq));
+- }
++ /* Activation to be completed later via cs35l41_mdsync_up() */
++ if (ret || enable)
++ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, CS35L41_IRQ1_STATUS1,
+ int_status, int_status & pup_pdn_mask,
+@@ -1266,7 +1269,7 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ if (ret)
+ dev_err(dev, "Enable(%d) failed: %d\n", enable, ret);
+
+- // Clear PUP/PDN status
++ /* Clear PUP/PDN status */
+ regmap_write(regmap, CS35L41_IRQ1_STATUS1, pup_pdn_mask);
+ break;
+ case CS35L41_INT_BOOST:
+@@ -1348,6 +1351,17 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
+ }
+ EXPORT_SYMBOL_GPL(cs35l41_global_enable);
+
++/*
++ * To be called after receiving the PLL Lock interrupt, in order to complete
++ * any shared boost activation initiated by cs35l41_global_enable().
++ */
++int cs35l41_mdsync_up(struct regmap *regmap)
++{
++ return regmap_update_bits(regmap, CS35L41_PWR_CTRL3,
++ CS35L41_SYNC_EN_MASK, CS35L41_SYNC_EN_MASK);
++}
++EXPORT_SYMBOL_GPL(cs35l41_mdsync_up);
++
+ int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg)
+ {
+ struct cs35l41_gpio_cfg *gpio1 = &hw_cfg->gpio1;
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index 722b69a6de26ca..bc541293089f01 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -386,10 +386,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ struct cs35l41_private *cs35l41 = data;
+ unsigned int status[4] = { 0, 0, 0, 0 };
+ unsigned int masks[4] = { 0, 0, 0, 0 };
+- int ret = IRQ_NONE;
+ unsigned int i;
++ int ret;
+
+- pm_runtime_get_sync(cs35l41->dev);
++ ret = pm_runtime_resume_and_get(cs35l41->dev);
++ if (ret < 0) {
++ dev_err(cs35l41->dev,
++ "pm_runtime_resume_and_get failed in %s: %d\n",
++ __func__, ret);
++ return IRQ_NONE;
++ }
++
++ ret = IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ regmap_read(cs35l41->regmap,
+@@ -459,7 +467,19 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+
+ if (status[2] & CS35L41_PLL_LOCK) {
+ regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS3, CS35L41_PLL_LOCK);
+- complete(&cs35l41->pll_lock);
++
++ if (cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_ACTV ||
++ cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_PASS) {
++ ret = cs35l41_mdsync_up(cs35l41->regmap);
++ if (ret)
++ dev_err(cs35l41->dev, "MDSYNC-up failed: %d\n", ret);
++ else
++ dev_dbg(cs35l41->dev, "MDSYNC-up done\n");
++
++ dev_dbg(cs35l41->dev, "PUP-done status: %d\n",
++ !!(status[0] & CS35L41_PUP_DONE_MASK));
++ }
++
+ ret = IRQ_HANDLED;
+ }
+
+@@ -500,11 +520,11 @@ static int cs35l41_main_amp_event(struct snd_soc_dapm_widget *w,
+ ARRAY_SIZE(cs35l41_pup_patch));
+
+ ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
+- 1, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running);
++ 1, cs35l41->dsp.cs_dsp.running);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
+- 0, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running);
++ 0, cs35l41->dsp.cs_dsp.running);
+
+ regmap_multi_reg_write_bypassed(cs35l41->regmap,
+ cs35l41_pdn_patch,
+@@ -802,10 +822,6 @@ static const struct snd_pcm_hw_constraint_list cs35l41_constraints = {
+ static int cs35l41_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+- struct cs35l41_private *cs35l41 = snd_soc_component_get_drvdata(dai->component);
+-
+- reinit_completion(&cs35l41->pll_lock);
+-
+ if (substream->runtime)
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+@@ -1079,6 +1095,7 @@ static int cs35l41_handle_pdata(struct device *dev, struct cs35l41_hw_cfg *hw_cf
+ static int cs35l41_dsp_init(struct cs35l41_private *cs35l41)
+ {
+ struct wm_adsp *dsp;
++ uint32_t dsp1rx5_src;
+ int ret;
+
+ dsp = &cs35l41->dsp;
+@@ -1098,16 +1115,29 @@ static int cs35l41_dsp_init(struct cs35l41_private *cs35l41)
+ return ret;
+ }
+
+- ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX5_SRC,
+- CS35L41_INPUT_SRC_VPMON);
++ switch (cs35l41->hw_cfg.bst_type) {
++ case CS35L41_INT_BOOST:
++ case CS35L41_SHD_BOOST_ACTV:
++ dsp1rx5_src = CS35L41_INPUT_SRC_VPMON;
++ break;
++ case CS35L41_EXT_BOOST:
++ case CS35L41_SHD_BOOST_PASS:
++ dsp1rx5_src = CS35L41_INPUT_SRC_VBSTMON;
++ break;
++ default:
++ dev_err(cs35l41->dev, "wm_halo_init failed - Invalid Boost Type: %d\n",
++ cs35l41->hw_cfg.bst_type);
++ goto err_dsp;
++ }
++
++ ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX5_SRC, dsp1rx5_src);
+ if (ret < 0) {
+- dev_err(cs35l41->dev, "Write INPUT_SRC_VPMON failed: %d\n", ret);
++ dev_err(cs35l41->dev, "Write DSP1RX5_SRC: %d failed: %d\n", dsp1rx5_src, ret);
+ goto err_dsp;
+ }
+- ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX6_SRC,
+- CS35L41_INPUT_SRC_CLASSH);
++ ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX6_SRC, CS35L41_INPUT_SRC_VBSTMON);
+ if (ret < 0) {
+- dev_err(cs35l41->dev, "Write INPUT_SRC_CLASSH failed: %d\n", ret);
++ dev_err(cs35l41->dev, "Write CS35L41_INPUT_SRC_VBSTMON failed: %d\n", ret);
+ goto err_dsp;
+ }
+ ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX7_SRC,
+@@ -1295,8 +1325,6 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ if (ret < 0)
+ goto err;
+
+- init_completion(&cs35l41->pll_lock);
+-
+ pm_runtime_set_autosuspend_delay(cs35l41->dev, 3000);
+ pm_runtime_use_autosuspend(cs35l41->dev);
+ pm_runtime_mark_last_busy(cs35l41->dev);
+@@ -1320,6 +1348,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
+ return 0;
+
+ err_pm:
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+ pm_runtime_put_noidle(cs35l41->dev);
+
+@@ -1336,6 +1365,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe);
+ void cs35l41_remove(struct cs35l41_private *cs35l41)
+ {
+ pm_runtime_get_sync(cs35l41->dev);
++ pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ pm_runtime_disable(cs35l41->dev);
+
+ regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
+diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h
+index 34d967d4372b28..c85cbc1dd333b7 100644
+--- a/sound/soc/codecs/cs35l41.h
++++ b/sound/soc/codecs/cs35l41.h
+@@ -33,7 +33,6 @@ struct cs35l41_private {
+ int irq;
+ /* GPIO for /RST */
+ struct gpio_desc *reset_gpio;
+- struct completion pll_lock;
+ };
+
+ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *hw_cfg);
+diff --git a/sound/soc/codecs/cs35l45-i2c.c b/sound/soc/codecs/cs35l45-i2c.c
+index 77e0f8750f3757..bc2af1ed0fe9bb 100644
+--- a/sound/soc/codecs/cs35l45-i2c.c
++++ b/sound/soc/codecs/cs35l45-i2c.c
+@@ -62,7 +62,7 @@ static struct i2c_driver cs35l45_i2c_driver = {
+ .driver = {
+ .name = "cs35l45",
+ .of_match_table = cs35l45_of_match,
+- .pm = &cs35l45_pm_ops,
++ .pm = pm_ptr(&cs35l45_pm_ops),
+ },
+ .id_table = cs35l45_id_i2c,
+ .probe = cs35l45_i2c_probe,
+diff --git a/sound/soc/codecs/cs35l45-spi.c b/sound/soc/codecs/cs35l45-spi.c
+index 5efb77530cc33e..39e203a5f060c6 100644
+--- a/sound/soc/codecs/cs35l45-spi.c
++++ b/sound/soc/codecs/cs35l45-spi.c
+@@ -64,7 +64,7 @@ static struct spi_driver cs35l45_spi_driver = {
+ .driver = {
+ .name = "cs35l45",
+ .of_match_table = cs35l45_of_match,
+- .pm = &cs35l45_pm_ops,
++ .pm = pm_ptr(&cs35l45_pm_ops),
+ },
+ .id_table = cs35l45_id_spi,
+ .probe = cs35l45_spi_probe,
+diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
+index be4f4229576c4b..7e439c778c6b49 100644
+--- a/sound/soc/codecs/cs35l45.c
++++ b/sound/soc/codecs/cs35l45.c
+@@ -775,6 +775,8 @@ static int cs35l45_enter_hibernate(struct cs35l45_private *cs35l45)
+
+ cs35l45_setup_hibernate(cs35l45);
+
++ regmap_set_bits(cs35l45->regmap, CS35L45_IRQ1_MASK_2, CS35L45_DSP_VIRT2_MBOX_MASK);
++
+ // Don't wait for ACK since bus activity would wake the device
+ regmap_write(cs35l45->regmap, CS35L45_DSP_VIRT1_MBOX_1, CSPL_MBOX_CMD_HIBERNATE);
+
+@@ -795,6 +797,8 @@ static int cs35l45_exit_hibernate(struct cs35l45_private *cs35l45)
+ CSPL_MBOX_CMD_OUT_OF_HIBERNATE);
+ if (!ret) {
+ dev_dbg(cs35l45->dev, "Wake success at cycle: %d\n", j);
++ regmap_clear_bits(cs35l45->regmap, CS35L45_IRQ1_MASK_2,
++ CS35L45_DSP_VIRT2_MBOX_MASK);
+ return 0;
+ }
+ usleep_range(100, 200);
+@@ -810,7 +814,7 @@ static int cs35l45_exit_hibernate(struct cs35l45_private *cs35l45)
+ return -ETIMEDOUT;
+ }
+
+-static int __maybe_unused cs35l45_runtime_suspend(struct device *dev)
++static int cs35l45_runtime_suspend(struct device *dev)
+ {
+ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
+
+@@ -827,7 +831,7 @@ static int __maybe_unused cs35l45_runtime_suspend(struct device *dev)
+ return 0;
+ }
+
+-static int __maybe_unused cs35l45_runtime_resume(struct device *dev)
++static int cs35l45_runtime_resume(struct device *dev)
+ {
+ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
+ int ret;
+@@ -854,6 +858,46 @@ static int __maybe_unused cs35l45_runtime_resume(struct device *dev)
+ return ret;
+ }
+
++static int cs35l45_sys_suspend(struct device *dev)
++{
++ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
++
++ dev_dbg(cs35l45->dev, "System suspend, disabling IRQ\n");
++ disable_irq(cs35l45->irq);
++
++ return 0;
++}
++
++static int cs35l45_sys_suspend_noirq(struct device *dev)
++{
++ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
++
++ dev_dbg(cs35l45->dev, "Late system suspend, reenabling IRQ\n");
++ enable_irq(cs35l45->irq);
++
++ return 0;
++}
++
++static int cs35l45_sys_resume_noirq(struct device *dev)
++{
++ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
++
++ dev_dbg(cs35l45->dev, "Early system resume, disabling IRQ\n");
++ disable_irq(cs35l45->irq);
++
++ return 0;
++}
++
++static int cs35l45_sys_resume(struct device *dev)
++{
++ struct cs35l45_private *cs35l45 = dev_get_drvdata(dev);
++
++ dev_dbg(cs35l45->dev, "System resume, reenabling IRQ\n");
++ enable_irq(cs35l45->irq);
++
++ return 0;
++}
++
+ static int cs35l45_apply_property_config(struct cs35l45_private *cs35l45)
+ {
+ struct device_node *node = cs35l45->dev->of_node;
+@@ -1023,7 +1067,10 @@ static irqreturn_t cs35l45_spk_safe_err(int irq, void *data)
+
+ i = irq - regmap_irq_get_virq(cs35l45->irq_data, 0);
+
+- dev_err(cs35l45->dev, "%s condition detected!\n", cs35l45_irqs[i].name);
++ if (i < 0 || i >= ARRAY_SIZE(cs35l45_irqs))
++ dev_err(cs35l45->dev, "Unspecified global error condition (%d) detected!\n", irq);
++ else
++ dev_err(cs35l45->dev, "%s condition detected!\n", cs35l45_irqs[i].name);
+
+ return IRQ_HANDLED;
+ }
+@@ -1289,10 +1336,12 @@ void cs35l45_remove(struct cs35l45_private *cs35l45)
+ }
+ EXPORT_SYMBOL_NS_GPL(cs35l45_remove, SND_SOC_CS35L45);
+
+-const struct dev_pm_ops cs35l45_pm_ops = {
+- SET_RUNTIME_PM_OPS(cs35l45_runtime_suspend, cs35l45_runtime_resume, NULL)
++EXPORT_GPL_DEV_PM_OPS(cs35l45_pm_ops) = {
++ RUNTIME_PM_OPS(cs35l45_runtime_suspend, cs35l45_runtime_resume, NULL)
++
++ SYSTEM_SLEEP_PM_OPS(cs35l45_sys_suspend, cs35l45_sys_resume)
++ NOIRQ_SYSTEM_SLEEP_PM_OPS(cs35l45_sys_suspend_noirq, cs35l45_sys_resume_noirq)
+ };
+-EXPORT_SYMBOL_NS_GPL(cs35l45_pm_ops, SND_SOC_CS35L45);
+
+ MODULE_DESCRIPTION("ASoC CS35L45 driver");
+ MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>");
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index 98b1e63360aeb3..d3db89c93b331d 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -5,6 +5,7 @@
+ // Copyright (C) 2023 Cirrus Logic, Inc. and
+ // Cirrus Logic International Semiconductor Ltd.
+
++#include <linux/gpio/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/types.h>
+@@ -34,10 +35,9 @@ static const struct reg_default cs35l56_reg_defaults[] = {
+ { CS35L56_ASP1_FRAME_CONTROL5, 0x00020100 },
+ { CS35L56_ASP1_DATA_CONTROL1, 0x00000018 },
+ { CS35L56_ASP1_DATA_CONTROL5, 0x00000018 },
+- { CS35L56_ASP1TX1_INPUT, 0x00000018 },
+- { CS35L56_ASP1TX2_INPUT, 0x00000019 },
+- { CS35L56_ASP1TX3_INPUT, 0x00000020 },
+- { CS35L56_ASP1TX4_INPUT, 0x00000028 },
++
++ /* no defaults for ASP1TX mixer */
++
+ { CS35L56_SWIRE_DP3_CH1_INPUT, 0x00000018 },
+ { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 },
+ { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 },
+@@ -195,6 +195,47 @@ static bool cs35l56_volatile_reg(struct device *dev, unsigned int reg)
+ }
+ }
+
++/*
++ * The firmware boot sequence can overwrite the ASP1 config registers so that
++ * they don't match regmap's view of their values. Rewrite the values from the
++ * regmap cache into the hardware registers.
++ */
++int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
++{
++ struct reg_sequence asp1_regs[] = {
++ { .reg = CS35L56_ASP1_ENABLES1 },
++ { .reg = CS35L56_ASP1_CONTROL1 },
++ { .reg = CS35L56_ASP1_CONTROL2 },
++ { .reg = CS35L56_ASP1_CONTROL3 },
++ { .reg = CS35L56_ASP1_FRAME_CONTROL1 },
++ { .reg = CS35L56_ASP1_FRAME_CONTROL5 },
++ { .reg = CS35L56_ASP1_DATA_CONTROL1 },
++ { .reg = CS35L56_ASP1_DATA_CONTROL5 },
++ };
++ int i, ret;
++
++ /* Read values from regmap cache into a write sequence */
++ for (i = 0; i < ARRAY_SIZE(asp1_regs); ++i) {
++ ret = regmap_read(cs35l56_base->regmap, asp1_regs[i].reg, &asp1_regs[i].def);
++ if (ret)
++ goto err;
++ }
++
++ /* Write the values cache-bypassed so that they will be written to silicon */
++ ret = regmap_multi_reg_write_bypassed(cs35l56_base->regmap, asp1_regs,
++ ARRAY_SIZE(asp1_regs));
++ if (ret)
++ goto err;
++
++ return 0;
++
++err:
++ dev_err(cs35l56_base->dev, "Failed to sync ASP1 registers: %d\n", ret);
++
++ return ret;
++}
++EXPORT_SYMBOL_NS_GPL(cs35l56_force_sync_asp1_registers_from_cache, SND_SOC_CS35L56_SHARED);
++
+ int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command)
+ {
+ unsigned int val;
+@@ -286,6 +327,7 @@ void cs35l56_wait_min_reset_pulse(void)
+ EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, SND_SOC_CS35L56_SHARED);
+
+ static const struct reg_sequence cs35l56_system_reset_seq[] = {
++ REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
+ REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
+ };
+
+@@ -313,7 +355,7 @@ int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq)
+ {
+ int ret;
+
+- if (!irq)
++ if (irq < 1)
+ return 0;
+
+ ret = devm_request_threaded_irq(cs35l56_base->dev, irq, NULL, cs35l56_irq,
+@@ -654,6 +696,41 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
+ }
+ EXPORT_SYMBOL_NS_GPL(cs35l56_hw_init, SND_SOC_CS35L56_SHARED);
+
++int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base)
++{
++ struct gpio_descs *descs;
++ int speaker_id;
++ int i, ret;
++
++ /* Read the speaker type qualifier from the motherboard GPIOs */
++ descs = gpiod_get_array_optional(cs35l56_base->dev, "spk-id", GPIOD_IN);
++ if (!descs) {
++ return -ENOENT;
++ } else if (IS_ERR(descs)) {
++ ret = PTR_ERR(descs);
++ return dev_err_probe(cs35l56_base->dev, ret, "Failed to get spk-id-gpios\n");
++ }
++
++ speaker_id = 0;
++ for (i = 0; i < descs->ndescs; i++) {
++ ret = gpiod_get_value_cansleep(descs->desc[i]);
++ if (ret < 0) {
++ dev_err_probe(cs35l56_base->dev, ret, "Failed to read spk-id[%d]\n", i);
++ goto err;
++ }
++
++ speaker_id |= (ret << i);
++ }
++
++ dev_dbg(cs35l56_base->dev, "Speaker ID = %d\n", speaker_id);
++ ret = speaker_id;
++err:
++ gpiod_put_array(descs);
++
++ return ret;
++}
++EXPORT_SYMBOL_NS_GPL(cs35l56_get_speaker_id, SND_SOC_CS35L56_SHARED);
++
+ static const u32 cs35l56_bclk_valid_for_pll_freq_table[] = {
+ [0x0C] = 128000,
+ [0x0F] = 256000,
+diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
+index f9059780b7a7b6..015269f0db54cf 100644
+--- a/sound/soc/codecs/cs35l56.c
++++ b/sound/soc/codecs/cs35l56.c
+@@ -59,6 +59,131 @@ static int cs35l56_dspwait_put_volsw(struct snd_kcontrol *kcontrol,
+ return snd_soc_put_volsw(kcontrol, ucontrol);
+ }
+
++static const unsigned short cs35l56_asp1_mixer_regs[] = {
++ CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX2_INPUT,
++ CS35L56_ASP1TX3_INPUT, CS35L56_ASP1TX4_INPUT,
++};
++
++static const char * const cs35l56_asp1_mux_control_names[] = {
++ "ASP1 TX1 Source", "ASP1 TX2 Source", "ASP1 TX3 Source", "ASP1 TX4 Source"
++};
++
++static int cs35l56_sync_asp1_mixer_widgets_with_firmware(struct cs35l56_private *cs35l56)
++{
++ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cs35l56->component);
++ const char *prefix = cs35l56->component->name_prefix;
++ char full_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++ const char *name;
++ struct snd_kcontrol *kcontrol;
++ struct soc_enum *e;
++ unsigned int val[4];
++ int i, item, ret;
++
++ if (cs35l56->asp1_mixer_widgets_initialized)
++ return 0;
++
++ /*
++ * Resume so we can read the registers from silicon if the regmap
++ * cache has not yet been populated.
++ */
++ ret = pm_runtime_resume_and_get(cs35l56->base.dev);
++ if (ret < 0)
++ return ret;
++
++ /* Wait for firmware download and reboot */
++ cs35l56_wait_dsp_ready(cs35l56);
++
++ ret = regmap_bulk_read(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT,
++ val, ARRAY_SIZE(val));
++
++ pm_runtime_mark_last_busy(cs35l56->base.dev);
++ pm_runtime_put_autosuspend(cs35l56->base.dev);
++
++ if (ret) {
++ dev_err(cs35l56->base.dev, "Failed to read ASP1 mixer regs: %d\n", ret);
++ return ret;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(cs35l56_asp1_mux_control_names); ++i) {
++ name = cs35l56_asp1_mux_control_names[i];
++
++ if (prefix) {
++ snprintf(full_name, sizeof(full_name), "%s %s", prefix, name);
++ name = full_name;
++ }
++
++ kcontrol = snd_soc_card_get_kcontrol_locked(dapm->card, name);
++ if (!kcontrol) {
++ dev_warn(cs35l56->base.dev, "Could not find control %s\n", name);
++ continue;
++ }
++
++ e = (struct soc_enum *)kcontrol->private_value;
++ item = snd_soc_enum_val_to_item(e, val[i] & CS35L56_ASP_TXn_SRC_MASK);
++ snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL);
++ }
++
++ cs35l56->asp1_mixer_widgets_initialized = true;
++
++ return 0;
++}
++
++static int cs35l56_dspwait_asp1tx_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
++ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
++ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
++ int index = e->shift_l;
++ unsigned int addr, val;
++ int ret;
++
++ ret = cs35l56_sync_asp1_mixer_widgets_with_firmware(cs35l56);
++ if (ret)
++ return ret;
++
++ addr = cs35l56_asp1_mixer_regs[index];
++ ret = regmap_read(cs35l56->base.regmap, addr, &val);
++ if (ret)
++ return ret;
++
++ val &= CS35L56_ASP_TXn_SRC_MASK;
++ ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
++
++ return 0;
++}
++
++static int cs35l56_dspwait_asp1tx_put(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
++ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
++ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
++ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
++ int item = ucontrol->value.enumerated.item[0];
++ int index = e->shift_l;
++ unsigned int addr, val;
++ bool changed;
++ int ret;
++
++ ret = cs35l56_sync_asp1_mixer_widgets_with_firmware(cs35l56);
++ if (ret)
++ return ret;
++
++ addr = cs35l56_asp1_mixer_regs[index];
++ val = snd_soc_enum_item_to_val(e, item);
++
++ ret = regmap_update_bits_check(cs35l56->base.regmap, addr,
++ CS35L56_ASP_TXn_SRC_MASK, val, &changed);
++ if (ret)
++ return ret;
++
++ if (changed)
++ snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL);
++
++ return changed;
++}
++
+ static DECLARE_TLV_DB_SCALE(vol_tlv, -10000, 25, 0);
+
+ static const struct snd_kcontrol_new cs35l56_controls[] = {
+@@ -77,40 +202,44 @@ static const struct snd_kcontrol_new cs35l56_controls[] = {
+ };
+
+ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx1_enum,
+- CS35L56_ASP1TX1_INPUT,
+- 0, CS35L56_ASP_TXn_SRC_MASK,
++ SND_SOC_NOPM,
++ 0, 0,
+ cs35l56_tx_input_texts,
+ cs35l56_tx_input_values);
+
+ static const struct snd_kcontrol_new asp1_tx1_mux =
+- SOC_DAPM_ENUM("ASP1TX1 SRC", cs35l56_asp1tx1_enum);
++ SOC_DAPM_ENUM_EXT("ASP1TX1 SRC", cs35l56_asp1tx1_enum,
++ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
+
+ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx2_enum,
+- CS35L56_ASP1TX2_INPUT,
+- 0, CS35L56_ASP_TXn_SRC_MASK,
++ SND_SOC_NOPM,
++ 1, 0,
+ cs35l56_tx_input_texts,
+ cs35l56_tx_input_values);
+
+ static const struct snd_kcontrol_new asp1_tx2_mux =
+- SOC_DAPM_ENUM("ASP1TX2 SRC", cs35l56_asp1tx2_enum);
++ SOC_DAPM_ENUM_EXT("ASP1TX2 SRC", cs35l56_asp1tx2_enum,
++ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
+
+ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx3_enum,
+- CS35L56_ASP1TX3_INPUT,
+- 0, CS35L56_ASP_TXn_SRC_MASK,
++ SND_SOC_NOPM,
++ 2, 0,
+ cs35l56_tx_input_texts,
+ cs35l56_tx_input_values);
+
+ static const struct snd_kcontrol_new asp1_tx3_mux =
+- SOC_DAPM_ENUM("ASP1TX3 SRC", cs35l56_asp1tx3_enum);
++ SOC_DAPM_ENUM_EXT("ASP1TX3 SRC", cs35l56_asp1tx3_enum,
++ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
+
+ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx4_enum,
+- CS35L56_ASP1TX4_INPUT,
+- 0, CS35L56_ASP_TXn_SRC_MASK,
++ SND_SOC_NOPM,
++ 3, 0,
+ cs35l56_tx_input_texts,
+ cs35l56_tx_input_values);
+
+ static const struct snd_kcontrol_new asp1_tx4_mux =
+- SOC_DAPM_ENUM("ASP1TX4 SRC", cs35l56_asp1tx4_enum);
++ SOC_DAPM_ENUM_EXT("ASP1TX4 SRC", cs35l56_asp1tx4_enum,
++ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
+
+ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx1_enum,
+ CS35L56_SWIRE_DP3_CH1_INPUT,
+@@ -148,6 +277,21 @@ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx4_enum,
+ static const struct snd_kcontrol_new sdw1_tx4_mux =
+ SOC_DAPM_ENUM("SDW1TX4 SRC", cs35l56_sdw1tx4_enum);
+
++static int cs35l56_asp1_cfg_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
++ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
++
++ switch (event) {
++ case SND_SOC_DAPM_PRE_PMU:
++ /* Override register values set by firmware boot */
++ return cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
++ default:
++ return 0;
++ }
++}
++
+ static int cs35l56_play_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+ {
+@@ -184,6 +328,9 @@ static const struct snd_soc_dapm_widget cs35l56_dapm_widgets[] = {
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_B", 0, 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_AMP", 0, 0),
+
++ SND_SOC_DAPM_SUPPLY("ASP1 CFG", SND_SOC_NOPM, 0, 0, cs35l56_asp1_cfg_event,
++ SND_SOC_DAPM_PRE_PMU),
++
+ SND_SOC_DAPM_SUPPLY("PLAY", SND_SOC_NOPM, 0, 0, cs35l56_play_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+@@ -251,6 +398,9 @@ static const struct snd_soc_dapm_route cs35l56_audio_map[] = {
+ { "AMP", NULL, "VDD_B" },
+ { "AMP", NULL, "VDD_AMP" },
+
++ { "ASP1 Playback", NULL, "ASP1 CFG" },
++ { "ASP1 Capture", NULL, "ASP1 CFG" },
++
+ { "ASP1 Playback", NULL, "PLAY" },
+ { "SDW1 Playback", NULL, "PLAY" },
+
+@@ -753,6 +903,18 @@ static void cs35l56_dsp_work(struct work_struct *work)
+
+ pm_runtime_get_sync(cs35l56->base.dev);
+
++ /* Populate fw file qualifier with the revision and security state */
++ if (!cs35l56->dsp.fwf_name) {
++ cs35l56->dsp.fwf_name = kasprintf(GFP_KERNEL, "%02x%s-dsp1",
++ cs35l56->base.rev,
++ cs35l56->base.secured ? "-s" : "");
++ if (!cs35l56->dsp.fwf_name)
++ goto err;
++ }
++
++ dev_dbg(cs35l56->base.dev, "DSP fwf name: '%s' system name: '%s'\n",
++ cs35l56->dsp.fwf_name, cs35l56->dsp.system_name);
++
+ /*
+ * When the device is running in secure mode the firmware files can
+ * only contain insecure tunings and therefore we do not need to
+@@ -764,6 +926,7 @@ static void cs35l56_dsp_work(struct work_struct *work)
+ else
+ cs35l56_patch(cs35l56);
+
++err:
+ pm_runtime_mark_last_busy(cs35l56->base.dev);
+ pm_runtime_put_autosuspend(cs35l56->base.dev);
+ }
+@@ -772,9 +935,29 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
+ {
+ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+ struct dentry *debugfs_root = component->debugfs_root;
++ unsigned short vendor, device;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cs35l56_tx_input_texts) != ARRAY_SIZE(cs35l56_tx_input_values));
+
++ if (!cs35l56->dsp.system_name &&
++ (snd_soc_card_get_pci_ssid(component->card, &vendor, &device) == 0)) {
++ /* Append a speaker qualifier if there is a speaker ID */
++ if (cs35l56->speaker_id >= 0) {
++ cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
++ GFP_KERNEL,
++ "%04x%04x-spkid%d",
++ vendor, device,
++ cs35l56->speaker_id);
++ } else {
++ cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
++ GFP_KERNEL,
++ "%04x%04x",
++ vendor, device);
++ }
++ if (!cs35l56->dsp.system_name)
++ return -ENOMEM;
++ }
++
+ if (!wait_for_completion_timeout(&cs35l56->init_completion,
+ msecs_to_jiffies(5000))) {
+ dev_err(cs35l56->base.dev, "%s: init_completion timed out\n", __func__);
+@@ -788,6 +971,13 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
+ debugfs_create_bool("can_hibernate", 0444, debugfs_root, &cs35l56->base.can_hibernate);
+ debugfs_create_bool("fw_patched", 0444, debugfs_root, &cs35l56->base.fw_patched);
+
++ /*
++ * The widgets for the ASP1TX mixer can't be initialized
++ * until the firmware has been downloaded and the DSP rebooted.
++ */
++ regcache_drop_region(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX4_INPUT);
++ cs35l56->asp1_mixer_widgets_initialized = false;
++
+ queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work);
+
+ return 0;
+@@ -798,6 +988,16 @@ static void cs35l56_component_remove(struct snd_soc_component *component)
+ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+
+ cancel_work_sync(&cs35l56->dsp_work);
++
++ if (cs35l56->dsp.cs_dsp.booted)
++ wm_adsp_power_down(&cs35l56->dsp);
++
++ wm_adsp2_component_remove(&cs35l56->dsp, component);
++
++ kfree(cs35l56->dsp.fwf_name);
++ cs35l56->dsp.fwf_name = NULL;
++
++ cs35l56->component = NULL;
+ }
+
+ static int cs35l56_set_bias_level(struct snd_soc_component *component,
+@@ -1039,7 +1239,13 @@ static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56)
+ if (ret < 0)
+ return 0;
+
+- cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
++ /* Append a speaker qualifier if there is a speaker ID */
++ if (cs35l56->speaker_id >= 0)
++ cs35l56->dsp.system_name = devm_kasprintf(dev, GFP_KERNEL, "%s-spkid%d",
++ prop, cs35l56->speaker_id);
++ else
++ cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
++
+ if (cs35l56->dsp.system_name == NULL)
+ return -ENOMEM;
+
+@@ -1054,6 +1260,7 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
+
+ init_completion(&cs35l56->init_completion);
+ mutex_init(&cs35l56->base.irq_lock);
++ cs35l56->speaker_id = -ENOENT;
+
+ dev_set_drvdata(cs35l56->base.dev, cs35l56);
+
+@@ -1090,6 +1297,12 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
+ gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 1);
+ }
+
++ ret = cs35l56_get_speaker_id(&cs35l56->base);
++ if ((ret < 0) && (ret != -ENOENT))
++ goto err;
++
++ cs35l56->speaker_id = ret;
++
+ ret = cs35l56_get_firmware_uid(cs35l56);
+ if (ret != 0)
+ goto err;
+@@ -1141,11 +1354,9 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
+ if (ret < 0)
+ return ret;
+
+- /* Populate the DSP information with the revision and security state */
+- cs35l56->dsp.part = devm_kasprintf(cs35l56->base.dev, GFP_KERNEL, "cs35l56%s-%02x",
+- cs35l56->base.secured ? "s" : "", cs35l56->base.rev);
+- if (!cs35l56->dsp.part)
+- return -ENOMEM;
++ ret = cs35l56_set_patch(&cs35l56->base);
++ if (ret)
++ return ret;
+
+ if (!cs35l56->base.reset_gpio) {
+ dev_dbg(cs35l56->base.dev, "No reset gpio: using soft reset\n");
+@@ -1179,10 +1390,6 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
+ if (ret)
+ return ret;
+
+- ret = cs35l56_set_patch(&cs35l56->base);
+- if (ret)
+- return ret;
+-
+ /* Registers could be dirty after soft reset or SoundWire enumeration */
+ regcache_sync(cs35l56->base.regmap);
+
+diff --git a/sound/soc/codecs/cs35l56.h b/sound/soc/codecs/cs35l56.h
+index 8159c3e217d936..b000e7365e4065 100644
+--- a/sound/soc/codecs/cs35l56.h
++++ b/sound/soc/codecs/cs35l56.h
+@@ -44,12 +44,14 @@ struct cs35l56_private {
+ bool sdw_attached;
+ struct completion init_completion;
+
++ int speaker_id;
+ u32 rx_mask;
+ u32 tx_mask;
+ u8 asp_slot_width;
+ u8 asp_slot_count;
+ bool tdm_mode;
+ bool sysclk_set;
++ bool asp1_mixer_widgets_initialized;
+ u8 old_sdw_clock_scale;
+ };
+
+diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
+index 5643c666d7d04b..1443eb1dc0b170 100644
+--- a/sound/soc/codecs/cs42l43.c
++++ b/sound/soc/codecs/cs42l43.c
+@@ -220,8 +220,9 @@ static int cs42l43_startup(struct snd_pcm_substream *substream, struct snd_soc_d
+ struct snd_soc_component *component = dai->component;
+ struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component);
+ struct cs42l43 *cs42l43 = priv->core;
+- int provider = !!regmap_test_bits(cs42l43->regmap, CS42L43_ASP_CLK_CONFIG2,
+- CS42L43_ASP_MASTER_MODE_MASK);
++ int provider = !dai->id || !!regmap_test_bits(cs42l43->regmap,
++ CS42L43_ASP_CLK_CONFIG2,
++ CS42L43_ASP_MASTER_MODE_MASK);
+
+ if (provider)
+ priv->constraint.mask = CS42L43_PROVIDER_RATE_MASK;
+@@ -2175,7 +2176,10 @@ static int cs42l43_codec_probe(struct platform_device *pdev)
+ pm_runtime_use_autosuspend(priv->dev);
+ pm_runtime_set_active(priv->dev);
+ pm_runtime_get_noresume(priv->dev);
+- devm_pm_runtime_enable(priv->dev);
++
++ ret = devm_pm_runtime_enable(priv->dev);
++ if (ret)
++ goto err_pm;
+
+ for (i = 0; i < ARRAY_SIZE(cs42l43_irqs); i++) {
+ ret = cs42l43_request_irq(priv, dom, cs42l43_irqs[i].name,
+diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
+index 3292405024bc0e..206008bdecac37 100644
+--- a/sound/soc/codecs/cs43130.c
++++ b/sound/soc/codecs/cs43130.c
+@@ -579,7 +579,7 @@ static int cs43130_set_sp_fmt(int dai_id, unsigned int bitwidth_sclk,
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ hi_size = bitwidth_sclk;
+- frm_delay = 2;
++ frm_delay = 0;
+ frm_phase = 1;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+@@ -1683,7 +1683,7 @@ static ssize_t hpload_dc_r_show(struct device *dev,
+ return cs43130_show_dc(dev, buf, HP_RIGHT);
+ }
+
+-static u16 const cs43130_ac_freq[CS43130_AC_FREQ] = {
++static const u16 cs43130_ac_freq[CS43130_AC_FREQ] = {
+ 24,
+ 43,
+ 93,
+@@ -2363,7 +2363,7 @@ static const struct regmap_config cs43130_regmap = {
+ .use_single_write = true,
+ };
+
+-static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
++static const u16 cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
+ 50,
+ 120,
+ };
+diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
+index 3bbe8509164932..9b0c4701817069 100644
+--- a/sound/soc/codecs/da7219-aad.c
++++ b/sound/soc/codecs/da7219-aad.c
+@@ -671,8 +671,10 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ return NULL;
+
+ aad_pdata = devm_kzalloc(dev, sizeof(*aad_pdata), GFP_KERNEL);
+- if (!aad_pdata)
++ if (!aad_pdata) {
++ fwnode_handle_put(aad_np);
+ return NULL;
++ }
+
+ aad_pdata->irq = i2c->irq;
+
+@@ -696,7 +698,7 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ aad_pdata->mic_det_thr =
+ da7219_aad_fw_mic_det_thr(dev, fw_val32);
+ else
+- aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
++ aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_200_OHMS;
+
+ if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
+ aad_pdata->jack_ins_deb =
+@@ -753,6 +755,8 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
+ else
+ aad_pdata->adc_1bit_rpt = DA7219_AAD_ADC_1BIT_RPT_1;
+
++ fwnode_handle_put(aad_np);
++
+ return aad_pdata;
+ }
+
+diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
+index 6c263086c44d2f..32a9b26ee2c898 100644
+--- a/sound/soc/codecs/es8326.c
++++ b/sound/soc/codecs/es8326.c
+@@ -617,6 +617,8 @@ static void es8326_jack_detect_handler(struct work_struct *work)
+ es8326_disable_micbias(es8326->component);
+ if (es8326->jack->status & SND_JACK_HEADPHONE) {
+ dev_dbg(comp->dev, "Report hp remove event\n");
++ snd_soc_jack_report(es8326->jack, 0,
++ SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2);
+ snd_soc_jack_report(es8326->jack, 0, SND_JACK_HEADSET);
+ /* mute adc when mic path switch */
+ regmap_write(es8326->regmap, ES8326_ADC_SCALE, 0x33);
+diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
+index be66853afbe2b6..d59d38ce5657be 100644
+--- a/sound/soc/codecs/hdac_hda.c
++++ b/sound/soc/codecs/hdac_hda.c
+@@ -124,6 +124,9 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
+ .sig_bits = 24,
+ },
+ },
++};
++
++static struct snd_soc_dai_driver hdac_hda_hdmi_dais[] = {
+ {
+ .id = HDAC_HDMI_0_DAI_ID,
+ .name = "intel-hdmi-hifi1",
+@@ -578,8 +581,16 @@ static const struct snd_soc_component_driver hdac_hda_codec = {
+ .endianness = 1,
+ };
+
++static const struct snd_soc_component_driver hdac_hda_hdmi_codec = {
++ .probe = hdac_hda_codec_probe,
++ .remove = hdac_hda_codec_remove,
++ .idle_bias_on = false,
++ .endianness = 1,
++};
++
+ static int hdac_hda_dev_probe(struct hdac_device *hdev)
+ {
++ struct hdac_hda_priv *hda_pvt = dev_get_drvdata(&hdev->dev);
+ struct hdac_ext_link *hlink;
+ int ret;
+
+@@ -592,9 +603,15 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev)
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+
+ /* ASoC specific initialization */
+- ret = devm_snd_soc_register_component(&hdev->dev,
+- &hdac_hda_codec, hdac_hda_dais,
+- ARRAY_SIZE(hdac_hda_dais));
++ if (hda_pvt->need_display_power)
++ ret = devm_snd_soc_register_component(&hdev->dev,
++ &hdac_hda_hdmi_codec, hdac_hda_hdmi_dais,
++ ARRAY_SIZE(hdac_hda_hdmi_dais));
++ else
++ ret = devm_snd_soc_register_component(&hdev->dev,
++ &hdac_hda_codec, hdac_hda_dais,
++ ARRAY_SIZE(hdac_hda_dais));
++
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to register HDA codec %d\n", ret);
+ return ret;
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 09eef6042aad6d..0938671700c621 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -850,8 +850,9 @@ static int hdmi_dai_probe(struct snd_soc_dai *dai)
+ static void hdmi_codec_jack_report(struct hdmi_codec_priv *hcp,
+ unsigned int jack_status)
+ {
+- if (hcp->jack && jack_status != hcp->jack_status) {
+- snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
++ if (jack_status != hcp->jack_status) {
++ if (hcp->jack)
++ snd_soc_jack_report(hcp->jack, jack_status, SND_JACK_LINEOUT);
+ hcp->jack_status = jack_status;
+ }
+ }
+@@ -877,18 +878,20 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
+ void *data)
+ {
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+- int ret = -ENOTSUPP;
+
+ if (hcp->hcd.ops->hook_plugged_cb) {
+ hcp->jack = jack;
+- ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+- hcp->hcd.data,
+- plugged_cb,
+- component->dev);
+- if (ret)
+- hcp->jack = NULL;
++
++ /*
++ * Report the initial jack status which may have been provided
++ * by the parent hdmi driver while the hpd hook was registered.
++ */
++ snd_soc_jack_report(jack, hcp->jack_status, SND_JACK_LINEOUT);
++
++ return 0;
+ }
+- return ret;
++
++ return -ENOTSUPP;
+ }
+
+ static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
+@@ -982,6 +985,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
+ return ret;
+ }
+
++static int hdmi_probe(struct snd_soc_component *component)
++{
++ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
++ int ret = 0;
++
++ if (hcp->hcd.ops->hook_plugged_cb) {
++ ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
++ hcp->hcd.data,
++ plugged_cb,
++ component->dev);
++ }
++
++ return ret;
++}
++
+ static void hdmi_remove(struct snd_soc_component *component)
+ {
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+@@ -992,6 +1010,7 @@ static void hdmi_remove(struct snd_soc_component *component)
+ }
+
+ static const struct snd_soc_component_driver hdmi_driver = {
++ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+ .dapm_widgets = hdmi_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 3e33418898e826..ebddfa74ce0a07 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -2021,6 +2021,11 @@ static int tx_macro_probe(struct platform_device *pdev)
+
+ tx->dev = dev;
+
++ /* Set active_decimator default value */
++ tx->active_decimator[TX_MACRO_AIF1_CAP] = -1;
++ tx->active_decimator[TX_MACRO_AIF2_CAP] = -1;
++ tx->active_decimator[TX_MACRO_AIF3_CAP] = -1;
++
+ /* set MCLK and NPL rates */
+ clk_set_rate(tx->mclk, MCLK_FREQ);
+ clk_set_rate(tx->npl, MCLK_FREQ);
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index fff4a8b862a732..6ce309980cd10e 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1584,7 +1584,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
+ u16 gain_reg;
+ u16 reg;
+ int val;
+- int offset_val = 0;
+ struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
+
+ if (w->shift == WSA_MACRO_COMP1) {
+@@ -1623,10 +1622,8 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
+ CDC_WSA_RX1_RX_PATH_MIX_SEC0,
+ CDC_WSA_RX_PGA_HALF_DB_MASK,
+ CDC_WSA_RX_PGA_HALF_DB_ENABLE);
+- offset_val = -2;
+ }
+ val = snd_soc_component_read(component, gain_reg);
+- val += offset_val;
+ snd_soc_component_write(component, gain_reg, val);
+ wsa_macro_config_ear_spkr_gain(component, wsa,
+ event, gain_reg);
+@@ -1654,10 +1651,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
+ CDC_WSA_RX1_RX_PATH_MIX_SEC0,
+ CDC_WSA_RX_PGA_HALF_DB_MASK,
+ CDC_WSA_RX_PGA_HALF_DB_DISABLE);
+- offset_val = 2;
+- val = snd_soc_component_read(component, gain_reg);
+- val += offset_val;
+- snd_soc_component_write(component, gain_reg, val);
+ }
+ wsa_macro_config_ear_spkr_gain(component, wsa,
+ event, gain_reg);
+@@ -1685,6 +1678,9 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
+ boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
+ reg = CDC_WSA_RX1_RX_PATH_CTL;
+ reg_mix = CDC_WSA_RX1_RX_PATH_MIX_CTL;
++ } else {
++ dev_warn(component->dev, "Incorrect widget name in the driver\n");
++ return -EINVAL;
+ }
+
+ switch (event) {
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index 8b56ee550c09e2..8b0645c6346207 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -1318,6 +1318,7 @@ static int max98088_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+ {
+ struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component);
++ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+@@ -1333,10 +1334,13 @@ static int max98088_set_bias_level(struct snd_soc_component *component,
+ */
+ if (!IS_ERR(max98088->mclk)) {
+ if (snd_soc_component_get_bias_level(component) ==
+- SND_SOC_BIAS_ON)
++ SND_SOC_BIAS_ON) {
+ clk_disable_unprepare(max98088->mclk);
+- else
+- clk_prepare_enable(max98088->mclk);
++ } else {
++ ret = clk_prepare_enable(max98088->mclk);
++ if (ret)
++ return ret;
++ }
+ }
+ break;
+
+diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
+index ff3024899f456d..7199d734c79f2c 100644
+--- a/sound/soc/codecs/nau8822.c
++++ b/sound/soc/codecs/nau8822.c
+@@ -184,6 +184,7 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ int i, reg;
+ u16 reg_val, *val;
++ __be16 tmp;
+
+ val = (u16 *)ucontrol->value.bytes.data;
+ reg = NAU8822_REG_EQ1;
+@@ -192,8 +193,8 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+- reg_val = cpu_to_be16(reg_val);
+- memcpy(val + i, &reg_val, sizeof(reg_val));
++ tmp = cpu_to_be16(reg_val);
++ memcpy(val + i, &tmp, sizeof(tmp));
+ }
+
+ return 0;
+@@ -216,6 +217,7 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ void *data;
+ u16 *val, value;
+ int i, reg, ret;
++ __be16 *tmp;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+@@ -228,7 +230,8 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+- value = be16_to_cpu(*(val + i));
++ tmp = (__be16 *)(val + i);
++ value = be16_to_cpup(tmp);
+ ret = snd_soc_component_write(component, reg + i, value);
+ if (ret) {
+ dev_err(component->dev,
+diff --git a/sound/soc/codecs/peb2466.c b/sound/soc/codecs/peb2466.c
+index 5dec69be0acb2e..06c83d2042f3e5 100644
+--- a/sound/soc/codecs/peb2466.c
++++ b/sound/soc/codecs/peb2466.c
+@@ -229,7 +229,8 @@ static int peb2466_reg_read(void *context, unsigned int reg, unsigned int *val)
+ case PEB2466_CMD_XOP:
+ case PEB2466_CMD_SOP:
+ ret = peb2466_read_byte(peb2466, reg, &tmp);
+- *val = tmp;
++ if (!ret)
++ *val = tmp;
+ break;
+ default:
+ dev_err(&peb2466->spi->dev, "Not a XOP or SOP command\n");
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 7938b52d741d8c..b69f6afa0ae40f 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -441,6 +441,7 @@ struct rt5645_priv {
+ struct regmap *regmap;
+ struct i2c_client *i2c;
+ struct gpio_desc *gpiod_hp_det;
++ struct gpio_desc *gpiod_cbj_sleeve;
+ struct snd_soc_jack *hp_jack;
+ struct snd_soc_jack *mic_jack;
+ struct snd_soc_jack *btn_jack;
+@@ -448,6 +449,7 @@ struct rt5645_priv {
+ struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
+ struct rt5645_eq_param_s *eq_param;
+ struct timer_list btn_check_timer;
++ struct mutex jd_mutex;
+
+ int codec_type;
+ int sysclk;
+@@ -3182,6 +3184,9 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+ RT5645_CBJ_MN_JD, 0);
+
++ if (rt5645->gpiod_cbj_sleeve)
++ gpiod_set_value(rt5645->gpiod_cbj_sleeve, 1);
++
+ msleep(600);
+ regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
+ val &= 0x7;
+@@ -3193,9 +3198,13 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ rt5645_enable_push_button_irq(component, true);
+ }
+ } else {
++ if (rt5645->en_button_func)
++ rt5645_enable_push_button_irq(component, false);
+ snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+ snd_soc_dapm_sync(dapm);
+ rt5645->jack_type = SND_JACK_HEADPHONE;
++ if (rt5645->gpiod_cbj_sleeve)
++ gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+ }
+ if (rt5645->pdata.level_trigger_irq)
+ regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+@@ -3223,6 +3232,9 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
+ if (rt5645->pdata.level_trigger_irq)
+ regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+ RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
++
++ if (rt5645->gpiod_cbj_sleeve)
++ gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+ }
+
+ return rt5645->jack_type;
+@@ -3295,6 +3307,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ if (!rt5645->component)
+ return;
+
++ mutex_lock(&rt5645->jd_mutex);
++
+ switch (rt5645->pdata.jd_mode) {
+ case 0: /* Not using rt5645 JD */
+ if (rt5645->gpiod_hp_det) {
+@@ -3309,6 +3323,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ report, SND_JACK_HEADPHONE);
+ snd_soc_jack_report(rt5645->mic_jack,
+ report, SND_JACK_MICROPHONE);
++ mutex_unlock(&rt5645->jd_mutex);
+ return;
+ case 4:
+ val = snd_soc_component_read(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
+@@ -3321,7 +3336,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+
+ if (!val && (rt5645->jack_type == 0)) { /* jack in */
+ report = rt5645_jack_detect(rt5645->component, 1);
+- } else if (!val && rt5645->jack_type != 0) {
++ } else if (!val && rt5645->jack_type == SND_JACK_HEADSET) {
+ /* for push button and jack out */
+ btn_type = 0;
+ if (snd_soc_component_read(rt5645->component, RT5645_INT_IRQ_ST) & 0x4) {
+@@ -3377,6 +3392,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
+ rt5645_jack_detect(rt5645->component, 0);
+ }
+
++ mutex_unlock(&rt5645->jd_mutex);
++
+ snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
+ snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE);
+ if (rt5645->en_button_func)
+@@ -3821,6 +3838,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"),
++ /*
++ * The strings above are too generic; LattePanda BIOS versions for
++ * all 4 hw revisions are:
++ * DF-BI-7-S70CR100-*
++ * DF-BI-7-S70CR110-*
++ * DF-BI-7-S70CR200-*
++ * LP-BS-7-S70CR700-*
++ * Do a partial match for S70CR to avoid false positive matches.
++ */
++ DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
+ },
+ .driver_data = (void *)&lattepanda_board_platform_data,
+ },
+@@ -3847,14 +3874,6 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ },
+ .driver_data = (void *)&ecs_ef20_platform_data,
+ },
+- {
+- .ident = "EF20EA",
+- .callback = cht_rt5645_ef20_quirk_cb,
+- .matches = {
+- DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
+- },
+- .driver_data = (void *)&ecs_ef20_platform_data,
+- },
+ { }
+ };
+
+@@ -3948,6 +3967,16 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
+ return ret;
+ }
+
++ rt5645->gpiod_cbj_sleeve = devm_gpiod_get_optional(&i2c->dev, "cbj-sleeve",
++ GPIOD_OUT_LOW);
++
++ if (IS_ERR(rt5645->gpiod_cbj_sleeve)) {
++ ret = PTR_ERR(rt5645->gpiod_cbj_sleeve);
++ dev_info(&i2c->dev, "failed to initialize gpiod, ret=%d\n", ret);
++ if (ret != -ENOENT)
++ return ret;
++ }
++
+ for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
+ rt5645->supplies[i].supply = rt5645_supply_names[i];
+
+@@ -4150,6 +4179,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
+ }
+ timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
+
++ mutex_init(&rt5645->jd_mutex);
+ INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+ INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
+
+@@ -4194,6 +4224,9 @@ static void rt5645_i2c_remove(struct i2c_client *i2c)
+ cancel_delayed_work_sync(&rt5645->jack_detect_work);
+ cancel_delayed_work_sync(&rt5645->rcclock_work);
+
++ if (rt5645->gpiod_cbj_sleeve)
++ gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
++
+ regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
+ }
+
+@@ -4209,6 +4242,9 @@ static void rt5645_i2c_shutdown(struct i2c_client *i2c)
+ 0);
+ msleep(20);
+ regmap_write(rt5645->regmap, RT5645_RESET, 0);
++
++ if (rt5645->gpiod_cbj_sleeve)
++ gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+ }
+
+ static int __maybe_unused rt5645_sys_suspend(struct device *dev)
+diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
+index e67c2e19cb1a72..1fdbef5fd6cbab 100644
+--- a/sound/soc/codecs/rt5682-sdw.c
++++ b/sound/soc/codecs/rt5682-sdw.c
+@@ -763,12 +763,12 @@ static int __maybe_unused rt5682_dev_resume(struct device *dev)
+ return 0;
+
+ if (!slave->unattach_request) {
++ mutex_lock(&rt5682->disable_irq_lock);
+ if (rt5682->disable_irq == true) {
+- mutex_lock(&rt5682->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
+ rt5682->disable_irq = false;
+- mutex_unlock(&rt5682->disable_irq_lock);
+ }
++ mutex_unlock(&rt5682->disable_irq_lock);
+ goto regmap_sync;
+ }
+
+diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
+index e3aca9c785a079..aa163ec4086223 100644
+--- a/sound/soc/codecs/rt5682.c
++++ b/sound/soc/codecs/rt5682.c
+@@ -2903,8 +2903,10 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
+ }
+
+ if (dev->of_node) {
+- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ dai_clk_hw);
++ if (ret)
++ return ret;
+ } else {
+ ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw,
+ init.name,
+diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
+index 68ac5ea50396d5..92c647d439ec7f 100644
+--- a/sound/soc/codecs/rt5682s.c
++++ b/sound/soc/codecs/rt5682s.c
+@@ -2828,7 +2828,9 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
+ }
+
+ if (dev->of_node) {
+- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw);
++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw);
++ if (ret)
++ return ret;
+ } else {
+ ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw,
+ init.name, dev_name(dev));
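
Both rt5682 variants above previously discarded the return value of devm_of_clk_add_hw_provider(), so a failed registration (for example -ENOMEM) would leave DT clock consumers dangling while probe reported success. The corrected shape, as a minimal sketch:

#include <linux/clk-provider.h>

/* Propagate clk provider registration failures instead of dropping them. */
static int example_register_dai_clk(struct device *dev, struct clk_hw *hw)
{
	if (dev->of_node)
		return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
						   hw);

	return 0;	/* non-DT path (clkdev) omitted in this sketch */
}
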
+diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
+index 935e597022d324..b8471b2d8f4f13 100644
+--- a/sound/soc/codecs/rt711-sdca-sdw.c
++++ b/sound/soc/codecs/rt711-sdca-sdw.c
+@@ -438,13 +438,13 @@ static int __maybe_unused rt711_sdca_dev_resume(struct device *dev)
+ return 0;
+
+ if (!slave->unattach_request) {
++ mutex_lock(&rt711->disable_irq_lock);
+ if (rt711->disable_irq == true) {
+- mutex_lock(&rt711->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ rt711->disable_irq = false;
+- mutex_unlock(&rt711->disable_irq_lock);
+ }
++ mutex_unlock(&rt711->disable_irq_lock);
+ goto regmap_sync;
+ }
+
+diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
+index 3f5773310ae8cc..988451f24a7562 100644
+--- a/sound/soc/codecs/rt711-sdw.c
++++ b/sound/soc/codecs/rt711-sdw.c
+@@ -536,12 +536,12 @@ static int __maybe_unused rt711_dev_resume(struct device *dev)
+ return 0;
+
+ if (!slave->unattach_request) {
++ mutex_lock(&rt711->disable_irq_lock);
+ if (rt711->disable_irq == true) {
+- mutex_lock(&rt711->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
+ rt711->disable_irq = false;
+- mutex_unlock(&rt711->disable_irq_lock);
+ }
++ mutex_unlock(&rt711->disable_irq_lock);
+ goto regmap_sync;
+ }
+
+diff --git a/sound/soc/codecs/rt712-sdca-sdw.c b/sound/soc/codecs/rt712-sdca-sdw.c
+index 6b644a89c5890b..ba877432cea610 100644
+--- a/sound/soc/codecs/rt712-sdca-sdw.c
++++ b/sound/soc/codecs/rt712-sdca-sdw.c
+@@ -438,13 +438,14 @@ static int __maybe_unused rt712_sdca_dev_resume(struct device *dev)
+ return 0;
+
+ if (!slave->unattach_request) {
++ mutex_lock(&rt712->disable_irq_lock);
+ if (rt712->disable_irq == true) {
+- mutex_lock(&rt712->disable_irq_lock);
++
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_0);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ rt712->disable_irq = false;
+- mutex_unlock(&rt712->disable_irq_lock);
+ }
++ mutex_unlock(&rt712->disable_irq_lock);
+ goto regmap_sync;
+ }
+
+diff --git a/sound/soc/codecs/rt712-sdca.c b/sound/soc/codecs/rt712-sdca.c
+index 7077ff6ba1f4bc..6954fbe7ec5f3b 100644
+--- a/sound/soc/codecs/rt712-sdca.c
++++ b/sound/soc/codecs/rt712-sdca.c
+@@ -963,13 +963,6 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
+ rt712_sdca_parse_dt(rt712, &rt712->slave->dev);
+ rt712->component = component;
+
+- if (!rt712->first_hw_init)
+- return 0;
+-
+- ret = pm_runtime_resume(component->dev);
+- if (ret < 0 && ret != -EACCES)
+- return ret;
+-
+ /* add SPK route */
+ if (rt712->hw_id != RT712_DEV_ID_713) {
+ snd_soc_add_component_controls(component,
+@@ -980,6 +973,13 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
+ rt712_sdca_spk_dapm_routes, ARRAY_SIZE(rt712_sdca_spk_dapm_routes));
+ }
+
++ if (!rt712->first_hw_init)
++ return 0;
++
++ ret = pm_runtime_resume(component->dev);
++ if (ret < 0 && ret != -EACCES)
++ return ret;
++
+ return 0;
+ }
+
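
The rt712_sdca_probe() reorder above is about registration order: the speaker controls and routes are now added before the first_hw_init early return, so the kcontrols exist even on the probe pass that runs before the first hardware init. A sketch of that ordering, with hypothetical names:

#include <linux/pm_runtime.h>
#include <sound/soc.h>

static bool example_first_hw_init;	/* hypothetical state flag */
static const struct snd_kcontrol_new example_ctrls[] = { };

static int example_component_probe(struct snd_soc_component *comp)
{
	int ret;

	/* Controls first: registered unconditionally. */
	ret = snd_soc_add_component_controls(comp, example_ctrls,
					     ARRAY_SIZE(example_ctrls));
	if (ret)
		return ret;

	/* Hardware access only once the device has been initialised. */
	if (!example_first_hw_init)
		return 0;

	ret = pm_runtime_resume(comp->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	return 0;
}
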
+diff --git a/sound/soc/codecs/rt715-sdca.c b/sound/soc/codecs/rt715-sdca.c
+index 9fa96fd83d4aa5..84f1dc453e9719 100644
+--- a/sound/soc/codecs/rt715-sdca.c
++++ b/sound/soc/codecs/rt715-sdca.c
+@@ -316,7 +316,7 @@ static int rt715_sdca_set_amp_gain_8ch_get(struct snd_kcontrol *kcontrol,
+ return 0;
+ }
+
+-static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -17625, 375, 0);
++static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -1725, 75, 0);
+ static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
+
+ static int rt715_sdca_get_volsw(struct snd_kcontrol *kcontrol,
+@@ -477,7 +477,7 @@ static const struct snd_kcontrol_new rt715_sdca_snd_controls[] = {
+ RT715_SDCA_FU_VOL_CTRL, CH_01),
+ SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC7_27_VOL,
+ RT715_SDCA_FU_VOL_CTRL, CH_02),
+- 0x2f, 0x7f, 0,
++ 0x2f, 0x3f, 0,
+ rt715_sdca_set_amp_gain_get, rt715_sdca_set_amp_gain_put,
+ in_vol_tlv),
+ RT715_SDCA_EXT_TLV("FU02 Capture Volume",
+@@ -485,13 +485,13 @@ static const struct snd_kcontrol_new rt715_sdca_snd_controls[] = {
+ RT715_SDCA_FU_VOL_CTRL, CH_01),
+ rt715_sdca_set_amp_gain_4ch_get,
+ rt715_sdca_set_amp_gain_4ch_put,
+- in_vol_tlv, 4, 0x7f),
++ in_vol_tlv, 4, 0x3f),
+ RT715_SDCA_EXT_TLV("FU06 Capture Volume",
+ SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC10_11_VOL,
+ RT715_SDCA_FU_VOL_CTRL, CH_01),
+ rt715_sdca_set_amp_gain_4ch_get,
+ rt715_sdca_set_amp_gain_4ch_put,
+- in_vol_tlv, 4, 0x7f),
++ in_vol_tlv, 4, 0x3f),
+ /* MIC Boost Control */
+ RT715_SDCA_BOOST_EXT_TLV("FU0E Boost",
+ SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_DMIC_GAIN_EN,
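
The rt715-sdca volume fix pairs a smaller register range (0x3f instead of 0x7f) with a retuned TLV. DECLARE_TLV_DB_SCALE() takes the minimum and step in 0.01 dB units, so assuming the usual zero-based mapping the new top step lands at -1725 + 63 x 75 = 3000, i.e. +30 dB, while the old pairing of 0x7f with a 3.75 dB step would have advertised -17625 + 127 x 375 = 30000, i.e. an absurd +300 dB, to user space. As a worked snippet:

#include <sound/tlv.h>

/* min = -17.25 dB, step = 0.75 dB, no mute step */
static const DECLARE_TLV_DB_SCALE(example_in_vol_tlv, -1725, 75, 0);
/* raw 0x00 -> -17.25 dB; raw 0x3f (63) -> -17.25 + 63 * 0.75 = +30 dB */
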
+diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
+index 21f37babd148a4..376585f5a8dd8b 100644
+--- a/sound/soc/codecs/rt715-sdw.c
++++ b/sound/soc/codecs/rt715-sdw.c
+@@ -111,6 +111,7 @@ static bool rt715_readable_register(struct device *dev, unsigned int reg)
+ case 0x839d:
+ case 0x83a7:
+ case 0x83a9:
++ case 0x752001:
+ case 0x752039:
+ return true;
+ default:
+diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
+index a38ec586221457..32578a212642e0 100644
+--- a/sound/soc/codecs/rt722-sdca-sdw.c
++++ b/sound/soc/codecs/rt722-sdca-sdw.c
+@@ -68,6 +68,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
+ case 0x200007f:
+ case 0x2000082 ... 0x200008e:
+ case 0x2000090 ... 0x2000094:
++ case 0x3110000:
+ case 0x5300000 ... 0x5300002:
+ case 0x5400002:
+ case 0x5600000 ... 0x5600007:
+@@ -125,6 +126,7 @@ static bool rt722_sdca_mbq_volatile_register(struct device *dev, unsigned int re
+ case 0x2000067:
+ case 0x2000084:
+ case 0x2000086:
++ case 0x3110000:
+ return true;
+ default:
+ return false;
+@@ -347,7 +349,7 @@ static int rt722_sdca_interrupt_callback(struct sdw_slave *slave,
+
+ if (status->sdca_cascade && !rt722->disable_irq)
+ mod_delayed_work(system_power_efficient_wq,
+- &rt722->jack_detect_work, msecs_to_jiffies(30));
++ &rt722->jack_detect_work, msecs_to_jiffies(280));
+
+ mutex_unlock(&rt722->disable_irq_lock);
+
+@@ -464,13 +466,13 @@ static int __maybe_unused rt722_sdca_dev_resume(struct device *dev)
+ return 0;
+
+ if (!slave->unattach_request) {
++ mutex_lock(&rt722->disable_irq_lock);
+ if (rt722->disable_irq == true) {
+- mutex_lock(&rt722->disable_irq_lock);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK1, SDW_SCP_SDCA_INTMASK_SDCA_6);
+ sdw_write_no_pm(slave, SDW_SCP_SDCA_INTMASK2, SDW_SCP_SDCA_INTMASK_SDCA_8);
+ rt722->disable_irq = false;
+- mutex_unlock(&rt722->disable_irq_lock);
+ }
++ mutex_unlock(&rt722->disable_irq_lock);
+ goto regmap_sync;
+ }
+
+diff --git a/sound/soc/codecs/rt722-sdca.c b/sound/soc/codecs/rt722-sdca.c
+index 0e1c65a20392ad..9ff607984ea193 100644
+--- a/sound/soc/codecs/rt722-sdca.c
++++ b/sound/soc/codecs/rt722-sdca.c
+@@ -1329,7 +1329,7 @@ static struct snd_soc_dai_driver rt722_sdca_dai[] = {
+ .capture = {
+ .stream_name = "DP6 DMic Capture",
+ .channels_min = 1,
+- .channels_max = 2,
++ .channels_max = 4,
+ .rates = RT722_STEREO_RATES,
+ .formats = RT722_FORMATS,
+ },
+@@ -1438,9 +1438,12 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
+ int loop_check, chk_cnt = 100, ret;
+ unsigned int calib_status = 0;
+
+- /* Read eFuse */
+- rt722_sdca_index_write(rt722, RT722_VENDOR_SPK_EFUSE, RT722_DC_CALIB_CTRL,
+- 0x4808);
++ /* Config analog bias */
++ rt722_sdca_index_write(rt722, RT722_VENDOR_REG, RT722_ANALOG_BIAS_CTL3,
++ 0xa081);
++ /* GE related settings */
++ rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_GE_RELATED_CTL2,
++ 0xa009);
+ /* Button A, B, C, D bypass mode */
+ rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_UMP_HID_CTL4,
+ 0xcf00);
+@@ -1474,9 +1477,6 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
+ if ((calib_status & 0x0040) == 0x0)
+ break;
+ }
+- /* Release HP-JD, EN_CBJ_TIE_GL/R open, en_osw gating auto done bit */
+- rt722_sdca_index_write(rt722, RT722_VENDOR_REG, RT722_DIGITAL_MISC_CTRL4,
+- 0x0010);
+ /* Set ADC09 power entity floating control */
+ rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_ADC0A_08_PDE_FLOAT_CTL,
+ 0x2a12);
+@@ -1489,8 +1489,21 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
+ /* Set DAC03 and HP power entity floating control */
+ rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_DAC03_HP_PDE_FLOAT_CTL,
+ 0x4040);
++ rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_ENT_FLOAT_CTRL_1,
++ 0x4141);
++ rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_FLOAT_CTRL_1,
++ 0x0101);
+ /* Fine tune PDE40 latency */
+ regmap_write(rt722->regmap, 0x2f58, 0x07);
++ regmap_write(rt722->regmap, 0x2f03, 0x06);
++ /* MIC VRefo */
++ rt722_sdca_index_update_bits(rt722, RT722_VENDOR_REG,
++ RT722_COMBO_JACK_AUTO_CTL1, 0x0200, 0x0200);
++ rt722_sdca_index_update_bits(rt722, RT722_VENDOR_REG,
++ RT722_VREFO_GAT, 0x4000, 0x4000);
++ /* Release HP-JD, EN_CBJ_TIE_GL/R open, en_osw gating auto done bit */
++ rt722_sdca_index_write(rt722, RT722_VENDOR_REG, RT722_DIGITAL_MISC_CTRL4,
++ 0x0010);
+ }
+
+ int rt722_sdca_io_init(struct device *dev, struct sdw_slave *slave)
+diff --git a/sound/soc/codecs/rt722-sdca.h b/sound/soc/codecs/rt722-sdca.h
+index 44af8901352eb6..2464361a7958c6 100644
+--- a/sound/soc/codecs/rt722-sdca.h
++++ b/sound/soc/codecs/rt722-sdca.h
+@@ -69,6 +69,7 @@ struct rt722_sdca_dmic_kctrl_priv {
+ #define RT722_COMBO_JACK_AUTO_CTL2 0x46
+ #define RT722_COMBO_JACK_AUTO_CTL3 0x47
+ #define RT722_DIGITAL_MISC_CTRL4 0x4a
++#define RT722_VREFO_GAT 0x63
+ #define RT722_FSM_CTL 0x67
+ #define RT722_SDCA_INTR_REC 0x82
+ #define RT722_SW_CONFIG1 0x8a
+@@ -127,6 +128,8 @@ struct rt722_sdca_dmic_kctrl_priv {
+ #define RT722_UMP_HID_CTL6 0x66
+ #define RT722_UMP_HID_CTL7 0x67
+ #define RT722_UMP_HID_CTL8 0x68
++#define RT722_FLOAT_CTRL_1 0x70
++#define RT722_ENT_FLOAT_CTRL_1 0x76
+
+ /* Parameter & Verb control 01 (0x1a)(NID:20h) */
+ #define RT722_HIDDEN_REG_SW_RESET (0x1 << 14)
+diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
+index 8c9dc318b0e824..c65a4219ecd6c2 100644
+--- a/sound/soc/codecs/tas2552.c
++++ b/sound/soc/codecs/tas2552.c
+@@ -2,7 +2,8 @@
+ /*
+ * tas2552.c - ALSA SoC Texas Instruments TAS2552 Mono Audio Amplifier
+ *
+- * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
++ * Copyright (C) 2014 - 2024 Texas Instruments Incorporated -
++ * https://www.ti.com
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ */
+@@ -119,12 +120,14 @@ static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] =
+ &tas2552_input_mux_control),
+
+ SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0),
++ SND_SOC_DAPM_AIF_OUT("ASI OUT", "DAC Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0),
+ SND_SOC_DAPM_POST("Post Event", tas2552_post_event),
+
+- SND_SOC_DAPM_OUTPUT("OUT")
++ SND_SOC_DAPM_OUTPUT("OUT"),
++ SND_SOC_DAPM_INPUT("DMIC")
+ };
+
+ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+@@ -134,6 +137,7 @@ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+ {"ClassD", NULL, "Input selection"},
+ {"OUT", NULL, "ClassD"},
+ {"ClassD", NULL, "PLL"},
++ {"ASI OUT", NULL, "DMIC"}
+ };
+
+ #ifdef CONFIG_PM
+@@ -538,6 +542,13 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = TAS2552_FORMATS,
+ },
++ .capture = {
++ .stream_name = "Capture",
++ .channels_min = 2,
++ .channels_max = 2,
++ .rates = SNDRV_PCM_RATE_8000_192000,
++ .formats = TAS2552_FORMATS,
++ },
+ .ops = &tas2552_speaker_dai_ops,
+ },
+ };
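
The tas2552 change adds a capture direction, and in DAPM a capture stream only powers up when a complete path reaches the AIF stream widget; that appears to be why the hunk adds the "ASI OUT" AIF widget, a "DMIC" input and the route between them alongside the new .capture description. A minimal sketch of such a trio, with hypothetical names:

#include <sound/soc-dapm.h>

static const struct snd_soc_dapm_widget example_widgets[] = {
	SND_SOC_DAPM_AIF_OUT("ASI OUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_INPUT("DMIC"),
};

/* Without this route DAPM sees no source and never starts the stream. */
static const struct snd_soc_dapm_route example_routes[] = {
	{ "ASI OUT", NULL, "DMIC" },
};
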
+diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c
+index ffb26e4a7e2f09..0444cf90c5119f 100644
+--- a/sound/soc/codecs/tas2781-comlib.c
++++ b/sound/soc/codecs/tas2781-comlib.c
+@@ -14,7 +14,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of_irq.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+@@ -39,7 +38,7 @@ static const struct regmap_range_cfg tasdevice_ranges[] = {
+ static const struct regmap_config tasdevice_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+- .cache_type = REGCACHE_RBTREE,
++ .cache_type = REGCACHE_NONE,
+ .ranges = tasdevice_ranges,
+ .num_ranges = ARRAY_SIZE(tasdevice_ranges),
+ .max_register = 256 * 128,
+@@ -267,6 +266,7 @@ void tas2781_reset(struct tasdevice_priv *tas_dev)
+ EXPORT_SYMBOL_GPL(tas2781_reset);
+
+ int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
++ struct module *module,
+ void (*cont)(const struct firmware *fw, void *context))
+ {
+ int ret = 0;
+@@ -280,7 +280,7 @@ int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ tas_priv->dev_name, tas_priv->ndev);
+ crc8_populate_msb(tas_priv->crc8_lkp_tbl, TASDEVICE_CRC8_POLYNOMIAL);
+ tas_priv->codec = codec;
+- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
++ ret = request_firmware_nowait(module, FW_ACTION_UEVENT,
+ tas_priv->rca_binaryname, tas_priv->dev, GFP_KERNEL, tas_priv,
+ cont);
+ if (ret)
+@@ -316,8 +316,6 @@ int tasdevice_init(struct tasdevice_priv *tas_priv)
+ tas_priv->tasdevice[i].cur_conf = -1;
+ }
+
+- dev_set_drvdata(tas_priv->dev, tas_priv);
+-
+ mutex_init(&tas_priv->codec_lock);
+
+ out:
+@@ -407,13 +405,25 @@ EXPORT_SYMBOL_GPL(tasdevice_dsp_remove);
+
+ void tasdevice_remove(struct tasdevice_priv *tas_priv)
+ {
+- if (gpio_is_valid(tas_priv->irq_info.irq_gpio))
+- gpio_free(tas_priv->irq_info.irq_gpio);
+- kfree(tas_priv->acpi_subsystem_id);
+ mutex_destroy(&tas_priv->codec_lock);
+ }
+ EXPORT_SYMBOL_GPL(tasdevice_remove);
+
++int tasdevice_save_calibration(struct tasdevice_priv *tas_priv)
++{
++ if (tas_priv->save_calibration)
++ return tas_priv->save_calibration(tas_priv);
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(tasdevice_save_calibration);
++
++void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv)
++{
++ if (tas_priv->apply_calibration && tas_priv->cali_data.total_sz)
++ tas_priv->apply_calibration(tas_priv);
++}
++EXPORT_SYMBOL_GPL(tasdevice_apply_calibration);
++
+ static int tasdevice_clamp(int val, int max, unsigned int invert)
+ {
+ if (val > max)
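
tascodec_init() growing a struct module * argument matters because tas2781-comlib is a shared library module: request_firmware_nowait() pins the module passed to it for the duration of the async request, and THIS_MODULE inside the library pins the library rather than the codec driver that actually owns the device. A sketch of the convention, with hypothetical names:

#include <linux/firmware.h>
#include <linux/module.h>

/* Library helper: the caller supplies itself as the firmware owner. */
static inline int example_lib_request_fw(struct device *dev,
		struct module *owner, const char *name, void *ctx,
		void (*cont)(const struct firmware *fw, void *context))
{
	return request_firmware_nowait(owner, FW_ACTION_UEVENT, name,
				       dev, GFP_KERNEL, ctx, cont);
}
/* A codec driver calls example_lib_request_fw(dev, THIS_MODULE, ...). */
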
+diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
+index eb55abae0d7bbb..629e2195a890b2 100644
+--- a/sound/soc/codecs/tas2781-fmwlib.c
++++ b/sound/soc/codecs/tas2781-fmwlib.c
+@@ -1,8 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0
+ //
+-// tasdevice-fmw.c -- TASDEVICE firmware support
++// tas2781-fmwlib.c -- TASDEVICE firmware support
+ //
+-// Copyright 2023 Texas Instruments, Inc.
++// Copyright 2023 - 2024 Texas Instruments, Inc.
+ //
+ // Author: Shenghao Ding <shenghao-ding@ti.com>
+
+@@ -13,7 +13,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of_irq.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+@@ -21,7 +20,7 @@
+ #include <sound/soc.h>
+ #include <sound/tlv.h>
+ #include <sound/tas2781.h>
+-
++#include <asm/unaligned.h>
+
+ #define ERROR_PRAM_CRCCHK 0x0000000
+ #define ERROR_YRAM_CRCCHK 0x0000001
+@@ -125,8 +124,7 @@ static struct tasdevice_config_info *tasdevice_add_config(
+ /* convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- cfg_info->nblocks =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ cfg_info->nblocks = get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ /* Several kinds of dsp/algorithm firmwares can run on tas2781,
+@@ -170,14 +168,14 @@ static struct tasdevice_config_info *tasdevice_add_config(
+
+ }
+ bk_da[i]->yram_checksum =
+- be16_to_cpup((__be16 *)&config_data[config_offset]);
++ get_unaligned_be16(&config_data[config_offset]);
+ config_offset += 2;
+ bk_da[i]->block_size =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ get_unaligned_be32(&config_data[config_offset]);
+ config_offset += 4;
+
+ bk_da[i]->n_subblks =
+- be32_to_cpup((__be32 *)&config_data[config_offset]);
++ get_unaligned_be32(&config_data[config_offset]);
+
+ config_offset += 4;
+
+@@ -227,7 +225,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ }
+ buf = (unsigned char *)fmw->data;
+
+- fw_hdr->img_sz = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->img_sz = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_hdr->img_sz != fmw->size) {
+ dev_err(tas_priv->dev,
+@@ -238,9 +236,9 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ goto out;
+ }
+
+- fw_hdr->checksum = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->checksum = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+- fw_hdr->binary_version_num = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->binary_version_num = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->binary_version_num < 0x103) {
+ dev_err(tas_priv->dev, "File version 0x%04x is too low",
+ fw_hdr->binary_version_num);
+@@ -249,7 +247,7 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ goto out;
+ }
+ offset += 4;
+- fw_hdr->drv_fw_version = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->drv_fw_version = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+ fw_hdr->plat_type = buf[offset];
+ offset += 1;
+@@ -277,11 +275,11 @@ int tasdevice_rca_parser(void *context, const struct firmware *fmw)
+ for (i = 0; i < TASDEVICE_DEVICE_SUM; i++, offset++)
+ fw_hdr->devs[i] = buf[offset];
+
+- fw_hdr->nconfig = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->nconfig = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ for (i = 0; i < TASDEVICE_CONFIG_SUM; i++) {
+- fw_hdr->config_size[i] = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->config_size[i] = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ total_config_sz += fw_hdr->config_size[i];
+ }
+@@ -330,7 +328,7 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
+ /* convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- block->type = be32_to_cpup((__be32 *)&data[offset]);
++ block->type = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ block->is_pchksum_present = data[offset];
+@@ -345,10 +343,10 @@ static int fw_parse_block_data_kernel(struct tasdevice_fw *tas_fmw,
+ block->ychksum = data[offset];
+ offset++;
+
+- block->blk_size = be32_to_cpup((__be32 *)&data[offset]);
++ block->blk_size = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+- block->nr_subblocks = be32_to_cpup((__be32 *)&data[offset]);
++ block->nr_subblocks = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ if (offset + block->blk_size > fmw->size) {
+@@ -381,7 +379,7 @@ static int fw_parse_data_kernel(struct tasdevice_fw *tas_fmw,
+ offset = -EINVAL;
+ goto out;
+ }
+- img_data->nr_blk = be32_to_cpup((__be32 *)&data[offset]);
++ img_data->nr_blk = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ img_data->dev_blks = kcalloc(img_data->nr_blk,
+@@ -477,14 +475,14 @@ static int fw_parse_variable_header_kernel(
+ offset = -EINVAL;
+ goto out;
+ }
+- fw_hdr->device_family = be16_to_cpup((__be16 *)&buf[offset]);
++ fw_hdr->device_family = get_unaligned_be16(&buf[offset]);
+ if (fw_hdr->device_family != 0) {
+ dev_err(tas_priv->dev, "%s:not TAS device\n", __func__);
+ offset = -EINVAL;
+ goto out;
+ }
+ offset += 2;
+- fw_hdr->device = be16_to_cpup((__be16 *)&buf[offset]);
++ fw_hdr->device = get_unaligned_be16(&buf[offset]);
+ if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
+ fw_hdr->device == 6) {
+ dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
+@@ -502,7 +500,7 @@ static int fw_parse_variable_header_kernel(
+ goto out;
+ }
+
+- tas_fmw->nr_programs = be32_to_cpup((__be32 *)&buf[offset]);
++ tas_fmw->nr_programs = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ if (tas_fmw->nr_programs == 0 || tas_fmw->nr_programs >
+@@ -521,14 +519,14 @@ static int fw_parse_variable_header_kernel(
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ program = &(tas_fmw->programs[i]);
+- program->prog_size = be32_to_cpup((__be32 *)&buf[offset]);
++ program->prog_size = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ }
+
+ /* Skip the unused prog_size */
+ offset += 4 * (TASDEVICE_MAXPROGRAM_NUM_KERNEL - tas_fmw->nr_programs);
+
+- tas_fmw->nr_configurations = be32_to_cpup((__be32 *)&buf[offset]);
++ tas_fmw->nr_configurations = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+
+ /* The max number of config in firmware greater than 4 pieces of
+@@ -560,7 +558,7 @@ static int fw_parse_variable_header_kernel(
+
+ for (i = 0; i < tas_fmw->nr_programs; i++) {
+ config = &(tas_fmw->configs[i]);
+- config->cfg_size = be32_to_cpup((__be32 *)&buf[offset]);
++ config->cfg_size = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ }
+
+@@ -598,7 +596,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
+ switch (subblk_typ) {
+ case TASDEVICE_CMD_SING_W: {
+ int i;
+- unsigned short len = be16_to_cpup((__be16 *)&data[2]);
++ unsigned short len = get_unaligned_be16(&data[2]);
+
+ subblk_offset += 2;
+ if (subblk_offset + 4 * len > sublocksize) {
+@@ -624,7 +622,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
+ }
+ break;
+ case TASDEVICE_CMD_BURST: {
+- unsigned short len = be16_to_cpup((__be16 *)&data[2]);
++ unsigned short len = get_unaligned_be16(&data[2]);
+
+ subblk_offset += 2;
+ if (subblk_offset + 4 + len > sublocksize) {
+@@ -665,7 +663,7 @@ static int tasdevice_process_block(void *context, unsigned char *data,
+ is_err = true;
+ break;
+ }
+- sleep_time = be16_to_cpup((__be16 *)&data[2]) * 1000;
++ sleep_time = get_unaligned_be16(&data[2]) * 1000;
+ usleep_range(sleep_time, sleep_time + 50);
+ subblk_offset += 2;
+ }
+@@ -940,7 +938,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv
+
+ offset += len;
+
+- fw_hdr->device_family = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->device_family = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->device_family != 0) {
+ dev_err(tas_priv->dev, "%s: not TAS device\n", __func__);
+ offset = -EINVAL;
+@@ -948,7 +946,7 @@ static int fw_parse_variable_hdr(struct tasdevice_priv
+ }
+ offset += 4;
+
+- fw_hdr->device = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_hdr->device = get_unaligned_be32(&buf[offset]);
+ if (fw_hdr->device >= TASDEVICE_DSP_TAS_MAX_DEVICE ||
+ fw_hdr->device == 6) {
+ dev_err(tas_priv->dev, "Unsupported dev %d\n", fw_hdr->device);
+@@ -993,7 +991,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
+ offset = -EINVAL;
+ goto out;
+ }
+- block->type = be32_to_cpup((__be32 *)&data[offset]);
++ block->type = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ if (tas_fmw->fw_hdr.fixed_hdr.drv_ver >= PPC_DRIVER_CRCCHK) {
+@@ -1018,7 +1016,7 @@ static int fw_parse_block_data(struct tasdevice_fw *tas_fmw,
+ block->is_ychksum_present = 0;
+ }
+
+- block->nr_cmds = be32_to_cpup((__be32 *)&data[offset]);
++ block->nr_cmds = get_unaligned_be32(&data[offset]);
+ offset += 4;
+
+ n = block->nr_cmds * 4;
+@@ -1069,7 +1067,7 @@ static int fw_parse_data(struct tasdevice_fw *tas_fmw,
+ goto out;
+ }
+ offset += n;
+- img_data->nr_blk = be16_to_cpup((__be16 *)&data[offset]);
++ img_data->nr_blk = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ img_data->dev_blks = kcalloc(img_data->nr_blk,
+@@ -1106,7 +1104,7 @@ static int fw_parse_program_data(struct tasdevice_priv *tas_priv,
+ offset = -EINVAL;
+ goto out;
+ }
+- tas_fmw->nr_programs = be16_to_cpup((__be16 *)&buf[offset]);
++ tas_fmw->nr_programs = get_unaligned_be16(&buf[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_programs == 0) {
+@@ -1173,7 +1171,7 @@ static int fw_parse_configuration_data(
+ offset = -EINVAL;
+ goto out;
+ }
+- tas_fmw->nr_configurations = be16_to_cpup((__be16 *)&data[offset]);
++ tas_fmw->nr_configurations = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_configurations == 0) {
+@@ -1805,7 +1803,7 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv,
+ /* Convert data[offset], data[offset + 1], data[offset + 2] and
+ * data[offset + 3] into host
+ */
+- fw_fixed_hdr->fwsize = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_fixed_hdr->fwsize = get_unaligned_be32(&buf[offset]);
+ offset += 4;
+ if (fw_fixed_hdr->fwsize != fmw->size) {
+ dev_err(tas_priv->dev, "File size not match, %lu %u",
+@@ -1814,9 +1812,9 @@ static int fw_parse_header(struct tasdevice_priv *tas_priv,
+ goto out;
+ }
+ offset += 4;
+- fw_fixed_hdr->ppcver = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_fixed_hdr->ppcver = get_unaligned_be32(&buf[offset]);
+ offset += 8;
+- fw_fixed_hdr->drv_ver = be32_to_cpup((__be32 *)&buf[offset]);
++ fw_fixed_hdr->drv_ver = get_unaligned_be32(&buf[offset]);
+ offset += 72;
+
+ out:
+@@ -1858,7 +1856,7 @@ static int fw_parse_calibration_data(struct tasdevice_priv *tas_priv,
+ offset = -EINVAL;
+ goto out;
+ }
+- tas_fmw->nr_calibrations = be16_to_cpup((__be16 *)&data[offset]);
++ tas_fmw->nr_calibrations = get_unaligned_be16(&data[offset]);
+ offset += 2;
+
+ if (tas_fmw->nr_calibrations != 1) {
+@@ -1908,7 +1906,7 @@ int tas2781_load_calibration(void *context, char *file_name,
+ {
+ struct tasdevice_priv *tas_priv = (struct tasdevice_priv *)context;
+ struct tasdevice *tasdev = &(tas_priv->tasdevice[i]);
+- const struct firmware *fw_entry;
++ const struct firmware *fw_entry = NULL;
+ struct tasdevice_fw *tas_fmw;
+ struct firmware fmw;
+ int offset = 0;
+@@ -2012,6 +2010,7 @@ static int tasdevice_dspfw_ready(const struct firmware *fmw,
+ case 0x301:
+ case 0x302:
+ case 0x502:
++ case 0x503:
+ tas_priv->fw_parse_variable_header =
+ fw_parse_variable_header_kernel;
+ tas_priv->fw_parse_program_data =
+@@ -2180,6 +2179,24 @@ static int tasdevice_load_data(struct tasdevice_priv *tas_priv,
+ return ret;
+ }
+
++static void tasdev_load_calibrated_data(struct tasdevice_priv *priv, int i)
++{
++ struct tasdevice_calibration *cal;
++ struct tasdevice_fw *cal_fmw;
++
++ cal_fmw = priv->tasdevice[i].cali_data_fmw;
++
++ /* No calibrated data for current devices, playback will go ahead. */
++ if (!cal_fmw)
++ return;
++
++ cal = cal_fmw->calibrations;
++ if (!cal)
++ return;
++
++ load_calib_data(priv, &cal->dev_data);
++}
++
+ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ int cfg_no, int rca_conf_no)
+ {
+@@ -2219,11 +2236,11 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ goto out;
+ }
+
+- conf = &(tas_fmw->configs[cfg_no]);
+ for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
+ if (cfg_info[rca_conf_no]->active_dev & (1 << i)) {
+- if (tas_priv->tasdevice[i].cur_prog != prm_no
+- || tas_priv->force_fwload_status) {
++ if (prm_no >= 0
++ && (tas_priv->tasdevice[i].cur_prog != prm_no
++ || tas_priv->force_fwload_status)) {
+ tas_priv->tasdevice[i].cur_conf = -1;
+ tas_priv->tasdevice[i].is_loading = true;
+ prog_status++;
+@@ -2239,26 +2256,15 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ for (i = 0; i < tas_priv->ndev; i++) {
+ if (tas_priv->tasdevice[i].is_loaderr == true)
+ continue;
+- else if (tas_priv->tasdevice[i].is_loaderr == false
+- && tas_priv->tasdevice[i].is_loading == true) {
+- struct tasdevice_fw *cal_fmw =
+- tas_priv->tasdevice[i].cali_data_fmw;
+-
+- if (cal_fmw) {
+- struct tasdevice_calibration
+- *cal = cal_fmw->calibrations;
+-
+- if (cal)
+- load_calib_data(tas_priv,
+- &(cal->dev_data));
+- }
++ if (tas_priv->tasdevice[i].is_loaderr == false &&
++ tas_priv->tasdevice[i].is_loading == true)
+ tas_priv->tasdevice[i].cur_prog = prm_no;
+- }
+ }
+ }
+
+ for (i = 0, status = 0; i < tas_priv->ndev; i++) {
+- if (tas_priv->tasdevice[i].cur_conf != cfg_no
++ if (cfg_no >= 0
++ && tas_priv->tasdevice[i].cur_conf != cfg_no
+ && (cfg_info[rca_conf_no]->active_dev & (1 << i))
+ && (tas_priv->tasdevice[i].is_loaderr == false)) {
+ status++;
+@@ -2268,15 +2274,20 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ }
+
+ if (status) {
++ conf = &(tas_fmw->configs[cfg_no]);
+ status = 0;
+ tasdevice_load_data(tas_priv, &(conf->dev_data));
+ for (i = 0; i < tas_priv->ndev; i++) {
+ if (tas_priv->tasdevice[i].is_loaderr == true) {
+- status |= 1 << (i + 4);
++ status |= BIT(i + 4);
+ continue;
+- } else if (tas_priv->tasdevice[i].is_loaderr == false
+- && tas_priv->tasdevice[i].is_loading == true)
++ }
++
++ if (tas_priv->tasdevice[i].is_loaderr == false &&
++ tas_priv->tasdevice[i].is_loading == true) {
++ tasdev_load_calibrated_data(tas_priv, i);
+ tas_priv->tasdevice[i].cur_conf = cfg_no;
++ }
+ }
+ } else
+ dev_dbg(tas_priv->dev, "%s: Unneeded loading dsp conf %d\n",
+@@ -2311,7 +2322,7 @@ int tasdevice_prmg_load(void *context, int prm_no)
+ }
+
+ for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
+- if (tas_priv->tasdevice[i].cur_prog != prm_no) {
++ if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) {
+ tas_priv->tasdevice[i].cur_conf = -1;
+ tas_priv->tasdevice[i].is_loading = true;
+ prog_status++;
+@@ -2335,79 +2346,27 @@ int tasdevice_prmg_load(void *context, int prm_no)
+ }
+ EXPORT_SYMBOL_NS_GPL(tasdevice_prmg_load, SND_SOC_TAS2781_FMWLIB);
+
+-int tasdevice_prmg_calibdata_load(void *context, int prm_no)
+-{
+- struct tasdevice_priv *tas_priv = (struct tasdevice_priv *) context;
+- struct tasdevice_fw *tas_fmw = tas_priv->fmw;
+- struct tasdevice_prog *program;
+- int prog_status = 0;
+- int i;
+-
+- if (!tas_fmw) {
+- dev_err(tas_priv->dev, "%s: Firmware is NULL\n", __func__);
+- goto out;
+- }
+-
+- if (prm_no >= tas_fmw->nr_programs) {
+- dev_err(tas_priv->dev,
+- "%s: prm(%d) is not in range of Programs %u\n",
+- __func__, prm_no, tas_fmw->nr_programs);
+- goto out;
+- }
+-
+- for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
+- if (tas_priv->tasdevice[i].cur_prog != prm_no) {
+- tas_priv->tasdevice[i].cur_conf = -1;
+- tas_priv->tasdevice[i].is_loading = true;
+- prog_status++;
+- }
+- tas_priv->tasdevice[i].is_loaderr = false;
+- }
+-
+- if (prog_status) {
+- program = &(tas_fmw->programs[prm_no]);
+- tasdevice_load_data(tas_priv, &(program->dev_data));
+- for (i = 0; i < tas_priv->ndev; i++) {
+- if (tas_priv->tasdevice[i].is_loaderr == true)
+- continue;
+- else if (tas_priv->tasdevice[i].is_loaderr == false
+- && tas_priv->tasdevice[i].is_loading == true) {
+- struct tasdevice_fw *cal_fmw =
+- tas_priv->tasdevice[i].cali_data_fmw;
+-
+- if (cal_fmw) {
+- struct tasdevice_calibration *cal =
+- cal_fmw->calibrations;
+-
+- if (cal)
+- load_calib_data(tas_priv,
+- &(cal->dev_data));
+- }
+- tas_priv->tasdevice[i].cur_prog = prm_no;
+- }
+- }
+- }
+-
+-out:
+- return prog_status;
+-}
+-EXPORT_SYMBOL_NS_GPL(tasdevice_prmg_calibdata_load,
+- SND_SOC_TAS2781_FMWLIB);
+-
+ void tasdevice_tuning_switch(void *context, int state)
+ {
+ struct tasdevice_priv *tas_priv = (struct tasdevice_priv *) context;
+ struct tasdevice_fw *tas_fmw = tas_priv->fmw;
+ int profile_cfg_id = tas_priv->rcabin.profile_cfg_id;
+
+- if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
+- dev_err(tas_priv->dev, "DSP bin file not loaded\n");
++ /*
++ * Only RCA-based Playback can still work with no dsp program running
++ * inside the chip.
++ */
++ switch (tas_priv->fw_state) {
++ case TASDEVICE_RCA_FW_OK:
++ case TASDEVICE_DSP_FW_ALL_OK:
++ break;
++ default:
+ return;
+ }
+
+ if (state == 0) {
+- if (tas_priv->cur_prog < tas_fmw->nr_programs) {
+- /*dsp mode or tuning mode*/
++ if (tas_fmw && tas_priv->cur_prog < tas_fmw->nr_programs) {
++ /* dsp mode or tuning mode */
+ profile_cfg_id = tas_priv->rcabin.profile_cfg_id;
+ tasdevice_select_tuningprm_cfg(tas_priv,
+ tas_priv->cur_prog, tas_priv->cur_conf,
+@@ -2416,9 +2375,10 @@ void tasdevice_tuning_switch(void *context, int state)
+
+ tasdevice_select_cfg_blk(tas_priv, profile_cfg_id,
+ TASDEVICE_BIN_BLK_PRE_POWER_UP);
+- } else
++ } else {
+ tasdevice_select_cfg_blk(tas_priv, profile_cfg_id,
+ TASDEVICE_BIN_BLK_PRE_SHUTDOWN);
++ }
+ }
+ EXPORT_SYMBOL_NS_GPL(tasdevice_tuning_switch,
+ SND_SOC_TAS2781_FMWLIB);
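
The wholesale be32_to_cpup()/be16_to_cpup() to get_unaligned_be32()/get_unaligned_be16() conversion in tas2781-fmwlib is an alignment fix: the firmware image is a byte stream, and casting data + offset to __be32 * performs a potentially unaligned word load, which traps on strict-alignment architectures. The unaligned helpers (hence the new asm/unaligned.h include) compile to safe byte-wise accesses where needed. A minimal sketch:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Read a big-endian u32 field at an arbitrary byte offset. */
static u32 example_read_be32(const u8 *data, size_t *offset)
{
	u32 v = get_unaligned_be32(data + *offset);

	*offset += 4;
	return v;
}
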
+diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
+index 55cd5e3c23a5d9..43775c19444525 100644
+--- a/sound/soc/codecs/tas2781-i2c.c
++++ b/sound/soc/codecs/tas2781-i2c.c
+@@ -2,7 +2,7 @@
+ //
+ // ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+ //
+-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
++// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+ // https://www.ti.com
+ //
+ // The TAS2781 driver implements a flexible and configurable
+@@ -21,7 +21,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_gpio.h>
++#include <linux/of_address.h>
+ #include <linux/of_irq.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+@@ -378,23 +378,37 @@ static void tasdevice_fw_ready(const struct firmware *fmw,
+ mutex_lock(&tas_priv->codec_lock);
+
+ ret = tasdevice_rca_parser(tas_priv, fmw);
+- if (ret)
++ if (ret) {
++ tasdevice_config_info_remove(tas_priv);
+ goto out;
++ }
+ tasdevice_create_control(tas_priv);
+
+ tasdevice_dsp_remove(tas_priv);
+ tasdevice_calbin_remove(tas_priv);
+- tas_priv->fw_state = TASDEVICE_DSP_FW_PENDING;
++ /*
++ * The baseline is the RCA-only case, and then the code attempts to
++ * load DSP firmware but in case of failures just keep going, i.e.
++ * failing to load DSP firmware is NOT an error.
++ */
++ tas_priv->fw_state = TASDEVICE_RCA_FW_OK;
+ scnprintf(tas_priv->coef_binaryname, 64, "%s_coef.bin",
+ tas_priv->dev_name);
+ ret = tasdevice_dsp_parser(tas_priv);
+ if (ret) {
+ dev_err(tas_priv->dev, "dspfw load %s error\n",
+ tas_priv->coef_binaryname);
+- tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL;
+ goto out;
+ }
+- tasdevice_dsp_create_ctrls(tas_priv);
++
++ /*
++ * If no dsp-related kcontrol created, the dsp resource will be freed.
++ */
++ ret = tasdevice_dsp_create_ctrls(tas_priv);
++ if (ret) {
++ dev_err(tas_priv->dev, "dsp controls error\n");
++ goto out;
++ }
+
+ tas_priv->fw_state = TASDEVICE_DSP_FW_ALL_OK;
+
+@@ -412,12 +426,11 @@ static void tasdevice_fw_ready(const struct firmware *fmw,
+ __func__, tas_priv->cal_binaryname[i]);
+ }
+
+- tasdevice_prmg_calibdata_load(tas_priv, 0);
++ tasdevice_prmg_load(tas_priv, 0);
+ tas_priv->cur_prog = 0;
+ out:
+- if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
+- /*If DSP FW fail, kcontrol won't be created */
+- tasdevice_config_info_remove(tas_priv);
++ if (tas_priv->fw_state == TASDEVICE_RCA_FW_OK) {
++ /* If DSP FW fail, DSP kcontrol won't be created. */
+ tasdevice_dsp_remove(tas_priv);
+ }
+ mutex_unlock(&tas_priv->codec_lock);
+@@ -464,14 +477,14 @@ static int tasdevice_startup(struct snd_pcm_substream *substream,
+ {
+ struct snd_soc_component *codec = dai->component;
+ struct tasdevice_priv *tas_priv = snd_soc_component_get_drvdata(codec);
+- int ret = 0;
+
+- if (tas_priv->fw_state != TASDEVICE_DSP_FW_ALL_OK) {
+- dev_err(tas_priv->dev, "DSP bin file not loaded\n");
+- ret = -EINVAL;
++ switch (tas_priv->fw_state) {
++ case TASDEVICE_RCA_FW_OK:
++ case TASDEVICE_DSP_FW_ALL_OK:
++ return 0;
++ default:
++ return -EINVAL;
+ }
+-
+- return ret;
+ }
+
+ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
+@@ -564,7 +577,7 @@ static int tasdevice_codec_probe(struct snd_soc_component *codec)
+ {
+ struct tasdevice_priv *tas_priv = snd_soc_component_get_drvdata(codec);
+
+- return tascodec_init(tas_priv, codec, tasdevice_fw_ready);
++ return tascodec_init(tas_priv, codec, THIS_MODULE, tasdevice_fw_ready);
+ }
+
+ static void tasdevice_deinit(void *context)
+@@ -603,7 +616,7 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv)
+ {
+ struct i2c_client *client = (struct i2c_client *)tas_priv->client;
+ unsigned int dev_addrs[TASDEVICE_MAX_CHANNELS];
+- int rc, i, ndev = 0;
++ int i, ndev = 0;
+
+ if (tas_priv->isacpi) {
+ ndev = device_property_read_u32_array(&client->dev,
+@@ -618,64 +631,34 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv)
+ "ti,audio-slots", dev_addrs, ndev);
+ }
+
+- tas_priv->irq_info.irq_gpio =
++ tas_priv->irq =
+ acpi_dev_gpio_irq_get(ACPI_COMPANION(&client->dev), 0);
+- } else {
++ } else if (IS_ENABLED(CONFIG_OF)) {
+ struct device_node *np = tas_priv->dev->of_node;
+-#ifdef CONFIG_OF
+- const __be32 *reg, *reg_end;
+- int len, sw, aw;
+-
+- aw = of_n_addr_cells(np);
+- sw = of_n_size_cells(np);
+- if (sw == 0) {
+- reg = (const __be32 *)of_get_property(np,
+- "reg", &len);
+- reg_end = reg + len/sizeof(*reg);
+- ndev = 0;
+- do {
+- dev_addrs[ndev] = of_read_number(reg, aw);
+- reg += aw;
+- ndev++;
+- } while (reg < reg_end);
+- } else {
+- ndev = 1;
+- dev_addrs[0] = client->addr;
++ u64 addr;
++
++ for (i = 0; i < TASDEVICE_MAX_CHANNELS; i++) {
++ if (of_property_read_reg(np, i, &addr, NULL))
++ break;
++ dev_addrs[ndev++] = addr;
+ }
+-#else
++
++ tas_priv->irq = of_irq_get(np, 0);
++ } else {
+ ndev = 1;
+ dev_addrs[0] = client->addr;
+-#endif
+- tas_priv->irq_info.irq_gpio = of_irq_get(np, 0);
+ }
+ tas_priv->ndev = ndev;
+ for (i = 0; i < ndev; i++)
+ tas_priv->tasdevice[i].dev_addr = dev_addrs[i];
+
+ tas_priv->reset = devm_gpiod_get_optional(&client->dev,
+- "reset-gpios", GPIOD_OUT_HIGH);
++ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tas_priv->reset))
+ dev_err(tas_priv->dev, "%s Can't get reset GPIO\n",
+ __func__);
+
+ strcpy(tas_priv->dev_name, tasdevice_id[tas_priv->chip_id].name);
+-
+- if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) {
+- rc = gpio_request(tas_priv->irq_info.irq_gpio,
+- "AUDEV-IRQ");
+- if (!rc) {
+- gpio_direction_input(
+- tas_priv->irq_info.irq_gpio);
+-
+- tas_priv->irq_info.irq =
+- gpio_to_irq(tas_priv->irq_info.irq_gpio);
+- } else
+- dev_err(tas_priv->dev, "%s: GPIO %d request error\n",
+- __func__, tas_priv->irq_info.irq_gpio);
+- } else
+- dev_err(tas_priv->dev,
+- "Looking up irq-gpio property failed %d\n",
+- tas_priv->irq_info.irq_gpio);
+ }
+
+ static int tasdevice_i2c_probe(struct i2c_client *i2c)
+@@ -689,6 +672,8 @@ static int tasdevice_i2c_probe(struct i2c_client *i2c)
+ if (!tas_priv)
+ return -ENOMEM;
+
++ dev_set_drvdata(&i2c->dev, tas_priv);
++
+ if (ACPI_HANDLE(&i2c->dev)) {
+ acpi_id = acpi_match_device(i2c->dev.driver->acpi_match_table,
+ &i2c->dev);
+diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c
+index e187d74a17376f..3914deb060cacf 100644
+--- a/sound/soc/codecs/tda7419.c
++++ b/sound/soc/codecs/tda7419.c
+@@ -623,6 +623,7 @@ static const struct of_device_id tda7419_of_match[] = {
+ { .compatible = "st,tda7419" },
+ { },
+ };
++MODULE_DEVICE_TABLE(of, tda7419_of_match);
+
+ static struct i2c_driver tda7419_driver = {
+ .driver = {
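
The one-line tda7419 fix adds the device-table export that module autoloading depends on: MODULE_DEVICE_TABLE(of, ...) emits the modalias entries udev needs to load the module when a matching compatible shows up. Sketch:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);	/* emits modalias entries */
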
+diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
+index 420bbf588efeaf..e100cc9f5c1929 100644
+--- a/sound/soc/codecs/tlv320adc3xxx.c
++++ b/sound/soc/codecs/tlv320adc3xxx.c
+@@ -1429,7 +1429,7 @@ static int adc3xxx_i2c_probe(struct i2c_client *i2c)
+ return ret;
+ }
+
+-static void __exit adc3xxx_i2c_remove(struct i2c_client *client)
++static void adc3xxx_i2c_remove(struct i2c_client *client)
+ {
+ struct adc3xxx *adc3xxx = i2c_get_clientdata(client);
+
+@@ -1452,7 +1452,7 @@ static struct i2c_driver adc3xxx_i2c_driver = {
+ .of_match_table = tlv320adc3xxx_of_match,
+ },
+ .probe = adc3xxx_i2c_probe,
+- .remove = __exit_p(adc3xxx_i2c_remove),
++ .remove = adc3xxx_i2c_remove,
+ .id_table = adc3xxx_i2c_id,
+ };
+
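
The tlv320adc3xxx fix drops __exit/__exit_p() from the i2c remove path. A .remove callback also runs on plain unbind (sysfs unbind, device removal), not only at module exit, and __exit_p() evaluates to NULL for built-in code, so the teardown would silently never run. Remove callbacks therefore stay in regular .text:

#include <linux/i2c.h>

static int example_i2c_probe(struct i2c_client *client)
{
	return 0;
}

static void example_i2c_remove(struct i2c_client *client)
{
	/* undo probe: clocks, regulators, gpios, ... */
}

static struct i2c_driver example_i2c_driver = {
	.driver	= { .name = "example" },
	.probe	= example_i2c_probe,
	.remove	= example_i2c_remove,	/* no __exit_p() wrapper */
};
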
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index a1f04010da95f6..132c1d24f8f6e7 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -1252,12 +1252,12 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ pdev->prop.lane_control_support = true;
+ pdev->prop.simple_clk_stop_capable = true;
+ if (wcd->is_tx) {
+- pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0);
++ pdev->prop.source_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0);
+ pdev->prop.src_dpn_prop = wcd938x_dpn_prop;
+ wcd->ch_info = &wcd938x_sdw_tx_ch_info[0];
+ pdev->prop.wake_capable = true;
+ } else {
+- pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS, 0);
++ pdev->prop.sink_ports = GENMASK(WCD938X_MAX_SWR_PORTS - 1, 0);
+ pdev->prop.sink_dpn_prop = wcd938x_dpn_prop;
+ wcd->ch_info = &wcd938x_sdw_rx_ch_info[0];
+ }
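
GENMASK(h, l) sets bits h through l inclusive, so GENMASK(WCD938X_MAX_SWR_PORTS, 0) described one port too many; the same off-by-one is fixed for wsa881x, wsa883x and wsa884x below. A worked check:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MAX_PORTS	8	/* hypothetical port count */

/* bits 0..7 -> 0xff: exactly EXAMPLE_MAX_PORTS ports; GENMASK(8, 0)
 * would be 0x1ff and advertise a nonexistent ninth port.
 */
static const u32 example_port_mask = GENMASK(EXAMPLE_MAX_PORTS - 1, 0);
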
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index d27b919c63b419..7df1719e07239d 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -210,7 +210,7 @@ struct wcd938x_priv {
+ };
+
+ static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
+-static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, -3000);
++static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(analog_gain, 0, 3000);
+
+ struct wcd938x_mbhc_zdet_param {
+@@ -3589,7 +3589,7 @@ static int wcd938x_probe(struct platform_device *pdev)
+ ret = wcd938x_populate_dt_data(wcd938x, dev);
+ if (ret) {
+ dev_err(dev, "%s: Fail to obtain platform data\n", __func__);
+- return -EINVAL;
++ return ret;
+ }
+
+ ret = wcd938x_add_slave_components(wcd938x, dev, &match);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 83ce5dbecc45d4..4f50b07848fd8f 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2229,6 +2229,9 @@ SND_SOC_DAPM_PGA_E("HPOUT", SND_SOC_NOPM, 0, 0, NULL, 0, hp_event,
+
+ SND_SOC_DAPM_OUTPUT("HPOUTL"),
+ SND_SOC_DAPM_OUTPUT("HPOUTR"),
++
++SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
++SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
+ };
+
+ static const struct snd_soc_dapm_widget wm8962_dapm_spk_mono_widgets[] = {
+@@ -2236,7 +2239,6 @@ SND_SOC_DAPM_MIXER("Speaker Mixer", WM8962_MIXER_ENABLES, 1, 0,
+ spkmixl, ARRAY_SIZE(spkmixl)),
+ SND_SOC_DAPM_MUX_E("Speaker PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
+ out_pga_event, SND_SOC_DAPM_POST_PMU),
+-SND_SOC_DAPM_PGA("Speaker Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("SPKOUT"),
+ };
+
+@@ -2251,9 +2253,6 @@ SND_SOC_DAPM_MUX_E("SPKOUTL PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
+ SND_SOC_DAPM_MUX_E("SPKOUTR PGA", WM8962_PWR_MGMT_2, 3, 0, &spkoutr_mux,
+ out_pga_event, SND_SOC_DAPM_POST_PMU),
+
+-SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
+-SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
+-
+ SND_SOC_DAPM_OUTPUT("SPKOUTL"),
+ SND_SOC_DAPM_OUTPUT("SPKOUTR"),
+ };
+@@ -2366,12 +2365,18 @@ static const struct snd_soc_dapm_route wm8962_spk_mono_intercon[] = {
+ { "Speaker PGA", "Mixer", "Speaker Mixer" },
+ { "Speaker PGA", "DAC", "DACL" },
+
+- { "Speaker Output", NULL, "Speaker PGA" },
+- { "Speaker Output", NULL, "SYSCLK" },
+- { "Speaker Output", NULL, "TOCLK" },
+- { "Speaker Output", NULL, "TEMP_SPK" },
++ { "SPKOUTL Output", NULL, "Speaker PGA" },
++ { "SPKOUTL Output", NULL, "SYSCLK" },
++ { "SPKOUTL Output", NULL, "TOCLK" },
++ { "SPKOUTL Output", NULL, "TEMP_SPK" },
+
+- { "SPKOUT", NULL, "Speaker Output" },
++ { "SPKOUTR Output", NULL, "Speaker PGA" },
++ { "SPKOUTR Output", NULL, "SYSCLK" },
++ { "SPKOUTR Output", NULL, "TOCLK" },
++ { "SPKOUTR Output", NULL, "TEMP_SPK" },
++
++ { "SPKOUT", NULL, "SPKOUTL Output" },
++ { "SPKOUT", NULL, "SPKOUTR Output" },
+ };
+
+ static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = {
+@@ -2914,8 +2919,12 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
+ switch (fll_id) {
+ case WM8962_FLL_MCLK:
+ case WM8962_FLL_BCLK:
++ fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
++ break;
+ case WM8962_FLL_OSC:
+ fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
++ snd_soc_component_update_bits(component, WM8962_PLL2,
++ WM8962_OSC_ENA, WM8962_OSC_ENA);
+ break;
+ case WM8962_FLL_INT:
+ snd_soc_component_update_bits(component, WM8962_FLL_CONTROL_1,
+@@ -2924,7 +2933,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
+ WM8962_FLL_FRC_NCO, WM8962_FLL_FRC_NCO);
+ break;
+ default:
+- dev_err(component->dev, "Unknown FLL source %d\n", ret);
++ dev_err(component->dev, "Unknown FLL source %d\n", source);
+ return -EINVAL;
+ }
+
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index 044b6f604c090a..260bac695b20ab 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
+
+ /* Boost mixer */
+ static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
+-SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1),
++SOC_DAPM_SINGLE("PGA Switch", WM8974_INPPGA, 6, 1, 1),
+ };
+
+ /* Input PGA */
+@@ -246,8 +246,8 @@ static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
+
+ /* Boost Mixer */
+ {"ADC", NULL, "Boost Mixer"},
+- {"Boost Mixer", "Aux Switch", "Aux Input"},
+- {"Boost Mixer", NULL, "Input PGA"},
++ {"Boost Mixer", NULL, "Aux Input"},
++ {"Boost Mixer", "PGA Switch", "Input PGA"},
+ {"Boost Mixer", NULL, "MICP"},
+
+ /* Input PGA */
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index d1b9238d391e81..b9c20e29fe63ef 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -683,11 +683,12 @@ static void wm_adsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
+ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len)
+ {
+- struct cs_dsp_coeff_ctl *cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
++ struct cs_dsp_coeff_ctl *cs_ctl;
+ struct wm_coeff_ctl *ctl;
+ int ret;
+
+ mutex_lock(&dsp->cs_dsp.pwr_lock);
++ cs_ctl = cs_dsp_get_ctl(&dsp->cs_dsp, name, type, alg);
+ ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
+ mutex_unlock(&dsp->cs_dsp.pwr_lock);
+
+@@ -739,19 +740,25 @@ static int wm_adsp_request_firmware_file(struct wm_adsp *dsp,
+ const char *filetype)
+ {
+ struct cs_dsp *cs_dsp = &dsp->cs_dsp;
++ const char *fwf;
+ char *s, c;
+ int ret = 0;
+
++ if (dsp->fwf_name)
++ fwf = dsp->fwf_name;
++ else
++ fwf = dsp->cs_dsp.name;
++
+ if (system_name && asoc_component_prefix)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, dsp->part,
+- dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
++ fwf, wm_adsp_fw[dsp->fw].file, system_name,
+ asoc_component_prefix, filetype);
+ else if (system_name)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, dsp->part,
+- dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
++ fwf, wm_adsp_fw[dsp->fw].file, system_name,
+ filetype);
+ else
+- *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, dsp->fwf_name,
++ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, fwf,
+ wm_adsp_fw[dsp->fw].file, filetype);
+
+ if (*filename == NULL)
+@@ -863,29 +870,18 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
+ }
+
+ adsp_err(dsp, "Failed to request firmware <%s>%s-%s-%s<-%s<%s>>.wmfw\n",
+- cirrus_dir, dsp->part, dsp->fwf_name, wm_adsp_fw[dsp->fw].file,
+- system_name, asoc_component_prefix);
++ cirrus_dir, dsp->part,
++ dsp->fwf_name ? dsp->fwf_name : dsp->cs_dsp.name,
++ wm_adsp_fw[dsp->fw].file, system_name, asoc_component_prefix);
+
+ return -ENOENT;
+ }
+
+ static int wm_adsp_common_init(struct wm_adsp *dsp)
+ {
+- char *p;
+-
+ INIT_LIST_HEAD(&dsp->compr_list);
+ INIT_LIST_HEAD(&dsp->buffer_list);
+
+- if (!dsp->fwf_name) {
+- p = devm_kstrdup(dsp->cs_dsp.dev, dsp->cs_dsp.name, GFP_KERNEL);
+- if (!p)
+- return -ENOMEM;
+-
+- dsp->fwf_name = p;
+- for (; *p != 0; ++p)
+- *p = tolower(*p);
+- }
+-
+ return 0;
+ }
+
+@@ -1451,12 +1447,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
+ &region->base_addr);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
+ &offset);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ region->cumulative_size = offset;
+
+@@ -1467,6 +1463,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ }
+
+ return 0;
++
++err:
++ kfree(buf->regions);
++ return ret;
+ }
+
+ static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
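
Two wm_adsp fixes above share a lifetime theme: wm_adsp_write_ctl() now resolves the control only after taking pwr_lock, so the object cannot go away between lookup and use, and wm_adsp_buffer_populate() frees its half-built regions array on error instead of leaking it. The lookup-under-lock shape, as a sketch with invented names:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_ctl;
struct example_dsp {			/* hypothetical */
	struct mutex pwr_lock;
};

static struct example_ctl *example_get_ctl(struct example_dsp *dsp,
					   const char *name);
static int example_write_ctl(struct example_ctl *ctl,
			     const void *buf, size_t len);

static int example_write_named_ctl(struct example_dsp *dsp,
				   const char *name,
				   const void *buf, size_t len)
{
	struct example_ctl *ctl;
	int ret;

	mutex_lock(&dsp->pwr_lock);
	ctl = example_get_ctl(dsp, name);	/* lookup under the lock */
	ret = example_write_ctl(ctl, buf, len);	/* must handle NULL ctl */
	mutex_unlock(&dsp->pwr_lock);

	return ret;
}
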
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 3c025dabaf7a47..53b828f6810209 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -1152,9 +1152,10 @@ static int wsa881x_probe(struct sdw_slave *pdev,
+ wsa881x->sconfig.frame_rate = 48000;
+ wsa881x->sconfig.direction = SDW_DATA_DIR_RX;
+ wsa881x->sconfig.type = SDW_STREAM_PDM;
+- pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0);
++ pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS - 1, 0);
+ pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
++ pdev->prop.clk_stop_mode1 = true;
+ gpiod_direction_output(wsa881x->sd_n, !wsa881x->sd_n_val);
+
+ wsa881x->regmap = devm_regmap_init_sdw(pdev, &wsa881x_regmap_config);
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 197fae23762f53..1831d4487ba9d1 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -998,15 +998,19 @@ static const struct reg_sequence reg_init[] = {
+ {WSA883X_GMAMP_SUP1, 0xE2},
+ };
+
+-static void wsa883x_init(struct wsa883x_priv *wsa883x)
++static int wsa883x_init(struct wsa883x_priv *wsa883x)
+ {
+ struct regmap *regmap = wsa883x->regmap;
+- int variant, version;
++ int variant, version, ret;
+
+- regmap_read(regmap, WSA883X_OTP_REG_0, &variant);
++ ret = regmap_read(regmap, WSA883X_OTP_REG_0, &variant);
++ if (ret)
++ return ret;
+ wsa883x->variant = variant & WSA883X_ID_MASK;
+
+- regmap_read(regmap, WSA883X_CHIP_ID0, &version);
++ ret = regmap_read(regmap, WSA883X_CHIP_ID0, &version);
++ if (ret)
++ return ret;
+ wsa883x->version = version;
+
+ switch (wsa883x->variant) {
+@@ -1041,6 +1045,8 @@ static void wsa883x_init(struct wsa883x_priv *wsa883x)
+ WSA883X_DRE_OFFSET_MASK,
+ wsa883x->comp_offset);
+ }
++
++ return 0;
+ }
+
+ static int wsa883x_update_status(struct sdw_slave *slave,
+@@ -1049,7 +1055,7 @@ static int wsa883x_update_status(struct sdw_slave *slave,
+ struct wsa883x_priv *wsa883x = dev_get_drvdata(&slave->dev);
+
+ if (status == SDW_SLAVE_ATTACHED && slave->dev_num > 0)
+- wsa883x_init(wsa883x);
++ return wsa883x_init(wsa883x);
+
+ return 0;
+ }
+@@ -1098,7 +1104,11 @@ static int wsa_dev_mode_put(struct snd_kcontrol *kcontrol,
+ return 1;
+ }
+
+-static const DECLARE_TLV_DB_SCALE(pa_gain, -300, 150, -300);
++static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(pa_gain,
++ 0, 14, TLV_DB_SCALE_ITEM(-300, 0, 0),
++ 15, 29, TLV_DB_SCALE_ITEM(-300, 150, 0),
++ 30, 31, TLV_DB_SCALE_ITEM(1800, 0, 0),
++);
+
+ static int wsa883x_get_swr_port(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+@@ -1203,9 +1213,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ break;
+ }
+
+- snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+- WSA883X_DRE_GAIN_EN_MASK,
+- WSA883X_DRE_GAIN_FROM_CSR);
+ if (wsa883x->port_enable[WSA883X_PORT_COMP])
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_OFFSET_MASK,
+@@ -1218,9 +1225,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
+ WSA883X_PDM_EN_MASK,
+ WSA883X_PDM_ENABLE);
+- snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+- WSA883X_GLOBAL_PA_EN_MASK,
+- WSA883X_GLOBAL_PA_ENABLE);
+
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+@@ -1346,6 +1350,7 @@ static const struct snd_soc_dai_ops wsa883x_dai_ops = {
+ .hw_free = wsa883x_hw_free,
+ .mute_stream = wsa883x_digital_mute,
+ .set_stream = wsa883x_set_sdw_stream,
++ .mute_unmute_on_trigger = true,
+ };
+
+ static struct snd_soc_dai_driver wsa883x_dais[] = {
+@@ -1400,7 +1405,15 @@ static int wsa883x_probe(struct sdw_slave *pdev,
+ wsa883x->sconfig.direction = SDW_DATA_DIR_RX;
+ wsa883x->sconfig.type = SDW_STREAM_PDM;
+
+- pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS, 0);
++ /**
++ * Port map index starts with 0, however the data port for this codec
++ * are from index 1
++ */
++ if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1],
++ WSA883X_MAX_SWR_PORTS))
++ dev_dbg(dev, "Static Port mapping not specified\n");
++
++ pdev->prop.sink_ports = GENMASK(WSA883X_MAX_SWR_PORTS - 1, 0);
+ pdev->prop.simple_clk_stop_capable = true;
+ pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
+ pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
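
wsa883x_init() becoming int-returning closes a quieter bug: regmap_read() can fail during SoundWire attach, and the old void version would then classify the variant from a never-written local. A sketch of the propagation, with a hypothetical register:

#include <linux/regmap.h>

static int example_read_variant(struct regmap *map, unsigned int *variant)
{
	unsigned int v;
	int ret;

	ret = regmap_read(map, 0x0, &v);
	if (ret)
		return ret;	/* caller sees the failure, v never used */

	*variant = v & 0xf;
	return 0;
}
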
+diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
+index 993d76b18b5367..1cd52fab7b40d0 100644
+--- a/sound/soc/codecs/wsa884x.c
++++ b/sound/soc/codecs/wsa884x.c
+@@ -1858,7 +1858,15 @@ static int wsa884x_probe(struct sdw_slave *pdev,
+ wsa884x->sconfig.direction = SDW_DATA_DIR_RX;
+ wsa884x->sconfig.type = SDW_STREAM_PDM;
+
+- pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS, 0);
++ /**
++ * Port map index starts with 0, however the data port for this codec
++ * are from index 1
++ */
++ if (of_property_read_u32_array(dev->of_node, "qcom,port-mapping", &pdev->m_port_map[1],
++ WSA884X_MAX_SWR_PORTS))
++ dev_dbg(dev, "Static Port mapping not specified\n");
++
++ pdev->prop.sink_ports = GENMASK(WSA884X_MAX_SWR_PORTS - 1, 0);
+ pdev->prop.simple_clk_stop_capable = true;
+ pdev->prop.sink_dpn_prop = wsa884x_sink_dpn_prop;
+ pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index bab7d34cf585bf..f76252b3f59133 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -41,6 +41,7 @@
+
+ /**
+ * struct codec_priv - CODEC private data
++ * @mclk: Main clock of the CODEC
+ * @mclk_freq: Clock rate of MCLK
+ * @free_freq: Clock rate of MCLK for hw_free()
+ * @mclk_id: MCLK (or main clock) id for set_sysclk()
+@@ -558,6 +559,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ if (!priv)
+ return -ENOMEM;
+
++ priv->pdev = pdev;
++
+ cpu_np = of_parse_phandle(np, "audio-cpu", 0);
+ /* Give a chance to old DT binding */
+ if (!cpu_np)
+@@ -780,7 +783,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ }
+
+ /* Initialize sound card */
+- priv->pdev = pdev;
+ priv->card.dev = &pdev->dev;
+ priv->card.owner = THIS_MODULE;
+ ret = snd_soc_of_parse_card_name(&priv->card, "model");
+diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
+index ba62995c909ac3..ec53bda46a467d 100644
+--- a/sound/soc/fsl/fsl_easrc.c
++++ b/sound/soc/fsl/fsl_easrc.c
+@@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev)
+ &fsl_easrc_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register ASoC DAI\n");
+- return ret;
++ goto err_pm_disable;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
+ NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ASoC platform\n");
+- return ret;
++ goto err_pm_disable;
+ }
+
+ return 0;
++
++err_pm_disable:
++ pm_runtime_disable(&pdev->dev);
++ return ret;
+ }
+
+ static void fsl_easrc_remove(struct platform_device *pdev)
+diff --git a/sound/soc/fsl/fsl_qmc_audio.c b/sound/soc/fsl/fsl_qmc_audio.c
+index 56d6b0b039a2e9..df8188159a5823 100644
+--- a/sound/soc/fsl/fsl_qmc_audio.c
++++ b/sound/soc/fsl/fsl_qmc_audio.c
+@@ -604,6 +604,8 @@ static int qmc_audio_dai_parse(struct qmc_audio *qmc_audio, struct device_node *
+
+ qmc_dai->name = devm_kasprintf(qmc_audio->dev, GFP_KERNEL, "%s.%d",
+ np->parent->name, qmc_dai->id);
++ if (!qmc_dai->name)
++ return -ENOMEM;
+
+ qmc_dai->qmc_chan = devm_qmc_chan_get_byphandle(qmc_audio->dev, np,
+ "fsl,qmc-chan");
+diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c
+index abe19a8a7aa72d..f7180f1959dd08 100644
+--- a/sound/soc/fsl/fsl_rpmsg.c
++++ b/sound/soc/fsl/fsl_rpmsg.c
+@@ -239,7 +239,7 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ &fsl_rpmsg_dai, 1);
+ if (ret)
+- return ret;
++ goto err_pm_disable;
+
+ rpmsg->card_pdev = platform_device_register_data(&pdev->dev,
+ "imx-audio-rpmsg",
+@@ -249,16 +249,22 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
+ if (IS_ERR(rpmsg->card_pdev)) {
+ dev_err(&pdev->dev, "failed to register rpmsg card\n");
+ ret = PTR_ERR(rpmsg->card_pdev);
+- return ret;
++ goto err_pm_disable;
+ }
+
+ return 0;
++
++err_pm_disable:
++ pm_runtime_disable(&pdev->dev);
++ return ret;
+ }
+
+ static void fsl_rpmsg_remove(struct platform_device *pdev)
+ {
+ struct fsl_rpmsg *rpmsg = platform_get_drvdata(pdev);
+
++ pm_runtime_disable(&pdev->dev);
++
+ if (rpmsg->card_pdev)
+ platform_device_unregister(rpmsg->card_pdev);
+ }
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 8a9a30dd31e208..3d202398c5411b 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -674,6 +674,20 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+ FSL_SAI_CR3_TRCE_MASK,
+ FSL_SAI_CR3_TRCE((dl_cfg[dl_cfg_idx].mask[tx] & trce_mask)));
+
++ /*
++	 * If TERE and FSD_MSTR are enabled before the word width is
++	 * configured, the frame sync clock can be generated incorrectly,
++	 * because the word width affects frame sync clock generation.
++	 *
++	 * TERE is enabled earlier only on i.MX8MP due to a hardware
++	 * limitation, so for that case FSD_MSTR must be disabled before
++	 * the word width is configured and re-enabled afterwards.
++ */
++ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
++ !sai->is_consumer_mode)
++ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
++ FSL_SAI_CR4_FSD_MSTR, 0);
++
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
+ FSL_SAI_CR4_CHMOD_MASK,
+@@ -681,6 +695,13 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
+ FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
+ FSL_SAI_CR5_FBT_MASK, val_cr5);
++
++ /* Enable FSD_MSTR after configuring word width */
++ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
++ !sai->is_consumer_mode)
++ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
++ FSL_SAI_CR4_FSD_MSTR, FSL_SAI_CR4_FSD_MSTR);
++
+ regmap_write(sai->regmap, FSL_SAI_xMR(tx),
+ ~0UL - ((1 << min(channels, slots)) - 1));
+
+@@ -694,6 +715,9 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int ofs = sai->soc_data->reg_offset;
+
++	/* Clear xMR to avoid a channel swap in the mclk_with_tere case */
++ regmap_write(sai->regmap, FSL_SAI_xMR(tx), 0);
++
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx, ofs),
+ FSL_SAI_CR3_TRCE_MASK, 0);
+
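
The FSD_MSTR clear/set sequence above relies on regmap_update_bits() being a read-modify-write: new = (old & ~mask) | (val & mask), so only the masked bit changes while the rest of the register is preserved. A user-space model of that semantic (the register value and bit position are made up for the demonstration):

	#include <stdio.h>
	#include <stdint.h>

	/* User-space model of regmap_update_bits(): only bits in 'mask' change. */
	static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
	{
		return (old & ~mask) | (val & mask);
	}

	int main(void)
	{
		uint32_t cr4 = 0x1f;		/* made-up register content */
		uint32_t fsd_mstr = 1u << 4;	/* made-up FSD_MSTR bit position */

		cr4 = update_bits(cr4, fsd_mstr, 0);		/* clear -> 0x0f */
		cr4 = update_bits(cr4, fsd_mstr, fsd_mstr);	/* set   -> 0x1f */
		printf("%#x\n", cr4);
		return 0;
	}
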
+diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
+index fa0a15263c66dc..c46f64557a7ffd 100644
+--- a/sound/soc/fsl/fsl_xcvr.c
++++ b/sound/soc/fsl/fsl_xcvr.c
+@@ -174,7 +174,9 @@ static int fsl_xcvr_activate_ctl(struct snd_soc_dai *dai, const char *name,
+ struct snd_kcontrol *kctl;
+ bool enabled;
+
+- kctl = snd_soc_card_get_kcontrol(card, name);
++ lockdep_assert_held(&card->snd_card->controls_rwsem);
++
++ kctl = snd_soc_card_get_kcontrol_locked(card, name);
+ if (kctl == NULL)
+ return -ENOENT;
+
+@@ -358,7 +360,7 @@ static int fsl_xcvr_en_aud_pll(struct fsl_xcvr *xcvr, u32 freq)
+ struct device *dev = &xcvr->pdev->dev;
+ int ret;
+
+- freq = xcvr->soc_data->spdif_only ? freq / 10 : freq;
++ freq = xcvr->soc_data->spdif_only ? freq / 5 : freq;
+ clk_disable_unprepare(xcvr->phy_clk);
+ ret = clk_set_rate(xcvr->phy_clk, freq);
+ if (ret < 0) {
+@@ -409,11 +411,21 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ u32 m_ctl = 0, v_ctl = 0;
+ u32 r = substream->runtime->rate, ch = substream->runtime->channels;
+- u32 fout = 32 * r * ch * 10 * 2;
++ u32 fout = 32 * r * ch * 10;
+ int ret = 0;
+
+ switch (xcvr->mode) {
+ case FSL_XCVR_MODE_SPDIF:
++ if (xcvr->soc_data->spdif_only && tx) {
++ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
++ FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM,
++ FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM);
++ if (ret < 0) {
++ dev_err(dai->dev, "Failed to set bypass fem: %d\n", ret);
++ return ret;
++ }
++ }
++ fallthrough;
+ case FSL_XCVR_MODE_ARC:
+ if (tx) {
+ ret = fsl_xcvr_en_aud_pll(xcvr, fout);
+@@ -566,10 +578,14 @@ static int fsl_xcvr_startup(struct snd_pcm_substream *substream,
+ xcvr->streams |= BIT(substream->stream);
+
+ if (!xcvr->soc_data->spdif_only) {
++ struct snd_soc_card *card = dai->component->card;
++
+ /* Disable XCVR controls if there is stream started */
++ down_read(&card->snd_card->controls_rwsem);
+ fsl_xcvr_activate_ctl(dai, fsl_xcvr_mode_kctl.name, false);
+ fsl_xcvr_activate_ctl(dai, fsl_xcvr_arc_mode_kctl.name, false);
+ fsl_xcvr_activate_ctl(dai, fsl_xcvr_earc_capds_kctl.name, false);
++ up_read(&card->snd_card->controls_rwsem);
+ }
+
+ return 0;
+@@ -588,11 +604,15 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
+ /* Enable XCVR controls if there is no stream started */
+ if (!xcvr->streams) {
+ if (!xcvr->soc_data->spdif_only) {
++ struct snd_soc_card *card = dai->component->card;
++
++ down_read(&card->snd_card->controls_rwsem);
+ fsl_xcvr_activate_ctl(dai, fsl_xcvr_mode_kctl.name, true);
+ fsl_xcvr_activate_ctl(dai, fsl_xcvr_arc_mode_kctl.name,
+ (xcvr->mode == FSL_XCVR_MODE_ARC));
+ fsl_xcvr_activate_ctl(dai, fsl_xcvr_earc_capds_kctl.name,
+ (xcvr->mode == FSL_XCVR_MODE_EARC));
++ up_read(&card->snd_card->controls_rwsem);
+ }
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_IER0,
+ FSL_XCVR_IRQ_EARC_ALL, 0);
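
The fsl_xcvr changes split the control lookup into a lock-free helper plus callers that hold the card's controls_rwsem across a batch of lookups; lockdep_assert_held() makes any unlocked caller trip lockdep at runtime. A sketch of the resulting contract (the control name is a placeholder and the activation logic is elided):

	/* Helper: documents and enforces that the caller holds the rwsem. */
	static int activate_ctl(struct snd_soc_dai *dai, const char *name, bool active)
	{
		struct snd_soc_card *card = dai->component->card;
		struct snd_kcontrol *kctl;

		lockdep_assert_held(&card->snd_card->controls_rwsem);

		kctl = snd_soc_card_get_kcontrol_locked(card, name);
		return kctl ? 0 : -ENOENT;	/* activation logic elided */
	}

	/* Caller: one lock acquisition covers several lookups. */
	down_read(&card->snd_card->controls_rwsem);
	activate_ctl(dai, "Some Switch", false);	/* placeholder name */
	up_read(&card->snd_card->controls_rwsem);
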
+diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
+index 356a0bc3b126b3..f8144bf4c90d33 100644
+--- a/sound/soc/fsl/imx-card.c
++++ b/sound/soc/fsl/imx-card.c
+@@ -714,6 +714,7 @@ static int imx_card_probe(struct platform_device *pdev)
+
+ data->plat_data = plat_data;
+ data->card.dev = &pdev->dev;
++ data->card.owner = THIS_MODULE;
+
+ dev_set_drvdata(&pdev->dev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
+index 9014978100207a..3f7ccae3f6b1ab 100644
+--- a/sound/soc/fsl/mpc5200_dma.c
++++ b/sound/soc/fsl/mpc5200_dma.c
+@@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
+
+ /**
+ * psc_dma_trigger: start and stop the DMA transfer.
++ * @component: component being triggered
++ * @substream: substream being triggered
++ * @cmd: trigger command (start/stop/pause/resume)
+ *
+ * This function is called by ALSA to start, stop, pause, and resume the DMA
+ * transfer of data.
+diff --git a/sound/soc/google/chv3-i2s.c b/sound/soc/google/chv3-i2s.c
+index 0f65134449066b..462e970b954f10 100644
+--- a/sound/soc/google/chv3-i2s.c
++++ b/sound/soc/google/chv3-i2s.c
+@@ -322,6 +322,7 @@ static const struct of_device_id chv3_i2s_of_match[] = {
+ { .compatible = "google,chv3-i2s" },
+ {},
+ };
++MODULE_DEVICE_TABLE(of, chv3_i2s_of_match);
+
+ static struct platform_driver chv3_i2s_driver = {
+ .probe = chv3_i2s_probe,
+diff --git a/sound/soc/intel/avs/board_selection.c b/sound/soc/intel/avs/board_selection.c
+index 59a13feec57b25..0db5e530a8de2b 100644
+--- a/sound/soc/intel/avs/board_selection.c
++++ b/sound/soc/intel/avs/board_selection.c
+@@ -227,6 +227,82 @@ static struct snd_soc_acpi_mach avs_gml_i2s_machines[] = {
+ {},
+ };
+
++static struct snd_soc_acpi_mach avs_cnl_i2s_machines[] = {
++ {
++ .id = "INT34C2",
++ .drv_name = "avs_rt274",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(0),
++ },
++ .tplg_filename = "rt274-tplg.bin",
++ },
++ {
++ .id = "10EC5682",
++ .drv_name = "avs_rt5682",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(1),
++ },
++ .tplg_filename = "rt5682-tplg.bin",
++ },
++ {},
++};
++
++static struct snd_soc_acpi_mach avs_icl_i2s_machines[] = {
++ {
++ .id = "INT343A",
++ .drv_name = "avs_rt298",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(0),
++ },
++ .tplg_filename = "rt298-tplg.bin",
++ },
++ {
++ .id = "INT34C2",
++ .drv_name = "avs_rt274",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(0),
++ },
++ .tplg_filename = "rt274-tplg.bin",
++ },
++ {},
++};
++
++static struct snd_soc_acpi_mach avs_tgl_i2s_machines[] = {
++ {
++ .id = "INT34C2",
++ .drv_name = "avs_rt274",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(0),
++ },
++ .tplg_filename = "rt274-tplg.bin",
++ },
++ {
++ .id = "10EC0298",
++ .drv_name = "avs_rt298",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(0),
++ },
++ .tplg_filename = "rt298-tplg.bin",
++ },
++ {
++ .id = "10EC1308",
++ .drv_name = "avs_rt1308",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(1),
++ },
++ .tplg_filename = "rt1308-tplg.bin",
++ },
++ {
++ .id = "ESSX8336",
++ .drv_name = "avs_es8336",
++ .mach_params = {
++ .i2s_link_mask = AVS_SSP(0),
++ },
++ .tplg_filename = "es8336-tplg.bin",
++ },
++ {},
++};
++
+ static struct snd_soc_acpi_mach avs_test_i2s_machines[] = {
+ {
+ .drv_name = "avs_i2s_test",
+@@ -287,6 +363,15 @@ static const struct avs_acpi_boards i2s_boards[] = {
+ AVS_MACH_ENTRY(HDA_KBL_LP, avs_kbl_i2s_machines),
+ AVS_MACH_ENTRY(HDA_APL, avs_apl_i2s_machines),
+ AVS_MACH_ENTRY(HDA_GML, avs_gml_i2s_machines),
++ AVS_MACH_ENTRY(HDA_CNL_LP, avs_cnl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_CNL_H, avs_cnl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_CML_LP, avs_cnl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_ICL_LP, avs_icl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_TGL_LP, avs_tgl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_EHL_0, avs_tgl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_ADL_P, avs_tgl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_RPL_P_0, avs_tgl_i2s_machines),
++ AVS_MACH_ENTRY(HDA_RPL_M, avs_tgl_i2s_machines),
+ {},
+ };
+
+diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
+index 7324869d613274..7db1b89b0d9e99 100644
+--- a/sound/soc/intel/avs/boards/ssm4567.c
++++ b/sound/soc/intel/avs/boards/ssm4567.c
+@@ -166,7 +166,6 @@ static int avs_ssm4567_probe(struct platform_device *pdev)
+ card->dapm_routes = card_base_routes;
+ card->num_dapm_routes = ARRAY_SIZE(card_base_routes);
+ card->fully_routed = true;
+- card->disable_route_checks = true;
+
+ ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ if (ret)
+diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c
+index d7a9390b5e483c..585579840b646e 100644
+--- a/sound/soc/intel/avs/cldma.c
++++ b/sound/soc/intel/avs/cldma.c
+@@ -35,7 +35,7 @@ struct hda_cldma {
+
+ unsigned int buffer_size;
+ unsigned int num_periods;
+- unsigned int stream_tag;
++ unsigned char stream_tag;
+ void __iomem *sd_addr;
+
+ struct snd_dma_buffer dmab_data;
+diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c
+index adbe23a47847b7..a4b9e209f22300 100644
+--- a/sound/soc/intel/avs/path.c
++++ b/sound/soc/intel/avs/path.c
+@@ -368,6 +368,7 @@ static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
+ struct avs_tplg_module *t = mod->template;
+ struct avs_asrc_cfg cfg;
+
++ memset(&cfg, 0, sizeof(cfg));
+ cfg.base.cpc = t->cfg_base->cpc;
+ cfg.base.ibs = t->cfg_base->ibs;
+ cfg.base.obs = t->cfg_base->obs;
+diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c
+index 4cab8c6c457666..341773ec49072c 100644
+--- a/sound/soc/intel/avs/probes.c
++++ b/sound/soc/intel/avs/probes.c
+@@ -19,8 +19,11 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
+ struct avs_probe_cfg cfg = {{0}};
+ struct avs_module_entry mentry;
+ u8 dummy;
++ int ret;
+
+- avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
++ ret = avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
++ if (ret)
++ return ret;
+
+ /*
+ * Probe module uses no cycles, audio data format and input and output
+@@ -39,11 +42,12 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
+ static void avs_dsp_delete_probe(struct avs_dev *adev)
+ {
+ struct avs_module_entry mentry;
++ int ret;
+
+- avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
+-
+- /* There is only ever one probe module instance. */
+- avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0);
++ ret = avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
++ if (!ret)
++ /* There is only ever one probe module instance. */
++ avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0);
+ }
+
+ static inline struct hdac_ext_stream *avs_compr_get_host_stream(struct snd_compr_stream *cstream)
+diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
+index 45d0eb2a8e7105..141255420c12bf 100644
+--- a/sound/soc/intel/avs/topology.c
++++ b/sound/soc/intel/avs/topology.c
+@@ -1412,6 +1412,8 @@ static int avs_widget_load(struct snd_soc_component *comp, int index,
+ if (!le32_to_cpu(dw->priv.size))
+ return 0;
+
++ w->no_wname_in_kcontrol_name = true;
++
+ if (w->ignore_suspend && !AVS_S0IX_SUPPORTED) {
+ dev_info_once(comp->dev, "Device does not support S0IX, check BIOS settings\n");
+ w->ignore_suspend = false;
+diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+index cbfff466c5c863..b6e6601b30c210 100644
+--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
++++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+@@ -768,6 +768,7 @@ static struct snd_soc_card broxton_audio_card = {
+ .dapm_routes = audio_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = bxt_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
+index bf89fe80423d08..4275c40e8114df 100644
+--- a/sound/soc/intel/boards/bxt_rt298.c
++++ b/sound/soc/intel/boards/bxt_rt298.c
+@@ -574,6 +574,7 @@ static struct snd_soc_card broxton_rt298 = {
+ .dapm_routes = broxton_rt298_map,
+ .num_dapm_routes = ARRAY_SIZE(broxton_rt298_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = bxt_card_late_probe,
+
+ };
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 630784b6cb6d30..5b8b21ade9cfed 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -83,6 +83,7 @@ enum {
+ #define BYT_RT5640_HSMIC2_ON_IN1 BIT(27)
+ #define BYT_RT5640_JD_HP_ELITEP_1000G2 BIT(28)
+ #define BYT_RT5640_USE_AMCR0F28 BIT(29)
++#define BYT_RT5640_SWAPPED_SPEAKERS BIT(30)
+
+ #define BYTCR_INPUT_DEFAULTS \
+ (BYT_RT5640_IN3_MAP | \
+@@ -157,6 +158,8 @@ static void log_quirks(struct device *dev)
+ dev_info(dev, "quirk MONO_SPEAKER enabled\n");
+ if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS)
+ dev_info(dev, "quirk NO_SPEAKERS enabled\n");
++ if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS)
++ dev_info(dev, "quirk SWAPPED_SPEAKERS enabled\n");
+ if (byt_rt5640_quirk & BYT_RT5640_LINEOUT)
+ dev_info(dev, "quirk LINEOUT enabled\n");
+ if (byt_rt5640_quirk & BYT_RT5640_LINEOUT_AS_HP2)
+@@ -607,6 +610,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ {
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 CESIUM"),
++ },
++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++ BYT_RT5640_JD_NOT_INV |
++ BYT_RT5640_DIFF_MIC |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
++ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+@@ -633,28 +647,30 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_USE_AMCR0F28),
+ },
+ {
++ /* Asus T100TAF, unlike other T100TA* models this one has a mono speaker */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
++ BYT_RT5640_MONO_SPEAKER |
++ BYT_RT5640_DIFF_MIC |
++ BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
++ /* Asus T100TA and T100TAM, must come after T100TAF (mono spk) match */
+ .matches = {
+- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+- BYT_RT5640_MONO_SPEAKER |
+- BYT_RT5640_DIFF_MIC |
+- BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+@@ -682,6 +698,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ { /* Chuwi Vi8 dual-boot (CWI506) */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "i86"),
++ /* The above are too generic, also match BIOS info */
++ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
++ },
++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++ BYT_RT5640_MONO_SPEAKER |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
++ },
+ {
+ /* Chuwi Vi10 (CWI505) */
+ .matches = {
+@@ -894,6 +922,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ {
++ /* Medion Lifetab S10346 */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++ /* Above strings are much too generic, also match on BIOS date */
++ DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
++ },
++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++ BYT_RT5640_SWAPPED_SPEAKERS |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
++ },
+ { /* Mele PCG03 Mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"),
+@@ -1619,11 +1660,11 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ const char *platform_name;
+ struct acpi_device *adev;
+ struct device *codec_dev;
++ const char *cfg_spk;
+ bool sof_parent;
+ int ret_val = 0;
+ int dai_index = 0;
+- int i, cfg_spk;
+- int aif;
++ int i, aif;
+
+ is_bytcr = false;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -1783,13 +1824,16 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ }
+
+ if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS) {
+- cfg_spk = 0;
++ cfg_spk = "0";
+ spk_type = "none";
+ } else if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) {
+- cfg_spk = 1;
++ cfg_spk = "1";
+ spk_type = "mono";
++ } else if (byt_rt5640_quirk & BYT_RT5640_SWAPPED_SPEAKERS) {
++ cfg_spk = "swapped";
++ spk_type = "swapped";
+ } else {
+- cfg_spk = 2;
++ cfg_spk = "2";
+ spk_type = "stereo";
+ }
+
+@@ -1804,7 +1848,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ headset2_string = " cfg-hs2:in1";
+
+ snprintf(byt_rt5640_components, sizeof(byt_rt5640_components),
+- "cfg-spk:%d cfg-mic:%s aif:%d%s%s", cfg_spk,
++ "cfg-spk:%s cfg-mic:%s aif:%d%s%s", cfg_spk,
+ map_name[BYT_RT5640_MAP(byt_rt5640_quirk)], aif,
+ lineout_string, headset2_string);
+ byt_rt5640_card.components = byt_rt5640_components;
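
cfg_spk switches from an int to a string above because the new swapped-speakers case has no sensible numeric encoding; the value lands in the card's components string, which UCM profiles match on. A user-space sketch of the resulting string (the mic map, AIF number, and suffixes are placeholder values):

	#include <stdio.h>

	int main(void)
	{
		char components[128];
		const char *cfg_spk = "swapped";	/* was 0/1/2 before the change */

		snprintf(components, sizeof(components),
			 "cfg-spk:%s cfg-mic:%s aif:%d%s%s",
			 cfg_spk, "in1", 1, "", "");	/* placeholder values */
		printf("%s\n", components);	/* cfg-spk:swapped cfg-mic:in1 aif:1 */
		return 0;
	}
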
+diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+index cf0f89db3e204f..0f9bbb970b230a 100644
+--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
++++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+@@ -649,6 +649,8 @@ static int geminilake_audio_probe(struct platform_device *pdev)
+ card = &glk_audio_card_rt5682_m98357a;
+ card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(card, ctx);
++ if (!snd_soc_acpi_sof_parent(&pdev->dev))
++ card->disable_route_checks = true;
+
+ /* override platform name, if required */
+ mach = pdev->dev.platform_data;
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+index 97149513076f9f..a7868e5735bcbe 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+@@ -639,6 +639,7 @@ static struct snd_soc_card kabylake_audio_card_da7219_m98357a = {
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
+index a1f8234c77bd24..2e75070eb9216c 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
+@@ -1036,6 +1036,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+@@ -1054,6 +1055,7 @@ static struct snd_soc_card kbl_audio_card_max98927 = {
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+@@ -1071,6 +1073,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98373 = {
+ .codec_conf = max98373_codec_conf,
+ .num_configs = ARRAY_SIZE(max98373_codec_conf),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+@@ -1088,6 +1091,7 @@ static struct snd_soc_card kbl_audio_card_max98373 = {
+ .codec_conf = max98373_codec_conf,
+ .num_configs = ARRAY_SIZE(max98373_codec_conf),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
+index 2c7a547f63c901..358d6062281212 100644
+--- a/sound/soc/intel/boards/kbl_rt5660.c
++++ b/sound/soc/intel/boards/kbl_rt5660.c
+@@ -518,6 +518,7 @@ static struct snd_soc_card kabylake_audio_card_rt5660 = {
+ .dapm_routes = kabylake_rt5660_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_rt5660_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+index 2d4224c5b1520b..d110ebd10bca20 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+@@ -966,6 +966,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663_m98927 = {
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+@@ -982,6 +983,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663 = {
+ .dapm_routes = kabylake_5663_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_5663_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+index 2c79fca57b19e7..a15d2c30b6c469 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+@@ -791,6 +791,7 @@ static struct snd_soc_card kabylake_audio_card = {
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = kabylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index a06e05154ae1f7..da6079c61f88d8 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -154,6 +154,8 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
+ card->dapm_widgets = skl_hda_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
+ if (!ctx->idisp_codec) {
++ card->dapm_routes = &skl_hda_map[IDISP_ROUTE_COUNT];
++ num_route -= IDISP_ROUTE_COUNT;
+ for (i = 0; i < IDISP_DAI_COUNT; i++) {
+ skl_hda_be_dai_links[i].codecs = &asoc_dummy_dlc;
+ skl_hda_be_dai_links[i].num_codecs = 1;
+@@ -225,6 +227,8 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
+ hda_soc_card.dev = &pdev->dev;
++ if (!snd_soc_acpi_sof_parent(&pdev->dev))
++ hda_soc_card.disable_route_checks = true;
+
+ if (mach->mach_params.dmic_num > 0) {
+ snprintf(hda_soc_components, sizeof(hda_soc_components),
+diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+index e13a5a4d8f7e9a..2d424e3e2abd8c 100644
+--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
++++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+@@ -654,6 +654,7 @@ static struct snd_soc_card skylake_audio_card = {
+ .dapm_routes = skylake_map,
+ .num_dapm_routes = ARRAY_SIZE(skylake_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = skylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
+index 4f3d655e2bfa8d..0a4795a94a768e 100644
+--- a/sound/soc/intel/boards/skl_rt286.c
++++ b/sound/soc/intel/boards/skl_rt286.c
+@@ -523,6 +523,7 @@ static struct snd_soc_card skylake_rt286 = {
+ .dapm_routes = skylake_rt286_map,
+ .num_dapm_routes = ARRAY_SIZE(skylake_rt286_map),
+ .fully_routed = true,
++ .disable_route_checks = true,
+ .late_probe = skylake_card_late_probe,
+ };
+
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 842649501e303a..5980fce8179760 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -243,6 +243,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ SOF_SDW_PCH_DMIC |
+ RT711_JD2_100K),
+ },
++ {
++		/* NUC15 LAPRC710 SKUs */
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
++ },
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ SOF_SDW_PCH_DMIC |
++ RT711_JD2_100K),
++ },
+ /* TigerLake-SDCA devices */
+ {
+ .callback = sof_sdw_quirk_cb,
+@@ -425,6 +436,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0C0F")
++ },
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ RT711_JD2),
++ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+@@ -491,6 +511,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ SOF_BT_OFFLOAD_SSP(1) |
+ SOF_SSP_BT_OFFLOAD_PRESENT),
+ },
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN Transcend Gaming Laptop"),
++ },
++ .driver_data = (void *)(RT711_JD2),
++ },
++
+ /* LunarLake devices */
+ {
+ .callback = sof_sdw_quirk_cb,
+@@ -1197,11 +1226,11 @@ static int fill_sdw_codec_dlc(struct device *dev,
+ else if (is_unique_device(adr_link, sdw_version, mfg_id, part_id,
+ class_id, adr_index))
+ codec->name = devm_kasprintf(dev, GFP_KERNEL,
+- "sdw:%01x:%04x:%04x:%02x", link_id,
++ "sdw:0:%01x:%04x:%04x:%02x", link_id,
+ mfg_id, part_id, class_id);
+ else
+ codec->name = devm_kasprintf(dev, GFP_KERNEL,
+- "sdw:%01x:%04x:%04x:%02x:%01x", link_id,
++ "sdw:0:%01x:%04x:%04x:%02x:%01x", link_id,
+ mfg_id, part_id, class_id, unique_id);
+
+ if (!codec->name)
+@@ -1374,7 +1403,7 @@ static int create_sdw_dailink(struct snd_soc_card *card, int *link_index,
+ continue;
+
+ /* j reset after loop, adr_index only applies to first link */
+- for (; j < adr_link_next->num_adr; j++) {
++ for (; j < adr_link_next->num_adr && codec_dlc_index < codec_num; j++) {
+ const struct snd_soc_acpi_endpoint *endpoints;
+
+ endpoints = adr_link_next->adr_d[j].endpoints;
+@@ -1934,6 +1963,12 @@ static int mc_probe(struct platform_device *pdev)
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ codec_info_list[i].amp_num = 0;
+
++ if (mach->mach_params.subsystem_id_set) {
++ snd_soc_card_set_pci_ssid(card,
++ mach->mach_params.subsystem_vendor,
++ mach->mach_params.subsystem_device);
++ }
++
+ ret = sof_card_dai_links_create(card);
+ if (ret < 0)
+ return ret;
+diff --git a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+index 623e3bebb8884a..890517eb63f62f 100644
+--- a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
++++ b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
+@@ -58,6 +58,11 @@ static const struct snd_soc_dapm_route rt712_sdca_map[] = {
+ { "rt712 MIC2", NULL, "Headset Mic" },
+ };
+
++static const struct snd_soc_dapm_route rt713_sdca_map[] = {
++ { "Headphone", NULL, "rt713 HP" },
++ { "rt713 MIC2", NULL, "Headset Mic" },
++};
++
+ static const struct snd_kcontrol_new rt_sdca_jack_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+@@ -109,6 +114,9 @@ static int rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd)
+ } else if (strstr(component->name_prefix, "rt712")) {
+ ret = snd_soc_dapm_add_routes(&card->dapm, rt712_sdca_map,
+ ARRAY_SIZE(rt712_sdca_map));
++ } else if (strstr(component->name_prefix, "rt713")) {
++ ret = snd_soc_dapm_add_routes(&card->dapm, rt713_sdca_map,
++ ARRAY_SIZE(rt713_sdca_map));
+ } else {
+ dev_err(card->dev, "%s is not supported\n", component->name_prefix);
+ return -EINVAL;
+@@ -160,6 +168,7 @@ int sof_sdw_rt_sdca_jack_exit(struct snd_soc_card *card, struct snd_soc_dai_link
+
+ device_remove_software_node(ctx->headset_codec_dev);
+ put_device(ctx->headset_codec_dev);
++ ctx->headset_codec_dev = NULL;
+
+ return 0;
+ }
+diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
+index 07aa37dd90e997..f7370e5b4e9e41 100644
+--- a/sound/soc/intel/common/Makefile
++++ b/sound/soc/intel/common/Makefile
+@@ -10,6 +10,7 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
+ soc-acpi-intel-tgl-match.o soc-acpi-intel-ehl-match.o \
+ soc-acpi-intel-jsl-match.o soc-acpi-intel-adl-match.o \
+ soc-acpi-intel-rpl-match.o soc-acpi-intel-mtl-match.o \
++ soc-acpi-intel-arl-match.o \
+ soc-acpi-intel-lnl-match.o \
+ soc-acpi-intel-hda-match.o \
+ soc-acpi-intel-sdw-mockup-match.o
+diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+new file mode 100644
+index 00000000000000..e52797aae6e655
+--- /dev/null
++++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+@@ -0,0 +1,51 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * soc-acpi-intel-arl-match.c - tables and support for ARL ACPI enumeration.
++ *
++ * Copyright (c) 2023 Intel Corporation.
++ */
++
++#include <sound/soc-acpi.h>
++#include <sound/soc-acpi-intel-match.h>
++
++static const struct snd_soc_acpi_endpoint single_endpoint = {
++ .num = 0,
++ .aggregated = 0,
++ .group_position = 0,
++ .group_id = 0,
++};
++
++static const struct snd_soc_acpi_adr_device rt711_0_adr[] = {
++ {
++ .adr = 0x000020025D071100ull,
++ .num_endpoints = 1,
++ .endpoints = &single_endpoint,
++ .name_prefix = "rt711"
++ }
++};
++
++static const struct snd_soc_acpi_link_adr arl_rvp[] = {
++ {
++ .mask = BIT(0),
++ .num_adr = ARRAY_SIZE(rt711_0_adr),
++ .adr_d = rt711_0_adr,
++ },
++ {}
++};
++
++struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[] = {
++ {},
++};
++EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_arl_machines);
++
++/* this table is used when there is no I2S codec present */
++struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
++ {
++ .link_mask = 0x1, /* link0 required */
++ .links = arl_rvp,
++ .drv_name = "sof_sdw",
++ .sof_tplg_filename = "sof-arl-rt711.tplg",
++ },
++ {},
++};
++EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_arl_sdw_machines);
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index cdcbf04b8832ff..e4c3492a0c2824 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -75,6 +75,38 @@ static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
+ return arg;
+ }
+
++/*
++ * The Lenovo Yoga Tab 3 Pro YT3-X90, which ships with an Android factory
++ * OS, has a buggy DSDT in which the codec is not listed at all.
++ */
++static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
++ {
++ /* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++ },
++ },
++ { }
++};
++
++static struct snd_soc_acpi_mach cht_lenovo_yoga_tab3_x90_mach = {
++ .id = "10WM5102",
++ .drv_name = "bytcr_wm5102",
++ .fw_filename = "intel/fw_sst_22a8.bin",
++ .board = "bytcr_wm5102",
++ .sof_tplg_filename = "sof-cht-wm5102.tplg",
++};
++
++static struct snd_soc_acpi_mach *lenovo_yt3_x90_quirk(void *arg)
++{
++ if (dmi_check_system(lenovo_yoga_tab3_x90))
++ return &cht_lenovo_yoga_tab3_x90_mach;
++
++ /* Skip wildcard match snd_soc_acpi_intel_cherrytrail_machines[] entry */
++ return NULL;
++}
++
+ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ .num_codecs = 2,
+ .codecs = { "10EC5640", "10EC3276" },
+@@ -175,6 +207,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
+ .drv_name = "sof_pcm512x",
+ .sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
+ },
++ /*
++	 * Special case for the Lenovo Yoga Tab 3 Pro YT3-X90, whose DSDT is
++	 * missing the codec. Match on the SST id instead; lenovo_yt3_x90_quirk()
++	 * returns a YT3-specific mach, or NULL on other hardware so that this
++	 * entry is skipped.
++ */
++ {
++ .id = "808622A8",
++ .machine_quirk = lenovo_yt3_x90_quirk,
++ },
+
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+index 387e7310088417..8911c90bbaf68f 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+@@ -19,6 +19,11 @@ static const struct snd_soc_acpi_codecs glk_codecs = {
+ .codecs = {"MX98357A"}
+ };
+
++static const struct snd_soc_acpi_codecs glk_rt5682_rt5682s_hp = {
++ .num_codecs = 2,
++ .codecs = {"10EC5682", "RTL5682"},
++};
++
+ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
+ {
+ .id = "INT343A",
+@@ -35,20 +40,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[] = {
+ .sof_tplg_filename = "sof-glk-da7219.tplg",
+ },
+ {
+- .id = "10EC5682",
++ .comp_ids = &glk_rt5682_rt5682s_hp,
+ .drv_name = "glk_rt5682_mx98357a",
+ .fw_filename = "intel/dsp_fw_glk.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &glk_codecs,
+ .sof_tplg_filename = "sof-glk-rt5682.tplg",
+ },
+- {
+- .id = "RTL5682",
+- .drv_name = "glk_rt5682_max98357a",
+- .machine_quirk = snd_soc_acpi_codec_list,
+- .quirk_data = &glk_codecs,
+- .sof_tplg_filename = "sof-glk-rt5682.tplg",
+- },
+ {
+ .id = "10134242",
+ .drv_name = "glk_cs4242_mx98357a",
+diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h
+index de4e550c5b34dc..42bd51456b945d 100644
+--- a/sound/soc/intel/common/soc-intel-quirks.h
++++ b/sound/soc/intel/common/soc-intel-quirks.h
+@@ -11,7 +11,7 @@
+
+ #include <linux/platform_data/x86/soc.h>
+
+-#if IS_ENABLED(CONFIG_X86)
++#if IS_REACHABLE(CONFIG_IOSF_MBI)
+
+ #include <linux/dmi.h>
+ #include <asm/iosf_mbi.h>
+diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
+index 6b06b7b5ede869..ffe558ef49220a 100644
+--- a/sound/soc/intel/keembay/kmb_platform.c
++++ b/sound/soc/intel/keembay/kmb_platform.c
+@@ -815,6 +815,7 @@ static const struct of_device_id kmb_plat_of_match[] = {
+ { .compatible = "intel,keembay-tdm", .data = &intel_kmb_tdm_dai},
+ {}
+ };
++MODULE_DEVICE_TABLE(of, kmb_plat_of_match);
+
+ static int kmb_plat_dai_probe(struct platform_device *pdev)
+ {
+diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
+index ac3dc8c63c260d..c602275fcf717b 100644
+--- a/sound/soc/intel/skylake/skl-pcm.c
++++ b/sound/soc/intel/skylake/skl-pcm.c
+@@ -252,8 +252,10 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
+ snd_pcm_set_sync(substream);
+
+ mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+- if (!mconfig)
++ if (!mconfig) {
++ kfree(dma_params);
+ return -EINVAL;
++ }
+
+ skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
+
+@@ -1471,6 +1473,7 @@ int skl_platform_register(struct device *dev)
+ dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
+ sizeof(skl_platform_dai), GFP_KERNEL);
+ if (!dais) {
++ kfree(skl->dais);
+ ret = -ENOMEM;
+ goto err;
+ }
+@@ -1483,8 +1486,10 @@ int skl_platform_register(struct device *dev)
+
+ ret = devm_snd_soc_register_component(dev, &skl_component,
+ skl->dais, num_dais);
+- if (ret)
++ if (ret) {
++ kfree(skl->dais);
+ dev_err(dev, "soc component registration failed %d\n", ret);
++ }
+ err:
+ return ret;
+ }
+diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
+index 7a425271b08b16..fd9624ad5f72b0 100644
+--- a/sound/soc/intel/skylake/skl-sst-ipc.c
++++ b/sound/soc/intel/skylake/skl-sst-ipc.c
+@@ -1003,8 +1003,10 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
+
+ reply.size = (reply.header >> 32) & IPC_DATA_OFFSET_SZ_MASK;
+ buf = krealloc(reply.data, reply.size, GFP_KERNEL);
+- if (!buf)
++ if (!buf) {
++ kfree(reply.data);
+ return -ENOMEM;
++ }
+ *payload = buf;
+ *bytes = reply.size;
+
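
This skl-sst-ipc hunk, like the skl-pcm one above, fixes the classic krealloc() anti-pattern: on failure krealloc() returns NULL but leaves the original allocation alive, so returning -ENOMEM without freeing it leaks memory. A minimal sketch of the safe idiom (grow_reply is a hypothetical helper, not part of the driver):

	#include <linux/slab.h>

	/* Hypothetical helper showing the safe krealloc() idiom. */
	static int grow_reply(void **data, size_t new_size)
	{
		void *buf = krealloc(*data, new_size, GFP_KERNEL);

		if (!buf) {
			kfree(*data);	/* krealloc() did not free the old buffer */
			*data = NULL;
			return -ENOMEM;
		}
		*data = buf;		/* replace the pointer only on success */
		return 0;
	}
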
+diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
+index 57ea815d3f0419..b776c58dcf47a1 100644
+--- a/sound/soc/intel/skylake/skl-sst-utils.c
++++ b/sound/soc/intel/skylake/skl-sst-utils.c
+@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ if (!module->instance_id) {
+ ret = -ENOMEM;
++ kfree(module);
+ goto free_uuid_list;
+ }
+
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index 640cebd2983e22..16d2c9acc33a6a 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -182,6 +182,9 @@ static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+ const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ unsigned long addr = substream->runtime->dma_addr;
+
++ if (!dram)
++ return 0;
++
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ kirkwood_dma_conf_mbus_windows(priv->io,
+ KIRKWOOD_PLAYBACK_WIN, addr, dram);
+diff --git a/sound/soc/loongson/loongson_card.c b/sound/soc/loongson/loongson_card.c
+index 406ee8db1a3c5f..8cc54aedd00242 100644
+--- a/sound/soc/loongson/loongson_card.c
++++ b/sound/soc/loongson/loongson_card.c
+@@ -127,8 +127,8 @@ static int loongson_card_parse_of(struct loongson_card_data *data)
+ codec = of_get_child_by_name(dev->of_node, "codec");
+ if (!codec) {
+ dev_err(dev, "audio-codec property missing or invalid\n");
+- ret = -EINVAL;
+- goto err;
++ of_node_put(cpu);
++ return -EINVAL;
+ }
+
+ for (i = 0; i < card->num_links; i++) {
+diff --git a/sound/soc/mediatek/common/mtk-dsp-sof-common.c b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
+index 6fef16306f74ff..21a9403b7e9238 100644
+--- a/sound/soc/mediatek/common/mtk-dsp-sof-common.c
++++ b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
+@@ -24,7 +24,7 @@ int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai_link *sof_dai_link = NULL;
+ const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];
+
+- if (strcmp(rtd->dai_link->name, conn->normal_link))
++ if (conn->normal_link && strcmp(rtd->dai_link->name, conn->normal_link))
+ continue;
+
+ for_each_card_rtds(card, runtime) {
+diff --git a/sound/soc/mediatek/common/mtk-soundcard-driver.c b/sound/soc/mediatek/common/mtk-soundcard-driver.c
+index a58e1e3674deca..000a086a8cf44d 100644
+--- a/sound/soc/mediatek/common/mtk-soundcard-driver.c
++++ b/sound/soc/mediatek/common/mtk-soundcard-driver.c
+@@ -22,7 +22,11 @@ static int set_card_codec_info(struct snd_soc_card *card,
+
+ codec_node = of_get_child_by_name(sub_node, "codec");
+ if (!codec_node) {
+- dev_dbg(dev, "%s no specified codec\n", dai_link->name);
++ dev_dbg(dev, "%s no specified codec: setting dummy.\n", dai_link->name);
++
++ dai_link->codecs = &snd_soc_dummy_dlc;
++ dai_link->num_codecs = 1;
++ dai_link->dynamic = 1;
+ return 0;
+ }
+
+diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+index 701fbcc0f2c9c3..b48375aa30271c 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+@@ -31,7 +31,7 @@ struct mt8183_da7219_max98357_priv {
+
+ static struct snd_soc_jack_pin mt8183_da7219_max98357_jack_pins[] = {
+ {
+- .pin = "Headphone",
++ .pin = "Headphones",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+@@ -626,7 +626,7 @@ static struct snd_soc_codec_conf mt6358_codec_conf[] = {
+ };
+
+ static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
+- SOC_DAPM_PIN_SWITCH("Headphone"),
++ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("Line Out"),
+@@ -634,7 +634,7 @@ static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
+
+ static const
+ struct snd_soc_dapm_widget mt8183_da7219_max98357_dapm_widgets[] = {
+- SND_SOC_DAPM_HP("Headphone", NULL),
++ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_SPK("Line Out", NULL),
+@@ -680,7 +680,7 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = {
+ };
+
+ static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
+- SOC_DAPM_PIN_SWITCH("Headphone"),
++ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+@@ -689,7 +689,7 @@ static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
+
+ static const
+ struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = {
+- SND_SOC_DAPM_HP("Headphone", NULL),
++ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
+index 247ab8df941f7b..ab61e597c9a0ff 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
++++ b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
+@@ -499,7 +499,7 @@ static const struct snd_soc_dapm_widget mtk_dai_adda_widgets[] = {
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY_S("AUD_PAD_TOP", SUPPLY_SEQ_ADDA_AUD_PAD_TOP,
+- 0, 0, 0,
++ AFE_AUD_PAD_TOP, RG_RX_FIFO_ON_SFT, 0,
+ mtk_adda_pad_top_event,
+ SND_SOC_DAPM_PRE_PMU),
+ SND_SOC_DAPM_SUPPLY_S("ADDA_MTKAIF_CFG", SUPPLY_SEQ_ADDA_MTKAIF_CFG,
+diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+index 9c11016f032c2a..9777ba89e956c7 100644
+--- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
++++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
+@@ -1179,7 +1179,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
+ if (!playback_codec) {
+ ret = -EINVAL;
+- dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
++ dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
+ goto err_playback_codec;
+ }
+
+@@ -1193,7 +1193,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
+ if (ret) {
+- dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
++ dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
+ dai_link->name);
+ goto err_probe;
+ }
+diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
+index 5e14655c5617ed..11f30b183520ff 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
++++ b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
+@@ -2748,6 +2748,7 @@ static bool mt8188_is_volatile_reg(struct device *dev, unsigned int reg)
+ case AFE_ASRC12_NEW_CON9:
+ case AFE_LRCK_CNT:
+ case AFE_DAC_MON0:
++ case AFE_DAC_CON0:
+ case AFE_DL2_CUR:
+ case AFE_DL3_CUR:
+ case AFE_DL6_CUR:
+diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+index 9017f48b6272be..f7e22abb758461 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
++++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+@@ -246,6 +246,11 @@ static const struct snd_soc_dapm_widget mt8188_mt6359_widgets[] = {
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SINK("HDMI"),
+ SND_SOC_DAPM_SINK("DP"),
++
++ /* dynamic pinctrl */
++ SND_SOC_DAPM_PINCTRL("ETDM_SPK_PIN", "aud_etdm_spk_on", "aud_etdm_spk_off"),
++ SND_SOC_DAPM_PINCTRL("ETDM_HP_PIN", "aud_etdm_hp_on", "aud_etdm_hp_off"),
++ SND_SOC_DAPM_PINCTRL("MTKAIF_PIN", "aud_mtkaif_on", "aud_mtkaif_off"),
+ };
+
+ static const struct snd_kcontrol_new mt8188_mt6359_controls[] = {
+@@ -267,6 +272,7 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
++ struct snd_soc_dapm_widget *pin_w = NULL, *w;
+ struct mtk_base_afe *afe;
+ struct mt8188_afe_private *afe_priv;
+ struct mtkaif_param *param;
+@@ -306,6 +312,18 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ return 0;
+ }
+
++ for_each_card_widgets(rtd->card, w) {
++ if (!strcmp(w->name, "MTKAIF_PIN")) {
++ pin_w = w;
++ break;
++ }
++ }
++
++ if (pin_w)
++ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_PRE_PMU);
++ else
++ dev_dbg(afe->dev, "%s(), no pinmux widget, please check if default on\n", __func__);
++
+ pm_runtime_get_sync(afe->dev);
+ mt6359_mtkaif_calibration_enable(cmpnt_codec);
+
+@@ -403,6 +421,9 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ for (i = 0; i < MT8188_MTKAIF_MISO_NUM; i++)
+ param->mtkaif_phase_cycle[i] = mtkaif_phase_cycle[i];
+
++ if (pin_w)
++ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_POST_PMD);
++
+ dev_dbg(afe->dev, "%s(), end, calibration ok %d\n",
+ __func__, param->mtkaif_calibration_ok);
+
+diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+index 9ce06821c7d0f0..49440db370af07 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
++++ b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+@@ -566,10 +566,10 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+ tdm_con |= 1 << DELAY_DATA_SFT;
+ tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT;
+ } else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_A) {
+- tdm_con |= 0 << DELAY_DATA_SFT;
++ tdm_con |= 1 << DELAY_DATA_SFT;
+ tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
+ } else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_B) {
+- tdm_con |= 1 << DELAY_DATA_SFT;
++ tdm_con |= 0 << DELAY_DATA_SFT;
+ tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
+ }
+
+diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
+index b93ea33739f29d..6458d5dc4902f6 100644
+--- a/sound/soc/meson/Kconfig
++++ b/sound/soc/meson/Kconfig
+@@ -99,6 +99,7 @@ config SND_MESON_AXG_PDM
+
+ config SND_MESON_CARD_UTILS
+ tristate
++ select SND_DYNAMIC_MINORS
+
+ config SND_MESON_CODEC_GLUE
+ tristate
+diff --git a/sound/soc/meson/aiu.c b/sound/soc/meson/aiu.c
+index 7109b81cc3d0a7..5d1419ed7a62d9 100644
+--- a/sound/soc/meson/aiu.c
++++ b/sound/soc/meson/aiu.c
+@@ -212,11 +212,12 @@ static const char * const aiu_spdif_ids[] = {
+ static int aiu_clk_get(struct device *dev)
+ {
+ struct aiu *aiu = dev_get_drvdata(dev);
++ struct clk *pclk;
+ int ret;
+
+- aiu->pclk = devm_clk_get(dev, "pclk");
+- if (IS_ERR(aiu->pclk))
+- return dev_err_probe(dev, PTR_ERR(aiu->pclk), "Can't get the aiu pclk\n");
++ pclk = devm_clk_get_enabled(dev, "pclk");
++ if (IS_ERR(pclk))
++ return dev_err_probe(dev, PTR_ERR(pclk), "Can't get the aiu pclk\n");
+
+ aiu->spdif_mclk = devm_clk_get(dev, "spdif_mclk");
+ if (IS_ERR(aiu->spdif_mclk))
+@@ -233,18 +234,6 @@ static int aiu_clk_get(struct device *dev)
+ if (ret)
+ return dev_err_probe(dev, ret, "Can't get the spdif clocks\n");
+
+- ret = clk_prepare_enable(aiu->pclk);
+- if (ret) {
+- dev_err(dev, "peripheral clock enable failed\n");
+- return ret;
+- }
+-
+- ret = devm_add_action_or_reset(dev,
+- (void(*)(void *))clk_disable_unprepare,
+- aiu->pclk);
+- if (ret)
+- dev_err(dev, "failed to add reset action on pclk");
+-
+ return ret;
+ }
+
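
The aiu.c hunk collapses devm_clk_get() + clk_prepare_enable() + devm_add_action_or_reset() into devm_clk_get_enabled(), which acquires the clock, enables it, and registers the disable/unprepare for driver detach in one call; that is also why the pclk pointer can drop out of struct aiu. Side by side (error handling elided in the "before" half):

	/* Before: three steps the driver had to sequence and unwind itself. */
	pclk = devm_clk_get(dev, "pclk");
	ret = clk_prepare_enable(pclk);
	ret = devm_add_action_or_reset(dev,
			(void (*)(void *))clk_disable_unprepare, pclk);

	/* After: one call, automatically disabled and unprepared on detach. */
	pclk = devm_clk_get_enabled(dev, "pclk");
	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk), "Can't get the aiu pclk\n");
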
+diff --git a/sound/soc/meson/aiu.h b/sound/soc/meson/aiu.h
+index 393b6c2307e49f..0f94c8bf608181 100644
+--- a/sound/soc/meson/aiu.h
++++ b/sound/soc/meson/aiu.h
+@@ -33,7 +33,6 @@ struct aiu_platform_data {
+ };
+
+ struct aiu {
+- struct clk *pclk;
+ struct clk *spdif_mclk;
+ struct aiu_interface i2s;
+ struct aiu_interface spdif;
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index f10c0c17863eb5..44175b1b14a295 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -104,7 +104,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ int *index)
+ {
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai_link *pad = &card->dai_link[*index];
++ struct snd_soc_dai_link *pad;
+ struct snd_soc_dai_link *lb;
+ struct snd_soc_dai_link_component *dlc;
+ int ret;
+@@ -114,6 +114,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ if (ret)
+ return ret;
+
++ pad = &card->dai_link[*index];
+ lb = &card->dai_link[*index + 1];
+
+ lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name);
+@@ -318,6 +319,7 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
+
+ dai_link->cpus = cpu;
+ dai_link->num_cpus = 1;
++ dai_link->nonatomic = true;
+
+ ret = meson_card_parse_dai(card, np, dai_link->cpus);
+ if (ret)
+diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
+index bccfb770b33911..5218e40aeb1bbc 100644
+--- a/sound/soc/meson/axg-fifo.c
++++ b/sound/soc/meson/axg-fifo.c
+@@ -3,6 +3,7 @@
+ // Copyright (c) 2018 BayLibre, SAS.
+ // Author: Jerome Brunet <jbrunet@baylibre.com>
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+@@ -145,8 +146,8 @@ int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+ /* Enable irq if necessary */
+ irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT;
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+- CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
+- CTRL0_INT_EN(irq_en));
++ CTRL0_INT_EN,
++ FIELD_PREP(CTRL0_INT_EN, irq_en));
+
+ return 0;
+ }
+@@ -176,9 +177,9 @@ int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ {
+ struct axg_fifo *fifo = axg_fifo_data(ss);
+
+- /* Disable the block count irq */
++ /* Disable irqs */
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+- CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);
++ CTRL0_INT_EN, 0);
+
+ return 0;
+ }
+@@ -187,13 +188,13 @@ EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);
+ static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
+ {
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+- CTRL1_INT_CLR(FIFO_INT_MASK),
+- CTRL1_INT_CLR(mask));
++ CTRL1_INT_CLR,
++ FIELD_PREP(CTRL1_INT_CLR, mask));
+
+ /* Clear must also be cleared */
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+- CTRL1_INT_CLR(FIFO_INT_MASK),
+- 0);
++ CTRL1_INT_CLR,
++ FIELD_PREP(CTRL1_INT_CLR, 0));
+ }
+
+ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+@@ -203,18 +204,19 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
+ unsigned int status;
+
+ regmap_read(fifo->map, FIFO_STATUS1, &status);
++ status = FIELD_GET(STATUS1_INT_STS, status);
++ axg_fifo_ack_irq(fifo, status);
+
+- status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
+- if (status & FIFO_INT_COUNT_REPEAT)
+- snd_pcm_period_elapsed(ss);
+- else
++ if (status & ~FIFO_INT_COUNT_REPEAT)
+ dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
+ status);
+
+- /* Ack irqs */
+- axg_fifo_ack_irq(fifo, status);
++ if (status & FIFO_INT_COUNT_REPEAT) {
++ snd_pcm_period_elapsed(ss);
++ return IRQ_HANDLED;
++ }
+
+- return IRQ_RETVAL(status);
++ return IRQ_NONE;
+ }
+
+ int axg_fifo_pcm_open(struct snd_soc_component *component,
+@@ -242,8 +244,10 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+ if (ret)
+ return ret;
+
+- ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
+- dev_name(dev), ss);
++ /* Use the threaded irq handler only with non-atomic links */
++ ret = request_threaded_irq(fifo->irq, NULL,
++ axg_fifo_pcm_irq_block,
++ IRQF_ONESHOT, dev_name(dev), ss);
+ if (ret)
+ return ret;
+
+@@ -254,15 +258,15 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
+
+ /* Setup status2 so it reports the memory pointer */
+ regmap_update_bits(fifo->map, FIFO_CTRL1,
+- CTRL1_STATUS2_SEL_MASK,
+- CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));
++ CTRL1_STATUS2_SEL,
++ FIELD_PREP(CTRL1_STATUS2_SEL, STATUS2_SEL_DDR_READ));
+
+ /* Make sure the dma is initially disabled */
+ __dma_enable(fifo, false);
+
+ /* Disable irqs until params are ready */
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+- CTRL0_INT_EN(FIFO_INT_MASK), 0);
++ CTRL0_INT_EN, 0);
+
+ /* Clear any pending interrupt */
+ axg_fifo_ack_irq(fifo, FIFO_INT_MASK);
+diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
+index b63acd723c870c..5b7d32c37991be 100644
+--- a/sound/soc/meson/axg-fifo.h
++++ b/sound/soc/meson/axg-fifo.h
+@@ -42,21 +42,19 @@ struct snd_soc_pcm_runtime;
+
+ #define FIFO_CTRL0 0x00
+ #define CTRL0_DMA_EN BIT(31)
+-#define CTRL0_INT_EN(x) ((x) << 16)
++#define CTRL0_INT_EN GENMASK(23, 16)
+ #define CTRL0_SEL_MASK GENMASK(2, 0)
+ #define CTRL0_SEL_SHIFT 0
+ #define FIFO_CTRL1 0x04
+-#define CTRL1_INT_CLR(x) ((x) << 0)
+-#define CTRL1_STATUS2_SEL_MASK GENMASK(11, 8)
+-#define CTRL1_STATUS2_SEL(x) ((x) << 8)
++#define CTRL1_INT_CLR GENMASK(7, 0)
++#define CTRL1_STATUS2_SEL GENMASK(11, 8)
+ #define STATUS2_SEL_DDR_READ 0
+-#define CTRL1_FRDDR_DEPTH_MASK GENMASK(31, 24)
+-#define CTRL1_FRDDR_DEPTH(x) ((x) << 24)
++#define CTRL1_FRDDR_DEPTH GENMASK(31, 24)
+ #define FIFO_START_ADDR 0x08
+ #define FIFO_FINISH_ADDR 0x0c
+ #define FIFO_INT_ADDR 0x10
+ #define FIFO_STATUS1 0x14
+-#define STATUS1_INT_STS(x) ((x) << 0)
++#define STATUS1_INT_STS GENMASK(7, 0)
+ #define FIFO_STATUS2 0x18
+ #define FIFO_INIT_ADDR 0x24
+ #define FIFO_CTRL2 0x28
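
The header rework above replaces shift-macro pairs with single GENMASK() field definitions so that FIELD_PREP()/FIELD_GET() can derive the shift from the mask itself. A user-space model of the trio (simplified 32-bit variants of include/linux/bitfield.h, for illustration only):

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified 32-bit models of the kernel macros. */
	#define GENMASK(h, l)	 ((~0u << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
	#define FIELD_GET(m, r)	 (((r) & (m)) >> __builtin_ctz(m))

	#define CTRL1_STATUS2_SEL GENMASK(11, 8)	/* as in axg-fifo.h */

	int main(void)
	{
		uint32_t reg = FIELD_PREP(CTRL1_STATUS2_SEL, 3);	/* 0x300 */

		printf("reg=%#x field=%u\n", reg,
		       FIELD_GET(CTRL1_STATUS2_SEL, reg));		/* field=3 */
		return 0;
	}
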
+diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
+index 8c166a5f338ced..747a900c0bb220 100644
+--- a/sound/soc/meson/axg-frddr.c
++++ b/sound/soc/meson/axg-frddr.c
+@@ -7,6 +7,7 @@
+ * This driver implements the frontend playback DAI of AXG and G12A based SoCs
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/regmap.h>
+ #include <linux/module.h>
+@@ -59,8 +60,8 @@ static int axg_frddr_dai_hw_params(struct snd_pcm_substream *substream,
+ /* Trim the FIFO depth if the period is small to improve latency */
+ depth = min(period, fifo->depth);
+ val = (depth / AXG_FIFO_BURST) - 1;
+- regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH_MASK,
+- CTRL1_FRDDR_DEPTH(val));
++ regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH,
++ FIELD_PREP(CTRL1_FRDDR_DEPTH, val));
+
+ return 0;
+ }
+diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
+index 1c3d433cefd23c..a71790908e178e 100644
+--- a/sound/soc/meson/axg-tdm-interface.c
++++ b/sound/soc/meson/axg-tdm-interface.c
+@@ -12,6 +12,9 @@
+
+ #include "axg-tdm.h"
+
++/* Maximum bit clock frequency according to the datasheets */
++#define MAX_SCLK 100000000 /* Hz */
++
+ enum {
+ TDM_IFACE_PAD,
+ TDM_IFACE_LOOPBACK,
+@@ -153,19 +156,27 @@ static int axg_tdm_iface_startup(struct snd_pcm_substream *substream,
+ return -EINVAL;
+ }
+
+- /* Apply component wide rate symmetry */
+ if (snd_soc_component_active(dai->component)) {
++ /* Apply component wide rate symmetry */
+ ret = snd_pcm_hw_constraint_single(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ iface->rate);
+- if (ret < 0) {
+- dev_err(dai->dev,
+- "can't set iface rate constraint\n");
+- return ret;
+- }
++
++ } else {
++ /* Limit rate according to the slot number and width */
++ unsigned int max_rate =
++ MAX_SCLK / (iface->slots * iface->slot_width);
++ ret = snd_pcm_hw_constraint_minmax(substream->runtime,
++ SNDRV_PCM_HW_PARAM_RATE,
++ 0, max_rate);
+ }
+
+- return 0;
++ if (ret < 0)
++ dev_err(dai->dev, "can't set iface rate constraint\n");
++ else
++ ret = 0;
++
++ return ret;
+ }
+
+ static int axg_tdm_iface_set_stream(struct snd_pcm_substream *substream,
+@@ -264,8 +275,8 @@ static int axg_tdm_iface_set_sclk(struct snd_soc_dai *dai,
+ srate = iface->slots * iface->slot_width * params_rate(params);
+
+ if (!iface->mclk_rate) {
+- /* If no specific mclk is requested, default to bit clock * 4 */
+- clk_set_rate(iface->mclk, 4 * srate);
++ /* If no specific mclk is requested, default to bit clock * 2 */
++ clk_set_rate(iface->mclk, 2 * srate);
+ } else {
+ /* Check if we can actually get the bit clock from mclk */
+ if (iface->mclk_rate % srate) {
+@@ -338,26 +349,31 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
+-static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
++static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream,
++ int cmd,
+ struct snd_soc_dai *dai)
+ {
+- struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
++ struct axg_tdm_stream *ts =
++ snd_soc_dai_get_dma_data(dai, substream);
+
+- /* Stop all attached formatters */
+- axg_tdm_stream_stop(ts);
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ axg_tdm_stream_start(ts);
++ break;
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ case SNDRV_PCM_TRIGGER_STOP:
++ axg_tdm_stream_stop(ts);
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ return 0;
+ }
+
+-static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
+-{
+- struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
+-
+- /* Force all attached formatters to update */
+- return axg_tdm_stream_reset(ts);
+-}
+-
+ static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
+ {
+ int stream;
+@@ -401,8 +417,7 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
+ .set_fmt = axg_tdm_iface_set_fmt,
+ .startup = axg_tdm_iface_startup,
+ .hw_params = axg_tdm_iface_hw_params,
+- .prepare = axg_tdm_iface_prepare,
+- .hw_free = axg_tdm_iface_hw_free,
++ .trigger = axg_tdm_iface_trigger,
+ };
+
+ /* TDM Backend DAIs */
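Two behavioural notes on the axg-tdm-interface changes above: startup now bounds the frame rate by the 100 MHz sclk ceiling when the component is idle, and the prepare/hw_free pair is replaced by a standard trigger callback that starts and stops the attached formatters on the usual PCM trigger commands. The rate bound is simple arithmetic; illustrative numbers only:

    /* rate * slots * slot_width must not exceed MAX_SCLK (100 MHz):
     *   8 slots x 32 bits -> 100000000 / 256 = 390625 Hz max
     *   2 slots x 16 bits -> 100000000 /  32 = 3125000 Hz max
     */
    static unsigned int tdm_max_rate(unsigned int slots, unsigned int slot_width)
    {
            return 100000000 / (slots * slot_width);
    }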
+diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
+index 1a0be177b8fe77..972ad99f31be2b 100644
+--- a/sound/soc/meson/axg-toddr.c
++++ b/sound/soc/meson/axg-toddr.c
+@@ -5,6 +5,7 @@
+
+ /* This driver implements the frontend capture DAI of AXG based SoCs */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/regmap.h>
+ #include <linux/module.h>
+@@ -19,12 +20,9 @@
+ #define CTRL0_TODDR_EXT_SIGNED BIT(29)
+ #define CTRL0_TODDR_PP_MODE BIT(28)
+ #define CTRL0_TODDR_SYNC_CH BIT(27)
+-#define CTRL0_TODDR_TYPE_MASK GENMASK(15, 13)
+-#define CTRL0_TODDR_TYPE(x) ((x) << 13)
+-#define CTRL0_TODDR_MSB_POS_MASK GENMASK(12, 8)
+-#define CTRL0_TODDR_MSB_POS(x) ((x) << 8)
+-#define CTRL0_TODDR_LSB_POS_MASK GENMASK(7, 3)
+-#define CTRL0_TODDR_LSB_POS(x) ((x) << 3)
++#define CTRL0_TODDR_TYPE GENMASK(15, 13)
++#define CTRL0_TODDR_MSB_POS GENMASK(12, 8)
++#define CTRL0_TODDR_LSB_POS GENMASK(7, 3)
+ #define CTRL1_TODDR_FORCE_FINISH BIT(25)
+ #define CTRL1_SEL_SHIFT 28
+
+@@ -76,12 +74,12 @@ static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream,
+ width = params_width(params);
+
+ regmap_update_bits(fifo->map, FIFO_CTRL0,
+- CTRL0_TODDR_TYPE_MASK |
+- CTRL0_TODDR_MSB_POS_MASK |
+- CTRL0_TODDR_LSB_POS_MASK,
+- CTRL0_TODDR_TYPE(type) |
+- CTRL0_TODDR_MSB_POS(TODDR_MSB_POS) |
+- CTRL0_TODDR_LSB_POS(TODDR_MSB_POS - (width - 1)));
++ CTRL0_TODDR_TYPE |
++ CTRL0_TODDR_MSB_POS |
++ CTRL0_TODDR_LSB_POS,
++ FIELD_PREP(CTRL0_TODDR_TYPE, type) |
++ FIELD_PREP(CTRL0_TODDR_MSB_POS, TODDR_MSB_POS) |
++ FIELD_PREP(CTRL0_TODDR_LSB_POS, TODDR_MSB_POS - (width - 1)));
+
+ return 0;
+ }
+diff --git a/sound/soc/meson/g12a-toacodec.c b/sound/soc/meson/g12a-toacodec.c
+index 6c4503766fdcae..531bb8707a3ec4 100644
+--- a/sound/soc/meson/g12a-toacodec.c
++++ b/sound/soc/meson/g12a-toacodec.c
+@@ -71,6 +71,9 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int mux, reg;
+
++ if (ucontrol->value.enumerated.item[0] >= e->items)
++ return -EINVAL;
++
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ regmap_field_read(priv->field_dat_sel, &reg);
+
+@@ -101,7 +104,7 @@ static int g12a_toacodec_mux_put_enum(struct snd_kcontrol *kcontrol,
+
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+
+- return 0;
++ return 1;
+ }
+
+ static SOC_ENUM_SINGLE_DECL(g12a_toacodec_mux_enum, TOACODEC_CTRL0,
+diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
+index f7ef9aa1eed8db..b92434125face1 100644
+--- a/sound/soc/meson/g12a-tohdmitx.c
++++ b/sound/soc/meson/g12a-tohdmitx.c
+@@ -45,6 +45,9 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol,
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int mux, changed;
+
++ if (ucontrol->value.enumerated.item[0] >= e->items)
++ return -EINVAL;
++
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, e->reg,
+ CTRL0_I2S_DAT_SEL,
+@@ -93,6 +96,9 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ unsigned int mux, changed;
+
++ if (ucontrol->value.enumerated.item[0] >= e->items)
++ return -EINVAL;
++
+ mux = snd_soc_enum_item_to_val(e, ucontrol->value.enumerated.item[0]);
+ changed = snd_soc_component_test_bits(component, TOHDMITX_CTRL0,
+ CTRL0_SPDIF_SEL,
+@@ -112,7 +118,7 @@ static int g12a_tohdmitx_spdif_mux_put_enum(struct snd_kcontrol *kcontrol,
+
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL);
+
+- return 0;
++ return 1;
+ }
+
+ static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_spdif_mux_enum, TOHDMITX_CTRL0,
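The mux put callbacks above (toacodec plus both tohdmitx muxes) all gain an upper-bounds check on the requested enum item before snd_soc_enum_item_to_val(), and the returns visible in these hunks change from 0 to 1. By ALSA convention a put() handler returns 1 when the control value changed, which makes the core emit a change notification to userspace; out-of-range items are rejected with -EINVAL. Generic shape of the hardened callback, hypothetical names:

    /* Sketch only; types come from <sound/soc.h>. */
    static int my_mux_put(struct snd_kcontrol *kcontrol,
                          struct snd_ctl_elem_value *ucontrol)
    {
            struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

            if (ucontrol->value.enumerated.item[0] >= e->items)
                    return -EINVAL;  /* reject out-of-range input */

            /* ... write the register, update DAPM ... */

            return 1;  /* value changed: notify userspace */
    }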
+diff --git a/sound/soc/meson/t9015.c b/sound/soc/meson/t9015.c
+index 9c6b4dac689320..571f65788c5920 100644
+--- a/sound/soc/meson/t9015.c
++++ b/sound/soc/meson/t9015.c
+@@ -48,7 +48,6 @@
+ #define POWER_CFG 0x10
+
+ struct t9015 {
+- struct clk *pclk;
+ struct regulator *avdd;
+ };
+
+@@ -249,6 +248,7 @@ static int t9015_probe(struct platform_device *pdev)
+ struct t9015 *priv;
+ void __iomem *regs;
+ struct regmap *regmap;
++ struct clk *pclk;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+@@ -256,26 +256,14 @@ static int t9015_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+- priv->pclk = devm_clk_get(dev, "pclk");
+- if (IS_ERR(priv->pclk))
+- return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get core clock\n");
++ pclk = devm_clk_get_enabled(dev, "pclk");
++ if (IS_ERR(pclk))
++ return dev_err_probe(dev, PTR_ERR(pclk), "failed to get core clock\n");
+
+ priv->avdd = devm_regulator_get(dev, "AVDD");
+ if (IS_ERR(priv->avdd))
+ return dev_err_probe(dev, PTR_ERR(priv->avdd), "failed to AVDD\n");
+
+- ret = clk_prepare_enable(priv->pclk);
+- if (ret) {
+- dev_err(dev, "core clock enable failed\n");
+- return ret;
+- }
+-
+- ret = devm_add_action_or_reset(dev,
+- (void(*)(void *))clk_disable_unprepare,
+- priv->pclk);
+- if (ret)
+- return ret;
+-
+ ret = device_reset(dev);
+ if (ret) {
+ dev_err(dev, "reset failed\n");
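The t9015 probe rework above is the managed-clock idiom: devm_clk_get_enabled() acquires, prepares and enables the clock in one call and queues the matching disable/unprepare/put for driver detach, so neither the stored pclk pointer nor the manual devm_add_action_or_reset() cleanup is needed. Condensed sketch:

    /* Sketch; needs <linux/clk.h>, <linux/err.h> and
     * <linux/platform_device.h>.
     */
    static int example_probe(struct platform_device *pdev)
    {
            struct clk *pclk = devm_clk_get_enabled(&pdev->dev, "pclk");

            if (IS_ERR(pclk))
                    return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
                                         "failed to get core clock\n");

            /* clock is running; cleanup is automatic on unbind */
            return 0;
    }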
+diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
+index 6de533d45e7d89..ff9f6a1c95df19 100644
+--- a/sound/soc/qcom/apq8016_sbc.c
++++ b/sound/soc/qcom/apq8016_sbc.c
+@@ -147,7 +147,7 @@ static int apq8016_dai_init(struct snd_soc_pcm_runtime *rtd, int mi2s)
+
+ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ return apq8016_dai_init(rtd, cpu_dai->id);
+ }
+@@ -183,7 +183,7 @@ static int qdsp6_dai_get_lpass_id(struct snd_soc_dai *cpu_dai)
+
+ static int msm8916_qdsp6_dai_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_BP_FP);
+ return apq8016_dai_init(rtd, qdsp6_dai_get_lpass_id(cpu_dai));
+@@ -194,7 +194,7 @@ static int msm8916_qdsp6_startup(struct snd_pcm_substream *substream)
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct apq8016_sbc_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ int mi2s, ret;
+
+ mi2s = qdsp6_dai_get_lpass_id(cpu_dai);
+@@ -215,7 +215,7 @@ static void msm8916_qdsp6_shutdown(struct snd_pcm_substream *substream)
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct apq8016_sbc_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ int mi2s, ret;
+
+ mi2s = qdsp6_dai_get_lpass_id(cpu_dai);
+diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
+index 5d07b38f6d7290..cddeb47dbcf213 100644
+--- a/sound/soc/qcom/apq8096.c
++++ b/sound/soc/qcom/apq8096.c
+@@ -30,9 +30,9 @@ static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ static int msm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ u32 rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+ u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
+ int ret = 0;
+@@ -66,7 +66,7 @@ static const struct snd_soc_ops apq8096_ops = {
+
+ static int apq8096_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+
+ /*
+ * Codec SLIMBUS configuration
+diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
+index e2d8c41945fad2..f2d1e3009cd23c 100644
+--- a/sound/soc/qcom/common.c
++++ b/sound/soc/qcom/common.c
+@@ -138,7 +138,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
+ }
+ } else {
+ /* DPCM frontend */
+- link->codecs = &asoc_dummy_dlc;
++ link->codecs = &snd_soc_dummy_dlc;
+ link->num_codecs = 1;
+ link->dynamic = 1;
+ }
+@@ -189,8 +189,8 @@ static struct snd_soc_jack_pin qcom_headset_jack_pins[] = {
+ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_jack *jack, bool *jack_setup)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct snd_soc_card *card = rtd->card;
+ int rval, i;
+
+diff --git a/sound/soc/qcom/lpass-cdc-dma.c b/sound/soc/qcom/lpass-cdc-dma.c
+index 31b9f1c22beead..4d5d147b47db00 100644
+--- a/sound/soc/qcom/lpass-cdc-dma.c
++++ b/sound/soc/qcom/lpass-cdc-dma.c
+@@ -32,8 +32,8 @@ enum codec_dma_interfaces {
+ static void __lpass_get_dmactl_handle(struct snd_pcm_substream *substream, struct snd_soc_dai *dai,
+ struct lpaif_dmactl **dmactl, int *id)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -122,8 +122,8 @@ static int __lpass_get_codec_dma_intf_type(int dai_id)
+ static int __lpass_platform_codec_intf_init(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpaif_dmactl *dmactl = NULL;
+ struct device *dev = soc_runtime->dev;
+ int ret, id, codec_intf;
+@@ -171,7 +171,7 @@ static int lpass_cdc_dma_daiops_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
+
+ switch (dai->id) {
+ case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
+@@ -194,7 +194,7 @@ static void lpass_cdc_dma_daiops_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
+
+ switch (dai->id) {
+ case LPASS_CDC_DMA_RX0 ... LPASS_CDC_DMA_RX9:
+@@ -214,7 +214,7 @@ static int lpass_cdc_dma_daiops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
+ struct lpaif_dmactl *dmactl = NULL;
+ unsigned int ret, regval;
+ unsigned int channels = params_channels(params);
+@@ -257,8 +257,8 @@ static int lpass_cdc_dma_daiops_hw_params(struct snd_pcm_substream *substream,
+ static int lpass_cdc_dma_daiops_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct lpaif_dmactl *dmactl;
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct lpaif_dmactl *dmactl = NULL;
+ int ret = 0, id;
+
+ switch (cmd) {
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 39571fed40019a..73b42d9ee24471 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -1170,9 +1170,13 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm");
++ if (!res)
++ return -EINVAL;
+ drvdata->rxtx_cdc_dma_lpm_buf = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm");
++ if (!res)
++ return -EINVAL;
+ drvdata->va_cdc_dma_lpm_buf = res->start;
+ }
+
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index 990d7c33f90f51..73e3d39bd24c30 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -192,8 +192,8 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct lpass_variant *v = drvdata->variant;
+ int ret, dma_ch, dir = substream->stream;
+@@ -284,8 +284,8 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct lpass_variant *v = drvdata->variant;
+ struct lpass_pcm_data *data;
+@@ -321,8 +321,8 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
+ static struct lpaif_dmactl *__lpass_get_dmactl_handle(const struct snd_pcm_substream *substream,
+ struct snd_soc_component *component)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct lpaif_dmactl *dmactl = NULL;
+
+@@ -353,8 +353,8 @@ static struct lpaif_dmactl *__lpass_get_dmactl_handle(const struct snd_pcm_subst
+ static int __lpass_get_id(const struct snd_pcm_substream *substream,
+ struct snd_soc_component *component)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -388,8 +388,8 @@ static int __lpass_get_id(const struct snd_pcm_substream *substream,
+ static struct regmap *__lpass_get_regmap_handle(const struct snd_pcm_substream *substream,
+ struct snd_soc_component *component)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct regmap *map = NULL;
+
+@@ -416,8 +416,8 @@ static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -569,8 +569,8 @@ static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
+ static int lpass_platform_pcmops_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -597,8 +597,8 @@ static int lpass_platform_pcmops_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -660,8 +660,8 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ int cmd)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -859,8 +859,8 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+ struct snd_pcm_runtime *rt = substream->runtime;
+ struct lpass_pcm_data *pcm_data = rt->private_data;
+@@ -911,8 +911,8 @@ static int lpass_platform_pcmops_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ unsigned int dai_id = cpu_dai->driver->id;
+
+ if (is_cdc_dma_port(dai_id))
+@@ -926,8 +926,8 @@ static irqreturn_t lpass_dma_interrupt_handler(
+ struct lpass_data *drvdata,
+ int chan, u32 interrupts)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ struct lpass_variant *v = drvdata->variant;
+ irqreturn_t ret = IRQ_NONE;
+ int rv;
+@@ -1169,7 +1169,7 @@ static int lpass_platform_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *soc_runtime)
+ {
+ struct snd_pcm *pcm = soc_runtime->pcm;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_runtime, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_runtime, 0);
+ unsigned int dai_id = cpu_dai->driver->id;
+
+ size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
+diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
+index c90db6daabbd8c..739856a00017c5 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
+@@ -332,7 +332,7 @@ static int q6apm_dai_open(struct snd_soc_component *component,
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_prtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_prtd, 0);
+ struct device *dev = component->dev;
+ struct q6apm_dai_data *pdata;
+ struct q6apm_dai_rtd *prtd;
+@@ -478,7 +478,7 @@ static int q6apm_dai_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
+ {
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6apm_dai_rtd *prtd;
+ struct q6apm_dai_data *pdata;
+diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+index 7ad604b80e25ec..6511f0a08de161 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -140,14 +140,17 @@ static void q6apm_lpass_dai_shutdown(struct snd_pcm_substream *substream, struct
+ struct q6apm_lpass_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc;
+
+- if (!dai_data->is_port_started[dai->id])
+- return;
+- rc = q6apm_graph_stop(dai_data->graph[dai->id]);
+- if (rc < 0)
+- dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
++ if (dai_data->is_port_started[dai->id]) {
++ rc = q6apm_graph_stop(dai_data->graph[dai->id]);
++ dai_data->is_port_started[dai->id] = false;
++ if (rc < 0)
++ dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
++ }
+
+- q6apm_graph_close(dai_data->graph[dai->id]);
+- dai_data->is_port_started[dai->id] = false;
++ if (dai_data->graph[dai->id]) {
++ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ }
+ }
+
+ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+@@ -162,8 +165,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ q6apm_graph_stop(dai_data->graph[dai->id]);
+ dai_data->is_port_started[dai->id] = false;
+
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ }
+ }
+
+ /**
+@@ -182,26 +187,29 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+
+ cfg->direction = substream->stream;
+ rc = q6apm_graph_media_format_pcm(dai_data->graph[dai->id], cfg);
+-
+ if (rc) {
+ dev_err(dai->dev, "Failed to set media format %d\n", rc);
+- return rc;
++ goto err;
+ }
+
+ rc = q6apm_graph_prepare(dai_data->graph[dai->id]);
+ if (rc) {
+ dev_err(dai->dev, "Failed to prepare Graph %d\n", rc);
+- return rc;
++ goto err;
+ }
+
+ rc = q6apm_graph_start(dai_data->graph[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start APM port %x\n", dai->id);
+- return rc;
++ goto err;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
++err:
++ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ return rc;
+ }
+
+ static int q6apm_lpass_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
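The q6apm-lpass-dais rework above makes teardown idempotent: both the shutdown and failed-prepare paths now close the APM graph only while its handle is still set, and clear the handle afterwards, so a second pass cannot stop or close a stale graph. The generic close-once idiom, with hypothetical types:

    /* Sketch only -- my_graph and my_graph_close() are hypothetical. */
    static void graph_close_once(struct my_graph **graph)
    {
            if (*graph) {
                    my_graph_close(*graph);  /* hypothetical close routine */
                    *graph = NULL;           /* later calls become no-ops */
            }
    }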
+diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
+index fe0666e9fd2386..5e14cd0a38deb6 100644
+--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
+@@ -218,7 +218,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+- struct snd_soc_pcm_runtime *soc_prtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *soc_prtd = snd_soc_substream_to_rtd(substream);
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = component->dev;
+@@ -350,8 +350,8 @@ static int q6asm_dai_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+- struct snd_soc_pcm_runtime *soc_prtd = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(soc_prtd, 0);
++ struct snd_soc_pcm_runtime *soc_prtd = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(soc_prtd, 0);
+ struct q6asm_dai_rtd *prtd;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = component->dev;
+@@ -443,7 +443,7 @@ static int q6asm_dai_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+- struct snd_soc_pcm_runtime *soc_prtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *soc_prtd = snd_soc_substream_to_rtd(substream);
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+
+ if (prtd->audio_client) {
+@@ -603,7 +603,7 @@ static int q6asm_dai_compr_open(struct snd_soc_component *component,
+ {
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct q6asm_dai_data *pdata;
+ struct device *dev = component->dev;
+ struct q6asm_dai_rtd *prtd;
+diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
+index bba07899f8fc19..c583faae3a3e4d 100644
+--- a/sound/soc/qcom/qdsp6/q6routing.c
++++ b/sound/soc/qcom/qdsp6/q6routing.c
+@@ -1048,9 +1048,9 @@ static int routing_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct msm_routing_data *data = dev_get_drvdata(component->dev);
+- unsigned int be_id = asoc_rtd_to_cpu(rtd, 0)->id;
++ unsigned int be_id = snd_soc_rtd_to_cpu(rtd, 0)->id;
+ struct session_data *session;
+ int path_type;
+
+diff --git a/sound/soc/qcom/sc7180.c b/sound/soc/qcom/sc7180.c
+index 57c5f35dfcc51c..d1fd40e3f7a9d8 100644
+--- a/sound/soc/qcom/sc7180.c
++++ b/sound/soc/qcom/sc7180.c
+@@ -57,7 +57,7 @@ static int sc7180_headset_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_card *card = rtd->card;
+ struct sc7180_snd_data *pdata = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ struct snd_jack *jack;
+ int rval;
+@@ -93,7 +93,7 @@ static int sc7180_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_card *card = rtd->card;
+ struct sc7180_snd_data *pdata = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ struct snd_jack *jack;
+ int rval;
+@@ -117,7 +117,7 @@ static int sc7180_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+
+ static int sc7180_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case MI2S_PRIMARY:
+@@ -139,8 +139,8 @@ static int sc7180_snd_startup(struct snd_pcm_substream *substream)
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct sc7180_snd_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ int pll_id, pll_source, pll_in, pll_out, clk_id, ret;
+
+ if (!strcmp(codec_dai->name, "rt5682-aif1")) {
+@@ -225,7 +225,7 @@ static void sc7180_snd_shutdown(struct snd_pcm_substream *substream)
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct sc7180_snd_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case MI2S_PRIMARY:
+@@ -249,7 +249,7 @@ static void sc7180_snd_shutdown(struct snd_pcm_substream *substream)
+
+ static int sc7180_adau7002_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case MI2S_PRIMARY:
+@@ -269,8 +269,8 @@ static int sc7180_adau7002_init(struct snd_soc_pcm_runtime *rtd)
+ static int sc7180_adau7002_snd_startup(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ switch (cpu_dai->id) {
+diff --git a/sound/soc/qcom/sc7280.c b/sound/soc/qcom/sc7280.c
+index 43010e4e224202..c23df4c8f34175 100644
+--- a/sound/soc/qcom/sc7280.c
++++ b/sound/soc/qcom/sc7280.c
+@@ -58,8 +58,8 @@ static int sc7280_headset_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_card *card = rtd->card;
+ struct sc7280_snd_data *pdata = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ struct snd_jack *jack;
+ int rval, i;
+@@ -115,7 +115,7 @@ static int sc7280_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_card *card = rtd->card;
+ struct sc7280_snd_data *pdata = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
+ struct snd_jack *jack;
+ int rval;
+@@ -137,8 +137,8 @@ static int sc7280_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+
+ static int sc7280_rt5682_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ struct snd_soc_card *card = rtd->card;
+ struct sc7280_snd_data *data = snd_soc_card_get_drvdata(card);
+ int ret;
+@@ -176,7 +176,7 @@ static int sc7280_rt5682_init(struct snd_soc_pcm_runtime *rtd)
+
+ static int sc7280_init(struct snd_soc_pcm_runtime *rtd)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case MI2S_PRIMARY:
+@@ -205,7 +205,7 @@ static int sc7280_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
+- const struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ const struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sc7280_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+ struct sdw_stream_runtime *sruntime;
+ int i;
+@@ -236,7 +236,7 @@ static int sc7280_snd_hw_params(struct snd_pcm_substream *substream,
+ static int sc7280_snd_swr_prepare(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- const struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ const struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sc7280_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+ int ret;
+@@ -267,7 +267,7 @@ static int sc7280_snd_swr_prepare(struct snd_pcm_substream *substream)
+ static int sc7280_snd_prepare(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- const struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ const struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case LPASS_CDC_DMA_RX0:
+@@ -287,7 +287,7 @@ static int sc7280_snd_hw_free(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sc7280_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- const struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ const struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+ switch (cpu_dai->id) {
+@@ -313,7 +313,7 @@ static void sc7280_snd_shutdown(struct snd_pcm_substream *substream)
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct sc7280_snd_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case MI2S_PRIMARY:
+@@ -338,8 +338,8 @@ static int sc7280_snd_startup(struct snd_pcm_substream *substream)
+ unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_CBS_CFS;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ int ret = 0;
+
+ switch (cpu_dai->id) {
+diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c
+index 14d9fea33d16ab..6e5f194bc34b06 100644
+--- a/sound/soc/qcom/sc8280xp.c
++++ b/sound/soc/qcom/sc8280xp.c
+@@ -27,6 +27,25 @@ struct sc8280xp_snd_data {
+ static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct sc8280xp_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
++ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_card *card = rtd->card;
++
++ switch (cpu_dai->id) {
++ case WSA_CODEC_DMA_RX_0:
++ case WSA_CODEC_DMA_RX_1:
++ /*
++ * Set limit of -3 dB on Digital Volume and 0 dB on PA Volume
++ * to reduce the risk of speaker damage until we have active
++ * speaker protection in place.
++ */
++ snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 81);
++ snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 81);
++ snd_soc_limit_volume(card, "SpkrLeft PA Volume", 17);
++ snd_soc_limit_volume(card, "SpkrRight PA Volume", 17);
++ break;
++ default:
++ break;
++ }
+
+ return qcom_snd_wcd_jack_setup(rtd, &data->jack, &data->jack_setup);
+ }
+@@ -34,7 +53,7 @@ static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd)
+ static int sc8280xp_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+@@ -62,7 +81,7 @@ static int sc8280xp_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sc8280xp_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+
+ return qcom_snd_sdw_hw_params(substream, params, &pdata->sruntime[cpu_dai->id]);
+@@ -71,7 +90,7 @@ static int sc8280xp_snd_hw_params(struct snd_pcm_substream *substream,
+ static int sc8280xp_snd_prepare(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sc8280xp_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+@@ -83,7 +102,7 @@ static int sc8280xp_snd_hw_free(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sc8280xp_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+ return qcom_snd_sdw_hw_free(substream, sruntime,
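sc8280xp_snd_init() above caps the WSA speaker volume controls at card init. snd_soc_limit_volume() lowers the advertised maximum of an existing mixer control, so userspace simply cannot request a higher value; per the comment in the hunk, the chosen limits correspond to -3 dB digital and 0 dB PA headroom. Call shape, with the values taken from the hunk:

    /* Sketch; snd_soc_limit_volume() returns 0 on success. */
    static int limit_speaker_volumes(struct snd_soc_card *card)
    {
            int ret = snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 81);

            if (ret)
                    return ret;
            return snd_soc_limit_volume(card, "SpkrLeft PA Volume", 17);
    }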
+diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
+index 29d23fe5dfa2d5..25b964dea6c56c 100644
+--- a/sound/soc/qcom/sdm845.c
++++ b/sound/soc/qcom/sdm845.c
+@@ -58,8 +58,8 @@ static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
+ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai;
+ struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+ u32 rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+@@ -98,8 +98,8 @@ static int sdm845_slim_snd_hw_params(struct snd_pcm_substream *substream,
+ static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai;
+ int ret = 0, j;
+ int channels, slot_width;
+@@ -183,9 +183,9 @@ static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
+ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ int ret = 0;
+
+ switch (cpu_dai->id) {
+@@ -233,8 +233,8 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
+ {
+ struct snd_soc_component *component;
+ struct snd_soc_card *card = rtd->card;
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai_link *link = rtd->dai_link;
+ struct snd_jack *jack;
+@@ -331,11 +331,11 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
+ {
+ unsigned int fmt = SND_SOC_DAIFMT_BP_FP;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_BC_FC;
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_card *card = rtd->card;
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+ int j;
+ int ret;
+
+@@ -421,10 +421,10 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
+
+ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_card *card = rtd->card;
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case PRIMARY_MI2S_RX:
+@@ -467,9 +467,9 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
+
+ static int sdm845_snd_prepare(struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+ int ret;
+
+@@ -506,9 +506,9 @@ static int sdm845_snd_prepare(struct snd_pcm_substream *substream)
+
+ static int sdm845_snd_hw_free(struct snd_pcm_substream *substream)
+ {
+- struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct sdm845_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+ if (sruntime && data->stream_prepared[cpu_dai->id]) {
+diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
+index 1a41419c7eb8f7..ce89c0a33ef058 100644
+--- a/sound/soc/qcom/sdw.c
++++ b/sound/soc/qcom/sdw.c
+@@ -12,7 +12,7 @@ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+ bool *stream_prepared)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ int ret;
+
+ if (!sruntime)
+@@ -64,7 +64,7 @@ int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime;
+ int i;
+
+@@ -93,7 +93,7 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *sruntime, bool *stream_prepared)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case WSA_CODEC_DMA_RX_0:
+diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
+index 9626a9ef78c233..6558bf2e14e83d 100644
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -51,8 +51,8 @@ static int sm8250_snd_startup(struct snd_pcm_substream *substream)
+ unsigned int fmt = SND_SOC_DAIFMT_BP_FP;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_BC_FC;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+- struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
+
+ switch (cpu_dai->id) {
+ case TERTIARY_MI2S_RX:
+@@ -73,7 +73,7 @@ static int sm8250_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sm8250_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card);
+
+ return qcom_snd_sdw_hw_params(substream, params, &pdata->sruntime[cpu_dai->id]);
+@@ -82,7 +82,7 @@ static int sm8250_snd_hw_params(struct snd_pcm_substream *substream,
+ static int sm8250_snd_prepare(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sm8250_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+@@ -94,7 +94,7 @@ static int sm8250_snd_hw_free(struct snd_pcm_substream *substream)
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sm8250_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
++ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id];
+
+ return qcom_snd_sdw_hw_free(substream, sruntime,
+diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
+index 80c9cf2f254a76..553165f11d3069 100644
+--- a/sound/soc/qcom/storm.c
++++ b/sound/soc/qcom/storm.c
+@@ -19,7 +19,7 @@
+ static int storm_ops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+- struct snd_soc_pcm_runtime *soc_runtime = asoc_substream_to_rtd(substream);
++ struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_card *card = soc_runtime->card;
+ snd_pcm_format_t format = params_format(params);
+ unsigned int rate = params_rate(params);
+@@ -39,7 +39,7 @@ static int storm_ops_hw_params(struct snd_pcm_substream *substream,
+ */
+ sysclk_freq = rate * bitwidth * 2 * STORM_SYSCLK_MULT;
+
+- ret = snd_soc_dai_set_sysclk(asoc_rtd_to_cpu(soc_runtime, 0), 0, sysclk_freq, 0);
++ ret = snd_soc_dai_set_sysclk(snd_soc_rtd_to_cpu(soc_runtime, 0), 0, sysclk_freq, 0);
+ if (ret) {
+ dev_err(card->dev, "error setting sysclk to %u: %d\n",
+ sysclk_freq, ret);
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index d3700f3c98e654..e6a6eabc47e5bb 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -27,8 +27,6 @@
+ #define DEFAULT_MCLK_FS 256
+ #define CH_GRP_MAX 4 /* The max channel 8 / 2 */
+ #define MULTIPLEX_CH_MAX 10
+-#define CLK_PPM_MIN -1000
+-#define CLK_PPM_MAX 1000
+
+ #define TRCM_TXRX 0
+ #define TRCM_TX 1
+@@ -55,20 +53,6 @@ struct rk_i2s_tdm_dev {
+ struct clk *hclk;
+ struct clk *mclk_tx;
+ struct clk *mclk_rx;
+- /* The mclk_tx_src is parent of mclk_tx */
+- struct clk *mclk_tx_src;
+- /* The mclk_rx_src is parent of mclk_rx */
+- struct clk *mclk_rx_src;
+- /*
+- * The mclk_root0 and mclk_root1 are root parent and supplies for
+- * the different FS.
+- *
+- * e.g:
+- * mclk_root0 is VPLL0, used for FS=48000Hz
+- * mclk_root1 is VPLL1, used for FS=44100Hz
+- */
+- struct clk *mclk_root0;
+- struct clk *mclk_root1;
+ struct regmap *regmap;
+ struct regmap *grf;
+ struct snd_dmaengine_dai_dma_data capture_dma_data;
+@@ -78,19 +62,11 @@ struct rk_i2s_tdm_dev {
+ struct rk_i2s_soc_data *soc_data;
+ bool is_master_mode;
+ bool io_multiplex;
+- bool mclk_calibrate;
+ bool tdm_mode;
+- unsigned int mclk_rx_freq;
+- unsigned int mclk_tx_freq;
+- unsigned int mclk_root0_freq;
+- unsigned int mclk_root1_freq;
+- unsigned int mclk_root0_initial_freq;
+- unsigned int mclk_root1_initial_freq;
+ unsigned int frame_width;
+ unsigned int clk_trcm;
+ unsigned int i2s_sdis[CH_GRP_MAX];
+ unsigned int i2s_sdos[CH_GRP_MAX];
+- int clk_ppm;
+ int refcount;
+ spinlock_t lock; /* xfer lock */
+ bool has_playback;
+@@ -116,12 +92,6 @@ static void i2s_tdm_disable_unprepare_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
+ {
+ clk_disable_unprepare(i2s_tdm->mclk_tx);
+ clk_disable_unprepare(i2s_tdm->mclk_rx);
+- if (i2s_tdm->mclk_calibrate) {
+- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
+- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
+- clk_disable_unprepare(i2s_tdm->mclk_root0);
+- clk_disable_unprepare(i2s_tdm->mclk_root1);
+- }
+ }
+
+ /**
+@@ -144,29 +114,9 @@ static int i2s_tdm_prepare_enable_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
+ ret = clk_prepare_enable(i2s_tdm->mclk_rx);
+ if (ret)
+ goto err_mclk_rx;
+- if (i2s_tdm->mclk_calibrate) {
+- ret = clk_prepare_enable(i2s_tdm->mclk_tx_src);
+- if (ret)
+- goto err_mclk_rx;
+- ret = clk_prepare_enable(i2s_tdm->mclk_rx_src);
+- if (ret)
+- goto err_mclk_rx_src;
+- ret = clk_prepare_enable(i2s_tdm->mclk_root0);
+- if (ret)
+- goto err_mclk_root0;
+- ret = clk_prepare_enable(i2s_tdm->mclk_root1);
+- if (ret)
+- goto err_mclk_root1;
+- }
+
+ return 0;
+
+-err_mclk_root1:
+- clk_disable_unprepare(i2s_tdm->mclk_root0);
+-err_mclk_root0:
+- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
+-err_mclk_rx_src:
+- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
+ err_mclk_rx:
+ clk_disable_unprepare(i2s_tdm->mclk_tx);
+ err_mclk_tx:
+@@ -566,159 +516,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
+ I2S_XFER_RXS_START);
+ }
+
+-static int rockchip_i2s_tdm_clk_set_rate(struct rk_i2s_tdm_dev *i2s_tdm,
+- struct clk *clk, unsigned long rate,
+- int ppm)
+-{
+- unsigned long rate_target;
+- int delta, ret;
+-
+- if (ppm == i2s_tdm->clk_ppm)
+- return 0;
+-
+- if (ppm < 0)
+- delta = -1;
+- else
+- delta = 1;
+-
+- delta *= (int)div64_u64((u64)rate * (u64)abs(ppm) + 500000,
+- 1000000);
+-
+- rate_target = rate + delta;
+-
+- if (!rate_target)
+- return -EINVAL;
+-
+- ret = clk_set_rate(clk, rate_target);
+- if (ret)
+- return ret;
+-
+- i2s_tdm->clk_ppm = ppm;
+-
+- return 0;
+-}
+-
+-static int rockchip_i2s_tdm_calibrate_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
+- struct snd_pcm_substream *substream,
+- unsigned int lrck_freq)
+-{
+- struct clk *mclk_root;
+- struct clk *mclk_parent;
+- unsigned int mclk_root_freq;
+- unsigned int mclk_root_initial_freq;
+- unsigned int mclk_parent_freq;
+- unsigned int div, delta;
+- u64 ppm;
+- int ret;
+-
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- mclk_parent = i2s_tdm->mclk_tx_src;
+- else
+- mclk_parent = i2s_tdm->mclk_rx_src;
+-
+- switch (lrck_freq) {
+- case 8000:
+- case 16000:
+- case 24000:
+- case 32000:
+- case 48000:
+- case 64000:
+- case 96000:
+- case 192000:
+- mclk_root = i2s_tdm->mclk_root0;
+- mclk_root_freq = i2s_tdm->mclk_root0_freq;
+- mclk_root_initial_freq = i2s_tdm->mclk_root0_initial_freq;
+- mclk_parent_freq = DEFAULT_MCLK_FS * 192000;
+- break;
+- case 11025:
+- case 22050:
+- case 44100:
+- case 88200:
+- case 176400:
+- mclk_root = i2s_tdm->mclk_root1;
+- mclk_root_freq = i2s_tdm->mclk_root1_freq;
+- mclk_root_initial_freq = i2s_tdm->mclk_root1_initial_freq;
+- mclk_parent_freq = DEFAULT_MCLK_FS * 176400;
+- break;
+- default:
+- dev_err(i2s_tdm->dev, "Invalid LRCK frequency: %u Hz\n",
+- lrck_freq);
+- return -EINVAL;
+- }
+-
+- ret = clk_set_parent(mclk_parent, mclk_root);
+- if (ret)
+- return ret;
+-
+- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, mclk_root,
+- mclk_root_freq, 0);
+- if (ret)
+- return ret;
+-
+- delta = abs(mclk_root_freq % mclk_parent_freq - mclk_parent_freq);
+- ppm = div64_u64((uint64_t)delta * 1000000, (uint64_t)mclk_root_freq);
+-
+- if (ppm) {
+- div = DIV_ROUND_CLOSEST(mclk_root_initial_freq, mclk_parent_freq);
+- if (!div)
+- return -EINVAL;
+-
+- mclk_root_freq = mclk_parent_freq * round_up(div, 2);
+-
+- ret = clk_set_rate(mclk_root, mclk_root_freq);
+- if (ret)
+- return ret;
+-
+- i2s_tdm->mclk_root0_freq = clk_get_rate(i2s_tdm->mclk_root0);
+- i2s_tdm->mclk_root1_freq = clk_get_rate(i2s_tdm->mclk_root1);
+- }
+-
+- return clk_set_rate(mclk_parent, mclk_parent_freq);
+-}
+-
+-static int rockchip_i2s_tdm_set_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
+- struct snd_pcm_substream *substream,
+- struct clk **mclk)
+-{
+- unsigned int mclk_freq;
+- int ret;
+-
+- if (i2s_tdm->clk_trcm) {
+- if (i2s_tdm->mclk_tx_freq != i2s_tdm->mclk_rx_freq) {
+- dev_err(i2s_tdm->dev,
+- "clk_trcm, tx: %d and rx: %d should be the same\n",
+- i2s_tdm->mclk_tx_freq,
+- i2s_tdm->mclk_rx_freq);
+- return -EINVAL;
+- }
+-
+- ret = clk_set_rate(i2s_tdm->mclk_tx, i2s_tdm->mclk_tx_freq);
+- if (ret)
+- return ret;
+-
+- ret = clk_set_rate(i2s_tdm->mclk_rx, i2s_tdm->mclk_rx_freq);
+- if (ret)
+- return ret;
+-
+- /* mclk_rx is also ok. */
+- *mclk = i2s_tdm->mclk_tx;
+- } else {
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+- *mclk = i2s_tdm->mclk_tx;
+- mclk_freq = i2s_tdm->mclk_tx_freq;
+- } else {
+- *mclk = i2s_tdm->mclk_rx;
+- mclk_freq = i2s_tdm->mclk_rx_freq;
+- }
+-
+- ret = clk_set_rate(*mclk, mclk_freq);
+- if (ret)
+- return ret;
+- }
+-
+- return 0;
+-}
+-
+ static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
+ {
+ if (substream_capture) {
+@@ -855,19 +652,26 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+ struct rk_i2s_tdm_dev *i2s_tdm = to_info(dai);
+- struct clk *mclk;
+- int ret = 0;
+ unsigned int val = 0;
+ unsigned int mclk_rate, bclk_rate, div_bclk = 4, div_lrck = 64;
++ int err;
+
+ if (i2s_tdm->is_master_mode) {
+- if (i2s_tdm->mclk_calibrate)
+- rockchip_i2s_tdm_calibrate_mclk(i2s_tdm, substream,
+- params_rate(params));
++ struct clk *mclk;
++
++ if (i2s_tdm->clk_trcm == TRCM_TX) {
++ mclk = i2s_tdm->mclk_tx;
++ } else if (i2s_tdm->clk_trcm == TRCM_RX) {
++ mclk = i2s_tdm->mclk_rx;
++ } else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ mclk = i2s_tdm->mclk_tx;
++ } else {
++ mclk = i2s_tdm->mclk_rx;
++ }
+
+- ret = rockchip_i2s_tdm_set_mclk(i2s_tdm, substream, &mclk);
+- if (ret)
+- return ret;
++ err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
++ if (err)
++ return err;
+
+ mclk_rate = clk_get_rate(mclk);
+ bclk_rate = i2s_tdm->frame_width * params_rate(params);
+@@ -975,96 +779,6 @@ static int rockchip_i2s_tdm_trigger(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
+-static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
+- unsigned int freq, int dir)
+-{
+- struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
+-
+- /* Put set mclk rate into rockchip_i2s_tdm_set_mclk() */
+- if (i2s_tdm->clk_trcm) {
+- i2s_tdm->mclk_tx_freq = freq;
+- i2s_tdm->mclk_rx_freq = freq;
+- } else {
+- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+- i2s_tdm->mclk_tx_freq = freq;
+- else
+- i2s_tdm->mclk_rx_freq = freq;
+- }
+-
+- dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
+- stream ? "rx" : "tx", freq);
+-
+- return 0;
+-}
+-
+-static int rockchip_i2s_tdm_clk_compensation_info(struct snd_kcontrol *kcontrol,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+- uinfo->count = 1;
+- uinfo->value.integer.min = CLK_PPM_MIN;
+- uinfo->value.integer.max = CLK_PPM_MAX;
+- uinfo->value.integer.step = 1;
+-
+- return 0;
+-}
+-
+-static int rockchip_i2s_tdm_clk_compensation_get(struct snd_kcontrol *kcontrol,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
+-
+- ucontrol->value.integer.value[0] = i2s_tdm->clk_ppm;
+-
+- return 0;
+-}
+-
+-static int rockchip_i2s_tdm_clk_compensation_put(struct snd_kcontrol *kcontrol,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
+- int ret = 0, ppm = 0;
+- int changed = 0;
+- unsigned long old_rate;
+-
+- if (ucontrol->value.integer.value[0] < CLK_PPM_MIN ||
+- ucontrol->value.integer.value[0] > CLK_PPM_MAX)
+- return -EINVAL;
+-
+- ppm = ucontrol->value.integer.value[0];
+-
+- old_rate = clk_get_rate(i2s_tdm->mclk_root0);
+- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root0,
+- i2s_tdm->mclk_root0_freq, ppm);
+- if (ret)
+- return ret;
+- if (old_rate != clk_get_rate(i2s_tdm->mclk_root0))
+- changed = 1;
+-
+- if (clk_is_match(i2s_tdm->mclk_root0, i2s_tdm->mclk_root1))
+- return changed;
+-
+- old_rate = clk_get_rate(i2s_tdm->mclk_root1);
+- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root1,
+- i2s_tdm->mclk_root1_freq, ppm);
+- if (ret)
+- return ret;
+- if (old_rate != clk_get_rate(i2s_tdm->mclk_root1))
+- changed = 1;
+-
+- return changed;
+-}
+-
+-static struct snd_kcontrol_new rockchip_i2s_tdm_compensation_control = {
+- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+- .name = "PCM Clock Compensation in PPM",
+- .info = rockchip_i2s_tdm_clk_compensation_info,
+- .get = rockchip_i2s_tdm_clk_compensation_get,
+- .put = rockchip_i2s_tdm_clk_compensation_put,
+-};
+-
+ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
+ {
+ struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
+@@ -1074,9 +788,6 @@ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
+ if (i2s_tdm->has_playback)
+ snd_soc_dai_dma_data_set_playback(dai, &i2s_tdm->playback_dma_data);
+
+- if (i2s_tdm->mclk_calibrate)
+- snd_soc_add_dai_controls(dai, &rockchip_i2s_tdm_compensation_control, 1);
+-
+ return 0;
+ }
+
+@@ -1117,7 +828,6 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
+ .probe = rockchip_i2s_tdm_dai_probe,
+ .hw_params = rockchip_i2s_tdm_hw_params,
+ .set_bclk_ratio = rockchip_i2s_tdm_set_bclk_ratio,
+- .set_sysclk = rockchip_i2s_tdm_set_sysclk,
+ .set_fmt = rockchip_i2s_tdm_set_fmt,
+ .set_tdm_slot = rockchip_dai_tdm_slot,
+ .trigger = rockchip_i2s_tdm_trigger,
+@@ -1446,35 +1156,6 @@ static void rockchip_i2s_tdm_path_config(struct rk_i2s_tdm_dev *i2s_tdm,
+ rockchip_i2s_tdm_tx_path_config(i2s_tdm, num);
+ }
+
+-static int rockchip_i2s_tdm_get_calibrate_mclks(struct rk_i2s_tdm_dev *i2s_tdm)
+-{
+- int num_mclks = 0;
+-
+- i2s_tdm->mclk_tx_src = devm_clk_get(i2s_tdm->dev, "mclk_tx_src");
+- if (!IS_ERR(i2s_tdm->mclk_tx_src))
+- num_mclks++;
+-
+- i2s_tdm->mclk_rx_src = devm_clk_get(i2s_tdm->dev, "mclk_rx_src");
+- if (!IS_ERR(i2s_tdm->mclk_rx_src))
+- num_mclks++;
+-
+- i2s_tdm->mclk_root0 = devm_clk_get(i2s_tdm->dev, "mclk_root0");
+- if (!IS_ERR(i2s_tdm->mclk_root0))
+- num_mclks++;
+-
+- i2s_tdm->mclk_root1 = devm_clk_get(i2s_tdm->dev, "mclk_root1");
+- if (!IS_ERR(i2s_tdm->mclk_root1))
+- num_mclks++;
+-
+- if (num_mclks < 4 && num_mclks != 0)
+- return -ENOENT;
+-
+- if (num_mclks == 4)
+- i2s_tdm->mclk_calibrate = 1;
+-
+- return 0;
+-}
+-
+ static int rockchip_i2s_tdm_path_prepare(struct rk_i2s_tdm_dev *i2s_tdm,
+ struct device_node *np,
+ bool is_rx_path)
+@@ -1618,11 +1299,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
+ i2s_tdm->io_multiplex =
+ of_property_read_bool(node, "rockchip,io-multiplex");
+
+- ret = rockchip_i2s_tdm_get_calibrate_mclks(i2s_tdm);
+- if (ret)
+- return dev_err_probe(i2s_tdm->dev, ret,
+- "mclk-calibrate clocks missing");
+-
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(regs)) {
+ return dev_err_probe(i2s_tdm->dev, PTR_ERR(regs),
+@@ -1675,13 +1351,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
+ goto err_disable_hclk;
+ }
+
+- if (i2s_tdm->mclk_calibrate) {
+- i2s_tdm->mclk_root0_initial_freq = clk_get_rate(i2s_tdm->mclk_root0);
+- i2s_tdm->mclk_root1_initial_freq = clk_get_rate(i2s_tdm->mclk_root1);
+- i2s_tdm->mclk_root0_freq = i2s_tdm->mclk_root0_initial_freq;
+- i2s_tdm->mclk_root1_freq = i2s_tdm->mclk_root1_initial_freq;
+- }
+-
+ pm_runtime_enable(&pdev->dev);
+
+ regmap_update_bits(i2s_tdm->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
+diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c
+index fe79eb90e1e5cf..1588b93cc35d01 100644
+--- a/sound/soc/sh/rz-ssi.c
++++ b/sound/soc/sh/rz-ssi.c
+@@ -1016,7 +1016,7 @@ static int rz_ssi_probe(struct platform_device *pdev)
+ dev_name(&pdev->dev), ssi);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+- "irq request error (dma_tx)\n");
++ "irq request error (dma_rt)\n");
+ } else {
+ if (ssi->irq_tx < 0)
+ return ssi->irq_tx;
+diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
+index 285ab4c9c71683..8a2f163da6bc9e 100644
+--- a/sound/soc/soc-card.c
++++ b/sound/soc/soc-card.c
+@@ -5,6 +5,9 @@
+ // Copyright (C) 2019 Renesas Electronics Corp.
+ // Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ //
++
++#include <linux/lockdep.h>
++#include <linux/rwsem.h>
+ #include <sound/soc.h>
+ #include <sound/jack.h>
+
+@@ -26,12 +29,15 @@ static inline int _soc_card_ret(struct snd_soc_card *card,
+ return ret;
+ }
+
+-struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+- const char *name)
++struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
++ const char *name)
+ {
+ struct snd_card *card = soc_card->snd_card;
+ struct snd_kcontrol *kctl;
+
++ /* must be held read or write */
++ lockdep_assert_held(&card->controls_rwsem);
++
+ if (unlikely(!name))
+ return NULL;
+
+@@ -40,6 +46,20 @@ struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ return kctl;
+ return NULL;
+ }
++EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol_locked);
++
++struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
++ const char *name)
++{
++ struct snd_card *card = soc_card->snd_card;
++ struct snd_kcontrol *kctl;
++
++ down_read(&card->controls_rwsem);
++ kctl = snd_soc_card_get_kcontrol_locked(soc_card, name);
++ up_read(&card->controls_rwsem);
++
++ return kctl;
++}
+ EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);
+
+ static int jack_new(struct snd_soc_card *card, const char *id, int type,
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 9de98c01d81517..e65fe3a7c3e42c 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1105,6 +1105,9 @@ static int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
+ if (!snd_soc_is_matching_component(platform, component))
+ continue;
+
++ if (snd_soc_component_is_dummy(component) && component->num_dai)
++ continue;
++
+ snd_soc_rtd_add_component(rtd, component);
+ }
+ }
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index 3f33f0630ad8aa..9a828e55c4f9e7 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -658,6 +658,10 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ ret = soc_dai_trigger(dai, substream, cmd);
+ if (ret < 0)
+ break;
++
++ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 0, substream->stream);
++
+ soc_dai_mark_push(dai, substream, trigger);
+ }
+ break;
+@@ -668,6 +672,9 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ if (rollback && !soc_dai_mark_match(dai, substream, trigger))
+ continue;
+
++ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 1, substream->stream);
++
+ r = soc_dai_trigger(dai, substream, cmd);
+ if (r < 0)
+ ret = r; /* use last ret */
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 312e5557983156..7729f8f4d5e610 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
+ dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
+ break;
+ case snd_soc_dapm_clock_supply:
+- w->clk = devm_clk_get(dapm->dev, w->name);
++ w->clk = devm_clk_get(dapm->dev, widget->name);
+ if (IS_ERR(w->clk)) {
+ ret = PTR_ERR(w->clk);
+ goto request_failed;
+@@ -4018,6 +4018,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
+
+ case SND_SOC_DAPM_POST_PMD:
+ kfree(substream->runtime);
++ substream->runtime = NULL;
+ break;
+
+ default:
+diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
+index cad222eb9a293a..30bb1b018aa89a 100644
+--- a/sound/soc/soc-generic-dmaengine-pcm.c
++++ b/sound/soc/soc-generic-dmaengine-pcm.c
+@@ -318,6 +318,12 @@ static int dmaengine_copy(struct snd_soc_component *component,
+ return 0;
+ }
+
++static int dmaengine_pcm_sync_stop(struct snd_soc_component *component,
++ struct snd_pcm_substream *substream)
++{
++ return snd_dmaengine_pcm_sync_stop(substream);
++}
++
+ static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ .name = SND_DMAENGINE_PCM_DRV_NAME,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+@@ -327,6 +333,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ .trigger = dmaengine_pcm_trigger,
+ .pointer = dmaengine_pcm_pointer,
+ .pcm_construct = dmaengine_pcm_new,
++ .sync_stop = dmaengine_pcm_sync_stop,
+ };
+
+ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+@@ -339,6 +346,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+ .pointer = dmaengine_pcm_pointer,
+ .copy = dmaengine_copy,
+ .pcm_construct = dmaengine_pcm_new,
++ .sync_stop = dmaengine_pcm_sync_stop,
+ };
+
+ static const char * const dmaengine_pcm_dma_channel_names[] = {
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 55b009d3c68154..b27e89ff6a1673 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -263,7 +263,7 @@ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
+ int max = mc->max;
+ int min = mc->min;
+ int sign_bit = mc->sign_bit;
+- unsigned int mask = (1 << fls(max)) - 1;
++ unsigned int mask = (1ULL << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ int val;
+ int ret;
+@@ -661,7 +661,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card,
+ kctl = snd_soc_card_get_kcontrol(card, name);
+ if (kctl) {
+ struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+- if (max <= mc->max) {
++ if (max <= mc->max - mc->min) {
+ mc->platform_max = max;
+ ret = 0;
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 54704250c0a2c4..511446a30c057b 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -698,14 +698,12 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
+
+ if (!rollback) {
+ snd_soc_runtime_deactivate(rtd, substream->stream);
+- /* clear the corresponding DAIs parameters when going to be inactive */
+- for_each_rtd_dais(rtd, i, dai) {
+- if (snd_soc_dai_active(dai) == 0)
+- soc_pcm_set_dai_params(dai, NULL);
+
+- if (snd_soc_dai_stream_active(dai, substream->stream) == 0)
+- snd_soc_dai_digital_mute(dai, 1, substream->stream);
+- }
++ /* Make sure DAI parameters cleared if the DAI becomes inactive */
++ for_each_rtd_dais(rtd, i, dai)
++ if (snd_soc_dai_active(dai) == 0 &&
++ (dai->rate || dai->channels || dai->sample_bits))
++ soc_pcm_set_dai_params(dai, NULL);
+ }
+
+ for_each_rtd_dais(rtd, i, dai)
+@@ -898,8 +896,10 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ snd_soc_dapm_stream_event(rtd, substream->stream,
+ SND_SOC_DAPM_STREAM_START);
+
+- for_each_rtd_dais(rtd, i, dai)
+- snd_soc_dai_digital_mute(dai, 0, substream->stream);
++ for_each_rtd_dais(rtd, i, dai) {
++ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 0, substream->stream);
++ }
+
+ out:
+ return soc_pcm_ret(rtd, ret);
+@@ -936,6 +936,17 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
+
+ snd_soc_dpcm_mutex_assert_held(rtd);
+
++ /* clear the corresponding DAIs parameters when going to be inactive */
++ for_each_rtd_dais(rtd, i, dai) {
++ if (snd_soc_dai_active(dai) == 1)
++ soc_pcm_set_dai_params(dai, NULL);
++
++ if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
++ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 1, substream->stream);
++ }
++ }
++
+ /* run the stream event */
+ snd_soc_dapm_stream_stop(rtd, substream->stream);
+
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 2362c282ec8b36..a643ef654b9d74 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -851,6 +851,8 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *
+ se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
+ }
+
++ se->items = le32_to_cpu(ec->items);
++ se->values = (const unsigned int *)se->dobj.control.dvalues;
+ return 0;
+ }
+
+@@ -1021,6 +1023,7 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
+ struct snd_soc_tplg_hdr *hdr)
+ {
+ struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
++ const size_t maxlen = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
+ struct snd_soc_tplg_dapm_graph_elem *elem;
+ struct snd_soc_dapm_route *route;
+ int count, i;
+@@ -1044,31 +1047,27 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
+ tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem);
+
+ /* validate routes */
+- if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+- ret = -EINVAL;
+- break;
+- }
+- if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
++ if ((strnlen(elem->source, maxlen) == maxlen) ||
++ (strnlen(elem->sink, maxlen) == maxlen) ||
++ (strnlen(elem->control, maxlen) == maxlen)) {
+ ret = -EINVAL;
+ break;
+ }
+- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+- SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
+- ret = -EINVAL;
++
++ route->source = devm_kstrdup(tplg->dev, elem->source, GFP_KERNEL);
++ route->sink = devm_kstrdup(tplg->dev, elem->sink, GFP_KERNEL);
++ if (!route->source || !route->sink) {
++ ret = -ENOMEM;
+ break;
+ }
+
+- route->source = elem->source;
+- route->sink = elem->sink;
+-
+- /* set to NULL atm for tplg users */
+- route->connected = NULL;
+- if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
+- route->control = NULL;
+- else
+- route->control = elem->control;
++ if (strnlen(elem->control, maxlen) != 0) {
++ route->control = devm_kstrdup(tplg->dev, elem->control, GFP_KERNEL);
++ if (!route->control) {
++ ret = -ENOMEM;
++ break;
++ }
++ }
+
+ /* add route dobj to dobj_list */
+ route->dobj.type = SND_SOC_DOBJ_GRAPH;
+diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
+index 9c746e4edef71c..941ba0639a4e6d 100644
+--- a/sound/soc/soc-utils.c
++++ b/sound/soc/soc-utils.c
+@@ -225,12 +225,12 @@ int snd_soc_component_is_dummy(struct snd_soc_component *component)
+ (component->driver == &dummy_codec));
+ }
+
+-struct snd_soc_dai_link_component asoc_dummy_dlc = {
++struct snd_soc_dai_link_component snd_soc_dummy_dlc = {
+ .of_node = NULL,
+ .dai_name = "snd-soc-dummy-dai",
+ .name = "snd-soc-dummy",
+ };
+-EXPORT_SYMBOL_GPL(asoc_dummy_dlc);
++EXPORT_SYMBOL_GPL(snd_soc_dummy_dlc);
+
+ static int snd_soc_dummy_probe(struct platform_device *pdev)
+ {
+diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
+index 19a801908b56d4..bfed848de77c8a 100644
+--- a/sound/soc/sof/amd/acp.c
++++ b/sound/soc/sof/amd/acp.c
+@@ -28,11 +28,10 @@ MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");
+
+ const struct dmi_system_id acp_sof_quirk_table[] = {
+ {
+- /* Valve Jupiter device */
++ /* Steam Deck OLED device */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
+- DMI_MATCH(DMI_PRODUCT_FAMILY, "Sephiroth"),
+ },
+ .driver_data = (void *)SECURED_FIRMWARE,
+ },
+@@ -381,6 +380,7 @@ static int acp_power_on(struct snd_sof_dev *sdev)
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
+ unsigned int base = desc->pgfsm_base;
+ unsigned int val;
++ unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
+ int ret;
+
+ val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
+@@ -388,9 +388,23 @@ static int acp_power_on(struct snd_sof_dev *sdev)
+ if (val == ACP_POWERED_ON)
+ return 0;
+
+- if (val & ACP_PGFSM_STATUS_MASK)
++ switch (desc->rev) {
++ case 3:
++ case 5:
++ acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
++ acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
++ break;
++ case 6:
++ acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
++ acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (val & acp_pgfsm_status_mask)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
+- ACP_PGFSM_CNTL_POWER_ON_MASK);
++ acp_pgfsm_cntl_mask);
+
+ ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
+ !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
+@@ -522,6 +536,10 @@ int amd_sof_acp_probe(struct snd_sof_dev *sdev)
+ goto unregister_dev;
+ }
+
++ ret = acp_init(sdev);
++ if (ret < 0)
++ goto free_smn_dev;
++
+ sdev->ipc_irq = pci->irq;
+ ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
+ IRQF_SHARED, "AudioDSP", sdev);
+@@ -531,10 +549,6 @@ int amd_sof_acp_probe(struct snd_sof_dev *sdev)
+ goto free_smn_dev;
+ }
+
+- ret = acp_init(sdev);
+- if (ret < 0)
+- goto free_ipc_irq;
+-
+ sdev->dsp_box.offset = 0;
+ sdev->dsp_box.size = BOX_SIZE_512;
+
+@@ -547,17 +561,27 @@ int amd_sof_acp_probe(struct snd_sof_dev *sdev)
+ adata->signed_fw_image = false;
+ dmi_id = dmi_first_match(acp_sof_quirk_table);
+ if (dmi_id && dmi_id->driver_data) {
+- adata->fw_code_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-code.bin",
+- plat_data->fw_filename_prefix,
+- chip->name);
+- adata->fw_data_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-data.bin",
+- plat_data->fw_filename_prefix,
+- chip->name);
+- adata->signed_fw_image = dmi_id->driver_data;
++ adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
++ "%s/sof-%s-code.bin",
++ plat_data->fw_filename_prefix,
++ chip->name);
++ if (!adata->fw_code_bin) {
++ ret = -ENOMEM;
++ goto free_ipc_irq;
++ }
+
+- dev_dbg(sdev->dev, "fw_code_bin:%s, fw_data_bin:%s\n", adata->fw_code_bin,
+- adata->fw_data_bin);
++ adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
++ "%s/sof-%s-data.bin",
++ plat_data->fw_filename_prefix,
++ chip->name);
++ if (!adata->fw_data_bin) {
++ ret = -ENOMEM;
++ goto free_ipc_irq;
++ }
++
++ adata->signed_fw_image = dmi_id->driver_data;
+ }
++
+ adata->enable_fw_debug = enable_fw_debug;
+ acp_memory_init(sdev);
+
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index 4dcceb7647694a..133abed74f0153 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -25,8 +25,11 @@
+ #define ACP_REG_POLL_TIMEOUT_US 2000
+ #define ACP_DMA_COMPLETE_TIMEOUT_US 5000
+
+-#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
+-#define ACP_PGFSM_STATUS_MASK 0x03
++#define ACP3X_PGFSM_CNTL_POWER_ON_MASK 0x01
++#define ACP3X_PGFSM_STATUS_MASK 0x03
++#define ACP6X_PGFSM_CNTL_POWER_ON_MASK 0x07
++#define ACP6X_PGFSM_STATUS_MASK 0x0F
++
+ #define ACP_POWERED_ON 0x00
+ #define ACP_ASSERT_RESET 0x01
+ #define ACP_RELEASE_RESET 0x00
+diff --git a/sound/soc/sof/amd/pci-vangogh.c b/sound/soc/sof/amd/pci-vangogh.c
+index d8be42fbcb6dd8..b035e31fadabaa 100644
+--- a/sound/soc/sof/amd/pci-vangogh.c
++++ b/sound/soc/sof/amd/pci-vangogh.c
+@@ -34,7 +34,6 @@ static const struct sof_amd_acp_desc vangogh_chip_info = {
+ .dsp_intr_base = ACP5X_DSP_SW_INTR_BASE,
+ .sram_pte_offset = ACP5X_SRAM_PTE_OFFSET,
+ .hw_semaphore_offset = ACP5X_AXI2DAGB_SEM_0,
+- .acp_clkmux_sel = ACP5X_CLKMUX_SEL,
+ .probe_reg_offset = ACP5X_FUTURE_REG_ACLK_0,
+ };
+
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 2d1616b81485c5..0938b259f70340 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -459,9 +459,10 @@ int snd_sof_device_remove(struct device *dev)
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
++ bool aborted = false;
+
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+- cancel_work_sync(&sdev->probe_work);
++ aborted = cancel_work_sync(&sdev->probe_work);
+
+ /*
+ * Unregister any registered client device first before IPC and debugfs
+@@ -487,6 +488,9 @@ int snd_sof_device_remove(struct device *dev)
+ snd_sof_free_debug(sdev);
+ snd_sof_remove(sdev);
+ sof_ops_free(sdev);
++ } else if (aborted) {
++ /* probe_work never ran */
++ sof_ops_free(sdev);
+ }
+
+ /* release firmware */
+diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
+index 1243f8a6141eaa..186ba4bbb5b26b 100644
+--- a/sound/soc/sof/imx/imx8m.c
++++ b/sound/soc/sof/imx/imx8m.c
+@@ -243,7 +243,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev)
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+- priv->regmap = syscon_regmap_lookup_by_compatible("fsl,dsp-ctrl");
++ priv->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,dsp-ctrl");
+ if (IS_ERR(priv->regmap)) {
+ dev_err(sdev->dev, "cannot find dsp-ctrl registers");
+ ret = PTR_ERR(priv->regmap);
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index 8a5e99a898ecb4..328d7c227b2184 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -54,8 +54,16 @@ static int request_codec_module(struct hda_codec *codec)
+
+ static int hda_codec_load_module(struct hda_codec *codec)
+ {
+- int ret = request_codec_module(codec);
++ int ret;
++
++ ret = snd_hdac_device_register(&codec->core);
++ if (ret) {
++ dev_err(&codec->core.dev, "failed to register hdac device\n");
++ put_device(&codec->core.dev);
++ return ret;
++ }
+
++ ret = request_codec_module(codec);
+ if (ret <= 0) {
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ ret = request_codec_module(codec);
+@@ -116,7 +124,6 @@ EXPORT_SYMBOL_NS_GPL(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
+ static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, int type)
+ {
+ struct hda_codec *codec;
+- int ret;
+
+ codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "ehdaudio%dD%d", bus->idx, addr);
+ if (IS_ERR(codec)) {
+@@ -126,13 +133,6 @@ static struct hda_codec *hda_codec_device_init(struct hdac_bus *bus, int addr, i
+
+ codec->core.type = type;
+
+- ret = snd_hdac_device_register(&codec->core);
+- if (ret) {
+- dev_err(bus->dev, "failed to register hdac device\n");
+- put_device(&codec->core.dev);
+- return ERR_PTR(ret);
+- }
+-
+ return codec;
+ }
+
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index f3cefd86608120..19ec1a45737eac 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -534,12 +534,6 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ sdai = swidget->private;
+ ops = sdai->platform_private;
+
+- ret = hda_link_dma_cleanup(hext_stream->link_substream,
+- hext_stream,
+- cpu_dai);
+- if (ret < 0)
+- return ret;
+-
+ /* for consistency with TRIGGER_SUSPEND */
+ if (ops->post_trigger) {
+ ret = ops->post_trigger(sdev, cpu_dai,
+@@ -548,6 +542,12 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ if (ret < 0)
+ return ret;
+ }
++
++ ret = hda_link_dma_cleanup(hext_stream->link_substream,
++ hext_stream,
++ cpu_dai);
++ if (ret < 0)
++ return ret;
+ }
+ }
+
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index 44f39a520bb39c..1506982a56c305 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -681,17 +681,27 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+ struct hdac_bus *bus = sof_to_bus(sdev);
++ bool imr_lost = false;
+ int ret, j;
+
+ /*
+- * The memory used for IMR boot loses its content in deeper than S3 state
+- * We must not try IMR boot on next power up (as it will fail).
+- *
++ * The memory used for IMR boot loses its content in deeper than S3
++ * state on CAVS platforms.
++ * On ACE platforms due to the system architecture the IMR content is
++ * lost at S3 state already, they are tailored for s2idle use.
++ * We must not try IMR boot on next power up in these cases as it will
++ * fail.
++ */
++ if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
++ (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
++ sdev->system_suspend_target == SOF_SUSPEND_S3))
++ imr_lost = true;
++
++ /*
+ * In case of firmware crash or boot failure set the skip_imr_boot to true
+ * as well in order to try to re-load the firmware to do a 'cold' boot.
+ */
+- if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
+- sdev->fw_state == SOF_FW_CRASHED ||
++ if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
+ sdev->fw_state == SOF_FW_BOOT_FAILED)
+ hda->skip_imr_boot = true;
+
+@@ -699,6 +709,9 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
+ if (ret < 0)
+ return ret;
+
++ /* make sure that no irq handler is pending before shutdown */
++ synchronize_irq(sdev->ipc_irq);
++
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
+
+ /* power down all hda links */
+diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
+index f23c72cdff489d..7d17d586ed9dbb 100644
+--- a/sound/soc/sof/intel/hda-pcm.c
++++ b/sound/soc/sof/intel/hda-pcm.c
+@@ -254,6 +254,12 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
++ /* Limit the maximum number of periods to not exceed the BDL entries count */
++ if (runtime->hw.periods_max > HDA_DSP_MAX_BDL_ENTRIES)
++ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
++ runtime->hw.periods_min,
++ HDA_DSP_MAX_BDL_ENTRIES);
++
+ /* Only S16 and S32 supported by HDA hardware when used without DSP */
+ if (sdev->dspless_mode_selected)
+ snd_pcm_hw_constraint_mask64(substream->runtime, SNDRV_PCM_HW_PARAM_FORMAT,
+diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
+index 5c517ec57d4a20..0f0cfd0f85a3fb 100644
+--- a/sound/soc/sof/intel/hda.h
++++ b/sound/soc/sof/intel/hda.h
+@@ -876,6 +876,7 @@ extern const struct sof_intel_dsp_desc ehl_chip_info;
+ extern const struct sof_intel_dsp_desc jsl_chip_info;
+ extern const struct sof_intel_dsp_desc adls_chip_info;
+ extern const struct sof_intel_dsp_desc mtl_chip_info;
++extern const struct sof_intel_dsp_desc arl_s_chip_info;
+ extern const struct sof_intel_dsp_desc lnl_chip_info;
+
+ /* Probes support */
+diff --git a/sound/soc/sof/intel/lnl.c b/sound/soc/sof/intel/lnl.c
+index db94b45e53af8f..822f8577232083 100644
+--- a/sound/soc/sof/intel/lnl.c
++++ b/sound/soc/sof/intel/lnl.c
+@@ -16,6 +16,7 @@
+ #include "hda-ipc.h"
+ #include "../sof-audio.h"
+ #include "mtl.h"
++#include "lnl.h"
+ #include <sound/hda-mlink.h>
+
+ /* LunarLake ops */
+@@ -172,7 +173,7 @@ const struct sof_intel_dsp_desc lnl_chip_info = {
+ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+- .rom_status_reg = MTL_DSP_ROM_STS,
++ .rom_status_reg = LNL_DSP_REG_HFDSC,
+ .rom_init_timeout = 300,
+ .ssp_count = MTL_SSP_COUNT,
+ .d0i3_offset = MTL_HDA_VS_D0I3C,
+diff --git a/sound/soc/sof/intel/lnl.h b/sound/soc/sof/intel/lnl.h
+new file mode 100644
+index 00000000000000..4f4734fe7e089e
+--- /dev/null
++++ b/sound/soc/sof/intel/lnl.h
+@@ -0,0 +1,15 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * Copyright(c) 2024 Intel Corporation. All rights reserved.
++ */
++
++#ifndef __SOF_INTEL_LNL_H
++#define __SOF_INTEL_LNL_H
++
++#define LNL_DSP_REG_HFDSC 0x160200 /* DSP core0 status */
++#define LNL_DSP_REG_HFDEC 0x160204 /* DSP core0 error */
++
++#endif /* __SOF_INTEL_LNL_H */
+diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
+index f9412517eaf29b..7d7a017c2e1f7e 100644
+--- a/sound/soc/sof/intel/mtl.c
++++ b/sound/soc/sof/intel/mtl.c
+@@ -436,8 +436,9 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+ {
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ const struct sof_intel_dsp_desc *chip = hda->desc;
+- unsigned int status;
+- u32 ipc_hdr;
++ unsigned int status, target_status;
++ u32 ipc_hdr, flags;
++ char *dump_msg;
+ int ret;
+
+ /* step 1: purge FW request */
+@@ -481,17 +482,55 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+
+ mtl_enable_ipc_interrupts(sdev);
+
++ if (chip->rom_status_reg == MTL_DSP_ROM_STS) {
++ /*
++ * Workaround: when the ROM status register is pointing to
++ * the SRAM window (MTL_DSP_ROM_STS) the platform cannot catch
++ * ROM_INIT_DONE because of a very short timing window.
++ * Follow the recommendations and skip target state waiting.
++ */
++ return 0;
++ }
++
+ /*
+- * ACE workaround: don't wait for ROM INIT.
+- * The platform cannot catch ROM_INIT_DONE because of a very short
+- * timing window. Follow the recommendations and skip this part.
++ * step 7:
++ * - Cold/Full boot: wait for ROM init to proceed to download the firmware
++ * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
+ */
++ if (imr_boot)
++ target_status = FSR_STATE_FW_ENTERED;
++ else
++ target_status = FSR_STATE_INIT_DONE;
+
+- return 0;
++ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
++ chip->rom_status_reg, status,
++ (FSR_TO_STATE_CODE(status) == target_status),
++ HDA_DSP_REG_POLL_INTERVAL_US,
++ chip->rom_init_timeout *
++ USEC_PER_MSEC);
++
++ if (!ret)
++ return 0;
++
++ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
++ dev_err(sdev->dev,
++ "%s: timeout with rom_status_reg (%#x) read\n",
++ __func__, chip->rom_status_reg);
+
+ err:
+- snd_sof_dsp_dbg_dump(sdev, "MTL DSP init fail", 0);
++ flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
++
++ /* after max boot attempts make sure that the dump is printed */
++ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
++ flags &= ~SOF_DBG_DUMP_OPTIONAL;
++
++ dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
++ hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
++ snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
++ mtl_enable_interrupts(sdev, false);
+ mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
++
++ kfree(dump_msg);
+ return ret;
+ }
+
+@@ -725,7 +764,7 @@ const struct sof_intel_dsp_desc mtl_chip_info = {
+ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+- .rom_status_reg = MTL_DSP_ROM_STS,
++ .rom_status_reg = MTL_DSP_REG_HFFLGPXQWY,
+ .rom_init_timeout = 300,
+ .ssp_count = MTL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+@@ -743,3 +782,31 @@ const struct sof_intel_dsp_desc mtl_chip_info = {
+ .hw_ip_version = SOF_INTEL_ACE_1_0,
+ };
+ EXPORT_SYMBOL_NS(mtl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
++
++const struct sof_intel_dsp_desc arl_s_chip_info = {
++ .cores_num = 2,
++ .init_core_mask = BIT(0),
++ .host_managed_cores_mask = BIT(0),
++ .ipc_req = MTL_DSP_REG_HFIPCXIDR,
++ .ipc_req_mask = MTL_DSP_REG_HFIPCXIDR_BUSY,
++ .ipc_ack = MTL_DSP_REG_HFIPCXIDA,
++ .ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
++ .ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
++ .rom_status_reg = MTL_DSP_REG_HFFLGPXQWY,
++ .rom_init_timeout = 300,
++ .ssp_count = MTL_SSP_COUNT,
++ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
++ .sdw_shim_base = SDW_SHIM_BASE_ACE,
++ .sdw_alh_base = SDW_ALH_BASE_ACE,
++ .d0i3_offset = MTL_HDA_VS_D0I3C,
++ .read_sdw_lcount = hda_sdw_check_lcount_common,
++ .enable_sdw_irq = mtl_enable_sdw_irq,
++ .check_sdw_irq = mtl_dsp_check_sdw_irq,
++ .check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
++ .check_ipc_irq = mtl_dsp_check_ipc_irq,
++ .cl_init = mtl_dsp_cl_init,
++ .power_down_dsp = mtl_power_down_dsp,
++ .disable_interrupts = mtl_dsp_disable_interrupts,
++ .hw_ip_version = SOF_INTEL_ACE_1_0,
++};
++EXPORT_SYMBOL_NS(arl_s_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
+index 95696b3d7c4cfb..fab28d5f689151 100644
+--- a/sound/soc/sof/intel/mtl.h
++++ b/sound/soc/sof/intel/mtl.h
+@@ -76,8 +76,8 @@
+ #define MTL_DSP_ROM_STS MTL_SRAM_WINDOW_OFFSET(0) /* ROM status */
+ #define MTL_DSP_ROM_ERROR (MTL_SRAM_WINDOW_OFFSET(0) + 0x4) /* ROM error code */
+
+-#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* ROM debug status */
+-#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* ROM debug error code */
++#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* DSP core0 status */
++#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* DSP core0 error */
+ #define MTL_DSP_REG_HfIMRIS1 0x162088
+ #define MTL_DSP_REG_HfIMRIS1_IU_MASK BIT(0)
+
+diff --git a/sound/soc/sof/intel/pci-lnl.c b/sound/soc/sof/intel/pci-lnl.c
+index 1b12c280edb46c..7ad7aa3c3461b7 100644
+--- a/sound/soc/sof/intel/pci-lnl.c
++++ b/sound/soc/sof/intel/pci-lnl.c
+@@ -35,6 +35,9 @@ static const struct sof_dev_desc lnl_desc = {
+ .default_fw_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ipc4/lnl",
+ },
++ .default_lib_path = {
++ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/lnl",
++ },
+ .default_tplg_path = {
+ [SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
+ },
+diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
+index 7868b0827e844e..7d00e469f58ce7 100644
+--- a/sound/soc/sof/intel/pci-mtl.c
++++ b/sound/soc/sof/intel/pci-mtl.c
+@@ -50,9 +50,40 @@ static const struct sof_dev_desc mtl_desc = {
+ .ops_free = hda_ops_free,
+ };
+
++static const struct sof_dev_desc arl_s_desc = {
++ .use_acpi_target_states = true,
++ .machines = snd_soc_acpi_intel_arl_machines,
++ .alt_machines = snd_soc_acpi_intel_arl_sdw_machines,
++ .resindex_lpe_base = 0,
++ .resindex_pcicfg_base = -1,
++ .resindex_imr_base = -1,
++ .irqindex_host_ipc = -1,
++ .chip_info = &arl_s_chip_info,
++ .ipc_supported_mask = BIT(SOF_IPC_TYPE_4),
++ .ipc_default = SOF_IPC_TYPE_4,
++ .dspless_mode_supported = true, /* Only supported for HDaudio */
++ .default_fw_path = {
++ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/arl-s",
++ },
++ .default_lib_path = {
++ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/arl-s",
++ },
++ .default_tplg_path = {
++ [SOF_IPC_TYPE_4] = "intel/sof-ace-tplg",
++ },
++ .default_fw_filename = {
++ [SOF_IPC_TYPE_4] = "sof-arl-s.ri",
++ },
++ .nocodec_tplg_filename = "sof-arl-nocodec.tplg",
++ .ops = &sof_mtl_ops,
++ .ops_init = sof_mtl_ops_init,
++ .ops_free = hda_ops_free,
++};
++
+ /* PCI IDs */
+ static const struct pci_device_id sof_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, HDA_MTL, &mtl_desc) },
++ { PCI_DEVICE_DATA(INTEL, HDA_ARL_S, &arl_s_desc) },
+ { 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, sof_pci_ids);
+diff --git a/sound/soc/sof/ipc3-loader.c b/sound/soc/sof/ipc3-loader.c
+index 28218766d2114f..6e3ef067211068 100644
+--- a/sound/soc/sof/ipc3-loader.c
++++ b/sound/soc/sof/ipc3-loader.c
+@@ -148,6 +148,8 @@ static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
+
+ head = (struct sof_ext_man_header *)fw->data;
+ remaining = head->full_size - head->header_size;
++ if (remaining < 0 || remaining > sdev->basefw.fw->size)
++ return -EINVAL;
+ ext_man_size = ipc3_fw_ext_man_size(sdev, fw);
+
+ /* Assert firmware starts with extended manifest */
+diff --git a/sound/soc/sof/ipc3-pcm.c b/sound/soc/sof/ipc3-pcm.c
+index cb58ee8c158a55..720bd9bd2667ae 100644
+--- a/sound/soc/sof/ipc3-pcm.c
++++ b/sound/soc/sof/ipc3-pcm.c
+@@ -398,4 +398,5 @@ const struct sof_ipc_pcm_ops ipc3_pcm_ops = {
+ .trigger = sof_ipc3_pcm_trigger,
+ .dai_link_fixup = sof_ipc3_pcm_dai_link_fixup,
+ .reset_hw_params_during_stop = true,
++ .d0i3_supported_in_s0ix = true,
+ };
+diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
+index ba4ef290b6343f..d96555438c6bff 100644
+--- a/sound/soc/sof/ipc3-topology.c
++++ b/sound/soc/sof/ipc3-topology.c
+@@ -493,6 +493,7 @@ static int sof_ipc3_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
+ static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+ {
+ struct snd_soc_component *scomp = swidget->scomp;
++ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct sof_ipc_pipe_new *pipeline;
+ struct snd_sof_widget *comp_swidget;
+ int ret;
+@@ -545,6 +546,7 @@ static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+ swidget->dynamic_pipeline_widget);
+
+ swidget->core = pipeline->core;
++ spipe->core_mask |= BIT(pipeline->core);
+
+ return 0;
+
+@@ -2307,27 +2309,16 @@ static int sof_tear_down_left_over_pipelines(struct snd_sof_dev *sdev)
+ return 0;
+ }
+
+-/*
+- * For older firmware, this function doesn't free widgets for static pipelines during suspend.
+- * It only resets use_count for all widgets.
+- */
+-static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verify)
++static int sof_ipc3_free_widgets_in_list(struct snd_sof_dev *sdev, bool include_scheduler,
++ bool *dyn_widgets, bool verify)
+ {
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ struct snd_sof_widget *swidget;
+- struct snd_sof_route *sroute;
+- bool dyn_widgets = false;
+ int ret;
+
+- /*
+- * This function is called during suspend and for one-time topology verification during
+- * first boot. In both cases, there is no need to protect swidget->use_count and
+- * sroute->setup because during suspend all running streams are suspended and during
+- * topology loading the sound card unavailable to open PCMs.
+- */
+ list_for_each_entry(swidget, &sdev->widget_list, list) {
+ if (swidget->dynamic_pipeline_widget) {
+- dyn_widgets = true;
++ *dyn_widgets = true;
+ continue;
+ }
+
+@@ -2342,11 +2333,49 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
+ continue;
+ }
+
++ if (include_scheduler && swidget->id != snd_soc_dapm_scheduler)
++ continue;
++
++ if (!include_scheduler && swidget->id == snd_soc_dapm_scheduler)
++ continue;
++
+ ret = sof_widget_free(sdev, swidget);
+ if (ret < 0)
+ return ret;
+ }
+
++ return 0;
++}
++
++/*
++ * For older firmware, this function doesn't free widgets for static pipelines during suspend.
++ * It only resets use_count for all widgets.
++ */
++static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verify)
++{
++ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
++ struct snd_sof_widget *swidget;
++ struct snd_sof_route *sroute;
++ bool dyn_widgets = false;
++ int ret;
++
++ /*
++ * This function is called during suspend and for one-time topology verification during
++ * first boot. In both cases, there is no need to protect swidget->use_count and
++ * sroute->setup because during suspend all running streams are suspended and during
++ * topology loading the sound card unavailable to open PCMs. Do not free the scheduler
++ * widgets yet so that the secondary cores do not get powered down before all the widgets
++ * associated with the scheduler are freed.
++ */
++ ret = sof_ipc3_free_widgets_in_list(sdev, false, &dyn_widgets, verify);
++ if (ret < 0)
++ return ret;
++
++ /* free all the scheduler widgets now */
++ ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
++ if (ret < 0)
++ return ret;
++
+ /*
+ * Tear down all pipelines associated with PCMs that did not get suspended
+ * and unset the prepare flag so that they can be set up again during resume.
+diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c
+index fb40378ad08402..c03dd513fbff14 100644
+--- a/sound/soc/sof/ipc3.c
++++ b/sound/soc/sof/ipc3.c
+@@ -1067,7 +1067,7 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
+ return;
+ }
+
+- if (hdr.size < sizeof(hdr)) {
++ if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) {
+ dev_err(sdev->dev, "The received message size is invalid\n");
+ return;
+ }
+diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
+index c6d404d440970b..e4ce1b53fba652 100644
+--- a/sound/soc/sof/ipc4-control.c
++++ b/sound/soc/sof/ipc4-control.c
+@@ -89,7 +89,7 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
+ struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
+ struct sof_ipc4_gain *gain = swidget->private;
+ struct sof_ipc4_msg *msg = &cdata->msg;
+- struct sof_ipc4_gain_data data;
++ struct sof_ipc4_gain_params params;
+ bool all_channels_equal = true;
+ u32 value;
+ int ret, i;
+@@ -109,20 +109,20 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
+ */
+ for (i = 0; i < scontrol->num_channels; i++) {
+ if (all_channels_equal) {
+- data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+- data.init_val = cdata->chanv[0].value;
++ params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
++ params.init_val = cdata->chanv[0].value;
+ } else {
+- data.channels = cdata->chanv[i].channel;
+- data.init_val = cdata->chanv[i].value;
++ params.channels = cdata->chanv[i].channel;
++ params.init_val = cdata->chanv[i].value;
+ }
+
+ /* set curve type and duration from topology */
+- data.curve_duration_l = gain->data.curve_duration_l;
+- data.curve_duration_h = gain->data.curve_duration_h;
+- data.curve_type = gain->data.curve_type;
++ params.curve_duration_l = gain->data.params.curve_duration_l;
++ params.curve_duration_h = gain->data.params.curve_duration_h;
++ params.curve_type = gain->data.params.curve_type;
+
+- msg->data_ptr = &data;
+- msg->data_size = sizeof(data);
++ msg->data_ptr = &params;
++ msg->data_size = sizeof(params);
+
+ ret = sof_ipc4_set_get_kcontrol_data(scontrol, true, lock);
+ msg->data_ptr = NULL;
+diff --git a/sound/soc/sof/ipc4-loader.c b/sound/soc/sof/ipc4-loader.c
+index eaa04762eb1122..4d37e89b592a9b 100644
+--- a/sound/soc/sof/ipc4-loader.c
++++ b/sound/soc/sof/ipc4-loader.c
+@@ -479,13 +479,10 @@ void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
+ msg = "No CPC match in the firmware file's manifest";
+
+ no_cpc:
+- dev_warn(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
+- fw_module->man4_module_entry.name,
+- &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
+- basecfg->obs);
+- dev_warn_once(sdev->dev, "Please try to update the firmware.\n");
+- dev_warn_once(sdev->dev, "If the issue persists, file a bug at\n");
+- dev_warn_once(sdev->dev, "https://github.com/thesofproject/sof/issues/\n");
++ dev_dbg(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
++ fw_module->man4_module_entry.name,
++ &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
++ basecfg->obs);
+ }
+
+ const struct sof_ipc_fw_loader_ops ipc4_loader_ops = {
+diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
+index db19cd03ecad8a..e8acf60c27a743 100644
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -377,7 +377,18 @@ static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
+ ret = sof_ipc4_set_multi_pipeline_state(sdev, state, trigger_list);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to set final state %d for all pipelines\n", state);
+- goto free;
++ /*
++ * workaround: if the firmware is crashed while setting the
++ * pipelines to reset state we must ignore the error code and
++ * reset it to 0.
++ * Since the firmware is crashed we will not send IPC messages
++ * and we are going to see errors printed, but the state of the
++ * widgets will be correct for the next boot.
++ */
++ if (sdev->fw_state != SOF_FW_CRASHED || state != SOF_IPC4_PIPE_RESET)
++ goto free;
++
++ ret = 0;
+ }
+
+ /* update RUNNING/RESET state for all pipelines that were just triggered */
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 7cb63e6b24dc96..284efad30f1a02 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -128,18 +128,18 @@ static const struct sof_topology_token comp_ext_tokens[] = {
+
+ static const struct sof_topology_token gain_tokens[] = {
+ {SOF_TKN_GAIN_RAMP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+- get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
++ get_token_u32, offsetof(struct sof_ipc4_gain_params, curve_type)},
+ {SOF_TKN_GAIN_RAMP_DURATION,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+- offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
++ offsetof(struct sof_ipc4_gain_params, curve_duration_l)},
+ {SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+- get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
++ get_token_u32, offsetof(struct sof_ipc4_gain_params, init_val)},
+ };
+
+ /* SRC */
+ static const struct sof_topology_token src_tokens[] = {
+ {SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+- offsetof(struct sof_ipc4_src, sink_rate)},
++ offsetof(struct sof_ipc4_src_data, sink_rate)},
+ };
+
+ static const struct sof_token_info ipc4_token_list[SOF_TOKEN_COUNT] = {
+@@ -195,6 +195,14 @@ sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
+ }
+
+ process = swidget->private;
++
++ /*
++ * For process modules without base config extension, base module config
++ * format is used for all input pins
++ */
++ if (process->init_config != SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT)
++ return &process->base_config.audio_fmt;
++
+ base_cfg_ext = process->base_config_ext;
+
+ /*
+@@ -654,6 +662,7 @@ static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+ {
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_pipeline *pipeline;
++ struct snd_sof_pipeline *spipe = swidget->spipe;
+ int ret;
+
+ pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
+@@ -668,6 +677,7 @@ static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
+ }
+
+ swidget->core = pipeline->core_id;
++ spipe->core_mask |= BIT(pipeline->core_id);
+
+ if (pipeline->use_chain_dma) {
+ dev_dbg(scomp->dev, "Set up chain DMA for %s\n", swidget->widget->name);
+@@ -719,15 +729,15 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+
+ swidget->private = gain;
+
+- gain->data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+- gain->data.init_val = SOF_IPC4_VOL_ZERO_DB;
++ gain->data.params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
++ gain->data.params.init_val = SOF_IPC4_VOL_ZERO_DB;
+
+- ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->base_config);
++ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->data.base_config);
+ if (ret)
+ goto err;
+
+- ret = sof_update_ipc_object(scomp, &gain->data, SOF_GAIN_TOKENS, swidget->tuples,
+- swidget->num_tuples, sizeof(gain->data), 1);
++ ret = sof_update_ipc_object(scomp, &gain->data.params, SOF_GAIN_TOKENS,
++ swidget->tuples, swidget->num_tuples, sizeof(gain->data), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Parsing gain tokens failed\n");
+ goto err;
+@@ -735,8 +745,8 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+
+ dev_dbg(scomp->dev,
+ "pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x\n",
+- swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
+- gain->data.init_val);
++ swidget->widget->name, gain->data.params.curve_type,
++ gain->data.params.curve_duration_l, gain->data.params.init_val);
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
+ if (ret)
+@@ -798,6 +808,7 @@ static int sof_ipc4_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
+ static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
+ {
+ struct snd_soc_component *scomp = swidget->scomp;
++ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct sof_ipc4_src *src;
+ int ret;
+
+@@ -809,18 +820,21 @@ static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
+
+ swidget->private = src;
+
+- ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt, &src->base_config);
++ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt,
++ &src->data.base_config);
+ if (ret)
+ goto err;
+
+- ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
++ ret = sof_update_ipc_object(scomp, &src->data, SOF_SRC_TOKENS, swidget->tuples,
+ swidget->num_tuples, sizeof(*src), 1);
+ if (ret) {
+ dev_err(scomp->dev, "Parsing SRC tokens failed\n");
+ goto err;
+ }
+
+- dev_dbg(scomp->dev, "SRC sink rate %d\n", src->sink_rate);
++ spipe->core_mask |= BIT(swidget->core);
++
++ dev_dbg(scomp->dev, "SRC sink rate %d\n", src->data.sink_rate);
+
+ ret = sof_ipc4_widget_setup_msg(swidget, &src->msg);
+ if (ret)
+@@ -865,6 +879,7 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
+ {
+ struct snd_soc_component *scomp = swidget->scomp;
+ struct sof_ipc4_fw_module *fw_module;
++ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct sof_ipc4_process *process;
+ void *cfg;
+ int ret;
+@@ -895,7 +910,8 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
+ if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) {
+ struct sof_ipc4_base_module_cfg_ext *base_cfg_ext;
+ u32 ext_size = struct_size(base_cfg_ext, pin_formats,
+- swidget->num_input_pins + swidget->num_output_pins);
++ size_add(swidget->num_input_pins,
++ swidget->num_output_pins));
+
+ base_cfg_ext = kzalloc(ext_size, GFP_KERNEL);
+ if (!base_cfg_ext) {
+@@ -920,6 +936,9 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
+
+ sof_ipc4_widget_update_kcontrol_module_id(swidget);
+
++ /* set pipeline core mask to keep track of the core the module is scheduled to run on */
++ spipe->core_mask |= BIT(swidget->core);
++
+ return 0;
+ free_base_cfg_ext:
+ kfree(process->base_config_ext);
+@@ -1235,7 +1254,13 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget)
+ ipc4_copier = dai->private;
+
+ if (pipeline->use_chain_dma) {
+- pipeline->msg.primary = 0;
++ /*
++ * Preserve the DMA Link ID and clear other bits since
++ * the DMA Link ID is only configured once during
++ * dai_config, other fields are expected to be 0 for
++ * re-configuration
++ */
++ pipeline->msg.primary &= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK;
+ pipeline->msg.extension = 0;
+ }
+
+@@ -1811,7 +1836,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
+ u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
+ int ret;
+
+- ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->base_config,
++ ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->data.base_config,
+ pipeline_params, available_fmt);
+ if (ret < 0)
+ return ret;
+@@ -1821,7 +1846,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
+ out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
+ out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
+
+- ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->base_config, available_fmt,
++ ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->data.base_config, available_fmt,
+ out_ref_rate, out_ref_channels, out_ref_valid_bits);
+ if (ret < 0) {
+ dev_err(sdev->dev, "Failed to initialize output format for %s",
+@@ -1830,7 +1855,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
+ }
+
+ /* update pipeline memory usage */
+- sof_ipc4_update_resource_usage(sdev, swidget, &gain->base_config);
++ sof_ipc4_update_resource_usage(sdev, swidget, &gain->data.base_config);
+
+ return 0;
+ }
+@@ -1886,7 +1911,7 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
+ u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
+ int output_format_index, input_format_index;
+
+- input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->base_config,
++ input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->data.base_config,
+ pipeline_params, available_fmt);
+ if (input_format_index < 0)
+ return input_format_index;
+@@ -1916,7 +1941,7 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
+ */
+ out_ref_rate = params_rate(fe_params);
+
+- output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->base_config,
++ output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->data.base_config,
+ available_fmt, out_ref_rate,
+ out_ref_channels, out_ref_valid_bits);
+ if (output_format_index < 0) {
+@@ -1926,10 +1951,10 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
+ }
+
+ /* update pipeline memory usage */
+- sof_ipc4_update_resource_usage(sdev, swidget, &src->base_config);
++ sof_ipc4_update_resource_usage(sdev, swidget, &src->data.base_config);
+
+ out_audio_fmt = &available_fmt->output_pin_fmts[output_format_index].audio_fmt;
+- src->sink_rate = out_audio_fmt->sampling_frequency;
++ src->data.sink_rate = out_audio_fmt->sampling_frequency;
+
+ /* update pipeline_params for sink widgets */
+ return sof_ipc4_update_hw_params(sdev, pipeline_params, out_audio_fmt);
+@@ -2266,9 +2291,8 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
+ {
+ struct sof_ipc4_gain *gain = swidget->private;
+
+- ipc_size = sizeof(struct sof_ipc4_base_module_cfg) +
+- sizeof(struct sof_ipc4_gain_data);
+- ipc_data = gain;
++ ipc_size = sizeof(gain->data);
++ ipc_data = &gain->data;
+
+ msg = &gain->msg;
+ break;
+@@ -2287,8 +2311,8 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
+ {
+ struct sof_ipc4_src *src = swidget->private;
+
+- ipc_size = sizeof(struct sof_ipc4_base_module_cfg) + sizeof(src->sink_rate);
+- ipc_data = src;
++ ipc_size = sizeof(src->data);
++ ipc_data = &src->data;
+
+ msg = &src->msg;
+ break;
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index d75f17f4749c60..21436657ad85b6 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -344,7 +344,7 @@ struct sof_ipc4_control_data {
+ };
+
+ /**
+- * struct sof_ipc4_gain_data - IPC gain blob
++ * struct sof_ipc4_gain_params - IPC gain parameters
+ * @channels: Channels
+ * @init_val: Initial value
+ * @curve_type: Curve type
+@@ -352,24 +352,32 @@ struct sof_ipc4_control_data {
+ * @curve_duration_l: Curve duration low part
+ * @curve_duration_h: Curve duration high part
+ */
+-struct sof_ipc4_gain_data {
++struct sof_ipc4_gain_params {
+ uint32_t channels;
+ uint32_t init_val;
+ uint32_t curve_type;
+ uint32_t reserved;
+ uint32_t curve_duration_l;
+ uint32_t curve_duration_h;
+-} __aligned(8);
++} __packed __aligned(4);
+
+ /**
+- * struct sof_ipc4_gain - gain config data
++ * struct sof_ipc4_gain_data - IPC gain init blob
+ * @base_config: IPC base config data
++ * @params: Initial parameters for the gain module
++ */
++struct sof_ipc4_gain_data {
++ struct sof_ipc4_base_module_cfg base_config;
++ struct sof_ipc4_gain_params params;
++} __packed __aligned(4);
++
++/**
++ * struct sof_ipc4_gain - gain config data
+ * @data: IPC gain blob
+ * @available_fmt: Available audio format
+ * @msg: message structure for gain
+ */
+ struct sof_ipc4_gain {
+- struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_gain_data data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+@@ -387,16 +395,24 @@ struct sof_ipc4_mixer {
+ struct sof_ipc4_msg msg;
+ };
+
+-/**
+- * struct sof_ipc4_src SRC config data
++/*
++ * struct sof_ipc4_src_data - IPC data for SRC
+ * @base_config: IPC base config data
+ * @sink_rate: Output rate for sink module
++ */
++struct sof_ipc4_src_data {
++ struct sof_ipc4_base_module_cfg base_config;
++ uint32_t sink_rate;
++} __packed __aligned(4);
++
++/**
++ * struct sof_ipc4_src - SRC config data
++ * @data: IPC config data for SRC
+ * @available_fmt: Available audio format
+ * @msg: IPC4 message struct containing header and data info
+ */
+ struct sof_ipc4_src {
+- struct sof_ipc4_base_module_cfg base_config;
+- uint32_t sink_rate;
++ struct sof_ipc4_src_data data;
+ struct sof_ipc4_available_audio_format available_fmt;
+ struct sof_ipc4_msg msg;
+ };
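
A minimal sketch of the layout guarantee this refactor leans on (all type and
field names here are illustrative stand-ins, not from the patch): once the IPC
blob is a single __packed struct combining the base config and the module
parameters, sizeof() on that struct covers the whole wire payload with no
implicit padding, which is what lets the widget-setup hunks above replace the
hand-summed "sizeof(base) + sizeof(params)" with sizeof(gain->data).

    /* hypothetical stand-ins for the SOF IPC4 structs */
    struct example_base_cfg {
            unsigned int cpc, ibs, obs;
    } __attribute__((packed, aligned(4)));

    struct example_params {
            unsigned int channels, init_val;
    } __attribute__((packed, aligned(4)));

    struct example_data {
            struct example_base_cfg base_config;
            struct example_params params;
    } __attribute__((packed, aligned(4)));

    /* before: ipc_size = sizeof(struct example_base_cfg) +
     *                    sizeof(struct example_params);
     * after:  ipc_size = sizeof(struct example_data);  one contiguous blob
     */
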
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index ab6eddd91bb771..81d1ce4b5f0cd2 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -614,6 +614,9 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
+ sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
+ break;
++ case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
++ snd_sof_dsp_panic(sdev, 0, true);
++ break;
+ default:
+ dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
+ ipc4_msg->primary, ipc4_msg->extension);
+@@ -626,7 +629,14 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ return;
+
+ ipc4_msg->data_size = data_size;
+- snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
++ err = snd_sof_ipc_msg_data(sdev, NULL, ipc4_msg->data_ptr, ipc4_msg->data_size);
++ if (err < 0) {
++ dev_err(sdev->dev, "failed to read IPC notification data: %d\n", err);
++ kfree(ipc4_msg->data_ptr);
++ ipc4_msg->data_ptr = NULL;
++ ipc4_msg->data_size = 0;
++ return;
++ }
+ }
+
+ sof_ipc4_log_header(sdev->dev, "ipc rx done ", ipc4_msg, true);
+diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
+index 7d6a568556ea47..94db51d88dda0b 100644
+--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
++++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
+@@ -624,7 +624,10 @@ static struct snd_sof_dsp_ops sof_mt8195_ops = {
+ static struct snd_sof_of_mach sof_mt8195_machs[] = {
+ {
+ .compatible = "google,tomato",
+- .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg"
++ .sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg"
++ }, {
++ .compatible = "google,dojo",
++ .sof_tplg_filename = "sof-mt8195-mt6359-max98390-rt5682.tplg"
+ }, {
+ .compatible = "mediatek,mt8195",
+ .sof_tplg_filename = "sof-mt8195.tplg"
+diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
+index d778717cab10b8..8e602e42afee23 100644
+--- a/sound/soc/sof/pcm.c
++++ b/sound/soc/sof/pcm.c
+@@ -325,14 +325,13 @@ static int sof_pcm_trigger(struct snd_soc_component *component,
+ ipc_first = true;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+- if (sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
++ /*
++ * If DSP D0I3 is allowed during S0iX, set the suspend_ignored flag for
++ * D0I3-compatible streams to keep the firmware pipeline running
++ */
++ if (pcm_ops && pcm_ops->d0i3_supported_in_s0ix &&
++ sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
+ spcm->stream[substream->stream].d0i3_compatible) {
+- /*
+- * trap the event, not sending trigger stop to
+- * prevent the FW pipelines from being stopped,
+- * and mark the flag to ignore the upcoming DAPM
+- * PM events.
+- */
+ spcm->stream[substream->stream].suspend_ignored = true;
+ return 0;
+ }
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index e5405f854a910c..51626258347257 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -46,6 +46,7 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+ {
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
++ struct snd_sof_pipeline *spipe = swidget->spipe;
+ struct snd_sof_widget *pipe_widget;
+ int err = 0;
+ int ret;
+@@ -87,15 +88,22 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
+ }
+
+ /*
+- * disable widget core. continue to route setup status and complete flag
+- * even if this fails and return the appropriate error
++ * decrement ref count for cores associated with all modules in the pipeline and clear
++ * the complete flag
+ */
+- ret = snd_sof_dsp_core_put(sdev, swidget->core);
+- if (ret < 0) {
+- dev_err(sdev->dev, "error: failed to disable target core: %d for widget %s\n",
+- swidget->core, swidget->widget->name);
+- if (!err)
+- err = ret;
++ if (swidget->id == snd_soc_dapm_scheduler) {
++ int i;
++
++ for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
++ ret = snd_sof_dsp_core_put(sdev, i);
++ if (ret < 0) {
++ dev_err(sdev->dev, "failed to disable target core: %d for pipeline %s\n",
++ i, swidget->widget->name);
++ if (!err)
++ err = ret;
++ }
++ }
++ swidget->spipe->complete = 0;
+ }
+
+ /*
+@@ -108,10 +116,6 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
+ err = ret;
+ }
+
+- /* clear pipeline complete */
+- if (swidget->id == snd_soc_dapm_scheduler)
+- swidget->spipe->complete = 0;
+-
+ if (!err)
+ dev_dbg(sdev->dev, "widget %s freed\n", swidget->widget->name);
+
+@@ -134,8 +138,10 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
+ struct snd_sof_widget *swidget)
+ {
+ const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
++ struct snd_sof_pipeline *spipe = swidget->spipe;
+ bool use_count_decremented = false;
+ int ret;
++ int i;
+
+ /* skip if there is no private data */
+ if (!swidget->private)
+@@ -166,19 +172,23 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
+ goto use_count_dec;
+ }
+
+- /* enable widget core */
+- ret = snd_sof_dsp_core_get(sdev, swidget->core);
+- if (ret < 0) {
+- dev_err(sdev->dev, "error: failed to enable target core for widget %s\n",
+- swidget->widget->name);
+- goto pipe_widget_free;
++ /* update ref count for cores associated with all modules in the pipeline */
++ if (swidget->id == snd_soc_dapm_scheduler) {
++ for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
++ ret = snd_sof_dsp_core_get(sdev, i);
++ if (ret < 0) {
++ dev_err(sdev->dev, "failed to enable target core %d for pipeline %s\n",
++ i, swidget->widget->name);
++ goto pipe_widget_free;
++ }
++ }
+ }
+
+ /* setup widget in the DSP */
+ if (tplg_ops && tplg_ops->widget_setup) {
+ ret = tplg_ops->widget_setup(sdev, swidget);
+ if (ret < 0)
+- goto core_put;
++ goto pipe_widget_free;
+ }
+
+ /* send config for DAI components */
+@@ -208,15 +218,22 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
+ return 0;
+
+ widget_free:
+- /* widget use_count and core ref_count will both be decremented by sof_widget_free() */
++ /* widget use_count will be decremented by sof_widget_free() */
+ sof_widget_free_unlocked(sdev, swidget);
+ use_count_decremented = true;
+-core_put:
+- if (!use_count_decremented)
+- snd_sof_dsp_core_put(sdev, swidget->core);
+ pipe_widget_free:
+- if (swidget->id != snd_soc_dapm_scheduler)
++ if (swidget->id != snd_soc_dapm_scheduler) {
+ sof_widget_free_unlocked(sdev, swidget->spipe->pipe_widget);
++ } else {
++ int j;
++
++ /* decrement ref count for all cores that were updated previously */
++ for_each_set_bit(j, &spipe->core_mask, sdev->num_cores) {
++ if (j >= i)
++ break;
++ snd_sof_dsp_core_put(sdev, j);
++ }
++ }
+ use_count_dec:
+ if (!use_count_decremented)
+ swidget->use_count--;
+@@ -471,7 +488,7 @@ sof_prepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget
+ if (ret < 0) {
+ /* unprepare the source widget */
+ if (widget_ops[widget->id].ipc_unprepare &&
+- swidget && swidget->prepared) {
++ swidget && swidget->prepared && swidget->use_count == 0) {
+ widget_ops[widget->id].ipc_unprepare(swidget);
+ swidget->prepared = false;
+ }
+@@ -1032,6 +1049,13 @@ int sof_machine_check(struct snd_sof_dev *sdev)
+ mach = snd_sof_machine_select(sdev);
+ if (mach) {
+ sof_pdata->machine = mach;
++
++ if (sof_pdata->subsystem_id_set) {
++ mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
++ mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
++ mach->mach_params.subsystem_id_set = true;
++ }
++
+ snd_sof_set_mach_params(mach, sdev);
+ return 0;
+ }
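
The scheduler-widget paths above follow a classic acquire-with-rollback
pattern over spipe->core_mask. A condensed, self-contained sketch (plain
loops standing in for the kernel's for_each_set_bit(); function pointers
standing in for snd_sof_dsp_core_get/put()):

    static int get_cores(unsigned long mask, int num_cores,
                         int (*get)(int), void (*put)(int))
    {
            int i, j, ret;

            for (i = 0; i < num_cores; i++) {
                    if (!(mask & (1UL << i)))
                            continue;
                    ret = get(i);
                    if (ret < 0)
                            goto rollback;
            }
            return 0;

    rollback:
            /* release only cores acquired before the failure (j < i),
             * mirroring the "if (j >= i) break" in the error path above
             */
            for (j = 0; j < i; j++)
                    if (mask & (1UL << j))
                            put(j);
            return ret;
    }
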
+diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
+index 5d5eeb1a1a6f0d..3606595a7500c2 100644
+--- a/sound/soc/sof/sof-audio.h
++++ b/sound/soc/sof/sof-audio.h
+@@ -113,6 +113,7 @@ struct snd_sof_dai_config_data {
+ * triggers. The FW keeps the host DMA running in this case and
+ * therefore the host must do the same and should stop the DMA during
+ * hw_free.
++ * @d0i3_supported_in_s0ix: Allow DSP D0I3 during S0iX
+ */
+ struct sof_ipc_pcm_ops {
+ int (*hw_params)(struct snd_soc_component *component, struct snd_pcm_substream *substream,
+@@ -129,6 +130,7 @@ struct sof_ipc_pcm_ops {
+ bool reset_hw_params_during_stop;
+ bool ipc_first_on_start;
+ bool platform_stop_during_hw_free;
++ bool d0i3_supported_in_s0ix;
+ };
+
+ /**
+@@ -480,6 +482,7 @@ struct snd_sof_widget {
+ * @paused_count: Count of number of PCM's that have started and have currently paused this
+ pipeline
+ * @complete: flag used to indicate that pipeline set up is complete.
++ * @core_mask: Mask containing target cores for all modules in the pipeline
+ * @list: List item in sdev pipeline_list
+ */
+ struct snd_sof_pipeline {
+@@ -487,6 +490,7 @@ struct snd_sof_pipeline {
+ int started_count;
+ int paused_count;
+ int complete;
++ unsigned long core_mask;
+ struct list_head list;
+ };
+
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index f5ece43d0ec247..69a2352f2e1a0a 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -145,6 +145,13 @@ static const struct dmi_system_id community_key_platforms[] = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
+ }
+ },
++ {
++ .ident = "Google firmware",
++ .callback = chromebook_use_community_key,
++ .matches = {
++ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
++ }
++ },
+ {},
+ };
+
+@@ -214,6 +221,14 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ return ret;
+
+ sof_pdata->name = pci_name(pci);
++
++ /* PCI defines a vendor ID of 0xFFFF as invalid. */
++ if (pci->subsystem_vendor != 0xFFFF) {
++ sof_pdata->subsystem_vendor = pci->subsystem_vendor;
++ sof_pdata->subsystem_device = pci->subsystem_device;
++ sof_pdata->subsystem_id_set = true;
++ }
++
+ sof_pdata->desc = desc;
+ sof_pdata->dev = dev;
+
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index a3a3af252259d9..cf1e63daad86bf 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1134,7 +1134,7 @@ static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ /* does stream match DAI link ? */
+ if (!rtd->dai_link->stream_name ||
+- strcmp(sname, rtd->dai_link->stream_name))
++ !strstr(rtd->dai_link->stream_name, sname))
+ continue;
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+@@ -1736,8 +1736,10 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
+ /* perform pcm set op */
+ if (ipc_pcm_ops && ipc_pcm_ops->pcm_setup) {
+ ret = ipc_pcm_ops->pcm_setup(sdev, spcm);
+- if (ret < 0)
++ if (ret < 0) {
++ kfree(spcm);
+ return ret;
++ }
+ }
+
+ dai_drv->dobj.private = spcm;
+@@ -2038,6 +2040,8 @@ static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj
+ if (!slink)
+ return 0;
+
++ slink->link->platforms->name = NULL;
++
+ kfree(slink->tuples);
+ list_del(&slink->list);
+ kfree(slink->hw_configs);
+diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
+index 2c21a86421e66e..cc9a8122b9bc20 100644
+--- a/sound/soc/sti/sti_uniperif.c
++++ b/sound/soc/sti/sti_uniperif.c
+@@ -352,7 +352,7 @@ static int sti_uniperiph_resume(struct snd_soc_component *component)
+ return ret;
+ }
+
+-static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
++int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
+ {
+ struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+ struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
+index 2a5de328501c19..74e51f0ff85c84 100644
+--- a/sound/soc/sti/uniperif.h
++++ b/sound/soc/sti/uniperif.h
+@@ -1380,6 +1380,7 @@ int uni_reader_init(struct platform_device *pdev,
+ struct uniperif *reader);
+
+ /* common */
++int sti_uniperiph_dai_probe(struct snd_soc_dai *dai);
+ int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt);
+
+diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
+index dd9013c4766491..6d1ce030963c62 100644
+--- a/sound/soc/sti/uniperif_player.c
++++ b/sound/soc/sti/uniperif_player.c
+@@ -1038,6 +1038,7 @@ static const struct snd_soc_dai_ops uni_player_dai_ops = {
+ .startup = uni_player_startup,
+ .shutdown = uni_player_shutdown,
+ .prepare = uni_player_prepare,
++ .probe = sti_uniperiph_dai_probe,
+ .trigger = uni_player_trigger,
+ .hw_params = sti_uniperiph_dai_hw_params,
+ .set_fmt = sti_uniperiph_dai_set_fmt,
+diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
+index 065c5f0d1f5f00..05ea2b794eb925 100644
+--- a/sound/soc/sti/uniperif_reader.c
++++ b/sound/soc/sti/uniperif_reader.c
+@@ -401,6 +401,7 @@ static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+ .startup = uni_reader_startup,
+ .shutdown = uni_reader_shutdown,
+ .prepare = uni_reader_prepare,
++ .probe = sti_uniperiph_dai_probe,
+ .trigger = uni_reader_trigger,
+ .hw_params = sti_uniperiph_dai_hw_params,
+ .set_fmt = sti_uniperiph_dai_set_fmt,
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index 5124b6c9ceb4b9..d1cb49d54f0084 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -100,8 +100,8 @@
+ #define SUN8I_I2S_CTRL_MODE_PCM (0 << 4)
+
+ #define SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(19)
+-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 19)
+-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 19)
++#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH (1 << 19)
++#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW (0 << 19)
+ #define SUN8I_I2S_FMT0_LRCK_PERIOD_MASK GENMASK(17, 8)
+ #define SUN8I_I2S_FMT0_LRCK_PERIOD(period) ((period - 1) << 8)
+ #define SUN8I_I2S_FMT0_BCLK_POLARITY_MASK BIT(7)
+@@ -727,65 +727,37 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ unsigned int fmt)
+ {
+- u32 mode, val;
++ u32 mode, lrclk_pol, bclk_pol, val;
+ u8 offset;
+
+- /*
+- * DAI clock polarity
+- *
+- * The setup for LRCK contradicts the datasheet, but under a
+- * scope it's clear that the LRCK polarity is reversed
+- * compared to the expected polarity on the bus.
+- */
+- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+- case SND_SOC_DAIFMT_IB_IF:
+- /* Invert both clocks */
+- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+- break;
+- case SND_SOC_DAIFMT_IB_NF:
+- /* Invert bit clock */
+- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+- break;
+- case SND_SOC_DAIFMT_NB_IF:
+- /* Invert frame clock */
+- val = 0;
+- break;
+- case SND_SOC_DAIFMT_NB_NF:
+- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+- val);
+-
+ /* DAI Mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_PCM;
+ offset = 1;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_PCM;
+ offset = 0;
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
+ mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ offset = 1;
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ offset = 0;
+ break;
+
+ case SND_SOC_DAIFMT_RIGHT_J:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_RIGHT;
+ offset = 0;
+ break;
+@@ -803,6 +775,35 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ SUN8I_I2S_TX_CHAN_OFFSET_MASK,
+ SUN8I_I2S_TX_CHAN_OFFSET(offset));
+
++ /* DAI clock polarity */
++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
++
++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++ case SND_SOC_DAIFMT_IB_IF:
++ /* Invert both clocks */
++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++ break;
++ case SND_SOC_DAIFMT_IB_NF:
++ /* Invert bit clock */
++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++ break;
++ case SND_SOC_DAIFMT_NB_IF:
++ /* Invert frame clock */
++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++ break;
++ case SND_SOC_DAIFMT_NB_NF:
++ /* No inversion */
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
++ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
++ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
++ lrclk_pol | bclk_pol);
++
+ /* DAI clock master masks */
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
+@@ -834,65 +835,37 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ unsigned int fmt)
+ {
+- u32 mode, val;
++ u32 mode, lrclk_pol, bclk_pol, val;
+ u8 offset;
+
+- /*
+- * DAI clock polarity
+- *
+- * The setup for LRCK contradicts the datasheet, but under a
+- * scope it's clear that the LRCK polarity is reversed
+- * compared to the expected polarity on the bus.
+- */
+- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+- case SND_SOC_DAIFMT_IB_IF:
+- /* Invert both clocks */
+- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+- break;
+- case SND_SOC_DAIFMT_IB_NF:
+- /* Invert bit clock */
+- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+- break;
+- case SND_SOC_DAIFMT_NB_IF:
+- /* Invert frame clock */
+- val = 0;
+- break;
+- case SND_SOC_DAIFMT_NB_NF:
+- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+- val);
+-
+ /* DAI Mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_PCM;
+ offset = 1;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_PCM;
+ offset = 0;
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
+ mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ offset = 1;
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_LEFT;
+ offset = 0;
+ break;
+
+ case SND_SOC_DAIFMT_RIGHT_J:
++ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
+ mode = SUN8I_I2S_CTRL_MODE_RIGHT;
+ offset = 0;
+ break;
+@@ -910,6 +883,36 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
+ SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET_MASK,
+ SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset));
+
++ /* DAI clock polarity */
++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
++
++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++ case SND_SOC_DAIFMT_IB_IF:
++ /* Invert both clocks */
++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++ break;
++ case SND_SOC_DAIFMT_IB_NF:
++ /* Invert bit clock */
++ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
++ break;
++ case SND_SOC_DAIFMT_NB_IF:
++ /* Invert frame clock */
++ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
++ break;
++ case SND_SOC_DAIFMT_NB_NF:
++ /* No inversion */
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
++ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
++ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
++ lrclk_pol | bclk_pol);
++
++
+ /* DAI clock master masks */
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+ case SND_SOC_DAIFMT_BP_FP:
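
The rewritten polarity handling in both functions reduces to one idea: the
DAI format picks the LRCK start level (only I2S starts low; DSP_A/B, LEFT_J
and RIGHT_J start high), and a frame-clock inversion request simply flips
that choice, so an XOR with the mask bit replaces the old hard-coded
NORMAL/INVERTED table. A standalone sketch (macro names mirror the
SUN8I_I2S_FMT0_* defines but are local assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    #define LRCLK_POLARITY_MASK (1u << 19)  /* BIT(19) */
    #define LRCLK_START_HIGH    (1u << 19)
    #define LRCLK_START_LOW     (0u << 19)

    static uint32_t lrclk_bits(bool fmt_is_i2s, bool frame_inverted)
    {
            uint32_t pol = fmt_is_i2s ? LRCLK_START_LOW : LRCLK_START_HIGH;

            if (frame_inverted)     /* NB_IF or IB_IF */
                    pol ^= LRCLK_POLARITY_MASK;
            return pol;
    }
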
+diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
+index b849bb7cf58e24..2347aeb049bccf 100644
+--- a/sound/soc/sunxi/sun4i-spdif.c
++++ b/sound/soc/sunxi/sun4i-spdif.c
+@@ -578,6 +578,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
+ .compatible = "allwinner,sun50i-h6-spdif",
+ .data = &sun50i_h6_spdif_quirks,
+ },
++ {
++ .compatible = "allwinner,sun50i-h616-spdif",
++ /* Essentially the same as the H6, but without RX */
++ .data = &sun50i_h6_spdif_quirks,
++ },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
+diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c
+index a0ce7eb11de96b..95bff466e8bdb1 100644
+--- a/sound/soc/tegra/tegra186_dspk.c
++++ b/sound/soc/tegra/tegra186_dspk.c
+@@ -1,8 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
++// SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ //
+ // tegra186_dspk.c - Tegra186 DSPK driver
+-//
+-// Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
+
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -241,14 +240,14 @@ static int tegra186_dspk_hw_params(struct snd_pcm_substream *substream,
+ return -EINVAL;
+ }
+
+- cif_conf.client_bits = TEGRA_ACIF_BITS_24;
+-
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ cif_conf.audio_bits = TEGRA_ACIF_BITS_16;
++ cif_conf.client_bits = TEGRA_ACIF_BITS_16;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ cif_conf.audio_bits = TEGRA_ACIF_BITS_32;
++ cif_conf.client_bits = TEGRA_ACIF_BITS_24;
+ break;
+ default:
+ dev_err(dev, "unsupported format!\n");
+diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
+index 3f114a2adfced0..ab3c6b2544d205 100644
+--- a/sound/soc/tegra/tegra210_ahub.c
++++ b/sound/soc/tegra/tegra210_ahub.c
+@@ -2,7 +2,7 @@
+ //
+ // tegra210_ahub.c - Tegra210 AHUB driver
+ //
+-// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
++// Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
+
+ #include <linux/clk.h>
+ #include <linux/device.h>
+@@ -1391,11 +1391,13 @@ static int tegra_ahub_probe(struct platform_device *pdev)
+ return err;
+ }
+
++ pm_runtime_enable(&pdev->dev);
++
+ err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+- if (err)
++ if (err) {
++ pm_runtime_disable(&pdev->dev);
+ return err;
+-
+- pm_runtime_enable(&pdev->dev);
++ }
+
+ return 0;
+ }
+diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
+index 666057d50ea0d2..dd3f59bb72fafe 100644
+--- a/sound/soc/ti/ams-delta.c
++++ b/sound/soc/ti/ams-delta.c
+@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
+ static void cx81801_close(struct tty_struct *tty)
+ {
+ struct snd_soc_component *component = tty->disc_data;
+- struct snd_soc_dapm_context *dapm = &component->card->dapm;
++ struct snd_soc_dapm_context *dapm;
+
+ del_timer_sync(&cx81801_timer);
+
+@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
+
+ v253_ops.close(tty);
+
++ dapm = &component->card->dapm;
++
+ /* Revert back to default audio input/output constellation */
+ snd_soc_dapm_mutex_lock(dapm);
+
+diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
+index 7e7d665a5504af..8c8b2a2f6f8620 100644
+--- a/sound/soc/ti/davinci-mcasp.c
++++ b/sound/soc/ti/davinci-mcasp.c
+@@ -1474,10 +1474,11 @@ static int davinci_mcasp_hw_rule_min_periodsize(
+ {
+ struct snd_interval *period_size = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
++ u8 numevt = *((u8 *)rule->private);
+ struct snd_interval frames;
+
+ snd_interval_any(&frames);
+- frames.min = 64;
++ frames.min = numevt;
+ frames.integer = 1;
+
+ return snd_interval_refine(period_size, &frames);
+@@ -1492,6 +1493,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ u32 max_channels = 0;
+ int i, dir, ret;
+ int tdm_slots = mcasp->tdm_slots;
++ u8 *numevt;
+
+ /* Do not allow more than one stream per direction */
+ if (mcasp->substreams[substream->stream])
+@@ -1591,9 +1593,12 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ return ret;
+ }
+
++ numevt = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
++ &mcasp->txnumevt :
++ &mcasp->rxnumevt;
+ snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+- davinci_mcasp_hw_rule_min_periodsize, NULL,
++ davinci_mcasp_hw_rule_min_periodsize, numevt,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+
+ return 0;
+@@ -2418,12 +2423,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+
+ mcasp_reparent_fck(pdev);
+
+- ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
+- &davinci_mcasp_dai[mcasp->op_mode], 1);
+-
+- if (ret != 0)
+- goto err;
+-
+ ret = davinci_mcasp_get_dma_type(mcasp);
+ switch (ret) {
+ case PCM_EDMA:
+@@ -2450,6 +2449,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
+ goto err;
+ }
+
++ ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
++ &davinci_mcasp_dai[mcasp->op_mode], 1);
++
++ if (ret != 0)
++ goto err;
++
+ no_audio:
+ ret = davinci_mcasp_init_gpiochip(mcasp);
+ if (ret) {
+diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
+index a3663ab065ac21..0a731b21e5a58d 100644
+--- a/sound/soc/ti/omap-hdmi.c
++++ b/sound/soc/ti/omap-hdmi.c
+@@ -354,11 +354,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
+ if (!card)
+ return -ENOMEM;
+
+- card->name = devm_kasprintf(dev, GFP_KERNEL,
+- "HDMI %s", dev_name(ad->dssdev));
+- if (!card->name)
+- return -ENOMEM;
+-
++ card->name = "HDMI";
+ card->owner = THIS_MODULE;
+ card->dai_link =
+ devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
+diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
+index fdabed5133e836..b399d86f22777f 100644
+--- a/sound/soc/ti/omap-mcbsp.c
++++ b/sound/soc/ti/omap-mcbsp.c
+@@ -74,14 +74,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
+ return 0;
+ }
+
+- pm_runtime_put_sync(mcbsp->dev);
++ if (mcbsp->active)
++ pm_runtime_put_sync(mcbsp->dev);
+
+ r = clk_set_parent(mcbsp->fclk, fck_src);
+ if (r)
+ dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
+ src);
+
+- pm_runtime_get_sync(mcbsp->dev);
++ if (mcbsp->active)
++ pm_runtime_get_sync(mcbsp->dev);
+
+ clk_put(fck_src);
+
+diff --git a/sound/soc/ti/omap3pandora.c b/sound/soc/ti/omap3pandora.c
+index a287e9747c2a12..fa92ed97dfe3bf 100644
+--- a/sound/soc/ti/omap3pandora.c
++++ b/sound/soc/ti/omap3pandora.c
+@@ -7,7 +7,7 @@
+
+ #include <linux/clk.h>
+ #include <linux/platform_device.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/delay.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/module.h>
+@@ -21,12 +21,11 @@
+
+ #include "omap-mcbsp.h"
+
+-#define OMAP3_PANDORA_DAC_POWER_GPIO 118
+-#define OMAP3_PANDORA_AMP_POWER_GPIO 14
+-
+ #define PREFIX "ASoC omap3pandora: "
+
+ static struct regulator *omap3pandora_dac_reg;
++static struct gpio_desc *dac_power_gpio;
++static struct gpio_desc *amp_power_gpio;
+
+ static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+@@ -78,9 +77,9 @@ static int omap3pandora_dac_event(struct snd_soc_dapm_widget *w,
+ return ret;
+ }
+ mdelay(1);
+- gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 1);
++ gpiod_set_value(dac_power_gpio, 1);
+ } else {
+- gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
++ gpiod_set_value(dac_power_gpio, 0);
+ mdelay(1);
+ regulator_disable(omap3pandora_dac_reg);
+ }
+@@ -92,9 +91,9 @@ static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+ {
+ if (SND_SOC_DAPM_EVENT_ON(event))
+- gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 1);
++ gpiod_set_value(amp_power_gpio, 1);
+ else
+- gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
++ gpiod_set_value(amp_power_gpio, 0);
+
+ return 0;
+ }
+@@ -229,35 +228,10 @@ static int __init omap3pandora_soc_init(void)
+
+ pr_info("OMAP3 Pandora SoC init\n");
+
+- ret = gpio_request(OMAP3_PANDORA_DAC_POWER_GPIO, "dac_power");
+- if (ret) {
+- pr_err(PREFIX "Failed to get DAC power GPIO\n");
+- return ret;
+- }
+-
+- ret = gpio_direction_output(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
+- if (ret) {
+- pr_err(PREFIX "Failed to set DAC power GPIO direction\n");
+- goto fail0;
+- }
+-
+- ret = gpio_request(OMAP3_PANDORA_AMP_POWER_GPIO, "amp_power");
+- if (ret) {
+- pr_err(PREFIX "Failed to get amp power GPIO\n");
+- goto fail0;
+- }
+-
+- ret = gpio_direction_output(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
+- if (ret) {
+- pr_err(PREFIX "Failed to set amp power GPIO direction\n");
+- goto fail1;
+- }
+-
+ omap3pandora_snd_device = platform_device_alloc("soc-audio", -1);
+ if (omap3pandora_snd_device == NULL) {
+ pr_err(PREFIX "Platform device allocation failed\n");
+- ret = -ENOMEM;
+- goto fail1;
++ return -ENOMEM;
+ }
+
+ platform_set_drvdata(omap3pandora_snd_device, &snd_soc_card_omap3pandora);
+@@ -268,6 +242,20 @@ static int __init omap3pandora_soc_init(void)
+ goto fail2;
+ }
+
++ dac_power_gpio = devm_gpiod_get(&omap3pandora_snd_device->dev,
++ "dac", GPIOD_OUT_LOW);
++ if (IS_ERR(dac_power_gpio)) {
++ ret = PTR_ERR(dac_power_gpio);
++ goto fail3;
++ }
++
++ amp_power_gpio = devm_gpiod_get(&omap3pandora_snd_device->dev,
++ "amp", GPIOD_OUT_LOW);
++ if (IS_ERR(amp_power_gpio)) {
++ ret = PTR_ERR(amp_power_gpio);
++ goto fail3;
++ }
++
+ omap3pandora_dac_reg = regulator_get(&omap3pandora_snd_device->dev, "vcc");
+ if (IS_ERR(omap3pandora_dac_reg)) {
+ pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n",
+@@ -283,10 +271,7 @@ static int __init omap3pandora_soc_init(void)
+ platform_device_del(omap3pandora_snd_device);
+ fail2:
+ platform_device_put(omap3pandora_snd_device);
+-fail1:
+- gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
+-fail0:
+- gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
++
+ return ret;
+ }
+ module_init(omap3pandora_soc_init);
+@@ -295,8 +280,6 @@ static void __exit omap3pandora_soc_exit(void)
+ {
+ regulator_put(omap3pandora_dac_reg);
+ platform_device_unregister(omap3pandora_snd_device);
+- gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
+- gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
+ }
+ module_exit(omap3pandora_soc_exit);
+
+diff --git a/sound/synth/emux/soundfont.c b/sound/synth/emux/soundfont.c
+index 16f00097cb95a8..eed47e48302485 100644
+--- a/sound/synth/emux/soundfont.c
++++ b/sound/synth/emux/soundfont.c
+@@ -701,7 +701,6 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ struct snd_soundfont *sf;
+ struct soundfont_sample_info sample_info;
+ struct snd_sf_sample *sp;
+- long off;
+
+ /* patch must be opened */
+ sf = sflist->currsf;
+@@ -711,12 +710,16 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ if (is_special_type(sf->type))
+ return -EINVAL;
+
++ if (count < (long)sizeof(sample_info)) {
++ return -EINVAL;
++ }
+ if (copy_from_user(&sample_info, data, sizeof(sample_info)))
+ return -EFAULT;
++ data += sizeof(sample_info);
++ count -= sizeof(sample_info);
+
+- off = sizeof(sample_info);
+-
+- if (sample_info.size != (count-off)/2)
++ // SoundFont uses S16LE samples.
++ if (sample_info.size * 2 != count)
+ return -EINVAL;
+
+ /* Check for dup */
+@@ -744,7 +747,7 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ int rc;
+ rc = sflist->callback.sample_new
+ (sflist->callback.private_data, sp, sflist->memhdr,
+- data + off, count - off);
++ data, count);
+ if (rc < 0) {
+ sf_sample_delete(sflist, sf, sp);
+ return rc;
+@@ -957,10 +960,12 @@ load_guspatch(struct snd_sf_list *sflist, const char __user *data,
+ }
+ if (copy_from_user(&patch, data, sizeof(patch)))
+ return -EFAULT;
+-
+ count -= sizeof(patch);
+ data += sizeof(patch);
+
++ if ((patch.len << (patch.mode & WAVE_16_BITS ? 1 : 0)) != count)
++ return -EINVAL;
++
+ sf = newsf(sflist, SNDRV_SFNT_PAT_TYPE_GUS|SNDRV_SFNT_PAT_SHARED, NULL);
+ if (sf == NULL)
+ return -ENOMEM;
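
Both soundfont hunks above enforce the same invariant: the sample count the
header declares must exactly match the bytes remaining in the ioctl payload
(2 bytes per sample for the S16LE soundfont data, 1 or 2 bytes per sample for
8/16-bit GUS patches). A minimal sketch of that check, with illustrative
names:

    #include <errno.h>

    static int check_sample_payload(long declared_samples, int is_16bit,
                                    long remaining_bytes)
    {
            long bytes_per_sample = is_16bit ? 2 : 1;

            if (declared_samples * bytes_per_sample != remaining_bytes)
                    return -EINVAL; /* truncated or oversized payload */
            return 0;
    }
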
+diff --git a/sound/usb/Makefile b/sound/usb/Makefile
+index db5ff76d0e61f9..8c657c2753c84f 100644
+--- a/sound/usb/Makefile
++++ b/sound/usb/Makefile
+@@ -12,7 +12,7 @@ snd-usb-audio-objs := card.o \
+ mixer.o \
+ mixer_quirks.o \
+ mixer_scarlett.o \
+- mixer_scarlett_gen2.o \
++ mixer_scarlett2.o \
+ mixer_us16x08.o \
+ mixer_s1810c.o \
+ pcm.o \
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 1b2edc0fd2e992..7743ea983b1a81 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -382,6 +382,12 @@ static const struct usb_audio_device_name usb_audio_names[] = {
+ /* Creative/Toshiba Multimedia Center SB-0500 */
+ DEVICE_NAME(0x041e, 0x3048, "Toshiba", "SB-0500"),
+
++ /* Logitech Audio Devices */
++ DEVICE_NAME(0x046d, 0x0867, "Logitech, Inc.", "Logi-MeetUp"),
++ DEVICE_NAME(0x046d, 0x0874, "Logitech, Inc.", "Logi-Tap-Audio"),
++ DEVICE_NAME(0x046d, 0x087c, "Logitech, Inc.", "Logi-Huddle"),
++ DEVICE_NAME(0x046d, 0x0898, "Logitech, Inc.", "Logi-RB-Audio"),
++ DEVICE_NAME(0x046d, 0x08d2, "Logitech, Inc.", "Logi-RBM-Audio"),
+ DEVICE_NAME(0x046d, 0x0990, "Logitech, Inc.", "QuickCam Pro 9000"),
+
+ DEVICE_NAME(0x05e1, 0x0408, "Syntek", "STK1160"),
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 33db334e655667..a676ad093d1897 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -328,8 +328,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR)
+ return ret;
+ err = uac_clock_selector_set_val(chip, entity_id, cur);
+- if (err < 0)
++ if (err < 0) {
++ if (pins == 1) {
++ usb_audio_dbg(chip,
++ "%s(): selector returned an error, "
++ "assuming a firmware bug, id %d, ret %d\n",
++ __func__, clock_id, err);
++ return ret;
++ }
+ return err;
++ }
+ }
+
+ if (!validate || ret > 0 || !chip->autoclock)
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index ab5fed9f55b60e..3b45d0ee769389 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -470,9 +470,11 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
+ int clock)
+ {
+ struct usb_device *dev = chip->dev;
++ struct usb_host_interface *alts;
+ unsigned int *table;
+ unsigned int nr_rates;
+ int i, err;
++ u32 bmControls;
+
+ /* performing the rate verification may lead to unexpected USB bus
+ * behavior afterwards by some unknown reason. Do this only for the
+@@ -481,6 +483,24 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
+ if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
+ return 0; /* don't perform the validation as default */
+
++ alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
++ if (!alts)
++ return 0;
++
++ if (fp->protocol == UAC_VERSION_3) {
++ struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
++ alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
++ bmControls = le32_to_cpu(as->bmControls);
++ } else {
++ struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
++ alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
++ bmControls = as->bmControls;
++ }
++
++ if (!uac_v2v3_control_is_readable(bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS))
++ return 0;
++
+ table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
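
The new early return relies on the UAC2/UAC3 encoding of bmControls, where
each control occupies a two-bit field (0b01 = read-only, 0b11 = read/write).
A sketch of the readability test that uac_v2v3_control_is_readable() is
assumed to perform (controls are numbered from 1):

    #include <stdbool.h>
    #include <stdint.h>

    static bool control_is_readable(uint32_t bm_controls, uint8_t control)
    {
            return (bm_controls >> ((control - 1) * 2)) & 0x1;
    }
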
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index b67617b68e509d..9df49a880b750d 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -202,7 +202,7 @@ int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer,
+ struct urb *urb;
+
+ /* create message: */
+- msg = kmalloc(sizeof(struct message), GFP_ATOMIC);
++ msg = kzalloc(sizeof(struct message), GFP_ATOMIC);
+ if (msg == NULL)
+ return -ENOMEM;
+
+@@ -286,12 +286,14 @@ static void line6_data_received(struct urb *urb)
+ {
+ struct usb_line6 *line6 = (struct usb_line6 *)urb->context;
+ struct midi_buffer *mb = &line6->line6midi->midibuf_in;
++ unsigned long flags;
+ int done;
+
+ if (urb->status == -ESHUTDOWN)
+ return;
+
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
++ spin_lock_irqsave(&line6->line6midi->lock, flags);
+ done =
+ line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length);
+
+@@ -300,12 +302,15 @@ static void line6_data_received(struct urb *urb)
+ dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n",
+ done, urb->actual_length);
+ }
++ spin_unlock_irqrestore(&line6->line6midi->lock, flags);
+
+ for (;;) {
++ spin_lock_irqsave(&line6->line6midi->lock, flags);
+ done =
+ line6_midibuf_read(mb, line6->buffer_message,
+ LINE6_MIDI_MESSAGE_MAXLEN,
+ LINE6_MIDIBUF_READ_RX);
++ spin_unlock_irqrestore(&line6->line6midi->lock, flags);
+
+ if (done <= 0)
+ break;
+@@ -688,7 +693,7 @@ static int line6_init_cap_control(struct usb_line6 *line6)
+ int ret;
+
+ /* initialize USB buffers: */
+- line6->buffer_listen = kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
++ line6->buffer_listen = kzalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL);
+ if (!line6->buffer_listen)
+ return -ENOMEM;
+
+@@ -697,7 +702,7 @@ static int line6_init_cap_control(struct usb_line6 *line6)
+ return -ENOMEM;
+
+ if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) {
+- line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
++ line6->buffer_message = kzalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
+ if (!line6->buffer_message)
+ return -ENOMEM;
+
+diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
+index ffd8c157a28139..70de08635f54cb 100644
+--- a/sound/usb/line6/podhd.c
++++ b/sound/usb/line6/podhd.c
+@@ -507,7 +507,7 @@ static const struct line6_properties podhd_properties_table[] = {
+ [LINE6_PODHD500X] = {
+ .id = "PODHD500X",
+ .name = "POD HD500X",
+- .capabilities = LINE6_CAP_CONTROL
++ .capabilities = LINE6_CAP_CONTROL | LINE6_CAP_HWMON_CTL
+ | LINE6_CAP_PCM | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 409fc11646948e..197fd07e69edd4 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1211,6 +1211,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ cval->res = 16;
+ }
+ break;
++ case USB_ID(0x1bcf, 0x2281): /* HD Webcam */
++ if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++ usb_audio_info(chip,
++ "set resolution quirk: cval->res = 16\n");
++ cval->res = 16;
++ }
++ break;
+ }
+ }
+
+@@ -1370,6 +1377,19 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
+
+ #define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
+
++/* get the max value advertised via control API */
++static int get_max_exposed(struct usb_mixer_elem_info *cval)
++{
++ if (!cval->max_exposed) {
++ if (cval->res)
++ cval->max_exposed =
++ DIV_ROUND_UP(cval->max - cval->min, cval->res);
++ else
++ cval->max_exposed = cval->max - cval->min;
++ }
++ return cval->max_exposed;
++}
++
+ /* get a feature/mixer unit info */
+ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+@@ -1382,11 +1402,8 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
+ else
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = cval->channels;
+- if (cval->val_type == USB_MIXER_BOOLEAN ||
+- cval->val_type == USB_MIXER_INV_BOOLEAN) {
+- uinfo->value.integer.min = 0;
+- uinfo->value.integer.max = 1;
+- } else {
++ if (cval->val_type != USB_MIXER_BOOLEAN &&
++ cval->val_type != USB_MIXER_INV_BOOLEAN) {
+ if (!cval->initialized) {
+ get_min_max_with_quirks(cval, 0, kcontrol);
+ if (cval->initialized && cval->dBmin >= cval->dBmax) {
+@@ -1398,10 +1415,10 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
+ &kcontrol->id);
+ }
+ }
+- uinfo->value.integer.min = 0;
+- uinfo->value.integer.max =
+- DIV_ROUND_UP(cval->max - cval->min, cval->res);
+ }
++
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = get_max_exposed(cval);
+ return 0;
+ }
+
+@@ -1442,6 +1459,7 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct usb_mixer_elem_info *cval = kcontrol->private_data;
++ int max_val = get_max_exposed(cval);
+ int c, cnt, val, oval, err;
+ int changed = 0;
+
+@@ -1454,6 +1472,8 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
+ if (err < 0)
+ return filter_error(cval, err);
+ val = ucontrol->value.integer.value[cnt];
++ if (val < 0 || val > max_val)
++ return -EINVAL;
+ val = get_abs_value(cval, val);
+ if (oval != val) {
+ snd_usb_set_cur_mix_value(cval, c + 1, cnt, val);
+@@ -1467,6 +1487,8 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
+ if (err < 0)
+ return filter_error(cval, err);
+ val = ucontrol->value.integer.value[0];
++ if (val < 0 || val > max_val)
++ return -EINVAL;
+ val = get_abs_value(cval, val);
+ if (val != oval) {
+ snd_usb_set_cur_mix_value(cval, 0, 0, val);
+@@ -2014,6 +2036,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
+ bmaControls = ftr->bmaControls;
+ }
+
++ if (channels > 32) {
++ usb_audio_info(state->chip,
++ "usbmixer: too many channels (%d) in unit %d\n",
++ channels, unitid);
++ return -EINVAL;
++ }
++
+ /* parse the source unit */
+ err = parse_audio_unit(state, hdr->bSourceID);
+ if (err < 0)
+@@ -2323,6 +2352,8 @@ static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol,
+ if (err < 0)
+ return filter_error(cval, err);
+ val = ucontrol->value.integer.value[0];
++ if (val < 0 || val > get_max_exposed(cval))
++ return -EINVAL;
+ val = get_abs_value(cval, val);
+ if (val != oval) {
+ set_cur_ctl_value(cval, cval->control << 8, val);
+@@ -2685,6 +2716,8 @@ static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol,
+ if (err < 0)
+ return filter_error(cval, err);
+ val = ucontrol->value.enumerated.item[0];
++ if (val < 0 || val >= cval->max) /* here cval->max = # elements */
++ return -EINVAL;
+ val = get_abs_value(cval, val);
+ if (val != oval) {
+ set_cur_ctl_value(cval, cval->control << 8, val);
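
get_max_exposed() and the new range checks above pin down the control API
contract: userspace values run 0..max_exposed, and anything outside that
range is now rejected with -EINVAL rather than silently scaled. A worked
sketch (the device-value line approximates what get_abs_value() computes;
res >= 1 is assumed):

    #include <errno.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* e.g. min = -128, max = 127, res = 16:
     * max_exposed = DIV_ROUND_UP(255, 16) = 16, so valid writes are 0..16
     */
    static int validate_and_map(int val, int min, int max, int res)
    {
            int max_exposed = DIV_ROUND_UP(max - min, res);

            if (val < 0 || val > max_exposed)
                    return -EINVAL;
            return min + val * res; /* raw device-side value */
    }
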
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index d43895c1ae5c6c..167fbfcf01ace9 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -88,6 +88,7 @@ struct usb_mixer_elem_info {
+ int channels;
+ int val_type;
+ int min, max, res;
++ int max_exposed; /* control API exposes the value in 0..max_exposed */
+ int dBmin, dBmax;
+ int cached;
+ int cache_val[MAX_CHANNELS];
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ab0d459f42715f..c8d48566e17598 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -33,7 +33,7 @@
+ #include "mixer.h"
+ #include "mixer_quirks.h"
+ #include "mixer_scarlett.h"
+-#include "mixer_scarlett_gen2.h"
++#include "mixer_scarlett2.h"
+ #include "mixer_us16x08.h"
+ #include "mixer_s1810c.h"
+ #include "helper.h"
+@@ -2978,6 +2978,7 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
+ #define SND_DJM_850_IDX 0x2
+ #define SND_DJM_900NXS2_IDX 0x3
+ #define SND_DJM_750MK2_IDX 0x4
++#define SND_DJM_450_IDX 0x5
+
+
+ #define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
+@@ -3108,6 +3109,31 @@ static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
+ };
+
+
++// DJM-450
++static const u16 snd_djm_opts_450_cap1[] = {
++ 0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
++
++static const u16 snd_djm_opts_450_cap2[] = {
++ 0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
++
++static const u16 snd_djm_opts_450_cap3[] = {
++ 0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
++
++static const u16 snd_djm_opts_450_pb1[] = { 0x0100, 0x0101, 0x0104 };
++static const u16 snd_djm_opts_450_pb2[] = { 0x0200, 0x0201, 0x0204 };
++static const u16 snd_djm_opts_450_pb3[] = { 0x0300, 0x0301, 0x0304 };
++
++static const struct snd_djm_ctl snd_djm_ctls_450[] = {
++ SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++ SND_DJM_CTL("Ch1 Input", 450_cap1, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch2 Input", 450_cap2, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch3 Input", 450_cap3, 0, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch1 Output", 450_pb1, 0, SND_DJM_WINDEX_PB),
++ SND_DJM_CTL("Ch2 Output", 450_pb2, 1, SND_DJM_WINDEX_PB),
++ SND_DJM_CTL("Ch3 Output", 450_pb3, 2, SND_DJM_WINDEX_PB)
++};
++
++
+ // DJM-750
+ static const u16 snd_djm_opts_750_cap1[] = {
+ 0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
+@@ -3203,6 +3229,7 @@ static const struct snd_djm_device snd_djm_devices[] = {
+ [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+ [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+ [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
++ [SND_DJM_450_IDX] = SND_DJM_DEVICE(450),
+ };
+
+
+@@ -3420,8 +3447,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ case USB_ID(0x1235, 0x8213): /* Focusrite Scarlett 8i6 3rd Gen */
+ case USB_ID(0x1235, 0x8214): /* Focusrite Scarlett 18i8 3rd Gen */
+ case USB_ID(0x1235, 0x8215): /* Focusrite Scarlett 18i20 3rd Gen */
++ case USB_ID(0x1235, 0x8206): /* Focusrite Clarett 2Pre USB */
++ case USB_ID(0x1235, 0x8207): /* Focusrite Clarett 4Pre USB */
++ case USB_ID(0x1235, 0x8208): /* Focusrite Clarett 8Pre USB */
++ case USB_ID(0x1235, 0x820a): /* Focusrite Clarett+ 2Pre */
++ case USB_ID(0x1235, 0x820b): /* Focusrite Clarett+ 4Pre */
+ case USB_ID(0x1235, 0x820c): /* Focusrite Clarett+ 8Pre */
+- err = snd_scarlett_gen2_init(mixer);
++ err = snd_scarlett2_init(mixer);
+ break;
+
+ case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
+@@ -3449,6 +3481,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
+ err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
+ break;
++ case USB_ID(0x2b73, 0x0013): /* Pioneer DJ DJM-450 */
++ err = snd_djm_controls_create(mixer, SND_DJM_450_IDX);
++ break;
+ case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
+ err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
+ break;
+diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
+new file mode 100644
+index 00000000000000..90480b9b9b0891
+--- /dev/null
++++ b/sound/usb/mixer_scarlett2.c
+@@ -0,0 +1,4391 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Focusrite Scarlett 2 Protocol Driver for ALSA
++ * (including Scarlett 2nd Gen, 3rd Gen, Clarett USB, and Clarett+
++ * series products)
++ *
++ * Supported models:
++ * - 6i6/18i8/18i20 Gen 2
++ * - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
++ * - Clarett 2Pre/4Pre/8Pre USB
++ * - Clarett+ 2Pre/4Pre/8Pre
++ *
++ * Copyright (c) 2018-2023 by Geoffrey D. Bennett <g at b4.vu>
++ * Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
++ * Copyright (c) 2022 by Christian Colglazier <christian@cacolglazier.com>
++ *
++ * Based on the Scarlett (Gen 1) Driver for ALSA:
++ *
++ * Copyright (c) 2013 by Tobias Hoffmann
++ * Copyright (c) 2013 by Robin Gareus <robin at gareus.org>
++ * Copyright (c) 2002 by Takashi Iwai <tiwai at suse.de>
++ * Copyright (c) 2014 by Chris J Arges <chris.j.arges at canonical.com>
++ *
++ * Much code borrowed from audio.c by
++ * Alan Cox (alan at lxorguk.ukuu.org.uk)
++ * Thomas Sailer (sailer at ife.ee.ethz.ch)
++ *
++ * Code cleanup:
++ * David Henningsson <david.henningsson at canonical.com>
++ */
++
++/* The protocol was reverse engineered by looking at the communication
++ * between Focusrite Control 2.3.4 and the Focusrite(R) Scarlett 18i20
++ * (firmware 1083) using usbmon in July-August 2018.
++ *
++ * Scarlett 18i8 support added in April 2019.
++ *
++ * Scarlett 6i6 support added in June 2019 (thanks to Martin Wittmann
++ * for providing usbmon output and testing).
++ *
++ * Scarlett 4i4/8i6 Gen 3 support added in May 2020 (thanks to Laurent
++ * Debricon for donating a 4i4 and to Fredrik Unger for providing 8i6
++ * usbmon output and testing).
++ *
++ * Scarlett 18i8/18i20 Gen 3 support added in June 2020 (thanks to
++ * Darren Jaeckel, Alex Sedlack, and Clovis Lunel for providing usbmon
++ * output, protocol traces and testing).
++ *
++ * Support for loading mixer volume and mux configuration from the
++ * interface during driver initialisation added in May 2021 (thanks to
++ * Vladimir Sadovnikov for figuring out how).
++ *
++ * Support for Solo/2i2 Gen 3 added in May 2021 (thanks to Alexander
++ * Vorona for 2i2 protocol traces).
++ *
++ * Support for phantom power, direct monitoring, speaker switching,
++ * and talkback added in May-June 2021.
++ *
++ * Support for Clarett+ 8Pre added in Aug 2022 by Christian
++ * Colglazier.
++ *
++ * Support for Clarett 8Pre USB added in Sep 2023 (thanks to Philippe
++ * Perrot for confirmation).
++ *
++ * Support for Clarett+ 4Pre and 2Pre added in Sep 2023 (thanks to
++ * Gregory Rozzo for donating a 4Pre, and David Sherwood and Patrice
++ * Peterson for usbmon output).
++ *
++ * Support for Clarett 2Pre and 4Pre USB added in Oct 2023.
++ *
++ * This ALSA mixer gives access to (model-dependent):
++ * - input, output, mixer-matrix muxes
++ * - mixer-matrix gain stages
++ * - gain/volume/mute controls
++ * - level meters
++ * - line/inst level, pad, and air controls
++ * - phantom power, direct monitor, speaker switching, and talkback
++ * controls
++ * - disable/enable MSD mode
++ * - disable/enable standalone mode
++ *
++ * <ditaa>
++ * /--------------\ 18chn 20chn /--------------\
++ * | Hardware in +--+------\ /-------------+--+ ALSA PCM out |
++ * \--------------/ | | | | \--------------/
++ * | | | /-----\ |
++ * | | | | | |
++ * | v v v | |
++ * | +---------------+ | |
++ * | \ Matrix Mux / | |
++ * | +-----+-----+ | |
++ * | | | |
++ * | |18chn | |
++ * | | | |
++ * | | 10chn| |
++ * | v | |
++ * | +------------+ | |
++ * | | Mixer | | |
++ * | | Matrix | | |
++ * | | | | |
++ * | | 18x10 Gain | | |
++ * | | stages | | |
++ * | +-----+------+ | |
++ * | | | |
++ * |18chn |10chn | |20chn
++ * | | | |
++ * | +----------/ |
++ * | | |
++ * v v v
++ * ===========================
++ * +---------------+ +---------------+
++ * \ Output Mux / \ Capture Mux /
++ * +---+---+---+ +-----+-----+
++ * | | |
++ * 10chn| | |18chn
++ * | | |
++ * /--------------\ | | | /--------------\
++ * | S/PDIF, ADAT |<--/ |10chn \-->| ALSA PCM in |
++ * | Hardware out | | \--------------/
++ * \--------------/ |
++ * v
++ * +-------------+ Software gain per channel.
++ * | Master Gain |<-- 18i20 only: Switch per channel
++ * +------+------+ to select HW or SW gain control.
++ * |
++ * |10chn
++ * /--------------\ |
++ * | Analogue |<------/
++ * | Hardware out |
++ * \--------------/
++ * </ditaa>
++ *
++ * Gen 3 devices have a Mass Storage Device (MSD) mode where a small
++ * disk with registration and driver download information is presented
++ * to the host. To access the full functionality of the device without
++ * proprietary software, MSD mode can be disabled by:
++ * - holding down the 48V button for five seconds while powering on
++ * the device, or
++ * - using this driver and alsamixer to change the "MSD Mode" setting
++ * to Off and power-cycling the device
++ */
++
++#include <linux/slab.h>
++#include <linux/usb.h>
++#include <linux/moduleparam.h>
++
++#include <sound/control.h>
++#include <sound/tlv.h>
++
++#include "usbaudio.h"
++#include "mixer.h"
++#include "helper.h"
++
++#include "mixer_scarlett2.h"
++
++/* device_setup value to allow turning MSD mode back on */
++#define SCARLETT2_MSD_ENABLE 0x02
++
++/* device_setup value to disable this mixer driver */
++#define SCARLETT2_DISABLE 0x04
++
++/* some gui mixers can't handle negative ctl values */
++#define SCARLETT2_VOLUME_BIAS 127
++
++/* mixer range from -80dB to +6dB in 0.5dB steps */
++#define SCARLETT2_MIXER_MIN_DB -80
++#define SCARLETT2_MIXER_BIAS (-SCARLETT2_MIXER_MIN_DB * 2)
++#define SCARLETT2_MIXER_MAX_DB 6
++#define SCARLETT2_MIXER_MAX_VALUE \
++ ((SCARLETT2_MIXER_MAX_DB - SCARLETT2_MIXER_MIN_DB) * 2)
++#define SCARLETT2_MIXER_VALUE_COUNT (SCARLETT2_MIXER_MAX_VALUE + 1)
++
++/* map from (dB + 80) * 2 to mixer value
++ * for dB in 0 .. 172: int(8192 * pow(10, ((dB - 160) / 2 / 20)))
++ */
++static const u16 scarlett2_mixer_values[SCARLETT2_MIXER_VALUE_COUNT] = {
++ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
++ 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8,
++ 9, 9, 10, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
++ 23, 24, 25, 27, 29, 30, 32, 34, 36, 38, 41, 43, 46, 48, 51,
++ 54, 57, 61, 65, 68, 73, 77, 81, 86, 91, 97, 103, 109, 115,
++ 122, 129, 137, 145, 154, 163, 173, 183, 194, 205, 217, 230,
++ 244, 259, 274, 290, 307, 326, 345, 365, 387, 410, 434, 460,
++ 487, 516, 547, 579, 614, 650, 689, 730, 773, 819, 867, 919,
++ 973, 1031, 1092, 1157, 1225, 1298, 1375, 1456, 1543, 1634,
++ 1731, 1833, 1942, 2057, 2179, 2308, 2445, 2590, 2744, 2906,
++ 3078, 3261, 3454, 3659, 3876, 4105, 4349, 4606, 4879, 5168,
++ 5475, 5799, 6143, 6507, 6892, 7301, 7733, 8192, 8677, 9191,
++ 9736, 10313, 10924, 11571, 12257, 12983, 13752, 14567, 15430,
++ 16345
++};
++
++/* Maximum number of analogue outputs */
++#define SCARLETT2_ANALOGUE_MAX 10
++
++/* Maximum number of level and pad switches */
++#define SCARLETT2_LEVEL_SWITCH_MAX 2
++#define SCARLETT2_PAD_SWITCH_MAX 8
++#define SCARLETT2_AIR_SWITCH_MAX 8
++#define SCARLETT2_PHANTOM_SWITCH_MAX 2
++
++/* Maximum number of inputs to the mixer */
++#define SCARLETT2_INPUT_MIX_MAX 25
++
++/* Maximum number of outputs from the mixer */
++#define SCARLETT2_OUTPUT_MIX_MAX 12
++
++/* Maximum size of the data in the USB mux assignment message:
++ * 20 inputs, 20 outputs, 25 matrix inputs, 12 spare
++ */
++#define SCARLETT2_MUX_MAX 77
++
++/* Maximum number of meters (sum of output port counts) */
++#define SCARLETT2_MAX_METERS 65
++
++/* There are three different sets of configuration parameters across
++ * the devices
++ */
++enum {
++ SCARLETT2_CONFIG_SET_NO_MIXER = 0,
++ SCARLETT2_CONFIG_SET_GEN_2 = 1,
++ SCARLETT2_CONFIG_SET_GEN_3 = 2,
++ SCARLETT2_CONFIG_SET_CLARETT = 3,
++ SCARLETT2_CONFIG_SET_COUNT = 4
++};
++
++/* Hardware port types:
++ * - None (no input to mux)
++ * - Analogue I/O
++ * - S/PDIF I/O
++ * - ADAT I/O
++ * - Mixer I/O
++ * - PCM I/O
++ */
++enum {
++ SCARLETT2_PORT_TYPE_NONE = 0,
++ SCARLETT2_PORT_TYPE_ANALOGUE = 1,
++ SCARLETT2_PORT_TYPE_SPDIF = 2,
++ SCARLETT2_PORT_TYPE_ADAT = 3,
++ SCARLETT2_PORT_TYPE_MIX = 4,
++ SCARLETT2_PORT_TYPE_PCM = 5,
++ SCARLETT2_PORT_TYPE_COUNT = 6,
++};
++
++/* I/O count of each port type kept in struct scarlett2_ports */
++enum {
++ SCARLETT2_PORT_IN = 0,
++ SCARLETT2_PORT_OUT = 1,
++ SCARLETT2_PORT_DIRNS = 2,
++};
++
++/* Dim/Mute buttons on the 18i20 */
++enum {
++ SCARLETT2_BUTTON_MUTE = 0,
++ SCARLETT2_BUTTON_DIM = 1,
++ SCARLETT2_DIM_MUTE_COUNT = 2,
++};
++
++static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
++ "Mute Playback Switch", "Dim Playback Switch"
++};
++
++/* Description of each hardware port type:
++ * - id: hardware ID of this port type
++ * - src_descr: printf format string for mux input selections
++ * - src_num_offset: added to channel number for the fprintf
++ * - dst_descr: printf format string for mixer controls
++ */
++struct scarlett2_port {
++ u16 id;
++ const char * const src_descr;
++ int src_num_offset;
++ const char * const dst_descr;
++};
++
++static const struct scarlett2_port scarlett2_ports[SCARLETT2_PORT_TYPE_COUNT] = {
++ [SCARLETT2_PORT_TYPE_NONE] = {
++ .id = 0x000,
++ .src_descr = "Off"
++ },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
++ .id = 0x080,
++ .src_descr = "Analogue %d",
++ .src_num_offset = 1,
++ .dst_descr = "Analogue Output %02d Playback"
++ },
++ [SCARLETT2_PORT_TYPE_SPDIF] = {
++ .id = 0x180,
++ .src_descr = "S/PDIF %d",
++ .src_num_offset = 1,
++ .dst_descr = "S/PDIF Output %d Playback"
++ },
++ [SCARLETT2_PORT_TYPE_ADAT] = {
++ .id = 0x200,
++ .src_descr = "ADAT %d",
++ .src_num_offset = 1,
++ .dst_descr = "ADAT Output %d Playback"
++ },
++ [SCARLETT2_PORT_TYPE_MIX] = {
++ .id = 0x300,
++ .src_descr = "Mix %c",
++ .src_num_offset = 'A',
++ .dst_descr = "Mixer Input %02d Capture"
++ },
++ [SCARLETT2_PORT_TYPE_PCM] = {
++ .id = 0x600,
++ .src_descr = "PCM %d",
++ .src_num_offset = 1,
++ .dst_descr = "PCM %02d Capture"
++ },
++};
++
++/* Number of mux tables: one for each band of sample rates
++ * (44.1/48kHz, 88.2/96kHz, and 176.4/192kHz)
++ */
++#define SCARLETT2_MUX_TABLES 3
++
++/* Maximum number of entries in a mux table */
++#define SCARLETT2_MAX_MUX_ENTRIES 10
++
++/* One entry within mux_assignment defines the port type and range of
++ * ports to add to the set_mux message. The end of the list is marked
++ * with count == 0.
++ */
++struct scarlett2_mux_entry {
++ u8 port_type;
++ u8 start;
++ u8 count;
++};
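++
++/* For example, { SCARLETT2_PORT_TYPE_PCM, 0, 6 } adds PCM outputs
++ * 0..5 to the set_mux message, and { 0, 0, 0 } terminates the
++ * table.
++ */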
++
++struct scarlett2_device_info {
++ /* Gen 3 devices have an internal MSD mode switch that needs
++ * to be disabled in order to access the full functionality of
++ * the device.
++ */
++ u8 has_msd_mode;
++
++ /* which set of configuration parameters the device uses */
++ u8 config_set;
++
++ /* line out hw volume is sw controlled */
++ u8 line_out_hw_vol;
++
++ /* support for main/alt speaker switching */
++ u8 has_speaker_switching;
++
++ /* support for talkback microphone */
++ u8 has_talkback;
++
++ /* the number of analogue inputs with a software switchable
++ * level control that can be set to line or instrument
++ */
++ u8 level_input_count;
++
++ /* the first input with a level control (0-based) */
++ u8 level_input_first;
++
++ /* the number of analogue inputs with a software switchable
++ * 10dB pad control
++ */
++ u8 pad_input_count;
++
++ /* the number of analogue inputs with a software switchable
++ * "air" control
++ */
++ u8 air_input_count;
++
++ /* the number of phantom (48V) software switchable controls */
++ u8 phantom_count;
++
++ /* the number of inputs each phantom switch controls */
++ u8 inputs_per_phantom;
++
++ /* the number of direct monitor options
++ * (0 = none, 1 = mono only, 2 = mono/stereo)
++ */
++ u8 direct_monitor;
++
++ /* remap analogue outputs; 18i8 Gen 3 has "line 3/4" connected
++ * internally to the analogue 7/8 outputs
++ */
++ u8 line_out_remap_enable;
++ u8 line_out_remap[SCARLETT2_ANALOGUE_MAX];
++
++ /* additional description for the line out volume controls */
++ const char * const line_out_descrs[SCARLETT2_ANALOGUE_MAX];
++
++ /* number of sources/destinations of each port type */
++ const int port_count[SCARLETT2_PORT_TYPE_COUNT][SCARLETT2_PORT_DIRNS];
++
++ /* layout/order of the entries in the set_mux message */
++ struct scarlett2_mux_entry mux_assignment[SCARLETT2_MUX_TABLES]
++ [SCARLETT2_MAX_MUX_ENTRIES];
++};
++
++struct scarlett2_data {
++ struct usb_mixer_interface *mixer;
++ struct mutex usb_mutex; /* prevent sending concurrent USB requests */
++ struct mutex data_mutex; /* lock access to this data */
++ struct delayed_work work;
++ const struct scarlett2_device_info *info;
++ const char *series_name;
++ __u8 bInterfaceNumber;
++ __u8 bEndpointAddress;
++ __u16 wMaxPacketSize;
++ __u8 bInterval;
++ int num_mux_srcs;
++ int num_mux_dsts;
++ u16 scarlett2_seq;
++ u8 sync_updated;
++ u8 vol_updated;
++ u8 input_other_updated;
++ u8 monitor_other_updated;
++ u8 mux_updated;
++ u8 speaker_switching_switched;
++ u8 sync;
++ u8 master_vol;
++ u8 vol[SCARLETT2_ANALOGUE_MAX];
++ u8 vol_sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
++ u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
++ u8 level_switch[SCARLETT2_LEVEL_SWITCH_MAX];
++ u8 pad_switch[SCARLETT2_PAD_SWITCH_MAX];
++ u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
++ u8 air_switch[SCARLETT2_AIR_SWITCH_MAX];
++ u8 phantom_switch[SCARLETT2_PHANTOM_SWITCH_MAX];
++ u8 phantom_persistence;
++ u8 direct_monitor_switch;
++ u8 speaker_switching_switch;
++ u8 talkback_switch;
++ u8 talkback_map[SCARLETT2_OUTPUT_MIX_MAX];
++ u8 msd_switch;
++ u8 standalone_switch;
++ struct snd_kcontrol *sync_ctl;
++ struct snd_kcontrol *master_vol_ctl;
++ struct snd_kcontrol *vol_ctls[SCARLETT2_ANALOGUE_MAX];
++ struct snd_kcontrol *sw_hw_ctls[SCARLETT2_ANALOGUE_MAX];
++ struct snd_kcontrol *mute_ctls[SCARLETT2_ANALOGUE_MAX];
++ struct snd_kcontrol *dim_mute_ctls[SCARLETT2_DIM_MUTE_COUNT];
++ struct snd_kcontrol *level_ctls[SCARLETT2_LEVEL_SWITCH_MAX];
++ struct snd_kcontrol *pad_ctls[SCARLETT2_PAD_SWITCH_MAX];
++ struct snd_kcontrol *air_ctls[SCARLETT2_AIR_SWITCH_MAX];
++ struct snd_kcontrol *phantom_ctls[SCARLETT2_PHANTOM_SWITCH_MAX];
++ struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
++ struct snd_kcontrol *direct_monitor_ctl;
++ struct snd_kcontrol *speaker_switching_ctl;
++ struct snd_kcontrol *talkback_ctl;
++ u8 mux[SCARLETT2_MUX_MAX];
++ u8 mix[SCARLETT2_INPUT_MIX_MAX * SCARLETT2_OUTPUT_MIX_MAX];
++};
++
++/*** Model-specific data ***/
++
++static const struct scarlett2_device_info s6i6_gen2_info = {
++ .config_set = SCARLETT2_CONFIG_SET_GEN_2,
++ .level_input_count = 2,
++ .pad_input_count = 2,
++
++ .line_out_descrs = {
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 6, 6 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info s18i8_gen2_info = {
++ .config_set = SCARLETT2_CONFIG_SET_GEN_2,
++ .level_input_count = 2,
++ .pad_input_count = 4,
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 6 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 8, 18 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 4 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info s18i20_gen2_info = {
++ .config_set = SCARLETT2_CONFIG_SET_GEN_2,
++ .line_out_hw_vol = 1,
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 10 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 20, 18 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ADAT, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 6 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info solo_gen3_info = {
++ .has_msd_mode = 1,
++ .config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
++ .level_input_count = 1,
++ .level_input_first = 1,
++ .air_input_count = 1,
++ .phantom_count = 1,
++ .inputs_per_phantom = 1,
++ .direct_monitor = 1,
++};
++
++static const struct scarlett2_device_info s2i2_gen3_info = {
++ .has_msd_mode = 1,
++ .config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
++ .level_input_count = 2,
++ .air_input_count = 2,
++ .phantom_count = 1,
++ .inputs_per_phantom = 2,
++ .direct_monitor = 2,
++};
++
++static const struct scarlett2_device_info s4i4_gen3_info = {
++ .has_msd_mode = 1,
++ .config_set = SCARLETT2_CONFIG_SET_GEN_3,
++ .level_input_count = 2,
++ .pad_input_count = 2,
++ .air_input_count = 2,
++ .phantom_count = 1,
++ .inputs_per_phantom = 2,
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ "Headphones L",
++ "Headphones R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 6, 8 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 4, 6 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info s8i6_gen3_info = {
++ .has_msd_mode = 1,
++ .config_set = SCARLETT2_CONFIG_SET_GEN_3,
++ .level_input_count = 2,
++ .pad_input_count = 2,
++ .air_input_count = 2,
++ .phantom_count = 1,
++ .inputs_per_phantom = 2,
++
++ .line_out_descrs = {
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 6, 4 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 8, 8 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 6, 10 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info s18i8_gen3_info = {
++ .has_msd_mode = 1,
++ .config_set = SCARLETT2_CONFIG_SET_GEN_3,
++ .line_out_hw_vol = 1,
++ .has_speaker_switching = 1,
++ .level_input_count = 2,
++ .pad_input_count = 4,
++ .air_input_count = 4,
++ .phantom_count = 2,
++ .inputs_per_phantom = 2,
++
++ .line_out_remap_enable = 1,
++ .line_out_remap = { 0, 1, 6, 7, 2, 3, 4, 5 },
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ "Alt Monitor L",
++ "Alt Monitor R",
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 8 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 20 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 8, 20 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
++ { SCARLETT2_PORT_TYPE_PCM, 12, 8 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_PCM, 10, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
++ { SCARLETT2_PORT_TYPE_PCM, 12, 4 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_PCM, 10, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info s18i20_gen3_info = {
++ .has_msd_mode = 1,
++ .config_set = SCARLETT2_CONFIG_SET_GEN_3,
++ .line_out_hw_vol = 1,
++ .has_speaker_switching = 1,
++ .has_talkback = 1,
++ .level_input_count = 2,
++ .pad_input_count = 8,
++ .air_input_count = 8,
++ .phantom_count = 2,
++ .inputs_per_phantom = 4,
++
++ .line_out_descrs = {
++ "Monitor 1 L",
++ "Monitor 1 R",
++ "Monitor 2 L",
++ "Monitor 2 R",
++ NULL,
++ NULL,
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 9, 10 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 12, 25 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 20, 20 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
++ { SCARLETT2_PORT_TYPE_PCM, 10, 10 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
++ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 25 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 12 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
++ { SCARLETT2_PORT_TYPE_PCM, 10, 8 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
++ { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 25 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 24 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info clarett_2pre_info = {
++ .config_set = SCARLETT2_CONFIG_SET_CLARETT,
++ .line_out_hw_vol = 1,
++ .level_input_count = 2,
++ .air_input_count = 2,
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ "Headphones L",
++ "Headphones R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 2, 4 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 0 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 4, 12 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 26 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info clarett_4pre_info = {
++ .config_set = SCARLETT2_CONFIG_SET_CLARETT,
++ .line_out_hw_vol = 1,
++ .level_input_count = 2,
++ .air_input_count = 4,
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 6 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 8, 18 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 24 },
++ { 0, 0, 0 },
++ } },
++};
++
++static const struct scarlett2_device_info clarett_8pre_info = {
++ .config_set = SCARLETT2_CONFIG_SET_CLARETT,
++ .line_out_hw_vol = 1,
++ .level_input_count = 2,
++ .air_input_count = 8,
++
++ .line_out_descrs = {
++ "Monitor L",
++ "Monitor R",
++ NULL,
++ NULL,
++ NULL,
++ NULL,
++ "Headphones 1 L",
++ "Headphones 1 R",
++ "Headphones 2 L",
++ "Headphones 2 R",
++ },
++
++ .port_count = {
++ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
++ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 10 },
++ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
++ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
++ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
++ [SCARLETT2_PORT_TYPE_PCM] = { 20, 18 },
++ },
++
++ .mux_assignment = { {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_ADAT, 0, 4 },
++ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
++ { 0, 0, 0 },
++ }, {
++ { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
++ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
++ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
++ { SCARLETT2_PORT_TYPE_NONE, 0, 22 },
++ { 0, 0, 0 },
++ } },
++};
++
++struct scarlett2_device_entry {
++ const u32 usb_id; /* USB device identifier */
++ const struct scarlett2_device_info *info;
++ const char *series_name;
++};
++
++static const struct scarlett2_device_entry scarlett2_devices[] = {
++ /* Supported Gen 2 devices */
++ { USB_ID(0x1235, 0x8203), &s6i6_gen2_info, "Scarlett Gen 2" },
++ { USB_ID(0x1235, 0x8204), &s18i8_gen2_info, "Scarlett Gen 2" },
++ { USB_ID(0x1235, 0x8201), &s18i20_gen2_info, "Scarlett Gen 2" },
++
++ /* Supported Gen 3 devices */
++ { USB_ID(0x1235, 0x8211), &solo_gen3_info, "Scarlett Gen 3" },
++ { USB_ID(0x1235, 0x8210), &s2i2_gen3_info, "Scarlett Gen 3" },
++ { USB_ID(0x1235, 0x8212), &s4i4_gen3_info, "Scarlett Gen 3" },
++ { USB_ID(0x1235, 0x8213), &s8i6_gen3_info, "Scarlett Gen 3" },
++ { USB_ID(0x1235, 0x8214), &s18i8_gen3_info, "Scarlett Gen 3" },
++ { USB_ID(0x1235, 0x8215), &s18i20_gen3_info, "Scarlett Gen 3" },
++
++ /* Supported Clarett USB/Clarett+ devices */
++ { USB_ID(0x1235, 0x8206), &clarett_2pre_info, "Clarett USB" },
++ { USB_ID(0x1235, 0x8207), &clarett_4pre_info, "Clarett USB" },
++ { USB_ID(0x1235, 0x8208), &clarett_8pre_info, "Clarett USB" },
++ { USB_ID(0x1235, 0x820a), &clarett_2pre_info, "Clarett+" },
++ { USB_ID(0x1235, 0x820b), &clarett_4pre_info, "Clarett+" },
++ { USB_ID(0x1235, 0x820c), &clarett_8pre_info, "Clarett+" },
++
++ /* End of list */
++ { 0, NULL },
++};
++
++/* get the starting port index number for a given port type/direction */
++static int scarlett2_get_port_start_num(
++ const int port_count[][SCARLETT2_PORT_DIRNS],
++ int direction, int port_type)
++{
++ int i, num = 0;
++
++ for (i = 0; i < port_type; i++)
++ num += port_count[i][direction];
++
++ return num;
++}
++
++/*** USB Interactions ***/
++
++/* Notifications from the interface */
++#define SCARLETT2_USB_NOTIFY_SYNC 0x00000008
++#define SCARLETT2_USB_NOTIFY_DIM_MUTE 0x00200000
++#define SCARLETT2_USB_NOTIFY_MONITOR 0x00400000
++#define SCARLETT2_USB_NOTIFY_INPUT_OTHER 0x00800000
++#define SCARLETT2_USB_NOTIFY_MONITOR_OTHER 0x01000000
++
++/* Commands for sending/receiving requests/responses */
++#define SCARLETT2_USB_CMD_INIT 0
++#define SCARLETT2_USB_CMD_REQ 2
++#define SCARLETT2_USB_CMD_RESP 3
++
++#define SCARLETT2_USB_INIT_1 0x00000000
++#define SCARLETT2_USB_INIT_2 0x00000002
++#define SCARLETT2_USB_GET_METER 0x00001001
++#define SCARLETT2_USB_GET_MIX 0x00002001
++#define SCARLETT2_USB_SET_MIX 0x00002002
++#define SCARLETT2_USB_GET_MUX 0x00003001
++#define SCARLETT2_USB_SET_MUX 0x00003002
++#define SCARLETT2_USB_GET_SYNC 0x00006004
++#define SCARLETT2_USB_GET_DATA 0x00800000
++#define SCARLETT2_USB_SET_DATA 0x00800001
++#define SCARLETT2_USB_DATA_CMD 0x00800002
++
++#define SCARLETT2_USB_CONFIG_SAVE 6
++
++#define SCARLETT2_USB_VOLUME_STATUS_OFFSET 0x31
++#define SCARLETT2_USB_METER_LEVELS_GET_MAGIC 1
++
++/* volume status is read together (matches scarlett2_config_items[1]) */
++struct scarlett2_usb_volume_status {
++ /* dim/mute buttons */
++ u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
++
++ u8 pad1;
++
++ /* software volume setting */
++ s16 sw_vol[SCARLETT2_ANALOGUE_MAX];
++
++ /* actual volume of output inc. dim (-18dB) */
++ s16 hw_vol[SCARLETT2_ANALOGUE_MAX];
++
++ /* internal mute buttons */
++ u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
++
++ /* sw (0) or hw (1) controlled */
++ u8 sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
++
++ u8 pad3[6];
++
++ /* front panel volume knob */
++ s16 master_vol;
++} __packed;
++
++/* Configuration parameters that can be read and written */
++enum {
++ SCARLETT2_CONFIG_DIM_MUTE = 0,
++ SCARLETT2_CONFIG_LINE_OUT_VOLUME = 1,
++ SCARLETT2_CONFIG_MUTE_SWITCH = 2,
++ SCARLETT2_CONFIG_SW_HW_SWITCH = 3,
++ SCARLETT2_CONFIG_LEVEL_SWITCH = 4,
++ SCARLETT2_CONFIG_PAD_SWITCH = 5,
++ SCARLETT2_CONFIG_MSD_SWITCH = 6,
++ SCARLETT2_CONFIG_AIR_SWITCH = 7,
++ SCARLETT2_CONFIG_STANDALONE_SWITCH = 8,
++ SCARLETT2_CONFIG_PHANTOM_SWITCH = 9,
++ SCARLETT2_CONFIG_PHANTOM_PERSISTENCE = 10,
++ SCARLETT2_CONFIG_DIRECT_MONITOR = 11,
++ SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH = 12,
++ SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE = 13,
++ SCARLETT2_CONFIG_TALKBACK_MAP = 14,
++ SCARLETT2_CONFIG_COUNT = 15
++};
++
++/* Location, size, and activation command number for the configuration
++ * parameters. Size is in bits and may be 1, 8, or 16.
++ */
++struct scarlett2_config {
++ u8 offset;
++ u8 size;
++ u8 activate;
++};
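++
++/* For example, { .offset = 0x9d, .size = 8, .activate = 6 } (the
++ * Gen 3 MSD switch below) is one byte at offset 0x9d, applied by
++ * sending activation command 6 (SCARLETT2_USB_CONFIG_SAVE); see
++ * scarlett2_usb_set_config().
++ */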
++
++static const struct scarlett2_config
++ scarlett2_config_items[SCARLETT2_CONFIG_SET_COUNT]
++ [SCARLETT2_CONFIG_COUNT] =
++
++/* Devices without a mixer (Gen 3 Solo and 2i2) */
++{ {
++ [SCARLETT2_CONFIG_MSD_SWITCH] = {
++ .offset = 0x04, .size = 8, .activate = 6 },
++
++ [SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
++ .offset = 0x05, .size = 8, .activate = 6 },
++
++ [SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
++ .offset = 0x06, .size = 8, .activate = 3 },
++
++ [SCARLETT2_CONFIG_DIRECT_MONITOR] = {
++ .offset = 0x07, .size = 8, .activate = 4 },
++
++ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++ .offset = 0x08, .size = 1, .activate = 7 },
++
++ [SCARLETT2_CONFIG_AIR_SWITCH] = {
++ .offset = 0x09, .size = 1, .activate = 8 },
++
++/* Gen 2 devices: 6i6, 18i8, 18i20 */
++}, {
++ [SCARLETT2_CONFIG_DIM_MUTE] = {
++ .offset = 0x31, .size = 8, .activate = 2 },
++
++ [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
++ .offset = 0x34, .size = 16, .activate = 1 },
++
++ [SCARLETT2_CONFIG_MUTE_SWITCH] = {
++ .offset = 0x5c, .size = 8, .activate = 1 },
++
++ [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
++ .offset = 0x66, .size = 8, .activate = 3 },
++
++ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++ .offset = 0x7c, .size = 8, .activate = 7 },
++
++ [SCARLETT2_CONFIG_PAD_SWITCH] = {
++ .offset = 0x84, .size = 8, .activate = 8 },
++
++ [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
++ .offset = 0x8d, .size = 8, .activate = 6 },
++
++/* Gen 3 devices: 4i4, 8i6, 18i8, 18i20 */
++}, {
++ [SCARLETT2_CONFIG_DIM_MUTE] = {
++ .offset = 0x31, .size = 8, .activate = 2 },
++
++ [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
++ .offset = 0x34, .size = 16, .activate = 1 },
++
++ [SCARLETT2_CONFIG_MUTE_SWITCH] = {
++ .offset = 0x5c, .size = 8, .activate = 1 },
++
++ [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
++ .offset = 0x66, .size = 8, .activate = 3 },
++
++ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++ .offset = 0x7c, .size = 8, .activate = 7 },
++
++ [SCARLETT2_CONFIG_PAD_SWITCH] = {
++ .offset = 0x84, .size = 8, .activate = 8 },
++
++ [SCARLETT2_CONFIG_AIR_SWITCH] = {
++ .offset = 0x8c, .size = 8, .activate = 8 },
++
++ [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
++ .offset = 0x95, .size = 8, .activate = 6 },
++
++ [SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
++ .offset = 0x9c, .size = 1, .activate = 8 },
++
++ [SCARLETT2_CONFIG_MSD_SWITCH] = {
++ .offset = 0x9d, .size = 8, .activate = 6 },
++
++ [SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
++ .offset = 0x9e, .size = 8, .activate = 6 },
++
++ [SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH] = {
++ .offset = 0x9f, .size = 1, .activate = 10 },
++
++ [SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE] = {
++ .offset = 0xa0, .size = 1, .activate = 10 },
++
++ [SCARLETT2_CONFIG_TALKBACK_MAP] = {
++ .offset = 0xb0, .size = 16, .activate = 10 },
++
++/* Clarett USB and Clarett+ devices: 2Pre, 4Pre, 8Pre */
++}, {
++ [SCARLETT2_CONFIG_DIM_MUTE] = {
++ .offset = 0x31, .size = 8, .activate = 2 },
++
++ [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
++ .offset = 0x34, .size = 16, .activate = 1 },
++
++ [SCARLETT2_CONFIG_MUTE_SWITCH] = {
++ .offset = 0x5c, .size = 8, .activate = 1 },
++
++ [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
++ .offset = 0x66, .size = 8, .activate = 3 },
++
++ [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
++ .offset = 0x7c, .size = 8, .activate = 7 },
++
++ [SCARLETT2_CONFIG_AIR_SWITCH] = {
++ .offset = 0x95, .size = 8, .activate = 8 },
++
++ [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
++ .offset = 0x8d, .size = 8, .activate = 6 },
++} };
++
++/* proprietary request/response format */
++struct scarlett2_usb_packet {
++ __le32 cmd;
++ __le16 size;
++ __le16 seq;
++ __le32 error;
++ __le32 pad;
++ u8 data[];
++};
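++
++/* Requests go out as SCARLETT2_USB_CMD_REQ and the reply is fetched
++ * with a second SCARLETT2_USB_CMD_RESP message; seq increments by
++ * one per request and is echoed back, which scarlett2_usb() uses to
++ * validate the response.
++ */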
++
++static void scarlett2_fill_request_header(struct scarlett2_data *private,
++ struct scarlett2_usb_packet *req,
++ u32 cmd, u16 req_size)
++{
++ /* sequence must go up by 1 for each request */
++ u16 seq = private->scarlett2_seq++;
++
++ req->cmd = cpu_to_le32(cmd);
++ req->size = cpu_to_le16(req_size);
++ req->seq = cpu_to_le16(seq);
++ req->error = 0;
++ req->pad = 0;
++}
++
++static int scarlett2_usb_tx(struct usb_device *dev, int interface,
++ void *buf, u16 size)
++{
++ return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
++ SCARLETT2_USB_CMD_REQ,
++ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
++ 0, interface, buf, size);
++}
++
++static int scarlett2_usb_rx(struct usb_device *dev, int interface,
++ u32 usb_req, void *buf, u16 size)
++{
++ return snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
++ usb_req,
++ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
++ 0, interface, buf, size);
++}
++
++/* Send a proprietary format request to the Scarlett interface */
++static int scarlett2_usb(
++ struct usb_mixer_interface *mixer, u32 cmd,
++ void *req_data, u16 req_size, void *resp_data, u16 resp_size)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ struct usb_device *dev = mixer->chip->dev;
++ struct scarlett2_usb_packet *req, *resp = NULL;
++ size_t req_buf_size = struct_size(req, data, req_size);
++ size_t resp_buf_size = struct_size(resp, data, resp_size);
++ int err;
++
++ req = kmalloc(req_buf_size, GFP_KERNEL);
++ if (!req) {
++ err = -ENOMEM;
++ goto error;
++ }
++
++ resp = kmalloc(resp_buf_size, GFP_KERNEL);
++ if (!resp) {
++ err = -ENOMEM;
++ goto error;
++ }
++
++ mutex_lock(&private->usb_mutex);
++
++ /* build request message and send it */
++
++ scarlett2_fill_request_header(private, req, cmd, req_size);
++
++ if (req_size)
++ memcpy(req->data, req_data, req_size);
++
++ err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
++ req, req_buf_size);
++
++ if (err != req_buf_size) {
++ usb_audio_err(
++ mixer->chip,
++ "%s USB request result cmd %x was %d\n",
++ private->series_name, cmd, err);
++ err = -EINVAL;
++ goto unlock;
++ }
++
++ /* send a second message to get the response */
++
++ err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
++ SCARLETT2_USB_CMD_RESP,
++ resp, resp_buf_size);
++
++ /* validate the response */
++
++ if (err != resp_buf_size) {
++ usb_audio_err(
++ mixer->chip,
++ "%s USB response result cmd %x was %d expected %zu\n",
++ private->series_name, cmd, err, resp_buf_size);
++ err = -EINVAL;
++ goto unlock;
++ }
++
++ /* cmd/seq/size should match except when initialising
++ * seq sent = 1, response = 0
++ */
++ if (resp->cmd != req->cmd ||
++ (resp->seq != req->seq &&
++ (le16_to_cpu(req->seq) != 1 || resp->seq != 0)) ||
++ resp_size != le16_to_cpu(resp->size) ||
++ resp->error ||
++ resp->pad) {
++ usb_audio_err(
++ mixer->chip,
++ "%s USB invalid response; "
++ "cmd tx/rx %d/%d seq %d/%d size %d/%d "
++ "error %d pad %d\n",
++ private->series_name,
++ le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
++ le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
++ resp_size, le16_to_cpu(resp->size),
++ le32_to_cpu(resp->error),
++ le32_to_cpu(resp->pad));
++ err = -EINVAL;
++ goto unlock;
++ }
++
++ if (resp_data && resp_size > 0)
++ memcpy(resp_data, resp->data, resp_size);
++
++unlock:
++ mutex_unlock(&private->usb_mutex);
++error:
++ kfree(req);
++ kfree(resp);
++ return err;
++}
++
++/* Send a USB message to get data; result placed in *buf */
++static int scarlett2_usb_get(
++ struct usb_mixer_interface *mixer,
++ int offset, void *buf, int size)
++{
++ struct {
++ __le32 offset;
++ __le32 size;
++ } __packed req;
++
++ req.offset = cpu_to_le32(offset);
++ req.size = cpu_to_le32(size);
++ return scarlett2_usb(mixer, SCARLETT2_USB_GET_DATA,
++ &req, sizeof(req), buf, size);
++}
++
++/* Send a USB message to get configuration parameters; result placed in *buf */
++static int scarlett2_usb_get_config(
++ struct usb_mixer_interface *mixer,
++ int config_item_num, int count, void *buf)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const struct scarlett2_config *config_item =
++ &scarlett2_config_items[info->config_set][config_item_num];
++ int size, err, i;
++ u8 *buf_8;
++ u8 value;
++
++ /* For byte-sized parameters, retrieve directly into buf */
++ if (config_item->size >= 8) {
++ size = config_item->size / 8 * count;
++ err = scarlett2_usb_get(mixer, config_item->offset, buf, size);
++ if (err < 0)
++ return err;
++ if (size == 2) {
++ u16 *buf_16 = buf;
++
++ for (i = 0; i < count; i++, buf_16++)
++ *buf_16 = le16_to_cpu(*(__le16 *)buf_16);
++ }
++ return 0;
++ }
++
++ /* For bit-sized parameters, retrieve into value */
++ err = scarlett2_usb_get(mixer, config_item->offset, &value, 1);
++ if (err < 0)
++ return err;
++
++ /* then unpack from value into buf[] */
++ buf_8 = buf;
++ for (i = 0; i < 8 && i < count; i++, value >>= 1)
++ *buf_8++ = value & 1;
++
++ return 0;
++}
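++
++/* For example, a size = 1 config item packs up to 8 switches into
++ * one byte: with count = 2, bit 0 is unpacked into buf[0] and
++ * bit 1 into buf[1].
++ */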
++
++/* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
++static void scarlett2_config_save(struct usb_mixer_interface *mixer)
++{
++ __le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
++
++ int err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
++ &req, sizeof(u32),
++ NULL, 0);
++ if (err < 0)
++ usb_audio_err(mixer->chip, "config save failed: %d\n", err);
++}
++
++/* Delayed work to save config */
++static void scarlett2_config_save_work(struct work_struct *work)
++{
++ struct scarlett2_data *private =
++ container_of(work, struct scarlett2_data, work.work);
++
++ scarlett2_config_save(private->mixer);
++}
++
++/* Send a USB message to set a SCARLETT2_CONFIG_* parameter */
++static int scarlett2_usb_set_config(
++ struct usb_mixer_interface *mixer,
++ int config_item_num, int index, int value)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const struct scarlett2_config *config_item =
++ &scarlett2_config_items[info->config_set][config_item_num];
++ struct {
++ __le32 offset;
++ __le32 bytes;
++ __le32 value;
++ } __packed req;
++ __le32 req2;
++ int offset, size;
++ int err;
++
++ /* Cancel any pending NVRAM save */
++ cancel_delayed_work_sync(&private->work);
++
++ /* Convert config_item->size in bits to size in bytes and
++ * calculate offset
++ */
++ if (config_item->size >= 8) {
++ size = config_item->size / 8;
++ offset = config_item->offset + index * size;
++
++ /* If updating a bit, retrieve the old value, set/clear the
++ * bit as needed, and update value
++ */
++ } else {
++ u8 tmp;
++
++ size = 1;
++ offset = config_item->offset;
++
++ err = scarlett2_usb_get(mixer, offset, &tmp, 1);
++ if (err < 0)
++ return err;
++
++ if (value)
++ tmp |= (1 << index);
++ else
++ tmp &= ~(1 << index);
++
++ value = tmp;
++ }
++
++ /* Send the configuration parameter data */
++ req.offset = cpu_to_le32(offset);
++ req.bytes = cpu_to_le32(size);
++ req.value = cpu_to_le32(value);
++ err = scarlett2_usb(mixer, SCARLETT2_USB_SET_DATA,
++ &req, sizeof(u32) * 2 + size,
++ NULL, 0);
++ if (err < 0)
++ return err;
++
++ /* Activate the change */
++ req2 = cpu_to_le32(config_item->activate);
++ err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
++ &req2, sizeof(req2), NULL, 0);
++ if (err < 0)
++ return err;
++
++ /* Schedule the change to be written to NVRAM */
++ if (config_item->activate != SCARLETT2_USB_CONFIG_SAVE)
++ schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
++
++ return 0;
++}
++
++/* Send a USB message to get sync status; result placed in *sync */
++static int scarlett2_usb_get_sync_status(
++ struct usb_mixer_interface *mixer,
++ u8 *sync)
++{
++ __le32 data;
++ int err;
++
++ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_SYNC,
++ NULL, 0, &data, sizeof(data));
++ if (err < 0)
++ return err;
++
++ *sync = !!data;
++ return 0;
++}
++
++/* Send a USB message to get volume status; result placed in *buf */
++static int scarlett2_usb_get_volume_status(
++ struct usb_mixer_interface *mixer,
++ struct scarlett2_usb_volume_status *buf)
++{
++ return scarlett2_usb_get(mixer, SCARLETT2_USB_VOLUME_STATUS_OFFSET,
++ buf, sizeof(*buf));
++}
++
++/* Send a USB message to get the volumes for all inputs of one mix
++ * and put the values into private->mix[]
++ */
++static int scarlett2_usb_get_mix(struct usb_mixer_interface *mixer,
++ int mix_num)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ int num_mixer_in =
++ info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++ int err, i, j, k;
++
++ struct {
++ __le16 mix_num;
++ __le16 count;
++ } __packed req;
++
++ __le16 data[SCARLETT2_INPUT_MIX_MAX];
++
++ req.mix_num = cpu_to_le16(mix_num);
++ req.count = cpu_to_le16(num_mixer_in);
++
++ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MIX,
++ &req, sizeof(req),
++ data, num_mixer_in * sizeof(u16));
++ if (err < 0)
++ return err;
++
++ for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++) {
++ u16 mixer_value = le16_to_cpu(data[i]);
++
++ for (k = 0; k < SCARLETT2_MIXER_VALUE_COUNT; k++)
++ if (scarlett2_mixer_values[k] >= mixer_value)
++ break;
++ if (k == SCARLETT2_MIXER_VALUE_COUNT)
++ k = SCARLETT2_MIXER_MAX_VALUE;
++ private->mix[j] = k;
++ }
++
++ return 0;
++}
++
++/* Send a USB message to set the volumes for all inputs of one mix
++ * (values obtained from private->mix[])
++ */
++static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
++ int mix_num)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ struct {
++ __le16 mix_num;
++ __le16 data[SCARLETT2_INPUT_MIX_MAX];
++ } __packed req;
++
++ int i, j;
++ int num_mixer_in =
++ info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++
++ req.mix_num = cpu_to_le16(mix_num);
++
++ for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++)
++ req.data[i] = cpu_to_le16(
++ scarlett2_mixer_values[private->mix[j]]
++ );
++
++ return scarlett2_usb(mixer, SCARLETT2_USB_SET_MIX,
++ &req, (num_mixer_in + 1) * sizeof(u16),
++ NULL, 0);
++}
++
++/* Convert a port number index (per info->port_count) to a hardware ID */
++static u32 scarlett2_mux_src_num_to_id(
++ const int port_count[][SCARLETT2_PORT_DIRNS], int num)
++{
++ int port_type;
++
++ for (port_type = 0;
++ port_type < SCARLETT2_PORT_TYPE_COUNT;
++ port_type++) {
++ if (num < port_count[port_type][SCARLETT2_PORT_IN])
++ return scarlett2_ports[port_type].id | num;
++ num -= port_count[port_type][SCARLETT2_PORT_IN];
++ }
++
++ /* Oops */
++ return 0;
++}
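++
++/* For example, with one "None" input (as in all the port_count
++ * tables above), num 0 returns 0x000 ("Off") and num 1 returns
++ * 0x080, the first analogue input.
++ */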
++
++/* Convert a hardware ID to a port number index */
++static u32 scarlett2_mux_id_to_num(
++ const int port_count[][SCARLETT2_PORT_DIRNS], int direction, u32 id)
++{
++ int port_type;
++ int port_num = 0;
++
++ for (port_type = 0;
++ port_type < SCARLETT2_PORT_TYPE_COUNT;
++ port_type++) {
++ int base = scarlett2_ports[port_type].id;
++ int count = port_count[port_type][direction];
++
++ if (id >= base && id < base + count)
++ return port_num + id - base;
++ port_num += count;
++ }
++
++ /* Oops */
++ return -1;
++}
++
++/* Convert one mux entry from the interface and load into private->mux[] */
++static void scarlett2_usb_populate_mux(struct scarlett2_data *private,
++ u32 mux_entry)
++{
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++
++ int dst_idx, src_idx;
++
++ dst_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_OUT,
++ mux_entry & 0xFFF);
++ if (dst_idx < 0)
++ return;
++
++ if (dst_idx >= private->num_mux_dsts) {
++ usb_audio_err(private->mixer->chip,
++ "BUG: scarlett2_mux_id_to_num(%06x, OUT): %d >= %d",
++ mux_entry, dst_idx, private->num_mux_dsts);
++ return;
++ }
++
++ src_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_IN,
++ mux_entry >> 12);
++ if (src_idx < 0)
++ return;
++
++ if (src_idx >= private->num_mux_srcs) {
++ usb_audio_err(private->mixer->chip,
++ "BUG: scarlett2_mux_id_to_num(%06x, IN): %d >= %d",
++ mux_entry, src_idx, private->num_mux_srcs);
++ return;
++ }
++
++ private->mux[dst_idx] = src_idx;
++}
++
++/* Send USB message to get mux inputs and then populate private->mux[] */
++static int scarlett2_usb_get_mux(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ int count = private->num_mux_dsts;
++ int err, i;
++
++ struct {
++ __le16 num;
++ __le16 count;
++ } __packed req;
++
++ __le32 data[SCARLETT2_MUX_MAX];
++
++ private->mux_updated = 0;
++
++ req.num = 0;
++ req.count = cpu_to_le16(count);
++
++ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MUX,
++ &req, sizeof(req),
++ data, count * sizeof(u32));
++ if (err < 0)
++ return err;
++
++ for (i = 0; i < count; i++)
++ scarlett2_usb_populate_mux(private, le32_to_cpu(data[i]));
++
++ return 0;
++}
++
++/* Send USB messages to set mux inputs */
++static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int table;
++
++ struct {
++ __le16 pad;
++ __le16 num;
++ __le32 data[SCARLETT2_MUX_MAX];
++ } __packed req;
++
++ req.pad = 0;
++
++ /* set mux settings for each rate */
++ for (table = 0; table < SCARLETT2_MUX_TABLES; table++) {
++ const struct scarlett2_mux_entry *entry;
++
++ /* i counts over the output array */
++ int i = 0, err;
++
++ req.num = cpu_to_le16(table);
++
++ /* loop through each entry */
++ for (entry = info->mux_assignment[table];
++ entry->count;
++ entry++) {
++ int j;
++ int port_type = entry->port_type;
++ int port_idx = entry->start;
++ int mux_idx = scarlett2_get_port_start_num(port_count,
++ SCARLETT2_PORT_OUT, port_type) + port_idx;
++ int dst_id = scarlett2_ports[port_type].id + port_idx;
++
++ /* Empty slots */
++ if (!dst_id) {
++ for (j = 0; j < entry->count; j++)
++ req.data[i++] = 0;
++ continue;
++ }
++
++ /* Non-empty mux slots use the lower 12 bits
++ * for the destination and next 12 bits for
++ * the source
++ */
++ for (j = 0; j < entry->count; j++) {
++ int src_id = scarlett2_mux_src_num_to_id(
++ port_count, private->mux[mux_idx++]);
++ req.data[i++] = cpu_to_le32(dst_id |
++ src_id << 12);
++ dst_id++;
++ }
++ }
++
++ err = scarlett2_usb(mixer, SCARLETT2_USB_SET_MUX,
++ &req, (i + 1) * sizeof(u32),
++ NULL, 0);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
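++
++/* For example, routing Analogue 1 (src id 0x080) to PCM 01 Capture
++ * (dst id 0x600) produces the entry 0x600 | (0x080 << 12) =
++ * 0x080600, which scarlett2_usb_populate_mux() decodes with
++ * "& 0xFFF" and ">> 12".
++ */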
++
++/* Send USB message to get meter levels */
++static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
++ u16 num_meters, u16 *levels)
++{
++ struct {
++ __le16 pad;
++ __le16 num_meters;
++ __le32 magic;
++ } __packed req;
++ u32 resp[SCARLETT2_MAX_METERS];
++ int i, err;
++
++ req.pad = 0;
++ req.num_meters = cpu_to_le16(num_meters);
++ req.magic = cpu_to_le32(SCARLETT2_USB_METER_LEVELS_GET_MAGIC);
++ err = scarlett2_usb(mixer, SCARLETT2_USB_GET_METER,
++ &req, sizeof(req), resp, num_meters * sizeof(u32));
++ if (err < 0)
++ return err;
++
++ /* copy, convert to u16 */
++ for (i = 0; i < num_meters; i++)
++ levels[i] = resp[i];
++
++ return 0;
++}
++
++/*** Control Functions ***/
++
++/* helper function to create a new control */
++static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
++ const struct snd_kcontrol_new *ncontrol,
++ int index, int channels, const char *name,
++ struct snd_kcontrol **kctl_return)
++{
++ struct snd_kcontrol *kctl;
++ struct usb_mixer_elem_info *elem;
++ int err;
++
++ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
++ if (!elem)
++ return -ENOMEM;
++
++ /* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
++ * ignores them for resume and other operations.
++ * Also, the head.id field is set to 0, as we don't use this field.
++ */
++ elem->head.mixer = mixer;
++ elem->control = index;
++ elem->head.id = 0;
++ elem->channels = channels;
++ elem->val_type = USB_MIXER_BESPOKEN;
++
++ kctl = snd_ctl_new1(ncontrol, elem);
++ if (!kctl) {
++ kfree(elem);
++ return -ENOMEM;
++ }
++ kctl->private_free = snd_usb_mixer_elem_free;
++
++ strscpy(kctl->id.name, name, sizeof(kctl->id.name));
++
++ err = snd_usb_mixer_add_control(&elem->head, kctl);
++ if (err < 0)
++ return err;
++
++ if (kctl_return)
++ *kctl_return = kctl;
++
++ return 0;
++}
++
++/*** Sync Control ***/
++
++/* Update sync control after receiving notification that the status
++ * has changed
++ */
++static int scarlett2_update_sync(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ private->sync_updated = 0;
++ return scarlett2_usb_get_sync_status(mixer, &private->sync);
++}
++
++static int scarlett2_sync_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ static const char *texts[2] = {
++ "Unlocked", "Locked"
++ };
++ return snd_ctl_enum_info(uinfo, 1, 2, texts);
++}
++
++static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->sync_updated) {
++ err = scarlett2_update_sync(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.enumerated.item[0] = private->sync;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_sync_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .access = SNDRV_CTL_ELEM_ACCESS_READ,
++ .name = "",
++ .info = scarlett2_sync_ctl_info,
++ .get = scarlett2_sync_ctl_get
++};
++
++static int scarlett2_add_sync_ctl(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ /* devices without a mixer also don't support reporting sync status */
++ if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++ return 0;
++
++ return scarlett2_add_new_ctl(mixer, &scarlett2_sync_ctl,
++ 0, 1, "Sync Status", &private->sync_ctl);
++}
++
++/*** Analogue Line Out Volume Controls ***/
++
++/* Update hardware volume controls after receiving notification that
++ * they have changed
++ */
++static int scarlett2_update_volumes(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ struct scarlett2_usb_volume_status volume_status;
++ int num_line_out =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++ int err, i;
++ int mute;
++
++ private->vol_updated = 0;
++
++ err = scarlett2_usb_get_volume_status(mixer, &volume_status);
++ if (err < 0)
++ return err;
++
++ private->master_vol = clamp(
++ volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
++ 0, SCARLETT2_VOLUME_BIAS);
++
++ if (info->line_out_hw_vol)
++ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
++ private->dim_mute[i] = !!volume_status.dim_mute[i];
++
++ mute = private->dim_mute[SCARLETT2_BUTTON_MUTE];
++
++ for (i = 0; i < num_line_out; i++)
++ if (private->vol_sw_hw_switch[i]) {
++ private->vol[i] = private->master_vol;
++ private->mute_switch[i] = mute;
++ }
++
++ return 0;
++}
++
++static int scarlett2_volume_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = elem->channels;
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = SCARLETT2_VOLUME_BIAS;
++ uinfo->value.integer.step = 1;
++ return 0;
++}
++
++static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] = private->master_vol;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int line_out_remap(struct scarlett2_data *private, int index)
++{
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int line_out_count =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++
++ if (!info->line_out_remap_enable)
++ return index;
++
++ if (index >= line_out_count)
++ return index;
++
++ return info->line_out_remap[index];
++}
++
++static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] = private->vol[index];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->vol[index];
++ val = ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->vol[index] = val;
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
++ index, val - SCARLETT2_VOLUME_BIAS);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const DECLARE_TLV_DB_MINMAX(
++ db_scale_scarlett2_gain, -SCARLETT2_VOLUME_BIAS * 100, 0
++);
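++
++/* i.e. integer control values 0..SCARLETT2_VOLUME_BIAS are
++ * presented as -127dB..0dB in 1dB steps
++ */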
++
++static const struct snd_kcontrol_new scarlett2_master_volume_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .access = SNDRV_CTL_ELEM_ACCESS_READ |
++ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
++ .name = "",
++ .info = scarlett2_volume_ctl_info,
++ .get = scarlett2_master_volume_ctl_get,
++ .private_value = 0, /* max value */
++ .tlv = { .p = db_scale_scarlett2_gain }
++};
++
++static const struct snd_kcontrol_new scarlett2_line_out_volume_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
++ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
++ .name = "",
++ .info = scarlett2_volume_ctl_info,
++ .get = scarlett2_volume_ctl_get,
++ .put = scarlett2_volume_ctl_put,
++ .private_value = 0, /* max value */
++ .tlv = { .p = db_scale_scarlett2_gain }
++};
++
++/*** Mute Switch Controls ***/
++
++static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] = private->mute_switch[index];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->mute_switch[index];
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->mute_switch[index] = val;
++
++ /* Send mute change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
++ index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_mute_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_mute_ctl_get,
++ .put = scarlett2_mute_ctl_put,
++};
++
++/*** HW/SW Volume Switch Controls ***/
++
++static void scarlett2_sw_hw_ctl_ro(struct scarlett2_data *private, int index)
++{
++ private->sw_hw_ctls[index]->vd[0].access &=
++ ~SNDRV_CTL_ELEM_ACCESS_WRITE;
++}
++
++static void scarlett2_sw_hw_ctl_rw(struct scarlett2_data *private, int index)
++{
++ private->sw_hw_ctls[index]->vd[0].access |=
++ SNDRV_CTL_ELEM_ACCESS_WRITE;
++}
++
++static int scarlett2_sw_hw_enum_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ static const char *const values[2] = {
++ "SW", "HW"
++ };
++
++ return snd_ctl_enum_info(uinfo, 1, 2, values);
++}
++
++static int scarlett2_sw_hw_enum_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++
++ ucontrol->value.enumerated.item[0] = private->vol_sw_hw_switch[index];
++ return 0;
++}
++
++static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
++ int index, int value)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ struct snd_card *card = mixer->chip->card;
++
++ /* Set/Clear write bits */
++ if (value) {
++ private->vol_ctls[index]->vd[0].access |=
++ SNDRV_CTL_ELEM_ACCESS_WRITE;
++ private->mute_ctls[index]->vd[0].access |=
++ SNDRV_CTL_ELEM_ACCESS_WRITE;
++ } else {
++ private->vol_ctls[index]->vd[0].access &=
++ ~SNDRV_CTL_ELEM_ACCESS_WRITE;
++ private->mute_ctls[index]->vd[0].access &=
++ ~SNDRV_CTL_ELEM_ACCESS_WRITE;
++ }
++
++ /* Notify of write bit and possible value change */
++ snd_ctl_notify(card,
++ SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
++ &private->vol_ctls[index]->id);
++ snd_ctl_notify(card,
++ SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
++ &private->mute_ctls[index]->id);
++}
++
++static int scarlett2_sw_hw_change(struct usb_mixer_interface *mixer,
++ int ctl_index, int val)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, ctl_index);
++ int err;
++
++ private->vol_sw_hw_switch[index] = val;
++
++ /* Change access mode to RO (hardware controlled volume)
++ * or RW (software controlled volume)
++ */
++ scarlett2_vol_ctl_set_writable(mixer, ctl_index, !val);
++
++ /* Reset volume/mute to master volume/mute */
++ private->vol[index] = private->master_vol;
++ private->mute_switch[index] = private->dim_mute[SCARLETT2_BUTTON_MUTE];
++
++ /* Set SW volume to current HW volume */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
++ index, private->master_vol - SCARLETT2_VOLUME_BIAS);
++ if (err < 0)
++ return err;
++
++ /* Set SW mute to current HW mute */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
++ index, private->dim_mute[SCARLETT2_BUTTON_MUTE]);
++ if (err < 0)
++ return err;
++
++ /* Send SW/HW switch change to the device */
++ return scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
++ index, val);
++}
++
++static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int ctl_index = elem->control;
++ int index = line_out_remap(private, ctl_index);
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->vol_sw_hw_switch[index];
++ val = !!ucontrol->value.enumerated.item[0];
++
++ if (oval == val)
++ goto unlock;
++
++ err = scarlett2_sw_hw_change(mixer, ctl_index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_sw_hw_enum_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = scarlett2_sw_hw_enum_ctl_info,
++ .get = scarlett2_sw_hw_enum_ctl_get,
++ .put = scarlett2_sw_hw_enum_ctl_put,
++};
++
++/*** Line Level/Instrument Level Switch Controls ***/
++
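++/* The input_other_updated flag is set by the interrupt callback when
++ * the device reports a level/pad/air/phantom change; the next control
++ * read then calls this to re-fetch the current state from the device.
++ */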
++static int scarlett2_update_input_other(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ private->input_other_updated = 0;
++
++ if (info->level_input_count) {
++ int err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
++ info->level_input_count + info->level_input_first,
++ private->level_switch);
++ if (err < 0)
++ return err;
++ }
++
++ if (info->pad_input_count) {
++ int err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_PAD_SWITCH,
++ info->pad_input_count, private->pad_switch);
++ if (err < 0)
++ return err;
++ }
++
++ if (info->air_input_count) {
++ int err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_AIR_SWITCH,
++ info->air_input_count, private->air_switch);
++ if (err < 0)
++ return err;
++ }
++
++ if (info->phantom_count) {
++ int err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
++ info->phantom_count, private->phantom_switch);
++ if (err < 0)
++ return err;
++
++ err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE,
++ 1, &private->phantom_persistence);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++static int scarlett2_level_enum_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ static const char *const values[2] = {
++ "Line", "Inst"
++ };
++
++ return snd_ctl_enum_info(uinfo, 1, 2, values);
++}
++
++static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ int index = elem->control + info->level_input_first;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.enumerated.item[0] = private->level_switch[index];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ int index = elem->control + info->level_input_first;
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->level_switch[index];
++ val = !!ucontrol->value.enumerated.item[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->level_switch[index] = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
++ index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_level_enum_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = scarlett2_level_enum_ctl_info,
++ .get = scarlett2_level_enum_ctl_get,
++ .put = scarlett2_level_enum_ctl_put,
++};
++
++/*** Pad Switch Controls ***/
++
++static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] =
++ private->pad_switch[elem->control];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int index = elem->control;
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->pad_switch[index];
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->pad_switch[index] = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PAD_SWITCH,
++ index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_pad_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_pad_ctl_get,
++ .put = scarlett2_pad_ctl_put,
++};
++
++/*** Air Switch Controls ***/
++
++static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] = private->air_switch[elem->control];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int index = elem->control;
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->air_switch[index];
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->air_switch[index] = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_AIR_SWITCH,
++ index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_air_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_air_ctl_get,
++ .put = scarlett2_air_ctl_put,
++};
++
++/*** Phantom Switch Controls ***/
++
++static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->input_other_updated) {
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] =
++ private->phantom_switch[elem->control];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int index = elem->control;
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->phantom_switch[index];
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->phantom_switch[index] = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
++ index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_phantom_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_phantom_ctl_get,
++ .put = scarlett2_phantom_ctl_put,
++};
++
++/*** Phantom Persistence Control ***/
++
++static int scarlett2_phantom_persistence_ctl_get(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++
++ ucontrol->value.integer.value[0] = private->phantom_persistence;
++ return 0;
++}
++
++static int scarlett2_phantom_persistence_ctl_put(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int index = elem->control;
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->phantom_persistence;
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->phantom_persistence = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE, index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_phantom_persistence_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_phantom_persistence_ctl_get,
++ .put = scarlett2_phantom_persistence_ctl_put,
++};
++
++/*** Direct Monitor Control ***/
++
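++/* The "monitor other" state is cached in the same lazy style: the
++ * interrupt callback sets monitor_other_updated and the next control
++ * read calls this to re-fetch it. For speaker switching and talkback
++ * the cached value is 0 when the feature is disabled, otherwise the
++ * hardware switch value plus one.
++ */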
++static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ int err;
++
++ /* monitor_other_enable[0] enables speaker switching
++ * monitor_other_enable[1] enables talkback
++ */
++ u8 monitor_other_enable[2];
++
++ /* monitor_other_switch[0] activates the alternate speakers
++ * monitor_other_switch[1] activates talkback
++ */
++ u8 monitor_other_switch[2];
++
++ private->monitor_other_updated = 0;
++
++ if (info->direct_monitor)
++ return scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_DIRECT_MONITOR,
++ 1, &private->direct_monitor_switch);
++
++ /* if it doesn't do speaker switching then it also doesn't do
++ * talkback
++ */
++ if (!info->has_speaker_switching)
++ return 0;
++
++ err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
++ 2, monitor_other_enable);
++ if (err < 0)
++ return err;
++
++ err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
++ 2, monitor_other_switch);
++ if (err < 0)
++ return err;
++
++ if (!monitor_other_enable[0])
++ private->speaker_switching_switch = 0;
++ else
++ private->speaker_switching_switch = monitor_other_switch[0] + 1;
++
++ if (info->has_talkback) {
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] =
++ info->port_count;
++ int num_mixes =
++ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++ u16 bitmap;
++ int i;
++
++ if (!monitor_other_enable[1])
++ private->talkback_switch = 0;
++ else
++ private->talkback_switch = monitor_other_switch[1] + 1;
++
++ err = scarlett2_usb_get_config(mixer,
++ SCARLETT2_CONFIG_TALKBACK_MAP,
++ 1, &bitmap);
++ if (err < 0)
++ return err;
++ for (i = 0; i < num_mixes; i++, bitmap >>= 1)
++ private->talkback_map[i] = bitmap & 1;
++ }
++
++ return 0;
++}
++
++static int scarlett2_direct_monitor_ctl_get(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->monitor_other_updated) {
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_direct_monitor_ctl_put(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int index = elem->control;
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->direct_monitor_switch;
++ val = min(ucontrol->value.enumerated.item[0], 2U);
++
++ if (oval == val)
++ goto unlock;
++
++ private->direct_monitor_switch = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_DIRECT_MONITOR, index, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_direct_monitor_stereo_enum_ctl_info(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
++{
++ static const char *const values[3] = {
++ "Off", "Mono", "Stereo"
++ };
++
++ return snd_ctl_enum_info(uinfo, 1, 3, values);
++}
++
++/* Direct Monitor for Solo is mono-only and only needs a boolean control
++ * Direct Monitor for 2i2 is selectable between Off/Mono/Stereo
++ */
++static const struct snd_kcontrol_new scarlett2_direct_monitor_ctl[2] = {
++ {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_direct_monitor_ctl_get,
++ .put = scarlett2_direct_monitor_ctl_put,
++ },
++ {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = scarlett2_direct_monitor_stereo_enum_ctl_info,
++ .get = scarlett2_direct_monitor_ctl_get,
++ .put = scarlett2_direct_monitor_ctl_put,
++ }
++};
++
++static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const char *s;
++
++ if (!info->direct_monitor)
++ return 0;
++
++ s = info->direct_monitor == 1
++ ? "Direct Monitor Playback Switch"
++ : "Direct Monitor Playback Enum";
++
++ return scarlett2_add_new_ctl(
++ mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
++ 0, 1, s, &private->direct_monitor_ctl);
++}
++
++/*** Speaker Switching Control ***/
++
++static int scarlett2_speaker_switch_enum_ctl_info(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
++{
++ static const char *const values[3] = {
++ "Off", "Main", "Alt"
++ };
++
++ return snd_ctl_enum_info(uinfo, 1, 3, values);
++}
++
++static int scarlett2_speaker_switch_enum_ctl_get(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->monitor_other_updated) {
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++/* when speaker switching gets enabled, switch the main/alt speakers
++ * to HW volume and disable those controls
++ */
++static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
++{
++ struct snd_card *card = mixer->chip->card;
++ struct scarlett2_data *private = mixer->private_data;
++ int i, err;
++
++ for (i = 0; i < 4; i++) {
++ int index = line_out_remap(private, i);
++
++ /* switch the main/alt speakers to HW volume */
++ if (!private->vol_sw_hw_switch[index]) {
++ err = scarlett2_sw_hw_change(private->mixer, i, 1);
++ if (err < 0)
++ return err;
++ }
++
++ /* disable the line out SW/HW switch */
++ scarlett2_sw_hw_ctl_ro(private, i);
++ snd_ctl_notify(card,
++ SNDRV_CTL_EVENT_MASK_VALUE |
++ SNDRV_CTL_EVENT_MASK_INFO,
++ &private->sw_hw_ctls[i]->id);
++ }
++
++ /* when the next monitor-other notify comes in, update the mux
++ * configuration
++ */
++ private->speaker_switching_switched = 1;
++
++ return 0;
++}
++
++/* when speaker switching gets disabled, re-enable the SW/HW controls
++ * and invalidate the routing
++ */
++static void scarlett2_speaker_switch_disable(struct usb_mixer_interface *mixer)
++{
++ struct snd_card *card = mixer->chip->card;
++ struct scarlett2_data *private = mixer->private_data;
++ int i;
++
++ /* enable the line out SW/HW switch */
++ for (i = 0; i < 4; i++) {
++ scarlett2_sw_hw_ctl_rw(private, i);
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
++ &private->sw_hw_ctls[i]->id);
++ }
++
++ /* when the next monitor-other notify comes in, update the mux
++ * configuration
++ */
++ private->speaker_switching_switched = 1;
++}
++
++static int scarlett2_speaker_switch_enum_ctl_put(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->speaker_switching_switch;
++ val = min(ucontrol->value.enumerated.item[0], 2U);
++
++ if (oval == val)
++ goto unlock;
++
++ private->speaker_switching_switch = val;
++
++ /* enable/disable speaker switching */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
++ 0, !!val);
++ if (err < 0)
++ goto unlock;
++
++ /* if speaker switching is enabled, select main or alt */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
++ 0, val == 2);
++ if (err < 0)
++ goto unlock;
++
++ /* update controls if speaker switching gets enabled or disabled */
++ if (!oval && val)
++ err = scarlett2_speaker_switch_enable(mixer);
++ else if (oval && !val)
++ scarlett2_speaker_switch_disable(mixer);
++
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_speaker_switch_enum_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = scarlett2_speaker_switch_enum_ctl_info,
++ .get = scarlett2_speaker_switch_enum_ctl_get,
++ .put = scarlett2_speaker_switch_enum_ctl_put,
++};
++
++static int scarlett2_add_speaker_switch_ctl(
++ struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ if (!info->has_speaker_switching)
++ return 0;
++
++ return scarlett2_add_new_ctl(
++ mixer, &scarlett2_speaker_switch_enum_ctl,
++ 0, 1, "Speaker Switching Playback Enum",
++ &private->speaker_switching_ctl);
++}
++
++/*** Talkback and Talkback Map Controls ***/
++
++static int scarlett2_talkback_enum_ctl_info(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
++{
++ static const char *const values[3] = {
++ "Disabled", "Off", "On"
++ };
++
++ return snd_ctl_enum_info(uinfo, 1, 3, values);
++}
++
++static int scarlett2_talkback_enum_ctl_get(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->monitor_other_updated) {
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.enumerated.item[0] = private->talkback_switch;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_talkback_enum_ctl_put(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->talkback_switch;
++ val = min(ucontrol->value.enumerated.item[0], 2U);
++
++ if (oval == val)
++ goto unlock;
++
++ private->talkback_switch = val;
++
++ /* enable/disable talkback */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
++ 1, !!val);
++ if (err < 0)
++ goto unlock;
++
++ /* if talkback is enabled, select main or alt */
++ err = scarlett2_usb_set_config(
++ mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
++ 1, val == 2);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_talkback_enum_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = scarlett2_talkback_enum_ctl_info,
++ .get = scarlett2_talkback_enum_ctl_get,
++ .put = scarlett2_talkback_enum_ctl_put,
++};
++
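++/* The talkback map is sent to the device as a bitmap with one bit
++ * per mix output (bit 0 = Mix A); the whole bitmap is rebuilt from
++ * talkback_map[] and resent whenever any one switch changes.
++ */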
++static int scarlett2_talkback_map_ctl_get(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = elem->control;
++
++ ucontrol->value.integer.value[0] = private->talkback_map[index];
++
++ return 0;
++}
++
++static int scarlett2_talkback_map_ctl_put(
++ struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] =
++ private->info->port_count;
++ int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++
++ int index = elem->control;
++ int oval, val, err = 0, i;
++ u16 bitmap = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->talkback_map[index];
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->talkback_map[index] = val;
++
++ for (i = 0; i < num_mixes; i++)
++ bitmap |= private->talkback_map[i] << i;
++
++ /* Send updated bitmap to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_TALKBACK_MAP,
++ 0, bitmap);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_talkback_map_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_talkback_map_ctl_get,
++ .put = scarlett2_talkback_map_ctl_put,
++};
++
++static int scarlett2_add_talkback_ctls(
++ struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++ int err, i;
++ char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++
++ if (!info->has_talkback)
++ return 0;
++
++ err = scarlett2_add_new_ctl(
++ mixer, &scarlett2_talkback_enum_ctl,
++ 0, 1, "Talkback Playback Enum",
++ &private->talkback_ctl);
++ if (err < 0)
++ return err;
++
++ for (i = 0; i < num_mixes; i++) {
++ snprintf(s, sizeof(s),
++ "Talkback Mix %c Playback Switch", i + 'A');
++ err = scarlett2_add_new_ctl(mixer, &scarlett2_talkback_map_ctl,
++ i, 1, s, NULL);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++/*** Dim/Mute Controls ***/
++
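++/* Global dim/mute buttons; when the mute button changes, the cached
++ * mute state of every line output under HW volume control is updated
++ * to match and a notification is sent for each affected control.
++ */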
++static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->vol_updated) {
++ err = scarlett2_update_volumes(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int num_line_out =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++
++ int index = elem->control;
++ int oval, val, err = 0, i;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->dim_mute[index];
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->dim_mute[index] = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_DIM_MUTE,
++ index, val);
++ if (err == 0)
++ err = 1;
++
++ if (index == SCARLETT2_BUTTON_MUTE)
++ for (i = 0; i < num_line_out; i++) {
++ int line_index = line_out_remap(private, i);
++
++ if (private->vol_sw_hw_switch[line_index]) {
++ private->mute_switch[line_index] = val;
++ snd_ctl_notify(mixer->chip->card,
++ SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->mute_ctls[i]->id);
++ }
++ }
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_dim_mute_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_dim_mute_ctl_get,
++ .put = scarlett2_dim_mute_ctl_put
++};
++
++/*** Create the analogue output controls ***/
++
++static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int num_line_out =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++ int err, i;
++ char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++
++ /* Add R/O HW volume control */
++ if (info->line_out_hw_vol) {
++ snprintf(s, sizeof(s), "Master HW Playback Volume");
++ err = scarlett2_add_new_ctl(mixer,
++ &scarlett2_master_volume_ctl,
++ 0, 1, s, &private->master_vol_ctl);
++ if (err < 0)
++ return err;
++ }
++
++ /* Add volume controls */
++ for (i = 0; i < num_line_out; i++) {
++ int index = line_out_remap(private, i);
++
++ /* Fader */
++ if (info->line_out_descrs[i])
++ snprintf(s, sizeof(s),
++ "Line %02d (%s) Playback Volume",
++ i + 1, info->line_out_descrs[i]);
++ else
++ snprintf(s, sizeof(s),
++ "Line %02d Playback Volume",
++ i + 1);
++ err = scarlett2_add_new_ctl(mixer,
++ &scarlett2_line_out_volume_ctl,
++ i, 1, s, &private->vol_ctls[i]);
++ if (err < 0)
++ return err;
++
++ /* Mute Switch */
++ snprintf(s, sizeof(s),
++ "Line %02d Mute Playback Switch",
++ i + 1);
++ err = scarlett2_add_new_ctl(mixer,
++ &scarlett2_mute_ctl,
++ i, 1, s,
++ &private->mute_ctls[i]);
++ if (err < 0)
++ return err;
++
++ /* Make the fader and mute controls read-only if the
++ * SW/HW switch is set to HW
++ */
++ if (private->vol_sw_hw_switch[index])
++ scarlett2_vol_ctl_set_writable(mixer, i, 0);
++
++ /* SW/HW Switch */
++ if (info->line_out_hw_vol) {
++ snprintf(s, sizeof(s),
++ "Line Out %02d Volume Control Playback Enum",
++ i + 1);
++ err = scarlett2_add_new_ctl(mixer,
++ &scarlett2_sw_hw_enum_ctl,
++ i, 1, s,
++ &private->sw_hw_ctls[i]);
++ if (err < 0)
++ return err;
++
++ /* Make the switch read-only if the line is
++ * involved in speaker switching
++ */
++ if (private->speaker_switching_switch && i < 4)
++ scarlett2_sw_hw_ctl_ro(private, i);
++ }
++ }
++
++ /* Add dim/mute controls */
++ if (info->line_out_hw_vol)
++ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++) {
++ err = scarlett2_add_new_ctl(
++ mixer, &scarlett2_dim_mute_ctl,
++ i, 1, scarlett2_dim_mute_names[i],
++ &private->dim_mute_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++/*** Create the analogue input controls ***/
++
++static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ int err, i;
++ char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++ const char *fmt = "Line In %d %s Capture %s";
++ const char *fmt2 = "Line In %d-%d %s Capture %s";
++
++ /* Add input level (line/inst) controls */
++ for (i = 0; i < info->level_input_count; i++) {
++ snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
++ "Level", "Enum");
++ err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
++ i, 1, s, &private->level_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++
++ /* Add input pad controls */
++ for (i = 0; i < info->pad_input_count; i++) {
++ snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
++ err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
++ i, 1, s, &private->pad_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++
++ /* Add input air controls */
++ for (i = 0; i < info->air_input_count; i++) {
++ snprintf(s, sizeof(s), fmt, i + 1, "Air", "Switch");
++ err = scarlett2_add_new_ctl(mixer, &scarlett2_air_ctl,
++ i, 1, s, &private->air_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++
++ /* Add input phantom controls */
++ if (info->inputs_per_phantom == 1) {
++ for (i = 0; i < info->phantom_count; i++) {
++ scnprintf(s, sizeof(s), fmt, i + 1,
++ "Phantom Power", "Switch");
++ err = scarlett2_add_new_ctl(
++ mixer, &scarlett2_phantom_ctl,
++ i, 1, s, &private->phantom_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++ } else if (info->inputs_per_phantom > 1) {
++ for (i = 0; i < info->phantom_count; i++) {
++ int from = i * info->inputs_per_phantom + 1;
++ int to = (i + 1) * info->inputs_per_phantom;
++
++ scnprintf(s, sizeof(s), fmt2, from, to,
++ "Phantom Power", "Switch");
++ err = scarlett2_add_new_ctl(
++ mixer, &scarlett2_phantom_ctl,
++ i, 1, s, &private->phantom_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++ }
++ if (info->phantom_count) {
++ err = scarlett2_add_new_ctl(
++ mixer, &scarlett2_phantom_persistence_ctl, 0, 1,
++ "Phantom Power Persistence Capture Switch", NULL);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++/*** Mixer Volume Controls ***/
++
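++/* Matrix mixer gains; values 0..SCARLETT2_MIXER_MAX_VALUE map to
++ * SCARLETT2_MIXER_MIN_DB..SCARLETT2_MIXER_MAX_DB via the TLV below.
++ * Each control indexes one (mix output, input) crosspoint; a change
++ * resends that mix's gains via scarlett2_usb_set_mix().
++ */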
++static int scarlett2_mixer_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = elem->channels;
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = SCARLETT2_MIXER_MAX_VALUE;
++ uinfo->value.integer.step = 1;
++ return 0;
++}
++
++static int scarlett2_mixer_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++
++ ucontrol->value.integer.value[0] = private->mix[elem->control];
++ return 0;
++}
++
++static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int oval, val, num_mixer_in, mix_num, err = 0;
++ int index = elem->control;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->mix[index];
++ val = clamp(ucontrol->value.integer.value[0],
++ 0L, (long)SCARLETT2_MIXER_MAX_VALUE);
++ num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++ mix_num = index / num_mixer_in;
++
++ if (oval == val)
++ goto unlock;
++
++ private->mix[index] = val;
++ err = scarlett2_usb_set_mix(mixer, mix_num);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const DECLARE_TLV_DB_MINMAX(
++ db_scale_scarlett2_mixer,
++ SCARLETT2_MIXER_MIN_DB * 100,
++ SCARLETT2_MIXER_MAX_DB * 100
++);
++
++static const struct snd_kcontrol_new scarlett2_mixer_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
++ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
++ .name = "",
++ .info = scarlett2_mixer_ctl_info,
++ .get = scarlett2_mixer_ctl_get,
++ .put = scarlett2_mixer_ctl_put,
++ .private_value = SCARLETT2_MIXER_MAX_DB, /* max value */
++ .tlv = { .p = db_scale_scarlett2_mixer }
++};
++
++static int scarlett2_add_mixer_ctls(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int err, i, j;
++ int index;
++ char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++
++ int num_inputs =
++ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
++ int num_outputs =
++ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++
++ for (i = 0, index = 0; i < num_outputs; i++)
++ for (j = 0; j < num_inputs; j++, index++) {
++ snprintf(s, sizeof(s),
++ "Mix %c Input %02d Playback Volume",
++ 'A' + i, j + 1);
++ err = scarlett2_add_new_ctl(mixer, &scarlett2_mixer_ctl,
++ index, 1, s, NULL);
++ if (err < 0)
++ return err;
++ }
++
++ return 0;
++}
++
++/*** Mux Source Selection Controls ***/
++
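++/* The routing mux: one enum control per destination port. The enum
++ * items cover every source port in port-type order, so the item
++ * number is translated to a (port type, channel) pair to build the
++ * item name.
++ */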
++static int scarlett2_mux_src_enum_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ unsigned int item = uinfo->value.enumerated.item;
++ int items = private->num_mux_srcs;
++ int port_type;
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
++ uinfo->count = elem->channels;
++ uinfo->value.enumerated.items = items;
++
++ if (item >= items)
++ item = uinfo->value.enumerated.item = items - 1;
++
++ for (port_type = 0;
++ port_type < SCARLETT2_PORT_TYPE_COUNT;
++ port_type++) {
++ if (item < port_count[port_type][SCARLETT2_PORT_IN]) {
++ const struct scarlett2_port *port =
++ &scarlett2_ports[port_type];
++
++ sprintf(uinfo->value.enumerated.name,
++ port->src_descr, item + port->src_num_offset);
++ return 0;
++ }
++ item -= port_count[port_type][SCARLETT2_PORT_IN];
++ }
++
++ return -EINVAL;
++}
++
++static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++ int err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ if (private->mux_updated) {
++ err = scarlett2_usb_get_mux(mixer);
++ if (err < 0)
++ goto unlock;
++ }
++ ucontrol->value.enumerated.item[0] = private->mux[index];
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++ int index = line_out_remap(private, elem->control);
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->mux[index];
++ val = min(ucontrol->value.enumerated.item[0],
++ private->num_mux_srcs - 1U);
++
++ if (oval == val)
++ goto unlock;
++
++ private->mux[index] = val;
++ err = scarlett2_usb_set_mux(mixer);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_mux_src_enum_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = scarlett2_mux_src_enum_ctl_info,
++ .get = scarlett2_mux_src_enum_ctl_get,
++ .put = scarlett2_mux_src_enum_ctl_put,
++};
++
++static int scarlett2_add_mux_enums(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int port_type, channel, i;
++
++ for (i = 0, port_type = 0;
++ port_type < SCARLETT2_PORT_TYPE_COUNT;
++ port_type++) {
++ for (channel = 0;
++ channel < port_count[port_type][SCARLETT2_PORT_OUT];
++ channel++, i++) {
++ int err;
++ char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
++ const char *const descr =
++ scarlett2_ports[port_type].dst_descr;
++
++ snprintf(s, sizeof(s) - 5, descr, channel + 1);
++ strcat(s, " Enum");
++
++ err = scarlett2_add_new_ctl(mixer,
++ &scarlett2_mux_src_enum_ctl,
++ i, 1, s,
++ &private->mux_ctls[i]);
++ if (err < 0)
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++/*** Meter Controls ***/
++
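++/* Level meters: one volatile read-only value per mux destination,
++ * reported by the device in the range 0..4095.
++ */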
++static int scarlett2_meter_ctl_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *uinfo)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = elem->channels;
++ uinfo->value.integer.min = 0;
++ uinfo->value.integer.max = 4095;
++ uinfo->value.integer.step = 1;
++ return 0;
++}
++
++static int scarlett2_meter_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ u16 meter_levels[SCARLETT2_MAX_METERS];
++ int i, err;
++
++ err = scarlett2_usb_get_meter_levels(elem->head.mixer, elem->channels,
++ meter_levels);
++ if (err < 0)
++ return err;
++
++ for (i = 0; i < elem->channels; i++)
++ ucontrol->value.integer.value[i] = meter_levels[i];
++
++ return 0;
++}
++
++static const struct snd_kcontrol_new scarlett2_meter_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
++ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
++ .name = "",
++ .info = scarlett2_meter_ctl_info,
++ .get = scarlett2_meter_ctl_get
++};
++
++static int scarlett2_add_meter_ctl(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ /* devices without a mixer also don't support reporting levels */
++ if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++ return 0;
++
++ return scarlett2_add_new_ctl(mixer, &scarlett2_meter_ctl,
++ 0, private->num_mux_dsts,
++ "Level Meter", NULL);
++}
++
++/*** MSD Controls ***/
++
++static int scarlett2_msd_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++
++ ucontrol->value.integer.value[0] = private->msd_switch;
++ return 0;
++}
++
++static int scarlett2_msd_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->msd_switch;
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->msd_switch = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MSD_SWITCH,
++ 0, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_msd_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_msd_ctl_get,
++ .put = scarlett2_msd_ctl_put,
++};
++
++static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ if (!info->has_msd_mode)
++ return 0;
++
++ /* If MSD mode is off, hide the switch unless it was enabled via
++  * the SCARLETT2_MSD_ENABLE device_setup option
++  */
++ if (!private->msd_switch && !(mixer->chip->setup & SCARLETT2_MSD_ENABLE))
++ return 0;
++
++ /* Add MSD control */
++ return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
++ 0, 1, "MSD Mode Switch", NULL);
++}
++
++/*** Standalone Control ***/
++
++static int scarlett2_standalone_ctl_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct scarlett2_data *private = elem->head.mixer->private_data;
++
++ ucontrol->value.integer.value[0] = private->standalone_switch;
++ return 0;
++}
++
++static int scarlett2_standalone_ctl_put(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct usb_mixer_elem_info *elem = kctl->private_data;
++ struct usb_mixer_interface *mixer = elem->head.mixer;
++ struct scarlett2_data *private = mixer->private_data;
++
++ int oval, val, err = 0;
++
++ mutex_lock(&private->data_mutex);
++
++ oval = private->standalone_switch;
++ val = !!ucontrol->value.integer.value[0];
++
++ if (oval == val)
++ goto unlock;
++
++ private->standalone_switch = val;
++
++ /* Send switch change to the device */
++ err = scarlett2_usb_set_config(mixer,
++ SCARLETT2_CONFIG_STANDALONE_SWITCH,
++ 0, val);
++ if (err == 0)
++ err = 1;
++
++unlock:
++ mutex_unlock(&private->data_mutex);
++ return err;
++}
++
++static const struct snd_kcontrol_new scarlett2_standalone_ctl = {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "",
++ .info = snd_ctl_boolean_mono_info,
++ .get = scarlett2_standalone_ctl_get,
++ .put = scarlett2_standalone_ctl_put,
++};
++
++static int scarlett2_add_standalone_ctl(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++ return 0;
++
++ /* Add standalone control */
++ return scarlett2_add_new_ctl(mixer, &scarlett2_standalone_ctl,
++ 0, 1, "Standalone Switch", NULL);
++}
++
++/*** Cleanup/Suspend Callbacks ***/
++
++static void scarlett2_private_free(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ cancel_delayed_work_sync(&private->work);
++ kfree(private);
++ mixer->private_data = NULL;
++}
++
++static void scarlett2_private_suspend(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ if (cancel_delayed_work_sync(&private->work))
++ scarlett2_config_save(private->mixer);
++}
++
++/*** Initialisation ***/
++
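++/* Count the total number of mux sources and destinations across all
++ * port types; used to size the mux enum controls and the level meter.
++ */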
++static void scarlett2_count_mux_io(struct scarlett2_data *private)
++{
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int port_type, srcs = 0, dsts = 0;
++
++ for (port_type = 0;
++ port_type < SCARLETT2_PORT_TYPE_COUNT;
++ port_type++) {
++ srcs += port_count[port_type][SCARLETT2_PORT_IN];
++ dsts += port_count[port_type][SCARLETT2_PORT_OUT];
++ }
++
++ private->num_mux_srcs = srcs;
++ private->num_mux_dsts = dsts;
++}
++
++/* Look through the interface descriptors for the Focusrite Control
++ * interface (bInterfaceClass = 255 Vendor Specific Class) and set
++ * bInterfaceNumber, bEndpointAddress, wMaxPacketSize, and bInterval
++ * in private
++ */
++static int scarlett2_find_fc_interface(struct usb_device *dev,
++ struct scarlett2_data *private)
++{
++ struct usb_host_config *config = dev->actconfig;
++ int i;
++
++ for (i = 0; i < config->desc.bNumInterfaces; i++) {
++ struct usb_interface *intf = config->interface[i];
++ struct usb_interface_descriptor *desc =
++ &intf->altsetting[0].desc;
++ struct usb_endpoint_descriptor *epd;
++
++ if (desc->bInterfaceClass != 255)
++ continue;
++
++ epd = get_endpoint(intf->altsetting, 0);
++ private->bInterfaceNumber = desc->bInterfaceNumber;
++ private->bEndpointAddress = epd->bEndpointAddress &
++ USB_ENDPOINT_NUMBER_MASK;
++ private->wMaxPacketSize = le16_to_cpu(epd->wMaxPacketSize);
++ private->bInterval = epd->bInterval;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++/* Initialise private data */
++static int scarlett2_init_private(struct usb_mixer_interface *mixer,
++ const struct scarlett2_device_entry *entry)
++{
++ struct scarlett2_data *private =
++ kzalloc(sizeof(struct scarlett2_data), GFP_KERNEL);
++
++ if (!private)
++ return -ENOMEM;
++
++ mutex_init(&private->usb_mutex);
++ mutex_init(&private->data_mutex);
++ INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
++
++ mixer->private_data = private;
++ mixer->private_free = scarlett2_private_free;
++ mixer->private_suspend = scarlett2_private_suspend;
++
++ private->info = entry->info;
++ private->series_name = entry->series_name;
++ scarlett2_count_mux_io(private);
++ private->scarlett2_seq = 0;
++ private->mixer = mixer;
++
++ return scarlett2_find_fc_interface(mixer->chip->dev, private);
++}
++
++/* Cargo cult proprietary initialisation sequence */
++static int scarlett2_usb_init(struct usb_mixer_interface *mixer)
++{
++ struct usb_device *dev = mixer->chip->dev;
++ struct scarlett2_data *private = mixer->private_data;
++ u8 buf[24];
++ int err;
++
++ if (usb_pipe_type_check(dev, usb_sndctrlpipe(dev, 0)))
++ return -EINVAL;
++
++ /* step 0 */
++ err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
++ SCARLETT2_USB_CMD_INIT, buf, sizeof(buf));
++ if (err < 0)
++ return err;
++
++ /* step 1 */
++ private->scarlett2_seq = 1;
++ err = scarlett2_usb(mixer, SCARLETT2_USB_INIT_1, NULL, 0, NULL, 0);
++ if (err < 0)
++ return err;
++
++ /* step 2 */
++ private->scarlett2_seq = 1;
++ return scarlett2_usb(mixer, SCARLETT2_USB_INIT_2, NULL, 0, NULL, 84);
++}
++
++/* Read configuration from the interface on start */
++static int scarlett2_read_configs(struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int num_line_out =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++ int num_mixer_out =
++ port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
++ struct scarlett2_usb_volume_status volume_status;
++ int err, i;
++
++ if (info->has_msd_mode) {
++ err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_MSD_SWITCH,
++ 1, &private->msd_switch);
++ if (err < 0)
++ return err;
++
++ /* no other controls are created if MSD mode is on */
++ if (private->msd_switch)
++ return 0;
++ }
++
++ err = scarlett2_update_input_other(mixer);
++ if (err < 0)
++ return err;
++
++ err = scarlett2_update_monitor_other(mixer);
++ if (err < 0)
++ return err;
++
++ /* the rest of the configuration is for devices with a mixer */
++ if (info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
++ return 0;
++
++ err = scarlett2_usb_get_config(
++ mixer, SCARLETT2_CONFIG_STANDALONE_SWITCH,
++ 1, &private->standalone_switch);
++ if (err < 0)
++ return err;
++
++ err = scarlett2_update_sync(mixer);
++ if (err < 0)
++ return err;
++
++ err = scarlett2_usb_get_volume_status(mixer, &volume_status);
++ if (err < 0)
++ return err;
++
++ if (info->line_out_hw_vol)
++ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
++ private->dim_mute[i] = !!volume_status.dim_mute[i];
++
++ private->master_vol = clamp(
++ volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
++ 0, SCARLETT2_VOLUME_BIAS);
++
++ for (i = 0; i < num_line_out; i++) {
++ int volume, mute;
++
++ private->vol_sw_hw_switch[i] =
++ info->line_out_hw_vol
++ && volume_status.sw_hw_switch[i];
++
++ volume = private->vol_sw_hw_switch[i]
++ ? volume_status.master_vol
++ : volume_status.sw_vol[i];
++ volume = clamp(volume + SCARLETT2_VOLUME_BIAS,
++ 0, SCARLETT2_VOLUME_BIAS);
++ private->vol[i] = volume;
++
++ mute = private->vol_sw_hw_switch[i]
++ ? private->dim_mute[SCARLETT2_BUTTON_MUTE]
++ : volume_status.mute_switch[i];
++ private->mute_switch[i] = mute;
++ }
++
++ for (i = 0; i < num_mixer_out; i++) {
++ err = scarlett2_usb_get_mix(mixer, i);
++ if (err < 0)
++ return err;
++ }
++
++ return scarlett2_usb_get_mux(mixer);
++}
++
++/* Notify on sync change */
++static void scarlett2_notify_sync(
++ struct usb_mixer_interface *mixer)
++{
++ struct scarlett2_data *private = mixer->private_data;
++
++ private->sync_updated = 1;
++
++ snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->sync_ctl->id);
++}
++
++/* Notify on monitor change */
++static void scarlett2_notify_monitor(
++ struct usb_mixer_interface *mixer)
++{
++ struct snd_card *card = mixer->chip->card;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int num_line_out =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++ int i;
++
++ /* if line_out_hw_vol is 0, there are no controls to update */
++ if (!info->line_out_hw_vol)
++ return;
++
++ private->vol_updated = 1;
++
++ snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->master_vol_ctl->id);
++
++ for (i = 0; i < num_line_out; i++)
++ if (private->vol_sw_hw_switch[line_out_remap(private, i)])
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->vol_ctls[i]->id);
++}
++
++/* Notify on dim/mute change */
++static void scarlett2_notify_dim_mute(
++ struct usb_mixer_interface *mixer)
++{
++ struct snd_card *card = mixer->chip->card;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
++ int num_line_out =
++ port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
++ int i;
++
++ private->vol_updated = 1;
++
++ if (!info->line_out_hw_vol)
++ return;
++
++ for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->dim_mute_ctls[i]->id);
++
++ for (i = 0; i < num_line_out; i++)
++ if (private->vol_sw_hw_switch[line_out_remap(private, i)])
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->mute_ctls[i]->id);
++}
++
++/* Notify on "input other" change (level/pad/air) */
++static void scarlett2_notify_input_other(
++ struct usb_mixer_interface *mixer)
++{
++ struct snd_card *card = mixer->chip->card;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++ int i;
++
++ private->input_other_updated = 1;
++
++ for (i = 0; i < info->level_input_count; i++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->level_ctls[i]->id);
++ for (i = 0; i < info->pad_input_count; i++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->pad_ctls[i]->id);
++ for (i = 0; i < info->air_input_count; i++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->air_ctls[i]->id);
++ for (i = 0; i < info->phantom_count; i++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->phantom_ctls[i]->id);
++}
++
++/* Notify on "monitor other" change (direct monitor, speaker
++ * switching, talkback)
++ */
++static void scarlett2_notify_monitor_other(
++ struct usb_mixer_interface *mixer)
++{
++ struct snd_card *card = mixer->chip->card;
++ struct scarlett2_data *private = mixer->private_data;
++ const struct scarlett2_device_info *info = private->info;
++
++ private->monitor_other_updated = 1;
++
++ if (info->direct_monitor) {
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->direct_monitor_ctl->id);
++ return;
++ }
++
++ if (info->has_speaker_switching)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->speaker_switching_ctl->id);
++
++ if (info->has_talkback)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->talkback_ctl->id);
++
++ /* if speaker switching was recently enabled or disabled,
++ * invalidate the dim/mute and mux enum controls
++ */
++ if (private->speaker_switching_switched) {
++ int i;
++
++ scarlett2_notify_dim_mute(mixer);
++
++ private->speaker_switching_switched = 0;
++ private->mux_updated = 1;
++
++ for (i = 0; i < private->num_mux_dsts; i++)
++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
++ &private->mux_ctls[i]->id);
++ }
++}
++
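++/* The device sends an 8-byte interrupt packet whose first 32-bit LE
++ * word is a bitmask of what changed; the URB is resubmitted unless
++ * its status indicates it was unlinked or the device is going away.
++ */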
++/* Interrupt callback */
++static void scarlett2_notify(struct urb *urb)
++{
++ struct usb_mixer_interface *mixer = urb->context;
++ int len = urb->actual_length;
++ int ustatus = urb->status;
++ u32 data;
++
++ if (ustatus != 0 || len != 8)
++ goto requeue;
++
++ data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
++ if (data & SCARLETT2_USB_NOTIFY_SYNC)
++ scarlett2_notify_sync(mixer);
++ if (data & SCARLETT2_USB_NOTIFY_MONITOR)
++ scarlett2_notify_monitor(mixer);
++ if (data & SCARLETT2_USB_NOTIFY_DIM_MUTE)
++ scarlett2_notify_dim_mute(mixer);
++ if (data & SCARLETT2_USB_NOTIFY_INPUT_OTHER)
++ scarlett2_notify_input_other(mixer);
++ if (data & SCARLETT2_USB_NOTIFY_MONITOR_OTHER)
++ scarlett2_notify_monitor_other(mixer);
++
++requeue:
++ if (ustatus != -ENOENT &&
++ ustatus != -ECONNRESET &&
++ ustatus != -ESHUTDOWN) {
++ urb->dev = mixer->chip->dev;
++ usb_submit_urb(urb, GFP_ATOMIC);
++ }
++}
++
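++/* Start interrupt polling on the vendor-specific endpoint located by
++ * scarlett2_find_fc_interface(); notifications arrive via
++ * scarlett2_notify() above.
++ */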
++static int scarlett2_init_notify(struct usb_mixer_interface *mixer)
++{
++ struct usb_device *dev = mixer->chip->dev;
++ struct scarlett2_data *private = mixer->private_data;
++ unsigned int pipe = usb_rcvintpipe(dev, private->bEndpointAddress);
++ void *transfer_buffer;
++
++ if (mixer->urb) {
++ usb_audio_err(mixer->chip,
++ "%s: mixer urb already in use!\n", __func__);
++ return 0;
++ }
++
++ if (usb_pipe_type_check(dev, pipe))
++ return -EINVAL;
++
++ mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!mixer->urb)
++ return -ENOMEM;
++
++ transfer_buffer = kmalloc(private->wMaxPacketSize, GFP_KERNEL);
++ if (!transfer_buffer)
++ return -ENOMEM;
++
++ usb_fill_int_urb(mixer->urb, dev, pipe,
++ transfer_buffer, private->wMaxPacketSize,
++ scarlett2_notify, mixer, private->bInterval);
++
++ return usb_submit_urb(mixer->urb, GFP_KERNEL);
++}
++
++static const struct scarlett2_device_entry *get_scarlett2_device_entry(
++ struct usb_mixer_interface *mixer)
++{
++ const struct scarlett2_device_entry *entry = scarlett2_devices;
++
++ /* Find entry in scarlett2_devices */
++ while (entry->usb_id && entry->usb_id != mixer->chip->usb_id)
++ entry++;
++ if (!entry->usb_id)
++ return NULL;
++
++ return entry;
++}
++
++static int snd_scarlett2_controls_create(
++ struct usb_mixer_interface *mixer,
++ const struct scarlett2_device_entry *entry)
++{
++ int err;
++
++ /* Initialise private data */
++ err = scarlett2_init_private(mixer, entry);
++ if (err < 0)
++ return err;
++
++ /* Send proprietary USB initialisation sequence */
++ err = scarlett2_usb_init(mixer);
++ if (err < 0)
++ return err;
++
++ /* Read volume levels and controls from the interface */
++ err = scarlett2_read_configs(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the MSD control */
++ err = scarlett2_add_msd_ctl(mixer);
++ if (err < 0)
++ return err;
++
++ /* If MSD mode is enabled, don't create any other controls */
++ if (((struct scarlett2_data *)mixer->private_data)->msd_switch)
++ return 0;
++
++ /* Create the analogue output controls */
++ err = scarlett2_add_line_out_ctls(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the analogue input controls */
++ err = scarlett2_add_line_in_ctls(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the input, output, and mixer mux input selections */
++ err = scarlett2_add_mux_enums(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the matrix mixer controls */
++ err = scarlett2_add_mixer_ctls(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the level meter controls */
++ err = scarlett2_add_meter_ctl(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the sync control */
++ err = scarlett2_add_sync_ctl(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the direct monitor control */
++ err = scarlett2_add_direct_monitor_ctl(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the speaker switching control */
++ err = scarlett2_add_speaker_switch_ctl(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the talkback controls */
++ err = scarlett2_add_talkback_ctls(mixer);
++ if (err < 0)
++ return err;
++
++ /* Create the standalone control */
++ err = scarlett2_add_standalone_ctl(mixer);
++ if (err < 0)
++ return err;
++
++ /* Set up the interrupt polling */
++ err = scarlett2_init_notify(mixer);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++
++int snd_scarlett2_init(struct usb_mixer_interface *mixer)
++{
++ struct snd_usb_audio *chip = mixer->chip;
++ const struct scarlett2_device_entry *entry;
++ int err;
++
++ /* only use UAC_VERSION_2 */
++ if (!mixer->protocol)
++ return 0;
++
++ /* find entry in scarlett2_devices */
++ entry = get_scarlett2_device_entry(mixer);
++ if (!entry) {
++ usb_audio_err(mixer->chip,
++ "%s: missing device entry for %04x:%04x\n",
++ __func__,
++ USB_ID_VENDOR(chip->usb_id),
++ USB_ID_PRODUCT(chip->usb_id));
++ return 0;
++ }
++
++ if (chip->setup & SCARLETT2_DISABLE) {
++ usb_audio_info(chip,
++ "Focusrite %s Mixer Driver disabled "
++ "by modprobe options (snd_usb_audio "
++ "vid=0x%04x pid=0x%04x device_setup=%d)\n",
++ entry->series_name,
++ USB_ID_VENDOR(chip->usb_id),
++ USB_ID_PRODUCT(chip->usb_id),
++ SCARLETT2_DISABLE);
++ return 0;
++ }
++
++ usb_audio_info(chip,
++ "Focusrite %s Mixer Driver enabled (pid=0x%04x); "
++ "report any issues to g@b4.vu",
++ entry->series_name,
++ USB_ID_PRODUCT(chip->usb_id));
++
++ err = snd_scarlett2_controls_create(mixer, entry);
++ if (err < 0)
++ usb_audio_err(mixer->chip,
++ "Error initialising %s Mixer Driver: %d",
++ entry->series_name,
++ err);
++
++ return err;
++}
+diff --git a/sound/usb/mixer_scarlett2.h b/sound/usb/mixer_scarlett2.h
+new file mode 100644
+index 00000000000000..d209362cf41a6c
+--- /dev/null
++++ b/sound/usb/mixer_scarlett2.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __USB_MIXER_SCARLETT2_H
++#define __USB_MIXER_SCARLETT2_H
++
++int snd_scarlett2_init(struct usb_mixer_interface *mixer);
++
++#endif /* __USB_MIXER_SCARLETT2_H */
+diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
+deleted file mode 100644
+index d260be8cb6bc03..00000000000000
+--- a/sound/usb/mixer_scarlett_gen2.c
++++ /dev/null
+@@ -1,4197 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Focusrite Scarlett Gen 2/3 and Clarett+ Driver for ALSA
+- *
+- * Supported models:
+- * - 6i6/18i8/18i20 Gen 2
+- * - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
+- * - Clarett+ 8Pre
+- *
+- * Copyright (c) 2018-2022 by Geoffrey D. Bennett <g at b4.vu>
+- * Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
+- * Copyright (c) 2022 by Christian Colglazier <christian@cacolglazier.com>
+- *
+- * Based on the Scarlett (Gen 1) Driver for ALSA:
+- *
+- * Copyright (c) 2013 by Tobias Hoffmann
+- * Copyright (c) 2013 by Robin Gareus <robin at gareus.org>
+- * Copyright (c) 2002 by Takashi Iwai <tiwai at suse.de>
+- * Copyright (c) 2014 by Chris J Arges <chris.j.arges at canonical.com>
+- *
+- * Much code borrowed from audio.c by
+- * Alan Cox (alan at lxorguk.ukuu.org.uk)
+- * Thomas Sailer (sailer at ife.ee.ethz.ch)
+- *
+- * Code cleanup:
+- * David Henningsson <david.henningsson at canonical.com>
+- */
+-
+-/* The protocol was reverse engineered by looking at the communication
+- * between Focusrite Control 2.3.4 and the Focusrite(R) Scarlett 18i20
+- * (firmware 1083) using usbmon in July-August 2018.
+- *
+- * Scarlett 18i8 support added in April 2019.
+- *
+- * Scarlett 6i6 support added in June 2019 (thanks to Martin Wittmann
+- * for providing usbmon output and testing).
+- *
+- * Scarlett 4i4/8i6 Gen 3 support added in May 2020 (thanks to Laurent
+- * Debricon for donating a 4i4 and to Fredrik Unger for providing 8i6
+- * usbmon output and testing).
+- *
+- * Scarlett 18i8/18i20 Gen 3 support added in June 2020 (thanks to
+- * Darren Jaeckel, Alex Sedlack, and Clovis Lunel for providing usbmon
+- * output, protocol traces and testing).
+- *
+- * Support for loading mixer volume and mux configuration from the
+- * interface during driver initialisation added in May 2021 (thanks to
+- * Vladimir Sadovnikov for figuring out how).
+- *
+- * Support for Solo/2i2 Gen 3 added in May 2021 (thanks to Alexander
+- * Vorona for 2i2 protocol traces).
+- *
+- * Support for phantom power, direct monitoring, speaker switching,
+- * and talkback added in May-June 2021.
+- *
+- * Support for Clarett+ 8Pre added in Aug 2022 by Christian
+- * Colglazier.
+- *
+- * This ALSA mixer gives access to (model-dependent):
+- * - input, output, mixer-matrix muxes
+- * - mixer-matrix gain stages
+- * - gain/volume/mute controls
+- * - level meters
+- * - line/inst level, pad, and air controls
+- * - phantom power, direct monitor, speaker switching, and talkback
+- * controls
+- * - disable/enable MSD mode
+- * - disable/enable standalone mode
+- *
+- * <ditaa>
+- * /--------------\ 18chn 20chn /--------------\
+- * | Hardware in +--+------\ /-------------+--+ ALSA PCM out |
+- * \--------------/ | | | | \--------------/
+- * | | | /-----\ |
+- * | | | | | |
+- * | v v v | |
+- * | +---------------+ | |
+- * | \ Matrix Mux / | |
+- * | +-----+-----+ | |
+- * | | | |
+- * | |18chn | |
+- * | | | |
+- * | | 10chn| |
+- * | v | |
+- * | +------------+ | |
+- * | | Mixer | | |
+- * | | Matrix | | |
+- * | | | | |
+- * | | 18x10 Gain | | |
+- * | | stages | | |
+- * | +-----+------+ | |
+- * | | | |
+- * |18chn |10chn | |20chn
+- * | | | |
+- * | +----------/ |
+- * | | |
+- * v v v
+- * ===========================
+- *               +---------------+       +---------------+
+- * \ Output Mux / \ Capture Mux /
+- * +---+---+---+ +-----+-----+
+- * | | |
+- * 10chn| | |18chn
+- * | | |
+- * /--------------\ | | | /--------------\
+- * | S/PDIF, ADAT |<--/ |10chn \-->| ALSA PCM in |
+- * | Hardware out | | \--------------/
+- * \--------------/ |
+- * v
+- * +-------------+ Software gain per channel.
+- * | Master Gain |<-- 18i20 only: Switch per channel
+- * +------+------+ to select HW or SW gain control.
+- * |
+- * |10chn
+- * /--------------\ |
+- * | Analogue |<------/
+- * | Hardware out |
+- * \--------------/
+- * </ditaa>
+- *
+- * Gen 3 devices have a Mass Storage Device (MSD) mode where a small
+- * disk with registration and driver download information is presented
+- * to the host. To access the full functionality of the device without
+- * proprietary software, MSD mode can be disabled by:
+- * - holding down the 48V button for five seconds while powering on
+- * the device, or
+- * - using this driver and alsamixer to change the "MSD Mode" setting
+- * to Off and power-cycling the device
+- */
+-
+-#include <linux/slab.h>
+-#include <linux/usb.h>
+-#include <linux/moduleparam.h>
+-
+-#include <sound/control.h>
+-#include <sound/tlv.h>
+-
+-#include "usbaudio.h"
+-#include "mixer.h"
+-#include "helper.h"
+-
+-#include "mixer_scarlett_gen2.h"
+-
+-/* device_setup value to enable */
+-#define SCARLETT2_ENABLE 0x01
+-
+-/* device_setup value to allow turning MSD mode back on */
+-#define SCARLETT2_MSD_ENABLE 0x02
+-
+-/* some gui mixers can't handle negative ctl values */
+-#define SCARLETT2_VOLUME_BIAS 127
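+-/* i.e. control values 0..127 map to -127..0 dB (see db_scale_scarlett2_gain) */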
+-
+-/* mixer range from -80dB to +6dB in 0.5dB steps */
+-#define SCARLETT2_MIXER_MIN_DB -80
+-#define SCARLETT2_MIXER_BIAS (-SCARLETT2_MIXER_MIN_DB * 2)
+-#define SCARLETT2_MIXER_MAX_DB 6
+-#define SCARLETT2_MIXER_MAX_VALUE \
+- ((SCARLETT2_MIXER_MAX_DB - SCARLETT2_MIXER_MIN_DB) * 2)
+-#define SCARLETT2_MIXER_VALUE_COUNT (SCARLETT2_MIXER_MAX_VALUE + 1)
+-
+-/* map from (dB + 80) * 2 to mixer value
+- * for dB in 0 .. 172: int(8192 * pow(10, ((dB - 160) / 2 / 20)))
+- */
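+-/* e.g. index 160 = (0 dB + 80) * 2: int(8192 * pow(10, 0)) = 8192, unity gain */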
+-static const u16 scarlett2_mixer_values[SCARLETT2_MIXER_VALUE_COUNT] = {
+- 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+- 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8,
+- 9, 9, 10, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+- 23, 24, 25, 27, 29, 30, 32, 34, 36, 38, 41, 43, 46, 48, 51,
+- 54, 57, 61, 65, 68, 73, 77, 81, 86, 91, 97, 103, 109, 115,
+- 122, 129, 137, 145, 154, 163, 173, 183, 194, 205, 217, 230,
+- 244, 259, 274, 290, 307, 326, 345, 365, 387, 410, 434, 460,
+- 487, 516, 547, 579, 614, 650, 689, 730, 773, 819, 867, 919,
+- 973, 1031, 1092, 1157, 1225, 1298, 1375, 1456, 1543, 1634,
+- 1731, 1833, 1942, 2057, 2179, 2308, 2445, 2590, 2744, 2906,
+- 3078, 3261, 3454, 3659, 3876, 4105, 4349, 4606, 4879, 5168,
+- 5475, 5799, 6143, 6507, 6892, 7301, 7733, 8192, 8677, 9191,
+- 9736, 10313, 10924, 11571, 12257, 12983, 13752, 14567, 15430,
+- 16345
+-};
+-
+-/* Maximum number of analogue outputs */
+-#define SCARLETT2_ANALOGUE_MAX 10
+-
+-/* Maximum number of level and pad switches */
+-#define SCARLETT2_LEVEL_SWITCH_MAX 2
+-#define SCARLETT2_PAD_SWITCH_MAX 8
+-#define SCARLETT2_AIR_SWITCH_MAX 8
+-#define SCARLETT2_PHANTOM_SWITCH_MAX 2
+-
+-/* Maximum number of inputs to the mixer */
+-#define SCARLETT2_INPUT_MIX_MAX 25
+-
+-/* Maximum number of outputs from the mixer */
+-#define SCARLETT2_OUTPUT_MIX_MAX 12
+-
+-/* Maximum size of the data in the USB mux assignment message:
+- * 20 inputs, 20 outputs, 25 matrix inputs, 12 spare
+- */
+-#define SCARLETT2_MUX_MAX 77
+-
+-/* Maximum number of meters (sum of output port counts) */
+-#define SCARLETT2_MAX_METERS 65
+-
+-/* There are four different sets of configuration parameters across
+- * the devices
+- */
+-enum {
+- SCARLETT2_CONFIG_SET_NO_MIXER = 0,
+- SCARLETT2_CONFIG_SET_GEN_2 = 1,
+- SCARLETT2_CONFIG_SET_GEN_3 = 2,
+- SCARLETT2_CONFIG_SET_CLARETT = 3,
+- SCARLETT2_CONFIG_SET_COUNT = 4
+-};
+-
+-/* Hardware port types:
+- * - None (no input to mux)
+- * - Analogue I/O
+- * - S/PDIF I/O
+- * - ADAT I/O
+- * - Mixer I/O
+- * - PCM I/O
+- */
+-enum {
+- SCARLETT2_PORT_TYPE_NONE = 0,
+- SCARLETT2_PORT_TYPE_ANALOGUE = 1,
+- SCARLETT2_PORT_TYPE_SPDIF = 2,
+- SCARLETT2_PORT_TYPE_ADAT = 3,
+- SCARLETT2_PORT_TYPE_MIX = 4,
+- SCARLETT2_PORT_TYPE_PCM = 5,
+- SCARLETT2_PORT_TYPE_COUNT = 6,
+-};
+-
+-/* I/O count of each port type kept in struct scarlett2_ports */
+-enum {
+- SCARLETT2_PORT_IN = 0,
+- SCARLETT2_PORT_OUT = 1,
+- SCARLETT2_PORT_DIRNS = 2,
+-};
+-
+-/* Dim/Mute buttons on the 18i20 */
+-enum {
+- SCARLETT2_BUTTON_MUTE = 0,
+- SCARLETT2_BUTTON_DIM = 1,
+- SCARLETT2_DIM_MUTE_COUNT = 2,
+-};
+-
+-static const char *const scarlett2_dim_mute_names[SCARLETT2_DIM_MUTE_COUNT] = {
+- "Mute Playback Switch", "Dim Playback Switch"
+-};
+-
+-/* Description of each hardware port type:
+- * - id: hardware ID of this port type
+- * - src_descr: printf format string for mux input selections
+- * - src_num_offset: added to the channel number when formatting src_descr
+- * - dst_descr: printf format string for mixer controls
+- */
+-struct scarlett2_port {
+- u16 id;
+- const char * const src_descr;
+- int src_num_offset;
+- const char * const dst_descr;
+-};
+-
+-static const struct scarlett2_port scarlett2_ports[SCARLETT2_PORT_TYPE_COUNT] = {
+- [SCARLETT2_PORT_TYPE_NONE] = {
+- .id = 0x000,
+- .src_descr = "Off"
+- },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = {
+- .id = 0x080,
+- .src_descr = "Analogue %d",
+- .src_num_offset = 1,
+- .dst_descr = "Analogue Output %02d Playback"
+- },
+- [SCARLETT2_PORT_TYPE_SPDIF] = {
+- .id = 0x180,
+- .src_descr = "S/PDIF %d",
+- .src_num_offset = 1,
+- .dst_descr = "S/PDIF Output %d Playback"
+- },
+- [SCARLETT2_PORT_TYPE_ADAT] = {
+- .id = 0x200,
+- .src_descr = "ADAT %d",
+- .src_num_offset = 1,
+- .dst_descr = "ADAT Output %d Playback"
+- },
+- [SCARLETT2_PORT_TYPE_MIX] = {
+- .id = 0x300,
+- .src_descr = "Mix %c",
+- .src_num_offset = 'A',
+- .dst_descr = "Mixer Input %02d Capture"
+- },
+- [SCARLETT2_PORT_TYPE_PCM] = {
+- .id = 0x600,
+- .src_descr = "PCM %d",
+- .src_num_offset = 1,
+- .dst_descr = "PCM %02d Capture"
+- },
+-};
+-
+-/* Number of mux tables: one for each band of sample rates
+- * (44.1/48kHz, 88.2/96kHz, and 176.4/192kHz)
+- */
+-#define SCARLETT2_MUX_TABLES 3
+-
+-/* Maximum number of entries in a mux table */
+-#define SCARLETT2_MAX_MUX_ENTRIES 10
+-
+-/* One entry within mux_assignment defines the port type and range of
+- * ports to add to the set_mux message. The end of the list is marked
+- * with count == 0.
+- */
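+-/* e.g. { SCARLETT2_PORT_TYPE_PCM, 0, 6 } covers PCM ports 1-6 */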
+-struct scarlett2_mux_entry {
+- u8 port_type;
+- u8 start;
+- u8 count;
+-};
+-
+-struct scarlett2_device_info {
+- u32 usb_id; /* USB device identifier */
+-
+- /* Gen 3 devices have an internal MSD mode switch that needs
+- * to be disabled in order to access the full functionality of
+- * the device.
+- */
+- u8 has_msd_mode;
+-
+- /* which set of configuration parameters the device uses */
+- u8 config_set;
+-
+- /* line out hw volume is sw controlled */
+- u8 line_out_hw_vol;
+-
+- /* support for main/alt speaker switching */
+- u8 has_speaker_switching;
+-
+- /* support for talkback microphone */
+- u8 has_talkback;
+-
+- /* the number of analogue inputs with a software switchable
+- * level control that can be set to line or instrument
+- */
+- u8 level_input_count;
+-
+- /* the first input with a level control (0-based) */
+- u8 level_input_first;
+-
+- /* the number of analogue inputs with a software switchable
+- * 10dB pad control
+- */
+- u8 pad_input_count;
+-
+- /* the number of analogue inputs with a software switchable
+- * "air" control
+- */
+- u8 air_input_count;
+-
+- /* the number of phantom (48V) software switchable controls */
+- u8 phantom_count;
+-
+- /* the number of inputs each phantom switch controls */
+- u8 inputs_per_phantom;
+-
+- /* the number of direct monitor options
+- * (0 = none, 1 = mono only, 2 = mono/stereo)
+- */
+- u8 direct_monitor;
+-
+- /* remap analogue outputs; 18i8 Gen 3 has "line 3/4" connected
+- * internally to the analogue 7/8 outputs
+- */
+- u8 line_out_remap_enable;
+- u8 line_out_remap[SCARLETT2_ANALOGUE_MAX];
+-
+- /* additional description for the line out volume controls */
+- const char * const line_out_descrs[SCARLETT2_ANALOGUE_MAX];
+-
+- /* number of sources/destinations of each port type */
+- const int port_count[SCARLETT2_PORT_TYPE_COUNT][SCARLETT2_PORT_DIRNS];
+-
+- /* layout/order of the entries in the set_mux message */
+- struct scarlett2_mux_entry mux_assignment[SCARLETT2_MUX_TABLES]
+- [SCARLETT2_MAX_MUX_ENTRIES];
+-};
+-
+-struct scarlett2_data {
+- struct usb_mixer_interface *mixer;
+- struct mutex usb_mutex; /* prevent sending concurrent USB requests */
+- struct mutex data_mutex; /* lock access to this data */
+- struct delayed_work work;
+- const struct scarlett2_device_info *info;
+- __u8 bInterfaceNumber;
+- __u8 bEndpointAddress;
+- __u16 wMaxPacketSize;
+- __u8 bInterval;
+- int num_mux_srcs;
+- int num_mux_dsts;
+- u16 scarlett2_seq;
+- u8 sync_updated;
+- u8 vol_updated;
+- u8 input_other_updated;
+- u8 monitor_other_updated;
+- u8 mux_updated;
+- u8 speaker_switching_switched;
+- u8 sync;
+- u8 master_vol;
+- u8 vol[SCARLETT2_ANALOGUE_MAX];
+- u8 vol_sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
+- u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
+- u8 level_switch[SCARLETT2_LEVEL_SWITCH_MAX];
+- u8 pad_switch[SCARLETT2_PAD_SWITCH_MAX];
+- u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
+- u8 air_switch[SCARLETT2_AIR_SWITCH_MAX];
+- u8 phantom_switch[SCARLETT2_PHANTOM_SWITCH_MAX];
+- u8 phantom_persistence;
+- u8 direct_monitor_switch;
+- u8 speaker_switching_switch;
+- u8 talkback_switch;
+- u8 talkback_map[SCARLETT2_OUTPUT_MIX_MAX];
+- u8 msd_switch;
+- u8 standalone_switch;
+- struct snd_kcontrol *sync_ctl;
+- struct snd_kcontrol *master_vol_ctl;
+- struct snd_kcontrol *vol_ctls[SCARLETT2_ANALOGUE_MAX];
+- struct snd_kcontrol *sw_hw_ctls[SCARLETT2_ANALOGUE_MAX];
+- struct snd_kcontrol *mute_ctls[SCARLETT2_ANALOGUE_MAX];
+- struct snd_kcontrol *dim_mute_ctls[SCARLETT2_DIM_MUTE_COUNT];
+- struct snd_kcontrol *level_ctls[SCARLETT2_LEVEL_SWITCH_MAX];
+- struct snd_kcontrol *pad_ctls[SCARLETT2_PAD_SWITCH_MAX];
+- struct snd_kcontrol *air_ctls[SCARLETT2_AIR_SWITCH_MAX];
+- struct snd_kcontrol *phantom_ctls[SCARLETT2_PHANTOM_SWITCH_MAX];
+- struct snd_kcontrol *mux_ctls[SCARLETT2_MUX_MAX];
+- struct snd_kcontrol *direct_monitor_ctl;
+- struct snd_kcontrol *speaker_switching_ctl;
+- struct snd_kcontrol *talkback_ctl;
+- u8 mux[SCARLETT2_MUX_MAX];
+- u8 mix[SCARLETT2_INPUT_MIX_MAX * SCARLETT2_OUTPUT_MIX_MAX];
+-};
+-
+-/*** Model-specific data ***/
+-
+-static const struct scarlett2_device_info s6i6_gen2_info = {
+- .usb_id = USB_ID(0x1235, 0x8203),
+-
+- .config_set = SCARLETT2_CONFIG_SET_GEN_2,
+- .level_input_count = 2,
+- .pad_input_count = 2,
+-
+- .line_out_descrs = {
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 6, 6 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info s18i8_gen2_info = {
+- .usb_id = USB_ID(0x1235, 0x8204),
+-
+- .config_set = SCARLETT2_CONFIG_SET_GEN_2,
+- .level_input_count = 2,
+- .pad_input_count = 4,
+-
+- .line_out_descrs = {
+- "Monitor L",
+- "Monitor R",
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 6 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 8, 18 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 4 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info s18i20_gen2_info = {
+- .usb_id = USB_ID(0x1235, 0x8201),
+-
+- .config_set = SCARLETT2_CONFIG_SET_GEN_2,
+- .line_out_hw_vol = 1,
+-
+- .line_out_descrs = {
+- "Monitor L",
+- "Monitor R",
+- NULL,
+- NULL,
+- NULL,
+- NULL,
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 10 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 20, 18 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ADAT, 0, 4 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 6 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info solo_gen3_info = {
+- .usb_id = USB_ID(0x1235, 0x8211),
+-
+- .has_msd_mode = 1,
+- .config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
+- .level_input_count = 1,
+- .level_input_first = 1,
+- .air_input_count = 1,
+- .phantom_count = 1,
+- .inputs_per_phantom = 1,
+- .direct_monitor = 1,
+-};
+-
+-static const struct scarlett2_device_info s2i2_gen3_info = {
+- .usb_id = USB_ID(0x1235, 0x8210),
+-
+- .has_msd_mode = 1,
+- .config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
+- .level_input_count = 2,
+- .air_input_count = 2,
+- .phantom_count = 1,
+- .inputs_per_phantom = 2,
+- .direct_monitor = 2,
+-};
+-
+-static const struct scarlett2_device_info s4i4_gen3_info = {
+- .usb_id = USB_ID(0x1235, 0x8212),
+-
+- .has_msd_mode = 1,
+- .config_set = SCARLETT2_CONFIG_SET_GEN_3,
+- .level_input_count = 2,
+- .pad_input_count = 2,
+- .air_input_count = 2,
+- .phantom_count = 1,
+- .inputs_per_phantom = 2,
+-
+- .line_out_descrs = {
+- "Monitor L",
+- "Monitor R",
+- "Headphones L",
+- "Headphones R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 4, 4 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 6, 8 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 4, 6 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 6 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 16 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info s8i6_gen3_info = {
+- .usb_id = USB_ID(0x1235, 0x8213),
+-
+- .has_msd_mode = 1,
+- .config_set = SCARLETT2_CONFIG_SET_GEN_3,
+- .level_input_count = 2,
+- .pad_input_count = 2,
+- .air_input_count = 2,
+- .phantom_count = 1,
+- .inputs_per_phantom = 2,
+-
+- .line_out_descrs = {
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 6, 4 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 8, 8 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 6, 10 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 8 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 18 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info s18i8_gen3_info = {
+- .usb_id = USB_ID(0x1235, 0x8214),
+-
+- .has_msd_mode = 1,
+- .config_set = SCARLETT2_CONFIG_SET_GEN_3,
+- .line_out_hw_vol = 1,
+- .has_speaker_switching = 1,
+- .level_input_count = 2,
+- .pad_input_count = 4,
+- .air_input_count = 4,
+- .phantom_count = 2,
+- .inputs_per_phantom = 2,
+-
+- .line_out_remap_enable = 1,
+- .line_out_remap = { 0, 1, 6, 7, 2, 3, 4, 5 },
+-
+- .line_out_descrs = {
+- "Monitor L",
+- "Monitor R",
+- "Alt Monitor L",
+- "Alt Monitor R",
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 8 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 10, 20 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 8, 20 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+- { SCARLETT2_PORT_TYPE_PCM, 12, 8 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_PCM, 10, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+- { SCARLETT2_PORT_TYPE_PCM, 12, 4 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_PCM, 10, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 6, 2 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 2, 4 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 20 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info s18i20_gen3_info = {
+- .usb_id = USB_ID(0x1235, 0x8215),
+-
+- .has_msd_mode = 1,
+- .config_set = SCARLETT2_CONFIG_SET_GEN_3,
+- .line_out_hw_vol = 1,
+- .has_speaker_switching = 1,
+- .has_talkback = 1,
+- .level_input_count = 2,
+- .pad_input_count = 8,
+- .air_input_count = 8,
+- .phantom_count = 2,
+- .inputs_per_phantom = 4,
+-
+- .line_out_descrs = {
+- "Monitor 1 L",
+- "Monitor 1 R",
+- "Monitor 2 L",
+- "Monitor 2 R",
+- NULL,
+- NULL,
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 9, 10 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 12, 25 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 20, 20 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+- { SCARLETT2_PORT_TYPE_PCM, 10, 10 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+- { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 25 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 12 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+- { SCARLETT2_PORT_TYPE_PCM, 10, 8 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+- { SCARLETT2_PORT_TYPE_PCM, 8, 2 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 25 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 10 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 10 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 24 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info clarett_8pre_info = {
+- .usb_id = USB_ID(0x1235, 0x820c),
+-
+- .config_set = SCARLETT2_CONFIG_SET_CLARETT,
+- .line_out_hw_vol = 1,
+- .level_input_count = 2,
+- .air_input_count = 8,
+-
+- .line_out_descrs = {
+- "Monitor L",
+- "Monitor R",
+- NULL,
+- NULL,
+- NULL,
+- NULL,
+- "Headphones 1 L",
+- "Headphones 1 R",
+- "Headphones 2 L",
+- "Headphones 2 R",
+- },
+-
+- .port_count = {
+- [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+- [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 10 },
+- [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+- [SCARLETT2_PORT_TYPE_ADAT] = { 8, 8 },
+- [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+- [SCARLETT2_PORT_TYPE_PCM] = { 20, 18 },
+- },
+-
+- .mux_assignment = { {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ADAT, 0, 8 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_ADAT, 0, 4 },
+- { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+- { 0, 0, 0 },
+- }, {
+- { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
+- { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 10 },
+- { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+- { SCARLETT2_PORT_TYPE_NONE, 0, 22 },
+- { 0, 0, 0 },
+- } },
+-};
+-
+-static const struct scarlett2_device_info *scarlett2_devices[] = {
+- /* Supported Gen 2 devices */
+- &s6i6_gen2_info,
+- &s18i8_gen2_info,
+- &s18i20_gen2_info,
+-
+- /* Supported Gen 3 devices */
+- &solo_gen3_info,
+- &s2i2_gen3_info,
+- &s4i4_gen3_info,
+- &s8i6_gen3_info,
+- &s18i8_gen3_info,
+- &s18i20_gen3_info,
+-
+- /* Supported Clarett+ devices */
+- &clarett_8pre_info,
+-
+- /* End of list */
+- NULL
+-};
+-
+-/* get the starting port index number for a given port type/direction */
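+-/* e.g. 6i6 Gen 2 inputs: MIX starts at 1 + 4 + 2 = 7 (NONE + Analogue + S/PDIF) */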
+-static int scarlett2_get_port_start_num(
+- const int port_count[][SCARLETT2_PORT_DIRNS],
+- int direction, int port_type)
+-{
+- int i, num = 0;
+-
+- for (i = 0; i < port_type; i++)
+- num += port_count[i][direction];
+-
+- return num;
+-}
+-
+-/*** USB Interactions ***/
+-
+-/* Notifications from the interface */
+-#define SCARLETT2_USB_NOTIFY_SYNC 0x00000008
+-#define SCARLETT2_USB_NOTIFY_DIM_MUTE 0x00200000
+-#define SCARLETT2_USB_NOTIFY_MONITOR 0x00400000
+-#define SCARLETT2_USB_NOTIFY_INPUT_OTHER 0x00800000
+-#define SCARLETT2_USB_NOTIFY_MONITOR_OTHER 0x01000000
+-
+-/* Commands for sending/receiving requests/responses */
+-#define SCARLETT2_USB_CMD_INIT 0
+-#define SCARLETT2_USB_CMD_REQ 2
+-#define SCARLETT2_USB_CMD_RESP 3
+-
+-#define SCARLETT2_USB_INIT_1 0x00000000
+-#define SCARLETT2_USB_INIT_2 0x00000002
+-#define SCARLETT2_USB_GET_METER 0x00001001
+-#define SCARLETT2_USB_GET_MIX 0x00002001
+-#define SCARLETT2_USB_SET_MIX 0x00002002
+-#define SCARLETT2_USB_GET_MUX 0x00003001
+-#define SCARLETT2_USB_SET_MUX 0x00003002
+-#define SCARLETT2_USB_GET_SYNC 0x00006004
+-#define SCARLETT2_USB_GET_DATA 0x00800000
+-#define SCARLETT2_USB_SET_DATA 0x00800001
+-#define SCARLETT2_USB_DATA_CMD 0x00800002
+-
+-#define SCARLETT2_USB_CONFIG_SAVE 6
+-
+-#define SCARLETT2_USB_VOLUME_STATUS_OFFSET 0x31
+-#define SCARLETT2_USB_METER_LEVELS_GET_MAGIC 1
+-
+-/* volume status is read together (matches scarlett2_config_items[1]) */
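+-/* 71 bytes total (2 + 1 + 20 + 20 + 10 + 10 + 6 + 2), read from offset 0x31 */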
+-struct scarlett2_usb_volume_status {
+- /* dim/mute buttons */
+- u8 dim_mute[SCARLETT2_DIM_MUTE_COUNT];
+-
+- u8 pad1;
+-
+- /* software volume setting */
+- s16 sw_vol[SCARLETT2_ANALOGUE_MAX];
+-
+- /* actual volume of output inc. dim (-18dB) */
+- s16 hw_vol[SCARLETT2_ANALOGUE_MAX];
+-
+- /* internal mute buttons */
+- u8 mute_switch[SCARLETT2_ANALOGUE_MAX];
+-
+- /* sw (0) or hw (1) controlled */
+- u8 sw_hw_switch[SCARLETT2_ANALOGUE_MAX];
+-
+- u8 pad3[6];
+-
+- /* front panel volume knob */
+- s16 master_vol;
+-} __packed;
+-
+-/* Configuration parameters that can be read and written */
+-enum {
+- SCARLETT2_CONFIG_DIM_MUTE = 0,
+- SCARLETT2_CONFIG_LINE_OUT_VOLUME = 1,
+- SCARLETT2_CONFIG_MUTE_SWITCH = 2,
+- SCARLETT2_CONFIG_SW_HW_SWITCH = 3,
+- SCARLETT2_CONFIG_LEVEL_SWITCH = 4,
+- SCARLETT2_CONFIG_PAD_SWITCH = 5,
+- SCARLETT2_CONFIG_MSD_SWITCH = 6,
+- SCARLETT2_CONFIG_AIR_SWITCH = 7,
+- SCARLETT2_CONFIG_STANDALONE_SWITCH = 8,
+- SCARLETT2_CONFIG_PHANTOM_SWITCH = 9,
+- SCARLETT2_CONFIG_PHANTOM_PERSISTENCE = 10,
+- SCARLETT2_CONFIG_DIRECT_MONITOR = 11,
+- SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH = 12,
+- SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE = 13,
+- SCARLETT2_CONFIG_TALKBACK_MAP = 14,
+- SCARLETT2_CONFIG_COUNT = 15
+-};
+-
+-/* Location, size, and activation command number for the configuration
+- * parameters. Size is in bits and may be 1, 8, or 16.
+- */
+-struct scarlett2_config {
+- u8 offset;
+- u8 size;
+- u8 activate;
+-};
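+-/* e.g. the MSD switch below: 8-bit value at offset 0x04, activate command 6 */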
+-
+-static const struct scarlett2_config
+- scarlett2_config_items[SCARLETT2_CONFIG_SET_COUNT]
+- [SCARLETT2_CONFIG_COUNT] =
+-
+-/* Devices without a mixer (Gen 3 Solo and 2i2) */
+-{ {
+- [SCARLETT2_CONFIG_MSD_SWITCH] = {
+- .offset = 0x04, .size = 8, .activate = 6 },
+-
+- [SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
+- .offset = 0x05, .size = 8, .activate = 6 },
+-
+- [SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
+- .offset = 0x06, .size = 8, .activate = 3 },
+-
+- [SCARLETT2_CONFIG_DIRECT_MONITOR] = {
+- .offset = 0x07, .size = 8, .activate = 4 },
+-
+- [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+- .offset = 0x08, .size = 1, .activate = 7 },
+-
+- [SCARLETT2_CONFIG_AIR_SWITCH] = {
+- .offset = 0x09, .size = 1, .activate = 8 },
+-
+-/* Gen 2 devices: 6i6, 18i8, 18i20 */
+-}, {
+- [SCARLETT2_CONFIG_DIM_MUTE] = {
+- .offset = 0x31, .size = 8, .activate = 2 },
+-
+- [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+- .offset = 0x34, .size = 16, .activate = 1 },
+-
+- [SCARLETT2_CONFIG_MUTE_SWITCH] = {
+- .offset = 0x5c, .size = 8, .activate = 1 },
+-
+- [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+- .offset = 0x66, .size = 8, .activate = 3 },
+-
+- [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+- .offset = 0x7c, .size = 8, .activate = 7 },
+-
+- [SCARLETT2_CONFIG_PAD_SWITCH] = {
+- .offset = 0x84, .size = 8, .activate = 8 },
+-
+- [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+- .offset = 0x8d, .size = 8, .activate = 6 },
+-
+-/* Gen 3 devices: 4i4, 8i6, 18i8, 18i20 */
+-}, {
+- [SCARLETT2_CONFIG_DIM_MUTE] = {
+- .offset = 0x31, .size = 8, .activate = 2 },
+-
+- [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+- .offset = 0x34, .size = 16, .activate = 1 },
+-
+- [SCARLETT2_CONFIG_MUTE_SWITCH] = {
+- .offset = 0x5c, .size = 8, .activate = 1 },
+-
+- [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+- .offset = 0x66, .size = 8, .activate = 3 },
+-
+- [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+- .offset = 0x7c, .size = 8, .activate = 7 },
+-
+- [SCARLETT2_CONFIG_PAD_SWITCH] = {
+- .offset = 0x84, .size = 8, .activate = 8 },
+-
+- [SCARLETT2_CONFIG_AIR_SWITCH] = {
+- .offset = 0x8c, .size = 8, .activate = 8 },
+-
+- [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+- .offset = 0x95, .size = 8, .activate = 6 },
+-
+- [SCARLETT2_CONFIG_PHANTOM_SWITCH] = {
+- .offset = 0x9c, .size = 1, .activate = 8 },
+-
+- [SCARLETT2_CONFIG_MSD_SWITCH] = {
+- .offset = 0x9d, .size = 8, .activate = 6 },
+-
+- [SCARLETT2_CONFIG_PHANTOM_PERSISTENCE] = {
+- .offset = 0x9e, .size = 8, .activate = 6 },
+-
+- [SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH] = {
+- .offset = 0x9f, .size = 1, .activate = 10 },
+-
+- [SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE] = {
+- .offset = 0xa0, .size = 1, .activate = 10 },
+-
+- [SCARLETT2_CONFIG_TALKBACK_MAP] = {
+- .offset = 0xb0, .size = 16, .activate = 10 },
+-
+-/* Clarett+ 8Pre */
+-}, {
+- [SCARLETT2_CONFIG_DIM_MUTE] = {
+- .offset = 0x31, .size = 8, .activate = 2 },
+-
+- [SCARLETT2_CONFIG_LINE_OUT_VOLUME] = {
+- .offset = 0x34, .size = 16, .activate = 1 },
+-
+- [SCARLETT2_CONFIG_MUTE_SWITCH] = {
+- .offset = 0x5c, .size = 8, .activate = 1 },
+-
+- [SCARLETT2_CONFIG_SW_HW_SWITCH] = {
+- .offset = 0x66, .size = 8, .activate = 3 },
+-
+- [SCARLETT2_CONFIG_LEVEL_SWITCH] = {
+- .offset = 0x7c, .size = 8, .activate = 7 },
+-
+- [SCARLETT2_CONFIG_AIR_SWITCH] = {
+- .offset = 0x95, .size = 8, .activate = 8 },
+-
+- [SCARLETT2_CONFIG_STANDALONE_SWITCH] = {
+- .offset = 0x8d, .size = 8, .activate = 6 },
+-} };
+-
+-/* proprietary request/response format */
+-struct scarlett2_usb_packet {
+- __le32 cmd;
+- __le16 size;
+- __le16 seq;
+- __le32 error;
+- __le32 pad;
+- u8 data[];
+-};
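+-/* 16-byte header (4 + 2 + 2 + 4 + 4) followed by the data payload */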
+-
+-static void scarlett2_fill_request_header(struct scarlett2_data *private,
+- struct scarlett2_usb_packet *req,
+- u32 cmd, u16 req_size)
+-{
+- /* sequence must go up by 1 for each request */
+- u16 seq = private->scarlett2_seq++;
+-
+- req->cmd = cpu_to_le32(cmd);
+- req->size = cpu_to_le16(req_size);
+- req->seq = cpu_to_le16(seq);
+- req->error = 0;
+- req->pad = 0;
+-}
+-
+-static int scarlett2_usb_tx(struct usb_device *dev, int interface,
+- void *buf, u16 size)
+-{
+- return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+- SCARLETT2_USB_CMD_REQ,
+- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+- 0, interface, buf, size);
+-}
+-
+-static int scarlett2_usb_rx(struct usb_device *dev, int interface,
+- u32 usb_req, void *buf, u16 size)
+-{
+- return snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+- usb_req,
+- USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+- 0, interface, buf, size);
+-}
+-
+-/* Send a proprietary format request to the Scarlett interface */
+-static int scarlett2_usb(
+- struct usb_mixer_interface *mixer, u32 cmd,
+- void *req_data, u16 req_size, void *resp_data, u16 resp_size)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- struct usb_device *dev = mixer->chip->dev;
+- struct scarlett2_usb_packet *req, *resp = NULL;
+- size_t req_buf_size = struct_size(req, data, req_size);
+- size_t resp_buf_size = struct_size(resp, data, resp_size);
+- int err;
+-
+- req = kmalloc(req_buf_size, GFP_KERNEL);
+- if (!req) {
+- err = -ENOMEM;
+- goto error;
+- }
+-
+- resp = kmalloc(resp_buf_size, GFP_KERNEL);
+- if (!resp) {
+- err = -ENOMEM;
+- goto error;
+- }
+-
+- mutex_lock(&private->usb_mutex);
+-
+- /* build request message and send it */
+-
+- scarlett2_fill_request_header(private, req, cmd, req_size);
+-
+- if (req_size)
+- memcpy(req->data, req_data, req_size);
+-
+- err = scarlett2_usb_tx(dev, private->bInterfaceNumber,
+- req, req_buf_size);
+-
+- if (err != req_buf_size) {
+- usb_audio_err(
+- mixer->chip,
+- "Scarlett Gen 2/3 USB request result cmd %x was %d\n",
+- cmd, err);
+- err = -EINVAL;
+- goto unlock;
+- }
+-
+- /* send a second message to get the response */
+-
+- err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
+- SCARLETT2_USB_CMD_RESP,
+- resp, resp_buf_size);
+-
+- /* validate the response */
+-
+- if (err != resp_buf_size) {
+- usb_audio_err(
+- mixer->chip,
+- "Scarlett Gen 2/3 USB response result cmd %x was %d "
+- "expected %zu\n",
+- cmd, err, resp_buf_size);
+- err = -EINVAL;
+- goto unlock;
+- }
+-
+-	/* cmd/seq/size should match, except when initialising:
+-	 * seq sent = 1, response = 0
+- */
+- if (resp->cmd != req->cmd ||
+- (resp->seq != req->seq &&
+- (le16_to_cpu(req->seq) != 1 || resp->seq != 0)) ||
+- resp_size != le16_to_cpu(resp->size) ||
+- resp->error ||
+- resp->pad) {
+- usb_audio_err(
+- mixer->chip,
+- "Scarlett Gen 2/3 USB invalid response; "
+- "cmd tx/rx %d/%d seq %d/%d size %d/%d "
+- "error %d pad %d\n",
+- le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
+- le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
+- resp_size, le16_to_cpu(resp->size),
+- le32_to_cpu(resp->error),
+- le32_to_cpu(resp->pad));
+- err = -EINVAL;
+- goto unlock;
+- }
+-
+- if (resp_data && resp_size > 0)
+- memcpy(resp_data, resp->data, resp_size);
+-
+-unlock:
+- mutex_unlock(&private->usb_mutex);
+-error:
+- kfree(req);
+- kfree(resp);
+- return err;
+-}
+-
+-/* Send a USB message to get data; result placed in *buf */
+-static int scarlett2_usb_get(
+- struct usb_mixer_interface *mixer,
+- int offset, void *buf, int size)
+-{
+- struct {
+- __le32 offset;
+- __le32 size;
+- } __packed req;
+-
+- req.offset = cpu_to_le32(offset);
+- req.size = cpu_to_le32(size);
+- return scarlett2_usb(mixer, SCARLETT2_USB_GET_DATA,
+- &req, sizeof(req), buf, size);
+-}
+-
+-/* Send a USB message to get configuration parameters; result placed in *buf */
+-static int scarlett2_usb_get_config(
+- struct usb_mixer_interface *mixer,
+- int config_item_num, int count, void *buf)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const struct scarlett2_config *config_item =
+- &scarlett2_config_items[info->config_set][config_item_num];
+- int size, err, i;
+- u8 *buf_8;
+- u8 value;
+-
+- /* For byte-sized parameters, retrieve directly into buf */
+- if (config_item->size >= 8) {
+- size = config_item->size / 8 * count;
+- err = scarlett2_usb_get(mixer, config_item->offset, buf, size);
+- if (err < 0)
+- return err;
+- if (size == 2) {
+- u16 *buf_16 = buf;
+-
+- for (i = 0; i < count; i++, buf_16++)
+- *buf_16 = le16_to_cpu(*(__le16 *)buf_16);
+- }
+- return 0;
+- }
+-
+- /* For bit-sized parameters, retrieve into value */
+- err = scarlett2_usb_get(mixer, config_item->offset, &value, 1);
+- if (err < 0)
+- return err;
+-
+- /* then unpack from value into buf[] */
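+-	/* e.g. count = 4, value = 0b0101 -> buf[] = { 1, 0, 1, 0 } */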
+- buf_8 = buf;
+- for (i = 0; i < 8 && i < count; i++, value >>= 1)
+- *buf_8++ = value & 1;
+-
+- return 0;
+-}
+-
+-/* Send SCARLETT2_USB_DATA_CMD SCARLETT2_USB_CONFIG_SAVE */
+-static void scarlett2_config_save(struct usb_mixer_interface *mixer)
+-{
+- __le32 req = cpu_to_le32(SCARLETT2_USB_CONFIG_SAVE);
+-
+- scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+- &req, sizeof(u32),
+- NULL, 0);
+-}
+-
+-/* Delayed work to save config */
+-static void scarlett2_config_save_work(struct work_struct *work)
+-{
+- struct scarlett2_data *private =
+- container_of(work, struct scarlett2_data, work.work);
+-
+- scarlett2_config_save(private->mixer);
+-}
+-
+-/* Send a USB message to set a SCARLETT2_CONFIG_* parameter */
+-static int scarlett2_usb_set_config(
+- struct usb_mixer_interface *mixer,
+- int config_item_num, int index, int value)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const struct scarlett2_config *config_item =
+- &scarlett2_config_items[info->config_set][config_item_num];
+- struct {
+- __le32 offset;
+- __le32 bytes;
+- __le32 value;
+- } __packed req;
+- __le32 req2;
+- int offset, size;
+- int err;
+-
+- /* Cancel any pending NVRAM save */
+- cancel_delayed_work_sync(&private->work);
+-
+- /* Convert config_item->size in bits to size in bytes and
+- * calculate offset
+- */
+- if (config_item->size >= 8) {
+- size = config_item->size / 8;
+- offset = config_item->offset + index * size;
+-
+- /* If updating a bit, retrieve the old value, set/clear the
+- * bit as needed, and update value
+- */
+- } else {
+- u8 tmp;
+-
+- size = 1;
+- offset = config_item->offset;
+-
+- scarlett2_usb_get(mixer, offset, &tmp, 1);
+- if (value)
+- tmp |= (1 << index);
+- else
+- tmp &= ~(1 << index);
+-
+- value = tmp;
+- }
+-
+- /* Send the configuration parameter data */
+- req.offset = cpu_to_le32(offset);
+- req.bytes = cpu_to_le32(size);
+- req.value = cpu_to_le32(value);
+- err = scarlett2_usb(mixer, SCARLETT2_USB_SET_DATA,
+- &req, sizeof(u32) * 2 + size,
+- NULL, 0);
+- if (err < 0)
+- return err;
+-
+- /* Activate the change */
+- req2 = cpu_to_le32(config_item->activate);
+- err = scarlett2_usb(mixer, SCARLETT2_USB_DATA_CMD,
+- &req2, sizeof(req2), NULL, 0);
+- if (err < 0)
+- return err;
+-
+- /* Schedule the change to be written to NVRAM */
+- if (config_item->activate != SCARLETT2_USB_CONFIG_SAVE)
+- schedule_delayed_work(&private->work, msecs_to_jiffies(2000));
+-
+- return 0;
+-}
+-
+-/* Send a USB message to get sync status; result placed in *sync */
+-static int scarlett2_usb_get_sync_status(
+- struct usb_mixer_interface *mixer,
+- u8 *sync)
+-{
+- __le32 data;
+- int err;
+-
+- err = scarlett2_usb(mixer, SCARLETT2_USB_GET_SYNC,
+- NULL, 0, &data, sizeof(data));
+- if (err < 0)
+- return err;
+-
+- *sync = !!data;
+- return 0;
+-}
+-
+-/* Send a USB message to get volume status; result placed in *buf */
+-static int scarlett2_usb_get_volume_status(
+- struct usb_mixer_interface *mixer,
+- struct scarlett2_usb_volume_status *buf)
+-{
+- return scarlett2_usb_get(mixer, SCARLETT2_USB_VOLUME_STATUS_OFFSET,
+- buf, sizeof(*buf));
+-}
+-
+-/* Send a USB message to get the volumes for all inputs of one mix
+- * and put the values into private->mix[]
+- */
+-static int scarlett2_usb_get_mix(struct usb_mixer_interface *mixer,
+- int mix_num)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- int num_mixer_in =
+- info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+- int err, i, j, k;
+-
+- struct {
+- __le16 mix_num;
+- __le16 count;
+- } __packed req;
+-
+- __le16 data[SCARLETT2_INPUT_MIX_MAX];
+-
+- req.mix_num = cpu_to_le16(mix_num);
+- req.count = cpu_to_le16(num_mixer_in);
+-
+- err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MIX,
+- &req, sizeof(req),
+- data, num_mixer_in * sizeof(u16));
+- if (err < 0)
+- return err;
+-
+- for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++) {
+- u16 mixer_value = le16_to_cpu(data[i]);
+-
+- for (k = 0; k < SCARLETT2_MIXER_VALUE_COUNT; k++)
+- if (scarlett2_mixer_values[k] >= mixer_value)
+- break;
+- if (k == SCARLETT2_MIXER_VALUE_COUNT)
+- k = SCARLETT2_MIXER_MAX_VALUE;
+- private->mix[j] = k;
+- }
+-
+- return 0;
+-}
+-
+-/* Send a USB message to set the volumes for all inputs of one mix
+- * (values obtained from private->mix[])
+- */
+-static int scarlett2_usb_set_mix(struct usb_mixer_interface *mixer,
+- int mix_num)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- struct {
+- __le16 mix_num;
+- __le16 data[SCARLETT2_INPUT_MIX_MAX];
+- } __packed req;
+-
+- int i, j;
+- int num_mixer_in =
+- info->port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+-
+- req.mix_num = cpu_to_le16(mix_num);
+-
+- for (i = 0, j = mix_num * num_mixer_in; i < num_mixer_in; i++, j++)
+- req.data[i] = cpu_to_le16(
+- scarlett2_mixer_values[private->mix[j]]
+- );
+-
+- return scarlett2_usb(mixer, SCARLETT2_USB_SET_MIX,
+- &req, (num_mixer_in + 1) * sizeof(u16),
+- NULL, 0);
+-}
+-
+-/* Convert a port number index (per info->port_count) to a hardware ID */
+-static u32 scarlett2_mux_src_num_to_id(
+- const int port_count[][SCARLETT2_PORT_DIRNS], int num)
+-{
+- int port_type;
+-
+- for (port_type = 0;
+- port_type < SCARLETT2_PORT_TYPE_COUNT;
+- port_type++) {
+- if (num < port_count[port_type][SCARLETT2_PORT_IN])
+- return scarlett2_ports[port_type].id | num;
+- num -= port_count[port_type][SCARLETT2_PORT_IN];
+- }
+-
+- /* Oops */
+- return 0;
+-}
+-
+-/* Convert a hardware ID to a port number index */
+-static u32 scarlett2_mux_id_to_num(
+- const int port_count[][SCARLETT2_PORT_DIRNS], int direction, u32 id)
+-{
+- int port_type;
+- int port_num = 0;
+-
+- for (port_type = 0;
+- port_type < SCARLETT2_PORT_TYPE_COUNT;
+- port_type++) {
+- int base = scarlett2_ports[port_type].id;
+- int count = port_count[port_type][direction];
+-
+- if (id >= base && id < base + count)
+- return port_num + id - base;
+- port_num += count;
+- }
+-
+- /* Oops */
+- return -1;
+-}
+-
+-/* Convert one mux entry from the interface and load into private->mux[] */
+-static void scarlett2_usb_populate_mux(struct scarlett2_data *private,
+- u32 mux_entry)
+-{
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+-
+- int dst_idx, src_idx;
+-
+- dst_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_OUT,
+- mux_entry & 0xFFF);
+- if (dst_idx < 0)
+- return;
+-
+- if (dst_idx >= private->num_mux_dsts) {
+- usb_audio_err(private->mixer->chip,
+- "BUG: scarlett2_mux_id_to_num(%06x, OUT): %d >= %d",
+- mux_entry, dst_idx, private->num_mux_dsts);
+- return;
+- }
+-
+- src_idx = scarlett2_mux_id_to_num(port_count, SCARLETT2_PORT_IN,
+- mux_entry >> 12);
+- if (src_idx < 0)
+- return;
+-
+- if (src_idx >= private->num_mux_srcs) {
+- usb_audio_err(private->mixer->chip,
+- "BUG: scarlett2_mux_id_to_num(%06x, IN): %d >= %d",
+- mux_entry, src_idx, private->num_mux_srcs);
+- return;
+- }
+-
+- private->mux[dst_idx] = src_idx;
+-}
+-
+-/* Send USB message to get mux inputs and then populate private->mux[] */
+-static int scarlett2_usb_get_mux(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- int count = private->num_mux_dsts;
+- int err, i;
+-
+- struct {
+- __le16 num;
+- __le16 count;
+- } __packed req;
+-
+- __le32 data[SCARLETT2_MUX_MAX];
+-
+- private->mux_updated = 0;
+-
+- req.num = 0;
+- req.count = cpu_to_le16(count);
+-
+- err = scarlett2_usb(mixer, SCARLETT2_USB_GET_MUX,
+- &req, sizeof(req),
+- data, count * sizeof(u32));
+- if (err < 0)
+- return err;
+-
+- for (i = 0; i < count; i++)
+- scarlett2_usb_populate_mux(private, le32_to_cpu(data[i]));
+-
+- return 0;
+-}
+-
+-/* Send USB messages to set mux inputs */
+-static int scarlett2_usb_set_mux(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int table;
+-
+- struct {
+- __le16 pad;
+- __le16 num;
+- __le32 data[SCARLETT2_MUX_MAX];
+- } __packed req;
+-
+- req.pad = 0;
+-
+- /* set mux settings for each rate */
+- for (table = 0; table < SCARLETT2_MUX_TABLES; table++) {
+- const struct scarlett2_mux_entry *entry;
+-
+- /* i counts over the output array */
+- int i = 0, err;
+-
+- req.num = cpu_to_le16(table);
+-
+- /* loop through each entry */
+- for (entry = info->mux_assignment[table];
+- entry->count;
+- entry++) {
+- int j;
+- int port_type = entry->port_type;
+- int port_idx = entry->start;
+- int mux_idx = scarlett2_get_port_start_num(port_count,
+- SCARLETT2_PORT_OUT, port_type) + port_idx;
+- int dst_id = scarlett2_ports[port_type].id + port_idx;
+-
+- /* Empty slots */
+- if (!dst_id) {
+- for (j = 0; j < entry->count; j++)
+- req.data[i++] = 0;
+- continue;
+- }
+-
+- /* Non-empty mux slots use the lower 12 bits
+- * for the destination and next 12 bits for
+- * the source
+- */
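+-			/* e.g. dst Analogue 2 (0x081), src PCM 1
+-			 * (0x600): 0x081 | 0x600 << 12 = 0x600081
+-			 */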
+- for (j = 0; j < entry->count; j++) {
+- int src_id = scarlett2_mux_src_num_to_id(
+- port_count, private->mux[mux_idx++]);
+- req.data[i++] = cpu_to_le32(dst_id |
+- src_id << 12);
+- dst_id++;
+- }
+- }
+-
+- err = scarlett2_usb(mixer, SCARLETT2_USB_SET_MUX,
+- &req, (i + 1) * sizeof(u32),
+- NULL, 0);
+- if (err < 0)
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-/* Send USB message to get meter levels */
+-static int scarlett2_usb_get_meter_levels(struct usb_mixer_interface *mixer,
+- u16 num_meters, u16 *levels)
+-{
+- struct {
+- __le16 pad;
+- __le16 num_meters;
+- __le32 magic;
+- } __packed req;
+- u32 resp[SCARLETT2_MAX_METERS];
+- int i, err;
+-
+- req.pad = 0;
+- req.num_meters = cpu_to_le16(num_meters);
+- req.magic = cpu_to_le32(SCARLETT2_USB_METER_LEVELS_GET_MAGIC);
+- err = scarlett2_usb(mixer, SCARLETT2_USB_GET_METER,
+- &req, sizeof(req), resp, num_meters * sizeof(u32));
+- if (err < 0)
+- return err;
+-
+- /* copy, convert to u16 */
+- for (i = 0; i < num_meters; i++)
+- levels[i] = resp[i];
+-
+- return 0;
+-}
+-
+-/*** Control Functions ***/
+-
+-/* helper function to create a new control */
+-static int scarlett2_add_new_ctl(struct usb_mixer_interface *mixer,
+- const struct snd_kcontrol_new *ncontrol,
+- int index, int channels, const char *name,
+- struct snd_kcontrol **kctl_return)
+-{
+- struct snd_kcontrol *kctl;
+- struct usb_mixer_elem_info *elem;
+- int err;
+-
+- elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+- if (!elem)
+- return -ENOMEM;
+-
+- /* We set USB_MIXER_BESPOKEN type, so that the core USB mixer code
+- * ignores them for resume and other operations.
+- * Also, the head.id field is set to 0, as we don't use this field.
+- */
+- elem->head.mixer = mixer;
+- elem->control = index;
+- elem->head.id = 0;
+- elem->channels = channels;
+- elem->val_type = USB_MIXER_BESPOKEN;
+-
+- kctl = snd_ctl_new1(ncontrol, elem);
+- if (!kctl) {
+- kfree(elem);
+- return -ENOMEM;
+- }
+- kctl->private_free = snd_usb_mixer_elem_free;
+-
+- strscpy(kctl->id.name, name, sizeof(kctl->id.name));
+-
+- err = snd_usb_mixer_add_control(&elem->head, kctl);
+- if (err < 0)
+- return err;
+-
+- if (kctl_return)
+- *kctl_return = kctl;
+-
+- return 0;
+-}
+-
+-/*** Sync Control ***/
+-
+-/* Update sync control after receiving notification that the status
+- * has changed
+- */
+-static int scarlett2_update_sync(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- private->sync_updated = 0;
+- return scarlett2_usb_get_sync_status(mixer, &private->sync);
+-}
+-
+-static int scarlett2_sync_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- static const char *texts[2] = {
+- "Unlocked", "Locked"
+- };
+- return snd_ctl_enum_info(uinfo, 1, 2, texts);
+-}
+-
+-static int scarlett2_sync_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->sync_updated)
+- scarlett2_update_sync(mixer);
+- ucontrol->value.enumerated.item[0] = private->sync;
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_sync_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .access = SNDRV_CTL_ELEM_ACCESS_READ,
+- .name = "",
+- .info = scarlett2_sync_ctl_info,
+- .get = scarlett2_sync_ctl_get
+-};
+-
+-static int scarlett2_add_sync_ctl(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- /* devices without a mixer also don't support reporting sync status */
+- if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+- return 0;
+-
+- return scarlett2_add_new_ctl(mixer, &scarlett2_sync_ctl,
+- 0, 1, "Sync Status", &private->sync_ctl);
+-}
+-
+-/*** Analogue Line Out Volume Controls ***/
+-
+-/* Update hardware volume controls after receiving notification that
+- * they have changed
+- */
+-static int scarlett2_update_volumes(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- struct scarlett2_usb_volume_status volume_status;
+- int num_line_out =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int err, i;
+- int mute;
+-
+- private->vol_updated = 0;
+-
+- err = scarlett2_usb_get_volume_status(mixer, &volume_status);
+- if (err < 0)
+- return err;
+-
+- private->master_vol = clamp(
+- volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
+- 0, SCARLETT2_VOLUME_BIAS);
+-
+- if (info->line_out_hw_vol)
+- for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+- private->dim_mute[i] = !!volume_status.dim_mute[i];
+-
+- mute = private->dim_mute[SCARLETT2_BUTTON_MUTE];
+-
+- for (i = 0; i < num_line_out; i++)
+- if (private->vol_sw_hw_switch[i]) {
+- private->vol[i] = private->master_vol;
+- private->mute_switch[i] = mute;
+- }
+-
+- return 0;
+-}
+-
+-static int scarlett2_volume_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+-
+- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+- uinfo->count = elem->channels;
+- uinfo->value.integer.min = 0;
+- uinfo->value.integer.max = SCARLETT2_VOLUME_BIAS;
+- uinfo->value.integer.step = 1;
+- return 0;
+-}
+-
+-static int scarlett2_master_volume_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+-
+- ucontrol->value.integer.value[0] = private->master_vol;
+- return 0;
+-}
+-
+-static int line_out_remap(struct scarlett2_data *private, int index)
+-{
+- const struct scarlett2_device_info *info = private->info;
+-
+- if (!info->line_out_remap_enable)
+- return index;
+- return info->line_out_remap[index];
+-}
+-
+-static int scarlett2_volume_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- int index = line_out_remap(private, elem->control);
+-
+- mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+-
+- ucontrol->value.integer.value[0] = private->vol[index];
+- return 0;
+-}
+-
+-static int scarlett2_volume_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- int index = line_out_remap(private, elem->control);
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->vol[index];
+- val = ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->vol[index] = val;
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
+- index, val - SCARLETT2_VOLUME_BIAS);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const DECLARE_TLV_DB_MINMAX(
+- db_scale_scarlett2_gain, -SCARLETT2_VOLUME_BIAS * 100, 0
+-);
+-
+-static const struct snd_kcontrol_new scarlett2_master_volume_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .access = SNDRV_CTL_ELEM_ACCESS_READ |
+- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+- .name = "",
+- .info = scarlett2_volume_ctl_info,
+- .get = scarlett2_master_volume_ctl_get,
+- .private_value = 0, /* max value */
+- .tlv = { .p = db_scale_scarlett2_gain }
+-};
+-
+-static const struct snd_kcontrol_new scarlett2_line_out_volume_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+- .name = "",
+- .info = scarlett2_volume_ctl_info,
+- .get = scarlett2_volume_ctl_get,
+- .put = scarlett2_volume_ctl_put,
+- .private_value = 0, /* max value */
+- .tlv = { .p = db_scale_scarlett2_gain }
+-};
+-
+-/*** Mute Switch Controls ***/
+-
+-static int scarlett2_mute_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- int index = line_out_remap(private, elem->control);
+-
+- mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+-
+- ucontrol->value.integer.value[0] = private->mute_switch[index];
+- return 0;
+-}
+-
+-static int scarlett2_mute_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- int index = line_out_remap(private, elem->control);
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->mute_switch[index];
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->mute_switch[index] = val;
+-
+- /* Send mute change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
+- index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_mute_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_mute_ctl_get,
+- .put = scarlett2_mute_ctl_put,
+-};
+-
+-/*** HW/SW Volume Switch Controls ***/
+-
+-static void scarlett2_sw_hw_ctl_ro(struct scarlett2_data *private, int index)
+-{
+- private->sw_hw_ctls[index]->vd[0].access &=
+- ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+-}
+-
+-static void scarlett2_sw_hw_ctl_rw(struct scarlett2_data *private, int index)
+-{
+- private->sw_hw_ctls[index]->vd[0].access |=
+- SNDRV_CTL_ELEM_ACCESS_WRITE;
+-}
+-
+-static int scarlett2_sw_hw_enum_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- static const char *const values[2] = {
+- "SW", "HW"
+- };
+-
+- return snd_ctl_enum_info(uinfo, 1, 2, values);
+-}
+-
+-static int scarlett2_sw_hw_enum_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+- int index = line_out_remap(private, elem->control);
+-
+- ucontrol->value.enumerated.item[0] = private->vol_sw_hw_switch[index];
+- return 0;
+-}
+-
+-static void scarlett2_vol_ctl_set_writable(struct usb_mixer_interface *mixer,
+- int index, int value)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- struct snd_card *card = mixer->chip->card;
+-
+- /* Set/Clear write bits */
+- if (value) {
+- private->vol_ctls[index]->vd[0].access |=
+- SNDRV_CTL_ELEM_ACCESS_WRITE;
+- private->mute_ctls[index]->vd[0].access |=
+- SNDRV_CTL_ELEM_ACCESS_WRITE;
+- } else {
+- private->vol_ctls[index]->vd[0].access &=
+- ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+- private->mute_ctls[index]->vd[0].access &=
+- ~SNDRV_CTL_ELEM_ACCESS_WRITE;
+- }
+-
+- /* Notify of write bit and possible value change */
+- snd_ctl_notify(card,
+- SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+- &private->vol_ctls[index]->id);
+- snd_ctl_notify(card,
+- SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+- &private->mute_ctls[index]->id);
+-}
+-
+-static int scarlett2_sw_hw_change(struct usb_mixer_interface *mixer,
+- int ctl_index, int val)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- int index = line_out_remap(private, ctl_index);
+- int err;
+-
+- private->vol_sw_hw_switch[index] = val;
+-
+- /* Change access mode to RO (hardware controlled volume)
+- * or RW (software controlled volume)
+- */
+- scarlett2_vol_ctl_set_writable(mixer, ctl_index, !val);
+-
+- /* Reset volume/mute to master volume/mute */
+- private->vol[index] = private->master_vol;
+- private->mute_switch[index] = private->dim_mute[SCARLETT2_BUTTON_MUTE];
+-
+- /* Set SW volume to current HW volume */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_LINE_OUT_VOLUME,
+- index, private->master_vol - SCARLETT2_VOLUME_BIAS);
+- if (err < 0)
+- return err;
+-
+- /* Set SW mute to current HW mute */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_MUTE_SWITCH,
+- index, private->dim_mute[SCARLETT2_BUTTON_MUTE]);
+- if (err < 0)
+- return err;
+-
+- /* Send SW/HW switch change to the device */
+- return scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_SW_HW_SWITCH,
+- index, val);
+-}
+-
+-static int scarlett2_sw_hw_enum_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- int ctl_index = elem->control;
+- int index = line_out_remap(private, ctl_index);
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->vol_sw_hw_switch[index];
+- val = !!ucontrol->value.enumerated.item[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- err = scarlett2_sw_hw_change(mixer, ctl_index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_sw_hw_enum_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = scarlett2_sw_hw_enum_ctl_info,
+- .get = scarlett2_sw_hw_enum_ctl_get,
+- .put = scarlett2_sw_hw_enum_ctl_put,
+-};
+-
+-/*** Line Level/Instrument Level Switch Controls ***/
+-
+-static int scarlett2_update_input_other(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- private->input_other_updated = 0;
+-
+- if (info->level_input_count) {
+- int err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
+- info->level_input_count + info->level_input_first,
+- private->level_switch);
+- if (err < 0)
+- return err;
+- }
+-
+- if (info->pad_input_count) {
+- int err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_PAD_SWITCH,
+- info->pad_input_count, private->pad_switch);
+- if (err < 0)
+- return err;
+- }
+-
+- if (info->air_input_count) {
+- int err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_AIR_SWITCH,
+- info->air_input_count, private->air_switch);
+- if (err < 0)
+- return err;
+- }
+-
+- if (info->phantom_count) {
+- int err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
+- info->phantom_count, private->phantom_switch);
+- if (err < 0)
+- return err;
+-
+- err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE,
+- 1, &private->phantom_persistence);
+- if (err < 0)
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-static int scarlett2_level_enum_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- static const char *const values[2] = {
+- "Line", "Inst"
+- };
+-
+- return snd_ctl_enum_info(uinfo, 1, 2, values);
+-}
+-
+-static int scarlett2_level_enum_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- int index = elem->control + info->level_input_first;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
+- ucontrol->value.enumerated.item[0] = private->level_switch[index];
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_level_enum_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- int index = elem->control + info->level_input_first;
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->level_switch[index];
+- val = !!ucontrol->value.enumerated.item[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->level_switch[index] = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_LEVEL_SWITCH,
+- index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_level_enum_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = scarlett2_level_enum_ctl_info,
+- .get = scarlett2_level_enum_ctl_get,
+- .put = scarlett2_level_enum_ctl_put,
+-};
+-
+-/*** Pad Switch Controls ***/
+-
+-static int scarlett2_pad_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
+- ucontrol->value.integer.value[0] =
+- private->pad_switch[elem->control];
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_pad_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int index = elem->control;
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->pad_switch[index];
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->pad_switch[index] = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PAD_SWITCH,
+- index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_pad_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_pad_ctl_get,
+- .put = scarlett2_pad_ctl_put,
+-};
+-
+-/*** Air Switch Controls ***/
+-
+-static int scarlett2_air_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
+- ucontrol->value.integer.value[0] = private->air_switch[elem->control];
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_air_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int index = elem->control;
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->air_switch[index];
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->air_switch[index] = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_AIR_SWITCH,
+- index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_air_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_air_ctl_get,
+- .put = scarlett2_air_ctl_put,
+-};
+-
+-/*** Phantom Switch Controls ***/
+-
+-static int scarlett2_phantom_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->input_other_updated)
+- scarlett2_update_input_other(mixer);
+- ucontrol->value.integer.value[0] =
+- private->phantom_switch[elem->control];
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_phantom_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int index = elem->control;
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->phantom_switch[index];
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->phantom_switch[index] = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_PHANTOM_SWITCH,
+- index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_phantom_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_phantom_ctl_get,
+- .put = scarlett2_phantom_ctl_put,
+-};
+-
+-/*** Phantom Persistence Control ***/
+-
+-static int scarlett2_phantom_persistence_ctl_get(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+- ucontrol->value.integer.value[0] = private->phantom_persistence;
+- return 0;
+-}
+-
+-static int scarlett2_phantom_persistence_ctl_put(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int index = elem->control;
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->phantom_persistence;
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->phantom_persistence = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_PHANTOM_PERSISTENCE, index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_phantom_persistence_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_phantom_persistence_ctl_get,
+- .put = scarlett2_phantom_persistence_ctl_put,
+-};
+-
+-/*** Direct Monitor Control ***/
+-
+-static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- int err;
+-
+- /* monitor_other_enable[0] enables speaker switching
+- * monitor_other_enable[1] enables talkback
+- */
+- u8 monitor_other_enable[2];
+-
+- /* monitor_other_switch[0] activates the alternate speakers
+- * monitor_other_switch[1] activates talkback
+- */
+- u8 monitor_other_switch[2];
+-
+- private->monitor_other_updated = 0;
+-
+- if (info->direct_monitor)
+- return scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_DIRECT_MONITOR,
+- 1, &private->direct_monitor_switch);
+-
+- /* if it doesn't do speaker switching then it also doesn't do
+- * talkback
+- */
+- if (!info->has_speaker_switching)
+- return 0;
+-
+- err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+- 2, monitor_other_enable);
+- if (err < 0)
+- return err;
+-
+- err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+- 2, monitor_other_switch);
+- if (err < 0)
+- return err;
+-
+- if (!monitor_other_enable[0])
+- private->speaker_switching_switch = 0;
+- else
+- private->speaker_switching_switch = monitor_other_switch[0] + 1;
+-
+- if (info->has_talkback) {
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] =
+- info->port_count;
+- int num_mixes =
+- port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+- u16 bitmap;
+- int i;
+-
+- if (!monitor_other_enable[1])
+- private->talkback_switch = 0;
+- else
+- private->talkback_switch = monitor_other_switch[1] + 1;
+-
+- err = scarlett2_usb_get_config(mixer,
+- SCARLETT2_CONFIG_TALKBACK_MAP,
+- 1, &bitmap);
+- if (err < 0)
+- return err;
+- for (i = 0; i < num_mixes; i++, bitmap >>= 1)
+- private->talkback_map[i] = bitmap & 1;
+- }
+-
+- return 0;
+-}
+-
+-static int scarlett2_direct_monitor_ctl_get(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->monitor_other_updated)
+- scarlett2_update_monitor_other(mixer);
+- ucontrol->value.enumerated.item[0] = private->direct_monitor_switch;
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_direct_monitor_ctl_put(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int index = elem->control;
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->direct_monitor_switch;
+- val = min(ucontrol->value.enumerated.item[0], 2U);
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->direct_monitor_switch = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_DIRECT_MONITOR, index, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static int scarlett2_direct_monitor_stereo_enum_ctl_info(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+-{
+- static const char *const values[3] = {
+- "Off", "Mono", "Stereo"
+- };
+-
+- return snd_ctl_enum_info(uinfo, 1, 3, values);
+-}
+-
+-/* Direct Monitor for Solo is mono-only and only needs a boolean control
+- * Direct Monitor for 2i2 is selectable between Off/Mono/Stereo
+- */
+-static const struct snd_kcontrol_new scarlett2_direct_monitor_ctl[2] = {
+- {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_direct_monitor_ctl_get,
+- .put = scarlett2_direct_monitor_ctl_put,
+- },
+- {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = scarlett2_direct_monitor_stereo_enum_ctl_info,
+- .get = scarlett2_direct_monitor_ctl_get,
+- .put = scarlett2_direct_monitor_ctl_put,
+- }
+-};
+-
+-static int scarlett2_add_direct_monitor_ctl(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const char *s;
+-
+- if (!info->direct_monitor)
+- return 0;
+-
+- s = info->direct_monitor == 1
+- ? "Direct Monitor Playback Switch"
+- : "Direct Monitor Playback Enum";
+-
+- return scarlett2_add_new_ctl(
+- mixer, &scarlett2_direct_monitor_ctl[info->direct_monitor - 1],
+- 0, 1, s, &private->direct_monitor_ctl);
+-}
+-
+-/*** Speaker Switching Control ***/
+-
+-static int scarlett2_speaker_switch_enum_ctl_info(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+-{
+- static const char *const values[3] = {
+- "Off", "Main", "Alt"
+- };
+-
+- return snd_ctl_enum_info(uinfo, 1, 3, values);
+-}
+-
+-static int scarlett2_speaker_switch_enum_ctl_get(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->monitor_other_updated)
+- scarlett2_update_monitor_other(mixer);
+- ucontrol->value.enumerated.item[0] = private->speaker_switching_switch;
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-/* when speaker switching gets enabled, switch the main/alt speakers
+- * to HW volume and disable those controls
+- */
+-static int scarlett2_speaker_switch_enable(struct usb_mixer_interface *mixer)
+-{
+- struct snd_card *card = mixer->chip->card;
+- struct scarlett2_data *private = mixer->private_data;
+- int i, err;
+-
+- for (i = 0; i < 4; i++) {
+- int index = line_out_remap(private, i);
+-
+- /* switch the main/alt speakers to HW volume */
+- if (!private->vol_sw_hw_switch[index]) {
+- err = scarlett2_sw_hw_change(private->mixer, i, 1);
+- if (err < 0)
+- return err;
+- }
+-
+- /* disable the line out SW/HW switch */
+- scarlett2_sw_hw_ctl_ro(private, i);
+- snd_ctl_notify(card,
+- SNDRV_CTL_EVENT_MASK_VALUE |
+- SNDRV_CTL_EVENT_MASK_INFO,
+- &private->sw_hw_ctls[i]->id);
+- }
+-
+- /* when the next monitor-other notify comes in, update the mux
+- * configuration
+- */
+- private->speaker_switching_switched = 1;
+-
+- return 0;
+-}
+-
+-/* when speaker switching gets disabled, reenable the hw/sw controls
+- * and invalidate the routing
+- */
+-static void scarlett2_speaker_switch_disable(struct usb_mixer_interface *mixer)
+-{
+- struct snd_card *card = mixer->chip->card;
+- struct scarlett2_data *private = mixer->private_data;
+- int i;
+-
+- /* enable the line out SW/HW switch */
+- for (i = 0; i < 4; i++) {
+- scarlett2_sw_hw_ctl_rw(private, i);
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO,
+- &private->sw_hw_ctls[i]->id);
+- }
+-
+- /* when the next monitor-other notify comes in, update the mux
+- * configuration
+- */
+- private->speaker_switching_switched = 1;
+-}
+-
+-static int scarlett2_speaker_switch_enum_ctl_put(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->speaker_switching_switch;
+- val = min(ucontrol->value.enumerated.item[0], 2U);
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->speaker_switching_switch = val;
+-
+- /* enable/disable speaker switching */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+- 0, !!val);
+- if (err < 0)
+- goto unlock;
+-
+- /* if speaker switching is enabled, select main or alt */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+- 0, val == 2);
+- if (err < 0)
+- goto unlock;
+-
+- /* update controls if speaker switching gets enabled or disabled */
+- if (!oval && val)
+- err = scarlett2_speaker_switch_enable(mixer);
+- else if (oval && !val)
+- scarlett2_speaker_switch_disable(mixer);
+-
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_speaker_switch_enum_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = scarlett2_speaker_switch_enum_ctl_info,
+- .get = scarlett2_speaker_switch_enum_ctl_get,
+- .put = scarlett2_speaker_switch_enum_ctl_put,
+-};
+-
+-static int scarlett2_add_speaker_switch_ctl(
+- struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- if (!info->has_speaker_switching)
+- return 0;
+-
+- return scarlett2_add_new_ctl(
+- mixer, &scarlett2_speaker_switch_enum_ctl,
+- 0, 1, "Speaker Switching Playback Enum",
+- &private->speaker_switching_ctl);
+-}
+-
+-/*** Talkback and Talkback Map Controls ***/
+-
+-static int scarlett2_talkback_enum_ctl_info(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo)
+-{
+- static const char *const values[3] = {
+- "Disabled", "Off", "On"
+- };
+-
+- return snd_ctl_enum_info(uinfo, 1, 3, values);
+-}
+-
+-static int scarlett2_talkback_enum_ctl_get(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->monitor_other_updated)
+- scarlett2_update_monitor_other(mixer);
+- ucontrol->value.enumerated.item[0] = private->talkback_switch;
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_talkback_enum_ctl_put(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->talkback_switch;
+- val = min(ucontrol->value.enumerated.item[0], 2U);
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->talkback_switch = val;
+-
+- /* enable/disable talkback */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_MONITOR_OTHER_ENABLE,
+- 1, !!val);
+- if (err < 0)
+- goto unlock;
+-
+- /* if talkback is enabled, select main or alt */
+- err = scarlett2_usb_set_config(
+- mixer, SCARLETT2_CONFIG_MONITOR_OTHER_SWITCH,
+- 1, val == 2);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_talkback_enum_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = scarlett2_talkback_enum_ctl_info,
+- .get = scarlett2_talkback_enum_ctl_get,
+- .put = scarlett2_talkback_enum_ctl_put,
+-};
+-
+-static int scarlett2_talkback_map_ctl_get(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- int index = elem->control;
+-
+- ucontrol->value.integer.value[0] = private->talkback_map[index];
+-
+- return 0;
+-}
+-
+-static int scarlett2_talkback_map_ctl_put(
+- struct snd_kcontrol *kctl, struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] =
+- private->info->port_count;
+- int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-
+- int index = elem->control;
+- int oval, val, err = 0, i;
+- u16 bitmap = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->talkback_map[index];
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->talkback_map[index] = val;
+-
+- for (i = 0; i < num_mixes; i++)
+- bitmap |= private->talkback_map[i] << i;
+-
+- /* Send updated bitmap to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_TALKBACK_MAP,
+- 0, bitmap);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_talkback_map_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_talkback_map_ctl_get,
+- .put = scarlett2_talkback_map_ctl_put,
+-};
+-
+-static int scarlett2_add_talkback_ctls(
+- struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int num_mixes = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+- int err, i;
+- char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-
+- if (!info->has_talkback)
+- return 0;
+-
+- err = scarlett2_add_new_ctl(
+- mixer, &scarlett2_talkback_enum_ctl,
+- 0, 1, "Talkback Playback Enum",
+- &private->talkback_ctl);
+- if (err < 0)
+- return err;
+-
+- for (i = 0; i < num_mixes; i++) {
+- snprintf(s, sizeof(s),
+- "Talkback Mix %c Playback Switch", i + 'A');
+- err = scarlett2_add_new_ctl(mixer, &scarlett2_talkback_map_ctl,
+- i, 1, s, NULL);
+- if (err < 0)
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-/*** Dim/Mute Controls ***/
+-
+-static int scarlett2_dim_mute_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- mutex_lock(&private->data_mutex);
+- if (private->vol_updated)
+- scarlett2_update_volumes(mixer);
+- mutex_unlock(&private->data_mutex);
+-
+- ucontrol->value.integer.value[0] = private->dim_mute[elem->control];
+- return 0;
+-}
+-
+-static int scarlett2_dim_mute_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int num_line_out =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+-
+- int index = elem->control;
+- int oval, val, err = 0, i;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->dim_mute[index];
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->dim_mute[index] = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_DIM_MUTE,
+- index, val);
+- if (err == 0)
+- err = 1;
+-
+- if (index == SCARLETT2_BUTTON_MUTE)
+- for (i = 0; i < num_line_out; i++) {
+- int line_index = line_out_remap(private, i);
+-
+- if (private->vol_sw_hw_switch[line_index]) {
+- private->mute_switch[line_index] = val;
+- snd_ctl_notify(mixer->chip->card,
+- SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->mute_ctls[i]->id);
+- }
+- }
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_dim_mute_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_dim_mute_ctl_get,
+- .put = scarlett2_dim_mute_ctl_put
+-};
+-
+-/*** Create the analogue output controls ***/
+-
+-static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int num_line_out =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int err, i;
+- char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-
+- /* Add R/O HW volume control */
+- if (info->line_out_hw_vol) {
+- snprintf(s, sizeof(s), "Master HW Playback Volume");
+- err = scarlett2_add_new_ctl(mixer,
+- &scarlett2_master_volume_ctl,
+- 0, 1, s, &private->master_vol_ctl);
+- if (err < 0)
+- return err;
+- }
+-
+- /* Add volume controls */
+- for (i = 0; i < num_line_out; i++) {
+- int index = line_out_remap(private, i);
+-
+- /* Fader */
+- if (info->line_out_descrs[i])
+- snprintf(s, sizeof(s),
+- "Line %02d (%s) Playback Volume",
+- i + 1, info->line_out_descrs[i]);
+- else
+- snprintf(s, sizeof(s),
+- "Line %02d Playback Volume",
+- i + 1);
+- err = scarlett2_add_new_ctl(mixer,
+- &scarlett2_line_out_volume_ctl,
+- i, 1, s, &private->vol_ctls[i]);
+- if (err < 0)
+- return err;
+-
+- /* Mute Switch */
+- snprintf(s, sizeof(s),
+- "Line %02d Mute Playback Switch",
+- i + 1);
+- err = scarlett2_add_new_ctl(mixer,
+- &scarlett2_mute_ctl,
+- i, 1, s,
+- &private->mute_ctls[i]);
+- if (err < 0)
+- return err;
+-
+- /* Make the fader and mute controls read-only if the
+- * SW/HW switch is set to HW
+- */
+- if (private->vol_sw_hw_switch[index])
+- scarlett2_vol_ctl_set_writable(mixer, i, 0);
+-
+- /* SW/HW Switch */
+- if (info->line_out_hw_vol) {
+- snprintf(s, sizeof(s),
+- "Line Out %02d Volume Control Playback Enum",
+- i + 1);
+- err = scarlett2_add_new_ctl(mixer,
+- &scarlett2_sw_hw_enum_ctl,
+- i, 1, s,
+- &private->sw_hw_ctls[i]);
+- if (err < 0)
+- return err;
+-
+- /* Make the switch read-only if the line is
+- * involved in speaker switching
+- */
+- if (private->speaker_switching_switch && i < 4)
+- scarlett2_sw_hw_ctl_ro(private, i);
+- }
+- }
+-
+- /* Add dim/mute controls */
+- if (info->line_out_hw_vol)
+- for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++) {
+- err = scarlett2_add_new_ctl(
+- mixer, &scarlett2_dim_mute_ctl,
+- i, 1, scarlett2_dim_mute_names[i],
+- &private->dim_mute_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-/*** Create the analogue input controls ***/
+-
+-static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- int err, i;
+- char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+- const char *fmt = "Line In %d %s Capture %s";
+- const char *fmt2 = "Line In %d-%d %s Capture %s";
+-
+- /* Add input level (line/inst) controls */
+- for (i = 0; i < info->level_input_count; i++) {
+- snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
+- "Level", "Enum");
+- err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
+- i, 1, s, &private->level_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+-
+- /* Add input pad controls */
+- for (i = 0; i < info->pad_input_count; i++) {
+- snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
+- err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
+- i, 1, s, &private->pad_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+-
+- /* Add input air controls */
+- for (i = 0; i < info->air_input_count; i++) {
+- snprintf(s, sizeof(s), fmt, i + 1, "Air", "Switch");
+- err = scarlett2_add_new_ctl(mixer, &scarlett2_air_ctl,
+- i, 1, s, &private->air_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+-
+- /* Add input phantom controls */
+- if (info->inputs_per_phantom == 1) {
+- for (i = 0; i < info->phantom_count; i++) {
+- scnprintf(s, sizeof(s), fmt, i + 1,
+- "Phantom Power", "Switch");
+- err = scarlett2_add_new_ctl(
+- mixer, &scarlett2_phantom_ctl,
+- i, 1, s, &private->phantom_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+- } else if (info->inputs_per_phantom > 1) {
+- for (i = 0; i < info->phantom_count; i++) {
+- int from = i * info->inputs_per_phantom + 1;
+- int to = (i + 1) * info->inputs_per_phantom;
+-
+- scnprintf(s, sizeof(s), fmt2, from, to,
+- "Phantom Power", "Switch");
+- err = scarlett2_add_new_ctl(
+- mixer, &scarlett2_phantom_ctl,
+- i, 1, s, &private->phantom_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+- }
+- if (info->phantom_count) {
+- err = scarlett2_add_new_ctl(
+- mixer, &scarlett2_phantom_persistence_ctl, 0, 1,
+- "Phantom Power Persistence Capture Switch", NULL);
+- if (err < 0)
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-/*** Mixer Volume Controls ***/
+-
+-static int scarlett2_mixer_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+-
+- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+- uinfo->count = elem->channels;
+- uinfo->value.integer.min = 0;
+- uinfo->value.integer.max = SCARLETT2_MIXER_MAX_VALUE;
+- uinfo->value.integer.step = 1;
+- return 0;
+-}
+-
+-static int scarlett2_mixer_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+- ucontrol->value.integer.value[0] = private->mix[elem->control];
+- return 0;
+-}
+-
+-static int scarlett2_mixer_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int oval, val, num_mixer_in, mix_num, err = 0;
+- int index = elem->control;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->mix[index];
+- val = ucontrol->value.integer.value[0];
+- num_mixer_in = port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+- mix_num = index / num_mixer_in;
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->mix[index] = val;
+- err = scarlett2_usb_set_mix(mixer, mix_num);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const DECLARE_TLV_DB_MINMAX(
+- db_scale_scarlett2_mixer,
+- SCARLETT2_MIXER_MIN_DB * 100,
+- SCARLETT2_MIXER_MAX_DB * 100
+-);
+-
+-static const struct snd_kcontrol_new scarlett2_mixer_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+- SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+- .name = "",
+- .info = scarlett2_mixer_ctl_info,
+- .get = scarlett2_mixer_ctl_get,
+- .put = scarlett2_mixer_ctl_put,
+- .private_value = SCARLETT2_MIXER_MAX_DB, /* max value */
+- .tlv = { .p = db_scale_scarlett2_mixer }
+-};
+-
+-static int scarlett2_add_mixer_ctls(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int err, i, j;
+- int index;
+- char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-
+- int num_inputs =
+- port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_OUT];
+- int num_outputs =
+- port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+-
+- for (i = 0, index = 0; i < num_outputs; i++)
+- for (j = 0; j < num_inputs; j++, index++) {
+- snprintf(s, sizeof(s),
+- "Mix %c Input %02d Playback Volume",
+- 'A' + i, j + 1);
+- err = scarlett2_add_new_ctl(mixer, &scarlett2_mixer_ctl,
+- index, 1, s, NULL);
+- if (err < 0)
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-/*** Mux Source Selection Controls ***/
+-
+-static int scarlett2_mux_src_enum_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- unsigned int item = uinfo->value.enumerated.item;
+- int items = private->num_mux_srcs;
+- int port_type;
+-
+- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+- uinfo->count = elem->channels;
+- uinfo->value.enumerated.items = items;
+-
+- if (item >= items)
+- item = uinfo->value.enumerated.item = items - 1;
+-
+- for (port_type = 0;
+- port_type < SCARLETT2_PORT_TYPE_COUNT;
+- port_type++) {
+- if (item < port_count[port_type][SCARLETT2_PORT_IN]) {
+- const struct scarlett2_port *port =
+- &scarlett2_ports[port_type];
+-
+- sprintf(uinfo->value.enumerated.name,
+- port->src_descr, item + port->src_num_offset);
+- return 0;
+- }
+- item -= port_count[port_type][SCARLETT2_PORT_IN];
+- }
+-
+- return -EINVAL;
+-}
+-
+-static int scarlett2_mux_src_enum_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int line_out_count =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int index = elem->control;
+-
+- if (index < line_out_count)
+- index = line_out_remap(private, index);
+-
+- mutex_lock(&private->data_mutex);
+- if (private->mux_updated)
+- scarlett2_usb_get_mux(mixer);
+- ucontrol->value.enumerated.item[0] = private->mux[index];
+- mutex_unlock(&private->data_mutex);
+-
+- return 0;
+-}
+-
+-static int scarlett2_mux_src_enum_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int line_out_count =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int index = elem->control;
+- int oval, val, err = 0;
+-
+- if (index < line_out_count)
+- index = line_out_remap(private, index);
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->mux[index];
+- val = min(ucontrol->value.enumerated.item[0],
+- private->num_mux_srcs - 1U);
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->mux[index] = val;
+- err = scarlett2_usb_set_mux(mixer);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_mux_src_enum_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = scarlett2_mux_src_enum_ctl_info,
+- .get = scarlett2_mux_src_enum_ctl_get,
+- .put = scarlett2_mux_src_enum_ctl_put,
+-};
+-
+-static int scarlett2_add_mux_enums(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int port_type, channel, i;
+-
+- for (i = 0, port_type = 0;
+- port_type < SCARLETT2_PORT_TYPE_COUNT;
+- port_type++) {
+- for (channel = 0;
+- channel < port_count[port_type][SCARLETT2_PORT_OUT];
+- channel++, i++) {
+- int err;
+- char s[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+- const char *const descr =
+- scarlett2_ports[port_type].dst_descr;
+-
+- snprintf(s, sizeof(s) - 5, descr, channel + 1);
+- strcat(s, " Enum");
+-
+- err = scarlett2_add_new_ctl(mixer,
+- &scarlett2_mux_src_enum_ctl,
+- i, 1, s,
+- &private->mux_ctls[i]);
+- if (err < 0)
+- return err;
+- }
+- }
+-
+- return 0;
+-}
+-
+-/*** Meter Controls ***/
+-
+-static int scarlett2_meter_ctl_info(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_info *uinfo)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+-
+- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+- uinfo->count = elem->channels;
+- uinfo->value.integer.min = 0;
+- uinfo->value.integer.max = 4095;
+- uinfo->value.integer.step = 1;
+- return 0;
+-}
+-
+-static int scarlett2_meter_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- u16 meter_levels[SCARLETT2_MAX_METERS];
+- int i, err;
+-
+- err = scarlett2_usb_get_meter_levels(elem->head.mixer, elem->channels,
+- meter_levels);
+- if (err < 0)
+- return err;
+-
+- for (i = 0; i < elem->channels; i++)
+- ucontrol->value.integer.value[i] = meter_levels[i];
+-
+- return 0;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_meter_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+- .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+- .name = "",
+- .info = scarlett2_meter_ctl_info,
+- .get = scarlett2_meter_ctl_get
+-};
+-
+-static int scarlett2_add_meter_ctl(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- /* devices without a mixer also don't support reporting levels */
+- if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+- return 0;
+-
+- return scarlett2_add_new_ctl(mixer, &scarlett2_meter_ctl,
+- 0, private->num_mux_dsts,
+- "Level Meter", NULL);
+-}
+-
+-/*** MSD Controls ***/
+-
+-static int scarlett2_msd_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+- ucontrol->value.integer.value[0] = private->msd_switch;
+- return 0;
+-}
+-
+-static int scarlett2_msd_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->msd_switch;
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->msd_switch = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer, SCARLETT2_CONFIG_MSD_SWITCH,
+- 0, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_msd_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_msd_ctl_get,
+- .put = scarlett2_msd_ctl_put,
+-};
+-
+-static int scarlett2_add_msd_ctl(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- if (!info->has_msd_mode)
+- return 0;
+-
+- /* If MSD mode is off, hide the switch by default */
+- if (!private->msd_switch && !(mixer->chip->setup & SCARLETT2_MSD_ENABLE))
+- return 0;
+-
+- /* Add MSD control */
+- return scarlett2_add_new_ctl(mixer, &scarlett2_msd_ctl,
+- 0, 1, "MSD Mode Switch", NULL);
+-}
+-
+-/*** Standalone Control ***/
+-
+-static int scarlett2_standalone_ctl_get(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct scarlett2_data *private = elem->head.mixer->private_data;
+-
+- ucontrol->value.integer.value[0] = private->standalone_switch;
+- return 0;
+-}
+-
+-static int scarlett2_standalone_ctl_put(struct snd_kcontrol *kctl,
+- struct snd_ctl_elem_value *ucontrol)
+-{
+- struct usb_mixer_elem_info *elem = kctl->private_data;
+- struct usb_mixer_interface *mixer = elem->head.mixer;
+- struct scarlett2_data *private = mixer->private_data;
+-
+- int oval, val, err = 0;
+-
+- mutex_lock(&private->data_mutex);
+-
+- oval = private->standalone_switch;
+- val = !!ucontrol->value.integer.value[0];
+-
+- if (oval == val)
+- goto unlock;
+-
+- private->standalone_switch = val;
+-
+- /* Send switch change to the device */
+- err = scarlett2_usb_set_config(mixer,
+- SCARLETT2_CONFIG_STANDALONE_SWITCH,
+- 0, val);
+- if (err == 0)
+- err = 1;
+-
+-unlock:
+- mutex_unlock(&private->data_mutex);
+- return err;
+-}
+-
+-static const struct snd_kcontrol_new scarlett2_standalone_ctl = {
+- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+- .name = "",
+- .info = snd_ctl_boolean_mono_info,
+- .get = scarlett2_standalone_ctl_get,
+- .put = scarlett2_standalone_ctl_put,
+-};
+-
+-static int scarlett2_add_standalone_ctl(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- if (private->info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+- return 0;
+-
+- /* Add standalone control */
+- return scarlett2_add_new_ctl(mixer, &scarlett2_standalone_ctl,
+- 0, 1, "Standalone Switch", NULL);
+-}
+-
+-/*** Cleanup/Suspend Callbacks ***/
+-
+-static void scarlett2_private_free(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- cancel_delayed_work_sync(&private->work);
+- kfree(private);
+- mixer->private_data = NULL;
+-}
+-
+-static void scarlett2_private_suspend(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- if (cancel_delayed_work_sync(&private->work))
+- scarlett2_config_save(private->mixer);
+-}
+-
+-/*** Initialisation ***/
+-
+-static void scarlett2_count_mux_io(struct scarlett2_data *private)
+-{
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int port_type, srcs = 0, dsts = 0;
+-
+- for (port_type = 0;
+- port_type < SCARLETT2_PORT_TYPE_COUNT;
+- port_type++) {
+- srcs += port_count[port_type][SCARLETT2_PORT_IN];
+- dsts += port_count[port_type][SCARLETT2_PORT_OUT];
+- }
+-
+- private->num_mux_srcs = srcs;
+- private->num_mux_dsts = dsts;
+-}
+-
+-/* Look through the interface descriptors for the Focusrite Control
+- * interface (bInterfaceClass = 255 Vendor Specific Class) and set
+- * bInterfaceNumber, bEndpointAddress, wMaxPacketSize, and bInterval
+- * in private
+- */
+-static int scarlett2_find_fc_interface(struct usb_device *dev,
+- struct scarlett2_data *private)
+-{
+- struct usb_host_config *config = dev->actconfig;
+- int i;
+-
+- for (i = 0; i < config->desc.bNumInterfaces; i++) {
+- struct usb_interface *intf = config->interface[i];
+- struct usb_interface_descriptor *desc =
+- &intf->altsetting[0].desc;
+- struct usb_endpoint_descriptor *epd;
+-
+- if (desc->bInterfaceClass != 255)
+- continue;
+-
+- epd = get_endpoint(intf->altsetting, 0);
+- private->bInterfaceNumber = desc->bInterfaceNumber;
+- private->bEndpointAddress = epd->bEndpointAddress &
+- USB_ENDPOINT_NUMBER_MASK;
+- private->wMaxPacketSize = le16_to_cpu(epd->wMaxPacketSize);
+- private->bInterval = epd->bInterval;
+- return 0;
+- }
+-
+- return -EINVAL;
+-}
+-
+-/* Initialise private data */
+-static int scarlett2_init_private(struct usb_mixer_interface *mixer,
+- const struct scarlett2_device_info *info)
+-{
+- struct scarlett2_data *private =
+- kzalloc(sizeof(struct scarlett2_data), GFP_KERNEL);
+-
+- if (!private)
+- return -ENOMEM;
+-
+- mutex_init(&private->usb_mutex);
+- mutex_init(&private->data_mutex);
+- INIT_DELAYED_WORK(&private->work, scarlett2_config_save_work);
+-
+- mixer->private_data = private;
+- mixer->private_free = scarlett2_private_free;
+- mixer->private_suspend = scarlett2_private_suspend;
+-
+- private->info = info;
+- scarlett2_count_mux_io(private);
+- private->scarlett2_seq = 0;
+- private->mixer = mixer;
+-
+- return scarlett2_find_fc_interface(mixer->chip->dev, private);
+-}
+-
+-/* Cargo cult proprietary initialisation sequence */
+-static int scarlett2_usb_init(struct usb_mixer_interface *mixer)
+-{
+- struct usb_device *dev = mixer->chip->dev;
+- struct scarlett2_data *private = mixer->private_data;
+- u8 buf[24];
+- int err;
+-
+- if (usb_pipe_type_check(dev, usb_sndctrlpipe(dev, 0)))
+- return -EINVAL;
+-
+- /* step 0 */
+- err = scarlett2_usb_rx(dev, private->bInterfaceNumber,
+- SCARLETT2_USB_CMD_INIT, buf, sizeof(buf));
+- if (err < 0)
+- return err;
+-
+- /* step 1 */
+- private->scarlett2_seq = 1;
+- err = scarlett2_usb(mixer, SCARLETT2_USB_INIT_1, NULL, 0, NULL, 0);
+- if (err < 0)
+- return err;
+-
+- /* step 2 */
+- private->scarlett2_seq = 1;
+- return scarlett2_usb(mixer, SCARLETT2_USB_INIT_2, NULL, 0, NULL, 84);
+-}
+-
+-/* Read configuration from the interface on start */
+-static int scarlett2_read_configs(struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int num_line_out =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int num_mixer_out =
+- port_count[SCARLETT2_PORT_TYPE_MIX][SCARLETT2_PORT_IN];
+- struct scarlett2_usb_volume_status volume_status;
+- int err, i;
+-
+- if (info->has_msd_mode) {
+- err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_MSD_SWITCH,
+- 1, &private->msd_switch);
+- if (err < 0)
+- return err;
+-
+- /* no other controls are created if MSD mode is on */
+- if (private->msd_switch)
+- return 0;
+- }
+-
+- err = scarlett2_update_input_other(mixer);
+- if (err < 0)
+- return err;
+-
+- err = scarlett2_update_monitor_other(mixer);
+- if (err < 0)
+- return err;
+-
+- /* the rest of the configuration is for devices with a mixer */
+- if (info->config_set == SCARLETT2_CONFIG_SET_NO_MIXER)
+- return 0;
+-
+- err = scarlett2_usb_get_config(
+- mixer, SCARLETT2_CONFIG_STANDALONE_SWITCH,
+- 1, &private->standalone_switch);
+- if (err < 0)
+- return err;
+-
+- err = scarlett2_update_sync(mixer);
+- if (err < 0)
+- return err;
+-
+- err = scarlett2_usb_get_volume_status(mixer, &volume_status);
+- if (err < 0)
+- return err;
+-
+- if (info->line_out_hw_vol)
+- for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+- private->dim_mute[i] = !!volume_status.dim_mute[i];
+-
+- private->master_vol = clamp(
+- volume_status.master_vol + SCARLETT2_VOLUME_BIAS,
+- 0, SCARLETT2_VOLUME_BIAS);
+-
+- for (i = 0; i < num_line_out; i++) {
+- int volume, mute;
+-
+- private->vol_sw_hw_switch[i] =
+- info->line_out_hw_vol
+- && volume_status.sw_hw_switch[i];
+-
+- volume = private->vol_sw_hw_switch[i]
+- ? volume_status.master_vol
+- : volume_status.sw_vol[i];
+- volume = clamp(volume + SCARLETT2_VOLUME_BIAS,
+- 0, SCARLETT2_VOLUME_BIAS);
+- private->vol[i] = volume;
+-
+- mute = private->vol_sw_hw_switch[i]
+- ? private->dim_mute[SCARLETT2_BUTTON_MUTE]
+- : volume_status.mute_switch[i];
+- private->mute_switch[i] = mute;
+- }
+-
+- for (i = 0; i < num_mixer_out; i++) {
+- err = scarlett2_usb_get_mix(mixer, i);
+- if (err < 0)
+- return err;
+- }
+-
+- return scarlett2_usb_get_mux(mixer);
+-}
+-
+-/* Notify on sync change */
+-static void scarlett2_notify_sync(
+- struct usb_mixer_interface *mixer)
+-{
+- struct scarlett2_data *private = mixer->private_data;
+-
+- private->sync_updated = 1;
+-
+- snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->sync_ctl->id);
+-}
+-
+-/* Notify on monitor change */
+-static void scarlett2_notify_monitor(
+- struct usb_mixer_interface *mixer)
+-{
+- struct snd_card *card = mixer->chip->card;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int num_line_out =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int i;
+-
+- /* if line_out_hw_vol is 0, there are no controls to update */
+- if (!info->line_out_hw_vol)
+- return;
+-
+- private->vol_updated = 1;
+-
+- snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->master_vol_ctl->id);
+-
+- for (i = 0; i < num_line_out; i++)
+- if (private->vol_sw_hw_switch[line_out_remap(private, i)])
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->vol_ctls[i]->id);
+-}
+-
+-/* Notify on dim/mute change */
+-static void scarlett2_notify_dim_mute(
+- struct usb_mixer_interface *mixer)
+-{
+- struct snd_card *card = mixer->chip->card;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- const int (*port_count)[SCARLETT2_PORT_DIRNS] = info->port_count;
+- int num_line_out =
+- port_count[SCARLETT2_PORT_TYPE_ANALOGUE][SCARLETT2_PORT_OUT];
+- int i;
+-
+- private->vol_updated = 1;
+-
+- if (!info->line_out_hw_vol)
+- return;
+-
+- for (i = 0; i < SCARLETT2_DIM_MUTE_COUNT; i++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->dim_mute_ctls[i]->id);
+-
+- for (i = 0; i < num_line_out; i++)
+- if (private->vol_sw_hw_switch[line_out_remap(private, i)])
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->mute_ctls[i]->id);
+-}
+-
+-/* Notify on "input other" change (level/pad/air) */
+-static void scarlett2_notify_input_other(
+- struct usb_mixer_interface *mixer)
+-{
+- struct snd_card *card = mixer->chip->card;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+- int i;
+-
+- private->input_other_updated = 1;
+-
+- for (i = 0; i < info->level_input_count; i++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->level_ctls[i]->id);
+- for (i = 0; i < info->pad_input_count; i++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->pad_ctls[i]->id);
+- for (i = 0; i < info->air_input_count; i++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->air_ctls[i]->id);
+- for (i = 0; i < info->phantom_count; i++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->phantom_ctls[i]->id);
+-}
+-
+-/* Notify on "monitor other" change (direct monitor, speaker
+- * switching, talkback)
+- */
+-static void scarlett2_notify_monitor_other(
+- struct usb_mixer_interface *mixer)
+-{
+- struct snd_card *card = mixer->chip->card;
+- struct scarlett2_data *private = mixer->private_data;
+- const struct scarlett2_device_info *info = private->info;
+-
+- private->monitor_other_updated = 1;
+-
+- if (info->direct_monitor) {
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->direct_monitor_ctl->id);
+- return;
+- }
+-
+- if (info->has_speaker_switching)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->speaker_switching_ctl->id);
+-
+- if (info->has_talkback)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->talkback_ctl->id);
+-
+- /* if speaker switching was recently enabled or disabled,
+- * invalidate the dim/mute and mux enum controls
+- */
+- if (private->speaker_switching_switched) {
+- int i;
+-
+- scarlett2_notify_dim_mute(mixer);
+-
+- private->speaker_switching_switched = 0;
+- private->mux_updated = 1;
+-
+- for (i = 0; i < private->num_mux_dsts; i++)
+- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
+- &private->mux_ctls[i]->id);
+- }
+-}
+-
+-/* Interrupt callback */
+-static void scarlett2_notify(struct urb *urb)
+-{
+- struct usb_mixer_interface *mixer = urb->context;
+- int len = urb->actual_length;
+- int ustatus = urb->status;
+- u32 data;
+-
+- if (ustatus != 0 || len != 8)
+- goto requeue;
+-
+- data = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
+- if (data & SCARLETT2_USB_NOTIFY_SYNC)
+- scarlett2_notify_sync(mixer);
+- if (data & SCARLETT2_USB_NOTIFY_MONITOR)
+- scarlett2_notify_monitor(mixer);
+- if (data & SCARLETT2_USB_NOTIFY_DIM_MUTE)
+- scarlett2_notify_dim_mute(mixer);
+- if (data & SCARLETT2_USB_NOTIFY_INPUT_OTHER)
+- scarlett2_notify_input_other(mixer);
+- if (data & SCARLETT2_USB_NOTIFY_MONITOR_OTHER)
+- scarlett2_notify_monitor_other(mixer);
+-
+-requeue:
+- if (ustatus != -ENOENT &&
+- ustatus != -ECONNRESET &&
+- ustatus != -ESHUTDOWN) {
+- urb->dev = mixer->chip->dev;
+- usb_submit_urb(urb, GFP_ATOMIC);
+- }
+-}
+-
+-static int scarlett2_init_notify(struct usb_mixer_interface *mixer)
+-{
+- struct usb_device *dev = mixer->chip->dev;
+- struct scarlett2_data *private = mixer->private_data;
+- unsigned int pipe = usb_rcvintpipe(dev, private->bEndpointAddress);
+- void *transfer_buffer;
+-
+- if (mixer->urb) {
+- usb_audio_err(mixer->chip,
+- "%s: mixer urb already in use!\n", __func__);
+- return 0;
+- }
+-
+- if (usb_pipe_type_check(dev, pipe))
+- return -EINVAL;
+-
+- mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (!mixer->urb)
+- return -ENOMEM;
+-
+- transfer_buffer = kmalloc(private->wMaxPacketSize, GFP_KERNEL);
+- if (!transfer_buffer)
+- return -ENOMEM;
+-
+- usb_fill_int_urb(mixer->urb, dev, pipe,
+- transfer_buffer, private->wMaxPacketSize,
+- scarlett2_notify, mixer, private->bInterval);
+-
+- return usb_submit_urb(mixer->urb, GFP_KERNEL);
+-}
+-
+-static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+-{
+- const struct scarlett2_device_info **info = scarlett2_devices;
+- int err;
+-
+- /* Find device in scarlett2_devices */
+- while (*info && (*info)->usb_id != mixer->chip->usb_id)
+- info++;
+- if (!*info)
+- return -EINVAL;
+-
+- /* Initialise private data */
+- err = scarlett2_init_private(mixer, *info);
+- if (err < 0)
+- return err;
+-
+- /* Send proprietary USB initialisation sequence */
+- err = scarlett2_usb_init(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Read volume levels and controls from the interface */
+- err = scarlett2_read_configs(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the MSD control */
+- err = scarlett2_add_msd_ctl(mixer);
+- if (err < 0)
+- return err;
+-
+- /* If MSD mode is enabled, don't create any other controls */
+- if (((struct scarlett2_data *)mixer->private_data)->msd_switch)
+- return 0;
+-
+- /* Create the analogue output controls */
+- err = scarlett2_add_line_out_ctls(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the analogue input controls */
+- err = scarlett2_add_line_in_ctls(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the input, output, and mixer mux input selections */
+- err = scarlett2_add_mux_enums(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the matrix mixer controls */
+- err = scarlett2_add_mixer_ctls(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the level meter controls */
+- err = scarlett2_add_meter_ctl(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the sync control */
+- err = scarlett2_add_sync_ctl(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the direct monitor control */
+- err = scarlett2_add_direct_monitor_ctl(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the speaker switching control */
+- err = scarlett2_add_speaker_switch_ctl(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the talkback controls */
+- err = scarlett2_add_talkback_ctls(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Create the standalone control */
+- err = scarlett2_add_standalone_ctl(mixer);
+- if (err < 0)
+- return err;
+-
+- /* Set up the interrupt polling */
+- err = scarlett2_init_notify(mixer);
+- if (err < 0)
+- return err;
+-
+- return 0;
+-}
+-
+-int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+-{
+- struct snd_usb_audio *chip = mixer->chip;
+- int err;
+-
+- /* only use UAC_VERSION_2 */
+- if (!mixer->protocol)
+- return 0;
+-
+- if (!(chip->setup & SCARLETT2_ENABLE)) {
+- usb_audio_info(chip,
+- "Focusrite Scarlett Gen 2/3 Mixer Driver disabled; "
+- "use options snd_usb_audio vid=0x%04x pid=0x%04x "
+- "device_setup=1 to enable and report any issues "
+- "to g@b4.vu",
+- USB_ID_VENDOR(chip->usb_id),
+- USB_ID_PRODUCT(chip->usb_id));
+- return 0;
+- }
+-
+- usb_audio_info(chip,
+- "Focusrite Scarlett Gen 2/3 Mixer Driver enabled pid=0x%04x",
+- USB_ID_PRODUCT(chip->usb_id));
+-
+- err = snd_scarlett_gen2_controls_create(mixer);
+- if (err < 0)
+- usb_audio_err(mixer->chip,
+- "Error initialising Scarlett Mixer Driver: %d",
+- err);
+-
+- return err;
+-}
+diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h
+deleted file mode 100644
+index 668c6b0cb50a63..00000000000000
+--- a/sound/usb/mixer_scarlett_gen2.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __USB_MIXER_SCARLETT_GEN2_H
+-#define __USB_MIXER_SCARLETT_GEN2_H
+-
+-int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
+-
+-#endif /* __USB_MIXER_SCARLETT_GEN2_H */
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 5d72dc8441cbb4..75cde5779f38d5 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -35,10 +35,87 @@
+ .bInterfaceClass = USB_CLASS_AUDIO, \
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
+
++/* Quirk .driver_info, followed by the definition of the quirk entry;
++ * write it as QUIRK_DRIVER_INFO { ... } in each entry of the quirk table
++ */
++#define QUIRK_DRIVER_INFO \
++ .driver_info = (unsigned long)&(const struct snd_usb_audio_quirk)
++
++/*
++ * Macros for quirk data entries
++ */
++
++/* Quirk data entry for ignoring the interface */
++#define QUIRK_DATA_IGNORE(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_IGNORE_INTERFACE
++/* Quirk data entry for a standard audio interface */
++#define QUIRK_DATA_STANDARD_AUDIO(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_AUDIO_STANDARD_INTERFACE
++/* Quirk data entry for a standard MIDI interface */
++#define QUIRK_DATA_STANDARD_MIDI(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_MIDI_STANDARD_INTERFACE
++/* Quirk data entry for a standard mixer interface */
++#define QUIRK_DATA_STANDARD_MIXER(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_AUDIO_STANDARD_MIXER
++
++/* Quirk data entry for Yamaha MIDI */
++#define QUIRK_DATA_MIDI_YAMAHA(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_MIDI_YAMAHA
++/* Quirk data entry for Edirol UAxx */
++#define QUIRK_DATA_EDIROL_UAXX(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_AUDIO_EDIROL_UAXX
++/* Quirk data entry for raw bytes interface */
++#define QUIRK_DATA_RAW_BYTES(_ifno) \
++ .ifnum = (_ifno), .type = QUIRK_MIDI_RAW_BYTES
++
++/* Quirk composite array terminator */
++#define QUIRK_COMPOSITE_END { .ifnum = -1 }
++
++/* Quirk data entry for composite quirks;
++ * followed by the quirk array that is terminated with QUIRK_COMPOSITE_END
++ * e.g. QUIRK_DATA_COMPOSITE { { quirk1 }, { quirk2 },..., QUIRK_COMPOSITE_END }
++ */
++#define QUIRK_DATA_COMPOSITE \
++ .ifnum = QUIRK_ANY_INTERFACE, \
++ .type = QUIRK_COMPOSITE, \
++ .data = &(const struct snd_usb_audio_quirk[])
++
++/* Quirk data entry for a fixed audio endpoint;
++ * followed by audioformat definition
++ * e.g. QUIRK_DATA_AUDIOFORMAT(n) { .formats = xxx, ... }
++ */
++#define QUIRK_DATA_AUDIOFORMAT(_ifno) \
++ .ifnum = (_ifno), \
++ .type = QUIRK_AUDIO_FIXED_ENDPOINT, \
++ .data = &(const struct audioformat)
++
++/* Quirk data entry for a fixed MIDI endpoint;
++ * followed by snd_usb_midi_endpoint_info definition
++ * e.g. QUIRK_DATA_MIDI_FIXED_ENDPOINT(n) { .out_cables = x, .in_cables = y }
++ */
++#define QUIRK_DATA_MIDI_FIXED_ENDPOINT(_ifno) \
++ .ifnum = (_ifno), \
++ .type = QUIRK_MIDI_FIXED_ENDPOINT, \
++ .data = &(const struct snd_usb_midi_endpoint_info)
++/* Quirk data entry for a MIDIMAN MIDI endpoint */
++#define QUIRK_DATA_MIDI_MIDIMAN(_ifno) \
++ .ifnum = (_ifno), \
++ .type = QUIRK_MIDI_MIDIMAN, \
++ .data = &(const struct snd_usb_midi_endpoint_info)
++/* Quirk data entry for an EMAGIC MIDI endpoint */
++#define QUIRK_DATA_MIDI_EMAGIC(_ifno) \
++ .ifnum = (_ifno), \
++ .type = QUIRK_MIDI_EMAGIC, \
++ .data = &(const struct snd_usb_midi_endpoint_info)
++
++/*
++ * Here we go... the quirk table definition begins:
++ */
++
+ /* FTDI devices */
+ {
+ USB_DEVICE(0x0403, 0xb8d8),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "STARR LABS", */
+ /* .product_name = "Starr Labs MIDI USB device", */
+ .ifnum = 0,
+@@ -49,10 +126,8 @@
+ {
+ /* Creative BT-D1 */
+ USB_DEVICE(0x041e, 0x0005),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 1,
+@@ -87,18 +162,11 @@
+ */
+ {
+ USB_AUDIO_DEVICE(0x041e, 0x4095),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(2) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(3) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .fmt_bits = 16,
+@@ -114,9 +182,7 @@
+ .rate_table = (unsigned int[]) { 48000 },
+ },
+ },
+- {
+- .ifnum = -1
+- },
++ QUIRK_COMPOSITE_END
+ },
+ },
+ },
+@@ -128,31 +194,18 @@
+ */
+ {
+ USB_DEVICE(0x0424, 0xb832),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Standard Microsystems Corp.",
+ .product_name = "HP Wireless Audio",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ /* Mixer */
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
++ { QUIRK_DATA_IGNORE(0) },
+ /* Playback */
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
++ { QUIRK_DATA_IGNORE(1) },
+ /* Capture */
+- {
+- .ifnum = 2,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
++ { QUIRK_DATA_IGNORE(2) },
+ /* HID Device, .ifnum = 3 */
+- {
+- .ifnum = -1,
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -175,20 +228,18 @@
+
+ #define YAMAHA_DEVICE(id, name) { \
+ USB_DEVICE(0x0499, id), \
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
++ QUIRK_DRIVER_INFO { \
+ .vendor_name = "Yamaha", \
+ .product_name = name, \
+- .ifnum = QUIRK_ANY_INTERFACE, \
+- .type = QUIRK_MIDI_YAMAHA \
++ QUIRK_DATA_MIDI_YAMAHA(QUIRK_ANY_INTERFACE) \
+ } \
+ }
+ #define YAMAHA_INTERFACE(id, intf, name) { \
+ USB_DEVICE_VENDOR_SPEC(0x0499, id), \
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
++ QUIRK_DRIVER_INFO { \
+ .vendor_name = "Yamaha", \
+ .product_name = name, \
+- .ifnum = intf, \
+- .type = QUIRK_MIDI_YAMAHA \
++ QUIRK_DATA_MIDI_YAMAHA(intf) \
+ } \
+ }
+ YAMAHA_DEVICE(0x1000, "UX256"),
+@@ -273,137 +324,70 @@ YAMAHA_DEVICE(0x105a, NULL),
+ YAMAHA_DEVICE(0x105b, NULL),
+ YAMAHA_DEVICE(0x105c, NULL),
+ YAMAHA_DEVICE(0x105d, NULL),
++YAMAHA_DEVICE(0x1718, "P-125"),
+ {
+ USB_DEVICE(0x0499, 0x1503),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Yamaha", */
+ /* .product_name = "MOX6/MOX8", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_YAMAHA
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ { QUIRK_DATA_MIDI_YAMAHA(3) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0499, 0x1507),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Yamaha", */
+ /* .product_name = "THR10", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_YAMAHA
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ { QUIRK_DATA_MIDI_YAMAHA(3) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0499, 0x1509),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Yamaha", */
+ /* .product_name = "Steinberg UR22", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_YAMAHA
+- },
+- {
+- .ifnum = 4,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ { QUIRK_DATA_MIDI_YAMAHA(3) },
++ { QUIRK_DATA_IGNORE(4) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0499, 0x150a),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Yamaha", */
+ /* .product_name = "THR5A", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_YAMAHA
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ { QUIRK_DATA_MIDI_YAMAHA(3) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0499, 0x150c),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Yamaha", */
+ /* .product_name = "THR10C", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_YAMAHA
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ { QUIRK_DATA_MIDI_YAMAHA(3) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -437,7 +421,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ USB_DEVICE_ID_MATCH_INT_CLASS,
+ .idVendor = 0x0499,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUTODETECT
+ }
+@@ -448,16 +432,12 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ */
+ {
+ USB_DEVICE(0x0582, 0x0000),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "UA-100",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 4,
+ .iface = 0,
+@@ -472,9 +452,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 1,
+@@ -489,106 +467,66 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0007,
+ .in_cables = 0x0007
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0002),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UM-4",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x000f,
+ .in_cables = 0x000f
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0003),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "SC-8850",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x003f,
+ .in_cables = 0x003f
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0004),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "U-8",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0005,
+ .in_cables = 0x0005
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -596,152 +534,92 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* Has ID 0x0099 when not in "Advanced Driver" mode.
+ * The UM-2EX has only one input, but we cannot detect this. */
+ USB_DEVICE(0x0582, 0x0005),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UM-2",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0007),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "SC-8820",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0013,
+ .in_cables = 0x0013
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0008),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "PC-300",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x009d when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0009),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UM-1",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x000b),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "SK-500",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0013,
+ .in_cables = 0x0013
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -749,31 +627,19 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* thanks to Emiliano Grilli <emillo@libero.it>
+ * for helping researching this data */
+ USB_DEVICE(0x0582, 0x000c),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "SC-D70",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0007,
+ .in_cables = 0x0007
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -787,35 +653,23 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * the 96kHz sample rate.
+ */
+ USB_DEVICE(0x0582, 0x0010),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-5",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x0013 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0012),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "XV-5050",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -824,12 +678,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0015 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0014),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UM-880",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x01ff,
+ .in_cables = 0x01ff
+ }
+@@ -838,74 +690,48 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0017 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0016),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "SD-90",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x000f,
+ .in_cables = 0x000f
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x001c when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x001b),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "MMP-2",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x001e when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x001d),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "V-SYNTH",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -914,12 +740,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0024 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0023),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UM-550",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x003f,
+ .in_cables = 0x003f
+ }
+@@ -932,20 +756,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * and no MIDI.
+ */
+ USB_DEVICE(0x0582, 0x0025),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-20",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 1,
+@@ -960,9 +777,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 2,
+@@ -977,28 +792,22 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(3) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x0028 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0027),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "SD-20",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0007
+ }
+@@ -1007,12 +816,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x002a when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0029),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "SD-80",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x000f,
+ .in_cables = 0x000f
+ }
+@@ -1025,39 +832,24 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * but offers only 16-bit PCM and no MIDI.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x0582, 0x002b),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-700",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_EDIROL_UAXX(1) },
++ { QUIRK_DATA_EDIROL_UAXX(2) },
++ { QUIRK_DATA_EDIROL_UAXX(3) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x002e when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x002d),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "XV-2020",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1066,12 +858,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0030 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x002f),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "VariOS",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0007,
+ .in_cables = 0x0007
+ }
+@@ -1080,12 +870,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0034 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0033),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "PCR",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0007
+ }
+@@ -1097,12 +885,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * later revisions use IDs 0x0054 and 0x00a2.
+ */
+ USB_DEVICE(0x0582, 0x0037),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "Digital Piano",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1115,39 +901,24 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * and no MIDI.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x0582, 0x003b),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "BOSS",
+ .product_name = "GS-10",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ { QUIRK_DATA_STANDARD_MIDI(3) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x0041 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0040),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "GI-20",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1156,12 +927,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0043 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0042),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "RS-70",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1170,36 +939,24 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0049 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0047),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "EDIROL", */
+ /* .product_name = "UR-80", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ /* in the 96 kHz modes, only interface 1 is there */
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x004a when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0048),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "EDIROL", */
+ /* .product_name = "UR-80", */
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0007
+ }
+@@ -1208,35 +965,23 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x004e when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x004c),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "PCR-A",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x004f when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x004d),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "PCR-A",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0007
+ }
+@@ -1248,76 +993,52 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * is standard compliant, but has only 16-bit PCM.
+ */
+ USB_DEVICE(0x0582, 0x0050),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-3FX",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0052),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UM-1SX",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
++ QUIRK_DATA_STANDARD_MIDI(0)
+ }
+ },
+ {
+ USB_DEVICE(0x0582, 0x0060),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "EXR Series",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
++ QUIRK_DATA_STANDARD_MIDI(0)
+ }
+ },
+ {
+ /* has ID 0x0066 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0064),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "EDIROL", */
+ /* .product_name = "PCR-1", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x0067 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0065),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "EDIROL", */
+ /* .product_name = "PCR-1", */
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0003
+ }
+@@ -1326,12 +1047,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x006e when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x006d),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "FANTOM-X",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1344,39 +1063,24 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * offers only 16-bit PCM at 44.1 kHz and no MIDI.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x0582, 0x0074),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-25",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_EDIROL_UAXX(0) },
++ { QUIRK_DATA_EDIROL_UAXX(1) },
++ { QUIRK_DATA_EDIROL_UAXX(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* has ID 0x0076 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0075),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "BOSS",
+ .product_name = "DR-880",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1385,12 +1089,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x007b when not in "Advanced Driver" mode */
+ USB_DEVICE_VENDOR_SPEC(0x0582, 0x007a),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ /* "RD" or "RD-700SX"? */
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+@@ -1399,12 +1101,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x0081 when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x0080),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Roland",
+ .product_name = "G-70",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1413,12 +1113,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* has ID 0x008c when not in "Advanced Driver" mode */
+ USB_DEVICE(0x0582, 0x008b),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "PC-50",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1430,56 +1128,31 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * is standard compliant, but has only 16-bit PCM and no MIDI.
+ */
+ USB_DEVICE(0x0582, 0x00a3),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-4FX",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_EDIROL_UAXX(0) },
++ { QUIRK_DATA_EDIROL_UAXX(1) },
++ { QUIRK_DATA_EDIROL_UAXX(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* Edirol M-16DX */
+ USB_DEVICE(0x0582, 0x00c4),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -1489,37 +1162,22 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * offers only 16-bit PCM at 44.1 kHz and no MIDI.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x0582, 0x00e6),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "EDIROL",
+ .product_name = "UA-25EX",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_EDIROL_UAXX
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_EDIROL_UAXX(0) },
++ { QUIRK_DATA_EDIROL_UAXX(1) },
++ { QUIRK_DATA_EDIROL_UAXX(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* Edirol UM-3G */
+ USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = 0,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(0) {
+ .out_cables = 0x0007,
+ .in_cables = 0x0007
+ }
+@@ -1528,45 +1186,29 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* BOSS ME-25 */
+ USB_DEVICE(0x0582, 0x0113),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* only 44.1 kHz works at the moment */
+ USB_DEVICE(0x0582, 0x0120),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Roland", */
+ /* .product_name = "OCTO-CAPTURE", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 10,
+ .iface = 0,
+@@ -1582,9 +1224,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 12,
+ .iface = 1,
+@@ -1600,40 +1240,26 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = 3,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 4,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_IGNORE(3) },
++ { QUIRK_DATA_IGNORE(4) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* only 44.1 kHz works at the moment */
+ USB_DEVICE(0x0582, 0x012f),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Roland", */
+ /* .product_name = "QUAD-CAPTURE", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 4,
+ .iface = 0,
+@@ -1649,9 +1275,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 6,
+ .iface = 1,
+@@ -1667,54 +1291,32 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = 3,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 4,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_IGNORE(3) },
++ { QUIRK_DATA_IGNORE(4) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+-{
+- USB_DEVICE(0x0582, 0x0159),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- /* .vendor_name = "Roland", */
+- /* .product_name = "UA-22", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++{
++ USB_DEVICE(0x0582, 0x0159),
++ QUIRK_DRIVER_INFO {
++ /* .vendor_name = "Roland", */
++ /* .product_name = "UA-22", */
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(2) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -1722,19 +1324,19 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* UA101 and co are supported by another driver */
+ {
+ USB_DEVICE(0x0582, 0x0044), /* UA-1000 high speed */
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .ifnum = QUIRK_NODEV_INTERFACE
+ },
+ },
+ {
+ USB_DEVICE(0x0582, 0x007d), /* UA-101 high speed */
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .ifnum = QUIRK_NODEV_INTERFACE
+ },
+ },
+ {
+ USB_DEVICE(0x0582, 0x008d), /* UA-101 full speed */
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .ifnum = QUIRK_NODEV_INTERFACE
+ },
+ },
+@@ -1745,7 +1347,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ USB_DEVICE_ID_MATCH_INT_CLASS,
+ .idVendor = 0x0582,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUTODETECT
+ }
+@@ -1760,12 +1362,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * compliant USB MIDI ports for external MIDI and controls.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x06f8, 0xb000),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Hercules",
+ .product_name = "DJ Console (WE)",
+- .ifnum = 4,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(4) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1775,12 +1375,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* Midiman/M-Audio devices */
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x1002),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "MidiSport 2x2",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+@@ -1788,12 +1386,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x1011),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "MidiSport 1x1",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1801,12 +1397,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x1015),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "Keystation",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1814,12 +1408,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x1021),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "MidiSport 4x4",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x000f,
+ .in_cables = 0x000f
+ }
+@@ -1832,12 +1424,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * Thanks to Olaf Giesbrecht <Olaf_Giesbrecht@yahoo.de>
+ */
+ USB_DEVICE_VER(0x0763, 0x1031, 0x0100, 0x0109),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "MidiSport 8x8",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x01ff,
+ .in_cables = 0x01ff
+ }
+@@ -1845,12 +1435,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x1033),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "MidiSport 8x8",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x01ff,
+ .in_cables = 0x01ff
+ }
+@@ -1858,12 +1446,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x1041),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "MidiSport 2x4",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(QUIRK_ANY_INTERFACE) {
+ .out_cables = 0x000f,
+ .in_cables = 0x0003
+ }
+@@ -1871,76 +1457,41 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2001),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "Quattro",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ /*
+ * Interfaces 0-2 are "Windows-compatible", 16-bit only,
+ * and share endpoints with the other interfaces.
+ * Ignore them. The other interfaces can do 24 bits,
+ * but captured samples are big-endian (see usbaudio.c).
+ */
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 4,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 5,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 6,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 7,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 8,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 9,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
++ { QUIRK_DATA_IGNORE(2) },
++ { QUIRK_DATA_IGNORE(3) },
++ { QUIRK_DATA_STANDARD_AUDIO(4) },
++ { QUIRK_DATA_STANDARD_AUDIO(5) },
++ { QUIRK_DATA_IGNORE(6) },
++ { QUIRK_DATA_STANDARD_AUDIO(7) },
++ { QUIRK_DATA_STANDARD_AUDIO(8) },
++ {
++ QUIRK_DATA_MIDI_MIDIMAN(9) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2003),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "AudioPhile",
+- .ifnum = 6,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(6) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1948,12 +1499,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2008),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "Ozone",
+- .ifnum = 3,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(3) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+@@ -1961,93 +1510,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x200d),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "M-Audio",
+ .product_name = "OmniStudio",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 3,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 4,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 5,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 6,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 7,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 8,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 9,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
++ { QUIRK_DATA_IGNORE(2) },
++ { QUIRK_DATA_IGNORE(3) },
++ { QUIRK_DATA_STANDARD_AUDIO(4) },
++ { QUIRK_DATA_STANDARD_AUDIO(5) },
++ { QUIRK_DATA_IGNORE(6) },
++ { QUIRK_DATA_STANDARD_AUDIO(7) },
++ { QUIRK_DATA_STANDARD_AUDIO(8) },
++ {
++ QUIRK_DATA_MIDI_MIDIMAN(9) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x0763, 0x2019),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Ozone Academic", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
+ {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(3) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2057,21 +1558,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2030),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track C400", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(1) },
+ /* Playback */
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 2,
+@@ -2095,9 +1589,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ /* Capture */
+ {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(3) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 3,
+@@ -2119,30 +1611,21 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .clock = 0x80,
+ }
+ },
+- /* MIDI */
+- {
+- .ifnum = -1 /* Interface = 4 */
+- }
++ /* MIDI: Interface = 4 */
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2031),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track C600", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(1) },
+ /* Playback */
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 2,
+@@ -2166,9 +1649,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ /* Capture */
+ {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(3) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 3,
+@@ -2190,29 +1671,20 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .clock = 0x80,
+ }
+ },
+- /* MIDI */
+- {
+- .ifnum = -1 /* Interface = 4 */
+- }
++ /* MIDI: Interface = 4 */
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2080),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track Ultra", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 1,
+@@ -2234,9 +1706,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 2,
+@@ -2258,28 +1728,19 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ /* interface 3 (MIDI) is standard compliant */
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2081),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track Ultra 8R", */
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 1,
+@@ -2301,9 +1762,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 2,
+@@ -2325,9 +1784,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ /* interface 3 (MIDI) is standard compliant */
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2335,21 +1792,19 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* Casio devices */
+ {
+ USB_DEVICE(0x07cf, 0x6801),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Casio",
+ .product_name = "PL-40R",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_YAMAHA
++ QUIRK_DATA_MIDI_YAMAHA(0)
+ }
+ },
+ {
+ /* this ID is used by several devices without a product ID */
+ USB_DEVICE(0x07cf, 0x6802),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Casio",
+ .product_name = "Keyboard",
+- .ifnum = 0,
+- .type = QUIRK_MIDI_YAMAHA
++ QUIRK_DATA_MIDI_YAMAHA(0)
+ }
+ },
+
+@@ -2362,23 +1817,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .idVendor = 0x07fd,
+ .idProduct = 0x0001,
+ .bDeviceSubClass = 2,
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "MOTU",
+ .product_name = "Fastlane",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_MIDI_RAW_BYTES
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_RAW_BYTES(0) },
++ { QUIRK_DATA_IGNORE(1) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2386,12 +1831,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* Emagic devices */
+ {
+ USB_DEVICE(0x086a, 0x0001),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Emagic",
+ .product_name = "Unitor8",
+- .ifnum = 2,
+- .type = QUIRK_MIDI_EMAGIC,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_EMAGIC(2) {
+ .out_cables = 0x80ff,
+ .in_cables = 0x80ff
+ }
+@@ -2399,12 +1842,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE(0x086a, 0x0002),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Emagic",
+ /* .product_name = "AMT8", */
+- .ifnum = 2,
+- .type = QUIRK_MIDI_EMAGIC,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_EMAGIC(2) {
+ .out_cables = 0x80ff,
+ .in_cables = 0x80ff
+ }
+@@ -2412,12 +1853,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE(0x086a, 0x0003),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Emagic",
+ /* .product_name = "MT4", */
+- .ifnum = 2,
+- .type = QUIRK_MIDI_EMAGIC,
+- .data = & (const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_EMAGIC(2) {
+ .out_cables = 0x800f,
+ .in_cables = 0x8003
+ }
+@@ -2427,38 +1866,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* KORG devices */
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0944, 0x0200),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "KORG, Inc.",
+ /* .product_name = "PANDORA PX5D", */
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE,
++ QUIRK_DATA_STANDARD_MIDI(3)
+ }
+ },
+
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0944, 0x0201),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "KORG, Inc.",
+ /* .product_name = "ToneLab ST", */
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE,
++ QUIRK_DATA_STANDARD_MIDI(3)
+ }
+ },
+
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "KORG, Inc.",
+ /* .product_name = "ToneLab EX", */
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE,
++ QUIRK_DATA_STANDARD_MIDI(3)
+ }
+ },
+
+ /* AKAI devices */
+ {
+ USB_DEVICE(0x09e8, 0x0062),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "AKAI",
+ .product_name = "MPD16",
+ .ifnum = 0,
+@@ -2469,21 +1905,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* Akai MPC Element */
+ USB_DEVICE(0x09e8, 0x0021),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_STANDARD_MIDI(1) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2492,66 +1918,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* Steinberg MI2 */
+ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
+ {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = &(const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(3) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* Steinberg MI4 */
+ USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = & (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
+ {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = &(const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(3) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2559,34 +1955,31 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* TerraTec devices */
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "TerraTec",
+ .product_name = "PHASE 26",
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
++ QUIRK_DATA_STANDARD_MIDI(3)
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0013),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "TerraTec",
+ .product_name = "PHASE 26",
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
++ QUIRK_DATA_STANDARD_MIDI(3)
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0014),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "TerraTec",
+ .product_name = "PHASE 26",
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
++ QUIRK_DATA_STANDARD_MIDI(3)
+ }
+ },
+ {
+ USB_DEVICE(0x0ccd, 0x0035),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Miditech",
+ .product_name = "Play'n Roll",
+ .ifnum = 0,
+@@ -2594,10 +1987,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+
++/* Stanton ScratchAmp */
++{ USB_DEVICE(0x103d, 0x0100) },
++{ USB_DEVICE(0x103d, 0x0101) },
++
+ /* Novation EMS devices */
+ {
+ USB_DEVICE_VENDOR_SPEC(0x1235, 0x0001),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Novation",
+ .product_name = "ReMOTE Audio/XStation",
+ .ifnum = 4,
+@@ -2606,7 +2003,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x1235, 0x0002),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Novation",
+ .product_name = "Speedio",
+ .ifnum = 3,
+@@ -2615,38 +2012,29 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ USB_DEVICE(0x1235, 0x000a),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Novation", */
+ /* .product_name = "Nocturn", */
+- .ifnum = 0,
+- .type = QUIRK_MIDI_RAW_BYTES
++ QUIRK_DATA_RAW_BYTES(0)
+ }
+ },
+ {
+ USB_DEVICE(0x1235, 0x000e),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ /* .vendor_name = "Novation", */
+ /* .product_name = "Launchpad", */
+- .ifnum = 0,
+- .type = QUIRK_MIDI_RAW_BYTES
++ QUIRK_DATA_RAW_BYTES(0)
+ }
+ },
+ {
+ USB_DEVICE(0x1235, 0x0010),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Focusrite",
+ .product_name = "Saffire 6 USB",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 0,
+@@ -2673,9 +2061,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 0,
+@@ -2697,28 +2083,19 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = 1,
+- .type = QUIRK_MIDI_RAW_BYTES
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_RAW_BYTES(1) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE(0x1235, 0x0018),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Novation",
+ .product_name = "Twitch",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = & (const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 0,
+@@ -2737,19 +2114,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = 1,
+- .type = QUIRK_MIDI_RAW_BYTES
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_RAW_BYTES(1) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ USB_DEVICE_VENDOR_SPEC(0x1235, 0x4661),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Novation",
+ .product_name = "ReMOTE25",
+ .ifnum = 0,
+@@ -2761,25 +2133,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* VirusTI Desktop */
+ USB_DEVICE_VENDOR_SPEC(0x133e, 0x0815),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 3,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = &(const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(3) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+- {
+- .ifnum = 4,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_IGNORE(4) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2807,7 +2170,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* QinHeng devices */
+ {
+ USB_DEVICE(0x1a86, 0x752d),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "QinHeng",
+ .product_name = "CH345",
+ .ifnum = 1,
+@@ -2821,7 +2184,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* Miditech devices */
+ {
+ USB_DEVICE(0x4752, 0x0011),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Miditech",
+ .product_name = "Midistart-2",
+ .ifnum = 0,
+@@ -2833,7 +2196,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* this ID used by both Miditech MidiStudio-2 and CME UF-x */
+ USB_DEVICE(0x7104, 0x2202),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .ifnum = 0,
+ .type = QUIRK_MIDI_CME
+ }
+@@ -2843,20 +2206,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ /* Thanks to Clemens Ladisch <clemens@ladisch.de> */
+ USB_DEVICE(0x0dba, 0x1000),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Digidesign",
+ .product_name = "MBox",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]){
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 1,
+@@ -2877,9 +2233,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 1,
+@@ -2900,9 +2254,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -2910,24 +2262,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* DIGIDESIGN MBOX 2 */
+ {
+ USB_DEVICE(0x0dba, 0x3000),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Digidesign",
+ .product_name = "Mbox 2",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 2,
+@@ -2945,15 +2287,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
++ { QUIRK_DATA_IGNORE(3) },
+ {
+- .ifnum = 3,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 4,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
+- .formats = SNDRV_PCM_FMTBIT_S24_3BE,
++ QUIRK_DATA_AUDIOFORMAT(4) {
++ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 4,
+ .altsetting = 2,
+@@ -2970,14 +2307,9 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
++ { QUIRK_DATA_IGNORE(5) },
+ {
+- .ifnum = 5,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 6,
+- .type = QUIRK_MIDI_MIDIMAN,
+- .data = &(const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_MIDIMAN(6) {
+ .out_ep = 0x02,
+ .out_cables = 0x0001,
+ .in_ep = 0x81,
+@@ -2985,33 +2317,21 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ /* DIGIDESIGN MBOX 3 */
+ {
+ USB_DEVICE(0x0dba, 0x5000),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Digidesign",
+ .product_name = "Mbox 3",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_IGNORE(1) },
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 2,
+@@ -3031,9 +2351,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(3) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 3,
+@@ -3054,36 +2372,25 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 4,
+- .type = QUIRK_MIDI_FIXED_ENDPOINT,
+- .data = &(const struct snd_usb_midi_endpoint_info) {
++ QUIRK_DATA_MIDI_FIXED_ENDPOINT(4) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ {
+ /* Tascam US122 MKII - playback-only support */
+ USB_DEVICE_VENDOR_SPEC(0x0644, 0x8021),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "TASCAM",
+ .product_name = "US122 MKII",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 1,
+@@ -3104,9 +2411,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3114,20 +2419,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ /* Denon DN-X1600 */
+ {
+ USB_AUDIO_DEVICE(0x154e, 0x500e),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Denon",
+ .product_name = "DN-X1600",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]){
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 1,
+@@ -3148,9 +2446,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 2,
+@@ -3170,13 +2466,8 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = 4,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE,
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_STANDARD_MIDI(4) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3185,17 +2476,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ USB_DEVICE(0x045e, 0x0283),
+ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Microsoft",
+ .product_name = "XboxLive Headset/Xbox Communicator",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
++ QUIRK_DATA_COMPOSITE {
+ {
+ /* playback */
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 1,
+ .iface = 0,
+@@ -3211,9 +2498,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ {
+ /* capture */
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 1,
+ .iface = 1,
+@@ -3227,9 +2512,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_max = 16000
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3238,18 +2521,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ {
+ USB_DEVICE(0x200c, 0x100b),
+ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 1,
+@@ -3268,9 +2544,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3283,28 +2557,12 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * enabled in create_standard_audio_quirk().
+ */
+ USB_DEVICE(0x1686, 0x00dd),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- /* Playback */
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+- },
+- {
+- /* Capture */
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+- },
+- {
+- /* Midi */
+- .ifnum = 3,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(1) }, /* Playback */
++ { QUIRK_DATA_STANDARD_AUDIO(2) }, /* Capture */
++ { QUIRK_DATA_STANDARD_MIDI(3) }, /* Midi */
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3318,18 +2576,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING,
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_MIDI_STANDARD_INTERFACE
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_STANDARD_MIDI(QUIRK_ANY_INTERFACE)
+ }
+ },
+
+ /* Rane SL-1 */
+ {
+ USB_DEVICE(0x13e5, 0x0001),
+- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_STANDARD_AUDIO(QUIRK_ANY_INTERFACE)
+ }
+ },
+
+@@ -3345,24 +2601,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * and only the 48 kHz sample rate works for the playback interface.
+ */
+ USB_DEVICE(0x0a12, 0x1243),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
+- /* Capture */
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
++ { QUIRK_DATA_IGNORE(1) }, /* Capture */
+ /* Playback */
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+@@ -3381,9 +2626,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3396,19 +2639,12 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * even on windows.
+ */
+ USB_DEVICE(0x19b5, 0x0021),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ /* Playback */
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 1,
+@@ -3427,29 +2663,20 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+ /* MOTU Microbook II */
+ {
+ USB_DEVICE_VENDOR_SPEC(0x07fd, 0x0004),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "MOTU",
+ .product_name = "MicroBookII",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(0) },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 6,
+ .iface = 0,
+@@ -3470,9 +2697,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 8,
+ .iface = 0,
+@@ -3493,9 +2718,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3507,14 +2730,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * The feedback for the output is the input.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0023),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 12,
+ .iface = 0,
+@@ -3531,9 +2750,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 10,
+ .iface = 0,
+@@ -3551,9 +2768,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3596,14 +2811,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * but not for DVS (Digital Vinyl Systems) like in Mixxx.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8, // outputs
+ .iface = 0,
+@@ -3620,9 +2831,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8, // inputs
+ .iface = 0,
+@@ -3640,9 +2849,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 48000 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3653,14 +2860,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * The feedback for the output is the dummy input.
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 0,
+@@ -3677,9 +2880,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 2,
+ .iface = 0,
+@@ -3697,9 +2898,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3710,14 +2909,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * PCM is 6 channels out & 4 channels in @ 44.1 fixed
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000d),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6, //Master, Headphones & Booth
+ .iface = 0,
+@@ -3734,9 +2929,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4, //2x RCA inputs (CH1 & CH2)
+ .iface = 0,
+@@ -3754,9 +2947,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3768,14 +2959,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * The Feedback for the output is the input
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x001e),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 0,
+@@ -3792,9 +2979,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 0,
+@@ -3812,9 +2997,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3825,14 +3008,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * 10 channels playback & 12 channels capture @ 44.1/48/96kHz S24LE
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000a),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 10,
+ .iface = 0,
+@@ -3853,9 +3032,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 12,
+ .iface = 0,
+@@ -3877,9 +3054,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3891,14 +3066,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * The Feedback for the output is the input
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0029),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 0,
+@@ -3915,9 +3086,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 0,
+@@ -3935,9 +3104,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -3955,20 +3122,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ */
+ {
+ USB_AUDIO_DEVICE(0x534d, 0x0021),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "MacroSilicon",
+ .product_name = "MS210x",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(2) },
+ {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(3) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 3,
+@@ -3983,9 +3143,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_max = 48000,
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4003,20 +3161,13 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ */
+ {
+ USB_AUDIO_DEVICE(0x534d, 0x2109),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "MacroSilicon",
+ .product_name = "MS2109",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_MIXER,
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_MIXER(2) },
+ {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(3) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 3,
+@@ -4031,9 +3182,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_max = 48000,
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4043,14 +3192,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * 8 channels playback & 8 channels capture @ 44.1/48/96kHz S24LE
+ */
+ USB_DEVICE_VENDOR_SPEC(0x08e4, 0x017f),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 0,
+@@ -4069,9 +3214,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 0,
+@@ -4091,9 +3234,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4103,14 +3244,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * 10 channels playback & 12 channels capture @ 48kHz S24LE
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x001b),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 10,
+ .iface = 0,
+@@ -4129,9 +3266,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 12,
+ .iface = 0,
+@@ -4149,9 +3284,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 48000 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4163,14 +3296,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * Capture on EP 0x86
+ */
+ USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 0,
+@@ -4190,9 +3319,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8,
+ .iface = 0,
+@@ -4212,9 +3339,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4225,14 +3350,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * and 8 channels in @ 48 fixed (endpoint 0x82).
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0013),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8, // outputs
+ .iface = 0,
+@@ -4249,9 +3370,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(0) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8, // inputs
+ .iface = 0,
+@@ -4269,9 +3388,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .rate_table = (unsigned int[]) { 48000 }
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4282,28 +3399,15 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ */
+ USB_DEVICE(0x1395, 0x0300),
+ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
++ QUIRK_DRIVER_INFO {
++ QUIRK_DATA_COMPOSITE {
+ // Communication
+- {
+- .ifnum = 3,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ { QUIRK_DATA_STANDARD_AUDIO(3) },
+ // Recording
+- {
+- .ifnum = 4,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ { QUIRK_DATA_STANDARD_AUDIO(4) },
+ // Main
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
+- {
+- .ifnum = -1
+- }
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4312,21 +3416,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * Fiero SC-01 (firmware v1.0.0 @ 48 kHz)
+ */
+ USB_DEVICE(0x2b53, 0x0023),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Fiero",
+ .product_name = "SC-01",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
+ /* Playback */
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .fmt_bits = 24,
+@@ -4346,9 +3443,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ /* Capture */
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .fmt_bits = 24,
+@@ -4367,9 +3462,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .clock = 0x29
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4378,21 +3471,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * Fiero SC-01 (firmware v1.0.0 @ 96 kHz)
+ */
+ USB_DEVICE(0x2b53, 0x0024),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Fiero",
+ .product_name = "SC-01",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
+ /* Playback */
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .fmt_bits = 24,
+@@ -4412,9 +3498,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ /* Capture */
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .fmt_bits = 24,
+@@ -4433,9 +3517,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .clock = 0x29
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4444,21 +3526,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * Fiero SC-01 (firmware v1.1.0)
+ */
+ USB_DEVICE(0x2b53, 0x0031),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Fiero",
+ .product_name = "SC-01",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = &(const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE
+- },
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_STANDARD_AUDIO(0) },
+ /* Playback */
+ {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(1) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .fmt_bits = 24,
+@@ -4479,9 +3554,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ },
+ /* Capture */
+ {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+- .data = &(const struct audioformat) {
++ QUIRK_DATA_AUDIOFORMAT(2) {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels = 2,
+ .fmt_bits = 24,
+@@ -4501,9 +3574,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .clock = 0x29
+ }
+ },
+- {
+- .ifnum = -1
+- }
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
+@@ -4512,27 +3583,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ * For the standard mode, Mythware XA001AU has ID ffad:a001
+ */
+ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
+- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ QUIRK_DRIVER_INFO {
+ .vendor_name = "Mythware",
+ .product_name = "XA001AU",
+- .ifnum = QUIRK_ANY_INTERFACE,
+- .type = QUIRK_COMPOSITE,
+- .data = (const struct snd_usb_audio_quirk[]) {
+- {
+- .ifnum = 0,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
+- {
+- .ifnum = 1,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+- },
+- {
+- .ifnum = 2,
+- .type = QUIRK_AUDIO_STANDARD_INTERFACE,
+- },
+- {
+- .ifnum = -1
+- }
++ QUIRK_DATA_COMPOSITE {
++ { QUIRK_DATA_IGNORE(0) },
++ { QUIRK_DATA_STANDARD_AUDIO(1) },
++ { QUIRK_DATA_STANDARD_AUDIO(2) },
++ QUIRK_COMPOSITE_END
+ }
+ }
+ },
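
The quirks-table.h hunks above are a mechanical cleanup: every open-coded quirk initializer is collapsed into a helper macro, and every bare "{ .ifnum = -1 }" terminator becomes QUIRK_COMPOSITE_END. The macro definitions themselves are outside this excerpt, but their shape can be read back from the one-for-one replacements; a sketch of the presumed expansions (the parameter name _ifno is illustrative):

#define QUIRK_DRIVER_INFO \
        .driver_info = (unsigned long)&(const struct snd_usb_audio_quirk)

#define QUIRK_DATA_COMPOSITE \
        .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, \
        .data = &(const struct snd_usb_audio_quirk[])

#define QUIRK_COMPOSITE_END    { .ifnum = -1 }

#define QUIRK_DATA_IGNORE(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_IGNORE_INTERFACE

#define QUIRK_DATA_STANDARD_AUDIO(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_AUDIO_STANDARD_INTERFACE

#define QUIRK_DATA_STANDARD_MIXER(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_AUDIO_STANDARD_MIXER

#define QUIRK_DATA_STANDARD_MIDI(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_MIDI_STANDARD_INTERFACE

#define QUIRK_DATA_RAW_BYTES(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_MIDI_RAW_BYTES

#define QUIRK_DATA_AUDIOFORMAT(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_AUDIO_FIXED_ENDPOINT, \
        .data = &(const struct audioformat)

#define QUIRK_DATA_MIDI_FIXED_ENDPOINT(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_MIDI_FIXED_ENDPOINT, \
        .data = &(const struct snd_usb_midi_endpoint_info)

#define QUIRK_DATA_MIDI_MIDIMAN(_ifno) \
        .ifnum = (_ifno), .type = QUIRK_MIDI_MIDIMAN, \
        .data = &(const struct snd_usb_midi_endpoint_info)

(QUIRK_DATA_MIDI_YAMAHA and QUIRK_DATA_MIDI_EMAGIC follow the same pattern.) Each expansion reproduces exactly what the removed lines spelled out, so the conversion is behavior-neutral.
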
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4e64842245e190..1753746430da5f 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1387,7 +1387,7 @@ static int snd_usb_motu_microbookii_boot_quirk(struct usb_device *dev)
+
+ static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
+ {
+- msleep(2000);
++ msleep(4000);
+
+ return 0;
+ }
+@@ -1630,7 +1630,7 @@ int snd_usb_apply_boot_quirk_once(struct usb_device *dev,
+ unsigned int id)
+ {
+ switch (id) {
+- case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
++ case USB_ID(0x07fd, 0x0008): /* MOTU M Series, 1st hardware version */
+ return snd_usb_motu_m_series_boot_quirk(dev);
+ }
+
+@@ -2031,10 +2031,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x04d8, 0xfeea, /* Benchmark DAC1 Pre */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x04e8, 0xa051, /* Samsung USBC Headset (AKG) */
+ QUIRK_FLAG_SKIP_CLOCK_SELECTOR | QUIRK_FLAG_CTL_MSG_DELAY_5M),
++	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camera */
++ QUIRK_FLAG_IFACE_SKIP_CLOSE),
+ DEVICE_FLG(0x054c, 0x0b8c, /* Sony WALKMAN NW-A45 DAC */
+ QUIRK_FLAG_SET_IFACE_FIRST),
+ DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */
+@@ -2073,14 +2077,24 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x0763, 0x2031, /* M-Audio Fast Track C600 */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x07fd, 0x000b, /* MOTU M Series 2nd hardware revision */
++ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x0951, 0x16ad, /* Kingston HyperX */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
++ DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
++ QUIRK_FLAG_FIXED_RATE),
++ DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
++ QUIRK_FLAG_FIXED_RATE),
+ DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
++ DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
+@@ -2109,10 +2123,18 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DISABLE_AUTOSUSPEND),
+ DEVICE_FLG(0x17aa, 0x104d, /* Lenovo ThinkStation P620 Internal Speaker + Front Headset */
+ QUIRK_FLAG_DISABLE_AUTOSUSPEND),
++ DEVICE_FLG(0x1852, 0x5062, /* Luxman D-08u */
++ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x1852, 0x5065, /* Luxman DA-06 */
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */
+@@ -2155,6 +2177,16 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x2d95, 0x8011, /* VIVO USB-C HEADSET */
++ QUIRK_FLAG_CTL_MSG_DELAY_1M),
++ DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
++ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+@@ -2163,22 +2195,6 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
+ QUIRK_FLAG_ALIGN_TRANSFER),
+- DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+- QUIRK_FLAG_GET_SAMPLE_RATE),
+- DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+- DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+- DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+- DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+- QUIRK_FLAG_IFACE_SKIP_CLOSE),
+- DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+- QUIRK_FLAG_FIXED_RATE),
+- DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+- QUIRK_FLAG_FIXED_RATE),
+- DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+- QUIRK_FLAG_GET_SAMPLE_RATE),
+
+ /* Vendor matches */
+ VENDOR_FLG(0x045e, /* MS Lifecam */
+@@ -2220,6 +2236,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2ab6, /* T+A devices */
+ QUIRK_FLAG_DSD_RAW),
++ VENDOR_FLG(0x2afd, /* McIntosh Laboratory, Inc. */
++ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x2d87, /* Cayin device */
+ QUIRK_FLAG_DSD_RAW),
+ VENDOR_FLG(0x3336, /* HEM devices */
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 3d4add94e367d6..e14c725acebf2c 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -244,8 +244,8 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ SNDRV_CHMAP_FR, /* right front */
+ SNDRV_CHMAP_FC, /* center front */
+ SNDRV_CHMAP_LFE, /* LFE */
+- SNDRV_CHMAP_SL, /* left surround */
+- SNDRV_CHMAP_SR, /* right surround */
++ SNDRV_CHMAP_RL, /* left surround */
++ SNDRV_CHMAP_RR, /* right surround */
+ SNDRV_CHMAP_FLC, /* left of center */
+ SNDRV_CHMAP_FRC, /* right of center */
+ SNDRV_CHMAP_RC, /* surround */
+@@ -300,9 +300,12 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ c = 0;
+
+ if (bits) {
+- for (; bits && *maps; maps++, bits >>= 1)
++ for (; bits && *maps; maps++, bits >>= 1) {
+ if (bits & 1)
+ chmap->map[c++] = *maps;
++ if (c == chmap->channels)
++ break;
++ }
+ } else {
+ /* If we're missing wChannelConfig, then guess something
+ to make sure the channel map is not skipped entirely */
+diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
+index 5f6f84837a4903..329d41f8c92378 100644
+--- a/tools/arch/arm64/include/asm/cputype.h
++++ b/tools/arch/arm64/include/asm/cputype.h
+@@ -84,6 +84,9 @@
+ #define ARM_CPU_PART_CORTEX_X2 0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2 0xD49
+ #define ARM_CPU_PART_CORTEX_A78C 0xD4B
++#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
++#define ARM_CPU_PART_CORTEX_X4 0xD82
++#define ARM_CPU_PART_NEOVERSE_V3 0xD84
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+@@ -153,6 +156,9 @@
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+ #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
++#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
++#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784ec..8d94739d75c67c 100644
+--- a/tools/arch/parisc/include/uapi/asm/errno.h
++++ b/tools/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+
+ /* We now return you to your regularly scheduled HPUX. */
+
+-#define ENOSYM 215 /* symbol does not exist in executable */
+ #define ENOTSOCK 216 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 217 /* Destination address required */
+ #define EMSGSIZE 218 /* Message too long */
+@@ -101,7 +100,6 @@
+ #define ETIMEDOUT 238 /* Connection timed out */
+ #define ECONNREFUSED 239 /* Connection refused */
+ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+-#define EREMOTERELEASE 240 /* Remote peer released connection */
+ #define EHOSTDOWN 241 /* Host is down */
+ #define EHOSTUNREACH 242 /* No route to host */
+
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index 798e60b5454b7e..845a4023ba44e2 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -219,7 +219,7 @@
+ #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+-#define X86_FEATURE_ZEN (7*32+28) /* "" CPU based on Zen microarchitecture */
++#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
+ #define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+ #define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
+ #define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
+diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
+index 11ff975242cac7..e2ff22b379a44c 100644
+--- a/tools/arch/x86/include/asm/rmwcc.h
++++ b/tools/arch/x86/include/asm/rmwcc.h
+@@ -4,7 +4,7 @@
+
+ #define __GEN_RMWcc(fullop, var, cc, ...) \
+ do { \
+- asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
++ asm goto (fullop "; j" cc " %l[cc_label]" \
+ : : "m" (var), ## __VA_ARGS__ \
+ : "memory" : cc_label); \
+ return 0; \
+diff --git a/tools/arch/x86/intel_sdsi/intel_sdsi.c b/tools/arch/x86/intel_sdsi/intel_sdsi.c
+index 2cd92761f1714c..ba2a6b6645ae84 100644
+--- a/tools/arch/x86/intel_sdsi/intel_sdsi.c
++++ b/tools/arch/x86/intel_sdsi/intel_sdsi.c
+@@ -43,7 +43,6 @@
+ #define METER_CERT_MAX_SIZE 4096
+ #define STATE_MAX_NUM_LICENSES 16
+ #define STATE_MAX_NUM_IN_BUNDLE (uint32_t)8
+-#define METER_MAX_NUM_BUNDLES 8
+
+ #define __round_mask(x, y) ((__typeof__(x))((y) - 1))
+ #define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
+@@ -154,11 +153,12 @@ struct bundle_encoding {
+ };
+
+ struct meter_certificate {
+- uint32_t block_signature;
+- uint32_t counter_unit;
++ uint32_t signature;
++ uint32_t version;
+ uint64_t ppin;
++ uint32_t counter_unit;
+ uint32_t bundle_length;
+- uint32_t reserved;
++ uint64_t reserved;
+ uint32_t mmrc_encoding;
+ uint32_t mmrc_counter;
+ };
+@@ -167,6 +167,11 @@ struct bundle_encoding_counter {
+ uint32_t encoding;
+ uint32_t counter;
+ };
++#define METER_BUNDLE_SIZE sizeof(struct bundle_encoding_counter)
++#define BUNDLE_COUNT(length) ((length) / METER_BUNDLE_SIZE)
++#define METER_MAX_NUM_BUNDLES \
++ ((METER_CERT_MAX_SIZE - sizeof(struct meter_certificate)) / \
++ sizeof(struct bundle_encoding_counter))
+
+ struct sdsi_dev {
+ struct sdsi_regs regs;
+@@ -334,6 +339,7 @@ static int sdsi_meter_cert_show(struct sdsi_dev *s)
+ uint32_t count = 0;
+ FILE *cert_ptr;
+ int ret, size;
++ char name[4];
+
+ ret = sdsi_update_registers(s);
+ if (ret)
+@@ -375,32 +381,40 @@ static int sdsi_meter_cert_show(struct sdsi_dev *s)
+ printf("\n");
+ printf("Meter certificate for device %s\n", s->dev_name);
+ printf("\n");
+- printf("Block Signature: 0x%x\n", mc->block_signature);
+- printf("Count Unit: %dms\n", mc->counter_unit);
+- printf("PPIN: 0x%lx\n", mc->ppin);
+- printf("Feature Bundle Length: %d\n", mc->bundle_length);
+- printf("MMRC encoding: %d\n", mc->mmrc_encoding);
+- printf("MMRC counter: %d\n", mc->mmrc_counter);
+- if (mc->bundle_length % 8) {
++
++ get_feature(mc->signature, name);
++ printf("Signature: %.4s\n", name);
++
++ printf("Version: %d\n", mc->version);
++ printf("Count Unit: %dms\n", mc->counter_unit);
++ printf("PPIN: 0x%lx\n", mc->ppin);
++ printf("Feature Bundle Length: %d\n", mc->bundle_length);
++
++ get_feature(mc->mmrc_encoding, name);
++ printf("MMRC encoding: %.4s\n", name);
++
++ printf("MMRC counter: %d\n", mc->mmrc_counter);
++ if (mc->bundle_length % METER_BUNDLE_SIZE) {
+ fprintf(stderr, "Invalid bundle length\n");
+ return -1;
+ }
+
+- if (mc->bundle_length > METER_MAX_NUM_BUNDLES * 8) {
+- fprintf(stderr, "More than %d bundles: %d\n",
+- METER_MAX_NUM_BUNDLES, mc->bundle_length / 8);
++ if (mc->bundle_length > METER_MAX_NUM_BUNDLES * METER_BUNDLE_SIZE) {
++ fprintf(stderr, "More than %ld bundles: actual %ld\n",
++ METER_MAX_NUM_BUNDLES, BUNDLE_COUNT(mc->bundle_length));
+ return -1;
+ }
+
+- bec = (void *)(mc) + sizeof(mc);
++ bec = (struct bundle_encoding_counter *)(mc + 1);
+
+- printf("Number of Feature Counters: %d\n", mc->bundle_length / 8);
+- while (count++ < mc->bundle_length / 8) {
++ printf("Number of Feature Counters: %ld\n", BUNDLE_COUNT(mc->bundle_length));
++ while (count < BUNDLE_COUNT(mc->bundle_length)) {
+ char feature[5];
+
+ feature[4] = '\0';
+ get_feature(bec[count].encoding, feature);
+ printf(" %s: %d\n", feature, bec[count].counter);
++ ++count;
+ }
+
+ return 0;
+diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c
+index 24b7d017ec2c11..b7965dfff33a9a 100644
+--- a/tools/arch/x86/kcpuid/kcpuid.c
++++ b/tools/arch/x86/kcpuid/kcpuid.c
+@@ -7,7 +7,8 @@
+ #include <string.h>
+ #include <getopt.h>
+
+-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+ typedef unsigned int u32;
+ typedef unsigned long long u64;
+@@ -207,12 +208,9 @@ static void raw_dump_range(struct cpuid_range *range)
+ #define MAX_SUBLEAF_NUM 32
+ struct cpuid_range *setup_cpuid_range(u32 input_eax)
+ {
+- u32 max_func, idx_func;
+- int subleaf;
++ u32 max_func, idx_func, subleaf, max_subleaf;
++ u32 eax, ebx, ecx, edx, f = input_eax;
+ struct cpuid_range *range;
+- u32 eax, ebx, ecx, edx;
+- u32 f = input_eax;
+- int max_subleaf;
+ bool allzero;
+
+ eax = input_eax;
+@@ -258,7 +256,7 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax)
+ * others have to be tried (0xf)
+ */
+ if (f == 0x7 || f == 0x14 || f == 0x17 || f == 0x18)
+- max_subleaf = (eax & 0xff) + 1;
++ max_subleaf = min((eax & 0xff) + 1, max_subleaf);
+
+ if (f == 0xb)
+ max_subleaf = 2;
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index 5168ee0360b246..d1ccd06c531278 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -148,7 +148,7 @@ AVXcode:
+ 65: SEG=GS (Prefix)
+ 66: Operand-Size (Prefix)
+ 67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
++68: PUSH Iz
+ 69: IMUL Gv,Ev,Iz
+ 6a: PUSH Ib (d64)
+ 6b: IMUL Gv,Ev,Ib
+@@ -698,10 +698,10 @@ AVXcode: 2
+ 4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+ 4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+ 4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-50: vpdpbusd Vx,Hx,Wx (66),(ev)
+-51: vpdpbusds Vx,Hx,Wx (66),(ev)
+-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
++50: vpdpbusd Vx,Hx,Wx (66)
++51: vpdpbusds Vx,Hx,Wx (66)
++52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
++53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+ 54: vpopcntb/w Vx,Wx (66),(ev)
+ 55: vpopcntd/q Vx,Wx (66),(ev)
+ 58: vpbroadcastd Vx,Wx (66),(v)
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index cc6e6aae2447da..9b75639434b815 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -244,29 +244,101 @@ int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
+ return fd;
+ }
+
+-int mount_bpffs_for_pin(const char *name, bool is_dir)
++int create_and_mount_bpffs_dir(const char *dir_name)
+ {
+ char err_str[ERR_MAX_LEN];
+- char *file;
+- char *dir;
++ bool dir_exists;
+ int err = 0;
+
+- if (is_dir && is_bpffs(name))
++ if (is_bpffs(dir_name))
+ return err;
+
+- file = malloc(strlen(name) + 1);
+- if (!file) {
++ dir_exists = access(dir_name, F_OK) == 0;
++
++ if (!dir_exists) {
++ char *temp_name;
++ char *parent_name;
++
++ temp_name = strdup(dir_name);
++ if (!temp_name) {
++ p_err("mem alloc failed");
++ return -1;
++ }
++
++ parent_name = dirname(temp_name);
++
++ if (is_bpffs(parent_name)) {
++ /* nothing to do if already mounted */
++ free(temp_name);
++ return err;
++ }
++
++ if (access(parent_name, F_OK) == -1) {
++ p_err("can't create dir '%s' to pin BPF object: parent dir '%s' doesn't exist",
++ dir_name, parent_name);
++ free(temp_name);
++ return -1;
++ }
++
++ free(temp_name);
++ }
++
++ if (block_mount) {
++ p_err("no BPF file system found, not mounting it due to --nomount option");
++ return -1;
++ }
++
++ if (!dir_exists) {
++ err = mkdir(dir_name, S_IRWXU);
++ if (err) {
++ p_err("failed to create dir '%s': %s", dir_name, strerror(errno));
++ return err;
++ }
++ }
++
++ err = mnt_fs(dir_name, "bpf", err_str, ERR_MAX_LEN);
++ if (err) {
++ err_str[ERR_MAX_LEN - 1] = '\0';
++ p_err("can't mount BPF file system on given dir '%s': %s",
++ dir_name, err_str);
++
++ if (!dir_exists)
++ rmdir(dir_name);
++ }
++
++ return err;
++}
++
++int mount_bpffs_for_file(const char *file_name)
++{
++ char err_str[ERR_MAX_LEN];
++ char *temp_name;
++ char *dir;
++ int err = 0;
++
++ if (access(file_name, F_OK) != -1) {
++ p_err("can't pin BPF object: path '%s' already exists", file_name);
++ return -1;
++ }
++
++ temp_name = strdup(file_name);
++ if (!temp_name) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
+- strcpy(file, name);
+- dir = dirname(file);
++ dir = dirname(temp_name);
+
+ if (is_bpffs(dir))
+ /* nothing to do if already mounted */
+ goto out_free;
+
++ if (access(dir, F_OK) == -1) {
++ p_err("can't pin BPF object: dir '%s' doesn't exist", dir);
++ err = -1;
++ goto out_free;
++ }
++
+ if (block_mount) {
+ p_err("no BPF file system found, not mounting it due to --nomount option");
+ err = -1;
+@@ -276,12 +348,12 @@ int mount_bpffs_for_pin(const char *name, bool is_dir)
+ err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
+ if (err) {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+- p_err("can't mount BPF file system to pin the object (%s): %s",
+- name, err_str);
++ p_err("can't mount BPF file system to pin the object '%s': %s",
++ file_name, err_str);
+ }
+
+ out_free:
+- free(file);
++ free(temp_name);
+ return err;
+ }
+
+@@ -289,7 +361,7 @@ int do_pin_fd(int fd, const char *name)
+ {
+ int err;
+
+- err = mount_bpffs_for_pin(name, false);
++ err = mount_bpffs_for_file(name);
+ if (err)
+ return err;
+
+@@ -338,7 +410,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
+ {
+ const char *prog_name = prog_info->name;
+ const struct btf_type *func_type;
+- const struct bpf_func_info finfo = {};
++ struct bpf_func_info finfo = {};
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ struct btf *prog_btf = NULL;
+diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
+index 2883660d6b6728..882bf8e6e70e44 100644
+--- a/tools/bpf/bpftool/gen.c
++++ b/tools/bpf/bpftool/gen.c
+@@ -1209,7 +1209,7 @@ static int do_skeleton(int argc, char **argv)
+ codegen("\
+ \n\
+ \n\
+- s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
++ s->data = %1$s__elf_bytes(&s->data_sz); \n\
+ \n\
+ obj->skeleton = s; \n\
+ return 0; \n\
+@@ -1218,12 +1218,12 @@ static int do_skeleton(int argc, char **argv)
+ return err; \n\
+ } \n\
+ \n\
+- static inline const void *%2$s__elf_bytes(size_t *sz) \n\
++ static inline const void *%1$s__elf_bytes(size_t *sz) \n\
+ { \n\
+- *sz = %1$d; \n\
+- return (const void *)\"\\ \n\
+- "
+- , file_sz, obj_name);
++ static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
++ ",
++ obj_name
++ );
+
+ /* embed contents of BPF object file */
+ print_hex(obj_data, file_sz);
+@@ -1231,6 +1231,9 @@ static int do_skeleton(int argc, char **argv)
+ codegen("\
+ \n\
+ \"; \n\
++ \n\
++ *sz = sizeof(data) - 1; \n\
++ return (const void *)data; \n\
+ } \n\
+ \n\
+ #ifdef __cplusplus \n\
+diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
+index 6b0e5202ca7a96..5c39c2ed36a2be 100644
+--- a/tools/bpf/bpftool/iter.c
++++ b/tools/bpf/bpftool/iter.c
+@@ -76,7 +76,7 @@ static int do_pin(int argc, char **argv)
+ goto close_obj;
+ }
+
+- err = mount_bpffs_for_pin(path, false);
++ err = mount_bpffs_for_file(path);
+ if (err)
+ goto close_link;
+
+diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
+index b8bb08d10dec93..9eb764fe4cc8bd 100644
+--- a/tools/bpf/bpftool/main.h
++++ b/tools/bpf/bpftool/main.h
+@@ -142,7 +142,8 @@ const char *get_fd_type_name(enum bpf_obj_type type);
+ char *get_fdinfo(int fd, const char *key);
+ int open_obj_pinned(const char *path, bool quiet);
+ int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
+-int mount_bpffs_for_pin(const char *name, bool is_dir);
++int mount_bpffs_for_file(const char *file_name);
++int create_and_mount_bpffs_dir(const char *dir_name);
+ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
+ int do_pin_fd(int fd, const char *name);
+
+diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
+index 66a8ce8ae0127a..28e9417a5c2e35 100644
+--- a/tools/bpf/bpftool/net.c
++++ b/tools/bpf/bpftool/net.c
+@@ -480,9 +480,9 @@ static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev,
+ if (prog_flags[i] || json_output) {
+ NET_START_ARRAY("prog_flags", "%s ");
+ for (j = 0; prog_flags[i] && j < 32; j++) {
+- if (!(prog_flags[i] & (1 << j)))
++ if (!(prog_flags[i] & (1U << j)))
+ continue;
+- NET_DUMP_UINT_ONLY(1 << j);
++ NET_DUMP_UINT_ONLY(1U << j);
+ }
+ NET_END_ARRAY("");
+ }
+@@ -491,9 +491,9 @@ static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev,
+ if (link_flags[i] || json_output) {
+ NET_START_ARRAY("link_flags", "%s ");
+ for (j = 0; link_flags[i] && j < 32; j++) {
+- if (!(link_flags[i] & (1 << j)))
++ if (!(link_flags[i] & (1U << j)))
+ continue;
+- NET_DUMP_UINT_ONLY(1 << j);
++ NET_DUMP_UINT_ONLY(1U << j);
+ }
+ NET_END_ARRAY("");
+ }
+@@ -819,6 +819,9 @@ static void show_link_netfilter(void)
+ nf_link_count++;
+ }
+
++ if (!nf_link_info)
++ return;
++
+ qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar);
+
+ for (id = 0; id < nf_link_count; id++) {
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 8443a149dd17fd..e5e0fe3854a353 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1774,7 +1774,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ goto err_close_obj;
+ }
+
+- err = mount_bpffs_for_pin(pinfile, !first_prog_only);
++ if (first_prog_only)
++ err = mount_bpffs_for_file(pinfile);
++ else
++ err = create_and_mount_bpffs_dir(pinfile);
+ if (err)
+ goto err_close_obj;
+
+@@ -1806,6 +1809,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ }
+
+ if (pinmaps) {
++ err = create_and_mount_bpffs_dir(pinmaps);
++ if (err)
++ goto err_unpin;
++
+ err = bpf_object__pin_maps(obj, pinmaps);
+ if (err) {
+ p_err("failed to pin all maps");
+@@ -2294,7 +2301,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
+ int map_fd;
+
+ profile_perf_events = calloc(
+- sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
++ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
+ if (!profile_perf_events) {
+ p_err("failed to allocate memory for perf_event array: %s",
+ strerror(errno));
+diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+index 26004f0c5a6ae1..7bdbcac3cf6285 100644
+--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
++++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+@@ -102,8 +102,8 @@ int iter(struct bpf_iter__task_file *ctx)
+ BPF_LINK_TYPE_PERF_EVENT___local)) {
+ struct bpf_link *link = (struct bpf_link *) file->private_data;
+
+- if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
+- BPF_LINK_TYPE_PERF_EVENT___local)) {
++ if (BPF_CORE_READ(link, type) == bpf_core_enum_value(enum bpf_link_type___local,
++ BPF_LINK_TYPE_PERF_EVENT___local)) {
+ e.has_bpf_cookie = true;
+ e.bpf_cookie = get_bpf_cookie(link);
+ }
+diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
+index 3ebc9fe91e0e13..d110c6ad8175c9 100644
+--- a/tools/bpf/bpftool/struct_ops.c
++++ b/tools/bpf/bpftool/struct_ops.c
+@@ -509,7 +509,7 @@ static int do_register(int argc, char **argv)
+ if (argc == 1)
+ linkdir = GET_ARG();
+
+- if (linkdir && mount_bpffs_for_pin(linkdir, true)) {
++ if (linkdir && create_and_mount_bpffs_dir(linkdir)) {
+ p_err("can't mount bpffs for pinning");
+ return -1;
+ }
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 27a23196d58e10..b3edc239fe5625 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -70,6 +70,7 @@
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <errno.h>
++#include <linux/btf_ids.h>
+ #include <linux/rbtree.h>
+ #include <linux/zalloc.h>
+ #include <linux/err.h>
+@@ -78,7 +79,7 @@
+ #include <subcmd/parse-options.h>
+
+ #define BTF_IDS_SECTION ".BTF_ids"
+-#define BTF_ID "__BTF_ID__"
++#define BTF_ID_PREFIX "__BTF_ID__"
+
+ #define BTF_STRUCT "struct"
+ #define BTF_UNION "union"
+@@ -89,6 +90,14 @@
+
+ #define ADDR_CNT 100
+
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++# define ELFDATANATIVE ELFDATA2LSB
++#elif __BYTE_ORDER == __BIG_ENDIAN
++# define ELFDATANATIVE ELFDATA2MSB
++#else
++# error "Unknown machine endianness!"
++#endif
++
+ struct btf_id {
+ struct rb_node rb_node;
+ char *name;
+@@ -116,6 +125,7 @@ struct object {
+ int idlist_shndx;
+ size_t strtabidx;
+ unsigned long idlist_addr;
++ int encoding;
+ } efile;
+
+ struct rb_root sets;
+@@ -161,7 +171,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
+
+ static bool is_btf_id(const char *name)
+ {
+- return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
++ return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
+ }
+
+ static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
+@@ -319,6 +329,7 @@ static int elf_collect(struct object *obj)
+ {
+ Elf_Scn *scn = NULL;
+ size_t shdrstrndx;
++ GElf_Ehdr ehdr;
+ int idx = 0;
+ Elf *elf;
+ int fd;
+@@ -350,6 +361,13 @@ static int elf_collect(struct object *obj)
+ return -1;
+ }
+
++ if (gelf_getehdr(obj->efile.elf, &ehdr) == NULL) {
++ pr_err("FAILED cannot get ELF header: %s\n",
++ elf_errmsg(-1));
++ return -1;
++ }
++ obj->efile.encoding = ehdr.e_ident[EI_DATA];
++
+ /*
+ * Scan all the elf sections and look for save data
+ * from .BTF_ids section and symbols.
+@@ -441,7 +459,7 @@ static int symbols_collect(struct object *obj)
+ * __BTF_ID__TYPE__vfs_truncate__0
+ * prefix = ^
+ */
+- prefix = name + sizeof(BTF_ID) - 1;
++ prefix = name + sizeof(BTF_ID_PREFIX) - 1;
+
+ /* struct */
+ if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
+@@ -649,19 +667,18 @@ static int cmp_id(const void *pa, const void *pb)
+ static int sets_patch(struct object *obj)
+ {
+ Elf_Data *data = obj->efile.idlist;
+- int *ptr = data->d_buf;
+ struct rb_node *next;
+
+ next = rb_first(&obj->sets);
+ while (next) {
+- unsigned long addr, idx;
++ struct btf_id_set8 *set8;
++ struct btf_id_set *set;
++ unsigned long addr, off;
+ struct btf_id *id;
+- int *base;
+- int cnt;
+
+ id = rb_entry(next, struct btf_id, rb_node);
+ addr = id->addr[0];
+- idx = addr - obj->efile.idlist_addr;
++ off = addr - obj->efile.idlist_addr;
+
+ /* sets are unique */
+ if (id->addr_cnt != 1) {
+@@ -670,14 +687,39 @@ static int sets_patch(struct object *obj)
+ return -1;
+ }
+
+- idx = idx / sizeof(int);
+- base = &ptr[idx] + (id->is_set8 ? 2 : 1);
+- cnt = ptr[idx];
++ if (id->is_set) {
++ set = data->d_buf + off;
++ qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
++ } else {
++ set8 = data->d_buf + off;
++ /*
++ * Make sure id is at the beginning of the pairs
++ * struct, otherwise the below qsort would not work.
++ */
++ BUILD_BUG_ON((u32 *)set8->pairs != &set8->pairs[0].id);
++ qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
+
+- pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
+- (idx + 1) * sizeof(int), cnt, id->name);
++ /*
++ * When ELF endianness does not match endianness of the
++ * host, libelf will do the translation when updating
++ * the ELF. This, however, corrupts SET8 flags which are
++ * already in the target endianness. So, let's bswap
++ * them to the host endianness and libelf will then
++ * correctly translate everything.
++ */
++ if (obj->efile.encoding != ELFDATANATIVE) {
++ int i;
++
++ set8->flags = bswap_32(set8->flags);
++ for (i = 0; i < set8->cnt; i++) {
++ set8->pairs[i].flags =
++ bswap_32(set8->pairs[i].flags);
++ }
++ }
++ }
+
+- qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
++ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
++ off, id->is_set ? set->cnt : set8->cnt, id->name);
+
+ next = rb_next(next);
+ }
+@@ -686,7 +728,7 @@ static int sets_patch(struct object *obj)
+
+ static int symbols_patch(struct object *obj)
+ {
+- int err;
++ off_t err;
+
+ if (__symbols_patch(obj, &obj->structs) ||
+ __symbols_patch(obj, &obj->unions) ||
+diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
+index d8288936c9120f..c4f1f1735af659 100644
+--- a/tools/bpf/runqslower/Makefile
++++ b/tools/bpf/runqslower/Makefile
+@@ -15,6 +15,7 @@ INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
+ CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
+ CFLAGS += $(EXTRA_CFLAGS)
+ LDFLAGS += $(EXTRA_LDFLAGS)
++LDLIBS += -lelf -lz
+
+ # Try to detect best kernel BTF source
+ KERNEL_REL := $(shell uname -r)
+@@ -51,7 +52,7 @@ clean:
+ libbpf_hdrs: $(BPFOBJ)
+
+ $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
+- $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
++ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
+
+ $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
+ $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs
+diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
+index eb6303ff446ed9..4cfcef9da3e434 100644
+--- a/tools/build/feature/test-libopencsd.c
++++ b/tools/build/feature/test-libopencsd.c
+@@ -4,9 +4,9 @@
+ /*
+ * Check OpenCSD library version is sufficient to provide required features
+ */
+-#define OCSD_MIN_VER ((1 << 16) | (1 << 8) | (1))
++#define OCSD_MIN_VER ((1 << 16) | (2 << 8) | (1))
+ #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
+-#error "OpenCSD >= 1.1.1 is required"
++#error "OpenCSD >= 1.2.1 is required"
+ #endif
+
+ int main(void)
+diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c
+index 37e813175642fd..a807df0f059740 100644
+--- a/tools/crypto/ccp/dbc.c
++++ b/tools/crypto/ccp/dbc.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <assert.h>
++#include <errno.h>
+ #include <string.h>
+ #include <sys/ioctl.h>
+
+@@ -22,16 +23,14 @@ int get_nonce(int fd, void *nonce_out, void *signature)
+ struct dbc_user_nonce tmp = {
+ .auth_needed = !!signature,
+ };
+- int ret;
+
+ assert(nonce_out);
+
+ if (signature)
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+- ret = ioctl(fd, DBCIOCNONCE, &tmp);
+- if (ret)
+- return ret;
++ if (ioctl(fd, DBCIOCNONCE, &tmp))
++ return errno;
+ memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce));
+
+ return 0;
+@@ -47,7 +46,9 @@ int set_uid(int fd, __u8 *uid, __u8 *signature)
+ memcpy(tmp.uid, uid, sizeof(tmp.uid));
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+- return ioctl(fd, DBCIOCUID, &tmp);
++ if (ioctl(fd, DBCIOCUID, &tmp))
++ return errno;
++ return 0;
+ }
+
+ int process_param(int fd, int msg_index, __u8 *signature, int *data)
+@@ -63,10 +64,10 @@ int process_param(int fd, int msg_index, __u8 *signature, int *data)
+
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+- ret = ioctl(fd, DBCIOCPARAM, &tmp);
+- if (ret)
+- return ret;
++ if (ioctl(fd, DBCIOCPARAM, &tmp))
++ return errno;
+
+ *data = tmp.param;
++ memcpy(signature, tmp.signature, sizeof(tmp.signature));
+ return 0;
+ }
+diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py
+index 3f6a825ffc9e4e..2b91415b194074 100644
+--- a/tools/crypto/ccp/dbc.py
++++ b/tools/crypto/ccp/dbc.py
+@@ -27,8 +27,7 @@ lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL)
+
+
+ def handle_error(code):
+- val = code * -1
+- raise OSError(val, os.strerror(val))
++ raise OSError(code, os.strerror(code))
+
+
+ def get_nonce(device, signature):
+@@ -58,7 +57,8 @@ def process_param(device, message, signature, data=None):
+ if type(message) != tuple:
+ raise ValueError("Expected message tuple")
+ arg = ctypes.c_int(data if data else 0)
+- ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg))
++ sig = ctypes.create_string_buffer(signature, len(signature))
++ ret = lib.process_param(device.fileno(), message[0], ctypes.pointer(sig), ctypes.pointer(arg))
+ if ret:
+ handle_error(ret)
+- return arg, signature
++ return arg.value, sig.value
+diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py
+index 998bb3e3cd0409..79de3638a01abe 100755
+--- a/tools/crypto/ccp/test_dbc.py
++++ b/tools/crypto/ccp/test_dbc.py
+@@ -4,6 +4,12 @@ import unittest
+ import os
+ import time
+ import glob
++import fcntl
++try:
++ import ioctl_opt as ioctl
++except ImportError:
++ ioctl = None
++ pass
+ from dbc import *
+
+ # Artificial delay between set commands
+@@ -27,8 +33,8 @@ def system_is_secured() -> bool:
+ class DynamicBoostControlTest(unittest.TestCase):
+ def __init__(self, data) -> None:
+ self.d = None
+- self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
+- self.uid = "1111111111111111"
++ self.signature = b"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
++ self.uid = b"1111111111111111"
+ super().__init__(data)
+
+ def setUp(self) -> None:
+@@ -64,13 +70,16 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ def setUp(self) -> None:
+ if not os.path.exists(DEVICE_NODE):
+ self.skipTest("system is unsupported")
++ if not ioctl:
++ self.skipTest("unable to test IOCTLs without ioctl_opt")
++
+ return super().setUp()
+
+ def test_invalid_nonce_ioctl(self) -> None:
+ """tries to call get_nonce ioctl with invalid data structures"""
+
+ # 0x1 (get nonce), and invalid data
+- INVALID1 = IOWR(ord("D"), 0x01, invalid_param)
++ INVALID1 = ioctl.IOWR(ord("D"), 0x01, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID1, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -79,7 +88,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ """tries to call set_uid ioctl with invalid data structures"""
+
+ # 0x2 (set uid), and invalid data
+- INVALID2 = IOW(ord("D"), 0x02, invalid_param)
++ INVALID2 = ioctl.IOW(ord("D"), 0x02, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID2, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -88,7 +97,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ """tries to call set_uid ioctl with invalid data structures"""
+
+ # 0x2 as RW (set uid), and invalid data
+- INVALID3 = IOWR(ord("D"), 0x02, invalid_param)
++ INVALID3 = ioctl.IOWR(ord("D"), 0x02, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID3, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -96,7 +105,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ def test_invalid_param_ioctl(self) -> None:
+ """tries to call param ioctl with invalid data structures"""
+ # 0x3 (param), and invalid data
+- INVALID4 = IOWR(ord("D"), 0x03, invalid_param)
++ INVALID4 = ioctl.IOWR(ord("D"), 0x03, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID4, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -104,7 +113,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
+ def test_invalid_call_ioctl(self) -> None:
+ """tries to call the DBC ioctl with invalid data structures"""
+ # 0x4, and invalid data
+- INVALID5 = IOWR(ord("D"), 0x04, invalid_param)
++ INVALID5 = ioctl.IOWR(ord("D"), 0x04, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID5, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+@@ -183,12 +192,12 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ # SOC power
+ soc_power_max = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature)
+ soc_power_min = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature)
+- self.assertGreater(soc_power_max.parameter, soc_power_min.parameter)
++ self.assertGreater(soc_power_max[0], soc_power_min[0])
+
+ # fmax
+ fmax_max = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature)
+ fmax_min = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature)
+- self.assertGreater(fmax_max.parameter, fmax_min.parameter)
++ self.assertGreater(fmax_max[0], fmax_min[0])
+
+ # cap values
+ keys = {
+@@ -199,7 +208,7 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ }
+ for k in keys:
+ result = process_param(self.d, keys[k], self.signature)
+- self.assertGreater(result.parameter, 0)
++ self.assertGreater(result[0], 0)
+
+ def test_get_invalid_param(self) -> None:
+ """fetch an invalid parameter"""
+@@ -217,17 +226,17 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ original = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+
+ # set the fmax
+- target = original.parameter - 100
++ target = original[0] - 100
+ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target)
+ time.sleep(SET_DELAY)
+ new = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+- self.assertEqual(new.parameter, target)
++ self.assertEqual(new[0], target)
+
+ # revert back to current
+- process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.parameter)
++ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original[0])
+ time.sleep(SET_DELAY)
+ cur = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+- self.assertEqual(cur.parameter, original.parameter)
++ self.assertEqual(cur[0], original[0])
+
+ def test_set_power_cap(self) -> None:
+ """get/set power cap limit"""
+@@ -235,17 +244,17 @@ class TestUnFusedSystem(DynamicBoostControlTest):
+ original = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+
+ # set the fmax
+- target = original.parameter - 10
++ target = original[0] - 10
+ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target)
+ time.sleep(SET_DELAY)
+ new = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+- self.assertEqual(new.parameter, target)
++ self.assertEqual(new[0], target)
+
+ # revert back to current
+- process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.parameter)
++ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original[0])
+ time.sleep(SET_DELAY)
+ cur = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+- self.assertEqual(cur.parameter, original.parameter)
++ self.assertEqual(cur[0], original[0])
+
+ def test_set_3d_graphics_mode(self) -> None:
+ """set/get 3d graphics mode"""
+diff --git a/tools/hv/Makefile b/tools/hv/Makefile
+index fe770e679ae8fe..5643058e2d377b 100644
+--- a/tools/hv/Makefile
++++ b/tools/hv/Makefile
+@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
+
+ clean:
+ rm -f $(ALL_PROGRAMS)
+- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
++ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
+
+ install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(sbindir); \
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 264eeb9c46a9f5..318e2dad27e048 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -1421,7 +1421,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
+ if (error)
+ goto setval_error;
+
+- if (new_val->addr_family == ADDR_FAMILY_IPV6) {
++ if (new_val->addr_family & ADDR_FAMILY_IPV6) {
+ error = fprintf(nmfile, "\n[ipv6]\n");
+ if (error < 0)
+ goto setval_error;
+@@ -1455,14 +1455,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
+ if (error < 0)
+ goto setval_error;
+
+- error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
+- if (error < 0)
+- goto setval_error;
+-
+- error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
+- if (error < 0)
+- goto setval_error;
++ /* we do not want ipv4 addresses in ipv6 section and vice versa */
++ if (is_ipv6 != is_ipv4((char *)new_val->gate_way)) {
++ error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
++ if (error < 0)
++ goto setval_error;
++ }
+
++ if (is_ipv6 != is_ipv4((char *)new_val->dns_addr)) {
++ error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
++ if (error < 0)
++ goto setval_error;
++ }
+ fclose(nmfile);
+ fclose(ifcfg_file);
+
+diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
+index ae5a7a8249a208..440a91b35823bf 100755
+--- a/tools/hv/hv_set_ifconfig.sh
++++ b/tools/hv/hv_set_ifconfig.sh
+@@ -53,7 +53,7 @@
+ # or "manual" if no boot-time protocol should be used)
+ #
+ # address1=ipaddr1/plen
+-# address=ipaddr2/plen
++# address2=ipaddr2/plen
+ #
+ # gateway=gateway1;gateway2
+ #
+@@ -61,7 +61,7 @@
+ #
+ # [ipv6]
+ # address1=ipaddr1/plen
+-# address2=ipaddr1/plen
++# address2=ipaddr2/plen
+ #
+ # gateway=gateway1;gateway2
+ #
+diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
+index 44bbf80f0cfdd1..9ef5ee087eda36 100644
+--- a/tools/iio/iio_generic_buffer.c
++++ b/tools/iio/iio_generic_buffer.c
+@@ -54,9 +54,12 @@ enum autochan {
+ static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+ {
+ unsigned int bytes = 0;
+- int i = 0;
++ int i = 0, max = 0;
++ unsigned int misalignment;
+
+ while (i < num_channels) {
++ if (channels[i].bytes > max)
++ max = channels[i].bytes;
+ if (bytes % channels[i].bytes == 0)
+ channels[i].location = bytes;
+ else
+@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
+ bytes = channels[i].location + channels[i].bytes;
+ i++;
+ }
++ /*
++ * We want the data in next sample to also be properly aligned so
++ * we'll add padding at the end if needed. Adding padding only
++ * works for channel data which size is 2^n bytes.
++ */
++ misalignment = bytes % max;
++ if (misalignment)
++ bytes += max - misalignment;
+
+ return bytes;
+ }
+@@ -487,6 +498,10 @@ int main(int argc, char **argv)
+ return -ENOMEM;
+ }
+ trigger_name = malloc(IIO_MAX_NAME_LENGTH);
++ if (!trigger_name) {
++ ret = -ENOMEM;
++ goto error;
++ }
+ ret = read_sysfs_string("name", trig_dev_name, trigger_name);
+ free(trig_dev_name);
+ if (ret < 0) {
+diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
+index 6a00a6eecaef02..c5c5082cb24e5c 100644
+--- a/tools/iio/iio_utils.c
++++ b/tools/iio/iio_utils.c
+@@ -376,7 +376,7 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ goto error_close_dir;
+ }
+
+- seekdir(dp, 0);
++ rewinddir(dp);
+ while (ent = readdir(dp), ent) {
+ if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
+ "_en") == 0) {
+diff --git a/tools/include/linux/align.h b/tools/include/linux/align.h
+new file mode 100644
+index 00000000000000..14e34ace80ddae
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <uapi/linux/const.h>
++
++#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
+index f3566ea0f932ee..210c13b1b8570b 100644
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _TOOLS_LINUX_BITMAP_H
+
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <linux/find.h>
+ #include <stdlib.h>
+@@ -25,13 +26,14 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
+ #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+ #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+
++#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+- memset(dst, 0, len);
++ memset(dst, 0, bitmap_size(nbits));
+ }
+ }
+
+@@ -83,7 +85,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ */
+ static inline unsigned long *bitmap_zalloc(int nbits)
+ {
+- return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
++ return calloc(1, bitmap_size(nbits));
+ }
+
+ /*
+@@ -126,7 +128,6 @@ static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ #define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+ #endif
+ #define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+ static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
+index 2f882d5cb30f5c..72ea363d434db0 100644
+--- a/tools/include/linux/btf_ids.h
++++ b/tools/include/linux/btf_ids.h
+@@ -3,11 +3,22 @@
+ #ifndef _LINUX_BTF_IDS_H
+ #define _LINUX_BTF_IDS_H
+
++#include <linux/types.h> /* for u32 */
++
+ struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+ };
+
++struct btf_id_set8 {
++ u32 cnt;
++ u32 flags;
++ struct {
++ u32 id;
++ u32 flags;
++ } pairs[];
++};
++
+ #ifdef CONFIG_DEBUG_INFO_BTF
+
+ #include <linux/compiler.h> /* for __PASTE */
+diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
+index 1bdd834bdd5719..d09f9dc172a486 100644
+--- a/tools/include/linux/compiler_types.h
++++ b/tools/include/linux/compiler_types.h
+@@ -36,8 +36,8 @@
+ #include <linux/compiler-gcc.h>
+ #endif
+
+-#ifndef asm_volatile_goto
+-#define asm_volatile_goto(x...) asm goto(x)
++#ifndef asm_goto_output
++#define asm_goto_output(x...) asm goto(x)
+ #endif
+
+ #endif /* __LINUX_COMPILER_TYPES_H */
+diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
+index 4b0673bf52c2e6..07cfad817d5390 100644
+--- a/tools/include/linux/kernel.h
++++ b/tools/include/linux/kernel.h
+@@ -8,6 +8,7 @@
+ #include <linux/build_bug.h>
+ #include <linux/compiler.h>
+ #include <linux/math.h>
++#include <linux/panic.h>
+ #include <endian.h>
+ #include <byteswap.h>
+
+diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
+index f3c82ab5b14cd7..dc0fc7125bc31a 100644
+--- a/tools/include/linux/mm.h
++++ b/tools/include/linux/mm.h
+@@ -2,8 +2,8 @@
+ #ifndef _TOOLS_LINUX_MM_H
+ #define _TOOLS_LINUX_MM_H
+
++#include <linux/align.h>
+ #include <linux/mmzone.h>
+-#include <uapi/linux/const.h>
+
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+@@ -11,9 +11,6 @@
+
+ #define PHYS_ADDR_MAX (~(phys_addr_t)0)
+
+-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+-
+ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+ #define __va(x) ((void *)((unsigned long)(x)))
+@@ -37,4 +34,9 @@ static inline void totalram_pages_add(long count)
+ {
+ }
+
++static inline int early_pfn_to_nid(unsigned long pfn)
++{
++ return 0;
++}
++
+ #endif
+diff --git a/tools/include/linux/panic.h b/tools/include/linux/panic.h
+new file mode 100644
+index 00000000000000..9c8f17a41ce8ed
+--- /dev/null
++++ b/tools/include/linux/panic.h
+@@ -0,0 +1,19 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _TOOLS_LINUX_PANIC_H
++#define _TOOLS_LINUX_PANIC_H
++
++#include <stdarg.h>
++#include <stdio.h>
++#include <stdlib.h>
++
++static inline void panic(const char *fmt, ...)
++{
++ va_list argp;
++
++ va_start(argp, fmt);
++ vfprintf(stderr, fmt, argp);
++ va_end(argp);
++ exit(-1);
++}
++
++#endif
+diff --git a/tools/include/nolibc/arch-powerpc.h b/tools/include/nolibc/arch-powerpc.h
+index ac212e6185b26d..41ebd394b90c7a 100644
+--- a/tools/include/nolibc/arch-powerpc.h
++++ b/tools/include/nolibc/arch-powerpc.h
+@@ -172,7 +172,7 @@
+ _ret; \
+ })
+
+-#ifndef __powerpc64__
++#if !defined(__powerpc64__) && !defined(__clang__)
+ /* FIXME: For 32-bit PowerPC, with newer gcc compilers (e.g. gcc 13.1.0),
+ * "omit-frame-pointer" fails with __attribute__((no_stack_protector)) but
+ * works with __attribute__((__optimize__("-fno-stack-protector")))
+diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
+index bacfd35c515656..5be9d3c7435a80 100644
+--- a/tools/include/nolibc/stdlib.h
++++ b/tools/include/nolibc/stdlib.h
+@@ -185,7 +185,7 @@ void *realloc(void *old_ptr, size_t new_size)
+ if (__builtin_expect(!ret, 0))
+ return NULL;
+
+- memcpy(ret, heap->user_p, heap->len);
++ memcpy(ret, heap->user_p, user_p_len);
+ munmap(heap, heap->len);
+ return ret;
+ }
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 0448700890f77d..ba6e346c8d669a 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -77,12 +77,29 @@ struct bpf_insn {
+ __s32 imm; /* signed immediate constant */
+ };
+
+-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
++/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
++ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
++ * the trailing flexible array member) instead.
++ */
+ struct bpf_lpm_trie_key {
+ __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
+ __u8 data[0]; /* Arbitrary size */
+ };
+
++/* Header for bpf_lpm_trie_key structs */
++struct bpf_lpm_trie_key_hdr {
++ __u32 prefixlen;
++};
++
++/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
++struct bpf_lpm_trie_key_u8 {
++ union {
++ struct bpf_lpm_trie_key_hdr hdr;
++ __u32 prefixlen;
++ };
++ __u8 data[]; /* Arbitrary size */
++};
++
+ struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id; /* cgroup inode id */
+ __u32 attach_type; /* program attach type (enum bpf_attach_type) */
+@@ -3257,6 +3274,11 @@ union bpf_attr {
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
++ * **BPF_FIB_LOOKUP_SRC**
++ * Derive and set source IP addr in *params*->ipv{4,6}_src
++ * for the nexthop. If the src addr cannot be derived,
++ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
++ * case, *params*->dmac and *params*->smac are not set either.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+@@ -4490,6 +4512,8 @@ union bpf_attr {
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
++ * Note: the user stack will only be populated if the *task* is
++ * the current task; all other tasks will return -EOPNOTSUPP.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+@@ -4501,6 +4525,7 @@ union bpf_attr {
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
++ * The *task* must be the current task.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+@@ -6953,6 +6978,7 @@ enum {
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
++ BPF_FIB_LOOKUP_SRC = (1U << 4),
+ };
+
+ enum {
+@@ -6965,6 +6991,7 @@ enum {
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
++ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
+ };
+
+ struct bpf_fib_lookup {
+@@ -6984,7 +7011,7 @@ struct bpf_fib_lookup {
+
+ /* output: MTU value */
+ __u16 mtu_result;
+- };
++ } __attribute__((packed, aligned(2)));
+ /* input: L3 device index for lookup
+ * output: device index from FIB lookup
+ */
+@@ -6999,6 +7026,9 @@ struct bpf_fib_lookup {
+ __u32 rt_metric;
+ };
+
++ /* input: source address to consider for lookup
++ * output: source address result from lookup
++ */
+ union {
+ __be32 ipv4_src;
+ __u32 ipv6_src[4]; /* in6_addr; network order */
+diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
+index 3c36aeade991e9..370ed14b1ae092 100644
+--- a/tools/include/uapi/linux/prctl.h
++++ b/tools/include/uapi/linux/prctl.h
+@@ -283,7 +283,8 @@ struct prctl_mm_map {
+
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE 65
+-# define PR_MDWE_REFUSE_EXEC_GAIN 1
++# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
++# define PR_MDWE_NO_INHERIT (1UL << 1)
+
+ #define PR_GET_MDWE 66
+
+diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
+index 9fc429d2852d78..f4a9328035bd7e 100644
+--- a/tools/lib/api/io.h
++++ b/tools/lib/api/io.h
+@@ -12,6 +12,7 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <unistd.h>
++#include <linux/types.h>
+
+ struct io {
+ /* File descriptor being read/ */
+diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
+index 74c2887cfd24a5..107fef74886829 100644
+--- a/tools/lib/bpf/bpf.h
++++ b/tools/lib/bpf/bpf.h
+@@ -35,7 +35,7 @@
+ extern "C" {
+ #endif
+
+-int libbpf_set_memlock_rlim(size_t memlock_bytes);
++LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
+
+ struct bpf_map_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index 1ac57bb7ac55f7..e2b9e8415c0446 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -102,6 +102,7 @@ enum bpf_enum_value_kind {
+ case 2: val = *(const unsigned short *)p; break; \
+ case 4: val = *(const unsigned int *)p; break; \
+ case 8: val = *(const unsigned long long *)p; break; \
++ default: val = 0; break; \
+ } \
+ val <<= __CORE_RELO(s, field, LSHIFT_U64); \
+ if (__CORE_RELO(s, field, SIGNED)) \
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index 3803479dbe1068..1c13f8e88833b4 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -362,8 +362,6 @@ struct pt_regs___arm64 {
+ #define __PT_PARM7_REG a6
+ #define __PT_PARM8_REG a7
+
+-/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
+-#define PT_REGS_SYSCALL_REGS(ctx) ctx
+ #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+ #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+ #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 4d9f30bf7f0143..ebf56d21d08eed 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -1559,10 +1559,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
+ * Clang for BPF target generates func_proto with no
+ * args as a func_proto with a single void arg (e.g.,
+ * `int (*f)(void)` vs just `int (*f)()`). We are
+- * going to pretend there are no args for such case.
++ * going to emit valid empty args (void) syntax for
++ * such case. Similarly and conveniently, valid
++ * no args case can be special-cased here as well.
+ */
+- if (vlen == 1 && p->type == 0) {
+- btf_dump_printf(d, ")");
++ if (vlen == 0 || (vlen == 1 && p->type == 0)) {
++ btf_dump_printf(d, "void)");
+ return;
+ }
+
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 96ff1aa4bf6a0e..ceed16a10285ab 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -70,6 +70,7 @@
+
+ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
+ static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
++static int map_set_def_max_entries(struct bpf_map *map);
+
+ static const char * const attach_type_name[] = {
+ [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
+@@ -4251,6 +4252,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
+
+ scn = elf_sec_by_idx(obj, sec_idx);
+ scn_data = elf_sec_data(obj, scn);
++ if (!scn_data)
++ return -LIBBPF_ERRNO__FORMAT;
+
+ relo_sec_name = elf_sec_str(obj, shdr->sh_name);
+ sec_name = elf_sec_name(obj, scn);
+@@ -5119,6 +5122,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
+
+ if (bpf_map_type__is_map_in_map(def->type)) {
+ if (map->inner_map) {
++ err = map_set_def_max_entries(map->inner_map);
++ if (err)
++ return err;
+ err = bpf_object__create_map(obj, map->inner_map, true);
+ if (err) {
+ pr_warn("map '%s': failed to create inner map: %d\n",
+@@ -9747,7 +9753,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
+ struct bpf_map *
+ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+ {
+- if (prev == NULL)
++ if (prev == NULL && obj != NULL)
+ return obj->maps;
+
+ return __bpf_map__iter(prev, obj, 1);
+@@ -9756,7 +9762,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
+ struct bpf_map *
+ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
+ {
+- if (next == NULL) {
++ if (next == NULL && obj != NULL) {
+ if (!obj->nr_maps)
+ return NULL;
+ return obj->maps + obj->nr_maps - 1;
+@@ -10979,7 +10985,7 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
+
+ n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
+ if (n < 1) {
+- pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
++ pr_warn("kprobe multi pattern is invalid: %s\n", spec);
+ return -EINVAL;
+ }
+
+diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
+index b7060f25448615..8fe248e14eb632 100644
+--- a/tools/lib/bpf/libbpf_common.h
++++ b/tools/lib/bpf/libbpf_common.h
+@@ -79,11 +79,14 @@
+ */
+ #define LIBBPF_OPTS_RESET(NAME, ...) \
+ do { \
+- memset(&NAME, 0, sizeof(NAME)); \
+- NAME = (typeof(NAME)) { \
+- .sz = sizeof(NAME), \
+- __VA_ARGS__ \
+- }; \
++ typeof(NAME) ___##NAME = ({ \
++ memset(&___##NAME, 0, sizeof(NAME)); \
++ (typeof(NAME)) { \
++ .sz = sizeof(NAME), \
++ __VA_ARGS__ \
++ }; \
++ }); \
++ memcpy(&NAME, &___##NAME, sizeof(NAME)); \
+ } while (0)
+
+ #endif /* __LIBBPF_LIBBPF_COMMON_H */
+diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
+index f0f08635adb0d3..57dec645d68786 100644
+--- a/tools/lib/bpf/libbpf_internal.h
++++ b/tools/lib/bpf/libbpf_internal.h
+@@ -18,6 +18,20 @@
+ #include <libelf.h>
+ #include "relo_core.h"
+
++/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
++ * ([0]), and just returns -EINVAL even if file exists and is accessible.
++ * See [1] for issues caused by this.
++ *
++ * So just redefine it to 0 on Android.
++ *
++ * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
++ * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
++ */
++#ifdef __ANDROID__
++#undef AT_EACCESS
++#define AT_EACCESS 0
++#endif
++
+ /* make sure libbpf doesn't use kernel-only integer typedefs */
+ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index 5ced96d99f8c5f..b311bb91f672e5 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -2194,10 +2194,17 @@ static int linker_fixup_btf(struct src_obj *obj)
+ vi = btf_var_secinfos(t);
+ for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
+ const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
+- const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
+- int var_linkage = btf_var(vt)->linkage;
++ const char *var_name;
++ int var_linkage;
+ Elf64_Sym *sym;
+
++ /* could be a variable or function */
++ if (!btf_is_var(vt))
++ continue;
++
++ var_name = btf__str_by_offset(obj->btf, vt->name_off);
++ var_linkage = btf_var(vt)->linkage;
++
+ /* no need to patch up static or extern vars */
+ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
+ continue;
+diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
+index 090bcf6e3b3d58..68a2def171751c 100644
+--- a/tools/lib/bpf/netlink.c
++++ b/tools/lib/bpf/netlink.c
+@@ -496,8 +496,8 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
+ if (err)
+ return libbpf_err(err);
+
+- opts->feature_flags = md.flags;
+- opts->xdp_zc_max_segs = md.xdp_zc_max_segs;
++ OPTS_SET(opts, feature_flags, md.flags);
++ OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs);
+
+ skip_feature_flags:
+ return 0;
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index b8b066d0dc5e45..fad607789d1e53 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -248,10 +248,10 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist)
+
+ static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+- int cpu, int thread, u64 id)
++ int cpu_map_idx, int thread, u64 id)
+ {
+ int hash;
+- struct perf_sample_id *sid = SID(evsel, cpu, thread);
++ struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
+
+ sid->id = id;
+ sid->evsel = evsel;
+@@ -269,21 +269,27 @@ void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
+
+ void perf_evlist__id_add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+- int cpu, int thread, u64 id)
++ int cpu_map_idx, int thread, u64 id)
+ {
+- perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
++ if (!SID(evsel, cpu_map_idx, thread))
++ return;
++
++ perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
+ evsel->id[evsel->ids++] = id;
+ }
+
+ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+- int cpu, int thread, int fd)
++ int cpu_map_idx, int thread, int fd)
+ {
+ u64 read_data[4] = { 0, };
+ int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
++ if (!SID(evsel, cpu_map_idx, thread))
++ return -1;
++
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+@@ -312,7 +318,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ id = read_data[id_idx];
+
+ add:
+- perf_evlist__id_add(evlist, evsel, cpu, thread, id);
++ perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
+ return 0;
+ }
+
+@@ -738,3 +744,12 @@ int perf_evlist__nr_groups(struct perf_evlist *evlist)
+ }
+ return nr_groups;
+ }
++
++void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
++{
++ if (!evsel->system_wide) {
++ evsel->system_wide = true;
++ if (evlist->needs_map_propagation)
++ __perf_evlist__propagate_maps(evlist, evsel);
++ }
++}
+diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
+index 3339bc2f176552..f43bdb9b6227ca 100644
+--- a/tools/lib/perf/include/internal/evlist.h
++++ b/tools/lib/perf/include/internal/evlist.h
+@@ -126,13 +126,15 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist);
+
+ void perf_evlist__id_add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+- int cpu, int thread, u64 id);
++ int cpu_map_idx, int thread, u64 id);
+
+ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+- int cpu, int thread, int fd);
++ int cpu_map_idx, int thread, int fd);
+
+ void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
+
+ void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader);
++
++void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel);
+ #endif /* __LIBPERF_INTERNAL_EVLIST_H */
+diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
+index d5d771ccdc7b4e..e88a6d8a0b0f9f 100644
+--- a/tools/lib/perf/include/internal/rc_check.h
++++ b/tools/lib/perf/include/internal/rc_check.h
+@@ -9,8 +9,12 @@
+ * Enable reference count checking implicitly with leak checking, which is
+ * integrated into address sanitizer.
+ */
+-#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
++#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+ #define REFCNT_CHECKING 1
++#elif defined(__has_feature)
++#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
++#define REFCNT_CHECKING 1
++#endif
+ #endif
+
+ /*
+diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
+index adfbae27dc369d..8561b0f01a2476 100644
+--- a/tools/lib/subcmd/help.c
++++ b/tools/lib/subcmd/help.c
+@@ -52,11 +52,21 @@ void uniq(struct cmdnames *cmds)
+ if (!cmds->cnt)
+ return;
+
+- for (i = j = 1; i < cmds->cnt; i++)
+- if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+- cmds->names[j++] = cmds->names[i];
+-
++ for (i = 1; i < cmds->cnt; i++) {
++ if (!strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
++ zfree(&cmds->names[i - 1]);
++ }
++ for (i = 0, j = 0; i < cmds->cnt; i++) {
++ if (cmds->names[i]) {
++ if (i == j)
++ j++;
++ else
++ cmds->names[j++] = cmds->names[i];
++ }
++ }
+ cmds->cnt = j;
++ while (j < i)
++ cmds->names[j++] = NULL;
+ }
+
+ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
+index 53b5a492739d03..21ba650869383c 100644
+--- a/tools/memory-model/lock.cat
++++ b/tools/memory-model/lock.cat
+@@ -102,19 +102,19 @@ let rf-lf = rfe-lf | rfi-lf
+ * within one of the lock's critical sections returns False.
+ *)
+
+-(* rfi for RU events: an RU may read from the last po-previous UL *)
+-let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
+-
+-(* rfe for RU events: an RU may read from an external UL or the initial write *)
+-let all-possible-rfe-ru =
+- let possible-rfe-ru r =
++(*
++ * rf for RU events: an RU may read from an external UL or the initial write,
++ * or from the last po-previous UL
++ *)
++let all-possible-rf-ru =
++ let possible-rf-ru r =
+ let pair-to-relation p = p ++ 0
+- in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
+- in map possible-rfe-ru RU
++ in map pair-to-relation ((((UL | IW) * {r}) & loc & ext) |
++ (((UL * {r}) & po-loc) \ ([UL] ; po-loc ; [LKW] ; po-loc)))
++ in map possible-rf-ru RU
+
+ (* Generate all rf relations for RU events *)
+-with rfe-ru from cross(all-possible-rfe-ru)
+-let rf-ru = rfe-ru | rfi-ru
++with rf-ru from cross(all-possible-rf-ru)
+
+ (* Final rf relation *)
+ let rf = rf | rf-lf | rf-ru
+diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
+index 514e0d69e73106..ae61ae5b02bf88 100644
+--- a/tools/net/ynl/lib/ynl.c
++++ b/tools/net/ynl/lib/ynl.c
+@@ -450,6 +450,8 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
+
+ int ynl_recv_ack(struct ynl_sock *ys, int ret)
+ {
++ struct ynl_parse_arg yarg = { .ys = ys, };
++
+ if (!ret) {
+ yerr(ys, YNL_ERROR_EXPECT_ACK,
+ "Expecting an ACK but nothing received");
+@@ -462,7 +464,7 @@ int ynl_recv_ack(struct ynl_sock *ys, int ret)
+ return ret;
+ }
+ return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid,
+- ynl_cb_null, ys);
++ ynl_cb_null, &yarg);
+ }
+
+ int ynl_cb_null(const struct nlmsghdr *nlh, void *data)
+@@ -505,6 +507,7 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
+ ys->mcast_groups[i].name[GENL_NAMSIZ - 1] = 0;
+ }
+ }
++ i++;
+ }
+
+ return 0;
+@@ -570,7 +573,13 @@ static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
+ return err;
+ }
+
+- return ynl_recv_ack(ys, err);
++ err = ynl_recv_ack(ys, err);
++ if (err < 0) {
++ free(ys->mcast_groups);
++ return err;
++ }
++
++ return 0;
+ }
+
+ struct ynl_sock *
+@@ -725,11 +734,14 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
+
+ static int ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data)
+ {
+- return ynl_ntf_parse((struct ynl_sock *)data, nlh);
++ struct ynl_parse_arg *yarg = data;
++
++ return ynl_ntf_parse(yarg->ys, nlh);
+ }
+
+ int ynl_ntf_check(struct ynl_sock *ys)
+ {
++ struct ynl_parse_arg yarg = { .ys = ys, };
+ ssize_t len;
+ int err;
+
+@@ -751,7 +763,7 @@ int ynl_ntf_check(struct ynl_sock *ys)
+ return len;
+
+ err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
+- ynl_ntf_trampoline, ys,
++ ynl_ntf_trampoline, &yarg,
+ ynl_cb_array, NLMSG_MIN_TYPE);
+ if (err < 0)
+ return err;
+diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
+index 13c4b019a881f3..44ea0965c9d9cb 100644
+--- a/tools/net/ynl/lib/ynl.py
++++ b/tools/net/ynl/lib/ynl.py
+@@ -201,6 +201,7 @@ class NlMsg:
+ self.done = 1
+ extack_off = 20
+ elif self.nl_type == Netlink.NLMSG_DONE:
++ self.error = struct.unpack("i", self.raw[0:4])[0]
+ self.done = 1
+ extack_off = 4
+
+diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
+index 897af958cee852..575b7e248e5212 100755
+--- a/tools/net/ynl/ynl-gen-c.py
++++ b/tools/net/ynl/ynl-gen-c.py
+@@ -198,8 +198,11 @@ class Type(SpecAttr):
+ presence = ''
+ for i in range(0, len(ref)):
+ presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"
+- if self.presence_type() == 'bit':
+- code.append(presence + ' = 1;')
++ # Every layer below the last is a nest, so we know it uses bit presence;
++ # the last layer is "self" and may be a complex type
++ if i == len(ref) - 1 and self.presence_type() != 'bit':
++ continue
++ code.append(presence + ' = 1;')
+ code += self._setter_lines(ri, member, presence)
+
+ func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index e308d1ba664ef9..e3fc263b1b2065 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -3604,6 +3604,18 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ }
+
+ if (!save_insn->visited) {
++ /*
++ * If the restore hint insn is at the
++ * beginning of a basic block and was
++ * branched to from elsewhere, and the
++ * save insn hasn't been visited yet,
++ * defer following this branch for now.
++ * It will be seen later via the
++ * straight-line path.
++ */
++ if (!prev_insn)
++ return 0;
++
+ WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
+ return 1;
+ }
+diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
+index e45c7cb1d5bcdc..80a3e6acf31e4b 100644
+--- a/tools/objtool/noreturns.h
++++ b/tools/objtool/noreturns.h
+@@ -6,7 +6,6 @@
+ *
+ * Yes, this is unfortunate. A better solution is in the works.
+ */
+-NORETURN(__invalid_creds)
+ NORETURN(__kunit_abort)
+ NORETURN(__module_put_and_kthread_exit)
+ NORETURN(__reiserfs_panic)
+diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
+index c54f7235c5d941..f40febdd6e36aa 100644
+--- a/tools/objtool/objtool.c
++++ b/tools/objtool/objtool.c
+@@ -146,7 +146,5 @@ int main(int argc, const char **argv)
+ exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
+ pager_init(UNUSED);
+
+- objtool_run(argc, argv);
+-
+- return 0;
++ return objtool_run(argc, argv);
+ }
+diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
+index 3c36324712b6e0..482d6c52e2edf1 100644
+--- a/tools/perf/Documentation/perf-kwork.txt
++++ b/tools/perf/Documentation/perf-kwork.txt
+@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
+ SYNOPSIS
+ --------
+ [verse]
+-'perf kwork' {record}
++'perf kwork' {record|report|latency|timehist}
+
+ DESCRIPTION
+ -----------
+diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
+index d5f78e125efed1..69c6d5e46ad88d 100644
+--- a/tools/perf/Documentation/perf-list.txt
++++ b/tools/perf/Documentation/perf-list.txt
+@@ -67,6 +67,7 @@ counted. The following modifiers exist:
+ D - pin the event to the PMU
+ W - group is weak and will fallback to non-group if not schedulable,
+ e - group or event are exclusive and do not share the PMU
++ b - use BPF aggregation (see perf stat --bpf-counters)
+
+ The 'p' modifier can be used for specifying how precise the instruction
+ address should be. The 'p' modifier can be specified multiple times:
+diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
+index ff9a52e4468842..66b633f3c6d26b 100644
+--- a/tools/perf/Documentation/perf-script.txt
++++ b/tools/perf/Documentation/perf-script.txt
+@@ -441,9 +441,10 @@ include::itrace.txt[]
+ will be printed. Each entry has function name and file/line. Enabled by
+ default, disable with --no-inline.
+
+---insn-trace::
+- Show instruction stream for intel_pt traces. Combine with --xed to
+- show disassembly.
++--insn-trace[=<raw|disasm>]::
++ Show instruction stream in bytes (raw) or disassembled (disasm)
++ for intel_pt traces. The default is 'raw'. To use xed, combine
++ 'raw' with --xed to show disassembly done by xed.
+
+ --xed::
+ Run xed disassembler on output. Requires installing the xed disassembler.
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 37af6df7b978de..b97224a8a65b9d 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -69,6 +69,10 @@ include ../scripts/utilities.mak
+ # Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
+ # for dwarf backtrace post unwind.
+ #
++# Define NO_LIBTRACEEVENT=1 if you don't want libtraceevent to be linked;
++# this will remove multiple features and tools, such as 'perf trace',
++# that need it to read tracefs event format files, etc.
++#
+ # Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32
+ # for reading the 32-bit compatibility VDSO in 64-bit mode
+ #
+@@ -1123,7 +1127,7 @@ bpf-skel:
+ endif # BUILD_BPF_SKEL
+
+ bpf-skel-clean:
+- $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS)
++ $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h
+
+ clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
+diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
+index 615084eb88d8c9..3d9330feebd28f 100644
+--- a/tools/perf/arch/arm64/util/pmu.c
++++ b/tools/perf/arch/arm64/util/pmu.c
+@@ -10,7 +10,7 @@
+
+ const struct pmu_metrics_table *pmu_metrics_table__find(void)
+ {
+- struct perf_pmu *pmu = pmu__find_core_pmu();
++ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
+
+ if (pmu)
+ return perf_pmu__find_metrics_table(pmu);
+@@ -20,7 +20,7 @@ const struct pmu_metrics_table *pmu_metrics_table__find(void)
+
+ const struct pmu_events_table *pmu_events_table__find(void)
+ {
+- struct perf_pmu *pmu = pmu__find_core_pmu();
++ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
+
+ if (pmu)
+ return perf_pmu__find_events_table(pmu);
+@@ -32,7 +32,7 @@ double perf_pmu__cpu_slots_per_cycle(void)
+ {
+ char path[PATH_MAX];
+ unsigned long long slots = 0;
+- struct perf_pmu *pmu = pmu__find_core_pmu();
++ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
+
+ if (pmu) {
+ perf_pmu__pathname_scnprintf(path, sizeof(path),
+diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
+index 31807791589ee5..aaa2c641e78715 100644
+--- a/tools/perf/arch/x86/util/intel-pt.c
++++ b/tools/perf/arch/x86/util/intel-pt.c
+@@ -32,6 +32,7 @@
+ #include "../../../util/tsc.h"
+ #include <internal/lib.h> // page_size
+ #include "../../../util/intel-pt.h"
++#include <api/fs/fs.h>
+
+ #define KiB(x) ((x) * 1024)
+ #define MiB(x) ((x) * 1024 * 1024)
+@@ -436,6 +437,16 @@ static int intel_pt_track_switches(struct evlist *evlist)
+ }
+ #endif
+
++static bool intel_pt_exclude_guest(void)
++{
++ int pt_mode;
++
++ if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
++ pt_mode = 0;
++
++ return pt_mode == 1;
++}
++
+ static void intel_pt_valid_str(char *str, size_t len, u64 valid)
+ {
+ unsigned int val, last = 0, state = 1;
+@@ -628,6 +639,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
+ }
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
++ evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
+ evsel->no_aux_samples = true;
+ evsel->needs_auxtrace_mmap = true;
+ intel_pt_evsel = evsel;
+@@ -766,7 +778,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
+ }
+
+ if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
+- u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
++ size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
++ u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
+
+ intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
+ }
+diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
+index 49331743c7439b..a759eb2328bead 100644
+--- a/tools/perf/bench/inject-buildid.c
++++ b/tools/perf/bench/inject-buildid.c
+@@ -362,7 +362,7 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
+ return -1;
+
+ for (i = 0; i < nr_mmaps; i++) {
+- int idx = rand() % (nr_dsos - 1);
++ int idx = rand() % nr_dsos;
+ struct bench_dso *dso = &dsos[idx];
+ u64 timestamp = rand() % 1000000;
+
+diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c
+index 914c0817fe8ad3..e8e0afa13f0496 100644
+--- a/tools/perf/bench/uprobe.c
++++ b/tools/perf/bench/uprobe.c
+@@ -47,7 +47,7 @@ static const char * const bench_uprobe_usage[] = {
+ #define bench_uprobe__attach_uprobe(prog) \
+ skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \
+ /*pid=*/-1, \
+- /*binary_path=*/"/lib64/libc.so.6", \
++ /*binary_path=*/"libc.so.6", \
+ /*func_offset=*/0, \
+ /*opts=*/&uprobe_opts); \
+ if (!skel->links.prog) { \
+diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
+index aeeb801f1ed7b1..0f1e5787b4edac 100644
+--- a/tools/perf/builtin-annotate.c
++++ b/tools/perf/builtin-annotate.c
+@@ -45,7 +45,6 @@
+ struct perf_annotate {
+ struct perf_tool tool;
+ struct perf_session *session;
+- struct annotation_options opts;
+ #ifdef HAVE_SLANG_SUPPORT
+ bool use_tui;
+ #endif
+@@ -315,9 +314,9 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
+ struct perf_annotate *ann)
+ {
+ if (!ann->use_stdio2)
+- return symbol__tty_annotate(&he->ms, evsel, &ann->opts);
++ return symbol__tty_annotate(&he->ms, evsel);
+
+- return symbol__tty_annotate2(&he->ms, evsel, &ann->opts);
++ return symbol__tty_annotate2(&he->ms, evsel);
+ }
+
+ static void hists__find_annotations(struct hists *hists,
+@@ -363,7 +362,6 @@ static void hists__find_annotations(struct hists *hists,
+ int ret;
+ int (*annotate)(struct hist_entry *he,
+ struct evsel *evsel,
+- struct annotation_options *options,
+ struct hist_browser_timer *hbt);
+
+ annotate = dlsym(perf_gtk_handle,
+@@ -373,14 +371,14 @@ static void hists__find_annotations(struct hists *hists,
+ return;
+ }
+
+- ret = annotate(he, evsel, &ann->opts, NULL);
++ ret = annotate(he, evsel, NULL);
+ if (!ret || !ann->skip_missing)
+ return;
+
+ /* skip missing symbols */
+ nd = rb_next(nd);
+ } else if (use_browser == 1) {
+- key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts);
++ key = hist_entry__tui_annotate(he, evsel, NULL);
+
+ switch (key) {
+ case -1:
+@@ -422,9 +420,9 @@ static int __cmd_annotate(struct perf_annotate *ann)
+ goto out;
+ }
+
+- if (!ann->opts.objdump_path) {
++ if (!annotate_opts.objdump_path) {
+ ret = perf_env__lookup_objdump(&session->header.env,
+- &ann->opts.objdump_path);
++ &annotate_opts.objdump_path);
+ if (ret)
+ goto out;
+ }
+@@ -558,9 +556,9 @@ int cmd_annotate(int argc, const char **argv)
+ "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
+- OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines,
++ OPT_BOOLEAN('l', "print-line", &annotate_opts.print_lines,
+ "print matching source lines (may be slow)"),
+- OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path,
++ OPT_BOOLEAN('P', "full-paths", &annotate_opts.full_path,
+ "Don't shorten the displayed pathnames"),
+ OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
+ "Skip symbols that cannot be annotated"),
+@@ -571,15 +569,15 @@ int cmd_annotate(int argc, const char **argv)
+ OPT_CALLBACK(0, "symfs", NULL, "directory",
+ "Look for files with symbols relative to this directory",
+ symbol__config_symfs),
+- OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src,
++ OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
+ "Interleave source code with assembly code (default)"),
+- OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
++ OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
+ "Display raw encoding of assembly instructions (default)"),
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ "Specify disassembler style (e.g. -M intel for intel syntax)"),
+- OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix",
++ OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
+ "Add prefix to source file path names in programs (with --prefix-strip)"),
+- OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N",
++ OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
+ "Strip first N entries of source file path name in programs (with --prefix)"),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
+@@ -589,8 +587,6 @@ int cmd_annotate(int argc, const char **argv)
+ "Enable symbol demangling"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
+- OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
+- "Show event group information together"),
+ OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+ "Show a column with the sum of periods"),
+ OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
+@@ -598,7 +594,7 @@ int cmd_annotate(int argc, const char **argv)
+ OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
+ "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
+ stdio__config_color, "always"),
+- OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
++ OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
+ "Set percent type local/global-period/hits",
+ annotate_parse_percent_type),
+ OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
+@@ -614,13 +610,13 @@ int cmd_annotate(int argc, const char **argv)
+ set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE);
+ set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE);
+
+- annotation_options__init(&annotate.opts);
++ annotation_options__init(&annotate_opts);
+
+ ret = hists__init();
+ if (ret < 0)
+ return ret;
+
+- annotation_config__init(&annotate.opts);
++ annotation_config__init(&annotate_opts);
+
+ argc = parse_options(argc, argv, options, annotate_usage, 0);
+ if (argc) {
+@@ -635,13 +631,13 @@ int cmd_annotate(int argc, const char **argv)
+ }
+
+ if (disassembler_style) {
+- annotate.opts.disassembler_style = strdup(disassembler_style);
+- if (!annotate.opts.disassembler_style)
++ annotate_opts.disassembler_style = strdup(disassembler_style);
++ if (!annotate_opts.disassembler_style)
+ return -ENOMEM;
+ }
+ if (objdump_path) {
+- annotate.opts.objdump_path = strdup(objdump_path);
+- if (!annotate.opts.objdump_path)
++ annotate_opts.objdump_path = strdup(objdump_path);
++ if (!annotate_opts.objdump_path)
+ return -ENOMEM;
+ }
+ if (addr2line_path) {
+@@ -650,7 +646,7 @@ int cmd_annotate(int argc, const char **argv)
+ return -ENOMEM;
+ }
+
+- if (annotate_check_args(&annotate.opts) < 0)
++ if (annotate_check_args(&annotate_opts) < 0)
+ return -EINVAL;
+
+ #ifdef HAVE_GTK2_SUPPORT
+@@ -731,7 +727,7 @@ int cmd_annotate(int argc, const char **argv)
+ #ifndef NDEBUG
+ perf_session__delete(annotate.session);
+ #endif
+- annotation_options__exit(&annotate.opts);
++ annotation_options__exit(&annotate_opts);
+
+ return ret;
+ }
+diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
+index 83954af36753a9..de76bbc50bfbcb 100644
+--- a/tools/perf/builtin-daemon.c
++++ b/tools/perf/builtin-daemon.c
+@@ -523,7 +523,7 @@ static int daemon_session__control(struct daemon_session *session,
+ session->base, SESSION_CONTROL);
+
+ control = open(control_path, O_WRONLY|O_NONBLOCK);
+- if (!control)
++ if (control < 0)
+ return -1;
+
+ if (do_ack) {
+@@ -532,7 +532,7 @@ static int daemon_session__control(struct daemon_session *session,
+ session->base, SESSION_ACK);
+
+ ack = open(ack_path, O_RDONLY, O_NONBLOCK);
+- if (!ack) {
++ if (ack < 0) {
+ close(control);
+ return -1;
+ }
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index c8cf2fdd9cff96..8aba0566546797 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -2200,6 +2200,7 @@ int cmd_inject(int argc, const char **argv)
+ .finished_init = perf_event__repipe_op2_synth,
+ .compressed = perf_event__repipe_op4_synth,
+ .auxtrace = perf_event__repipe_auxtrace,
++ .dont_split_sample_group = true,
+ },
+ .input_name = "-",
+ .samples = LIST_HEAD_INIT(inject.samples),
+@@ -2265,6 +2266,12 @@ int cmd_inject(int argc, const char **argv)
+ "perf inject [<options>]",
+ NULL
+ };
++
++ if (!inject.itrace_synth_opts.set) {
++ /* Disable eager loading of kernel symbols that adds overhead to perf inject. */
++ symbol_conf.lazy_load_kernel_maps = true;
++ }
++
+ #ifndef HAVE_JITDUMP
+ set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
+ #endif
+diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
+index 9714327fd0eadd..cf623c1490d959 100644
+--- a/tools/perf/builtin-kmem.c
++++ b/tools/perf/builtin-kmem.c
+@@ -2058,6 +2058,8 @@ int cmd_kmem(int argc, const char **argv)
+
+ out_delete:
+ perf_session__delete(session);
++ /* free usage string allocated by parse_options_subcommand */
++ free((void *)kmem_usage[0]);
+
+ return ret;
+ }
+diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
+index 71165036e4cac8..988bef73bd0957 100644
+--- a/tools/perf/builtin-kvm.c
++++ b/tools/perf/builtin-kvm.c
+@@ -2187,5 +2187,8 @@ int cmd_kvm(int argc, const char **argv)
+ else
+ usage_with_options(kvm_usage, kvm_options);
+
++ /* free usage string allocated by parse_options_subcommand */
++ free((void *)kvm_usage[0]);
++
+ return 0;
+ }
+diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
+index 14bf7a8429e76f..be210be42c77b3 100644
+--- a/tools/perf/builtin-kwork.c
++++ b/tools/perf/builtin-kwork.c
+@@ -406,12 +406,14 @@ static int work_push_atom(struct perf_kwork *kwork,
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (work == NULL) {
+- free(atom);
++ atom_free(atom);
+ return -1;
+ }
+
+- if (!profile_event_match(kwork, work, sample))
++ if (!profile_event_match(kwork, work, sample)) {
++ atom_free(atom);
+ return 0;
++ }
+
+ if (dst_type < KWORK_TRACE_MAX) {
+ dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+@@ -1692,9 +1694,10 @@ int cmd_kwork(int argc, const char **argv)
+ static struct perf_kwork kwork = {
+ .class_list = LIST_HEAD_INIT(kwork.class_list),
+ .tool = {
+- .mmap = perf_event__process_mmap,
+- .mmap2 = perf_event__process_mmap2,
+- .sample = perf_kwork__process_tracepoint_sample,
++ .mmap = perf_event__process_mmap,
++ .mmap2 = perf_event__process_mmap2,
++ .sample = perf_kwork__process_tracepoint_sample,
++ .ordered_events = true,
+ },
+ .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
+ .sort_list = LIST_HEAD_INIT(kwork.sort_list),
+@@ -1850,5 +1853,8 @@ int cmd_kwork(int argc, const char **argv)
+ } else
+ usage_with_options(kwork_usage, kwork_options);
+
++ /* free usage string allocated by parse_options_subcommand */
++ free((void *)kwork_usage[0]);
++
+ return 0;
+ }
+diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
+index a343823c8ddfc9..61c2c96cc0701b 100644
+--- a/tools/perf/builtin-list.c
++++ b/tools/perf/builtin-list.c
+@@ -434,6 +434,11 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
+ strbuf_release(&buf);
+ }
+
++static bool json_skip_duplicate_pmus(void *ps __maybe_unused)
++{
++ return false;
++}
++
+ static bool default_skip_duplicate_pmus(void *ps)
+ {
+ struct print_state *print_state = ps;
+@@ -503,6 +508,7 @@ int cmd_list(int argc, const char **argv)
+ .print_end = json_print_end,
+ .print_event = json_print_event,
+ .print_metric = json_print_metric,
++ .skip_duplicate_pmus = json_skip_duplicate_pmus,
+ };
+ ps = &json_ps;
+ } else {
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index b141f213427406..fcb32c58bee7e4 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -524,6 +524,7 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
+ struct map *kmap;
+ struct symbol *sym;
+ u64 ip;
++ const char *arch = perf_env__arch(machine->env);
+
+ if (list_empty(&callstack_filters))
+ return true;
+@@ -531,7 +532,21 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
+ for (int i = 0; i < max_stack_depth; i++) {
+ struct callstack_filter *filter;
+
+- if (!callstack || !callstack[i])
++ /*
++ * On powerpc, the callchain saved by the kernel always includes
++ * the first three entries as the NIP (next instruction pointer),
++ * LR (link register), and the contents of the LR save area in the
++ * second stack frame. In certain scenarios it is possible to have
++ * invalid kernel instruction addresses in either LR or the second
++ * stack frame's LR. In that case, the kernel stores that address as
++ * zero.
++ *
++ * The check below will continue to look into the callstack in
++ * case the first or second callstack index entry has a zero
++ * address on powerpc.
++ */
++ if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
++ (i != 1 && i != 2))))
+ break;
+
+ ip = callstack[i];
+@@ -2607,6 +2622,9 @@ int cmd_lock(int argc, const char **argv)
+ usage_with_options(lock_usage, lock_options);
+ }
+
++ /* free usage string allocated by parse_options_subcommand */
++ free((void *)lock_usage[0]);
++
+ zfree(&lockhash_table);
+ return rc;
+ }
+diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
+index 51499c20da01e8..286105be91cec9 100644
+--- a/tools/perf/builtin-mem.c
++++ b/tools/perf/builtin-mem.c
+@@ -372,6 +372,7 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
+ rep_argv[i] = argv[j];
+
+ ret = cmd_report(i, rep_argv);
++ free(new_sort_order);
+ free(rep_argv);
+ return ret;
+ }
+@@ -517,5 +518,8 @@ int cmd_mem(int argc, const char **argv)
+ else
+ usage_with_options(mem_usage, mem_options);
+
++ /* free usage string allocated by parse_options_subcommand */
++ free((void *)mem_usage[0]);
++
+ return 0;
+ }
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 34bb31f08bb520..b94ae33a343c2a 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -906,6 +906,37 @@ static int record__config_off_cpu(struct record *rec)
+ return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
+ }
+
++static int record__config_tracking_events(struct record *rec)
++{
++ struct record_opts *opts = &rec->opts;
++ struct evlist *evlist = rec->evlist;
++ struct evsel *evsel;
++
++ /*
++ * For initial_delay, system wide, or a hybrid system, we need to add
++ * a tracking event so that we can track PERF_RECORD_MMAP to cover the
++ * delay of waiting or event synthesis.
++ */
++ if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
++ perf_pmus__num_core_pmus() > 1) {
++ evsel = evlist__findnew_tracking_event(evlist, false);
++ if (!evsel)
++ return -ENOMEM;
++
++ /*
++ * Enable the tracking event when the process is forked for
++ * initial_delay, immediately for system wide.
++ */
++ if (opts->target.initial_delay && !evsel->immediate &&
++ !target__has_cpu(&opts->target))
++ evsel->core.attr.enable_on_exec = 1;
++ else
++ evsel->immediate = 1;
++ }
++
++ return 0;
++}
++
+ static bool record__kcore_readable(struct machine *machine)
+ {
+ char kcore[PATH_MAX];
+@@ -1286,35 +1317,6 @@ static int record__open(struct record *rec)
+ struct record_opts *opts = &rec->opts;
+ int rc = 0;
+
+- /*
+- * For initial_delay, system wide or a hybrid system, we need to add a
+- * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
+- * of waiting or event synthesis.
+- */
+- if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
+- perf_pmus__num_core_pmus() > 1) {
+- pos = evlist__get_tracking_event(evlist);
+- if (!evsel__is_dummy_event(pos)) {
+- /* Set up dummy event. */
+- if (evlist__add_dummy(evlist))
+- return -ENOMEM;
+- pos = evlist__last(evlist);
+- evlist__set_tracking_event(evlist, pos);
+- }
+-
+- /*
+- * Enable the dummy event when the process is forked for
+- * initial_delay, immediately for system wide.
+- */
+- if (opts->target.initial_delay && !pos->immediate &&
+- !target__has_cpu(&opts->target))
+- pos->core.attr.enable_on_exec = 1;
+- else
+- pos->immediate = 1;
+- }
+-
+- evlist__config(evlist, opts, &callchain_param);
+-
+ evlist__for_each_entry(evlist, pos) {
+ try_again:
+ if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
+@@ -1786,8 +1788,8 @@ static int
+ record__switch_output(struct record *rec, bool at_exit)
+ {
+ struct perf_data *data = &rec->data;
++ char *new_filename = NULL;
+ int fd, err;
+- char *new_filename;
+
+ /* Same Size: "2015122520103046"*/
+ char timestamp[] = "InvalidTimestamp";
+@@ -2184,32 +2186,6 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec)
+ }
+ }
+
+-static void record__uniquify_name(struct record *rec)
+-{
+- struct evsel *pos;
+- struct evlist *evlist = rec->evlist;
+- char *new_name;
+- int ret;
+-
+- if (perf_pmus__num_core_pmus() == 1)
+- return;
+-
+- evlist__for_each_entry(evlist, pos) {
+- if (!evsel__is_hybrid(pos))
+- continue;
+-
+- if (strchr(pos->name, '/'))
+- continue;
+-
+- ret = asprintf(&new_name, "%s/%s/",
+- pos->pmu_name, pos->name);
+- if (ret) {
+- free(pos->name);
+- pos->name = new_name;
+- }
+- }
+-}
+-
+ static int record__terminate_thread(struct record_thread *thread_data)
+ {
+ int err;
+@@ -2443,7 +2419,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ if (data->is_pipe && rec->evlist->core.nr_entries == 1)
+ rec->opts.sample_id = true;
+
+- record__uniquify_name(rec);
++ if (rec->timestamp_filename && perf_data__is_pipe(data)) {
++ rec->timestamp_filename = false;
++ pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
++ }
++
++ evlist__uniquify_name(rec->evlist);
++
++ evlist__config(rec->evlist, opts, &callchain_param);
+
+ /* Debug message used by test scripts */
+ pr_debug3("perf record opening and mmapping events\n");
+@@ -2843,10 +2826,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ }
+ #endif
+ zstd_fini(&session->zstd_data);
+- perf_session__delete(session);
+-
+ if (!opts->no_bpf_event)
+ evlist__stop_sb_thread(rec->sb_evlist);
++
++ perf_session__delete(session);
+ return status;
+ }
+
+@@ -3957,6 +3940,8 @@ int cmd_record(int argc, const char **argv)
+ # undef set_nobuild
+ #endif
+
++ /* Disable eager loading of kernel symbols that adds overhead to perf record. */
++ symbol_conf.lazy_load_kernel_maps = true;
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
+ rec->evlist = evlist__new();
+@@ -4195,6 +4180,12 @@ int cmd_record(int argc, const char **argv)
+ goto out;
+ }
+
++ err = record__config_tracking_events(rec);
++ if (err) {
++ pr_err("record__config_tracking_events failed, error %d\n", err);
++ goto out;
++ }
++
+ err = record__init_thread_masks(rec);
+ if (err) {
+ pr_err("Failed to initialize parallel data streaming masks\n");
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index dcedfe00f04dbd..cd2f3f1a756330 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -98,7 +98,6 @@ struct report {
+ bool skip_empty;
+ int max_stack;
+ struct perf_read_values show_threads_values;
+- struct annotation_options annotation_opts;
+ const char *pretty_printing_style;
+ const char *cpu_list;
+ const char *symbol_filter_str;
+@@ -427,7 +426,7 @@ static int report__setup_sample_type(struct report *rep)
+ * compatibility, set the bit if it's an old perf data file.
+ */
+ evlist__for_each_entry(session->evlist, evsel) {
+- if (strstr(evsel->name, "arm_spe") &&
++ if (strstr(evsel__name(evsel), "arm_spe") &&
+ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+ sample_type |= PERF_SAMPLE_DATA_SRC;
+@@ -541,8 +540,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *
+ evlist__for_each_entry(evlist, pos) {
+ ret = report__browse_block_hists(&rep->block_reports[i++].hist,
+ rep->min_percent, pos,
+- &rep->session->header.env,
+- &rep->annotation_opts);
++ &rep->session->header.env);
+ if (ret != 0)
+ return ret;
+ }
+@@ -564,6 +562,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
+ struct hists *hists = evsel__hists(pos);
+ const char *evname = evsel__name(pos);
+
++ i++;
+ if (symbol_conf.event_group && !evsel__is_group_leader(pos))
+ continue;
+
+@@ -573,9 +572,8 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
+ hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
+
+ if (rep->total_cycles_mode) {
+- report__browse_block_hists(&rep->block_reports[i++].hist,
+- rep->min_percent, pos,
+- NULL, NULL);
++ report__browse_block_hists(&rep->block_reports[i - 1].hist,
++ rep->min_percent, pos, NULL);
+ continue;
+ }
+
+@@ -670,7 +668,7 @@ static int report__browse_hists(struct report *rep)
+ }
+
+ ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
+- &session->header.env, true, &rep->annotation_opts);
++ &session->header.env, true);
+ /*
+ * Usually "ret" is the last pressed key, and we only
+ * care if the key notifies us to switch data file.
+@@ -730,7 +728,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
+ if (rep->symbol_ipc && sym && !sym->annotate2) {
+ struct evsel *evsel = hists_to_evsel(he->hists);
+
+- symbol__annotate2(&he->ms, evsel, &rep->annotation_opts, NULL);
++ symbol__annotate2(&he->ms, evsel, NULL);
+ }
+
+ return 0;
+@@ -1326,15 +1324,15 @@ int cmd_report(int argc, const char **argv)
+ "list of cpus to profile"),
+ OPT_BOOLEAN('I', "show-info", &report.show_full_info,
+ "Display extended information about perf.data file"),
+- OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
++ OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
+ "Interleave source code with assembly code (default)"),
+- OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
++ OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
+ "Display raw encoding of assembly instructions (default)"),
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ "Specify disassembler style (e.g. -M intel for intel syntax)"),
+- OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
++ OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
+ "Add prefix to source file path names in programs (with --prefix-strip)"),
+- OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N",
++ OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
+ "Strip first N entries of source file path name in programs (with --prefix)"),
+ OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+ "Show a column with the sum of periods"),
+@@ -1386,7 +1384,7 @@ int cmd_report(int argc, const char **argv)
+ "Time span of interest (start,stop)"),
+ OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
+ "Show inline function"),
+- OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
++ OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
+ "Set percent type local/global-period/hits",
+ annotate_parse_percent_type),
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
+@@ -1411,7 +1409,14 @@ int cmd_report(int argc, const char **argv)
+ if (ret < 0)
+ goto exit;
+
+- annotation_options__init(&report.annotation_opts);
++ /*
++ * tasks_mode require access to exited threads to list those that are in
++ * the data file. Off-cpu events are synthesized after other events and
++ * reference exited threads.
++ */
++ symbol_conf.keep_exited_threads = true;
++
++ annotation_options__init(&annotate_opts);
+
+ ret = perf_config(report__config, &report);
+ if (ret)
+@@ -1430,13 +1435,13 @@ int cmd_report(int argc, const char **argv)
+ }
+
+ if (disassembler_style) {
+- report.annotation_opts.disassembler_style = strdup(disassembler_style);
+- if (!report.annotation_opts.disassembler_style)
++ annotate_opts.disassembler_style = strdup(disassembler_style);
++ if (!annotate_opts.disassembler_style)
+ return -ENOMEM;
+ }
+ if (objdump_path) {
+- report.annotation_opts.objdump_path = strdup(objdump_path);
+- if (!report.annotation_opts.objdump_path)
++ annotate_opts.objdump_path = strdup(objdump_path);
++ if (!annotate_opts.objdump_path)
+ return -ENOMEM;
+ }
+ if (addr2line_path) {
+@@ -1445,7 +1450,7 @@ int cmd_report(int argc, const char **argv)
+ return -ENOMEM;
+ }
+
+- if (annotate_check_args(&report.annotation_opts) < 0) {
++ if (annotate_check_args(&annotate_opts) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+@@ -1677,7 +1682,7 @@ int cmd_report(int argc, const char **argv)
+ */
+ symbol_conf.priv_size += sizeof(u32);
+ }
+- annotation_config__init(&report.annotation_opts);
++ annotation_config__init(&annotate_opts);
+ }
+
+ if (symbol__init(&session->header.env) < 0)
+@@ -1731,7 +1736,7 @@ int cmd_report(int argc, const char **argv)
+ zstd_fini(&(session->zstd_data));
+ perf_session__delete(session);
+ exit:
+- annotation_options__exit(&report.annotation_opts);
++ annotation_options__exit(&annotate_opts);
+ free(sort_order_help);
+ free(field_order_help);
+ return ret;
+diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
+index 9ab300b6f131fe..ac9d94dbbeefae 100644
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -2633,9 +2633,12 @@ static int timehist_sched_change_event(struct perf_tool *tool,
+ * - previous sched event is out of window - we are done
+ * - sample time is beyond window user cares about - reset it
+ * to close out stats for time window interest
++ * - If tprev is 0, that is, the sched_in event for the current task
++ *   is not recorded, we cannot determine whether the sched_in event
++ *   falls within the time window of interest - ignore it
+ */
+ if (ptime->end) {
+- if (tprev > ptime->end)
++ if (!tprev || tprev > ptime->end)
+ goto out;
+
+ if (t > ptime->end)
+@@ -3000,8 +3003,11 @@ static int timehist_check_attr(struct perf_sched *sched,
+ return -1;
+ }
+
+- if (sched->show_callchain && !evsel__has_callchain(evsel)) {
+- pr_info("Samples do not have callchains.\n");
++ /* only need to save callchains related to the sched_switch event */
++ if (sched->show_callchain &&
++ evsel__name_is(evsel, "sched:sched_switch") &&
++ !evsel__has_callchain(evsel)) {
++ pr_info("Samples of sched_switch event do not have callchains.\n");
+ sched->show_callchain = 0;
+ symbol_conf.use_callchain = 0;
+ }
+@@ -3065,7 +3071,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
+
+ if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
+ pr_err("Invalid time string\n");
+- return -EINVAL;
++ err = -EINVAL;
++ goto out;
+ }
+
+ if (timehist_check_attr(sched, evlist) != 0)
+@@ -3204,14 +3211,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
+ }
+ }
+
++static int setup_cpus_switch_event(struct perf_sched *sched)
++{
++ unsigned int i;
++
++ sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
++ if (!sched->cpu_last_switched)
++ return -1;
++
++ sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
++ if (!sched->curr_pid) {
++ zfree(&sched->cpu_last_switched);
++ return -1;
++ }
++
++ for (i = 0; i < MAX_CPUS; i++)
++ sched->curr_pid[i] = -1;
++
++ return 0;
++}
++
++static void free_cpus_switch_event(struct perf_sched *sched)
++{
++ zfree(&sched->curr_pid);
++ zfree(&sched->cpu_last_switched);
++}
++
+ static int perf_sched__lat(struct perf_sched *sched)
+ {
++ int rc = -1;
+ struct rb_node *next;
+
+ setup_pager();
+
++ if (setup_cpus_switch_event(sched))
++ return rc;
++
+ if (perf_sched__read_events(sched))
+- return -1;
++ goto out_free_cpus_switch_event;
+
+ perf_sched__merge_lat(sched);
+ perf_sched__sort_lat(sched);
+@@ -3240,13 +3277,15 @@ static int perf_sched__lat(struct perf_sched *sched)
+ print_bad_events(sched);
+ printf("\n");
+
+- return 0;
++ rc = 0;
++
++out_free_cpus_switch_event:
++ free_cpus_switch_event(sched);
++ return rc;
+ }
+
+ static int setup_map_cpus(struct perf_sched *sched)
+ {
+- struct perf_cpu_map *map;
+-
+ sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
+
+ if (sched->map.comp) {
+@@ -3255,16 +3294,15 @@ static int setup_map_cpus(struct perf_sched *sched)
+ return -1;
+ }
+
+- if (!sched->map.cpus_str)
+- return 0;
+-
+- map = perf_cpu_map__new(sched->map.cpus_str);
+- if (!map) {
+- pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
+- return -1;
++ if (sched->map.cpus_str) {
++ sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
++ if (!sched->map.cpus) {
++ pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
++ zfree(&sched->map.comp_cpus);
++ return -1;
++ }
+ }
+
+- sched->map.cpus = map;
+ return 0;
+ }
+
+@@ -3304,33 +3342,69 @@ static int setup_color_cpus(struct perf_sched *sched)
+
+ static int perf_sched__map(struct perf_sched *sched)
+ {
++ int rc = -1;
++
++ sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
++ if (!sched->curr_thread)
++ return rc;
++
++ if (setup_cpus_switch_event(sched))
++ goto out_free_curr_thread;
++
+ if (setup_map_cpus(sched))
+- return -1;
++ goto out_free_cpus_switch_event;
+
+ if (setup_color_pids(sched))
+- return -1;
++ goto out_put_map_cpus;
+
+ if (setup_color_cpus(sched))
+- return -1;
++ goto out_put_color_pids;
+
+ setup_pager();
+ if (perf_sched__read_events(sched))
+- return -1;
++ goto out_put_color_cpus;
++
++ rc = 0;
+ print_bad_events(sched);
+- return 0;
++
++out_put_color_cpus:
++ perf_cpu_map__put(sched->map.color_cpus);
++
++out_put_color_pids:
++ perf_thread_map__put(sched->map.color_pids);
++
++out_put_map_cpus:
++ zfree(&sched->map.comp_cpus);
++ perf_cpu_map__put(sched->map.cpus);
++
++out_free_cpus_switch_event:
++ free_cpus_switch_event(sched);
++
++out_free_curr_thread:
++ zfree(&sched->curr_thread);
++ return rc;
+ }
+
+ static int perf_sched__replay(struct perf_sched *sched)
+ {
++ int ret;
+ unsigned long i;
+
++ mutex_init(&sched->start_work_mutex);
++ mutex_init(&sched->work_done_wait_mutex);
++
++ ret = setup_cpus_switch_event(sched);
++ if (ret)
++ goto out_mutex_destroy;
++
+ calibrate_run_measurement_overhead(sched);
+ calibrate_sleep_measurement_overhead(sched);
+
+ test_calibrations(sched);
+
+- if (perf_sched__read_events(sched))
+- return -1;
++ ret = perf_sched__read_events(sched);
++ if (ret)
++ goto out_free_cpus_switch_event;
+
+ printf("nr_run_events: %ld\n", sched->nr_run_events);
+ printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
+@@ -3355,7 +3429,14 @@ static int perf_sched__replay(struct perf_sched *sched)
+
+ sched->thread_funcs_exit = true;
+ destroy_tasks(sched);
+- return 0;
++
++out_free_cpus_switch_event:
++ free_cpus_switch_event(sched);
++
++out_mutex_destroy:
++ mutex_destroy(&sched->start_work_mutex);
++ mutex_destroy(&sched->work_done_wait_mutex);
++ return ret;
+ }
+
+ static void setup_sorting(struct perf_sched *sched, const struct option *options,
+@@ -3590,28 +3671,7 @@ int cmd_sched(int argc, const char **argv)
+ .switch_event = replay_switch_event,
+ .fork_event = replay_fork_event,
+ };
+- unsigned int i;
+- int ret = 0;
+-
+- mutex_init(&sched.start_work_mutex);
+- mutex_init(&sched.work_done_wait_mutex);
+- sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
+- if (!sched.curr_thread) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
+- if (!sched.cpu_last_switched) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
+- if (!sched.curr_pid) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- for (i = 0; i < MAX_CPUS; i++)
+- sched.curr_pid[i] = -1;
++ int ret;
+
+ argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
+ sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+@@ -3622,9 +3682,9 @@ int cmd_sched(int argc, const char **argv)
+ * Aliased to 'perf script' for now:
+ */
+ if (!strcmp(argv[0], "script")) {
+- ret = cmd_script(argc, argv);
++ return cmd_script(argc, argv);
+ } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
+- ret = __cmd_record(argc, argv);
++ return __cmd_record(argc, argv);
+ } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
+ sched.tp_handler = &lat_ops;
+ if (argc > 1) {
+@@ -3633,7 +3693,7 @@ int cmd_sched(int argc, const char **argv)
+ usage_with_options(latency_usage, latency_options);
+ }
+ setup_sorting(&sched, latency_options, latency_usage);
+- ret = perf_sched__lat(&sched);
++ return perf_sched__lat(&sched);
+ } else if (!strcmp(argv[0], "map")) {
+ if (argc) {
+ argc = parse_options(argc, argv, map_options, map_usage, 0);
+@@ -3642,7 +3702,7 @@ int cmd_sched(int argc, const char **argv)
+ }
+ sched.tp_handler = &map_ops;
+ setup_sorting(&sched, latency_options, latency_usage);
+- ret = perf_sched__map(&sched);
++ return perf_sched__map(&sched);
+ } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
+ sched.tp_handler = &replay_ops;
+ if (argc) {
+@@ -3650,7 +3710,7 @@ int cmd_sched(int argc, const char **argv)
+ if (argc)
+ usage_with_options(replay_usage, replay_options);
+ }
+- ret = perf_sched__replay(&sched);
++ return perf_sched__replay(&sched);
+ } else if (!strcmp(argv[0], "timehist")) {
+ if (argc) {
+ argc = parse_options(argc, argv, timehist_options,
+@@ -3666,24 +3726,19 @@ int cmd_sched(int argc, const char **argv)
+ parse_options_usage(NULL, timehist_options, "w", true);
+ if (sched.show_next)
+ parse_options_usage(NULL, timehist_options, "n", true);
+- ret = -EINVAL;
+- goto out;
++ return -EINVAL;
+ }
+ ret = symbol__validate_sym_arguments();
+ if (ret)
+- goto out;
++ return ret;
+
+- ret = perf_sched__timehist(&sched);
++ return perf_sched__timehist(&sched);
+ } else {
+ usage_with_options(sched_usage, sched_options);
+ }
+
+-out:
+- free(sched.curr_pid);
+- free(sched.cpu_last_switched);
+- free(sched.curr_thread);
+- mutex_destroy(&sched.start_work_mutex);
+- mutex_destroy(&sched.work_done_wait_mutex);
++ /* free usage string allocated by parse_options_subcommand */
++ free((void *)sched_usage[0]);
+
+- return ret;
++ return 0;
+ }
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 517bf25750c8bb..f4f3ef90a4629a 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -3765,11 +3765,25 @@ static int perf_script__process_auxtrace_info(struct perf_session *session,
+ #endif
+
+ static int parse_insn_trace(const struct option *opt __maybe_unused,
+- const char *str __maybe_unused,
+- int unset __maybe_unused)
++ const char *str, int unset __maybe_unused)
+ {
+- parse_output_fields(NULL, "+insn,-event,-period", 0);
+- itrace_parse_synth_opts(opt, "i0ns", 0);
++ const char *fields = "+insn,-event,-period";
++ int ret;
++
++ if (str) {
++ if (strcmp(str, "disasm") == 0)
++ fields = "+disasm,-event,-period";
++ else if (strlen(str) != 0 && strcmp(str, "raw") != 0) {
++ fprintf(stderr, "Only accept raw|disasm\n");
++ return -EINVAL;
++ }
++ }
++
++ ret = parse_output_fields(NULL, fields, 0);
++ if (ret < 0)
++ return ret;
++
++ itrace_parse_synth_opts(opt, "i0nse", 0);
+ symbol_conf.nanosecs = true;
+ return 0;
+ }
+@@ -3914,7 +3928,7 @@ int cmd_script(int argc, const char **argv)
+ "only consider these symbols"),
+ OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range,
+ "Use with -S to list traced records within address range"),
+- OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL,
++ OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, "raw|disasm",
+ "Decode instructions from itrace", parse_insn_trace),
+ OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
+ "Run xed disassembler on output", parse_xed),
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 07b48f6df48eb2..78c10492218102 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
+ * taking the highest cpu number to be the size of
+ * the aggregation translate cpumap.
+ */
+- if (evsel_list->core.user_requested_cpus)
++ if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+ nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+ else
+ nr = 0;
+@@ -2695,15 +2695,19 @@ int cmd_stat(int argc, const char **argv)
+ */
+ if (metrics) {
+ const char *pmu = parse_events_option_args.pmu_filter ?: "all";
++ int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
++ stat_config.metric_no_group,
++ stat_config.metric_no_merge,
++ stat_config.metric_no_threshold,
++ stat_config.user_requested_cpu_list,
++ stat_config.system_wide,
++ &stat_config.metric_events);
+
+- metricgroup__parse_groups(evsel_list, pmu, metrics,
+- stat_config.metric_no_group,
+- stat_config.metric_no_merge,
+- stat_config.metric_no_threshold,
+- stat_config.user_requested_cpu_list,
+- stat_config.system_wide,
+- &stat_config.metric_events);
+ zfree(&metrics);
++ if (ret) {
++ status = ret;
++ goto out;
++ }
+ }
+
+ if (add_default_attributes())
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index ea8c7eca5eeedd..1c1ec444d501ee 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -147,7 +147,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
+ return err;
+ }
+
+- err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
++ err = symbol__annotate(&he->ms, evsel, NULL);
+ if (err == 0) {
+ top->sym_filter_entry = he;
+ } else {
+@@ -261,9 +261,9 @@ static void perf_top__show_details(struct perf_top *top)
+ goto out_unlock;
+
+ printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
+- printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
++ printf(" Events Pcnt (>=%d%%)\n", annotate_opts.min_pcnt);
+
+- more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
++ more = symbol__annotate_printf(&he->ms, top->sym_evsel);
+
+ if (top->evlist->enabled) {
+ if (top->zero)
+@@ -450,7 +450,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
+
+ fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
+
+- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
++ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", annotate_opts.min_pcnt);
+ fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
+ fprintf(stdout, "\t[S] stop annotation.\n");
+
+@@ -553,7 +553,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
+ prompt_integer(&top->count_filter, "Enter display event count filter");
+ break;
+ case 'F':
+- prompt_percent(&top->annotation_opts.min_pcnt,
++ prompt_percent(&annotate_opts.min_pcnt,
+ "Enter details display event filter (percent)");
+ break;
+ case 'K':
+@@ -646,8 +646,7 @@ static void *display_thread_tui(void *arg)
+ }
+
+ ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
+- &top->session->header.env, !top->record_opts.overwrite,
+- &top->annotation_opts);
++ &top->session->header.env, !top->record_opts.overwrite);
+ if (ret == K_RELOAD) {
+ top->zero = true;
+ goto repeat;
+@@ -1027,8 +1026,8 @@ static int perf_top__start_counters(struct perf_top *top)
+
+ evlist__for_each_entry(evlist, counter) {
+ try_again:
+- if (evsel__open(counter, top->evlist->core.user_requested_cpus,
+- top->evlist->core.threads) < 0) {
++ if (evsel__open(counter, counter->core.cpus,
++ counter->core.threads) < 0) {
+
+ /*
+ * Specially handle overwrite fall back.
+@@ -1241,9 +1240,9 @@ static int __cmd_top(struct perf_top *top)
+ pthread_t thread, thread_process;
+ int ret;
+
+- if (!top->annotation_opts.objdump_path) {
++ if (!annotate_opts.objdump_path) {
+ ret = perf_env__lookup_objdump(&top->session->header.env,
+- &top->annotation_opts.objdump_path);
++ &annotate_opts.objdump_path);
+ if (ret)
+ return ret;
+ }
+@@ -1299,6 +1298,7 @@ static int __cmd_top(struct perf_top *top)
+ }
+ }
+
++ evlist__uniquify_name(top->evlist);
+ ret = perf_top__start_counters(top);
+ if (ret)
+ return ret;
+@@ -1536,9 +1536,9 @@ int cmd_top(int argc, const char **argv)
+ "only consider symbols in these comms"),
+ OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
+- OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
++ OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
+ "Interleave source code with assembly code (default)"),
+- OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
++ OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
+ "Display raw encoding of assembly instructions (default)"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
+@@ -1549,9 +1549,9 @@ int cmd_top(int argc, const char **argv)
+ "addr2line binary to use for line numbers"),
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ "Specify disassembler style (e.g. -M intel for intel syntax)"),
+- OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
++ OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
+ "Add prefix to source file path names in programs (with --prefix-strip)"),
+- OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
++ OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
+ "Strip first N entries of source file path name in programs (with --prefix)"),
+ OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
+ OPT_CALLBACK(0, "percent-limit", &top, "percent",
+@@ -1609,10 +1609,10 @@ int cmd_top(int argc, const char **argv)
+ if (status < 0)
+ return status;
+
+- annotation_options__init(&top.annotation_opts);
++ annotation_options__init(&annotate_opts);
+
+- top.annotation_opts.min_pcnt = 5;
+- top.annotation_opts.context = 4;
++ annotate_opts.min_pcnt = 5;
++ annotate_opts.context = 4;
+
+ top.evlist = evlist__new();
+ if (top.evlist == NULL)
+@@ -1642,13 +1642,13 @@ int cmd_top(int argc, const char **argv)
+ usage_with_options(top_usage, options);
+
+ if (disassembler_style) {
+- top.annotation_opts.disassembler_style = strdup(disassembler_style);
+- if (!top.annotation_opts.disassembler_style)
++ annotate_opts.disassembler_style = strdup(disassembler_style);
++ if (!annotate_opts.disassembler_style)
+ return -ENOMEM;
+ }
+ if (objdump_path) {
+- top.annotation_opts.objdump_path = strdup(objdump_path);
+- if (!top.annotation_opts.objdump_path)
++ annotate_opts.objdump_path = strdup(objdump_path);
++ if (!annotate_opts.objdump_path)
+ return -ENOMEM;
+ }
+ if (addr2line_path) {
+@@ -1661,7 +1661,7 @@ int cmd_top(int argc, const char **argv)
+ if (status)
+ goto out_delete_evlist;
+
+- if (annotate_check_args(&top.annotation_opts) < 0)
++ if (annotate_check_args(&annotate_opts) < 0)
+ goto out_delete_evlist;
+
+ if (!top.evlist->core.nr_entries) {
+@@ -1787,7 +1787,7 @@ int cmd_top(int argc, const char **argv)
+ if (status < 0)
+ goto out_delete_evlist;
+
+- annotation_config__init(&top.annotation_opts);
++ annotation_config__init(&annotate_opts);
+
+ symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
+ status = symbol__init(NULL);
+@@ -1840,7 +1840,7 @@ int cmd_top(int argc, const char **argv)
+ out_delete_evlist:
+ evlist__delete(top.evlist);
+ perf_session__delete(top.session);
+- annotation_options__exit(&top.annotation_opts);
++ annotation_options__exit(&annotate_opts);
+
+ return status;
+ }
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
+index 88b23b85e33cd0..879ff21e0b177c 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
+@@ -110,7 +110,7 @@
+ {
+ "PublicDescription": "Flushes due to memory hazards",
+ "EventCode": "0x121",
+- "EventName": "BPU_FLUSH_MEM_FAULT",
++ "EventName": "GPC_FLUSH_MEM_FAULT",
+ "BriefDescription": "Flushes due to memory hazards"
+ },
+ {
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+index 1e7e8901a44509..afcdad58ef89c2 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+@@ -1,362 +1,386 @@
+ [
+ {
++ "MetricName": "branch_miss_pred_rate",
+ "MetricExpr": "BR_MIS_PRED / BR_PRED",
+ "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
+- "MetricGroup": "Branch Prediction",
+- "MetricName": "Misprediction"
++ "MetricGroup": "branch",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
+- "BriefDescription": "Branch predictor misprediction rate",
+- "MetricGroup": "Branch Prediction",
+- "MetricName": "Misprediction (retired)"
+- },
+- {
+- "MetricExpr": "BUS_ACCESS / ( BUS_CYCLES * 1)",
++ "MetricName": "bus_utilization",
++ "MetricExpr": "((BUS_ACCESS / (BUS_CYCLES * 1)) * 100)",
+ "BriefDescription": "Core-to-uncore bus utilization",
+ "MetricGroup": "Bus",
+- "MetricName": "Bus utilization"
++ "ScaleUnit": "1percent of bus cycles"
+ },
+ {
+- "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+- "BriefDescription": "L1D cache miss rate",
+- "MetricGroup": "Cache",
+- "MetricName": "L1D cache miss"
++ "MetricName": "l1d_cache_miss_ratio",
++ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
++ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
++ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
++ "ScaleUnit": "1per cache access"
++ },
++ {
++ "MetricName": "l1i_cache_miss_ratio",
++ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
++ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
++ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
++ "ScaleUnit": "1per cache access"
+ },
+ {
++        "MetricName": "l1d_cache_read_miss_rate",
+ "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
+ "BriefDescription": "L1D cache read miss rate",
+ "MetricGroup": "Cache",
+- "MetricName": "L1D cache read miss"
++ "ScaleUnit": "1per cache read access"
+ },
+ {
+- "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+- "BriefDescription": "L1I cache miss rate",
+- "MetricGroup": "Cache",
+- "MetricName": "L1I cache miss"
+- },
+- {
+- "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+- "BriefDescription": "L2 cache miss rate",
+- "MetricGroup": "Cache",
+- "MetricName": "L2 cache miss"
++ "MetricName": "l2_cache_miss_ratio",
++ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
++        "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instructions. Note that accesses to this cache are either data memory accesses or instruction fetches, as this is a unified cache.",
++ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
++ "ScaleUnit": "1per cache access"
+ },
+ {
++ "MetricName": "l1i_cache_read_miss_rate",
+ "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
+ "BriefDescription": "L1I cache read miss rate",
+ "MetricGroup": "Cache",
+- "MetricName": "L1I cache read miss"
++ "ScaleUnit": "1per cache access"
+ },
+ {
++ "MetricName": "l2d_cache_read_miss_rate",
+ "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
+ "BriefDescription": "L2 cache read miss rate",
+ "MetricGroup": "Cache",
+- "MetricName": "L2 cache read miss"
++ "ScaleUnit": "1per cache read access"
+ },
+ {
+- "MetricExpr": "(L1D_CACHE_LMISS_RD * 1000) / INST_RETIRED",
++ "MetricName": "l1d_cache_miss_mpki",
++ "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (data)",
+ "MetricGroup": "Cache",
+- "MetricName": "MPKI data"
++ "ScaleUnit": "1MPKI"
+ },
+ {
+- "MetricExpr": "(L1I_CACHE_LMISS * 1000) / INST_RETIRED",
++ "MetricName": "l1i_cache_miss_mpki",
++ "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (instruction)",
+ "MetricGroup": "Cache",
+- "MetricName": "MPKI instruction"
++ "ScaleUnit": "1MPKI"
+ },
+ {
+- "MetricExpr": "ASE_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "ASE mix"
++ "MetricName": "simd_percentage",
++ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "CRYPTO_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of crypto data processing operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Crypto mix"
++ "MetricName": "crypto_percentage",
++ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "VFP_SPEC / (duration_time *1000000000)",
++ "MetricName": "gflops",
++ "MetricExpr": "VFP_SPEC / (duration_time * 1e9)",
+ "BriefDescription": "Giga-floating point operations per second",
+- "MetricGroup": "Instruction",
+- "MetricName": "GFLOPS_ISSUED"
++ "MetricGroup": "InstructionMix"
+ },
+ {
+- "MetricExpr": "DP_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of integer data processing operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Integer mix"
++ "MetricName": "integer_dp_percentage",
++ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+- "BriefDescription": "Instructions per cycle",
+- "MetricGroup": "Instruction",
+- "MetricName": "IPC"
++ "MetricName": "ipc",
++ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
++ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
++ "MetricGroup": "General",
++ "ScaleUnit": "1per cycle"
+ },
+ {
+- "MetricExpr": "LD_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of load operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Load mix"
++ "MetricName": "load_percentage",
++ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "LDST_SPEC/ OP_SPEC",
+- "BriefDescription": "Proportion of load & store operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Load-store mix"
++ "MetricName": "load_store_spec_rate",
++ "MetricExpr": "((LDST_SPEC / INST_SPEC) * 100)",
++        "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "INST_RETIRED / (duration_time * 1000000)",
++ "MetricName": "retired_mips",
++ "MetricExpr": "INST_RETIRED / (duration_time * 1e6)",
+ "BriefDescription": "Millions of instructions per second",
+- "MetricGroup": "Instruction",
+- "MetricName": "MIPS_RETIRED"
++ "MetricGroup": "InstructionMix"
+ },
+ {
+- "MetricExpr": "INST_SPEC / (duration_time * 1000000)",
++ "MetricName": "spec_utilization_mips",
++ "MetricExpr": "INST_SPEC / (duration_time * 1e6)",
+ "BriefDescription": "Millions of instructions per second",
+- "MetricGroup": "Instruction",
+- "MetricName": "MIPS_UTILIZATION"
+- },
+- {
+- "MetricExpr": "PC_WRITE_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of software change of PC operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "PC write mix"
++ "MetricGroup": "PEutilization"
+ },
+ {
+- "MetricExpr": "ST_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of store operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "Store mix"
++ "MetricName": "pc_write_spec_rate",
++ "MetricExpr": "((PC_WRITE_SPEC / INST_SPEC) * 100)",
++        "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "VFP_SPEC / OP_SPEC",
+- "BriefDescription": "Proportion of FP operations",
+- "MetricGroup": "Instruction",
+- "MetricName": "VFP mix"
++ "MetricName": "store_percentage",
++ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "1 - (OP_RETIRED/ (CPU_CYCLES * 4))",
+- "BriefDescription": "Proportion of slots lost",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "CPU lost"
++ "MetricName": "scalar_fp_percentage",
++ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
++ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
++ "MetricGroup": "Operation_Mix",
++ "ScaleUnit": "1percent of operations"
+ },
+ {
+- "MetricExpr": "OP_RETIRED/ (CPU_CYCLES * 4)",
+- "BriefDescription": "Proportion of slots retiring",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "CPU utilization"
++ "MetricName": "retired_rate",
++ "MetricExpr": "OP_RETIRED / OP_SPEC",
++        "BriefDescription": "Of all the micro-operations issued, what percentage are retired (committed)",
++ "MetricGroup": "General",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "OP_RETIRED - OP_SPEC",
+- "BriefDescription": "Operations lost due to misspeculation",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "Operations lost"
++ "MetricName": "wasted",
++ "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))",
++ "BriefDescription": "Of all the micro-operations issued, what proportion are lost",
++ "MetricGroup": "General",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "1 - (OP_RETIRED / OP_SPEC)",
+- "BriefDescription": "Proportion of operations lost",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "Operations lost (ratio)"
++ "MetricName": "wasted_rate",
++ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
++        "BriefDescription": "Of all the micro-operations issued, what percentage are not retired (committed)",
++ "MetricGroup": "General",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "OP_RETIRED / OP_SPEC",
+- "BriefDescription": "Proportion of operations retired",
+- "MetricGroup": "Speculation / TDA",
+- "MetricName": "Operations retired"
+- },
+- {
+- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
++ "MetricName": "stall_backend_cache_rate",
++ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall backend cache cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
++ "MetricName": "stall_backend_resource_rate",
++ "MetricExpr": "((STALL_BACKEND_RESOURCE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall backend resource cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
++ "MetricName": "stall_backend_tlb_rate",
++ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall backend tlb cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
++ "MetricName": "stall_frontend_cache_rate",
++ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall frontend cache cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
++ "MetricName": "stall_frontend_tlb_rate",
++ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+ "MetricGroup": "Stall",
+- "MetricName": "Stall frontend tlb cycles"
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "DTLB_WALK / L1D_TLB",
+- "BriefDescription": "D-side walk per d-side translation request",
+- "MetricGroup": "TLB",
+- "MetricName": "DTLB walks"
++ "MetricName": "dtlb_walk_ratio",
++ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
++ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
++ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
++ "ScaleUnit": "1per TLB access"
+ },
+ {
+- "MetricExpr": "ITLB_WALK / L1I_TLB",
+- "BriefDescription": "I-side walk per i-side translation request",
+- "MetricGroup": "TLB",
+- "MetricName": "ITLB walks"
++ "MetricName": "itlb_walk_ratio",
++ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
++ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
++ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
++ "ScaleUnit": "1per TLB access"
+ },
+ {
+- "MetricExpr": "STALL_SLOT_BACKEND / (CPU_CYCLES * 4)",
+- "BriefDescription": "Fraction of slots backend bound",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "backend"
++ "ArchStdEvent": "backend_bound"
+ },
+ {
+- "MetricExpr": "1 - (retiring + lost + backend)",
+- "BriefDescription": "Fraction of slots frontend bound",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "frontend"
++ "ArchStdEvent": "frontend_bound",
++ "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)"
+ },
+ {
+- "MetricExpr": "((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * 4))",
++ "MetricName": "slots_lost_misspeculation_fraction",
++ "MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))",
+ "BriefDescription": "Fraction of slots lost due to misspeculation",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "lost"
++ "DefaultMetricgroupName": "TopdownL1",
++ "MetricGroup": "Default;TopdownL1",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "(OP_RETIRED / (CPU_CYCLES * 4))",
++ "MetricName": "retired_fraction",
++ "MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))",
+ "BriefDescription": "Fraction of slots retiring, useful work",
+- "MetricGroup": "TopDownL1",
+- "MetricName": "retiring"
++ "DefaultMetricgroupName": "TopdownL1",
++ "MetricGroup": "Default;TopdownL1",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "backend - backend_memory",
++ "MetricName": "backend_core",
++ "MetricExpr": "(backend_bound / 100) - backend_memory",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "backend_core"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE + STALL_BACKEND_MEM) / CPU_CYCLES ",
++ "MetricName": "backend_memory",
++ "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "backend_memory"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "100%"
+ },
+ {
+- "MetricExpr": " (BR_MIS_PRED_RETIRED / GPC_FLUSH) * lost",
++ "MetricName": "branch_mispredict",
++ "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction",
+         "BriefDescription": "Fraction of slots lost due to branch misprediction",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "branch_mispredict"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "frontend - frontend_latency",
++ "MetricName": "frontend_bandwidth",
++ "MetricExpr": "frontend_bound - frontend_latency",
+ "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "frontend_bandwidth"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - (frontend * CPU_CYCLES * 4)) / 4)) / CPU_CYCLES",
++ "MetricName": "frontend_latency",
++ "MetricExpr": "((STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES) * 100",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "frontend_latency"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "lost - branch_mispredict",
++ "MetricName": "other_miss_pred",
++ "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict",
+ "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "other_clears"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "(IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6)",
++ "MetricName": "pipe_utilization",
++ "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))",
+ "BriefDescription": "Fraction of execute slots utilized",
+- "MetricGroup": "TopDownL2",
+- "MetricName": "pipe_utilization"
++ "MetricGroup": "TopdownL2",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
++ "MetricName": "d_cache_l2_miss_rate",
++ "MetricExpr": "((STALL_BACKEND_MEM / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "d_cache_l2_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
++ "MetricName": "d_cache_miss_rate",
++ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "d_cache_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
++ "MetricName": "d_tlb_miss_rate",
++ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "d_tlb_miss"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
++ "MetricName": "fsu_pipe_utilization",
++ "MetricExpr": "((FSU_ISSUED / (CPU_CYCLES * 2)) * 100)",
+ "BriefDescription": "Fraction of FSU execute slots utilized",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "fsu_pipe_utilization"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
++ "MetricName": "i_cache_miss_rate",
++ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "i_cache_miss"
++ "MetricGroup": "TopdownL3",
++        "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": " STALL_FRONTEND_TLB / CPU_CYCLES ",
++ "MetricName": "i_tlb_miss_rate",
++ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "i_tlb_miss"
++ "MetricGroup": "TopdownL3",
++        "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES / 4)",
++ "MetricName": "ixu_pipe_utilization",
++ "MetricExpr": "((IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)) * 100)",
+ "BriefDescription": "Fraction of IXU execute slots utilized",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "ixu_pipe_utilization"
++ "MetricGroup": "TopdownL3",
++ "ScaleUnit": "1percent of slots"
+ },
+ {
+- "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
++ "MetricName": "stall_recovery_rate",
++ "MetricExpr": "((IDR_STALL_FLUSH / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "recovery"
+- },
+- {
+- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+- "BriefDescription": "Fraction of cycles the CPU was stalled due to core resource shortage",
+- "MetricGroup": "TopDownL3",
+- "MetricName": "resource"
++ "MetricGroup": "TopdownL3",
++        "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES ",
++ "MetricName": "stall_fsu_sched_rate",
++ "MetricExpr": "((IDR_STALL_FSU_SCHED / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_fsu_sched"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES ",
++ "MetricName": "stall_ixu_sched_rate",
++ "MetricExpr": "((IDR_STALL_IXU_SCHED / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_ixu_sched"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES ",
++ "MetricName": "stall_lob_id_rate",
++ "MetricExpr": "((IDR_STALL_LOB_ID / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_lob_id"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
++ "MetricName": "stall_rob_id_rate",
++ "MetricExpr": "((IDR_STALL_ROB_ID / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_rob_id"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ },
+ {
+- "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES ",
++ "MetricName": "stall_sob_id_rate",
++ "MetricExpr": "((IDR_STALL_SOB_ID / CPU_CYCLES) * 100)",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
+- "MetricGroup": "TopDownL4",
+- "MetricName": "stall_sob_id"
++ "MetricGroup": "TopdownL4",
++ "ScaleUnit": "1percent of cycles"
+ }
+ ]
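
The rewritten AmpereOne metrics express the top-down level 1 buckets as percentages of issue slots, with #slots standing for the dispatch width so the JSON no longer hard-codes the 4 seen in the old expressions. A back-of-envelope sketch of the same arithmetic; the counter values and the width of 4 are made-up sample inputs, not measurements:

    #include <stdio.h>

    #define SLOTS 4 /* assumed dispatch width, illustration only */

    int main(void)
    {
        /* Made-up readings standing in for OP_RETIRED, OP_SPEC and
         * CPU_CYCLES. */
        double op_retired = 8.0e9, op_spec = 9.0e9, cpu_cycles = 3.0e9;
        double total_slots = cpu_cycles * SLOTS;

        /* retired_fraction and slots_lost_misspeculation_fraction,
         * as defined in the metrics above. */
        printf("retired: %.1f%% of slots\n",
               100.0 * op_retired / total_slots);
        printf("lost:    %.1f%% of slots\n",
               100.0 * (op_spec - op_retired) / total_slots);
        return 0;
    }
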
+diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+index c606ae03cd27db..0e0253d0e75771 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
++++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
+@@ -195,7 +195,7 @@
+ "BriefDescription": "Threshold counter exceeded a value of 128."
+ },
+ {
+- "EventCode": "0x400FA",
++ "EventCode": "0x500FA",
+ "EventName": "PM_RUN_INST_CMPL",
+ "BriefDescription": "PowerPC instruction completed while the run latch is set."
+ }
+diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
+index ec2ff78e2b5f2c..3ab1d3a6638c46 100644
+--- a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
++++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
+@@ -2,71 +2,71 @@
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+- "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
++ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL if has_event(TX_C_TEND) else 0"
+ },
+ {
+ "BriefDescription": "Cycles per Instruction",
+ "MetricName": "cpi",
+- "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
++ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(INSTRUCTIONS) else 0"
+ },
+ {
+ "BriefDescription": "Problem State Instruction Ratio",
+ "MetricName": "prbstate",
+- "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
++ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
+ },
+ {
+ "BriefDescription": "Level One Miss per 100 Instructions",
+ "MetricName": "l1mp",
+- "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
++ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 2 cache",
+ "MetricName": "l2p",
+- "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++ "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+ "MetricName": "l3p",
+- "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++ "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_CHIP_HIT) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
+ "MetricName": "l4lp",
+- "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++ "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_DRAWER_HIT) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+ "MetricName": "l4rp",
+- "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++ "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_OFF_DRAWER) else 0"
+ },
+ {
+ "BriefDescription": "Percentage sourced from memory",
+ "MetricName": "memp",
+- "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++ "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_ON_CHIP_MEMORY) else 0"
+ },
+ {
+ "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+ "MetricName": "finite_cpi",
+- "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS"
++ "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS if has_event(L1C_TLB2_MISSES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+ "MetricName": "est_cpi",
+- "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)"
++ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(INSTRUCTIONS) else 0"
+ },
+ {
+ "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+ "MetricName": "scpl1m",
+- "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
++ "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES) if has_event(L1C_TLB2_MISSES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+ "MetricName": "tlb_percent",
+- "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
++ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100 if has_event(CPU_CYCLES) else 0"
+ },
+ {
+ "BriefDescription": "Estimated Cycles per TLB Miss",
+ "MetricName": "tlb_miss",
+- "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
++ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) if has_event(DTLB2_MISSES) else 0"
+ }
+ ]
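
Every z16 metric above is now guarded with has_event(), so an expression collapses to 0 when its counter set is not programmed instead of failing the whole metric evaluation. A plain-C sketch of the same guard; counter availability is modelled with a boolean here, as an illustration of the idea rather than perf's actual expression evaluator:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors "CPU_CYCLES / INSTRUCTIONS if has_event(INSTRUCTIONS)
     * else 0": never divide by a counter that was not collected. */
    static double cpi(double cycles, double insns, bool have_insns)
    {
        return have_insns ? cycles / insns : 0.0;
    }

    int main(void)
    {
        printf("cpi = %.2f\n", cpi(3.0e9, 2.0e9, true));
        printf("cpi (counter missing) = %.2f\n", cpi(3.0e9, 0.0, false));
        return 0;
    }
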
+diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+index 8fc62b8f667d80..e1f55fcfa0d02a 100644
+--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
++++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+@@ -48,6 +48,12 @@
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
++ {
++ "BriefDescription": "Uncore frequency per die [GHZ]",
++ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
++ "MetricGroup": "SoC",
++ "MetricName": "UNCORE_FREQ"
++ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+@@ -652,7 +658,7 @@
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+- "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
++ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_system_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+@@ -690,6 +696,12 @@
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_system_smt_2t_utilization"
+ },
++ {
++ "BriefDescription": "Socket actual clocks when any core is active on that socket",
++ "MetricExpr": "cbox_0@event\\=0x0@",
++ "MetricGroup": "SoC",
++ "MetricName": "tma_info_system_socket_clks"
++ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
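
The two Broadwell-DE additions derive an uncore frequency per die from socket clocks: UNCORE_FREQ = tma_info_system_socket_clks / #num_dies / duration_time / 1e9. A quick numeric sketch; the readings below are invented sample values:

    #include <stdio.h>

    int main(void)
    {
        /* Invented stand-ins for socket_clks, #num_dies and
         * duration_time (in seconds). */
        double socket_clks = 4.8e9, num_dies = 2.0, duration_s = 1.0;

        printf("UNCORE_FREQ = %.2f GHz\n",
               socket_clks / num_dies / duration_s / 1e9);
        return 0;
    }
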
+diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
+index 63d5e6d5f165bf..2b45ffa462a6c4 100644
+--- a/tools/perf/tests/Build
++++ b/tools/perf/tests/Build
+@@ -66,6 +66,7 @@ perf-y += dlfilter-test.o
+ perf-y += sigtrap.o
+ perf-y += event_groups.o
+ perf-y += symbols.o
++perf-y += util.o
+
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
+ perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+diff --git a/tools/perf/tests/attr/system-wide-dummy b/tools/perf/tests/attr/system-wide-dummy
+index 2f3e3eb728eb40..a1e1d6a263bf14 100644
+--- a/tools/perf/tests/attr/system-wide-dummy
++++ b/tools/perf/tests/attr/system-wide-dummy
+@@ -9,8 +9,10 @@ flags=8
+ type=1
+ size=136
+ config=9
+-sample_period=4000
+-sample_type=455
++sample_period=1
++# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
++# PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER
++sample_type=65671
+ read_format=4|20
+ # Event will be enabled right away.
+ disabled=0
+@@ -18,12 +20,12 @@ inherit=1
+ pinned=0
+ exclusive=0
+ exclude_user=0
+-exclude_kernel=0
+-exclude_hv=0
++exclude_kernel=1
++exclude_hv=1
+ exclude_idle=0
+ mmap=1
+ comm=1
+-freq=1
++freq=0
+ inherit_stat=0
+ enable_on_exec=0
+ task=1
+@@ -32,7 +34,7 @@ precise_ip=0
+ mmap_data=0
+ sample_id_all=1
+ exclude_host=0
+-exclude_guest=0
++exclude_guest=1
+ exclude_callchain_kernel=0
+ exclude_callchain_user=0
+ mmap2=1
+diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
+index 317730b906dd3c..198e8429a1bf85 100644
+--- a/tools/perf/tests/attr/test-record-C0
++++ b/tools/perf/tests/attr/test-record-C0
+@@ -10,9 +10,9 @@ cpu=0
+ enable_on_exec=0
+
+ # PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+-# PERF_SAMPLE_ID | PERF_SAMPLE_PERIOD
++# PERF_SAMPLE_PERIOD | PERF_SAMPLE_IDENTIFIER
+ # + PERF_SAMPLE_CPU added by -C 0
+-sample_type=455
++sample_type=65927
+
+ # Dummy event handles mmaps, comm and task.
+ mmap=0
+diff --git a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
+index fbb065842880f3..bed765450ca976 100644
+--- a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
++++ b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
+@@ -6,4 +6,4 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
+ ret = 129
+ test_ret = true
+ arch = aarch64
+-auxv = auxv["AT_HWCAP"] & 0x200000 == 0
++auxv = auxv["AT_HWCAP"] & 0x400000 == 0
+diff --git a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
+index c598c803221da7..a65113cd7311b4 100644
+--- a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
++++ b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
+@@ -6,7 +6,7 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
+ ret = 1
+ test_ret = true
+ arch = aarch64
+-auxv = auxv["AT_HWCAP"] & 0x200000 == 0x200000
++auxv = auxv["AT_HWCAP"] & 0x400000 == 0x400000
+ kernel_since = 6.1
+
+ [event:base-record]
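
Both aarch64 attr tests move their AT_HWCAP mask from 0x200000 to 0x400000, which is HWCAP_SVE (bit 22) on arm64, so the SVE and no-SVE variants now key off the correct bit. A minimal user-space probe for the same bit; the fallback define only exists so the sketch also compiles where the header lacks HWCAP_SVE:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_SVE
    #define HWCAP_SVE (1UL << 22) /* 0x400000: arm64 AT_HWCAP bit for SVE */
    #endif

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        /* The same predicate the attr files encode in their auxv line. */
        printf("SVE %s\n", (hwcap & HWCAP_SVE) ? "present" : "absent");
        return 0;
    }
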
+diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
+index 0ad18cf6dd2266..cb6f1dd00dc483 100644
+--- a/tools/perf/tests/builtin-test.c
++++ b/tools/perf/tests/builtin-test.c
+@@ -123,6 +123,7 @@ static struct test_suite *generic_tests[] = {
+ &suite__sigtrap,
+ &suite__event_groups,
+ &suite__symbols,
++ &suite__util,
+ NULL,
+ };
+
+diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
+index ed3815163d1be0..ff249555ca57a6 100644
+--- a/tools/perf/tests/code-reading.c
++++ b/tools/perf/tests/code-reading.c
+@@ -657,11 +657,11 @@ static int do_test_code_reading(bool try_kcore)
+
+ evlist__config(evlist, &opts, NULL);
+
+- evsel = evlist__first(evlist);
+-
+- evsel->core.attr.comm = 1;
+- evsel->core.attr.disabled = 1;
+- evsel->core.attr.enable_on_exec = 0;
++ evlist__for_each_entry(evlist, evsel) {
++ evsel->core.attr.comm = 1;
++ evsel->core.attr.disabled = 1;
++ evsel->core.attr.enable_on_exec = 0;
++ }
+
+ ret = evlist__open(evlist);
+ if (ret < 0) {
+diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
+index 81229fa4f1e967..cea4a506197db3 100644
+--- a/tools/perf/tests/expr.c
++++ b/tools/perf/tests/expr.c
+@@ -9,6 +9,7 @@
+ #include <math.h>
+ #include <stdlib.h>
+ #include <string.h>
++#include <string2.h>
+ #include <linux/zalloc.h>
+
+ static int test_ids_union(void)
+@@ -74,10 +75,13 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
+ int ret;
+ struct expr_parse_ctx *ctx;
+ bool is_intel = false;
+- char buf[128];
++ char strcmp_cpuid_buf[256];
++ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
++ char *cpuid = perf_pmu__getcpuid(pmu);
++ char *escaped_cpuid1, *escaped_cpuid2;
+
+- if (!get_cpuid(buf, sizeof(buf)))
+- is_intel = strstr(buf, "Intel") != NULL;
++ TEST_ASSERT_VAL("get_cpuid", cpuid);
++ is_intel = strstr(cpuid, "Intel") != NULL;
+
+ TEST_ASSERT_EQUAL("ids_union", test_ids_union(), 0);
+
+@@ -257,9 +261,28 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
+ TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1);
+ TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1", &val_ptr));
+
++
++ /* Test no cpuid match */
++ ret = test(ctx, "strcmp_cpuid_str(0x0)", 0);
++
++ /*
++ * Test cpuid match with current cpuid. Special chars have to be
++ * escaped.
++ */
++ escaped_cpuid1 = strreplace_chars('-', cpuid, "\\-");
++ free(cpuid);
++ escaped_cpuid2 = strreplace_chars(',', escaped_cpuid1, "\\,");
++ free(escaped_cpuid1);
++ escaped_cpuid1 = strreplace_chars('=', escaped_cpuid2, "\\=");
++ free(escaped_cpuid2);
++ scnprintf(strcmp_cpuid_buf, sizeof(strcmp_cpuid_buf),
++ "strcmp_cpuid_str(%s)", escaped_cpuid1);
++ free(escaped_cpuid1);
++ ret |= test(ctx, strcmp_cpuid_buf, 1);
++
+ /* has_event returns 1 when an event exists. */
+ expr__add_id_val(ctx, strdup("cycles"), 2);
+- ret = test(ctx, "has_event(cycles)", 1);
++ ret |= test(ctx, "has_event(cycles)", 1);
+
+ expr__ctx_free(ctx);
+
+diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+index 66dfdfdad553f4..60cd35c73e47db 100755
+--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
++++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+@@ -14,28 +14,21 @@ cleanup_files()
+
+ trap cleanup_files EXIT TERM INT
+
+-# Add a 1 second delay to skip samples that are not in the leaf() function
+ # shellcheck disable=SC2086
+-perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
+-PID=$!
++perf record -o "$PERF_DATA" --call-graph fp -e cycles//u --user-callchains -- $TEST_PROGRAM
+
+-echo " + Recording (PID=$PID)..."
+-sleep 2
+-echo " + Stopping perf-record..."
+-
+-kill $PID
+-wait $PID
++# Try opening the file so any immediate errors are visible in the log
++perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
+
+-# expected perf-script output:
++# expected perf-script output if 'leaf' has been inserted correctly:
+ #
+-# program
++# perf
+ # 728 leaf
+ # 753 parent
+ # 76c leafloop
+-# ...
++# ... remaining stack to main() ...
+
+-perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
+-perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \
+- awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
+- sym[1] != "parent" ||
+- sym[2] != "leafloop") exit 1 }'
++# Each frame is separated by a tab, some spaces and an address
++SEP="[[:space:]]+ [[:xdigit:]]+"
++perf script -i "$PERF_DATA" -F comm,ip,sym | tr '\n' ' ' | \
++ grep -E -q "perf $SEP leaf $SEP parent $SEP leafloop"
+diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
+index f1bf5621160fbb..4d4e6857753032 100755
+--- a/tools/perf/tests/shell/test_arm_coresight.sh
++++ b/tools/perf/tests/shell/test_arm_coresight.sh
+@@ -186,7 +186,7 @@ arm_cs_etm_snapshot_test() {
+
+ arm_cs_etm_basic_test() {
+ echo "Recording trace with '$*'"
+- perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1
++ perf record -o ${perfdata} "$@" -m,8M -- ls > /dev/null 2>&1
+
+ perf_script_branch_samples ls &&
+ perf_report_branch_samples ls &&
+diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
+index f33cfc3c19a486..b394f3ac2d667b 100644
+--- a/tools/perf/tests/tests.h
++++ b/tools/perf/tests/tests.h
+@@ -145,6 +145,7 @@ DECLARE_SUITE(dlfilter);
+ DECLARE_SUITE(sigtrap);
+ DECLARE_SUITE(event_groups);
+ DECLARE_SUITE(symbols);
++DECLARE_SUITE(util);
+
+ /*
+ * PowerPC and S390 do not support creation of instruction breakpoints using the
+diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c
+new file mode 100644
+index 00000000000000..6366db5cbf8ce8
+--- /dev/null
++++ b/tools/perf/tests/util.c
+@@ -0,0 +1,31 @@
++// SPDX-License-Identifier: GPL-2.0
++#include "tests.h"
++#include "util/debug.h"
++
++#include <linux/compiler.h>
++#include <stdlib.h>
++#include <string2.h>
++
++static int test_strreplace(char needle, const char *haystack,
++ const char *replace, const char *expected)
++{
++ char *new = strreplace_chars(needle, haystack, replace);
++ int ret = strcmp(new, expected);
++
++ free(new);
++ return ret == 0;
++}
++
++static int test__util(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
++{
++ TEST_ASSERT_VAL("empty string", test_strreplace(' ', "", "123", ""));
++ TEST_ASSERT_VAL("no match", test_strreplace('5', "123", "4", "123"));
++ TEST_ASSERT_VAL("replace 1", test_strreplace('3', "123", "4", "124"));
++ TEST_ASSERT_VAL("replace 2", test_strreplace('a', "abcabc", "ef", "efbcefbc"));
++ TEST_ASSERT_VAL("replace long", test_strreplace('a', "abcabc", "longlong",
++ "longlongbclonglongbc"));
++
++ return 0;
++}
++
++DEFINE_SUITE("util", util);
+diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c
+index ddd40bc63448ae..8e08fc75a973e5 100644
+--- a/tools/perf/tests/workloads/datasym.c
++++ b/tools/perf/tests/workloads/datasym.c
+@@ -16,6 +16,22 @@ static int datasym(int argc __maybe_unused, const char **argv __maybe_unused)
+ {
+ for (;;) {
+ buf1.data1++;
++ if (buf1.data1 == 123) {
++ /*
++ * Add some 'noise' in the loop to work around errata
++ * 1694299 on Arm N1.
++ *
++ * Bias exists in SPE sampling which can cause the load
++ * and store instructions to be skipped entirely. This
++ * comes and goes randomly depending on the offset the
++ * linker places the datasym loop at in the Perf binary.
++ * With an extra branch in the middle of the loop that
++ * isn't always taken, the instruction stream is no
++ * longer a continuous repeating pattern that interacts
++ * badly with the bias.
++ */
++ buf1.data1++;
++ }
+ buf1.data2 += buf1.data1;
+ }
+ return 0;
+diff --git a/tools/perf/tests/workloads/leafloop.c b/tools/perf/tests/workloads/leafloop.c
+index 1bf5cc97649b0e..f7561767e32cd2 100644
+--- a/tools/perf/tests/workloads/leafloop.c
++++ b/tools/perf/tests/workloads/leafloop.c
+@@ -1,6 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
++#include <signal.h>
+ #include <stdlib.h>
+ #include <linux/compiler.h>
++#include <unistd.h>
+ #include "../tests.h"
+
+ /* We want to check these symbols in perf script */
+@@ -8,10 +10,16 @@ noinline void leaf(volatile int b);
+ noinline void parent(volatile int b);
+
+ static volatile int a;
++static volatile sig_atomic_t done;
++
++static void sighandler(int sig __maybe_unused)
++{
++ done = 1;
++}
+
+ noinline void leaf(volatile int b)
+ {
+- for (;;)
++ while (!done)
+ a += b;
+ }
+
+@@ -22,12 +30,16 @@ noinline void parent(volatile int b)
+
+ static int leafloop(int argc, const char **argv)
+ {
+- int c = 1;
++ int sec = 1;
+
+ if (argc > 0)
+- c = atoi(argv[0]);
++ sec = atoi(argv[0]);
++
++ signal(SIGINT, sighandler);
++ signal(SIGALRM, sighandler);
++ alarm(sec);
+
+- parent(c);
++ parent(sec);
+ return 0;
+ }
+
+diff --git a/tools/perf/tests/workloads/thloop.c b/tools/perf/tests/workloads/thloop.c
+index af05269c2eb8a4..457b29f91c3ee2 100644
+--- a/tools/perf/tests/workloads/thloop.c
++++ b/tools/perf/tests/workloads/thloop.c
+@@ -7,7 +7,6 @@
+ #include "../tests.h"
+
+ static volatile sig_atomic_t done;
+-static volatile unsigned count;
+
+ /* We want to check this symbol in perf report */
+ noinline void test_loop(void);
+@@ -19,8 +18,7 @@ static void sighandler(int sig __maybe_unused)
+
+ noinline void test_loop(void)
+ {
+- while (!done)
+- __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED);
++ while (!done);
+ }
+
+ static void *thfunc(void *arg)
+diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
+index 603d11283cbdce..19503e8387385d 100644
+--- a/tools/perf/ui/browser.c
++++ b/tools/perf/ui/browser.c
+@@ -203,7 +203,7 @@ void ui_browser__refresh_dimensions(struct ui_browser *browser)
+ void ui_browser__handle_resize(struct ui_browser *browser)
+ {
+ ui__refresh_dimensions(false);
+- ui_browser__show(browser, browser->title, ui_helpline__current);
++ ui_browser__show(browser, browser->title ?: "", ui_helpline__current);
+ ui_browser__refresh(browser);
+ }
+
+@@ -287,7 +287,8 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
+ mutex_lock(&ui__lock);
+ __ui_browser__show_title(browser, title);
+
+- browser->title = title;
++ free(browser->title);
++ browser->title = strdup(title);
+ zfree(&browser->helpline);
+
+ va_start(ap, helpline);
+@@ -304,6 +305,7 @@ void ui_browser__hide(struct ui_browser *browser)
+ mutex_lock(&ui__lock);
+ ui_helpline__pop();
+ zfree(&browser->helpline);
++ zfree(&browser->title);
+ mutex_unlock(&ui__lock);
+ }
+
+diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
+index 510ce455405019..6e98d5f8f71cc5 100644
+--- a/tools/perf/ui/browser.h
++++ b/tools/perf/ui/browser.h
+@@ -21,7 +21,7 @@ struct ui_browser {
+ u8 extra_title_lines;
+ int current_color;
+ void *priv;
+- const char *title;
++ char *title;
+ char *helpline;
+ const char *no_samples_msg;
+ void (*refresh_dimensions)(struct ui_browser *browser);
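
Paired with the browser.c hunk above, this header change fixes a lifetime bug: the browser previously kept a borrowed const char *title that could dangle once the caller's buffer was reused, so ui_browser__show() now takes its own strdup() copy and ui_browser__hide() frees it. A reduced sketch of the ownership rule; the struct and function names are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct widget {
        char *title; /* owned heap copy, never a borrowed pointer */
    };

    static void widget_show(struct widget *w, const char *title)
    {
        /* Drop any previous copy, then duplicate: the caller's buffer
         * may be a stack temporary that is reused after return. */
        free(w->title);
        w->title = strdup(title ? title : "");
    }

    static void widget_hide(struct widget *w)
    {
        free(w->title);
        w->title = NULL;
    }

    int main(void)
    {
        struct widget w = { NULL };
        char buf[32];

        snprintf(buf, sizeof(buf), "frame %d", 1);
        widget_show(&w, buf); /* safe even once buf changes */
        widget_hide(&w);
        return 0;
    }
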
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index ccdb2cd11fbf03..20f24d104da8e5 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -27,7 +27,6 @@ struct annotate_browser {
+ struct rb_node *curr_hot;
+ struct annotation_line *selection;
+ struct arch *arch;
+- struct annotation_options *opts;
+ bool searching_backwards;
+ char search_bf[128];
+ };
+@@ -97,7 +96,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
+ struct annotation_write_ops ops = {
+ .first_line = row == 0,
+ .current_entry = is_current_entry,
+- .change_color = (!notes->options->hide_src_code &&
++ .change_color = (!annotate_opts.hide_src_code &&
+ (!is_current_entry ||
+ (browser->use_navkeypressed &&
+ !browser->navkeypressed))),
+@@ -114,7 +113,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
+ if (!browser->navkeypressed)
+ ops.width += 1;
+
+- annotation_line__write(al, notes, &ops, ab->opts);
++ annotation_line__write(al, notes, &ops);
+
+ if (ops.current_entry)
+ ab->selection = al;
+@@ -128,7 +127,7 @@ static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+
+ while (pos && pos->al.offset == -1) {
+ pos = list_prev_entry(pos, al.node);
+- if (!ab->opts->hide_src_code)
++ if (!annotate_opts.hide_src_code)
+ diff++;
+ }
+
+@@ -195,7 +194,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
+ return;
+ }
+
+- if (notes->options->hide_src_code) {
++ if (annotate_opts.hide_src_code) {
+ from = cursor->al.idx_asm;
+ to = target->idx_asm;
+ } else {
+@@ -224,7 +223,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
+ int ret = ui_browser__list_head_refresh(browser);
+ int pcnt_width = annotation__pcnt_width(notes);
+
+- if (notes->options->jump_arrows)
++ if (annotate_opts.jump_arrows)
+ annotate_browser__draw_current_jump(browser);
+
+ ui_browser__set_color(browser, HE_COLORSET_NORMAL);
+@@ -258,7 +257,7 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser,
+ parent = *p;
+ l = rb_entry(parent, struct annotation_line, rb_node);
+
+- if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
++ if (disasm__cmp(al, l, annotate_opts.percent_type) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+@@ -294,11 +293,10 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
+ static void annotate_browser__set_rb_top(struct annotate_browser *browser,
+ struct rb_node *nd)
+ {
+- struct annotation *notes = browser__annotation(&browser->b);
+ struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node);
+ u32 idx = pos->idx;
+
+- if (notes->options->hide_src_code)
++ if (annotate_opts.hide_src_code)
+ idx = pos->idx_asm;
+ annotate_browser__set_top(browser, pos, idx);
+ browser->curr_hot = nd;
+@@ -331,13 +329,13 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
+ double percent;
+
+ percent = annotation_data__percent(&pos->al.data[i],
+- browser->opts->percent_type);
++ annotate_opts.percent_type);
+
+ if (max_percent < percent)
+ max_percent = percent;
+ }
+
+- if (max_percent < 0.01 && pos->al.ipc == 0) {
++ if (max_percent < 0.01 && (!pos->al.cycles || pos->al.cycles->ipc == 0)) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
+ continue;
+ }
+@@ -380,12 +378,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
+ browser->b.seek(&browser->b, offset, SEEK_CUR);
+ al = list_entry(browser->b.top, struct annotation_line, node);
+
+- if (notes->options->hide_src_code) {
++ if (annotate_opts.hide_src_code) {
+ if (al->idx_asm < offset)
+ offset = al->idx;
+
+- browser->b.nr_entries = notes->nr_entries;
+- notes->options->hide_src_code = false;
++ browser->b.nr_entries = notes->src->nr_entries;
++ annotate_opts.hide_src_code = false;
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ browser->b.top_idx = al->idx - offset;
+ browser->b.index = al->idx;
+@@ -402,8 +400,8 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
+ if (al->idx_asm < offset)
+ offset = al->idx_asm;
+
+- browser->b.nr_entries = notes->nr_asm_entries;
+- notes->options->hide_src_code = true;
++ browser->b.nr_entries = notes->src->nr_asm_entries;
++ annotate_opts.hide_src_code = true;
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ browser->b.top_idx = al->idx_asm - offset;
+ browser->b.index = al->idx_asm;
+@@ -435,7 +433,7 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser)
+ {
+ struct annotation *notes = browser__annotation(browser);
+ ui_browser__reset_index(browser);
+- browser->nr_entries = notes->nr_asm_entries;
++ browser->nr_entries = notes->src->nr_asm_entries;
+ }
+
+ static int sym_title(struct symbol *sym, struct map *map, char *title,
+@@ -483,8 +481,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
+ target_ms.map = ms->map;
+ target_ms.sym = dl->ops.target.sym;
+ annotation__unlock(notes);
+- symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts);
+- sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type);
++ symbol__tui_annotate(&target_ms, evsel, hbt);
++ sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type);
+ ui_browser__show_title(&browser->b, title);
+ return true;
+ }
+@@ -659,7 +657,6 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
+
+ static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
+ {
+- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
+ struct map_symbol *ms = browser->priv;
+ struct symbol *sym = ms->sym;
+ char symbol_dso[SYM_TITLE_MAX_SIZE];
+@@ -667,7 +664,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const
+ if (ui_browser__show(browser, title, help) < 0)
+ return -1;
+
+- sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type);
++ sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type);
+
+ ui_browser__gotorc_title(browser, 0, 0);
+ ui_browser__set_color(browser, HE_COLORSET_ROOT);
+@@ -809,7 +806,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ annotate_browser__show(&browser->b, title, help);
+ continue;
+ case 'k':
+- notes->options->show_linenr = !notes->options->show_linenr;
++ annotate_opts.show_linenr = !annotate_opts.show_linenr;
+ continue;
+ case 'l':
+ annotate_browser__show_full_location (&browser->b);
+@@ -822,18 +819,18 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ ui_helpline__puts(help);
+ continue;
+ case 'o':
+- notes->options->use_offset = !notes->options->use_offset;
++ annotate_opts.use_offset = !annotate_opts.use_offset;
+ annotation__update_column_widths(notes);
+ continue;
+ case 'O':
+- if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
+- notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
++ if (++annotate_opts.offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
++ annotate_opts.offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
+ continue;
+ case 'j':
+- notes->options->jump_arrows = !notes->options->jump_arrows;
++ annotate_opts.jump_arrows = !annotate_opts.jump_arrows;
+ continue;
+ case 'J':
+- notes->options->show_nr_jumps = !notes->options->show_nr_jumps;
++ annotate_opts.show_nr_jumps = !annotate_opts.show_nr_jumps;
+ annotation__update_column_widths(notes);
+ continue;
+ case '/':
+@@ -860,7 +857,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ browser->b.height,
+ browser->b.index,
+ browser->b.top_idx,
+- notes->nr_asm_entries);
++ notes->src->nr_asm_entries);
+ }
+ continue;
+ case K_ENTER:
+@@ -884,7 +881,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ continue;
+ }
+ case 'P':
+- map_symbol__annotation_dump(ms, evsel, browser->opts);
++ map_symbol__annotation_dump(ms, evsel);
+ continue;
+ case 't':
+ if (symbol_conf.show_total_period) {
+@@ -897,15 +894,15 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ annotation__update_column_widths(notes);
+ continue;
+ case 'c':
+- if (notes->options->show_minmax_cycle)
+- notes->options->show_minmax_cycle = false;
++ if (annotate_opts.show_minmax_cycle)
++ annotate_opts.show_minmax_cycle = false;
+ else
+- notes->options->show_minmax_cycle = true;
++ annotate_opts.show_minmax_cycle = true;
+ annotation__update_column_widths(notes);
+ continue;
+ case 'p':
+ case 'b':
+- switch_percent_type(browser->opts, key == 'b');
++ switch_percent_type(&annotate_opts, key == 'b');
+ hists__scnprintf_title(hists, title, sizeof(title));
+ annotate_browser__show(&browser->b, title, help);
+ continue;
+@@ -932,26 +929,23 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ }
+
+ int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct hist_browser_timer *hbt,
+- struct annotation_options *opts)
++ struct hist_browser_timer *hbt)
+ {
+- return symbol__tui_annotate(ms, evsel, hbt, opts);
++ return symbol__tui_annotate(ms, evsel, hbt);
+ }
+
+ int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
+- struct hist_browser_timer *hbt,
+- struct annotation_options *opts)
++ struct hist_browser_timer *hbt)
+ {
+ /* reset abort key so that it can get Ctrl-C as a key */
+ SLang_reset_tty();
+ SLang_init_tty(0, 0, 0);
+
+- return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
++ return map_symbol__tui_annotate(&he->ms, evsel, hbt);
+ }
+
+ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct hist_browser_timer *hbt,
+- struct annotation_options *opts)
++ struct hist_browser_timer *hbt)
+ {
+ struct symbol *sym = ms->sym;
+ struct annotation *notes = symbol__annotation(sym);
+@@ -965,7 +959,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+ .priv = ms,
+ .use_navkeypressed = true,
+ },
+- .opts = opts,
+ };
+ struct dso *dso;
+ int ret = -1, err;
+@@ -979,7 +972,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+ return -1;
+
+ if (not_annotated) {
+- err = symbol__annotate2(ms, evsel, opts, &browser.arch);
++ err = symbol__annotate2(ms, evsel, &browser.arch);
+ if (err) {
+ char msg[BUFSIZ];
+ dso->annotate_warned = true;
+@@ -991,12 +984,12 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+
+ ui_helpline__push("Press ESC to exit");
+
+- browser.b.width = notes->max_line_len;
+- browser.b.nr_entries = notes->nr_entries;
++ browser.b.width = notes->src->max_line_len;
++ browser.b.nr_entries = notes->src->nr_entries;
+ browser.b.entries = &notes->src->source,
+ browser.b.width += 18; /* Percentage */
+
+- if (notes->options->hide_src_code)
++ if (annotate_opts.hide_src_code)
+ ui_browser__init_asm_mode(&browser.b);
+
+ ret = annotate_browser__run(&browser, evsel, hbt);
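
Editor's note: the hunks above are all instances of one mechanical transformation — the per-browser `struct annotation_options *opts` pointer goes away, and every reader consults a single process-wide `annotate_opts` instead. A minimal sketch of the pattern follows; the names (`demo_opts`, `render_line_*`) are hypothetical, not perf's:

```c
/* Sketch of the refactor applied above: a single global options object
 * replaces an options pointer threaded through every call chain.
 * All names here are illustrative, not perf's. */
#include <stdbool.h>
#include <stdio.h>

struct demo_options {
	bool hide_src_code;
	unsigned int percent_type;
};

/* One process-wide instance, like perf's "struct annotation_options annotate_opts". */
static struct demo_options demo_opts = { .percent_type = 0 };

/* Old style: every function needed the pointer, so every caller did too. */
static void render_line_old(const char *line, const struct demo_options *opts)
{
	if (!opts->hide_src_code)
		printf("%s\n", line);
}

/* New style: the parameter disappears from the whole call chain. */
static void render_line_new(const char *line)
{
	if (!demo_opts.hide_src_code)
		printf("%s\n", line);
}

int main(void)
{
	render_line_old("mov %rax, %rbx", &demo_opts);
	demo_opts.hide_src_code = true;
	render_line_new("mov %rax, %rbx");	/* prints nothing now */
	return 0;
}
```

The trade-off is shorter signatures everywhere at the cost of assuming one option set per process — a reasonable assumption for a command-line tool like perf.
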
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 70db5a71790569..bb59d27642ccf2 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -2250,8 +2250,7 @@ struct hist_browser *hist_browser__new(struct hists *hists)
+ static struct hist_browser *
+ perf_evsel_browser__new(struct evsel *evsel,
+ struct hist_browser_timer *hbt,
+- struct perf_env *env,
+- struct annotation_options *annotation_opts)
++ struct perf_env *env)
+ {
+ struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
+
+@@ -2259,7 +2258,6 @@ perf_evsel_browser__new(struct evsel *evsel,
+ browser->hbt = hbt;
+ browser->env = env;
+ browser->title = hists_browser__scnprintf_title;
+- browser->annotation_opts = annotation_opts;
+ }
+ return browser;
+ }
+@@ -2432,8 +2430,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
+ struct hist_entry *he;
+ int err;
+
+- if (!browser->annotation_opts->objdump_path &&
+- perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path))
++ if (!annotate_opts.objdump_path &&
++ perf_env__lookup_objdump(browser->env, &annotate_opts.objdump_path))
+ return 0;
+
+ notes = symbol__annotation(act->ms.sym);
+@@ -2445,8 +2443,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
+ else
+ evsel = hists_to_evsel(browser->hists);
+
+- err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
+- browser->annotation_opts);
++ err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
+ he = hist_browser__selected_entry(browser);
+ /*
+ * offer option to annotate the other branch source or target
+@@ -2943,11 +2940,10 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
+
+ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline,
+ bool left_exits, struct hist_browser_timer *hbt, float min_pcnt,
+- struct perf_env *env, bool warn_lost_event,
+- struct annotation_options *annotation_opts)
++ struct perf_env *env, bool warn_lost_event)
+ {
+ struct hists *hists = evsel__hists(evsel);
+- struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
++ struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
+ struct branch_info *bi = NULL;
+ #define MAX_OPTIONS 16
+ char *options[MAX_OPTIONS];
+@@ -3398,7 +3394,6 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h
+ struct evsel_menu {
+ struct ui_browser b;
+ struct evsel *selection;
+- struct annotation_options *annotation_opts;
+ bool lost_events, lost_events_warned;
+ float min_pcnt;
+ struct perf_env *env;
+@@ -3499,8 +3494,7 @@ static int perf_evsel_menu__run(struct evsel_menu *menu,
+ hbt->timer(hbt->arg);
+ key = evsel__hists_browse(pos, nr_events, help, true, hbt,
+ menu->min_pcnt, menu->env,
+- warn_lost_event,
+- menu->annotation_opts);
++ warn_lost_event);
+ ui_browser__show_title(&menu->b, title);
+ switch (key) {
+ case K_TAB:
+@@ -3557,7 +3551,7 @@ static bool filter_group_entries(struct ui_browser *browser __maybe_unused,
+
+ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help,
+ struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env,
+- bool warn_lost_event, struct annotation_options *annotation_opts)
++ bool warn_lost_event)
+ {
+ struct evsel *pos;
+ struct evsel_menu menu = {
+@@ -3572,7 +3566,6 @@ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, con
+ },
+ .min_pcnt = min_pcnt,
+ .env = env,
+- .annotation_opts = annotation_opts,
+ };
+
+ ui_helpline__push("Press ESC to exit");
+@@ -3607,8 +3600,7 @@ static bool evlist__single_entry(struct evlist *evlist)
+ }
+
+ int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
+- float min_pcnt, struct perf_env *env, bool warn_lost_event,
+- struct annotation_options *annotation_opts)
++ float min_pcnt, struct perf_env *env, bool warn_lost_event)
+ {
+ int nr_entries = evlist->core.nr_entries;
+
+@@ -3617,7 +3609,7 @@ single_entry: {
+ struct evsel *first = evlist__first(evlist);
+
+ return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt,
+- env, warn_lost_event, annotation_opts);
++ env, warn_lost_event);
+ }
+ }
+
+@@ -3635,7 +3627,7 @@ single_entry: {
+ }
+
+ return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env,
+- warn_lost_event, annotation_opts);
++ warn_lost_event);
+ }
+
+ static int block_hists_browser__title(struct hist_browser *browser, char *bf,
+@@ -3654,8 +3646,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf,
+ }
+
+ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
+- float min_percent, struct perf_env *env,
+- struct annotation_options *annotation_opts)
++ float min_percent, struct perf_env *env)
+ {
+ struct hists *hists = &bh->block_hists;
+ struct hist_browser *browser;
+@@ -3672,7 +3663,6 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
+ browser->title = block_hists_browser__title;
+ browser->min_pcnt = min_percent;
+ browser->env = env;
+- browser->annotation_opts = annotation_opts;
+
+ /* reset abort key so that it can get Ctrl-C as a key */
+ SLang_reset_tty();
+diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
+index 1e938d9ffa5ee2..de46f6c56b0ef0 100644
+--- a/tools/perf/ui/browsers/hists.h
++++ b/tools/perf/ui/browsers/hists.h
+@@ -4,7 +4,6 @@
+
+ #include "ui/browser.h"
+
+-struct annotation_options;
+ struct evsel;
+
+ struct hist_browser {
+@@ -15,7 +14,6 @@ struct hist_browser {
+ struct hist_browser_timer *hbt;
+ struct pstack *pstack;
+ struct perf_env *env;
+- struct annotation_options *annotation_opts;
+ struct evsel *block_evsel;
+ int print_seq;
+ bool show_dso;
+diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
+index 2effac77ca8c67..394861245fd3e4 100644
+--- a/tools/perf/ui/gtk/annotate.c
++++ b/tools/perf/ui/gtk/annotate.c
+@@ -162,7 +162,6 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
+ }
+
+ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *options,
+ struct hist_browser_timer *hbt)
+ {
+ struct dso *dso = map__dso(ms->map);
+@@ -176,7 +175,7 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
+ if (dso->annotate_warned)
+ return -1;
+
+- err = symbol__annotate(ms, evsel, options, NULL);
++ err = symbol__annotate(ms, evsel, NULL);
+ if (err) {
+ char msg[BUFSIZ];
+ dso->annotate_warned = true;
+@@ -244,10 +243,9 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
+
+ int hist_entry__gtk_annotate(struct hist_entry *he,
+ struct evsel *evsel,
+- struct annotation_options *options,
+ struct hist_browser_timer *hbt)
+ {
+- return symbol__gtk_annotate(&he->ms, evsel, options, hbt);
++ return symbol__gtk_annotate(&he->ms, evsel, hbt);
+ }
+
+ void perf_gtk__show_annotations(void)
+diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
+index 1e84dceb526713..a2b497f03fd6e4 100644
+--- a/tools/perf/ui/gtk/gtk.h
++++ b/tools/perf/ui/gtk/gtk.h
+@@ -56,13 +56,11 @@ struct evsel;
+ struct evlist;
+ struct hist_entry;
+ struct hist_browser_timer;
+-struct annotation_options;
+
+ int evlist__gtk_browse_hists(struct evlist *evlist, const char *help,
+ struct hist_browser_timer *hbt, float min_pcnt);
+ int hist_entry__gtk_annotate(struct hist_entry *he,
+ struct evsel *evsel,
+- struct annotation_options *options,
+ struct hist_browser_timer *hbt);
+ void perf_gtk__show_annotations(void);
+
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index 82956adf99632d..6dfe11cbf30e2b 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -57,6 +57,9 @@
+
+ #include <linux/ctype.h>
+
++/* global annotation options */
++struct annotation_options annotate_opts;
++
+ static regex_t file_lineno;
+
+ static struct ins_ops *ins__find(struct arch *arch, const char *name);
+@@ -810,7 +813,6 @@ static __maybe_unused void annotated_source__delete(struct annotated_source *src
+ if (src == NULL)
+ return;
+ zfree(&src->histograms);
+- zfree(&src->cycles_hist);
+ free(src);
+ }
+
+@@ -845,18 +847,6 @@ static int annotated_source__alloc_histograms(struct annotated_source *src,
+ return src->histograms ? 0 : -1;
+ }
+
+-/* The cycles histogram is lazily allocated. */
+-static int symbol__alloc_hist_cycles(struct symbol *sym)
+-{
+- struct annotation *notes = symbol__annotation(sym);
+- const size_t size = symbol__size(sym);
+-
+- notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
+- if (notes->src->cycles_hist == NULL)
+- return -1;
+- return 0;
+-}
+-
+ void symbol__annotate_zero_histograms(struct symbol *sym)
+ {
+ struct annotation *notes = symbol__annotation(sym);
+@@ -865,9 +855,10 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
+ if (notes->src != NULL) {
+ memset(notes->src->histograms, 0,
+ notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+- if (notes->src->cycles_hist)
+- memset(notes->src->cycles_hist, 0,
+- symbol__size(sym) * sizeof(struct cyc_hist));
++ }
++ if (notes->branch && notes->branch->cycles_hist) {
++ memset(notes->branch->cycles_hist, 0,
++ symbol__size(sym) * sizeof(struct cyc_hist));
+ }
+ annotation__unlock(notes);
+ }
+@@ -958,23 +949,33 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
+ return 0;
+ }
+
++static struct annotated_branch *annotation__get_branch(struct annotation *notes)
++{
++ if (notes == NULL)
++ return NULL;
++
++ if (notes->branch == NULL)
++ notes->branch = zalloc(sizeof(*notes->branch));
++
++ return notes->branch;
++}
++
+ static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
+ {
+ struct annotation *notes = symbol__annotation(sym);
++ struct annotated_branch *branch;
+
+- if (notes->src == NULL) {
+- notes->src = annotated_source__new();
+- if (notes->src == NULL)
+- return NULL;
+- goto alloc_cycles_hist;
+- }
++ branch = annotation__get_branch(notes);
++ if (branch == NULL)
++ return NULL;
+
+- if (!notes->src->cycles_hist) {
+-alloc_cycles_hist:
+- symbol__alloc_hist_cycles(sym);
++ if (branch->cycles_hist == NULL) {
++ const size_t size = symbol__size(sym);
++
++ branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
+ }
+
+- return notes->src->cycles_hist;
++ return branch->cycles_hist;
+ }
+
+ struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
+@@ -1083,6 +1084,14 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
+ return n_insn;
+ }
+
++static void annotated_branch__delete(struct annotated_branch *branch)
++{
++ if (branch) {
++ zfree(&branch->cycles_hist);
++ free(branch);
++ }
++}
++
+ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
+ {
+ unsigned n_insn;
+@@ -1091,6 +1100,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
+
+ n_insn = annotation__count_insn(notes, start, end);
+ if (n_insn && ch->num && ch->cycles) {
++ struct annotated_branch *branch;
+ float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
+
+ /* Hide data when there are too many overlaps. */
+@@ -1100,52 +1110,74 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
+ for (offset = start; offset <= end; offset++) {
+ struct annotation_line *al = notes->offsets[offset];
+
+- if (al && al->ipc == 0.0) {
+- al->ipc = ipc;
++ if (al && al->cycles && al->cycles->ipc == 0.0) {
++ al->cycles->ipc = ipc;
+ cover_insn++;
+ }
+ }
+
+- if (cover_insn) {
+- notes->hit_cycles += ch->cycles;
+- notes->hit_insn += n_insn * ch->num;
+- notes->cover_insn += cover_insn;
++ branch = annotation__get_branch(notes);
++ if (cover_insn && branch) {
++ branch->hit_cycles += ch->cycles;
++ branch->hit_insn += n_insn * ch->num;
++ branch->cover_insn += cover_insn;
+ }
+ }
+ }
+
+-void annotation__compute_ipc(struct annotation *notes, size_t size)
++static int annotation__compute_ipc(struct annotation *notes, size_t size)
+ {
++ int err = 0;
+ s64 offset;
+
+- if (!notes->src || !notes->src->cycles_hist)
+- return;
++ if (!notes->branch || !notes->branch->cycles_hist)
++ return 0;
+
+- notes->total_insn = annotation__count_insn(notes, 0, size - 1);
+- notes->hit_cycles = 0;
+- notes->hit_insn = 0;
+- notes->cover_insn = 0;
++ notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
++ notes->branch->hit_cycles = 0;
++ notes->branch->hit_insn = 0;
++ notes->branch->cover_insn = 0;
+
+ annotation__lock(notes);
+ for (offset = size - 1; offset >= 0; --offset) {
+ struct cyc_hist *ch;
+
+- ch = &notes->src->cycles_hist[offset];
++ ch = &notes->branch->cycles_hist[offset];
+ if (ch && ch->cycles) {
+ struct annotation_line *al;
+
++ al = notes->offsets[offset];
++ if (al && al->cycles == NULL) {
++ al->cycles = zalloc(sizeof(*al->cycles));
++ if (al->cycles == NULL) {
++ err = ENOMEM;
++ break;
++ }
++ }
+ if (ch->have_start)
+ annotation__count_and_fill(notes, ch->start, offset, ch);
+- al = notes->offsets[offset];
+ if (al && ch->num_aggr) {
+- al->cycles = ch->cycles_aggr / ch->num_aggr;
+- al->cycles_max = ch->cycles_max;
+- al->cycles_min = ch->cycles_min;
++ al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
++ al->cycles->max = ch->cycles_max;
++ al->cycles->min = ch->cycles_min;
+ }
+- notes->have_cycles = true;
+ }
+ }
++
++ if (err) {
++ while (++offset < (s64)size) {
++ struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
++
++ if (ch && ch->cycles) {
++ struct annotation_line *al = notes->offsets[offset];
++ if (al)
++ zfree(&al->cycles);
++ }
++ }
++ }
++
+ annotation__unlock(notes);
++	return err;
+ }
+
+ int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
+@@ -1225,6 +1257,7 @@ static void annotation_line__exit(struct annotation_line *al)
+ {
+ zfree_srcline(&al->path);
+ zfree(&al->line);
++ zfree(&al->cycles);
+ }
+
+ static size_t disasm_line_size(int nr)
+@@ -1299,6 +1332,7 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
+ void annotation__exit(struct annotation *notes)
+ {
+ annotated_source__delete(notes->src);
++ annotated_branch__delete(notes->branch);
+ }
+
+ static struct sharded_mutex *sharded_mutex;
+@@ -1817,7 +1851,6 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ struct annotate_args *args)
+ {
+ struct annotation *notes = symbol__annotation(sym);
+- struct annotation_options *opts = args->options;
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+@@ -1927,7 +1960,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ prev_buf_size = buf_size;
+ fflush(s);
+
+- if (!opts->hide_src_code && srcline) {
++ if (!annotate_opts.hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+@@ -2050,7 +2083,7 @@ static char *expand_tabs(char *line, char **storage, size_t *storage_len)
+
+ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
+ {
+- struct annotation_options *opts = args->options;
++ struct annotation_options *opts = &annotate_opts;
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ char *command;
+@@ -2300,13 +2333,13 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
+ }
+
+ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *options, struct arch **parch)
++ struct arch **parch)
+ {
+ struct symbol *sym = ms->sym;
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotate_args args = {
+ .evsel = evsel,
+- .options = options,
++ .options = &annotate_opts,
+ };
+ struct perf_env *env = evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+@@ -2334,7 +2367,7 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
+ }
+
+ args.ms = *ms;
+- if (notes->options && notes->options->full_addr)
++ if (annotate_opts.full_addr)
+ notes->start = map__objdump_2mem(ms->map, ms->sym->start);
+ else
+ notes->start = map__rip_2objdump(ms->map, ms->sym->start);
+@@ -2342,12 +2375,12 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
+ return symbol__disassemble(sym, &args);
+ }
+
+-static void insert_source_line(struct rb_root *root, struct annotation_line *al,
+- struct annotation_options *opts)
++static void insert_source_line(struct rb_root *root, struct annotation_line *al)
+ {
+ struct annotation_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
++ unsigned int percent_type = annotate_opts.percent_type;
+ int i, ret;
+
+ while (*p != NULL) {
+@@ -2358,7 +2391,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al,
+ if (ret == 0) {
+ for (i = 0; i < al->data_nr; i++) {
+ iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
+- opts->percent_type);
++ percent_type);
+ }
+ return;
+ }
+@@ -2371,7 +2404,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al,
+
+ for (i = 0; i < al->data_nr; i++) {
+ al->data[i].percent_sum = annotation_data__percent(&al->data[i],
+- opts->percent_type);
++ percent_type);
+ }
+
+ rb_link_node(&al->rb_node, parent, p);
+@@ -2493,8 +2526,7 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
+ return 0;
+ }
+
+-int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *opts)
++int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
+ {
+ struct map *map = ms->map;
+ struct symbol *sym = ms->sym;
+@@ -2505,6 +2537,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
+ struct annotation_line *pos, *queue = NULL;
++ struct annotation_options *opts = &annotate_opts;
+ u64 start = map__rip_2objdump(map, sym->start);
+ int printed = 2, queue_len = 0, addr_fmt_width;
+ int more = 0;
+@@ -2633,8 +2666,7 @@ static void FILE__write_graph(void *fp, int graph)
+ fputs(s, fp);
+ }
+
+-static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
+- struct annotation_options *opts)
++static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
+ {
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotation_write_ops wops = {
+@@ -2651,7 +2683,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
+ list_for_each_entry(al, &notes->src->source, node) {
+ if (annotation_line__filter(al, notes))
+ continue;
+- annotation_line__write(al, notes, &wops, opts);
++ annotation_line__write(al, notes, &wops);
+ fputc('\n', fp);
+ wops.first_line = false;
+ }
+@@ -2659,8 +2691,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
+ return 0;
+ }
+
+-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *opts)
++int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
+ {
+ const char *ev_name = evsel__name(evsel);
+ char buf[1024];
+@@ -2682,7 +2713,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
+
+ fprintf(fp, "%s() %s\nEvent: %s\n\n",
+ ms->sym->name, map__dso(ms->map)->long_name, ev_name);
+- symbol__annotate_fprintf2(ms->sym, fp, opts);
++ symbol__annotate_fprintf2(ms->sym, fp);
+
+ fclose(fp);
+ err = 0;
+@@ -2794,19 +2825,20 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
+ void annotation__set_offsets(struct annotation *notes, s64 size)
+ {
+ struct annotation_line *al;
++ struct annotated_source *src = notes->src;
+
+- notes->max_line_len = 0;
+- notes->nr_entries = 0;
+- notes->nr_asm_entries = 0;
++ src->max_line_len = 0;
++ src->nr_entries = 0;
++ src->nr_asm_entries = 0;
+
+- list_for_each_entry(al, &notes->src->source, node) {
++ list_for_each_entry(al, &src->source, node) {
+ size_t line_len = strlen(al->line);
+
+- if (notes->max_line_len < line_len)
+- notes->max_line_len = line_len;
+- al->idx = notes->nr_entries++;
++ if (src->max_line_len < line_len)
++ src->max_line_len = line_len;
++ al->idx = src->nr_entries++;
+ if (al->offset != -1) {
+- al->idx_asm = notes->nr_asm_entries++;
++ al->idx_asm = src->nr_asm_entries++;
+ /*
+ * FIXME: short term bandaid to cope with assembly
+ * routines that comes with labels in the same column
+@@ -2858,24 +2890,24 @@ void annotation__init_column_widths(struct annotation *notes, struct symbol *sym
+
+ void annotation__update_column_widths(struct annotation *notes)
+ {
+- if (notes->options->use_offset)
++ if (annotate_opts.use_offset)
+ notes->widths.target = notes->widths.min_addr;
+- else if (notes->options->full_addr)
++ else if (annotate_opts.full_addr)
+ notes->widths.target = BITS_PER_LONG / 4;
+ else
+ notes->widths.target = notes->widths.max_addr;
+
+ notes->widths.addr = notes->widths.target;
+
+- if (notes->options->show_nr_jumps)
++ if (annotate_opts.show_nr_jumps)
+ notes->widths.addr += notes->widths.jumps + 1;
+ }
+
+ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
+ {
+- notes->options->full_addr = !notes->options->full_addr;
++ annotate_opts.full_addr = !annotate_opts.full_addr;
+
+- if (notes->options->full_addr)
++ if (annotate_opts.full_addr)
+ notes->start = map__objdump_2mem(ms->map, ms->sym->start);
+ else
+ notes->start = map__rip_2objdump(ms->map, ms->sym->start);
+@@ -2883,22 +2915,22 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m
+ annotation__update_column_widths(notes);
+ }
+
+-static void annotation__calc_lines(struct annotation *notes, struct map *map,
+- struct rb_root *root,
+- struct annotation_options *opts)
++static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
++ struct rb_root *root)
+ {
+ struct annotation_line *al;
+ struct rb_root tmp_root = RB_ROOT;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ double percent_max = 0.0;
++ u64 addr;
+ int i;
+
+ for (i = 0; i < al->data_nr; i++) {
+ double percent;
+
+ percent = annotation_data__percent(&al->data[i],
+- opts->percent_type);
++ annotate_opts.percent_type);
+
+ if (percent > percent_max)
+ percent_max = percent;
+@@ -2907,24 +2939,23 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
+ if (percent_max <= 0.5)
+ continue;
+
+- al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
+- false, true, notes->start + al->offset);
+- insert_source_line(&tmp_root, al, opts);
++ addr = map__rip_2objdump(ms->map, ms->sym->start);
++ al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
++ false, true, ms->sym->start + al->offset);
++ insert_source_line(&tmp_root, al);
+ }
+
+ resort_source_line(root, &tmp_root);
+ }
+
+-static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root,
+- struct annotation_options *opts)
++static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
+ {
+ struct annotation *notes = symbol__annotation(ms->sym);
+
+- annotation__calc_lines(notes, ms->map, root, opts);
++ annotation__calc_lines(notes, ms, root);
+ }
+
+-int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *opts)
++int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
+ {
+ struct dso *dso = map__dso(ms->map);
+ struct symbol *sym = ms->sym;
+@@ -2933,7 +2964,7 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
+ char buf[1024];
+ int err;
+
+- err = symbol__annotate2(ms, evsel, opts, NULL);
++ err = symbol__annotate2(ms, evsel, NULL);
+ if (err) {
+ char msg[BUFSIZ];
+
+@@ -2943,31 +2974,31 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
+ return -1;
+ }
+
+- if (opts->print_lines) {
+- srcline_full_filename = opts->full_path;
+- symbol__calc_lines(ms, &source_line, opts);
++ if (annotate_opts.print_lines) {
++ srcline_full_filename = annotate_opts.full_path;
++ symbol__calc_lines(ms, &source_line);
+ print_summary(&source_line, dso->long_name);
+ }
+
+ hists__scnprintf_title(hists, buf, sizeof(buf));
+ fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
+- buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
+- symbol__annotate_fprintf2(sym, stdout, opts);
++ buf, percent_type_str(annotate_opts.percent_type), sym->name,
++ dso->long_name);
++ symbol__annotate_fprintf2(sym, stdout);
+
+ annotated_source__purge(symbol__annotation(sym)->src);
+
+ return 0;
+ }
+
+-int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *opts)
++int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
+ {
+ struct dso *dso = map__dso(ms->map);
+ struct symbol *sym = ms->sym;
+ struct rb_root source_line = RB_ROOT;
+ int err;
+
+- err = symbol__annotate(ms, evsel, opts, NULL);
++ err = symbol__annotate(ms, evsel, NULL);
+ if (err) {
+ char msg[BUFSIZ];
+
+@@ -2979,13 +3010,13 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
+
+ symbol__calc_percent(sym, evsel);
+
+- if (opts->print_lines) {
+- srcline_full_filename = opts->full_path;
+- symbol__calc_lines(ms, &source_line, opts);
++ if (annotate_opts.print_lines) {
++ srcline_full_filename = annotate_opts.full_path;
++ symbol__calc_lines(ms, &source_line);
+ print_summary(&source_line, dso->long_name);
+ }
+
+- symbol__annotate_printf(ms, evsel, opts);
++ symbol__annotate_printf(ms, evsel);
+
+ annotated_source__purge(symbol__annotation(sym)->src);
+
+@@ -3046,19 +3077,20 @@ static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
+ obj__printf(obj, " ");
+ }
+
+- disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
++ disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name);
+ }
+
+ static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
+ {
+ double ipc = 0.0, coverage = 0.0;
++ struct annotated_branch *branch = annotation__get_branch(notes);
+
+- if (notes->hit_cycles)
+- ipc = notes->hit_insn / ((double)notes->hit_cycles);
++ if (branch && branch->hit_cycles)
++ ipc = branch->hit_insn / ((double)branch->hit_cycles);
+
+- if (notes->total_insn) {
+- coverage = notes->cover_insn * 100.0 /
+- ((double)notes->total_insn);
++ if (branch && branch->total_insn) {
++ coverage = branch->cover_insn * 100.0 /
++ ((double)branch->total_insn);
+ }
+
+ scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
+@@ -3083,8 +3115,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+ int printed;
+
+ if (first_line && (al->offset == -1 || percent_max == 0.0)) {
+- if (notes->have_cycles) {
+- if (al->ipc == 0.0 && al->cycles == 0)
++ if (notes->branch && al->cycles) {
++ if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
+ show_title = true;
+ } else
+ show_title = true;
+@@ -3120,18 +3152,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+ }
+ }
+
+- if (notes->have_cycles) {
+- if (al->ipc)
+- obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc);
++ if (notes->branch) {
++ if (al->cycles && al->cycles->ipc)
++ obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
+ else if (!show_title)
+ obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
+ else
+ obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
+
+- if (!notes->options->show_minmax_cycle) {
+- if (al->cycles)
++ if (!annotate_opts.show_minmax_cycle) {
++ if (al->cycles && al->cycles->avg)
+ obj__printf(obj, "%*" PRIu64 " ",
+- ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
++ ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
+ else if (!show_title)
+ obj__printf(obj, "%*s",
+ ANNOTATION__CYCLES_WIDTH, " ");
+@@ -3145,8 +3177,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+
+ scnprintf(str, sizeof(str),
+ "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
+- al->cycles, al->cycles_min,
+- al->cycles_max);
++ al->cycles->avg, al->cycles->min,
++ al->cycles->max);
+
+ obj__printf(obj, "%*s ",
+ ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
+@@ -3172,7 +3204,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+ if (!*al->line)
+ obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
+ else if (al->offset == -1) {
+- if (al->line_nr && notes->options->show_linenr)
++ if (al->line_nr && annotate_opts.show_linenr)
+ printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
+ else
+ printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " ");
+@@ -3182,15 +3214,15 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+ u64 addr = al->offset;
+ int color = -1;
+
+- if (!notes->options->use_offset)
++ if (!annotate_opts.use_offset)
+ addr += notes->start;
+
+- if (!notes->options->use_offset) {
++ if (!annotate_opts.use_offset) {
+ printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
+ } else {
+ if (al->jump_sources &&
+- notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
+- if (notes->options->show_nr_jumps) {
++ annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
++ if (annotate_opts.show_nr_jumps) {
+ int prev;
+ printed = scnprintf(bf, sizeof(bf), "%*d ",
+ notes->widths.jumps,
+@@ -3204,9 +3236,9 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+ printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
+ notes->widths.target, addr);
+ } else if (ins__is_call(&disasm_line(al)->ins) &&
+- notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
++ annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
+ goto print_addr;
+- } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
++ } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
+ goto print_addr;
+ } else {
+ printed = scnprintf(bf, sizeof(bf), "%-*s ",
+@@ -3228,19 +3260,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
+ }
+
+ void annotation_line__write(struct annotation_line *al, struct annotation *notes,
+- struct annotation_write_ops *wops,
+- struct annotation_options *opts)
++ struct annotation_write_ops *wops)
+ {
+ __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
+ wops->change_color, wops->width, wops->obj,
+- opts->percent_type,
++ annotate_opts.percent_type,
+ wops->set_color, wops->set_percent_color,
+ wops->set_jumps_percent_color, wops->printf,
+ wops->write_graph);
+ }
+
+ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *options, struct arch **parch)
++ struct arch **parch)
+ {
+ struct symbol *sym = ms->sym;
+ struct annotation *notes = symbol__annotation(sym);
+@@ -3254,17 +3285,21 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
+ if (evsel__is_group_event(evsel))
+ nr_pcnt = evsel->core.nr_members;
+
+- err = symbol__annotate(ms, evsel, options, parch);
++ err = symbol__annotate(ms, evsel, parch);
+ if (err)
+ goto out_free_offsets;
+
+- notes->options = options;
++ notes->options = &annotate_opts;
+
+ symbol__calc_percent(sym, evsel);
+
+ annotation__set_offsets(notes, size);
+ annotation__mark_jump_targets(notes, sym);
+- annotation__compute_ipc(notes, size);
++
++ err = annotation__compute_ipc(notes, size);
++ if (err)
++ goto out_free_offsets;
++
+ annotation__init_column_widths(notes, sym);
+ notes->nr_events = nr_pcnt;
+
+@@ -3382,10 +3417,9 @@ static unsigned int parse_percent_type(char *str1, char *str2)
+ return type;
+ }
+
+-int annotate_parse_percent_type(const struct option *opt, const char *_str,
++int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
+ int unset __maybe_unused)
+ {
+- struct annotation_options *opts = opt->value;
+ unsigned int type;
+ char *str1, *str2;
+ int err = -1;
+@@ -3404,7 +3438,7 @@ int annotate_parse_percent_type(const struct option *opt, const char *_str,
+ if (type == (unsigned int) -1)
+ type = parse_percent_type(str2, str1);
+ if (type != (unsigned int) -1) {
+- opts->percent_type = type;
++ annotate_opts.percent_type = type;
+ err = 0;
+ }
+
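
Editor's note: a recurring theme in this file is that branch-stack state (`cycles_hist` plus the hit/cover counters) moves out of `annotated_source` into a separate `annotated_branch` allocated only when branch data actually arrives, via `annotation__get_branch()`, and torn down by `annotated_branch__delete()`. A stand-alone sketch of that allocate-on-first-use idiom, with illustrative names:

```c
/* Sketch of the lazy sub-struct pattern used for notes->branch above.
 * Names (demo_notes, demo_branch, ...) are illustrative only. */
#include <stdlib.h>

struct demo_branch {
	unsigned long hit_cycles;
	unsigned long *cycles_hist;	/* sized lazily elsewhere */
};

struct demo_notes {
	struct demo_branch *branch;	/* NULL until branch data arrives */
};

/* Return the branch block, allocating it zeroed on first call. */
static struct demo_branch *demo_get_branch(struct demo_notes *notes)
{
	if (notes == NULL)
		return NULL;
	if (notes->branch == NULL)
		notes->branch = calloc(1, sizeof(*notes->branch));
	return notes->branch;
}

static void demo_branch_delete(struct demo_branch *branch)
{
	if (branch) {
		free(branch->cycles_hist);
		free(branch);
	}
}

int main(void)
{
	struct demo_notes notes = { 0 };
	struct demo_branch *b = demo_get_branch(&notes);

	if (b)
		b->hit_cycles += 100;
	demo_branch_delete(notes.branch);
	return 0;
}
```

Symbols that never see branch samples never allocate the block, which is the point of the split.
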
+diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
+index 96278055917601..b79614c44a2424 100644
+--- a/tools/perf/util/annotate.h
++++ b/tools/perf/util/annotate.h
+@@ -101,6 +101,8 @@ struct annotation_options {
+ unsigned int percent_type;
+ };
+
++extern struct annotation_options annotate_opts;
++
+ enum {
+ ANNOTATION__OFFSET_JUMP_TARGETS = 1,
+ ANNOTATION__OFFSET_CALL,
+@@ -130,6 +132,13 @@ struct annotation_data {
+ struct sym_hist_entry he;
+ };
+
++struct cycles_info {
++ float ipc;
++ u64 avg;
++ u64 max;
++ u64 min;
++};
++
+ struct annotation_line {
+ struct list_head node;
+ struct rb_node rb_node;
+@@ -137,12 +146,9 @@ struct annotation_line {
+ char *line;
+ int line_nr;
+ char *fileloc;
+- int jump_sources;
+- float ipc;
+- u64 cycles;
+- u64 cycles_max;
+- u64 cycles_min;
+ char *path;
++ struct cycles_info *cycles;
++ int jump_sources;
+ u32 idx;
+ int idx_asm;
+ int data_nr;
+@@ -214,8 +220,7 @@ struct annotation_write_ops {
+ };
+
+ void annotation_line__write(struct annotation_line *al, struct annotation *notes,
+- struct annotation_write_ops *ops,
+- struct annotation_options *opts);
++ struct annotation_write_ops *ops);
+
+ int __annotation__scnprintf_samples_period(struct annotation *notes,
+ char *bf, size_t size,
+@@ -264,27 +269,30 @@ struct cyc_hist {
+ * returns.
+ */
+ struct annotated_source {
+- struct list_head source;
+- int nr_histograms;
+- size_t sizeof_sym_hist;
+- struct cyc_hist *cycles_hist;
+- struct sym_hist *histograms;
++ struct list_head source;
++ size_t sizeof_sym_hist;
++ struct sym_hist *histograms;
++ int nr_histograms;
++ int nr_entries;
++ int nr_asm_entries;
++ u16 max_line_len;
+ };
+
+-struct LOCKABLE annotation {
+- u64 max_coverage;
+- u64 start;
++struct annotated_branch {
+ u64 hit_cycles;
+ u64 hit_insn;
+ unsigned int total_insn;
+ unsigned int cover_insn;
++ struct cyc_hist *cycles_hist;
++};
++
++struct LOCKABLE annotation {
++ u64 max_coverage;
++ u64 start;
+ struct annotation_options *options;
+ struct annotation_line **offsets;
+ int nr_events;
+ int max_jump_sources;
+- int nr_entries;
+- int nr_asm_entries;
+- u16 max_line_len;
+ struct {
+ u8 addr;
+ u8 jumps;
+@@ -293,8 +301,8 @@ struct LOCKABLE annotation {
+ u8 max_addr;
+ u8 max_ins_name;
+ } widths;
+- bool have_cycles;
+ struct annotated_source *src;
++ struct annotated_branch *branch;
+ };
+
+ static inline void annotation__init(struct annotation *notes __maybe_unused)
+@@ -308,10 +316,10 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr
+
+ static inline int annotation__cycles_width(struct annotation *notes)
+ {
+- if (notes->have_cycles && notes->options->show_minmax_cycle)
++ if (notes->branch && notes->options->show_minmax_cycle)
+ return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH;
+
+- return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
++ return notes->branch ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
+ }
+
+ static inline int annotation__pcnt_width(struct annotation *notes)
+@@ -325,7 +333,6 @@ static inline bool annotation_line__filter(struct annotation_line *al, struct an
+ }
+
+ void annotation__set_offsets(struct annotation *notes, s64 size);
+-void annotation__compute_ipc(struct annotation *notes, size_t size);
+ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym);
+ void annotation__update_column_widths(struct annotation *notes);
+ void annotation__init_column_widths(struct annotation *notes, struct symbol *sym);
+@@ -361,11 +368,9 @@ void symbol__annotate_zero_histograms(struct symbol *sym);
+
+ int symbol__annotate(struct map_symbol *ms,
+ struct evsel *evsel,
+- struct annotation_options *options,
+ struct arch **parch);
+ int symbol__annotate2(struct map_symbol *ms,
+ struct evsel *evsel,
+- struct annotation_options *options,
+ struct arch **parch);
+
+ enum symbol_disassemble_errno {
+@@ -392,30 +397,26 @@ enum symbol_disassemble_errno {
+
+ int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen);
+
+-int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *options);
++int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel);
+ void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
+ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+ void annotated_source__purge(struct annotated_source *as);
+
+-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
+- struct annotation_options *opts);
++int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel);
+
+ bool ui__has_annotation(void);
+
+-int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
++int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel);
+
+-int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
++int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel);
+
+ #ifdef HAVE_SLANG_SUPPORT
+ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct hist_browser_timer *hbt,
+- struct annotation_options *opts);
++ struct hist_browser_timer *hbt);
+ #else
+ static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+- struct hist_browser_timer *hbt __maybe_unused,
+- struct annotation_options *opts __maybe_unused)
++ struct hist_browser_timer *hbt __maybe_unused)
+ {
+ return 0;
+ }
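
Editor's note: the header change above also shrinks `struct annotation_line` — the inline `ipc`/`cycles`/`cycles_max`/`cycles_min` fields, which most lines never use, become a single `struct cycles_info *` allocated only for sampled lines. A size comparison under a hypothetical mirror of the two layouts (the program prints the exact sizes for the host ABI):

```c
/* Why ipc/avg/min/max move behind one pointer: the common case shrinks
 * from four always-present fields to one usually-NULL pointer.
 * These structs only mirror the shape of the change, nothing more. */
#include <stdio.h>
#include <stdint.h>

struct line_inline {		/* old layout */
	float ipc;
	uint64_t cycles, cycles_max, cycles_min;
};

struct cycles_blob {		/* new side allocation */
	float ipc;
	uint64_t avg, max, min;
};

struct line_ptr {		/* new layout */
	struct cycles_blob *cycles;
};

int main(void)
{
	printf("per line, old: %zu bytes always\n", sizeof(struct line_inline));
	printf("per line, new: %zu bytes + %zu only when sampled\n",
	       sizeof(struct line_ptr), sizeof(struct cycles_blob));
	return 0;
}
```
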
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index a0368202a746ab..c51829fdef23b0 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1466,6 +1466,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ char *endptr;
+ bool period_type_set = false;
+ bool period_set = false;
++ bool iy = false;
+
+ synth_opts->set = true;
+
+@@ -1484,6 +1485,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ switch (*p++) {
+ case 'i':
+ case 'y':
++ iy = true;
+ if (p[-1] == 'y')
+ synth_opts->cycles = true;
+ else
+@@ -1646,7 +1648,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ }
+ }
+ out:
+- if (synth_opts->instructions || synth_opts->cycles) {
++ if (iy) {
+ if (!period_type_set)
+ synth_opts->period_type =
+ PERF_ITRACE_DEFAULT_PERIOD_TYPE;
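
Editor's note: the auxtrace hunks fix the default-period logic by remembering whether 'i' or 'y' actually appeared in the option string (the new `iy` local), instead of inferring it afterwards from `synth_opts->instructions`/`cycles`, which other code paths can also set. A simplified stand-alone parser showing the distinction between "flag is set" and "flag was explicitly given":

```c
/* Sketch of the parsing fix above: remember that an option letter was
 * *seen*, rather than inferring it later from state that other paths
 * may also set. Not perf's real parser. */
#include <stdbool.h>
#include <stdio.h>

struct opts {
	bool instructions;
	unsigned long period;
};

static void parse(struct opts *o, const char *s)
{
	bool saw_i = false;	/* the equivalent of the new "iy" local */

	for (const char *p = s; *p; p++) {
		if (*p == 'i') {
			saw_i = true;
			o->instructions = true;
		}
	}
	/* Apply the default period only when 'i' was actually given,
	 * even if o->instructions was already true for another reason. */
	if (saw_i && o->period == 0)
		o->period = 100000;
}

int main(void)
{
	struct opts o = { .instructions = true };	/* set by defaults */

	parse(&o, "b");				/* no 'i' typed */
	printf("period = %lu\n", o.period);	/* stays 0 */
	parse(&o, "i");
	printf("period = %lu\n", o.period);	/* now 100000 */
	return 0;
}
```
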
+diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
+index 591fc1edd385ca..dec910989701eb 100644
+--- a/tools/perf/util/block-info.c
++++ b/tools/perf/util/block-info.c
+@@ -129,9 +129,9 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
+ al.sym = he->ms.sym;
+
+ notes = symbol__annotation(he->ms.sym);
+- if (!notes || !notes->src || !notes->src->cycles_hist)
++ if (!notes || !notes->branch || !notes->branch->cycles_hist)
+ return 0;
+- ch = notes->src->cycles_hist;
++ ch = notes->branch->cycles_hist;
+ for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
+ if (ch[i].num_aggr) {
+ struct block_info *bi;
+@@ -464,8 +464,7 @@ void block_info__free_report(struct block_report *reps, int nr_reps)
+ }
+
+ int report__browse_block_hists(struct block_hist *bh, float min_percent,
+- struct evsel *evsel, struct perf_env *env,
+- struct annotation_options *annotation_opts)
++ struct evsel *evsel, struct perf_env *env)
+ {
+ int ret;
+
+@@ -477,8 +476,7 @@ int report__browse_block_hists(struct block_hist *bh, float min_percent,
+ return 0;
+ case 1:
+ symbol_conf.report_individual_block = true;
+- ret = block_hists_tui_browse(bh, evsel, min_percent,
+- env, annotation_opts);
++ ret = block_hists_tui_browse(bh, evsel, min_percent, env);
+ return ret;
+ default:
+ return -1;
+diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h
+index 42e9dcc4cf0ab3..96f53e89795e24 100644
+--- a/tools/perf/util/block-info.h
++++ b/tools/perf/util/block-info.h
+@@ -78,8 +78,7 @@ struct block_report *block_info__create_report(struct evlist *evlist,
+ void block_info__free_report(struct block_report *reps, int nr_reps);
+
+ int report__browse_block_hists(struct block_hist *bh, float min_percent,
+- struct evsel *evsel, struct perf_env *env,
+- struct annotation_options *annotation_opts);
++ struct evsel *evsel, struct perf_env *env);
+
+ float block_info__total_cycles_percent(struct hist_entry *he);
+
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index 38fcf3ba5749d9..b00b5a2634c3d1 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -542,9 +542,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
+ return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+ }
+
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+- struct perf_env *env,
+- FILE *fp)
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++ struct perf_env *env,
++ FILE *fp)
+ {
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+@@ -560,7 +560,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ if (info->btf_id) {
+ struct btf_node *node;
+
+- node = perf_env__find_btf(env, info->btf_id);
++ node = __perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
+index 1bcbd4fb6c669d..e2f0420905f597 100644
+--- a/tools/perf/util/bpf-event.h
++++ b/tools/perf/util/bpf-event.h
+@@ -33,9 +33,9 @@ struct btf_node {
+ int machine__process_bpf(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
+-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+- struct perf_env *env,
+- FILE *fp);
++void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
++ struct perf_env *env,
++ FILE *fp);
+ #else
+ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
+ return 0;
+ }
+
+-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+- struct perf_env *env __maybe_unused,
+- FILE *fp __maybe_unused)
++static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
++ struct perf_env *env __maybe_unused,
++ FILE *fp __maybe_unused)
+ {
+
+ }
+diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
+index 01f70b8e705a8f..21f4d9ba023d94 100644
+--- a/tools/perf/util/bpf_off_cpu.c
++++ b/tools/perf/util/bpf_off_cpu.c
+@@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused)
+ /* v5.18 kernel added prev_state arg, so it needs to check the signature */
+ static void check_sched_switch_args(void)
+ {
+- const struct btf *btf = bpf_object__btf(skel->obj);
++ const struct btf *btf = btf__load_vmlinux_btf();
+ const struct btf_type *t1, *t2, *t3;
+ u32 type_id;
+
+@@ -116,7 +116,8 @@ static void check_sched_switch_args(void)
+ return;
+
+ t3 = btf__type_by_id(btf, t2->type);
+- if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
++ /* btf_trace func proto has one more argument for the context */
++ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
+ /* new format: pass prev_state as 4th arg */
+ skel->rodata->has_prev_state = true;
+ }
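
Editor's note: the `bpf_off_cpu` fix above switches to vmlinux BTF and expects five prototype arguments, because a `btf_trace_*` typedef's function prototype carries an extra context parameter in front of the tracepoint's own four. A sketch of probing that with libbpf's BTF API — error handling is trimmed, and only the typedef name is taken from the diff:

```c
/* Load kernel BTF and count the arguments of a tracepoint's func proto. */
#include <bpf/btf.h>
#include <stdio.h>

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t;
	__s32 id;

	if (!btf)
		return 1;

	id = btf__find_by_name_kind(btf, "btf_trace_sched_switch", BTF_KIND_TYPEDEF);
	if (id < 0)
		goto out;

	t = btf__type_by_id(btf, id);			/* the typedef */
	t = btf__type_by_id(btf, t->type);		/* -> pointer */
	if (t && btf_is_ptr(t))
		t = btf__type_by_id(btf, t->type);	/* -> func_proto */
	if (t && btf_is_func_proto(t))
		/* vlen counts the context argument too, hence 5 for a 4-arg hook. */
		printf("sched_switch proto has %u BTF args\n", btf_vlen(t));
out:
	btf__free(btf);
	return 0;
}
```
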
+diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+index 939ec769bf4a5a..52c270330ae0d2 100644
+--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
++++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+@@ -153,7 +153,7 @@ static inline
+ unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
+ {
+ unsigned int augmented_len = sizeof(*augmented_arg);
+- int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
++ int string_len = bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg);
+
+ augmented_arg->size = augmented_arg->err = 0;
+ /*
+@@ -203,7 +203,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
+ _Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
+ socklen &= sizeof(augmented_args->saddr) - 1;
+
+- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
++ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+
+ return augmented__output(args, augmented_args, len + socklen);
+ }
+@@ -221,7 +221,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
+
+ socklen &= sizeof(augmented_args->saddr) - 1;
+
+- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
++ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
+
+ return augmented__output(args, augmented_args, len + socklen);
+ }
+@@ -311,7 +311,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
+ if (augmented_args == NULL)
+ goto failure;
+
+- if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
++ if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ goto failure;
+
+ attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
+@@ -325,7 +325,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
+ goto failure;
+
+ // Now that we read attr->size and tested it against the size limits, read it completely
+- if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
++ if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
+ goto failure;
+
+ return augmented__output(args, augmented_args, len + size);
+@@ -347,7 +347,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
+ if (size > sizeof(augmented_args->__data))
+ goto failure;
+
+- bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
++ bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
+
+ return augmented__output(args, augmented_args, len + size);
+ failure:
+@@ -385,7 +385,7 @@ int sys_enter(struct syscall_enter_args *args)
+ if (augmented_args == NULL)
+ return 1;
+
+- bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
++ bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args);
+
+ /*
+ * Jump to syscall specific augmenter, even if the default one,
+@@ -406,7 +406,7 @@ int sys_exit(struct syscall_exit_args *args)
+ if (pid_filter__has(&pids_filtered, getpid()))
+ return 0;
+
+- bpf_probe_read(&exit_args, sizeof(exit_args), args);
++ bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
+ /*
+ * Jump to syscall specific return augmenter, even if the default one,
+ * "!raw_syscalls:unaugmented" that will just return 1 to return the
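
Editor's note: the `augmented_raw_syscalls` hunks replace the deprecated catch-all `bpf_probe_read()`/`bpf_probe_read_str()` with the address-space-specific `_user`/`_kernel` variants — syscall argument pointers come from user space, while the args array in the tracepoint context is kernel memory. A fragment showing the distinction, assuming the usual vmlinux.h/libbpf skeleton build; the tracepoint and field names are standard, the program itself is only a sketch:

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char buf[256];

SEC("tracepoint/syscalls/sys_enter_openat")
int probe(struct trace_event_raw_sys_enter *ctx)
{
	const char *user_fname = (const char *)ctx->args[1];

	/* Syscall arguments point into user memory: the _user variant. */
	bpf_probe_read_user_str(buf, sizeof(buf), user_fname);

	/* The ctx structure itself lives in kernel memory: the _kernel
	 * variant. (Overwrites buf; this is just to show both calls.) */
	bpf_probe_read_kernel(buf, sizeof(ctx->args), ctx->args);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```
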
+diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
+index 8d3cfbb3cc65bd..473106c72c695d 100644
+--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
++++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
+@@ -238,6 +238,7 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
+ struct task_struct *curr;
+ struct mm_struct___old *mm_old;
+ struct mm_struct___new *mm_new;
++ struct sighand_struct *sighand;
+
+ switch (flags) {
+ case LCB_F_READ: /* rwsem */
+@@ -259,7 +260,9 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
+ break;
+ case LCB_F_SPIN: /* spinlock */
+ curr = bpf_get_current_task_btf();
+- if (&curr->sighand->siglock == (void *)lock)
++ sighand = curr->sighand;
++
++ if (sighand && &sighand->siglock == (void *)lock)
+ return LCD_F_SIGHAND_LOCK;
+ break;
+ default:
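
Editor's note: the `lock_contention` hunk guards against a NULL `curr->sighand` (a task can drop its sighand while exiting) by loading the pointer into a local and testing it before taking a member's address, instead of dereferencing the chain directly. The same shape in plain C:

```c
/* Sketch of the guard added above: read a possibly-NULL pointer into a
 * local once, test it, then use it. Plain C analogue of the BPF change. */
#include <stdio.h>
#include <stddef.h>

struct sighand { int siglock; };
struct task { struct sighand *sighand; };

static int is_sighand_lock(struct task *curr, void *lock)
{
	struct sighand *sighand = curr->sighand;	/* may be NULL on exit */

	return sighand && &sighand->siglock == lock;
}

int main(void)
{
	struct task exiting = { .sighand = NULL };
	struct sighand sh;
	struct task live = { .sighand = &sh };

	printf("%d %d\n", is_sighand_lock(&exiting, NULL),
	       is_sighand_lock(&live, &sh.siglock));
	return 0;
}
```
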
+diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore
+new file mode 100644
+index 00000000000000..49502c04183a2c
+--- /dev/null
++++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore
+@@ -0,0 +1 @@
++!vmlinux.h
+diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
+index aee937d14fbbf1..09e6b4e1401c90 100644
+--- a/tools/perf/util/callchain.c
++++ b/tools/perf/util/callchain.c
+@@ -1126,7 +1126,7 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
+ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+ bool hide_unresolved)
+ {
+- struct machine *machine = maps__machine(node->ms.maps);
++ struct machine *machine = node->ms.maps ? maps__machine(node->ms.maps) : NULL;
+
+ maps__put(al->maps);
+ al->maps = maps__get(node->ms.maps);
+diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
+index fc16299c915f9b..851a9cd32c4a25 100644
+--- a/tools/perf/util/data.c
++++ b/tools/perf/util/data.c
+@@ -418,8 +418,6 @@ int perf_data__switch(struct perf_data *data,
+ {
+ int ret;
+
+- if (check_pipe(data))
+- return -EINVAL;
+ if (perf_data__is_read(data))
+ return -EINVAL;
+
+diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
+index b9fb71ab7a7303..106429155c2e9d 100644
+--- a/tools/perf/util/db-export.c
++++ b/tools/perf/util/db-export.c
+@@ -253,8 +253,8 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
+ */
+ addr_location__init(&al);
+ al.sym = node->ms.sym;
+- al.map = node->ms.map;
+- al.maps = thread__maps(thread);
++ al.map = map__get(node->ms.map);
++ al.maps = maps__get(thread__maps(thread));
+ al.addr = node->ip;
+
+ if (al.map && !al.sym)
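
Editor's note: the `db-export.c` fix pairs the assignments with `map__get()`/`maps__get()` because the `addr_location` owns references that are put when it is torn down; copying borrowed pointers in left those later puts unbalanced. A hand-rolled miniature of the get/put discipline — not perf's actual map API:

```c
/* If a destination that will be put() later is assigned from a borrowed
 * pointer, take a get() at the assignment. */
#include <stdio.h>
#include <stdlib.h>

struct map {
	int refcnt;
};

static struct map *map_get(struct map *m)
{
	if (m)
		m->refcnt++;
	return m;
}

static void map_put(struct map *m)
{
	if (m && --m->refcnt == 0)
		free(m);
}

int main(void)
{
	struct map *m = calloc(1, sizeof(*m));

	m->refcnt = 1;				/* owner's reference */

	struct map *al_map = map_get(m);	/* the fixed assignment */
	map_put(al_map);			/* addr_location teardown analogue */
	map_put(m);				/* owner drops its ref; freed here */

	printf("balanced\n");
	return 0;
}
```
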
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index a164164001fb51..d2c7b6e6eae51b 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -22,13 +22,19 @@ struct perf_env perf_env;
+
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
++{
++ down_write(&env->bpf_progs.lock);
++ __perf_env__insert_bpf_prog_info(env, info_node);
++ up_write(&env->bpf_progs.lock);
++}
++
++void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+ {
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+- down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+@@ -40,15 +46,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+- goto out;
++ return;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+-out:
+- up_write(&env->bpf_progs.lock);
+ }
+
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+@@ -77,14 +81,22 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ }
+
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
++{
++ bool ret;
++
++ down_write(&env->bpf_progs.lock);
++ ret = __perf_env__insert_btf(env, btf_node);
++ up_write(&env->bpf_progs.lock);
++ return ret;
++}
++
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ {
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+- bool ret = true;
+
+- down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+@@ -96,25 +108,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+- ret = false;
+- goto out;
++ return false;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+-out:
+- up_write(&env->bpf_progs.lock);
+- return ret;
++ return true;
+ }
+
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
++{
++ struct btf_node *res;
++
++ down_read(&env->bpf_progs.lock);
++ res = __perf_env__find_btf(env, btf_id);
++ up_read(&env->bpf_progs.lock);
++ return res;
++}
++
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ {
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+- down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+@@ -124,13 +142,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+- goto out;
++ return node;
+ }
+- node = NULL;
+-
+-out:
+- up_read(&env->bpf_progs.lock);
+- return node;
++ return NULL;
+ }
+
+ /* purge data in bpf_progs.infos tree */
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index 4566c51f2fd956..359eff51cb85b7 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -164,12 +164,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
+ int perf_env__nr_cpus_avail(struct perf_env *env);
+
+ void perf_env__init(struct perf_env *env);
++void __perf_env__insert_bpf_prog_info(struct perf_env *env,
++ struct bpf_prog_info_node *info_node);
+ void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
++bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
++struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
+ char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
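The env.c and env.h changes above split each BPF-prog/BTF accessor into a lock-free __-prefixed variant plus a thin wrapper that takes bpf_progs.lock, so code that already holds the lock (the header-feature readers patched further down call __perf_env__insert_bpf_prog_info() and __perf_env__insert_btf() under down_write()) no longer deadlocks on the non-recursive rwsem. A minimal sketch of the pattern, with hypothetical names and a pthread rwlock standing in for the kernel-style rw_semaphore:

/* Minimal sketch of the locked-wrapper pattern; names are made up. */
#include <pthread.h>

struct registry {
	pthread_rwlock_t lock;
	int count;
};

/* Lock-free worker: the caller must already hold reg->lock. */
void __registry_insert(struct registry *reg)
{
	reg->count++;			/* mutate the protected state */
}

/* Public wrapper: take the lock, delegate to the __ variant. */
void registry_insert(struct registry *reg)
{
	pthread_rwlock_wrlock(&reg->lock);
	__registry_insert(reg);
	pthread_rwlock_unlock(&reg->lock);
}

int main(void)
{
	struct registry reg = { PTHREAD_RWLOCK_INITIALIZER, 0 };

	registry_insert(&reg);			/* unlocked caller        */
	pthread_rwlock_wrlock(&reg.lock);	/* locked caller instead  */
	__registry_insert(&reg);		/* ...uses the __ variant */
	pthread_rwlock_unlock(&reg.lock);
	return reg.count == 2 ? 0 : 1;
}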
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 923c0fb1512226..68f45e9e63b6e4 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -617,13 +617,13 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
+ if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
+ al->level = 'k';
+ maps = machine__kernel_maps(machine);
+- load_map = true;
++ load_map = !symbol_conf.lazy_load_kernel_maps;
+ } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
+ al->level = '.';
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ al->level = 'g';
+ maps = machine__kernel_maps(machine);
+- load_map = true;
++ load_map = !symbol_conf.lazy_load_kernel_maps;
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
+ al->level = 'u';
+ } else {
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 7ef43f72098e0a..eb1dd29c538d5c 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -103,7 +103,14 @@ struct evlist *evlist__new_default(void)
+ err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
+ if (err) {
+ evlist__delete(evlist);
+- evlist = NULL;
++ return NULL;
++ }
++
++ if (evlist->core.nr_entries > 1) {
++ struct evsel *evsel;
++
++ evlist__for_each_entry(evlist, evsel)
++ evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
+ }
+
+ return evlist;
+@@ -251,6 +258,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .size = sizeof(attr), /* to capture ABI version */
++ /* Avoid frequency mode for dummy events to avoid associated timers. */
++ .freq = 0,
++ .sample_period = 1,
+ };
+
+ return evsel__new_idx(&attr, evlist->core.nr_entries);
+@@ -277,8 +287,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+ evsel->core.attr.exclude_kernel = 1;
+ evsel->core.attr.exclude_guest = 1;
+ evsel->core.attr.exclude_hv = 1;
+- evsel->core.attr.freq = 0;
+- evsel->core.attr.sample_period = 1;
+ evsel->core.system_wide = system_wide;
+ evsel->no_aux_samples = true;
+ evsel->name = strdup("dummy:u");
+@@ -1694,6 +1702,24 @@ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_ev
+ tracking_evsel->tracking = true;
+ }
+
++struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
++{
++ struct evsel *evsel;
++
++ evsel = evlist__get_tracking_event(evlist);
++ if (!evsel__is_dummy_event(evsel)) {
++ evsel = evlist__add_aux_dummy(evlist, system_wide);
++ if (!evsel)
++ return NULL;
++
++ evlist__set_tracking_event(evlist, evsel);
++ } else if (system_wide) {
++ perf_evlist__go_system_wide(&evlist->core, &evsel->core);
++ }
++
++ return evsel;
++}
++
+ struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
+ {
+ struct evsel *evsel;
+@@ -2499,3 +2525,28 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
+ }
+ perf_cpu_map__put(user_requested_cpus);
+ }
++
++void evlist__uniquify_name(struct evlist *evlist)
++{
++ struct evsel *pos;
++ char *new_name;
++ int ret;
++
++ if (perf_pmus__num_core_pmus() == 1)
++ return;
++
++ evlist__for_each_entry(evlist, pos) {
++ if (!evsel__is_hybrid(pos))
++ continue;
++
++ if (strchr(pos->name, '/'))
++ continue;
++
++ ret = asprintf(&new_name, "%s/%s/",
++ pos->pmu_name, pos->name);
++ if (ret >= 0) {
++ free(pos->name);
++ pos->name = new_name;
++ }
++ }
++}
+diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
+index 664c6bf7b3e023..cb91dc9117a272 100644
+--- a/tools/perf/util/evlist.h
++++ b/tools/perf/util/evlist.h
+@@ -387,6 +387,7 @@ bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
+
+ struct evsel *evlist__get_tracking_event(struct evlist *evlist);
+ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
++struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide);
+
+ struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
+
+@@ -441,5 +442,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
+ int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
+ void evlist__check_mem_load_aux(struct evlist *evlist);
+ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
++void evlist__uniquify_name(struct evlist *evlist);
+
+ #endif /* __PERF_EVLIST_H */
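Besides moving freq = 0 / sample_period = 1 into evlist__dummy_event() so every dummy event avoids frequency-mode timers, the evlist changes add evlist__findnew_tracking_event(): reuse the current tracking evsel if it is already a dummy event, otherwise add an aux dummy and promote it, optionally widening an existing dummy to system-wide. A hedged caller sketch (assumes perf's internal headers; the error handling is illustrative only):

/* Caller sketch: make sure a side-band tracking event exists before
 * recording; illustrative, not a quote of any perf builtin. */
struct evsel *tracking = evlist__findnew_tracking_event(evlist, /*system_wide=*/false);

if (!tracking)
	return -ENOMEM;		/* evlist__add_aux_dummy() failed */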
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index a8a5ff87cc1f71..6d2b056232f6e1 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -2366,7 +2366,6 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
+ data->period = evsel->core.attr.sample_period;
+ data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ data->misc = event->header.misc;
+- data->id = -1ULL;
+ data->data_src = PERF_MEM_DATA_SRC_NONE;
+ data->vcpu = -1;
+
+diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
+index 4488f306de7853..b8875aac8f8709 100644
+--- a/tools/perf/util/expr.c
++++ b/tools/perf/util/expr.c
+@@ -500,7 +500,25 @@ double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const
+ tmp = evlist__new();
+ if (!tmp)
+ return NAN;
+- ret = parse_event(tmp, id) ? 0 : 1;
++
++ if (strchr(id, '@')) {
++ char *tmp_id, *p;
++
++ tmp_id = strdup(id);
++ if (!tmp_id) {
++ ret = NAN;
++ goto out;
++ }
++ p = strchr(tmp_id, '@');
++ *p = '/';
++ p = strrchr(tmp_id, '@');
++ *p = '/';
++ ret = parse_event(tmp, tmp_id) ? 0 : 1;
++ free(tmp_id);
++ } else {
++ ret = parse_event(tmp, id) ? 0 : 1;
++ }
++out:
+ evlist__delete(tmp);
+ return ret;
+ }
+@@ -509,7 +527,7 @@ double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
+ bool compute_ids __maybe_unused, const char *test_id)
+ {
+ double ret;
+- struct perf_pmu *pmu = pmu__find_core_pmu();
++ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
+ char *cpuid = perf_pmu__getcpuid(pmu);
+
+ if (!cpuid)
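expr__has_event() above learns to probe event names written in pmu@event@ form by rewriting the first and last '@' to '/' before calling parse_event(), since slashes cannot be written literally inside metric expressions. A standalone sketch of the rewrite (the event name is made up):

/* Standalone sketch of the '@' -> '/' rewrite performed above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *id = "uncore_imc@cas_count_read@";
	char *tmp_id = strdup(id);

	if (!tmp_id)
		return 1;
	*strchr(tmp_id, '@') = '/';	/* first '@' opens the term list */
	*strrchr(tmp_id, '@') = '/';	/* last '@' closes it            */
	printf("%s\n", tmp_id);		/* uncore_imc/cas_count_read/    */
	free(tmp_id);
	return 0;
}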
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index fefc72066c4e8e..ac17a3cb59dc0d 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -293,9 +293,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ */
+ phdr = elf_newphdr(e, 1);
+ phdr[0].p_type = PT_LOAD;
+- phdr[0].p_offset = 0;
+- phdr[0].p_vaddr = 0;
+- phdr[0].p_paddr = 0;
++ phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
++ phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
++ phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_filesz = csize;
+ phdr[0].p_memsz = csize;
+ phdr[0].p_flags = PF_X | PF_R;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index d812e1e371a743..1482567e5ac1a7 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1444,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
+ nodes = new_nodes;
+ size += 4;
+ }
+- ret = memory_node__read(&nodes[cnt++], idx);
++ ret = memory_node__read(&nodes[cnt], idx);
++ if (!ret)
++ cnt += 1;
+ }
+ out:
+ closedir(dir);
+@@ -1847,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+- bpf_event__print_bpf_prog_info(&node->info_linear->info,
+- env, fp);
++ __bpf_event__print_bpf_prog_info(&node->info_linear->info,
++ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+@@ -3175,7 +3177,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+ /* after reading from file, translate offset to address */
+ bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+- perf_env__insert_bpf_prog_info(env, info_node);
++ __perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+@@ -3222,7 +3224,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+ if (__do_read(ff, node->data, data_size))
+ goto out;
+
+- perf_env__insert_btf(env, node);
++ __perf_env__insert_btf(env, node);
+ node = NULL;
+ }
+
+@@ -4361,9 +4363,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
+ ret += fprintf(fp, "... ");
+
+ map = cpu_map__new_data(&ev->cpus.cpus);
+- if (map)
++ if (map) {
+ ret += cpu_map__fprintf(map, fp);
+- else
++ perf_cpu_map__put(map);
++ } else
+ ret += fprintf(fp, "failed to get cpus\n");
+ break;
+ default:
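The build_mem_topology() fix above only advances the node counter when memory_node__read() succeeds, so a failed read no longer leaves an uninitialized entry in the array; the same slot is simply reused on the next iteration. A self-contained sketch of the count-on-success idiom (all names here are stand-ins):

/* Sketch of the count-on-success idiom; read_item() is a stand-in. */
#include <stdio.h>

static int read_item(int idx, int *out)
{
	if (idx == 1)		/* pretend item 1 fails to read */
		return -1;
	*out = idx * 10;
	return 0;
}

int main(void)
{
	int nodes[4];
	int cnt = 0;

	for (int idx = 0; idx < 4; idx++) {
		if (read_item(idx, &nodes[cnt]) == 0)
			cnt++;	/* keep the slot only if the read worked */
	}
	printf("kept %d of 4 items\n", cnt);	/* kept 3 of 4 items */
	return 0;
}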
+diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
+index 45b614bb73bfa3..764d660d30e2f5 100644
+--- a/tools/perf/util/hisi-ptt.c
++++ b/tools/perf/util/hisi-ptt.c
+@@ -121,6 +121,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
+ if (dump_trace)
+ hisi_ptt_dump_event(ptt, data, size);
+
++ free(data);
+ return 0;
+ }
+
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 3dc8a4968beb9c..aa450cc2648aa5 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -637,7 +637,12 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
+ * mis-adjust symbol addresses when computing
+ * the history counter to increment.
+ */
+- if (he->ms.map != entry->ms.map) {
++ if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
++ if (he->ms.sym) {
++ u64 addr = he->ms.sym->start;
++ he->ms.sym = map__find_symbol(entry->ms.map, addr);
++ }
++
+ map__put(he->ms.map);
+ he->ms.map = map__get(entry->ms.map);
+ }
+@@ -2676,8 +2681,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+
+ /* If we have branch cycles always annotate them. */
+ if (bs && bs->nr && entries[0].flags.cycles) {
+- int i;
+-
+ bi = sample__resolve_bstack(sample, al);
+ if (bi) {
+ struct addr_map_symbol *prev = NULL;
+@@ -2692,7 +2695,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ * Note that perf stores branches reversed from
+ * program order!
+ */
+- for (i = bs->nr - 1; i >= 0; i--) {
++ for (int i = bs->nr - 1; i >= 0; i--) {
+ addr_map_symbol__account_cycles(&bi[i].from,
+ nonany_branch_mode ? NULL : prev,
+ bi[i].flags.cycles);
+@@ -2701,6 +2704,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+ if (total_cycles)
+ *total_cycles += bi[i].flags.cycles;
+ }
++ for (unsigned int i = 0; i < bs->nr; i++) {
++ map__put(bi[i].to.ms.map);
++ maps__put(bi[i].to.ms.maps);
++ map__put(bi[i].from.ms.map);
++ maps__put(bi[i].from.ms.maps);
++ }
+ free(bi);
+ }
+ }
+diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
+index afc9f1c7f4dc24..5d0db96609dff5 100644
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -457,7 +457,6 @@ struct hist_browser_timer {
+ int refresh;
+ };
+
+-struct annotation_options;
+ struct res_sample;
+
+ enum rstype {
+@@ -473,16 +472,13 @@ struct block_hist;
+ void attr_to_script(char *buf, struct perf_event_attr *attr);
+
+ int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
+- struct hist_browser_timer *hbt,
+- struct annotation_options *annotation_opts);
++ struct hist_browser_timer *hbt);
+
+ int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
+- struct hist_browser_timer *hbt,
+- struct annotation_options *annotation_opts);
++ struct hist_browser_timer *hbt);
+
+ int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
+- float min_pcnt, struct perf_env *env, bool warn_lost_event,
+- struct annotation_options *annotation_options);
++ float min_pcnt, struct perf_env *env, bool warn_lost_event);
+
+ int script_browse(const char *script_opt, struct evsel *evsel);
+
+@@ -492,8 +488,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res,
+ void res_sample_init(void);
+
+ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
+- float min_percent, struct perf_env *env,
+- struct annotation_options *annotation_opts);
++ float min_percent, struct perf_env *env);
+ #else
+ static inline
+ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
+@@ -501,23 +496,20 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused,
+ float min_pcnt __maybe_unused,
+ struct perf_env *env __maybe_unused,
+- bool warn_lost_event __maybe_unused,
+- struct annotation_options *annotation_options __maybe_unused)
++ bool warn_lost_event __maybe_unused)
+ {
+ return 0;
+ }
+ static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+- struct hist_browser_timer *hbt __maybe_unused,
+- struct annotation_options *annotation_options __maybe_unused)
++ struct hist_browser_timer *hbt __maybe_unused)
+ {
+ return 0;
+ }
+
+ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+- struct hist_browser_timer *hbt __maybe_unused,
+- struct annotation_options *annotation_opts __maybe_unused)
++ struct hist_browser_timer *hbt __maybe_unused)
+ {
+ return 0;
+ }
+@@ -541,8 +533,7 @@ static inline void res_sample_init(void) {}
+ static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ float min_percent __maybe_unused,
+- struct perf_env *env __maybe_unused,
+- struct annotation_options *annotation_opts __maybe_unused)
++ struct perf_env *env __maybe_unused)
+ {
+ return 0;
+ }
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index b450178e3420ba..e733f6b1f7ac58 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1319,6 +1319,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip)
+ bool ret = false;
+
+ decoder->state.type &= ~INTEL_PT_BRANCH;
++ decoder->state.insn_op = INTEL_PT_OP_OTHER;
++ decoder->state.insn_len = 0;
+
+ if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) {
+ bool ip = decoder->set_fup_cfe_ip;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index dbf0bc71a63bee..4db9a098f59262 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -764,6 +764,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+
+ addr_location__init(&al);
+ intel_pt_insn->length = 0;
++ intel_pt_insn->op = INTEL_PT_OP_OTHER;
+
+ if (to_ip && *ip == to_ip)
+ goto out_no_cache;
+@@ -898,6 +899,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+
+ if (to_ip && *ip == to_ip) {
+ intel_pt_insn->length = 0;
++ intel_pt_insn->op = INTEL_PT_OP_OTHER;
+ goto out_no_cache;
+ }
+
+@@ -1512,9 +1514,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+ } else if (ptq->state->flags & INTEL_PT_ASYNC) {
+ if (!ptq->state->to_ip)
+ ptq->flags = PERF_IP_FLAG_BRANCH |
++ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_TRACE_END;
+ else if (ptq->state->from_nr && !ptq->state->to_nr)
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
++ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_VMEXIT;
+ else
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 88f31b3a63acb6..7c6874804660eb 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2158,9 +2158,13 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+- if (thread != NULL)
+- thread__put(thread);
+-
++ if (thread != NULL) {
++ if (symbol_conf.keep_exited_threads)
++ thread__set_exited(thread, /*exited=*/true);
++ else
++ machine__remove_thread(machine, thread);
++ }
++ thread__put(thread);
+ return 0;
+ }
+
+@@ -2624,16 +2628,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ save_lbr_cursor_node(thread, cursor, i);
+ }
+
+- /* Add LBR ip from first entries.to */
+- ip = entries[0].to;
+- flags = &entries[0].flags;
+- *branch_from = entries[0].from;
+- err = add_callchain_ip(thread, cursor, parent,
+- root_al, &cpumode, ip,
+- true, flags, NULL,
+- *branch_from);
+- if (err)
+- return err;
++ if (lbr_nr > 0) {
++ /* Add LBR ip from first entries.to */
++ ip = entries[0].to;
++ flags = &entries[0].flags;
++ *branch_from = entries[0].from;
++ err = add_callchain_ip(thread, cursor, parent,
++ root_al, &cpumode, ip,
++ true, flags, NULL,
++ *branch_from);
++ if (err)
++ return err;
++ }
+
+ return 0;
+ }
+diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
+index 233438c95b531f..9a011aed4b754b 100644
+--- a/tools/perf/util/maps.c
++++ b/tools/perf/util/maps.c
+@@ -475,3 +475,241 @@ struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
+
+ return rb_entry(next, struct map_rb_node, rb_node);
+ }
++
++static int map__strcmp(const void *a, const void *b)
++{
++ const struct map *map_a = *(const struct map **)a;
++ const struct map *map_b = *(const struct map **)b;
++ const struct dso *dso_a = map__dso(map_a);
++ const struct dso *dso_b = map__dso(map_b);
++ int ret = strcmp(dso_a->short_name, dso_b->short_name);
++
++ if (ret == 0 && map_a != map_b) {
++ /*
++ * Ensure distinct but name equal maps have an order in part to
++ * aid reference counting.
++ */
++ ret = (int)map__start(map_a) - (int)map__start(map_b);
++ if (ret == 0)
++ ret = (int)((intptr_t)map_a - (intptr_t)map_b);
++ }
++
++ return ret;
++}
++
++static int map__strcmp_name(const void *name, const void *b)
++{
++ const struct dso *dso = map__dso(*(const struct map **)b);
++
++ return strcmp(name, dso->short_name);
++}
++
++void __maps__sort_by_name(struct maps *maps)
++{
++ qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
++}
++
++static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
++{
++ struct map_rb_node *rb_node;
++ struct map **maps_by_name = realloc(maps__maps_by_name(maps),
++ maps__nr_maps(maps) * sizeof(struct map *));
++ int i = 0;
++
++ if (maps_by_name == NULL)
++ return -1;
++
++ up_read(maps__lock(maps));
++ down_write(maps__lock(maps));
++
++ RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
++ RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
++
++ maps__for_each_entry(maps, rb_node)
++ maps_by_name[i++] = map__get(rb_node->map);
++
++ __maps__sort_by_name(maps);
++
++ up_write(maps__lock(maps));
++ down_read(maps__lock(maps));
++
++ return 0;
++}
++
++static struct map *__maps__find_by_name(struct maps *maps, const char *name)
++{
++ struct map **mapp;
++
++ if (maps__maps_by_name(maps) == NULL &&
++ map__groups__sort_by_name_from_rbtree(maps))
++ return NULL;
++
++ mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
++ sizeof(*mapp), map__strcmp_name);
++ if (mapp)
++ return *mapp;
++ return NULL;
++}
++
++struct map *maps__find_by_name(struct maps *maps, const char *name)
++{
++ struct map_rb_node *rb_node;
++ struct map *map;
++
++ down_read(maps__lock(maps));
++
++
++ if (RC_CHK_ACCESS(maps)->last_search_by_name) {
++ const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
++
++ if (strcmp(dso->short_name, name) == 0) {
++ map = RC_CHK_ACCESS(maps)->last_search_by_name;
++ goto out_unlock;
++ }
++ }
++ /*
++ * If we have maps->maps_by_name, then the name isn't in the rbtree,
++ * as maps->maps_by_name mirrors the rbtree when lookups by name are
++ * made.
++ */
++ map = __maps__find_by_name(maps, name);
++ if (map || maps__maps_by_name(maps) != NULL)
++ goto out_unlock;
++
++ /* Fallback to traversing the rbtree... */
++ maps__for_each_entry(maps, rb_node) {
++ struct dso *dso;
++
++ map = rb_node->map;
++ dso = map__dso(map);
++ if (strcmp(dso->short_name, name) == 0) {
++ RC_CHK_ACCESS(maps)->last_search_by_name = map;
++ goto out_unlock;
++ }
++ }
++ map = NULL;
++
++out_unlock:
++ up_read(maps__lock(maps));
++ return map;
++}
++
++void maps__fixup_end(struct maps *maps)
++{
++ struct map_rb_node *prev = NULL, *curr;
++
++ down_write(maps__lock(maps));
++
++ maps__for_each_entry(maps, curr) {
++ if (prev != NULL && !map__end(prev->map))
++ map__set_end(prev->map, map__start(curr->map));
++
++ prev = curr;
++ }
++
++ /*
++ * We still haven't the actual symbols, so guess the
++ * last map final address.
++ */
++ if (curr && !map__end(curr->map))
++ map__set_end(curr->map, ~0ULL);
++
++ up_write(maps__lock(maps));
++}
++
++/*
++ * Merges map into maps by splitting the new map within the existing map
++ * regions.
++ */
++int maps__merge_in(struct maps *kmaps, struct map *new_map)
++{
++ struct map_rb_node *rb_node;
++ LIST_HEAD(merged);
++ int err = 0;
++
++ maps__for_each_entry(kmaps, rb_node) {
++ struct map *old_map = rb_node->map;
++
++ /* no overload with this one */
++ if (map__end(new_map) < map__start(old_map) ||
++ map__start(new_map) >= map__end(old_map))
++ continue;
++
++ if (map__start(new_map) < map__start(old_map)) {
++ /*
++ * |new......
++ * |old....
++ */
++ if (map__end(new_map) < map__end(old_map)) {
++ /*
++ * |new......| -> |new..|
++ * |old....| -> |old....|
++ */
++ map__set_end(new_map, map__start(old_map));
++ } else {
++ /*
++ * |new.............| -> |new..| |new..|
++ * |old....| -> |old....|
++ */
++ struct map_list_node *m = map_list_node__new();
++
++ if (!m) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ m->map = map__clone(new_map);
++ if (!m->map) {
++ free(m);
++ err = -ENOMEM;
++ goto out;
++ }
++
++ map__set_end(m->map, map__start(old_map));
++ list_add_tail(&m->node, &merged);
++ map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
++ map__set_start(new_map, map__end(old_map));
++ }
++ } else {
++ /*
++ * |new......
++ * |old....
++ */
++ if (map__end(new_map) < map__end(old_map)) {
++ /*
++ * |new..| -> x
++ * |old.........| -> |old.........|
++ */
++ map__put(new_map);
++ new_map = NULL;
++ break;
++ } else {
++ /*
++ * |new......| -> |new...|
++ * |old....| -> |old....|
++ */
++ map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
++ map__set_start(new_map, map__end(old_map));
++ }
++ }
++ }
++
++out:
++ while (!list_empty(&merged)) {
++ struct map_list_node *old_node;
++
++ old_node = list_entry(merged.next, struct map_list_node, node);
++ list_del_init(&old_node->node);
++ if (!err)
++ err = maps__insert(kmaps, old_node->map);
++ map__put(old_node->map);
++ free(old_node);
++ }
++
++ if (new_map) {
++ if (!err)
++ err = maps__insert(kmaps, new_map);
++ map__put(new_map);
++ }
++ return err;
++}
+diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h
+index 83144e0645ed46..a689149be8c438 100644
+--- a/tools/perf/util/maps.h
++++ b/tools/perf/util/maps.h
+@@ -21,6 +21,16 @@ struct map_rb_node {
+ struct map *map;
+ };
+
++struct map_list_node {
++ struct list_head node;
++ struct map *map;
++};
++
++static inline struct map_list_node *map_list_node__new(void)
++{
++ return malloc(sizeof(struct map_list_node));
++}
++
+ struct map_rb_node *maps__first(struct maps *maps);
+ struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
+ struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);
+@@ -133,4 +143,6 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map);
+
+ void __maps__sort_by_name(struct maps *maps);
+
++void maps__fixup_end(struct maps *maps);
++
+ #endif // __PERF_MAPS_H
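The maps.c/maps.h changes move maps__find_by_name(), maps__fixup_end() and maps__merge_in() out of symbol.c (the matching removals appear near the end of this section). maps__merge_in() splits a new kernel map against every overlapping existing map, which gives four cases: left overhang, spanning (emits a left-hand fragment), fully contained (the new map is dropped), and right overhang. A standalone sketch of that splitting logic on plain [start, end) ranges:

/* Sketch of the interval-splitting cases in maps__merge_in() above. */
#include <stdio.h>

struct range { unsigned long start, end; };

/* Trim 'new' against one existing 'old' range, possibly emitting a
 * left-hand fragment; returns 0 if 'new' is swallowed entirely. */
static int split_against(struct range *new, const struct range *old,
			 struct range *left_frag, int *has_left)
{
	*has_left = 0;
	if (new->end < old->start || new->start >= old->end)
		return 1;			/* no overlap: keep as-is  */
	if (new->start < old->start) {
		if (new->end < old->end) {	/* overhangs to the left   */
			new->end = old->start;
			return 1;
		}
		*left_frag = (struct range){ new->start, old->start };
		*has_left = 1;			/* spans old: split in two */
		new->start = old->end;
		return 1;
	}
	if (new->end < old->end)		/* inside old: dropped     */
		return 0;
	new->start = old->end;			/* overhangs to the right  */
	return 1;
}

int main(void)
{
	struct range new = { 10, 50 }, old = { 20, 30 }, frag;
	int has_frag;

	if (split_against(&new, &old, &frag, &has_frag)) {
		if (has_frag)
			printf("kept [%lu,%lu) and ", frag.start, frag.end);
		printf("[%lu,%lu)\n", new.start, new.end);
	}
	return 0;	/* prints: kept [10,20) and [30,50) */
}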
+diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
+index 39ffe8ceb38090..3a2e3687878c18 100644
+--- a/tools/perf/util/mem-events.c
++++ b/tools/perf/util/mem-events.c
+@@ -100,11 +100,14 @@ int perf_mem_events__parse(const char *str)
+ return -1;
+ }
+
+-static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
++static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
++ struct perf_mem_event *e)
+ {
++ char sysfs_name[100];
+ char path[PATH_MAX];
+ struct stat st;
+
++ scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
+ scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
+ return !stat(path, &st);
+ }
+@@ -120,7 +123,6 @@ int perf_mem_events__init(void)
+
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ struct perf_mem_event *e = perf_mem_events__ptr(j);
+- char sysfs_name[100];
+ struct perf_pmu *pmu = NULL;
+
+ /*
+@@ -136,12 +138,12 @@ int perf_mem_events__init(void)
+ * of core PMU.
+ */
+ while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+- scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
+- e->supported |= perf_mem_event__supported(mnt, sysfs_name);
++ e->supported |= perf_mem_event__supported(mnt, pmu, e);
++ if (e->supported) {
++ found = true;
++ break;
++ }
+ }
+-
+- if (e->supported)
+- found = true;
+ }
+
+ return found ? 0 : -ENOENT;
+@@ -167,13 +169,10 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
+ int idx)
+ {
+ const char *mnt = sysfs__mount();
+- char sysfs_name[100];
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+- scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
+- pmu->name);
+- if (!perf_mem_event__supported(mnt, sysfs_name)) {
++ if (!perf_mem_event__supported(mnt, pmu, e)) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events__name(idx, pmu->name));
+ }
+@@ -183,9 +182,9 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
+ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ char **rec_tmp, int *tmp_nr)
+ {
++ const char *mnt = sysfs__mount();
+ int i = *argv_nr, k = 0;
+ struct perf_mem_event *e;
+- struct perf_pmu *pmu;
+
+ for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ e = perf_mem_events__ptr(j);
+@@ -202,6 +201,8 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = perf_mem_events__name(j, NULL);
+ } else {
++ struct perf_pmu *pmu = NULL;
++
+ if (!e->supported) {
+ perf_mem_events__print_unsupport_hybrid(e, j);
+ return -1;
+@@ -210,6 +211,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+ const char *s = perf_mem_events__name(j, pmu->name);
+
++ if (!perf_mem_event__supported(mnt, pmu, e))
++ continue;
++
+ rec_argv[i++] = "-e";
+ if (s) {
+ char *copy = strdup(s);
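perf_mem_event__supported() above now takes the PMU and event and builds the sysfs name itself, and the record path skips PMUs whose sysfs entry is absent instead of emitting -e options that cannot open. The probe itself is just a stat() on the event's device path; a self-contained sketch (the mount point and format string here are illustrative, perf derives them from sysfs__mount() and the event's sysfs_name template):

/* Sketch of the per-PMU sysfs probe done above. */
#include <stdio.h>
#include <sys/stat.h>

static int event_supported(const char *mnt, const char *pmu_name,
			   const char *sysfs_fmt)
{
	char sysfs_name[100];
	char path[4096];
	struct stat st;

	snprintf(sysfs_name, sizeof(sysfs_name), sysfs_fmt, pmu_name);
	snprintf(path, sizeof(path), "%s/devices/%s", mnt, sysfs_name);
	return stat(path, &st) == 0;	/* present in sysfs => usable */
}

int main(void)
{
	printf("%d\n", event_supported("/sys", "cpu", "%s/events/mem-loads"));
	return 0;
}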
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 6231044a491e2b..bb5faaa25d510f 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -225,7 +225,7 @@ static struct metric *metric__new(const struct pmu_metric *pm,
+
+ m->pmu = pm->pmu ?: "cpu";
+ m->metric_name = pm->metric_name;
+- m->default_metricgroup_name = pm->default_metricgroup_name;
++ m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
+ m->modifier = NULL;
+ if (modifier) {
+ m->modifier = strdup(modifier);
+diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
+index 21bfe7e0d94442..c3a86ef4b7cf31 100644
+--- a/tools/perf/util/parse-events.y
++++ b/tools/perf/util/parse-events.y
+@@ -79,7 +79,7 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %type <str> PE_MODIFIER_BP
+ %type <str> PE_EVENT_NAME
+ %type <str> PE_DRV_CFG_TERM
+-%type <str> name_or_raw name_or_legacy
++%type <str> name_or_raw
+ %destructor { free ($$); } <str>
+ %type <term> event_term
+ %destructor { parse_events_term__delete ($$); } <term>
+@@ -104,6 +104,7 @@ static void free_list_evsel(struct list_head* list_evsel)
+ %type <list_evsel> groups
+ %destructor { free_list_evsel ($$); } <list_evsel>
+ %type <tracepoint_name> tracepoint_name
++%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
+ %type <hardware_term> PE_TERM_HW
+ %destructor { free ($$.str); } <hardware_term>
+
+@@ -679,8 +680,6 @@ event_term
+
+ name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
+
+-name_or_legacy: PE_NAME | PE_LEGACY_CACHE
+-
+ event_term:
+ PE_RAW
+ {
+@@ -695,7 +694,7 @@ PE_RAW
+ $$ = term;
+ }
+ |
+-name_or_raw '=' name_or_legacy
++name_or_raw '=' name_or_raw
+ {
+ struct parse_events_term *term;
+ int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
+@@ -775,7 +774,7 @@ PE_TERM_HW
+ $$ = term;
+ }
+ |
+-PE_TERM '=' name_or_legacy
++PE_TERM '=' name_or_raw
+ {
+ struct parse_events_term *term;
+ int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
+diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
+index 2247991451f3aa..1c1582688f0374 100644
+--- a/tools/perf/util/perf_event_attr_fprintf.c
++++ b/tools/perf/util/perf_event_attr_fprintf.c
+@@ -7,6 +7,8 @@
+ #include <linux/types.h>
+ #include <linux/perf_event.h>
+ #include "util/evsel_fprintf.h"
++#include "util/pmu.h"
++#include "util/pmus.h"
+ #include "trace-event.h"
+
+ struct bit_names {
+@@ -74,9 +76,12 @@ static void __p_read_format(char *buf, size_t size, u64 value)
+ }
+
+ #define ENUM_ID_TO_STR_CASE(x) case x: return (#x);
+-static const char *stringify_perf_type_id(u64 value)
++static const char *stringify_perf_type_id(struct perf_pmu *pmu, u32 type)
+ {
+- switch (value) {
++ if (pmu)
++ return pmu->name;
++
++ switch (type) {
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE)
+ ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT)
+@@ -174,9 +179,9 @@ do { \
+ #define print_id_unsigned(_s) PRINT_ID(_s, "%"PRIu64)
+ #define print_id_hex(_s) PRINT_ID(_s, "%#"PRIx64)
+
+-static void __p_type_id(char *buf, size_t size, u64 value)
++static void __p_type_id(struct perf_pmu *pmu, char *buf, size_t size, u64 value)
+ {
+- print_id_unsigned(stringify_perf_type_id(value));
++ print_id_unsigned(stringify_perf_type_id(pmu, value));
+ }
+
+ static void __p_config_hw_id(char *buf, size_t size, u64 value)
+@@ -216,8 +221,14 @@ static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
+ }
+ #endif
+
+-static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
++static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type, u64 value)
+ {
++ const char *name = perf_pmu__name_from_config(pmu, value);
++
++ if (name) {
++ print_id_hex(name);
++ return;
++ }
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ return __p_config_hw_id(buf, size, value);
+@@ -245,8 +256,8 @@ static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
+ #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
+ #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
+ #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
+-#define p_type_id(val) __p_type_id(buf, BUF_SIZE, val)
+-#define p_config_id(val) __p_config_id(buf, BUF_SIZE, attr->type, val)
++#define p_type_id(val) __p_type_id(pmu, buf, BUF_SIZE, val)
++#define p_config_id(val) __p_config_id(pmu, buf, BUF_SIZE, attr->type, val)
+
+ #define PRINT_ATTRn(_n, _f, _p, _a) \
+ do { \
+@@ -261,6 +272,7 @@ do { \
+ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv)
+ {
++ struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ char buf[BUF_SIZE];
+ int ret = 0;
+
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index d515ba8a0e160c..27393e4327922c 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -28,6 +28,7 @@
+ #include "strbuf.h"
+ #include "fncache.h"
+ #include "util/evsel_config.h"
++#include <regex.h>
+
+ struct perf_pmu perf_pmu__fake = {
+ .name = "fake",
+@@ -35,6 +36,18 @@ struct perf_pmu perf_pmu__fake = {
+
+ #define UNIT_MAX_LEN 31 /* max length for event unit name */
+
++enum event_source {
++ /* An event loaded from /sys/devices/<pmu>/events. */
++ EVENT_SRC_SYSFS,
++ /* An event loaded from a CPUID matched json file. */
++ EVENT_SRC_CPU_JSON,
++ /*
++ * An event loaded from a /sys/devices/<pmu>/identifier matched json
++ * file.
++ */
++ EVENT_SRC_SYS_JSON,
++};
++
+ /**
+ * struct perf_pmu_alias - An event either read from sysfs or builtin in
+ * pmu-events.c, created by parsing the pmu-events json files.
+@@ -424,9 +437,30 @@ static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
+ {
+ struct perf_pmu_alias *alias;
+
+- if (load && !pmu->sysfs_aliases_loaded)
+- pmu_aliases_parse(pmu);
++ if (load && !pmu->sysfs_aliases_loaded) {
++ bool has_sysfs_event;
++ char event_file_name[FILENAME_MAX + 8];
++
++ /*
++ * Test if alias/event 'name' exists in the PMU's sysfs/events
++ * directory. If not skip parsing the sysfs aliases. Sysfs event
++ * name must be all lower or all upper case.
++ */
++ scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name);
++ for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
++ event_file_name[i] = tolower(event_file_name[i]);
++
++ has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
++ if (!has_sysfs_event) {
++ for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
++ event_file_name[i] = toupper(event_file_name[i]);
++
++ has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
++ }
++ if (has_sysfs_event)
++ pmu_aliases_parse(pmu);
+
++ }
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ if (!strcasecmp(alias->name, name))
+ return alias;
+@@ -499,7 +533,7 @@ static int update_alias(const struct pmu_event *pe,
+
+ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
+ const char *desc, const char *val, FILE *val_fd,
+- const struct pmu_event *pe)
++ const struct pmu_event *pe, enum event_source src)
+ {
+ struct perf_pmu_alias *alias;
+ int ret;
+@@ -551,25 +585,30 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
+ }
+ snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
+ }
+- if (!pe) {
+- /* Update an event from sysfs with json data. */
+- struct update_alias_data data = {
+- .pmu = pmu,
+- .alias = alias,
+- };
+-
++ switch (src) {
++ default:
++ case EVENT_SRC_SYSFS:
+ alias->from_sysfs = true;
+ if (pmu->events_table) {
++ /* Update an event from sysfs with json data. */
++ struct update_alias_data data = {
++ .pmu = pmu,
++ .alias = alias,
++ };
+ if (pmu_events_table__find_event(pmu->events_table, pmu, name,
+ update_alias, &data) == 0)
+- pmu->loaded_json_aliases++;
++ pmu->cpu_json_aliases++;
+ }
+- }
+-
+- if (!pe)
+ pmu->sysfs_aliases++;
+- else
+- pmu->loaded_json_aliases++;
++ break;
++ case EVENT_SRC_CPU_JSON:
++ pmu->cpu_json_aliases++;
++ break;
++ case EVENT_SRC_SYS_JSON:
++ pmu->sys_json_aliases++;
++ break;
++
++ }
+ list_add_tail(&alias->list, &pmu->aliases);
+ return 0;
+ }
+@@ -645,7 +684,8 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
+ }
+
+ if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL,
+- /*val=*/ NULL, file, /*pe=*/ NULL) < 0)
++ /*val=*/ NULL, file, /*pe=*/ NULL,
++ EVENT_SRC_SYSFS) < 0)
+ pr_debug("Cannot set up %s\n", name);
+ fclose(file);
+ }
+@@ -874,13 +914,36 @@ static bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
+ return res;
+ }
+
++bool pmu_uncore_identifier_match(const char *compat, const char *id)
++{
++ regex_t re;
++ regmatch_t pmatch[1];
++ int match;
++
++ if (regcomp(&re, compat, REG_EXTENDED) != 0) {
++ /* Warn that the pattern could not be compiled for matching. */
++ pr_info("Invalid regular expression %s\n", compat);
++ return false;
++ }
++
++ match = !regexec(&re, id, 1, pmatch, 0);
++ if (match) {
++ /* Ensure a full match. */
++ match = pmatch[0].rm_so == 0 && (size_t)pmatch[0].rm_eo == strlen(id);
++ }
++ regfree(&re);
++
++ return match;
++}
++
+ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *vdata)
+ {
+ struct perf_pmu *pmu = vdata;
+
+- perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe);
++ perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL,
++ pe, EVENT_SRC_CPU_JSON);
+ return 0;
+ }
+
+@@ -914,14 +977,15 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
+ if (!pe->compat || !pe->pmu)
+ return 0;
+
+- if (!strcmp(pmu->id, pe->compat) &&
+- pmu_uncore_alias_match(pe->pmu, pmu->name)) {
++ if (pmu_uncore_alias_match(pe->pmu, pmu->name) &&
++ pmu_uncore_identifier_match(pe->compat, pmu->id)) {
+ perf_pmu__new_alias(pmu,
+ pe->name,
+ pe->desc,
+ pe->event,
+ /*val_fd=*/ NULL,
+- pe);
++ pe,
++ EVENT_SRC_SYS_JSON);
+ }
+
+ return 0;
+@@ -992,10 +1056,9 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
+ * type value and format definitions. Load both right
+ * now.
+ */
+- if (pmu_format(pmu, dirfd, name)) {
+- free(pmu);
+- return NULL;
+- }
++ if (pmu_format(pmu, dirfd, name))
++ goto err;
++
+ pmu->is_core = is_pmu_core(name);
+ pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
+
+@@ -1012,6 +1075,12 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
+ pmu->id = pmu_id(name);
+ pmu->max_precise = pmu_max_precise(dirfd, pmu);
+ pmu->events_table = perf_pmu__find_events_table(pmu);
++ /*
++ * Load the sys json events/aliases when loading the PMU as each event
++ * may have a different compat regular expression. We therefore can't
++ * know the number of sys json events/aliases without computing the
++ * regular expressions for them all.
++ */
+ pmu_add_sys_aliases(pmu);
+ list_add_tail(&pmu->list, pmus);
+
+@@ -1605,15 +1674,15 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu)
+ {
+ size_t nr;
+
+- if (!pmu->sysfs_aliases_loaded)
+- pmu_aliases_parse(pmu);
+-
+- nr = pmu->sysfs_aliases;
++ pmu_aliases_parse(pmu);
++ nr = pmu->sysfs_aliases + pmu->sys_json_aliases;
+
+ if (pmu->cpu_aliases_added)
+- nr += pmu->loaded_json_aliases;
++ nr += pmu->cpu_json_aliases;
+ else if (pmu->events_table)
+- nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases;
++ nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases;
++ else
++ assert(pmu->cpu_json_aliases == 0);
+
+ return pmu->selectable ? nr + 1 : nr;
+ }
+@@ -1666,6 +1735,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
+ struct strbuf sb;
+
+ strbuf_init(&sb, /*hint=*/ 0);
++ pmu_aliases_parse(pmu);
+ pmu_add_cpu_aliases(pmu);
+ list_for_each_entry(event, &pmu->aliases, list) {
+ size_t buf_used;
+@@ -1735,6 +1805,12 @@ bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name)
+
+ bool perf_pmu__is_software(const struct perf_pmu *pmu)
+ {
++ const char *known_sw_pmus[] = {
++ "kprobe",
++ "msr",
++ "uprobe",
++ };
++
+ if (pmu->is_core || pmu->is_uncore || pmu->auxtrace)
+ return false;
+ switch (pmu->type) {
+@@ -1746,7 +1822,11 @@ bool perf_pmu__is_software(const struct perf_pmu *pmu)
+ case PERF_TYPE_BREAKPOINT: return true;
+ default: break;
+ }
+- return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe");
++ for (size_t i = 0; i < ARRAY_SIZE(known_sw_pmus); i++) {
++ if (!strcmp(pmu->name, known_sw_pmus[i]))
++ return true;
++ }
++ return false;
+ }
+
+ FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
+@@ -2050,19 +2130,21 @@ void perf_pmu__delete(struct perf_pmu *pmu)
+ free(pmu);
+ }
+
+-struct perf_pmu *pmu__find_core_pmu(void)
++const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config)
+ {
+- struct perf_pmu *pmu = NULL;
++ struct perf_pmu_alias *event;
+
+- while ((pmu = perf_pmus__scan_core(pmu))) {
+- /*
+- * The cpumap should cover all CPUs. Otherwise, some CPUs may
+- * not support some events or have different event IDs.
+- */
+- if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
+- return NULL;
++ if (!pmu)
++ return NULL;
++
++ pmu_aliases_parse(pmu);
++ pmu_add_cpu_aliases(pmu);
++ list_for_each_entry(event, &pmu->aliases, list) {
++ struct perf_event_attr attr = {.config = 0,};
++ int ret = perf_pmu__config(pmu, &attr, &event->terms, NULL);
+
+- return pmu;
++ if (ret == 0 && config == attr.config)
++ return event->name;
+ }
+ return NULL;
+ }
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index 6a4e170c61d6be..aca4238f06a657 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -120,8 +120,10 @@ struct perf_pmu {
+ const struct pmu_events_table *events_table;
+ /** @sysfs_aliases: Number of sysfs aliases loaded. */
+ uint32_t sysfs_aliases;
+- /** @sysfs_aliases: Number of json event aliases loaded. */
+- uint32_t loaded_json_aliases;
++ /** @cpu_json_aliases: Number of json event aliases loaded specific to the CPUID. */
++ uint32_t cpu_json_aliases;
++ /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */
++ uint32_t sys_json_aliases;
+ /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */
+ bool sysfs_aliases_loaded;
+ /**
+@@ -240,6 +242,7 @@ void pmu_add_cpu_aliases_table(struct perf_pmu *pmu,
+ char *perf_pmu__getcpuid(struct perf_pmu *pmu);
+ const struct pmu_events_table *pmu_events_table__find(void);
+ const struct pmu_metrics_table *pmu_metrics_table__find(void);
++bool pmu_uncore_identifier_match(const char *compat, const char *id);
+
+ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
+
+@@ -264,6 +267,7 @@ int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename,
+ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name);
+ struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
+ void perf_pmu__delete(struct perf_pmu *pmu);
+-struct perf_pmu *pmu__find_core_pmu(void);
++struct perf_pmu *perf_pmus__find_core_pmu(void);
++const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config);
+
+ #endif /* __PMU_H */
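pmu_uncore_identifier_match() above treats the JSON "compat" field as an extended regular expression and, so that an identifier like 4300 cannot satisfy a 43[01] pattern meant for 430/431, additionally requires the match to cover the whole identifier via rm_so/rm_eo. A standalone sketch of the anchored-match test:

/* Sketch of the full-string regex match used above. */
#include <regex.h>
#include <stdio.h>
#include <string.h>

static int full_match(const char *pattern, const char *id)
{
	regex_t re;
	regmatch_t m[1];
	int ok;

	if (regcomp(&re, pattern, REG_EXTENDED) != 0)
		return 0;			/* bad pattern: no match */
	ok = regexec(&re, id, 1, m, 0) == 0 &&
	     m[0].rm_so == 0 && (size_t)m[0].rm_eo == strlen(id);
	regfree(&re);
	return ok;
}

int main(void)
{
	printf("%d\n", full_match("43[01]", "430"));	/* 1: whole string  */
	printf("%d\n", full_match("43[01]", "4300"));	/* 0: partial match */
	return 0;
}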
+diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
+index 6631367c756fdd..54a237b2b85386 100644
+--- a/tools/perf/util/pmus.c
++++ b/tools/perf/util/pmus.c
+@@ -10,6 +10,7 @@
+ #include <pthread.h>
+ #include <string.h>
+ #include <unistd.h>
++#include "cpumap.h"
+ #include "debug.h"
+ #include "evsel.h"
+ #include "pmus.h"
+@@ -268,7 +269,7 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
+ {
+ if (!pmu) {
+ pmu_read_sysfs(/*core_only=*/true);
+- pmu = list_prepare_entry(pmu, &core_pmus, list);
++ return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
+ }
+ list_for_each_entry_continue(pmu, &core_pmus, list)
+ return pmu;
+@@ -469,8 +470,8 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
+ qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
+ for (int j = 0; j < len; j++) {
+ /* Skip duplicates */
+- if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
+- continue;
++ if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
++ goto free;
+
+ print_cb->print_event(print_state,
+ aliases[j].pmu_name,
+@@ -483,6 +484,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
+ aliases[j].desc,
+ aliases[j].long_desc,
+ aliases[j].encoding_desc);
++free:
+ zfree(&aliases[j].name);
+ zfree(&aliases[j].alias);
+ zfree(&aliases[j].scale_unit);
+@@ -592,3 +594,20 @@ struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
+ }
+ return pmu;
+ }
++
++struct perf_pmu *perf_pmus__find_core_pmu(void)
++{
++ struct perf_pmu *pmu = NULL;
++
++ while ((pmu = perf_pmus__scan_core(pmu))) {
++ /*
++ * The cpumap should cover all CPUs. Otherwise, some CPUs may
++ * not support some events or have different event IDs.
++ */
++ if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
++ return NULL;
++
++ return pmu;
++ }
++ return NULL;
++}
+diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
+index a7566edc86a3e3..9bee082194d5ea 100644
+--- a/tools/perf/util/print-events.c
++++ b/tools/perf/util/print-events.c
+@@ -232,7 +232,6 @@ void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
+ bool is_event_supported(u8 type, u64 config)
+ {
+ bool ret = true;
+- int open_return;
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+@@ -246,20 +245,32 @@ bool is_event_supported(u8 type, u64 config)
+
+ evsel = evsel__new(&attr);
+ if (evsel) {
+- open_return = evsel__open(evsel, NULL, tmap);
+- ret = open_return >= 0;
++ ret = evsel__open(evsel, NULL, tmap) >= 0;
+
+- if (open_return == -EACCES) {
++ if (!ret) {
+ /*
+- * This happens if the paranoid value
++ * The event may fail to open if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2
+- * Re-run with exclude_kernel set; we don't do that
+- * by default as some ARM machines do not support it.
+- *
++ * Re-run with exclude_kernel set; we don't do that by
++ * default as some ARM machines do not support it.
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
+ }
++
++ if (!ret) {
++ /*
++ * The event may fail to open if the PMU requires
++ * exclude_guest to be set (e.g. as the Apple M1 PMU
++ * requires).
++ * Re-run with exclude_guest set; we don't do that by
++ * default as it's equally legitimate for another PMU
++ * driver to require that exclude_guest is clear.
++ */
++ evsel->core.attr.exclude_guest = 1;
++ ret = evsel__open(evsel, NULL, tmap) >= 0;
++ }
++
+ evsel__delete(evsel);
+ }
+
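is_event_supported() above now retries on any open failure rather than only on -EACCES, first adding exclude_kernel (the perf_event_paranoid == 2 case) and then additionally exclude_guest (some PMU drivers, such as Apple M1's, refuse events that do not exclude guests). A sketch of the same progressive fallback against the raw syscall; the probe order mirrors the code above, but the helper itself is made up:

/* Sketch of the progressive open-fallback probe. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int try_open(struct perf_event_attr *attr)
{
	int fd = syscall(SYS_perf_event_open, attr, 0, -1, -1, 0);

	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

static int event_supported(__u32 type, __u64 config)
{
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.size = sizeof(attr),
		.disabled = 1,
	};

	if (try_open(&attr))
		return 1;
	attr.exclude_kernel = 1;	/* perf_event_paranoid == 2      */
	if (try_open(&attr))
		return 1;
	attr.exclude_guest = 1;		/* PMU insists on no guest mode  */
	return try_open(&attr);
}

int main(void)
{
	printf("%d\n", event_supported(PERF_TYPE_HARDWARE,
				       PERF_COUNT_HW_CPU_CYCLES));
	return 0;
}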
+diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
+index 1a5b7fa459b232..4026cea9fc3a29 100644
+--- a/tools/perf/util/probe-event.c
++++ b/tools/perf/util/probe-event.c
+@@ -11,6 +11,7 @@
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <errno.h>
++#include <libgen.h>
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <stdlib.h>
+diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
+index c29f5f0bb552c9..b01b0e55105638 100644
+--- a/tools/perf/util/python.c
++++ b/tools/perf/util/python.c
+@@ -103,6 +103,16 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
+ return EOF;
+ }
+
++const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
++{
++ return NULL;
++}
++
++struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
++{
++ return NULL;
++}
++
+ int perf_pmus__num_core_pmus(void)
+ {
+ return 1;
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 1e9aa8ed15b644..277b2cbd518611 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -115,6 +115,11 @@ static int perf_session__open(struct perf_session *session, int repipe_fd)
+ return -1;
+ }
+
++ if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
++ /* Auxiliary events may reference exited threads, hold onto dead ones. */
++ symbol_conf.keep_exited_threads = true;
++ }
++
+ if (perf_data__is_pipe(data))
+ return 0;
+
+@@ -1495,6 +1500,9 @@ static int deliver_sample_group(struct evlist *evlist,
+ int ret = -EINVAL;
+ struct sample_read_value *v = sample->read.group.values;
+
++ if (tool->dont_split_sample_group)
++ return deliver_sample_value(evlist, tool, event, sample, v, machine);
++
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ ret = deliver_sample_value(evlist, tool, event, sample, v,
+ machine);
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index 79d5e2955f85d8..e837132d5031bd 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -17,7 +17,7 @@ src_feature_tests = getenv('srctree') + '/tools/build/feature'
+
+ def clang_has_option(option):
+ cc_output = Popen([cc, cc_options + option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
+- return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
++ return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o) or (b"unknown warning option" in o))] == [ ]
+
+ if cc_is_clang:
+ from sysconfig import get_config_vars
+@@ -63,6 +63,8 @@ cflags = getenv('CFLAGS', '').split()
+ cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls', '-DPYTHON_PERF' ]
+ if cc_is_clang:
+ cflags += ["-Wno-unused-command-line-argument" ]
++ if clang_has_option("-Wno-cast-function-type-mismatch"):
++ cflags += ["-Wno-cast-function-type-mismatch" ]
+ else:
+ cflags += ['-Wno-cast-function-type' ]
+
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 6aa1c7f2b4448b..b80349ca219972 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -332,7 +332,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+ * comparing symbol address alone is not enough since it's a
+ * relative address within a dso.
+ */
+- if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
++ if (!hists__has(left->hists, dso)) {
+ ret = sort__dso_cmp(left, right);
+ if (ret != 0)
+ return ret;
+@@ -583,21 +583,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
+ {
+
+ struct symbol *sym = he->ms.sym;
+- struct annotation *notes;
++ struct annotated_branch *branch;
+ double ipc = 0.0, coverage = 0.0;
+ char tmp[64];
+
+ if (!sym)
+ return repsep_snprintf(bf, size, "%-*s", width, "-");
+
+- notes = symbol__annotation(sym);
++ branch = symbol__annotation(sym)->branch;
+
+- if (notes->hit_cycles)
+- ipc = notes->hit_insn / ((double)notes->hit_cycles);
++ if (branch && branch->hit_cycles)
++ ipc = branch->hit_insn / ((double)branch->hit_cycles);
+
+- if (notes->total_insn) {
+- coverage = notes->cover_insn * 100.0 /
+- ((double)notes->total_insn);
++ if (branch && branch->total_insn) {
++ coverage = branch->cover_insn * 100.0 /
++ ((double)branch->total_insn);
+ }
+
+ snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
+diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
+index 034b496df29780..7addc34afcf5d6 100644
+--- a/tools/perf/util/srcline.c
++++ b/tools/perf/util/srcline.c
+@@ -399,6 +399,8 @@ static void addr2line_subprocess_cleanup(struct child_process *a2l)
+ kill(a2l->pid, SIGKILL);
+ finish_command(a2l); /* ignore result, we don't care */
+ a2l->pid = -1;
++ close(a2l->in);
++ close(a2l->out);
+ }
+
+ free(a2l);
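The srcline.c fix closes the parent's ends of the addr2line pipe after killing and reaping the subprocess, so repeated cleanups no longer leak two file descriptors each. A sketch of the cleanup order with a stand-in struct (perf's real child_process is reaped by finish_command(); waitpid stands in for it here):

/* Sketch of the helper-subprocess cleanup order used above. */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

struct helper {
	pid_t pid;
	int in;		/* fd we write requests to   */
	int out;	/* fd we read responses from */
};

static void helper_cleanup(struct helper *h)
{
	if (h->pid != -1) {
		kill(h->pid, SIGKILL);
		waitpid(h->pid, NULL, 0);	/* reap, ignore status   */
		h->pid = -1;
		close(h->in);			/* the fix: also release */
		close(h->out);			/* both pipe ends        */
	}
}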
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index afe6db8e7bf4fb..f98ade7f9fba4f 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -560,7 +560,7 @@ static void print_metric_only(struct perf_stat_config *config,
+ if (color)
+ mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+- color_snprintf(str, sizeof(str), color ?: "", fmt, val);
++ color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val);
+ fprintf(out, "%*s ", mlen, str);
+ os->first = false;
+ }
+@@ -1207,6 +1207,10 @@ static void print_metric_headers(struct perf_stat_config *config,
+
+ /* Print metrics headers only */
+ evlist__for_each_entry(evlist, counter) {
++ if (!config->iostat_run &&
++ config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
++ continue;
++
+ os.evsel = counter;
+
+ perf_stat__print_shadow_stats(config, counter, 0,
+diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
+index 1c5c3eeba4cfb2..2affa4d45aa21c 100644
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -176,6 +176,13 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type
+ if (type != evsel__stat_type(cur))
+ continue;
+
++ /*
++ * Except the SW CLOCK events,
++ * ignore if not the PMU we're looking for.
++ */
++ if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu))
++ continue;
++
+ aggr = &cur->stats->aggr[aggr_idx];
+ if (type == STAT_NSECS)
+ return aggr->counts.val;
+@@ -264,7 +271,7 @@ static void print_ll_miss(struct perf_stat_config *config,
+ static const double color_ratios[3] = {20.0, 10.0, 5.0};
+
+ print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, color_ratios,
+- "of all L1-icache accesses");
++ "of all LL-cache accesses");
+ }
+
+ static void print_dtlb_miss(struct perf_stat_config *config,
+@@ -414,12 +421,7 @@ static int prepare_metric(struct evsel **metric_events,
+ val = NAN;
+ source_count = 0;
+ } else {
+- /*
+- * If an event was scaled during stat gathering,
+- * reverse the scale before computing the
+- * metric.
+- */
+- val = aggr->counts.val * (1.0 / metric_events[i]->scale);
++ val = aggr->counts.val;
+ source_count = evsel__source_count(metric_events[i]);
+ }
+ }
+diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
+index cf05b0b56c57bc..116a642ad99d12 100644
+--- a/tools/perf/util/string.c
++++ b/tools/perf/util/string.c
+@@ -301,3 +301,51 @@ unsigned int hex(char c)
+ return c - 'a' + 10;
+ return c - 'A' + 10;
+ }
++
++/*
++ * Replace all occurrences of character 'needle' in string 'haystack' with
++ * string 'replace'
++ *
++ * The new string could be longer so a new string is returned which must be
++ * freed.
++ */
++char *strreplace_chars(char needle, const char *haystack, const char *replace)
++{
++ int replace_len = strlen(replace);
++ char *new_s, *to;
++ const char *loc = strchr(haystack, needle);
++ const char *from = haystack;
++ int num = 0;
++
++ /* Count occurrences */
++ while (loc) {
++ loc = strchr(loc + 1, needle);
++ num++;
++ }
++
++ /* Allocate enough space for replacements and reset first location */
++ new_s = malloc(strlen(haystack) + (num * (replace_len - 1) + 1));
++ if (!new_s)
++ return NULL;
++ loc = strchr(haystack, needle);
++ to = new_s;
++
++ while (loc) {
++ /* Copy original string up to found char and update positions */
++ memcpy(to, from, 1 + loc - from);
++ to += loc - from;
++ from = loc + 1;
++
++ /* Copy replacement string and update positions */
++ memcpy(to, replace, replace_len);
++ to += replace_len;
++
++ /* needle next occurrence or end of string */
++ loc = strchr(from, needle);
++ }
++
++ /* Copy any remaining chars + null */
++ strcpy(to, from);
++
++ return new_s;
++}
+diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
+index 56c30fef9682ff..52cb8ba057c779 100644
+--- a/tools/perf/util/string2.h
++++ b/tools/perf/util/string2.h
+@@ -39,5 +39,6 @@ char *strpbrk_esc(char *str, const char *stopset);
+ char *strdup_esc(const char *str);
+
+ unsigned int hex(char c);
++char *strreplace_chars(char needle, const char *haystack, const char *replace);
+
+ #endif /* PERF_STRING_H */
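strreplace_chars() above sizes the result with a counting pass (each needle grows the string by strlen(replace) - 1 bytes), then copies segment by segment; the caller owns the returned buffer. A usage sketch, assuming it is compiled together with util/string.c and its header:

/* Usage sketch for the new strreplace_chars() helper. */
#include <stdio.h>
#include <stdlib.h>
#include "string2.h"	/* perf-internal header declaring the helper */

int main(void)
{
	char *escaped = strreplace_chars(',', "lib,foo.so", "\\,");

	if (escaped) {
		printf("%s\n", escaped);	/* lib\,foo.so */
		free(escaped);			/* caller owns the string */
	}
	return 0;
}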
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 3f36675b7c8ff6..ea24f21aafc3e3 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -48,11 +48,6 @@ static bool symbol__is_idle(const char *name);
+ int vmlinux_path__nr_entries;
+ char **vmlinux_path;
+
+-struct map_list_node {
+- struct list_head node;
+- struct map *map;
+-};
+-
+ struct symbol_conf symbol_conf = {
+ .nanosecs = false,
+ .use_modules = true,
+@@ -90,11 +85,6 @@ static enum dso_binary_type binary_type_symtab[] = {
+
+ #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
+
+-static struct map_list_node *map_list_node__new(void)
+-{
+- return malloc(sizeof(struct map_list_node));
+-}
+-
+ static bool symbol_type__filter(char symbol_type)
+ {
+ symbol_type = toupper(symbol_type);
+@@ -271,29 +261,6 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
+ curr->end = roundup(curr->start, 4096) + 4096;
+ }
+
+-void maps__fixup_end(struct maps *maps)
+-{
+- struct map_rb_node *prev = NULL, *curr;
+-
+- down_write(maps__lock(maps));
+-
+- maps__for_each_entry(maps, curr) {
+- if (prev != NULL && !map__end(prev->map))
+- map__set_end(prev->map, map__start(curr->map));
+-
+- prev = curr;
+- }
+-
+- /*
+- * We still haven't the actual symbols, so guess the
+- * last map final address.
+- */
+- if (curr && !map__end(curr->map))
+- map__set_end(curr->map, ~0ULL);
+-
+- up_write(maps__lock(maps));
+-}
+-
+ struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
+ {
+ size_t namelen = strlen(name) + 1;
+@@ -1271,103 +1238,6 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
+ return 0;
+ }
+
+-/*
+- * Merges map into maps by splitting the new map within the existing map
+- * regions.
+- */
+-int maps__merge_in(struct maps *kmaps, struct map *new_map)
+-{
+- struct map_rb_node *rb_node;
+- LIST_HEAD(merged);
+- int err = 0;
+-
+- maps__for_each_entry(kmaps, rb_node) {
+- struct map *old_map = rb_node->map;
+-
+- /* no overload with this one */
+- if (map__end(new_map) < map__start(old_map) ||
+- map__start(new_map) >= map__end(old_map))
+- continue;
+-
+- if (map__start(new_map) < map__start(old_map)) {
+- /*
+- * |new......
+- * |old....
+- */
+- if (map__end(new_map) < map__end(old_map)) {
+- /*
+- * |new......| -> |new..|
+- * |old....| -> |old....|
+- */
+- map__set_end(new_map, map__start(old_map));
+- } else {
+- /*
+- * |new.............| -> |new..| |new..|
+- * |old....| -> |old....|
+- */
+- struct map_list_node *m = map_list_node__new();
+-
+- if (!m) {
+- err = -ENOMEM;
+- goto out;
+- }
+-
+- m->map = map__clone(new_map);
+- if (!m->map) {
+- free(m);
+- err = -ENOMEM;
+- goto out;
+- }
+-
+- map__set_end(m->map, map__start(old_map));
+- list_add_tail(&m->node, &merged);
+- map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+- map__set_start(new_map, map__end(old_map));
+- }
+- } else {
+- /*
+- * |new......
+- * |old....
+- */
+- if (map__end(new_map) < map__end(old_map)) {
+- /*
+- * |new..| -> x
+- * |old.........| -> |old.........|
+- */
+- map__put(new_map);
+- new_map = NULL;
+- break;
+- } else {
+- /*
+- * |new......| -> |new...|
+- * |old....| -> |old....|
+- */
+- map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+- map__set_start(new_map, map__end(old_map));
+- }
+- }
+- }
+-
+-out:
+- while (!list_empty(&merged)) {
+- struct map_list_node *old_node;
+-
+- old_node = list_entry(merged.next, struct map_list_node, node);
+- list_del_init(&old_node->node);
+- if (!err)
+- err = maps__insert(kmaps, old_node->map);
+- map__put(old_node->map);
+- free(old_node);
+- }
+-
+- if (new_map) {
+- if (!err)
+- err = maps__insert(kmaps, new_map);
+- map__put(new_map);
+- }
+- return err;
+-}
+-
+ static int dso__load_kcore(struct dso *dso, struct map *map,
+ const char *kallsyms_filename)
+ {
+@@ -2065,124 +1935,10 @@ int dso__load(struct dso *dso, struct map *map)
+ return ret;
+ }
+
+-static int map__strcmp(const void *a, const void *b)
+-{
+- const struct map *map_a = *(const struct map **)a;
+- const struct map *map_b = *(const struct map **)b;
+- const struct dso *dso_a = map__dso(map_a);
+- const struct dso *dso_b = map__dso(map_b);
+- int ret = strcmp(dso_a->short_name, dso_b->short_name);
+-
+- if (ret == 0 && map_a != map_b) {
+- /*
+- * Ensure distinct but name equal maps have an order in part to
+- * aid reference counting.
+- */
+- ret = (int)map__start(map_a) - (int)map__start(map_b);
+- if (ret == 0)
+- ret = (int)((intptr_t)map_a - (intptr_t)map_b);
+- }
+-
+- return ret;
+-}
+-
+-static int map__strcmp_name(const void *name, const void *b)
+-{
+- const struct dso *dso = map__dso(*(const struct map **)b);
+-
+- return strcmp(name, dso->short_name);
+-}
+-
+-void __maps__sort_by_name(struct maps *maps)
+-{
+- qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
+-}
+-
+-static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
+-{
+- struct map_rb_node *rb_node;
+- struct map **maps_by_name = realloc(maps__maps_by_name(maps),
+- maps__nr_maps(maps) * sizeof(struct map *));
+- int i = 0;
+-
+- if (maps_by_name == NULL)
+- return -1;
+-
+- up_read(maps__lock(maps));
+- down_write(maps__lock(maps));
+-
+- RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
+- RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
+-
+- maps__for_each_entry(maps, rb_node)
+- maps_by_name[i++] = map__get(rb_node->map);
+-
+- __maps__sort_by_name(maps);
+-
+- up_write(maps__lock(maps));
+- down_read(maps__lock(maps));
+-
+- return 0;
+-}
+-
+-static struct map *__maps__find_by_name(struct maps *maps, const char *name)
+-{
+- struct map **mapp;
+-
+- if (maps__maps_by_name(maps) == NULL &&
+- map__groups__sort_by_name_from_rbtree(maps))
+- return NULL;
+-
+- mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
+- sizeof(*mapp), map__strcmp_name);
+- if (mapp)
+- return *mapp;
+- return NULL;
+-}
+-
+-struct map *maps__find_by_name(struct maps *maps, const char *name)
+-{
+- struct map_rb_node *rb_node;
+- struct map *map;
+-
+- down_read(maps__lock(maps));
+-
+-
+- if (RC_CHK_ACCESS(maps)->last_search_by_name) {
+- const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
+-
+- if (strcmp(dso->short_name, name) == 0) {
+- map = RC_CHK_ACCESS(maps)->last_search_by_name;
+- goto out_unlock;
+- }
+- }
+- /*
+- * If we have maps->maps_by_name, then the name isn't in the rbtree,
+- * as maps->maps_by_name mirrors the rbtree when lookups by name are
+- * made.
+- */
+- map = __maps__find_by_name(maps, name);
+- if (map || maps__maps_by_name(maps) != NULL)
+- goto out_unlock;
+-
+- /* Fallback to traversing the rbtree... */
+- maps__for_each_entry(maps, rb_node) {
+- struct dso *dso;
+-
+- map = rb_node->map;
+- dso = map__dso(map);
+- if (strcmp(dso->short_name, name) == 0) {
+- RC_CHK_ACCESS(maps)->last_search_by_name = map;
+- goto out_unlock;
+- }
+- }
+- map = NULL;
+-
+-out_unlock:
+- up_read(maps__lock(maps));
+- return map;
+-}
+-
++/*
++ * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
++ * it returns an error.
++ */
+ int dso__load_vmlinux(struct dso *dso, struct map *map,
+ const char *vmlinux, bool vmlinux_allocated)
+ {
+@@ -2201,8 +1957,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
+ else
+ symtab_type = DSO_BINARY_TYPE__VMLINUX;
+
+- if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
++ if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
++ if (vmlinux_allocated)
++ free((char *) vmlinux);
+ return -1;
++ }
+
+ /*
+ * dso__load_sym() may copy 'dso' which will result in the copies having
+@@ -2245,7 +2004,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map)
+ err = dso__load_vmlinux(dso, map, filename, true);
+ if (err > 0)
+ goto out;
+- free(filename);
+ }
+ out:
+ return err;
+@@ -2397,7 +2155,6 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map)
+ err = dso__load_vmlinux(dso, map, filename, true);
+ if (err > 0)
+ return err;
+- free(filename);
+ }
+
+ if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
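
The symbol.c hunks above change an ownership rule: when vmlinux_allocated is true, dso__load_vmlinux() now frees the filename on every path, including the early symsrc__init() failure, so both call sites drop their own free(filename) and a double free becomes impossible. A sketch of the resulting convention with deliberately simplified names (none of these are the real perf API):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /* The callee consumes 'name' whenever name_allocated is true,
     * even when it fails early. */
    static int load_image(const char *name, bool name_allocated)
    {
        int err = -1;               /* pretend symsrc__init() failed */

        if (err && name_allocated)
            free((char *)name);     /* consumed on the error path too */
        return err;
    }

    int main(void)
    {
        char *filename = strdup("/boot/vmlinux");

        if (filename)
            load_image(filename, true);
        return 0;                   /* caller no longer frees filename */
    }
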
+diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
+index af87c46b3f89e5..071837ddce2ac7 100644
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -189,7 +189,6 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
+ void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
+ void symbols__fixup_duplicate(struct rb_root_cached *symbols);
+ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
+-void maps__fixup_end(struct maps *maps);
+
+ typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
+ int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
+index 0b589570d1d095..6040286e07a652 100644
+--- a/tools/perf/util/symbol_conf.h
++++ b/tools/perf/util/symbol_conf.h
+@@ -42,7 +42,9 @@ struct symbol_conf {
+ inline_name,
+ disable_add2line_warn,
+ buildid_mmap2,
+- guest_code;
++ guest_code,
++ lazy_load_kernel_maps,
++ keep_exited_threads;
+ const char *vmlinux_name,
+ *kallsyms_name,
+ *source_prefix,
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index fe5e6991ae4b49..61e9f449c72581 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -39,12 +39,13 @@ int thread__init_maps(struct thread *thread, struct machine *machine)
+
+ struct thread *thread__new(pid_t pid, pid_t tid)
+ {
+- char *comm_str;
+- struct comm *comm;
+ RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
+ struct thread *thread;
+
+ if (ADD_RC_CHK(thread, _thread) != NULL) {
++ struct comm *comm;
++ char comm_str[32];
++
+ thread__set_pid(thread, pid);
+ thread__set_tid(thread, tid);
+ thread__set_ppid(thread, -1);
+@@ -56,13 +57,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
+ init_rwsem(thread__namespaces_lock(thread));
+ init_rwsem(thread__comm_lock(thread));
+
+- comm_str = malloc(32);
+- if (!comm_str)
+- goto err_thread;
+-
+- snprintf(comm_str, 32, ":%d", tid);
++ snprintf(comm_str, sizeof(comm_str), ":%d", tid);
+ comm = comm__new(comm_str, 0, false);
+- free(comm_str);
+ if (!comm)
+ goto err_thread;
+
+@@ -76,7 +72,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
+ return thread;
+
+ err_thread:
+- free(thread);
++ thread__delete(thread);
+ return NULL;
+ }
+
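
thread__new() picks up two fixes here: the 32-byte comm buffer moves from the heap to the stack, removing an allocation and its failure path, and the error label calls thread__delete() instead of a bare free(), so the partially constructed thread and any state initialised earlier are torn down properly instead of leaking. The stack-buffer half in isolation:

    #include <stdio.h>

    int main(void)
    {
        int tid = 1234;
        char comm_str[32];  /* fixed-size stack buffer, no malloc/free */

        /* ":%d" of an int always fits in 32 bytes, so no truncation. */
        snprintf(comm_str, sizeof(comm_str), ":%d", tid);
        puts(comm_str);     /* prints ":1234" */
        return 0;
    }
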
+diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
+index e79225a0ea46b7..0df775b5c1105d 100644
+--- a/tools/perf/util/thread.h
++++ b/tools/perf/util/thread.h
+@@ -36,13 +36,22 @@ struct thread_rb_node {
+ };
+
+ DECLARE_RC_STRUCT(thread) {
++ /** @maps: mmaps associated with this thread. */
+ struct maps *maps;
+ pid_t pid_; /* Not all tools update this */
++ /** @tid: thread ID number unique to a machine. */
+ pid_t tid;
++ /** @ppid: parent process of the process this thread belongs to. */
+ pid_t ppid;
+ int cpu;
+ int guest_cpu; /* For QEMU thread */
+ refcount_t refcnt;
++ /**
++ * @exited: Has the thread had an exit event. Such threads are usually
++ * removed from the machine's threads but some events/tools require
++ * access to dead threads.
++ */
++ bool exited;
+ bool comm_set;
+ int comm_len;
+ struct list_head namespaces_list;
+@@ -189,6 +198,11 @@ static inline refcount_t *thread__refcnt(struct thread *thread)
+ return &RC_CHK_ACCESS(thread)->refcnt;
+ }
+
++static inline void thread__set_exited(struct thread *thread, bool exited)
++{
++ RC_CHK_ACCESS(thread)->exited = exited;
++}
++
+ static inline bool thread__comm_set(const struct thread *thread)
+ {
+ return RC_CHK_ACCESS(thread)->comm_set;
+diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
+index e848579e61a863..ea3b431b978301 100644
+--- a/tools/perf/util/thread_map.c
++++ b/tools/perf/util/thread_map.c
+@@ -280,13 +280,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
+ threads->nr = ntasks;
+ }
+ out:
++ strlist__delete(slist);
+ if (threads)
+ refcount_set(&threads->refcnt, 1);
+ return threads;
+
+ out_free_threads:
+ zfree(&threads);
+- strlist__delete(slist);
+ goto out;
+ }
+
+diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
+index 30244392168163..1b91ccd4d52348 100644
+--- a/tools/perf/util/time-utils.c
++++ b/tools/perf/util/time-utils.c
+@@ -20,7 +20,7 @@ int parse_nsec_time(const char *str, u64 *ptime)
+ u64 time_sec, time_nsec;
+ char *end;
+
+- time_sec = strtoul(str, &end, 10);
++ time_sec = strtoull(str, &end, 10);
+ if (*end != '.' && *end != '\0')
+ return -1;
+
+@@ -38,7 +38,7 @@ int parse_nsec_time(const char *str, u64 *ptime)
+ for (i = strlen(nsec_buf); i < 9; i++)
+ nsec_buf[i] = '0';
+
+- time_nsec = strtoul(nsec_buf, &end, 10);
++ time_nsec = strtoull(nsec_buf, &end, 10);
+ if (*end != '\0')
+ return -1;
+ } else
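
Both conversions above matter only on 32-bit builds: parse_nsec_time() stores into a u64, but strtoul() returns unsigned long, which is 32 bits there and clamps out-of-range input to ULONG_MAX, while strtoull() always parses the full 64-bit range. Illustrated with a value above 2^32:

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>

    int main(void)
    {
        const char *str = "5000000000";  /* valid seconds count > 2^32 */
        uint64_t secs = strtoull(str, NULL, 10);

        /* On an ILP32 target, strtoul(str, NULL, 10) would instead
         * return ULONG_MAX (4294967295) with errno set to ERANGE. */
        printf("%" PRIu64 "\n", secs);   /* prints 5000000000 */
        return 0;
    }
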
+diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
+index c957fb849ac633..62bbc9cec151bb 100644
+--- a/tools/perf/util/tool.h
++++ b/tools/perf/util/tool.h
+@@ -85,6 +85,7 @@ struct perf_tool {
+ bool namespace_events;
+ bool cgroup_events;
+ bool no_warn;
++ bool dont_split_sample_group;
+ enum show_feature_header show_feat_hdr;
+ };
+
+diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
+index a8b0d79bd96cfa..4c5588dbb1317d 100644
+--- a/tools/perf/util/top.h
++++ b/tools/perf/util/top.h
+@@ -21,7 +21,6 @@ struct perf_top {
+ struct perf_tool tool;
+ struct evlist *evlist, *sb_evlist;
+ struct record_opts record_opts;
+- struct annotation_options annotation_opts;
+ struct evswitch evswitch;
+ /*
+ * Symbols will be added here in perf_event__process_sample and will
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index 8554db3fc0d7c9..6013335a8daea5 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -46,6 +46,7 @@ static int __report_module(struct addr_location *al, u64 ip,
+ {
+ Dwfl_Module *mod;
+ struct dso *dso = NULL;
++ Dwarf_Addr base;
+ /*
+ * Some callers will use al->sym, so we can't just use the
+ * cheaper thread__find_map() here.
+@@ -58,13 +59,25 @@ static int __report_module(struct addr_location *al, u64 ip,
+ if (!dso)
+ return 0;
+
++ /*
++ * The generated JIT DSO files only map the code segment without
++ * ELF headers. Since JIT codes used to be packed in a memory
++ * segment, calculating the base address using pgoff falls into
++ * a different code in another DSO. So just use the map->start
++ * directly to pick the correct one.
++ */
++ if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
++ base = map__start(al->map);
++ else
++ base = map__start(al->map) - map__pgoff(al->map);
++
+ mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (mod) {
+ Dwarf_Addr s;
+
+ dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+- if (s != map__start(al->map) - map__pgoff(al->map))
+- mod = 0;
++ if (s != base)
++ mod = NULL;
+ }
+
+ if (!mod) {
+@@ -72,14 +85,14 @@ static int __report_module(struct addr_location *al, u64 ip,
+
+ __symbol__join_symfs(filename, sizeof(filename), dso->long_name);
+ mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+- map__start(al->map) - map__pgoff(al->map), false);
++ base, false);
+ }
+ if (!mod) {
+ char filename[PATH_MAX];
+
+ if (dso__build_id_filename(dso, filename, sizeof(filename), false))
+ mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+- map__start(al->map) - map__pgoff(al->map), false);
++ base, false);
+ }
+
+ if (mod) {
+diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
+index c0641882fd2fd7..5e5c3395a49989 100644
+--- a/tools/perf/util/unwind-libunwind-local.c
++++ b/tools/perf/util/unwind-libunwind-local.c
+@@ -327,7 +327,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
+
+ maps__for_each_entry(thread__maps(ui->thread), map_node) {
+ struct map *map = map_node->map;
+- u64 start = map__start(map);
++ u64 start = map__start(map) - map__pgoff(map);
+
+ if (map__dso(map) == dso && start < base_addr)
+ base_addr = start;
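
Both unwind fixes normalise a map's address to the ELF load base by subtracting the file offset, so every mapping of a DSO, whichever segment it covers, agrees on one base address (with the JIT special case above, whose dump files have no ELF headers to offset against). Numerically, with hypothetical addresses:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* An executable segment mapped at 0x7f0000021000 that starts
         * at file offset (pgoff) 0x21000 within its DSO. */
        uint64_t map_start = 0x7f0000021000ULL, pgoff = 0x21000ULL;

        /* base = map start - file offset = ELF load base */
        printf("base = %#llx\n",
               (unsigned long long)(map_start - pgoff)); /* 0x7f0000000000 */
        return 0;
    }
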
+diff --git a/tools/power/cpupower/lib/powercap.c b/tools/power/cpupower/lib/powercap.c
+index a7a59c6bacda81..94a0c69e55ef5e 100644
+--- a/tools/power/cpupower/lib/powercap.c
++++ b/tools/power/cpupower/lib/powercap.c
+@@ -77,6 +77,14 @@ int powercap_get_enabled(int *mode)
+ return sysfs_get_enabled(path, mode);
+ }
+
++/*
++ * TODO: implement function. Returns dummy 0 for now.
++ */
++int powercap_set_enabled(int mode)
++{
++ return 0;
++}
++
+ /*
+ * Hardcoded, because rapl is the only powercap implementation
+ - * this needs to get more generic if more powercap implementations
+diff --git a/tools/power/cpupower/man/cpupower-powercap-info.1 b/tools/power/cpupower/man/cpupower-powercap-info.1
+index df3087000efb82..145d6f06fa72df 100644
+--- a/tools/power/cpupower/man/cpupower-powercap-info.1
++++ b/tools/power/cpupower/man/cpupower-powercap-info.1
+@@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores.
+ .SH "DOCUMENTATION"
+
+ kernel sources:
+-Documentation/power/powercap/powercap.txt
++Documentation/power/powercap/powercap.rst
+
+
+ .SH "SEE ALSO"
+diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
+index c519cc89c97f42..0a56e22240fc8b 100644
+--- a/tools/power/cpupower/utils/helpers/amd.c
++++ b/tools/power/cpupower/utils/helpers/amd.c
+@@ -41,6 +41,16 @@ union core_pstate {
+ unsigned res1:31;
+ unsigned en:1;
+ } pstatedef;
++ /* since fam 1Ah: */
++ struct {
++ unsigned fid:12;
++ unsigned res1:2;
++ unsigned vid:8;
++ unsigned iddval:8;
++ unsigned idddiv:2;
++ unsigned res2:31;
++ unsigned en:1;
++ } pstatedef2;
+ unsigned long long val;
+ };
+
+@@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate)
+ {
+ int t;
+
++ /* Fam 1Ah onward do not use did */
++ if (cpupower_cpu_info.family >= 0x1A)
++ return 0;
++
+ if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
+ t = pstate.pstatedef.did;
+ else if (cpupower_cpu_info.family == 0x12)
+@@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate)
+ static int get_cof(union core_pstate pstate)
+ {
+ int t;
+- int fid, did, cof;
++ int fid, did, cof = 0;
+
+ did = get_did(pstate);
+ if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
+- fid = pstate.pstatedef.fid;
+- cof = 200 * fid / did;
++ if (cpupower_cpu_info.family >= 0x1A) {
++ fid = pstate.pstatedef2.fid;
++ if (fid > 0x0f)
++ cof = (fid * 5);
++ } else {
++ fid = pstate.pstatedef.fid;
++ cof = 200 * fid / did;
++ }
+ } else {
+ t = 0x10;
+ fid = pstate.pstate.fid;
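
On family 1Ah parts the P-state MSR layout changes again (the pstatedef2 variant above): there is no divisor field, so get_did() short-circuits to 0, and the 12-bit FID encodes the core frequency directly in 5 MHz steps, which is why get_cof() computes fid * 5 for FID values above 0x0f (smaller values are treated as invalid by the code above). A worked decode:

    #include <stdio.h>

    /* Family-1Ah core frequency: FID * 5 MHz; FIDs <= 0x0f yield 0. */
    static int cof_fam_1ah(unsigned int fid)
    {
        return fid > 0x0f ? fid * 5 : 0;
    }

    int main(void)
    {
        /* e.g. a P-state with FID 0x320 (800) decodes to 4000 MHz */
        printf("%d MHz\n", cof_fam_1ah(0x320));
        return 0;
    }
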
+diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
+index 4a356a70678554..40ad221e88811b 100755
+--- a/tools/power/pm-graph/sleepgraph.py
++++ b/tools/power/pm-graph/sleepgraph.py
+@@ -4151,7 +4151,7 @@ def parseKernelLog(data):
+ elif(re.match('Enabling non-boot CPUs .*', msg)):
+ # start of first cpu resume
+ cpu_start = ktime
+- elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)) \
++ elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
+ or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
+ # end of a cpu suspend, start of the next
+ m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index 8f08c3fd498d5b..1ba6340d3b3da5 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -370,7 +370,7 @@ below the processor's base frequency.
+
+ Busy% = MPERF_delta/TSC_delta
+
+-Bzy_MHz = TSC_delta/APERF_delta/MPERF_delta/measurement_interval
++Bzy_MHz = TSC_delta*APERF_delta/MPERF_delta/measurement_interval
+
+ Note that these calculations depend on TSC_delta, so they
+ are not reliable during intervals when TSC_MHz is not running at the base frequency.
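
The corrected formula scales the TSC rate by the unhalted-clock ratio, i.e. Bzy_MHz = (TSC_delta / interval) * (APERF_delta / MPERF_delta); the old text divided by APERF_delta, which is dimensionally wrong. Plugging in hypothetical counter deltas:

    #include <stdio.h>

    int main(void)
    {
        /* 10 s interval on a 2.4 GHz TSC; the core ran at 1.5x the
         * base clock while unhalted (APERF/MPERF = 1.5). */
        double tsc_delta = 24e9, aperf_delta = 3e9, mperf_delta = 2e9;
        double interval = 10.0;                 /* seconds */

        double bzy_mhz = tsc_delta * aperf_delta / mperf_delta
                         / interval / 1e6;      /* counts/s -> MHz */

        printf("Bzy_MHz = %.0f\n", bzy_mhz);    /* prints 3600 */
        return 0;
    }
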
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 9a10512e340787..6fad5fbfb0f967 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -53,6 +53,8 @@
+ #define NAME_BYTES 20
+ #define PATH_BYTES 128
+
++#define MAX_NOFILE 0x8000
++
+ enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
+ enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC };
+ enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT };
+@@ -564,6 +566,7 @@ struct topo_params {
+ int num_cpus;
+ int num_cores;
+ int max_cpu_num;
++ int max_die_id;
+ int max_node_num;
+ int nodes_per_pkg;
+ int cores_per_node;
+@@ -1811,9 +1814,10 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
+
+ for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
+- if (mp->format == FORMAT_RAW)
+- continue;
+- average.packages.counter[i] += p->counter[i];
++ if ((mp->format == FORMAT_RAW) && (topo.num_packages == 0))
++ average.packages.counter[i] = p->counter[i];
++ else
++ average.packages.counter[i] += p->counter[i];
+ }
+ return 0;
+ }
+@@ -1966,7 +1970,7 @@ unsigned long long get_uncore_mhz(int package, int die)
+ {
+ char path[128];
+
+- sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/current_freq_khz", package,
++ sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/current_freq_khz", package,
+ die);
+
+ return (snapshot_sysfs_counter(path) / 1000);
+@@ -2180,7 +2184,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
+ if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+- } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
++ } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
+ if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+ }
+@@ -5476,7 +5480,8 @@ void print_dev_latency(void)
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+- warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
++ if (debug)
++ warnx("Read %s failed", path);
+ return;
+ }
+
+@@ -5515,6 +5520,7 @@ void process_cpuid()
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int fms, family, model, stepping, ecx_flags, edx_flags;
+ unsigned long long ucode_patch = 0;
++ bool ucode_patch_valid = false;
+
+ eax = ebx = ecx = edx = 0;
+
+@@ -5544,6 +5550,8 @@ void process_cpuid()
+
+ if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch))
+ warnx("get_msr(UCODE)");
++ else
++ ucode_patch_valid = true;
+
+ /*
+ * check max extended function levels of CPUID.
+@@ -5554,9 +5562,12 @@ void process_cpuid()
+ __cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
+
+ if (!quiet) {
+- fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d) microcode 0x%x\n",
+- family, model, stepping, family, model, stepping,
+- (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF));
++ fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d)",
++ family, model, stepping, family, model, stepping);
++ if (ucode_patch_valid)
++ fprintf(outf, " microcode 0x%x", (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF));
++ fputc('\n', outf);
++
+ fprintf(outf, "CPUID(0x80000000): max_extended_levels: 0x%x\n", max_extended_level);
+ fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
+ ecx_flags & (1 << 0) ? "SSE3" : "-",
+@@ -5790,6 +5801,7 @@ void process_cpuid()
+ rapl_probe(family, model);
+ perf_limit_reasons_probe(family, model);
+ automatic_cstate_conversion_probe(family, model);
++ prewake_cstate_probe(family, model);
+
+ check_tcc_offset(model_orig);
+
+@@ -5860,7 +5872,6 @@ void topology_probe()
+ int i;
+ int max_core_id = 0;
+ int max_package_id = 0;
+- int max_die_id = 0;
+ int max_siblings = 0;
+
+ /* Initialize num_cpus, max_cpu_num */
+@@ -5929,8 +5940,8 @@ void topology_probe()
+
+ /* get die information */
+ cpus[i].die_id = get_die_id(i);
+- if (cpus[i].die_id > max_die_id)
+- max_die_id = cpus[i].die_id;
++ if (cpus[i].die_id > topo.max_die_id)
++ topo.max_die_id = cpus[i].die_id;
+
+ /* get numa node information */
+ cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
+@@ -5956,9 +5967,9 @@ void topology_probe()
+ if (!summary_only && topo.cores_per_node > 1)
+ BIC_PRESENT(BIC_Core);
+
+- topo.num_die = max_die_id + 1;
++ topo.num_die = topo.max_die_id + 1;
+ if (debug > 1)
+- fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die);
++ fprintf(outf, "max_die_id %d, sizing for %d die\n", topo.max_die_id, topo.num_die);
+ if (!summary_only && topo.num_die > 1)
+ BIC_PRESENT(BIC_Die);
+
+@@ -6717,6 +6728,22 @@ void cmdline(int argc, char **argv)
+ }
+ }
+
++void set_rlimit(void)
++{
++ struct rlimit limit;
++
++ if (getrlimit(RLIMIT_NOFILE, &limit) < 0)
++ err(1, "Failed to get rlimit");
++
++ if (limit.rlim_max < MAX_NOFILE)
++ limit.rlim_max = MAX_NOFILE;
++ if (limit.rlim_cur < MAX_NOFILE)
++ limit.rlim_cur = MAX_NOFILE;
++
++ if (setrlimit(RLIMIT_NOFILE, &limit) < 0)
++ err(1, "Failed to set rlimit");
++}
++
+ int main(int argc, char **argv)
+ {
+ outf = stderr;
+@@ -6729,6 +6756,9 @@ int main(int argc, char **argv)
+
+ probe_sysfs();
+
++ if (!getuid())
++ set_rlimit();
++
+ turbostat_init();
+
+ msr_sum_record();
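
Two of the turbostat changes above are easy to sanity-check in isolation. set_rlimit() raises RLIMIT_NOFILE to 0x8000 (32768), only when running as root, since turbostat can hold an open MSR descriptor per CPU and large systems overflow the usual 1024 default. The sysfs-path fix matters once a package or die number reaches two digits, because the old format string hard-coded a leading zero:

    #include <stdio.h>

    int main(void)
    {
        char old_path[64], new_path[64];
        int package = 10, die = 0;

        sprintf(old_path, "package_0%d_die_0%d", package, die);
        sprintf(new_path, "package_%02d_die_%02d", package, die);

        /* old: "package_010_die_00" -- no such sysfs node exists
         * new: "package_10_die_00"  -- matches the kernel's naming */
        printf("%s vs %s\n", old_path, new_path);
        return 0;
    }
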
+diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+index 5fd9e594079cfd..ebda9c366b2ba3 100644
+--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+@@ -1241,6 +1241,7 @@ unsigned int get_pkg_num(int cpu)
+ retval = fscanf(fp, "%d\n", &pkg);
+ if (retval != 1)
+ errx(1, "%s: failed to parse", pathname);
++ fclose(fp);
+ return pkg;
+ }
+
+diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
+index 90f3c9802ffb80..95dc58b94178bf 100644
+--- a/tools/testing/cxl/Kbuild
++++ b/tools/testing/cxl/Kbuild
+@@ -62,5 +62,6 @@ cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
+ cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+ cxl_core-y += config_check.o
+ cxl_core-y += cxl_core_test.o
++cxl_core-y += cxl_core_exports.o
+
+ obj-m += test/
+diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
+new file mode 100644
+index 00000000000000..077e6883921df2
+--- /dev/null
++++ b/tools/testing/cxl/cxl_core_exports.c
+@@ -0,0 +1,7 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
++
++#include "cxl.h"
++
++/* Exporting of cxl_core symbols that are only used by cxl_test */
++EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, CXL);
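
EXPORT_SYMBOL_NS_GPL() places the symbol in a named namespace, so only modules that explicitly import that namespace can link against cxl_num_decoders_committed(); per the comment above, that keeps the export effectively private to cxl_test. A minimal consumer sketch (the demo module names are hypothetical; only the namespace import line is the required piece):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/module.h>

    /* Without this import, modpost rejects any use of symbols
     * exported into the CXL namespace. */
    MODULE_IMPORT_NS(CXL);

    static int __init demo_init(void)
    {
        /* ... call cxl_num_decoders_committed(port) here ... */
        return 0;
    }
    module_init(demo_init);

    static void __exit demo_exit(void) { }
    module_exit(demo_exit);

    MODULE_LICENSE("GPL");
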
+diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
+index fb6ab9cef84f77..8251718eaf3a8e 100644
+--- a/tools/testing/cxl/test/cxl.c
++++ b/tools/testing/cxl/test/cxl.c
+@@ -624,11 +624,15 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
+ {
+ struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
++ struct device *dev = &port->dev;
+
+ if (!cxlhdm)
+ return ERR_PTR(-ENOMEM);
+
+ cxlhdm->port = port;
++ cxlhdm->interleave_mask = ~0U;
++ cxlhdm->iw_cap_mask = ~0UL;
++ dev_set_drvdata(dev, cxlhdm);
+ return cxlhdm;
+ }
+
+@@ -669,10 +673,11 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
+ return 0;
+
+ dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
+- if (port->commit_end + 1 != id) {
++ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+- dev_name(&cxld->dev), port->id, port->commit_end + 1);
++ dev_name(&cxld->dev), port->id,
++ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+@@ -831,7 +836,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+ cxld->interleave_ways = 2;
+ else
+ cxld->interleave_ways = 1;
+- cxld->interleave_granularity = 256;
++ cxld->interleave_granularity = 4096;
+ cxld->hpa_range = (struct range) {
+ .start = base,
+ .end = base + size - 1,
+diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
+index 464fc39ed2776b..0ed100617d9937 100644
+--- a/tools/testing/cxl/test/mem.c
++++ b/tools/testing/cxl/test/mem.c
+@@ -3,6 +3,7 @@
+
+ #include <linux/platform_device.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/vmalloc.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/sizes.h>
+@@ -1450,11 +1451,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
+ mdata->mes.mds = mds;
+ cxl_mock_add_event_logs(&mdata->mes);
+
+- cxlmd = devm_cxl_add_memdev(cxlds);
++ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
+ if (IS_ERR(cxlmd))
+ return PTR_ERR(cxlmd);
+
+- rc = cxl_memdev_setup_fw_upload(mds);
++ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+ if (rc)
+ return rc;
+
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 829f5bdfd2e439..045090085ac5bd 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -843,6 +843,7 @@ sub set_value {
+ if ($lvalue =~ /^(TEST|BISECT|CONFIG_BISECT)_TYPE(\[.*\])?$/ &&
+ $prvalue !~ /^(config_|)bisect$/ &&
+ $prvalue !~ /^build$/ &&
++ $prvalue !~ /^make_warnings_file$/ &&
+ $buildonly) {
+
+ # Note if a test is something other than build, then we
+@@ -2042,7 +2043,7 @@ sub get_grub_index {
+ } elsif ($reboot_type eq "grub2") {
+ $command = "cat $grub_file";
+ $target = '^\s*menuentry.*' . $grub_menu_qt;
+- $skip = '^\s*menuentry';
++ $skip = '^\s*menuentry\s';
+ $submenu = '^\s*submenu\s';
+ } elsif ($reboot_type eq "grub2bls") {
+ $command = $grub_bls_get;
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index 79d8832c862aa7..ce34be15c929e0 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -450,7 +450,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+- non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START]
++ non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START, TEST_PLAN]
+ while lines and not any(re.match(lines.peek())
+ for re in non_diagnostic_lines):
+ log.append(lines.pop())
+@@ -726,6 +726,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
+ # test plan
+ test.name = "main"
+ ktap_line = parse_ktap_header(lines, test)
++ test.log.extend(parse_diagnostic(lines))
+ parse_test_plan(lines, test)
+ parent_test = True
+ else:
+@@ -737,6 +738,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
+ if parent_test:
+ # If KTAP version line and/or subtest header is found, attempt
+ # to parse test plan and print test header
++ test.log.extend(parse_diagnostic(lines))
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index e5da1cad70baf6..76a8990bb14e87 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -35538,7 +35538,7 @@ static noinline void __init check_prealloc(struct maple_tree *mt)
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+- MT_BUG_ON(mt, allocated != 1);
++ MT_BUG_ON(mt, allocated != 0);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 1a21d6beebc682..5b61b8bb29f846 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -152,12 +152,10 @@ ifneq ($(KBUILD_OUTPUT),)
+ abs_objtree := $(realpath $(abs_objtree))
+ BUILD := $(abs_objtree)/kselftest
+ KHDR_INCLUDES := -isystem ${abs_objtree}/usr/include
+- KHDR_DIR := ${abs_objtree}/usr/include
+ else
+ BUILD := $(CURDIR)
+ abs_srctree := $(shell cd $(top_srcdir) && pwd)
+ KHDR_INCLUDES := -isystem ${abs_srctree}/usr/include
+- KHDR_DIR := ${abs_srctree}/usr/include
+ DEFAULT_INSTALL_HDR_PATH := 1
+ endif
+
+@@ -171,7 +169,7 @@ export KHDR_INCLUDES
+ # all isn't the first target in the file.
+ .DEFAULT_GOAL := all
+
+-all: kernel_header_files
++all:
+ @ret=1; \
+ for TARGET in $(TARGETS); do \
+ BUILD_TARGET=$$BUILD/$$TARGET; \
+@@ -182,27 +180,12 @@ all: kernel_header_files
+ ret=$$((ret * $$?)); \
+ done; exit $$ret;
+
+-kernel_header_files:
+- @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null; \
+- if [ $$? -ne 0 ]; then \
+- RED='\033[1;31m'; \
+- NOCOLOR='\033[0m'; \
+- echo; \
+- echo -e "$${RED}error$${NOCOLOR}: missing kernel header files."; \
+- echo "Please run this and try again:"; \
+- echo; \
+- echo " cd $(top_srcdir)"; \
+- echo " make headers"; \
+- echo; \
+- exit 1; \
+- fi
+-
+-.PHONY: kernel_header_files
+-
+ run_tests: all
+ @for TARGET in $(TARGETS); do \
+ BUILD_TARGET=$$BUILD/$$TARGET; \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests \
++ SRC_PATH=$(shell readlink -e $$(pwd)) \
++ OBJ_PATH=$(BUILD) \
+ O=$(abs_objtree); \
+ done;
+
+@@ -253,7 +236,10 @@ ifdef INSTALL_PATH
+ @ret=1; \
+ for TARGET in $(TARGETS); do \
+ BUILD_TARGET=$$BUILD/$$TARGET; \
+- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install \
++ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install \
++ INSTALL_PATH=$(INSTALL_PATH)/$$TARGET \
++ SRC_PATH=$(shell readlink -e $$(pwd)) \
++ OBJ_PATH=$(INSTALL_PATH) \
+ O=$(abs_objtree) \
+ $(if $(FORCE_TARGETS),|| exit); \
+ ret=$$((ret * $$?)); \
+diff --git a/tools/testing/selftests/alsa/conf.c b/tools/testing/selftests/alsa/conf.c
+index 2f1685a3eae142..ff09038fdce63d 100644
+--- a/tools/testing/selftests/alsa/conf.c
++++ b/tools/testing/selftests/alsa/conf.c
+@@ -186,7 +186,7 @@ static char *sysfs_get(const char *sysfs_root, const char *id)
+ close(fd);
+ if (len < 0)
+ ksft_exit_fail_msg("sysfs: unable to read value '%s': %s\n",
+- path, errno);
++ path, strerror(errno));
+ while (len > 0 && path[len-1] == '\n')
+ len--;
+ path[len] = '\0';
+diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
+index 21e482b23f5028..df942149c6f6c5 100644
+--- a/tools/testing/selftests/alsa/mixer-test.c
++++ b/tools/testing/selftests/alsa/mixer-test.c
+@@ -138,7 +138,7 @@ static void find_controls(void)
+ err = snd_ctl_elem_info(card_data->handle,
+ ctl_data->info);
+ if (err < 0) {
+- ksft_print_msg("%s getting info for %d\n",
++ ksft_print_msg("%s getting info for %s\n",
+ snd_strerror(err),
+ ctl_data->name);
+ }
+@@ -166,7 +166,7 @@ static void find_controls(void)
+ err = snd_ctl_poll_descriptors(card_data->handle,
+ &card_data->pollfd, 1);
+ if (err != 1) {
+- ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for %d\n",
++ ksft_exit_fail_msg("snd_ctl_poll_descriptors() failed for card %d: %d\n",
+ card, err);
+ }
+
+@@ -319,7 +319,7 @@ static bool ctl_value_index_valid(struct ctl_data *ctl,
+ }
+
+ if (int64_val > snd_ctl_elem_info_get_max64(ctl->info)) {
+- ksft_print_msg("%s.%d value %lld more than maximum %lld\n",
++ ksft_print_msg("%s.%d value %lld more than maximum %ld\n",
+ ctl->name, index, int64_val,
+ snd_ctl_elem_info_get_max(ctl->info));
+ return false;
+diff --git a/tools/testing/selftests/arm64/fp/za-fork.c b/tools/testing/selftests/arm64/fp/za-fork.c
+index b86cb1049497f3..587b9464822261 100644
+--- a/tools/testing/selftests/arm64/fp/za-fork.c
++++ b/tools/testing/selftests/arm64/fp/za-fork.c
+@@ -85,7 +85,7 @@ int main(int argc, char **argv)
+ */
+ ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
+ if (ret >= 0) {
+- ksft_test_result(fork_test(), "fork_test");
++ ksft_test_result(fork_test(), "fork_test\n");
+
+ } else {
+ ksft_print_msg("SME not supported\n");
+diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile
+index 8f5febaf1a9a25..edb3613513b8a8 100644
+--- a/tools/testing/selftests/arm64/signal/Makefile
++++ b/tools/testing/selftests/arm64/signal/Makefile
+@@ -23,7 +23,7 @@ $(TEST_GEN_PROGS): $(PROGS)
+ # Common test-unit targets to build common-layout test-cases executables
+ # Needs secondary expansion to properly include the testcase c-file in pre-reqs
+ COMMON_SOURCES := test_signals.c test_signals_utils.c testcases/testcases.c \
+- signals.S
++ signals.S sve_helpers.c
+ COMMON_HEADERS := test_signals.h test_signals_utils.h testcases/testcases.h
+
+ .SECONDEXPANSION:
+diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.c b/tools/testing/selftests/arm64/signal/sve_helpers.c
+new file mode 100644
+index 00000000000000..0acc121af3062a
+--- /dev/null
++++ b/tools/testing/selftests/arm64/signal/sve_helpers.c
+@@ -0,0 +1,56 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2024 ARM Limited
++ *
++ * Common helper functions for SVE and SME functionality.
++ */
++
++#include <stdbool.h>
++#include <kselftest.h>
++#include <asm/sigcontext.h>
++#include <sys/prctl.h>
++
++unsigned int vls[SVE_VQ_MAX];
++unsigned int nvls;
++
++int sve_fill_vls(bool use_sme, int min_vls)
++{
++ int vq, vl;
++ int pr_set_vl = use_sme ? PR_SME_SET_VL : PR_SVE_SET_VL;
++ int len_mask = use_sme ? PR_SME_VL_LEN_MASK : PR_SVE_VL_LEN_MASK;
++
++ /*
++ * Enumerate up to SVE_VQ_MAX vector lengths
++ */
++ for (vq = SVE_VQ_MAX; vq > 0; --vq) {
++ vl = prctl(pr_set_vl, vq * 16);
++ if (vl == -1)
++ return KSFT_FAIL;
++
++ vl &= len_mask;
++
++ /*
++ * Unlike SVE, SME does not require the minimum vector length
++ * to be implemented, or the VLs to be consecutive, so any call
++ * to the prctl might return the single implemented VL, which
++ * might be larger than 16. So to avoid this loop never
++ * terminating, bail out here when we find a higher VL than
++ * we asked for.
++ * See the ARM ARM, DDI 0487K.a, B1.4.2: I_QQRNR and I_NWYBP.
++ */
++ if (vq < sve_vq_from_vl(vl))
++ break;
++
++ /* Skip missing VLs */
++ vq = sve_vq_from_vl(vl);
++
++ vls[nvls++] = vl;
++ }
++
++ if (nvls < min_vls) {
++ fprintf(stderr, "Only %d VL supported\n", nvls);
++ return KSFT_SKIP;
++ }
++
++ return KSFT_PASS;
++}
+diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.h b/tools/testing/selftests/arm64/signal/sve_helpers.h
+new file mode 100644
+index 00000000000000..50948ce471cc62
+--- /dev/null
++++ b/tools/testing/selftests/arm64/signal/sve_helpers.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2024 ARM Limited
++ *
++ * Common helper functions for SVE and SME functionality.
++ */
++
++#ifndef __SVE_HELPERS_H__
++#define __SVE_HELPERS_H__
++
++#include <stdbool.h>
++
++#define VLS_USE_SVE false
++#define VLS_USE_SME true
++
++extern unsigned int vls[];
++extern unsigned int nvls;
++
++int sve_fill_vls(bool use_sme, int min_vls);
++
++#endif
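
Each converted testcase below wires sve_fill_vls() into its tdescr hook the same way: KSFT_PASS (0) means enough vector lengths were found, KSFT_SKIP marks the test skipped rather than failed, and any other result leaves it failed. The shared shape, lifted out of the files that follow (tdescr and the KSFT_* constants come from the selftest harness):

    static bool get_vls(struct tdescr *td)
    {
        /* VLS_USE_SVE/VLS_USE_SME and the minimum count vary per test */
        int res = sve_fill_vls(VLS_USE_SME, 1);

        if (!res)                 /* KSFT_PASS == 0: enough VLs found */
            return true;

        if (res == KSFT_SKIP)     /* too few VLs: skip, don't fail */
            td->result = KSFT_SKIP;

        return false;
    }
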
+diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
+index ebd5815b54bbaa..dfd6a2badf9fb3 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
++++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c
+@@ -6,44 +6,28 @@
+ * handler, this is not supported and is expected to segfault.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ struct fake_sigframe sf;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SME, 2);
+
+- /*
+- * Enumerate up to SVE_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SVE_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
++ if (!res)
++ return true;
+
+- vl &= PR_SME_VL_LEN_MASK;
++ if (res == KSFT_SKIP)
++ td->result = KSFT_SKIP;
+
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
+-
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least two VLs */
+- if (nvls < 2) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
+- return false;
+- }
+-
+- return true;
++ return false;
+ }
+
+ static int fake_sigreturn_ssve_change_vl(struct tdescr *td,
+@@ -51,30 +35,30 @@ static int fake_sigreturn_ssve_change_vl(struct tdescr *td,
+ {
+ size_t resv_sz, offset;
+ struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf);
+- struct sve_context *sve;
++ struct za_context *za;
+
+ /* Get a signal context with a SME ZA frame in it */
+ if (!get_current_context(td, &sf.uc, sizeof(sf.uc)))
+ return 1;
+
+ resv_sz = GET_SF_RESV_SIZE(sf);
+- head = get_header(head, SVE_MAGIC, resv_sz, &offset);
++ head = get_header(head, ZA_MAGIC, resv_sz, &offset);
+ if (!head) {
+- fprintf(stderr, "No SVE context\n");
++ fprintf(stderr, "No ZA context\n");
+ return 1;
+ }
+
+- if (head->size != sizeof(struct sve_context)) {
++ if (head->size != sizeof(struct za_context)) {
+ fprintf(stderr, "Register data present, aborting\n");
+ return 1;
+ }
+
+- sve = (struct sve_context *)head;
++ za = (struct za_context *)head;
+
+ /* No changes are supported; init left us at minimum VL so go to max */
+ fprintf(stderr, "Attempting to change VL from %d to %d\n",
+- sve->vl, vls[0]);
+- sve->vl = vls[0];
++ za->vl, vls[0]);
++ za->vl = vls[0];
+
+ fake_sigreturn(&sf, sizeof(sf), 0);
+
+diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
+index e2a452190511ff..e1ccf8f85a70c8 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
++++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
+@@ -12,40 +12,22 @@
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ struct fake_sigframe sf;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sve_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SVE, 2);
+
+- /*
+- * Enumerate up to SVE_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SVE_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
++ if (!res)
++ return true;
+
+- vl &= PR_SVE_VL_LEN_MASK;
+-
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
+-
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least two VLs */
+- if (nvls < 2) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
++ if (res == KSFT_SKIP)
+ td->result = KSFT_SKIP;
+- return false;
+- }
+
+- return true;
++ return false;
+ }
+
+ static int fake_sigreturn_sve_change_vl(struct tdescr *td,
+diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+index 3d37daafcff513..6dbe48cf8b09ed 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+@@ -6,51 +6,31 @@
+ * set up as expected.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ ucontext_t uc;
+ char buf[1024 * 64];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+- /*
+- * Enumerate up to SVE_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SME_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
+-
+- vl &= PR_SME_VL_LEN_MASK;
+-
+- /* Did we find the lowest supported VL? */
+- if (vq < sve_vq_from_vl(vl))
+- break;
++ if (!res)
++ return true;
+
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
+-
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least one VL */
+- if (nvls < 1) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
+- return false;
+- }
++ if (res == KSFT_SKIP)
++ td->result = KSFT_SKIP;
+
+- return true;
++ return false;
+ }
+
+ static void setup_ssve_regs(void)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
+index 9dc5f128bbc0d5..5557e116e97363 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c
+@@ -6,51 +6,31 @@
+ * signal frames is set up as expected when enabled simultaneously.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+- /*
+- * Enumerate up to SVE_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SME_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
+-
+- vl &= PR_SME_VL_LEN_MASK;
+-
+- /* Did we find the lowest supported VL? */
+- if (vq < sve_vq_from_vl(vl))
+- break;
++ if (!res)
++ return true;
+
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
+-
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least one VL */
+- if (nvls < 1) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
+- return false;
+- }
++ if (res == KSFT_SKIP)
++ td->result = KSFT_SKIP;
+
+- return true;
++ return false;
+ }
+
+ static void setup_regs(void)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
+index 8b16eabbb7697e..8143eb1c58c187 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c
+@@ -6,47 +6,31 @@
+ * expected.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ ucontext_t uc;
+ char buf[1024 * 64];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sve_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SVE, 1);
+
+- /*
+- * Enumerate up to SVE_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SVE_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
+-
+- vl &= PR_SVE_VL_LEN_MASK;
+-
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
++ if (!res)
++ return true;
+
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least one VL */
+- if (nvls < 1) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
+- return false;
+- }
++ if (res == KSFT_SKIP)
++ td->result = KSFT_SKIP;
+
+- return true;
++ return false;
+ }
+
+ static void setup_sve_regs(void)
+diff --git a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
+index 4d6f94b6178f36..ce26e9c2fa5e34 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c
+@@ -6,47 +6,31 @@
+ * expected.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+- /*
+- * Enumerate up to SME_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SME_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
+-
+- vl &= PR_SME_VL_LEN_MASK;
+-
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
++ if (!res)
++ return true;
+
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least one VL */
+- if (nvls < 1) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
+- return false;
+- }
++ if (res == KSFT_SKIP)
++ td->result = KSFT_SKIP;
+
+- return true;
++ return false;
+ }
+
+ static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc,
+diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+index 174ad665669647..b9e13f27f1f9aa 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+@@ -6,51 +6,31 @@
+ * expected.
+ */
+
++#include <kselftest.h>
+ #include <signal.h>
+ #include <ucontext.h>
+ #include <sys/prctl.h>
+
+ #include "test_signals_utils.h"
++#include "sve_helpers.h"
+ #include "testcases.h"
+
+ static union {
+ ucontext_t uc;
+ char buf[1024 * 128];
+ } context;
+-static unsigned int vls[SVE_VQ_MAX];
+-unsigned int nvls = 0;
+
+ static bool sme_get_vls(struct tdescr *td)
+ {
+- int vq, vl;
++ int res = sve_fill_vls(VLS_USE_SME, 1);
+
+- /*
+- * Enumerate up to SME_VQ_MAX vector lengths
+- */
+- for (vq = SVE_VQ_MAX; vq > 0; --vq) {
+- vl = prctl(PR_SME_SET_VL, vq * 16);
+- if (vl == -1)
+- return false;
+-
+- vl &= PR_SME_VL_LEN_MASK;
+-
+- /* Did we find the lowest supported VL? */
+- if (vq < sve_vq_from_vl(vl))
+- break;
++ if (!res)
++ return true;
+
+- /* Skip missing VLs */
+- vq = sve_vq_from_vl(vl);
+-
+- vls[nvls++] = vl;
+- }
+-
+- /* We need at least one VL */
+- if (nvls < 1) {
+- fprintf(stderr, "Only %d VL supported\n", nvls);
+- return false;
+- }
++ if (res == KSFT_SKIP)
++ td->result = KSFT_SKIP;
+
+- return true;
++ return false;
+ }
+
+ static void setup_za_regs(void)
+diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
+index 5701163460ef7f..955f87c1170d76 100644
+--- a/tools/testing/selftests/arm64/tags/tags_test.c
++++ b/tools/testing/selftests/arm64/tags/tags_test.c
+@@ -6,6 +6,7 @@
+ #include <stdint.h>
+ #include <sys/prctl.h>
+ #include <sys/utsname.h>
++#include "../../kselftest.h"
+
+ #define SHIFT_TAG(tag) ((uint64_t)(tag) << 56)
+ #define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
+@@ -21,6 +22,9 @@ int main(void)
+ if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
+ tbi_enabled = 1;
+ ptr = (struct utsname *)malloc(sizeof(*ptr));
++ if (!ptr)
++ ksft_exit_fail_msg("Failed to allocate utsname buffer\n");
++
+ if (tbi_enabled)
+ tag = 0x42;
+ ptr = (struct utsname *)SET_TAG(ptr, tag);
+diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
+index 3babaf3eee5c45..ec6aa58fb18100 100644
+--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
++++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
+@@ -1,6 +1,5 @@
+ bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+ bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+-fexit_sleep # The test never returns. The remaining tests cannot start.
+ kprobe_multi_bench_attach # needs CONFIG_FPROBE
+ kprobe_multi_test # needs CONFIG_FPROBE
+ module_attach # prog 'kprobe_multi': failed to auto-attach: -95
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index caede9b574cb16..ab364e95a9b23e 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -56,6 +56,15 @@ TEST_INST_SUBDIRS := no_alu32
+ ifneq ($(BPF_GCC),)
+ TEST_GEN_PROGS += test_progs-bpf_gcc
+ TEST_INST_SUBDIRS += bpf_gcc
++
++# The following tests contain C code that, although technically legal,
++# triggers GCC warnings that cannot be disabled: declaration of
++# anonymous struct types in function parameter lists.
++progs/btf_dump_test_case_bitfields.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_namespacing.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error
++progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error
+ endif
+
+ ifneq ($(CLANG_CPUV4),)
+@@ -386,24 +395,25 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h
+ # $1 - input .c file
+ # $2 - output .o file
+ # $3 - CFLAGS
++# $4 - binary name
+ define CLANG_BPF_BUILD_RULE
+- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
++ $(call msg,CLNG-BPF,$4,$2)
+ $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2
+ endef
+ # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
+ define CLANG_NOALU32_BPF_BUILD_RULE
+- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
++ $(call msg,CLNG-BPF,$4,$2)
+ $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2
+ endef
+ # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
+ define CLANG_CPUV4_BPF_BUILD_RULE
+- $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
++ $(call msg,CLNG-BPF,$4,$2)
+ $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2
+ endef
+ # Build BPF object using GCC
+ define GCC_BPF_BUILD_RULE
+- $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
+- $(Q)$(BPF_GCC) $3 -O2 -c $1 -o $2
++ $(call msg,GCC-BPF,$4,$2)
++ $(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2
+ endef
+
+ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
+@@ -442,7 +452,7 @@ LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(ske
+ # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
+ # Parameters:
+ # $1 - test runner base binary name (e.g., test_progs)
+-# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc)
++# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
+ define DEFINE_TEST_RUNNER
+
+ TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2
+@@ -470,7 +480,7 @@ endef
+ # Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and
+ # set up by DEFINE_TEST_RUNNER itself, create test runner build rules with:
+ # $1 - test runner base binary name (e.g., test_progs)
+-# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc)
++# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc)
+ define DEFINE_TEST_RUNNER_RULES
+
+ ifeq ($($(TRUNNER_OUTPUT)-dir),)
+@@ -492,7 +502,9 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \
+ $(wildcard $(BPFDIR)/*.bpf.h) \
+ | $(TRUNNER_OUTPUT) $$(BPFOBJ)
+ $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
+- $(TRUNNER_BPF_CFLAGS))
++ $(TRUNNER_BPF_CFLAGS) \
++ $$($$<-CFLAGS) \
++ $$($$<-$2-CFLAGS),$(TRUNNER_BINARY))
+
+ $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
+ $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+@@ -702,6 +714,8 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+
++# Linking uprobe_multi can fail due to relocation overflows on mips.
++$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot)
+ $(OUTPUT)/uprobe_multi: uprobe_multi.c
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
+diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
+index 73ce11b0547da7..b705cbabe1e2f2 100644
+--- a/tools/testing/selftests/bpf/bench.c
++++ b/tools/testing/selftests/bpf/bench.c
+@@ -10,6 +10,7 @@
+ #include <sys/sysinfo.h>
+ #include <signal.h>
+ #include "bench.h"
++#include "bpf_util.h"
+ #include "testing_helpers.h"
+
+ struct env env = {
+diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
+index 68180d8f8558ec..005c401b3e2275 100644
+--- a/tools/testing/selftests/bpf/bench.h
++++ b/tools/testing/selftests/bpf/bench.h
+@@ -10,6 +10,7 @@
+ #include <math.h>
+ #include <time.h>
+ #include <sys/syscall.h>
++#include <limits.h>
+
+ struct cpu_set {
+ bool *cpus;
+diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+index cefc5dd72573c0..2e8adf059fa3b8 100644
+--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
++++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2020 Facebook */
+ #include <linux/btf.h>
+ #include <linux/btf_ids.h>
++#include <linux/delay.h>
+ #include <linux/error-injection.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+@@ -541,6 +542,14 @@ static int bpf_testmod_init(void)
+
+ static void bpf_testmod_exit(void)
+ {
++	/* Need to wait for all references to be dropped because
++	 * bpf_kfunc_call_test_release(), which currently resides in the
++	 * kernel, can be called after bpf_testmod is unloaded. Once the
++	 * release function is moved into the module, this wait can be removed.
++	 */
++ while (refcount_read(&prog_test_struct.cnt) > 1)
++ msleep(20);
++
+ return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+ }
+
+diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
+index 2caee8423ee022..f68fbc6c3f52a5 100644
+--- a/tools/testing/selftests/bpf/cgroup_helpers.c
++++ b/tools/testing/selftests/bpf/cgroup_helpers.c
+@@ -499,10 +499,20 @@ int setup_classid_environment(void)
+ return 1;
+ }
+
+- if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+- errno != EBUSY) {
+- log_err("mount cgroup net_cls");
+- return 1;
++ if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls")) {
++ if (errno != EBUSY) {
++ log_err("mount cgroup net_cls");
++ return 1;
++ }
++
++ if (rmdir(NETCLS_MOUNT_PATH)) {
++ log_err("rmdir cgroup net_cls");
++ return 1;
++ }
++ if (umount(CGROUP_MOUNT_DFLT)) {
++ log_err("umount cgroup base");
++ return 1;
++ }
+ }
+
+ cleanup_classid_environment();
+diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
+index e41eb33b27046c..5751614aef6a54 100644
+--- a/tools/testing/selftests/bpf/config
++++ b/tools/testing/selftests/bpf/config
+@@ -52,9 +52,12 @@ CONFIG_MPLS=y
+ CONFIG_MPLS_IPTUNNEL=y
+ CONFIG_MPLS_ROUTING=y
+ CONFIG_MPTCP=y
++CONFIG_NET_ACT_SKBMOD=y
++CONFIG_NET_CLS=y
+ CONFIG_NET_CLS_ACT=y
+ CONFIG_NET_CLS_BPF=y
+ CONFIG_NET_CLS_FLOWER=y
++CONFIG_NET_CLS_MATCHALL=y
+ CONFIG_NET_FOU=y
+ CONFIG_NET_FOU_IP_TUNNELS=y
+ CONFIG_NET_IPGRE=y
+diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
+index 2e70a604827845..49a29dbc191072 100644
+--- a/tools/testing/selftests/bpf/config.x86_64
++++ b/tools/testing/selftests/bpf/config.x86_64
+@@ -50,7 +50,6 @@ CONFIG_CRYPTO_SEQIV=y
+ CONFIG_CRYPTO_XXHASH=y
+ CONFIG_DCB=y
+ CONFIG_DEBUG_ATOMIC_SLEEP=y
+-CONFIG_DEBUG_CREDENTIALS=y
+ CONFIG_DEBUG_INFO_BTF=y
+ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+ CONFIG_DEBUG_MEMORY_INIT=y
+diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+index 18405c3b7cee9a..af10c309359a77 100644
+--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
++++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+@@ -412,7 +412,7 @@ static void test_sk_storage_map_stress_free(void)
+ rlim_new.rlim_max = rlim_new.rlim_cur + 128;
+ err = setrlimit(RLIMIT_NOFILE, &rlim_new);
+ CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
+- rlim_new.rlim_cur, errno);
++ (unsigned long) rlim_new.rlim_cur, errno);
+ }
+
+ err = do_sk_storage_map_stress_free();
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index da72a3a662300c..d2acc88752126c 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -427,6 +427,8 @@ struct nstoken *open_netns(const char *name)
+
+ return token;
+ fail:
++ if (token->orig_netns_fd != -1)
++ close(token->orig_netns_fd);
+ free(token);
+ return NULL;
+ }
+@@ -463,3 +465,27 @@ int get_socket_local_port(int sock_fd)
+
+ return -1;
+ }
++
++int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param)
++{
++ struct ifreq ifr = {0};
++ int sockfd, err;
++
++ sockfd = socket(AF_INET, SOCK_DGRAM, 0);
++ if (sockfd < 0)
++ return -errno;
++
++ memcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
++
++ ring_param->cmd = ETHTOOL_GRINGPARAM;
++ ifr.ifr_data = (char *)ring_param;
++
++ if (ioctl(sockfd, SIOCETHTOOL, &ifr) < 0) {
++ err = errno;
++ close(sockfd);
++ return -err;
++ }
++
++ close(sockfd);
++ return 0;
++}
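++
++/* Minimal usage sketch (hypothetical caller, not part of the patch):
++ *
++ *	struct ethtool_ringparam ring = {};
++ *
++ *	if (!get_hw_ring_size("eth0", &ring))
++ *		printf("rx pending: %u of %u\n",
++ *		       ring.rx_pending, ring.rx_max_pending);
++ */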
+diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
+index 5eccc67d1a9989..11cbe194769b16 100644
+--- a/tools/testing/selftests/bpf/network_helpers.h
++++ b/tools/testing/selftests/bpf/network_helpers.h
+@@ -9,8 +9,11 @@ typedef __u16 __sum16;
+ #include <linux/if_packet.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
++#include <linux/ethtool.h>
++#include <linux/sockios.h>
+ #include <netinet/tcp.h>
+ #include <bpf/bpf_endian.h>
++#include <net/if.h>
+
+ #define MAGIC_VAL 0x1234
+ #define NUM_ITER 100000
+@@ -60,6 +63,7 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
+ struct sockaddr_storage *addr, socklen_t *len);
+ char *ping_command(int family);
+ int get_socket_local_port(int sock_fd);
++int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param);
+
+ struct nstoken;
+ /**
+diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+index d2d9e965eba59f..f79815b7e951b3 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2021 Facebook */
+
+ #include <sys/syscall.h>
++#include <limits.h>
+ #include <test_progs.h>
+ #include "bloom_filter_map.skel.h"
+
+@@ -21,6 +22,11 @@ static void test_fail_cases(void)
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0"))
+ close(fd);
+
++ /* Invalid value size: too big */
++ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, INT32_MAX, 100, NULL);
++ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value too large"))
++ close(fd);
++
+ /* Invalid max entries size */
+ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL);
+ if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size"))
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+index 1f02168103dd77..f141e278b16fd4 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+@@ -334,6 +334,8 @@ static void test_task_stack(void)
+ do_dummy_read(skel->progs.dump_task_stack);
+ do_dummy_read(skel->progs.get_task_user_stacks);
+
++ ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");
++
+ bpf_iter_task_stack__destroy(skel);
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
+index b52ff8ce34db82..16bed9dd8e6a30 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c
+@@ -95,7 +95,7 @@ static unsigned short get_local_port(int fd)
+ struct sockaddr_in6 addr;
+ socklen_t addrlen = sizeof(addr);
+
+- if (!getsockname(fd, &addr, &addrlen))
++ if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen))
+ return ntohs(addr.sin6_port);
+
+ return 0;
+diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+index 4aabeaa525d474..d0d9a02415454b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
++++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+@@ -396,7 +396,8 @@ static void test_update_ca(void)
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+- ASSERT_OK_PTR(link, "attach_struct_ops");
++ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
++ goto out;
+
+ do_test("tcp_ca_update", NULL);
+ saved_ca1_cnt = skel->bss->ca1_cnt;
+@@ -410,6 +411,7 @@ static void test_update_ca(void)
+ ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
+
+ bpf_link__destroy(link);
++out:
+ tcp_ca_update__destroy(skel);
+ }
+
+@@ -425,7 +427,8 @@ static void test_update_wrong(void)
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+- ASSERT_OK_PTR(link, "attach_struct_ops");
++ if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
++ goto out;
+
+ do_test("tcp_ca_update", NULL);
+ saved_ca1_cnt = skel->bss->ca1_cnt;
+@@ -438,6 +441,7 @@ static void test_update_wrong(void)
+ ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
+
+ bpf_link__destroy(link);
++out:
+ tcp_ca_update__destroy(skel);
+ }
+
+@@ -452,7 +456,8 @@ static void test_mixed_links(void)
+ return;
+
+ link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
+- ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");
++ if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"))
++ goto out;
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
+ ASSERT_OK_PTR(link, "attach_struct_ops");
+@@ -465,6 +470,7 @@ static void test_mixed_links(void)
+
+ bpf_link__destroy(link);
+ bpf_link__destroy(link_nl);
++out:
+ tcp_ca_update__destroy(skel);
+ }
+
+@@ -507,7 +513,8 @@ static void test_link_replace(void)
+ bpf_link__destroy(link);
+
+ link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
+- ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
++ if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd"))
++ goto out;
+
+ /* BPF_F_REPLACE with a wrong old map Fd. It should fail!
+ *
+@@ -530,6 +537,7 @@ static void test_link_replace(void)
+
+ bpf_link__destroy(link);
+
++out:
+ tcp_ca_update__destroy(skel);
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
+index 4e0cdb5933188f..3660cb151e784f 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf.c
+@@ -4630,11 +4630,6 @@ static int test_btf_id(unsigned int test_num)
+ /* The map holds the last ref to BTF and its btf_id */
+ close(map_fd);
+ map_fd = -1;
+- btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id);
+- if (CHECK(btf_fd[0] >= 0, "BTF lingers")) {
+- err = -1;
+- goto done;
+- }
+
+ fprintf(stderr, "OK");
+
+@@ -5265,6 +5260,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
+ #endif
+
+ assert(0);
++ return 0;
+ }
+
+ static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+index a8b53b8736f018..f66ceccd7029c0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+@@ -25,7 +25,7 @@ static void test_lookup_update(void)
+ int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
+ int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
+ struct test_btf_map_in_map *skel;
+- int err, key = 0, val, i, fd;
++ int err, key = 0, val, i;
+
+ skel = test_btf_map_in_map__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
+@@ -102,30 +102,6 @@ static void test_lookup_update(void)
+ CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+ CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+
+- test_btf_map_in_map__destroy(skel);
+- skel = NULL;
+-
+- /* we need to either wait for or force synchronize_rcu(), before
+- * checking for "still exists" condition, otherwise map could still be
+- * resolvable by ID, causing false positives.
+- *
+- * Older kernels (5.8 and earlier) freed map only after two
+- * synchronize_rcu()s, so trigger two, to be entirely sure.
+- */
+- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+-
+- fd = bpf_map_get_fd_by_id(map1_id);
+- if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+- close(fd);
+- goto cleanup;
+- }
+- fd = bpf_map_get_fd_by_id(map2_id);
+- if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+- close(fd);
+- goto cleanup;
+- }
+-
+ cleanup:
+ test_btf_map_in_map__destroy(skel);
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+index 47f42e6801056b..26019313e1fc20 100644
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include "progs/core_reloc_types.h"
+ #include "bpf_testmod/bpf_testmod.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+index 5c0ebe6ba86673..95ea5a6a5f18dc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
++++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+@@ -4,7 +4,6 @@
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <net/if.h>
+-#include <linux/in6.h>
+
+ #include "test_progs.h"
+ #include "network_helpers.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+index f43fcb13d2c460..d3d94596ab79cf 100644
+--- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
++++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+@@ -98,7 +98,8 @@ static void test_dummy_init_ptr_arg(void)
+
+ static void test_dummy_multiple_args(void)
+ {
+- __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
++ struct bpf_dummy_ops_state st = { 7 };
++ __u64 args[5] = {(__u64)&st, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+@@ -115,6 +116,7 @@ static void test_dummy_multiple_args(void)
+ fd = bpf_program__fd(skel->progs.test_2);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
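++	/* test_2 is expected to record the pointed-to state value rather
++	 * than the raw pointer, so rewrite args[0] to st.val (7) before
++	 * comparing (assumption about the companion BPF prog change).
++	 */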
++ args[0] = 7;
+ for (i = 0; i < ARRAY_SIZE(args); i++) {
+ snprintf(name, sizeof(name), "arg %zu", i);
+ ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
+@@ -125,7 +127,8 @@ static void test_dummy_multiple_args(void)
+
+ static void test_dummy_sleepable(void)
+ {
+- __u64 args[1] = {0};
++ struct bpf_dummy_ops_state st;
++ __u64 args[1] = {(__u64)&st};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+@@ -144,6 +147,31 @@ static void test_dummy_sleepable(void)
+ dummy_st_ops_success__destroy(skel);
+ }
+
++/* The dummy_st_ops.test_sleepable() parameter is not marked as nullable,
++ * so the bpf_prog_test_run_opts() call below should be rejected: it tries
++ * to pass NULL for that parameter.
++ */
++static void test_dummy_sleepable_reject_null(void)
++{
++ __u64 args[1] = {0};
++ LIBBPF_OPTS(bpf_test_run_opts, attr,
++ .ctx_in = args,
++ .ctx_size_in = sizeof(args),
++ );
++ struct dummy_st_ops_success *skel;
++ int fd, err;
++
++ skel = dummy_st_ops_success__open_and_load();
++ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
++ return;
++
++ fd = bpf_program__fd(skel->progs.test_sleepable);
++ err = bpf_prog_test_run_opts(fd, &attr);
++ ASSERT_EQ(err, -EINVAL, "test_run");
++
++ dummy_st_ops_success__destroy(skel);
++}
++
+ void test_dummy_st_ops(void)
+ {
+ if (test__start_subtest("dummy_st_ops_attach"))
+@@ -156,6 +184,8 @@ void test_dummy_st_ops(void)
+ test_dummy_multiple_args();
+ if (test__start_subtest("dummy_sleepable"))
+ test_dummy_sleepable();
++ if (test__start_subtest("dummy_sleepable_reject_null"))
++ test_dummy_sleepable_reject_null();
+
+ RUN_TESTS(dummy_st_ops_fail);
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+index f949647dbbc21c..552a0875ca6dbf 100644
+--- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
++++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+@@ -21,13 +21,13 @@ static int do_sleep(void *skel)
+ }
+
+ #define STACK_SIZE (1024 * 1024)
+-static char child_stack[STACK_SIZE];
+
+ void test_fexit_sleep(void)
+ {
+ struct fexit_sleep_lskel *fexit_skel = NULL;
+ int wstatus, duration = 0;
+ pid_t cpid;
++ char *child_stack = NULL;
+ int err, fexit_cnt;
+
+ fexit_skel = fexit_sleep_lskel__open_and_load();
+@@ -38,6 +38,11 @@ void test_fexit_sleep(void)
+ if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+ goto cleanup;
+
++ child_stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE |
++ MAP_ANONYMOUS | MAP_STACK, -1, 0);
++ if (!ASSERT_NEQ(child_stack, MAP_FAILED, "mmap"))
++ goto cleanup;
++
+ cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
+ if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
+ goto cleanup;
+@@ -78,5 +83,6 @@ void test_fexit_sleep(void)
+ goto cleanup;
+
+ cleanup:
++ munmap(child_stack, STACK_SIZE);
+ fexit_sleep_lskel__destroy(fexit_skel);
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+index c4773173a4e437..3171047414a7dc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
++++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+@@ -1,8 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include <network_helpers.h>
+-#include <error.h>
+-#include <linux/if.h>
+ #include <linux/if_tun.h>
+ #include <sys/uio.h>
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+index c07991544a789e..34f8822fd2219c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include <network_helpers.h>
+ #include "kfree_skb.skel.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
+index 18cf7b17463d9f..98dde091d2825c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
++++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
+@@ -94,14 +94,8 @@ static struct {
+ { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
+ { "incorrect_head_off1", "bpf_list_head not found at offset=25" },
+ { "incorrect_head_off2", "bpf_list_head not found at offset=1" },
+- { "pop_front_off",
+- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
+- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
+- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
+- { "pop_back_off",
+- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
+- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
+- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
++ { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
++ { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
+ };
+
+ static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
+diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+index 61333f2a03f91f..68c08309dbc823 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
++++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+@@ -27,8 +27,6 @@
+ } \
+ })
+
+-#define NETNS "ns_lwt"
+-
+ static inline int netns_create(void)
+ {
+ return system("ip netns add " NETNS);
+diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+index 59b38569f310b9..7b458ae5f0f856 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+@@ -47,13 +47,13 @@
+ #include <linux/if_ether.h>
+ #include <linux/if_packet.h>
+ #include <linux/if_tun.h>
+-#include <linux/icmp.h>
+ #include <arpa/inet.h>
+ #include <unistd.h>
+ #include <errno.h>
+ #include <stdbool.h>
+ #include <stdlib.h>
+
++#define NETNS "ns_lwt_redirect"
+ #include "lwt_helpers.h"
+ #include "test_progs.h"
+ #include "network_helpers.h"
+@@ -203,6 +203,7 @@ static int setup_redirect_target(const char *target_dev, bool need_mac)
+ if (!ASSERT_GE(target_index, 0, "if_nametoindex"))
+ goto fail;
+
++ SYS(fail, "sysctl -w net.ipv6.conf.all.disable_ipv6=1");
+ SYS(fail, "ip link add link_err type dummy");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
+diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+index f4bb2d5fcae0a0..920ee3042d0934 100644
+--- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
++++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+@@ -48,6 +48,8 @@
+ * For case 2, force UDP packets to overflow fq limit. As long as kernel
+ * is not crashed, it is considered successful.
+ */
++#define NETNS "ns_lwt_reroute"
++#include <netinet/in.h>
+ #include "lwt_helpers.h"
+ #include "network_helpers.h"
+ #include <linux/net_tstamp.h>
+diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+index c7636e18b1ebda..aa9f67eb1c95b3 100644
+--- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
++++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
+@@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
+ int link_fd[2] = {};
+ __s32 btf_id[2] = {};
+
++ if (!env.has_testmod) {
++ test__skip();
++ return;
++ }
++
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ );
+diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+index 24d493482ffc75..2c57ceede095eb 100644
+--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
++++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+@@ -11,78 +11,168 @@
+ #include <sched.h>
+ #include <sys/wait.h>
+ #include <sys/mount.h>
+-#include <sys/fcntl.h>
++#include <fcntl.h>
++#include "network_helpers.h"
+
+ #define STACK_SIZE (1024 * 1024)
+ static char child_stack[STACK_SIZE];
+
+-static int test_current_pid_tgid(void *args)
++static int get_pid_tgid(pid_t *pid, pid_t *tgid,
++ struct test_ns_current_pid_tgid__bss *bss)
+ {
+- struct test_ns_current_pid_tgid__bss *bss;
+- struct test_ns_current_pid_tgid *skel;
+- int err = -1, duration = 0;
+- pid_t tgid, pid;
+ struct stat st;
++ int err;
+
+- skel = test_ns_current_pid_tgid__open_and_load();
+- if (CHECK(!skel, "skel_open_load", "failed to load skeleton\n"))
+- goto cleanup;
+-
+- pid = syscall(SYS_gettid);
+- tgid = getpid();
++ *pid = syscall(SYS_gettid);
++ *tgid = getpid();
+
+ err = stat("/proc/self/ns/pid", &st);
+- if (CHECK(err, "stat", "failed /proc/self/ns/pid: %d\n", err))
+- goto cleanup;
++ if (!ASSERT_OK(err, "stat /proc/self/ns/pid"))
++ return err;
+
+- bss = skel->bss;
+ bss->dev = st.st_dev;
+ bss->ino = st.st_ino;
+ bss->user_pid = 0;
+ bss->user_tgid = 0;
++ return 0;
++}
++
++static int test_current_pid_tgid_tp(void *args)
++{
++ struct test_ns_current_pid_tgid__bss *bss;
++ struct test_ns_current_pid_tgid *skel;
++ int ret = -1, err;
++ pid_t tgid, pid;
++
++ skel = test_ns_current_pid_tgid__open();
++ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
++ return ret;
++
++ bpf_program__set_autoload(skel->progs.tp_handler, true);
++
++ err = test_ns_current_pid_tgid__load(skel);
++ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
++ goto cleanup;
++
++ bss = skel->bss;
++ if (get_pid_tgid(&pid, &tgid, bss))
++ goto cleanup;
+
+ err = test_ns_current_pid_tgid__attach(skel);
+- if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
++ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__attach"))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+- ASSERT_EQ(bss->user_pid, pid, "pid");
+- ASSERT_EQ(bss->user_tgid, tgid, "tgid");
+- err = 0;
++ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
++ goto cleanup;
++ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
++ goto cleanup;
++ ret = 0;
+
+ cleanup:
+- test_ns_current_pid_tgid__destroy(skel);
++ test_ns_current_pid_tgid__destroy(skel);
++ return ret;
++}
+
+- return err;
++static int test_current_pid_tgid_cgrp(void *args)
++{
++ struct test_ns_current_pid_tgid__bss *bss;
++ struct test_ns_current_pid_tgid *skel;
++ int server_fd = -1, ret = -1, err;
++ int cgroup_fd = *(int *)args;
++ pid_t tgid, pid;
++
++ skel = test_ns_current_pid_tgid__open();
++ if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open"))
++ return ret;
++
++ bpf_program__set_autoload(skel->progs.cgroup_bind4, true);
++
++ err = test_ns_current_pid_tgid__load(skel);
++ if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load"))
++ goto cleanup;
++
++ bss = skel->bss;
++ if (get_pid_tgid(&pid, &tgid, bss))
++ goto cleanup;
++
++ skel->links.cgroup_bind4 = bpf_program__attach_cgroup(
++ skel->progs.cgroup_bind4, cgroup_fd);
++ if (!ASSERT_OK_PTR(skel->links.cgroup_bind4, "bpf_program__attach_cgroup"))
++ goto cleanup;
++
++ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
++ if (!ASSERT_GE(server_fd, 0, "start_server"))
++ goto cleanup;
++
++ if (!ASSERT_EQ(bss->user_pid, pid, "pid"))
++ goto cleanup;
++ if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid"))
++ goto cleanup;
++ ret = 0;
++
++cleanup:
++ if (server_fd >= 0)
++ close(server_fd);
++ test_ns_current_pid_tgid__destroy(skel);
++ return ret;
+ }
+
+-static void test_ns_current_pid_tgid_new_ns(void)
++static void test_ns_current_pid_tgid_new_ns(int (*fn)(void *), void *arg)
+ {
+- int wstatus, duration = 0;
++ int wstatus;
+ pid_t cpid;
+
+ /* Create a process in a new namespace, this process
+ * will be the init process of this new namespace hence will be pid 1.
+ */
+- cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE,
+- CLONE_NEWPID | SIGCHLD, NULL);
++ cpid = clone(fn, child_stack + STACK_SIZE,
++ CLONE_NEWPID | SIGCHLD, arg);
+
+- if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
++ if (!ASSERT_NEQ(cpid, -1, "clone"))
+ return;
+
+- if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
++ if (!ASSERT_NEQ(waitpid(cpid, &wstatus, 0), -1, "waitpid"))
+ return;
+
+- if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed"))
++ if (!ASSERT_OK(WEXITSTATUS(wstatus), "newns_pidtgid"))
+ return;
+ }
+
++static void test_in_netns(int (*fn)(void *), void *arg)
++{
++ struct nstoken *nstoken = NULL;
++
++ SYS(cleanup, "ip netns add ns_current_pid_tgid");
++ SYS(cleanup, "ip -net ns_current_pid_tgid link set dev lo up");
++
++ nstoken = open_netns("ns_current_pid_tgid");
++ if (!ASSERT_OK_PTR(nstoken, "open_netns"))
++ goto cleanup;
++
++ test_ns_current_pid_tgid_new_ns(fn, arg);
++
++cleanup:
++ if (nstoken)
++ close_netns(nstoken);
++ SYS_NOFAIL("ip netns del ns_current_pid_tgid");
++}
++
+ /* TODO: use a different tracepoint */
+ void serial_test_ns_current_pid_tgid(void)
+ {
+- if (test__start_subtest("ns_current_pid_tgid_root_ns"))
+- test_current_pid_tgid(NULL);
+- if (test__start_subtest("ns_current_pid_tgid_new_ns"))
+- test_ns_current_pid_tgid_new_ns();
++ if (test__start_subtest("root_ns_tp"))
++ test_current_pid_tgid_tp(NULL);
++ if (test__start_subtest("new_ns_tp"))
++ test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_tp, NULL);
++ if (test__start_subtest("new_ns_cgrp")) {
++ int cgroup_fd = -1;
++
++ cgroup_fd = test__join_cgroup("/sock_addr");
++ if (ASSERT_GE(cgroup_fd, 0, "join_cgroup")) {
++ test_in_netns(test_current_pid_tgid_cgrp, &cgroup_fd);
++ close(cgroup_fd);
++ }
++ }
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
+index daa952711d8fdf..e9c07d561ded6d 100644
+--- a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
++++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include <network_helpers.h>
+ #include "test_parse_tcp_hdr_opt.skel.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
+index b15b343ebb6b12..9adcda7f1fedc6 100644
+--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
++++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
+@@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread)
+ static void test_send_signal_perf(bool signal_thread)
+ {
+ struct perf_event_attr attr = {
+- .sample_period = 1,
++ .freq = 1,
++ .sample_freq = 1000,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+index 597d0467a92675..a1ab0af004549b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
++++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+@@ -18,7 +18,6 @@
+ #include <arpa/inet.h>
+ #include <assert.h>
+ #include <errno.h>
+-#include <error.h>
+ #include <fcntl.h>
+ #include <sched.h>
+ #include <stdio.h>
+@@ -994,7 +993,7 @@ static void drop_on_reuseport(const struct test *t)
+
+ err = update_lookup_map(t->sock_map, SERVER_A, server1);
+ if (err)
+- goto detach;
++ goto close_srv1;
+
+ /* second server on destination address we should never reach */
+ server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+index 8df8cbb447f10f..84d59419e4eb5b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+@@ -1841,7 +1841,7 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ if (err)
+ return;
+
+- if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
++ if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd))
+ goto close_cli0;
+ c1 = sfd[0], p1 = sfd[1];
+
+@@ -1876,7 +1876,6 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ close_cli0:
+ xclose(c0);
+ xclose(p0);
+-
+ }
+
+ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
+diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+index 58fe2c586ed76a..09c189761926c7 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
++++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+@@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -352,11 +352,11 @@ static void test_tailcall_4(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -442,11 +442,11 @@ static void test_tailcall_5(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
+@@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+@@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+- return;
++ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+- if (CHECK_FAIL(map_fd < 0))
+- return;
++ if (CHECK_FAIL(data_fd < 0))
++ goto out;
+
+ i = 0;
+ val.noise = noise;
+@@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void)
+ ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+ data_fd = bpf_map__fd(obj->maps.bss);
+- if (!ASSERT_GE(map_fd, 0, "bss map fd"))
++ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
+ goto out;
+
+ i = 0;
+diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
+index bc984114468556..1af9ec1149aab6 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tc_links.c
++++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
+@@ -9,6 +9,8 @@
+ #define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+ #include "test_tc_link.skel.h"
++
++#include "netlink_helpers.h"
+ #include "tc_helpers.h"
+
+ void serial_test_tc_links_basic(void)
+@@ -1787,6 +1789,65 @@ void serial_test_tc_links_ingress(void)
+ test_tc_links_ingress(BPF_TCX_INGRESS, false, false);
+ }
+
++struct qdisc_req {
++ struct nlmsghdr n;
++ struct tcmsg t;
++ char buf[1024];
++};
++
++static int qdisc_replace(int ifindex, const char *kind, bool block)
++{
++ struct rtnl_handle rth = { .fd = -1 };
++ struct qdisc_req req;
++ int err;
++
++ err = rtnl_open(&rth, 0);
++ if (!ASSERT_OK(err, "open_rtnetlink"))
++ return err;
++
++ memset(&req, 0, sizeof(req));
++ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
++ req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST;
++ req.n.nlmsg_type = RTM_NEWQDISC;
++ req.t.tcm_family = AF_UNSPEC;
++ req.t.tcm_ifindex = ifindex;
++ req.t.tcm_parent = 0xfffffff1;
++
++ addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1);
++ if (block)
++ addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1);
++
++ err = rtnl_talk(&rth, &req.n, NULL);
++ ASSERT_OK(err, "talk_rtnetlink");
++ rtnl_close(&rth);
++ return err;
++}
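++
++/* Roughly the iproute2 equivalent (for reference):
++ *	tc qdisc replace dev <dev> ingress_block 1 <kind>
++ * with ingress_block only set when 'block' is true.
++ */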
++
++void serial_test_tc_links_dev_chain0(void)
++{
++ int err, ifindex;
++
++ ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth");
++ ifindex = if_nametoindex("foo");
++ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
++ err = qdisc_replace(ifindex, "ingress", true);
++ if (!ASSERT_OK(err, "attaching ingress"))
++ goto cleanup;
++ ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block");
++ err = qdisc_replace(ifindex, "clsact", false);
++ if (!ASSERT_OK(err, "attaching clsact"))
++ goto cleanup;
++	/* Heuristic: kern_sync_rcu() alone does not work; a wait time of ~5s
++	 * reliably triggered the issue on kernels without the fix.
++	 */
++ sleep(5);
++ ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter");
++cleanup:
++ ASSERT_OK(system("ip link del dev foo"), "del veth");
++ ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed");
++ ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed");
++}
++
+ static void test_tc_links_dev_mixed(int target)
+ {
+ LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+index ca506d2fcf5886..d6fd09c2d6e645 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
++++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+@@ -2387,12 +2387,9 @@ static int generate_dummy_prog(void)
+ const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ const size_t log_buf_sz = 256;
+- char *log_buf;
++ char log_buf[log_buf_sz];
+ int fd = -1;
+
+- log_buf = malloc(log_buf_sz);
+- if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+- return fd;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+@@ -2402,7 +2399,6 @@ static int generate_dummy_prog(void)
+ prog_insns, prog_insn_cnt, &opts);
+ ASSERT_STREQ(log_buf, "", "log_0");
+ ASSERT_GE(fd, 0, "prog_fd");
+- free(log_buf);
+ return fd;
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+index 6ee22c3b251ad0..5a640173358754 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+@@ -110,11 +110,16 @@ static void netns_setup_namespaces_nofail(const char *verb)
+ }
+ }
+
++enum dev_mode {
++ MODE_VETH,
++};
++
+ struct netns_setup_result {
+- int ifindex_veth_src;
+- int ifindex_veth_src_fwd;
+- int ifindex_veth_dst;
+- int ifindex_veth_dst_fwd;
++ enum dev_mode dev_mode;
++ int ifindex_src;
++ int ifindex_src_fwd;
++ int ifindex_dst;
++ int ifindex_dst_fwd;
+ };
+
+ static int get_ifaddr(const char *name, char *ifaddr)
+@@ -140,55 +145,63 @@ static int get_ifaddr(const char *name, char *ifaddr)
+ static int netns_setup_links_and_routes(struct netns_setup_result *result)
+ {
+ struct nstoken *nstoken = NULL;
+- char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
++ char src_fwd_addr[IFADDR_STR_LEN+1] = {};
++ char src_addr[IFADDR_STR_LEN + 1] = {};
+
+- SYS(fail, "ip link add veth_src type veth peer name veth_src_fwd");
+- SYS(fail, "ip link add veth_dst type veth peer name veth_dst_fwd");
++ if (result->dev_mode == MODE_VETH) {
++ SYS(fail, "ip link add src type veth peer name src_fwd");
++ SYS(fail, "ip link add dst type veth peer name dst_fwd");
+
+- SYS(fail, "ip link set veth_dst_fwd address " MAC_DST_FWD);
+- SYS(fail, "ip link set veth_dst address " MAC_DST);
++ SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD);
++ SYS(fail, "ip link set dst address " MAC_DST);
++ }
+
+- if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
++ if (get_ifaddr("src_fwd", src_fwd_addr))
+ goto fail;
+
+- result->ifindex_veth_src = if_nametoindex("veth_src");
+- if (!ASSERT_GT(result->ifindex_veth_src, 0, "ifindex_veth_src"))
++ if (get_ifaddr("src", src_addr))
+ goto fail;
+
+- result->ifindex_veth_src_fwd = if_nametoindex("veth_src_fwd");
+- if (!ASSERT_GT(result->ifindex_veth_src_fwd, 0, "ifindex_veth_src_fwd"))
++ result->ifindex_src = if_nametoindex("src");
++ if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
+ goto fail;
+
+- result->ifindex_veth_dst = if_nametoindex("veth_dst");
+- if (!ASSERT_GT(result->ifindex_veth_dst, 0, "ifindex_veth_dst"))
++ result->ifindex_src_fwd = if_nametoindex("src_fwd");
++ if (!ASSERT_GT(result->ifindex_src_fwd, 0, "ifindex_src_fwd"))
+ goto fail;
+
+- result->ifindex_veth_dst_fwd = if_nametoindex("veth_dst_fwd");
+- if (!ASSERT_GT(result->ifindex_veth_dst_fwd, 0, "ifindex_veth_dst_fwd"))
++ result->ifindex_dst = if_nametoindex("dst");
++ if (!ASSERT_GT(result->ifindex_dst, 0, "ifindex_dst"))
+ goto fail;
+
+- SYS(fail, "ip link set veth_src netns " NS_SRC);
+- SYS(fail, "ip link set veth_src_fwd netns " NS_FWD);
+- SYS(fail, "ip link set veth_dst_fwd netns " NS_FWD);
+- SYS(fail, "ip link set veth_dst netns " NS_DST);
++ result->ifindex_dst_fwd = if_nametoindex("dst_fwd");
++ if (!ASSERT_GT(result->ifindex_dst_fwd, 0, "ifindex_dst_fwd"))
++ goto fail;
++
++ SYS(fail, "ip link set src netns " NS_SRC);
++ SYS(fail, "ip link set src_fwd netns " NS_FWD);
++ SYS(fail, "ip link set dst_fwd netns " NS_FWD);
++ SYS(fail, "ip link set dst netns " NS_DST);
+
+ /** setup in 'src' namespace */
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto fail;
+
+- SYS(fail, "ip addr add " IP4_SRC "/32 dev veth_src");
+- SYS(fail, "ip addr add " IP6_SRC "/128 dev veth_src nodad");
+- SYS(fail, "ip link set dev veth_src up");
++ SYS(fail, "ip addr add " IP4_SRC "/32 dev src");
++ SYS(fail, "ip addr add " IP6_SRC "/128 dev src nodad");
++ SYS(fail, "ip link set dev src up");
+
+- SYS(fail, "ip route add " IP4_DST "/32 dev veth_src scope global");
+- SYS(fail, "ip route add " IP4_NET "/16 dev veth_src scope global");
+- SYS(fail, "ip route add " IP6_DST "/128 dev veth_src scope global");
++ SYS(fail, "ip route add " IP4_DST "/32 dev src scope global");
++ SYS(fail, "ip route add " IP4_NET "/16 dev src scope global");
++ SYS(fail, "ip route add " IP6_DST "/128 dev src scope global");
+
+- SYS(fail, "ip neigh add " IP4_DST " dev veth_src lladdr %s",
+- veth_src_fwd_addr);
+- SYS(fail, "ip neigh add " IP6_DST " dev veth_src lladdr %s",
+- veth_src_fwd_addr);
++ if (result->dev_mode == MODE_VETH) {
++ SYS(fail, "ip neigh add " IP4_DST " dev src lladdr %s",
++ src_fwd_addr);
++ SYS(fail, "ip neigh add " IP6_DST " dev src lladdr %s",
++ src_fwd_addr);
++ }
+
+ close_netns(nstoken);
+
+@@ -201,15 +214,22 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
+ * needs v4 one in order to start ARP probing. IP4_NET route is added
+ * to the endpoints so that the ARP processing will reply.
+ */
+- SYS(fail, "ip addr add " IP4_SLL "/32 dev veth_src_fwd");
+- SYS(fail, "ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
+- SYS(fail, "ip link set dev veth_src_fwd up");
+- SYS(fail, "ip link set dev veth_dst_fwd up");
+-
+- SYS(fail, "ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
+- SYS(fail, "ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
+- SYS(fail, "ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
+- SYS(fail, "ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
++ SYS(fail, "ip addr add " IP4_SLL "/32 dev src_fwd");
++ SYS(fail, "ip addr add " IP4_DLL "/32 dev dst_fwd");
++ SYS(fail, "ip link set dev src_fwd up");
++ SYS(fail, "ip link set dev dst_fwd up");
++
++ SYS(fail, "ip route add " IP4_SRC "/32 dev src_fwd scope global");
++ SYS(fail, "ip route add " IP6_SRC "/128 dev src_fwd scope global");
++ SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
++ SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
++
++ if (result->dev_mode == MODE_VETH) {
++ SYS(fail, "ip neigh add " IP4_SRC " dev src_fwd lladdr %s", src_addr);
++ SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr);
++ SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST);
++ SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST);
++ }
+
+ close_netns(nstoken);
+
+@@ -218,16 +238,19 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
+ if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+ goto fail;
+
+- SYS(fail, "ip addr add " IP4_DST "/32 dev veth_dst");
+- SYS(fail, "ip addr add " IP6_DST "/128 dev veth_dst nodad");
+- SYS(fail, "ip link set dev veth_dst up");
++ SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
++ SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
++ SYS(fail, "ip link set dev dst up");
++ SYS(fail, "ip link set dev lo up");
+
+- SYS(fail, "ip route add " IP4_SRC "/32 dev veth_dst scope global");
+- SYS(fail, "ip route add " IP4_NET "/16 dev veth_dst scope global");
+- SYS(fail, "ip route add " IP6_SRC "/128 dev veth_dst scope global");
++ SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
++ SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
++ SYS(fail, "ip route add " IP6_SRC "/128 dev dst scope global");
+
+- SYS(fail, "ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+- SYS(fail, "ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
++ if (result->dev_mode == MODE_VETH) {
++ SYS(fail, "ip neigh add " IP4_SRC " dev dst lladdr " MAC_DST_FWD);
++ SYS(fail, "ip neigh add " IP6_SRC " dev dst lladdr " MAC_DST_FWD);
++ }
+
+ close_netns(nstoken);
+
+@@ -293,23 +316,23 @@ static int netns_load_bpf(const struct bpf_program *src_prog,
+ const struct bpf_program *chk_prog,
+ const struct netns_setup_result *setup_result)
+ {
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
+ int err;
+
+- /* tc qdisc add dev veth_src_fwd clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
+- /* tc filter add dev veth_src_fwd ingress bpf da src_prog */
+- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS, src_prog, 0);
+- /* tc filter add dev veth_src_fwd egress bpf da chk_prog */
+- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
++ /* tc qdisc add dev src_fwd clsact */
++ QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
++ /* tc filter add dev src_fwd ingress bpf da src_prog */
++ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS, src_prog, 0);
++ /* tc filter add dev src_fwd egress bpf da chk_prog */
++ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS, chk_prog, 0);
+
+- /* tc qdisc add dev veth_dst_fwd clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
+- /* tc filter add dev veth_dst_fwd ingress bpf da dst_prog */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
+- /* tc filter add dev veth_dst_fwd egress bpf da chk_prog */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
++ /* tc qdisc add dev dst_fwd clsact */
++ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
++ /* tc filter add dev dst_fwd ingress bpf da dst_prog */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, dst_prog, 0);
++ /* tc filter add dev dst_fwd egress bpf da chk_prog */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, chk_prog, 0);
+
+ return 0;
+ fail:
+@@ -396,9 +419,9 @@ static int set_forwarding(bool enable)
+ return 0;
+ }
+
+-static void rcv_tstamp(int fd, const char *expected, size_t s)
++static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
+ {
+- struct __kernel_timespec pkt_ts = {};
++ struct timespec pkt_ts = {};
+ char ctl[CMSG_SPACE(sizeof(pkt_ts))];
+ struct timespec now_ts;
+ struct msghdr msg = {};
+@@ -417,15 +440,21 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
+
+ ret = recvmsg(fd, &msg, 0);
+ if (!ASSERT_EQ(ret, s, "recvmsg"))
+- return;
++ return -1;
+ ASSERT_STRNEQ(data, expected, s, "expected rcv data");
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
+- cmsg->cmsg_type == SO_TIMESTAMPNS_NEW)
++ cmsg->cmsg_type == SO_TIMESTAMPNS)
+ memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
+
+ pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
++ if (tstamp) {
++ /* caller will check the tstamp itself */
++ *tstamp = pkt_ns;
++ return 0;
++ }
++
+ ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");
+
+ ret = clock_gettime(CLOCK_REALTIME, &now_ts);
+@@ -435,6 +464,60 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
+ if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))
+ ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,
+ "check rcv tstamp");
++ return 0;
++}
++
++static void rcv_tstamp(int fd, const char *expected, size_t s)
++{
++ __rcv_tstamp(fd, expected, s, NULL);
++}
++
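++/* Spin up a UDP server with SO_TIMESTAMPNS enabled and poke it until
++ * receive timestamps become non-zero, i.e. the kernel's
++ * netstamp_needed_key has been enabled. Returns the server fd for the
++ * caller to hold open, or a negative value on failure.
++ */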
++static int wait_netstamp_needed_key(void)
++{
++ int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n;
++ char buf[] = "testing testing";
++ struct nstoken *nstoken;
++ __u64 tstamp = 0;
++
++ nstoken = open_netns(NS_DST);
++ if (!nstoken)
++ return -1;
++
++ srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0);
++ if (!ASSERT_GE(srv_fd, 0, "start_server"))
++ goto done;
++
++ err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS,
++ &opt, sizeof(opt));
++ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
++ goto done;
++
++ cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
++ if (!ASSERT_GE(cli_fd, 0, "connect_to_fd"))
++ goto done;
++
++again:
++ n = write(cli_fd, buf, sizeof(buf));
++ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
++ goto done;
++ err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp);
++ if (!ASSERT_OK(err, "__rcv_tstamp"))
++ goto done;
++ if (!tstamp && nretries++ < 5) {
++ sleep(1);
++ printf("netstamp_needed_key retry#%d\n", nretries);
++ goto again;
++ }
++
++done:
++ if (!tstamp && srv_fd != -1) {
++ close(srv_fd);
++ srv_fd = -1;
++ }
++ if (cli_fd != -1)
++ close(cli_fd);
++ close_netns(nstoken);
++ return srv_fd;
+ }
+
+ static void snd_tstamp(int fd, char *b, size_t s)
+@@ -488,9 +571,9 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port)
+ return;
+
+ /* Ensure the kernel puts the (rcv) timestamp for all skb */
+- err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
++ err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS,
+ &opt, sizeof(opt));
+- if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
++ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)"))
+ goto done;
+
+ if (type == SOCK_STREAM) {
+@@ -539,10 +622,10 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port)
+ static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
+ const struct netns_setup_result *setup_result)
+ {
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src_fwd);
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_src);
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_src_fwd);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_src);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst);
+ struct nstoken *nstoken;
+ int err;
+
+@@ -550,58 +633,58 @@ static int netns_load_dtime_bpf(struct test_tc_dtime *skel,
+ nstoken = open_netns(NS_SRC);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
+ return -1;
+- /* tc qdisc add dev veth_src clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_src, setup_result->ifindex_veth_src);
+- /* tc filter add dev veth_src ingress bpf da ingress_host */
+- XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
+- /* tc filter add dev veth_src egress bpf da egress_host */
+- XGRESS_FILTER_ADD(&qdisc_veth_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
++ /* tc qdisc add dev src clsact */
++ QDISC_CLSACT_CREATE(&qdisc_src, setup_result->ifindex_src);
++ /* tc filter add dev src ingress bpf da ingress_host */
++ XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
++ /* tc filter add dev src egress bpf da egress_host */
++ XGRESS_FILTER_ADD(&qdisc_src, BPF_TC_EGRESS, skel->progs.egress_host, 0);
+ close_netns(nstoken);
+
+ /* setup ns_dst tc progs */
+ nstoken = open_netns(NS_DST);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST))
+ return -1;
+- /* tc qdisc add dev veth_dst clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_dst, setup_result->ifindex_veth_dst);
+- /* tc filter add dev veth_dst ingress bpf da ingress_host */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
+- /* tc filter add dev veth_dst egress bpf da egress_host */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
++ /* tc qdisc add dev dst clsact */
++ QDISC_CLSACT_CREATE(&qdisc_dst, setup_result->ifindex_dst);
++ /* tc filter add dev dst ingress bpf da ingress_host */
++ XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_INGRESS, skel->progs.ingress_host, 0);
++ /* tc filter add dev dst egress bpf da egress_host */
++ XGRESS_FILTER_ADD(&qdisc_dst, BPF_TC_EGRESS, skel->progs.egress_host, 0);
+ close_netns(nstoken);
+
+ /* setup ns_fwd tc progs */
+ nstoken = open_netns(NS_FWD);
+ if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
+ return -1;
+- /* tc qdisc add dev veth_dst_fwd clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
+- /* tc filter add dev veth_dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
++ /* tc qdisc add dev dst_fwd clsact */
++ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
++ /* tc filter add dev dst_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio100, 100);
+- /* tc filter add dev veth_dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS,
++ /* tc filter add dev dst_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio101, 101);
+- /* tc filter add dev veth_dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
++ /* tc filter add dev dst_fwd egress prio 100 bpf da egress_fwdns_prio100 */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio100, 100);
+- /* tc filter add dev veth_dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS,
++ /* tc filter add dev dst_fwd egress prio 101 bpf da egress_fwdns_prio101 */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio101, 101);
+
+- /* tc qdisc add dev veth_src_fwd clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_src_fwd, setup_result->ifindex_veth_src_fwd);
+- /* tc filter add dev veth_src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
+- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
++ /* tc qdisc add dev src_fwd clsact */
++ QDISC_CLSACT_CREATE(&qdisc_src_fwd, setup_result->ifindex_src_fwd);
++ /* tc filter add dev src_fwd ingress prio 100 bpf da ingress_fwdns_prio100 */
++ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio100, 100);
+- /* tc filter add dev veth_src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
+- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_INGRESS,
++ /* tc filter add dev src_fwd ingress prio 101 bpf da ingress_fwdns_prio101 */
++ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_INGRESS,
+ skel->progs.ingress_fwdns_prio101, 101);
+- /* tc filter add dev veth_src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
+- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
++ /* tc filter add dev src_fwd egress prio 100 bpf da egress_fwdns_prio100 */
++ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio100, 100);
+- /* tc filter add dev veth_src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
+- XGRESS_FILTER_ADD(&qdisc_veth_src_fwd, BPF_TC_EGRESS,
++ /* tc filter add dev src_fwd egress prio 101 bpf da egress_fwdns_prio101 */
++ XGRESS_FILTER_ADD(&qdisc_src_fwd, BPF_TC_EGRESS,
+ skel->progs.egress_fwdns_prio101, 101);
+ close_netns(nstoken);
+ return 0;
+@@ -771,14 +854,23 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
+ {
+ struct test_tc_dtime *skel;
+ struct nstoken *nstoken;
+- int err;
++ int hold_tstamp_fd, err;
++
++	/* Hold a socket with SOCK_TIMESTAMP set to ensure there is no
++	 * delay in the kernel's net_enable_timestamp().
++	 * This guarantees the following tests see a non-zero rcv
++	 * tstamp in recvmsg().
++	 */
++ hold_tstamp_fd = wait_netstamp_needed_key();
++ if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key"))
++ return;
+
+ skel = test_tc_dtime__open();
+ if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
+- return;
++ goto done;
+
+- skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
++ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
++ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_dtime__load(skel);
+ if (!ASSERT_OK(err, "test_tc_dtime__load"))
+@@ -820,6 +912,7 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
+
+ done:
+ test_tc_dtime__destroy(skel);
++ close(hold_tstamp_fd);
+ }
+
+ static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
+@@ -868,8 +961,8 @@ static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
+ if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
+ goto done;
+
+- skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
++ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
++ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_neigh__load(skel);
+ if (!ASSERT_OK(err, "test_tc_neigh__load"))
+@@ -904,8 +997,8 @@ static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
+ if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+ goto done;
+
+- skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
++ skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
++ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+@@ -996,7 +1089,7 @@ static int tun_relay_loop(int src_fd, int target_fd)
+ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+ {
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_tun_fwd);
+- LIBBPF_OPTS(bpf_tc_hook, qdisc_veth_dst_fwd);
++ LIBBPF_OPTS(bpf_tc_hook, qdisc_dst_fwd);
+ struct test_tc_peer *skel = NULL;
+ struct nstoken *nstoken = NULL;
+ int err;
+@@ -1045,7 +1138,7 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+ goto fail;
+
+ skel->rodata->IFINDEX_SRC = ifindex;
+- skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
++ skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
+
+ err = test_tc_peer__load(skel);
+ if (!ASSERT_OK(err, "test_tc_peer__load"))
+@@ -1053,19 +1146,19 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+
+ /* Load "tc_src_l3" to the tun_fwd interface to redirect packets
+ * towards dst, and "tc_dst" to redirect packets
+- * and "tc_chk" on veth_dst_fwd to drop non-redirected packets.
++ * and "tc_chk" on dst_fwd to drop non-redirected packets.
+ */
+ /* tc qdisc add dev tun_fwd clsact */
+ QDISC_CLSACT_CREATE(&qdisc_tun_fwd, ifindex);
+ /* tc filter add dev tun_fwd ingress bpf da tc_src_l3 */
+ XGRESS_FILTER_ADD(&qdisc_tun_fwd, BPF_TC_INGRESS, skel->progs.tc_src_l3, 0);
+
+- /* tc qdisc add dev veth_dst_fwd clsact */
+- QDISC_CLSACT_CREATE(&qdisc_veth_dst_fwd, setup_result->ifindex_veth_dst_fwd);
+- /* tc filter add dev veth_dst_fwd ingress bpf da tc_dst_l3 */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
+- /* tc filter add dev veth_dst_fwd egress bpf da tc_chk */
+- XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
++ /* tc qdisc add dev dst_fwd clsact */
++ QDISC_CLSACT_CREATE(&qdisc_dst_fwd, setup_result->ifindex_dst_fwd);
++ /* tc filter add dev dst_fwd ingress bpf da tc_dst_l3 */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_INGRESS, skel->progs.tc_dst_l3, 0);
++ /* tc filter add dev dst_fwd egress bpf da tc_chk */
++ XGRESS_FILTER_ADD(&qdisc_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0);
+
+ /* Setup route and neigh tables */
+ SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
+@@ -1074,17 +1167,17 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+ SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
+ SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
+
+- SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
++ SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev src scope global");
+ SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
+ " dev tun_src scope global");
+- SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
+- SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
++ SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev dst scope global");
++ SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev src scope global");
+ SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
+ " dev tun_src scope global");
+- SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
++ SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev dst scope global");
+
+- SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+- SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
++ SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
++ SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev dst lladdr " MAC_DST_FWD);
+
+ if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+ goto fail;
+@@ -1106,9 +1199,9 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+ close_netns(nstoken);
+ }
+
+-#define RUN_TEST(name) \
++#define RUN_TEST(name, mode) \
+ ({ \
+- struct netns_setup_result setup_result; \
++ struct netns_setup_result setup_result = { .dev_mode = mode, }; \
+ if (test__start_subtest(#name)) \
+ if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
+ if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \
+@@ -1122,11 +1215,11 @@ static void *test_tc_redirect_run_tests(void *arg)
+ {
+ netns_setup_namespaces_nofail("delete");
+
+- RUN_TEST(tc_redirect_peer);
+- RUN_TEST(tc_redirect_peer_l3);
+- RUN_TEST(tc_redirect_neigh);
+- RUN_TEST(tc_redirect_neigh_fib);
+- RUN_TEST(tc_redirect_dtime);
++ RUN_TEST(tc_redirect_peer, MODE_VETH);
++ RUN_TEST(tc_redirect_peer_l3, MODE_VETH);
++ RUN_TEST(tc_redirect_neigh, MODE_VETH);
++ RUN_TEST(tc_redirect_neigh_fib, MODE_VETH);
++ RUN_TEST(tc_redirect_dtime, MODE_VETH);
+ return NULL;
+ }
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+index 8fe84da1b9b49b..6a2da7a64419ae 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
++++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+@@ -1,4 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <test_progs.h>
+ #include "cgroup_helpers.h"
+ #include "network_helpers.h"
+diff --git a/tools/testing/selftests/bpf/prog_tests/time_tai.c b/tools/testing/selftests/bpf/prog_tests/time_tai.c
+index a311198236661b..f45af1b0ef2c44 100644
+--- a/tools/testing/selftests/bpf/prog_tests/time_tai.c
++++ b/tools/testing/selftests/bpf/prog_tests/time_tai.c
+@@ -56,7 +56,7 @@ void test_time_tai(void)
+ ASSERT_NEQ(ts2, 0, "tai_ts2");
+
+ /* TAI is moving forward only */
+- ASSERT_GT(ts2, ts1, "tai_forward");
++ ASSERT_GE(ts2, ts1, "tai_forward");
+
+ /* Check for future */
+ ret = clock_gettime(CLOCK_TAI, &now_tai);
+diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+index e51721df14fc19..dfff6feac12c3c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
++++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c
+@@ -4,6 +4,7 @@
+ #define _GNU_SOURCE
+ #include <linux/compiler.h>
+ #include <linux/ring_buffer.h>
++#include <linux/build_bug.h>
+ #include <pthread.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
+index e3e68c97b40cfd..e51d11c36a211e 100644
+--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
++++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
+@@ -31,6 +31,7 @@
+ #include "verifier_helper_restricted.skel.h"
+ #include "verifier_helper_value_access.skel.h"
+ #include "verifier_int_ptr.skel.h"
++#include "verifier_iterating_callbacks.skel.h"
+ #include "verifier_jeq_infer_not_null.skel.h"
+ #include "verifier_ld_ind.skel.h"
+ #include "verifier_ldsx.skel.h"
+@@ -138,6 +139,7 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces
+ void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
+ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); }
+ void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
++void test_verifier_iterating_callbacks(void) { RUN(verifier_iterating_callbacks); }
+ void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
+ void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
+ void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+index f09505f8b0386c..53d6ad8c2257eb 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+@@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void)
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (bpf_object__load(obj))
+- return;
++ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+index c3b45745cbccd7..6d8b54124cb359 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+@@ -511,7 +511,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+- if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
++ if (!ASSERT_EQ(query_opts.feature_flags, 0,
+ "bond query_opts.feature_flags"))
+ goto out;
+
+@@ -601,7 +601,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
+ if (!ASSERT_OK(err, "bond bpf_xdp_query"))
+ goto out;
+
+- ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
++ ASSERT_EQ(query_opts.feature_flags, 0,
+ "bond query_opts.feature_flags");
+ out:
+ bpf_link__destroy(link);
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+index 498d3bdaa4b0b5..bad0ea167be701 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+@@ -107,8 +107,8 @@ void test_xdp_do_redirect(void)
+ .attach_point = BPF_TC_INGRESS);
+
+ memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp));
+- *((__u32 *)data) = 0x42; /* metadata test value */
+- *((__u32 *)data + 4) = 0;
++ ((__u32 *)data)[0] = 0x42; /* metadata test value */
++ ((__u32 *)data)[1] = 0;
+
+ skel = test_xdp_do_redirect__open();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
+index e4bfbba6c19360..c8ec0d0368e4a1 100644
+--- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
++++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
+@@ -61,14 +61,15 @@ SEC("lsm.s/socket_post_create")
+ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+ {
++ struct sock *sk = sock->sk;
+ struct storage *stg;
+ __u32 pid;
+
+ pid = bpf_get_current_pid_tgid() >> 32;
+- if (pid != bench_pid)
++ if (pid != bench_pid || !sk)
+ return 0;
+
+- stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL,
++ stg = bpf_sk_storage_get(&sk_storage_map, sk, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ if (stg)
+diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+index f2b8167b72a84e..442f4ca39fd761 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
++++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+@@ -35,6 +35,8 @@ int dump_task_stack(struct bpf_iter__task *ctx)
+ return 0;
+ }
+
++int num_user_stacks = 0;
++
+ SEC("iter/task")
+ int get_task_user_stacks(struct bpf_iter__task *ctx)
+ {
+@@ -51,6 +53,9 @@ int get_task_user_stacks(struct bpf_iter__task *ctx)
+ if (res <= 0)
+ return 0;
+
++ /* Only one task, the current one, should succeed */
++ ++num_user_stacks;
++
+ buf_sz += res;
+
+ /* If the verifier doesn't refine bpf_get_task_stack res, and instead
+diff --git a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
+index 4ce76eb064c41c..d461746fd3c1e7 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
++++ b/tools/testing/selftests/bpf/progs/bpf_loop_bench.c
+@@ -15,13 +15,16 @@ static int empty_callback(__u32 index, void *data)
+ return 0;
+ }
+
++static int outer_loop(__u32 index, void *data)
++{
++ bpf_loop(nr_loops, empty_callback, NULL, 0);
++ __sync_add_and_fetch(&hits, nr_loops);
++ return 0;
++}
++
+ SEC("fentry/" SYS_PREFIX "sys_getpgid")
+ int benchmark(void *ctx)
+ {
+- for (int i = 0; i < 1000; i++) {
+- bpf_loop(nr_loops, empty_callback, NULL, 0);
+-
+- __sync_add_and_fetch(&hits, nr_loops);
+- }
++ bpf_loop(1000, outer_loop, NULL, 0);
+ return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
+index 38a57a2e70dbe1..799fff4995d870 100644
+--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
++++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
+@@ -99,6 +99,9 @@
+ #elif defined(__TARGET_ARCH_arm64)
+ #define SYSCALL_WRAPPER 1
+ #define SYS_PREFIX "__arm64_"
++#elif defined(__TARGET_ARCH_riscv)
++#define SYSCALL_WRAPPER 1
++#define SYS_PREFIX "__riscv_"
+ #else
+ #define SYSCALL_WRAPPER 0
+ #define SYS_PREFIX "__se_"
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+index ba97165bdb2822..a657651eba523e 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c
+@@ -14,9 +14,9 @@ typedef int *ptr_arr_t[6];
+
+ typedef int *ptr_multiarr_t[7][8][9][10];
+
+-typedef int * (*fn_ptr_arr_t[11])();
++typedef int * (*fn_ptr_arr_t[11])(void);
+
+-typedef int * (*fn_ptr_multiarr_t[12][13])();
++typedef int * (*fn_ptr_multiarr_t[12][13])(void);
+
+ struct root_struct {
+ arr_t _1;
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+index ad21ee8c7e2345..29d01fff32bd22 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+@@ -100,7 +100,7 @@ typedef void (*printf_fn_t)(const char *, ...);
+ * `int -> char *` function and returns pointer to a char. Equivalent:
+ * typedef char * (*fn_input_t)(int);
+ * typedef char * (*fn_output_outer_t)(fn_input_t);
+- * typedef const fn_output_outer_t (* fn_output_inner_t)();
++ * typedef const fn_output_outer_t (* fn_output_inner_t)(void);
+ * typedef const fn_output_inner_t fn_ptr_arr2_t[5];
+ */
+ /* ----- START-EXPECTED-OUTPUT ----- */
+@@ -127,7 +127,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
+
+ typedef char * (*fn_ptr_arr1_t[10])(int **);
+
+-typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));
++typedef char * (* (* const fn_ptr_arr2_t[5])(void))(char * (*)(int));
+
+ struct struct_w_typedefs {
+ int_t a;
+diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c
+index 76d661b20e87d0..56c764df819679 100644
+--- a/tools/testing/selftests/bpf/progs/cb_refs.c
++++ b/tools/testing/selftests/bpf/progs/cb_refs.c
+@@ -33,6 +33,7 @@ int underflow_prog(void *ctx)
+ if (!p)
+ return 0;
+ bpf_for_each_map_elem(&array_map, cb1, &p, 0);
++ bpf_kfunc_call_test_release(p);
+ return 0;
+ }
+
+diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+index a0778fe7857a14..41d59f0ee606c7 100644
+--- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h
++++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h
+@@ -3,8 +3,6 @@
+ #ifndef __PROGS_CG_STORAGE_MULTI_H
+ #define __PROGS_CG_STORAGE_MULTI_H
+
+-#include <asm/types.h>
+-
+ struct cgroup_value {
+ __u32 egress_pkts;
+ __u32 ingress_pkts;
+diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c
+index a9bf6ea336cf6b..a988d2823b5285 100644
+--- a/tools/testing/selftests/bpf/progs/cpumask_failure.c
++++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c
+@@ -61,11 +61,8 @@ SEC("tp_btf/task_newtask")
+ __failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
+ int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
+ {
+- struct bpf_cpumask *cpumask;
+-
+ /* Can't set the CPU of a non-struct bpf_cpumask. */
+ bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
+- __sink(cpumask);
+
+ return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+index 1efa746c25dc77..ec0c595d47af84 100644
+--- a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
++++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+@@ -11,8 +11,17 @@ int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+ {
+ int ret;
+
+- if (!state)
+- return 0xf2f3f4f5;
++	/* Check that the nullable status of 'state' is detected correctly.
++	 * If the 'state' argument were assumed non-null by the verifier,
++	 * the code below would be deleted as dead (which it shouldn't be).
++	 * Hide it from the compiler behind an 'asm' block to avoid
++	 * unnecessary optimizations.
++ */
++ asm volatile (
++ "if %[state] != 0 goto +2;"
++ "r0 = 0xf2f3f4f5;"
++ "exit;"
++ ::[state]"p"(state));
+
+ ret = state->val;
+ state->val = 0x5a;
+@@ -25,7 +34,7 @@ SEC("struct_ops/test_2")
+ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ char a3, unsigned long a4)
+ {
+- test_2_args[0] = (unsigned long)state;
++ test_2_args[0] = state->val;
+ test_2_args[1] = a1;
+ test_2_args[2] = a2;
+ test_2_args[3] = a3;
+diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
+index 7ce7e827d5f01c..66a60bfb58672f 100644
+--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
++++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
+@@ -80,7 +80,7 @@ SEC("?raw_tp")
+ __failure __msg("Unreleased reference id=2")
+ int ringbuf_missing_release1(void *ctx)
+ {
+- struct bpf_dynptr ptr;
++ struct bpf_dynptr ptr = {};
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+@@ -1385,7 +1385,7 @@ SEC("?raw_tp")
+ __failure __msg("Expected an initialized dynptr as arg #1")
+ int dynptr_adjust_invalid(void *ctx)
+ {
+- struct bpf_dynptr ptr;
++ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_adjust(&ptr, 1, 2);
+@@ -1398,7 +1398,7 @@ SEC("?raw_tp")
+ __failure __msg("Expected an initialized dynptr as arg #1")
+ int dynptr_is_null_invalid(void *ctx)
+ {
+- struct bpf_dynptr ptr;
++ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_is_null(&ptr);
+@@ -1411,7 +1411,7 @@ SEC("?raw_tp")
+ __failure __msg("Expected an initialized dynptr as arg #1")
+ int dynptr_is_rdonly_invalid(void *ctx)
+ {
+- struct bpf_dynptr ptr;
++ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_is_rdonly(&ptr);
+@@ -1424,7 +1424,7 @@ SEC("?raw_tp")
+ __failure __msg("Expected an initialized dynptr as arg #1")
+ int dynptr_size_invalid(void *ctx)
+ {
+- struct bpf_dynptr ptr;
++ struct bpf_dynptr ptr = {};
+
+ /* this should fail */
+ bpf_dynptr_size(&ptr);
+@@ -1437,7 +1437,7 @@ SEC("?raw_tp")
+ __failure __msg("Expected an initialized dynptr as arg #1")
+ int clone_invalid1(void *ctx)
+ {
+- struct bpf_dynptr ptr1;
++ struct bpf_dynptr ptr1 = {};
+ struct bpf_dynptr ptr2;
+
+ /* this should fail */
+diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
+index 6b9b3c56f009a3..5685c2810fe530 100644
+--- a/tools/testing/selftests/bpf/progs/iters.c
++++ b/tools/testing/selftests/bpf/progs/iters.c
+@@ -14,6 +14,13 @@ int my_pid;
+ int arr[256];
+ int small_arr[16] SEC(".data.small_arr");
+
++struct {
++ __uint(type, BPF_MAP_TYPE_HASH);
++ __uint(max_entries, 10);
++ __type(key, int);
++ __type(value, int);
++} amap SEC(".maps");
++
+ #ifdef REAL_TEST
+ #define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
+ #else
+@@ -716,4 +723,746 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
+ return 0;
+ }
+
++SEC("?raw_tp")
++__failure
++__msg("R1 type=scalar expected=fp")
++__naked int delayed_read_mark(void)
++{
++	/* This is equivalent to the C program below.
++	 * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
++	 * The state with r7=&fp[-16] is visited first and follows the r6 != 42 ... continue branch.
++	 * At this point the iterator next() call is reached with an r7 that has no read mark.
++	 * The loop body with r7=0xdead would only be visited if the verifier decided to continue
++	 * with a second loop iteration. Absence of a read mark on r7 might affect the state
++	 * equivalence logic used for iterator convergence tracking.
++ *
++ * r7 = &fp[-16]
++ * fp[-16] = 0
++ * r6 = bpf_get_prandom_u32()
++ * bpf_iter_num_new(&fp[-8], 0, 10)
++ * while (bpf_iter_num_next(&fp[-8])) {
++ * r6++
++ * if (r6 != 42) {
++ * r7 = 0xdead
++ * continue;
++ * }
++ * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
++ * }
++ * bpf_iter_num_destroy(&fp[-8])
++ * return 0
++ */
++ asm volatile (
++ "r7 = r10;"
++ "r7 += -16;"
++ "r0 = 0;"
++ "*(u64 *)(r7 + 0) = r0;"
++ "call %[bpf_get_prandom_u32];"
++ "r6 = r0;"
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "1:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto 2f;"
++ "r6 += 1;"
++ "if r6 != 42 goto 3f;"
++ "r7 = 0xdead;"
++ "goto 1b;"
++ "3:"
++ "r1 = r7;"
++ "r2 = 8;"
++ "r3 = 0xdeadbeef;"
++ "call %[bpf_probe_read_user];"
++ "goto 1b;"
++ "2:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ :
++ : __imm(bpf_get_prandom_u32),
++ __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy),
++ __imm(bpf_probe_read_user)
++ : __clobber_all
++ );
++}
++
++SEC("?raw_tp")
++__failure
++__msg("math between fp pointer and register with unbounded")
++__naked int delayed_precision_mark(void)
++{
++	/* This is equivalent to the C program below.
++	 * The test is similar to delayed_read_mark but verifies that incomplete
++	 * precision marks don't fool the verifier.
++	 * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
++	 * The state with r7=-16 is visited first and follows the r6 != 42 ... continue branch.
++	 * At this point the iterator next() call is reached with an r7 that has no read
++	 * or precision marks.
++	 * The loop body with r7=-32 would only be visited if the verifier decided to continue
++	 * with a second loop iteration. Absence of a precision mark on r7 might affect the state
++	 * equivalence logic used for iterator convergence tracking.
++ *
++ * r8 = 0
++ * fp[-16] = 0
++ * r7 = -16
++ * r6 = bpf_get_prandom_u32()
++ * bpf_iter_num_new(&fp[-8], 0, 10)
++ * while (bpf_iter_num_next(&fp[-8])) {
++ * if (r6 != 42) {
++ * r7 = -32
++ * r6 = bpf_get_prandom_u32()
++ * continue;
++ * }
++ * r0 = r10
++ * r0 += r7
++ * r8 = *(u64 *)(r0 + 0) // this is not safe
++ * r6 = bpf_get_prandom_u32()
++ * }
++ * bpf_iter_num_destroy(&fp[-8])
++ * return r8
++ */
++ asm volatile (
++ "r8 = 0;"
++ "*(u64 *)(r10 - 16) = r8;"
++ "r7 = -16;"
++ "call %[bpf_get_prandom_u32];"
++ "r6 = r0;"
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "1:"
++ "r1 = r10;"
++ "r1 += -8;\n"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto 2f;"
++ "if r6 != 42 goto 3f;"
++ "r7 = -32;"
++ "call %[bpf_get_prandom_u32];"
++ "r6 = r0;"
++ "goto 1b;\n"
++ "3:"
++ "r0 = r10;"
++ "r0 += r7;"
++ "r8 = *(u64 *)(r0 + 0);"
++ "call %[bpf_get_prandom_u32];"
++ "r6 = r0;"
++ "goto 1b;\n"
++ "2:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = r8;"
++ "exit;"
++ :
++ : __imm(bpf_get_prandom_u32),
++ __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy),
++ __imm(bpf_probe_read_user)
++ : __clobber_all
++ );
++}
++
++SEC("?raw_tp")
++__failure
++__msg("math between fp pointer and register with unbounded")
++__flag(BPF_F_TEST_STATE_FREQ)
++__naked int loop_state_deps1(void)
++{
++	/* This is equivalent to the C program below.
++	 *
++	 * The case turns out to be tricky in the sense that:
++	 * - states with c=-25 are explored only on the second iteration
++	 *   of the outer loop;
++	 * - states with a read+precise mark on c are explored only on
++	 *   the second iteration of the inner loop, and in a state which
++	 *   is pushed to the states stack first.
++	 *
++	 * Depending on the details of the iterator convergence logic,
++	 * the verifier might stop state traversal too early and miss
++	 * the unsafe c=-25 memory access.
++ *
++ * j = iter_new(); // fp[-16]
++ * a = 0; // r6
++ * b = 0; // r7
++ * c = -24; // r8
++ * while (iter_next(j)) {
++ * i = iter_new(); // fp[-8]
++ * a = 0; // r6
++ * b = 0; // r7
++ * while (iter_next(i)) {
++ * if (a == 1) {
++ * a = 0;
++ * b = 1;
++ * } else if (a == 0) {
++ * a = 1;
++ * if (random() == 42)
++ * continue;
++ * if (b == 1) {
++ * *(r10 + c) = 7; // this is not safe
++ * iter_destroy(i);
++ * iter_destroy(j);
++ * return;
++ * }
++ * }
++ * }
++ * iter_destroy(i);
++ * a = 0;
++ * b = 0;
++ * c = -25;
++ * }
++ * iter_destroy(j);
++ * return;
++ */
++ asm volatile (
++ "r1 = r10;"
++ "r1 += -16;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "r6 = 0;"
++ "r7 = 0;"
++ "r8 = -24;"
++ "j_loop_%=:"
++ "r1 = r10;"
++ "r1 += -16;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto j_loop_end_%=;"
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "r6 = 0;"
++ "r7 = 0;"
++ "i_loop_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto i_loop_end_%=;"
++ "check_one_r6_%=:"
++ "if r6 != 1 goto check_zero_r6_%=;"
++ "r6 = 0;"
++ "r7 = 1;"
++ "goto i_loop_%=;"
++ "check_zero_r6_%=:"
++ "if r6 != 0 goto i_loop_%=;"
++ "r6 = 1;"
++ "call %[bpf_get_prandom_u32];"
++ "if r0 != 42 goto check_one_r7_%=;"
++ "goto i_loop_%=;"
++ "check_one_r7_%=:"
++ "if r7 != 1 goto i_loop_%=;"
++ "r0 = r10;"
++ "r0 += r8;"
++ "r1 = 7;"
++ "*(u64 *)(r0 + 0) = r1;"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r1 = r10;"
++ "r1 += -16;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ "i_loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r6 = 0;"
++ "r7 = 0;"
++ "r8 = -25;"
++ "goto j_loop_%=;"
++ "j_loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -16;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ :
++ : __imm(bpf_get_prandom_u32),
++ __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy)
++ : __clobber_all
++ );
++}
++
++SEC("?raw_tp")
++__failure
++__msg("math between fp pointer and register with unbounded")
++__flag(BPF_F_TEST_STATE_FREQ)
++__naked int loop_state_deps2(void)
++{
++	/* This is equivalent to the C program below.
++	 *
++	 * The case turns out to be tricky in the sense that:
++	 * - states with a read+precise mark on c are explored only on the second
++	 *   iteration of the first inner loop, and in a state which is pushed to
++	 *   the states stack first;
++	 * - states with c=-25 are explored only on the second iteration of the
++	 *   second inner loop, and in a state which is pushed to the states stack
++	 *   first.
++	 *
++	 * Depending on the details of the iterator convergence logic,
++	 * the verifier might stop state traversal too early and miss
++	 * the unsafe c=-25 memory access.
++ *
++ * j = iter_new(); // fp[-16]
++ * a = 0; // r6
++ * b = 0; // r7
++ * c = -24; // r8
++ * while (iter_next(j)) {
++ * i = iter_new(); // fp[-8]
++ * a = 0; // r6
++ * b = 0; // r7
++ * while (iter_next(i)) {
++ * if (a == 1) {
++ * a = 0;
++ * b = 1;
++ * } else if (a == 0) {
++ * a = 1;
++ * if (random() == 42)
++ * continue;
++ * if (b == 1) {
++ * *(r10 + c) = 7; // this is not safe
++ * iter_destroy(i);
++ * iter_destroy(j);
++ * return;
++ * }
++ * }
++ * }
++ * iter_destroy(i);
++ * i = iter_new(); // fp[-8]
++ * a = 0; // r6
++ * b = 0; // r7
++ * while (iter_next(i)) {
++ * if (a == 1) {
++ * a = 0;
++ * b = 1;
++ * } else if (a == 0) {
++ * a = 1;
++ * if (random() == 42)
++ * continue;
++ * if (b == 1) {
++ * a = 0;
++ * c = -25;
++ * }
++ * }
++ * }
++ * iter_destroy(i);
++ * }
++ * iter_destroy(j);
++ * return;
++ */
++ asm volatile (
++ "r1 = r10;"
++ "r1 += -16;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "r6 = 0;"
++ "r7 = 0;"
++ "r8 = -24;"
++ "j_loop_%=:"
++ "r1 = r10;"
++ "r1 += -16;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto j_loop_end_%=;"
++
++ /* first inner loop */
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "r6 = 0;"
++ "r7 = 0;"
++ "i_loop_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto i_loop_end_%=;"
++ "check_one_r6_%=:"
++ "if r6 != 1 goto check_zero_r6_%=;"
++ "r6 = 0;"
++ "r7 = 1;"
++ "goto i_loop_%=;"
++ "check_zero_r6_%=:"
++ "if r6 != 0 goto i_loop_%=;"
++ "r6 = 1;"
++ "call %[bpf_get_prandom_u32];"
++ "if r0 != 42 goto check_one_r7_%=;"
++ "goto i_loop_%=;"
++ "check_one_r7_%=:"
++ "if r7 != 1 goto i_loop_%=;"
++ "r0 = r10;"
++ "r0 += r8;"
++ "r1 = 7;"
++ "*(u64 *)(r0 + 0) = r1;"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r1 = r10;"
++ "r1 += -16;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ "i_loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++
++ /* second inner loop */
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "r6 = 0;"
++ "r7 = 0;"
++ "i2_loop_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto i2_loop_end_%=;"
++ "check2_one_r6_%=:"
++ "if r6 != 1 goto check2_zero_r6_%=;"
++ "r6 = 0;"
++ "r7 = 1;"
++ "goto i2_loop_%=;"
++ "check2_zero_r6_%=:"
++ "if r6 != 0 goto i2_loop_%=;"
++ "r6 = 1;"
++ "call %[bpf_get_prandom_u32];"
++ "if r0 != 42 goto check2_one_r7_%=;"
++ "goto i2_loop_%=;"
++ "check2_one_r7_%=:"
++ "if r7 != 1 goto i2_loop_%=;"
++ "r6 = 0;"
++ "r8 = -25;"
++ "goto i2_loop_%=;"
++ "i2_loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++
++ "r6 = 0;"
++ "r7 = 0;"
++ "goto j_loop_%=;"
++ "j_loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -16;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ :
++ : __imm(bpf_get_prandom_u32),
++ __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy)
++ : __clobber_all
++ );
++}
++
++SEC("?raw_tp")
++__success
++__naked int triple_continue(void)
++{
++	/* This is equivalent to the C program below.
++	 * The high branching factor of the loop body turned out to be
++	 * problematic for one of the iterator convergence tracking
++	 * algorithms explored.
++ *
++ * r6 = bpf_get_prandom_u32()
++ * bpf_iter_num_new(&fp[-8], 0, 10)
++ * while (bpf_iter_num_next(&fp[-8])) {
++ * if (bpf_get_prandom_u32() != 42)
++ * continue;
++ * if (bpf_get_prandom_u32() != 42)
++ * continue;
++ * if (bpf_get_prandom_u32() != 42)
++ * continue;
++ * r0 += 0;
++ * }
++ * bpf_iter_num_destroy(&fp[-8])
++ * return 0
++ */
++ asm volatile (
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "loop_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto loop_end_%=;"
++ "call %[bpf_get_prandom_u32];"
++ "if r0 != 42 goto loop_%=;"
++ "call %[bpf_get_prandom_u32];"
++ "if r0 != 42 goto loop_%=;"
++ "call %[bpf_get_prandom_u32];"
++ "if r0 != 42 goto loop_%=;"
++ "r0 += 0;"
++ "goto loop_%=;"
++ "loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ :
++ : __imm(bpf_get_prandom_u32),
++ __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy)
++ : __clobber_all
++ );
++}
++
++SEC("?raw_tp")
++__success
++__naked int widen_spill(void)
++{
++	/* This is equivalent to the C program below.
++	 * The counter is stored in fp[-16]; if this counter is not widened,
++	 * verifier states representing loop iterations would never converge.
++ *
++ * fp[-16] = 0
++ * bpf_iter_num_new(&fp[-8], 0, 10)
++ * while (bpf_iter_num_next(&fp[-8])) {
++ * r0 = fp[-16];
++ * r0 += 1;
++ * fp[-16] = r0;
++ * }
++ * bpf_iter_num_destroy(&fp[-8])
++ * return 0
++ */
++ asm volatile (
++ "r0 = 0;"
++ "*(u64 *)(r10 - 16) = r0;"
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "loop_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto loop_end_%=;"
++ "r0 = *(u64 *)(r10 - 16);"
++ "r0 += 1;"
++ "*(u64 *)(r10 - 16) = r0;"
++ "goto loop_%=;"
++ "loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ :
++ : __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy)
++ : __clobber_all
++ );
++}
++
++SEC("raw_tp")
++__success
++__naked int checkpoint_states_deletion(void)
++{
++	/* This is equivalent to the C program below.
++ *
++ * int *a, *b, *c, *d, *e, *f;
++ * int i, sum = 0;
++ * bpf_for(i, 0, 10) {
++ * a = bpf_map_lookup_elem(&amap, &i);
++ * b = bpf_map_lookup_elem(&amap, &i);
++ * c = bpf_map_lookup_elem(&amap, &i);
++ * d = bpf_map_lookup_elem(&amap, &i);
++ * e = bpf_map_lookup_elem(&amap, &i);
++ * f = bpf_map_lookup_elem(&amap, &i);
++ * if (a) sum += 1;
++ * if (b) sum += 1;
++ * if (c) sum += 1;
++ * if (d) sum += 1;
++ * if (e) sum += 1;
++ * if (f) sum += 1;
++ * }
++ * return 0;
++ *
++	 * The body of the loop spawns multiple simulation paths
++	 * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
++	 * Each combination is unique from the states_equal() point of view.
++	 * An explored-states checkpoint is created after each iterator next call.
++	 * The iterator convergence logic expects that the current state eventually
++	 * becomes equal to one of the explored states, and thus loop
++	 * exploration finishes (at least for a specific path).
++	 * The verifier evicts explored states with a high miss-to-hit ratio
++	 * to avoid comparing the current state with too many explored
++	 * states per instruction.
++	 * This test is designed to "stress test" the eviction policy defined by the formula:
++ *
++ * sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
++ *
++ * Currently N is set to 64, which allows for 6 variables in this test.
++ */
++ asm volatile (
++ "r6 = 0;" /* a */
++ "r7 = 0;" /* b */
++ "r8 = 0;" /* c */
++ "*(u64 *)(r10 - 24) = r6;" /* d */
++ "*(u64 *)(r10 - 32) = r6;" /* e */
++ "*(u64 *)(r10 - 40) = r6;" /* f */
++ "r9 = 0;" /* sum */
++ "r1 = r10;"
++ "r1 += -8;"
++ "r2 = 0;"
++ "r3 = 10;"
++ "call %[bpf_iter_num_new];"
++ "loop_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_next];"
++ "if r0 == 0 goto loop_end_%=;"
++
++ "*(u64 *)(r10 - 16) = r0;"
++
++ "r1 = %[amap] ll;"
++ "r2 = r10;"
++ "r2 += -16;"
++ "call %[bpf_map_lookup_elem];"
++ "r6 = r0;"
++
++ "r1 = %[amap] ll;"
++ "r2 = r10;"
++ "r2 += -16;"
++ "call %[bpf_map_lookup_elem];"
++ "r7 = r0;"
++
++ "r1 = %[amap] ll;"
++ "r2 = r10;"
++ "r2 += -16;"
++ "call %[bpf_map_lookup_elem];"
++ "r8 = r0;"
++
++ "r1 = %[amap] ll;"
++ "r2 = r10;"
++ "r2 += -16;"
++ "call %[bpf_map_lookup_elem];"
++ "*(u64 *)(r10 - 24) = r0;"
++
++ "r1 = %[amap] ll;"
++ "r2 = r10;"
++ "r2 += -16;"
++ "call %[bpf_map_lookup_elem];"
++ "*(u64 *)(r10 - 32) = r0;"
++
++ "r1 = %[amap] ll;"
++ "r2 = r10;"
++ "r2 += -16;"
++ "call %[bpf_map_lookup_elem];"
++ "*(u64 *)(r10 - 40) = r0;"
++
++ "if r6 == 0 goto +1;"
++ "r9 += 1;"
++ "if r7 == 0 goto +1;"
++ "r9 += 1;"
++ "if r8 == 0 goto +1;"
++ "r9 += 1;"
++ "r0 = *(u64 *)(r10 - 24);"
++ "if r0 == 0 goto +1;"
++ "r9 += 1;"
++ "r0 = *(u64 *)(r10 - 32);"
++ "if r0 == 0 goto +1;"
++ "r9 += 1;"
++ "r0 = *(u64 *)(r10 - 40);"
++ "if r0 == 0 goto +1;"
++ "r9 += 1;"
++
++ "goto loop_%=;"
++ "loop_end_%=:"
++ "r1 = r10;"
++ "r1 += -8;"
++ "call %[bpf_iter_num_destroy];"
++ "r0 = 0;"
++ "exit;"
++ :
++ : __imm(bpf_map_lookup_elem),
++ __imm(bpf_iter_num_new),
++ __imm(bpf_iter_num_next),
++ __imm(bpf_iter_num_destroy),
++ __imm_addr(amap)
++ : __clobber_all
++ );
++}
++
++__u32 upper, select_n, result;
++__u64 global;
++
++static __noinline bool nest_2(char *str)
++{
++	/* Some insns (including branch insns) to ensure stacksafe() is triggered
++	 * in nest_2(). This way, stacksafe() can compare the frame associated with nest_1().
++ */
++ if (str[0] == 't')
++ return true;
++ if (str[1] == 'e')
++ return true;
++ if (str[2] == 's')
++ return true;
++ if (str[3] == 't')
++ return true;
++ return false;
++}
++
++static __noinline bool nest_1(int n)
++{
++	/* case 0: allocate stack, case 1: do not allocate stack */
++ switch (n) {
++ case 0: {
++ char comm[16];
++
++ if (bpf_get_current_comm(comm, 16))
++ return false;
++ return nest_2(comm);
++ }
++ case 1:
++ return nest_2((char *)&global);
++ default:
++ return false;
++ }
++}
++
++SEC("raw_tp")
++__success
++int iter_subprog_check_stacksafe(const void *ctx)
++{
++ long i;
++
++ bpf_for(i, 0, upper) {
++ if (!nest_1(select_n)) {
++ result = 1;
++ return 0;
++ }
++ }
++
++ result = 2;
++ return 0;
++}
++
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+index f46965053acb2e..4d619bea9c7588 100644
+--- a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
++++ b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
+@@ -4,6 +4,10 @@
+ #include <bpf/bpf_helpers.h>
+ #include "bpf_misc.h"
+
++#ifndef __clang__
++#pragma GCC diagnostic ignored "-Warray-bounds"
++#endif
++
+ char _license[] SEC("license") = "GPL";
+
+ struct {
+diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
+index f4c63daba22970..6438982b928bdc 100644
+--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
++++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
+@@ -591,7 +591,9 @@ int pop_ptr_off(void *(*op)(void *head))
+ n = op(&p->head);
+ bpf_spin_unlock(&p->lock);
+
+- bpf_this_cpu_ptr(n);
++ if (!n)
++ return 0;
++ bpf_spin_lock((void *)n);
+ return 0;
+ }
+
+diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
+index bc8ea56671a16a..3bf75f4ea690a5 100644
+--- a/tools/testing/selftests/bpf/progs/local_storage.c
++++ b/tools/testing/selftests/bpf/progs/local_storage.c
+@@ -140,11 +140,12 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ {
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct local_storage *storage;
++ struct sock *sk = sock->sk;
+
+- if (pid != monitored_pid)
++ if (pid != monitored_pid || !sk)
+ return 0;
+
+- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0);
++ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, 0);
+ if (!storage)
+ return 0;
+
+@@ -155,24 +156,24 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ /* This tests that we can associate multiple elements
+ * with the local storage.
+ */
+- storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
++ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+- if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk))
++ if (bpf_sk_storage_delete(&sk_storage_map2, sk))
+ return 0;
+
+- storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
++ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+- if (bpf_sk_storage_delete(&sk_storage_map, sock->sk))
++ if (bpf_sk_storage_delete(&sk_storage_map, sk))
+ return 0;
+
+ /* Ensure that the sk_storage_map is disconnected from the storage. */
+- if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap)
++ if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap)
+ return 0;
+
+ sk_storage_result = 0;
+@@ -185,11 +186,12 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ {
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct local_storage *storage;
++ struct sock *sk = sock->sk;
+
+- if (pid != monitored_pid)
++ if (pid != monitored_pid || !sk)
+ return 0;
+
+- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
++ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+index 02c11d16b692ab..d7598538aa2dad 100644
+--- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c
++++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+@@ -103,11 +103,15 @@ static __always_inline int real_bind(struct socket *sock,
+ int addrlen)
+ {
+ struct sockaddr_ll sa = {};
++ struct sock *sk = sock->sk;
+
+- if (sock->sk->__sk_common.skc_family != AF_PACKET)
++ if (!sk)
++ return 1;
++
++ if (sk->__sk_common.skc_family != AF_PACKET)
+ return 1;
+
+- if (sock->sk->sk_kern_sock)
++ if (sk->sk_kern_sock)
+ return 1;
+
+ bpf_probe_read_kernel(&sa, sizeof(sa), address);
+diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+index 3325da17ec81af..efaf622c28ddec 100644
+--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
++++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+@@ -316,7 +316,7 @@ struct lpm_trie {
+ } __attribute__((preserve_access_index));
+
+ struct lpm_key {
+- struct bpf_lpm_trie_key trie_key;
++ struct bpf_lpm_trie_key_hdr trie_key;
+ __u32 data;
+ };
+
+diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c
+index c39f559d3100e8..42c4a8b62e3602 100644
+--- a/tools/testing/selftests/bpf/progs/pyperf180.c
++++ b/tools/testing/selftests/bpf/progs/pyperf180.c
+@@ -1,4 +1,26 @@
+ // SPDX-License-Identifier: GPL-2.0
+ // Copyright (c) 2019 Facebook
+ #define STACK_MAX_LEN 180
++
++/* An llvm upstream commit in clang18,
++ * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e,
++ * changed inlining behavior and caused a compilation failure, as some branch
++ * target distances exceeded the 16-bit representation which is the maximum for
++ * cpu v1/v2/v3. The macro __BPF_CPU_VERSION__ was later implemented in clang18
++ * to specify which cpu version is used for compilation. So a smaller
++ * unroll_count can be set if __BPF_CPU_VERSION__ is less than 4, which
++ * reduces some branch target distances and resolves the compilation failure.
++ *
++ * To capture the case where a developer/CI uses clang18 but the corresponding
++ * repo checkpoint does not have __BPF_CPU_VERSION__, a smaller unroll_count
++ * is set as well to prevent potential compilation failures.
++ */
++#ifdef __BPF_CPU_VERSION__
++#if __BPF_CPU_VERSION__ < 4
++#define UNROLL_COUNT 90
++#endif
++#elif __clang_major__ == 18
++#define UNROLL_COUNT 90
++#endif
++
+ #include "pyperf.h"
+diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
+index e02cfd38074695..40df2cc26eaf9d 100644
+--- a/tools/testing/selftests/bpf/progs/strobemeta.h
++++ b/tools/testing/selftests/bpf/progs/strobemeta.h
+@@ -24,9 +24,11 @@ struct task_struct {};
+ #define STACK_TABLE_EPOCH_SHIFT 20
+ #define STROBE_MAX_STR_LEN 1
+ #define STROBE_MAX_CFGS 32
++#define READ_MAP_VAR_PAYLOAD_CAP \
++ ((1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
+ #define STROBE_MAX_PAYLOAD \
+ (STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \
+- STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
++ STROBE_MAX_MAPS * READ_MAP_VAR_PAYLOAD_CAP)
+
+ struct strobe_value_header {
+ /*
+@@ -355,7 +357,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
+ size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data,
+- void *payload)
++ size_t off)
+ {
+ void *location;
+ uint64_t len;
+@@ -366,7 +368,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
+ return 0;
+
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
+- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr);
++ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr);
+ /*
+	 * if bpf_probe_read_user_str returns an error (<0), due to casting to
+	 * unsigned int, it will become a big number, so the next check is
+@@ -378,14 +380,14 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg,
+ return 0;
+
+ data->str_lens[idx] = len;
+- return len;
++ return off + len;
+ }
+
+-static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
+- size_t idx, void *tls_base,
+- struct strobe_value_generic *value,
+- struct strobemeta_payload *data,
+- void *payload)
++static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
++ size_t idx, void *tls_base,
++ struct strobe_value_generic *value,
++ struct strobemeta_payload *data,
++ size_t off)
+ {
+ struct strobe_map_descr* descr = &data->map_descrs[idx];
+ struct strobe_map_raw map;
+@@ -397,11 +399,11 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
+
+ location = calc_location(&cfg->map_locs[idx], tls_base);
+ if (!location)
+- return payload;
++ return off;
+
+ bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location);
+ if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr))
+- return payload;
++ return off;
+
+ descr->id = map.id;
+ descr->cnt = map.cnt;
+@@ -410,10 +412,10 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
+ data->req_meta_valid = 1;
+ }
+
+- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag);
++ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, map.tag);
+ if (len <= STROBE_MAX_STR_LEN) {
+ descr->tag_len = len;
+- payload += len;
++ off += len;
+ }
+
+ #ifdef NO_UNROLL
+@@ -426,22 +428,22 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
+ break;
+
+ descr->key_lens[i] = 0;
+- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
++ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
+ map.entries[i].key);
+ if (len <= STROBE_MAX_STR_LEN) {
+ descr->key_lens[i] = len;
+- payload += len;
++ off += len;
+ }
+ descr->val_lens[i] = 0;
+- len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN,
++ len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN,
+ map.entries[i].val);
+ if (len <= STROBE_MAX_STR_LEN) {
+ descr->val_lens[i] = len;
+- payload += len;
++ off += len;
+ }
+ }
+
+- return payload;
++ return off;
+ }
+
+ #ifdef USE_BPF_LOOP
+@@ -455,14 +457,20 @@ struct read_var_ctx {
+ struct strobemeta_payload *data;
+ void *tls_base;
+ struct strobemeta_cfg *cfg;
+- void *payload;
++ size_t payload_off;
+ /* value gets mutated */
+ struct strobe_value_generic *value;
+ enum read_type type;
+ };
+
+-static int read_var_callback(__u32 index, struct read_var_ctx *ctx)
++static int read_var_callback(__u64 index, struct read_var_ctx *ctx)
+ {
++	/* Lose precision info for ctx->payload_off; the verifier won't track
++	 * the double xor, and barrier_var() is needed to force clang to keep both xors.
++ */
++ ctx->payload_off ^= index;
++ barrier_var(ctx->payload_off);
++ ctx->payload_off ^= index;
+ switch (ctx->type) {
+ case READ_INT_VAR:
+ if (index >= STROBE_MAX_INTS)
+@@ -472,14 +480,18 @@ static int read_var_callback(__u32 index, struct read_var_ctx *ctx)
+ case READ_MAP_VAR:
+ if (index >= STROBE_MAX_MAPS)
+ return 1;
+- ctx->payload = read_map_var(ctx->cfg, index, ctx->tls_base,
+- ctx->value, ctx->data, ctx->payload);
++ if (ctx->payload_off > sizeof(ctx->data->payload) - READ_MAP_VAR_PAYLOAD_CAP)
++ return 1;
++ ctx->payload_off = read_map_var(ctx->cfg, index, ctx->tls_base,
++ ctx->value, ctx->data, ctx->payload_off);
+ break;
+ case READ_STR_VAR:
+ if (index >= STROBE_MAX_STRS)
+ return 1;
+- ctx->payload += read_str_var(ctx->cfg, index, ctx->tls_base,
+- ctx->value, ctx->data, ctx->payload);
++ if (ctx->payload_off > sizeof(ctx->data->payload) - STROBE_MAX_STR_LEN)
++ return 1;
++ ctx->payload_off = read_str_var(ctx->cfg, index, ctx->tls_base,
++ ctx->value, ctx->data, ctx->payload_off);
+ break;
+ }
+ return 0;
+@@ -501,7 +513,8 @@ static void *read_strobe_meta(struct task_struct *task,
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ struct strobe_value_generic value = {0};
+ struct strobemeta_cfg *cfg;
+- void *tls_base, *payload;
++ size_t payload_off;
++ void *tls_base;
+
+ cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid);
+ if (!cfg)
+@@ -509,7 +522,7 @@ static void *read_strobe_meta(struct task_struct *task,
+
+ data->int_vals_set_mask = 0;
+ data->req_meta_valid = 0;
+- payload = data->payload;
++ payload_off = 0;
+ /*
+ * we don't have struct task_struct definition, it should be:
+ * tls_base = (void *)task->thread.fsbase;
+@@ -522,7 +535,7 @@ static void *read_strobe_meta(struct task_struct *task,
+ .tls_base = tls_base,
+ .value = &value,
+ .data = data,
+- .payload = payload,
++ .payload_off = 0,
+ };
+ int err;
+
+@@ -540,6 +553,11 @@ static void *read_strobe_meta(struct task_struct *task,
+ err = bpf_loop(STROBE_MAX_MAPS, read_var_callback, &ctx, 0);
+ if (err != STROBE_MAX_MAPS)
+ return NULL;
++
++ payload_off = ctx.payload_off;
++	/* this should not really happen, here only to satisfy the verifier */
++ if (payload_off > sizeof(data->payload))
++ payload_off = sizeof(data->payload);
+ #else
+ #ifdef NO_UNROLL
+ #pragma clang loop unroll(disable)
+@@ -555,7 +573,7 @@ static void *read_strobe_meta(struct task_struct *task,
+ #pragma unroll
+ #endif /* NO_UNROLL */
+ for (int i = 0; i < STROBE_MAX_STRS; ++i) {
+- payload += read_str_var(cfg, i, tls_base, &value, data, payload);
++ payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
+ }
+ #ifdef NO_UNROLL
+ #pragma clang loop unroll(disable)
+@@ -563,7 +581,7 @@ static void *read_strobe_meta(struct task_struct *task,
+ #pragma unroll
+ #endif /* NO_UNROLL */
+ for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
+- payload = read_map_var(cfg, i, tls_base, &value, data, payload);
++ payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
+ }
+ #endif /* USE_BPF_LOOP */
+
+@@ -571,7 +589,7 @@ static void *read_strobe_meta(struct task_struct *task,
+	 * return a pointer right after the end of the payload, so it's possible to
+	 * calculate the exact amount of useful data that needs to be sent
+ */
+- return payload;
++ return &data->payload[payload_off];
+ }
+
+ SEC("raw_tracepoint/kfree_skb")
+diff --git a/tools/testing/selftests/bpf/progs/test_global_func16.c b/tools/testing/selftests/bpf/progs/test_global_func16.c
+index e7206304632e15..e3e64bc472cdaf 100644
+--- a/tools/testing/selftests/bpf/progs/test_global_func16.c
++++ b/tools/testing/selftests/bpf/progs/test_global_func16.c
+@@ -13,7 +13,7 @@ __noinline int foo(int (*arr)[10])
+ }
+
+ SEC("cgroup_skb/ingress")
+-__failure __msg("invalid indirect read from stack")
++__success
+ int global_func16(struct __sk_buff *skb)
+ {
+ int array[10];
+diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
+index a32e11c7d933ee..5de44b09e8ec17 100644
+--- a/tools/testing/selftests/bpf/progs/test_global_func17.c
++++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
+@@ -5,6 +5,7 @@
+
+ __noinline int foo(int *p)
+ {
++ barrier_var(p);
+ return p ? (*p = 42) : 0;
+ }
+
+diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
+index f5ac5f3e89196f..568816307f7125 100644
+--- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
++++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
+@@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
+
+ if (fmode & FMODE_WRITE)
+ return -EACCES;
++ barrier();
+
+ return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
+index f416032ba858b4..b295f9b721bf89 100644
+--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
++++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
+@@ -21,6 +21,32 @@ struct {
+ __type(value, __u32);
+ } mim_hash SEC(".maps");
+
++/* The following three maps are used to test that
++ * a perf_event_array map can be an inner
++ * map of hash/array_of_maps.
++ */
++struct perf_event_array {
++ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
++ __type(key, __u32);
++ __type(value, __u32);
++} inner_map0 SEC(".maps");
++
++struct {
++ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
++ __uint(max_entries, 1);
++ __type(key, __u32);
++ __array(values, struct perf_event_array);
++} mim_array_pe SEC(".maps") = {
++ .values = {&inner_map0}};
++
++struct {
++ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
++ __uint(max_entries, 1);
++ __type(key, __u32);
++ __array(values, struct perf_event_array);
++} mim_hash_pe SEC(".maps") = {
++ .values = {&inner_map0}};
++
+ SEC("xdp")
+ int xdp_mimtest0(struct xdp_md *ctx)
+ {
+diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+index 0763d49f9c4213..d0010e698f6688 100644
+--- a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
++++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
+@@ -10,18 +10,29 @@ __u64 user_tgid = 0;
+ __u64 dev = 0;
+ __u64 ino = 0;
+
+-SEC("tracepoint/syscalls/sys_enter_nanosleep")
+-int handler(const void *ctx)
++static void get_pid_tgid(void)
+ {
+ struct bpf_pidns_info nsdata;
+
+ if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info)))
+- return 0;
++ return;
+
+ user_pid = nsdata.pid;
+ user_tgid = nsdata.tgid;
++}
+
++SEC("?tracepoint/syscalls/sys_enter_nanosleep")
++int tp_handler(const void *ctx)
++{
++ get_pid_tgid();
+ return 0;
+ }
+
++SEC("?cgroup/bind4")
++int cgroup_bind4(struct bpf_sock_addr *ctx)
++{
++ get_pid_tgid();
++ return 1;
++}
++
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+index f66af753bbbb89..e68da33d7631d3 100644
+--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+@@ -597,12 +597,18 @@ int ip6vxlan_get_tunnel_src(struct __sk_buff *skb)
+ return TC_ACT_OK;
+ }
+
++struct local_geneve_opt {
++ struct geneve_opt gopt;
++ int data;
++};
++
+ SEC("tc")
+ int geneve_set_tunnel(struct __sk_buff *skb)
+ {
+ int ret;
+ struct bpf_tunnel_key key;
+- struct geneve_opt gopt;
++ struct local_geneve_opt local_gopt;
++ struct geneve_opt *gopt = (struct geneve_opt *) &local_gopt;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+@@ -610,14 +616,14 @@ int geneve_set_tunnel(struct __sk_buff *skb)
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+- __builtin_memset(&gopt, 0x0, sizeof(gopt));
+- gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
+- gopt.type = 0x08;
+- gopt.r1 = 0;
+- gopt.r2 = 0;
+- gopt.r3 = 0;
+- gopt.length = 2; /* 4-byte multiple */
+- *(int *) &gopt.opt_data = bpf_htonl(0xdeadbeef);
++ __builtin_memset(gopt, 0x0, sizeof(local_gopt));
++ gopt->opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
++ gopt->type = 0x08;
++ gopt->r1 = 0;
++ gopt->r2 = 0;
++ gopt->r3 = 0;
++ gopt->length = 2; /* 4-byte multiple */
++ *(int *) &gopt->opt_data = bpf_htonl(0xdeadbeef);
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_ZERO_CSUM_TX);
+@@ -626,7 +632,7 @@ int geneve_set_tunnel(struct __sk_buff *skb)
+ return TC_ACT_SHOT;
+ }
+
+- ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
++ ret = bpf_skb_set_tunnel_opt(skb, gopt, sizeof(local_gopt));
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+@@ -661,7 +667,8 @@ SEC("tc")
+ int ip6geneve_set_tunnel(struct __sk_buff *skb)
+ {
+ struct bpf_tunnel_key key;
+- struct geneve_opt gopt;
++ struct local_geneve_opt local_gopt;
++ struct geneve_opt *gopt = (struct geneve_opt *) &local_gopt;
+ int ret;
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+@@ -677,16 +684,16 @@ int ip6geneve_set_tunnel(struct __sk_buff *skb)
+ return TC_ACT_SHOT;
+ }
+
+- __builtin_memset(&gopt, 0x0, sizeof(gopt));
+- gopt.opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
+- gopt.type = 0x08;
+- gopt.r1 = 0;
+- gopt.r2 = 0;
+- gopt.r3 = 0;
+- gopt.length = 2; /* 4-byte multiple */
+- *(int *) &gopt.opt_data = bpf_htonl(0xfeedbeef);
++ __builtin_memset(gopt, 0x0, sizeof(local_gopt));
++ gopt->opt_class = bpf_htons(0x102); /* Open Virtual Networking (OVN) */
++ gopt->type = 0x08;
++ gopt->r1 = 0;
++ gopt->r2 = 0;
++ gopt->r3 = 0;
++ gopt->length = 2; /* 4-byte multiple */
++ *(int *) &gopt->opt_data = bpf_htonl(0xfeedbeef);
+
+- ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
++	ret = bpf_skb_set_tunnel_opt(skb, gopt, sizeof(local_gopt));
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
+index 359df865a8f3e9..8d77cc5323d337 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
++++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c
+@@ -27,8 +27,8 @@ __naked void stack_out_of_bounds(void)
+
+ SEC("socket")
+ __description("uninitialized stack1")
+-__failure __msg("invalid indirect read from stack")
+-__failure_unpriv
++__success __log_level(4) __msg("stack depth 8")
++__failure_unpriv __msg_unpriv("invalid indirect read from stack")
+ __naked void uninitialized_stack1(void)
+ {
+ asm volatile (" \
+@@ -45,8 +45,8 @@ __naked void uninitialized_stack1(void)
+
+ SEC("socket")
+ __description("uninitialized stack2")
+-__failure __msg("invalid read from stack")
+-__failure_unpriv
++__success __log_level(4) __msg("stack depth 8")
++__failure_unpriv __msg_unpriv("invalid read from stack")
+ __naked void uninitialized_stack2(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
+index b054f9c4814337..d873da71f14363 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
++++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c
+@@ -5,9 +5,9 @@
+ #include <bpf/bpf_helpers.h>
+ #include "bpf_misc.h"
+
+-SEC("cgroup/sysctl")
++SEC("socket")
+ __description("ARG_PTR_TO_LONG uninitialized")
+-__failure __msg("invalid indirect read from stack R4 off -16+0 size 8")
++__success
+ __naked void arg_ptr_to_long_uninitialized(void)
+ {
+ asm volatile (" \
+@@ -35,9 +35,7 @@ __naked void arg_ptr_to_long_uninitialized(void)
+
+ SEC("socket")
+ __description("ARG_PTR_TO_LONG half-uninitialized")
+-/* in privileged mode reads from uninitialized stack locations are permitted */
+-__success __failure_unpriv
+-__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8")
++__success
+ __retval(0)
+ __naked void ptr_to_long_half_uninitialized(void)
+ {
+diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+new file mode 100644
+index 00000000000000..5905e036e0eaca
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+@@ -0,0 +1,242 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++#include "bpf_misc.h"
++
++struct {
++ __uint(type, BPF_MAP_TYPE_ARRAY);
++ __uint(max_entries, 8);
++ __type(key, __u32);
++ __type(value, __u64);
++} map SEC(".maps");
++
++struct {
++ __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
++ __uint(max_entries, 8);
++} ringbuf SEC(".maps");
++
++struct vm_area_struct;
++struct bpf_map;
++
++struct buf_context {
++ char *buf;
++};
++
++struct num_context {
++ __u64 i;
++ __u64 j;
++};
++
++__u8 choice_arr[2] = { 0, 1 };
++
++static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
++{
++ if (idx == 0) {
++ ctx->buf = (char *)(0xDEAD);
++ return 0;
++ }
++
++ if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
++ return 1;
++
++ return 0;
++}
++
++SEC("?raw_tp")
++__failure __msg("R1 type=scalar expected=fp")
++int unsafe_on_2nd_iter(void *unused)
++{
++ char buf[4];
++ struct buf_context loop_ctx = { .buf = buf };
++
++ bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
++ return 0;
++}
++
++static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
++{
++ ctx->i = 0;
++ return 0;
++}
++
++SEC("?raw_tp")
++__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
++int unsafe_on_zero_iter(void *unused)
++{
++ struct num_context loop_ctx = { .i = 32 };
++
++ bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
++ return choice_arr[loop_ctx.i];
++}
++
++static int widening_cb(__u32 idx, struct num_context *ctx)
++{
++ ++ctx->i;
++ return 0;
++}
++
++SEC("?raw_tp")
++__success
++int widening(void *unused)
++{
++ struct num_context loop_ctx = { .i = 0, .j = 1 };
++
++ bpf_loop(100, widening_cb, &loop_ctx, 0);
++	/* loop_ctx.j is not changed during callback iteration, so the
++	 * verifier should not apply widening to it.
++ */
++ return choice_arr[loop_ctx.j];
++}
++
++static int loop_detection_cb(__u32 idx, struct num_context *ctx)
++{
++ for (;;) {}
++ return 0;
++}
++
++SEC("?raw_tp")
++__failure __msg("infinite loop detected")
++int loop_detection(void *unused)
++{
++ struct num_context loop_ctx = { .i = 0 };
++
++ bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
++ return 0;
++}
++
++static __always_inline __u64 oob_state_machine(struct num_context *ctx)
++{
++ switch (ctx->i) {
++ case 0:
++ ctx->i = 1;
++ break;
++ case 1:
++ ctx->i = 32;
++ break;
++ }
++ return 0;
++}
++
++static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
++{
++ return oob_state_machine(data);
++}
++
++SEC("?raw_tp")
++__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
++int unsafe_for_each_map_elem(void *unused)
++{
++ struct num_context loop_ctx = { .i = 0 };
++
++ bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
++ return choice_arr[loop_ctx.i];
++}
++
++static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
++{
++ return oob_state_machine(data);
++}
++
++SEC("?raw_tp")
++__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
++int unsafe_ringbuf_drain(void *unused)
++{
++ struct num_context loop_ctx = { .i = 0 };
++
++ bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
++ return choice_arr[loop_ctx.i];
++}
++
++static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
++{
++ return oob_state_machine(data);
++}
++
++SEC("?raw_tp")
++__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
++int unsafe_find_vma(void *unused)
++{
++ struct task_struct *task = bpf_get_current_task_btf();
++ struct num_context loop_ctx = { .i = 0 };
++
++ bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
++ return choice_arr[loop_ctx.i];
++}
++
++static int iter_limit_cb(__u32 idx, struct num_context *ctx)
++{
++ ctx->i++;
++ return 0;
++}
++
++SEC("?raw_tp")
++__success
++int bpf_loop_iter_limit_ok(void *unused)
++{
++ struct num_context ctx = { .i = 0 };
++
++ bpf_loop(1, iter_limit_cb, &ctx, 0);
++ return choice_arr[ctx.i];
++}
++
++SEC("?raw_tp")
++__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
++int bpf_loop_iter_limit_overflow(void *unused)
++{
++ struct num_context ctx = { .i = 0 };
++
++ bpf_loop(2, iter_limit_cb, &ctx, 0);
++ return choice_arr[ctx.i];
++}
++
++static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
++{
++ ctx->i += 100;
++ return 0;
++}
++
++static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
++{
++ ctx->i += 10;
++ return 0;
++}
++
++static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
++{
++ ctx->i += 1;
++ bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
++ bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
++ return 0;
++}
++
++/* Check that the path visiting every callback function once has been
++ * reached by the verifier. Variables 'ctx{1,2}.i' below serve as flags,
++ * with each decimal digit corresponding to a callback visit marker.
++ */
++SEC("socket")
++__success __retval(111111)
++int bpf_loop_iter_limit_nested(void *unused)
++{
++ struct num_context ctx1 = { .i = 0 };
++ struct num_context ctx2 = { .i = 0 };
++ __u64 a, b, c;
++
++ bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
++ bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
++ a = ctx1.i;
++ b = ctx2.i;
++ /* Force 'ctx1.i' and 'ctx2.i' precise. */
++ c = choice_arr[(a + b) % 2];
++	/* This makes 'c' zero, but neither clang nor the verifier knows it. */
++ c /= 10;
++ /* Make sure that verifier does not visit 'impossible' states:
++ * enumerate all possible callback visit masks.
++ */
++ if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
++ b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
++ asm volatile ("r0 /= 0;" ::: "r0");
++ return 1000 * a + b + c;
++}
++
++char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
+index 5bc86af80a9ad4..71735dbf33d4f8 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
++++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
+@@ -75,9 +75,10 @@ l0_%=: r0 += 1; \
+ " ::: __clobber_all);
+ }
+
+-SEC("tracepoint")
++SEC("socket")
+ __description("bounded loop, start in the middle")
+-__failure __msg("back-edge")
++__success
++__failure_unpriv __msg_unpriv("back-edge")
+ __naked void loop_start_in_the_middle(void)
+ {
+ asm volatile (" \
+@@ -136,7 +137,9 @@ l0_%=: exit; \
+
+ SEC("tracepoint")
+ __description("bounded recursion")
+-__failure __msg("back-edge")
++__failure
++/* verifier limitation in detecting max stack depth */
++__msg("the call stack of 8 frames is too deep !")
+ __naked void bounded_recursion(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
+index efbfc3a4ad6a99..f67390224a9cf9 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
++++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
+@@ -5,9 +5,10 @@
+ #include <bpf/bpf_helpers.h>
+ #include "bpf_misc.h"
+
+-SEC("tc")
++SEC("socket")
+ __description("raw_stack: no skb_load_bytes")
+-__failure __msg("invalid read from stack R6 off=-8 size=8")
++__success
++__failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
+ __naked void stack_no_skb_load_bytes(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+index db6b3143338b61..f61d623b1ce8df 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
++++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+@@ -119,15 +119,41 @@ __naked int global_subprog_result_precise(void)
+
+ SEC("?raw_tp")
+ __success __log_level(2)
++/* First simulated path does not include callback body;
++ * r1 and r4 are always precise for bpf_loop() calls.
++ */
++__msg("9: (85) call bpf_loop#181")
++__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
++__msg("mark_precise: frame0: parent state regs=r4 stack=:")
++__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
++__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
++__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
++__msg("mark_precise: frame0: parent state regs=r1 stack=:")
++__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
++__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
++__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
++__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
++__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
++__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
++/* r6 precision propagation */
+ __msg("14: (0f) r1 += r6")
+-__msg("mark_precise: frame0: last_idx 14 first_idx 10")
++__msg("mark_precise: frame0: last_idx 14 first_idx 9")
+ __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+ __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+ __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
+ __msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
+-__msg("mark_precise: frame0: parent state regs=r0 stack=:")
+-__msg("mark_precise: frame0: last_idx 18 first_idx 0")
+-__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
++__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
++/* State entering callback body popped from states stack */
++__msg("from 9 to 17: frame1:")
++__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
++__msg("17: (b7) r0 = 0")
++__msg("18: (95) exit")
++__msg("returning from callee:")
++__msg("to caller at 9:")
++__msg("frame 0: propagating r1,r4")
++__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
++__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
++__msg("from 18 to 9: safe")
+ __naked int callback_result_precise(void)
+ {
+ asm volatile (
+@@ -233,20 +259,36 @@ __naked int parent_callee_saved_reg_precise_global(void)
+
+ SEC("?raw_tp")
+ __success __log_level(2)
++/* First simulated path does not include callback body */
+ __msg("12: (0f) r1 += r6")
+-__msg("mark_precise: frame0: last_idx 12 first_idx 10")
++__msg("mark_precise: frame0: last_idx 12 first_idx 9")
+ __msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
+ __msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
++__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
+ __msg("mark_precise: frame0: parent state regs=r6 stack=:")
+-__msg("mark_precise: frame0: last_idx 16 first_idx 0")
+-__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
+-__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
+-__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
++__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
+ __msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
+ __msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
+ __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
+ __msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
+ __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
++/* State entering callback body popped from states stack */
++__msg("from 9 to 15: frame1:")
++__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
++__msg("15: (b7) r0 = 0")
++__msg("16: (95) exit")
++__msg("returning from callee:")
++__msg("to caller at 9:")
++/* r1, r4 are always precise for bpf_loop();
++ * r6 was marked before backtracking to callback body.
++ */
++__msg("frame 0: propagating r1,r4,r6")
++__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
++__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
++__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
++__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
++__msg("mark_precise: frame0: parent state regs= stack=:")
++__msg("from 16 to 9: safe")
+ __naked int parent_callee_saved_reg_precise_with_callback(void)
+ {
+ asm volatile (
+@@ -373,22 +415,38 @@ __naked int parent_stack_slot_precise_global(void)
+
+ SEC("?raw_tp")
+ __success __log_level(2)
++/* First simulated path does not include callback body */
+ __msg("14: (0f) r1 += r6")
+-__msg("mark_precise: frame0: last_idx 14 first_idx 11")
++__msg("mark_precise: frame0: last_idx 14 first_idx 10")
+ __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+ __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+ __msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
++__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
+ __msg("mark_precise: frame0: parent state regs= stack=-8:")
+-__msg("mark_precise: frame0: last_idx 18 first_idx 0")
+-__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
+-__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
+-__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
++__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
+ __msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
+ __msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
+ __msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
+ __msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
+ __msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
+ __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
++/* State entering callback body popped from states stack */
++__msg("from 10 to 17: frame1:")
++__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
++__msg("17: (b7) r0 = 0")
++__msg("18: (95) exit")
++__msg("returning from callee:")
++__msg("to caller at 10:")
++/* r1, r4 are always precise for bpf_loop();
++ * fp-8 was marked before backtracking to callback body.
++ */
++__msg("frame 0: propagating r1,r4,fp-8")
++__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
++__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
++__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
++__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
++__msg("mark_precise: frame0: parent state regs= stack=:")
++__msg("from 18 to 10: safe")
+ __naked int parent_stack_slot_precise_with_callback(void)
+ {
+ asm volatile (
+diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c
+index 83a90afba78576..d1f23c1a7c5b4e 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_var_off.c
++++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c
+@@ -59,9 +59,10 @@ __naked void stack_read_priv_vs_unpriv(void)
+ " ::: __clobber_all);
+ }
+
+-SEC("lwt_in")
++SEC("cgroup/skb")
+ __description("variable-offset stack read, uninitialized")
+-__failure __msg("invalid variable-offset read from stack R2")
++__success
++__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
+ __naked void variable_offset_stack_read_uninitialized(void)
+ {
+ asm volatile (" \
+@@ -83,12 +84,55 @@ __naked void variable_offset_stack_read_uninitialized(void)
+
+ SEC("socket")
+ __description("variable-offset stack write, priv vs unpriv")
+-__success __failure_unpriv
++__success
++/* Check that the maximum stack depth is correctly maintained according to the
++ * maximum possible variable offset.
++ */
++__log_level(4) __msg("stack depth 16")
++__failure_unpriv
+ /* Variable stack access is rejected for unprivileged.
+ */
+ __msg_unpriv("R2 variable stack access prohibited for !root")
+ __retval(0)
+ __naked void stack_write_priv_vs_unpriv(void)
++{
++ asm volatile (" \
++ /* Get an unknown value */ \
++ r2 = *(u32*)(r1 + 0); \
++ /* Make it small and 8-byte aligned */ \
++ r2 &= 8; \
++ r2 -= 16; \
++ /* Add it to fp. We now have either fp-8 or \
++ * fp-16, but we don't know which \
++ */ \
++ r2 += r10; \
++ /* Dereference it for a stack write */ \
++ r0 = 0; \
++ *(u64*)(r2 + 0) = r0; \
++ exit; \
++" ::: __clobber_all);
++}
++
++/* Similar to the previous test, but this time also perform a read from the
++ * address written to with a variable offset. The read is allowed, showing that,
++ * after a variable-offset write, a privileged program can read the slots that
++ * were in the range of that write (even if the verifier doesn't actually know
++ * if the slot being read was really written to or not).
++ *
++ * Despite this test being mostly a superset, the previous test is also kept
++ * for the sake of checking the stack depth in the case where there is no read.
++ */
++SEC("socket")
++__description("variable-offset stack write followed by read")
++__success
++/* Check that the maximum stack depth is correctly maintained according to the
++ * maximum possible variable offset.
++ */
++__log_level(4) __msg("stack depth 16")
++__failure_unpriv
++__msg_unpriv("R2 variable stack access prohibited for !root")
++__retval(0)
++__naked void stack_write_followed_by_read(void)
+ {
+ asm volatile (" \
+ /* Get an unknown value */ \
+@@ -103,12 +147,7 @@ __naked void stack_write_priv_vs_unpriv(void)
+ /* Dereference it for a stack write */ \
+ r0 = 0; \
+ *(u64*)(r2 + 0) = r0; \
+- /* Now read from the address we just wrote. This shows\
+- * that, after a variable-offset write, a priviledged\
+- * program can read the slots that were in the range of\
+- * that write (even if the verifier doesn't actually know\
+- * if the slot being read was really written to or not.\
+- */ \
++ /* Now read from the address we just wrote. */ \
+ r3 = *(u64*)(r2 + 0); \
+ r0 = 0; \
+ exit; \
+@@ -253,9 +292,10 @@ __naked void access_min_out_of_bound(void)
+ : __clobber_all);
+ }
+
+-SEC("lwt_in")
++SEC("cgroup/skb")
+ __description("indirect variable-offset stack access, min_off < min_initialized")
+-__failure __msg("invalid indirect read from stack R2 var_off")
++__success
++__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
+ __naked void access_min_off_min_initialized(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+index 07d786329105da..e4c729768b7d8a 100644
+--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
++++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+@@ -53,6 +53,8 @@
+ #define DEFAULT_TTL 64
+ #define MAX_ALLOWED_PORTS 8
+
++#define MAX_PACKET_OFF 0xffff
++
+ #define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+@@ -183,63 +185,76 @@ static __always_inline __u32 tcp_time_stamp_raw(void)
+ }
+
+ struct tcpopt_context {
+- __u8 *ptr;
+- __u8 *end;
++ void *data;
+ void *data_end;
+ __be32 *tsecr;
+ __u8 wscale;
+ bool option_timestamp;
+ bool option_sack;
++ __u32 off;
+ };
+
+-static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
++static __always_inline u8 *next(struct tcpopt_context *ctx, __u32 sz)
+ {
+- __u8 opcode, opsize;
++ __u64 off = ctx->off;
++ __u8 *data;
+
+- if (ctx->ptr >= ctx->end)
+- return 1;
+- if (ctx->ptr >= ctx->data_end)
+- return 1;
++ /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
++ if (off > MAX_PACKET_OFF - sz)
++ return NULL;
+
+- opcode = ctx->ptr[0];
++ data = ctx->data + off;
++ barrier_var(data);
++ if (data + sz >= ctx->data_end)
++ return NULL;
+
+- if (opcode == TCPOPT_EOL)
+- return 1;
+- if (opcode == TCPOPT_NOP) {
+- ++ctx->ptr;
+- return 0;
+- }
++ ctx->off += sz;
++ return data;
++}
+
+- if (ctx->ptr + 1 >= ctx->end)
+- return 1;
+- if (ctx->ptr + 1 >= ctx->data_end)
++static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
++{
++ __u8 *opcode, *opsize, *wscale, *tsecr;
++ __u32 off = ctx->off;
++
++ opcode = next(ctx, 1);
++ if (!opcode)
+ return 1;
+- opsize = ctx->ptr[1];
+- if (opsize < 2)
++
++ if (*opcode == TCPOPT_EOL)
+ return 1;
++ if (*opcode == TCPOPT_NOP)
++ return 0;
+
+- if (ctx->ptr + opsize > ctx->end)
++ opsize = next(ctx, 1);
++ if (!opsize || *opsize < 2)
+ return 1;
+
+- switch (opcode) {
++ switch (*opcode) {
+ case TCPOPT_WINDOW:
+- if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end)
+- ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? ctx->ptr[2] : TCP_MAX_WSCALE;
++ wscale = next(ctx, 1);
++ if (!wscale)
++ return 1;
++ if (*opsize == TCPOLEN_WINDOW)
++ ctx->wscale = *wscale < TCP_MAX_WSCALE ? *wscale : TCP_MAX_WSCALE;
+ break;
+ case TCPOPT_TIMESTAMP:
+- if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) {
++ tsecr = next(ctx, 4);
++ if (!tsecr)
++ return 1;
++ if (*opsize == TCPOLEN_TIMESTAMP) {
+ ctx->option_timestamp = true;
+ /* Client's tsval becomes our tsecr. */
+- *ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2));
++ *ctx->tsecr = get_unaligned((__be32 *)tsecr);
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+- if (opsize == TCPOLEN_SACK_PERM)
++ if (*opsize == TCPOLEN_SACK_PERM)
+ ctx->option_sack = true;
+ break;
+ }
+
+- ctx->ptr += opsize;
++ ctx->off = off + *opsize;
+
+ return 0;
+ }
+@@ -256,16 +271,21 @@ static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
+
+ static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
+ __u16 tcp_len, __be32 *tsval,
+- __be32 *tsecr, void *data_end)
++ __be32 *tsecr, void *data, void *data_end)
+ {
+ struct tcpopt_context loop_ctx = {
+- .ptr = (__u8 *)(tcp_header + 1),
+- .end = (__u8 *)tcp_header + tcp_len,
++ .data = data,
+ .data_end = data_end,
+ .tsecr = tsecr,
+ .wscale = TS_OPT_WSCALE_MASK,
+ .option_timestamp = false,
+ .option_sack = false,
++		/* Note: currently the verifier tracks .off as an unbound scalar.
++		 * If the verifier at some point gets smarter and computes a
++		 * bounded value for this var, beware that it might hinder
++		 * bpf_loop() convergence validation.
++ */
++ .off = (__u8 *)(tcp_header + 1) - (__u8 *)data,
+ };
+ u32 cookie;
+
+@@ -447,13 +467,13 @@ static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bo
+ unsigned long status = ct->status;
+
+ bpf_ct_release(ct);
+- if (status & IPS_CONFIRMED_BIT)
++ if (status & IPS_CONFIRMED)
+ return XDP_PASS;
+ } else if (ct_lookup_opts.error != -ENOENT) {
+ return XDP_ABORTED;
+ }
+
+- /* error == -ENOENT || !(status & IPS_CONFIRMED_BIT) */
++ /* error == -ENOENT || !(status & IPS_CONFIRMED) */
+ return XDP_TX;
+ }
+
+@@ -635,7 +655,7 @@ static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
+ cookie = (__u32)value;
+
+ if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
+- &tsopt_buf[0], &tsopt_buf[1], data_end))
++ &tsopt_buf[0], &tsopt_buf[1], data, data_end))
+ tsopt = tsopt_buf;
+
+ /* Check that there is enough space for a SYNACK. It also covers
+diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp
+index f4936834f76f46..435341c2542085 100644
+--- a/tools/testing/selftests/bpf/test_cpp.cpp
++++ b/tools/testing/selftests/bpf/test_cpp.cpp
+@@ -6,6 +6,10 @@
+ #include <bpf/libbpf.h>
+ #include <bpf/bpf.h>
+ #include <bpf/btf.h>
++
++#ifndef _Bool
++#define _Bool bool
++#endif
+ #include "test_core_extern.skel.h"
+
+ template <typename T>
+diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
+index c028d621c744da..d98c72dc563eaf 100644
+--- a/tools/testing/selftests/bpf/test_lpm_map.c
++++ b/tools/testing/selftests/bpf/test_lpm_map.c
+@@ -211,7 +211,7 @@ static void test_lpm_map(int keysize)
+ volatile size_t n_matches, n_matches_after_delete;
+ size_t i, j, n_nodes, n_lookups;
+ struct tlpm_node *t, *list = NULL;
+- struct bpf_lpm_trie_key *key;
++ struct bpf_lpm_trie_key_u8 *key;
+ uint8_t *data, *value;
+ int r, map;
+
+@@ -331,8 +331,8 @@ static void test_lpm_map(int keysize)
+ static void test_lpm_ipaddr(void)
+ {
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+- struct bpf_lpm_trie_key *key_ipv4;
+- struct bpf_lpm_trie_key *key_ipv6;
++ struct bpf_lpm_trie_key_u8 *key_ipv4;
++ struct bpf_lpm_trie_key_u8 *key_ipv6;
+ size_t key_size_ipv4;
+ size_t key_size_ipv6;
+ int map_fd_ipv4;
+@@ -423,7 +423,7 @@ static void test_lpm_ipaddr(void)
+ static void test_lpm_delete(void)
+ {
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+- struct bpf_lpm_trie_key *key;
++ struct bpf_lpm_trie_key_u8 *key;
+ size_t key_size;
+ int map_fd;
+ __u64 value;
+@@ -532,7 +532,7 @@ static void test_lpm_delete(void)
+ static void test_lpm_get_next_key(void)
+ {
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
+- struct bpf_lpm_trie_key *key_p, *next_key_p;
++ struct bpf_lpm_trie_key_u8 *key_p, *next_key_p;
+ size_t key_size;
+ __u32 value = 0;
+ int map_fd;
+@@ -693,9 +693,9 @@ static void *lpm_test_command(void *arg)
+ {
+ int i, j, ret, iter, key_size;
+ struct lpm_mt_test_info *info = arg;
+- struct bpf_lpm_trie_key *key_p;
++ struct bpf_lpm_trie_key_u8 *key_p;
+
+- key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
++ key_size = sizeof(*key_p) + sizeof(__u32);
+ key_p = alloca(key_size);
+ for (iter = 0; iter < info->iter; iter++)
+ for (i = 0; i < MAX_TEST_KEYS; i++) {
+@@ -717,7 +717,7 @@ static void *lpm_test_command(void *arg)
+ ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
+ assert(ret == 0 || errno == ENOENT);
+ } else {
+- struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
++ struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size);
+ ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
+ assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
+ }
+@@ -752,7 +752,7 @@ static void test_lpm_multi_thread(void)
+
+ /* create a trie */
+ value_size = sizeof(__u32);
+- key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
++ key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts);
+
+ /* create 4 threads to test update, delete, lookup and get_next_key */
+diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
+index 4d0650cfb5cd8b..fda7589c50236c 100644
+--- a/tools/testing/selftests/bpf/test_lru_map.c
++++ b/tools/testing/selftests/bpf/test_lru_map.c
+@@ -126,7 +126,8 @@ static int sched_next_online(int pid, int *next_to_try)
+
+ while (next < nr_cpus) {
+ CPU_ZERO(&cpuset);
+- CPU_SET(next++, &cpuset);
++ CPU_SET(next, &cpuset);
++ next++;
+ if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+ ret = 0;
+ break;
+diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
+index 7fc00e423e4dda..e0dd101c9f2bd4 100644
+--- a/tools/testing/selftests/bpf/test_maps.c
++++ b/tools/testing/selftests/bpf/test_maps.c
+@@ -1190,7 +1190,11 @@ static void test_map_in_map(void)
+ goto out_map_in_map;
+ }
+
+- bpf_object__load(obj);
++ err = bpf_object__load(obj);
++ if (err) {
++ printf("Failed to load test prog\n");
++ goto out_map_in_map;
++ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_array");
+ if (!map) {
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index 4d582cac2c09e1..74620ed3a166e4 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -10,7 +10,6 @@
+ #include <sched.h>
+ #include <signal.h>
+ #include <string.h>
+-#include <execinfo.h> /* backtrace */
+ #include <sys/sysinfo.h> /* get_nprocs */
+ #include <netinet/in.h>
+ #include <sys/select.h>
+@@ -19,6 +18,21 @@
+ #include <bpf/btf.h>
+ #include "json_writer.h"
+
++#ifdef __GLIBC__
++#include <execinfo.h> /* backtrace */
++#endif
++
++/* Default backtrace funcs if missing at link time */
++__weak int backtrace(void **buffer, int size)
++{
++ return 0;
++}
++
++__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
++{
++ dprintf(fd, "<backtrace not supported>\n");
++}
++
+ static bool verbose(void)
+ {
+ return env.verbosity > VERBOSE_NONE;
+@@ -1690,7 +1704,7 @@ int main(int argc, char **argv)
+ /* launch workers if requested */
+ env.worker_id = -1; /* main process */
+ if (env.workers) {
+- env.worker_pids = calloc(sizeof(__pid_t), env.workers);
++ env.worker_pids = calloc(sizeof(pid_t), env.workers);
+ env.worker_socks = calloc(sizeof(int), env.workers);
+ if (env.debug)
+ fprintf(stdout, "Launching %d workers.\n", env.workers);
+diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
+index 77bd492c602481..2f9f6f250f1714 100644
+--- a/tools/testing/selftests/bpf/test_progs.h
++++ b/tools/testing/selftests/bpf/test_progs.h
+@@ -417,6 +417,8 @@ int get_bpf_max_tramp_links(void);
+ #define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
+ #elif defined(__aarch64__)
+ #define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep"
++#elif defined(__riscv)
++#define SYS_NANOSLEEP_KPROBE_NAME "__riscv_sys_nanosleep"
+ #else
+ #define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
+ #endif
+diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
+index 024a0faafb3be7..a181c0ccf98b2a 100644
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -63,7 +63,7 @@ int passed;
+ int failed;
+ int map_fd[9];
+ struct bpf_map *maps[9];
+-int prog_fd[11];
++int prog_fd[9];
+
+ int txmsg_pass;
+ int txmsg_redir;
+@@ -680,7 +680,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ }
+ }
+
+- s->bytes_recvd += recv;
++ if (recv > 0)
++ s->bytes_recvd += recv;
+
+ if (opt->check_recved_len && s->bytes_recvd > total_bytes) {
+ errno = EMSGSIZE;
+@@ -1793,8 +1794,6 @@ int prog_attach_type[] = {
+ BPF_SK_MSG_VERDICT,
+ BPF_SK_MSG_VERDICT,
+ BPF_SK_MSG_VERDICT,
+- BPF_SK_MSG_VERDICT,
+- BPF_SK_MSG_VERDICT,
+ };
+
+ int prog_type[] = {
+@@ -1807,8 +1806,6 @@ int prog_type[] = {
+ BPF_PROG_TYPE_SK_MSG,
+ BPF_PROG_TYPE_SK_MSG,
+ BPF_PROG_TYPE_SK_MSG,
+- BPF_PROG_TYPE_SK_MSG,
+- BPF_PROG_TYPE_SK_MSG,
+ };
+
+ static int populate_progs(char *bpf_file)
+@@ -2104,9 +2101,9 @@ int main(int argc, char **argv)
+ free(options.whitelist);
+ if (options.blacklist)
+ free(options.blacklist);
++ close(cg_fd);
+ if (cg_created)
+ cleanup_cgroup_environment();
+- close(cg_fd);
+ return err;
+ }
+
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 910044f08908a7..7989ec60845455 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -72,7 +72,6 @@ cleanup() {
+ server_listen() {
+ ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" &
+ server_pid=$!
+- sleep 0.2
+ }
+
+ client_connect() {
+@@ -93,6 +92,16 @@ verify_data() {
+ fi
+ }
+
++wait_for_port() {
++ for i in $(seq 20); do
++ if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then
++ return 0
++ fi
++ sleep 0.1
++ done
++ return 1
++}
++
+ set -e
+
+ # no arguments: automated test, run all
+@@ -193,6 +202,7 @@ setup
+ # basic communication works
+ echo "test basic connectivity"
+ server_listen
++wait_for_port ${port} ${netcat_opt}
+ client_connect
+ verify_data
+
+@@ -204,6 +214,7 @@ ip netns exec "${ns1}" tc filter add dev veth1 egress \
+ section "encap_${tuntype}_${mac}"
+ echo "test bpf encap without decap (expect failure)"
+ server_listen
++wait_for_port ${port} ${netcat_opt}
+ ! client_connect
+
+ if [[ "$tuntype" =~ "udp" ]]; then
+diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
+index 8d994884c7b440..6acffe0426f016 100644
+--- a/tools/testing/selftests/bpf/testing_helpers.c
++++ b/tools/testing/selftests/bpf/testing_helpers.c
+@@ -220,13 +220,13 @@ int parse_test_list(const char *s,
+ bool is_glob_pattern)
+ {
+ char *input, *state = NULL, *test_spec;
+- int err = 0;
++ int err = 0, cnt = 0;
+
+ input = strdup(s);
+ if (!input)
+ return -ENOMEM;
+
+- while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
++ while ((test_spec = strtok_r(cnt++ ? NULL : input, ",", &state))) {
+ err = insert_test(set, test_spec, is_glob_pattern);
+ if (err)
+ break;
+diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c
+index 2a6efbd0401e5b..762e4b5ec95571 100644
+--- a/tools/testing/selftests/bpf/unpriv_helpers.c
++++ b/tools/testing/selftests/bpf/unpriv_helpers.c
+@@ -2,7 +2,6 @@
+
+ #include <stdbool.h>
+ #include <stdlib.h>
+-#include <error.h>
+ #include <stdio.h>
+
+ #include "unpriv_helpers.h"
+diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
+index 319337bdcfc856..9a7b1106fda812 100644
+--- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
++++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
+@@ -83,17 +83,6 @@
+ .result = REJECT,
+ .errstr = "!read_ok",
+ },
+-{
+- "Can't use cmpxchg on uninit memory",
+- .insns = {
+- BPF_MOV64_IMM(BPF_REG_0, 3),
+- BPF_MOV64_IMM(BPF_REG_2, 4),
+- BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
+- BPF_EXIT_INSN(),
+- },
+- .result = REJECT,
+- .errstr = "invalid read from stack",
+-},
+ {
+ "BPF_W cmpxchg should zero top 32 bits",
+ .insns = {
+diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
+index 1bdf2b43e49eaf..ab25a81fd3a108 100644
+--- a/tools/testing/selftests/bpf/verifier/calls.c
++++ b/tools/testing/selftests/bpf/verifier/calls.c
+@@ -442,7 +442,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+- .errstr = "back-edge from insn 0 to 0",
++ .errstr = "the call stack of 9 frames is too deep",
+ .result = REJECT,
+ },
+ {
+@@ -799,7 +799,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+- .errstr = "back-edge",
++ .errstr = "the call stack of 9 frames is too deep",
+ .result = REJECT,
+ },
+ {
+@@ -811,7 +811,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+- .errstr = "back-edge",
++ .errstr = "the call stack of 9 frames is too deep",
+ .result = REJECT,
+ },
+ {
+@@ -1505,7 +1505,9 @@
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = REJECT,
+- .errstr = "invalid read from stack R7 off=-16 size=8",
++ .errstr = "R0 invalid mem access 'scalar'",
++ .result_unpriv = REJECT,
++ .errstr_unpriv = "invalid read from stack R7 off=-16 size=8",
+ },
+ {
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+index f9297900cea6d4..78f19c255f20b4 100644
+--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
++++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+@@ -9,8 +9,8 @@
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid BPF_LD_IMM insn",
+- .errstr_unpriv = "R1 pointer comparison",
++ .errstr = "jump into the middle of ldimm64 insn 1",
++ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ .result = REJECT,
+ },
+ {
+@@ -23,8 +23,8 @@
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid BPF_LD_IMM insn",
+- .errstr_unpriv = "R1 pointer comparison",
++ .errstr = "jump into the middle of ldimm64 insn 1",
++ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ .result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
+index 655095810d4a12..611b5a0a6f7e3e 100644
+--- a/tools/testing/selftests/bpf/veristat.c
++++ b/tools/testing/selftests/bpf/veristat.c
+@@ -753,13 +753,13 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs)
+ static int parse_stats(const char *stats_str, struct stat_specs *specs)
+ {
+ char *input, *state = NULL, *next;
+- int err;
++ int err, cnt = 0;
+
+ input = strdup(stats_str);
+ if (!input)
+ return -ENOMEM;
+
+- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
++ while ((next = strtok_r(cnt++ ? NULL : input, ",", &state))) {
+ err = parse_stat(next, specs);
+ if (err)
+ return err;
+@@ -1214,7 +1214,7 @@ static int cmp_join_stat(const struct verif_stats_join *s1,
+ enum stat_id id, enum stat_variant var, bool asc)
+ {
+ const char *str1 = NULL, *str2 = NULL;
+- double v1, v2;
++ double v1 = 0.0, v2 = 0.0;
+ int cmp = 0;
+
+ fetch_join_stat_value(s1, id, var, &str1, &v1);
+@@ -1444,7 +1444,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
+ while (fgets(line, sizeof(line), f)) {
+ char *input = line, *state = NULL, *next;
+ struct verif_stats *st = NULL;
+- int col = 0;
++ int col = 0, cnt = 0;
+
+ if (!header) {
+ void *tmp;
+@@ -1462,7 +1462,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs,
+ *stat_cntp += 1;
+ }
+
+- while ((next = strtok_r(state ? NULL : input, ",\n", &state))) {
++ while ((next = strtok_r(cnt++ ? NULL : input, ",\n", &state))) {
+ if (header) {
+ /* for the first line, set up spec stats */
+ err = parse_stat(next, specs);
+diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+index 613321eb84c190..79f2da8f6ead63 100644
+--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
++++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+@@ -68,7 +68,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id)
+ .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG,
+ };
+- __u32 idx;
++ __u32 idx = 0;
+ u64 addr;
+ int ret;
+ int i;
+@@ -288,20 +288,6 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t
+ return 0;
+ }
+
+-struct ethtool_channels {
+- __u32 cmd;
+- __u32 max_rx;
+- __u32 max_tx;
+- __u32 max_other;
+- __u32 max_combined;
+- __u32 rx_count;
+- __u32 tx_count;
+- __u32 other_count;
+- __u32 combined_count;
+-};
+-
+-#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
+-
+ static int rxq_num(const char *ifname)
+ {
+ struct ethtool_channels ch = {
+diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+index 2cf6f10ab7c4ac..fc02918962c757 100644
+--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
++++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+@@ -153,7 +153,10 @@ void suspend(void)
+ if (err < 0)
+ ksft_exit_fail_msg("timerfd_settime() failed\n");
+
+- if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem"))
++ system("(echo mem > /sys/power/state) 2> /dev/null");
++
++ timerfd_gettime(timerfd, &spec);
++ if (spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0)
+ ksft_exit_fail_msg("Failed to enter Suspend state\n");
+
+ close(timerfd);
+diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
+index 4804c7dc7b3125..ddb70d418c6a82 100644
+--- a/tools/testing/selftests/cachestat/test_cachestat.c
++++ b/tools/testing/selftests/cachestat/test_cachestat.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+
+ #include <stdio.h>
+ #include <stdbool.h>
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 0340d4ca8f51cb..432db923bced0c 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -195,10 +195,10 @@ int cg_write_numeric(const char *cgroup, const char *control, long value)
+ return cg_write(cgroup, control, buf);
+ }
+
+-int cg_find_unified_root(char *root, size_t len)
++int cg_find_unified_root(char *root, size_t len, bool *nsdelegate)
+ {
+ char buf[10 * PAGE_SIZE];
+- char *fs, *mount, *type;
++ char *fs, *mount, *type, *options;
+ const char delim[] = "\n\t ";
+
+ if (read_text("/proc/self/mounts", buf, sizeof(buf)) <= 0)
+@@ -211,12 +211,14 @@ int cg_find_unified_root(char *root, size_t len)
+ for (fs = strtok(buf, delim); fs; fs = strtok(NULL, delim)) {
+ mount = strtok(NULL, delim);
+ type = strtok(NULL, delim);
+- strtok(NULL, delim);
++ options = strtok(NULL, delim);
+ strtok(NULL, delim);
+ strtok(NULL, delim);
+
+ if (strcmp(type, "cgroup2") == 0) {
+ strncpy(root, mount, len);
++ if (nsdelegate)
++ *nsdelegate = !!strstr(options, "nsdelegate");
+ return 0;
+ }
+ }
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
+index 1df7f202214afc..89e8519fb2719c 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.h
++++ b/tools/testing/selftests/cgroup/cgroup_util.h
+@@ -21,7 +21,7 @@ static inline int values_close(long a, long b, int err)
+ return abs(a - b) <= (a + b) / 100 * err;
+ }
+
+-extern int cg_find_unified_root(char *root, size_t len);
++extern int cg_find_unified_root(char *root, size_t len, bool *nsdelegate);
+ extern char *cg_name(const char *root, const char *name);
+ extern char *cg_name_indexed(const char *root, const char *name, int index);
+ extern char *cg_control(const char *cgroup, const char *control);
+diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
+index 80aa6b2373b966..a5672a91d273ce 100644
+--- a/tools/testing/selftests/cgroup/test_core.c
++++ b/tools/testing/selftests/cgroup/test_core.c
+@@ -18,6 +18,8 @@
+ #include "../kselftest.h"
+ #include "cgroup_util.h"
+
++static bool nsdelegate;
++
+ static int touch_anon(char *buf, size_t size)
+ {
+ int fd;
+@@ -775,6 +777,9 @@ static int test_cgcore_lesser_ns_open(const char *root)
+ pid_t pid;
+ int status;
+
++ if (!nsdelegate)
++ return KSFT_SKIP;
++
+ cg_test_a = cg_name(root, "cg_test_a");
+ cg_test_b = cg_name(root, "cg_test_b");
+
+@@ -862,7 +867,7 @@ int main(int argc, char *argv[])
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), &nsdelegate))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
+diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
+index 24020a2c68dcdd..186bf96f6a2846 100644
+--- a/tools/testing/selftests/cgroup/test_cpu.c
++++ b/tools/testing/selftests/cgroup/test_cpu.c
+@@ -700,7 +700,7 @@ int main(int argc, char *argv[])
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
+diff --git a/tools/testing/selftests/cgroup/test_cpuset.c b/tools/testing/selftests/cgroup/test_cpuset.c
+index b061ed1e05b4d0..4034d14ba69ac0 100644
+--- a/tools/testing/selftests/cgroup/test_cpuset.c
++++ b/tools/testing/selftests/cgroup/test_cpuset.c
+@@ -249,7 +249,7 @@ int main(int argc, char *argv[])
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "cpuset"))
+diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
+index ff519029f6f433..969e9f0f495c35 100644
+--- a/tools/testing/selftests/cgroup/test_freezer.c
++++ b/tools/testing/selftests/cgroup/test_freezer.c
+@@ -827,7 +827,7 @@ int main(int argc, char *argv[])
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+diff --git a/tools/testing/selftests/cgroup/test_kill.c b/tools/testing/selftests/cgroup/test_kill.c
+index 6153690319c9c8..0e5bb6c7307a50 100644
+--- a/tools/testing/selftests/cgroup/test_kill.c
++++ b/tools/testing/selftests/cgroup/test_kill.c
+@@ -276,7 +276,7 @@ int main(int argc, char *argv[])
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
+index c82f974b85c94d..137506db03127f 100644
+--- a/tools/testing/selftests/cgroup/test_kmem.c
++++ b/tools/testing/selftests/cgroup/test_kmem.c
+@@ -420,7 +420,7 @@ int main(int argc, char **argv)
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ /*
+diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
+index c7c9572003a8c9..b462416b380612 100644
+--- a/tools/testing/selftests/cgroup/test_memcontrol.c
++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
+@@ -1314,7 +1314,7 @@ int main(int argc, char **argv)
+ char root[PATH_MAX];
+ int i, proc_status, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ /*
+diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
+index 49def87a909bdb..6927b4a06dee62 100644
+--- a/tools/testing/selftests/cgroup/test_zswap.c
++++ b/tools/testing/selftests/cgroup/test_zswap.c
+@@ -250,7 +250,7 @@ int main(int argc, char **argv)
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+- if (cg_find_unified_root(root, sizeof(root)))
++ if (cg_find_unified_root(root, sizeof(root), NULL))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (!zswap_configured())
+diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
+index e60cf4da8fb07e..1c61e3c022cb84 100644
+--- a/tools/testing/selftests/clone3/clone3.c
++++ b/tools/testing/selftests/clone3/clone3.c
+@@ -196,7 +196,12 @@ int main(int argc, char *argv[])
+ CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() in a new time namespace */
+- test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++ if (access("/proc/self/ns/time", F_OK) == 0) {
++ test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++ } else {
++ ksft_print_msg("Time namespaces are not supported\n");
++ ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n");
++ }
+
+ /* Do a clone3() with exit signal (SIGCHLD) in flags */
+ test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
+diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c
+index 749239930ca83f..190c57b0efeba9 100644
+--- a/tools/testing/selftests/core/close_range_test.c
++++ b/tools/testing/selftests/core/close_range_test.c
+@@ -563,4 +563,39 @@ TEST(close_range_cloexec_unshare_syzbot)
+ EXPECT_EQ(close(fd3), 0);
+ }
+
++TEST(close_range_bitmap_corruption)
++{
++ pid_t pid;
++ int status;
++ struct __clone_args args = {
++ .flags = CLONE_FILES,
++ .exit_signal = SIGCHLD,
++ };
++
++ /* get the first 128 descriptors open */
++ for (int i = 2; i < 128; i++)
++ EXPECT_GE(dup2(0, i), 0);
++
++ /* get descriptor table shared */
++ pid = sys_clone3(&args, sizeof(args));
++ ASSERT_GE(pid, 0);
++
++ if (pid == 0) {
++ /* unshare and truncate descriptor table down to 64 */
++ if (sys_close_range(64, ~0U, CLOSE_RANGE_UNSHARE))
++ exit(EXIT_FAILURE);
++
++ ASSERT_EQ(fcntl(64, F_GETFD), -1);
++ /* ... and verify that the range 64..127 is not
++ stuck "fully used" according to secondary bitmap */
++ EXPECT_EQ(dup(0), 64)
++ exit(EXIT_FAILURE);
++ exit(EXIT_SUCCESS);
++ }
++
++ EXPECT_EQ(waitpid(pid, &status, 0), pid);
++ EXPECT_EQ(true, WIFEXITED(status));
++ EXPECT_EQ(0, WEXITSTATUS(status));
++}
++
+ TEST_HARNESS_MAIN
+diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+index 890a8236a8ba73..2809f9a25c4335 100644
+--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+@@ -28,9 +28,11 @@ static int check_vgem(int fd)
+ version.name = name;
+
+ ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
+- if (ret)
++ if (ret || version.name_len != 4)
+ return 0;
+
++ name[4] = '\0';
++
+ return !strcmp(name, "vgem");
+ }
+
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+index 4917dbb35a44df..5667febee32865 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+@@ -30,16 +30,16 @@ ip netns exec server ip addr add ${server_ip4}/24 dev eth0
+
+ ip netns exec client ip link add dev bond0 down type bond mode 1 \
+ miimon 100 all_slaves_active 1
+-ip netns exec client ip link set dev eth0 down master bond0
++ip netns exec client ip link set dev eth0 master bond0
+ ip netns exec client ip link set dev bond0 up
+ ip netns exec client ip addr add ${client_ip4}/24 dev bond0
+ ip netns exec client ping -c 5 $server_ip4 >/dev/null
+
+-ip netns exec client ip link set dev eth0 down nomaster
++ip netns exec client ip link set dev eth0 nomaster
+ ip netns exec client ip link set dev bond0 down
+ ip netns exec client ip link set dev bond0 type bond mode 0 \
+ arp_interval 1000 arp_ip_target "+${server_ip4}"
+-ip netns exec client ip link set dev eth0 down master bond0
++ip netns exec client ip link set dev eth0 master bond0
+ ip netns exec client ip link set dev bond0 up
+ ip netns exec client ping -c 5 $server_ip4 >/dev/null
+
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+index c54d1697f439a4..9a3d3c389dadda 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+@@ -62,6 +62,8 @@ prio_test()
+
+ # create bond
+ bond_reset "${param}"
++ # set active_slave to primary eth1 specifically
++ ip -n ${s_ns} link set bond0 type bond active_slave eth1
+
+ # check bonding member prio value
+ ip -n ${s_ns} link set eth0 type bond_slave prio 0
+@@ -162,7 +164,7 @@ prio_arp()
+ local mode=$1
+
+ for primary_reselect in 0 1 2; do
+- prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
++ prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
+ done
+ }
+@@ -178,7 +180,7 @@ prio_ns()
+ fi
+
+ for primary_reselect in 0 1 2; do
+- prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
++ prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+ log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
+ done
+ }
+@@ -194,9 +196,9 @@ prio()
+
+ for mode in $modes; do
+ prio_miimon $mode
+- prio_arp $mode
+- prio_ns $mode
+ done
++ prio_arp "active-backup"
++ prio_ns "active-backup"
+ }
+
+ arp_validate_test()
+diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+index 2a268b17b61f51..dbdd736a41d394 100644
+--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
++++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+@@ -48,6 +48,17 @@ test_LAG_cleanup()
+ ip link add mv0 link "$name" up address "$ucaddr" type macvlan
+ # Used to test dev->mc handling
+ ip address add "$addr6" dev "$name"
++
++ # Check that addresses were added as expected
++ (grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy1 ||
++ grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy2) >/dev/null
++ check_err $? "macvlan unicast address not found on a slave"
++
++ # mcaddr is added asynchronously by addrconf_dad_work(), use busywait
++ (busywait 10000 grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy1 ||
++ grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy2) >/dev/null
++ check_err $? "IPv6 solicited-node multicast mac address not found on a slave"
++
+ ip link set dev "$name" down
+ ip link del "$name"
+
+diff --git a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
+index ad4c845a4ac7c2..b76bf50309524a 100755
+--- a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
++++ b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ # Regression Test:
+diff --git a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
+index 2330d37453f956..8c261900214791 100755
+--- a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
++++ b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ # Regression Test:
+diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
+index 6091b45d226baf..79b65bdf05db65 100644
+--- a/tools/testing/selftests/drivers/net/bonding/settings
++++ b/tools/testing/selftests/drivers/net/bonding/settings
+@@ -1 +1 @@
+-timeout=120
++timeout=1200
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+index 42ce602d8d492e..e71d811656bb5a 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+@@ -120,6 +120,9 @@ h2_destroy()
+
+ switch_create()
+ {
++ local lanes_swp4
++ local pg1_size
++
+ # pools
+ # -----
+
+@@ -229,7 +232,20 @@ switch_create()
+ dcb pfc set dev $swp4 prio-pfc all:off 1:on
+ # PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
+ # is (-2*MTU) about 80K of delay provision.
+- dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
++ pg1_size=$_100KB
++
++ setup_wait_dev_with_timeout $swp4
++
++ lanes_swp4=$(ethtool $swp4 | grep 'Lanes:')
++ lanes_swp4=${lanes_swp4#*"Lanes: "}
++
++ # 8-lane ports use two buffers among which the configured buffer
++ # is split, so double the size to get twice (20K + 80K).
++ if [[ $lanes_swp4 -eq 8 ]]; then
++ pg1_size=$((pg1_size * 2))
++ fi
++
++ dcb buffer set dev $swp4 buffer-size all:0 1:$pg1_size
+
+ # bridges
+ # -------
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+index fb850e0ec8375f..21d0f419cc6d77 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
+ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
+ multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ delta_two_masks_one_key_test delta_simple_rehash_test \
+- bloom_simple_test bloom_complex_test bloom_delta_test"
++ bloom_simple_test bloom_complex_test bloom_delta_test \
++ max_erp_entries_test max_group_size_test collision_test"
+ NUM_NETIFS=2
+ source $lib_dir/lib.sh
+ source $lib_dir/tc_common.sh
+@@ -456,7 +457,7 @@ delta_two_masks_one_key_test()
+ {
+ # If 2 keys are the same and only differ in mask in a way that
+ # they belong under the same ERP (second is delta of the first),
+- # there should be no C-TCAM spill.
++ # there should be a C-TCAM spill.
+
+ RET=0
+
+@@ -473,8 +474,8 @@ delta_two_masks_one_key_test()
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+ action drop"
+- tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+- check_err $? "incorrect C-TCAM spill while inserting the second rule"
++ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 1
++ check_err $? "C-TCAM spill did not happen while inserting the second rule"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+@@ -983,6 +984,156 @@ bloom_delta_test()
+ log_test "bloom delta test ($tcflags)"
+ }
+
++max_erp_entries_test()
++{
++ # The number of eRP entries is limited. Once the maximum number of eRPs
++ # has been reached, filters cannot be added. This test verifies that
++ # when this limit is reached, insertion fails without crashing.
++
++ RET=0
++
++ local num_masks=32
++ local num_regions=15
++ local chain_failed
++ local mask_failed
++ local ret
++
++ if [[ "$tcflags" != "skip_sw" ]]; then
++ return 0;
++ fi
++
++ for ((i=1; i < $num_regions; i++)); do
++ for ((j=$num_masks; j >= 0; j--)); do
++ tc filter add dev $h2 ingress chain $i protocol ip \
++ pref $i handle $j flower $tcflags \
++ dst_ip 192.1.0.0/$j &> /dev/null
++ ret=$?
++
++ if [ $ret -ne 0 ]; then
++ chain_failed=$i
++ mask_failed=$j
++ break 2
++ fi
++ done
++ done
++
++ # We expect to exceed the maximum number of eRP entries, so that
++ # insertion eventually fails. Otherwise, the test should be adjusted to
++ # add more filters.
++ check_fail $ret "expected to exceed number of eRP entries"
++
++ for ((; i >= 1; i--)); do
++ for ((j=0; j <= $num_masks; j++)); do
++ tc filter del dev $h2 ingress chain $i protocol ip \
++ pref $i handle $j flower &> /dev/null
++ done
++ done
++
++ log_test "max eRP entries test ($tcflags). " \
++ "max chain $chain_failed, mask $mask_failed"
++}
++
++max_group_size_test()
++{
++ # The number of ACLs in an ACL group is limited. Once the maximum
++ # number of ACLs has been reached, filters cannot be added. This test
++ # verifies that when this limit is reached, insertion fails without
++ # crashing.
++
++ RET=0
++
++ local num_acls=32
++ local max_size
++ local ret
++
++ if [[ "$tcflags" != "skip_sw" ]]; then
++ return 0;
++ fi
++
++ for ((i=1; i < $num_acls; i++)); do
++ if [[ $(( i % 2 )) == 1 ]]; then
++ tc filter add dev $h2 ingress pref $i proto ipv4 \
++ flower $tcflags dst_ip 198.51.100.1/32 \
++ ip_proto tcp tcp_flags 0x01/0x01 \
++ action drop &> /dev/null
++ else
++ tc filter add dev $h2 ingress pref $i proto ipv6 \
++ flower $tcflags dst_ip 2001:db8:1::1/128 \
++ action drop &> /dev/null
++ fi
++
++ ret=$?
++ [[ $ret -ne 0 ]] && max_size=$((i - 1)) && break
++ done
++
++ # We expect to exceed the maximum number of ACLs in a group, so that
++ # insertion eventually fails. Otherwise, the test should be adjusted to
++ # add more filters.
++ check_fail $ret "expected to exceed number of ACLs in a group"
++
++ for ((; i >= 1; i--)); do
++ if [[ $(( i % 2 )) == 1 ]]; then
++ tc filter del dev $h2 ingress pref $i proto ipv4 \
++ flower $tcflags dst_ip 198.51.100.1/32 \
++ ip_proto tcp tcp_flags 0x01/0x01 \
++ action drop &> /dev/null
++ else
++ tc filter del dev $h2 ingress pref $i proto ipv6 \
++ flower $tcflags dst_ip 2001:db8:1::1/128 \
++ action drop &> /dev/null
++ fi
++ done
++
++ log_test "max ACL group size test ($tcflags). max size $max_size"
++}
++
++collision_test()
++{
++ # Filters cannot share an eRP if in the common unmasked part (i.e.,
++ # without the delta bits) they have the same values. If the driver does
++ # not prevent such configuration (by spilling into the C-TCAM), then
++ # multiple entries will be present in the device with the same key,
++ # leading to collisions and a reduced scale.
++ #
++ # Create such a scenario and make sure all the filters are successfully
++ # added.
++
++ RET=0
++
++ local ret
++
++ if [[ "$tcflags" != "skip_sw" ]]; then
++ return 0;
++ fi
++
++ # Add a single dst_ip/24 filter and multiple dst_ip/32 filters that all
++ # have the same values in the common unmasked part (dst_ip/24).
++
++ tc filter add dev $h2 ingress pref 1 proto ipv4 handle 101 \
++ flower $tcflags dst_ip 198.51.100.0/24 \
++ action drop
++
++ for i in {0..255}; do
++ tc filter add dev $h2 ingress pref 2 proto ipv4 \
++ handle $((102 + i)) \
++ flower $tcflags dst_ip 198.51.100.${i}/32 \
++ action drop
++ ret=$?
++ [[ $ret -ne 0 ]] && break
++ done
++
++ check_err $ret "failed to add all the filters"
++
++ for i in {255..0}; do
++ tc filter del dev $h2 ingress pref 2 proto ipv4 \
++ handle $((102 + i)) flower
++ done
++
++ tc filter del dev $h2 ingress pref 1 proto ipv4 handle 101 flower
++
++ log_test "collision test ($tcflags)"
++}
++
+ setup_prepare()
+ {
+ h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+index 1b08e042cf942a..185b02d2d4cd14 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+@@ -269,6 +269,7 @@ for port in 0 1; do
+ echo 1 > $NSIM_DEV_SYS/new_port
+ fi
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
++ ifconfig $NSIM_NETDEV up
+
+ msg="new NIC device created"
+ exp0=( 0 0 0 0 )
+@@ -430,6 +431,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ overflow_table0 "overflow NIC table"
+@@ -487,6 +489,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ overflow_table0 "overflow NIC table"
+@@ -543,6 +546,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ overflow_table0 "destroy NIC"
+@@ -572,6 +576,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ msg="create VxLANs v6"
+@@ -632,6 +637,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+@@ -687,6 +693,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ msg="create VxLANs v6"
+@@ -746,6 +753,7 @@ for port in 0 1; do
+ fi
+
+ echo $port > $NSIM_DEV_SYS/new_port
++ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ifconfig $NSIM_NETDEV up
+
+ msg="create VxLANs v6"
+@@ -876,6 +884,7 @@ msg="re-add a port"
+
+ echo 2 > $NSIM_DEV_SYS/del_port
+ echo 2 > $NSIM_DEV_SYS/new_port
++NSIM_NETDEV=`get_netdev_name old_netdevs`
+ check_tables
+
+ msg="replace VxLAN in overflow table"
+diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config
+index 265b6882cc21ed..b5e3a3aad4bfbb 100644
+--- a/tools/testing/selftests/drivers/net/team/config
++++ b/tools/testing/selftests/drivers/net/team/config
+@@ -1,3 +1,5 @@
++CONFIG_DUMMY=y
++CONFIG_IPV6=y
++CONFIG_MACVLAN=y
+ CONFIG_NET_TEAM=y
+ CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+-CONFIG_MACVLAN=y
+diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
+index 9674a19396a325..7bc7af4eb2c17f 100644
+--- a/tools/testing/selftests/efivarfs/create-read.c
++++ b/tools/testing/selftests/efivarfs/create-read.c
+@@ -32,8 +32,10 @@ int main(int argc, char **argv)
+ rc = read(fd, buf, sizeof(buf));
+ if (rc != 0) {
+ fprintf(stderr, "Reading a new var should return EOF\n");
++ close(fd);
+ return EXIT_FAILURE;
+ }
+
++ close(fd);
+ return EXIT_SUCCESS;
+ }
+diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
+index c2f7cef919c045..eb4c3b41193484 100644
+--- a/tools/testing/selftests/filesystems/binderfs/Makefile
++++ b/tools/testing/selftests/filesystems/binderfs/Makefile
+@@ -3,6 +3,4 @@
+ CFLAGS += $(KHDR_INCLUDES) -pthread
+ TEST_GEN_PROGS := binderfs_test
+
+-binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
+-
+ include ../../lib.mk
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
+index b9c21a81d2481b..c0cdad4c400e8b 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
+@@ -53,7 +53,7 @@ fi
+
+ echo > dynamic_events
+
+-if [ "$FIELDS" ] ; then
++if [ "$FIELDS" -a "$FPROBES" ] ; then
+ echo "t:tpevent ${TP2} obj_size=s->object_size" >> dynamic_events
+ echo "f:fpevent ${TP3}%return path=\$retval->name:string" >> dynamic_events
+ echo "t:tpevent2 ${TP4} p->se.group_node.next->prev" >> dynamic_events
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+index d3a79da215c8b0..5f72abe6fa79bb 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Generic dynamic event - check if duplicate events are caught
+-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
++# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
+
+ echo 0 > events/enable
+
+diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+index b1ede624986676..b7c8f29c09a978 100644
+--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
++++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
+
+ yield
+
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -lt 3 ]; then
+ fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
+
+ yield
+
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -lt 3 ]; then
+ fail "at least fork, exec and exit events should be recorded"
+ fi
+@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
+
+ yield
+
+-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
++count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+ if [ $count -ne 0 ]; then
+ fail "any of scheduler events should not be recorded"
+ fi
+diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+index 2de7c61d1ae308..118247b8dd84d8 100644
+--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
++++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+@@ -10,7 +10,6 @@ fail() { #msg
+ }
+
+ sample_events() {
+- echo > trace
+ echo 1 > events/kmem/kmem_cache_free/enable
+ echo 1 > tracing_on
+ ls > /dev/null
+@@ -22,9 +21,10 @@ echo 0 > tracing_on
+ echo 0 > events/enable
+
+ echo "Get the most frequently calling function"
++echo > trace
+ sample_events
+
+-target_func=`cut -d: -f3 trace | sed 's/call_site=\([^+]*\)+0x.*/\1/' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
++target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
+ if [ -z "$target_func" ]; then
+ exit_fail
+ fi
+@@ -32,7 +32,16 @@ echo > trace
+
+ echo "Test event filter function name"
+ echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
++
++sample_events
++max_retry=10
++while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
+ sample_events
++max_retry=$((max_retry - 1))
++if [ $max_retry -eq 0 ]; then
++ exit_fail
++fi
++done
+
+ hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
+ misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
+@@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
+
+ echo "Test event filter function address"
+ echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
++echo > trace
++sample_events
++max_retry=10
++while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
+ sample_events
++max_retry=$((max_retry - 1))
++if [ $max_retry -eq 0 ]; then
++ exit_fail
++fi
++done
+
+ hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
+ misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+index ff7499eb98d6d7..ce5d2e62731f38 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Kprobe event char type argument
+-# requires: kprobe_events
++# requires: kprobe_events available_filter_functions
+
+ case `uname -m` in
+ x86_64)
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+index a202b2ea4baf98..4f72c2875f6b9c 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Kprobe event string type argument
+-# requires: kprobe_events
++# requires: kprobe_events available_filter_functions
+
+ case `uname -m` in
+ x86_64)
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+index 1f6981ef7afa06..ba19b81cef39af 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+@@ -30,7 +30,8 @@ find_dot_func() {
+ fi
+
+ grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
+- if grep -s $f available_filter_functions; then
++ cnt=`grep -s $f available_filter_functions | wc -l`;
++ if [ $cnt -eq 1 ]; then
+ echo $f
+ break
+ fi
+diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
+index a392d0917b4e55..994fa3468f170c 100644
+--- a/tools/testing/selftests/futex/functional/Makefile
++++ b/tools/testing/selftests/futex/functional/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
+-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
++CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
+ LDLIBS := -lpthread -lrt
+
+ LOCAL_HDRS := \
+diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile
+index 2e986cbf1a4630..2e75fb30f3a5e0 100644
+--- a/tools/testing/selftests/hid/Makefile
++++ b/tools/testing/selftests/hid/Makefile
+@@ -17,6 +17,9 @@ TEST_PROGS += hid-tablet.sh
+ TEST_PROGS += hid-usb_crash.sh
+ TEST_PROGS += hid-wacom.sh
+
++TEST_FILES := run-hid-tools-tests.sh
++TEST_FILES += tests
++
+ CXX ?= $(CROSS_COMPILE)g++
+
+ HOSTPKG_CONFIG := pkg-config
+diff --git a/tools/testing/selftests/hid/config.common b/tools/testing/selftests/hid/config.common
+index 0617275d93cc70..0f456dbab62f37 100644
+--- a/tools/testing/selftests/hid/config.common
++++ b/tools/testing/selftests/hid/config.common
+@@ -46,7 +46,6 @@ CONFIG_CRYPTO_SEQIV=y
+ CONFIG_CRYPTO_XXHASH=y
+ CONFIG_DCB=y
+ CONFIG_DEBUG_ATOMIC_SLEEP=y
+-CONFIG_DEBUG_CREDENTIALS=y
+ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+ CONFIG_DEBUG_MEMORY_INIT=y
+ CONFIG_DEFAULT_FQ_CODEL=y
+diff --git a/tools/testing/selftests/iommu/config b/tools/testing/selftests/iommu/config
+index 6c4f901d6fed3c..110d73917615d1 100644
+--- a/tools/testing/selftests/iommu/config
++++ b/tools/testing/selftests/iommu/config
+@@ -1,2 +1,3 @@
+-CONFIG_IOMMUFD
+-CONFIG_IOMMUFD_TEST
++CONFIG_IOMMUFD=y
++CONFIG_FAULT_INJECTION=y
++CONFIG_IOMMUFD_TEST=y
+diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
+index 33d08600be13d6..890a81f4ff6184 100644
+--- a/tools/testing/selftests/iommu/iommufd.c
++++ b/tools/testing/selftests/iommu/iommufd.c
+@@ -531,7 +531,7 @@ TEST_F(iommufd_ioas, copy_area)
+ {
+ struct iommu_ioas_copy copy_cmd = {
+ .size = sizeof(copy_cmd),
+- .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
++ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
+ .dst_ioas_id = self->ioas_id,
+ .src_ioas_id = self->ioas_id,
+ .length = PAGE_SIZE,
+@@ -1024,7 +1024,7 @@ TEST_F(iommufd_ioas, copy_sweep)
+ {
+ struct iommu_ioas_copy copy_cmd = {
+ .size = sizeof(copy_cmd),
+- .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
++ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
+ .src_ioas_id = self->ioas_id,
+ .dst_iova = MOCK_APERTURE_START,
+ .length = MOCK_PAGE_SIZE,
+@@ -1314,7 +1314,7 @@ TEST_F(iommufd_mock_domain, user_copy)
+ };
+ struct iommu_ioas_copy copy_cmd = {
+ .size = sizeof(copy_cmd),
+- .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
++ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
+ .dst_ioas_id = self->ioas_id,
+ .dst_iova = MOCK_APERTURE_START,
+ .length = BUFFER_SIZE,
+diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
+index 25110c7c0b3ed2..d7a8e321bb16b6 100644
+--- a/tools/testing/selftests/kcmp/kcmp_test.c
++++ b/tools/testing/selftests/kcmp/kcmp_test.c
+@@ -91,7 +91,7 @@ int main(int argc, char **argv)
+ ksft_print_header();
+ ksft_set_plan(3);
+
+- fd2 = open(kpath, O_RDWR, 0644);
++ fd2 = open(kpath, O_RDWR);
+ if (fd2 < 0) {
+ perror("Can't open file");
+ ksft_exit_fail();
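
The kcmp hunk above drops a stray permission-mode argument from an open() call that cannot create a file. A minimal C sketch of the convention involved (the helper names are illustrative, not from the patch): open(2)'s mode argument is only consulted when the call may create the file (O_CREAT or O_TMPFILE), so passing one on a plain O_RDWR open is dead weight.

    #include <fcntl.h>

    /* Sketch only: the mode argument matters solely when the call can
     * create the file; without O_CREAT it is ignored entirely. */
    static int open_existing(const char *path)
    {
            return open(path, O_RDWR);                 /* no mode needed */
    }

    static int open_or_create(const char *path)
    {
            return open(path, O_RDWR | O_CREAT, 0644); /* mode required */
    }
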
+diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
+index 529d29a359002c..ad7b97e16f37ef 100644
+--- a/tools/testing/selftests/kselftest.h
++++ b/tools/testing/selftests/kselftest.h
+@@ -48,7 +48,9 @@
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <stdarg.h>
++#include <string.h>
+ #include <stdio.h>
++#include <sys/utsname.h>
+ #endif
+
+ #ifndef ARRAY_SIZE
+@@ -155,6 +157,19 @@ static inline void ksft_print_msg(const char *msg, ...)
+ va_end(args);
+ }
+
++static inline void ksft_perror(const char *msg)
++{
++#ifndef NOLIBC
++ ksft_print_msg("%s: %s (%d)\n", msg, strerror(errno), errno);
++#else
++ /*
++ * nolibc doesn't provide strerror() and it seems
++ * inappropriate to add one; just print the errno.
++ */
++ ksft_print_msg("%s: %d\n", msg, errno);
++#endif
++}
++
+ static inline void ksft_test_result_pass(const char *msg, ...)
+ {
+ int saved_errno = errno;
+@@ -327,4 +342,21 @@ static inline int ksft_exit_skip(const char *msg, ...)
+ exit(KSFT_SKIP);
+ }
+
++static inline int ksft_min_kernel_version(unsigned int min_major,
++ unsigned int min_minor)
++{
++#ifdef NOLIBC
++ ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n");
++ return 0;
++#else
++ unsigned int major, minor;
++ struct utsname info;
++
++ if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2)
++ ksft_exit_fail_msg("Can't parse kernel version\n");
++
++ return major > min_major || (major == min_major && minor >= min_minor);
++#endif
++}
++
+ #endif /* __KSELFTEST_H */
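
For context, a hedged sketch of how a test might consume the two helpers added above; the device path and the 6.6 version gate are invented for illustration, while the ksft_* calls are the ones defined in this header.

    #include <fcntl.h>
    #include <unistd.h>
    #include "kselftest.h"

    int main(void)
    {
            int fd;

            ksft_print_header();
            ksft_set_plan(1);

            /* hypothetical gate: require at least kernel 6.6 */
            if (!ksft_min_kernel_version(6, 6))
                    ksft_exit_skip("kernel too old for this test\n");

            fd = open("/dev/null", O_RDONLY);  /* placeholder operation */
            if (fd < 0) {
                    ksft_perror("open /dev/null"); /* msg + strerror(errno) */
                    ksft_exit_fail();
            }
            close(fd);

            ksft_test_result_pass("opened device\n");
            return ksft_exit_pass();
    }
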
+diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
+index eef816b80993f5..4ac4d3ea976ecd 100644
+--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
++++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
+@@ -6,6 +6,7 @@
+ */
+ #define _GNU_SOURCE
+ #include <linux/kernel.h>
++#include <linux/bitfield.h>
+ #include <sys/syscall.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_para.h>
+@@ -84,6 +85,18 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
+ return v;
+ }
+
++static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
++{
++ struct vm_gic v;
++
++ v.gic_dev_type = gic_dev_type;
++ v.vm = vm_create_barebones();
++ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
++
++ return v;
++}
++
+ static void vm_gic_destroy(struct vm_gic *v)
+ {
+ close(v->gic_fd);
+@@ -357,6 +370,40 @@ static void test_vcpus_then_vgic(uint32_t gic_dev_type)
+ vm_gic_destroy(&v);
+ }
+
++#define KVM_VGIC_V2_ATTR(offset, cpu) \
++ (FIELD_PREP(KVM_DEV_ARM_VGIC_OFFSET_MASK, offset) | \
++ FIELD_PREP(KVM_DEV_ARM_VGIC_CPUID_MASK, cpu))
++
++#define GIC_CPU_CTRL 0x00
++
++static void test_v2_uaccess_cpuif_no_vcpus(void)
++{
++ struct vm_gic v;
++ u64 val = 0;
++ int ret;
++
++ v = vm_gic_create_barebones(KVM_DEV_TYPE_ARM_VGIC_V2);
++ subtest_dist_rdist(&v);
++
++ ret = __kvm_has_device_attr(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
++ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0));
++ TEST_ASSERT(ret && errno == EINVAL,
++ "accessed non-existent CPU interface, want errno: %i",
++ EINVAL);
++ ret = __kvm_device_attr_get(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
++ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
++ TEST_ASSERT(ret && errno == EINVAL,
++ "accessed non-existent CPU interface, want errno: %i",
++ EINVAL);
++ ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
++ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
++ TEST_ASSERT(ret && errno == EINVAL,
++ "accessed non-existent CPU interface, want errno: %i",
++ EINVAL);
++
++ vm_gic_destroy(&v);
++}
++
+ static void test_v3_new_redist_regions(void)
+ {
+ struct kvm_vcpu *vcpus[NR_VCPUS];
+@@ -675,6 +722,9 @@ void run_tests(uint32_t gic_dev_type)
+ test_vcpus_then_vgic(gic_dev_type);
+ test_vgic_then_vcpus(gic_dev_type);
+
++ if (VGIC_DEV_IS_V2(gic_dev_type))
++ test_v2_uaccess_cpuif_no_vcpus();
++
+ if (VGIC_DEV_IS_V3(gic_dev_type)) {
+ test_v3_new_redist_regions();
+ test_v3_typer_accesses();
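
The KVM_VGIC_V2_ATTR macro above packs two fields into a single device-attribute id with FIELD_PREP. A minimal sketch of that packing with invented masks (the real KVM_DEV_ARM_VGIC_*_MASK values come from <asm/kvm.h>; the DEMO_* layout below merely mirrors the idea of offset in the low bits, CPU id above it):

    #include <linux/bitfield.h>
    #include <stdint.h>

    /* Illustrative layout only: register offset in the low 32 bits,
     * CPU id in the byte above it. */
    #define DEMO_OFFSET_MASK 0x00000000ffffffffULL
    #define DEMO_CPUID_MASK  0x000000ff00000000ULL

    static uint64_t demo_vgic_attr(uint32_t offset, uint32_t cpu)
    {
            /* FIELD_PREP shifts each value into the position its mask
             * selects, so the fields can simply be OR-ed together. */
            return FIELD_PREP(DEMO_OFFSET_MASK, offset) |
                   FIELD_PREP(DEMO_CPUID_MASK, cpu);
    }
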
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index 936f3a8d1b83e8..e96fababd3f063 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -376,7 +376,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
+
+ cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
+
+- /* Cleared pages should be the same as collected */
++ /*
++ * Cleared pages should be the same as collected, as KVM is supposed to
++ * clear only the entries that have been harvested.
++ */
+ TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
+ "with collected (%u)", cleared, count);
+
+@@ -415,12 +418,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+ }
+ }
+
+-static void dirty_ring_before_vcpu_join(void)
+-{
+- /* Kick another round of vcpu just to make sure it will quit */
+- sem_post(&sem_vcpu_cont);
+-}
+-
+ struct log_mode {
+ const char *name;
+ /* Return true if this mode is supported, otherwise false */
+@@ -433,7 +430,6 @@ struct log_mode {
+ uint32_t *ring_buf_idx);
+ /* Hook to call when after each vcpu run */
+ void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+- void (*before_vcpu_join) (void);
+ } log_modes[LOG_MODE_NUM] = {
+ {
+ .name = "dirty-log",
+@@ -452,7 +448,6 @@ struct log_mode {
+ .supported = dirty_ring_supported,
+ .create_vm_done = dirty_ring_create_vm_done,
+ .collect_dirty_pages = dirty_ring_collect_dirty_pages,
+- .before_vcpu_join = dirty_ring_before_vcpu_join,
+ .after_vcpu_run = dirty_ring_after_vcpu_run,
+ },
+ };
+@@ -513,14 +508,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+ mode->after_vcpu_run(vcpu, ret, err);
+ }
+
+-static void log_mode_before_vcpu_join(void)
+-{
+- struct log_mode *mode = &log_modes[host_log_mode];
+-
+- if (mode->before_vcpu_join)
+- mode->before_vcpu_join();
+-}
+-
+ static void generate_random_array(uint64_t *guest_array, uint64_t size)
+ {
+ uint64_t i;
+@@ -719,6 +706,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ struct kvm_vm *vm;
+ unsigned long *bmap;
+ uint32_t ring_buf_idx = 0;
++ int sem_val;
+
+ if (!log_mode_supported()) {
+ print_skip("Log mode '%s' not supported",
+@@ -788,12 +776,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ /* Start the iterations */
+ iteration = 1;
+ sync_global_to_guest(vm, iteration);
+- host_quit = false;
++ WRITE_ONCE(host_quit, false);
+ host_dirty_count = 0;
+ host_clear_count = 0;
+ host_track_next_count = 0;
+ WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+
++ /*
++ * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
++ * that the main task and vCPU worker were synchronized and completed
++ * verification of all iterations.
++ */
++ sem_getvalue(&sem_vcpu_stop, &sem_val);
++ TEST_ASSERT_EQ(sem_val, 0);
++ sem_getvalue(&sem_vcpu_cont, &sem_val);
++ TEST_ASSERT_EQ(sem_val, 0);
++
+ pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
+
+ while (iteration < p->iterations) {
+@@ -819,15 +817,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ assert(host_log_mode == LOG_MODE_DIRTY_RING ||
+ atomic_read(&vcpu_sync_stop_requested) == false);
+ vm_dirty_log_verify(mode, bmap);
+- sem_post(&sem_vcpu_cont);
+
+- iteration++;
++ /*
++ * Set host_quit before sem_vcpu_cont in the final iteration to
++ * ensure that the vCPU worker doesn't resume the guest. As
++ * above, the dirty ring test may stop and wait even when not
++ * explicitly requested to do so, i.e. would hang waiting for a
++ * "continue" if it's allowed to resume the guest.
++ */
++ if (++iteration == p->iterations)
++ WRITE_ONCE(host_quit, true);
++
++ sem_post(&sem_vcpu_cont);
+ sync_global_to_guest(vm, iteration);
+ }
+
+- /* Tell the vcpu thread to quit */
+- host_quit = true;
+- log_mode_before_vcpu_join();
+ pthread_join(vcpu_thread, NULL);
+
+ pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
+diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c
+index 11329e5ff945eb..309ee5c72b46a5 100644
+--- a/tools/testing/selftests/kvm/x86_64/amx_test.c
++++ b/tools/testing/selftests/kvm/x86_64/amx_test.c
+@@ -221,7 +221,7 @@ int main(int argc, char *argv[])
+ vm_vaddr_t amx_cfg, tiledata, xstate;
+ struct ucall uc;
+ u32 amx_offset;
+- int stage, ret;
++ int ret;
+
+ /*
+ * Note, all off-by-default features must be enabled before anything
+@@ -263,7 +263,7 @@ int main(int argc, char *argv[])
+ memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
+ vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);
+
+- for (stage = 1; ; stage++) {
++ for (;;) {
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+index 9f28aa276c4e23..a726831b80244e 100644
+--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
++++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+@@ -454,7 +454,7 @@ static void guest_test_msrs_access(void)
+ case 44:
+ /* MSR is not available when CPUID feature bit is unset */
+ if (!has_invtsc)
+- continue;
++ goto next_stage;
+ msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+ msr->write = false;
+ msr->fault_expected = true;
+@@ -462,7 +462,7 @@ static void guest_test_msrs_access(void)
+ case 45:
+ /* MSR is available when CPUID feature bit is set */
+ if (!has_invtsc)
+- continue;
++ goto next_stage;
+ vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
+ msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+ msr->write = false;
+@@ -471,7 +471,7 @@ static void guest_test_msrs_access(void)
+ case 46:
+ /* Writing bits other than 0 is forbidden */
+ if (!has_invtsc)
+- continue;
++ goto next_stage;
+ msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+ msr->write = true;
+ msr->write_val = 0xdeadbeef;
+@@ -480,7 +480,7 @@ static void guest_test_msrs_access(void)
+ case 47:
+ /* Setting bit 0 enables the feature */
+ if (!has_invtsc)
+- continue;
++ goto next_stage;
+ msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
+ msr->write = true;
+ msr->write_val = 1;
+@@ -513,6 +513,7 @@ static void guest_test_msrs_access(void)
+ return;
+ }
+
++next_stage:
+ stage++;
+ kvm_vm_free(vm);
+ }
+diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
+index 792c3f0a59b4f4..5aa7d2feab100d 100644
+--- a/tools/testing/selftests/landlock/base_test.c
++++ b/tools/testing/selftests/landlock/base_test.c
+@@ -9,6 +9,7 @@
+ #define _GNU_SOURCE
+ #include <errno.h>
+ #include <fcntl.h>
++#include <linux/keyctl.h>
+ #include <linux/landlock.h>
+ #include <string.h>
+ #include <sys/prctl.h>
+@@ -326,4 +327,77 @@ TEST(ruleset_fd_transfer)
+ ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+ }
+
++TEST(cred_transfer)
++{
++ struct landlock_ruleset_attr ruleset_attr = {
++ .handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
++ };
++ int ruleset_fd, dir_fd;
++ pid_t child;
++ int status;
++
++ drop_caps(_metadata);
++
++ dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
++ EXPECT_LE(0, dir_fd);
++ EXPECT_EQ(0, close(dir_fd));
++
++ /* Denies opening directories. */
++ ruleset_fd =
++ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
++ ASSERT_LE(0, ruleset_fd);
++ EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
++ ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
++ EXPECT_EQ(0, close(ruleset_fd));
++
++ /* Checks ruleset enforcement. */
++ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++ EXPECT_EQ(EACCES, errno);
++
++ /* Needed for KEYCTL_SESSION_TO_PARENT permission checks */
++ EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL, 0,
++ 0, 0))
++ {
++ TH_LOG("Failed to join session keyring: %s", strerror(errno));
++ }
++
++ child = fork();
++ ASSERT_LE(0, child);
++ if (child == 0) {
++ /* Checks ruleset enforcement. */
++ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++ EXPECT_EQ(EACCES, errno);
++
++ /*
++ * KEYCTL_SESSION_TO_PARENT is a no-op unless we have a
++ * different session keyring in the child, so make that happen.
++ */
++ EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING,
++ NULL, 0, 0, 0));
++
++ /*
++ * KEYCTL_SESSION_TO_PARENT installs credentials on the parent
++ * that never go through the cred_prepare hook; this path uses
++ * cred_transfer instead.
++ */
++ EXPECT_EQ(0, syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT, 0,
++ 0, 0, 0));
++
++ /* Re-checks ruleset enforcement. */
++ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++ EXPECT_EQ(EACCES, errno);
++
++ _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
++ return;
++ }
++
++ EXPECT_EQ(child, waitpid(child, &status, 0));
++ EXPECT_EQ(1, WIFEXITED(status));
++ EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
++
++ /* Re-checks ruleset enforcement. */
++ EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
++ EXPECT_EQ(EACCES, errno);
++}
++
+ TEST_HARNESS_MAIN
+diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
+index 3dc9e438eab10f..efca1c7333670a 100644
+--- a/tools/testing/selftests/landlock/config
++++ b/tools/testing/selftests/landlock/config
+@@ -1,5 +1,6 @@
+ CONFIG_CGROUPS=y
+ CONFIG_CGROUP_SCHED=y
++CONFIG_KEYS=y
+ CONFIG_OVERLAY_FS=y
+ CONFIG_PROC_FS=y
+ CONFIG_SECURITY=y
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index 251594306d4090..720bafa0f87be2 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -241,9 +241,11 @@ struct mnt_opt {
+ const char *const data;
+ };
+
+-const struct mnt_opt mnt_tmp = {
++#define MNT_TMP_DATA "size=4m,mode=700"
++
++static const struct mnt_opt mnt_tmp = {
+ .type = "tmpfs",
+- .data = "size=4m,mode=700",
++ .data = MNT_TMP_DATA,
+ };
+
+ static int mount_opt(const struct mnt_opt *const mnt, const char *const target)
+@@ -4523,7 +4525,10 @@ FIXTURE_VARIANT(layout3_fs)
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(layout3_fs, tmpfs) {
+ /* clang-format on */
+- .mnt = mnt_tmp,
++ .mnt = {
++ .type = "tmpfs",
++ .data = MNT_TMP_DATA,
++ },
+ .file_path = file1_s1d1,
+ };
+
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 118e0964bda946..01db65c0e84ca3 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -7,6 +7,8 @@ else ifneq ($(filter -%,$(LLVM)),)
+ LLVM_SUFFIX := $(LLVM)
+ endif
+
++CLANG := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
++
+ CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
+@@ -18,7 +20,13 @@ CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS_x86_64 := x86_64-linux-gnu
+-CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
++
++# Default to host architecture if ARCH is not explicitly given.
++ifeq ($(ARCH),)
++CLANG_TARGET_FLAGS := $(shell $(CLANG) -print-target-triple)
++else
++CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
++endif
+
+ ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CLANG_TARGET_FLAGS),)
+@@ -30,7 +38,7 @@ else
+ CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+ endif # CROSS_COMPILE
+
+-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
++CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
+ else
+ CC := $(CROSS_COMPILE)gcc
+ endif # LLVM
+@@ -44,26 +52,10 @@ endif
+ selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
+ top_srcdir = $(selfdir)/../../..
+
+-ifeq ("$(origin O)", "command line")
+- KBUILD_OUTPUT := $(O)
+-endif
+-
+-ifneq ($(KBUILD_OUTPUT),)
+- # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
+- # expand a shell special character '~'. We use a somewhat tedious way here.
+- abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
+- $(if $(abs_objtree),, \
+- $(error failed to create output directory "$(KBUILD_OUTPUT)"))
+- # $(realpath ...) resolves symlinks
+- abs_objtree := $(realpath $(abs_objtree))
+- KHDR_DIR := ${abs_objtree}/usr/include
+-else
+- abs_srctree := $(shell cd $(top_srcdir) && pwd)
+- KHDR_DIR := ${abs_srctree}/usr/include
++ifeq ($(KHDR_INCLUDES),)
++KHDR_INCLUDES := -isystem $(top_srcdir)/usr/include
+ endif
+
+-KHDR_INCLUDES := -isystem $(KHDR_DIR)
+-
+ # The following are built by lib.mk common compile rules.
+ # TEST_CUSTOM_PROGS should be used by tests that require
+ # custom build rule and prevent common build rule use.
+@@ -74,25 +66,7 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
+ TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
+ TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
+
+-all: kernel_header_files $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) \
+- $(TEST_GEN_FILES)
+-
+-kernel_header_files:
+- @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null; \
+- if [ $$? -ne 0 ]; then \
+- RED='\033[1;31m'; \
+- NOCOLOR='\033[0m'; \
+- echo; \
+- echo -e "$${RED}error$${NOCOLOR}: missing kernel header files."; \
+- echo "Please run this and try again:"; \
+- echo; \
+- echo " cd $(top_srcdir)"; \
+- echo " make headers"; \
+- echo; \
+- exit 1; \
+- fi
+-
+-.PHONY: kernel_header_files
++all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+
+ define RUN_TESTS
+ BASE_DIR="$(selfdir)"; \
+@@ -103,11 +77,29 @@ define RUN_TESTS
+ run_many $(1)
+ endef
+
++define INSTALL_INCLUDES
++ $(if $(TEST_INCLUDES), \
++ relative_files=""; \
++ for entry in $(TEST_INCLUDES); do \
++ entry_dir=$$(readlink -e "$$(dirname "$$entry")"); \
++ entry_name=$$(basename "$$entry"); \
++ relative_dir=$${entry_dir#"$$SRC_PATH"/}; \
++ if [ "$$relative_dir" = "$$entry_dir" ]; then \
++ echo "Error: TEST_INCLUDES entry \"$$entry\" not located inside selftests directory ($$SRC_PATH)" >&2; \
++ exit 1; \
++ fi; \
++ relative_files="$$relative_files $$relative_dir/$$entry_name"; \
++ done; \
++ cd $(SRC_PATH) && rsync -aR $$relative_files $(OBJ_PATH)/ \
++ )
++endef
++
+ run_tests: all
+ ifdef building_out_of_srctree
+ @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
+ rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ fi
++ @$(INSTALL_INCLUDES)
+ @if [ "X$(TEST_PROGS)" != "X" ]; then \
+ $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
+ $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
+@@ -137,6 +129,7 @@ endef
+ install: all
+ ifdef INSTALL_PATH
+ $(INSTALL_RULE)
++ $(INSTALL_INCLUDES)
+ else
+ $(error Error: set INSTALL_PATH to use install)
+ endif
+diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
+index 5d52f64dfb4300..7afe05e8c4d792 100644
+--- a/tools/testing/selftests/lkdtm/config
++++ b/tools/testing/selftests/lkdtm/config
+@@ -9,7 +9,6 @@ CONFIG_INIT_ON_FREE_DEFAULT_ON=y
+ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+ CONFIG_UBSAN=y
+ CONFIG_UBSAN_BOUNDS=y
+-CONFIG_UBSAN_TRAP=y
+ CONFIG_STACKPROTECTOR_STRONG=y
+ CONFIG_SLUB_DEBUG=y
+ CONFIG_SLUB_DEBUG_ON=y
+diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
+index 607b8d7e3ea34d..2f3a1b96da6e38 100644
+--- a/tools/testing/selftests/lkdtm/tests.txt
++++ b/tools/testing/selftests/lkdtm/tests.txt
+@@ -7,7 +7,7 @@ EXCEPTION
+ #EXHAUST_STACK Corrupts memory on failure
+ #CORRUPT_STACK Crashes entire system on success
+ #CORRUPT_STACK_STRONG Crashes entire system on success
+-ARRAY_BOUNDS
++ARRAY_BOUNDS call trace:|UBSAN: array-index-out-of-bounds
+ CORRUPT_LIST_ADD list_add corruption
+ CORRUPT_LIST_DEL list_del corruption
+ STACK_GUARD_PAGE_LEADING
+diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
+index 6a9fc5693145f3..c9fcbc6e5121e6 100644
+--- a/tools/testing/selftests/mm/Makefile
++++ b/tools/testing/selftests/mm/Makefile
+@@ -12,7 +12,7 @@ uname_M := $(shell uname -m 2>/dev/null || echo not)
+ else
+ uname_M := $(shell echo $(CROSS_COMPILE) | grep -o '^[a-z0-9]\+')
+ endif
+-ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
++ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/powerpc/')
+ endif
+
+ # Without this, failed build products remain, with up-to-date timestamps,
+@@ -51,7 +51,9 @@ TEST_GEN_FILES += madv_populate
+ TEST_GEN_FILES += map_fixed_noreplace
+ TEST_GEN_FILES += map_hugetlb
+ TEST_GEN_FILES += map_populate
++ifneq (,$(filter $(ARCH),arm64 riscv riscv64 x86 x86_64))
+ TEST_GEN_FILES += memfd_secret
++endif
+ TEST_GEN_FILES += migration
+ TEST_GEN_FILES += mkdirty
+ TEST_GEN_FILES += mlock-random-test
+@@ -95,13 +97,13 @@ TEST_GEN_FILES += $(BINARIES_64)
+ endif
+ else
+
+-ifneq (,$(findstring $(ARCH),ppc64))
++ifneq (,$(findstring $(ARCH),powerpc))
+ TEST_GEN_FILES += protection_keys
+ endif
+
+ endif
+
+-ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sparc64 x86_64))
++ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64 s390))
+ TEST_GEN_FILES += va_high_addr_switch
+ TEST_GEN_FILES += virtual_address_range
+ TEST_GEN_FILES += write_to_hugetlbfs
+diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+index 0899019a7fcb4b..8e00276b4e69be 100755
+--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
++++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ # Kselftest framework requirement - SKIP code is 4.
+@@ -252,7 +252,7 @@ function cleanup_hugetlb_memory() {
+ local cgroup="$1"
+ if [[ "$(pgrep -f write_to_hugetlbfs)" != "" ]]; then
+ echo killing write_to_hugetlbfs
+- killall -2 write_to_hugetlbfs
++ killall -2 --wait write_to_hugetlbfs
+ wait_for_hugetlb_memory_to_get_depleted $cgroup
+ fi
+ set -e
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index 9b420140ba2bad..309b3750e57e13 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -33,7 +33,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
+ FILE *cmdfile = popen(cmd, "r");
+
+ if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
+- perror("Failed to read meminfo\n");
++ ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
+ return -1;
+ }
+
+@@ -44,7 +44,7 @@ int read_memory_info(unsigned long *memfree, unsigned long *hugepagesize)
+ cmdfile = popen(cmd, "r");
+
+ if (!(fgets(buffer, sizeof(buffer), cmdfile))) {
+- perror("Failed to read meminfo\n");
++ ksft_print_msg("Failed to read meminfo: %s\n", strerror(errno));
+ return -1;
+ }
+
+@@ -62,14 +62,14 @@ int prereq(void)
+ fd = open("/proc/sys/vm/compact_unevictable_allowed",
+ O_RDONLY | O_NONBLOCK);
+ if (fd < 0) {
+- perror("Failed to open\n"
+- "/proc/sys/vm/compact_unevictable_allowed\n");
++ ksft_print_msg("Failed to open /proc/sys/vm/compact_unevictable_allowed: %s\n",
++ strerror(errno));
+ return -1;
+ }
+
+ if (read(fd, &allowed, sizeof(char)) != sizeof(char)) {
+- perror("Failed to read from\n"
+- "/proc/sys/vm/compact_unevictable_allowed\n");
++ ksft_print_msg("Failed to read from /proc/sys/vm/compact_unevictable_allowed: %s\n",
++ strerror(errno));
+ close(fd);
+ return -1;
+ }
+@@ -78,15 +78,17 @@ int prereq(void)
+ if (allowed == '1')
+ return 0;
+
++ ksft_print_msg("Compaction isn't allowed\n");
+ return -1;
+ }
+
+-int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
++int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ {
+- int fd;
++ unsigned long nr_hugepages_ul;
++ int fd, ret = -1;
+ int compaction_index = 0;
+- char initial_nr_hugepages[10] = {0};
+- char nr_hugepages[10] = {0};
++ char initial_nr_hugepages[20] = {0};
++ char nr_hugepages[20] = {0};
+
+ /* We want to test with 80% of available memory. Else, the OOM killer comes
+ into play */
+@@ -94,18 +96,24 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+
+ fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+- perror("Failed to open /proc/sys/vm/nr_hugepages");
+- return -1;
++ ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
++ strerror(errno));
++ ret = -1;
++ goto out;
+ }
+
+ if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
+- perror("Failed to read from /proc/sys/vm/nr_hugepages");
++ ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
++ strerror(errno));
+ goto close_fd;
+ }
+
++ lseek(fd, 0, SEEK_SET);
++
+ /* Start with the initial condition of 0 huge pages*/
+ if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+- perror("Failed to write 0 to /proc/sys/vm/nr_hugepages\n");
++ ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
++ strerror(errno));
+ goto close_fd;
+ }
+
+@@ -114,82 +122,82 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ /* Request a large number of huge pages. The Kernel will allocate
+ as much as it can */
+ if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
+- perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages\n");
++ ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n",
++ strerror(errno));
+ goto close_fd;
+ }
+
+ lseek(fd, 0, SEEK_SET);
+
+ if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
+- perror("Failed to re-read from /proc/sys/vm/nr_hugepages\n");
++ ksft_print_msg("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
++ strerror(errno));
+ goto close_fd;
+ }
+
+ /* We should have been able to request at least 1/3rd of the memory in
+ huge pages */
+- compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
+-
+- if (compaction_index > 3) {
+- printf("No of huge pages allocated = %d\n",
+- (atoi(nr_hugepages)));
+- fprintf(stderr, "ERROR: Less that 1/%d of memory is available\n"
+- "as huge pages\n", compaction_index);
++ nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
++ if (!nr_hugepages_ul) {
++ ksft_print_msg("ERROR: No memory is available as huge pages\n");
+ goto close_fd;
+ }
+-
+- printf("No of huge pages allocated = %d\n",
+- (atoi(nr_hugepages)));
++ compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
+
+ lseek(fd, 0, SEEK_SET);
+
+ if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
+ != strlen(initial_nr_hugepages)) {
+- perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
++ ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
++ strerror(errno));
+ goto close_fd;
+ }
+
+- close(fd);
+- return 0;
++ ksft_print_msg("Number of huge pages allocated = %lu\n",
++ nr_hugepages_ul);
++
++ if (compaction_index > 3) {
++ ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
++ "as huge pages\n", compaction_index);
++ goto close_fd;
++ }
++
++ ret = 0;
+
+ close_fd:
+ close(fd);
+- printf("Not OK. Compaction test failed.");
+- return -1;
++ out:
++ ksft_test_result(ret == 0, "check_compaction\n");
++ return ret;
+ }
+
+
+ int main(int argc, char **argv)
+ {
+ struct rlimit lim;
+- struct map_list *list, *entry;
++ struct map_list *list = NULL, *entry;
+ size_t page_size, i;
+ void *map = NULL;
+ unsigned long mem_free = 0;
+ unsigned long hugepage_size = 0;
+ long mem_fragmentable_MB = 0;
+
+- if (prereq() != 0) {
+- printf("Either the sysctl compact_unevictable_allowed is not\n"
+- "set to 1 or couldn't read the proc file.\n"
+- "Skipping the test\n");
+- return KSFT_SKIP;
+- }
++ ksft_print_header();
++
++ if (prereq() != 0)
++ return ksft_exit_pass();
++
++ ksft_set_plan(1);
+
+ lim.rlim_cur = RLIM_INFINITY;
+ lim.rlim_max = RLIM_INFINITY;
+- if (setrlimit(RLIMIT_MEMLOCK, &lim)) {
+- perror("Failed to set rlimit:\n");
+- return -1;
+- }
++ if (setrlimit(RLIMIT_MEMLOCK, &lim))
++ ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));
+
+ page_size = getpagesize();
+
+- list = NULL;
+-
+- if (read_memory_info(&mem_free, &hugepage_size) != 0) {
+- printf("ERROR: Cannot read meminfo\n");
+- return -1;
+- }
++ if (read_memory_info(&mem_free, &hugepage_size) != 0)
++ ksft_exit_fail_msg("Failed to get meminfo\n");
+
+ mem_fragmentable_MB = mem_free * 0.8 / 1024;
+
+@@ -225,7 +233,7 @@ int main(int argc, char **argv)
+ }
+
+ if (check_compaction(mem_free, hugepage_size) == 0)
+- return 0;
++ return ksft_exit_pass();
+
+- return -1;
++ return ksft_exit_fail();
+ }
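
One detail in the compaction fix above is the switch from atoi() to strtoul() together with the larger 20-byte buffer: on big machines nr_hugepages can exceed INT_MAX, where atoi() has no defined behavior, while strtoul() covers the full unsigned long range. A tiny hedged demo with an invented value:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* > UINT_MAX: out of range for atoi(), fine for strtoul()
             * on an LP64 system */
            const char *nr_hugepages = "4294967296";

            printf("strtoul: %lu\n", strtoul(nr_hugepages, NULL, 10));
            return 0;
    }
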
+diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
+index 7324ce5363c0c9..6f2f839904416c 100644
+--- a/tools/testing/selftests/mm/cow.c
++++ b/tools/testing/selftests/mm/cow.c
+@@ -1680,6 +1680,8 @@ int main(int argc, char **argv)
+ {
+ int err;
+
++ ksft_print_header();
++
+ pagesize = getpagesize();
+ thpsize = read_pmd_pagesize();
+ if (thpsize)
+@@ -1689,7 +1691,6 @@ int main(int argc, char **argv)
+ ARRAY_SIZE(hugetlbsizes));
+ detect_huge_zeropage();
+
+- ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(anon_test_cases) * tests_per_anon_test_case() +
+ ARRAY_SIZE(anon_thp_test_cases) * tests_per_anon_thp_test_case() +
+ ARRAY_SIZE(non_anon_test_cases) * tests_per_non_anon_test_case());
+diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
+index ec22291363844b..7821cf45c323b9 100644
+--- a/tools/testing/selftests/mm/gup_test.c
++++ b/tools/testing/selftests/mm/gup_test.c
+@@ -1,3 +1,4 @@
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <fcntl.h>
+ #include <errno.h>
+ #include <stdio.h>
+@@ -50,39 +51,41 @@ static char *cmd_to_str(unsigned long cmd)
+ void *gup_thread(void *data)
+ {
+ struct gup_test gup = *(struct gup_test *)data;
+- int i;
++ int i, status;
+
+ /* Only report timing information on the *_BENCHMARK commands: */
+ if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
+ (cmd == PIN_LONGTERM_BENCHMARK)) {
+ for (i = 0; i < repeats; i++) {
+ gup.size = size;
+- if (ioctl(gup_fd, cmd, &gup))
+- perror("ioctl"), exit(1);
++ status = ioctl(gup_fd, cmd, &gup);
++ if (status)
++ break;
+
+ pthread_mutex_lock(&print_mutex);
+- printf("%s: Time: get:%lld put:%lld us",
+- cmd_to_str(cmd), gup.get_delta_usec,
+- gup.put_delta_usec);
++ ksft_print_msg("%s: Time: get:%lld put:%lld us",
++ cmd_to_str(cmd), gup.get_delta_usec,
++ gup.put_delta_usec);
+ if (gup.size != size)
+- printf(", truncated (size: %lld)", gup.size);
+- printf("\n");
++ ksft_print_msg(", truncated (size: %lld)", gup.size);
++ ksft_print_msg("\n");
+ pthread_mutex_unlock(&print_mutex);
+ }
+ } else {
+ gup.size = size;
+- if (ioctl(gup_fd, cmd, &gup)) {
+- perror("ioctl");
+- exit(1);
+- }
++ status = ioctl(gup_fd, cmd, &gup);
++ if (status)
++ goto return_;
+
+ pthread_mutex_lock(&print_mutex);
+- printf("%s: done\n", cmd_to_str(cmd));
++ ksft_print_msg("%s: done\n", cmd_to_str(cmd));
+ if (gup.size != size)
+- printf("Truncated (size: %lld)\n", gup.size);
++ ksft_print_msg("Truncated (size: %lld)\n", gup.size);
+ pthread_mutex_unlock(&print_mutex);
+ }
+
++return_:
++ ksft_test_result(!status, "ioctl status %d\n", status);
+ return NULL;
+ }
+
+@@ -170,7 +173,7 @@ int main(int argc, char **argv)
+ touch = 1;
+ break;
+ default:
+- return -1;
++ ksft_exit_fail_msg("Wrong argument\n");
+ }
+ }
+
+@@ -198,11 +201,12 @@ int main(int argc, char **argv)
+ }
+ }
+
+- filed = open(file, O_RDWR|O_CREAT);
+- if (filed < 0) {
+- perror("open");
+- exit(filed);
+- }
++ ksft_print_header();
++ ksft_set_plan(nthreads);
++
++ filed = open(file, O_RDWR|O_CREAT, 0664);
++ if (filed < 0)
++ ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
+
+ gup.nr_pages_per_call = nr_pages;
+ if (write)
+@@ -213,27 +217,24 @@ int main(int argc, char **argv)
+ switch (errno) {
+ case EACCES:
+ if (getuid())
+- printf("Please run this test as root\n");
++ ksft_print_msg("Please run this test as root\n");
+ break;
+ case ENOENT:
+- if (opendir("/sys/kernel/debug") == NULL) {
+- printf("mount debugfs at /sys/kernel/debug\n");
+- break;
+- }
+- printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
++ if (opendir("/sys/kernel/debug") == NULL)
++ ksft_print_msg("mount debugfs at /sys/kernel/debug\n");
++ ksft_print_msg("check if CONFIG_GUP_TEST is enabled in kernel config\n");
+ break;
+ default:
+- perror("failed to open " GUP_TEST_FILE);
++ ksft_print_msg("failed to open %s: %s\n", GUP_TEST_FILE, strerror(errno));
+ break;
+ }
+- exit(KSFT_SKIP);
++ ksft_test_result_skip("Please run this test as root\n");
++ return ksft_exit_pass();
+ }
+
+ p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
+- if (p == MAP_FAILED) {
+- perror("mmap");
+- exit(1);
+- }
++ if (p == MAP_FAILED)
++ ksft_exit_fail_msg("mmap: %s\n", strerror(errno));
+ gup.addr = (unsigned long)p;
+
+ if (thp == 1)
+@@ -264,7 +265,8 @@ int main(int argc, char **argv)
+ ret = pthread_join(tid[i], NULL);
+ assert(ret == 0);
+ }
++
+ free(tid);
+
+- return 0;
++ return ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
+index 20294553a5dd7a..2840924ec97680 100644
+--- a/tools/testing/selftests/mm/hmm-tests.c
++++ b/tools/testing/selftests/mm/hmm-tests.c
+@@ -1657,7 +1657,7 @@ TEST_F(hmm2, double_map)
+
+ buffer->fd = -1;
+ buffer->size = size;
+- buffer->mirror = malloc(npages);
++ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ /* Reserve a range of addresses. */
+diff --git a/tools/testing/selftests/mm/hugepage-vmemmap.c b/tools/testing/selftests/mm/hugepage-vmemmap.c
+index 5b354c209e936f..894d28c3dd4785 100644
+--- a/tools/testing/selftests/mm/hugepage-vmemmap.c
++++ b/tools/testing/selftests/mm/hugepage-vmemmap.c
+@@ -10,10 +10,7 @@
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
+-
+-#define MAP_LENGTH (2UL * 1024 * 1024)
+-
+-#define PAGE_SIZE 4096
++#include "vm_util.h"
+
+ #define PAGE_COMPOUND_HEAD (1UL << 15)
+ #define PAGE_COMPOUND_TAIL (1UL << 16)
+@@ -39,6 +36,9 @@
+ #define MAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
+ #endif
+
++static size_t pagesize;
++static size_t maplength;
++
+ static void write_bytes(char *addr, size_t length)
+ {
+ unsigned long i;
+@@ -56,7 +56,7 @@ static unsigned long virt_to_pfn(void *addr)
+ if (fd < 0)
+ return -1UL;
+
+- lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET);
++ lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
+ read(fd, &pagemap, sizeof(pagemap));
+ close(fd);
+
+@@ -86,7 +86,7 @@ static int check_page_flags(unsigned long pfn)
+ * this also verifies kernel has correctly set the fake page_head to tail
+ * while hugetlb_free_vmemmap is enabled.
+ */
+- for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
++ for (i = 1; i < maplength / pagesize; i++) {
+ read(fd, &pageflags, sizeof(pageflags));
+ if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
+ (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
+@@ -106,18 +106,25 @@ int main(int argc, char **argv)
+ void *addr;
+ unsigned long pfn;
+
+- addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
++ pagesize = psize();
++ maplength = default_huge_page_size();
++ if (!maplength) {
++ printf("Unable to determine huge page size\n");
++ exit(1);
++ }
++
++ addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
+ if (addr == MAP_FAILED) {
+ perror("mmap");
+ exit(1);
+ }
+
+ /* Trigger allocation of HugeTLB page. */
+- write_bytes(addr, MAP_LENGTH);
++ write_bytes(addr, maplength);
+
+ pfn = virt_to_pfn(addr);
+ if (pfn == -1UL) {
+- munmap(addr, MAP_LENGTH);
++ munmap(addr, maplength);
+ perror("virt_to_pfn");
+ exit(1);
+ }
+@@ -125,13 +132,13 @@ int main(int argc, char **argv)
+ printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
+
+ if (check_page_flags(pfn) < 0) {
+- munmap(addr, MAP_LENGTH);
++ munmap(addr, maplength);
+ perror("check_page_flags");
+ exit(1);
+ }
+
+ /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
+- if (munmap(addr, MAP_LENGTH)) {
++ if (munmap(addr, maplength)) {
+ perror("munmap");
+ exit(1);
+ }
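
The hugepage-vmemmap changes above index /proc/self/pagemap by the runtime page size instead of a hardcoded 4096. For reference, a hedged sketch of the full pagemap lookup (bit layout per Documentation/admin-guide/mm/pagemap.rst; the helper name is invented):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    #define PM_PFN_MASK ((1ULL << 55) - 1)  /* bits 0-54: PFN  */
    #define PM_PRESENT  (1ULL << 63)        /* page is present */

    static uint64_t lookup_pfn(const void *addr, size_t pagesize)
    {
            uint64_t entry = 0;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return (uint64_t)-1;
            /* one u64 entry per virtual page */
            pread(fd, &entry, sizeof(entry),
                  ((uint64_t)addr / pagesize) * sizeof(entry));
            close(fd);
            return (entry & PM_PRESENT) ? (entry & PM_PFN_MASK) : 0;
    }
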
+diff --git a/tools/testing/selftests/mm/ksm_tests.c b/tools/testing/selftests/mm/ksm_tests.c
+index 380b691d3eb9fb..b748c48908d9d4 100644
+--- a/tools/testing/selftests/mm/ksm_tests.c
++++ b/tools/testing/selftests/mm/ksm_tests.c
+@@ -566,7 +566,7 @@ static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
+ if (map_ptr_orig == MAP_FAILED)
+ err(2, "initial mmap");
+
+- if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
++ if (madvise(map_ptr, len, MADV_HUGEPAGE))
+ err(2, "MADV_HUGEPAGE");
+
+ pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+diff --git a/tools/testing/selftests/mm/map_hugetlb.c b/tools/testing/selftests/mm/map_hugetlb.c
+index 193281560b61be..86e8f2048a4090 100644
+--- a/tools/testing/selftests/mm/map_hugetlb.c
++++ b/tools/testing/selftests/mm/map_hugetlb.c
+@@ -15,6 +15,7 @@
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
++#include "vm_util.h"
+
+ #define LENGTH (256UL*1024*1024)
+ #define PROTECTION (PROT_READ | PROT_WRITE)
+@@ -58,10 +59,16 @@ int main(int argc, char **argv)
+ {
+ void *addr;
+ int ret;
++ size_t hugepage_size;
+ size_t length = LENGTH;
+ int flags = FLAGS;
+ int shift = 0;
+
++ hugepage_size = default_huge_page_size();
++	/* munmap will fail if the length is not page aligned */
++ if (hugepage_size > length)
++ length = hugepage_size;
++
+ if (argc > 1)
+ length = atol(argv[1]) << 20;
+ if (argc > 2) {
+diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
+index bc91bef5d254e5..0c5e469ae38fab 100644
+--- a/tools/testing/selftests/mm/mdwe_test.c
++++ b/tools/testing/selftests/mm/mdwe_test.c
+@@ -168,13 +168,10 @@ TEST_F(mdwe, mmap_FIXED)
+ self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
+ ASSERT_NE(self->p, MAP_FAILED);
+
+- p = mmap(self->p + self->size, self->size, PROT_READ | PROT_EXEC,
++ /* MAP_FIXED unmaps the existing page before mapping which is allowed */
++ p = mmap(self->p, self->size, PROT_READ | PROT_EXEC,
+ self->flags | MAP_FIXED, 0, 0);
+- if (variant->enabled) {
+- EXPECT_EQ(p, MAP_FAILED);
+- } else {
+- EXPECT_EQ(p, self->p);
+- }
++ EXPECT_EQ(p, self->p);
+ }
+
+ TEST_F(mdwe, arm64_BTI)
+diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c
+index 957b9e18c7295f..9b298f6a04b371 100644
+--- a/tools/testing/selftests/mm/memfd_secret.c
++++ b/tools/testing/selftests/mm/memfd_secret.c
+@@ -62,6 +62,9 @@ static void test_mlock_limit(int fd)
+ char *mem;
+
+ len = mlock_limit_cur;
++ if (len % page_size != 0)
++ len = (len/page_size) * page_size;
++
+ mem = mmap(NULL, len, prot, mode, fd, 0);
+ if (mem == MAP_FAILED) {
+ fail("unable to mmap secret memory\n");
+diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
+index 3e2bc818d566f6..d7b2c9d07eec53 100755
+--- a/tools/testing/selftests/mm/run_vmtests.sh
++++ b/tools/testing/selftests/mm/run_vmtests.sh
+@@ -5,6 +5,7 @@
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+
++count_total=0
+ count_pass=0
+ count_fail=0
+ count_skip=0
+@@ -17,6 +18,7 @@ usage: ${BASH_SOURCE[0]:-$0} [ options ]
+ -a: run all tests, including extra ones
+ -t: specify specific categories of tests to run
+ -h: display this message
++ -n: disable TAP output
+
+ The default behavior is to run required tests only. If -a is specified,
+ will run all tests.
+@@ -75,12 +77,14 @@ EOF
+ }
+
+ RUN_ALL=false
++TAP_PREFIX="# "
+
+-while getopts "aht:" OPT; do
++while getopts "aht:n" OPT; do
+ case ${OPT} in
+ "a") RUN_ALL=true ;;
+ "h") usage ;;
+ "t") VM_SELFTEST_ITEMS=${OPTARG} ;;
++ "n") TAP_PREFIX= ;;
+ esac
+ done
+ shift $((OPTIND -1))
+@@ -182,30 +186,52 @@ fi
+ VADDR64=0
+ echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1
+
++tap_prefix() {
++ sed -e "s/^/${TAP_PREFIX}/"
++}
++
++tap_output() {
++ if [[ ! -z "$TAP_PREFIX" ]]; then
++ read str
++ echo $str
++ fi
++}
++
++pretty_name() {
++ echo "$*" | sed -e 's/^\(bash \)\?\.\///'
++}
++
+ # Usage: run_test [test binary] [arbitrary test arguments...]
+ run_test() {
+ if test_selected ${CATEGORY}; then
++ local test=$(pretty_name "$*")
+ local title="running $*"
+ local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
+- printf "%s\n%s\n%s\n" "$sep" "$title" "$sep"
++ printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix
+
+- "$@"
+- local ret=$?
++ ("$@" 2>&1) | tap_prefix
++ local ret=${PIPESTATUS[0]}
++ count_total=$(( count_total + 1 ))
+ if [ $ret -eq 0 ]; then
+ count_pass=$(( count_pass + 1 ))
+- echo "[PASS]"
++ echo "[PASS]" | tap_prefix
++ echo "ok ${count_total} ${test}" | tap_output
+ elif [ $ret -eq $ksft_skip ]; then
+ count_skip=$(( count_skip + 1 ))
+- echo "[SKIP]"
++ echo "[SKIP]" | tap_prefix
++ echo "ok ${count_total} ${test} # SKIP" | tap_output
+ exitcode=$ksft_skip
+ else
+ count_fail=$(( count_fail + 1 ))
+- echo "[FAIL]"
++ echo "[FAIL]" | tap_prefix
++ echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
+ exitcode=1
+ fi
+ fi # test_selected
+ }
+
++echo "TAP version 13" | tap_output
++
+ CATEGORY="hugetlb" run_test ./hugepage-mmap
+
+ shmmax=$(cat /proc/sys/kernel/shmmax)
+@@ -222,9 +248,9 @@ CATEGORY="hugetlb" run_test ./hugepage-vmemmap
+ CATEGORY="hugetlb" run_test ./hugetlb-madvise
+
+ if test_selected "hugetlb"; then
+- echo "NOTE: These hugetlb tests provide minimal coverage. Use"
+- echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
+- echo " hugetlb regression testing."
++ echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix
++ echo " https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
++ echo " hugetlb regression testing." | tap_prefix
+ fi
+
+ CATEGORY="mmap" run_test ./map_fixed_noreplace
+@@ -303,7 +329,11 @@ CATEGORY="hmm" run_test bash ./test_hmm.sh smoke
+ # MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
+ CATEGORY="madv_populate" run_test ./madv_populate
+
++if [ -x ./memfd_secret ]
++then
++(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
+ CATEGORY="memfd_secret" run_test ./memfd_secret
++fi
+
+ # KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
+ CATEGORY="ksm" run_test ./ksm_tests -H -s 100
+@@ -357,6 +387,7 @@ CATEGORY="mkdirty" run_test ./mkdirty
+
+ CATEGORY="mdwe" run_test ./mdwe_test
+
+-echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}"
++echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
++echo "1..${count_total}" | tap_output
+
+ exit $exitcode
+diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
+index cc5f144430d4d2..7dbfa53d93a05f 100644
+--- a/tools/testing/selftests/mm/soft-dirty.c
++++ b/tools/testing/selftests/mm/soft-dirty.c
+@@ -137,7 +137,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
+ if (!map)
+ ksft_exit_fail_msg("anon mmap failed\n");
+ } else {
+- test_fd = open(fname, O_RDWR | O_CREAT);
++ test_fd = open(fname, O_RDWR | O_CREAT, 0664);
+ if (test_fd < 0) {
+ ksft_test_result_skip("Test %s open() file failed\n", __func__);
+ return;
+diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
+index 0e74635c8c3d97..dff3be23488b42 100644
+--- a/tools/testing/selftests/mm/split_huge_page_test.c
++++ b/tools/testing/selftests/mm/split_huge_page_test.c
+@@ -253,7 +253,7 @@ void split_file_backed_thp(void)
+ goto cleanup;
+ }
+
+- fd = open(testfile, O_CREAT|O_WRONLY);
++ fd = open(testfile, O_CREAT|O_WRONLY, 0664);
+ if (fd == -1) {
+ perror("Cannot open testing file\n");
+ goto cleanup;
+diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
+index 02b89860e193d8..3bdae35e0add01 100644
+--- a/tools/testing/selftests/mm/uffd-common.c
++++ b/tools/testing/selftests/mm/uffd-common.c
+@@ -17,6 +17,7 @@ bool map_shared;
+ bool test_uffdio_wp = true;
+ unsigned long long *count_verify;
+ uffd_test_ops_t *uffd_test_ops;
++pthread_barrier_t ready_for_fork;
+
+ static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
+ {
+@@ -507,6 +508,9 @@ void *uffd_poll_thread(void *arg)
+ pollfd[1].fd = pipefd[cpu*2];
+ pollfd[1].events = POLLIN;
+
++ /* Ready for parent thread to fork */
++ pthread_barrier_wait(&ready_for_fork);
++
+ for (;;) {
+ ret = poll(pollfd, 2, -1);
+ if (ret <= 0) {
+diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
+index 7c4fa964c3b088..2d78ae0daf0650 100644
+--- a/tools/testing/selftests/mm/uffd-common.h
++++ b/tools/testing/selftests/mm/uffd-common.h
+@@ -8,6 +8,7 @@
+ #define __UFFD_COMMON_H__
+
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <stdio.h>
+ #include <errno.h>
+ #include <unistd.h>
+@@ -97,6 +98,7 @@ extern bool map_shared;
+ extern bool test_uffdio_wp;
+ extern unsigned long long *count_verify;
+ extern volatile bool test_uffdio_copy_eexist;
++extern pthread_barrier_t ready_for_fork;
+
+ extern uffd_test_ops_t anon_uffd_test_ops;
+ extern uffd_test_ops_t shmem_uffd_test_ops;
+diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
+index 2709a34a39c52d..42cdba544f81bd 100644
+--- a/tools/testing/selftests/mm/uffd-unit-tests.c
++++ b/tools/testing/selftests/mm/uffd-unit-tests.c
+@@ -237,6 +237,9 @@ static void *fork_event_consumer(void *data)
+ fork_event_args *args = data;
+ struct uffd_msg msg = { 0 };
+
++ /* Ready for parent thread to fork */
++ pthread_barrier_wait(&ready_for_fork);
++
+ /* Read until a full msg received */
+ while (uffd_read_msg(args->parent_uffd, &msg));
+
+@@ -304,8 +307,12 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
+
+ /* Prepare a thread to resolve EVENT_FORK */
+ if (with_event) {
++ pthread_barrier_init(&ready_for_fork, NULL, 2);
+ if (pthread_create(&thread, NULL, fork_event_consumer, &args))
+ err("pthread_create()");
++ /* Wait for child thread to start before forking */
++ pthread_barrier_wait(&ready_for_fork);
++ pthread_barrier_destroy(&ready_for_fork);
+ }
+
+ child = fork();
+@@ -770,6 +777,8 @@ static void uffd_sigbus_test_common(bool wp)
+ char c;
+ struct uffd_args args = { 0 };
+
++ pthread_barrier_init(&ready_for_fork, NULL, 2);
++
+ fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+
+ if (uffd_register(uffd, area_dst, nr_pages * page_size,
+@@ -785,6 +794,10 @@ static void uffd_sigbus_test_common(bool wp)
+ if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
+ err("uffd_poll_thread create");
+
++ /* Wait for child thread to start before forking */
++ pthread_barrier_wait(&ready_for_fork);
++ pthread_barrier_destroy(&ready_for_fork);
++
+ pid = fork();
+ if (pid < 0)
+ err("fork");
+@@ -824,6 +837,8 @@ static void uffd_events_test_common(bool wp)
+ char c;
+ struct uffd_args args = { 0 };
+
++ pthread_barrier_init(&ready_for_fork, NULL, 2);
++
+ fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+ if (uffd_register(uffd, area_dst, nr_pages * page_size,
+ true, wp, false))
+@@ -833,6 +848,10 @@ static void uffd_events_test_common(bool wp)
+ if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
+ err("uffd_poll_thread create");
+
++ /* Wait for child thread to start before forking */
++ pthread_barrier_wait(&ready_for_fork);
++ pthread_barrier_destroy(&ready_for_fork);
++
+ pid = fork();
+ if (pid < 0)
+ err("fork");
+@@ -1219,7 +1238,8 @@ uffd_test_case_t uffd_tests[] = {
+ .uffd_fn = uffd_sigbus_wp_test,
+ .mem_targets = MEM_ALL,
+ .uffd_feature_required = UFFD_FEATURE_SIGBUS |
+- UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP,
++ UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP |
++ UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
+ },
+ {
+ .name = "events",
+@@ -1309,6 +1329,12 @@ int main(int argc, char *argv[])
+ continue;
+
+ uffd_test_start("%s on %s", test->name, mem_type->name);
++ if ((mem_type->mem_flag == MEM_HUGETLB ||
++ mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
++ (default_huge_page_size() == 0)) {
++ uffd_test_skip("huge page size is 0, feature missing?");
++ continue;
++ }
+ if (!uffd_feature_supported(test)) {
+ uffd_test_skip("feature missing");
+ continue;
+diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
+index 45cae7cab27e12..a0a75f30290437 100755
+--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
++++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
+@@ -29,9 +29,15 @@ check_supported_x86_64()
+ # See man 1 gzip under '-f'.
+ local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+
++ local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;}
++ else {print 1}; exit}' /proc/cpuinfo 2>/dev/null)
++
+ if [[ "${pg_table_levels}" -lt 5 ]]; then
+ echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
+ exit $ksft_skip
++ elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then
++ echo "$0: CPU does not have the necessary la57 flag to support page table level 5"
++ exit $ksft_skip
+ fi
+ }
+
+diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
+index c7fa61f0dff8d9..0c603bec5e209c 100644
+--- a/tools/testing/selftests/mm/vm_util.h
++++ b/tools/testing/selftests/mm/vm_util.h
+@@ -3,7 +3,7 @@
+ #include <stdbool.h>
+ #include <sys/mman.h>
+ #include <err.h>
+-#include <string.h> /* ffsl() */
++#include <strings.h> /* ffsl() */
+ #include <unistd.h> /* _SC_PAGESIZE */
+
+ #define BIT_ULL(nr) (1ULL << (nr))
+diff --git a/tools/testing/selftests/mm/write_hugetlb_memory.sh b/tools/testing/selftests/mm/write_hugetlb_memory.sh
+index 70a02301f4c276..3d2d2eb9d6fff0 100755
+--- a/tools/testing/selftests/mm/write_hugetlb_memory.sh
++++ b/tools/testing/selftests/mm/write_hugetlb_memory.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ set -e
+diff --git a/tools/testing/selftests/mm/write_to_hugetlbfs.c b/tools/testing/selftests/mm/write_to_hugetlbfs.c
+index 6a2caba19ee1d9..1289d311efd705 100644
+--- a/tools/testing/selftests/mm/write_to_hugetlbfs.c
++++ b/tools/testing/selftests/mm/write_to_hugetlbfs.c
+@@ -28,7 +28,7 @@ enum method {
+
+ /* Global variables. */
+ static const char *self;
+-static char *shmaddr;
++static int *shmaddr;
+ static int shmid;
+
+ /*
+@@ -47,15 +47,17 @@ void sig_handler(int signo)
+ {
+ printf("Received %d.\n", signo);
+ if (signo == SIGINT) {
+- printf("Deleting the memory\n");
+- if (shmdt((const void *)shmaddr) != 0) {
+- perror("Detach failure");
++ if (shmaddr) {
++ printf("Deleting the memory\n");
++ if (shmdt((const void *)shmaddr) != 0) {
++ perror("Detach failure");
++ shmctl(shmid, IPC_RMID, NULL);
++ exit(4);
++ }
++
+ shmctl(shmid, IPC_RMID, NULL);
+- exit(4);
++ printf("Done deleting the memory\n");
+ }
+-
+- shmctl(shmid, IPC_RMID, NULL);
+- printf("Done deleting the memory\n");
+ }
+ exit(2);
+ }
+@@ -211,7 +213,8 @@ int main(int argc, char **argv)
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(2);
+ }
+- printf("shmaddr: %p\n", ptr);
++ shmaddr = ptr;
++ printf("shmaddr: %p\n", shmaddr);
+
+ break;
+ default:
+diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting
+new file mode 100644
+index 00000000000000..a953c96aa16e1e
+--- /dev/null
++++ b/tools/testing/selftests/mqueue/setting
+@@ -0,0 +1 @@
++timeout=180
+diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
+index 4a2881d43989c2..91a48efb140bef 100644
+--- a/tools/testing/selftests/net/Makefile
++++ b/tools/testing/selftests/net/Makefile
+@@ -53,8 +53,7 @@ TEST_PROGS += bind_bhash.sh
+ TEST_PROGS += ip_local_port_range.sh
+ TEST_PROGS += rps_default_mask.sh
+ TEST_PROGS += big_tcp.sh
+-TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
+-TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
++TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
+ TEST_GEN_FILES = socket nettest
+ TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
+ TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
+@@ -84,14 +83,17 @@ TEST_PROGS += sctp_vrf.sh
+ TEST_GEN_FILES += sctp_hello
+ TEST_GEN_FILES += csum
+ TEST_GEN_FILES += nat6to4.o
++TEST_GEN_FILES += xdp_dummy.o
+ TEST_GEN_FILES += ip_local_port_range
+-TEST_GEN_FILES += bind_wildcard
++TEST_GEN_PROGS += bind_wildcard
++TEST_GEN_PROGS += bind_timewait
+ TEST_PROGS += test_vxlan_mdb.sh
+ TEST_PROGS += test_bridge_neigh_suppress.sh
+ TEST_PROGS += test_vxlan_nolocalbypass.sh
+ TEST_PROGS += test_bridge_backup_port.sh
+
+ TEST_FILES := settings
++TEST_FILES += in_netns.sh lib.sh net_helper.sh setup_loopback.sh setup_veth.sh
+
+ include ../lib.mk
+
+@@ -100,7 +102,7 @@ $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread -lcrypto
+ $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
+ $(OUTPUT)/bind_bhash: LDLIBS += -lpthread
+
+-# Rules to generate bpf obj nat6to4.o
++# Rules to generate bpf objs
+ CLANG ?= clang
+ SCRATCH_DIR := $(OUTPUT)/tools
+ BUILD_DIR := $(SCRATCH_DIR)/build
+@@ -135,7 +137,7 @@ endif
+
+ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+
+-$(OUTPUT)/nat6to4.o: nat6to4.c $(BPFOBJ) | $(MAKE_DIRS)
++$(OUTPUT)/nat6to4.o $(OUTPUT)/xdp_dummy.o: $(OUTPUT)/%.o : %.c $(BPFOBJ) | $(MAKE_DIRS)
+ $(CLANG) -O2 --target=bpf -c $< $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
+
+ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
+index 5b88f7129fea44..79a3dd75590e89 100644
+--- a/tools/testing/selftests/net/af_unix/diag_uid.c
++++ b/tools/testing/selftests/net/af_unix/diag_uid.c
+@@ -148,7 +148,6 @@ void receive_response(struct __test_metadata *_metadata,
+ .msg_iov = &iov,
+ .msg_iovlen = 1
+ };
+- struct unix_diag_req *udr;
+ struct nlmsghdr *nlh;
+ int ret;
+
+diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
+index 75528788cb95e5..7e7ed6c558da9e 100755
+--- a/tools/testing/selftests/net/amt.sh
++++ b/tools/testing/selftests/net/amt.sh
+@@ -77,6 +77,7 @@ readonly LISTENER=$(mktemp -u listener-XXXXXXXX)
+ readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX)
+ readonly RELAY=$(mktemp -u relay-XXXXXXXX)
+ readonly SOURCE=$(mktemp -u source-XXXXXXXX)
++readonly SMCROUTEDIR="$(mktemp -d)"
+ ERR=4
+ err=0
+
+@@ -85,6 +86,11 @@ exit_cleanup()
+ for ns in "$@"; do
+ ip netns delete "${ns}" 2>/dev/null || true
+ done
++ if [ -f "$SMCROUTEDIR/amt.pid" ]; then
++ smcpid=$(< $SMCROUTEDIR/amt.pid)
++ kill $smcpid
++ fi
++ rm -rf $SMCROUTEDIR
+
+ exit $ERR
+ }
+@@ -167,7 +173,7 @@ setup_iptables()
+
+ setup_mcast_routing()
+ {
+- ip netns exec "${RELAY}" smcrouted
++ ip netns exec "${RELAY}" smcrouted -P $SMCROUTEDIR/amt.pid
+ ip netns exec "${RELAY}" smcroutectl a relay_src \
+ 172.17.0.2 239.0.0.1 amtr
+ ip netns exec "${RELAY}" smcroutectl a relay_src \
+@@ -210,8 +216,8 @@ check_features()
+
+ test_ipv4_forward()
+ {
+- RESULT4=$(ip netns exec "${LISTENER}" nc -w 1 -l -u 239.0.0.1 4000)
+- if [ "$RESULT4" == "172.17.0.2" ]; then
++ RESULT4=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP4-LISTEN:4000,readbytes=128 || true)
++ if echo "$RESULT4" | grep -q "172.17.0.2"; then
+ printf "TEST: %-60s [ OK ]\n" "IPv4 amt multicast forwarding"
+ exit 0
+ else
+@@ -222,8 +228,8 @@ test_ipv4_forward()
+
+ test_ipv6_forward()
+ {
+- RESULT6=$(ip netns exec "${LISTENER}" nc -w 1 -l -u ff0e::5:6 6000)
+- if [ "$RESULT6" == "2001:db8:3::2" ]; then
++ RESULT6=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP6-LISTEN:6000,readbytes=128 || true)
++ if echo "$RESULT6" | grep -q "2001:db8:3::2"; then
+ printf "TEST: %-60s [ OK ]\n" "IPv6 amt multicast forwarding"
+ exit 0
+ else
+@@ -236,14 +242,14 @@ send_mcast4()
+ {
+ sleep 2
+ ip netns exec "${SOURCE}" bash -c \
+- 'echo 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
++ 'printf "%s %128s" 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
+ }
+
+ send_mcast6()
+ {
+ sleep 2
+ ip netns exec "${SOURCE}" bash -c \
+- 'echo 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
++ 'printf "%s %128s" 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
+ }
+
+ check_features
+diff --git a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+index c899b446acb624..327427ec10f565 100755
+--- a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
++++ b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+@@ -150,7 +150,7 @@ arp_test_gratuitous() {
+ fi
+ # Supply arp_accept option to set up which sets it in sysctl
+ setup ${arp_accept}
+- ip netns exec ${HOST_NS} arping -A -U ${HOST_ADDR} -c1 2>&1 >/dev/null
++ ip netns exec ${HOST_NS} arping -A -I ${HOST_INTF} -U ${HOST_ADDR} -c1 2>&1 >/dev/null
+
+ if verify_arp $1 $2; then
+ printf " TEST: %-60s [ OK ]\n" "${test_msg[*]}"
+diff --git a/tools/testing/selftests/net/big_tcp.sh b/tools/testing/selftests/net/big_tcp.sh
+index cde9a91c479716..2db9d15cd45fea 100755
+--- a/tools/testing/selftests/net/big_tcp.sh
++++ b/tools/testing/selftests/net/big_tcp.sh
+@@ -122,7 +122,9 @@ do_netperf() {
+ local netns=$1
+
+ [ "$NF" = "6" ] && serip=$SERVER_IP6
+- ip net exec $netns netperf -$NF -t TCP_STREAM -H $serip 2>&1 >/dev/null
++
++	# use a large write to be sure to generate big TCP packets
++ ip net exec $netns netperf -$NF -t TCP_STREAM -l 1 -H $serip -- -m 262144 2>&1 >/dev/null
+ }
+
+ do_test() {
+diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
+index 330d0b1ceced3d..c921750ca118de 100755
+--- a/tools/testing/selftests/net/cmsg_ipv6.sh
++++ b/tools/testing/selftests/net/cmsg_ipv6.sh
+@@ -91,7 +91,7 @@ for ovr in setsock cmsg both diff; do
+ check_result $? 0 "TCLASS $prot $ovr - pass"
+
+ while [ -d /proc/$BG ]; do
+- $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
++ $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234
+ done
+
+ tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null
+@@ -128,7 +128,7 @@ for ovr in setsock cmsg both diff; do
+ check_result $? 0 "HOPLIMIT $prot $ovr - pass"
+
+ while [ -d /proc/$BG ]; do
+- $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
++ $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234
+ done
+
+ tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null
+diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
+index 24b21b15ed3fb0..6ff3e732f449f9 100644
+--- a/tools/testing/selftests/net/cmsg_sender.c
++++ b/tools/testing/selftests/net/cmsg_sender.c
+@@ -416,9 +416,9 @@ int main(int argc, char *argv[])
+ {
+ struct addrinfo hints, *ai;
+ struct iovec iov[1];
++ unsigned char *buf;
+ struct msghdr msg;
+ char cbuf[1024];
+- char *buf;
+ int err;
+ int fd;
+
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index 8da562a9ae87e4..04de7a6ba6f318 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -1,5 +1,6 @@
+ CONFIG_USER_NS=y
+ CONFIG_NET_NS=y
++CONFIG_BONDING=m
+ CONFIG_BPF_SYSCALL=y
+ CONFIG_TEST_BPF=m
+ CONFIG_NUMA=y
+@@ -14,30 +15,75 @@ CONFIG_VETH=y
+ CONFIG_NET_IPVTI=y
+ CONFIG_IPV6_VTI=y
+ CONFIG_DUMMY=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
+ CONFIG_BRIDGE=y
++CONFIG_CRYPTO_CHACHA20POLY1305=m
+ CONFIG_VLAN_8021Q=y
++CONFIG_GENEVE=m
+ CONFIG_IFB=y
++CONFIG_INET_DIAG=y
++CONFIG_INET_ESP=y
++CONFIG_INET_ESP_OFFLOAD=y
++CONFIG_NET_FOU=y
++CONFIG_NET_FOU_IP_TUNNELS=y
++CONFIG_IP_GRE=m
+ CONFIG_NETFILTER=y
+ CONFIG_NETFILTER_ADVANCED=y
+ CONFIG_NF_CONNTRACK=m
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_SIT=y
++CONFIG_IP_DCCP=m
+ CONFIG_NF_NAT=m
+ CONFIG_IP6_NF_IPTABLES=m
+ CONFIG_IP_NF_IPTABLES=m
+ CONFIG_IP6_NF_NAT=m
++CONFIG_IP6_NF_RAW=m
+ CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_TARGET_TTL=m
++CONFIG_IPV6_GRE=m
++CONFIG_IPV6_SEG6_LWTUNNEL=y
++CONFIG_L2TP_ETH=m
++CONFIG_L2TP_IP=m
++CONFIG_L2TP=m
++CONFIG_L2TP_V3=y
++CONFIG_MACSEC=m
++CONFIG_MACVLAN=y
++CONFIG_MACVTAP=y
++CONFIG_MPLS=y
++CONFIG_MPTCP=y
+ CONFIG_NF_TABLES=m
+ CONFIG_NF_TABLES_IPV6=y
+ CONFIG_NF_TABLES_IPV4=y
+ CONFIG_NFT_NAT=m
++CONFIG_NETFILTER_XT_MATCH_LENGTH=m
++CONFIG_NET_ACT_CSUM=m
++CONFIG_NET_ACT_CT=m
++CONFIG_NET_ACT_GACT=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_BPF=m
++CONFIG_NET_CLS_MATCHALL=m
++CONFIG_NET_CLS_U32=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPIP=y
++CONFIG_NET_SCH_FQ_CODEL=m
++CONFIG_NET_SCH_HTB=m
+ CONFIG_NET_SCH_FQ=m
+ CONFIG_NET_SCH_ETF=m
+ CONFIG_NET_SCH_NETEM=y
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NFT_COMPAT=m
++CONFIG_NF_FLOW_TABLE=m
++CONFIG_PSAMPLE=m
++CONFIG_TCP_MD5SIG=y
+ CONFIG_TEST_BLACKHOLE_DEV=m
+ CONFIG_KALLSYMS=y
++CONFIG_TLS=m
+ CONFIG_TRACEPOINTS=y
+ CONFIG_NET_DROP_MONITOR=m
+ CONFIG_NETDEVSIM=m
+-CONFIG_NET_FOU=m
+ CONFIG_MPLS_ROUTING=m
+ CONFIG_MPLS_IPTUNNEL=m
+ CONFIG_NET_SCH_INGRESS=m
+@@ -48,7 +94,10 @@ CONFIG_BAREUDP=m
+ CONFIG_IPV6_IOAM6_LWTUNNEL=y
+ CONFIG_CRYPTO_SM4_GENERIC=y
+ CONFIG_AMT=m
++CONFIG_TUN=y
+ CONFIG_VXLAN=m
+ CONFIG_IP_SCTP=m
+ CONFIG_NETFILTER_XT_MATCH_POLICY=m
+ CONFIG_CRYPTO_ARIA=y
++CONFIG_XFRM_INTERFACE=m
++CONFIG_XFRM_USER=m
+diff --git a/tools/testing/selftests/net/csum.c b/tools/testing/selftests/net/csum.c
+index 90eb06fefa59ec..eef72b50270c5d 100644
+--- a/tools/testing/selftests/net/csum.c
++++ b/tools/testing/selftests/net/csum.c
+@@ -654,10 +654,16 @@ static int recv_verify_packet_ipv4(void *nh, int len)
+ {
+ struct iphdr *iph = nh;
+ uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
++ uint16_t ip_len;
+
+ if (len < sizeof(*iph) || iph->protocol != proto)
+ return -1;
+
++ ip_len = ntohs(iph->tot_len);
++ if (ip_len > len || ip_len < sizeof(*iph))
++ return -1;
++
++ len = ip_len;
+ iph_addr_p = &iph->saddr;
+ if (proto == IPPROTO_TCP)
+ return recv_verify_packet_tcp(iph + 1, len - sizeof(*iph));
+@@ -669,16 +675,22 @@ static int recv_verify_packet_ipv6(void *nh, int len)
+ {
+ struct ipv6hdr *ip6h = nh;
+ uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
++ uint16_t ip_len;
+
+ if (len < sizeof(*ip6h) || ip6h->nexthdr != proto)
+ return -1;
+
++ ip_len = ntohs(ip6h->payload_len);
++ if (ip_len > len - sizeof(*ip6h))
++ return -1;
++
++ len = ip_len;
+ iph_addr_p = &ip6h->saddr;
+
+ if (proto == IPPROTO_TCP)
+- return recv_verify_packet_tcp(ip6h + 1, len - sizeof(*ip6h));
++ return recv_verify_packet_tcp(ip6h + 1, len);
+ else
+- return recv_verify_packet_udp(ip6h + 1, len - sizeof(*ip6h));
++ return recv_verify_packet_udp(ip6h + 1, len);
+ }
+
+ /* return whether auxdata includes TP_STATUS_CSUM_VALID */
+diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+index 51df5e305855a7..b52d59547fc598 100755
+--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
++++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+@@ -209,12 +209,12 @@ validate_v6_exception()
+ echo "Route get"
+ ip -netns h0 -6 ro get ${dst}
+ echo "Searching for:"
+- echo " ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
++ echo " ${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ echo
+ fi
+
+ ip -netns h0 -6 ro get ${dst} | \
+- grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
++ grep -q "${dst}.* via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
+ rc=$?
+
+ log_test $rc 0 "IPv6: host 0 to host ${i}, mtu ${mtu}"
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 66d0db7a2614d3..ede2c0ec2a9dd4 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -1643,53 +1643,53 @@ ipv4_rt_dsfield()
+
+ # DSCP 0x10 should match the specific route, no matter the ECN bits
+ $IP route get fibmatch 172.16.102.1 dsfield 0x10 | \
+- grep -q "via 172.16.103.2"
++ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ log_test $? 0 "IPv4 route with DSCP and ECN:Not-ECT"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x11 | \
+- grep -q "via 172.16.103.2"
++ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ log_test $? 0 "IPv4 route with DSCP and ECN:ECT(1)"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x12 | \
+- grep -q "via 172.16.103.2"
++ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ log_test $? 0 "IPv4 route with DSCP and ECN:ECT(0)"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x13 | \
+- grep -q "via 172.16.103.2"
++ grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2"
+ log_test $? 0 "IPv4 route with DSCP and ECN:CE"
+
+ # Unknown DSCP should match the generic route, no matter the ECN bits
+ $IP route get fibmatch 172.16.102.1 dsfield 0x14 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with unknown DSCP and ECN:Not-ECT"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x15 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(1)"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x16 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(0)"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x17 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with unknown DSCP and ECN:CE"
+
+ # Null DSCP should match the generic route, no matter the ECN bits
+ $IP route get fibmatch 172.16.102.1 dsfield 0x00 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with no DSCP and ECN:Not-ECT"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x01 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(1)"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x02 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(0)"
+
+ $IP route get fibmatch 172.16.102.1 dsfield 0x03 | \
+- grep -q "via 172.16.101.2"
++ grep -q "172.16.102.0/24 via 172.16.101.2"
+ log_test $? 0 "IPv4 route with no DSCP and ECN:CE"
+ }
+
+diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+index 2aa66d2a1702b3..e6a3e04fd83f31 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+@@ -478,10 +478,10 @@ v3exc_timeout_test()
+ RET=0
+ local X=("192.0.2.20" "192.0.2.30")
+
+- # GMI should be 3 seconds
++ # GMI should be 5 seconds
+ ip link set dev br0 type bridge mcast_query_interval 100 \
+ mcast_query_response_interval 100 \
+- mcast_membership_interval 300
++ mcast_membership_interval 500
+
+ v3exclude_prepare $h1 $ALL_MAC $ALL_GROUP
+ ip link set dev br0 type bridge mcast_query_interval 500 \
+@@ -489,7 +489,7 @@ v3exc_timeout_test()
+ mcast_membership_interval 1500
+
+ $MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW2" -q
+- sleep 3
++ sleep 5
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+diff --git a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
+index 9af9f6964808ba..c62331b2e00606 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
+@@ -327,10 +327,10 @@ locked_port_mab_redirect()
+ RET=0
+ check_port_mab_support || return 0
+
+- bridge link set dev $swp1 learning on locked on mab on
+ tc qdisc add dev $swp1 clsact
+ tc filter add dev $swp1 ingress protocol all pref 1 handle 101 flower \
+ action mirred egress redirect dev $swp2
++ bridge link set dev $swp1 learning on locked on mab on
+
+ ping_do $h1 192.0.2.2
+ check_err $? "Ping did not work with redirection"
+@@ -349,8 +349,8 @@ locked_port_mab_redirect()
+ check_err $? "Locked entry not created after deleting filter"
+
+ bridge fdb del `mac_get $h1` vlan 1 dev $swp1 master
+- tc qdisc del dev $swp1 clsact
+ bridge link set dev $swp1 learning off locked off mab off
++ tc qdisc del dev $swp1 clsact
+
+ log_test "Locked port MAB redirect"
+ }
+diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+index d0c6c499d5dab9..a3678dfe5848a2 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+@@ -145,14 +145,14 @@ cfg_test_host_common()
+
+ # Check basic add, replace and delete behavior.
+ bridge mdb add dev br0 port br0 grp $grp $state vid 10
+- bridge mdb show dev br0 vid 10 | grep -q "$grp"
++ bridge mdb get dev br0 grp $grp vid 10 &> /dev/null
+ check_err $? "Failed to add $name host entry"
+
+ bridge mdb replace dev br0 port br0 grp $grp $state vid 10 &> /dev/null
+ check_fail $? "Managed to replace $name host entry"
+
+ bridge mdb del dev br0 port br0 grp $grp $state vid 10
+- bridge mdb show dev br0 vid 10 | grep -q "$grp"
++ bridge mdb get dev br0 grp $grp vid 10 &> /dev/null
+ check_fail $? "Failed to delete $name host entry"
+
+ # Check error cases.
+@@ -200,7 +200,7 @@ cfg_test_port_common()
+
+ # Check basic add, replace and delete behavior.
+ bridge mdb add dev br0 port $swp1 $grp_key permanent vid 10
+- bridge mdb show dev br0 vid 10 | grep -q "$grp_key"
++ bridge mdb get dev br0 $grp_key vid 10 &> /dev/null
+ check_err $? "Failed to add $name entry"
+
+ bridge mdb replace dev br0 port $swp1 $grp_key permanent vid 10 \
+@@ -208,31 +208,31 @@ cfg_test_port_common()
+ check_err $? "Failed to replace $name entry"
+
+ bridge mdb del dev br0 port $swp1 $grp_key permanent vid 10
+- bridge mdb show dev br0 vid 10 | grep -q "$grp_key"
++ bridge mdb get dev br0 $grp_key vid 10 &> /dev/null
+ check_fail $? "Failed to delete $name entry"
+
+ # Check default protocol and replacement.
+ bridge mdb add dev br0 port $swp1 $grp_key permanent vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | grep -q "static"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "static"
+ check_err $? "$name entry not added with default \"static\" protocol"
+
+ bridge mdb replace dev br0 port $swp1 $grp_key permanent vid 10 \
+ proto 123
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | grep -q "123"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "123"
+ check_err $? "Failed to replace protocol of $name entry"
+ bridge mdb del dev br0 port $swp1 $grp_key permanent vid 10
+
+ # Check behavior when VLAN is not specified.
+ bridge mdb add dev br0 port $swp1 $grp_key permanent
+- bridge mdb show dev br0 vid 10 | grep -q "$grp_key"
++ bridge mdb get dev br0 $grp_key vid 10 &> /dev/null
+ check_err $? "$name entry with VLAN 10 not added when VLAN was not specified"
+- bridge mdb show dev br0 vid 20 | grep -q "$grp_key"
++ bridge mdb get dev br0 $grp_key vid 20 &> /dev/null
+ check_err $? "$name entry with VLAN 20 not added when VLAN was not specified"
+
+ bridge mdb del dev br0 port $swp1 $grp_key permanent
+- bridge mdb show dev br0 vid 10 | grep -q "$grp_key"
++ bridge mdb get dev br0 $grp_key vid 10 &> /dev/null
+ check_fail $? "$name entry with VLAN 10 not deleted when VLAN was not specified"
+- bridge mdb show dev br0 vid 20 | grep -q "$grp_key"
++ bridge mdb get dev br0 $grp_key vid 20 &> /dev/null
+ check_fail $? "$name entry with VLAN 20 not deleted when VLAN was not specified"
+
+ # Check behavior when bridge port is down.
+@@ -298,21 +298,21 @@ __cfg_test_port_ip_star_g()
+ RET=0
+
+ bridge mdb add dev br0 port $swp1 grp $grp vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "exclude"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "exclude"
+ check_err $? "Default filter mode is not \"exclude\""
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+
+ # Check basic add and delete behavior.
+ bridge mdb add dev br0 port $swp1 grp $grp vid 10 filter_mode exclude \
+ source_list $src1
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q -v "src"
++ bridge -d mdb get dev br0 grp $grp vid 10 &> /dev/null
+ check_err $? "(*, G) entry not created"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src1"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 &> /dev/null
+ check_err $? "(S, G) entry not created"
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q -v "src"
++ bridge -d mdb get dev br0 grp $grp vid 10 &> /dev/null
+ check_fail $? "(*, G) entry not deleted"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src1"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 &> /dev/null
+ check_fail $? "(S, G) entry not deleted"
+
+ ## State (permanent / temp) tests.
+@@ -321,18 +321,15 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp permanent vid 10 \
+ filter_mode exclude source_list $src1
+
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "permanent"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "permanent"
+ check_err $? "(*, G) entry not added as \"permanent\" when should"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | \
+ grep -q "permanent"
+ check_err $? "(S, G) entry not added as \"permanent\" when should"
+
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00"
+ check_err $? "(*, G) \"permanent\" entry has a pending group timer"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "\/0.00"
++ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00"
+ check_err $? "\"permanent\" source entry has a pending source timer"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -342,18 +339,14 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode exclude source_list $src1
+
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "temp"
+ check_err $? "(*, G) EXCLUDE entry not added as \"temp\" when should"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "temp"
+ check_err $? "(S, G) \"blocked\" entry not added as \"temp\" when should"
+
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00"
+ check_fail $? "(*, G) EXCLUDE entry does not have a pending group timer"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "\/0.00"
++ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00"
+ check_err $? "\"blocked\" source entry has a pending source timer"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -363,18 +356,14 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode include source_list $src1
+
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "temp"
+ check_err $? "(*, G) INCLUDE entry not added as \"temp\" when should"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "temp"
+ check_err $? "(S, G) entry not added as \"temp\" when should"
+
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00"
+ check_err $? "(*, G) INCLUDE entry has a pending group timer"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "\/0.00"
++ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00"
+ check_fail $? "Source entry does not have a pending source timer"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -383,8 +372,7 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode include source_list $src1
+
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 grp $grp src $src1 vid 10 | grep -q " 0.00"
+ check_err $? "(S, G) entry has a pending group timer"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -396,11 +384,9 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp vid 10 \
+ filter_mode include source_list $src1
+
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "include"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "include"
+ check_err $? "(*, G) INCLUDE not added with \"include\" filter mode"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "blocked"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "blocked"
+ check_fail $? "(S, G) entry marked as \"blocked\" when should not"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -410,11 +396,9 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp vid 10 \
+ filter_mode exclude source_list $src1
+
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "exclude"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "exclude"
+ check_err $? "(*, G) EXCLUDE not added with \"exclude\" filter mode"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "blocked"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "blocked"
+ check_err $? "(S, G) entry not marked as \"blocked\" when should"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -426,11 +410,9 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp1 grp $grp vid 10 \
+ filter_mode exclude source_list $src1 proto zebra
+
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "zebra"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "zebra"
+ check_err $? "(*, G) entry not added with \"zebra\" protocol"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "zebra"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "zebra"
+ check_err $? "(S, G) entry not marked added with \"zebra\" protocol"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -443,20 +425,16 @@ __cfg_test_port_ip_star_g()
+
+ bridge mdb replace dev br0 port $swp1 grp $grp permanent vid 10 \
+ filter_mode exclude source_list $src1
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "permanent"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "permanent"
+ check_err $? "(*, G) entry not marked as \"permanent\" after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "permanent"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "permanent"
+ check_err $? "(S, G) entry not marked as \"permanent\" after replace"
+
+ bridge mdb replace dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode exclude source_list $src1
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "temp"
+ check_err $? "(*, G) entry not marked as \"temp\" after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "temp"
+ check_err $? "(S, G) entry not marked as \"temp\" after replace"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -467,20 +445,16 @@ __cfg_test_port_ip_star_g()
+
+ bridge mdb replace dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode include source_list $src1
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "include"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "include"
+ check_err $? "(*, G) not marked with \"include\" filter mode after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "blocked"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "blocked"
+ check_fail $? "(S, G) marked as \"blocked\" after replace"
+
+ bridge mdb replace dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode exclude source_list $src1
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "exclude"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "exclude"
+ check_err $? "(*, G) not marked with \"exclude\" filter mode after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "blocked"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "blocked"
+ check_err $? "(S, G) not marked as \"blocked\" after replace"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -491,20 +465,20 @@ __cfg_test_port_ip_star_g()
+
+ bridge mdb replace dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode exclude source_list $src1,$src2,$src3
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src1"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 &> /dev/null
+ check_err $? "(S, G) entry for source $src1 not created after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src2"
++ bridge -d mdb get dev br0 grp $grp src $src2 vid 10 &> /dev/null
+ check_err $? "(S, G) entry for source $src2 not created after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src3"
++ bridge -d mdb get dev br0 grp $grp src $src3 vid 10 &> /dev/null
+ check_err $? "(S, G) entry for source $src3 not created after replace"
+
+ bridge mdb replace dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode exclude source_list $src1,$src3
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src1"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 &> /dev/null
+ check_err $? "(S, G) entry for source $src1 not created after second replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src2"
++ bridge -d mdb get dev br0 grp $grp src $src2 vid 10 &> /dev/null
+ check_fail $? "(S, G) entry for source $src2 created after second replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -q "src $src3"
++ bridge -d mdb get dev br0 grp $grp src $src3 vid 10 &> /dev/null
+ check_err $? "(S, G) entry for source $src3 not created after second replace"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -515,11 +489,9 @@ __cfg_test_port_ip_star_g()
+
+ bridge mdb replace dev br0 port $swp1 grp $grp temp vid 10 \
+ filter_mode exclude source_list $src1 proto bgp
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep -v "src" | \
+- grep -q "bgp"
++ bridge -d mdb get dev br0 grp $grp vid 10 | grep -q "bgp"
+ check_err $? "(*, G) protocol not changed to \"bgp\" after replace"
+- bridge -d mdb show dev br0 vid 10 | grep "$grp" | grep "src" | \
+- grep -q "bgp"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep -q "bgp"
+ check_err $? "(S, G) protocol not changed to \"bgp\" after replace"
+
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+@@ -532,8 +504,8 @@ __cfg_test_port_ip_star_g()
+ bridge mdb add dev br0 port $swp2 grp $grp vid 10 \
+ filter_mode include source_list $src1
+ bridge mdb add dev br0 port $swp1 grp $grp vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$swp1" | grep "$grp" | \
+- grep "$src1" | grep -q "added_by_star_ex"
++ bridge -d mdb get dev br0 grp $grp src $src1 vid 10 | grep "$swp1" | \
++ grep -q "added_by_star_ex"
+ check_err $? "\"added_by_star_ex\" entry not created after adding (*, G) entry"
+ bridge mdb del dev br0 port $swp1 grp $grp vid 10
+ bridge mdb del dev br0 port $swp2 grp $grp src $src1 vid 10
+@@ -606,27 +578,23 @@ __cfg_test_port_ip_sg()
+ RET=0
+
+ bridge mdb add dev br0 port $swp1 $grp_key vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | grep -q "include"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "include"
+ check_err $? "Default filter mode is not \"include\""
+ bridge mdb del dev br0 port $swp1 $grp_key vid 10
+
+ # Check that entries can be added as both permanent and temp and that
+ # group timer is set correctly.
+ bridge mdb add dev br0 port $swp1 $grp_key permanent vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q "permanent"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "permanent"
+ check_err $? "Entry not added as \"permanent\" when should"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 $grp_key vid 10 | grep -q " 0.00"
+ check_err $? "\"permanent\" entry has a pending group timer"
+ bridge mdb del dev br0 port $swp1 $grp_key vid 10
+
+ bridge mdb add dev br0 port $swp1 $grp_key temp vid 10
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "temp"
+ check_err $? "Entry not added as \"temp\" when should"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 $grp_key vid 10 | grep -q " 0.00"
+ check_fail $? "\"temp\" entry has an unpending group timer"
+ bridge mdb del dev br0 port $swp1 $grp_key vid 10
+
+@@ -650,24 +618,19 @@ __cfg_test_port_ip_sg()
+ # Check that we can replace available attributes.
+ bridge mdb add dev br0 port $swp1 $grp_key vid 10 proto 123
+ bridge mdb replace dev br0 port $swp1 $grp_key vid 10 proto 111
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q "111"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "111"
+ check_err $? "Failed to replace protocol"
+
+ bridge mdb replace dev br0 port $swp1 $grp_key vid 10 permanent
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q "permanent"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "permanent"
+ check_err $? "Entry not marked as \"permanent\" after replace"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 $grp_key vid 10 | grep -q " 0.00"
+ check_err $? "Entry has a pending group timer after replace"
+
+ bridge mdb replace dev br0 port $swp1 $grp_key vid 10 temp
+- bridge -d mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q "temp"
++ bridge -d mdb get dev br0 $grp_key vid 10 | grep -q "temp"
+ check_err $? "Entry not marked as \"temp\" after replace"
+- bridge -d -s mdb show dev br0 vid 10 | grep "$grp_key" | \
+- grep -q " 0.00"
++ bridge -d -s mdb get dev br0 $grp_key vid 10 | grep -q " 0.00"
+ check_fail $? "Entry has an unpending group timer after replace"
+ bridge mdb del dev br0 port $swp1 $grp_key vid 10
+
+@@ -675,7 +638,7 @@ __cfg_test_port_ip_sg()
+ # (*, G) ports need to be added to it.
+ bridge mdb add dev br0 port $swp2 grp $grp vid 10
+ bridge mdb add dev br0 port $swp1 $grp_key vid 10
+- bridge mdb show dev br0 vid 10 | grep "$grp_key" | grep $swp2 | \
++ bridge mdb get dev br0 $grp_key vid 10 | grep $swp2 | \
+ grep -q "added_by_star_ex"
+ check_err $? "\"added_by_star_ex\" entry not created after adding (S, G) entry"
+ bridge mdb del dev br0 port $swp1 $grp_key vid 10
+@@ -1102,14 +1065,17 @@ fwd_test()
+ echo
+ log_info "# Forwarding tests"
+
++ # Set the Max Response Delay to 100 centiseconds (1 second) so that the
++ # bridge will start forwarding according to its MDB soon after a
++ # multicast querier is enabled.
++ ip link set dev br0 type bridge mcast_query_response_interval 100
++
+ # Forwarding according to MDB entries only takes place when the bridge
+ # detects that there is a valid querier in the network. Set the bridge
+ # as the querier and assign it a valid IPv6 link-local address to be
+ # used as the source address for MLD queries.
+ ip -6 address add fe80::1/64 nodad dev br0
+ ip link set dev br0 type bridge mcast_querier 1
+- # Wait the default Query Response Interval (10 seconds) for the bridge
+- # to determine that there are no other queriers in the network.
+ sleep 10
+
+ fwd_test_host
+@@ -1117,6 +1083,7 @@ fwd_test()
+
+ ip link set dev br0 type bridge mcast_querier 0
+ ip -6 address del fe80::1/64 dev br0
++ ip link set dev br0 type bridge mcast_query_response_interval 1000
+ }
+
+ ctrl_igmpv3_is_in_test()
+@@ -1132,7 +1099,7 @@ ctrl_igmpv3_is_in_test()
+ $MZ $h1.10 -c 1 -a own -b 01:00:5e:01:01:01 -A 192.0.2.1 -B 239.1.1.1 \
+ -t ip proto=2,p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2) -q
+
+- bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | grep -q 192.0.2.2
++ bridge mdb get dev br0 grp 239.1.1.1 src 192.0.2.2 vid 10 &> /dev/null
+ check_fail $? "Permanent entry affected by IGMP packet"
+
+ # Replace the permanent entry with a temporary one and check that after
+@@ -1145,12 +1112,10 @@ ctrl_igmpv3_is_in_test()
+ $MZ $h1.10 -a own -b 01:00:5e:01:01:01 -c 1 -A 192.0.2.1 -B 239.1.1.1 \
+ -t ip proto=2,p=$(igmpv3_is_in_get 239.1.1.1 192.0.2.2) -q
+
+- bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | grep -v "src" | \
+- grep -q 192.0.2.2
++ bridge -d mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q 192.0.2.2
+ check_err $? "Source not add to source list"
+
+- bridge -d mdb show dev br0 vid 10 | grep 239.1.1.1 | \
+- grep -q "src 192.0.2.2"
++ bridge mdb get dev br0 grp 239.1.1.1 src 192.0.2.2 vid 10 &> /dev/null
+ check_err $? "(S, G) entry not created for new source"
+
+ bridge mdb del dev br0 port $swp1 grp 239.1.1.1 vid 10
+@@ -1172,8 +1137,7 @@ ctrl_mldv2_is_in_test()
+ $MZ -6 $h1.10 -a own -b 33:33:00:00:00:01 -c 1 -A fe80::1 -B ff0e::1 \
+ -t ip hop=1,next=0,p="$p" -q
+
+- bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | \
+- grep -q 2001:db8:1::2
++ bridge mdb get dev br0 grp ff0e::1 src 2001:db8:1::2 vid 10 &> /dev/null
+ check_fail $? "Permanent entry affected by MLD packet"
+
+ # Replace the permanent entry with a temporary one and check that after
+@@ -1186,12 +1150,10 @@ ctrl_mldv2_is_in_test()
+ $MZ -6 $h1.10 -a own -b 33:33:00:00:00:01 -c 1 -A fe80::1 -B ff0e::1 \
+ -t ip hop=1,next=0,p="$p" -q
+
+- bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | grep -v "src" | \
+- grep -q 2001:db8:1::2
++ bridge -d mdb get dev br0 grp ff0e::1 vid 10 | grep -q 2001:db8:1::2
+ check_err $? "Source not add to source list"
+
+- bridge -d mdb show dev br0 vid 10 | grep ff0e::1 | \
+- grep -q "src 2001:db8:1::2"
++ bridge mdb get dev br0 grp ff0e::1 src 2001:db8:1::2 vid 10 &> /dev/null
+ check_err $? "(S, G) entry not created for new source"
+
+ bridge mdb del dev br0 port $swp1 grp ff0e::1 vid 10
+@@ -1208,8 +1170,8 @@ ctrl_test()
+ ctrl_mldv2_is_in_test
+ }
+
+-if ! bridge mdb help 2>&1 | grep -q "replace"; then
+- echo "SKIP: iproute2 too old, missing bridge mdb replace support"
++if ! bridge mdb help 2>&1 | grep -q "get"; then
++ echo "SKIP: iproute2 too old, missing bridge mdb get support"
+ exit $ksft_skip
+ fi
+
+diff --git a/tools/testing/selftests/net/forwarding/bridge_mld.sh b/tools/testing/selftests/net/forwarding/bridge_mld.sh
+index e2b9ff773c6b60..f84ab2e657547a 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_mld.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_mld.sh
+@@ -478,10 +478,10 @@ mldv2exc_timeout_test()
+ RET=0
+ local X=("2001:db8:1::20" "2001:db8:1::30")
+
+- # GMI should be 3 seconds
++ # GMI should be 5 seconds
+ ip link set dev br0 type bridge mcast_query_interval 100 \
+ mcast_query_response_interval 100 \
+- mcast_membership_interval 300
++ mcast_membership_interval 500
+
+ mldv2exclude_prepare $h1
+ ip link set dev br0 type bridge mcast_query_interval 500 \
+@@ -489,7 +489,7 @@ mldv2exc_timeout_test()
+ mcast_membership_interval 1500
+
+ $MZ $h1 -c 1 $MZPKT_ALLOW2 -q
+- sleep 3
++ sleep 5
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
+index 697994a9278bbe..8d7a1a004b7c34 100644
+--- a/tools/testing/selftests/net/forwarding/config
++++ b/tools/testing/selftests/net/forwarding/config
+@@ -6,14 +6,49 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
+ CONFIG_NET_VRF=m
+ CONFIG_BPF_SYSCALL=y
+ CONFIG_CGROUP_BPF=y
++CONFIG_DUMMY=m
++CONFIG_IPV6=y
++CONFIG_IPV6_GRE=m
++CONFIG_IPV6_MROUTE=y
++CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IPV6_PIMSM_V2=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++CONFIG_MACVLAN=m
+ CONFIG_NET_ACT_CT=m
+ CONFIG_NET_ACT_MIRRED=m
+ CONFIG_NET_ACT_MPLS=m
++CONFIG_NET_ACT_PEDIT=m
++CONFIG_NET_ACT_POLICE=m
++CONFIG_NET_ACT_SAMPLE=m
++CONFIG_NET_ACT_SKBEDIT=m
++CONFIG_NET_ACT_TUNNEL_KEY=m
+ CONFIG_NET_ACT_VLAN=m
+ CONFIG_NET_CLS_FLOWER=m
+ CONFIG_NET_CLS_MATCHALL=m
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_META=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_DEMUX=m
++CONFIG_NET_IPIP=m
++CONFIG_NET_SCH_ETS=m
+ CONFIG_NET_SCH_INGRESS=m
+ CONFIG_NET_ACT_GACT=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_TC_SKB_EXT=y
++CONFIG_NET_TEAM=y
++CONFIG_NET_TEAM_MODE_LOADBALANCE=y
++CONFIG_NETFILTER=y
++CONFIG_NF_CONNTRACK=m
++CONFIG_NF_FLOW_TABLE=m
++CONFIG_NF_TABLES=m
+ CONFIG_VETH=m
+ CONFIG_NAMESPACES=y
+ CONFIG_NET_NS=y
++CONFIG_VXLAN=m
++CONFIG_XFRM_USER=m
+diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
+index f1de525cfa55be..62a05bca1e825d 100644
+--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
++++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
+@@ -122,6 +122,8 @@ devlink_reload()
+ still_pending=$(devlink resource show "$DEVLINK_DEV" | \
+ grep -c "size_new")
+ check_err $still_pending "Failed reload - There are still unset sizes"
++
++ udevadm settle
+ }
+
+ declare -A DEVLINK_ORIG
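The added `udevadm settle` closes a race: a devlink reload re-registers netdevs, and udev rules (renames, permissions) may still be running when the test resumes. Settling blocks until the udev event queue is empty. A hedged sketch; the timeout value here is an arbitrary example:

    # Wait (up to 30 s) for udev to finish processing queued events
    # before touching the freshly re-created interfaces.
    udevadm settle --timeout=30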
+diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
+index e37a15eda6c240..97e7675da04fe2 100755
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -4,9 +4,6 @@
+ ##############################################################################
+ # Defines
+
+-# Kselftest framework requirement - SKIP code is 4.
+-ksft_skip=4
+-
+ # Can be overridden by the configuration file.
+ PING=${PING:=ping}
+ PING6=${PING6:=ping6}
+@@ -41,6 +38,32 @@ if [[ -f $relative_path/forwarding.config ]]; then
+ source "$relative_path/forwarding.config"
+ fi
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++busywait()
++{
++ local timeout=$1; shift
++
++ local start_time="$(date -u +%s%3N)"
++ while true
++ do
++ local out
++ out=$("$@")
++ local ret=$?
++ if ((!ret)); then
++ echo -n "$out"
++ return 0
++ fi
++
++ local current_time="$(date -u +%s%3N)"
++ if ((current_time - start_time > timeout)); then
++ echo -n "$out"
++ return 1
++ fi
++ done
++}
++
+ ##############################################################################
+ # Sanity checks
+
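Moving busywait above the sanity checks lets those checks poll with it. The helper re-runs its command arguments until they succeed, echoes the last output either way, and returns 1 after the deadline; the timeout is in milliseconds, since `date -u +%s%3N` yields epoch milliseconds (a GNU date extension). A usage sketch with an illustrative device name:

    wait_for_dev_up()
    {
        ip link show dev "$1" | grep -q "state UP"
    }
    # Poll for up to 2000 ms; non-zero status means the link never came up.
    busywait 2000 wait_for_dev_up dummy0 || echo "dummy0 never came up"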
+@@ -395,29 +418,6 @@ log_info()
+ echo "INFO: $msg"
+ }
+
+-busywait()
+-{
+- local timeout=$1; shift
+-
+- local start_time="$(date -u +%s%3N)"
+- while true
+- do
+- local out
+- out=$("$@")
+- local ret=$?
+- if ((!ret)); then
+- echo -n "$out"
+- return 0
+- fi
+-
+- local current_time="$(date -u +%s%3N)"
+- if ((current_time - start_time > timeout)); then
+- echo -n "$out"
+- return 1
+- fi
+- done
+-}
+-
+ not()
+ {
+ "$@"
+diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
+index c5b0cbc85b3e0b..9b5a63519b949e 100755
+--- a/tools/testing/selftests/net/forwarding/local_termination.sh
++++ b/tools/testing/selftests/net/forwarding/local_termination.sh
+@@ -278,6 +278,10 @@ bridge()
+ cleanup()
+ {
+ pre_cleanup
++
++ ip link set $h2 down
++ ip link set $h1 down
++
+ vrf_cleanup
+ }
+
+diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh
+index af3b398d13f01a..694ece9ba3a742 100755
+--- a/tools/testing/selftests/net/forwarding/no_forwarding.sh
++++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh
+@@ -202,7 +202,7 @@ one_bridge_two_pvids()
+ ip link set $swp2 master br0
+
+ bridge vlan add dev $swp1 vid 1 pvid untagged
+- bridge vlan add dev $swp1 vid 2 pvid untagged
++ bridge vlan add dev $swp2 vid 2 pvid untagged
+
+ run_test "Switch ports in VLAN-aware bridge with different PVIDs"
+
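The one-character fix above matters: both pvid entries were previously installed on $swp1, so $swp2 kept the default PVID 1 and the "different PVIDs" case was never actually exercised. The intended setup, spelled out:

    # Each port gets its own untagged PVID: ingress on swp1 lands in
    # VLAN 1, ingress on swp2 in VLAN 2, and the bridge must not
    # forward between the two.
    bridge vlan add dev "$swp1" vid 1 pvid untagged
    bridge vlan add dev "$swp2" vid 2 pvid untagged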
+@@ -233,6 +233,9 @@ cleanup()
+ {
+ pre_cleanup
+
++ ip link set dev $swp2 down
++ ip link set dev $swp1 down
++
+ h2_destroy
+ h1_destroy
+
+diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
+index b0f5e55d2d0b25..58962963650227 100755
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -235,9 +235,6 @@ mirred_egress_to_ingress_tcp_test()
+ check_err $? "didn't mirred redirect ICMP"
+ tc_check_packets "dev $h1 ingress" 102 10
+ check_err $? "didn't drop mirred ICMP"
+- local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
+- test ${overlimits} = 10
+- check_err $? "wrong overlimits, expected 10 got ${overlimits}"
+
+ tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
+ tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
+diff --git a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
+index 20a7cb7222b8ba..c2420bb72c1281 100755
+--- a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
++++ b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
+@@ -209,14 +209,17 @@ test_l2_miss_multicast()
+ # both registered and unregistered multicast traffic.
+ bridge link set dev $swp2 mcast_router 2
+
++ # Set the Max Response Delay to 100 centiseconds (1 second) so that the
++ # bridge will start forwarding according to its MDB soon after a
++ # multicast querier is enabled.
++ ip link set dev br1 type bridge mcast_query_response_interval 100
++
+ # Forwarding according to MDB entries only takes place when the bridge
+ # detects that there is a valid querier in the network. Set the bridge
+ # as the querier and assign it a valid IPv6 link-local address to be
+ # used as the source address for MLD queries.
+ ip link set dev br1 type bridge mcast_querier 1
+ ip -6 address add fe80::1/64 nodad dev br1
+- # Wait the default Query Response Interval (10 seconds) for the bridge
+- # to determine that there are no other queriers in the network.
+ sleep 10
+
+ test_l2_miss_multicast_ipv4
+@@ -224,6 +227,7 @@ test_l2_miss_multicast()
+
+ ip -6 address del fe80::1/64 dev br1
+ ip link set dev br1 type bridge mcast_querier 0
++ ip link set dev br1 type bridge mcast_query_response_interval 1000
+ bridge link set dev $swp2 mcast_router 1
+ }
+
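The teardown path above restores 1000 centiseconds, which is the bridge's default mcast_query_response_interval (10 s). An equivalent sketch that keeps the magic number in one place (the variable name is illustrative):

    readonly BR_QRI_DEFAULT=1000  # bridge default: 10 s, in centiseconds
    ip link set dev br1 type bridge mcast_query_response_interval 100
    # ... run the multicast test cases ...
    ip link set dev br1 type bridge mcast_query_response_interval "$BR_QRI_DEFAULT"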
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+index ac97f07e5ce826..bd3f7d492af2bb 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+@@ -354,7 +354,7 @@ __ping_ipv4()
+
+ # Send 100 packets and verify that at least 100 packets hit the rule,
+ # to overcome ARP noise.
+- PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
++ PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
+ check_err $? "Ping failed"
+
+ tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
+@@ -410,7 +410,7 @@ __ping_ipv6()
+
+ # Send 100 packets and verify that at least 100 packets hit the rule,
+ # to overcome neighbor discovery noise.
+- PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
++ PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
+ check_err $? "Ping failed"
+
+ tc_check_at_least_x_packets "dev $rp1 egress" 101 100
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
+index d880df89bc8bd5..e83fde79f40d0f 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
+@@ -457,7 +457,7 @@ __ping_ipv4()
+
+ # Send 100 packets and verify that at least 100 packets hit the rule,
+ # to overcome ARP noise.
+- PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
++ PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
+ check_err $? "Ping failed"
+
+ tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
+@@ -522,7 +522,7 @@ __ping_ipv6()
+
+ # Send 100 packets and verify that at least 100 packets hit the rule,
+ # to overcome neighbor discovery noise.
+- PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
++ PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
+ check_err $? "Ping failed"
+
+ tc_check_at_least_x_packets "dev $rp1 egress" 101 100
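PING_COUNT and PING_TIMEOUT are consumed by ping_do from its environment, so the `VAR=value command` prefix scopes the override to a single call; the bump from 11 s to 20 s simply gives 100 packets more headroom on slow or emulated runners. The pattern, sketched (ping_do, $dev and $dst_ip come from the surrounding test library):

    # One-shot overrides: both variables apply only to this invocation.
    PING_COUNT=100 PING_TIMEOUT=20 ping_do "$dev" "$dst_ip"
    check_err $? "Ping failed"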
+diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
+index 75e3fdacdf7352..2465ff5bb3a8e5 100644
+--- a/tools/testing/selftests/net/ip_local_port_range.c
++++ b/tools/testing/selftests/net/ip_local_port_range.c
+@@ -343,7 +343,7 @@ TEST_F(ip_local_port_range, late_bind)
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } addr;
+- socklen_t addr_len;
++ socklen_t addr_len = 0;
+ const int one = 1;
+ int fd, err;
+ __u32 range;
+diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
+index 9a8229abfa026a..be4a30a0d02aef 100644
+--- a/tools/testing/selftests/net/ipsec.c
++++ b/tools/testing/selftests/net/ipsec.c
+@@ -2263,7 +2263,7 @@ static int check_results(void)
+
+ int main(int argc, char **argv)
+ {
+- unsigned int nr_process = 1;
++ long nr_process = 1;
+ int route_sock = -1, ret = KSFT_SKIP;
+ int test_desc_fd[2];
+ uint32_t route_seq;
+@@ -2284,7 +2284,7 @@ int main(int argc, char **argv)
+ exit_usage(argv);
+ }
+
+- if (nr_process > MAX_PROCESSES || !nr_process) {
++ if (nr_process > MAX_PROCESSES || nr_process < 1) {
+ printk("nr_process should be between [1; %u]",
+ MAX_PROCESSES);
+ exit_usage(argv);
+diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
+new file mode 100644
+index 00000000000000..e2c35eda230afc
+--- /dev/null
++++ b/tools/testing/selftests/net/lib.sh
+@@ -0,0 +1,90 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++##############################################################################
++# Defines
++
++WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}
++BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
++
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++# namespace list created by setup_ns
++NS_LIST=()
++
++##############################################################################
++# Helpers
++busywait()
++{
++ local timeout=$1; shift
++
++ local start_time="$(date -u +%s%3N)"
++ while true
++ do
++ local out
++ if out=$("$@"); then
++ echo -n "$out"
++ return 0
++ fi
++
++ local current_time="$(date -u +%s%3N)"
++ if ((current_time - start_time > timeout)); then
++ echo -n "$out"
++ return 1
++ fi
++ done
++}
++
++cleanup_ns()
++{
++ local ns=""
++ local ret=0
++
++ for ns in "$@"; do
++ [ -z "${ns}" ] && continue
++ ip netns pids "${ns}" 2> /dev/null | xargs -r kill || true
++ ip netns delete "${ns}" &> /dev/null || true
++ if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
++ echo "Warn: Failed to remove namespace $ns"
++ ret=1
++ fi
++ done
++
++ return $ret
++}
++
++cleanup_all_ns()
++{
++ cleanup_ns "${NS_LIST[@]}"
++}
++
++# Set up netns with the given names as prefix, e.g.:
++# setup_ns local remote
++setup_ns()
++{
++ local ns=""
++ local ns_name=""
++ local ns_list=()
++ local ns_exist=
++ for ns_name in "$@"; do
++ # Some tests may set up/remove the same netns multiple times
++ if unset ${ns_name} 2> /dev/null; then
++ ns="${ns_name,,}-$(mktemp -u XXXXXX)"
++ eval readonly ${ns_name}="$ns"
++ ns_exist=false
++ else
++ eval ns='$'${ns_name}
++ cleanup_ns "$ns"
++ ns_exist=true
++ fi
++
++ if ! ip netns add "$ns"; then
++ echo "Failed to create namespace $ns_name"
++ cleanup_ns "${ns_list[@]}"
++ return $ksft_skip
++ fi
++ ip -n "$ns" link set lo up
++ ! $ns_exist && ns_list+=("$ns")
++ done
++ NS_LIST+=("${ns_list[@]}")
++}
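A typical consumer of the new net/lib.sh, sketched with illustrative names: setup_ns lowercases each prefix, appends a random suffix, exports the prefix itself as a readonly variable holding the namespace name, and records it in NS_LIST so cleanup_all_ns can tear everything down.

    . "$(dirname "${0}")/lib.sh"
    trap cleanup_all_ns EXIT
    # Creates e.g. "server-3fA9Qx" and "client-Zk02mV", and sets the
    # readonly variables $SERVER and $CLIENT to those names.
    setup_ns SERVER CLIENT || exit $ksft_skip
    ip -n "$SERVER" link add veth0 type veth peer name veth0 netns "$CLIENT"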
+diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
+index e317c2e44dae84..4f80014cae4940 100644
+--- a/tools/testing/selftests/net/mptcp/config
++++ b/tools/testing/selftests/net/mptcp/config
+@@ -22,8 +22,11 @@ CONFIG_NFT_TPROXY=m
+ CONFIG_NFT_SOCKET=m
+ CONFIG_IP_ADVANCED_ROUTER=y
+ CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_MANGLE=m
+ CONFIG_IP_NF_TARGET_REJECT=m
+ CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_IP6_NF_FILTER=m
+ CONFIG_NET_ACT_CSUM=m
+ CONFIG_NET_ACT_PEDIT=m
+ CONFIG_NET_CLS_ACT=y
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index 85a8ee9395b39a..7f89623f1080e1 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -56,20 +56,20 @@ __chk_nr()
+ local command="$1"
+ local expected=$2
+ local msg="$3"
+- local skip="${4:-SKIP}"
++ local skip="${4-SKIP}"
+ local nr
+
+ nr=$(eval $command)
+
+ printf "%-50s" "$msg"
+- if [ $nr != $expected ]; then
+- if [ $nr = "$skip" ] && ! mptcp_lib_expect_all_features; then
++ if [ "$nr" != "$expected" ]; then
++ if [ "$nr" = "$skip" ] && ! mptcp_lib_expect_all_features; then
+ echo "[ skip ] Feature probably not supported"
+ mptcp_lib_result_skip "${msg}"
+ else
+ echo "[ fail ] expected $expected found $nr"
+ mptcp_lib_result_fail "${msg}"
+- ret=$test_cnt
++ ret=${KSFT_FAIL}
+ fi
+ else
+ echo "[ ok ]"
+@@ -115,11 +115,11 @@ wait_msk_nr()
+ if [ $i -ge $timeout ]; then
+ echo "[ fail ] timeout while expecting $expected max $max last $nr"
+ mptcp_lib_result_fail "${msg} # timeout"
+- ret=$test_cnt
++ ret=${KSFT_FAIL}
+ elif [ $nr != $expected ]; then
+ echo "[ fail ] expected $expected found $nr"
+ mptcp_lib_result_fail "${msg} # unexpected result"
+- ret=$test_cnt
++ ret=${KSFT_FAIL}
+ else
+ echo "[ ok ]"
+ mptcp_lib_result_pass "${msg}"
+@@ -166,9 +166,13 @@ chk_msk_listen()
+ chk_msk_inuse()
+ {
+ local expected=$1
+- local msg="$2"
++ local msg="....chk ${2:-${expected}} msk in use"
+ local listen_nr
+
++ if [ "${expected}" -eq 0 ]; then
++ msg+=" after flush"
++ fi
++
+ listen_nr=$(ss -N "${ns}" -Ml | grep -c LISTEN)
+ expected=$((expected + listen_nr))
+
+@@ -179,7 +183,7 @@ chk_msk_inuse()
+ sleep 0.1
+ done
+
+- __chk_nr get_msk_inuse $expected "$msg" 0
++ __chk_nr get_msk_inuse $expected "${msg}" 0
+ }
+
+ # $1: ns, $2: port
+@@ -199,6 +203,20 @@ wait_local_port_listen()
+ done
+ }
+
++# $1: cestab nr
++chk_msk_cestab()
++{
++ local expected=$1
++ local msg="....chk ${2:-${expected}} cestab"
++
++ if [ "${expected}" -eq 0 ]; then
++ msg+=" after flush"
++ fi
++
++ __chk_nr "mptcp_lib_get_counter ${ns} MPTcpExtMPCurrEstab" \
++ "${expected}" "${msg}" ""
++}
++
+ wait_connected()
+ {
+ local listener_ns="${1}"
+@@ -235,10 +253,12 @@ wait_connected $ns 10000
+ chk_msk_nr 2 "after MPC handshake "
+ chk_msk_remote_key_nr 2 "....chk remote_key"
+ chk_msk_fallback_nr 0 "....chk no fallback"
+-chk_msk_inuse 2 "....chk 2 msk in use"
++chk_msk_inuse 2
++chk_msk_cestab 2
+ flush_pids
+
+-chk_msk_inuse 0 "....chk 0 msk in use after flush"
++chk_msk_inuse 0 "2->0"
++chk_msk_cestab 0 "2->0"
+
+ echo "a" | \
+ timeout ${timeout_test} \
+@@ -253,10 +273,12 @@ echo "b" | \
+ 127.0.0.1 >/dev/null &
+ wait_connected $ns 10001
+ chk_msk_fallback_nr 1 "check fallback"
+-chk_msk_inuse 1 "....chk 1 msk in use"
++chk_msk_inuse 1
++chk_msk_cestab 1
+ flush_pids
+
+-chk_msk_inuse 0 "....chk 0 msk in use after flush"
++chk_msk_inuse 0 "1->0"
++chk_msk_cestab 0 "1->0"
+
+ NR_CLIENTS=100
+ for I in `seq 1 $NR_CLIENTS`; do
+@@ -277,10 +299,12 @@ for I in `seq 1 $NR_CLIENTS`; do
+ done
+
+ wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+-chk_msk_inuse $((NR_CLIENTS*2)) "....chk many msk in use"
++chk_msk_inuse $((NR_CLIENTS*2)) "many"
++chk_msk_cestab $((NR_CLIENTS*2)) "many"
+ flush_pids
+
+-chk_msk_inuse 0 "....chk 0 msk in use after flush"
++chk_msk_inuse 0 "many->0"
++chk_msk_cestab 0 "many->0"
+
+ mptcp_lib_result_print_all_tap
+ exit $ret
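One subtle change in __chk_nr above: `${4:-SKIP}` became `${4-SKIP}`. With `:-`, the default applies when $4 is unset or empty; with `-`, it applies only when $4 is unset. That is what lets chk_msk_cestab pass an explicit empty string as the skip argument to disable the fallback entirely. The distinction, in a few lines of POSIX parameter expansion:

    unset v
    echo "${v-default}"    # -> default: v is unset
    v=""
    echo "${v-default}"    # -> empty: v is set but null
    echo "${v:-default}"   # -> default: ":-" also covers the null case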
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index c7f9ebeebc2c5b..4209b95690394b 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -18,6 +18,7 @@
+
+ #include <sys/ioctl.h>
+ #include <sys/poll.h>
++#include <sys/random.h>
+ #include <sys/sendfile.h>
+ #include <sys/stat.h>
+ #include <sys/socket.h>
+@@ -1114,26 +1115,22 @@ int main_loop_s(int listensock)
+ return 1;
+ }
+
+- if (--cfg_repeat > 0) {
+- if (cfg_input)
+- close(fd);
++ if (cfg_input)
++ close(fd);
++
++ if (--cfg_repeat > 0)
+ goto again;
+- }
+
+ return 0;
+ }
+
+ static void init_rng(void)
+ {
+- int fd = open("/dev/urandom", O_RDONLY);
+ unsigned int foo;
+
+- if (fd > 0) {
+- int ret = read(fd, &foo, sizeof(foo));
+-
+- if (ret < 0)
+- srand(fd + foo);
+- close(fd);
++ if (getrandom(&foo, sizeof(foo), 0) == -1) {
++ perror("getrandom");
++ exit(1);
+ }
+
+ srand(foo);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index b1fc8afd072dc6..d203d314b7b265 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -1,6 +1,11 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
++# Double quotes to prevent globbing and word splitting are recommended in new
++# code, but we accept the existing unquoted uses here, at least until all the
++# other issues detected by shellcheck have been addressed.
++#shellcheck disable=SC2086
++
+ . "$(dirname "${0}")/mptcp_lib.sh"
+
+ time_start=$(date +%s)
+@@ -13,7 +18,6 @@ sout=""
+ cin_disconnect=""
+ cin=""
+ cout=""
+-ksft_skip=4
+ capture=false
+ timeout_poll=30
+ timeout_test=$((timeout_poll * 2 + 1))
+@@ -131,6 +135,8 @@ ns4="ns4-$rndh"
+ TEST_COUNT=0
+ TEST_GROUP=""
+
++# This function is used in the cleanup trap
++#shellcheck disable=SC2317
+ cleanup()
+ {
+ rm -f "$cin_disconnect" "$cout_disconnect"
+@@ -225,8 +231,9 @@ set_ethtool_flags() {
+ local dev="$2"
+ local flags="$3"
+
+- ip netns exec $ns ethtool -K $dev $flags 2>/dev/null
+- [ $? -eq 0 ] && echo "INFO: set $ns dev $dev: ethtool -K $flags"
++ if ip netns exec $ns ethtool -K $dev $flags 2>/dev/null; then
++ echo "INFO: set $ns dev $dev: ethtool -K $flags"
++ fi
+ }
+
+ set_random_ethtool_flags() {
+@@ -310,12 +317,6 @@ check_mptcp_disabled()
+ return 0
+ }
+
+-# $1: IP address
+-is_v6()
+-{
+- [ -z "${1##*:*}" ]
+-}
+-
+ do_ping()
+ {
+ local listener_ns="$1"
+@@ -324,7 +325,7 @@ do_ping()
+ local ping_args="-q -c 1"
+ local rc=0
+
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ $ipv6 || return 0
+ ping_args="${ping_args} -6"
+ fi
+@@ -341,21 +342,6 @@ do_ping()
+ return 0
+ }
+
+-# $1: ns, $2: MIB counter
+-get_mib_counter()
+-{
+- local listener_ns="${1}"
+- local mib="${2}"
+-
+- # strip the header
+- ip netns exec "${listener_ns}" \
+- nstat -z -a "${mib}" | \
+- tail -n+2 | \
+- while read a count c rest; do
+- echo $count
+- done
+-}
+-
+ # $1: ns, $2: port
+ wait_local_port_listen()
+ {
+@@ -384,19 +370,19 @@ do_transfer()
+ local extra_args="$7"
+
+ local port
+- port=$((10000+$TEST_COUNT))
++ port=$((10000+TEST_COUNT))
+ TEST_COUNT=$((TEST_COUNT+1))
+
+ if [ "$rcvbuf" -gt 0 ]; then
+- extra_args="$extra_args -R $rcvbuf"
++ extra_args+=" -R $rcvbuf"
+ fi
+
+ if [ "$sndbuf" -gt 0 ]; then
+- extra_args="$extra_args -S $sndbuf"
++ extra_args+=" -S $sndbuf"
+ fi
+
+ if [ -n "$testmode" ]; then
+- extra_args="$extra_args -m $testmode"
++ extra_args+=" -m $testmode"
+ fi
+
+ if [ -n "$extra_args" ] && $options_log; then
+@@ -441,12 +427,20 @@ do_transfer()
+ nstat -n
+ fi
+
+- local stat_synrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+- local stat_ackrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+- local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
+- local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+- local stat_csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+- local stat_csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
++ local stat_synrx_last_l
++ local stat_ackrx_last_l
++ local stat_cookietx_last
++ local stat_cookierx_last
++ local stat_csum_err_s
++ local stat_csum_err_c
++ local stat_tcpfb_last_l
++ stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
++ stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
++ stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
++ stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
++ stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
++ stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
++ stat_tcpfb_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
+
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+@@ -509,11 +503,19 @@ do_transfer()
+ check_transfer $cin $sout "file received by server"
+ rets=$?
+
+- local stat_synrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+- local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+- local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
+- local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+- local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
++ local extra=""
++ local stat_synrx_now_l
++ local stat_ackrx_now_l
++ local stat_cookietx_now
++ local stat_cookierx_now
++ local stat_ooo_now
++ local stat_tcpfb_now_l
++ stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
++ stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
++ stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
++ stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
++ stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
++ stat_tcpfb_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK")
+
+ expect_synrx=$((stat_synrx_last_l))
+ expect_ackrx=$((stat_ackrx_last_l))
+@@ -522,8 +524,8 @@ do_transfer()
+ cookies=${cookies##*=}
+
+ if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then
+- expect_synrx=$((stat_synrx_last_l+$connect_per_transfer))
+- expect_ackrx=$((stat_ackrx_last_l+$connect_per_transfer))
++ expect_synrx=$((stat_synrx_last_l+connect_per_transfer))
++ expect_ackrx=$((stat_ackrx_last_l+connect_per_transfer))
+ fi
+
+ if [ ${stat_synrx_now_l} -lt ${expect_synrx} ]; then
+@@ -531,66 +533,75 @@ do_transfer()
+ "${stat_synrx_now_l}" "${expect_synrx}" 1>&2
+ retc=1
+ fi
+- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then
++ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ] && [ ${stat_ooo_now} -eq 0 ]; then
+ if [ ${stat_ooo_now} -eq 0 ]; then
+ printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
+ "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+ rets=1
+ else
+- printf "[ Note ] fallback due to TCP OoO"
++ extra+=" [ Note ] fallback due to TCP OoO"
+ fi
+ fi
+
+ if $checksum; then
+- local csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+- local csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
++ local csum_err_s
++ local csum_err_c
++ csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
++ csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+
+ local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
+ if [ $csum_err_s_nr -gt 0 ]; then
+- printf "[ FAIL ]\nserver got $csum_err_s_nr data checksum error[s]"
++ printf "[ FAIL ]\nserver got %d data checksum error[s]" ${csum_err_s_nr}
+ rets=1
+ fi
+
+ local csum_err_c_nr=$((csum_err_c - stat_csum_err_c))
+ if [ $csum_err_c_nr -gt 0 ]; then
+- printf "[ FAIL ]\nclient got $csum_err_c_nr data checksum error[s]"
++ printf "[ FAIL ]\nclient got %d data checksum error[s]" ${csum_err_c_nr}
+ retc=1
+ fi
+ fi
+
+- if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
+- printf "[ OK ]"
+- mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}"
+- else
+- mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}"
++ if [ ${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
++ mptcp_lib_pr_fail "unexpected fallback to TCP"
++ rets=1
+ fi
+
+ if [ $cookies -eq 2 ];then
+ if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
+- printf " WARN: CookieSent: did not advance"
++ extra+=" WARN: CookieSent: did not advance"
+ fi
+ if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then
+- printf " WARN: CookieRecv: did not advance"
++ extra+=" WARN: CookieRecv: did not advance"
+ fi
+ else
+ if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then
+- printf " WARN: CookieSent: changed"
++ extra+=" WARN: CookieSent: changed"
+ fi
+ if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then
+- printf " WARN: CookieRecv: changed"
++ extra+=" WARN: CookieRecv: changed"
+ fi
+ fi
+
+ if [ ${stat_synrx_now_l} -gt ${expect_synrx} ]; then
+- printf " WARN: SYNRX: expect %d, got %d (probably retransmissions)" \
+- "${expect_synrx}" "${stat_synrx_now_l}"
++ extra+=" WARN: SYNRX: expect ${expect_synrx},"
++ extra+=" got ${stat_synrx_now_l} (probably retransmissions)"
+ fi
+ if [ ${stat_ackrx_now_l} -gt ${expect_ackrx} ]; then
+- printf " WARN: ACKRX: expect %d, got %d (probably retransmissions)" \
+- "${expect_ackrx}" "${stat_ackrx_now_l}"
++ extra+=" WARN: ACKRX: expect ${expect_ackrx},"
++ extra+=" got ${stat_ackrx_now_l} (probably retransmissions)"
++ fi
++
++ if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
++ printf "[ OK ]%s\n" "${extra}"
++ mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}"
++ else
++ if [ -n "${extra}" ]; then
++ printf "%s\n" "${extra:1}"
++ fi
++ mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}"
+ fi
+
+- echo
+ cat "$capout"
+ [ $retc -eq 0 ] && [ $rets -eq 0 ]
+ }
+@@ -635,12 +646,12 @@ run_tests_lo()
+ fi
+
+ # skip if we don't want v6
+- if ! $ipv6 && is_v6 "${connect_addr}"; then
++ if ! $ipv6 && mptcp_lib_is_v6 "${connect_addr}"; then
+ return 0
+ fi
+
+ local local_addr
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ local_addr="::"
+ else
+ local_addr="0.0.0.0"
+@@ -708,7 +719,7 @@ run_test_transparent()
+ TEST_GROUP="${msg}"
+
+ # skip if we don't want v6
+- if ! $ipv6 && is_v6 "${connect_addr}"; then
++ if ! $ipv6 && mptcp_lib_is_v6 "${connect_addr}"; then
+ return 0
+ fi
+
+@@ -722,7 +733,7 @@ run_test_transparent()
+ return
+ fi
+
+-ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
++ if ! ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
+ flush ruleset
+ table inet mangle {
+ chain divert {
+@@ -733,7 +744,7 @@ table inet mangle {
+ }
+ }
+ EOF
+- if [ $? -ne 0 ]; then
++ then
+ echo "SKIP: $msg, could not load nft ruleset"
+ mptcp_lib_fail_if_expected_feature "nft rules"
+ mptcp_lib_result_skip "${TEST_GROUP}"
+@@ -741,15 +752,14 @@ EOF
+ fi
+
+ local local_addr
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ local_addr="::"
+ r6flag="-6"
+ else
+ local_addr="0.0.0.0"
+ fi
+
+- ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100
+- if [ $? -ne 0 ]; then
++ if ! ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100; then
+ ip netns exec "$listener_ns" nft flush ruleset
+ echo "SKIP: $msg, ip $r6flag rule failed"
+ mptcp_lib_fail_if_expected_feature "ip rule"
+@@ -757,8 +767,7 @@ EOF
+ return
+ fi
+
+- ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100
+- if [ $? -ne 0 ]; then
++ if ! ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100; then
+ ip netns exec "$listener_ns" nft flush ruleset
+ ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
+ echo "SKIP: $msg, ip route add local $local_addr failed"
+@@ -918,10 +927,10 @@ mptcp_lib_result_code "${ret}" "ping tests"
+ stop_if_error "Could not even run ping tests"
+
+ [ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss delay ${tc_delay}ms
+-echo -n "INFO: Using loss of $tc_loss "
+-test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms "
++tc_info="loss of $tc_loss "
++test "$tc_delay" -gt 0 && tc_info+="delay $tc_delay ms "
+
+-reorder_delay=$(($tc_delay / 4))
++reorder_delay=$((tc_delay / 4))
+
+ if [ -z "${tc_reorder}" ]; then
+ reorder1=$((RANDOM%10))
+@@ -930,17 +939,17 @@ if [ -z "${tc_reorder}" ]; then
+
+ if [ $reorder_delay -gt 0 ] && [ $reorder1 -lt 100 ] && [ $reorder2 -gt 0 ]; then
+ tc_reorder="reorder ${reorder1}% ${reorder2}%"
+- echo -n "$tc_reorder with delay ${reorder_delay}ms "
++ tc_info+="$tc_reorder with delay ${reorder_delay}ms "
+ fi
+ elif [ "$tc_reorder" = "0" ];then
+ tc_reorder=""
+ elif [ "$reorder_delay" -gt 0 ];then
+ # reordering requires some delay
+ tc_reorder="reorder $tc_reorder"
+- echo -n "$tc_reorder with delay ${reorder_delay}ms "
++ tc_info+="$tc_reorder with delay ${reorder_delay}ms "
+ fi
+
+-echo "on ns3eth4"
++echo "INFO: Using ${tc_info}on ns3eth4"
+
+ tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${reorder_delay}ms $tc_reorder
+
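As the `# reordering requires some delay` comment says, netem can only reorder packets it is currently holding, so a reorder specification without a delay is a no-op; the qdisc built above always pairs the two. A standalone sketch with an illustrative device name:

    # 25% of packets are sent immediately (with 50% correlation); the
    # rest are held for 10 ms, which is what produces the reordering.
    tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50%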
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+index 8672d898f8cdad..218aac46732125 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+@@ -18,6 +18,7 @@
+ #include <time.h>
+
+ #include <sys/ioctl.h>
++#include <sys/random.h>
+ #include <sys/socket.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+@@ -519,15 +520,11 @@ static int client(int unixfd)
+
+ static void init_rng(void)
+ {
+- int fd = open("/dev/urandom", O_RDONLY);
+ unsigned int foo;
+
+- if (fd > 0) {
+- int ret = read(fd, &foo, sizeof(foo));
+-
+- if (ret < 0)
+- srand(fd + foo);
+- close(fd);
++ if (getrandom(&foo, sizeof(foo), 0) == -1) {
++ perror("getrandom");
++ exit(1);
+ }
+
+ srand(foo);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index dc895b7b94e19d..17ace5627ce365 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -21,7 +21,9 @@ cinfail=""
+ cinsent=""
+ tmpfile=""
+ cout=""
++err=""
+ capout=""
++cappid=""
+ ns1=""
+ ns2=""
+ ksft_skip=4
+@@ -29,11 +31,11 @@ iptables="iptables"
+ ip6tables="ip6tables"
+ timeout_poll=30
+ timeout_test=$((timeout_poll * 2 + 1))
+-capture=0
+-checksum=0
++capture=false
++checksum=false
+ ip_mptcp=0
+ check_invert=0
+-validate_checksum=0
++validate_checksum=false
+ init=0
+ evts_ns1=""
+ evts_ns2=""
+@@ -98,7 +100,7 @@ init_partial()
+ ip netns exec $netns sysctl -q net.mptcp.pm_type=0 2>/dev/null || true
+ ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
+ ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
+- if [ $checksum -eq 1 ]; then
++ if $checksum; then
+ ip netns exec $netns sysctl -q net.mptcp.checksum_enabled=1
+ fi
+ done
+@@ -133,8 +135,8 @@ init_shapers()
+ {
+ local i
+ for i in $(seq 1 4); do
+- tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1
+- tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1
++ tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1ms
++ tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1ms
+ done
+ }
+
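The `delay 1` to `delay 1ms` change in init_shapers gives every netem time value an explicit unit, presumably because recent iproute2 releases no longer accept the bare number (an assumption based on this hunk; the safe rule is simply to always write the unit):

    # Always spell out the unit on tc time values; "eth0" is illustrative.
    tc qdisc add dev eth0 root netem rate 20mbit delay 1ms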
+@@ -159,6 +161,11 @@ check_tools()
+ exit $ksft_skip
+ fi
+
++ if ! ss -h | grep -q MPTCP; then
++ echo "SKIP: ss tool does not support MPTCP"
++ exit $ksft_skip
++ fi
++
+ # Use the legacy version if available to support old kernel versions
+ if iptables-legacy -V &> /dev/null; then
+ iptables="iptables-legacy"
+@@ -182,6 +189,7 @@ init() {
+ cin=$(mktemp)
+ cinsent=$(mktemp)
+ cout=$(mktemp)
++ err=$(mktemp)
+ evts_ns1=$(mktemp)
+ evts_ns2=$(mktemp)
+
+@@ -197,6 +205,7 @@ cleanup()
+ rm -f "$sin" "$sout" "$cinsent" "$cinfail"
+ rm -f "$tmpfile"
+ rm -rf $evts_ns1 $evts_ns2
++ rm -f "$err"
+ cleanup_partial
+ }
+
+@@ -378,7 +387,7 @@ reset_with_checksum()
+ ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=$ns1_enable
+ ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=$ns2_enable
+
+- validate_checksum=1
++ validate_checksum=true
+ }
+
+ reset_with_allow_join_id0()
+@@ -411,7 +420,7 @@ reset_with_allow_join_id0()
+ setup_fail_rules()
+ {
+ check_invert=1
+- validate_checksum=1
++ validate_checksum=true
+ local i="$1"
+ local ip="${2:-4}"
+ local tables
+@@ -456,16 +465,17 @@ reset_with_fail()
+ fi
+ }
+
++start_events()
++{
++ mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
++ mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
++}
++
+ reset_with_events()
+ {
+ reset "${1}" || return 1
+
+- :> "$evts_ns1"
+- :> "$evts_ns2"
+- ip netns exec $ns1 ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
+- evts_ns1_pid=$!
+- ip netns exec $ns2 ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
+- evts_ns2_pid=$!
++ start_events
+ }
+
+ reset_with_tcp_filter()
+@@ -476,9 +486,10 @@ reset_with_tcp_filter()
+ local ns="${!1}"
+ local src="${2}"
+ local target="${3}"
++ local chain="${4:-INPUT}"
+
+ if ! ip netns exec "${ns}" ${iptables} \
+- -A INPUT \
++ -A "${chain}" \
+ -s "${src}" \
+ -p tcp \
+ -j "${target}"; then
+@@ -587,12 +598,6 @@ link_failure()
+ done
+ }
+
+-# $1: IP address
+-is_v6()
+-{
+- [ -z "${1##*:*}" ]
+-}
+-
+ # $1: ns, $2: port
+ wait_local_port_listen()
+ {
+@@ -611,25 +616,9 @@ wait_local_port_listen()
+ done
+ }
+
+-# $1: ns ; $2: counter
+-get_counter()
+-{
+- local ns="${1}"
+- local counter="${2}"
+- local count
+-
+- count=$(ip netns exec ${ns} nstat -asz "${counter}" | awk 'NR==1 {next} {print $2}')
+- if [ -z "${count}" ]; then
+- mptcp_lib_fail_if_expected_feature "${counter} counter"
+- return 1
+- fi
+-
+- echo "${count}"
+-}
+-
+ rm_addr_count()
+ {
+- get_counter "${1}" "MPTcpExtRmAddr"
++ mptcp_lib_get_counter "${1}" "MPTcpExtRmAddr"
+ }
+
+ # $1: ns, $2: old rm_addr counter in $ns
+@@ -649,7 +638,7 @@ wait_rm_addr()
+
+ rm_sf_count()
+ {
+- get_counter "${1}" "MPTcpExtRmSubflow"
++ mptcp_lib_get_counter "${1}" "MPTcpExtRmSubflow"
+ }
+
+ # $1: ns, $2: old rm_sf counter in $ns
+@@ -672,33 +661,22 @@ wait_mpj()
+ local ns="${1}"
+ local cnt old_cnt
+
+- old_cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
++ old_cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
+
+ local i
+ for i in $(seq 10); do
+- cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
++ cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
+ [ "$cnt" = "${old_cnt}" ] || break
+ sleep 0.1
+ done
+ }
+
+-kill_wait()
+-{
+- kill $1 > /dev/null 2>&1
+- wait $1 2>/dev/null
+-}
+-
+ kill_events_pids()
+ {
+- kill_wait $evts_ns1_pid
+- kill_wait $evts_ns2_pid
+-}
+-
+-kill_tests_wait()
+-{
+- #shellcheck disable=SC2046
+- kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
+- wait
++ mptcp_lib_kill_wait $evts_ns1_pid
++ evts_ns1_pid=0
++ mptcp_lib_kill_wait $evts_ns2_pid
++ evts_ns2_pid=0
+ }
+
+ pm_nl_set_limits()
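The removed kill_wait above shows the reap pattern now centralized as mptcp_lib_kill_wait in mptcp_lib.sh (whose definition this patch does not show): kill the background helper, then wait on it so the shell reaps the job silently instead of printing "Terminated". A sketch of such a helper, with the zero-pid guard the callers rely on when they reset evts_ns*_pid to 0:

    kill_wait()
    {
        [ "${1:-0}" -eq 0 ] && return 0    # nothing to kill
        kill "$1" > /dev/null 2>&1
        wait "$1" 2> /dev/null             # reap; silences job messages
        return 0
    }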
+@@ -826,7 +804,7 @@ pm_nl_check_endpoint()
+ [ -n "$_flags" ]; flags="flags $_flags"
+ shift
+ elif [ $1 = "dev" ]; then
+- [ -n "$2" ]; dev="dev $1"
++ [ -n "$2" ]; dev="dev $2"
+ shift
+ elif [ $1 = "id" ]; then
+ _id=$2
+@@ -842,7 +820,7 @@ pm_nl_check_endpoint()
+ done
+
+ if [ -z "$id" ]; then
+- test_fail "bad test - missing endpoint id"
++ fail_test "bad test - missing endpoint id"
+ return
+ fi
+
+@@ -852,18 +830,18 @@ pm_nl_check_endpoint()
+ line="${line% }"
+ # the dump order is: address id flags port dev
+ [ -n "$addr" ] && expected_line="$addr"
+- expected_line="$expected_line $id"
+- [ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}"
+- [ -n "$dev" ] && expected_line="$expected_line $dev"
+- [ -n "$port" ] && expected_line="$expected_line $port"
++ expected_line+=" $id"
++ [ -n "$_flags" ] && expected_line+=" ${_flags//","/" "}"
++ [ -n "$dev" ] && expected_line+=" $dev"
++ [ -n "$port" ] && expected_line+=" $port"
+ else
+ line=$(ip netns exec $ns ./pm_nl_ctl get $_id)
+ # the dump order is: id flags dev address port
+ expected_line="$id"
+- [ -n "$flags" ] && expected_line="$expected_line $flags"
+- [ -n "$dev" ] && expected_line="$expected_line $dev"
+- [ -n "$addr" ] && expected_line="$expected_line $addr"
+- [ -n "$_port" ] && expected_line="$expected_line $_port"
++ [ -n "$flags" ] && expected_line+=" $flags"
++ [ -n "$dev" ] && expected_line+=" $dev"
++ [ -n "$addr" ] && expected_line+=" $addr"
++ [ -n "$_port" ] && expected_line+=" $_port"
+ fi
+ if [ "$line" = "$expected_line" ]; then
+ print_ok
+@@ -901,7 +879,7 @@ pm_nl_set_endpoint()
+ local id=10
+ while [ $add_nr_ns1 -gt 0 ]; do
+ local addr
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ addr="dead:beef:$counter::1"
+ else
+ addr="10.0.$counter.1"
+@@ -953,7 +931,7 @@ pm_nl_set_endpoint()
+ local id=20
+ while [ $add_nr_ns2 -gt 0 ]; do
+ local addr
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ addr="dead:beef:$counter::2"
+ else
+ addr="10.0.$counter.2"
+@@ -995,7 +973,7 @@ pm_nl_set_endpoint()
+ pm_nl_flush_endpoint ${connector_ns}
+ elif [ $rm_nr_ns2 -eq 9 ]; then
+ local addr
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ addr="dead:beef:1::2"
+ else
+ addr="10.0.1.2"
+@@ -1029,40 +1007,62 @@ pm_nl_set_endpoint()
+ fi
+ }
+
+-do_transfer()
++cond_start_capture()
+ {
+- local listener_ns="$1"
+- local connector_ns="$2"
+- local cl_proto="$3"
+- local srv_proto="$4"
+- local connect_addr="$5"
+-
+- local port=$((10000 + TEST_COUNT - 1))
+- local cappid
+- local FAILING_LINKS=${FAILING_LINKS:-""}
+- local fastclose=${fastclose:-""}
+- local speed=${speed:-"fast"}
++ local ns="$1"
+
+- :> "$cout"
+- :> "$sout"
+ :> "$capout"
+
+- if [ $capture -eq 1 ]; then
+- local capuser
+- if [ -z $SUDO_USER ] ; then
++ if $capture; then
++ local capuser capfile
++ if [ -z $SUDO_USER ]; then
+ capuser=""
+ else
+ capuser="-Z $SUDO_USER"
+ fi
+
+- capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "${listener_ns}")
++ capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "$ns")
+
+ echo "Capturing traffic for test $TEST_COUNT into $capfile"
+- ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
++ ip netns exec "$ns" tcpdump -i any -s 65535 -B 32768 $capuser -w "$capfile" > "$capout" 2>&1 &
+ cappid=$!
+
+ sleep 1
+ fi
++}
++
++cond_stop_capture()
++{
++ if $capture; then
++ sleep 1
++ kill $cappid
++ cat "$capout"
++ fi
++}
++
++get_port()
++{
++ echo "$((10000 + TEST_COUNT - 1))"
++}
++
++do_transfer()
++{
++ local listener_ns="$1"
++ local connector_ns="$2"
++ local cl_proto="$3"
++ local srv_proto="$4"
++ local connect_addr="$5"
++ local port
++
++ local FAILING_LINKS=${FAILING_LINKS:-""}
++ local fastclose=${fastclose:-""}
++ local speed=${speed:-"fast"}
++ port=$(get_port)
++
++ :> "$cout"
++ :> "$sout"
++
++ cond_start_capture ${listener_ns}
+
+ NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+ nstat -n
+@@ -1148,10 +1148,7 @@ do_transfer()
+ wait $spid
+ local rets=$?
+
+- if [ $capture -eq 1 ]; then
+- sleep 1
+- kill $cappid
+- fi
++ cond_stop_capture
+
+ NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
+ nstat | grep Tcp > /tmp/${listener_ns}.out
+@@ -1167,7 +1164,6 @@ do_transfer()
+ ip netns exec ${connector_ns} ss -Menita 1>&2 -o "dport = :$port"
+ cat /tmp/${connector_ns}.out
+
+- cat "$capout"
+ return 1
+ fi
+
+@@ -1184,13 +1180,7 @@ do_transfer()
+ fi
+ rets=$?
+
+- if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
+- cat "$capout"
+- return 0
+- fi
+-
+- cat "$capout"
+- return 1
++ [ $retc -eq 0 ] && [ $rets -eq 0 ]
+ }
+
+ make_file()
+@@ -1284,27 +1274,27 @@ chk_csum_nr()
+ fi
+
+ print_check "sum"
+- count=$(get_counter ${ns1} "MPTcpExtDataCsumErr")
+- if [ "$count" != "$csum_ns1" ]; then
+- extra_msg="$extra_msg ns1=$count"
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
++ if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then
++ extra_msg+=" ns1=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
+- { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
++ { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
+ fail_test "got $count data checksum error[s] expected $csum_ns1"
+ else
+ print_ok
+ fi
+ print_check "csum"
+- count=$(get_counter ${ns2} "MPTcpExtDataCsumErr")
+- if [ "$count" != "$csum_ns2" ]; then
+- extra_msg="$extra_msg ns2=$count"
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
++ if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then
++ extra_msg+=" ns2=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
+- { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
++ { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
+ fail_test "got $count data checksum error[s] expected $csum_ns2"
+ else
+ print_ok
+@@ -1341,28 +1331,28 @@ chk_fail_nr()
+ fi
+
+ print_check "ftx"
+- count=$(get_counter ${ns_tx} "MPTcpExtMPFailTx")
+- if [ "$count" != "$fail_tx" ]; then
+- extra_msg="$extra_msg,tx=$count"
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
++ if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then
++ extra_msg+=",tx=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+- { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
++ { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
+ fail_test "got $count MP_FAIL[s] TX expected $fail_tx"
+ else
+ print_ok
+ fi
+
+ print_check "failrx"
+- count=$(get_counter ${ns_rx} "MPTcpExtMPFailRx")
+- if [ "$count" != "$fail_rx" ]; then
+- extra_msg="$extra_msg,rx=$count"
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
++ if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then
++ extra_msg+=",rx=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+- { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
++ { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
+ fail_test "got $count MP_FAIL[s] RX expected $fail_rx"
+ else
+ print_ok
+@@ -1388,22 +1378,22 @@ chk_fclose_nr()
+ fi
+
+ print_check "ctx"
+- count=$(get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$fclose_tx" ]; then
+- extra_msg="$extra_msg,tx=$count"
++ extra_msg+=",tx=$count"
+ fail_test "got $count MP_FASTCLOSE[s] TX expected $fclose_tx"
+ else
+ print_ok
+ fi
+
+ print_check "fclzrx"
+- count=$(get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$fclose_rx" ]; then
+- extra_msg="$extra_msg,rx=$count"
++ extra_msg+=",rx=$count"
+ fail_test "got $count MP_FASTCLOSE[s] RX expected $fclose_rx"
+ else
+ print_ok
+@@ -1429,7 +1419,7 @@ chk_rst_nr()
+ fi
+
+ print_check "rtx"
+- count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPRstTx")
+ if [ -z "$count" ]; then
+ print_skip
+ # accept more rst than expected except if we don't expect any
+@@ -1441,7 +1431,7 @@ chk_rst_nr()
+ fi
+
+ print_check "rstrx"
+- count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPRstRx")
+ if [ -z "$count" ]; then
+ print_skip
+ # accept more rst than expected except if we don't expect any
+@@ -1462,7 +1452,7 @@ chk_infi_nr()
+ local count
+
+ print_check "itx"
+- count=$(get_counter ${ns2} "MPTcpExtInfiniteMapTx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtInfiniteMapTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$infi_tx" ]; then
+@@ -1472,7 +1462,7 @@ chk_infi_nr()
+ fi
+
+ print_check "infirx"
+- count=$(get_counter ${ns1} "MPTcpExtInfiniteMapRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtInfiniteMapRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$infi_rx" ]; then
+@@ -1501,7 +1491,7 @@ chk_join_nr()
+ fi
+
+ print_check "syn"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinSynRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_nr" ]; then
+@@ -1512,7 +1502,7 @@ chk_join_nr()
+
+ print_check "synack"
+ with_cookie=$(ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies)
+- count=$(get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_ack_nr" ]; then
+@@ -1529,7 +1519,7 @@ chk_join_nr()
+ fi
+
+ print_check "ack"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinAckRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$ack_nr" ]; then
+@@ -1537,7 +1527,7 @@ chk_join_nr()
+ else
+ print_ok
+ fi
+- if [ $validate_checksum -eq 1 ]; then
++ if $validate_checksum; then
+ chk_csum_nr $csum_ns1 $csum_ns2
+ chk_fail_nr $fail_nr $fail_nr
+ chk_rst_nr $rst_nr $rst_nr
+@@ -1562,8 +1552,8 @@ chk_stale_nr()
+
+ print_check "stale"
+
+- stale_nr=$(get_counter ${ns} "MPTcpExtSubflowStale")
+- recover_nr=$(get_counter ${ns} "MPTcpExtSubflowRecover")
++ stale_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowStale")
++ recover_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowRecover")
+ if [ -z "$stale_nr" ] || [ -z "$recover_nr" ]; then
+ print_skip
+ elif [ $stale_nr -lt $stale_min ] ||
+@@ -1589,18 +1579,28 @@ chk_add_nr()
+ local add_nr=$1
+ local echo_nr=$2
+ local port_nr=${3:-0}
+- local syn_nr=${4:-$port_nr}
+- local syn_ack_nr=${5:-$port_nr}
+- local ack_nr=${6:-$port_nr}
+- local mis_syn_nr=${7:-0}
+- local mis_ack_nr=${8:-0}
++ local ns_invert=${4:-""}
++ local syn_nr=$port_nr
++ local syn_ack_nr=$port_nr
++ local ack_nr=$port_nr
++ local mis_syn_nr=0
++ local mis_ack_nr=0
++ local ns_tx=$ns1
++ local ns_rx=$ns2
++ local extra_msg=""
+ local count
+ local timeout
+
+- timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
++ if [[ $ns_invert = "invert" ]]; then
++ ns_tx=$ns2
++ ns_rx=$ns1
++ extra_msg="invert"
++ fi
++
++ timeout=$(ip netns exec ${ns_tx} sysctl -n net.mptcp.add_addr_timeout)
+
+ print_check "add"
+- count=$(get_counter ${ns2} "MPTcpExtAddAddr")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtAddAddr")
+ if [ -z "$count" ]; then
+ print_skip
+ # if the test configured a short timeout tolerate greater then expected
+@@ -1612,7 +1612,7 @@ chk_add_nr()
+ fi
+
+ print_check "echo"
+- count=$(get_counter ${ns1} "MPTcpExtEchoAdd")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtEchoAdd")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$echo_nr" ]; then
+@@ -1623,7 +1623,7 @@ chk_add_nr()
+
+ if [ $port_nr -gt 0 ]; then
+ print_check "pt"
+- count=$(get_counter ${ns2} "MPTcpExtPortAdd")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtPortAdd")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$port_nr" ]; then
+@@ -1633,7 +1633,7 @@ chk_add_nr()
+ fi
+
+ print_check "syn"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortSynRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_nr" ]; then
+@@ -1644,7 +1644,7 @@ chk_add_nr()
+ fi
+
+ print_check "synack"
+- count=$(get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPJoinPortSynAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_ack_nr" ]; then
+@@ -1655,7 +1655,7 @@ chk_add_nr()
+ fi
+
+ print_check "ack"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPJoinPortAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$ack_nr" ]; then
+@@ -1666,7 +1666,7 @@ chk_add_nr()
+ fi
+
+ print_check "syn"
+- count=$(get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortSynRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mis_syn_nr" ]; then
+@@ -1677,7 +1677,7 @@ chk_add_nr()
+ fi
+
+ print_check "ack"
+- count=$(get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMismatchPortAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mis_ack_nr" ]; then
+@@ -1687,6 +1687,8 @@ chk_add_nr()
+ print_ok
+ fi
+ fi
++
++ print_info "$extra_msg"
+ }
+
+ chk_add_tx_nr()
+@@ -1699,7 +1701,7 @@ chk_add_tx_nr()
+ timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
+
+ print_check "add TX"
+- count=$(get_counter ${ns1} "MPTcpExtAddAddrTx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtAddAddrTx")
+ if [ -z "$count" ]; then
+ print_skip
+ # if the test configured a short timeout tolerate greater then expected
+@@ -1711,7 +1713,7 @@ chk_add_tx_nr()
+ fi
+
+ print_check "echo TX"
+- count=$(get_counter ${ns2} "MPTcpExtEchoAddTx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtEchoAddTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$echo_tx_nr" ]; then
+@@ -1749,7 +1751,7 @@ chk_rm_nr()
+ fi
+
+ print_check "rm"
+- count=$(get_counter ${addr_ns} "MPTcpExtRmAddr")
++ count=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmAddr")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$rm_addr_nr" ]; then
+@@ -1759,18 +1761,21 @@ chk_rm_nr()
+ fi
+
+ print_check "rmsf"
+- count=$(get_counter ${subflow_ns} "MPTcpExtRmSubflow")
++ count=$(mptcp_lib_get_counter ${subflow_ns} "MPTcpExtRmSubflow")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ -n "$simult" ]; then
+ local cnt suffix
+
+- cnt=$(get_counter ${addr_ns} "MPTcpExtRmSubflow")
++ cnt=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmSubflow")
+
+ # in case of simult flush, the subflow removal count on each side is
+ # unreliable
+ count=$((count + cnt))
+- [ "$count" != "$rm_subflow_nr" ] && suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
++ if [ "$count" != "$rm_subflow_nr" ]; then
++ suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]"
++ extra_msg+=" simult"
++ fi
+ if [ $count -ge "$rm_subflow_nr" ] && \
+ [ "$count" -le "$((rm_subflow_nr *2 ))" ]; then
+ print_ok "$suffix"
+@@ -1791,7 +1796,7 @@ chk_rm_tx_nr()
+ local rm_addr_tx_nr=$1
+
+ print_check "rm TX"
+- count=$(get_counter ${ns2} "MPTcpExtRmAddrTx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtRmAddrTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$rm_addr_tx_nr" ]; then
+@@ -1805,10 +1810,12 @@ chk_prio_nr()
+ {
+ local mp_prio_nr_tx=$1
+ local mp_prio_nr_rx=$2
++ local mpj_syn=$3
++ local mpj_syn_ack=$4
+ local count
+
+ print_check "ptx"
+- count=$(get_counter ${ns1} "MPTcpExtMPPrioTx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mp_prio_nr_tx" ]; then
+@@ -1818,7 +1825,7 @@ chk_prio_nr()
+ fi
+
+ print_check "prx"
+- count=$(get_counter ${ns1} "MPTcpExtMPPrioRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mp_prio_nr_rx" ]; then
+@@ -1826,6 +1833,26 @@ chk_prio_nr()
+ else
+ print_ok
+ fi
++
++ print_check "syn backup"
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx")
++ if [ -z "$count" ]; then
++ print_skip
++ elif [ "$count" != "$mpj_syn" ]; then
++ fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn"
++ else
++ print_ok
++ fi
++
++ print_check "synack backup"
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx")
++ if [ -z "$count" ]; then
++ print_skip
++ elif [ "$count" != "$mpj_syn_ack" ]; then
++ fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack"
++ else
++ print_ok
++ fi
+ }
+
+ chk_subflow_nr()
+@@ -1864,12 +1891,10 @@ chk_mptcp_info()
+ local cnt2
+ local dump_stats
+
+- print_check "mptcp_info ${info1:0:8}=$exp1:$exp2"
++ print_check "mptcp_info ${info1:0:15}=$exp1:$exp2"
+
+- cnt1=$(ss -N $ns1 -inmHM | grep "$info1:" |
+- sed -n 's/.*\('"$info1"':\)\([[:digit:]]*\).*$/\2/p;q')
+- cnt2=$(ss -N $ns2 -inmHM | grep "$info2:" |
+- sed -n 's/.*\('"$info2"':\)\([[:digit:]]*\).*$/\2/p;q')
++ cnt1=$(ss -N $ns1 -inmHM | mptcp_lib_get_info_value "$info1" "$info1")
++ cnt2=$(ss -N $ns2 -inmHM | mptcp_lib_get_info_value "$info2" "$info2")
+ # 'ss' only display active connections and counters that are not 0.
+ [ -z "$cnt1" ] && cnt1=0
+ [ -z "$cnt2" ] && cnt2=0
+@@ -1887,6 +1912,42 @@ chk_mptcp_info()
+ fi
+ }
+
++# $1: subflows in ns1 ; $2: subflows in ns2
++# number of all subflows, including the initial subflow.
++chk_subflows_total()
++{
++ local cnt1
++ local cnt2
++ local info="subflows_total"
++ local dump_stats
++
++ # if subflows_total counter is supported, use it:
++ if [ -n "$(ss -N $ns1 -inmHM | mptcp_lib_get_info_value $info $info)" ]; then
++ chk_mptcp_info $info $1 $info $2
++ return
++ fi
++
++ print_check "$info $1:$2"
++
++ # if not, count the TCP connections that are in fact MPTCP subflows
++ cnt1=$(ss -N $ns1 -ti state established state syn-sent state syn-recv |
++ grep -c tcp-ulp-mptcp)
++ cnt2=$(ss -N $ns2 -ti state established state syn-sent state syn-recv |
++ grep -c tcp-ulp-mptcp)
++
++ if [ "$1" != "$cnt1" ] || [ "$2" != "$cnt2" ]; then
++ fail_test "got subflows $cnt1:$cnt2 expected $1:$2"
++ dump_stats=1
++ else
++ print_ok
++ fi
++
++ if [ "$dump_stats" = 1 ]; then
++ ss -N $ns1 -ti
++ ss -N $ns2 -ti
++ fi
++}
++
+ chk_link_usage()
+ {
+ local ns=$1
+@@ -1918,7 +1979,7 @@ wait_attempt_fail()
+ while [ $time -lt $timeout_ms ]; do
+ local cnt
+
+- cnt=$(get_counter ${ns} "TcpAttemptFails")
++ cnt=$(mptcp_lib_get_counter ${ns} "TcpAttemptFails")
+
+ [ "$cnt" = 1 ] && return 1
+ time=$((time + 100))
+@@ -2092,6 +2153,21 @@ signal_address_tests()
+ chk_add_nr 1 1
+ fi
+
++ # uncommon: subflow and signal flags on the same endpoint
++ # or because the user wrongly picked both, but still expects the client
++ # to create additional subflows
++ if reset "subflow and signal together"; then
++ pm_nl_set_limits $ns1 0 2
++ pm_nl_set_limits $ns2 0 2
++ pm_nl_add_endpoint $ns2 10.0.3.2 flags signal,subflow
++ run_tests $ns1 $ns2 10.0.1.1
++ chk_join_nr 1 1 1
++ chk_add_nr 1 1 0 invert # only initiated by ns2
++ chk_add_nr 0 0 0 # none initiated by ns1
++ chk_rst_nr 0 0 invert # no RST sent by the client
++ chk_rst_nr 0 0 # no RST sent by the server
++ fi
++
+ # accept and use add_addr with additional subflows
+ if reset "multiple subflows and signal"; then
+ pm_nl_set_limits $ns1 0 3
+@@ -2386,9 +2462,10 @@ remove_tests()
+ if reset "remove invalid addresses"; then
+ pm_nl_set_limits $ns1 3 3
+ pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
++ # multicast (all-hosts) IP: no packet for this address will be received on ns1
++ pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
+ pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+- pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+- pm_nl_set_limits $ns2 3 3
++ pm_nl_set_limits $ns2 2 2
+ addr_nr_ns1=-3 speed=10 \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+@@ -2743,11 +2820,24 @@ backup_tests()
+ sflags=nobackup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 1 0
+ fi
+
+ # single address, backup
+ if reset "single address, backup" &&
++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ pm_nl_set_limits $ns1 0 1
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
++ pm_nl_set_limits $ns2 1 1
++ sflags=nobackup speed=slow \
++ run_tests $ns1 $ns2 10.0.1.1
++ chk_join_nr 1 1 1
++ chk_add_nr 1 1
++ chk_prio_nr 1 0 0 1
++ fi
++
++ # single address, switch to backup
++ if reset "single address, switch to backup" &&
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+@@ -2756,67 +2846,70 @@ backup_tests()
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+- chk_prio_nr 1 1
++ chk_prio_nr 1 1 0 0
+ fi
+
+ # single address with port, backup
+ if reset "single address with port, backup" &&
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 0 1
+- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100
+ pm_nl_set_limits $ns2 1 1
+- sflags=backup speed=slow \
++ sflags=nobackup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+- chk_prio_nr 1 1
++ chk_prio_nr 1 0 0 1
+ fi
+
+ if reset "mpc backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 0 0
+ fi
+
+ if reset "mpc backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+- pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
++ pm_nl_set_limits $ns1 0 2
++ pm_nl_set_limits $ns2 1 2
++ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
++
++ # 10.0.2.2 (non-backup) -> 10.0.1.1 (backup)
++ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
++ # 10.0.1.2 (backup) -> 10.0.2.1 (non-backup)
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++ ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path
++
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+- chk_join_nr 0 0 0
+- chk_prio_nr 1 1
++ chk_join_nr 2 2 2
++ chk_prio_nr 1 1 1 1
+ fi
+
+ if reset "mpc switch to backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 0 0
+ fi
+
+ if reset "mpc switch to backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 1 1
++ chk_prio_nr 1 1 0 0
+ fi
+ }
+
+-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
+-LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
+-
+-AF_INET=2
+-AF_INET6=10
+-
+ verify_listener_events()
+ {
+ local evt=$1
+@@ -2830,9 +2923,9 @@ verify_listener_events()
+ local sport
+ local name
+
+- if [ $e_type = $LISTENER_CREATED ]; then
++ if [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CREATED ]; then
+ name="LISTENER_CREATED"
+- elif [ $e_type = $LISTENER_CLOSED ]; then
++ elif [ $e_type = $MPTCP_LIB_EVENT_LISTENER_CLOSED ]; then
+ name="LISTENER_CLOSED "
+ else
+ name="$e_type"
+@@ -2845,13 +2938,13 @@ verify_listener_events()
+ return
+ fi
+
+- type=$(grep "type:$e_type," $evt | sed -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
+- family=$(grep "type:$e_type," $evt | sed -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
+- sport=$(grep "type:$e_type," $evt | sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++ type=$(mptcp_lib_evts_get_info type "$evt" "$e_type")
++ family=$(mptcp_lib_evts_get_info family "$evt" "$e_type")
++ sport=$(mptcp_lib_evts_get_info sport "$evt" "$e_type")
+ if [ $family ] && [ $family = $AF_INET6 ]; then
+- saddr=$(grep "type:$e_type," $evt | sed -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
++ saddr=$(mptcp_lib_evts_get_info saddr6 "$evt" "$e_type")
+ else
+- saddr=$(grep "type:$e_type," $evt | sed -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
++ saddr=$(mptcp_lib_evts_get_info saddr4 "$evt" "$e_type")
+ fi
+
+ if [ $type ] && [ $type = $e_type ] &&
+@@ -2864,6 +2957,32 @@ verify_listener_events()
+ fail_test "$e_type:$type $e_family:$family $e_saddr:$saddr $e_sport:$sport"
+ }
+
++chk_mpc_endp_attempt()
++{
++ local retl=$1
++ local attempts=$2
++
++ print_check "Connect"
++
++ if [ ${retl} = 124 ]; then
++ fail_test "timeout on connect"
++ elif [ ${retl} = 0 ]; then
++ fail_test "unexpected successful connect"
++ else
++ print_ok
++
++ print_check "Attempts"
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPCapableEndpAttempt")
++ if [ -z "$count" ]; then
++ print_skip
++ elif [ "$count" != "$attempts" ]; then
++ fail_test "got ${count} MPC attempt[s] on port-based endpoint, expected ${attempts}"
++ else
++ print_ok
++ fi
++ fi
++}
++
+ add_addr_ports_tests()
+ {
+ # signal address with port
+@@ -2899,8 +3018,10 @@ add_addr_ports_tests()
+ chk_add_nr 1 1 1
+ chk_rm_nr 1 1 invert
+
+- verify_listener_events $evts_ns1 $LISTENER_CREATED $AF_INET 10.0.2.1 10100
+- verify_listener_events $evts_ns1 $LISTENER_CLOSED $AF_INET 10.0.2.1 10100
++ verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CREATED \
++ $MPTCP_LIB_AF_INET 10.0.2.1 10100
++ verify_listener_events $evts_ns1 $MPTCP_LIB_EVENT_LISTENER_CLOSED \
++ $MPTCP_LIB_AF_INET 10.0.2.1 10100
+ kill_events_pids
+ fi
+
+@@ -2952,6 +3073,22 @@ add_addr_ports_tests()
+ chk_join_nr 2 2 2
+ chk_add_nr 2 2 2
+ fi
++
++ if reset "port-based signal endpoint must not accept mpc"; then
++ local port retl count
++ port=$(get_port)
++
++ cond_start_capture ${ns1}
++ pm_nl_add_endpoint ${ns1} 10.0.2.1 flags signal port ${port}
++ mptcp_lib_wait_local_port_listen ${ns1} ${port}
++
++ timeout 1 ip netns exec ${ns2} \
++ ./mptcp_connect -t ${timeout_poll} -p $port -s MPTCP 10.0.2.1 >/dev/null 2>&1
++ retl=$?
++ cond_stop_capture
++
++ chk_mpc_endp_attempt ${retl} 1
++ fi
+ }
+
+ syncookies_tests()
+@@ -3140,6 +3277,9 @@ fullmesh_tests()
+ pm_nl_set_limits $ns1 1 3
+ pm_nl_set_limits $ns2 1 3
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++ if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
++ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++ fi
+ fullmesh=1 speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 3 3 3
+@@ -3206,7 +3346,7 @@ fullmesh_tests()
+ addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 2 2 2
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 1 0
+ chk_rm_nr 0 1
+ fi
+
+@@ -3219,7 +3359,7 @@ fullmesh_tests()
+ sflags=nobackup,nofullmesh speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 2 2 2
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 1 0
+ chk_rm_nr 0 1
+ fi
+ }
+@@ -3237,7 +3377,7 @@ fastclose_tests()
+ if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ test_linkfail=1024 fastclose=server \
+ run_tests $ns1 $ns2 10.0.1.1
+- chk_join_nr 0 0 0
++ chk_join_nr 0 0 0 0 0 0 1
+ chk_fclose_nr 1 1 invert
+ chk_rst_nr 1 1
+ fi
+@@ -3246,14 +3386,14 @@ fastclose_tests()
+ pedit_action_pkts()
+ {
+ tc -n $ns2 -j -s action show action pedit index 100 | \
+- grep "packets" | \
+- sed 's/.*"packets":\([0-9]\+\),.*/\1/'
++ mptcp_lib_get_info_value \"packets\" packets
+ }
+
+ fail_tests()
+ {
+ # single subflow
+ if reset_with_fail "Infinite map" 1; then
++ MPTCP_LIB_SUBTEST_FLAKY=1
+ test_linkfail=128 \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
+@@ -3262,7 +3402,8 @@ fail_tests()
+
+ # multiple subflows
+ if reset_with_fail "MP_FAIL MP_RST" 2; then
+- tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5
++ MPTCP_LIB_SUBTEST_FLAKY=1
++ tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5ms
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 0 1
+ pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+@@ -3272,69 +3413,172 @@ fail_tests()
+ fi
+ }
+
++# $1: ns ; $2: addr ; $3: id
+ userspace_pm_add_addr()
+ {
+- local addr=$1
+- local id=$2
++ local evts=$evts_ns1
+ local tk
+
+- tk=$(grep "type:1," "$evts_ns1" |
+- sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+- ip netns exec $ns1 ./pm_nl_ctl ann $addr token $tk id $id
++ [ "$1" == "$ns2" ] && evts=$evts_ns2
++ tk=$(mptcp_lib_evts_get_info token "$evts")
++
++ ip netns exec $1 ./pm_nl_ctl ann $2 token $tk id $3
+ sleep 1
+ }
+
+-userspace_pm_rm_sf_addr_ns1()
++# $1: ns ; $2: id
++userspace_pm_rm_addr()
+ {
+- local addr=$1
+- local id=$2
+- local tk sp da dp
+-
+- tk=$(grep "type:1," "$evts_ns1" |
+- sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+- sp=$(grep "type:10" "$evts_ns1" |
+- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+- da=$(grep "type:10" "$evts_ns1" |
+- sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+- dp=$(grep "type:10" "$evts_ns1" |
+- sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
+- ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
+- ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
+- lport $sp rip $da rport $dp token $tk
+- wait_rm_addr $ns1 1
+- wait_rm_sf $ns1 1
++ local evts=$evts_ns1
++ local tk
++ local cnt
++
++ [ "$1" == "$ns2" ] && evts=$evts_ns2
++ tk=$(mptcp_lib_evts_get_info token "$evts")
++
++ cnt=$(rm_addr_count ${1})
++ ip netns exec $1 ./pm_nl_ctl rem token $tk id $2
++ wait_rm_addr $1 "${cnt}"
+ }
+
++# $1: ns ; $2: addr ; $3: id
+ userspace_pm_add_sf()
+ {
+- local addr=$1
+- local id=$2
++ local evts=$evts_ns1
+ local tk da dp
+
+- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+- da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+- dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+- ip netns exec $ns2 ./pm_nl_ctl csf lip $addr lid $id \
++ [ "$1" == "$ns2" ] && evts=$evts_ns2
++ tk=$(mptcp_lib_evts_get_info token "$evts")
++ da=$(mptcp_lib_evts_get_info daddr4 "$evts")
++ dp=$(mptcp_lib_evts_get_info dport "$evts")
++
++ ip netns exec $1 ./pm_nl_ctl csf lip $2 lid $3 \
+ rip $da rport $dp token $tk
+ sleep 1
+ }
+
+-userspace_pm_rm_sf_addr_ns2()
++# $1: ns ; $2: addr ; $3: event type
++userspace_pm_rm_sf()
+ {
+- local addr=$1
+- local id=$2
++ local evts=$evts_ns1
++ local t=${3:-1}
++ local ip
+ local tk da dp sp
++ local cnt
++
++ [ "$1" == "$ns2" ] && evts=$evts_ns2
++ [ -n "$(mptcp_lib_evts_get_info "saddr4" "$evts" $t)" ] && ip=4
++ [ -n "$(mptcp_lib_evts_get_info "saddr6" "$evts" $t)" ] && ip=6
++ tk=$(mptcp_lib_evts_get_info token "$evts")
++ da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t $2)
++ dp=$(mptcp_lib_evts_get_info dport "$evts" $t $2)
++ sp=$(mptcp_lib_evts_get_info sport "$evts" $t $2)
+
+- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+- da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+- dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+- sp=$(grep "type:10" "$evts_ns2" |
+- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+- ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
+- ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
++ cnt=$(rm_sf_count ${1})
++ ip netns exec $1 ./pm_nl_ctl dsf lip $2 lport $sp \
+ rip $da rport $dp token $tk
+- wait_rm_addr $ns2 1
+- wait_rm_sf $ns2 1
++ wait_rm_sf $1 "${cnt}"
++}
++
++check_output()
++{
++ local cmd="$1"
++ local expected="$2"
++ local msg="$3"
++ local rc=0
++
++ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
++ if [ ${rc} -eq 2 ]; then
++ fail_test "fail to check output # error ${rc}"
++ elif [ ${rc} -eq 0 ]; then
++ print_ok
++ elif [ ${rc} -eq 1 ]; then
++ fail_test "fail to check output # different output"
++ fi
++}
++
++# $1: ns
++userspace_pm_dump()
++{
++ local evts=$evts_ns1
++ local tk
++
++ [ "$1" == "$ns2" ] && evts=$evts_ns2
++ tk=$(mptcp_lib_evts_get_info token "$evts")
++
++ ip netns exec $1 ./pm_nl_ctl dump token $tk
++}
++
++# $1: ns ; $2: id
++userspace_pm_get_addr()
++{
++ local evts=$evts_ns1
++ local tk
++
++ [ "$1" == "$ns2" ] && evts=$evts_ns2
++ tk=$(mptcp_lib_evts_get_info token "$evts")
++
++ ip netns exec $1 ./pm_nl_ctl get $2 token $tk
++}
++
++userspace_pm_chk_dump_addr()
++{
++ local ns="${1}"
++ local exp="${2}"
++ local check="${3}"
++
++ print_check "dump addrs ${check}"
++
++ if false && mptcp_lib_kallsyms_has "mptcp_userspace_pm_dump_addr$"; then
++ check_output "userspace_pm_dump ${ns}" "${exp}"
++ else
++ print_skip
++ fi
++}
++
++userspace_pm_chk_get_addr()
++{
++ local ns="${1}"
++ local id="${2}"
++ local exp="${3}"
++
++ print_check "get id ${id} addr"
++
++ if false && mptcp_lib_kallsyms_has "mptcp_userspace_pm_get_addr$"; then
++ check_output "userspace_pm_get_addr ${ns} ${id}" "${exp}"
++ else
++ print_skip
++ fi
++}
++
++# $1: ns ; $2: event type ; $3: count
++chk_evt_nr()
++{
++ local ns=${1}
++ local evt_name="${2}"
++ local exp="${3}"
++
++ local evts="${evts_ns1}"
++ local evt="${!evt_name}"
++ local count
++
++ evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_
++ [ "${ns}" == "ns2" ] && evts="${evts_ns2}"
++
++ print_check "event ${ns} ${evt_name} (${exp})"
++
++ if [[ "${evt_name}" = "LISTENER_"* ]] &&
++ ! mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then
++ print_skip "event not supported"
++ return
++ fi
++
++ count=$(grep -cw "type:${evt}" "${evts}")
++ if [ "${count}" != "${exp}" ]; then
++ fail_test "got ${count} events, expected ${exp}"
++ else
++ print_ok
++ fi
+ }
+
+ userspace_tests()
+@@ -3395,7 +3639,7 @@ userspace_tests()
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 0
+- chk_prio_nr 0 0
++ chk_prio_nr 0 0 0 0
+ fi
+
+ # userspace pm type prevents rm_addr
+@@ -3416,21 +3660,33 @@ userspace_tests()
+ if reset_with_events "userspace pm add & remove address" &&
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns1
+- pm_nl_set_limits $ns2 1 1
+- speed=10 \
++ pm_nl_set_limits $ns2 2 2
++ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns1
+- userspace_pm_add_addr 10.0.2.1 10
+- chk_join_nr 1 1 1
+- chk_add_nr 1 1
+- chk_mptcp_info subflows 1 subflows 1
+- chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
+- userspace_pm_rm_sf_addr_ns1 10.0.2.1 10
++ userspace_pm_add_addr $ns1 10.0.2.1 10
++ userspace_pm_add_addr $ns1 10.0.3.1 20
++ chk_join_nr 2 2 2
++ chk_add_nr 2 2
++ chk_mptcp_info subflows 2 subflows 2
++ chk_subflows_total 3 3
++ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
++ userspace_pm_chk_dump_addr "${ns1}" \
++ $'id 10 flags signal 10.0.2.1\nid 20 flags signal 10.0.3.1' \
++ "signal"
++ userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
++ userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
++ userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED
++ userspace_pm_chk_dump_addr "${ns1}" \
++ "id 20 flags signal 10.0.3.1" "after rm_sf 10"
++ userspace_pm_rm_addr $ns1 20
++ userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
+ chk_rm_nr 1 1 invert
+ chk_mptcp_info subflows 0 subflows 0
++ chk_subflows_total 1 1
+ kill_events_pids
+- wait $tests_pid
++ mptcp_lib_kill_wait $tests_pid
+ fi
+
+ # userspace pm create destroy subflow
+@@ -3438,18 +3694,48 @@ userspace_tests()
+ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+- speed=10 \
++ speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+ wait_mpj $ns2
+- userspace_pm_add_sf 10.0.3.2 20
++ userspace_pm_add_sf $ns2 10.0.3.2 20
+ chk_join_nr 1 1 1
+ chk_mptcp_info subflows 1 subflows 1
+- userspace_pm_rm_sf_addr_ns2 10.0.3.2 20
+- chk_rm_nr 1 1
++ chk_subflows_total 2 2
++ userspace_pm_chk_dump_addr "${ns2}" \
++ "id 20 flags subflow 10.0.3.2" \
++ "subflow"
++ userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
++ userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
++ userspace_pm_chk_dump_addr "${ns2}" \
++ "" \
++ "after rm_sf 20"
++ chk_rm_nr 0 1
++ chk_mptcp_info subflows 0 subflows 0
++ chk_subflows_total 1 1
++ kill_events_pids
++ mptcp_lib_kill_wait $tests_pid
++ fi
++
++ # userspace pm create id 0 subflow
++ if reset_with_events "userspace pm create id 0 subflow" &&
++ continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
++ set_userspace_pm $ns2
++ pm_nl_set_limits $ns1 0 1
++ speed=5 \
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
++ wait_mpj $ns2
+ chk_mptcp_info subflows 0 subflows 0
++ chk_subflows_total 1 1
++ userspace_pm_add_sf $ns2 10.0.3.2 0
++ userspace_pm_chk_dump_addr "${ns2}" \
++ "id 0 flags subflow 10.0.3.2" "id 0 subflow"
++ chk_join_nr 1 1 1
++ chk_mptcp_info subflows 1 subflows 1
++ chk_subflows_total 2 2
+ kill_events_pids
+- wait $tests_pid
++ mptcp_lib_kill_wait $tests_pid
+ fi
+ }
+
+@@ -3463,7 +3749,8 @@ endpoint_tests()
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ speed=slow \
+- run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
+
+ wait_mpj $ns1
+ pm_nl_check_endpoint "creation" \
+@@ -3478,31 +3765,185 @@ endpoint_tests()
+ pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
+ pm_nl_check_endpoint "modif is allowed" \
+ $ns2 10.0.2.2 id 1 flags signal
+- kill_tests_wait
++ mptcp_lib_kill_wait $tests_pid
+ fi
+
+- if reset "delete and re-add" &&
++ if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+- pm_nl_set_limits $ns1 1 1
+- pm_nl_set_limits $ns2 1 1
++ start_events
++ pm_nl_set_limits $ns1 0 3
++ pm_nl_set_limits $ns2 0 3
++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+- test_linkfail=4 speed=20 \
+- run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
++ test_linkfail=4 speed=5 \
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
+
+ wait_mpj $ns2
+- chk_subflow_nr "before delete" 2
++ pm_nl_check_endpoint "creation" \
++ $ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
++ chk_subflow_nr "before delete id 2" 2
+ chk_mptcp_info subflows 1 subflows 1
+
+ pm_nl_del_endpoint $ns2 2 10.0.2.2
+ sleep 0.5
+- chk_subflow_nr "after delete" 1
++ chk_subflow_nr "after delete id 2" 1
+ chk_mptcp_info subflows 0 subflows 0
+
+- pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
++ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
++ wait_mpj $ns2
++ chk_subflow_nr "after re-add id 2" 2
++ chk_mptcp_info subflows 1 subflows 1
++
++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++ wait_attempt_fail $ns2
++ chk_subflow_nr "after new reject" 2
++ chk_mptcp_info subflows 1 subflows 1
++
++ ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
++ pm_nl_del_endpoint $ns2 3 10.0.3.2
++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++ wait_mpj $ns2
++ chk_subflow_nr "after no reject" 3
++ chk_mptcp_info subflows 2 subflows 2
++
++ local i
++ for i in $(seq 3); do
++ pm_nl_del_endpoint $ns2 1 10.0.1.2
++ sleep 0.5
++ chk_subflow_nr "after delete id 0 ($i)" 2
++ chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
++
++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
++ wait_mpj $ns2
++ chk_subflow_nr "after re-add id 0 ($i)" 3
++ chk_mptcp_info subflows 3 subflows 3
++ done
++
++ mptcp_lib_kill_wait $tests_pid
++
++ kill_events_pids
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 4
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 4
++
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 0
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 0
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 5 # one has been closed before estab
++
++ chk_join_nr 6 6 6
++ chk_rm_nr 4 4
++ fi
++
++ # remove and re-add
++ if reset_with_events "delete re-add signal" &&
++ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ pm_nl_set_limits $ns1 0 3
++ pm_nl_set_limits $ns2 3 3
++ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++ # broadcast IP: no packet for this address will be received on ns1
++ pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++ pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
++ test_linkfail=4 speed=5 \
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
++
+ wait_mpj $ns2
+- chk_subflow_nr "after re-add" 2
++ pm_nl_check_endpoint "creation" \
++ $ns1 10.0.2.1 id 1 flags signal
++ chk_subflow_nr "before delete" 2
+ chk_mptcp_info subflows 1 subflows 1
+- kill_tests_wait
++
++ pm_nl_del_endpoint $ns1 1 10.0.2.1
++ pm_nl_del_endpoint $ns1 2 224.0.0.1
++ sleep 0.5
++ chk_subflow_nr "after delete" 1
++ chk_mptcp_info subflows 0 subflows 0
++
++ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
++ pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++ wait_mpj $ns2
++ chk_subflow_nr "after re-add" 3
++ chk_mptcp_info subflows 2 subflows 2
++
++ pm_nl_del_endpoint $ns1 42 10.0.1.1
++ sleep 0.5
++ chk_subflow_nr "after delete ID 0" 2
++ chk_mptcp_info subflows 2 subflows 2
++
++ pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
++ wait_mpj $ns2
++ chk_subflow_nr "after re-add ID 0" 3
++ chk_mptcp_info subflows 3 subflows 3
++
++ pm_nl_del_endpoint $ns1 99 10.0.1.1
++ sleep 0.5
++ chk_subflow_nr "after re-delete ID 0" 2
++ chk_mptcp_info subflows 2 subflows 2
++
++ pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
++ wait_mpj $ns2
++ chk_subflow_nr "after re-re-add ID 0" 3
++ chk_mptcp_info subflows 3 subflows 3
++ mptcp_lib_kill_wait $tests_pid
++
++ kill_events_pids
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 0
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
++ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 3
++
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 6
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 4
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
++ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 3
++
++ chk_join_nr 5 5 5
++ chk_add_nr 6 6
++ chk_rm_nr 4 3 invert
++ fi
++
++ # flush and re-add
++ if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
++ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ pm_nl_set_limits $ns1 0 2
++ pm_nl_set_limits $ns2 1 2
++ # broadcast IP: no packet for this address will be received on ns1
++ pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++ test_linkfail=4 speed=20 \
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
++
++ wait_attempt_fail $ns2
++ chk_subflow_nr "before flush" 1
++ chk_mptcp_info subflows 0 subflows 0
++
++ pm_nl_flush_endpoint $ns2
++ pm_nl_flush_endpoint $ns1
++ wait_rm_addr $ns2 0
++ ip netns exec "${ns2}" ${iptables} -D OUTPUT -s "10.0.3.2" -p tcp -j REJECT
++ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
++ wait_mpj $ns2
++ pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
++ wait_mpj $ns2
++ mptcp_lib_kill_wait $tests_pid
++
++ chk_join_nr 2 2 2
++ chk_add_nr 2 2
++ chk_rm_nr 1 0 invert
+ fi
+ }
+
+@@ -3574,10 +4015,10 @@ while getopts "${all_tests_args}cCih" opt; do
+ tests+=("${all_tests[${opt}]}")
+ ;;
+ c)
+- capture=1
++ capture=true
+ ;;
+ C)
+- checksum=1
++ checksum=true
+ ;;
+ i)
+ ip_mptcp=1
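
The chk_evt_nr helper added above counts whole-word "type:<id>" matches in a
pm_nl_ctl events dump. A minimal sketch of that counting, runnable on its own;
the file contents and event ids below are illustrative, not from a real run:

  evts=$(mktemp)
  printf 'type:10,token:1\ntype:10,token:2\ntype:11,token:1\n' > "$evts"
  grep -cw "type:10" "$evts"   # prints 2
  grep -cw "type:1" "$evts"    # prints 0: -w keeps type:1 from matching type:10/11
  rm -f "$evts"
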
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+index 92a5befe803940..d98c89f31afe8a 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+@@ -6,7 +6,22 @@ readonly KSFT_FAIL=1
+ readonly KSFT_SKIP=4
+
+ # shellcheck disable=SC2155 # declare and assign separately
+-readonly KSFT_TEST=$(basename "${0}" | sed 's/\.sh$//g')
++readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
++
++# These variables are used in some selftests, read-only
++declare -rx MPTCP_LIB_EVENT_CREATED=1 # MPTCP_EVENT_CREATED
++declare -rx MPTCP_LIB_EVENT_ESTABLISHED=2 # MPTCP_EVENT_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_CLOSED=3 # MPTCP_EVENT_CLOSED
++declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
++declare -rx MPTCP_LIB_EVENT_REMOVED=7 # MPTCP_EVENT_REMOVED
++declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
++declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
++declare -rx MPTCP_LIB_EVENT_SUB_PRIORITY=13 # MPTCP_EVENT_SUB_PRIORITY
++declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
++declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16 # MPTCP_EVENT_LISTENER_CLOSED
++
++declare -rx MPTCP_LIB_AF_INET=2
++declare -rx MPTCP_LIB_AF_INET6=10
+
+ MPTCP_LIB_SUBTESTS=()
+
+@@ -207,3 +222,55 @@ mptcp_lib_result_print_all_tap() {
+ printf "%s\n" "${subtest}"
+ done
+ }
++
++# get the value of keyword $1 in the line marked by keyword $2
++mptcp_lib_get_info_value() {
++ grep "${2}" | sed -n 's/.*\('"${1}"':\)\([0-9a-f:.]*\).*$/\2/p;q'
++}
++
++# $1: info name ; $2: evts_ns ; [$3: event type; [$4: addr]]
++mptcp_lib_evts_get_info() {
++ grep "${4:-}" "${2}" | mptcp_lib_get_info_value "${1}" "^type:${3:-1},"
++}
++
++# $1: PID
++mptcp_lib_kill_wait() {
++ [ "${1}" -eq 0 ] && return 0
++
++ kill -SIGUSR1 "${1}" > /dev/null 2>&1
++ kill "${1}" > /dev/null 2>&1
++ wait "${1}" 2>/dev/null
++}
++
++# $1: IP address
++mptcp_lib_is_v6() {
++ [ -z "${1##*:*}" ]
++}
++
++# $1: ns, $2: MIB counter
++mptcp_lib_get_counter() {
++ local ns="${1}"
++ local counter="${2}"
++ local count
++
++ count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
++ awk 'NR==1 {next} {print $2}')
++ if [ -z "${count}" ]; then
++ mptcp_lib_fail_if_expected_feature "${counter} counter"
++ return 1
++ fi
++
++ echo "${count}"
++}
++
++mptcp_lib_events() {
++ local ns="${1}"
++ local evts="${2}"
++ declare -n pid="${3}"
++
++ :>"${evts}"
++
++ mptcp_lib_kill_wait "${pid:-0}"
++ ip netns exec "${ns}" ./pm_nl_ctl events >> "${evts}" 2>&1 &
++ pid=$!
++}
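
A minimal usage sketch for the new mptcp_lib_evts_get_info helper, assuming
mptcp_lib.sh is sourced from the selftests directory; the event line below is
fabricated for illustration:

  . ./mptcp_lib.sh

  evts=$(mktemp)
  printf 'type:1,token:823425734,server_side:1,family:2\n' > "$evts"
  # fetch a keyword's value from the line whose event type matches (default: type:1)
  tk=$(mptcp_lib_evts_get_info token "$evts")
  side=$(mptcp_lib_evts_get_info server_side "$evts" 1)
  echo "token=${tk} server_side=${side}"   # token=823425734 server_side=1
  rm -f "$evts"
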
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+index 8c8694f21e7dfe..306d6c4ed5bb4f 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+@@ -162,12 +162,6 @@ check_transfer()
+ return 0
+ }
+
+-# $1: IP address
+-is_v6()
+-{
+- [ -z "${1##*:*}" ]
+-}
+-
+ do_transfer()
+ {
+ local listener_ns="$1"
+@@ -184,7 +178,7 @@ do_transfer()
+ local mptcp_connect="./mptcp_connect -r 20"
+
+ local local_addr ip
+- if is_v6 "${connect_addr}"; then
++ if mptcp_lib_is_v6 "${connect_addr}"; then
+ local_addr="::"
+ ip=ipv6
+ else
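
The shared helper this file now uses is a plain parameter-expansion test for a
colon, so no regex is involved. A sketch, again assuming mptcp_lib.sh is
sourced from the current directory:

  . ./mptcp_lib.sh
  mptcp_lib_is_v6 "dead:beef::1" && echo v6   # prints v6
  mptcp_lib_is_v6 "10.0.1.1" || echo v4       # prints v4
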
+diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+index 8f4ff123a7eb92..71899a3ffa7a9d 100755
+--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
++++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+@@ -183,7 +183,7 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow 10.0.1.1" " (nobackup)"
+
+ # fullmesh support has been added later
+-ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh
++ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh 2>/dev/null
+ if ip netns exec $ns1 ./pm_nl_ctl dump | grep -q "fullmesh" ||
+ mptcp_lib_expect_all_features; then
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+@@ -194,6 +194,12 @@ subflow 10.0.1.1" " (nofullmesh)"
+ ip netns exec $ns1 ./pm_nl_ctl set id 1 flags backup,fullmesh
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow,backup,fullmesh 10.0.1.1" " (backup,fullmesh)"
++else
++ for st in fullmesh nofullmesh backup,fullmesh; do
++ st=" (${st})"
++ printf "%-50s%s\n" "${st}" "[SKIP]"
++ mptcp_lib_result_skip "${st}"
++ done
+ fi
+
+ mptcp_lib_result_print_all_tap
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 49369c4a5f261f..763402dd17742f 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -1239,7 +1239,7 @@ int add_listener(int argc, char *argv[])
+ struct sockaddr_storage addr;
+ struct sockaddr_in6 *a6;
+ struct sockaddr_in *a4;
+- u_int16_t family;
++ u_int16_t family = AF_UNSPEC;
+ int enable = 1;
+ int sock;
+ int err;
+diff --git a/tools/testing/selftests/net/mptcp/settings b/tools/testing/selftests/net/mptcp/settings
+index 79b65bdf05db65..abc5648b59abde 100644
+--- a/tools/testing/selftests/net/mptcp/settings
++++ b/tools/testing/selftests/net/mptcp/settings
+@@ -1 +1 @@
+-timeout=1200
++timeout=1800
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index ce9203b817f88c..f24bd2bf083111 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -235,8 +235,8 @@ run_test()
+ shift 4
+ local msg=$*
+
+- [ $delay1 -gt 0 ] && delay1="delay $delay1" || delay1=""
+- [ $delay2 -gt 0 ] && delay2="delay $delay2" || delay2=""
++ [ $delay1 -gt 0 ] && delay1="delay ${delay1}ms" || delay1=""
++ [ $delay2 -gt 0 ] && delay2="delay ${delay2}ms" || delay2=""
+
+ for dev in ns1eth1 ns1eth2; do
+ tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
+@@ -267,7 +267,8 @@ run_test()
+ [ $bail -eq 0 ] || exit $ret
+ fi
+
+- printf "%-60s" "$msg - reverse direction"
++ msg+=" - reverse direction"
++ printf "%-60s" "${msg}"
+ do_transfer $large $small $time
+ lret=$?
+ mptcp_lib_result_code "${lret}" "${msg}"
+@@ -301,12 +302,12 @@ done
+
+ setup
+ run_test 10 10 0 0 "balanced bwidth"
+-run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
++run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
+
+ # we still need some additional infrastructure to pass the following test-cases
+-run_test 30 10 0 0 "unbalanced bwidth"
+-run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
+-run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
++run_test 10 3 0 0 "unbalanced bwidth"
++run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
++run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
+
+ mptcp_lib_result_print_all_tap
+ exit $ret
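
The delay arguments above gain an explicit ms suffix; tc does not treat a bare
netem delay value as milliseconds in all versions, so spelling the unit out
keeps the shaping deterministic. A sketch with an illustrative device name:

  # shape a link to 1 mbit with an explicit 25 ms one-way delay
  tc qdisc add dev veth0 root netem rate 1mbit delay 25ms
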
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index b25a3e33eb2538..c5d7af8e8efde1 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -23,15 +23,15 @@ if ! ip -Version &> /dev/null; then
+ exit ${KSFT_SKIP}
+ fi
+
+-ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
+-REMOVED=7 # MPTCP_EVENT_REMOVED
+-SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
+-SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
+-LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
+-LISTENER_CLOSED=16 #MPTCP_EVENT_LISTENER_CLOSED
++ANNOUNCED=${MPTCP_LIB_EVENT_ANNOUNCED}
++REMOVED=${MPTCP_LIB_EVENT_REMOVED}
++SUB_ESTABLISHED=${MPTCP_LIB_EVENT_SUB_ESTABLISHED}
++SUB_CLOSED=${MPTCP_LIB_EVENT_SUB_CLOSED}
++LISTENER_CREATED=${MPTCP_LIB_EVENT_LISTENER_CREATED}
++LISTENER_CLOSED=${MPTCP_LIB_EVENT_LISTENER_CLOSED}
+
+-AF_INET=2
+-AF_INET6=10
++AF_INET=${MPTCP_LIB_AF_INET}
++AF_INET6=${MPTCP_LIB_AF_INET6}
+
+ file=""
+ server_evts=""
+@@ -75,7 +75,7 @@ print_test()
+ {
+ test_name="${1}"
+
+- _printf "%-63s" "${test_name}"
++ _printf "%-68s" "${test_name}"
+ }
+
+ print_results()
+@@ -108,15 +108,6 @@ test_fail()
+ mptcp_lib_result_fail "${test_name}"
+ }
+
+-kill_wait()
+-{
+- [ $1 -eq 0 ] && return 0
+-
+- kill -SIGUSR1 $1 > /dev/null 2>&1
+- kill $1 > /dev/null 2>&1
+- wait $1 2>/dev/null
+-}
+-
+ # This function is used in the cleanup trap
+ #shellcheck disable=SC2317
+ cleanup()
+@@ -128,7 +119,7 @@ cleanup()
+ for pid in $client4_pid $server4_pid $client6_pid $server6_pid\
+ $server_evts_pid $client_evts_pid
+ do
+- kill_wait $pid
++ mptcp_lib_kill_wait $pid
+ done
+
+ local netns
+@@ -193,10 +184,12 @@ make_connection()
+ local is_v6=$1
+ local app_port=$app4_port
+ local connect_addr="10.0.1.1"
++ local client_addr="10.0.1.2"
+ local listen_addr="0.0.0.0"
+ if [ "$is_v6" = "v6" ]
+ then
+ connect_addr="dead:beef:1::1"
++ client_addr="dead:beef:1::2"
+ listen_addr="::"
+ app_port=$app6_port
+ else
+@@ -208,21 +201,11 @@ make_connection()
+ if [ -z "$client_evts" ]; then
+ client_evts=$(mktemp)
+ fi
+- :>"$client_evts"
+- if [ $client_evts_pid -ne 0 ]; then
+- kill_wait $client_evts_pid
+- fi
+- ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
+- client_evts_pid=$!
++ mptcp_lib_events "${ns2}" "${client_evts}" client_evts_pid
+ if [ -z "$server_evts" ]; then
+ server_evts=$(mktemp)
+ fi
+- :>"$server_evts"
+- if [ $server_evts_pid -ne 0 ]; then
+- kill_wait $server_evts_pid
+- fi
+- ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
+- server_evts_pid=$!
++ mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid
+ sleep 0.5
+
+ # Run the server
+@@ -247,20 +230,18 @@ make_connection()
+ local server_token
+ local server_serverside
+
+- client_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+- client_port=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+- client_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+- "$client_evts")
+- server_token=$(grep "type:1," "$server_evts" |
+- sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
+- server_serverside=$(grep "type:1," "$server_evts" |
+- sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q')
++ client_token=$(mptcp_lib_evts_get_info token "$client_evts")
++ client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
++ client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
++ server_token=$(mptcp_lib_evts_get_info token "$server_evts")
++ server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
+
+ print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
+ if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+ [ "$server_serverside" = 1 ]
+ then
+ test_pass
++ print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
+ else
+ test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
+ mptcp_lib_result_print_all_tap
+@@ -340,16 +321,16 @@ verify_announce_event()
+ local dport
+ local id
+
+- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
++ type=$(mptcp_lib_evts_get_info type "$evt" $e_type)
++ token=$(mptcp_lib_evts_get_info token "$evt" $e_type)
+ if [ "$e_af" = "v6" ]
+ then
+- addr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
++ addr=$(mptcp_lib_evts_get_info daddr6 "$evt" $e_type)
+ else
+- addr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
++ addr=$(mptcp_lib_evts_get_info daddr4 "$evt" $e_type)
+ fi
+- dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
++ dport=$(mptcp_lib_evts_get_info dport "$evt" $e_type)
++ id=$(mptcp_lib_evts_get_info rem_id "$evt" $e_type)
+
+ check_expected "type" "token" "addr" "dport" "id"
+ }
+@@ -367,7 +348,7 @@ test_announce()
+ $client_addr_id dev ns2eth1 > /dev/null 2>&1
+
+ local type
+- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
++ type=$(mptcp_lib_evts_get_info type "$server_evts")
+ print_test "ADD_ADDR 10.0.2.2 (ns2) => ns1, invalid token"
+ if [ "$type" = "" ]
+ then
+@@ -381,7 +362,7 @@ test_announce()
+ ip netns exec "$ns2"\
+ ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
+ ns2eth1
+- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, reuse port"
++ print_test "ADD_ADDR id:client 10.0.2.2 (ns2) => ns1, reuse port"
+ sleep 0.5
+ verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \
+ "$client4_port"
+@@ -390,7 +371,7 @@ test_announce()
+ :>"$server_evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann\
+ dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1
+- print_test "ADD_ADDR6 id:${client_addr_id} dead:beef:2::2 (ns2) => ns1, reuse port"
++ print_test "ADD_ADDR6 id:client dead:beef:2::2 (ns2) => ns1, reuse port"
+ sleep 0.5
+ verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
+ "$client_addr_id" "$client6_port" "v6"
+@@ -400,7 +381,7 @@ test_announce()
+ client_addr_id=$((client_addr_id+1))
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id dev ns2eth1 port $new4_port
+- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, new port"
++ print_test "ADD_ADDR id:client+1 10.0.2.2 (ns2) => ns1, new port"
+ sleep 0.5
+ verify_announce_event "$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
+ "$client_addr_id" "$new4_port"
+@@ -411,7 +392,7 @@ test_announce()
+ # ADD_ADDR from the server to client machine reusing the subflow port
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id dev ns1eth2
+- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
++ print_test "ADD_ADDR id:server 10.0.2.1 (ns1) => ns2, reuse port"
+ sleep 0.5
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ "$server_addr_id" "$app4_port"
+@@ -420,7 +401,7 @@ test_announce()
+ :>"$client_evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
+ $server_addr_id dev ns1eth2
+- print_test "ADD_ADDR6 id:${server_addr_id} dead:beef:2::1 (ns1) => ns2, reuse port"
++ print_test "ADD_ADDR6 id:server dead:beef:2::1 (ns1) => ns2, reuse port"
+ sleep 0.5
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
+ "$server_addr_id" "$app6_port" "v6"
+@@ -430,7 +411,7 @@ test_announce()
+ server_addr_id=$((server_addr_id+1))
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id dev ns1eth2 port $new4_port
+- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, new port"
++ print_test "ADD_ADDR id:server+1 10.0.2.1 (ns1) => ns2, new port"
+ sleep 0.5
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ "$server_addr_id" "$new4_port"
+@@ -446,9 +427,9 @@ verify_remove_event()
+ local token
+ local id
+
+- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
++ type=$(mptcp_lib_evts_get_info type "$evt" $e_type)
++ token=$(mptcp_lib_evts_get_info token "$evt" $e_type)
++ id=$(mptcp_lib_evts_get_info rem_id "$evt" $e_type)
+
+ check_expected "type" "token" "id"
+ }
+@@ -464,9 +445,9 @@ test_remove()
+ local invalid_token=$(( client4_token - 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
+ $client_addr_id > /dev/null 2>&1
+- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token"
++ print_test "RM_ADDR id:client ns2 => ns1, invalid token"
+ local type
+- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
++ type=$(mptcp_lib_evts_get_info type "$server_evts")
+ if [ "$type" = "" ]
+ then
+ test_pass
+@@ -478,8 +459,8 @@ test_remove()
+ local invalid_id=$(( client_addr_id + 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $invalid_id > /dev/null 2>&1
+- print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id"
+- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
++ print_test "RM_ADDR id:client+1 ns2 => ns1, invalid id"
++ type=$(mptcp_lib_evts_get_info type "$server_evts")
+ if [ "$type" = "" ]
+ then
+ test_pass
+@@ -491,7 +472,7 @@ test_remove()
+ :>"$server_evts"
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $client_addr_id
+- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
++ print_test "RM_ADDR id:client ns2 => ns1"
+ sleep 0.5
+ verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
+
+@@ -500,7 +481,7 @@ test_remove()
+ client_addr_id=$(( client_addr_id - 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $client_addr_id
+- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
++ print_test "RM_ADDR id:client-1 ns2 => ns1"
+ sleep 0.5
+ verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
+
+@@ -508,7 +489,7 @@ test_remove()
+ :>"$server_evts"
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
+ $client_addr_id
+- print_test "RM_ADDR6 id:${client_addr_id} ns2 => ns1"
++ print_test "RM_ADDR6 id:client-1 ns2 => ns1"
+ sleep 0.5
+ verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id"
+
+@@ -518,7 +499,7 @@ test_remove()
+ # RM_ADDR from the server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ $server_addr_id
+- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
++ print_test "RM_ADDR id:server ns1 => ns2"
+ sleep 0.5
+ verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
+
+@@ -527,7 +508,7 @@ test_remove()
+ server_addr_id=$(( server_addr_id - 1 ))
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ $server_addr_id
+- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
++ print_test "RM_ADDR id:server-1 ns1 => ns2"
+ sleep 0.5
+ verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
+
+@@ -535,7 +516,7 @@ test_remove()
+ :>"$client_evts"
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
+ $server_addr_id
+- print_test "RM_ADDR6 id:${server_addr_id} ns1 => ns2"
++ print_test "RM_ADDR6 id:server-1 ns1 => ns2"
+ sleep 0.5
+ verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id"
+ }
+@@ -563,8 +544,14 @@ verify_subflow_events()
+ local locid
+ local remid
+ local info
++ local e_dport_txt
++
++ # only display the fixed ports
++ if [ "${e_dport}" -ge "${app4_port}" ] && [ "${e_dport}" -le "${app6_port}" ]; then
++ e_dport_txt=":${e_dport}"
++ fi
+
+- info="${e_saddr} (${e_from}) => ${e_daddr} (${e_to})"
++ info="${e_saddr} (${e_from}) => ${e_daddr}${e_dport_txt} (${e_to})"
+
+ if [ "$e_type" = "$SUB_ESTABLISHED" ]
+ then
+@@ -583,19 +570,19 @@ verify_subflow_events()
+ fi
+ fi
+
+- type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- family=$(sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- locid=$(sed --unbuffered -n 's/.*\(loc_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+- remid=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
++ type=$(mptcp_lib_evts_get_info type "$evt" $e_type)
++ token=$(mptcp_lib_evts_get_info token "$evt" $e_type)
++ family=$(mptcp_lib_evts_get_info family "$evt" $e_type)
++ dport=$(mptcp_lib_evts_get_info dport "$evt" $e_type)
++ locid=$(mptcp_lib_evts_get_info loc_id "$evt" $e_type)
++ remid=$(mptcp_lib_evts_get_info rem_id "$evt" $e_type)
+ if [ "$family" = "$AF_INET6" ]
+ then
+- saddr=$(sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+- daddr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
++ saddr=$(mptcp_lib_evts_get_info saddr6 "$evt" $e_type)
++ daddr=$(mptcp_lib_evts_get_info daddr6 "$evt" $e_type)
+ else
+- saddr=$(sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+- daddr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
++ saddr=$(mptcp_lib_evts_get_info saddr4 "$evt" $e_type)
++ daddr=$(mptcp_lib_evts_get_info daddr4 "$evt" $e_type)
+ fi
+
+ check_expected "type" "token" "daddr" "dport" "family" "saddr" "locid" "remid"
+@@ -627,10 +614,10 @@ test_subflows()
+ "10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+ local sport
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$server_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW from server to client machine
+ :>"$server_evts"
+@@ -666,9 +653,9 @@ test_subflows()
+ "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$server_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW6 from server to client machine
+ :>"$server_evts"
+@@ -705,9 +692,9 @@ test_subflows()
+ "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$server_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW from server to client machine
+ :>"$server_evts"
+@@ -743,9 +730,9 @@ test_subflows()
+ "10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$client_evts"
+@@ -782,9 +769,9 @@ test_subflows()
+ "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW6 from client to server machine
+ :>"$client_evts"
+@@ -819,9 +806,9 @@ test_subflows()
+ "10.0.2.2" "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$client_evts"
+@@ -850,7 +837,7 @@ test_subflows_v4_v6_mix()
+ :>"$client_evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
+ $server_addr_id dev ns1eth2
+- print_test "ADD_ADDR4 id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
++ print_test "ADD_ADDR4 id:server 10.0.2.1 (ns1) => ns2, reuse port"
+ sleep 0.5
+ verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
+ "$server_addr_id" "$app6_port"
+@@ -865,9 +852,9 @@ test_subflows_v4_v6_mix()
+ "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+- sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
++ sport=$(mptcp_lib_evts_get_info sport "$client_evts" $SUB_ESTABLISHED)
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$client_evts"
+@@ -896,9 +883,10 @@ test_prio()
+
+ # Check TX
+ print_test "MP_PRIO TX"
+- count=$(ip netns exec "$ns2" nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
+- [ -z "$count" ] && count=0
+- if [ $count != 1 ]; then
++ count=$(mptcp_lib_get_counter "$ns2" "MPTcpExtMPPrioTx")
++ if [ -z "$count" ]; then
++ test_skip
++ elif [ $count != 1 ]; then
+ test_fail "Count != 1: ${count}"
+ else
+ test_pass
+@@ -906,9 +894,10 @@ test_prio()
+
+ # Check RX
+ print_test "MP_PRIO RX"
+- count=$(ip netns exec "$ns1" nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
+- [ -z "$count" ] && count=0
+- if [ $count != 1 ]; then
++ count=$(mptcp_lib_get_counter "$ns1" "MPTcpExtMPPrioRx")
++ if [ -z "$count" ]; then
++ test_skip
++ elif [ $count != 1 ]; then
+ test_fail "Count != 1: ${count}"
+ else
+ test_pass
+@@ -927,24 +916,13 @@ verify_listener_events()
+ local saddr
+ local sport
+
+- if [ $e_type = $LISTENER_CREATED ]; then
+- print_test "CREATE_LISTENER $e_saddr:$e_sport"
+- elif [ $e_type = $LISTENER_CLOSED ]; then
+- print_test "CLOSE_LISTENER $e_saddr:$e_sport"
+- fi
+-
+- type=$(grep "type:$e_type," $evt |
+- sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
+- family=$(grep "type:$e_type," $evt |
+- sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
+- sport=$(grep "type:$e_type," $evt |
+- sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
++ type=$(mptcp_lib_evts_get_info type $evt $e_type)
++ family=$(mptcp_lib_evts_get_info family $evt $e_type)
++ sport=$(mptcp_lib_evts_get_info sport $evt $e_type)
+ if [ $family ] && [ $family = $AF_INET6 ]; then
+- saddr=$(grep "type:$e_type," $evt |
+- sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
++ saddr=$(mptcp_lib_evts_get_info saddr6 $evt $e_type)
+ else
+- saddr=$(grep "type:$e_type," $evt |
+- sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
++ saddr=$(mptcp_lib_evts_get_info saddr4 $evt $e_type)
+ fi
+
+ check_expected "type" "family" "saddr" "sport"
+@@ -969,6 +947,7 @@ test_listener()
+ local listener_pid=$!
+
+ sleep 0.5
++ print_test "CREATE_LISTENER 10.0.2.2 (client port)"
+ verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
+
+ # ADD_ADDR from client to server machine reusing the subflow port
+@@ -982,15 +961,17 @@ test_listener()
+ sleep 0.5
+
+ # Delete the listener from the client ns, if one was created
+- kill_wait $listener_pid
++ mptcp_lib_kill_wait $listener_pid
+
+ sleep 0.5
++ print_test "CLOSE_LISTENER 10.0.2.2 (client port)"
+ verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
+ }
+
+ print_title "Make connections"
+ make_connection
+ make_connection "v6"
++print_title "Will be using address IDs ${client_addr_id} (client) and ${server_addr_id} (server)"
+
+ test_announce
+ test_remove
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index bdc03a2097e85a..7ea5fb28c93db2 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -85,6 +85,7 @@ static bool cfg_rx;
+ static int cfg_runtime_ms = 4200;
+ static int cfg_verbose;
+ static int cfg_waittime_ms = 500;
++static int cfg_notification_limit = 32;
+ static bool cfg_zerocopy;
+
+ static socklen_t cfg_alen;
+@@ -95,6 +96,7 @@ static char payload[IP_MAXPACKET];
+ static long packets, bytes, completions, expected_completions;
+ static int zerocopied = -1;
+ static uint32_t next_completion;
++static uint32_t sends_since_notify;
+
+ static unsigned long gettimeofday_ms(void)
+ {
+@@ -208,6 +210,7 @@ static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy, int domain)
+ error(1, errno, "send");
+ if (cfg_verbose && ret != len)
+ fprintf(stderr, "send: ret=%u != %u\n", ret, len);
++ sends_since_notify++;
+
+ if (len) {
+ packets++;
+@@ -435,7 +438,7 @@ static bool do_recv_completion(int fd, int domain)
+ /* Detect notification gaps. These should not happen often, if at all.
+ * Gaps can occur due to drops, reordering and retransmissions.
+ */
+- if (lo != next_completion)
++ if (cfg_verbose && lo != next_completion)
+ fprintf(stderr, "gap: %u..%u does not append to %u\n",
+ lo, hi, next_completion);
+ next_completion = hi + 1;
+@@ -460,6 +463,7 @@ static bool do_recv_completion(int fd, int domain)
+ static void do_recv_completions(int fd, int domain)
+ {
+ while (do_recv_completion(fd, domain)) {}
++ sends_since_notify = 0;
+ }
+
+ /* Wait for all remaining completions on the errqueue */
+@@ -549,6 +553,9 @@ static void do_tx(int domain, int type, int protocol)
+ else
+ do_sendmsg(fd, &msg, cfg_zerocopy, domain);
+
++ if (cfg_zerocopy && sends_since_notify >= cfg_notification_limit)
++ do_recv_completions(fd, domain);
++
+ while (!do_poll(fd, POLLOUT)) {
+ if (cfg_zerocopy)
+ do_recv_completions(fd, domain);
+@@ -708,7 +715,7 @@ static void parse_opts(int argc, char **argv)
+
+ cfg_payload_len = max_payload_len;
+
+- while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) {
++ while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
+ switch (c) {
+ case '4':
+ if (cfg_family != PF_UNSPEC)
+@@ -736,6 +743,9 @@ static void parse_opts(int argc, char **argv)
+ if (cfg_ifindex == 0)
+ error(1, errno, "invalid iface: %s", optarg);
+ break;
++ case 'l':
++ cfg_notification_limit = strtoul(optarg, NULL, 0);
++ break;
+ case 'm':
+ cfg_cork_mixed = true;
+ break;
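
The new -l option bounds how many sends may elapse before zerocopy completion
notifications are reaped from the error queue. A hypothetical invocation: only
-l is the point here, and the address, port, and runtime values are
placeholders:

  # IPv4 TCP sender with MSG_ZEROCOPY, draining completions every 64 sends
  ./msg_zerocopy -4 -D 192.0.2.1 -p 8000 -t 4 -z -l 64 tcp
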
+diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh
+new file mode 100644
+index 00000000000000..6596fe03c77f43
+--- /dev/null
++++ b/tools/testing/selftests/net/net_helper.sh
+@@ -0,0 +1,25 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++#
++# Helper functions
++
++wait_local_port_listen()
++{
++ local listener_ns="${1}"
++ local port="${2}"
++ local protocol="${3}"
++ local pattern
++ local i
++
++ pattern=":$(printf "%04X" "${port}") "
++
++ # for tcp protocol additionally check the socket state
++ [ ${protocol} = "tcp" ] && pattern="${pattern}0A"
++ for i in $(seq 10); do
++ if ip netns exec "${listener_ns}" awk '{print $2" "$4}' \
++ /proc/net/"${protocol}"* | grep -q "${pattern}"; then
++ break
++ fi
++ sleep 0.1
++ done
++}
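
wait_local_port_listen polls /proc/net/<protocol>* inside the namespace for
the port rendered as four uppercase hex digits, and for TCP it additionally
requires socket state 0A (LISTEN). The pattern it greps for can be previewed
directly; the port value is illustrative:

  port=8080
  printf ':%04X 0A\n' "$port"   # -> ":1F90 0A", the TCP LISTEN pattern
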
+diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+index f8499d4c87f3f7..bab7436c683486 100755
+--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # OVS kernel module self tests
+@@ -502,7 +502,20 @@ test_netlink_checks () {
+ wc -l) == 2 ] || \
+ return 1
+
++ info "Checking clone depth"
+ ERR_MSG="Flow actions may not be safe on all matching packets"
++ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
++ ovs_add_flow "test_netlink_checks" nv0 \
++ 'in_port(1),eth(),eth_type(0x800),ipv4()' \
++ 'clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(drop)))))))))))))))))' \
++ >/dev/null 2>&1 && return 1
++ POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
++
++ if [ "$PRE_TEST" == "$POST_TEST" ]; then
++ info "failed - clone depth too large"
++ return 1
++ fi
++
+ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
+ ovs_add_flow "test_netlink_checks" nv0 \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(0),2' \
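
The new check above feeds a 17-deep clone() action and expects the kernel to
reject it. A sketch of generating such a nested action string, should the
depth ever need to change (not part of the test itself):

  depth=17
  act="drop"
  for _ in $(seq "$depth"); do act="clone(${act})"; done
  echo "$act"   # clone(clone(...17 levels...(drop)...))
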
+diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+index b97e621face958..8b120718768ec8 100644
+--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+@@ -299,7 +299,7 @@ class ovsactions(nla):
+ ("OVS_ACTION_ATTR_PUSH_NSH", "none"),
+ ("OVS_ACTION_ATTR_POP_NSH", "flag"),
+ ("OVS_ACTION_ATTR_METER", "none"),
+- ("OVS_ACTION_ATTR_CLONE", "none"),
++ ("OVS_ACTION_ATTR_CLONE", "recursive"),
+ ("OVS_ACTION_ATTR_CHECK_PKT_LEN", "none"),
+ ("OVS_ACTION_ATTR_ADD_MPLS", "none"),
+ ("OVS_ACTION_ATTR_DEC_TTL", "none"),
+@@ -465,29 +465,42 @@ class ovsactions(nla):
+ print_str += "pop_mpls"
+ else:
+ datum = self.get_attr(field[0])
+- print_str += datum.dpstr(more)
++ if field[0] == "OVS_ACTION_ATTR_CLONE":
++ print_str += "clone("
++ print_str += datum.dpstr(more)
++ print_str += ")"
++ else:
++ print_str += datum.dpstr(more)
+
+ return print_str
+
+ def parse(self, actstr):
++ totallen = len(actstr)
+ while len(actstr) != 0:
+ parsed = False
++ parencount = 0
+ if actstr.startswith("drop"):
+ # If no reason is provided, the implicit drop is used (i.e no
+ # action). If some reason is given, an explicit action is used.
+- actstr, reason = parse_extract_field(
+- actstr,
+- "drop(",
+- "([0-9]+)",
+- lambda x: int(x, 0),
+- False,
+- None,
+- )
++ reason = None
++ if actstr.startswith("drop("):
++ parencount += 1
++
++ actstr, reason = parse_extract_field(
++ actstr,
++ "drop(",
++ "([0-9]+)",
++ lambda x: int(x, 0),
++ False,
++ None,
++ )
++
+ if reason is not None:
+ self["attrs"].append(["OVS_ACTION_ATTR_DROP", reason])
+ parsed = True
+ else:
+- return
++ actstr = actstr[len("drop"): ]
++ return (totallen - len(actstr))
+
+ elif parse_starts_block(actstr, "^(\d+)", False, True):
+ actstr, output = parse_extract_field(
+@@ -504,6 +517,7 @@ class ovsactions(nla):
+ False,
+ 0,
+ )
++ parencount += 1
+ self["attrs"].append(["OVS_ACTION_ATTR_RECIRC", recircid])
+ parsed = True
+
+@@ -516,12 +530,22 @@ class ovsactions(nla):
+
+ for flat_act in parse_flat_map:
+ if parse_starts_block(actstr, flat_act[0], False):
+- actstr += len(flat_act[0])
+- self["attrs"].append([flat_act[1]])
++ actstr = actstr[len(flat_act[0]):]
++ self["attrs"].append([flat_act[1], True])
+ actstr = actstr[strspn(actstr, ", ") :]
+ parsed = True
+
+- if parse_starts_block(actstr, "ct(", False):
++ if parse_starts_block(actstr, "clone(", False):
++ parencount += 1
++ subacts = ovsactions()
++ actstr = actstr[len("clone("):]
++ parsedLen = subacts.parse(actstr)
++ lst = []
++ self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts))
++ actstr = actstr[parsedLen:]
++ parsed = True
++ elif parse_starts_block(actstr, "ct(", False):
++ parencount += 1
+ actstr = actstr[len("ct(") :]
+ ctact = ovsactions.ctact()
+
+@@ -553,6 +577,7 @@ class ovsactions(nla):
+ natact = ovsactions.ctact.natattr()
+
+ if actstr.startswith("("):
++ parencount += 1
+ t = None
+ actstr = actstr[1:]
+ if actstr.startswith("src"):
+@@ -607,15 +632,29 @@ class ovsactions(nla):
+ actstr = actstr[strspn(actstr, ", ") :]
+
+ ctact["attrs"].append(["OVS_CT_ATTR_NAT", natact])
+- actstr = actstr[strspn(actstr, ",) ") :]
++ actstr = actstr[strspn(actstr, ", ") :]
+
+ self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact])
+ parsed = True
+
+- actstr = actstr[strspn(actstr, "), ") :]
++ actstr = actstr[strspn(actstr, ", ") :]
++ while parencount > 0:
++ parencount -= 1
++ actstr = actstr[strspn(actstr, " "):]
++ if len(actstr) and actstr[0] != ")":
++ raise ValueError("Action str: '%s' unbalanced" % actstr)
++ actstr = actstr[1:]
++
++ if len(actstr) and actstr[0] == ")":
++ return (totallen - len(actstr))
++
++ actstr = actstr[strspn(actstr, ", ") :]
++
+ if not parsed:
+ raise ValueError("Action str: '%s' not supported" % actstr)
+
++ return (totallen - len(actstr))
++
+
+ class ovskey(nla):
+ nla_flags = NLA_F_NESTED
+@@ -2111,6 +2150,8 @@ def main(argv):
+ ovsflow = OvsFlow()
+ ndb = NDB()
+
++ sys.setrecursionlimit(100000)
++
+ if hasattr(args, "showdp"):
+ found = False
+ for iface in ndb.interfaces:
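
The ovs-dpctl.py rework above turns the action parser into a recursive-descent one: each call to parse() returns how many characters it consumed, and parencount tracks the ')' characters it still owes, so clone(...) actions can nest arbitrarily deep (hence the raised recursion limit in main()). A minimal standalone sketch of the same consumed-length idiom, using hypothetical names that are not part of ovs-dpctl.py:

    import re

    def parse_actions(actstr):
        """Parse a comma-separated action list; return (actions, consumed)."""
        totallen = len(actstr)
        actions = []
        while actstr:
            if actstr.startswith("clone("):
                rest = actstr[len("clone("):]
                sub, used = parse_actions(rest)   # recurse on the nested list
                rest = rest[used:]
                if not rest.startswith(")"):
                    raise ValueError("unbalanced clone(): %r" % rest)
                actions.append(("clone", sub))
                actstr = rest[1:]                 # consume the closing ')'
            elif actstr.startswith(")"):
                # End of the enclosing clone(); report what we consumed
                # without eating the ')' that belongs to our caller.
                return actions, totallen - len(actstr)
            else:
                m = re.match(r"\d+", actstr)      # bare output port number
                if m is None:
                    raise ValueError("unsupported action: %r" % actstr)
                actions.append(("output", int(m.group())))
                actstr = actstr[m.end():]
            actstr = actstr.lstrip(", ")
        return actions, totallen - len(actstr)

    # parse_actions("2,clone(3,clone(4)),5")[0]
    #   -> [('output', 2),
    #       ('clone', [('output', 3), ('clone', [('output', 4)])]),
    #       ('output', 5)]
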
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index f838dd370f6af3..d65fdd407d73f9 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # Check that route PMTU values match expectations, and that initial device MTU
+@@ -198,8 +198,8 @@
+ # - pmtu_ipv6_route_change
+ # Same as above but with IPv6
+
+-# Kselftest framework requirement - SKIP code is 4.
+-ksft_skip=4
++source lib.sh
++source net_helper.sh
+
+ PAUSE_ON_FAIL=no
+ VERBOSE=0
+@@ -268,16 +268,6 @@ tests="
+ pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1
+ pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1"
+
+-NS_A="ns-A"
+-NS_B="ns-B"
+-NS_C="ns-C"
+-NS_R1="ns-R1"
+-NS_R2="ns-R2"
+-ns_a="ip netns exec ${NS_A}"
+-ns_b="ip netns exec ${NS_B}"
+-ns_c="ip netns exec ${NS_C}"
+-ns_r1="ip netns exec ${NS_R1}"
+-ns_r2="ip netns exec ${NS_R2}"
+ # Addressing and routing for tests with routers: four network segments, with
+ # index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
+ # identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2).
+@@ -543,13 +533,17 @@ setup_ip6ip6() {
+ }
+
+ setup_namespaces() {
++ setup_ns NS_A NS_B NS_C NS_R1 NS_R2
+ for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do
+- ip netns add ${n} || return 1
+-
+ # Disable DAD, so that we don't have to wait to use the
+ # configured IPv6 addresses
+ ip netns exec ${n} sysctl -q net/ipv6/conf/default/accept_dad=0
+ done
++ ns_a="ip netns exec ${NS_A}"
++ ns_b="ip netns exec ${NS_B}"
++ ns_c="ip netns exec ${NS_C}"
++ ns_r1="ip netns exec ${NS_R1}"
++ ns_r2="ip netns exec ${NS_R2}"
+ }
+
+ setup_veth() {
+@@ -714,23 +708,23 @@ setup_xfrm6() {
+ }
+
+ setup_xfrm4udp() {
+- setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+- setup_nettest_xfrm 4 4500
++ setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
++ setup_nettest_xfrm 4 4500
+ }
+
+ setup_xfrm6udp() {
+- setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+- setup_nettest_xfrm 6 4500
++ setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
++ setup_nettest_xfrm 6 4500
+ }
+
+ setup_xfrm4udprouted() {
+- setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
+- setup_nettest_xfrm 4 4500
++ setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0" && \
++ setup_nettest_xfrm 4 4500
+ }
+
+ setup_xfrm6udprouted() {
+- setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
+- setup_nettest_xfrm 6 4500
++ setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0" && \
++ setup_nettest_xfrm 6 4500
+ }
+
+ setup_routing_old() {
+@@ -839,7 +833,7 @@ setup_bridge() {
+ run_cmd ${ns_a} ip link set br0 up
+
+ run_cmd ${ns_c} ip link add veth_C-A type veth peer name veth_A-C
+- run_cmd ${ns_c} ip link set veth_A-C netns ns-A
++ run_cmd ${ns_c} ip link set veth_A-C netns ${NS_A}
+
+ run_cmd ${ns_a} ip link set veth_A-C up
+ run_cmd ${ns_c} ip link set veth_C-A up
+@@ -944,9 +938,7 @@ cleanup() {
+ done
+ socat_pids=
+
+- for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do
+- ip netns del ${n} 2> /dev/null
+- done
++ cleanup_all_ns
+
+ ip link del veth_A-C 2>/dev/null
+ ip link del veth_A-R1 2>/dev/null
+@@ -1345,13 +1337,15 @@ test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception() {
+ TCPDST="TCP:[${dst}]:50000"
+ fi
+ ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000 STDOUT > $tmpoutfile &
++ local socat_pid=$!
+
+- sleep 1
++ wait_local_port_listen ${NS_B} 50000 tcp
+
+- dd if=/dev/zero of=/dev/stdout status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3
++ dd if=/dev/zero status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3
+
+ size=$(du -sb $tmpoutfile)
+ size=${size%%/tmp/*}
++ wait ${socat_pid}
+
+ [ $size -ne 1048576 ] && err "File size $size mismatches expected value in locally bridged vxlan test" && return 1
+ done
+@@ -1963,6 +1957,13 @@ check_command() {
+ return 0
+ }
+
++check_running() {
++ pid=${1}
++ cmd=${2}
++
++ [ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "{cmd}" ]
++}
++
+ test_cleanup_vxlanX_exception() {
+ outer="${1}"
+ encap="vxlan"
+@@ -1993,11 +1994,12 @@ test_cleanup_vxlanX_exception() {
+
+ ${ns_a} ip link del dev veth_A-R1 &
+ iplink_pid=$!
+- sleep 1
+- if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+- err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+- return 1
+- fi
++ for i in $(seq 1 20); do
++ check_running ${iplink_pid} "iplinkdeldevveth_A-R1" || return 0
++ sleep 0.1
++ done
++ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
++ return 1
+ }
+
+ test_cleanup_ipv6_exception() {
+@@ -2048,7 +2050,7 @@ run_test() {
+ case $ret in
+ 0)
+ all_skipped=false
+- [ $exitcode=$ksft_skip ] && exitcode=0
++ [ $exitcode -eq $ksft_skip ] && exitcode=0
+ ;;
+ $ksft_skip)
+ [ $all_skipped = true ] && exitcode=$ksft_skip
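
Two of the pmtu.sh changes above deserve spelling out. First, `[ $exitcode=$ksft_skip ]` expands to a single non-empty word (e.g. `0=4`), so the old test was always true; `-eq` performs the intended numeric comparison. Second, the fixed `sleep 1` before inspecting /proc/<pid>/cmdline becomes a bounded poll (20 tries at 0.1 s), the same pattern wait_local_port_listen uses for the socat listener. A rough Python rendition of that polling pattern (hypothetical helper, not kernel code):

    import time

    def wait_for(predicate, timeout=2.0, interval=0.1):
        """Poll predicate() until it returns True or the timeout expires."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return predicate()          # one last check at the deadline

    def process_gone(pid):
        # Spiritual twin of check_running(): the cmdline entry reads back
        # empty (or the open fails) once the process has exited.
        try:
            with open("/proc/%d/cmdline" % pid, "rb") as f:
                return f.read() == b""
        except OSError:
            return True

    # wait_for(lambda: process_gone(iplink_pid)) returning False means the
    # `ip link del` is still stuck and the PMTU dst has likely leaked.
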
+diff --git a/tools/testing/selftests/net/reuseaddr_conflict.c b/tools/testing/selftests/net/reuseaddr_conflict.c
+index 7c5b12664b03b0..bfb07dc495186d 100644
+--- a/tools/testing/selftests/net/reuseaddr_conflict.c
++++ b/tools/testing/selftests/net/reuseaddr_conflict.c
+@@ -109,6 +109,6 @@ int main(void)
+ fd1 = open_port(0, 1);
+ if (fd1 >= 0)
+ error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6");
+- fprintf(stderr, "Success");
++ fprintf(stderr, "Success\n");
+ return 0;
+ }
+diff --git a/tools/testing/selftests/net/rps_default_mask.sh b/tools/testing/selftests/net/rps_default_mask.sh
+index a26c5624429fb1..4287a852989079 100755
+--- a/tools/testing/selftests/net/rps_default_mask.sh
++++ b/tools/testing/selftests/net/rps_default_mask.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ readonly ksft_skip=4
+@@ -33,6 +33,10 @@ chk_rps() {
+
+ rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
+ printf "%-60s" "$msg"
++
++ # In case there are more than 32 CPUs, we need to remove commas from the masks
++ rps_mask=${rps_mask//,}
++ expected_rps_mask=${expected_rps_mask//,}
+ if [ $rps_mask -eq $expected_rps_mask ]; then
+ echo "[ ok ]"
+ else
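
The comma handling matters because sysfs prints CPU bitmasks in 32-bit groups separated by commas once a machine has more than 32 CPUs, so comparing the raw strings fails on large boxes. A standalone illustration of the normalisation (not part of the selftest):

    def parse_cpu_mask(text):
        """Parse a sysfs CPU bitmask such as '00000000,00000001' into an
        integer; the kernel inserts a comma every 32 bits."""
        return int(text.strip().replace(",", ""), 16)

    assert parse_cpu_mask("00000000,00000001") == parse_cpu_mask("1")
    assert parse_cpu_mask("f,00000000") == 0xf00000000
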
+diff --git a/tools/testing/selftests/net/setup_loopback.sh b/tools/testing/selftests/net/setup_loopback.sh
+old mode 100755
+new mode 100644
+diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh
+index 1003ddf7b3b26e..227fd1076f2132 100644
+--- a/tools/testing/selftests/net/setup_veth.sh
++++ b/tools/testing/selftests/net/setup_veth.sh
+@@ -8,7 +8,7 @@ setup_veth_ns() {
+ local -r ns_mac="$4"
+
+ [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
+- echo 100000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
++ echo 1000000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
+ ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535
+ ip -netns "${ns_name}" link set dev "${ns_dev}" up
+
+diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c
+index a148181641026e..e9fa14e1073226 100644
+--- a/tools/testing/selftests/net/so_incoming_cpu.c
++++ b/tools/testing/selftests/net/so_incoming_cpu.c
+@@ -3,19 +3,16 @@
+ #define _GNU_SOURCE
+ #include <sched.h>
+
++#include <fcntl.h>
++
+ #include <netinet/in.h>
+ #include <sys/socket.h>
+ #include <sys/sysinfo.h>
+
+ #include "../kselftest_harness.h"
+
+-#define CLIENT_PER_SERVER 32 /* More sockets, more reliable */
+-#define NR_SERVER self->nproc
+-#define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER)
+-
+ FIXTURE(so_incoming_cpu)
+ {
+- int nproc;
+ int *servers;
+ union {
+ struct sockaddr addr;
+@@ -56,12 +53,47 @@ FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
+ .when_to_set = AFTER_ALL_LISTEN,
+ };
+
++static void write_sysctl(struct __test_metadata *_metadata,
++ char *filename, char *string)
++{
++ int fd, len, ret;
++
++ fd = open(filename, O_WRONLY);
++ ASSERT_NE(fd, -1);
++
++ len = strlen(string);
++ ret = write(fd, string, len);
++ ASSERT_EQ(ret, len);
++}
++
++static void setup_netns(struct __test_metadata *_metadata)
++{
++ ASSERT_EQ(unshare(CLONE_NEWNET), 0);
++ ASSERT_EQ(system("ip link set lo up"), 0);
++
++ write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001");
++ write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0");
++}
++
++#define NR_PORT (60001 - 10000 - 1)
++#define NR_CLIENT_PER_SERVER_DEFAULT 32
++static int nr_client_per_server, nr_server, nr_client;
++
+ FIXTURE_SETUP(so_incoming_cpu)
+ {
+- self->nproc = get_nprocs();
+- ASSERT_LE(2, self->nproc);
++ setup_netns(_metadata);
++
++ nr_server = get_nprocs();
++ ASSERT_LE(2, nr_server);
++
++ if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
++ nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT;
++ else
++ nr_client_per_server = NR_PORT / nr_server;
++
++ nr_client = nr_client_per_server * nr_server;
+
+- self->servers = malloc(sizeof(int) * NR_SERVER);
++ self->servers = malloc(sizeof(int) * nr_server);
+ ASSERT_NE(self->servers, NULL);
+
+ self->in_addr.sin_family = AF_INET;
+@@ -74,7 +106,7 @@ FIXTURE_TEARDOWN(so_incoming_cpu)
+ {
+ int i;
+
+- for (i = 0; i < NR_SERVER; i++)
++ for (i = 0; i < nr_server; i++)
+ close(self->servers[i]);
+
+ free(self->servers);
+@@ -110,10 +142,10 @@ int create_server(struct __test_metadata *_metadata,
+ if (variant->when_to_set == BEFORE_LISTEN)
+ set_so_incoming_cpu(_metadata, fd, cpu);
+
+- /* We don't use CLIENT_PER_SERVER here not to block
++ /* We don't use nr_client_per_server here so as not to block
+ * this test at connect() if SO_INCOMING_CPU is broken.
+ */
+- ret = listen(fd, NR_CLIENT);
++ ret = listen(fd, nr_client);
+ ASSERT_EQ(ret, 0);
+
+ if (variant->when_to_set == AFTER_LISTEN)
+@@ -128,7 +160,7 @@ void create_servers(struct __test_metadata *_metadata,
+ {
+ int i, ret;
+
+- for (i = 0; i < NR_SERVER; i++) {
++ for (i = 0; i < nr_server; i++) {
+ self->servers[i] = create_server(_metadata, self, variant, i);
+
+ if (i == 0) {
+@@ -138,7 +170,7 @@ void create_servers(struct __test_metadata *_metadata,
+ }
+
+ if (variant->when_to_set == AFTER_ALL_LISTEN) {
+- for (i = 0; i < NR_SERVER; i++)
++ for (i = 0; i < nr_server; i++)
+ set_so_incoming_cpu(_metadata, self->servers[i], i);
+ }
+ }
+@@ -149,7 +181,7 @@ void create_clients(struct __test_metadata *_metadata,
+ cpu_set_t cpu_set;
+ int i, j, fd, ret;
+
+- for (i = 0; i < NR_SERVER; i++) {
++ for (i = 0; i < nr_server; i++) {
+ CPU_ZERO(&cpu_set);
+
+ CPU_SET(i, &cpu_set);
+@@ -162,7 +194,7 @@ void create_clients(struct __test_metadata *_metadata,
+ ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+ ASSERT_EQ(ret, 0);
+
+- for (j = 0; j < CLIENT_PER_SERVER; j++) {
++ for (j = 0; j < nr_client_per_server; j++) {
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_NE(fd, -1);
+
+@@ -180,8 +212,8 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
+ int i, j, fd, cpu, ret, total = 0;
+ socklen_t len = sizeof(int);
+
+- for (i = 0; i < NR_SERVER; i++) {
+- for (j = 0; j < CLIENT_PER_SERVER; j++) {
++ for (i = 0; i < nr_server; i++) {
++ for (j = 0; j < nr_client_per_server; j++) {
+ /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
+ fd = accept(self->servers[i], &self->addr, &self->addrlen);
+ ASSERT_NE(fd, -1);
+@@ -195,7 +227,7 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
+ }
+ }
+
+- ASSERT_EQ(total, NR_CLIENT);
++ ASSERT_EQ(total, nr_client);
+ TH_LOG("SO_INCOMING_CPU is very likely to be "
+ "working correctly with %d sockets.", total);
+ }
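
The so_incoming_cpu.c rework replaces the fixed 32-clients-per-server constant with a value clamped to the ephemeral port range of the test's private netns (set to 10000..60001 by the new setup_netns()), so that connect() cannot exhaust local ports on machines with very many CPUs. The arithmetic, restated in Python with the same constants:

    NR_PORT = 60001 - 10000 - 1        # usable local ports in the test netns
    NR_CLIENT_PER_SERVER_DEFAULT = 32

    def clamp_clients(nr_server):
        """Mirror of FIXTURE_SETUP(): cap total clients below NR_PORT."""
        if NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT:
            per_server = NR_CLIENT_PER_SERVER_DEFAULT
        else:
            per_server = NR_PORT // nr_server
        return per_server, per_server * nr_server

    # clamp_clients(8)    -> (32, 256)
    # clamp_clients(2048) -> (24, 49152), still below the 49990 usable ports
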
+diff --git a/tools/testing/selftests/net/test_bridge_backup_port.sh b/tools/testing/selftests/net/test_bridge_backup_port.sh
+index 112cfd8a10ad94..1b3f89e2b86e6a 100755
+--- a/tools/testing/selftests/net/test_bridge_backup_port.sh
++++ b/tools/testing/selftests/net/test_bridge_backup_port.sh
+@@ -35,9 +35,8 @@
+ # | sw1 | | sw2 |
+ # +------------------------------------+ +------------------------------------+
+
++source lib.sh
+ ret=0
+-# Kselftest framework requirement - SKIP code is 4.
+-ksft_skip=4
+
+ # All tests in this script. Can be overridden with -t option.
+ TESTS="
+@@ -125,6 +124,16 @@ tc_check_packets()
+ [[ $pkts == $count ]]
+ }
+
++bridge_link_check()
++{
++ local ns=$1; shift
++ local dev=$1; shift
++ local state=$1; shift
++
++ bridge -n $ns -d -j link show dev $dev | \
++ jq -e ".[][\"state\"] == \"$state\"" &> /dev/null
++}
++
+ ################################################################################
+ # Setup
+
+@@ -132,9 +141,6 @@ setup_topo_ns()
+ {
+ local ns=$1; shift
+
+- ip netns add $ns
+- ip -n $ns link set dev lo up
+-
+ ip netns exec $ns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
+ ip netns exec $ns sysctl -qw net.ipv6.conf.default.ignore_routes_with_linkdown=1
+ ip netns exec $ns sysctl -qw net.ipv6.conf.all.accept_dad=0
+@@ -145,13 +151,14 @@ setup_topo()
+ {
+ local ns
+
+- for ns in sw1 sw2; do
++ setup_ns sw1 sw2
++ for ns in $sw1 $sw2; do
+ setup_topo_ns $ns
+ done
+
+ ip link add name veth0 type veth peer name veth1
+- ip link set dev veth0 netns sw1 name veth0
+- ip link set dev veth1 netns sw2 name veth0
++ ip link set dev veth0 netns $sw1 name veth0
++ ip link set dev veth1 netns $sw2 name veth0
+ }
+
+ setup_sw_common()
+@@ -190,7 +197,7 @@ setup_sw_common()
+
+ setup_sw1()
+ {
+- local ns=sw1
++ local ns=$sw1
+ local local_addr=192.0.2.33
+ local remote_addr=192.0.2.34
+ local veth_addr=192.0.2.49
+@@ -203,7 +210,7 @@ setup_sw1()
+
+ setup_sw2()
+ {
+- local ns=sw2
++ local ns=$sw2
+ local local_addr=192.0.2.34
+ local remote_addr=192.0.2.33
+ local veth_addr=192.0.2.50
+@@ -229,11 +236,7 @@ setup()
+
+ cleanup()
+ {
+- local ns
+-
+- for ns in h1 h2 sw1 sw2; do
+- ip netns del $ns &> /dev/null
+- done
++ cleanup_ns $sw1 $sw2
+ }
+
+ ################################################################################
+@@ -248,85 +251,90 @@ backup_port()
+ echo "Backup port"
+ echo "-----------"
+
+- run_cmd "tc -n sw1 qdisc replace dev swp1 clsact"
+- run_cmd "tc -n sw1 filter replace dev swp1 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev swp1 clsact"
++ run_cmd "tc -n $sw1 filter replace dev swp1 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
+
+- run_cmd "bridge -n sw1 fdb replace $dmac dev swp1 master static vlan 10"
++ run_cmd "bridge -n $sw1 fdb replace $dmac dev swp1 master static vlan 10"
+
+ # Initial state - check that packets are forwarded out of swp1 when it
+ # has a carrier and not forwarded out of any port when it does not have
+ # a carrier.
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 1
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 1
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 0
++ tc_check_packets $sw1 "dev vx0 egress" 101 0
+ log_test $? 0 "No forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 1
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 1
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 0
++ tc_check_packets $sw1 "dev vx0 egress" 101 0
+ log_test $? 0 "No forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier on"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier on"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
+ log_test $? 0 "swp1 carrier on"
+
+ # Configure vx0 as the backup port of swp1 and check that packets are
+ # forwarded out of swp1 when it has a carrier and out of vx0 when swp1
+ # does not have a carrier.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_port vx0"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_port vx0\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_port vx0"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_port vx0\""
+ log_test $? 0 "vx0 configured as backup port of swp1"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 2
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 2
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 0
++ tc_check_packets $sw1 "dev vx0 egress" 101 0
+ log_test $? 0 "No forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 2
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 2
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "Forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier on"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier on"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
+ log_test $? 0 "swp1 carrier on"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 3
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 3
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "No forwarding out of vx0"
+
+ # Remove vx0 as the backup port of swp1 and check that packets are no
+ # longer forwarded out of vx0 when swp1 does not have a carrier.
+- run_cmd "bridge -n sw1 link set dev swp1 nobackup_port"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_port vx0\""
++ run_cmd "bridge -n $sw1 link set dev swp1 nobackup_port"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_port vx0\""
+ log_test $? 1 "vx0 not configured as backup port of swp1"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 4
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 4
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "No forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 4
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 4
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "No forwarding out of vx0"
+ }
+
+@@ -339,125 +347,130 @@ backup_nhid()
+ echo "Backup nexthop ID"
+ echo "-----------------"
+
+- run_cmd "tc -n sw1 qdisc replace dev swp1 clsact"
+- run_cmd "tc -n sw1 filter replace dev swp1 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev swp1 clsact"
++ run_cmd "tc -n $sw1 filter replace dev swp1 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
+
+- run_cmd "ip -n sw1 nexthop replace id 1 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 2 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 10 group 1/2 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 1 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 2 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 10 group 1/2 fdb"
+
+- run_cmd "bridge -n sw1 fdb replace $dmac dev swp1 master static vlan 10"
+- run_cmd "bridge -n sw1 fdb replace $dmac dev vx0 self static dst 192.0.2.36 src_vni 10010"
++ run_cmd "bridge -n $sw1 fdb replace $dmac dev swp1 master static vlan 10"
++ run_cmd "bridge -n $sw1 fdb replace $dmac dev vx0 self static dst 192.0.2.36 src_vni 10010"
+
+- run_cmd "ip -n sw2 address replace 192.0.2.36/32 dev lo"
++ run_cmd "ip -n $sw2 address replace 192.0.2.36/32 dev lo"
+
+ # The first filter matches on packets forwarded using the backup
+ # nexthop ID and the second filter matches on packets forwarded using a
+ # regular VXLAN FDB entry.
+- run_cmd "tc -n sw2 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw2 filter replace dev vx0 ingress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac enc_key_id 10010 enc_dst_ip 192.0.2.34 action pass"
+- run_cmd "tc -n sw2 filter replace dev vx0 ingress pref 1 handle 102 proto ip flower src_mac $smac dst_mac $dmac enc_key_id 10010 enc_dst_ip 192.0.2.36 action pass"
++ run_cmd "tc -n $sw2 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw2 filter replace dev vx0 ingress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac enc_key_id 10010 enc_dst_ip 192.0.2.34 action pass"
++ run_cmd "tc -n $sw2 filter replace dev vx0 ingress pref 1 handle 102 proto ip flower src_mac $smac dst_mac $dmac enc_key_id 10010 enc_dst_ip 192.0.2.36 action pass"
+
+ # Configure vx0 as the backup port of swp1 and check that packets are
+ # forwarded out of swp1 when it has a carrier and out of vx0 when swp1
+ # does not have a carrier. When packets are forwarded out of vx0, check
+ # that they are forwarded by the VXLAN FDB entry.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_port vx0"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_port vx0\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_port vx0"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_port vx0\""
+ log_test $? 0 "vx0 configured as backup port of swp1"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 1
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 1
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 0
++ tc_check_packets $sw1 "dev vx0 egress" 101 0
+ log_test $? 0 "No forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 1
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 1
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 0
++ tc_check_packets $sw2 "dev vx0 ingress" 101 0
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- tc_check_packets sw2 "dev vx0 ingress" 102 1
++ tc_check_packets $sw2 "dev vx0 ingress" 102 1
+ log_test $? 0 "Forwarding using VXLAN FDB entry"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier on"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier on"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
+ log_test $? 0 "swp1 carrier on"
+
+ # Configure nexthop ID 10 as the backup nexthop ID of swp1 and check
+ # that when packets are forwarded out of vx0, they are forwarded using
+ # the backup nexthop ID.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 10"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid 10\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 10"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid 10\""
+ log_test $? 0 "nexthop ID 10 configured as backup nexthop ID of swp1"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 2
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 2
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "No forwarding out of vx0"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 2
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 2
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "Forwarding using backup nexthop ID"
+- tc_check_packets sw2 "dev vx0 ingress" 102 1
++ tc_check_packets $sw2 "dev vx0 ingress" 102 1
+ log_test $? 0 "No forwarding using VXLAN FDB entry"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier on"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier on"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
+ log_test $? 0 "swp1 carrier on"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 3
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 3
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "No forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- tc_check_packets sw2 "dev vx0 ingress" 102 1
++ tc_check_packets $sw2 "dev vx0 ingress" 102 1
+ log_test $? 0 "No forwarding using VXLAN FDB entry"
+
+ # Reset the backup nexthop ID to 0 and check that packets are no longer
+ # forwarded using the backup nexthop ID when swp1 does not have a
+ # carrier and are instead forwarded by the VXLAN FDB.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 0"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 0"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid\""
+ log_test $? 1 "No backup nexthop ID configured for swp1"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 4
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 4
+ log_test $? 0 "Forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "No forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- tc_check_packets sw2 "dev vx0 ingress" 102 1
++ tc_check_packets $sw2 "dev vx0 ingress" 102 1
+ log_test $? 0 "No forwarding using VXLAN FDB entry"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 4
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 4
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- tc_check_packets sw2 "dev vx0 ingress" 102 2
++ tc_check_packets $sw2 "dev vx0 ingress" 102 2
+ log_test $? 0 "Forwarding using VXLAN FDB entry"
+ }
+
+@@ -475,109 +488,110 @@ backup_nhid_invalid()
+ # is forwarded out of the VXLAN port, but dropped by the VXLAN driver
+ # and does not crash the host.
+
+- run_cmd "tc -n sw1 qdisc replace dev swp1 clsact"
+- run_cmd "tc -n sw1 filter replace dev swp1 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev swp1 clsact"
++ run_cmd "tc -n $sw1 filter replace dev swp1 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac action pass"
+ # Drop all other Tx traffic to avoid changes to the Tx drop counter.
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 2 handle 102 proto all matchall action drop"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 2 handle 102 proto all matchall action drop"
+
+- tx_drop=$(ip -n sw1 -s -j link show dev vx0 | jq '.[]["stats64"]["tx"]["dropped"]')
++ tx_drop=$(ip -n $sw1 -s -j link show dev vx0 | jq '.[]["stats64"]["tx"]["dropped"]')
+
+- run_cmd "ip -n sw1 nexthop replace id 1 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 2 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 10 group 1/2 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 1 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 2 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 10 group 1/2 fdb"
+
+- run_cmd "bridge -n sw1 fdb replace $dmac dev swp1 master static vlan 10"
++ run_cmd "bridge -n $sw1 fdb replace $dmac dev swp1 master static vlan 10"
+
+- run_cmd "tc -n sw2 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw2 filter replace dev vx0 ingress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac enc_key_id 10010 enc_dst_ip 192.0.2.34 action pass"
++ run_cmd "tc -n $sw2 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw2 filter replace dev vx0 ingress pref 1 handle 101 proto ip flower src_mac $smac dst_mac $dmac enc_key_id 10010 enc_dst_ip 192.0.2.34 action pass"
+
+ # First, check that redirection works.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_port vx0"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_port vx0\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_port vx0"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_port vx0\""
+ log_test $? 0 "vx0 configured as backup port of swp1"
+
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 10"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid 10\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 10"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid 10\""
+ log_test $? 0 "Valid nexthop as backup nexthop"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
+ log_test $? 0 "swp1 carrier off"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 0
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 0
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "Forwarding using backup nexthop ID"
+- run_cmd "ip -n sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $tx_drop'"
++ run_cmd "ip -n $sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $tx_drop'"
+ log_test $? 0 "No Tx drop increase"
+
+ # Use a non-existent nexthop ID.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 20"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid 20\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 20"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid 20\""
+ log_test $? 0 "Non-existent nexthop as backup nexthop"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 0
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 0
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- run_cmd "ip -n sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 1))'"
++ run_cmd "ip -n $sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 1))'"
+ log_test $? 0 "Tx drop increased"
+
+ # Use a blackhole nexthop.
+- run_cmd "ip -n sw1 nexthop replace id 30 blackhole"
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 30"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid 30\""
++ run_cmd "ip -n $sw1 nexthop replace id 30 blackhole"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 30"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid 30\""
+ log_test $? 0 "Blackhole nexthop as backup nexthop"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 0
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 0
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- run_cmd "ip -n sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 2))'"
++ run_cmd "ip -n $sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 2))'"
+ log_test $? 0 "Tx drop increased"
+
+ # Non-group FDB nexthop.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 1"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid 1\""
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 1"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid 1\""
+ log_test $? 0 "Non-group FDB nexthop as backup nexthop"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 0
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 0
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 4
++ tc_check_packets $sw1 "dev vx0 egress" 101 4
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- run_cmd "ip -n sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 3))'"
++ run_cmd "ip -n $sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 3))'"
+ log_test $? 0 "Tx drop increased"
+
+ # IPv6 address family nexthop.
+- run_cmd "ip -n sw1 nexthop replace id 100 via 2001:db8:100::1 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 200 via 2001:db8:100::1 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 300 group 100/200 fdb"
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 300"
+- run_cmd "bridge -n sw1 -d link show dev swp1 | grep \"backup_nhid 300\""
++ run_cmd "ip -n $sw1 nexthop replace id 100 via 2001:db8:100::1 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 200 via 2001:db8:100::1 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 300 group 100/200 fdb"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 300"
++ run_cmd "bridge -n $sw1 -d link show dev swp1 | grep \"backup_nhid 300\""
+ log_test $? 0 "IPv6 address family nexthop as backup nexthop"
+
+- run_cmd "ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
+- tc_check_packets sw1 "dev swp1 egress" 101 0
++ run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
++ tc_check_packets $sw1 "dev swp1 egress" 101 0
+ log_test $? 0 "No forwarding out of swp1"
+- tc_check_packets sw1 "dev vx0 egress" 101 5
++ tc_check_packets $sw1 "dev vx0 egress" 101 5
+ log_test $? 0 "Forwarding out of vx0"
+- tc_check_packets sw2 "dev vx0 ingress" 101 1
++ tc_check_packets $sw2 "dev vx0 ingress" 101 1
+ log_test $? 0 "No forwarding using backup nexthop ID"
+- run_cmd "ip -n sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 4))'"
++ run_cmd "ip -n $sw1 -s -j link show dev vx0 | jq -e '.[][\"stats64\"][\"tx\"][\"dropped\"] == $((tx_drop + 4))'"
+ log_test $? 0 "Tx drop increased"
+ }
+
+@@ -591,44 +605,46 @@ backup_nhid_ping()
+ echo "------------------------"
+
+ # Test bidirectional traffic when traffic is redirected in both VTEPs.
+- sw1_mac=$(ip -n sw1 -j -p link show br0.10 | jq -r '.[]["address"]')
+- sw2_mac=$(ip -n sw2 -j -p link show br0.10 | jq -r '.[]["address"]')
++ sw1_mac=$(ip -n $sw1 -j -p link show br0.10 | jq -r '.[]["address"]')
++ sw2_mac=$(ip -n $sw2 -j -p link show br0.10 | jq -r '.[]["address"]')
+
+- run_cmd "bridge -n sw1 fdb replace $sw2_mac dev swp1 master static vlan 10"
+- run_cmd "bridge -n sw2 fdb replace $sw1_mac dev swp1 master static vlan 10"
++ run_cmd "bridge -n $sw1 fdb replace $sw2_mac dev swp1 master static vlan 10"
++ run_cmd "bridge -n $sw2 fdb replace $sw1_mac dev swp1 master static vlan 10"
+
+- run_cmd "ip -n sw1 neigh replace 192.0.2.66 lladdr $sw2_mac nud perm dev br0.10"
+- run_cmd "ip -n sw2 neigh replace 192.0.2.65 lladdr $sw1_mac nud perm dev br0.10"
++ run_cmd "ip -n $sw1 neigh replace 192.0.2.66 lladdr $sw2_mac nud perm dev br0.10"
++ run_cmd "ip -n $sw2 neigh replace 192.0.2.65 lladdr $sw1_mac nud perm dev br0.10"
+
+- run_cmd "ip -n sw1 nexthop replace id 1 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw2 nexthop replace id 1 via 192.0.2.33 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 10 group 1 fdb"
+- run_cmd "ip -n sw2 nexthop replace id 10 group 1 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 1 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw2 nexthop replace id 1 via 192.0.2.33 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 10 group 1 fdb"
++ run_cmd "ip -n $sw2 nexthop replace id 10 group 1 fdb"
+
+- run_cmd "bridge -n sw1 link set dev swp1 backup_port vx0"
+- run_cmd "bridge -n sw2 link set dev swp1 backup_port vx0"
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 10"
+- run_cmd "bridge -n sw2 link set dev swp1 backup_nhid 10"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_port vx0"
++ run_cmd "bridge -n $sw2 link set dev swp1 backup_port vx0"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 10"
++ run_cmd "bridge -n $sw2 link set dev swp1 backup_nhid 10"
+
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
+- run_cmd "ip -n sw2 link set dev swp1 carrier off"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
++ run_cmd "ip -n $sw2 link set dev swp1 carrier off"
++ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw2 swp1 disabled
+
+- run_cmd "ip netns exec sw1 ping -i 0.1 -c 10 -w $PING_TIMEOUT 192.0.2.66"
++ run_cmd "ip netns exec $sw1 ping -i 0.1 -c 10 -w $PING_TIMEOUT 192.0.2.66"
+ log_test $? 0 "Ping with backup nexthop ID"
+
+ # Reset the backup nexthop ID to 0 and check that ping fails.
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 0"
+- run_cmd "bridge -n sw2 link set dev swp1 backup_nhid 0"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 0"
++ run_cmd "bridge -n $sw2 link set dev swp1 backup_nhid 0"
+
+- run_cmd "ip netns exec sw1 ping -i 0.1 -c 10 -w $PING_TIMEOUT 192.0.2.66"
++ run_cmd "ip netns exec $sw1 ping -i 0.1 -c 10 -w $PING_TIMEOUT 192.0.2.66"
+ log_test $? 1 "Ping after disabling backup nexthop ID"
+ }
+
+ backup_nhid_add_del_loop()
+ {
+ while true; do
+- ip -n sw1 nexthop del id 10
+- ip -n sw1 nexthop replace id 10 group 1/2 fdb
++ ip -n $sw1 nexthop del id 10
++ ip -n $sw1 nexthop replace id 10 group 1/2 fdb
+ done >/dev/null 2>&1
+ }
+
+@@ -648,19 +664,19 @@ backup_nhid_torture()
+ # deleting the group. The test is considered successful if nothing
+ # crashed.
+
+- run_cmd "ip -n sw1 nexthop replace id 1 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 2 via 192.0.2.34 fdb"
+- run_cmd "ip -n sw1 nexthop replace id 10 group 1/2 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 1 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 2 via 192.0.2.34 fdb"
++ run_cmd "ip -n $sw1 nexthop replace id 10 group 1/2 fdb"
+
+- run_cmd "bridge -n sw1 fdb replace $dmac dev swp1 master static vlan 10"
++ run_cmd "bridge -n $sw1 fdb replace $dmac dev swp1 master static vlan 10"
+
+- run_cmd "bridge -n sw1 link set dev swp1 backup_port vx0"
+- run_cmd "bridge -n sw1 link set dev swp1 backup_nhid 10"
+- run_cmd "ip -n sw1 link set dev swp1 carrier off"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_port vx0"
++ run_cmd "bridge -n $sw1 link set dev swp1 backup_nhid 10"
++ run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+
+ backup_nhid_add_del_loop &
+ pid1=$!
+- ip netns exec sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 0 &
++ ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 0 &
+ pid2=$!
+
+ sleep 30
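
The recurring busywait/bridge_link_check pairs above exist because `ip link set ... carrier off` takes effect asynchronously: the bridge port only moves to "disabled" (or back to "forwarding") once the carrier event has been processed, so the test must poll the port state instead of injecting packets immediately. A hedged Python rendition of that helper (the real test shells out to `bridge -j` and jq; names here are illustrative):

    import json, subprocess, time

    def bridge_port_state(ns, dev):
        """Read the STP state of a bridge port inside a netns."""
        out = subprocess.check_output(
            ["ip", "netns", "exec", ns,
             "bridge", "-d", "-j", "link", "show", "dev", dev])
        return json.loads(out)[0]["state"]

    def wait_port_state(ns, dev, state, timeout=5.0):
        """Busy-wait until the port reaches the expected state."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if bridge_port_state(ns, dev) == state:
                return True
            time.sleep(0.1)
        return False

    # wait_port_state(sw1, "swp1", "disabled") after taking carrier down,
    # then wait_port_state(sw1, "swp1", "forwarding") after restoring it.
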
+diff --git a/tools/testing/selftests/net/test_bridge_neigh_suppress.sh b/tools/testing/selftests/net/test_bridge_neigh_suppress.sh
+index d80f2cd87614ca..02b986c9c247d6 100755
+--- a/tools/testing/selftests/net/test_bridge_neigh_suppress.sh
++++ b/tools/testing/selftests/net/test_bridge_neigh_suppress.sh
+@@ -45,9 +45,8 @@
+ # | sw1 | | sw2 |
+ # +------------------------------------+ +------------------------------------+
+
++source lib.sh
+ ret=0
+-# Kselftest framework requirement - SKIP code is 4.
+-ksft_skip=4
+
+ # All tests in this script. Can be overridden with -t option.
+ TESTS="
+@@ -140,9 +139,6 @@ setup_topo_ns()
+ {
+ local ns=$1; shift
+
+- ip netns add $ns
+- ip -n $ns link set dev lo up
+-
+ ip netns exec $ns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
+ ip netns exec $ns sysctl -qw net.ipv6.conf.default.ignore_routes_with_linkdown=1
+ ip netns exec $ns sysctl -qw net.ipv6.conf.all.accept_dad=0
+@@ -153,21 +149,14 @@ setup_topo()
+ {
+ local ns
+
+- for ns in h1 h2 sw1 sw2; do
++ setup_ns h1 h2 sw1 sw2
++ for ns in $h1 $h2 $sw1 $sw2; do
+ setup_topo_ns $ns
+ done
+
+- ip link add name veth0 type veth peer name veth1
+- ip link set dev veth0 netns h1 name eth0
+- ip link set dev veth1 netns sw1 name swp1
+-
+- ip link add name veth0 type veth peer name veth1
+- ip link set dev veth0 netns sw1 name veth0
+- ip link set dev veth1 netns sw2 name veth0
+-
+- ip link add name veth0 type veth peer name veth1
+- ip link set dev veth0 netns h2 name eth0
+- ip link set dev veth1 netns sw2 name swp1
++ ip -n $h1 link add name eth0 type veth peer name swp1 netns $sw1
++ ip -n $sw1 link add name veth0 type veth peer name veth0 netns $sw2
++ ip -n $h2 link add name eth0 type veth peer name swp1 netns $sw2
+ }
+
+ setup_host_common()
+@@ -190,7 +179,7 @@ setup_host_common()
+
+ setup_h1()
+ {
+- local ns=h1
++ local ns=$h1
+ local v4addr1=192.0.2.1/28
+ local v4addr2=192.0.2.17/28
+ local v6addr1=2001:db8:1::1/64
+@@ -201,7 +190,7 @@ setup_h1()
+
+ setup_h2()
+ {
+- local ns=h2
++ local ns=$h2
+ local v4addr1=192.0.2.2/28
+ local v4addr2=192.0.2.18/28
+ local v6addr1=2001:db8:1::2/64
+@@ -254,7 +243,7 @@ setup_sw_common()
+
+ setup_sw1()
+ {
+- local ns=sw1
++ local ns=$sw1
+ local local_addr=192.0.2.33
+ local remote_addr=192.0.2.34
+ local veth_addr=192.0.2.49
+@@ -265,7 +254,7 @@ setup_sw1()
+
+ setup_sw2()
+ {
+- local ns=sw2
++ local ns=$sw2
+ local local_addr=192.0.2.34
+ local remote_addr=192.0.2.33
+ local veth_addr=192.0.2.50
+@@ -291,11 +280,7 @@ setup()
+
+ cleanup()
+ {
+- local ns
+-
+- for ns in h1 h2 sw1 sw2; do
+- ip netns del $ns &> /dev/null
+- done
++ cleanup_ns $h1 $h2 $sw1 $sw2
+ }
+
+ ################################################################################
+@@ -312,80 +297,80 @@ neigh_suppress_arp_common()
+ echo "Per-port ARP suppression - VLAN $vid"
+ echo "----------------------------------"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto 0x0806 flower indev swp1 arp_tip $tip arp_sip $sip arp_op request action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto 0x0806 flower indev swp1 arp_tip $tip arp_sip $sip arp_op request action pass"
+
+ # Initial state - check that ARP requests are not suppressed and that
+ # ARP replies are received.
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 0 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "ARP suppression"
+
+ # Enable neighbor suppression and check that nothing changes compared
+ # to the initial state.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 0 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "ARP suppression"
+
+ # Install an FDB entry for the remote host and check that nothing
+ # changes compared to the initial state.
+- h2_mac=$(ip -n h2 -j -p link show eth0.$vid | jq -r '.[]["address"]')
+- run_cmd "bridge -n sw1 fdb replace $h2_mac dev vx0 master static vlan $vid"
++ h2_mac=$(ip -n $h2 -j -p link show eth0.$vid | jq -r '.[]["address"]')
++ run_cmd "bridge -n $sw1 fdb replace $h2_mac dev vx0 master static vlan $vid"
+ log_test $? 0 "FDB entry installation"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 0 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "ARP suppression"
+
+ # Install a neighbor on the matching SVI interface and check that ARP
+ # requests are suppressed.
+- run_cmd "ip -n sw1 neigh replace $tip lladdr $h2_mac nud permanent dev br0.$vid"
++ run_cmd "ip -n $sw1 neigh replace $tip lladdr $h2_mac nud permanent dev br0.$vid"
+ log_test $? 0 "Neighbor entry installation"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 0 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "ARP suppression"
+
+ # Take the second host down and check that ARP requests are suppressed
+ # and that ARP replies are received.
+- run_cmd "ip -n h2 link set dev eth0.$vid down"
++ run_cmd "ip -n $h2 link set dev eth0.$vid down"
+ log_test $? 0 "H2 down"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 0 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "ARP suppression"
+
+- run_cmd "ip -n h2 link set dev eth0.$vid up"
++ run_cmd "ip -n $h2 link set dev eth0.$vid up"
+ log_test $? 0 "H2 up"
+
+ # Disable neighbor suppression and check that ARP requests are no
+ # longer suppressed.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress off"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 0 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 4
++ tc_check_packets $sw1 "dev vx0 egress" 101 4
+ log_test $? 0 "ARP suppression"
+
+ # Take the second host down and check that ARP requests are not
+ # suppressed and that ARP replies are not received.
+- run_cmd "ip -n h2 link set dev eth0.$vid down"
++ run_cmd "ip -n $h2 link set dev eth0.$vid down"
+ log_test $? 0 "H2 down"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip"
+ log_test $? 1 "arping"
+- tc_check_packets sw1 "dev vx0 egress" 101 5
++ tc_check_packets $sw1 "dev vx0 egress" 101 5
+ log_test $? 0 "ARP suppression"
+ }
+
+@@ -415,80 +400,80 @@ neigh_suppress_ns_common()
+ echo "Per-port NS suppression - VLAN $vid"
+ echo "---------------------------------"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr type 135 code 0 action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr type 135 code 0 action pass"
+
+ # Initial state - check that NS messages are not suppressed and that ND
+ # messages are received.
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 0 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "NS suppression"
+
+ # Enable neighbor suppression and check that nothing changes compared
+ # to the initial state.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 0 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "NS suppression"
+
+ # Install an FDB entry for the remote host and check that nothing
+ # changes compared to the initial state.
+- h2_mac=$(ip -n h2 -j -p link show eth0.$vid | jq -r '.[]["address"]')
+- run_cmd "bridge -n sw1 fdb replace $h2_mac dev vx0 master static vlan $vid"
++ h2_mac=$(ip -n $h2 -j -p link show eth0.$vid | jq -r '.[]["address"]')
++ run_cmd "bridge -n $sw1 fdb replace $h2_mac dev vx0 master static vlan $vid"
+ log_test $? 0 "FDB entry installation"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 0 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "NS suppression"
+
+ # Install a neighbor on the matching SVI interface and check that NS
+ # messages are suppressed.
+- run_cmd "ip -n sw1 neigh replace $daddr lladdr $h2_mac nud permanent dev br0.$vid"
++ run_cmd "ip -n $sw1 neigh replace $daddr lladdr $h2_mac nud permanent dev br0.$vid"
+ log_test $? 0 "Neighbor entry installation"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 0 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "NS suppression"
+
+ # Take the second host down and check that NS messages are suppressed
+ # and that ND messages are received.
+- run_cmd "ip -n h2 link set dev eth0.$vid down"
++ run_cmd "ip -n $h2 link set dev eth0.$vid down"
+ log_test $? 0 "H2 down"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 0 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 3
++ tc_check_packets $sw1 "dev vx0 egress" 101 3
+ log_test $? 0 "NS suppression"
+
+- run_cmd "ip -n h2 link set dev eth0.$vid up"
++ run_cmd "ip -n $h2 link set dev eth0.$vid up"
+ log_test $? 0 "H2 up"
+
+ # Disable neighbor suppression and check that NS messages are no longer
+ # suppressed.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress off"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 0 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 4
++ tc_check_packets $sw1 "dev vx0 egress" 101 4
+ log_test $? 0 "NS suppression"
+
+ # Take the second host down and check that NS messages are not
+ # suppressed and that ND messages are not received.
+- run_cmd "ip -n h2 link set dev eth0.$vid down"
++ run_cmd "ip -n $h2 link set dev eth0.$vid down"
+ log_test $? 0 "H2 down"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid"
+ log_test $? 2 "ndisc6"
+- tc_check_packets sw1 "dev vx0 egress" 101 5
++ tc_check_packets $sw1 "dev vx0 egress" 101 5
+ log_test $? 0 "NS suppression"
+ }
+
+@@ -524,118 +509,118 @@ neigh_vlan_suppress_arp()
+ echo "Per-{Port, VLAN} ARP suppression"
+ echo "--------------------------------"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto 0x0806 flower indev swp1 arp_tip $tip1 arp_sip $sip1 arp_op request action pass"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 102 proto 0x0806 flower indev swp1 arp_tip $tip2 arp_sip $sip2 arp_op request action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto 0x0806 flower indev swp1 arp_tip $tip1 arp_sip $sip1 arp_op request action pass"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 102 proto 0x0806 flower indev swp1 arp_tip $tip2 arp_sip $sip2 arp_op request action pass"
+
+- h2_mac1=$(ip -n h2 -j -p link show eth0.$vid1 | jq -r '.[]["address"]')
+- h2_mac2=$(ip -n h2 -j -p link show eth0.$vid2 | jq -r '.[]["address"]')
+- run_cmd "bridge -n sw1 fdb replace $h2_mac1 dev vx0 master static vlan $vid1"
+- run_cmd "bridge -n sw1 fdb replace $h2_mac2 dev vx0 master static vlan $vid2"
+- run_cmd "ip -n sw1 neigh replace $tip1 lladdr $h2_mac1 nud permanent dev br0.$vid1"
+- run_cmd "ip -n sw1 neigh replace $tip2 lladdr $h2_mac2 nud permanent dev br0.$vid2"
++ h2_mac1=$(ip -n $h2 -j -p link show eth0.$vid1 | jq -r '.[]["address"]')
++ h2_mac2=$(ip -n $h2 -j -p link show eth0.$vid2 | jq -r '.[]["address"]')
++ run_cmd "bridge -n $sw1 fdb replace $h2_mac1 dev vx0 master static vlan $vid1"
++ run_cmd "bridge -n $sw1 fdb replace $h2_mac2 dev vx0 master static vlan $vid2"
++ run_cmd "ip -n $sw1 neigh replace $tip1 lladdr $h2_mac1 nud permanent dev br0.$vid1"
++ run_cmd "ip -n $sw1 neigh replace $tip2 lladdr $h2_mac2 nud permanent dev br0.$vid2"
+
+ # Enable per-{Port, VLAN} neighbor suppression and check that ARP
+ # requests are not suppressed and that ARP replies are received.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_vlan_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress on\""
+ log_test $? 0 "\"neigh_vlan_suppress\" is on"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
+ log_test $? 0 "arping (VLAN $vid1)"
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
+ log_test $? 0 "arping (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "ARP suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 1
++ tc_check_packets $sw1 "dev vx0 egress" 102 1
+ log_test $? 0 "ARP suppression (VLAN $vid2)"
+
+ # Enable neighbor suppression on VLAN 10 and check that only on this
+ # VLAN ARP requests are suppressed.
+- run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 vlan set vid $vid1 dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on (VLAN $vid1)"
+- run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid2 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 -d vlan show dev vx0 vid $vid2 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid2)"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
+ log_test $? 0 "arping (VLAN $vid1)"
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
+ log_test $? 0 "arping (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "ARP suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 2
++ tc_check_packets $sw1 "dev vx0 egress" 102 2
+ log_test $? 0 "ARP suppression (VLAN $vid2)"
+
+ # Enable neighbor suppression on the port and check that it has no
+ # effect compared to previous state.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
+ log_test $? 0 "arping (VLAN $vid1)"
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
+ log_test $? 0 "arping (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "ARP suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 3
++ tc_check_packets $sw1 "dev vx0 egress" 102 3
+ log_test $? 0 "ARP suppression (VLAN $vid2)"
+
+ # Disable neighbor suppression on the port and check that it has no
+ # effect compared to previous state.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress off"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
+ log_test $? 0 "arping (VLAN $vid1)"
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
+ log_test $? 0 "arping (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "ARP suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 4
++ tc_check_packets $sw1 "dev vx0 egress" 102 4
+ log_test $? 0 "ARP suppression (VLAN $vid2)"
+
+ # Disable neighbor suppression on VLAN 10 and check that ARP requests
+ # are no longer suppressed on this VLAN.
+- run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress off"
+- run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 vlan set vid $vid1 dev vx0 neigh_suppress off"
++ run_cmd "bridge -n $sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid1)"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
+ log_test $? 0 "arping (VLAN $vid1)"
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
+ log_test $? 0 "arping (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "ARP suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 5
++ tc_check_packets $sw1 "dev vx0 egress" 102 5
+ log_test $? 0 "ARP suppression (VLAN $vid2)"
+
+ # Disable per-{Port, VLAN} neighbor suppression, enable neighbor
+ # suppression on the port and check that on both VLANs ARP requests are
+ # suppressed.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress off"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress off\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_vlan_suppress off"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress off\""
+ log_test $? 0 "\"neigh_vlan_suppress\" is off"
+
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on"
+
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1"
+ log_test $? 0 "arping (VLAN $vid1)"
+- run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
++ run_cmd "ip netns exec $h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2"
+ log_test $? 0 "arping (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "ARP suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 5
++ tc_check_packets $sw1 "dev vx0 egress" 102 5
+ log_test $? 0 "ARP suppression (VLAN $vid2)"
+ }
+
+@@ -655,118 +640,118 @@ neigh_vlan_suppress_ns()
+ echo "Per-{Port, VLAN} NS suppression"
+ echo "-------------------------------"
+
+- run_cmd "tc -n sw1 qdisc replace dev vx0 clsact"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr1 type 135 code 0 action pass"
+- run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 102 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr2 type 135 code 0 action pass"
++ run_cmd "tc -n $sw1 qdisc replace dev vx0 clsact"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 101 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr1 type 135 code 0 action pass"
++ run_cmd "tc -n $sw1 filter replace dev vx0 egress pref 1 handle 102 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr2 type 135 code 0 action pass"
+
+- h2_mac1=$(ip -n h2 -j -p link show eth0.$vid1 | jq -r '.[]["address"]')
+- h2_mac2=$(ip -n h2 -j -p link show eth0.$vid2 | jq -r '.[]["address"]')
+- run_cmd "bridge -n sw1 fdb replace $h2_mac1 dev vx0 master static vlan $vid1"
+- run_cmd "bridge -n sw1 fdb replace $h2_mac2 dev vx0 master static vlan $vid2"
+- run_cmd "ip -n sw1 neigh replace $daddr1 lladdr $h2_mac1 nud permanent dev br0.$vid1"
+- run_cmd "ip -n sw1 neigh replace $daddr2 lladdr $h2_mac2 nud permanent dev br0.$vid2"
++ h2_mac1=$(ip -n $h2 -j -p link show eth0.$vid1 | jq -r '.[]["address"]')
++ h2_mac2=$(ip -n $h2 -j -p link show eth0.$vid2 | jq -r '.[]["address"]')
++ run_cmd "bridge -n $sw1 fdb replace $h2_mac1 dev vx0 master static vlan $vid1"
++ run_cmd "bridge -n $sw1 fdb replace $h2_mac2 dev vx0 master static vlan $vid2"
++ run_cmd "ip -n $sw1 neigh replace $daddr1 lladdr $h2_mac1 nud permanent dev br0.$vid1"
++ run_cmd "ip -n $sw1 neigh replace $daddr2 lladdr $h2_mac2 nud permanent dev br0.$vid2"
+
+ # Enable per-{Port, VLAN} neighbor suppression and check that NS
+ # messages are not suppressed and that ND messages are received.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_vlan_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress on\""
+ log_test $? 0 "\"neigh_vlan_suppress\" is on"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
+ log_test $? 0 "ndisc6 (VLAN $vid1)"
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
+ log_test $? 0 "ndisc6 (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "NS suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 1
++ tc_check_packets $sw1 "dev vx0 egress" 102 1
+ log_test $? 0 "NS suppression (VLAN $vid2)"
+
+ # Enable neighbor suppression on VLAN 10 and check that only on this
+ # VLAN NS messages are suppressed.
+- run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 vlan set vid $vid1 dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on (VLAN $vid1)"
+- run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid2 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 -d vlan show dev vx0 vid $vid2 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid2)"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
+ log_test $? 0 "ndisc6 (VLAN $vid1)"
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
+ log_test $? 0 "ndisc6 (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "NS suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 2
++ tc_check_packets $sw1 "dev vx0 egress" 102 2
+ log_test $? 0 "NS suppression (VLAN $vid2)"
+
+ # Enable neighbor suppression on the port and check that it has no
+ # effect compared to previous state.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
+ log_test $? 0 "ndisc6 (VLAN $vid1)"
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
+ log_test $? 0 "ndisc6 (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "NS suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 3
++ tc_check_packets $sw1 "dev vx0 egress" 102 3
+ log_test $? 0 "NS suppression (VLAN $vid2)"
+
+ # Disable neighbor suppression on the port and check that it has no
+ # effect compared to previous state.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress off"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
+ log_test $? 0 "ndisc6 (VLAN $vid1)"
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
+ log_test $? 0 "ndisc6 (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 1
++ tc_check_packets $sw1 "dev vx0 egress" 101 1
+ log_test $? 0 "NS suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 4
++ tc_check_packets $sw1 "dev vx0 egress" 102 4
+ log_test $? 0 "NS suppression (VLAN $vid2)"
+
+ # Disable neighbor suppression on VLAN 10 and check that NS messages
+ # are no longer suppressed on this VLAN.
+- run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress off"
+- run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress off\""
++ run_cmd "bridge -n $sw1 vlan set vid $vid1 dev vx0 neigh_suppress off"
++ run_cmd "bridge -n $sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress off\""
+ log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid1)"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
+ log_test $? 0 "ndisc6 (VLAN $vid1)"
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
+ log_test $? 0 "ndisc6 (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "NS suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 5
++ tc_check_packets $sw1 "dev vx0 egress" 102 5
+ log_test $? 0 "NS suppression (VLAN $vid2)"
+
+ # Disable per-{Port, VLAN} neighbor suppression, enable neighbor
+ # suppression on the port and check that on both VLANs NS messages are
+ # suppressed.
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress off"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress off\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_vlan_suppress off"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress off\""
+ log_test $? 0 "\"neigh_vlan_suppress\" is off"
+
+- run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on"
+- run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
++ run_cmd "bridge -n $sw1 link set dev vx0 neigh_suppress on"
++ run_cmd "bridge -n $sw1 -d link show dev vx0 | grep \"neigh_suppress on\""
+ log_test $? 0 "\"neigh_suppress\" is on"
+
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1"
+ log_test $? 0 "ndisc6 (VLAN $vid1)"
+- run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
++ run_cmd "ip netns exec $h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2"
+ log_test $? 0 "ndisc6 (VLAN $vid2)"
+
+- tc_check_packets sw1 "dev vx0 egress" 101 2
++ tc_check_packets $sw1 "dev vx0 egress" 101 2
+ log_test $? 0 "NS suppression (VLAN $vid1)"
+- tc_check_packets sw1 "dev vx0 egress" 102 5
++ tc_check_packets $sw1 "dev vx0 egress" 102 5
+ log_test $? 0 "NS suppression (VLAN $vid2)"
+ }
+
+diff --git a/tools/testing/selftests/net/test_vxlan_mdb.sh b/tools/testing/selftests/net/test_vxlan_mdb.sh
+index 31e5f0f8859d1c..be8e66abc74e11 100755
+--- a/tools/testing/selftests/net/test_vxlan_mdb.sh
++++ b/tools/testing/selftests/net/test_vxlan_mdb.sh
+@@ -984,6 +984,7 @@ encap_params_common()
+ local plen=$1; shift
+ local enc_ethtype=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local src=$1; shift
+ local mz=$1; shift
+
+@@ -1002,11 +1003,11 @@ encap_params_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep2_ip src_vni 10020"
+
+ run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Destination IP - match"
+
+- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Destination IP - no match"
+
+@@ -1019,20 +1020,20 @@ encap_params_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip dst_port 1111 src_vni 10020"
+
+ run_cmd "tc -n $ns2 filter replace dev veth0 ingress pref 1 handle 101 proto $enc_ethtype flower ip_proto udp dst_port 4789 action pass"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev veth0 ingress" 101 1
+ log_test $? 0 "Default destination port - match"
+
+- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev veth0 ingress" 101 1
+ log_test $? 0 "Default destination port - no match"
+
+ run_cmd "tc -n $ns2 filter replace dev veth0 ingress pref 1 handle 101 proto $enc_ethtype flower ip_proto udp dst_port 1111 action pass"
+- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev veth0 ingress" 101 1
+ log_test $? 0 "Non-default destination port - match"
+
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev veth0 ingress" 101 1
+ log_test $? 0 "Non-default destination port - no match"
+
+@@ -1045,11 +1046,11 @@ encap_params_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10020"
+
+ run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_key_id 10010 action pass"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Default destination VNI - match"
+
+- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Default destination VNI - no match"
+
+@@ -1057,11 +1058,11 @@ encap_params_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip vni 10010 src_vni 10020"
+
+ run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_key_id 10020 action pass"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Non-default destination VNI - match"
+
+- run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.20 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Non-default destination VNI - no match"
+
+@@ -1079,6 +1080,7 @@ encap_params_ipv4_ipv4()
+ local plen=32
+ local enc_ethtype="ip"
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local src=192.0.2.129
+
+ echo
+@@ -1086,7 +1088,7 @@ encap_params_ipv4_ipv4()
+ echo "------------------------------------------------------------------"
+
+ encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
+- $grp $src "mausezahn"
++ $grp $grp_dmac $src "mausezahn"
+ }
+
+ encap_params_ipv6_ipv4()
+@@ -1098,6 +1100,7 @@ encap_params_ipv6_ipv4()
+ local plen=32
+ local enc_ethtype="ip"
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local src=2001:db8:100::1
+
+ echo
+@@ -1105,7 +1108,7 @@ encap_params_ipv6_ipv4()
+ echo "------------------------------------------------------------------"
+
+ encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
+- $grp $src "mausezahn -6"
++ $grp $grp_dmac $src "mausezahn -6"
+ }
+
+ encap_params_ipv4_ipv6()
+@@ -1117,6 +1120,7 @@ encap_params_ipv4_ipv6()
+ local plen=128
+ local enc_ethtype="ipv6"
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local src=192.0.2.129
+
+ echo
+@@ -1124,7 +1128,7 @@ encap_params_ipv4_ipv6()
+ echo "------------------------------------------------------------------"
+
+ encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
+- $grp $src "mausezahn"
++ $grp $grp_dmac $src "mausezahn"
+ }
+
+ encap_params_ipv6_ipv6()
+@@ -1136,6 +1140,7 @@ encap_params_ipv6_ipv6()
+ local plen=128
+ local enc_ethtype="ipv6"
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local src=2001:db8:100::1
+
+ echo
+@@ -1143,7 +1148,7 @@ encap_params_ipv6_ipv6()
+ echo "------------------------------------------------------------------"
+
+ encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \
+- $grp $src "mausezahn -6"
++ $grp $grp_dmac $src "mausezahn -6"
+ }
+
+ starg_exclude_ir_common()
+@@ -1154,6 +1159,7 @@ starg_exclude_ir_common()
+ local vtep2_ip=$1; shift
+ local plen=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local valid_src=$1; shift
+ local invalid_src=$1; shift
+ local mz=$1; shift
+@@ -1175,14 +1181,14 @@ starg_exclude_ir_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $vtep2_ip src_vni 10010"
+
+ # Check that invalid source is not forwarded to any VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 0
+ log_test $? 0 "Block excluded source - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 0
+ log_test $? 0 "Block excluded source - second VTEP"
+
+ # Check that valid source is forwarded to both VTEPs.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Forward valid source - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+@@ -1192,14 +1198,14 @@ starg_exclude_ir_common()
+ run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10010"
+
+ # Check that invalid source is not forwarded to any VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Block excluded source after removal - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+ log_test $? 0 "Block excluded source after removal - second VTEP"
+
+ # Check that valid source is forwarded to the remaining VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 2
+ log_test $? 0 "Forward valid source after removal - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+@@ -1214,6 +1220,7 @@ starg_exclude_ir_ipv4_ipv4()
+ local vtep2_ip=198.51.100.200
+ local plen=32
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1222,7 +1229,7 @@ starg_exclude_ir_ipv4_ipv4()
+ echo "-------------------------------------------------------------"
+
+ starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn"
++ $grp_dmac $valid_src $invalid_src "mausezahn"
+ }
+
+ starg_exclude_ir_ipv6_ipv4()
+@@ -1233,6 +1240,7 @@ starg_exclude_ir_ipv6_ipv4()
+ local vtep2_ip=198.51.100.200
+ local plen=32
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1241,7 +1249,7 @@ starg_exclude_ir_ipv6_ipv4()
+ echo "-------------------------------------------------------------"
+
+ starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn -6"
++ $grp_dmac $valid_src $invalid_src "mausezahn -6"
+ }
+
+ starg_exclude_ir_ipv4_ipv6()
+@@ -1252,6 +1260,7 @@ starg_exclude_ir_ipv4_ipv6()
+ local vtep2_ip=2001:db8:2000::1
+ local plen=128
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1260,7 +1269,7 @@ starg_exclude_ir_ipv4_ipv6()
+ echo "-------------------------------------------------------------"
+
+ starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn"
++ $grp_dmac $valid_src $invalid_src "mausezahn"
+ }
+
+ starg_exclude_ir_ipv6_ipv6()
+@@ -1271,6 +1280,7 @@ starg_exclude_ir_ipv6_ipv6()
+ local vtep2_ip=2001:db8:2000::1
+ local plen=128
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1279,7 +1289,7 @@ starg_exclude_ir_ipv6_ipv6()
+ echo "-------------------------------------------------------------"
+
+ starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn -6"
++ $grp_dmac $valid_src $invalid_src "mausezahn -6"
+ }
+
+ starg_include_ir_common()
+@@ -1290,6 +1300,7 @@ starg_include_ir_common()
+ local vtep2_ip=$1; shift
+ local plen=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local valid_src=$1; shift
+ local invalid_src=$1; shift
+ local mz=$1; shift
+@@ -1311,14 +1322,14 @@ starg_include_ir_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $vtep2_ip src_vni 10010"
+
+ # Check that invalid source is not forwarded to any VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 0
+ log_test $? 0 "Block excluded source - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 0
+ log_test $? 0 "Block excluded source - second VTEP"
+
+ # Check that valid source is forwarded to both VTEPs.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Forward valid source - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+@@ -1328,14 +1339,14 @@ starg_include_ir_common()
+ run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10010"
+
+ # Check that invalid source is not forwarded to any VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Block excluded source after removal - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+ log_test $? 0 "Block excluded source after removal - second VTEP"
+
+ # Check that valid source is forwarded to the remaining VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 2
+ log_test $? 0 "Forward valid source after removal - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+@@ -1350,6 +1361,7 @@ starg_include_ir_ipv4_ipv4()
+ local vtep2_ip=198.51.100.200
+ local plen=32
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1358,7 +1370,7 @@ starg_include_ir_ipv4_ipv4()
+ echo "-------------------------------------------------------------"
+
+ starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn"
++ $grp_dmac $valid_src $invalid_src "mausezahn"
+ }
+
+ starg_include_ir_ipv6_ipv4()
+@@ -1369,6 +1381,7 @@ starg_include_ir_ipv6_ipv4()
+ local vtep2_ip=198.51.100.200
+ local plen=32
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1377,7 +1390,7 @@ starg_include_ir_ipv6_ipv4()
+ echo "-------------------------------------------------------------"
+
+ starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn -6"
++ $grp_dmac $valid_src $invalid_src "mausezahn -6"
+ }
+
+ starg_include_ir_ipv4_ipv6()
+@@ -1388,6 +1401,7 @@ starg_include_ir_ipv4_ipv6()
+ local vtep2_ip=2001:db8:2000::1
+ local plen=128
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1396,7 +1410,7 @@ starg_include_ir_ipv4_ipv6()
+ echo "-------------------------------------------------------------"
+
+ starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn"
++ $grp_dmac $valid_src $invalid_src "mausezahn"
+ }
+
+ starg_include_ir_ipv6_ipv6()
+@@ -1407,6 +1421,7 @@ starg_include_ir_ipv6_ipv6()
+ local vtep2_ip=2001:db8:2000::1
+ local plen=128
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1415,7 +1430,7 @@ starg_include_ir_ipv6_ipv6()
+ echo "-------------------------------------------------------------"
+
+ starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \
+- $valid_src $invalid_src "mausezahn -6"
++ $grp_dmac $valid_src $invalid_src "mausezahn -6"
+ }
+
+ starg_exclude_p2mp_common()
+@@ -1425,6 +1440,7 @@ starg_exclude_p2mp_common()
+ local mcast_grp=$1; shift
+ local plen=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local valid_src=$1; shift
+ local invalid_src=$1; shift
+ local mz=$1; shift
+@@ -1442,12 +1458,12 @@ starg_exclude_p2mp_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $mcast_grp src_vni 10010 via veth0"
+
+ # Check that invalid source is not forwarded.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 0
+ log_test $? 0 "Block excluded source"
+
+ # Check that valid source is forwarded.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Forward valid source"
+
+@@ -1455,7 +1471,7 @@ starg_exclude_p2mp_common()
+ run_cmd "ip -n $ns2 address del $mcast_grp/$plen dev veth0"
+
+ # Check that valid source is not received anymore.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Receive of valid source after removal from group"
+ }
+@@ -1467,6 +1483,7 @@ starg_exclude_p2mp_ipv4_ipv4()
+ local mcast_grp=238.1.1.1
+ local plen=32
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1474,7 +1491,7 @@ starg_exclude_p2mp_ipv4_ipv4()
+ echo "Data path: (*, G) EXCLUDE - P2MP - IPv4 overlay / IPv4 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn"
+ }
+
+@@ -1485,6 +1502,7 @@ starg_exclude_p2mp_ipv6_ipv4()
+ local mcast_grp=238.1.1.1
+ local plen=32
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1492,7 +1510,7 @@ starg_exclude_p2mp_ipv6_ipv4()
+ echo "Data path: (*, G) EXCLUDE - P2MP - IPv6 overlay / IPv4 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn -6"
+ }
+
+@@ -1503,6 +1521,7 @@ starg_exclude_p2mp_ipv4_ipv6()
+ local mcast_grp=ff0e::2
+ local plen=128
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1510,7 +1529,7 @@ starg_exclude_p2mp_ipv4_ipv6()
+ echo "Data path: (*, G) EXCLUDE - P2MP - IPv4 overlay / IPv6 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn"
+ }
+
+@@ -1521,6 +1540,7 @@ starg_exclude_p2mp_ipv6_ipv6()
+ local mcast_grp=ff0e::2
+ local plen=128
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1528,7 +1548,7 @@ starg_exclude_p2mp_ipv6_ipv6()
+ echo "Data path: (*, G) EXCLUDE - P2MP - IPv6 overlay / IPv6 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn -6"
+ }
+
+@@ -1539,6 +1559,7 @@ starg_include_p2mp_common()
+ local mcast_grp=$1; shift
+ local plen=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local valid_src=$1; shift
+ local invalid_src=$1; shift
+ local mz=$1; shift
+@@ -1556,12 +1577,12 @@ starg_include_p2mp_common()
+ run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $mcast_grp src_vni 10010 via veth0"
+
+ # Check that invalid source is not forwarded.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 0
+ log_test $? 0 "Block excluded source"
+
+ # Check that valid source is forwarded.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Forward valid source"
+
+@@ -1569,7 +1590,7 @@ starg_include_p2mp_common()
+ run_cmd "ip -n $ns2 address del $mcast_grp/$plen dev veth0"
+
+ # Check that valid source is not received anymore.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Receive of valid source after removal from group"
+ }
+@@ -1581,6 +1602,7 @@ starg_include_p2mp_ipv4_ipv4()
+ local mcast_grp=238.1.1.1
+ local plen=32
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1588,7 +1610,7 @@ starg_include_p2mp_ipv4_ipv4()
+ echo "Data path: (*, G) INCLUDE - P2MP - IPv4 overlay / IPv4 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn"
+ }
+
+@@ -1599,6 +1621,7 @@ starg_include_p2mp_ipv6_ipv4()
+ local mcast_grp=238.1.1.1
+ local plen=32
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1606,7 +1629,7 @@ starg_include_p2mp_ipv6_ipv4()
+ echo "Data path: (*, G) INCLUDE - P2MP - IPv6 overlay / IPv4 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn -6"
+ }
+
+@@ -1617,6 +1640,7 @@ starg_include_p2mp_ipv4_ipv6()
+ local mcast_grp=ff0e::2
+ local plen=128
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local valid_src=192.0.2.129
+ local invalid_src=192.0.2.145
+
+@@ -1624,7 +1648,7 @@ starg_include_p2mp_ipv4_ipv6()
+ echo "Data path: (*, G) INCLUDE - P2MP - IPv4 overlay / IPv6 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn"
+ }
+
+@@ -1635,6 +1659,7 @@ starg_include_p2mp_ipv6_ipv6()
+ local mcast_grp=ff0e::2
+ local plen=128
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local valid_src=2001:db8:100::1
+ local invalid_src=2001:db8:200::1
+
+@@ -1642,7 +1667,7 @@ starg_include_p2mp_ipv6_ipv6()
+ echo "Data path: (*, G) INCLUDE - P2MP - IPv6 overlay / IPv6 underlay"
+ echo "---------------------------------------------------------------"
+
+- starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \
++ starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp $grp_dmac \
+ $valid_src $invalid_src "mausezahn -6"
+ }
+
+@@ -1654,6 +1679,7 @@ egress_vni_translation_common()
+ local plen=$1; shift
+ local proto=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local src=$1; shift
+ local mz=$1; shift
+
+@@ -1689,20 +1715,20 @@ egress_vni_translation_common()
+ # Make sure that packets sent from the first VTEP over VLAN 10 are
+ # received by the SVI corresponding to the L3VNI (14000 / VLAN 4000) on
+ # the second VTEP, since it is configured as PVID.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev br0.4000 ingress" 101 1
+ log_test $? 0 "Egress VNI translation - PVID configured"
+
+ # Remove PVID flag from VLAN 4000 on the second VTEP and make sure
+ # packets are no longer received by the SVI interface.
+ run_cmd "bridge -n $ns2 vlan add vid 4000 dev vx0"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev br0.4000 ingress" 101 1
+ log_test $? 0 "Egress VNI translation - no PVID configured"
+
+ # Reconfigure the PVID and make sure packets are received again.
+ run_cmd "bridge -n $ns2 vlan add vid 4000 dev vx0 pvid"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev br0.4000 ingress" 101 2
+ log_test $? 0 "Egress VNI translation - PVID reconfigured"
+ }
+@@ -1715,6 +1741,7 @@ egress_vni_translation_ipv4_ipv4()
+ local plen=32
+ local proto="ipv4"
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local src=192.0.2.129
+
+ echo
+@@ -1722,7 +1749,7 @@ egress_vni_translation_ipv4_ipv4()
+ echo "----------------------------------------------------------------"
+
+ egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
+- $src "mausezahn"
++ $grp_dmac $src "mausezahn"
+ }
+
+ egress_vni_translation_ipv6_ipv4()
+@@ -1733,6 +1760,7 @@ egress_vni_translation_ipv6_ipv4()
+ local plen=32
+ local proto="ipv6"
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local src=2001:db8:100::1
+
+ echo
+@@ -1740,7 +1768,7 @@ egress_vni_translation_ipv6_ipv4()
+ echo "----------------------------------------------------------------"
+
+ egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
+- $src "mausezahn -6"
++ $grp_dmac $src "mausezahn -6"
+ }
+
+ egress_vni_translation_ipv4_ipv6()
+@@ -1751,6 +1779,7 @@ egress_vni_translation_ipv4_ipv6()
+ local plen=128
+ local proto="ipv4"
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local src=192.0.2.129
+
+ echo
+@@ -1758,7 +1787,7 @@ egress_vni_translation_ipv4_ipv6()
+ echo "----------------------------------------------------------------"
+
+ egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
+- $src "mausezahn"
++ $grp_dmac $src "mausezahn"
+ }
+
+ egress_vni_translation_ipv6_ipv6()
+@@ -1769,6 +1798,7 @@ egress_vni_translation_ipv6_ipv6()
+ local plen=128
+ local proto="ipv6"
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local src=2001:db8:100::1
+
+ echo
+@@ -1776,7 +1806,7 @@ egress_vni_translation_ipv6_ipv6()
+ echo "----------------------------------------------------------------"
+
+ egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \
+- $src "mausezahn -6"
++ $grp_dmac $src "mausezahn -6"
+ }
+
+ all_zeros_mdb_common()
+@@ -1789,12 +1819,18 @@ all_zeros_mdb_common()
+ local vtep4_ip=$1; shift
+ local plen=$1; shift
+ local ipv4_grp=239.1.1.1
++ local ipv4_grp_dmac=01:00:5e:01:01:01
+ local ipv4_unreg_grp=239.2.2.2
++ local ipv4_unreg_grp_dmac=01:00:5e:02:02:02
+ local ipv4_ll_grp=224.0.0.100
++ local ipv4_ll_grp_dmac=01:00:5e:00:00:64
+ local ipv4_src=192.0.2.129
+ local ipv6_grp=ff0e::1
++ local ipv6_grp_dmac=33:33:00:00:00:01
+ local ipv6_unreg_grp=ff0e::2
++ local ipv6_unreg_grp_dmac=33:33:00:00:00:02
+ local ipv6_ll_grp=ff02::1
++ local ipv6_ll_grp_dmac=33:33:00:00:00:01
+ local ipv6_src=2001:db8:100::1
+
+ # Install all-zeros (catchall) MDB entries for IPv4 and IPv6 traffic
+@@ -1830,7 +1866,7 @@ all_zeros_mdb_common()
+
+ # Send registered IPv4 multicast and make sure it only arrives to the
+ # first VTEP.
+- run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b $ipv4_grp_dmac -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "Registered IPv4 multicast - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 0
+@@ -1838,7 +1874,7 @@ all_zeros_mdb_common()
+
+ # Send unregistered IPv4 multicast that is not link-local and make sure
+ # it arrives to the first and second VTEPs.
+- run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b $ipv4_unreg_grp_dmac -A $ipv4_src -B $ipv4_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 2
+ log_test $? 0 "Unregistered IPv4 multicast - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+@@ -1846,7 +1882,7 @@ all_zeros_mdb_common()
+
+ # Send IPv4 link-local multicast traffic and make sure it does not
+ # arrive to any VTEP.
+- run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b $ipv4_ll_grp_dmac -A $ipv4_src -B $ipv4_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 2
+ log_test $? 0 "Link-local IPv4 multicast - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 1
+@@ -1881,7 +1917,7 @@ all_zeros_mdb_common()
+
+ # Send registered IPv6 multicast and make sure it only arrives to the
+ # third VTEP.
+- run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b $ipv6_grp_dmac -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 103 1
+ log_test $? 0 "Registered IPv6 multicast - third VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 104 0
+@@ -1889,7 +1925,7 @@ all_zeros_mdb_common()
+
+ # Send unregistered IPv6 multicast that is not link-local and make sure
+ # it arrives to the third and fourth VTEPs.
+- run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b $ipv6_unreg_grp_dmac -A $ipv6_src -B $ipv6_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 103 2
+ log_test $? 0 "Unregistered IPv6 multicast - third VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 104 1
+@@ -1897,7 +1933,7 @@ all_zeros_mdb_common()
+
+ # Send IPv6 link-local multicast traffic and make sure it does not
+ # arrive to any VTEP.
+- run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b $ipv6_ll_grp_dmac -A $ipv6_src -B $ipv6_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 103 2
+ log_test $? 0 "Link-local IPv6 multicast - third VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 104 1
+@@ -1972,6 +2008,7 @@ mdb_fdb_common()
+ local plen=$1; shift
+ local proto=$1; shift
+ local grp=$1; shift
++ local grp_dmac=$1; shift
+ local src=$1; shift
+ local mz=$1; shift
+
+@@ -1995,7 +2032,7 @@ mdb_fdb_common()
+
+ # Send IP multicast traffic and make sure it is forwarded by the MDB
+ # and only arrives to the first VTEP.
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "IP multicast - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 0
+@@ -2012,7 +2049,7 @@ mdb_fdb_common()
+ # Remove the MDB entry and make sure that IP multicast is now forwarded
+ # by the FDB to the second VTEP.
+ run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10010"
+- run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
++ run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b $grp_dmac -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q"
+ tc_check_packets "$ns2" "dev vx0 ingress" 101 1
+ log_test $? 0 "IP multicast after removal - first VTEP"
+ tc_check_packets "$ns2" "dev vx0 ingress" 102 2
+@@ -2028,14 +2065,15 @@ mdb_fdb_ipv4_ipv4()
+ local plen=32
+ local proto="ipv4"
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local src=192.0.2.129
+
+ echo
+ echo "Data path: MDB with FDB - IPv4 overlay / IPv4 underlay"
+ echo "------------------------------------------------------"
+
+- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
+- "mausezahn"
++ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
++ $grp_dmac $src "mausezahn"
+ }
+
+ mdb_fdb_ipv6_ipv4()
+@@ -2047,14 +2085,15 @@ mdb_fdb_ipv6_ipv4()
+ local plen=32
+ local proto="ipv6"
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local src=2001:db8:100::1
+
+ echo
+ echo "Data path: MDB with FDB - IPv6 overlay / IPv4 underlay"
+ echo "------------------------------------------------------"
+
+- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
+- "mausezahn -6"
++ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
++ $grp_dmac $src "mausezahn -6"
+ }
+
+ mdb_fdb_ipv4_ipv6()
+@@ -2066,14 +2105,15 @@ mdb_fdb_ipv4_ipv6()
+ local plen=128
+ local proto="ipv4"
+ local grp=239.1.1.1
++ local grp_dmac=01:00:5e:01:01:01
+ local src=192.0.2.129
+
+ echo
+ echo "Data path: MDB with FDB - IPv4 overlay / IPv6 underlay"
+ echo "------------------------------------------------------"
+
+- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
+- "mausezahn"
++ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
++ $grp_dmac $src "mausezahn"
+ }
+
+ mdb_fdb_ipv6_ipv6()
+@@ -2085,14 +2125,15 @@ mdb_fdb_ipv6_ipv6()
+ local plen=128
+ local proto="ipv6"
+ local grp=ff0e::1
++ local grp_dmac=33:33:00:00:00:01
+ local src=2001:db8:100::1
+
+ echo
+ echo "Data path: MDB with FDB - IPv6 overlay / IPv6 underlay"
+ echo "------------------------------------------------------"
+
+- mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \
+- "mausezahn -6"
++ mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp \
++ $grp_dmac $src "mausezahn -6"
+ }
+
+ mdb_grp1_loop()
+@@ -2127,7 +2168,9 @@ mdb_torture_common()
+ local vtep1_ip=$1; shift
+ local vtep2_ip=$1; shift
+ local grp1=$1; shift
++ local grp1_dmac=$1; shift
+ local grp2=$1; shift
++ local grp2_dmac=$1; shift
+ local src=$1; shift
+ local mz=$1; shift
+ local pid1
+@@ -2152,9 +2195,9 @@ mdb_torture_common()
+ pid1=$!
+ mdb_grp2_loop $ns1 $vtep1_ip $vtep2_ip $grp2 &
+ pid2=$!
+- ip netns exec $ns1 $mz br0.10 -A $src -B $grp1 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
++ ip netns exec $ns1 $mz br0.10 -a own -b $grp1_dmac -A $src -B $grp1 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
+ pid3=$!
+- ip netns exec $ns1 $mz br0.10 -A $src -B $grp2 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
++ ip netns exec $ns1 $mz br0.10 -a own -b $grp2_dmac -A $src -B $grp2 -t udp sp=12345,dp=54321 -p 100 -c 0 -q &
+ pid4=$!
+
+ sleep 30
+@@ -2170,15 +2213,17 @@ mdb_torture_ipv4_ipv4()
+ local vtep1_ip=198.51.100.100
+ local vtep2_ip=198.51.100.200
+ local grp1=239.1.1.1
++ local grp1_dmac=01:00:5e:01:01:01
+ local grp2=239.2.2.2
++ local grp2_dmac=01:00:5e:02:02:02
+ local src=192.0.2.129
+
+ echo
+ echo "Data path: MDB torture test - IPv4 overlay / IPv4 underlay"
+ echo "----------------------------------------------------------"
+
+- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
+- "mausezahn"
++ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
++ $grp2_dmac $src "mausezahn"
+ }
+
+ mdb_torture_ipv6_ipv4()
+@@ -2187,15 +2232,17 @@ mdb_torture_ipv6_ipv4()
+ local vtep1_ip=198.51.100.100
+ local vtep2_ip=198.51.100.200
+ local grp1=ff0e::1
++ local grp1_dmac=33:33:00:00:00:01
+ local grp2=ff0e::2
++ local grp2_dmac=33:33:00:00:00:02
+ local src=2001:db8:100::1
+
+ echo
+ echo "Data path: MDB torture test - IPv6 overlay / IPv4 underlay"
+ echo "----------------------------------------------------------"
+
+- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
+- "mausezahn -6"
++ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
++ $grp2_dmac $src "mausezahn -6"
+ }
+
+ mdb_torture_ipv4_ipv6()
+@@ -2204,15 +2251,17 @@ mdb_torture_ipv4_ipv6()
+ local vtep1_ip=2001:db8:1000::1
+ local vtep2_ip=2001:db8:2000::1
+ local grp1=239.1.1.1
++ local grp1_dmac=01:00:5e:01:01:01
+ local grp2=239.2.2.2
++ local grp2_dmac=01:00:5e:02:02:02
+ local src=192.0.2.129
+
+ echo
+ echo "Data path: MDB torture test - IPv4 overlay / IPv6 underlay"
+ echo "----------------------------------------------------------"
+
+- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
+- "mausezahn"
++ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
++ $grp2_dmac $src "mausezahn"
+ }
+
+ mdb_torture_ipv6_ipv6()
+@@ -2221,15 +2270,17 @@ mdb_torture_ipv6_ipv6()
+ local vtep1_ip=2001:db8:1000::1
+ local vtep2_ip=2001:db8:2000::1
+ local grp1=ff0e::1
++ local grp1_dmac=33:33:00:00:00:01
+ local grp2=ff0e::2
++ local grp2_dmac=33:33:00:00:00:02
+ local src=2001:db8:100::1
+
+ echo
+ echo "Data path: MDB torture test - IPv6 overlay / IPv6 underlay"
+ echo "----------------------------------------------------------"
+
+- mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \
+- "mausezahn -6"
++ mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp1_dmac $grp2 \
++ $grp2_dmac $src "mausezahn -6"
+ }
+
+ ################################################################################
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 464853a7f98290..bc36c91c4480f5 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -707,6 +707,20 @@ TEST_F(tls, splice_from_pipe)
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+ }
+
++TEST_F(tls, splice_more)
++{
++ unsigned int f = SPLICE_F_NONBLOCK | SPLICE_F_MORE | SPLICE_F_GIFT;
++ int send_len = TLS_PAYLOAD_MAX_LEN;
++ char mem_send[TLS_PAYLOAD_MAX_LEN];
++ int i, send_pipe = 1;
++ int p[2];
++
++ ASSERT_GE(pipe(p), 0);
++ EXPECT_GE(write(p[1], mem_send, send_len), 0);
++ for (i = 0; i < 32; i++)
++ EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, send_pipe, f), 1);
++}
++
+ TEST_F(tls, splice_from_pipe2)
+ {
+ int send_len = 16000;
+@@ -988,12 +1002,12 @@ TEST_F(tls, recv_partial)
+
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
+- MSG_WAITALL), -1);
++ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first),
++ MSG_WAITALL), strlen(test_str_first));
+ EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
+ memset(recv_mem, 0, sizeof(recv_mem));
+- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
+- MSG_WAITALL), -1);
++ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second),
++ MSG_WAITALL), strlen(test_str_second));
+ EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
+ 0);
+ }
+diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
+index 0c743752669af7..53341c8135e889 100755
+--- a/tools/testing/selftests/net/udpgro.sh
++++ b/tools/testing/selftests/net/udpgro.sh
+@@ -3,9 +3,11 @@
+ #
+ # Run a series of udpgro functional tests.
+
++source net_helper.sh
++
+ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+-BPF_FILE="../bpf/xdp_dummy.bpf.o"
++BPF_FILE="xdp_dummy.o"
+
+ # set global exit status, but never reset nonzero one.
+ check_err()
+@@ -44,18 +46,19 @@ run_one() {
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local -r rx_args=${all#*rx}
++ local ret=0
+
+ cfg_veth
+
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
+- echo "ok" || \
+- echo "failed" &
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
++ local PID1=$!
+
+- # Hack: let bg programs complete the startup
+- sleep 0.2
++ wait_local_port_listen ${PEER_NS} 8000 udp
+ ./udpgso_bench_tx ${tx_args}
+- ret=$?
+- wait $(jobs -p)
++ check_err $?
++ wait ${PID1}
++ check_err $?
++ [ "$ret" -eq 0 ] && echo "ok" || echo "failed"
+ return $ret
+ }
+
+@@ -72,6 +75,7 @@ run_one_nat() {
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local -r rx_args=${all#*rx}
++ local ret=0
+
+ if [[ ${tx_args} = *-4* ]]; then
+ ipt_cmd=iptables
+@@ -92,16 +96,17 @@ run_one_nat() {
+ # ... so that GRO will match the UDP_GRO enabled socket, but packets
+ # will land on the 'plain' one
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
+- pid=$!
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
+- echo "ok" || \
+- echo "failed"&
++ local PID1=$!
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
++ local PID2=$!
+
+- sleep 0.1
++ wait_local_port_listen "${PEER_NS}" 8000 udp
+ ./udpgso_bench_tx ${tx_args}
+- ret=$?
+- kill -INT $pid
+- wait $(jobs -p)
++ check_err $?
++ kill -INT ${PID1}
++ wait ${PID2}
++ check_err $?
++ [ "$ret" -eq 0 ] && echo "ok" || echo "failed"
+ return $ret
+ }
+
+@@ -110,22 +115,26 @@ run_one_2sock() {
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local -r rx_args=${all#*rx}
++ local ret=0
+
+ cfg_veth
+
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
+- echo "ok" || \
+- echo "failed" &
++ local PID1=$!
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
++ local PID2=$!
+
+- # Hack: let bg programs complete the startup
+- sleep 0.2
++ wait_local_port_listen "${PEER_NS}" 12345 udp
+ ./udpgso_bench_tx ${tx_args} -p 12345
+- sleep 0.1
+- # first UDP GSO socket should be closed at this point
++ check_err $?
++ wait_local_port_listen "${PEER_NS}" 8000 udp
+ ./udpgso_bench_tx ${tx_args}
+- ret=$?
+- wait $(jobs -p)
++ check_err $?
++ wait ${PID1}
++ check_err $?
++ wait ${PID2}
++ check_err $?
++ [ "$ret" -eq 0 ] && echo "ok" || echo "failed"
+ return $ret
+ }
+
+@@ -198,7 +207,7 @@ run_all() {
+ }
+
+ if [ ! -f ${BPF_FILE} ]; then
+- echo "Missing ${BPF_FILE}. Build bpf selftest first"
++ echo "Missing ${BPF_FILE}. Run 'make' first"
+ exit -1
+ fi
+
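The udpgro scripts now source net_helper.sh and block on
wait_local_port_listen instead of the old fixed sleeps, removing the
startup race. A hedged sketch of such a helper, assuming it polls the
namespace's /proc/net tables for the hex-encoded port (the in-tree
version may differ in detail):

wait_local_port_listen() {
	local ns="$1" port="$2" proto="$3"
	local port_hex i
	port_hex=$(printf "%04X" "$port")
	for i in $(seq 50); do
		# a bound socket shows up as ":<hex port> " in /proc/net/udp{,6}
		ip netns exec "$ns" cat "/proc/net/$proto" "/proc/net/${proto}6" \
			2>/dev/null | grep -q ":$port_hex " && return 0
		sleep 0.1
	done
	return 1
}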
+diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
+index 894972877e8b0a..7080eae5312b2f 100755
+--- a/tools/testing/selftests/net/udpgro_bench.sh
++++ b/tools/testing/selftests/net/udpgro_bench.sh
+@@ -3,9 +3,11 @@
+ #
+ # Run a series of udpgro benchmarks
+
++source net_helper.sh
++
+ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+-BPF_FILE="../bpf/xdp_dummy.bpf.o"
++BPF_FILE="xdp_dummy.o"
+
+ cleanup() {
+ local -r jobs="$(jobs -p)"
+@@ -40,8 +42,7 @@ run_one() {
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
+
+- # Hack: let bg programs complete the startup
+- sleep 0.2
++ wait_local_port_listen "${PEER_NS}" 8000 udp
+ ./udpgso_bench_tx ${tx_args}
+ }
+
+@@ -83,7 +84,7 @@ run_all() {
+ }
+
+ if [ ! -f ${BPF_FILE} ]; then
+- echo "Missing ${BPF_FILE}. Build bpf selftest first"
++ echo "Missing ${BPF_FILE}. Run 'make' first"
+ exit -1
+ fi
+
+diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
+index 0a6359bed0b926..e1ff645bd3d1c7 100755
+--- a/tools/testing/selftests/net/udpgro_frglist.sh
++++ b/tools/testing/selftests/net/udpgro_frglist.sh
+@@ -3,9 +3,11 @@
+ #
+ # Run a series of udpgro benchmarks
+
++source net_helper.sh
++
+ readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+-BPF_FILE="../bpf/xdp_dummy.bpf.o"
++BPF_FILE="xdp_dummy.o"
+
+ cleanup() {
+ local -r jobs="$(jobs -p)"
+@@ -45,8 +47,7 @@ run_one() {
+ echo ${rx_args}
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+
+- # Hack: let bg programs complete the startup
+- sleep 0.2
++ wait_local_port_listen "${PEER_NS}" 8000 udp
+ ./udpgso_bench_tx ${tx_args}
+ }
+
+@@ -84,12 +85,12 @@ run_all() {
+ }
+
+ if [ ! -f ${BPF_FILE} ]; then
+- echo "Missing ${BPF_FILE}. Build bpf selftest first"
++ echo "Missing ${BPF_FILE}. Run 'make' first"
+ exit -1
+ fi
+
+ if [ ! -f nat6to4.o ]; then
+- echo "Missing nat6to4 helper. Build bpf nat6to4.o selftest first"
++ echo "Missing nat6to4 helper. Run 'make' first"
+ exit -1
+ fi
+
+diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
+index c079565add3922..f4549e6894dd9a 100755
+--- a/tools/testing/selftests/net/udpgro_fwd.sh
++++ b/tools/testing/selftests/net/udpgro_fwd.sh
+@@ -1,7 +1,9 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+-BPF_FILE="../bpf/xdp_dummy.bpf.o"
++source net_helper.sh
++
++BPF_FILE="xdp_dummy.o"
+ readonly BASE="ns-$(mktemp -u XXXXXX)"
+ readonly SRC=2
+ readonly DST=1
+@@ -37,6 +39,10 @@ create_ns() {
+ for ns in $NS_SRC $NS_DST; do
+ ip netns add $ns
+ ip -n $ns link set dev lo up
++
++ # disable route solicitations to decrease 'noise' traffic
++ ip netns exec $ns sysctl -qw net.ipv6.conf.default.router_solicitations=0
++ ip netns exec $ns sysctl -qw net.ipv6.conf.all.router_solicitations=0
+ done
+
+ ip link add name veth$SRC type veth peer name veth$DST
+@@ -78,6 +84,12 @@ create_vxlan_pair() {
+ create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V6$((3 - $ns)) vxlan6$ns 6
+ ip -n $BASE$ns addr add dev vxlan6$ns $OL_NET_V6$ns/24 nodad
+ done
++
++	# preload the neighbour cache to avoid some noisy traffic
++ local addr_dst=$(ip -j -n $BASE$DST link show dev vxlan6$DST |jq -r '.[]["address"]')
++ local addr_src=$(ip -j -n $BASE$SRC link show dev vxlan6$SRC |jq -r '.[]["address"]')
++ ip -n $BASE$DST neigh add dev vxlan6$DST lladdr $addr_src $OL_NET_V6$SRC
++ ip -n $BASE$SRC neigh add dev vxlan6$SRC lladdr $addr_dst $OL_NET_V6$DST
+ }
+
+ is_ipv6() {
+@@ -117,9 +129,9 @@ run_test() {
+ # not enable GRO
+ ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 4789
+ ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 8000
+- ip netns exec $NS_DST ./udpgso_bench_rx -C 1000 -R 10 -n 10 -l 1300 $rx_args &
++ ip netns exec $NS_DST ./udpgso_bench_rx -C 2000 -R 100 -n 10 -l 1300 $rx_args &
+ local spid=$!
+- sleep 0.1
++ wait_local_port_listen "$NS_DST" 8000 udp
+ ip netns exec $NS_SRC ./udpgso_bench_tx $family -M 1 -s 13000 -S 1300 -D $dst
+ local retc=$?
+ wait $spid
+@@ -166,9 +178,9 @@ run_bench() {
+ # bind the sender and the receiver to different CPUs to try
+ # get reproducible results
+ ip netns exec $NS_DST bash -c "echo 2 > /sys/class/net/veth$DST/queues/rx-0/rps_cpus"
+- ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 1000 -R 10 &
++ ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 2000 -R 100 &
+ local spid=$!
+- sleep 0.1
++ wait_local_port_listen "$NS_DST" 8000 udp
+ ip netns exec $NS_SRC taskset 0x1 ./udpgso_bench_tx $family -l 3 -S 1300 -D $dst
+ local retc=$?
+ wait $spid
+@@ -229,7 +241,7 @@ for family in 4 6; do
+
+ create_vxlan_pair
+ ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on
+- run_test "GRO frag list over UDP tunnel" $OL_NET$DST 1 1
++ run_test "GRO frag list over UDP tunnel" $OL_NET$DST 10 10
+ cleanup
+
+ # use NAT to circumvent GRO FWD check
+@@ -242,13 +254,7 @@ for family in 4 6; do
+ # load arp cache before running the test to reduce the amount of
+ # stray traffic on top of the UDP tunnel
+ ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
+- run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
+- cleanup
+-
+- create_vxlan_pair
+- run_bench "UDP tunnel fwd perf" $OL_NET$DST
+- ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on
+- run_bench "UDP tunnel GRO fwd perf" $OL_NET$DST
++ run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
+ cleanup
+ done
+
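The neighbour preload above reads each vxlan device's MAC via iproute2's
JSON output. An illustrative invocation (the address shown is made up):

$ ip -j -n "$BASE$DST" link show dev vxlan6$DST | jq -r '.[]["address"]'
7a:1c:4e:90:2b:7f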
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index 7badaf215de288..b02080d09fbc05 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -34,7 +34,7 @@
+ #endif
+
+ #ifndef UDP_MAX_SEGMENTS
+-#define UDP_MAX_SEGMENTS (1 << 6UL)
++#define UDP_MAX_SEGMENTS (1 << 7UL)
+ #endif
+
+ #define CONST_MTU_TEST 1500
+diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
+index f35a924d4a3030..1cbadd267c963c 100644
+--- a/tools/testing/selftests/net/udpgso_bench_rx.c
++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
+@@ -375,7 +375,7 @@ static void do_recv(void)
+ do_flush_udp(fd);
+
+ tnow = gettimeofday_ms();
+- if (tnow > treport) {
++ if (!cfg_expected_pkt_nr && tnow > treport) {
+ if (packets)
+ fprintf(stderr,
+ "%s rx: %6lu MB/s %8lu calls/s\n",
+diff --git a/tools/testing/selftests/net/unicast_extensions.sh b/tools/testing/selftests/net/unicast_extensions.sh
+index 2d10ccac898a74..f52aa5f7da5240 100755
+--- a/tools/testing/selftests/net/unicast_extensions.sh
++++ b/tools/testing/selftests/net/unicast_extensions.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # By Seth Schoen (c) 2021, for the IPv4 Unicast Extensions Project
+@@ -28,8 +28,7 @@
+ # These tests provide an easy way to flip the expected result of any
+ # of these behaviors for testing kernel patches that change them.
+
+-# Kselftest framework requirement - SKIP code is 4.
+-ksft_skip=4
++source lib.sh
+
+ # nettest can be run from PATH or from same directory as this selftest
+ if ! which nettest >/dev/null; then
+@@ -61,20 +60,20 @@ _do_segmenttest(){
+ # foo --- bar
+ # Arguments: ip_a ip_b prefix_length test_description
+ #
+- # Caller must set up foo-ns and bar-ns namespaces
++ # Caller must set up $foo_ns and $bar_ns namespaces
+ # containing linked veth devices foo and bar,
+ # respectively.
+
+- ip -n foo-ns address add $1/$3 dev foo || return 1
+- ip -n foo-ns link set foo up || return 1
+- ip -n bar-ns address add $2/$3 dev bar || return 1
+- ip -n bar-ns link set bar up || return 1
++ ip -n $foo_ns address add $1/$3 dev foo || return 1
++ ip -n $foo_ns link set foo up || return 1
++ ip -n $bar_ns address add $2/$3 dev bar || return 1
++ ip -n $bar_ns link set bar up || return 1
+
+- ip netns exec foo-ns timeout 2 ping -c 1 $2 || return 1
+- ip netns exec bar-ns timeout 2 ping -c 1 $1 || return 1
++ ip netns exec $foo_ns timeout 2 ping -c 1 $2 || return 1
++ ip netns exec $bar_ns timeout 2 ping -c 1 $1 || return 1
+
+- nettest -B -N bar-ns -O foo-ns -r $1 || return 1
+- nettest -B -N foo-ns -O bar-ns -r $2 || return 1
++ nettest -B -N $bar_ns -O $foo_ns -r $1 || return 1
++ nettest -B -N $foo_ns -O $bar_ns -r $2 || return 1
+
+ return 0
+ }
+@@ -88,31 +87,31 @@ _do_route_test(){
+ # Arguments: foo_ip foo1_ip bar1_ip bar_ip prefix_len test_description
+ # Displays test result and returns success or failure.
+
+- # Caller must set up foo-ns, bar-ns, and router-ns
++ # Caller must set up $foo_ns, $bar_ns, and $router_ns
+ # containing linked veth devices foo-foo1, bar1-bar
+- # (foo in foo-ns, foo1 and bar1 in router-ns, and
+- # bar in bar-ns).
+-
+- ip -n foo-ns address add $1/$5 dev foo || return 1
+- ip -n foo-ns link set foo up || return 1
+- ip -n foo-ns route add default via $2 || return 1
+- ip -n bar-ns address add $4/$5 dev bar || return 1
+- ip -n bar-ns link set bar up || return 1
+- ip -n bar-ns route add default via $3 || return 1
+- ip -n router-ns address add $2/$5 dev foo1 || return 1
+- ip -n router-ns link set foo1 up || return 1
+- ip -n router-ns address add $3/$5 dev bar1 || return 1
+- ip -n router-ns link set bar1 up || return 1
+-
+- echo 1 | ip netns exec router-ns tee /proc/sys/net/ipv4/ip_forward
+-
+- ip netns exec foo-ns timeout 2 ping -c 1 $2 || return 1
+- ip netns exec foo-ns timeout 2 ping -c 1 $4 || return 1
+- ip netns exec bar-ns timeout 2 ping -c 1 $3 || return 1
+- ip netns exec bar-ns timeout 2 ping -c 1 $1 || return 1
+-
+- nettest -B -N bar-ns -O foo-ns -r $1 || return 1
+- nettest -B -N foo-ns -O bar-ns -r $4 || return 1
++ # (foo in $foo_ns, foo1 and bar1 in $router_ns, and
++ # bar in $bar_ns).
++
++ ip -n $foo_ns address add $1/$5 dev foo || return 1
++ ip -n $foo_ns link set foo up || return 1
++ ip -n $foo_ns route add default via $2 || return 1
++ ip -n $bar_ns address add $4/$5 dev bar || return 1
++ ip -n $bar_ns link set bar up || return 1
++ ip -n $bar_ns route add default via $3 || return 1
++ ip -n $router_ns address add $2/$5 dev foo1 || return 1
++ ip -n $router_ns link set foo1 up || return 1
++ ip -n $router_ns address add $3/$5 dev bar1 || return 1
++ ip -n $router_ns link set bar1 up || return 1
++
++ echo 1 | ip netns exec $router_ns tee /proc/sys/net/ipv4/ip_forward
++
++ ip netns exec $foo_ns timeout 2 ping -c 1 $2 || return 1
++ ip netns exec $foo_ns timeout 2 ping -c 1 $4 || return 1
++ ip netns exec $bar_ns timeout 2 ping -c 1 $3 || return 1
++ ip netns exec $bar_ns timeout 2 ping -c 1 $1 || return 1
++
++ nettest -B -N $bar_ns -O $foo_ns -r $1 || return 1
++ nettest -B -N $foo_ns -O $bar_ns -r $4 || return 1
+
+ return 0
+ }
+@@ -121,17 +120,15 @@ segmenttest(){
+ # Sets up veth link and tries to connect over it.
+ # Arguments: ip_a ip_b prefix_len test_description
+ hide_output
+- ip netns add foo-ns
+- ip netns add bar-ns
+- ip link add foo netns foo-ns type veth peer name bar netns bar-ns
++ setup_ns foo_ns bar_ns
++ ip link add foo netns $foo_ns type veth peer name bar netns $bar_ns
+
+ test_result=0
+ _do_segmenttest "$@" || test_result=1
+
+- ip netns pids foo-ns | xargs -r kill -9
+- ip netns pids bar-ns | xargs -r kill -9
+- ip netns del foo-ns
+- ip netns del bar-ns
++ ip netns pids $foo_ns | xargs -r kill -9
++ ip netns pids $bar_ns | xargs -r kill -9
++ cleanup_ns $foo_ns $bar_ns
+ show_output
+
+ # inverted tests will expect failure instead of success
+@@ -147,21 +144,17 @@ route_test(){
+ # Returns success or failure.
+
+ hide_output
+- ip netns add foo-ns
+- ip netns add bar-ns
+- ip netns add router-ns
+- ip link add foo netns foo-ns type veth peer name foo1 netns router-ns
+- ip link add bar netns bar-ns type veth peer name bar1 netns router-ns
++ setup_ns foo_ns bar_ns router_ns
++ ip link add foo netns $foo_ns type veth peer name foo1 netns $router_ns
++ ip link add bar netns $bar_ns type veth peer name bar1 netns $router_ns
+
+ test_result=0
+ _do_route_test "$@" || test_result=1
+
+- ip netns pids foo-ns | xargs -r kill -9
+- ip netns pids bar-ns | xargs -r kill -9
+- ip netns pids router-ns | xargs -r kill -9
+- ip netns del foo-ns
+- ip netns del bar-ns
+- ip netns del router-ns
++ ip netns pids $foo_ns | xargs -r kill -9
++ ip netns pids $bar_ns | xargs -r kill -9
++ ip netns pids $router_ns | xargs -r kill -9
++ cleanup_ns $foo_ns $bar_ns $router_ns
+
+ show_output
+
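segmenttest() and route_test() now rely on lib.sh's setup_ns/cleanup_ns,
which create uniquely named namespaces and hand the generated names back
through the given variable names. A rough sketch of the setup side,
assuming the real helper also records the namespaces for cleanup_ns:

setup_ns() {
	local ns_var ns
	for ns_var in "$@"; do
		# unique suffix avoids collisions between concurrent runs
		ns="${ns_var}-$(mktemp -u XXXXXX)"
		ip netns add "$ns" || return 1
		ip -n "$ns" link set dev lo up
		eval "$ns_var=$ns"
	done
}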
+diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
+index 2d073595c62021..27574bbf2d6386 100755
+--- a/tools/testing/selftests/net/veth.sh
++++ b/tools/testing/selftests/net/veth.sh
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+
+-BPF_FILE="../bpf/xdp_dummy.bpf.o"
++BPF_FILE="xdp_dummy.o"
+ readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
+ readonly BASE=`basename $STATS`
+ readonly SRC=2
+@@ -218,7 +218,7 @@ while getopts "hs:" option; do
+ done
+
+ if [ ! -f ${BPF_FILE} ]; then
+- echo "Missing ${BPF_FILE}. Build bpf selftest first"
++ echo "Missing ${BPF_FILE}. Run 'make' first"
+ exit 1
+ fi
+
+diff --git a/tools/testing/selftests/net/xdp_dummy.c b/tools/testing/selftests/net/xdp_dummy.c
+new file mode 100644
+index 00000000000000..d988b2e0cee840
+--- /dev/null
++++ b/tools/testing/selftests/net/xdp_dummy.c
+@@ -0,0 +1,13 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#define KBUILD_MODNAME "xdp_dummy"
++#include <linux/bpf.h>
++#include <bpf/bpf_helpers.h>
++
++SEC("xdp")
++int xdp_dummy_prog(struct xdp_md *ctx)
++{
++ return XDP_PASS;
++}
++
++char _license[] SEC("license") = "GPL";
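With xdp_dummy.c now living in the net selftests, the scripts no longer
depend on the bpf selftests being built first. The object is produced by
the net selftests Makefile; the build amounts to something like the
following (exact flags are an assumption, the Makefile is authoritative):

clang -O2 -target bpf -c xdp_dummy.c -o xdp_dummy.o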
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index ef90aca4cc96af..bced422b78f729 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -7,7 +7,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
+- conntrack_sctp_collision.sh
++ conntrack_sctp_collision.sh xt_string.sh
+
+ HOSTPKG_CONFIG := pkg-config
+
+diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
+index 99ed5bd6e8402f..e4717444d38e77 100755
+--- a/tools/testing/selftests/netfilter/nft_audit.sh
++++ b/tools/testing/selftests/netfilter/nft_audit.sh
+@@ -25,12 +25,31 @@ logread_pid=$!
+ trap 'kill $logread_pid; rm -f $logfile $rulefile' EXIT
+ exec 3<"$logfile"
+
++lsplit='s/^\(.*\) entries=\([^ ]*\) \(.*\)$/pfx="\1"\nval="\2"\nsfx="\3"/'
++summarize_logs() {
++ sum=0
++ while read line; do
++ eval $(sed "$lsplit" <<< "$line")
++ [[ $sum -gt 0 ]] && {
++ [[ "$pfx $sfx" == "$tpfx $tsfx" ]] && {
++ let "sum += val"
++ continue
++ }
++ echo "$tpfx entries=$sum $tsfx"
++ }
++ tpfx="$pfx"
++ tsfx="$sfx"
++ sum=$val
++ done
++ echo "$tpfx entries=$sum $tsfx"
++}
++
+ do_test() { # (cmd, log)
+ echo -n "testing for cmd: $1 ... "
+ cat <&3 >/dev/null
+ $1 >/dev/null || exit 1
+ sleep 0.1
+- res=$(diff -a -u <(echo "$2") - <&3)
++ res=$(diff -a -u <(echo "$2") <(summarize_logs <&3))
+ [ $? -eq 0 ] && { echo "OK"; return; }
+ echo "FAIL"
+ grep -v '^\(---\|+++\|@@\)' <<< "$res"
+@@ -129,31 +148,17 @@ do_test 'nft reset rules t1 c2' \
+ 'table=t1 family=2 entries=3 op=nft_reset_rule'
+
+ do_test 'nft reset rules table t1' \
+-'table=t1 family=2 entries=3 op=nft_reset_rule
+-table=t1 family=2 entries=3 op=nft_reset_rule
+-table=t1 family=2 entries=3 op=nft_reset_rule'
++'table=t1 family=2 entries=9 op=nft_reset_rule'
+
+ do_test 'nft reset rules t2 c3' \
+-'table=t2 family=2 entries=189 op=nft_reset_rule
+-table=t2 family=2 entries=188 op=nft_reset_rule
+-table=t2 family=2 entries=126 op=nft_reset_rule'
++'table=t2 family=2 entries=503 op=nft_reset_rule'
+
+ do_test 'nft reset rules t2' \
+-'table=t2 family=2 entries=3 op=nft_reset_rule
+-table=t2 family=2 entries=3 op=nft_reset_rule
+-table=t2 family=2 entries=186 op=nft_reset_rule
+-table=t2 family=2 entries=188 op=nft_reset_rule
+-table=t2 family=2 entries=129 op=nft_reset_rule'
++'table=t2 family=2 entries=509 op=nft_reset_rule'
+
+ do_test 'nft reset rules' \
+-'table=t1 family=2 entries=3 op=nft_reset_rule
+-table=t1 family=2 entries=3 op=nft_reset_rule
+-table=t1 family=2 entries=3 op=nft_reset_rule
+-table=t2 family=2 entries=3 op=nft_reset_rule
+-table=t2 family=2 entries=3 op=nft_reset_rule
+-table=t2 family=2 entries=180 op=nft_reset_rule
+-table=t2 family=2 entries=188 op=nft_reset_rule
+-table=t2 family=2 entries=135 op=nft_reset_rule'
++'table=t1 family=2 entries=9 op=nft_reset_rule
++table=t2 family=2 entries=509 op=nft_reset_rule'
+
+ # resetting sets and elements
+
+@@ -177,13 +182,11 @@ do_test 'nft reset counters t1' \
+ 'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+ do_test 'nft reset counters t2' \
+-'table=t2 family=2 entries=342 op=nft_reset_obj
+-table=t2 family=2 entries=158 op=nft_reset_obj'
++'table=t2 family=2 entries=500 op=nft_reset_obj'
+
+ do_test 'nft reset counters' \
+ 'table=t1 family=2 entries=1 op=nft_reset_obj
+-table=t2 family=2 entries=341 op=nft_reset_obj
+-table=t2 family=2 entries=159 op=nft_reset_obj'
++table=t2 family=2 entries=500 op=nft_reset_obj'
+
+ # resetting quotas
+
+@@ -194,13 +197,11 @@ do_test 'nft reset quotas t1' \
+ 'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+ do_test 'nft reset quotas t2' \
+-'table=t2 family=2 entries=315 op=nft_reset_obj
+-table=t2 family=2 entries=185 op=nft_reset_obj'
++'table=t2 family=2 entries=500 op=nft_reset_obj'
+
+ do_test 'nft reset quotas' \
+ 'table=t1 family=2 entries=1 op=nft_reset_obj
+-table=t2 family=2 entries=314 op=nft_reset_obj
+-table=t2 family=2 entries=186 op=nft_reset_obj'
++table=t2 family=2 entries=500 op=nft_reset_obj'
+
+ # deleting rules
+
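summarize_logs exists because a single nft reset can be split across
several audit records sharing the same prefix and suffix, with the split
points varying between kernels; folding them keeps the expected output
stable. For example, using the entry counts from the old expectations
above:

# input records:
#   table=t2 family=2 entries=189 op=nft_reset_rule
#   table=t2 family=2 entries=188 op=nft_reset_rule
#   table=t2 family=2 entries=126 op=nft_reset_rule
# summarized output (189 + 188 + 126):
#   table=t2 family=2 entries=503 op=nft_reset_rule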
+diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh
+new file mode 100755
+index 00000000000000..1802653a472873
+--- /dev/null
++++ b/tools/testing/selftests/netfilter/xt_string.sh
+@@ -0,0 +1,128 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# return code to signal skipped test
++ksft_skip=4
++rc=0
++
++if ! iptables --version >/dev/null 2>&1; then
++ echo "SKIP: Test needs iptables"
++ exit $ksft_skip
++fi
++if ! ip -V >/dev/null 2>&1; then
++ echo "SKIP: Test needs iproute2"
++ exit $ksft_skip
++fi
++if ! nc -h >/dev/null 2>&1; then
++ echo "SKIP: Test needs netcat"
++ exit $ksft_skip
++fi
++
++pattern="foo bar baz"
++patlen=11
++hdrlen=$((20 + 8)) # IPv4 + UDP
++ns="ns-$(mktemp -u XXXXXXXX)"
++trap 'ip netns del $ns' EXIT
++ip netns add "$ns"
++ip -net "$ns" link add d0 type dummy
++ip -net "$ns" link set d0 up
++ip -net "$ns" addr add 10.1.2.1/24 dev d0
++
++#ip netns exec "$ns" tcpdump -npXi d0 &
++#tcpdump_pid=$!
++#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
++
++add_rule() { # (alg, from, to)
++ ip netns exec "$ns" \
++ iptables -A OUTPUT -o d0 -m string \
++ --string "$pattern" --algo $1 --from $2 --to $3
++}
++showrules() { # ()
++ ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
++}
++zerorules() {
++ ip netns exec "$ns" iptables -Z OUTPUT
++}
++countrule() { # (pattern)
++ showrules | grep -c -- "$*"
++}
++send() { # (offset)
++ ( for ((i = 0; i < $1 - $hdrlen; i++)); do
++ printf " "
++ done
++ printf "$pattern"
++ ) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
++}
++
++add_rule bm 1000 1500
++add_rule bm 1400 1600
++add_rule kmp 1000 1500
++add_rule kmp 1400 1600
++
++zerorules
++send 0
++send $((1000 - $patlen))
++if [ $(countrule -c 0 0) -ne 4 ]; then
++ echo "FAIL: rules match data before --from"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1000
++send $((1400 - $patlen))
++if [ $(countrule -c 2) -ne 2 ]; then
++ echo "FAIL: only two rules should match at low offset"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1500 - $patlen))
++if [ $(countrule -c 1) -ne 4 ]; then
++ echo "FAIL: all rules should match at end of packet"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1495
++if [ $(countrule -c 1) -ne 1 ]; then
++ echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1500
++if [ $(countrule -c 1) -ne 2 ]; then
++ echo "FAIL: two rules should match pattern at start of second fragment"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen))
++if [ $(countrule -c 1) -ne 2 ]; then
++ echo "FAIL: two rules should match pattern at end of largest --to"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send $((1600 - $patlen + 1))
++if [ $(countrule -c 1) -ne 0 ]; then
++ echo "FAIL: no rules should match pattern extending largest --to"
++ showrules
++ ((rc--))
++fi
++
++zerorules
++send 1600
++if [ $(countrule -c 1) -ne 0 ]; then
++ echo "FAIL: no rule should match pattern past largest --to"
++ showrules
++ ((rc--))
++fi
++
++exit $rc
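The offsets in this test are picked around the --from/--to windows of the
four rules. One worked case, as the test's own failure messages suggest
(the fragment-boundary behaviour of the bm algorithm is the assumption
here):

# send 1495 places "foo bar baz" at offsets 1495..1505 (patlen 11), so
# the pattern straddles the boundary into the second fragment. Both
# 1400-1600 rules cover that window, but bm is not expected to match
# across fragments, leaving only the kmp 1400-1600 rule to fire; hence
# the check that exactly one rule counted a packet.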
+diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
+index fb3bf91462e27a..4aaafbfc2f9735 100644
+--- a/tools/testing/selftests/nolibc/nolibc-test.c
++++ b/tools/testing/selftests/nolibc/nolibc-test.c
+@@ -145,11 +145,11 @@ static void result(int llen, enum RESULT r)
+ const char *msg;
+
+ if (r == OK)
+- msg = " [OK]";
++ msg = " [OK]";
+ else if (r == SKIPPED)
+ msg = "[SKIPPED]";
+ else
+- msg = "[FAIL]";
++ msg = " [FAIL]";
+
+ if (llen < 64)
+ putcharn(' ', 64 - llen);
+@@ -522,7 +522,7 @@ int expect_strzr(const char *expr, int llen)
+ {
+ int ret = 0;
+
+- llen += printf(" = <%s> ", expr);
++ llen += printf(" = <%s> ", expr ? expr : "(null)");
+ if (expr) {
+ ret = 1;
+ result(llen, FAIL);
+@@ -541,7 +541,7 @@ int expect_strnz(const char *expr, int llen)
+ {
+ int ret = 0;
+
+- llen += printf(" = <%s> ", expr);
++ llen += printf(" = <%s> ", expr ? expr : "(null)");
+ if (!expr) {
+ ret = 1;
+ result(llen, FAIL);
+diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
+index 7fb902099de45e..f9d2b0ec77564f 100644
+--- a/tools/testing/selftests/openat2/openat2_test.c
++++ b/tools/testing/selftests/openat2/openat2_test.c
+@@ -5,6 +5,7 @@
+ */
+
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <fcntl.h>
+ #include <sched.h>
+ #include <sys/stat.h>
+diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+index 4e86f927880c32..01cc37bf611c32 100644
+--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
+ break;
+
+ case PIDFD_PASS:
+- ksft_test_result_pass("%s test: Passed\n");
++ ksft_test_result_pass("%s test: Passed\n", test_name);
+ break;
+
+ default:
+diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
+index 00a07e7c571cda..c081ae91313aa2 100644
+--- a/tools/testing/selftests/pidfd/pidfd_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_test.c
+@@ -381,13 +381,13 @@ static int test_pidfd_send_signal_syscall_support(void)
+
+ static void *test_pidfd_poll_exec_thread(void *priv)
+ {
+- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ getpid(), syscall(SYS_gettid));
+ ksft_print_msg("Child Thread: doing exec of sleep\n");
+
+ execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
+
+- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
++ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
+ getpid(), syscall(SYS_gettid));
+ return NULL;
+ }
+@@ -427,7 +427,7 @@ static int child_poll_exec_test(void *args)
+ {
+ pthread_t t1;
+
+- ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
++ ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
+ syscall(SYS_gettid));
+ pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
+ /*
+@@ -480,10 +480,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
+
+ static void *test_pidfd_poll_leader_exit_thread(void *priv)
+ {
+- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
++ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
+ getpid(), syscall(SYS_gettid));
+ sleep(CHILD_THREAD_MIN_WAIT);
+- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ return NULL;
+ }
+
+@@ -492,7 +492,7 @@ static int child_poll_leader_exit_test(void *args)
+ {
+ pthread_t t1, t2;
+
+- ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
++ ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
+ pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+ pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
+
+diff --git a/tools/testing/selftests/powerpc/dexcr/Makefile b/tools/testing/selftests/powerpc/dexcr/Makefile
+index 76210f2bcec3c8..829ad075b4a441 100644
+--- a/tools/testing/selftests/powerpc/dexcr/Makefile
++++ b/tools/testing/selftests/powerpc/dexcr/Makefile
+@@ -3,7 +3,7 @@ TEST_GEN_FILES := lsdexcr
+
+ include ../../lib.mk
+
+-$(OUTPUT)/hashchk_test: CFLAGS += -fno-pie $(call cc-option,-mno-rop-protect)
++$(OUTPUT)/hashchk_test: CFLAGS += -fno-pie -no-pie $(call cc-option,-mno-rop-protect)
+
+ $(TEST_GEN_PROGS): ../harness.c ../utils.c ./dexcr.c
+ $(TEST_GEN_FILES): ../utils.c ./dexcr.c
+diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
+index 5235bdc8c0b114..3e5b5663d24492 100644
+--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
++++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
+@@ -37,19 +37,20 @@ __thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
+ int threads_starting;
+ int running;
+
+-extern void preempt_fpu(double *darray, int *threads_starting, int *running);
++extern int preempt_fpu(double *darray, int *threads_starting, int *running);
+
+ void *preempt_fpu_c(void *p)
+ {
++ long rc;
+ int i;
++
+ srand(pthread_self());
+ for (i = 0; i < 21; i++)
+ darray[i] = rand();
+
+- /* Test failed if it ever returns */
+- preempt_fpu(darray, &threads_starting, &running);
++ rc = preempt_fpu(darray, &threads_starting, &running);
+
+- return p;
++ return (void *)rc;
+ }
+
+ int test_preempt_fpu(void)
+diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
+index 6761d6ce30eca9..6f7cf400c6875f 100644
+--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
++++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
+@@ -37,19 +37,21 @@ __thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12},
+ int threads_starting;
+ int running;
+
+-extern void preempt_vmx(vector int *varray, int *threads_starting, int *running);
++extern int preempt_vmx(vector int *varray, int *threads_starting, int *running);
+
+ void *preempt_vmx_c(void *p)
+ {
+ int i, j;
++ long rc;
++
+ srand(pthread_self());
+ for (i = 0; i < 12; i++)
+ for (j = 0; j < 4; j++)
+ varray[i][j] = rand();
+
+- /* Test fails if it ever returns */
+- preempt_vmx(varray, &threads_starting, &running);
+- return p;
++ rc = preempt_vmx(varray, &threads_starting, &running);
++
++ return (void *)rc;
+ }
+
+ int test_preempt_vmx(void)
+diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
+index 12b50a4a881ac4..89a82f6f140ef7 100755
+--- a/tools/testing/selftests/rcutorture/bin/torture.sh
++++ b/tools/testing/selftests/rcutorture/bin/torture.sh
+@@ -567,7 +567,7 @@ then
+ torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog"
+ torture_set "clocksourcewd-1" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+- torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 clocksource.max_cswd_read_retries=1 tsc=watchdog"
++ torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 tsc=watchdog"
+ torture_set "clocksourcewd-2" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+ # In case our work is already done...
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 5073dbc9612582..021863f86053a9 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,10 +1,12 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+ CFLAGS += $(KHDR_INCLUDES)
+
+ TEST_GEN_PROGS := resctrl_tests
+
++LOCAL_HDRS += $(wildcard *.h)
++
+ include ../lib.mk
+
+-$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
++$(OUTPUT)/resctrl_tests: $(wildcard *.c)
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index d3cbb829ff6a70..601ab78dbf4217 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -40,7 +40,7 @@ static int perf_event_open_llc_miss(pid_t pid, int cpu_no)
+ fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1,
+ PERF_FLAG_FD_CLOEXEC);
+ if (fd_lm == -1) {
+- perror("Error opening leader");
++ ksft_perror("Error opening leader");
+ ctrlc_handler(0, NULL, NULL);
+ return -1;
+ }
+@@ -95,7 +95,7 @@ static int get_llc_perf(unsigned long *llc_perf_miss)
+
+ ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
+ if (ret == -1) {
+- perror("Could not get llc misses through perf");
++ ksft_perror("Could not get llc misses through perf");
+ return -1;
+ }
+
+@@ -124,12 +124,12 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
+
+ fp = fopen(llc_occup_path, "r");
+ if (!fp) {
+- perror("Failed to open results file");
++ ksft_perror("Failed to open results file");
+
+ return errno;
+ }
+ if (fscanf(fp, "%lu", llc_occupancy) <= 0) {
+- perror("Could not get llc occupancy");
++ ksft_perror("Could not get llc occupancy");
+ fclose(fp);
+
+ return -1;
+@@ -159,7 +159,7 @@ static int print_results_cache(char *filename, int bm_pid,
+ } else {
+ fp = fopen(filename, "a");
+ if (!fp) {
+- perror("Cannot open results file");
++ ksft_perror("Cannot open results file");
+
+ return errno;
+ }
+@@ -205,10 +205,11 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+ * cache_val: execute benchmark and measure LLC occupancy resctrl
+ * and perf cache miss for the benchmark
+ * @param: parameters passed to cache_val()
++ * @span: buffer size for the benchmark
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+-int cat_val(struct resctrl_val_param *param)
++int cat_val(struct resctrl_val_param *param, size_t span)
+ {
+ int memflush = 1, operation = 0, ret = 0;
+ char *resctrl_val = param->resctrl_val;
+@@ -245,7 +246,7 @@ int cat_val(struct resctrl_val_param *param)
+ if (ret)
+ break;
+
+- if (run_fill_buf(param->span, memflush, operation, true)) {
++ if (run_fill_buf(span, memflush, operation, true)) {
+ fprintf(stderr, "Error-running fill buffer\n");
+ ret = -1;
+ goto pe_close;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 3848dfb46aba4f..9bb8ba93f43355 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -41,7 +41,7 @@ static int cat_setup(struct resctrl_val_param *p)
+ return ret;
+ }
+
+-static int check_results(struct resctrl_val_param *param)
++static int check_results(struct resctrl_val_param *param, size_t span)
+ {
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_perf_miss = 0;
+@@ -51,7 +51,7 @@ static int check_results(struct resctrl_val_param *param)
+ ksft_print_msg("Checking for pass/fail\n");
+ fp = fopen(param->filename, "r");
+ if (!fp) {
+- perror("# Cannot open file");
++ ksft_perror("Cannot open file");
+
+ return errno;
+ }
+@@ -76,7 +76,7 @@ static int check_results(struct resctrl_val_param *param)
+ fclose(fp);
+ no_of_bits = count_bits(param->mask);
+
+- return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
++ return show_cache_info(sum_llc_perf_miss, no_of_bits, span / 64,
+ MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
+ get_vendor() == ARCH_INTEL, false);
+ }
+@@ -96,6 +96,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ char cbm_mask[256];
+ int count_of_bits;
+ char pipe_message;
++ size_t span;
+
+ /* Get default cbm mask for L3/L2 cache */
+ ret = get_cbm_mask(cache_type, cbm_mask);
+@@ -140,7 +141,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ /* Set param values for parent thread which will be allocated bitmask
+ * with (max_bits - n) bits
+ */
+- param.span = cache_size * (count_of_bits - n) / count_of_bits;
++ span = cache_size * (count_of_bits - n) / count_of_bits;
+ strcpy(param.ctrlgrp, "c2");
+ strcpy(param.mongrp, "m2");
+ strcpy(param.filename, RESULT_FILE_NAME2);
+@@ -148,7 +149,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ param.num_of_runs = 0;
+
+ if (pipe(pipefd)) {
+- perror("# Unable to create pipe");
++ ksft_perror("Unable to create pipe");
+ return errno;
+ }
+
+@@ -162,23 +163,17 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ param.mask = l_mask_1;
+ strcpy(param.ctrlgrp, "c1");
+ strcpy(param.mongrp, "m1");
+- param.span = cache_size * n / count_of_bits;
++ span = cache_size * n / count_of_bits;
+ strcpy(param.filename, RESULT_FILE_NAME1);
+ param.num_of_runs = 0;
+ param.cpu_no = sibling_cpu_no;
+- } else {
+- ret = signal_handler_register();
+- if (ret) {
+- kill(bm_pid, SIGKILL);
+- goto out;
+- }
+ }
+
+ remove(param.filename);
+
+- ret = cat_val(&param);
++ ret = cat_val(&param, span);
+ if (ret == 0)
+- ret = check_results(&param);
++ ret = check_results(&param, span);
+
+ if (bm_pid == 0) {
+ /* Tell parent that child is ready */
+@@ -190,7 +185,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ * Just print the error message.
+ * Let while(1) run and wait for itself to be killed.
+ */
+- perror("# failed signaling parent process");
++ ksft_perror("Failed signaling parent process");
+
+ close(pipefd[1]);
+ while (1)
+@@ -202,16 +197,14 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ while (pipe_message != 1) {
+ if (read(pipefd[0], &pipe_message,
+ sizeof(pipe_message)) < sizeof(pipe_message)) {
+- perror("# failed reading from child process");
++ ksft_perror("Failed reading from child process");
+ break;
+ }
+ }
+ close(pipefd[0]);
+ kill(bm_pid, SIGKILL);
+- signal_handler_unregister();
+ }
+
+-out:
+ cat_test_cleanup();
+
+ return ret;
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index cb2197647c6cdf..16fc0488e0a545 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -27,7 +27,7 @@ static int cmt_setup(struct resctrl_val_param *p)
+ return 0;
+ }
+
+-static int check_results(struct resctrl_val_param *param, int no_of_bits)
++static int check_results(struct resctrl_val_param *param, size_t span, int no_of_bits)
+ {
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_occu_resc = 0;
+@@ -37,7 +37,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
+ ksft_print_msg("Checking for pass/fail\n");
+ fp = fopen(param->filename, "r");
+ if (!fp) {
+- perror("# Error in opening file\n");
++ ksft_perror("Error in opening file");
+
+ return errno;
+ }
+@@ -58,7 +58,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
+ }
+ fclose(fp);
+
+- return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
++ return show_cache_info(sum_llc_occu_resc, no_of_bits, span,
+ MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
+ true, true);
+ }
+@@ -68,16 +68,17 @@ void cmt_test_cleanup(void)
+ remove(RESULT_FILE_NAME);
+ }
+
+-int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
++int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
+ {
++ const char * const *cmd = benchmark_cmd;
++ const char *new_cmd[BENCHMARK_ARGS];
+ unsigned long cache_size = 0;
+ unsigned long long_mask;
++ char *span_str = NULL;
+ char cbm_mask[256];
+ int count_of_bits;
+- int ret;
+-
+- if (!validate_resctrl_feature_request(CMT_STR))
+- return -1;
++ size_t span;
++ int ret, i;
+
+ ret = get_cbm_mask("L3", cbm_mask);
+ if (ret)
+@@ -105,24 +106,36 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ .cpu_no = cpu_no,
+ .filename = RESULT_FILE_NAME,
+ .mask = ~(long_mask << n) & long_mask,
+- .span = cache_size * n / count_of_bits,
+ .num_of_runs = 0,
+ .setup = cmt_setup,
+ };
+
+- if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
+- sprintf(benchmark_cmd[1], "%zu", param.span);
++ span = cache_size * n / count_of_bits;
++
++ if (strcmp(cmd[0], "fill_buf") == 0) {
++ /* Duplicate the command to be able to replace span in it */
++ for (i = 0; benchmark_cmd[i]; i++)
++ new_cmd[i] = benchmark_cmd[i];
++ new_cmd[i] = NULL;
++
++ ret = asprintf(&span_str, "%zu", span);
++ if (ret < 0)
++ return -1;
++ new_cmd[1] = span_str;
++ cmd = new_cmd;
++ }
+
+ remove(RESULT_FILE_NAME);
+
+- ret = resctrl_val(benchmark_cmd, &param);
++ ret = resctrl_val(cmd, &param);
+ if (ret)
+ goto out;
+
+- ret = check_results(&param, n);
++ ret = check_results(&param, span, n);
+
+ out:
+ cmt_test_cleanup();
++ free(span_str);
+
+ return ret;
+ }
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index 0d425f26583a95..0f6cca61ec94ba 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
+ /* Consume read result so that reading memory is not optimized out. */
+ fp = fopen("/dev/null", "w");
+ if (!fp) {
+- perror("Unable to write to /dev/null");
++ ksft_perror("Unable to write to /dev/null");
+ return -1;
+ }
+ fprintf(fp, "Sum: %d ", ret);
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 4d2f145804b834..4988b93add6a79 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -12,7 +12,7 @@
+
+ #define RESULT_FILE_NAME "result_mba"
+ #define NUM_OF_RUNS 5
+-#define MAX_DIFF_PERCENT 5
++#define MAX_DIFF_PERCENT 8
+ #define ALLOCATION_MAX 100
+ #define ALLOCATION_MIN 10
+ #define ALLOCATION_STEP 10
+@@ -109,7 +109,7 @@ static int check_results(void)
+
+ fp = fopen(output, "r");
+ if (!fp) {
+- perror(output);
++ ksft_perror(output);
+
+ return errno;
+ }
+@@ -141,7 +141,7 @@ void mba_test_cleanup(void)
+ remove(RESULT_FILE_NAME);
+ }
+
+-int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
++int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
+ {
+ struct resctrl_val_param param = {
+ .resctrl_val = MBA_STR,
+@@ -149,7 +149,7 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
+ .mongrp = "m1",
+ .cpu_no = cpu_no,
+ .filename = RESULT_FILE_NAME,
+- .bw_report = bw_report,
++ .bw_report = "reads",
+ .setup = mba_setup
+ };
+ int ret;
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index c7de6f5977f690..eb488aabb9ae66 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -11,7 +11,7 @@
+ #include "resctrl.h"
+
+ #define RESULT_FILE_NAME "result_mbm"
+-#define MAX_DIFF_PERCENT 5
++#define MAX_DIFF_PERCENT 8
+ #define NUM_OF_RUNS 5
+
+ static int
+@@ -59,7 +59,7 @@ static int check_results(size_t span)
+
+ fp = fopen(output, "r");
+ if (!fp) {
+- perror(output);
++ ksft_perror(output);
+
+ return errno;
+ }
+@@ -109,16 +109,15 @@ void mbm_test_cleanup(void)
+ remove(RESULT_FILE_NAME);
+ }
+
+-int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
++int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd)
+ {
+ struct resctrl_val_param param = {
+ .resctrl_val = MBM_STR,
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+- .span = span,
+ .cpu_no = cpu_no,
+ .filename = RESULT_FILE_NAME,
+- .bw_report = bw_report,
++ .bw_report = "reads",
+ .setup = mbm_setup
+ };
+ int ret;
+@@ -129,7 +128,7 @@ int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd
+ if (ret)
+ goto out;
+
+- ret = check_results(span);
++ ret = check_results(DEFAULT_SPAN);
+
+ out:
+ mbm_test_cleanup();
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index 838d1a438f335f..dd3546655657a9 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -1,5 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#define _GNU_SOURCE
+ #ifndef RESCTRL_H
+ #define RESCTRL_H
+ #include <stdio.h>
+@@ -28,19 +27,18 @@
+ #define RESCTRL_PATH "/sys/fs/resctrl"
+ #define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
+ #define INFO_PATH "/sys/fs/resctrl/info"
+-#define L3_PATH "/sys/fs/resctrl/info/L3"
+-#define MB_PATH "/sys/fs/resctrl/info/MB"
+-#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
+-#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
+
+ #define ARCH_INTEL 1
+ #define ARCH_AMD 2
+
+ #define END_OF_TESTS 1
+
+-#define PARENT_EXIT(err_msg) \
++#define BENCHMARK_ARGS 64
++
++#define DEFAULT_SPAN (250 * MB)
++
++#define PARENT_EXIT() \
+ do { \
+- perror(err_msg); \
+ kill(ppid, SIGKILL); \
+ umount_resctrlfs(); \
+ exit(EXIT_FAILURE); \
+@@ -52,7 +50,6 @@
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number to which the benchmark would be binded
+- * @span: Memory bytes accessed in each benchmark iteration
+ * @filename: Name of file to which the o/p should be written
+ * @bw_report: Bandwidth report type (reads vs writes)
+ * @setup: Call back function to setup test environment
+@@ -62,7 +59,6 @@ struct resctrl_val_param {
+ char ctrlgrp[64];
+ char mongrp[64];
+ int cpu_no;
+- size_t span;
+ char filename[64];
+ char *bw_report;
+ unsigned long mask;
+@@ -86,10 +82,9 @@ int get_resource_id(int cpu_no, int *resource_id);
+ int mount_resctrlfs(void);
+ int umount_resctrlfs(void);
+ int validate_bw_report_request(char *bw_report);
+-bool validate_resctrl_feature_request(const char *resctrl_val);
++bool validate_resctrl_feature_request(const char *resource, const char *feature);
+ char *fgrep(FILE *inf, const char *str);
+ int taskset_benchmark(pid_t bm_pid, int cpu_no);
+-void run_benchmark(int signum, siginfo_t *info, void *ucontext);
+ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
+ char *resctrl_val);
+ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+@@ -97,21 +92,21 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+ int group_fd, unsigned long flags);
+ int run_fill_buf(size_t span, int memflush, int op, bool once);
+-int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
+-int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd);
++int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param);
++int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd);
+ void tests_cleanup(void);
+ void mbm_test_cleanup(void);
+-int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
++int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd);
+ void mba_test_cleanup(void);
+ int get_cbm_mask(char *cache_type, char *cbm_mask);
+ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
+ void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
+ int signal_handler_register(void);
+ void signal_handler_unregister(void);
+-int cat_val(struct resctrl_val_param *param);
++int cat_val(struct resctrl_val_param *param, size_t span);
+ void cat_test_cleanup(void);
+ int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
+-int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
++int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd);
+ unsigned int count_bits(unsigned long n);
+ void cmt_test_cleanup(void);
+ int get_core_sibling(int cpu_no);
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index d511daeb6851e6..31373b69e675d1 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -10,9 +10,6 @@
+ */
+ #include "resctrl.h"
+
+-#define BENCHMARK_ARGS 64
+-#define BENCHMARK_ARG_SIZE 64
+-
+ static int detect_vendor(void)
+ {
+ FILE *inf = fopen("/proc/cpuinfo", "r");
+@@ -70,72 +67,98 @@ void tests_cleanup(void)
+ cat_test_cleanup();
+ }
+
+-static void run_mbm_test(char **benchmark_cmd, size_t span,
+- int cpu_no, char *bw_report)
++static int test_prepare(void)
+ {
+ int res;
+
+- ksft_print_msg("Starting MBM BW change ...\n");
++ res = signal_handler_register();
++ if (res) {
++ ksft_print_msg("Failed to register signal handler\n");
++ return res;
++ }
+
+ res = mount_resctrlfs();
+ if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ signal_handler_unregister();
++ ksft_print_msg("Failed to mount resctrl FS\n");
++ return res;
++ }
++ return 0;
++}
++
++static void test_cleanup(void)
++{
++ umount_resctrlfs();
++ signal_handler_unregister();
++}
++
++static void run_mbm_test(const char * const *benchmark_cmd, int cpu_no)
++{
++ int res;
++
++ ksft_print_msg("Starting MBM BW change ...\n");
++
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
++ if (!validate_resctrl_feature_request("L3_MON", "mbm_total_bytes") ||
++ !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
++ (get_vendor() != ARCH_INTEL)) {
+ ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+- res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
++ res = mbm_bw_change(cpu_no, benchmark_cmd);
+ ksft_test_result(!res, "MBM: bw change\n");
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+-static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
++static void run_mba_test(const char * const *benchmark_cmd, int cpu_no)
+ {
+ int res;
+
+ ksft_print_msg("Starting MBA Schemata change ...\n");
+
+- res = mount_resctrlfs();
+- if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
++ if (!validate_resctrl_feature_request("MB", NULL) ||
++ !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
++ (get_vendor() != ARCH_INTEL)) {
+ ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+- res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
++ res = mba_schemata_change(cpu_no, benchmark_cmd);
+ ksft_test_result(!res, "MBA: schemata change\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+-static void run_cmt_test(char **benchmark_cmd, int cpu_no)
++static void run_cmt_test(const char * const *benchmark_cmd, int cpu_no)
+ {
+ int res;
+
+ ksft_print_msg("Starting CMT test ...\n");
+
+- res = mount_resctrlfs();
+- if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(CMT_STR)) {
++ if (!validate_resctrl_feature_request("L3_MON", "llc_occupancy") ||
++ !validate_resctrl_feature_request("L3", NULL)) {
+ ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+ res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
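The test_prepare()/test_cleanup() pair introduced above follows acquire-in-order, release-in-reverse: when the second acquisition (mounting resctrl) fails, the already-registered signal handler is unregistered before the error is propagated. A minimal standalone sketch of the same shape, with a signal disposition standing in for both resources (names illustrative, not from the selftest):

#include <signal.h>
#include <stdio.h>

static int test_prepare(void)
{
	/* Step 1: stand-in for signal_handler_register(). */
	if (signal(SIGUSR1, SIG_IGN) == SIG_ERR)
		return -1;

	/* Step 2: stand-in for mount_resctrlfs(); on failure, undo
	 * step 1 before reporting, as the patch does. */
	if (0 /* imagine this acquisition failing */) {
		signal(SIGUSR1, SIG_DFL);
		return -1;
	}
	return 0;
}

static void test_cleanup(void)
{
	/* Release in reverse order of acquisition. */
	signal(SIGUSR1, SIG_DFL);
}

int main(void)
{
	if (test_prepare()) {
		fprintf(stderr, "abnormal failure preparing the test\n");
		return 1;
	}
	puts("test body runs here");
	test_cleanup();
	return 0;
}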
+@@ -143,8 +166,8 @@ static void run_cmt_test(char **benchmark_cmd, int cpu_no)
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+ static void run_cat_test(int cpu_no, int no_of_bits)
+@@ -153,33 +176,32 @@ static void run_cat_test(int cpu_no, int no_of_bits)
+
+ ksft_print_msg("Starting CAT test ...\n");
+
+- res = mount_resctrlfs();
+- if (res) {
+- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
++ if (test_prepare()) {
++ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
+ return;
+ }
+
+- if (!validate_resctrl_feature_request(CAT_STR)) {
++ if (!validate_resctrl_feature_request("L3", NULL)) {
+ ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
+- goto umount;
++ goto cleanup;
+ }
+
+ res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
+ ksft_test_result(!res, "CAT: test\n");
+
+-umount:
+- umount_resctrlfs();
++cleanup:
++ test_cleanup();
+ }
+
+ int main(int argc, char **argv)
+ {
+ bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
+- char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
+- char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
+ int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
++ const char *benchmark_cmd[BENCHMARK_ARGS];
+ int ben_ind, ben_count, tests = 0;
+- size_t span = 250 * MB;
++ char *span_str = NULL;
+ bool cat_test = true;
++ int ret;
+
+ for (i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "-b") == 0) {
+@@ -255,28 +277,26 @@ int main(int argc, char **argv)
+ return ksft_exit_skip("Not running as root. Skipping...\n");
+
+ if (has_ben) {
++ if (argc - ben_ind >= BENCHMARK_ARGS)
++ ksft_exit_fail_msg("Too long benchmark command.\n");
++
+ /* Extract benchmark command from command line. */
+- for (i = ben_ind; i < argc; i++) {
+- benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
+- sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+- }
++ for (i = 0; i < argc - ben_ind; i++)
++ benchmark_cmd[i] = argv[i + ben_ind];
+ benchmark_cmd[ben_count] = NULL;
+ } else {
+ /* If no benchmark is given by "-b" argument, use fill_buf. */
+- for (i = 0; i < 5; i++)
+- benchmark_cmd[i] = benchmark_cmd_area[i];
+-
+- strcpy(benchmark_cmd[0], "fill_buf");
+- sprintf(benchmark_cmd[1], "%zu", span);
+- strcpy(benchmark_cmd[2], "1");
+- strcpy(benchmark_cmd[3], "0");
+- strcpy(benchmark_cmd[4], "false");
++ benchmark_cmd[0] = "fill_buf";
++ ret = asprintf(&span_str, "%u", DEFAULT_SPAN);
++ if (ret < 0)
++ ksft_exit_fail_msg("Out of memory!\n");
++ benchmark_cmd[1] = span_str;
++ benchmark_cmd[2] = "1";
++ benchmark_cmd[3] = "0";
++ benchmark_cmd[4] = "false";
+ benchmark_cmd[5] = NULL;
+ }
+
+- sprintf(bw_report, "reads");
+- sprintf(bm_type, "fill_buf");
+-
+ if (!check_resctrlfs_support())
+ return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
+
+@@ -288,10 +308,10 @@ int main(int argc, char **argv)
+ ksft_set_plan(tests ? : 4);
+
+ if (mbm_test)
+- run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
++ run_mbm_test(benchmark_cmd, cpu_no);
+
+ if (mba_test)
+- run_mba_test(benchmark_cmd, cpu_no, bw_report);
++ run_mba_test(benchmark_cmd, cpu_no);
+
+ if (cmt_test)
+ run_cmt_test(benchmark_cmd, cpu_no);
+@@ -299,5 +319,6 @@ int main(int argc, char **argv)
+ if (cat_test)
+ run_cat_test(cpu_no, no_of_bits);
+
++ free(span_str);
+ ksft_finished();
+ }
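The main() rework above replaces fixed-size sprintf() buffers with an array of const pointers into argv plus a single asprintf() allocation for the formatted span. A self-contained sketch of that pattern (the 250 MB value mirrors the old default span; everything else is a placeholder):

#define _GNU_SOURCE	/* asprintf() */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *cmd[6];
	char *span_str = NULL;

	/* Literals are referenced directly; only the formatted number
	 * needs heap storage. */
	if (asprintf(&span_str, "%u", 250U * 1024 * 1024) < 0)
		return 1;

	cmd[0] = "fill_buf";
	cmd[1] = span_str;
	cmd[2] = "1";
	cmd[3] = "0";
	cmd[4] = "false";
	cmd[5] = NULL;		/* exec-style arrays are NULL-terminated */

	for (int i = 0; cmd[i]; i++)
		printf("argv[%d] = %s\n", i, cmd[i]);

	free(span_str);		/* the only element we own */
	return 0;
}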
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index f0f6c5f6e98b9b..45439e726e79c5 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -156,12 +156,12 @@ static int read_from_imc_dir(char *imc_dir, int count)
+ sprintf(imc_counter_type, "%s%s", imc_dir, "type");
+ fp = fopen(imc_counter_type, "r");
+ if (!fp) {
+- perror("Failed to open imc counter type file");
++ ksft_perror("Failed to open iMC counter type file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) {
+- perror("Could not get imc type");
++ ksft_perror("Could not get iMC type");
+ fclose(fp);
+
+ return -1;
+@@ -175,12 +175,12 @@ static int read_from_imc_dir(char *imc_dir, int count)
+ sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
+ fp = fopen(imc_counter_cfg, "r");
+ if (!fp) {
+- perror("Failed to open imc config file");
++ ksft_perror("Failed to open iMC config file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
+- perror("Could not get imc cas count read");
++ ksft_perror("Could not get iMC cas count read");
+ fclose(fp);
+
+ return -1;
+@@ -193,12 +193,12 @@ static int read_from_imc_dir(char *imc_dir, int count)
+ sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME);
+ fp = fopen(imc_counter_cfg, "r");
+ if (!fp) {
+- perror("Failed to open imc config file");
++ ksft_perror("Failed to open iMC config file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
+- perror("Could not get imc cas count write");
++ ksft_perror("Could not get iMC cas count write");
+ fclose(fp);
+
+ return -1;
+@@ -262,12 +262,12 @@ static int num_of_imcs(void)
+ }
+ closedir(dp);
+ if (count == 0) {
+- perror("Unable find iMC counters!\n");
++ ksft_print_msg("Unable to find iMC counters\n");
+
+ return -1;
+ }
+ } else {
+- perror("Unable to open PMU directory!\n");
++ ksft_perror("Unable to open PMU directory");
+
+ return -1;
+ }
+@@ -292,6 +292,18 @@ static int initialize_mem_bw_imc(void)
+ return 0;
+ }
+
++static void perf_close_imc_mem_bw(void)
++{
++ int mc;
++
++ for (mc = 0; mc < imcs; mc++) {
++ if (imc_counters_config[mc][READ].fd != -1)
++ close(imc_counters_config[mc][READ].fd);
++ if (imc_counters_config[mc][WRITE].fd != -1)
++ close(imc_counters_config[mc][WRITE].fd);
++ }
++}
++
+ /*
+ * get_mem_bw_imc: Memory bandwidth as reported by iMC counters
+ * @cpu_no: CPU number that the benchmark PID is bound to
+@@ -305,26 +317,33 @@ static int initialize_mem_bw_imc(void)
+ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+ {
+ float reads, writes, of_mul_read, of_mul_write;
+- int imc, j, ret;
++ int imc, ret;
++
++ for (imc = 0; imc < imcs; imc++) {
++ imc_counters_config[imc][READ].fd = -1;
++ imc_counters_config[imc][WRITE].fd = -1;
++ }
+
+ /* Start all iMC counters to log values (both read and write) */
+ reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
+ for (imc = 0; imc < imcs; imc++) {
+- for (j = 0; j < 2; j++) {
+- ret = open_perf_event(imc, cpu_no, j);
+- if (ret)
+- return -1;
+- }
+- for (j = 0; j < 2; j++)
+- membw_ioctl_perf_event_ioc_reset_enable(imc, j);
++ ret = open_perf_event(imc, cpu_no, READ);
++ if (ret)
++ goto close_fds;
++ ret = open_perf_event(imc, cpu_no, WRITE);
++ if (ret)
++ goto close_fds;
++
++ membw_ioctl_perf_event_ioc_reset_enable(imc, READ);
++ membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE);
+ }
+
+ sleep(1);
+
+ /* Stop counters after a second to get results (both read and write) */
+ for (imc = 0; imc < imcs; imc++) {
+- for (j = 0; j < 2; j++)
+- membw_ioctl_perf_event_ioc_disable(imc, j);
++ membw_ioctl_perf_event_ioc_disable(imc, READ);
++ membw_ioctl_perf_event_ioc_disable(imc, WRITE);
+ }
+
+ /*
+@@ -339,16 +358,14 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+
+ if (read(r->fd, &r->return_value,
+ sizeof(struct membw_read_format)) == -1) {
+- perror("Couldn't get read b/w through iMC");
+-
+- return -1;
++ ksft_perror("Couldn't get read b/w through iMC");
++ goto close_fds;
+ }
+
+ if (read(w->fd, &w->return_value,
+ sizeof(struct membw_read_format)) == -1) {
+- perror("Couldn't get write bw through iMC");
+-
+- return -1;
++ ksft_perror("Couldn't get write bw through iMC");
++ goto close_fds;
+ }
+
+ __u64 r_time_enabled = r->return_value.time_enabled;
+@@ -368,10 +385,7 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+ writes += w->return_value.value * of_mul_write * SCALE;
+ }
+
+- for (imc = 0; imc < imcs; imc++) {
+- close(imc_counters_config[imc][READ].fd);
+- close(imc_counters_config[imc][WRITE].fd);
+- }
++ perf_close_imc_mem_bw();
+
+ if (strcmp(bw_report, "reads") == 0) {
+ *bw_imc = reads;
+@@ -385,6 +399,10 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+
+ *bw_imc = reads + writes;
+ return 0;
++
++close_fds:
++ perf_close_imc_mem_bw();
++ return -1;
+ }
+
+ void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
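The close_fds label and perf_close_imc_mem_bw() added above depend on initialising every descriptor to -1, so a single unwind path can close only what was actually opened. The same idiom as a runnable sketch (paths chosen so the last open fails and partial cleanup is exercised):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[3] = { -1, -1, -1 };	/* -1 marks "never opened" */
	const char *paths[3] = { "/dev/null", "/dev/zero", "/nonexistent" };
	int i, ret = 1;

	for (i = 0; i < 3; i++) {
		fds[i] = open(paths[i], O_RDONLY);
		if (fds[i] < 0) {
			perror(paths[i]);
			goto close_fds;	/* single unwind point */
		}
	}
	ret = 0;	/* all three opened successfully */

close_fds:
	for (i = 0; i < 3; i++)
		if (fds[i] != -1)	/* close only what was opened */
			close(fds[i]);
	return ret;
}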
+@@ -416,7 +434,7 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+ int resource_id;
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+- perror("Could not get resource_id");
++ ksft_print_msg("Could not get resource_id\n");
+ return;
+ }
+
+@@ -449,12 +467,12 @@ static int get_mem_bw_resctrl(unsigned long *mbm_total)
+
+ fp = fopen(mbm_total_path, "r");
+ if (!fp) {
+- perror("Failed to open total bw file");
++ ksft_perror("Failed to open total bw file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%lu", mbm_total) <= 0) {
+- perror("Could not get mbm local bytes");
++ ksft_perror("Could not get mbm local bytes");
+ fclose(fp);
+
+ return -1;
+@@ -468,7 +486,9 @@ pid_t bm_pid, ppid;
+
+ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+ {
+- kill(bm_pid, SIGKILL);
++ /* Only kill child after bm_pid is set after fork() */
++ if (bm_pid)
++ kill(bm_pid, SIGKILL);
+ umount_resctrlfs();
+ tests_cleanup();
+ ksft_print_msg("Ending\n\n");
+@@ -482,16 +502,18 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+ */
+ int signal_handler_register(void)
+ {
+- struct sigaction sigact;
++ struct sigaction sigact = {};
+ int ret = 0;
+
++ bm_pid = 0;
++
+ sigact.sa_sigaction = ctrlc_handler;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGINT, &sigact, NULL) ||
+ sigaction(SIGTERM, &sigact, NULL) ||
+ sigaction(SIGHUP, &sigact, NULL)) {
+- perror("# sigaction");
++ ksft_perror("sigaction");
+ ret = -1;
+ }
+ return ret;
+@@ -504,14 +526,14 @@ int signal_handler_register(void)
+ */
+ void signal_handler_unregister(void)
+ {
+- struct sigaction sigact;
++ struct sigaction sigact = {};
+
+ sigact.sa_handler = SIG_DFL;
+ sigemptyset(&sigact.sa_mask);
+ if (sigaction(SIGINT, &sigact, NULL) ||
+ sigaction(SIGTERM, &sigact, NULL) ||
+ sigaction(SIGHUP, &sigact, NULL)) {
+- perror("# sigaction");
++ ksft_perror("sigaction");
+ }
+ }
+
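Both handlers above now start from struct sigaction sigact = {}; an uninitialised sigaction on the stack carries indeterminate sa_flags and sa_mask, so registering it is undefined behaviour. A compact illustration (the empty initialiser is a GNU/C23 extension, used here as in the patch):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_int(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "caught\n", 7);	/* async-signal-safe */
}

int main(void)
{
	struct sigaction sa = {};	/* sa_flags and friends start at 0 */

	sa.sa_handler = on_int;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGINT, &sa, NULL)) {
		perror("sigaction");
		return 1;
	}
	raise(SIGINT);
	return 0;
}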
+@@ -536,14 +558,14 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc,
+ } else {
+ fp = fopen(filename, "a");
+ if (!fp) {
+- perror("Cannot open results file");
++ ksft_perror("Cannot open results file");
+
+ return errno;
+ }
+ if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
+ bm_pid, bw_imc, bw_resc, diff) <= 0) {
++ ksft_print_msg("Could not log results\n");
+ fclose(fp);
+- perror("Could not log results.");
+
+ return errno;
+ }
+@@ -581,7 +603,7 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
+ int resource_id;
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+- perror("# Unable to resource_id");
++ ksft_print_msg("Could not get resource_id\n");
+ return;
+ }
+
+@@ -621,6 +643,61 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ return 0;
+ }
+
++/*
++ * run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
++ * in specified signal. Direct benchmark stdio to /dev/null.
++ * @signum: signal number
++ * @info: signal info
++ * @ucontext: user context in signal handling
++ */
++static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
++{
++ int operation, ret, memflush;
++ char **benchmark_cmd;
++ size_t span;
++ bool once;
++ FILE *fp;
++
++ benchmark_cmd = info->si_ptr;
++
++ /*
++ * Direct stdio of child to /dev/null, so that only parent writes to
++ * stdio (console)
++ */
++ fp = freopen("/dev/null", "w", stdout);
++ if (!fp) {
++ ksft_perror("Unable to direct benchmark status to /dev/null");
++ PARENT_EXIT();
++ }
++
++ if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
++ /* Execute default fill_buf benchmark */
++ span = strtoul(benchmark_cmd[1], NULL, 10);
++ memflush = atoi(benchmark_cmd[2]);
++ operation = atoi(benchmark_cmd[3]);
++ if (!strcmp(benchmark_cmd[4], "true")) {
++ once = true;
++ } else if (!strcmp(benchmark_cmd[4], "false")) {
++ once = false;
++ } else {
++ ksft_print_msg("Invalid once parameter\n");
++ PARENT_EXIT();
++ }
++
++ if (run_fill_buf(span, memflush, operation, once))
++ fprintf(stderr, "Error in running fill buffer\n");
++ } else {
++ /* Execute specified benchmark */
++ ret = execvp(benchmark_cmd[0], benchmark_cmd);
++ if (ret)
++ ksft_perror("execvp");
++ }
++
++ fclose(stdout);
++ ksft_print_msg("Unable to run specified benchmark\n");
++ PARENT_EXIT();
++}
++
+ /*
+ * resctrl_val: execute benchmark and measure memory bandwidth on
+ * the benchmark
+@@ -629,7 +706,7 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+-int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
++int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param)
+ {
+ char *resctrl_val = param->resctrl_val;
+ unsigned long bw_resc_start = 0;
+@@ -655,7 +732,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ ppid = getpid();
+
+ if (pipe(pipefd)) {
+- perror("# Unable to create pipe");
++ ksft_perror("Unable to create pipe");
+
+ return -1;
+ }
+@@ -667,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ fflush(stdout);
+ bm_pid = fork();
+ if (bm_pid == -1) {
+- perror("# Unable to fork");
++ ksft_perror("Unable to fork");
+
+ return -1;
+ }
+@@ -684,15 +761,17 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ sigact.sa_flags = SA_SIGINFO;
+
+ /* Register for "SIGUSR1" signal from parent */
+- if (sigaction(SIGUSR1, &sigact, NULL))
+- PARENT_EXIT("Can't register child for signal");
++ if (sigaction(SIGUSR1, &sigact, NULL)) {
++ ksft_perror("Can't register child for signal");
++ PARENT_EXIT();
++ }
+
+ /* Tell parent that child is ready */
+ close(pipefd[0]);
+ pipe_message = 1;
+ if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
+ sizeof(pipe_message)) {
+- perror("# failed signaling parent process");
++ ksft_perror("Failed signaling parent process");
+ close(pipefd[1]);
+ return -1;
+ }
+@@ -701,33 +780,36 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ /* Suspend child until delivery of "SIGUSR1" from parent */
+ sigsuspend(&sigact.sa_mask);
+
+- PARENT_EXIT("Child is done");
++ ksft_perror("Child is done");
++ PARENT_EXIT();
+ }
+
+ ksft_print_msg("Benchmark PID: %d\n", bm_pid);
+
+- ret = signal_handler_register();
+- if (ret)
+- goto out;
+-
+- value.sival_ptr = benchmark_cmd;
++ /*
++ * The cast removes constness but nothing mutates benchmark_cmd within
++ * the context of this process. At the receiving process, it becomes
++ * argv, which is mutable, on exec() but that's after fork() so it
++ * doesn't matter for the process running the tests.
++ */
++ value.sival_ptr = (void *)benchmark_cmd;
+
+ /* Taskset benchmark to specified cpu */
+ ret = taskset_benchmark(bm_pid, param->cpu_no);
+ if (ret)
+- goto unregister;
++ goto out;
+
+ /* Write benchmark to specified control&monitoring grp in resctrl FS */
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
+ resctrl_val);
+ if (ret)
+- goto unregister;
++ goto out;
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ ret = initialize_mem_bw_imc();
+ if (ret)
+- goto unregister;
++ goto out;
+
+ initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
+ param->cpu_no, resctrl_val);
+@@ -740,18 +822,18 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ while (pipe_message != 1) {
+ if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) <
+ sizeof(pipe_message)) {
+- perror("# failed reading message from child process");
++ ksft_perror("Failed reading message from child process");
+ close(pipefd[0]);
+- goto unregister;
++ goto out;
+ }
+ }
+ close(pipefd[0]);
+
+ /* Signal child to start benchmark */
+ if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
+- perror("# sigqueue SIGUSR1 to child");
++ ksft_perror("sigqueue SIGUSR1 to child");
+ ret = errno;
+- goto unregister;
++ goto out;
+ }
+
+ /* Give benchmark enough time to fully run */
+@@ -780,8 +862,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ }
+ }
+
+-unregister:
+- signal_handler_unregister();
+ out:
+ kill(bm_pid, SIGKILL);
+
+diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
+index bd36ee20660207..71ad2b335b83fc 100644
+--- a/tools/testing/selftests/resctrl/resctrlfs.c
++++ b/tools/testing/selftests/resctrl/resctrlfs.c
+@@ -8,6 +8,8 @@
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
++#include <limits.h>
++
+ #include "resctrl.h"
+
+ static int find_resctrl_mount(char *buffer)
+@@ -17,7 +19,7 @@ static int find_resctrl_mount(char *buffer)
+
+ mounts = fopen("/proc/mounts", "r");
+ if (!mounts) {
+- perror("/proc/mounts");
++ ksft_perror("/proc/mounts");
+ return -ENXIO;
+ }
+ while (!feof(mounts)) {
+@@ -66,7 +68,7 @@ int mount_resctrlfs(void)
+ ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
+ ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
+ if (ret)
+- perror("# mount");
++ ksft_perror("mount");
+
+ return ret;
+ }
+@@ -83,7 +85,7 @@ int umount_resctrlfs(void)
+ return ret;
+
+ if (umount(mountpoint)) {
+- perror("# Unable to umount resctrl");
++ ksft_perror("Unable to umount resctrl");
+
+ return errno;
+ }
+@@ -112,12 +114,12 @@ int get_resource_id(int cpu_no, int *resource_id)
+
+ fp = fopen(phys_pkg_path, "r");
+ if (!fp) {
+- perror("Failed to open physical_package_id");
++ ksft_perror("Failed to open physical_package_id");
+
+ return -1;
+ }
+ if (fscanf(fp, "%d", resource_id) <= 0) {
+- perror("Could not get socket number or l3 id");
++ ksft_perror("Could not get socket number or l3 id");
+ fclose(fp);
+
+ return -1;
+@@ -146,7 +148,7 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
+ } else if (!strcmp(cache_type, "L2")) {
+ cache_num = 2;
+ } else {
+- perror("Invalid cache level");
++ ksft_print_msg("Invalid cache level\n");
+ return -1;
+ }
+
+@@ -154,12 +156,12 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
+ cpu_no, cache_num);
+ fp = fopen(cache_path, "r");
+ if (!fp) {
+- perror("Failed to open cache size");
++ ksft_perror("Failed to open cache size");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cache_str) <= 0) {
+- perror("Could not get cache_size");
++ ksft_perror("Could not get cache_size");
+ fclose(fp);
+
+ return -1;
+@@ -211,12 +213,12 @@ int get_cbm_mask(char *cache_type, char *cbm_mask)
+
+ fp = fopen(cbm_mask_path, "r");
+ if (!fp) {
+- perror("Failed to open cache level");
++ ksft_perror("Failed to open cache level");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cbm_mask) <= 0) {
+- perror("Could not get max cbm_mask");
++ ksft_perror("Could not get max cbm_mask");
+ fclose(fp);
+
+ return -1;
+@@ -243,12 +245,12 @@ int get_core_sibling(int cpu_no)
+
+ fp = fopen(core_siblings_path, "r");
+ if (!fp) {
+- perror("Failed to open core siblings path");
++ ksft_perror("Failed to open core siblings path");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cpu_list_str) <= 0) {
+- perror("Could not get core_siblings list");
++ ksft_perror("Could not get core_siblings list");
+ fclose(fp);
+
+ return -1;
+@@ -283,7 +285,7 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
+ CPU_SET(cpu_no, &my_set);
+
+ if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) {
+- perror("Unable to taskset benchmark");
++ ksft_perror("Unable to taskset benchmark");
+
+ return -1;
+ }
+@@ -291,58 +293,6 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
+ return 0;
+ }
+
+-/*
+- * run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
+- * in specified signal. Direct benchmark stdio to /dev/null.
+- * @signum: signal number
+- * @info: signal info
+- * @ucontext: user context in signal handling
+- *
+- * Return: void
+- */
+-void run_benchmark(int signum, siginfo_t *info, void *ucontext)
+-{
+- int operation, ret, memflush;
+- char **benchmark_cmd;
+- size_t span;
+- bool once;
+- FILE *fp;
+-
+- benchmark_cmd = info->si_ptr;
+-
+- /*
+- * Direct stdio of child to /dev/null, so that only parent writes to
+- * stdio (console)
+- */
+- fp = freopen("/dev/null", "w", stdout);
+- if (!fp)
+- PARENT_EXIT("Unable to direct benchmark status to /dev/null");
+-
+- if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
+- /* Execute default fill_buf benchmark */
+- span = strtoul(benchmark_cmd[1], NULL, 10);
+- memflush = atoi(benchmark_cmd[2]);
+- operation = atoi(benchmark_cmd[3]);
+- if (!strcmp(benchmark_cmd[4], "true"))
+- once = true;
+- else if (!strcmp(benchmark_cmd[4], "false"))
+- once = false;
+- else
+- PARENT_EXIT("Invalid once parameter");
+-
+- if (run_fill_buf(span, memflush, operation, once))
+- fprintf(stderr, "Error in running fill buffer\n");
+- } else {
+- /* Execute specified benchmark */
+- ret = execvp(benchmark_cmd[0], benchmark_cmd);
+- if (ret)
+- perror("wrong\n");
+- }
+-
+- fclose(stdout);
+- PARENT_EXIT("Unable to run specified benchmark");
+-}
+-
+ /*
+ * create_grp - Create a group only if one doesn't exist
+ * @grp_name: Name of the group
+@@ -374,7 +324,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
+ }
+ closedir(dp);
+ } else {
+- perror("Unable to open resctrl for group");
++ ksft_perror("Unable to open resctrl for group");
+
+ return -1;
+ }
+@@ -382,7 +332,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
+ /* Requested grp doesn't exist, hence create it */
+ if (found_grp == 0) {
+ if (mkdir(grp, 0) == -1) {
+- perror("Unable to create group");
++ ksft_perror("Unable to create group");
+
+ return -1;
+ }
+@@ -397,12 +347,12 @@ static int write_pid_to_tasks(char *tasks, pid_t pid)
+
+ fp = fopen(tasks, "w");
+ if (!fp) {
+- perror("Failed to open tasks file");
++ ksft_perror("Failed to open tasks file");
+
+ return -1;
+ }
+ if (fprintf(fp, "%d\n", pid) < 0) {
+- perror("Failed to wr pid to tasks file");
++ ksft_print_msg("Failed to write pid to tasks file\n");
+ fclose(fp);
+
+ return -1;
+@@ -469,7 +419,7 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ out:
+ ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
+ if (ret)
+- perror("# writing to resctrlfs");
++ ksft_print_msg("Failed writing to resctrlfs\n");
+
+ return ret;
+ }
+@@ -604,63 +554,46 @@ char *fgrep(FILE *inf, const char *str)
+
+ /*
+ * validate_resctrl_feature_request - Check if requested feature is valid.
+- * @resctrl_val: Requested feature
++ * @resource: Required resource (e.g., MB, L3, L2, L3_MON, etc.)
++ * @feature: Required monitor feature (in mon_features file). Can only be
++ * set for L3_MON. Must be NULL for all other resources.
+ *
+- * Return: True if the feature is supported, else false. False is also
+- * returned if resctrl FS is not mounted.
++ * Return: True if the resource/feature is supported, else false. False is
++ * also returned if resctrl FS is not mounted.
+ */
+-bool validate_resctrl_feature_request(const char *resctrl_val)
++bool validate_resctrl_feature_request(const char *resource, const char *feature)
+ {
++ char res_path[PATH_MAX];
+ struct stat statbuf;
+- bool found = false;
+ char *res;
+ FILE *inf;
+ int ret;
+
+- if (!resctrl_val)
++ if (!resource)
+ return false;
+
+ ret = find_resctrl_mount(NULL);
+ if (ret)
+ return false;
+
+- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+- if (!stat(L3_PATH, &statbuf))
+- return true;
+- } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+- if (!stat(MB_PATH, &statbuf))
+- return true;
+- } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+- !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+- if (!stat(L3_MON_PATH, &statbuf)) {
+- inf = fopen(L3_MON_FEATURES_PATH, "r");
+- if (!inf)
+- return false;
+-
+- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+- res = fgrep(inf, "llc_occupancy");
+- if (res) {
+- found = true;
+- free(res);
+- }
+- }
+-
+- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+- res = fgrep(inf, "mbm_total_bytes");
+- if (res) {
+- free(res);
+- res = fgrep(inf, "mbm_local_bytes");
+- if (res) {
+- found = true;
+- free(res);
+- }
+- }
+- }
+- fclose(inf);
+- }
+- }
++ snprintf(res_path, sizeof(res_path), "%s/%s", INFO_PATH, resource);
++
++ if (stat(res_path, &statbuf))
++ return false;
++
++ if (!feature)
++ return true;
++
++ snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
++ inf = fopen(res_path, "r");
++ if (!inf)
++ return false;
++
++ res = fgrep(inf, feature);
++ free(res);
++ fclose(inf);
+
+- return found;
++ return !!res;
+ }
+
+ int filter_dmesg(void)
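The rewritten validate_resctrl_feature_request() collapses four feature-specific branches into one generic check: stat() the resource directory under info/, then scan mon_features for the requested token. A hedged re-implementation of that flow outside the selftest harness (the base path mirrors the usual /sys/fs/resctrl mount but is an assumption here, and strstr() stands in for the harness's fgrep()):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static bool has_feature(const char *base, const char *resource,
			const char *feature)
{
	char path[PATH_MAX];
	char line[256];
	struct stat sb;
	bool found = false;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, resource);
	if (stat(path, &sb))
		return false;	/* resource directory absent */
	if (!feature)
		return true;	/* only the resource was required */

	snprintf(path, sizeof(path), "%s/%s/mon_features", base, resource);
	f = fopen(path, "r");
	if (!f)
		return false;
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, feature)) {
			found = true;
			break;
		}
	}
	fclose(f);
	return found;
}

int main(void)
{
	printf("%d\n", has_feature("/sys/fs/resctrl/info", "L3_MON",
				   "llc_occupancy"));
	return 0;
}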
+@@ -673,7 +606,7 @@ int filter_dmesg(void)
+
+ ret = pipe(pipefds);
+ if (ret) {
+- perror("pipe");
++ ksft_perror("pipe");
+ return ret;
+ }
+ fflush(stdout);
+@@ -682,13 +615,13 @@ int filter_dmesg(void)
+ close(pipefds[0]);
+ dup2(pipefds[1], STDOUT_FILENO);
+ execlp("dmesg", "dmesg", NULL);
+- perror("executing dmesg");
++ ksft_perror("Executing dmesg");
+ exit(1);
+ }
+ close(pipefds[1]);
+ fp = fdopen(pipefds[0], "r");
+ if (!fp) {
+- perror("fdopen(pipe)");
++ ksft_perror("fdopen(pipe)");
+ kill(pid, SIGTERM);
+
+ return -1;
+diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
+index 9b8434f62f570d..2e0db9c5be6c33 100644
+--- a/tools/testing/selftests/riscv/mm/mmap_test.h
++++ b/tools/testing/selftests/riscv/mm/mmap_test.h
+@@ -18,6 +18,8 @@ struct addresses {
+ int *on_56_addr;
+ };
+
++// Only works on 64 bit
++#if __riscv_xlen == 64
+ static inline void do_mmaps(struct addresses *mmap_addresses)
+ {
+ /*
+@@ -50,6 +52,7 @@ static inline void do_mmaps(struct addresses *mmap_addresses)
+ mmap_addresses->on_56_addr =
+ mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ }
++#endif /* __riscv_xlen == 64 */
+
+ static inline int memory_layout(void)
+ {
+diff --git a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
+index 66764edb0d5268..1dd94197da30cc 100644
+--- a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
++++ b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
+@@ -27,7 +27,7 @@ int main(void)
+
+ datap = malloc(MAX_VSIZE);
+ if (!datap) {
+- ksft_test_result_fail("fail to allocate memory for size = %lu\n", MAX_VSIZE);
++ ksft_test_result_fail("fail to allocate memory for size = %d\n", MAX_VSIZE);
+ exit(-1);
+ }
+
+diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c
+index b348b475be570c..8ad94e08ff4d07 100644
+--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c
++++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c
+@@ -68,7 +68,7 @@ int test_and_compare_child(long provided, long expected, int inherit)
+ }
+ rc = launch_test(inherit);
+ if (rc != expected) {
+- ksft_test_result_fail("Test failed, check %d != %d\n", rc,
++ ksft_test_result_fail("Test failed, check %d != %ld\n", rc,
+ expected);
+ return -2;
+ }
+@@ -87,7 +87,7 @@ int main(void)
+ pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
+ rc = riscv_hwprobe(&pair, 1, 0, NULL, 0);
+ if (rc < 0) {
+- ksft_test_result_fail("hwprobe() failed with %d\n", rc);
++ ksft_test_result_fail("hwprobe() failed with %ld\n", rc);
+ return -1;
+ }
+
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 96e812bdf8a45c..5b9772cdf2651b 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -60,12 +60,6 @@ unsigned int rseq_size = -1U;
+ /* Flags used during rseq registration. */
+ unsigned int rseq_flags;
+
+-/*
+- * rseq feature size supported by the kernel. 0 if the registration was
+- * unsuccessful.
+- */
+-unsigned int rseq_feature_size = -1U;
+-
+ static int rseq_ownership;
+ static int rseq_reg_success; /* At least one rseq registration has succeeded. */
+
+@@ -111,6 +105,43 @@ int rseq_available(void)
+ }
+ }
+
++/* The rseq areas need to be at least 32 bytes. */
++static
++unsigned int get_rseq_min_alloc_size(void)
++{
++ unsigned int alloc_size = rseq_size;
++
++ if (alloc_size < ORIG_RSEQ_ALLOC_SIZE)
++ alloc_size = ORIG_RSEQ_ALLOC_SIZE;
++ return alloc_size;
++}
++
++/*
++ * Return the feature size supported by the kernel.
++ *
++ * Depending on the value returned by getauxval(AT_RSEQ_FEATURE_SIZE):
++ *
++ * 0: Return ORIG_RSEQ_FEATURE_SIZE (20)
++ * > 0: Return the value from getauxval(AT_RSEQ_FEATURE_SIZE).
++ *
++ * It should never return a value below ORIG_RSEQ_FEATURE_SIZE.
++ */
++static
++unsigned int get_rseq_kernel_feature_size(void)
++{
++ unsigned long auxv_rseq_feature_size, auxv_rseq_align;
++
++ auxv_rseq_align = getauxval(AT_RSEQ_ALIGN);
++ assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE);
++
++ auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE);
++ assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE);
++ if (auxv_rseq_feature_size)
++ return auxv_rseq_feature_size;
++ else
++ return ORIG_RSEQ_FEATURE_SIZE;
++}
++
+ int rseq_register_current_thread(void)
+ {
+ int rc;
+@@ -119,7 +150,7 @@ int rseq_register_current_thread(void)
+ /* Treat libc's ownership as a successful registration. */
+ return 0;
+ }
+- rc = sys_rseq(&__rseq_abi, rseq_size, 0, RSEQ_SIG);
++ rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
+ if (rc) {
+ if (RSEQ_READ_ONCE(rseq_reg_success)) {
+ /* Incoherent success/failure within process. */
+@@ -140,28 +171,12 @@ int rseq_unregister_current_thread(void)
+ /* Treat libc's ownership as a successful unregistration. */
+ return 0;
+ }
+- rc = sys_rseq(&__rseq_abi, rseq_size, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
++ rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
+ if (rc)
+ return -1;
+ return 0;
+ }
+
+-static
+-unsigned int get_rseq_feature_size(void)
+-{
+- unsigned long auxv_rseq_feature_size, auxv_rseq_align;
+-
+- auxv_rseq_align = getauxval(AT_RSEQ_ALIGN);
+- assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE);
+-
+- auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE);
+- assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE);
+- if (auxv_rseq_feature_size)
+- return auxv_rseq_feature_size;
+- else
+- return ORIG_RSEQ_FEATURE_SIZE;
+-}
+-
+ static __attribute__((constructor))
+ void rseq_init(void)
+ {
+@@ -178,28 +193,54 @@ void rseq_init(void)
+ }
+ if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
+ *libc_rseq_size_p != 0) {
++ unsigned int libc_rseq_size;
++
+ /* rseq registration owned by glibc */
+ rseq_offset = *libc_rseq_offset_p;
+- rseq_size = *libc_rseq_size_p;
++ libc_rseq_size = *libc_rseq_size_p;
+ rseq_flags = *libc_rseq_flags_p;
+- rseq_feature_size = get_rseq_feature_size();
+- if (rseq_feature_size > rseq_size)
+- rseq_feature_size = rseq_size;
++
++ /*
++ * Previous versions of glibc expose the value
++ * 32 even though the kernel only supported 20
++ * bytes initially. Therefore treat 32 as a
++ * special-case. glibc 2.40 exposes a 20 bytes
++ * __rseq_size without using getauxval(3) to
++ * query the supported size, while still allocating a 32
++ * bytes area. Also treat 20 as a special-case.
++ *
++ * Special-cases are handled by using the following
++ * value as active feature set size:
++ *
++ * rseq_size = min(32, get_rseq_kernel_feature_size())
++ */
++ switch (libc_rseq_size) {
++ case ORIG_RSEQ_FEATURE_SIZE:
++ fallthrough;
++ case ORIG_RSEQ_ALLOC_SIZE:
++ {
++ unsigned int rseq_kernel_feature_size = get_rseq_kernel_feature_size();
++
++ if (rseq_kernel_feature_size < ORIG_RSEQ_ALLOC_SIZE)
++ rseq_size = rseq_kernel_feature_size;
++ else
++ rseq_size = ORIG_RSEQ_ALLOC_SIZE;
++ break;
++ }
++ default:
++ /* Otherwise just use the __rseq_size from libc as rseq_size. */
++ rseq_size = libc_rseq_size;
++ break;
++ }
+ return;
+ }
+ rseq_ownership = 1;
+ if (!rseq_available()) {
+ rseq_size = 0;
+- rseq_feature_size = 0;
+ return;
+ }
+ rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer();
+ rseq_flags = 0;
+- rseq_feature_size = get_rseq_feature_size();
+- if (rseq_feature_size == ORIG_RSEQ_FEATURE_SIZE)
+- rseq_size = ORIG_RSEQ_ALLOC_SIZE;
+- else
+- rseq_size = RSEQ_THREAD_AREA_ALLOC_SIZE;
+ }
+
+ static __attribute__((destructor))
+@@ -209,7 +250,6 @@ void rseq_exit(void)
+ return;
+ rseq_offset = 0;
+ rseq_size = -1U;
+- rseq_feature_size = -1U;
+ rseq_ownership = 0;
+ }
+
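get_rseq_kernel_feature_size() above is built on two aux-vector entries. A standalone probe showing the same query; AT_RSEQ_FEATURE_SIZE (27) only appears in recent headers, so a fallback define is supplied, and 20 is the original feature size the kernel supported before the extensible rseq ABI:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_RSEQ_FEATURE_SIZE
#define AT_RSEQ_FEATURE_SIZE	27	/* per linux/auxvec.h */
#endif
#define ORIG_RSEQ_FEATURE_SIZE	20

int main(void)
{
	unsigned long sz = getauxval(AT_RSEQ_FEATURE_SIZE);

	/* 0 means the kernel predates the entry: assume the original size. */
	printf("rseq feature size: %lu\n",
	       sz ? sz : (unsigned long)ORIG_RSEQ_FEATURE_SIZE);
	return 0;
}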
+diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
+index d7364ea4d201d2..4e217b620e0c7a 100644
+--- a/tools/testing/selftests/rseq/rseq.h
++++ b/tools/testing/selftests/rseq/rseq.h
+@@ -68,12 +68,6 @@ extern unsigned int rseq_size;
+ /* Flags used during rseq registration. */
+ extern unsigned int rseq_flags;
+
+-/*
+- * rseq feature size supported by the kernel. 0 if the registration was
+- * unsuccessful.
+- */
+-extern unsigned int rseq_feature_size;
+-
+ enum rseq_mo {
+ RSEQ_MO_RELAXED = 0,
+ RSEQ_MO_CONSUME = 1, /* Unused */
+@@ -193,7 +187,7 @@ static inline uint32_t rseq_current_cpu(void)
+
+ static inline bool rseq_node_id_available(void)
+ {
+- return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, node_id);
++ return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, node_id);
+ }
+
+ /*
+@@ -207,7 +201,7 @@ static inline uint32_t rseq_current_node_id(void)
+
+ static inline bool rseq_mm_cid_available(void)
+ {
+- return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, mm_cid);
++ return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, mm_cid);
+ }
+
+ static inline uint32_t rseq_current_mm_cid(void)
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 38f6514699682b..cacf6507f69055 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -784,7 +784,7 @@ void *kill_thread(void *data)
+ bool die = (bool)data;
+
+ if (die) {
+- prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
++ syscall(__NR_getpid);
+ return (void *)SIBLING_EXIT_FAILURE;
+ }
+
+@@ -803,11 +803,11 @@ void kill_thread_or_group(struct __test_metadata *_metadata,
+ {
+ pthread_t thread;
+ void *status;
+- /* Kill only when calling __NR_prctl. */
++ /* Kill only when calling __NR_getpid. */
+ struct sock_filter filter_thread[] = {
+ BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+ offsetof(struct seccomp_data, nr)),
+- BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
++ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+@@ -819,7 +819,7 @@ void kill_thread_or_group(struct __test_metadata *_metadata,
+ struct sock_filter filter_process[] = {
+ BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+ offsetof(struct seccomp_data, nr)),
+- BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
++ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
+ BPF_STMT(BPF_RET|BPF_K, kill),
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+@@ -3709,7 +3709,12 @@ TEST(user_notification_sibling_pid_ns)
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+- ASSERT_EQ(unshare(CLONE_NEWPID), 0);
++ ASSERT_EQ(unshare(CLONE_NEWPID), 0) {
++ if (errno == EPERM)
++ SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN");
++ else if (errno == EINVAL)
++ SKIP(return, "CLONE_NEWPID is invalid (missing CONFIG_PID_NS?)");
++ }
+
+ pid2 = fork();
+ ASSERT_GE(pid2, 0);
+@@ -3727,6 +3732,8 @@ TEST(user_notification_sibling_pid_ns)
+ ASSERT_EQ(unshare(CLONE_NEWPID), 0) {
+ if (errno == EPERM)
+ SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN");
++ else if (errno == EINVAL)
++ SKIP(return, "CLONE_NEWPID is invalid (missing CONFIG_PID_NS?)");
+ }
+ ASSERT_EQ(errno, 0);
+
+@@ -4037,6 +4044,16 @@ TEST(user_notification_filter_empty_threaded)
+ EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
+ }
+
++
++int get_next_fd(int prev_fd)
++{
++ for (int i = prev_fd + 1; i < FD_SETSIZE; ++i) {
++ if (fcntl(i, F_GETFD) == -1)
++ return i;
++ }
++ _exit(EXIT_FAILURE);
++}
++
+ TEST(user_notification_addfd)
+ {
+ pid_t pid;
+@@ -4053,7 +4070,7 @@ TEST(user_notification_addfd)
+ /* There may be arbitrary already-open fds at test start. */
+ memfd = memfd_create("test", 0);
+ ASSERT_GE(memfd, 0);
+- nextfd = memfd + 1;
++ nextfd = get_next_fd(memfd);
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+@@ -4064,7 +4081,8 @@ TEST(user_notification_addfd)
+ /* Check that the basic notification machinery works */
+ listener = user_notif_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
+- ASSERT_EQ(listener, nextfd++);
++ ASSERT_EQ(listener, nextfd);
++ nextfd = get_next_fd(nextfd);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+@@ -4119,14 +4137,16 @@ TEST(user_notification_addfd)
+
+ /* Verify we can set an arbitrary remote fd */
+ fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
+- EXPECT_EQ(fd, nextfd++);
++ EXPECT_EQ(fd, nextfd);
++ nextfd = get_next_fd(nextfd);
+ EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
+
+ /* Verify we can set an arbitrary remote fd with large size */
+ memset(&big, 0x0, sizeof(big));
+ big.addfd = addfd;
+ fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
+- EXPECT_EQ(fd, nextfd++);
++ EXPECT_EQ(fd, nextfd);
++ nextfd = get_next_fd(nextfd);
+
+ /* Verify we can set a specific remote fd */
+ addfd.newfd = 42;
+@@ -4164,7 +4184,8 @@ TEST(user_notification_addfd)
+ * Child has earlier "low" fds and now 42, so we expect the next
+ * lowest available fd to be assigned here.
+ */
+- EXPECT_EQ(fd, nextfd++);
++ EXPECT_EQ(fd, nextfd);
++ nextfd = get_next_fd(nextfd);
+ ASSERT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
+
+ /*
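get_next_fd() above replaces the assumption that descriptors are handed out densely from memfd + 1. A runnable sketch of the probing idiom: fcntl(fd, F_GETFD) fails with EBADF exactly when the slot is free:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/select.h>		/* FD_SETSIZE */

static int get_next_fd(int prev_fd)
{
	for (int i = prev_fd + 1; i < FD_SETSIZE; ++i) {
		if (fcntl(i, F_GETFD) == -1)	/* EBADF: slot is free */
			return i;
	}
	exit(EXIT_FAILURE);
}

int main(void)
{
	/* Usually 3 after stdin/stdout/stderr, but inherited fds can
	 * shift it - the exact situation the patch accounts for. */
	printf("next free fd: %d\n", get_next_fd(2));
	return 0;
}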
+diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
+index 50aab6b57da34d..01abe4969b0f94 100644
+--- a/tools/testing/selftests/sgx/Makefile
++++ b/tools/testing/selftests/sgx/Makefile
+@@ -16,10 +16,10 @@ HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack
+ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
+ -fno-stack-protector -mrdrnd $(INCLUDES)
+
++ifeq ($(CAN_BUILD_X86_64), 1)
+ TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+ TEST_FILES := $(OUTPUT)/test_encl.elf
+
+-ifeq ($(CAN_BUILD_X86_64), 1)
+ all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
+ endif
+
+diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c
+index 94bdeac1cf041a..c9f658e44de6c1 100644
+--- a/tools/testing/selftests/sgx/load.c
++++ b/tools/testing/selftests/sgx/load.c
+@@ -136,11 +136,11 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
+ */
+ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
+ {
++ Elf64_Sym *symtab = NULL;
++ char *sym_names = NULL;
+ Elf64_Shdr *sections;
+- Elf64_Sym *symtab;
+ Elf64_Ehdr *ehdr;
+- char *sym_names;
+- int num_sym;
++ int num_sym = 0;
+ int i;
+
+ ehdr = encl->bin;
+@@ -161,6 +161,9 @@ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
+ }
+ }
+
++ if (!symtab || !sym_names)
++ return 0;
++
+ for (i = 0; i < num_sym; i++) {
+ Elf64_Sym *sym = &symtab[i];
+
+diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c
+index a07896a463643d..d73b29becf5b01 100644
+--- a/tools/testing/selftests/sgx/sigstruct.c
++++ b/tools/testing/selftests/sgx/sigstruct.c
+@@ -318,9 +318,9 @@ bool encl_measure(struct encl *encl)
+ struct sgx_sigstruct *sigstruct = &encl->sigstruct;
+ struct sgx_sigstruct_payload payload;
+ uint8_t digest[SHA256_DIGEST_LENGTH];
++ EVP_MD_CTX *ctx = NULL;
+ unsigned int siglen;
+ RSA *key = NULL;
+- EVP_MD_CTX *ctx;
+ int i;
+
+ memset(sigstruct, 0, sizeof(*sigstruct));
+@@ -384,7 +384,8 @@ bool encl_measure(struct encl *encl)
+ return true;
+
+ err:
+- EVP_MD_CTX_destroy(ctx);
++ if (ctx)
++ EVP_MD_CTX_destroy(ctx);
+ RSA_free(key);
+ return false;
+ }
+diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c
+index c0d6397295e311..ae791df3e5a571 100644
+--- a/tools/testing/selftests/sgx/test_encl.c
++++ b/tools/testing/selftests/sgx/test_encl.c
+@@ -24,10 +24,11 @@ static void do_encl_emodpe(void *_op)
+ secinfo.flags = op->flags;
+
+ asm volatile(".byte 0x0f, 0x01, 0xd7"
+- :
++ : /* no outputs */
+ : "a" (EMODPE),
+ "b" (&secinfo),
+- "c" (op->epc_addr));
++ "c" (op->epc_addr)
++ : "memory" /* read from secinfo pointer */);
+ }
+
+ static void do_encl_eaccept(void *_op)
+@@ -42,7 +43,8 @@ static void do_encl_eaccept(void *_op)
+ : "=a" (rax)
+ : "a" (EACCEPT),
+ "b" (&secinfo),
+- "c" (op->epc_addr));
++ "c" (op->epc_addr)
++ : "memory" /* read from secinfo pointer */);
+
+ op->ret = rax;
+ }
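The "memory" clobber added to the EMODPE/EACCEPT asm tells the compiler the instruction reads memory through the secinfo pointer, so the stores that populate that structure cannot be reordered past the asm or dead-store eliminated. A portable toy version of the same constraint (an empty asm stands in for the ENCLU leaf):

#include <stdio.h>

int main(void)
{
	unsigned int secinfo[2] = { 0, 0 };

	secinfo[0] = 42;	/* store the asm depends on */

	/*
	 * The asm body is opaque to the compiler; the "memory" clobber
	 * forces the store above to complete before this point, which
	 * is what the patched SGX asm needs for its secinfo reads.
	 */
	asm volatile("" : : "r"(secinfo) : "memory");

	printf("%u\n", secinfo[0]);
	return 0;
}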
+diff --git a/tools/testing/selftests/sgx/test_encl.lds b/tools/testing/selftests/sgx/test_encl.lds
+index a1ec64f7d91fc5..108bc11d1d8c5f 100644
+--- a/tools/testing/selftests/sgx/test_encl.lds
++++ b/tools/testing/selftests/sgx/test_encl.lds
+@@ -34,8 +34,4 @@ SECTIONS
+ }
+ }
+
+-ASSERT(!DEFINED(.altinstructions), "ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.altinstr_replacement), "ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.discard.retpoline_safe), "RETPOLINE ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.discard.nospec), "RETPOLINE ALTERNATIVES are not supported in enclaves")
+-ASSERT(!DEFINED(.got.plt), "Libcalls are not supported in enclaves")
++ASSERT(!DEFINED(_GLOBAL_OFFSET_TABLE_), "Libcalls through GOT are not supported in enclaves")
+diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+index ea9bdf3a90b164..09da8f1011ce4c 100644
+--- a/tools/testing/selftests/sigaltstack/current_stack_pointer.h
++++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+@@ -8,7 +8,7 @@ register unsigned long sp asm("sp");
+ register unsigned long sp asm("esp");
+ #elif __loongarch64
+ register unsigned long sp asm("$sp");
+-#elif __ppc__
++#elif __powerpc__
+ register unsigned long sp asm("r1");
+ #elif __s390x__
+ register unsigned long sp asm("%15");
+diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+index b5d592d4099e85..d975a67673299f 100644
+--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
++++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+@@ -158,6 +158,20 @@ static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
+
+ /* In preparation for sigreturn. */
+ SYSCALL_DISPATCH_OFF(glob_sel);
++
++ /*
++ * The tests for argument handling assume that `syscall(x) == x`. This
++ * is a NOP on x86 because the syscall number is passed in %rax, which
++ * happens to also be the function ABI return register. Other
++ * architectures may need to swizzle the arguments around.
++ */
++#if defined(__riscv)
++/* REG_A7 is not defined in libc headers */
++# define REG_A7 (REG_A0 + 7)
++
++ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A0] =
++ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A7];
++#endif
+ }
+
+ TEST(dispatch_and_return)
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+index 0599635c4bc650..6a6f61ac485874 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+@@ -132,6 +132,50 @@
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
++ {
++ "id": "6f62",
++ "name": "Add taprio Qdisc with too short interval",
++ "category": [
++ "qdisc",
++ "taprio"
++ ],
++ "plugins": {
++ "requires": "nsPlugin"
++ },
++ "setup": [
++ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
++ ],
++ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
++ "expExitCode": "2",
++ "verifyCmd": "$TC qdisc show dev $ETH",
++ "matchPattern": "qdisc taprio 1: root refcnt",
++ "matchCount": "0",
++ "teardown": [
++ "echo \"1\" > /sys/bus/netdevsim/del_device"
++ ]
++ },
++ {
++ "id": "831f",
++ "name": "Add taprio Qdisc with too short cycle-time",
++ "category": [
++ "qdisc",
++ "taprio"
++ ],
++ "plugins": {
++ "requires": "nsPlugin"
++ },
++ "setup": [
++ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
++ ],
++ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
++ "expExitCode": "2",
++ "verifyCmd": "$TC qdisc show dev $ETH",
++ "matchPattern": "qdisc taprio 1: root refcnt",
++ "matchCount": "0",
++ "teardown": [
++ "echo \"1\" > /sys/bus/netdevsim/del_device"
++ ]
++ },
+ {
+ "id": "3e1e",
+ "name": "Add taprio Qdisc with an invalid cycle-time",
+diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
+index b98256f38447d3..8f969f54ddf40e 100755
+--- a/tools/testing/selftests/tc-testing/tdc.py
++++ b/tools/testing/selftests/tc-testing/tdc.py
+@@ -129,7 +129,6 @@ class PluginMgr:
+ except Exception as ee:
+ print('exception {} in call to pre_case for {} plugin'.
+ format(ee, pgn_inst.__class__))
+- print('test_ordinal is {}'.format(test_ordinal))
+ print('testid is {}'.format(caseinfo['id']))
+ raise
+
+diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c
+index e40dc5be2f6684..d12ff955de0d8f 100644
+--- a/tools/testing/selftests/timens/exec.c
++++ b/tools/testing/selftests/timens/exec.c
+@@ -30,7 +30,7 @@ int main(int argc, char *argv[])
+
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+- if (abs(tst.tv_sec - now.tv_sec) > 5)
++ if (labs(tst.tv_sec - now.tv_sec) > 5)
+ return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
+ }
+ return 0;
+@@ -50,7 +50,7 @@ int main(int argc, char *argv[])
+
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+- if (abs(tst.tv_sec - now.tv_sec) > 5)
++ if (labs(tst.tv_sec - now.tv_sec) > 5)
+ return pr_fail("%ld %ld\n",
+ now.tv_sec, tst.tv_sec);
+ }
+@@ -70,7 +70,7 @@ int main(int argc, char *argv[])
+ /* Check that a child process is in the new timens. */
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+- if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
++ if (labs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
+ return pr_fail("%ld %ld\n",
+ now.tv_sec + OFFSET, tst.tv_sec);
+ }
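The abs() to labs() conversions in the timens tests matter because tv_sec is a long: on LP64 targets abs() truncates its argument to int before taking the absolute value. A two-line demonstration (64-bit long assumed):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long diff = 0x100000001L;	/* only the low 32 bits are 1 */

	printf("abs:  %d\n", abs((int)diff));	/* 1: truncated, wrong */
	printf("labs: %ld\n", labs(diff));	/* 4294967297: correct */
	return 0;
}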
+diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
+index 5e7f0051bd7be1..5b939f59dfa4d6 100644
+--- a/tools/testing/selftests/timens/timer.c
++++ b/tools/testing/selftests/timens/timer.c
+@@ -56,7 +56,7 @@ int run_test(int clockid, struct timespec now)
+ return pr_perror("timerfd_gettime");
+
+ elapsed = new_value.it_value.tv_sec;
+- if (abs(elapsed - 3600) > 60) {
++ if (llabs(elapsed - 3600) > 60) {
+ ksft_test_result_fail("clockid: %d elapsed: %lld\n",
+ clockid, elapsed);
+ return 1;
+diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c
+index 9edd43d6b2c133..a4196bbd6e33f4 100644
+--- a/tools/testing/selftests/timens/timerfd.c
++++ b/tools/testing/selftests/timens/timerfd.c
+@@ -61,7 +61,7 @@ int run_test(int clockid, struct timespec now)
+ return pr_perror("timerfd_gettime(%d)", clockid);
+
+ elapsed = new_value.it_value.tv_sec;
+- if (abs(elapsed - 3600) > 60) {
++ if (llabs(elapsed - 3600) > 60) {
+ ksft_test_result_fail("clockid: %d elapsed: %lld\n",
+ clockid, elapsed);
+ return 1;
+diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
+index beb7614941fb1f..5b8907bf451dde 100644
+--- a/tools/testing/selftests/timens/vfork_exec.c
++++ b/tools/testing/selftests/timens/vfork_exec.c
+@@ -32,7 +32,7 @@ static void *tcheck(void *_args)
+
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+- if (abs(tst.tv_sec - now->tv_sec) > 5) {
++ if (labs(tst.tv_sec - now->tv_sec) > 5) {
+ pr_fail("%s: in-thread: unexpected value: %ld (%ld)\n",
+ args->tst_name, tst.tv_sec, now->tv_sec);
+ return (void *)1UL;
+@@ -64,7 +64,7 @@ static int check(char *tst_name, struct timespec *now)
+
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+- if (abs(tst.tv_sec - now->tv_sec) > 5)
++ if (labs(tst.tv_sec - now->tv_sec) > 5)
+ return pr_fail("%s: unexpected value: %ld (%ld)\n",
+ tst_name, tst.tv_sec, now->tv_sec);
+ }
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index 8a17c0e8d82b37..c001dd79179d5d 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
+ diff = end.tv_usec - start.tv_usec;
+ diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
+
+- if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
++ if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+ printf("Diff too high: %lld..", diff);
+ return -1;
+ }
+@@ -76,22 +76,21 @@ static int check_diff(struct timeval start, struct timeval end)
+
+ static int check_itimer(int which)
+ {
++ const char *name;
+ int err;
+ struct timeval start, end;
+ struct itimerval val = {
+ .it_value.tv_sec = DELAY,
+ };
+
+- printf("Check itimer ");
+-
+ if (which == ITIMER_VIRTUAL)
+- printf("virtual... ");
++ name = "ITIMER_VIRTUAL";
+ else if (which == ITIMER_PROF)
+- printf("prof... ");
++ name = "ITIMER_PROF";
+ else if (which == ITIMER_REAL)
+- printf("real... ");
+-
+- fflush(stdout);
++ name = "ITIMER_REAL";
++ else
++ return -1;
+
+ done = 0;
+
+@@ -104,13 +103,13 @@ static int check_itimer(int which)
+
+ err = gettimeofday(&start, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+ err = setitimer(which, &val, NULL);
+ if (err < 0) {
+- perror("Can't set timer\n");
++ ksft_perror("Can't set timer");
+ return -1;
+ }
+
+@@ -123,20 +122,18 @@ static int check_itimer(int which)
+
+ err = gettimeofday(&end, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+- if (!check_diff(start, end))
+- printf("[OK]\n");
+- else
+- printf("[FAIL]\n");
++ ksft_test_result(check_diff(start, end) == 0, "%s\n", name);
+
+ return 0;
+ }
+
+ static int check_timer_create(int which)
+ {
++ const char *type;
+ int err;
+ timer_t id;
+ struct timeval start, end;
+@@ -144,31 +141,32 @@ static int check_timer_create(int which)
+ .it_value.tv_sec = DELAY,
+ };
+
+- printf("Check timer_create() ");
+ if (which == CLOCK_THREAD_CPUTIME_ID) {
+- printf("per thread... ");
++ type = "thread";
+ } else if (which == CLOCK_PROCESS_CPUTIME_ID) {
+- printf("per process... ");
++ type = "process";
++ } else {
++ ksft_print_msg("Unknown timer_create() type %d\n", which);
++ return -1;
+ }
+- fflush(stdout);
+
+ done = 0;
+ err = timer_create(which, NULL, &id);
+ if (err < 0) {
+- perror("Can't create timer\n");
++ ksft_perror("Can't create timer");
+ return -1;
+ }
+ signal(SIGALRM, sig_handler);
+
+ err = gettimeofday(&start, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+ err = timer_settime(id, 0, &val, NULL);
+ if (err < 0) {
+- perror("Can't set timer\n");
++ ksft_perror("Can't set timer");
+ return -1;
+ }
+
+@@ -176,96 +174,90 @@ static int check_timer_create(int which)
+
+ err = gettimeofday(&end, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+- if (!check_diff(start, end))
+- printf("[OK]\n");
+- else
+- printf("[FAIL]\n");
++ ksft_test_result(check_diff(start, end) == 0,
++ "timer_create() per %s\n", type);
+
+ return 0;
+ }
+
+-int remain;
+-__thread int got_signal;
++static pthread_t ctd_thread;
++static volatile int ctd_count, ctd_failed;
+
+-static void *distribution_thread(void *arg)
++static void ctd_sighandler(int sig)
+ {
+- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
+- return NULL;
++ if (pthread_self() != ctd_thread)
++ ctd_failed = 1;
++ ctd_count--;
+ }
+
+-static void distribution_handler(int nr)
++static void *ctd_thread_func(void *arg)
+ {
+- if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED))
+- __atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED);
+-}
+-
+-/*
+- * Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID
+- * timer signals. This primarily tests that the kernel does not favour any one.
+- */
+-static int check_timer_distribution(void)
+-{
+- int err, i;
+- timer_t id;
+- const int nthreads = 10;
+- pthread_t threads[nthreads];
+ struct itimerspec val = {
+ .it_value.tv_sec = 0,
+ .it_value.tv_nsec = 1000 * 1000,
+ .it_interval.tv_sec = 0,
+ .it_interval.tv_nsec = 1000 * 1000,
+ };
++ timer_t id;
+
+- printf("Check timer_create() per process signal distribution... ");
+- fflush(stdout);
++ /* 1/10 seconds to ensure the leader sleeps */
++ usleep(10000);
+
+- remain = nthreads + 1; /* worker threads + this thread */
+- signal(SIGALRM, distribution_handler);
+- err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
+- if (err < 0) {
+- perror("Can't create timer\n");
+- return -1;
+- }
+- err = timer_settime(id, 0, &val, NULL);
+- if (err < 0) {
+- perror("Can't set timer\n");
+- return -1;
+- }
++ ctd_count = 100;
++ if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
++ return "Can't create timer\n";
++ if (timer_settime(id, 0, &val, NULL))
++ return "Can't set timer\n";
+
+- for (i = 0; i < nthreads; i++) {
+- if (pthread_create(&threads[i], NULL, distribution_thread, NULL)) {
+- perror("Can't create thread\n");
+- return -1;
+- }
+- }
++ while (ctd_count > 0 && !ctd_failed)
++ ;
+
+- /* Wait for all threads to receive the signal. */
+- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
++ if (timer_delete(id))
++ return "Can't delete timer\n";
+
+- for (i = 0; i < nthreads; i++) {
+- if (pthread_join(threads[i], NULL)) {
+- perror("Can't join thread\n");
+- return -1;
+- }
+- }
++ return NULL;
++}
+
+- if (timer_delete(id)) {
+- perror("Can't delete timer\n");
+- return -1;
+- }
++/*
++ * Test that only the running thread receives the timer signal.
++ */
++static int check_timer_distribution(void)
++{
++ const char *errmsg;
++
++ signal(SIGALRM, ctd_sighandler);
+
+- printf("[OK]\n");
++ errmsg = "Can't create thread\n";
++ if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
++ goto err;
++
++ errmsg = "Can't join thread\n";
++ if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg)
++ goto err;
++
++ if (!ctd_failed)
++ ksft_test_result_pass("check signal distribution\n");
++ else if (ksft_min_kernel_version(6, 3))
++ ksft_test_result_fail("check signal distribution\n");
++ else
++ ksft_test_result_skip("check signal distribution (old kernel)\n");
+ return 0;
++err:
++ ksft_print_msg("%s", errmsg);
++ return -1;
+ }
+
+ int main(int argc, char **argv)
+ {
+- printf("Testing posix timers. False negative may happen on CPU execution \n");
+- printf("based timers if other threads run on the CPU...\n");
++ ksft_print_header();
++ ksft_set_plan(6);
++
++ ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
++ ksft_print_msg("based timers if other threads run on the CPU...\n");
+
+ if (check_itimer(ITIMER_VIRTUAL) < 0)
+ return ksft_exit_fail();
+@@ -294,5 +286,5 @@ int main(int argc, char **argv)
+ if (check_timer_distribution() < 0)
+ return ksft_exit_fail();
+
+- return ksft_exit_pass();
++ ksft_finished();
+ }
+diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
+index 48b9a803235a80..d13ebde203221a 100644
+--- a/tools/testing/selftests/timers/valid-adjtimex.c
++++ b/tools/testing/selftests/timers/valid-adjtimex.c
+@@ -21,9 +21,6 @@
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+-
+-
+-
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <time.h>
+@@ -62,45 +59,47 @@ int clear_time_state(void)
+ #define NUM_FREQ_OUTOFRANGE 4
+ #define NUM_FREQ_INVALID 2
+
++#define SHIFTED_PPM (1 << 16)
++
+ long valid_freq[NUM_FREQ_VALID] = {
+- -499<<16,
+- -450<<16,
+- -400<<16,
+- -350<<16,
+- -300<<16,
+- -250<<16,
+- -200<<16,
+- -150<<16,
+- -100<<16,
+- -75<<16,
+- -50<<16,
+- -25<<16,
+- -10<<16,
+- -5<<16,
+- -1<<16,
++ -499 * SHIFTED_PPM,
++ -450 * SHIFTED_PPM,
++ -400 * SHIFTED_PPM,
++ -350 * SHIFTED_PPM,
++ -300 * SHIFTED_PPM,
++ -250 * SHIFTED_PPM,
++ -200 * SHIFTED_PPM,
++ -150 * SHIFTED_PPM,
++ -100 * SHIFTED_PPM,
++ -75 * SHIFTED_PPM,
++ -50 * SHIFTED_PPM,
++ -25 * SHIFTED_PPM,
++ -10 * SHIFTED_PPM,
++ -5 * SHIFTED_PPM,
++ -1 * SHIFTED_PPM,
+ -1000,
+- 1<<16,
+- 5<<16,
+- 10<<16,
+- 25<<16,
+- 50<<16,
+- 75<<16,
+- 100<<16,
+- 150<<16,
+- 200<<16,
+- 250<<16,
+- 300<<16,
+- 350<<16,
+- 400<<16,
+- 450<<16,
+- 499<<16,
++ 1 * SHIFTED_PPM,
++ 5 * SHIFTED_PPM,
++ 10 * SHIFTED_PPM,
++ 25 * SHIFTED_PPM,
++ 50 * SHIFTED_PPM,
++ 75 * SHIFTED_PPM,
++ 100 * SHIFTED_PPM,
++ 150 * SHIFTED_PPM,
++ 200 * SHIFTED_PPM,
++ 250 * SHIFTED_PPM,
++ 300 * SHIFTED_PPM,
++ 350 * SHIFTED_PPM,
++ 400 * SHIFTED_PPM,
++ 450 * SHIFTED_PPM,
++ 499 * SHIFTED_PPM,
+ };
+
+ long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
+- -1000<<16,
+- -550<<16,
+- 550<<16,
+- 1000<<16,
++ -1000 * SHIFTED_PPM,
++ -550 * SHIFTED_PPM,
++ 550 * SHIFTED_PPM,
++ 1000 * SHIFTED_PPM,
+ };
+
+ #define LONG_MAX (~0UL>>1)
+diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
+index 413f75620a35b4..7dd5668ea8a6e3 100644
+--- a/tools/testing/selftests/vDSO/parse_vdso.c
++++ b/tools/testing/selftests/vDSO/parse_vdso.c
+@@ -36,6 +36,12 @@
+ #define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+ #define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
++#ifdef __s390x__
++#define ELF_HASH_ENTRY ELF(Xword)
++#else
++#define ELF_HASH_ENTRY ELF(Word)
++#endif
++
+ static struct vdso_info
+ {
+ bool valid;
+@@ -47,22 +53,28 @@ static struct vdso_info
+ /* Symbol table */
+ ELF(Sym) *symtab;
+ const char *symstrings;
+- ELF(Word) *bucket, *chain;
+- ELF(Word) nbucket, nchain;
++ ELF_HASH_ENTRY *bucket, *chain;
++ ELF_HASH_ENTRY nbucket, nchain;
+
+ /* Version table */
+ ELF(Versym) *versym;
+ ELF(Verdef) *verdef;
+ } vdso_info;
+
+-/* Straight from the ELF specification. */
+-static unsigned long elf_hash(const unsigned char *name)
++/*
++ * Straight from the ELF specification...and then tweaked slightly, in order to
++ * avoid a few clang warnings.
++ */
++static unsigned long elf_hash(const char *name)
+ {
+ unsigned long h = 0, g;
+- while (*name)
++ const unsigned char *uch_name = (const unsigned char *)name;
++
++ while (*uch_name)
+ {
+- h = (h << 4) + *name++;
+- if (g = h & 0xf0000000)
++ h = (h << 4) + *uch_name++;
++ g = h & 0xf0000000;
++ if (g)
+ h ^= g >> 24;
+ h &= ~g;
+ }
+@@ -109,7 +121,7 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
+ /*
+ * Fish out the useful bits of the dynamic table.
+ */
+- ELF(Word) *hash = 0;
++ ELF_HASH_ENTRY *hash = 0;
+ vdso_info.symstrings = 0;
+ vdso_info.symtab = 0;
+ vdso_info.versym = 0;
+@@ -127,7 +139,7 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
+ + vdso_info.load_offset);
+ break;
+ case DT_HASH:
+- hash = (ELF(Word) *)
++ hash = (ELF_HASH_ENTRY *)
+ ((uintptr_t)dyn[i].d_un.d_ptr
+ + vdso_info.load_offset);
+ break;
+@@ -210,7 +222,8 @@ void *vdso_sym(const char *version, const char *name)
+ ELF(Sym) *sym = &vdso_info.symtab[chain];
+
+ /* Check for a defined global or weak function w/ right name. */
+- if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
++ if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC &&
++ ELF64_ST_TYPE(sym->st_info) != STT_NOTYPE)
+ continue;
+ if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
+ ELF64_ST_BIND(sym->st_info) != STB_WEAK)
+diff --git a/tools/testing/selftests/vDSO/vdso_config.h b/tools/testing/selftests/vDSO/vdso_config.h
+index cdfed403ba13f8..72de45f587b2c9 100644
+--- a/tools/testing/selftests/vDSO/vdso_config.h
++++ b/tools/testing/selftests/vDSO/vdso_config.h
+@@ -18,18 +18,18 @@
+ #elif defined(__aarch64__)
+ #define VDSO_VERSION 3
+ #define VDSO_NAMES 0
+-#elif defined(__powerpc__)
++#elif defined(__powerpc64__)
+ #define VDSO_VERSION 1
+ #define VDSO_NAMES 0
+-#define VDSO_32BIT 1
+-#elif defined(__powerpc64__)
++#elif defined(__powerpc__)
+ #define VDSO_VERSION 1
+ #define VDSO_NAMES 0
+-#elif defined (__s390__)
++#define VDSO_32BIT 1
++#elif defined (__s390__) && !defined(__s390x__)
+ #define VDSO_VERSION 2
+ #define VDSO_NAMES 0
+ #define VDSO_32BIT 1
+-#elif defined (__s390X__)
++#elif defined (__s390x__)
+ #define VDSO_VERSION 2
+ #define VDSO_NAMES 0
+ #elif defined(__mips__)
+diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+index 8a44ff973ee17b..27f6fdf119691e 100644
+--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
++++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+@@ -18,7 +18,7 @@
+
+ #include "parse_vdso.h"
+
+-/* We need a libc functions... */
++/* We need some libc functions... */
+ int strcmp(const char *a, const char *b)
+ {
+ /* This implementation is buggy: it never returns -1. */
+@@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b)
+ return 0;
+ }
+
++/*
++ * The clang build needs this, although gcc does not.
++ * Stolen from lib/string.c.
++ */
++void *memcpy(void *dest, const void *src, size_t count)
++{
++ char *tmp = dest;
++ const char *s = src;
++
++ while (count--)
++ *tmp++ = *s++;
++ return dest;
++}
++
+ /* ...and two syscalls. This is x86-specific. */
+ static inline long x86_syscall3(long nr, long a0, long a1, long a2)
+ {
+@@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n)
+ }
+ }
+
+-__attribute__((externally_visible)) void c_main(void **stack)
++void c_main(void **stack)
+ {
+ /* Parse the stack */
+ long argc = (long)*stack;
+diff --git a/tools/testing/selftests/vDSO/vdso_test_correctness.c b/tools/testing/selftests/vDSO/vdso_test_correctness.c
+index e691a3cf149112..cdb697ae8343cc 100644
+--- a/tools/testing/selftests/vDSO/vdso_test_correctness.c
++++ b/tools/testing/selftests/vDSO/vdso_test_correctness.c
+@@ -114,6 +114,12 @@ static void fill_function_pointers()
+ if (!vdso)
+ vdso = dlopen("linux-gate.so.1",
+ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
++ if (!vdso)
++ vdso = dlopen("linux-vdso32.so.1",
++ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
++ if (!vdso)
++ vdso = dlopen("linux-vdso64.so.1",
++ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso) {
+ printf("[WARN]\tfailed to find vDSO\n");
+ return;
+diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
+index e95bd56b332f75..35856b11c14350 100644
+--- a/tools/testing/selftests/wireguard/qemu/Makefile
++++ b/tools/testing/selftests/wireguard/qemu/Makefile
+@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(HOST_ARCH),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
++QEMU_MACHINE := -cpu max -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),i686)
+ CHOST := i686-linux-musl
+@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
++QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),mips64)
+ CHOST := mips64-linux-musl
+diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
+index 2fc36efb166dc9..a7f8e8a9562593 100644
+--- a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
++++ b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
+@@ -3,6 +3,7 @@ CONFIG_ARCH_RV32I=y
+ CONFIG_MMU=y
+ CONFIG_FPU=y
+ CONFIG_SOC_VIRT=y
++CONFIG_RISCV_ISA_FALLBACK=y
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_OF_PLATFORM=y
+diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv64.config b/tools/testing/selftests/wireguard/qemu/arch/riscv64.config
+index dc266f3b191557..daeb3e5e096585 100644
+--- a/tools/testing/selftests/wireguard/qemu/arch/riscv64.config
++++ b/tools/testing/selftests/wireguard/qemu/arch/riscv64.config
+@@ -2,6 +2,7 @@ CONFIG_ARCH_RV64I=y
+ CONFIG_MMU=y
+ CONFIG_FPU=y
+ CONFIG_SOC_VIRT=y
++CONFIG_RISCV_ISA_FALLBACK=y
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_OF_PLATFORM=y
+diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c
+index eb0e46905bf9d0..8f9b06d9ce039a 100644
+--- a/tools/testing/selftests/x86/lam.c
++++ b/tools/testing/selftests/x86/lam.c
+@@ -573,7 +573,7 @@ int do_uring(unsigned long lam)
+ char path[PATH_MAX] = {0};
+
+ /* get current process path */
+- if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
++ if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
+ return 1;
+
+ int file_fd = open(path, O_RDONLY);
+@@ -680,14 +680,14 @@ static int handle_execve(struct testcases *test)
+ perror("Fork failed.");
+ ret = 1;
+ } else if (pid == 0) {
+- char path[PATH_MAX];
++ char path[PATH_MAX] = {0};
+
+ /* Set LAM mode in parent process */
+ if (set_lam(lam) != 0)
+ return 1;
+
+ /* Get current binary's path and the binary was run by execve */
+- if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
++ if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
+ exit(-1);
+
+ /* run binary to get LAM mode and return to parent process */
+diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
+index 90718c2fd4ea95..5dc7767039f6fa 100644
+--- a/tools/testing/vsock/vsock_test.c
++++ b/tools/testing/vsock/vsock_test.c
+@@ -392,11 +392,12 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
+ }
+
+ #define SOCK_BUF_SIZE (2 * 1024 * 1024)
+-#define MAX_MSG_SIZE (32 * 1024)
++#define MAX_MSG_PAGES 4
+
+ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+ {
+ unsigned long curr_hash;
++ size_t max_msg_size;
+ int page_size;
+ int msg_count;
+ int fd;
+@@ -412,7 +413,8 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+
+ curr_hash = 0;
+ page_size = getpagesize();
+- msg_count = SOCK_BUF_SIZE / MAX_MSG_SIZE;
++ max_msg_size = MAX_MSG_PAGES * page_size;
++ msg_count = SOCK_BUF_SIZE / max_msg_size;
+
+ for (int i = 0; i < msg_count; i++) {
+ ssize_t send_size;
+@@ -423,7 +425,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
+ /* Use "small" buffers and "big" buffers. */
+ if (i & 1)
+ buf_size = page_size +
+- (rand() % (MAX_MSG_SIZE - page_size));
++ (rand() % (max_msg_size - page_size));
+ else
+ buf_size = 1 + (rand() % page_size);
+
+@@ -479,7 +481,6 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ unsigned long remote_hash;
+ unsigned long curr_hash;
+ int fd;
+- char buf[MAX_MSG_SIZE];
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+
+@@ -507,8 +508,13 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ control_writeln("SRVREADY");
+ /* Wait, until peer sends whole data. */
+ control_expectln("SENDDONE");
+- iov.iov_base = buf;
+- iov.iov_len = sizeof(buf);
++ iov.iov_len = MAX_MSG_PAGES * getpagesize();
++ iov.iov_base = malloc(iov.iov_len);
++ if (!iov.iov_base) {
++ perror("malloc");
++ exit(EXIT_FAILURE);
++ }
++
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+@@ -533,6 +539,7 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
+ curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
+ }
+
++ free(iov.iov_base);
+ close(fd);
+ remote_hash = control_readulong();
+
+diff --git a/tools/tracing/latency/latency-collector.c b/tools/tracing/latency/latency-collector.c
+index 0fd9c747d396d6..cf263fe9deaf4b 100644
+--- a/tools/tracing/latency/latency-collector.c
++++ b/tools/tracing/latency/latency-collector.c
+@@ -935,12 +935,12 @@ static void show_available(void)
+ }
+
+ if (!tracers) {
+- warnx(no_tracer_msg);
++ warnx("%s", no_tracer_msg);
+ return;
+ }
+
+ if (!found) {
+- warnx(no_latency_tr_msg);
++ warnx("%s", no_latency_tr_msg);
+ tracefs_list_free(tracers);
+ return;
+ }
+@@ -983,7 +983,7 @@ static const char *find_default_tracer(void)
+ for (i = 0; relevant_tracers[i]; i++) {
+ valid = tracer_valid(relevant_tracers[i], &notracer);
+ if (notracer)
+- errx(EXIT_FAILURE, no_tracer_msg);
++ errx(EXIT_FAILURE, "%s", no_tracer_msg);
+ if (valid)
+ return relevant_tracers[i];
+ }
+@@ -1878,7 +1878,7 @@ static void scan_arguments(int argc, char *argv[])
+ }
+ valid = tracer_valid(current_tracer, &notracer);
+ if (notracer)
+- errx(EXIT_FAILURE, no_tracer_msg);
++ errx(EXIT_FAILURE, "%s", no_tracer_msg);
+ if (!valid)
+ errx(EXIT_FAILURE,
+ "The tracer %s is not supported by your kernel!\n", current_tracer);
+diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
+index 2456a399eb9ae1..afd18c678ff5a5 100644
+--- a/tools/tracing/rtla/Makefile
++++ b/tools/tracing/rtla/Makefile
+@@ -28,10 +28,15 @@ FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
+ -fasynchronous-unwind-tables -fstack-clash-protection
+ WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
+
++ifeq ($(CC),clang)
++ FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
++ WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
++endif
++
+ TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
+
+ CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS)
+-LDFLAGS := -ggdb $(EXTRA_LDFLAGS)
++LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS)
+ LIBS := $$($(PKG_CONFIG) --libs libtracefs)
+
+ SRC := $(wildcard src/*.c)
+diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
+index 8f81fa00736489..01870d50942a19 100644
+--- a/tools/tracing/rtla/src/osnoise_hist.c
++++ b/tools/tracing/rtla/src/osnoise_hist.c
+@@ -135,8 +135,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
+ if (params->output_divisor)
+ duration = duration / params->output_divisor;
+
+- if (data->bucket_size)
+- bucket = duration / data->bucket_size;
++ bucket = duration / data->bucket_size;
+
+ total_duration = duration * count;
+
+@@ -480,7 +479,11 @@ static void osnoise_hist_usage(char *usage)
+
+ for (i = 0; msg[i]; i++)
+ fprintf(stderr, "%s\n", msg[i]);
+- exit(1);
++
++ if (usage)
++ exit(EXIT_FAILURE);
++
++ exit(EXIT_SUCCESS);
+ }
+
+ /*
+diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
+index f7c959be867779..de8767ff043a43 100644
+--- a/tools/tracing/rtla/src/osnoise_top.c
++++ b/tools/tracing/rtla/src/osnoise_top.c
+@@ -331,7 +331,11 @@ static void osnoise_top_usage(struct osnoise_top_params *params, char *usage)
+
+ for (i = 0; msg[i]; i++)
+ fprintf(stderr, "%s\n", msg[i]);
+- exit(1);
++
++ if (usage)
++ exit(EXIT_FAILURE);
++
++ exit(EXIT_SUCCESS);
+ }
+
+ /*
+@@ -424,7 +428,7 @@ struct osnoise_top_params *osnoise_top_parse_args(int argc, char **argv)
+ case 'd':
+ params->duration = parse_seconds_duration(optarg);
+ if (!params->duration)
+- osnoise_top_usage(params, "Invalid -D duration\n");
++ osnoise_top_usage(params, "Invalid -d duration\n");
+ break;
+ case 'e':
+ tevent = trace_event_alloc(optarg);
+@@ -620,8 +624,10 @@ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params)
+ return NULL;
+
+ tool->data = osnoise_alloc_top(nr_cpus);
+- if (!tool->data)
+- goto out_err;
++ if (!tool->data) {
++ osnoise_destroy_tool(tool);
++ return NULL;
++ }
+
+ tool->params = params;
+
+@@ -629,11 +635,6 @@ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params)
+ osnoise_top_handler, NULL);
+
+ return tool;
+-
+-out_err:
+- osnoise_free_top(tool->data);
+- osnoise_destroy_tool(tool);
+- return NULL;
+ }
+
+ static int stop_tracing;
+diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c
+index 7093fd5333beb9..7bd80ee2a5b48d 100644
+--- a/tools/tracing/rtla/src/timerlat_aa.c
++++ b/tools/tracing/rtla/src/timerlat_aa.c
+@@ -16,6 +16,9 @@ enum timelat_state {
+ TIMERLAT_WAITING_THREAD,
+ };
+
++/* Used to fill spaces in the output */
++static const char *spaces = " ";
++
+ #define MAX_COMM 24
+
+ /*
+@@ -274,14 +277,17 @@ static int timerlat_aa_nmi_handler(struct trace_seq *s, struct tep_record *recor
+ taa_data->prev_irq_timstamp = start;
+
+ trace_seq_reset(taa_data->prev_irqs_seq);
+- trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s \t\t\t%9.2f us\n",
+- "nmi", ns_to_usf(duration));
++ trace_seq_printf(taa_data->prev_irqs_seq, " %24s %.*s %9.2f us\n",
++ "nmi",
++ 24, spaces,
++ ns_to_usf(duration));
+ return 0;
+ }
+
+ taa_data->thread_nmi_sum += duration;
+- trace_seq_printf(taa_data->nmi_seq, " %24s \t\t\t%9.2f us\n",
+- "nmi", ns_to_usf(duration));
++ trace_seq_printf(taa_data->nmi_seq, " %24s %.*s %9.2f us\n",
++ "nmi",
++ 24, spaces, ns_to_usf(duration));
+
+ return 0;
+ }
+@@ -323,8 +329,10 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
+ taa_data->prev_irq_timstamp = start;
+
+ trace_seq_reset(taa_data->prev_irqs_seq);
+- trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s:%-3llu \t\t%9.2f us\n",
+- desc, vector, ns_to_usf(duration));
++ trace_seq_printf(taa_data->prev_irqs_seq, " %24s:%-3llu %.*s %9.2f us\n",
++ desc, vector,
++ 15, spaces,
++ ns_to_usf(duration));
+ return 0;
+ }
+
+@@ -372,8 +380,10 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
+ * IRQ interference.
+ */
+ taa_data->thread_irq_sum += duration;
+- trace_seq_printf(taa_data->irqs_seq, " %24s:%-3llu \t %9.2f us\n",
+- desc, vector, ns_to_usf(duration));
++ trace_seq_printf(taa_data->irqs_seq, " %24s:%-3llu %.*s %9.2f us\n",
++ desc, vector,
++ 24, spaces,
++ ns_to_usf(duration));
+
+ return 0;
+ }
+@@ -408,8 +418,10 @@ static int timerlat_aa_softirq_handler(struct trace_seq *s, struct tep_record *r
+
+ taa_data->thread_softirq_sum += duration;
+
+- trace_seq_printf(taa_data->softirqs_seq, "\t%24s:%-3llu \t %9.2f us\n",
+- softirq_name[vector], vector, ns_to_usf(duration));
++ trace_seq_printf(taa_data->softirqs_seq, " %24s:%-3llu %.*s %9.2f us\n",
++ softirq_name[vector], vector,
++ 24, spaces,
++ ns_to_usf(duration));
+ return 0;
+ }
+
+@@ -452,8 +464,10 @@ static int timerlat_aa_thread_handler(struct trace_seq *s, struct tep_record *re
+ } else {
+ taa_data->thread_thread_sum += duration;
+
+- trace_seq_printf(taa_data->threads_seq, "\t%24s:%-3llu \t\t%9.2f us\n",
+- comm, pid, ns_to_usf(duration));
++ trace_seq_printf(taa_data->threads_seq, " %24s:%-12llu %.*s %9.2f us\n",
++ comm, pid,
++ 15, spaces,
++ ns_to_usf(duration));
+ }
+
+ return 0;
+@@ -482,7 +496,8 @@ static int timerlat_aa_stack_handler(struct trace_seq *s, struct tep_record *rec
+ function = tep_find_function(taa_ctx->tool->trace.tep, caller[i]);
+ if (!function)
+ break;
+- trace_seq_printf(taa_data->stack_seq, "\t\t-> %s\n", function);
++ trace_seq_printf(taa_data->stack_seq, " %.*s -> %s\n",
++ 14, spaces, function);
+ }
+ }
+ return 0;
+@@ -568,23 +583,24 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay;
+ if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) {
+ if (taa_data->prev_irq_timstamp < taa_data->timer_irq_start_time)
+- printf(" Previous IRQ interference: \t\t up to %9.2f us\n",
+- ns_to_usf(taa_data->prev_irq_duration));
++ printf(" Previous IRQ interference: %.*s up to %9.2f us\n",
++ 16, spaces,
++ ns_to_usf(taa_data->prev_irq_duration));
+ }
+
+ /*
+ * The delay that the IRQ suffered before starting.
+ */
+- printf(" IRQ handler delay: %16s %9.2f us (%.2f %%)\n",
+- (ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "",
+- ns_to_usf(taa_data->timer_irq_start_delay),
+- ns_to_per(total, taa_data->timer_irq_start_delay));
++ printf(" IRQ handler delay: %.*s %16s %9.2f us (%.2f %%)\n", 16, spaces,
++ (ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "",
++ ns_to_usf(taa_data->timer_irq_start_delay),
++ ns_to_per(total, taa_data->timer_irq_start_delay));
+
+ /*
+ * Timerlat IRQ.
+ */
+- printf(" IRQ latency: \t\t\t\t %9.2f us\n",
+- ns_to_usf(taa_data->tlat_irq_latency));
++ printf(" IRQ latency: %.*s %9.2f us\n", 40, spaces,
++ ns_to_usf(taa_data->tlat_irq_latency));
+
+ if (irq) {
+ /*
+@@ -595,15 +611,16 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ * so it will be displayed, it is the key.
+ */
+ printf(" Blocking thread:\n");
+- printf(" %24s:%-9llu\n",
+- taa_data->run_thread_comm, taa_data->run_thread_pid);
++ printf(" %.*s %24s:%-9llu\n", 6, spaces, taa_data->run_thread_comm,
++ taa_data->run_thread_pid);
+ } else {
+ /*
+ * The duration of the IRQ handler that handled the timerlat IRQ.
+ */
+- printf(" Timerlat IRQ duration: \t\t %9.2f us (%.2f %%)\n",
+- ns_to_usf(taa_data->timer_irq_duration),
+- ns_to_per(total, taa_data->timer_irq_duration));
++ printf(" Timerlat IRQ duration: %.*s %9.2f us (%.2f %%)\n",
++ 30, spaces,
++ ns_to_usf(taa_data->timer_irq_duration),
++ ns_to_per(total, taa_data->timer_irq_duration));
+
+ /*
+ * The amount of time that the current thread postponed the scheduler.
+@@ -611,13 +628,13 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ * Recalling that it is net from NMI/IRQ/Softirq interference, so there
+ * is no need to compute values here.
+ */
+- printf(" Blocking thread: \t\t\t %9.2f us (%.2f %%)\n",
+- ns_to_usf(taa_data->thread_blocking_duration),
+- ns_to_per(total, taa_data->thread_blocking_duration));
++ printf(" Blocking thread: %.*s %9.2f us (%.2f %%)\n", 36, spaces,
++ ns_to_usf(taa_data->thread_blocking_duration),
++ ns_to_per(total, taa_data->thread_blocking_duration));
+
+- printf(" %24s:%-9llu %9.2f us\n",
+- taa_data->run_thread_comm, taa_data->run_thread_pid,
+- ns_to_usf(taa_data->thread_blocking_duration));
++ printf(" %.*s %24s:%-9llu %.*s %9.2f us\n", 6, spaces,
++ taa_data->run_thread_comm, taa_data->run_thread_pid,
++ 12, spaces, ns_to_usf(taa_data->thread_blocking_duration));
+ }
+
+ /*
+@@ -629,9 +646,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ * NMIs can happen during the IRQ, so they are always possible.
+ */
+ if (taa_data->thread_nmi_sum)
+- printf(" NMI interference \t\t\t %9.2f us (%.2f %%)\n",
+- ns_to_usf(taa_data->thread_nmi_sum),
+- ns_to_per(total, taa_data->thread_nmi_sum));
++ printf(" NMI interference %.*s %9.2f us (%.2f %%)\n", 36, spaces,
++ ns_to_usf(taa_data->thread_nmi_sum),
++ ns_to_per(total, taa_data->thread_nmi_sum));
+
+ /*
+ * If it is an IRQ latency, the other factors can be skipped.
+@@ -643,9 +660,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ * Prints the interference caused by IRQs to the thread latency.
+ */
+ if (taa_data->thread_irq_sum) {
+- printf(" IRQ interference \t\t\t %9.2f us (%.2f %%)\n",
+- ns_to_usf(taa_data->thread_irq_sum),
+- ns_to_per(total, taa_data->thread_irq_sum));
++ printf(" IRQ interference %.*s %9.2f us (%.2f %%)\n", 36, spaces,
++ ns_to_usf(taa_data->thread_irq_sum),
++ ns_to_per(total, taa_data->thread_irq_sum));
+
+ trace_seq_do_printf(taa_data->irqs_seq);
+ }
+@@ -654,9 +671,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ * Prints the interference caused by Softirqs to the thread latency.
+ */
+ if (taa_data->thread_softirq_sum) {
+- printf(" Softirq interference \t\t\t %9.2f us (%.2f %%)\n",
+- ns_to_usf(taa_data->thread_softirq_sum),
+- ns_to_per(total, taa_data->thread_softirq_sum));
++ printf(" Softirq interference %.*s %9.2f us (%.2f %%)\n", 32, spaces,
++ ns_to_usf(taa_data->thread_softirq_sum),
++ ns_to_per(total, taa_data->thread_softirq_sum));
+
+ trace_seq_do_printf(taa_data->softirqs_seq);
+ }
+@@ -670,9 +687,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ * timer handling latency.
+ */
+ if (taa_data->thread_thread_sum) {
+- printf(" Thread interference \t\t\t %9.2f us (%.2f %%)\n",
+- ns_to_usf(taa_data->thread_thread_sum),
+- ns_to_per(total, taa_data->thread_thread_sum));
++ printf(" Thread interference %.*s %9.2f us (%.2f %%)\n", 33, spaces,
++ ns_to_usf(taa_data->thread_thread_sum),
++ ns_to_per(total, taa_data->thread_thread_sum));
+
+ trace_seq_do_printf(taa_data->threads_seq);
+ }
+@@ -682,8 +699,8 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ */
+ print_total:
+ printf("------------------------------------------------------------------------\n");
+- printf(" %s latency: \t\t\t %9.2f us (100%%)\n", irq ? "IRQ" : "Thread",
+- ns_to_usf(total));
++ printf(" %s latency: %.*s %9.2f us (100%%)\n", irq ? " IRQ" : "Thread",
++ 37, spaces, ns_to_usf(total));
+ }
+
+ static int timerlat_auto_analysis_collect_trace(struct timerlat_aa_context *taa_ctx)
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 47d3d8b53cb217..1c8ecd4ebcbd35 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -178,8 +178,7 @@ timerlat_hist_update(struct osnoise_tool *tool, int cpu,
+ if (params->output_divisor)
+ latency = latency / params->output_divisor;
+
+- if (data->bucket_size)
+- bucket = latency / data->bucket_size;
++ bucket = latency / data->bucket_size;
+
+ if (!context) {
+ hist = data->hist[cpu].irq;
+@@ -324,17 +323,29 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ continue;
+
+- if (!params->no_irq)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].min_irq);
++ if (!params->no_irq) {
++ if (data->hist[cpu].irq_count)
++ trace_seq_printf(trace->seq, "%9llu ",
++ data->hist[cpu].min_irq);
++ else
++ trace_seq_printf(trace->seq, " - ");
++ }
+
+- if (!params->no_thread)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].min_thread);
++ if (!params->no_thread) {
++ if (data->hist[cpu].thread_count)
++ trace_seq_printf(trace->seq, "%9llu ",
++ data->hist[cpu].min_thread);
++ else
++ trace_seq_printf(trace->seq, " - ");
++ }
+
+- if (params->user_hist)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].min_user);
++ if (params->user_hist) {
++ if (data->hist[cpu].user_count)
++ trace_seq_printf(trace->seq, "%9llu ",
++ data->hist[cpu].min_user);
++ else
++ trace_seq_printf(trace->seq, " - ");
++ }
+ }
+ trace_seq_printf(trace->seq, "\n");
+
+@@ -384,17 +395,29 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ continue;
+
+- if (!params->no_irq)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].max_irq);
++ if (!params->no_irq) {
++ if (data->hist[cpu].irq_count)
++ trace_seq_printf(trace->seq, "%9llu ",
++ data->hist[cpu].max_irq);
++ else
++ trace_seq_printf(trace->seq, " - ");
++ }
+
+- if (!params->no_thread)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].max_thread);
++ if (!params->no_thread) {
++ if (data->hist[cpu].thread_count)
++ trace_seq_printf(trace->seq, "%9llu ",
++ data->hist[cpu].max_thread);
++ else
++ trace_seq_printf(trace->seq, " - ");
++ }
+
+- if (params->user_hist)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].max_user);
++ if (params->user_hist) {
++ if (data->hist[cpu].user_count)
++ trace_seq_printf(trace->seq, "%9llu ",
++ data->hist[cpu].max_user);
++ else
++ trace_seq_printf(trace->seq, " - ");
++ }
+ }
+ trace_seq_printf(trace->seq, "\n");
+ trace_seq_do_printf(trace->seq);
+@@ -546,7 +569,11 @@ static void timerlat_hist_usage(char *usage)
+
+ for (i = 0; msg[i]; i++)
+ fprintf(stderr, "%s\n", msg[i]);
+- exit(1);
++
++ if (usage)
++ exit(EXIT_FAILURE);
++
++ exit(EXIT_SUCCESS);
+ }
+
+ /*
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 1640f121baca50..a84f43857de14d 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -211,6 +211,8 @@ static void timerlat_top_header(struct osnoise_tool *top)
+ trace_seq_printf(s, "\n");
+ }
+
++static const char *no_value = " -";
++
+ /*
+ * timerlat_top_print - prints the output of a given CPU
+ */
+@@ -238,10 +240,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ trace_seq_printf(s, "%3d #%-9d |", cpu, cpu_data->irq_count);
+
+ if (!cpu_data->irq_count) {
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - |");
++ trace_seq_printf(s, "%s %s %s %s |", no_value, no_value, no_value, no_value);
+ } else {
+ trace_seq_printf(s, "%9llu ", cpu_data->cur_irq / params->output_divisor);
+ trace_seq_printf(s, "%9llu ", cpu_data->min_irq / params->output_divisor);
+@@ -250,10 +249,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ }
+
+ if (!cpu_data->thread_count) {
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " -\n");
++ trace_seq_printf(s, "%s %s %s %s", no_value, no_value, no_value, no_value);
+ } else {
+ trace_seq_printf(s, "%9llu ", cpu_data->cur_thread / divisor);
+ trace_seq_printf(s, "%9llu ", cpu_data->min_thread / divisor);
+@@ -270,10 +266,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ trace_seq_printf(s, " |");
+
+ if (!cpu_data->user_count) {
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " - ");
+- trace_seq_printf(s, " -\n");
++ trace_seq_printf(s, "%s %s %s %s\n", no_value, no_value, no_value, no_value);
+ } else {
+ trace_seq_printf(s, "%9llu ", cpu_data->cur_user / divisor);
+ trace_seq_printf(s, "%9llu ", cpu_data->min_user / divisor);
+@@ -346,7 +339,7 @@ static void timerlat_top_usage(char *usage)
+ " -c/--cpus cpus: run the tracer only on the given cpus",
+ " -H/--house-keeping cpus: run rtla control threads only on the given cpus",
+ " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited",
+- " -d/--duration time[m|h|d]: duration of the session in seconds",
++ " -d/--duration time[s|m|h|d]: duration of the session",
+ " -D/--debug: print debug info",
+ " --dump-tasks: prints the task running on all CPUs if stop conditions are met (depends on !--no-aa)",
+ " -t/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]",
+@@ -375,7 +368,11 @@ static void timerlat_top_usage(char *usage)
+
+ for (i = 0; msg[i]; i++)
+ fprintf(stderr, "%s\n", msg[i]);
+- exit(1);
++
++ if (usage)
++ exit(EXIT_FAILURE);
++
++ exit(EXIT_SUCCESS);
+ }
+
+ /*
+@@ -488,7 +485,7 @@ static struct timerlat_top_params
+ case 'd':
+ params->duration = parse_seconds_duration(optarg);
+ if (!params->duration)
+- timerlat_top_usage("Invalid -D duration\n");
++ timerlat_top_usage("Invalid -d duration\n");
+ break;
+ case 'e':
+ tevent = trace_event_alloc(optarg);
+diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
+index 623a38908ed5b4..9ac71a66840c1b 100644
+--- a/tools/tracing/rtla/src/utils.c
++++ b/tools/tracing/rtla/src/utils.c
+@@ -238,12 +238,6 @@ static inline int sched_setattr(pid_t pid, const struct sched_attr *attr,
+ return syscall(__NR_sched_setattr, pid, attr, flags);
+ }
+
+-static inline int sched_getattr(pid_t pid, struct sched_attr *attr,
+- unsigned int size, unsigned int flags)
+-{
+- return syscall(__NR_sched_getattr, pid, attr, size, flags);
+-}
+-
+ int __set_sched_attr(int pid, struct sched_attr *attr)
+ {
+ int flags = 0;
+@@ -479,13 +473,13 @@ int parse_prio(char *arg, struct sched_attr *sched_param)
+ if (prio == INVALID_VAL)
+ return -1;
+
+- if (prio < sched_get_priority_min(SCHED_OTHER))
++ if (prio < MIN_NICE)
+ return -1;
+- if (prio > sched_get_priority_max(SCHED_OTHER))
++ if (prio > MAX_NICE)
+ return -1;
+
+ sched_param->sched_policy = SCHED_OTHER;
+- sched_param->sched_priority = prio;
++ sched_param->sched_nice = prio;
+ break;
+ default:
+ return -1;
+@@ -536,9 +530,9 @@ int set_cpu_dma_latency(int32_t latency)
+ */
+ static const int find_mount(const char *fs, char *mp, int sizeof_mp)
+ {
+- char mount_point[MAX_PATH];
++ char mount_point[MAX_PATH+1];
+ char type[100];
+- int found;
++ int found = 0;
+ FILE *fp;
+
+ fp = fopen("/proc/mounts", "r");
+diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
+index 04ed1e650495a3..d44513e6c66a01 100644
+--- a/tools/tracing/rtla/src/utils.h
++++ b/tools/tracing/rtla/src/utils.h
+@@ -9,6 +9,8 @@
+ */
+ #define BUFF_U64_STR_SIZE 24
+ #define MAX_PATH 1024
++#define MAX_NICE 20
++#define MIN_NICE -19
+
+ #define container_of(ptr, type, member)({ \
+ const typeof(((type *)0)->member) *__mptr = (ptr); \
+diff --git a/tools/verification/rv/Makefile b/tools/verification/rv/Makefile
+index 3d0f3888a58c66..485f8aeddbe033 100644
+--- a/tools/verification/rv/Makefile
++++ b/tools/verification/rv/Makefile
+@@ -28,10 +28,15 @@ FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
+ -fasynchronous-unwind-tables -fstack-clash-protection
+ WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
+
++ifeq ($(CC),clang)
++ FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
++ WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
++endif
++
+ TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
+
+ CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS) -I include
+-LDFLAGS := -ggdb $(EXTRA_LDFLAGS)
++LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS)
+ LIBS := $$($(PKG_CONFIG) --libs libtracefs)
+
+ SRC := $(wildcard src/*.c)
+diff --git a/tools/verification/rv/src/in_kernel.c b/tools/verification/rv/src/in_kernel.c
+index ad28582bcf2b1c..f04479ecc96c0b 100644
+--- a/tools/verification/rv/src/in_kernel.c
++++ b/tools/verification/rv/src/in_kernel.c
+@@ -210,9 +210,9 @@ static char *ikm_read_reactor(char *monitor_name)
+ static char *ikm_get_current_reactor(char *monitor_name)
+ {
+ char *reactors = ikm_read_reactor(monitor_name);
++ char *curr_reactor = NULL;
+ char *start;
+ char *end;
+- char *curr_reactor;
+
+ if (!reactors)
+ return NULL;
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index e033c79d528e00..28658b9e0d9684 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -87,7 +87,27 @@ static void async_pf_execute(struct work_struct *work)
+ __kvm_vcpu_wake_up(vcpu);
+
+ mmput(mm);
+- kvm_put_kvm(vcpu->kvm);
++}
++
++static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
++{
++ /*
++ * The async #PF is "done", but KVM must wait for the work item itself,
++ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
++ * KVM must ensure *no* code owned by the KVM (the module) can be run
++ * after the last call to module_put(). Note, flushing the work item
++ * is always required when the item is taken off the completion queue.
++ * E.g. even if the vCPU handles the item in the "normal" path, the VM
++ * could be terminated before async_pf_execute() completes.
++ *
++ * Wake-all events skip the queue and go straight to "done", i.e. don't
++ * need to be flushed (but sanity check that the work wasn't queued).
++ */
++ if (work->wakeup_all)
++ WARN_ON_ONCE(work->work.func);
++ else
++ flush_work(&work->work);
++ kmem_cache_free(async_pf_cache, work);
+ }
+
+ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+@@ -114,7 +134,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ #else
+ if (cancel_work_sync(&work->work)) {
+ mmput(work->mm);
+- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
+ kmem_cache_free(async_pf_cache, work);
+ }
+ #endif
+@@ -126,7 +145,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ list_first_entry(&vcpu->async_pf.done,
+ typeof(*work), link);
+ list_del(&work->link);
+- kmem_cache_free(async_pf_cache, work);
++
++ spin_unlock(&vcpu->async_pf.lock);
++ kvm_flush_and_free_async_pf_work(work);
++ spin_lock(&vcpu->async_pf.lock);
+ }
+ spin_unlock(&vcpu->async_pf.lock);
+
+@@ -151,7 +173,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+
+ list_del(&work->queue);
+ vcpu->async_pf.queued--;
+- kmem_cache_free(async_pf_cache, work);
++ kvm_flush_and_free_async_pf_work(work);
+ }
+ }
+
+@@ -186,7 +208,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ work->arch = *arch;
+ work->mm = current->mm;
+ mmget(work->mm);
+- kvm_get_kvm(work->vcpu->kvm);
+
+ INIT_WORK(&work->work, async_pf_execute);
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 486800a7024b37..44c228bcd699d9 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -3772,12 +3772,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+ struct kvm *kvm = me->kvm;
+ struct kvm_vcpu *vcpu;
+- int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
++ int last_boosted_vcpu;
+ unsigned long i;
+ int yielded = 0;
+ int try = 3;
+ int pass;
+
++ last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
+ kvm_vcpu_set_in_spin_loop(me, true);
+ /*
+ * We boost the priority of a VCPU that is runnable but not
+@@ -3808,7 +3809,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+
+ yielded = kvm_vcpu_yield_to(vcpu);
+ if (yielded > 0) {
+- kvm->last_boosted_vcpu = i;
++ WRITE_ONCE(kvm->last_boosted_vcpu, i);
+ break;
+ } else if (yielded < 0) {
+ try--;
+@@ -5173,6 +5174,7 @@ __visible bool kvm_rebooting;
+ EXPORT_SYMBOL_GPL(kvm_rebooting);
+
+ static DEFINE_PER_CPU(bool, hardware_enabled);
++static DEFINE_MUTEX(kvm_usage_lock);
+ static int kvm_usage_count;
+
+ static int __hardware_enable_nolock(void)
+@@ -5205,10 +5207,10 @@ static int kvm_online_cpu(unsigned int cpu)
+ * be enabled. Otherwise running VMs would encounter unrecoverable
+ * errors when scheduled to this CPU.
+ */
+- mutex_lock(&kvm_lock);
++ mutex_lock(&kvm_usage_lock);
+ if (kvm_usage_count)
+ ret = __hardware_enable_nolock();
+- mutex_unlock(&kvm_lock);
++ mutex_unlock(&kvm_usage_lock);
+ return ret;
+ }
+
+@@ -5228,10 +5230,10 @@ static void hardware_disable_nolock(void *junk)
+
+ static int kvm_offline_cpu(unsigned int cpu)
+ {
+- mutex_lock(&kvm_lock);
++ mutex_lock(&kvm_usage_lock);
+ if (kvm_usage_count)
+ hardware_disable_nolock(NULL);
+- mutex_unlock(&kvm_lock);
++ mutex_unlock(&kvm_usage_lock);
+ return 0;
+ }
+
+@@ -5247,9 +5249,9 @@ static void hardware_disable_all_nolock(void)
+ static void hardware_disable_all(void)
+ {
+ cpus_read_lock();
+- mutex_lock(&kvm_lock);
++ mutex_lock(&kvm_usage_lock);
+ hardware_disable_all_nolock();
+- mutex_unlock(&kvm_lock);
++ mutex_unlock(&kvm_usage_lock);
+ cpus_read_unlock();
+ }
+
+@@ -5280,7 +5282,7 @@ static int hardware_enable_all(void)
+ * enable hardware multiple times.
+ */
+ cpus_read_lock();
+- mutex_lock(&kvm_lock);
++ mutex_lock(&kvm_usage_lock);
+
+ r = 0;
+
+@@ -5294,7 +5296,7 @@ static int hardware_enable_all(void)
+ }
+ }
+
+- mutex_unlock(&kvm_lock);
++ mutex_unlock(&kvm_usage_lock);
+ cpus_read_unlock();
+
+ return r;
+@@ -5322,13 +5324,13 @@ static int kvm_suspend(void)
+ {
+ /*
+ * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
+- * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
+- * is stable. Assert that kvm_lock is not held to ensure the system
+- * isn't suspended while KVM is enabling hardware. Hardware enabling
+- * can be preempted, but the task cannot be frozen until it has dropped
+- * all locks (userspace tasks are frozen via a fake signal).
++ * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
++ * count is stable. Assert that kvm_usage_lock is not held to ensure
++ * the system isn't suspended while KVM is enabling hardware. Hardware
++ * enabling can be preempted, but the task cannot be frozen until it has
++ * dropped all locks (userspace tasks are frozen via a fake signal).
+ */
+- lockdep_assert_not_held(&kvm_lock);
++ lockdep_assert_not_held(&kvm_usage_lock);
+ lockdep_assert_irqs_disabled();
+
+ if (kvm_usage_count)
+@@ -5338,7 +5340,7 @@ static int kvm_suspend(void)
+
+ static void kvm_resume(void)
+ {
+- lockdep_assert_not_held(&kvm_lock);
++ lockdep_assert_not_held(&kvm_usage_lock);
+ lockdep_assert_irqs_disabled();
+
+ if (kvm_usage_count)
diff --git a/system/easy-kernel/0100-linux-6.6.6.patch b/system/easy-kernel/0100-linux-6.6.6.patch
deleted file mode 100644
index 5192da321..000000000
--- a/system/easy-kernel/0100-linux-6.6.6.patch
+++ /dev/null
@@ -1,64421 +0,0 @@
-diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
-index b2ff0012c0f2b..2e24ac3bd7efa 100644
---- a/Documentation/ABI/testing/sysfs-class-led
-+++ b/Documentation/ABI/testing/sysfs-class-led
-@@ -59,15 +59,6 @@ Description:
- brightness. Reading this file when no hw brightness change
- event has happened will return an ENODATA error.
-
--What: /sys/class/leds/<led>/color
--Date: June 2023
--KernelVersion: 6.5
--Description:
-- Color of the LED.
--
-- This is a read-only file. Reading this file returns the color
-- of the LED as a string (e.g: "red", "green", "multicolor").
--
- What: /sys/class/leds/<led>/trigger
- Date: March 2006
- KernelVersion: 2.6.17
-diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
-index ef6d6c57105ef..96834d103a09e 100644
---- a/Documentation/ABI/testing/sysfs-driver-qat
-+++ b/Documentation/ABI/testing/sysfs-driver-qat
-@@ -29,6 +29,8 @@ Description: (RW) Reports the current configuration of the QAT device.
- services
- * asym;sym: identical to sym;asym
- * dc: the device is configured for running compression services
-+ * dcc: identical to dc but enables the dc chaining feature,
-+ hash then compression. If this is not required chose dc
- * sym: the device is configured for running symmetric crypto
- services
- * asym: the device is configured for running asymmetric crypto
-diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
-index b6cfb51cb0b46..e715bfc09879a 100644
---- a/Documentation/admin-guide/hw-vuln/srso.rst
-+++ b/Documentation/admin-guide/hw-vuln/srso.rst
-@@ -46,12 +46,22 @@ The possible values in this file are:
-
- The processor is not vulnerable
-
-- * 'Vulnerable: no microcode':
-+* 'Vulnerable':
-+
-+ The processor is vulnerable and no mitigations have been applied.
-+
-+ * 'Vulnerable: No microcode':
-
- The processor is vulnerable, no microcode extending IBPB
- functionality to address the vulnerability has been applied.
-
-- * 'Mitigation: microcode':
-+ * 'Vulnerable: Safe RET, no microcode':
-+
-+ The "Safe RET" mitigation (see below) has been applied to protect the
-+ kernel, but the IBPB-extending microcode has not been applied. User
-+ space tasks may still be vulnerable.
-+
-+ * 'Vulnerable: Microcode, no safe RET':
-
- Extended IBPB functionality microcode patch has been applied. It does
- not address User->Kernel and Guest->Host transitions protection but it
-@@ -72,11 +82,11 @@ The possible values in this file are:
-
- (spec_rstack_overflow=microcode)
-
-- * 'Mitigation: safe RET':
-+ * 'Mitigation: Safe RET':
-
-- Software-only mitigation. It complements the extended IBPB microcode
-- patch functionality by addressing User->Kernel and Guest->Host
-- transitions protection.
-+ Combined microcode/software mitigation. It complements the
-+ extended IBPB microcode patch functionality by addressing
-+ User->Kernel and Guest->Host transitions protection.
-
- Selected by default or by spec_rstack_overflow=safe-ret
-
-@@ -129,7 +139,7 @@ an indrect branch prediction barrier after having applied the required
- microcode patch for one's system. This mitigation comes also at
- a performance cost.
-
--Mitigation: safe RET
-+Mitigation: Safe RET
- --------------------
-
- The mitigation works by ensuring all RET instructions speculate to
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 0a1731a0f0ef3..41644336e3587 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -5858,6 +5858,13 @@
- This feature may be more efficiently disabled
- using the csdlock_debug- kernel parameter.
-
-+ smp.panic_on_ipistall= [KNL]
-+ If a csd_lock_timeout extends for more than
-+ the specified number of milliseconds, panic the
-+ system. By default, let CSD-lock acquisition
-+ take as long as they take. Specifying 300,000
-+ for this value provides a 5-minute timeout.
-+
- smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
- smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
- smsc-ircc2.ircc_sir= [HW] SIR base I/O port
-diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
-index 294693a8906cf..10540aa7afa1a 100644
---- a/Documentation/devicetree/bindings/mfd/mt6397.txt
-+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
-@@ -22,8 +22,9 @@ compatible:
- "mediatek,mt6323" for PMIC MT6323
- "mediatek,mt6331" for PMIC MT6331 and MT6332
- "mediatek,mt6357" for PMIC MT6357
-- "mediatek,mt6358" for PMIC MT6358 and MT6366
-+ "mediatek,mt6358" for PMIC MT6358
- "mediatek,mt6359" for PMIC MT6359
-+ "mediatek,mt6366", "mediatek,mt6358" for PMIC MT6366
- "mediatek,mt6397" for PMIC MT6397
-
- Optional subnodes:
-@@ -40,6 +41,7 @@ Optional subnodes:
- - compatible: "mediatek,mt6323-regulator"
- see ../regulator/mt6323-regulator.txt
- - compatible: "mediatek,mt6358-regulator"
-+ - compatible: "mediatek,mt6366-regulator", "mediatek-mt6358-regulator"
- see ../regulator/mt6358-regulator.txt
- - compatible: "mediatek,mt6397-regulator"
- see ../regulator/mt6397-regulator.txt
-diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
-index 029569d5fcf35..24c733c10e0e9 100644
---- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
-+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
-@@ -32,6 +32,27 @@ properties:
-
- vdd3-supply: true
-
-+ qcom,tune-usb2-disc-thres:
-+ $ref: /schemas/types.yaml#/definitions/uint8
-+ description: High-Speed disconnect threshold
-+ minimum: 0
-+ maximum: 7
-+ default: 0
-+
-+ qcom,tune-usb2-amplitude:
-+ $ref: /schemas/types.yaml#/definitions/uint8
-+ description: High-Speed trasmit amplitude
-+ minimum: 0
-+ maximum: 15
-+ default: 8
-+
-+ qcom,tune-usb2-preem:
-+ $ref: /schemas/types.yaml#/definitions/uint8
-+ description: High-Speed TX pre-emphasis tuning
-+ minimum: 0
-+ maximum: 7
-+ default: 5
-+
- required:
- - compatible
- - reg
-diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
-index 303a443d9e29b..9418fd66a8e95 100644
---- a/Documentation/devicetree/bindings/serial/rs485.yaml
-+++ b/Documentation/devicetree/bindings/serial/rs485.yaml
-@@ -29,6 +29,10 @@ properties:
- default: 0
- maximum: 100
-
-+ rs485-rts-active-high:
-+ description: drive RTS high when sending (this is the default).
-+ $ref: /schemas/types.yaml#/definitions/flag
-+
- rs485-rts-active-low:
- description: drive RTS low when sending (default is high).
- $ref: /schemas/types.yaml#/definitions/flag
-diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
-index ea277560a5966..5727bd549deca 100644
---- a/Documentation/devicetree/bindings/serial/serial.yaml
-+++ b/Documentation/devicetree/bindings/serial/serial.yaml
-@@ -96,7 +96,7 @@ then:
- rts-gpios: false
-
- patternProperties:
-- "^bluetooth|gnss|gps|mcu$":
-+ "^(bluetooth|gnss|gps|mcu)$":
- if:
- type: object
- then:
-diff --git a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
-index bffdab0b01859..fbac40b958dde 100644
---- a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
-+++ b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
-@@ -169,27 +169,27 @@ properties:
- - const: tgib0
- - const: tgic0
- - const: tgid0
-- - const: tgiv0
-+ - const: tciv0
- - const: tgie0
- - const: tgif0
- - const: tgia1
- - const: tgib1
-- - const: tgiv1
-- - const: tgiu1
-+ - const: tciv1
-+ - const: tciu1
- - const: tgia2
- - const: tgib2
-- - const: tgiv2
-- - const: tgiu2
-+ - const: tciv2
-+ - const: tciu2
- - const: tgia3
- - const: tgib3
- - const: tgic3
- - const: tgid3
-- - const: tgiv3
-+ - const: tciv3
- - const: tgia4
- - const: tgib4
- - const: tgic4
- - const: tgid4
-- - const: tgiv4
-+ - const: tciv4
- - const: tgiu5
- - const: tgiv5
- - const: tgiw5
-@@ -197,18 +197,18 @@ properties:
- - const: tgib6
- - const: tgic6
- - const: tgid6
-- - const: tgiv6
-+ - const: tciv6
- - const: tgia7
- - const: tgib7
- - const: tgic7
- - const: tgid7
-- - const: tgiv7
-+ - const: tciv7
- - const: tgia8
- - const: tgib8
- - const: tgic8
- - const: tgid8
-- - const: tgiv8
-- - const: tgiu8
-+ - const: tciv8
-+ - const: tciu8
-
- clocks:
- maxItems: 1
-@@ -285,16 +285,16 @@ examples:
- <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
-- interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
-+ interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
- "tgif0",
-- "tgia1", "tgib1", "tgiv1", "tgiu1",
-- "tgia2", "tgib2", "tgiv2", "tgiu2",
-- "tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
-- "tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
-+ "tgia1", "tgib1", "tciv1", "tciu1",
-+ "tgia2", "tgib2", "tciv2", "tciu2",
-+ "tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
-+ "tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
- "tgiu5", "tgiv5", "tgiw5",
-- "tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
-- "tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
-- "tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
-+ "tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
-+ "tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
-+ "tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
- clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
- power-domains = <&cpg>;
- resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
-diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
-index ff3a1707ef570..6d4cfd943f584 100644
---- a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
-+++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
-@@ -36,7 +36,11 @@ properties:
-
- vdd-supply:
- description:
-- VDD power supply to the hub
-+ 3V3 power supply to the hub
-+
-+ vdd2-supply:
-+ description:
-+ 1V2 power supply to the hub
-
- peer-hub:
- $ref: /schemas/types.yaml#/definitions/phandle
-@@ -62,6 +66,7 @@ allOf:
- properties:
- reset-gpios: false
- vdd-supply: false
-+ vdd2-supply: false
- peer-hub: false
- i2c-bus: false
- else:
-diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
-index e76e68ccf7182..10eced6c2e462 100644
---- a/Documentation/i2c/busses/i2c-i801.rst
-+++ b/Documentation/i2c/busses/i2c-i801.rst
-@@ -47,6 +47,7 @@ Supported adapters:
- * Intel Alder Lake (PCH)
- * Intel Raptor Lake (PCH)
- * Intel Meteor Lake (SOC and PCH)
-+ * Intel Birch Stream (SOC)
-
- Datasheets: Publicly available at the Intel website
-
-diff --git a/Makefile b/Makefile
-index 5c418efbe89b6..1eefa893f048b 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,7 +1,7 @@
- # SPDX-License-Identifier: GPL-2.0
- VERSION = 6
- PATCHLEVEL = 6
--SUBLEVEL = 0
-+SUBLEVEL = 6
- EXTRAVERSION =
- NAME = Hurr durr I'ma ninja sloth
-
-diff --git a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
-index 42bcbf10957c4..9f9084269ef58 100644
---- a/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
-+++ b/arch/arm/boot/dts/broadcom/bcm4708-buffalo-wzr-1166dhp-common.dtsi
-@@ -181,5 +181,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
-index e04d2e5ea51aa..72e960c888ac8 100644
---- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xap-1510.dts
-@@ -85,5 +85,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
-index a399800139d9c..750e17482371c 100644
---- a/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm4708-luxul-xwc-1000.dts
-@@ -88,5 +88,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
-index fad3473810a2e..2bdbc7d18b0eb 100644
---- a/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm4708-netgear-r6250.dts
-@@ -122,5 +122,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
-index 5b2b7b8b3b123..b226bef3369cf 100644
---- a/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm4708-smartrg-sr400ac.dts
-@@ -145,6 +145,14 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-
-diff --git a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
-index d0a26b643b82f..192b8db5a89c3 100644
---- a/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47081-buffalo-wzr-600dhp2.dts
-@@ -145,5 +145,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
-index 9f21d6d6d35b7..0198b5f9e4a75 100644
---- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xap-1410.dts
-@@ -81,5 +81,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
-index 2561072917021..73ff1694a4a0b 100644
---- a/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47081-luxul-xwr-1200.dts
-@@ -148,5 +148,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
-index 707c561703ed8..55fc9f44cbc7f 100644
---- a/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm4709-netgear-r8000.dts
-@@ -227,6 +227,14 @@
- label = "wan";
- };
-
-+ port@5 {
-+ status = "disabled";
-+ };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
- port@8 {
- label = "cpu";
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
-index c914569ddd5ec..e6d26987865d0 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-885l.dts
-@@ -144,6 +144,14 @@
- label = "wan";
- };
-
-+ port@5 {
-+ status = "disabled";
-+ };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
- port@8 {
- label = "cpu";
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
-index f050acbea0b20..3124dfd01b944 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-dlink-dir-890l.dts
-@@ -192,6 +192,14 @@
- label = "wan";
- };
-
-+ port@5 {
-+ status = "disabled";
-+ };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
- port@8 {
- label = "cpu";
- phy-mode = "rgmii";
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
-index e8991d4e248ce..e374062eb5b76 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-abr-4500.dts
-@@ -107,5 +107,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
-index afc635c8cdebb..badafa024d24c 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xap-1610.dts
-@@ -120,5 +120,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
-index 7cfa4607ef311..cf95af9db1e66 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xbr-4500.dts
-@@ -107,5 +107,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
-index d55e10095eae7..992c19e1cfa17 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwc-2000.dts
-@@ -75,5 +75,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
-index ccf031c0e276d..4d0ba315a2049 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3100.dts
-@@ -147,5 +147,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
-index e28f7a3501179..83c429afc2974 100644
---- a/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm47094-luxul-xwr-3150-v1.dts
-@@ -158,5 +158,13 @@
- port@5 {
- label = "cpu";
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-diff --git a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
-index 03ad614e6b721..0bf5106f7012c 100644
---- a/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm53015-meraki-mr26.dts
-@@ -124,6 +124,14 @@
- full-duplex;
- };
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-
-diff --git a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
-index 26c12bfb0bdd4..25eeacf6a2484 100644
---- a/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm53016-meraki-mr32.dts
-@@ -185,6 +185,14 @@
- full-duplex;
- };
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-
-diff --git a/arch/arm/boot/dts/broadcom/bcm953012er.dts b/arch/arm/boot/dts/broadcom/bcm953012er.dts
-index 4fe3b36533767..d939ec9f4a9e7 100644
---- a/arch/arm/boot/dts/broadcom/bcm953012er.dts
-+++ b/arch/arm/boot/dts/broadcom/bcm953012er.dts
-@@ -84,6 +84,14 @@
- label = "cpu";
- ethernet = <&gmac0>;
- };
-+
-+ port@7 {
-+ status = "disabled";
-+ };
-+
-+ port@8 {
-+ status = "disabled";
-+ };
- };
- };
-
-diff --git a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
-index 884d99297d4cf..f516e0426bb9e 100644
---- a/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
-+++ b/arch/arm/boot/dts/qcom/qcom-apq8026-samsung-matisse-wifi.dts
-@@ -45,11 +45,11 @@
-
- event-hall-sensor {
- label = "Hall Effect Sensor";
-- gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
-- interrupts = <&tlmm 110 IRQ_TYPE_EDGE_FALLING>;
-+ gpios = <&tlmm 110 GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_SW>;
- linux,code = <SW_LID>;
- debounce-interval = <15>;
-+ linux,can-disable;
- wakeup-source;
- };
- };
-diff --git a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
-index fc4f52f9e9f7d..63e21aa236429 100644
---- a/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
-+++ b/arch/arm/boot/dts/qcom/qcom-mdm9615.dtsi
-@@ -47,14 +47,12 @@
- };
- };
-
-- regulators {
-- vsdcc_fixed: vsdcc-regulator {
-- compatible = "regulator-fixed";
-- regulator-name = "SDCC Power";
-- regulator-min-microvolt = <2700000>;
-- regulator-max-microvolt = <2700000>;
-- regulator-always-on;
-- };
-+ vsdcc_fixed: vsdcc-regulator {
-+ compatible = "regulator-fixed";
-+ regulator-name = "SDCC Power";
-+ regulator-min-microvolt = <2700000>;
-+ regulator-max-microvolt = <2700000>;
-+ regulator-always-on;
- };
-
- soc: soc {
-diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
-index c66de9dd12dfc..6a83923aa4612 100644
---- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
-+++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
-@@ -239,7 +239,7 @@
- };
-
- keyboard_pins: keyboard {
-- pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_02";
-+ pins = "GP_3_10", "GP_3_11", "GP_3_12", "GP_3_15", "GP_11_2";
- bias-pull-up;
- };
-
-diff --git a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
-index 65480a9f5cc4e..842f2b17c4a81 100644
---- a/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
-+++ b/arch/arm/boot/dts/st/stm32f7-pinctrl.dtsi
-@@ -376,7 +376,6 @@
- };
- };
-
--
- ltdc_pins_a: ltdc-0 {
- pins {
- pinmux = <STM32_PINMUX('E', 4, AF14)>, /* LCD_B0 */
-diff --git a/arch/arm/boot/dts/ti/omap/am3517-evm.dts b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
-index af9df15274bed..866f68c5b504d 100644
---- a/arch/arm/boot/dts/ti/omap/am3517-evm.dts
-+++ b/arch/arm/boot/dts/ti/omap/am3517-evm.dts
-@@ -271,13 +271,6 @@
- >;
- };
-
-- leds_pins: leds-pins {
-- pinctrl-single,pins = <
-- OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
-- OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
-- >;
-- };
--
- mmc1_pins: mmc1-pins {
- pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x2144, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
-@@ -355,3 +348,12 @@
- >;
- };
- };
-+
-+&omap3_pmx_wkup {
-+ leds_pins: leds-pins {
-+ pinctrl-single,pins = <
-+ OMAP3_WKUP_IOPAD(0x2a24, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu0.gpio_11 */
-+ OMAP3_WKUP_IOPAD(0x2a26, PIN_OUTPUT_PULLUP | MUX_MODE4) /* jtag_emu1.gpio_31 */
-+ >;
-+ };
-+};
-diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
-index 72529f5e2bed9..a41b503b7dcde 100644
---- a/arch/arm/include/asm/arm_pmuv3.h
-+++ b/arch/arm/include/asm/arm_pmuv3.h
-@@ -23,6 +23,8 @@
- #define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
- #define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
- #define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
-+#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
-+#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
- #define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
- #define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
-
-@@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
- return read_sysreg(PMCCNTR);
- }
-
--static inline void write_pmxevcntr(u32 val)
--{
-- write_sysreg(val, PMXEVCNTR);
--}
--
--static inline u32 read_pmxevcntr(void)
--{
-- return read_sysreg(PMXEVCNTR);
--}
--
--static inline void write_pmxevtyper(u32 val)
--{
-- write_sysreg(val, PMXEVTYPER);
--}
--
- static inline void write_pmcntenset(u32 val)
- {
- write_sysreg(val, PMCNTENSET);
-@@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
- write_sysreg(val, PMUSERENR);
- }
-
--static inline u32 read_pmceid0(void)
--{
-- return read_sysreg(PMCEID0);
--}
--
--static inline u32 read_pmceid1(void)
--{
-- return read_sysreg(PMCEID1);
--}
--
- static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
- static inline void kvm_clr_pmu_events(u32 clr) {}
- static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
-@@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
-
- /* PMU Version in DFR Register */
- #define ARMV8_PMU_DFR_VER_NI 0
-+#define ARMV8_PMU_DFR_VER_V3P1 0x4
- #define ARMV8_PMU_DFR_VER_V3P4 0x5
- #define ARMV8_PMU_DFR_VER_V3P5 0x6
- #define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
-@@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver)
- return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
- }
-
-+static inline u64 read_pmceid0(void)
-+{
-+ u64 val = read_sysreg(PMCEID0);
-+
-+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
-+ val |= (u64)read_sysreg(PMCEID2) << 32;
-+
-+ return val;
-+}
-+
-+static inline u64 read_pmceid1(void)
-+{
-+ u64 val = read_sysreg(PMCEID1);
-+
-+ if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
-+ val |= (u64)read_sysreg(PMCEID3) << 32;
-+
-+ return val;
-+}
-+
- #endif
-diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
-index c6aded1b069cf..e2a1916013e75 100644
---- a/arch/arm/include/asm/dma.h
-+++ b/arch/arm/include/asm/dma.h
-@@ -12,6 +12,9 @@
- extern phys_addr_t arm_dma_zone_size; \
- arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
- (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
-+
-+extern phys_addr_t arm_dma_limit;
-+#define ARCH_LOW_ADDRESS_LIMIT arm_dma_limit
- #endif
-
- #ifdef CONFIG_ISA_DMA_API
-diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
-index 58e039a851af0..3c82975d46db3 100644
---- a/arch/arm/include/asm/exception.h
-+++ b/arch/arm/include/asm/exception.h
-@@ -10,10 +10,6 @@
-
- #include <linux/interrupt.h>
-
--#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- #define __exception_irq_entry __irq_entry
--#else
--#define __exception_irq_entry
--#endif
-
- #endif /* __ASM_ARM_EXCEPTION_H */
-diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
-index d71ab61430b26..de75ae4d5ab41 100644
---- a/arch/arm/lib/memset.S
-+++ b/arch/arm/lib/memset.S
-@@ -17,6 +17,7 @@ ENTRY(__memset)
- ENTRY(mmioset)
- WEAK(memset)
- UNWIND( .fnstart )
-+ and r1, r1, #255 @ cast to unsigned char
- ands r3, r0, #3 @ 1 unaligned?
- mov ip, r0 @ preserve r0 as return value
- bne 6f @ 1
-diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
-index c392e18f1e431..a395b6c0aae2a 100644
---- a/arch/arm/xen/enlighten.c
-+++ b/arch/arm/xen/enlighten.c
-@@ -164,9 +164,6 @@ static int xen_starting_cpu(unsigned int cpu)
- BUG_ON(err);
- per_cpu(xen_vcpu, cpu) = vcpup;
-
-- if (!xen_kernel_unmapped_at_usr())
-- xen_setup_runstate_info(cpu);
--
- after_register_vcpu_info:
- enable_percpu_irq(xen_events_irq, 0);
- return 0;
-@@ -487,7 +484,8 @@ static int __init xen_guest_init(void)
- * for secondary CPUs as they are brought up.
- * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
- */
-- xen_vcpu_info = alloc_percpu(struct vcpu_info);
-+ xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
-+ 1 << fls(sizeof(struct vcpu_info) - 1));
- if (xen_vcpu_info == NULL)
- return -ENOMEM;
-
-@@ -523,9 +521,6 @@ static int __init xen_guest_init(void)
- return -EINVAL;
- }
-
-- if (!xen_kernel_unmapped_at_usr())
-- xen_time_setup_guest();
--
- if (xen_initial_domain())
- pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
-
-@@ -535,7 +530,13 @@ static int __init xen_guest_init(void)
- }
- early_initcall(xen_guest_init);
-
--static int __init xen_pm_init(void)
-+static int xen_starting_runstate_cpu(unsigned int cpu)
-+{
-+ xen_setup_runstate_info(cpu);
-+ return 0;
-+}
-+
-+static int __init xen_late_init(void)
- {
- if (!xen_domain())
- return -ENODEV;
-@@ -548,9 +549,16 @@ static int __init xen_pm_init(void)
- do_settimeofday64(&ts);
- }
-
-- return 0;
-+ if (xen_kernel_unmapped_at_usr())
-+ return 0;
-+
-+ xen_time_setup_guest();
-+
-+ return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
-+ "arm/xen_runstate:starting",
-+ xen_starting_runstate_cpu, NULL);
- }
--late_initcall(xen_pm_init);
-+late_initcall(xen_late_init);
-
-
- /* empty stubs */
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 78f20e6327120..6062a52a084ff 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -1368,6 +1368,8 @@ choice
- config CPU_BIG_ENDIAN
- bool "Build big-endian kernel"
- depends on !LD_IS_LLD || LLD_VERSION >= 130000
-+ # https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
-+ depends on AS_IS_GNU || AS_VERSION >= 150000
- help
- Say Y if you plan on running a kernel with a big-endian userspace.
-
-diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
-index d2f5345d05600..717288bbdb8b6 100644
---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
-@@ -1186,26 +1186,34 @@
- dma-coherent;
- };
-
-- usb0: usb@3100000 {
-- status = "disabled";
-- compatible = "snps,dwc3";
-- reg = <0x0 0x3100000 0x0 0x10000>;
-- interrupts = <0 80 0x4>; /* Level high type */
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-- };
-+ bus: bus {
-+ #address-cells = <2>;
-+ #size-cells = <2>;
-+ compatible = "simple-bus";
-+ ranges;
-+ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
-+
-+ usb0: usb@3100000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3100000 0x0 0x10000>;
-+ interrupts = <0 80 0x4>; /* Level high type */
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ status = "disabled";
-+ };
-
-- usb1: usb@3110000 {
-- status = "disabled";
-- compatible = "snps,dwc3";
-- reg = <0x0 0x3110000 0x0 0x10000>;
-- interrupts = <0 81 0x4>; /* Level high type */
-- dr_mode = "host";
-- snps,quirk-frame-length-adjustment = <0x20>;
-- snps,dis_rxdet_inp3_quirk;
-- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ usb1: usb@3110000 {
-+ compatible = "snps,dwc3";
-+ reg = <0x0 0x3110000 0x0 0x10000>;
-+ interrupts = <0 81 0x4>; /* Level high type */
-+ dr_mode = "host";
-+ snps,quirk-frame-length-adjustment = <0x20>;
-+ snps,dis_rxdet_inp3_quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-+ status = "disabled";
-+ };
- };
-
- ccn@4000000 {
-diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
-index 236fe44f779df..738024baaa578 100644
---- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
-+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
-@@ -399,6 +399,7 @@
- "pll8k", "pll11k", "clkext3";
- dmas = <&sdma2 24 25 0x80000000>;
- dma-names = "rx";
-+ #sound-dai-cells = <0>;
- status = "disabled";
- };
-
-diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
-index aa38dd6dc9ba5..1bb1d0c1bae4d 100644
---- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
-+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
-@@ -371,6 +371,7 @@
- "pll8k", "pll11k", "clkext3";
- dmas = <&sdma2 24 25 0x80000000>;
- dma-names = "rx";
-+ #sound-dai-cells = <0>;
- status = "disabled";
- };
-
-diff --git a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
-index 28db9349ed62c..267ceffc02d84 100644
---- a/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
-+++ b/arch/arm64/boot/dts/freescale/imx8mp-debix-model-a.dts
-@@ -284,7 +284,6 @@
- usb_hub_2_x: hub@1 {
- compatible = "usbbda,5411";
- reg = <1>;
-- reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
- vdd-supply = <&reg_usb_hub>;
- peer-hub = <&usb_hub_3_x>;
- };
-@@ -293,7 +292,6 @@
- usb_hub_3_x: hub@2 {
- compatible = "usbbda,411";
- reg = <2>;
-- reset-gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
- vdd-supply = <&reg_usb_hub>;
- peer-hub = <&usb_hub_2_x>;
- };
-@@ -443,7 +441,6 @@
- pinctrl_usb1: usb1grp {
- fsl,pins = <
- MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x10
-- MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25 0x19
- >;
- };
-
-diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
-index 7764b4146e0ab..2bbdacb1313f9 100644
---- a/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
-+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-img.dtsi
-@@ -8,5 +8,5 @@
- };
-
- &jpegenc {
-- compatible = "nxp,imx8qm-jpgdec", "nxp,imx8qxp-jpgenc";
-+ compatible = "nxp,imx8qm-jpgenc", "nxp,imx8qxp-jpgenc";
- };
-diff --git a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
-index 32cfb3e2efc3a..47d45ff3d6f57 100644
---- a/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
-+++ b/arch/arm64/boot/dts/marvell/cn9130-crb.dtsi
-@@ -120,7 +120,7 @@
- "mpp59", "mpp60", "mpp61";
- marvell,function = "sdio";
- };
-- cp0_spi0_pins: cp0-spi-pins-0 {
-+ cp0_spi1_pins: cp0-spi-pins-1 {
- marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
- marvell,function = "spi1";
- };
-@@ -170,7 +170,7 @@
-
- &cp0_spi1 {
- pinctrl-names = "default";
-- pinctrl-0 = <&cp0_spi0_pins>;
-+ pinctrl-0 = <&cp0_spi1_pins>;
- reg = <0x700680 0x50>, /* control */
- <0x2000000 0x1000000>; /* CS0 */
- status = "okay";
-diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
-index c7de1ea0d470a..6eb6a175de38d 100644
---- a/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
-+++ b/arch/arm64/boot/dts/marvell/cn9130-db.dtsi
-@@ -307,7 +307,7 @@
- &cp0_spi1 {
- status = "disabled";
- pinctrl-names = "default";
-- pinctrl-0 = <&cp0_spi0_pins>;
-+ pinctrl-0 = <&cp0_spi1_pins>;
- reg = <0x700680 0x50>;
-
- flash@0 {
-@@ -371,7 +371,7 @@
- "mpp59", "mpp60", "mpp61";
- marvell,function = "sdio";
- };
-- cp0_spi0_pins: cp0-spi-pins-0 {
-+ cp0_spi1_pins: cp0-spi-pins-1 {
- marvell,pins = "mpp13", "mpp14", "mpp15", "mpp16";
- marvell,function = "spi1";
- };
-diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
-index 5f592f1d81e2e..fe08e131b7b9e 100644
---- a/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
-+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3767.dtsi
-@@ -28,7 +28,7 @@
- flash@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
-- spi-max-frequency = <136000000>;
-+ spi-max-frequency = <102000000>;
- spi-tx-bus-width = <4>;
- spi-rx-bus-width = <4>;
- };
-@@ -42,7 +42,7 @@
- mmc@3400000 {
- status = "okay";
- bus-width = <4>;
-- cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_HIGH>;
-+ cd-gpios = <&gpio TEGRA234_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>;
- disable-wp;
- };
-
-diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
-index 95524e5bce826..ac69eacf8a6ba 100644
---- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
-+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
-@@ -43,12 +43,12 @@
- <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
-- <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
-+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
- status = "okay";
- };
-
-diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
-index 4f5541e9be0e9..dabe9f42a63ad 100644
---- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
-+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
-@@ -172,6 +172,9 @@
- pd-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>;
-
- avdd-supply = <&pm8916_l6>;
-+ a2vdd-supply = <&pm8916_l6>;
-+ dvdd-supply = <&pm8916_l6>;
-+ pvdd-supply = <&pm8916_l6>;
- v1p2-supply = <&pm8916_l6>;
- v3p3-supply = <&pm8916_l17>;
-
-diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
-index 8bfc2db44624a..e40c55adff23d 100644
---- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
-@@ -135,7 +135,7 @@
- reg = <0x0 0x4a800000 0x0 0x100000>;
- no-map;
-
-- hwlocks = <&tcsr_mutex 0>;
-+ hwlocks = <&tcsr_mutex 3>;
- };
- };
-
-diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
-index 47b8b1d6730ac..264845cecf925 100644
---- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
-@@ -211,7 +211,7 @@
- smem {
- compatible = "qcom,smem";
- memory-region = <&smem_region>;
-- hwlocks = <&tcsr_mutex 0>;
-+ hwlocks = <&tcsr_mutex 3>;
- };
-
- soc: soc@0 {
-@@ -393,7 +393,7 @@
-
- tcsr_mutex: hwlock@1905000 {
- compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
-- reg = <0x0 0x01905000 0x0 0x1000>;
-+ reg = <0x0 0x01905000 0x0 0x20000>;
- #hwlock-cells = <1>;
- };
-
-diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
-index 00ed71936b472..92fd924bbdbe5 100644
---- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
-@@ -101,7 +101,7 @@
- reg = <0x0 0x4ab00000 0x0 0x100000>;
- no-map;
-
-- hwlocks = <&tcsr_mutex 0>;
-+ hwlocks = <&tcsr_mutex 3>;
- };
-
- memory@4ac00000 {
-diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-index 51aba071c1eb3..8a72ad4afd032 100644
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -195,7 +195,7 @@
- smem@4aa00000 {
- compatible = "qcom,smem";
- reg = <0x0 0x4aa00000 0x0 0x100000>;
-- hwlocks = <&tcsr_mutex 0>;
-+ hwlocks = <&tcsr_mutex 3>;
- no-map;
- };
- };
-diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
-index 33fb65d731046..3c934363368c3 100644
---- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
-+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
-@@ -1813,7 +1813,7 @@
- #size-cells = <1>;
- #iommu-cells = <1>;
- compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
-- ranges = <0 0x01e20000 0x40000>;
-+ ranges = <0 0x01e20000 0x20000>;
- reg = <0x01ef0000 0x3000>;
- clocks = <&gcc GCC_SMMU_CFG_CLK>,
- <&gcc GCC_APSS_TCU_CLK>;
-diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
-index 6e24f0f2374fe..5a6b1942cfaa5 100644
---- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
-+++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
-@@ -1447,7 +1447,7 @@
- apps_iommu: iommu@1ef0000 {
- compatible = "qcom,msm8916-iommu", "qcom,msm-iommu-v1";
- reg = <0x01ef0000 0x3000>;
-- ranges = <0 0x01e20000 0x40000>;
-+ ranges = <0 0x01e20000 0x20000>;
- clocks = <&gcc GCC_SMMU_CFG_CLK>,
- <&gcc GCC_APSS_TCU_CLK>;
- clock-names = "iface", "bus";
-diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
-index f9f5afbcc52bb..4c5be22b47fee 100644
---- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
-+++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
-@@ -379,7 +379,7 @@
- smp2p-modem {
- compatible = "qcom,smp2p";
- interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
-- qcom,ipc = <&apcs 8 13>;
-+ qcom,ipc = <&apcs 8 14>;
-
- qcom,local-pid = <0>;
- qcom,remote-pid = <1>;
-@@ -402,7 +402,7 @@
- smp2p-wcnss {
- compatible = "qcom,smp2p";
- interrupts = <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>;
-- qcom,ipc = <&apcs 8 17>;
-+ qcom,ipc = <&apcs 8 18>;
-
- qcom,local-pid = <0>;
- qcom,remote-pid = <4>;
-@@ -428,9 +428,9 @@
- #address-cells = <1>;
- #size-cells = <0>;
-
-- qcom,ipc-1 = <&apcs 8 12>;
-+ qcom,ipc-1 = <&apcs 8 13>;
- qcom,ipc-2 = <&apcs 8 9>;
-- qcom,ipc-3 = <&apcs 8 18>;
-+ qcom,ipc-3 = <&apcs 8 19>;
-
- apps_smsm: apps@0 {
- reg = <0>;
-diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
-index fcca1ba94da69..5fe5de9ceef99 100644
---- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
-+++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
-@@ -109,11 +109,6 @@
- qcom,client-id = <1>;
- };
-
-- audio_mem: audio@cb400000 {
-- reg = <0 0xcb000000 0 0x400000>;
-- no-mem;
-- };
--
- qseecom_mem: qseecom@cb400000 {
- reg = <0 0xcb400000 0 0x1c00000>;
- no-mem;
-diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
-index eadba066972e8..0f7c591878962 100644
---- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
-+++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
-@@ -13,7 +13,7 @@
- compatible = "qcom,qrb2210-rb1", "qcom,qrb2210", "qcom,qcm2290";
-
- aliases {
-- serial0 = &uart0;
-+ serial0 = &uart4;
- sdhc1 = &sdhc_1;
- sdhc2 = &sdhc_2;
- };
-@@ -150,15 +150,15 @@
-
- pm2250_s3: s3 {
- /* 0.4V-1.6625V -> 1.3V (Power tree requirements) */
-- regulator-min-microvolts = <1350000>;
-- regulator-max-microvolts = <1350000>;
-+ regulator-min-microvolt = <1352000>;
-+ regulator-max-microvolt = <1352000>;
- regulator-boot-on;
- };
-
- pm2250_s4: s4 {
- /* 1.2V-2.35V -> 2.05V (Power tree requirements) */
-- regulator-min-microvolts = <2072000>;
-- regulator-max-microvolts = <2072000>;
-+ regulator-min-microvolt = <2072000>;
-+ regulator-max-microvolt = <2072000>;
- regulator-boot-on;
- };
-
-@@ -166,47 +166,47 @@
-
- pm2250_l2: l2 {
- /* LPDDR4X VDD2 */
-- regulator-min-microvolts = <1136000>;
-- regulator-max-microvolts = <1136000>;
-+ regulator-min-microvolt = <1136000>;
-+ regulator-max-microvolt = <1136000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm2250_l3: l3 {
- /* LPDDR4X VDDQ */
-- regulator-min-microvolts = <616000>;
-- regulator-max-microvolts = <616000>;
-+ regulator-min-microvolt = <616000>;
-+ regulator-max-microvolt = <616000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm2250_l4: l4 {
-- /* max = 3.05V -> max = just below 3V (SDHCI2) */
-- regulator-min-microvolts = <1648000>;
-- regulator-max-microvolts = <2992000>;
-+ /* max = 3.05V -> max = 2.7 to disable 3V signaling (SDHCI2) */
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <2700000>;
- regulator-allow-set-load;
- };
-
- pm2250_l5: l5 {
- /* CSI/DSI */
-- regulator-min-microvolts = <1232000>;
-- regulator-max-microvolts = <1232000>;
-+ regulator-min-microvolt = <1232000>;
-+ regulator-max-microvolt = <1232000>;
- regulator-allow-set-load;
- regulator-boot-on;
- };
-
- pm2250_l6: l6 {
- /* DRAM PLL */
-- regulator-min-microvolts = <928000>;
-- regulator-max-microvolts = <928000>;
-+ regulator-min-microvolt = <928000>;
-+ regulator-max-microvolt = <928000>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- pm2250_l7: l7 {
- /* Wi-Fi CX/MX */
-- regulator-min-microvolts = <664000>;
-- regulator-max-microvolts = <664000>;
-+ regulator-min-microvolt = <664000>;
-+ regulator-max-microvolt = <664000>;
- };
-
- /*
-@@ -216,37 +216,37 @@
-
- pm2250_l10: l10 {
- /* Wi-Fi RFA */
-- regulator-min-microvolts = <1300000>;
-- regulator-max-microvolts = <1300000>;
-+ regulator-min-microvolt = <1304000>;
-+ regulator-max-microvolt = <1304000>;
- };
-
- pm2250_l11: l11 {
- /* GPS RF1 */
-- regulator-min-microvolts = <1000000>;
-- regulator-max-microvolts = <1000000>;
-+ regulator-min-microvolt = <1000000>;
-+ regulator-max-microvolt = <1000000>;
- regulator-boot-on;
- };
-
- pm2250_l12: l12 {
- /* USB PHYs */
-- regulator-min-microvolts = <928000>;
-- regulator-max-microvolts = <928000>;
-+ regulator-min-microvolt = <928000>;
-+ regulator-max-microvolt = <928000>;
- regulator-allow-set-load;
- regulator-boot-on;
- };
-
- pm2250_l13: l13 {
- /* USB/QFPROM/PLLs */
-- regulator-min-microvolts = <1800000>;
-- regulator-max-microvolts = <1800000>;
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
- regulator-allow-set-load;
- regulator-boot-on;
- };
-
- pm2250_l14: l14 {
- /* SDHCI1 VQMMC */
-- regulator-min-microvolts = <1800000>;
-- regulator-max-microvolts = <1800000>;
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
- regulator-allow-set-load;
- /* Broken hardware, never turn it off! */
- regulator-always-on;
-@@ -254,8 +254,8 @@
-
- pm2250_l15: l15 {
- /* WCD/DSI/BT VDDIO */
-- regulator-min-microvolts = <1800000>;
-- regulator-max-microvolts = <1800000>;
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
- regulator-allow-set-load;
- regulator-always-on;
- regulator-boot-on;
-@@ -263,47 +263,47 @@
-
- pm2250_l16: l16 {
- /* GPS RF2 */
-- regulator-min-microvolts = <1800000>;
-- regulator-max-microvolts = <1800000>;
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- };
-
- pm2250_l17: l17 {
-- regulator-min-microvolts = <3000000>;
-- regulator-max-microvolts = <3000000>;
-+ regulator-min-microvolt = <3000000>;
-+ regulator-max-microvolt = <3000000>;
- };
-
- pm2250_l18: l18 {
- /* VDD_PXn */
-- regulator-min-microvolts = <1800000>;
-- regulator-max-microvolts = <1800000>;
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
- };
-
- pm2250_l19: l19 {
- /* VDD_PXn */
-- regulator-min-microvolts = <1800000>;
-- regulator-max-microvolts = <1800000>;
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
- };
-
- pm2250_l20: l20 {
- /* SDHCI1 VMMC */
-- regulator-min-microvolts = <2856000>;
-- regulator-max-microvolts = <2856000>;
-+ regulator-min-microvolt = <2400000>;
-+ regulator-max-microvolt = <3600000>;
- regulator-allow-set-load;
- };
-
- pm2250_l21: l21 {
- /* SDHCI2 VMMC */
-- regulator-min-microvolts = <2960000>;
-- regulator-max-microvolts = <3300000>;
-+ regulator-min-microvolt = <2960000>;
-+ regulator-max-microvolt = <3300000>;
- regulator-allow-set-load;
- regulator-boot-on;
- };
-
- pm2250_l22: l22 {
- /* Wi-Fi */
-- regulator-min-microvolts = <3312000>;
-- regulator-max-microvolts = <3312000>;
-+ regulator-min-microvolt = <3312000>;
-+ regulator-max-microvolt = <3312000>;
- };
- };
- };
-@@ -357,7 +357,7 @@
- };
-
- /* UART connected to the Micro-USB port via a FTDI chip */
--&uart0 {
-+&uart4 {
- compatible = "qcom,geni-debug-uart";
- status = "okay";
- };
-diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
-index 925428a5f6aea..91bb58c6b1a61 100644
---- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
-+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
-@@ -649,18 +649,6 @@
- };
- };
-
-- eud_typec: connector {
-- compatible = "usb-c-connector";
--
-- ports {
-- port@0 {
-- con_eud: endpoint {
-- remote-endpoint = <&eud_con>;
-- };
-- };
-- };
-- };
--
- memory@80000000 {
- device_type = "memory";
- /* We expect the bootloader to fill in the size */
-@@ -869,7 +857,8 @@
- clocks = <&rpmhcc RPMH_CXO_CLK>,
- <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
- <0>, <&pcie1_lane>,
-- <0>, <0>, <0>, <0>;
-+ <0>, <0>, <0>,
-+ <&usb_1_ssphy>;
- clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
- "pcie_0_pipe_clk", "pcie_1_pipe_clk",
- "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
-@@ -3624,6 +3613,8 @@
- <0 0x88e2000 0 0x1000>;
- interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
-
-+ status = "disabled";
-+
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-@@ -3634,13 +3625,6 @@
- remote-endpoint = <&usb2_role_switch>;
- };
- };
--
-- port@1 {
-- reg = <1>;
-- eud_con: endpoint {
-- remote-endpoint = <&con_eud>;
-- };
-- };
- };
- };
-
-@@ -5363,6 +5347,14 @@
- reg = <0 0x18591000 0 0x1000>,
- <0 0x18592000 0 0x1000>,
- <0 0x18593000 0 0x1000>;
-+
-+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "dcvsh-irq-0",
-+ "dcvsh-irq-1",
-+ "dcvsh-irq-2";
-+
- clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
- clock-names = "xo", "alternate";
- #freq-domain-cells = <1>;
-diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
-index 84cd2e39266fe..ba2043d67370a 100644
---- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
-+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
-@@ -1328,7 +1328,8 @@
- compatible = "qcom,sdm670-pdc", "qcom,pdc";
- reg = <0 0x0b220000 0 0x30000>;
- qcom,pdc-ranges = <0 480 40>, <41 521 7>, <49 529 4>,
-- <54 534 24>, <79 559 30>, <115 630 7>;
-+ <54 534 24>, <79 559 15>, <94 609 15>,
-+ <115 630 7>;
- #interrupt-cells = <2>;
- interrupt-parent = <&intc>;
- interrupt-controller;
-diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
-index f86e7acdfd99f..0ab5e8f53ac9f 100644
---- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
-+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
-@@ -143,16 +143,20 @@
- };
- };
-
-+&cpufreq_hw {
-+ /delete-property/ interrupts-extended; /* reference to lmh_cluster[01] */
-+};
-+
- &psci {
-- /delete-node/ cpu0;
-- /delete-node/ cpu1;
-- /delete-node/ cpu2;
-- /delete-node/ cpu3;
-- /delete-node/ cpu4;
-- /delete-node/ cpu5;
-- /delete-node/ cpu6;
-- /delete-node/ cpu7;
-- /delete-node/ cpu-cluster0;
-+ /delete-node/ power-domain-cpu0;
-+ /delete-node/ power-domain-cpu1;
-+ /delete-node/ power-domain-cpu2;
-+ /delete-node/ power-domain-cpu3;
-+ /delete-node/ power-domain-cpu4;
-+ /delete-node/ power-domain-cpu5;
-+ /delete-node/ power-domain-cpu6;
-+ /delete-node/ power-domain-cpu7;
-+ /delete-node/ power-domain-cluster;
- };
-
- &cpus {
-@@ -275,6 +279,14 @@
- &CLUSTER_SLEEP_0>;
- };
-
-+&lmh_cluster0 {
-+ status = "disabled";
-+};
-+
-+&lmh_cluster1 {
-+ status = "disabled";
-+};
-+
- /*
- * Reserved memory changes
- *
-@@ -338,6 +350,8 @@
-
-
- &apps_rsc {
-+ /delete-property/ power-domains;
-+
- regulators-0 {
- compatible = "qcom,pm8998-rpmh-regulators";
- qcom,pmic-id = "a";
-diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
-index b3c27a5247429..1516113391edc 100644
---- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
-+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
-@@ -716,6 +716,8 @@
- vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
- vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
- vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
-+
-+ qcom,snoc-host-cap-8bit-quirk;
- };
-
- /* PINCTRL - additions to nodes defined in sdm845.dtsi */
-diff --git a/arch/arm64/boot/dts/qcom/sdx75-idp.dts b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
-index 10d15871f2c48..a14e0650c4a8a 100644
---- a/arch/arm64/boot/dts/qcom/sdx75-idp.dts
-+++ b/arch/arm64/boot/dts/qcom/sdx75-idp.dts
-@@ -44,7 +44,7 @@
- };
-
- &apps_rsc {
-- pmx75-rpmh-regulators {
-+ regulators-0 {
- compatible = "qcom,pmx75-rpmh-regulators";
- qcom,pmic-id = "b";
-
-diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
-index d7c1a40617c64..197f8fed19a29 100644
---- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
-+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
-@@ -1208,7 +1208,7 @@
-
- apps_smmu: iommu@c600000 {
- compatible = "qcom,sm6125-smmu-500", "qcom,smmu-500", "arm,mmu-500";
-- reg = <0xc600000 0x80000>;
-+ reg = <0x0c600000 0x80000>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
-diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
-index 06c53000bb74d..19c6003dca153 100644
---- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
-+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
-@@ -1893,8 +1893,12 @@
- ranges;
- clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
- <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
-+ <&gcc GCC_PCIE_0_CLKREF_CLK>,
- <&gcc GCC_PCIE0_PHY_REFGEN_CLK>;
-- clock-names = "aux", "cfg_ahb", "refgen";
-+ clock-names = "aux",
-+ "cfg_ahb",
-+ "ref",
-+ "refgen";
-
- resets = <&gcc GCC_PCIE_0_PHY_BCR>;
- reset-names = "phy";
-@@ -1991,8 +1995,12 @@
- ranges;
- clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
- <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
-+ <&gcc GCC_PCIE_1_CLKREF_CLK>,
- <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
-- clock-names = "aux", "cfg_ahb", "refgen";
-+ clock-names = "aux",
-+ "cfg_ahb",
-+ "ref",
-+ "refgen";
-
- resets = <&gcc GCC_PCIE_1_PHY_BCR>;
- reset-names = "phy";
-diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
-index 00604bf7724f4..a94e069da83d5 100644
---- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
-+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
-@@ -2964,7 +2964,7 @@
- };
-
- qup_uart18_default: qup-uart18-default-state {
-- pins = "gpio58", "gpio59";
-+ pins = "gpio68", "gpio69";
- function = "qup18";
- drive-strength = <2>;
- bias-disable;
-diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
-index 0bd80e5157544..97af4f9128285 100644
---- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
-+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
-@@ -137,6 +137,18 @@
- vin-supply = <&vcc5v0_sys>;
- };
-
-+ vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
-+ compatible = "regulator-fixed";
-+ enable-active-high;
-+ gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
-+ pinctrl-names = "default";
-+ pinctrl-0 = <&pcie_m2_1_pwren>;
-+ regulator-name = "vcc3v3_pcie2x1l0";
-+ regulator-min-microvolt = <3300000>;
-+ regulator-max-microvolt = <3300000>;
-+ vin-supply = <&vcc5v0_sys>;
-+ };
-+
- vcc3v3_pcie30: vcc3v3-pcie30-regulator {
- compatible = "regulator-fixed";
- enable-active-high;
-@@ -421,6 +433,14 @@
- status = "okay";
- };
-
-+&pcie2x1l1 {
-+ reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
-+ vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
-+ pinctrl-names = "default";
-+ pinctrl-0 = <&pcie2_1_rst>;
-+ status = "okay";
-+};
-+
- &pcie2x1l2 {
- reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
- vpcie3v3-supply = <&vcc_3v3_pcie20>;
-@@ -467,6 +487,10 @@
- rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-
-+ pcie2_1_rst: pcie2-1-rst {
-+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
-+ };
-+
- pcie2_2_rst: pcie2-2-rst {
- rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-@@ -474,6 +498,10 @@
- pcie_m2_0_pwren: pcie-m20-pwren {
- rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
- };
-+
-+ pcie_m2_1_pwren: pcie-m21-pwren {
-+ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
-+ };
- };
-
- usb {
-diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
-index e7b8e2e7f083d..8bd5acc6d6835 100644
---- a/arch/arm64/boot/dts/ti/Makefile
-+++ b/arch/arm64/boot/dts/ti/Makefile
-@@ -9,6 +9,8 @@
- # alphabetically.
-
- # Boards with AM62x SoC
-+k3-am625-sk-hdmi-audio-dtbs := k3-am625-sk.dtb k3-am62x-sk-hdmi-audio.dtbo
-+k3-am62-lp-sk-hdmi-audio-dtbs := k3-am62-lp-sk.dtb k3-am62x-sk-hdmi-audio.dtbo
- dtb-$(CONFIG_ARCH_K3) += k3-am625-beagleplay.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-am625-phyboard-lyra-rdk.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-am625-sk.dtb
-@@ -19,7 +21,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dahlia.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-dev.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-am625-verdin-wifi-yavia.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk.dtb
--dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo
-+dtb-$(CONFIG_ARCH_K3) += k3-am625-sk-hdmi-audio.dtb
-+dtb-$(CONFIG_ARCH_K3) += k3-am62-lp-sk-hdmi-audio.dtb
-
- # Boards with AM62Ax SoC
- dtb-$(CONFIG_ARCH_K3) += k3-am62a7-sk.dtb
-@@ -66,6 +69,8 @@ dtb-$(CONFIG_ARCH_K3) += k3-j721e-sk.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-am68-sk-base-board.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-j721s2-common-proc-board.dtb
- dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm-gesi-exp-board.dtbo
-+k3-j721s2-evm-dtbs := k3-j721s2-common-proc-board.dtb k3-j721s2-evm-gesi-exp-board.dtbo
-+dtb-$(CONFIG_ARCH_K3) += k3-j721s2-evm.dtb
-
- # Boards with J784s4 SoC
- dtb-$(CONFIG_ARCH_K3) += k3-am69-sk.dtb
-diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
-index 40992e7e4c308..5db52f2372534 100644
---- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
-+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
-@@ -1061,6 +1061,7 @@
- vddc-supply = <&reg_1v2_dsi>;
- vddmipi-supply = <&reg_1v2_dsi>;
- vddio-supply = <&reg_1v8_dsi>;
-+ status = "disabled";
-
- dsi_bridge_ports: ports {
- #address-cells = <1>;
-diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
-index 7cfdf562b53bf..2de74428a8bde 100644
---- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
-+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
-@@ -58,7 +58,7 @@
-
- ramoops: ramoops@9ca00000 {
- compatible = "ramoops";
-- reg = <0x00 0x9c700000 0x00 0x00100000>;
-+ reg = <0x00 0x9ca00000 0x00 0x00100000>;
- record-size = <0x8000>;
- console-size = <0x8000>;
- ftrace-size = <0x00>;
-diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
-index cff283c75f8ec..99f2878de4c67 100644
---- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
-+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
-@@ -250,7 +250,7 @@
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&main_i2c1_pins_default>;
-- clock-frequency = <400000>;
-+ clock-frequency = <100000>;
-
- exp1: gpio@22 {
- compatible = "ti,tca6424";
-diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
-index 18dc2fb3d7b7b..c27404fa4418a 100644
---- a/arch/arm64/include/asm/arm_pmuv3.h
-+++ b/arch/arm64/include/asm/arm_pmuv3.h
-@@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
- ID_AA64DFR0_EL1_PMUVer_SHIFT);
- }
-
--static inline void write_pmcr(u32 val)
-+static inline void write_pmcr(u64 val)
- {
- write_sysreg(val, pmcr_el0);
- }
-
--static inline u32 read_pmcr(void)
-+static inline u64 read_pmcr(void)
- {
- return read_sysreg(pmcr_el0);
- }
-@@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
- return read_sysreg(pmccntr_el0);
- }
-
--static inline void write_pmxevcntr(u32 val)
--{
-- write_sysreg(val, pmxevcntr_el0);
--}
--
--static inline u32 read_pmxevcntr(void)
--{
-- return read_sysreg(pmxevcntr_el0);
--}
--
--static inline void write_pmxevtyper(u32 val)
--{
-- write_sysreg(val, pmxevtyper_el0);
--}
--
- static inline void write_pmcntenset(u32 val)
- {
- write_sysreg(val, pmcntenset_el0);
-@@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
- write_sysreg(val, pmintenclr_el1);
- }
-
--static inline void write_pmccfiltr(u32 val)
-+static inline void write_pmccfiltr(u64 val)
- {
- write_sysreg(val, pmccfiltr_el0);
- }
-@@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
- write_sysreg(val, pmuserenr_el0);
- }
-
--static inline u32 read_pmceid0(void)
-+static inline u64 read_pmceid0(void)
- {
- return read_sysreg(pmceid0_el0);
- }
-
--static inline u32 read_pmceid1(void)
-+static inline u64 read_pmceid1(void)
- {
- return read_sysreg(pmceid1_el0);
- }
-diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
-index 74d00feb62f03..7c7493cb571f9 100644
---- a/arch/arm64/include/asm/cputype.h
-+++ b/arch/arm64/include/asm/cputype.h
-@@ -86,7 +86,8 @@
- #define ARM_CPU_PART_NEOVERSE_N2 0xD49
- #define ARM_CPU_PART_CORTEX_A78C 0xD4B
-
--#define APM_CPU_PART_POTENZA 0x000
-+#define APM_CPU_PART_XGENE 0x000
-+#define APM_CPU_VAR_POTENZA 0x00
-
- #define CAVIUM_CPU_PART_THUNDERX 0x0A1
- #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
-diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
-index f4af547ef54ca..2e4d7da74fb87 100644
---- a/arch/arm64/include/asm/setup.h
-+++ b/arch/arm64/include/asm/setup.h
-@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
- extern bool rodata_enabled;
- extern bool rodata_full;
-
-- if (arg && !strcmp(arg, "full")) {
-+ if (!arg)
-+ return false;
-+
-+ if (!strcmp(arg, "full")) {
-+ rodata_enabled = rodata_full = true;
-+ return true;
-+ }
-+
-+ if (!strcmp(arg, "off")) {
-+ rodata_enabled = rodata_full = false;
-+ return true;
-+ }
-+
-+ if (!strcmp(arg, "on")) {
- rodata_enabled = true;
-- rodata_full = true;
-+ rodata_full = false;
- return true;
- }
-
-diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
-index bd69a4e7cd605..79200f21e1239 100644
---- a/arch/arm64/kernel/module-plts.c
-+++ b/arch/arm64/kernel/module-plts.c
-@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
- switch (ELF64_R_TYPE(rela[i].r_info)) {
- case R_AARCH64_JUMP26:
- case R_AARCH64_CALL26:
-- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-- break;
--
- /*
- * We only have to consider branch targets that resolve
- * to symbols that are defined in a different section.
-@@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
- {
- int i = 0, j = numrels - 1;
-
-- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-- return 0;
--
- while (i < j) {
- if (branch_rela_needs_plt(syms, &rela[i], dstidx))
- i++;
-diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
-index 95f6945c44325..a1710e5fa72b6 100644
---- a/arch/arm64/kvm/guest.c
-+++ b/arch/arm64/kvm/guest.c
-@@ -874,7 +874,7 @@ u32 __attribute_const__ kvm_target_cpu(void)
- break;
- case ARM_CPU_IMP_APM:
- switch (part_number) {
-- case APM_CPU_PART_POTENZA:
-+ case APM_CPU_PART_XGENE:
- return KVM_ARM_TARGET_XGENE_POTENZA;
- }
- break;
-diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
-index 8e2017ba5f1b1..924843f1f661b 100644
---- a/arch/arm64/mm/pageattr.c
-+++ b/arch/arm64/mm/pageattr.c
-@@ -29,8 +29,8 @@ bool can_set_direct_map(void)
- *
- * KFENCE pool requires page-granular mapping if initialized late.
- */
-- return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-- arm64_kfence_can_set_direct_map();
-+ return rodata_full || debug_pagealloc_enabled() ||
-+ arm64_kfence_can_set_direct_map();
- }
-
- static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
-@@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
- * If we are manipulating read-only permissions, apply the same
- * change to the linear mapping of the pages that back this VM area.
- */
-- if (rodata_enabled &&
-- rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
-+ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
- pgprot_val(clear_mask) == PTE_RDONLY)) {
- for (i = 0; i < area->nr_pages; i++) {
- __change_memory_common((u64)page_address(area->pages[i]),
-diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
-index b9f567e660166..ed5da02b1cf6f 100644
---- a/arch/loongarch/include/asm/percpu.h
-+++ b/arch/loongarch/include/asm/percpu.h
-@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
- #define __my_cpu_offset __my_cpu_offset
-
- #define PERCPU_OP(op, asm_op, c_op) \
--static inline unsigned long __percpu_##op(void *ptr, \
-+static __always_inline unsigned long __percpu_##op(void *ptr, \
- unsigned long val, int size) \
- { \
- unsigned long ret; \
-@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
- PERCPU_OP(or, or, |)
- #undef PERCPU_OP
-
--static inline unsigned long __percpu_read(void *ptr, int size)
-+static __always_inline unsigned long __percpu_read(void *ptr, int size)
- {
- unsigned long ret;
-
-@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
- return ret;
- }
-
--static inline void __percpu_write(void *ptr, unsigned long val, int size)
-+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
- {
- switch (size) {
- case 1:
-@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
- }
- }
-
--static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-- int size)
-+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-+ int size)
- {
- switch (size) {
- case 1:
-diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
-index 02042100e2671..7f830634dbe7d 100644
---- a/arch/mips/mm/cache.c
-+++ b/arch/mips/mm/cache.c
-@@ -117,7 +117,7 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
- * get faulted into the tlb (and thus flushed) anyways.
- */
- for (i = 0; i < nr; i++) {
-- addr = (unsigned long)kmap_local_page(page + i);
-+ addr = (unsigned long)kmap_local_page(nth_page(page, i));
- flush_data_cache_page(addr);
- kunmap_local((void *)addr);
- }
-diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
-index a15ab147af2e0..68cbe666510a3 100644
---- a/arch/parisc/Kconfig
-+++ b/arch/parisc/Kconfig
-@@ -138,11 +138,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
- default 8
-
- config ARCH_MMAP_RND_BITS_MAX
-- default 24 if 64BIT
-- default 17
-+ default 18 if 64BIT
-+ default 13
-
- config ARCH_MMAP_RND_COMPAT_BITS_MAX
-- default 17
-+ default 13
-
- # unless you want to implement ACPI on PA-RISC ... ;-)
- config PM
-diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
-index 1ed45fd085d3b..1eb488f25b838 100644
---- a/arch/parisc/include/asm/alternative.h
-+++ b/arch/parisc/include/asm/alternative.h
-@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
-
- /* Alternative SMP implementation. */
- #define ALTERNATIVE(cond, replacement) "!0:" \
-- ".section .altinstructions, \"aw\" !" \
-+ ".section .altinstructions, \"a\" !" \
-+ ".align 4 !" \
- ".word (0b-4-.) !" \
- ".hword 1, " __stringify(cond) " !" \
- ".word " __stringify(replacement) " !" \
-@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
-
- /* to replace one single instructions by a new instruction */
- #define ALTERNATIVE(from, to, cond, replacement)\
-- .section .altinstructions, "aw" ! \
-+ .section .altinstructions, "a" ! \
-+ .align 4 ! \
- .word (from - .) ! \
- .hword (to - from)/4, cond ! \
- .word replacement ! \
-@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
-
- /* to replace multiple instructions by new code */
- #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
-- .section .altinstructions, "aw" ! \
-+ .section .altinstructions, "a" ! \
-+ .align 4 ! \
- .word (from - .) ! \
- .hword -num_instructions, cond ! \
- .word (new_instr_ptr - .) ! \
-diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
-index 75677b526b2bb..74d17d7e759da 100644
---- a/arch/parisc/include/asm/assembly.h
-+++ b/arch/parisc/include/asm/assembly.h
-@@ -574,6 +574,7 @@
- */
- #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
- .section __ex_table,"aw" ! \
-+ .align 4 ! \
- .word (fault_addr - .), (except_addr - .) ! \
- .previous
-
-diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
-index 4b6d60b941247..b9cad0bb4461b 100644
---- a/arch/parisc/include/asm/bug.h
-+++ b/arch/parisc/include/asm/bug.h
-@@ -28,13 +28,15 @@
- do { \
- asm volatile("\n" \
- "1:\t" PARISC_BUG_BREAK_ASM "\n" \
-- "\t.pushsection __bug_table,\"aw\"\n" \
-+ "\t.pushsection __bug_table,\"a\"\n" \
-+ "\t.align %4\n" \
- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
-- "\t.short %c1, %c2\n" \
-- "\t.org 2b+%c3\n" \
-+ "\t.short %1, %2\n" \
-+ "\t.blockz %3-2*%4-2*2\n" \
- "\t.popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
-- "i" (0), "i" (sizeof(struct bug_entry)) ); \
-+ "i" (0), "i" (sizeof(struct bug_entry)), \
-+ "i" (sizeof(long)) ); \
- unreachable(); \
- } while(0)
-
-@@ -51,27 +53,31 @@
- do { \
- asm volatile("\n" \
- "1:\t" PARISC_BUG_BREAK_ASM "\n" \
-- "\t.pushsection __bug_table,\"aw\"\n" \
-+ "\t.pushsection __bug_table,\"a\"\n" \
-+ "\t.align %4\n" \
- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
-- "\t.short %c1, %c2\n" \
-- "\t.org 2b+%c3\n" \
-+ "\t.short %1, %2\n" \
-+ "\t.blockz %3-2*%4-2*2\n" \
- "\t.popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING|(flags)), \
-- "i" (sizeof(struct bug_entry)) ); \
-+ "i" (sizeof(struct bug_entry)), \
-+ "i" (sizeof(long)) ); \
- } while(0)
- #else
- #define __WARN_FLAGS(flags) \
- do { \
- asm volatile("\n" \
- "1:\t" PARISC_BUG_BREAK_ASM "\n" \
-- "\t.pushsection __bug_table,\"aw\"\n" \
-+ "\t.pushsection __bug_table,\"a\"\n" \
-+ "\t.align %2\n" \
- "2:\t" ASM_WORD_INSN "1b\n" \
-- "\t.short %c0\n" \
-- "\t.org 2b+%c1\n" \
-+ "\t.short %0\n" \
-+ "\t.blockz %1-%2-2\n" \
- "\t.popsection" \
- : : "i" (BUGFLAG_WARNING|(flags)), \
-- "i" (sizeof(struct bug_entry)) ); \
-+ "i" (sizeof(struct bug_entry)), \
-+ "i" (sizeof(long)) ); \
- } while(0)
- #endif
-
-diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
-index 140eaa97bf215..2d73d3c3cd37f 100644
---- a/arch/parisc/include/asm/elf.h
-+++ b/arch/parisc/include/asm/elf.h
-@@ -349,15 +349,7 @@ struct pt_regs; /* forward declaration... */
-
- #define ELF_HWCAP 0
-
--/* Masks for stack and mmap randomization */
--#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
--#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
--#define STACK_RND_MASK MMAP_RND_MASK
--
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *);
--#define arch_randomize_brk arch_randomize_brk
--
-+#define STACK_RND_MASK 0x7ff /* 8MB of VA */
-
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- struct linux_binprm;
-diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
-index af2a598bc0f81..94428798b6aa6 100644
---- a/arch/parisc/include/asm/jump_label.h
-+++ b/arch/parisc/include/asm/jump_label.h
-@@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
- asm_volatile_goto("1:\n\t"
- "nop\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
-+ ".align %1\n\t"
- ".word 1b - ., %l[l_yes] - .\n\t"
- __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
- ".popsection\n\t"
-- : : "i" (&((char *)key)[branch]) : : l_yes);
-+ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
-+ : : l_yes);
-
- return false;
- l_yes:
-@@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
- asm_volatile_goto("1:\n\t"
- "b,n %l[l_yes]\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
-+ ".align %1\n\t"
- ".word 1b - ., %l[l_yes] - .\n\t"
- __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
- ".popsection\n\t"
-- : : "i" (&((char *)key)[branch]) : : l_yes);
-+ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
-+ : : l_yes);
-
- return false;
- l_yes:
-diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
-index ee9e071859b2f..47ebc4c91eaff 100644
---- a/arch/parisc/include/asm/ldcw.h
-+++ b/arch/parisc/include/asm/ldcw.h
-@@ -55,7 +55,7 @@
- })
-
- #ifdef CONFIG_SMP
--# define __lock_aligned __section(".data..lock_aligned")
-+# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
- #endif
-
- #endif /* __PARISC_LDCW_H */
-diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
-index ff6cbdb6903bc..ece4b3046515c 100644
---- a/arch/parisc/include/asm/processor.h
-+++ b/arch/parisc/include/asm/processor.h
-@@ -47,6 +47,8 @@
-
- #ifndef __ASSEMBLY__
-
-+struct rlimit;
-+unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
- unsigned long calc_max_stack_size(unsigned long stack_max);
-
- /*
-diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
-index 2bf660eabe421..4165079898d9e 100644
---- a/arch/parisc/include/asm/uaccess.h
-+++ b/arch/parisc/include/asm/uaccess.h
-@@ -41,6 +41,7 @@ struct exception_table_entry {
-
- #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
- ".section __ex_table,\"aw\"\n" \
-+ ".align 4\n" \
- ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
- ".previous\n"
-
-diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
-index 87245c584784e..8d94739d75c67 100644
---- a/arch/parisc/include/uapi/asm/errno.h
-+++ b/arch/parisc/include/uapi/asm/errno.h
-@@ -75,7 +75,6 @@
-
- /* We now return you to your regularly scheduled HPUX. */
-
--#define ENOSYM 215 /* symbol does not exist in executable */
- #define ENOTSOCK 216 /* Socket operation on non-socket */
- #define EDESTADDRREQ 217 /* Destination address required */
- #define EMSGSIZE 218 /* Message too long */
-@@ -101,7 +100,6 @@
- #define ETIMEDOUT 238 /* Connection timed out */
- #define ECONNREFUSED 239 /* Connection refused */
- #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
--#define EREMOTERELEASE 240 /* Remote peer released connection */
- #define EHOSTDOWN 241 /* Host is down */
- #define EHOSTUNREACH 242 /* No route to host */
-
-diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
-index 7a90070136e82..8e38a86996fc6 100644
---- a/arch/parisc/include/uapi/asm/pdc.h
-+++ b/arch/parisc/include/uapi/asm/pdc.h
-@@ -472,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */
- unsigned long arch_rev;
- unsigned long pot_key;
- unsigned long curr_key;
-+ unsigned long width; /* default of PSW_W bit (1=enabled) */
- };
-
- struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
-diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
-index ae03b8679696e..ab23e61a6f016 100644
---- a/arch/parisc/kernel/entry.S
-+++ b/arch/parisc/kernel/entry.S
-@@ -36,6 +36,24 @@
- .level 2.0
- #endif
-
-+/*
-+ * We need seven instructions after a TLB insert for it to take effect.
-+ * The PA8800/PA8900 processors are an exception and need 12 instructions.
-+ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
-+ */
-+#ifdef CONFIG_64BIT
-+#define NUM_PIPELINE_INSNS 12
-+#else
-+#define NUM_PIPELINE_INSNS 7
-+#endif
-+
-+ /* Insert num nops */
-+ .macro insert_nops num
-+ .rept \num
-+ nop
-+ .endr
-+ .endm
-+
- /* Get aligned page_table_lock address for this mm from cr28/tr4 */
- .macro get_ptl reg
- mfctl %cr28,\reg
-@@ -415,24 +433,20 @@
- 3:
- .endm
-
-- /* Release page_table_lock without reloading lock address.
-- We use an ordered store to ensure all prior accesses are
-- performed prior to releasing the lock. */
-- .macro ptl_unlock0 spc,tmp,tmp2
-+	/* Release page_table_lock if it is for user space. We use an ordered
-+	store to ensure all prior accesses are performed prior to
-+	releasing the lock. Note that stw may not be executed, so we
-+	provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
-+ .macro ptl_unlock spc,tmp,tmp2
- #ifdef CONFIG_TLB_PTLOCK
--98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
-+98: get_ptl \tmp
-+ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
- or,COND(=) %r0,\spc,%r0
- stw,ma \tmp2,0(\tmp)
- 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
--#endif
-- .endm
--
-- /* Release page_table_lock. */
-- .macro ptl_unlock1 spc,tmp,tmp2
--#ifdef CONFIG_TLB_PTLOCK
--98: get_ptl \tmp
-- ptl_unlock0 \spc,\tmp,\tmp2
--99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-+ insert_nops NUM_PIPELINE_INSNS - 4
-+#else
-+ insert_nops NUM_PIPELINE_INSNS - 1
- #endif
- .endm
-
-@@ -461,13 +475,13 @@
- * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
- #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
- #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
-+ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
-
- /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
- .macro convert_for_tlb_insert20 pte,tmp
- #ifdef CONFIG_HUGETLB_PAGE
- copy \pte,\tmp
-- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
-+ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
-
- depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
- (63-58)+PAGE_ADD_SHIFT,\pte
-@@ -475,8 +489,7 @@
- depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
- (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
- #else /* Huge pages disabled */
-- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
-+ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
- depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
- (63-58)+PAGE_ADD_SHIFT,\pte
- #endif
-@@ -1124,7 +1137,7 @@ dtlb_miss_20w:
-
- idtlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1133,6 +1146,7 @@ dtlb_check_alias_20w:
-
- idtlbt pte,prot
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1150,7 +1164,7 @@ nadtlb_miss_20w:
-
- idtlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1159,6 +1173,7 @@ nadtlb_check_alias_20w:
-
- idtlbt pte,prot
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1184,7 +1199,7 @@ dtlb_miss_11:
-
- mtsp t1, %sr1 /* Restore sr1 */
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1194,6 +1209,7 @@ dtlb_check_alias_11:
- idtlba pte,(va)
- idtlbp prot,(va)
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1217,7 +1233,7 @@ nadtlb_miss_11:
-
- mtsp t1, %sr1 /* Restore sr1 */
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1227,6 +1243,7 @@ nadtlb_check_alias_11:
- idtlba pte,(va)
- idtlbp prot,(va)
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1246,7 +1263,7 @@ dtlb_miss_20:
-
- idtlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1255,6 +1272,7 @@ dtlb_check_alias_20:
-
- idtlbt pte,prot
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1274,7 +1292,7 @@ nadtlb_miss_20:
-
- idtlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1283,6 +1301,7 @@ nadtlb_check_alias_20:
-
- idtlbt pte,prot
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1319,7 +1338,7 @@ itlb_miss_20w:
-
- iitlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1343,7 +1362,7 @@ naitlb_miss_20w:
-
- iitlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1352,6 +1371,7 @@ naitlb_check_alias_20w:
-
- iitlbt pte,prot
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1377,7 +1397,7 @@ itlb_miss_11:
-
- mtsp t1, %sr1 /* Restore sr1 */
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1401,7 +1421,7 @@ naitlb_miss_11:
-
- mtsp t1, %sr1 /* Restore sr1 */
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1411,6 +1431,7 @@ naitlb_check_alias_11:
- iitlba pte,(%sr0, va)
- iitlbp prot,(%sr0, va)
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1431,7 +1452,7 @@ itlb_miss_20:
-
- iitlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1451,7 +1472,7 @@ naitlb_miss_20:
-
- iitlbt pte,prot
-
-- ptl_unlock1 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1460,6 +1481,7 @@ naitlb_check_alias_20:
-
- iitlbt pte,prot
-
-+ insert_nops NUM_PIPELINE_INSNS - 1
- rfir
- nop
-
-@@ -1481,7 +1503,7 @@ dbit_trap_20w:
-
- idtlbt pte,prot
-
-- ptl_unlock0 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
- #else
-@@ -1507,7 +1529,7 @@ dbit_trap_11:
-
- mtsp t1, %sr1 /* Restore sr1 */
-
-- ptl_unlock0 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
-
-@@ -1527,7 +1549,7 @@ dbit_trap_20:
-
- idtlbt pte,prot
-
-- ptl_unlock0 spc,t0,t1
-+ ptl_unlock spc,t0,t1
- rfir
- nop
- #endif
-diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
-index a171bf3c6b318..96e0264ac9616 100644
---- a/arch/parisc/kernel/head.S
-+++ b/arch/parisc/kernel/head.S
-@@ -70,9 +70,8 @@ $bss_loop:
- stw,ma %arg2,4(%r1)
- stw,ma %arg3,4(%r1)
-
--#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
-- /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
-- * and halt kernel if we detect a PA1.x CPU. */
-+#if defined(CONFIG_PA20)
-+	/* Check for a 64-bit capable CPU, as required by the current kernel. */
- ldi 32,%r10
- mtctl %r10,%cr11
- .level 2.0
-diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index ab896eff7a1de..98af719d5f85b 100644
---- a/arch/parisc/kernel/sys_parisc.c
-+++ b/arch/parisc/kernel/sys_parisc.c
-@@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
- * indicating that "current" should be used instead of a passed-in
- * value from the exec bprm as done with arch_pick_mmap_layout().
- */
--static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
-+unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
- {
- unsigned long stack_base;
-
-diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
-index 58694d1989c23..548051b0b4aff 100644
---- a/arch/parisc/kernel/vmlinux.lds.S
-+++ b/arch/parisc/kernel/vmlinux.lds.S
-@@ -130,6 +130,7 @@ SECTIONS
- RO_DATA(8)
-
- /* unwind info */
-+ . = ALIGN(4);
- .PARISC.unwind : {
- __start___unwind = .;
- *(.PARISC.unwind)
-diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
-index 6fe46e7545566..0b4e5f8ce3e8a 100644
---- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
-+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
-@@ -69,9 +69,6 @@
-
- #define _PTE_NONE_MASK 0
-
--/* Until my rework is finished, 40x still needs atomic PTE updates */
--#define PTE_ATOMIC_UPDATES 1
--
- #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
- #define _PAGE_BASE (_PAGE_BASE_NC)
-
-diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
-index 6a9acfb690c9f..2f8f3f93cbb67 100644
---- a/arch/powerpc/kernel/fpu.S
-+++ b/arch/powerpc/kernel/fpu.S
-@@ -23,6 +23,15 @@
- #include <asm/feature-fixups.h>
-
- #ifdef CONFIG_VSX
-+#define __REST_1FPVSR(n,c,base) \
-+BEGIN_FTR_SECTION \
-+ b 2f; \
-+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
-+ REST_FPR(n,base); \
-+ b 3f; \
-+2: REST_VSR(n,c,base); \
-+3:
-+
- #define __REST_32FPVSRS(n,c,base) \
- BEGIN_FTR_SECTION \
- b 2f; \
-@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
- 2: SAVE_32VSRS(n,c,base); \
- 3:
- #else
-+#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
- #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
- #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
- #endif
-+#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
- #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
- #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
-
-@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
- SAVE_32FPVSRS(0, R4, R3)
- mffs fr0
- stfd fr0,FPSTATE_FPSCR(r3)
-+ REST_1FPVSR(0, R4, R3)
- blr
- EXPORT_SYMBOL(store_fp_state)
-
-@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
- 2: SAVE_32FPVSRS(0, R4, R6)
- mffs fr0
- stfd fr0,FPSTATE_FPSCR(r6)
-+ REST_1FPVSR(0, R4, R6)
- blr
-diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index b68898ac07e19..9452a54d356c9 100644
---- a/arch/powerpc/kernel/process.c
-+++ b/arch/powerpc/kernel/process.c
-@@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)
-
- usermsr = current->thread.regs->msr;
-
-+ /* Caller has enabled FP/VEC/VSX/TM in MSR */
- if (usermsr & MSR_FP)
-- save_fpu(current);
--
-+ __giveup_fpu(current);
- if (usermsr & MSR_VEC)
-- save_altivec(current);
-+ __giveup_altivec(current);
-
- #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (usermsr & MSR_TM) {
-@@ -2258,6 +2258,22 @@ unsigned long __get_wchan(struct task_struct *p)
- return ret;
- }
-
-+static bool empty_user_regs(struct pt_regs *regs, struct task_struct *tsk)
-+{
-+ unsigned long stack_page;
-+
-+ // A non-empty pt_regs should never have a zero MSR or TRAP value.
-+ if (regs->msr || regs->trap)
-+ return false;
-+
-+ // Check it sits at the very base of the stack
-+ stack_page = (unsigned long)task_stack_page(tsk);
-+ if ((unsigned long)(regs + 1) != stack_page + THREAD_SIZE)
-+ return false;
-+
-+ return true;
-+}
-+
- static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-
- void __no_sanitize_address show_stack(struct task_struct *tsk,
-@@ -2322,9 +2338,13 @@ void __no_sanitize_address show_stack(struct task_struct *tsk,
- lr = regs->link;
- printk("%s--- interrupt: %lx at %pS\n",
- loglvl, regs->trap, (void *)regs->nip);
-- __show_regs(regs);
-- printk("%s--- interrupt: %lx\n",
-- loglvl, regs->trap);
-+
-+ // Detect the case of an empty pt_regs at the very base
-+ // of the stack and suppress showing it in full.
-+ if (!empty_user_regs(regs, tsk)) {
-+ __show_regs(regs);
-+ printk("%s--- interrupt: %lx\n", loglvl, regs->trap);
-+ }
-
- firstframe = 1;
- }
-diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index 64ff37721fd06..fe3f720c9cd61 100644
---- a/arch/powerpc/kernel/traps.c
-+++ b/arch/powerpc/kernel/traps.c
-@@ -1164,6 +1164,7 @@ void emulate_single_step(struct pt_regs *regs)
- __single_step_exception(regs);
- }
-
-+#ifdef CONFIG_PPC_FPU_REGS
- static inline int __parse_fpscr(unsigned long fpscr)
- {
- int ret = FPE_FLTUNK;
-@@ -1190,6 +1191,7 @@ static inline int __parse_fpscr(unsigned long fpscr)
-
- return ret;
- }
-+#endif
-
- static void parse_fpe(struct pt_regs *regs)
- {
-diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
-index 4094e4c4c77a7..80b3f6e476b66 100644
---- a/arch/powerpc/kernel/vector.S
-+++ b/arch/powerpc/kernel/vector.S
-@@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
- mfvscr v0
- li r4, VRSTATE_VSCR
- stvx v0, r4, r3
-+ lvx v0, 0, r3
- blr
- EXPORT_SYMBOL(store_vr_state)
-
-@@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
- mfvscr v0
- li r4,VRSTATE_VSCR
- stvx v0,r4,r7
-+ lvx v0,0,r7
- blr
-
- #ifdef CONFIG_VSX
-diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
-index de64c79629912..005269ac3244c 100644
---- a/arch/powerpc/kexec/core.c
-+++ b/arch/powerpc/kexec/core.c
-@@ -74,6 +74,9 @@ void arch_crash_save_vmcoreinfo(void)
- VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
- VMCOREINFO_OFFSET(mmu_psize_def, shift);
- #endif
-+ VMCOREINFO_SYMBOL(cur_cpu_spec);
-+ VMCOREINFO_OFFSET(cpu_spec, mmu_features);
-+ vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
- vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
- }
-
-diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
-index 8c1f7def596e4..10b946e9c6e75 100644
---- a/arch/powerpc/perf/core-book3s.c
-+++ b/arch/powerpc/perf/core-book3s.c
-@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
- /*
- * Disable instruction sampling if it was enabled
- */
-- if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
-- val &= ~MMCRA_SAMPLE_ENABLE;
-+ val &= ~MMCRA_SAMPLE_ENABLE;
-
- /* Disable BHRB via mmcra (BHRBRD) for p10 */
- if (ppmu->flags & PPMU_ARCH_31)
-@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
- * instruction sampling or BHRB.
- */
- if (val != mmcra) {
-- mtspr(SPRN_MMCRA, mmcra);
-+ mtspr(SPRN_MMCRA, val);
- mb();
- isync();
- }
-diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
-index 9d229ef7f86ef..ada817c49b722 100644
---- a/arch/powerpc/perf/imc-pmu.c
-+++ b/arch/powerpc/perf/imc-pmu.c
-@@ -51,7 +51,7 @@ static int trace_imc_mem_size;
- * core and trace-imc
- */
- static struct imc_pmu_ref imc_global_refc = {
-- .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(imc_global_refc.lock),
- .id = 0,
- .refc = 0,
- };
-diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
-index 77ea9335fd049..f381b177ea06a 100644
---- a/arch/powerpc/platforms/book3s/vas-api.c
-+++ b/arch/powerpc/platforms/book3s/vas-api.c
-@@ -4,6 +4,8 @@
- * Copyright (C) 2019 Haren Myneni, IBM Corp
- */
-
-+#define pr_fmt(fmt) "vas-api: " fmt
-+
- #include <linux/kernel.h>
- #include <linux/device.h>
- #include <linux/cdev.h>
-@@ -78,7 +80,7 @@ int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
- task_ref->mm = get_task_mm(current);
- if (!task_ref->mm) {
- put_pid(task_ref->pid);
-- pr_err("VAS: pid(%d): mm_struct is not found\n",
-+ pr_err("pid(%d): mm_struct is not found\n",
- current->pid);
- return -EPERM;
- }
-@@ -235,8 +237,7 @@ void vas_update_csb(struct coprocessor_request_block *crb,
- rc = kill_pid_info(SIGSEGV, &info, pid);
- rcu_read_unlock();
-
-- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
-- pid_vnr(pid), rc);
-+ pr_devel("pid %d kill_proc_info() rc %d\n", pid_vnr(pid), rc);
- }
-
- void vas_dump_crb(struct coprocessor_request_block *crb)
-@@ -294,7 +295,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
-
- rc = copy_from_user(&uattr, uptr, sizeof(uattr));
- if (rc) {
-- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
-+ pr_err("copy_from_user() returns %d\n", rc);
- return -EFAULT;
- }
-
-@@ -311,7 +312,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
- txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
- cp_inst->coproc->cop_type);
- if (IS_ERR(txwin)) {
-- pr_err("%s() VAS window open failed, %ld\n", __func__,
-+ pr_err_ratelimited("VAS window open failed rc=%ld\n",
- PTR_ERR(txwin));
- return PTR_ERR(txwin);
- }
-@@ -405,8 +406,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
- * window is not opened. Shouldn't expect this error.
- */
- if (!cp_inst || !cp_inst->txwin) {
-- pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
-- __func__);
-+ pr_err("Unexpected fault on paste address with TX window closed\n");
- return VM_FAULT_SIGBUS;
- }
-
-@@ -421,8 +421,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
- * issue NX request.
- */
- if (txwin->task_ref.vma != vmf->vma) {
-- pr_err("%s(): No previous mapping with paste address\n",
-- __func__);
-+ pr_err("No previous mapping with paste address\n");
- return VM_FAULT_SIGBUS;
- }
-
-@@ -481,19 +480,19 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
- txwin = cp_inst->txwin;
-
- if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
-+ pr_debug("size 0x%zx, PAGE_SIZE 0x%zx\n",
- (vma->vm_end - vma->vm_start), PAGE_SIZE);
- return -EINVAL;
- }
-
- /* Ensure instance has an open send window */
- if (!txwin) {
-- pr_err("%s(): No send window open?\n", __func__);
-+ pr_err("No send window open?\n");
- return -EINVAL;
- }
-
- if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
-- pr_err("%s(): VAS API is not registered\n", __func__);
-+ pr_err("VAS API is not registered\n");
- return -EACCES;
- }
-
-@@ -510,14 +509,14 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
- */
- mutex_lock(&txwin->task_ref.mmap_mutex);
- if (txwin->status != VAS_WIN_ACTIVE) {
-- pr_err("%s(): Window is not active\n", __func__);
-+ pr_err("Window is not active\n");
- rc = -EACCES;
- goto out;
- }
-
- paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
- if (!paste_addr) {
-- pr_err("%s(): Window paste address failed\n", __func__);
-+ pr_err("Window paste address failed\n");
- rc = -EINVAL;
- goto out;
- }
-@@ -533,8 +532,8 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
- rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start, prot);
-
-- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
-- paste_addr, vma->vm_start, rc);
-+ pr_devel("paste addr %llx at %lx, rc %d\n", paste_addr,
-+ vma->vm_start, rc);
-
- txwin->task_ref.vma = vma;
- vma->vm_ops = &vas_vm_ops;
-@@ -609,8 +608,7 @@ int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- goto err;
- }
-
-- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
-- MINOR(devno));
-+ pr_devel("Added dev [%d,%d]\n", MAJOR(devno), MINOR(devno));
-
- return 0;
-
-diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
-index 16d93b580f61f..496e16c588aaa 100644
---- a/arch/powerpc/platforms/pseries/iommu.c
-+++ b/arch/powerpc/platforms/pseries/iommu.c
-@@ -914,7 +914,8 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_
- return 0;
- }
-
--static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
-+static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift,
-+ bool *direct_mapping)
- {
- struct dma_win *window;
- const struct dynamic_dma_window_prop *dma64;
-@@ -927,6 +928,7 @@ static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *windo
- dma64 = window->prop;
- *dma_addr = be64_to_cpu(dma64->dma_base);
- *window_shift = be32_to_cpu(dma64->window_shift);
-+ *direct_mapping = window->direct;
- found = true;
- break;
- }
-@@ -1270,10 +1272,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
-
- mutex_lock(&dma_win_init_mutex);
-
-- if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
-- direct_mapping = (len >= max_ram_len);
-+ if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len, &direct_mapping))
- goto out_unlock;
-- }
-
- /*
- * If we already went through this for a previous function of
-diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
-index f2cb62148f36f..d4d6de0628b05 100644
---- a/arch/powerpc/platforms/pseries/lpar.c
-+++ b/arch/powerpc/platforms/pseries/lpar.c
-@@ -526,8 +526,10 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
-
- if (cmd) {
- rc = init_cpu_associativity();
-- if (rc)
-+ if (rc) {
-+ destroy_cpu_associativity();
- goto out;
-+ }
-
- for_each_possible_cpu(cpu) {
- disp = per_cpu_ptr(&vcpu_disp_data, cpu);
-diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
-index e25ac52acf507..b1f25bac280b4 100644
---- a/arch/powerpc/platforms/pseries/vas.c
-+++ b/arch/powerpc/platforms/pseries/vas.c
-@@ -341,7 +341,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
-
- if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
- atomic_read(&cop_feat_caps->nr_total_credits)) {
-- pr_err("Credits are not available to allocate window\n");
-+ pr_err_ratelimited("Credits are not available to allocate window\n");
- rc = -EINVAL;
- goto out;
- }
-@@ -424,7 +424,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
-
- put_vas_user_win_ref(&txwin->vas_win.task_ref);
- rc = -EBUSY;
-- pr_err("No credit is available to allocate window\n");
-+ pr_err_ratelimited("No credit is available to allocate window\n");
-
- out_free:
- /*
-diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
-index 9f0af4d795d88..f1c0fa6ece21d 100644
---- a/arch/powerpc/sysdev/xive/native.c
-+++ b/arch/powerpc/sysdev/xive/native.c
-@@ -802,7 +802,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
- if (out_qpage)
- *out_qpage = be64_to_cpu(qpage);
- if (out_qsize)
-- *out_qsize = be32_to_cpu(qsize);
-+ *out_qsize = be64_to_cpu(qsize);
- if (out_qeoi_page)
- *out_qeoi_page = be64_to_cpu(qeoi_page);
- if (out_escalate_irq)
-diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
-index 22b13947bd131..8e7fc0edf21d3 100644
---- a/arch/riscv/boot/Makefile
-+++ b/arch/riscv/boot/Makefile
-@@ -17,6 +17,7 @@
- KCOV_INSTRUMENT := n
-
- OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-+OBJCOPYFLAGS_loader.bin :=-O binary
- OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-
- targets := Image Image.* loader loader.o loader.lds loader.bin
-diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
-index 8275630af977d..b8684312593e5 100644
---- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
-+++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
-@@ -30,7 +30,6 @@
- cpu0_intc: interrupt-controller {
- compatible = "riscv,cpu-intc";
- interrupt-controller;
-- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
- };
-diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
-index 61ba8ed43d8fe..36b955c762ba0 100644
---- a/arch/riscv/include/asm/asm-prototypes.h
-+++ b/arch/riscv/include/asm/asm-prototypes.h
-@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
- DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
- DECLARE_DO_ERROR_INFO(do_trap_break);
-
--asmlinkage unsigned long get_overflow_stack(void);
- asmlinkage void handle_bad_stack(struct pt_regs *regs);
- asmlinkage void do_page_fault(struct pt_regs *regs);
- asmlinkage void do_irq(struct pt_regs *regs);
-diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
-index 114bbadaef41e..bfb4c26f113c4 100644
---- a/arch/riscv/include/asm/asm.h
-+++ b/arch/riscv/include/asm/asm.h
-@@ -82,6 +82,28 @@
- .endr
- .endm
-
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_32BIT
-+#define PER_CPU_OFFSET_SHIFT 2
-+#else
-+#define PER_CPU_OFFSET_SHIFT 3
-+#endif
-+
-+.macro asm_per_cpu dst sym tmp
-+ REG_L \tmp, TASK_TI_CPU_NUM(tp)
-+ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
-+ la \dst, __per_cpu_offset
-+ add \dst, \dst, \tmp
-+ REG_L \tmp, 0(\dst)
-+ la \dst, \sym
-+ add \dst, \dst, \tmp
-+.endm
-+#else /* CONFIG_SMP */
-+.macro asm_per_cpu dst sym tmp
-+ la \dst, \sym
-+.endm
-+#endif /* CONFIG_SMP */
-+
- /* save all GPs except x1 ~ x5 */
- .macro save_from_x6_to_x31
- REG_S x6, PT_T1(sp)
-diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
-index 78936f4ff5133..7cad513538d8d 100644
---- a/arch/riscv/include/asm/hwprobe.h
-+++ b/arch/riscv/include/asm/hwprobe.h
-@@ -10,4 +10,9 @@
-
- #define RISCV_HWPROBE_MAX_KEY 5
-
-+static inline bool riscv_hwprobe_key_is_valid(__s64 key)
-+{
-+ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
-+}
-+
- #endif
-diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
-index 5488ecc337b63..57e887bfa34cb 100644
---- a/arch/riscv/include/asm/page.h
-+++ b/arch/riscv/include/asm/page.h
-@@ -33,8 +33,8 @@
- #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
- #endif
- /*
-- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
-- * define the PAGE_OFFSET value for SV39.
-+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
-+ * define the PAGE_OFFSET value for SV48 and SV39.
- */
- #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
- #define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
-diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
-index 1833beb00489c..d18ce0113ca1f 100644
---- a/arch/riscv/include/asm/thread_info.h
-+++ b/arch/riscv/include/asm/thread_info.h
-@@ -34,9 +34,6 @@
-
- #ifndef __ASSEMBLY__
-
--extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
--extern unsigned long spin_shadow_stack;
--
- #include <asm/processor.h>
- #include <asm/csr.h>
-
-diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
-index 14f5d27783b85..96b65a5396dfc 100644
---- a/arch/riscv/include/asm/vdso/processor.h
-+++ b/arch/riscv/include/asm/vdso/processor.h
-@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
- #endif
-
--#ifdef __riscv_zihintpause
-+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
- /*
- * Reduce instruction retirement.
- * This assumes the PC changes.
-diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
-index d6a75aac1d27a..9f535d5de33f9 100644
---- a/arch/riscv/kernel/asm-offsets.c
-+++ b/arch/riscv/kernel/asm-offsets.c
-@@ -39,6 +39,7 @@ void asm_offsets(void)
- OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
- OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
-
-+ OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
- OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
- OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
- OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
-diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
-index c17dacb1141cb..157ace8b262c2 100644
---- a/arch/riscv/kernel/cpu.c
-+++ b/arch/riscv/kernel/cpu.c
-@@ -125,13 +125,14 @@ old_interface:
- */
- int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
- {
-- int rc;
--
- for (; node; node = node->parent) {
- if (of_device_is_compatible(node, "riscv")) {
-- rc = riscv_of_processor_hartid(node, hartid);
-- if (!rc)
-- return 0;
-+ *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
-+ if (*hartid == ~0UL) {
-+ pr_warn("Found CPU without hart ID\n");
-+ return -ENODEV;
-+ }
-+ return 0;
- }
- }
-
-diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
-index 143a2bb3e6976..278d01d2911fd 100644
---- a/arch/riscv/kernel/entry.S
-+++ b/arch/riscv/kernel/entry.S
-@@ -10,9 +10,13 @@
- #include <asm/asm.h>
- #include <asm/csr.h>
- #include <asm/unistd.h>
-+#include <asm/page.h>
- #include <asm/thread_info.h>
- #include <asm/asm-offsets.h>
- #include <asm/errata_list.h>
-+#include <linux/sizes.h>
-+
-+ .section .irqentry.text, "ax"
-
- SYM_CODE_START(handle_exception)
- /*
-@@ -170,67 +174,15 @@ SYM_CODE_END(ret_from_exception)
-
- #ifdef CONFIG_VMAP_STACK
- SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
-- /*
--	 * Takes the pseudo-spinlock for the shadow stack, in case multiple
-- * harts are concurrently overflowing their kernel stacks. We could
-- * store any value here, but since we're overflowing the kernel stack
-- * already we only have SP to use as a scratch register. So we just
--	 * swap in the address of the spinlock, as that's definitely non-zero.
-- *
-- * Pairs with a store_release in handle_bad_stack().
-- */
--1: la sp, spin_shadow_stack
-- REG_AMOSWAP_AQ sp, sp, (sp)
-- bnez sp, 1b
--
-- la sp, shadow_stack
-- addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
--
-- //save caller register to shadow stack
-- addi sp, sp, -(PT_SIZE_ON_STACK)
-- REG_S x1, PT_RA(sp)
-- REG_S x5, PT_T0(sp)
-- REG_S x6, PT_T1(sp)
-- REG_S x7, PT_T2(sp)
-- REG_S x10, PT_A0(sp)
-- REG_S x11, PT_A1(sp)
-- REG_S x12, PT_A2(sp)
-- REG_S x13, PT_A3(sp)
-- REG_S x14, PT_A4(sp)
-- REG_S x15, PT_A5(sp)
-- REG_S x16, PT_A6(sp)
-- REG_S x17, PT_A7(sp)
-- REG_S x28, PT_T3(sp)
-- REG_S x29, PT_T4(sp)
-- REG_S x30, PT_T5(sp)
-- REG_S x31, PT_T6(sp)
--
-- la ra, restore_caller_reg
-- tail get_overflow_stack
--
--restore_caller_reg:
-- //save per-cpu overflow stack
-- REG_S a0, -8(sp)
-- //restore caller register from shadow_stack
-- REG_L x1, PT_RA(sp)
-- REG_L x5, PT_T0(sp)
-- REG_L x6, PT_T1(sp)
-- REG_L x7, PT_T2(sp)
-- REG_L x10, PT_A0(sp)
-- REG_L x11, PT_A1(sp)
-- REG_L x12, PT_A2(sp)
-- REG_L x13, PT_A3(sp)
-- REG_L x14, PT_A4(sp)
-- REG_L x15, PT_A5(sp)
-- REG_L x16, PT_A6(sp)
-- REG_L x17, PT_A7(sp)
-- REG_L x28, PT_T3(sp)
-- REG_L x29, PT_T4(sp)
-- REG_L x30, PT_T5(sp)
-- REG_L x31, PT_T6(sp)
-+	/* We reach here from kernel context, so sscratch must be 0. */
-+ csrrw x31, CSR_SCRATCH, x31
-+ asm_per_cpu sp, overflow_stack, x31
-+ li x31, OVERFLOW_STACK_SIZE
-+ add sp, sp, x31
-+ /* zero out x31 again and restore x31 */
-+ xor x31, x31, x31
-+ csrrw x31, CSR_SCRATCH, x31
-
-- //load per-cpu overflow stack
-- REG_L sp, -8(sp)
- addi sp, sp, -(PT_SIZE_ON_STACK)
-
- //save context to overflow stack
-diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
-index d3099d67816d0..6c166029079c4 100644
---- a/arch/riscv/kernel/probes/simulate-insn.c
-+++ b/arch/riscv/kernel/probes/simulate-insn.c
-@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
- unsigned long val)
- {
- if (index == 0)
-- return false;
-+ return true;
- else if (index <= 31)
- *((unsigned long *)regs + index) = val;
- else
-diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
-index 194f166b2cc40..4b3dc8beaf77d 100644
---- a/arch/riscv/kernel/probes/uprobes.c
-+++ b/arch/riscv/kernel/probes/uprobes.c
-@@ -3,6 +3,7 @@
- #include <linux/highmem.h>
- #include <linux/ptrace.h>
- #include <linux/uprobes.h>
-+#include <asm/insn.h>
-
- #include "decode-insn.h"
-
-@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
- #endif
- }
-
-+bool is_trap_insn(uprobe_opcode_t *insn)
-+{
-+ return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
-+}
-+
- unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
- {
- return instruction_pointer(regs);
-diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
-index fae8f610d867f..67d0073fb624d 100644
---- a/arch/riscv/kernel/traps.c
-+++ b/arch/riscv/kernel/traps.c
-@@ -410,48 +410,14 @@ int is_valid_bugaddr(unsigned long pc)
- #endif /* CONFIG_GENERIC_BUG */
-
- #ifdef CONFIG_VMAP_STACK
--/*
-- * Extra stack space that allows us to provide panic messages when the kernel
-- * has overflowed its stack.
-- */
--static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
-+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
- overflow_stack)__aligned(16);
--/*
-- * A temporary stack for use by handle_kernel_stack_overflow. This is used so
-- * we can call into C code to get the per-hart overflow stack. Usage of this
-- * stack must be protected by spin_shadow_stack.
-- */
--long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
--
--/*
-- * A pseudo spinlock to protect the shadow stack from being used by multiple
-- * harts concurrently. This isn't a real spinlock because the lock side must
-- * be taken without a valid stack and only a single register; it's only taken
-- * while in the process of panicking anyway, so the performance and error
-- * checking a proper spinlock gives us doesn't matter.
-- */
--unsigned long spin_shadow_stack;
--
--asmlinkage unsigned long get_overflow_stack(void)
--{
-- return (unsigned long)this_cpu_ptr(overflow_stack) +
-- OVERFLOW_STACK_SIZE;
--}
-
- asmlinkage void handle_bad_stack(struct pt_regs *regs)
- {
- unsigned long tsk_stk = (unsigned long)current->stack;
- unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
-
-- /*
-- * We're done with the shadow stack by this point, as we're on the
-- * overflow stack. Tell any other concurrent overflowing harts that
--	 * they can proceed with panicking by releasing the pseudo-spinlock.
-- *
-- * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
-- */
-- smp_store_release(&spin_shadow_stack, 0);
--
- console_verbose();
-
- pr_emerg("Insufficient stack space to handle exception!\n");
-diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
-index d40bec6ac0786..cadf725ef7983 100644
---- a/arch/riscv/kernel/vdso/hwprobe.c
-+++ b/arch/riscv/kernel/vdso/hwprobe.c
-@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-
- /* This is something we can handle, fill out the pairs. */
- while (p < end) {
-- if (p->key <= RISCV_HWPROBE_MAX_KEY) {
-+ if (riscv_hwprobe_key_is_valid(p->key)) {
- p->value = avd->all_cpu_hwprobe_values[p->key];
-
- } else {
-diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
-index 9c454f90fd3da..3a4dfc8babcf8 100644
---- a/arch/riscv/mm/Makefile
-+++ b/arch/riscv/mm/Makefile
-@@ -36,3 +36,4 @@ endif
-
- obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
- obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
-+obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o
-diff --git a/arch/riscv/mm/cache-ops.c b/arch/riscv/mm/cache-ops.c
-new file mode 100644
-index 0000000000000..a993ad11d0eca
---- /dev/null
-+++ b/arch/riscv/mm/cache-ops.c
-@@ -0,0 +1,17 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
-+ */
-+
-+#include <asm/dma-noncoherent.h>
-+
-+struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init;
-+
-+void
-+riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
-+{
-+ if (!ops)
-+ return;
-+ noncoherent_cache_ops = *ops;
-+}
-+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
-diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
-index b76e7e192eb18..341bd6706b4c5 100644
---- a/arch/riscv/mm/dma-noncoherent.c
-+++ b/arch/riscv/mm/dma-noncoherent.c
-@@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init;
- int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
- EXPORT_SYMBOL_GPL(dma_cache_alignment);
-
--struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
-- .wback = NULL,
-- .inv = NULL,
-- .wback_inv = NULL,
--};
--
- static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
- {
- void *vaddr = phys_to_virt(paddr);
-@@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void)
- if (!noncoherent_supported)
- dma_cache_alignment = 1;
- }
--
--void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
--{
-- if (!ops)
-- return;
--
-- noncoherent_cache_ops = *ops;
--}
--EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
-diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
-index 20a9f991a6d74..e9090b38f8117 100644
---- a/arch/riscv/mm/ptdump.c
-+++ b/arch/riscv/mm/ptdump.c
-@@ -384,6 +384,9 @@ static int __init ptdump_init(void)
-
- kernel_ptd_info.base_addr = KERN_VIRT_START;
-
-+ pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
-+ pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
-+
- for (i = 0; i < ARRAY_SIZE(pg_level); i++)
- for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
- pg_level[i].mask |= pte_bits[j].mask;
-diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
-index 05e51666db033..8d0b95c173129 100644
---- a/arch/s390/kernel/ipl.c
-+++ b/arch/s390/kernel/ipl.c
-@@ -666,6 +666,7 @@ static int __init ipl_init(void)
- &ipl_ccw_attr_group_lpar);
- break;
- case IPL_TYPE_ECKD:
-+ case IPL_TYPE_ECKD_DUMP:
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
- break;
- case IPL_TYPE_FCP:
-diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
-index 906a7bfc2a787..20786f6883b29 100644
---- a/arch/s390/mm/gmap.c
-+++ b/arch/s390/mm/gmap.c
-@@ -21,10 +21,22 @@
-
- #include <asm/pgalloc.h>
- #include <asm/gmap.h>
-+#include <asm/page.h>
- #include <asm/tlb.h>
-
- #define GMAP_SHADOW_FAKE_TABLE 1ULL
-
-+static struct page *gmap_alloc_crst(void)
-+{
-+ struct page *page;
-+
-+ page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ if (!page)
-+ return NULL;
-+ arch_set_page_dat(page, CRST_ALLOC_ORDER);
-+ return page;
-+}
-+
- /**
- * gmap_alloc - allocate and initialize a guest address space
- * @limit: maximum address of the gmap address space
-@@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
- spin_lock_init(&gmap->guest_table_lock);
- spin_lock_init(&gmap->shadow_lock);
- refcount_set(&gmap->ref_count, 1);
-- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ page = gmap_alloc_crst();
- if (!page)
- goto out_free;
- page->index = 0;
-@@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
- unsigned long *new;
-
- 	/* since we don't free the gmap table until gmap_free we can unlock */
-- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ page = gmap_alloc_crst();
- if (!page)
- return -ENOMEM;
- new = page_to_virt(page);
-@@ -1759,7 +1771,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
-
- BUG_ON(!gmap_is_shadow(sg));
- /* Allocate a shadow region second table */
-- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ page = gmap_alloc_crst();
- if (!page)
- return -ENOMEM;
- page->index = r2t & _REGION_ENTRY_ORIGIN;
-@@ -1843,7 +1855,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
-
- BUG_ON(!gmap_is_shadow(sg));
- /* Allocate a shadow region second table */
-- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ page = gmap_alloc_crst();
- if (!page)
- return -ENOMEM;
- page->index = r3t & _REGION_ENTRY_ORIGIN;
-@@ -1927,7 +1939,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
-
- BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
- /* Allocate a shadow segment table */
-- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ page = gmap_alloc_crst();
- if (!page)
- return -ENOMEM;
- page->index = sgt & _REGION_ENTRY_ORIGIN;
-@@ -2855,7 +2867,7 @@ int s390_replace_asce(struct gmap *gmap)
- if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
- return -EINVAL;
-
-- page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
-+ page = gmap_alloc_crst();
- if (!page)
- return -ENOMEM;
- page->index = 0;
-diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
-index 1e2ea706aa228..79a037f49f707 100644
---- a/arch/s390/mm/page-states.c
-+++ b/arch/s390/mm/page-states.c
-@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
- continue;
- if (!pud_folded(*pud)) {
- page = phys_to_page(pud_val(*pud));
-- for (i = 0; i < 3; i++)
-+ for (i = 0; i < 4; i++)
- set_bit(PG_arch_1, &page[i].flags);
- }
- mark_kernel_pmd(pud, addr, next);
-@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
- continue;
- if (!p4d_folded(*p4d)) {
- page = phys_to_page(p4d_val(*p4d));
-- for (i = 0; i < 3; i++)
-+ for (i = 0; i < 4; i++)
- set_bit(PG_arch_1, &page[i].flags);
- }
- mark_kernel_pud(p4d, addr, next);
-@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
- continue;
- if (!pgd_folded(*pgd)) {
- page = phys_to_page(pgd_val(*pgd));
-- for (i = 0; i < 3; i++)
-+ for (i = 0; i < 4; i++)
- set_bit(PG_arch_1, &page[i].flags);
- }
- mark_kernel_p4d(pgd, addr, next);
-@@ -181,6 +181,12 @@ void __init cmma_init_nodat(void)
- return;
- /* Mark pages used in kernel page tables */
- mark_kernel_pgd();
-+ page = virt_to_page(&swapper_pg_dir);
-+ for (i = 0; i < 4; i++)
-+ set_bit(PG_arch_1, &page[i].flags);
-+ page = virt_to_page(&invalid_pg_dir);
-+ for (i = 0; i < 4; i++)
-+ set_bit(PG_arch_1, &page[i].flags);
-
- /* Set all kernel pages not used for page tables to stable/no-dat */
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
-index 07fc660a24aa2..6396d6b06a3a2 100644
---- a/arch/s390/mm/pgalloc.c
-+++ b/arch/s390/mm/pgalloc.c
-@@ -146,6 +146,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
- ptdesc = pagetable_alloc(GFP_KERNEL, 0);
- if (ptdesc) {
- table = (u64 *)ptdesc_to_virt(ptdesc);
-+ arch_set_page_dat(virt_to_page(table), 0);
- memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
- memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
- }
-diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
-index 6957d2ed97bf0..6d276103c6d58 100644
---- a/arch/s390/mm/vmem.c
-+++ b/arch/s390/mm/vmem.c
-@@ -12,6 +12,7 @@
- #include <linux/hugetlb.h>
- #include <linux/slab.h>
- #include <linux/sort.h>
-+#include <asm/page-states.h>
- #include <asm/cacheflush.h>
- #include <asm/nospec-branch.h>
- #include <asm/pgalloc.h>
-@@ -45,8 +46,11 @@ void *vmem_crst_alloc(unsigned long val)
- unsigned long *table;
-
- table = vmem_alloc_pages(CRST_ALLOC_ORDER);
-- if (table)
-- crst_table_init(table, val);
-+ if (!table)
-+ return NULL;
-+ crst_table_init(table, val);
-+ if (slab_is_available())
-+ arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
- return table;
- }
-
-diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
-index c449e7c1b20ff..8bcd6c1431a95 100644
---- a/arch/sh/Kconfig.debug
-+++ b/arch/sh/Kconfig.debug
-@@ -22,6 +22,17 @@ config STACK_DEBUG
- every function call and will therefore incur a major
- performance hit. Most users should say N.
-
-+config EARLY_PRINTK
-+ bool "Early printk"
-+ depends on SH_STANDARD_BIOS
-+ help
-+ Say Y here to redirect kernel printk messages to the serial port
-+ used by the SH-IPL bootloader, starting very early in the boot
-+ process and ending when the kernel's serial console is initialised.
-+ This option is only useful while porting the kernel to a new machine,
-+ when the kernel may crash or hang before the serial console is
-+ initialised. If unsure, say N.
-+
- config 4KSTACKS
- bool "Use 4Kb for kernel stacks instead of 8Kb"
- depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
-diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S
-index b193c0a1d8db3..2eca5f43734fe 100644
---- a/arch/x86/coco/tdx/tdcall.S
-+++ b/arch/x86/coco/tdx/tdcall.S
-@@ -195,6 +195,7 @@ SYM_FUNC_END(__tdx_module_call)
- xor %r10d, %r10d
- xor %r11d, %r11d
- xor %rdi, %rdi
-+ xor %rsi, %rsi
- xor %rdx, %rdx
-
- /* Restore callee-saved GPRs as mandated by the x86_64 ABI */
-diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
-index 44340a1139e0b..959afa705e95c 100644
---- a/arch/x86/crypto/sha1_ssse3_glue.c
-+++ b/arch/x86/crypto/sha1_ssse3_glue.c
-@@ -24,8 +24,17 @@
- #include <linux/types.h>
- #include <crypto/sha1.h>
- #include <crypto/sha1_base.h>
-+#include <asm/cpu_device_id.h>
- #include <asm/simd.h>
-
-+static const struct x86_cpu_id module_cpu_ids[] = {
-+ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
-+ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
-+ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
-+ {}
-+};
-+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
-+
- static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha1_block_fn *sha1_xform)
- {
-@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
-
- static int __init sha1_ssse3_mod_init(void)
- {
-+ if (!x86_match_cpu(module_cpu_ids))
-+ return -ENODEV;
-+
- if (register_sha1_ssse3())
- goto fail;
-
-diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
-index 3a5f6be7dbba4..d25235f0ccafc 100644
---- a/arch/x86/crypto/sha256_ssse3_glue.c
-+++ b/arch/x86/crypto/sha256_ssse3_glue.c
-@@ -38,11 +38,20 @@
- #include <crypto/sha2.h>
- #include <crypto/sha256_base.h>
- #include <linux/string.h>
-+#include <asm/cpu_device_id.h>
- #include <asm/simd.h>
-
- asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
- const u8 *data, int blocks);
-
-+static const struct x86_cpu_id module_cpu_ids[] = {
-+ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
-+ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
-+ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
-+ {}
-+};
-+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
-+
- static int _sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, sha256_block_fn *sha256_xform)
- {
-@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
-
- static int __init sha256_ssse3_mod_init(void)
- {
-+ if (!x86_match_cpu(module_cpu_ids))
-+ return -ENODEV;
-+
- if (register_sha256_ssse3())
- goto fail;
-
-diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
-index c8a7fc23f63c6..f896eed4516c7 100644
---- a/arch/x86/include/asm/acpi.h
-+++ b/arch/x86/include/asm/acpi.h
-@@ -16,6 +16,9 @@
- #include <asm/x86_init.h>
- #include <asm/cpufeature.h>
- #include <asm/irq_vectors.h>
-+#include <asm/xen/hypervisor.h>
-+
-+#include <xen/xen.h>
-
- #ifdef CONFIG_ACPI_APEI
- # include <asm/pgtable_types.h>
-@@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
- if (!cpu_has(c, X86_FEATURE_MWAIT) ||
- boot_option_idle_override == IDLE_NOMWAIT)
- *cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
-+
-+ if (xen_initial_domain()) {
-+ /*
-+ * When Linux is running as Xen dom0, the hypervisor is the
-+ * entity in charge of the processor power management, and so
-+		 * Xen needs to check that the OS capabilities reported in the
-+		 * processor capabilities buffer match what the hypervisor
-+		 * driver supports.
-+ */
-+ xen_sanitize_proc_cap_bits(cap);
-+ }
- }
-
- static inline bool acpi_has_cpu_in_madt(void)
-diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
-index e3054e3e46d52..9b419f0de713c 100644
---- a/arch/x86/include/asm/kvm-x86-ops.h
-+++ b/arch/x86/include/asm/kvm-x86-ops.h
-@@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
- KVM_X86_OP_OPTIONAL(vcpu_unblocking)
- KVM_X86_OP_OPTIONAL(pi_update_irte)
- KVM_X86_OP_OPTIONAL(pi_start_assignment)
-+KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
- KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
- KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
- KVM_X86_OP_OPTIONAL(set_hv_timer)
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 70d139406bc80..fb9f5fa96cc96 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -1708,6 +1708,7 @@ struct kvm_x86_ops {
- int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
- void (*pi_start_assignment)(struct kvm *kvm);
-+ void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
- void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
- bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
-
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
-index b37abb55e948b..389f9594746ef 100644
---- a/arch/x86/include/asm/msr-index.h
-+++ b/arch/x86/include/asm/msr-index.h
-@@ -553,6 +553,7 @@
- #define MSR_AMD64_CPUID_FN_1 0xc0011004
- #define MSR_AMD64_LS_CFG 0xc0011020
- #define MSR_AMD64_DC_CFG 0xc0011022
-+#define MSR_AMD64_TW_CFG 0xc0011023
-
- #define MSR_AMD64_DE_CFG 0xc0011029
- #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
-index c55cc243592e9..197ff4f4d1ceb 100644
---- a/arch/x86/include/asm/nospec-branch.h
-+++ b/arch/x86/include/asm/nospec-branch.h
-@@ -271,7 +271,7 @@
- .Lskip_rsb_\@:
- .endm
-
--#ifdef CONFIG_CPU_UNRET_ENTRY
-+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
- #define CALL_UNTRAIN_RET "call entry_untrain_ret"
- #else
- #define CALL_UNTRAIN_RET ""
-@@ -312,7 +312,7 @@
-
- .macro UNTRAIN_RET_FROM_CALL
- #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-- defined(CONFIG_CALL_DEPTH_TRACKING)
-+ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
- VALIDATE_UNRET_END
- ALTERNATIVE_3 "", \
- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
-diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
-index e3bae2b60a0db..ef2844d691735 100644
---- a/arch/x86/include/asm/numa.h
-+++ b/arch/x86/include/asm/numa.h
-@@ -12,13 +12,6 @@
-
- #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-
--/*
-- * Too small node sizes may confuse the VM badly. Usually they
-- * result from BIOS bugs. So don't recognize nodes as standalone
-- * NUMA entities that have less than this amount of RAM listed:
-- */
--#define NODE_MIN_SIZE (4*1024*1024)
--
- extern int numa_off;
-
- /*
-diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
-index 64df897c0ee30..1be13b2dfe8bf 100644
---- a/arch/x86/include/asm/sparsemem.h
-+++ b/arch/x86/include/asm/sparsemem.h
-@@ -37,6 +37,8 @@ extern int phys_to_target_node(phys_addr_t start);
- #define phys_to_target_node phys_to_target_node
- extern int memory_add_physaddr_to_nid(u64 start);
- #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-+extern int numa_fill_memblks(u64 start, u64 end);
-+#define numa_fill_memblks numa_fill_memblks
- #endif
- #endif /* __ASSEMBLY__ */
-
-diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index 8bae40a662827..5c367c1290c35 100644
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
- #define copy_mc_to_kernel copy_mc_to_kernel
-
- unsigned long __must_check
--copy_mc_to_user(void *to, const void *from, unsigned len);
-+copy_mc_to_user(void __user *to, const void *from, unsigned len);
- #endif
-
- /*
-diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
-index 7048dfacc04b2..a9088250770f2 100644
---- a/arch/x86/include/asm/xen/hypervisor.h
-+++ b/arch/x86/include/asm/xen/hypervisor.h
-@@ -100,4 +100,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
-
- enum xen_lazy_mode xen_get_lazy_mode(void);
-
-+#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
-+void xen_sanitize_proc_cap_bits(uint32_t *buf);
-+#else
-+static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
-+{
-+ BUG();
-+}
-+#endif
-+
- #endif /* _ASM_X86_XEN_HYPERVISOR_H */
-diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
-index 356de955e78dd..cab4d8b1535d6 100644
---- a/arch/x86/kernel/amd_nb.c
-+++ b/arch/x86/kernel/amd_nb.c
-@@ -112,6 +112,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
-+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
-+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
-+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
-diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index 760adac3d1a82..3cdf48493546d 100644
---- a/arch/x86/kernel/apic/apic.c
-+++ b/arch/x86/kernel/apic/apic.c
-@@ -36,6 +36,8 @@
- #include <linux/smp.h>
- #include <linux/mm.h>
-
-+#include <xen/xen.h>
-+
- #include <asm/trace/irq_vectors.h>
- #include <asm/irq_remapping.h>
- #include <asm/pc-conf-reg.h>
-@@ -2344,6 +2346,15 @@ static int __init smp_init_primary_thread_mask(void)
- {
- unsigned int cpu;
-
-+ /*
-+	 * XEN/PV provides topology information that is either absent or useless.
-+ * Pretend that all vCPUs are primary threads.
-+ */
-+ if (xen_pv_domain()) {
-+ cpumask_copy(&__cpu_primary_thread_mask, cpu_possible_mask);
-+ return 0;
-+ }
-+
- for (cpu = 0; cpu < nr_logical_cpuids; cpu++)
- cpu_mark_primary_thread(cpu, cpuid_to_apicid[cpu]);
- return 0;
-diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
-index 6b6b711678fe0..d9651f15ae4f7 100644
---- a/arch/x86/kernel/apic/msi.c
-+++ b/arch/x86/kernel/apic/msi.c
-@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
- * caused by the non-atomic update of the address/data pair.
- *
- * Direct update is possible when:
-- * - The MSI is maskable (remapped MSI does not use this code path)).
-- * The quirk bit is not set in this case.
-+ * - The MSI is maskable (remapped MSI does not use this code path).
-+ * The reservation mode bit is set in this case.
- * - The new vector is the same as the old vector
- * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
- * - The interrupt is not yet started up
- * - The new destination CPU is the same as the old destination CPU
- */
-- if (!irqd_msi_nomask_quirk(irqd) ||
-+ if (!irqd_can_reserve(irqd) ||
- cfg->vector == old_cfg.vector ||
- old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
- !irqd_is_started(irqd) ||
-@@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
- if (WARN_ON_ONCE(domain != real_parent))
- return false;
- info->chip->irq_set_affinity = msi_set_affinity;
-- /* See msi_set_affinity() for the gory details */
-- info->flags |= MSI_FLAG_NOMASK_QUIRK;
- break;
- case DOMAIN_BUS_DMAR:
- case DOMAIN_BUS_AMDVI:
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 10499bcd4e396..0bc55472f303a 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -2353,6 +2353,8 @@ early_param("l1tf", l1tf_cmdline);
-
- enum srso_mitigation {
- SRSO_MITIGATION_NONE,
-+ SRSO_MITIGATION_UCODE_NEEDED,
-+ SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
- SRSO_MITIGATION_MICROCODE,
- SRSO_MITIGATION_SAFE_RET,
- SRSO_MITIGATION_IBPB,
-@@ -2368,11 +2370,13 @@ enum srso_mitigation_cmd {
- };
-
- static const char * const srso_strings[] = {
-- [SRSO_MITIGATION_NONE] = "Vulnerable",
-- [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode",
-- [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET",
-- [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
-- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
-+ [SRSO_MITIGATION_NONE] = "Vulnerable",
-+ [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
-+ [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
-+ [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
-+ [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
-+ [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
-+ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
- };
-
- static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
-@@ -2409,10 +2413,7 @@ static void __init srso_select_mitigation(void)
- if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
- goto pred_cmd;
-
-- if (!has_microcode) {
-- pr_warn("IBPB-extending microcode not applied!\n");
-- pr_warn(SRSO_NOTICE);
-- } else {
-+ if (has_microcode) {
- /*
- * Zen1/2 with SMT off aren't vulnerable after the right
- * IBPB microcode has been applied.
-@@ -2421,14 +2422,17 @@ static void __init srso_select_mitigation(void)
- setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
- return;
- }
-- }
-
-- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-- if (has_microcode) {
-- pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
-+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
- srso_mitigation = SRSO_MITIGATION_IBPB;
-- goto pred_cmd;
-+ goto out;
- }
-+ } else {
-+ pr_warn("IBPB-extending microcode not applied!\n");
-+ pr_warn(SRSO_NOTICE);
-+
-+ /* may be overwritten by SRSO_CMD_SAFE_RET below */
-+ srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
- }
-
- switch (srso_cmd) {
-@@ -2458,7 +2462,10 @@ static void __init srso_select_mitigation(void)
- setup_force_cpu_cap(X86_FEATURE_SRSO);
- x86_return_thunk = srso_return_thunk;
- }
-- srso_mitigation = SRSO_MITIGATION_SAFE_RET;
-+ if (has_microcode)
-+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
-+ else
-+ srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
- } else {
- pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
- goto pred_cmd;
-@@ -2493,10 +2500,11 @@ static void __init srso_select_mitigation(void)
- break;
- }
-
-- pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
-+out:
-+ pr_info("%s\n", srso_strings[srso_mitigation]);
-
- pred_cmd:
-- if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
-+ if ((!boot_cpu_has_bug(X86_BUG_SRSO) || srso_cmd == SRSO_CMD_OFF) &&
- boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
- }
-@@ -2704,9 +2712,7 @@ static ssize_t srso_show_state(char *buf)
- if (boot_cpu_has(X86_FEATURE_SRSO_NO))
- return sysfs_emit(buf, "Mitigation: SMT disabled\n");
-
-- return sysfs_emit(buf, "%s%s\n",
-- srso_strings[srso_mitigation],
-- boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
-+ return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
- }
-
- static ssize_t gds_show_state(char *buf)
-diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
-index defdc594be14d..a7b3ef4c4de91 100644
---- a/arch/x86/kernel/cpu/hygon.c
-+++ b/arch/x86/kernel/cpu/hygon.c
-@@ -87,8 +87,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
- if (!err)
- c->x86_coreid_bits = get_count_order(c->x86_max_cores);
-
-- /* Socket ID is ApicId[6] for these processors. */
-- c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
-+ /*
-+ * Socket ID is ApicId[6] for the processors with model <= 0x3
-+ * when running on host.
-+ */
-+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
-+ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
-
- cacheinfo_hygon_init_llc_id(c, cpu);
- } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
-diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
-index 49f7629b17f73..bbc21798df10e 100644
---- a/arch/x86/kernel/head64.c
-+++ b/arch/x86/kernel/head64.c
-@@ -80,7 +80,7 @@ static struct desc_struct startup_gdt[GDT_ENTRIES] = {
- * while the kernel still uses a direct mapping.
- */
- static struct desc_ptr startup_gdt_descr = {
-- .size = sizeof(startup_gdt),
-+ .size = sizeof(startup_gdt)-1,
- .address = 0,
- };
-
-diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
-index a0c551846b35f..4766b6bed4439 100644
---- a/arch/x86/kernel/nmi.c
-+++ b/arch/x86/kernel/nmi.c
-@@ -507,12 +507,13 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
- }
- this_cpu_write(nmi_state, NMI_EXECUTING);
- this_cpu_write(nmi_cr2, read_cr2());
-+
-+nmi_restart:
- if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
- WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
- WARN_ON_ONCE(!(nsp->idt_seq & 0x1));
- WRITE_ONCE(nsp->recv_jiffies, jiffies);
- }
--nmi_restart:
-
- /*
- * Needs to happen before DR7 is accessed, because the hypervisor can
-@@ -548,16 +549,16 @@ nmi_restart:
-
- if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
- write_cr2(this_cpu_read(nmi_cr2));
-- if (this_cpu_dec_return(nmi_state))
-- goto nmi_restart;
--
-- if (user_mode(regs))
-- mds_user_clear_cpu_buffers();
- if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) {
- WRITE_ONCE(nsp->idt_seq, nsp->idt_seq + 1);
- WARN_ON_ONCE(nsp->idt_seq & 0x1);
- WRITE_ONCE(nsp->recv_jiffies, jiffies);
- }
-+ if (this_cpu_dec_return(nmi_state))
-+ goto nmi_restart;
-+
-+ if (user_mode(regs))
-+ mds_user_clear_cpu_buffers();
- }
-
- #if IS_ENABLED(CONFIG_KVM_INTEL)
-diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
-index cacf2ede62175..23d8aaf8d9fd1 100644
---- a/arch/x86/kernel/signal_64.c
-+++ b/arch/x86/kernel/signal_64.c
-@@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
- frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
- uc_flags = frame_uc_flags(regs);
-
-- if (setup_signal_shadow_stack(ksig))
-- return -EFAULT;
--
- if (!user_access_begin(frame, sizeof(*frame)))
- return -EFAULT;
-
-@@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
- return -EFAULT;
- }
-
-+ if (setup_signal_shadow_stack(ksig))
-+ return -EFAULT;
-+
- /* Set up registers for signal handler */
- regs->di = ksig->sig;
- /* In case the signal handler was declared without prototypes */
-diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
-index 7c2dac6824e26..238afd7335e46 100644
---- a/arch/x86/kvm/hyperv.c
-+++ b/arch/x86/kvm/hyperv.c
-@@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
-
- stimer_cleanup(stimer);
- stimer->count = count;
-- if (stimer->count == 0)
-- stimer->config.enable = 0;
-- else if (stimer->config.auto_enable)
-- stimer->config.enable = 1;
-+ if (!host) {
-+ if (stimer->count == 0)
-+ stimer->config.enable = 0;
-+ else if (stimer->config.auto_enable)
-+ stimer->config.enable = 1;
-+ }
-
- if (stimer->config.enable)
- stimer_mark_pending(stimer, false);
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 3e977dbbf9933..245b20973caee 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -2444,22 +2444,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
- void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
- {
- struct kvm_lapic *apic = vcpu->arch.apic;
-- u64 val;
-
- /*
-- * ICR is a single 64-bit register when x2APIC is enabled. For legacy
-- * xAPIC, ICR writes need to go down the common (slightly slower) path
-- * to get the upper half from ICR2.
-+ * ICR is a single 64-bit register when x2APIC is enabled; all other
-+ * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
-+ * go down the common path to get the upper half from ICR2.
-+ *
-+ * Note, using the write helpers may incur an unnecessary write to the
-+ * virtual APIC state, but KVM needs to conditionally modify the value
-+ * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
-+ * conditional branches is likely a wash relative to the cost of the
-+ * maybe-unnecessary write, and both are in the noise anyways.
- */
-- if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
-- val = kvm_lapic_get_reg64(apic, APIC_ICR);
-- kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
-- trace_kvm_apic_write(APIC_ICR, val);
-- } else {
-- /* TODO: optimize to just emulate side effect w/o one more write */
-- val = kvm_lapic_get_reg(apic, offset);
-- kvm_lapic_reg_write(apic, offset, (u32)val);
-- }
-+ if (apic_x2apic_mode(apic) && offset == APIC_ICR)
-+ kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
-+ else
-+ kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
- }
- EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
-
-@@ -2670,6 +2670,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
- u64 msr_val;
- int i;
-
-+ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
-+
- if (!init_event) {
- msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_reset_bsp(vcpu))
-@@ -2981,6 +2983,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
- struct kvm_lapic *apic = vcpu->arch.apic;
- int r;
-
-+ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
-+
- kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
- /* set SPIV separately to get count of SW disabled APICs right */
- apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
-diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
-index 72e3943f36935..9bba5352582c3 100644
---- a/arch/x86/kvm/vmx/vmx.c
-+++ b/arch/x86/kvm/vmx/vmx.c
-@@ -6912,7 +6912,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
- vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
- }
-
--static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
-+static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-@@ -8286,7 +8286,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
- .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
- .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
- .load_eoi_exitmap = vmx_load_eoi_exitmap,
-- .apicv_post_state_restore = vmx_apicv_post_state_restore,
-+ .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
- .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
- .hwapic_irr_update = vmx_hwapic_irr_update,
- .hwapic_isr_update = vmx_hwapic_isr_update,
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 41cce5031126a..e179db7c17dad 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -3641,6 +3641,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
- case MSR_AMD64_PATCH_LOADER:
- case MSR_AMD64_BU_CFG2:
- case MSR_AMD64_DC_CFG:
-+ case MSR_AMD64_TW_CFG:
- case MSR_F15H_EX_CFG:
- break;
-
-@@ -4065,6 +4066,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
- case MSR_AMD64_BU_CFG2:
- case MSR_IA32_PERF_CTL:
- case MSR_AMD64_DC_CFG:
-+ case MSR_AMD64_TW_CFG:
- case MSR_F15H_EX_CFG:
- /*
- * Intel Sandy Bridge CPUs must support the RAPL (running average power
-diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
-index 80efd45a77617..6e8b7e600def5 100644
---- a/arch/x86/lib/copy_mc.c
-+++ b/arch/x86/lib/copy_mc.c
-@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
- }
- EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
-
--unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
-+unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
- {
- unsigned long ret;
-
- if (copy_mc_fragile_enabled) {
- __uaccess_begin();
-- ret = copy_mc_fragile(dst, src, len);
-+ ret = copy_mc_fragile((__force void *)dst, src, len);
- __uaccess_end();
- return ret;
- }
-
- if (static_cpu_has(X86_FEATURE_ERMS)) {
- __uaccess_begin();
-- ret = copy_mc_enhanced_fast_string(dst, src, len);
-+ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
- __uaccess_end();
- return ret;
- }
-
-- return copy_user_generic(dst, src, len);
-+ return copy_user_generic((__force void *)dst, src, len);
- }
-diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
-index 5a53c2cc169cc..6993f026adec9 100644
---- a/arch/x86/mm/maccess.c
-+++ b/arch/x86/mm/maccess.c
-@@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
- unsigned long vaddr = (unsigned long)unsafe_src;
-
- /*
-- * Range covering the highest possible canonical userspace address
-- * as well as non-canonical address range. For the canonical range
-- * we also need to include the userspace guard page.
-+ * Do not allow userspace addresses. This disallows
-+ * normal userspace and the userspace guard page:
- */
-- return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
-- __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
-+ if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
-+ return false;
-+
-+ /*
-+ * Allow everything during early boot before 'x86_virt_bits'
-+ * is initialized. Needed for instruction decoding in early
-+ * exception handlers.
-+ */
-+ if (!boot_cpu_data.x86_virt_bits)
-+ return true;
-+
-+ return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
- }
- #else
- bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
-diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
-index 2aadb2019b4f2..aa39d678fe81d 100644
---- a/arch/x86/mm/numa.c
-+++ b/arch/x86/mm/numa.c
-@@ -11,6 +11,7 @@
- #include <linux/nodemask.h>
- #include <linux/sched.h>
- #include <linux/topology.h>
-+#include <linux/sort.h>
-
- #include <asm/e820/api.h>
- #include <asm/proto.h>
-@@ -601,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
- if (start >= end)
- continue;
-
-- /*
-- * Don't confuse VM with a node that doesn't have the
-- * minimum amount of memory:
-- */
-- if (end && (end - start) < NODE_MIN_SIZE)
-- continue;
--
- alloc_node_data(nid);
- }
-
-@@ -961,4 +955,83 @@ int memory_add_physaddr_to_nid(u64 start)
- return nid;
- }
- EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-+
-+static int __init cmp_memblk(const void *a, const void *b)
-+{
-+ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
-+ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
-+
-+ return ma->start - mb->start;
-+}
-+
-+static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
-+
-+/**
-+ * numa_fill_memblks - Fill gaps in numa_meminfo memblks
-+ * @start: address to begin fill
-+ * @end: address to end fill
-+ *
-+ * Find and extend numa_meminfo memblks to cover the @start-@end
-+ * physical address range, such that the first memblk includes
-+ * @start, the last memblk includes @end, and any gaps in between
-+ * are filled.
-+ *
-+ * RETURNS:
-+ * 0 : Success
-+ * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
-+ */
-+
-+int __init numa_fill_memblks(u64 start, u64 end)
-+{
-+ struct numa_memblk **blk = &numa_memblk_list[0];
-+ struct numa_meminfo *mi = &numa_meminfo;
-+ int count = 0;
-+ u64 prev_end;
-+
-+ /*
-+ * Create a list of pointers to numa_meminfo memblks that
-+ * overlap start, end. Exclude (start == bi->end) since
-+ * end addresses in both a CFMWS range and a memblk range
-+ * are exclusive.
-+ *
-+ * This list of pointers is used to make in-place changes
-+ * that fill out the numa_meminfo memblks.
-+ */
-+ for (int i = 0; i < mi->nr_blks; i++) {
-+ struct numa_memblk *bi = &mi->blk[i];
-+
-+ if (start < bi->end && end >= bi->start) {
-+ blk[count] = &mi->blk[i];
-+ count++;
-+ }
-+ }
-+ if (!count)
-+ return NUMA_NO_MEMBLK;
-+
-+ /* Sort the list of pointers in memblk->start order */
-+ sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);
-+
-+ /* Make sure the first/last memblks include start/end */
-+ blk[0]->start = min(blk[0]->start, start);
-+ blk[count - 1]->end = max(blk[count - 1]->end, end);
-+
-+ /*
-+ * Fill any gaps by tracking the previous memblk's
-+ * end address and backfilling to it if needed.
-+ */
-+ prev_end = blk[0]->end;
-+ for (int i = 1; i < count; i++) {
-+ struct numa_memblk *curr = blk[i];
-+
-+ if (prev_end >= curr->start) {
-+ if (prev_end < curr->end)
-+ prev_end = curr->end;
-+ } else {
-+ curr->start = prev_end;
-+ prev_end = curr->end;
-+ }
-+ }
-+ return 0;
-+}
-+
- #endif
-diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index a5930042139d3..52f36c48c1b9e 100644
---- a/arch/x86/net/bpf_jit_comp.c
-+++ b/arch/x86/net/bpf_jit_comp.c
-@@ -1018,6 +1018,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
-
- #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
-
-+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
-+#define RESTORE_TAIL_CALL_CNT(stack) \
-+ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
-+
- static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
- int oldproglen, struct jit_context *ctx, bool jmp_padding)
- {
-@@ -1623,9 +1627,7 @@ st: if (is_imm8(insn->off))
-
- func = (u8 *) __bpf_call_base + imm32;
- if (tail_call_reachable) {
-- /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
-- EMIT3_off32(0x48, 0x8B, 0x85,
-- -round_up(bpf_prog->aux->stack_depth, 8) - 8);
-+ RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
- if (!imm32)
- return -EINVAL;
- offs = 7 + x86_call_depth_emit_accounting(&prog, func);
-@@ -2400,6 +2402,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
- * [ ... ]
- * [ stack_arg2 ]
- * RBP - arg_stack_off [ stack_arg1 ]
-+ * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
- */
-
- /* room for return value of orig_call or fentry prog */
-@@ -2464,6 +2467,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
- else
- /* sub rsp, stack_size */
- EMIT4(0x48, 0x83, 0xEC, stack_size);
-+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
-+ EMIT1(0x50); /* push rax */
- /* mov QWORD PTR [rbp - rbx_off], rbx */
- emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
-
-@@ -2516,9 +2521,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
- restore_regs(m, &prog, regs_off);
- save_args(m, &prog, arg_stack_off, true);
-
-+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
-+ /* Before calling the original function, restore the
-+ * tail_call_cnt from stack to rax.
-+ */
-+ RESTORE_TAIL_CALL_CNT(stack_size);
-+
- if (flags & BPF_TRAMP_F_ORIG_STACK) {
-- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
-- EMIT2(0xff, 0xd0); /* call *rax */
-+ emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
-+ EMIT2(0xff, 0xd3); /* call *rbx */
- } else {
- /* call original function */
- if (emit_rsb_call(&prog, orig_call, prog)) {
-@@ -2569,7 +2580,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
- ret = -EINVAL;
- goto cleanup;
- }
-- }
-+ } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
-+ /* Before running the original function, restore the
-+ * tail_call_cnt from stack to rax.
-+ */
-+ RESTORE_TAIL_CALL_CNT(stack_size);
-+
- /* restore return value of orig_call or fentry prog back into RAX */
- if (save_ret)
- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
-diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
-index e3ec02e6ac9fe..f347c20247d30 100644
---- a/arch/x86/pci/fixup.c
-+++ b/arch/x86/pci/fixup.c
-@@ -3,9 +3,11 @@
- * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
- */
-
-+#include <linux/bitfield.h>
- #include <linux/delay.h>
- #include <linux/dmi.h>
- #include <linux/pci.h>
-+#include <linux/suspend.h>
- #include <linux/vgaarb.h>
- #include <asm/amd_nb.h>
- #include <asm/hpet.h>
-@@ -904,3 +906,60 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
- }
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
- DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
-+
-+#ifdef CONFIG_SUSPEND
-+/*
-+ * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
-+ * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
-+ * Root Ports don't generate wakeup interrupts for USB devices.
-+ *
-+ * When suspending, remove D3hot and D3cold from the PME_Support advertised
-+ * by the Root Port so we don't use those states if we're expecting wakeup
-+ * interrupts. Restore the advertised PME_Support when resuming.
-+ */
-+static void amd_rp_pme_suspend(struct pci_dev *dev)
-+{
-+ struct pci_dev *rp;
-+
-+ /*
-+ * PM_SUSPEND_ON means we're doing runtime suspend, which means
-+ * amd-pmc will not be involved so PMEs during D3 work as advertised.
-+ *
-+ * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
-+ * sleep state, but we assume amd-pmc is always present.
-+ */
-+ if (pm_suspend_target_state == PM_SUSPEND_ON)
-+ return;
-+
-+ rp = pcie_find_root_port(dev);
-+ if (!rp->pm_cap)
-+ return;
-+
-+ rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
-+ PCI_PM_CAP_PME_SHIFT);
-+ dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
-+}
-+
-+static void amd_rp_pme_resume(struct pci_dev *dev)
-+{
-+ struct pci_dev *rp;
-+ u16 pmc;
-+
-+ rp = pcie_find_root_port(dev);
-+ if (!rp->pm_cap)
-+ return;
-+
-+ pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
-+ rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
-+}
-+/* Rembrandt (yellow_carp) */
-+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
-+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
-+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
-+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
-+/* Phoenix (pink_sardine) */
-+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
-+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
-+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
-+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
-+#endif /* CONFIG_SUSPEND */
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 0337392a31214..3c61bb98c10e2 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -33,9 +33,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
- * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
- * but during boot it is switched to point to xen_vcpu_info.
- * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
-+ * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
-+ * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
-+ * which matches the cache line size of 64-bit x86 processors).
- */
- DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
--DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
-+DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
-
- /* Linux <-> Xen vCPU id mapping */
- DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
-@@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
- int err;
- struct vcpu_info *vcpup;
-
-+ BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
- BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
-
- /*
-diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
-index 408a2aa66c692..a87ab36889e76 100644
---- a/arch/x86/xen/xen-ops.h
-+++ b/arch/x86/xen/xen-ops.h
-@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
- struct trap_info;
- void xen_copy_trap_info(struct trap_info *traps);
-
--DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
-+DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
- DECLARE_PER_CPU(unsigned long, xen_cr3);
- DECLARE_PER_CPU(unsigned long, xen_current_cr3);
-
-diff --git a/block/bdev.c b/block/bdev.c
-index f3b13aa1b7d42..04dba25b0019e 100644
---- a/block/bdev.c
-+++ b/block/bdev.c
-@@ -425,6 +425,8 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
-
- void bdev_add(struct block_device *bdev, dev_t dev)
- {
-+ if (bdev_stable_writes(bdev))
-+ mapping_set_stable_writes(bdev->bd_inode->i_mapping);
- bdev->bd_dev = dev;
- bdev->bd_inode->i_rdev = dev;
- bdev->bd_inode->i_ino = dev;
-diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
-index 624c03c8fe64e..fd482439afbc9 100644
---- a/block/blk-cgroup.h
-+++ b/block/blk-cgroup.h
-@@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
- {
- struct blkcg_gq *blkg;
-
-- WARN_ON_ONCE(!rcu_read_lock_held());
--
- if (blkcg == &blkcg_root)
- return q->root_blkg;
-
-diff --git a/block/blk-core.c b/block/blk-core.c
-index 9d51e9894ece7..fdf25b8d6e784 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -501,8 +501,8 @@ static inline void bio_check_ro(struct bio *bio)
- if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
- if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
- return;
-- pr_warn("Trying to write to read-only block-device %pg\n",
-- bio->bi_bdev);
-+ pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
-+ bio->bi_bdev);
- /* Older lvm-tools actually trigger this */
- }
- }
-diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 1fafd54dce3cb..6ab7f360ff2ac 100644
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -2875,11 +2875,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
- };
- struct request *rq;
-
-- if (unlikely(bio_queue_enter(bio)))
-- return NULL;
--
- if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-- goto queue_exit;
-+ return NULL;
-
- rq_qos_throttle(q, bio);
-
-@@ -2895,35 +2892,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
- rq_qos_cleanup(q, bio);
- if (bio->bi_opf & REQ_NOWAIT)
- bio_wouldblock_error(bio);
--queue_exit:
-- blk_queue_exit(q);
- return NULL;
- }
-
--static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-- struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
-+/* return true if this @rq can be used for @bio */
-+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
-+ struct bio *bio)
- {
-- struct request *rq;
-- enum hctx_type type, hctx_type;
-+ enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
-+ enum hctx_type hctx_type = rq->mq_hctx->type;
-
-- if (!plug)
-- return NULL;
-- rq = rq_list_peek(&plug->cached_rq);
-- if (!rq || rq->q != q)
-- return NULL;
-+ WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
-
-- if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-- *bio = NULL;
-- return NULL;
-- }
--
-- type = blk_mq_get_hctx_type((*bio)->bi_opf);
-- hctx_type = rq->mq_hctx->type;
- if (type != hctx_type &&
- !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-- return NULL;
-- if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-- return NULL;
-+ return false;
-+ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-+ return false;
-
- /*
- * If any qos ->throttle() end up blocking, we will have flushed the
-@@ -2931,12 +2916,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
- * before we throttle.
- */
- plug->cached_rq = rq_list_next(rq);
-- rq_qos_throttle(q, *bio);
-+ rq_qos_throttle(rq->q, bio);
-
- blk_mq_rq_time_init(rq, 0);
-- rq->cmd_flags = (*bio)->bi_opf;
-+ rq->cmd_flags = bio->bi_opf;
- INIT_LIST_HEAD(&rq->queuelist);
-- return rq;
-+ return true;
- }
-
- static void bio_set_ioprio(struct bio *bio)
-@@ -2966,7 +2951,7 @@ void blk_mq_submit_bio(struct bio *bio)
- struct blk_plug *plug = blk_mq_plug(bio);
- const int is_sync = op_is_sync(bio->bi_opf);
- struct blk_mq_hw_ctx *hctx;
-- struct request *rq;
-+ struct request *rq = NULL;
- unsigned int nr_segs = 1;
- blk_status_t ret;
-
-@@ -2977,20 +2962,36 @@ void blk_mq_submit_bio(struct bio *bio)
- return;
- }
-
-- if (!bio_integrity_prep(bio))
-- return;
--
- bio_set_ioprio(bio);
-
-- rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-- if (!rq) {
-- if (!bio)
-+ if (plug) {
-+ rq = rq_list_peek(&plug->cached_rq);
-+ if (rq && rq->q != q)
-+ rq = NULL;
-+ }
-+ if (rq) {
-+ if (!bio_integrity_prep(bio))
- return;
-- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-- if (unlikely(!rq))
-+ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
- return;
-+ if (blk_mq_can_use_cached_rq(rq, plug, bio))
-+ goto done;
-+ percpu_ref_get(&q->q_usage_counter);
-+ } else {
-+ if (unlikely(bio_queue_enter(bio)))
-+ return;
-+ if (!bio_integrity_prep(bio))
-+ goto fail;
-+ }
-+
-+ rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-+ if (unlikely(!rq)) {
-+fail:
-+ blk_queue_exit(q);
-+ return;
- }
-
-+done:
- trace_block_getrq(bio);
-
- rq_qos_track(q, rq, bio);
-diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
-index 1ef3b46d6f6e5..59ec726b7c770 100644
---- a/crypto/asymmetric_keys/Kconfig
-+++ b/crypto/asymmetric_keys/Kconfig
-@@ -76,7 +76,7 @@ config SIGNED_PE_FILE_VERIFICATION
- signed PE binary.
-
- config FIPS_SIGNATURE_SELFTEST
-- bool "Run FIPS selftests on the X.509+PKCS7 signature verification"
-+ tristate "Run FIPS selftests on the X.509+PKCS7 signature verification"
- help
- This option causes some selftests to be run on the signature
- verification code, using some built in data. This is required
-@@ -84,5 +84,6 @@ config FIPS_SIGNATURE_SELFTEST
- depends on KEYS
- depends on ASYMMETRIC_KEY_TYPE
- depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
-+ depends on X509_CERTIFICATE_PARSER
-
- endif # ASYMMETRIC_KEY_TYPE
-diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
-index 0d1fa1b692c6b..1a273d6df3ebf 100644
---- a/crypto/asymmetric_keys/Makefile
-+++ b/crypto/asymmetric_keys/Makefile
-@@ -22,7 +22,8 @@ x509_key_parser-y := \
- x509_cert_parser.o \
- x509_loader.o \
- x509_public_key.o
--x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o
-+obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o
-+x509_selftest-y += selftest.o
-
- $(obj)/x509_cert_parser.o: \
- $(obj)/x509.asn1.h \
-diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c
-index fa0bf7f242849..c50da7ef90ae9 100644
---- a/crypto/asymmetric_keys/selftest.c
-+++ b/crypto/asymmetric_keys/selftest.c
-@@ -4,10 +4,11 @@
- * Written by David Howells (dhowells@redhat.com)
- */
-
--#include <linux/kernel.h>
-+#include <crypto/pkcs7.h>
- #include <linux/cred.h>
-+#include <linux/kernel.h>
- #include <linux/key.h>
--#include <crypto/pkcs7.h>
-+#include <linux/module.h>
- #include "x509_parser.h"
-
- struct certs_test {
-@@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = {
- TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
- };
-
--int __init fips_signature_selftest(void)
-+static int __init fips_signature_selftest(void)
- {
- struct key *keyring;
- int ret, i;
-@@ -222,3 +223,9 @@ int __init fips_signature_selftest(void)
- key_put(keyring);
- return 0;
- }
-+
-+late_initcall(fips_signature_selftest);
-+
-+MODULE_DESCRIPTION("X.509 self tests");
-+MODULE_AUTHOR("Red Hat, Inc.");
-+MODULE_LICENSE("GPL");
-diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
-index a299c9c56f409..97a886cbe01c3 100644
---- a/crypto/asymmetric_keys/x509_parser.h
-+++ b/crypto/asymmetric_keys/x509_parser.h
-@@ -40,15 +40,6 @@ struct x509_certificate {
- bool blacklisted;
- };
-
--/*
-- * selftest.c
-- */
--#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST
--extern int __init fips_signature_selftest(void);
--#else
--static inline int fips_signature_selftest(void) { return 0; }
--#endif
--
- /*
- * x509_cert_parser.c
- */
-diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
-index 7c71db3ac23d4..6a4f00be22fc1 100644
---- a/crypto/asymmetric_keys/x509_public_key.c
-+++ b/crypto/asymmetric_keys/x509_public_key.c
-@@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = {
- /*
- * Module stuff
- */
--extern int __init certs_selftest(void);
- static int __init x509_key_init(void)
- {
-- int ret;
--
-- ret = register_asymmetric_key_parser(&x509_key_parser);
-- if (ret < 0)
-- return ret;
-- return fips_signature_selftest();
-+ return register_asymmetric_key_parser(&x509_key_parser);
- }
-
- static void __exit x509_key_exit(void)
-diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
-index 8c1d0ca412137..d0d954fe9d54f 100644
---- a/crypto/pcrypt.c
-+++ b/crypto/pcrypt.c
-@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
- err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
- if (!err)
- return -EINPROGRESS;
-+ if (err == -EBUSY)
-+ return -EAGAIN;
-
- return err;
- }
-@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
- err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
- if (!err)
- return -EINPROGRESS;
-+ if (err == -EBUSY)
-+ return -EAGAIN;
-
- return err;
- }
-diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
-index 20c4583f12b0d..31c74ca70a2e5 100644
---- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
-+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
-@@ -8149,11 +8149,11 @@ static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u
- eng_id[num_of_eng] = razwi_info[i].eng_id;
- base[num_of_eng] = razwi_info[i].rtr_ctrl;
- if (!num_of_eng)
-- str_size += snprintf(eng_name + str_size,
-+ str_size += scnprintf(eng_name + str_size,
- PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
- razwi_info[i].eng_name);
- else
-- str_size += snprintf(eng_name + str_size,
-+ str_size += scnprintf(eng_name + str_size,
- PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
- razwi_info[i].eng_name);
- num_of_eng++;
-diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
-index 18be8b98e9a8b..b8010c07eec17 100644
---- a/drivers/accel/ivpu/ivpu_hw_37xx.c
-+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
-@@ -536,6 +536,16 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
- return ret;
- }
-
-+static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-+{
-+ ivpu_boot_dpu_active_drive(vdev, false);
-+ ivpu_boot_pwr_island_isolation_drive(vdev, true);
-+ ivpu_boot_pwr_island_trickle_drive(vdev, false);
-+ ivpu_boot_pwr_island_drive(vdev, false);
-+
-+ return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-+}
-+
- static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
- {
- u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
-@@ -625,30 +635,26 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
- ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
-
-+ ivpu_hw_read_platform(vdev);
-+ ivpu_hw_wa_init(vdev);
-+ ivpu_hw_timeouts_init(vdev);
-+
- return 0;
- }
-
- static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
- {
-- int ret;
-- u32 val;
--
-- if (IVPU_WA(punit_disabled))
-- return 0;
-+ int ret = 0;
-
-- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-- if (ret) {
-- ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
-- return ret;
-+ if (ivpu_boot_pwr_domain_disable(vdev)) {
-+ ivpu_err(vdev, "Failed to disable power domain\n");
-+ ret = -EIO;
- }
-
-- val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
-- val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
-- REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
--
-- ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-- if (ret)
-- ivpu_err(vdev, "Timed out waiting for RESET completion\n");
-+ if (ivpu_pll_disable(vdev)) {
-+ ivpu_err(vdev, "Failed to disable PLL\n");
-+ ret = -EIO;
-+ }
-
- return ret;
- }
-@@ -681,14 +687,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
- {
- int ret;
-
-- ivpu_hw_read_platform(vdev);
-- ivpu_hw_wa_init(vdev);
-- ivpu_hw_timeouts_init(vdev);
--
-- ret = ivpu_hw_37xx_reset(vdev);
-- if (ret)
-- ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
--
- ret = ivpu_hw_37xx_d0i3_disable(vdev);
- if (ret)
- ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
-@@ -756,11 +754,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
- {
- int ret = 0;
-
-- if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
-- ivpu_err(vdev, "Failed to reset the VPU\n");
-+ if (!ivpu_hw_37xx_is_idle(vdev))
-+ ivpu_warn(vdev, "VPU not idle during power down\n");
-
-- if (ivpu_pll_disable(vdev)) {
-- ivpu_err(vdev, "Failed to disable PLL\n");
-+ if (ivpu_hw_37xx_reset(vdev)) {
-+ ivpu_err(vdev, "Failed to reset VPU\n");
- ret = -EIO;
- }
-
-diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
-index 85171a408363f..7c3ff25232a2c 100644
---- a/drivers/accel/ivpu/ivpu_hw_40xx.c
-+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
-@@ -728,6 +728,10 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
- ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
- ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
-
-+ ivpu_hw_read_platform(vdev);
-+ ivpu_hw_wa_init(vdev);
-+ ivpu_hw_timeouts_init(vdev);
-+
- return 0;
- }
-
-@@ -819,10 +823,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
- return ret;
- }
-
-- ivpu_hw_read_platform(vdev);
-- ivpu_hw_wa_init(vdev);
-- ivpu_hw_timeouts_init(vdev);
--
- ret = ivpu_hw_40xx_d0i3_disable(vdev);
- if (ret)
- ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
-diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
-index a2056c4c8cb70..271092f2700a1 100644
---- a/drivers/acpi/acpi_fpdt.c
-+++ b/drivers/acpi/acpi_fpdt.c
-@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
- record_header = (void *)subtable_header + offset;
- offset += record_header->length;
-
-+ if (!record_header->length) {
-+ pr_err(FW_BUG "Zero-length record found in FPTD.\n");
-+ result = -EINVAL;
-+ goto err;
-+ }
-+
- switch (record_header->type) {
- case RECORD_S3_RESUME:
- if (subtable_type != SUBTABLE_S3PT) {
- pr_err(FW_BUG "Invalid record %d for subtable %s\n",
- record_header->type, signature);
-- return -EINVAL;
-+ result = -EINVAL;
-+ goto err;
- }
- if (record_resume) {
- pr_err("Duplicate resume performance record found.\n");
-@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
- record_resume = (struct resume_performance_record *)record_header;
- result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
- if (result)
-- return result;
-+ goto err;
- break;
- case RECORD_S3_SUSPEND:
- if (subtable_type != SUBTABLE_S3PT) {
-@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
- record_suspend = (struct suspend_performance_record *)record_header;
- result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
- if (result)
-- return result;
-+ goto err;
- break;
- case RECORD_BOOT:
- if (subtable_type != SUBTABLE_FBPT) {
- pr_err(FW_BUG "Invalid %d for subtable %s\n",
- record_header->type, signature);
-- return -EINVAL;
-+ result = -EINVAL;
-+ goto err;
- }
- if (record_boot) {
- pr_err("Duplicate boot performance record found.\n");
-@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
- record_boot = (struct boot_performance_record *)record_header;
- result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
- if (result)
-- return result;
-+ goto err;
- break;
-
- default:
-@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
- }
- }
- return 0;
-+
-+err:
-+ if (record_boot)
-+ sysfs_remove_group(fpdt_kobj, &boot_attr_group);
-+
-+ if (record_suspend)
-+ sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
-+
-+ if (record_resume)
-+ sysfs_remove_group(fpdt_kobj, &resume_attr_group);
-+
-+ return result;
- }
-
- static int __init acpi_init_fpdt(void)
-@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
- struct acpi_table_header *header;
- struct fpdt_subtable_entry *subtable;
- u32 offset = sizeof(*header);
-+ int result;
-
- status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
-
-@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
-
- fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
- if (!fpdt_kobj) {
-- acpi_put_table(header);
-- return -ENOMEM;
-+ result = -ENOMEM;
-+ goto err_nomem;
- }
-
- while (offset < header->length) {
-@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
- switch (subtable->type) {
- case SUBTABLE_FBPT:
- case SUBTABLE_S3PT:
-- fpdt_process_subtable(subtable->address,
-+ result = fpdt_process_subtable(subtable->address,
- subtable->type);
-+ if (result)
-+ goto err_subtable;
- break;
- default:
- /* Other types are reserved in ACPI 6.4 spec. */
-@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
- offset += sizeof(*subtable);
- }
- return 0;
-+err_subtable:
-+ kobject_put(fpdt_kobj);
-+
-+err_nomem:
-+ acpi_put_table(header);
-+ return result;
- }
-
- fs_initcall(acpi_init_fpdt);
-diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
-index b411948594ff8..35f071ad95324 100644
---- a/drivers/acpi/acpi_video.c
-+++ b/drivers/acpi/acpi_video.c
-@@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
- static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
- unsigned long *state)
- {
-- struct acpi_device *device = cooling_dev->devdata;
-- struct acpi_video_device *video = acpi_driver_data(device);
-+ struct acpi_video_device *video = cooling_dev->devdata;
-
- *state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
- return 0;
-@@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
- static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
- unsigned long *state)
- {
-- struct acpi_device *device = cooling_dev->devdata;
-- struct acpi_video_device *video = acpi_driver_data(device);
-+ struct acpi_video_device *video = cooling_dev->devdata;
- unsigned long long level;
- int offset;
-
-@@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
- static int
- video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
- {
-- struct acpi_device *device = cooling_dev->devdata;
-- struct acpi_video_device *video = acpi_driver_data(device);
-+ struct acpi_video_device *video = cooling_dev->devdata;
- int level;
-
- if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
-@@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
-
- strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
-- device->driver_data = data;
-
- data->device_id = device_id;
- data->video = video;
-@@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
- device->backlight->props.brightness =
- acpi_video_get_brightness(device->backlight);
-
-- device->cooling_dev = thermal_cooling_device_register("LCD",
-- device->dev, &video_cooling_ops);
-+ device->cooling_dev = thermal_cooling_device_register("LCD", device,
-+ &video_cooling_ops);
- if (IS_ERR(device->cooling_dev)) {
- /*
- * Set cooling_dev to NULL so we don't crash trying to free it.
-@@ -2031,7 +2027,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
- * HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
- * evaluated to have functional panel brightness control.
- */
-- acpi_device_fix_up_power_extended(device);
-+ acpi_device_fix_up_power_children(device);
-
- pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
- ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
-diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
-index ef59d6ea16da0..63ad0541db381 100644
---- a/drivers/acpi/apei/ghes.c
-+++ b/drivers/acpi/apei/ghes.c
-@@ -209,6 +209,20 @@ err_pool_alloc:
- return -ENOMEM;
- }
-
-+/**
-+ * ghes_estatus_pool_region_free - free previously allocated memory
-+ * from the ghes_estatus_pool.
-+ * @addr: address of memory to free.
-+ * @size: size of memory to free.
-+ *
-+ * Returns none.
-+ */
-+void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
-+{
-+ gen_pool_free(ghes_estatus_pool, addr, size);
-+}
-+EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
-+
- static int map_gen_v2(struct ghes *ghes)
- {
- return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
-@@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
-+ u8 *aer_info;
-
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
-@@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
- if (gdata->flags & CPER_SEC_RESET)
- aer_severity = AER_FATAL;
-
-+ aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
-+ sizeof(struct aer_capability_regs));
-+ if (!aer_info)
-+ return;
-+ memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
-+
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity,
- (struct aer_capability_regs *)
-- pcie_err->aer_info);
-+ aer_info);
- }
- #endif
- }
-diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
-index f007116a84276..3b4d048c49417 100644
---- a/drivers/acpi/device_pm.c
-+++ b/drivers/acpi/device_pm.c
-@@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
- }
- EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
-
-+/**
-+ * acpi_device_fix_up_power_children - Force a device's children into D0.
-+ * @adev: Parent device object whose children's power state is to be fixed up.
-+ *
-+ * Call acpi_device_fix_up_power() for @adev's children so long as they
-+ * are reported as present and enabled.
-+ */
-+void acpi_device_fix_up_power_children(struct acpi_device *adev)
-+{
-+ acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
-+}
-+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
-+
- int acpi_device_update_power(struct acpi_device *device, int *state_p)
- {
- int state;
-diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
-index b9bbf07461992..a34d8578b3da6 100644
---- a/drivers/acpi/device_sysfs.c
-+++ b/drivers/acpi/device_sysfs.c
-@@ -158,8 +158,8 @@ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalia
- return 0;
-
- len = snprintf(modalias, size, "acpi:");
-- if (len <= 0)
-- return len;
-+ if (len >= size)
-+ return -ENOMEM;
-
- size -= len;
-
-@@ -212,8 +212,10 @@ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias
- len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
- ACPI_FREE(buf.pointer);
-
-- if (len <= 0)
-- return len;
-+ if (len >= size)
-+ return -ENOMEM;
-+
-+ size -= len;
-
- of_compatible = acpi_dev->data.of_compatible;
- if (of_compatible->type == ACPI_TYPE_PACKAGE) {
-diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index c95d0edb0be9e..a59c11df73754 100644
---- a/drivers/acpi/ec.c
-+++ b/drivers/acpi/ec.c
-@@ -1924,6 +1924,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
- },
- },
-+ {
-+ /*
-+ * HP 250 G7 Notebook PC
-+ */
-+ .callback = ec_honor_dsdt_gpe,
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
-+ },
-+ },
- {
- /*
- * Samsung hardware
-diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
-index 1f4fc5f8a819d..12f330b0eac01 100644
---- a/drivers/acpi/numa/srat.c
-+++ b/drivers/acpi/numa/srat.c
-@@ -310,11 +310,16 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
- start = cfmws->base_hpa;
- end = cfmws->base_hpa + cfmws->window_size;
-
-- /* Skip if the SRAT already described the NUMA details for this HPA */
-- node = phys_to_target_node(start);
-- if (node != NUMA_NO_NODE)
-+ /*
-+ * The SRAT may have already described NUMA details for all,
-+ * or a portion of, this CFMWS HPA range. Extend the memblks
-+ * found for any portion of the window to cover the entire
-+ * window.
-+ */
-+ if (!numa_fill_memblks(start, end))
- return 0;
-
-+ /* No SRAT description. Create a new node. */
- node = acpi_map_pxm_to_node(*fake_pxm);
-
- if (node == NUMA_NO_NODE) {
-diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index 3a34a8c425fe4..55437f5e0c3ae 100644
---- a/drivers/acpi/processor_idle.c
-+++ b/drivers/acpi/processor_idle.c
-@@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
- while (1) {
-
- if (cx->entry_method == ACPI_CSTATE_HALT)
-- safe_halt();
-+ raw_safe_halt();
- else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
- io_idle(cx->address);
- } else
-diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
-index 413e4fcadcaf7..99b4e33554355 100644
---- a/drivers/acpi/property.c
-+++ b/drivers/acpi/property.c
-@@ -1102,25 +1102,26 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
- switch (proptype) {
- case DEV_PROP_STRING:
- break;
-- case DEV_PROP_U8 ... DEV_PROP_U64:
-+ default:
- if (obj->type == ACPI_TYPE_BUFFER) {
- if (nval > obj->buffer.length)
- return -EOVERFLOW;
-- break;
-+ } else {
-+ if (nval > obj->package.count)
-+ return -EOVERFLOW;
- }
-- fallthrough;
-- default:
-- if (nval > obj->package.count)
-- return -EOVERFLOW;
- break;
- }
- if (nval == 0)
- return -EINVAL;
-
-- if (obj->type != ACPI_TYPE_BUFFER)
-- items = obj->package.elements;
-- else
-+ if (obj->type == ACPI_TYPE_BUFFER) {
-+ if (proptype != DEV_PROP_U8)
-+ return -EPROTO;
- items = obj;
-+ } else {
-+ items = obj->package.elements;
-+ }
-
- switch (proptype) {
- case DEV_PROP_U8:
-diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
-index 297a88587031e..d09e3e7bcb585 100644
---- a/drivers/acpi/resource.c
-+++ b/drivers/acpi/resource.c
-@@ -446,6 +446,13 @@ static const struct dmi_system_id asus_laptop[] = {
- DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
- },
- },
-+ {
-+ /* Asus ExpertBook B1402CVA */
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
-+ },
-+ },
- {
- .ident = "Asus ExpertBook B1502CBA",
- .matches = {
-@@ -495,6 +502,18 @@ static const struct dmi_system_id maingear_laptop[] = {
- DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
- }
- },
-+ {
-+ /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
-+ },
-+ },
-+ {
-+ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
-+ },
-+ },
- {
- .ident = "MAINGEAR Vector Pro 2 17",
- .matches = {
-diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
-index 691d4b7686ee7..1d249d0f61ae4 100644
---- a/drivers/acpi/scan.c
-+++ b/drivers/acpi/scan.c
-@@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
- int err;
- const struct iommu_ops *ops;
-
-+ /* Serialise to make dev->iommu stable under our potential fwspec */
-+ mutex_lock(&iommu_probe_device_lock);
- /*
- * If we already translated the fwspec there is nothing left to do,
- * return the iommu_ops.
- */
- ops = acpi_iommu_fwspec_ops(dev);
-- if (ops)
-+ if (ops) {
-+ mutex_unlock(&iommu_probe_device_lock);
- return ops;
-+ }
-
- err = iort_iommu_configure_id(dev, id_in);
- if (err && err != -EPROBE_DEFER)
- err = viot_iommu_configure(dev);
-+ mutex_unlock(&iommu_probe_device_lock);
-
- /*
- * If we have reason to believe the IOMMU driver missed the initial
-diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
-index 442396f6ed1f9..31205fee59d4a 100644
---- a/drivers/acpi/video_detect.c
-+++ b/drivers/acpi/video_detect.c
-@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
- return 0;
- }
-
-+static int video_detect_portege_r100(const struct dmi_system_id *d)
-+{
-+ struct pci_dev *dev;
-+ /* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
-+ dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
-+ if (dev)
-+ acpi_backlight_dmi = acpi_backlight_vendor;
-+ return 0;
-+}
-+
- static const struct dmi_system_id video_detect_dmi_table[] = {
- /*
- * Models which should use the vendor backlight interface,
-@@ -270,6 +280,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
- },
- },
-
-+ /*
-+ * The Toshiba Portégé R100 supports both acpi_video and the
-+ * toshiba_acpi vendor driver, but neither gets activated because
-+ * its VGA chip (Trident CyberBlade XP4m32) has no kernel driver.
-+ * The DMI strings are generic so check for the VGA chip in callback.
-+ */
-+ {
-+ .callback = video_detect_portege_r100,
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
-+ DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
-+ },
-+ },
-+
- /*
- * Models which need acpi_video backlight control where the GPU drivers
- * do not call acpi_video_register_backlight() because no internal panel
-diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
-index 3a957c4da4092..4209fb39f6442 100644
---- a/drivers/ata/libata-scsi.c
-+++ b/drivers/ata/libata-scsi.c
-@@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
- * Ask the sd driver to issue START STOP UNIT on runtime suspend
- * and resume and shutdown only. For system level suspend/resume,
- * devices power state is handled directly by libata EH.
-+ * Given that disks are always spun up on system resume, also
-+ * make sure that the sd driver forces runtime suspended disks
-+ * to be resumed to correctly reflect the power state of the
-+ * device.
- */
-- sdev->manage_runtime_start_stop = true;
-- sdev->manage_shutdown = true;
-+ sdev->manage_runtime_start_stop = 1;
-+ sdev->manage_shutdown = 1;
-+ sdev->force_runtime_start_on_system_start = 1;
- }
-
- /*
-diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
-index 25a63d043c8e1..0f77e04240661 100644
---- a/drivers/ata/pata_isapnp.c
-+++ b/drivers/ata/pata_isapnp.c
-@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
- if (pnp_port_valid(idev, 1)) {
- ctl_addr = devm_ioport_map(&idev->dev,
- pnp_port_start(idev, 1), 1);
-+ if (!ctl_addr)
-+ return -ENOMEM;
-+
- ap->ioaddr.altstatus_addr = ctl_addr;
- ap->ioaddr.ctl_addr = ctl_addr;
- ap->ops = &isapnp_port_ops;
-diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
-index 3241486869530..9bba8f280a4d4 100644
---- a/drivers/atm/iphase.c
-+++ b/drivers/atm/iphase.c
-@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
- static int reset_sar(struct atm_dev *dev)
- {
- IADEV *iadev;
-- int i, error = 1;
-+ int i, error;
- unsigned int pci[64];
-
- iadev = INPH_IA_DEV(dev);
-- for(i=0; i<64; i++)
-- if ((error = pci_read_config_dword(iadev->pci,
-- i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
-- return error;
-+ for (i = 0; i < 64; i++) {
-+ error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
-+ if (error != PCIBIOS_SUCCESSFUL)
-+ return error;
-+ }
- writel(0, iadev->reg+IPHASE5575_EXT_RESET);
-- for(i=0; i<64; i++)
-- if ((error = pci_write_config_dword(iadev->pci,
-- i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
-- return error;
-+ for (i = 0; i < 64; i++) {
-+ error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
-+ if (error != PCIBIOS_SUCCESSFUL)
-+ return error;
-+ }
- udelay(5);
- return 0;
- }
-diff --git a/drivers/base/dd.c b/drivers/base/dd.c
-index a528cec24264a..0c3725c3eefa4 100644
---- a/drivers/base/dd.c
-+++ b/drivers/base/dd.c
-@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
- if (dev->bus && dev->bus->dma_cleanup)
- dev->bus->dma_cleanup(dev);
-
-- device_links_driver_cleanup(dev);
- device_unbind_cleanup(dev);
-+ device_links_driver_cleanup(dev);
-
- klist_remove(&dev->p->knode_driver);
- device_pm_check_callbacks(dev);
-diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
-index c5d151e9c4815..92592f944a3df 100644
---- a/drivers/base/regmap/regcache.c
-+++ b/drivers/base/regmap/regcache.c
-@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
- return 0;
- }
-
-+static int rbtree_all(const void *key, const struct rb_node *node)
-+{
-+ return 0;
-+}
-+
- /**
- * regcache_sync - Sync the register cache with the hardware.
- *
-@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
- unsigned int i;
- const char *name;
- bool bypass;
-+ struct rb_node *node;
-
- if (WARN_ON(map->cache_type == REGCACHE_NONE))
- return -EINVAL;
-@@ -392,6 +398,30 @@ out:
- /* Restore the bypass state */
- map->cache_bypass = bypass;
- map->no_sync_defaults = false;
-+
-+ /*
-+ * If we did any paging with the cache bypassed and a cached
-+ * paging register, then the register and cache state might
-+ * have gone out of sync; force writes of all the paging
-+ * registers.
-+ */
-+ rb_for_each(node, 0, &map->range_tree, rbtree_all) {
-+ struct regmap_range_node *this =
-+ rb_entry(node, struct regmap_range_node, node);
-+
-+ /* If there's nothing in the cache there's nothing to sync */
-+ ret = regcache_read(map, this->selector_reg, &i);
-+ if (ret != 0)
-+ continue;
-+
-+ ret = _regmap_write(map, this->selector_reg, i);
-+ if (ret != 0) {
-+ dev_err(map->dev, "Failed to write %x = %x: %d\n",
-+ this->selector_reg, i, ret);
-+ break;
-+ }
-+ }
-+
- map->unlock(map->lock_arg);
-
- regmap_async_complete(map);
-diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
-index f36027591e1a8..bdd80b73c3e6c 100644
---- a/drivers/base/regmap/regmap-debugfs.c
-+++ b/drivers/base/regmap/regmap-debugfs.c
-@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
- name = map->dev->driver->name;
-
- ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
-- if (ret < 0) {
-+ if (ret >= PAGE_SIZE) {
- kfree(buf);
- return ret;
- }
-diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
-index 234a84ecde8b1..ea61577471994 100644
---- a/drivers/base/regmap/regmap.c
-+++ b/drivers/base/regmap/regmap.c
-@@ -1620,17 +1620,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
- }
-
- if (!map->cache_bypass && map->format.parse_val) {
-- unsigned int ival;
-+ unsigned int ival, offset;
- int val_bytes = map->format.val_bytes;
-- for (i = 0; i < val_len / val_bytes; i++) {
-- ival = map->format.parse_val(val + (i * val_bytes));
-- ret = regcache_write(map,
-- reg + regmap_get_offset(map, i),
-- ival);
-+
-+ /* Cache the last written value for noinc writes */
-+ i = noinc ? val_len - val_bytes : 0;
-+ for (; i < val_len; i += val_bytes) {
-+ ival = map->format.parse_val(val + i);
-+ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
-+ ret = regcache_write(map, reg + offset, ival);
- if (ret) {
- dev_err(map->dev,
- "Error in caching of register: %x ret: %d\n",
-- reg + regmap_get_offset(map, i), ret);
-+ reg + offset, ret);
- return ret;
- }
- }
-diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
-index 800f131222fc8..855fdf5c3b4ea 100644
---- a/drivers/block/nbd.c
-+++ b/drivers/block/nbd.c
-@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
- struct gendisk *disk = nbd->disk;
-
- del_gendisk(disk);
-- put_disk(disk);
- blk_mq_free_tag_set(&nbd->tag_set);
-
- /*
-@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
- idr_remove(&nbd_index_idr, nbd->index);
- mutex_unlock(&nbd_index_mutex);
- destroy_workqueue(nbd->recv_workq);
-- kfree(nbd);
-+ put_disk(disk);
- }
-
- static void nbd_dev_remove_work(struct work_struct *work)
-@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk)
- nbd_put(nbd);
- }
-
-+static void nbd_free_disk(struct gendisk *disk)
-+{
-+ struct nbd_device *nbd = disk->private_data;
-+
-+ kfree(nbd);
-+}
-+
- static const struct block_device_operations nbd_fops =
- {
- .owner = THIS_MODULE,
-@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
- .release = nbd_release,
- .ioctl = nbd_ioctl,
- .compat_ioctl = nbd_ioctl,
-+ .free_disk = nbd_free_disk,
- };
-
- #if IS_ENABLED(CONFIG_DEBUG_FS)
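The nbd hunks above close a use-after-free window: nbd_dev_remove() used to kfree() the device structure while the gendisk could still be held open, so the fix drops the final disk reference there instead and frees the private data from the new .free_disk callback, which the block layer invokes only once the last reference is gone. A plain-C sketch of tying the destructor to the final reference drop (hand-rolled refcounting for illustration, not the block layer API):

    #include <stdio.h>
    #include <stdlib.h>

    struct disk {
        int refs;
        void *private_data;
        void (*free_disk)(struct disk *);   /* runs when the last ref drops */
    };

    static void put_disk(struct disk *d)
    {
        if (--d->refs == 0) {
            d->free_disk(d);                /* private data freed here, not earlier */
            free(d);
        }
    }

    static void nbd_free_disk(struct disk *d)
    {
        free(d->private_data);
        puts("private data freed with the final reference");
    }

    int main(void)
    {
        struct disk *d = malloc(sizeof(*d));

        d->refs = 2;                        /* creator + an open file descriptor */
        d->private_data = malloc(32);
        d->free_disk = nbd_free_disk;

        put_disk(d);                        /* remove path: disk still open */
        put_disk(d);                        /* last close: now safe to free */
        return 0;
    }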
-diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
-index 1fe011676d070..4a4b9bad551e8 100644
---- a/drivers/block/virtio_blk.c
-+++ b/drivers/block/virtio_blk.c
-@@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
- u16 min_io_size;
- u8 physical_block_exp, alignment_offset;
- unsigned int queue_depth;
-+ size_t max_dma_size;
-
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
-@@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
- /* No real sector limit. */
- blk_queue_max_hw_sectors(q, UINT_MAX);
-
-- max_size = virtio_max_dma_size(vdev);
-+ max_dma_size = virtio_max_dma_size(vdev);
-+ max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
-
- /* Host can optionally specify maximum segment size and number of
- * segments. */
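In the virtio_blk hunk above, virtio_max_dma_size() returns a size_t while the segment-size limit handed to the block layer is 32-bit, so a direct assignment could silently truncate a large DMA limit to something tiny, or even zero. Clamping to U32_MAX saturates instead. A standalone sketch of the same guard, with UINT32_MAX standing in for the kernel's U32_MAX:

    #include <stdint.h>
    #include <stdio.h>

    /* Saturate instead of letting the implicit cast wrap around. */
    static uint32_t clamp_to_u32(uint64_t v)
    {
        return v > UINT32_MAX ? UINT32_MAX : (uint32_t)v;
    }

    int main(void)
    {
        uint64_t max_dma_size = 1ULL << 33;   /* e.g. an 8 GiB DMA limit */

        printf("clamped: %u, blind cast: %u\n",
               clamp_to_u32(max_dma_size), (uint32_t)max_dma_size);
        return 0;
    }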
-diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
-index 499f4809fcdf3..66080fae072f2 100644
---- a/drivers/bluetooth/btusb.c
-+++ b/drivers/bluetooth/btusb.c
-@@ -543,6 +543,10 @@ static const struct usb_device_id quirks_table[] = {
- BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
-+ { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
-+ BTUSB_WIDEBAND_SPEECH },
-+ { USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
-+ BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
-
-@@ -2818,6 +2822,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
- goto err_free_wc;
- }
-
-+ if (data->evt_skb == NULL)
-+ goto err_free_wc;
-+
- /* Parse and handle the return WMT event */
- wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
- if (wmt_evt->whdr.op != hdr->op) {
-diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
-index 19ad0e7886462..a617578356953 100644
---- a/drivers/bluetooth/hci_bcm4377.c
-+++ b/drivers/bluetooth/hci_bcm4377.c
-@@ -512,6 +512,7 @@ struct bcm4377_hw {
- unsigned long disable_aspm : 1;
- unsigned long broken_ext_scan : 1;
- unsigned long broken_mws_transport_config : 1;
-+ unsigned long broken_le_coded : 1;
-
- int (*send_calibration)(struct bcm4377_data *bcm4377);
- int (*send_ptb)(struct bcm4377_data *bcm4377,
-@@ -2372,6 +2373,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
- if (bcm4377->hw->broken_ext_scan)
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
-+ if (bcm4377->hw->broken_le_coded)
-+ set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
-
- pci_set_drvdata(pdev, bcm4377);
- hci_set_drvdata(hdev, bcm4377);
-@@ -2461,6 +2464,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
- .bar0_core2_window2 = 0x18107000,
- .has_bar0_core2_window2 = true,
- .broken_mws_transport_config = true,
-+ .broken_le_coded = true,
- .send_calibration = bcm4378_send_calibration,
- .send_ptb = bcm4378_send_ptb,
- },
-@@ -2474,6 +2478,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
- .has_bar0_core2_window2 = true,
- .clear_pciecfg_subsystem_ctrl_bit19 = true,
- .broken_mws_transport_config = true,
-+ .broken_le_coded = true,
- .send_calibration = bcm4387_send_calibration,
- .send_ptb = bcm4378_send_ptb,
- },
-diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
-index c6f181702b9a7..edbc4d3381177 100644
---- a/drivers/char/agp/parisc-agp.c
-+++ b/drivers/char/agp/parisc-agp.c
-@@ -38,7 +38,7 @@ static struct _parisc_agp_info {
-
- int lba_cap_offset;
-
-- u64 *gatt;
-+ __le64 *gatt;
- u64 gatt_entries;
-
- u64 gart_base;
-@@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
- int i;
-
- for (i = 0; i < info->gatt_entries; i++) {
-- info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
-+ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
- }
-
- return 0;
-@@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
- for (k = 0;
- k < info->io_pages_per_kpage;
- k++, j++, paddr += info->io_page_size) {
-- info->gatt[j] =
-+ info->gatt[j] = cpu_to_le64(
- parisc_agp_mask_memory(agp_bridge,
-- paddr, type);
-+ paddr, type));
- asm_io_fdc(&info->gatt[j]);
- }
- }
-@@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
- io_pg_start = info->io_pages_per_kpage * pg_start;
- io_pg_count = info->io_pages_per_kpage * mem->page_count;
- for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
-- info->gatt[i] = agp_bridge->scratch_page;
-+ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
- }
-
- agp_bridge->driver->tlb_flush(mem);
-@@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
- pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
- pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
-
-- return cpu_to_le64(pa);
-+ /* return native (big-endian) PDIR entry */
-+ return pa;
- }
-
- static void
-@@ -251,7 +252,8 @@ static int __init
- agp_ioc_init(void __iomem *ioc_regs)
- {
- struct _parisc_agp_info *info = &parisc_agp_info;
-- u64 iova_base, *io_pdir, io_tlb_ps;
-+ u64 iova_base, io_tlb_ps;
-+ __le64 *io_pdir;
- int io_tlb_shift;
-
- printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
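The parisc-agp hunks above pin down the endianness of the GATT: entries are now typed __le64 and converted with cpu_to_le64() exactly once, at the point of store, rather than parisc_agp_mask_memory() baking the swap into its return value. On big-endian PA-RISC that conversion is a real byte swap; on little-endian machines it compiles away. A userspace illustration with htole64() (provided by <endian.h> on glibc and musl; other libcs may spell it differently):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t entry = 0x0123456789abcdefULL;  /* native (CPU) byte order */
        uint64_t wire  = htole64(entry);         /* fixed little-endian layout */

        /* Identical on little-endian CPUs; byte-reversed on big-endian ones. */
        printf("native=%016llx le=%016llx\n",
               (unsigned long long)entry, (unsigned long long)wire);
        return 0;
    }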
-diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
-index e19b0f9f48b97..4c08efe7f3753 100644
---- a/drivers/char/hw_random/bcm2835-rng.c
-+++ b/drivers/char/hw_random/bcm2835-rng.c
-@@ -70,7 +70,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
- while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
- if (!wait)
- return 0;
-- hwrng_msleep(rng, 1000);
-+ hwrng_yield(rng);
- }
-
- num_words = rng_readl(priv, RNG_STATUS) >> 24;
-diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
-index e3598ec9cfca8..420f155d251fb 100644
---- a/drivers/char/hw_random/core.c
-+++ b/drivers/char/hw_random/core.c
-@@ -678,6 +678,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
- }
- EXPORT_SYMBOL_GPL(hwrng_msleep);
-
-+long hwrng_yield(struct hwrng *rng)
-+{
-+ return wait_for_completion_interruptible_timeout(&rng->dying, 1);
-+}
-+EXPORT_SYMBOL_GPL(hwrng_yield);
-+
- static int __init hwrng_modinit(void)
- {
- int ret;
-diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
-index 12fbe80918319..159baf00a8675 100644
---- a/drivers/char/hw_random/geode-rng.c
-+++ b/drivers/char/hw_random/geode-rng.c
-@@ -58,7 +58,8 @@ struct amd_geode_priv {
-
- static int geode_rng_data_read(struct hwrng *rng, u32 *data)
- {
-- void __iomem *mem = (void __iomem *)rng->priv;
-+ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
-+ void __iomem *mem = priv->membase;
-
- *data = readl(mem + GEODE_RNG_DATA_REG);
-
-@@ -67,7 +68,8 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
-
- static int geode_rng_data_present(struct hwrng *rng, int wait)
- {
-- void __iomem *mem = (void __iomem *)rng->priv;
-+ struct amd_geode_priv *priv = (struct amd_geode_priv *)rng->priv;
-+ void __iomem *mem = priv->membase;
- int data, i;
-
- for (i = 0; i < 20; i++) {
-diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
-index e319cfa51a8a3..030186def9c69 100644
---- a/drivers/clk/clk-npcm7xx.c
-+++ b/drivers/clk/clk-npcm7xx.c
-@@ -510,7 +510,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
- return;
-
- npcm7xx_init_fail:
-- kfree(npcm7xx_clk_data->hws);
-+ kfree(npcm7xx_clk_data);
- npcm7xx_init_np_err:
- iounmap(clk_base);
- npcm7xx_init_error:
-diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
-index 2c7a830ce3080..fdec715c9ba9b 100644
---- a/drivers/clk/clk-scmi.c
-+++ b/drivers/clk/clk-scmi.c
-@@ -213,6 +213,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
- sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
- if (!sclk->info) {
- dev_dbg(dev, "invalid clock info for idx %d\n", idx);
-+ devm_kfree(dev, sclk);
- continue;
- }
-
-diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
-index f6b82e0b9703a..db3bca5f4ec9c 100644
---- a/drivers/clk/imx/Kconfig
-+++ b/drivers/clk/imx/Kconfig
-@@ -96,6 +96,7 @@ config CLK_IMX8QXP
- depends on (ARCH_MXC && ARM64) || COMPILE_TEST
- depends on IMX_SCU && HAVE_ARM_SMCCC
- select MXC_CLK_SCU
-+ select MXC_CLK
- help
- Build the driver for IMX8QXP SCU based clocks.
-
-diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
-index 1e82f72b75c67..1c95ae905eec8 100644
---- a/drivers/clk/imx/clk-imx8-acm.c
-+++ b/drivers/clk/imx/clk-imx8-acm.c
-@@ -279,8 +279,10 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
-
- for (i = 0; i < dev_pm->num_domains; i++) {
- dev_pm->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
-- if (IS_ERR(dev_pm->pd_dev[i]))
-- return PTR_ERR(dev_pm->pd_dev[i]);
-+ if (IS_ERR(dev_pm->pd_dev[i])) {
-+ ret = PTR_ERR(dev_pm->pd_dev[i]);
-+ goto detach_pm;
-+ }
-
- dev_pm->pd_dev_link[i] = device_link_add(dev,
- dev_pm->pd_dev[i],
-@@ -371,7 +373,7 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
- sels[i].shift, sels[i].width,
- 0, NULL, NULL);
- if (IS_ERR(hws[sels[i].clkid])) {
-- pm_runtime_disable(&pdev->dev);
-+ ret = PTR_ERR(hws[sels[i].clkid]);
- goto err_clk_register;
- }
- }
-@@ -381,12 +383,16 @@ static int imx8_acm_clk_probe(struct platform_device *pdev)
- ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
- if (ret < 0) {
- dev_err(dev, "failed to register hws for ACM\n");
-- pm_runtime_disable(&pdev->dev);
-+ goto err_clk_register;
- }
-
--err_clk_register:
-+ pm_runtime_put_sync(&pdev->dev);
-+ return 0;
-
-+err_clk_register:
- pm_runtime_put_sync(&pdev->dev);
-+ pm_runtime_disable(&pdev->dev);
-+ clk_imx_acm_detach_pm_domains(&pdev->dev, &priv->dev_pm);
-
- return ret;
- }
-diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
-index 4bd65879fcd34..f70ed231b92d6 100644
---- a/drivers/clk/imx/clk-imx8mq.c
-+++ b/drivers/clk/imx/clk-imx8mq.c
-@@ -288,8 +288,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
- void __iomem *base;
- int err;
-
-- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
-- IMX8MQ_CLK_END), GFP_KERNEL);
-+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MQ_CLK_END), GFP_KERNEL);
- if (WARN_ON(!clk_hw_data))
- return -ENOMEM;
-
-@@ -306,10 +305,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
- hws[IMX8MQ_CLK_EXT4] = imx_get_clk_hw_by_name(np, "clk_ext4");
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
-- base = of_iomap(np, 0);
-+ base = devm_of_iomap(dev, np, 0, NULL);
- of_node_put(np);
-- if (WARN_ON(!base))
-- return -ENOMEM;
-+ if (WARN_ON(IS_ERR(base))) {
-+ err = PTR_ERR(base);
-+ goto unregister_hws;
-+ }
-
- hws[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- hws[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
-@@ -395,8 +396,10 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
-
- np = dev->of_node;
- base = devm_platform_ioremap_resource(pdev, 0);
-- if (WARN_ON(IS_ERR(base)))
-- return PTR_ERR(base);
-+ if (WARN_ON(IS_ERR(base))) {
-+ err = PTR_ERR(base);
-+ goto unregister_hws;
-+ }
-
- /* CORE */
- hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
-diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
-index cadcbb318f5cf..4020aa4b79bf2 100644
---- a/drivers/clk/imx/clk-imx8qxp.c
-+++ b/drivers/clk/imx/clk-imx8qxp.c
-@@ -147,10 +147,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
- imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu("adc1_clk", IMX_SC_R_ADC_1, IMX_SC_PM_CLK_PER);
- imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
-+ imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
- imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
- imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
- imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
-- imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
-
- /* Audio SS */
- imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
-diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
-index ee5c72369334f..6bbdd4705d71f 100644
---- a/drivers/clk/keystone/pll.c
-+++ b/drivers/clk/keystone/pll.c
-@@ -281,12 +281,13 @@ static void __init of_pll_div_clk_init(struct device_node *node)
-
- clk = clk_register_divider(NULL, clk_name, parent_name, 0, reg, shift,
- mask, 0, NULL);
-- if (clk) {
-- of_clk_add_provider(node, of_clk_src_simple_get, clk);
-- } else {
-+ if (IS_ERR(clk)) {
- pr_err("%s: error registering divider %s\n", __func__, clk_name);
- iounmap(reg);
-+ return;
- }
-+
-+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
- }
- CLK_OF_DECLARE(pll_divider_clock, "ti,keystone,pll-divider-clock", of_pll_div_clk_init);
-
-@@ -328,10 +329,12 @@ static void __init of_pll_mux_clk_init(struct device_node *node)
- clk = clk_register_mux(NULL, clk_name, (const char **)&parents,
- ARRAY_SIZE(parents) , 0, reg, shift, mask,
- 0, NULL);
-- if (clk)
-- of_clk_add_provider(node, of_clk_src_simple_get, clk);
-- else
-+ if (IS_ERR(clk)) {
- pr_err("%s: error registering mux %s\n", __func__, clk_name);
-+ return;
-+ }
-+
-+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
- }
- CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init);
-
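Both keystone hunks above fix the same inverted test: clk_register_divider() and clk_register_mux() signal failure with an ERR_PTR()-encoded errno, never with NULL, so 'if (clk)' always took the success path and a failed registration was still added as a clock provider. The kernel packs small negative errnos into the top 4095 values of the pointer range; a self-contained reimplementation of the idiom (these ERR_PTR/IS_ERR definitions are local stand-ins, not the kernel headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* Error pointers live in the last 4095 values of the address space. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Fails the way clk_register_divider() does: with an encoded errno. */
    static void *register_divider(int fail)
    {
        static int dummy_clk;
        return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&dummy_clk;
    }

    int main(void)
    {
        void *clk = register_divider(1);

        if (IS_ERR(clk))            /* correct test: catches ERR_PTR(-ENOMEM) */
            printf("registration failed: %ld\n", PTR_ERR(clk));
        if (clk)                    /* the old test: an error pointer is non-NULL */
            printf("but 'if (clk)' still took the success path\n");
        return 0;
    }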
-diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
-index c81f3e33ce568..12d9560eb4ba2 100644
---- a/drivers/clk/mediatek/clk-mt2701.c
-+++ b/drivers/clk/mediatek/clk-mt2701.c
-@@ -667,6 +667,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
-@@ -747,6 +749,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node)
-
- if (!infra_clk_data) {
- infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
-+ if (!infra_clk_data)
-+ return;
-
- for (i = 0; i < CLK_INFRA_NR; i++)
- infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
-@@ -774,6 +778,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
-
- if (!infra_clk_data) {
- infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
-+ if (!infra_clk_data)
-+ return -ENOMEM;
- } else {
- for (i = 0; i < CLK_INFRA_NR; i++) {
- if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
-@@ -890,6 +896,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_gates(&pdev->dev, node, peri_clks,
- ARRAY_SIZE(peri_clks), clk_data);
-diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
-index 1f4c8d0c041ab..9c7f7407d7980 100644
---- a/drivers/clk/mediatek/clk-mt6765.c
-+++ b/drivers/clk/mediatek/clk-mt6765.c
-@@ -737,6 +737,8 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-
-@@ -769,6 +771,8 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
- clk_data);
-@@ -807,6 +811,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
- ARRAY_SIZE(ifr_clks), clk_data);
-diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
-index 3ee2f5a2319a0..ffedb1fe3c672 100644
---- a/drivers/clk/mediatek/clk-mt6779.c
-+++ b/drivers/clk/mediatek/clk-mt6779.c
-@@ -1217,6 +1217,8 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
-
-@@ -1237,6 +1239,8 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
-diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
-index 2ebd25f0ce71d..f12d4e9ff0bba 100644
---- a/drivers/clk/mediatek/clk-mt6797.c
-+++ b/drivers/clk/mediatek/clk-mt6797.c
-@@ -390,6 +390,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
- clk_data);
-@@ -545,6 +547,8 @@ static void mtk_infrasys_init_early(struct device_node *node)
-
- if (!infra_clk_data) {
- infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
-+ if (!infra_clk_data)
-+ return;
-
- for (i = 0; i < CLK_INFRA_NR; i++)
- infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
-@@ -570,6 +574,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
-
- if (!infra_clk_data) {
- infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR);
-+ if (!infra_clk_data)
-+ return -ENOMEM;
- } else {
- for (i = 0; i < CLK_INFRA_NR; i++) {
- if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER))
-diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
-index fe714debdc9ec..1bfedc988cfe8 100644
---- a/drivers/clk/mediatek/clk-mt7629-eth.c
-+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
-@@ -77,6 +77,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_gates(&pdev->dev, node, eth_clks,
- CLK_ETH_NR_CLK, clk_data);
-@@ -100,6 +102,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
- CLK_SGMII_NR_CLK, clk_data);
-diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
-index 2882107d0f240..b8a1f01bc974d 100644
---- a/drivers/clk/mediatek/clk-mt7629.c
-+++ b/drivers/clk/mediatek/clk-mt7629.c
-@@ -555,6 +555,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
-@@ -579,6 +581,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
- struct clk_hw_onecell_data *clk_data;
-
- clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_gates(&pdev->dev, node, infra_clks,
- ARRAY_SIZE(infra_clks), clk_data);
-@@ -602,6 +606,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
- return PTR_ERR(base);
-
- clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
-+ if (!clk_data)
-+ return -ENOMEM;
-
- mtk_clk_register_gates(&pdev->dev, node, peri_clks,
- ARRAY_SIZE(peri_clks), clk_data);
-diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
-index a4eca5fd539c8..513ab6b1b3229 100644
---- a/drivers/clk/mediatek/clk-pll.c
-+++ b/drivers/clk/mediatek/clk-pll.c
-@@ -321,10 +321,8 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
-
- ret = clk_hw_register(NULL, &pll->hw);
-
-- if (ret) {
-- kfree(pll);
-+ if (ret)
- return ERR_PTR(ret);
-- }
-
- return &pll->hw;
- }
-@@ -340,6 +338,8 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
- return ERR_PTR(-ENOMEM);
-
- hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
-+ if (IS_ERR(hw))
-+ kfree(pll);
-
- return hw;
- }
-diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
-index 865db5202e4cf..a79b837583894 100644
---- a/drivers/clk/qcom/Kconfig
-+++ b/drivers/clk/qcom/Kconfig
-@@ -131,6 +131,7 @@ config IPQ_APSS_6018
- tristate "IPQ APSS Clock Controller"
- select IPQ_APSS_PLL
- depends on QCOM_APCS_IPC || COMPILE_TEST
-+ depends on QCOM_SMEM
- help
- Support for APSS clock controller on IPQ platforms. The
- APSS clock controller manages the Mux and enable block that feeds the
-diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
-index e170331858cc1..41279e5437a62 100644
---- a/drivers/clk/qcom/apss-ipq-pll.c
-+++ b/drivers/clk/qcom/apss-ipq-pll.c
-@@ -68,13 +68,13 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
- .fw_name = "xo",
- },
- .num_parents = 1,
-- .ops = &clk_alpha_pll_stromer_ops,
-+ .ops = &clk_alpha_pll_stromer_plus_ops,
- },
- },
- };
-
- static const struct alpha_pll_config ipq5332_pll_config = {
-- .l = 0x3e,
-+ .l = 0x2d,
- .config_ctl_val = 0x4001075b,
- .config_ctl_hi_val = 0x304,
- .main_output_mask = BIT(0),
-diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
-index e4ef645f65d1f..892f2efc1c32c 100644
---- a/drivers/clk/qcom/clk-alpha-pll.c
-+++ b/drivers/clk/qcom/clk-alpha-pll.c
-@@ -2479,3 +2479,66 @@ const struct clk_ops clk_alpha_pll_stromer_ops = {
- .set_rate = clk_alpha_pll_stromer_set_rate,
- };
- EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_ops);
-+
-+static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
-+ unsigned long rate,
-+ unsigned long prate)
-+{
-+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
-+ u32 l, alpha_width = pll_alpha_width(pll);
-+ int ret, pll_mode;
-+ u64 a;
-+
-+ rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
-+
-+ ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &pll_mode);
-+ if (ret)
-+ return ret;
-+
-+ regmap_write(pll->clkr.regmap, PLL_MODE(pll), 0);
-+
-+ /* Delay of 2 output clock ticks required until output is disabled */
-+ udelay(1);
-+
-+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
-+
-+ if (alpha_width > ALPHA_BITWIDTH)
-+ a <<= alpha_width - ALPHA_BITWIDTH;
-+
-+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
-+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
-+ a >> ALPHA_BITWIDTH);
-+
-+ regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
-+
-+	/* Wait five microseconds or more */
-+ udelay(5);
-+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N,
-+ PLL_RESET_N);
-+
-+	/* The lock time should be less than 50 microseconds, worst case */
-+ usleep_range(50, 60);
-+
-+ ret = wait_for_pll_enable_lock(pll);
-+ if (ret) {
-+ pr_err("Wait for PLL enable lock failed [%s] %d\n",
-+ clk_hw_get_name(hw), ret);
-+ return ret;
-+ }
-+
-+ if (pll_mode & PLL_OUTCTRL)
-+ regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_OUTCTRL,
-+ PLL_OUTCTRL);
-+
-+ return 0;
-+}
-+
-+const struct clk_ops clk_alpha_pll_stromer_plus_ops = {
-+ .prepare = clk_alpha_pll_enable,
-+ .unprepare = clk_alpha_pll_disable,
-+ .is_enabled = clk_alpha_pll_is_enabled,
-+ .recalc_rate = clk_alpha_pll_recalc_rate,
-+ .determine_rate = clk_alpha_pll_stromer_determine_rate,
-+ .set_rate = clk_alpha_pll_stromer_plus_set_rate,
-+};
-+EXPORT_SYMBOL_GPL(clk_alpha_pll_stromer_plus_ops);
-diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
-index e4bd863027ab6..903fbab9b58e9 100644
---- a/drivers/clk/qcom/clk-alpha-pll.h
-+++ b/drivers/clk/qcom/clk-alpha-pll.h
-@@ -152,6 +152,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops;
- extern const struct clk_ops clk_alpha_pll_huayra_ops;
- extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
- extern const struct clk_ops clk_alpha_pll_stromer_ops;
-+extern const struct clk_ops clk_alpha_pll_stromer_plus_ops;
-
- extern const struct clk_ops clk_alpha_pll_fabia_ops;
- extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
-diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
-index e22baf3a7112a..5183c74b074f8 100644
---- a/drivers/clk/qcom/clk-rcg2.c
-+++ b/drivers/clk/qcom/clk-rcg2.c
-@@ -158,17 +158,11 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
- static unsigned long
- calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
- {
-- if (hid_div) {
-- rate *= 2;
-- rate /= hid_div + 1;
-- }
-+ if (hid_div)
-+ rate = mult_frac(rate, 2, hid_div + 1);
-
-- if (mode) {
-- u64 tmp = rate;
-- tmp *= m;
-- do_div(tmp, n);
-- rate = tmp;
-- }
-+ if (mode)
-+ rate = mult_frac(rate, m, n);
-
- return rate;
- }
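The calc_rate() rewrite above leans on mult_frac(), which evaluates x * n / d without letting the intermediate product overflow: it divides first and folds the remainder back in. That matters because 'rate *= 2' can wrap an unsigned long on 32-bit targets before the divide ever runs. A standalone 32-bit version showing the difference (written out here from the documented shape of the kernel macro, as an assumption rather than a copy):

    #include <stdint.h>
    #include <stdio.h>

    /* x * n / d without overflowing the intermediate x * n. */
    static uint32_t mult_frac32(uint32_t x, uint32_t n, uint32_t d)
    {
        uint32_t q = x / d, r = x % d;

        return q * n + (r * n) / d;
    }

    int main(void)
    {
        uint32_t rate = 3000000000u;          /* 3 GHz: rate * 2 wraps in 32 bits */

        printf("naive: %u\n", rate * 2 / 3);  /* overflows before the divide */
        printf("mult_frac: %u\n", mult_frac32(rate, 2, 3));
        return 0;
    }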
-diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
-index 19dc2b71cacf0..2a3c0659b7008 100644
---- a/drivers/clk/qcom/gcc-ipq5018.c
-+++ b/drivers/clk/qcom/gcc-ipq5018.c
-@@ -128,7 +128,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -143,7 +142,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -158,7 +156,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
-index b02026f8549b2..f98591148a976 100644
---- a/drivers/clk/qcom/gcc-ipq5332.c
-+++ b/drivers/clk/qcom/gcc-ipq5332.c
-@@ -71,7 +71,6 @@ static struct clk_fixed_factor gpll0_div2 = {
- &gpll0_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -85,7 +84,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
- &gpll0_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -114,7 +112,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
- &gpll2_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -154,7 +151,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- &gpll4_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
-index 6120fbbc5de05..f9494fa1b8716 100644
---- a/drivers/clk/qcom/gcc-ipq6018.c
-+++ b/drivers/clk/qcom/gcc-ipq6018.c
-@@ -72,7 +72,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
- &gpll0_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -86,7 +85,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
- &gpll0_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -161,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
- &gpll6_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -192,7 +189,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- &gpll4_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -243,7 +239,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
- &gpll2_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -274,7 +269,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
- &nss_crypto_pll_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
-index 63ac2ced76bb9..b7faf12a511a1 100644
---- a/drivers/clk/qcom/gcc-ipq8074.c
-+++ b/drivers/clk/qcom/gcc-ipq8074.c
-@@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
- &gpll0_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -121,7 +120,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
- &gpll2_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -154,7 +152,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- &gpll4_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -188,7 +185,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
- &gpll6_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -201,7 +197,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
- &gpll6_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-@@ -266,7 +261,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
- &nss_crypto_pll_main.clkr.hw },
- .num_parents = 1,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
-- .flags = CLK_SET_RATE_PARENT,
- },
- };
-
-diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
-index 8f430367299e6..e8190108e1aef 100644
---- a/drivers/clk/qcom/gcc-ipq9574.c
-+++ b/drivers/clk/qcom/gcc-ipq9574.c
-@@ -87,7 +87,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
- &gpll0_main.clkr.hw
- },
- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_fixed_factor_ops,
- },
- };
-@@ -102,7 +101,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
- &gpll0_main.clkr.hw
- },
- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
- },
- };
-@@ -132,7 +130,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
- &gpll4_main.clkr.hw
- },
- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
- },
- };
-@@ -162,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
- &gpll2_main.clkr.hw
- },
- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_alpha_pll_postdiv_ro_ops,
- },
- };
-diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
-index 14dcc3f036683..e7b03a17514a5 100644
---- a/drivers/clk/qcom/gcc-msm8996.c
-+++ b/drivers/clk/qcom/gcc-msm8996.c
-@@ -244,71 +244,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
- { .hw = &gpll0_early_div.hw }
- };
-
--static const struct freq_tbl ftbl_system_noc_clk_src[] = {
-- F(19200000, P_XO, 1, 0, 0),
-- F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
-- F(100000000, P_GPLL0, 6, 0, 0),
-- F(150000000, P_GPLL0, 4, 0, 0),
-- F(200000000, P_GPLL0, 3, 0, 0),
-- F(240000000, P_GPLL0, 2.5, 0, 0),
-- { }
--};
--
--static struct clk_rcg2 system_noc_clk_src = {
-- .cmd_rcgr = 0x0401c,
-- .hid_width = 5,
-- .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
-- .freq_tbl = ftbl_system_noc_clk_src,
-- .clkr.hw.init = &(struct clk_init_data){
-- .name = "system_noc_clk_src",
-- .parent_data = gcc_xo_gpll0_gpll0_early_div,
-- .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll0_early_div),
-- .ops = &clk_rcg2_ops,
-- },
--};
--
--static const struct freq_tbl ftbl_config_noc_clk_src[] = {
-- F(19200000, P_XO, 1, 0, 0),
-- F(37500000, P_GPLL0, 16, 0, 0),
-- F(75000000, P_GPLL0, 8, 0, 0),
-- { }
--};
--
--static struct clk_rcg2 config_noc_clk_src = {
-- .cmd_rcgr = 0x0500c,
-- .hid_width = 5,
-- .parent_map = gcc_xo_gpll0_map,
-- .freq_tbl = ftbl_config_noc_clk_src,
-- .clkr.hw.init = &(struct clk_init_data){
-- .name = "config_noc_clk_src",
-- .parent_data = gcc_xo_gpll0,
-- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
-- .ops = &clk_rcg2_ops,
-- },
--};
--
--static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
-- F(19200000, P_XO, 1, 0, 0),
-- F(37500000, P_GPLL0, 16, 0, 0),
-- F(50000000, P_GPLL0, 12, 0, 0),
-- F(75000000, P_GPLL0, 8, 0, 0),
-- F(100000000, P_GPLL0, 6, 0, 0),
-- { }
--};
--
--static struct clk_rcg2 periph_noc_clk_src = {
-- .cmd_rcgr = 0x06014,
-- .hid_width = 5,
-- .parent_map = gcc_xo_gpll0_map,
-- .freq_tbl = ftbl_periph_noc_clk_src,
-- .clkr.hw.init = &(struct clk_init_data){
-- .name = "periph_noc_clk_src",
-- .parent_data = gcc_xo_gpll0,
-- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
-- .ops = &clk_rcg2_ops,
-- },
--};
--
- static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(120000000, P_GPLL0, 5, 0, 0),
-@@ -1297,11 +1232,7 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mmss_noc_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
-+ .flags = CLK_IGNORE_UNUSED,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1464,11 +1395,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_usb_phy_cfg_ahb2phy_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1498,11 +1424,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc1_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1549,11 +1470,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc2_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1583,11 +1499,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc3_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1617,11 +1528,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sdcc4_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1635,11 +1541,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
- .enable_mask = BIT(17),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp1_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -1977,11 +1878,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
- .enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_blsp2_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2318,11 +2214,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pdm_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2353,11 +2244,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
- .enable_mask = BIT(13),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_prng_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2370,11 +2256,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_tsif_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2422,11 +2303,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
- .enable_mask = BIT(10),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_boot_rom_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2520,11 +2396,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_0_slv_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2537,11 +2408,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_0_mstr_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2554,11 +2420,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_0_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2606,11 +2467,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_1_slv_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2623,11 +2479,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_1_mstr_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2640,11 +2491,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_1_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2692,11 +2538,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_2_slv_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2709,11 +2550,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_2_mstr_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2726,11 +2562,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_2_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2778,11 +2609,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_pcie_phy_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -2829,11 +2655,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_ufs_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3060,11 +2881,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre0_snoc_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
-+ .flags = CLK_IS_CRITICAL,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3077,11 +2894,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre0_cnoc_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
-+ .flags = CLK_IS_CRITICAL,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3094,11 +2907,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_smmu_aggre0_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
-+ .flags = CLK_IS_CRITICAL,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3111,11 +2920,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_smmu_aggre0_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
-+ .flags = CLK_IS_CRITICAL,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3162,10 +2967,6 @@ static struct clk_branch gcc_dcc_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_dcc_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3178,10 +2979,6 @@ static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3194,11 +2991,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_qspi_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &periph_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
-- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3347,10 +3139,6 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_cfg_ahb_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &config_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3363,10 +3151,6 @@ static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_mnoc_bimc_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3379,10 +3163,6 @@ static struct clk_branch gcc_mss_snoc_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_snoc_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3395,10 +3175,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_q6_bimc_axi_clk",
-- .parent_hws = (const struct clk_hw*[]){
-- &system_noc_clk_src.clkr.hw,
-- },
-- .num_parents = 1,
- .ops = &clk_branch2_ops,
- },
- },
-@@ -3495,9 +3271,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
- [GPLL0] = &gpll0.clkr,
- [GPLL4_EARLY] = &gpll4_early.clkr,
- [GPLL4] = &gpll4.clkr,
-- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
-- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
-- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
- [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
- [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
- [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
-diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
-index 41ab210875fb2..05d115c52dfeb 100644
---- a/drivers/clk/qcom/gcc-sm8150.c
-+++ b/drivers/clk/qcom/gcc-sm8150.c
-@@ -774,7 +774,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
- .name = "gcc_sdcc2_apps_clk_src",
- .parent_data = gcc_parents_6,
- .num_parents = ARRAY_SIZE(gcc_parents_6),
-- .flags = CLK_SET_RATE_PARENT,
-+ .flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_floor_ops,
- },
- };
-diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
-index a023c4374be96..1180e48c687ac 100644
---- a/drivers/clk/qcom/mmcc-msm8998.c
-+++ b/drivers/clk/qcom/mmcc-msm8998.c
-@@ -2439,6 +2439,7 @@ static struct clk_branch fd_ahb_clk = {
-
- static struct clk_branch mnoc_ahb_clk = {
- .halt_reg = 0x5024,
-+ .halt_check = BRANCH_HALT_SKIP,
- .clkr = {
- .enable_reg = 0x5024,
- .enable_mask = BIT(0),
-@@ -2454,6 +2455,7 @@ static struct clk_branch mnoc_ahb_clk = {
-
- static struct clk_branch bimc_smmu_ahb_clk = {
- .halt_reg = 0xe004,
-+ .halt_check = BRANCH_HALT_SKIP,
- .hwcg_reg = 0xe004,
- .hwcg_bit = 1,
- .clkr = {
-@@ -2471,6 +2473,7 @@ static struct clk_branch bimc_smmu_ahb_clk = {
-
- static struct clk_branch bimc_smmu_axi_clk = {
- .halt_reg = 0xe008,
-+ .halt_check = BRANCH_HALT_SKIP,
- .hwcg_reg = 0xe008,
- .hwcg_bit = 1,
- .clkr = {
-@@ -2607,11 +2610,13 @@ static struct gdsc camss_cpp_gdsc = {
- static struct gdsc bimc_smmu_gdsc = {
- .gdscr = 0xe020,
- .gds_hw_ctrl = 0xe024,
-+ .cxcs = (unsigned int []){ 0xe008 },
-+ .cxc_count = 1,
- .pd = {
- .name = "bimc_smmu",
- },
- .pwrsts = PWRSTS_OFF_ON,
-- .flags = HW_CTRL | ALWAYS_ON,
-+ .flags = VOTABLE,
- };
-
- static struct clk_regmap *mmcc_msm8998_clocks[] = {
-diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
-index 1e7991439527a..50a443bf79ecd 100644
---- a/drivers/clk/ralink/clk-mtmips.c
-+++ b/drivers/clk/ralink/clk-mtmips.c
-@@ -821,6 +821,10 @@ static const struct mtmips_clk_data mt76x8_clk_data = {
- };
-
- static const struct of_device_id mtmips_of_match[] = {
-+ {
-+ .compatible = "ralink,rt2880-reset",
-+ .data = NULL,
-+ },
- {
- .compatible = "ralink,rt2880-sysc",
- .data = &rt2880_clk_data,
-@@ -1088,25 +1092,11 @@ static int mtmips_clk_probe(struct platform_device *pdev)
- return 0;
- }
-
--static const struct of_device_id mtmips_clk_of_match[] = {
-- { .compatible = "ralink,rt2880-reset" },
-- { .compatible = "ralink,rt2880-sysc" },
-- { .compatible = "ralink,rt3050-sysc" },
-- { .compatible = "ralink,rt3052-sysc" },
-- { .compatible = "ralink,rt3352-sysc" },
-- { .compatible = "ralink,rt3883-sysc" },
-- { .compatible = "ralink,rt5350-sysc" },
-- { .compatible = "ralink,mt7620-sysc" },
-- { .compatible = "ralink,mt7628-sysc" },
-- { .compatible = "ralink,mt7688-sysc" },
-- {}
--};
--
- static struct platform_driver mtmips_clk_driver = {
- .probe = mtmips_clk_probe,
- .driver = {
- .name = "mtmips-clk",
-- .of_match_table = mtmips_clk_of_match,
-+ .of_match_table = mtmips_of_match,
- },
- };
-
-diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
-index e2e0447de1901..5a15f8788b922 100644
---- a/drivers/clk/renesas/rcar-cpg-lib.c
-+++ b/drivers/clk/renesas/rcar-cpg-lib.c
-@@ -70,8 +70,21 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
- #define STPnHCK BIT(9 - SDnSRCFC_SHIFT)
-
- static const struct clk_div_table cpg_sdh_div_table[] = {
-+ /*
-+ * These values are recommended by the datasheet. Because they come
-+ * first, Linux will only use these.
-+ */
- { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
-- { STPnHCK | 4, 16 }, { 0, 0 },
-+ { STPnHCK | 4, 16 },
-+ /*
-+	 * These values are not recommended because STPnHCK is wrong. But they
-+	 * have been seen in the wild because of broken firmware, so we support
-+	 * reading them; Linux will sanitize them on initialization through
-+	 * recalc_rate.
-+ */
-+ { STPnHCK | 0, 1 }, { STPnHCK | 1, 2 }, { 2, 4 }, { 3, 8 }, { 4, 16 },
-+ /* Sentinel */
-+ { 0, 0 }
- };
-
- struct clk * __init cpg_sdh_clk_register(const char *name,
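The cpg_sdh_div_table change above works because the common clock divider scans the table in order and the first entry with a matching divisor wins: keeping the recommended encodings in front means rate setting always emits them, while the appended not-recommended encodings exist only so values left behind by broken firmware can still be decoded. A small sketch of that first-match property (table values are illustrative, not the real SDnH register layout):

    #include <stdio.h>

    struct div_entry { unsigned val, div; };    /* { register value, divisor } */

    /* Recommended encodings first; firmware-observed aliases after them. */
    static const struct div_entry table[] = {
        { 0x0, 1 }, { 0x1, 2 }, { 0x202, 4 },
        { 0x2, 4 },                             /* alias for /4 seen in the wild */
        { 0, 0 }                                /* sentinel */
    };

    /* The first entry with a matching divisor wins, so rate setting
     * always emits a recommended encoding; aliases are only decoded. */
    static unsigned encode_div(unsigned div)
    {
        for (const struct div_entry *e = table; e->div; e++)
            if (e->div == div)
                return e->val;
        return 0;
    }

    int main(void)
    {
        printf("div 4 -> val 0x%x\n", encode_div(4));   /* 0x202, not 0x2 */
        return 0;
    }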
-diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
-index 47f488387f33a..3f01620e292b6 100644
---- a/drivers/clk/renesas/rzg2l-cpg.c
-+++ b/drivers/clk/renesas/rzg2l-cpg.c
-@@ -11,6 +11,7 @@
- * Copyright (C) 2015 Renesas Electronics Corp.
- */
-
-+#include <linux/bitfield.h>
- #include <linux/clk.h>
- #include <linux/clk-provider.h>
- #include <linux/clk/renesas.h>
-@@ -38,14 +39,13 @@
- #define WARN_DEBUG(x) do { } while (0)
- #endif
-
--#define DIV_RSMASK(v, s, m) ((v >> s) & m)
- #define GET_SHIFT(val) ((val >> 12) & 0xff)
- #define GET_WIDTH(val) ((val >> 8) & 0xf)
-
--#define KDIV(val) DIV_RSMASK(val, 16, 0xffff)
--#define MDIV(val) DIV_RSMASK(val, 6, 0x3ff)
--#define PDIV(val) DIV_RSMASK(val, 0, 0x3f)
--#define SDIV(val) DIV_RSMASK(val, 0, 0x7)
-+#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), val))
-+#define MDIV(val) FIELD_GET(GENMASK(15, 6), val)
-+#define PDIV(val) FIELD_GET(GENMASK(5, 0), val)
-+#define SDIV(val) FIELD_GET(GENMASK(2, 0), val)
-
- #define CLK_ON_R(reg) (reg)
- #define CLK_MON_R(reg) (0x180 + (reg))
-@@ -188,7 +188,9 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
- u32 off = GET_REG_OFFSET(hwdata->conf);
- u32 shift = GET_SHIFT(hwdata->conf);
- const u32 clk_src_266 = 2;
-- u32 bitmask;
-+ u32 msk, val, bitmask;
-+ unsigned long flags;
-+ int ret;
-
- /*
- * As per the HW manual, we should not directly switch from 533 MHz to
-@@ -202,26 +204,30 @@ static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
- * the index to value mapping is done by adding 1 to the index.
- */
- bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
-+ msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
-+ spin_lock_irqsave(&priv->rmw_lock, flags);
- if (index != clk_src_266) {
-- u32 msk, val;
-- int ret;
--
- writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
-
-- msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
--
-- ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
-- !(val & msk), 100,
-- CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
-- if (ret) {
-- dev_err(priv->dev, "failed to switch clk source\n");
-- return ret;
-- }
-+ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
-+ !(val & msk), 10,
-+ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
-+ if (ret)
-+ goto unlock;
- }
-
- writel(bitmask | ((index + 1) << shift), priv->base + off);
-
-- return 0;
-+ ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
-+ !(val & msk), 10,
-+ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
-+unlock:
-+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
-+
-+ if (ret)
-+ dev_err(priv->dev, "failed to switch clk source\n");
-+
-+ return ret;
- }
-
- static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
-@@ -232,14 +238,8 @@ static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
-
- val >>= GET_SHIFT(hwdata->conf);
- val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
-- if (val) {
-- val--;
-- } else {
-- /* Prohibited clk source, change it to 533 MHz(reset value) */
-- rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
-- }
-
-- return val;
-+ return val ? val - 1 : 0;
- }
-
- static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
-@@ -695,18 +695,18 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
- struct pll_clk *pll_clk = to_pll(hw);
- struct rzg2l_cpg_priv *priv = pll_clk->priv;
- unsigned int val1, val2;
-- unsigned int mult = 1;
-- unsigned int div = 1;
-+ u64 rate;
-
- if (pll_clk->type != CLK_TYPE_SAM_PLL)
- return parent_rate;
-
- val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
- val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
-- mult = MDIV(val1) + KDIV(val1) / 65536;
-- div = PDIV(val1) << SDIV(val2);
-
-- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
-+ rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
-+ 16 + SDIV(val2));
-+
-+ return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
- }
-
- static const struct clk_ops rzg2l_cpg_pll_ops = {
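The recalc_rate() fix above keeps the fractional part of the PLL multiplier: KDIV is a signed Q16 fraction, and the old 'KDIV(val1) / 65536' truncated it to zero in integer math. Forming (M << 16) + K and shifting the product right by 16 + S preserves it, with a closest-rounding divide by P at the end. A standalone equivalent using GCC/Clang's unsigned __int128 in place of the kernel's mul_u64_u32_shr() helper (parameter values below are illustrative, not taken from the SoC):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* rate = parent * (m + k / 65536) / (p << s); k is a signed Q16 fraction. */
    static uint64_t pll_rate(uint64_t parent, uint32_t m, int16_t k,
                             uint32_t p, uint32_t s)
    {
        unsigned __int128 acc = (unsigned __int128)parent * ((m << 16) + k);
        uint64_t rate = (uint64_t)(acc >> (16 + s));

        return (rate + p / 2) / p;          /* DIV_ROUND_CLOSEST */
    }

    int main(void)
    {
        /* 24 MHz reference, M=50, K=-32768 (i.e. -0.5), P=2, S=0 -> 594 MHz. */
        printf("%" PRIu64 " Hz\n", pll_rate(24000000, 50, -32768, 2, 0));
        return 0;
    }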
-diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
-index 6cee9e56acc72..91e9c2569f801 100644
---- a/drivers/clk/renesas/rzg2l-cpg.h
-+++ b/drivers/clk/renesas/rzg2l-cpg.h
-@@ -43,7 +43,7 @@
- #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
- #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
-
--#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000
-+#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200
-
- /* n = 0/1/2 for PLL1/4/6 */
- #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
-diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
-index 75234e0783e1c..83fe4eb3133cb 100644
---- a/drivers/clk/socfpga/stratix10-clk.h
-+++ b/drivers/clk/socfpga/stratix10-clk.h
-@@ -7,8 +7,10 @@
- #define __STRATIX10_CLK_H
-
- struct stratix10_clock_data {
-- struct clk_hw_onecell_data clk_data;
- void __iomem *base;
-+
-+ /* Must be last */
-+ struct clk_hw_onecell_data clk_data;
- };
-
- struct stratix10_pll_clock {
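The stratix10 hunk above (and the visconti/pll.h one a little further down) moves clk_hw_onecell_data to the end of the wrapping struct because it finishes with a flexible hws[] array: at allocation time the array grows past the struct, so any member placed after it would be silently overlaid. A sketch of the rule and the matching struct_size()-style allocation, flattened into a single struct so it stays standard C:

    #include <stdio.h>
    #include <stdlib.h>

    struct onecell {                 /* stand-in for clk_hw_onecell_data */
        void *base;                  /* fixed members come first ... */
        unsigned num;
        void *hws[];                 /* ... flexible array member comes last */
    };

    int main(void)
    {
        unsigned n = 8;
        /* struct_size()-style allocation: header plus n trailing slots. */
        struct onecell *p = malloc(sizeof(*p) + n * sizeof(p->hws[0]));

        if (!p)
            return 1;
        p->num = n;
        printf("allocated %zu bytes for %u clocks\n",
               sizeof(*p) + n * sizeof(p->hws[0]), p->num);
        free(p);
        return 0;
    }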
-diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
-index 768a1f3398b47..5d5bb123ba949 100644
---- a/drivers/clk/ti/divider.c
-+++ b/drivers/clk/ti/divider.c
-@@ -309,7 +309,6 @@ static struct clk *_register_divider(struct device_node *node,
- u32 flags,
- struct clk_omap_divider *div)
- {
-- struct clk *clk;
- struct clk_init_data init;
- const char *parent_name;
- const char *name;
-@@ -326,12 +325,7 @@ static struct clk *_register_divider(struct device_node *node,
- div->hw.init = &init;
-
- /* register the clock */
-- clk = of_ti_clk_register(node, &div->hw, name);
--
-- if (IS_ERR(clk))
-- kfree(div);
--
-- return clk;
-+ return of_ti_clk_register(node, &div->hw, name);
- }
-
- int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
-diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
-index 01d07f1bf01b1..c4bd40676da4b 100644
---- a/drivers/clk/visconti/pll.h
-+++ b/drivers/clk/visconti/pll.h
-@@ -15,8 +15,10 @@
-
- struct visconti_pll_provider {
- void __iomem *reg_base;
-- struct clk_hw_onecell_data clk_data;
- struct device_node *node;
-+
-+ /* Must be last */
-+ struct clk_hw_onecell_data clk_data;
- };
-
- #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
-diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
-index 7dd2c615bce23..071b04f1ee730 100644
---- a/drivers/clocksource/arm_arch_timer.c
-+++ b/drivers/clocksource/arm_arch_timer.c
-@@ -836,8 +836,9 @@ static u64 __arch_timer_check_delta(void)
- * Note that TVAL is signed, thus has only 31 of its
- * 32 bits to express magnitude.
- */
-- MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
-- APM_CPU_PART_POTENZA)),
-+ MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
-+ APM_CPU_PART_XGENE),
-+ APM_CPU_VAR_POTENZA, 0x0, 0xf),
- {},
- };
-
-diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
-index 27af17c995900..2a90c92a9182a 100644
---- a/drivers/clocksource/timer-atmel-tcb.c
-+++ b/drivers/clocksource/timer-atmel-tcb.c
-@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
- writel(mck_divisor_idx /* likely divide-by-8 */
- | ATMEL_TC_WAVE
- | ATMEL_TC_WAVESEL_UP /* free-run */
-+ | ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
- | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
- | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
- tcaddr + ATMEL_TC_REG(0, CMR));
-diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
-index 28ab4f1a7c713..6a878d227a13b 100644
---- a/drivers/clocksource/timer-imx-gpt.c
-+++ b/drivers/clocksource/timer-imx-gpt.c
-@@ -434,12 +434,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
- return -ENOMEM;
-
- imxtm->base = of_iomap(np, 0);
-- if (!imxtm->base)
-- return -ENXIO;
-+ if (!imxtm->base) {
-+ ret = -ENXIO;
-+ goto err_kfree;
-+ }
-
- imxtm->irq = irq_of_parse_and_map(np, 0);
-- if (imxtm->irq <= 0)
-- return -EINVAL;
-+ if (imxtm->irq <= 0) {
-+ ret = -EINVAL;
-+ goto err_kfree;
-+ }
-
- imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
-
-@@ -452,11 +456,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
-
- ret = _mxc_timer_init(imxtm);
- if (ret)
-- return ret;
-+ goto err_kfree;
-
- initialized = 1;
-
- return 0;
-+
-+err_kfree:
-+ kfree(imxtm);
-+ return ret;
- }
-
- static int __init imx1_timer_init_dt(struct device_node *np)
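
The imx-gpt hunk above converts early error returns that leaked imxtm into a single err_kfree exit. A minimal sketch of that single-exit cleanup idiom, with invented names:

    #include <errno.h>
    #include <stdlib.h>

    /*
     * Every failure after the allocation funnels through one label, so
     * the buffer cannot leak on an early error return.
     */
    struct timer_state { int irq; };

    static int timer_init(int irq)
    {
        struct timer_state *t = calloc(1, sizeof(*t));
        int ret;

        if (!t)
            return -ENOMEM;

        if (irq <= 0) {
            ret = -EINVAL;
            goto err_kfree;
        }
        t->irq = irq;

        return 0;           /* success: t stays owned by the subsystem */

    err_kfree:
        free(t);
        return ret;
    }

    int main(void)
    {
        return timer_init(0) == -EINVAL ? 0 : 1;
    }
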
-diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
-index 09ab29cb7f641..5f60f6bd33866 100644
---- a/drivers/clocksource/timer-ti-dm.c
-+++ b/drivers/clocksource/timer-ti-dm.c
-@@ -140,6 +140,8 @@ struct dmtimer {
- struct platform_device *pdev;
- struct list_head node;
- struct notifier_block nb;
-+ struct notifier_block fclk_nb;
-+ unsigned long fclk_rate;
- };
-
- static u32 omap_reserved_systimers;
-@@ -253,8 +255,7 @@ static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
- timer->posted = OMAP_TIMER_POSTED;
- }
-
--static inline void __omap_dm_timer_stop(struct dmtimer *timer,
-- unsigned long rate)
-+static inline void __omap_dm_timer_stop(struct dmtimer *timer)
- {
- u32 l;
-
-@@ -269,7 +270,7 @@ static inline void __omap_dm_timer_stop(struct dmtimer *timer,
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
-- udelay(3500000 / rate + 1);
-+ udelay(3500000 / timer->fclk_rate + 1);
- #endif
- }
-
-@@ -348,6 +349,21 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
- return NOTIFY_OK;
- }
-
-+static int omap_timer_fclk_notifier(struct notifier_block *nb,
-+ unsigned long event, void *data)
-+{
-+ struct clk_notifier_data *clk_data = data;
-+ struct dmtimer *timer = container_of(nb, struct dmtimer, fclk_nb);
-+
-+ switch (event) {
-+ case POST_RATE_CHANGE:
-+ timer->fclk_rate = clk_data->new_rate;
-+ return NOTIFY_OK;
-+ default:
-+ return NOTIFY_DONE;
-+ }
-+}
-+
- static int omap_dm_timer_reset(struct dmtimer *timer)
- {
- u32 l, timeout = 100000;
-@@ -754,7 +770,6 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
- {
- struct dmtimer *timer;
- struct device *dev;
-- unsigned long rate = 0;
-
- timer = to_dmtimer(cookie);
- if (unlikely(!timer))
-@@ -762,10 +777,7 @@ static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
-
- dev = &timer->pdev->dev;
-
-- if (!timer->omap1)
-- rate = clk_get_rate(timer->fclk);
--
-- __omap_dm_timer_stop(timer, rate);
-+ __omap_dm_timer_stop(timer);
-
- pm_runtime_put_sync(dev);
-
-@@ -1124,6 +1136,14 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
- timer->fclk = devm_clk_get(dev, "fck");
- if (IS_ERR(timer->fclk))
- return PTR_ERR(timer->fclk);
-+
-+ timer->fclk_nb.notifier_call = omap_timer_fclk_notifier;
-+ ret = devm_clk_notifier_register(dev, timer->fclk,
-+ &timer->fclk_nb);
-+ if (ret)
-+ return ret;
-+
-+ timer->fclk_rate = clk_get_rate(timer->fclk);
- } else {
- timer->fclk = ERR_PTR(-ENODEV);
- }
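
The timer-ti-dm hunk above caches the functional clock rate in the dmtimer and keeps it current through a clk rate-change notifier, so the stop path no longer has to call clk_get_rate(). A userspace sketch of the container_of-based callback; all names are illustrative:

    #include <stddef.h>
    #include <assert.h>

    /*
     * container_of() recovers the outer object from the embedded
     * notifier, exactly as the kernel macro does.
     */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct notifier {
        void (*call)(struct notifier *nb, unsigned long new_rate);
    };

    struct dmtimer {
        unsigned long fclk_rate;   /* cached; read by the stop path */
        struct notifier fclk_nb;   /* embedded notifier block */
    };

    static void fclk_changed(struct notifier *nb, unsigned long new_rate)
    {
        struct dmtimer *t = container_of(nb, struct dmtimer, fclk_nb);

        t->fclk_rate = new_rate;   /* refresh cache on a rate change */
    }

    int main(void)
    {
        struct dmtimer t = { .fclk_rate = 32768, .fclk_nb = { fclk_changed } };

        t.fclk_nb.call(&t.fclk_nb, 24000000);
        assert(t.fclk_rate == 24000000);
        return 0;
    }
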
-diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
-index 9a1e194d5cf88..1f6186475715e 100644
---- a/drivers/cpufreq/amd-pstate.c
-+++ b/drivers/cpufreq/amd-pstate.c
-@@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
--
-+ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
--
-+ WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
- return 0;
- }
-
-@@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
- highest_perf = cppc_perf.highest_perf;
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
--
-+ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
-+ WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- return 0;
-@@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
- u64 value = prev;
-
-+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
-+ cpudata->max_limit_perf);
-+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
-+ cpudata->max_limit_perf);
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
- if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
-@@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
- return 0;
- }
-
-+static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
-+{
-+ u32 max_limit_perf, min_limit_perf;
-+ struct amd_cpudata *cpudata = policy->driver_data;
-+
-+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
-+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
-+
-+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
-+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
-+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
-+
-+ return 0;
-+}
-+
- static int amd_pstate_update_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, bool fast_switch)
- {
-@@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
- if (!cpudata->max_freq)
- return -ENODEV;
-
-+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
-+ amd_pstate_update_min_max_limit(policy);
-+
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
-@@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
- static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
- unsigned int target_freq)
- {
-- return amd_pstate_update_freq(policy, target_freq, true);
-+ if (!amd_pstate_update_freq(policy, target_freq, true))
-+ return target_freq;
-+ return policy->cur;
- }
-
- static void amd_pstate_adjust_perf(unsigned int cpu,
-@@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned int target_freq;
-
-+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
-+ amd_pstate_update_min_max_limit(policy);
-+
-+
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
-@@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
-+ cpudata->max_limit_freq = max_freq;
-+ cpudata->min_limit_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
-@@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
- {
- int i = 0;
- int offset = 0;
-+ struct amd_cpudata *cpudata = policy->driver_data;
-+
-+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-+ return sysfs_emit_at(buf, offset, "%s\n",
-+ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
-
- while (energy_perf_strings[i] != NULL)
- offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
-
-- sysfs_emit_at(buf, offset, "\n");
-+ offset += sysfs_emit_at(buf, offset, "\n");
-
- return offset;
- }
-@@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
- return 0;
- }
-
--static void amd_pstate_epp_init(unsigned int cpu)
-+static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
- {
-- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
-- u32 max_perf, min_perf;
-+ u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
- u64 value;
- s16 epp;
-
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
-+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
-+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
-+
-+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
-+ cpudata->max_limit_perf);
-+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
-+ cpudata->max_limit_perf);
-+
-+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
-+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
- value = READ_ONCE(cpudata->cppc_req_cached);
-
-@@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
-
-- if (cpudata->epp_policy == cpudata->policy)
-- goto skip_epp;
--
- cpudata->epp_policy = cpudata->policy;
-
- /* Get BIOS pre-defined epp value */
-@@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
-- goto skip_epp;
-+ return;
- }
-
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-@@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- amd_pstate_set_epp(cpudata, epp);
--skip_epp:
-- cpufreq_cpu_put(policy);
- }
-
- static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
-@@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
-
- cpudata->policy = policy->policy;
-
-- amd_pstate_epp_init(policy->cpu);
-+ amd_pstate_epp_update_limit(policy);
-
- return 0;
- }
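
The amd-pstate hunks above bound the perf request twice: the driver's min/max are first clamped into the user policy window (min_limit_perf..max_limit_perf), then the desired perf is clamped between the results. A plain-C sketch of that ordering, standing in for clamp_t():

    #include <assert.h>

    static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                                  unsigned long hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static unsigned long pick_perf(unsigned long des, unsigned long min,
                                   unsigned long max, unsigned long lim_lo,
                                   unsigned long lim_hi)
    {
        /* confine the driver bounds to the policy window first */
        min = clamp_ul(min, lim_lo, lim_hi);
        max = clamp_ul(max, lim_lo, lim_hi);
        /* then confine the request to that window */
        return clamp_ul(des, min, max);
    }

    int main(void)
    {
        /* a request above the policy cap is pulled down to the cap */
        assert(pick_perf(255, 10, 255, 10, 128) == 128);
        return 0;
    }
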
-diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
-index a33df3c66c88c..40a9ff18da068 100644
---- a/drivers/cpufreq/cpufreq_stats.c
-+++ b/drivers/cpufreq/cpufreq_stats.c
-@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
- len += sysfs_emit_at(buf, len, " From : To\n");
- len += sysfs_emit_at(buf, len, " : ");
- for (i = 0; i < stats->state_num; i++) {
-- if (len >= PAGE_SIZE)
-+ if (len >= PAGE_SIZE - 1)
- break;
- len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
- }
-- if (len >= PAGE_SIZE)
-- return PAGE_SIZE;
-+ if (len >= PAGE_SIZE - 1)
-+ return PAGE_SIZE - 1;
-
- len += sysfs_emit_at(buf, len, "\n");
-
- for (i = 0; i < stats->state_num; i++) {
-- if (len >= PAGE_SIZE)
-+ if (len >= PAGE_SIZE - 1)
- break;
-
- len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
-
- for (j = 0; j < stats->state_num; j++) {
-- if (len >= PAGE_SIZE)
-+ if (len >= PAGE_SIZE - 1)
- break;
-
- if (pending)
-@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
-
- len += sysfs_emit_at(buf, len, "%9u ", count);
- }
-- if (len >= PAGE_SIZE)
-+ if (len >= PAGE_SIZE - 1)
- break;
- len += sysfs_emit_at(buf, len, "\n");
- }
-
-- if (len >= PAGE_SIZE) {
-+ if (len >= PAGE_SIZE - 1) {
- pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
- return -EFBIG;
- }
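
The cpufreq_stats hunk above tightens the overflow test to PAGE_SIZE - 1: an emitter that reserves a byte for the trailing NUL can return at most PAGE_SIZE - 1 payload bytes, so a test against PAGE_SIZE lets an exactly-full (silently truncated) buffer go undetected. A simplified model:

    #include <stdio.h>

    #define BUF_SZ 4096   /* stand-in for PAGE_SIZE */

    /*
     * "Buffer full" has to trip at len >= BUF_SZ - 1, because the last
     * usable payload byte is BUF_SZ - 2 (one byte goes to the NUL).
     */
    static int emit_cell(char *buf, int len, unsigned int v)
    {
        if (len >= BUF_SZ - 1)
            return len;     /* full: caller detects this and bails out */
        return len + snprintf(buf + len, (size_t)(BUF_SZ - len), "%9u ", v);
    }

    int main(void)
    {
        static char buf[BUF_SZ];
        int len = BUF_SZ - 1;                 /* buffer already full */

        return emit_cell(buf, len, 7) == len ? 0 : 1;
    }
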
-diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
-index 494d044b9e720..33728c242f66c 100644
---- a/drivers/cpufreq/imx6q-cpufreq.c
-+++ b/drivers/cpufreq/imx6q-cpufreq.c
-@@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
- imx6x_disable_freq_in_opp(dev, 696000000);
-
- if (of_machine_is_compatible("fsl,imx6ull")) {
-- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
-+ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
- imx6x_disable_freq_in_opp(dev, 792000000);
-
- if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
-diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
-index 88ef5e57ccd05..386aed3637b4e 100644
---- a/drivers/cpufreq/tegra194-cpufreq.c
-+++ b/drivers/cpufreq/tegra194-cpufreq.c
-@@ -450,6 +450,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
- if (IS_ERR(opp))
- continue;
-
-+ dev_pm_opp_put(opp);
-+
- ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
- if (ret < 0)
- return ret;
-diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
-index eba2d750c3b07..066f08a3a040d 100644
---- a/drivers/crypto/caam/caamalg.c
-+++ b/drivers/crypto/caam/caamalg.c
-@@ -575,7 +575,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
- if (keylen != CHACHA_KEY_SIZE + saltlen)
- return -EINVAL;
-
-- ctx->cdata.key_virt = key;
-+ memcpy(ctx->key, key, keylen);
-+ ctx->cdata.key_virt = ctx->key;
- ctx->cdata.keylen = keylen - saltlen;
-
- return chachapoly_set_sh_desc(aead);
-diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
-index 9156bbe038b7b..a148ff1f0872c 100644
---- a/drivers/crypto/caam/caamalg_qi2.c
-+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -641,7 +641,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
- if (keylen != CHACHA_KEY_SIZE + saltlen)
- return -EINVAL;
-
-- ctx->cdata.key_virt = key;
-+ memcpy(ctx->key, key, keylen);
-+ ctx->cdata.key_virt = ctx->key;
- ctx->cdata.keylen = keylen - saltlen;
-
- return chachapoly_set_sh_desc(aead);
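
Both caam setkey hunks above stop caching the caller's key pointer and copy the bytes into driver-owned context storage, since the caller's buffer is only guaranteed valid for the duration of the call. A sketch with simplified types and sizes:

    #include <string.h>

    struct tfm_ctx {
        unsigned char key[64];
        const unsigned char *key_virt;   /* always points into key[] */
        unsigned int keylen;
    };

    static int ctx_setkey(struct tfm_ctx *ctx, const unsigned char *key,
                          unsigned int keylen)
    {
        if (keylen > sizeof(ctx->key))
            return -1;

        memcpy(ctx->key, key, keylen);   /* own the bytes */
        ctx->key_virt = ctx->key;        /* never the caller's buffer */
        ctx->keylen = keylen;
        return 0;
    }

    int main(void)
    {
        struct tfm_ctx ctx;
        unsigned char k[32] = { 0 };

        return ctx_setkey(&ctx, k, sizeof(k));
    }
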
-diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
-index 839ea14b9a853..6f33149ef80df 100644
---- a/drivers/crypto/ccp/dbc.c
-+++ b/drivers/crypto/ccp/dbc.c
-@@ -205,7 +205,7 @@ int dbc_dev_init(struct psp_device *psp)
- return -ENOMEM;
-
- BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
-- dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
-+ dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 0);
- if (!dbc_dev->mbox) {
- ret = -ENOMEM;
- goto cleanup_dev;
-diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
-index 39297ce70f441..3dce35debf637 100644
---- a/drivers/crypto/hisilicon/hpre/hpre_main.c
-+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
-@@ -433,8 +433,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
- module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
- MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
-
-+static bool pf_q_num_flag;
- static int pf_q_num_set(const char *val, const struct kernel_param *kp)
- {
-+ pf_q_num_flag = true;
-+
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
- }
-
-@@ -1033,7 +1036,7 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
-
- for (i = 0; i < clusters_num; i++) {
- ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
-- if (ret < 0)
-+ if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
- return -EINVAL;
- tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
-
-@@ -1157,6 +1160,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- qm->qm_list = &hpre_devices;
-+ if (pf_q_num_flag)
-+ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
- }
-
- ret = hisi_qm_init(qm);
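
The hpre debugfs hunk above fixes the snprintf() check: on truncation snprintf() returns the length it would have written, not a negative value, so testing ret < 0 missed the truncated case entirely. A small model of the corrected test:

    #include <stdio.h>

    static int make_name(char *buf, size_t sz, int idx)
    {
        int ret = snprintf(buf, sz, "cluster%d", idx);

        if (ret < 0 || (size_t)ret >= sz)
            return -1;       /* output error or truncated name */
        return 0;
    }

    int main(void)
    {
        char small[4];

        /* "cluster123" cannot fit in 4 bytes: must be reported */
        return make_name(small, sizeof(small), 123) == -1 ? 0 : 1;
    }
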
-diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
-index a99fd589445ce..193b0b3a77cda 100644
---- a/drivers/crypto/hisilicon/qm.c
-+++ b/drivers/crypto/hisilicon/qm.c
-@@ -206,8 +206,6 @@
- #define WAIT_PERIOD 20
- #define REMOVE_WAIT_DELAY 10
-
--#define QM_DRIVER_REMOVING 0
--#define QM_RST_SCHED 1
- #define QM_QOS_PARAM_NUM 2
- #define QM_QOS_MAX_VAL 1000
- #define QM_QOS_RATE 100
-@@ -849,6 +847,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
- qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
- qp->qp_status.cq_head, 0);
- atomic_dec(&qp->qp_status.used);
-+
-+ cond_resched();
- }
-
- /* set c_flag */
-@@ -2824,7 +2824,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
- mutex_init(&qm->mailbox_lock);
- init_rwsem(&qm->qps_lock);
- qm->qp_in_used = 0;
-- qm->misc_ctl = false;
- if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
- if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
- dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
-@@ -5093,6 +5092,7 @@ free_eq_irq:
-
- static int qm_get_qp_num(struct hisi_qm *qm)
- {
-+ struct device *dev = &qm->pdev->dev;
- bool is_db_isolation;
-
- /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */
-@@ -5109,13 +5109,21 @@ static int qm_get_qp_num(struct hisi_qm *qm)
- qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
- QM_FUNC_MAX_QP_CAP, is_db_isolation);
-
-- /* check if qp number is valid */
-- if (qm->qp_num > qm->max_qp_num) {
-- dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
-+ if (qm->qp_num <= qm->max_qp_num)
-+ return 0;
-+
-+ if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
-+ /* Check whether the set qp number is valid */
-+ dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
- qm->qp_num, qm->max_qp_num);
- return -EINVAL;
- }
-
-+ dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
-+ qm->qp_num, qm->max_qp_num);
-+ qm->qp_num = qm->max_qp_num;
-+ qm->debug.curr_qm_qp_num = qm->qp_num;
-+
- return 0;
- }
-
-diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
-index 1406a422d4551..8e36aa9c681be 100644
---- a/drivers/crypto/hisilicon/qm_common.h
-+++ b/drivers/crypto/hisilicon/qm_common.h
-@@ -4,7 +4,6 @@
- #define QM_COMMON_H
-
- #define QM_DBG_READ_LEN 256
--#define QM_RESETTING 2
-
- struct qm_cqe {
- __le32 rsvd0;
-diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
-index 77f9f131b8503..62bd8936a9154 100644
---- a/drivers/crypto/hisilicon/sec2/sec_main.c
-+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
-@@ -311,8 +311,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused)
- }
- DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);
-
-+static bool pf_q_num_flag;
- static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
- {
-+ pf_q_num_flag = true;
-+
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
- }
-
-@@ -1120,6 +1123,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- qm->qm_list = &sec_devices;
-+ if (pf_q_num_flag)
-+ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
- } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
-diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
-index f3ce34198775d..84dbaeb07ea83 100644
---- a/drivers/crypto/hisilicon/zip/zip_main.c
-+++ b/drivers/crypto/hisilicon/zip/zip_main.c
-@@ -364,8 +364,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE;
- module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
- MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
-
-+static bool pf_q_num_flag;
- static int pf_q_num_set(const char *val, const struct kernel_param *kp)
- {
-+ pf_q_num_flag = true;
-+
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
- }
-
-@@ -1139,6 +1142,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- qm->qm_list = &zip_devices;
-+ if (pf_q_num_flag)
-+ set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
- } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
- /*
- * have no way to get qm configure in VM in v1 hardware,
-diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
-index dd4464b7e00b1..a5691ba0b7244 100644
---- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
-+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
-@@ -11,8 +11,13 @@
- #include <adf_gen4_pm.h>
- #include <adf_gen4_timer.h>
- #include "adf_4xxx_hw_data.h"
-+#include "adf_cfg_services.h"
- #include "icp_qat_hw.h"
-
-+#define ADF_AE_GROUP_0 GENMASK(3, 0)
-+#define ADF_AE_GROUP_1 GENMASK(7, 4)
-+#define ADF_AE_GROUP_2 BIT(8)
-+
- enum adf_fw_objs {
- ADF_FW_SYM_OBJ,
- ADF_FW_ASYM_OBJ,
-@@ -40,39 +45,45 @@ struct adf_fw_config {
- };
-
- static const struct adf_fw_config adf_fw_cy_config[] = {
-- {0xF0, ADF_FW_SYM_OBJ},
-- {0xF, ADF_FW_ASYM_OBJ},
-- {0x100, ADF_FW_ADMIN_OBJ},
-+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
- };
-
- static const struct adf_fw_config adf_fw_dc_config[] = {
-- {0xF0, ADF_FW_DC_OBJ},
-- {0xF, ADF_FW_DC_OBJ},
-- {0x100, ADF_FW_ADMIN_OBJ},
-+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
- };
-
- static const struct adf_fw_config adf_fw_sym_config[] = {
-- {0xF0, ADF_FW_SYM_OBJ},
-- {0xF, ADF_FW_SYM_OBJ},
-- {0x100, ADF_FW_ADMIN_OBJ},
-+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
- };
-
- static const struct adf_fw_config adf_fw_asym_config[] = {
-- {0xF0, ADF_FW_ASYM_OBJ},
-- {0xF, ADF_FW_ASYM_OBJ},
-- {0x100, ADF_FW_ADMIN_OBJ},
-+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
- };
-
- static const struct adf_fw_config adf_fw_asym_dc_config[] = {
-- {0xF0, ADF_FW_ASYM_OBJ},
-- {0xF, ADF_FW_DC_OBJ},
-- {0x100, ADF_FW_ADMIN_OBJ},
-+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
- };
-
- static const struct adf_fw_config adf_fw_sym_dc_config[] = {
-- {0xF0, ADF_FW_SYM_OBJ},
-- {0xF, ADF_FW_DC_OBJ},
-- {0x100, ADF_FW_ADMIN_OBJ},
-+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
-+};
-+
-+static const struct adf_fw_config adf_fw_dcc_config[] = {
-+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
-+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
-+ {ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
- };
-
- static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
-@@ -80,6 +91,7 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
- static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
- static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
- static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
-+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
-
- /* Worker thread to service arbiter mappings */
- static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
-@@ -94,36 +106,18 @@ static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x0
- };
-
-+static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
-+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
-+ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
-+ 0x0
-+};
-+
- static struct adf_hw_device_class adf_4xxx_class = {
- .name = ADF_4XXX_DEVICE_NAME,
- .type = DEV_4XXX,
- .instances = 0,
- };
-
--enum dev_services {
-- SVC_CY = 0,
-- SVC_CY2,
-- SVC_DC,
-- SVC_SYM,
-- SVC_ASYM,
-- SVC_DC_ASYM,
-- SVC_ASYM_DC,
-- SVC_DC_SYM,
-- SVC_SYM_DC,
--};
--
--static const char *const dev_cfg_services[] = {
-- [SVC_CY] = ADF_CFG_CY,
-- [SVC_CY2] = ADF_CFG_ASYM_SYM,
-- [SVC_DC] = ADF_CFG_DC,
-- [SVC_SYM] = ADF_CFG_SYM,
-- [SVC_ASYM] = ADF_CFG_ASYM,
-- [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
-- [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
-- [SVC_DC_SYM] = ADF_CFG_DC_SYM,
-- [SVC_SYM_DC] = ADF_CFG_SYM_DC,
--};
--
- static int get_service_enabled(struct adf_accel_dev *accel_dev)
- {
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-@@ -137,7 +131,7 @@ static int get_service_enabled(struct adf_accel_dev *accel_dev)
- return ret;
- }
-
-- ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
-+ ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
- services);
- if (ret < 0)
- dev_err(&GET_DEV(accel_dev),
-@@ -212,6 +206,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
- {
- struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
- u32 capabilities_sym, capabilities_asym, capabilities_dc;
-+ u32 capabilities_dcc;
- u32 fusectl1;
-
- /* Read accelerator capabilities mask */
-@@ -284,6 +279,14 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
- return capabilities_sym | capabilities_asym;
- case SVC_DC:
- return capabilities_dc;
-+ case SVC_DCC:
-+ /*
-+ * Sym capabilities are available for chaining operations,
-+ * but sym crypto instances cannot be supported
-+ */
-+ capabilities_dcc = capabilities_dc | capabilities_sym;
-+ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-+ return capabilities_dcc;
- case SVC_SYM:
- return capabilities_sym;
- case SVC_ASYM:
-@@ -309,6 +312,8 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
- switch (get_service_enabled(accel_dev)) {
- case SVC_DC:
- return thrd_to_arb_map_dc;
-+ case SVC_DCC:
-+ return thrd_to_arb_map_dcc;
- default:
- return default_thrd_to_arb_map;
- }
-@@ -393,38 +398,96 @@ static u32 uof_get_num_objs(void)
- return ARRAY_SIZE(adf_fw_cy_config);
- }
-
--static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
-- const char * const fw_objs[], int num_objs)
-+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
- {
-- int id;
--
- switch (get_service_enabled(accel_dev)) {
- case SVC_CY:
- case SVC_CY2:
-- id = adf_fw_cy_config[obj_num].obj;
-- break;
-+ return adf_fw_cy_config;
- case SVC_DC:
-- id = adf_fw_dc_config[obj_num].obj;
-- break;
-+ return adf_fw_dc_config;
-+ case SVC_DCC:
-+ return adf_fw_dcc_config;
- case SVC_SYM:
-- id = adf_fw_sym_config[obj_num].obj;
-- break;
-+ return adf_fw_sym_config;
- case SVC_ASYM:
-- id = adf_fw_asym_config[obj_num].obj;
-- break;
-+ return adf_fw_asym_config;
- case SVC_ASYM_DC:
- case SVC_DC_ASYM:
-- id = adf_fw_asym_dc_config[obj_num].obj;
-- break;
-+ return adf_fw_asym_dc_config;
- case SVC_SYM_DC:
- case SVC_DC_SYM:
-- id = adf_fw_sym_dc_config[obj_num].obj;
-- break;
-+ return adf_fw_sym_dc_config;
- default:
-- id = -EINVAL;
-- break;
-+ return NULL;
-+ }
-+}
-+
-+enum adf_rp_groups {
-+ RP_GROUP_0 = 0,
-+ RP_GROUP_1,
-+ RP_GROUP_COUNT
-+};
-+
-+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
-+{
-+ enum adf_cfg_service_type rps[RP_GROUP_COUNT];
-+ const struct adf_fw_config *fw_config;
-+ u16 ring_to_svc_map;
-+ int i, j;
-+
-+ fw_config = get_fw_config(accel_dev);
-+ if (!fw_config)
-+ return 0;
-+
-+ for (i = 0; i < RP_GROUP_COUNT; i++) {
-+ switch (fw_config[i].ae_mask) {
-+ case ADF_AE_GROUP_0:
-+ j = RP_GROUP_0;
-+ break;
-+ case ADF_AE_GROUP_1:
-+ j = RP_GROUP_1;
-+ break;
-+ default:
-+ return 0;
-+ }
-+
-+ switch (fw_config[i].obj) {
-+ case ADF_FW_SYM_OBJ:
-+ rps[j] = SYM;
-+ break;
-+ case ADF_FW_ASYM_OBJ:
-+ rps[j] = ASYM;
-+ break;
-+ case ADF_FW_DC_OBJ:
-+ rps[j] = COMP;
-+ break;
-+ default:
-+ rps[j] = 0;
-+ break;
-+ }
- }
-
-+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
-+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
-+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
-+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
-+
-+ return ring_to_svc_map;
-+}
-+
-+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
-+ const char * const fw_objs[], int num_objs)
-+{
-+ const struct adf_fw_config *fw_config;
-+ int id;
-+
-+ fw_config = get_fw_config(accel_dev);
-+ if (fw_config)
-+ id = fw_config[obj_num].obj;
-+ else
-+ id = -EINVAL;
-+
- if (id < 0 || id > num_objs)
- return NULL;
-
-@@ -447,26 +510,13 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n
-
- static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
- {
-- switch (get_service_enabled(accel_dev)) {
-- case SVC_CY:
-- return adf_fw_cy_config[obj_num].ae_mask;
-- case SVC_DC:
-- return adf_fw_dc_config[obj_num].ae_mask;
-- case SVC_CY2:
-- return adf_fw_cy_config[obj_num].ae_mask;
-- case SVC_SYM:
-- return adf_fw_sym_config[obj_num].ae_mask;
-- case SVC_ASYM:
-- return adf_fw_asym_config[obj_num].ae_mask;
-- case SVC_ASYM_DC:
-- case SVC_DC_ASYM:
-- return adf_fw_asym_dc_config[obj_num].ae_mask;
-- case SVC_SYM_DC:
-- case SVC_DC_SYM:
-- return adf_fw_sym_dc_config[obj_num].ae_mask;
-- default:
-+ const struct adf_fw_config *fw_config;
-+
-+ fw_config = get_fw_config(accel_dev);
-+ if (!fw_config)
- return 0;
-- }
-+
-+ return fw_config[obj_num].ae_mask;
- }
-
- void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
-@@ -522,6 +572,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
- hw_data->uof_get_ae_mask = uof_get_ae_mask;
- hw_data->set_msix_rttable = set_msix_default_rttable;
- hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
-+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
- hw_data->disable_iov = adf_disable_sriov;
- hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- hw_data->enable_pm = adf_gen4_enable_pm;
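
The 4xxx hunks above replace per-consumer service switches with a single get_fw_config() lookup whose result every consumer (name lookup, AE mask, ring mapping) indexes. A compact sketch of the table-driven shape, with placeholder values:

    struct fw_cfg { unsigned int ae_mask; int obj; };

    static const struct fw_cfg cfg_dc[]  = { { 0xF0, 0 }, { 0x0F, 0 }, { 0x100, 9 } };
    static const struct fw_cfg cfg_sym[] = { { 0xF0, 1 }, { 0x0F, 1 }, { 0x100, 9 } };

    enum svc { SVC_DC_ID, SVC_SYM_ID };

    static const struct fw_cfg *get_cfg(enum svc s)
    {
        switch (s) {
        case SVC_DC_ID:  return cfg_dc;
        case SVC_SYM_ID: return cfg_sym;
        }
        return 0;
    }

    static unsigned int get_ae_mask(enum svc s, unsigned int obj_num)
    {
        const struct fw_cfg *c = get_cfg(s);

        return c ? c[obj_num].ae_mask : 0;   /* one switch, many consumers */
    }

    int main(void)
    {
        return get_ae_mask(SVC_SYM_ID, 0) == 0xF0 ? 0 : 1;
    }
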
-diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
-index 6d4e2e139ffa2..90f5c1ca7b8d8 100644
---- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
-+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
-@@ -11,6 +11,7 @@
- #include <adf_heartbeat.h>
-
- #include "adf_4xxx_hw_data.h"
-+#include "adf_cfg_services.h"
- #include "qat_compression.h"
- #include "qat_crypto.h"
- #include "adf_transport_access_macros.h"
-@@ -23,30 +24,6 @@ static const struct pci_device_id adf_pci_tbl[] = {
- };
- MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
--enum configs {
-- DEV_CFG_CY = 0,
-- DEV_CFG_DC,
-- DEV_CFG_SYM,
-- DEV_CFG_ASYM,
-- DEV_CFG_ASYM_SYM,
-- DEV_CFG_ASYM_DC,
-- DEV_CFG_DC_ASYM,
-- DEV_CFG_SYM_DC,
-- DEV_CFG_DC_SYM,
--};
--
--static const char * const services_operations[] = {
-- ADF_CFG_CY,
-- ADF_CFG_DC,
-- ADF_CFG_SYM,
-- ADF_CFG_ASYM,
-- ADF_CFG_ASYM_SYM,
-- ADF_CFG_ASYM_DC,
-- ADF_CFG_DC_ASYM,
-- ADF_CFG_SYM_DC,
-- ADF_CFG_DC_SYM,
--};
--
- static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
- {
- if (accel_dev->hw_device) {
-@@ -292,16 +269,17 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
- if (ret)
- goto err;
-
-- ret = sysfs_match_string(services_operations, services);
-+ ret = sysfs_match_string(adf_cfg_services, services);
- if (ret < 0)
- goto err;
-
- switch (ret) {
-- case DEV_CFG_CY:
-- case DEV_CFG_ASYM_SYM:
-+ case SVC_CY:
-+ case SVC_CY2:
- ret = adf_crypto_dev_config(accel_dev);
- break;
-- case DEV_CFG_DC:
-+ case SVC_DC:
-+ case SVC_DCC:
- ret = adf_comp_dev_config(accel_dev);
- break;
- default:
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
-index e57abde66f4fb..79d5a1535eda3 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
-+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
-@@ -29,7 +29,7 @@
- #define ADF_PCI_MAX_BARS 3
- #define ADF_DEVICE_NAME_LENGTH 32
- #define ADF_ETR_MAX_RINGS_PER_BANK 16
--#define ADF_MAX_MSIX_VECTOR_NAME 16
-+#define ADF_MAX_MSIX_VECTOR_NAME 48
- #define ADF_DEVICE_NAME_PREFIX "qat_"
-
- enum adf_accel_capabilities {
-@@ -182,6 +182,7 @@ struct adf_hw_device_data {
- void (*get_arb_info)(struct arb_info *arb_csrs_info);
- void (*get_admin_info)(struct admin_info *admin_csrs_info);
- enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
-+ u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
- int (*alloc_irq)(struct adf_accel_dev *accel_dev);
- void (*free_irq)(struct adf_accel_dev *accel_dev);
- void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
-index ff790823b8686..194d64d4b99a1 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
-+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
-@@ -8,6 +8,7 @@
- #include <linux/dma-mapping.h>
- #include "adf_accel_devices.h"
- #include "adf_common_drv.h"
-+#include "adf_cfg.h"
- #include "adf_heartbeat.h"
- #include "icp_qat_fw_init_admin.h"
-
-@@ -212,6 +213,17 @@ int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
- return 0;
- }
-
-+static int adf_set_chaining(struct adf_accel_dev *accel_dev)
-+{
-+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
-+ struct icp_qat_fw_init_admin_resp resp = { };
-+ struct icp_qat_fw_init_admin_req req = { };
-+
-+ req.cmd_id = ICP_QAT_FW_DC_CHAIN_INIT;
-+
-+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-+}
-+
- static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
- u32 *capabilities)
- {
-@@ -284,6 +296,19 @@ int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
- return adf_send_admin(accel_dev, &req, &resp, ae_mask);
- }
-
-+static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
-+{
-+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-+ int ret;
-+
-+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-+ ADF_SERVICES_ENABLED, services);
-+ if (ret)
-+ return false;
-+
-+ return !strcmp(services, "dcc");
-+}
-+
- /**
- * adf_send_admin_init() - Function sends init message to FW
- * @accel_dev: Pointer to acceleration device.
-@@ -297,6 +322,16 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
- u32 dc_capabilities = 0;
- int ret;
-
-+ ret = adf_set_fw_constants(accel_dev);
-+ if (ret)
-+ return ret;
-+
-+ if (is_dcc_enabled(accel_dev)) {
-+ ret = adf_set_chaining(accel_dev);
-+ if (ret)
-+ return ret;
-+ }
-+
- ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
- if (ret) {
- dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
-@@ -304,10 +339,6 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
- }
- accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
-
-- ret = adf_set_fw_constants(accel_dev);
-- if (ret)
-- return ret;
--
- return adf_init_ae(accel_dev);
- }
- EXPORT_SYMBOL_GPL(adf_send_admin_init);
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
-new file mode 100644
-index 0000000000000..b353d40c5c6d0
---- /dev/null
-+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
-@@ -0,0 +1,34 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/* Copyright(c) 2023 Intel Corporation */
-+#ifndef _ADF_CFG_SERVICES_H_
-+#define _ADF_CFG_SERVICES_H_
-+
-+#include "adf_cfg_strings.h"
-+
-+enum adf_services {
-+ SVC_CY = 0,
-+ SVC_CY2,
-+ SVC_DC,
-+ SVC_DCC,
-+ SVC_SYM,
-+ SVC_ASYM,
-+ SVC_DC_ASYM,
-+ SVC_ASYM_DC,
-+ SVC_DC_SYM,
-+ SVC_SYM_DC,
-+};
-+
-+static const char *const adf_cfg_services[] = {
-+ [SVC_CY] = ADF_CFG_CY,
-+ [SVC_CY2] = ADF_CFG_ASYM_SYM,
-+ [SVC_DC] = ADF_CFG_DC,
-+ [SVC_DCC] = ADF_CFG_DCC,
-+ [SVC_SYM] = ADF_CFG_SYM,
-+ [SVC_ASYM] = ADF_CFG_ASYM,
-+ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
-+ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
-+ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
-+ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
-+};
-+
-+#endif
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
-index 6066dc637352c..322b76903a737 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
-+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
-@@ -32,6 +32,7 @@
- #define ADF_CFG_DC_ASYM "dc;asym"
- #define ADF_CFG_SYM_DC "sym;dc"
- #define ADF_CFG_DC_SYM "dc;sym"
-+#define ADF_CFG_DCC "dcc"
- #define ADF_SERVICES_ENABLED "ServicesEnabled"
- #define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
- #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
-index 673b5044c62a5..79ff7982378d9 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
-+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
-@@ -25,6 +25,8 @@
- #define ADF_STATUS_AE_STARTED 6
- #define ADF_STATUS_PF_RUNNING 7
- #define ADF_STATUS_IRQ_ALLOCATED 8
-+#define ADF_STATUS_CRYPTO_ALGS_REGISTERED 9
-+#define ADF_STATUS_COMP_ALGS_REGISTERED 10
-
- enum adf_dev_reset_mode {
- ADF_DEV_RESET_ASYNC = 0,
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
-index 89001fe92e762..0f9e2d59ce385 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_init.c
-+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
-@@ -97,6 +97,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
- return -EFAULT;
- }
-
-+ if (hw_data->get_ring_to_svc_map)
-+ hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev);
-+
- if (adf_ae_init(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to initialise Acceleration Engine\n");
-@@ -231,6 +234,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
- clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
- return -EFAULT;
- }
-+ set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
-
- if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
- dev_err(&GET_DEV(accel_dev),
-@@ -239,6 +243,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
- clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
- return -EFAULT;
- }
-+ set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
-
- adf_dbgfs_add(accel_dev);
-
-@@ -272,13 +277,17 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
- clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
- clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-
-- if (!list_empty(&accel_dev->crypto_list)) {
-+ if (!list_empty(&accel_dev->crypto_list) &&
-+ test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
- qat_algs_unregister();
- qat_asym_algs_unregister();
- }
-+ clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
-
-- if (!list_empty(&accel_dev->compression_list))
-+ if (!list_empty(&accel_dev->compression_list) &&
-+ test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
- qat_comp_algs_unregister();
-+ clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
-
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
-@@ -440,13 +449,6 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
-
- mutex_lock(&accel_dev->state_lock);
-
-- if (!adf_dev_started(accel_dev)) {
-- dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
-- accel_dev->accel_id);
-- ret = -EINVAL;
-- goto out;
-- }
--
- if (reconfig) {
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
- goto out;
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
-index a74d2f9303670..8f04b0d3c5ac8 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
-+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
-@@ -5,6 +5,7 @@
- #include <linux/pci.h>
- #include "adf_accel_devices.h"
- #include "adf_cfg.h"
-+#include "adf_cfg_services.h"
- #include "adf_common_drv.h"
-
- static const char * const state_operations[] = {
-@@ -52,6 +53,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
- case DEV_DOWN:
- dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
-
-+ if (!adf_dev_started(accel_dev)) {
-+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
-+ accel_id);
-+
-+ break;
-+ }
-+
- ret = adf_dev_down(accel_dev, true);
- if (ret < 0)
- return -EINVAL;
-@@ -61,7 +69,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
- dev_info(dev, "Starting device qat_dev%d\n", accel_id);
-
- ret = adf_dev_up(accel_dev, true);
-- if (ret < 0) {
-+ if (ret == -EALREADY) {
-+ break;
-+ } else if (ret) {
- dev_err(dev, "Failed to start device qat_dev%d\n",
- accel_id);
- adf_dev_down(accel_dev, true);
-@@ -75,18 +85,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
- return count;
- }
-
--static const char * const services_operations[] = {
-- ADF_CFG_CY,
-- ADF_CFG_DC,
-- ADF_CFG_SYM,
-- ADF_CFG_ASYM,
-- ADF_CFG_ASYM_SYM,
-- ADF_CFG_ASYM_DC,
-- ADF_CFG_DC_ASYM,
-- ADF_CFG_SYM_DC,
-- ADF_CFG_DC_SYM,
--};
--
- static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
-@@ -121,7 +119,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
- struct adf_accel_dev *accel_dev;
- int ret;
-
-- ret = sysfs_match_string(services_operations, buf);
-+ ret = sysfs_match_string(adf_cfg_services, buf);
- if (ret < 0)
- return ret;
-
-@@ -135,7 +133,7 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
- return -EINVAL;
- }
-
-- ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
-+ ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
- if (ret < 0)
- return ret;
-
-diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
-index 08bca1c506c0e..e2dd568b87b51 100644
---- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
-+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
-@@ -90,7 +90,7 @@ DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
- int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
- {
- struct adf_etr_ring_debug_entry *ring_debug;
-- char entry_name[8];
-+ char entry_name[16];
-
- ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
- if (!ring_debug)
-@@ -192,7 +192,7 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
- {
- struct adf_accel_dev *accel_dev = bank->accel_dev;
- struct dentry *parent = accel_dev->transport->debug;
-- char name[8];
-+ char name[16];
-
- snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
- bank->bank_debug_dir = debugfs_create_dir(name, parent);
-diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
-index 3e968a4bcc9cd..019a6443834e0 100644
---- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
-+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
-@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
- ICP_QAT_FW_HEARTBEAT_SYNC = 7,
- ICP_QAT_FW_HEARTBEAT_GET = 8,
- ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
-+ ICP_QAT_FW_DC_CHAIN_INIT = 11,
- ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
- ICP_QAT_FW_TIMER_GET = 19,
- ICP_QAT_FW_PM_STATE_CONFIG = 128,
-diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
-index bb80455b3e81e..b97b678823a97 100644
---- a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
-+++ b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
-@@ -40,40 +40,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
- spin_unlock_bh(&backlog->lock);
- }
-
--static void qat_alg_backlog_req(struct qat_alg_req *req,
-- struct qat_instance_backlog *backlog)
--{
-- INIT_LIST_HEAD(&req->list);
--
-- spin_lock_bh(&backlog->lock);
-- list_add_tail(&req->list, &backlog->list);
-- spin_unlock_bh(&backlog->lock);
--}
--
--static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
-+static bool qat_alg_try_enqueue(struct qat_alg_req *req)
- {
- struct qat_instance_backlog *backlog = req->backlog;
- struct adf_etr_ring_data *tx_ring = req->tx_ring;
- u32 *fw_req = req->fw_req;
-
-- /* If any request is already backlogged, then add to backlog list */
-+ /* Check if any request is already backlogged */
- if (!list_empty(&backlog->list))
-- goto enqueue;
-+ return false;
-
-- /* If ring is nearly full, then add to backlog list */
-+ /* Check if ring is nearly full */
- if (adf_ring_nearly_full(tx_ring))
-- goto enqueue;
-+ return false;
-
-- /* If adding request to HW ring fails, then add to backlog list */
-+ /* Try to enqueue to HW ring */
- if (adf_send_message(tx_ring, fw_req))
-- goto enqueue;
-+ return false;
-
-- return -EINPROGRESS;
-+ return true;
-+}
-
--enqueue:
-- qat_alg_backlog_req(req, backlog);
-
-- return -EBUSY;
-+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
-+{
-+ struct qat_instance_backlog *backlog = req->backlog;
-+ int ret = -EINPROGRESS;
-+
-+ if (qat_alg_try_enqueue(req))
-+ return ret;
-+
-+ spin_lock_bh(&backlog->lock);
-+ if (!qat_alg_try_enqueue(req)) {
-+ list_add_tail(&req->list, &backlog->list);
-+ ret = -EBUSY;
-+ }
-+ spin_unlock_bh(&backlog->lock);
-+
-+ return ret;
- }
-
- int qat_alg_send_message(struct qat_alg_req *req)
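
The qat_algs_send hunk above replaces the goto-based backlogging with a double-checked pattern: a lock-free fast path, then a re-check under the backlog lock so the emptiness test and the list append are decided atomically. A userspace sketch, using a pthread mutex in place of the spinlock and a stand-in for the ring submission:

    #include <pthread.h>
    #include <stdbool.h>

    struct backlog {
        pthread_mutex_t lock;
        int pending;                 /* stand-in for the list */
    };

    static bool try_enqueue(struct backlog *b)
    {
        return b->pending == 0;      /* "ring accepted the request" */
    }

    static int send_maybacklog(struct backlog *b)
    {
        int ret = 0;                 /* 0: submitted (-EINPROGRESS) */

        if (try_enqueue(b))          /* optimistic lock-free attempt */
            return ret;

        pthread_mutex_lock(&b->lock);
        if (!try_enqueue(b)) {       /* re-check under the lock */
            b->pending++;            /* append to the backlog */
            ret = -1;                /* backlogged (-EBUSY) */
        }
        pthread_mutex_unlock(&b->lock);
        return ret;
    }

    int main(void)
    {
        struct backlog b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        return send_maybacklog(&b);
    }
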
-diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
-index 45e7e044cf4a0..8e5f3d84311e5 100644
---- a/drivers/cxl/core/core.h
-+++ b/drivers/cxl/core/core.h
-@@ -75,6 +75,7 @@ resource_size_t __rcrb_to_component(struct device *dev,
- enum cxl_rcrb which);
-
- extern struct rw_semaphore cxl_dpa_rwsem;
-+extern struct rw_semaphore cxl_region_rwsem;
-
- int cxl_memdev_init(void);
- void cxl_memdev_exit(void);
-diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
-index 4449b34a80cc9..64e86b786db52 100644
---- a/drivers/cxl/core/hdm.c
-+++ b/drivers/cxl/core/hdm.c
-@@ -85,7 +85,7 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
- struct cxl_component_regs *regs)
- {
- struct cxl_register_map map = {
-- .dev = &port->dev,
-+ .host = &port->dev,
- .resource = port->component_reg_phys,
- .base = crb,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
-@@ -575,17 +575,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
- CXL_HDM_DECODER0_CTRL_HOSTONLY);
- }
-
--static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
-+static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
- {
- struct cxl_dport **t = &cxlsd->target[0];
- int ways = cxlsd->cxld.interleave_ways;
-
-- if (dev_WARN_ONCE(&cxlsd->cxld.dev,
-- ways > 8 || ways > cxlsd->nr_targets,
-- "ways: %d overflows targets: %d\n", ways,
-- cxlsd->nr_targets))
-- return -ENXIO;
--
- *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
- if (ways > 1)
- *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
-@@ -601,8 +595,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
- *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
- if (ways > 7)
- *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
--
-- return 0;
- }
-
- /*
-@@ -650,6 +642,25 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
- return -EBUSY;
- }
-
-+ /*
-+ * For endpoint decoders hosted on CXL memory devices that
-+ * support the sanitize operation, make sure sanitize is not in-flight.
-+ */
-+ if (is_endpoint_decoder(&cxld->dev)) {
-+ struct cxl_endpoint_decoder *cxled =
-+ to_cxl_endpoint_decoder(&cxld->dev);
-+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-+ struct cxl_memdev_state *mds =
-+ to_cxl_memdev_state(cxlmd->cxlds);
-+
-+ if (mds && mds->security.sanitize_active) {
-+ dev_dbg(&cxlmd->dev,
-+ "attempted to commit %s during sanitize\n",
-+ dev_name(&cxld->dev));
-+ return -EBUSY;
-+ }
-+ }
-+
- down_read(&cxl_dpa_rwsem);
- /* common decoder settings */
- ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
-@@ -670,13 +681,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
- void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
- u64 targets;
-
-- rc = cxlsd_set_targets(cxlsd, &targets);
-- if (rc) {
-- dev_dbg(&port->dev, "%s: target configuration error\n",
-- dev_name(&cxld->dev));
-- goto err;
-- }
--
-+ cxlsd_set_targets(cxlsd, &targets);
- writel(upper_32_bits(targets), tl_hi);
- writel(lower_32_bits(targets), tl_lo);
- } else {
-@@ -694,7 +699,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
-
- port->commit_end++;
- rc = cxld_await_commit(hdm, cxld->id);
--err:
- if (rc) {
- dev_dbg(&port->dev, "%s: error %d committing decoder\n",
- dev_name(&cxld->dev), rc);
-diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
-index 4df4f614f490e..b91bb98869917 100644
---- a/drivers/cxl/core/mbox.c
-+++ b/drivers/cxl/core/mbox.c
-@@ -1125,20 +1125,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
- }
- EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-
--/**
-- * cxl_mem_sanitize() - Send a sanitization command to the device.
-- * @mds: The device data for the operation
-- * @cmd: The specific sanitization command opcode
-- *
-- * Return: 0 if the command was executed successfully, regardless of
-- * whether or not the actual security operation is done in the background,
-- * such as for the Sanitize case.
-- * Error return values can be the result of the mailbox command, -EINVAL
-- * when security requirements are not met or invalid contexts.
-- *
-- * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
-- */
--int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
-+static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
- {
- int rc;
- u32 sec_out = 0;
-@@ -1183,7 +1170,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
-
- return 0;
- }
--EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
-+
-+
-+/**
-+ * cxl_mem_sanitize() - Send a sanitization command to the device.
-+ * @cxlmd: The device for the operation
-+ * @cmd: The specific sanitization command opcode
-+ *
-+ * Return: 0 if the command was executed successfully, regardless of
-+ * whether or not the actual security operation is done in the background,
-+ * such as for the Sanitize case.
-+ * Error return values can be the result of the mailbox command, -EINVAL
-+ * when security requirements are not met or invalid contexts, or -EBUSY
-+ * if the sanitize operation is already in flight.
-+ *
-+ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
-+ */
-+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
-+{
-+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-+ struct cxl_port *endpoint;
-+ int rc;
-+
-+ /* synchronize with cxl_mem_probe() and decoder write operations */
-+ device_lock(&cxlmd->dev);
-+ endpoint = cxlmd->endpoint;
-+ down_read(&cxl_region_rwsem);
-+ /*
-+ * Require an endpoint to be safe otherwise the driver can not
-+ * be sure that the device is unmapped.
-+ */
-+ if (endpoint && endpoint->commit_end == -1)
-+ rc = __cxl_mem_sanitize(mds, cmd);
-+ else
-+ rc = -EBUSY;
-+ up_read(&cxl_region_rwsem);
-+ device_unlock(&cxlmd->dev);
-+
-+ return rc;
-+}
-
- static int add_dpa_res(struct device *dev, struct resource *parent,
- struct resource *res, resource_size_t start,
-diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
-index 14b547c07f547..fed9573cf355e 100644
---- a/drivers/cxl/core/memdev.c
-+++ b/drivers/cxl/core/memdev.c
-@@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev,
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-- u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
-- u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
-- u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
- unsigned long state = mds->security.state;
-+ int rc = 0;
-
-- if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
-- return sysfs_emit(buf, "sanitize\n");
-+ /* sync with latest submission state */
-+ mutex_lock(&mds->mbox_mutex);
-+ if (mds->security.sanitize_active)
-+ rc = sysfs_emit(buf, "sanitize\n");
-+ mutex_unlock(&mds->mbox_mutex);
-+ if (rc)
-+ return rc;
-
- if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
- return sysfs_emit(buf, "disabled\n");
-@@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev,
- const char *buf, size_t len)
- {
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-- struct cxl_port *port = cxlmd->endpoint;
- bool sanitize;
- ssize_t rc;
-
- if (kstrtobool(buf, &sanitize) || !sanitize)
- return -EINVAL;
-
-- if (!port || !is_cxl_endpoint(port))
-- return -EINVAL;
--
-- /* ensure no regions are mapped to this memdev */
-- if (port->commit_end != -1)
-- return -EBUSY;
--
-- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
-+ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
-+ if (rc)
-+ return rc;
-
-- return rc ? rc : len;
-+ return len;
- }
- static struct device_attribute dev_attr_security_sanitize =
- __ATTR(sanitize, 0200, NULL, security_sanitize_store);
-@@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev,
- const char *buf, size_t len)
- {
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-- struct cxl_port *port = cxlmd->endpoint;
- ssize_t rc;
- bool erase;
-
- if (kstrtobool(buf, &erase) || !erase)
- return -EINVAL;
-
-- if (!port || !is_cxl_endpoint(port))
-- return -EINVAL;
--
-- /* ensure no regions are mapped to this memdev */
-- if (port->commit_end != -1)
-- return -EBUSY;
--
-- rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
-+ rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
-+ if (rc)
-+ return rc;
-
-- return rc ? rc : len;
-+ return len;
- }
- static struct device_attribute dev_attr_security_erase =
- __ATTR(erase, 0200, NULL, security_erase_store);
-@@ -556,21 +545,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
- }
- EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
-
--static void cxl_memdev_security_shutdown(struct device *dev)
--{
-- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
--
-- if (mds->security.poll)
-- cancel_delayed_work_sync(&mds->security.poll_dwork);
--}
--
- static void cxl_memdev_shutdown(struct device *dev)
- {
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-
- down_write(&cxl_memdev_rwsem);
-- cxl_memdev_security_shutdown(dev);
- cxlmd->cxlds = NULL;
- up_write(&cxl_memdev_rwsem);
- }
-@@ -580,8 +559,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
-
-- cxl_memdev_shutdown(dev);
- cdev_device_del(&cxlmd->cdev, dev);
-+ cxl_memdev_shutdown(dev);
- put_device(dev);
- }
-
-@@ -961,17 +940,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = {
- .cleanup = cxl_fw_cleanup,
- };
-
--static void devm_cxl_remove_fw_upload(void *fwl)
-+static void cxl_remove_fw_upload(void *fwl)
- {
- firmware_upload_unregister(fwl);
- }
-
--int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
-+int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
- {
- struct cxl_dev_state *cxlds = &mds->cxlds;
- struct device *dev = &cxlds->cxlmd->dev;
- struct fw_upload *fwl;
-- int rc;
-
- if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
- return 0;
-@@ -979,19 +957,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
- fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
- &cxl_memdev_fw_ops, mds);
- if (IS_ERR(fwl))
-- return dev_err_probe(dev, PTR_ERR(fwl),
-- "Failed to register firmware loader\n");
--
-- rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
-- fwl);
-- if (rc)
-- dev_err(dev,
-- "Failed to add firmware loader remove action: %d\n",
-- rc);
--
-- return rc;
-+ return PTR_ERR(fwl);
-+ return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
- }
--EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
-+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
-
- static const struct file_operations cxl_memdev_fops = {
- .owner = THIS_MODULE,
-@@ -1002,36 +971,8 @@ static const struct file_operations cxl_memdev_fops = {
- .llseek = noop_llseek,
- };
-
--static void put_sanitize(void *data)
--{
-- struct cxl_memdev_state *mds = data;
--
-- sysfs_put(mds->security.sanitize_node);
--}
--
--static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
--{
-- struct cxl_dev_state *cxlds = cxlmd->cxlds;
-- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-- struct device *dev = &cxlmd->dev;
-- struct kernfs_node *sec;
--
-- sec = sysfs_get_dirent(dev->kobj.sd, "security");
-- if (!sec) {
-- dev_err(dev, "sysfs_get_dirent 'security' failed\n");
-- return -ENODEV;
-- }
-- mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
-- sysfs_put(sec);
-- if (!mds->security.sanitize_node) {
-- dev_err(dev, "sysfs_get_dirent 'state' failed\n");
-- return -ENODEV;
-- }
--
-- return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
-- }
--
--struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
-+struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
-+ struct cxl_dev_state *cxlds)
- {
- struct cxl_memdev *cxlmd;
- struct device *dev;
-@@ -1059,11 +1000,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
- if (rc)
- goto err;
-
-- rc = cxl_memdev_security_init(cxlmd);
-- if (rc)
-- goto err;
--
-- rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
-+ rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
- if (rc)
- return ERR_PTR(rc);
- return cxlmd;
-@@ -1079,6 +1016,50 @@ err:
- }
- EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
-
-+static void sanitize_teardown_notifier(void *data)
-+{
-+ struct cxl_memdev_state *mds = data;
-+ struct kernfs_node *state;
-+
-+ /*
-+ * Prevent new irq triggered invocations of the workqueue and
-+ * flush inflight invocations.
-+ */
-+ mutex_lock(&mds->mbox_mutex);
-+ state = mds->security.sanitize_node;
-+ mds->security.sanitize_node = NULL;
-+ mutex_unlock(&mds->mbox_mutex);
-+
-+ cancel_delayed_work_sync(&mds->security.poll_dwork);
-+ sysfs_put(state);
-+}
-+
-+int devm_cxl_sanitize_setup_notifier(struct device *host,
-+ struct cxl_memdev *cxlmd)
-+{
-+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
-+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-+ struct kernfs_node *sec;
-+
-+ if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
-+ return 0;
-+
-+ /*
-+ * Note, the expectation is that @cxlmd would have failed to be
-+ * created if these sysfs_get_dirent calls fail.
-+ */
-+ sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
-+ if (!sec)
-+ return -ENOENT;
-+ mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
-+ sysfs_put(sec);
-+ if (!mds->security.sanitize_node)
-+ return -ENOENT;
-+
-+ return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
-+}
-+EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
-+
- __init int cxl_memdev_init(void)
- {
- dev_t devt;
-diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
-index 7ca01a834e188..6a75a3cb601ec 100644
---- a/drivers/cxl/core/port.c
-+++ b/drivers/cxl/core/port.c
-@@ -28,6 +28,12 @@
- * instantiated by the core.
- */
-
-+/*
-+ * All changes to the interleave configuration occur with this lock held
-+ * for write.
-+ */
-+DECLARE_RWSEM(cxl_region_rwsem);
-+
- static DEFINE_IDA(cxl_port_ida);
- static DEFINE_XARRAY(cxl_root_buses);
-
-@@ -691,14 +697,14 @@ err:
- return ERR_PTR(rc);
- }
-
--static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
-+static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
- resource_size_t component_reg_phys)
- {
- if (component_reg_phys == CXL_RESOURCE_NONE)
- return 0;
-
- *map = (struct cxl_register_map) {
-- .dev = dev,
-+ .host = host,
- .reg_type = CXL_REGLOC_RBI_COMPONENT,
- .resource = component_reg_phys,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
-@@ -716,13 +722,23 @@ static int cxl_port_setup_regs(struct cxl_port *port,
- component_reg_phys);
- }
-
--static int cxl_dport_setup_regs(struct cxl_dport *dport,
-+static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
- resource_size_t component_reg_phys)
- {
-+ int rc;
-+
- if (dev_is_platform(dport->dport_dev))
- return 0;
-- return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
-- component_reg_phys);
-+
-+ /*
-+ * use @dport->dport_dev for the context for error messages during
-+ * register probing, and fixup @host after the fact, since @host may be
-+ * NULL.
-+ */
-+ rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
-+ component_reg_phys);
-+ dport->comp_map.host = host;
-+ return rc;
- }
-
- static struct cxl_port *__devm_cxl_add_port(struct device *host,
-@@ -983,7 +999,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
- if (!dport)
- return ERR_PTR(-ENOMEM);
-
-- if (rcrb != CXL_RESOURCE_NONE) {
-+ dport->dport_dev = dport_dev;
-+ dport->port_id = port_id;
-+ dport->port = port;
-+
-+ if (rcrb == CXL_RESOURCE_NONE) {
-+ rc = cxl_dport_setup_regs(&port->dev, dport,
-+ component_reg_phys);
-+ if (rc)
-+ return ERR_PTR(rc);
-+ } else {
- dport->rcrb.base = rcrb;
- component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
- CXL_RCRB_DOWNSTREAM);
-@@ -992,6 +1017,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
- return ERR_PTR(-ENXIO);
- }
-
-+ /*
-+ * RCH @dport is not ready to map until associated with its
-+ * memdev
-+ */
-+ rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
-+ if (rc)
-+ return ERR_PTR(rc);
-+
- dport->rch = true;
- }
-
-@@ -999,14 +1032,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
- dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
- &component_reg_phys);
-
-- dport->dport_dev = dport_dev;
-- dport->port_id = port_id;
-- dport->port = port;
--
-- rc = cxl_dport_setup_regs(dport, component_reg_phys);
-- if (rc)
-- return ERR_PTR(rc);
--
- cond_cxl_root_lock(port);
- rc = add_dport(port, dport);
- cond_cxl_root_unlock(port);
-@@ -1217,35 +1242,39 @@ static struct device *grandparent(struct device *dev)
- return NULL;
- }
-
-+static struct device *endpoint_host(struct cxl_port *endpoint)
-+{
-+ struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
-+
-+ if (is_cxl_root(port))
-+ return port->uport_dev;
-+ return &port->dev;
-+}
-+
- static void delete_endpoint(void *data)
- {
- struct cxl_memdev *cxlmd = data;
- struct cxl_port *endpoint = cxlmd->endpoint;
-- struct cxl_port *parent_port;
-- struct device *parent;
--
-- parent_port = cxl_mem_find_port(cxlmd, NULL);
-- if (!parent_port)
-- goto out;
-- parent = &parent_port->dev;
-+ struct device *host = endpoint_host(endpoint);
-
-- device_lock(parent);
-- if (parent->driver && !endpoint->dead) {
-- devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
-- devm_release_action(parent, cxl_unlink_uport, endpoint);
-- devm_release_action(parent, unregister_port, endpoint);
-+ device_lock(host);
-+ if (host->driver && !endpoint->dead) {
-+ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
-+ devm_release_action(host, cxl_unlink_uport, endpoint);
-+ devm_release_action(host, unregister_port, endpoint);
- }
- cxlmd->endpoint = NULL;
-- device_unlock(parent);
-- put_device(parent);
--out:
-+ device_unlock(host);
- put_device(&endpoint->dev);
-+ put_device(host);
- }
-
- int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
- {
-+ struct device *host = endpoint_host(endpoint);
- struct device *dev = &cxlmd->dev;
-
-+ get_device(host);
- get_device(&endpoint->dev);
- cxlmd->endpoint = endpoint;
- cxlmd->depth = endpoint->depth;
-diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
-index 6d63b8798c299..9d60020c5cb3b 100644
---- a/drivers/cxl/core/region.c
-+++ b/drivers/cxl/core/region.c
-@@ -28,12 +28,6 @@
- * 3. Decoder targets
- */
-
--/*
-- * All changes to the interleave configuration occur with this lock held
-- * for write.
-- */
--static DECLARE_RWSEM(cxl_region_rwsem);
--
- static struct cxl_region *to_cxl_region(struct device *dev);
-
- static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
-@@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
- */
- rc = cxl_region_invalidate_memregion(cxlr);
- if (rc)
-- return rc;
-+ goto out;
-
- if (commit) {
- rc = cxl_region_decode_commit(cxlr);
-@@ -1133,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
- }
-
- if (is_cxl_root(parent_port)) {
-- parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
-+ /*
-+ * Root decoder IG is always set to value in CFMWS which
-+ * may be different than this region's IG. We can use the
-+ * region's IG here since interleave_granularity_store()
-+ * does not allow interleaved host-bridges with
-+ * root IG != region IG.
-+ */
-+ parent_ig = p->interleave_granularity;
- parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
- /*
- * For purposes of address bit routing, use power-of-2 math for
-@@ -1195,6 +1196,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
- return rc;
- }
-
-+ if (iw > 8 || iw > cxlsd->nr_targets) {
-+ dev_dbg(&cxlr->dev,
-+ "%s:%s:%s: ways: %d overflows targets: %d\n",
-+ dev_name(port->uport_dev), dev_name(&port->dev),
-+ dev_name(&cxld->dev), iw, cxlsd->nr_targets);
-+ return -ENXIO;
-+ }
-+
- if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
- if (cxld->interleave_ways != iw ||
- cxld->interleave_granularity != ig ||
-@@ -1480,6 +1489,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
- return 0;
- }
-
-+static int cmp_interleave_pos(const void *a, const void *b)
-+{
-+ struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
-+ struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
-+
-+ return cxled_a->pos - cxled_b->pos;
-+}
-+
- static struct cxl_port *next_port(struct cxl_port *port)
- {
- if (!port->parent_dport)
-@@ -1487,119 +1504,127 @@ static struct cxl_port *next_port(struct cxl_port *port)
- return port->parent_dport->port;
- }
-
--static int decoder_match_range(struct device *dev, void *data)
-+static int match_switch_decoder_by_range(struct device *dev, void *data)
- {
-- struct cxl_endpoint_decoder *cxled = data;
- struct cxl_switch_decoder *cxlsd;
-+ struct range *r1, *r2 = data;
-
- if (!is_switch_decoder(dev))
- return 0;
-
- cxlsd = to_cxl_switch_decoder(dev);
-- return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
--}
--
--static void find_positions(const struct cxl_switch_decoder *cxlsd,
-- const struct cxl_port *iter_a,
-- const struct cxl_port *iter_b, int *a_pos,
-- int *b_pos)
--{
-- int i;
-+ r1 = &cxlsd->cxld.hpa_range;
-
-- for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
-- if (cxlsd->target[i] == iter_a->parent_dport)
-- *a_pos = i;
-- else if (cxlsd->target[i] == iter_b->parent_dport)
-- *b_pos = i;
-- if (*a_pos >= 0 && *b_pos >= 0)
-- break;
-- }
-+ if (is_root_decoder(dev))
-+ return range_contains(r1, r2);
-+ return (r1->start == r2->start && r1->end == r2->end);
- }
-
--static int cmp_decode_pos(const void *a, const void *b)
-+static int find_pos_and_ways(struct cxl_port *port, struct range *range,
-+ int *pos, int *ways)
- {
-- struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
-- struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
-- struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
-- struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
-- struct cxl_port *port_a = cxled_to_port(cxled_a);
-- struct cxl_port *port_b = cxled_to_port(cxled_b);
-- struct cxl_port *iter_a, *iter_b, *port = NULL;
- struct cxl_switch_decoder *cxlsd;
-+ struct cxl_port *parent;
- struct device *dev;
-- int a_pos, b_pos;
-- unsigned int seq;
--
-- /* Exit early if any prior sorting failed */
-- if (cxled_a->pos < 0 || cxled_b->pos < 0)
-- return 0;
-+ int rc = -ENXIO;
-
-- /*
-- * Walk up the hierarchy to find a shared port, find the decoder that
-- * maps the range, compare the relative position of those dport
-- * mappings.
-- */
-- for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
-- struct cxl_port *next_a, *next_b;
-+ parent = next_port(port);
-+ if (!parent)
-+ return rc;
-
-- next_a = next_port(iter_a);
-- if (!next_a)
-- break;
-+ dev = device_find_child(&parent->dev, range,
-+ match_switch_decoder_by_range);
-+ if (!dev) {
-+ dev_err(port->uport_dev,
-+ "failed to find decoder mapping %#llx-%#llx\n",
-+ range->start, range->end);
-+ return rc;
-+ }
-+ cxlsd = to_cxl_switch_decoder(dev);
-+ *ways = cxlsd->cxld.interleave_ways;
-
-- for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
-- next_b = next_port(iter_b);
-- if (next_a != next_b)
-- continue;
-- port = next_a;
-+ for (int i = 0; i < *ways; i++) {
-+ if (cxlsd->target[i] == port->parent_dport) {
-+ *pos = i;
-+ rc = 0;
- break;
- }
--
-- if (port)
-- break;
- }
-+ put_device(dev);
-
-- if (!port) {
-- dev_err(cxlmd_a->dev.parent,
-- "failed to find shared port with %s\n",
-- dev_name(cxlmd_b->dev.parent));
-- goto err;
-- }
-+ return rc;
-+}
-
-- dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
-- if (!dev) {
-- struct range *range = &cxled_a->cxld.hpa_range;
-+/**
-+ * cxl_calc_interleave_pos() - calculate an endpoint position in a region
-+ * @cxled: endpoint decoder member of given region
-+ *
-+ * The endpoint position is calculated by traversing the topology from
-+ * the endpoint to the root decoder and iteratively applying this
-+ * calculation:
-+ *
-+ * position = position * parent_ways + parent_pos;
-+ *
-+ * ...where @position is inferred from switch and root decoder target lists.
-+ *
-+ * Return: position >= 0 on success
-+ * -ENXIO on failure
-+ */
-+static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
-+{
-+ struct cxl_port *iter, *port = cxled_to_port(cxled);
-+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-+ struct range *range = &cxled->cxld.hpa_range;
-+ int parent_ways = 0, parent_pos = 0, pos = 0;
-+ int rc;
-
-- dev_err(port->uport_dev,
-- "failed to find decoder that maps %#llx-%#llx\n",
-- range->start, range->end);
-- goto err;
-- }
-+ /*
-+ * Example: the expected interleave order of the 4-way region shown
-+ * below is: mem0, mem2, mem1, mem3
-+ *
-+ * root_port
-+ * / \
-+ * host_bridge_0 host_bridge_1
-+ * | | | |
-+ * mem0 mem1 mem2 mem3
-+ *
-+ * In the example the calculator will iterate twice. The first iteration
-+ * uses the mem position in the host-bridge and the ways of the host-
-+ * bridge to generate the first, or local, position. The second
-+ * iteration uses the host-bridge position in the root_port and the ways
-+ * of the root_port to refine the position.
-+ *
-+ * A trace of the calculation per endpoint looks like this:
-+ * mem0: pos = 0 * 2 + 0 mem2: pos = 0 * 2 + 0
-+ * pos = 0 * 2 + 0 pos = 0 * 2 + 1
-+ * pos: 0 pos: 1
-+ *
-+ * mem1: pos = 0 * 2 + 1 mem3: pos = 0 * 2 + 1
-+ * pos = 1 * 2 + 0 pos = 1 * 2 + 1
-+ * pos: 2 pos = 3
-+ *
-+ * Note that while this example is simple, the method applies to more
-+ * complex topologies, including those with switches.
-+ */
-
-- cxlsd = to_cxl_switch_decoder(dev);
-- do {
-- seq = read_seqbegin(&cxlsd->target_lock);
-- find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
-- } while (read_seqretry(&cxlsd->target_lock, seq));
-+ /* Iterate from endpoint to root_port refining the position */
-+ for (iter = port; iter; iter = next_port(iter)) {
-+ if (is_cxl_root(iter))
-+ break;
-
-- put_device(dev);
-+ rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
-+ if (rc)
-+ return rc;
-
-- if (a_pos < 0 || b_pos < 0) {
-- dev_err(port->uport_dev,
-- "failed to find shared decoder for %s and %s\n",
-- dev_name(cxlmd_a->dev.parent),
-- dev_name(cxlmd_b->dev.parent));
-- goto err;
-+ pos = pos * parent_ways + parent_pos;
- }
-
-- dev_dbg(port->uport_dev, "%s comes %s %s\n",
-- dev_name(cxlmd_a->dev.parent),
-- a_pos - b_pos < 0 ? "before" : "after",
-- dev_name(cxlmd_b->dev.parent));
-+ dev_dbg(&cxlmd->dev,
-+ "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
-+ dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
-+ dev_name(&port->dev), range->start, range->end, pos);
-
-- return a_pos - b_pos;
--err:
-- cxled_a->pos = -1;
-- return 0;
-+ return pos;
- }
-
- static int cxl_region_sort_targets(struct cxl_region *cxlr)
-@@ -1607,22 +1632,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
- struct cxl_region_params *p = &cxlr->params;
- int i, rc = 0;
-
-- sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
-- NULL);
--
- for (i = 0; i < p->nr_targets; i++) {
- struct cxl_endpoint_decoder *cxled = p->targets[i];
-
-+ cxled->pos = cxl_calc_interleave_pos(cxled);
- /*
-- * Record that sorting failed, but still continue to restore
-- * cxled->pos with its ->targets[] position so that follow-on
-- * code paths can reliably do p->targets[cxled->pos] to
-- * self-reference their entry.
-+ * Record that sorting failed, but still continue to calc
-+ * cxled->pos so that follow-on code paths can reliably
-+ * do p->targets[cxled->pos] to self-reference their entry.
- */
- if (cxled->pos < 0)
- rc = -ENXIO;
-- cxled->pos = i;
- }
-+ /* Keep the cxlr target list in interleave position order */
-+ sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
-+ cmp_interleave_pos, NULL);
-
- dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
- return rc;
-@@ -1658,6 +1682,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
- return -ENXIO;
- }
-
-+ if (p->nr_targets >= p->interleave_ways) {
-+ dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
-+ p->nr_targets);
-+ return -EINVAL;
-+ }
-+
- ep_port = cxled_to_port(cxled);
- root_port = cxlrd_to_port(cxlrd);
- dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
-@@ -1750,7 +1780,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
- if (p->nr_targets == p->interleave_ways) {
- rc = cxl_region_setup_targets(cxlr);
- if (rc)
-- goto err_decrement;
-+ return rc;
- p->state = CXL_CONFIG_ACTIVE;
- }
-
-@@ -1761,13 +1791,27 @@ static int cxl_region_attach(struct cxl_region *cxlr,
- .end = p->res->end,
- };
-
-- return 0;
-+ if (p->nr_targets != p->interleave_ways)
-+ return 0;
-
--err_decrement:
-- p->nr_targets--;
-- cxled->pos = -1;
-- p->targets[pos] = NULL;
-- return rc;
-+ /*
-+ * Test the auto-discovery position calculator function
-+ * against this successfully created user-defined region.
-+ * A fail message here means that this interleave config
-+ * will fail when presented as CXL_REGION_F_AUTO.
-+ */
-+ for (int i = 0; i < p->nr_targets; i++) {
-+ struct cxl_endpoint_decoder *cxled = p->targets[i];
-+ int test_pos;
-+
-+ test_pos = cxl_calc_interleave_pos(cxled);
-+ dev_dbg(&cxled->cxld.dev,
-+ "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
-+ (test_pos == cxled->pos) ? "success" : "fail",
-+ test_pos, cxled->pos);
-+ }
-+
-+ return 0;
- }
-
- static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
-@@ -2696,7 +2740,7 @@ err:
- return rc;
- }
-
--static int match_decoder_by_range(struct device *dev, void *data)
-+static int match_root_decoder_by_range(struct device *dev, void *data)
- {
- struct range *r1, *r2 = data;
- struct cxl_root_decoder *cxlrd;
-@@ -2827,7 +2871,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
- int rc;
-
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
-- match_decoder_by_range);
-+ match_root_decoder_by_range);
- if (!cxlrd_dev) {
- dev_err(cxlmd->dev.parent,
- "%s:%s no CXL window for range %#llx:%#llx\n",
-diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
-index 6281127b3e9d9..e0fbe964f6f0a 100644
---- a/drivers/cxl/core/regs.c
-+++ b/drivers/cxl/core/regs.c
-@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
- struct cxl_component_regs *regs,
- unsigned long map_mask)
- {
-- struct device *dev = map->dev;
-+ struct device *host = map->host;
- struct mapinfo {
- const struct cxl_reg_map *rmap;
- void __iomem **addr;
-@@ -225,7 +225,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
- continue;
- phys_addr = map->resource + mi->rmap->offset;
- length = mi->rmap->size;
-- *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
-+ *(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length);
- if (!*(mi->addr))
- return -ENOMEM;
- }
-@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
- int cxl_map_device_regs(const struct cxl_register_map *map,
- struct cxl_device_regs *regs)
- {
-- struct device *dev = map->dev;
-+ struct device *host = map->host;
- resource_size_t phys_addr = map->resource;
- struct mapinfo {
- const struct cxl_reg_map *rmap;
-@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
-
- addr = phys_addr + mi->rmap->offset;
- length = mi->rmap->size;
-- *(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
-+ *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
- if (!*(mi->addr))
- return -ENOMEM;
- }
-@@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- int regloc, i;
-
- *map = (struct cxl_register_map) {
-- .dev = &pdev->dev,
-+ .host = &pdev->dev,
- .resource = CXL_RESOURCE_NONE,
- };
-
-@@ -403,15 +403,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
-
- static int cxl_map_regblock(struct cxl_register_map *map)
- {
-- struct device *dev = map->dev;
-+ struct device *host = map->host;
-
- map->base = ioremap(map->resource, map->max_size);
- if (!map->base) {
-- dev_err(dev, "failed to map registers\n");
-+ dev_err(host, "failed to map registers\n");
- return -ENOMEM;
- }
-
-- dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
-+ dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
- return 0;
- }
-
-@@ -425,28 +425,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
- {
- struct cxl_component_reg_map *comp_map;
- struct cxl_device_reg_map *dev_map;
-- struct device *dev = map->dev;
-+ struct device *host = map->host;
- void __iomem *base = map->base;
-
- switch (map->reg_type) {
- case CXL_REGLOC_RBI_COMPONENT:
- comp_map = &map->component_map;
-- cxl_probe_component_regs(dev, base, comp_map);
-- dev_dbg(dev, "Set up component registers\n");
-+ cxl_probe_component_regs(host, base, comp_map);
-+ dev_dbg(host, "Set up component registers\n");
- break;
- case CXL_REGLOC_RBI_MEMDEV:
- dev_map = &map->device_map;
-- cxl_probe_device_regs(dev, base, dev_map);
-+ cxl_probe_device_regs(host, base, dev_map);
- if (!dev_map->status.valid || !dev_map->mbox.valid ||
- !dev_map->memdev.valid) {
-- dev_err(dev, "registers not found: %s%s%s\n",
-+ dev_err(host, "registers not found: %s%s%s\n",
- !dev_map->status.valid ? "status " : "",
- !dev_map->mbox.valid ? "mbox " : "",
- !dev_map->memdev.valid ? "memdev " : "");
- return -ENXIO;
- }
-
-- dev_dbg(dev, "Probing device registers...\n");
-+ dev_dbg(host, "Probing device registers...\n");
- break;
- default:
- break;
-diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
-index 76d92561af294..b5b015b661eae 100644
---- a/drivers/cxl/cxl.h
-+++ b/drivers/cxl/cxl.h
-@@ -247,7 +247,7 @@ struct cxl_pmu_reg_map {
-
- /**
- * struct cxl_register_map - DVSEC harvested register block mapping parameters
-- * @dev: device for devm operations and logging
-+ * @host: device for devm operations and logging
- * @base: virtual base of the register-block-BAR + @block_offset
- * @resource: physical resource base of the register block
- * @max_size: maximum mapping size to perform register search
-@@ -257,7 +257,7 @@ struct cxl_pmu_reg_map {
- * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
- */
- struct cxl_register_map {
-- struct device *dev;
-+ struct device *host;
- void __iomem *base;
- resource_size_t resource;
- resource_size_t max_size;
-diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
-index 706f8a6d1ef43..6933bc20e76b6 100644
---- a/drivers/cxl/cxlmem.h
-+++ b/drivers/cxl/cxlmem.h
-@@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
- return is_cxl_memdev(port->uport_dev);
- }
-
--struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
-+struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
-+ struct cxl_dev_state *cxlds);
-+int devm_cxl_sanitize_setup_notifier(struct device *host,
-+ struct cxl_memdev *cxlmd);
- struct cxl_memdev_state;
--int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
-+int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds);
- int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
- resource_size_t base, resource_size_t len,
- resource_size_t skipped);
-@@ -360,16 +363,16 @@ struct cxl_fw_state {
- *
- * @state: state of last security operation
- * @enabled_cmds: All security commands enabled in the CEL
-- * @poll: polling for sanitization is enabled, device has no mbox irq support
- * @poll_tmo_secs: polling timeout
-+ * @sanitize_active: sanitize completion pending
- * @poll_dwork: polling work item
- * @sanitize_node: sanitation sysfs file to notify
- */
- struct cxl_security_state {
- unsigned long state;
- DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
-- bool poll;
- int poll_tmo_secs;
-+ bool sanitize_active;
- struct delayed_work poll_dwork;
- struct kernfs_node *sanitize_node;
- };
-@@ -883,7 +886,7 @@ static inline void cxl_mem_active_dec(void)
- }
- #endif
-
--int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
-+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
-
- struct cxl_hdm {
- struct cxl_component_regs regs;
-diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
-index 44a21ab7add51..8bece1e2e2491 100644
---- a/drivers/cxl/pci.c
-+++ b/drivers/cxl/pci.c
-@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
- reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
- opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
- if (opcode == CXL_MBOX_OP_SANITIZE) {
-+ mutex_lock(&mds->mbox_mutex);
- if (mds->security.sanitize_node)
-- sysfs_notify_dirent(mds->security.sanitize_node);
--
-- dev_dbg(cxlds->dev, "Sanitization operation ended\n");
-+ mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
-+ mutex_unlock(&mds->mbox_mutex);
- } else {
- /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
- rcuwait_wake_up(&mds->mbox_wait);
-@@ -152,18 +152,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
- mutex_lock(&mds->mbox_mutex);
- if (cxl_mbox_background_complete(cxlds)) {
- mds->security.poll_tmo_secs = 0;
-- put_device(cxlds->dev);
--
- if (mds->security.sanitize_node)
- sysfs_notify_dirent(mds->security.sanitize_node);
-+ mds->security.sanitize_active = false;
-
- dev_dbg(cxlds->dev, "Sanitization operation ended\n");
- } else {
- int timeout = mds->security.poll_tmo_secs + 10;
-
- mds->security.poll_tmo_secs = min(15 * 60, timeout);
-- queue_delayed_work(system_wq, &mds->security.poll_dwork,
-- timeout * HZ);
-+ schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
- }
- mutex_unlock(&mds->mbox_mutex);
- }
-@@ -295,18 +293,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
- * and allow userspace to poll(2) for completion.
- */
- if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
-- if (mds->security.poll) {
-- /* hold the device throughout */
-- get_device(cxlds->dev);
--
-- /* give first timeout a second */
-- timeout = 1;
-- mds->security.poll_tmo_secs = timeout;
-- queue_delayed_work(system_wq,
-- &mds->security.poll_dwork,
-- timeout * HZ);
-- }
--
-+ if (mds->security.sanitize_active)
-+ return -EBUSY;
-+
-+ /* give first timeout a second */
-+ timeout = 1;
-+ mds->security.poll_tmo_secs = timeout;
-+ mds->security.sanitize_active = true;
-+ schedule_delayed_work(&mds->security.poll_dwork,
-+ timeout * HZ);
- dev_dbg(dev, "Sanitization operation started\n");
- goto success;
- }
-@@ -389,7 +384,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
- const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
- struct device *dev = cxlds->dev;
- unsigned long timeout;
-+ int irq, msgnum;
- u64 md_status;
-+ u32 ctrl;
-
- timeout = jiffies + mbox_ready_timeout * HZ;
- do {
-@@ -437,33 +434,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
- dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
-
- rcuwait_init(&mds->mbox_wait);
-+ INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
-
-- if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
-- u32 ctrl;
-- int irq, msgnum;
-- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
--
-- msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
-- irq = pci_irq_vector(pdev, msgnum);
-- if (irq < 0)
-- goto mbox_poll;
--
-- if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
-- goto mbox_poll;
-+ /* background command interrupts are optional */
-+ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
-+ return 0;
-
-- /* enable background command mbox irq support */
-- ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-- ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
-- writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-+ msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
-+ irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
-+ if (irq < 0)
-+ return 0;
-
-+ if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
- return 0;
-- }
-
--mbox_poll:
-- mds->security.poll = true;
-- INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
-+ dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
-+ /* enable background command mbox irq support */
-+ ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-+ ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
-+ writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-
-- dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
- return 0;
- }
-
-@@ -484,7 +474,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
- resource_size_t component_reg_phys;
-
- *map = (struct cxl_register_map) {
-- .dev = &pdev->dev,
-+ .host = &pdev->dev,
- .resource = CXL_RESOURCE_NONE,
- };
-
-@@ -882,11 +872,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- if (rc)
- return rc;
-
-- cxlmd = devm_cxl_add_memdev(cxlds);
-+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
- if (IS_ERR(cxlmd))
- return PTR_ERR(cxlmd);
-
-- rc = cxl_memdev_setup_fw_upload(mds);
-+ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
-+ if (rc)
-+ return rc;
-+
-+ rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
- if (rc)
- return rc;
-
-diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
-index 39ac069cabc75..74893c06aa087 100644
---- a/drivers/devfreq/event/rockchip-dfi.c
-+++ b/drivers/devfreq/event/rockchip-dfi.c
-@@ -193,14 +193,15 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
- return dev_err_probe(dev, PTR_ERR(data->clk),
- "Cannot get the clk pclk_ddr_mon\n");
-
-- /* try to find the optional reference to the pmu syscon */
- node = of_parse_phandle(np, "rockchip,pmu", 0);
-- if (node) {
-- data->regmap_pmu = syscon_node_to_regmap(node);
-- of_node_put(node);
-- if (IS_ERR(data->regmap_pmu))
-- return PTR_ERR(data->regmap_pmu);
-- }
-+ if (!node)
-+ return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");
-+
-+ data->regmap_pmu = syscon_node_to_regmap(node);
-+ of_node_put(node);
-+ if (IS_ERR(data->regmap_pmu))
-+ return PTR_ERR(data->regmap_pmu);
-+
- data->dev = dev;
-
- desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
-diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
-index 38b4110378de0..eb8b733065b24 100644
---- a/drivers/dma-buf/dma-resv.c
-+++ b/drivers/dma-buf/dma-resv.c
-@@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
-
- dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
- if ((old->context == fence->context && old_usage >= usage &&
-- dma_fence_is_later(fence, old)) ||
-+ dma_fence_is_later_or_same(fence, old)) ||
- dma_fence_is_signaled(old)) {
- dma_resv_list_set(fobj, i, fence, usage);
- dma_fence_put(old);
-diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
-index dc096839ac637..c5e679070e463 100644
---- a/drivers/dma/idxd/Makefile
-+++ b/drivers/dma/idxd/Makefile
-@@ -1,12 +1,12 @@
- ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
-
-+obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
-+idxd_bus-y := bus.o
-+
- obj-$(CONFIG_INTEL_IDXD) += idxd.o
- idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
-
- idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
-
--obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
--idxd_bus-y := bus.o
--
- obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
- idxd_compat-y := compat.o
-diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
-index 1b046d9a3a269..16d342654da2b 100644
---- a/drivers/dma/pxa_dma.c
-+++ b/drivers/dma/pxa_dma.c
-@@ -722,7 +722,6 @@ static void pxad_free_desc(struct virt_dma_desc *vd)
- dma_addr_t dma;
- struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
-
-- BUG_ON(sw_desc->nb_desc == 0);
- for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
- if (i > 0)
- dma = sw_desc->hw_desc[i - 1]->ddadr;
-diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
-index bae08b3f55c73..f414efdbd809e 100644
---- a/drivers/dma/stm32-mdma.c
-+++ b/drivers/dma/stm32-mdma.c
-@@ -489,7 +489,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
- src_maxburst = chan->dma_config.src_maxburst;
- dst_maxburst = chan->dma_config.dst_maxburst;
-
-- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
-+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
- ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
- ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
-
-@@ -965,7 +965,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
- if (!desc)
- return NULL;
-
-- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
-+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
- ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
- ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
- cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
-diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
-index aa8e2e8ac2609..33d6d931b33bb 100644
---- a/drivers/dma/ti/edma.c
-+++ b/drivers/dma/ti/edma.c
-@@ -2401,7 +2401,7 @@ static int edma_probe(struct platform_device *pdev)
- if (irq < 0 && node)
- irq = irq_of_parse_and_map(node, 0);
-
-- if (irq >= 0) {
-+ if (irq > 0) {
- irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
- dev_name(dev));
- ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
-@@ -2417,7 +2417,7 @@ static int edma_probe(struct platform_device *pdev)
- if (irq < 0 && node)
- irq = irq_of_parse_and_map(node, 2);
-
-- if (irq >= 0) {
-+ if (irq > 0) {
- irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
- dev_name(dev));
- ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
-diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
-index aa597cda0d887..2828e9573e90b 100644
---- a/drivers/firewire/core-device.c
-+++ b/drivers/firewire/core-device.c
-@@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
- fw_unit_attributes,
- &unit->attribute_group);
-
-- if (device_register(&unit->device) < 0)
-- goto skip_unit;
--
- fw_device_get(device);
-- continue;
--
-- skip_unit:
-- kfree(unit);
-+ if (device_register(&unit->device) < 0) {
-+ put_device(&unit->device);
-+ continue;
-+ }
- }
- }
-
-diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
-index 7edf2c95282fa..e779d866022b9 100644
---- a/drivers/firewire/sbp2.c
-+++ b/drivers/firewire/sbp2.c
-@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
- sdev->use_10_for_rw = 1;
-
- if (sbp2_param_exclusive_login) {
-- sdev->manage_system_start_stop = true;
-- sdev->manage_runtime_start_stop = true;
-- sdev->manage_shutdown = true;
-+ sdev->manage_system_start_stop = 1;
-+ sdev->manage_runtime_start_stop = 1;
-+ sdev->manage_shutdown = 1;
- }
-
- if (sdev->type == TYPE_ROM)
-diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
-index 2b8bfcd010f5f..7865438b36960 100644
---- a/drivers/firmware/arm_ffa/bus.c
-+++ b/drivers/firmware/arm_ffa/bus.c
-@@ -193,6 +193,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- dev->release = ffa_release_device;
- dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
-
-+ ffa_dev->id = id;
- ffa_dev->vm_id = vm_id;
- ffa_dev->ops = ops;
- uuid_copy(&ffa_dev->uuid, uuid);
-diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
-index 121f4fc903cd5..7cd6b1564e801 100644
---- a/drivers/firmware/arm_ffa/driver.c
-+++ b/drivers/firmware/arm_ffa/driver.c
-@@ -587,17 +587,9 @@ static int ffa_partition_info_get(const char *uuid_str,
- return 0;
- }
-
--static void _ffa_mode_32bit_set(struct ffa_device *dev)
--{
-- dev->mode_32bit = true;
--}
--
- static void ffa_mode_32bit_set(struct ffa_device *dev)
- {
-- if (drv_info->version > FFA_VERSION_1_0)
-- return;
--
-- _ffa_mode_32bit_set(dev);
-+ dev->mode_32bit = true;
- }
-
- static int ffa_sync_send_receive(struct ffa_device *dev,
-@@ -706,7 +698,7 @@ static void ffa_setup_partitions(void)
-
- if (drv_info->version > FFA_VERSION_1_0 &&
- !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
-- _ffa_mode_32bit_set(ffa_dev);
-+ ffa_mode_32bit_set(ffa_dev);
- }
- kfree(pbuf);
- }
-diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
-index 135278ddaf627..79fb687bb90f9 100644
---- a/drivers/firmware/efi/unaccepted_memory.c
-+++ b/drivers/firmware/efi/unaccepted_memory.c
-@@ -100,7 +100,7 @@ retry:
- * overlap on physical address level.
- */
- list_for_each_entry(entry, &accepting_list, list) {
-- if (entry->end < range.start)
-+ if (entry->end <= range.start)
- continue;
- if (entry->start >= range.end)
- continue;
-diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
-index 06fe8aca870d7..69831f1d91e3f 100644
---- a/drivers/firmware/qcom_scm.c
-+++ b/drivers/firmware/qcom_scm.c
-@@ -167,6 +167,12 @@ static enum qcom_scm_convention __get_convention(void)
- if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
- return qcom_scm_convention;
-
-+ /*
-+ * Per the "SMC calling convention specification", the 64-bit calling
-+ * convention can only be used when the client is 64-bit, otherwise
-+ * system will encounter the undefined behaviour.
-+ */
-+#if IS_ENABLED(CONFIG_ARM64)
- /*
- * Device isn't required as there is only one argument - no device
- * needed to dma_map_single to secure world
-@@ -187,6 +193,7 @@ static enum qcom_scm_convention __get_convention(void)
- forced = true;
- goto found;
- }
-+#endif
-
- probed_convention = SMC_CONVENTION_ARM_32;
- ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
-diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
-index 51d062e0c3f12..c1590d3aa9cb7 100644
---- a/drivers/firmware/tegra/bpmp.c
-+++ b/drivers/firmware/tegra/bpmp.c
-@@ -313,6 +313,8 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
- return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
- }
-
-+static int __maybe_unused tegra_bpmp_resume(struct device *dev);
-+
- int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
- struct tegra_bpmp_message *msg)
- {
-@@ -325,6 +327,14 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
- if (!tegra_bpmp_message_valid(msg))
- return -EINVAL;
-
-+ if (bpmp->suspended) {
-+ /* Reset BPMP IPC channels during resume based on flags passed */
-+ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
-+ tegra_bpmp_resume(bpmp->dev);
-+ else
-+ return -EAGAIN;
-+ }
-+
- channel = bpmp->tx_channel;
-
- spin_lock(&bpmp->atomic_tx_lock);
-@@ -364,6 +374,14 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
- if (!tegra_bpmp_message_valid(msg))
- return -EINVAL;
-
-+ if (bpmp->suspended) {
-+ /* Reset BPMP IPC channels during resume based on flags passed */
-+ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
-+ tegra_bpmp_resume(bpmp->dev);
-+ else
-+ return -EAGAIN;
-+ }
-+
- channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
- msg->tx.size);
- if (IS_ERR(channel))
-@@ -796,10 +814,21 @@ deinit:
- return err;
- }
-
-+static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
-+{
-+ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
-+
-+ bpmp->suspended = true;
-+
-+ return 0;
-+}
-+
- static int __maybe_unused tegra_bpmp_resume(struct device *dev)
- {
- struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
-
-+ bpmp->suspended = false;
-+
- if (bpmp->soc->ops->resume)
- return bpmp->soc->ops->resume(bpmp);
- else
-@@ -807,6 +836,7 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
- }
-
- static const struct dev_pm_ops tegra_bpmp_pm_ops = {
-+ .suspend_noirq = tegra_bpmp_suspend,
- .resume_noirq = tegra_bpmp_resume,
- };
-
-diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
-index 26a37f47f4ca5..66c3846c91476 100644
---- a/drivers/firmware/ti_sci.c
-+++ b/drivers/firmware/ti_sci.c
-@@ -190,19 +190,6 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
- return 0;
- }
-
--/**
-- * ti_sci_debugfs_destroy() - clean up log debug file
-- * @pdev: platform device pointer
-- * @info: Pointer to SCI entity information
-- */
--static void ti_sci_debugfs_destroy(struct platform_device *pdev,
-- struct ti_sci_info *info)
--{
-- if (IS_ERR(info->debug_region))
-- return;
--
-- debugfs_remove(info->d);
--}
- #else /* CONFIG_DEBUG_FS */
- static inline int ti_sci_debugfs_create(struct platform_device *dev,
- struct ti_sci_info *info)
-@@ -3449,43 +3436,12 @@ out:
- return ret;
- }
-
--static int ti_sci_remove(struct platform_device *pdev)
--{
-- struct ti_sci_info *info;
-- struct device *dev = &pdev->dev;
-- int ret = 0;
--
-- of_platform_depopulate(dev);
--
-- info = platform_get_drvdata(pdev);
--
-- if (info->nb.notifier_call)
-- unregister_restart_handler(&info->nb);
--
-- mutex_lock(&ti_sci_list_mutex);
-- if (info->users)
-- ret = -EBUSY;
-- else
-- list_del(&info->node);
-- mutex_unlock(&ti_sci_list_mutex);
--
-- if (!ret) {
-- ti_sci_debugfs_destroy(pdev, info);
--
-- /* Safe to free channels since no more users */
-- mbox_free_channel(info->chan_tx);
-- mbox_free_channel(info->chan_rx);
-- }
--
-- return ret;
--}
--
- static struct platform_driver ti_sci_driver = {
- .probe = ti_sci_probe,
-- .remove = ti_sci_remove,
- .driver = {
- .name = "ti-sci",
- .of_match_table = of_match_ptr(ti_sci_of_match),
-+ .suppress_bind_attrs = true,
- },
- };
- module_platform_driver(ti_sci_driver);
-diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
-index 44bf1709a6488..a8e5ac95cf170 100644
---- a/drivers/gpio/gpio-sim.c
-+++ b/drivers/gpio/gpio-sim.c
-@@ -1438,10 +1438,10 @@ static const struct config_item_type gpio_sim_device_config_group_type = {
- static struct config_group *
- gpio_sim_config_make_device_group(struct config_group *group, const char *name)
- {
-- struct gpio_sim_device *dev __free(kfree) = NULL;
- int id;
-
-- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-+ struct gpio_sim_device *dev __free(kfree) = kzalloc(sizeof(*dev),
-+ GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
-diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
-index 51e41676de0b8..5d04720107ef5 100644
---- a/drivers/gpio/gpiolib-acpi.c
-+++ b/drivers/gpio/gpiolib-acpi.c
-@@ -1655,6 +1655,26 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
- .ignore_wake = "SYNA1202:00@16",
- },
- },
-+ {
-+ /*
-+ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
-+ * a "dolby" button. At the ACPI level an _AEI event-handler
-+ * is connected which sets an ACPI variable to 1 on both
-+ * edges. This variable can be polled + cleared to 0 using
-+ * WMI. But since the variable is set on both edges the WMI
-+ * interface is pretty useless even when polling.
-+ * So instead the x86-android-tablets code instantiates
-+ * a gpio-keys platform device for it.
-+ * Ignore the _AEI handler for the pin, so that it is not busy.
-+ */
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
-+ },
-+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
-+ .ignore_interrupt = "INT33FC:00@3",
-+ },
-+ },
- {} /* Terminating entry */
- };
-
-diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
-index 531faabead0f4..d9525d95e818d 100644
---- a/drivers/gpio/gpiolib-of.c
-+++ b/drivers/gpio/gpiolib-of.c
-@@ -512,6 +512,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
- #if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
- { "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" },
- #endif
-+#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
-+ { "i2s1-in-sel-gpio1", NULL, "mediatek,mt2701-cs42448-machine" },
-+ { "i2s1-in-sel-gpio2", NULL, "mediatek,mt2701-cs42448-machine" },
-+#endif
- #if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
- { "reset", "gpio-reset", "ti,tlv320aic3x" },
- { "reset", "gpio-reset", "ti,tlv320aic33" },
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
-index 38ccec913f009..f3a09ecb76992 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
-@@ -29,6 +29,7 @@
- #include "amdgpu.h"
- #include "atom.h"
-
-+#include <linux/device.h>
- #include <linux/pci.h>
- #include <linux/slab.h>
- #include <linux/acpi.h>
-@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
- if (adev->flags & AMD_IS_APU)
- return false;
-
-+ /* ATRM is for on-platform devices only */
-+ if (dev_is_removable(&adev->pdev->dev))
-+ return false;
-+
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- dhandle = ACPI_HANDLE(&pdev->dev);
- if (!dhandle)
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
-index b6298e901cbd4..9a53ca555e708 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
-@@ -183,6 +183,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
- }
-
- rcu_read_unlock();
-+ *result = NULL;
- return -ENOENT;
- }
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
-index d93a8961274c6..f4fd0d5bd9b68 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
-@@ -1411,7 +1411,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
- if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else if (r != -ERESTARTSYS && r != -EAGAIN)
-- DRM_ERROR("Failed to process the buffer list %d!\n", r);
-+ DRM_DEBUG("Failed to process the buffer list %d!\n", r);
- goto error_fini;
- }
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
-index a4faea4fa0b59..05405da51e7a2 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
-@@ -748,6 +748,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
- ssize_t result = 0;
- int r;
-
-+ if (!adev->smc_rreg)
-+ return -EPERM;
-+
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
-@@ -804,6 +807,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
- ssize_t result = 0;
- int r;
-
-+ if (!adev->smc_wreg)
-+ return -EPERM;
-+
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-index 2b8356699f235..a164857bdb9f4 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-@@ -43,6 +43,7 @@
- #include <drm/drm_fb_helper.h>
- #include <drm/drm_probe_helper.h>
- #include <drm/amdgpu_drm.h>
-+#include <linux/device.h>
- #include <linux/vgaarb.h>
- #include <linux/vga_switcheroo.h>
- #include <linux/efi.h>
-@@ -2018,7 +2019,6 @@ out:
- */
- static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
- {
-- struct drm_device *dev = adev_to_drm(adev);
- struct pci_dev *parent;
- int i, r;
- bool total;
-@@ -2089,7 +2089,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- ((adev->flags & AMD_IS_APU) == 0) &&
-- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
-+ !dev_is_removable(&adev->pdev->dev))
- adev->flags |= AMD_IS_PX;
-
- if (!(adev->flags & AMD_IS_APU)) {
-@@ -2103,6 +2103,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
- adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
-+ if (!amdgpu_device_pcie_dynamic_switching_supported())
-+ adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
-
- total = true;
- for (i = 0; i < adev->num_ip_blocks; i++) {
-@@ -3901,7 +3903,7 @@ fence_driver_init:
-
- px = amdgpu_device_supports_px(ddev);
-
-- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
-+ if (px || (!dev_is_removable(&adev->pdev->dev) &&
- apple_gmux_detect(NULL, NULL)))
- vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, px);
-@@ -4046,7 +4048,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
-
- px = amdgpu_device_supports_px(adev_to_drm(adev));
-
-- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
-+ if (px || (!dev_is_removable(&adev->pdev->dev) &&
- apple_gmux_detect(NULL, NULL)))
- vga_switcheroo_unregister_client(adev->pdev);
-
-@@ -5183,7 +5185,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
-- if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
-+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
-+ amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
-
- ksys_sync_helper();
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
-index 7d5e7ad28ba82..68a901287264f 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
-@@ -93,6 +93,7 @@
- MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
-
- #define mmRCC_CONFIG_MEMSIZE 0xde3
-+#define mmMP0_SMN_C2PMSG_33 0x16061
- #define mmMM_INDEX 0x0
- #define mmMM_INDEX_HI 0x6
- #define mmMM_DATA 0x1
-@@ -231,8 +232,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
- static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
- uint8_t *binary)
- {
-- uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-- int ret = 0;
-+ uint64_t vram_size;
-+ u32 msg;
-+ int i, ret = 0;
-+
-+ /* It can take up to a second for IFWI init to complete on some dGPUs,
-+ * but generally it should be in the 60-100ms range. Normally this starts
-+ * as soon as the device gets power so by the time the OS loads this has long
-+ * completed. However, when a card is hotplugged via e.g., USB4, we need to
-+ * wait for this to complete. Once the C2PMSG is updated, we can
-+ * continue.
-+ */
-+ if (dev_is_removable(&adev->pdev->dev)) {
-+ for (i = 0; i < 1000; i++) {
-+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
-+ if (msg & 0x80000000)
-+ break;
-+ msleep(1);
-+ }
-+ }
-+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-
- if (vram_size) {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
-index 363e6a2cad8c2..578aeba49ea8e 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
-@@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
- adev->have_disp_power_ref = true;
- return ret;
- }
-- /* if we have no active crtcs, then drop the power ref
-- * we got before
-+ /* if we have no active crtcs, then go to
-+ * drop the power ref we got before
- */
-- if (!active && adev->have_disp_power_ref) {
-- pm_runtime_put_autosuspend(dev->dev);
-+ if (!active && adev->have_disp_power_ref)
- adev->have_disp_power_ref = false;
-- }
--
- out:
- /* drop the power reference we got coming in here */
- pm_runtime_put_autosuspend(dev->dev);
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
-index 81edf66dbea8b..2c35036e4ba25 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
-@@ -2195,6 +2195,8 @@ retry_init:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
-
-+ pci_wake_from_d3(pdev, TRUE);
-+
- /*
- * For runpm implemented via BACO, PMFW will handle the
- * timing for BACO in and out:
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
-index 2382921710ece..ef4cb921781d7 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
-@@ -384,9 +384,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring = &kiq->ring;
- u32 domain = AMDGPU_GEM_DOMAIN_GTT;
-
-+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
- /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
- domain |= AMDGPU_GEM_DOMAIN_VRAM;
-+#endif
-
- /* create MQD for KIQ */
- if (!adev->enable_mes_kiq && !ring->mqd_obj) {
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
-index 6c6184f0dbc17..508f02eb0cf8f 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
-@@ -28,7 +28,7 @@
- #define AMDGPU_IH_MAX_NUM_IVS 32
-
- #define IH_RING_SIZE (256 * 1024)
--#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */
-+#define IH_SW_RING_SIZE (16 * 1024) /* enough for 512 CAM entries */
-
- struct amdgpu_device;
- struct amdgpu_iv_entry;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
-index b6015157763af..6aa75052309ff 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
-@@ -556,8 +556,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
-+ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
-+ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
-+ mutex_lock(&adev->srbm_mutex);
-+ amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
-+ }
-+
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
-+ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
-+ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
-+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
-+ mutex_unlock(&adev->srbm_mutex);
-+ }
-+
- amdgpu_bo_unreserve(q->mqd_obj);
- }
-
-@@ -993,9 +1005,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
-+ ring->me = adev->gfx.gfx_ring[0].me;
-+ ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
-+ ring->me = adev->gfx.compute_ring[0].me;
-+ ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
-index 163445baa4fc8..6f6341f702789 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
-@@ -1373,7 +1373,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
- {
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-
-- sysfs_remove_file_from_group(&adev->dev->kobj,
-+ if (adev->dev->kobj.sd)
-+ sysfs_remove_file_from_group(&adev->dev->kobj,
- &con->badpages_attr.attr,
- RAS_FS_NAME);
- }
-@@ -1390,7 +1391,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
- .attrs = attrs,
- };
-
-- sysfs_remove_group(&adev->dev->kobj, &group);
-+ if (adev->dev->kobj.sd)
-+ sysfs_remove_group(&adev->dev->kobj, &group);
-
- return 0;
- }
-@@ -1437,7 +1439,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
- if (!obj || !obj->attr_inuse)
- return -EINVAL;
-
-- sysfs_remove_file_from_group(&adev->dev->kobj,
-+ if (adev->dev->kobj.sd)
-+ sysfs_remove_file_from_group(&adev->dev->kobj,
- &obj->sysfs_attr.attr,
- RAS_FS_NAME);
- obj->attr_inuse = 0;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
-index 595d5e535aca6..9d82701d365bb 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
-@@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
- control->i2c_address = EEPROM_I2C_MADDR_0;
- return true;
- case IP_VERSION(13, 0, 0):
-+ if (strnstr(atom_ctx->vbios_pn, "D707",
-+ sizeof(atom_ctx->vbios_pn)))
-+ control->i2c_address = EEPROM_I2C_MADDR_0;
-+ else
-+ control->i2c_address = EEPROM_I2C_MADDR_4;
-+ return true;
- case IP_VERSION(13, 0, 6):
- case IP_VERSION(13, 0, 10):
- control->i2c_address = EEPROM_I2C_MADDR_4;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
-index 36b55d2bd51a9..03b4bcfca1963 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
-@@ -292,8 +292,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
- void *ptr;
- int i, idx;
-
-+ bool in_ras_intr = amdgpu_ras_intr_triggered();
-+
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
-+ /* err_event_athub will corrupt VCPU buffer, so we need to
-+ * restore fw data and clear buffer in amdgpu_vcn_resume() */
-+ if (in_ras_intr)
-+ return 0;
-+
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
-index 7148a216ae2fe..db6fc0cb18eb8 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
-@@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
-
- for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
- mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
-+ if (!mode)
-+ continue;
- drm_mode_probed_add(connector, mode);
- }
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
-index 82f25996ff5ef..89c8e51cd3323 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
-@@ -1095,8 +1095,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
- bo = gem_to_amdgpu_bo(gobj);
- }
- mem = bo->tbo.resource;
-- if (mem->mem_type == TTM_PL_TT ||
-- mem->mem_type == AMDGPU_PL_PREEMPT)
-+ if (mem && (mem->mem_type == TTM_PL_TT ||
-+ mem->mem_type == AMDGPU_PL_PREEMPT))
- pages_addr = bo->tbo.ttm->dma_address;
- }
-
-@@ -2125,7 +2125,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
- * Returns:
- * 0 for success, error for failure.
- */
--int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
-+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-+ int32_t xcp_id)
- {
- struct amdgpu_bo *root_bo;
- struct amdgpu_bo_vm *root;
-@@ -2144,6 +2145,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
- INIT_LIST_HEAD(&vm->done);
- INIT_LIST_HEAD(&vm->pt_freed);
- INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
-+ INIT_KFIFO(vm->faults);
-
- r = amdgpu_vm_init_entities(adev, vm);
- if (r)
-@@ -2178,34 +2180,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
- false, &root, xcp_id);
- if (r)
- goto error_free_delayed;
-- root_bo = &root->bo;
-+
-+ root_bo = amdgpu_bo_ref(&root->bo);
- r = amdgpu_bo_reserve(root_bo, true);
-- if (r)
-- goto error_free_root;
-+ if (r) {
-+ amdgpu_bo_unref(&root->shadow);
-+ amdgpu_bo_unref(&root_bo);
-+ goto error_free_delayed;
-+ }
-
-+ amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
- r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
- if (r)
-- goto error_unreserve;
--
-- amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
-+ goto error_free_root;
-
- r = amdgpu_vm_pt_clear(adev, vm, root, false);
- if (r)
-- goto error_unreserve;
-+ goto error_free_root;
-
- amdgpu_bo_unreserve(vm->root.bo);
--
-- INIT_KFIFO(vm->faults);
-+ amdgpu_bo_unref(&root_bo);
-
- return 0;
-
--error_unreserve:
-- amdgpu_bo_unreserve(vm->root.bo);
--
- error_free_root:
-- amdgpu_bo_unref(&root->shadow);
-+ amdgpu_vm_pt_free_root(adev, vm);
-+ amdgpu_bo_unreserve(vm->root.bo);
- amdgpu_bo_unref(&root_bo);
-- vm->root.bo = NULL;
-
- error_free_delayed:
- dma_fence_put(vm->last_tlb_flush);
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
-index 9032d7a24d7cd..306252cd67fd7 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
-@@ -6457,11 +6457,11 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
- nv_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[mqd_idx])
-- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-+ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else {
- /* restore mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[mqd_idx])
-- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
-+ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
- /* reset the ring */
- ring->wptr = 0;
- *ring->wptr_cpu_addr = 0;
-@@ -6735,7 +6735,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
- if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
- if (adev->gfx.kiq[0].mqd_backup)
-- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
-+ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
-
- /* reset ring buffer */
- ring->wptr = 0;
-@@ -6758,7 +6758,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
- mutex_unlock(&adev->srbm_mutex);
-
- if (adev->gfx.kiq[0].mqd_backup)
-- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
-+ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
- }
-
- return 0;
-@@ -6779,11 +6779,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
- mutex_unlock(&adev->srbm_mutex);
-
- if (adev->gfx.mec.mqd_backup[mqd_idx])
-- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-+ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else {
- /* restore MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
-- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-+ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
- /* reset ring buffer */
- ring->wptr = 0;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
-index 762d7a19f1be1..b346eb0a0db11 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
-@@ -83,6 +83,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
- MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
- MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
-
-+static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
-+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
-+};
-+
- static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
- {
- SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
-@@ -275,6 +279,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
- default:
- break;
- }
-+ soc15_program_register_sequence(adev,
-+ golden_settings_gc_11_0,
-+ (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
-+
- }
-
- static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
-@@ -390,7 +398,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
-
-- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
-+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
-@@ -3684,11 +3692,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[mqd_idx])
-- memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-+ memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else {
- /* restore mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[mqd_idx])
-- memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
-+ memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
- /* reset the ring */
- ring->wptr = 0;
- *ring->wptr_cpu_addr = 0;
-@@ -3977,7 +3985,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
- if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
- if (adev->gfx.kiq[0].mqd_backup)
-- memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
-+ memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
-
- /* reset ring buffer */
- ring->wptr = 0;
-@@ -4000,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
- mutex_unlock(&adev->srbm_mutex);
-
- if (adev->gfx.kiq[0].mqd_backup)
-- memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
-+ memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
- }
-
- return 0;
-@@ -4021,11 +4029,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
- mutex_unlock(&adev->srbm_mutex);
-
- if (adev->gfx.mec.mqd_backup[mqd_idx])
-- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-+ memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else {
- /* restore MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
-- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-+ memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
- /* reset ring buffer */
- ring->wptr = 0;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
-index 885ebd703260f..1943beb135c4c 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
-@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- memset(&ib, 0, sizeof(ib));
-- r = amdgpu_ib_get(adev, NULL, 16,
-- AMDGPU_IB_POOL_DIRECT, &ib);
-+
-+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r)
- goto err1;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-index fd61574a737cb..2e23d08b45f4a 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-@@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- memset(&ib, 0, sizeof(ib));
-- r = amdgpu_ib_get(adev, NULL, 16,
-- AMDGPU_IB_POOL_DIRECT, &ib);
-+
-+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r)
- goto err1;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
-index 18ce5fe45f6f8..e481ef73af6e5 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
-@@ -296,8 +296,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- memset(&ib, 0, sizeof(ib));
-- r = amdgpu_ib_get(adev, NULL, 16,
-- AMDGPU_IB_POOL_DIRECT, &ib);
-+
-+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r)
- goto err1;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
-index 4038455d79984..ef368ca79a668 100644
---- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
-+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
-@@ -28,6 +28,7 @@
- #include "nbio/nbio_2_3_offset.h"
- #include "nbio/nbio_2_3_sh_mask.h"
- #include <uapi/linux/kfd_ioctl.h>
-+#include <linux/device.h>
- #include <linux/pci.h>
-
- #define smnPCIE_CONFIG_CNTL 0x11180044
-@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
-
- data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
-
-- if (pci_is_thunderbolt_attached(adev->pdev))
-+ if (dev_is_removable(&adev->pdev->dev))
- data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
- else
- data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
-@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
-
- def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
- data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
-- if (pci_is_thunderbolt_attached(adev->pdev))
-+ if (dev_is_removable(&adev->pdev->dev))
- data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
- else
- data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
-diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
-index 469eed084976c..52d80f286b3dd 100644
---- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
-@@ -59,6 +59,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
- /* Read USB-PD from LFB */
- #define GFX_CMD_USB_PD_USE_LFB 0x480
-
-+/* Retry times for vmbx ready wait */
-+#define PSP_VMBX_POLLING_LIMIT 20000
-+
- /* VBIOS gfl defines */
- #define MBOX_READY_MASK 0x80000000
- #define MBOX_STATUS_MASK 0x0000FFFF
-@@ -138,7 +141,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
- struct amdgpu_device *adev = psp->adev;
- int retry_loop, ret;
-
-- for (retry_loop = 0; retry_loop < 70; retry_loop++) {
-+ for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
- /* Wait for bootloader to signify that it is
- ready by having bit 31 of C2PMSG_33 set to 1 */
- ret = psp_wait_for(
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
-index c7991e07b6be5..a7697ec8188e0 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
-@@ -268,7 +268,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
- SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
- switch (encoding) {
- case SQ_INTERRUPT_WORD_ENCODING_AUTO:
-- pr_debug(
-+ pr_debug_ratelimited(
- "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
- SE_ID),
-@@ -284,7 +284,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
- THREAD_TRACE_UTC_ERROR));
- break;
- case SQ_INTERRUPT_WORD_ENCODING_INST:
-- pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
-+ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
-@@ -310,7 +310,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
- case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
- ERR_TYPE);
-- pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
-+ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
-index f933bd231fb9c..2a65792fd1162 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
-@@ -150,7 +150,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
-
- static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
- {
-- pr_debug(
-+ pr_debug_ratelimited(
- "sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
-@@ -165,7 +165,7 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
-
- static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
- {
-- pr_debug(
-+ pr_debug_ratelimited(
- "sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
-@@ -177,7 +177,7 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
-
- static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
- {
-- pr_warn(
-+ pr_warn_ratelimited(
- "sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
-index 830396b1c3b14..27cdaea405017 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
-@@ -333,7 +333,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
- encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
- switch (encoding) {
- case SQ_INTERRUPT_WORD_ENCODING_AUTO:
-- pr_debug(
-+ pr_debug_ratelimited(
- "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
-@@ -347,7 +347,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
- break;
- case SQ_INTERRUPT_WORD_ENCODING_INST:
-- pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
-+ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
-@@ -366,7 +366,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
- break;
- case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
-- pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
-+ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
-index bb16b795d1bc2..63ce30ea68915 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
-@@ -495,11 +495,11 @@ svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
-
- /* We need a new svm_bo. Spin-loop to wait for concurrent
- * svm_range_bo_release to finish removing this range from
-- * its range list. After this, it is safe to reuse the
-- * svm_bo pointer and svm_bo_list head.
-+ * its range list and setting prange->svm_bo to NULL. After this,
-+ * it is safe to reuse the svm_bo pointer and svm_bo_list head.
- */
-- while (!list_empty_careful(&prange->svm_bo_list))
-- ;
-+ while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
-+ cond_resched();
-
- return false;
- }
-@@ -628,8 +628,15 @@ create_bo_failed:
-
- void svm_range_vram_node_free(struct svm_range *prange)
- {
-- svm_range_bo_unref(prange->svm_bo);
-- prange->ttm_res = NULL;
-+ /* serialize prange->svm_bo unref */
-+ mutex_lock(&prange->lock);
-+ /* prange->svm_bo has not been unref */
-+ if (prange->ttm_res) {
-+ prange->ttm_res = NULL;
-+ mutex_unlock(&prange->lock);
-+ svm_range_bo_unref(prange->svm_bo);
-+ } else
-+ mutex_unlock(&prange->lock);
- }
-
- struct kfd_node *
-@@ -760,7 +767,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
- prange->flags &= ~attrs[i].value;
- break;
- case KFD_IOCTL_SVM_ATTR_GRANULARITY:
-- prange->granularity = attrs[i].value;
-+ prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
- break;
- default:
- WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
-@@ -820,7 +827,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
- }
- }
-
-- return !prange->is_error_flag;
-+ return true;
- }
-
- /**
-@@ -1662,73 +1669,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
-
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
-- for (addr = start; addr < end && !r; ) {
-+ for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range;
- struct vm_area_struct *vma;
-- unsigned long next;
-+ unsigned long next = 0;
- unsigned long offset;
- unsigned long npages;
- bool readonly;
-
- vma = vma_lookup(mm, addr);
-- if (!vma) {
-+ if (vma) {
-+ readonly = !(vma->vm_flags & VM_WRITE);
-+
-+ next = min(vma->vm_end, end);
-+ npages = (next - addr) >> PAGE_SHIFT;
-+ WRITE_ONCE(p->svms.faulting_task, current);
-+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
-+ readonly, owner, NULL,
-+ &hmm_range);
-+ WRITE_ONCE(p->svms.faulting_task, NULL);
-+ if (r) {
-+ pr_debug("failed %d to get svm range pages\n", r);
-+ if (r == -EBUSY)
-+ r = -EAGAIN;
-+ }
-+ } else {
- r = -EFAULT;
-- goto unreserve_out;
-- }
-- readonly = !(vma->vm_flags & VM_WRITE);
--
-- next = min(vma->vm_end, end);
-- npages = (next - addr) >> PAGE_SHIFT;
-- WRITE_ONCE(p->svms.faulting_task, current);
-- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
-- readonly, owner, NULL,
-- &hmm_range);
-- WRITE_ONCE(p->svms.faulting_task, NULL);
-- if (r) {
-- pr_debug("failed %d to get svm range pages\n", r);
-- if (r == -EBUSY)
-- r = -EAGAIN;
-- goto unreserve_out;
- }
-
-- offset = (addr - start) >> PAGE_SHIFT;
-- r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-- hmm_range->hmm_pfns);
-- if (r) {
-- pr_debug("failed %d to dma map range\n", r);
-- goto unreserve_out;
-+ if (!r) {
-+ offset = (addr - start) >> PAGE_SHIFT;
-+ r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-+ hmm_range->hmm_pfns);
-+ if (r)
-+ pr_debug("failed %d to dma map range\n", r);
- }
-
- svm_range_lock(prange);
-- if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
-+ if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
- pr_debug("hmm update the range, need validate again\n");
- r = -EAGAIN;
-- goto unlock_out;
- }
-- if (!list_empty(&prange->child_list)) {
-+
-+ if (!r && !list_empty(&prange->child_list)) {
- pr_debug("range split by unmap in parallel, validate again\n");
- r = -EAGAIN;
-- goto unlock_out;
- }
-
-- r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-- ctx->bitmap, wait, flush_tlb);
-+ if (!r)
-+ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-+ ctx->bitmap, wait, flush_tlb);
-+
-+ if (!r && next == end)
-+ prange->mapped_to_gpu = true;
-
--unlock_out:
- svm_range_unlock(prange);
-
- addr = next;
- }
-
-- if (addr == end) {
-- prange->validated_once = true;
-- prange->mapped_to_gpu = true;
-- }
--
--unreserve_out:
- svm_range_unreserve_bos(ctx);
--
-- prange->is_error_flag = !!r;
- if (!r)
- prange->validate_timestamp = ktime_get_boottime();
-
-@@ -2097,7 +2097,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
- next = interval_tree_iter_next(node, start, last);
- next_start = min(node->last, last) + 1;
-
-- if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
-+ if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
-+ prange->mapped_to_gpu) {
- /* nothing to do */
- } else if (node->start < start || node->last > last) {
- /* node intersects the update range and its attributes
-@@ -3507,7 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
- struct svm_range *next;
- bool update_mapping = false;
- bool flush_tlb;
-- int r = 0;
-+ int r, ret = 0;
-
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
- p->pasid, &p->svms, start, start + size - 1, size);
-@@ -3595,7 +3596,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
- out_unlock_range:
- mutex_unlock(&prange->migrate_mutex);
- if (r)
-- break;
-+ ret = r;
- }
-
- dynamic_svm_range_dump(svms);
-@@ -3608,7 +3609,7 @@ out:
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
- &p->svms, start, start + size - 1, r);
-
-- return r;
-+ return ret ? ret : r;
- }
-
- static int
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
-index 9e668eeefb32d..25f7119057386 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
-@@ -132,9 +132,7 @@ struct svm_range {
- struct list_head child_list;
- DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
- DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
-- bool validated_once;
- bool mapped_to_gpu;
-- bool is_error_flag;
- };
-
- static inline void svm_range_lock(struct svm_range *prange)
-diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-index 868946dd7ef12..f5fdb61c821d0 100644
---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-@@ -1692,8 +1692,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
- dce_version_to_string(adev->dm.dc->ctx->dce_version));
- } else {
-- DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
-- dce_version_to_string(adev->dm.dc->ctx->dce_version));
-+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
- goto error;
- }
-
-@@ -2085,7 +2084,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
- struct dmub_srv_create_params create_params;
- struct dmub_srv_region_params region_params;
- struct dmub_srv_region_info region_info;
-- struct dmub_srv_fb_params fb_params;
-+ struct dmub_srv_memory_params memory_params;
- struct dmub_srv_fb_info *fb_info;
- struct dmub_srv *dmub_srv;
- const struct dmcub_firmware_header_v1_0 *hdr;
-@@ -2185,6 +2184,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
- adev->dm.dmub_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
- PSP_HEADER_BYTES;
-+ region_params.is_mailbox_in_inbox = false;
-
- status = dmub_srv_calc_region_info(dmub_srv, &region_params,
- &region_info);
-@@ -2208,10 +2208,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
- return r;
-
- /* Rebase the regions on the framebuffer address. */
-- memset(&fb_params, 0, sizeof(fb_params));
-- fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
-- fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
-- fb_params.region_info = &region_info;
-+ memset(&memory_params, 0, sizeof(memory_params));
-+ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
-+ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
-+ memory_params.region_info = &region_info;
-
- adev->dm.dmub_fb_info =
- kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
-@@ -2223,7 +2223,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
- return -ENOMEM;
- }
-
-- status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
-+ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
- if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
- return -EINVAL;
-@@ -6236,7 +6236,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
- dm_new_state->underscan_enable = val;
- ret = 0;
- } else if (property == adev->mode_info.abm_level_property) {
-- dm_new_state->abm_level = val;
-+ dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
- ret = 0;
- }
-
-@@ -6281,7 +6281,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
- *val = dm_state->underscan_enable;
- ret = 0;
- } else if (property == adev->mode_info.abm_level_property) {
-- *val = dm_state->abm_level;
-+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
-+ dm_state->abm_level : 0;
- ret = 0;
- }
-
-@@ -6354,7 +6355,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
- state->pbn = 0;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-- state->abm_level = amdgpu_dm_abm_level;
-+ state->abm_level = amdgpu_dm_abm_level ?:
-+ ABM_LEVEL_IMMEDIATE_DISABLE;
-
- __drm_atomic_helper_connector_reset(connector, &state->base);
- }
-@@ -7431,6 +7433,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
- int i;
- int result = -EIO;
-
-+ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
-+ return result;
-+
- cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
-
- if (!cmd.payloads)
-@@ -9539,14 +9544,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
- struct drm_plane *other;
- struct drm_plane_state *old_other_state, *new_other_state;
- struct drm_crtc_state *new_crtc_state;
-+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
- int i;
-
- /*
-- * TODO: Remove this hack once the checks below are sufficient
-- * enough to determine when we need to reset all the planes on
-- * the stream.
-+ * TODO: Remove this hack for all asics once it is proven that
-+ * fast updates work fine on DCN3.2+.
- */
-- if (state->allow_modeset)
-+ if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
- return true;
-
- /* Exit early if we know that we're adding or removing the plane. */
-@@ -9892,16 +9897,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
- }
- }
-
-+static void
-+dm_get_plane_scale(struct drm_plane_state *plane_state,
-+ int *out_plane_scale_w, int *out_plane_scale_h)
-+{
-+ int plane_src_w, plane_src_h;
-+
-+ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
-+ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
-+ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
-+}
-+
- static int dm_check_crtc_cursor(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_crtc_state *new_crtc_state)
- {
-- struct drm_plane *cursor = crtc->cursor, *underlying;
-+ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
-+ struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane_state *new_cursor_state, *new_underlying_state;
- int i;
- int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
-- int cursor_src_w, cursor_src_h;
-- int underlying_src_w, underlying_src_h;
-+ bool any_relevant_change = false;
-
- /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
- * cursor per pipe but it's going to inherit the scaling and
-@@ -9909,13 +9925,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
- * blending properties match the underlying planes'.
- */
-
-- new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-- if (!new_cursor_state || !new_cursor_state->fb)
-+ /* If no plane was enabled or changed scaling, no need to check again */
-+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
-+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
-+
-+ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
-+ continue;
-+
-+ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
-+ any_relevant_change = true;
-+ break;
-+ }
-+
-+ if (new_plane_state->fb == old_plane_state->fb &&
-+ new_plane_state->crtc_w == old_plane_state->crtc_w &&
-+ new_plane_state->crtc_h == old_plane_state->crtc_h)
-+ continue;
-+
-+ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
-+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
-+
-+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
-+ any_relevant_change = true;
-+ break;
-+ }
-+ }
-+
-+ if (!any_relevant_change)
- return 0;
-
-- dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
-- cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
-- cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
-+ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
-+ if (IS_ERR(new_cursor_state))
-+ return PTR_ERR(new_cursor_state);
-+
-+ if (!new_cursor_state->fb)
-+ return 0;
-+
-+ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
-+
-+ /* Need to check all enabled planes, even if this commit doesn't change
-+ * their state
-+ */
-+ i = drm_atomic_add_affected_planes(state, crtc);
-+ if (i)
-+ return i;
-
- for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
- /* Narrow down to non-cursor planes on the same CRTC as the cursor */
-@@ -9926,10 +9979,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
- if (!new_underlying_state->fb)
- continue;
-
-- dm_get_oriented_plane_size(new_underlying_state,
-- &underlying_src_w, &underlying_src_h);
-- underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
-- underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
-+ dm_get_plane_scale(new_underlying_state,
-+ &underlying_scale_w, &underlying_scale_h);
-
- if (cursor_scale_w != underlying_scale_w ||
- cursor_scale_h != underlying_scale_h) {
-diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
-index 57230661132bd..28f5eb9ecbd3e 100644
---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
-+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
-@@ -1598,31 +1598,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
- unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
- unsigned int max_compressed_bw_in_kbps = 0;
- struct dc_dsc_bw_range bw_range = {0};
-- struct drm_dp_mst_topology_mgr *mst_mgr;
-+ uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
-
- /*
-- * check if the mode could be supported if DSC pass-through is supported
-- * AND check if there enough bandwidth available to support the mode
-- * with DSC enabled.
-+ * Consider the case where the depth of the mst topology tree is equal to or less than 2
-+ * A. When dsc bitstream can be transmitted along the entire path
-+ * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
-+ * 2. dsc passthrough supported at MST branch, or
-+ * 3. dsc decoding supported at leaf MST device
-+ * Use maximum dsc compression as bw constraint
-+ * B. When dsc bitstream cannot be transmitted along the entire path
-+ * Use native bw as bw constraint
- */
- if (is_dsc_common_config_possible(stream, &bw_range) &&
-- aconnector->mst_output_port->passthrough_aux) {
-- mst_mgr = aconnector->mst_output_port->mgr;
-- mutex_lock(&mst_mgr->lock);
--
-+ (aconnector->mst_output_port->passthrough_aux ||
-+ aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
- cur_link_settings = stream->link->verified_link_cap;
-
- upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
-- &cur_link_settings
-- );
-- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
-+ &cur_link_settings);
-+ down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
-
- /* pick the bottleneck */
- end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
- down_link_bw_in_kbps);
-
-- mutex_unlock(&mst_mgr->lock);
--
- /*
- * use the maximum dsc compression bandwidth as the required
- * bandwidth for the mode
-@@ -1637,8 +1637,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
- /* check if mode could be supported within full_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
--
-- if (pbn > aconnector->mst_output_port->full_pbn)
-+ if (pbn > full_pbn)
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
-
-diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
-index d08e60dff46de..a1be93f6385c6 100644
---- a/drivers/gpu/drm/amd/display/dc/core/dc.c
-+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
-@@ -990,7 +990,8 @@ static bool dc_construct(struct dc *dc,
- /* set i2c speed if not done by the respective dcnxxx__resource.c */
- if (dc->caps.i2c_speed_in_khz_hdcp == 0)
- dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
--
-+ if (dc->caps.max_optimizable_video_width == 0)
-+ dc->caps.max_optimizable_video_width = 5120;
- dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
- if (!dc->clk_mgr)
- goto fail;
-@@ -1069,53 +1070,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
- }
- }
-
--static void phantom_pipe_blank(
-- struct dc *dc,
-- struct timing_generator *tg,
-- int width,
-- int height)
--{
-- struct dce_hwseq *hws = dc->hwseq;
-- enum dc_color_space color_space;
-- struct tg_color black_color = {0};
-- struct output_pixel_processor *opp = NULL;
-- uint32_t num_opps, opp_id_src0, opp_id_src1;
-- uint32_t otg_active_width, otg_active_height;
-- uint32_t i;
--
-- /* program opp dpg blank color */
-- color_space = COLOR_SPACE_SRGB;
-- color_space_to_black_color(dc, color_space, &black_color);
--
-- otg_active_width = width;
-- otg_active_height = height;
--
-- /* get the OPTC source */
-- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
-- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
--
-- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
-- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
-- opp = dc->res_pool->opps[i];
-- break;
-- }
-- }
--
-- if (opp && opp->funcs->opp_set_disp_pattern_generator)
-- opp->funcs->opp_set_disp_pattern_generator(
-- opp,
-- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
-- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
-- COLOR_DEPTH_UNDEFINED,
-- &black_color,
-- otg_active_width,
-- otg_active_height,
-- 0);
--
-- if (tg->funcs->is_tg_enabled(tg))
-- hws->funcs.wait_for_blank_complete(opp);
--}
--
- static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
- {
- if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
-@@ -1206,7 +1160,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
-
- main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
-- phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
-+ if (dc->hwss.blank_phantom)
-+ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
- tg->funcs->enable_crtc(tg);
- }
- }
-@@ -1888,7 +1843,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
-
-- if (dc->debug.enable_double_buffered_dsc_pg_support)
-+ if (dc->hwss.update_dsc_pg)
- dc->hwss.update_dsc_pg(dc, context, false);
-
- disable_dangling_plane(dc, context);
-@@ -1995,7 +1950,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
- dc->hwss.optimize_bandwidth(dc, context);
- }
-
-- if (dc->debug.enable_double_buffered_dsc_pg_support)
-+ if (dc->hwss.update_dsc_pg)
- dc->hwss.update_dsc_pg(dc, context, true);
-
- if (dc->ctx->dce_version >= DCE_VERSION_MAX)
-@@ -2242,7 +2197,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
-
- dc->hwss.optimize_bandwidth(dc, context);
-
-- if (dc->debug.enable_double_buffered_dsc_pg_support)
-+ if (dc->hwss.update_dsc_pg)
- dc->hwss.update_dsc_pg(dc, context, true);
- }
-
-@@ -2488,6 +2443,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
- }
-
- static enum surface_update_type get_scaling_info_update_type(
-+ const struct dc *dc,
- const struct dc_surface_update *u)
- {
- union surface_update_flags *update_flags = &u->surface->update_flags;
-@@ -2520,6 +2476,12 @@ static enum surface_update_type get_scaling_info_update_type(
- update_flags->bits.clock_change = 1;
- }
-
-+ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
-+ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
-+ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
-+ /* Changing clip size of a large surface may result in MPC slice count change */
-+ update_flags->bits.bandwidth_change = 1;
-+
- if (u->scaling_info->src_rect.x != u->surface->src_rect.x
- || u->scaling_info->src_rect.y != u->surface->src_rect.y
- || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
-@@ -2557,7 +2519,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
- type = get_plane_info_update_type(u);
- elevate_update_type(&overall_type, type);
-
-- type = get_scaling_info_update_type(u);
-+ type = get_scaling_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
-
- if (u->flip_addr) {
-@@ -3571,7 +3533,7 @@ static void commit_planes_for_stream(struct dc *dc,
- if (get_seamless_boot_stream_count(context) == 0)
- dc->hwss.prepare_bandwidth(dc, context);
-
-- if (dc->debug.enable_double_buffered_dsc_pg_support)
-+ if (dc->hwss.update_dsc_pg)
- dc->hwss.update_dsc_pg(dc, context, false);
-
- context_clock_trace(dc, context);
-@@ -4374,6 +4336,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
- update_type,
- context);
- } else {
-+ if (!stream_update &&
-+ dc->hwss.is_pipe_topology_transition_seamless &&
-+ !dc->hwss.is_pipe_topology_transition_seamless(
-+ dc, dc->current_state, context)) {
-+
-+ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
-+ BREAK_TO_DEBUGGER();
-+ }
- commit_planes_for_stream(
- dc,
- srf_updates,
-@@ -5284,3 +5254,24 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
- properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
- }
-
-+/**
-+ *****************************************************************************
-+ * dc_set_edp_power() - DM controls eDP power to be ON/OFF
-+ *
-+ * Called when DM wants to power on/off eDP.
-+ * Only work on links with flag skip_implict_edp_power_control is set.
-+ *
-+ *****************************************************************************
-+ */
-+void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
-+ bool powerOn)
-+{
-+ if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
-+ return;
-+
-+ if (edp_link->skip_implict_edp_power_control == false)
-+ return;
-+
-+ edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
-+}
-+
-diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
-index f7b51aca60200..8873acfe309c8 100644
---- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
-+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
-@@ -996,7 +996,7 @@ static void adjust_recout_for_visual_confirm(struct rect *recout,
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- int dpp_offset, base_offset;
-
-- if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
-+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE || !pipe_ctx->plane_res.dpp)
- return;
-
- dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
-diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
-index 01fe2d2fd2417..ebe571fcefe32 100644
---- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
-+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
-@@ -582,7 +582,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
- for (i = 0; i < MAX_PIPES; i++) {
- struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
-
-- if (res_ctx->pipe_ctx[i].stream != stream)
-+ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
- continue;
-
- return tg->funcs->get_frame_count(tg);
-@@ -641,7 +641,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
- for (i = 0; i < MAX_PIPES; i++) {
- struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
-
-- if (res_ctx->pipe_ctx[i].stream != stream)
-+ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
- continue;
-
- tg->funcs->get_scanoutpos(tg,
-diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
-index 31e3183497a7f..3f33740e2f659 100644
---- a/drivers/gpu/drm/amd/display/dc/dc.h
-+++ b/drivers/gpu/drm/amd/display/dc/dc.h
-@@ -231,6 +231,11 @@ struct dc_caps {
- uint32_t dmdata_alloc_size;
- unsigned int max_cursor_size;
- unsigned int max_video_width;
-+ /*
-+ * max video plane width that can be safely assumed to be always
-+ * supported by single DPP pipe.
-+ */
-+ unsigned int max_optimizable_video_width;
- unsigned int min_horizontal_blanking_period;
- int linear_pitch_alignment;
- bool dcc_const_color;
-@@ -1533,7 +1538,6 @@ struct dc_link {
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
-- struct backlight_settings backlight_settings;
- struct psr_settings psr_settings;
-
- struct replay_settings replay_settings;
-@@ -1573,6 +1577,7 @@ struct dc_link {
- struct phy_state phy_state;
- // BW ALLOCATON USB4 ONLY
- struct dc_dpia_bw_alloc dpia_bw_alloc_config;
-+ bool skip_implict_edp_power_control;
- };
-
- /* Return an enumerated dc_link.
-@@ -1592,6 +1597,9 @@ void dc_get_edp_links(const struct dc *dc,
- struct dc_link **edp_links,
- int *edp_num);
-
-+void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
-+ bool powerOn);
-+
- /* The function initiates detection handshake over the given link. It first
- * determines if there are display connections over the link. If so it initiates
- * detection protocols supported by the connected receiver device. The function
-diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
-index 3697ea1d14c1b..d5b3e3a32cc6d 100644
---- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
-+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
-@@ -302,7 +302,6 @@ struct dc_stream_state {
- bool vblank_synchronized;
- bool fpo_in_use;
- struct mall_stream_config mall_stream_config;
-- bool skip_edp_power_down;
- };
-
- #define ABM_LEVEL_IMMEDIATE_DISABLE 255
-diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
-index 445ad79001ce2..accffba5a6834 100644
---- a/drivers/gpu/drm/amd/display/dc/dc_types.h
-+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
-@@ -189,6 +189,7 @@ struct dc_panel_patch {
- unsigned int disable_fams;
- unsigned int skip_avmute;
- unsigned int mst_start_top_delay;
-+ unsigned int remove_sink_ext_caps;
- };
-
- struct dc_edid_caps {
-@@ -1002,10 +1003,6 @@ struct link_mst_stream_allocation_table {
- struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
- };
-
--struct backlight_settings {
-- uint32_t backlight_millinits;
--};
--
- /* PSR feature flags */
- struct psr_settings {
- bool psr_feature_enabled; // PSR is supported by sink
-diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
-index b87bfecb7755a..a8e79104b684e 100644
---- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
-+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
-@@ -586,7 +586,8 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
- if (state == PSR_STATE0)
- break;
- }
-- fsleep(500);
-+ /* must *not* be fsleep - this can be called from high irq levels */
-+ udelay(500);
- }
-
- /* assert if max retry hit */
-diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
-index 0f24b6fbd2201..4704c9c85ee6f 100644
---- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
-+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
-@@ -216,7 +216,8 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
- break;
- }
-
-- fsleep(500);
-+ /* must *not* be fsleep - this can be called from high irq levels */
-+ udelay(500);
- }
-
- /* assert if max retry hit */
-diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
-index 2a6157555fd1e..9c78e42418f34 100644
---- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
-+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
-@@ -1226,7 +1226,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
- struct dce_hwseq *hws = link->dc->hwseq;
-
- if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
-- if (!stream->skip_edp_power_down)
-+ if (!link->skip_implict_edp_power_control)
- hws->funcs.edp_backlight_control(link, false);
- link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
- }
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
-index 9834b75f1837b..79befa17bb037 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
-+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
-@@ -111,7 +111,8 @@ void dcn10_lock_all_pipes(struct dc *dc,
- if (pipe_ctx->top_pipe ||
- !pipe_ctx->stream ||
- (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
-- !tg->funcs->is_tg_enabled(tg))
-+ !tg->funcs->is_tg_enabled(tg) ||
-+ pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
- continue;
-
- if (lock)
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
-index aeadc587433fd..a2e1ca3b93e86 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
-+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
-@@ -1830,8 +1830,16 @@ void dcn20_program_front_end_for_ctx(
- dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
- struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
-
-- if (tg->funcs->enable_crtc)
-+ if (tg->funcs->enable_crtc) {
-+ if (dc->hwss.blank_phantom) {
-+ int main_pipe_width, main_pipe_height;
-+
-+ main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
-+ main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
-+ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
-+ }
- tg->funcs->enable_crtc(tg);
-+ }
- }
- }
- /* OTG blank before disabling all front ends */
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
-index 4d2820ffe4682..33a8626bda735 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
-+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
-@@ -476,7 +476,8 @@ void dcn314_disable_link_output(struct dc_link *link,
- struct dmcu *dmcu = dc->res_pool->dmcu;
-
- if (signal == SIGNAL_TYPE_EDP &&
-- link->dc->hwss.edp_backlight_control)
-+ link->dc->hwss.edp_backlight_control &&
-+ !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_backlight_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
-index 004beed9bd444..3e65e683db0ac 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
-+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
-@@ -869,7 +869,7 @@ static const struct dc_plane_cap plane_cap = {
- static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = false,
- .enable_z9_disable_interface = true,
-- .minimum_z8_residency_time = 2000,
-+ .minimum_z8_residency_time = 2100,
- .psr_skip_crtc_disable = true,
- .replay_skip_crtc_disabled = true,
- .disable_dmcu = true,
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
-index 680e7fa8d18ab..650e1598bddcb 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
-+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
-@@ -77,6 +77,9 @@ void dcn32_dsc_pg_control(
- if (hws->ctx->dc->debug.disable_dsc_power_gate)
- return;
-
-+ if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support)
-+ return;
-+
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
-@@ -214,7 +217,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
- static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
- {
- int i;
-- uint8_t num_ways = 0;
-+ uint32_t num_ways = 0;
- uint32_t mall_ss_size_bytes = 0;
-
- mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
-@@ -244,7 +247,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
- bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
- {
- union dmub_rb_cmd cmd;
-- uint8_t ways, i;
-+ uint8_t i;
-+ uint32_t ways;
- int j;
- bool mall_ss_unsupported = false;
- struct dc_plane_state *plane = NULL;
-@@ -304,7 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
- cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
- cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
- cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
-- cmd.cab.cab_alloc_ways = ways;
-+ cmd.cab.cab_alloc_ways = (uint8_t)ways;
-
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
-
-@@ -482,8 +486,7 @@ bool dcn32_set_mcm_luts(
- if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf->pwl;
- else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
-- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
-- plane_state->blend_tf,
-+ cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = &dpp_base->regamma_params;
- }
-@@ -497,8 +500,7 @@ bool dcn32_set_mcm_luts(
- else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- ASSERT(false);
-- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
-- plane_state->in_shaper_func,
-+ cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = &dpp_base->shaper_params;
- }
-@@ -1573,3 +1575,101 @@ void dcn32_init_blank(
- if (opp)
- hws->funcs.wait_for_blank_complete(opp);
- }
-+
-+void dcn32_blank_phantom(struct dc *dc,
-+ struct timing_generator *tg,
-+ int width,
-+ int height)
-+{
-+ struct dce_hwseq *hws = dc->hwseq;
-+ enum dc_color_space color_space;
-+ struct tg_color black_color = {0};
-+ struct output_pixel_processor *opp = NULL;
-+ uint32_t num_opps, opp_id_src0, opp_id_src1;
-+ uint32_t otg_active_width, otg_active_height;
-+ uint32_t i;
-+
-+ /* program opp dpg blank color */
-+ color_space = COLOR_SPACE_SRGB;
-+ color_space_to_black_color(dc, color_space, &black_color);
-+
-+ otg_active_width = width;
-+ otg_active_height = height;
-+
-+ /* get the OPTC source */
-+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
-+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
-+
-+ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
-+ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
-+ opp = dc->res_pool->opps[i];
-+ break;
-+ }
-+ }
-+
-+ if (opp && opp->funcs->opp_set_disp_pattern_generator)
-+ opp->funcs->opp_set_disp_pattern_generator(
-+ opp,
-+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
-+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
-+ COLOR_DEPTH_UNDEFINED,
-+ &black_color,
-+ otg_active_width,
-+ otg_active_height,
-+ 0);
-+
-+ if (tg->funcs->is_tg_enabled(tg))
-+ hws->funcs.wait_for_blank_complete(opp);
-+}
-+
-+bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
-+ const struct dc_state *cur_ctx,
-+ const struct dc_state *new_ctx)
-+{
-+ int i;
-+ const struct pipe_ctx *cur_pipe, *new_pipe;
-+ bool is_seamless = true;
-+
-+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
-+ cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i];
-+ new_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-+
-+ if (resource_is_pipe_type(cur_pipe, FREE_PIPE) ||
-+ resource_is_pipe_type(new_pipe, FREE_PIPE))
-+ /* adding or removing free pipes is always seamless */
-+ continue;
-+ else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
-+ if (resource_is_pipe_type(new_pipe, OTG_MASTER))
-+ if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
-+ /* OTG master with the same stream is seamless */
-+ continue;
-+ } else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
-+ if (resource_is_pipe_type(new_pipe, OPP_HEAD)) {
-+ if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg)
-+ /*
-+ * OPP heads sharing the same timing
-+ * generator is seamless
-+ */
-+ continue;
-+ }
-+ } else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) {
-+ if (resource_is_pipe_type(new_pipe, DPP_PIPE)) {
-+ if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp)
-+ /*
-+ * DPP pipes sharing the same OPP head is
-+ * seamless
-+ */
-+ continue;
-+ }
-+ }
-+
-+ /*
-+ * This pipe's transition doesn't fall under any seamless
-+ * conditions
-+ */
-+ is_seamless = false;
-+ break;
-+ }
-+
-+ return is_seamless;
-+}
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
-index 2d2628f31bed7..9992e40acd217 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
-+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
-@@ -115,4 +115,13 @@ void dcn32_init_blank(
- struct dc *dc,
- struct timing_generator *tg);
-
-+void dcn32_blank_phantom(struct dc *dc,
-+ struct timing_generator *tg,
-+ int width,
-+ int height);
-+
-+bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
-+ const struct dc_state *cur_ctx,
-+ const struct dc_state *new_ctx);
-+
- #endif /* __DC_HWSS_DCN32_H__ */
-diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
-index c7417147dff19..1edadff39a5ef 100644
---- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
-+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
-@@ -115,6 +115,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
- .update_phantom_vp_position = dcn32_update_phantom_vp_position,
- .update_dsc_pg = dcn32_update_dsc_pg,
- .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
-+ .blank_phantom = dcn32_blank_phantom,
-+ .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
- };
-
- static const struct hwseq_private_funcs dcn32_private_funcs = {
-diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
-index 5805fb02af14e..f2de0c7584947 100644
---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
-+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
-@@ -948,10 +948,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
- {
- int plane_count;
- int i;
-- unsigned int min_dst_y_next_start_us;
-
- plane_count = 0;
-- min_dst_y_next_start_us = 0;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].plane_state)
- plane_count++;
-@@ -973,26 +971,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
- else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
- struct dc_link *link = context->streams[0]->sink->link;
- struct dc_stream_status *stream_status = &context->stream_status[0];
-- struct dc_stream_state *current_stream = context->streams[0];
- int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
- bool is_pwrseq0 = link->link_index == 0;
-- bool isFreesyncVideo;
--
-- isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
-- isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
-- for (i = 0; i < dc->res_pool->pipe_count; i++) {
-- if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
-- min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
-- break;
-- }
-- }
-
- /* Don't support multi-plane configurations */
- if (stream_status->plane_count > 1)
- return DCN_ZSTATE_SUPPORT_DISALLOW;
-
-- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
-+ if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
- return DCN_ZSTATE_SUPPORT_ALLOW;
- else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
- return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
-diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
-index 711d4085b33b8..cf3b400c8619b 100644
---- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
-+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
-@@ -1964,6 +1964,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
- int i, pipe_idx, vlevel_temp = 0;
- double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
- double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
-+ double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
- double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
- dm_dram_clock_change_unsupported;
-@@ -2151,7 +2152,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
- }
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-- min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
-+ min_dram_speed_mts = dram_speed_from_validation;
- min_dram_speed_mts_margin = 160;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
-diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
-index 02ff99f7bec2b..66e680902c95c 100644
---- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
-+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
-@@ -388,6 +388,11 @@ struct hw_sequencer_funcs {
- void (*z10_restore)(const struct dc *dc);
- void (*z10_save_init)(struct dc *dc);
-
-+ void (*blank_phantom)(struct dc *dc,
-+ struct timing_generator *tg,
-+ int width,
-+ int height);
-+
- void (*update_visual_confirm_color)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- int mpcc_id);
-@@ -396,6 +401,9 @@ struct hw_sequencer_funcs {
- struct dc_state *context,
- struct pipe_ctx *phantom_pipe);
- void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
-+ bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
-+ const struct dc_state *cur_ctx,
-+ const struct dc_state *new_ctx);
-
- void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
- void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
-diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
-index e3e8c76c17cfa..d7685368140ab 100644
---- a/drivers/gpu/drm/amd/display/dc/inc/link.h
-+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
-@@ -295,6 +295,7 @@ struct link_service {
- bool (*edp_receiver_ready_T9)(struct dc_link *link);
- bool (*edp_receiver_ready_T7)(struct dc_link *link);
- bool (*edp_power_alpm_dpcd_enable)(struct dc_link *link, bool enable);
-+ void (*edp_set_panel_power)(struct dc_link *link, bool powerOn);
-
-
- /*************************** DP CTS ************************************/
-diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
-index c9b6676eaf53b..c7a9e286a5d4d 100644
---- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
-@@ -876,7 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
- (link->dpcd_sink_ext_caps.bits.oled == 1)) {
- dpcd_set_source_specific_data(link);
- msleep(post_oui_delay);
-- set_cached_brightness_aux(link);
-+ set_default_brightness_aux(link);
- }
-
- return true;
-@@ -1085,6 +1085,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
- if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
- link->ctx->dc->debug.hdmi20_disable = true;
-
-+ if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
-+ link->dpcd_sink_ext_caps.raw = 0;
-+
- if (dc_is_hdmi_signal(link->connector_signal))
- read_scdc_caps(link->ddc, link->local_sink);
-
-@@ -1163,6 +1166,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
- dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
- // Override dc_panel_config if system has specific settings
- dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
-+
-+		//sink can only use the supported link rate table, so we are forced to enable it
-+ if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
-+ link->panel_config.ilr.optimize_edp_link_rate = true;
-+ if (edp_is_ilr_optimization_enabled(link))
-+ link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link);
- }
-
- } else {
-diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
-index 79aef205598b7..35d087cf1980f 100644
---- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
-@@ -1930,7 +1930,7 @@ static void disable_link_dp(struct dc_link *link,
- dp_disable_link_phy(link, link_res, signal);
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
-- if (!link->dc->config.edp_no_power_sequencing)
-+ if (!link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- }
-
-@@ -2140,8 +2140,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
- if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
- link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
- link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
-- set_cached_brightness_aux(link);
--
-+ set_default_brightness_aux(link);
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- msleep(bl_oled_enable_delay);
- edp_backlight_enable_aux(link, true);
-@@ -2219,7 +2218,7 @@ static enum dc_status enable_link(
- * link settings. Need to call disable first before enabling at
- * new link settings.
- */
-- if (link->link_status.link_active && !stream->skip_edp_power_down)
-+ if (link->link_status.link_active)
- disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
-
- switch (pipe_ctx->stream->signal) {
-@@ -2338,9 +2337,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
- dc->hwss.disable_stream(pipe_ctx);
- } else {
- dc->hwss.disable_stream(pipe_ctx);
-- if (!pipe_ctx->stream->skip_edp_power_down) {
-- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
-- }
-+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- }
-
- if (pipe_ctx->stream->timing.flags.DSC) {
-diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
-index 0895742a31024..e406561c2c237 100644
---- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
-@@ -223,6 +223,7 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
- link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9;
- link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7;
- link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable;
-+ link_srv->edp_set_panel_power = edp_set_panel_power;
- }
-
- /* link dp cts implements dp compliance test automation protocols and manual
-diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
-index 237e0ff955f3c..db87aa7b5c90f 100644
---- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
-@@ -707,8 +707,7 @@ bool edp_decide_link_settings(struct dc_link *link,
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
-- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
-- link->dpcd_caps.edp_supported_link_rates_count == 0) {
-+ if (!edp_is_ilr_optimization_enabled(link)) {
- *link_setting = link->verified_link_cap;
- return true;
- }
-@@ -772,8 +771,7 @@ bool decide_edp_link_settings_with_dsc(struct dc_link *link,
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
-- if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
-- link->dpcd_caps.edp_supported_link_rates_count == 0)) {
-+ if (!edp_is_ilr_optimization_enabled(link)) {
- /* for DSC enabled case, we search for minimum lane count */
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
-@@ -1938,9 +1936,7 @@ void detect_edp_sink_caps(struct dc_link *link)
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
-- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
-- (link->panel_config.ilr.optimize_edp_link_rate ||
-- link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
-+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13) {
- // Read DPCD 00010h - 0001Fh 16 bytes at one shot
- core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
- supported_link_rates, sizeof(supported_link_rates));
-@@ -1958,12 +1954,10 @@ void detect_edp_sink_caps(struct dc_link *link)
- link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
- link->dpcd_caps.edp_supported_link_rates_count++;
--
-- if (link->reported_link_cap.link_rate < link_rate)
-- link->reported_link_cap.link_rate = link_rate;
- }
- }
- }
-+
- core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
- &backlight_adj_cap, sizeof(backlight_adj_cap));
-
-diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
-index b7abba55bc2fd..0050e0a06cbc2 100644
---- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
-@@ -73,7 +73,8 @@ void dp_disable_link_phy(struct dc_link *link,
- {
- struct dc *dc = link->ctx->dc;
-
-- if (!link->wa_flags.dp_keep_receiver_powered)
-+ if (!link->wa_flags.dp_keep_receiver_powered &&
-+ !link->skip_implict_edp_power_control)
- dpcd_write_rx_power_ctrl(link, false);
-
- dc->hwss.disable_link_output(link, link_res, signal);
-diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
-index fd8f6f1981461..68096d12f52fd 100644
---- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
-@@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
- lt_settings->cr_pattern_time = 16000;
-
- /* Fixed VS/PE specific: Toggle link rate */
-- apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
-+ apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0));
- target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
- toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
-
-@@ -271,7 +271,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
-- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
-+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
-@@ -617,7 +617,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
-- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
-+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
-diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
-index 98e715aa6d8e3..fe74d4252a510 100644
---- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
-+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
-@@ -33,6 +33,7 @@
- #include "link_dp_capability.h"
- #include "dm_helpers.h"
- #include "dal_asic_id.h"
-+#include "link_dp_phy.h"
- #include "dce/dmub_psr.h"
- #include "dc/dc_dmub_srv.h"
- #include "dce/dmub_replay.h"
-@@ -167,7 +168,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
- *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
- *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
-
-- link->backlight_settings.backlight_millinits = backlight_millinits;
-
- if (!link->dpcd_caps.panel_luminance_control) {
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
-@@ -280,9 +280,9 @@ bool set_default_brightness_aux(struct dc_link *link)
- if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
- if (!read_default_bl_aux(link, &default_backlight))
- default_backlight = 150000;
-- // if < 5 nits or > 5000, it might be wrong readback
-- if (default_backlight < 5000 || default_backlight > 5000000)
-- default_backlight = 150000; //
-+		// if > 5000 nits, it might be a wrong readback
-+ if (default_backlight > 5000000)
-+ default_backlight = 150000;
-
- return edp_set_backlight_level_nits(link, true,
- default_backlight, 0);
-@@ -290,14 +290,23 @@ bool set_default_brightness_aux(struct dc_link *link)
- return false;
- }
-
--bool set_cached_brightness_aux(struct dc_link *link)
-+bool edp_is_ilr_optimization_enabled(struct dc_link *link)
- {
-- if (link->backlight_settings.backlight_millinits)
-- return edp_set_backlight_level_nits(link, true,
-- link->backlight_settings.backlight_millinits, 0);
-- else
-- return set_default_brightness_aux(link);
-- return false;
-+ if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)
-+ return false;
-+ return true;
-+}
-+
-+enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link)
-+{
-+ enum dc_link_rate link_rate = link->reported_link_cap.link_rate;
-+
-+ for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) {
-+ if (link_rate < link->dpcd_caps.edp_supported_link_rates[i])
-+ link_rate = link->dpcd_caps.edp_supported_link_rates[i];
-+ }
-+
-+ return link_rate;
- }
-
- bool edp_is_ilr_optimization_required(struct dc_link *link,
-@@ -311,8 +320,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
-
- ASSERT(link || crtc_timing); // invalid input
-
-- if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
-- !link->panel_config.ilr.optimize_edp_link_rate)
-+ if (!edp_is_ilr_optimization_enabled(link))
- return false;
-
-
-@@ -362,6 +370,34 @@ void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
- link->dc->hwss.edp_backlight_control(link, true);
- }
-
-+void edp_set_panel_power(struct dc_link *link, bool powerOn)
-+{
-+ if (powerOn) {
-+ // 1. panel VDD on
-+ if (!link->dc->config.edp_no_power_sequencing)
-+ link->dc->hwss.edp_power_control(link, true);
-+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
-+
-+ // 2. panel BL on
-+ if (link->dc->hwss.edp_backlight_control)
-+ link->dc->hwss.edp_backlight_control(link, true);
-+
-+ // 3. Rx power on
-+ dpcd_write_rx_power_ctrl(link, true);
-+ } else {
-+ // 3. Rx power off
-+ dpcd_write_rx_power_ctrl(link, false);
-+
-+ // 2. panel BL off
-+ if (link->dc->hwss.edp_backlight_control)
-+ link->dc->hwss.edp_backlight_control(link, false);
-+
-+ // 1. panel VDD off
-+ if (!link->dc->config.edp_no_power_sequencing)
-+ link->dc->hwss.edp_power_control(link, false);
-+ }
-+}
-+
- bool edp_wait_for_t12(struct dc_link *link)
- {
- if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
-diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
-index 0a5bbda8c739c..a034288ad75d4 100644
---- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
-+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
-@@ -30,7 +30,6 @@
- enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
- void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
- bool set_default_brightness_aux(struct dc_link *link);
--bool set_cached_brightness_aux(struct dc_link *link);
- void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
- int edp_get_backlight_level(const struct dc_link *link);
- bool edp_get_backlight_level_nits(struct dc_link *link,
-@@ -64,9 +63,12 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
- bool edp_wait_for_t12(struct dc_link *link);
- bool edp_is_ilr_optimization_required(struct dc_link *link,
- struct dc_crtc_timing *crtc_timing);
-+bool edp_is_ilr_optimization_enabled(struct dc_link *link);
-+enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link);
- bool edp_backlight_enable_aux(struct dc_link *link, bool enable);
- void edp_add_delay_for_T9(struct dc_link *link);
- bool edp_receiver_ready_T9(struct dc_link *link);
- bool edp_receiver_ready_T7(struct dc_link *link);
- bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
-+void edp_set_panel_power(struct dc_link *link, bool powerOn);
- #endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
-diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
-index 2d995c87fbb98..d3c4a9a577eea 100644
---- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
-+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
-@@ -186,6 +186,7 @@ struct dmub_srv_region_params {
- uint32_t vbios_size;
- const uint8_t *fw_inst_const;
- const uint8_t *fw_bss_data;
-+ bool is_mailbox_in_inbox;
- };
-
- /**
-@@ -205,20 +206,25 @@ struct dmub_srv_region_params {
- */
- struct dmub_srv_region_info {
- uint32_t fb_size;
-+ uint32_t inbox_size;
- uint8_t num_regions;
- struct dmub_region regions[DMUB_WINDOW_TOTAL];
- };
-
- /**
-- * struct dmub_srv_fb_params - parameters used for driver fb setup
-+ * struct dmub_srv_memory_params - parameters used for driver fb setup
- * @region_info: region info calculated by dmub service
-- * @cpu_addr: base cpu address for the framebuffer
-- * @gpu_addr: base gpu virtual address for the framebuffer
-+ * @cpu_fb_addr: base cpu address for the framebuffer
-+ * @cpu_inbox_addr: base cpu address for the gart
-+ * @gpu_fb_addr: base gpu virtual address for the framebuffer
-+ * @gpu_inbox_addr: base gpu virtual address for the gart
- */
--struct dmub_srv_fb_params {
-+struct dmub_srv_memory_params {
- const struct dmub_srv_region_info *region_info;
-- void *cpu_addr;
-- uint64_t gpu_addr;
-+ void *cpu_fb_addr;
-+ void *cpu_inbox_addr;
-+ uint64_t gpu_fb_addr;
-+ uint64_t gpu_inbox_addr;
- };
-
- /**
-@@ -546,8 +552,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
--enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
-- const struct dmub_srv_fb_params *params,
-+enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
-+ const struct dmub_srv_memory_params *params,
- struct dmub_srv_fb_info *out);
-
- /**
-diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
-index 93624ffe4eb82..6c45e216c709c 100644
---- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
-+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
-@@ -386,7 +386,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
- uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
- uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
- uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
--
-+ uint32_t previous_top = 0;
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
-@@ -411,8 +411,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
- bios->base = dmub_align(stack->top, 256);
- bios->top = bios->base + params->vbios_size;
-
-- mail->base = dmub_align(bios->top, 256);
-- mail->top = mail->base + DMUB_MAILBOX_SIZE;
-+ if (params->is_mailbox_in_inbox) {
-+ mail->base = 0;
-+ mail->top = mail->base + DMUB_MAILBOX_SIZE;
-+ previous_top = bios->top;
-+ } else {
-+ mail->base = dmub_align(bios->top, 256);
-+ mail->top = mail->base + DMUB_MAILBOX_SIZE;
-+ previous_top = mail->top;
-+ }
-
- fw_info = dmub_get_fw_meta_info(params);
-
-@@ -431,7 +438,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
- dmub->fw_version = fw_info->fw_version;
- }
-
-- trace_buff->base = dmub_align(mail->top, 256);
-+ trace_buff->base = dmub_align(previous_top, 256);
- trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
-
- fw_state->base = dmub_align(trace_buff->top, 256);
-@@ -442,11 +449,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
-
- out->fb_size = dmub_align(scratch_mem->top, 4096);
-
-+ if (params->is_mailbox_in_inbox)
-+ out->inbox_size = dmub_align(mail->top, 4096);
-+
- return DMUB_STATUS_OK;
- }
-
--enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
-- const struct dmub_srv_fb_params *params,
-+enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
-+ const struct dmub_srv_memory_params *params,
- struct dmub_srv_fb_info *out)
- {
- uint8_t *cpu_base;
-@@ -461,8 +471,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
- if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
- return DMUB_STATUS_INVALID;
-
-- cpu_base = (uint8_t *)params->cpu_addr;
-- gpu_base = params->gpu_addr;
-+ cpu_base = (uint8_t *)params->cpu_fb_addr;
-+ gpu_base = params->gpu_fb_addr;
-
- for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
- const struct dmub_region *reg =
-@@ -470,6 +480,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
-
- out->fb[i].cpu_addr = cpu_base + reg->base;
- out->fb[i].gpu_addr = gpu_base + reg->base;
-+
-+ if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
-+ out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
-+ out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
-+ }
-+
- out->fb[i].size = reg->top - reg->base;
- }
-
-@@ -658,9 +674,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
-- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
-- dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
-- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
-+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
-+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-+
-+ if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
-+ return DMUB_STATUS_HW_FAILURE;
-+ } else {
-+ dmub->inbox1_rb.rptr = rptr;
-+ dmub->inbox1_rb.wrpt = wptr;
-+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
-+ }
- }
-
- return DMUB_STATUS_OK;
-@@ -694,6 +717,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- if (!dmub->hw_init)
- return DMUB_STATUS_INVALID;
-
-+ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
-+ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
-+ return DMUB_STATUS_HW_FAILURE;
-+ }
-+
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
- return DMUB_STATUS_OK;
-
-@@ -969,6 +997,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
- ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
- if (ack)
- return DMUB_STATUS_OK;
-+ udelay(1);
- }
- return DMUB_STATUS_TIMEOUT;
- }
-diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
-index c92c4b83253f8..4bff1ef8a9a64 100644
---- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
-+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
-@@ -6369,6 +6369,8 @@
- #define regTCP_INVALIDATE_BASE_IDX 1
- #define regTCP_STATUS 0x19a1
- #define regTCP_STATUS_BASE_IDX 1
-+#define regTCP_CNTL 0x19a2
-+#define regTCP_CNTL_BASE_IDX 1
- #define regTCP_CNTL2 0x19a3
- #define regTCP_CNTL2_BASE_IDX 1
- #define regTCP_DEBUG_INDEX 0x19a5
-diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
-index 0b6a057e0a4c4..5aac8d545bdc6 100644
---- a/drivers/gpu/drm/amd/include/pptable.h
-+++ b/drivers/gpu/drm/amd/include/pptable.h
-@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
- typedef struct _ATOM_PPLIB_STATE
- {
- UCHAR ucNonClockStateIndex;
-- UCHAR ucClockStateIndices[1]; // variable-sized
-+ UCHAR ucClockStateIndices[]; // variable-sized
- } ATOM_PPLIB_STATE;
-
-
-@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
- /**
- * Driver will read the first ucNumDPMLevels in this array
- */
-- UCHAR clockInfoIndex[1];
-+ UCHAR clockInfoIndex[];
- } ATOM_PPLIB_STATE_V2;
-
- typedef struct _StateArray{
-diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
-index 8bb2da13826f1..b4c9fedaa51de 100644
---- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
-+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
-@@ -734,7 +734,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
-- if (count > 127)
-+ if (count > 127 || count == 0)
- return -EINVAL;
-
- if (*buf == 's')
-@@ -754,7 +754,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
- else
- return -EINVAL;
-
-- memcpy(buf_cpy, buf, count+1);
-+ memcpy(buf_cpy, buf, count);
-+ buf_cpy[count] = 0;
-
- tmp_str = buf_cpy;
-
-@@ -771,6 +772,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
- return -EINVAL;
- parameter_size++;
-
-+ if (!tmp_str)
-+ break;
-+
- while (isspace(*tmp_str))
- tmp_str++;
- }
-diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
-index 7a31cfa5e7fb4..9fcad69a9f344 100644
---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
-+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
-@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
- typedef struct _ATOM_Tonga_State_Array {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
- } ATOM_Tonga_State_Array;
-
- typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
-@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
- typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Tonga_MCLK_Dependency_Table;
-
- typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
-@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
- typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Tonga_SCLK_Dependency_Table;
-
- typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
-@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
- typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Polaris_SCLK_Dependency_Table;
-
- typedef struct _ATOM_Tonga_PCIE_Record {
-@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
- typedef struct _ATOM_Tonga_PCIE_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Tonga_PCIE_Table;
-
- typedef struct _ATOM_Polaris10_PCIE_Record {
-@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
- typedef struct _ATOM_Polaris10_PCIE_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Polaris10_PCIE_Table;
-
-
-@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
- typedef struct _ATOM_Tonga_MM_Dependency_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Tonga_MM_Dependency_Table;
-
- typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
-@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
- typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
- UCHAR ucRevId;
- UCHAR ucNumEntries; /* Number of entries. */
-- ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
-+ ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
- } ATOM_Tonga_Voltage_Lookup_Table;
-
- typedef struct _ATOM_Tonga_Fan_Table {
-diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
-index 5a2371484a58c..11372fcc59c8f 100644
---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
-+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
-@@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-
- data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
- data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
-- data->pcie_dpm_key_disabled =
-- !amdgpu_device_pcie_dynamic_switching_supported() ||
-- !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
-+ data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
- /* need to set voltage control types before EVV patching */
- data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
- data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
-index f005a90c35af4..b47fd42414f46 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
-@@ -1232,7 +1232,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
- {
- struct smu_feature *feature = &smu->smu_feature;
- struct amdgpu_device *adev = smu->adev;
-- uint32_t pcie_gen = 0, pcie_width = 0;
-+ uint8_t pcie_gen = 0, pcie_width = 0;
- uint64_t features_supported;
- int ret = 0;
-
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
-index 5a52098bcf166..72ed836328966 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
-+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
-@@ -844,7 +844,7 @@ struct pptable_funcs {
- * &pcie_gen_cap: Maximum allowed PCIe generation.
- * &pcie_width_cap: Maximum allowed PCIe width.
- */
-- int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
-+ int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
-
- /**
- * @i2c_init: Initialize i2c.
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
-index 355c156d871af..cc02f979e9e98 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
-+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
-@@ -296,8 +296,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
- uint32_t pptable_id);
-
- int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
-- uint32_t pcie_gen_cap,
-- uint32_t pcie_width_cap);
-+ uint8_t pcie_gen_cap,
-+ uint8_t pcie_width_cap);
-
- #endif
- #endif
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
-index 18487ae10bcff..c564f6e191f84 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
-@@ -2376,8 +2376,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
- }
-
- static int navi10_update_pcie_parameters(struct smu_context *smu,
-- uint32_t pcie_gen_cap,
-- uint32_t pcie_width_cap)
-+ uint8_t pcie_gen_cap,
-+ uint8_t pcie_width_cap)
- {
- struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
-index da2860da60188..a7f4f82d23b4b 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
-@@ -2085,14 +2085,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
- #define MAX(a, b) ((a) > (b) ? (a) : (b))
-
- static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
-- uint32_t pcie_gen_cap,
-- uint32_t pcie_width_cap)
-+ uint8_t pcie_gen_cap,
-+ uint8_t pcie_width_cap)
- {
- struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
- uint8_t *table_member1, *table_member2;
-- uint32_t min_gen_speed, max_gen_speed;
-- uint32_t min_lane_width, max_lane_width;
-+ uint8_t min_gen_speed, max_gen_speed;
-+ uint8_t min_lane_width, max_lane_width;
- uint32_t smu_pcie_arg;
- int ret, i;
-
-@@ -2108,7 +2108,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
- min_lane_width = min_lane_width > max_lane_width ?
- max_lane_width : min_lane_width;
-
-- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
-+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
- pcie_table->pcie_gen[0] = max_gen_speed;
- pcie_table->pcie_lane[0] = max_lane_width;
- } else {
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
-index cc3169400c9b0..08fff9600bd29 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
-@@ -257,8 +257,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
- }
-
- smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
-- if (!smu_table->ecc_table)
-+ if (!smu_table->ecc_table) {
-+ kfree(smu_table->metrics_table);
-+ kfree(smu_table->gpu_metrics_table);
- return -ENOMEM;
-+ }
-
- return 0;
- }
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
-index 0232adb95df3a..5355f621388bb 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
-@@ -2420,8 +2420,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
- }
-
- int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
-- uint32_t pcie_gen_cap,
-- uint32_t pcie_width_cap)
-+ uint8_t pcie_gen_cap,
-+ uint8_t pcie_width_cap)
- {
- struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_pcie_table *pcie_table =
-@@ -2430,7 +2430,10 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
- uint32_t smu_pcie_arg;
- int ret, i;
-
-- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
-+ if (!num_of_levels)
-+ return 0;
-+
-+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
- if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
- pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
-
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
-index 3903a47669e43..4022dd44ebb2b 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
-@@ -352,12 +352,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
- smu->dc_controlled_by_gpio = true;
-
-- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
-- powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
-+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
- smu_baco->platform_support = true;
-
-- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
-- smu_baco->maco_support = true;
-+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
-+ smu_baco->maco_support = true;
-+ }
-
- /*
- * We are in the transition to a new OD mechanism.
-@@ -2163,38 +2163,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
- }
- }
-
-- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
-- (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
-- ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
-- ret = smu_cmn_update_table(smu,
-- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
-- WORKLOAD_PPLIB_COMPUTE_BIT,
-- (void *)(&activity_monitor_external),
-- false);
-- if (ret) {
-- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
-- return ret;
-- }
--
-- ret = smu_cmn_update_table(smu,
-- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
-- WORKLOAD_PPLIB_CUSTOM_BIT,
-- (void *)(&activity_monitor_external),
-- true);
-- if (ret) {
-- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
-- return ret;
-- }
--
-- workload_type = smu_cmn_to_asic_specific_index(smu,
-- CMN2ASIC_MAPPING_WORKLOAD,
-- PP_SMC_POWER_PROFILE_CUSTOM);
-- } else {
-- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-- workload_type = smu_cmn_to_asic_specific_index(smu,
-+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-+ workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
-- }
-
- if (workload_type < 0)
- return -EINVAL;
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
-index de80e191a92c4..24d6811438c5c 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
-@@ -1968,8 +1968,10 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
-
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
-- if (ret)
-+ if (ret) {
-+ kfree(metrics);
- return ret;
-+ }
-
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
-
-diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
-index 94ef5b4d116d7..51ae41cb43ea0 100644
---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
-+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
-@@ -341,12 +341,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
- smu->dc_controlled_by_gpio = true;
-
-- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
-- powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
-+ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
- smu_baco->platform_support = true;
-
-- if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
-- smu_baco->maco_support = true;
-+ if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
-+ && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
-+ smu_baco->maco_support = true;
-+ }
-
- #if 0
- if (!overdrive_lowerlimits->FeatureCtrlMask ||
-diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
-index 4618687a8f4d6..f3e744172673c 100644
---- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
-+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
-@@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
- return 0;
- }
-
--static void
-+static int
- komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
- struct komeda_pipeline_state *new)
- {
-@@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
- c = komeda_pipeline_get_component(pipe, id);
- c_st = komeda_component_get_state_and_set_user(c,
- drm_st, NULL, new->crtc);
-+ if (PTR_ERR(c_st) == -EDEADLK)
-+ return -EDEADLK;
- WARN_ON(IS_ERR(c_st));
- }
-+
-+ return 0;
- }
-
- /* release unclaimed pipeline resource */
-@@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
- if (WARN_ON(IS_ERR_OR_NULL(st)))
- return -EINVAL;
-
-- komeda_pipeline_unbound_components(pipe, st);
-+ return komeda_pipeline_unbound_components(pipe, st);
-
-- return 0;
- }
-
- /* Since standalone disabled components must be disabled separately and in the
-diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
-index d207b03f8357c..78122b35a0cbb 100644
---- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
-+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
-@@ -358,11 +358,18 @@ static void aspeed_gfx_remove(struct platform_device *pdev)
- sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
- drm_dev_unregister(drm);
- aspeed_gfx_unload(drm);
-+ drm_atomic_helper_shutdown(drm);
-+}
-+
-+static void aspeed_gfx_shutdown(struct platform_device *pdev)
-+{
-+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
- }
-
- static struct platform_driver aspeed_gfx_platform_driver = {
- .probe = aspeed_gfx_probe,
- .remove_new = aspeed_gfx_remove,
-+ .shutdown = aspeed_gfx_shutdown,
- .driver = {
- .name = "aspeed_gfx",
- .of_match_table = aspeed_gfx_match,
-diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
-index 848a9f1403e89..f7053f2972bb9 100644
---- a/drivers/gpu/drm/ast/ast_drv.h
-+++ b/drivers/gpu/drm/ast/ast_drv.h
-@@ -172,6 +172,17 @@ to_ast_sil164_connector(struct drm_connector *connector)
- return container_of(connector, struct ast_sil164_connector, base);
- }
-
-+struct ast_bmc_connector {
-+ struct drm_connector base;
-+ struct drm_connector *physical_connector;
-+};
-+
-+static inline struct ast_bmc_connector *
-+to_ast_bmc_connector(struct drm_connector *connector)
-+{
-+ return container_of(connector, struct ast_bmc_connector, base);
-+}
-+
- /*
- * Device
- */
-@@ -216,7 +227,7 @@ struct ast_device {
- } astdp;
- struct {
- struct drm_encoder encoder;
-- struct drm_connector connector;
-+ struct ast_bmc_connector bmc_connector;
- } bmc;
- } output;
-
-diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
-index 32f04ec6c386f..3de0f457fff6a 100644
---- a/drivers/gpu/drm/ast/ast_mode.c
-+++ b/drivers/gpu/drm/ast/ast_mode.c
-@@ -1767,6 +1767,30 @@ static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
- };
-
-+static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
-+ struct drm_modeset_acquire_ctx *ctx,
-+ bool force)
-+{
-+ struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
-+ struct drm_connector *physical_connector = bmc_connector->physical_connector;
-+
-+ /*
-+ * Most user-space compositors cannot handle more than one connected
-+ * connector per CRTC. Hence, we only mark the BMC as connected if the
-+ * physical connector is disconnected. If the physical connector's status
-+ * is connected or unknown, the BMC remains disconnected. This has no
-+ * effect on the output of the BMC.
-+ *
-+ * FIXME: Remove this logic once user-space compositors can handle more
-+ * than one connector per CRTC. The BMC should always be connected.
-+ */
-+
-+ if (physical_connector && physical_connector->status == connector_status_disconnected)
-+ return connector_status_connected;
-+
-+ return connector_status_disconnected;
-+}
-+
- static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
- {
- return drm_add_modes_noedid(connector, 4096, 4096);
-@@ -1774,6 +1798,7 @@ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
-
- static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
- .get_modes = ast_bmc_connector_helper_get_modes,
-+ .detect_ctx = ast_bmc_connector_helper_detect_ctx,
- };
-
- static const struct drm_connector_funcs ast_bmc_connector_funcs = {
-@@ -1784,12 +1809,33 @@ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- };
-
--static int ast_bmc_output_init(struct ast_device *ast)
-+static int ast_bmc_connector_init(struct drm_device *dev,
-+ struct ast_bmc_connector *bmc_connector,
-+ struct drm_connector *physical_connector)
-+{
-+ struct drm_connector *connector = &bmc_connector->base;
-+ int ret;
-+
-+ ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
-+ DRM_MODE_CONNECTOR_VIRTUAL);
-+ if (ret)
-+ return ret;
-+
-+ drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
-+
-+ bmc_connector->physical_connector = physical_connector;
-+
-+ return 0;
-+}
-+
-+static int ast_bmc_output_init(struct ast_device *ast,
-+ struct drm_connector *physical_connector)
- {
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.bmc.encoder;
-- struct drm_connector *connector = &ast->output.bmc.connector;
-+ struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
-+ struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_encoder_init(dev, encoder,
-@@ -1799,13 +1845,10 @@ static int ast_bmc_output_init(struct ast_device *ast)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
-- ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
-- DRM_MODE_CONNECTOR_VIRTUAL);
-+ ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
- if (ret)
- return ret;
-
-- drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
--
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-@@ -1864,6 +1907,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
- int ast_mode_config_init(struct ast_device *ast)
- {
- struct drm_device *dev = &ast->base;
-+ struct drm_connector *physical_connector = NULL;
- int ret;
-
- ret = drmm_mode_config_init(dev);
-@@ -1904,23 +1948,27 @@ int ast_mode_config_init(struct ast_device *ast)
- ret = ast_vga_output_init(ast);
- if (ret)
- return ret;
-+ physical_connector = &ast->output.vga.vga_connector.base;
- }
- if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
- ret = ast_sil164_output_init(ast);
- if (ret)
- return ret;
-+ physical_connector = &ast->output.sil164.sil164_connector.base;
- }
- if (ast->tx_chip_types & AST_TX_DP501_BIT) {
- ret = ast_dp501_output_init(ast);
- if (ret)
- return ret;
-+ physical_connector = &ast->output.dp501.connector;
- }
- if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
- ret = ast_astdp_output_init(ast);
- if (ret)
- return ret;
-+ physical_connector = &ast->output.astdp.connector;
- }
-- ret = ast_bmc_output_init(ast);
-+ ret = ast_bmc_output_init(ast, physical_connector);
- if (ret)
- return ret;
-
-diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
-index 44a660a4bdbfc..ba82a1142adf7 100644
---- a/drivers/gpu/drm/bridge/Kconfig
-+++ b/drivers/gpu/drm/bridge/Kconfig
-@@ -181,6 +181,7 @@ config DRM_NWL_MIPI_DSI
- select DRM_KMS_HELPER
- select DRM_MIPI_DSI
- select DRM_PANEL_BRIDGE
-+ select GENERIC_PHY
- select GENERIC_PHY_MIPI_DPHY
- select MFD_SYSCON
- select MULTIPLEXER
-@@ -227,6 +228,7 @@ config DRM_SAMSUNG_DSIM
- select DRM_KMS_HELPER
- select DRM_MIPI_DSI
- select DRM_PANEL_BRIDGE
-+ select GENERIC_PHY
- select GENERIC_PHY_MIPI_DPHY
- help
- The Samsung MIPI DSIM bridge controller driver.
-diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
-index ec35215a20034..cced81633ddcd 100644
---- a/drivers/gpu/drm/bridge/cadence/Kconfig
-+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
-@@ -4,6 +4,7 @@ config DRM_CDNS_DSI
- select DRM_KMS_HELPER
- select DRM_MIPI_DSI
- select DRM_PANEL_BRIDGE
-+ select GENERIC_PHY
- select GENERIC_PHY_MIPI_DPHY
- depends on OF
- help
-diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
-index 466641c77fe91..8f5846b76d594 100644
---- a/drivers/gpu/drm/bridge/ite-it66121.c
-+++ b/drivers/gpu/drm/bridge/ite-it66121.c
-@@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
- mutex_lock(&ctx->lock);
- ret = it66121_preamble_ddc(ctx);
- if (ret) {
-- edid = ERR_PTR(ret);
-+ edid = NULL;
- goto out_unlock;
- }
-
- ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
- IT66121_DDC_HEADER_EDID);
- if (ret) {
-- edid = ERR_PTR(ret);
-+ edid = NULL;
- goto out_unlock;
- }
-
-@@ -1447,10 +1447,14 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
- struct it66121_ctx *ctx = dev_get_drvdata(dev);
-
- mutex_lock(&ctx->lock);
--
-- memcpy(buf, ctx->connector->eld,
-- min(sizeof(ctx->connector->eld), len));
--
-+ if (!ctx->connector) {
-+		/* Pass an empty ELD if connector not available */
-+		dev_dbg(dev, "No connector present, passing empty ELD data");
-+ memset(buf, 0, len);
-+ } else {
-+ memcpy(buf, ctx->connector->eld,
-+ min(sizeof(ctx->connector->eld), len));
-+ }
- mutex_unlock(&ctx->lock);
-
- return 0;
-diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
-index 4eaea67fb71c2..03532efb893bb 100644
---- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
-+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
-@@ -45,7 +45,6 @@ struct lt8912 {
-
- u8 data_lanes;
- bool is_power_on;
-- bool is_attached;
- };
-
- static int lt8912_write_init_config(struct lt8912 *lt)
-@@ -559,6 +558,13 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
- struct lt8912 *lt = bridge_to_lt8912(bridge);
- int ret;
-
-+ ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
-+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-+ if (ret < 0) {
-+ dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
-+ return ret;
-+ }
-+
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = lt8912_bridge_connector_init(bridge);
- if (ret) {
-@@ -575,8 +581,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge,
- if (ret)
- goto error;
-
-- lt->is_attached = true;
--
- return 0;
-
- error:
-@@ -588,15 +592,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
- {
- struct lt8912 *lt = bridge_to_lt8912(bridge);
-
-- if (lt->is_attached) {
-- lt8912_hard_power_off(lt);
--
-- if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
-- drm_bridge_hpd_disable(lt->hdmi_port);
-+ lt8912_hard_power_off(lt);
-
-- drm_connector_unregister(&lt->connector);
-- drm_connector_cleanup(&lt->connector);
-- }
-+ if (lt->connector.dev && lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
-+ drm_bridge_hpd_disable(lt->hdmi_port);
- }
-
- static enum drm_connector_status
-@@ -750,7 +749,6 @@ static void lt8912_remove(struct i2c_client *client)
- {
- struct lt8912 *lt = i2c_get_clientdata(client);
-
-- lt8912_bridge_detach(&lt->bridge);
- drm_bridge_remove(&lt->bridge);
- lt8912_free_i2c(lt);
- lt8912_put_dt(lt);
-diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
-index 22c84d29c2bc5..6f33bb0dd32aa 100644
---- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
-+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
-@@ -929,9 +929,9 @@ retry:
- init_waitqueue_head(&lt9611uxc->wq);
- INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
-
-- ret = devm_request_threaded_irq(dev, client->irq, NULL,
-- lt9611uxc_irq_thread_handler,
-- IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
-+ ret = request_threaded_irq(client->irq, NULL,
-+ lt9611uxc_irq_thread_handler,
-+ IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- goto err_disable_regulators;
-@@ -967,6 +967,8 @@ retry:
- return lt9611uxc_audio_init(dev, lt9611uxc);
-
- err_remove_bridge:
-+ free_irq(client->irq, lt9611uxc);
-+ cancel_work_sync(&lt9611uxc->work);
- drm_bridge_remove(&lt9611uxc->bridge);
-
- err_disable_regulators:
-@@ -983,7 +985,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
- {
- struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
-
-- disable_irq(client->irq);
-+ free_irq(client->irq, lt9611uxc);
- cancel_work_sync(&lt9611uxc->work);
- lt9611uxc_audio_exit(lt9611uxc);
- drm_bridge_remove(&lt9611uxc->bridge);
-diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
-index cf777bdb25d2a..19bdb32dbc9aa 100644
---- a/drivers/gpu/drm/bridge/samsung-dsim.c
-+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
-@@ -385,7 +385,7 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
- [RESET_TYPE] = DSIM_SWRST,
- [PLL_TIMER] = 500,
- [STOP_STATE_CNT] = 0xf,
-- [PHYCTRL_ULPS_EXIT] = 0,
-+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
- [PHYCTRL_VREG_LP] = 0,
- [PHYCTRL_SLEW_UP] = 0,
- [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
-@@ -413,6 +413,7 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
- .m_min = 41,
- .m_max = 125,
- .min_freq = 500,
-+ .has_broken_fifoctrl_emptyhdr = 1,
- };
-
- static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
-@@ -429,6 +430,7 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
- .m_min = 41,
- .m_max = 125,
- .min_freq = 500,
-+ .has_broken_fifoctrl_emptyhdr = 1,
- };
-
- static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
-@@ -1010,8 +1012,20 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
- do {
- u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
-
-- if (reg & DSIM_SFR_HEADER_EMPTY)
-- return 0;
-+ if (!dsi->driver_data->has_broken_fifoctrl_emptyhdr) {
-+ if (reg & DSIM_SFR_HEADER_EMPTY)
-+ return 0;
-+ } else {
-+ if (!(reg & DSIM_SFR_HEADER_FULL)) {
-+ /*
-+ * Wait a little bit, so the pending data can
-+ * actually leave the FIFO to avoid overflow.
-+ */
-+ if (!cond_resched())
-+ usleep_range(950, 1050);
-+ return 0;
-+ }
-+ }
-
- if (!cond_resched())
- usleep_range(950, 1050);
-diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
-index 819a4b6ec2a07..6eed5c4232956 100644
---- a/drivers/gpu/drm/bridge/tc358768.c
-+++ b/drivers/gpu/drm/bridge/tc358768.c
-@@ -15,6 +15,7 @@
- #include <linux/regmap.h>
- #include <linux/regulator/consumer.h>
- #include <linux/slab.h>
-+#include <linux/units.h>
-
- #include <drm/drm_atomic_helper.h>
- #include <drm/drm_drv.h>
-@@ -216,6 +217,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
- u32 tmp, orig;
-
- tc358768_read(priv, reg, &orig);
-+
-+ if (priv->error)
-+ return;
-+
- tmp = orig & ~mask;
- tmp |= val & mask;
- if (tmp != orig)
-@@ -600,7 +605,7 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
-
- dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
- clk_get_rate(priv->refclk), fbd, prd, frs);
-- dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
-+ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
- priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
- dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
- tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
-@@ -623,15 +628,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
- return tc358768_clear_error(priv);
- }
-
--#define TC358768_PRECISION 1000
--static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
-+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
- {
-- return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
-+ return DIV_ROUND_UP(ns * 1000, period_ps);
- }
-
--static u32 tc358768_to_ns(u32 nsk)
-+static u32 tc358768_ps_to_ns(u32 ps)
- {
-- return (nsk / TC358768_PRECISION);
-+ return ps / 1000;
- }
-
- static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
-@@ -642,13 +646,15 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
- u32 val, val2, lptxcnt, hact, data_type;
- s32 raw_val;
- const struct drm_display_mode *mode;
-- u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
-- u32 dsiclk, dsibclk, video_start;
-+ u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
-+ u32 dsiclk, hsbyteclk, video_start;
- const u32 internal_delay = 40;
- int ret, i;
-+ struct videomode vm;
-+ struct device *dev = priv->dev;
-
- if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
-- dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
-+ dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
- mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
- }
-
-@@ -656,7 +662,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
-
- ret = tc358768_sw_reset(priv);
- if (ret) {
-- dev_err(priv->dev, "Software reset failed: %d\n", ret);
-+ dev_err(dev, "Software reset failed: %d\n", ret);
- tc358768_hw_disable(priv);
- return;
- }
-@@ -664,45 +670,47 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
- mode = &bridge->encoder->crtc->state->adjusted_mode;
- ret = tc358768_setup_pll(priv, mode);
- if (ret) {
-- dev_err(priv->dev, "PLL setup failed: %d\n", ret);
-+ dev_err(dev, "PLL setup failed: %d\n", ret);
- tc358768_hw_disable(priv);
- return;
- }
-
-+ drm_display_mode_to_videomode(mode, &vm);
-+
- dsiclk = priv->dsiclk;
-- dsibclk = dsiclk / 4;
-+ hsbyteclk = dsiclk / 4;
-
- /* Data Format Control Register */
- val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
- switch (dsi_dev->format) {
- case MIPI_DSI_FMT_RGB888:
- val |= (0x3 << 4);
-- hact = mode->hdisplay * 3;
-- video_start = (mode->htotal - mode->hsync_start) * 3;
-+ hact = vm.hactive * 3;
-+ video_start = (vm.hsync_len + vm.hback_porch) * 3;
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- break;
- case MIPI_DSI_FMT_RGB666:
- val |= (0x4 << 4);
-- hact = mode->hdisplay * 3;
-- video_start = (mode->htotal - mode->hsync_start) * 3;
-+ hact = vm.hactive * 3;
-+ video_start = (vm.hsync_len + vm.hback_porch) * 3;
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- break;
-
- case MIPI_DSI_FMT_RGB666_PACKED:
- val |= (0x4 << 4) | BIT(3);
-- hact = mode->hdisplay * 18 / 8;
-- video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
-+ hact = vm.hactive * 18 / 8;
-+ video_start = (vm.hsync_len + vm.hback_porch) * 18 / 8;
- data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- break;
-
- case MIPI_DSI_FMT_RGB565:
- val |= (0x5 << 4);
-- hact = mode->hdisplay * 2;
-- video_start = (mode->htotal - mode->hsync_start) * 2;
-+ hact = vm.hactive * 2;
-+ video_start = (vm.hsync_len + vm.hback_porch) * 2;
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- break;
- default:
-- dev_err(priv->dev, "Invalid data format (%u)\n",
-+ dev_err(dev, "Invalid data format (%u)\n",
- dsi_dev->format);
- tc358768_hw_disable(priv);
- return;
-@@ -722,67 +730,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
- tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
-
- /* DSI Timings */
-- dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
-- dsibclk);
-- dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
-- ui_nsk = dsiclk_nsk / 2;
-- dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
-- dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
-- dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
-+ hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
-+ dsiclk_ps = (u32)div_u64(PICO, dsiclk);
-+ ui_ps = dsiclk_ps / 2;
-+ dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
-+ ui_ps, hsbyteclk_ps);
-
- /* LP11 > 100us for D-PHY Rx Init */
-- val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
-- dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
-+ val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
-+ dev_dbg(dev, "LINEINITCNT: %u\n", val);
- tc358768_write(priv, TC358768_LINEINITCNT, val);
-
- /* LPTimeCnt > 50ns */
-- val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
-+ val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
- lptxcnt = val;
-- dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
-+ dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
- tc358768_write(priv, TC358768_LPTXTIMECNT, val);
-
- /* 38ns < TCLK_PREPARE < 95ns */
-- val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
-+ val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
-+ dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
- /* TCLK_PREPARE + TCLK_ZERO > 300ns */
-- val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
-- dsibclk_nsk) - 2;
-+ val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
-+ hsbyteclk_ps) - 2;
-+ dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
- val |= val2 << 8;
-- dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
- tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
-
- /* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
-- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
-+ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
- val = clamp(raw_val, 0, 127);
-- dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
-+ dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
- tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
-
- /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
-- val = 50 + tc358768_to_ns(4 * ui_nsk);
-- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
-+ val = 50 + tc358768_ps_to_ns(4 * ui_ps);
-+ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
-+ dev_dbg(dev, "THS_PREPARECNT %u\n", val);
- /* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
-- raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
-+ raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
- val2 = clamp(raw_val, 0, 127);
-+ dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
- val |= val2 << 8;
-- dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
- tc358768_write(priv, TC358768_THS_HEADERCNT, val);
-
- /* TWAKEUP > 1ms in lptxcnt steps */
-- val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
-+ val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
- val = val / (lptxcnt + 1) - 1;
-- dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
-+ dev_dbg(dev, "TWAKEUP: %u\n", val);
- tc358768_write(priv, TC358768_TWAKEUP, val);
-
- /* TCLK_POSTCNT > 60ns + 52*UI */
-- val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
-- dsibclk_nsk) - 3;
-- dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
-+ val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
-+ hsbyteclk_ps) - 3;
-+ dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
- tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
-
- /* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
-- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
-- dsibclk_nsk) - 4;
-+ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
-+ hsbyteclk_ps) - 4;
- val = clamp(raw_val, 0, 15);
-- dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
-+ dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
- tc358768_write(priv, TC358768_THS_TRAILCNT, val);
-
- val = BIT(0);
-@@ -790,16 +798,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
- val |= BIT(i + 1);
- tc358768_write(priv, TC358768_HSTXVREGEN, val);
-
-- if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
-- tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
-+ tc358768_write(priv, TC358768_TXOPTIONCNTRL,
-+ (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
-
- /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
-- val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
-- val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
-- val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
-- dsibclk_nsk) - 2;
-+ val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
-+ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
-+ dev_dbg(dev, "TXTAGOCNT: %u\n", val);
-+ val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
-+ hsbyteclk_ps) - 2;
-+ dev_dbg(dev, "RXTASURECNT: %u\n", val2);
- val = val << 16 | val2;
-- dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
- tc358768_write(priv, TC358768_BTACNTRL1, val);
-
- /* START[0] */
-@@ -810,43 +819,43 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
- tc358768_write(priv, TC358768_DSI_EVENT, 0);
-
- /* vact */
-- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
-+ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
-
- /* vsw */
-- tc358768_write(priv, TC358768_DSI_VSW,
-- mode->vsync_end - mode->vsync_start);
-+ tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
-+
- /* vbp */
-- tc358768_write(priv, TC358768_DSI_VBPR,
-- mode->vtotal - mode->vsync_end);
-+ tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
-
- /* hsw * byteclk * ndl / pclk */
-- val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
-- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
-- mode->clock * 1000);
-+ val = (u32)div_u64(vm.hsync_len *
-+ (u64)hsbyteclk * priv->dsi_lanes,
-+ vm.pixelclock);
- tc358768_write(priv, TC358768_DSI_HSW, val);
-
- /* hbp * byteclk * ndl / pclk */
-- val = (u32)div_u64((mode->htotal - mode->hsync_end) *
-- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
-- mode->clock * 1000);
-+ val = (u32)div_u64(vm.hback_porch *
-+ (u64)hsbyteclk * priv->dsi_lanes,
-+ vm.pixelclock);
- tc358768_write(priv, TC358768_DSI_HBPR, val);
- } else {
- /* Set event mode */
- tc358768_write(priv, TC358768_DSI_EVENT, 1);
-
- /* vact */
-- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
-+ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
-
- /* vsw (+ vbp) */
- tc358768_write(priv, TC358768_DSI_VSW,
-- mode->vtotal - mode->vsync_start);
-+ vm.vsync_len + vm.vback_porch);
-+
- /* vbp (not used in event mode) */
- tc358768_write(priv, TC358768_DSI_VBPR, 0);
-
- /* (hsw + hbp) * byteclk * ndl / pclk */
-- val = (u32)div_u64((mode->htotal - mode->hsync_start) *
-- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
-- mode->clock * 1000);
-+ val = (u32)div_u64((vm.hsync_len + vm.hback_porch) *
-+ (u64)hsbyteclk * priv->dsi_lanes,
-+ vm.pixelclock);
- tc358768_write(priv, TC358768_DSI_HSW, val);
-
- /* hbp (not used in event mode) */
-@@ -857,11 +866,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
- tc358768_write(priv, TC358768_DSI_HACT, hact);
-
- /* VSYNC polarity */
-- if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
-- tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
-+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
-+ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
-+
- /* HSYNC polarity */
-- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-- tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
-+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
-+ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
-
- /* Start DSI Tx */
- tc358768_write(priv, TC358768_DSI_START, 0x1);
-@@ -891,7 +901,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
-
- ret = tc358768_clear_error(priv);
- if (ret) {
-- dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
-+ dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
-diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
-index 4b71040ae5be5..b3e1b288fc0c2 100644
---- a/drivers/gpu/drm/drm_edid.c
-+++ b/drivers/gpu/drm/drm_edid.c
-@@ -3499,11 +3499,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
- mode->vsync_end = mode->vsync_start + vsync_pulse_width;
- mode->vtotal = mode->vdisplay + vblank;
-
-- /* Some EDIDs have bogus h/vtotal values */
-- if (mode->hsync_end > mode->htotal)
-- mode->htotal = mode->hsync_end + 1;
-- if (mode->vsync_end > mode->vtotal)
-- mode->vtotal = mode->vsync_end + 1;
-+ /* Some EDIDs have bogus h/vsync_end values */
-+ if (mode->hsync_end > mode->htotal) {
-+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
-+ connector->base.id, connector->name,
-+ mode->hsync_end, mode->htotal);
-+ mode->hsync_end = mode->htotal;
-+ }
-+ if (mode->vsync_end > mode->vtotal) {
-+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
-+ connector->base.id, connector->name,
-+ mode->vsync_end, mode->vtotal);
-+ mode->vsync_end = mode->vtotal;
-+ }
-
- drm_mode_do_interlace_quirk(mode, pt);
-
-diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
-index 150fe15550680..94375c6a54256 100644
---- a/drivers/gpu/drm/drm_lease.c
-+++ b/drivers/gpu/drm/drm_lease.c
-@@ -510,8 +510,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
- /* Handle leased objects, if any */
- idr_init(&leases);
- if (object_count != 0) {
-- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
-- array_size(object_count, sizeof(__u32)));
-+ object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
-+ object_count, sizeof(__u32));
- if (IS_ERR(object_ids)) {
- ret = PTR_ERR(object_ids);
- idr_destroy(&leases);
-diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
-index f7003d1ec5ef1..01da6789d0440 100644
---- a/drivers/gpu/drm/drm_syncobj.c
-+++ b/drivers/gpu/drm/drm_syncobj.c
-@@ -1069,7 +1069,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
- fence = drm_syncobj_fence_get(syncobjs[i]);
- if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
- dma_fence_put(fence);
-- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
-+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
-+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
- continue;
- } else {
- timeout = -EINVAL;
-diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
-index f7f709df99b49..70d9adafa2333 100644
---- a/drivers/gpu/drm/gma500/psb_drv.h
-+++ b/drivers/gpu/drm/gma500/psb_drv.h
-@@ -424,6 +424,7 @@ struct drm_psb_private {
- uint32_t pipestat[PSB_NUM_PIPE];
-
- spinlock_t irqmask_lock;
-+ bool irq_enabled;
-
- /* Power */
- bool pm_initialized;
-diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
-index 343c51250207d..7bbb79b0497d8 100644
---- a/drivers/gpu/drm/gma500/psb_irq.c
-+++ b/drivers/gpu/drm/gma500/psb_irq.c
-@@ -327,6 +327,8 @@ int gma_irq_install(struct drm_device *dev)
-
- gma_irq_postinstall(dev);
-
-+ dev_priv->irq_enabled = true;
-+
- return 0;
- }
-
-@@ -337,6 +339,9 @@ void gma_irq_uninstall(struct drm_device *dev)
- unsigned long irqflags;
- unsigned int i;
-
-+ if (!dev_priv->irq_enabled)
-+ return;
-+
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- if (dev_priv->ops->hotplug_enable)
-diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
-index 2fb030b1ff1de..f99cf8037bd68 100644
---- a/drivers/gpu/drm/i915/display/intel_cdclk.c
-+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
-@@ -2688,6 +2688,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
- for_each_pipe(dev_priv, pipe)
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
-
-+ /*
-+ * Avoid glk_force_audio_cdclk() causing excessive screen
-+ * blinking when multiple pipes are active by making sure
-+ * CDCLK frequency is always high enough for audio. With a
-+ * single active pipe we can always change CDCLK frequency
-+ * by changing the cd2x divider (see glk_cdclk_table[]) and
-+ * thus a full modeset won't be needed then.
-+ */
-+ if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
-+ !is_power_of_2(cdclk_state->active_pipes))
-+ min_cdclk = max(2 * 96000, min_cdclk);
-+
- if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
- drm_dbg_kms(&dev_priv->drm,
- "required cdclk (%d kHz) exceeds max (%d kHz)\n",
-diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
-index e0e4cb5292846..119a4de7fe6f7 100644
---- a/drivers/gpu/drm/i915/display/intel_dp.c
-+++ b/drivers/gpu/drm/i915/display/intel_dp.c
-@@ -430,7 +430,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_is_c10phy(i915, phy))
-- return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
-+ return 810000;
-
- return 2000000;
- }
-@@ -5517,8 +5517,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- * (eg. Acer Chromebook C710), so we'll check it only if multiple
- * ports are attempting to use the same AUX CH, according to VBT.
- */
-- if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
-- !intel_digital_port_connected(encoder)) {
-+ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
- /*
- * If this fails, presume the DPCD answer came
- * from some other port using the same AUX CH.
-@@ -5526,10 +5525,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- * FIXME maybe cleaner to check this before the
- * DPCD read? Would need sort out the VDD handling...
- */
-- drm_info(&dev_priv->drm,
-- "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
-- encoder->base.base.id, encoder->base.name);
-- goto out_vdd_off;
-+ if (!intel_digital_port_connected(encoder)) {
-+ drm_info(&dev_priv->drm,
-+ "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
-+ encoder->base.base.id, encoder->base.name);
-+ goto out_vdd_off;
-+ }
-+
-+ /*
-+ * Unfortunately even the HPD based detection fails on
-+ * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
-+ * back to checking for a VGA branch device. Only do this
-+ * on known affected platforms to minimize false positives.
-+ */
-+ if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
-+ (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
-+ DP_DWN_STRM_PORT_TYPE_ANALOG) {
-+ drm_info(&dev_priv->drm,
-+ "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
-+ encoder->base.base.id, encoder->base.name);
-+ goto out_vdd_off;
-+ }
- }
-
- mutex_lock(&dev_priv->drm.mode_config.mutex);
-diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
-index 3ebf41859043e..cdf2455440bea 100644
---- a/drivers/gpu/drm/i915/display/intel_tc.c
-+++ b/drivers/gpu/drm/i915/display/intel_tc.c
-@@ -58,7 +58,7 @@ struct intel_tc_port {
- struct delayed_work link_reset_work;
- int link_refcount;
- bool legacy_port:1;
-- char port_name[8];
-+ const char *port_name;
- enum tc_port_mode mode;
- enum tc_port_mode init_mode;
- enum phy_fia phy_fia;
-@@ -1841,8 +1841,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
- else
- tc->phy_ops = &icl_tc_phy_ops;
-
-- snprintf(tc->port_name, sizeof(tc->port_name),
-- "%c/TC#%d", port_name(port), tc_port + 1);
-+ tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
-+ tc_port + 1);
-+ if (!tc->port_name) {
-+ kfree(tc);
-+ return -ENOMEM;
-+ }
-
- mutex_init(&tc->lock);
- /* TODO: Combine the two works */
-@@ -1863,6 +1867,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
- {
- intel_tc_port_suspend(dig_port);
-
-+ kfree(dig_port->tc->port_name);
- kfree(dig_port->tc);
- dig_port->tc = NULL;
- }
-diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
-index 9a9ff84c90d7e..e38f06a6e56eb 100644
---- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
-+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
-@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
- if (idx >= pc->num_user_engines)
- return -EINVAL;
-
-+ idx = array_index_nospec(idx, pc->num_user_engines);
- pe = &pc->user_engines[idx];
-
- /* Only render engine supports RPCS configuration. */
-diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
-index dcedff41a825f..d304e0a948f0d 100644
---- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
-+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
-@@ -42,12 +42,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
- (struct llist_head *)&engine->i915->uabi_engines);
- }
-
--static const u8 uabi_classes[] = {
-+#define I915_NO_UABI_CLASS ((u16)(-1))
-+
-+static const u16 uabi_classes[] = {
- [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
- [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
- [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
- [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
- [COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
-+ [OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
- };
-
- static int engine_cmp(void *priv, const struct list_head *A,
-@@ -202,6 +205,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
-
- void intel_engines_driver_register(struct drm_i915_private *i915)
- {
-+ u16 name_instance, other_instance = 0;
- struct legacy_ring ring = {};
- struct list_head *it, *next;
- struct rb_node **p, *prev;
-@@ -219,27 +223,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
- if (intel_gt_has_unrecoverable_error(engine->gt))
- continue; /* ignore incomplete engines */
-
-- /*
-- * We don't want to expose the GSC engine to the users, but we
-- * still rename it so it is easier to identify in the debug logs
-- */
-- if (engine->id == GSC0) {
-- engine_rename(engine, "gsc", 0);
-- continue;
-- }
--
- GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
- engine->uabi_class = uabi_classes[engine->class];
-+ if (engine->uabi_class == I915_NO_UABI_CLASS) {
-+ name_instance = other_instance++;
-+ } else {
-+ GEM_BUG_ON(engine->uabi_class >=
-+ ARRAY_SIZE(i915->engine_uabi_class_count));
-+ name_instance =
-+ i915->engine_uabi_class_count[engine->uabi_class]++;
-+ }
-+ engine->uabi_instance = name_instance;
-
-- GEM_BUG_ON(engine->uabi_class >=
-- ARRAY_SIZE(i915->engine_uabi_class_count));
-- engine->uabi_instance =
-- i915->engine_uabi_class_count[engine->uabi_class]++;
--
-- /* Replace the internal name with the final user facing name */
-+ /*
-+ * Replace the internal name with the final user and log facing
-+ * name.
-+ */
- engine_rename(engine,
- intel_engine_class_repr(engine->class),
-- engine->uabi_instance);
-+ name_instance);
-+
-+ if (engine->uabi_class == I915_NO_UABI_CLASS)
-+ continue;
-
- rb_link_node(&engine->uabi_node, prev, p);
- rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
-diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
-index da21f2786b5d7..b20d8fe8aa95d 100644
---- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
-+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
-@@ -190,6 +190,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
- spin_unlock_irq(&uncore->lock);
- }
-
-+static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
-+{
-+ /*
-+ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
-+ * will be dropped. For WC mappings in general we have 64 byte burst
-+ * writes when the WC buffer is flushed, so we can't use it, but have to
-+ * resort to an uncached mapping. The WC issue is easily caught by the
-+ * readback check when writing GTT PTE entries.
-+ */
-+ if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
-+ return true;
-+
-+ return false;
-+}
-+
- static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
- {
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
-@@ -197,8 +212,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
- /*
- * Note that as an uncached mmio write, this will flush the
- * WCB of the writes into the GGTT before it triggers the invalidate.
-+ *
-+ * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
- */
-- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-+ if (needs_wc_ggtt_mapping(ggtt->vm.i915))
-+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
-+ GFX_FLSH_CNTL_EN);
- }
-
- static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
-@@ -902,17 +921,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
- GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
-
-- /*
-- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
-- * will be dropped. For WC mappings in general we have 64 byte burst
-- * writes when the WC buffer is flushed, so we can't use it, but have to
-- * resort to an uncached mapping. The WC issue is easily caught by the
-- * readback check when writing GTT PTE entries.
-- */
-- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
-- ggtt->gsm = ioremap(phys_addr, size);
-- else
-+ if (needs_wc_ggtt_mapping(i915))
- ggtt->gsm = ioremap_wc(phys_addr, size);
-+ else
-+ ggtt->gsm = ioremap(phys_addr, size);
-+
- if (!ggtt->gsm) {
- drm_err(&i915->drm, "Failed to map the ggtt page table\n");
- return -ENOMEM;
-diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
-index 449f0b7fc8434..95631e8f39e7b 100644
---- a/drivers/gpu/drm/i915/gt/intel_gt.c
-+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
-@@ -967,8 +967,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
-
- err:
- i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
-- intel_gt_release_all(i915);
--
- return ret;
- }
-
-@@ -987,15 +985,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
- return 0;
- }
-
--void intel_gt_release_all(struct drm_i915_private *i915)
--{
-- struct intel_gt *gt;
-- unsigned int id;
--
-- for_each_gt(gt, i915, id)
-- i915->gt[id] = NULL;
--}
--
- void intel_gt_info_print(const struct intel_gt_info *info,
- struct drm_printer *p)
- {
-diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
-index 58bb1c55294c9..ccdc1afbf11b5 100644
---- a/drivers/gpu/drm/i915/gt/intel_rc6.c
-+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
-@@ -584,19 +584,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
-
- static void rc6_res_reg_init(struct intel_rc6 *rc6)
- {
-- memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
-+ i915_reg_t res_reg[INTEL_RC6_RES_MAX] = {
-+ [0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG,
-+ };
-
- switch (rc6_to_gt(rc6)->type) {
- case GT_MEDIA:
-- rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
-+ res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
- break;
- default:
-- rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
-- rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
-- rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
-- rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
-+ res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
-+ res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
-+ res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
-+ res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
- break;
- }
-+
-+ memcpy(rc6->res_reg, res_reg, sizeof(res_reg));
- }
-
- void intel_rc6_init(struct intel_rc6 *rc6)
-diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
-index ec4d26b3c17cc..8dc5f85b7747b 100644
---- a/drivers/gpu/drm/i915/i915_driver.c
-+++ b/drivers/gpu/drm/i915/i915_driver.c
-@@ -777,7 +777,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-
- ret = i915_driver_mmio_probe(i915);
- if (ret < 0)
-- goto out_tiles_cleanup;
-+ goto out_runtime_pm_put;
-
- ret = i915_driver_hw_probe(i915);
- if (ret < 0)
-@@ -837,8 +837,6 @@ out_cleanup_hw:
- i915_ggtt_driver_late_release(i915);
- out_cleanup_mmio:
- i915_driver_mmio_release(i915);
--out_tiles_cleanup:
-- intel_gt_release_all(i915);
- out_runtime_pm_put:
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_driver_late_release(i915);
-diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
-index 59e1e21df2710..109135fcfca28 100644
---- a/drivers/gpu/drm/i915/i915_perf.c
-+++ b/drivers/gpu/drm/i915/i915_perf.c
-@@ -4286,11 +4286,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
- u32 known_open_flags;
- int ret;
-
-- if (!perf->i915) {
-- drm_dbg(&perf->i915->drm,
-- "i915 perf interface not available for this system\n");
-+ if (!perf->i915)
- return -ENOTSUPP;
-- }
-
- known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
- I915_PERF_FLAG_FD_NONBLOCK |
-@@ -4666,11 +4663,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
- struct i915_oa_reg *regs;
- int err, id;
-
-- if (!perf->i915) {
-- drm_dbg(&perf->i915->drm,
-- "i915 perf interface not available for this system\n");
-+ if (!perf->i915)
- return -ENOTSUPP;
-- }
-
- if (!perf->metrics_kobj) {
- drm_dbg(&perf->i915->drm,
-@@ -4832,11 +4826,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
- struct i915_oa_config *oa_config;
- int ret;
-
-- if (!perf->i915) {
-- drm_dbg(&perf->i915->drm,
-- "i915 perf interface not available for this system\n");
-+ if (!perf->i915)
- return -ENOTSUPP;
-- }
-
- if (i915_perf_stream_paranoid && !perfmon_capable()) {
- drm_dbg(&perf->i915->drm,
-diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c
-index 04c15b4697e21..2609a2256da4b 100644
---- a/drivers/gpu/drm/loongson/lsdc_pixpll.c
-+++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c
-@@ -120,12 +120,14 @@ static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this)
- struct lsdc_pixpll_parms *pparms;
-
- this->mmio = ioremap(this->reg_base, this->reg_size);
-- if (IS_ERR_OR_NULL(this->mmio))
-+ if (!this->mmio)
- return -ENOMEM;
-
- pparms = kzalloc(sizeof(*pparms), GFP_KERNEL);
-- if (IS_ERR_OR_NULL(pparms))
-+ if (!pparms) {
-+ iounmap(this->mmio);
- return -ENOMEM;
-+ }
-
- pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
-
-diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
-index 2cb47f6637568..0e285df6577ea 100644
---- a/drivers/gpu/drm/mediatek/mtk_dp.c
-+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
-@@ -2034,7 +2034,6 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
- bool enabled = mtk_dp->enabled;
- struct edid *new_edid = NULL;
- struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
-- struct cea_sad *sads;
-
- if (!enabled) {
- drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
-@@ -2049,11 +2048,16 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
- */
- if (mtk_dp_parse_capabilities(mtk_dp)) {
- drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
-+ kfree(new_edid);
- new_edid = NULL;
- }
-
- if (new_edid) {
-+ struct cea_sad *sads;
-+
- audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
-+ kfree(sads);
-+
- audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
- }
-
-diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
-index b6fa4ad2f94dc..0a511d7688a3a 100644
---- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
-+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
-@@ -408,6 +408,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
- unsigned int local_layer;
-
- plane_state = to_mtk_plane_state(plane->state);
-+
-+ /* should not enable layer before crtc enabled */
-+ plane_state->pending.enable = false;
- comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
- if (comp)
- mtk_ddp_comp_layer_config(comp, local_layer,
-diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
-index 93552d76b6e77..2d6a979afe8f9 100644
---- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
-+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
-@@ -288,6 +288,7 @@ static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
- static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
- .main_path = mt8188_mtk_ddp_main,
- .main_len = ARRAY_SIZE(mt8188_mtk_ddp_main),
-+ .mmsys_dev_num = 1,
- };
-
- static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
-diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
-index 0e0a41b2f57f0..4f2e3feabc0f8 100644
---- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
-+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
-@@ -121,7 +121,14 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
- int ret;
-
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
-- args->size = args->pitch * args->height;
-+
-+ /*
-+ * Multiply 2 variables of different types,
-+ * for example: args->size = args->spacing * args->height;
-+ * may cause coverity issue with unintentional overflow.
-+ */
-+ args->size = args->pitch;
-+ args->size *= args->height;
-
- mtk_gem = mtk_drm_gem_create(dev, args->size, false);
- if (IS_ERR(mtk_gem))
-diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
-index db2f70ae060d6..ddc9355b06d51 100644
---- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
-+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
-@@ -141,6 +141,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
- dma_addr_t addr;
- dma_addr_t hdr_addr = 0;
- unsigned int hdr_pitch = 0;
-+ int offset;
-
- gem = fb->obj[0];
- mtk_gem = to_mtk_gem_obj(gem);
-@@ -150,8 +151,15 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
- modifier = fb->modifier;
-
- if (modifier == DRM_FORMAT_MOD_LINEAR) {
-- addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-- addr += (new_state->src.y1 >> 16) * pitch;
-+ /*
-+ * Using dma_addr_t variable to calculate with multiplier of different types,
-+ * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-+ * may cause coverity issue with unintentional overflow.
-+ */
-+ offset = (new_state->src.x1 >> 16) * fb->format->cpp[0];
-+ addr += offset;
-+ offset = (new_state->src.y1 >> 16) * pitch;
-+ addr += offset;
- } else {
- int width_in_blocks = ALIGN(fb->width, AFBC_DATA_BLOCK_WIDTH)
- / AFBC_DATA_BLOCK_WIDTH;
-@@ -159,21 +167,34 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
- / AFBC_DATA_BLOCK_HEIGHT;
- int x_offset_in_blocks = (new_state->src.x1 >> 16) / AFBC_DATA_BLOCK_WIDTH;
- int y_offset_in_blocks = (new_state->src.y1 >> 16) / AFBC_DATA_BLOCK_HEIGHT;
-- int hdr_size;
-+ int hdr_size, hdr_offset;
-
- hdr_pitch = width_in_blocks * AFBC_HEADER_BLOCK_SIZE;
- pitch = width_in_blocks * AFBC_DATA_BLOCK_WIDTH *
- AFBC_DATA_BLOCK_HEIGHT * fb->format->cpp[0];
-
- hdr_size = ALIGN(hdr_pitch * height_in_blocks, AFBC_HEADER_ALIGNMENT);
-+ hdr_offset = hdr_pitch * y_offset_in_blocks +
-+ AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
-+
-+ /*
-+ * Using dma_addr_t variable to calculate with multiplier of different types,
-+ * for example: addr += hdr_pitch * y_offset_in_blocks;
-+ * may cause coverity issue with unintentional overflow.
-+ */
-+ hdr_addr = addr + hdr_offset;
-
-- hdr_addr = addr + hdr_pitch * y_offset_in_blocks +
-- AFBC_HEADER_BLOCK_SIZE * x_offset_in_blocks;
- /* The data plane is offset by 1 additional block. */
-- addr = addr + hdr_size +
-- pitch * y_offset_in_blocks +
-- AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
-- fb->format->cpp[0] * (x_offset_in_blocks + 1);
-+ offset = pitch * y_offset_in_blocks +
-+ AFBC_DATA_BLOCK_WIDTH * AFBC_DATA_BLOCK_HEIGHT *
-+ fb->format->cpp[0] * (x_offset_in_blocks + 1);
-+
-+ /*
-+ * Using dma_addr_t variable to calculate with multiplier of different types,
-+ * for example: addr += pitch * y_offset_in_blocks;
-+ * may cause coverity issue with unintentional overflow.
-+ */
-+ addr = addr + hdr_size + offset;
- }
-
- mtk_plane_state->pending.enable = true;
-@@ -206,9 +227,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
- plane->state->src_y = new_state->src_y;
- plane->state->src_h = new_state->src_h;
- plane->state->src_w = new_state->src_w;
-- swap(plane->state->fb, new_state->fb);
-
- mtk_plane_update_new_state(new_state, new_plane_state);
-+ swap(plane->state->fb, new_state->fb);
- wmb(); /* Make sure the above parameters are set before update */
- new_plane_state->pending.async_dirty = true;
- mtk_drm_crtc_async_update(new_state->crtc, plane, state);
-diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
-index d8bfc2cce54dc..290f328c6a421 100644
---- a/drivers/gpu/drm/mediatek/mtk_dsi.c
-+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
-@@ -407,7 +407,7 @@ static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
- if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
- tmp_reg |= HSTX_CKLP_EN;
-
-- if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
-+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
- tmp_reg |= DIS_EOT;
-
- writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
-@@ -484,7 +484,7 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
- timing->da_hs_zero + timing->da_hs_exit + 3;
-
- delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
-- delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 2 : 0;
-+ delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;
-
- horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
- horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
-diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
-index abddf37f0ea11..2fb18b782b053 100644
---- a/drivers/gpu/drm/mgag200/mgag200_drv.c
-+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
-@@ -10,6 +10,7 @@
- #include <linux/pci.h>
-
- #include <drm/drm_aperture.h>
-+#include <drm/drm_atomic_helper.h>
- #include <drm/drm_drv.h>
- #include <drm/drm_fbdev_generic.h>
- #include <drm/drm_file.h>
-@@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev)
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_dev_unregister(dev);
-+ drm_atomic_helper_shutdown(dev);
-+}
-+
-+static void mgag200_pci_shutdown(struct pci_dev *pdev)
-+{
-+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
- }
-
- static struct pci_driver mgag200_pci_driver = {
-@@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = {
- .id_table = mgag200_pciidlist,
- .probe = mgag200_pci_probe,
- .remove = mgag200_pci_remove,
-+ .shutdown = mgag200_pci_shutdown,
- };
-
- drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
-diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
-index d4e85e24002fb..522ca7fe67625 100644
---- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
-+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
-@@ -2237,7 +2237,7 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
- DRM_DEV_ERROR(dev,
- "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
- speedbin);
-- return UINT_MAX;
-+ supp_hw = BIT(0); /* Default */
- }
-
- ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
-diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
-index 575e7c56219ff..f2d9d34ed50f9 100644
---- a/drivers/gpu/drm/msm/adreno/adreno_device.c
-+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
-@@ -331,7 +331,7 @@ static const struct adreno_info gpulist[] = {
- ),
- }, {
- .machine = "qcom,sm6375",
-- .chip_ids = ADRENO_CHIP_IDS(0x06010900),
-+ .chip_ids = ADRENO_CHIP_IDS(0x06010901),
- .family = ADRENO_6XX_GEN1,
- .revn = 619,
- .fw = {
-diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
-index 58f5e25679b15..ff9adb8000acd 100644
---- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
-+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
-@@ -419,6 +419,7 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
- .min_llcc_ib = 0,
- .min_dram_ib = 800000,
- .danger_lut_tbl = {0xf, 0xffff, 0x0},
-+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
- .qos_lut_tbl = {
- {.nentry = ARRAY_SIZE(sc8180x_qos_linear),
- .entries = sc8180x_qos_linear
-diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
-index 42d52510ffd4a..86a8e06c7a60f 100644
---- a/drivers/gpu/drm/msm/dp/dp_panel.c
-+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
-@@ -289,26 +289,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
-
- static u8 dp_panel_get_edid_checksum(struct edid *edid)
- {
-- struct edid *last_block;
-- u8 *raw_edid;
-- bool is_edid_corrupt = false;
-+ edid += edid->extensions;
-
-- if (!edid) {
-- DRM_ERROR("invalid edid input\n");
-- return 0;
-- }
--
-- raw_edid = (u8 *)edid;
-- raw_edid += (edid->extensions * EDID_LENGTH);
-- last_block = (struct edid *)raw_edid;
--
-- /* block type extension */
-- drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
-- if (!is_edid_corrupt)
-- return last_block->checksum;
--
-- DRM_ERROR("Invalid block, no checksum\n");
-- return 0;
-+ return edid->checksum;
- }
-
- void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
-diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
-index baab79ab6e745..32f965bacdc30 100644
---- a/drivers/gpu/drm/msm/dsi/dsi.c
-+++ b/drivers/gpu/drm/msm/dsi/dsi.c
-@@ -126,6 +126,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
- struct msm_drm_private *priv = dev_get_drvdata(master);
- struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
-
-+ msm_dsi_tx_buf_free(msm_dsi->host);
- priv->dsi[msm_dsi->id] = NULL;
- }
-
-diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
-index bd3763a5d7234..3b46617a59f20 100644
---- a/drivers/gpu/drm/msm/dsi/dsi.h
-+++ b/drivers/gpu/drm/msm/dsi/dsi.h
-@@ -125,6 +125,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
- void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
- void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
- void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
-+void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
- int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
- int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
- int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
-diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
-index 3d6fb708dc223..470866896b9b8 100644
---- a/drivers/gpu/drm/msm/dsi/dsi_host.c
-+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
-@@ -147,6 +147,7 @@ struct msm_dsi_host {
-
- /* DSI 6G TX buffer*/
- struct drm_gem_object *tx_gem_obj;
-+ struct msm_gem_address_space *aspace;
-
- /* DSI v2 TX buffer */
- void *tx_buf;
-@@ -1111,8 +1112,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
- uint64_t iova;
- u8 *data;
-
-+ msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
-+
- data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
-- priv->kms->aspace,
-+ msm_host->aspace,
- &msm_host->tx_gem_obj, &iova);
-
- if (IS_ERR(data)) {
-@@ -1141,10 +1144,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
- return 0;
- }
-
--static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
-+void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
- {
-+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct drm_device *dev = msm_host->dev;
-- struct msm_drm_private *priv;
-
- /*
- * This is possible if we're tearing down before we've had a chance to
-@@ -1155,11 +1158,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
- if (!dev)
- return;
-
-- priv = dev->dev_private;
- if (msm_host->tx_gem_obj) {
-- msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
-- drm_gem_object_put(msm_host->tx_gem_obj);
-+ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
-+ msm_gem_address_space_put(msm_host->aspace);
- msm_host->tx_gem_obj = NULL;
-+ msm_host->aspace = NULL;
- }
-
- if (msm_host->tx_buf)
-@@ -1945,7 +1948,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
- struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-
- DBG("");
-- dsi_tx_buf_free(msm_host);
- if (msm_host->workqueue) {
- destroy_workqueue(msm_host->workqueue);
- msm_host->workqueue = NULL;
-diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
-index 3b1ed02f644d2..89a6344bc8653 100644
---- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
-+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
-@@ -918,7 +918,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
- if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
- if (phy->cphy_mode) {
- vreg_ctrl_0 = 0x45;
-- vreg_ctrl_1 = 0x45;
-+ vreg_ctrl_1 = 0x41;
- glbl_rescode_top_ctrl = 0x00;
- glbl_rescode_bot_ctrl = 0x00;
- } else {
-diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
-index 0f3bd187ede67..280d1d9a559ba 100644
---- a/drivers/gpu/drm/nouveau/nouveau_bo.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
-@@ -318,8 +318,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
- (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
- continue;
-
-- if (pi < 0)
-- pi = i;
-+ /* pick the last one as it will be smallest. */
-+ pi = i;
-+
- /* Stop once the buffer is larger than the current page size. */
- if (*size >= 1ULL << vmm->page[i].shift)
- break;
-diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
-index abb0788843c60..503ecea72c5ea 100644
---- a/drivers/gpu/drm/panel/panel-arm-versatile.c
-+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
-@@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
- connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
-
- mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
-+ if (!mode)
-+ return -ENOMEM;
- drm_mode_set_name(mode);
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-
-diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
-index c9087f474cbc5..29e63cdfb8954 100644
---- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
-+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
-@@ -2049,6 +2049,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
- .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_LPM,
- .init_cmds = auo_b101uan08_3_init_cmd,
-+ .lp11_before_reset = true,
- };
-
- static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
-@@ -2103,14 +2104,15 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
- .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_LPM,
- .init_cmds = starry_qfh032011_53g_init_cmd,
-+ .lp11_before_reset = true,
- };
-
- static const struct drm_display_mode starry_himax83102_j02_default_mode = {
-- .clock = 161600,
-+ .clock = 162850,
- .hdisplay = 1200,
-- .hsync_start = 1200 + 40,
-- .hsync_end = 1200 + 40 + 20,
-- .htotal = 1200 + 40 + 20 + 40,
-+ .hsync_start = 1200 + 50,
-+ .hsync_end = 1200 + 50 + 20,
-+ .htotal = 1200 + 50 + 20 + 50,
- .vdisplay = 1920,
- .vsync_start = 1920 + 116,
- .vsync_end = 1920 + 116 + 8,
-diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
-index 9632b9e95b715..c4a804c5d6aac 100644
---- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
-+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
-@@ -1266,9 +1266,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
- return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
-
- pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
-- if (!pinfo->dsi[1]) {
-+ if (IS_ERR(pinfo->dsi[1])) {
- dev_err(dev, "cannot get secondary DSI device\n");
-- return -ENODEV;
-+ return PTR_ERR(pinfo->dsi[1]);
- }
- }
-
-diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
-index dd7928d9570f7..6e46e55d29a9a 100644
---- a/drivers/gpu/drm/panel/panel-simple.c
-+++ b/drivers/gpu/drm/panel/panel-simple.c
-@@ -2326,13 +2326,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
- static const struct display_timing innolux_g101ice_l01_timing = {
- .pixelclock = { 60400000, 71100000, 74700000 },
- .hactive = { 1280, 1280, 1280 },
-- .hfront_porch = { 41, 80, 100 },
-- .hback_porch = { 40, 79, 99 },
-- .hsync_len = { 1, 1, 1 },
-+ .hfront_porch = { 30, 60, 70 },
-+ .hback_porch = { 30, 60, 70 },
-+ .hsync_len = { 22, 40, 60 },
- .vactive = { 800, 800, 800 },
-- .vfront_porch = { 5, 11, 14 },
-- .vback_porch = { 4, 11, 14 },
-- .vsync_len = { 1, 1, 1 },
-+ .vfront_porch = { 3, 8, 14 },
-+ .vback_porch = { 3, 8, 14 },
-+ .vsync_len = { 4, 7, 12 },
- .flags = DISPLAY_FLAGS_DE_HIGH,
- };
-
-@@ -2349,6 +2349,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
- .disable = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
-+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
- .connector_type = DRM_MODE_CONNECTOR_LVDS,
- };
-
-diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
-index 6a39456395350..7bb723d445ade 100644
---- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
-+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
-@@ -506,29 +506,30 @@ static int st7703_prepare(struct drm_panel *panel)
- return 0;
-
- dev_dbg(ctx->dev, "Resetting the panel\n");
-- ret = regulator_enable(ctx->vcc);
-+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
-+
-+ ret = regulator_enable(ctx->iovcc);
- if (ret < 0) {
-- dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
-+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
- return ret;
- }
-- ret = regulator_enable(ctx->iovcc);
-+
-+ ret = regulator_enable(ctx->vcc);
- if (ret < 0) {
-- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
-- goto disable_vcc;
-+ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
-+ regulator_disable(ctx->iovcc);
-+ return ret;
- }
-
-- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
-- usleep_range(20, 40);
-+ /* Give power supplies time to stabilize before deasserting reset. */
-+ usleep_range(10000, 20000);
-+
- gpiod_set_value_cansleep(ctx->reset_gpio, 0);
-- msleep(20);
-+ usleep_range(15000, 20000);
-
- ctx->prepared = true;
-
- return 0;
--
--disable_vcc:
-- regulator_disable(ctx->vcc);
-- return ret;
- }
-
- static const u32 mantix_bus_formats[] = {
-diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
-index 845304435e235..f6a212e542cb9 100644
---- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
-+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
-@@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
- connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
-
- mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
-+ if (!mode)
-+ return -ENOMEM;
- drm_mode_set_name(mode);
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-
-diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
-index ba3b5b5f0cdfe..02e6b74d50166 100644
---- a/drivers/gpu/drm/pl111/pl111_drv.c
-+++ b/drivers/gpu/drm/pl111/pl111_drv.c
-@@ -323,12 +323,18 @@ static void pl111_amba_remove(struct amba_device *amba_dev)
- struct pl111_drm_dev_private *priv = drm->dev_private;
-
- drm_dev_unregister(drm);
-+ drm_atomic_helper_shutdown(drm);
- if (priv->panel)
- drm_panel_bridge_remove(priv->bridge);
- drm_dev_put(drm);
- of_reserved_mem_device_release(dev);
- }
-
-+static void pl111_amba_shutdown(struct amba_device *amba_dev)
-+{
-+ drm_atomic_helper_shutdown(amba_get_drvdata(amba_dev));
-+}
-+
- /*
- * This early variant lacks the 565 and 444 pixel formats.
- */
-@@ -431,6 +437,7 @@ static struct amba_driver pl111_amba_driver __maybe_unused = {
- },
- .probe = pl111_amba_probe,
- .remove = pl111_amba_remove,
-+ .shutdown = pl111_amba_shutdown,
- .id_table = pl111_id_table,
- };
-
-diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
-index 6492a70e3c396..404b0483bb7cb 100644
---- a/drivers/gpu/drm/qxl/qxl_display.c
-+++ b/drivers/gpu/drm/qxl/qxl_display.c
-@@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
- if (!qdev->monitors_config_bo)
- return 0;
-
-+ kfree(qdev->dumb_heads);
-+ qdev->dumb_heads = NULL;
-+
- qdev->monitors_config = NULL;
- qdev->ram_header->monitors_config = 0;
-
-diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
-index 4f06356d9ce2e..f0ae087be914e 100644
---- a/drivers/gpu/drm/radeon/evergreen.c
-+++ b/drivers/gpu/drm/radeon/evergreen.c
-@@ -4821,14 +4821,15 @@ restart_ih:
- break;
- case 44: /* hdmi */
- afmt_idx = src_data;
-- if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
-- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
--
- if (afmt_idx > 5) {
- DRM_ERROR("Unhandled interrupt: %d %d\n",
- src_id, src_data);
- break;
- }
-+
-+ if (!(afmt_status[afmt_idx] & AFMT_AZ_FORMAT_WTRIG))
-+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-+
- afmt_status[afmt_idx] &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI%d\n", afmt_idx + 1);
-diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
-index 8afb03bbce298..3d3d2109dfebc 100644
---- a/drivers/gpu/drm/radeon/radeon.h
-+++ b/drivers/gpu/drm/radeon/radeon.h
-@@ -2215,10 +2215,6 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
--int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *file_priv);
--int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *file_priv);
- int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
- int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
-diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index d2f02c3dfce29..b84b58926106a 100644
---- a/drivers/gpu/drm/radeon/radeon_connectors.c
-+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
-@@ -1119,6 +1119,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
- else {
- /* only 800x600 is supported right now on pre-avivo chips */
- tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
-+ if (!tv_mode)
-+ return 0;
- tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, tv_mode);
- }
-diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
-index fa531493b1113..7bf08164140ef 100644
---- a/drivers/gpu/drm/radeon/radeon_drv.c
-+++ b/drivers/gpu/drm/radeon/radeon_drv.c
-@@ -555,8 +555,6 @@ static const struct drm_ioctl_desc radeon_ioctls_kms[] = {
- DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-- DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
-- DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
-index 358d19242f4ba..3fec3acdaf284 100644
---- a/drivers/gpu/drm/radeon/radeon_gem.c
-+++ b/drivers/gpu/drm/radeon/radeon_gem.c
-@@ -311,22 +311,6 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
- return 0;
- }
-
--int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *filp)
--{
-- /* TODO: implement */
-- DRM_ERROR("unimplemented %s\n", __func__);
-- return -EOPNOTSUPP;
--}
--
--int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *filp)
--{
-- /* TODO: implement */
-- DRM_ERROR("unimplemented %s\n", __func__);
-- return -EOPNOTSUPP;
--}
--
- int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
- {
-diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
-index a29fbafce3936..3793863c210eb 100644
---- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
-+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
-@@ -1177,6 +1177,7 @@ static int cdn_dp_probe(struct platform_device *pdev)
- struct cdn_dp_device *dp;
- struct extcon_dev *extcon;
- struct phy *phy;
-+ int ret;
- int i;
-
- dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
-@@ -1217,9 +1218,19 @@ static int cdn_dp_probe(struct platform_device *pdev)
- mutex_init(&dp->lock);
- dev_set_drvdata(dev, dp);
-
-- cdn_dp_audio_codec_init(dp, dev);
-+ ret = cdn_dp_audio_codec_init(dp, dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = component_add(dev, &cdn_dp_component_ops);
-+ if (ret)
-+ goto err_audio_deinit;
-
-- return component_add(dev, &cdn_dp_component_ops);
-+ return 0;
-+
-+err_audio_deinit:
-+ platform_device_unregister(dp->audio_pdev);
-+ return ret;
- }
-
- static void cdn_dp_remove(struct platform_device *pdev)
-diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
-index b8f8b45ebf594..93ed841f5dcea 100644
---- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
-+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
-@@ -40,7 +40,7 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
-
- ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
- prot);
-- if (ret < rk_obj->base.size) {
-+ if (ret < (ssize_t)rk_obj->base.size) {
- DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
- ret, rk_obj->base.size);
- ret = -ENOMEM;
-diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
-index 14320bc73e5bf..4b338cb89d32d 100644
---- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
-+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
-@@ -247,14 +247,22 @@ static inline void vop_cfg_done(struct vop *vop)
- VOP_REG_SET(vop, common, cfg_done, 1);
- }
-
--static bool has_rb_swapped(uint32_t format)
-+static bool has_rb_swapped(uint32_t version, uint32_t format)
- {
- switch (format) {
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
-- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_BGR565:
- return true;
-+ /*
-+ * full framework (IP version 3.x) only need rb swapped for RGB888 and
-+ * little framework (IP version 2.x) only need rb swapped for BGR888,
-+ * check for 3.x to also only rb swap BGR888 for unknown vop version
-+ */
-+ case DRM_FORMAT_RGB888:
-+ return VOP_MAJOR(version) == 3;
-+ case DRM_FORMAT_BGR888:
-+ return VOP_MAJOR(version) != 3;
- default:
- return false;
- }
-@@ -1013,7 +1021,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
- VOP_WIN_SET(vop, win, dsp_info, dsp_info);
- VOP_WIN_SET(vop, win, dsp_st, dsp_st);
-
-- rb_swap = has_rb_swapped(fb->format->format);
-+ rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
- VOP_WIN_SET(vop, win, rb_swap, rb_swap);
-
- /*
-@@ -1614,7 +1622,8 @@ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
- if (WARN_ON(!crtc->state))
- return NULL;
-
-- rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
-+ rockchip_state = kmemdup(to_rockchip_crtc_state(crtc->state),
-+ sizeof(*rockchip_state), GFP_KERNEL);
- if (!rockchip_state)
- return NULL;
-
-@@ -1639,7 +1648,10 @@ static void vop_crtc_reset(struct drm_crtc *crtc)
- if (crtc->state)
- vop_crtc_destroy_state(crtc, crtc->state);
-
-- __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
-+ if (crtc_state)
-+ __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base);
-+ else
-+ __drm_atomic_helper_crtc_reset(crtc, NULL);
- }
-
- #ifdef CONFIG_DRM_ANALOGIX_DP
-diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
-index 583df4d22f7e9..c306806aa3dea 100644
---- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
-+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
-@@ -2079,30 +2079,15 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
- .atomic_disable = vop2_crtc_atomic_disable,
- };
-
--static void vop2_crtc_reset(struct drm_crtc *crtc)
--{
-- struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
--
-- if (crtc->state) {
-- __drm_atomic_helper_crtc_destroy_state(crtc->state);
-- kfree(vcstate);
-- }
--
-- vcstate = kzalloc(sizeof(*vcstate), GFP_KERNEL);
-- if (!vcstate)
-- return;
--
-- crtc->state = &vcstate->base;
-- crtc->state->crtc = crtc;
--}
--
- static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
- {
-- struct rockchip_crtc_state *vcstate, *old_vcstate;
-+ struct rockchip_crtc_state *vcstate;
-
-- old_vcstate = to_rockchip_crtc_state(crtc->state);
-+ if (WARN_ON(!crtc->state))
-+ return NULL;
-
-- vcstate = kmemdup(old_vcstate, sizeof(*old_vcstate), GFP_KERNEL);
-+ vcstate = kmemdup(to_rockchip_crtc_state(crtc->state),
-+ sizeof(*vcstate), GFP_KERNEL);
- if (!vcstate)
- return NULL;
-
-@@ -2120,6 +2105,20 @@ static void vop2_crtc_destroy_state(struct drm_crtc *crtc,
- kfree(vcstate);
- }
-
-+static void vop2_crtc_reset(struct drm_crtc *crtc)
-+{
-+ struct rockchip_crtc_state *vcstate =
-+ kzalloc(sizeof(*vcstate), GFP_KERNEL);
-+
-+ if (crtc->state)
-+ vop2_crtc_destroy_state(crtc, crtc->state);
-+
-+ if (vcstate)
-+ __drm_atomic_helper_crtc_reset(crtc, &vcstate->base);
-+ else
-+ __drm_atomic_helper_crtc_reset(crtc, NULL);
-+}
-+
- static const struct drm_crtc_funcs vop2_crtc_funcs = {
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
-diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
-index 5a80b228d18ca..78272b1f9d5b1 100644
---- a/drivers/gpu/drm/solomon/ssd130x.c
-+++ b/drivers/gpu/drm/solomon/ssd130x.c
-@@ -553,14 +553,45 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
- static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
- struct ssd130x_plane_state *ssd130x_state)
- {
-- struct drm_rect fullscreen = {
-- .x1 = 0,
-- .x2 = ssd130x->width,
-- .y1 = 0,
-- .y2 = ssd130x->height,
-- };
--
-- ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen);
-+ unsigned int page_height = ssd130x->device_info->page_height;
-+ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
-+ u8 *data_array = ssd130x_state->data_array;
-+ unsigned int width = ssd130x->width;
-+ int ret, i;
-+
-+ if (!ssd130x->page_address_mode) {
-+ memset(data_array, 0, width * pages);
-+
-+ /* Set address range for horizontal addressing mode */
-+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset, width);
-+ if (ret < 0)
-+ return;
-+
-+ ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset, pages);
-+ if (ret < 0)
-+ return;
-+
-+ /* Write out update in one go if we aren't using page addressing mode */
-+ ssd130x_write_data(ssd130x, data_array, width * pages);
-+ } else {
-+ /*
-+ * In page addressing mode, the start address needs to be reset,
-+ * and each page then needs to be written out separately.
-+ */
-+ memset(data_array, 0, width);
-+
-+ for (i = 0; i < pages; i++) {
-+ ret = ssd130x_set_page_pos(ssd130x,
-+ ssd130x->page_offset + i,
-+ ssd130x->col_offset);
-+ if (ret < 0)
-+ return;
-+
-+ ret = ssd130x_write_data(ssd130x, data_array, width);
-+ if (ret < 0)
-+ return;
-+ }
-+ }
- }
-
- static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
-diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
-index c68c831136c9b..e8523abef27a5 100644
---- a/drivers/gpu/drm/stm/drv.c
-+++ b/drivers/gpu/drm/stm/drv.c
-@@ -114,6 +114,7 @@ static void drv_unload(struct drm_device *ddev)
- DRM_DEBUG("%s\n", __func__);
-
- drm_kms_helper_poll_fini(ddev);
-+ drm_atomic_helper_shutdown(ddev);
- ltdc_unload(ddev);
- }
-
-@@ -225,6 +226,11 @@ static void stm_drm_platform_remove(struct platform_device *pdev)
- drm_dev_put(ddev);
- }
-
-+static void stm_drm_platform_shutdown(struct platform_device *pdev)
-+{
-+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
-+}
-+
- static const struct of_device_id drv_dt_ids[] = {
- { .compatible = "st,stm32-ltdc"},
- { /* end node */ },
-@@ -234,6 +240,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
- static struct platform_driver stm_drm_platform_driver = {
- .probe = stm_drm_platform_probe,
- .remove_new = stm_drm_platform_remove,
-+ .shutdown = stm_drm_platform_shutdown,
- .driver = {
- .name = "stm32-display",
- .of_match_table = drv_dt_ids,
-diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-index fe56beea3e93f..8ebd7134ee21b 100644
---- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
-@@ -175,6 +175,7 @@ static void tilcdc_fini(struct drm_device *dev)
- drm_dev_unregister(dev);
-
- drm_kms_helper_poll_fini(dev);
-+ drm_atomic_helper_shutdown(dev);
- tilcdc_irq_uninstall(dev);
- drm_mode_config_cleanup(dev);
-
-@@ -389,6 +390,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
-
- init_failed:
- tilcdc_fini(ddev);
-+ platform_set_drvdata(pdev, NULL);
-
- return ret;
- }
-@@ -537,7 +539,8 @@ static void tilcdc_unbind(struct device *dev)
- if (!ddev->dev_private)
- return;
-
-- tilcdc_fini(dev_get_drvdata(dev));
-+ tilcdc_fini(ddev);
-+ dev_set_drvdata(dev, NULL);
- }
-
- static const struct component_master_ops tilcdc_comp_ops = {
-@@ -582,6 +585,11 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
- return 0;
- }
-
-+static void tilcdc_pdev_shutdown(struct platform_device *pdev)
-+{
-+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
-+}
-+
- static const struct of_device_id tilcdc_of_match[] = {
- { .compatible = "ti,am33xx-tilcdc", },
- { .compatible = "ti,da850-tilcdc", },
-@@ -592,6 +600,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
- static struct platform_driver tilcdc_platform_driver = {
- .probe = tilcdc_pdev_probe,
- .remove = tilcdc_pdev_remove,
-+ .shutdown = tilcdc_pdev_shutdown,
- .driver = {
- .name = "tilcdc",
- .pm = pm_sleep_ptr(&tilcdc_pm_ops),
-diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
-index 0bb56d0635366..acce210e25547 100644
---- a/drivers/gpu/drm/tve200/tve200_drv.c
-+++ b/drivers/gpu/drm/tve200/tve200_drv.c
-@@ -242,6 +242,7 @@ static void tve200_remove(struct platform_device *pdev)
- struct tve200_drm_dev_private *priv = drm->dev_private;
-
- drm_dev_unregister(drm);
-+ drm_atomic_helper_shutdown(drm);
- if (priv->panel)
- drm_panel_bridge_remove(priv->bridge);
- drm_mode_config_cleanup(drm);
-@@ -249,6 +250,11 @@ static void tve200_remove(struct platform_device *pdev)
- drm_dev_put(drm);
- }
-
-+static void tve200_shutdown(struct platform_device *pdev)
-+{
-+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
-+}
-+
- static const struct of_device_id tve200_of_match[] = {
- {
- .compatible = "faraday,tve200",
-@@ -263,6 +269,7 @@ static struct platform_driver tve200_driver = {
- },
- .probe = tve200_probe,
- .remove_new = tve200_remove,
-+ .shutdown = tve200_shutdown,
- };
- drm_module_platform_driver(tve200_driver);
-
-diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
-index 4fee15c97c341..047b958123341 100644
---- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
-+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
-@@ -12,6 +12,7 @@
- #include <linux/vt_kern.h>
-
- #include <drm/drm_aperture.h>
-+#include <drm/drm_atomic_helper.h>
- #include <drm/drm_drv.h>
- #include <drm/drm_fbdev_generic.h>
- #include <drm/drm_file.h>
-@@ -97,11 +98,19 @@ static void vbox_pci_remove(struct pci_dev *pdev)
- struct vbox_private *vbox = pci_get_drvdata(pdev);
-
- drm_dev_unregister(&vbox->ddev);
-+ drm_atomic_helper_shutdown(&vbox->ddev);
- vbox_irq_fini(vbox);
- vbox_mode_fini(vbox);
- vbox_hw_fini(vbox);
- }
-
-+static void vbox_pci_shutdown(struct pci_dev *pdev)
-+{
-+ struct vbox_private *vbox = pci_get_drvdata(pdev);
-+
-+ drm_atomic_helper_shutdown(&vbox->ddev);
-+}
-+
- static int vbox_pm_suspend(struct device *dev)
- {
- struct vbox_private *vbox = dev_get_drvdata(dev);
-@@ -165,6 +174,7 @@ static struct pci_driver vbox_pci_driver = {
- .id_table = pciidlist,
- .probe = vbox_pci_probe,
- .remove = vbox_pci_remove,
-+ .shutdown = vbox_pci_shutdown,
- .driver.pm = pm_sleep_ptr(&vbox_pm_ops),
- };
-
-diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
-index 5d12d7beef0eb..ade3309ae042f 100644
---- a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
-+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
-@@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
- struct vc4_crtc *vc4_crtc;
- int ret;
-
-- dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
-+ dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
-
- vc4_crtc = &dummy_crtc->crtc;
-diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
-index 6e11fcc9ef45e..e70d7c3076acf 100644
---- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
-+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
-@@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
- struct drm_encoder *enc;
- int ret;
-
-- dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
-+ dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
- dummy_output->encoder.type = vc4_encoder_type;
-
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
-index 3829be282ff00..17463aeeef28f 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
-@@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- sizeof(metadata->mip_levels));
- metadata->num_sizes = num_sizes;
- metadata->sizes =
-- memdup_user((struct drm_vmw_size __user *)(unsigned long)
-+ memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
- req->size_addr,
-- sizeof(*metadata->sizes) * metadata->num_sizes);
-+ metadata->num_sizes, sizeof(*metadata->sizes));
- if (IS_ERR(metadata->sizes)) {
- ret = PTR_ERR(metadata->sizes);
- goto out_no_sizes;
-diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
-index a3f336edd991b..955c971c528d4 100644
---- a/drivers/gpu/host1x/context.c
-+++ b/drivers/gpu/host1x/context.c
-@@ -34,10 +34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x)
- if (err < 0)
- return 0;
-
-- cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
-+ cdl->len = err / 4;
-+ cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
- if (!cdl->devs)
- return -ENOMEM;
-- cdl->len = err / 4;
-
- for (i = 0; i < cdl->len; i++) {
- ctx = &cdl->devs[i];
-diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 8992e3c1e7698..e0181218ad857 100644
---- a/drivers/hid/hid-core.c
-+++ b/drivers/hid/hid-core.c
-@@ -702,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
- * Free a device structure, all reports, and all fields.
- */
-
--static void hid_device_release(struct device *dev)
-+void hiddev_free(struct kref *ref)
- {
-- struct hid_device *hid = to_hid_device(dev);
-+ struct hid_device *hid = container_of(ref, struct hid_device, ref);
-
- hid_close_report(hid);
- kfree(hid->dev_rdesc);
- kfree(hid);
- }
-
-+static void hid_device_release(struct device *dev)
-+{
-+ struct hid_device *hid = to_hid_device(dev);
-+
-+ kref_put(&hid->ref, hiddev_free);
-+}
-+
- /*
- * Fetch a report description item from the data stream. We support long
- * items, though they are not used yet.
-@@ -2846,6 +2853,7 @@ struct hid_device *hid_allocate_device(void)
- spin_lock_init(&hdev->debug_list_lock);
- sema_init(&hdev->driver_input_lock, 1);
- mutex_init(&hdev->ll_open_lock);
-+ kref_init(&hdev->ref);
-
- hid_bpf_device_init(hdev);
-
-diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
-index 54c33a24f8442..20a0d1315d90f 100644
---- a/drivers/hid/hid-cp2112.c
-+++ b/drivers/hid/hid-cp2112.c
-@@ -1151,8 +1151,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct cp2112_device *dev = gpiochip_get_data(gc);
-
-- INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
--
- if (!dev->gpio_poll) {
- dev->gpio_poll = true;
- schedule_delayed_work(&dev->gpio_poll_worker, 0);
-@@ -1168,7 +1166,11 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
- struct cp2112_device *dev = gpiochip_get_data(gc);
-
- cp2112_gpio_irq_mask(d);
-- cancel_delayed_work_sync(&dev->gpio_poll_worker);
-+
-+ if (!dev->irq_mask) {
-+ dev->gpio_poll = false;
-+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
-+ }
- }
-
- static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
-@@ -1307,6 +1309,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
- girq->handler = handle_simple_irq;
- girq->threaded = true;
-
-+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
-+
- ret = gpiochip_add_data(&dev->gc, dev);
- if (ret < 0) {
- hid_err(hdev, "error registering gpio chip\n");
-diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
-index e7ef1ea107c9e..7dd83ec74f8a9 100644
---- a/drivers/hid/hid-debug.c
-+++ b/drivers/hid/hid-debug.c
-@@ -1135,6 +1135,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
- goto out;
- }
- list->hdev = (struct hid_device *) inode->i_private;
-+ kref_get(&list->hdev->ref);
- file->private_data = list;
- mutex_init(&list->read_mutex);
-
-@@ -1227,6 +1228,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
- list_del(&list->node);
- spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
- kfifo_free(&list->hid_debug_fifo);
-+
-+ kref_put(&list->hdev->ref, hiddev_free);
- kfree(list);
-
- return 0;
-diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
-index e4d2dfd5d2536..d10ccfa17e168 100644
---- a/drivers/hid/hid-ids.h
-+++ b/drivers/hid/hid-ids.h
-@@ -366,6 +366,7 @@
-
- #define USB_VENDOR_ID_DELL 0x413c
- #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
-+#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
-
- #define USB_VENDOR_ID_DELORME 0x1163
- #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
-@@ -868,7 +869,6 @@
- #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
- #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
- #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
--#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
- #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
- #define USB_DEVICE_ID_SPACETRAVELLER 0xc623
- #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
-diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
-index 44763c0da4441..7c1b33be9d134 100644
---- a/drivers/hid/hid-lenovo.c
-+++ b/drivers/hid/hid-lenovo.c
-@@ -51,7 +51,12 @@ struct lenovo_drvdata {
- int select_right;
- int sensitivity;
- int press_speed;
-- u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
-+ /* 0: Up
-+ * 1: Down (undecided)
-+ * 2: Scrolling
-+ * 3: Patched firmware, disable workaround
-+ */
-+ u8 middlebutton_state;
- bool fn_lock;
- };
-
-@@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
- int ret;
- struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
-
-+ /*
-+ * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
-+ * regular keys
-+ */
-+ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
-+ if (ret)
-+ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
-+
-+ /* Switch middle button to native mode */
-+ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
-+ if (ret)
-+ hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
-+
- ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
- if (ret)
- hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
-@@ -668,31 +686,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
- {
- struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
-
-- /* "wheel" scroll events */
-- if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
-- usage->code == REL_HWHEEL)) {
-- /* Scroll events disable middle-click event */
-- cptkbd_data->middlebutton_state = 2;
-- return 0;
-- }
-+ if (cptkbd_data->middlebutton_state != 3) {
-+ /* REL_X and REL_Y events during middle button pressed
-+ * are only possible on patched, bug-free firmware
-+ * so set middlebutton_state to 3
-+ * to never apply workaround anymore
-+ */
-+ if (cptkbd_data->middlebutton_state == 1 &&
-+ usage->type == EV_REL &&
-+ (usage->code == REL_X || usage->code == REL_Y)) {
-+ cptkbd_data->middlebutton_state = 3;
-+ /* send middle button press which was hold before */
-+ input_event(field->hidinput->input,
-+ EV_KEY, BTN_MIDDLE, 1);
-+ input_sync(field->hidinput->input);
-+ }
-+
-+ /* "wheel" scroll events */
-+ if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
-+ usage->code == REL_HWHEEL)) {
-+ /* Scroll events disable middle-click event */
-+ cptkbd_data->middlebutton_state = 2;
-+ return 0;
-+ }
-
-- /* Middle click events */
-- if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
-- if (value == 1) {
-- cptkbd_data->middlebutton_state = 1;
-- } else if (value == 0) {
-- if (cptkbd_data->middlebutton_state == 1) {
-- /* No scrolling inbetween, send middle-click */
-- input_event(field->hidinput->input,
-- EV_KEY, BTN_MIDDLE, 1);
-- input_sync(field->hidinput->input);
-- input_event(field->hidinput->input,
-- EV_KEY, BTN_MIDDLE, 0);
-- input_sync(field->hidinput->input);
-+ /* Middle click events */
-+ if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
-+ if (value == 1) {
-+ cptkbd_data->middlebutton_state = 1;
-+ } else if (value == 0) {
-+ if (cptkbd_data->middlebutton_state == 1) {
-+ /* No scrolling inbetween, send middle-click */
-+ input_event(field->hidinput->input,
-+ EV_KEY, BTN_MIDDLE, 1);
-+ input_sync(field->hidinput->input);
-+ input_event(field->hidinput->input,
-+ EV_KEY, BTN_MIDDLE, 0);
-+ input_sync(field->hidinput->input);
-+ }
-+ cptkbd_data->middlebutton_state = 0;
- }
-- cptkbd_data->middlebutton_state = 0;
-+ return 1;
- }
-- return 1;
- }
-
- if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
-@@ -1126,22 +1161,6 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
- }
- hid_set_drvdata(hdev, cptkbd_data);
-
-- /*
-- * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
-- * regular keys (Compact only)
-- */
-- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
-- hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
-- ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
-- if (ret)
-- hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
-- }
--
-- /* Switch middle button to native mode */
-- ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
-- if (ret)
-- hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
--
- /* Set keyboard settings to known state */
- cptkbd_data->middlebutton_state = 0;
- cptkbd_data->fn_lock = true;
-@@ -1264,6 +1283,24 @@ err:
- return ret;
- }
-
-+#ifdef CONFIG_PM
-+static int lenovo_reset_resume(struct hid_device *hdev)
-+{
-+ switch (hdev->product) {
-+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
-+ case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
-+ if (hdev->type == HID_TYPE_USBMOUSE)
-+ lenovo_features_set_cptkbd(hdev);
-+
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+#endif
-+
- static void lenovo_remove_tpkbd(struct hid_device *hdev)
- {
- struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
-@@ -1380,6 +1417,9 @@ static struct hid_driver lenovo_driver = {
- .raw_event = lenovo_raw_event,
- .event = lenovo_event,
- .report_fixup = lenovo_report_fixup,
-+#ifdef CONFIG_PM
-+ .reset_resume = lenovo_reset_resume,
-+#endif
- };
- module_hid_driver(lenovo_driver);
-
-diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
-index 8afe3be683ba2..e6a8b6d8eab70 100644
---- a/drivers/hid/hid-logitech-dj.c
-+++ b/drivers/hid/hid-logitech-dj.c
-@@ -1695,12 +1695,11 @@ static int logi_dj_raw_event(struct hid_device *hdev,
- }
- /*
- * Mouse-only receivers send unnumbered mouse data. The 27 MHz
-- * receiver uses 6 byte packets, the nano receiver 8 bytes,
-- * the lightspeed receiver (Pro X Superlight) 13 bytes.
-+ * receiver uses 6 byte packets, the nano receiver 8 bytes.
- */
- if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
-- size <= 13){
-- u8 mouse_report[14];
-+ size <= 8) {
-+ u8 mouse_report[9];
-
- /* Prepend report id */
- mouse_report[0] = REPORT_TYPE_MOUSE;
-@@ -1984,10 +1983,6 @@ static const struct hid_device_id logi_dj_receivers[] = {
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
- .driver_data = recvr_type_gaming_hidpp},
-- { /* Logitech lightspeed receiver (0xc547) */
-- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
-- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
-- .driver_data = recvr_type_gaming_hidpp},
-
- { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
-diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
-index a209d51bd2476..7bf12ca0eb4a9 100644
---- a/drivers/hid/hid-logitech-hidpp.c
-+++ b/drivers/hid/hid-logitech-hidpp.c
-@@ -1835,15 +1835,14 @@ static int hidpp_battery_get_property(struct power_supply *psy,
- /* -------------------------------------------------------------------------- */
- #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
-
--static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
-+static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
- {
- u8 feature_type;
- int ret;
-
- ret = hidpp_root_get_feature(hidpp,
- HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
-- &hidpp->wireless_feature_index,
-- &feature_type);
-+ feature_index, &feature_type);
-
- return ret;
- }
-@@ -4249,6 +4248,13 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
- }
- }
-
-+ if (hidpp->protocol_major >= 2) {
-+ u8 feature_index;
-+
-+ if (!hidpp_get_wireless_feature_index(hidpp, &feature_index))
-+ hidpp->wireless_feature_index = feature_index;
-+ }
-+
- if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
- name = hidpp_get_device_name(hidpp);
- if (name) {
-@@ -4394,7 +4400,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- bool connected;
- unsigned int connect_mask = HID_CONNECT_DEFAULT;
- struct hidpp_ff_private_data data;
-- bool will_restart = false;
-
- /* report_fixup needs drvdata to be set before we call hid_parse */
- hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
-@@ -4445,10 +4450,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- return ret;
- }
-
-- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
-- hidpp->quirks & HIDPP_QUIRK_UNIFYING)
-- will_restart = true;
--
- INIT_WORK(&hidpp->work, delayed_work_cb);
- mutex_init(&hidpp->send_mutex);
- init_waitqueue_head(&hidpp->wait);
-@@ -4460,10 +4461,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- hdev->name);
-
- /*
-- * Plain USB connections need to actually call start and open
-- * on the transport driver to allow incoming data.
-+ * First call hid_hw_start(hdev, 0) to allow IO without connecting any
-+ * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's
-+ * name and serial number and store these in hdev->name and hdev->uniq,
-+ * before the hid-input and hidraw drivers expose these to userspace.
- */
-- ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
-+ ret = hid_hw_start(hdev, 0);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- goto hid_hw_start_fail;
-@@ -4496,15 +4499,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- hidpp_overwrite_name(hdev);
- }
-
-- if (connected && hidpp->protocol_major >= 2) {
-- ret = hidpp_set_wireless_feature_index(hidpp);
-- if (ret == -ENOENT)
-- hidpp->wireless_feature_index = 0;
-- else if (ret)
-- goto hid_hw_init_fail;
-- ret = 0;
-- }
--
- if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
- ret = wtp_get_config(hidpp);
- if (ret)
-@@ -4518,21 +4512,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- schedule_work(&hidpp->work);
- flush_work(&hidpp->work);
-
-- if (will_restart) {
-- /* Reset the HID node state */
-- hid_device_io_stop(hdev);
-- hid_hw_close(hdev);
-- hid_hw_stop(hdev);
--
-- if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
-- connect_mask &= ~HID_CONNECT_HIDINPUT;
-+ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
-+ connect_mask &= ~HID_CONNECT_HIDINPUT;
-
-- /* Now export the actual inputs and hidraw nodes to the world */
-- ret = hid_hw_start(hdev, connect_mask);
-- if (ret) {
-- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
-- goto hid_hw_start_fail;
-- }
-+ /* Now export the actual inputs and hidraw nodes to the world */
-+ ret = hid_connect(hdev, connect_mask);
-+ if (ret) {
-+ hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret);
-+ goto hid_hw_init_fail;
- }
-
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-@@ -4543,6 +4530,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
- ret);
- }
-
-+ /*
-+ * This relies on logi_dj_ll_close() being a no-op so that DJ connection
-+ * events will still be received.
-+ */
-+ hid_hw_close(hdev);
- return ret;
-
- hid_hw_init_fail:
-diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
-index 3983b4f282f8f..5a48fcaa32f00 100644
---- a/drivers/hid/hid-quirks.c
-+++ b/drivers/hid/hid-quirks.c
-@@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
-+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
- { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
-diff --git a/drivers/hid/hid-uclogic-core-test.c b/drivers/hid/hid-uclogic-core-test.c
-index 2bb916226a389..cb274cde3ad23 100644
---- a/drivers/hid/hid-uclogic-core-test.c
-+++ b/drivers/hid/hid-uclogic-core-test.c
-@@ -56,6 +56,11 @@ static struct uclogic_raw_event_hook_test test_events[] = {
- },
- };
-
-+static void fake_work(struct work_struct *work)
-+{
-+
-+}
-+
- static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
- {
- struct uclogic_params p = {0, };
-@@ -77,6 +82,8 @@ static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter->event);
- memcpy(filter->event, &hook_events[n].event[0], filter->size);
-
-+ INIT_WORK(&filter->work, fake_work);
-+
- list_add_tail(&filter->list, &p.event_hooks->list);
- }
-
-diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c
-index 678f50cbb160b..a30121419a292 100644
---- a/drivers/hid/hid-uclogic-params-test.c
-+++ b/drivers/hid/hid-uclogic-params-test.c
-@@ -174,12 +174,26 @@ static void hid_test_uclogic_parse_ugee_v2_desc(struct kunit *test)
- KUNIT_EXPECT_EQ(test, params->frame_type, frame_type);
- }
-
-+struct fake_device {
-+ unsigned long quirks;
-+};
-+
- static void hid_test_uclogic_params_cleanup_event_hooks(struct kunit *test)
- {
- int res, n;
-+ struct hid_device *hdev;
-+ struct fake_device *fake_dev;
- struct uclogic_params p = {0, };
-
-- res = uclogic_params_ugee_v2_init_event_hooks(NULL, &p);
-+ hdev = kunit_kzalloc(test, sizeof(struct hid_device), GFP_KERNEL);
-+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdev);
-+
-+ fake_dev = kunit_kzalloc(test, sizeof(struct fake_device), GFP_KERNEL);
-+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_dev);
-+
-+ hid_set_drvdata(hdev, fake_dev);
-+
-+ res = uclogic_params_ugee_v2_init_event_hooks(hdev, &p);
- KUNIT_ASSERT_EQ(test, res, 0);
-
- /* Check that the function can be called repeatedly */
-diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
-index ba37a5efbf820..ab2edff018eb6 100644
---- a/drivers/hte/hte-tegra194-test.c
-+++ b/drivers/hte/hte-tegra194-test.c
-@@ -153,8 +153,10 @@ static int tegra_hte_test_probe(struct platform_device *pdev)
- }
-
- cnt = of_hte_req_count(hte.pdev);
-- if (cnt < 0)
-+ if (cnt < 0) {
-+ ret = cnt;
- goto free_irq;
-+ }
-
- dev_info(&pdev->dev, "Total requested lines:%d\n", cnt);
-
-diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
-index 5fd136baf1cd3..19b9bf3d75ef9 100644
---- a/drivers/hwmon/axi-fan-control.c
-+++ b/drivers/hwmon/axi-fan-control.c
-@@ -496,6 +496,21 @@ static int axi_fan_control_probe(struct platform_device *pdev)
- return -ENODEV;
- }
-
-+ ret = axi_fan_control_init(ctl, pdev->dev.of_node);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Failed to initialize device\n");
-+ return ret;
-+ }
-+
-+ ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
-+ name,
-+ ctl,
-+ &axi_chip_info,
-+ axi_fan_control_groups);
-+
-+ if (IS_ERR(ctl->hdev))
-+ return PTR_ERR(ctl->hdev);
-+
- ctl->irq = platform_get_irq(pdev, 0);
- if (ctl->irq < 0)
- return ctl->irq;
-@@ -509,19 +524,7 @@ static int axi_fan_control_probe(struct platform_device *pdev)
- return ret;
- }
-
-- ret = axi_fan_control_init(ctl, pdev->dev.of_node);
-- if (ret) {
-- dev_err(&pdev->dev, "Failed to initialize device\n");
-- return ret;
-- }
--
-- ctl->hdev = devm_hwmon_device_register_with_info(&pdev->dev,
-- name,
-- ctl,
-- &axi_chip_info,
-- axi_fan_control_groups);
--
-- return PTR_ERR_OR_ZERO(ctl->hdev);
-+ return 0;
- }
-
- static struct platform_driver axi_fan_control_driver = {
-diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
-index eba94f68585a8..ba82d1e79c131 100644
---- a/drivers/hwmon/coretemp.c
-+++ b/drivers/hwmon/coretemp.c
-@@ -42,7 +42,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
- #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
- #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
- #define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
--#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
-+#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
- #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
- #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
- #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
-index b5b81bd83bb15..d928eb8ae5a37 100644
---- a/drivers/hwmon/nct6775-core.c
-+++ b/drivers/hwmon/nct6775-core.c
-@@ -1614,17 +1614,21 @@ struct nct6775_data *nct6775_update_device(struct device *dev)
- data->fan_div[i]);
-
- if (data->has_fan_min & BIT(i)) {
-- err = nct6775_read_value(data, data->REG_FAN_MIN[i], &reg);
-+ u16 tmp;
-+
-+ err = nct6775_read_value(data, data->REG_FAN_MIN[i], &tmp);
- if (err)
- goto out;
-- data->fan_min[i] = reg;
-+ data->fan_min[i] = tmp;
- }
-
- if (data->REG_FAN_PULSES[i]) {
-- err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &reg);
-+ u16 tmp;
-+
-+ err = nct6775_read_value(data, data->REG_FAN_PULSES[i], &tmp);
- if (err)
- goto out;
-- data->fan_pulses[i] = (reg >> data->FAN_PULSE_SHIFT[i]) & 0x03;
-+ data->fan_pulses[i] = (tmp >> data->FAN_PULSE_SHIFT[i]) & 0x03;
- }
-
- err = nct6775_select_fan_div(dev, data, i, reg);
-diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
-index 26ba506331007..b9bb469e2d8fe 100644
---- a/drivers/hwmon/pmbus/mp2975.c
-+++ b/drivers/hwmon/pmbus/mp2975.c
-@@ -297,6 +297,11 @@ static int mp2973_read_word_data(struct i2c_client *client, int page,
- int ret;
-
- switch (reg) {
-+ case PMBUS_STATUS_WORD:
-+ /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
-+ ret = pmbus_read_word_data(client, page, phase, reg);
-+ ret ^= PB_STATUS_POWER_GOOD_N;
-+ break;
- case PMBUS_OT_FAULT_LIMIT:
- ret = mp2975_read_word_helper(client, page, phase, reg,
- GENMASK(7, 0));
-@@ -380,11 +385,6 @@ static int mp2975_read_word_data(struct i2c_client *client, int page,
- int ret;
-
- switch (reg) {
-- case PMBUS_STATUS_WORD:
-- /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
-- ret = pmbus_read_word_data(client, page, phase, reg);
-- ret ^= PB_STATUS_POWER_GOOD_N;
-- break;
- case PMBUS_OT_FAULT_LIMIT:
- ret = mp2975_read_word_helper(client, page, phase, reg,
- GENMASK(7, 0));
-diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
-index 1bbda3b05532e..bf408e35e2c32 100644
---- a/drivers/hwmon/sch5627.c
-+++ b/drivers/hwmon/sch5627.c
-@@ -6,6 +6,7 @@
-
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-+#include <linux/bits.h>
- #include <linux/module.h>
- #include <linux/mod_devicetable.h>
- #include <linux/init.h>
-@@ -32,6 +33,10 @@
- #define SCH5627_REG_PRIMARY_ID 0x3f
- #define SCH5627_REG_CTRL 0x40
-
-+#define SCH5627_CTRL_START BIT(0)
-+#define SCH5627_CTRL_LOCK BIT(1)
-+#define SCH5627_CTRL_VBAT BIT(4)
-+
- #define SCH5627_NO_TEMPS 8
- #define SCH5627_NO_FANS 4
- #define SCH5627_NO_IN 5
-@@ -147,7 +152,8 @@ static int sch5627_update_in(struct sch5627_data *data)
-
- /* Trigger a Vbat voltage measurement every 5 minutes */
- if (time_after(jiffies, data->last_battery + 300 * HZ)) {
-- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | 0x10);
-+ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
-+ data->control | SCH5627_CTRL_VBAT);
- data->last_battery = jiffies;
- }
-
-@@ -226,6 +232,14 @@ static int reg_to_rpm(u16 reg)
- static umode_t sch5627_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
- int channel)
- {
-+ const struct sch5627_data *data = drvdata;
-+
-+ /* Once the lock bit is set, the virtual registers become read-only
-+ * until the next power cycle.
-+ */
-+ if (data->control & SCH5627_CTRL_LOCK)
-+ return 0444;
-+
- if (type == hwmon_pwm && attr == hwmon_pwm_auto_channels_temp)
- return 0644;
-
-@@ -483,14 +497,13 @@ static int sch5627_probe(struct platform_device *pdev)
- return val;
-
- data->control = val;
-- if (!(data->control & 0x01)) {
-+ if (!(data->control & SCH5627_CTRL_START)) {
- pr_err("hardware monitoring not enabled\n");
- return -ENODEV;
- }
- /* Trigger a Vbat voltage measurement, so that we get a valid reading
- the first time we read Vbat */
-- sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
-- data->control | 0x10);
-+ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL, data->control | SCH5627_CTRL_VBAT);
- data->last_battery = jiffies;
-
- /*
-diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
-index de3a0886c2f72..ac1f725807155 100644
---- a/drivers/hwmon/sch56xx-common.c
-+++ b/drivers/hwmon/sch56xx-common.c
-@@ -7,10 +7,8 @@
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
- #include <linux/module.h>
--#include <linux/mod_devicetable.h>
- #include <linux/init.h>
- #include <linux/platform_device.h>
--#include <linux/dmi.h>
- #include <linux/err.h>
- #include <linux/io.h>
- #include <linux/acpi.h>
-@@ -21,10 +19,7 @@
- #include <linux/slab.h>
- #include "sch56xx-common.h"
-
--static bool ignore_dmi;
--module_param(ignore_dmi, bool, 0);
--MODULE_PARM_DESC(ignore_dmi, "Omit DMI check for supported devices (default=0)");
--
-+/* Insmod parameters */
- static bool nowayout = WATCHDOG_NOWAYOUT;
- module_param(nowayout, bool, 0);
- MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
-@@ -523,66 +518,11 @@ static int __init sch56xx_device_add(int address, const char *name)
- return PTR_ERR_OR_ZERO(sch56xx_pdev);
- }
-
--static const struct dmi_system_id sch56xx_dmi_override_table[] __initconst = {
-- {
-- .matches = {
-- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
-- DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS W380"),
-- },
-- },
-- {
-- .matches = {
-- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
-- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO P710"),
-- },
-- },
-- {
-- .matches = {
-- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
-- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO E9900"),
-- },
-- },
-- { }
--};
--
--/* For autoloading only */
--static const struct dmi_system_id sch56xx_dmi_table[] __initconst = {
-- {
-- .matches = {
-- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
-- },
-- },
-- { }
--};
--MODULE_DEVICE_TABLE(dmi, sch56xx_dmi_table);
--
- static int __init sch56xx_init(void)
- {
-- const char *name = NULL;
- int address;
-+ const char *name = NULL;
-
-- if (!ignore_dmi) {
-- if (!dmi_check_system(sch56xx_dmi_table))
-- return -ENODEV;
--
-- if (!dmi_check_system(sch56xx_dmi_override_table)) {
-- /*
-- * Some machines like the Esprimo P720 and Esprimo C700 have
-- * onboard devices named " Antiope"/" Theseus" instead of
-- * "Antiope"/"Theseus", so we need to check for both.
-- */
-- if (!dmi_find_device(DMI_DEV_TYPE_OTHER, "Antiope", NULL) &&
-- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Antiope", NULL) &&
-- !dmi_find_device(DMI_DEV_TYPE_OTHER, "Theseus", NULL) &&
-- !dmi_find_device(DMI_DEV_TYPE_OTHER, " Theseus", NULL))
-- return -ENODEV;
-- }
-- }
--
-- /*
-- * Some devices like the Esprimo C700 have both onboard devices,
-- * so we still have to check manually
-- */
- address = sch56xx_find(0x4e, &name);
- if (address < 0)
- address = sch56xx_find(0x2e, &name);
-diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
-index 6644eebedaf3b..97d27e01a6ee2 100644
---- a/drivers/i2c/busses/Kconfig
-+++ b/drivers/i2c/busses/Kconfig
-@@ -158,6 +158,7 @@ config I2C_I801
- Alder Lake (PCH)
- Raptor Lake (PCH)
- Meteor Lake (SOC and PCH)
-+ Birch Stream (SOC)
-
- This driver can also be built as a module. If so, the module
- will be called i2c-i801.
-diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
-index 51aab662050b1..e905734c26a04 100644
---- a/drivers/i2c/busses/i2c-bcm-iproc.c
-+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
-@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
- }
-
--static void bcm_iproc_i2c_check_slave_status(
-- struct bcm_iproc_i2c_dev *iproc_i2c)
-+static bool bcm_iproc_i2c_check_slave_status
-+ (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
- {
- u32 val;
-+ bool recover = false;
-
-- val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
-- /* status is valid only when START_BUSY is cleared after it was set */
-- if (val & BIT(S_CMD_START_BUSY_SHIFT))
-- return;
-+ /* check slave transmit status only if slave is transmitting */
-+ if (!iproc_i2c->slave_rx_only) {
-+ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
-+ /* status is valid only when START_BUSY is cleared */
-+ if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
-+ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
-+ if (val == S_CMD_STATUS_TIMEOUT ||
-+ val == S_CMD_STATUS_MASTER_ABORT) {
-+ dev_warn(iproc_i2c->device,
-+ (val == S_CMD_STATUS_TIMEOUT) ?
-+ "slave random stretch time timeout\n" :
-+ "Master aborted read transaction\n");
-+ recover = true;
-+ }
-+ }
-+ }
-+
-+ /* RX_EVENT is not valid when START_BUSY is set */
-+ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
-+ (status & BIT(IS_S_START_BUSY_SHIFT))) {
-+ dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
-+ recover = true;
-+ }
-
-- val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
-- if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
-- dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
-- "slave random stretch time timeout\n" :
-- "Master aborted read transaction\n");
-+ if (recover) {
- /* re-initialize i2c for recovery */
- bcm_iproc_i2c_enable_disable(iproc_i2c, false);
- bcm_iproc_i2c_slave_init(iproc_i2c, true);
- bcm_iproc_i2c_enable_disable(iproc_i2c, true);
- }
-+
-+ return recover;
- }
-
- static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
-@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
- u32 val;
- u8 value;
-
-- /*
-- * Slave events in case of master-write, master-write-read and,
-- * master-read
-- *
-- * Master-write : only IS_S_RX_EVENT_SHIFT event
-- * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
-- * events
-- * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
-- * events or only IS_S_RD_EVENT_SHIFT
-- *
-- * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
-- * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
-- * full. This can happen if Master issues write requests of more than
-- * 64 bytes.
-- */
-- if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
-- status & BIT(IS_S_RD_EVENT_SHIFT) ||
-- status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
-- /* disable slave interrupts */
-- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
-- val &= ~iproc_i2c->slave_int_mask;
-- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
--
-- if (status & BIT(IS_S_RD_EVENT_SHIFT))
-- /* Master-write-read request */
-- iproc_i2c->slave_rx_only = false;
-- else
-- /* Master-write request only */
-- iproc_i2c->slave_rx_only = true;
--
-- /* schedule tasklet to read data later */
-- tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
--
-- /*
-- * clear only IS_S_RX_EVENT_SHIFT and
-- * IS_S_RX_FIFO_FULL_SHIFT interrupt.
-- */
-- val = BIT(IS_S_RX_EVENT_SHIFT);
-- if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
-- val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
-- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
-- }
-
- if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
- iproc_i2c->tx_underrun++;
-@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
- * less than PKT_LENGTH bytes were output on the SMBUS
- */
- iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
-- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
-- iproc_i2c->slave_int_mask);
-+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
-+ val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
-+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
-
- /* End of SMBUS for Master Read */
- val = BIT(S_TX_WR_STATUS_SHIFT);
-@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
- BIT(IS_S_START_BUSY_SHIFT));
- }
-
-- /* check slave transmit status only if slave is transmitting */
-- if (!iproc_i2c->slave_rx_only)
-- bcm_iproc_i2c_check_slave_status(iproc_i2c);
-+ /* if the controller has been reset, immediately return from the ISR */
-+ if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
-+ return true;
-+
-+ /*
-+ * Slave events in case of master-write, master-write-read and,
-+ * master-read
-+ *
-+ * Master-write : only IS_S_RX_EVENT_SHIFT event
-+ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
-+ * events
-+ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
-+ * events or only IS_S_RD_EVENT_SHIFT
-+ *
-+ * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
-+ * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
-+ * full. This can happen if Master issues write requests of more than
-+ * 64 bytes.
-+ */
-+ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
-+ status & BIT(IS_S_RD_EVENT_SHIFT) ||
-+ status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
-+ /* disable slave interrupts */
-+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
-+ val &= ~iproc_i2c->slave_int_mask;
-+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
-+
-+ if (status & BIT(IS_S_RD_EVENT_SHIFT))
-+ /* Master-write-read request */
-+ iproc_i2c->slave_rx_only = false;
-+ else
-+ /* Master-write request only */
-+ iproc_i2c->slave_rx_only = true;
-+
-+ /* schedule tasklet to read data later */
-+ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
-+
-+ /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
-+ if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
-+ val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
-+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
-+ }
-+ }
-
- return true;
- }
-diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
-index ca1035e010c72..85dbd0eb5392c 100644
---- a/drivers/i2c/busses/i2c-designware-master.c
-+++ b/drivers/i2c/busses/i2c-designware-master.c
-@@ -519,10 +519,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
-
- /*
- * Because we don't know the buffer length in the
-- * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
-- * the transaction here.
-+ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
-+ * transaction here. Also disable the TX_EMPTY IRQ
-+ * while waiting for the data length byte to avoid the
-+ * bogus interrupts flood.
- */
-- if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
-+ if (flags & I2C_M_RECV_LEN) {
-+ dev->status |= STATUS_WRITE_IN_PROGRESS;
-+ intr_mask &= ~DW_IC_INTR_TX_EMPTY;
-+ break;
-+ } else if (buf_len > 0) {
- /* more bytes to be written */
- dev->status |= STATUS_WRITE_IN_PROGRESS;
- break;
-@@ -558,6 +564,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
- msgs[dev->msg_read_idx].len = len;
- msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
-
-+ /*
-+ * Received buffer length, re-enable TX_EMPTY interrupt
-+ * to resume the SMBUS transaction.
-+ */
-+ regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
-+ DW_IC_INTR_TX_EMPTY);
-+
- return len;
- }
-
-diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
-index 1d855258a45dc..a87e3c15e5fc6 100644
---- a/drivers/i2c/busses/i2c-i801.c
-+++ b/drivers/i2c/busses/i2c-i801.c
-@@ -79,6 +79,7 @@
- * Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
- * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
- * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
-+ * Birch Stream (SOC) 0x5796 32 hard yes yes yes
- *
- * Features supported by this driver:
- * Software PEC no
-@@ -231,6 +232,7 @@
- #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
- #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
- #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
-+#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
- #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
- #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
- #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
-@@ -679,15 +681,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
- return result ? priv->status : -ETIMEDOUT;
- }
-
-- for (i = 1; i <= len; i++) {
-- if (i == len && read_write == I2C_SMBUS_READ)
-- smbcmd |= SMBHSTCNT_LAST_BYTE;
-- outb_p(smbcmd, SMBHSTCNT(priv));
--
-- if (i == 1)
-- outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
-- SMBHSTCNT(priv));
-+ if (len == 1 && read_write == I2C_SMBUS_READ)
-+ smbcmd |= SMBHSTCNT_LAST_BYTE;
-+ outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
-
-+ for (i = 1; i <= len; i++) {
- status = i801_wait_byte_done(priv);
- if (status)
- return status;
-@@ -710,9 +708,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
- data->block[0] = len;
- }
-
-- /* Retrieve/store value in SMBBLKDAT */
-- if (read_write == I2C_SMBUS_READ)
-+ if (read_write == I2C_SMBUS_READ) {
- data->block[i] = inb_p(SMBBLKDAT(priv));
-+ if (i == len - 1)
-+ outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
-+ }
-+
- if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
- outb_p(data->block[i+1], SMBBLKDAT(priv));
-
-@@ -1044,6 +1045,7 @@ static const struct pci_device_id i801_ids[] = {
- { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
-+ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { 0, }
- };
-
-diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
-index 29be05af826b0..3bd406470940f 100644
---- a/drivers/i2c/busses/i2c-pxa.c
-+++ b/drivers/i2c/busses/i2c-pxa.c
-@@ -264,6 +264,9 @@ struct pxa_i2c {
- u32 hs_mask;
-
- struct i2c_bus_recovery_info recovery;
-+ struct pinctrl *pinctrl;
-+ struct pinctrl_state *pinctrl_default;
-+ struct pinctrl_state *pinctrl_recovery;
- };
-
- #define _IBMR(i2c) ((i2c)->reg_ibmr)
-@@ -1300,12 +1303,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
- */
- gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
- gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
-+
-+ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
- }
-
- static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
- {
- struct pxa_i2c *i2c = adap->algo_data;
-- struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- u32 isr;
-
- /*
-@@ -1319,7 +1323,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
- i2c_pxa_do_reset(i2c);
- }
-
-- WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
-+ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
-
- dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
- readl(_IBMR(i2c)), readl(_ISR(i2c)));
-@@ -1341,20 +1345,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
- if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
- return 0;
-
-- bri->pinctrl = devm_pinctrl_get(dev);
-- if (PTR_ERR(bri->pinctrl) == -ENODEV) {
-- bri->pinctrl = NULL;
-+ i2c->pinctrl = devm_pinctrl_get(dev);
-+ if (PTR_ERR(i2c->pinctrl) == -ENODEV)
-+ i2c->pinctrl = NULL;
-+ if (IS_ERR(i2c->pinctrl))
-+ return PTR_ERR(i2c->pinctrl);
-+
-+ if (!i2c->pinctrl)
-+ return 0;
-+
-+ i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
-+ PINCTRL_STATE_DEFAULT);
-+ i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
-+
-+ if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
-+ dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
-+ PTR_ERR(i2c->pinctrl_default),
-+ PTR_ERR(i2c->pinctrl_recovery));
-+ return 0;
-+ }
-+
-+ /*
-+ * Claiming GPIOs can influence the pinmux state, and may glitch the
-+ * I2C bus. Do this carefully.
-+ */
-+ bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
-+ if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
-+ return -EPROBE_DEFER;
-+ if (IS_ERR(bri->scl_gpiod)) {
-+ dev_info(dev, "missing scl gpio recovery information: %pe\n",
-+ bri->scl_gpiod);
-+ return 0;
-+ }
-+
-+ /*
-+ * We have SCL. Pull SCL low and wait a bit so that SDA glitches
-+ * have no effect.
-+ */
-+ gpiod_direction_output(bri->scl_gpiod, 0);
-+ udelay(10);
-+ bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
-+
-+ /* Wait a bit in case of an SDA glitch, and then release SCL. */
-+ udelay(10);
-+ gpiod_direction_output(bri->scl_gpiod, 1);
-+
-+ if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
-+ return -EPROBE_DEFER;
-+
-+ if (IS_ERR(bri->sda_gpiod)) {
-+ dev_info(dev, "missing sda gpio recovery information: %pe\n",
-+ bri->sda_gpiod);
- return 0;
- }
-- if (IS_ERR(bri->pinctrl))
-- return PTR_ERR(bri->pinctrl);
-
- bri->prepare_recovery = i2c_pxa_prepare_recovery;
- bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
-+ bri->recover_bus = i2c_generic_scl_recovery;
-
- i2c->adap.bus_recovery_info = bri;
-
-- return 0;
-+ /*
-+ * Claiming GPIOs can change the pinmux state, which confuses the
-+ * pinctrl since pinctrl's idea of the current setting is unaffected
-+ * by the pinmux change caused by claiming the GPIO. Work around that
-+ * by switching pinctrl to the GPIO state here. We do it this way to
-+ * avoid glitching the I2C bus.
-+ */
-+ pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
-+
-+ return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
- }
-
- static int i2c_pxa_probe(struct platform_device *dev)
-diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
-index fa6020dced595..85e035e7a1d75 100644
---- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
-+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
-@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
- return -EINVAL;
- }
-
-+ if (clk_freq == 0) {
-+ dev_err(dev, "clock-frequency is set to 0 in DT\n");
-+ return -EINVAL;
-+ }
-+
- if (of_get_child_count(np) > 1) {
- dev_err(dev, "P2WI only supports one slave device\n");
- return -EINVAL;
-diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
-index 60746652fd525..7f30bcceebaed 100644
---- a/drivers/i2c/i2c-core-base.c
-+++ b/drivers/i2c/i2c-core-base.c
-@@ -931,8 +931,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
- struct i2c_client *
- i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
- {
-- struct i2c_client *client;
-- int status;
-+ struct i2c_client *client;
-+ bool need_put = false;
-+ int status;
-
- client = kzalloc(sizeof *client, GFP_KERNEL);
- if (!client)
-@@ -970,7 +971,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
- client->dev.fwnode = info->fwnode;
-
- device_enable_async_suspend(&client->dev);
-- i2c_dev_set_name(adap, client, info);
-
- if (info->swnode) {
- status = device_add_software_node(&client->dev, info->swnode);
-@@ -982,6 +982,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
- }
- }
-
-+ i2c_dev_set_name(adap, client, info);
- status = device_register(&client->dev);
- if (status)
- goto out_remove_swnode;
-@@ -993,6 +994,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
-
- out_remove_swnode:
- device_remove_software_node(&client->dev);
-+ need_put = true;
- out_err_put_of_node:
- of_node_put(info->of_node);
- out_err:
-@@ -1000,7 +1002,10 @@ out_err:
- "Failed to register i2c client %s at 0x%02x (%d)\n",
- client->name, client->addr, status);
- out_err_silent:
-- kfree(client);
-+ if (need_put)
-+ put_device(&client->dev);
-+ else
-+ kfree(client);
- return ERR_PTR(status);
- }
- EXPORT_SYMBOL_GPL(i2c_new_client_device);
-diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
-index 1247e6e6e9751..05b8b8dfa9bdd 100644
---- a/drivers/i2c/i2c-core.h
-+++ b/drivers/i2c/i2c-core.h
-@@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
- */
- static inline bool i2c_in_atomic_xfer_mode(void)
- {
-- return system_state > SYSTEM_RUNNING && irqs_disabled();
-+ return system_state > SYSTEM_RUNNING && !preemptible();
- }
-
- static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
-diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
-index a01b59e3599b5..7d337380a05d9 100644
---- a/drivers/i2c/i2c-dev.c
-+++ b/drivers/i2c/i2c-dev.c
-@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
- return -EINVAL;
-
-- rdwr_pa = memdup_user(rdwr_arg.msgs,
-- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
-+ rdwr_pa = memdup_array_user(rdwr_arg.msgs,
-+ rdwr_arg.nmsgs, sizeof(struct i2c_msg));
- if (IS_ERR(rdwr_pa))
- return PTR_ERR(rdwr_pa);
-
-diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
-index 87283e4a46076..0e9ff5500a777 100644
---- a/drivers/i3c/master.c
-+++ b/drivers/i3c/master.c
-@@ -1525,9 +1525,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
- desc->dev->dev.of_node = desc->boardinfo->of_node;
-
- ret = device_register(&desc->dev->dev);
-- if (ret)
-+ if (ret) {
- dev_err(&master->dev,
- "Failed to add I3C device (err = %d)\n", ret);
-+ put_device(&desc->dev->dev);
-+ }
- }
- }
-
-diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
-index 49551db71bc96..8f1fda3c7ac52 100644
---- a/drivers/i3c/master/i3c-master-cdns.c
-+++ b/drivers/i3c/master/i3c-master-cdns.c
-@@ -191,7 +191,7 @@
- #define SLV_STATUS1_HJ_DIS BIT(18)
- #define SLV_STATUS1_MR_DIS BIT(17)
- #define SLV_STATUS1_PROT_ERR BIT(16)
--#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
-+#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
- #define SLV_STATUS1_HAS_DA BIT(8)
- #define SLV_STATUS1_DDR_RX_FULL BIT(7)
- #define SLV_STATUS1_DDR_TX_FULL BIT(6)
-@@ -1623,13 +1623,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
- /* Device ID0 is reserved to describe this master. */
- master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
- master->free_rr_slots = GENMASK(master->maxdevs, 1);
-+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
-+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
-
- val = readl(master->regs + CONF_STATUS1);
- master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
- master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
- master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
-- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
-- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
-
- spin_lock_init(&master->ibi.lock);
- master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
-diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
-index 97bb49ff5b53b..47b9b4d4ed3fc 100644
---- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
-+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
-@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
- return -EOPNOTSUPP;
- }
-
-- /* use a bitmap for faster free slot search */
-- hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
-- if (!hci->DAT_data)
-- return -ENOMEM;
--
-- /* clear them */
-- for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
-- dat_w0_write(dat_idx, 0);
-- dat_w1_write(dat_idx, 0);
-+ if (!hci->DAT_data) {
-+ /* use a bitmap for faster free slot search */
-+ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
-+ if (!hci->DAT_data)
-+ return -ENOMEM;
-+
-+ /* clear them */
-+ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
-+ dat_w0_write(dat_idx, 0);
-+ dat_w1_write(dat_idx, 0);
-+ }
- }
-
- return 0;
-@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
- static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
- {
- unsigned int dat_idx;
-+ int ret;
-
-+ if (!hci->DAT_data) {
-+ ret = hci_dat_v1_init(hci);
-+ if (ret)
-+ return ret;
-+ }
- dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
- if (dat_idx >= hci->DAT_entries)
- return -ENOENT;
-@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
- {
- dat_w0_write(dat_idx, 0);
- dat_w1_write(dat_idx, 0);
-- __clear_bit(dat_idx, hci->DAT_data);
-+ if (hci->DAT_data)
-+ __clear_bit(dat_idx, hci->DAT_data);
- }
-
- static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
-diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
-index 2990ac9eaade7..71b5dbe45c45c 100644
---- a/drivers/i3c/master/mipi-i3c-hci/dma.c
-+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
-@@ -734,7 +734,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
- unsigned int i;
- bool handled = false;
-
-- for (i = 0; mask && i < 8; i++) {
-+ for (i = 0; mask && i < rings->total; i++) {
- struct hci_rh_data *rh;
- u32 status;
-
-diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
-index 8f8295acdadb3..c395e52294140 100644
---- a/drivers/i3c/master/svc-i3c-master.c
-+++ b/drivers/i3c/master/svc-i3c-master.c
-@@ -93,6 +93,7 @@
- #define SVC_I3C_MINTMASKED 0x098
- #define SVC_I3C_MERRWARN 0x09C
- #define SVC_I3C_MERRWARN_NACK BIT(2)
-+#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
- #define SVC_I3C_MDMACTRL 0x0A0
- #define SVC_I3C_MDATACTRL 0x0AC
- #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
-@@ -175,6 +176,7 @@ struct svc_i3c_regs_save {
- * @ibi.slots: Available IBI slots
- * @ibi.tbq_slot: To be queued IBI slot
- * @ibi.lock: IBI lock
-+ * @lock: Transfer lock; protects against races between the IBI work thread and callbacks from the master
- */
- struct svc_i3c_master {
- struct i3c_master_controller base;
-@@ -203,6 +205,7 @@ struct svc_i3c_master {
- /* Prevent races within IBI handlers */
- spinlock_t lock;
- } ibi;
-+ struct mutex lock;
- };
-
- /**
-@@ -225,6 +228,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
- if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
- merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
- writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
-+
-+ /* Ignore timeout error */
-+ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
-+ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
-+ mstatus, merrwarn);
-+ return false;
-+ }
-+
- dev_err(master->dev,
- "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
- mstatus, merrwarn);
-@@ -331,6 +342,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
- struct i3c_ibi_slot *slot;
- unsigned int count;
- u32 mdatactrl;
-+ int ret, val;
- u8 *buf;
-
- slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
-@@ -340,6 +352,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
- slot->len = 0;
- buf = slot->data;
-
-+ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
-+ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
-+ if (ret) {
-+ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
-+ return ret;
-+ }
-+
- while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
- slot->len < SVC_I3C_FIFO_SIZE) {
- mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
-@@ -384,6 +403,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
- u32 status, val;
- int ret;
-
-+ mutex_lock(&master->lock);
- /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
- writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
- SVC_I3C_MCTRL_IBIRESP_AUTO,
-@@ -394,6 +414,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
- SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
- if (ret) {
- dev_err(master->dev, "Timeout when polling for IBIWON\n");
-+ svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
- }
-
-@@ -460,12 +481,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
-
- reenable_ibis:
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
-+ mutex_unlock(&master->lock);
- }
-
- static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
- {
- struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
-- u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
-+ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
-
- if (!SVC_I3C_MSTATUS_SLVSTART(active))
- return IRQ_NONE;
-@@ -1007,6 +1029,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
- u32 reg;
- int ret;
-
-+ /* clear the SVC_I3C_MINT_IBIWON w1c bits */
-+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
-+
- writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
- xfer_type |
- SVC_I3C_MCTRL_IBIRESP_NACK |
-@@ -1025,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
- goto emit_stop;
- }
-
-+ /*
-+ * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
-+ * with I3C Target Address.
-+ *
-+ * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
-+ * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
-+ * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
-+ * a Hot-Join Request has been made.
-+ *
-+ * If the IBIWON check is missed, wrong data will be returned. When IBIWON happens, return
-+ * failure and yield to the handlers for the events above.
-+ */
-+ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
-+ ret = -ENXIO;
-+ goto emit_stop;
-+ }
-+
- if (rnw)
- ret = svc_i3c_master_read(master, in, xfer_len);
- else
-@@ -1204,9 +1246,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
- cmd->read_len = 0;
- cmd->continued = false;
-
-+ mutex_lock(&master->lock);
- svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
- svc_i3c_master_dequeue_xfer(master, xfer);
-+ mutex_unlock(&master->lock);
-
- ret = xfer->ret;
- kfree(buf);
-@@ -1250,9 +1294,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
- cmd->read_len = read_len;
- cmd->continued = false;
-
-+ mutex_lock(&master->lock);
- svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
- svc_i3c_master_dequeue_xfer(master, xfer);
-+ mutex_unlock(&master->lock);
-
- if (cmd->read_len != xfer_len)
- ccc->dests[0].payload.len = cmd->read_len;
-@@ -1309,9 +1355,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
- cmd->continued = (i + 1) < nxfers;
- }
-
-+ mutex_lock(&master->lock);
- svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
- svc_i3c_master_dequeue_xfer(master, xfer);
-+ mutex_unlock(&master->lock);
-
- ret = xfer->ret;
- svc_i3c_master_free_xfer(xfer);
-@@ -1347,9 +1395,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- cmd->continued = (i + 1 < nxfers);
- }
-
-+ mutex_lock(&master->lock);
- svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
- svc_i3c_master_dequeue_xfer(master, xfer);
-+ mutex_unlock(&master->lock);
-
- ret = xfer->ret;
- svc_i3c_master_free_xfer(xfer);
-@@ -1540,6 +1590,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
-
- INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
- INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
-+ mutex_init(&master->lock);
-+
- ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
- IRQF_NO_SUSPEND, "svc-i3c-irq", master);
- if (ret)
-diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
-index 2f082006550fd..bbd5bdd732f01 100644
---- a/drivers/iio/adc/stm32-adc-core.c
-+++ b/drivers/iio/adc/stm32-adc-core.c
-@@ -708,6 +708,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
- struct stm32_adc_priv *priv;
- struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
-+ const struct of_device_id *of_id;
-+
- struct resource *res;
- u32 max_rate;
- int ret;
-@@ -720,8 +722,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
- return -ENOMEM;
- platform_set_drvdata(pdev, &priv->common);
-
-- priv->cfg = (const struct stm32_adc_priv_cfg *)
-- of_match_device(dev->driver->of_match_table, dev)->data;
-+ of_id = of_match_device(dev->driver->of_match_table, dev);
-+ if (!of_id)
-+ return -ENODEV;
-+
-+ priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data;
- priv->nb_adc_max = priv->cfg->num_adcs;
- spin_lock_init(&priv->common.lock);
-
-diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
-index 85e289700c3c5..4abf80f75ef5d 100644
---- a/drivers/iio/frequency/adf4350.c
-+++ b/drivers/iio/frequency/adf4350.c
-@@ -33,7 +33,6 @@ enum {
-
- struct adf4350_state {
- struct spi_device *spi;
-- struct regulator *reg;
- struct gpio_desc *lock_detect_gpiod;
- struct adf4350_platform_data *pdata;
- struct clk *clk;
-@@ -469,6 +468,15 @@ static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
- return pdata;
- }
-
-+static void adf4350_power_down(void *data)
-+{
-+ struct iio_dev *indio_dev = data;
-+ struct adf4350_state *st = iio_priv(indio_dev);
-+
-+ st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
-+ adf4350_sync_config(st);
-+}
-+
- static int adf4350_probe(struct spi_device *spi)
- {
- struct adf4350_platform_data *pdata;
-@@ -491,31 +499,21 @@ static int adf4350_probe(struct spi_device *spi)
- }
-
- if (!pdata->clkin) {
-- clk = devm_clk_get(&spi->dev, "clkin");
-+ clk = devm_clk_get_enabled(&spi->dev, "clkin");
- if (IS_ERR(clk))
-- return -EPROBE_DEFER;
--
-- ret = clk_prepare_enable(clk);
-- if (ret < 0)
-- return ret;
-+ return PTR_ERR(clk);
- }
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-- if (indio_dev == NULL) {
-- ret = -ENOMEM;
-- goto error_disable_clk;
-- }
-+ if (indio_dev == NULL)
-+ return -ENOMEM;
-
- st = iio_priv(indio_dev);
-
-- st->reg = devm_regulator_get(&spi->dev, "vcc");
-- if (!IS_ERR(st->reg)) {
-- ret = regulator_enable(st->reg);
-- if (ret)
-- goto error_disable_clk;
-- }
-+ ret = devm_regulator_get_enable(&spi->dev, "vcc");
-+ if (ret)
-+ return ret;
-
-- spi_set_drvdata(spi, indio_dev);
- st->spi = spi;
- st->pdata = pdata;
-
-@@ -544,47 +542,21 @@ static int adf4350_probe(struct spi_device *spi)
-
- st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
- GPIOD_IN);
-- if (IS_ERR(st->lock_detect_gpiod)) {
-- ret = PTR_ERR(st->lock_detect_gpiod);
-- goto error_disable_reg;
-- }
-+ if (IS_ERR(st->lock_detect_gpiod))
-+ return PTR_ERR(st->lock_detect_gpiod);
-
- if (pdata->power_up_frequency) {
- ret = adf4350_set_freq(st, pdata->power_up_frequency);
- if (ret)
-- goto error_disable_reg;
-+ return ret;
- }
-
-- ret = iio_device_register(indio_dev);
-+ ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
- if (ret)
-- goto error_disable_reg;
--
-- return 0;
--
--error_disable_reg:
-- if (!IS_ERR(st->reg))
-- regulator_disable(st->reg);
--error_disable_clk:
-- clk_disable_unprepare(clk);
--
-- return ret;
--}
--
--static void adf4350_remove(struct spi_device *spi)
--{
-- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-- struct adf4350_state *st = iio_priv(indio_dev);
-- struct regulator *reg = st->reg;
--
-- st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
-- adf4350_sync_config(st);
--
-- iio_device_unregister(indio_dev);
--
-- clk_disable_unprepare(st->clk);
-+ return dev_err_probe(&spi->dev, ret,
-+ "Failed to add action to managed power down\n");
-
-- if (!IS_ERR(reg))
-- regulator_disable(reg);
-+ return devm_iio_device_register(&spi->dev, indio_dev);
- }
-
- static const struct of_device_id adf4350_of_match[] = {
-@@ -607,7 +579,6 @@ static struct spi_driver adf4350_driver = {
- .of_match_table = adf4350_of_match,
- },
- .probe = adf4350_probe,
-- .remove = adf4350_remove,
- .id_table = adf4350_id,
- };
- module_spi_driver(adf4350_driver);
-diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
-index a666847bd7143..010718738d04c 100644
---- a/drivers/infiniband/core/device.c
-+++ b/drivers/infiniband/core/device.c
-@@ -804,7 +804,7 @@ static int alloc_port_data(struct ib_device *device)
- * empty slots at the beginning.
- */
- pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
-- rdma_end_port(device) + 1),
-+ size_add(rdma_end_port(device), 1)),
- GFP_KERNEL);
- if (!pdata_rcu)
- return -ENOMEM;
-diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
-index 59179cfc20ef9..8175dde60b0a8 100644
---- a/drivers/infiniband/core/sa_query.c
-+++ b/drivers/infiniband/core/sa_query.c
-@@ -2159,7 +2159,9 @@ static int ib_sa_add_one(struct ib_device *device)
- s = rdma_start_port(device);
- e = rdma_end_port(device);
-
-- sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
-+ sa_dev = kzalloc(struct_size(sa_dev, port,
-+ size_add(size_sub(e, s), 1)),
-+ GFP_KERNEL);
- if (!sa_dev)
- return -ENOMEM;
-
-diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
-index ee59d73915689..ec5efdc166601 100644
---- a/drivers/infiniband/core/sysfs.c
-+++ b/drivers/infiniband/core/sysfs.c
-@@ -903,7 +903,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
- * Two extra attribute elements here, one for the lifespan entry and
- * one to NULL terminate the list for the sysfs core code
- */
-- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
-+ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
- GFP_KERNEL);
- if (!data)
- goto err_free_stats;
-@@ -1009,7 +1009,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
- * Two extra attribute elements here, one for the lifespan entry and
- * one to NULL terminate the list for the sysfs core code
- */
-- data = kzalloc(struct_size(data, attrs, stats->num_counters + 1),
-+ data = kzalloc(struct_size(data, attrs, size_add(stats->num_counters, 1)),
- GFP_KERNEL);
- if (!data)
- goto err_free_stats;
-@@ -1140,7 +1140,7 @@ static int setup_gid_attrs(struct ib_port *port,
- int ret;
-
- gid_attr_group = kzalloc(struct_size(gid_attr_group, attrs_list,
-- attr->gid_tbl_len * 2),
-+ size_mul(attr->gid_tbl_len, 2)),
- GFP_KERNEL);
- if (!gid_attr_group)
- return -ENOMEM;
-@@ -1205,8 +1205,8 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
- int ret;
-
- p = kvzalloc(struct_size(p, attrs_list,
-- attr->gid_tbl_len + attr->pkey_tbl_len),
-- GFP_KERNEL);
-+ size_add(attr->gid_tbl_len, attr->pkey_tbl_len)),
-+ GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
- p->ibdev = device;
-diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
-index 7e5c33aad1619..f5feca7fa9b9c 100644
---- a/drivers/infiniband/core/user_mad.c
-+++ b/drivers/infiniband/core/user_mad.c
-@@ -1378,7 +1378,9 @@ static int ib_umad_add_one(struct ib_device *device)
- s = rdma_start_port(device);
- e = rdma_end_port(device);
-
-- umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
-+ umad_dev = kzalloc(struct_size(umad_dev, ports,
-+ size_add(size_sub(e, s), 1)),
-+ GFP_KERNEL);
- if (!umad_dev)
- return -ENOMEM;
-
-diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
-index 7741a1d69097c..2b5d264f41e51 100644
---- a/drivers/infiniband/hw/hfi1/efivar.c
-+++ b/drivers/infiniband/hw/hfi1/efivar.c
-@@ -112,7 +112,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
- unsigned long *size, void **return_data)
- {
- char prefix_name[64];
-- char name[64];
-+ char name[128];
- int result;
-
- /* create a common prefix */
-diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
-index 08732e1ac9662..c132a9c073bff 100644
---- a/drivers/infiniband/hw/hfi1/pcie.c
-+++ b/drivers/infiniband/hw/hfi1/pcie.c
-@@ -3,6 +3,7 @@
- * Copyright(c) 2015 - 2019 Intel Corporation.
- */
-
-+#include <linux/bitfield.h>
- #include <linux/pci.h>
- #include <linux/io.h>
- #include <linux/delay.h>
-@@ -210,12 +211,6 @@ static u32 extract_speed(u16 linkstat)
- return speed;
- }
-
--/* return the PCIe link speed from the given link status */
--static u32 extract_width(u16 linkstat)
--{
-- return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
--}
--
- /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
- static void update_lbus_info(struct hfi1_devdata *dd)
- {
-@@ -228,7 +223,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
- return;
- }
-
-- dd->lbus_width = extract_width(linkstat);
-+ dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
- dd->lbus_speed = extract_speed(linkstat);
- snprintf(dd->lbus_info, sizeof(dd->lbus_info),
- "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
-diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
-index e77fcc74f15c4..3df032ddda189 100644
---- a/drivers/infiniband/hw/hns/hns_roce_ah.c
-+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
-@@ -33,7 +33,9 @@
- #include <linux/pci.h>
- #include <rdma/ib_addr.h>
- #include <rdma/ib_cache.h>
-+#include "hnae3.h"
- #include "hns_roce_device.h"
-+#include "hns_roce_hw_v2.h"
-
- static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
- {
-@@ -57,6 +59,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
- struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
- struct hns_roce_ah *ah = to_hr_ah(ibah);
- int ret = 0;
-+ u32 max_sl;
-
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
- return -EOPNOTSUPP;
-@@ -70,9 +73,17 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
- ah->av.hop_limit = grh->hop_limit;
- ah->av.flowlabel = grh->flow_label;
- ah->av.udp_sport = get_ah_udp_sport(ah_attr);
-- ah->av.sl = rdma_ah_get_sl(ah_attr);
- ah->av.tclass = get_tclass(grh);
-
-+ ah->av.sl = rdma_ah_get_sl(ah_attr);
-+ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
-+ if (unlikely(ah->av.sl > max_sl)) {
-+ ibdev_err_ratelimited(&hr_dev->ib_dev,
-+ "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
-+ ah->av.sl, max_sl);
-+ return -EINVAL;
-+ }
-+
- memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
- memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
-
-diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
-index d82daff2d9bd5..58d14f1562b9a 100644
---- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
-+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
-@@ -270,7 +270,7 @@ static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
- struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
- int mtu = ib_mtu_enum_to_int(qp->path_mtu);
-
-- if (len > qp->max_inline_data || len > mtu) {
-+ if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
- ibdev_err(&hr_dev->ib_dev,
- "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
- len, qp->max_inline_data, mtu);
-@@ -4725,6 +4725,9 @@ static int check_cong_type(struct ib_qp *ibqp,
- {
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-+ if (ibqp->qp_type == IB_QPT_UD)
-+ hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
-+
- /* different congestion types match different configurations */
- switch (hr_dev->caps.cong_type) {
- case CONG_TYPE_DCQCN:
-@@ -4821,22 +4824,32 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct ib_device *ibdev = &hr_dev->ib_dev;
- const struct ib_gid_attr *gid_attr = NULL;
-+ u8 sl = rdma_ah_get_sl(&attr->ah_attr);
- int is_roce_protocol;
- u16 vlan_id = 0xffff;
- bool is_udp = false;
-+ u32 max_sl;
- u8 ib_port;
- u8 hr_port;
- int ret;
-
-+ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
-+ if (unlikely(sl > max_sl)) {
-+ ibdev_err_ratelimited(ibdev,
-+ "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
-+ sl, max_sl);
-+ return -EINVAL;
-+ }
-+
- /*
- * If free_mr_en of qp is set, it means that this qp comes from
- * free mr. This qp will perform the loopback operation.
- * In the loopback scenario, only sl needs to be set.
- */
- if (hr_qp->free_mr_en) {
-- hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
-+ hr_reg_write(context, QPC_SL, sl);
- hr_reg_clear(qpc_mask, QPC_SL);
-- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
-+ hr_qp->sl = sl;
- return 0;
- }
-
-@@ -4903,14 +4916,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
- memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
- memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
-
-- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
-- if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
-- ibdev_err(ibdev,
-- "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
-- hr_qp->sl, MAX_SERVICE_LEVEL);
-- return -EINVAL;
-- }
--
-+ hr_qp->sl = sl;
- hr_reg_write(context, QPC_SL, hr_qp->sl);
- hr_reg_clear(qpc_mask, QPC_SL);
-
-@@ -5804,7 +5810,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
-- ibdev_warn(ibdev, "send queue drained.\n");
-+ ibdev_dbg(ibdev, "send queue drained.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
-@@ -5819,10 +5825,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
- irq_work->queue_num, irq_work->sub_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
-- ibdev_warn(ibdev, "SRQ limit reach.\n");
-+ ibdev_dbg(ibdev, "SRQ limit reach.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
-- ibdev_warn(ibdev, "SRQ last wqe reach.\n");
-+ ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- ibdev_err(ibdev, "SRQ catas error.\n");
-diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
-index d9d546cdef525..4a9cd4d21bc99 100644
---- a/drivers/infiniband/hw/hns/hns_roce_main.c
-+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
-@@ -547,17 +547,12 @@ static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
- struct ib_device *device, u32 port_num)
- {
- struct hns_roce_dev *hr_dev = to_hr_dev(device);
-- u32 port = port_num - 1;
-
-- if (port > hr_dev->caps.num_ports) {
-+ if (port_num > hr_dev->caps.num_ports) {
- ibdev_err(device, "invalid port num.\n");
- return NULL;
- }
-
-- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
-- hr_dev->is_vf)
-- return NULL;
--
- return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
- ARRAY_SIZE(hns_roce_port_stats_descs),
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
-@@ -577,10 +572,6 @@ static int hns_roce_get_hw_stats(struct ib_device *device,
- if (port > hr_dev->caps.num_ports)
- return -EINVAL;
-
-- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
-- hr_dev->is_vf)
-- return -EOPNOTSUPP;
--
- ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
- &num_counters);
- if (ret) {
-@@ -634,8 +625,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
- .query_pkey = hns_roce_query_pkey,
- .query_port = hns_roce_query_port,
- .reg_user_mr = hns_roce_reg_user_mr,
-- .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
-- .get_hw_stats = hns_roce_get_hw_stats,
-
- INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
- INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
-@@ -644,6 +633,11 @@ static const struct ib_device_ops hns_roce_dev_ops = {
- INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
- };
-
-+static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
-+ .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
-+ .get_hw_stats = hns_roce_get_hw_stats,
-+};
-+
- static const struct ib_device_ops hns_roce_dev_mr_ops = {
- .rereg_user_mr = hns_roce_rereg_user_mr,
- };
-@@ -720,6 +714,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
- ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
-
-+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
-+ !hr_dev->is_vf)
-+ ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);
-+
- ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
- ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
- ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
-diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
-index cdc1c6de43a17..828b58534aa97 100644
---- a/drivers/infiniband/hw/hns/hns_roce_qp.c
-+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
-@@ -1064,7 +1064,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
- {
- struct hns_roce_ib_create_qp_resp resp = {};
- struct ib_device *ibdev = &hr_dev->ib_dev;
-- struct hns_roce_ib_create_qp ucmd;
-+ struct hns_roce_ib_create_qp ucmd = {};
- int ret;
-
- mutex_init(&hr_qp->mutex);
-diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
-index 555629b798b95..5d963abb7e609 100644
---- a/drivers/infiniband/hw/mlx5/main.c
-+++ b/drivers/infiniband/hw/mlx5/main.c
-@@ -4071,10 +4071,8 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
- return ret;
-
- ret = mlx5_mkey_cache_init(dev);
-- if (ret) {
-+ if (ret)
- mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
-- mlx5r_umr_resource_cleanup(dev);
-- }
- return ret;
- }
-
-diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
-index 78b96bfb4e6ac..2340baaba8e67 100644
---- a/drivers/infiniband/hw/mlx5/qp.c
-+++ b/drivers/infiniband/hw/mlx5/qp.c
-@@ -4045,6 +4045,30 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
- return tx_affinity;
- }
-
-+static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
-+ struct mlx5_core_dev *mdev)
-+{
-+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
-+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
-+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
-+ void *rqc;
-+
-+ if (!qp->rq.wqe_cnt)
-+ return 0;
-+
-+ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
-+ MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
-+
-+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-+
-+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
-+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
-+ MLX5_SET(rqc, rqc, counter_set_id, set_id);
-+
-+ return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
-+}
-+
- static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
- struct rdma_counter *counter)
- {
-@@ -4060,6 +4084,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
- else
- set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
-
-+ if (mqp->type == IB_QPT_RAW_PACKET)
-+ return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
-+
- base = &mqp->trans_qp.base;
- MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
- MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
-diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
-index f2e093b0b9982..1b45b1d3077de 100644
---- a/drivers/input/rmi4/rmi_bus.c
-+++ b/drivers/input/rmi4/rmi_bus.c
-@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
-
- device_del(&fn->dev);
- of_node_put(fn->dev.of_node);
-- put_device(&fn->dev);
-
- for (i = 0; i < fn->num_of_irqs; i++)
- irq_dispose_mapping(fn->irq[i]);
-
-+ put_device(&fn->dev);
- }
-
- /**
-diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
-index 2c16917ba1fda..e76356f91125f 100644
---- a/drivers/interconnect/qcom/icc-rpm.c
-+++ b/drivers/interconnect/qcom/icc-rpm.c
-@@ -497,7 +497,7 @@ regmap_done:
-
- ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
- if (ret)
-- return ret;
-+ goto err_disable_unprepare_clk;
-
- provider = &qp->provider;
- provider->dev = dev;
-@@ -512,13 +512,15 @@ regmap_done:
- /* If this fails, bus accesses will crash the platform! */
- ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
- if (ret)
-- return ret;
-+ goto err_disable_unprepare_clk;
-
- for (i = 0; i < num_nodes; i++) {
- size_t j;
-
- node = icc_node_create(qnodes[i]->id);
- if (IS_ERR(node)) {
-+ clk_bulk_disable_unprepare(qp->num_intf_clks,
-+ qp->intf_clks);
- ret = PTR_ERR(node);
- goto err_remove_nodes;
- }
-@@ -534,8 +536,11 @@ regmap_done:
- if (qnodes[i]->qos.ap_owned &&
- qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
- ret = qcom_icc_qos_set(node);
-- if (ret)
-- return ret;
-+ if (ret) {
-+ clk_bulk_disable_unprepare(qp->num_intf_clks,
-+ qp->intf_clks);
-+ goto err_remove_nodes;
-+ }
- }
-
- data->nodes[i] = node;
-@@ -563,6 +568,7 @@ err_deregister_provider:
- icc_provider_deregister(provider);
- err_remove_nodes:
- icc_nodes_remove(provider);
-+err_disable_unprepare_clk:
- clk_disable_unprepare(qp->bus_clk);
-
- return ret;
-diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
-index dc321bb86d0be..e97478bbc2825 100644
---- a/drivers/interconnect/qcom/osm-l3.c
-+++ b/drivers/interconnect/qcom/osm-l3.c
-@@ -3,6 +3,7 @@
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- */
-
-+#include <linux/args.h>
- #include <linux/bitfield.h>
- #include <linux/clk.h>
- #include <linux/interconnect-provider.h>
-@@ -78,7 +79,7 @@ enum {
- .name = #_name, \
- .id = _id, \
- .buswidth = _buswidth, \
-- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
-+ .num_links = COUNT_ARGS(__VA_ARGS__), \
- .links = { __VA_ARGS__ }, \
- }
-
-diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
-index bf800dd7d4ba1..a7392eb73d4a9 100644
---- a/drivers/interconnect/qcom/qdu1000.c
-+++ b/drivers/interconnect/qcom/qdu1000.c
-@@ -769,6 +769,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .num_nodes = 1,
- .nodes = { &ebi },
- };
-diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
-index d94ab9b39f3db..af2be15438403 100644
---- a/drivers/interconnect/qcom/sc7180.c
-+++ b/drivers/interconnect/qcom/sc7180.c
-@@ -1238,6 +1238,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
-index 6592839b4d94b..a626dbc719995 100644
---- a/drivers/interconnect/qcom/sc7280.c
-+++ b/drivers/interconnect/qcom/sc7280.c
-@@ -1285,6 +1285,7 @@ static struct qcom_icc_node srvc_snoc = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .num_nodes = 1,
- .nodes = { &ebi },
- };
-diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
-index 0fb4898dabcfe..bdd3471d4ac89 100644
---- a/drivers/interconnect/qcom/sc8180x.c
-+++ b/drivers/interconnect/qcom/sc8180x.c
-@@ -1345,6 +1345,7 @@ static struct qcom_icc_node slv_qup_core_2 = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .num_nodes = 1,
- .nodes = { &slv_ebi }
- };
-diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
-index b82c5493cbb56..0270f6c64481a 100644
---- a/drivers/interconnect/qcom/sc8280xp.c
-+++ b/drivers/interconnect/qcom/sc8280xp.c
-@@ -1712,6 +1712,7 @@ static struct qcom_icc_node srvc_snoc = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .num_nodes = 1,
- .nodes = { &ebi },
- };
-diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
-index 540a2108b77c1..907e1ff4ff817 100644
---- a/drivers/interconnect/qcom/sdm670.c
-+++ b/drivers/interconnect/qcom/sdm670.c
-@@ -1047,6 +1047,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
-index b9243c0aa626c..855802be93fea 100644
---- a/drivers/interconnect/qcom/sdm845.c
-+++ b/drivers/interconnect/qcom/sdm845.c
-@@ -1265,6 +1265,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
-index 49aed492e9b80..f41d7e19ba269 100644
---- a/drivers/interconnect/qcom/sm6350.c
-+++ b/drivers/interconnect/qcom/sm6350.c
-@@ -1164,6 +1164,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
-index c7c9cf7f746b0..edfe824cad353 100644
---- a/drivers/interconnect/qcom/sm8150.c
-+++ b/drivers/interconnect/qcom/sm8150.c
-@@ -1282,6 +1282,7 @@ static struct qcom_icc_node xs_sys_tcu_cfg = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
-index d4a4ecef11f01..661dc18d99dba 100644
---- a/drivers/interconnect/qcom/sm8250.c
-+++ b/drivers/interconnect/qcom/sm8250.c
-@@ -1397,6 +1397,7 @@ static struct qcom_icc_node qup2_core_slave = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
-index bdf75839e6d17..562322d4fc3c4 100644
---- a/drivers/interconnect/qcom/sm8350.c
-+++ b/drivers/interconnect/qcom/sm8350.c
-@@ -1356,6 +1356,7 @@ static struct qcom_icc_node qns_mem_noc_sf_disp = {
-
- static struct qcom_icc_bcm bcm_acv = {
- .name = "ACV",
-+ .enable_mask = BIT(3),
- .keepalive = false,
- .num_nodes = 1,
- .nodes = { &ebi },
-diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
-index a3414afe11b07..23cb80d62a9ab 100644
---- a/drivers/iommu/intel/dmar.c
-+++ b/drivers/iommu/intel/dmar.c
-@@ -1522,6 +1522,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- {
- struct qi_desc desc;
-
-+ /*
-+ * VT-d spec, section 4.3:
-+ *
-+ * Software is recommended to not submit any Device-TLB invalidation
-+ * requests while address remapping hardware is disabled.
-+ */
-+ if (!(iommu->gcmd & DMA_GCMD_TE))
-+ return;
-+
- if (mask) {
- addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
- desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
-@@ -1587,6 +1596,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
- struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
-
-+ /*
-+ * VT-d spec, section 4.3:
-+ *
-+ * Software is recommended to not submit any Device-TLB invalidation
-+ * requests while address remapping hardware is disabled.
-+ */
-+ if (!(iommu->gcmd & DMA_GCMD_TE))
-+ return;
-+
- desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
- QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
- QI_DEV_IOTLB_PFSID(pfsid);
-diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
-index 3685ba90ec88e..4c3707384bd92 100644
---- a/drivers/iommu/intel/iommu.c
-+++ b/drivers/iommu/intel/iommu.c
-@@ -2487,7 +2487,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
- return ret;
- }
-
-- iommu_enable_pci_caps(info);
-+ if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
-+ iommu_enable_pci_caps(info);
-
- return 0;
- }
-@@ -3922,8 +3923,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
- */
- static void domain_context_clear(struct device_domain_info *info)
- {
-- if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
-- return;
-+ if (!dev_is_pci(info->dev))
-+ domain_context_clear_one(info, info->bus, info->devfn);
-
- pci_for_each_dma_alias(to_pci_dev(info->dev),
- &domain_context_clear_one_cb, info);
-@@ -4928,7 +4929,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
- ver = (dev->device >> 8) & 0xff;
- if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
- ver != 0x4e && ver != 0x8a && ver != 0x98 &&
-- ver != 0x9a && ver != 0xa7)
-+ ver != 0x9a && ver != 0xa7 && ver != 0x7d)
- return;
-
- if (risky_device(dev))
-diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
-index 50a481c895b86..ac12f76c1212a 100644
---- a/drivers/iommu/intel/svm.c
-+++ b/drivers/iommu/intel/svm.c
-@@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
- rcu_read_unlock();
- }
-
-+static void intel_flush_svm_all(struct intel_svm *svm)
-+{
-+ struct device_domain_info *info;
-+ struct intel_svm_dev *sdev;
-+
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
-+ info = dev_iommu_priv_get(sdev->dev);
-+
-+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
-+ if (info->ats_enabled) {
-+ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-+ svm->pasid, sdev->qdep,
-+ 0, 64 - VTD_PAGE_SHIFT);
-+ quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
-+ svm->pasid, sdev->qdep);
-+ }
-+ }
-+ rcu_read_unlock();
-+}
-+
- /* Pages have been freed at this point */
- static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
- struct mm_struct *mm,
-@@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
- {
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
-+ if (start == 0 && end == -1UL) {
-+ intel_flush_svm_all(svm);
-+ return;
-+ }
-+
- intel_flush_svm_range(svm, start,
- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
- }
-diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
-index c146378c7d032..3a67e636287a7 100644
---- a/drivers/iommu/iommu.c
-+++ b/drivers/iommu/iommu.c
-@@ -479,11 +479,12 @@ static void iommu_deinit_device(struct device *dev)
- dev_iommu_free(dev);
- }
-
-+DEFINE_MUTEX(iommu_probe_device_lock);
-+
- static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
- {
- const struct iommu_ops *ops = dev->bus->iommu_ops;
- struct iommu_group *group;
-- static DEFINE_MUTEX(iommu_probe_device_lock);
- struct group_device *gdev;
- int ret;
-
-@@ -496,17 +497,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
- * probably be able to use device_lock() here to minimise the scope,
- * but for now enforcing a simple global ordering is fine.
- */
-- mutex_lock(&iommu_probe_device_lock);
-+ lockdep_assert_held(&iommu_probe_device_lock);
-
- /* Device is probed already if in a group */
-- if (dev->iommu_group) {
-- ret = 0;
-- goto out_unlock;
-- }
-+ if (dev->iommu_group)
-+ return 0;
-
- ret = iommu_init_device(dev, ops);
- if (ret)
-- goto out_unlock;
-+ return ret;
-
- group = dev->iommu_group;
- gdev = iommu_group_alloc_device(group, dev);
-@@ -542,7 +541,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
- list_add_tail(&group->entry, group_list);
- }
- mutex_unlock(&group->mutex);
-- mutex_unlock(&iommu_probe_device_lock);
-
- if (dev_is_pci(dev))
- iommu_dma_set_pci_32bit_workaround(dev);
-@@ -556,8 +554,6 @@ err_put_group:
- iommu_deinit_device(dev);
- mutex_unlock(&group->mutex);
- iommu_group_put(group);
--out_unlock:
-- mutex_unlock(&iommu_probe_device_lock);
-
- return ret;
- }
-@@ -567,7 +563,9 @@ int iommu_probe_device(struct device *dev)
- const struct iommu_ops *ops;
- int ret;
-
-+ mutex_lock(&iommu_probe_device_lock);
- ret = __iommu_probe_device(dev, NULL);
-+ mutex_unlock(&iommu_probe_device_lock);
- if (ret)
- return ret;
-
-@@ -1783,7 +1781,9 @@ static int probe_iommu_group(struct device *dev, void *data)
- struct list_head *group_list = data;
- int ret;
-
-+ mutex_lock(&iommu_probe_device_lock);
- ret = __iommu_probe_device(dev, group_list);
-+ mutex_unlock(&iommu_probe_device_lock);
- if (ret == -ENODEV)
- ret = 0;
-
-diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
-index 3a598182b7619..117a39ae2e4aa 100644
---- a/drivers/iommu/iommufd/io_pagetable.c
-+++ b/drivers/iommu/iommufd/io_pagetable.c
-@@ -221,6 +221,18 @@ static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area,
- return 0;
- }
-
-+static struct iopt_area *iopt_area_alloc(void)
-+{
-+ struct iopt_area *area;
-+
-+ area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
-+ if (!area)
-+ return NULL;
-+ RB_CLEAR_NODE(&area->node.rb);
-+ RB_CLEAR_NODE(&area->pages_node.rb);
-+ return area;
-+}
-+
- static int iopt_alloc_area_pages(struct io_pagetable *iopt,
- struct list_head *pages_list,
- unsigned long length, unsigned long *dst_iova,
-@@ -231,7 +243,7 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
- int rc = 0;
-
- list_for_each_entry(elm, pages_list, next) {
-- elm->area = kzalloc(sizeof(*elm->area), GFP_KERNEL_ACCOUNT);
-+ elm->area = iopt_area_alloc();
- if (!elm->area)
- return -ENOMEM;
- }
-@@ -1005,11 +1017,11 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
- iopt_area_start_byte(area, new_start) & (alignment - 1))
- return -EINVAL;
-
-- lhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
-+ lhs = iopt_area_alloc();
- if (!lhs)
- return -ENOMEM;
-
-- rhs = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT);
-+ rhs = iopt_area_alloc();
- if (!rhs) {
- rc = -ENOMEM;
- goto err_free_lhs;
-@@ -1048,6 +1060,16 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
- if (WARN_ON(rc))
- goto err_remove_lhs;
-
-+ /*
-+ * If the original area has filled a domain, domains_itree has to be
-+ * updated.
-+ */
-+ if (area->storage_domain) {
-+ interval_tree_remove(&area->pages_node, &pages->domains_itree);
-+ interval_tree_insert(&lhs->pages_node, &pages->domains_itree);
-+ interval_tree_insert(&rhs->pages_node, &pages->domains_itree);
-+ }
-+
- lhs->storage_domain = area->storage_domain;
- lhs->pages = area->pages;
- rhs->storage_domain = area->storage_domain;
-diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
-index 8d9aa297c117e..528f356238b34 100644
---- a/drivers/iommu/iommufd/pages.c
-+++ b/drivers/iommu/iommufd/pages.c
-@@ -1507,6 +1507,8 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
- area, domain, iopt_area_index(area),
- iopt_area_last_index(area));
-
-+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
-+ WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
- interval_tree_remove(&area->pages_node, &pages->domains_itree);
- iopt_area_unfill_domain(area, pages, area->storage_domain);
- area->storage_domain = NULL;
-diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
-index 157b286e36bf3..35ba090f3b5e2 100644
---- a/drivers/iommu/of_iommu.c
-+++ b/drivers/iommu/of_iommu.c
-@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
- const u32 *id)
- {
- const struct iommu_ops *ops = NULL;
-- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-+ struct iommu_fwspec *fwspec;
- int err = NO_IOMMU;
-
- if (!master_np)
- return NULL;
-
-+ /* Serialise to make dev->iommu stable under our potential fwspec */
-+ mutex_lock(&iommu_probe_device_lock);
-+ fwspec = dev_iommu_fwspec_get(dev);
- if (fwspec) {
-- if (fwspec->ops)
-+ if (fwspec->ops) {
-+ mutex_unlock(&iommu_probe_device_lock);
- return fwspec->ops;
--
-+ }
- /* In the deferred case, start again from scratch */
- iommu_fwspec_free(dev);
- }
-@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
- fwspec = dev_iommu_fwspec_get(dev);
- ops = fwspec->ops;
- }
-+ mutex_unlock(&iommu_probe_device_lock);
-+
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * probe for dev, replay it to get things in order.
-@@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
- if (start == phys->start && end == phys->end)
- return IOMMU_RESV_DIRECT;
-
-- dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
-+ dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
- &start, &end);
- return IOMMU_RESV_RESERVED;
- }
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index a8c89df1a9978..9a7a74239eabb 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -2379,12 +2379,12 @@ retry_baser:
- break;
- }
-
-+ if (!shr)
-+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
-+
- its_write_baser(its, baser, val);
- tmp = baser->val;
-
-- if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
-- tmp &= ~GITS_BASER_SHAREABILITY_MASK;
--
- if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
- /*
- * Shareability didn't stick. Just use
-@@ -2394,10 +2394,9 @@ retry_baser:
- * non-cacheable as well.
- */
- shr = tmp & GITS_BASER_SHAREABILITY_MASK;
-- if (!shr) {
-+ if (!shr)
- cache = GITS_BASER_nC;
-- gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
-- }
-+
- goto retry_baser;
- }
-
-@@ -2609,6 +2608,11 @@ static int its_alloc_tables(struct its_node *its)
- /* erratum 24313: ignore memory access type */
- cache = GITS_BASER_nCnB;
-
-+ if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
-+ cache = GITS_BASER_nC;
-+ shr = 0;
-+ }
-+
- for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- struct its_baser *baser = its->tables + i;
- u64 val = its_read_baser(its, baser);
-diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
-index e1484905b7bdb..5b7bc4fd9517c 100644
---- a/drivers/irqchip/irq-sifive-plic.c
-+++ b/drivers/irqchip/irq-sifive-plic.c
-@@ -532,17 +532,18 @@ done:
- }
-
- /*
-- * We can have multiple PLIC instances so setup cpuhp state only
-- * when context handler for current/boot CPU is present.
-+ * We can have multiple PLIC instances, so set up the cpuhp state
-+ * and register syscore operations only when a context handler
-+ * for the current/boot CPU is present.
- */
- handler = this_cpu_ptr(&plic_handlers);
- if (handler->present && !plic_cpuhp_setup_done) {
- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
- "irqchip/sifive/plic:starting",
- plic_starting_cpu, plic_dying_cpu);
-+ register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
- }
-- register_syscore_ops(&plic_irq_syscore_ops);
-
- pr_info("%pOFP: mapped %d interrupts with %d handlers for"
- " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
-diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
-index 974b84f6bd6af..ba1be15cfd8ea 100644
---- a/drivers/leds/led-class.c
-+++ b/drivers/leds/led-class.c
-@@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
- }
- static DEVICE_ATTR_RO(max_brightness);
-
--static ssize_t color_show(struct device *dev,
-- struct device_attribute *attr, char *buf)
--{
-- const char *color_text = "invalid";
-- struct led_classdev *led_cdev = dev_get_drvdata(dev);
--
-- if (led_cdev->color < LED_COLOR_ID_MAX)
-- color_text = led_colors[led_cdev->color];
--
-- return sysfs_emit(buf, "%s\n", color_text);
--}
--static DEVICE_ATTR_RO(color);
--
- #ifdef CONFIG_LEDS_TRIGGERS
- static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
- static struct bin_attribute *led_trigger_bin_attrs[] = {
-@@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
- static struct attribute *led_class_attrs[] = {
- &dev_attr_brightness.attr,
- &dev_attr_max_brightness.attr,
-- &dev_attr_color.attr,
- NULL,
- };
-
-diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
-index 419b710984ab6..2b3bf1353b707 100644
---- a/drivers/leds/leds-pwm.c
-+++ b/drivers/leds/leds-pwm.c
-@@ -53,7 +53,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
- duty = led_dat->pwmstate.period - duty;
-
- led_dat->pwmstate.duty_cycle = duty;
-- led_dat->pwmstate.enabled = duty > 0;
-+ led_dat->pwmstate.enabled = true;
- return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
- }
-
-diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
-index b8a95a917cfa4..b13a547e72c49 100644
---- a/drivers/leds/leds-turris-omnia.c
-+++ b/drivers/leds/leds-turris-omnia.c
-@@ -2,7 +2,7 @@
- /*
- * CZ.NIC's Turris Omnia LEDs driver
- *
-- * 2020 by Marek Behún <kabel@kernel.org>
-+ * 2020, 2023 by Marek Behún <kabel@kernel.org>
- */
-
- #include <linux/i2c.h>
-@@ -41,6 +41,37 @@ struct omnia_leds {
- struct omnia_led leds[];
- };
-
-+static int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd, u8 val)
-+{
-+ u8 buf[2] = { cmd, val };
-+
-+ return i2c_master_send(client, buf, sizeof(buf));
-+}
-+
-+static int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd)
-+{
-+ struct i2c_msg msgs[2];
-+ u8 reply;
-+ int ret;
-+
-+ msgs[0].addr = client->addr;
-+ msgs[0].flags = 0;
-+ msgs[0].len = 1;
-+ msgs[0].buf = &cmd;
-+ msgs[1].addr = client->addr;
-+ msgs[1].flags = I2C_M_RD;
-+ msgs[1].len = 1;
-+ msgs[1].buf = &reply;
-+
-+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-+ if (likely(ret == ARRAY_SIZE(msgs)))
-+ return reply;
-+ else if (ret < 0)
-+ return ret;
-+ else
-+ return -EIO;
-+}
-+
- static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
- enum led_brightness brightness)
- {
-@@ -64,7 +95,7 @@ static int omnia_led_brightness_set_blocking(struct led_classdev *cdev,
- if (buf[2] || buf[3] || buf[4])
- state |= CMD_LED_STATE_ON;
-
-- ret = i2c_smbus_write_byte_data(leds->client, CMD_LED_STATE, state);
-+ ret = omnia_cmd_write_u8(leds->client, CMD_LED_STATE, state);
- if (ret >= 0 && (state & CMD_LED_STATE_ON))
- ret = i2c_master_send(leds->client, buf, 5);
-
-@@ -114,9 +145,9 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
- cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
-
- /* put the LED into software mode */
-- ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
-- CMD_LED_MODE_LED(led->reg) |
-- CMD_LED_MODE_USER);
-+ ret = omnia_cmd_write_u8(client, CMD_LED_MODE,
-+ CMD_LED_MODE_LED(led->reg) |
-+ CMD_LED_MODE_USER);
- if (ret < 0) {
- dev_err(dev, "Cannot set LED %pOF to software mode: %i\n", np,
- ret);
-@@ -124,8 +155,8 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
- }
-
- /* disable the LED */
-- ret = i2c_smbus_write_byte_data(client, CMD_LED_STATE,
-- CMD_LED_STATE_LED(led->reg));
-+ ret = omnia_cmd_write_u8(client, CMD_LED_STATE,
-+ CMD_LED_STATE_LED(led->reg));
- if (ret < 0) {
- dev_err(dev, "Cannot set LED %pOF brightness: %i\n", np, ret);
- return ret;
-@@ -158,7 +189,7 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
- struct i2c_client *client = to_i2c_client(dev);
- int ret;
-
-- ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
-+ ret = omnia_cmd_read_u8(client, CMD_LED_GET_BRIGHTNESS);
-
- if (ret < 0)
- return ret;
-@@ -179,8 +210,7 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
- if (brightness > 100)
- return -EINVAL;
-
-- ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
-- (u8)brightness);
-+ ret = omnia_cmd_write_u8(client, CMD_LED_SET_BRIGHTNESS, brightness);
-
- return ret < 0 ? ret : count;
- }
-@@ -237,8 +267,8 @@ static void omnia_leds_remove(struct i2c_client *client)
- u8 buf[5];
-
- /* put all LEDs into default (HW triggered) mode */
-- i2c_smbus_write_byte_data(client, CMD_LED_MODE,
-- CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
-+ omnia_cmd_write_u8(client, CMD_LED_MODE,
-+ CMD_LED_MODE_LED(OMNIA_BOARD_LEDS));
-
- /* set all LEDs color to [255, 255, 255] */
- buf[0] = CMD_LED_COLOR;
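omnia_cmd_read_u8() above is the textbook write-command-then-read-reply exchange, issued as one combined transfer so no other bus traffic can land between the two halves, with i2c_transfer()'s messages-completed return value mapped to reply / -errno / -EIO. For comparison, roughly the same transaction from userspace through /dev/i2c-* (assumes the i2c-dev module; the device path and addresses in the usage line are examples):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* Write one command byte, then read one reply byte, as a single
 * combined transaction (repeated START, no STOP in between). */
static int cmd_read_u8(int fd, unsigned char addr, unsigned char cmd)
{
	unsigned char reply;
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &cmd },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &reply },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	if (ioctl(fd, I2C_RDWR, &xfer) < 0)
		return -1;
	return reply;
}

/* usage: int fd = open("/dev/i2c-1", O_RDWR); cmd_read_u8(fd, 0x2a, 0x08); */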
-diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
-index 8af4f9bb9cde8..05848a2fecff6 100644
---- a/drivers/leds/trigger/ledtrig-cpu.c
-+++ b/drivers/leds/trigger/ledtrig-cpu.c
-@@ -130,7 +130,7 @@ static int ledtrig_prepare_down_cpu(unsigned int cpu)
-
- static int __init ledtrig_cpu_init(void)
- {
-- int cpu;
-+ unsigned int cpu;
- int ret;
-
- /* Supports up to 9999 cpu cores */
-@@ -152,7 +152,7 @@ static int __init ledtrig_cpu_init(void)
- if (cpu >= 8)
- continue;
-
-- snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
-+ snprintf(trig->name, MAX_NAME_LEN, "cpu%u", cpu);
-
- led_trigger_register_simple(trig->name, &trig->_trig);
- }
-diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
-index 58f3352539e8e..e358e77e4b38f 100644
---- a/drivers/leds/trigger/ledtrig-netdev.c
-+++ b/drivers/leds/trigger/ledtrig-netdev.c
-@@ -221,6 +221,9 @@ static ssize_t device_name_show(struct device *dev,
- static int set_device_name(struct led_netdev_data *trigger_data,
- const char *name, size_t size)
- {
-+ if (size >= IFNAMSIZ)
-+ return -EINVAL;
-+
- cancel_delayed_work_sync(&trigger_data->work);
-
- mutex_lock(&trigger_data->lock);
-@@ -263,9 +266,6 @@ static ssize_t device_name_store(struct device *dev,
- struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
- int ret;
-
-- if (size >= IFNAMSIZ)
-- return -EINVAL;
--
- ret = set_device_name(trigger_data, buf, size);
-
- if (ret < 0)
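Moving the IFNAMSIZ check from the sysfs store into set_device_name() means invalid input is rejected before cancel_delayed_work_sync() and the trigger lock are touched, and any other caller of the helper inherits the check. The general rule, reduced to a sketch (NAME_MAX_LEN stands in for IFNAMSIZ):

#include <errno.h>
#include <string.h>

#define NAME_MAX_LEN 16		/* stands in for IFNAMSIZ */

/* Validate first, mutate second: no side effects for bad input. */
static int set_name(char dst[NAME_MAX_LEN], const char *src, size_t len)
{
	if (len >= NAME_MAX_LEN)
		return -EINVAL;	/* reject before touching any state */
	memcpy(dst, src, len);
	dst[len] = '\0';
	return 0;
}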
-diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
-index 0cac5bead84fa..d4eec09009809 100644
---- a/drivers/mcb/mcb-core.c
-+++ b/drivers/mcb/mcb-core.c
-@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
- return 0;
-
- out:
-+ put_device(&dev->dev);
-
- return ret;
- }
-diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
-index 656b6b71c7682..1ae37e693de04 100644
---- a/drivers/mcb/mcb-parse.c
-+++ b/drivers/mcb/mcb-parse.c
-@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
- return 0;
-
- err:
-- put_device(&mdev->dev);
-+ mcb_free_dev(mdev);
-
- return ret;
- }
-diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index fd121a61f17cc..3084c57248f69 100644
---- a/drivers/md/bcache/btree.c
-+++ b/drivers/md/bcache/btree.c
-@@ -1363,7 +1363,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
- memset(new_nodes, 0, sizeof(new_nodes));
- closure_init_stack(&cl);
-
-- while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
-+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
- keys += r[nodes++].keys;
-
- blocks = btree_default_blocks(b->c) * 2 / 3;
-@@ -1510,7 +1510,7 @@ out_nocoalesce:
- bch_keylist_free(&keylist);
-
- for (i = 0; i < nodes; i++)
-- if (!IS_ERR(new_nodes[i])) {
-+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
- btree_node_free(new_nodes[i]);
- rw_unlock(true, new_nodes[i]);
- }
-@@ -1527,6 +1527,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
- return 0;
-
- n = btree_node_alloc_replacement(replace, NULL);
-+ if (IS_ERR(n))
-+ return 0;
-
- /* recheck reserve after allocating replacement node */
- if (btree_check_reserve(b, NULL)) {
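The bcache hunks above swap IS_ERR() for IS_ERR_OR_NULL() because these node arrays can hold NULL slots that were never filled in, and IS_ERR() treats NULL as a perfectly valid pointer. The kernel's encoding fits in a few lines; this userspace rendering mirrors include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Error pointers: the top MAX_ERRNO addresses encode a negative errno. */
#define IS_ERR_VALUE(x)   ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(err)      ((void *)(long)(err))
#define IS_ERR(p)         IS_ERR_VALUE((unsigned long)(p))
#define IS_ERR_OR_NULL(p) (!(p) || IS_ERR_VALUE((unsigned long)(p)))

int main(void)
{
	void *empty_slot = NULL;
	void *failed     = ERR_PTR(-12);	/* -ENOMEM */

	/* An IS_ERR()-only guard passes the NULL slot through and the
	 * caller dereferences it; IS_ERR_OR_NULL() catches both cases. */
	printf("NULL:  IS_ERR=%d IS_ERR_OR_NULL=%d\n",
	       IS_ERR(empty_slot), IS_ERR_OR_NULL(empty_slot));
	printf("error: IS_ERR=%d IS_ERR_OR_NULL=%d\n",
	       IS_ERR(failed), IS_ERR_OR_NULL(failed));
	return 0;
}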
-diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
-index 0e2c1880f60b2..18ac98dc89223 100644
---- a/drivers/md/bcache/sysfs.c
-+++ b/drivers/md/bcache/sysfs.c
-@@ -1103,7 +1103,7 @@ SHOW(__bch_cache)
- sum += INITIAL_PRIO - cached[i];
-
- if (n)
-- do_div(sum, n);
-+ sum = div64_u64(sum, n);
-
- for (i = 0; i < ARRAY_SIZE(q); i++)
- q[i] = INITIAL_PRIO - cached[n * (i + 1) /
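The one-liner above is subtle: do_div() divides a 64-bit value by a *32-bit* divisor (modifying the dividend in place), so a genuinely 64-bit n is silently truncated; div64_u64() is the 64-by-64 variant. The truncation, demonstrated in userspace (fake_do_div only mimics the contract, it is not the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Mimic do_div()'s contract: the divisor is only 32 bits wide. */
static uint32_t fake_do_div(uint64_t *dividend, uint32_t divisor)
{
	uint32_t rem = (uint32_t)(*dividend % divisor);

	*dividend /= divisor;
	return rem;
}

int main(void)
{
	uint64_t sum = 1ULL << 40;
	uint64_t n   = (1ULL << 32) + 2;	/* does not fit in 32 bits */
	uint64_t a   = sum;

	fake_do_div(&a, (uint32_t)n);		/* divisor truncates to 2 */

	printf("do_div-style: %llu  div64_u64-style: %llu\n",
	       (unsigned long long)a, (unsigned long long)(sum / n));
	return 0;
}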
-diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
-index 24c049067f61a..d4432b3a6f96e 100644
---- a/drivers/md/bcache/writeback.c
-+++ b/drivers/md/bcache/writeback.c
-@@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
- void bch_sectors_dirty_init(struct bcache_device *d)
- {
- int i;
-+ struct btree *b = NULL;
- struct bkey *k = NULL;
- struct btree_iter iter;
- struct sectors_dirty_init op;
- struct cache_set *c = d->c;
- struct bch_dirty_init_state state;
-
-+retry_lock:
-+ b = c->root;
-+ rw_lock(0, b, b->level);
-+ if (b != c->root) {
-+ rw_unlock(0, b);
-+ goto retry_lock;
-+ }
-+
- /* Just count root keys if no leaf node */
-- rw_lock(0, c->root, c->root->level);
- if (c->root->level == 0) {
- bch_btree_op_init(&op.op, -1);
- op.inode = d->id;
- op.count = 0;
-
- for_each_key_filter(&c->root->keys,
-- k, &iter, bch_ptr_invalid)
-+ k, &iter, bch_ptr_invalid) {
-+ if (KEY_INODE(k) != op.inode)
-+ continue;
- sectors_dirty_init_fn(&op.op, c->root, k);
-+ }
-
-- rw_unlock(0, c->root);
-+ rw_unlock(0, b);
- return;
- }
-
-@@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
- if (atomic_read(&state.enough))
- break;
-
-+ atomic_inc(&state.started);
- state.infos[i].state = &state;
- state.infos[i].thread =
- kthread_run(bch_dirty_init_thread, &state.infos[i],
- "bch_dirtcnt[%d]", i);
- if (IS_ERR(state.infos[i].thread)) {
- pr_err("fails to run thread bch_dirty_init[%d]\n", i);
-+ atomic_dec(&state.started);
- for (--i; i >= 0; i--)
- kthread_stop(state.infos[i].thread);
- goto out;
- }
-- atomic_inc(&state.started);
- }
-
- out:
- /* Must wait for all threads to stop. */
- wait_event(state.wait, atomic_read(&state.started) == 0);
-- rw_unlock(0, c->root);
-+ rw_unlock(0, b);
- }
-
- void bch_cached_dev_writeback_init(struct cached_dev *dc)
-diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
-index bc309e41d074a..486e1180cc3a3 100644
---- a/drivers/md/dm-bufio.c
-+++ b/drivers/md/dm-bufio.c
-@@ -254,7 +254,7 @@ enum evict_result {
-
- typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
-
--static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
-+static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
- {
- unsigned long tested = 0;
- struct list_head *h = lru->cursor;
-@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
-
- h = h->next;
-
-- cond_resched();
-+ if (!no_sleep)
-+ cond_resched();
- }
-
- return NULL;
-@@ -382,7 +383,10 @@ struct dm_buffer {
- */
-
- struct buffer_tree {
-- struct rw_semaphore lock;
-+ union {
-+ struct rw_semaphore lock;
-+ rwlock_t spinlock;
-+ } u;
- struct rb_root root;
- } ____cacheline_aligned_in_smp;
-
-@@ -393,9 +397,12 @@ struct dm_buffer_cache {
- * on the locks.
- */
- unsigned int num_locks;
-+ bool no_sleep;
- struct buffer_tree trees[];
- };
-
-+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
-+
- static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
- {
- return dm_hash_locks_index(block, num_locks);
-@@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
-
- static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
- {
-- down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
-+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
-+ read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
-+ else
-+ down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
- }
-
- static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
- {
-- up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
-+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
-+ read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
-+ else
-+ up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
- }
-
- static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
- {
-- down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
-+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
-+ write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
-+ else
-+ down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
- }
-
- static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
- {
-- up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
-+ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
-+ write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
-+ else
-+ up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
- }
-
- /*
-@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
-
- static void __lh_lock(struct lock_history *lh, unsigned int index)
- {
-- if (lh->write)
-- down_write(&lh->cache->trees[index].lock);
-- else
-- down_read(&lh->cache->trees[index].lock);
-+ if (lh->write) {
-+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
-+ write_lock_bh(&lh->cache->trees[index].u.spinlock);
-+ else
-+ down_write(&lh->cache->trees[index].u.lock);
-+ } else {
-+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
-+ read_lock_bh(&lh->cache->trees[index].u.spinlock);
-+ else
-+ down_read(&lh->cache->trees[index].u.lock);
-+ }
- }
-
- static void __lh_unlock(struct lock_history *lh, unsigned int index)
- {
-- if (lh->write)
-- up_write(&lh->cache->trees[index].lock);
-- else
-- up_read(&lh->cache->trees[index].lock);
-+ if (lh->write) {
-+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
-+ write_unlock_bh(&lh->cache->trees[index].u.spinlock);
-+ else
-+ up_write(&lh->cache->trees[index].u.lock);
-+ } else {
-+ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
-+ read_unlock_bh(&lh->cache->trees[index].u.spinlock);
-+ else
-+ up_read(&lh->cache->trees[index].u.lock);
-+ }
- }
-
- /*
-@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
- return le_to_buffer(le);
- }
-
--static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
-+static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
- {
- unsigned int i;
-
- bc->num_locks = num_locks;
-+ bc->no_sleep = no_sleep;
-
- for (i = 0; i < bc->num_locks; i++) {
-- init_rwsem(&bc->trees[i].lock);
-+ if (no_sleep)
-+ rwlock_init(&bc->trees[i].u.spinlock);
-+ else
-+ init_rwsem(&bc->trees[i].u.lock);
- bc->trees[i].root = RB_ROOT;
- }
-
-@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
- struct lru_entry *le;
- struct dm_buffer *b;
-
-- le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
-+ le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
- if (!le)
- return NULL;
-
-@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
- struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
-
- while (true) {
-- le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
-+ le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
- if (!le)
- break;
-
-@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
- {
- unsigned int i;
-
-+ BUG_ON(bc->no_sleep);
- for (i = 0; i < bc->num_locks; i++) {
-- down_write(&bc->trees[i].lock);
-+ down_write(&bc->trees[i].u.lock);
- __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
-- up_write(&bc->trees[i].lock);
-+ up_write(&bc->trees[i].u.lock);
- }
- }
-
-@@ -979,8 +1017,6 @@ struct dm_bufio_client {
- struct dm_buffer_cache cache; /* must be last member */
- };
-
--static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
--
- /*----------------------------------------------------------------*/
-
- #define dm_bufio_in_request() (!!current->bio_list)
-@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
- if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
-
-- wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
-+ if (nf != NF_GET) /* we already tested this condition above */
-+ wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
-
- if (b->read_error) {
- int error = blk_status_to_errno(b->read_error);
-@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
- r = -ENOMEM;
- goto bad_client;
- }
-- cache_init(&c->cache, num_locks);
-+ cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
-
- c->bdev = bdev;
- c->block_size = block_size;
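The dm-bufio rework above lets one cache run with either sleeping rwsems or bh-disabling rwlocks, chosen once at cache_init() time, with a union so both variants share storage and a static key keeping the common path cheap. A userspace sketch of the union-plus-flag shape (pthread primitives substituted for the kernel's, so there is no static-key equivalent and the spinlock has no reader/writer split):

#include <pthread.h>
#include <stdbool.h>

struct cache {
	bool no_sleep;			/* fixed at init time, never changes */
	union {
		pthread_rwlock_t rwlock;	/* "sleeping" variant */
		pthread_spinlock_t spin;	/* "no sleep" variant */
	} u;
};

void cache_init(struct cache *c, bool no_sleep)
{
	c->no_sleep = no_sleep;
	if (no_sleep)
		pthread_spin_init(&c->u.spin, PTHREAD_PROCESS_PRIVATE);
	else
		pthread_rwlock_init(&c->u.rwlock, NULL);
}

/* Every lock/unlock site must branch on the same flag. */
void cache_read_lock(struct cache *c)
{
	if (c->no_sleep)
		pthread_spin_lock(&c->u.spin);
	else
		pthread_rwlock_rdlock(&c->u.rwlock);
}

void cache_read_unlock(struct cache *c)
{
	if (c->no_sleep)
		pthread_spin_unlock(&c->u.spin);
	else
		pthread_rwlock_unlock(&c->u.rwlock);
}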
-diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
-index 5315fd261c23b..cef9353370b20 100644
---- a/drivers/md/dm-crypt.c
-+++ b/drivers/md/dm-crypt.c
-@@ -1699,11 +1699,17 @@ retry:
- order = min(order, remaining_order);
-
- while (order > 0) {
-+ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
-+ (1 << order) > dm_crypt_pages_per_client))
-+ goto decrease_order;
- pages = alloc_pages(gfp_mask
- | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
- order);
-- if (likely(pages != NULL))
-+ if (likely(pages != NULL)) {
-+ percpu_counter_add(&cc->n_allocated_pages, 1 << order);
- goto have_pages;
-+ }
-+decrease_order:
- order--;
- }
-
-@@ -1741,10 +1747,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
-
- if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
- bio_for_each_folio_all(fi, clone) {
-- if (folio_test_large(fi.folio))
-+ if (folio_test_large(fi.folio)) {
-+ percpu_counter_sub(&cc->n_allocated_pages,
-+ 1 << folio_order(fi.folio));
- folio_put(fi.folio);
-- else
-+ } else {
- mempool_free(&fi.folio->page, &cc->page_pool);
-+ }
- }
- }
- }
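The dm-crypt hunks keep high-order allocations inside the per-client page budget: check the counter before allocating, add after a successful allocation, subtract when the large folio is freed. The skeleton of that accounting, with the dm-crypt specifics stripped away (plain C11 atomics instead of the kernel's per-CPU counter, so the pre-check here is exact where the kernel's is deliberately approximate; names are illustrative):

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

static atomic_long pages_allocated;
static const long page_limit = 1024;	/* per-client budget, in pages */

/* Try one high-order allocation without busting the budget;
 * the caller retries with a smaller order on NULL. */
void *alloc_order(unsigned int order)
{
	long want = 1L << order;
	void *p;

	if (atomic_load(&pages_allocated) + want > page_limit)
		return NULL;			/* over budget: decrease order */

	p = malloc((size_t)want * 4096);	/* stand-in for alloc_pages() */
	if (p)
		atomic_fetch_add(&pages_allocated, want);
	return p;
}

void free_order(void *p, unsigned int order)
{
	free(p);
	atomic_fetch_sub(&pages_allocated, 1L << order);
}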
-diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
-index 7433525e59856..3726fae3006e3 100644
---- a/drivers/md/dm-delay.c
-+++ b/drivers/md/dm-delay.c
-@@ -31,7 +31,7 @@ struct delay_c {
- struct workqueue_struct *kdelayd_wq;
- struct work_struct flush_expired_bios;
- struct list_head delayed_bios;
-- atomic_t may_delay;
-+ bool may_delay;
-
- struct delay_class read;
- struct delay_class write;
-@@ -192,7 +192,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
- INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
- INIT_LIST_HEAD(&dc->delayed_bios);
- mutex_init(&dc->timer_lock);
-- atomic_set(&dc->may_delay, 1);
-+ dc->may_delay = true;
- dc->argc = argc;
-
- ret = delay_class_ctr(ti, &dc->read, argv);
-@@ -247,7 +247,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
- struct dm_delay_info *delayed;
- unsigned long expires = 0;
-
-- if (!c->delay || !atomic_read(&dc->may_delay))
-+ if (!c->delay)
- return DM_MAPIO_REMAPPED;
-
- delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
-@@ -256,6 +256,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
- delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
-
- mutex_lock(&delayed_bios_lock);
-+ if (unlikely(!dc->may_delay)) {
-+ mutex_unlock(&delayed_bios_lock);
-+ return DM_MAPIO_REMAPPED;
-+ }
- c->ops++;
- list_add_tail(&delayed->list, &dc->delayed_bios);
- mutex_unlock(&delayed_bios_lock);
-@@ -269,7 +273,10 @@ static void delay_presuspend(struct dm_target *ti)
- {
- struct delay_c *dc = ti->private;
-
-- atomic_set(&dc->may_delay, 0);
-+ mutex_lock(&delayed_bios_lock);
-+ dc->may_delay = false;
-+ mutex_unlock(&delayed_bios_lock);
-+
- del_timer_sync(&dc->delay_timer);
- flush_bios(flush_delayed_bios(dc, 1));
- }
-@@ -278,7 +285,7 @@ static void delay_resume(struct dm_target *ti)
- {
- struct delay_c *dc = ti->private;
-
-- atomic_set(&dc->may_delay, 1);
-+ dc->may_delay = true;
- }
-
- static int delay_map(struct dm_target *ti, struct bio *bio)
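The dm-delay fix turns may_delay from an atomic read outside the lock into a plain bool tested under delayed_bios_lock: the atomic version let a bio be queued after presuspend had already flushed the list. A sketch of the closed race (pthreads standing in for the kernel mutex; the queue itself is reduced to a counter):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static bool may_delay = true;
static int queued;		/* stand-in for the delayed-bio list */

/* Producer: decide *and* enqueue under the same lock, so a
 * concurrent suspend cannot slip in between the two steps. */
bool try_enqueue(void)
{
	pthread_mutex_lock(&list_lock);
	if (!may_delay) {
		pthread_mutex_unlock(&list_lock);
		return false;	/* caller remaps immediately */
	}
	queued++;
	pthread_mutex_unlock(&list_lock);
	return true;
}

/* Suspend: flip the flag under the lock, then drain; nothing can
 * be added once the flag has been observed false. */
void presuspend(void)
{
	pthread_mutex_lock(&list_lock);
	may_delay = false;
	pthread_mutex_unlock(&list_lock);
	/* ... flush whatever was queued ... */
}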
-diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
-index 3ef9f018da60c..b475200d8586a 100644
---- a/drivers/md/dm-verity-fec.c
-+++ b/drivers/md/dm-verity-fec.c
-@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
- */
- static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
- {
-- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
-+ return (struct dm_verity_fec_io *)
-+ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
- }
-
- /*
-@@ -185,7 +186,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
- {
- if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
- data, 1 << v->data_dev_block_bits,
-- verity_io_real_digest(v, io))))
-+ verity_io_real_digest(v, io), true)))
- return 0;
-
- return memcmp(verity_io_real_digest(v, io), want_digest,
-@@ -386,7 +387,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
- /* Always re-validate the corrected block against the expected hash */
- r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
- 1 << v->data_dev_block_bits,
-- verity_io_real_digest(v, io));
-+ verity_io_real_digest(v, io), true);
- if (unlikely(r < 0))
- return r;
-
-diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
-index 26adcfea03022..14e58ae705218 100644
---- a/drivers/md/dm-verity-target.c
-+++ b/drivers/md/dm-verity-target.c
-@@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
- * Wrapper for crypto_ahash_init, which handles verity salting.
- */
- static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
-- struct crypto_wait *wait)
-+ struct crypto_wait *wait, bool may_sleep)
- {
- int r;
-
- ahash_request_set_tfm(req, v->tfm);
-- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
-- CRYPTO_TFM_REQ_MAY_BACKLOG,
-- crypto_req_done, (void *)wait);
-+ ahash_request_set_callback(req,
-+ may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
-+ crypto_req_done, (void *)wait);
- crypto_init_wait(wait);
-
- r = crypto_wait_req(crypto_ahash_init(req), wait);
-
- if (unlikely(r < 0)) {
-- DMERR("crypto_ahash_init failed: %d", r);
-+ if (r != -ENOMEM)
-+ DMERR("crypto_ahash_init failed: %d", r);
- return r;
- }
-
-@@ -179,12 +180,12 @@ out:
- }
-
- int verity_hash(struct dm_verity *v, struct ahash_request *req,
-- const u8 *data, size_t len, u8 *digest)
-+ const u8 *data, size_t len, u8 *digest, bool may_sleep)
- {
- int r;
- struct crypto_wait wait;
-
-- r = verity_hash_init(v, req, &wait);
-+ r = verity_hash_init(v, req, &wait, may_sleep);
- if (unlikely(r < 0))
- goto out;
-
-@@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
-
- r = verity_hash(v, verity_io_hash_req(v, io),
- data, 1 << v->hash_dev_block_bits,
-- verity_io_real_digest(v, io));
-+ verity_io_real_digest(v, io), !io->in_tasklet);
- if (unlikely(r < 0))
- goto release_ret_r;
-
-@@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
- continue;
- }
-
-- r = verity_hash_init(v, req, &wait);
-+ r = verity_hash_init(v, req, &wait, !io->in_tasklet);
- if (unlikely(r < 0))
- return r;
-
-@@ -641,7 +642,6 @@ static void verity_work(struct work_struct *w)
-
- io->in_tasklet = false;
-
-- verity_fec_init_io(io);
- verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
- }
-
-@@ -652,7 +652,7 @@ static void verity_tasklet(unsigned long data)
-
- io->in_tasklet = true;
- err = verity_verify_io(io);
-- if (err == -EAGAIN) {
-+ if (err == -EAGAIN || err == -ENOMEM) {
- /* fallback to retrying with work-queue */
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
-@@ -667,7 +667,9 @@ static void verity_end_io(struct bio *bio)
- struct dm_verity_io *io = bio->bi_private;
-
- if (bio->bi_status &&
-- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
-+ (!verity_fec_is_enabled(io->v) ||
-+ verity_is_system_shutting_down() ||
-+ (bio->bi_opf & REQ_RAHEAD))) {
- verity_finish_io(io, bio->bi_status);
- return;
- }
-@@ -791,6 +793,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
- bio->bi_private = io;
- io->iter = bio->bi_iter;
-
-+ verity_fec_init_io(io);
-+
- verity_submit_prefetch(v, io);
-
- submit_bio_noacct(bio);
-@@ -1033,7 +1037,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
- goto out;
-
- r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
-- v->zero_digest);
-+ v->zero_digest, true);
-
- out:
- kfree(req);
-diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
-index 2f555b4203679..f9d522c870e61 100644
---- a/drivers/md/dm-verity.h
-+++ b/drivers/md/dm-verity.h
-@@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
- return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
- }
-
--static inline u8 *verity_io_digest_end(struct dm_verity *v,
-- struct dm_verity_io *io)
--{
-- return verity_io_want_digest(v, io) + v->digest_size;
--}
--
- extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
- struct bvec_iter *iter,
- int (*process)(struct dm_verity *v,
-@@ -128,7 +122,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
- u8 *data, size_t len));
-
- extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
-- const u8 *data, size_t len, u8 *digest);
-+ const u8 *data, size_t len, u8 *digest, bool may_sleep);
-
- extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
- sector_t block, u8 *digest, bool *is_zero);
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index a104a025084dc..2748b0b424cfe 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -449,7 +449,7 @@ void mddev_suspend(struct mddev *mddev)
- set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
- percpu_ref_kill(&mddev->active_io);
-
-- if (mddev->pers->prepare_suspend)
-+ if (mddev->pers && mddev->pers->prepare_suspend)
- mddev->pers->prepare_suspend(mddev);
-
- wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
-@@ -8669,7 +8669,8 @@ static void md_end_clone_io(struct bio *bio)
- struct bio *orig_bio = md_io_clone->orig_bio;
- struct mddev *mddev = md_io_clone->mddev;
-
-- orig_bio->bi_status = bio->bi_status;
-+ if (bio->bi_status && !orig_bio->bi_status)
-+ orig_bio->bi_status = bio->bi_status;
-
- if (md_io_clone->start_time)
- bio_end_io_acct(orig_bio, md_io_clone->start_time);
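The md_end_clone_io fix above keeps the first failure: previously every clone completion overwrote orig_bio->bi_status, so a clone that finished OK last would erase an error recorded earlier. The kernel does the cheap unlocked check shown in the hunk; a race-free rendering of the same idea with one compare-and-swap (clone_endio is an illustrative name):

#include <stdatomic.h>

static _Atomic int first_error;		/* 0 = success so far */

/* Record only the first non-zero status; later completions,
 * successful or failed, cannot overwrite it. */
void clone_endio(int status)
{
	int expected = 0;

	if (status)
		atomic_compare_exchange_strong(&first_error, &expected, status);
}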
-diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile
-index 26d2bc7783944..a51e98ab4958d 100644
---- a/drivers/media/cec/platform/Makefile
-+++ b/drivers/media/cec/platform/Makefile
-@@ -6,7 +6,7 @@
- # Please keep it in alphabetic order
- obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
- obj-$(CONFIG_CEC_GPIO) += cec-gpio/
--obj-$(CONFIG_CEC_MESON_AO) += meson/
-+obj-y += meson/
- obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
- obj-$(CONFIG_CEC_SECO) += seco/
- obj-$(CONFIG_CEC_STI) += sti/
-diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
-index 74ff833ff48ca..53b443be5a59e 100644
---- a/drivers/media/i2c/Kconfig
-+++ b/drivers/media/i2c/Kconfig
-@@ -99,6 +99,7 @@ config VIDEO_IMX214
-
- config VIDEO_IMX219
- tristate "Sony IMX219 sensor support"
-+ select V4L2_CCI_I2C
- help
- This is a Video4Linux2 sensor driver for the Sony
- IMX219 camera.
-diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
-index 49e0d9a095302..6f8fbd82e21c8 100644
---- a/drivers/media/i2c/ccs/ccs-core.c
-+++ b/drivers/media/i2c/ccs/ccs-core.c
-@@ -3097,7 +3097,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
- try_fmt->code = sensor->internal_csi_format->code;
- try_fmt->field = V4L2_FIELD_NONE;
-
-- if (ssd != sensor->pixel_array)
-+ if (ssd == sensor->pixel_array)
- continue;
-
- try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
-diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
-index 5838fcda92fd4..0b1a64958d714 100644
---- a/drivers/media/i2c/ccs/ccs-quirk.h
-+++ b/drivers/media/i2c/ccs/ccs-quirk.h
-@@ -32,12 +32,10 @@ struct ccs_sensor;
- * @reg: Pointer to the register to access
- * @value: Register value, set by the caller on write, or
- * by the quirk on read
-- *
-- * @flags: Quirk flags
-- *
- * @return: 0 on success, -ENOIOCTLCMD if no register
- * access may be done by the caller (default read
- * value is zero), else negative error code on error
-+ * @flags: Quirk flags
- */
- struct ccs_quirk {
- int (*limits)(struct ccs_sensor *sensor);
-diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
-index ec53abe2e84e5..3afa3f79c8a26 100644
---- a/drivers/media/i2c/imx219.c
-+++ b/drivers/media/i2c/imx219.c
-@@ -21,40 +21,56 @@
- #include <linux/module.h>
- #include <linux/pm_runtime.h>
- #include <linux/regulator/consumer.h>
-+
-+#include <media/v4l2-cci.h>
- #include <media/v4l2-ctrls.h>
- #include <media/v4l2-device.h>
- #include <media/v4l2-event.h>
- #include <media/v4l2-fwnode.h>
- #include <media/v4l2-mediabus.h>
--#include <asm/unaligned.h>
-
--#define IMX219_REG_VALUE_08BIT 1
--#define IMX219_REG_VALUE_16BIT 2
-+/* Chip ID */
-+#define IMX219_REG_CHIP_ID CCI_REG16(0x0000)
-+#define IMX219_CHIP_ID 0x0219
-
--#define IMX219_REG_MODE_SELECT 0x0100
-+#define IMX219_REG_MODE_SELECT CCI_REG8(0x0100)
- #define IMX219_MODE_STANDBY 0x00
- #define IMX219_MODE_STREAMING 0x01
-
--/* Chip ID */
--#define IMX219_REG_CHIP_ID 0x0000
--#define IMX219_CHIP_ID 0x0219
-+#define IMX219_REG_CSI_LANE_MODE CCI_REG8(0x0114)
-+#define IMX219_CSI_2_LANE_MODE 0x01
-+#define IMX219_CSI_4_LANE_MODE 0x03
-
--/* External clock frequency is 24.0M */
--#define IMX219_XCLK_FREQ 24000000
-+#define IMX219_REG_DPHY_CTRL CCI_REG8(0x0128)
-+#define IMX219_DPHY_CTRL_TIMING_AUTO 0
-+#define IMX219_DPHY_CTRL_TIMING_MANUAL 1
-
--/* Pixel rate is fixed for all the modes */
--#define IMX219_PIXEL_RATE 182400000
--#define IMX219_PIXEL_RATE_4LANE 280800000
-+#define IMX219_REG_EXCK_FREQ CCI_REG16(0x012a)
-+#define IMX219_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
-
--#define IMX219_DEFAULT_LINK_FREQ 456000000
--#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
-+/* Analog gain control */
-+#define IMX219_REG_ANALOG_GAIN CCI_REG8(0x0157)
-+#define IMX219_ANA_GAIN_MIN 0
-+#define IMX219_ANA_GAIN_MAX 232
-+#define IMX219_ANA_GAIN_STEP 1
-+#define IMX219_ANA_GAIN_DEFAULT 0x0
-
--#define IMX219_REG_CSI_LANE_MODE 0x0114
--#define IMX219_CSI_2_LANE_MODE 0x01
--#define IMX219_CSI_4_LANE_MODE 0x03
-+/* Digital gain control */
-+#define IMX219_REG_DIGITAL_GAIN CCI_REG16(0x0158)
-+#define IMX219_DGTL_GAIN_MIN 0x0100
-+#define IMX219_DGTL_GAIN_MAX 0x0fff
-+#define IMX219_DGTL_GAIN_DEFAULT 0x0100
-+#define IMX219_DGTL_GAIN_STEP 1
-+
-+/* Exposure control */
-+#define IMX219_REG_EXPOSURE CCI_REG16(0x015a)
-+#define IMX219_EXPOSURE_MIN 4
-+#define IMX219_EXPOSURE_STEP 1
-+#define IMX219_EXPOSURE_DEFAULT 0x640
-+#define IMX219_EXPOSURE_MAX 65535
-
- /* V_TIMING internal */
--#define IMX219_REG_VTS 0x0160
-+#define IMX219_REG_VTS CCI_REG16(0x0160)
- #define IMX219_VTS_15FPS 0x0dc6
- #define IMX219_VTS_30FPS_1080P 0x06e3
- #define IMX219_VTS_30FPS_BINNED 0x06e3
-@@ -72,37 +88,37 @@
- /* HBLANK control - read only */
- #define IMX219_PPL_DEFAULT 3448
-
--/* Exposure control */
--#define IMX219_REG_EXPOSURE 0x015a
--#define IMX219_EXPOSURE_MIN 4
--#define IMX219_EXPOSURE_STEP 1
--#define IMX219_EXPOSURE_DEFAULT 0x640
--#define IMX219_EXPOSURE_MAX 65535
--
--/* Analog gain control */
--#define IMX219_REG_ANALOG_GAIN 0x0157
--#define IMX219_ANA_GAIN_MIN 0
--#define IMX219_ANA_GAIN_MAX 232
--#define IMX219_ANA_GAIN_STEP 1
--#define IMX219_ANA_GAIN_DEFAULT 0x0
--
--/* Digital gain control */
--#define IMX219_REG_DIGITAL_GAIN 0x0158
--#define IMX219_DGTL_GAIN_MIN 0x0100
--#define IMX219_DGTL_GAIN_MAX 0x0fff
--#define IMX219_DGTL_GAIN_DEFAULT 0x0100
--#define IMX219_DGTL_GAIN_STEP 1
--
--#define IMX219_REG_ORIENTATION 0x0172
-+#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162)
-+#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164)
-+#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166)
-+#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168)
-+#define IMX219_REG_Y_ADD_END_A CCI_REG16(0x016a)
-+#define IMX219_REG_X_OUTPUT_SIZE CCI_REG16(0x016c)
-+#define IMX219_REG_Y_OUTPUT_SIZE CCI_REG16(0x016e)
-+#define IMX219_REG_X_ODD_INC_A CCI_REG8(0x0170)
-+#define IMX219_REG_Y_ODD_INC_A CCI_REG8(0x0171)
-+#define IMX219_REG_ORIENTATION CCI_REG8(0x0172)
-
- /* Binning Mode */
--#define IMX219_REG_BINNING_MODE 0x0174
-+#define IMX219_REG_BINNING_MODE CCI_REG16(0x0174)
- #define IMX219_BINNING_NONE 0x0000
- #define IMX219_BINNING_2X2 0x0101
- #define IMX219_BINNING_2X2_ANALOG 0x0303
-
-+#define IMX219_REG_CSI_DATA_FORMAT_A CCI_REG16(0x018c)
-+
-+/* PLL Settings */
-+#define IMX219_REG_VTPXCK_DIV CCI_REG8(0x0301)
-+#define IMX219_REG_VTSYCK_DIV CCI_REG8(0x0303)
-+#define IMX219_REG_PREPLLCK_VT_DIV CCI_REG8(0x0304)
-+#define IMX219_REG_PREPLLCK_OP_DIV CCI_REG8(0x0305)
-+#define IMX219_REG_PLL_VT_MPY CCI_REG16(0x0306)
-+#define IMX219_REG_OPPXCK_DIV CCI_REG8(0x0309)
-+#define IMX219_REG_OPSYCK_DIV CCI_REG8(0x030b)
-+#define IMX219_REG_PLL_OP_MPY CCI_REG16(0x030c)
-+
- /* Test Pattern Control */
--#define IMX219_REG_TEST_PATTERN 0x0600
-+#define IMX219_REG_TEST_PATTERN CCI_REG16(0x0600)
- #define IMX219_TEST_PATTERN_DISABLE 0
- #define IMX219_TEST_PATTERN_SOLID_COLOR 1
- #define IMX219_TEST_PATTERN_COLOR_BARS 2
-@@ -110,10 +126,10 @@
- #define IMX219_TEST_PATTERN_PN9 4
-
- /* Test pattern colour components */
--#define IMX219_REG_TESTP_RED 0x0602
--#define IMX219_REG_TESTP_GREENR 0x0604
--#define IMX219_REG_TESTP_BLUE 0x0606
--#define IMX219_REG_TESTP_GREENB 0x0608
-+#define IMX219_REG_TESTP_RED CCI_REG16(0x0602)
-+#define IMX219_REG_TESTP_GREENR CCI_REG16(0x0604)
-+#define IMX219_REG_TESTP_BLUE CCI_REG16(0x0606)
-+#define IMX219_REG_TESTP_GREENB CCI_REG16(0x0608)
- #define IMX219_TESTP_COLOUR_MIN 0
- #define IMX219_TESTP_COLOUR_MAX 0x03ff
- #define IMX219_TESTP_COLOUR_STEP 1
-@@ -122,6 +138,19 @@
- #define IMX219_TESTP_BLUE_DEFAULT 0
- #define IMX219_TESTP_GREENB_DEFAULT 0
-
-+#define IMX219_REG_TP_WINDOW_WIDTH CCI_REG16(0x0624)
-+#define IMX219_REG_TP_WINDOW_HEIGHT CCI_REG16(0x0626)
-+
-+/* External clock frequency is 24.0M */
-+#define IMX219_XCLK_FREQ 24000000
-+
-+/* Pixel rate is fixed for all the modes */
-+#define IMX219_PIXEL_RATE 182400000
-+#define IMX219_PIXEL_RATE_4LANE 280800000
-+
-+#define IMX219_DEFAULT_LINK_FREQ 456000000
-+#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
-+
- /* IMX219 native and active pixel array size. */
- #define IMX219_NATIVE_WIDTH 3296U
- #define IMX219_NATIVE_HEIGHT 2480U
-@@ -130,14 +159,9 @@
- #define IMX219_PIXEL_ARRAY_WIDTH 3280U
- #define IMX219_PIXEL_ARRAY_HEIGHT 2464U
-
--struct imx219_reg {
-- u16 address;
-- u8 val;
--};
--
- struct imx219_reg_list {
- unsigned int num_of_regs;
-- const struct imx219_reg *regs;
-+ const struct cci_reg_sequence *regs;
- };
-
- /* Mode : resolution and related config&values */
-@@ -160,53 +184,48 @@ struct imx219_mode {
- bool binning;
- };
-
--static const struct imx219_reg imx219_common_regs[] = {
-- {0x0100, 0x00}, /* Mode Select */
-+static const struct cci_reg_sequence imx219_common_regs[] = {
-+ { IMX219_REG_MODE_SELECT, 0x00 }, /* Mode Select */
-
- /* To Access Addresses 3000-5fff, send the following commands */
-- {0x30eb, 0x0c},
-- {0x30eb, 0x05},
-- {0x300a, 0xff},
-- {0x300b, 0xff},
-- {0x30eb, 0x05},
-- {0x30eb, 0x09},
-+ { CCI_REG8(0x30eb), 0x0c },
-+ { CCI_REG8(0x30eb), 0x05 },
-+ { CCI_REG8(0x300a), 0xff },
-+ { CCI_REG8(0x300b), 0xff },
-+ { CCI_REG8(0x30eb), 0x05 },
-+ { CCI_REG8(0x30eb), 0x09 },
-
- /* PLL Clock Table */
-- {0x0301, 0x05}, /* VTPXCK_DIV */
-- {0x0303, 0x01}, /* VTSYSCK_DIV */
-- {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */
-- {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
-- {0x0306, 0x00}, /* PLL_VT_MPY */
-- {0x0307, 0x39},
-- {0x030b, 0x01}, /* OP_SYS_CLK_DIV */
-- {0x030c, 0x00}, /* PLL_OP_MPY */
-- {0x030d, 0x72},
-+ { IMX219_REG_VTPXCK_DIV, 5 },
-+ { IMX219_REG_VTSYCK_DIV, 1 },
-+ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
-+ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
-+ { IMX219_REG_PLL_VT_MPY, 57 },
-+ { IMX219_REG_OPSYCK_DIV, 1 },
-+ { IMX219_REG_PLL_OP_MPY, 114 },
-
- /* Undocumented registers */
-- {0x455e, 0x00},
-- {0x471e, 0x4b},
-- {0x4767, 0x0f},
-- {0x4750, 0x14},
-- {0x4540, 0x00},
-- {0x47b4, 0x14},
-- {0x4713, 0x30},
-- {0x478b, 0x10},
-- {0x478f, 0x10},
-- {0x4793, 0x10},
-- {0x4797, 0x0e},
-- {0x479b, 0x0e},
-+ { CCI_REG8(0x455e), 0x00 },
-+ { CCI_REG8(0x471e), 0x4b },
-+ { CCI_REG8(0x4767), 0x0f },
-+ { CCI_REG8(0x4750), 0x14 },
-+ { CCI_REG8(0x4540), 0x00 },
-+ { CCI_REG8(0x47b4), 0x14 },
-+ { CCI_REG8(0x4713), 0x30 },
-+ { CCI_REG8(0x478b), 0x10 },
-+ { CCI_REG8(0x478f), 0x10 },
-+ { CCI_REG8(0x4793), 0x10 },
-+ { CCI_REG8(0x4797), 0x0e },
-+ { CCI_REG8(0x479b), 0x0e },
-
- /* Frame Bank Register Group "A" */
-- {0x0162, 0x0d}, /* Line_Length_A */
-- {0x0163, 0x78},
-- {0x0170, 0x01}, /* X_ODD_INC_A */
-- {0x0171, 0x01}, /* Y_ODD_INC_A */
-+ { IMX219_REG_LINE_LENGTH_A, 3448 },
-+ { IMX219_REG_X_ODD_INC_A, 1 },
-+ { IMX219_REG_Y_ODD_INC_A, 1 },
-
- /* Output setup registers */
-- {0x0114, 0x01}, /* CSI 2-Lane Mode */
-- {0x0128, 0x00}, /* DPHY Auto Mode */
-- {0x012a, 0x18}, /* EXCK_Freq */
-- {0x012b, 0x00},
-+ { IMX219_REG_DPHY_CTRL, IMX219_DPHY_CTRL_TIMING_AUTO },
-+ { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
- };
-
- /*
-@@ -214,92 +233,58 @@ static const struct imx219_reg imx219_common_regs[] = {
- * driver.
- * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
- */
--static const struct imx219_reg mode_3280x2464_regs[] = {
-- {0x0164, 0x00},
-- {0x0165, 0x00},
-- {0x0166, 0x0c},
-- {0x0167, 0xcf},
-- {0x0168, 0x00},
-- {0x0169, 0x00},
-- {0x016a, 0x09},
-- {0x016b, 0x9f},
-- {0x016c, 0x0c},
-- {0x016d, 0xd0},
-- {0x016e, 0x09},
-- {0x016f, 0xa0},
-- {0x0624, 0x0c},
-- {0x0625, 0xd0},
-- {0x0626, 0x09},
-- {0x0627, 0xa0},
-+static const struct cci_reg_sequence mode_3280x2464_regs[] = {
-+ { IMX219_REG_X_ADD_STA_A, 0 },
-+ { IMX219_REG_X_ADD_END_A, 3279 },
-+ { IMX219_REG_Y_ADD_STA_A, 0 },
-+ { IMX219_REG_Y_ADD_END_A, 2463 },
-+ { IMX219_REG_X_OUTPUT_SIZE, 3280 },
-+ { IMX219_REG_Y_OUTPUT_SIZE, 2464 },
-+ { IMX219_REG_TP_WINDOW_WIDTH, 3280 },
-+ { IMX219_REG_TP_WINDOW_HEIGHT, 2464 },
- };
-
--static const struct imx219_reg mode_1920_1080_regs[] = {
-- {0x0164, 0x02},
-- {0x0165, 0xa8},
-- {0x0166, 0x0a},
-- {0x0167, 0x27},
-- {0x0168, 0x02},
-- {0x0169, 0xb4},
-- {0x016a, 0x06},
-- {0x016b, 0xeb},
-- {0x016c, 0x07},
-- {0x016d, 0x80},
-- {0x016e, 0x04},
-- {0x016f, 0x38},
-- {0x0624, 0x07},
-- {0x0625, 0x80},
-- {0x0626, 0x04},
-- {0x0627, 0x38},
-+static const struct cci_reg_sequence mode_1920_1080_regs[] = {
-+ { IMX219_REG_X_ADD_STA_A, 680 },
-+ { IMX219_REG_X_ADD_END_A, 2599 },
-+ { IMX219_REG_Y_ADD_STA_A, 692 },
-+ { IMX219_REG_Y_ADD_END_A, 1771 },
-+ { IMX219_REG_X_OUTPUT_SIZE, 1920 },
-+ { IMX219_REG_Y_OUTPUT_SIZE, 1080 },
-+ { IMX219_REG_TP_WINDOW_WIDTH, 1920 },
-+ { IMX219_REG_TP_WINDOW_HEIGHT, 1080 },
- };
-
--static const struct imx219_reg mode_1640_1232_regs[] = {
-- {0x0164, 0x00},
-- {0x0165, 0x00},
-- {0x0166, 0x0c},
-- {0x0167, 0xcf},
-- {0x0168, 0x00},
-- {0x0169, 0x00},
-- {0x016a, 0x09},
-- {0x016b, 0x9f},
-- {0x016c, 0x06},
-- {0x016d, 0x68},
-- {0x016e, 0x04},
-- {0x016f, 0xd0},
-- {0x0624, 0x06},
-- {0x0625, 0x68},
-- {0x0626, 0x04},
-- {0x0627, 0xd0},
-+static const struct cci_reg_sequence mode_1640_1232_regs[] = {
-+ { IMX219_REG_X_ADD_STA_A, 0 },
-+ { IMX219_REG_X_ADD_END_A, 3279 },
-+ { IMX219_REG_Y_ADD_STA_A, 0 },
-+ { IMX219_REG_Y_ADD_END_A, 2463 },
-+ { IMX219_REG_X_OUTPUT_SIZE, 1640 },
-+ { IMX219_REG_Y_OUTPUT_SIZE, 1232 },
-+ { IMX219_REG_TP_WINDOW_WIDTH, 1640 },
-+ { IMX219_REG_TP_WINDOW_HEIGHT, 1232 },
- };
-
--static const struct imx219_reg mode_640_480_regs[] = {
-- {0x0164, 0x03},
-- {0x0165, 0xe8},
-- {0x0166, 0x08},
-- {0x0167, 0xe7},
-- {0x0168, 0x02},
-- {0x0169, 0xf0},
-- {0x016a, 0x06},
-- {0x016b, 0xaf},
-- {0x016c, 0x02},
-- {0x016d, 0x80},
-- {0x016e, 0x01},
-- {0x016f, 0xe0},
-- {0x0624, 0x06},
-- {0x0625, 0x68},
-- {0x0626, 0x04},
-- {0x0627, 0xd0},
-+static const struct cci_reg_sequence mode_640_480_regs[] = {
-+ { IMX219_REG_X_ADD_STA_A, 1000 },
-+ { IMX219_REG_X_ADD_END_A, 2279 },
-+ { IMX219_REG_Y_ADD_STA_A, 752 },
-+ { IMX219_REG_Y_ADD_END_A, 1711 },
-+ { IMX219_REG_X_OUTPUT_SIZE, 640 },
-+ { IMX219_REG_Y_OUTPUT_SIZE, 480 },
-+ { IMX219_REG_TP_WINDOW_WIDTH, 1640 },
-+ { IMX219_REG_TP_WINDOW_HEIGHT, 1232 },
- };
-
--static const struct imx219_reg raw8_framefmt_regs[] = {
-- {0x018c, 0x08},
-- {0x018d, 0x08},
-- {0x0309, 0x08},
-+static const struct cci_reg_sequence raw8_framefmt_regs[] = {
-+ { IMX219_REG_CSI_DATA_FORMAT_A, 0x0808 },
-+ { IMX219_REG_OPPXCK_DIV, 8 },
- };
-
--static const struct imx219_reg raw10_framefmt_regs[] = {
-- {0x018c, 0x0a},
-- {0x018d, 0x0a},
-- {0x0309, 0x0a},
-+static const struct cci_reg_sequence raw10_framefmt_regs[] = {
-+ { IMX219_REG_CSI_DATA_FORMAT_A, 0x0a0a },
-+ { IMX219_REG_OPPXCK_DIV, 10 },
- };
-
- static const s64 imx219_link_freq_menu[] = {
-@@ -460,6 +445,7 @@ struct imx219 {
- struct v4l2_subdev sd;
- struct media_pad pad;
-
-+ struct regmap *regmap;
- struct clk *xclk; /* system clock to IMX219 */
- u32 xclk_freq;
-
-@@ -491,78 +477,6 @@ static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
- return container_of(_sd, struct imx219, sd);
- }
-
--/* Read registers up to 2 at a time */
--static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
--{
-- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-- struct i2c_msg msgs[2];
-- u8 addr_buf[2] = { reg >> 8, reg & 0xff };
-- u8 data_buf[4] = { 0, };
-- int ret;
--
-- if (len > 4)
-- return -EINVAL;
--
-- /* Write register address */
-- msgs[0].addr = client->addr;
-- msgs[0].flags = 0;
-- msgs[0].len = ARRAY_SIZE(addr_buf);
-- msgs[0].buf = addr_buf;
--
-- /* Read data from register */
-- msgs[1].addr = client->addr;
-- msgs[1].flags = I2C_M_RD;
-- msgs[1].len = len;
-- msgs[1].buf = &data_buf[4 - len];
--
-- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-- if (ret != ARRAY_SIZE(msgs))
-- return -EIO;
--
-- *val = get_unaligned_be32(data_buf);
--
-- return 0;
--}
--
--/* Write registers up to 2 at a time */
--static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
--{
-- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-- u8 buf[6];
--
-- if (len > 4)
-- return -EINVAL;
--
-- put_unaligned_be16(reg, buf);
-- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
-- if (i2c_master_send(client, buf, len + 2) != len + 2)
-- return -EIO;
--
-- return 0;
--}
--
--/* Write a list of registers */
--static int imx219_write_regs(struct imx219 *imx219,
-- const struct imx219_reg *regs, u32 len)
--{
-- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-- unsigned int i;
-- int ret;
--
-- for (i = 0; i < len; i++) {
-- ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
-- if (ret) {
-- dev_err_ratelimited(&client->dev,
-- "Failed to write reg 0x%4.4x. error = %d\n",
-- regs[i].address, ret);
--
-- return ret;
-- }
-- }
--
-- return 0;
--}
--
- /* Get bayer order based on flip setting. */
- static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
- {
-@@ -586,7 +500,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
- struct imx219 *imx219 =
- container_of(ctrl->handler, struct imx219, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-- int ret;
-+ int ret = 0;
-
- if (ctrl->id == V4L2_CID_VBLANK) {
- int exposure_max, exposure_def;
-@@ -610,48 +524,45 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
-
- switch (ctrl->id) {
- case V4L2_CID_ANALOGUE_GAIN:
-- ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
-- IMX219_REG_VALUE_08BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_ANALOG_GAIN,
-+ ctrl->val, &ret);
- break;
- case V4L2_CID_EXPOSURE:
-- ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
-- IMX219_REG_VALUE_16BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
-+ ctrl->val, &ret);
- break;
- case V4L2_CID_DIGITAL_GAIN:
-- ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
-- IMX219_REG_VALUE_16BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
-+ ctrl->val, &ret);
- break;
- case V4L2_CID_TEST_PATTERN:
-- ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
-- IMX219_REG_VALUE_16BIT,
-- imx219_test_pattern_val[ctrl->val]);
-+ cci_write(imx219->regmap, IMX219_REG_TEST_PATTERN,
-+ imx219_test_pattern_val[ctrl->val], &ret);
- break;
- case V4L2_CID_HFLIP:
- case V4L2_CID_VFLIP:
-- ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
-- imx219->hflip->val |
-- imx219->vflip->val << 1);
-+ cci_write(imx219->regmap, IMX219_REG_ORIENTATION,
-+ imx219->hflip->val | imx219->vflip->val << 1, &ret);
- break;
- case V4L2_CID_VBLANK:
-- ret = imx219_write_reg(imx219, IMX219_REG_VTS,
-- IMX219_REG_VALUE_16BIT,
-- imx219->mode->height + ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_VTS,
-+ imx219->mode->height + ctrl->val, &ret);
- break;
- case V4L2_CID_TEST_PATTERN_RED:
-- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
-- IMX219_REG_VALUE_16BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
-+ ctrl->val, &ret);
- break;
- case V4L2_CID_TEST_PATTERN_GREENR:
-- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
-- IMX219_REG_VALUE_16BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENR,
-+ ctrl->val, &ret);
- break;
- case V4L2_CID_TEST_PATTERN_BLUE:
-- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
-- IMX219_REG_VALUE_16BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_TESTP_BLUE,
-+ ctrl->val, &ret);
- break;
- case V4L2_CID_TEST_PATTERN_GREENB:
-- ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
-- IMX219_REG_VALUE_16BIT, ctrl->val);
-+ cci_write(imx219->regmap, IMX219_REG_TESTP_GREENB,
-+ ctrl->val, &ret);
- break;
- default:
- dev_info(&client->dev,
-@@ -802,15 +713,15 @@ static int imx219_set_framefmt(struct imx219 *imx219,
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- case MEDIA_BUS_FMT_SBGGR8_1X8:
-- return imx219_write_regs(imx219, raw8_framefmt_regs,
-- ARRAY_SIZE(raw8_framefmt_regs));
-+ return cci_multi_reg_write(imx219->regmap, raw8_framefmt_regs,
-+ ARRAY_SIZE(raw8_framefmt_regs), NULL);
-
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- case MEDIA_BUS_FMT_SBGGR10_1X10:
-- return imx219_write_regs(imx219, raw10_framefmt_regs,
-- ARRAY_SIZE(raw10_framefmt_regs));
-+ return cci_multi_reg_write(imx219->regmap, raw10_framefmt_regs,
-+ ARRAY_SIZE(raw10_framefmt_regs), NULL);
- }
-
- return -EINVAL;
-@@ -819,28 +730,24 @@ static int imx219_set_framefmt(struct imx219 *imx219,
- static int imx219_set_binning(struct imx219 *imx219,
- const struct v4l2_mbus_framefmt *format)
- {
-- if (!imx219->mode->binning) {
-- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
-- IMX219_REG_VALUE_16BIT,
-- IMX219_BINNING_NONE);
-- }
-+ if (!imx219->mode->binning)
-+ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
-+ IMX219_BINNING_NONE, NULL);
-
- switch (format->code) {
- case MEDIA_BUS_FMT_SRGGB8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- case MEDIA_BUS_FMT_SBGGR8_1X8:
-- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
-- IMX219_REG_VALUE_16BIT,
-- IMX219_BINNING_2X2_ANALOG);
-+ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
-+ IMX219_BINNING_2X2_ANALOG, NULL);
-
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- case MEDIA_BUS_FMT_SBGGR10_1X10:
-- return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
-- IMX219_REG_VALUE_16BIT,
-- IMX219_BINNING_2X2);
-+ return cci_write(imx219->regmap, IMX219_REG_BINNING_MODE,
-+ IMX219_BINNING_2X2, NULL);
- }
-
- return -EINVAL;
-@@ -879,9 +786,9 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
-
- static int imx219_configure_lanes(struct imx219 *imx219)
- {
-- return imx219_write_reg(imx219, IMX219_REG_CSI_LANE_MODE,
-- IMX219_REG_VALUE_08BIT, (imx219->lanes == 2) ?
-- IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE);
-+ return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
-+ imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
-+ IMX219_CSI_4_LANE_MODE, NULL);
- };
-
- static int imx219_start_streaming(struct imx219 *imx219,
-@@ -897,7 +804,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
- return ret;
-
- /* Send all registers that are common to all modes */
-- ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
-+ ret = cci_multi_reg_write(imx219->regmap, imx219_common_regs,
-+ ARRAY_SIZE(imx219_common_regs), NULL);
- if (ret) {
- dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
- goto err_rpm_put;
-@@ -912,7 +820,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
-
- /* Apply default values of current mode */
- reg_list = &imx219->mode->reg_list;
-- ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
-+ ret = cci_multi_reg_write(imx219->regmap, reg_list->regs,
-+ reg_list->num_of_regs, NULL);
- if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
- goto err_rpm_put;
-@@ -939,8 +848,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
- goto err_rpm_put;
-
- /* set stream on register */
-- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
-+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
-+ IMX219_MODE_STREAMING, NULL);
- if (ret)
- goto err_rpm_put;
-
-@@ -961,8 +870,8 @@ static void imx219_stop_streaming(struct imx219 *imx219)
- int ret;
-
- /* set stream off register */
-- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
-+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
-+ IMX219_MODE_STANDBY, NULL);
- if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
-
-@@ -1101,10 +1010,9 @@ static int imx219_identify_module(struct imx219 *imx219)
- {
- struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
- int ret;
-- u32 val;
-+ u64 val;
-
-- ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
-- IMX219_REG_VALUE_16BIT, &val);
-+ ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL);
- if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
- IMX219_CHIP_ID);
-@@ -1112,7 +1020,7 @@ static int imx219_identify_module(struct imx219 *imx219)
- }
-
- if (val != IMX219_CHIP_ID) {
-- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
-+ dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
- IMX219_CHIP_ID, val);
- return -EIO;
- }
-@@ -1336,6 +1244,13 @@ static int imx219_probe(struct i2c_client *client)
- if (imx219_check_hwcfg(dev, imx219))
- return -EINVAL;
-
-+ imx219->regmap = devm_cci_regmap_init_i2c(client, 16);
-+ if (IS_ERR(imx219->regmap)) {
-+ ret = PTR_ERR(imx219->regmap);
-+ dev_err(dev, "failed to initialize CCI: %d\n", ret);
-+ return ret;
-+ }
-+
- /* Get system clock (xclk) */
- imx219->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(imx219->xclk)) {
-@@ -1379,17 +1294,19 @@ static int imx219_probe(struct i2c_client *client)
- * streaming is started, so upon power up switch the modes to:
- * streaming -> standby
- */
-- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
-+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
-+ IMX219_MODE_STREAMING, NULL);
- if (ret < 0)
- goto error_power_off;
-+
- usleep_range(100, 110);
-
- /* put sensor back to standby mode */
-- ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-- IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
-+ ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
-+ IMX219_MODE_STANDBY, NULL);
- if (ret < 0)
- goto error_power_off;
-+
- usleep_range(100, 110);
-
- ret = imx219_init_controls(imx219);
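The imx219 rewrite above replaces the driver's private {u16 address, u8 val} tables and hand-rolled i2c helpers with the V4L2 CCI layer, whose central trick is encoding the register width inside the register constant itself, so one writer handles 8- and 16-bit registers from the same table. Roughly (the bit layout and names here are illustrative, not the actual v4l2-cci internals):

#include <stdint.h>
#include <stdio.h>

/* Pack the byte-width into bits 16..19 of the register id. */
#define REG8(addr)   ((1u << 16) | (addr))
#define REG16(addr)  ((2u << 16) | (addr))
#define REG_WIDTH(r) (((r) >> 16) & 0xf)
#define REG_ADDR(r)  ((r) & 0xffff)

struct reg_sequence { uint32_t reg; uint32_t val; };

/* One writer for all widths: the width comes from the id, so the
 * table rows never need to spell out per-byte sub-registers. */
static void reg_write(uint32_t reg, uint32_t val)
{
	unsigned int w = REG_WIDTH(reg);

	printf("write %u byte(s) of 0x%0*x to 0x%04x\n",
	       w, (int)(w * 2), val, REG_ADDR(reg));
}

static const struct reg_sequence mode_regs[] = {
	{ REG16(0x016c), 1920 },	/* X output size */
	{ REG16(0x016e), 1080 },	/* Y output size */
	{ REG8(0x0172),  0x3 },		/* orientation: h+v flip */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(mode_regs) / sizeof(*mode_regs); i++)
		reg_write(mode_regs[i].reg, mode_regs[i].val);
	return 0;
}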
-diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
-index be84ff1e2b170..fc1cf196ef015 100644
---- a/drivers/media/i2c/max9286.c
-+++ b/drivers/media/i2c/max9286.c
-@@ -1449,7 +1449,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
-
- i2c_mux_mask |= BIT(id);
- }
-- of_node_put(node);
- of_node_put(i2c_mux);
-
- /* Parse the endpoints */
-@@ -1513,7 +1512,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
- priv->source_mask |= BIT(ep.port);
- priv->nsources++;
- }
-- of_node_put(node);
-
- of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width);
- switch (priv->bus_width) {
-diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
-index dbc642c5995b6..8ebdb32dd3dbc 100644
---- a/drivers/media/i2c/ov13b10.c
-+++ b/drivers/media/i2c/ov13b10.c
-@@ -1501,7 +1501,7 @@ static int ov13b10_probe(struct i2c_client *client)
-
- full_power = acpi_dev_state_d0(&client->dev);
- if (full_power) {
-- ov13b10_power_on(&client->dev);
-+ ret = ov13b10_power_on(&client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to power on\n");
- return ret;
-diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
-index 5fe85aa2d2ec4..40532f7bcabea 100644
---- a/drivers/media/i2c/ov5640.c
-+++ b/drivers/media/i2c/ov5640.c
-@@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
- return 0;
- }
-
-+static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
-+{
-+ const struct ov5640_mode_info *mode = sensor->current_mode;
-+
-+ __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
-+ OV5640_MAX_VTS - mode->height, 1, vblank);
-+
-+ __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
-+}
-+
- static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
- {
- const struct ov5640_mode_info *mode = sensor->current_mode;
- enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
- struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
-- const struct ov5640_timings *timings;
-+ const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
- s32 exposure_val, exposure_max;
- unsigned int hblank;
- unsigned int i = 0;
-@@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
- __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
- ov5640_calc_pixel_rate(sensor));
-
-+ __v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
-+
- return 0;
- }
-
-@@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
- __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
- __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
-
-- timings = ov5640_timings(sensor, mode);
- hblank = timings->htot - mode->width;
- __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
- hblank, hblank, 1, hblank);
-
- vblank = timings->vblank_def;
-- __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
-- OV5640_MAX_VTS - mode->height, 1, vblank);
-- __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
-+ __v4l2_ctrl_vblank_update(sensor, vblank);
-
- exposure_max = timings->crop.height + vblank - 4;
- exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
-@@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client)
- ret = ov5640_sensor_resume(dev);
- if (ret) {
- dev_err(dev, "failed to power on\n");
-- goto entity_cleanup;
-+ goto free_ctrls;
- }
-
- pm_runtime_set_active(dev);
-@@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client)
- err_pm_runtime:
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
-- v4l2_ctrl_handler_free(&sensor->ctrls.handler);
- ov5640_sensor_suspend(dev);
-+free_ctrls:
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
- entity_cleanup:
- media_entity_cleanup(&sensor->sd.entity);
- mutex_destroy(&sensor->lock);
-diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
-index aa708a0e5eac6..09a193bb87df3 100644
---- a/drivers/media/pci/bt8xx/bttv-driver.c
-+++ b/drivers/media/pci/bt8xx/bttv-driver.c
-@@ -3474,6 +3474,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
-
- /* free resources */
- free_irq(btv->c.pci->irq,btv);
-+ del_timer_sync(&btv->timeout);
- iounmap(btv->bt848_mmio);
- release_mem_region(pci_resource_start(btv->c.pci,0),
- pci_resource_len(btv->c.pci,0));
-diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
-index 74edcc76d12f4..6e1a0614e6d06 100644
---- a/drivers/media/pci/cobalt/cobalt-driver.c
-+++ b/drivers/media/pci/cobalt/cobalt-driver.c
-@@ -8,6 +8,7 @@
- * All rights reserved.
- */
-
-+#include <linux/bitfield.h>
- #include <linux/delay.h>
- #include <media/i2c/adv7604.h>
- #include <media/i2c/adv7842.h>
-@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
- pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
- cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
- capa, get_link_speed(capa),
-- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
-+ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
- cobalt_info("PCIe link control 0x%04x\n", ctrl);
- cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
- stat, get_link_speed(stat),
-- (stat & PCI_EXP_LNKSTA_NLW) >> 4);
-+ FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
-
- /* Bus */
- pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
- cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
- capa, get_link_speed(capa),
-- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
-+ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
-
- /* Slot */
- pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
-@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
- if (!pci_is_pcie(pci_dev))
- return 0;
- pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
-- return (link & PCI_EXP_LNKSTA_NLW) >> 4;
-+ return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
- }
-
- static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
-@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
- if (!pci_is_pcie(pci_dev))
- return 0;
- pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
-- return (link & PCI_EXP_LNKCAP_MLW) >> 4;
-+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
- }
-
- static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
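
The cobalt hunk swaps open-coded "(reg & MASK) >> 4" for FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask itself so the two can never drift apart. A rough userspace equivalent follows; FIELD_GET_DEMO and LNKSTA_NLW are illustrative stand-ins rather than the kernel definitions, with the mask mirroring the PCIe negotiated-link-width field in bits 9:4.

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's FIELD_GET(): the shift is derived
 * from the mask's lowest set bit, so mask and shift cannot disagree.
 */
#define FIELD_GET_DEMO(mask, reg) \
        (((reg) & (mask)) >> __builtin_ctz(mask))

#define LNKSTA_NLW 0x03f0       /* negotiated link width, bits 9:4 */

int main(void)
{
        uint16_t stat = 0x1041; /* example register value: x4 link */

        /* Old style: mask plus a hand-written ">> 4" that must match. */
        printf("open-coded: %u\n", (unsigned)((stat & LNKSTA_NLW) >> 4));
        /* FIELD_GET style: only the mask is spelled out. */
        printf("field_get:  %u\n", (unsigned)FIELD_GET_DEMO(LNKSTA_NLW, stat));
        return 0;
}
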
-diff --git a/drivers/media/platform/amphion/vpu_defs.h b/drivers/media/platform/amphion/vpu_defs.h
-index 667637eedb5d4..7320852668d64 100644
---- a/drivers/media/platform/amphion/vpu_defs.h
-+++ b/drivers/media/platform/amphion/vpu_defs.h
-@@ -71,6 +71,7 @@ enum {
- VPU_MSG_ID_TIMESTAMP_INFO,
- VPU_MSG_ID_FIRMWARE_XCPT,
- VPU_MSG_ID_PIC_SKIPPED,
-+ VPU_MSG_ID_DBG_MSG,
- };
-
- enum VPU_ENC_MEMORY_RESOURSE {
-diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
-index af3b336e5dc32..d12310af9ebce 100644
---- a/drivers/media/platform/amphion/vpu_helpers.c
-+++ b/drivers/media/platform/amphion/vpu_helpers.c
-@@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id)
- case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
- case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
- case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
-+ case VPU_MSG_ID_DBG_MSG: return "debug msg";
- }
- return "<unknown>";
- }
-diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
-index f771661980c01..d3425de7bccd3 100644
---- a/drivers/media/platform/amphion/vpu_malone.c
-+++ b/drivers/media/platform/amphion/vpu_malone.c
-@@ -745,6 +745,7 @@ static struct vpu_pair malone_msgs[] = {
- {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
- {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
- {VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
-+ {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
- };
-
- static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
-diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
-index d0ead051f7d18..b74a407a19f22 100644
---- a/drivers/media/platform/amphion/vpu_msgs.c
-+++ b/drivers/media/platform/amphion/vpu_msgs.c
-@@ -23,6 +23,7 @@
- struct vpu_msg_handler {
- u32 id;
- void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
-+ u32 is_str;
- };
-
- static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
-@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
- {
- char *str = (char *)pkt->data;
-
-- if (strlen(str))
-+ if (*str)
- dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
- else
- dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
-@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
- vpu_inst_unlock(inst);
- }
-
-+static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
-+{
-+ char *str = (char *)pkt->data;
-+
-+ if (*str)
-+ dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
-+}
-+
-+static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
-+{
-+ if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
-+ pkt->hdr.num--;
-+ pkt->data[pkt->hdr.num] = 0;
-+}
-+
- static struct vpu_msg_handler handlers[] = {
- {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
- {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
-@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
- {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
- {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
- {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
-- {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
-- {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
-+ {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
-+ {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
- {VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
-+ {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
- };
-
- static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
-@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
- }
- }
-
-- if (handler && handler->done)
-- handler->done(inst, msg);
-+ if (handler) {
-+ if (handler->is_str)
-+ vpu_terminate_string_msg(msg);
-+ if (handler->done)
-+ handler->done(inst, msg);
-+ }
-
- vpu_response_cmd(inst, msg_id, 1);
-
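
The amphion change routes firmware messages whose payload is a string through vpu_terminate_string_msg(), which forces a NUL inside the fixed buffer before any handler flagged is_str calls into string routines. Here is a standalone sketch of the same guard, assuming a fixed payload size; the kernel helper clamps with "==" since num cannot exceed the array, while ">=" below expresses the same idea slightly more defensively.

#include <stdio.h>
#include <string.h>

#define PAYLOAD_WORDS 8         /* assumed fixed payload size */

struct msg {
        unsigned int num;               /* number of valid data words */
        unsigned int data[PAYLOAD_WORDS];
};

/*
 * Force a terminator inside the buffer: if the payload fills the array
 * completely, sacrifice the last word, as the kernel helper does.
 */
static void terminate_string_msg(struct msg *m)
{
        if (m->num >= PAYLOAD_WORDS)
                m->num = PAYLOAD_WORDS - 1;
        m->data[m->num] = 0;
}

int main(void)
{
        struct msg m = { .num = PAYLOAD_WORDS };

        /* Fill the payload with no terminator anywhere. */
        memset(m.data, 'A', sizeof(m.data));

        terminate_string_msg(&m);
        printf("safe now: \"%s\"\n", (char *)m.data);   /* bounded */
        return 0;
}
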
-diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
-index 0d879d71d8185..9231ee7e9b3a9 100644
---- a/drivers/media/platform/cadence/cdns-csi2rx.c
-+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
-@@ -479,8 +479,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
- asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
- struct v4l2_async_connection);
- of_node_put(ep);
-- if (IS_ERR(asd))
-+ if (IS_ERR(asd)) {
-+ v4l2_async_nf_cleanup(&csi2rx->notifier);
- return PTR_ERR(asd);
-+ }
-
- csi2rx->notifier.ops = &csi2rx_notifier_ops;
-
-@@ -543,6 +545,7 @@ static int csi2rx_probe(struct platform_device *pdev)
- return 0;
-
- err_cleanup:
-+ v4l2_async_nf_unregister(&csi2rx->notifier);
- v4l2_async_nf_cleanup(&csi2rx->notifier);
- err_free_priv:
- kfree(csi2rx);
-@@ -553,6 +556,8 @@ static void csi2rx_remove(struct platform_device *pdev)
- {
- struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
-
-+ v4l2_async_nf_unregister(&csi2rx->notifier);
-+ v4l2_async_nf_cleanup(&csi2rx->notifier);
- v4l2_async_unregister_subdev(&csi2rx->subdev);
- kfree(csi2rx);
- }
-diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
-index 2bbc48c7402ca..f8fa3b841ccfb 100644
---- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
-+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
-@@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
- u32 img_stride;
- u32 mem_stride;
- u32 i, enc_quality;
-+ u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
-
- value = width << 16 | height;
- writel(value, base + JPEG_ENC_IMG_SIZE);
-@@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
- writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
- writel(mem_stride, base + JPEG_ENC_STRIDE);
-
-- enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
-- for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
-+ enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
-+ for (i = 0; i < nr_enc_quality; i++) {
- if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
- enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
- break;
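
The mtk_jpeg hunk seeds enc_quality with the table's last (highest) entry, so a requested quality above every quality_param now maps to the best hardware value rather than the lowest one. A small sketch of the lookup, under the assumption that the table is sorted ascending as mtk_jpeg_enc_quality[] is (the values below are invented):

#include <stdio.h>

struct quality_map { unsigned int quality_param; unsigned int hw_value; };

/* Assumed ascending table, like mtk_jpeg_enc_quality[]. */
static const struct quality_map table[] = {
        { 48, 0x0 }, { 64, 0x1 }, { 80, 0x2 }, { 97, 0x3 },
};
#define N (sizeof(table) / sizeof(table[0]))

static unsigned int pick_hw_quality(unsigned int requested)
{
        unsigned int i;
        /* Default to the highest entry, not the lowest. */
        unsigned int hw = table[N - 1].hw_value;

        for (i = 0; i < N; i++) {
                if (requested <= table[i].quality_param) {
                        hw = table[i].hw_value;
                        break;
                }
        }
        return hw;
}

int main(void)
{
        printf("%u %u %u\n", pick_hw_quality(50), pick_hw_quality(97),
               pick_hw_quality(100));   /* 100 now maps to 0x3, not 0x0 */
        return 0;
}
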
-diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
-index 3177592490bee..6adac857a4779 100644
---- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
-+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
-@@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
- const struct v4l2_rect *compose;
- u32 out = 0;
-
-+ ctx = &path->comps[index];
- if (CFG_CHECK(MT8183, p_id))
- out = CFG_COMP(MT8183, ctx->param, outputs[0]);
-
- compose = path->composes[out];
-- ctx = &path->comps[index];
- ret = call_op(ctx, config_frame, cmd, compose);
- if (ret)
- return ret;
-diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
-index 908602031fd0e..9ce34a3b5ee67 100644
---- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
-+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
-@@ -47,20 +47,32 @@ EXPORT_SYMBOL(mtk_vcodec_write_vdecsys);
-
- int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
- {
-+ enum mtk_instance_type inst_type = *((unsigned int *)priv);
-+ struct platform_device *plat_dev;
- unsigned long size = mem->size;
-- struct mtk_vcodec_dec_ctx *ctx = priv;
-- struct device *dev = &ctx->dev->plat_dev->dev;
-+ int id;
-
-- mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
-+ if (inst_type == MTK_INST_ENCODER) {
-+ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
-+
-+ plat_dev = enc_ctx->dev->plat_dev;
-+ id = enc_ctx->id;
-+ } else {
-+ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
-+
-+ plat_dev = dec_ctx->dev->plat_dev;
-+ id = dec_ctx->id;
-+ }
-+
-+ mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL);
- if (!mem->va) {
-- mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size);
-+ mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!",
-+ dev_name(&plat_dev->dev), size);
- return -ENOMEM;
- }
-
-- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
-- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
-- (unsigned long)mem->dma_addr);
-- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
-+ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
-+ (unsigned long)mem->dma_addr, size);
-
- return 0;
- }
-@@ -68,21 +80,33 @@ EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
-
- void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
- {
-+ enum mtk_instance_type inst_type = *((unsigned int *)priv);
-+ struct platform_device *plat_dev;
- unsigned long size = mem->size;
-- struct mtk_vcodec_dec_ctx *ctx = priv;
-- struct device *dev = &ctx->dev->plat_dev->dev;
-+ int id;
-+
-+ if (inst_type == MTK_INST_ENCODER) {
-+ struct mtk_vcodec_enc_ctx *enc_ctx = priv;
-+
-+ plat_dev = enc_ctx->dev->plat_dev;
-+ id = enc_ctx->id;
-+ } else {
-+ struct mtk_vcodec_dec_ctx *dec_ctx = priv;
-+
-+ plat_dev = dec_ctx->dev->plat_dev;
-+ id = dec_ctx->id;
-+ }
-
- if (!mem->va) {
-- mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size);
-+ mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!",
-+ dev_name(&plat_dev->dev), size);
- return;
- }
-
-- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
-- mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
-- (unsigned long)mem->dma_addr);
-- mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
-+ mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
-+ (unsigned long)mem->dma_addr, size);
-
-- dma_free_coherent(dev, size, mem->va, mem->dma_addr);
-+ dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr);
- mem->va = NULL;
- mem->dma_addr = 0;
- mem->size = 0;
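
After this change mtk_vcodec_mem_alloc()/mtk_vcodec_mem_free() serve both encoder and decoder contexts by reading an instance-type discriminant through the opaque priv pointer and then casting to the matching context type. That works only because both context structs begin with the same field. A compact sketch of the common-initial-member idiom, with invented types:

#include <stdio.h>

enum inst_type { INST_DECODER, INST_ENCODER };

/* Both context types begin with the same discriminant field. */
struct dec_ctx { enum inst_type type; int dec_id; };
struct enc_ctx { enum inst_type type; int enc_id; };

static int ctx_id(void *priv)
{
        /* Safe only because 'type' is the first member of both structs. */
        enum inst_type t = *(enum inst_type *)priv;

        if (t == INST_ENCODER)
                return ((struct enc_ctx *)priv)->enc_id;
        return ((struct dec_ctx *)priv)->dec_id;
}

int main(void)
{
        struct dec_ctx d = { INST_DECODER, 3 };
        struct enc_ctx e = { INST_ENCODER, 7 };

        printf("%d %d\n", ctx_id(&d), ctx_id(&e));
        return 0;
}
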
-diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
-index ae6290d28f8e9..84ad1cc6ad171 100644
---- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
-+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
-@@ -154,6 +154,11 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
- return -EINVAL;
- }
-
-+ if (IS_ERR_OR_NULL(vpu->vsi)) {
-+ mtk_venc_err(vpu->ctx, "invalid venc vsi");
-+ return -EINVAL;
-+ }
-+
- return 0;
- }
-
-diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
-index b7a720198ce57..0c8b204535ffc 100644
---- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
-+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
-@@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1,
- return false;
- }
-
-+static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx)
-+{
-+ struct vb2_v4l2_buffer *next_dst_buf;
-+
-+ next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-+ if (!next_dst_buf) {
-+ ctx->fh.m2m_ctx->is_draining = true;
-+ ctx->fh.m2m_ctx->next_buf_last = true;
-+ return;
-+ }
-+
-+ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf);
-+}
-+
- static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
- struct mxc_jpeg_src_buf *jpeg_src_buf)
- {
-@@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
- q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt))
- jpeg_src_buf->fmt = q_data_cap->fmt;
-- if (q_data_cap->fmt != jpeg_src_buf->fmt ||
-+ if (ctx->need_initial_source_change_evt ||
-+ q_data_cap->fmt != jpeg_src_buf->fmt ||
- q_data_cap->w != jpeg_src_buf->w ||
- q_data_cap->h != jpeg_src_buf->h) {
- dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n",
-@@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
- mxc_jpeg_sizeimage(q_data_cap);
- notify_src_chg(ctx);
- ctx->source_change = 1;
-+ ctx->need_initial_source_change_evt = false;
-+ if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
-+ mxc_jpeg_set_last_buffer(ctx);
- }
-
- return ctx->source_change ? true : false;
-@@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
- for (i = 0; i < *nplanes; i++)
- sizes[i] = mxc_jpeg_get_plane_size(q_data, i);
-
-+ if (V4L2_TYPE_IS_OUTPUT(q->type))
-+ ctx->need_initial_source_change_evt = true;
-+
- return 0;
- }
-
-@@ -1638,8 +1659,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
- }
-
-- if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change)
-- v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
-+ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
-+	/* if V4L2_DEC_CMD_STOP is sent before the source change is triggered,
-+	 * restore the is_draining flag
-+	 */
-+ if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
-+ ctx->fh.m2m_ctx->is_draining = true;
-+
- if (V4L2_TYPE_IS_OUTPUT(q->type) &&
- v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
- notify_eos(ctx);
-@@ -1916,7 +1942,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
- return -EINVAL;
- for (i = 0; i < q_data->fmt->mem_planes; i++) {
- sizeimage = mxc_jpeg_get_plane_size(q_data, i);
-- if (vb2_plane_size(vb, i) < sizeimage) {
-+ if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) {
- dev_err(dev, "plane %d too small (%lu < %lu)",
- i, vb2_plane_size(vb, i), sizeimage);
- return -EINVAL;
-diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
-index d80e94cc9d992..dc4afeeff5b65 100644
---- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
-+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
-@@ -99,6 +99,7 @@ struct mxc_jpeg_ctx {
- enum mxc_jpeg_enc_state enc_state;
- int slot;
- unsigned int source_change;
-+ bool need_initial_source_change_evt;
- bool header_parsed;
- struct v4l2_ctrl_handler ctrl_handler;
- u8 jpeg_quality;
-diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
-index 0f8ac29d038db..23acc387be5f0 100644
---- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
-+++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
-@@ -355,9 +355,6 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
- u8 dt_id = vc;
-
- if (tg->enabled) {
-- /* Config Test Generator */
-- vc = 0xa;
--
- /* configure one DT, infinite frames */
- val = vc << TPG_VC_CFG0_VC_NUM;
- val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
-@@ -370,14 +367,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
-
- writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
-
-- val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
-- val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
-+ val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
-+ val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
- writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
-
- val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
- writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
-
-- val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
-+ val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
- val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
- val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
- writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
-@@ -449,6 +446,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
- writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
-
- val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
-+ if (vc > 3)
-+ val |= 1 << CSI2_RX_CFG1_VC_MODE;
- val |= 1 << CSI2_RX_CFG1_MISR_EN;
- writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
-
-diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
-index 04baa80494c66..4dba61b8d3f2a 100644
---- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
-+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
-@@ -476,7 +476,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
-
- settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
-
-- val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
-+ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
- for (i = 0; i < c->num_data; i++)
- val |= BIT(c->data[i].pos * 2);
-
-diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
-index 02494c89da91c..168baaa80d4e6 100644
---- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
-+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
-@@ -7,7 +7,6 @@
- * Copyright (C) 2020-2021 Linaro Ltd.
- */
-
--#include <linux/delay.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/iopoll.h>
-@@ -494,35 +493,20 @@ static int vfe_enable_output(struct vfe_line *line)
- return 0;
- }
-
--static int vfe_disable_output(struct vfe_line *line)
-+static void vfe_disable_output(struct vfe_line *line)
- {
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- unsigned long flags;
- unsigned int i;
-- bool done;
-- int timeout = 0;
--
-- do {
-- spin_lock_irqsave(&vfe->output_lock, flags);
-- done = !output->gen2.active_num;
-- spin_unlock_irqrestore(&vfe->output_lock, flags);
-- usleep_range(10000, 20000);
--
-- if (timeout++ == 100) {
-- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
-- vfe_reset(vfe);
-- output->gen2.active_num = 0;
-- return 0;
-- }
-- } while (!done);
-
- spin_lock_irqsave(&vfe->output_lock, flags);
- for (i = 0; i < output->wm_num; i++)
- vfe_wm_stop(vfe, output->wm_idx[i]);
-+ output->gen2.active_num = 0;
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
-- return 0;
-+ vfe_reset(vfe);
- }
-
- /*
-diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
-index f70aad2e8c237..8ddb8016434ae 100644
---- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
-+++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
-@@ -8,7 +8,6 @@
- * Copyright (C) 2021 Jonathan Marek
- */
-
--#include <linux/delay.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/iopoll.h>
-@@ -328,35 +327,20 @@ static int vfe_enable_output(struct vfe_line *line)
- return 0;
- }
-
--static int vfe_disable_output(struct vfe_line *line)
-+static void vfe_disable_output(struct vfe_line *line)
- {
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- unsigned long flags;
- unsigned int i;
-- bool done;
-- int timeout = 0;
--
-- do {
-- spin_lock_irqsave(&vfe->output_lock, flags);
-- done = !output->gen2.active_num;
-- spin_unlock_irqrestore(&vfe->output_lock, flags);
-- usleep_range(10000, 20000);
--
-- if (timeout++ == 100) {
-- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
-- vfe_reset(vfe);
-- output->gen2.active_num = 0;
-- return 0;
-- }
-- } while (!done);
-
- spin_lock_irqsave(&vfe->output_lock, flags);
- for (i = 0; i < output->wm_num; i++)
- vfe_wm_stop(vfe, output->wm_idx[i]);
-+ output->gen2.active_num = 0;
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
-- return 0;
-+ vfe_reset(vfe);
- }
-
- /*
-diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
-index 06c95568e5af4..965500b83d073 100644
---- a/drivers/media/platform/qcom/camss/camss-vfe.c
-+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
-@@ -535,7 +535,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
- struct camss_clock *clock = &vfe->clock[i];
-
- if (!strcmp(clock->name, "vfe0") ||
-- !strcmp(clock->name, "vfe1")) {
-+ !strcmp(clock->name, "vfe1") ||
-+ !strcmp(clock->name, "vfe_lite")) {
- u64 min_rate = 0;
- unsigned long rate;
-
-@@ -611,7 +612,7 @@ int vfe_get(struct vfe_device *vfe)
- } else {
- ret = vfe_check_clock_rates(vfe);
- if (ret < 0)
-- goto error_pm_runtime_get;
-+ goto error_pm_domain;
- }
- vfe->power_count++;
-
-diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
-index f11dc59135a5a..75991d849b571 100644
---- a/drivers/media/platform/qcom/camss/camss.c
-+++ b/drivers/media/platform/qcom/camss/camss.c
-@@ -1619,6 +1619,12 @@ static int camss_probe(struct platform_device *pdev)
- if (ret < 0)
- goto err_cleanup;
-
-+ ret = camss_configure_pd(camss);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to configure power domains: %d\n", ret);
-+ goto err_cleanup;
-+ }
-+
- ret = camss_init_subdevices(camss);
- if (ret < 0)
- goto err_cleanup;
-@@ -1678,12 +1684,6 @@ static int camss_probe(struct platform_device *pdev)
- }
- }
-
-- ret = camss_configure_pd(camss);
-- if (ret < 0) {
-- dev_err(dev, "Failed to configure power domains: %d\n", ret);
-- return ret;
-- }
--
- pm_runtime_enable(dev);
-
- return 0;
-diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
-index 7cab685a2ec80..0a041b4db9efc 100644
---- a/drivers/media/platform/qcom/venus/hfi_msgs.c
-+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
-@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
- memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
- idx++;
-
-- if (idx > HFI_BUFFER_TYPE_MAX)
-+ if (idx >= HFI_BUFFER_TYPE_MAX)
- return HFI_ERR_SESSION_INVALID_PARAMETER;
-
- req_bytes -= sizeof(struct hfi_buffer_requirements);
-diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
-index 6cf74b2bc5ae3..c43839539d4dd 100644
---- a/drivers/media/platform/qcom/venus/hfi_parser.c
-+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
-@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
- struct hfi_plat_caps *caps = core->caps, *cap;
- unsigned long bit;
-
-+ if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
-+ return;
-+
- for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
- cap = &caps[core->codecs_count++];
- cap->codec = BIT(bit);
-@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
- {
- const struct hfi_profile_level *pl = data;
-
-+ if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
-+ return;
-+
- memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
- cap->num_pl += num;
- }
-@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
- {
- const struct hfi_capability *caps = data;
-
-+ if (cap->num_caps + num >= MAX_CAP_ENTRIES)
-+ return;
-+
- memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
- cap->num_caps += num;
- }
-@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
- {
- const struct raw_formats *formats = fmts;
-
-+ if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
-+ return;
-+
- memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
- cap->num_fmts += num_fmts;
- }
-@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
- rawfmts[i].buftype = fmt->buffer_type;
- i++;
-
-+ if (i >= MAX_FMT_ENTRIES)
-+ return;
-+
- if (pinfo->num_planes > MAX_PLANES)
- break;
-
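
The hfi_parser hunks validate firmware-supplied counts before each memcpy() into a fixed-capacity array, so a corrupt or malicious HFI packet cannot write past cap->pl, cap->caps, or cap->fmts. A userspace sketch of the guarded-append pattern; note the kernel hunks reject with ">=", which is stricter still, while ">" below is the minimal bound.

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 16  /* assumed fixed capacity */

struct table {
        int entries[MAX_ENTRIES];
        unsigned int count;
};

/* Refuse untrusted counts that would run past the array. */
static int fill_entries(struct table *t, const int *src, unsigned int num)
{
        if (t->count + num > MAX_ENTRIES)
                return -1;
        memcpy(&t->entries[t->count], src, num * sizeof(*src));
        t->count += num;
        return 0;
}

int main(void)
{
        struct table t = { .count = 0 };
        int chunk[4] = { 1, 2, 3, 4 };
        unsigned int i;

        for (i = 0; i < 6; i++)
                printf("append %u: %d\n", i, fill_entries(&t, chunk, 4));
        return 0;
}
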
-diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
-index 19fc6575a4891..f9437b6412b91 100644
---- a/drivers/media/platform/qcom/venus/hfi_venus.c
-+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
-@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
-
- new_wr_idx = wr_idx + dwords;
- wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
-+
-+ if (wr_ptr < (u32 *)queue->qmem.kva ||
-+ wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
-+ return -EINVAL;
-+
- if (new_wr_idx < qsize) {
- memcpy(wr_ptr, packet, dwords << 2);
- } else {
-@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
- }
-
- rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
-+
-+ if (rd_ptr < (u32 *)queue->qmem.kva ||
-+ rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
-+ return -EINVAL;
-+
- dwords = *rd_ptr >> 2;
- if (!dwords)
- return -EINVAL;
-diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
-index 76634d242b103..0f5b3845d7b94 100644
---- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
-+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
-@@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
-
- ret = vb2_queue_init(q);
- if (ret)
-- goto err_vd_rel;
-+ return ret;
-
- vp->pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
- if (ret)
-- goto err_vd_rel;
-+ return ret;
-
- video_set_drvdata(vfd, vp);
-
-@@ -1171,8 +1171,6 @@ err_ctrlh_free:
- v4l2_ctrl_handler_free(&vp->ctrl_handler);
- err_me_cleanup:
- media_entity_cleanup(&vfd->entity);
--err_vd_rel:
-- video_device_release(vfd);
- return ret;
- }
-
-diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
-index 423fc85d79ee3..50ec24c753e9e 100644
---- a/drivers/media/platform/verisilicon/hantro_drv.c
-+++ b/drivers/media/platform/verisilicon/hantro_drv.c
-@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
- ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
- if (ctx) {
- vpu_err("frame processing timed out!\n");
-- ctx->codec_ops->reset(ctx);
-+ if (ctx->codec_ops->reset)
-+ ctx->codec_ops->reset(ctx);
- hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
- }
- }
-diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
-index 0224ff68ab3fc..64d6fb852ae9b 100644
---- a/drivers/media/platform/verisilicon/hantro_postproc.c
-+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
-@@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
-
- static int down_scale_factor(struct hantro_ctx *ctx)
- {
-- if (ctx->src_fmt.width == ctx->dst_fmt.width)
-+ if (ctx->src_fmt.width <= ctx->dst_fmt.width)
- return 0;
-
- return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
-diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
-index 816ffa905a4bb..f975276707835 100644
---- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
-+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
-@@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = {
- };
-
- static const char * const rk3588_vpu981_vpu_clk_names[] = {
-- "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
-+ "aclk", "hclk",
- };
-
- /* VDPU1/VEPU1 */
-diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
-index 74546f7e34691..5719dda6e0f0e 100644
---- a/drivers/media/rc/imon.c
-+++ b/drivers/media/rc/imon.c
-@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
- goto fail;
- }
-
-+ if (first_if->dev.driver != interface->dev.driver) {
-+ dev_err(&interface->dev, "inconsistent driver matching\n");
-+ ret = -EINVAL;
-+ goto fail;
-+ }
-+
- if (ifnum == 0) {
- ictx = imon_init_intf0(interface, id);
- if (!ictx) {
-diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
-index 3d8488c39c561..3311099cbd573 100644
---- a/drivers/media/rc/ir-sharp-decoder.c
-+++ b/drivers/media/rc/ir-sharp-decoder.c
-@@ -15,7 +15,9 @@
- #define SHARP_UNIT 40 /* us */
- #define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
- #define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
--#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
-+#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
-+#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
-+#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
- #define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
- #define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
-
-@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
- .header_pulse = 0,
- .header_space = 0,
- .bit_pulse = SHARP_BIT_PULSE,
-- .bit_space[0] = SHARP_BIT_0_PERIOD,
-- .bit_space[1] = SHARP_BIT_1_PERIOD,
-+ .bit_space[0] = SHARP_BIT_0_SPACE,
-+ .bit_space[1] = SHARP_BIT_1_SPACE,
- .trailer_pulse = SHARP_BIT_PULSE,
- .trailer_space = SHARP_ECHO_SPACE,
- .msb_first = 1,
-diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
-index 043d23aaa3cbc..a537734832c50 100644
---- a/drivers/media/rc/lirc_dev.c
-+++ b/drivers/media/rc/lirc_dev.c
-@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
- if (ret < 0)
- goto out_kfree_raw;
-
-- count = ret;
-+ /* drop trailing space */
-+ if (!(ret % 2))
-+ count = ret - 1;
-+ else
-+ count = ret;
-
- txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
- if (!txbuf) {
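
The lirc change relies on IR transmissions alternating pulse/space starting with a pulse: a well-formed buffer therefore has an odd sample count, and an even count means a meaningless trailing space that can be dropped without changing the signal. A one-function sketch of that rule:

#include <stdio.h>

/*
 * IR samples alternate pulse/space starting with a pulse, so a valid
 * transmission has an odd sample count; an even count carries a
 * trailing space that can be silently discarded.
 */
static unsigned int trim_trailing_space(unsigned int count)
{
        return (count % 2) ? count : count - 1;
}

int main(void)
{
        printf("%u -> %u\n", 6u, trim_trailing_space(6));       /* 5 */
        printf("%u -> %u\n", 7u, trim_trailing_space(7));       /* 7 */
        return 0;
}
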
-diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
-index b51e6a3b8cbeb..f99878eff7ace 100644
---- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
-+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
-@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
- m->priv = args->priv;
- m->network_id = args->network_id;
- m->network_name = kstrdup(args->network_name, GFP_KERNEL);
-+ if (!m->network_name)
-+ goto free_mux_buf;
-+
- m->timing.current_jiffies = get_jiffies_64();
-
- if (args->channels)
- m->channels = args->channels;
- else
- if (vidtv_channels_init(m) < 0)
-- goto free_mux_buf;
-+ goto free_mux_network_name;
-
- /* will alloc data for pmt_sections after initializing pat */
- if (vidtv_channel_si_init(m) < 0)
-@@ -527,6 +530,8 @@ free_channel_si:
- vidtv_channel_si_destroy(m);
- free_channels:
- vidtv_channels_destroy(m);
-+free_mux_network_name:
-+ kfree(m->network_name);
- free_mux_buf:
- vfree(m->mux_buf);
- free_mux:
-diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
-index ce0b7a6e92dc3..2a51c898c11eb 100644
---- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
-+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
-@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
-
- desc->service_name_len = service_name_len;
-
-- if (service_name && service_name_len)
-+ if (service_name && service_name_len) {
- desc->service_name = kstrdup(service_name, GFP_KERNEL);
-+ if (!desc->service_name)
-+ goto free_desc;
-+ }
-
- desc->provider_name_len = provider_name_len;
-
-- if (provider_name && provider_name_len)
-+ if (provider_name && provider_name_len) {
- desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
-+ if (!desc->provider_name)
-+ goto free_desc_service_name;
-+ }
-
- vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
- return desc;
-+
-+free_desc_service_name:
-+ if (service_name && service_name_len)
-+ kfree(desc->service_name);
-+free_desc:
-+ kfree(desc);
-+ return NULL;
- }
-
- struct vidtv_psi_desc_registration
-@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
-
- desc->length = network_name_len;
-
-- if (network_name && network_name_len)
-+ if (network_name && network_name_len) {
- desc->network_name = kstrdup(network_name, GFP_KERNEL);
-+ if (!desc->network_name) {
-+ kfree(desc);
-+ return NULL;
-+ }
-+ }
-
- vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
- return desc;
-@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
- iso_language_code = "eng";
-
- desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
-+ if (!desc->iso_language_code)
-+ goto free_desc;
-
-- if (event_name && event_name_len)
-+ if (event_name && event_name_len) {
- desc->event_name = kstrdup(event_name, GFP_KERNEL);
-+ if (!desc->event_name)
-+ goto free_desc_language_code;
-+ }
-
-- if (text && text_len)
-+ if (text && text_len) {
- desc->text = kstrdup(text, GFP_KERNEL);
-+ if (!desc->text)
-+ goto free_desc_event_name;
-+ }
-
- vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
- return desc;
-+
-+free_desc_event_name:
-+ if (event_name && event_name_len)
-+ kfree(desc->event_name);
-+free_desc_language_code:
-+ kfree(desc->iso_language_code);
-+free_desc:
-+ kfree(desc);
-+ return NULL;
- }
-
- struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
-diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
-index b5b104ee64c99..c57771119a34b 100644
---- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
-+++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
-@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
- rds->ta = alt;
- rds->ms = true;
- snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
-- freq / 16, ((freq & 0xf) * 10) / 16);
-+ (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
- if (alt)
- strscpy(rds->radiotext,
- " The Radio Data System can switch between different Radio Texts ",
-diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
-index 33a2aa8907e65..4eb7dd4599b7e 100644
---- a/drivers/media/usb/dvb-usb-v2/af9035.c
-+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
-@@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
- ret = -EOPNOTSUPP;
- } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
- (msg[0].addr == state->af9033_i2c_addr[1])) {
-- if (msg[0].len < 3 || msg[1].len < 1)
-- return -EOPNOTSUPP;
-+ if (msg[0].len < 3 || msg[1].len < 1) {
-+ ret = -EOPNOTSUPP;
-+ goto unlock;
-+ }
- /* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
- msg[0].buf[2];
-@@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
- ret = -EOPNOTSUPP;
- } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
- (msg[0].addr == state->af9033_i2c_addr[1])) {
-- if (msg[0].len < 3)
-- return -EOPNOTSUPP;
-+ if (msg[0].len < 3) {
-+ ret = -EOPNOTSUPP;
-+ goto unlock;
-+ }
- /* demod access via firmware interface */
- u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
- msg[0].buf[2];
-@@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
- ret = -EOPNOTSUPP;
- }
-
-+unlock:
- mutex_unlock(&d->i2c_mutex);
-
- if (ret < 0)
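
The af9035 hunks convert early "return -EOPNOTSUPP" statements inside the locked region into "goto unlock", so d->i2c_mutex is released on every exit path. A minimal pthread sketch of the single-exit locking pattern; do_transfer() and its length check are invented for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Single-exit locking: every failure funnels through 'unlock'. */
static int do_transfer(int len)
{
        int ret = 0;

        pthread_mutex_lock(&lock);

        if (len < 3) {
                ret = -1;       /* was: return -1 (mutex left held) */
                goto unlock;
        }

        /* ... perform the transfer ... */

unlock:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d\n", do_transfer(1));
        printf("%d\n", do_transfer(4)); /* lock still usable: no deadlock */
        return 0;
}
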
-diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
-index 46ed95483e222..5f5fa851ca640 100644
---- a/drivers/media/usb/gspca/cpia1.c
-+++ b/drivers/media/usb/gspca/cpia1.c
-@@ -18,6 +18,7 @@
-
- #include <linux/input.h>
- #include <linux/sched/signal.h>
-+#include <linux/bitops.h>
-
- #include "gspca.h"
-
-@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
- sd->params.exposure.expMode = 2;
- sd->exposure_status = EXPOSURE_NORMAL;
- }
-+ if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
-+ return -EINVAL;
- currentexp = currentexp << sd->params.exposure.gain;
- sd->params.exposure.gain = 0;
- /* round down current exposure to nearest value */
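
The cpia1 guard rejects a gain greater than or equal to the bit width of currentexp, because shifting a value by at least its type's width is undefined behaviour in C. A standalone sketch of the same check:

#include <limits.h>
#include <stdio.h>

/* Shifting a 32-bit value by >= 32 is undefined behaviour in C. */
static int scaled_exposure(unsigned int exp, unsigned int gain,
                           unsigned int *out)
{
        if (gain >= sizeof(exp) * CHAR_BIT)
                return -1;      /* reject instead of invoking UB */
        *out = exp << gain;
        return 0;
}

int main(void)
{
        unsigned int v;

        if (scaled_exposure(100, 3, &v) == 0)
                printf("ok: %u\n", v);
        if (scaled_exposure(100, 32, &v) != 0)
                printf("rejected oversized gain\n");
        return 0;
}
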
-diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
-index 9e5b5dbd9c8df..2845041f32d69 100644
---- a/drivers/memory/tegra/tegra234.c
-+++ b/drivers/memory/tegra/tegra234.c
-@@ -986,6 +986,10 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
- msg.rx.data = &bwmgr_resp;
- msg.rx.size = sizeof(bwmgr_resp);
-
-+ if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 &&
-+ pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2)
-+ msg.flags = TEGRA_BPMP_MESSAGE_RESET;
-+
- ret = tegra_bpmp_transfer(mc->bpmp, &msg);
- if (ret < 0) {
- dev_err(mc->dev, "BPMP transfer failed: %d\n", ret);
-diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
-index 02cf4f3e91d76..de5d894ac04af 100644
---- a/drivers/mfd/arizona-spi.c
-+++ b/drivers/mfd/arizona-spi.c
-@@ -159,6 +159,9 @@ static int arizona_spi_acpi_probe(struct arizona *arizona)
- arizona->pdata.micd_ranges = arizona_micd_aosp_ranges;
- arizona->pdata.num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges);
-
-+ /* Use left headphone speaker for HP vs line-out detection */
-+ arizona->pdata.hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
-+
- return 0;
- }
-
-diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
-index c7510434380a4..fbbe82c6e75b5 100644
---- a/drivers/mfd/dln2.c
-+++ b/drivers/mfd/dln2.c
-@@ -826,7 +826,6 @@ out_stop_rx:
- dln2_stop_rx_urbs(dln2);
-
- out_free:
-- usb_put_dev(dln2->usb_dev);
- dln2_free(dln2);
-
- return ret;
-diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
-index 699f44ffff0e4..ae5759200622c 100644
---- a/drivers/mfd/intel-lpss-pci.c
-+++ b/drivers/mfd/intel-lpss-pci.c
-@@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
-+ /* LNL-M */
-+ { PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
-+ { PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
-+ { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
-+ { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
-+ { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
-+ { PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
-+ { PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
-+ { PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
-+ { PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info },
-+ { PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
-+ { PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
-+ { PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
- { }
- };
- MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
-diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
-index 0ed7c0d7784e1..2b85509a90fc2 100644
---- a/drivers/mfd/mfd-core.c
-+++ b/drivers/mfd/mfd-core.c
-@@ -146,6 +146,7 @@ static int mfd_add_device(struct device *parent, int id,
- struct platform_device *pdev;
- struct device_node *np = NULL;
- struct mfd_of_node_entry *of_entry, *tmp;
-+ bool disabled = false;
- int ret = -ENOMEM;
- int platform_id;
- int r;
-@@ -183,11 +184,10 @@ static int mfd_add_device(struct device *parent, int id,
- if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
- for_each_child_of_node(parent->of_node, np) {
- if (of_device_is_compatible(np, cell->of_compatible)) {
-- /* Ignore 'disabled' devices error free */
-+ /* Skip 'disabled' devices */
- if (!of_device_is_available(np)) {
-- of_node_put(np);
-- ret = 0;
-- goto fail_alias;
-+ disabled = true;
-+ continue;
- }
-
- ret = mfd_match_of_node_to_dev(pdev, np, cell);
-@@ -197,10 +197,17 @@ static int mfd_add_device(struct device *parent, int id,
- if (ret)
- goto fail_alias;
-
-- break;
-+ goto match;
- }
- }
-
-+ if (disabled) {
-+ /* Ignore 'disabled' devices error free */
-+ ret = 0;
-+ goto fail_alias;
-+ }
-+
-+match:
- if (!pdev->dev.of_node)
- pr_warn("%s: Failed to locate of_node [id: %d]\n",
- cell->name, platform_id);
-diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
-index 7e2cd79d17ebf..8e449cff5cec4 100644
---- a/drivers/mfd/qcom-spmi-pmic.c
-+++ b/drivers/mfd/qcom-spmi-pmic.c
-@@ -30,6 +30,8 @@ struct qcom_spmi_dev {
- struct qcom_spmi_pmic pmic;
- };
-
-+static DEFINE_MUTEX(pmic_spmi_revid_lock);
-+
- #define N_USIDS(n) ((void *)n)
-
- static const struct of_device_id pmic_spmi_id_table[] = {
-@@ -76,24 +78,21 @@ static const struct of_device_id pmic_spmi_id_table[] = {
- *
- * This only supports PMICs with 1 or 2 USIDs.
- */
--static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
-+static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
- {
-- struct spmi_device *sdev;
-- struct qcom_spmi_dev *ctx;
- struct device_node *spmi_bus;
-- struct device_node *other_usid = NULL;
-+ struct device_node *child;
- int function_parent_usid, ret;
- u32 pmic_addr;
-
-- sdev = to_spmi_device(dev);
-- ctx = dev_get_drvdata(&sdev->dev);
--
- /*
- * Quick return if the function device is already in the base
- * USID. This will always be hit for PMICs with only 1 USID.
- */
-- if (sdev->usid % ctx->num_usids == 0)
-+ if (sdev->usid % ctx->num_usids == 0) {
-+ get_device(&sdev->dev);
- return sdev;
-+ }
-
- function_parent_usid = sdev->usid;
-
-@@ -105,28 +104,61 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
- * device for USID 2.
- */
- spmi_bus = of_get_parent(sdev->dev.of_node);
-- do {
-- other_usid = of_get_next_child(spmi_bus, other_usid);
--
-- ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
-- if (ret)
-- return ERR_PTR(ret);
-+ sdev = ERR_PTR(-ENODATA);
-+ for_each_child_of_node(spmi_bus, child) {
-+ ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
-+ if (ret) {
-+ of_node_put(child);
-+ sdev = ERR_PTR(ret);
-+ break;
-+ }
-
-- sdev = spmi_device_from_of(other_usid);
- if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
-- if (!sdev)
-+ sdev = spmi_device_from_of(child);
-+ if (!sdev) {
- /*
-- * If the base USID for this PMIC hasn't probed yet
-- * but the secondary USID has, then we need to defer
-- * the function driver so that it will attempt to
-- * probe again when the base USID is ready.
-+ * If the base USID for this PMIC hasn't been
-+ * registered yet then we need to defer.
- */
-- return ERR_PTR(-EPROBE_DEFER);
-- return sdev;
-+ sdev = ERR_PTR(-EPROBE_DEFER);
-+ }
-+ of_node_put(child);
-+ break;
- }
-- } while (other_usid->sibling);
-+ }
-
-- return ERR_PTR(-ENODATA);
-+ of_node_put(spmi_bus);
-+
-+ return sdev;
-+}
-+
-+static int pmic_spmi_get_base_revid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
-+{
-+ struct qcom_spmi_dev *base_ctx;
-+ struct spmi_device *base;
-+ int ret = 0;
-+
-+ base = qcom_pmic_get_base_usid(sdev, ctx);
-+ if (IS_ERR(base))
-+ return PTR_ERR(base);
-+
-+ /*
-+ * Copy revid info from base device if it has probed and is still
-+ * bound to its driver.
-+ */
-+ mutex_lock(&pmic_spmi_revid_lock);
-+ base_ctx = spmi_device_get_drvdata(base);
-+ if (!base_ctx) {
-+ ret = -EPROBE_DEFER;
-+ goto out_unlock;
-+ }
-+ memcpy(&ctx->pmic, &base_ctx->pmic, sizeof(ctx->pmic));
-+out_unlock:
-+ mutex_unlock(&pmic_spmi_revid_lock);
-+
-+ put_device(&base->dev);
-+
-+ return ret;
- }
-
- static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
-@@ -204,11 +236,7 @@ const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
- if (!of_match_device(pmic_spmi_id_table, dev->parent))
- return ERR_PTR(-EINVAL);
-
-- sdev = qcom_pmic_get_base_usid(dev->parent);
--
-- if (IS_ERR(sdev))
-- return ERR_CAST(sdev);
--
-+ sdev = to_spmi_device(dev->parent);
- spmi = dev_get_drvdata(&sdev->dev);
-
- return &spmi->pmic;
-@@ -243,16 +271,31 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
- ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
- if (ret < 0)
- return ret;
-+ } else {
-+ ret = pmic_spmi_get_base_revid(sdev, ctx);
-+ if (ret)
-+ return ret;
- }
-+
-+ mutex_lock(&pmic_spmi_revid_lock);
- spmi_device_set_drvdata(sdev, ctx);
-+ mutex_unlock(&pmic_spmi_revid_lock);
-
- return devm_of_platform_populate(&sdev->dev);
- }
-
-+static void pmic_spmi_remove(struct spmi_device *sdev)
-+{
-+ mutex_lock(&pmic_spmi_revid_lock);
-+ spmi_device_set_drvdata(sdev, NULL);
-+ mutex_unlock(&pmic_spmi_revid_lock);
-+}
-+
- MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
-
- static struct spmi_driver pmic_spmi_driver = {
- .probe = pmic_spmi_probe,
-+ .remove = pmic_spmi_remove,
- .driver = {
- .name = "pmic-spmi",
- .of_match_table = pmic_spmi_id_table,
-diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
-index ed4d0ef5e5c31..af519088732d9 100644
---- a/drivers/misc/pci_endpoint_test.c
-+++ b/drivers/misc/pci_endpoint_test.c
-@@ -71,6 +71,7 @@
- #define PCI_DEVICE_ID_TI_AM654 0xb00c
- #define PCI_DEVICE_ID_TI_J7200 0xb00f
- #define PCI_DEVICE_ID_TI_AM64 0xb010
-+#define PCI_DEVICE_ID_TI_J721S2 0xb013
- #define PCI_DEVICE_ID_LS1088A 0x80c0
- #define PCI_DEVICE_ID_IMX8 0x0808
-
-@@ -81,6 +82,7 @@
- #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
- #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
- #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
-+#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
-
- static DEFINE_IDA(pci_endpoint_test_ida);
-
-@@ -990,6 +992,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
-+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
-+ .driver_data = (kernel_ulong_t)&default_data,
-+ },
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
- .driver_data = (kernel_ulong_t)&j721e_data,
- },
-@@ -999,6 +1004,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
- .driver_data = (kernel_ulong_t)&j721e_data,
- },
-+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
-+ .driver_data = (kernel_ulong_t)&j721e_data,
-+ },
- { }
- };
- MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
-diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
-index c1a134bd8ba7b..b878431553abc 100644
---- a/drivers/misc/ti-st/st_core.c
-+++ b/drivers/misc/ti-st/st_core.c
-@@ -15,6 +15,7 @@
- #include <linux/skbuff.h>
-
- #include <linux/ti_wilink_st.h>
-+#include <linux/netdevice.h>
-
- /*
- * function pointer pointing to either,
-@@ -429,7 +430,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
- case ST_LL_AWAKE_TO_ASLEEP:
- pr_err("ST LL is illegal state(%ld),"
- "purging received skb.", st_ll_getstate(st_gdata));
-- kfree_skb(skb);
-+ dev_kfree_skb_irq(skb);
- break;
- case ST_LL_ASLEEP:
- skb_queue_tail(&st_gdata->tx_waitq, skb);
-@@ -438,7 +439,7 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
- default:
- pr_err("ST LL is illegal state(%ld),"
- "purging received skb.", st_ll_getstate(st_gdata));
-- kfree_skb(skb);
-+ dev_kfree_skb_irq(skb);
- break;
- }
-
-@@ -492,7 +493,7 @@ void st_tx_wakeup(struct st_data_s *st_data)
- spin_unlock_irqrestore(&st_data->lock, flags);
- break;
- }
-- kfree_skb(skb);
-+ dev_kfree_skb_irq(skb);
- spin_unlock_irqrestore(&st_data->lock, flags);
- }
- /* if wake-up is set in another context- restart sending */
-diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
-index 3a8f27c3e310a..f9a5cffa64b1f 100644
---- a/drivers/mmc/core/block.c
-+++ b/drivers/mmc/core/block.c
-@@ -1482,6 +1482,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
- blk_mq_requeue_request(req, true);
- else
- __blk_mq_end_request(req, BLK_STS_OK);
-+ } else if (mq->in_recovery) {
-+ blk_mq_requeue_request(req, true);
- } else {
- blk_mq_end_request(req, BLK_STS_OK);
- }
-@@ -2381,8 +2383,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
- }
- ret = mmc_blk_cqe_issue_flush(mq, req);
- break;
-- case REQ_OP_READ:
- case REQ_OP_WRITE:
-+ card->written_flag = true;
-+ fallthrough;
-+ case REQ_OP_READ:
- if (host->cqe_enabled)
- ret = mmc_blk_cqe_issue_rw_rq(mq, req);
- else
-diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
-index 4edf9057fa79d..b7754a1b8d978 100644
---- a/drivers/mmc/core/card.h
-+++ b/drivers/mmc/core/card.h
-@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
- return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
- }
-
-+static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
-+{
-+ return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
-+}
- #endif
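
card.h grows another static-inline predicate over the card's quirk bitmask, which keeps call sites such as _mmc_flush_cache() readable. A sketch of the flag-plus-inline-helper pattern; the QUIRK_* values here are stand-ins, not the kernel's MMC_QUIRK_* constants.

#include <stdio.h>

/* Hypothetical quirk bits standing in for the MMC_QUIRK_* flags. */
#define QUIRK_TRIM_BROKEN               (1u << 0)
#define QUIRK_BROKEN_CACHE_FLUSH        (1u << 1)

struct card { unsigned int quirks; };

static inline int card_broken_cache_flush(const struct card *c)
{
        return c->quirks & QUIRK_BROKEN_CACHE_FLUSH;
}

int main(void)
{
        struct card c = { QUIRK_TRIM_BROKEN | QUIRK_BROKEN_CACHE_FLUSH };

        if (card_broken_cache_flush(&c))
                printf("skip flush until something was written\n");
        return 0;
}
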
-diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
-index 3d3e0ca526148..a8c17b4cd7379 100644
---- a/drivers/mmc/core/core.c
-+++ b/drivers/mmc/core/core.c
-@@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
- cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
-- mmc_wait_for_cmd(host, &cmd, 0);
-+ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
-+
-+ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = MMC_CMDQ_TASK_MGMT;
-@@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
- cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
-- err = mmc_wait_for_cmd(host, &cmd, 0);
-+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
-
- host->cqe_ops->cqe_recovery_finish(host);
-
-+ if (err)
-+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
-+
- mmc_retune_release(host);
-
- return err;
-diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
-index 4a4bab9aa7263..a46ce0868fe1f 100644
---- a/drivers/mmc/core/mmc.c
-+++ b/drivers/mmc/core/mmc.c
-@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
- case 3: /* MMC v3.1 - v3.3 */
- case 4: /* MMC v4 */
- card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
-- card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
-+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
- card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
- card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
- card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
-@@ -2081,13 +2081,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
- {
- int err = 0;
-
-+ if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
-+ return 0;
-+
- if (_mmc_cache_enabled(host)) {
- err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1,
- CACHE_FLUSH_TIMEOUT_MS);
- if (err)
-- pr_err("%s: cache flush error %d\n",
-- mmc_hostname(host), err);
-+ pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
-+ else
-+ host->card->written_flag = false;
- }
-
- return err;
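
The mmc.c hunks above widen the decoded OEM ID from 8 to 16 bits of the CID and skip cache flushes on quirky cards until a write has actually occurred. On the first point, here is a simplified, runnable stand-in for the kernel's UNSTUFF_BITS(), showing how fields are pulled out of the four 32-bit response words; resp[0] holds bits 127..96, and the example CID value is invented.

#include <stdint.h>
#include <stdio.h>

/*
 * Extract 'size' bits starting at bit 'start' from a 128-bit value
 * stored as four 32-bit words, most significant word first.
 */
static uint32_t unstuff_bits(const uint32_t resp[4], unsigned int start,
                             unsigned int size)
{
        uint32_t mask = (size < 32) ? ((1u << size) - 1) : 0xffffffffu;
        int off = 3 - (start / 32);
        int shft = start % 32;
        uint32_t val = resp[off] >> shft;

        if (size + shft > 32)
                val |= resp[off - 1] << (32 - shft);
        return val & mask;
}

int main(void)
{
        /* Example CID with OEM ID 0x4d54 ("MT") in bits 119:104. */
        uint32_t cid[4] = { 0x134d5400, 0, 0, 0 };

        printf("8-bit read:  0x%02x\n", unstuff_bits(cid, 104, 8));
        printf("16-bit read: 0x%04x\n", unstuff_bits(cid, 104, 16));
        return 0;
}

The 8-bit read returns only the low byte (0x54), which is exactly the truncation the fix removes.
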
-diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
-index 32b64b564fb1f..cca71867bc4ad 100644
---- a/drivers/mmc/core/quirks.h
-+++ b/drivers/mmc/core/quirks.h
-@@ -110,11 +110,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
- MMC_QUIRK_TRIM_BROKEN),
-
- /*
-- * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
-- * support being used to offload WRITE_ZEROES.
-+ * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
-+ * WRITE_ZEROES offloading. It also supports caching, but the cache can
-+ * only be flushed after a write has occurred.
- */
- MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
-- MMC_QUIRK_TRIM_BROKEN),
-+ MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
-
- /*
- * Kingston EMMC04G-M627 advertises TRIM but it does not seems to
-diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
-index b3d7d6d8d6548..41e94cd141098 100644
---- a/drivers/mmc/host/cqhci-core.c
-+++ b/drivers/mmc/host/cqhci-core.c
-@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
- ret = cqhci_tasks_cleared(cq_host);
-
- if (!ret)
-- pr_debug("%s: cqhci: Failed to clear tasks\n",
-- mmc_hostname(mmc));
-+ pr_warn("%s: cqhci: Failed to clear tasks\n",
-+ mmc_hostname(mmc));
-
- return ret;
- }
-@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
- ret = cqhci_halted(cq_host);
-
- if (!ret)
-- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
-+ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
-
- return ret;
- }
-@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
- /*
- * After halting we expect to be able to use the command line. We interpret the
- * failure to halt to mean the data lines might still be in use (and the upper
-- * layers will need to send a STOP command), so we set the timeout based on a
-- * generous command timeout.
-+ * layers will need to send a STOP command), however failing to halt complicates
-+ * the recovery, so set a timeout that would reasonably allow I/O to complete.
- */
--#define CQHCI_START_HALT_TIMEOUT 5
-+#define CQHCI_START_HALT_TIMEOUT 500
-
- static void cqhci_recovery_start(struct mmc_host *mmc)
- {
-@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
-
- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
-
-- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-- ok = false;
--
- /*
- * The specification contradicts itself, by saying that tasks cannot be
- * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
- * be disabled/re-enabled, but not to disable before clearing tasks.
- * Have a go anyway.
- */
-- if (!ok) {
-- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
-- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
-- cqcfg &= ~CQHCI_ENABLE;
-- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-- cqcfg |= CQHCI_ENABLE;
-- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-- /* Be sure that there are no tasks */
-- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
-- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-- ok = false;
-- WARN_ON(!ok);
-- }
-+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-+ ok = false;
-+
-+ /* Disable to make sure tasks really are cleared */
-+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
-+ cqcfg &= ~CQHCI_ENABLE;
-+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-+
-+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
-+ cqcfg |= CQHCI_ENABLE;
-+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-+
-+ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
-+
-+ if (!ok)
-+ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
-
- cqhci_recover_mrqs(cq_host);
-
-diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
-index 9837dab096e64..c7c067b9415a4 100644
---- a/drivers/mmc/host/meson-gx-mmc.c
-+++ b/drivers/mmc/host/meson-gx-mmc.c
-@@ -801,7 +801,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
-
- cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
- cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
-- cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
-
- meson_mmc_set_response_bits(cmd, &cmd_cfg);
-
-diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
-index 109d4b010f978..77911a57b12cf 100644
---- a/drivers/mmc/host/sdhci-pci-gli.c
-+++ b/drivers/mmc/host/sdhci-pci-gli.c
-@@ -25,6 +25,12 @@
- #define GLI_9750_WT_EN_ON 0x1
- #define GLI_9750_WT_EN_OFF 0x0
-
-+#define PCI_GLI_9750_PM_CTRL 0xFC
-+#define PCI_GLI_9750_PM_STATE GENMASK(1, 0)
-+
-+#define PCI_GLI_9750_CORRERR_MASK 0x214
-+#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
-+
- #define SDHCI_GLI_9750_CFG2 0x848
- #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24)
- #define GLI_9750_CFG2_L1DLY_VALUE 0x1F
-@@ -149,6 +155,9 @@
- #define PCI_GLI_9755_PM_CTRL 0xFC
- #define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
-
-+#define PCI_GLI_9755_CORRERR_MASK 0x214
-+#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
-+
- #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
- #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
-
-@@ -536,8 +545,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
-
- static void gl9750_hw_setting(struct sdhci_host *host)
- {
-+ struct sdhci_pci_slot *slot = sdhci_priv(host);
-+ struct pci_dev *pdev;
- u32 value;
-
-+ pdev = slot->chip->pdev;
-+
- gl9750_wt_on(host);
-
- value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
-@@ -547,6 +560,18 @@ static void gl9750_hw_setting(struct sdhci_host *host)
- GLI_9750_CFG2_L1DLY_VALUE);
- sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
-
-+ /* toggle PM state to allow GL9750 to enter ASPM L1.2 */
-+ pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
-+ value |= PCI_GLI_9750_PM_STATE;
-+ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
-+ value &= ~PCI_GLI_9750_PM_STATE;
-+ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
-+
-+ /* mask the replay timer timeout of AER */
-+ pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
-+ value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
-+ pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
-+
- gl9750_wt_off(host);
- }
-
-@@ -756,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
- value &= ~PCI_GLI_9755_PM_STATE;
- pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
-
-+ /* mask the replay timer timeout of AER */
-+ pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
-+ value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
-+ pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
-+
- gl9755_wt_off(pdev);
- }
-
-@@ -1159,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
- sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
- }
-
-+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
-+ bool enable)
-+{
-+ struct pci_dev *pdev = slot->chip->pdev;
-+ u32 value;
-+
-+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
-+ value &= ~GLI_9763E_VHS_REV;
-+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
-+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
-+
-+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
-+
-+ if (enable)
-+ value &= ~GLI_9763E_CFG_LPSN_DIS;
-+ else
-+ value |= GLI_9763E_CFG_LPSN_DIS;
-+
-+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
-+
-+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
-+ value &= ~GLI_9763E_VHS_REV;
-+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
-+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
-+}
-+
- static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
- unsigned int timing)
- {
-@@ -1267,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
- if (ret)
- goto cleanup;
-
-+ /* Disable LPM negotiation to avoid entering L1 state. */
-+ gl9763e_set_low_power_negotiation(slot, false);
-+
- return 0;
-
- cleanup:
-@@ -1310,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
- }
-
- #ifdef CONFIG_PM
--static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
--{
-- struct pci_dev *pdev = slot->chip->pdev;
-- u32 value;
--
-- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
-- value &= ~GLI_9763E_VHS_REV;
-- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
-- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
--
-- pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
--
-- if (enable)
-- value &= ~GLI_9763E_CFG_LPSN_DIS;
-- else
-- value |= GLI_9763E_CFG_LPSN_DIS;
--
-- pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
--
-- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
-- value &= ~GLI_9763E_VHS_REV;
-- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
-- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
--}
--
- static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
- {
- struct sdhci_pci_slot *slot = chip->slots[0];
-diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
-index 6b84ba27e6ab0..6b8a57e2d20f0 100644
---- a/drivers/mmc/host/sdhci-sprd.c
-+++ b/drivers/mmc/host/sdhci-sprd.c
-@@ -416,12 +416,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
- mmc_request_done(host->mmc, mrq);
- }
-
-+static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
-+ unsigned short vdd)
-+{
-+ struct mmc_host *mmc = host->mmc;
-+
-+ switch (mode) {
-+ case MMC_POWER_OFF:
-+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
-+
-+ mmc_regulator_disable_vqmmc(mmc);
-+ break;
-+ case MMC_POWER_ON:
-+ mmc_regulator_enable_vqmmc(mmc);
-+ break;
-+ case MMC_POWER_UP:
-+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
-+ break;
-+ }
-+}
-+
- static struct sdhci_ops sdhci_sprd_ops = {
- .read_l = sdhci_sprd_readl,
- .write_l = sdhci_sprd_writel,
- .write_w = sdhci_sprd_writew,
- .write_b = sdhci_sprd_writeb,
- .set_clock = sdhci_sprd_set_clock,
-+ .set_power = sdhci_sprd_set_power,
- .get_max_clock = sdhci_sprd_get_max_clock,
- .get_min_clock = sdhci_sprd_get_min_clock,
- .set_bus_width = sdhci_set_bus_width,
-@@ -823,6 +844,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
- host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_DDR50);
-
-+ ret = mmc_regulator_get_supply(host->mmc);
-+ if (ret)
-+ goto pm_runtime_disable;
-+
- ret = sdhci_setup_host(host);
- if (ret)
- goto pm_runtime_disable;
-diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
-index c125485ba80e9..967bd2dfcda1b 100644
---- a/drivers/mmc/host/sdhci_am654.c
-+++ b/drivers/mmc/host/sdhci_am654.c
-@@ -598,7 +598,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
- return 0;
- }
-
-- for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
-+ for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
-
- ret = device_property_read_u32(dev, td[i].otap_binding,
- &sdhci_am654->otap_del_sel[i]);
-diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
-index 9ec593d52f0fa..cef0e716ad16f 100644
---- a/drivers/mmc/host/vub300.c
-+++ b/drivers/mmc/host/vub300.c
-@@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface,
- vub300->read_only =
- (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
- } else {
-+ retval = -EINVAL;
- goto error5;
- }
- usb_set_intfdata(interface, vub300);
-diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
-index 11b06fefaa0e2..c10693ba265ba 100644
---- a/drivers/mtd/chips/cfi_cmdset_0001.c
-+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
-@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
- extra_size = 0;
-
- /* Protection Register info */
-- if (extp->NumProtectionFields)
-+ if (extp->NumProtectionFields) {
-+ struct cfi_intelext_otpinfo *otp =
-+ (struct cfi_intelext_otpinfo *)&extp->extra[0];
-+
- extra_size += (extp->NumProtectionFields - 1) *
-- sizeof(struct cfi_intelext_otpinfo);
-+ sizeof(struct cfi_intelext_otpinfo);
-+
-+ if (extp_size >= sizeof(*extp) + extra_size) {
-+ int i;
-+
-+ /* Do some byteswapping if necessary */
-+ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
-+ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
-+ otp->FactGroups = le16_to_cpu(otp->FactGroups);
-+ otp->UserGroups = le16_to_cpu(otp->UserGroups);
-+ otp++;
-+ }
-+ }
-+ }
- }
-
- if (extp->MinorVersion >= '1') {
-diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
-index cb5d88f42297b..f0ad2308f6d50 100644
---- a/drivers/mtd/nand/raw/intel-nand-controller.c
-+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
-@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
- ebu_host->cs_num = cs;
-
- resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
-+ if (!resname) {
-+ ret = -ENOMEM;
-+ goto err_of_node_put;
-+ }
-+
- ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
- resname);
- if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
-@@ -649,6 +654,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
- }
-
- resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
-+ if (!resname) {
-+ ret = -ENOMEM;
-+ goto err_cleanup_dma;
-+ }
-+
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
- if (!res) {
- ret = -EINVAL;
-diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
-index 25e3c1cb605e7..a506e658d4624 100644
---- a/drivers/mtd/nand/raw/meson_nand.c
-+++ b/drivers/mtd/nand/raw/meson_nand.c
-@@ -1134,6 +1134,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
- init.name = devm_kasprintf(nfc->dev,
- GFP_KERNEL, "%s#div",
- dev_name(nfc->dev));
-+ if (!init.name)
-+ return -ENOMEM;
-+
- init.ops = &clk_divider_ops;
- nfc_divider_parent_data[0].fw_name = "device";
- init.parent_data = nfc_divider_parent_data;
-diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
-index eb0b9d16e8dae..a553e3ac8ff41 100644
---- a/drivers/mtd/nand/raw/tegra_nand.c
-+++ b/drivers/mtd/nand/raw/tegra_nand.c
-@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
- init_completion(&ctrl->dma_complete);
-
- ctrl->irq = platform_get_irq(pdev, 0);
-+ if (ctrl->irq < 0) {
-+ err = ctrl->irq;
-+ goto err_put_pm;
-+ }
- err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
- dev_name(&pdev->dev), ctrl);
- if (err) {
-diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index 51d47eda1c873..8e6cc0e133b7f 100644
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -1500,6 +1500,10 @@ done:
- static void bond_setup_by_slave(struct net_device *bond_dev,
- struct net_device *slave_dev)
- {
-+ bool was_up = !!(bond_dev->flags & IFF_UP);
-+
-+ dev_close(bond_dev);
-+
- bond_dev->header_ops = slave_dev->header_ops;
-
- bond_dev->type = slave_dev->type;
-@@ -1514,6 +1518,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
- bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
- bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
- }
-+ if (was_up)
-+ dev_open(bond_dev, NULL);
- }
-
- /* On bonding slaves other than the currently active slave, suppress
-diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
-index 7f9334a8af500..735d5de3caa0e 100644
---- a/drivers/net/can/dev/dev.c
-+++ b/drivers/net/can/dev/dev.c
-@@ -132,7 +132,8 @@ static void can_restart(struct net_device *dev)
- struct can_frame *cf;
- int err;
-
-- BUG_ON(netif_carrier_ok(dev));
-+ if (netif_carrier_ok(dev))
-+ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
-
- /* No synchronization needed because the device is bus-off and
- * no messages can come in or go out.
-@@ -153,11 +154,12 @@ restart:
- priv->can_stats.restarts++;
-
- /* Now restart the device */
-- err = priv->do_set_mode(dev, CAN_MODE_START);
--
- netif_carrier_on(dev);
-- if (err)
-+ err = priv->do_set_mode(dev, CAN_MODE_START);
-+ if (err) {
- netdev_err(dev, "Error %d during restart", err);
-+ netif_carrier_off(dev);
-+ }
- }
-
- static void can_restart_work(struct work_struct *work)
-diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
-index f6d05b3ef59ab..3ebd4f779b9bd 100644
---- a/drivers/net/can/dev/skb.c
-+++ b/drivers/net/can/dev/skb.c
-@@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- {
- struct can_priv *priv = netdev_priv(dev);
-
-- BUG_ON(idx >= priv->echo_skb_max);
-+ if (idx >= priv->echo_skb_max) {
-+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
-+ __func__, idx, priv->echo_skb_max);
-+ return -EINVAL;
-+ }
-
- /* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) ||
-diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
-index 0c7f7505632cd..5e3a72b7c4691 100644
---- a/drivers/net/can/usb/etas_es58x/es58x_core.c
-+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
-@@ -2230,6 +2230,7 @@ static int es58x_probe(struct usb_interface *intf,
-
- for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
- int ret = es58x_init_netdev(es58x_dev, ch_idx);
-+
- if (ret) {
- es58x_free_netdevs(es58x_dev);
- return ret;
-diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
-index c1ba1a4e8857b..2e183bdeedd72 100644
---- a/drivers/net/can/usb/etas_es58x/es58x_core.h
-+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
-@@ -378,13 +378,13 @@ struct es58x_sw_version {
-
- /**
- * struct es58x_hw_revision - Hardware revision number.
-- * @letter: Revision letter.
-+ * @letter: Revision letter, an alphanumeric character.
- * @major: Version major number, represented on three digits.
- * @minor: Version minor number, represented on three digits.
- *
- * The hardware revision uses its own format: "axxx/xxx" where 'a' is
-- * a letter and 'x' a digit. It can be retrieved from the product
-- * information string.
-+ * an alphanumeric character and 'x' a digit. It can be retrieved from
-+ * the product information string.
- */
- struct es58x_hw_revision {
- char letter;
-diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
-index 9fba29e2f57c6..635edeb8f68cd 100644
---- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
-+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
-@@ -125,14 +125,28 @@ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
- * firmware version, the bootloader version and the hardware
- * revision.
- *
-- * If the function fails, simply emit a log message and continue
-- * because product information is not critical for the driver to
-- * operate.
-+ * If the function fails, set the version or revision to an invalid
-+ * value and emit an informational message. Continue probing because the
-+ * product information is not critical for the driver to operate.
- */
- void es58x_parse_product_info(struct es58x_device *es58x_dev)
- {
-+ static const struct es58x_sw_version sw_version_not_set = {
-+ .major = -1,
-+ .minor = -1,
-+ .revision = -1,
-+ };
-+ static const struct es58x_hw_revision hw_revision_not_set = {
-+ .letter = '\0',
-+ .major = -1,
-+ .minor = -1,
-+ };
- char *prod_info;
-
-+ es58x_dev->firmware_version = sw_version_not_set;
-+ es58x_dev->bootloader_version = sw_version_not_set;
-+ es58x_dev->hardware_revision = hw_revision_not_set;
-+
- prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX);
- if (!prod_info) {
- dev_warn(es58x_dev->dev,
-@@ -150,29 +164,36 @@ void es58x_parse_product_info(struct es58x_device *es58x_dev)
- }
-
- /**
-- * es58x_sw_version_is_set() - Check if the version is a valid number.
-+ * es58x_sw_version_is_valid() - Check if the version is a valid number.
- * @sw_ver: Version number of either the firmware or the bootloader.
- *
-- * If &es58x_sw_version.major, &es58x_sw_version.minor and
-- * &es58x_sw_version.revision are all zero, the product string could
-- * not be parsed and the version number is invalid.
-+ * If any of the software version sub-numbers do not fit in two
-+ * digits, the version is invalid, most probably because the product
-+ * string could not be parsed.
-+ *
-+ * Return: @true if the software version is valid, @false otherwise.
- */
--static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver)
-+static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver)
- {
-- return sw_ver->major || sw_ver->minor || sw_ver->revision;
-+ return sw_ver->major < 100 && sw_ver->minor < 100 &&
-+ sw_ver->revision < 100;
- }
-
- /**
-- * es58x_hw_revision_is_set() - Check if the revision is a valid number.
-+ * es58x_hw_revision_is_valid() - Check if the revision is a valid number.
- * @hw_rev: Revision number of the hardware.
- *
-- * If &es58x_hw_revision.letter is the null character, the product
-- * string could not be parsed and the hardware revision number is
-- * invalid.
-+ * If &es58x_hw_revision.letter is not an alphanumeric character or if
-+ * any of the hardware revision sub-numbers do not fit in three
-+ * digits, the revision is invalid, most probably because the product
-+ * string could not be parsed.
-+ *
-+ * Return: @true if the hardware revision is valid, @false otherwise.
- */
--static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev)
-+static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev)
- {
-- return hw_rev->letter != '\0';
-+ return isalnum(hw_rev->letter) && hw_rev->major < 1000 &&
-+ hw_rev->minor < 1000;
- }
-
- /**
-@@ -197,7 +218,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
- char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
- int ret = 0;
-
-- if (es58x_sw_version_is_set(fw_ver)) {
-+ if (es58x_sw_version_is_valid(fw_ver)) {
- snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
- fw_ver->major, fw_ver->minor, fw_ver->revision);
- ret = devlink_info_version_running_put(req,
-@@ -207,7 +228,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
- return ret;
- }
-
-- if (es58x_sw_version_is_set(bl_ver)) {
-+ if (es58x_sw_version_is_valid(bl_ver)) {
- snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
- bl_ver->major, bl_ver->minor, bl_ver->revision);
- ret = devlink_info_version_running_put(req,
-@@ -217,7 +238,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
- return ret;
- }
-
-- if (es58x_hw_revision_is_set(hw_rev)) {
-+ if (es58x_hw_revision_is_valid(hw_rev)) {
- snprintf(buf, sizeof(buf), "%c%03u/%03u",
- hw_rev->letter, hw_rev->major, hw_rev->minor);
- ret = devlink_info_version_fixed_put(req,
-diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
-index d8ab2b77d201e..167a86f39f277 100644
---- a/drivers/net/dsa/lan9303_mdio.c
-+++ b/drivers/net/dsa/lan9303_mdio.c
-@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
- struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
-
- reg <<= 2; /* reg num to offset */
-- mutex_lock(&sw_dev->device->bus->mdio_lock);
-+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
- lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
- lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
- mutex_unlock(&sw_dev->device->bus->mdio_lock);
-@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
- struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
-
- reg <<= 2; /* reg num to offset */
-- mutex_lock(&sw_dev->device->bus->mdio_lock);
-+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
- *val = lan9303_mdio_real_read(sw_dev->device, reg);
- *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
- mutex_unlock(&sw_dev->device->bus->mdio_lock);
-diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
-index ab434a77b059a..dc7f9b99f409f 100644
---- a/drivers/net/dsa/mv88e6xxx/chip.c
-+++ b/drivers/net/dsa/mv88e6xxx/chip.c
-@@ -577,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
- }
-
-+static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
-+ struct phylink_config *config)
-+{
-+ unsigned long *supported = config->supported_interfaces;
-+
-+ /* Translate the default cmode */
-+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
-+
-+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
-+ MAC_1000FD;
-+}
-+
- static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
- {
- u16 reg, val;
-@@ -3880,7 +3892,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
-- if (chip->info->ops->pcs_ops->pcs_init) {
-+ if (chip->info->ops->pcs_ops &&
-+ chip->info->ops->pcs_ops->pcs_init) {
- err = chip->info->ops->pcs_ops->pcs_init(chip, port);
- if (err)
- return err;
-@@ -3895,7 +3908,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
-
- mv88e6xxx_teardown_devlink_regions_port(ds, port);
-
-- if (chip->info->ops->pcs_ops->pcs_teardown)
-+ if (chip->info->ops->pcs_ops &&
-+ chip->info->ops->pcs_ops->pcs_teardown)
- chip->info->ops->pcs_ops->pcs_teardown(chip, port);
- }
-
-@@ -4340,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
- .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .stu_getnext = mv88e6352_g1_stu_getnext,
- .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
-- .phylink_get_caps = mv88e6185_phylink_get_caps,
-+ .phylink_get_caps = mv88e6351_phylink_get_caps,
- };
-
- static const struct mv88e6xxx_ops mv88e6172_ops = {
-@@ -4440,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
- .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .stu_getnext = mv88e6352_g1_stu_getnext,
- .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
-- .phylink_get_caps = mv88e6185_phylink_get_caps,
-+ .phylink_get_caps = mv88e6351_phylink_get_caps,
- };
-
- static const struct mv88e6xxx_ops mv88e6176_ops = {
-@@ -5069,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
- .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .stu_getnext = mv88e6352_g1_stu_getnext,
- .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
-- .phylink_get_caps = mv88e6185_phylink_get_caps,
-+ .phylink_get_caps = mv88e6351_phylink_get_caps,
- };
-
- static const struct mv88e6xxx_ops mv88e6351_ops = {
-@@ -5117,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
- .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .avb_ops = &mv88e6352_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
-- .phylink_get_caps = mv88e6185_phylink_get_caps,
-+ .phylink_get_caps = mv88e6351_phylink_get_caps,
- };
-
- static const struct mv88e6xxx_ops mv88e6352_ops = {
-diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
-index 045fe133f6ee9..5beadabc21361 100644
---- a/drivers/net/ethernet/amd/pds_core/adminq.c
-+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
-@@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
- }
-
- queue_work(pdsc->wq, &qcq->work);
-- pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
-+ pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
-
- return IRQ_HANDLED;
- }
-diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
-index e545fafc48196..b1c1f1007b065 100644
---- a/drivers/net/ethernet/amd/pds_core/core.h
-+++ b/drivers/net/ethernet/amd/pds_core/core.h
-@@ -15,7 +15,7 @@
- #define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
-
- #define PDSC_WATCHDOG_SECS 5
--#define PDSC_QUEUE_NAME_MAX_SZ 32
-+#define PDSC_QUEUE_NAME_MAX_SZ 16
- #define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
- #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
- #define PDSC_TEARDOWN_RECOVERY false
-diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
-index f77cd9f5a2fda..eb178728edba9 100644
---- a/drivers/net/ethernet/amd/pds_core/dev.c
-+++ b/drivers/net/ethernet/amd/pds_core/dev.c
-@@ -254,10 +254,14 @@ static int pdsc_identify(struct pdsc *pdsc)
- struct pds_core_drv_identity drv = {};
- size_t sz;
- int err;
-+ int n;
-
- drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
-- snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
-- "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
-+ /* Catching the return quiets a Wformat-truncation complaint */
-+ n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
-+ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
-+ if (n > sizeof(drv.driver_ver_str))
-+ dev_dbg(pdsc->dev, "release name truncated, don't care\n");
-
- /* Next let's get some info about the device
- * We use the devcmd_lock at this level in order to
-diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
-index d9607033bbf21..d2abf32b93fe3 100644
---- a/drivers/net/ethernet/amd/pds_core/devlink.c
-+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
-@@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
- struct pds_core_fw_list_info fw_list;
- struct pdsc *pdsc = devlink_priv(dl);
- union pds_core_dev_comp comp;
-- char buf[16];
-+ char buf[32];
- int listlen;
- int err;
- int i;
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-index 614c0278419bc..6b73648b37793 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
- static void xgbe_service_timer(struct timer_list *t)
- {
- struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
-+ struct xgbe_channel *channel;
-+ unsigned int i;
-
- queue_work(pdata->dev_workqueue, &pdata->service_work);
-
- mod_timer(&pdata->service_timer, jiffies + HZ);
-+
-+ if (!pdata->tx_usecs)
-+ return;
-+
-+ for (i = 0; i < pdata->channel_count; i++) {
-+ channel = pdata->channel[i];
-+ if (!channel->tx_ring || channel->tx_timer_active)
-+ break;
-+ channel->tx_timer_active = 1;
-+ mod_timer(&channel->tx_timer,
-+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
-+ }
- }
-
- static void xgbe_init_timers(struct xgbe_prv_data *pdata)
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
-index 6e83ff59172a3..32fab5e772462 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
-@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
-
- cmd->base.phy_address = pdata->phy.address;
-
-- cmd->base.autoneg = pdata->phy.autoneg;
-- cmd->base.speed = pdata->phy.speed;
-- cmd->base.duplex = pdata->phy.duplex;
-+ if (netif_carrier_ok(netdev)) {
-+ cmd->base.speed = pdata->phy.speed;
-+ cmd->base.duplex = pdata->phy.duplex;
-+ } else {
-+ cmd->base.speed = SPEED_UNKNOWN;
-+ cmd->base.duplex = DUPLEX_UNKNOWN;
-+ }
-
-+ cmd->base.autoneg = pdata->phy.autoneg;
- cmd->base.port = PORT_NONE;
-
- XGBE_LM_COPY(cmd, supported, lks, supported);
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
-index 32d2c6fac6526..4a2dc705b5280 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
-@@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
- if (pdata->phy.duplex != DUPLEX_FULL)
- return -EINVAL;
-
-- xgbe_set_mode(pdata, mode);
-+ /* Force the mode change for SFI in Fixed PHY config.
-+	 * Fixed PHY configs need the PLL to be enabled while doing a mode set.
-+	 * When the SFP module isn't connected during boot, the driver assumes
-+ * AN is ON and attempts autonegotiation. However, if the connected
-+ * SFP comes up in Fixed PHY config, the link will not come up as
-+ * PLL isn't enabled while the initial mode set command is issued.
-+ * So, force the mode change for SFI in Fixed PHY configuration to
-+ * fix link issues.
-+ */
-+ if (mode == XGBE_MODE_SFI)
-+ xgbe_change_mode(pdata, mode);
-+ else
-+ xgbe_set_mode(pdata, mode);
-
- return 0;
- }
-diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
-index 43d821fe7a542..63ba64dbb7310 100644
---- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
-+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
-@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
- u16 next_to_use;
- u16 next_to_clean;
- struct napi_struct napi;
-- struct page *rx_page;
-- unsigned int rx_page_offset;
- };
-
- /* board specific private data structure */
- struct atl1c_adapter {
- struct net_device *netdev;
- struct pci_dev *pdev;
-- unsigned int rx_frag_size;
- struct atl1c_hw hw;
- struct atl1c_hw_stats hw_stats;
- struct mii_if_info mii; /* MII interface info */
-diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-index 940c5d1ff9cfc..74b78164cf74a 100644
---- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
- static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
- struct net_device *dev)
- {
-- unsigned int head_size;
- int mtu = dev->mtu;
-
- adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
- roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
--
-- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
-- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-- adapter->rx_frag_size = roundup_pow_of_two(head_size);
- }
-
- static netdev_features_t atl1c_fix_features(struct net_device *netdev,
-@@ -964,7 +959,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
- static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
- {
- struct pci_dev *pdev = adapter->pdev;
-- int i;
-
- dma_free_coherent(&pdev->dev, adapter->ring_header.size,
- adapter->ring_header.desc, adapter->ring_header.dma);
-@@ -977,12 +971,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
- kfree(adapter->tpd_ring[0].buffer_info);
- adapter->tpd_ring[0].buffer_info = NULL;
- }
-- for (i = 0; i < adapter->rx_queue_count; ++i) {
-- if (adapter->rrd_ring[i].rx_page) {
-- put_page(adapter->rrd_ring[i].rx_page);
-- adapter->rrd_ring[i].rx_page = NULL;
-- }
-- }
- }
-
- /**
-@@ -1754,48 +1742,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
- skb_checksum_none_assert(skb);
- }
-
--static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
-- u32 queue, bool napi_mode)
--{
-- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
-- struct sk_buff *skb;
-- struct page *page;
--
-- if (adapter->rx_frag_size > PAGE_SIZE) {
-- if (likely(napi_mode))
-- return napi_alloc_skb(&rrd_ring->napi,
-- adapter->rx_buffer_len);
-- else
-- return netdev_alloc_skb_ip_align(adapter->netdev,
-- adapter->rx_buffer_len);
-- }
--
-- page = rrd_ring->rx_page;
-- if (!page) {
-- page = alloc_page(GFP_ATOMIC);
-- if (unlikely(!page))
-- return NULL;
-- rrd_ring->rx_page = page;
-- rrd_ring->rx_page_offset = 0;
-- }
--
-- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
-- adapter->rx_frag_size);
-- if (likely(skb)) {
-- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-- rrd_ring->rx_page_offset += adapter->rx_frag_size;
-- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
-- rrd_ring->rx_page = NULL;
-- else
-- get_page(page);
-- }
-- return skb;
--}
--
- static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
- bool napi_mode)
- {
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
-+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
- struct pci_dev *pdev = adapter->pdev;
- struct atl1c_buffer *buffer_info, *next_info;
- struct sk_buff *skb;
-@@ -1814,13 +1765,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
- while (next_info->flags & ATL1C_BUFFER_FREE) {
- rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
-
-- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
-+		/* When the DMA RX address is set to something like
-+		 * 0x....fc0, it is very likely to cause a DMA
-+		 * RFD overflow issue.
-+		 *
-+		 * To work around it, we allocate the rx skb with 64
-+		 * bytes of extra space, and offset the address whenever
-+		 * 0x....fc0 is detected.
-+ */
-+ if (likely(napi_mode))
-+ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
-+ else
-+ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
- if (unlikely(!skb)) {
- if (netif_msg_rx_err(adapter))
- dev_warn(&pdev->dev, "alloc rx buffer failed\n");
- break;
- }
-
-+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
-+ skb_reserve(skb, 64);
-+
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
-diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
-index 14b311196b8f8..22b00912f7ac8 100644
---- a/drivers/net/ethernet/broadcom/tg3.c
-+++ b/drivers/net/ethernet/broadcom/tg3.c
-@@ -18078,7 +18078,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
- if (netif_running(dev))
- dev_close(dev);
-
-- tg3_power_down(tp);
-+ if (system_state == SYSTEM_POWER_OFF)
-+ tg3_power_down(tp);
-
- rtnl_unlock();
-
-diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
-index 7750702900fa6..6f6525983130e 100644
---- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
-+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
-@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
-
- if (tp->snd_una != snd_una) {
- tp->snd_una = snd_una;
-- tp->rcv_tstamp = tcp_time_stamp(tp);
-+ tp->rcv_tstamp = tcp_jiffies32;
- if (tp->snd_una == tp->snd_nxt &&
- !csk_flag_nochk(csk, CSK_TX_FAILOVER))
- csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
-diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
-index a8b9d1a3e4d57..636949737d72f 100644
---- a/drivers/net/ethernet/cortina/gemini.c
-+++ b/drivers/net/ethernet/cortina/gemini.c
-@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
- .val = CONFIG0_MAXLEN_1536,
- },
- {
-- .max_l3_len = 1542,
-- .val = CONFIG0_MAXLEN_1542,
-+ .max_l3_len = 1548,
-+ .val = CONFIG0_MAXLEN_1548,
- },
- {
- .max_l3_len = 9212,
-@@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
- dma_addr_t mapping;
- unsigned short mtu;
- void *buffer;
-+ int ret;
-
- mtu = ETH_HLEN;
- mtu += netdev->mtu;
-@@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
- word3 |= mtu;
- }
-
-- if (skb->ip_summed != CHECKSUM_NONE) {
-+ if (skb->len >= ETH_FRAME_LEN) {
-+ /* Hardware offloaded checksumming isn't working on frames
-+ * bigger than 1514 bytes. A hypothesis about this is that the
-+ * checksum buffer is only 1518 bytes, so when the frames get
-+ * bigger they get truncated, or the last few bytes get
-+ * overwritten by the FCS.
-+ *
-+ * Just use software checksumming and bypass on bigger frames.
-+ */
-+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
-+ ret = skb_checksum_help(skb);
-+ if (ret)
-+ return ret;
-+ }
-+ word1 |= TSS_BYPASS_BIT;
-+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
-
-+		/* We do not switch off the checksumming on non-TCP/UDP
-+		 * frames: as tests have shown, the checksumming engine
-+		 * is smart enough to see that a frame is not actually TCP
-+		 * or UDP and then just passes it through without any
-+		 * changes to the frame.
-+ */
- if (skb->protocol == htons(ETH_P_IP)) {
- word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
-@@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
- return 0;
- }
-
--static netdev_features_t gmac_fix_features(struct net_device *netdev,
-- netdev_features_t features)
--{
-- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
-- features &= ~GMAC_OFFLOAD_FEATURES;
--
-- return features;
--}
--
- static int gmac_set_features(struct net_device *netdev,
- netdev_features_t features)
- {
-@@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
- .ndo_set_mac_address = gmac_set_mac_address,
- .ndo_get_stats64 = gmac_get_stats64,
- .ndo_change_mtu = gmac_change_mtu,
-- .ndo_fix_features = gmac_fix_features,
- .ndo_set_features = gmac_set_features,
- };
-
-@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
-
- netdev->hw_features = GMAC_OFFLOAD_FEATURES;
- netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
-- /* We can handle jumbo frames up to 10236 bytes so, let's accept
-- * payloads of 10236 bytes minus VLAN and ethernet header
-+ /* We can receive jumbo frames up to 10236 bytes but only
-+	 * transmit 2047 bytes, so let's accept payloads of 2047
-+ * bytes minus VLAN and ethernet header
- */
- netdev->min_mtu = ETH_MIN_MTU;
-- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
-+ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
-
- port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll);
-diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
-index 9fdf77d5eb374..24bb989981f23 100644
---- a/drivers/net/ethernet/cortina/gemini.h
-+++ b/drivers/net/ethernet/cortina/gemini.h
-@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
- #define SOF_BIT 0x80000000
- #define EOF_BIT 0x40000000
- #define EOFIE_BIT BIT(29)
--#define MTU_SIZE_BIT_MASK 0x1fff
-+#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
-
- /* GMAC Tx Descriptor */
- struct gmac_txdesc {
-@@ -787,7 +787,7 @@ union gmac_config0 {
- #define CONFIG0_MAXLEN_1536 0
- #define CONFIG0_MAXLEN_1518 1
- #define CONFIG0_MAXLEN_1522 2
--#define CONFIG0_MAXLEN_1542 3
-+#define CONFIG0_MAXLEN_1548 3
- #define CONFIG0_MAXLEN_9k 4 /* 9212 */
- #define CONFIG0_MAXLEN_10k 5 /* 10236 */
- #define CONFIG0_MAXLEN_1518__6 6
-diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
-index 6e14c918e3fb7..f188fba021a62 100644
---- a/drivers/net/ethernet/engleder/tsnep.h
-+++ b/drivers/net/ethernet/engleder/tsnep.h
-@@ -143,7 +143,7 @@ struct tsnep_rx {
-
- struct tsnep_queue {
- struct tsnep_adapter *adapter;
-- char name[IFNAMSIZ + 9];
-+ char name[IFNAMSIZ + 16];
-
- struct tsnep_tx *tx;
- struct tsnep_rx *rx;
-diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
-index 8b992dc9bb52b..38da2d6c250e6 100644
---- a/drivers/net/ethernet/engleder/tsnep_main.c
-+++ b/drivers/net/ethernet/engleder/tsnep_main.c
-@@ -1779,14 +1779,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
- dev = queue->adapter;
- } else {
- if (queue->tx && queue->rx)
-- sprintf(queue->name, "%s-txrx-%d", name,
-- queue->rx->queue_index);
-+ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
-+ name, queue->rx->queue_index);
- else if (queue->tx)
-- sprintf(queue->name, "%s-tx-%d", name,
-- queue->tx->queue_index);
-+ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
-+ name, queue->tx->queue_index);
- else
-- sprintf(queue->name, "%s-rx-%d", name,
-- queue->rx->queue_index);
-+ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
-+ name, queue->rx->queue_index);
- handler = tsnep_irq_txrx;
- dev = queue;
- }
-diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
-index 15bab41cee48d..888509cf1f210 100644
---- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
-+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
-@@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
-
- memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
-
-- dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
--
- return skb;
- }
-
-@@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa2_eth_drv_stats *percpu_extras;
- struct device *dev = priv->net_dev->dev.parent;
-+ bool recycle_rx_buf = false;
- void *buf_data;
- u32 xdp_act;
-
-@@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- dma_unmap_page(dev, addr, priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
-+ } else {
-+ recycle_rx_buf = true;
- }
- } else if (fd_format == dpaa2_fd_sg) {
- WARN_ON(priv->xdp_prog);
-@@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- goto err_build_skb;
-
- dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
-+
-+ if (recycle_rx_buf)
-+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
- return;
-
- err_build_skb:
-@@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
- dma_addr_t addr;
-
- buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
--
-- /* If there's enough room to align the FD address, do it.
-- * It will help hardware optimize accesses.
-- */
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
- if (aligned_start >= skb->head)
- buffer_start = aligned_start;
-+ else
-+ return -ENOMEM;
-
- /* Store a backpointer to the skb at the beginning of the buffer
- * (in the private data area) such that we can release it
-@@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
- if (err)
- goto err_dl_port_add;
-
-+ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
-+
- err = register_netdev(net_dev);
- if (err < 0) {
- dev_err(dev, "register_netdev() failed\n");
-diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
-index bfb6c96c3b2f0..834cba8c3a416 100644
---- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
-+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
-@@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
-
- static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
- {
-- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
-+ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
-
- /* If we don't have an skb (e.g. XDP buffer), we only need space for
- * the software annotation area
-diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
-index 35461165de0d2..b92e3aa7cd041 100644
---- a/drivers/net/ethernet/freescale/enetc/enetc.c
-+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
-@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
- if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
- priv->num_tx_rings) {
- NL_SET_ERR_MSG_FMT_MOD(extack,
-- "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
-+ "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
- num_xdp_tx_queues,
- priv->min_num_stack_tx_queues,
- priv->num_tx_rings);
-diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
-index 5704b5f57cd0d..5703240474e5b 100644
---- a/drivers/net/ethernet/google/gve/gve_main.c
-+++ b/drivers/net/ethernet/google/gve/gve_main.c
-@@ -190,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
- rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
- priv->rx_cfg.num_queues;
- priv->stats_report_len = struct_size(priv->stats_report, stats,
-- tx_stats_num + rx_stats_num);
-+ size_add(tx_stats_num, rx_stats_num));
- priv->stats_report =
- dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
- &priv->stats_report_bus, GFP_KERNEL);
-@@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
- if (block->tx) {
- if (block->tx->q_num < priv->tx_cfg.num_queues)
- reschedule |= gve_tx_poll(block, budget);
-- else
-+ else if (budget)
- reschedule |= gve_xdp_poll(block, budget);
- }
-
-+ if (!budget)
-+ return 0;
-+
- if (block->rx) {
- work_done = gve_rx_poll(block, budget);
- reschedule |= work_done == budget;
-@@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
- if (block->tx)
- reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
-
-+ if (!budget)
-+ return 0;
-+
- if (block->rx) {
- work_done = gve_rx_poll_dqo(block, budget);
- reschedule |= work_done == budget;
-diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
-index e84a066aa1a40..73655347902d2 100644
---- a/drivers/net/ethernet/google/gve/gve_rx.c
-+++ b/drivers/net/ethernet/google/gve/gve_rx.c
-@@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
-
- feat = block->napi.dev->features;
-
-- /* If budget is 0, do all the work */
-- if (budget == 0)
-- budget = INT_MAX;
--
- if (budget > 0)
- work_done = gve_clean_rx_done(rx, budget, feat);
-
-diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
-index 6957a865cff37..9f6ffc4a54f0b 100644
---- a/drivers/net/ethernet/google/gve/gve_tx.c
-+++ b/drivers/net/ethernet/google/gve/gve_tx.c
-@@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
- bool repoll;
- u32 to_do;
-
-- /* If budget is 0, do all the work */
-- if (budget == 0)
-- budget = INT_MAX;
--
- /* Find out how much work there is to be done */
- nic_done = gve_tx_load_event_counter(priv, tx);
- to_do = min_t(u32, (nic_done - tx->done), budget);
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
-index b8508533878be..4f385a18d288e 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
-@@ -500,11 +500,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
- }
-
- sprintf(result[j++], "%d", i);
-- sprintf(result[j++], "%s", dim_state_str[dim->state]);
-+ sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
-+ dim_state_str[dim->state] : "unknown");
- sprintf(result[j++], "%u", dim->profile_ix);
-- sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
-+ sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
-+ dim_cqe_mode_str[dim->mode] : "unknown");
- sprintf(result[j++], "%s",
-- dim_tune_stat_str[dim->tune_state]);
-+ dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
-+ dim_tune_stat_str[dim->tune_state] : "unknown");
- sprintf(result[j++], "%u", dim->steps_left);
- sprintf(result[j++], "%u", dim->steps_right);
- sprintf(result[j++], "%u", dim->tired);
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
-index cf50368441b78..677cfaa5fe08c 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
-@@ -5140,7 +5140,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
- struct hnae3_handle *h = priv->ae_handle;
-- u8 mac_addr_temp[ETH_ALEN];
-+ u8 mac_addr_temp[ETH_ALEN] = {0};
- int ret = 0;
-
- if (h->ae_algo->ops->get_mac_addr)
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
-index c42574e297476..a61d9fd732b96 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
-@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
- static void hclge_update_fec_stats(struct hclge_dev *hdev);
- static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
- int wait_cnt);
-+static int hclge_update_port_info(struct hclge_dev *hdev);
-
- static struct hnae3_ae_algo ae_algo;
-
-@@ -3043,6 +3044,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
-
- if (state != hdev->hw.mac.link) {
- hdev->hw.mac.link = state;
-+ if (state == HCLGE_LINK_STATUS_UP)
-+ hclge_update_port_info(hdev);
-+
- client->ops->link_status_change(handle, state);
- hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
-@@ -10026,8 +10030,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
- struct hclge_vport_vlan_cfg *vlan, *tmp;
- struct hclge_dev *hdev = vport->back;
-
-- mutex_lock(&hdev->vport_lock);
--
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- if (vlan->vlan_id == vlan_id) {
- if (is_write_tbl && vlan->hd_tbl_status)
-@@ -10042,8 +10044,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
- break;
- }
- }
--
-- mutex_unlock(&hdev->vport_lock);
- }
-
- void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
-@@ -10452,11 +10452,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
- * handle mailbox. Just record the vlan id, and remove it after
- * reset finished.
- */
-+ mutex_lock(&hdev->vport_lock);
- if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
- set_bit(vlan_id, vport->vlan_del_fail_bmap);
-+ mutex_unlock(&hdev->vport_lock);
- return -EBUSY;
-+ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
-+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
- }
-+ mutex_unlock(&hdev->vport_lock);
-
- /* when port base vlan enabled, we use port base vlan as the vlan
- * filter entry. In this case, we don't update vlan filter table
-@@ -10471,17 +10476,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
- }
-
- if (!ret) {
-- if (!is_kill)
-+ if (!is_kill) {
- hclge_add_vport_vlan_table(vport, vlan_id,
- writen_to_tbl);
-- else if (is_kill && vlan_id != 0)
-+ } else if (is_kill && vlan_id != 0) {
-+ mutex_lock(&hdev->vport_lock);
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
-+ mutex_unlock(&hdev->vport_lock);
-+ }
- } else if (is_kill) {
- /* when remove hw vlan filter failed, record the vlan id,
- * and try to remove it from hw later, to be consistence
- * with stack
- */
-+ mutex_lock(&hdev->vport_lock);
- set_bit(vlan_id, vport->vlan_del_fail_bmap);
-+ mutex_unlock(&hdev->vport_lock);
- }
-
- hclge_set_vport_vlan_fltr_change(vport);
-@@ -10521,6 +10531,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
- int i, ret, sync_cnt = 0;
- u16 vlan_id;
-
-+ mutex_lock(&hdev->vport_lock);
- /* start from vport 1 for PF is always alive */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- struct hclge_vport *vport = &hdev->vport[i];
-@@ -10531,21 +10542,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id, vlan_id,
- true);
-- if (ret && ret != -EINVAL)
-+ if (ret && ret != -EINVAL) {
-+ mutex_unlock(&hdev->vport_lock);
- return;
-+ }
-
- clear_bit(vlan_id, vport->vlan_del_fail_bmap);
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- hclge_set_vport_vlan_fltr_change(vport);
-
- sync_cnt++;
-- if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
-+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
-+ mutex_unlock(&hdev->vport_lock);
- return;
-+ }
-
- vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
- VLAN_N_VID);
- }
- }
-+ mutex_unlock(&hdev->vport_lock);
-
- hclge_sync_vlan_fltr_state(hdev);
- }
-@@ -11652,6 +11668,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
- goto err_msi_irq_uninit;
-
- if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
-+ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
- if (hnae3_dev_phy_imp_supported(hdev))
- ret = hclge_update_tp_port_info(hdev);
- else
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
-index a4d68fb216fb9..0aa9beefd1c7e 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
-@@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
- test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
- set_bit(vlan_id, hdev->vlan_del_fail_bmap);
- return -EBUSY;
-+ } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
-+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
- }
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
-@@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
- int ret, sync_cnt = 0;
- u16 vlan_id;
-
-+ if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
-+ return;
-+
-+ rtnl_lock();
- vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
- while (vlan_id != VLAN_N_VID) {
- ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
- vlan_id, true);
- if (ret)
-- return;
-+ break;
-
- clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
- sync_cnt++;
- if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
-- return;
-+ break;
-
- vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
- }
-+ rtnl_unlock();
- }
-
- static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
-@@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
- return HCLGEVF_VECTOR0_EVENT_OTHER;
- }
-
-+static void hclgevf_reset_timer(struct timer_list *t)
-+{
-+ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
-+
-+ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
-+ hclgevf_reset_task_schedule(hdev);
-+}
-+
- static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
- {
-+#define HCLGEVF_RESET_DELAY 5
-+
- enum hclgevf_evt_cause event_cause;
- struct hclgevf_dev *hdev = data;
- u32 clearval;
-@@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
-
- switch (event_cause) {
- case HCLGEVF_VECTOR0_EVENT_RST:
-- hclgevf_reset_task_schedule(hdev);
-+ mod_timer(&hdev->reset_timer,
-+ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
- break;
- case HCLGEVF_VECTOR0_EVENT_MBX:
- hclgevf_mbx_handler(hdev);
-@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
- HCLGEVF_DRIVER_NAME);
-
- hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
-+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
-
- return 0;
-
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
-index 81c16b8c8da29..a73f2bf3a56a6 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
-@@ -219,6 +219,7 @@ struct hclgevf_dev {
- enum hnae3_reset_type reset_level;
- unsigned long reset_pending;
- enum hnae3_reset_type reset_type;
-+ struct timer_list reset_timer;
-
- #define HCLGEVF_RESET_REQUESTED 0
- #define HCLGEVF_RESET_PENDING 1
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
-index bbf7b14079de3..85c2a634c8f96 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
-@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
- i++;
- }
-
-+ /* ensure additional_info will be seen after received_resp */
-+ smp_rmb();
-+
- if (i >= HCLGEVF_MAX_TRY_TIMES) {
- dev_err(&hdev->pdev->dev,
- "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
-@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
- resp->resp_status = hclgevf_resp_to_errno(resp_status);
- memcpy(resp->additional_info, req->msg.resp_data,
- HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
-+
-+ /* ensure additional_info will be seen before setting received_resp */
-+ smp_wmb();
-+
- if (match_id) {
- /* If match_id is not zero, it means PF support match_id.
- * if the match_id is right, VF get the right response, or
-diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
-index de7fd43dc11c8..00ca2b88165cb 100644
---- a/drivers/net/ethernet/intel/i40e/i40e_main.c
-+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
-@@ -16320,11 +16320,15 @@ static void i40e_remove(struct pci_dev *pdev)
- i40e_switch_branch_release(pf->veb[i]);
- }
-
-- /* Now we can shutdown the PF's VSI, just before we kill
-+ /* Now we can shutdown the PF's VSIs, just before we kill
- * adminq and hmc.
- */
-- if (pf->vsi[pf->lan_vsi])
-- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
-+ for (i = pf->num_alloc_vsi; i--;)
-+ if (pf->vsi[i]) {
-+ i40e_vsi_close(pf->vsi[i]);
-+ i40e_vsi_release(pf->vsi[i]);
-+ pf->vsi[i] = NULL;
-+ }
-
- i40e_cloud_filter_exit(pf);
-
-diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
-index d3d6415553ed6..4441b00297f47 100644
---- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
-+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
-@@ -3842,7 +3842,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
- struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi = NULL;
- int aq_ret = 0;
-- int i, ret;
-+ int i;
-
- if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = -EINVAL;
-@@ -3866,8 +3866,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
- }
-
- cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
-- if (!cfilter)
-- return -ENOMEM;
-+ if (!cfilter) {
-+ aq_ret = -ENOMEM;
-+ goto err_out;
-+ }
-
- /* parse destination mac address */
- for (i = 0; i < ETH_ALEN; i++)
-@@ -3915,13 +3917,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
-
- /* Adding cloud filter programmed as TC filter */
- if (tcf.dst_port)
-- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
-+ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
- else
-- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
-- if (ret) {
-+ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
-+ if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
-- vf->vf_id, ERR_PTR(ret),
-+ vf->vf_id, ERR_PTR(aq_ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto err_free;
- }
-diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
-index e110ba3461857..d8d7b62ceb24e 100644
---- a/drivers/net/ethernet/intel/iavf/iavf.h
-+++ b/drivers/net/ethernet/intel/iavf/iavf.h
-@@ -298,8 +298,6 @@ struct iavf_adapter {
- #define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
- #define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
- #define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
--#define IAVF_FLAG_PROMISC_ON BIT(13)
--#define IAVF_FLAG_ALLMULTI_ON BIT(14)
- #define IAVF_FLAG_LEGACY_RX BIT(15)
- #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
- #define IAVF_FLAG_QUEUES_DISABLED BIT(17)
-@@ -325,10 +323,7 @@ struct iavf_adapter {
- #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
- #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
- #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
--#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
--#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
--#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
--#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
-+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
- #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
- #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
- #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
-@@ -365,6 +360,12 @@ struct iavf_adapter {
- (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
-
-+ /* Lock to prevent possible clobbering of
-+ * current_netdev_promisc_flags
-+ */
-+ spinlock_t current_netdev_promisc_flags_lock;
-+ netdev_features_t current_netdev_promisc_flags;
-+
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-@@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
- void iavf_del_ether_addrs(struct iavf_adapter *adapter);
- void iavf_add_vlans(struct iavf_adapter *adapter);
- void iavf_del_vlans(struct iavf_adapter *adapter);
--void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
-+void iavf_set_promiscuous(struct iavf_adapter *adapter);
-+bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
- void iavf_request_stats(struct iavf_adapter *adapter);
- int iavf_request_reset(struct iavf_adapter *adapter);
- void iavf_get_hena(struct iavf_adapter *adapter);
-diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
-index b3434dbc90d6f..68783a7b70962 100644
---- a/drivers/net/ethernet/intel/iavf/iavf_main.c
-+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
-@@ -1186,6 +1186,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
- return 0;
- }
-
-+/**
-+ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
-+ * @adapter: device specific adapter
-+ */
-+bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
-+{
-+ return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
-+ (IFF_PROMISC | IFF_ALLMULTI);
-+}
-+
- /**
- * iavf_set_rx_mode - NDO callback to set the netdev filters
- * @netdev: network interface device structure
-@@ -1199,19 +1209,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
- __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-- if (netdev->flags & IFF_PROMISC &&
-- !(adapter->flags & IAVF_FLAG_PROMISC_ON))
-- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
-- else if (!(netdev->flags & IFF_PROMISC) &&
-- adapter->flags & IAVF_FLAG_PROMISC_ON)
-- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
--
-- if (netdev->flags & IFF_ALLMULTI &&
-- !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
-- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
-- else if (!(netdev->flags & IFF_ALLMULTI) &&
-- adapter->flags & IAVF_FLAG_ALLMULTI_ON)
-- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
-+ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
-+ if (iavf_promiscuous_mode_changed(adapter))
-+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
-+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
- }
-
- /**
-@@ -2162,19 +2163,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
- return 0;
- }
-
-- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
-- iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
-- FLAG_VF_MULTICAST_PROMISC);
-- return 0;
-- }
--
-- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
-- iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
-- return 0;
-- }
-- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
-- (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
-- iavf_set_promiscuous(adapter, 0);
-+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
-+ iavf_set_promiscuous(adapter);
- return 0;
- }
-
-@@ -4970,6 +4960,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
- spin_lock_init(&adapter->cloud_filter_list_lock);
- spin_lock_init(&adapter->fdir_fltr_lock);
- spin_lock_init(&adapter->adv_rss_lock);
-+ spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
-
- INIT_LIST_HEAD(&adapter->mac_filter_list);
- INIT_LIST_HEAD(&adapter->vlan_filter_list);
-diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
-index f9727e9c3d630..0b97b424e487a 100644
---- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
-+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
-@@ -936,14 +936,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
- /**
- * iavf_set_promiscuous
- * @adapter: adapter structure
-- * @flags: bitmask to control unicast/multicast promiscuous.
- *
- * Request that the PF enable promiscuous mode for our VSI.
- **/
--void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
-+void iavf_set_promiscuous(struct iavf_adapter *adapter)
- {
-+ struct net_device *netdev = adapter->netdev;
- struct virtchnl_promisc_info vpi;
-- int promisc_all;
-+ unsigned int flags;
-
- if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
- /* bail because we already have a command pending */
-@@ -952,36 +952,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
- return;
- }
-
-- promisc_all = FLAG_VF_UNICAST_PROMISC |
-- FLAG_VF_MULTICAST_PROMISC;
-- if ((flags & promisc_all) == promisc_all) {
-- adapter->flags |= IAVF_FLAG_PROMISC_ON;
-- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
-- dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
-- }
-+ /* prevent changes to promiscuous flags */
-+ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
-
-- if (flags & FLAG_VF_MULTICAST_PROMISC) {
-- adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
-- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
-- dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
-- adapter->netdev->name);
-+ /* sanity check to prevent duplicate AQ calls */
-+ if (!iavf_promiscuous_mode_changed(adapter)) {
-+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
-+ dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
-+ /* allow changes to promiscuous flags */
-+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
-+ return;
- }
-
-- if (!flags) {
-- if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
-- adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
-- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
-- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
-- }
-+ /* there are 2 bits, but only 3 states */
-+ if (!(netdev->flags & IFF_PROMISC) &&
-+ netdev->flags & IFF_ALLMULTI) {
-+ /* State 1 - only multicast promiscuous mode enabled
-+ * - !IFF_PROMISC && IFF_ALLMULTI
-+ */
-+ flags = FLAG_VF_MULTICAST_PROMISC;
-+ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
-+ adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
-+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
-+ } else if (!(netdev->flags & IFF_PROMISC) &&
-+ !(netdev->flags & IFF_ALLMULTI)) {
-+ /* State 2 - unicast/multicast promiscuous mode disabled
-+ * - !IFF_PROMISC && !IFF_ALLMULTI
-+ */
-+ flags = 0;
-+ adapter->current_netdev_promisc_flags &=
-+ ~(IFF_PROMISC | IFF_ALLMULTI);
-+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
-+ } else {
-+ /* State 3 - unicast/multicast promiscuous mode enabled
-+ * - IFF_PROMISC && IFF_ALLMULTI
-+ * - IFF_PROMISC && !IFF_ALLMULTI
-+ */
-+ flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
-+ adapter->current_netdev_promisc_flags |= IFF_PROMISC;
-+ if (netdev->flags & IFF_ALLMULTI)
-+ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
-+ else
-+ adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
-
-- if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
-- adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
-- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
-- dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
-- adapter->netdev->name);
-- }
-+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
- }
-
-+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
-+
-+ /* allow changes to promiscuous flags */
-+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
-+
- adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
- vpi.vsi_id = adapter->vsi_res->vsi_id;
- vpi.flags = flags;
-diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
-index 7b1256992dcf6..d86e2460b5a4d 100644
---- a/drivers/net/ethernet/intel/ice/ice_lag.c
-+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
-@@ -536,6 +536,50 @@ resume_traffic:
- dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
- }
-
-+/**
-+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
-+ * @lag: local lag struct
-+ * @ndlist: pointer to netdev list to populate
-+ */
-+static void ice_lag_build_netdev_list(struct ice_lag *lag,
-+ struct ice_lag_netdev_list *ndlist)
-+{
-+ struct ice_lag_netdev_list *nl;
-+ struct net_device *tmp_nd;
-+
-+ INIT_LIST_HEAD(&ndlist->node);
-+ rcu_read_lock();
-+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
-+ if (!nl)
-+ break;
-+
-+ nl->netdev = tmp_nd;
-+ list_add(&nl->node, &ndlist->node);
-+ }
-+ rcu_read_unlock();
-+ lag->netdev_head = &ndlist->node;
-+}
-+
-+/**
-+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
-+ * @lag: pointer to local lag struct
-+ * @ndlist: pointer to lag struct netdev list
-+ */
-+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
-+ struct ice_lag_netdev_list *ndlist)
-+{
-+ struct ice_lag_netdev_list *entry, *n;
-+
-+ rcu_read_lock();
-+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
-+ list_del(&entry->node);
-+ kfree(entry);
-+ }
-+ rcu_read_unlock();
-+ lag->netdev_head = NULL;
-+}
-+
- /**
- * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
- * @lag: primary interface LAG struct
-@@ -564,7 +608,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
- void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
- {
- struct ice_lag_netdev_list ndlist;
-- struct list_head *tmp, *n;
- u8 pri_port, act_port;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
-@@ -588,38 +631,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
- pri_port = pf->hw.port_info->lport;
- act_port = lag->active_port;
-
-- if (lag->upper_netdev) {
-- struct ice_lag_netdev_list *nl;
-- struct net_device *tmp_nd;
--
-- INIT_LIST_HEAD(&ndlist.node);
-- rcu_read_lock();
-- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
-- if (!nl)
-- break;
--
-- nl->netdev = tmp_nd;
-- list_add(&nl->node, &ndlist.node);
-- }
-- rcu_read_unlock();
-- }
--
-- lag->netdev_head = &ndlist.node;
-+ if (lag->upper_netdev)
-+ ice_lag_build_netdev_list(lag, &ndlist);
-
- if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
- lag->bonded && lag->primary && pri_port != act_port &&
- !list_empty(lag->netdev_head))
- ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
-
-- list_for_each_safe(tmp, n, &ndlist.node) {
-- struct ice_lag_netdev_list *entry;
--
-- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
-- list_del(&entry->node);
-- kfree(entry);
-- }
-- lag->netdev_head = NULL;
-+ ice_lag_destroy_netdev_list(lag, &ndlist);
-
- new_vf_unlock:
- mutex_unlock(&pf->lag_mutex);
-@@ -646,6 +666,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
- ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
- }
-
-+/**
-+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
-+ * @lag: local lag struct
-+ * @src_prt: lport value for source port
-+ * @dst_prt: lport value for destination port
-+ *
-+ * This function is used to move nodes during an out-of-netdev-event situation,
-+ * primarily when the driver needs to reconfigure or recreate resources.
-+ *
-+ * Must be called while holding the lag_mutex to avoid lag events from
-+ * processing while out-of-sync moves are happening. Also, paired moves,
-+ * such as used in a reset flow, should both be called under the same mutex
-+ * lock to avoid changes between start of reset and end of reset.
-+ */
-+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
-+{
-+ struct ice_lag_netdev_list ndlist;
-+
-+ ice_lag_build_netdev_list(lag, &ndlist);
-+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
-+ ice_lag_destroy_netdev_list(lag, &ndlist);
-+}
-+
- #define ICE_LAG_SRIOV_CP_RECIPE 10
- #define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
-
-@@ -1529,18 +1572,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
- */
- static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
- {
-- struct ice_lag_netdev_list *entry;
- struct ice_netdev_priv *np;
-- struct net_device *netdev;
- struct ice_pf *pf;
-
-- list_for_each_entry(entry, lag->netdev_head, node) {
-- netdev = entry->netdev;
-- np = netdev_priv(netdev);
-- pf = np->vsi->back;
--
-- ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
-- }
-+ np = netdev_priv(lag->netdev);
-+ pf = np->vsi->back;
-+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
- }
-
- /**
-@@ -1672,7 +1709,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
-
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
-- nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
-+ nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
- if (!nd_list)
- break;
-
-@@ -2028,7 +2065,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
- {
- struct ice_lag_netdev_list ndlist;
- struct ice_lag *lag, *prim_lag;
-- struct list_head *tmp, *n;
- u8 act_port, loc_port;
-
- if (!pf->lag || !pf->lag->bonded)
-@@ -2040,21 +2076,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
- if (lag->primary) {
- prim_lag = lag;
- } else {
-- struct ice_lag_netdev_list *nl;
-- struct net_device *tmp_nd;
--
-- INIT_LIST_HEAD(&ndlist.node);
-- rcu_read_lock();
-- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
-- if (!nl)
-- break;
--
-- nl->netdev = tmp_nd;
-- list_add(&nl->node, &ndlist.node);
-- }
-- rcu_read_unlock();
-- lag->netdev_head = &ndlist.node;
-+ ice_lag_build_netdev_list(lag, &ndlist);
- prim_lag = ice_lag_find_primary(lag);
- }
-
-@@ -2084,13 +2106,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
-
- ice_clear_rdma_cap(pf);
- lag_rebuild_out:
-- list_for_each_safe(tmp, n, &ndlist.node) {
-- struct ice_lag_netdev_list *entry;
--
-- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
-- list_del(&entry->node);
-- kfree(entry);
-- }
-+ ice_lag_destroy_netdev_list(lag, &ndlist);
- mutex_unlock(&pf->lag_mutex);
- }
-
-diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
-index facb6c894b6dd..7f22987675012 100644
---- a/drivers/net/ethernet/intel/ice/ice_lag.h
-+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
-@@ -63,4 +63,5 @@ int ice_init_lag(struct ice_pf *pf);
- void ice_deinit_lag(struct ice_pf *pf);
- void ice_lag_rebuild(struct ice_pf *pf);
- bool ice_lag_is_switchdev_running(struct ice_pf *pf);
-+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
- #endif /* _ICE_LAG_H_ */
-diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
-index 81d96a40d5a74..c4270708a7694 100644
---- a/drivers/net/ethernet/intel/ice/ice_ptp.c
-+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
-@@ -2246,18 +2246,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
- static void
- ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
- {
-- info->n_per_out = N_PER_OUT_E810;
--
-- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
-- info->n_ext_ts = N_EXT_TS_E810;
--
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810;
-+ info->n_per_out = N_PER_OUT_E810T;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
-
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
-+ } else if (ice_is_e810t(&pf->hw)) {
-+ info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
-+ info->n_per_out = N_PER_OUT_NO_SMA_E810T;
-+ } else {
-+ info->n_per_out = N_PER_OUT_E810;
-+ info->n_ext_ts = N_EXT_TS_E810;
- }
- }
-
-diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
-index 37b54db91df27..dd03cb69ad26b 100644
---- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
-+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
-@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
- return ice_tc_tun_get_type(dev) != TNL_LAST;
- }
-
--static int
--ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
-- struct flow_action_entry *act)
-+static bool ice_tc_is_dev_uplink(struct net_device *dev)
-+{
-+ return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
-+}
-+
-+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
-+ struct ice_tc_flower_fltr *fltr,
-+ struct net_device *target_dev)
- {
- struct ice_repr *repr;
-
-+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
-+
-+ if (ice_is_port_repr_netdev(filter_dev) &&
-+ ice_is_port_repr_netdev(target_dev)) {
-+ repr = ice_netdev_to_repr(target_dev);
-+
-+ fltr->dest_vsi = repr->src_vsi;
-+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-+ } else if (ice_is_port_repr_netdev(filter_dev) &&
-+ ice_tc_is_dev_uplink(target_dev)) {
-+ repr = ice_netdev_to_repr(filter_dev);
-+
-+ fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
-+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-+ } else if (ice_tc_is_dev_uplink(filter_dev) &&
-+ ice_is_port_repr_netdev(target_dev)) {
-+ repr = ice_netdev_to_repr(target_dev);
-+
-+ fltr->dest_vsi = repr->src_vsi;
-+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-+ } else {
-+ NL_SET_ERR_MSG_MOD(fltr->extack,
-+ "Unsupported netdevice in switchdev mode");
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+ice_tc_setup_drop_action(struct net_device *filter_dev,
-+ struct ice_tc_flower_fltr *fltr)
-+{
-+ fltr->action.fltr_act = ICE_DROP_PACKET;
-+
-+ if (ice_is_port_repr_netdev(filter_dev)) {
-+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-+ } else if (ice_tc_is_dev_uplink(filter_dev)) {
-+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-+ } else {
-+ NL_SET_ERR_MSG_MOD(fltr->extack,
-+ "Unsupported netdevice in switchdev mode");
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
-+ struct ice_tc_flower_fltr *fltr,
-+ struct flow_action_entry *act)
-+{
-+ int err;
-+
- switch (act->id) {
- case FLOW_ACTION_DROP:
-- fltr->action.fltr_act = ICE_DROP_PACKET;
-+ err = ice_tc_setup_drop_action(filter_dev, fltr);
-+ if (err)
-+ return err;
-+
- break;
-
- case FLOW_ACTION_REDIRECT:
-- fltr->action.fltr_act = ICE_FWD_TO_VSI;
--
-- if (ice_is_port_repr_netdev(act->dev)) {
-- repr = ice_netdev_to_repr(act->dev);
--
-- fltr->dest_vsi = repr->src_vsi;
-- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-- } else if (netif_is_ice(act->dev) ||
-- ice_is_tunnel_supported(act->dev)) {
-- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-- } else {
-- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
-- return -EINVAL;
-- }
-+ err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
-+ if (err)
-+ return err;
-
- break;
-
-@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
- goto exit;
- }
-
-- /* egress traffic is always redirect to uplink */
-- if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
-- fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
--
- rule_info.sw_act.fltr_act = fltr->action.fltr_act;
- if (fltr->action.fltr_act != ICE_DROP_PACKET)
- rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
-@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
- rule_info.flags_info.act_valid = true;
-
- if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
-+ /* Uplink to VF */
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
- rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
-- } else {
-+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
-+ fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
-+ /* VF to Uplink */
- rule_info.sw_act.flag |= ICE_FLTR_TX;
- rule_info.sw_act.src = vsi->idx;
- rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
-+ } else {
-+ /* VF to VF */
-+ rule_info.sw_act.flag |= ICE_FLTR_TX;
-+ rule_info.sw_act.src = vsi->idx;
-+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
- }
-
- /* specify the cookie as filter_rule_id */
-@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
-
- /**
- * ice_parse_tc_flower_actions - Parse the actions for a TC filter
-+ * @filter_dev: Pointer to device on which filter is being added
- * @vsi: Pointer to VSI
- * @cls_flower: Pointer to TC flower offload structure
- * @fltr: Pointer to TC flower filter structure
- *
- * Parse the actions for a TC filter
- */
--static int
--ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-- struct flow_cls_offload *cls_flower,
-- struct ice_tc_flower_fltr *fltr)
-+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
-+ struct ice_vsi *vsi,
-+ struct flow_cls_offload *cls_flower,
-+ struct ice_tc_flower_fltr *fltr)
- {
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
- struct flow_action *flow_action = &rule->action;
-@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-
- flow_action_for_each(i, act, flow_action) {
- if (ice_is_eswitch_mode_switchdev(vsi->back))
-- err = ice_eswitch_tc_parse_action(fltr, act);
-+ err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
- else
- err = ice_tc_parse_action(vsi, fltr, act);
- if (err)
-@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
- if (err < 0)
- goto err;
-
-- err = ice_parse_tc_flower_actions(vsi, f, fltr);
-+ err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
- if (err < 0)
- goto err;
-
-diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
-index 24e4f4d897b66..d488c7156d093 100644
---- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
-+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
-@@ -827,12 +827,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
- int ice_reset_vf(struct ice_vf *vf, u32 flags)
- {
- struct ice_pf *pf = vf->pf;
-+ struct ice_lag *lag;
- struct ice_vsi *vsi;
-+ u8 act_prt, pri_prt;
- struct device *dev;
- int err = 0;
- bool rsd;
-
- dev = ice_pf_to_dev(pf);
-+ act_prt = ICE_LAG_INVALID_PORT;
-+ pri_prt = pf->hw.port_info->lport;
-
- if (flags & ICE_VF_RESET_NOTIFY)
- ice_notify_vf_reset(vf);
-@@ -843,6 +847,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
- return 0;
- }
-
-+ lag = pf->lag;
-+ mutex_lock(&pf->lag_mutex);
-+ if (lag && lag->bonded && lag->primary) {
-+ act_prt = lag->active_port;
-+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
-+ lag->upper_netdev)
-+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
-+ else
-+ act_prt = ICE_LAG_INVALID_PORT;
-+ }
-+
- if (flags & ICE_VF_RESET_LOCK)
- mutex_lock(&vf->cfg_lock);
- else
-@@ -935,6 +950,11 @@ out_unlock:
- if (flags & ICE_VF_RESET_LOCK)
- mutex_unlock(&vf->cfg_lock);
-
-+ if (lag && lag->bonded && lag->primary &&
-+ act_prt != ICE_LAG_INVALID_PORT)
-+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
-+ mutex_unlock(&pf->lag_mutex);
-+
- return err;
- }
-
-diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
-index db97353efd067..62337e6569b23 100644
---- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
-+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
-@@ -1600,9 +1600,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
-+ struct ice_lag *lag;
- struct ice_vsi *vsi;
-+ u8 act_prt, pri_prt;
- int i = -1, q_idx;
-
-+ lag = pf->lag;
-+ mutex_lock(&pf->lag_mutex);
-+ act_prt = ICE_LAG_INVALID_PORT;
-+ pri_prt = pf->hw.port_info->lport;
-+ if (lag && lag->bonded && lag->primary) {
-+ act_prt = lag->active_port;
-+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
-+ lag->upper_netdev)
-+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
-+ else
-+ act_prt = ICE_LAG_INVALID_PORT;
-+ }
-+
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- goto error_param;
-
-@@ -1710,6 +1725,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
- }
- }
-
-+ if (lag && lag->bonded && lag->primary &&
-+ act_prt != ICE_LAG_INVALID_PORT)
-+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
-+ mutex_unlock(&pf->lag_mutex);
-+
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-@@ -1724,6 +1744,11 @@ error_param:
- vf->vf_id, i);
- }
-
-+ if (lag && lag->bonded && lag->primary &&
-+ act_prt != ICE_LAG_INVALID_PORT)
-+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
-+ mutex_unlock(&pf->lag_mutex);
-+
- ice_lag_move_new_vf_nodes(vf);
-
- /* send the response to the VF */
-diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
-index d483b8c00ec0e..165f76d1231c1 100644
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
- u8 *data)
- {
- if (sset == ETH_SS_STATS) {
-+ struct mvneta_port *pp = netdev_priv(netdev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- mvneta_statistics[i].name, ETH_GSTRING_LEN);
-
-- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
-- page_pool_ethtool_stats_get_strings(data);
-+ if (!pp->bm_priv) {
-+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
-+ page_pool_ethtool_stats_get_strings(data);
-+ }
- }
- }
-
-@@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
- struct page_pool_stats stats = {};
- int i;
-
-- for (i = 0; i < rxq_number; i++)
-- page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
-+ for (i = 0; i < rxq_number; i++) {
-+ if (pp->rxqs[i].page_pool)
-+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
-+ }
-
- page_pool_ethtool_stats_get(data, &stats);
- }
-@@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
- for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
- *data++ = pp->ethtool_stats[i];
-
-- mvneta_ethtool_pp_stats(pp, data);
-+ if (!pp->bm_priv)
-+ mvneta_ethtool_pp_stats(pp, data);
- }
-
- static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
- {
-- if (sset == ETH_SS_STATS)
-- return ARRAY_SIZE(mvneta_statistics) +
-- page_pool_ethtool_stats_get_count();
-+ if (sset == ETH_SS_STATS) {
-+ int count = ARRAY_SIZE(mvneta_statistics);
-+ struct mvneta_port *pp = netdev_priv(dev);
-+
-+ if (!pp->bm_priv)
-+ count += page_pool_ethtool_stats_get_count();
-+
-+ return count;
-+ }
-
- return -EOPNOTSUPP;
- }
-diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
-index 23c2f2ed2fb83..c112c71ff576f 100644
---- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
-@@ -5505,6 +5505,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
-
- ipolicer = &nix_hw->ipolicer[layer];
- for (idx = 0; idx < req->prof_count[layer]; idx++) {
-+ if (idx == MAX_BANDPROF_PER_PFFUNC)
-+ break;
- prof_idx = req->prof_idx[layer][idx];
- if (prof_idx >= ipolicer->band_prof.max ||
- ipolicer->pfvf_map[prof_idx] != pcifunc)
-@@ -5518,8 +5520,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
- ipolicer->pfvf_map[prof_idx] = 0x00;
- ipolicer->match_id[prof_idx] = 0;
- rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
-- if (idx == MAX_BANDPROF_PER_PFFUNC)
-- break;
- }
- }
- mutex_unlock(&rvu->rsrc_lock);
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
-index a4a258da8dd59..c1c99d7054f87 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
-@@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
- aq->prof.pebs_mantissa = 0;
- aq->prof_mask.pebs_mantissa = 0xFF;
-
-+ aq->prof.hl_en = 0;
-+ aq->prof_mask.hl_en = 1;
-+
- /* Fill AQ info */
- aq->qidx = profile;
- aq->ctype = NIX_AQ_CTYPE_BANDPROF;
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
-index 818ce76185b2f..629cf1659e5f9 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
-@@ -818,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
- int qidx, sqe_tail, sqe_head;
- struct otx2_snd_queue *sq;
- u64 incr, *ptr, val;
-- int timeout = 1000;
-
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
-@@ -827,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
- continue;
-
- incr = (u64)qidx << 32;
-- while (timeout) {
-- val = otx2_atomic64_add(incr, ptr);
-- sqe_head = (val >> 20) & 0x3F;
-- sqe_tail = (val >> 28) & 0x3F;
-- if (sqe_head == sqe_tail)
-- break;
-- usleep_range(1, 3);
-- timeout--;
-- }
-+ val = otx2_atomic64_add(incr, ptr);
-+ sqe_head = (val >> 20) & 0x3F;
-+ sqe_tail = (val >> 28) & 0x3F;
-+ if (sqe_head != sqe_tail)
-+ usleep_range(50, 60);
- }
- }
-
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
-index c04a8ee53a82f..06910307085ef 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
-@@ -977,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
- int otx2_txsch_alloc(struct otx2_nic *pfvf);
- void otx2_txschq_stop(struct otx2_nic *pfvf);
- void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
-+void otx2_free_pending_sqe(struct otx2_nic *pfvf);
- void otx2_sqb_flush(struct otx2_nic *pfvf);
- int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
-@@ -1069,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
- void otx2_shutdown_tc(struct otx2_nic *nic);
- int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- void *type_data);
-+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
-+
- /* CGX/RPM DMAC filters support */
- int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
- int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
-index 4762dbea64a12..97a71e9b85637 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
-@@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
- struct ethhdr *eth_hdr;
- bool new = false;
- int err = 0;
-+ u64 vf_num;
- u32 ring;
-
- if (!flow_cfg->max_flows) {
-@@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
- if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
- return -ENOMEM;
-
-- if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
-+ /* Number of queues on a VF can be greater or less than
-+ * the PF's queue. Hence no need to check for the
-+ * queue count. Hence no need to check queue count if PF
-+ * is installing for its VF. Below is the expected vf_num value
-+ * based on the ethtool commands.
-+ *
-+ * e.g.
-+ * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
-+ * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
-+ * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
-+ * vf_num:vf_idx+1
-+ */
-+ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
-+ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
-+ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
- return -EINVAL;
-
- if (fsp->location >= otx2_get_maxflows(flow_cfg))
-@@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
- flow_cfg->nr_flows++;
- }
-
-+ if (flow->is_vf)
-+ netdev_info(pfvf->netdev,
-+ "Make sure that VF's queue number is within its queue limit\n");
- return 0;
- }
-
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
-index 6daf4d58c25d6..532e324bdcc8e 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
-@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
- otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
- TYPE_PFVF);
-- vfs -= 64;
-+ if (intr)
-+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
-+ vfs = 64;
- }
-
- intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
-@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
-
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
-
-- trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
-+ if (intr)
-+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
-
- return IRQ_HANDLED;
- }
-@@ -1193,31 +1196,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
- };
-
- static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
-- "NIX_SND_STATUS_GOOD",
-- "NIX_SND_STATUS_SQ_CTX_FAULT",
-- "NIX_SND_STATUS_SQ_CTX_POISON",
-- "NIX_SND_STATUS_SQB_FAULT",
-- "NIX_SND_STATUS_SQB_POISON",
-- "NIX_SND_STATUS_HDR_ERR",
-- "NIX_SND_STATUS_EXT_ERR",
-- "NIX_SND_STATUS_JUMP_FAULT",
-- "NIX_SND_STATUS_JUMP_POISON",
-- "NIX_SND_STATUS_CRC_ERR",
-- "NIX_SND_STATUS_IMM_ERR",
-- "NIX_SND_STATUS_SG_ERR",
-- "NIX_SND_STATUS_MEM_ERR",
-- "NIX_SND_STATUS_INVALID_SUBDC",
-- "NIX_SND_STATUS_SUBDC_ORDER_ERR",
-- "NIX_SND_STATUS_DATA_FAULT",
-- "NIX_SND_STATUS_DATA_POISON",
-- "NIX_SND_STATUS_NPC_DROP_ACTION",
-- "NIX_SND_STATUS_LOCK_VIOL",
-- "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
-- "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
-- "NIX_SND_STATUS_NPC_MCAST_ABORT",
-- "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
-- "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
-- "NIX_SND_STATUS_SEND_STATS_ERR",
-+ [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
-+ [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
-+ [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
-+ [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
-+ [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
-+ [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
-+ [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
-+ [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
-+ [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
-+ [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
-+ [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
-+ [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
-+ [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
-+ [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
-+ [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
-+ [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
-+ [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
-+ [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
-+ [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
-+ [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
-+ [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
-+ [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
-+ [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
-+ [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
-+ [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
-+ [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
- };
-
- static irqreturn_t otx2_q_intr_handler(int irq, void *data)
-@@ -1238,14 +1242,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
- continue;
-
- if (val & BIT_ULL(42)) {
-- netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
-+ netdev_err(pf->netdev,
-+ "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
- qidx, otx2_read64(pf, NIX_LF_ERR_INT));
- } else {
- if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
- netdev_err(pf->netdev, "CQ%lld: Doorbell error",
- qidx);
- if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
-- netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
-+ netdev_err(pf->netdev,
-+ "CQ%lld: Memory fault on CQE write to LLC/DRAM",
- qidx);
- }
-
-@@ -1272,7 +1278,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
- (val & NIX_SQINT_BITS));
-
- if (val & BIT_ULL(42)) {
-- netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
-+ netdev_err(pf->netdev,
-+ "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
- qidx, otx2_read64(pf, NIX_LF_ERR_INT));
- goto done;
- }
-@@ -1282,8 +1289,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
- goto chk_mnq_err_dbg;
-
- sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
-- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
-- qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
-+ netdev_err(pf->netdev,
-+ "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
-+ qidx, sq_op_err_dbg,
-+ nix_sqoperr_e_str[sq_op_err_code],
-+ sq_op_err_code);
-
- otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
-
-@@ -1300,16 +1310,21 @@ chk_mnq_err_dbg:
- goto chk_snd_err_dbg;
-
- mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
-- netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
-- qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
-+ netdev_err(pf->netdev,
-+ "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
-+ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
-+ mnq_err_code);
- otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
-
- chk_snd_err_dbg:
- snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
- if (snd_err_dbg & BIT(44)) {
- snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
-- netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
-- qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
-+ netdev_err(pf->netdev,
-+ "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
-+ qidx, snd_err_dbg,
-+ nix_snd_status_e_str[snd_err_code],
-+ snd_err_code);
- otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
- }
-
-@@ -1589,6 +1604,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
- else
- otx2_cleanup_tx_cqes(pf, cq);
- }
-+ otx2_free_pending_sqe(pf);
-
- otx2_free_sq_res(pf);
-
-@@ -1857,6 +1873,8 @@ int otx2_open(struct net_device *netdev)
- if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
- otx2_dmacflt_reinstall_flows(pf);
-
-+ otx2_tc_apply_ingress_police_rules(pf);
-+
- err = otx2_rxtx_enable(pf, true);
- /* If a mbox communication error happens at this point then interface
- * will end up in a state such that it is in down state but hardware
-@@ -1921,6 +1939,8 @@ int otx2_stop(struct net_device *netdev)
- /* Clear RSS enable flag */
- rss = &pf->hw.rss_info;
- rss->enable = false;
-+ if (!netif_is_rxfh_configured(netdev))
-+ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
-
- /* Cleanup Queue IRQ */
- vec = pci_irq_vector(pf->pdev,
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
-index fa37b9f312cae..4e5899d8fa2e6 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
-@@ -318,23 +318,23 @@ enum nix_snd_status_e {
- NIX_SND_STATUS_EXT_ERR = 0x6,
- NIX_SND_STATUS_JUMP_FAULT = 0x7,
- NIX_SND_STATUS_JUMP_POISON = 0x8,
-- NIX_SND_STATUS_CRC_ERR = 0x9,
-- NIX_SND_STATUS_IMM_ERR = 0x10,
-- NIX_SND_STATUS_SG_ERR = 0x11,
-- NIX_SND_STATUS_MEM_ERR = 0x12,
-- NIX_SND_STATUS_INVALID_SUBDC = 0x13,
-- NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
-- NIX_SND_STATUS_DATA_FAULT = 0x15,
-- NIX_SND_STATUS_DATA_POISON = 0x16,
-- NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
-- NIX_SND_STATUS_LOCK_VIOL = 0x18,
-- NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
-- NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
-- NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
-- NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
-- NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
-- NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
-- NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
-+ NIX_SND_STATUS_CRC_ERR = 0x10,
-+ NIX_SND_STATUS_IMM_ERR = 0x11,
-+ NIX_SND_STATUS_SG_ERR = 0x12,
-+ NIX_SND_STATUS_MEM_ERR = 0x13,
-+ NIX_SND_STATUS_INVALID_SUBDC = 0x14,
-+ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
-+ NIX_SND_STATUS_DATA_FAULT = 0x16,
-+ NIX_SND_STATUS_DATA_POISON = 0x17,
-+ NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
-+ NIX_SND_STATUS_LOCK_VIOL = 0x21,
-+ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
-+ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
-+ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
-+ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
-+ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
-+ NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
-+ NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
- NIX_SND_STATUS_MAX,
- };
-
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
-index fab9d85bfb371..423ce54eaea69 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
-@@ -45,6 +45,9 @@ struct otx2_tc_flow {
- bool is_act_police;
- u32 prio;
- struct npc_install_flow_req req;
-+ u64 rate;
-+ u32 burst;
-+ bool is_pps;
- };
-
- static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
-@@ -282,21 +285,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
- return err;
- }
-
--static int otx2_tc_act_set_police(struct otx2_nic *nic,
-- struct otx2_tc_flow *node,
-- struct flow_cls_offload *f,
-- u64 rate, u32 burst, u32 mark,
-- struct npc_install_flow_req *req, bool pps)
-+static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
-+ struct otx2_tc_flow *node)
- {
-- struct netlink_ext_ack *extack = f->common.extack;
-- struct otx2_hw *hw = &nic->hw;
-- int rq_idx, rc;
--
-- rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
-- if (rq_idx >= hw->rx_queues) {
-- NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
-- return -EINVAL;
-- }
-+ int rc;
-
- mutex_lock(&nic->mbox.lock);
-
-@@ -306,23 +298,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
- return rc;
- }
-
-- rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
-+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
-+ node->burst, node->rate, node->is_pps);
- if (rc)
- goto free_leaf;
-
-- rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
-+ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
- if (rc)
- goto free_leaf;
-
- mutex_unlock(&nic->mbox.lock);
-
-- req->match_id = mark & 0xFFFFULL;
-- req->index = rq_idx;
-- req->op = NIX_RX_ACTIONOP_UCAST;
-- set_bit(rq_idx, &nic->rq_bmap);
-- node->is_act_police = true;
-- node->rq = rq_idx;
--
- return 0;
-
- free_leaf:
-@@ -334,6 +320,39 @@ free_leaf:
- return rc;
- }
-
-+static int otx2_tc_act_set_police(struct otx2_nic *nic,
-+ struct otx2_tc_flow *node,
-+ struct flow_cls_offload *f,
-+ u64 rate, u32 burst, u32 mark,
-+ struct npc_install_flow_req *req, bool pps)
-+{
-+ struct netlink_ext_ack *extack = f->common.extack;
-+ struct otx2_hw *hw = &nic->hw;
-+ int rq_idx, rc;
-+
-+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
-+ if (rq_idx >= hw->rx_queues) {
-+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
-+ return -EINVAL;
-+ }
-+
-+ req->match_id = mark & 0xFFFFULL;
-+ req->index = rq_idx;
-+ req->op = NIX_RX_ACTIONOP_UCAST;
-+
-+ node->is_act_police = true;
-+ node->rq = rq_idx;
-+ node->burst = burst;
-+ node->rate = rate;
-+ node->is_pps = pps;
-+
-+ rc = otx2_tc_act_set_hw_police(nic, node);
-+ if (!rc)
-+ set_bit(rq_idx, &nic->rq_bmap);
-+
-+ return rc;
-+}
-+
- static int otx2_tc_parse_actions(struct otx2_nic *nic,
- struct flow_action *flow_action,
- struct npc_install_flow_req *req,
-@@ -986,6 +1005,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
- }
-
- if (flow_node->is_act_police) {
-+ __clear_bit(flow_node->rq, &nic->rq_bmap);
-+
-+ if (nic->flags & OTX2_FLAG_INTF_DOWN)
-+ goto free_mcam_flow;
-+
- mutex_lock(&nic->mbox.lock);
-
- err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
-@@ -1001,11 +1025,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
- "Unable to free leaf bandwidth profile(%d)\n",
- flow_node->leaf_profile);
-
-- __clear_bit(flow_node->rq, &nic->rq_bmap);
--
- mutex_unlock(&nic->mbox.lock);
- }
-
-+free_mcam_flow:
- otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
- otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
- kfree_rcu(flow_node, rcu);
-@@ -1025,6 +1048,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
- if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
- return -ENOMEM;
-
-+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
-+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
-+ return -EINVAL;
-+ }
-+
- if (flow_cfg->nr_flows == flow_cfg->max_flows) {
- NL_SET_ERR_MSG_MOD(extack,
- "Free MCAM entry not available to add the flow");
-@@ -1384,3 +1412,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
- otx2_destroy_tc_flow_list(nic);
- }
- EXPORT_SYMBOL(otx2_shutdown_tc);
-+
-+static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
-+ struct otx2_tc_flow *node)
-+{
-+ struct npc_install_flow_req *req;
-+
-+ if (otx2_tc_act_set_hw_police(nic, node))
-+ return;
-+
-+ mutex_lock(&nic->mbox.lock);
-+
-+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
-+ if (!req)
-+ goto err;
-+
-+ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
-+
-+ if (otx2_sync_mbox_msg(&nic->mbox))
-+ netdev_err(nic->netdev,
-+ "Failed to install MCAM flow entry for ingress rule");
-+err:
-+ mutex_unlock(&nic->mbox.lock);
-+}
-+
-+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
-+{
-+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
-+ struct otx2_tc_flow *node;
-+
-+ /* If any ingress policer rules exist for the interface then
-+ * apply those rules. Ingress policer rules depend on bandwidth
-+ * profiles linked to the receive queues. Since no receive queues
-+ * exist when interface is down, ingress policer rules are stored
-+ * and configured in hardware after all receive queues are allocated
-+ * in otx2_open.
-+ */
-+ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
-+ if (node->is_act_police)
-+ otx2_tc_config_ingress_rule(nic, node);
-+ }
-+}
-+EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
-diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
-index 53b2a4ef52985..6ee15f3c25ede 100644
---- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
-+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
-@@ -1247,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
-
- void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
- {
-+ int tx_pkts = 0, tx_bytes = 0;
- struct sk_buff *skb = NULL;
- struct otx2_snd_queue *sq;
- struct nix_cqe_tx_s *cqe;
-+ struct netdev_queue *txq;
- int processed_cqe = 0;
- struct sg_list *sg;
- int qidx;
-@@ -1270,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
- sg = &sq->sg[cqe->comp.sqe_id];
- skb = (struct sk_buff *)sg->skb;
- if (skb) {
-+ tx_bytes += skb->len;
-+ tx_pkts++;
- otx2_dma_unmap_skb_frags(pfvf, sg);
- dev_kfree_skb_any(skb);
- sg->skb = (u64)NULL;
- }
- }
-
-+ if (likely(tx_pkts)) {
-+ if (qidx >= pfvf->hw.tx_queues)
-+ qidx -= pfvf->hw.xdp_queues;
-+ txq = netdev_get_tx_queue(pfvf->netdev, qidx);
-+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
-+ }
- /* Free CQEs to HW */
- otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
- ((u64)cq->cq_idx << 32) | processed_cqe);
-@@ -1302,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
- return err;
- }
-
-+void otx2_free_pending_sqe(struct otx2_nic *pfvf)
-+{
-+ int tx_pkts = 0, tx_bytes = 0;
-+ struct sk_buff *skb = NULL;
-+ struct otx2_snd_queue *sq;
-+ struct netdev_queue *txq;
-+ struct sg_list *sg;
-+ int sq_idx, sqe;
-+
-+ for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
-+ sq = &pfvf->qset.sq[sq_idx];
-+ for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
-+ sg = &sq->sg[sqe];
-+ skb = (struct sk_buff *)sg->skb;
-+ if (skb) {
-+ tx_bytes += skb->len;
-+ tx_pkts++;
-+ otx2_dma_unmap_skb_frags(pfvf, sg);
-+ dev_kfree_skb_any(skb);
-+ sg->skb = (u64)NULL;
-+ }
-+ }
-+
-+ if (!tx_pkts)
-+ continue;
-+ txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
-+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
-+ tx_pkts = 0;
-+ tx_bytes = 0;
-+ }
-+}
-+
- static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
- {
-diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
-index 47ea69feb3b24..f87ab9b8a5901 100644
---- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
-+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
-@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
- #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
- #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
- #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
--#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
--#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
-+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
-+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
- #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
- #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
- #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
-index bb11e644d24f7..af3928eddafd1 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
-@@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
-
- static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
- struct mlx5_cqe64 *cqe,
-+ u8 *md_buff,
-+ u8 *md_buff_sz,
- int budget)
- {
- struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
-@@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
- mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
- out:
- napi_consume_skb(skb, budget);
-- mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
-+ md_buff[*md_buff_sz++] = metadata_id;
- if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
- !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
- }
-
--static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
-+static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
- {
- struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
-- struct mlx5_cqwq *cqwq = &cq->wq;
-+ int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
-+ u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
-+ u8 metadata_buff_sz = 0;
-+ struct mlx5_cqwq *cqwq;
- struct mlx5_cqe64 *cqe;
- int work_done = 0;
-
-+ cqwq = &cq->wq;
-+
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
- return false;
-
-@@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
- do {
- mlx5_cqwq_pop(cqwq);
-
-- mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
-+ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
-+ metadata_buff, &metadata_buff_sz, napi_budget);
- } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
-
- mlx5_cqwq_update_db_record(cqwq);
-@@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
- /* ensure cq space is freed before enabling more cqes */
- wmb();
-
-+ while (metadata_buff_sz > 0)
-+ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
-+ metadata_buff[--metadata_buff_sz]);
-+
- mlx5e_txqsq_wake(&ptpsq->txqsq);
-
- return work_done == budget;
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
-index e8eea9ffd5eb6..03b119a434bc9 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
-@@ -702,11 +702,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
-
- void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
- {
-- char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_icosq *icosq = rq->icosq;
- struct mlx5e_priv *priv = rq->priv;
- struct mlx5e_err_ctx err_ctx = {};
-+ char icosq_str[32] = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
-@@ -715,7 +715,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
- if (icosq)
- snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
- snprintf(err_str, sizeof(err_str),
-- "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
-+ "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
- rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
-index 00a04fdd756f5..668da5c70e63d 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
-@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
- if (err)
- goto destroy_neigh_entry;
-
-- e->encap_size = ipv4_encap_size;
-- e->encap_header = encap_header;
--
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(attr.n, NULL);
- /* the encap entry will be made valid on neigh update event
-@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
- goto destroy_neigh_entry;
- }
-
-+ e->encap_size = ipv4_encap_size;
-+ e->encap_header = encap_header;
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
- mlx5e_route_lookup_ipv4_put(&attr);
-@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
- if (err)
- goto free_encap;
-
-- e->encap_size = ipv4_encap_size;
-- kfree(e->encap_header);
-- e->encap_header = encap_header;
--
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(attr.n, NULL);
- /* the encap entry will be made valid on neigh update event
- * and not used before that.
- */
-- goto release_neigh;
-+ goto free_encap;
- }
-
- memset(&reformat_params, 0, sizeof(reformat_params));
-@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
- goto free_encap;
- }
-
-+ e->encap_size = ipv4_encap_size;
-+ kfree(e->encap_header);
-+ e->encap_header = encap_header;
-+
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
- mlx5e_route_lookup_ipv4_put(&attr);
-@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
- if (err)
- goto destroy_neigh_entry;
-
-- e->encap_size = ipv6_encap_size;
-- e->encap_header = encap_header;
--
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(attr.n, NULL);
- /* the encap entry will be made valid on neigh update event
-@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
- goto destroy_neigh_entry;
- }
-
-+ e->encap_size = ipv6_encap_size;
-+ e->encap_header = encap_header;
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
- mlx5e_route_lookup_ipv6_put(&attr);
-@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
- if (err)
- goto free_encap;
-
-- e->encap_size = ipv6_encap_size;
-- kfree(e->encap_header);
-- e->encap_header = encap_header;
--
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(attr.n, NULL);
- /* the encap entry will be made valid on neigh update event
- * and not used before that.
- */
-- goto release_neigh;
-+ goto free_encap;
- }
-
- memset(&reformat_params, 0, sizeof(reformat_params));
-@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
- goto free_encap;
- }
-
-+ e->encap_size = ipv6_encap_size;
-+ kfree(e->encap_header);
-+ e->encap_header = encap_header;
-+
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
- mlx5e_route_lookup_ipv6_put(&attr);
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
-index dff02434ff458..7c66bd73ddfa2 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
-@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
- struct ethtool_drvinfo *drvinfo)
- {
- struct mlx5_core_dev *mdev = priv->mdev;
-+ int count;
-
- strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-- "%d.%d.%04d (%.16s)",
-- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
-- mdev->board_id);
-+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
-+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-+ if (count == sizeof(drvinfo->fw_version))
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%d.%d.%04d", fw_rev_maj(mdev),
-+ fw_rev_min(mdev), fw_rev_sub(mdev));
-+
- strscpy(drvinfo->bus_info, dev_name(mdev->device),
- sizeof(drvinfo->bus_info));
- }
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
-index fd1cce542b680..825f9c687633f 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
-@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
- {
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
-+ int count;
-
- strscpy(drvinfo->driver, mlx5e_rep_driver_name,
- sizeof(drvinfo->driver));
-- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-- "%d.%d.%04d (%.16s)",
-- fw_rev_maj(mdev), fw_rev_min(mdev),
-- fw_rev_sub(mdev), mdev->board_id);
-+ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
-+ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-+ if (count == sizeof(drvinfo->fw_version))
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%d.%d.%04d", fw_rev_maj(mdev),
-+ fw_rev_min(mdev), fw_rev_sub(mdev));
- }
-
- static const struct counter_desc sw_rep_stats_desc[] = {
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
-index c8590483ddc64..b62fd37493410 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
-@@ -3145,7 +3145,7 @@ static struct mlx5_fields fields[] = {
- OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
-- OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
-+ OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
-
- OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
- OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
-@@ -3156,21 +3156,31 @@ static struct mlx5_fields fields[] = {
- OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
- };
-
--static unsigned long mask_to_le(unsigned long mask, int size)
-+static u32 mask_field_get(void *mask, struct mlx5_fields *f)
- {
-- __be32 mask_be32;
-- __be16 mask_be16;
--
-- if (size == 32) {
-- mask_be32 = (__force __be32)(mask);
-- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
-- } else if (size == 16) {
-- mask_be32 = (__force __be32)(mask);
-- mask_be16 = *(__be16 *)&mask_be32;
-- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
-+ switch (f->field_bsize) {
-+ case 32:
-+ return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
-+ case 16:
-+ return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
-+ default:
-+ return *(u8 *)mask & (u8)f->field_mask;
- }
-+}
-
-- return mask;
-+static void mask_field_clear(void *mask, struct mlx5_fields *f)
-+{
-+ switch (f->field_bsize) {
-+ case 32:
-+ *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
-+ break;
-+ case 16:
-+ *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
-+ break;
-+ default:
-+ *(u8 *)mask &= ~(u8)f->field_mask;
-+ break;
-+ }
- }
-
- static int offload_pedit_fields(struct mlx5e_priv *priv,
-@@ -3182,11 +3192,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
- struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- struct pedit_headers_action *hdrs = parse_attr->hdrs;
- void *headers_c, *headers_v, *action, *vals_p;
-- u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
- struct mlx5e_tc_mod_hdr_acts *mod_acts;
-- unsigned long mask, field_mask;
-+ void *s_masks_p, *a_masks_p;
- int i, first, last, next_z;
- struct mlx5_fields *f;
-+ unsigned long mask;
-+ u32 s_mask, a_mask;
- u8 cmd;
-
- mod_acts = &parse_attr->mod_hdr_acts;
-@@ -3202,15 +3213,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
- bool skip;
-
- f = &fields[i];
-- /* avoid seeing bits set from previous iterations */
-- s_mask = 0;
-- a_mask = 0;
--
- s_masks_p = (void *)set_masks + f->offset;
- a_masks_p = (void *)add_masks + f->offset;
-
-- s_mask = *s_masks_p & f->field_mask;
-- a_mask = *a_masks_p & f->field_mask;
-+ s_mask = mask_field_get(s_masks_p, f);
-+ a_mask = mask_field_get(a_masks_p, f);
-
- if (!s_mask && !a_mask) /* nothing to offload here */
- continue;
-@@ -3237,22 +3244,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
- match_mask, f->field_bsize))
- skip = true;
- /* clear to denote we consumed this field */
-- *s_masks_p &= ~f->field_mask;
-+ mask_field_clear(s_masks_p, f);
- } else {
- cmd = MLX5_ACTION_TYPE_ADD;
- mask = a_mask;
- vals_p = (void *)add_vals + f->offset;
- /* add 0 is no change */
-- if ((*(u32 *)vals_p & f->field_mask) == 0)
-+ if (!mask_field_get(vals_p, f))
- skip = true;
- /* clear to denote we consumed this field */
-- *a_masks_p &= ~f->field_mask;
-+ mask_field_clear(a_masks_p, f);
- }
- if (skip)
- continue;
-
-- mask = mask_to_le(mask, f->field_bsize);
--
- first = find_first_bit(&mask, f->field_bsize);
- next_z = find_next_zero_bit(&mask, f->field_bsize, first);
- last = find_last_bit(&mask, f->field_bsize);
-@@ -3279,10 +3284,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
- MLX5_SET(set_action_in, action, field, f->field);
-
- if (cmd == MLX5_ACTION_TYPE_SET) {
-+ unsigned long field_mask = f->field_mask;
- int start;
-
-- field_mask = mask_to_le(f->field_mask, f->field_bsize);
--
- /* if field is bit sized it can start not from first bit */
- start = find_first_bit(&field_mask, f->field_bsize);
-
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
-index d41435c22ce56..f0b506e562df3 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
-@@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
-
- mlx5e_skb_cb_hwtstamp_init(skb);
-- mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
- mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
- metadata_index);
-+ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
- if (!netif_tx_queue_stopped(sq->txq) &&
- mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
- netif_tx_stop_queue(sq->txq);
-@@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-
- err_drop:
- stats->dropped++;
-- dev_kfree_skb_any(skb);
- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
- be32_to_cpu(eseg->flow_table_metadata));
-+ dev_kfree_skb_any(skb);
- mlx5e_tx_flush(sq);
- }
-
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
-index ea0405e0a43fa..40a6cb052a2da 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
-@@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
- {
- struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq *irq;
-+ int cpu;
-
- irq = xa_load(&table->comp_irqs, vecidx);
- if (!irq)
- return;
-
-+ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
-+ cpumask_clear_cpu(cpu, &table->used_cpus);
- xa_erase(&table->comp_irqs, vecidx);
- mlx5_irq_affinity_irq_release(dev, irq);
- }
-@@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
- static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
- {
- struct mlx5_eq_table *table = dev->priv.eq_table;
-+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
-+ struct irq_affinity_desc af_desc = {};
- struct mlx5_irq *irq;
-
-- irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
-- if (IS_ERR(irq)) {
-- /* In case SF irq pool does not exist, fallback to the PF irqs*/
-- if (PTR_ERR(irq) == -ENOENT)
-- return comp_irq_request_pci(dev, vecidx);
-+	/* In case SF irq pool does not exist, fall back to the PF irqs */
-+ if (!mlx5_irq_pool_is_sf_pool(pool))
-+ return comp_irq_request_pci(dev, vecidx);
-
-+ af_desc.is_managed = 1;
-+ cpumask_copy(&af_desc.mask, cpu_online_mask);
-+ cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
-+ irq = mlx5_irq_affinity_request(pool, &af_desc);
-+ if (IS_ERR(irq))
- return PTR_ERR(irq);
-- }
-+
-+ cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
-+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
-+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
-+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
-+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
-
- return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
- }
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
-index b296ac52a4397..88236e75fd901 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
-@@ -984,7 +984,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
- dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-
-- if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
-+ if (rep->vport == MLX5_VPORT_UPLINK &&
-+ on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
- dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
- flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
-index 047d5fed5f89e..612e666ec2635 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
-@@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
- if (pool->irqs_per_cpu)
- cpu_put(pool, cpu);
- }
--
--/**
-- * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
-- * @dev: mlx5 device that is requesting the IRQ.
-- * @used_cpus: cpumask of bounded cpus by the device
-- * @vecidx: vector index to request an IRQ for.
-- *
-- * Each IRQ is bounded to at most 1 CPU.
-- * This function is requesting an IRQ according to the default assignment.
-- * The default assignment policy is:
-- * - request the least loaded IRQ which is not bound to any
-- * CPU of the previous IRQs requested.
-- *
-- * On success, this function updates used_cpus mask and returns an irq pointer.
-- * In case of an error, an appropriate error pointer is returned.
-- */
--struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
-- struct cpumask *used_cpus, u16 vecidx)
--{
-- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
-- struct irq_affinity_desc af_desc = {};
-- struct mlx5_irq *irq;
--
-- if (!mlx5_irq_pool_is_sf_pool(pool))
-- return ERR_PTR(-ENOENT);
--
-- af_desc.is_managed = 1;
-- cpumask_copy(&af_desc.mask, cpu_online_mask);
-- cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
-- irq = mlx5_irq_affinity_request(pool, &af_desc);
--
-- if (IS_ERR(irq))
-- return irq;
--
-- cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
-- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
-- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
-- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
-- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
--
-- return irq;
--}
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
-index aa29f09e83564..0c83ef174275a 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
-@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-
- static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
- {
-- return mlx5_ptp_adjtime(ptp, delta);
-+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
-+ struct mlx5_core_dev *mdev;
-+
-+ mdev = container_of(clock, struct mlx5_core_dev, clock);
-+
-+ return mlx5_ptp_adjtime_real_time(mdev, delta);
- }
-
- static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
-index 653648216730a..4dcf995cb1a20 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
-@@ -28,7 +28,7 @@
- struct mlx5_irq {
- struct atomic_notifier_head nh;
- cpumask_var_t mask;
-- char name[MLX5_MAX_IRQ_NAME];
-+ char name[MLX5_MAX_IRQ_FORMATTED_NAME];
- struct mlx5_irq_pool *pool;
- int refcount;
- struct msi_map map;
-@@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
- else
- irq_sf_set_name(pool, name, i);
- ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
-- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
-- "%s@pci:%s", name, pci_name(dev->pdev));
-+ snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
-+ MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
- err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
- &irq->nh);
- if (err) {
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
-index d3a77a0ab8488..c4d377f8df308 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
-@@ -7,6 +7,9 @@
- #include <linux/mlx5/driver.h>
-
- #define MLX5_MAX_IRQ_NAME (32)
-+#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
-+#define MLX5_MAX_IRQ_FORMATTED_NAME \
-+ (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
- /* max irq_index is 2047, so four chars */
- #define MLX5_MAX_IRQ_IDX_CHARS (4)
- #define MLX5_EQ_REFS_PER_IRQ (2)
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
-index 4e8527a724f50..6fa06ba2d3465 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
-@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
- u32 cqn;
- u32 pdn;
- u32 max_send_wr;
-- u32 max_send_sge;
- struct mlx5_uars_page *uar;
- u8 isolate_vl_tc:1;
- };
-@@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
- return err == CQ_POLL_ERR ? err : npolled;
- }
-
--static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
--{
-- return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
-- sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
-- sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
--}
--
--/* We calculate for specific RC QP with the required functionality */
--static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
--{
-- int update_arg_size;
-- int inl_size = 0;
-- int tot_size;
-- int size;
--
-- update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
--
-- size = sizeof(struct mlx5_wqe_ctrl_seg) +
-- sizeof(struct mlx5_wqe_raddr_seg);
-- inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
-- DR_STE_SIZE, 16);
--
-- size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
--
-- size = max(size, update_arg_size);
--
-- tot_size = max(size, inl_size);
--
-- return ALIGN(tot_size, MLX5_SEND_WQE_BB);
--}
--
- static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
- struct dr_qp_init_attr *attr)
- {
-@@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
- u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
- struct mlx5_wq_param wqp;
- struct mlx5dr_qp *dr_qp;
-- int wqe_size;
- int inlen;
- void *qpc;
- void *in;
-@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
- if (err)
- goto err_in;
- dr_qp->uar = attr->uar;
-- wqe_size = dr_qp_calc_rc_send_wqe(attr);
-- dr_qp->max_inline_data = min(wqe_size -
-- (sizeof(struct mlx5_wqe_ctrl_seg) +
-- sizeof(struct mlx5_wqe_raddr_seg) +
-- sizeof(struct mlx5_wqe_inline_seg)),
-- (2 * MLX5_SEND_WQE_BB -
-- (sizeof(struct mlx5_wqe_ctrl_seg) +
-- sizeof(struct mlx5_wqe_raddr_seg) +
-- sizeof(struct mlx5_wqe_inline_seg))));
-
- return dr_qp;
-
-@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
- MLX5_SEND_WQE_DS;
- }
-
--static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
-- struct dr_data_seg *data_seg, void *wqe)
--{
-- int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
-- sizeof(struct mlx5_wqe_raddr_seg) +
-- sizeof(struct mlx5_wqe_inline_seg);
-- struct mlx5_wqe_inline_seg *seg;
-- int left_space;
-- int inl = 0;
-- void *addr;
-- int len;
-- int idx;
--
-- seg = wqe;
-- wqe += sizeof(*seg);
-- addr = (void *)(unsigned long)(data_seg->addr);
-- len = data_seg->length;
-- inl += len;
-- left_space = MLX5_SEND_WQE_BB - inline_header_size;
--
-- if (likely(len > left_space)) {
-- memcpy(wqe, addr, left_space);
-- len -= left_space;
-- addr += left_space;
-- idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
-- wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
-- }
--
-- memcpy(wqe, addr, len);
--
-- if (likely(inl)) {
-- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
-- return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
-- MLX5_SEND_WQE_DS);
-- } else {
-- return 0;
-- }
--}
--
- static void
--dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
-- struct mlx5_wqe_ctrl_seg *wq_ctrl,
-+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
- u64 remote_addr,
- u32 rkey,
- struct dr_data_seg *data_seg,
-@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
- wq_raddr->reserved = 0;
-
- wq_dseg = (void *)(wq_raddr + 1);
-- /* WQE ctrl segment + WQE remote addr segment */
-- *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
-
-- if (data_seg->send_flags & IB_SEND_INLINE) {
-- *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
-- } else {
-- wq_dseg->byte_count = cpu_to_be32(data_seg->length);
-- wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
-- wq_dseg->addr = cpu_to_be64(data_seg->addr);
-- *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
-- }
-+ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
-+ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
-+ wq_dseg->addr = cpu_to_be64(data_seg->addr);
-+
-+ *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
-+ sizeof(*wq_dseg) + /* WQE data segment */
-+ sizeof(*wq_raddr)) / /* WQE remote addr segment */
-+ MLX5_SEND_WQE_DS;
- }
-
- static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
-@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
- switch (opcode) {
- case MLX5_OPCODE_RDMA_READ:
- case MLX5_OPCODE_RDMA_WRITE:
-- dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
-+ dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
- rkey, data_seg, &size);
- break;
- case MLX5_OPCODE_FLOW_TBL_ACCESS:
-@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
- if (send_ring->pending_wqe % send_ring->signal_th == 0)
- send_info->write.send_flags |= IB_SEND_SIGNALED;
- else
-- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
-+ send_info->write.send_flags = 0;
- }
-
- static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
-@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
- }
-
- send_ring->pending_wqe++;
-- if (!send_info->write.lkey)
-- send_info->write.send_flags |= IB_SEND_INLINE;
-
- if (send_ring->pending_wqe % send_ring->signal_th == 0)
- send_info->write.send_flags |= IB_SEND_SIGNALED;
-- else
-- send_info->write.send_flags &= ~IB_SEND_SIGNALED;
-
- send_ring->pending_wqe++;
- send_info->read.length = send_info->write.length;
-@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
- send_info->read.lkey = send_ring->sync_mr->mkey;
-
- if (send_ring->pending_wqe % send_ring->signal_th == 0)
-- send_info->read.send_flags |= IB_SEND_SIGNALED;
-+ send_info->read.send_flags = IB_SEND_SIGNALED;
- else
-- send_info->read.send_flags &= ~IB_SEND_SIGNALED;
-+ send_info->read.send_flags = 0;
- }
-
- static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
-@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
- dmn->send_ring->cq->qp = dmn->send_ring->qp;
-
- dmn->info.max_send_wr = QUEUE_SIZE;
-- init_attr.max_send_sge = 1;
- dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
- DR_STE_SIZE);
-
-diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
-index e2aced7ab4547..95f63fcf4ba1f 100644
---- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
-+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
-@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
- * is 2^ACL_MAX_BF_LOG
- */
- bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
-- bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
-+ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
- GFP_KERNEL);
- if (!bf)
- return ERR_PTR(-ENOMEM);
-diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
-index 361b90007148b..62cabeeb842a1 100644
---- a/drivers/net/ethernet/realtek/r8169_main.c
-+++ b/drivers/net/ethernet/realtek/r8169_main.c
-@@ -579,6 +579,7 @@ struct rtl8169_tc_offsets {
- enum rtl_flag {
- RTL_FLAG_TASK_ENABLED = 0,
- RTL_FLAG_TASK_RESET_PENDING,
-+ RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
- RTL_FLAG_TASK_TX_TIMEOUT,
- RTL_FLAG_MAX
- };
-@@ -624,6 +625,7 @@ struct rtl8169_private {
-
- unsigned supports_gmii:1;
- unsigned aspm_manageable:1;
-+ unsigned dash_enabled:1;
- dma_addr_t counters_phys_addr;
- struct rtl8169_counters *counters;
- struct rtl8169_tc_offsets tc_offset;
-@@ -1253,14 +1255,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
- return r8168ep_ocp_read(tp, 0x128) & BIT(0);
- }
-
--static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
-+static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
-+{
-+ switch (tp->dash_type) {
-+ case RTL_DASH_DP:
-+ return r8168dp_check_dash(tp);
-+ case RTL_DASH_EP:
-+ return r8168ep_check_dash(tp);
-+ default:
-+ return false;
-+ }
-+}
-+
-+static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
- {
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
-- return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
-+ return RTL_DASH_DP;
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
-- return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
-+ return RTL_DASH_EP;
- default:
- return RTL_DASH_NONE;
- }
-@@ -1453,7 +1467,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
-
- device_set_wakeup_enable(tp_to_dev(tp), wolopts);
-
-- if (tp->dash_type == RTL_DASH_NONE) {
-+ if (!tp->dash_enabled) {
- rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
- }
-@@ -2512,7 +2526,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
-
- static void rtl_prepare_power_down(struct rtl8169_private *tp)
- {
-- if (tp->dash_type != RTL_DASH_NONE)
-+ if (tp->dash_enabled)
- return;
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-@@ -2582,6 +2596,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
-
- if (dev->flags & IFF_PROMISC) {
- rx_mode |= AcceptAllPhys;
-+ } else if (!(dev->flags & IFF_MULTICAST)) {
-+ rx_mode &= ~AcceptMulticast;
- } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
- dev->flags & IFF_ALLMULTI ||
- tp->mac_version == RTL_GIGA_MAC_VER_35) {
-@@ -4567,6 +4583,8 @@ static void rtl_task(struct work_struct *work)
- reset:
- rtl_reset_work(tp);
- netif_wake_queue(tp->dev);
-+ } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
-+ rtl_reset_work(tp);
- }
- out_unlock:
- rtnl_unlock();
-@@ -4596,7 +4614,11 @@ static void r8169_phylink_handler(struct net_device *ndev)
- if (netif_carrier_ok(ndev)) {
- rtl_link_chg_patch(tp);
- pm_request_resume(d);
-+ netif_wake_queue(tp->dev);
- } else {
-+		/* In a few cases rx is broken after link-down otherwise */
-+ if (rtl_is_8125(tp))
-+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
- pm_runtime_idle(d);
- }
-
-@@ -4640,10 +4662,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
- rtl8169_cleanup(tp);
- rtl_disable_exit_l1(tp);
- rtl_prepare_power_down(tp);
-+
-+ if (tp->dash_type != RTL_DASH_NONE)
-+ rtl8168_driver_stop(tp);
- }
-
- static void rtl8169_up(struct rtl8169_private *tp)
- {
-+ if (tp->dash_type != RTL_DASH_NONE)
-+ rtl8168_driver_start(tp);
-+
- pci_set_master(tp->pci_dev);
- phy_init_hw(tp->phydev);
- phy_resume(tp->phydev);
-@@ -4666,7 +4694,7 @@ static int rtl8169_close(struct net_device *dev)
- rtl8169_down(tp);
- rtl8169_rx_clear(tp);
-
-- cancel_work_sync(&tp->wk.work);
-+ cancel_work(&tp->wk.work);
-
- free_irq(tp->irq, tp);
-
-@@ -4861,7 +4889,7 @@ static int rtl8169_runtime_idle(struct device *device)
- {
- struct rtl8169_private *tp = dev_get_drvdata(device);
-
-- if (tp->dash_type != RTL_DASH_NONE)
-+ if (tp->dash_enabled)
- return -EBUSY;
-
- if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
-@@ -4887,8 +4915,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
- /* Restore original MAC address */
- rtl_rar_set(tp, tp->dev->perm_addr);
-
-- if (system_state == SYSTEM_POWER_OFF &&
-- tp->dash_type == RTL_DASH_NONE) {
-+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
- pci_wake_from_d3(pdev, tp->saved_wolopts);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-@@ -4901,6 +4928,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
-
-+ cancel_work_sync(&tp->wk.work);
-+
- unregister_netdev(tp->dev);
-
- if (tp->dash_type != RTL_DASH_NONE)
-@@ -5246,7 +5275,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
- tp->aspm_manageable = !rc;
-
-- tp->dash_type = rtl_check_dash(tp);
-+ tp->dash_type = rtl_get_dash_type(tp);
-+ tp->dash_enabled = rtl_dash_is_enabled(tp);
-
- tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
-
-@@ -5317,7 +5347,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- /* configure chip for default features */
- rtl8169_set_features(dev, dev->features);
-
-- if (tp->dash_type == RTL_DASH_NONE) {
-+ if (!tp->dash_enabled) {
- rtl_set_d3_pll_down(tp, true);
- } else {
- rtl_set_d3_pll_down(tp, false);
-@@ -5357,7 +5387,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- "ok" : "ko");
-
- if (tp->dash_type != RTL_DASH_NONE) {
-- netdev_info(dev, "DASH enabled\n");
-+ netdev_info(dev, "DASH %s\n",
-+ tp->dash_enabled ? "enabled" : "disabled");
- rtl8168_driver_start(tp);
- }
-
-diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
-index 0ef0b88b71459..bb56cf4090423 100644
---- a/drivers/net/ethernet/renesas/ravb_main.c
-+++ b/drivers/net/ethernet/renesas/ravb_main.c
-@@ -515,6 +515,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
- {
- struct ravb_private *priv = netdev_priv(ndev);
-
-+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
-+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
-+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
-+ } else {
-+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
-+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
-+ CXR31_SEL_LINK0);
-+ }
-+
- /* Receive frame limit set register */
- ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
-
-@@ -537,14 +546,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
-
- /* E-MAC interrupt enable register */
- ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
--
-- if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
-- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
-- ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
-- } else {
-- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
-- CXR31_SEL_LINK0);
-- }
- }
-
- static void ravb_emac_init_rcar(struct net_device *ndev)
-@@ -1811,19 +1812,20 @@ static int ravb_open(struct net_device *ndev)
- if (info->gptp)
- ravb_ptp_init(ndev, priv->pdev);
-
-- netif_tx_start_all_queues(ndev);
--
- /* PHY control start */
- error = ravb_phy_start(ndev);
- if (error)
- goto out_ptp_stop;
-
-+ netif_tx_start_all_queues(ndev);
-+
- return 0;
-
- out_ptp_stop:
- /* Stop PTP Clock driver */
- if (info->gptp)
- ravb_ptp_stop(ndev);
-+ ravb_stop_dma(ndev);
- out_free_irq_mgmta:
- if (!info->multi_irqs)
- goto out_free_irq;
-@@ -1874,6 +1876,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
- struct net_device *ndev = priv->ndev;
- int error;
-
-+ if (!rtnl_trylock()) {
-+ usleep_range(1000, 2000);
-+ schedule_work(&priv->work);
-+ return;
-+ }
-+
- netif_tx_stop_all_queues(ndev);
-
- /* Stop PTP Clock driver */
-@@ -1907,7 +1915,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
- */
- netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
- __func__, error);
-- return;
-+ goto out_unlock;
- }
- ravb_emac_init(ndev);
-
-@@ -1917,6 +1925,9 @@ out:
- ravb_ptp_init(ndev, priv->pdev);
-
- netif_tx_start_all_queues(ndev);
-+
-+out_unlock:
-+ rtnl_unlock();
- }
-
- /* Packet transmit function for Ethernet AVB */
-@@ -2645,9 +2656,14 @@ static int ravb_probe(struct platform_device *pdev)
- ndev->features = info->net_features;
- ndev->hw_features = info->net_hw_features;
-
-- reset_control_deassert(rstc);
-+ error = reset_control_deassert(rstc);
-+ if (error)
-+ goto out_free_netdev;
-+
- pm_runtime_enable(&pdev->dev);
-- pm_runtime_get_sync(&pdev->dev);
-+ error = pm_runtime_resume_and_get(&pdev->dev);
-+ if (error < 0)
-+ goto out_rpm_disable;
-
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
-@@ -2872,11 +2888,12 @@ out_disable_gptp_clk:
- out_disable_refclk:
- clk_disable_unprepare(priv->refclk);
- out_release:
-- free_netdev(ndev);
--
- pm_runtime_put(&pdev->dev);
-+out_rpm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(rstc);
-+out_free_netdev:
-+ free_netdev(ndev);
- return error;
- }
-
-@@ -2886,22 +2903,26 @@ static int ravb_remove(struct platform_device *pdev)
- struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
-
-- /* Stop PTP Clock driver */
-- if (info->ccc_gac)
-- ravb_ptp_stop(ndev);
--
-- clk_disable_unprepare(priv->gptp_clk);
-- clk_disable_unprepare(priv->refclk);
--
-- /* Set reset mode */
-- ravb_write(ndev, CCC_OPC_RESET, CCC);
- unregister_netdev(ndev);
- if (info->nc_queues)
- netif_napi_del(&priv->napi[RAVB_NC]);
- netif_napi_del(&priv->napi[RAVB_BE]);
-+
- ravb_mdio_release(priv);
-+
-+ /* Stop PTP Clock driver */
-+ if (info->ccc_gac)
-+ ravb_ptp_stop(ndev);
-+
- dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
- priv->desc_bat_dma);
-+
-+ /* Set reset mode */
-+ ravb_write(ndev, CCC_OPC_RESET, CCC);
-+
-+ clk_disable_unprepare(priv->gptp_clk);
-+ clk_disable_unprepare(priv->refclk);
-+
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(priv->rstc);
-diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
-index 0fc0b6bea7530..ae9d8722b76f7 100644
---- a/drivers/net/ethernet/renesas/rswitch.c
-+++ b/drivers/net/ethernet/renesas/rswitch.c
-@@ -1501,8 +1501,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
- {
- struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_queue *gq = rdev->tx_queue;
-+ netdev_tx_t ret = NETDEV_TX_OK;
- struct rswitch_ext_desc *desc;
-- int ret = NETDEV_TX_OK;
- dma_addr_t dma_addr;
-
- if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
-@@ -1514,10 +1514,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
- return ret;
-
- dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
-- if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
-- dev_kfree_skb_any(skb);
-- return ret;
-- }
-+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
-+ goto err_kfree;
-
- gq->skbs[gq->cur] = skb;
- desc = &gq->tx_ring[gq->cur];
-@@ -1530,10 +1528,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
- struct rswitch_gwca_ts_info *ts_info;
-
- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
-- if (!ts_info) {
-- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
-- return -ENOMEM;
-- }
-+ if (!ts_info)
-+ goto err_unmap;
-
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- rdev->ts_tag++;
-@@ -1555,6 +1551,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
- gq->cur = rswitch_next_queue_index(gq, true, 1);
- rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
-
-+ return ret;
-+
-+err_unmap:
-+ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
-+
-+err_kfree:
-+ dev_kfree_skb_any(skb);
-+
- return ret;
- }
-
-diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
-index 7a8f47e7b728b..a4e8b498dea96 100644
---- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
-+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
-@@ -259,7 +259,7 @@
- ((val) << XGMAC_PPS_MINIDX(x))
- #define XGMAC_PPSCMD_START 0x2
- #define XGMAC_PPSCMD_STOP 0x5
--#define XGMAC_PPSEN0 BIT(4)
-+#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
- #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
- #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
- #define XGMAC_TRGTBUSY0 BIT(31)
-diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
-index f352be269deb5..453e88b75be08 100644
---- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
-+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
-@@ -1178,7 +1178,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
-
- val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
- val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
-- val |= XGMAC_PPSEN0;
-+
-+ /* XGMAC Core has 4 PPS outputs at most.
-+ *
-+	 * Prior to XGMAC Core 3.20, Fixed mode or Flexible mode is selectable
-+	 * for PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
-+	 * and cannot be switched to Fixed mode, since PPSEN{1,2,3} are
-+	 * read-only reserved to 0.
-+	 * But always setting PPSEN{1,2,3} does not make things worse ;-)
-+	 *
-+	 * From XGMAC Core 3.20 onwards, PPSEN{0,1,2,3} are writable and must
-+	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
-+ */
-+ val |= XGMAC_PPSENx(index);
-
- writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
-
-diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
-index ea4910ae0921a..6a7c1d325c464 100644
---- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
-+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
-@@ -177,8 +177,10 @@
- #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
- #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
-
-+#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
- #define MMC_XGMAC_TX_FPE_FRAG 0x208
- #define MMC_XGMAC_TX_HOLD_REQ 0x20c
-+#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
- #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
- #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
- #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
-@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
- {
- writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
- writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
-+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
-+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
- writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
- }
-
-diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-index 5801f4d50f951..1fa4da96c8f50 100644
---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-@@ -5267,6 +5267,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
-
- dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
-+ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
-
- if (netif_msg_rx_status(priv)) {
- void *rx_head;
-@@ -5302,10 +5303,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
- len = 0;
- }
-
-+read_again:
- if (count >= limit)
- break;
-
--read_again:
- buf1_len = 0;
- buf2_len = 0;
- entry = next_entry;
-diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
-index 4cf2a52e43783..3025e9c189702 100644
---- a/drivers/net/ethernet/ti/icssg/icss_iep.c
-+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
-@@ -177,7 +177,7 @@ static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
- if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
- writel(upper_32_bits(ns), iep->base +
- iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
-- writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
-+ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
- }
-
- static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
-diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
-index 4914d0ef58e9b..c09ecb3da7723 100644
---- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
-+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
-@@ -2050,7 +2050,7 @@ static int prueth_probe(struct platform_device *pdev)
- &prueth->shram);
- if (ret) {
- dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
-- pruss_put(prueth->pruss);
-+ goto put_pruss;
- }
-
- prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
-@@ -2092,10 +2092,7 @@ static int prueth_probe(struct platform_device *pdev)
- prueth->iep1 = icss_iep_get_idx(np, 1);
- if (IS_ERR(prueth->iep1)) {
- ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
-- icss_iep_put(prueth->iep0);
-- prueth->iep0 = NULL;
-- prueth->iep1 = NULL;
-- goto free_pool;
-+ goto put_iep0;
- }
-
- if (prueth->pdata.quirk_10m_link_issue) {
-@@ -2185,6 +2182,12 @@ netdev_exit:
- exit_iep:
- if (prueth->pdata.quirk_10m_link_issue)
- icss_iep_exit_fw(prueth->iep1);
-+ icss_iep_put(prueth->iep1);
-+
-+put_iep0:
-+ icss_iep_put(prueth->iep0);
-+ prueth->iep0 = NULL;
-+ prueth->iep1 = NULL;
-
- free_pool:
- gen_pool_free(prueth->sram_pool,
-@@ -2192,6 +2195,8 @@ free_pool:
-
- put_mem:
- pruss_release_mem_region(prueth->pruss, &prueth->shram);
-+
-+put_pruss:
- pruss_put(prueth->pruss);
-
- put_cores:
-diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
-index 50d7eacfec582..87e67121477cb 100644
---- a/drivers/net/ethernet/toshiba/spider_net.c
-+++ b/drivers/net/ethernet/toshiba/spider_net.c
-@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
-- tx_descriptors + rx_descriptors));
-+ size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
-diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
-index 85dc16faca544..52130df26aee5 100644
---- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
-+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
-@@ -1677,10 +1677,12 @@ int wx_sw_init(struct wx *wx)
- wx->subsystem_device_id = pdev->subsystem_device;
- } else {
- err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
-- if (!err)
-- wx->subsystem_device_id = swab16((u16)ssid);
-+ if (err < 0) {
-+ wx_err(wx, "read of internal subsystem device id failed\n");
-+ return err;
-+ }
-
-- return err;
-+ wx->subsystem_device_id = swab16((u16)ssid);
- }
-
- wx->mac_table = kcalloc(wx->mac.num_rar_entries,
-diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
-index e04d4a5eed7ba..21505920136c6 100644
---- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
-+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
-@@ -1965,11 +1965,11 @@ void wx_reset_interrupt_capability(struct wx *wx)
- if (!pdev->msi_enabled && !pdev->msix_enabled)
- return;
-
-- pci_free_irq_vectors(wx->pdev);
- if (pdev->msix_enabled) {
- kfree(wx->msix_entries);
- wx->msix_entries = NULL;
- }
-+ pci_free_irq_vectors(wx->pdev);
- }
- EXPORT_SYMBOL(wx_reset_interrupt_capability);
-
-diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
-index 2b431db6085a6..a4d63d2f3c5bb 100644
---- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
-+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
-@@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx)
-
- /* PCI config space info */
- err = wx_sw_init(wx);
-- if (err < 0) {
-- wx_err(wx, "read of internal subsystem device id failed\n");
-+ if (err < 0)
- return err;
-- }
-
- /* mac type, phy type , oem type */
- ngbe_init_type_code(wx);
-diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
-index 5c3aed516ac20..d60c26ba0ba4c 100644
---- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
-+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
-@@ -362,10 +362,8 @@ static int txgbe_sw_init(struct wx *wx)
-
- /* PCI config space info */
- err = wx_sw_init(wx);
-- if (err < 0) {
-- wx_err(wx, "read of internal subsystem device id failed\n");
-+ if (err < 0)
- return err;
-- }
-
- txgbe_init_type_code(wx);
-
-diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
-index b7ec4dafae90c..3297aff969c80 100644
---- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
-+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
-@@ -822,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
- if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
- /* Tx Full Checksum Offload Enabled */
- cur_p->app0 |= 2;
-- } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
-+ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
- csum_start_off = skb_transport_offset(skb);
- csum_index_off = csum_start_off + skb->csum_offset;
- /* Tx Partial Checksum Offload Enabled */
-diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
-index b22596b18ee8c..b1919278e931f 100644
---- a/drivers/net/gtp.c
-+++ b/drivers/net/gtp.c
-@@ -630,7 +630,7 @@ static void __gtp_encap_destroy(struct sock *sk)
- gtp->sk0 = NULL;
- else
- gtp->sk1u = NULL;
-- udp_sk(sk)->encap_type = 0;
-+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
- rcu_assign_sk_user_data(sk, NULL);
- release_sock(sk);
- sock_put(sk);
-@@ -682,7 +682,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
-
- netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
-
-- switch (udp_sk(sk)->encap_type) {
-+ switch (READ_ONCE(udp_sk(sk)->encap_type)) {
- case UDP_ENCAP_GTP0:
- netdev_dbg(gtp->dev, "received GTP0 packet\n");
- ret = gtp0_udp_encap_recv(gtp, skb);
-diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
-index 3ba3c8fb28a5d..706ea5263e879 100644
---- a/drivers/net/hyperv/netvsc_drv.c
-+++ b/drivers/net/hyperv/netvsc_drv.c
-@@ -2206,9 +2206,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
- goto upper_link_failed;
- }
-
-- /* set slave flag before open to prevent IPv6 addrconf */
-- vf_netdev->flags |= IFF_SLAVE;
--
- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
-
- call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
-@@ -2315,16 +2312,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
-
- }
-
-- /* Fallback path to check synthetic vf with
-- * help of mac addr
-+	/* Fallback path to check the synthetic VF with the help of the MAC address.
-+	 * Because this function can be called before vf_netdev is initialized
-+	 * (NETDEV_POST_INIT), when its perm_addr has not yet been copied from
-+	 * dev_addr, also try to match against its dev_addr.
-+	 * Note: On Hyper-V and Azure, it's not possible to set a MAC address
-+	 * on a VF that matches the MAC of an unrelated NETVSC device.
- */
- list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
- ndev = hv_get_drvdata(ndev_ctx->device_ctx);
-- if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
-- netdev_notice(vf_netdev,
-- "falling back to mac addr based matching\n");
-+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
-+ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
- return ndev;
-- }
- }
-
- netdev_notice(vf_netdev,
-@@ -2332,6 +2331,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
- return NULL;
- }
-
-+static int netvsc_prepare_bonding(struct net_device *vf_netdev)
-+{
-+ struct net_device *ndev;
-+
-+ ndev = get_netvsc_byslot(vf_netdev);
-+ if (!ndev)
-+ return NOTIFY_DONE;
-+
-+ /* set slave flag before open to prevent IPv6 addrconf */
-+ vf_netdev->flags |= IFF_SLAVE;
-+ return NOTIFY_DONE;
-+}
-+
- static int netvsc_register_vf(struct net_device *vf_netdev)
- {
- struct net_device_context *net_device_ctx;
-@@ -2531,15 +2543,6 @@ static int netvsc_probe(struct hv_device *dev,
- goto devinfo_failed;
- }
-
-- nvdev = rndis_filter_device_add(dev, device_info);
-- if (IS_ERR(nvdev)) {
-- ret = PTR_ERR(nvdev);
-- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-- goto rndis_failed;
-- }
--
-- eth_hw_addr_set(net, device_info->mac_adr);
--
- /* We must get rtnl lock before scheduling nvdev->subchan_work,
- * otherwise netvsc_subchan_work() can get rtnl lock first and wait
- * all subchannels to show up, but that may not happen because
-@@ -2547,9 +2550,23 @@ static int netvsc_probe(struct hv_device *dev,
- * -> ... -> device_add() -> ... -> __device_attach() can't get
- * the device lock, so all the subchannels can't be processed --
- * finally netvsc_subchan_work() hangs forever.
-+ *
-+	 * The rtnl lock also needs to be held before rndis_filter_device_add(),
-+	 * which advertises the nvsp_2_vsc_capability / sriov bit and triggers
-+	 * VF NIC offering and registering. If the VF NIC finishes
-+	 * register_netdev() earlier, it may cause a name-based config failure.
- */
- rtnl_lock();
-
-+ nvdev = rndis_filter_device_add(dev, device_info);
-+ if (IS_ERR(nvdev)) {
-+ ret = PTR_ERR(nvdev);
-+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-+ goto rndis_failed;
-+ }
-+
-+ eth_hw_addr_set(net, device_info->mac_adr);
-+
- if (nvdev->num_chn > 1)
- schedule_work(&nvdev->subchan_work);
-
-@@ -2586,9 +2603,9 @@ static int netvsc_probe(struct hv_device *dev,
- return 0;
-
- register_failed:
-- rtnl_unlock();
- rndis_filter_device_remove(dev, nvdev);
- rndis_failed:
-+ rtnl_unlock();
- netvsc_devinfo_put(device_info);
- devinfo_failed:
- free_percpu(net_device_ctx->vf_stats);
-@@ -2753,6 +2770,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
- return NOTIFY_DONE;
-
- switch (event) {
-+ case NETDEV_POST_INIT:
-+ return netvsc_prepare_bonding(event_dev);
- case NETDEV_REGISTER:
- return netvsc_register_vf(event_dev);
- case NETDEV_UNREGISTER:
-@@ -2788,12 +2807,17 @@ static int __init netvsc_drv_init(void)
- }
- netvsc_ring_bytes = ring_size * PAGE_SIZE;
-
-+ register_netdevice_notifier(&netvsc_netdev_notifier);
-+
- ret = vmbus_driver_register(&netvsc_drv);
- if (ret)
-- return ret;
-+ goto err_vmbus_reg;
-
-- register_netdevice_notifier(&netvsc_netdev_notifier);
- return 0;
-+
-+err_vmbus_reg:
-+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
-+ return ret;
- }
-
- MODULE_LICENSE("GPL");
-diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
-index d7b81a36d673b..145eb0bd096d6 100644
---- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
-+++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
-@@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
- 0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
-
- static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
-- [R_LENGTH] = GENMASK(19, 0),
-+ [R_LENGTH] = GENMASK(23, 0),
- };
-
- REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
-diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
-index c0c49f1813673..2d5b021b4ea60 100644
---- a/drivers/net/ipvlan/ipvlan_core.c
-+++ b/drivers/net/ipvlan/ipvlan_core.c
-@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
- return addr;
- }
-
--static int ipvlan_process_v4_outbound(struct sk_buff *skb)
-+static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
- {
- const struct iphdr *ip4h = ip_hdr(skb);
- struct net_device *dev = skb->dev;
-@@ -441,25 +441,23 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
-
- err = ip_local_out(net, skb->sk, skb);
- if (unlikely(net_xmit_eval(err)))
-- dev->stats.tx_errors++;
-+ DEV_STATS_INC(dev, tx_errors);
- else
- ret = NET_XMIT_SUCCESS;
- goto out;
- err:
-- dev->stats.tx_errors++;
-+ DEV_STATS_INC(dev, tx_errors);
- kfree_skb(skb);
- out:
- return ret;
- }
-
- #if IS_ENABLED(CONFIG_IPV6)
--static int ipvlan_process_v6_outbound(struct sk_buff *skb)
-+
-+static noinline_for_stack int
-+ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
- {
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-- struct net_device *dev = skb->dev;
-- struct net *net = dev_net(dev);
-- struct dst_entry *dst;
-- int err, ret = NET_XMIT_DROP;
- struct flowi6 fl6 = {
- .flowi6_oif = dev->ifindex,
- .daddr = ip6h->daddr,
-@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
- .flowi6_mark = skb->mark,
- .flowi6_proto = ip6h->nexthdr,
- };
-+ struct dst_entry *dst;
-+ int err;
-
-- dst = ip6_route_output(net, NULL, &fl6);
-- if (dst->error) {
-- ret = dst->error;
-+ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
-+ err = dst->error;
-+ if (err) {
- dst_release(dst);
-- goto err;
-+ return err;
- }
- skb_dst_set(skb, dst);
-+ return 0;
-+}
-+
-+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
-+{
-+ struct net_device *dev = skb->dev;
-+ int err, ret = NET_XMIT_DROP;
-+
-+ err = ipvlan_route_v6_outbound(dev, skb);
-+ if (unlikely(err)) {
-+ DEV_STATS_INC(dev, tx_errors);
-+ kfree_skb(skb);
-+ return err;
-+ }
-
- memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-
-- err = ip6_local_out(net, skb->sk, skb);
-+ err = ip6_local_out(dev_net(dev), skb->sk, skb);
- if (unlikely(net_xmit_eval(err)))
-- dev->stats.tx_errors++;
-+ DEV_STATS_INC(dev, tx_errors);
- else
- ret = NET_XMIT_SUCCESS;
-- goto out;
--err:
-- dev->stats.tx_errors++;
-- kfree_skb(skb);
--out:
- return ret;
- }
- #else
-diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
-index 1b55928e89b8a..57c79f5f29916 100644
---- a/drivers/net/ipvlan/ipvlan_main.c
-+++ b/drivers/net/ipvlan/ipvlan_main.c
-@@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev,
- s->rx_dropped = rx_errs;
- s->tx_dropped = tx_drps;
- }
-+ s->tx_errors = DEV_STATS_READ(dev, tx_errors);
- }
-
- static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
-index c5cd4551c67ca..9663050a852d8 100644
---- a/drivers/net/macsec.c
-+++ b/drivers/net/macsec.c
-@@ -3657,9 +3657,9 @@ static void macsec_get_stats64(struct net_device *dev,
-
- dev_fetch_sw_netstats(s, dev->tstats);
-
-- s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
-- s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
-- s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
-+ s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
-+ s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
-+ s->rx_errors = DEV_STATS_READ(dev, rx_errors);
- }
-
- static int macsec_get_iflink(const struct net_device *dev)
-diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
-index 02bd201bc7e58..c8da94af4161a 100644
---- a/drivers/net/macvlan.c
-+++ b/drivers/net/macvlan.c
-@@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
- if (dev->flags & IFF_UP) {
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
-- if (change & IFF_PROMISC)
-+ if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
- dev_set_promiscuity(lowerdev,
- dev->flags & IFF_PROMISC ? 1 : -1);
-
-diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
-index f60eb97e3a627..608953d4f98da 100644
---- a/drivers/net/netdevsim/bpf.c
-+++ b/drivers/net/netdevsim/bpf.c
-@@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
- {
- struct nsim_bpf_bound_prog *state;
-
-- if (!prog || !prog->aux->offload)
-+ if (!prog || !bpf_prog_is_offloaded(prog->aux))
- return;
-
- state = prog->aux->offload->dev_priv;
-@@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
- if (!bpf->prog)
- return 0;
-
-- if (!bpf->prog->aux->offload) {
-+ if (!bpf_prog_is_offloaded(bpf->prog->aux)) {
- NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
- return -EINVAL;
- }
-diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
-index 0d7354955d626..b5f012619e42d 100644
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1631,6 +1631,7 @@ struct phylink *phylink_create(struct phylink_config *config,
- pl->config = config;
- if (config->type == PHYLINK_NETDEV) {
- pl->netdev = to_net_dev(config->dev);
-+ netif_carrier_off(pl->netdev);
- } else if (config->type == PHYLINK_DEV) {
- pl->dev = config->dev;
- } else {
-diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
-index 4ecfac2278651..3679a43f4eb02 100644
---- a/drivers/net/phy/sfp.c
-+++ b/drivers/net/phy/sfp.c
-@@ -452,6 +452,11 @@ static const struct sfp_quirk sfp_quirks[] = {
- // Rollball protocol to talk to the PHY.
- SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
-
-+ // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
-+ // NRZ in their EEPROM
-+ SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
-+ sfp_fixup_ignore_tx_fault),
-+
- SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
-
- // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
-@@ -463,6 +468,9 @@ static const struct sfp_quirk sfp_quirks[] = {
- SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
- sfp_fixup_ignore_tx_fault),
-
-+ // FS 2.5G Base-T
-+ SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
-+
- // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
- // 2500MBd NRZ in their EEPROM
- SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
-diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
-index ebcdffdf4f0e0..52d05ce4a2819 100644
---- a/drivers/net/ppp/ppp_synctty.c
-+++ b/drivers/net/ppp/ppp_synctty.c
-@@ -453,6 +453,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
- case PPPIOCSMRU:
- if (get_user(val, (int __user *) argp))
- break;
-+ if (val > U16_MAX) {
-+ err = -EINVAL;
-+ break;
-+ }
- if (val < PPP_MRU)
- val = PPP_MRU;
- ap->mru = val;
-@@ -687,7 +691,7 @@ ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
-
- /* strip address/control field if present */
- p = skb->data;
-- if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
-+ if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
- /* chop off address/control */
- if (skb->len < 3)
- goto err;
-diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
-index aff39bf3161de..4ea0e155bb0d5 100644
---- a/drivers/net/usb/ax88179_178a.c
-+++ b/drivers/net/usb/ax88179_178a.c
-@@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
-
- *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
-- msleep(200);
-+ msleep(500);
-
- *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
- ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
-- msleep(100);
-+ msleep(200);
-
- 	/* Ethernet PHY Auto Detach */
- ax88179_auto_detach(dev);
-diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
-index afb20c0ed688d..be18d72cefcce 100644
---- a/drivers/net/usb/r8152.c
-+++ b/drivers/net/usb/r8152.c
-@@ -2543,7 +2543,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
- }
- }
-
-- if (list_empty(&tp->rx_done))
-+ if (list_empty(&tp->rx_done) || work_done >= budget)
- goto out1;
-
- clear_bit(RX_EPROTO, &tp->flags);
-@@ -2559,6 +2559,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
- struct urb *urb;
- u8 *rx_data;
-
-+		/* A USB bulk transfer may contain many packets, so the
-+		 * total packet count may exceed the budget. Handle all
-+		 * packets in the current bulk transfer, and defer the
-+		 * next bulk transfer to the next schedule if the budget
-+		 * is exhausted.
-+		 */
-+ if (work_done >= budget)
-+ break;
-+
- list_del_init(cursor);
-
- agg = list_entry(cursor, struct rx_agg, list);
-@@ -2578,9 +2587,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
- unsigned int pkt_len, rx_frag_head_sz;
- struct sk_buff *skb;
-
-- /* limit the skb numbers for rx_queue */
-- if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
-- break;
-+ WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
-
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
- if (pkt_len < ETH_ZLEN)
-@@ -2658,9 +2665,10 @@ submit:
- }
- }
-
-+	/* Splice the remaining list back to rx_done for the next schedule */
- if (!list_empty(&rx_queue)) {
- spin_lock_irqsave(&tp->rx_lock, flags);
-- list_splice_tail(&rx_queue, &tp->rx_done);
-+ list_splice(&rx_queue, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
- }
-
-diff --git a/drivers/net/veth.c b/drivers/net/veth.c
-index 0deefd1573cf2..0f798bcbe25cd 100644
---- a/drivers/net/veth.c
-+++ b/drivers/net/veth.c
-@@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev,
- data[tx_idx + j] += *(u64 *)(base + offset);
- }
- } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
-- pp_idx = tx_idx + VETH_TQ_STATS_LEN;
- }
-+ pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
-
- page_pool_stats:
- veth_get_page_pool_stats(dev, &data[pp_idx]);
-@@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
- skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
- if (!use_napi)
-- dev_lstats_add(dev, length);
-+ dev_sw_netstats_tx_add(dev, 1, length);
- else
- __veth_xdp_flush(rq);
- } else {
-@@ -387,14 +387,6 @@ drop:
- return ret;
- }
-
--static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
--{
-- struct veth_priv *priv = netdev_priv(dev);
--
-- dev_lstats_read(dev, packets, bytes);
-- return atomic64_read(&priv->dropped);
--}
--
- static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
- {
- struct veth_priv *priv = netdev_priv(dev);
-@@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev,
- struct veth_priv *priv = netdev_priv(dev);
- struct net_device *peer;
- struct veth_stats rx;
-- u64 packets, bytes;
-
-- tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
-- tot->tx_bytes = bytes;
-- tot->tx_packets = packets;
-+ tot->tx_dropped = atomic64_read(&priv->dropped);
-+ dev_fetch_sw_netstats(tot, dev->tstats);
-
- veth_stats_rx(&rx, dev);
- tot->tx_dropped += rx.xdp_tx_err;
- tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
-- tot->rx_bytes = rx.xdp_bytes;
-- tot->rx_packets = rx.xdp_packets;
-+ tot->rx_bytes += rx.xdp_bytes;
-+ tot->rx_packets += rx.xdp_packets;
-
- rcu_read_lock();
- peer = rcu_dereference(priv->peer);
- if (peer) {
-- veth_stats_tx(peer, &packets, &bytes);
-- tot->rx_bytes += bytes;
-- tot->rx_packets += packets;
-+ struct rtnl_link_stats64 tot_peer = {};
-+
-+ dev_fetch_sw_netstats(&tot_peer, peer->tstats);
-+ tot->rx_bytes += tot_peer.tx_bytes;
-+ tot->rx_packets += tot_peer.tx_packets;
-
- veth_stats_rx(&rx, peer);
- tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
-@@ -1499,25 +1491,12 @@ static void veth_free_queues(struct net_device *dev)
-
- static int veth_dev_init(struct net_device *dev)
- {
-- int err;
--
-- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
-- if (!dev->lstats)
-- return -ENOMEM;
--
-- err = veth_alloc_queues(dev);
-- if (err) {
-- free_percpu(dev->lstats);
-- return err;
-- }
--
-- return 0;
-+ return veth_alloc_queues(dev);
- }
-
- static void veth_dev_free(struct net_device *dev)
- {
- veth_free_queues(dev);
-- free_percpu(dev->lstats);
- }
-
- #ifdef CONFIG_NET_POLL_CONTROLLER
-@@ -1789,6 +1768,7 @@ static void veth_setup(struct net_device *dev)
- NETIF_F_HW_VLAN_STAG_RX);
- dev->needs_free_netdev = true;
- dev->priv_destructor = veth_dev_free;
-+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
- dev->max_mtu = ETH_MAX_MTU;
-
- dev->hw_features = VETH_FEATURES;
-diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index d67f742fbd4c5..0c0be6b872c6a 100644
---- a/drivers/net/virtio_net.c
-+++ b/drivers/net/virtio_net.c
-@@ -81,24 +81,24 @@ struct virtnet_stat_desc {
-
- struct virtnet_sq_stats {
- struct u64_stats_sync syncp;
-- u64 packets;
-- u64 bytes;
-- u64 xdp_tx;
-- u64 xdp_tx_drops;
-- u64 kicks;
-- u64 tx_timeouts;
-+ u64_stats_t packets;
-+ u64_stats_t bytes;
-+ u64_stats_t xdp_tx;
-+ u64_stats_t xdp_tx_drops;
-+ u64_stats_t kicks;
-+ u64_stats_t tx_timeouts;
- };
-
- struct virtnet_rq_stats {
- struct u64_stats_sync syncp;
-- u64 packets;
-- u64 bytes;
-- u64 drops;
-- u64 xdp_packets;
-- u64 xdp_tx;
-- u64 xdp_redirects;
-- u64 xdp_drops;
-- u64 kicks;
-+ u64_stats_t packets;
-+ u64_stats_t bytes;
-+ u64_stats_t drops;
-+ u64_stats_t xdp_packets;
-+ u64_stats_t xdp_tx;
-+ u64_stats_t xdp_redirects;
-+ u64_stats_t xdp_drops;
-+ u64_stats_t kicks;
- };
-
- #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
-@@ -775,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
- return;
-
- u64_stats_update_begin(&sq->stats.syncp);
-- sq->stats.bytes += bytes;
-- sq->stats.packets += packets;
-+ u64_stats_add(&sq->stats.bytes, bytes);
-+ u64_stats_add(&sq->stats.packets, packets);
- u64_stats_update_end(&sq->stats.syncp);
- }
-
-@@ -975,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
- }
- out:
- u64_stats_update_begin(&sq->stats.syncp);
-- sq->stats.bytes += bytes;
-- sq->stats.packets += packets;
-- sq->stats.xdp_tx += n;
-- sq->stats.xdp_tx_drops += n - nxmit;
-- sq->stats.kicks += kicks;
-+ u64_stats_add(&sq->stats.bytes, bytes);
-+ u64_stats_add(&sq->stats.packets, packets);
-+ u64_stats_add(&sq->stats.xdp_tx, n);
-+ u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
-+ u64_stats_add(&sq->stats.kicks, kicks);
- u64_stats_update_end(&sq->stats.syncp);
-
- virtnet_xdp_put_sq(vi, sq);
-@@ -1011,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
- u32 act;
-
- act = bpf_prog_run_xdp(xdp_prog, xdp);
-- stats->xdp_packets++;
-+ u64_stats_inc(&stats->xdp_packets);
-
- switch (act) {
- case XDP_PASS:
- return act;
-
- case XDP_TX:
-- stats->xdp_tx++;
-+ u64_stats_inc(&stats->xdp_tx);
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- netdev_dbg(dev, "convert buff to frame failed for xdp\n");
-@@ -1036,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
- return act;
-
- case XDP_REDIRECT:
-- stats->xdp_redirects++;
-+ u64_stats_inc(&stats->xdp_redirects);
- err = xdp_do_redirect(dev, xdp, xdp_prog);
- if (err)
- return XDP_DROP;
-@@ -1232,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
- return skb;
-
- err_xdp:
-- stats->xdp_drops++;
-+ u64_stats_inc(&stats->xdp_drops);
- err:
-- stats->drops++;
-+ u64_stats_inc(&stats->drops);
- put_page(page);
- xdp_xmit:
- return NULL;
-@@ -1253,7 +1253,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
- struct sk_buff *skb;
-
- len -= vi->hdr_len;
-- stats->bytes += len;
-+ u64_stats_add(&stats->bytes, len);
-
- if (unlikely(len > GOOD_PACKET_LEN)) {
- pr_debug("%s: rx error: len %u exceeds max size %d\n",
-@@ -1282,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
- return skb;
-
- err:
-- stats->drops++;
-+ u64_stats_inc(&stats->drops);
- put_page(page);
- return NULL;
- }
-@@ -1298,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
- struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
-
-- stats->bytes += len - vi->hdr_len;
-+ u64_stats_add(&stats->bytes, len - vi->hdr_len);
- if (unlikely(!skb))
- goto err;
-
- return skb;
-
- err:
-- stats->drops++;
-+ u64_stats_inc(&stats->drops);
- give_pages(rq, page);
- return NULL;
- }
-@@ -1326,7 +1326,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
- dev->stats.rx_length_errors++;
- break;
- }
-- stats->bytes += len;
-+ u64_stats_add(&stats->bytes, len);
- page = virt_to_head_page(buf);
- put_page(page);
- }
-@@ -1436,7 +1436,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
- goto err;
- }
-
-- stats->bytes += len;
-+ u64_stats_add(&stats->bytes, len);
- page = virt_to_head_page(buf);
- offset = buf - page_address(page);
-
-@@ -1600,8 +1600,8 @@ err_xdp:
- put_page(page);
- mergeable_buf_free(rq, num_buf, dev, stats);
-
-- stats->xdp_drops++;
-- stats->drops++;
-+ u64_stats_inc(&stats->xdp_drops);
-+ u64_stats_inc(&stats->drops);
- return NULL;
- }
-
-@@ -1625,7 +1625,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
- unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-
- head_skb = NULL;
-- stats->bytes += len - vi->hdr_len;
-+ u64_stats_add(&stats->bytes, len - vi->hdr_len);
-
- if (unlikely(len > truesize - room)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-@@ -1666,7 +1666,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
- goto err_buf;
- }
-
-- stats->bytes += len;
-+ u64_stats_add(&stats->bytes, len);
- page = virt_to_head_page(buf);
-
- truesize = mergeable_ctx_to_truesize(ctx);
-@@ -1718,7 +1718,7 @@ err_skb:
- mergeable_buf_free(rq, num_buf, dev, stats);
-
- err_buf:
-- stats->drops++;
-+ u64_stats_inc(&stats->drops);
- dev_kfree_skb(head_skb);
- return NULL;
- }
-@@ -1985,7 +1985,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
- unsigned long flags;
-
- flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
-- rq->stats.kicks++;
-+ u64_stats_inc(&rq->stats.kicks);
- u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
- }
-
-@@ -2065,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
- struct virtnet_info *vi = rq->vq->vdev->priv;
- struct virtnet_rq_stats stats = {};
- unsigned int len;
-+ int packets = 0;
- void *buf;
- int i;
-
- if (!vi->big_packets || vi->mergeable_rx_bufs) {
- void *ctx;
-
-- while (stats.packets < budget &&
-+ while (packets < budget &&
- (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
- receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
-- stats.packets++;
-+ packets++;
- }
- } else {
-- while (stats.packets < budget &&
-+ while (packets < budget &&
- (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
- receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
-- stats.packets++;
-+ packets++;
- }
- }
-
-@@ -2093,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
- }
- }
-
-+ u64_stats_set(&stats.packets, packets);
- u64_stats_update_begin(&rq->stats.syncp);
- for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
- size_t offset = virtnet_rq_stats_desc[i].offset;
-- u64 *item;
-+ u64_stats_t *item, *src;
-
-- item = (u64 *)((u8 *)&rq->stats + offset);
-- *item += *(u64 *)((u8 *)&stats + offset);
-+ item = (u64_stats_t *)((u8 *)&rq->stats + offset);
-+ src = (u64_stats_t *)((u8 *)&stats + offset);
-+ u64_stats_add(item, u64_stats_read(src));
- }
- u64_stats_update_end(&rq->stats.syncp);
-
-- return stats.packets;
-+ return packets;
- }
-
- static void virtnet_poll_cleantx(struct receive_queue *rq)
-@@ -2158,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
- sq = virtnet_xdp_get_sq(vi);
- if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
- u64_stats_update_begin(&sq->stats.syncp);
-- sq->stats.kicks++;
-+ u64_stats_inc(&sq->stats.kicks);
- u64_stats_update_end(&sq->stats.syncp);
- }
- virtnet_xdp_put_sq(vi, sq);
-@@ -2370,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
- if (kick || netif_xmit_stopped(txq)) {
- if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
- u64_stats_update_begin(&sq->stats.syncp);
-- sq->stats.kicks++;
-+ u64_stats_inc(&sq->stats.kicks);
- u64_stats_update_end(&sq->stats.syncp);
- }
- }
-@@ -2553,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev,
-
- do {
- start = u64_stats_fetch_begin(&sq->stats.syncp);
-- tpackets = sq->stats.packets;
-- tbytes = sq->stats.bytes;
-- terrors = sq->stats.tx_timeouts;
-+ tpackets = u64_stats_read(&sq->stats.packets);
-+ tbytes = u64_stats_read(&sq->stats.bytes);
-+ terrors = u64_stats_read(&sq->stats.tx_timeouts);
- } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
-
- do {
- start = u64_stats_fetch_begin(&rq->stats.syncp);
-- rpackets = rq->stats.packets;
-- rbytes = rq->stats.bytes;
-- rdrops = rq->stats.drops;
-+ rpackets = u64_stats_read(&rq->stats.packets);
-+ rbytes = u64_stats_read(&rq->stats.bytes);
-+ rdrops = u64_stats_read(&rq->stats.drops);
- } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
-
- tot->rx_packets += rpackets;
-@@ -2855,6 +2858,9 @@ static void virtnet_get_ringparam(struct net_device *dev,
- ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- }
-
-+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
-+ u16 vqn, u32 max_usecs, u32 max_packets);
-+
- static int virtnet_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring,
- struct kernel_ethtool_ringparam *kernel_ring,
-@@ -2890,12 +2896,36 @@ static int virtnet_set_ringparam(struct net_device *dev,
- err = virtnet_tx_resize(vi, sq, ring->tx_pending);
- if (err)
- return err;
-+
-+ /* Upon disabling and re-enabling a transmit virtqueue, the device must
-+ * set the coalescing parameters of the virtqueue to those configured
-+ * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
-+ * did not set any TX coalescing parameters, to 0.
-+ */
-+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
-+ vi->intr_coal_tx.max_usecs,
-+ vi->intr_coal_tx.max_packets);
-+ if (err)
-+ return err;
-+
-+ vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
-+ vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
- }
-
- if (ring->rx_pending != rx_pending) {
- err = virtnet_rx_resize(vi, rq, ring->rx_pending);
- if (err)
- return err;
-+
-+		/* The reason is the same as for the transmit virtqueue reset */
-+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
-+ vi->intr_coal_rx.max_usecs,
-+ vi->intr_coal_rx.max_packets);
-+ if (err)
-+ return err;
-+
-+ vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
-+ vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
- }
- }
-
-@@ -3164,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
- struct virtnet_info *vi = netdev_priv(dev);
- unsigned int idx = 0, start, i, j;
- const u8 *stats_base;
-+ const u64_stats_t *p;
- size_t offset;
-
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct receive_queue *rq = &vi->rq[i];
-
-- stats_base = (u8 *)&rq->stats;
-+ stats_base = (const u8 *)&rq->stats;
- do {
- start = u64_stats_fetch_begin(&rq->stats.syncp);
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
- offset = virtnet_rq_stats_desc[j].offset;
-- data[idx + j] = *(u64 *)(stats_base + offset);
-+ p = (const u64_stats_t *)(stats_base + offset);
-+ data[idx + j] = u64_stats_read(p);
- }
- } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
- idx += VIRTNET_RQ_STATS_LEN;
-@@ -3183,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct send_queue *sq = &vi->sq[i];
-
-- stats_base = (u8 *)&sq->stats;
-+ stats_base = (const u8 *)&sq->stats;
- do {
- start = u64_stats_fetch_begin(&sq->stats.syncp);
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
- offset = virtnet_sq_stats_desc[j].offset;
-- data[idx + j] = *(u64 *)(stats_base + offset);
-+ p = (const u64_stats_t *)(stats_base + offset);
-+ data[idx + j] = u64_stats_read(p);
- }
- } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
- idx += VIRTNET_SQ_STATS_LEN;
-@@ -3233,6 +3266,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
- struct ethtool_coalesce *ec)
- {
- struct scatterlist sgs_tx, sgs_rx;
-+ int i;
-
- vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-@@ -3246,6 +3280,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
- /* Save parameters */
- vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
- vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
-+ for (i = 0; i < vi->max_queue_pairs; i++) {
-+ vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
-+ vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
-+ }
-
- vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-@@ -3259,6 +3297,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
- /* Save parameters */
- vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
- vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
-+ for (i = 0; i < vi->max_queue_pairs; i++) {
-+ vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-+ vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-+ }
-
- return 0;
- }
-@@ -3287,27 +3329,23 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
- {
- int err;
-
-- if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames) {
-- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
-- ec->rx_coalesce_usecs,
-- ec->rx_max_coalesced_frames);
-- if (err)
-- return err;
-- /* Save parameters */
-- vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-- vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-- }
-+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
-+ ec->rx_coalesce_usecs,
-+ ec->rx_max_coalesced_frames);
-+ if (err)
-+ return err;
-
-- if (ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames) {
-- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
-- ec->tx_coalesce_usecs,
-- ec->tx_max_coalesced_frames);
-- if (err)
-- return err;
-- /* Save parameters */
-- vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
-- vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
-- }
-+ vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-+ vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-+
-+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
-+ ec->tx_coalesce_usecs,
-+ ec->tx_max_coalesced_frames);
-+ if (err)
-+ return err;
-+
-+ vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
-+ vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
-
- return 0;
- }
-@@ -3453,7 +3491,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
- } else {
- ec->rx_max_coalesced_frames = 1;
-
-- if (vi->sq[0].napi.weight)
-+ if (vi->sq[queue].napi.weight)
- ec->tx_max_coalesced_frames = 1;
- }
-
-@@ -3866,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
- struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
-
- u64_stats_update_begin(&sq->stats.syncp);
-- sq->stats.tx_timeouts++;
-+ u64_stats_inc(&sq->stats.tx_timeouts);
- u64_stats_update_end(&sq->stats.syncp);
-
- netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
-diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
-index a3408e4e1491b..b90dccdc2d33c 100644
---- a/drivers/net/vrf.c
-+++ b/drivers/net/vrf.c
-@@ -121,22 +121,12 @@ struct net_vrf {
- int ifindex;
- };
-
--struct pcpu_dstats {
-- u64 tx_pkts;
-- u64 tx_bytes;
-- u64 tx_drps;
-- u64 rx_pkts;
-- u64 rx_bytes;
-- u64 rx_drps;
-- struct u64_stats_sync syncp;
--};
--
- static void vrf_rx_stats(struct net_device *dev, int len)
- {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
-- dstats->rx_pkts++;
-+ dstats->rx_packets++;
- dstats->rx_bytes += len;
- u64_stats_update_end(&dstats->syncp);
- }
-@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
- do {
- start = u64_stats_fetch_begin(&dstats->syncp);
- tbytes = dstats->tx_bytes;
-- tpkts = dstats->tx_pkts;
-- tdrops = dstats->tx_drps;
-+ tpkts = dstats->tx_packets;
-+ tdrops = dstats->tx_drops;
- rbytes = dstats->rx_bytes;
-- rpkts = dstats->rx_pkts;
-+ rpkts = dstats->rx_packets;
- } while (u64_stats_fetch_retry(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpkts;
-@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
- vrf_rx_stats(dev, len);
- else
-- this_cpu_inc(dev->dstats->rx_drps);
-+ this_cpu_inc(dev->dstats->rx_drops);
-
- return NETDEV_TX_OK;
- }
-@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
-- dstats->tx_pkts++;
-+ dstats->tx_packets++;
- dstats->tx_bytes += len;
- u64_stats_update_end(&dstats->syncp);
- } else {
-- this_cpu_inc(dev->dstats->tx_drps);
-+ this_cpu_inc(dev->dstats->tx_drops);
- }
-
- return ret;
-@@ -1174,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev)
-
- vrf_rtable_release(dev, vrf);
- vrf_rt6_release(dev, vrf);
--
-- free_percpu(dev->dstats);
-- dev->dstats = NULL;
- }
-
- static int vrf_dev_init(struct net_device *dev)
- {
- struct net_vrf *vrf = netdev_priv(dev);
-
-- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
-- if (!dev->dstats)
-- goto out_nomem;
--
- /* create the default dst which points back to us */
- if (vrf_rtable_create(dev) != 0)
-- goto out_stats;
-+ goto out_nomem;
-
- if (vrf_rt6_create(dev) != 0)
- goto out_rth;
-@@ -1203,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev)
-
- out_rth:
- vrf_rtable_release(dev, vrf);
--out_stats:
-- free_percpu(dev->dstats);
-- dev->dstats = NULL;
- out_nomem:
- return -ENOMEM;
- }
-@@ -1704,6 +1684,8 @@ static void vrf_setup(struct net_device *dev)
- dev->min_mtu = IPV6_MIN_MTU;
- dev->max_mtu = IP6_MAX_MTU;
- dev->mtu = dev->max_mtu;
-+
-+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
- }
-
- static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
-diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
-index 258dcc1039216..deb9636b0ecf8 100644
---- a/drivers/net/wireguard/device.c
-+++ b/drivers/net/wireguard/device.c
-@@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
- */
- while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
- dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
-- ++dev->stats.tx_dropped;
-+ DEV_STATS_INC(dev, tx_dropped);
- }
- skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
- spin_unlock_bh(&peer->staged_packet_queue.lock);
-@@ -228,7 +228,7 @@ err_icmp:
- else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
- err:
-- ++dev->stats.tx_errors;
-+ DEV_STATS_INC(dev, tx_errors);
- kfree_skb(skb);
- return ret;
- }
-diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
-index 0b3f0c8435509..a176653c88616 100644
---- a/drivers/net/wireguard/receive.c
-+++ b/drivers/net/wireguard/receive.c
-@@ -416,20 +416,20 @@ dishonest_packet_peer:
- net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
- dev->name, skb, peer->internal_id,
- &peer->endpoint.addr);
-- ++dev->stats.rx_errors;
-- ++dev->stats.rx_frame_errors;
-+ DEV_STATS_INC(dev, rx_errors);
-+ DEV_STATS_INC(dev, rx_frame_errors);
- goto packet_processed;
- dishonest_packet_type:
- net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
- dev->name, peer->internal_id, &peer->endpoint.addr);
-- ++dev->stats.rx_errors;
-- ++dev->stats.rx_frame_errors;
-+ DEV_STATS_INC(dev, rx_errors);
-+ DEV_STATS_INC(dev, rx_frame_errors);
- goto packet_processed;
- dishonest_packet_size:
- net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
- dev->name, peer->internal_id, &peer->endpoint.addr);
-- ++dev->stats.rx_errors;
-- ++dev->stats.rx_length_errors;
-+ DEV_STATS_INC(dev, rx_errors);
-+ DEV_STATS_INC(dev, rx_length_errors);
- goto packet_processed;
- packet_processed:
- dev_kfree_skb(skb);
-diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
-index 95c853b59e1da..0d48e0f4a1ba3 100644
---- a/drivers/net/wireguard/send.c
-+++ b/drivers/net/wireguard/send.c
-@@ -333,7 +333,8 @@ err:
- void wg_packet_purge_staged_packets(struct wg_peer *peer)
- {
- spin_lock_bh(&peer->staged_packet_queue.lock);
-- peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
-+ DEV_STATS_ADD(peer->device->dev, tx_dropped,
-+ peer->staged_packet_queue.qlen);
- __skb_queue_purge(&peer->staged_packet_queue);
- spin_unlock_bh(&peer->staged_packet_queue.lock);
- }
-diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
-index f9518e1c99039..fe89bc61e5317 100644
---- a/drivers/net/wireless/ath/ath10k/debug.c
-+++ b/drivers/net/wireless/ath/ath10k/debug.c
-@@ -1140,7 +1140,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
- u32 sset, u8 *data)
- {
- if (sset == ETH_SS_STATS)
-- memcpy(data, *ath10k_gstrings_stats,
-+ memcpy(data, ath10k_gstrings_stats,
- sizeof(ath10k_gstrings_stats));
- }
-
-diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
-index 26214c00cd0d7..2c39bad7ebfb9 100644
---- a/drivers/net/wireless/ath/ath10k/snoc.c
-+++ b/drivers/net/wireless/ath/ath10k/snoc.c
-@@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
-
- static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
- {
-- ath10k_ce_disable_interrupts(ar);
-+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
-+ int id;
-+
-+ for (id = 0; id < CE_COUNT_MAX; id++)
-+ disable_irq(ar_snoc->ce_irqs[id].irq_line);
- }
-
- static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
- {
-- ath10k_ce_enable_interrupts(ar);
-+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
-+ int id;
-+
-+ for (id = 0; id < CE_COUNT_MAX; id++)
-+ enable_irq(ar_snoc->ce_irqs[id].irq_line);
- }
-
- static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
-@@ -1090,6 +1098,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
- goto err_free_rri;
- }
-
-+ ath10k_ce_enable_interrupts(ar);
-+
- return 0;
-
- err_free_rri:
-@@ -1253,8 +1263,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
-
- for (id = 0; id < CE_COUNT_MAX; id++) {
- ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
-- ath10k_snoc_per_engine_handler, 0,
-- ce_name[id], ar);
-+ ath10k_snoc_per_engine_handler,
-+ IRQF_NO_AUTOEN, ce_name[id], ar);
- if (ret) {
- ath10k_err(ar,
- "failed to register IRQ handler for CE %d: %d\n",
-diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
-index 62bc98852f0f7..a993e74bbae83 100644
---- a/drivers/net/wireless/ath/ath11k/dp_rx.c
-+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
-@@ -1621,14 +1621,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
- u8 pdev_id;
-
- pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
-+
-+ rcu_read_lock();
-+
- ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
- if (!ar) {
- ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
-- return;
-+ goto out;
- }
-
- trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
- ar->ab->pktlog_defs_checksum);
-+
-+out:
-+ rcu_read_unlock();
- }
-
- static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
-diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
-index c071bf5841af6..b328a0599818b 100644
---- a/drivers/net/wireless/ath/ath11k/mac.c
-+++ b/drivers/net/wireless/ath/ath11k/mac.c
-@@ -9042,6 +9042,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
- if (ar->state != ATH11K_STATE_ON)
- goto err_fallback;
-
-+	/* Firmware doesn't provide Tx power during CAC, hence there is no
-+	 * need to fetch the stats.
-+	 */
-+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
-+ mutex_unlock(&ar->conf_mutex);
-+ return -EAGAIN;
-+ }
-+
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
-diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
-index a5aa1857ec14b..09e65c5e55c4a 100644
---- a/drivers/net/wireless/ath/ath11k/pci.c
-+++ b/drivers/net/wireless/ath/ath11k/pci.c
-@@ -854,10 +854,16 @@ unsupported_wcn6855_soc:
- if (ret)
- goto err_pci_disable_msi;
-
-+ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
-+ if (ret) {
-+ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
-+ goto err_pci_disable_msi;
-+ }
-+
- ret = ath11k_mhi_register(ab_pci);
- if (ret) {
- ath11k_err(ab, "failed to register mhi: %d\n", ret);
-- goto err_pci_disable_msi;
-+ goto err_irq_affinity_cleanup;
- }
-
- ret = ath11k_hal_srng_init(ab);
-@@ -878,12 +884,6 @@ unsupported_wcn6855_soc:
- goto err_ce_free;
- }
-
-- ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
-- if (ret) {
-- ath11k_err(ab, "failed to set irq affinity %d\n", ret);
-- goto err_free_irq;
-- }
--
- /* kernel may allocate a dummy vector before request_irq and
- * then allocate a real vector when request_irq is called.
- * So get msi_data here again to avoid spurious interrupt
-@@ -892,20 +892,17 @@ unsupported_wcn6855_soc:
- ret = ath11k_pci_config_msi_data(ab_pci);
- if (ret) {
- ath11k_err(ab, "failed to config msi_data: %d\n", ret);
-- goto err_irq_affinity_cleanup;
-+ goto err_free_irq;
- }
-
- ret = ath11k_core_init(ab);
- if (ret) {
- ath11k_err(ab, "failed to init core: %d\n", ret);
-- goto err_irq_affinity_cleanup;
-+ goto err_free_irq;
- }
- ath11k_qmi_fwreset_from_cold_boot(ab);
- return 0;
-
--err_irq_affinity_cleanup:
-- ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
--
- err_free_irq:
- ath11k_pcic_free_irq(ab);
-
-@@ -918,6 +915,9 @@ err_hal_srng_deinit:
- err_mhi_unregister:
- ath11k_mhi_unregister(ab_pci);
-
-+err_irq_affinity_cleanup:
-+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
-+
- err_pci_disable_msi:
- ath11k_pci_free_msi(ab_pci);
-
-diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
-index 23ad6825e5be5..1c07f55c25e67 100644
---- a/drivers/net/wireless/ath/ath11k/wmi.c
-+++ b/drivers/net/wireless/ath/ath11k/wmi.c
-@@ -8337,6 +8337,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
- ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
- ev->freq_offset, ev->sidx);
-
-+ rcu_read_lock();
-+
- ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
-
- if (!ar) {
-@@ -8354,6 +8356,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
- ieee80211_radar_detected(ar->hw);
-
- exit:
-+ rcu_read_unlock();
-+
- kfree(tb);
- }
-
-@@ -8383,15 +8387,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
- ev->temp, ev->pdev_id);
-
-+ rcu_read_lock();
-+
- ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
- if (!ar) {
- ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
-- kfree(tb);
-- return;
-+ goto exit;
- }
-
- ath11k_thermal_event_temperature(ar, ev->temp);
-
-+exit:
-+ rcu_read_unlock();
-+
- kfree(tb);
- }
-
-@@ -8611,12 +8619,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
- return;
- }
-
-+ rcu_read_lock();
-+
- arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
- if (!arvif) {
- ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
- ev->vdev_id);
-- kfree(tb);
-- return;
-+ goto exit;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
-@@ -8633,6 +8642,8 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
-
- ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
- (void *)&replay_ctr_be, GFP_ATOMIC);
-+exit:
-+ rcu_read_unlock();
-
- kfree(tb);
- }
-diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
-index f933896f2a68d..6893466f61f04 100644
---- a/drivers/net/wireless/ath/ath12k/dp.c
-+++ b/drivers/net/wireless/ath/ath12k/dp.c
-@@ -38,6 +38,7 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
-
- ath12k_dp_rx_peer_tid_cleanup(ar, peer);
- crypto_free_shash(peer->tfm_mmic);
-+ peer->dp_setup_done = false;
- spin_unlock_bh(&ab->base_lock);
- }
-
-diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
-index e6e64d437c47a..dbcbe7e0cd2a7 100644
---- a/drivers/net/wireless/ath/ath12k/dp_rx.c
-+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
-@@ -1555,6 +1555,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
-
- msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
- len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
-+ if (len > (skb->len - struct_size(msg, data, 0))) {
-+ ath12k_warn(ab,
-+ "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
-+ len, skb->len);
-+ return -EINVAL;
-+ }
-+
- pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
- ppdu_id = le32_to_cpu(msg->ppdu_id);
-
-@@ -1583,6 +1590,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
- goto exit;
- }
-
-+ if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
-+ spin_unlock_bh(&ar->data_lock);
-+ ath12k_warn(ab,
-+ "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
-+ ppdu_info->ppdu_stats.common.num_users,
-+ HTT_PPDU_STATS_MAX_USERS);
-+ ret = -EINVAL;
-+ goto exit;
-+ }
-+
- /* back up data rate tlv for all peers */
- if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
- (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
-@@ -1641,11 +1658,12 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
- msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
- pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
- HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
-- ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
-
-+ rcu_read_lock();
-+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
- if (!ar) {
- ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
-- return;
-+ goto exit;
- }
-
- spin_lock_bh(&ar->data_lock);
-@@ -1661,6 +1679,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
- pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
-
- spin_unlock_bh(&ar->data_lock);
-+exit:
-+ rcu_read_unlock();
- }
-
- void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
-@@ -2748,6 +2768,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
- }
-
- peer->tfm_mmic = tfm;
-+ peer->dp_setup_done = true;
- spin_unlock_bh(&ab->base_lock);
-
- return 0;
-@@ -3214,6 +3235,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
- ret = -ENOENT;
- goto out_unlock;
- }
-+
-+ if (!peer->dp_setup_done) {
-+ ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
-+ peer->addr, peer_id);
-+ ret = -ENOENT;
-+ goto out_unlock;
-+ }
-+
- rx_tid = &peer->rx_tid[tid];
-
- if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
-@@ -3229,7 +3258,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
- goto out_unlock;
- }
-
-- if (frag_no > __fls(rx_tid->rx_frag_bitmap))
-+ if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
- __skb_queue_tail(&rx_tid->rx_frags, msdu);
- else
- ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
-diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
-index 8874c815d7faf..16d889fc20433 100644
---- a/drivers/net/wireless/ath/ath12k/dp_tx.c
-+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
-@@ -330,8 +330,11 @@ tcl_ring_sel:
-
- fail_unmap_dma:
- dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
-+
-+ if (skb_cb->paddr_ext_desc)
-+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-+ sizeof(struct hal_tx_msdu_ext_desc),
-+ DMA_TO_DEVICE);
-
- fail_remove_tx_buf:
- ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
-diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
-index 42f1140baa4fe..f83d3e09ae366 100644
---- a/drivers/net/wireless/ath/ath12k/mhi.c
-+++ b/drivers/net/wireless/ath/ath12k/mhi.c
-@@ -370,8 +370,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
- ret = ath12k_mhi_get_msi(ab_pci);
- if (ret) {
- ath12k_err(ab, "failed to get msi for mhi\n");
-- mhi_free_controller(mhi_ctrl);
-- return ret;
-+ goto free_controller;
- }
-
- mhi_ctrl->iova_start = 0;
-@@ -388,11 +387,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
- ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
- if (ret) {
- ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
-- mhi_free_controller(mhi_ctrl);
-- return ret;
-+ goto free_controller;
- }
-
- return 0;
-+
-+free_controller:
-+ mhi_free_controller(mhi_ctrl);
-+ ab_pci->mhi_ctrl = NULL;
-+ return ret;
- }
-
- void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
-diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
-index b296dc0e2f671..c6edb24cbedd8 100644
---- a/drivers/net/wireless/ath/ath12k/peer.h
-+++ b/drivers/net/wireless/ath/ath12k/peer.h
-@@ -44,6 +44,9 @@ struct ath12k_peer {
- struct ppdu_user_delayba ppdu_stats_delayba;
- bool delayba_flag;
- bool is_authorized;
-+
-+ /* protected by ab->data_lock */
-+ bool dp_setup_done;
- };
-
- void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
-diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
-index ef0f3cf35cfd1..d217b70a7a8fb 100644
---- a/drivers/net/wireless/ath/ath12k/wmi.c
-+++ b/drivers/net/wireless/ath/ath12k/wmi.c
-@@ -3876,6 +3876,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
- ath12k_warn(soc, "failed to extract reg cap %d\n", i);
- return ret;
- }
-+
-+ if (reg_cap.phy_id >= MAX_RADIOS) {
-+ ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
-+ return -EINVAL;
-+ }
-+
- soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
- }
- return 0;
-@@ -6476,6 +6482,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
- ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
- ev->freq_offset, ev->sidx);
-
-+ rcu_read_lock();
-+
- ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
-
- if (!ar) {
-@@ -6493,6 +6501,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
- ieee80211_radar_detected(ar->hw);
-
- exit:
-+ rcu_read_unlock();
-+
- kfree(tb);
- }
-
-@@ -6511,11 +6521,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
-
-+ rcu_read_lock();
-+
- ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
- if (!ar) {
- ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
-- return;
-+ goto exit;
- }
-+
-+exit:
-+ rcu_read_unlock();
- }
-
- static void ath12k_fils_discovery_event(struct ath12k_base *ab,
-diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
-index 9bc57c5a89bfe..a0376a6787b8d 100644
---- a/drivers/net/wireless/ath/ath9k/debug.c
-+++ b/drivers/net/wireless/ath/ath9k/debug.c
-@@ -1293,7 +1293,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
- u32 sset, u8 *data)
- {
- if (sset == ETH_SS_STATS)
-- memcpy(data, *ath9k_gstrings_stats,
-+ memcpy(data, ath9k_gstrings_stats,
- sizeof(ath9k_gstrings_stats));
- }
-
-diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
-index c549ff3abcdc4..278ddc713fdc2 100644
---- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
-+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
-@@ -423,7 +423,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
- u32 sset, u8 *data)
- {
- if (sset == ETH_SS_STATS)
-- memcpy(data, *ath9k_htc_gstrings_stats,
-+ memcpy(data, ath9k_htc_gstrings_stats,
- sizeof(ath9k_htc_gstrings_stats));
- }
-
-diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
-index 27f4d74a41c80..2788a1b06c17c 100644
---- a/drivers/net/wireless/ath/dfs_pattern_detector.c
-+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
-@@ -206,7 +206,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
-
- INIT_LIST_HEAD(&cd->head);
- cd->freq = freq;
-- cd->detectors = kmalloc_array(dpd->num_radar_types,
-+ cd->detectors = kcalloc(dpd->num_radar_types,
- sizeof(*cd->detectors), GFP_ATOMIC);
- if (cd->detectors == NULL)
- goto fail;
-diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
-index b9893b22e41da..42e765fe3cfe1 100644
---- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
-+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
-@@ -134,12 +134,10 @@ static const struct iwl_base_params iwl_bz_base_params = {
- .ht_params = &iwl_gl_a_ht_params
-
- /*
-- * If the device doesn't support HE, no need to have that many buffers.
-- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
-+ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
--#define IWL_NUM_RBDS_NON_HE 512
--#define IWL_NUM_RBDS_BZ_HE 4096
-+#define IWL_NUM_RBDS_BZ_EHT (512 * 16)
-
- const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
- .device_family = IWL_DEVICE_FAMILY_BZ,
-@@ -160,16 +158,16 @@ const struct iwl_cfg iwl_cfg_bz = {
- .fw_name_mac = "bz",
- .uhb_supported = true,
- IWL_DEVICE_BZ,
-- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
-- .num_rbds = IWL_NUM_RBDS_BZ_HE,
-+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
-+ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
- };
-
- const struct iwl_cfg iwl_cfg_gl = {
- .fw_name_mac = "gl",
- .uhb_supported = true,
- IWL_DEVICE_BZ,
-- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
-- .num_rbds = IWL_NUM_RBDS_BZ_HE,
-+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
-+ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
- };
-
-
-diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
-index ad283fd22e2a2..604e9cef6baac 100644
---- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
-+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
-@@ -127,12 +127,10 @@ static const struct iwl_base_params iwl_sc_base_params = {
- .ht_params = &iwl_22000_ht_params
-
- /*
-- * If the device doesn't support HE, no need to have that many buffers.
-- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
-+ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
--#define IWL_NUM_RBDS_NON_HE 512
--#define IWL_NUM_RBDS_SC_HE 4096
-+#define IWL_NUM_RBDS_SC_EHT (512 * 16)
-
- const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
- .device_family = IWL_DEVICE_FAMILY_SC,
-@@ -153,8 +151,8 @@ const struct iwl_cfg iwl_cfg_sc = {
- .fw_name_mac = "sc",
- .uhb_supported = true,
- IWL_DEVICE_SC,
-- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
-- .num_rbds = IWL_NUM_RBDS_SC_HE,
-+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
-+ .num_rbds = IWL_NUM_RBDS_SC_EHT,
- };
-
- MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
-diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
-index 60a7b61d59aa3..ca1daec641c4f 100644
---- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
-+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
-@@ -3,6 +3,7 @@
- *
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
-+ * Copyright (C) 2023 Intel Corporation
- *****************************************************************************/
-
- #include <linux/kernel.h>
-@@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
- iwlagn_check_ratid_empty(priv, sta_id, tid);
- }
-
-- iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
-+ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);
-
- freed = 0;
-
-@@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
-- &reclaimed_skbs);
-+ &reclaimed_skbs, false);
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
-diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
-index ba538d70985f4..39bee9c00e071 100644
---- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
-+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
-@@ -13,6 +13,7 @@
- #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
- #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
- #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
-+#define IWL_FW_INI_PRESET_DISABLE 0xff
-
- /**
- * struct iwl_fw_ini_hcmd
-diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
-index 241a9e3f2a1a7..f45f645ca6485 100644
---- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
-+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
-@@ -86,10 +86,7 @@ enum iwl_nvm_type {
- #define IWL_DEFAULT_MAX_TX_POWER 22
- #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
- NETIF_F_TSO | NETIF_F_TSO6)
--#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
--#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
-- IWL_TX_CSUM_NETIF_FLAGS_BZ | \
-- NETIF_F_RXCSUM)
-+#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM)
-
- /* Antenna presence definitions */
- #define ANT_NONE 0x0
-diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
-index 128059ca77e60..06fb7d6653905 100644
---- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
-+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
-@@ -1,6 +1,6 @@
- /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
- /*
-- * Copyright (C) 2018-2022 Intel Corporation
-+ * Copyright (C) 2018-2023 Intel Corporation
- */
- #ifndef __iwl_dbg_tlv_h__
- #define __iwl_dbg_tlv_h__
-@@ -10,7 +10,8 @@
- #include <fw/file.h>
- #include <fw/api/dbg-tlv.h>
-
--#define IWL_DBG_TLV_MAX_PRESET 15
-+#define IWL_DBG_TLV_MAX_PRESET 15
-+#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
-
- /**
- * struct iwl_dbg_tlv_node - debug TLV node
-diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
-index 3d87d26845e74..fb5e254757e71 100644
---- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
-+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
-@@ -1795,6 +1795,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
- #endif
-
- drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
-+ if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
-+		/* The module parameter holds a non-default value,
-+		 * so use it.
-+		 */
-+ drv->trans->dbg.domains_bitmap &= 0xffff;
-+ if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
-+ if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
-+ IWL_ERR(trans,
-+ "invalid enable_ini module parameter value: max = %d, using 0 instead\n",
-+ ENABLE_INI);
-+ iwlwifi_mod_params.enable_ini = 0;
-+ }
-+ drv->trans->dbg.domains_bitmap =
-+ BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
-+ }
-+ }
-
- ret = iwl_request_firmware(drv, true);
- if (ret) {
-@@ -1843,8 +1859,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
- kfree(drv);
- }
-
--#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
--
- /* shared module parameters */
- struct iwl_mod_params iwlwifi_mod_params = {
- .fw_restart = true,
-@@ -1964,38 +1978,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
- MODULE_PARM_DESC(uapsd_disable,
- "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
-
--static int enable_ini_set(const char *arg, const struct kernel_param *kp)
--{
-- int ret = 0;
-- bool res;
-- __u32 new_enable_ini;
--
-- /* in case the argument type is a number */
-- ret = kstrtou32(arg, 0, &new_enable_ini);
-- if (!ret) {
-- if (new_enable_ini > ENABLE_INI) {
-- pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
-- return -EINVAL;
-- }
-- goto out;
-- }
--
-- /* in case the argument type is boolean */
-- ret = kstrtobool(arg, &res);
-- if (ret)
-- return ret;
-- new_enable_ini = (res ? ENABLE_INI : 0);
--
--out:
-- iwlwifi_mod_params.enable_ini = new_enable_ini;
-- return 0;
--}
--
--static const struct kernel_param_ops enable_ini_ops = {
-- .set = enable_ini_set
--};
--
--module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
-+module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
- MODULE_PARM_DESC(enable_ini,
- "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
- "Debug INI TLV FW debug infrastructure (default: 16)");
-diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
-index 6dd381ff0f9e7..2a63968b0e55b 100644
---- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
-+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
-@@ -348,8 +348,8 @@
- #define RFIC_REG_RD 0xAD0470
- #define WFPM_CTRL_REG 0xA03030
- #define WFPM_OTP_CFG1_ADDR 0x00a03098
--#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
--#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
-+#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5)
-+#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4)
- #define WFPM_OTP_BZ_BNJ_JACKET_BIT 5
- #define WFPM_OTP_BZ_BNJ_CDB_BIT 4
- #define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT)
-diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
-index 3b6b0e03037f1..168eda2132fb8 100644
---- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
-+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
-@@ -56,6 +56,10 @@
- * 6) Eventually, the free function will be called.
- */
-
-+/* default preset 0 (starts from bit 16) */
-+#define IWL_FW_DBG_DOMAIN_POS 16
-+#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS)
-+
- #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
-
- #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
-@@ -584,7 +588,7 @@ struct iwl_trans_ops {
- int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue);
- void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
-- struct sk_buff_head *skbs);
-+ struct sk_buff_head *skbs, bool is_flush);
-
- void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
-
-@@ -1269,14 +1273,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- }
-
- static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
-- int ssn, struct sk_buff_head *skbs)
-+ int ssn, struct sk_buff_head *skbs,
-+ bool is_flush)
- {
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
-- trans->ops->reclaim(trans, queue, ssn, skbs);
-+ trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
- }
-
- static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-index f6488b4bbe68b..be2602d8c5bfa 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-@@ -2012,6 +2012,16 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
- if (IS_ERR(key_config))
- return false;
- ieee80211_set_key_rx_seq(key_config, 0, &seq);
-+
-+ if (key_config->keyidx == 4 || key_config->keyidx == 5) {
-+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
-+ struct iwl_mvm_vif_link_info *mvm_link =
-+ mvmvif->link[link_id];
-+
-+ mvm_link->igtk = key_config;
-+ }
-+
- return true;
- }
-
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
-index b49781d1a07a7..10b9219b3bfd3 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
-@@ -1,7 +1,7 @@
- // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
- /*
- * Copyright (C) 2015-2017 Intel Deutschland GmbH
-- * Copyright (C) 2018-2022 Intel Corporation
-+ * Copyright (C) 2018-2023 Intel Corporation
- */
- #include <net/cfg80211.h>
- #include <linux/etherdevice.h>
-@@ -302,7 +302,12 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
- struct iwl_mvm_pasn_sta *sta)
- {
- list_del(&sta->list);
-- iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
-+
-+ if (iwl_mvm_has_mld_api(mvm->fw))
-+ iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
-+ else
-+ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
-+
- iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
- kfree(sta);
- }
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
-index ace82e2c5bd91..4ab55a1fcbf04 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
-@@ -53,7 +53,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id = link_conf->link_id;
- struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
- struct iwl_link_config_cmd cmd = {};
-- struct iwl_mvm_phy_ctxt *phyctxt;
-
- if (WARN_ON_ONCE(!link_info))
- return -EINVAL;
-@@ -61,7 +60,7 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
- link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
- mvmvif);
-- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
-+ if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
- return -EINVAL;
-
- rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
-@@ -77,12 +76,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- cmd.link_id = cpu_to_le32(link_info->fw_link_id);
- cmd.mac_id = cpu_to_le32(mvmvif->id);
- cmd.spec_link_id = link_conf->link_id;
-- /* P2P-Device already has a valid PHY context during add */
-- phyctxt = link_info->phy_ctxt;
-- if (phyctxt)
-- cmd.phy_id = cpu_to_le32(phyctxt->id);
-- else
-- cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
-+ WARN_ON_ONCE(link_info->phy_ctxt);
-+ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
-
- memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
-
-@@ -194,11 +189,14 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- flags_mask |= LINK_FLG_MU_EDCA_CW;
- }
-
-- if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be)
-- cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing);
-- else
-- /* This flag can be set only if the MAC has eht support */
-- changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
-+ if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
-+ if (iwlwifi_mod_params.disable_11be ||
-+ !link_conf->eht_support)
-+ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
-+ else
-+ cmd.puncture_mask =
-+ cpu_to_le16(link_conf->eht_puncturing);
-+ }
-
- cmd.bss_color = link_conf->he_bss_color.color;
-
-@@ -245,7 +243,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- int ret;
-
- if (WARN_ON(!link_info ||
-- link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
-+ link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
- return -EINVAL;
-
- RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
-index 7369a45f7f2bd..9c97691e60384 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
-@@ -286,6 +286,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
- INIT_LIST_HEAD(&mvmvif->time_event_data.list);
- mvmvif->time_event_data.id = TE_MAX;
-
-+ mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
-+ mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
-+ mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
-+
- 	/* No need to allocate data queues to P2P Device MAC and NAN. */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- return 0;
-@@ -300,10 +304,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
- mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- }
-
-- mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
-- mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
-- mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
--
- for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
- mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
-
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-index 5918c1f2b10c3..a25ea638229b0 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-@@ -1589,32 +1589,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
- IEEE80211_VIF_SUPPORTS_CQM_RSSI;
- }
-
-- /*
-- * P2P_DEVICE interface does not have a channel context assigned to it,
-- * so a dedicated PHY context is allocated to it and the corresponding
-- * MAC context is bound to it at this stage.
-- */
-- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
--
-- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
-- if (!mvmvif->deflink.phy_ctxt) {
-- ret = -ENOSPC;
-- goto out_free_bf;
-- }
--
-- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
-- ret = iwl_mvm_binding_add_vif(mvm, vif);
-- if (ret)
-- goto out_unref_phy;
--
-- ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
-- if (ret)
-- goto out_unbind;
--
-- /* Save a pointer to p2p device vif, so it can later be used to
-- * update the p2p device MAC when a GO is started/stopped */
-+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- mvm->p2p_device_vif = vif;
-- }
-
- iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
-@@ -1643,11 +1619,6 @@ out:
-
- goto out_unlock;
-
-- out_unbind:
-- iwl_mvm_binding_remove_vif(mvm, vif);
-- out_unref_phy:
-- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-- out_free_bf:
- if (mvm->bf_allowed_vif == mvmvif) {
- mvm->bf_allowed_vif = NULL;
- vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
-@@ -1744,12 +1715,17 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
- if (iwl_mvm_mac_remove_interface_common(hw, vif))
- goto out;
-
-+ /* Before the interface removal, mac80211 would cancel the ROC, and the
-+ * ROC worker would be scheduled if needed. The worker would be flushed
-+ * in iwl_mvm_prepare_mac_removal() and thus at this point there is no
-+	 * binding etc., so nothing needs to be done here.
-+ */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-+ if (mvmvif->deflink.phy_ctxt) {
-+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-+ mvmvif->deflink.phy_ctxt = NULL;
-+ }
- mvm->p2p_device_vif = NULL;
-- iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
-- iwl_mvm_binding_remove_vif(mvm, vif);
-- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-- mvmvif->deflink.phy_ctxt = NULL;
- }
-
- iwl_mvm_mac_ctxt_remove(mvm, vif);
-@@ -3791,6 +3767,12 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
-
- iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
-
-+ /* MFP is set by default before the station is authorized.
-+ * Clear it here in case it's not used.
-+ */
-+ if (!sta->mfp)
-+ return callbacks->update_sta(mvm, vif, sta);
-+
- return 0;
- }
-
-@@ -4531,30 +4513,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
- return ret;
- }
-
--static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm,
-- struct ieee80211_vif *vif,
-- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
-+static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
- {
-- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-- int ret = 0;
-+ int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
-- /* Unbind the P2P_DEVICE from the current PHY context,
-- * and if the PHY context is not used remove it.
-- */
-- ret = iwl_mvm_binding_remove_vif(mvm, vif);
-- if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
-+ ret = iwl_mvm_binding_add_vif(mvm, vif);
-+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
- return ret;
-
-- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
--
-- /* Bind the P2P_DEVICE to the current PHY Context */
-- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
--
-- ret = iwl_mvm_binding_add_vif(mvm, vif);
-- WARN(ret, "Failed binding P2P_DEVICE\n");
-- return ret;
-+ /* The station and queue allocation must be done only after the binding
-+ * is done, as otherwise the FW might incorrectly configure its state.
-+ */
-+ return iwl_mvm_add_p2p_bcast_sta(mvm, vif);
- }
-
- static int iwl_mvm_roc(struct ieee80211_hw *hw,
-@@ -4565,7 +4537,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
- {
- static const struct iwl_mvm_roc_ops ops = {
- .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
-- .switch_phy_ctxt = iwl_mvm_roc_switch_binding,
-+ .link = iwl_mvm_roc_link,
- };
-
- return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
-@@ -4581,7 +4553,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct cfg80211_chan_def chandef;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
-- bool band_change_removal;
- int ret, i;
- u32 lmac_id;
-
-@@ -4610,82 +4581,61 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- /* handle below */
- break;
- default:
-- IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
-+ IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
- ret = -EINVAL;
- goto out_unlock;
- }
-
-+ /* Try using a PHY context that is already in use */
- for (i = 0; i < NUM_PHY_CTX; i++) {
- phy_ctxt = &mvm->phy_ctxts[i];
-- if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt)
-+ if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt)
- continue;
-
-- if (phy_ctxt->ref && channel == phy_ctxt->channel) {
-- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
-- if (ret)
-- goto out_unlock;
-+ if (channel == phy_ctxt->channel) {
-+ if (mvmvif->deflink.phy_ctxt)
-+ iwl_mvm_phy_ctxt_unref(mvm,
-+ mvmvif->deflink.phy_ctxt);
-
-+ mvmvif->deflink.phy_ctxt = phy_ctxt;
- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
-- goto schedule_time_event;
-+ goto link_and_start_p2p_roc;
- }
- }
-
-- /* Need to update the PHY context only if the ROC channel changed */
-- if (channel == mvmvif->deflink.phy_ctxt->channel)
-- goto schedule_time_event;
--
-- cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
--
-- /*
-- * Check if the remain-on-channel is on a different band and that
-- * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
-- * so, we'll need to release and then re-configure here, since we
-- * must not remove a PHY context that's part of a binding.
-+ /* If the currently used PHY context is configured with a matching
- 	 * channel, use it
- */
-- band_change_removal =
-- fw_has_capa(&mvm->fw->ucode_capa,
-- IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
-- mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band;
--
-- if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) {
-- /*
-- * Change the PHY context configuration as it is currently
-- * referenced only by the P2P Device MAC (and we can modify it)
-- */
-- ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt,
-- &chandef, 1, 1);
-- if (ret)
-- goto out_unlock;
-+ if (mvmvif->deflink.phy_ctxt) {
-+ if (channel == mvmvif->deflink.phy_ctxt->channel)
-+ goto link_and_start_p2p_roc;
- } else {
-- /*
-- * The PHY context is shared with other MACs (or we're trying to
-- * switch bands), so remove the P2P Device from the binding,
-- * allocate an new PHY context and create a new binding.
-- */
- phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
- if (!phy_ctxt) {
- ret = -ENOSPC;
- goto out_unlock;
- }
-
-- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
-- 1, 1);
-- if (ret) {
-- IWL_ERR(mvm, "Failed to change PHY context\n");
-- goto out_unlock;
-- }
-+ mvmvif->deflink.phy_ctxt = phy_ctxt;
-+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
-+ }
-
-- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
-- if (ret)
-- goto out_unlock;
-+ /* Configure the PHY context */
-+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
-
-- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
-+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
-+ 1, 1);
-+ if (ret) {
-+ IWL_ERR(mvm, "Failed to change PHY context\n");
-+ goto out_unlock;
- }
-
--schedule_time_event:
-- /* Schedule the time events */
-- ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
-+link_and_start_p2p_roc:
-+ ret = ops->link(mvm, vif);
-+ if (ret)
-+ goto out_unlock;
-
-+ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
- out_unlock:
- mutex_unlock(&mvm->mutex);
- IWL_DEBUG_MAC80211(mvm, "leave\n");
-@@ -5629,7 +5579,8 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- }
-
- if (drop) {
-- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
-+ if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id,
-+ mvmsta->tfd_queue_msk))
- IWL_ERR(mvm, "flush request fail\n");
- } else {
- if (iwl_mvm_has_new_tx_api(mvm))
-@@ -5651,22 +5602,21 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
- {
-+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-- int i;
-+ struct iwl_mvm_link_sta *mvm_link_sta;
-+ struct ieee80211_link_sta *link_sta;
-+ int link_id;
-
- mutex_lock(&mvm->mutex);
-- for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
-- struct iwl_mvm_sta *mvmsta;
-- struct ieee80211_sta *tmp;
--
-- tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
-- lockdep_is_held(&mvm->mutex));
-- if (tmp != sta)
-+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
-+ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
-+ lockdep_is_held(&mvm->mutex));
-+ if (!mvm_link_sta)
- continue;
-
-- mvmsta = iwl_mvm_sta_from_mac80211(sta);
--
-- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
-+ if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id,
-+ mvmsta->tfd_queue_msk))
- IWL_ERR(mvm, "flush request fail\n");
- }
- mutex_unlock(&mvm->mutex);
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
-index 2c9f2f71b083a..ea3e9e9c6e26c 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
-@@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
- return 0;
- }
-
-- /* AP group keys are per link and should be on the mcast STA */
-+ /* AP group keys are per link and should be on the mcast/bcast STA */
- if (vif->type == NL80211_IFTYPE_AP &&
-- !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
-+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
-+ /* IGTK/BIGTK to bcast STA */
-+ if (keyconf->keyidx >= 4)
-+ return BIT(link_info->bcast_sta.sta_id);
-+ /* GTK for data to mcast STA */
- return BIT(link_info->mcast_sta.sta_id);
-+ }
-
- /* for client mode use the AP STA also for group keys */
- if (!sta && vif->type == NL80211_IFTYPE_STATION)
-@@ -91,7 +96,12 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
- if (!sta && vif->type == NL80211_IFTYPE_STATION)
- sta = mvmvif->ap_sta;
-
-- if (!IS_ERR_OR_NULL(sta) && sta->mfp)
-+ /* Set the MFP flag also for an AP interface where the key is an IGTK
-+	 * key, as in such a case the station would always be NULL
-+ */
-+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
-+ (vif->type == NL80211_IFTYPE_AP &&
-+ (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
- flags |= IWL_SEC_KEY_FLAG_MFP;
-
- return flags;
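
Key indices 4 and 5 are the IGTK/BIGTK slots, so the hunks above split AP group keys: management group keys are installed on the broadcast station, data GTKs stay on the multicast station, and MFP is forced for AP-side IGTKs where no station pointer exists. A hypothetical sketch of the routing decision (the types and sec_sta_mask() here are illustrative stand-ins, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum iftype { IFTYPE_STATION, IFTYPE_AP };

    struct link_info { unsigned bcast_sta_id, mcast_sta_id; };

    /* Simplified stand-in for iwl_mvm_get_sec_sta_mask(): route AP group
     * keys by key index -- IGTK/BIGTK (idx 4..5) to the bcast STA, data
     * GTKs to the mcast STA. */
    static unsigned sec_sta_mask(enum iftype type, bool pairwise, int keyidx,
                                 const struct link_info *link)
    {
        if (type == IFTYPE_AP && !pairwise) {
            if (keyidx >= 4)                      /* IGTK/BIGTK */
                return BIT(link->bcast_sta_id);
            return BIT(link->mcast_sta_id);       /* data GTK */
        }
        return 0; /* other interface types elided */
    }

    int main(void)
    {
        struct link_info link = { .bcast_sta_id = 2, .mcast_sta_id = 3 };

        printf("GTK  (idx 1) -> mask 0x%x\n",
               sec_sta_mask(IFTYPE_AP, false, 1, &link));
        printf("IGTK (idx 4) -> mask 0x%x\n",
               sec_sta_mask(IFTYPE_AP, false, 4, &link));
        return 0;
    }
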
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
-index b719843e94576..2ddb6f763a0b3 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
-@@ -56,43 +56,15 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
- IEEE80211_VIF_SUPPORTS_CQM_RSSI;
- }
-
-- /*
-- * P2P_DEVICE interface does not have a channel context assigned to it,
-- * so a dedicated PHY context is allocated to it and the corresponding
-- * MAC context is bound to it at this stage.
-- */
-- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
-- if (!mvmvif->deflink.phy_ctxt) {
-- ret = -ENOSPC;
-- goto out_free_bf;
-- }
--
-- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
-- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
-- if (ret)
-- goto out_unref_phy;
--
-- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
-- LINK_CONTEXT_MODIFY_ACTIVE |
-- LINK_CONTEXT_MODIFY_RATES_INFO,
-- true);
-- if (ret)
-- goto out_remove_link;
--
-- ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
-- if (ret)
-- goto out_remove_link;
-+ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
-+ if (ret)
-+ goto out_free_bf;
-
-- /* Save a pointer to p2p device vif, so it can later be used to
-- * update the p2p device MAC when a GO is started/stopped
-- */
-+ /* Save a pointer to p2p device vif, so it can later be used to
-+ * update the p2p device MAC when a GO is started/stopped
-+ */
-+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- mvm->p2p_device_vif = vif;
-- } else {
-- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
-- if (ret)
-- goto out_free_bf;
-- }
-
- ret = iwl_mvm_power_update_mac(mvm);
- if (ret)
-@@ -119,10 +91,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
-
- goto out_unlock;
-
-- out_remove_link:
-- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
-- out_unref_phy:
-- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
- out_free_bf:
- if (mvm->bf_allowed_vif == mvmvif) {
- mvm->bf_allowed_vif = NULL;
-@@ -130,7 +98,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
- IEEE80211_VIF_SUPPORTS_CQM_RSSI);
- }
- out_remove_mac:
-- mvmvif->deflink.phy_ctxt = NULL;
- mvmvif->link[0] = NULL;
- iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
- out_unlock:
-@@ -185,14 +152,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
-
- iwl_mvm_power_update_mac(mvm);
-
-+ /* Before the interface removal, mac80211 would cancel the ROC, and the
-+ * ROC worker would be scheduled if needed. The worker would be flushed
-+ * in iwl_mvm_prepare_mac_removal() and thus at this point the link is
-+	 * not active. So we only need to remove the link.
-+ */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-+ if (mvmvif->deflink.phy_ctxt) {
-+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-+ mvmvif->deflink.phy_ctxt = NULL;
-+ }
- mvm->p2p_device_vif = NULL;
--
-- /* P2P device uses only one link */
-- iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf);
-- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
-- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-- mvmvif->deflink.phy_ctxt = NULL;
-+ iwl_mvm_remove_link(mvm, vif, &vif->bss_conf);
- } else {
- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
- }
-@@ -653,7 +624,7 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
- }
-
- /* Update EHT Puncturing info */
-- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht)
-+ if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
- link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
-
- if (link_changes) {
-@@ -968,36 +939,29 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
- return 0;
- }
-
--static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm,
-- struct ieee80211_vif *vif,
-- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
-+static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
- {
-- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-- int ret = 0;
-+ int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
--	 * In order to change the phy_ctx of a link, the link needs to be
-- * inactive. Therefore, first deactivate the link, then change its
-- * phy_ctx, and then activate it again.
-- */
-- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
-- LINK_CONTEXT_MODIFY_ACTIVE, false);
-- if (WARN(ret, "Failed to deactivate link\n"))
-+ /* The PHY context ID might have changed so need to set it */
-+ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
-+ if (WARN(ret, "Failed to set PHY context ID\n"))
- return ret;
-
-- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
--
-- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
-+ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
-+ LINK_CONTEXT_MODIFY_ACTIVE |
-+ LINK_CONTEXT_MODIFY_RATES_INFO,
-+ true);
-
-- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
-- if (WARN(ret, "Failed to deactivate link\n"))
-+ if (WARN(ret, "Failed linking P2P_DEVICE\n"))
- return ret;
-
-- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
-- LINK_CONTEXT_MODIFY_ACTIVE, true);
-- WARN(ret, "Failed binding P2P_DEVICE\n");
-- return ret;
-+ /* The station and queue allocation must be done only after the linking
-+ * is done, as otherwise the FW might incorrectly configure its state.
-+ */
-+ return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
- }
-
- static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-@@ -1006,7 +970,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- {
- static const struct iwl_mvm_roc_ops ops = {
- .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta,
-- .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx,
-+ .link = iwl_mvm_mld_roc_link,
- };
-
- return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
-@@ -1089,9 +1053,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
- }
- }
-
-- if (err)
-- goto out_err;
--
- err = 0;
- if (new_links == 0) {
- mvmvif->link[0] = &mvmvif->deflink;
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
-index 524852cf5cd2d..1ccbe8c1eeb42 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
-@@ -347,7 +347,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
- return -EINVAL;
-
- if (flush)
-- iwl_mvm_flush_sta(mvm, int_sta, true);
-+ iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
-
- iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid);
-
-@@ -705,8 +705,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- rcu_dereference_protected(mvm_sta->link[link_id],
- lockdep_is_held(&mvm->mutex));
-
-- if (WARN_ON(!link_conf || !mvm_link_sta))
-+ if (WARN_ON(!link_conf || !mvm_link_sta)) {
-+ ret = -EINVAL;
- goto err;
-+ }
-
- ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
- mvm_link_sta);
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
-index b18c91c5dd5d1..218f3bc31104b 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
-@@ -1658,7 +1658,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
- static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
- #endif
- int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
--int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
-+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask);
- int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
-
- /* Utils to extract sta related data */
-@@ -1942,13 +1942,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
- *
- * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta
- * for Hot Spot 2.0
-- * @switch_phy_ctxt: pointer to the function that switches a vif from one
-- * phy_ctx to another
-+ * @link: For a P2P Device interface, pointer to a function that links the
-+ * MAC/Link to the PHY context
- */
- struct iwl_mvm_roc_ops {
- int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id);
-- int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-- struct iwl_mvm_phy_ctxt *new_phy_ctxt);
-+ int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
- };
-
- int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
-index 3b9a343d4f672..2c231f4623893 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
-@@ -2059,7 +2059,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- *status = IWL_MVM_QUEUE_FREE;
- }
-
-- if (vif->type == NL80211_IFTYPE_STATION) {
-+ if (vif->type == NL80211_IFTYPE_STATION &&
-+ mvm_link->ap_sta_id == sta_id) {
- /* if associated - we can't remove the AP STA now */
- if (vif->cfg.assoc)
- return true;
-@@ -2097,7 +2098,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
- return ret;
-
- /* flush its queues here since we are freeing mvm_sta */
-- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
-+ ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
-+ mvm_sta->tfd_queue_msk);
- if (ret)
- return ret;
- if (iwl_mvm_has_new_tx_api(mvm)) {
-@@ -2408,7 +2410,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
-
- lockdep_assert_held(&mvm->mutex);
-
-- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true);
-+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
-+ mvmvif->deflink.bcast_sta.tfd_queue_msk);
-
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
-@@ -2664,7 +2667,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-
- lockdep_assert_held(&mvm->mutex);
-
-- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true);
-+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
-+ mvmvif->deflink.mcast_sta.tfd_queue_msk);
-
- iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
- &mvmvif->deflink.cab_queue, 0);
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
-index 5f0e7144a951c..158266719ffd7 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
-@@ -78,9 +78,29 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
- */
-
- if (!WARN_ON(!mvm->p2p_device_vif)) {
-- mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
-- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta,
-- true);
-+ struct ieee80211_vif *vif = mvm->p2p_device_vif;
-+
-+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
-+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
-+ mvmvif->deflink.bcast_sta.tfd_queue_msk);
-+
-+ if (mvm->mld_api_is_used) {
-+ iwl_mvm_mld_rm_bcast_sta(mvm, vif,
-+ &vif->bss_conf);
-+
-+ iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
-+ LINK_CONTEXT_MODIFY_ACTIVE,
-+ false);
-+ } else {
-+ iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
-+ iwl_mvm_binding_remove_vif(mvm, vif);
-+ }
-+
-+ /* Do not remove the PHY context as removing and adding
-+ * a PHY context has timing overheads. Leaving it
-+ * configured in FW would be useful in case the next ROC
-+ * is with the same channel.
-+ */
- }
- }
-
-@@ -93,7 +113,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
- */
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- /* do the same in case of hot spot 2.0 */
-- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
-+ iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
-+ mvm->aux_sta.tfd_queue_msk);
-
- if (mvm->mld_api_is_used) {
- iwl_mvm_mld_rm_aux_sta(mvm);
-@@ -880,8 +901,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
- if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
- /* End TE, notify mac80211 */
- mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
-- ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
-+ ieee80211_remain_on_channel_expired(mvm->hw);
- } else if (le32_to_cpu(notif->start)) {
- if (WARN_ON(mvmvif->time_event_data.id !=
- le32_to_cpu(notif->conf_id)))
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
-index 898dca3936435..177a4628a913e 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
-@@ -536,16 +536,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
- flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
-
- /*
-- * For data packets rate info comes from the fw. Only
-- * set rate/antenna during connection establishment or in case
-- * no station is given.
-+ * For data and mgmt packets rate info comes from the fw. Only
-+ * set rate/antenna for injected frames with fixed rate, or
-+ * when no sta is given.
- */
-- if (!sta || !ieee80211_is_data(hdr->frame_control) ||
-- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
-+ if (unlikely(!sta ||
-+ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
- flags |= IWL_TX_FLAGS_CMD_RATE;
- rate_n_flags =
- iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
- hdr->frame_control);
-+ } else if (!ieee80211_is_data(hdr->frame_control) ||
-+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
-+ /* These are important frames */
-+ flags |= IWL_TX_FLAGS_HIGH_PRI;
- }
-
- if (mvm->trans->trans_cfg->device_family >=
-@@ -1599,7 +1603,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
- seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
-
- /* we can free until ssn % q.n_bd not inclusive */
-- iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
-+ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
-
- while (!skb_queue_empty(&skbs)) {
- struct sk_buff *skb = __skb_dequeue(&skbs);
-@@ -1951,7 +1955,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway).
- */
-- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
-+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
-
- skb_queue_walk(&reclaimed_skbs, skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-@@ -2293,24 +2297,10 @@ free_rsp:
- return ret;
- }
-
--int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
-+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
- {
-- u32 sta_id, tfd_queue_msk;
--
-- if (internal) {
-- struct iwl_mvm_int_sta *int_sta = sta;
--
-- sta_id = int_sta->sta_id;
-- tfd_queue_msk = int_sta->tfd_queue_msk;
-- } else {
-- struct iwl_mvm_sta *mvm_sta = sta;
--
-- sta_id = mvm_sta->deflink.sta_id;
-- tfd_queue_msk = mvm_sta->tfd_queue_msk;
-- }
--
- if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
-
-- return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk);
-+ return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
- }
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
-index fa46dad5fd680..2ecf6db95fb31 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
-@@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
- if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- IWL_DEBUG_INFO(trans,
- "DEVICE_ENABLED bit was set and is now cleared\n");
-+ iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_rx_napi_sync(trans);
- iwl_txq_gen2_tx_free(trans);
- iwl_pcie_rx_stop(trans);
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-index 198933f853c55..583d1011963ec 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-@@ -1263,6 +1263,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
- if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- IWL_DEBUG_INFO(trans,
- "DEVICE_ENABLED bit was set and is now cleared\n");
-+ iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_rx_napi_sync(trans);
- iwl_pcie_tx_stop(trans);
- iwl_pcie_rx_stop(trans);
-diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
-index 340240b8954f6..ca74b1b63cac1 100644
---- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
-+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
-@@ -1575,7 +1575,7 @@ void iwl_txq_progress(struct iwl_txq *txq)
-
- /* Frees buffers until index _not_ inclusive */
- void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
-- struct sk_buff_head *skbs)
-+ struct sk_buff_head *skbs, bool is_flush)
- {
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- int tfd_num, read_ptr, last_to_free;
-@@ -1650,9 +1650,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- if (iwl_txq_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans->txqs.queue_stopped)) {
- struct sk_buff_head overflow_skbs;
-+ struct sk_buff *skb;
-
- __skb_queue_head_init(&overflow_skbs);
-- skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
-+ skb_queue_splice_init(&txq->overflow_q,
-+ is_flush ? skbs : &overflow_skbs);
-
- /*
- * We are going to transmit from the overflow queue.
-@@ -1672,8 +1674,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- */
- spin_unlock_bh(&txq->lock);
-
-- while (!skb_queue_empty(&overflow_skbs)) {
-- struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-+ while ((skb = __skb_dequeue(&overflow_skbs))) {
- struct iwl_device_tx_cmd *dev_cmd_ptr;
-
- dev_cmd_ptr = *(void **)((u8 *)skb->cb +
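
With the new is_flush argument, iwl_txq_reclaim() splices packets waiting on the overflow queue onto the caller's reclaim list so they are freed along with the rest, instead of being requeued for transmission as in the normal path. A toy model of that conditional splice (the fixed-size skb_q below is a stand-in for struct sk_buff_head and its splice helper):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAXQ 8

    struct skb_q { int buf[MAXQ]; int len; };

    static void q_push(struct skb_q *q, int v) { q->buf[q->len++] = v; }

    /* append src onto dst and empty src, like skb_queue_splice_init() */
    static void q_splice_init(struct skb_q *src, struct skb_q *dst)
    {
        memcpy(dst->buf + dst->len, src->buf, src->len * sizeof(int));
        dst->len += src->len;
        src->len = 0;
    }

    /* reclaim: on flush, overflow packets join the freed list; otherwise
     * they are handed to a local list for retransmission */
    static void reclaim(struct skb_q *overflow, struct skb_q *freed,
                        bool is_flush)
    {
        struct skb_q retransmit = { .len = 0 };

        q_splice_init(overflow, is_flush ? freed : &retransmit);
        printf("is_flush=%d: freed %d, retransmit %d\n",
               is_flush, freed->len, retransmit.len);
    }

    int main(void)
    {
        struct skb_q overflow = { {1, 2, 3}, 3 }, freed = { .len = 0 };

        reclaim(&overflow, &freed, true);   /* all three are freed */

        q_push(&overflow, 4);
        q_push(&overflow, 5);
        freed.len = 0;
        reclaim(&overflow, &freed, false);  /* both go to retransmit */
        return 0;
    }
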
-diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
-index b7d3808588bfb..4c09bc1930fa1 100644
---- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
-+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
-@@ -179,7 +179,7 @@ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
- void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
-- struct sk_buff_head *skbs);
-+ struct sk_buff_head *skbs, bool is_flush);
- void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
- void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
-diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
-index dc8f4e157eb29..6ca7b494c2c26 100644
---- a/drivers/net/wireless/mediatek/mt76/dma.c
-+++ b/drivers/net/wireless/mediatek/mt76/dma.c
-@@ -330,9 +330,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- if (e->txwi == DMA_DUMMY_DATA)
- e->txwi = NULL;
-
-- if (e->skb == DMA_DUMMY_DATA)
-- e->skb = NULL;
--
- *prev_e = *e;
- memset(e, 0, sizeof(*e));
- }
-diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
-index d158320bc15db..dbab400969202 100644
---- a/drivers/net/wireless/mediatek/mt76/mac80211.c
-+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
-@@ -1697,11 +1697,16 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- }
- EXPORT_SYMBOL_GPL(mt76_init_queue);
-
--u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
-+u16 mt76_calculate_default_rate(struct mt76_phy *phy,
-+ struct ieee80211_vif *vif, int rateidx)
- {
-+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-+ struct cfg80211_chan_def *chandef = mvif->ctx ?
-+ &mvif->ctx->def :
-+ &phy->chandef;
- int offset = 0;
-
-- if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
-+ if (chandef->chan->band != NL80211_BAND_2GHZ)
- offset = 4;
-
- /* pick the lowest rate for hidden nodes */
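
This hunk and two later ones (mt76_connac_mac.c and mt76_connac_mcu.c) add the same fallback: prefer the chandef of the vif's assigned channel context and use the phy-wide chandef only when the vif has none, so default rates and HE capabilities follow the channel the interface actually operates on. A minimal sketch of the pattern (all types here are invented stand-ins for the mt76 structures):

    #include <stdio.h>

    enum band { BAND_2GHZ, BAND_5GHZ };

    struct chan_def { enum band band; int chan; };
    struct chanctx  { struct chan_def def; };
    struct vif      { struct chanctx *ctx; };   /* NULL when unassigned */
    struct phy      { struct chan_def chandef; };

    /* the pattern the hunks add: vif channel context first, phy fallback */
    static const struct chan_def *pick_chandef(const struct phy *phy,
                                               const struct vif *vif)
    {
        return vif->ctx ? &vif->ctx->def : &phy->chandef;
    }

    int main(void)
    {
        struct phy phy = { .chandef = { BAND_2GHZ, 6 } };
        struct chanctx ctx = { .def = { BAND_5GHZ, 36 } };
        struct vif with = { .ctx = &ctx }, without = { .ctx = NULL };

        printf("assigned vif   -> chan %d\n", pick_chandef(&phy, &with)->chan);
        printf("unassigned vif -> chan %d\n", pick_chandef(&phy, &without)->chan);
        return 0;
    }
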
-diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
-index e8757865a3d06..dae5410d67e83 100644
---- a/drivers/net/wireless/mediatek/mt76/mt76.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
-@@ -709,6 +709,7 @@ struct mt76_vif {
- u8 basic_rates_idx;
- u8 mcast_rates_idx;
- u8 beacon_rates_idx;
-+ struct ieee80211_chanctx_conf *ctx;
- };
-
- struct mt76_phy {
-@@ -1100,7 +1101,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
- struct mt76_queue *
- mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base, u32 flags);
--u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
-+u16 mt76_calculate_default_rate(struct mt76_phy *phy,
-+ struct ieee80211_vif *vif, int rateidx);
- static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
- int n_desc, int ring_base, u32 flags)
- {
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
-index 888678732f290..c223f7c19e6da 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
-@@ -9,6 +9,23 @@ struct beacon_bc_data {
- int count[MT7603_MAX_INTERFACES];
- };
-
-+static void
-+mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev)
-+{
-+ if (dev->beacon_check % 5 != 4)
-+ return;
-+
-+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
-+ mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
-+ mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
-+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
-+
-+ mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
-+ mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
-+ mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE);
-+ mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS);
-+}
-+
- static void
- mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
- {
-@@ -16,6 +33,8 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
- struct mt76_dev *mdev = &dev->mt76;
- struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
- struct sk_buff *skb = NULL;
-+ u32 om_idx = mvif->idx;
-+ u32 val;
-
- if (!(mdev->beacon_mask & BIT(mvif->idx)))
- return;
-@@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
- if (!skb)
- return;
-
-- mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
-- MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
-+ if (om_idx)
-+ om_idx |= 0x10;
-+ val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE |
-+ FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) |
-+ FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
-+ FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8);
-
- spin_lock_bh(&dev->ps_lock);
-- mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
-- FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
-- FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
-- dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
-- FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
-- FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
-
-- if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000))
-+ mt76_wr(dev, MT_DMA_FQCR0, val |
-+ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN));
-+ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
- dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
-+ goto out;
-+ }
-+
-+ mt76_wr(dev, MT_DMA_FQCR0, val |
-+ FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC));
-+ if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) {
-+ dev->beacon_check = MT7603_WATCHDOG_TIMEOUT;
-+ goto out;
-+ }
-
-+ mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON],
-+ MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
-+
-+out:
- spin_unlock_bh(&dev->ps_lock);
- }
-
-@@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
- data.dev = dev;
- __skb_queue_head_init(&data.q);
-
-+ /* Flush all previous CAB queue packets and beacons */
-+ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
-+
-+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
-+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
-+
-+ if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0)
-+ dev->beacon_check++;
-+ else
-+ dev->beacon_check = 0;
-+ mt7603_mac_stuck_beacon_recovery(dev);
-+
- q = dev->mphy.q_tx[MT_TXQ_BEACON];
- spin_lock(&q->lock);
- ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
-@@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
- mt76_queue_kick(dev, q);
- spin_unlock(&q->lock);
-
-- /* Flush all previous CAB queue packets */
-- mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
--
-- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
--
- mt76_csa_check(mdev);
- if (mdev->csa_complete)
-- goto out;
-+ return;
-
- q = dev->mphy.q_tx[MT_TXQ_CAB];
- do {
-@@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
- skb_queue_len(&data.q) < 8);
-
- if (skb_queue_empty(&data.q))
-- goto out;
-+ return;
-
- for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
- if (!data.tail[i])
-@@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
- MT_WF_ARB_CAB_START_BSSn(0) |
- (MT_WF_ARB_CAB_START_BSS0n(1) *
- ((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
--
--out:
-- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
-- if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
-- dev->beacon_check++;
- }
-
- void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
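
mt7603_mac_stuck_beacon_recovery() above bails out unless beacon_check % 5 == 4, so the heavyweight DMA/TMAC reset sequence only runs on every fifth consecutive stuck-beacon detection, and the counter is cleared as soon as the beacon queue has drained. A sketch of that throttle (maybe_recover() and the tick loop are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    static int beacon_check;

    /* fire the (expensive) recovery only on every 5th consecutive stuck check */
    static bool maybe_recover(bool beacon_stuck)
    {
        if (!beacon_stuck) {
            beacon_check = 0;   /* progress observed: reset the counter */
            return false;
        }
        beacon_check++;
        return beacon_check % 5 == 4;
    }

    int main(void)
    {
        for (int tick = 1; tick <= 12; tick++)
            if (maybe_recover(true))
                printf("tick %d: run stuck-beacon recovery\n", tick);
        return 0;   /* fires at ticks 4 and 9 */
    }
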
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
-index 60a996b63c0c0..915b8349146af 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
-@@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
- }
-
- if (intr & MT_INT_RX_DONE(0)) {
-+ dev->rx_pse_check = 0;
- mt7603_irq_disable(dev, MT_INT_RX_DONE(0));
- napi_schedule(&dev->mt76.napi[0]);
- }
-
- if (intr & MT_INT_RX_DONE(1)) {
-+ dev->rx_pse_check = 0;
- mt7603_irq_disable(dev, MT_INT_RX_DONE(1));
- napi_schedule(&dev->mt76.napi[1]);
- }
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
-index 99ae080502d80..cf21d06257e53 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
-@@ -1441,15 +1441,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
-
- mt7603_beacon_set_timer(dev, -1, 0);
-
-- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
-- dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
-- dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
-- dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
-- mt7603_pse_reset(dev);
--
-- if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
-- goto skip_dma_reset;
--
- mt7603_mac_stop(dev);
-
- mt76_clear(dev, MT_WPDMA_GLO_CFG,
-@@ -1459,28 +1450,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
-
- mt7603_irq_disable(dev, mask);
-
-- mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
--
- mt7603_pse_client_reset(dev);
-
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
- for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
-
-+ mt7603_dma_sched_reset(dev);
-+
-+ mt76_tx_status_check(&dev->mt76, true);
-+
- mt76_for_each_q_rx(&dev->mt76, i) {
- mt76_queue_rx_reset(dev, i);
- }
-
-- mt76_tx_status_check(&dev->mt76, true);
-+ if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
-+ dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY)
-+ mt7603_pse_reset(dev);
-
-- mt7603_dma_sched_reset(dev);
-+ if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
-+ mt7603_mac_dma_start(dev);
-
-- mt7603_mac_dma_start(dev);
-+ mt7603_irq_enable(dev, mask);
-
-- mt7603_irq_enable(dev, mask);
-+ clear_bit(MT76_RESET, &dev->mphy.state);
-+ }
-
--skip_dma_reset:
-- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_worker_enable(&dev->mt76.tx_worker);
-@@ -1570,20 +1565,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
- {
- u32 addr, val;
-
-- if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
-- return true;
--
- if (mt7603_rx_fifo_busy(dev))
-- return false;
-+ goto out;
-
- addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
- mt76_wr(dev, addr, 3);
- val = mt76_rr(dev, addr) >> 16;
-
-- if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
-- return true;
-+ if (!(val & BIT(0)))
-+ return false;
-
-- return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
-+ if (is_mt7628(dev))
-+ val &= 0xa000;
-+ else
-+ val &= 0x8000;
-+ if (!val)
-+ return false;
-+
-+out:
-+ if (mt76_rr(dev, MT_INT_SOURCE_CSR) &
-+ (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1)))
-+ return false;
-+
-+ return true;
- }
-
- static bool
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
-index a39c9a0fcb1cb..524bceb8e9581 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
-@@ -469,6 +469,11 @@ enum {
- #define MT_WF_SEC_BASE 0x21a00
- #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs))
-
-+#define MT_WF_CFG_OFF_BASE 0x21e00
-+#define MT_WF_CFG_OFF(ofs) (MT_WF_CFG_OFF_BASE + (ofs))
-+#define MT_WF_CFG_OFF_WOCCR MT_WF_CFG_OFF(0x004)
-+#define MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS BIT(4)
-+
- #define MT_SEC_SCR MT_WF_SEC(0x004)
- #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0)
-
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
-index 8d745c9730c72..955974a82180f 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
-@@ -2147,7 +2147,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
- };
-
- if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
-- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
-+ phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
- req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
-index 0019890fdb784..fbb1181c58ff3 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
-@@ -106,7 +106,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- else
- mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
-
-- tx_info->skb = DMA_DUMMY_DATA;
-+ tx_info->skb = NULL;
-
- return 0;
- }
-diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
-index 68ca0844cbbfa..87bfa441a9374 100644
---- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
-@@ -257,6 +257,8 @@ enum tx_mgnt_type {
- #define MT_TXD7_UDP_TCP_SUM BIT(15)
- #define MT_TXD7_TX_TIME GENMASK(9, 0)
-
-+#define MT_TXD9_WLAN_IDX GENMASK(23, 8)
-+
- #define MT_TX_RATE_STBC BIT(14)
- #define MT_TX_RATE_NSS GENMASK(13, 10)
- #define MT_TX_RATE_MODE GENMASK(9, 6)
-diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
-index ee5177fd6ddea..87479c6c2b505 100644
---- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
-@@ -151,23 +151,6 @@ void mt76_connac_tx_complete_skb(struct mt76_dev *mdev,
- return;
- }
-
-- /* error path */
-- if (e->skb == DMA_DUMMY_DATA) {
-- struct mt76_connac_txp_common *txp;
-- struct mt76_txwi_cache *t;
-- u16 token;
--
-- txp = mt76_connac_txwi_to_txp(mdev, e->txwi);
-- if (is_mt76_fw_txp(mdev))
-- token = le16_to_cpu(txp->fw.token);
-- else
-- token = le16_to_cpu(txp->hw.msdu_id[0]) &
-- ~MT_MSDU_ID_VALID;
--
-- t = mt76_token_put(mdev, token);
-- e->skb = t ? t->skb : NULL;
-- }
--
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
- }
-@@ -310,7 +293,10 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
- struct ieee80211_vif *vif,
- bool beacon, bool mcast)
- {
-- u8 nss = 0, mode = 0, band = mphy->chandef.chan->band;
-+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-+ struct cfg80211_chan_def *chandef = mvif->ctx ?
-+ &mvif->ctx->def : &mphy->chandef;
-+ u8 nss = 0, mode = 0, band = chandef->chan->band;
- int rateidx = 0, mcast_rate;
-
- if (!vif)
-@@ -343,7 +329,7 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
-
- legacy:
-- rateidx = mt76_calculate_default_rate(mphy, rateidx);
-+ rateidx = mt76_calculate_default_rate(mphy, vif, rateidx);
- mode = rateidx >> 8;
- rateidx &= GENMASK(7, 0);
- out:
-diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
-index 0f0a519f956f8..8274a57e1f0fb 100644
---- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
-@@ -829,7 +829,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
- struct ieee80211_vif *vif,
- u8 rcpi, u8 sta_state)
- {
-- struct cfg80211_chan_def *chandef = &mphy->chandef;
-+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-+ struct cfg80211_chan_def *chandef = mvif->ctx ?
-+ &mvif->ctx->def : &mphy->chandef;
- enum nl80211_band band = chandef->chan->band;
- struct mt76_dev *dev = mphy->dev;
- struct sta_rec_ra_info *ra_info;
-@@ -1369,7 +1371,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
- const struct ieee80211_sta_he_cap *
- mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
- {
-- enum nl80211_band band = phy->chandef.chan->band;
-+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-+ struct cfg80211_chan_def *chandef = mvif->ctx ?
-+ &mvif->ctx->def : &phy->chandef;
-+ enum nl80211_band band = chandef->chan->band;
- struct ieee80211_supported_band *sband;
-
- sband = phy->hw->wiphy->bands[band];
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
-index b8b0c0fda7522..2222fb9aa103e 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
-@@ -809,7 +809,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
- else
- txp->rept_wds_wcid = cpu_to_le16(0x3ff);
-- tx_info->skb = DMA_DUMMY_DATA;
-+ tx_info->skb = NULL;
-
- /* pass partial skb header to fw */
- tx_info->buf[1].len = MT_CT_PARSE_LEN;
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
-index 8ebbf186fab23..d85105a43d704 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
-@@ -646,11 +646,13 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
- mt7915_update_bss_color(hw, vif, &info->he_bss_color);
-
- if (changed & (BSS_CHANGED_BEACON |
-- BSS_CHANGED_BEACON_ENABLED |
-- BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
-- BSS_CHANGED_FILS_DISCOVERY))
-+ BSS_CHANGED_BEACON_ENABLED))
- mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
-
-+ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
-+ BSS_CHANGED_FILS_DISCOVERY))
-+ mt7915_mcu_add_inband_discov(dev, vif, changed);
-+
- if (set_bss_info == 0)
- mt7915_mcu_add_bss_info(phy, vif, false);
- if (set_sta == 0)
-@@ -1386,7 +1388,7 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
- if (sset != ETH_SS_STATS)
- return;
-
-- memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
-+ memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
- data += sizeof(mt7915_gstrings_stats);
- page_pool_ethtool_stats_get_strings(data);
- }
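
For a two-dimensional char array, arr and *arr evaluate to the same address, so the old memcpy(data, *mt7915_gstrings_stats, ...) copied the right bytes; the fix drops the dereference so the source expression actually denotes the whole object whose size is being copied. A quick standalone illustration (gstrings is a made-up table, not the driver's):

    #include <stdio.h>
    #include <string.h>

    static const char gstrings[3][16] = { "tx_packets", "rx_packets", "drops" };

    int main(void)
    {
        char out[sizeof(gstrings)];

        /* same address, different types: char (*)[16] vs char * */
        printf("gstrings=%p *gstrings=%p\n",
               (void *)gstrings, (void *)*gstrings);

        /* the fixed form: the source expression covers the full object */
        memcpy(out, gstrings, sizeof(gstrings));
        printf("first string: %s\n", out);
        return 0;
    }
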
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
-index 50ae7bf3af91c..5d8e985cd7d45 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
-@@ -1015,13 +1015,13 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool bfee)
- {
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
-+ int sts = hweight16(phy->mt76->chainmask);
-
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
- return false;
-
-- if (!bfee && tx_ant < 2)
-+ if (!bfee && sts < 2)
- return false;
-
- if (sta->deflink.he_cap.has_he) {
-@@ -1882,10 +1882,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
- }
-
--static void
--mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-- struct sk_buff *rskb, struct bss_info_bcn *bcn,
-- u32 changed)
-+int
-+mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-+ u32 changed)
- {
- #define OFFLOAD_TX_MODE_SU BIT(0)
- #define OFFLOAD_TX_MODE_MU BIT(1)
-@@ -1895,14 +1894,27 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
- enum nl80211_band band = chandef->chan->band;
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
-+ struct bss_info_bcn *bcn;
- struct bss_info_inband_discovery *discov;
- struct ieee80211_tx_info *info;
-- struct sk_buff *skb = NULL;
-- struct tlv *tlv;
-+ struct sk_buff *rskb, *skb = NULL;
-+ struct tlv *tlv, *sub_tlv;
- bool ext_phy = phy != &dev->phy;
- u8 *buf, interval;
- int len;
-
-+ if (vif->bss_conf.nontransmitted)
-+ return 0;
-+
-+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
-+ MT7915_MAX_BSS_OFFLOAD_SIZE);
-+ if (IS_ERR(rskb))
-+ return PTR_ERR(rskb);
-+
-+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
-+ bcn = (struct bss_info_bcn *)tlv;
-+ bcn->enable = true;
-+
- if (changed & BSS_CHANGED_FILS_DISCOVERY &&
- vif->bss_conf.fils_discovery.max_interval) {
- interval = vif->bss_conf.fils_discovery.max_interval;
-@@ -1913,27 +1925,29 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
- skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
- }
-
-- if (!skb)
-- return;
-+ if (!skb) {
-+ dev_kfree_skb(rskb);
-+ return -EINVAL;
-+ }
-
- info = IEEE80211_SKB_CB(skb);
- info->control.vif = vif;
- info->band = band;
--
- info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
-
- len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
- len = (len & 0x3) ? ((len | 0x3) + 1) : len;
-
-- if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) {
-+ if (skb->len > MT7915_MAX_BEACON_SIZE) {
- dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
-+ dev_kfree_skb(rskb);
- dev_kfree_skb(skb);
-- return;
-+ return -EINVAL;
- }
-
-- tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
-- len, &bcn->sub_ntlv, &bcn->len);
-- discov = (struct bss_info_inband_discovery *)tlv;
-+ sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
-+ len, &bcn->sub_ntlv, &bcn->len);
-+ discov = (struct bss_info_inband_discovery *)sub_tlv;
- discov->tx_mode = OFFLOAD_TX_MODE_SU;
- /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
- discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
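
The inband-discovery path above pads the TLV length to a 4-byte multiple with (len & 0x3) ? ((len | 0x3) + 1) : len; setting the low two bits and adding one is equivalent to the conventional (len + 3) & ~3. A standalone check of that equivalence:

    #include <assert.h>
    #include <stdio.h>

    /* the expression as written in the patch */
    static unsigned round4_patch(unsigned len)
    {
        return (len & 0x3) ? ((len | 0x3) + 1) : len;
    }

    /* the conventional ALIGN(len, 4) form */
    static unsigned round4_mask(unsigned len)
    {
        return (len + 3u) & ~3u;
    }

    int main(void)
    {
        for (unsigned len = 0; len < 64; len++)
            assert(round4_patch(len) == round4_mask(len));
        printf("both forms agree for 0..63\n");
        return 0;
    }
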
-@@ -1941,13 +1955,16 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi
- discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
- discov->enable = true;
-
-- buf = (u8 *)tlv + sizeof(*discov);
-+ buf = (u8 *)sub_tlv + sizeof(*discov);
-
- mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
- 0, changed);
- memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
-
- dev_kfree_skb(skb);
-+
-+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
-+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
- }
-
- int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-@@ -1980,11 +1997,14 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- goto out;
-
- skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
-- if (!skb)
-+ if (!skb) {
-+ dev_kfree_skb(rskb);
- return -EINVAL;
-+ }
-
-- if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) {
-+ if (skb->len > MT7915_MAX_BEACON_SIZE) {
- dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
-+ dev_kfree_skb(rskb);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-@@ -1997,11 +2017,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
- dev_kfree_skb(skb);
-
-- if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
-- changed & BSS_CHANGED_FILS_DISCOVERY)
-- mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
-- bcn, changed);
--
- out:
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
- MCU_EXT_CMD(BSS_INFO_UPDATE), true);
-@@ -2725,10 +2740,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
- if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
- req.tx_path_num = fls(phy->mt76->antenna_mask);
-
-- if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
-- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
-+ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
- req.switch_reason = CH_SWITCH_NORMAL;
-- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
-+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
-+ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
- req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
- NL80211_IFTYPE_AP))
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
-index b9ea297f382c3..1592b5d6751a0 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
-@@ -495,10 +495,14 @@ enum {
- SER_RECOVER
- };
-
--#define MT7915_MAX_BEACON_SIZE 512
--#define MT7915_MAX_INBAND_FRAME_SIZE 256
--#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
-- MT7915_MAX_INBAND_FRAME_SIZE + \
-+#define MT7915_MAX_BEACON_SIZE 1308
-+#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
-+ sizeof(struct bss_info_bcn) + \
-+ sizeof(struct bss_info_bcn_cntdwn) + \
-+ sizeof(struct bss_info_bcn_mbss) + \
-+ MT_TXD_SIZE + \
-+ sizeof(struct bss_info_bcn_cont))
-+#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
- MT7915_BEACON_UPDATE_SIZE)
-
- #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
-@@ -511,12 +515,6 @@ enum {
- sizeof(struct bss_info_bmc_rate) +\
- sizeof(struct bss_info_ext_bss))
-
--#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
-- sizeof(struct bss_info_bcn_cntdwn) + \
-- sizeof(struct bss_info_bcn_mbss) + \
-- sizeof(struct bss_info_bcn_cont) + \
-- sizeof(struct bss_info_inband_discovery))
--
- static inline s8
- mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
- {
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
-index 0456e56f63480..21984e9723709 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
-@@ -447,6 +447,8 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
- bool add);
- int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct cfg80211_he_bss_color *he_bss_color);
-+int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-+ u32 changed);
- int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int enable, u32 changed);
- int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
-index 0844d28b3223d..d8851cb5f400b 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
-@@ -756,7 +756,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
-
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
-- true, mvif->ctx);
-+ true, mvif->mt76.ctx);
-
- ewma_avg_signal_init(&msta->avg_ack_signal);
-
-@@ -791,7 +791,7 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- if (!sta->tdls)
- mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
- &mvif->sta.wcid, false,
-- mvif->ctx);
-+ mvif->mt76.ctx);
- }
-
- spin_lock_bh(&dev->mt76.sta_poll_lock);
-@@ -1208,7 +1208,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- mt792x_mutex_acquire(dev);
-
- err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
-- true, mvif->ctx);
-+ true, mvif->mt76.ctx);
- if (err)
- goto out;
-
-@@ -1240,7 +1240,7 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- goto out;
-
- mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
-- mvif->ctx);
-+ mvif->mt76.ctx);
-
- out:
- mt792x_mutex_release(dev);
-@@ -1265,7 +1265,7 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct ieee80211_chanctx_conf *ctx = priv;
-
-- if (ctx != mvif->ctx)
-+ if (ctx != mvif->mt76.ctx)
- return;
-
- if (vif->type == NL80211_IFTYPE_MONITOR)
-@@ -1298,7 +1298,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
- jiffies_to_msecs(HZ);
-
- mt792x_mutex_acquire(dev);
-- mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration,
-+ mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
- MT7921_ROC_REQ_JOIN);
- mt792x_mutex_release(dev);
- }
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
-index 3dda84a937175..f04e7095e1810 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
-@@ -17,6 +17,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
- .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
- .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
-+ { PCI_DEVICE(PCI_VENDOR_ID_ITTIM, 0x7922),
-+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
- .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
-index e7a995e7e70a3..c866144ff0613 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
-@@ -48,7 +48,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
- mt76_connac_write_hw_txp(mdev, tx_info, txp, id);
-
-- tx_info->skb = DMA_DUMMY_DATA;
-+ tx_info->skb = NULL;
-
- return 0;
- }
-diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
-index 5d5ab8630041b..6c347495e1185 100644
---- a/drivers/net/wireless/mediatek/mt76/mt792x.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
-@@ -91,7 +91,6 @@ struct mt792x_vif {
- struct ewma_rssi rssi;
-
- struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
-- struct ieee80211_chanctx_conf *ctx;
- };
-
- struct mt792x_phy {
-diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
-index 46be7f996c7e1..f111c47fdca56 100644
---- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
-@@ -243,7 +243,7 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
-
- mutex_lock(&dev->mt76.mutex);
-- mvif->ctx = ctx;
-+ mvif->mt76.ctx = ctx;
- mutex_unlock(&dev->mt76.mutex);
-
- return 0;
-@@ -259,7 +259,7 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
-
- mutex_lock(&dev->mt76.mutex);
-- mvif->ctx = NULL;
-+ mvif->mt76.ctx = NULL;
- mutex_unlock(&dev->mt76.mutex);
- }
- EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
-@@ -358,7 +358,7 @@ void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- if (sset != ETH_SS_STATS)
- return;
-
-- memcpy(data, *mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats));
-+ memcpy(data, mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats));
-
- data += sizeof(mt792x_gstrings_stats);
- page_pool_ethtool_stats_get_strings(data);
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
-index 26e03b28935f2..66d8cc0eeabee 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
-@@ -733,16 +733,17 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
- IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
- IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
-
-+ val = max_t(u8, sts - 1, 3);
- eht_cap_elem->phy_cap_info[0] |=
-- u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
-+ u8_encode_bits(u8_get_bits(val, BIT(0)),
- IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
-
- eht_cap_elem->phy_cap_info[1] =
-- u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
-+ u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
-- u8_encode_bits(sts - 1,
-+ u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
-- u8_encode_bits(sts - 1,
-+ u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
-
- eht_cap_elem->phy_cap_info[2] =
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
-index ac8759febe485..c43839a205088 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
-@@ -433,7 +433,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
-+ /* rxv reports bw 320-1 and 320-2 separately */
- case IEEE80211_STA_RX_BW_320:
-+ case IEEE80211_STA_RX_BW_320 + 1:
- status->bw = RATE_INFO_BW_320;
- break;
- default:
-@@ -991,11 +993,9 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- }
-
- txp->fw.token = cpu_to_le16(id);
-- if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
-- txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
-- else
-- txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
-- tx_info->skb = DMA_DUMMY_DATA;
-+ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
-+
-+ tx_info->skb = NULL;
-
- /* pass partial skb header to fw */
- tx_info->buf[1].len = MT_CT_PARSE_LEN;
-@@ -1051,7 +1051,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7996_tx_check_aggr(sta, txwi);
- } else {
-- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
-+ wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
- }
-
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
-index c3a479dc3f533..620880e560e00 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
-@@ -190,7 +190,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
- mvif->mt76.omac_idx = idx;
- mvif->phy = phy;
- mvif->mt76.band_idx = band_idx;
-- mvif->mt76.wmm_idx = band_idx;
-+ mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
-
- ret = mt7996_mcu_add_dev_info(phy, vif, true);
- if (ret)
-@@ -414,10 +414,16 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- const struct ieee80211_tx_queue_params *params)
- {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-+ const u8 mq_to_aci[] = {
-+ [IEEE80211_AC_VO] = 3,
-+ [IEEE80211_AC_VI] = 2,
-+ [IEEE80211_AC_BE] = 0,
-+ [IEEE80211_AC_BK] = 1,
-+ };
-
-+ /* firmware uses access class index */
-+ mvif->queue_params[mq_to_aci[queue]] = *params;
- /* no need to update right away, we'll get BSS_CHANGED_QOS */
-- queue = mt76_connac_lmac_mapping(queue);
-- mvif->queue_params[queue] = *params;
-
- return 0;
- }
-@@ -618,8 +624,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
- mt7996_mcu_add_beacon(hw, vif, info->enable_beacon);
- }
-
-- if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
-- changed & BSS_CHANGED_FILS_DISCOVERY)
-+ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
-+ BSS_CHANGED_FILS_DISCOVERY))
- mt7996_mcu_beacon_inband_discov(dev, vif, changed);
-
- if (changed & BSS_CHANGED_MU_GROUPS)
-@@ -1192,7 +1198,7 @@ void mt7996_get_et_strings(struct ieee80211_hw *hw,
- u32 sset, u8 *data)
- {
- if (sset == ETH_SS_STATS)
-- memcpy(data, *mt7996_gstrings_stats,
-+ memcpy(data, mt7996_gstrings_stats,
- sizeof(mt7996_gstrings_stats));
- }
-
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
-index 4a30db49ef33f..7575d3506ea4e 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
-+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
-@@ -2016,7 +2016,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- bcn->bcc_ie_pos = cpu_to_le16(offset - 3);
- }
-
-- buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE;
-+ buf = (u8 *)bcn + sizeof(*bcn);
- mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0,
- BSS_CHANGED_BEACON);
-
-@@ -2034,26 +2034,22 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
- struct sk_buff *skb, *rskb;
- struct tlv *tlv;
- struct bss_bcn_content_tlv *bcn;
-+ int len;
-
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
-- MT7996_BEACON_UPDATE_SIZE);
-+ MT7996_MAX_BSS_OFFLOAD_SIZE);
- if (IS_ERR(rskb))
- return PTR_ERR(rskb);
-
-- tlv = mt7996_mcu_add_uni_tlv(rskb,
-- UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn));
-- bcn = (struct bss_bcn_content_tlv *)tlv;
-- bcn->enable = en;
--
-- if (!en)
-- goto out;
--
- skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
-- if (!skb)
-+ if (!skb) {
-+ dev_kfree_skb(rskb);
- return -EINVAL;
-+ }
-
-- if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) {
-+ if (skb->len > MT7996_MAX_BEACON_SIZE) {
- dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
-+ dev_kfree_skb(rskb);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-@@ -2061,11 +2057,18 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
- info = IEEE80211_SKB_CB(skb);
- info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
-
-+ len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
-+ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
-+ bcn = (struct bss_bcn_content_tlv *)tlv;
-+ bcn->enable = en;
-+ if (!en)
-+ goto out;
-+
- mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
- /* TODO: subtag - 11v MBSSID */
- mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs);
-- dev_kfree_skb(skb);
- out:
-+ dev_kfree_skb(skb);
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
- MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
- }
-@@ -2086,9 +2089,13 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- struct sk_buff *rskb, *skb = NULL;
- struct tlv *tlv;
- u8 *buf, interval;
-+ int len;
-+
-+ if (vif->bss_conf.nontransmitted)
-+ return 0;
-
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
-- MT7996_INBAND_FRAME_SIZE);
-+ MT7996_MAX_BSS_OFFLOAD_SIZE);
- if (IS_ERR(rskb))
- return PTR_ERR(rskb);
-
-@@ -2102,11 +2109,14 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
- }
-
-- if (!skb)
-+ if (!skb) {
-+ dev_kfree_skb(rskb);
- return -EINVAL;
-+ }
-
-- if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) {
-+ if (skb->len > MT7996_MAX_BEACON_SIZE) {
- dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
-+ dev_kfree_skb(rskb);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-@@ -2116,7 +2126,9 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- info->band = band;
- info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
-
-- tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov));
-+ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
-+
-+ tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
-
- discov = (struct bss_inband_discovery_tlv *)tlv;
- discov->tx_mode = OFFLOAD_TX_MODE_SU;
-@@ -2127,7 +2139,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- discov->enable = true;
- discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED);
-
-- buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE;
-+ buf = (u8 *)tlv + sizeof(*discov);
-
- mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed);
-
-@@ -2679,7 +2691,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif)
-
- e = (struct edca *)tlv;
- e->set = WMM_PARAM_SET;
-- e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS;
-+ e->queue = ac;
- e->aifs = q->aifs;
- e->txop = cpu_to_le16(q->txop);
-
-@@ -2960,10 +2972,10 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
- .channel_band = ch_band[chandef->chan->band],
- };
-
-- if (tag == UNI_CHANNEL_RX_PATH ||
-- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
-+ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
- req.switch_reason = CH_SWITCH_NORMAL;
-- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
-+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
-+ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
- req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
- NL80211_IFTYPE_AP))
-@@ -3307,8 +3319,8 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
-
- tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
- req_mod_en = (struct bf_mod_en_ctrl *)tlv;
-- req_mod_en->bf_num = 2;
-- req_mod_en->bf_bitmap = GENMASK(0, 0);
-+ req_mod_en->bf_num = 3;
-+ req_mod_en->bf_bitmap = GENMASK(2, 0);
- break;
- }
- default:
-@@ -3548,7 +3560,9 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- int cmd)
- {
- struct {
-- u8 _rsv[4];
-+ /* fixed field */
-+ u8 bss;
-+ u8 _rsv[3];
-
- __le16 tag;
- __le16 len;
-@@ -3566,7 +3580,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- u8 exponent;
- u8 is_ap;
- u8 agrt_params;
-- u8 __rsv2[135];
-+ u8 __rsv2[23];
- } __packed req = {
- .tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE),
- .len = cpu_to_le16(sizeof(req) - 4),
-@@ -3576,6 +3590,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- .flowid = flow->id,
- .peer_id = cpu_to_le16(flow->wcid),
- .duration = flow->duration,
-+ .bss = mvif->mt76.idx,
- .bss_idx = mvif->mt76.idx,
- .start_tsf = cpu_to_le64(flow->tsf),
- .mantissa = flow->mantissa,
-diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
-index 078f828586212..e4b31228ba0d2 100644
---- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
-+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
-@@ -270,8 +270,6 @@ struct bss_inband_discovery_tlv {
- u8 enable;
- __le16 wcid;
- __le16 prob_rsp_len;
--#define MAX_INBAND_FRAME_SIZE 512
-- u8 pkt[MAX_INBAND_FRAME_SIZE];
- } __packed;
-
- struct bss_bcn_content_tlv {
-@@ -283,8 +281,6 @@ struct bss_bcn_content_tlv {
- u8 enable;
- u8 type;
- __le16 pkt_len;
--#define MAX_BEACON_SIZE 512
-- u8 pkt[MAX_BEACON_SIZE];
- } __packed;
-
- struct bss_bcn_cntdwn_tlv {
-@@ -591,13 +587,14 @@ enum {
- sizeof(struct sta_rec_hdr_trans) + \
- sizeof(struct tlv))
-
-+#define MT7996_MAX_BEACON_SIZE 1342
- #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
- sizeof(struct bss_bcn_content_tlv) + \
-+ MT_TXD_SIZE + \
- sizeof(struct bss_bcn_cntdwn_tlv) + \
- sizeof(struct bss_bcn_mbss_tlv))
--
--#define MT7996_INBAND_FRAME_SIZE (sizeof(struct bss_req_hdr) + \
-- sizeof(struct bss_inband_discovery_tlv))
-+#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
-+ MT7996_BEACON_UPDATE_SIZE)
-
- enum {
- UNI_BAND_CONFIG_RADIO_ENABLE,
-diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
-index 58bbf50081e47..9eb115c79c90a 100644
---- a/drivers/net/wireless/microchip/wilc1000/wlan.c
-+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
-@@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev)
- }
-
- if (!wilc->vmm_table)
-- wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
-+ wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
-
- if (!wilc->vmm_table) {
- ret = -ENOBUFS;
-diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
-index 94ee831b5de35..506d2f31efb5a 100644
---- a/drivers/net/wireless/purelifi/plfxlc/mac.c
-+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
-@@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
- u32 sset, u8 *data)
- {
- if (sset == ETH_SS_STATS)
-- memcpy(data, *et_strings, sizeof(et_strings));
-+ memcpy(data, et_strings, sizeof(et_strings));
- }
-
- static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
-diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
-index 6f61d6a106272..5a34894a533be 100644
---- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
-@@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
- }
-
- if (rtlpriv->btcoexist.bt_edca_dl != 0) {
-- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
-+ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
- bt_change_edca = true;
- }
-
-diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
-index 0b6a15c2e5ccd..d92aad60edfe9 100644
---- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
-@@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
- }
-
- if (rtlpriv->btcoexist.bt_edca_dl != 0) {
-- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
-+ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
- bt_change_edca = true;
- }
-
-diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
-index 8ada31380efa4..0ff8e355c23a4 100644
---- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
-@@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw)
- }
-
- if (rtlpriv->btcoexist.bt_edca_dl != 0) {
-- edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
-+ edca_be_dl = rtlpriv->btcoexist.bt_edca_dl;
- bt_change_edca = true;
- }
-
-diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
-index f8ba133baff06..35bc37a3c469d 100644
---- a/drivers/net/wireless/realtek/rtw88/debug.c
-+++ b/drivers/net/wireless/realtek/rtw88/debug.c
-@@ -1233,9 +1233,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = {
- #define rtw_debugfs_add_core(name, mode, fopname, parent) \
- do { \
- rtw_debug_priv_ ##name.rtwdev = rtwdev; \
-- if (!debugfs_create_file(#name, mode, \
-+ if (IS_ERR(debugfs_create_file(#name, mode, \
- parent, &rtw_debug_priv_ ##name,\
-- &file_ops_ ##fopname)) \
-+ &file_ops_ ##fopname))) \
- pr_debug("Unable to initialize debugfs:%s\n", \
- #name); \
- } while (0)
-diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
-index d879d7e3dc81f..e6ab1ac6d7093 100644
---- a/drivers/net/wireless/realtek/rtw88/usb.c
-+++ b/drivers/net/wireless/realtek/rtw88/usb.c
-@@ -611,8 +611,7 @@ static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb)
-
- for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
- rxcb = &rtwusb->rx_cb[i];
-- if (rxcb->rx_urb)
-- usb_kill_urb(rxcb->rx_urb);
-+ usb_kill_urb(rxcb->rx_urb);
- }
- }
-
-@@ -623,10 +622,8 @@ static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb)
-
- for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
- rxcb = &rtwusb->rx_cb[i];
-- if (rxcb->rx_urb) {
-- usb_kill_urb(rxcb->rx_urb);
-- usb_free_urb(rxcb->rx_urb);
-- }
-+ usb_kill_urb(rxcb->rx_urb);
-+ usb_free_urb(rxcb->rx_urb);
- }
- }
-
-diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
-index 6a5e52a96d183..caa22226b01bc 100644
---- a/drivers/net/wireless/silabs/wfx/data_tx.c
-+++ b/drivers/net/wireless/silabs/wfx/data_tx.c
-@@ -226,53 +226,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
-
- static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
- {
-- int i;
-- bool finished;
-+ bool has_rate0 = false;
-+ int i, j;
-
-- /* Firmware is not able to mix rates with different flags */
-- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
-- if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
-- rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
-- if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
-+ for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) {
-+ if (rates[j].idx == -1)
-+ break;
-+		/* The device uses the rates in descending order, whatever minstrel requests.
-+		 * We have to trade off here. Most important is to respect the primary rate
-+		 * requested by minstrel, so we drop the entries with a rate higher than the
-+		 * previous one.
-+		 */
-+ if (rates[j].idx >= rates[i - 1].idx) {
-+ rates[i - 1].count += rates[j].count;
-+ rates[i - 1].count = min_t(u16, 15, rates[i - 1].count);
-+ } else {
-+ memcpy(rates + i, rates + j, sizeof(rates[i]));
-+ if (rates[i].idx == 0)
-+ has_rate0 = true;
-+		/* The device applies Short GI only on the first rate */
- rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
-- if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
-- rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
-- }
--
-- /* Sort rates and remove duplicates */
-- do {
-- finished = true;
-- for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
-- if (rates[i + 1].idx == rates[i].idx &&
-- rates[i].idx != -1) {
-- rates[i].count += rates[i + 1].count;
-- if (rates[i].count > 15)
-- rates[i].count = 15;
-- rates[i + 1].idx = -1;
-- rates[i + 1].count = 0;
--
-- finished = false;
-- }
-- if (rates[i + 1].idx > rates[i].idx) {
-- swap(rates[i + 1], rates[i]);
-- finished = false;
-- }
-+ i++;
- }
-- } while (!finished);
-+ }
- /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
-- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
-- if (rates[i].idx == 0)
-- break;
-- if (rates[i].idx == -1) {
-- rates[i].idx = 0;
-- rates[i].count = 8; /* == hw->max_rate_tries */
-- rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
-- break;
-- }
-+ if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) {
-+ rates[i].idx = 0;
-+ rates[i].count = 8; /* == hw->max_rate_tries */
-+ rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS;
-+ i++;
-+ }
-+ for (; i < IEEE80211_TX_MAX_RATES; i++) {
-+ memset(rates + i, 0, sizeof(rates[i]));
-+ rates[i].idx = -1;
- }
-- /* All retries use long GI */
-- for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
-- rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
- }
-
- static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info)
-diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
-index 1f524030b186e..f5a0880da3fcc 100644
---- a/drivers/net/wireless/virtual/mac80211_hwsim.c
-+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
-@@ -3170,7 +3170,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
- u32 sset, u8 *data)
- {
- if (sset == ETH_SS_STATS)
-- memcpy(data, *mac80211_hwsim_gstrings_stats,
-+ memcpy(data, mac80211_hwsim_gstrings_stats,
- sizeof(mac80211_hwsim_gstrings_stats));
- }
-
-diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
-index 1b9f5b8a6167e..d3fca0ab62900 100644
---- a/drivers/nvdimm/of_pmem.c
-+++ b/drivers/nvdimm/of_pmem.c
-@@ -30,7 +30,13 @@ static int of_pmem_region_probe(struct platform_device *pdev)
- if (!priv)
- return -ENOMEM;
-
-- priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
-+ priv->bus_desc.provider_name = devm_kstrdup(&pdev->dev, pdev->name,
-+ GFP_KERNEL);
-+ if (!priv->bus_desc.provider_name) {
-+ kfree(priv);
-+ return -ENOMEM;
-+ }
-+
- priv->bus_desc.module = THIS_MODULE;
- priv->bus_desc.of_node = np;
-
-diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
-index 0a81f87f6f6c0..e2f1fb99707fc 100644
---- a/drivers/nvdimm/region_devs.c
-+++ b/drivers/nvdimm/region_devs.c
-@@ -939,7 +939,8 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
- {
- unsigned int cpu, lane;
-
-- cpu = get_cpu();
-+ migrate_disable();
-+ cpu = smp_processor_id();
- if (nd_region->num_lanes < nr_cpu_ids) {
- struct nd_percpu_lane *ndl_lock, *ndl_count;
-
-@@ -958,16 +959,15 @@ EXPORT_SYMBOL(nd_region_acquire_lane);
- void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
- {
- if (nd_region->num_lanes < nr_cpu_ids) {
-- unsigned int cpu = get_cpu();
-+ unsigned int cpu = smp_processor_id();
- struct nd_percpu_lane *ndl_lock, *ndl_count;
-
- ndl_count = per_cpu_ptr(nd_region->lane, cpu);
- ndl_lock = per_cpu_ptr(nd_region->lane, lane);
- if (--ndl_count->count == 0)
- spin_unlock(&ndl_lock->lock);
-- put_cpu();
- }
-- put_cpu();
-+ migrate_enable();
- }
- EXPORT_SYMBOL(nd_region_release_lane);
-
-diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
-index 21783aa2ee8e1..c09048984a277 100644
---- a/drivers/nvme/host/core.c
-+++ b/drivers/nvme/host/core.c
-@@ -2026,6 +2026,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
- if (ret)
- return ret;
-
-+ if (id->ncap == 0) {
-+ /* namespace not allocated or attached */
-+ info->is_removed = true;
-+ ret = -ENODEV;
-+ goto error;
-+ }
-+
- blk_mq_freeze_queue(ns->disk->queue);
- lbaf = nvme_lbaf_index(id->flbas);
- ns->lba_shift = id->lbaf[lbaf].ds;
-@@ -2083,6 +2090,8 @@ out:
- set_bit(NVME_NS_READY, &ns->flags);
- ret = 0;
- }
-+
-+error:
- kfree(id);
- return ret;
- }
-diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
-index 8175d49f29090..92ba315cfe19e 100644
---- a/drivers/nvme/host/fabrics.c
-+++ b/drivers/nvme/host/fabrics.c
-@@ -645,8 +645,10 @@ static const match_table_t opt_tokens = {
- { NVMF_OPT_TOS, "tos=%d" },
- { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
- { NVMF_OPT_DISCOVERY, "discovery" },
-+#ifdef CONFIG_NVME_HOST_AUTH
- { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
- { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
-+#endif
- { NVMF_OPT_ERR, NULL }
- };
-
-diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
-index 747c879e8982b..529b9954d2b8c 100644
---- a/drivers/nvme/host/ioctl.c
-+++ b/drivers/nvme/host/ioctl.c
-@@ -510,10 +510,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-
- req->bio = pdu->bio;
-- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
- pdu->nvme_status = -EINTR;
-- else
-+ } else {
- pdu->nvme_status = nvme_req(req)->status;
-+ if (!pdu->nvme_status)
-+ pdu->nvme_status = blk_status_to_errno(err);
-+ }
- pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
-
- /*
-diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
-index 43b5bd8bb6a52..d8da840a1c0ed 100644
---- a/drivers/nvme/target/fabrics-cmd.c
-+++ b/drivers/nvme/target/fabrics-cmd.c
-@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
- goto out;
- }
-
-+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
-+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
- status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
- le32_to_cpu(c->kato), &ctrl);
- if (status)
-@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
- goto out;
- }
-
-+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
-+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
- ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
- le16_to_cpu(d->cntlid), req);
- if (!ctrl) {
-diff --git a/drivers/of/address.c b/drivers/of/address.c
-index e692809ff8227..3219c51777507 100644
---- a/drivers/of/address.c
-+++ b/drivers/of/address.c
-@@ -100,6 +100,32 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
- return IORESOURCE_MEM;
- }
-
-+static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
-+ int ns, int pna)
-+{
-+ u64 cp, s, da;
-+
-+ /* Check that flags match */
-+ if (*addr != *range)
-+ return OF_BAD_ADDR;
-+
-+ /* Read address values, skipping high cell */
-+ cp = of_read_number(range + 1, na - 1);
-+ s = of_read_number(range + na + pna, ns);
-+ da = of_read_number(addr + 1, na - 1);
-+
-+ pr_debug("default flags map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
-+
-+ if (da < cp || da >= (cp + s))
-+ return OF_BAD_ADDR;
-+ return da - cp;
-+}
-+
-+static int of_bus_default_flags_translate(__be32 *addr, u64 offset, int na)
-+{
-+ /* Keep "flags" part (high cell) in translated address */
-+ return of_bus_default_translate(addr + 1, offset, na - 1);
-+}
-
- #ifdef CONFIG_PCI
- static unsigned int of_bus_pci_get_flags(const __be32 *addr)
-@@ -374,8 +400,8 @@ static struct of_bus of_busses[] = {
- .addresses = "reg",
- .match = of_bus_default_flags_match,
- .count_cells = of_bus_default_count_cells,
-- .map = of_bus_default_map,
-- .translate = of_bus_default_translate,
-+ .map = of_bus_default_flags_map,
-+ .translate = of_bus_default_flags_translate,
- .has_flags = true,
- .get_flags = of_bus_default_flags_get_flags,
- },
-diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
-index 6f5e5f0230d39..332bcc0053a5e 100644
---- a/drivers/parisc/power.c
-+++ b/drivers/parisc/power.c
-@@ -197,6 +197,14 @@ static struct notifier_block parisc_panic_block = {
- .priority = INT_MAX,
- };
-
-+/* qemu soft power-off function */
-+static int qemu_power_off(struct sys_off_data *data)
-+{
-+ /* this turns the system off via SeaBIOS */
-+ gsc_writel(0, (unsigned long) data->cb_data);
-+ pdc_soft_power_button(1);
-+ return NOTIFY_DONE;
-+}
-
- static int __init power_init(void)
- {
-@@ -226,7 +234,13 @@ static int __init power_init(void)
- soft_power_reg);
- }
-
-- power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
-+ power_task = NULL;
-+ if (running_on_qemu && soft_power_reg)
-+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
-+ qemu_power_off, (void *)soft_power_reg);
-+ else
-+ power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
-+ KTHREAD_NAME);
- if (IS_ERR(power_task)) {
- printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n");
- pdc_soft_power_button(0);
-diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
-index 6319082301d68..c6bede3469320 100644
---- a/drivers/pci/controller/dwc/pci-exynos.c
-+++ b/drivers/pci/controller/dwc/pci-exynos.c
-@@ -375,7 +375,7 @@ fail_probe:
- return ret;
- }
-
--static int __exit exynos_pcie_remove(struct platform_device *pdev)
-+static int exynos_pcie_remove(struct platform_device *pdev)
- {
- struct exynos_pcie *ep = platform_get_drvdata(pdev);
-
-@@ -431,7 +431,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
-
- static struct platform_driver exynos_pcie_driver = {
- .probe = exynos_pcie_probe,
-- .remove = __exit_p(exynos_pcie_remove),
-+ .remove = exynos_pcie_remove,
- .driver = {
- .name = "exynos-pcie",
- .of_match_table = exynos_pcie_of_match,
-diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
-index 49aea6ce3e878..0def919f89faf 100644
---- a/drivers/pci/controller/dwc/pci-keystone.c
-+++ b/drivers/pci/controller/dwc/pci-keystone.c
-@@ -1100,7 +1100,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
- { },
- };
-
--static int __init ks_pcie_probe(struct platform_device *pdev)
-+static int ks_pcie_probe(struct platform_device *pdev)
- {
- const struct dw_pcie_host_ops *host_ops;
- const struct dw_pcie_ep_ops *ep_ops;
-@@ -1302,7 +1302,7 @@ err_link:
- return ret;
- }
-
--static int __exit ks_pcie_remove(struct platform_device *pdev)
-+static int ks_pcie_remove(struct platform_device *pdev)
- {
- struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
- struct device_link **link = ks_pcie->link;
-@@ -1318,9 +1318,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
- return 0;
- }
-
--static struct platform_driver ks_pcie_driver __refdata = {
-+static struct platform_driver ks_pcie_driver = {
- .probe = ks_pcie_probe,
-- .remove = __exit_p(ks_pcie_remove),
-+ .remove = ks_pcie_remove,
- .driver = {
- .name = "keystone-pcie",
- .of_match_table = ks_pcie_of_match,
-diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
-index 1c1c7348972b0..2b60d20dfdf59 100644
---- a/drivers/pci/controller/dwc/pcie-designware.c
-+++ b/drivers/pci/controller/dwc/pcie-designware.c
-@@ -732,6 +732,53 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
-
- }
-
-+static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
-+{
-+ u32 lnkcap, lwsc, plc;
-+ u8 cap;
-+
-+ if (!num_lanes)
-+ return;
-+
-+ /* Set the number of lanes */
-+ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
-+ plc &= ~PORT_LINK_FAST_LINK_MODE;
-+ plc &= ~PORT_LINK_MODE_MASK;
-+
-+ /* Set link width speed control register */
-+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
-+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
-+ switch (num_lanes) {
-+ case 1:
-+ plc |= PORT_LINK_MODE_1_LANES;
-+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
-+ break;
-+ case 2:
-+ plc |= PORT_LINK_MODE_2_LANES;
-+ lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
-+ break;
-+ case 4:
-+ plc |= PORT_LINK_MODE_4_LANES;
-+ lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
-+ break;
-+ case 8:
-+ plc |= PORT_LINK_MODE_8_LANES;
-+ lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
-+ break;
-+ default:
-+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
-+ return;
-+ }
-+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
-+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
-+
-+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
-+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
-+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
-+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
-+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
-+}
-+
- void dw_pcie_iatu_detect(struct dw_pcie *pci)
- {
- int max_region, ob, ib;
-@@ -1013,49 +1060,5 @@ void dw_pcie_setup(struct dw_pcie *pci)
- val |= PORT_LINK_DLL_LINK_EN;
- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
-
-- if (!pci->num_lanes) {
-- dev_dbg(pci->dev, "Using h/w default number of lanes\n");
-- return;
-- }
--
-- /* Set the number of lanes */
-- val &= ~PORT_LINK_FAST_LINK_MODE;
-- val &= ~PORT_LINK_MODE_MASK;
-- switch (pci->num_lanes) {
-- case 1:
-- val |= PORT_LINK_MODE_1_LANES;
-- break;
-- case 2:
-- val |= PORT_LINK_MODE_2_LANES;
-- break;
-- case 4:
-- val |= PORT_LINK_MODE_4_LANES;
-- break;
-- case 8:
-- val |= PORT_LINK_MODE_8_LANES;
-- break;
-- default:
-- dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
-- return;
-- }
-- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
--
-- /* Set link width speed control register */
-- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
-- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
-- switch (pci->num_lanes) {
-- case 1:
-- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
-- break;
-- case 2:
-- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
-- break;
-- case 4:
-- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
-- break;
-- case 8:
-- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
-- break;
-- }
-- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-+ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
- }
-diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
-index d93bc29069502..2ee146767971c 100644
---- a/drivers/pci/controller/dwc/pcie-kirin.c
-+++ b/drivers/pci/controller/dwc/pcie-kirin.c
-@@ -741,7 +741,7 @@ err:
- return ret;
- }
-
--static int __exit kirin_pcie_remove(struct platform_device *pdev)
-+static int kirin_pcie_remove(struct platform_device *pdev)
- {
- struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
-
-@@ -818,7 +818,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
-
- static struct platform_driver kirin_pcie_driver = {
- .probe = kirin_pcie_probe,
-- .remove = __exit_p(kirin_pcie_remove),
-+ .remove = kirin_pcie_remove,
- .driver = {
- .name = "kirin-pcie",
- .of_match_table = kirin_pcie_match,
-diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
-index 8bd8107690a6c..9b62ee6992f0e 100644
---- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
-+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
-@@ -123,6 +123,7 @@
-
- /* ELBI registers */
- #define ELBI_SYS_STTS 0x08
-+#define ELBI_CS2_ENABLE 0xa4
-
- /* DBI registers */
- #define DBI_CON_STATUS 0x44
-@@ -263,6 +264,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
- disable_irq(pcie_ep->perst_irq);
- }
-
-+static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
-+ u32 reg, size_t size, u32 val)
-+{
-+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
-+ int ret;
-+
-+ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
-+
-+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
-+ if (ret)
-+ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
-+
-+ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
-+}
-+
- static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
- {
- struct dw_pcie *pci = &pcie_ep->pci;
-@@ -519,6 +535,7 @@ static const struct dw_pcie_ops pci_ops = {
- .link_up = qcom_pcie_dw_link_up,
- .start_link = qcom_pcie_dw_start_link,
- .stop_link = qcom_pcie_dw_stop_link,
-+ .write_dbi2 = qcom_pcie_dw_write_dbi2,
- };
-
- static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
-diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
-index 4bba31502ce1d..248cd9347e8fd 100644
---- a/drivers/pci/controller/dwc/pcie-tegra194.c
-+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
-@@ -9,6 +9,7 @@
- * Author: Vidya Sagar <vidyas@nvidia.com>
- */
-
-+#include <linux/bitfield.h>
- #include <linux/clk.h>
- #include <linux/debugfs.h>
- #include <linux/delay.h>
-@@ -346,8 +347,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
- */
- val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
- if (val & PCI_EXP_LNKSTA_LBMS) {
-- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
-- PCI_EXP_LNKSTA_NLW_SHIFT;
-+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
- if (pcie->init_link_width > current_link_width) {
- dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
- val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
-@@ -760,8 +760,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
-
- val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
- PCI_EXP_LNKSTA);
-- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
-- PCI_EXP_LNKSTA_NLW_SHIFT;
-+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
-
- val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
- PCI_EXP_LNKCTL);
-@@ -920,7 +919,7 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
- /* Configure Max lane width from DT */
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_MLW;
-- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
-+ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
-
- /* Clear Slot Clock Configuration bit if SRNS configuration */
-diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
-index 60810a1fbfb75..29fe09c99e7d9 100644
---- a/drivers/pci/controller/pci-mvebu.c
-+++ b/drivers/pci/controller/pci-mvebu.c
-@@ -264,7 +264,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
- */
- lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
- lnkcap &= ~PCI_EXP_LNKCAP_MLW;
-- lnkcap |= (port->is_x4 ? 4 : 1) << 4;
-+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
- mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
-
- /* Disable Root Bridge I/O space, memory space and bus mastering. */
-diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
-index ad56df98b8e63..1c1c1aa940a51 100644
---- a/drivers/pci/controller/vmd.c
-+++ b/drivers/pci/controller/vmd.c
-@@ -525,8 +525,7 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
- base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
- PCI_DEVFN(dev, 0), 0);
-
-- hdr_type = readb(base + PCI_HEADER_TYPE) &
-- PCI_HEADER_TYPE_MASK;
-+ hdr_type = readb(base + PCI_HEADER_TYPE);
-
- functions = (hdr_type & 0x80) ? 8 : 1;
- for (fn = 0; fn < functions; fn++) {
-diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
-index 5a4a8b0be6262..a7d3a92391a41 100644
---- a/drivers/pci/endpoint/pci-epc-core.c
-+++ b/drivers/pci/endpoint/pci-epc-core.c
-@@ -869,7 +869,6 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
-
- put_dev:
- put_device(&epc->dev);
-- kfree(epc);
-
- err_ret:
- return ERR_PTR(ret);
-diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
-index a05350a4e49cb..05b7357bd2586 100644
---- a/drivers/pci/pci-acpi.c
-+++ b/drivers/pci/pci-acpi.c
-@@ -911,7 +911,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
- {
- int acpi_state, d_max;
-
-- if (pdev->no_d3cold)
-+ if (pdev->no_d3cold || !pdev->d3cold_allowed)
- d_max = ACPI_STATE_D3_HOT;
- else
- d_max = ACPI_STATE_D3_COLD;
-diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
-index d9eede2dbc0e1..3317b93547167 100644
---- a/drivers/pci/pci-sysfs.c
-+++ b/drivers/pci/pci-sysfs.c
-@@ -12,7 +12,7 @@
- * Modeled after usb's driverfs.c
- */
-
--
-+#include <linux/bitfield.h>
- #include <linux/kernel.h>
- #include <linux/sched.h>
- #include <linux/pci.h>
-@@ -230,8 +230,7 @@ static ssize_t current_link_width_show(struct device *dev,
- if (err)
- return -EINVAL;
-
-- return sysfs_emit(buf, "%u\n",
-- (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
-+ return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
- }
- static DEVICE_ATTR_RO(current_link_width);
-
-@@ -530,10 +529,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
- return -EINVAL;
-
- pdev->d3cold_allowed = !!val;
-- if (pdev->d3cold_allowed)
-- pci_d3cold_enable(pdev);
-- else
-- pci_d3cold_disable(pdev);
-+ pci_bridge_d3_update(pdev);
-
- pm_runtime_resume(dev);
-
-diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
-index 59c01d68c6d5e..a607f277ccf10 100644
---- a/drivers/pci/pci.c
-+++ b/drivers/pci/pci.c
-@@ -732,15 +732,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
- {
- u16 vsec = 0;
- u32 header;
-+ int ret;
-
- if (vendor != dev->vendor)
- return 0;
-
- while ((vsec = pci_find_next_ext_capability(dev, vsec,
- PCI_EXT_CAP_ID_VNDR))) {
-- if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
-- &header) == PCIBIOS_SUCCESSFUL &&
-- PCI_VNDR_HEADER_ID(header) == cap)
-+ ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
-+ if (ret != PCIBIOS_SUCCESSFUL)
-+ continue;
-+
-+ if (PCI_VNDR_HEADER_ID(header) == cap)
- return vsec;
- }
-
-@@ -3752,14 +3755,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
- return 0;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
-- cap &= PCI_REBAR_CAP_SIZES;
-+ cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
-
- /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
- if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
-- bar == 0 && cap == 0x7000)
-- cap = 0x3f000;
-+ bar == 0 && cap == 0x700)
-+ return 0x3f00;
-
-- return cap >> 4;
-+ return cap;
- }
- EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
-
-@@ -6257,8 +6260,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
-
- next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
-- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
-- PCI_EXP_LNKSTA_NLW_SHIFT;
-+ next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
-
- next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
-
-@@ -6330,7 +6332,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
-
- pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
- if (lnkcap)
-- return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
-+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
-
- return PCIE_LNK_WIDTH_UNKNOWN;
- }
-diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
-index 9c8fd69ae5ad8..40d84cb0c601e 100644
---- a/drivers/pci/pcie/aer.c
-+++ b/drivers/pci/pcie/aer.c
-@@ -29,6 +29,7 @@
- #include <linux/kfifo.h>
- #include <linux/slab.h>
- #include <acpi/apei.h>
-+#include <acpi/ghes.h>
- #include <ras/ras_event.h>
-
- #include "../pci.h"
-@@ -997,6 +998,15 @@ static void aer_recover_work_func(struct work_struct *work)
- continue;
- }
- cper_print_aer(pdev, entry.severity, entry.regs);
-+ /*
-+	 * Memory for aer_capability_regs (entry.regs) is allocated from the
-+	 * ghes_estatus_pool to protect it from being overwritten when multiple
-+	 * sections are present in the error status, so free it after the data
-+	 * has been processed.
-+ */
-+ ghes_estatus_pool_region_free((unsigned long)entry.regs,
-+ sizeof(struct aer_capability_regs));
-+
- if (entry.severity == AER_NONFATAL)
- pcie_do_recovery(pdev, pci_channel_io_normal,
- aer_root_reset);
-diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
-index 1bf6300592644..fc18e42f0a6ed 100644
---- a/drivers/pci/pcie/aspm.c
-+++ b/drivers/pci/pcie/aspm.c
-@@ -1059,7 +1059,8 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_disable |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
-- link->aspm_disable |= ASPM_STATE_L1;
-+ /* L1 PM substates require L1 */
-+ link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_disable |= ASPM_STATE_L1_1;
- if (state & PCIE_LINK_STATE_L1_2)
-@@ -1247,6 +1248,8 @@ static ssize_t aspm_attr_store_common(struct device *dev,
- link->aspm_disable &= ~ASPM_STATE_L1;
- } else {
- link->aspm_disable |= state;
-+ if (state & ASPM_STATE_L1)
-+ link->aspm_disable |= ASPM_STATE_L1SS;
- }
-
- pcie_config_aspm_link(link, policy_to_aspm_state(link));
-diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 795534589b985..43159965e09e9 100644
---- a/drivers/pci/probe.c
-+++ b/drivers/pci/probe.c
-@@ -1652,15 +1652,15 @@ static void pci_set_removable(struct pci_dev *dev)
- static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
- {
- #ifdef CONFIG_PCI_QUIRKS
-- int pos;
-+ int pos, ret;
- u32 header, tmp;
-
- pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
-
- for (pos = PCI_CFG_SPACE_SIZE;
- pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
-- if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
-- || header != tmp)
-+ ret = pci_read_config_dword(dev, pos, &tmp);
-+ if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
- return false;
- }
-
-diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index eeec1d6f90238..ae95d09507722 100644
---- a/drivers/pci/quirks.c
-+++ b/drivers/pci/quirks.c
-@@ -690,7 +690,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
- /*
- * In the AMD NL platform, this device ([1022:7912]) has a class code of
- * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
-- * claim it.
-+ * claim it. The same applies on the VanGogh platform device ([1022:163a]).
- *
- * But the dwc3 driver is a more specific driver for this device, and we'd
- * prefer to use it instead of xhci. To prevent xhci from claiming the
-@@ -698,7 +698,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
- * defines as "USB device (not host controller)". The dwc3 driver can then
- * claim it based on its Vendor and Device ID.
- */
--static void quirk_amd_nl_class(struct pci_dev *pdev)
-+static void quirk_amd_dwc_class(struct pci_dev *pdev)
- {
- u32 class = pdev->class;
-
-@@ -708,7 +708,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
- class, pdev->class);
- }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
-- quirk_amd_nl_class);
-+ quirk_amd_dwc_class);
-+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
-+ quirk_amd_dwc_class);
-
- /*
- * Synopsys USB 3.x host HAPS platform has a class code of
-@@ -5383,7 +5385,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
- */
- static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
- {
-- int pos, i = 0;
-+ int pos, i = 0, ret;
- u8 next_cap;
- u16 reg16, *cap;
- struct pci_cap_saved_state *state;
-@@ -5429,8 +5431,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
- pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
-
- pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
-- if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
-- PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
-+ ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
-+ if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
- pdev->cfg_size = PCI_CFG_SPACE_SIZE;
-
- if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
-@@ -5507,6 +5509,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
- DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
-
- #ifdef CONFIG_PCI_ATS
-+static void quirk_no_ats(struct pci_dev *pdev)
-+{
-+ pci_info(pdev, "disabling ATS\n");
-+ pdev->ats_cap = 0;
-+}
-+
- /*
- * Some devices require additional driver setup to enable ATS. Don't use
- * ATS for those devices as ATS will be enabled before the driver has had a
-@@ -5520,14 +5528,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
- (pdev->subsystem_device == 0xce19 ||
- pdev->subsystem_device == 0xcc10 ||
- pdev->subsystem_device == 0xcc08))
-- goto no_ats;
-- else
-- return;
-+ quirk_no_ats(pdev);
-+ } else {
-+ quirk_no_ats(pdev);
- }
--
--no_ats:
-- pci_info(pdev, "disabling ATS\n");
-- pdev->ats_cap = 0;
- }
-
- /* AMD Stoney platform GPU */
-@@ -5550,6 +5554,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
- /* AMD Raven platform iGPU */
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
-+
-+/*
-+ * Intel IPU E2000 revisions before C0 implement incorrect endianness
-+ * in ATS Invalidate Request message body. Disable ATS for those devices.
-+ */
-+static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
-+{
-+ if (pdev->revision < 0x20)
-+ quirk_no_ats(pdev);
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
- #endif /* CONFIG_PCI_ATS */
-
- /* Freescale PCIe doesn't support MSI in RC mode */
-@@ -6188,3 +6211,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
-+
-+/*
-+ * Devices known to require a longer delay before first config space access
-+ * after reset recovery or resume from D3cold:
-+ *
-+ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
-+ */
-+static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
-+{
-+ pdev->d3cold_delay = 1000;
-+}
-+DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
-diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
-index 5658745c398f5..b33be1e63c98f 100644
---- a/drivers/pcmcia/cs.c
-+++ b/drivers/pcmcia/cs.c
-@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
- dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
- skt->thread = NULL;
- complete(&skt->thread_done);
-+ put_device(&skt->dev);
- return 0;
- }
- ret = pccard_sysfs_add_socket(&skt->dev);
-diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
-index d500e5dbbc3f5..b4b8363d1de21 100644
---- a/drivers/pcmcia/ds.c
-+++ b/drivers/pcmcia/ds.c
-@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
- /* by default don't allow DMA */
- p_dev->dma_mask = 0;
- p_dev->dev.dma_mask = &p_dev->dma_mask;
-- dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
-- if (!dev_name(&p_dev->dev))
-- goto err_free;
- p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
- if (!p_dev->devname)
- goto err_free;
-@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
-
- pcmcia_device_query(p_dev);
-
-- if (device_register(&p_dev->dev))
-- goto err_unreg;
-+ dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
-+ if (device_register(&p_dev->dev)) {
-+ mutex_lock(&s->ops_mutex);
-+ list_del(&p_dev->socket_device_list);
-+ s->device_count--;
-+ mutex_unlock(&s->ops_mutex);
-+ put_device(&p_dev->dev);
-+ return NULL;
-+ }
-
- return p_dev;
-
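Both PCMCIA fixes apply the same driver-core rule: once a device has been
initialised or registered, its embedded kobject owns the allocation, so error
paths must drop the last reference with put_device() (which ends in the
release callback) rather than freeing or leaking it. A sketch of the idiom,
with hypothetical names:

	dev_set_name(&p->dev, "example%d", id);
	if (device_register(&p->dev)) {
		/* undo list/bookkeeping done before registering, then: */
		put_device(&p->dev);	/* not kfree(p): release() frees it */
		return NULL;
	}
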
-diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
-index 6b50bc5519846..caae2d3e9d3ea 100644
---- a/drivers/perf/arm-cmn.c
-+++ b/drivers/perf/arm-cmn.c
-@@ -112,7 +112,9 @@
-
- #define CMN_DTM_PMEVCNTSR 0x240
-
--#define CMN_DTM_UNIT_INFO 0x0910
-+#define CMN650_DTM_UNIT_INFO 0x0910
-+#define CMN_DTM_UNIT_INFO 0x0960
-+#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0)
-
- #define CMN_DTM_NUM_COUNTERS 4
- /* Want more local counters? Why not replicate the whole DTM! Ugh... */
-@@ -2117,6 +2119,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
- return 0;
- }
-
-+static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
-+{
-+ int offset = CMN_DTM_UNIT_INFO;
-+
-+ if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
-+ offset = CMN650_DTM_UNIT_INFO;
-+
-+ return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
-+}
-+
- static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
- {
- int level;
-@@ -2248,7 +2260,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
- if (cmn->part == PART_CMN600)
- xp->dtc = 0xf;
- else
-- xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
-+ xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region);
-
- xp->dtm = dtm - cmn->dtms;
- arm_cmn_init_dtm(dtm++, xp, 0);
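
The new arm_cmn_dtc_domain() helper extracts the DTC-domain bitfield with
FIELD_GET() and a GENMASK_ULL() mask from <linux/bitfield.h>, rather than
assuming the field starts at bit 0. Minimal sketch of the accessor pattern
(register offset and field width are illustrative only):

	u32 info = readl_relaxed(base + 0x960);
	unsigned int domain = FIELD_GET(GENMASK_ULL(1, 0), info);	/* bits [1:0] */
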
-diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
-index e2b7827c45635..9363c31f31b89 100644
---- a/drivers/perf/arm_cspmu/arm_cspmu.c
-+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
-@@ -635,6 +635,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
-
- cspmu = to_arm_cspmu(event->pmu);
-
-+ if (event->attr.type != event->pmu->type)
-+ return -ENOENT;
-+
- /*
- * Following other "uncore" PMUs, we do not support sampling mode or
- * attach to a task (per-process mode).
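
The perf core offers each new event to every registered PMU in turn; an
event_init that does not first compare event->attr.type with its own
pmu->type can claim events meant for another PMU. The fix above (and the
hisi_pcie one below) returns -ENOENT before touching any state. Sketch of
the guard, with a hypothetical PMU name:

	static int my_pmu_event_init(struct perf_event *event)
	{
		if (event->attr.type != event->pmu->type)
			return -ENOENT;	/* not ours; core tries the next PMU */

		/* ... only now validate config and set up hw state ... */
		return 0;
	}
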
-diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
-index 8fcaa26f0f8a6..d681638ec6b82 100644
---- a/drivers/perf/arm_pmuv3.c
-+++ b/drivers/perf/arm_pmuv3.c
-@@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
- #define ARMV8_IDX_TO_COUNTER(x) \
- (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-
--static inline u32 armv8pmu_pmcr_read(void)
-+static inline u64 armv8pmu_pmcr_read(void)
- {
- return read_pmcr();
- }
-
--static inline void armv8pmu_pmcr_write(u32 val)
-+static inline void armv8pmu_pmcr_write(u64 val)
- {
- val &= ARMV8_PMU_PMCR_MASK;
- isb();
-@@ -957,7 +957,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
- static void armv8pmu_reset(void *info)
- {
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-- u32 pmcr;
-+ u64 pmcr;
-
- /* The counter and interrupt enable registers are unknown at reset. */
- armv8pmu_disable_counter(U32_MAX);
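
PMCR_EL0 is a 64-bit register, so caching it in a u32 silently discards the
upper half across a read-modify-write cycle; the hunk widens the accessors
and the local to u64. A two-line illustration of the hazard being removed,
assuming the read_pmcr()/write_pmcr() helpers used above:

	u32 lo = read_pmcr();	/* upper 32 bits silently truncated */
	write_pmcr(lo);		/* write-back would clear any high bits */
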
-diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
-index 5a00adb2de8c9..051efffc44c82 100644
---- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
-+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
-@@ -353,6 +353,10 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
- struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
-
-+ /* Check the type first before going on, otherwise it's not our event */
-+ if (event->attr.type != event->pmu->type)
-+ return -ENOENT;
-+
- event->cpu = pcie_pmu->on_cpu;
-
- if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
-@@ -360,9 +364,6 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
- else
- hwc->event_base = HISI_PCIE_CNT;
-
-- if (event->attr.type != event->pmu->type)
-- return -ENOENT;
--
- /* Sampling is not supported. */
- if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
- return -EOPNOTSUPP;
-diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
-index d941e746b4248..797cf201996a9 100644
---- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
-+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
-@@ -505,8 +505,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
- ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
- if (ret) {
- dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
-- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
-- &pa_pmu->node);
-+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
-+ &pa_pmu->node);
- return ret;
- }
-
-diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
-index 6fe534a665eda..e706ca5676764 100644
---- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
-+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
-@@ -450,8 +450,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
- ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
- if (ret) {
- dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
-- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
-- &sllc_pmu->node);
-+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
-+ &sllc_pmu->node);
- return ret;
- }
-
-diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
-index e0457d84af6b3..16869bf5bf4cc 100644
---- a/drivers/perf/hisilicon/hns3_pmu.c
-+++ b/drivers/perf/hisilicon/hns3_pmu.c
-@@ -1556,8 +1556,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
- ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
- if (ret) {
- pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
-- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
-- &hns3_pmu->node);
-+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
-+ &hns3_pmu->node);
- }
-
- return ret;
-@@ -1568,8 +1568,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
- struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
-
- perf_pmu_unregister(&hns3_pmu->pmu);
-- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
-- &hns3_pmu->node);
-+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
-+ &hns3_pmu->node);
- }
-
- static int hns3_pmu_init_dev(struct pci_dev *pdev)
-diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
-index 96c7f670c8f0d..cd8a2b9efd787 100644
---- a/drivers/perf/riscv_pmu_sbi.c
-+++ b/drivers/perf/riscv_pmu_sbi.c
-@@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
-
- if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
- (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-- on_each_cpu_mask(mm_cpumask(event->owner->mm),
-- pmu_sbi_set_scounteren, (void *)event, 1);
-+ pmu_sbi_set_scounteren((void *)event);
- }
-
- static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
-@@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
-
- if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
- (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-- on_each_cpu_mask(mm_cpumask(event->owner->mm),
-- pmu_sbi_reset_scounteren, (void *)event, 1);
-+ pmu_sbi_reset_scounteren((void *)event);
-
- ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
- if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
-@@ -689,6 +687,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
-
- /* Firmware counters don't support overflow yet */
- fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
-+ if (fidx == RISCV_MAX_COUNTERS) {
-+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
-+ return IRQ_NONE;
-+ }
-+
- event = cpu_hw_evt->events[fidx];
- if (!event) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
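
find_first_bit() returns the bitmap size when no bit is set, so without the
new guard a spurious overflow interrupt arriving with no counters in use
would index events[] out of bounds. The sentinel check is the standard idiom:

	unsigned long fidx = find_first_bit(used, RISCV_MAX_COUNTERS);
	if (fidx == RISCV_MAX_COUNTERS)	/* "not found" == bitmap size */
		return IRQ_NONE;	/* nothing of ours is running */
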
-diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
-index d1670bbe6d6bc..e4502958fd62d 100644
---- a/drivers/phy/Kconfig
-+++ b/drivers/phy/Kconfig
-@@ -87,7 +87,6 @@ source "drivers/phy/motorola/Kconfig"
- source "drivers/phy/mscc/Kconfig"
- source "drivers/phy/qualcomm/Kconfig"
- source "drivers/phy/ralink/Kconfig"
--source "drivers/phy/realtek/Kconfig"
- source "drivers/phy/renesas/Kconfig"
- source "drivers/phy/rockchip/Kconfig"
- source "drivers/phy/samsung/Kconfig"
-diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
-index 868a220ed0f6d..fb3dc9de61115 100644
---- a/drivers/phy/Makefile
-+++ b/drivers/phy/Makefile
-@@ -26,7 +26,6 @@ obj-y += allwinner/ \
- mscc/ \
- qualcomm/ \
- ralink/ \
-- realtek/ \
- renesas/ \
- rockchip/ \
- samsung/ \
-diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
-index 52c275fbb2a1c..d4fb85c20eb0f 100644
---- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
-+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
-@@ -24,23 +24,73 @@
- #define EUSB2_FORCE_VAL_5 0xeD
- #define V_CLK_19P2M_EN BIT(6)
-
-+#define EUSB2_TUNE_USB2_CROSSOVER 0x50
- #define EUSB2_TUNE_IUSB2 0x51
-+#define EUSB2_TUNE_RES_FSDIF 0x52
-+#define EUSB2_TUNE_HSDISC 0x53
- #define EUSB2_TUNE_SQUELCH_U 0x54
-+#define EUSB2_TUNE_USB2_SLEW 0x55
-+#define EUSB2_TUNE_USB2_EQU 0x56
- #define EUSB2_TUNE_USB2_PREEM 0x57
-+#define EUSB2_TUNE_USB2_HS_COMP_CUR 0x58
-+#define EUSB2_TUNE_EUSB_SLEW 0x59
-+#define EUSB2_TUNE_EUSB_EQU 0x5A
-+#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-
--#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v) \
-+#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
- { \
-- .offset = o, \
-+ .reg = r, \
- .val = v, \
- }
-
--struct eusb2_repeater_init_tbl {
-- unsigned int offset;
-- unsigned int val;
-+enum reg_fields {
-+ F_TUNE_EUSB_HS_COMP_CUR,
-+ F_TUNE_EUSB_EQU,
-+ F_TUNE_EUSB_SLEW,
-+ F_TUNE_USB2_HS_COMP_CUR,
-+ F_TUNE_USB2_PREEM,
-+ F_TUNE_USB2_EQU,
-+ F_TUNE_USB2_SLEW,
-+ F_TUNE_SQUELCH_U,
-+ F_TUNE_HSDISC,
-+ F_TUNE_RES_FSDIF,
-+ F_TUNE_IUSB2,
-+ F_TUNE_USB2_CROSSOVER,
-+ F_NUM_TUNE_FIELDS,
-+
-+ F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
-+ F_FORCE_EN_5,
-+
-+ F_EN_CTL1,
-+
-+ F_RPTR_STATUS,
-+ F_NUM_FIELDS,
-+};
-+
-+static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
-+ [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
-+ [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
-+ [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
-+ [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
-+ [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
-+ [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
-+ [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
-+ [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
-+ [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
-+ [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
-+ [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
-+ [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-+
-+ [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
-+ [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-+
-+ [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-+
-+ [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
- };
-
- struct eusb2_repeater_cfg {
-- const struct eusb2_repeater_init_tbl *init_tbl;
-+ const u32 *init_tbl;
- int init_tbl_num;
- const char * const *vreg_list;
- int num_vregs;
-@@ -48,11 +98,10 @@ struct eusb2_repeater_cfg {
-
- struct eusb2_repeater {
- struct device *dev;
-- struct regmap *regmap;
-+ struct regmap_field *regs[F_NUM_FIELDS];
- struct phy *phy;
- struct regulator_bulk_data *vregs;
- const struct eusb2_repeater_cfg *cfg;
-- u16 base;
- enum phy_mode mode;
- };
-
-@@ -60,10 +109,10 @@ static const char * const pm8550b_vreg_l[] = {
- "vdd18", "vdd3",
- };
-
--static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
-- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
-- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
-- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
-+static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
-+ [F_TUNE_IUSB2] = 0x8,
-+ [F_TUNE_SQUELCH_U] = 0x3,
-+ [F_TUNE_USB2_PREEM] = 0x5,
- };
-
- static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
-@@ -91,9 +140,9 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
-
- static int eusb2_repeater_init(struct phy *phy)
- {
-+ struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
- struct eusb2_repeater *rptr = phy_get_drvdata(phy);
-- const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
-- int num = rptr->cfg->init_tbl_num;
-+ const u32 *init_tbl = rptr->cfg->init_tbl;
- u32 val;
- int ret;
- int i;
-@@ -102,17 +151,21 @@ static int eusb2_repeater_init(struct phy *phy)
- if (ret)
- return ret;
-
-- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
-- EUSB2_RPTR_EN, EUSB2_RPTR_EN);
-+ regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
-
-- for (i = 0; i < num; i++)
-- regmap_update_bits(rptr->regmap,
-- rptr->base + init_tbl[i].offset,
-- init_tbl[i].val, init_tbl[i].val);
-+ for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
-+ if (init_tbl[i]) {
-+ regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-+ } else {
-+ /* Write 0 if there's no value set */
-+ u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-+
-+ regmap_field_update_bits(rptr->regs[i], mask, 0);
-+ }
-+ }
-
-- ret = regmap_read_poll_timeout(rptr->regmap,
-- rptr->base + EUSB2_RPTR_STATUS, val,
-- val & RPTR_OK, 10, 5);
-+ ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
-+ val, val & RPTR_OK, 10, 5);
- if (ret)
- dev_err(rptr->dev, "initialization timed-out\n");
-
-@@ -131,10 +184,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
- * per the eUSB 1.2 spec. Below is a software workaround until the
- * PHY and controller are fixed for the observed behaviour.
- */
-- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
-- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
-- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
-- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
-+ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
-+ F_CLK_19P2M_EN, F_CLK_19P2M_EN);
-+ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
-+ V_CLK_19P2M_EN, V_CLK_19P2M_EN);
- break;
- case PHY_MODE_USB_DEVICE:
- /*
-@@ -143,10 +196,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
- * repeater doesn't clear previous value due to shared
- * regulators (say host <-> device mode switch).
- */
-- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
-- F_CLK_19P2M_EN, 0);
-- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
-- V_CLK_19P2M_EN, 0);
-+ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
-+ F_CLK_19P2M_EN, 0);
-+ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
-+ V_CLK_19P2M_EN, 0);
- break;
- default:
- return -EINVAL;
-@@ -175,8 +228,9 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
- struct device *dev = &pdev->dev;
- struct phy_provider *phy_provider;
- struct device_node *np = dev->of_node;
-+ struct regmap *regmap;
-+ int i, ret;
- u32 res;
-- int ret;
-
- rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
- if (!rptr)
-@@ -189,15 +243,22 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
- if (!rptr->cfg)
- return -EINVAL;
-
-- rptr->regmap = dev_get_regmap(dev->parent, NULL);
-- if (!rptr->regmap)
-+ regmap = dev_get_regmap(dev->parent, NULL);
-+ if (!regmap)
- return -ENODEV;
-
- ret = of_property_read_u32(np, "reg", &res);
- if (ret < 0)
- return ret;
-
-- rptr->base = res;
-+ for (i = 0; i < F_NUM_FIELDS; i++)
-+ eusb2_repeater_tune_reg_fields[i].reg += res;
-+
-+ ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
-+ eusb2_repeater_tune_reg_fields,
-+ F_NUM_FIELDS);
-+ if (ret)
-+ return ret;
-
- ret = eusb2_repeater_init_vregs(rptr);
- if (ret < 0) {
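
The rework above trades open-coded base+offset arithmetic for the
regmap_field API: REG_FIELD() names a register and bit range, the descriptors
are bound to a regmap with devm_regmap_field_bulk_alloc(), and reads and
writes then touch only that field. A minimal sketch against a hypothetical
register layout:

	static const struct reg_field en = REG_FIELD(0x10, 0, 0);	/* bit 0 of reg 0x10 */
	struct regmap_field *f;

	f = devm_regmap_field_alloc(dev, regmap, en);
	if (IS_ERR(f))
		return PTR_ERR(f);
	regmap_field_write(f, 1);	/* updates only bits [0:0] of 0x10 */
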
-diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
-deleted file mode 100644
-index 75ac7e7c31aec..0000000000000
---- a/drivers/phy/realtek/Kconfig
-+++ /dev/null
-@@ -1,32 +0,0 @@
--# SPDX-License-Identifier: GPL-2.0
--#
--# Phy drivers for Realtek platforms
--#
--
--if ARCH_REALTEK || COMPILE_TEST
--
--config PHY_RTK_RTD_USB2PHY
-- tristate "Realtek RTD USB2 PHY Transceiver Driver"
-- depends on USB_SUPPORT
-- select GENERIC_PHY
-- select USB_PHY
-- select USB_COMMON
-- help
-- Enable this to support Realtek SoC USB2 phy transceiver.
-- The DHC (digital home center) RTD series SoCs used the Synopsys
-- DWC3 USB IP. This driver will do the PHY initialization
-- of the parameters.
--
--config PHY_RTK_RTD_USB3PHY
-- tristate "Realtek RTD USB3 PHY Transceiver Driver"
-- depends on USB_SUPPORT
-- select GENERIC_PHY
-- select USB_PHY
-- select USB_COMMON
-- help
-- Enable this to support Realtek SoC USB3 phy transceiver.
-- The DHC (digital home center) RTD series SoCs used the Synopsys
-- DWC3 USB IP. This driver will do the PHY initialization
-- of the parameters.
--
--endif # ARCH_REALTEK || COMPILE_TEST
-diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
-deleted file mode 100644
-index ed7b47ff8a268..0000000000000
---- a/drivers/phy/realtek/Makefile
-+++ /dev/null
-@@ -1,3 +0,0 @@
--# SPDX-License-Identifier: GPL-2.0
--obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
--obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
-diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
-deleted file mode 100644
-index aedc78bd37f73..0000000000000
---- a/drivers/phy/realtek/phy-rtk-usb2.c
-+++ /dev/null
-@@ -1,1325 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * phy-rtk-usb2.c RTK usb2.0 PHY driver
-- *
-- * Copyright (C) 2023 Realtek Semiconductor Corporation
-- *
-- */
--
--#include <linux/module.h>
--#include <linux/of.h>
--#include <linux/of_device.h>
--#include <linux/of_address.h>
--#include <linux/uaccess.h>
--#include <linux/debugfs.h>
--#include <linux/nvmem-consumer.h>
--#include <linux/regmap.h>
--#include <linux/sys_soc.h>
--#include <linux/mfd/syscon.h>
--#include <linux/phy/phy.h>
--#include <linux/usb.h>
--#include <linux/usb/phy.h>
--#include <linux/usb/hcd.h>
--
--/* GUSB2PHYACCn register */
--#define PHY_NEW_REG_REQ BIT(25)
--#define PHY_VSTS_BUSY BIT(23)
--#define PHY_VCTRL_SHIFT 8
--#define PHY_REG_DATA_MASK 0xff
--
--#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
--#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
--
--#define EFUS_USB_DC_CAL_RATE 2
--#define EFUS_USB_DC_CAL_MAX 7
--
--#define EFUS_USB_DC_DIS_RATE 1
--#define EFUS_USB_DC_DIS_MAX 7
--
--#define MAX_PHY_DATA_SIZE 20
--#define OFFSET_PHY_READ 0x20
--
--#define MAX_USB_PHY_NUM 4
--#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
--#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
--#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
--
--#define SET_PAGE_OFFSET 0xf4
--#define SET_PAGE_0 0x9b
--#define SET_PAGE_1 0xbb
--#define SET_PAGE_2 0xdb
--
--#define PAGE_START 0xe0
--#define PAGE0_0XE4 0xe4
--#define PAGE0_0XE6 0xe6
--#define PAGE0_0XE7 0xe7
--#define PAGE1_0XE0 0xe0
--#define PAGE1_0XE2 0xe2
--
--#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
--#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
--#define DEFAULT_DC_DRIVING_VALUE (0x8)
--#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
--#define HS_CLK_SELECT BIT(6)
--
--struct phy_reg {
-- void __iomem *reg_wrap_vstatus;
-- void __iomem *reg_gusb2phyacc0;
-- int vstatus_index;
--};
--
--struct phy_data {
-- u8 addr;
-- u8 data;
--};
--
--struct phy_cfg {
-- int page0_size;
-- struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
-- int page1_size;
-- struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
-- int page2_size;
-- struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
--
-- int num_phy;
--
-- bool check_efuse;
-- int check_efuse_version;
--#define CHECK_EFUSE_V1 1
--#define CHECK_EFUSE_V2 2
-- int efuse_dc_driving_rate;
-- int efuse_dc_disconnect_rate;
-- int dc_driving_mask;
-- int dc_disconnect_mask;
-- bool usb_dc_disconnect_at_page0;
-- int driving_updated_for_dev_dis;
--
-- bool do_toggle;
-- bool do_toggle_driving;
-- bool use_default_parameter;
-- bool is_double_sensitivity_mode;
--};
--
--struct phy_parameter {
-- struct phy_reg phy_reg;
--
-- /* Get from efuse */
-- s8 efuse_usb_dc_cal;
-- s8 efuse_usb_dc_dis;
--
-- /* Get from dts */
-- bool inverse_hstx_sync_clock;
-- u32 driving_level;
-- s32 driving_level_compensate;
-- s32 disconnection_compensate;
--};
--
--struct rtk_phy {
-- struct usb_phy phy;
-- struct device *dev;
--
-- struct phy_cfg *phy_cfg;
-- int num_phy;
-- struct phy_parameter *phy_parameter;
--
-- struct dentry *debug_dir;
--};
--
--/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ... 0xF7 to 15 */
--static inline int page_addr_to_array_index(u8 addr)
--{
-- return (int)((((addr) - PAGE_START) & 0x7) +
-- ((((addr) - PAGE_START) & 0x10) >> 1));
--}
--
--static inline u8 array_index_to_page_addr(int index)
--{
-- return ((((index) + PAGE_START) & 0x7) +
-- ((((index) & 0x8) << 1) + PAGE_START));
--}
--
--#define PHY_IO_TIMEOUT_USEC (50000)
--#define PHY_IO_DELAY_US (100)
--
--static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
--{
-- int ret;
-- unsigned int val;
--
-- ret = read_poll_timeout(readl, val, ((val & mask) == result),
-- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
-- if (ret) {
-- pr_err("%s can't program USB phy\n", __func__);
-- return -ETIMEDOUT;
-- }
--
-- return 0;
--}
--
--static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
--{
-- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
-- unsigned int val;
-- int ret = 0;
--
-- addr -= OFFSET_PHY_READ;
--
-- /* polling until VBusy == 0 */
-- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
-- if (ret)
-- return (char)ret;
--
-- /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
-- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
-- writel(val, reg_gusb2phyacc0);
-- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
-- if (ret)
-- return (char)ret;
--
-- /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
-- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
-- writel(val, reg_gusb2phyacc0);
-- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
-- if (ret)
-- return (char)ret;
--
-- val = readl(reg_gusb2phyacc0);
--
-- return (char)(val & PHY_REG_DATA_MASK);
--}
--
--static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
--{
-- unsigned int val;
-- void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
-- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
-- int shift_bits = phy_reg->vstatus_index * 8;
-- int ret = 0;
--
-- /* write data to VStatusOut2 (data output to phy) */
-- writel((u32)data << shift_bits, reg_wrap_vstatus);
--
-- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
-- if (ret)
-- return ret;
--
-- /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
-- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
--
-- writel(val, reg_gusb2phyacc0);
-- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
-- if (ret)
-- return ret;
--
-- /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
-- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
--
-- writel(val, reg_gusb2phyacc0);
-- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
-- if (ret)
-- return ret;
--
-- return 0;
--}
--
--static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
--{
-- switch (page) {
-- case 0:
-- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
-- case 1:
-- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
-- case 2:
-- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
-- default:
-- pr_err("%s error page=%d\n", __func__, page);
-- }
--
-- return -EINVAL;
--}
--
--static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
-- struct phy_parameter *phy_parameter, u8 data)
--{
-- u8 ret;
-- s32 val;
-- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-- int offset = 4;
--
-- val = (s32)((data >> offset) & dc_disconnect_mask)
-- + phy_parameter->efuse_usb_dc_dis
-- + phy_parameter->disconnection_compensate;
--
-- if (val > dc_disconnect_mask)
-- val = dc_disconnect_mask;
-- else if (val < 0)
-- val = 0;
--
-- ret = (data & (~(dc_disconnect_mask << offset))) |
-- (val & dc_disconnect_mask) << offset;
--
-- return ret;
--}
--
--/* updated disconnect level at page0 */
--static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter, bool update)
--{
-- struct phy_cfg *phy_cfg;
-- struct phy_reg *phy_reg;
-- struct phy_data *phy_data_page;
-- struct phy_data *phy_data;
-- u8 addr, data;
-- int offset = 4;
-- s32 dc_disconnect_mask;
-- int i;
--
-- phy_cfg = rtk_phy->phy_cfg;
-- phy_reg = &phy_parameter->phy_reg;
--
-- /* Set page 0 */
-- phy_data_page = phy_cfg->page0;
-- rtk_phy_set_page(phy_reg, 0);
--
-- i = page_addr_to_array_index(PAGE0_0XE4);
-- phy_data = phy_data_page + i;
-- if (!phy_data->addr) {
-- phy_data->addr = PAGE0_0XE4;
-- phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
-- }
--
-- addr = phy_data->addr;
-- data = phy_data->data;
-- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
--
-- if (update)
-- data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
-- else
-- data = (data & ~(dc_disconnect_mask << offset)) |
-- (DEFAULT_DC_DISCONNECTION_VALUE << offset);
--
-- if (rtk_phy_write(phy_reg, addr, data))
-- dev_err(rtk_phy->dev,
-- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
-- __func__, addr, data);
--}
--
--static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
-- struct phy_parameter *phy_parameter, u8 data)
--{
-- u8 ret;
-- s32 val;
-- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
--
-- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
-- val = (s32)(data & dc_disconnect_mask)
-- + phy_parameter->efuse_usb_dc_dis
-- + phy_parameter->disconnection_compensate;
-- } else { /* for CHECK_EFUSE_V2 or no efuse */
-- if (phy_parameter->efuse_usb_dc_dis)
-- val = (s32)(phy_parameter->efuse_usb_dc_dis +
-- phy_parameter->disconnection_compensate);
-- else
-- val = (s32)((data & dc_disconnect_mask) +
-- phy_parameter->disconnection_compensate);
-- }
--
-- if (val > dc_disconnect_mask)
-- val = dc_disconnect_mask;
-- else if (val < 0)
-- val = 0;
--
-- ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
--
-- return ret;
--}
--
--/* updated disconnect level at page1 */
--static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter, bool update)
--{
-- struct phy_cfg *phy_cfg;
-- struct phy_data *phy_data_page;
-- struct phy_data *phy_data;
-- struct phy_reg *phy_reg;
-- u8 addr, data;
-- s32 dc_disconnect_mask;
-- int i;
--
-- phy_cfg = rtk_phy->phy_cfg;
-- phy_reg = &phy_parameter->phy_reg;
--
-- /* Set page 1 */
-- phy_data_page = phy_cfg->page1;
-- rtk_phy_set_page(phy_reg, 1);
--
-- i = page_addr_to_array_index(PAGE1_0XE2);
-- phy_data = phy_data_page + i;
-- if (!phy_data->addr) {
-- phy_data->addr = PAGE1_0XE2;
-- phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
-- }
--
-- addr = phy_data->addr;
-- data = phy_data->data;
-- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
--
-- if (update)
-- data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
-- else
-- data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
--
-- if (rtk_phy_write(phy_reg, addr, data))
-- dev_err(rtk_phy->dev,
-- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
-- __func__, addr, data);
--}
--
--static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter, bool update)
--{
-- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
--
-- if (phy_cfg->usb_dc_disconnect_at_page0)
-- update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
-- else
-- update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
--}
--
--static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
-- struct phy_parameter *phy_parameter, u8 data)
--{
-- s32 driving_level_compensate = phy_parameter->driving_level_compensate;
-- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
-- s32 val;
-- u8 ret;
--
-- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
-- val = (s32)(data & dc_driving_mask) + driving_level_compensate
-- + phy_parameter->efuse_usb_dc_cal;
-- } else { /* for CHECK_EFUSE_V2 or no efuse */
-- if (phy_parameter->efuse_usb_dc_cal)
-- val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
-- + driving_level_compensate);
-- else
-- val = (s32)(data & dc_driving_mask);
-- }
--
-- if (val > dc_driving_mask)
-- val = dc_driving_mask;
-- else if (val < 0)
-- val = 0;
--
-- ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
--
-- return ret;
--}
--
--static void update_dc_driving_level(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter)
--{
-- struct phy_cfg *phy_cfg;
-- struct phy_reg *phy_reg;
--
-- phy_reg = &phy_parameter->phy_reg;
-- phy_cfg = rtk_phy->phy_cfg;
-- if (!phy_cfg->page0[4].addr) {
-- rtk_phy_set_page(phy_reg, 0);
-- phy_cfg->page0[4].addr = PAGE0_0XE4;
-- phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
-- }
--
-- if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
-- u32 dc_driving_mask;
-- u8 driving_level;
-- u8 data;
--
-- data = phy_cfg->page0[4].data;
-- dc_driving_mask = phy_cfg->dc_driving_mask;
-- driving_level = data & dc_driving_mask;
--
-- dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
-- __func__, driving_level, phy_parameter->driving_level);
--
-- phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
-- (phy_parameter->driving_level & dc_driving_mask);
-- }
--
-- phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
-- phy_parameter,
-- phy_cfg->page0[4].data);
--}
--
--static void update_hs_clk_select(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter)
--{
-- struct phy_cfg *phy_cfg;
-- struct phy_reg *phy_reg;
--
-- phy_cfg = rtk_phy->phy_cfg;
-- phy_reg = &phy_parameter->phy_reg;
--
-- if (phy_parameter->inverse_hstx_sync_clock) {
-- if (!phy_cfg->page0[6].addr) {
-- rtk_phy_set_page(phy_reg, 0);
-- phy_cfg->page0[6].addr = PAGE0_0XE6;
-- phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
-- }
--
-- phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
-- }
--}
--
--static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
-- int index, bool connect)
--{
-- struct phy_parameter *phy_parameter;
-- struct phy_cfg *phy_cfg;
-- struct phy_reg *phy_reg;
-- struct phy_data *phy_data_page;
-- u8 addr, data;
-- int i;
--
-- phy_cfg = rtk_phy->phy_cfg;
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-- phy_reg = &phy_parameter->phy_reg;
--
-- if (!phy_cfg->do_toggle)
-- goto out;
--
-- if (phy_cfg->is_double_sensitivity_mode)
-- goto do_toggle_driving;
--
-- /* Set page 0 */
-- rtk_phy_set_page(phy_reg, 0);
--
-- addr = PAGE0_0XE7;
-- data = rtk_phy_read(phy_reg, addr);
--
-- if (connect)
-- rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
-- else
-- rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
--
--do_toggle_driving:
--
-- if (!phy_cfg->do_toggle_driving)
-- goto do_toggle;
--
-- /* Page 0 addr 0xE4 driving capability */
--
-- /* Set page 0 */
-- phy_data_page = phy_cfg->page0;
-- rtk_phy_set_page(phy_reg, 0);
--
-- i = page_addr_to_array_index(PAGE0_0XE4);
-- addr = phy_data_page[i].addr;
-- data = phy_data_page[i].data;
--
-- if (connect) {
-- rtk_phy_write(phy_reg, addr, data);
-- } else {
-- u8 value;
-- s32 tmp;
-- s32 driving_updated =
-- phy_cfg->driving_updated_for_dev_dis;
-- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
--
-- tmp = (s32)(data & dc_driving_mask) + driving_updated;
--
-- if (tmp > dc_driving_mask)
-- tmp = dc_driving_mask;
-- else if (tmp < 0)
-- tmp = 0;
--
-- value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
--
-- rtk_phy_write(phy_reg, addr, value);
-- }
--
--do_toggle:
-- /* restore dc disconnect level before toggle */
-- update_dc_disconnect_level(rtk_phy, phy_parameter, false);
--
-- /* Set page 1 */
-- rtk_phy_set_page(phy_reg, 1);
--
-- addr = PAGE1_0XE0;
-- data = rtk_phy_read(phy_reg, addr);
--
-- rtk_phy_write(phy_reg, addr, data &
-- (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
-- mdelay(1);
-- rtk_phy_write(phy_reg, addr, data |
-- (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
--
-- /* update dc disconnect level after toggle */
-- update_dc_disconnect_level(rtk_phy, phy_parameter, true);
--
--out:
-- return;
--}
--
--static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
--{
-- struct phy_parameter *phy_parameter;
-- struct phy_cfg *phy_cfg;
-- struct phy_data *phy_data_page;
-- struct phy_reg *phy_reg;
-- int i;
--
-- phy_cfg = rtk_phy->phy_cfg;
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-- phy_reg = &phy_parameter->phy_reg;
--
-- if (phy_cfg->use_default_parameter) {
-- dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
-- __func__, index);
-- goto do_toggle;
-- }
--
-- /* Set page 0 */
-- phy_data_page = phy_cfg->page0;
-- rtk_phy_set_page(phy_reg, 0);
--
-- for (i = 0; i < phy_cfg->page0_size; i++) {
-- struct phy_data *phy_data = phy_data_page + i;
-- u8 addr = phy_data->addr;
-- u8 data = phy_data->data;
--
-- if (!addr)
-- continue;
--
-- if (rtk_phy_write(phy_reg, addr, data)) {
-- dev_err(rtk_phy->dev,
-- "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
-- __func__, addr, data);
-- return -EINVAL;
-- }
-- }
--
-- /* Set page 1 */
-- phy_data_page = phy_cfg->page1;
-- rtk_phy_set_page(phy_reg, 1);
--
-- for (i = 0; i < phy_cfg->page1_size; i++) {
-- struct phy_data *phy_data = phy_data_page + i;
-- u8 addr = phy_data->addr;
-- u8 data = phy_data->data;
--
-- if (!addr)
-- continue;
--
-- if (rtk_phy_write(phy_reg, addr, data)) {
-- dev_err(rtk_phy->dev,
-- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
-- __func__, addr, data);
-- return -EINVAL;
-- }
-- }
--
-- if (phy_cfg->page2_size == 0)
-- goto do_toggle;
--
-- /* Set page 2 */
-- phy_data_page = phy_cfg->page2;
-- rtk_phy_set_page(phy_reg, 2);
--
-- for (i = 0; i < phy_cfg->page2_size; i++) {
-- struct phy_data *phy_data = phy_data_page + i;
-- u8 addr = phy_data->addr;
-- u8 data = phy_data->data;
--
-- if (!addr)
-- continue;
--
-- if (rtk_phy_write(phy_reg, addr, data)) {
-- dev_err(rtk_phy->dev,
-- "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
-- __func__, addr, data);
-- return -EINVAL;
-- }
-- }
--
--do_toggle:
-- do_rtk_phy_toggle(rtk_phy, index, false);
--
-- return 0;
--}
--
--static int rtk_phy_init(struct phy *phy)
--{
-- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
-- unsigned long phy_init_time = jiffies;
-- int i, ret = 0;
--
-- if (!rtk_phy)
-- return -EINVAL;
--
-- for (i = 0; i < rtk_phy->num_phy; i++)
-- ret = do_rtk_phy_init(rtk_phy, i);
--
-- dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
-- jiffies_to_msecs(jiffies - phy_init_time));
-- return ret;
--}
--
--static int rtk_phy_exit(struct phy *phy)
--{
-- return 0;
--}
--
--static const struct phy_ops ops = {
-- .init = rtk_phy_init,
-- .exit = rtk_phy_exit,
-- .owner = THIS_MODULE,
--};
--
--static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port)
--{
-- int index = port;
-- struct rtk_phy *rtk_phy = NULL;
--
-- rtk_phy = dev_get_drvdata(usb2_phy->dev);
--
-- if (index > rtk_phy->num_phy) {
-- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
-- __func__, index, rtk_phy->num_phy);
-- return;
-- }
--
-- do_rtk_phy_toggle(rtk_phy, index, connect);
--}
--
--static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
-- u16 portstatus, u16 portchange)
--{
-- bool connect = false;
--
-- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
-- __func__, port, (int)portstatus, (int)portchange);
-- if (portstatus & USB_PORT_STAT_CONNECTION)
-- connect = true;
--
-- if (portchange & USB_PORT_STAT_C_CONNECTION)
-- rtk_phy_toggle(x, connect, port);
--
-- return 0;
--}
--
--#ifdef CONFIG_DEBUG_FS
--static struct dentry *create_phy_debug_root(void)
--{
-- struct dentry *phy_debug_root;
--
-- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
-- if (!phy_debug_root)
-- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
--
-- return phy_debug_root;
--}
--
--static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
--{
-- struct rtk_phy *rtk_phy = s->private;
-- struct phy_cfg *phy_cfg;
-- int i, index;
--
-- phy_cfg = rtk_phy->phy_cfg;
--
-- seq_puts(s, "Property:\n");
-- seq_printf(s, " check_efuse: %s\n",
-- phy_cfg->check_efuse ? "Enable" : "Disable");
-- seq_printf(s, " check_efuse_version: %d\n",
-- phy_cfg->check_efuse_version);
-- seq_printf(s, " efuse_dc_driving_rate: %d\n",
-- phy_cfg->efuse_dc_driving_rate);
-- seq_printf(s, " dc_driving_mask: 0x%x\n",
-- phy_cfg->dc_driving_mask);
-- seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
-- phy_cfg->efuse_dc_disconnect_rate);
-- seq_printf(s, " dc_disconnect_mask: 0x%x\n",
-- phy_cfg->dc_disconnect_mask);
-- seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
-- phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
-- seq_printf(s, " do_toggle: %s\n",
-- phy_cfg->do_toggle ? "Enable" : "Disable");
-- seq_printf(s, " do_toggle_driving: %s\n",
-- phy_cfg->do_toggle_driving ? "Enable" : "Disable");
-- seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
-- phy_cfg->driving_updated_for_dev_dis);
-- seq_printf(s, " use_default_parameter: %s\n",
-- phy_cfg->use_default_parameter ? "Enable" : "Disable");
-- seq_printf(s, " is_double_sensitivity_mode: %s\n",
-- phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
--
-- for (index = 0; index < rtk_phy->num_phy; index++) {
-- struct phy_parameter *phy_parameter;
-- struct phy_reg *phy_reg;
-- struct phy_data *phy_data_page;
--
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-- phy_reg = &phy_parameter->phy_reg;
--
-- seq_printf(s, "PHY %d:\n", index);
--
-- seq_puts(s, "Page 0:\n");
-- /* Set page 0 */
-- phy_data_page = phy_cfg->page0;
-- rtk_phy_set_page(phy_reg, 0);
--
-- for (i = 0; i < phy_cfg->page0_size; i++) {
-- struct phy_data *phy_data = phy_data_page + i;
-- u8 addr = array_index_to_page_addr(i);
-- u8 data = phy_data->data;
-- u8 value = rtk_phy_read(phy_reg, addr);
--
-- if (phy_data->addr)
-- seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
-- addr, data, value);
-- else
-- seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
-- addr, value);
-- }
--
-- seq_puts(s, "Page 1:\n");
-- /* Set page 1 */
-- phy_data_page = phy_cfg->page1;
-- rtk_phy_set_page(phy_reg, 1);
--
-- for (i = 0; i < phy_cfg->page1_size; i++) {
-- struct phy_data *phy_data = phy_data_page + i;
-- u8 addr = array_index_to_page_addr(i);
-- u8 data = phy_data->data;
-- u8 value = rtk_phy_read(phy_reg, addr);
--
-- if (phy_data->addr)
-- seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
-- addr, data, value);
-- else
-- seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
-- addr, value);
-- }
--
-- if (phy_cfg->page2_size == 0)
-- goto out;
--
-- seq_puts(s, "Page 2:\n");
-- /* Set page 2 */
-- phy_data_page = phy_cfg->page2;
-- rtk_phy_set_page(phy_reg, 2);
--
-- for (i = 0; i < phy_cfg->page2_size; i++) {
-- struct phy_data *phy_data = phy_data_page + i;
-- u8 addr = array_index_to_page_addr(i);
-- u8 data = phy_data->data;
-- u8 value = rtk_phy_read(phy_reg, addr);
--
-- if (phy_data->addr)
-- seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
-- addr, data, value);
-- else
-- seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
-- addr, value);
-- }
--
--out:
-- seq_puts(s, "PHY Property:\n");
-- seq_printf(s, " efuse_usb_dc_cal: %d\n",
-- (int)phy_parameter->efuse_usb_dc_cal);
-- seq_printf(s, " efuse_usb_dc_dis: %d\n",
-- (int)phy_parameter->efuse_usb_dc_dis);
-- seq_printf(s, " inverse_hstx_sync_clock: %s\n",
-- phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
-- seq_printf(s, " driving_level: %d\n",
-- phy_parameter->driving_level);
-- seq_printf(s, " driving_level_compensate: %d\n",
-- phy_parameter->driving_level_compensate);
-- seq_printf(s, " disconnection_compensate: %d\n",
-- phy_parameter->disconnection_compensate);
-- }
--
-- return 0;
--}
--DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
--
--static inline void create_debug_files(struct rtk_phy *rtk_phy)
--{
-- struct dentry *phy_debug_root = NULL;
--
-- phy_debug_root = create_phy_debug_root();
-- if (!phy_debug_root)
-- return;
--
-- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
-- phy_debug_root);
--
-- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
-- &rtk_usb2_parameter_fops);
--
-- return;
--}
--
--static inline void remove_debug_files(struct rtk_phy *rtk_phy)
--{
-- debugfs_remove_recursive(rtk_phy->debug_dir);
--}
--#else
--static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
--static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
--#endif /* CONFIG_DEBUG_FS */
--
--static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter, int index)
--{
-- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
-- u8 value = 0;
-- struct nvmem_cell *cell;
-- struct soc_device_attribute rtk_soc_groot[] = {
-- { .family = "Realtek Groot",},
-- { /* empty */ } };
--
-- if (!phy_cfg->check_efuse)
-- goto out;
--
-- /* Read efuse for usb dc cal */
-- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
-- if (IS_ERR(cell)) {
-- dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
-- __func__, PTR_ERR(cell));
-- } else {
-- unsigned char *buf;
-- size_t buf_size;
--
-- buf = nvmem_cell_read(cell, &buf_size);
-- if (!IS_ERR(buf)) {
-- value = buf[0] & phy_cfg->dc_driving_mask;
-- kfree(buf);
-- }
-- nvmem_cell_put(cell);
-- }
--
-- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
-- int rate = phy_cfg->efuse_dc_driving_rate;
--
-- if (value <= EFUS_USB_DC_CAL_MAX)
-- phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
-- else
-- phy_parameter->efuse_usb_dc_cal = -(int8_t)
-- ((EFUS_USB_DC_CAL_MAX & value) * rate);
--
-- if (soc_device_match(rtk_soc_groot)) {
-- dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
--
-- /* We don't multiply by dc_cal_rate=2 for positive dc cal compensation */
-- if (value <= EFUS_USB_DC_CAL_MAX)
-- phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
--
-- /* We cap dc cal compensation at 0x8 when the OTP value is 0x7 */
-- if (value == 0x7)
-- phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
-- }
-- } else { /* for CHECK_EFUSE_V2 */
-- phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
-- }
--
-- /* Read efuse for usb dc disconnect level */
-- value = 0;
-- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
-- if (IS_ERR(cell)) {
-- dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
-- __func__, PTR_ERR(cell));
-- } else {
-- unsigned char *buf;
-- size_t buf_size;
--
-- buf = nvmem_cell_read(cell, &buf_size);
-- if (!IS_ERR(buf)) {
-- value = buf[0] & phy_cfg->dc_disconnect_mask;
-- kfree(buf);
-- }
-- nvmem_cell_put(cell);
-- }
--
-- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
-- int rate = phy_cfg->efuse_dc_disconnect_rate;
--
-- if (value <= EFUS_USB_DC_DIS_MAX)
-- phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
-- else
-- phy_parameter->efuse_usb_dc_dis = -(int8_t)
-- ((EFUS_USB_DC_DIS_MAX & value) * rate);
-- } else { /* for CHECK_EFUSE_V2 */
-- phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
-- }
--
--out:
-- return 0;
--}
--
--static int parse_phy_data(struct rtk_phy *rtk_phy)
--{
-- struct device *dev = rtk_phy->dev;
-- struct device_node *np = dev->of_node;
-- struct phy_parameter *phy_parameter;
-- int ret = 0;
-- int index;
--
-- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
-- rtk_phy->num_phy, GFP_KERNEL);
-- if (!rtk_phy->phy_parameter)
-- return -ENOMEM;
--
-- for (index = 0; index < rtk_phy->num_phy; index++) {
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
--
-- phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
-- phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
-- phy_parameter->phy_reg.vstatus_index = index;
--
-- if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
-- phy_parameter->inverse_hstx_sync_clock = true;
-- else
-- phy_parameter->inverse_hstx_sync_clock = false;
--
-- if (of_property_read_u32_index(np, "realtek,driving-level",
-- index, &phy_parameter->driving_level))
-- phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
--
-- if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
-- index, &phy_parameter->driving_level_compensate))
-- phy_parameter->driving_level_compensate = 0;
--
-- if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
-- index, &phy_parameter->disconnection_compensate))
-- phy_parameter->disconnection_compensate = 0;
--
-- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
--
-- update_dc_driving_level(rtk_phy, phy_parameter);
--
-- update_hs_clk_select(rtk_phy, phy_parameter);
-- }
--
-- return ret;
--}
--
--static int rtk_usb2phy_probe(struct platform_device *pdev)
--{
-- struct rtk_phy *rtk_phy;
-- struct device *dev = &pdev->dev;
-- struct phy *generic_phy;
-- struct phy_provider *phy_provider;
-- const struct phy_cfg *phy_cfg;
-- int ret = 0;
--
-- phy_cfg = of_device_get_match_data(dev);
-- if (!phy_cfg) {
-- dev_err(dev, "phy config are not assigned!\n");
-- return -EINVAL;
-- }
--
-- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
-- if (!rtk_phy)
-- return -ENOMEM;
--
-- rtk_phy->dev = &pdev->dev;
-- rtk_phy->phy.dev = rtk_phy->dev;
-- rtk_phy->phy.label = "rtk-usb2phy";
-- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
--
-- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
--
-- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
--
-- rtk_phy->num_phy = phy_cfg->num_phy;
--
-- ret = parse_phy_data(rtk_phy);
-- if (ret)
-- goto err;
--
-- platform_set_drvdata(pdev, rtk_phy);
--
-- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
-- if (IS_ERR(generic_phy))
-- return PTR_ERR(generic_phy);
--
-- phy_set_drvdata(generic_phy, rtk_phy);
--
-- phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
-- of_phy_simple_xlate);
-- if (IS_ERR(phy_provider))
-- return PTR_ERR(phy_provider);
--
-- ret = usb_add_phy_dev(&rtk_phy->phy);
-- if (ret)
-- goto err;
--
-- create_debug_files(rtk_phy);
--
--err:
-- return ret;
--}
--
--static void rtk_usb2phy_remove(struct platform_device *pdev)
--{
-- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
--
-- remove_debug_files(rtk_phy);
--
-- usb_remove_phy(&rtk_phy->phy);
--}
--
--static const struct phy_cfg rtd1295_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [0] = {0xe0, 0x90},
-- [3] = {0xe3, 0x3a},
-- [4] = {0xe4, 0x68},
-- [6] = {0xe6, 0x91},
-- [13] = {0xf5, 0x81},
-- [15] = {0xf7, 0x02}, },
-- .page1_size = 8,
-- .page1 = { /* default parameter */ },
-- .page2_size = 0,
-- .page2 = { /* no parameter */ },
-- .num_phy = 1,
-- .check_efuse = false,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = 1,
-- .dc_driving_mask = 0xf,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = true,
-- .do_toggle = true,
-- .do_toggle_driving = false,
-- .driving_updated_for_dev_dis = 0xf,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = false,
--};
--
--static const struct phy_cfg rtd1395_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [4] = {0xe4, 0xac},
-- [13] = {0xf5, 0x00},
-- [15] = {0xf7, 0x02}, },
-- .page1_size = 8,
-- .page1 = { /* default parameter */ },
-- .page2_size = 0,
-- .page2 = { /* no parameter */ },
-- .num_phy = 1,
-- .check_efuse = false,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = 1,
-- .dc_driving_mask = 0xf,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = true,
-- .do_toggle = true,
-- .do_toggle_driving = false,
-- .driving_updated_for_dev_dis = 0xf,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = false,
--};
--
--static const struct phy_cfg rtd1395_phy_cfg_2port = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [4] = {0xe4, 0xac},
-- [13] = {0xf5, 0x00},
-- [15] = {0xf7, 0x02}, },
-- .page1_size = 8,
-- .page1 = { /* default parameter */ },
-- .page2_size = 0,
-- .page2 = { /* no parameter */ },
-- .num_phy = 2,
-- .check_efuse = false,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = 1,
-- .dc_driving_mask = 0xf,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = true,
-- .do_toggle = true,
-- .do_toggle_driving = false,
-- .driving_updated_for_dev_dis = 0xf,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = false,
--};
--
--static const struct phy_cfg rtd1619_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [4] = {0xe4, 0x68}, },
-- .page1_size = 8,
-- .page1 = { /* default parameter */ },
-- .page2_size = 0,
-- .page2 = { /* no parameter */ },
-- .num_phy = 1,
-- .check_efuse = true,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = 1,
-- .dc_driving_mask = 0xf,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = true,
-- .do_toggle = true,
-- .do_toggle_driving = false,
-- .driving_updated_for_dev_dis = 0xf,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = false,
--};
--
--static const struct phy_cfg rtd1319_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [0] = {0xe0, 0x18},
-- [4] = {0xe4, 0x6a},
-- [7] = {0xe7, 0x71},
-- [13] = {0xf5, 0x15},
-- [15] = {0xf7, 0x32}, },
-- .page1_size = 8,
-- .page1 = { [3] = {0xe3, 0x44}, },
-- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
-- .page2 = { [0] = {0xe0, 0x01}, },
-- .num_phy = 1,
-- .check_efuse = true,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = 1,
-- .dc_driving_mask = 0xf,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = true,
-- .do_toggle = true,
-- .do_toggle_driving = true,
-- .driving_updated_for_dev_dis = 0xf,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = true,
--};
--
--static const struct phy_cfg rtd1312c_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [0] = {0xe0, 0x14},
-- [4] = {0xe4, 0x67},
-- [5] = {0xe5, 0x55}, },
-- .page1_size = 8,
-- .page1 = { [3] = {0xe3, 0x23},
-- [6] = {0xe6, 0x58}, },
-- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
-- .page2 = { /* default parameter */ },
-- .num_phy = 1,
-- .check_efuse = true,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = 1,
-- .dc_driving_mask = 0xf,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = true,
-- .do_toggle = true,
-- .do_toggle_driving = true,
-- .driving_updated_for_dev_dis = 0xf,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = true,
--};
--
--static const struct phy_cfg rtd1619b_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [0] = {0xe0, 0xa3},
-- [4] = {0xe4, 0x88},
-- [5] = {0xe5, 0x4f},
-- [6] = {0xe6, 0x02}, },
-- .page1_size = 8,
-- .page1 = { [3] = {0xe3, 0x64}, },
-- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
-- .page2 = { [7] = {0xe7, 0x45}, },
-- .num_phy = 1,
-- .check_efuse = true,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
-- .dc_driving_mask = 0x1f,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = false,
-- .do_toggle = true,
-- .do_toggle_driving = true,
-- .driving_updated_for_dev_dis = 0x8,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = true,
--};
--
--static const struct phy_cfg rtd1319d_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [0] = {0xe0, 0xa3},
-- [4] = {0xe4, 0x8e},
-- [5] = {0xe5, 0x4f},
-- [6] = {0xe6, 0x02}, },
-- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
-- .page1 = { [14] = {0xf5, 0x1}, },
-- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
-- .page2 = { [7] = {0xe7, 0x44}, },
-- .check_efuse = true,
-- .num_phy = 1,
-- .check_efuse_version = CHECK_EFUSE_V1,
-- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
-- .dc_driving_mask = 0x1f,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = false,
-- .do_toggle = true,
-- .do_toggle_driving = false,
-- .driving_updated_for_dev_dis = 0x8,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = true,
--};
--
--static const struct phy_cfg rtd1315e_phy_cfg = {
-- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
-- .page0 = { [0] = {0xe0, 0xa3},
-- [4] = {0xe4, 0x8c},
-- [5] = {0xe5, 0x4f},
-- [6] = {0xe6, 0x02}, },
-- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
-- .page1 = { [3] = {0xe3, 0x7f},
-- [14] = {0xf5, 0x01}, },
-- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
-- .page2 = { [7] = {0xe7, 0x44}, },
-- .num_phy = 1,
-- .check_efuse = true,
-- .check_efuse_version = CHECK_EFUSE_V2,
-- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
-- .dc_driving_mask = 0x1f,
-- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
-- .dc_disconnect_mask = 0xf,
-- .usb_dc_disconnect_at_page0 = false,
-- .do_toggle = true,
-- .do_toggle_driving = false,
-- .driving_updated_for_dev_dis = 0x8,
-- .use_default_parameter = false,
-- .is_double_sensitivity_mode = true,
--};
--
--static const struct of_device_id usbphy_rtk_dt_match[] = {
-- { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
-- { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
-- { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
-- { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
-- { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
-- { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
-- { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
-- { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
-- { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
-- {},
--};
--MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
--
--static struct platform_driver rtk_usb2phy_driver = {
-- .probe = rtk_usb2phy_probe,
-- .remove_new = rtk_usb2phy_remove,
-- .driver = {
-- .name = "rtk-usb2phy",
-- .of_match_table = usbphy_rtk_dt_match,
-- },
--};
--
--module_platform_driver(rtk_usb2phy_driver);
--
--MODULE_LICENSE("GPL");
--MODULE_ALIAS("platform: rtk-usb2phy");
--MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
--MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
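
Both removed Realtek PHY drivers poll their busy bits through
read_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a
condition holds or a time budget lapses. Sketch of the pattern (BUSY_BIT is a
placeholder):

	u32 val;
	int ret = read_poll_timeout(readl, val, !(val & BUSY_BIT),
				    100, 50000, false, reg);	/* 100us poll, 50ms cap */
	if (ret)
		return -ETIMEDOUT;	/* BUSY never cleared */
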
-diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
-deleted file mode 100644
-index dfb3122f3f114..0000000000000
---- a/drivers/phy/realtek/phy-rtk-usb3.c
-+++ /dev/null
-@@ -1,761 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * phy-rtk-usb3.c RTK usb3.0 PHY driver
-- *
-- * Copyright (C) 2023 Realtek Semiconductor Corporation
-- *
-- */
--
--#include <linux/module.h>
--#include <linux/of.h>
--#include <linux/of_device.h>
--#include <linux/of_address.h>
--#include <linux/uaccess.h>
--#include <linux/debugfs.h>
--#include <linux/nvmem-consumer.h>
--#include <linux/regmap.h>
--#include <linux/sys_soc.h>
--#include <linux/mfd/syscon.h>
--#include <linux/phy/phy.h>
--#include <linux/usb.h>
--#include <linux/usb/hcd.h>
--#include <linux/usb/phy.h>
--
--#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
--#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
--#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
--#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
--
--#define MAX_USB_PHY_DATA_SIZE 0x30
--#define PHY_ADDR_0X09 0x09
--#define PHY_ADDR_0X0B 0x0b
--#define PHY_ADDR_0X0D 0x0d
--#define PHY_ADDR_0X10 0x10
--#define PHY_ADDR_0X1F 0x1f
--#define PHY_ADDR_0X20 0x20
--#define PHY_ADDR_0X21 0x21
--#define PHY_ADDR_0X30 0x30
--
--#define REG_0X09_FORCE_CALIBRATION BIT(9)
--#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
--#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
--#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
--#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
--#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
--
--#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
--#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
--#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
--#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
--#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
--#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
--
--#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
--#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
--
--struct phy_reg {
-- void __iomem *reg_mdio_ctl;
--};
--
--struct phy_data {
-- u8 addr;
-- u16 data;
--};
--
--struct phy_cfg {
-- int param_size;
-- struct phy_data param[MAX_USB_PHY_DATA_SIZE];
--
-- bool check_efuse;
-- bool do_toggle;
-- bool do_toggle_once;
-- bool use_default_parameter;
-- bool check_rx_front_end_offset;
--};
--
--struct phy_parameter {
-- struct phy_reg phy_reg;
--
-- /* Get from efuse */
-- u8 efuse_usb_u3_tx_lfps_swing_trim;
--
-- /* Get from dts */
-- u32 amplitude_control_coarse;
-- u32 amplitude_control_fine;
--};
--
--struct rtk_phy {
-- struct usb_phy phy;
-- struct device *dev;
--
-- struct phy_cfg *phy_cfg;
-- int num_phy;
-- struct phy_parameter *phy_parameter;
--
-- struct dentry *debug_dir;
--};
--
--#define PHY_IO_TIMEOUT_USEC (50000)
--#define PHY_IO_DELAY_US (100)
--
--static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
--{
-- int ret;
-- unsigned int val;
--
-- ret = read_poll_timeout(readl, val, ((val & mask) == result),
-- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
-- if (ret) {
-- pr_err("%s can't program USB phy\n", __func__);
-- return -ETIMEDOUT;
-- }
--
-- return 0;
--}
--
--static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
--{
-- return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
--}
--
--static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
--{
-- unsigned int tmp;
-- u32 value;
--
-- tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
--
-- writel(tmp, phy_reg->reg_mdio_ctl);
--
-- rtk_phy3_wait_vbusy(phy_reg);
--
-- value = readl(phy_reg->reg_mdio_ctl);
-- value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
--
-- return (u16)value;
--}
--
--static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
--{
-- unsigned int val;
--
-- val = USB_MDIO_CTRL_PHY_WRITE |
-- (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
-- (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
--
-- writel(val, phy_reg->reg_mdio_ctl);
--
-- rtk_phy3_wait_vbusy(phy_reg);
--
-- return 0;
--}
--
--static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
--{
-- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
-- struct phy_reg *phy_reg;
-- struct phy_parameter *phy_parameter;
-- struct phy_data *phy_data;
-- u8 addr;
-- u16 data;
-- int i;
--
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-- phy_reg = &phy_parameter->phy_reg;
--
-- if (!phy_cfg->do_toggle)
-- return;
--
-- i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
-- phy_data = phy_cfg->param + i;
-- addr = phy_data->addr;
-- data = phy_data->data;
--
-- if (!addr && !data) {
-- addr = PHY_ADDR_0X09;
-- data = rtk_phy_read(phy_reg, addr);
-- phy_data->addr = addr;
-- phy_data->data = data;
-- }
--
-- rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
-- mdelay(1);
-- rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
--}
--
--static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
--{
-- struct phy_cfg *phy_cfg;
-- struct phy_reg *phy_reg;
-- struct phy_parameter *phy_parameter;
-- int i = 0;
--
-- phy_cfg = rtk_phy->phy_cfg;
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-- phy_reg = &phy_parameter->phy_reg;
--
-- if (phy_cfg->use_default_parameter)
-- goto do_toggle;
--
-- for (i = 0; i < phy_cfg->param_size; i++) {
-- struct phy_data *phy_data = phy_cfg->param + i;
-- u8 addr = phy_data->addr;
-- u16 data = phy_data->data;
--
-- if (!addr && !data)
-- continue;
--
-- rtk_phy_write(phy_reg, addr, data);
-- }
--
--do_toggle:
-- if (phy_cfg->do_toggle_once)
-- phy_cfg->do_toggle = true;
--
-- do_rtk_usb3_phy_toggle(rtk_phy, index, false);
--
-- if (phy_cfg->do_toggle_once) {
-- u16 check_value = 0;
-- int count = 10;
-- u16 value_0x0d, value_0x10;
--
-- /* Enable Debug mode by set 0x0D and 0x10 */
-- value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
-- value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
--
-- rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
-- value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
-- rtk_phy_write(phy_reg, PHY_ADDR_0X10,
-- (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
-- REG_0X10_DEBUG_MODE_SETTING);
--
-- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
--
-- while (!(check_value & BIT(15))) {
-- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
-- mdelay(1);
-- if (count-- < 0)
-- break;
-- }
--
-- if (!(check_value & BIT(15)))
-- dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
-- PHY_ADDR_0X30, check_value);
--
-- /* Disable Debug mode by set 0x0D and 0x10 to default*/
-- rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
-- rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
--
-- phy_cfg->do_toggle = false;
-- }
--
-- if (phy_cfg->check_rx_front_end_offset) {
-- u16 rx_offset_code, rx_offset_range;
-- u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
-- u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
-- bool do_update = false;
--
-- rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
-- if (((rx_offset_code & code_mask) == 0x0) ||
-- ((rx_offset_code & code_mask) == code_mask))
-- do_update = true;
--
-- rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
-- if (((rx_offset_range & range_mask) == range_mask) && do_update) {
-- dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
-- rx_offset_code, rx_offset_range);
-- do_update = false;
-- }
--
-- if (do_update) {
-- u16 tmp1, tmp2;
--
-- tmp1 = rx_offset_range & (~range_mask);
-- tmp2 = rx_offset_range & range_mask;
-- tmp2 += (1 << 2);
-- rx_offset_range = tmp1 | (tmp2 & range_mask);
-- rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
-- goto do_toggle;
-- }
-- }
--
-- return 0;
--}
--
--static int rtk_phy_init(struct phy *phy)
--{
-- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
-- int ret = 0;
-- int i;
-- unsigned long phy_init_time = jiffies;
--
-- for (i = 0; i < rtk_phy->num_phy; i++)
-- ret = do_rtk_phy_init(rtk_phy, i);
--
-- dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
-- jiffies_to_msecs(jiffies - phy_init_time));
--
-- return ret;
--}
--
--static int rtk_phy_exit(struct phy *phy)
--{
-- return 0;
--}
--
--static const struct phy_ops ops = {
-- .init = rtk_phy_init,
-- .exit = rtk_phy_exit,
-- .owner = THIS_MODULE,
--};
--
--static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
--{
-- int index = port;
-- struct rtk_phy *rtk_phy = NULL;
--
-- rtk_phy = dev_get_drvdata(usb3_phy->dev);
--
-- if (index > rtk_phy->num_phy) {
-- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
-- __func__, index, rtk_phy->num_phy);
-- return;
-- }
--
-- do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
--}
--
--static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
-- u16 portstatus, u16 portchange)
--{
-- bool connect = false;
--
-- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
-- __func__, port, (int)portstatus, (int)portchange);
-- if (portstatus & USB_PORT_STAT_CONNECTION)
-- connect = true;
--
-- if (portchange & USB_PORT_STAT_C_CONNECTION)
-- rtk_phy_toggle(x, connect, port);
--
-- return 0;
--}
--
--#ifdef CONFIG_DEBUG_FS
--static struct dentry *create_phy_debug_root(void)
--{
-- struct dentry *phy_debug_root;
--
-- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
-- if (!phy_debug_root)
-- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
--
-- return phy_debug_root;
--}
--
--static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
--{
-- struct rtk_phy *rtk_phy = s->private;
-- struct phy_cfg *phy_cfg;
-- int i, index;
--
-- phy_cfg = rtk_phy->phy_cfg;
--
-- seq_puts(s, "Property:\n");
-- seq_printf(s, " check_efuse: %s\n",
-- phy_cfg->check_efuse ? "Enable" : "Disable");
-- seq_printf(s, " do_toggle: %s\n",
-- phy_cfg->do_toggle ? "Enable" : "Disable");
-- seq_printf(s, " do_toggle_once: %s\n",
-- phy_cfg->do_toggle_once ? "Enable" : "Disable");
-- seq_printf(s, " use_default_parameter: %s\n",
-- phy_cfg->use_default_parameter ? "Enable" : "Disable");
--
-- for (index = 0; index < rtk_phy->num_phy; index++) {
-- struct phy_reg *phy_reg;
-- struct phy_parameter *phy_parameter;
--
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-- phy_reg = &phy_parameter->phy_reg;
--
-- seq_printf(s, "PHY %d:\n", index);
--
-- for (i = 0; i < phy_cfg->param_size; i++) {
-- struct phy_data *phy_data = phy_cfg->param + i;
-- u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
-- u16 data = phy_data->data;
--
-- if (!phy_data->addr && !data)
-- seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
-- addr, rtk_phy_read(phy_reg, addr));
-- else
-- seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
-- addr, data, rtk_phy_read(phy_reg, addr));
-- }
--
-- seq_puts(s, "PHY Property:\n");
-- seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
-- (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
-- seq_printf(s, " amplitude_control_coarse: 0x%x\n",
-- (int)phy_parameter->amplitude_control_coarse);
-- seq_printf(s, " amplitude_control_fine: 0x%x\n",
-- (int)phy_parameter->amplitude_control_fine);
-- }
--
-- return 0;
--}
--DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
--
--static inline void create_debug_files(struct rtk_phy *rtk_phy)
--{
-- struct dentry *phy_debug_root = NULL;
--
-- phy_debug_root = create_phy_debug_root();
--
-- if (!phy_debug_root)
-- return;
--
-- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
--
-- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
-- &rtk_usb3_parameter_fops);
--
-- return;
--}
--
--static inline void remove_debug_files(struct rtk_phy *rtk_phy)
--{
-- debugfs_remove_recursive(rtk_phy->debug_dir);
--}
--#else
--static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
--static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
--#endif /* CONFIG_DEBUG_FS */
--
--static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter, int index)
--{
-- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
-- u8 value = 0;
-- struct nvmem_cell *cell;
--
-- if (!phy_cfg->check_efuse)
-- goto out;
--
-- cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
-- if (IS_ERR(cell)) {
-- dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
-- __func__, PTR_ERR(cell));
-- } else {
-- unsigned char *buf;
-- size_t buf_size;
--
-- buf = nvmem_cell_read(cell, &buf_size);
-- if (!IS_ERR(buf)) {
-- value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
-- kfree(buf);
-- }
-- nvmem_cell_put(cell);
-- }
--
-- if (value > 0 && value < 0x8)
-- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
-- else
-- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
--
--out:
-- return 0;
--}
--
--static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
-- struct phy_parameter *phy_parameter)
--{
-- struct phy_cfg *phy_cfg;
-- struct phy_reg *phy_reg;
--
-- phy_reg = &phy_parameter->phy_reg;
-- phy_cfg = rtk_phy->phy_cfg;
--
-- if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
-- u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
-- u16 data;
--
-- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
-- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
-- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
-- } else {
-- data = phy_cfg->param[PHY_ADDR_0X20].data;
-- }
--
-- data &= (~val_mask);
-- data |= (phy_parameter->amplitude_control_coarse & val_mask);
--
-- phy_cfg->param[PHY_ADDR_0X20].data = data;
-- }
--
-- if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
-- u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
-- u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
-- int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
-- u16 data;
--
-- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
-- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
-- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
-- } else {
-- data = phy_cfg->param[PHY_ADDR_0X20].data;
-- }
--
-- data &= ~(val_mask << val_shift);
-- data |= ((efuse_val & val_mask) << val_shift);
--
-- phy_cfg->param[PHY_ADDR_0X20].data = data;
-- }
--
-- if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
-- u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
--
-- if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
-- phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
--
-- phy_cfg->param[PHY_ADDR_0X21].data =
-- phy_parameter->amplitude_control_fine & val_mask;
-- }
--}
--
--static int parse_phy_data(struct rtk_phy *rtk_phy)
--{
-- struct device *dev = rtk_phy->dev;
-- struct phy_parameter *phy_parameter;
-- int ret = 0;
-- int index;
--
-- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
-- rtk_phy->num_phy, GFP_KERNEL);
-- if (!rtk_phy->phy_parameter)
-- return -ENOMEM;
--
-- for (index = 0; index < rtk_phy->num_phy; index++) {
-- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
--
-- phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
--
-- /* Amplitude control address 0x20 bit 0 to bit 7 */
-- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
-- &phy_parameter->amplitude_control_coarse))
-- phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
--
-- /* Amplitude control address 0x21 bit 0 to bit 16 */
-- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
-- &phy_parameter->amplitude_control_fine))
-- phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
--
-- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
--
-- update_amplitude_control_value(rtk_phy, phy_parameter);
-- }
--
-- return ret;
--}
--
--static int rtk_usb3phy_probe(struct platform_device *pdev)
--{
-- struct rtk_phy *rtk_phy;
-- struct device *dev = &pdev->dev;
-- struct phy *generic_phy;
-- struct phy_provider *phy_provider;
-- const struct phy_cfg *phy_cfg;
-- int ret;
--
-- phy_cfg = of_device_get_match_data(dev);
-- if (!phy_cfg) {
-- dev_err(dev, "phy config are not assigned!\n");
-- return -EINVAL;
-- }
--
-- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
-- if (!rtk_phy)
-- return -ENOMEM;
--
-- rtk_phy->dev = &pdev->dev;
-- rtk_phy->phy.dev = rtk_phy->dev;
-- rtk_phy->phy.label = "rtk-usb3phy";
-- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
--
-- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
--
-- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
--
-- rtk_phy->num_phy = 1;
--
-- ret = parse_phy_data(rtk_phy);
-- if (ret)
-- goto err;
--
-- platform_set_drvdata(pdev, rtk_phy);
--
-- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
-- if (IS_ERR(generic_phy))
-- return PTR_ERR(generic_phy);
--
-- phy_set_drvdata(generic_phy, rtk_phy);
--
-- phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
-- if (IS_ERR(phy_provider))
-- return PTR_ERR(phy_provider);
--
-- ret = usb_add_phy_dev(&rtk_phy->phy);
-- if (ret)
-- goto err;
--
-- create_debug_files(rtk_phy);
--
--err:
-- return ret;
--}
--
--static void rtk_usb3phy_remove(struct platform_device *pdev)
--{
-- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
--
-- remove_debug_files(rtk_phy);
--
-- usb_remove_phy(&rtk_phy->phy);
--}
--
--static const struct phy_cfg rtd1295_phy_cfg = {
-- .param_size = MAX_USB_PHY_DATA_SIZE,
-- .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
-- [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
-- [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
-- [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
-- [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
-- [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
-- [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
-- [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
-- [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
-- [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
-- [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
-- [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
-- [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
-- [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
-- [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
-- [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
-- [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
-- [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
-- [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
-- [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
-- [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
-- [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
-- [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
-- [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
-- .check_efuse = false,
-- .do_toggle = true,
-- .do_toggle_once = false,
-- .use_default_parameter = false,
-- .check_rx_front_end_offset = false,
--};
--
--static const struct phy_cfg rtd1619_phy_cfg = {
-- .param_size = MAX_USB_PHY_DATA_SIZE,
-- .param = { [8] = {0x08, 0x3591},
-- [38] = {0x26, 0x840b},
-- [40] = {0x28, 0xf842}, },
-- .check_efuse = false,
-- .do_toggle = true,
-- .do_toggle_once = false,
-- .use_default_parameter = false,
-- .check_rx_front_end_offset = false,
--};
--
--static const struct phy_cfg rtd1319_phy_cfg = {
-- .param_size = MAX_USB_PHY_DATA_SIZE,
-- .param = { [1] = {0x01, 0xac86},
-- [6] = {0x06, 0x0003},
-- [9] = {0x09, 0x924c},
-- [10] = {0x0a, 0xa608},
-- [11] = {0x0b, 0xb905},
-- [14] = {0x0e, 0x2010},
-- [32] = {0x20, 0x705a},
-- [33] = {0x21, 0xf645},
-- [34] = {0x22, 0x0013},
-- [35] = {0x23, 0xcb66},
-- [41] = {0x29, 0xff00}, },
-- .check_efuse = true,
-- .do_toggle = true,
-- .do_toggle_once = false,
-- .use_default_parameter = false,
-- .check_rx_front_end_offset = false,
--};
--
--static const struct phy_cfg rtd1619b_phy_cfg = {
-- .param_size = MAX_USB_PHY_DATA_SIZE,
-- .param = { [1] = {0x01, 0xac8c},
-- [6] = {0x06, 0x0017},
-- [9] = {0x09, 0x724c},
-- [10] = {0x0a, 0xb610},
-- [11] = {0x0b, 0xb90d},
-- [13] = {0x0d, 0xef2a},
-- [15] = {0x0f, 0x9050},
-- [16] = {0x10, 0x000c},
-- [32] = {0x20, 0x70ff},
-- [34] = {0x22, 0x0013},
-- [35] = {0x23, 0xdb66},
-- [38] = {0x26, 0x8609},
-- [41] = {0x29, 0xff13},
-- [42] = {0x2a, 0x3070}, },
-- .check_efuse = true,
-- .do_toggle = false,
-- .do_toggle_once = true,
-- .use_default_parameter = false,
-- .check_rx_front_end_offset = false,
--};
--
--static const struct phy_cfg rtd1319d_phy_cfg = {
-- .param_size = MAX_USB_PHY_DATA_SIZE,
-- .param = { [1] = {0x01, 0xac89},
-- [4] = {0x04, 0xf2f5},
-- [6] = {0x06, 0x0017},
-- [9] = {0x09, 0x424c},
-- [10] = {0x0a, 0x9610},
-- [11] = {0x0b, 0x9901},
-- [12] = {0x0c, 0xf000},
-- [13] = {0x0d, 0xef2a},
-- [14] = {0x0e, 0x1000},
-- [15] = {0x0f, 0x9050},
-- [32] = {0x20, 0x7077},
-- [35] = {0x23, 0x0b62},
-- [37] = {0x25, 0x10ec},
-- [42] = {0x2a, 0x3070}, },
-- .check_efuse = true,
-- .do_toggle = false,
-- .do_toggle_once = true,
-- .use_default_parameter = false,
-- .check_rx_front_end_offset = true,
--};
--
--static const struct of_device_id usbphy_rtk_dt_match[] = {
-- { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
-- { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
-- { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
-- { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
-- { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
-- {},
--};
--MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
--
--static struct platform_driver rtk_usb3phy_driver = {
-- .probe = rtk_usb3phy_probe,
-- .remove_new = rtk_usb3phy_remove,
-- .driver = {
-- .name = "rtk-usb3phy",
-- .of_match_table = usbphy_rtk_dt_match,
-- },
--};
--
--module_platform_driver(rtk_usb3phy_driver);
--
--MODULE_LICENSE("GPL");
--MODULE_ALIAS("platform: rtk-usb3phy");
--MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
--MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
-diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
-index e9dc9638120a5..184ec92241ca8 100644
---- a/drivers/pinctrl/core.c
-+++ b/drivers/pinctrl/core.c
-@@ -1253,17 +1253,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
- static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
- {
- struct pinctrl_setting *setting, *setting2;
-- struct pinctrl_state *old_state = p->state;
-+ struct pinctrl_state *old_state = READ_ONCE(p->state);
- int ret;
-
-- if (p->state) {
-+ if (old_state) {
- /*
- * For each pinmux setting in the old state, forget SW's record
- * of mux owner for that pingroup. Any pingroups which are
- * still owned by the new state will be re-acquired by the call
- * to pinmux_enable_setting() in the loop below.
- */
-- list_for_each_entry(setting, &p->state->settings, node) {
-+ list_for_each_entry(setting, &old_state->settings, node) {
- if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
- continue;
- pinmux_disable_setting(setting);
-diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
-index faa8b7ff5bcf3..ec76e43527c5c 100644
---- a/drivers/pinctrl/intel/pinctrl-baytrail.c
-+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
-@@ -983,11 +983,18 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
-
- break;
- case PIN_CONFIG_INPUT_DEBOUNCE:
-- if (arg)
-+ if (arg) {
- conf |= BYT_DEBOUNCE_EN;
-- else
-+ } else {
- conf &= ~BYT_DEBOUNCE_EN;
-
-+ /*
-+ * No need to update the pulse value.
-+ * Debounce is going to be disabled.
-+ */
-+ break;
-+ }
-+
- switch (arg) {
- case 375:
- db_pulse = BYT_DEBOUNCE_PULSE_375US;
-diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
-index 37cdfe4b04f9a..2ea6ef99cc70b 100644
---- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
-+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
-@@ -1175,6 +1175,8 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
- u32 port;
- u8 bit;
-
-+ irq_chip_disable_parent(d);
-+
- port = RZG2L_PIN_ID_TO_PORT(hwirq);
- bit = RZG2L_PIN_ID_TO_PIN(hwirq);
-
-@@ -1189,7 +1191,6 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d)
- spin_unlock_irqrestore(&pctrl->lock, flags);
-
- gpiochip_disable_irq(gc, hwirq);
-- irq_chip_disable_parent(d);
- }
-
- static void rzg2l_gpio_irq_enable(struct irq_data *d)
-diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
-index a73385a431de9..346a31f31bba8 100644
---- a/drivers/pinctrl/stm32/pinctrl-stm32.c
-+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
-@@ -1283,9 +1283,11 @@ static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pi
- int i;
-
- /* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */
-- pin_desc = pctl->pins + stm32_pin_nb;
-- if (pin_desc->pin.number == stm32_pin_nb)
-- return pin_desc;
-+ if (stm32_pin_nb < pctl->npins) {
-+ pin_desc = pctl->pins + stm32_pin_nb;
-+ if (pin_desc->pin.number == stm32_pin_nb)
-+ return pin_desc;
-+ }
-
- /* Otherwise, loop all array to find the pin with the right number */
- for (i = 0; i < pctl->npins; i++) {
-@@ -1378,6 +1380,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
- }
-
- names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
-+ if (!names) {
-+ err = -ENOMEM;
-+ goto err_clk;
-+ }
-+
- for (i = 0; i < npins; i++) {
- stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
- if (stm32_pin && stm32_pin->pin.name)
-diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
-index 5d36fbc75e1bb..badc68bbae8cc 100644
---- a/drivers/platform/chrome/cros_ec.c
-+++ b/drivers/platform/chrome/cros_ec.c
-@@ -321,17 +321,8 @@ void cros_ec_unregister(struct cros_ec_device *ec_dev)
- EXPORT_SYMBOL(cros_ec_unregister);
-
- #ifdef CONFIG_PM_SLEEP
--/**
-- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
-- * @ec_dev: Device to suspend.
-- *
-- * This can be called by drivers to handle a suspend event.
-- *
-- * Return: 0 on success or negative error code.
-- */
--int cros_ec_suspend(struct cros_ec_device *ec_dev)
-+static void cros_ec_send_suspend_event(struct cros_ec_device *ec_dev)
- {
-- struct device *dev = ec_dev->dev;
- int ret;
- u8 sleep_event;
-
-@@ -343,7 +334,26 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
- if (ret < 0)
- dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n",
- ret);
-+}
-
-+/**
-+ * cros_ec_suspend_prepare() - Handle a suspend prepare operation for the ChromeOS EC device.
-+ * @ec_dev: Device to suspend.
-+ *
-+ * This can be called by drivers to handle a suspend prepare stage of suspend.
-+ *
-+ * Return: 0 always.
-+ */
-+int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev)
-+{
-+ cros_ec_send_suspend_event(ec_dev);
-+ return 0;
-+}
-+EXPORT_SYMBOL(cros_ec_suspend_prepare);
-+
-+static void cros_ec_disable_irq(struct cros_ec_device *ec_dev)
-+{
-+ struct device *dev = ec_dev->dev;
- if (device_may_wakeup(dev))
- ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
- else
-@@ -351,7 +361,35 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
-
- disable_irq(ec_dev->irq);
- ec_dev->suspended = true;
-+}
-
-+/**
-+ * cros_ec_suspend_late() - Handle a suspend late operation for the ChromeOS EC device.
-+ * @ec_dev: Device to suspend.
-+ *
-+ * This can be called by drivers to handle a suspend late stage of suspend.
-+ *
-+ * Return: 0 always.
-+ */
-+int cros_ec_suspend_late(struct cros_ec_device *ec_dev)
-+{
-+ cros_ec_disable_irq(ec_dev);
-+ return 0;
-+}
-+EXPORT_SYMBOL(cros_ec_suspend_late);
-+
-+/**
-+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
-+ * @ec_dev: Device to suspend.
-+ *
-+ * This can be called by drivers to handle a suspend event.
-+ *
-+ * Return: 0 always.
-+ */
-+int cros_ec_suspend(struct cros_ec_device *ec_dev)
-+{
-+ cros_ec_send_suspend_event(ec_dev);
-+ cros_ec_disable_irq(ec_dev);
- return 0;
- }
- EXPORT_SYMBOL(cros_ec_suspend);
-@@ -370,22 +408,11 @@ static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
- }
- }
-
--/**
-- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
-- * @ec_dev: Device to resume.
-- *
-- * This can be called by drivers to handle a resume event.
-- *
-- * Return: 0 on success or negative error code.
-- */
--int cros_ec_resume(struct cros_ec_device *ec_dev)
-+static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
- {
- int ret;
- u8 sleep_event;
-
-- ec_dev->suspended = false;
-- enable_irq(ec_dev->irq);
--
- sleep_event = (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware()) ?
- HOST_SLEEP_EVENT_S3_RESUME :
- HOST_SLEEP_EVENT_S0IX_RESUME;
-@@ -394,6 +421,24 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
- if (ret < 0)
- dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n",
- ret);
-+}
-+
-+/**
-+ * cros_ec_resume_complete() - Handle a resume complete operation for the ChromeOS EC device.
-+ * @ec_dev: Device to resume.
-+ *
-+ * This can be called by drivers to handle a resume complete stage of resume.
-+ */
-+void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
-+{
-+ cros_ec_send_resume_event(ec_dev);
-+}
-+EXPORT_SYMBOL(cros_ec_resume_complete);
-+
-+static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
-+{
-+ ec_dev->suspended = false;
-+ enable_irq(ec_dev->irq);
-
- if (ec_dev->wake_enabled)
- disable_irq_wake(ec_dev->irq);
-@@ -403,8 +448,35 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
- * suspend. This way the clients know what to do with them.
- */
- cros_ec_report_events_during_suspend(ec_dev);
-+}
-
-+/**
-+ * cros_ec_resume_early() - Handle a resume early operation for the ChromeOS EC device.
-+ * @ec_dev: Device to resume.
-+ *
-+ * This can be called by drivers to handle a resume early stage of resume.
-+ *
-+ * Return: 0 always.
-+ */
-+int cros_ec_resume_early(struct cros_ec_device *ec_dev)
-+{
-+ cros_ec_enable_irq(ec_dev);
-+ return 0;
-+}
-+EXPORT_SYMBOL(cros_ec_resume_early);
-
-+/**
-+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
-+ * @ec_dev: Device to resume.
-+ *
-+ * This can be called by drivers to handle a resume event.
-+ *
-+ * Return: 0 always.
-+ */
-+int cros_ec_resume(struct cros_ec_device *ec_dev)
-+{
-+ cros_ec_enable_irq(ec_dev);
-+ cros_ec_send_resume_event(ec_dev);
- return 0;
- }
- EXPORT_SYMBOL(cros_ec_resume);
-diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
-index bbca0096868ac..566332f487892 100644
---- a/drivers/platform/chrome/cros_ec.h
-+++ b/drivers/platform/chrome/cros_ec.h
-@@ -14,7 +14,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev);
- void cros_ec_unregister(struct cros_ec_device *ec_dev);
-
- int cros_ec_suspend(struct cros_ec_device *ec_dev);
-+int cros_ec_suspend_late(struct cros_ec_device *ec_dev);
-+int cros_ec_suspend_prepare(struct cros_ec_device *ec_dev);
- int cros_ec_resume(struct cros_ec_device *ec_dev);
-+int cros_ec_resume_early(struct cros_ec_device *ec_dev);
-+void cros_ec_resume_complete(struct cros_ec_device *ec_dev);
-
- irqreturn_t cros_ec_irq_thread(int irq, void *data);
-
-diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
-index 356572452898d..42e1770887fb0 100644
---- a/drivers/platform/chrome/cros_ec_lpc.c
-+++ b/drivers/platform/chrome/cros_ec_lpc.c
-@@ -549,22 +549,36 @@ MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
- static int cros_ec_lpc_prepare(struct device *dev)
- {
- struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
--
-- return cros_ec_suspend(ec_dev);
-+ return cros_ec_suspend_prepare(ec_dev);
- }
-
- static void cros_ec_lpc_complete(struct device *dev)
- {
- struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
-- cros_ec_resume(ec_dev);
-+ cros_ec_resume_complete(ec_dev);
-+}
-+
-+static int cros_ec_lpc_suspend_late(struct device *dev)
-+{
-+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
-+
-+ return cros_ec_suspend_late(ec_dev);
-+}
-+
-+static int cros_ec_lpc_resume_early(struct device *dev)
-+{
-+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
-+
-+ return cros_ec_resume_early(ec_dev);
- }
- #endif
-
- static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
- #ifdef CONFIG_PM_SLEEP
- .prepare = cros_ec_lpc_prepare,
-- .complete = cros_ec_lpc_complete
-+ .complete = cros_ec_lpc_complete,
- #endif
-+ SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend_late, cros_ec_lpc_resume_early)
- };
-
- static struct platform_driver cros_ec_lpc_driver = {
-diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
-index 5b9748e0463bc..63e38671e95a6 100644
---- a/drivers/platform/chrome/cros_ec_proto_test.c
-+++ b/drivers/platform/chrome/cros_ec_proto_test.c
-@@ -2668,6 +2668,7 @@ static int cros_ec_proto_test_init(struct kunit *test)
- ec_dev->dev->release = cros_ec_proto_test_release;
- ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
- ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
-+ mutex_init(&ec_dev->lock);
-
- priv->msg = (struct cros_ec_command *)priv->_msg;
-
-diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
-index c1e788b67a748..212f164bc3dba 100644
---- a/drivers/platform/x86/amd/pmc/pmc.c
-+++ b/drivers/platform/x86/amd/pmc/pmc.c
-@@ -912,33 +912,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
- { }
- };
-
--static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
--{
-- int ret;
--
-- switch (dev->cpu_id) {
-- case AMD_CPU_ID_YC:
-- if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
-- ret = -EINVAL;
-- goto err_dram_size;
-- }
-- break;
-- default:
-- ret = -EINVAL;
-- goto err_dram_size;
-- }
--
-- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
-- if (ret || !dev->dram_size)
-- goto err_dram_size;
--
-- return 0;
--
--err_dram_size:
-- dev_err(dev->dev, "DRAM size command not supported for this platform\n");
-- return ret;
--}
--
- static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
- {
- u32 phys_addr_low, phys_addr_hi;
-@@ -957,8 +930,8 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
- return -EIO;
-
- /* Get DRAM size */
-- ret = amd_pmc_get_dram_size(dev);
-- if (ret)
-+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
-+ if (ret || !dev->dram_size)
- dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
-
- /* Get STB DRAM address */
-diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
-index 5798b49ddaba9..6ddca857cc4d1 100644
---- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
-+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
-@@ -592,13 +592,11 @@ static int hp_add_other_attributes(int attr_type)
- int ret;
- char *attr_name;
-
-- mutex_lock(&bioscfg_drv.mutex);
--
- attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
-- if (!attr_name_kobj) {
-- ret = -ENOMEM;
-- goto err_other_attr_init;
-- }
-+ if (!attr_name_kobj)
-+ return -ENOMEM;
-+
-+ mutex_lock(&bioscfg_drv.mutex);
-
- /* Check if attribute type is supported */
- switch (attr_type) {
-@@ -615,14 +613,14 @@ static int hp_add_other_attributes(int attr_type)
- default:
- pr_err("Error: Unknown attr_type: %d\n", attr_type);
- ret = -EINVAL;
-- goto err_other_attr_init;
-+ kfree(attr_name_kobj);
-+ goto unlock_drv_mutex;
- }
-
- ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
- NULL, "%s", attr_name);
- if (ret) {
- pr_err("Error encountered [%d]\n", ret);
-- kobject_put(attr_name_kobj);
- goto err_other_attr_init;
- }
-
-@@ -630,25 +628,25 @@ static int hp_add_other_attributes(int attr_type)
- switch (attr_type) {
- case HPWMI_SECURE_PLATFORM_TYPE:
- ret = hp_populate_secure_platform_data(attr_name_kobj);
-- if (ret)
-- goto err_other_attr_init;
- break;
-
- case HPWMI_SURE_START_TYPE:
- ret = hp_populate_sure_start_data(attr_name_kobj);
-- if (ret)
-- goto err_other_attr_init;
- break;
-
- default:
- ret = -EINVAL;
-- goto err_other_attr_init;
- }
-
-+ if (ret)
-+ goto err_other_attr_init;
-+
- mutex_unlock(&bioscfg_drv.mutex);
- return 0;
-
- err_other_attr_init:
-+ kobject_put(attr_name_kobj);
-+unlock_drv_mutex:
- mutex_unlock(&bioscfg_drv.mutex);
- kfree(obj);
- return ret;
-diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
-index ac037540acfc6..88eefccb6ed27 100644
---- a/drivers/platform/x86/ideapad-laptop.c
-+++ b/drivers/platform/x86/ideapad-laptop.c
-@@ -1425,18 +1425,17 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
- if (WARN_ON(priv->kbd_bl.initialized))
- return -EEXIST;
-
-- brightness = ideapad_kbd_bl_brightness_get(priv);
-- if (brightness < 0)
-- return brightness;
--
-- priv->kbd_bl.last_brightness = brightness;
--
- if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
- priv->kbd_bl.led.max_brightness = 2;
- } else {
- priv->kbd_bl.led.max_brightness = 1;
- }
-
-+ brightness = ideapad_kbd_bl_brightness_get(priv);
-+ if (brightness < 0)
-+ return brightness;
-+
-+ priv->kbd_bl.last_brightness = brightness;
- priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
- priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
- priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
-diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
-index 41584427dc323..a46fc417cb200 100644
---- a/drivers/platform/x86/thinkpad_acpi.c
-+++ b/drivers/platform/x86/thinkpad_acpi.c
-@@ -9816,6 +9816,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
- * Individual addressing is broken on models that expose the
- * primary battery as BAT1.
- */
-+ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
- TPACPI_Q_LNV('J', '7', true), /* B5400 */
- TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
- TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
-diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
-index a78ddd83cda02..317c907304149 100644
---- a/drivers/platform/x86/wmi.c
-+++ b/drivers/platform/x86/wmi.c
-@@ -911,21 +911,13 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
- }
- static int wmi_char_open(struct inode *inode, struct file *filp)
- {
-- const char *driver_name = filp->f_path.dentry->d_iname;
-- struct wmi_block *wblock;
-- struct wmi_block *next;
--
-- list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
-- if (!wblock->dev.dev.driver)
-- continue;
-- if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
-- filp->private_data = wblock;
-- break;
-- }
-- }
-+ /*
-+ * The miscdevice already stores a pointer to itself
-+ * inside filp->private_data
-+ */
-+ struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
-
-- if (!filp->private_data)
-- return -ENODEV;
-+ filp->private_data = wblock;
-
- return nonseekable_open(inode, filp);
- }
-@@ -1270,8 +1262,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
- struct wmi_block *wblock, *next;
- union acpi_object *obj;
- acpi_status status;
-- int retval = 0;
- u32 i, total;
-+ int retval;
-
- status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
- if (ACPI_FAILURE(status))
-@@ -1282,8 +1274,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
- return -ENXIO;
-
- if (obj->type != ACPI_TYPE_BUFFER) {
-- retval = -ENXIO;
-- goto out_free_pointer;
-+ kfree(obj);
-+ return -ENXIO;
- }
-
- gblock = (const struct guid_block *)obj->buffer.pointer;
-@@ -1298,8 +1290,8 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
-
- wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
- if (!wblock) {
-- retval = -ENOMEM;
-- break;
-+ dev_err(wmi_bus_dev, "Failed to allocate %pUL\n", &gblock[i].guid);
-+ continue;
- }
-
- wblock->acpi_device = device;
-@@ -1338,9 +1330,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
- }
- }
-
--out_free_pointer:
-- kfree(out.pointer);
-- return retval;
-+ kfree(obj);
-+
-+ return 0;
- }
-
- /*
-diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
-index cfb796d40d9d2..0dd71cd814c52 100644
---- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c
-+++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
-@@ -228,7 +228,7 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
-
- static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
- { G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
-- { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
-+ { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
- };
-
- #define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
-diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
-index 1a179d4e011cf..d2f0233cb6206 100644
---- a/drivers/pmdomain/bcm/bcm2835-power.c
-+++ b/drivers/pmdomain/bcm/bcm2835-power.c
-@@ -175,7 +175,7 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
- }
- writel(PM_PASSWORD | val, base + reg);
-
-- while (readl(base + reg) & ASB_ACK) {
-+ while (!!(readl(base + reg) & ASB_ACK) == enable) {
- cpu_relax();
- if (ktime_get_ns() - start >= 1000)
- return -ETIMEDOUT;
-diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
-index 90a8b2c0676ff..419ed15cc10c4 100644
---- a/drivers/pmdomain/imx/gpc.c
-+++ b/drivers/pmdomain/imx/gpc.c
-@@ -498,6 +498,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
-
- pd_pdev->dev.parent = &pdev->dev;
- pd_pdev->dev.of_node = np;
-+ pd_pdev->dev.fwnode = of_fwnode_handle(np);
-
- ret = platform_device_add(pd_pdev);
- if (ret) {
-diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
-index 0b69fb7bafd85..416409e2fd6da 100644
---- a/drivers/power/supply/power_supply_core.c
-+++ b/drivers/power/supply/power_supply_core.c
-@@ -29,7 +29,7 @@
- struct class *power_supply_class;
- EXPORT_SYMBOL_GPL(power_supply_class);
-
--ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
-+BLOCKING_NOTIFIER_HEAD(power_supply_notifier);
- EXPORT_SYMBOL_GPL(power_supply_notifier);
-
- static struct device_type power_supply_dev_type;
-@@ -97,7 +97,7 @@ static void power_supply_changed_work(struct work_struct *work)
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
- power_supply_update_leds(psy);
-- atomic_notifier_call_chain(&power_supply_notifier,
-+ blocking_notifier_call_chain(&power_supply_notifier,
- PSY_EVENT_PROP_CHANGED, psy);
- kobject_uevent(&psy->dev.kobj, KOBJ_CHANGE);
- spin_lock_irqsave(&psy->changed_lock, flags);
-@@ -1262,13 +1262,13 @@ static void power_supply_dev_release(struct device *dev)
-
- int power_supply_reg_notifier(struct notifier_block *nb)
- {
-- return atomic_notifier_chain_register(&power_supply_notifier, nb);
-+ return blocking_notifier_chain_register(&power_supply_notifier, nb);
- }
- EXPORT_SYMBOL_GPL(power_supply_reg_notifier);
-
- void power_supply_unreg_notifier(struct notifier_block *nb)
- {
-- atomic_notifier_chain_unregister(&power_supply_notifier, nb);
-+ blocking_notifier_chain_unregister(&power_supply_notifier, nb);
- }
- EXPORT_SYMBOL_GPL(power_supply_unreg_notifier);
-
-diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
-index 2ff7717530bf8..8a2f18fa3faf5 100644
---- a/drivers/powercap/dtpm_cpu.c
-+++ b/drivers/powercap/dtpm_cpu.c
-@@ -24,7 +24,6 @@
- #include <linux/of.h>
- #include <linux/pm_qos.h>
- #include <linux/slab.h>
--#include <linux/units.h>
-
- struct dtpm_cpu {
- struct dtpm dtpm;
-@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
- if (pd->table[i].frequency < freq)
- continue;
-
-- return scale_pd_power_uw(pd_mask, pd->table[i].power *
-- MICROWATT_PER_MILLIWATT);
-+ return scale_pd_power_uw(pd_mask, pd->table[i].power);
- }
-
- return 0;
-@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
- nr_cpus = cpumask_weight(&cpus);
-
- dtpm->power_min = em->table[0].power;
-- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
- dtpm->power_min *= nr_cpus;
-
- dtpm->power_max = em->table[em->nr_perf_states - 1].power;
-- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
- dtpm->power_max *= nr_cpus;
-
- return 0;
-diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
-index 91276761a31d9..612c3b59dd5be 100644
---- a/drivers/powercap/dtpm_devfreq.c
-+++ b/drivers/powercap/dtpm_devfreq.c
-@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
- struct em_perf_domain *pd = em_pd_get(dev);
-
- dtpm->power_min = pd->table[0].power;
-- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
-
- dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
-- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
-
- return 0;
- }
-@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
- struct device *dev = devfreq->dev.parent;
- struct em_perf_domain *pd = em_pd_get(dev);
- unsigned long freq;
-- u64 power;
- int i;
-
- for (i = 0; i < pd->nr_perf_states; i++) {
--
-- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
-- if (power > power_limit)
-+ if (pd->table[i].power > power_limit)
- break;
- }
-
-@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
-
- dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
-
-- power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
-+ power_limit = pd->table[i - 1].power;
-
- return power_limit;
- }
-@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
- if (pd->table[i].frequency < freq)
- continue;
-
-- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
-+ power = pd->table[i].power;
- power *= status.busy_time;
- power >>= 10;
-
-diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
-index 40a2cc649c79b..2feed036c1cd4 100644
---- a/drivers/powercap/intel_rapl_common.c
-+++ b/drivers/powercap/intel_rapl_common.c
-@@ -892,7 +892,7 @@ static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
- return -EINVAL;
-
- if (rd->rpl[pl].locked) {
-- pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
-+ pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
- return -EACCES;
- }
-
-diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
-index 362bf756e6b78..5a3a4cc0bec82 100644
---- a/drivers/ptp/ptp_chardev.c
-+++ b/drivers/ptp/ptp_chardev.c
-@@ -490,7 +490,8 @@ ssize_t ptp_read(struct posix_clock *pc,
-
- for (i = 0; i < cnt; i++) {
- event[i] = queue->buf[queue->head];
-- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
-+ /* Paired with READ_ONCE() in queue_cnt() */
-+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
- }
-
- spin_unlock_irqrestore(&queue->lock, flags);
-diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
-index 80f74e38c2da4..9a50bfb56453c 100644
---- a/drivers/ptp/ptp_clock.c
-+++ b/drivers/ptp/ptp_clock.c
-@@ -56,10 +56,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
- dst->t.sec = seconds;
- dst->t.nsec = remainder;
-
-+ /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
- if (!queue_free(queue))
-- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
-+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
-
-- queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
-+ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
-
- spin_unlock_irqrestore(&queue->lock, flags);
- }
-diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
-index 75f58fc468a71..b8d4f61f14be4 100644
---- a/drivers/ptp/ptp_private.h
-+++ b/drivers/ptp/ptp_private.h
-@@ -76,9 +76,13 @@ struct ptp_vclock {
- * that a writer might concurrently increment the tail does not
- * matter, since the queue remains nonempty nonetheless.
- */
--static inline int queue_cnt(struct timestamp_event_queue *q)
-+static inline int queue_cnt(const struct timestamp_event_queue *q)
- {
-- int cnt = q->tail - q->head;
-+ /*
-+ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
-+ * ptp_read(), extts_fifo_show().
-+ */
-+ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
- return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
- }
-
-diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
-index 6e4d5456a8851..34ea5c16123a1 100644
---- a/drivers/ptp/ptp_sysfs.c
-+++ b/drivers/ptp/ptp_sysfs.c
-@@ -90,7 +90,8 @@ static ssize_t extts_fifo_show(struct device *dev,
- qcnt = queue_cnt(queue);
- if (qcnt) {
- event = queue->buf[queue->head];
-- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
-+ /* Paired with READ_ONCE() in queue_cnt() */
-+ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
- }
- spin_unlock_irqrestore(&queue->lock, flags);
-
-diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
-index a3faa9a3de7cc..a7d529bf76adc 100644
---- a/drivers/pwm/pwm-brcmstb.c
-+++ b/drivers/pwm/pwm-brcmstb.c
-@@ -288,7 +288,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
- {
- struct brcmstb_pwm *p = dev_get_drvdata(dev);
-
-- clk_disable(p->clk);
-+ clk_disable_unprepare(p->clk);
-
- return 0;
- }
-@@ -297,7 +297,7 @@ static int brcmstb_pwm_resume(struct device *dev)
- {
- struct brcmstb_pwm *p = dev_get_drvdata(dev);
-
-- clk_enable(p->clk);
-+ clk_prepare_enable(p->clk);
-
- return 0;
- }
-diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
-index b1d1373648a38..c8800f84b917f 100644
---- a/drivers/pwm/pwm-sti.c
-+++ b/drivers/pwm/pwm-sti.c
-@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
- unsigned int cpt_num_devs;
- unsigned int max_pwm_cnt;
- unsigned int max_prescale;
-+ struct sti_cpt_ddata *ddata;
- };
-
- struct sti_pwm_chip {
-@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
- {
- struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
- struct sti_pwm_compat_data *cdata = pc->cdata;
-- struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
-+ struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
- struct device *dev = pc->dev;
- unsigned int effective_ticks;
- unsigned long long high, low;
-@@ -440,7 +441,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
- while (cpt_int_stat) {
- devicenum = ffs(cpt_int_stat) - 1;
-
-- ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
-+ ddata = &pc->cdata->ddata[devicenum];
-
- /*
- * Capture input:
-@@ -638,30 +639,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
- dev_err(dev, "failed to prepare clock\n");
- return ret;
- }
-+
-+ cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
-+ if (!cdata->ddata)
-+ return -ENOMEM;
- }
-
- pc->chip.dev = dev;
- pc->chip.ops = &sti_pwm_ops;
- pc->chip.npwm = pc->cdata->pwm_num_devs;
-
-- ret = pwmchip_add(&pc->chip);
-- if (ret < 0) {
-- clk_unprepare(pc->pwm_clk);
-- clk_unprepare(pc->cpt_clk);
-- return ret;
-- }
--
- for (i = 0; i < cdata->cpt_num_devs; i++) {
-- struct sti_cpt_ddata *ddata;
--
-- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
-- if (!ddata)
-- return -ENOMEM;
-+ struct sti_cpt_ddata *ddata = &cdata->ddata[i];
-
- init_waitqueue_head(&ddata->wait);
- mutex_init(&ddata->lock);
-+ }
-
-- pwm_set_chip_data(&pc->chip.pwms[i], ddata);
-+ ret = pwmchip_add(&pc->chip);
-+ if (ret < 0) {
-+ clk_unprepare(pc->pwm_clk);
-+ clk_unprepare(pc->cpt_clk);
-+ return ret;
- }
-
- platform_set_drvdata(pdev, pc);
-diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
-index 65fbd95f1dbb0..4ca8fbf4b3e2e 100644
---- a/drivers/regulator/mt6358-regulator.c
-+++ b/drivers/regulator/mt6358-regulator.c
-@@ -688,12 +688,18 @@ static int mt6358_regulator_probe(struct platform_device *pdev)
- const struct mt6358_regulator_info *mt6358_info;
- int i, max_regulator, ret;
-
-- if (mt6397->chip_id == MT6366_CHIP_ID) {
-- max_regulator = MT6366_MAX_REGULATOR;
-- mt6358_info = mt6366_regulators;
-- } else {
-+ switch (mt6397->chip_id) {
-+ case MT6358_CHIP_ID:
- max_regulator = MT6358_MAX_REGULATOR;
- mt6358_info = mt6358_regulators;
-+ break;
-+ case MT6366_CHIP_ID:
-+ max_regulator = MT6366_MAX_REGULATOR;
-+ mt6358_info = mt6366_regulators;
-+ break;
-+ default:
-+ dev_err(&pdev->dev, "unsupported chip ID: %d\n", mt6397->chip_id);
-+ return -EINVAL;
- }
-
- ret = mt6358_sync_vcn33_setting(&pdev->dev);
-diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
-index d990ba19c50eb..b2e359ac31693 100644
---- a/drivers/regulator/qcom-rpmh-regulator.c
-+++ b/drivers/regulator/qcom-rpmh-regulator.c
-@@ -1095,7 +1095,7 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
- RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
- RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
- RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
-- RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"),
-+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
- RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
- RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
- RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
-diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
-index 3cdc015692ca6..1a65a4e0dc003 100644
---- a/drivers/rtc/rtc-brcmstb-waketimer.c
-+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
-@@ -1,6 +1,6 @@
- // SPDX-License-Identifier: GPL-2.0-only
- /*
-- * Copyright © 2014-2017 Broadcom
-+ * Copyright © 2014-2023 Broadcom
- */
-
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-@@ -34,6 +34,7 @@ struct brcmstb_waketmr {
- u32 rate;
- unsigned long rtc_alarm;
- bool alarm_en;
-+ bool alarm_expired;
- };
-
- #define BRCMSTB_WKTMR_EVENT 0x00
-@@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
- writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
- writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
- (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
-+ if (timer->alarm_expired) {
-+ timer->alarm_expired = false;
-+ /* maintain call balance */
-+ enable_irq(timer->alarm_irq);
-+ }
- }
-
- static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
-@@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
- return IRQ_HANDLED;
-
- if (timer->alarm_en) {
-- if (!device_may_wakeup(timer->dev))
-+ if (device_may_wakeup(timer->dev)) {
-+ disable_irq_nosync(irq);
-+ timer->alarm_expired = true;
-+ } else {
- writel_relaxed(WKTMR_ALARM_EVENT,
- timer->base + BRCMSTB_WKTMR_EVENT);
-+ }
- rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
-+ } else {
-+ writel_relaxed(WKTMR_ALARM_EVENT,
-+ timer->base + BRCMSTB_WKTMR_EVENT);
- }
-
- return IRQ_HANDLED;
-@@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev,
- !brcmstb_waketmr_is_pending(timer))
- return -EINVAL;
- timer->alarm_en = true;
-- if (timer->alarm_irq)
-+ if (timer->alarm_irq) {
-+ if (timer->alarm_expired) {
-+ timer->alarm_expired = false;
-+ /* maintain call balance */
-+ enable_irq(timer->alarm_irq);
-+ }
- enable_irq(timer->alarm_irq);
-+ }
- } else if (!enabled && timer->alarm_en) {
- if (timer->alarm_irq)
- disable_irq(timer->alarm_irq);
-@@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev)
- return brcmstb_waketmr_prepare_suspend(timer);
- }
-
-+static int brcmstb_waketmr_suspend_noirq(struct device *dev)
-+{
-+ struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
-+
-+ /* Catch any alarms occurring prior to noirq */
-+ if (timer->alarm_expired && device_may_wakeup(dev))
-+ return -EBUSY;
-+
-+ return 0;
-+}
-+
- static int brcmstb_waketmr_resume(struct device *dev)
- {
- struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
-@@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev)
-
- return ret;
- }
-+#else
-+#define brcmstb_waketmr_suspend NULL
-+#define brcmstb_waketmr_suspend_noirq NULL
-+#define brcmstb_waketmr_resume NULL
- #endif /* CONFIG_PM_SLEEP */
-
--static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
-- brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
-+static const struct dev_pm_ops brcmstb_waketmr_pm_ops = {
-+ .suspend = brcmstb_waketmr_suspend,
-+ .suspend_noirq = brcmstb_waketmr_suspend_noirq,
-+ .resume = brcmstb_waketmr_resume,
-+};
-
- static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
- { .compatible = "brcm,brcmstb-waketimer" },
-diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
-index 06194674d71c5..540042b9eec8f 100644
---- a/drivers/rtc/rtc-pcf85363.c
-+++ b/drivers/rtc/rtc-pcf85363.c
-@@ -438,7 +438,7 @@ static int pcf85363_probe(struct i2c_client *client)
- if (client->irq > 0 || wakeup_source) {
- regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
- regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
-- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
-+ PIN_IO_INTAPM, PIN_IO_INTA_OUT);
- }
-
- if (client->irq > 0) {
-diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
-index 215597f73be4f..5b11ee9234573 100644
---- a/drivers/s390/block/dasd.c
-+++ b/drivers/s390/block/dasd.c
-@@ -674,18 +674,20 @@ static void dasd_profile_start(struct dasd_block *block,
- * we count each request only once.
- */
- device = cqr->startdev;
-- if (device->profile.data) {
-- counter = 1; /* request is not yet queued on the start device */
-- list_for_each(l, &device->ccw_queue)
-- if (++counter >= 31)
-- break;
-- }
-+ if (!device->profile.data)
-+ return;
-+
-+ spin_lock(get_ccwdev_lock(device->cdev));
-+ counter = 1; /* request is not yet queued on the start device */
-+ list_for_each(l, &device->ccw_queue)
-+ if (++counter >= 31)
-+ break;
-+ spin_unlock(get_ccwdev_lock(device->cdev));
-+
- spin_lock(&device->profile.lock);
-- if (device->profile.data) {
-- device->profile.data->dasd_io_nr_req[counter]++;
-- if (rq_data_dir(req) == READ)
-- device->profile.data->dasd_read_nr_req[counter]++;
-- }
-+ device->profile.data->dasd_io_nr_req[counter]++;
-+ if (rq_data_dir(req) == READ)
-+ device->profile.data->dasd_read_nr_req[counter]++;
- spin_unlock(&device->profile.lock);
- }
-
-diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
-index 339812efe8221..d6ad437883fad 100644
---- a/drivers/s390/crypto/ap_bus.c
-+++ b/drivers/s390/crypto/ap_bus.c
-@@ -1022,6 +1022,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
-
- void ap_bus_force_rescan(void)
- {
-+ /* Only trigger AP bus scans after the initial scan is done */
-+ if (atomic64_read(&ap_scan_bus_count) <= 0)
-+ return;
-+
-	/* processing an asynchronous bus rescan */
- del_timer(&ap_config_timer);
- queue_work(system_long_wq, &ap_scan_work);
-@@ -1865,15 +1869,18 @@ static inline void ap_scan_domains(struct ap_card *ac)
- }
- /* get it and thus adjust reference counter */
- get_device(dev);
-- if (decfg)
-+ if (decfg) {
- AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
- __func__, ac->id, dom);
-- else if (chkstop)
-+ } else if (chkstop) {
- AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
- __func__, ac->id, dom);
-- else
-+ } else {
-+ /* nudge the queue's state machine */
-+ ap_queue_init_state(aq);
- AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
- __func__, ac->id, dom);
-+ }
- goto put_dev_and_continue;
- }
- /* handle state changes on already existing queue device */
-@@ -1895,10 +1902,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
- } else if (!chkstop && aq->chkstop) {
- /* checkstop off */
- aq->chkstop = false;
-- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
-- aq->dev_state = AP_DEV_STATE_OPERATING;
-- aq->sm_state = AP_SM_STATE_RESET_START;
-- }
-+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
-+ _ap_queue_init_state(aq);
- spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
-@@ -1922,10 +1927,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
- } else if (!decfg && !aq->config) {
- /* config on this queue device */
- aq->config = true;
-- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
-- aq->dev_state = AP_DEV_STATE_OPERATING;
-- aq->sm_state = AP_SM_STATE_RESET_START;
-- }
-+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
-+ _ap_queue_init_state(aq);
- spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
-diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
-index be54b070c0316..3e34912a60506 100644
---- a/drivers/s390/crypto/ap_bus.h
-+++ b/drivers/s390/crypto/ap_bus.h
-@@ -287,6 +287,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
- void ap_queue_prepare_remove(struct ap_queue *aq);
- void ap_queue_remove(struct ap_queue *aq);
- void ap_queue_init_state(struct ap_queue *aq);
-+void _ap_queue_init_state(struct ap_queue *aq);
-
- struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
- int comp_type, unsigned int functions, int ml);
-diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
-index 1336e632adc4a..2943b2529d3a0 100644
---- a/drivers/s390/crypto/ap_queue.c
-+++ b/drivers/s390/crypto/ap_queue.c
-@@ -1160,14 +1160,19 @@ void ap_queue_remove(struct ap_queue *aq)
- spin_unlock_bh(&aq->lock);
- }
-
--void ap_queue_init_state(struct ap_queue *aq)
-+void _ap_queue_init_state(struct ap_queue *aq)
- {
-- spin_lock_bh(&aq->lock);
- aq->dev_state = AP_DEV_STATE_OPERATING;
- aq->sm_state = AP_SM_STATE_RESET_START;
- aq->last_err_rc = 0;
- aq->assoc_idx = ASSOC_IDX_INVALID;
- ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
-+}
-+
-+void ap_queue_init_state(struct ap_queue *aq)
-+{
-+ spin_lock_bh(&aq->lock);
-+ _ap_queue_init_state(aq);
- spin_unlock_bh(&aq->lock);
- }
- EXPORT_SYMBOL(ap_queue_init_state);
-diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
-index 4902d45e929ce..c61e6427384c3 100644
---- a/drivers/s390/net/Kconfig
-+++ b/drivers/s390/net/Kconfig
-@@ -103,10 +103,11 @@ config CCWGROUP
- config ISM
- tristate "Support for ISM vPCI Adapter"
- depends on PCI
-+ imply SMC
- default n
- help
- Select this option if you want to use the Internal Shared Memory
-- vPCI Adapter.
-+ vPCI Adapter. The adapter can be used with the SMC network protocol.
-
- To compile as a module choose M. The module name is ism.
- If unsure, choose N.
-diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
-index 6df7f377d2f90..81aabbfbbe2ca 100644
---- a/drivers/s390/net/ism_drv.c
-+++ b/drivers/s390/net/ism_drv.c
-@@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = {
- MODULE_DEVICE_TABLE(pci, ism_device_table);
-
- static debug_info_t *ism_debug_info;
--static const struct smcd_ops ism_ops;
-
- #define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
- static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
-@@ -289,22 +288,6 @@ out:
- return ret;
- }
-
--static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
-- u32 vid)
--{
-- union ism_query_rgid cmd;
--
-- memset(&cmd, 0, sizeof(cmd));
-- cmd.request.hdr.cmd = ISM_QUERY_RGID;
-- cmd.request.hdr.len = sizeof(cmd.request);
--
-- cmd.request.rgid = rgid;
-- cmd.request.vlan_valid = vid_valid;
-- cmd.request.vlan_id = vid;
--
-- return ism_cmd(ism, &cmd);
--}
--
- static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
- {
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
-@@ -429,23 +412,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
- return ism_cmd(ism, &cmd);
- }
-
--static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
-- u32 event_code, u64 info)
--{
-- union ism_sig_ieq cmd;
--
-- memset(&cmd, 0, sizeof(cmd));
-- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
-- cmd.request.hdr.len = sizeof(cmd.request);
--
-- cmd.request.rgid = rgid;
-- cmd.request.trigger_irq = trigger_irq;
-- cmd.request.event_code = event_code;
-- cmd.request.info = info;
--
-- return ism_cmd(ism, &cmd);
--}
--
- static unsigned int max_bytes(unsigned int start, unsigned int len,
- unsigned int boundary)
- {
-@@ -503,14 +469,6 @@ u8 *ism_get_seid(void)
- }
- EXPORT_SYMBOL_GPL(ism_get_seid);
-
--static u16 ism_get_chid(struct ism_dev *ism)
--{
-- if (!ism || !ism->pdev)
-- return 0;
--
-- return to_zpci(ism->pdev)->pchid;
--}
--
- static void ism_handle_event(struct ism_dev *ism)
- {
- struct ism_event *entry;
-@@ -569,11 +527,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
- return IRQ_HANDLED;
- }
-
--static u64 ism_get_local_gid(struct ism_dev *ism)
--{
-- return ism->local_gid;
--}
--
- static int ism_dev_init(struct ism_dev *ism)
- {
- struct pci_dev *pdev = ism->pdev;
-@@ -774,6 +727,22 @@ module_exit(ism_exit);
- /*************************** SMC-D Implementation *****************************/
-
- #if IS_ENABLED(CONFIG_SMC)
-+static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
-+ u32 vid)
-+{
-+ union ism_query_rgid cmd;
-+
-+ memset(&cmd, 0, sizeof(cmd));
-+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
-+ cmd.request.hdr.len = sizeof(cmd.request);
-+
-+ cmd.request.rgid = rgid;
-+ cmd.request.vlan_valid = vid_valid;
-+ cmd.request.vlan_id = vid;
-+
-+ return ism_cmd(ism, &cmd);
-+}
-+
- static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
- u32 vid)
- {
-@@ -811,6 +780,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd)
- return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
- }
-
-+static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
-+ u32 event_code, u64 info)
-+{
-+ union ism_sig_ieq cmd;
-+
-+ memset(&cmd, 0, sizeof(cmd));
-+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
-+ cmd.request.hdr.len = sizeof(cmd.request);
-+
-+ cmd.request.rgid = rgid;
-+ cmd.request.trigger_irq = trigger_irq;
-+ cmd.request.event_code = event_code;
-+ cmd.request.info = info;
-+
-+ return ism_cmd(ism, &cmd);
-+}
-+
- static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
- {
-@@ -830,11 +816,24 @@ static int smcd_supports_v2(void)
- SYSTEM_EID.type[0] != '0';
- }
-
-+static u64 ism_get_local_gid(struct ism_dev *ism)
-+{
-+ return ism->local_gid;
-+}
-+
- static u64 smcd_get_local_gid(struct smcd_dev *smcd)
- {
- return ism_get_local_gid(smcd->priv);
- }
-
-+static u16 ism_get_chid(struct ism_dev *ism)
-+{
-+ if (!ism || !ism->pdev)
-+ return 0;
-+
-+ return to_zpci(ism->pdev)->pchid;
-+}
-+
- static u16 smcd_get_chid(struct smcd_dev *smcd)
- {
- return ism_get_chid(smcd->priv);
-diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
-index bbb64ee6afd7c..089186fe17915 100644
---- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
-+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
-@@ -4865,6 +4865,12 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
- hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
- }
-
-+static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
-+{
-+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
-+ hisi_hba->debugfs_dir = NULL;
-+}
-+
- static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
- {
- struct device *dev = hisi_hba->dev;
-@@ -4888,18 +4894,13 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
-
- for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
- if (debugfs_alloc_v3_hw(hisi_hba, i)) {
-- debugfs_remove_recursive(hisi_hba->debugfs_dir);
-+ debugfs_exit_v3_hw(hisi_hba);
- dev_dbg(dev, "failed to init debugfs!\n");
- break;
- }
- }
- }
-
--static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
--{
-- debugfs_remove_recursive(hisi_hba->debugfs_dir);
--}
--
- static int
- hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- {
-diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
-index ce9eb00e2ca04..c98346e464b48 100644
---- a/drivers/scsi/ibmvscsi/ibmvfc.c
-+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
-@@ -22,7 +22,6 @@
- #include <linux/bsg-lib.h>
- #include <asm/firmware.h>
- #include <asm/irq.h>
--#include <asm/rtas.h>
- #include <asm/vio.h>
- #include <scsi/scsi.h>
- #include <scsi/scsi_cmnd.h>
-@@ -1519,7 +1518,11 @@ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
- unsigned long flags;
-
- spin_lock_irqsave(&queue->l_lock, flags);
-- BUG_ON(list_empty(&queue->free));
-+ if (list_empty(&queue->free)) {
-+ ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
-+ spin_unlock_irqrestore(&queue->l_lock, flags);
-+ return NULL;
-+ }
- evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
- atomic_set(&evt->free, 0);
- list_del(&evt->queue_list);
-@@ -1948,9 +1951,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
- if (vhost->using_channels) {
- scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
- evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
-+ if (!evt)
-+ return SCSI_MLQUEUE_HOST_BUSY;
-+
- evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
-- } else
-+ } else {
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt)
-+ return SCSI_MLQUEUE_HOST_BUSY;
-+ }
-
- ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
- evt->cmnd = cmnd;
-@@ -2038,6 +2047,11 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
-
- vhost->aborting_passthru = 1;
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
-+ return -ENOMEM;
-+ }
-+
- ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
-
- tmf = &evt->iu.tmf;
-@@ -2096,6 +2110,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
- goto unlock_out;
-
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ rc = -ENOMEM;
-+ goto unlock_out;
-+ }
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
- plogi = &evt->iu.plogi;
- memset(plogi, 0, sizeof(*plogi));
-@@ -2214,6 +2232,11 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
- }
-
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
-+ rc = -ENOMEM;
-+ goto out;
-+ }
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
- mad = &evt->iu.passthru;
-
-@@ -2302,6 +2325,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
- else
- evt = ibmvfc_get_event(&vhost->crq);
-
-+ if (!evt) {
-+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
-+ return -ENOMEM;
-+ }
-+
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
- tmf = ibmvfc_init_vfc_cmd(evt, sdev);
- iu = ibmvfc_get_fcp_iu(vhost, tmf);
-@@ -2505,6 +2533,8 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
- struct ibmvfc_tmf *tmf;
-
- evt = ibmvfc_get_event(queue);
-+ if (!evt)
-+ return NULL;
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
-
- tmf = &evt->iu.tmf;
-@@ -2561,6 +2591,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
-
- if (found_evt && vhost->logged_in) {
- evt = ibmvfc_init_tmf(&queues[i], sdev, type);
-+ if (!evt) {
-+ spin_unlock(queues[i].q_lock);
-+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
-+ return -ENOMEM;
-+ }
- evt->sync_iu = &queues[i].cancel_rsp;
- ibmvfc_send_event(evt, vhost, default_timeout);
- list_add_tail(&evt->cancel, &cancelq);
-@@ -2774,6 +2809,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
-
- if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
-+ return -ENOMEM;
-+ }
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
- tmf = ibmvfc_init_vfc_cmd(evt, sdev);
- iu = ibmvfc_get_fcp_iu(vhost, tmf);
-@@ -4032,6 +4071,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
-
- kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ return;
-+ }
- vhost->discovery_threads++;
- ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
- evt->tgt = tgt;
-@@ -4139,6 +4184,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
- kref_get(&tgt->kref);
- tgt->logo_rcvd = 0;
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ return;
-+ }
- vhost->discovery_threads++;
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
- ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
-@@ -4215,6 +4266,8 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
-
- kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt)
-+ return NULL;
- ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
- evt->tgt = tgt;
- mad = &evt->iu.implicit_logout;
-@@ -4242,6 +4295,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
- vhost->discovery_threads++;
- evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
- ibmvfc_tgt_implicit_logout_done);
-+ if (!evt) {
-+ vhost->discovery_threads--;
-+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ return;
-+ }
-
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
- if (ibmvfc_send_event(evt, vhost, default_timeout)) {
-@@ -4381,6 +4441,12 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
-
- kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ return;
-+ }
- vhost->discovery_threads++;
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
- ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
-@@ -4547,6 +4613,14 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
- vhost->abort_threads++;
- kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
-+ vhost->abort_threads--;
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
-+ return;
-+ }
- ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
-
- evt->tgt = tgt;
-@@ -4597,6 +4671,12 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
-
- kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ return;
-+ }
- vhost->discovery_threads++;
- ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
- evt->tgt = tgt;
-@@ -4700,6 +4780,12 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
-
- kref_get(&tgt->kref);
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
-+ kref_put(&tgt->kref, ibmvfc_release_tgt);
-+ __ibmvfc_reset_host(vhost);
-+ return;
-+ }
- vhost->discovery_threads++;
- evt->tgt = tgt;
- ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
-@@ -4872,6 +4958,13 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
- {
- struct ibmvfc_discover_targets *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
-+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
-+
-+ if (!evt) {
-+ ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
-+ ibmvfc_hard_reset_host(vhost);
-+ return;
-+ }
-
- ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
- mad = &evt->iu.discover_targets;
-@@ -4949,8 +5042,15 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
- struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
- unsigned int num_channels =
- min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
-+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
- int i;
-
-+ if (!evt) {
-+ ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
-+ ibmvfc_hard_reset_host(vhost);
-+ return;
-+ }
-+
- memset(setup_buf, 0, sizeof(*setup_buf));
- if (num_channels == 0)
- setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
-@@ -5012,6 +5112,13 @@ static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
- {
- struct ibmvfc_channel_enquiry *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
-+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
-+
-+ if (!evt) {
-+ ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
-+ ibmvfc_hard_reset_host(vhost);
-+ return;
-+ }
-
- ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
- mad = &evt->iu.channel_enquiry;
-@@ -5134,6 +5241,12 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
- struct ibmvfc_npiv_login_mad *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
-
-+ if (!evt) {
-+ ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
-+ ibmvfc_hard_reset_host(vhost);
-+ return;
-+ }
-+
- ibmvfc_gather_partition_info(vhost);
- ibmvfc_set_login_info(vhost);
- ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
-@@ -5198,6 +5311,12 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
- struct ibmvfc_event *evt;
-
- evt = ibmvfc_get_event(&vhost->crq);
-+ if (!evt) {
-+ ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
-+ ibmvfc_hard_reset_host(vhost);
-+ return;
-+ }
-+
- ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
-
- mad = &evt->iu.npiv_logout;
-@@ -5804,7 +5923,7 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
- irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
-- } while (rtas_busy_delay(rc));
-+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
- reg_failed:
- LEAVE;
- return rc;
-diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
-index 9c02c9523c4d4..ab06e9aeb613e 100644
---- a/drivers/scsi/libfc/fc_lport.c
-+++ b/drivers/scsi/libfc/fc_lport.c
-@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
- }
- mutex_lock(&lport->disc.disc_mutex);
- lport->ptp_rdata = fc_rport_create(lport, remote_fid);
-+ if (!lport->ptp_rdata) {
-+ printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
-+ lport->port_id);
-+ mutex_unlock(&lport->disc.disc_mutex);
-+ return;
-+ }
- kref_get(&lport->ptp_rdata->kref);
- lport->ptp_rdata->ids.port_name = remote_wwpn;
- lport->ptp_rdata->ids.node_name = remote_wwnn;
-diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
-index e1aa667dae662..3d4f13da1ae87 100644
---- a/drivers/scsi/megaraid/megaraid_sas_base.c
-+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
-@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
- * Fusion registers could intermittently return all zeroes.
- * This behavior is transient in nature and subsequent reads will
- * return valid value. As a workaround in driver, retry readl for
-- * upto three times until a non-zero value is read.
-+ * up to thirty times until a non-zero value is read.
- */
- if (instance->adapter_type == AERO_SERIES) {
- do {
- ret_val = readl(addr);
- i++;
-- } while (ret_val == 0 && i < 3);
-+ } while (ret_val == 0 && i < 30);
- return ret_val;
- } else {
- return readl(addr);
-diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
-index 61a32bf00747e..a75f670bf5519 100644
---- a/drivers/scsi/mpt3sas/mpt3sas_base.c
-+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
-@@ -223,8 +223,8 @@ _base_readl_ext_retry(const void __iomem *addr)
-
- for (i = 0 ; i < 30 ; i++) {
- ret_val = readl(addr);
-- if (ret_val == 0)
-- continue;
-+ if (ret_val != 0)
-+ break;
- }
-
- return ret_val;
-diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index dcae09a37d498..c45eef743c457 100644
---- a/drivers/scsi/qla2xxx/qla_os.c
-+++ b/drivers/scsi/qla2xxx/qla_os.c
-@@ -1836,8 +1836,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
- }
-
- spin_lock_irqsave(qp->qp_lock_ptr, *flags);
-- if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
-- sp->done(sp, res);
-+ switch (sp->type) {
-+ case SRB_SCSI_CMD:
-+ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
-+ sp->done(sp, res);
-+ break;
-+ default:
-+ if (ret_cmd)
-+ sp->done(sp, res);
-+ break;
-+ }
- } else {
- sp->done(sp, res);
- }
-diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 6effa13039f39..e17509f0b3fa8 100644
---- a/drivers/scsi/sd.c
-+++ b/drivers/scsi/sd.c
-@@ -3953,8 +3953,15 @@ static int sd_resume(struct device *dev, bool runtime)
-
- static int sd_resume_system(struct device *dev)
- {
-- if (pm_runtime_suspended(dev))
-+ if (pm_runtime_suspended(dev)) {
-+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
-+ struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
-+
-+ if (sdp && sdp->force_runtime_start_on_system_start)
-+ pm_request_resume(dev);
-+
- return 0;
-+ }
-
- return sd_resume(dev, false);
- }
-diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
-index e32a4161a8d02..c61848595da06 100644
---- a/drivers/soc/qcom/llcc-qcom.c
-+++ b/drivers/soc/qcom/llcc-qcom.c
-@@ -944,6 +944,9 @@ static int qcom_llcc_probe(struct platform_device *pdev)
- u32 version;
- struct regmap *regmap;
-
-+ if (!IS_ERR(drv_data))
-+ return -EBUSY;
-+
- drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data) {
- ret = -ENOMEM;
-diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
-index d05e0d6edf493..6f8b2f7ae3cc1 100644
---- a/drivers/soc/qcom/pmic_glink_altmode.c
-+++ b/drivers/soc/qcom/pmic_glink_altmode.c
-@@ -444,6 +444,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
- ret = fwnode_property_read_u32(fwnode, "reg", &port);
- if (ret < 0) {
- dev_err(dev, "missing reg property of %pOFn\n", fwnode);
-+ fwnode_handle_put(fwnode);
- return ret;
- }
-
-@@ -454,6 +455,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
-
- if (altmode->ports[port].altmode) {
- dev_err(dev, "multiple connector definition for port %u\n", port);
-+ fwnode_handle_put(fwnode);
- return -EINVAL;
- }
-
-@@ -465,48 +467,62 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
- alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
- alt_port->bridge.of_node = to_of_node(fwnode);
- alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
-- alt_port->bridge.type = DRM_MODE_CONNECTOR_USB;
-+ alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
-- if (ret)
-+ if (ret) {
-+ fwnode_handle_put(fwnode);
- return ret;
-+ }
-
- alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
- alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
- alt_port->dp_alt.active = 1;
-
- alt_port->typec_mux = fwnode_typec_mux_get(fwnode);
-- if (IS_ERR(alt_port->typec_mux))
-+ if (IS_ERR(alt_port->typec_mux)) {
-+ fwnode_handle_put(fwnode);
- return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
- "failed to acquire mode-switch for port: %d\n",
- port);
-+ }
-
- ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux,
- alt_port->typec_mux);
-- if (ret)
-+ if (ret) {
-+ fwnode_handle_put(fwnode);
- return ret;
-+ }
-
- alt_port->typec_retimer = fwnode_typec_retimer_get(fwnode);
-- if (IS_ERR(alt_port->typec_retimer))
-+ if (IS_ERR(alt_port->typec_retimer)) {
-+ fwnode_handle_put(fwnode);
- return dev_err_probe(dev, PTR_ERR(alt_port->typec_retimer),
- "failed to acquire retimer-switch for port: %d\n",
- port);
-+ }
-
- ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_retimer,
- alt_port->typec_retimer);
-- if (ret)
-+ if (ret) {
-+ fwnode_handle_put(fwnode);
- return ret;
-+ }
-
- alt_port->typec_switch = fwnode_typec_switch_get(fwnode);
-- if (IS_ERR(alt_port->typec_switch))
-+ if (IS_ERR(alt_port->typec_switch)) {
-+ fwnode_handle_put(fwnode);
- return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch),
- "failed to acquire orientation-switch for port: %d\n",
- port);
-+ }
-
- ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch,
- alt_port->typec_switch);
-- if (ret)
-+ if (ret) {
-+ fwnode_handle_put(fwnode);
- return ret;
-+ }
- }
-
- altmode->client = devm_pmic_glink_register_client(dev,
-diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
-index 2a1096dab63d3..9ebdd0cd0b1cf 100644
---- a/drivers/soundwire/dmi-quirks.c
-+++ b/drivers/soundwire/dmi-quirks.c
-@@ -141,7 +141,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-- DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
- },
- .driver_data = (void *)hp_omen_16,
- },
-diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
-index 2c21d5b96fdce..bcbf840cd41c8 100644
---- a/drivers/spi/Kconfig
-+++ b/drivers/spi/Kconfig
-@@ -1157,6 +1157,7 @@ config SPI_XTENSA_XTFPGA
- config SPI_ZYNQ_QSPI
- tristate "Xilinx Zynq QSPI controller"
- depends on ARCH_ZYNQ || COMPILE_TEST
-+ depends on SPI_MEM
- help
- This enables support for the Zynq Quad SPI controller
- in master mode.
-diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
-index c964f41dcc428..168eff721ed37 100644
---- a/drivers/spi/spi-nxp-fspi.c
-+++ b/drivers/spi/spi-nxp-fspi.c
-@@ -759,7 +759,7 @@ static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
- f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
- len : NXP_FSPI_MIN_IOMAP;
-
-- f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
-+ f->ahb_addr = ioremap(f->memmap_phy + f->memmap_start,
- f->memmap_len);
-
- if (!f->ahb_addr) {
-diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
-index e5cd82eb9e549..ddf1c684bcc7d 100644
---- a/drivers/spi/spi-omap2-mcspi.c
-+++ b/drivers/spi/spi-omap2-mcspi.c
-@@ -117,7 +117,7 @@ struct omap2_mcspi_regs {
-
- struct omap2_mcspi {
- struct completion txdone;
-- struct spi_master *master;
-+ struct spi_controller *ctlr;
- /* Virtual base address of the controller */
- void __iomem *base;
- unsigned long phys;
-@@ -125,10 +125,12 @@ struct omap2_mcspi {
- struct omap2_mcspi_dma *dma_channels;
- struct device *dev;
- struct omap2_mcspi_regs ctx;
-+ struct clk *ref_clk;
- int fifo_depth;
-- bool slave_aborted;
-+ bool target_aborted;
- unsigned int pin_dir:1;
- size_t max_xfer_len;
-+ u32 ref_clk_hz;
- };
-
- struct omap2_mcspi_cs {
-@@ -141,17 +143,17 @@ struct omap2_mcspi_cs {
- u32 chconf0, chctrl0;
- };
-
--static inline void mcspi_write_reg(struct spi_master *master,
-+static inline void mcspi_write_reg(struct spi_controller *ctlr,
- int idx, u32 val)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
-
- writel_relaxed(val, mcspi->base + idx);
- }
-
--static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
-+static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
-
- return readl_relaxed(mcspi->base + idx);
- }
-@@ -235,7 +237,7 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
-
- static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
- u32 l;
-
- /* The controller handles the inverted chip selects
-@@ -266,24 +268,24 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
- }
- }
-
--static void omap2_mcspi_set_mode(struct spi_master *master)
-+static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- u32 l;
-
- /*
-- * Choose master or slave mode
-+ * Choose host or target mode
- */
-- l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
-+ l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
- l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
-- if (spi_controller_is_slave(master)) {
-+ if (spi_controller_is_target(ctlr)) {
- l |= (OMAP2_MCSPI_MODULCTRL_MS);
- } else {
- l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
- l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
- }
-- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
-
- ctx->modulctrl = l;
- }
-@@ -291,14 +293,14 @@ static void omap2_mcspi_set_mode(struct spi_master *master)
- static void omap2_mcspi_set_fifo(const struct spi_device *spi,
- struct spi_transfer *t, int enable)
- {
-- struct spi_master *master = spi->master;
-+ struct spi_controller *ctlr = spi->controller;
- struct omap2_mcspi_cs *cs = spi->controller_state;
- struct omap2_mcspi *mcspi;
- unsigned int wcnt;
- int max_fifo_depth, bytes_per_word;
- u32 chconf, xferlevel;
-
-- mcspi = spi_master_get_devdata(master);
-+ mcspi = spi_controller_get_devdata(ctlr);
-
- chconf = mcspi_cached_chconf0(spi);
- if (enable) {
-@@ -326,7 +328,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
- xferlevel |= bytes_per_word - 1;
- }
-
-- mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
- mcspi_write_chconf0(spi, chconf);
- mcspi->fifo_depth = max_fifo_depth;
-
-@@ -364,9 +366,9 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
- static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
- struct completion *x)
- {
-- if (spi_controller_is_slave(mcspi->master)) {
-+ if (spi_controller_is_target(mcspi->ctlr)) {
- if (wait_for_completion_interruptible(x) ||
-- mcspi->slave_aborted)
-+ mcspi->target_aborted)
- return -EINTR;
- } else {
- wait_for_completion(x);
-@@ -378,7 +380,7 @@ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
- static void omap2_mcspi_rx_callback(void *data)
- {
- struct spi_device *spi = data;
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
- struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
-
- /* We must disable the DMA RX request */
-@@ -390,7 +392,7 @@ static void omap2_mcspi_rx_callback(void *data)
- static void omap2_mcspi_tx_callback(void *data)
- {
- struct spi_device *spi = data;
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
- struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
-
- /* We must disable the DMA TX request */
-@@ -407,7 +409,7 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
- struct omap2_mcspi_dma *mcspi_dma;
- struct dma_async_tx_descriptor *tx;
-
-- mcspi = spi_master_get_devdata(spi->master);
-+ mcspi = spi_controller_get_devdata(spi->controller);
- mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
-
- dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
-@@ -445,13 +447,13 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
- void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
- struct dma_async_tx_descriptor *tx;
-
-- mcspi = spi_master_get_devdata(spi->master);
-+ mcspi = spi_controller_get_devdata(spi->controller);
- mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
- count = xfer->len;
-
- /*
- * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
-- * it mentions reducing DMA transfer length by one element in master
-+ * it mentions reducing DMA transfer length by one element in host
- * normal mode.
- */
- if (mcspi->fifo_depth == 0)
-@@ -514,7 +516,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
- omap2_mcspi_set_dma_req(spi, 1, 1);
-
- ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
-- if (ret || mcspi->slave_aborted) {
-+ if (ret || mcspi->target_aborted) {
- dmaengine_terminate_sync(mcspi_dma->dma_rx);
- omap2_mcspi_set_dma_req(spi, 1, 0);
- return 0;
-@@ -590,7 +592,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
- void __iomem *irqstat_reg;
- int wait_res;
-
-- mcspi = spi_master_get_devdata(spi->master);
-+ mcspi = spi_controller_get_devdata(spi->controller);
- mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
-
- if (cs->word_len <= 8) {
-@@ -617,14 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
-
-- mcspi->slave_aborted = false;
-+ mcspi->target_aborted = false;
- reinit_completion(&mcspi_dma->dma_tx_completion);
- reinit_completion(&mcspi_dma->dma_rx_completion);
- reinit_completion(&mcspi->txdone);
- if (tx) {
-- /* Enable EOW IRQ to know end of tx in slave mode */
-- if (spi_controller_is_slave(spi->master))
-- mcspi_write_reg(spi->master,
-+ /* Enable EOW IRQ to know end of tx in target mode */
-+ if (spi_controller_is_target(spi->controller))
-+ mcspi_write_reg(spi->controller,
- OMAP2_MCSPI_IRQENABLE,
- OMAP2_MCSPI_IRQSTATUS_EOW);
- omap2_mcspi_tx_dma(spi, xfer, cfg);
-@@ -637,15 +639,15 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
- int ret;
-
- ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
-- if (ret || mcspi->slave_aborted) {
-+ if (ret || mcspi->target_aborted) {
- dmaengine_terminate_sync(mcspi_dma->dma_tx);
- omap2_mcspi_set_dma_req(spi, 0, 0);
- return 0;
- }
-
-- if (spi_controller_is_slave(mcspi->master)) {
-+ if (spi_controller_is_target(mcspi->ctlr)) {
- ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
-- if (ret || mcspi->slave_aborted)
-+ if (ret || mcspi->target_aborted)
- return 0;
- }
-
-@@ -656,7 +658,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
- OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
- dev_err(&spi->dev, "EOW timed out\n");
-
-- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
-+ mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
- OMAP2_MCSPI_IRQSTATUS_EOW);
- }
-
-@@ -880,12 +882,12 @@ out:
- return count - c;
- }
-
--static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
-+static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
- {
- u32 div;
-
- for (div = 0; div < 15; div++)
-- if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
-+ if (speed_hz >= (ref_clk_hz >> div))
- return div;
-
- return 15;
-@@ -897,11 +899,11 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
- {
- struct omap2_mcspi_cs *cs = spi->controller_state;
- struct omap2_mcspi *mcspi;
-- u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
-+ u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
- u8 word_len = spi->bits_per_word;
- u32 speed_hz = spi->max_speed_hz;
-
-- mcspi = spi_master_get_devdata(spi->master);
-+ mcspi = spi_controller_get_devdata(spi->controller);
-
- if (t != NULL && t->bits_per_word)
- word_len = t->bits_per_word;
-@@ -911,14 +913,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
- if (t && t->speed_hz)
- speed_hz = t->speed_hz;
-
-- speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
-- if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
-- clkd = omap2_mcspi_calc_divisor(speed_hz);
-- speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
-+ ref_clk_hz = mcspi->ref_clk_hz;
-+ speed_hz = min_t(u32, speed_hz, ref_clk_hz);
-+ if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
-+ clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
-+ speed_hz = ref_clk_hz >> clkd;
- clkg = 0;
- } else {
-- div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
-- speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
-+ div = (ref_clk_hz + speed_hz - 1) / speed_hz;
-+ speed_hz = ref_clk_hz / div;
- clkd = (div - 1) & 0xf;
- extclk = (div - 1) >> 4;
- clkg = OMAP2_MCSPI_CHCONF_CLKG;
-@@ -926,7 +929,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
-
- l = mcspi_cached_chconf0(spi);
-
-- /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
-+ /* standard 4-wire host mode: SCK, MOSI/out, MISO/in, nCS
- * REVISIT: this controller could support SPI_3WIRE mode.
- */
- if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
-@@ -1017,13 +1020,13 @@ no_dma:
- return ret;
- }
-
--static void omap2_mcspi_release_dma(struct spi_master *master)
-+static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- struct omap2_mcspi_dma *mcspi_dma;
- int i;
-
-- for (i = 0; i < master->num_chipselect; i++) {
-+ for (i = 0; i < ctlr->num_chipselect; i++) {
- mcspi_dma = &mcspi->dma_channels[i];
-
- if (mcspi_dma->dma_rx) {
-@@ -1054,7 +1057,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
- {
- bool initial_setup = false;
- int ret;
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
- struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- struct omap2_mcspi_cs *cs = spi->controller_state;
-
-@@ -1096,24 +1099,24 @@ static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
- struct omap2_mcspi *mcspi = data;
- u32 irqstat;
-
-- irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
-+ irqstat = mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
- if (!irqstat)
- return IRQ_NONE;
-
-- /* Disable IRQ and wakeup slave xfer task */
-- mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
-+ /* Disable IRQ and wakeup target xfer task */
-+ mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
- if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
- complete(&mcspi->txdone);
-
- return IRQ_HANDLED;
- }
-
--static int omap2_mcspi_slave_abort(struct spi_master *master)
-+static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
-
-- mcspi->slave_aborted = true;
-+ mcspi->target_aborted = true;
- complete(&mcspi_dma->dma_rx_completion);
- complete(&mcspi_dma->dma_tx_completion);
- complete(&mcspi->txdone);
-@@ -1121,7 +1124,7 @@ static int omap2_mcspi_slave_abort(struct spi_master *master)
- return 0;
- }
-
--static int omap2_mcspi_transfer_one(struct spi_master *master,
-+static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
- struct spi_device *spi,
- struct spi_transfer *t)
- {
-@@ -1129,7 +1132,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
- /* We only enable one channel at a time -- the one whose message is
-	 * at the head of the queue -- although this controller would gladly
- * arbitrate among multiple channels. This corresponds to "single
-- * channel" master mode. As a side effect, we need to manage the
-+ * channel" host mode. As a side effect, we need to manage the
- * chipselect with the FORCE bit ... CS != channel enable.
- */
-
-@@ -1141,13 +1144,13 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
- int status = 0;
- u32 chconf;
-
-- mcspi = spi_master_get_devdata(master);
-+ mcspi = spi_controller_get_devdata(ctlr);
- mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
- cs = spi->controller_state;
- cd = spi->controller_data;
-
- /*
-- * The slave driver could have changed spi->mode in which case
-+ * The target driver could have changed spi->mode in which case
-	 * If so, set par_override (even though it's not a parity issue) so
- * If so, set par_override (even though its not a parity issue) so
- * omap2_mcspi_setup_transfer will be called to configure the hardware
-@@ -1175,7 +1178,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
- if (cd && cd->cs_per_word) {
- chconf = mcspi->ctx.modulctrl;
- chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
-- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
- mcspi->ctx.modulctrl =
- mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
- }
-@@ -1201,8 +1204,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
- unsigned count;
-
- if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
-- master->cur_msg_mapped &&
-- master->can_dma(master, spi, t))
-+ ctlr->cur_msg_mapped &&
-+ ctlr->can_dma(ctlr, spi, t))
- omap2_mcspi_set_fifo(spi, t, 1);
-
- omap2_mcspi_set_enable(spi, 1);
-@@ -1213,8 +1216,8 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
- + OMAP2_MCSPI_TX0);
-
- if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
-- master->cur_msg_mapped &&
-- master->can_dma(master, spi, t))
-+ ctlr->cur_msg_mapped &&
-+ ctlr->can_dma(ctlr, spi, t))
- count = omap2_mcspi_txrx_dma(spi, t);
- else
- count = omap2_mcspi_txrx_pio(spi, t);
-@@ -1240,7 +1243,7 @@ out:
- if (cd && cd->cs_per_word) {
- chconf = mcspi->ctx.modulctrl;
- chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
-- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
- mcspi->ctx.modulctrl =
- mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
- }
-@@ -1256,10 +1259,10 @@ out:
- return status;
- }
-
--static int omap2_mcspi_prepare_message(struct spi_master *master,
-+static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
- struct spi_message *msg)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- struct omap2_mcspi_cs *cs;
-
-@@ -1283,29 +1286,29 @@ static int omap2_mcspi_prepare_message(struct spi_master *master,
- return 0;
- }
-
--static bool omap2_mcspi_can_dma(struct spi_master *master,
-+static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
- struct spi_device *spi,
- struct spi_transfer *xfer)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
- struct omap2_mcspi_dma *mcspi_dma =
- &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
-
- if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
- return false;
-
-- if (spi_controller_is_slave(master))
-+ if (spi_controller_is_target(ctlr))
- return true;
-
-- master->dma_rx = mcspi_dma->dma_rx;
-- master->dma_tx = mcspi_dma->dma_tx;
-+ ctlr->dma_rx = mcspi_dma->dma_rx;
-+ ctlr->dma_tx = mcspi_dma->dma_tx;
-
- return (xfer->len >= DMA_MIN_BYTES);
- }
-
- static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
- {
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
- struct omap2_mcspi_dma *mcspi_dma =
- &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
-
-@@ -1317,7 +1320,7 @@ static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
-
- static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
- {
-- struct spi_master *master = mcspi->master;
-+ struct spi_controller *ctlr = mcspi->ctlr;
- struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- int ret = 0;
-
-@@ -1325,11 +1328,11 @@ static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
- if (ret < 0)
- return ret;
-
-- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
- OMAP2_MCSPI_WAKEUPENABLE_WKEN);
- ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
-
-- omap2_mcspi_set_mode(master);
-+ omap2_mcspi_set_mode(ctlr);
- pm_runtime_mark_last_busy(mcspi->dev);
- pm_runtime_put_autosuspend(mcspi->dev);
- return 0;
-@@ -1353,8 +1356,8 @@ static int omap_mcspi_runtime_suspend(struct device *dev)
- */
- static int omap_mcspi_runtime_resume(struct device *dev)
- {
-- struct spi_master *master = dev_get_drvdata(dev);
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct spi_controller *ctlr = dev_get_drvdata(dev);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- struct omap2_mcspi_cs *cs;
- int error;
-@@ -1364,8 +1367,8 @@ static int omap_mcspi_runtime_resume(struct device *dev)
- dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
-
- /* McSPI: context restore */
-- mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
-- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
-+ mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
-
- list_for_each_entry(cs, &ctx->cs, node) {
- /*
-@@ -1420,7 +1423,7 @@ MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
-
- static int omap2_mcspi_probe(struct platform_device *pdev)
- {
-- struct spi_master *master;
-+ struct spi_controller *ctlr;
- const struct omap2_mcspi_platform_config *pdata;
- struct omap2_mcspi *mcspi;
- struct resource *r;
-@@ -1430,32 +1433,30 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
- const struct of_device_id *match;
-
- if (of_property_read_bool(node, "spi-slave"))
-- master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
-+ ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
- else
-- master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
-- if (!master)
-+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
-+ if (!ctlr)
- return -ENOMEM;
-
- /* the spi->mode bits understood by this driver: */
-- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
-- master->setup = omap2_mcspi_setup;
-- master->auto_runtime_pm = true;
-- master->prepare_message = omap2_mcspi_prepare_message;
-- master->can_dma = omap2_mcspi_can_dma;
-- master->transfer_one = omap2_mcspi_transfer_one;
-- master->set_cs = omap2_mcspi_set_cs;
-- master->cleanup = omap2_mcspi_cleanup;
-- master->slave_abort = omap2_mcspi_slave_abort;
-- master->dev.of_node = node;
-- master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
-- master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
-- master->use_gpio_descriptors = true;
--
-- platform_set_drvdata(pdev, master);
--
-- mcspi = spi_master_get_devdata(master);
-- mcspi->master = master;
-+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
-+ ctlr->setup = omap2_mcspi_setup;
-+ ctlr->auto_runtime_pm = true;
-+ ctlr->prepare_message = omap2_mcspi_prepare_message;
-+ ctlr->can_dma = omap2_mcspi_can_dma;
-+ ctlr->transfer_one = omap2_mcspi_transfer_one;
-+ ctlr->set_cs = omap2_mcspi_set_cs;
-+ ctlr->cleanup = omap2_mcspi_cleanup;
-+ ctlr->target_abort = omap2_mcspi_target_abort;
-+ ctlr->dev.of_node = node;
-+ ctlr->use_gpio_descriptors = true;
-+
-+ platform_set_drvdata(pdev, ctlr);
-+
-+ mcspi = spi_controller_get_devdata(ctlr);
-+ mcspi->ctlr = ctlr;
-
- match = of_match_device(omap_mcspi_of_match, &pdev->dev);
- if (match) {
-@@ -1463,24 +1464,24 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
- pdata = match->data;
-
- of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
-- master->num_chipselect = num_cs;
-+ ctlr->num_chipselect = num_cs;
- if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
- mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
- } else {
- pdata = dev_get_platdata(&pdev->dev);
-- master->num_chipselect = pdata->num_cs;
-+ ctlr->num_chipselect = pdata->num_cs;
- mcspi->pin_dir = pdata->pin_dir;
- }
- regs_offset = pdata->regs_offset;
- if (pdata->max_xfer_len) {
- mcspi->max_xfer_len = pdata->max_xfer_len;
-- master->max_transfer_size = omap2_mcspi_max_xfer_size;
-+ ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
- }
-
- mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
- if (IS_ERR(mcspi->base)) {
- status = PTR_ERR(mcspi->base);
-- goto free_master;
-+ goto free_ctlr;
- }
- mcspi->phys = r->start + regs_offset;
- mcspi->base += regs_offset;
-@@ -1489,36 +1490,44 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
-
- INIT_LIST_HEAD(&mcspi->ctx.cs);
-
-- mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
-+ mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
- sizeof(struct omap2_mcspi_dma),
- GFP_KERNEL);
- if (mcspi->dma_channels == NULL) {
- status = -ENOMEM;
-- goto free_master;
-+ goto free_ctlr;
- }
-
-- for (i = 0; i < master->num_chipselect; i++) {
-+ for (i = 0; i < ctlr->num_chipselect; i++) {
- sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
- sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
-
- status = omap2_mcspi_request_dma(mcspi,
- &mcspi->dma_channels[i]);
- if (status == -EPROBE_DEFER)
-- goto free_master;
-+ goto free_ctlr;
- }
-
- status = platform_get_irq(pdev, 0);
- if (status < 0)
-- goto free_master;
-+ goto free_ctlr;
- init_completion(&mcspi->txdone);
- status = devm_request_irq(&pdev->dev, status,
- omap2_mcspi_irq_handler, 0, pdev->name,
- mcspi);
- if (status) {
- dev_err(&pdev->dev, "Cannot request IRQ");
-- goto free_master;
-+ goto free_ctlr;
- }
-
-+ mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
-+ if (mcspi->ref_clk)
-+ mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
-+ else
-+ mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
-+ ctlr->max_speed_hz = mcspi->ref_clk_hz;
-+ ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
-+
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
- pm_runtime_enable(&pdev->dev);
-@@ -1527,7 +1536,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
- if (status < 0)
- goto disable_pm;
-
-- status = devm_spi_register_controller(&pdev->dev, master);
-+ status = devm_spi_register_controller(&pdev->dev, ctlr);
- if (status < 0)
- goto disable_pm;
-
-@@ -1537,18 +1546,18 @@ disable_pm:
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
--free_master:
-- omap2_mcspi_release_dma(master);
-- spi_master_put(master);
-+free_ctlr:
-+ omap2_mcspi_release_dma(ctlr);
-+ spi_controller_put(ctlr);
- return status;
- }
-
- static void omap2_mcspi_remove(struct platform_device *pdev)
- {
-- struct spi_master *master = platform_get_drvdata(pdev);
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
-
-- omap2_mcspi_release_dma(master);
-+ omap2_mcspi_release_dma(ctlr);
-
- pm_runtime_dont_use_autosuspend(mcspi->dev);
- pm_runtime_put_sync(mcspi->dev);
-@@ -1560,8 +1569,8 @@ MODULE_ALIAS("platform:omap2_mcspi");
-
- static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
- {
-- struct spi_master *master = dev_get_drvdata(dev);
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct spi_controller *ctlr = dev_get_drvdata(dev);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- int error;
-
- error = pinctrl_pm_select_sleep_state(dev);
-@@ -1569,9 +1578,9 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
- dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
- __func__, error);
-
-- error = spi_master_suspend(master);
-+ error = spi_controller_suspend(ctlr);
- if (error)
-- dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
-+ dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
- __func__, error);
-
- return pm_runtime_force_suspend(dev);
-@@ -1579,13 +1588,13 @@ static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
-
- static int __maybe_unused omap2_mcspi_resume(struct device *dev)
- {
-- struct spi_master *master = dev_get_drvdata(dev);
-- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-+ struct spi_controller *ctlr = dev_get_drvdata(dev);
-+ struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
- int error;
-
-- error = spi_master_resume(master);
-+ error = spi_controller_resume(ctlr);
- if (error)
-- dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
-+ dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
- __func__, error);
-
- return pm_runtime_force_resume(dev);
-diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
-index 4d6db6182c5ed..f5cd365c913a8 100644
---- a/drivers/spi/spi-tegra20-slink.c
-+++ b/drivers/spi/spi-tegra20-slink.c
-@@ -1086,6 +1086,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
- reset_control_deassert(tspi->rst);
-
- spi_irq = platform_get_irq(pdev, 0);
-+ if (spi_irq < 0)
-+ return spi_irq;
- tspi->irq = spi_irq;
- ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
- tegra_slink_isr_thread, IRQF_ONESHOT,
-diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
-index 8d6304cb061ec..399e81d37b3ba 100644
---- a/drivers/spi/spi.c
-+++ b/drivers/spi/spi.c
-@@ -3323,33 +3323,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
- }
- EXPORT_SYMBOL_GPL(spi_unregister_controller);
-
-+static inline int __spi_check_suspended(const struct spi_controller *ctlr)
-+{
-+ return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
-+}
-+
-+static inline void __spi_mark_suspended(struct spi_controller *ctlr)
-+{
-+ mutex_lock(&ctlr->bus_lock_mutex);
-+ ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
-+ mutex_unlock(&ctlr->bus_lock_mutex);
-+}
-+
-+static inline void __spi_mark_resumed(struct spi_controller *ctlr)
-+{
-+ mutex_lock(&ctlr->bus_lock_mutex);
-+ ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
-+ mutex_unlock(&ctlr->bus_lock_mutex);
-+}
-+
- int spi_controller_suspend(struct spi_controller *ctlr)
- {
-- int ret;
-+ int ret = 0;
-
- /* Basically no-ops for non-queued controllers */
-- if (!ctlr->queued)
-- return 0;
--
-- ret = spi_stop_queue(ctlr);
-- if (ret)
-- dev_err(&ctlr->dev, "queue stop failed\n");
-+ if (ctlr->queued) {
-+ ret = spi_stop_queue(ctlr);
-+ if (ret)
-+ dev_err(&ctlr->dev, "queue stop failed\n");
-+ }
-
-+ __spi_mark_suspended(ctlr);
- return ret;
- }
- EXPORT_SYMBOL_GPL(spi_controller_suspend);
-
- int spi_controller_resume(struct spi_controller *ctlr)
- {
-- int ret;
--
-- if (!ctlr->queued)
-- return 0;
-+ int ret = 0;
-
-- ret = spi_start_queue(ctlr);
-- if (ret)
-- dev_err(&ctlr->dev, "queue restart failed\n");
-+ __spi_mark_resumed(ctlr);
-
-+ if (ctlr->queued) {
-+ ret = spi_start_queue(ctlr);
-+ if (ret)
-+ dev_err(&ctlr->dev, "queue restart failed\n");
-+ }
- return ret;
- }
- EXPORT_SYMBOL_GPL(spi_controller_resume);
-@@ -4153,8 +4172,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
- ctlr->cur_msg = msg;
- ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
- if (ret)
-- goto out;
--
-+ dev_err(&ctlr->dev, "noqueue transfer failed\n");
- ctlr->cur_msg = NULL;
- ctlr->fallback = false;
-
-@@ -4170,7 +4188,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
- spi_idle_runtime_pm(ctlr);
- }
-
--out:
- mutex_unlock(&ctlr->io_mutex);
- }
-
-@@ -4193,6 +4210,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
- int status;
- struct spi_controller *ctlr = spi->controller;
-
-+ if (__spi_check_suspended(ctlr)) {
-+ dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
-+ return -ESHUTDOWN;
-+ }
-+
- status = __spi_validate(spi, message);
- if (status != 0)
- return status;
-diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
-index b696bf884cbd6..32af0e96e762b 100644
---- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
-+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
-@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
- {
- struct cedrus_dev *dev = dev_get_drvdata(device);
-
-- reset_control_assert(dev->rstc);
--
- clk_disable_unprepare(dev->ram_clk);
- clk_disable_unprepare(dev->mod_clk);
- clk_disable_unprepare(dev->ahb_clk);
-
-+ reset_control_assert(dev->rstc);
-+
- return 0;
- }
-
-@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
- struct cedrus_dev *dev = dev_get_drvdata(device);
- int ret;
-
-+ ret = reset_control_reset(dev->rstc);
-+ if (ret) {
-+ dev_err(dev->dev, "Failed to apply reset\n");
-+
-+ return ret;
-+ }
-+
- ret = clk_prepare_enable(dev->ahb_clk);
- if (ret) {
- dev_err(dev->dev, "Failed to enable AHB clock\n");
-
-- return ret;
-+ goto err_rst;
- }
-
- ret = clk_prepare_enable(dev->mod_clk);
-@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
- goto err_mod_clk;
- }
-
-- ret = reset_control_reset(dev->rstc);
-- if (ret) {
-- dev_err(dev->dev, "Failed to apply reset\n");
--
-- goto err_ram_clk;
-- }
--
- return 0;
-
--err_ram_clk:
-- clk_disable_unprepare(dev->ram_clk);
- err_mod_clk:
- clk_disable_unprepare(dev->mod_clk);
- err_ahb_clk:
- clk_disable_unprepare(dev->ahb_clk);
-+err_rst:
-+ reset_control_assert(dev->rstc);
-
- return ret;
- }
-diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
-index 36243a3972fd7..5ac5cb60bae67 100644
---- a/drivers/thermal/intel/intel_powerclamp.c
-+++ b/drivers/thermal/intel/intel_powerclamp.c
-@@ -256,7 +256,7 @@ skip_limit_set:
-
- static const struct kernel_param_ops max_idle_ops = {
- .set = max_idle_set,
-- .get = param_get_int,
-+ .get = param_get_byte,
- };
-
- module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644);
-diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
-index 843214d30bd8b..8b0edb2048443 100644
---- a/drivers/thermal/mediatek/auxadc_thermal.c
-+++ b/drivers/thermal/mediatek/auxadc_thermal.c
-@@ -1267,7 +1267,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
-
- mtk_thermal_turn_on_buffer(mt, apmixed_base);
-
-- if (mt->conf->version != MTK_THERMAL_V2)
-+ if (mt->conf->version != MTK_THERMAL_V1)
- mtk_thermal_release_periodic_ts(mt, auxadc_base);
-
- if (mt->conf->version == MTK_THERMAL_V1)
-diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
-index 58533ea75cd92..e6f3166a9208f 100644
---- a/drivers/thermal/thermal_core.c
-+++ b/drivers/thermal/thermal_core.c
-@@ -689,7 +689,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
- if (result)
- goto release_ida;
-
-- sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
-+ snprintf(dev->attr_name, sizeof(dev->attr_name), "cdev%d_trip_point",
-+ dev->id);
- sysfs_attr_init(&dev->attr.attr);
- dev->attr.attr.name = dev->attr_name;
- dev->attr.attr.mode = 0444;
-@@ -698,7 +699,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
- if (result)
- goto remove_symbol_link;
-
-- sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
-+ snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
-+ "cdev%d_weight", dev->id);
- sysfs_attr_init(&dev->weight_attr.attr);
- dev->weight_attr.attr.name = dev->weight_attr_name;
- dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;
-diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
-index 024e2e365a26b..597ac4144e331 100644
---- a/drivers/thermal/thermal_trip.c
-+++ b/drivers/thermal/thermal_trip.c
-@@ -55,6 +55,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
- {
- struct thermal_trip trip;
- int low = -INT_MAX, high = INT_MAX;
-+ bool same_trip = false;
- int i, ret;
-
- lockdep_assert_held(&tz->lock);
-@@ -63,6 +64,7 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
- return;
-
- for (i = 0; i < tz->num_trips; i++) {
-+ bool low_set = false;
- int trip_low;
-
- ret = __thermal_zone_get_trip(tz, i, &trip);
-@@ -71,18 +73,31 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
-
- trip_low = trip.temperature - trip.hysteresis;
-
-- if (trip_low < tz->temperature && trip_low > low)
-+ if (trip_low < tz->temperature && trip_low > low) {
- low = trip_low;
-+ low_set = true;
-+ same_trip = false;
-+ }
-
- if (trip.temperature > tz->temperature &&
-- trip.temperature < high)
-+ trip.temperature < high) {
- high = trip.temperature;
-+ same_trip = low_set;
-+ }
- }
-
- /* No need to change trip points */
- if (tz->prev_low_trip == low && tz->prev_high_trip == high)
- return;
-
-+ /*
-+ * If "high" and "low" are the same, skip the change unless this is the
-+ * first time.
-+ */
-+ if (same_trip && (tz->prev_low_trip != -INT_MAX ||
-+ tz->prev_high_trip != INT_MAX))
-+ return;
-+
- tz->prev_low_trip = low;
- tz->prev_high_trip = high;
-
-diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
-index 488138a28ae13..e6bfa63b40aee 100644
---- a/drivers/thunderbolt/quirks.c
-+++ b/drivers/thunderbolt/quirks.c
-@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
- {
- struct tb_port *port;
-
-+ if (tb_switch_is_icm(sw))
-+ return;
-+
- tb_switch_for_each_port(sw, port) {
- if (!tb_port_is_usb3_down(port))
- continue;
-diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
-index bd5815f8f23bd..509b99af5087b 100644
---- a/drivers/thunderbolt/switch.c
-+++ b/drivers/thunderbolt/switch.c
-@@ -1082,7 +1082,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
- * Only set bonding if the link was not already bonded. This
- * avoids the lane adapter to re-enter bonding state.
- */
-- if (width == TB_LINK_WIDTH_SINGLE) {
-+ if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
- ret = tb_port_set_lane_bonding(port, true);
- if (ret)
- goto err_lane1;
-diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
-index 98764e740c078..34c01874f45be 100644
---- a/drivers/tty/hvc/hvc_xen.c
-+++ b/drivers/tty/hvc/hvc_xen.c
-@@ -377,18 +377,21 @@ void xen_console_resume(void)
- #ifdef CONFIG_HVC_XEN_FRONTEND
- static void xencons_disconnect_backend(struct xencons_info *info)
- {
-- if (info->irq > 0)
-- unbind_from_irqhandler(info->irq, NULL);
-- info->irq = 0;
-+ if (info->hvc != NULL)
-+ hvc_remove(info->hvc);
-+ info->hvc = NULL;
-+ if (info->irq > 0) {
-+ evtchn_put(info->evtchn);
-+ info->irq = 0;
-+ info->evtchn = 0;
-+ }
-+ /* evtchn_put() will also close it so this is only an error path */
- if (info->evtchn > 0)
- xenbus_free_evtchn(info->xbdev, info->evtchn);
- info->evtchn = 0;
- if (info->gntref > 0)
- gnttab_free_grant_references(info->gntref);
- info->gntref = 0;
-- if (info->hvc != NULL)
-- hvc_remove(info->hvc);
-- info->hvc = NULL;
- }
-
- static void xencons_free(struct xencons_info *info)
-@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
- if (ret)
- return ret;
- info->evtchn = evtchn;
-- irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
-+ irq = bind_evtchn_to_irq_lateeoi(evtchn);
- if (irq < 0)
- return irq;
- info->irq = irq;
-@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
- if (dev->state == XenbusStateClosed)
- break;
- fallthrough; /* Missed the backend's CLOSING state */
-- case XenbusStateClosing:
-+ case XenbusStateClosing: {
-+ struct xencons_info *info = dev_get_drvdata(&dev->dev);
-+
-+ /*
-+ * Don't tear down the evtchn and grant ref before the other
-+ * end has disconnected, but do stop userspace from trying
-+ * to use the device before we allow the backend to close.
-+ */
-+ if (info->hvc) {
-+ hvc_remove(info->hvc);
-+ info->hvc = NULL;
-+ }
-+
- xenbus_frontend_closed(dev);
- break;
- }
-+ }
- }
-
- static const struct xenbus_device_id xencons_ids[] = {
-@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
- ops = &dom0_hvc_ops;
- r = xen_initial_domain_console_init();
- if (r < 0)
-- return r;
-+ goto register_fe;
- info = vtermno_to_xencons(HVC_COOKIE);
- } else {
- ops = &domU_hvc_ops;
-@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
- else
- r = xen_pv_console_init();
- if (r < 0)
-- return r;
-+ goto register_fe;
-
- info = vtermno_to_xencons(HVC_COOKIE);
- info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
-@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
- list_del(&info->list);
- spin_unlock_irqrestore(&xencons_lock, flags);
- if (info->irq)
-- unbind_from_irqhandler(info->irq, NULL);
-+ evtchn_put(info->evtchn);
- kfree(info);
- return r;
- }
-
- r = 0;
-+ register_fe:
- #ifdef CONFIG_HVC_XEN_FRONTEND
- r = xenbus_register_frontend(&xencons_driver);
- #endif
-diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
-index 1f3aba607cd51..0ee7531c92017 100644
---- a/drivers/tty/n_gsm.c
-+++ b/drivers/tty/n_gsm.c
-@@ -4108,6 +4108,8 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
-
- static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
- {
-+ if (dlci->gsm->dead)
-+ return -EL2HLT;
- if (dlci->adaption == 2) {
- /* Send convergence layer type 2 empty data frame. */
- gsm_modem_upd_via_data(dlci, brk);
-diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
-index 62a9bd30b4db5..bbd7914ddc9ad 100644
---- a/drivers/tty/serial/8250/8250_pci.c
-+++ b/drivers/tty/serial/8250/8250_pci.c
-@@ -2429,6 +2429,153 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
- .init = pci_oxsemi_tornado_init,
- .setup = pci_oxsemi_tornado_setup,
- },
-+ /*
-+ * Brainboxes devices - all Oxsemi based
-+ */
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4027,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4028,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4029,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4019,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4016,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4015,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x400A,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x400E,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x400C,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x400B,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x400F,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4010,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4011,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x401D,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x401E,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4013,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4017,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
-+ {
-+ .vendor = PCI_VENDOR_ID_INTASHIELD,
-+ .device = 0x4018,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ .init = pci_oxsemi_tornado_init,
-+ .setup = pci_oxsemi_tornado_setup,
-+ },
- {
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = 0x8811,
-@@ -4913,6 +5060,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
- 0, 0,
- pbn_b1_bt_1_115200 },
-
-+ /*
-+ * IntaShield IS-100
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0D60,
-+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-+ pbn_b2_1_115200 },
- /*
- * IntaShield IS-200
- */
-@@ -4925,6 +5078,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
- { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
- pbn_b2_4_115200 },
-+ /*
-+ * IntaShield IX-100
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x4027,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_oxsemi_1_15625000 },
-+ /*
-+ * IntaShield IX-200
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x4028,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_oxsemi_2_15625000 },
-+ /*
-+ * IntaShield IX-400
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x4029,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_oxsemi_4_15625000 },
- /* Brainboxes Devices */
- /*
- * Brainboxes UC-101
-@@ -4940,10 +5114,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
- pbn_b2_1_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_1_115200 },
- /*
-- * Brainboxes UC-257
-+ * Brainboxes UC-253/UC-734
- */
-- { PCI_VENDOR_ID_INTASHIELD, 0x0861,
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
- pbn_b2_2_115200 },
-@@ -4979,6 +5157,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
- pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x08E2,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x08E3,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
- /*
- * Brainboxes UC-310
- */
-@@ -4989,6 +5175,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
- /*
- * Brainboxes UC-313
- */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x08A1,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x08A2,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
- { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
-@@ -5003,6 +5197,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
- /*
- * Brainboxes UC-346
- */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0B01,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_4_115200 },
- { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
-@@ -5014,6 +5212,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
- pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0A82,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
- { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
-@@ -5026,12 +5228,94 @@ static const struct pci_device_id serial_pci_tbl[] = {
- 0, 0,
- pbn_b2_4_115200 },
- /*
-- * Brainboxes UC-420/431
-+ * Brainboxes UC-420
- */
- { PCI_VENDOR_ID_INTASHIELD, 0x0921,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
- pbn_b2_4_115200 },
-+ /*
-+ * Brainboxes UC-607
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x09A1,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x09A2,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x09A3,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ /*
-+ * Brainboxes UC-836
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0D41,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_4_115200 },
-+ /*
-+ * Brainboxes UP-189
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ /*
-+ * Brainboxes UP-200
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0B21,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0B22,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0B23,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ /*
-+ * Brainboxes UP-869
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0C01,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0C02,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0C03,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ /*
-+ * Brainboxes UP-880
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0C21,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0C22,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0C23,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_2_115200 },
- /*
- * Brainboxes PX-101
- */
-@@ -5064,7 +5348,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
- { PCI_VENDOR_ID_INTASHIELD, 0x4015,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
-- pbn_oxsemi_4_15625000 },
-+ pbn_oxsemi_2_15625000 },
- /*
- * Brainboxes PX-260/PX-701
- */
-@@ -5072,6 +5356,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
- pbn_oxsemi_4_15625000 },
-+ /*
-+ * Brainboxes PX-275/279
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x0E41,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b2_8_115200 },
- /*
- * Brainboxes PX-310
- */
-@@ -5119,16 +5410,38 @@ static const struct pci_device_id serial_pci_tbl[] = {
- 0, 0,
- pbn_oxsemi_4_15625000 },
- /*
-- * Brainboxes PX-803
-+ * Brainboxes PX-475
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x401D,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_oxsemi_1_15625000 },
-+ /*
-+ * Brainboxes PX-803/PX-857
- */
- { PCI_VENDOR_ID_INTASHIELD, 0x4009,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
-- pbn_b0_1_115200 },
-+ pbn_b0_2_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x4018,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_oxsemi_2_15625000 },
- { PCI_VENDOR_ID_INTASHIELD, 0x401E,
- PCI_ANY_ID, PCI_ANY_ID,
- 0, 0,
-- pbn_oxsemi_1_15625000 },
-+ pbn_oxsemi_2_15625000 },
-+ /*
-+ * Brainboxes PX-820
-+ */
-+ { PCI_VENDOR_ID_INTASHIELD, 0x4002,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_b0_4_115200 },
-+ { PCI_VENDOR_ID_INTASHIELD, 0x4013,
-+ PCI_ANY_ID, PCI_ANY_ID,
-+ 0, 0,
-+ pbn_oxsemi_4_15625000 },
- /*
- * Brainboxes PX-846
- */
-diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
-index 790d910dafa5d..9388b9ddea3bd 100644
---- a/drivers/tty/serial/meson_uart.c
-+++ b/drivers/tty/serial/meson_uart.c
-@@ -380,10 +380,14 @@ static void meson_uart_set_termios(struct uart_port *port,
- else
- val |= AML_UART_STOP_BIT_1SB;
-
-- if (cflags & CRTSCTS)
-- val &= ~AML_UART_TWO_WIRE_EN;
-- else
-+ if (cflags & CRTSCTS) {
-+ if (port->flags & UPF_HARD_FLOW)
-+ val &= ~AML_UART_TWO_WIRE_EN;
-+ else
-+ termios->c_cflag &= ~CRTSCTS;
-+ } else {
- val |= AML_UART_TWO_WIRE_EN;
-+ }
-
- writel(val, port->membase + AML_UART_CONTROL);
-
-@@ -705,6 +709,7 @@ static int meson_uart_probe(struct platform_device *pdev)
- u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
- int ret = 0;
- int irq;
-+ bool has_rtscts;
-
- if (pdev->dev.of_node)
- pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
-@@ -732,6 +737,7 @@ static int meson_uart_probe(struct platform_device *pdev)
- return irq;
-
- of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
-+ has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
-
- if (meson_ports[pdev->id]) {
- return dev_err_probe(&pdev->dev, -EBUSY,
-@@ -762,6 +768,8 @@ static int meson_uart_probe(struct platform_device *pdev)
- port->mapsize = resource_size(res_mem);
- port->irq = irq;
- port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
-+ if (has_rtscts)
-+ port->flags |= UPF_HARD_FLOW;
- port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
- port->dev = &pdev->dev;
- port->line = pdev->id;
-diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
-index d5ba6e90bd95f..f912f8bf1e633 100644
---- a/drivers/tty/serial/serial_core.c
-+++ b/drivers/tty/serial/serial_core.c
-@@ -146,7 +146,7 @@ static void __uart_start(struct uart_state *state)
-
- /* Increment the runtime PM usage count for the active check below */
- err = pm_runtime_get(&port_dev->dev);
-- if (err < 0) {
-+ if (err < 0 && err != -EINPROGRESS) {
- pm_runtime_put_noidle(&port_dev->dev);
- return;
- }
-diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
-index 23198e3f1461a..6b4a28bcf2f5f 100644
---- a/drivers/tty/sysrq.c
-+++ b/drivers/tty/sysrq.c
-@@ -262,13 +262,14 @@ static void sysrq_handle_showallcpus(u8 key)
- if (in_hardirq())
- regs = get_irq_regs();
-
-- pr_info("CPU%d:\n", smp_processor_id());
-+ pr_info("CPU%d:\n", get_cpu());
- if (regs)
- show_regs(regs);
- else
- show_stack(NULL, NULL, KERN_INFO);
-
- schedule_work(&sysrq_showallcpus);
-+ put_cpu();
- }
- }
-
-diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
-index 0d04287da0984..ef8741c3e6629 100644
---- a/drivers/tty/tty_jobctrl.c
-+++ b/drivers/tty/tty_jobctrl.c
-@@ -300,12 +300,7 @@ void disassociate_ctty(int on_exit)
- return;
- }
-
-- spin_lock_irq(&current->sighand->siglock);
-- put_pid(current->signal->tty_old_pgrp);
-- current->signal->tty_old_pgrp = NULL;
-- tty = tty_kref_get(current->signal->tty);
-- spin_unlock_irq(&current->sighand->siglock);
--
-+ tty = get_current_tty();
- if (tty) {
- unsigned long flags;
-
-@@ -320,6 +315,16 @@ void disassociate_ctty(int on_exit)
- tty_kref_put(tty);
- }
-
-+ /* If tty->ctrl.pgrp is not NULL, it may be assigned to
-+ * current->signal->tty_old_pgrp in a race condition and
-+ * cause a pid memleak. Release current->signal->tty_old_pgrp
-+ * after tty->ctrl.pgrp is set to NULL.
-+ */
-+ spin_lock_irq(&current->sighand->siglock);
-+ put_pid(current->signal->tty_old_pgrp);
-+ current->signal->tty_old_pgrp = NULL;
-+ spin_unlock_irq(&current->sighand->siglock);
-+
- /* Now clear signal->tty under the lock */
- read_lock(&tasklist_lock);
- session_clear_tty(task_session(current));
-diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
-index a39ed981bfd3e..5b625f20233b4 100644
---- a/drivers/tty/vcc.c
-+++ b/drivers/tty/vcc.c
-@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
- return -ENOMEM;
-
- name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
-+ if (!name) {
-+ rv = -ENOMEM;
-+ goto free_port;
-+ }
-
- rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
- ARRAY_SIZE(vcc_versions), NULL, name);
- if (rv)
-- goto free_port;
-+ goto free_name;
-
- port->vio.debug = vcc_dbg_vio;
- vcc_ldc_cfg.debug = vcc_dbg_ldc;
-
- rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
- if (rv)
-- goto free_port;
-+ goto free_name;
-
- spin_lock_init(&port->lock);
-
-@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
- goto unreg_tty;
- }
- port->domain = kstrdup(domain, GFP_KERNEL);
-+ if (!port->domain) {
-+ rv = -ENOMEM;
-+ goto unreg_tty;
-+ }
-+
-
- mdesc_release(hp);
-
-@@ -653,8 +662,9 @@ free_table:
- vcc_table_remove(port->index);
- free_ldc:
- vio_ldc_free(&port->vio);
--free_port:
-+free_name:
- kfree(name);
-+free_port:
- kfree(port);
-
- return rv;
-diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
-index 2ba8ec254dcee..0787456c2b892 100644
---- a/drivers/ufs/core/ufs-mcq.c
-+++ b/drivers/ufs/core/ufs-mcq.c
-@@ -436,7 +436,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
-
- for (i = 0; i < hba->nr_hw_queues; i++) {
- hwq = &hba->uhq[i];
-- hwq->max_entries = hba->nutrs;
-+ hwq->max_entries = hba->nutrs + 1;
- spin_lock_init(&hwq->sq_lock);
- spin_lock_init(&hwq->cq_lock);
- mutex_init(&hwq->sq_mutex);
-@@ -630,6 +630,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
- int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct ufs_hw_queue *hwq;
-+ unsigned long flags;
- int err = FAILED;
-
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
-@@ -670,8 +671,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
- }
-
- err = SUCCESS;
-+ spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
-+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
- out:
- return err;
-diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
-index 8382e8cfa414a..170fbd5715b21 100644
---- a/drivers/ufs/core/ufshcd.c
-+++ b/drivers/ufs/core/ufshcd.c
-@@ -3632,7 +3632,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- */
- ret = utf16s_to_utf8s(uc_str->uc,
- uc_str->len - QUERY_DESC_HDR_SIZE,
-- UTF16_BIG_ENDIAN, str, ascii_len);
-+ UTF16_BIG_ENDIAN, str, ascii_len - 1);
-
- /* replace non-printable or non-ASCII characters with spaces */
- for (i = 0; i < ret; i++)
-@@ -6347,11 +6347,24 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- struct ufs_hba *hba = shost_priv(shost);
-+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
-+ struct ufs_hw_queue *hwq;
-+ unsigned long flags;
-
- *ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- *ret ? "failed" : "succeeded");
-+
-+ /* Release cmd in MCQ mode if abort succeeds */
-+ if (is_mcq_enabled(hba) && (*ret == 0)) {
-+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
-+ spin_lock_irqsave(&hwq->cq_lock, flags);
-+ if (ufshcd_cmd_inflight(lrbp->cmd))
-+ ufshcd_release_scsi_cmd(hba, lrbp);
-+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
-+ }
-+
- return *ret == 0;
- }
-
-@@ -8723,7 +8736,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
- if (ret)
- goto out;
-
-- if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
-+ if (!hba->pm_op_in_progress &&
-+ (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
- /* Reset the device and controller before doing reinit */
- ufshcd_device_reset(hba);
- ufshcd_hba_stop(hba);
-diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
-index d1149b1c3ed50..b1d720031251e 100644
---- a/drivers/ufs/host/ufs-qcom.c
-+++ b/drivers/ufs/host/ufs-qcom.c
-@@ -909,8 +909,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
- return ret;
- }
-
-- /* Use the agreed gear */
-- host->hs_gear = dev_req_params->gear_tx;
-+ /*
-+ * Update hs_gear only when the gears are scaled to a higher value. This is because
-+ * the PHY gear settings are backwards compatible and we only need to change the PHY
-+ * settings while scaling to higher gears.
-+ */
-+ if (dev_req_params->gear_tx > host->hs_gear)
-+ host->hs_gear = dev_req_params->gear_tx;
-
- /* enable the device ref clock before changing to HS mode */
- if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
-diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
-index 07f6068342d46..275a6a2fa671e 100644
---- a/drivers/usb/cdns3/cdnsp-ring.c
-+++ b/drivers/usb/cdns3/cdnsp-ring.c
-@@ -1529,6 +1529,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
- unsigned long flags;
- int counter = 0;
-
-+ local_bh_disable();
- spin_lock_irqsave(&pdev->lock, flags);
-
- if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
-@@ -1541,6 +1542,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
- cdnsp_died(pdev);
-
- spin_unlock_irqrestore(&pdev->lock, flags);
-+ local_bh_enable();
- return IRQ_HANDLED;
- }
-
-@@ -1557,6 +1559,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
- cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
-
- spin_unlock_irqrestore(&pdev->lock, flags);
-+ local_bh_enable();
-
- return IRQ_HANDLED;
- }
-diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
-index 08af26b762a2d..0cce192083701 100644
---- a/drivers/usb/chipidea/host.c
-+++ b/drivers/usb/chipidea/host.c
-@@ -30,8 +30,7 @@ struct ehci_ci_priv {
- };
-
- struct ci_hdrc_dma_aligned_buffer {
-- void *kmalloc_ptr;
-- void *old_xfer_buffer;
-+ void *original_buffer;
- u8 data[];
- };
-
-@@ -380,59 +379,52 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
- return 0;
- }
-
--static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
-+static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb, bool copy_back)
- {
- struct ci_hdrc_dma_aligned_buffer *temp;
-- size_t length;
-
- if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
- return;
-+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
-
- temp = container_of(urb->transfer_buffer,
- struct ci_hdrc_dma_aligned_buffer, data);
-+ urb->transfer_buffer = temp->original_buffer;
-+
-+ if (copy_back && usb_urb_dir_in(urb)) {
-+ size_t length;
-
-- if (usb_urb_dir_in(urb)) {
- if (usb_pipeisoc(urb->pipe))
- length = urb->transfer_buffer_length;
- else
- length = urb->actual_length;
-
-- memcpy(temp->old_xfer_buffer, temp->data, length);
-+ memcpy(temp->original_buffer, temp->data, length);
- }
-- urb->transfer_buffer = temp->old_xfer_buffer;
-- kfree(temp->kmalloc_ptr);
-
-- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
-+ kfree(temp);
- }
-
- static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
- {
-- struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
-- const unsigned int ci_hdrc_usb_dma_align = 32;
-- size_t kmalloc_size;
-+ struct ci_hdrc_dma_aligned_buffer *temp;
-
-- if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
-- !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
-+ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0)
-+ return 0;
-+ if (IS_ALIGNED((uintptr_t)urb->transfer_buffer, 4)
-+ && IS_ALIGNED(urb->transfer_buffer_length, 4))
- return 0;
-
-- /* Allocate a buffer with enough padding for alignment */
-- kmalloc_size = urb->transfer_buffer_length +
-- sizeof(struct ci_hdrc_dma_aligned_buffer) +
-- ci_hdrc_usb_dma_align - 1;
--
-- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
-- if (!kmalloc_ptr)
-+ temp = kmalloc(sizeof(*temp) + ALIGN(urb->transfer_buffer_length, 4), mem_flags);
-+ if (!temp)
- return -ENOMEM;
-
-- /* Position our struct dma_aligned_buffer such that data is aligned */
-- temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
-- temp->kmalloc_ptr = kmalloc_ptr;
-- temp->old_xfer_buffer = urb->transfer_buffer;
- if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
- urb->transfer_buffer_length);
-- urb->transfer_buffer = temp->data;
-
-+ temp->original_buffer = urb->transfer_buffer;
-+ urb->transfer_buffer = temp->data;
- urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
- return 0;
-@@ -449,7 +441,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
-
- ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
- if (ret)
-- ci_hdrc_free_dma_aligned_buffer(urb);
-+ ci_hdrc_free_dma_aligned_buffer(urb, false);
-
- return ret;
- }
-@@ -457,7 +449,7 @@ static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
- static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
- {
- usb_hcd_unmap_urb_for_dma(hcd, urb);
-- ci_hdrc_free_dma_aligned_buffer(urb);
-+ ci_hdrc_free_dma_aligned_buffer(urb, true);
- }
-
- #ifdef CONFIG_PM_SLEEP
-diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
-index b19e38d5fd10c..7f8d33f92ddb5 100644
---- a/drivers/usb/core/config.c
-+++ b/drivers/usb/core/config.c
-@@ -1047,7 +1047,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
-
- if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
- dev_notice(ddev, "descriptor type invalid, skip\n");
-- continue;
-+ goto skip_to_next_descriptor;
- }
-
- switch (cap_type) {
-@@ -1078,6 +1078,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
- break;
- }
-
-+skip_to_next_descriptor:
- total_len -= length;
- buffer += length;
- }
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 0ff47eeffb490..dfc30cebd4c4c 100644
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -622,29 +622,6 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
- ret = 0;
- }
- mutex_unlock(&hub->status_mutex);
--
-- /*
-- * There is no need to lock status_mutex here, because status_mutex
-- * protects hub->status, and the phy driver only checks the port
-- * status without changing the status.
-- */
-- if (!ret) {
-- struct usb_device *hdev = hub->hdev;
--
-- /*
-- * Only roothub will be notified of port state changes,
-- * since the USB PHY only cares about changes at the next
-- * level.
-- */
-- if (is_root_hub(hdev)) {
-- struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
--
-- if (hcd->usb_phy)
-- usb_phy_notify_port_status(hcd->usb_phy,
-- port1 - 1, *status, *change);
-- }
-- }
--
- return ret;
- }
-
-diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
-index 657f1f659ffaf..35c7a4df8e717 100644
---- a/drivers/usb/dwc2/hcd.c
-+++ b/drivers/usb/dwc2/hcd.c
-@@ -4769,8 +4769,8 @@ fail3:
- if (qh_allocated && qh->channel && qh->channel->qh == qh)
- qh->channel->qh = NULL;
- fail2:
-- spin_unlock_irqrestore(&hsotg->lock, flags);
- urb->hcpriv = NULL;
-+ spin_unlock_irqrestore(&hsotg->lock, flags);
- kfree(qtd);
- fail1:
- if (qh_allocated) {
-diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
-index 0144ca8350c31..5c7538d498dd1 100644
---- a/drivers/usb/dwc2/hcd_intr.c
-+++ b/drivers/usb/dwc2/hcd_intr.c
-@@ -2015,15 +2015,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
- {
- struct dwc2_qtd *qtd;
- struct dwc2_host_chan *chan;
-- u32 hcint, hcintmsk;
-+ u32 hcint, hcintraw, hcintmsk;
-
- chan = hsotg->hc_ptr_array[chnum];
-
-- hcint = dwc2_readl(hsotg, HCINT(chnum));
-+ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
- hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
-+ hcint = hcintraw & hcintmsk;
-+ dwc2_writel(hsotg, hcint, HCINT(chnum));
-+
- if (!chan) {
- dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
-- dwc2_writel(hsotg, hcint, HCINT(chnum));
- return;
- }
-
-@@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
- chnum);
- dev_vdbg(hsotg->dev,
- " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
-- hcint, hcintmsk, hcint & hcintmsk);
-+ hcintraw, hcintmsk, hcint);
- }
-
-- dwc2_writel(hsotg, hcint, HCINT(chnum));
--
- /*
- * If we got an interrupt after someone called
- * dwc2_hcd_endpoint_disable() we don't want to crash below
-@@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
- return;
- }
-
-- chan->hcint = hcint;
-- hcint &= hcintmsk;
-+ chan->hcint = hcintraw;
-
- /*
- * If the channel was halted due to a dequeue, the qtd list might
-diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
-index 343d2570189ff..8d5af9ccb6027 100644
---- a/drivers/usb/dwc3/core.c
-+++ b/drivers/usb/dwc3/core.c
-@@ -1094,6 +1094,111 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
- }
- }
-
-+static void dwc3_config_threshold(struct dwc3 *dwc)
-+{
-+ u32 reg;
-+ u8 rx_thr_num;
-+ u8 rx_maxburst;
-+ u8 tx_thr_num;
-+ u8 tx_maxburst;
-+
-+ /*
-+ * Must config both number of packets and max burst settings to enable
-+ * RX and/or TX threshold.
-+ */
-+ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
-+ rx_thr_num = dwc->rx_thr_num_pkt_prd;
-+ rx_maxburst = dwc->rx_max_burst_prd;
-+ tx_thr_num = dwc->tx_thr_num_pkt_prd;
-+ tx_maxburst = dwc->tx_max_burst_prd;
-+
-+ if (rx_thr_num && rx_maxburst) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
-+ reg |= DWC31_RXTHRNUMPKTSEL_PRD;
-+
-+ reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
-+ reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
-+
-+ reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
-+ reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
-+
-+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
-+ }
-+
-+ if (tx_thr_num && tx_maxburst) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
-+ reg |= DWC31_TXTHRNUMPKTSEL_PRD;
-+
-+ reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
-+ reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
-+
-+ reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
-+ reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
-+
-+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
-+ }
-+ }
-+
-+ rx_thr_num = dwc->rx_thr_num_pkt;
-+ rx_maxburst = dwc->rx_max_burst;
-+ tx_thr_num = dwc->tx_thr_num_pkt;
-+ tx_maxburst = dwc->tx_max_burst;
-+
-+ if (DWC3_IP_IS(DWC3)) {
-+ if (rx_thr_num && rx_maxburst) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
-+ reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
-+
-+ reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
-+ reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
-+
-+ reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
-+ reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
-+
-+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
-+ }
-+
-+ if (tx_thr_num && tx_maxburst) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
-+ reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
-+
-+ reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
-+ reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
-+
-+ reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
-+ reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
-+
-+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
-+ }
-+ } else {
-+ if (rx_thr_num && rx_maxburst) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
-+ reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
-+
-+ reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
-+ reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
-+
-+ reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
-+ reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
-+
-+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
-+ }
-+
-+ if (tx_thr_num && tx_maxburst) {
-+ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
-+ reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
-+
-+ reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
-+ reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
-+
-+ reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
-+ reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
-+
-+ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
-+ }
-+ }
-+}
-+
- /**
- * dwc3_core_init - Low-level initialization of DWC3 Core
- * @dwc: Pointer to our controller context structure
-@@ -1246,42 +1351,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
- dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
- }
-
-- /*
-- * Must config both number of packets and max burst settings to enable
-- * RX and/or TX threshold.
-- */
-- if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
-- u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
-- u8 rx_maxburst = dwc->rx_max_burst_prd;
-- u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
-- u8 tx_maxburst = dwc->tx_max_burst_prd;
--
-- if (rx_thr_num && rx_maxburst) {
-- reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
-- reg |= DWC31_RXTHRNUMPKTSEL_PRD;
--
-- reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
-- reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
--
-- reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
-- reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
--
-- dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
-- }
--
-- if (tx_thr_num && tx_maxburst) {
-- reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
-- reg |= DWC31_TXTHRNUMPKTSEL_PRD;
--
-- reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
-- reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
--
-- reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
-- reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
--
-- dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
-- }
-- }
-+ dwc3_config_threshold(dwc);
-
- return 0;
-
-@@ -1417,6 +1487,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
- u8 lpm_nyet_threshold;
- u8 tx_de_emphasis;
- u8 hird_threshold;
-+ u8 rx_thr_num_pkt = 0;
-+ u8 rx_max_burst = 0;
-+ u8 tx_thr_num_pkt = 0;
-+ u8 tx_max_burst = 0;
- u8 rx_thr_num_pkt_prd = 0;
- u8 rx_max_burst_prd = 0;
- u8 tx_thr_num_pkt_prd = 0;
-@@ -1479,6 +1553,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
- "snps,usb2-lpm-disable");
- dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
- "snps,usb2-gadget-lpm-disable");
-+ device_property_read_u8(dev, "snps,rx-thr-num-pkt",
-+ &rx_thr_num_pkt);
-+ device_property_read_u8(dev, "snps,rx-max-burst",
-+ &rx_max_burst);
-+ device_property_read_u8(dev, "snps,tx-thr-num-pkt",
-+ &tx_thr_num_pkt);
-+ device_property_read_u8(dev, "snps,tx-max-burst",
-+ &tx_max_burst);
- device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
- &rx_thr_num_pkt_prd);
- device_property_read_u8(dev, "snps,rx-max-burst-prd",
-@@ -1560,6 +1642,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
-
- dwc->hird_threshold = hird_threshold;
-
-+ dwc->rx_thr_num_pkt = rx_thr_num_pkt;
-+ dwc->rx_max_burst = rx_max_burst;
-+
-+ dwc->tx_thr_num_pkt = tx_thr_num_pkt;
-+ dwc->tx_max_burst = tx_max_burst;
-+
- dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
- dwc->rx_max_burst_prd = rx_max_burst_prd;
-
-@@ -1918,6 +2006,8 @@ static int dwc3_probe(struct platform_device *pdev)
-
- pm_runtime_put(dev);
-
-+ dma_set_max_seg_size(dev, UINT_MAX);
-+
- return 0;
-
- err_exit_debugfs:
-diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
-index a69ac67d89fe6..6782ec8bfd64c 100644
---- a/drivers/usb/dwc3/core.h
-+++ b/drivers/usb/dwc3/core.h
-@@ -211,6 +211,11 @@
- #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
- #define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
-
-+/* Global TX Threshold Configuration Register */
-+#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
-+#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
-+#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
-+
- /* Global RX Threshold Configuration Register for DWC_usb31 only */
- #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16)
- #define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21)
-@@ -1045,6 +1050,10 @@ struct dwc3_scratchpad_array {
- * @test_mode_nr: test feature selector
- * @lpm_nyet_threshold: LPM NYET response threshold
- * @hird_threshold: HIRD threshold
-+ * @rx_thr_num_pkt: USB receive packet count
-+ * @rx_max_burst: max USB receive burst size
-+ * @tx_thr_num_pkt: USB transmit packet count
-+ * @tx_max_burst: max USB transmit burst size
- * @rx_thr_num_pkt_prd: periodic ESS receive packet count
- * @rx_max_burst_prd: max periodic ESS receive burst size
- * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
-@@ -1273,6 +1282,10 @@ struct dwc3 {
- u8 test_mode_nr;
- u8 lpm_nyet_threshold;
- u8 hird_threshold;
-+ u8 rx_thr_num_pkt;
-+ u8 rx_max_burst;
-+ u8 tx_thr_num_pkt;
-+ u8 tx_max_burst;
- u8 rx_thr_num_pkt_prd;
- u8 rx_max_burst_prd;
- u8 tx_thr_num_pkt_prd;
-diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
-index 039bf241769af..57ddd2e43022e 100644
---- a/drivers/usb/dwc3/drd.c
-+++ b/drivers/usb/dwc3/drd.c
-@@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
- dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
- mode = DWC3_GCTL_PRTCAP_DEVICE;
- }
-+ dwc3_set_mode(dwc, mode);
-
- dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
- dwc3_role_switch.set = dwc3_usb_role_switch_set;
-@@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
- }
- }
-
-- dwc3_set_mode(dwc, mode);
- return 0;
- }
- #else
-diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
-index 3de43df6bbe81..82544374110b0 100644
---- a/drivers/usb/dwc3/dwc3-qcom.c
-+++ b/drivers/usb/dwc3/dwc3-qcom.c
-@@ -549,7 +549,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
- qcom_dwc3_resume_irq,
-- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-+ IRQF_ONESHOT,
- "qcom_dwc3 HS", qcom);
- if (ret) {
- dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
-@@ -564,7 +564,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
- qcom_dwc3_resume_irq,
-- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-+ IRQF_ONESHOT,
- "qcom_dwc3 DP_HS", qcom);
- if (ret) {
- dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
-@@ -579,7 +579,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
- qcom_dwc3_resume_irq,
-- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-+ IRQF_ONESHOT,
- "qcom_dwc3 DM_HS", qcom);
- if (ret) {
- dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
-@@ -594,7 +594,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
- qcom_dwc3_resume_irq,
-- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
-+ IRQF_ONESHOT,
- "qcom_dwc3 SS", qcom);
- if (ret) {
- dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
-@@ -758,6 +758,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
- if (!qcom->dwc3) {
- ret = -ENODEV;
- dev_err(dev, "failed to get dwc3 platform device\n");
-+ of_platform_depopulate(dev);
- }
-
- node_put:
-@@ -766,9 +767,9 @@ node_put:
- return ret;
- }
-
--static struct platform_device *
--dwc3_qcom_create_urs_usb_platdev(struct device *dev)
-+static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
- {
-+ struct platform_device *urs_usb = NULL;
- struct fwnode_handle *fwh;
- struct acpi_device *adev;
- char name[8];
-@@ -788,9 +789,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
-
- adev = to_acpi_device_node(fwh);
- if (!adev)
-- return NULL;
-+ goto err_put_handle;
-+
-+ urs_usb = acpi_create_platform_device(adev, NULL);
-+ if (IS_ERR_OR_NULL(urs_usb))
-+ goto err_put_handle;
-+
-+ return urs_usb;
-+
-+err_put_handle:
-+ fwnode_handle_put(fwh);
-+
-+ return urs_usb;
-+}
-
-- return acpi_create_platform_device(adev, NULL);
-+static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
-+{
-+ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
-+
-+ platform_device_unregister(urs_usb);
-+ fwnode_handle_put(fwh);
- }
-
- static int dwc3_qcom_probe(struct platform_device *pdev)
-@@ -874,13 +892,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
- qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
- if (IS_ERR(qcom->qscratch_base)) {
- ret = PTR_ERR(qcom->qscratch_base);
-- goto clk_disable;
-+ goto free_urs;
- }
-
- ret = dwc3_qcom_setup_irq(pdev);
- if (ret) {
- dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
-- goto clk_disable;
-+ goto free_urs;
- }
-
- /*
-@@ -899,7 +917,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
-
- if (ret) {
- dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
-- goto depopulate;
-+ goto free_urs;
- }
-
- ret = dwc3_qcom_interconnect_init(qcom);
-@@ -931,10 +949,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
- interconnect_exit:
- dwc3_qcom_interconnect_exit(qcom);
- depopulate:
-- if (np)
-+ if (np) {
- of_platform_depopulate(&pdev->dev);
-- else
-- platform_device_put(pdev);
-+ } else {
-+ device_remove_software_node(&qcom->dwc3->dev);
-+ platform_device_del(qcom->dwc3);
-+ }
-+ platform_device_put(qcom->dwc3);
-+free_urs:
-+ if (qcom->urs_usb)
-+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
- clk_disable:
- for (i = qcom->num_clocks - 1; i >= 0; i--) {
- clk_disable_unprepare(qcom->clks[i]);
-@@ -953,11 +977,16 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
- struct device *dev = &pdev->dev;
- int i;
-
-- device_remove_software_node(&qcom->dwc3->dev);
-- if (np)
-+ if (np) {
- of_platform_depopulate(&pdev->dev);
-- else
-- platform_device_put(pdev);
-+ } else {
-+ device_remove_software_node(&qcom->dwc3->dev);
-+ platform_device_del(qcom->dwc3);
-+ }
-+ platform_device_put(qcom->dwc3);
-+
-+ if (qcom->urs_usb)
-+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
-
- for (i = qcom->num_clocks - 1; i >= 0; i--) {
- clk_disable_unprepare(qcom->clks[i]);
-diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
-index e6ab8cc225ffd..cc0ed29a4adc0 100644
---- a/drivers/usb/gadget/function/f_ncm.c
-+++ b/drivers/usb/gadget/function/f_ncm.c
-@@ -1410,7 +1410,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
- struct usb_composite_dev *cdev = c->cdev;
- struct f_ncm *ncm = func_to_ncm(f);
- struct usb_string *us;
-- int status;
-+ int status = 0;
- struct usb_ep *ep;
- struct f_ncm_opts *ncm_opts;
-
-@@ -1428,22 +1428,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
- f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
- }
-
-- /*
-- * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
-- * configurations are bound in sequence with list_for_each_entry,
-- * in each configuration its functions are bound in sequence
-- * with list_for_each_entry, so we assume no race condition
-- * with regard to ncm_opts->bound access
-- */
-- if (!ncm_opts->bound) {
-- mutex_lock(&ncm_opts->lock);
-- gether_set_gadget(ncm_opts->net, cdev->gadget);
-+ mutex_lock(&ncm_opts->lock);
-+ gether_set_gadget(ncm_opts->net, cdev->gadget);
-+ if (!ncm_opts->bound)
- status = gether_register_netdev(ncm_opts->net);
-- mutex_unlock(&ncm_opts->lock);
-- if (status)
-- goto fail;
-- ncm_opts->bound = true;
-- }
-+ mutex_unlock(&ncm_opts->lock);
-+
-+ if (status)
-+ goto fail;
-+
-+ ncm_opts->bound = true;
-+
- us = usb_gstrings_attach(cdev, ncm_strings,
- ARRAY_SIZE(ncm_string_defs));
- if (IS_ERR(us)) {
-diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
-index e549022642e56..ea106ad665a1f 100644
---- a/drivers/usb/gadget/legacy/raw_gadget.c
-+++ b/drivers/usb/gadget/legacy/raw_gadget.c
-@@ -663,12 +663,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
- if (WARN_ON(in && dev->ep0_out_pending)) {
- ret = -ENODEV;
- dev->state = STATE_DEV_FAILED;
-- goto out_done;
-+ goto out_unlock;
- }
- if (WARN_ON(!in && dev->ep0_in_pending)) {
- ret = -ENODEV;
- dev->state = STATE_DEV_FAILED;
-- goto out_done;
-+ goto out_unlock;
- }
-
- dev->req->buf = data;
-@@ -683,7 +683,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
- "fail, usb_ep_queue returned %d\n", ret);
- spin_lock_irqsave(&dev->lock, flags);
- dev->state = STATE_DEV_FAILED;
-- goto out_done;
-+ goto out_queue_failed;
- }
-
- ret = wait_for_completion_interruptible(&dev->ep0_done);
-@@ -692,13 +692,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
- usb_ep_dequeue(dev->gadget->ep0, dev->req);
- wait_for_completion(&dev->ep0_done);
- spin_lock_irqsave(&dev->lock, flags);
-- goto out_done;
-+ if (dev->ep0_status == -ECONNRESET)
-+ dev->ep0_status = -EINTR;
-+ goto out_interrupted;
- }
-
- spin_lock_irqsave(&dev->lock, flags);
-- ret = dev->ep0_status;
-
--out_done:
-+out_interrupted:
-+ ret = dev->ep0_status;
-+out_queue_failed:
- dev->ep0_urb_queued = false;
- out_unlock:
- spin_unlock_irqrestore(&dev->lock, flags);
-@@ -1067,7 +1070,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
- "fail, usb_ep_queue returned %d\n", ret);
- spin_lock_irqsave(&dev->lock, flags);
- dev->state = STATE_DEV_FAILED;
-- goto out_done;
-+ goto out_queue_failed;
- }
-
- ret = wait_for_completion_interruptible(&done);
-@@ -1076,13 +1079,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
- usb_ep_dequeue(ep->ep, ep->req);
- wait_for_completion(&done);
- spin_lock_irqsave(&dev->lock, flags);
-- goto out_done;
-+ if (ep->status == -ECONNRESET)
-+ ep->status = -EINTR;
-+ goto out_interrupted;
- }
-
- spin_lock_irqsave(&dev->lock, flags);
-- ret = ep->status;
-
--out_done:
-+out_interrupted:
-+ ret = ep->status;
-+out_queue_failed:
- ep->urb_queued = false;
- out_unlock:
- spin_unlock_irqrestore(&dev->lock, flags);
-diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
-index b9ae5c2a25275..95ed9404f6f85 100644
---- a/drivers/usb/host/xhci-pci.c
-+++ b/drivers/usb/host/xhci-pci.c
-@@ -535,6 +535,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
- /* xHC spec requires PCI devices to support D3hot and D3cold */
- if (xhci->hci_version >= 0x120)
- xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
-+ else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
-+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
-
- if (xhci->quirks & XHCI_RESET_ON_RESUME)
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-@@ -693,7 +695,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
- /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
- pm_runtime_put_noidle(&dev->dev);
-
-- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
-+ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
-+ pm_runtime_forbid(&dev->dev);
-+ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
- pm_runtime_allow(&dev->dev);
-
- dma_set_max_seg_size(&dev->dev, UINT_MAX);
-diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
-index 28218c8f18376..732cdeb739202 100644
---- a/drivers/usb/host/xhci-plat.c
-+++ b/drivers/usb/host/xhci-plat.c
-@@ -13,6 +13,7 @@
- #include <linux/module.h>
- #include <linux/pci.h>
- #include <linux/of.h>
-+#include <linux/of_device.h>
- #include <linux/platform_device.h>
- #include <linux/usb/phy.h>
- #include <linux/slab.h>
-@@ -148,7 +149,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
- int ret;
- int irq;
- struct xhci_plat_priv *priv = NULL;
--
-+ bool of_match;
-
- if (usb_disabled())
- return -ENODEV;
-@@ -253,16 +254,23 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
- &xhci->imod_interval);
- }
-
-- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
-- if (IS_ERR(hcd->usb_phy)) {
-- ret = PTR_ERR(hcd->usb_phy);
-- if (ret == -EPROBE_DEFER)
-- goto disable_clk;
-- hcd->usb_phy = NULL;
-- } else {
-- ret = usb_phy_init(hcd->usb_phy);
-- if (ret)
-- goto disable_clk;
-+ /*
-+ * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
-+ * matching for the xhci platform device).
-+ */
-+ of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
-+ if (of_match) {
-+ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
-+ if (IS_ERR(hcd->usb_phy)) {
-+ ret = PTR_ERR(hcd->usb_phy);
-+ if (ret == -EPROBE_DEFER)
-+ goto disable_clk;
-+ hcd->usb_phy = NULL;
-+ } else {
-+ ret = usb_phy_init(hcd->usb_phy);
-+ if (ret)
-+ goto disable_clk;
-+ }
- }
-
- hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
-@@ -285,15 +293,17 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
- goto dealloc_usb2_hcd;
- }
-
-- xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
-- "usb-phy", 1);
-- if (IS_ERR(xhci->shared_hcd->usb_phy)) {
-- xhci->shared_hcd->usb_phy = NULL;
-- } else {
-- ret = usb_phy_init(xhci->shared_hcd->usb_phy);
-- if (ret)
-- dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
-- __func__, ret);
-+ if (of_match) {
-+ xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
-+ "usb-phy", 1);
-+ if (IS_ERR(xhci->shared_hcd->usb_phy)) {
-+ xhci->shared_hcd->usb_phy = NULL;
-+ } else {
-+ ret = usb_phy_init(xhci->shared_hcd->usb_phy);
-+ if (ret)
-+ dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
-+ __func__, ret);
-+ }
- }
-
- xhci->shared_hcd->tpl_support = hcd->tpl_support;
-@@ -458,23 +468,38 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
- int ret;
-
- if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
-- clk_prepare_enable(xhci->clk);
-- clk_prepare_enable(xhci->reg_clk);
-+ ret = clk_prepare_enable(xhci->clk);
-+ if (ret)
-+ return ret;
-+
-+ ret = clk_prepare_enable(xhci->reg_clk);
-+ if (ret) {
-+ clk_disable_unprepare(xhci->clk);
-+ return ret;
-+ }
- }
-
- ret = xhci_priv_resume_quirk(hcd);
- if (ret)
-- return ret;
-+ goto disable_clks;
-
- ret = xhci_resume(xhci, PMSG_RESUME);
- if (ret)
-- return ret;
-+ goto disable_clks;
-
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- return 0;
-+
-+disable_clks:
-+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
-+ clk_disable_unprepare(xhci->clk);
-+ clk_disable_unprepare(xhci->reg_clk);
-+ }
-+
-+ return ret;
- }
-
- static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
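The resume fix above is an instance of the standard kernel idiom for enabling
several clocks with rollback on failure: each clk_prepare_enable() that
succeeds must be undone by clk_disable_unprepare() if a later step fails. A
minimal standalone sketch of the idiom (hypothetical clock pair, not the xhci
code itself):

    #include <linux/clk.h>

    /*
     * Enable two clocks atomically from the caller's point of view:
     * either both end up enabled, or neither does.
     */
    static int enable_clk_pair(struct clk *bus_clk, struct clk *reg_clk)
    {
            int ret;

            ret = clk_prepare_enable(bus_clk);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(reg_clk);
            if (ret) {
                    /* Roll back the first enable to keep refcounts balanced. */
                    clk_disable_unprepare(bus_clk);
                    return ret;
            }

            return 0;
    }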
-diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
-index e1b1b64a07232..132b76fa7ca60 100644
---- a/drivers/usb/host/xhci.c
-+++ b/drivers/usb/host/xhci.c
-@@ -968,6 +968,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
- int retval = 0;
- bool comp_timer_running = false;
- bool pending_portevent = false;
-+ bool suspended_usb3_devs = false;
- bool reinit_xhc = false;
-
- if (!hcd->state)
-@@ -1115,10 +1116,17 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
- /*
- * Resume roothubs only if there are pending events.
- * USB 3 devices resend U3 LFPS wake after a 100ms delay if
-- * the first wake signalling failed, give it that chance.
-+ * the first wake signalling failed, give it that chance if
-+ * there are suspended USB 3 devices.
- */
-+ if (xhci->usb3_rhub.bus_state.suspended_ports ||
-+ xhci->usb3_rhub.bus_state.bus_suspended)
-+ suspended_usb3_devs = true;
-+
- pending_portevent = xhci_pending_portevent(xhci);
-- if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
-+
-+ if (suspended_usb3_devs && !pending_portevent &&
-+ msg.event == PM_EVENT_AUTO_RESUME) {
- msleep(120);
- pending_portevent = xhci_pending_portevent(xhci);
- }
-diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
-index 57bbe13090948..d72130eda57d6 100644
---- a/drivers/usb/misc/onboard_usb_hub.c
-+++ b/drivers/usb/misc/onboard_usb_hub.c
-@@ -437,6 +437,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
- { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
- { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
- { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
-+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */
-+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */
- { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
- { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
- { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
-diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
-index 2a4ab5ac0ebed..8af34e6d1afff 100644
---- a/drivers/usb/misc/onboard_usb_hub.h
-+++ b/drivers/usb/misc/onboard_usb_hub.h
-@@ -16,6 +16,11 @@ static const struct onboard_hub_pdata microchip_usb424_data = {
- .num_supplies = 1,
- };
-
-+static const struct onboard_hub_pdata microchip_usb5744_data = {
-+ .reset_us = 0,
-+ .num_supplies = 2,
-+};
-+
- static const struct onboard_hub_pdata realtek_rts5411_data = {
- .reset_us = 0,
- .num_supplies = 1,
-@@ -50,6 +55,8 @@ static const struct of_device_id onboard_hub_match[] = {
- { .compatible = "usb424,2412", .data = &microchip_usb424_data, },
- { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
- { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
-+ { .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
-+ { .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
- { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
- { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
- { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
-diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
-index 45dcfaadaf98e..4dffcfefd62da 100644
---- a/drivers/usb/serial/option.c
-+++ b/drivers/usb/serial/option.c
-@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
- #define DELL_PRODUCT_5829E_ESIM 0x81e4
- #define DELL_PRODUCT_5829E 0x81e6
-
--#define DELL_PRODUCT_FM101R 0x8213
--#define DELL_PRODUCT_FM101R_ESIM 0x8215
-+#define DELL_PRODUCT_FM101R_ESIM 0x8213
-+#define DELL_PRODUCT_FM101R 0x8215
-
- #define KYOCERA_VENDOR_ID 0x0c88
- #define KYOCERA_PRODUCT_KPC650 0x17da
-@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
- #define UNISOC_VENDOR_ID 0x1782
- /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
- #define TOZED_PRODUCT_LT70C 0x4055
-+/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
-+#define LUAT_PRODUCT_AIR720U 0x4e00
-
- /* Device flags */
-
-@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
- .driver_info = RSVD(4) },
-- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
-+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
-+ .driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
- .driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
-@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
- .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
- { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
- { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
-+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
- { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
- .driver_info = RSVD(4) | RSVD(5) },
- { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
-@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
- { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
-+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
- { } /* Terminating entry */
- };
- MODULE_DEVICE_TABLE(usb, option_ids);
-diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
-index 0547daf116a26..5df40759d77ad 100644
---- a/drivers/usb/storage/unusual_cypress.h
-+++ b/drivers/usb/storage/unusual_cypress.h
-@@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
- "Cypress ISD-300LP",
- USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-
--UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
-+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
- "Super Top",
- "USB 2.0 SATA BRIDGE",
- USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
-index d962f67c95ae6..6d455ca76125e 100644
---- a/drivers/usb/typec/tcpm/tcpm.c
-+++ b/drivers/usb/typec/tcpm/tcpm.c
-@@ -1625,6 +1625,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
- if (PD_VDO_VID(p[0]) != USB_SID_PD)
- break;
-
-+ if (IS_ERR_OR_NULL(port->partner))
-+ break;
-+
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
- typec_partner_set_svdm_version(port->partner,
- PD_VDO_SVDM_VER(p[0]));
-@@ -3903,6 +3906,8 @@ static void run_state_machine(struct tcpm_port *port)
- port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
- port->state == SRC_UNATTACHED) ||
- (port->enter_state == SNK_ATTACH_WAIT &&
-+ port->state == SNK_UNATTACHED) ||
-+ (port->enter_state == SNK_DEBOUNCED &&
- port->state == SNK_UNATTACHED));
-
- port->enter_state = port->state;
-@@ -4268,7 +4273,8 @@ static void run_state_machine(struct tcpm_port *port)
- current_lim = PD_P_SNK_STDBY_MW / 5;
- tcpm_set_current_limit(port, current_lim, 5000);
- /* Not sink vbus if operational current is 0mA */
-- tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
-+ tcpm_set_charge(port, !port->pd_supported ||
-+ pdo_max_current(port->snk_pdo[0]));
-
- if (!port->pd_supported)
- tcpm_set_state(port, SNK_READY, 0);
-@@ -5386,6 +5392,15 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
- if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
- port->tcpc->set_bist_data(port->tcpc, false);
-
-+ switch (port->state) {
-+ case ERROR_RECOVERY:
-+ case PORT_RESET:
-+ case PORT_RESET_WAIT_OFF:
-+ return;
-+ default:
-+ break;
-+ }
-+
- if (port->ams != NONE_AMS)
- port->ams = NONE_AMS;
- if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
-diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
-index bb1854b3311dc..db6e248f82083 100644
---- a/drivers/usb/typec/ucsi/ucsi_glink.c
-+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
-@@ -8,9 +8,13 @@
- #include <linux/mutex.h>
- #include <linux/property.h>
- #include <linux/soc/qcom/pdr.h>
-+#include <linux/usb/typec_mux.h>
-+#include <linux/gpio/consumer.h>
- #include <linux/soc/qcom/pmic_glink.h>
- #include "ucsi.h"
-
-+#define PMIC_GLINK_MAX_PORTS 2
-+
- #define UCSI_BUF_SIZE 48
-
- #define MSG_TYPE_REQ_RESP 1
-@@ -52,6 +56,9 @@ struct ucsi_notify_ind_msg {
- struct pmic_glink_ucsi {
- struct device *dev;
-
-+ struct gpio_desc *port_orientation[PMIC_GLINK_MAX_PORTS];
-+ struct typec_switch *port_switch[PMIC_GLINK_MAX_PORTS];
-+
- struct pmic_glink_client *client;
-
- struct ucsi *ucsi;
-@@ -220,8 +227,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
- }
-
- con_num = UCSI_CCI_CONNECTOR(cci);
-- if (con_num)
-+ if (con_num) {
-+ if (con_num < PMIC_GLINK_MAX_PORTS &&
-+ ucsi->port_orientation[con_num - 1]) {
-+ int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
-+
-+ if (orientation >= 0) {
-+ typec_switch_set(ucsi->port_switch[con_num - 1],
-+ orientation ? TYPEC_ORIENTATION_REVERSE
-+ : TYPEC_ORIENTATION_NORMAL);
-+ }
-+ }
-+
- ucsi_connector_change(ucsi->ucsi, con_num);
-+ }
-
- if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
- ucsi->sync_val = -EBUSY;
-@@ -282,6 +301,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
- {
- struct pmic_glink_ucsi *ucsi;
- struct device *dev = &adev->dev;
-+ struct fwnode_handle *fwnode;
- int ret;
-
- ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
-@@ -309,6 +329,38 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
-
- ucsi_set_drvdata(ucsi->ucsi, ucsi);
-
-+ device_for_each_child_node(dev, fwnode) {
-+ struct gpio_desc *desc;
-+ u32 port;
-+
-+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
-+ if (ret < 0) {
-+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
-+ return ret;
-+ }
-+
-+ if (port >= PMIC_GLINK_MAX_PORTS) {
-+ dev_warn(dev, "invalid connector number, ignoring\n");
-+ continue;
-+ }
-+
-+ desc = devm_gpiod_get_index_optional(&adev->dev, "orientation", port, GPIOD_IN);
-+
-+ /* If GPIO isn't found, continue */
-+ if (!desc)
-+ continue;
-+
-+ if (IS_ERR(desc))
-+ return dev_err_probe(dev, PTR_ERR(desc),
-+ "unable to acquire orientation gpio\n");
-+ ucsi->port_orientation[port] = desc;
-+
-+ ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
-+ if (IS_ERR(ucsi->port_switch[port]))
-+ return dev_err_probe(dev, PTR_ERR(ucsi->port_switch[port]),
-+ "failed to acquire orientation-switch\n");
-+ }
-+
- ucsi->client = devm_pmic_glink_register_client(dev,
- PMIC_GLINK_OWNER_USBC,
- pmic_glink_ucsi_callback,
-diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
-index 9c6954aad6c88..ce625b1ce9a51 100644
---- a/drivers/usb/usbip/stub_dev.c
-+++ b/drivers/usb/usbip/stub_dev.c
-@@ -464,8 +464,13 @@ static void stub_disconnect(struct usb_device *udev)
- /* release port */
- rc = usb_hub_release_port(udev->parent, udev->portnum,
- (struct usb_dev_state *) udev);
-- if (rc) {
-- dev_dbg(&udev->dev, "unable to release port\n");
-+ /*
-+	 * NOTE: If a hub disconnect triggered disconnect of the downstream
-+	 * device, usb_hub_release_port() will return -ENODEV, so we can
-+	 * safely ignore that error here.
-+ */
-+ if (rc && (rc != -ENODEV)) {
-+ dev_dbg(&udev->dev, "unable to release port (%i)\n", rc);
- return;
- }
-
-diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
-index b3a3cb1657955..b137f36793439 100644
---- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
-+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
-@@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
- if (blk->shared_backend) {
- blk->buffer = shared_buffer;
- } else {
-- blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
-+ blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
- GFP_KERNEL);
- if (!blk->buffer) {
- ret = -ENOMEM;
-@@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void)
- goto parent_err;
-
- if (shared_backend) {
-- shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
-+ shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
- GFP_KERNEL);
- if (!shared_buffer) {
- ret = -ENOMEM;
-diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
-index ab4b5958e4131..caffa1a2cf591 100644
---- a/drivers/vfio/pci/pds/pci_drv.c
-+++ b/drivers/vfio/pci/pds/pci_drv.c
-@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
- * VFIO_DEVICE_STATE_RUNNING.
- */
- if (deferred_reset_needed) {
-- spin_lock(&pds_vfio->reset_lock);
-+ mutex_lock(&pds_vfio->reset_mutex);
- pds_vfio->deferred_reset = true;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
-- spin_unlock(&pds_vfio->reset_lock);
-+ mutex_unlock(&pds_vfio->reset_mutex);
- }
- }
-
-diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
-index 649b18ee394bb..4c351c59d05a9 100644
---- a/drivers/vfio/pci/pds/vfio_dev.c
-+++ b/drivers/vfio/pci/pds/vfio_dev.c
-@@ -29,7 +29,7 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
- void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
- {
- again:
-- spin_lock(&pds_vfio->reset_lock);
-+ mutex_lock(&pds_vfio->reset_mutex);
- if (pds_vfio->deferred_reset) {
- pds_vfio->deferred_reset = false;
- if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
-@@ -39,23 +39,23 @@ again:
- }
- pds_vfio->state = pds_vfio->deferred_reset_state;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
-- spin_unlock(&pds_vfio->reset_lock);
-+ mutex_unlock(&pds_vfio->reset_mutex);
- goto again;
- }
- mutex_unlock(&pds_vfio->state_mutex);
-- spin_unlock(&pds_vfio->reset_lock);
-+ mutex_unlock(&pds_vfio->reset_mutex);
- }
-
- void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
- {
-- spin_lock(&pds_vfio->reset_lock);
-+ mutex_lock(&pds_vfio->reset_mutex);
- pds_vfio->deferred_reset = true;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
- if (!mutex_trylock(&pds_vfio->state_mutex)) {
-- spin_unlock(&pds_vfio->reset_lock);
-+ mutex_unlock(&pds_vfio->reset_mutex);
- return;
- }
-- spin_unlock(&pds_vfio->reset_lock);
-+ mutex_unlock(&pds_vfio->reset_mutex);
- pds_vfio_state_mutex_unlock(pds_vfio);
- }
-
-@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
-
- pds_vfio->vf_id = vf_id;
-
-+ mutex_init(&pds_vfio->state_mutex);
-+ mutex_init(&pds_vfio->reset_mutex);
-+
- vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
- vdev->mig_ops = &pds_vfio_lm_ops;
- vdev->log_ops = &pds_vfio_log_ops;
-@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
- return 0;
- }
-
-+static void pds_vfio_release_device(struct vfio_device *vdev)
-+{
-+ struct pds_vfio_pci_device *pds_vfio =
-+ container_of(vdev, struct pds_vfio_pci_device,
-+ vfio_coredev.vdev);
-+
-+ mutex_destroy(&pds_vfio->state_mutex);
-+ mutex_destroy(&pds_vfio->reset_mutex);
-+ vfio_pci_core_release_dev(vdev);
-+}
-+
- static int pds_vfio_open_device(struct vfio_device *vdev)
- {
- struct pds_vfio_pci_device *pds_vfio =
-@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
- if (err)
- return err;
-
-- mutex_init(&pds_vfio->state_mutex);
- pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
- pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
-
-@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev)
- pds_vfio_put_save_file(pds_vfio);
- pds_vfio_dirty_disable(pds_vfio, true);
- mutex_unlock(&pds_vfio->state_mutex);
-- mutex_destroy(&pds_vfio->state_mutex);
- vfio_pci_core_close_device(vdev);
- }
-
- static const struct vfio_device_ops pds_vfio_ops = {
- .name = "pds-vfio",
- .init = pds_vfio_init_device,
-- .release = vfio_pci_core_release_dev,
-+ .release = pds_vfio_release_device,
- .open_device = pds_vfio_open_device,
- .close_device = pds_vfio_close_device,
- .ioctl = vfio_pci_core_ioctl,
-diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
-index b8f2d667608f3..e7b01080a1ec3 100644
---- a/drivers/vfio/pci/pds/vfio_dev.h
-+++ b/drivers/vfio/pci/pds/vfio_dev.h
-@@ -18,7 +18,7 @@ struct pds_vfio_pci_device {
- struct pds_vfio_dirty dirty;
- struct mutex state_mutex; /* protect migration state */
- enum vfio_device_mig_state state;
-- spinlock_t reset_lock; /* protect reset_done flow */
-+ struct mutex reset_mutex; /* protect reset_done flow */
- u8 deferred_reset;
- enum vfio_device_mig_state deferred_reset_state;
- struct notifier_block nb;
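Taken together, the pds hunks convert the reset handshake from a spinlock to a
mutex while preserving its shape: the reset path records a deferred request
under reset_mutex and only performs it directly when state_mutex can be taken
without blocking; otherwise the current state_mutex holder replays the request
when it unlocks. A condensed sketch of that handshake, assuming a struct with
the same three fields used above:

    #include <linux/mutex.h>

    struct demo_dev {
            struct mutex state_mutex;   /* protects migration state */
            struct mutex reset_mutex;   /* protects deferred_reset */
            bool deferred_reset;
    };

    static void demo_request_reset(struct demo_dev *d)
    {
            mutex_lock(&d->reset_mutex);
            d->deferred_reset = true;
            if (!mutex_trylock(&d->state_mutex)) {
                    /* state_mutex busy: its holder replays the reset on unlock. */
                    mutex_unlock(&d->reset_mutex);
                    return;
            }
            /* We got state_mutex; perform the reset ourselves. */
            d->deferred_reset = false;
            mutex_unlock(&d->reset_mutex);
            mutex_unlock(&d->state_mutex);
    }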
-diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
-index 78379ffd23363..fb590e346e43d 100644
---- a/drivers/vhost/vdpa.c
-+++ b/drivers/vhost/vdpa.c
-@@ -1511,7 +1511,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
-
- err:
- put_device(&v->dev);
-- ida_simple_remove(&vhost_vdpa_ida, v->minor);
- return r;
- }
-
-diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
-index a51fbab963680..289bd9ce4d36d 100644
---- a/drivers/video/backlight/pwm_bl.c
-+++ b/drivers/video/backlight/pwm_bl.c
-@@ -626,9 +626,14 @@ static void pwm_backlight_remove(struct platform_device *pdev)
- {
- struct backlight_device *bl = platform_get_drvdata(pdev);
- struct pwm_bl_data *pb = bl_get_data(bl);
-+ struct pwm_state state;
-
- backlight_device_unregister(bl);
- pwm_backlight_power_off(pb);
-+ pwm_get_state(pb->pwm, &state);
-+ state.duty_cycle = 0;
-+ state.enabled = false;
-+ pwm_apply_state(pb->pwm, &state);
-
- if (pb->exit)
- pb->exit(&pdev->dev);
-@@ -638,8 +643,13 @@ static void pwm_backlight_shutdown(struct platform_device *pdev)
- {
- struct backlight_device *bl = platform_get_drvdata(pdev);
- struct pwm_bl_data *pb = bl_get_data(bl);
-+ struct pwm_state state;
-
- pwm_backlight_power_off(pb);
-+ pwm_get_state(pb->pwm, &state);
-+ state.duty_cycle = 0;
-+ state.enabled = false;
-+ pwm_apply_state(pb->pwm, &state);
- }
-
- #ifdef CONFIG_PM_SLEEP
-@@ -647,12 +657,24 @@ static int pwm_backlight_suspend(struct device *dev)
- {
- struct backlight_device *bl = dev_get_drvdata(dev);
- struct pwm_bl_data *pb = bl_get_data(bl);
-+ struct pwm_state state;
-
- if (pb->notify)
- pb->notify(pb->dev, 0);
-
- pwm_backlight_power_off(pb);
-
-+ /*
-+	 * Note that disabling the PWM doesn't guarantee that the output stays
-+	 * in its inactive state. However, without the PWM disabled, the PWM
-+	 * driver refuses to suspend. So disable here even though this might
-+	 * enable the backlight on poorly designed boards.
-+ */
-+ pwm_get_state(pb->pwm, &state);
-+ state.duty_cycle = 0;
-+ state.enabled = false;
-+ pwm_apply_state(pb->pwm, &state);
-+
- if (pb->notify_after)
- pb->notify_after(pb->dev, 0);
-
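All three paths touched above (remove, shutdown, suspend) force the PWM into a
known-off state rather than relying on pwm_backlight_power_off() alone. The
idiom, as a minimal sketch against the 6.6 PWM API (hypothetical helper; later
kernels rename pwm_apply_state() to pwm_apply_might_sleep()):

    #include <linux/pwm.h>

    /*
     * Drive a PWM to 0% duty and disable it, starting from the last
     * state applied by this driver.
     */
    static void pwm_force_off(struct pwm_device *pwm)
    {
            struct pwm_state state;

            pwm_get_state(pwm, &state);
            state.duty_cycle = 0;
            state.enabled = false;
            pwm_apply_state(pwm, &state);
    }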
-diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
-index 7fbd9f069ac2e..0bced82fa4940 100644
---- a/drivers/video/fbdev/fsl-diu-fb.c
-+++ b/drivers/video/fbdev/fsl-diu-fb.c
-@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
- * Workaround for failed writing desc register of planes.
- * Needed with MPC5121 DIU rev 2.0 silicon.
- */
--void wr_reg_wa(u32 *reg, u32 val)
-+static void wr_reg_wa(u32 *reg, u32 val)
- {
- do {
- out_be32(reg, val);
-diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
-index f4c8677488fb8..f5eaa58a808fb 100644
---- a/drivers/video/fbdev/imsttfb.c
-+++ b/drivers/video/fbdev/imsttfb.c
-@@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info)
- if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
- || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
- printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
-- framebuffer_release(info);
- return -ENODEV;
- }
-
-@@ -1451,14 +1450,11 @@ static int init_imstt(struct fb_info *info)
- FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_YPAN;
-
-- if (fb_alloc_cmap(&info->cmap, 0, 0)) {
-- framebuffer_release(info);
-+ if (fb_alloc_cmap(&info->cmap, 0, 0))
- return -ENODEV;
-- }
-
- if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
-- framebuffer_release(info);
- return -ENODEV;
- }
-
-@@ -1498,8 +1494,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-
- if (!request_mem_region(addr, size, "imsttfb")) {
- printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
-- framebuffer_release(info);
-- return -ENODEV;
-+ ret = -ENODEV;
-+ goto release_info;
- }
-
- switch (pdev->device) {
-@@ -1516,36 +1512,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
- printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
- "contact maintainer.\n", pdev->device);
- ret = -ENODEV;
-- goto error;
-+ goto release_mem_region;
- }
-
- info->fix.smem_start = addr;
- info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
- 0x400000 : 0x800000);
- if (!info->screen_base)
-- goto error;
-+ goto release_mem_region;
- info->fix.mmio_start = addr + 0x800000;
- par->dc_regs = ioremap(addr + 0x800000, 0x1000);
- if (!par->dc_regs)
-- goto error;
-+ goto unmap_screen_base;
- par->cmap_regs_phys = addr + 0x840000;
- par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
- if (!par->cmap_regs)
-- goto error;
-+ goto unmap_dc_regs;
- info->pseudo_palette = par->palette;
- ret = init_imstt(info);
- if (ret)
-- goto error;
-+ goto unmap_cmap_regs;
-
- pci_set_drvdata(pdev, info);
-- return ret;
-+ return 0;
-
--error:
-- if (par->dc_regs)
-- iounmap(par->dc_regs);
-- if (info->screen_base)
-- iounmap(info->screen_base);
-+unmap_cmap_regs:
-+ iounmap(par->cmap_regs);
-+unmap_dc_regs:
-+ iounmap(par->dc_regs);
-+unmap_screen_base:
-+ iounmap(info->screen_base);
-+release_mem_region:
- release_mem_region(addr, size);
-+release_info:
- framebuffer_release(info);
- return ret;
- }
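The probe rework above replaces a single catch-all error label with the
standard goto-unwind ladder: resources are released in reverse order of
acquisition, and each failure jumps to the label that frees exactly what has
been set up so far. A minimal sketch of the shape (acquire_a/acquire_b and
their release counterparts are hypothetical):

    static int demo_probe(void)
    {
            void *a, *b;
            int ret;

            a = acquire_a();
            if (!a)
                    return -ENOMEM;

            b = acquire_b();
            if (!b) {
                    ret = -ENOMEM;
                    goto err_release_a;
            }

            return 0;

    err_release_a:
            release_a(a);
            return ret;
    }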
-diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
-index 97dbe715e96ad..5bee58ef5f1e3 100644
---- a/drivers/virt/coco/sev-guest/sev-guest.c
-+++ b/drivers/virt/coco/sev-guest/sev-guest.c
-@@ -57,6 +57,11 @@ struct snp_guest_dev {
-
- struct snp_secrets_page_layout *layout;
- struct snp_req_data input;
-+ union {
-+ struct snp_report_req report;
-+ struct snp_derived_key_req derived_key;
-+ struct snp_ext_report_req ext_report;
-+ } req;
- u32 *os_area_msg_seqno;
- u8 *vmpck;
- };
-@@ -473,8 +478,8 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
- static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
- {
- struct snp_guest_crypto *crypto = snp_dev->crypto;
-+ struct snp_report_req *req = &snp_dev->req.report;
- struct snp_report_resp *resp;
-- struct snp_report_req req;
- int rc, resp_len;
-
- lockdep_assert_held(&snp_cmd_mutex);
-@@ -482,7 +487,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
- if (!arg->req_data || !arg->resp_data)
- return -EINVAL;
-
-- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
-+ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
- return -EFAULT;
-
- /*
-@@ -496,7 +501,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
- return -ENOMEM;
-
- rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
-- SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
-+ SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
- resp_len);
- if (rc)
- goto e_free;
-@@ -511,9 +516,9 @@ e_free:
-
- static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
- {
-+ struct snp_derived_key_req *req = &snp_dev->req.derived_key;
- struct snp_guest_crypto *crypto = snp_dev->crypto;
- struct snp_derived_key_resp resp = {0};
-- struct snp_derived_key_req req;
- int rc, resp_len;
- /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
- u8 buf[64 + 16];
-@@ -532,11 +537,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
- if (sizeof(buf) < resp_len)
- return -ENOMEM;
-
-- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
-+ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
- return -EFAULT;
-
- rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
-- SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
-+ SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
- if (rc)
- return rc;
-
-@@ -552,8 +557,8 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
-
- static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
- {
-+ struct snp_ext_report_req *req = &snp_dev->req.ext_report;
- struct snp_guest_crypto *crypto = snp_dev->crypto;
-- struct snp_ext_report_req req;
- struct snp_report_resp *resp;
- int ret, npages = 0, resp_len;
-
-@@ -562,18 +567,18 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
- if (!arg->req_data || !arg->resp_data)
- return -EINVAL;
-
-- if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
-+ if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
- return -EFAULT;
-
- /* userspace does not want certificate data */
-- if (!req.certs_len || !req.certs_address)
-+ if (!req->certs_len || !req->certs_address)
- goto cmd;
-
-- if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
-- !IS_ALIGNED(req.certs_len, PAGE_SIZE))
-+ if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
-+ !IS_ALIGNED(req->certs_len, PAGE_SIZE))
- return -EINVAL;
-
-- if (!access_ok((const void __user *)req.certs_address, req.certs_len))
-+ if (!access_ok((const void __user *)req->certs_address, req->certs_len))
- return -EFAULT;
-
- /*
-@@ -582,8 +587,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
- * the host. If host does not supply any certs in it, then copy
- * zeros to indicate that certificate data was not provided.
- */
-- memset(snp_dev->certs_data, 0, req.certs_len);
-- npages = req.certs_len >> PAGE_SHIFT;
-+ memset(snp_dev->certs_data, 0, req->certs_len);
-+ npages = req->certs_len >> PAGE_SHIFT;
- cmd:
- /*
- * The intermediate response buffer is used while decrypting the
-@@ -597,14 +602,14 @@ cmd:
-
- snp_dev->input.data_npages = npages;
- ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
-- SNP_MSG_REPORT_REQ, &req.data,
-- sizeof(req.data), resp->data, resp_len);
-+ SNP_MSG_REPORT_REQ, &req->data,
-+ sizeof(req->data), resp->data, resp_len);
-
- /* If certs length is invalid then copy the returned length */
- if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
-- req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
-+ req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
-
-- if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
-+ if (copy_to_user((void __user *)arg->req_data, req, sizeof(*req)))
- ret = -EFAULT;
- }
-
-@@ -612,8 +617,8 @@ cmd:
- goto e_free;
-
- if (npages &&
-- copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
-- req.certs_len)) {
-+ copy_to_user((void __user *)req->certs_address, snp_dev->certs_data,
-+ req->certs_len)) {
- ret = -EFAULT;
- goto e_free;
- }
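The sev-guest change moves the three per-ioctl request structs off the kernel
stack into a single union embedded in snp_guest_dev: the commands are
serialized by snp_cmd_mutex, so only one member is live at a time, and the
buffer gains a stable, long-lived address. A sketch of the pattern with
hypothetical request types:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct foo_req { u8 data[64]; };
    struct bar_req { u8 data[512]; };

    struct demo_dev {
            /* Sized to the largest member; reused by each serialized command. */
            union {
                    struct foo_req foo;
                    struct bar_req bar;
            } req;
    };

    static int demo_foo_cmd(struct demo_dev *dev, const void __user *uptr)
    {
            struct foo_req *req = &dev->req.foo;

            if (copy_from_user(req, uptr, sizeof(*req)))
                    return -EFAULT;
            /* ... hand the long-lived buffer to the firmware call ... */
            return 0;
    }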
-diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
-index 607ce4b8df574..ec0c08652ec2f 100644
---- a/drivers/watchdog/ixp4xx_wdt.c
-+++ b/drivers/watchdog/ixp4xx_wdt.c
-@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
- .owner = THIS_MODULE,
- };
-
-+/*
-+ * The A0 version of the IXP422 had a bug in the watchdog making
-+ * it useless, but we still need to use it to restart the system
-+ * as it is the only way, so in this special case we register a
-+ * "dummy" watchdog that doesn't really work, but will support
-+ * the restart operation.
-+ */
-+static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
-+{
-+ return 0;
-+}
-+
-+static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
-+ .start = ixp4xx_wdt_dummy,
-+ .stop = ixp4xx_wdt_dummy,
-+ .restart = ixp4xx_wdt_restart,
-+ .owner = THIS_MODULE,
-+};
-+
- static const struct watchdog_info ixp4xx_wdt_info = {
- .options = WDIOF_KEEPALIVEPING
- | WDIOF_MAGICCLOSE
-@@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = {
-
- static int ixp4xx_wdt_probe(struct platform_device *pdev)
- {
-+ static const struct watchdog_ops *iwdt_ops;
- struct device *dev = &pdev->dev;
- struct ixp4xx_wdt *iwdt;
- struct clk *clk;
- int ret;
-
- if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
-- dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
-- return -ENODEV;
-+ dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
-+ iwdt_ops = &ixp4xx_wdt_restart_only_ops;
-+ } else {
-+ iwdt_ops = &ixp4xx_wdt_ops;
- }
-
- iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
-@@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
- iwdt->rate = IXP4XX_TIMER_FREQ;
-
- iwdt->wdd.info = &ixp4xx_wdt_info;
-- iwdt->wdd.ops = &ixp4xx_wdt_ops;
-+ iwdt->wdd.ops = iwdt_ops;
- iwdt->wdd.min_timeout = 1;
- iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
- iwdt->wdd.parent = dev;
-diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
-index d7eb8286e11ec..1ec1e014ba831 100644
---- a/drivers/watchdog/marvell_gti_wdt.c
-+++ b/drivers/watchdog/marvell_gti_wdt.c
-@@ -271,7 +271,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
- &wdt_idx);
- if (!err) {
- if (wdt_idx >= priv->data->gti_num_timers)
-- return dev_err_probe(&pdev->dev, err,
-+ return dev_err_probe(&pdev->dev, -EINVAL,
- "GTI wdog timer index not valid");
-
- priv->wdt_timer_idx = wdt_idx;
-diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
-index 421ebcda62e64..5f23913ce3b49 100644
---- a/drivers/watchdog/sbsa_gwdt.c
-+++ b/drivers/watchdog/sbsa_gwdt.c
-@@ -152,14 +152,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
- timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
-
- if (action)
-- sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
-+ sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
- else
- /*
- * In the single stage mode, The first signal (WS0) is ignored,
- * the timeout is (WOR * 2), so the WOR should be configured
- * to half value of timeout.
- */
-- sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
-+ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
-
- return 0;
- }
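The (u64) casts above exist because gwdt->clk is a rate in Hz and the product
clk * timeout would otherwise be evaluated in 32 bits. A worked example of the
overflow, assuming a 400 MHz counter:

    u32 clk = 400000000;             /* 400 MHz */
    u32 timeout = 11;                /* seconds */

    u32 wrong = clk * timeout;       /* 4400000000 wraps to 105032704 */
    u64 right = (u64)clk * timeout;  /* 4400000000, fits in 64 bits */

Any timeout above UINT_MAX / clk (about 10 s at 400 MHz) silently wraps in the
unfixed code.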
-diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
-index 1b2136fe0fa51..c50419638ac0a 100644
---- a/drivers/xen/events/events_base.c
-+++ b/drivers/xen/events/events_base.c
-@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
-
- /* IRQ <-> IPI mapping */
- static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
-+/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
-+static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
-
- /* Event channel distribution data */
- static atomic_t channels_on_cpu[NR_CPUS];
-@@ -366,6 +368,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
- info->u.ipi = ipi;
-
- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
-+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
-
- return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
- }
-@@ -601,7 +604,9 @@ static void lateeoi_list_add(struct irq_info *info)
-
- spin_lock_irqsave(&eoi->eoi_list_lock, flags);
-
-- if (list_empty(&eoi->eoi_list)) {
-+ elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
-+ eoi_list);
-+ if (!elem || info->eoi_time < elem->eoi_time) {
- list_add(&info->eoi_list, &eoi->eoi_list);
- mod_delayed_work_on(info->eoi_cpu, system_wq,
- &eoi->delayed, delay);
-@@ -981,6 +986,7 @@ static void __unbind_from_irq(unsigned int irq)
- break;
- case IRQT_IPI:
- per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
-+ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
- break;
- case IRQT_EVTCHN:
- dev = info->u.interdomain;
-@@ -1631,7 +1637,7 @@ EXPORT_SYMBOL_GPL(evtchn_put);
-
- void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
- {
-- int irq;
-+ evtchn_port_t evtchn;
-
- #ifdef CONFIG_X86
- if (unlikely(vector == XEN_NMI_VECTOR)) {
-@@ -1642,9 +1648,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
- return;
- }
- #endif
-- irq = per_cpu(ipi_to_irq, cpu)[vector];
-- BUG_ON(irq < 0);
-- notify_remote_via_irq(irq);
-+ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
-+ BUG_ON(evtchn == 0);
-+ notify_remote_via_evtchn(evtchn);
- }
-
- struct evtchn_loop_ctrl {
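The new ipi_to_evtchn array mirrors ipi_to_irq but stores the event channel
directly, so xen_send_IPI_one() no longer needs the RCU-protected irq lookup
during CPU unplug. Its per-CPU array idiom, as a minimal sketch (hypothetical
slot cache, same DEFINE_PER_CPU array form as above):

    #include <linux/percpu.h>

    #define NR_SLOTS 8

    /* One small lookup table per CPU, zero-initialized. */
    static DEFINE_PER_CPU(unsigned int [NR_SLOTS], slot_cache);

    static void slot_store(unsigned int cpu, int idx, unsigned int val)
    {
            per_cpu(slot_cache, cpu)[idx] = val;
    }

    static unsigned int slot_load(unsigned int cpu, int idx)
    {
            return per_cpu(slot_cache, cpu)[idx];
    }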
-diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
-index b3e3d1bb37f3e..5086552731453 100644
---- a/drivers/xen/pcpu.c
-+++ b/drivers/xen/pcpu.c
-@@ -47,6 +47,9 @@
- #include <asm/xen/hypervisor.h>
- #include <asm/xen/hypercall.h>
-
-+#ifdef CONFIG_ACPI
-+#include <acpi/processor.h>
-+#endif
-
- /*
- * @cpu_id: Xen physical cpu logic number
-@@ -400,4 +403,23 @@ bool __init xen_processor_present(uint32_t acpi_id)
-
- return online;
- }
-+
-+void xen_sanitize_proc_cap_bits(uint32_t *cap)
-+{
-+ struct xen_platform_op op = {
-+ .cmd = XENPF_set_processor_pminfo,
-+ .u.set_pminfo.id = -1,
-+ .u.set_pminfo.type = XEN_PM_PDC,
-+ };
-+ u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap };
-+ int ret;
-+
-+ set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
-+ ret = HYPERVISOR_platform_op(&op);
-+ if (ret)
-+ pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n",
-+ ret);
-+ else
-+ *cap = buf[2];
-+}
- #endif
-diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
-index f00ad5f5f1d4a..da88173bac432 100644
---- a/drivers/xen/privcmd.c
-+++ b/drivers/xen/privcmd.c
-@@ -935,7 +935,7 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
- return -ENOMEM;
- dm_op = kirqfd + 1;
-
-- if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
-+ if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
- ret = -EFAULT;
- goto error_kfree;
- }
-diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
-index 946bd56f0ac53..0e6c6c25d154f 100644
---- a/drivers/xen/swiotlb-xen.c
-+++ b/drivers/xen/swiotlb-xen.c
-@@ -405,4 +405,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
- .get_sgtable = dma_common_get_sgtable,
- .alloc_pages = dma_common_alloc_pages,
- .free_pages = dma_common_free_pages,
-+ .max_mapping_size = swiotlb_max_mapping_size,
- };
-diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
-index 059de92aea7d0..d47eee6c51435 100644
---- a/drivers/xen/xen-pciback/conf_space.c
-+++ b/drivers/xen/xen-pciback/conf_space.c
-@@ -288,12 +288,6 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
- u16 val;
- int ret = 0;
-
-- err = pci_read_config_word(dev, PCI_COMMAND, &val);
-- if (err)
-- return err;
-- if (!(val & PCI_COMMAND_INTX_DISABLE))
-- ret |= INTERRUPT_TYPE_INTX;
--
- /*
- * Do not trust dev->msi(x)_enabled here, as enabling could be done
- * bypassing the pci_*msi* functions, by the qemu.
-@@ -316,6 +310,19 @@ int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
- if (val & PCI_MSIX_FLAGS_ENABLE)
- ret |= INTERRUPT_TYPE_MSIX;
- }
-+
-+ /*
-+	 * The PCIe spec says a device cannot use INTx if MSI/MSI-X is enabled,
-+ * so check for INTx only when both are disabled.
-+ */
-+ if (!ret) {
-+ err = pci_read_config_word(dev, PCI_COMMAND, &val);
-+ if (err)
-+ return err;
-+ if (!(val & PCI_COMMAND_INTX_DISABLE))
-+ ret |= INTERRUPT_TYPE_INTX;
-+ }
-+
- return ret ?: INTERRUPT_TYPE_NONE;
- }
-
-diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
-index 097316a741268..1948a9700c8fa 100644
---- a/drivers/xen/xen-pciback/conf_space_capability.c
-+++ b/drivers/xen/xen-pciback/conf_space_capability.c
-@@ -236,10 +236,16 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
- return PCIBIOS_SET_FAILED;
-
- if (new_value & field_config->enable_bit) {
-- /* don't allow enabling together with other interrupt types */
-+ /*
-+	 * Don't allow enabling together with other interrupt types, but do
-+	 * allow enabling MSI(-X) while INTx is still active to please Linux's
-+	 * MSI(-X) startup sequence. It is safe to do, as according to the PCI
-+	 * spec, a device with MSI(-X) enabled shouldn't use INTx.
-+ */
- int int_type = xen_pcibk_get_interrupt_type(dev);
-
- if (int_type == INTERRUPT_TYPE_NONE ||
-+ int_type == INTERRUPT_TYPE_INTX ||
- int_type == field_config->int_type)
- goto write;
- return PCIBIOS_SET_FAILED;
-diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
-index 981435103af1a..fc03326459664 100644
---- a/drivers/xen/xen-pciback/conf_space_header.c
-+++ b/drivers/xen/xen-pciback/conf_space_header.c
-@@ -104,24 +104,9 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
- pci_clear_mwi(dev);
- }
-
-- if (dev_data && dev_data->allow_interrupt_control) {
-- if ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE) {
-- if (value & PCI_COMMAND_INTX_DISABLE) {
-- pci_intx(dev, 0);
-- } else {
-- /* Do not allow enabling INTx together with MSI or MSI-X. */
-- switch (xen_pcibk_get_interrupt_type(dev)) {
-- case INTERRUPT_TYPE_NONE:
-- pci_intx(dev, 1);
-- break;
-- case INTERRUPT_TYPE_INTX:
-- break;
-- default:
-- return PCIBIOS_SET_FAILED;
-- }
-- }
-- }
-- }
-+ if (dev_data && dev_data->allow_interrupt_control &&
-+ ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
-+ pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
-
- cmd->val = value;
-
-diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
-index 639bf628389ba..3205e5d724c8c 100644
---- a/drivers/xen/xenbus/xenbus_probe.c
-+++ b/drivers/xen/xenbus/xenbus_probe.c
-@@ -1025,7 +1025,7 @@ static int __init xenbus_init(void)
- if (err < 0) {
- pr_err("xenstore_late_init couldn't bind irq err=%d\n",
- err);
-- return err;
-+ goto out_error;
- }
-
- xs_init_irq = err;
-diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
-index e00cf8109b3f3..3c4572ef3a488 100644
---- a/fs/9p/xattr.c
-+++ b/fs/9p/xattr.c
-@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
- struct p9_fid *fid;
- int ret;
-
-- p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
-+ p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
- name, buffer_size);
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
-@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
-
- ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
- {
-- return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
-+ /* Txattrwalk with an empty string lists xattrs instead */
-+ return v9fs_xattr_get(dentry, "", buffer, buffer_size);
- }
-
- static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
-diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
-index 95bcbd7654d1b..8081d68004d05 100644
---- a/fs/afs/dynroot.c
-+++ b/fs/afs/dynroot.c
-@@ -132,8 +132,8 @@ static int afs_probe_cell_name(struct dentry *dentry)
-
- ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- NULL, NULL, false);
-- if (ret == -ENODATA)
-- ret = -EDESTADDRREQ;
-+ if (ret == -ENODATA || ret == -ENOKEY)
-+ ret = -ENOENT;
- return ret;
- }
-
-diff --git a/fs/afs/internal.h b/fs/afs/internal.h
-index da73b97e19a9a..5041eae64423a 100644
---- a/fs/afs/internal.h
-+++ b/fs/afs/internal.h
-@@ -553,6 +553,7 @@ struct afs_server_entry {
- };
-
- struct afs_server_list {
-+ struct rcu_head rcu;
- afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
- refcount_t usage;
- unsigned char nr_servers;
-diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
-index ed9056703505f..b59896b1de0af 100644
---- a/fs/afs/server_list.c
-+++ b/fs/afs/server_list.c
-@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
- for (i = 0; i < slist->nr_servers; i++)
- afs_unuse_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
-- kfree(slist);
-+ kfree_rcu(slist, rcu);
- }
- }
-
-diff --git a/fs/afs/super.c b/fs/afs/super.c
-index 95d713074dc81..e95fb4cb4fcd2 100644
---- a/fs/afs/super.c
-+++ b/fs/afs/super.c
-@@ -407,6 +407,8 @@ static int afs_validate_fc(struct fs_context *fc)
- return PTR_ERR(volume);
-
- ctx->volume = volume;
-+ if (volume->type != AFSVL_RWVOL)
-+ ctx->flock_mode = afs_flock_mode_local;
- }
-
- return 0;
-diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
-index 488e58490b16e..eb415ce563600 100644
---- a/fs/afs/vl_rotate.c
-+++ b/fs/afs/vl_rotate.c
-@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
- }
-
- /* Status load is ordered after lookup counter load */
-+ if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
-+ pr_warn("No record of cell %s\n", cell->name);
-+ vc->error = -ENOENT;
-+ return false;
-+ }
-+
- if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
- vc->error = -EDESTADDRREQ;
- return false;
-@@ -285,6 +291,7 @@ failed:
- */
- static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
- {
-+ struct afs_cell *cell = vc->cell;
- static int count;
- int i;
-
-@@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
-
- rcu_read_lock();
- pr_notice("EDESTADDR occurred\n");
-+ pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
-+ pr_notice("DNS: src=%u st=%u lc=%x\n",
-+ cell->dns_source, cell->dns_status, cell->dns_lookup_count);
- pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
- vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
-
-diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
-index b2e5107b7cecc..5a97db9888107 100644
---- a/fs/btrfs/block-group.c
-+++ b/fs/btrfs/block-group.c
-@@ -2601,7 +2601,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
- btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
-
- btrfs_set_dev_extent_length(leaf, extent, num_bytes);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- out:
- btrfs_free_path(path);
- return ret;
-@@ -3025,7 +3025,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
- cache->global_root_id);
- btrfs_set_stack_block_group_flags(&bgi, cache->flags);
- write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- fail:
- btrfs_release_path(path);
- /*
-diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index 617d4827eec26..118ad4d2cbbe2 100644
---- a/fs/btrfs/ctree.c
-+++ b/fs/btrfs/ctree.c
-@@ -359,7 +359,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
- return ret;
- }
-
-- btrfs_mark_buffer_dirty(cow);
-+ btrfs_mark_buffer_dirty(trans, cow);
- *cow_ret = cow;
- return 0;
- }
-@@ -627,7 +627,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
- cow->start);
- btrfs_set_node_ptr_generation(parent, parent_slot,
- trans->transid);
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
- if (last_ref) {
- ret = btrfs_tree_mod_log_free_eb(buf);
- if (ret) {
-@@ -643,7 +643,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
- if (unlock_orig)
- btrfs_tree_unlock(buf);
- free_extent_buffer_stale(buf);
-- btrfs_mark_buffer_dirty(cow);
-+ btrfs_mark_buffer_dirty(trans, cow);
- *cow_ret = cow;
- return 0;
- }
-@@ -1197,7 +1197,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
- goto out;
- }
- btrfs_set_node_key(parent, &right_key, pslot + 1);
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
- }
- }
- if (btrfs_header_nritems(mid) == 1) {
-@@ -1255,7 +1255,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
- goto out;
- }
- btrfs_set_node_key(parent, &mid_key, pslot);
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
- }
-
- /* update the path */
-@@ -1362,7 +1362,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
- return ret;
- }
- btrfs_set_node_key(parent, &disk_key, pslot);
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
- if (btrfs_header_nritems(left) > orig_slot) {
- path->nodes[level] = left;
- path->slots[level + 1] -= 1;
-@@ -1422,7 +1422,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
- return ret;
- }
- btrfs_set_node_key(parent, &disk_key, pslot + 1);
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
-
- if (btrfs_header_nritems(mid) <= orig_slot) {
- path->nodes[level] = right;
-@@ -2678,7 +2678,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
- * higher levels
- *
- */
--static void fixup_low_keys(struct btrfs_path *path,
-+static void fixup_low_keys(struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path,
- struct btrfs_disk_key *key, int level)
- {
- int i;
-@@ -2695,7 +2696,7 @@ static void fixup_low_keys(struct btrfs_path *path,
- BTRFS_MOD_LOG_KEY_REPLACE);
- BUG_ON(ret < 0);
- btrfs_set_node_key(t, key, tslot);
-- btrfs_mark_buffer_dirty(path->nodes[i]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[i]);
- if (tslot != 0)
- break;
- }
-@@ -2707,10 +2708,11 @@ static void fixup_low_keys(struct btrfs_path *path,
- * This function isn't completely safe. It's the caller's responsibility
- * that the new key won't break the order
- */
--void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
-+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- const struct btrfs_key *new_key)
- {
-+ struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_disk_key disk_key;
- struct extent_buffer *eb;
- int slot;
-@@ -2748,9 +2750,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
-
- btrfs_cpu_key_to_disk(&disk_key, new_key);
- btrfs_set_item_key(eb, &disk_key, slot);
-- btrfs_mark_buffer_dirty(eb);
-+ btrfs_mark_buffer_dirty(trans, eb);
- if (slot == 0)
-- fixup_low_keys(path, &disk_key, 1);
-+ fixup_low_keys(trans, path, &disk_key, 1);
- }
-
- /*
-@@ -2881,8 +2883,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
- }
- btrfs_set_header_nritems(src, src_nritems - push_items);
- btrfs_set_header_nritems(dst, dst_nritems + push_items);
-- btrfs_mark_buffer_dirty(src);
-- btrfs_mark_buffer_dirty(dst);
-+ btrfs_mark_buffer_dirty(trans, src);
-+ btrfs_mark_buffer_dirty(trans, dst);
-
- return ret;
- }
-@@ -2957,8 +2959,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
- btrfs_set_header_nritems(src, src_nritems - push_items);
- btrfs_set_header_nritems(dst, dst_nritems + push_items);
-
-- btrfs_mark_buffer_dirty(src);
-- btrfs_mark_buffer_dirty(dst);
-+ btrfs_mark_buffer_dirty(trans, src);
-+ btrfs_mark_buffer_dirty(trans, dst);
-
- return ret;
- }
-@@ -3007,7 +3009,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
-
- btrfs_set_node_ptr_generation(c, 0, lower_gen);
-
-- btrfs_mark_buffer_dirty(c);
-+ btrfs_mark_buffer_dirty(trans, c);
-
- old = root->node;
- ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
-@@ -3079,7 +3081,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
- WARN_ON(trans->transid == 0);
- btrfs_set_node_ptr_generation(lower, slot, trans->transid);
- btrfs_set_header_nritems(lower, nritems + 1);
-- btrfs_mark_buffer_dirty(lower);
-+ btrfs_mark_buffer_dirty(trans, lower);
-
- return 0;
- }
-@@ -3158,8 +3160,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
- btrfs_set_header_nritems(split, c_nritems - mid);
- btrfs_set_header_nritems(c, mid);
-
-- btrfs_mark_buffer_dirty(c);
-- btrfs_mark_buffer_dirty(split);
-+ btrfs_mark_buffer_dirty(trans, c);
-+ btrfs_mark_buffer_dirty(trans, split);
-
- ret = insert_ptr(trans, path, &disk_key, split->start,
- path->slots[level + 1] + 1, level + 1);
-@@ -3325,15 +3327,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
- btrfs_set_header_nritems(left, left_nritems);
-
- if (left_nritems)
-- btrfs_mark_buffer_dirty(left);
-+ btrfs_mark_buffer_dirty(trans, left);
- else
- btrfs_clear_buffer_dirty(trans, left);
-
-- btrfs_mark_buffer_dirty(right);
-+ btrfs_mark_buffer_dirty(trans, right);
-
- btrfs_item_key(right, &disk_key, 0);
- btrfs_set_node_key(upper, &disk_key, slot + 1);
-- btrfs_mark_buffer_dirty(upper);
-+ btrfs_mark_buffer_dirty(trans, upper);
-
- /* then fixup the leaf pointer in the path */
- if (path->slots[0] >= left_nritems) {
-@@ -3545,14 +3547,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
- btrfs_set_token_item_offset(&token, i, push_space);
- }
-
-- btrfs_mark_buffer_dirty(left);
-+ btrfs_mark_buffer_dirty(trans, left);
- if (right_nritems)
-- btrfs_mark_buffer_dirty(right);
-+ btrfs_mark_buffer_dirty(trans, right);
- else
- btrfs_clear_buffer_dirty(trans, right);
-
- btrfs_item_key(right, &disk_key, 0);
-- fixup_low_keys(path, &disk_key, 1);
-+ fixup_low_keys(trans, path, &disk_key, 1);
-
- /* then fixup the leaf pointer in the path */
- if (path->slots[0] < push_items) {
-@@ -3683,8 +3685,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
- if (ret < 0)
- return ret;
-
-- btrfs_mark_buffer_dirty(right);
-- btrfs_mark_buffer_dirty(l);
-+ btrfs_mark_buffer_dirty(trans, right);
-+ btrfs_mark_buffer_dirty(trans, l);
- BUG_ON(path->slots[0] != slot);
-
- if (mid <= slot) {
-@@ -3925,7 +3927,7 @@ again:
- path->nodes[0] = right;
- path->slots[0] = 0;
- if (path->slots[1] == 0)
-- fixup_low_keys(path, &disk_key, 1);
-+ fixup_low_keys(trans, path, &disk_key, 1);
- }
- /*
- * We create a new leaf 'right' for the required ins_len and
-@@ -4024,7 +4026,8 @@ err:
- return ret;
- }
-
--static noinline int split_item(struct btrfs_path *path,
-+static noinline int split_item(struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path,
- const struct btrfs_key *new_key,
- unsigned long split_offset)
- {
-@@ -4083,7 +4086,7 @@ static noinline int split_item(struct btrfs_path *path,
- write_extent_buffer(leaf, buf + split_offset,
- btrfs_item_ptr_offset(leaf, slot),
- item_size - split_offset);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- BUG_ON(btrfs_leaf_free_space(leaf) < 0);
- kfree(buf);
-@@ -4117,7 +4120,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
- if (ret)
- return ret;
-
-- ret = split_item(path, new_key, split_offset);
-+ ret = split_item(trans, path, new_key, split_offset);
- return ret;
- }
-
-@@ -4127,7 +4130,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
- * off the end of the item or if we shift the item to chop bytes off
- * the front.
- */
--void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
-+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path, u32 new_size, int from_end)
- {
- int slot;
- struct extent_buffer *leaf;
-@@ -4203,11 +4207,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
- btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
- btrfs_set_item_key(leaf, &disk_key, slot);
- if (slot == 0)
-- fixup_low_keys(path, &disk_key, 1);
-+ fixup_low_keys(trans, path, &disk_key, 1);
- }
-
- btrfs_set_item_size(leaf, slot, new_size);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- if (btrfs_leaf_free_space(leaf) < 0) {
- btrfs_print_leaf(leaf);
-@@ -4218,7 +4222,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
- /*
- * make the item pointed to by the path bigger, data_size is the added size.
- */
--void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
-+void btrfs_extend_item(struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path, u32 data_size)
- {
- int slot;
- struct extent_buffer *leaf;
-@@ -4268,7 +4273,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
- data_end = old_data;
- old_size = btrfs_item_size(leaf, slot);
- btrfs_set_item_size(leaf, slot, old_size + data_size);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- if (btrfs_leaf_free_space(leaf) < 0) {
- btrfs_print_leaf(leaf);
-@@ -4279,6 +4284,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
- /*
- * Make space in the node before inserting one or more items.
- *
-+ * @trans: transaction handle
- * @root: root we are inserting items to
- * @path: points to the leaf/slot where we are going to insert new items
- * @batch: information about the batch of items to insert
-@@ -4286,7 +4292,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
- * Main purpose is to save stack depth by doing the bulk of the work in a
- * function that doesn't call btrfs_search_slot
- */
--static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
-+static void setup_items_for_insert(struct btrfs_trans_handle *trans,
-+ struct btrfs_root *root, struct btrfs_path *path,
- const struct btrfs_item_batch *batch)
- {
- struct btrfs_fs_info *fs_info = root->fs_info;
-@@ -4306,7 +4313,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
- */
- if (path->slots[0] == 0) {
- btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
-- fixup_low_keys(path, &disk_key, 1);
-+ fixup_low_keys(trans, path, &disk_key, 1);
- }
- btrfs_unlock_up_safe(path, 1);
-
-@@ -4365,7 +4372,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
- }
-
- btrfs_set_header_nritems(leaf, nritems + batch->nr);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- if (btrfs_leaf_free_space(leaf) < 0) {
- btrfs_print_leaf(leaf);
-@@ -4376,12 +4383,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
- /*
- * Insert a new item into a leaf.
- *
-+ * @trans: Transaction handle.
- * @root: The root of the btree.
- * @path: A path pointing to the target leaf and slot.
- * @key: The key of the new item.
- * @data_size: The size of the data associated with the new key.
- */
--void btrfs_setup_item_for_insert(struct btrfs_root *root,
-+void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
-+ struct btrfs_root *root,
- struct btrfs_path *path,
- const struct btrfs_key *key,
- u32 data_size)
-@@ -4393,7 +4402,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
- batch.total_data_size = data_size;
- batch.nr = 1;
-
-- setup_items_for_insert(root, path, &batch);
-+ setup_items_for_insert(trans, root, path, &batch);
- }
-
- /*
-@@ -4419,7 +4428,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
- slot = path->slots[0];
- BUG_ON(slot < 0);
-
-- setup_items_for_insert(root, path, batch);
-+ setup_items_for_insert(trans, root, path, batch);
- return 0;
- }
-
-@@ -4444,7 +4453,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- leaf = path->nodes[0];
- ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- write_extent_buffer(leaf, data, ptr, data_size);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- }
- btrfs_free_path(path);
- return ret;
-@@ -4475,7 +4484,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
- return ret;
-
- path->slots[0]++;
-- btrfs_setup_item_for_insert(root, path, new_key, item_size);
-+ btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
- leaf = path->nodes[0];
- memcpy_extent_buffer(leaf,
- btrfs_item_ptr_offset(leaf, path->slots[0]),
-@@ -4533,9 +4542,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_disk_key disk_key;
-
- btrfs_node_key(parent, &disk_key, 0);
-- fixup_low_keys(path, &disk_key, level + 1);
-+ fixup_low_keys(trans, path, &disk_key, level + 1);
- }
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
- return 0;
- }
-
-@@ -4632,7 +4641,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_disk_key disk_key;
-
- btrfs_item_key(leaf, &disk_key, 0);
-- fixup_low_keys(path, &disk_key, 1);
-+ fixup_low_keys(trans, path, &disk_key, 1);
- }
-
- /*
-@@ -4697,11 +4706,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- * dirtied this buffer
- */
- if (path->nodes[0] == leaf)
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- free_extent_buffer(leaf);
- }
- } else {
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- }
- }
- return ret;
-diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
-index ff40acd63a374..06333a74d6c4c 100644
---- a/fs/btrfs/ctree.h
-+++ b/fs/btrfs/ctree.h
-@@ -518,7 +518,7 @@ int btrfs_previous_item(struct btrfs_root *root,
- int type);
- int btrfs_previous_extent_item(struct btrfs_root *root,
- struct btrfs_path *path, u64 min_objectid);
--void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
-+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- const struct btrfs_key *new_key);
- struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
-@@ -545,8 +545,10 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
- struct extent_buffer *buf);
- int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_path *path, int level, int slot);
--void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
--void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
-+void btrfs_extend_item(struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path, u32 data_size);
-+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path, u32 new_size, int from_end);
- int btrfs_split_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
-@@ -610,7 +612,8 @@ struct btrfs_item_batch {
- int nr;
- };
-
--void btrfs_setup_item_for_insert(struct btrfs_root *root,
-+void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
-+ struct btrfs_root *root,
- struct btrfs_path *path,
- const struct btrfs_key *key,
- u32 data_size);
-diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
-index 427abaf608b8c..0d105ed1b8def 100644
---- a/fs/btrfs/delalloc-space.c
-+++ b/fs/btrfs/delalloc-space.c
-@@ -322,9 +322,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
- } else {
- if (current->journal_info)
- flush = BTRFS_RESERVE_FLUSH_LIMIT;
--
-- if (btrfs_transaction_in_commit(fs_info))
-- schedule_timeout(1);
- }
-
- num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
-diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
-index 90aaedce1548a..16f9e5f474cca 100644
---- a/fs/btrfs/delayed-inode.c
-+++ b/fs/btrfs/delayed-inode.c
-@@ -1030,7 +1030,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
- struct btrfs_inode_item);
- write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
- sizeof(struct btrfs_inode_item));
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
- goto out;
-diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
-index fff22ed55c428..fe6ba17a05099 100644
---- a/fs/btrfs/dev-replace.c
-+++ b/fs/btrfs/dev-replace.c
-@@ -442,7 +442,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
- dev_replace->item_needs_writeback = 0;
- up_write(&dev_replace->rwsem);
-
-- btrfs_mark_buffer_dirty(eb);
-+ btrfs_mark_buffer_dirty(trans, eb);
-
- out:
- btrfs_free_path(path);
-diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
-index 082eb0e195981..9c07d5c3e5ad2 100644
---- a/fs/btrfs/dir-item.c
-+++ b/fs/btrfs/dir-item.c
-@@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
- di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
- if (di)
- return ERR_PTR(-EEXIST);
-- btrfs_extend_item(path, data_size);
-+ btrfs_extend_item(trans, path, data_size);
- } else if (ret < 0)
- return ERR_PTR(ret);
- WARN_ON(ret > 0);
-@@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
-
- write_extent_buffer(leaf, name, name_ptr, name_len);
- write_extent_buffer(leaf, data, data_ptr, data_len);
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
- return ret;
- }
-@@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
- name_ptr = (unsigned long)(dir_item + 1);
-
- write_extent_buffer(leaf, name->name, name_ptr, name->len);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- second_insert:
- /* FIXME, use some real flag for selecting the extra index */
-@@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
- start = btrfs_item_ptr_offset(leaf, path->slots[0]);
- memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
- item_len - (ptr + sub_item_len - start));
-- btrfs_truncate_item(path, item_len - sub_item_len, 1);
-+ btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
- }
- return ret;
- }
-diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
-index 68f60d50e1fd0..71efb6883f307 100644
---- a/fs/btrfs/disk-io.c
-+++ b/fs/btrfs/disk-io.c
-@@ -867,7 +867,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
- }
-
- root->node = leaf;
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- root->commit_root = btrfs_root_node(root);
- set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
-@@ -942,7 +942,7 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
-
- root->node = leaf;
-
-- btrfs_mark_buffer_dirty(root->node);
-+ btrfs_mark_buffer_dirty(trans, root->node);
- btrfs_tree_unlock(root->node);
-
- return 0;
-@@ -3197,6 +3197,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
- goto fail_alloc;
- }
-
-+ btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
- /*
- * Verify the type first, if that or the checksum value are
- * corrupted, we'll find out
-@@ -4423,7 +4424,8 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
- btrfs_close_devices(fs_info->fs_devices);
- }
-
--void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
-+void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
-+ struct extent_buffer *buf)
- {
- struct btrfs_fs_info *fs_info = buf->fs_info;
- u64 transid = btrfs_header_generation(buf);
-@@ -4437,10 +4439,14 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
- if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
- return;
- #endif
-+ /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
-+ ASSERT(trans->transid == fs_info->generation);
- btrfs_assert_tree_write_locked(buf);
-- if (transid != fs_info->generation)
-+ if (transid != fs_info->generation) {
- WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
- buf->start, transid, fs_info->generation);
-+ btrfs_abort_transaction(trans, -EUCLEAN);
-+ }
- set_extent_buffer_dirty(buf);
- #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- /*
-diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
-index 02b645744a822..50dab8f639dcc 100644
---- a/fs/btrfs/disk-io.h
-+++ b/fs/btrfs/disk-io.h
-@@ -104,7 +104,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
- }
-
- void btrfs_put_root(struct btrfs_root *root);
--void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
-+void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
-+ struct extent_buffer *buf);
- int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
- int atomic);
- int btrfs_read_extent_buffer(struct extent_buffer *buf,
-diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
-index fc313fce5bbdc..91fe57e87583c 100644
---- a/fs/btrfs/extent-tree.c
-+++ b/fs/btrfs/extent-tree.c
-@@ -575,7 +575,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
- btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
- }
- }
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- ret = 0;
- fail:
- btrfs_release_path(path);
-@@ -623,7 +623,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
- btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
- else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
- btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- }
- return ret;
- }
-@@ -976,7 +976,7 @@ out:
- * helper to add new inline back ref
- */
- static noinline_for_stack
--void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
-+void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_extent_inline_ref *iref,
- u64 parent, u64 root_objectid,
-@@ -999,7 +999,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
- type = extent_ref_type(parent, owner);
- size = btrfs_extent_inline_ref_size(type);
-
-- btrfs_extend_item(path, size);
-+ btrfs_extend_item(trans, path, size);
-
- ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
- refs = btrfs_extent_refs(leaf, ei);
-@@ -1033,7 +1033,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
- } else {
- btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
- }
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- }
-
- static int lookup_extent_backref(struct btrfs_trans_handle *trans,
-@@ -1066,7 +1066,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
- /*
- * helper to update/remove inline back ref
- */
--static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
-+static noinline_for_stack int update_inline_extent_backref(
-+ struct btrfs_trans_handle *trans,
-+ struct btrfs_path *path,
- struct btrfs_extent_inline_ref *iref,
- int refs_to_mod,
- struct btrfs_delayed_extent_op *extent_op)
-@@ -1174,9 +1176,9 @@ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *pa
- memmove_extent_buffer(leaf, ptr, ptr + size,
- end - ptr - size);
- item_size -= size;
-- btrfs_truncate_item(path, item_size, 1);
-+ btrfs_truncate_item(trans, path, item_size, 1);
- }
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- return 0;
- }
-
-@@ -1206,9 +1208,10 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
- bytenr, num_bytes, root_objectid, path->slots[0]);
- return -EUCLEAN;
- }
-- ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
-+ ret = update_inline_extent_backref(trans, path, iref,
-+ refs_to_add, extent_op);
- } else if (ret == -ENOENT) {
-- setup_inline_extent_backref(trans->fs_info, path, iref, parent,
-+ setup_inline_extent_backref(trans, path, iref, parent,
- root_objectid, owner, offset,
- refs_to_add, extent_op);
- ret = 0;
-@@ -1226,7 +1229,8 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
-
- BUG_ON(!is_data && refs_to_drop != 1);
- if (iref)
-- ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
-+ ret = update_inline_extent_backref(trans, path, iref,
-+ -refs_to_drop, NULL);
- else if (is_data)
- ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
- else
-@@ -1510,7 +1514,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- if (extent_op)
- __run_delayed_extent_op(extent_op, leaf, item);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- /* now insert the actual backref */
-@@ -1678,7 +1682,7 @@ again:
- ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
- __run_delayed_extent_op(extent_op, leaf, ei);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- out:
- btrfs_free_path(path);
- return err;
-@@ -3151,7 +3155,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- }
- } else {
- btrfs_set_extent_refs(leaf, ei, refs);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- }
- if (found_extent) {
- ret = remove_extent_backref(trans, extent_root, path,
-@@ -4659,7 +4663,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
- }
-
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- btrfs_free_path(path);
-
- return alloc_reserved_extent(trans, ins->objectid, ins->offset);
-@@ -4734,7 +4738,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
- btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
- }
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_free_path(path);
-
- return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
-diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
-index caccd0376342b..1530df88370ce 100644
---- a/fs/btrfs/extent_io.c
-+++ b/fs/btrfs/extent_io.c
-@@ -675,8 +675,8 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
- * the array will be skipped
- *
- * Return: 0 if all pages were able to be allocated;
-- * -ENOMEM otherwise, and the caller is responsible for freeing all
-- * non-null page pointers in the array.
-+ * -ENOMEM otherwise, the partially allocated pages would be freed and
-+ * the array slots zeroed
- */
- int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
- {
-@@ -695,8 +695,13 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
- * though alloc_pages_bulk_array() falls back to alloc_page()
- * if it could not bulk-allocate. So we must be out of memory.
- */
-- if (allocated == last)
-+ if (allocated == last) {
-+ for (int i = 0; i < allocated; i++) {
-+ __free_page(page_array[i]);
-+ page_array[i] = NULL;
-+ }
- return -ENOMEM;
-+ }
-
- memalloc_retry_wait(GFP_NOFS);
- }
-diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
-index 1ce5dd1544995..45cae356e89ba 100644
---- a/fs/btrfs/file-item.c
-+++ b/fs/btrfs/file-item.c
-@@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
- btrfs_set_file_extent_encryption(leaf, item, 0);
- btrfs_set_file_extent_other_encoding(leaf, item, 0);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- out:
- btrfs_free_path(path);
- return ret;
-@@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
- * This calls btrfs_truncate_item with the correct args based on the overlap,
- * and fixes up the key as required.
- */
--static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
-+static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_key *key,
- u64 bytenr, u64 len)
- {
-+ struct btrfs_fs_info *fs_info = trans->fs_info;
- struct extent_buffer *leaf;
- const u32 csum_size = fs_info->csum_size;
- u64 csum_end;
-@@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
- */
- u32 new_size = (bytenr - key->offset) >> blocksize_bits;
- new_size *= csum_size;
-- btrfs_truncate_item(path, new_size, 1);
-+ btrfs_truncate_item(trans, path, new_size, 1);
- } else if (key->offset >= bytenr && csum_end > end_byte &&
- end_byte > key->offset) {
- /*
-@@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
- u32 new_size = (csum_end - end_byte) >> blocksize_bits;
- new_size *= csum_size;
-
-- btrfs_truncate_item(path, new_size, 0);
-+ btrfs_truncate_item(trans, path, new_size, 0);
-
- key->offset = end_byte;
-- btrfs_set_item_key_safe(fs_info, path, key);
-+ btrfs_set_item_key_safe(trans, path, key);
- } else {
- BUG();
- }
-@@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
-
- key.offset = end_byte - 1;
- } else {
-- truncate_one_csum(fs_info, path, &key, bytenr, len);
-+ truncate_one_csum(trans, path, &key, bytenr, len);
- if (key.offset < bytenr)
- break;
- }
-@@ -1202,7 +1203,7 @@ extend_csum:
- diff /= csum_size;
- diff *= csum_size;
-
-- btrfs_extend_item(path, diff);
-+ btrfs_extend_item(trans, path, diff);
- ret = 0;
- goto csum;
- }
-@@ -1249,7 +1250,7 @@ found:
- ins_size /= csum_size;
- total_bytes += ins_size * fs_info->sectorsize;
-
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- if (total_bytes < sums->len) {
- btrfs_release_path(path);
- cond_resched();
-diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
-index 361535c71c0f5..23a145ca94573 100644
---- a/fs/btrfs/file.c
-+++ b/fs/btrfs/file.c
-@@ -368,7 +368,7 @@ next_slot:
- btrfs_set_file_extent_offset(leaf, fi, extent_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - args->start);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- if (update_refs && disk_bytenr > 0) {
- btrfs_init_generic_ref(&ref,
-@@ -405,13 +405,13 @@ next_slot:
-
- memcpy(&new_key, &key, sizeof(new_key));
- new_key.offset = args->end;
-- btrfs_set_item_key_safe(fs_info, path, &new_key);
-+ btrfs_set_item_key_safe(trans, path, &new_key);
-
- extent_offset += args->end - key.offset;
- btrfs_set_file_extent_offset(leaf, fi, extent_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - args->end);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- if (update_refs && disk_bytenr > 0)
- args->bytes_found += args->end - key.offset;
- break;
-@@ -431,7 +431,7 @@ next_slot:
-
- btrfs_set_file_extent_num_bytes(leaf, fi,
- args->start - key.offset);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- if (update_refs && disk_bytenr > 0)
- args->bytes_found += extent_end - args->start;
- if (args->end == extent_end)
-@@ -536,7 +536,8 @@ delete_extent_item:
- if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
- path->slots[0]++;
- }
-- btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
-+ btrfs_setup_item_for_insert(trans, root, path, &key,
-+ args->extent_item_size);
- args->extent_inserted = true;
- }
-
-@@ -593,7 +594,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
- int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode, u64 start, u64 end)
- {
-- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *root = inode->root;
- struct extent_buffer *leaf;
- struct btrfs_path *path;
-@@ -664,7 +664,7 @@ again:
- ino, bytenr, orig_offset,
- &other_start, &other_end)) {
- new_key.offset = end;
-- btrfs_set_item_key_safe(fs_info, path, &new_key);
-+ btrfs_set_item_key_safe(trans, path, &new_key);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi,
-@@ -679,7 +679,7 @@ again:
- trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- end - other_start);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- goto out;
- }
- }
-@@ -698,7 +698,7 @@ again:
- trans->transid);
- path->slots[0]++;
- new_key.offset = start;
-- btrfs_set_item_key_safe(fs_info, path, &new_key);
-+ btrfs_set_item_key_safe(trans, path, &new_key);
-
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
-@@ -708,7 +708,7 @@ again:
- other_end - start);
- btrfs_set_file_extent_offset(leaf, fi,
- start - orig_offset);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- goto out;
- }
- }
-@@ -742,7 +742,7 @@ again:
- btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - split);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
- num_bytes, 0);
-@@ -814,7 +814,7 @@ again:
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- } else {
- fi = btrfs_item_ptr(leaf, del_slot - 1,
- struct btrfs_file_extent_item);
-@@ -823,7 +823,7 @@ again:
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - key.offset);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- if (ret < 0) {
-@@ -2104,7 +2104,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
- btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_offset(leaf, fi, 0);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- goto out;
- }
-
-@@ -2112,7 +2112,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
- u64 num_bytes;
-
- key.offset = offset;
-- btrfs_set_item_key_safe(fs_info, path, &key);
-+ btrfs_set_item_key_safe(trans, path, &key);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
-@@ -2121,7 +2121,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
- btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_offset(leaf, fi, 0);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- goto out;
- }
- btrfs_release_path(path);
-@@ -2273,7 +2273,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
- btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
- if (extent_info->is_new_extent)
- btrfs_set_file_extent_generation(leaf, extent, trans->transid);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
-diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
-index 27fad70451aad..8dd8ef760321e 100644
---- a/fs/btrfs/free-space-cache.c
-+++ b/fs/btrfs/free-space-cache.c
-@@ -195,7 +195,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
- btrfs_set_inode_nlink(leaf, inode_item, 1);
- btrfs_set_inode_transid(leaf, inode_item, trans->transid);
- btrfs_set_inode_block_group(leaf, inode_item, offset);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-@@ -213,7 +213,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
- struct btrfs_free_space_header);
- memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
- btrfs_set_free_space_key(leaf, header, &disk_key);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- return 0;
-@@ -1185,7 +1185,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
- btrfs_set_free_space_entries(leaf, header, entries);
- btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
- btrfs_set_free_space_generation(leaf, header, trans->transid);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- return 0;
-diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
-index c0e734082dcc4..7b598b070700e 100644
---- a/fs/btrfs/free-space-tree.c
-+++ b/fs/btrfs/free-space-tree.c
-@@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_free_space_info);
- btrfs_set_free_space_extent_count(leaf, info, 0);
- btrfs_set_free_space_flags(leaf, info, 0);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- ret = 0;
- out:
-@@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
- btrfs_set_free_space_flags(leaf, info, flags);
- expected_extent_count = btrfs_free_space_extent_count(leaf, info);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- if (extent_count != expected_extent_count) {
-@@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
- ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
- write_extent_buffer(leaf, bitmap_cursor, ptr,
- data_size);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- i += extent_size;
-@@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
- flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
- btrfs_set_free_space_flags(leaf, info, flags);
- expected_extent_count = btrfs_free_space_extent_count(leaf, info);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
-@@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
-
- extent_count += new_extents;
- btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- btrfs_release_path(path);
-
- if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
-@@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
- return !!extent_buffer_test_bit(leaf, ptr, i);
- }
-
--static void free_space_set_bits(struct btrfs_block_group *block_group,
-+static void free_space_set_bits(struct btrfs_trans_handle *trans,
-+ struct btrfs_block_group *block_group,
- struct btrfs_path *path, u64 *start, u64 *size,
- int bit)
- {
-@@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
- extent_buffer_bitmap_set(leaf, ptr, first, last - first);
- else
- extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- *size -= end - *start;
- *start = end;
-@@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
- cur_start = start;
- cur_size = size;
- while (1) {
-- free_space_set_bits(block_group, path, &cur_start, &cur_size,
-+ free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
- !remove);
- if (cur_size == 0)
- break;
-diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
-index 4c322b720a80a..d3ff97374d48a 100644
---- a/fs/btrfs/inode-item.c
-+++ b/fs/btrfs/inode-item.c
-@@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
- memmove_extent_buffer(leaf, ptr, ptr + del_len,
- item_size - (ptr + del_len - item_start));
-
-- btrfs_truncate_item(path, item_size - del_len, 1);
-+ btrfs_truncate_item(trans, path, item_size - del_len, 1);
-
- out:
- btrfs_free_path(path);
-@@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
- item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
- memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
- item_size - (ptr + sub_item_len - item_start));
-- btrfs_truncate_item(path, item_size - sub_item_len, 1);
-+ btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
- out:
- btrfs_free_path(path);
-
-@@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
- name))
- goto out;
-
-- btrfs_extend_item(path, ins_len);
-+ btrfs_extend_item(trans, path, ins_len);
- ret = 0;
- }
- if (ret < 0)
-@@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
-
- ptr = (unsigned long)&extref->name;
- write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
- out:
- btrfs_free_path(path);
-@@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
- goto out;
-
- old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
-- btrfs_extend_item(path, ins_len);
-+ btrfs_extend_item(trans, path, ins_len);
- ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_inode_ref);
- ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
-@@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
- ptr = (unsigned long)(ref + 1);
- }
- write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
- out:
- btrfs_free_path(path);
-@@ -591,7 +591,7 @@ search_again:
- num_dec = (orig_num_bytes - extent_num_bytes);
- if (extent_start != 0)
- control->sub_bytes += num_dec;
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- } else {
- extent_num_bytes =
- btrfs_file_extent_disk_num_bytes(leaf, fi);
-@@ -617,7 +617,7 @@ search_again:
-
- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
- size = btrfs_file_extent_calc_inline_size(size);
-- btrfs_truncate_item(path, size, 1);
-+ btrfs_truncate_item(trans, path, size, 1);
- } else if (!del_item) {
- /*
- * We have to bail so the last_size is set to
-diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
-index 7814b9d654ce1..c92c589b454d8 100644
---- a/fs/btrfs/inode.c
-+++ b/fs/btrfs/inode.c
-@@ -573,7 +573,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
- kunmap_local(kaddr);
- put_page(page);
- }
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- /*
-@@ -2912,7 +2912,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
- btrfs_item_ptr_offset(leaf, path->slots[0]),
- sizeof(struct btrfs_file_extent_item));
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_release_path(path);
-
- /*
-@@ -3981,7 +3981,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
- struct btrfs_inode_item);
-
- fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_set_inode_last_trans(trans, inode);
- ret = 0;
- failed:
-@@ -6310,7 +6310,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
- }
- }
-
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- /*
- * We don't need the path anymore, plus inheriting properties, adding
- * ACLs, security xattrs, orphan item or adding the link, will result in
-@@ -6974,8 +6974,15 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
- int ret;
-
- alloc_hint = get_extent_allocation_hint(inode, start, len);
-+again:
- ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
- 0, alloc_hint, &ins, 1, 1);
-+ if (ret == -EAGAIN) {
-+ ASSERT(btrfs_is_zoned(fs_info));
-+ wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
-+ TASK_UNINTERRUPTIBLE);
-+ goto again;
-+ }
- if (ret)
- return ERR_PTR(ret);
-
-@@ -9446,7 +9453,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
-
- ptr = btrfs_file_extent_inline_start(ei);
- write_extent_buffer(leaf, symname, ptr, name_len);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_free_path(path);
-
- d_instantiate_new(dentry, inode);
-diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index 8e7d03bc1b565..0b120716aeb9c 100644
---- a/fs/btrfs/ioctl.c
-+++ b/fs/btrfs/ioctl.c
-@@ -663,7 +663,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
- goto out;
- }
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- inode_item = &root_item->inode;
- btrfs_set_stack_inode_generation(inode_item, 1);
-@@ -1528,7 +1528,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
- static noinline int copy_to_sk(struct btrfs_path *path,
- struct btrfs_key *key,
- struct btrfs_ioctl_search_key *sk,
-- size_t *buf_size,
-+ u64 *buf_size,
- char __user *ubuf,
- unsigned long *sk_offset,
- int *num_found)
-@@ -1660,7 +1660,7 @@ out:
-
- static noinline int search_ioctl(struct inode *inode,
- struct btrfs_ioctl_search_key *sk,
-- size_t *buf_size,
-+ u64 *buf_size,
- char __user *ubuf)
- {
- struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
-@@ -1733,7 +1733,7 @@ static noinline int btrfs_ioctl_tree_search(struct inode *inode,
- struct btrfs_ioctl_search_args __user *uargs = argp;
- struct btrfs_ioctl_search_key sk;
- int ret;
-- size_t buf_size;
-+ u64 buf_size;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-@@ -1763,8 +1763,8 @@ static noinline int btrfs_ioctl_tree_search_v2(struct inode *inode,
- struct btrfs_ioctl_search_args_v2 __user *uarg = argp;
- struct btrfs_ioctl_search_args_v2 args;
- int ret;
-- size_t buf_size;
-- const size_t buf_limit = SZ_16M;
-+ u64 buf_size;
-+ const u64 buf_limit = SZ_16M;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-@@ -2947,7 +2947,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
-
- btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
- btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- btrfs_release_path(path);
-
- btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
-@@ -4351,6 +4351,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
- arg->clone_sources = compat_ptr(args32.clone_sources);
- arg->parent_root = args32.parent_root;
- arg->flags = args32.flags;
-+ arg->version = args32.version;
- memcpy(arg->reserved, args32.reserved,
- sizeof(args32.reserved));
- #else
-diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
-index b99230db3c820..bdaebb9fc6899 100644
---- a/fs/btrfs/qgroup.c
-+++ b/fs/btrfs/qgroup.c
-@@ -622,7 +622,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
-
- ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
-
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
-
- btrfs_free_path(path);
- return ret;
-@@ -700,7 +700,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
- btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
- btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- btrfs_release_path(path);
-
-@@ -719,7 +719,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
- btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
- btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- ret = 0;
- out:
-@@ -808,7 +808,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
- btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
- btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
-
-- btrfs_mark_buffer_dirty(l);
-+ btrfs_mark_buffer_dirty(trans, l);
-
- out:
- btrfs_free_path(path);
-@@ -854,7 +854,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
- btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
- btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
-
-- btrfs_mark_buffer_dirty(l);
-+ btrfs_mark_buffer_dirty(trans, l);
-
- out:
- btrfs_free_path(path);
-@@ -896,7 +896,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
- btrfs_set_qgroup_status_rescan(l, ptr,
- fs_info->qgroup_rescan_progress.objectid);
-
-- btrfs_mark_buffer_dirty(l);
-+ btrfs_mark_buffer_dirty(trans, l);
-
- out:
- btrfs_free_path(path);
-@@ -1069,7 +1069,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
- BTRFS_QGROUP_STATUS_FLAGS_MASK);
- btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- key.objectid = 0;
- key.type = BTRFS_ROOT_REF_KEY;
-diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
-index 95d28497de7c2..e646662e61c6b 100644
---- a/fs/btrfs/ref-verify.c
-+++ b/fs/btrfs/ref-verify.c
-@@ -791,6 +791,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- dump_ref_action(fs_info, ra);
- kfree(ref);
- kfree(ra);
-+ kfree(re);
- goto out_unlock;
- } else if (be->num_refs == 0) {
- btrfs_err(fs_info,
-@@ -800,6 +801,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
- dump_ref_action(fs_info, ra);
- kfree(ref);
- kfree(ra);
-+ kfree(re);
- goto out_unlock;
- }
-
-diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
-index c6d4bb8cbe299..4eaac3ae5c365 100644
---- a/fs/btrfs/relocation.c
-+++ b/fs/btrfs/relocation.c
-@@ -1181,7 +1181,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
- }
- }
- if (dirty)
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- if (inode)
- btrfs_add_delayed_iput(BTRFS_I(inode));
- return ret;
-@@ -1374,13 +1374,13 @@ again:
- */
- btrfs_set_node_blockptr(parent, slot, new_bytenr);
- btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
-- btrfs_mark_buffer_dirty(parent);
-+ btrfs_mark_buffer_dirty(trans, parent);
-
- btrfs_set_node_blockptr(path->nodes[level],
- path->slots[level], old_bytenr);
- btrfs_set_node_ptr_generation(path->nodes[level],
- path->slots[level], old_ptr_gen);
-- btrfs_mark_buffer_dirty(path->nodes[level]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[level]);
-
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
- blocksize, path->nodes[level]->start);
-@@ -2517,7 +2517,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
- node->eb->start);
- btrfs_set_node_ptr_generation(upper->eb, slot,
- trans->transid);
-- btrfs_mark_buffer_dirty(upper->eb);
-+ btrfs_mark_buffer_dirty(trans, upper->eb);
-
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
- node->eb->start, blocksize,
-@@ -3835,7 +3835,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
- btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
- btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
- BTRFS_INODE_PREALLOC);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- out:
- btrfs_free_path(path);
- return ret;
-diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
-index 859874579456f..5b0f1bccc409c 100644
---- a/fs/btrfs/root-tree.c
-+++ b/fs/btrfs/root-tree.c
-@@ -191,7 +191,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
- btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
-
- write_extent_buffer(l, item, ptr, sizeof(*item));
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- out:
- btrfs_free_path(path);
- return ret;
-@@ -438,7 +438,7 @@ again:
- btrfs_set_root_ref_name_len(leaf, ref, name->len);
- ptr = (unsigned long)(ref + 1);
- write_extent_buffer(leaf, name->name, ptr, name->len);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- if (key.type == BTRFS_ROOT_BACKREF_KEY) {
- btrfs_release_path(path);
-diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
-index b877203f1dc5a..4445a52a07076 100644
---- a/fs/btrfs/scrub.c
-+++ b/fs/btrfs/scrub.c
-@@ -1798,6 +1798,9 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
- */
- ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
-
-+ /* @found_logical_ret must be specified. */
-+ ASSERT(found_logical_ret);
-+
- stripe = &sctx->stripes[sctx->cur_stripe];
- scrub_reset_stripe(stripe);
- ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
-@@ -1806,8 +1809,7 @@ static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *
- /* Either >0 as no more extents or <0 for error. */
- if (ret)
- return ret;
-- if (found_logical_ret)
-- *found_logical_ret = stripe->logical;
-+ *found_logical_ret = stripe->logical;
- sctx->cur_stripe++;
-
- /* We filled one group, submit it. */
-@@ -2010,7 +2012,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
-
- /* Go through each extent items inside the logical range */
- while (cur_logical < logical_end) {
-- u64 found_logical;
-+ u64 found_logical = U64_MAX;
- u64 cur_physical = physical + cur_logical - logical_start;
-
- /* Canceled? */
-@@ -2045,6 +2047,8 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
- if (ret < 0)
- break;
-
-+ /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
-+ ASSERT(found_logical != U64_MAX);
- cur_logical = found_logical + BTRFS_STRIPE_LEN;
-
- /* Don't hold CPU for too long time */
-diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
-index 3a566150c531a..db94eefda27e2 100644
---- a/fs/btrfs/send.c
-+++ b/fs/btrfs/send.c
-@@ -8158,7 +8158,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
- }
-
- sctx->send_filp = fget(arg->send_fd);
-- if (!sctx->send_filp) {
-+ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
- ret = -EBADF;
- goto out;
- }
-diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
-index 1a093ec0f7e36..de0bfebce1269 100644
---- a/fs/btrfs/super.c
-+++ b/fs/btrfs/super.c
-@@ -79,7 +79,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data);
-
- static void btrfs_put_super(struct super_block *sb)
- {
-- close_ctree(btrfs_sb(sb));
-+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
-+
-+ btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
-+ close_ctree(fs_info);
- }
-
- enum {
-diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
-index 5ef0b90e25c3b..6a43a64ba55ad 100644
---- a/fs/btrfs/tests/extent-buffer-tests.c
-+++ b/fs/btrfs/tests/extent-buffer-tests.c
-@@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
- key.type = BTRFS_EXTENT_CSUM_KEY;
- key.offset = 0;
-
-- btrfs_setup_item_for_insert(root, path, &key, value_len);
-+ /*
-+ * Passing a NULL trans handle is fine here, we have a dummy root eb
-+ * and the tree is a single node (level 0).
-+ */
-+ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
- write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
- value_len);
-
-diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
-index 05b03f5eab83b..492d69d2fa737 100644
---- a/fs/btrfs/tests/inode-tests.c
-+++ b/fs/btrfs/tests/inode-tests.c
-@@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = start;
-
-- btrfs_setup_item_for_insert(root, &path, &key, value_len);
-+ /*
-+ * Passing a NULL trans handle is fine here, we have a dummy root eb
-+ * and the tree is a single node (level 0).
-+ */
-+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
- fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi, 1);
- btrfs_set_file_extent_type(leaf, fi, type);
-@@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
-
-- btrfs_setup_item_for_insert(root, &path, &key, value_len);
-+ /*
-+ * Passing a NULL trans handle is fine here, we have a dummy root eb
-+ * and the tree is a single node (level 0).
-+ */
-+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
- }
-
- /*
-diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
-index cbb17b5421317..9fb64af608d12 100644
---- a/fs/btrfs/tree-log.c
-+++ b/fs/btrfs/tree-log.c
-@@ -504,9 +504,9 @@ insert:
- found_size = btrfs_item_size(path->nodes[0],
- path->slots[0]);
- if (found_size > item_size)
-- btrfs_truncate_item(path, item_size, 1);
-+ btrfs_truncate_item(trans, path, item_size, 1);
- else if (found_size < item_size)
-- btrfs_extend_item(path, item_size - found_size);
-+ btrfs_extend_item(trans, path, item_size - found_size);
- } else if (ret) {
- return ret;
- }
-@@ -574,7 +574,7 @@ insert:
- }
- }
- no_copy:
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- btrfs_release_path(path);
- return 0;
- }
-@@ -3530,7 +3530,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
- last_offset = max(last_offset, curr_end);
- }
- btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
-- btrfs_mark_buffer_dirty(path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
- btrfs_release_path(path);
- return 0;
- }
-@@ -4488,7 +4488,7 @@ copy_item:
- dst_index++;
- }
-
-- btrfs_mark_buffer_dirty(dst_path->nodes[0]);
-+ btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
- btrfs_release_path(dst_path);
- out:
- kfree(ins_data);
-@@ -4693,7 +4693,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
- write_extent_buffer(leaf, &fi,
- btrfs_item_ptr_offset(leaf, path->slots[0]),
- sizeof(fi));
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- btrfs_release_path(path);
-
-diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
-index 7c7001f42b14c..5be74f9e47ebf 100644
---- a/fs/btrfs/uuid-tree.c
-+++ b/fs/btrfs/uuid-tree.c
-@@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
- * An item with that type already exists.
- * Extend the item and store the new subid at the end.
- */
-- btrfs_extend_item(path, sizeof(subid_le));
-+ btrfs_extend_item(trans, path, sizeof(subid_le));
- eb = path->nodes[0];
- slot = path->slots[0];
- offset = btrfs_item_ptr_offset(eb, slot);
-@@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
- ret = 0;
- subid_le = cpu_to_le64(subid_cpu);
- write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
-- btrfs_mark_buffer_dirty(eb);
-+ btrfs_mark_buffer_dirty(trans, eb);
-
- out:
- btrfs_free_path(path);
-@@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
- move_src = offset + sizeof(subid);
- move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
- memmove_extent_buffer(eb, move_dst, move_src, move_len);
-- btrfs_truncate_item(path, item_size - sizeof(subid), 1);
-+ btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
-
- out:
- btrfs_free_path(path);
-diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
-index b9ef6f54635ca..722a1dde75636 100644
---- a/fs/btrfs/volumes.c
-+++ b/fs/btrfs/volumes.c
-@@ -1894,7 +1894,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
- ptr = btrfs_device_fsid(dev_item);
- write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
- ptr, BTRFS_FSID_SIZE);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- ret = 0;
- out:
-@@ -2597,7 +2597,7 @@ next_slot:
- if (device->fs_devices->seeding) {
- btrfs_set_device_generation(leaf, dev_item,
- device->generation);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- }
-
- path->slots[0]++;
-@@ -2895,7 +2895,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
- btrfs_device_get_disk_total_bytes(device));
- btrfs_set_device_bytes_used(leaf, dev_item,
- btrfs_device_get_bytes_used(device));
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
-
- out:
- btrfs_free_path(path);
-@@ -3045,15 +3045,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
- read_unlock(&em_tree->lock);
-
- if (!em) {
-- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
-+ btrfs_crit(fs_info,
-+ "unable to find chunk map for logical %llu length %llu",
- logical, length);
- return ERR_PTR(-EINVAL);
- }
-
-- if (em->start > logical || em->start + em->len < logical) {
-+ if (em->start > logical || em->start + em->len <= logical) {
- btrfs_crit(fs_info,
-- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
-- logical, length, em->start, em->start + em->len);
-+ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
-+ logical, logical + length, em->start, em->start + em->len);
- free_extent_map(em);
- return ERR_PTR(-EINVAL);
- }
-@@ -3483,7 +3484,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
-
- btrfs_set_balance_flags(leaf, item, bctl->flags);
-
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- out:
- btrfs_free_path(path);
- err = btrfs_commit_transaction(trans);
-@@ -7534,7 +7535,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
- for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
- btrfs_set_dev_stats_value(eb, ptr, i,
- btrfs_dev_stat_read(device, i));
-- btrfs_mark_buffer_dirty(eb);
-+ btrfs_mark_buffer_dirty(trans, eb);
-
- out:
- btrfs_free_path(path);
-diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
-index 96828a13dd43d..b906f809650ef 100644
---- a/fs/btrfs/xattr.c
-+++ b/fs/btrfs/xattr.c
-@@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
- if (old_data_len + name_len + sizeof(*di) == item_size) {
- /* No other xattrs packed in the same leaf item. */
- if (size > old_data_len)
-- btrfs_extend_item(path, size - old_data_len);
-+ btrfs_extend_item(trans, path, size - old_data_len);
- else if (size < old_data_len)
-- btrfs_truncate_item(path, data_size, 1);
-+ btrfs_truncate_item(trans, path, data_size, 1);
- } else {
- /* There are other xattrs packed in the same item. */
- ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret)
- goto out;
-- btrfs_extend_item(path, data_size);
-+ btrfs_extend_item(trans, path, data_size);
- }
-
- ptr = btrfs_item_ptr(leaf, slot, char);
-@@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
- btrfs_set_dir_data_len(leaf, di, size);
- data_ptr = ((unsigned long)(di + 1)) + name_len;
- write_extent_buffer(leaf, value, data_ptr, size);
-- btrfs_mark_buffer_dirty(leaf);
-+ btrfs_mark_buffer_dirty(trans, leaf);
- } else {
- /*
- * Insert, and we had space for the xattr, so path->slots[0] is
-diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
-index 87b3753aa4b1e..c45e8c2d62e11 100644
---- a/fs/debugfs/file.c
-+++ b/fs/debugfs/file.c
-@@ -939,7 +939,7 @@ static ssize_t debugfs_write_file_str(struct file *file, const char __user *user
- new[pos + count] = '\0';
- strim(new);
-
-- rcu_assign_pointer(*(char **)file->private_data, new);
-+ rcu_assign_pointer(*(char __rcu **)file->private_data, new);
- synchronize_rcu();
- kfree(old);
-
-diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
-index 5aabcb6f0f157..c93359ceaae61 100644
---- a/fs/dlm/debug_fs.c
-+++ b/fs/dlm/debug_fs.c
-@@ -973,7 +973,8 @@ void dlm_delete_debug_comms_file(void *ctx)
-
- void dlm_create_debug_file(struct dlm_ls *ls)
- {
-- char name[DLM_LOCKSPACE_LEN + 8];
-+ /* Reserve enough space for the longest file name */
-+ char name[DLM_LOCKSPACE_LEN + sizeof("_queued_asts")];
-
- /* format 1 */
-
-@@ -986,7 +987,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
- /* format 2 */
-
- memset(name, 0, sizeof(name));
-- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name);
-+ snprintf(name, sizeof(name), "%s_locks", ls->ls_name);
-
- ls->ls_debug_locks_dentry = debugfs_create_file(name,
- 0644,
-@@ -997,7 +998,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
- /* format 3 */
-
- memset(name, 0, sizeof(name));
-- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name);
-+ snprintf(name, sizeof(name), "%s_all", ls->ls_name);
-
- ls->ls_debug_all_dentry = debugfs_create_file(name,
- S_IFREG | S_IRUGO,
-@@ -1008,7 +1009,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
- /* format 4 */
-
- memset(name, 0, sizeof(name));
-- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name);
-+ snprintf(name, sizeof(name), "%s_toss", ls->ls_name);
-
- ls->ls_debug_toss_dentry = debugfs_create_file(name,
- S_IFREG | S_IRUGO,
-@@ -1017,7 +1018,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
- &format4_fops);
-
- memset(name, 0, sizeof(name));
-- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name);
-+ snprintf(name, sizeof(name), "%s_waiters", ls->ls_name);
-
- ls->ls_debug_waiters_dentry = debugfs_create_file(name,
- 0644,
-@@ -1028,7 +1029,7 @@ void dlm_create_debug_file(struct dlm_ls *ls)
- /* format 5 */
-
- memset(name, 0, sizeof(name));
-- snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_queued_asts", ls->ls_name);
-+ snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name);
-
- ls->ls_debug_queued_asts_dentry = debugfs_create_file(name,
- 0644,
-diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
-index f641b36a36db0..2247ebb61be1e 100644
---- a/fs/dlm/midcomms.c
-+++ b/fs/dlm/midcomms.c
-@@ -337,13 +337,21 @@ static struct midcomms_node *nodeid2node(int nodeid)
-
- int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
- {
-- int ret, r = nodeid_hash(nodeid);
-+ int ret, idx, r = nodeid_hash(nodeid);
- struct midcomms_node *node;
-
- ret = dlm_lowcomms_addr(nodeid, addr, len);
- if (ret)
- return ret;
-
-+ idx = srcu_read_lock(&nodes_srcu);
-+ node = __find_node(nodeid, r);
-+ if (node) {
-+ srcu_read_unlock(&nodes_srcu, idx);
-+ return 0;
-+ }
-+ srcu_read_unlock(&nodes_srcu, idx);
-+
- node = kmalloc(sizeof(*node), GFP_NOFS);
- if (!node)
- return -ENOMEM;
-@@ -1030,15 +1038,15 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
-
- break;
- case DLM_VERSION_3_2:
-+ /* send ack back if necessary */
-+ dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
-+
- msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
- ppc);
- if (!msg) {
- dlm_free_mhandle(mh);
- goto err;
- }
--
-- /* send ack back if necessary */
-- dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
- break;
- default:
- dlm_free_mhandle(mh);
-@@ -1260,12 +1268,23 @@ void dlm_midcomms_remove_member(int nodeid)
-
- idx = srcu_read_lock(&nodes_srcu);
- node = nodeid2node(nodeid);
-- if (WARN_ON_ONCE(!node)) {
-+ /* in case of dlm_midcomms_close() removes node */
-+ if (!node) {
- srcu_read_unlock(&nodes_srcu, idx);
- return;
- }
-
- spin_lock(&node->state_lock);
-+ /* case of dlm_midcomms_addr() created node but
-+ * was not added before because dlm_midcomms_close()
-+ * removed the node
-+ */
-+ if (!node->users) {
-+ spin_unlock(&node->state_lock);
-+ srcu_read_unlock(&nodes_srcu, idx);
-+ return;
-+ }
-+
- node->users--;
- pr_debug("node %d users dec count %d\n", nodeid, node->users);
-
-@@ -1386,10 +1405,16 @@ void dlm_midcomms_shutdown(void)
- midcomms_shutdown(node);
- }
- }
-- srcu_read_unlock(&nodes_srcu, idx);
-- mutex_unlock(&close_lock);
-
- dlm_lowcomms_shutdown();
-+
-+ for (i = 0; i < CONN_HASH_SIZE; i++) {
-+ hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
-+ midcomms_node_reset(node);
-+ }
-+ }
-+ srcu_read_unlock(&nodes_srcu, idx);
-+ mutex_unlock(&close_lock);
- }
-
- int dlm_midcomms_close(int nodeid)
-diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index 992d9c7e64ae6..5ab4b87888a79 100644
---- a/fs/ecryptfs/inode.c
-+++ b/fs/ecryptfs/inode.c
-@@ -998,6 +998,14 @@ static int ecryptfs_getattr_link(struct mnt_idmap *idmap,
- return rc;
- }
-
-+static int ecryptfs_do_getattr(const struct path *path, struct kstat *stat,
-+ u32 request_mask, unsigned int flags)
-+{
-+ if (flags & AT_GETATTR_NOSEC)
-+ return vfs_getattr_nosec(path, stat, request_mask, flags);
-+ return vfs_getattr(path, stat, request_mask, flags);
-+}
-+
- static int ecryptfs_getattr(struct mnt_idmap *idmap,
- const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
-@@ -1006,8 +1014,8 @@ static int ecryptfs_getattr(struct mnt_idmap *idmap,
- struct kstat lower_stat;
- int rc;
-
-- rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat,
-- request_mask, flags);
-+ rc = ecryptfs_do_getattr(ecryptfs_dentry_to_lower_path(dentry),
-+ &lower_stat, request_mask, flags);
- if (!rc) {
- fsstack_copy_attr_all(d_inode(dentry),
- ecryptfs_inode_to_lower(d_inode(dentry)));
-diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
-index cc6fb9e988991..4256a85719a1d 100644
---- a/fs/erofs/utils.c
-+++ b/fs/erofs/utils.c
-@@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- struct erofs_workgroup *pre;
-
-- /*
-- * Bump up before making this visible to others for the XArray in order
-- * to avoid potential UAF without serialized by xa_lock.
-- */
-- lockref_get(&grp->lockref);
--
-+ DBG_BUGON(grp->lockref.count < 1);
- repeat:
- xa_lock(&sbi->managed_pslots);
- pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
-@@ -96,7 +91,6 @@ repeat:
- cond_resched();
- goto repeat;
- }
-- lockref_put_return(&grp->lockref);
- grp = pre;
- }
- xa_unlock(&sbi->managed_pslots);
-diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
-index 036f610e044b6..a7e6847f6f8f1 100644
---- a/fs/erofs/zdata.c
-+++ b/fs/erofs/zdata.c
-@@ -796,6 +796,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
- return PTR_ERR(pcl);
-
- spin_lock_init(&pcl->obj.lockref.lock);
-+ pcl->obj.lockref.count = 1; /* one ref for this request */
- pcl->algorithmformat = map->m_algorithmformat;
- pcl->length = 0;
- pcl->partial = true;
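
Taken together, the two erofs hunks above change the reference-counting convention: the pcluster is now born with lockref.count == 1 in z_erofs_register_pcluster(), so erofs_insert_workgroup() no longer has to bump the count before publishing and drop it again on a lost race. A tiny sketch of the "creator holds the first reference" convention (illustrative userspace code, not the kernel lockref API):

#include <stdlib.h>

struct obj {
	int refcount;	/* a lockref/atomic in the kernel */
};

struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcount = 1;	/* one ref owned by the creator */
	return o;
}

void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}
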
-diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
-index 1b9f587f6cca5..95c51b025b917 100644
---- a/fs/exfat/namei.c
-+++ b/fs/exfat/namei.c
-@@ -351,14 +351,20 @@ static int exfat_find_empty_entry(struct inode *inode,
- if (exfat_check_max_dentries(inode))
- return -ENOSPC;
-
-- /* we trust p_dir->size regardless of FAT type */
-- if (exfat_find_last_cluster(sb, p_dir, &last_clu))
-- return -EIO;
--
- /*
- * Allocate new cluster to this directory
- */
-- exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
-+ if (ei->start_clu != EXFAT_EOF_CLUSTER) {
-+ /* we trust p_dir->size regardless of FAT type */
-+ if (exfat_find_last_cluster(sb, p_dir, &last_clu))
-+ return -EIO;
-+
-+ exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
-+ } else {
-+ /* This directory is empty */
-+ exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
-+ ALLOC_NO_FAT_CHAIN);
-+ }
-
- /* allocate a cluster */
- ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
-@@ -368,6 +374,11 @@ static int exfat_find_empty_entry(struct inode *inode,
- if (exfat_zeroed_cluster(inode, clu.dir))
- return -EIO;
-
-+ if (ei->start_clu == EXFAT_EOF_CLUSTER) {
-+ ei->start_clu = clu.dir;
-+ p_dir->dir = clu.dir;
-+ }
-+
- /* append to the FAT chain */
- if (clu.flags != p_dir->flags) {
- /* no-fat-chain bit is disabled,
-@@ -645,7 +656,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
- info->type = exfat_get_entry_type(ep);
- info->attr = le16_to_cpu(ep->dentry.file.attr);
- info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
-- if ((info->type == TYPE_FILE) && (info->size == 0)) {
-+ if (info->size == 0) {
- info->flags = ALLOC_NO_FAT_CHAIN;
- info->start_clu = EXFAT_EOF_CLUSTER;
- } else {
-@@ -888,6 +899,9 @@ static int exfat_check_dir_empty(struct super_block *sb,
-
- dentries_per_clu = sbi->dentries_per_clu;
-
-+ if (p_dir->dir == EXFAT_EOF_CLUSTER)
-+ return 0;
-+
- exfat_chain_dup(&clu, p_dir);
-
- while (clu.dir != EXFAT_EOF_CLUSTER) {
-@@ -1255,7 +1269,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
- }
-
- /* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
-- if (new_entry_type == TYPE_DIR) {
-+ if (new_entry_type == TYPE_DIR &&
-+ new_ei->start_clu != EXFAT_EOF_CLUSTER) {
- /* new_ei, new_clu_to_free */
- struct exfat_chain new_clu_to_free;
-
-diff --git a/fs/ext2/file.c b/fs/ext2/file.c
-index 1039e5bf90afd..4ddc36f4dbd40 100644
---- a/fs/ext2/file.c
-+++ b/fs/ext2/file.c
-@@ -258,7 +258,6 @@ static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
- goto out_unlock;
- }
-
-- iocb->ki_pos += status;
- ret += status;
- endbyte = pos + status - 1;
- ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
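
The ext2 hunk above deletes a double advance of the file position: in the fallback from direct I/O to buffered writes, the write helper had already moved iocb->ki_pos, so adding status again corrupted the offset. A toy model of the bug class, with purely illustrative names:

#include <assert.h>

static long do_write(long *pos, long count)
{
	*pos += count;		/* the helper already moves the position */
	return count;
}

int main(void)
{
	long pos = 0;
	long status = do_write(&pos, 4096);

	/* the removed line was the equivalent of: pos += status; */
	assert(pos == 4096 && status == 4096);
	return 0;
}
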
-diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
-index 0c5a79c3b5d48..ef4c19e5f5706 100644
---- a/fs/ext4/acl.h
-+++ b/fs/ext4/acl.h
-@@ -68,6 +68,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
- static inline int
- ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
- {
-+ /* usually, the umask is applied by posix_acl_create(), but if
-+ ext4 ACL support is disabled at compile time, we need to do
-+ it here, because posix_acl_create() will never be called */
-+ inode->i_mode &= ~current_umask();
-+
- return 0;
- }
- #endif /* CONFIG_EXT4_FS_POSIX_ACL */
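
The acl.h stub above matters when CONFIG_EXT4_FS_POSIX_ACL is disabled: posix_acl_create() is never called, so nothing strips the process umask from newly created inodes unless the stub does it. The operation itself is a single mask, illustrated here in standalone C:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	mode_t mask = 022;	/* a typical process umask */
	mode_t mode = 0666;	/* requested creation mode */

	mode &= ~mask;		/* the same operation as the stub */
	assert(mode == 0644);	/* rw-rw-rw- becomes rw-r--r-- */
	return 0;
}
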
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 9418359b1d9d3..cd4ccae1e28a1 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1676,7 +1676,8 @@ struct ext4_sb_info {
-
- /*
- * Barrier between writepages ops and changing any inode's JOURNAL_DATA
-- * or EXTENTS flag.
-+ * or EXTENTS flag or between writepages ops and changing DELALLOC or
-+ * DIOREAD_NOLOCK mount options on remount.
- */
- struct percpu_rw_semaphore s_writepages_rwsem;
- struct dax_device *s_daxdev;
-diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
-index 202c76996b621..4d8496d1a8ac4 100644
---- a/fs/ext4/extents.c
-+++ b/fs/ext4/extents.c
-@@ -1010,6 +1010,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
- ix = curp->p_idx;
- }
-
-+ if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
-+ EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
-+ return -EFSCORRUPTED;
-+ }
-+
- len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
- BUG_ON(len < 0);
- if (len > 0) {
-@@ -1019,11 +1024,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
- memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
- }
-
-- if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
-- EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
-- return -EFSCORRUPTED;
-- }
--
- ix->ei_block = cpu_to_le32(logical);
- ext4_idx_store_pblock(ix, ptr);
- le16_add_cpu(&curp->p_hdr->eh_entries, 1);
-diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
-index 6f7de14c0fa86..f4b50652f0cce 100644
---- a/fs/ext4/extents_status.c
-+++ b/fs/ext4/extents_status.c
-@@ -152,8 +152,9 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
- static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
- struct ext4_inode_info *locked_ei);
--static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-- ext4_lblk_t len);
-+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-+ ext4_lblk_t len,
-+ struct pending_reservation **prealloc);
-
- int __init ext4_init_es(void)
- {
-@@ -448,6 +449,19 @@ static void ext4_es_list_del(struct inode *inode)
- spin_unlock(&sbi->s_es_lock);
- }
-
-+static inline struct pending_reservation *__alloc_pending(bool nofail)
-+{
-+ if (!nofail)
-+ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
-+
-+ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
-+}
-+
-+static inline void __free_pending(struct pending_reservation *pr)
-+{
-+ kmem_cache_free(ext4_pending_cachep, pr);
-+}
-+
- /*
- * Returns true if we cannot fail to allocate memory for this extent_status
- * entry and cannot reclaim it until its status changes.
-@@ -836,11 +850,12 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
- {
- struct extent_status newes;
- ext4_lblk_t end = lblk + len - 1;
-- int err1 = 0;
-- int err2 = 0;
-+ int err1 = 0, err2 = 0, err3 = 0;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct extent_status *es1 = NULL;
- struct extent_status *es2 = NULL;
-+ struct pending_reservation *pr = NULL;
-+ bool revise_pending = false;
-
- if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
- return;
-@@ -868,11 +883,17 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
-
- ext4_es_insert_extent_check(inode, &newes);
-
-+ revise_pending = sbi->s_cluster_ratio > 1 &&
-+ test_opt(inode->i_sb, DELALLOC) &&
-+ (status & (EXTENT_STATUS_WRITTEN |
-+ EXTENT_STATUS_UNWRITTEN));
- retry:
- if (err1 && !es1)
- es1 = __es_alloc_extent(true);
- if ((err1 || err2) && !es2)
- es2 = __es_alloc_extent(true);
-+ if ((err1 || err2 || err3) && revise_pending && !pr)
-+ pr = __alloc_pending(true);
- write_lock(&EXT4_I(inode)->i_es_lock);
-
- err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
-@@ -897,13 +918,18 @@ retry:
- es2 = NULL;
- }
-
-- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
-- (status & EXTENT_STATUS_WRITTEN ||
-- status & EXTENT_STATUS_UNWRITTEN))
-- __revise_pending(inode, lblk, len);
-+ if (revise_pending) {
-+ err3 = __revise_pending(inode, lblk, len, &pr);
-+ if (err3 != 0)
-+ goto error;
-+ if (pr) {
-+ __free_pending(pr);
-+ pr = NULL;
-+ }
-+ }
- error:
- write_unlock(&EXT4_I(inode)->i_es_lock);
-- if (err1 || err2)
-+ if (err1 || err2 || err3)
- goto retry;
-
- ext4_es_print_tree(inode);
-@@ -1311,7 +1337,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
- rc->ndelonly--;
- node = rb_next(&pr->rb_node);
- rb_erase(&pr->rb_node, &tree->root);
-- kmem_cache_free(ext4_pending_cachep, pr);
-+ __free_pending(pr);
- if (!node)
- break;
- pr = rb_entry(node, struct pending_reservation,
-@@ -1405,8 +1431,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
- }
- }
- if (count_reserved)
-- count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
-- &orig_es, &rc);
-+ count_rsvd(inode, orig_es.es_lblk + len1,
-+ orig_es.es_len - len1 - len2, &orig_es, &rc);
- goto out_get_reserved;
- }
-
-@@ -1907,11 +1933,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
- *
- * @inode - file containing the cluster
- * @lblk - logical block in the cluster to be added
-+ * @prealloc - preallocated pending entry
- *
- * Returns 0 on successful insertion and -ENOMEM on failure. If the
- * pending reservation is already in the set, returns successfully.
- */
--static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
-+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
-+ struct pending_reservation **prealloc)
- {
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
-@@ -1937,10 +1965,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
- }
- }
-
-- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
-- if (pr == NULL) {
-- ret = -ENOMEM;
-- goto out;
-+ if (likely(*prealloc == NULL)) {
-+ pr = __alloc_pending(false);
-+ if (!pr) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ } else {
-+ pr = *prealloc;
-+ *prealloc = NULL;
- }
- pr->lclu = lclu;
-
-@@ -1970,7 +2003,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
- if (pr != NULL) {
- tree = &EXT4_I(inode)->i_pending_tree;
- rb_erase(&pr->rb_node, &tree->root);
-- kmem_cache_free(ext4_pending_cachep, pr);
-+ __free_pending(pr);
- }
- }
-
-@@ -2029,10 +2062,10 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
- bool allocated)
- {
- struct extent_status newes;
-- int err1 = 0;
-- int err2 = 0;
-+ int err1 = 0, err2 = 0, err3 = 0;
- struct extent_status *es1 = NULL;
- struct extent_status *es2 = NULL;
-+ struct pending_reservation *pr = NULL;
-
- if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
- return;
-@@ -2052,6 +2085,8 @@ retry:
- es1 = __es_alloc_extent(true);
- if ((err1 || err2) && !es2)
- es2 = __es_alloc_extent(true);
-+ if ((err1 || err2 || err3) && allocated && !pr)
-+ pr = __alloc_pending(true);
- write_lock(&EXT4_I(inode)->i_es_lock);
-
- err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
-@@ -2074,11 +2109,18 @@ retry:
- es2 = NULL;
- }
-
-- if (allocated)
-- __insert_pending(inode, lblk);
-+ if (allocated) {
-+ err3 = __insert_pending(inode, lblk, &pr);
-+ if (err3 != 0)
-+ goto error;
-+ if (pr) {
-+ __free_pending(pr);
-+ pr = NULL;
-+ }
-+ }
- error:
- write_unlock(&EXT4_I(inode)->i_es_lock);
-- if (err1 || err2)
-+ if (err1 || err2 || err3)
- goto retry;
-
- ext4_es_print_tree(inode);
-@@ -2184,21 +2226,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
- * @inode - file containing the range
- * @lblk - logical block defining the start of range
- * @len - length of range in blocks
-+ * @prealloc - preallocated pending entry
- *
- * Used after a newly allocated extent is added to the extents status tree.
- * Requires that the extents in the range have either written or unwritten
- * status. Must be called while holding i_es_lock.
- */
--static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-- ext4_lblk_t len)
-+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-+ ext4_lblk_t len,
-+ struct pending_reservation **prealloc)
- {
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_lblk_t end = lblk + len - 1;
- ext4_lblk_t first, last;
- bool f_del = false, l_del = false;
-+ int ret = 0;
-
- if (len == 0)
-- return;
-+ return 0;
-
- /*
- * Two cases - block range within single cluster and block range
-@@ -2219,7 +2264,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
- first, lblk - 1);
- if (f_del) {
-- __insert_pending(inode, first);
-+ ret = __insert_pending(inode, first, prealloc);
-+ if (ret < 0)
-+ goto out;
- } else {
- last = EXT4_LBLK_CMASK(sbi, end) +
- sbi->s_cluster_ratio - 1;
-@@ -2227,9 +2274,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- l_del = __es_scan_range(inode,
- &ext4_es_is_delonly,
- end + 1, last);
-- if (l_del)
-- __insert_pending(inode, last);
-- else
-+ if (l_del) {
-+ ret = __insert_pending(inode, last, prealloc);
-+ if (ret < 0)
-+ goto out;
-+ } else
- __remove_pending(inode, last);
- }
- } else {
-@@ -2237,18 +2286,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
- if (first != lblk)
- f_del = __es_scan_range(inode, &ext4_es_is_delonly,
- first, lblk - 1);
-- if (f_del)
-- __insert_pending(inode, first);
-- else
-+ if (f_del) {
-+ ret = __insert_pending(inode, first, prealloc);
-+ if (ret < 0)
-+ goto out;
-+ } else
- __remove_pending(inode, first);
-
- last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
- if (last != end)
- l_del = __es_scan_range(inode, &ext4_es_is_delonly,
- end + 1, last);
-- if (l_del)
-- __insert_pending(inode, last);
-- else
-+ if (l_del) {
-+ ret = __insert_pending(inode, last, prealloc);
-+ if (ret < 0)
-+ goto out;
-+ } else
- __remove_pending(inode, last);
- }
-+out:
-+ return ret;
- }
-diff --git a/fs/ext4/file.c b/fs/ext4/file.c
-index 6830ea3a6c59c..0166bb9ca160b 100644
---- a/fs/ext4/file.c
-+++ b/fs/ext4/file.c
-@@ -306,80 +306,38 @@ out:
- }
-
- static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
-- ssize_t written, size_t count)
-+ ssize_t count)
- {
- handle_t *handle;
-- bool truncate = false;
-- u8 blkbits = inode->i_blkbits;
-- ext4_lblk_t written_blk, end_blk;
-- int ret;
--
-- /*
-- * Note that EXT4_I(inode)->i_disksize can get extended up to
-- * inode->i_size while the I/O was running due to writeback of delalloc
-- * blocks. But, the code in ext4_iomap_alloc() is careful to use
-- * zeroed/unwritten extents if this is possible; thus we won't leave
-- * uninitialized blocks in a file even if we didn't succeed in writing
-- * as much as we intended.
-- */
-- WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
-- if (offset + count <= EXT4_I(inode)->i_disksize) {
-- /*
-- * We need to ensure that the inode is removed from the orphan
-- * list if it has been added prematurely, due to writeback of
-- * delalloc blocks.
-- */
-- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
-- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
--
-- if (IS_ERR(handle)) {
-- ext4_orphan_del(NULL, inode);
-- return PTR_ERR(handle);
-- }
--
-- ext4_orphan_del(handle, inode);
-- ext4_journal_stop(handle);
-- }
--
-- return written;
-- }
--
-- if (written < 0)
-- goto truncate;
-
-+ lockdep_assert_held_write(&inode->i_rwsem);
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-- if (IS_ERR(handle)) {
-- written = PTR_ERR(handle);
-- goto truncate;
-- }
-+ if (IS_ERR(handle))
-+ return PTR_ERR(handle);
-
-- if (ext4_update_inode_size(inode, offset + written)) {
-- ret = ext4_mark_inode_dirty(handle, inode);
-+ if (ext4_update_inode_size(inode, offset + count)) {
-+ int ret = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(ret)) {
-- written = ret;
- ext4_journal_stop(handle);
-- goto truncate;
-+ return ret;
- }
- }
-
-- /*
-- * We may need to truncate allocated but not written blocks beyond EOF.
-- */
-- written_blk = ALIGN(offset + written, 1 << blkbits);
-- end_blk = ALIGN(offset + count, 1 << blkbits);
-- if (written_blk < end_blk && ext4_can_truncate(inode))
-- truncate = true;
--
-- /*
-- * Remove the inode from the orphan list if it has been extended and
-- * everything went OK.
-- */
-- if (!truncate && inode->i_nlink)
-+ if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
- ext4_journal_stop(handle);
-
-- if (truncate) {
--truncate:
-+ return count;
-+}
-+
-+/*
-+ * Clean up the inode after DIO or DAX extending write has completed and the
-+ * inode size has been updated using ext4_handle_inode_extension().
-+ */
-+static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
-+{
-+ lockdep_assert_held_write(&inode->i_rwsem);
-+ if (count < 0) {
- ext4_truncate_failed_write(inode);
- /*
- * If the truncate operation failed early, then the inode may
-@@ -388,9 +346,28 @@ truncate:
- */
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
-+ return;
- }
-+ /*
-+ * If i_disksize got extended due to writeback of delalloc blocks while
-+ * the DIO was running we could fail to cleanup the orphan list in
-+ * ext4_handle_inode_extension(). Do it now.
-+ */
-+ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
-+ handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
-
-- return written;
-+ if (IS_ERR(handle)) {
-+ /*
-+ * The write has successfully completed. Not much to
-+ * do with the error here so just cleanup the orphan
-+ * list and hope for the best.
-+ */
-+ ext4_orphan_del(NULL, inode);
-+ return;
-+ }
-+ ext4_orphan_del(handle, inode);
-+ ext4_journal_stop(handle);
-+ }
- }
-
- static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
-@@ -399,31 +376,22 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
- loff_t pos = iocb->ki_pos;
- struct inode *inode = file_inode(iocb->ki_filp);
-
-+ if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
-+ error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
- if (error)
- return error;
--
-- if (size && flags & IOMAP_DIO_UNWRITTEN) {
-- error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
-- if (error < 0)
-- return error;
-- }
- /*
-- * If we are extending the file, we have to update i_size here before
-- * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
-- * buffered reads could zero out too much from page cache pages. Update
-- * of on-disk size will happen later in ext4_dio_write_iter() where
-- * we have enough information to also perform orphan list handling etc.
-- * Note that we perform all extending writes synchronously under
-- * i_rwsem held exclusively so i_size update is safe here in that case.
-- * If the write was not extending, we cannot see pos > i_size here
-- * because operations reducing i_size like truncate wait for all
-- * outstanding DIO before updating i_size.
-+ * Note that EXT4_I(inode)->i_disksize can get extended up to
-+ * inode->i_size while the I/O was running due to writeback of delalloc
-+ * blocks. But the code in ext4_iomap_alloc() is careful to use
-+ * zeroed/unwritten extents if this is possible; thus we won't leave
-+ * uninitialized blocks in a file even if we didn't succeed in writing
-+ * as much as we intended.
- */
-- pos += size;
-- if (pos > i_size_read(inode))
-- i_size_write(inode, pos);
--
-- return 0;
-+ WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
-+ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
-+ return size;
-+ return ext4_handle_inode_extension(inode, pos, size);
- }
-
- static const struct iomap_dio_ops ext4_dio_write_ops = {
-@@ -569,18 +537,20 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
- return ext4_buffered_write_iter(iocb, from);
- }
-
-+ /*
-+ * Prevent inline data from being created since we are going to allocate
-+ * blocks for DIO. We know the inode does not currently have inline data
-+ * because ext4_should_use_dio() checked for it, but we have to clear
-+ * the state flag before the write checks because a lock cycle could
-+ * introduce races with other writers.
-+ */
-+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
-+
- ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
- &unwritten, &dio_flags);
- if (ret <= 0)
- return ret;
-
-- /*
-- * Make sure inline data cannot be created anymore since we are going
-- * to allocate blocks for DIO. We know the inode does not have any
-- * inline data now because ext4_dio_supported() checked for that.
-- */
-- ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
--
- offset = iocb->ki_pos;
- count = ret;
-
-@@ -606,9 +576,16 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
- dio_flags, NULL, 0);
- if (ret == -ENOTBLK)
- ret = 0;
--
-- if (extend)
-- ret = ext4_handle_inode_extension(inode, offset, ret, count);
-+ if (extend) {
-+ /*
-+ * We always perform extending DIO write synchronously so by
-+ * now the IO is completed and ext4_handle_inode_extension()
-+ * was called. Cleanup the inode in case of error or race with
-+ * writeback of delalloc blocks.
-+ */
-+ WARN_ON_ONCE(ret == -EIOCBQUEUED);
-+ ext4_inode_extension_cleanup(inode, ret);
-+ }
-
- out:
- if (ilock_shared)
-@@ -689,8 +666,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
-
- ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
-
-- if (extend)
-- ret = ext4_handle_inode_extension(inode, offset, ret, count);
-+ if (extend) {
-+ ret = ext4_handle_inode_extension(inode, offset, ret);
-+ ext4_inode_extension_cleanup(inode, ret);
-+ }
- out:
- inode_unlock(inode);
- if (ret > 0)
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index 4ce35f1c8b0a8..d7732320431ac 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -789,10 +789,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
- int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
-+ int ret = 0;
-+
- ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
- inode->i_ino, create);
-- return _ext4_get_block(inode, iblock, bh_result,
-+ ret = _ext4_get_block(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
-+
-+ /*
-+ * If the buffer is marked unwritten, mark it as new to make sure it is
-+ * zeroed out correctly in case of partial writes. Otherwise, there is
-+ * a chance of stale data getting exposed.
-+ */
-+ if (ret == 0 && buffer_unwritten(bh_result))
-+ set_buffer_new(bh_result);
-+
-+ return ret;
- }
-
- /* Maximum number of blocks we map for direct IO at once. */
-diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 1e599305d85fa..a7b8558c0d093 100644
---- a/fs/ext4/mballoc.c
-+++ b/fs/ext4/mballoc.c
-@@ -417,8 +417,6 @@ static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
-
- static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
- ext4_group_t group);
--static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
-- ext4_group_t group);
- static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
-
- static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
-@@ -1361,17 +1359,17 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
- * We place the buddy block and bitmap block
- * close together
- */
-+ grinfo = ext4_get_group_info(sb, group);
-+ if (!grinfo) {
-+ err = -EFSCORRUPTED;
-+ goto out;
-+ }
- if ((first_block + i) & 1) {
- /* this is block of buddy */
- BUG_ON(incore == NULL);
- mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
- group, page->index, i * blocksize);
- trace_ext4_mb_buddy_bitmap_load(sb, group);
-- grinfo = ext4_get_group_info(sb, group);
-- if (!grinfo) {
-- err = -EFSCORRUPTED;
-- goto out;
-- }
- grinfo->bb_fragments = 0;
- memset(grinfo->bb_counters, 0,
- sizeof(*grinfo->bb_counters) *
-@@ -1398,7 +1396,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
-
- /* mark all preallocated blks used in in-core bitmap */
- ext4_mb_generate_from_pa(sb, data, group);
-- ext4_mb_generate_from_freelist(sb, data, group);
-+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
- ext4_unlock_group(sb, group);
-
- /* set incore so that the buddy information can be
-@@ -4958,31 +4956,6 @@ try_group_pa:
- return false;
- }
-
--/*
-- * the function goes through all block freed in the group
-- * but not yet committed and marks them used in in-core bitmap.
-- * buddy must be generated from this bitmap
-- * Need to be called with the ext4 group lock held
-- */
--static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
-- ext4_group_t group)
--{
-- struct rb_node *n;
-- struct ext4_group_info *grp;
-- struct ext4_free_data *entry;
--
-- grp = ext4_get_group_info(sb, group);
-- if (!grp)
-- return;
-- n = rb_first(&(grp->bb_free_root));
--
-- while (n) {
-- entry = rb_entry(n, struct ext4_free_data, efd_node);
-- mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
-- n = rb_next(n);
-- }
--}
--
- /*
- * the function goes through all preallocation in this group and marks them
- * used in in-core bitmap. buddy must be generated from this bitmap
-diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
-index 0361c20910def..667381180b261 100644
---- a/fs/ext4/resize.c
-+++ b/fs/ext4/resize.c
-@@ -560,13 +560,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
- if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
- goto handle_itb;
-
-- if (meta_bg == 1) {
-- ext4_group_t first_group;
-- first_group = ext4_meta_bg_first_group(sb, group);
-- if (first_group != group + 1 &&
-- first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
-- goto handle_itb;
-- }
-+ if (meta_bg == 1)
-+ goto handle_itb;
-
- block = start + ext4_bg_has_super(sb, group);
- /* Copy all of the GDT blocks into the backup in this group */
-@@ -1191,8 +1186,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
- ext4_group_first_block_no(sb, group));
- BUFFER_TRACE(bh, "get_write_access");
- if ((err = ext4_journal_get_write_access(handle, sb, bh,
-- EXT4_JTR_NONE)))
-+ EXT4_JTR_NONE))) {
-+ brelse(bh);
- break;
-+ }
- lock_buffer(bh);
- memcpy(bh->b_data, data, size);
- if (rest)
-@@ -1601,6 +1598,8 @@ exit_journal:
- int gdb_num_end = ((group + flex_gd->count - 1) /
- EXT4_DESC_PER_BLOCK(sb));
- int meta_bg = ext4_has_feature_meta_bg(sb);
-+ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
-+ ext4_group_first_block_no(sb, 0);
- sector_t old_gdb = 0;
-
- update_backups(sb, ext4_group_first_block_no(sb, 0),
-@@ -1612,8 +1611,8 @@ exit_journal:
- gdb_num);
- if (old_gdb == gdb_bh->b_blocknr)
- continue;
-- update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
-- gdb_bh->b_size, meta_bg);
-+ update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
-+ gdb_bh->b_data, gdb_bh->b_size, meta_bg);
- old_gdb = gdb_bh->b_blocknr;
- }
- }
-@@ -1980,9 +1979,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
-
- errout:
- ret = ext4_journal_stop(handle);
-- if (!err)
-- err = ret;
-- return ret;
-+ return err ? err : ret;
-
- invalid_resize_inode:
- ext4_error(sb, "corrupted/inconsistent resize inode");
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index dbebd8b3127e5..d062383ea50ef 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -768,7 +768,8 @@ static void update_super_work(struct work_struct *work)
- */
- if (!sb_rdonly(sbi->s_sb) && journal) {
- struct buffer_head *sbh = sbi->s_sbh;
-- bool call_notify_err;
-+ bool call_notify_err = false;
-+
- handle = jbd2_journal_start(journal, 1);
- if (IS_ERR(handle))
- goto write_directly;
-@@ -6442,6 +6443,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
- struct ext4_mount_options old_opts;
- ext4_group_t g;
- int err = 0;
-+ int alloc_ctx;
- #ifdef CONFIG_QUOTA
- int enable_quota = 0;
- int i, j;
-@@ -6482,7 +6484,16 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
-
- }
-
-+	/*
-+	 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
-+	 * two calls to ext4_should_dioread_nolock() to return inconsistent
-+	 * values, triggering a WARN_ON in ext4_add_complete_io(). We grab
-+	 * s_writepages_rwsem here to avoid races between writepages ops
-+	 * and remount.
-+	 */
-+ alloc_ctx = ext4_writepages_down_write(sb);
- ext4_apply_options(fc, sb);
-+ ext4_writepages_up_write(sb, alloc_ctx);
-
- if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
- test_opt(sb, JOURNAL_CHECKSUM)) {
-@@ -6700,6 +6711,8 @@ restore_opts:
- if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
- sb_any_quota_suspended(sb))
- dquot_resume(sb, -1);
-+
-+ alloc_ctx = ext4_writepages_down_write(sb);
- sb->s_flags = old_sb_flags;
- sbi->s_mount_opt = old_opts.s_mount_opt;
- sbi->s_mount_opt2 = old_opts.s_mount_opt2;
-@@ -6708,6 +6721,8 @@ restore_opts:
- sbi->s_commit_interval = old_opts.s_commit_interval;
- sbi->s_min_batch_time = old_opts.s_min_batch_time;
- sbi->s_max_batch_time = old_opts.s_max_batch_time;
-+ ext4_writepages_up_write(sb, alloc_ctx);
-+
- if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
- ext4_release_system_zone(sb);
- #ifdef CONFIG_QUOTA
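
Both super.c hunks above enforce the same rule: mount options may only change while s_writepages_rwsem is held for write, in the normal apply path and the restore_opts error path alike, so writepages never observes a half-applied option set. A minimal sketch of that locking rule, with a pthread rwlock standing in for the percpu semaphore and illustrative names:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t writepages_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool opt_delalloc, opt_dioread_nolock;

void apply_options(bool delalloc, bool dioread_nolock)
{
	pthread_rwlock_wrlock(&writepages_lock);	/* exclude writeback */
	opt_delalloc = delalloc;
	opt_dioread_nolock = dioread_nolock;
	pthread_rwlock_unlock(&writepages_lock);
}

void writepages(void)
{
	pthread_rwlock_rdlock(&writepages_lock);
	/* every option test in here sees one coherent set */
	pthread_rwlock_unlock(&writepages_lock);
}
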
-diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
-index d820801f473e5..7514661bbfbb1 100644
---- a/fs/f2fs/compress.c
-+++ b/fs/f2fs/compress.c
-@@ -1976,7 +1976,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
- int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
- {
- dev_t dev = sbi->sb->s_bdev->bd_dev;
-- char slab_name[32];
-+ char slab_name[35];
-
- if (!f2fs_sb_has_compression(sbi))
- return 0;
-diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
-index 916e317ac925f..1ac34eb49a0e8 100644
---- a/fs/f2fs/data.c
-+++ b/fs/f2fs/data.c
-@@ -2344,8 +2344,10 @@ skip_reading_dnode:
- f2fs_wait_on_block_writeback(inode, blkaddr);
-
- if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
-- if (atomic_dec_and_test(&dic->remaining_pages))
-+ if (atomic_dec_and_test(&dic->remaining_pages)) {
- f2fs_decompress_cluster(dic, true);
-+ break;
-+ }
- continue;
- }
-
-@@ -3023,7 +3025,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
- {
- int ret = 0;
- int done = 0, retry = 0;
-- struct page *pages[F2FS_ONSTACK_PAGES];
-+ struct page *pages_local[F2FS_ONSTACK_PAGES];
-+ struct page **pages = pages_local;
- struct folio_batch fbatch;
- struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
- struct bio *bio = NULL;
-@@ -3047,6 +3050,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
- #endif
- int nr_folios, p, idx;
- int nr_pages;
-+ unsigned int max_pages = F2FS_ONSTACK_PAGES;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
- pgoff_t done_index;
-@@ -3056,6 +3060,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
- int submitted = 0;
- int i;
-
-+#ifdef CONFIG_F2FS_FS_COMPRESSION
-+ if (f2fs_compressed_file(inode) &&
-+ 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
-+ pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
-+ cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
-+ max_pages = 1 << cc.log_cluster_size;
-+ }
-+#endif
-+
- folio_batch_init(&fbatch);
-
- if (get_dirty_pages(mapping->host) <=
-@@ -3101,7 +3114,7 @@ again:
- add_more:
- pages[nr_pages] = folio_page(folio, idx);
- folio_get(folio);
-- if (++nr_pages == F2FS_ONSTACK_PAGES) {
-+ if (++nr_pages == max_pages) {
- index = folio->index + idx + 1;
- folio_batch_release(&fbatch);
- goto write;
-@@ -3283,6 +3296,11 @@ next:
- if (bio)
- f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
-
-+#ifdef CONFIG_F2FS_FS_COMPRESSION
-+ if (pages != pages_local)
-+ kfree(pages);
-+#endif
-+
- return ret;
- }
-
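
The f2fs_write_cache_pages() change above swaps a fixed on-stack page array for a heap allocation whenever a compressed file's cluster holds more pages than fit on the stack, and frees the buffer only when it is not the local array. The shape of that idiom in standalone C (sizes and names are illustrative):

#include <stdlib.h>

#define ONSTACK_SLOTS 64

void process(unsigned int needed)
{
	void *local[ONSTACK_SLOTS];
	void **slots = local;
	unsigned int max = ONSTACK_SLOTS;

	if (needed > ONSTACK_SLOTS) {
		slots = calloc(needed, sizeof(*slots));
		if (!slots)
			return;
		max = needed;
	}

	/* ... fill and drain up to 'max' slots ... */
	(void)max;

	if (slots != local)	/* free only the heap variant */
		free(slots);
}
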
-diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
-index 0e2d49140c07f..ad8dfac73bd44 100644
---- a/fs/f2fs/extent_cache.c
-+++ b/fs/f2fs/extent_cache.c
-@@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
- }
- }
-
--static bool __may_read_extent_tree(struct inode *inode)
--{
-- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
--
-- if (!test_opt(sbi, READ_EXTENT_CACHE))
-- return false;
-- if (is_inode_flag_set(inode, FI_NO_EXTENT))
-- return false;
-- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
-- !f2fs_sb_has_readonly(sbi))
-- return false;
-- return S_ISREG(inode->i_mode);
--}
--
--static bool __may_age_extent_tree(struct inode *inode)
--{
-- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
--
-- if (!test_opt(sbi, AGE_EXTENT_CACHE))
-- return false;
-- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
-- return false;
-- if (file_is_cold(inode))
-- return false;
--
-- return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
--}
--
- static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
- {
- if (type == EX_READ)
-- return __may_read_extent_tree(inode);
-- else if (type == EX_BLOCK_AGE)
-- return __may_age_extent_tree(inode);
-+ return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
-+ S_ISREG(inode->i_mode);
-+ if (type == EX_BLOCK_AGE)
-+ return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
-+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
- return false;
- }
-
-@@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
- if (list_empty(&F2FS_I_SB(inode)->s_list))
- return false;
-
-- return __init_may_extent_tree(inode, type);
-+ if (!__init_may_extent_tree(inode, type))
-+ return false;
-+
-+ if (type == EX_READ) {
-+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
-+ return false;
-+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
-+ !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
-+ return false;
-+ } else if (type == EX_BLOCK_AGE) {
-+ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
-+ return false;
-+ if (file_is_cold(inode))
-+ return false;
-+ }
-+ return true;
- }
-
- static void __try_update_largest_extent(struct extent_tree *et,
-diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
-index ca5904129b162..a06f03d23762f 100644
---- a/fs/f2fs/file.c
-+++ b/fs/f2fs/file.c
-@@ -3258,6 +3258,7 @@ int f2fs_precache_extents(struct inode *inode)
- return -EOPNOTSUPP;
-
- map.m_lblk = 0;
-+ map.m_pblk = 0;
- map.m_next_pgofs = NULL;
- map.m_next_extent = &m_next_extent;
- map.m_seg_type = NO_CHECK_TYPE;
-@@ -4005,6 +4006,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
- F2FS_I(inode)->i_compress_algorithm = option.algorithm;
- F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
- F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
-+ /* Set default level */
-+ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
-+ F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
-+ else
-+ F2FS_I(inode)->i_compress_level = 0;
-+ /* Adjust mount option level */
-+ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
-+ F2FS_OPTION(sbi).compress_level)
-+ F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
- f2fs_mark_inode_dirty_sync(inode, true);
-
- if (!f2fs_is_compress_backend_ready(inode))
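
The ioctl hunk above makes F2FS_IOC_SET_COMPRESS_OPTION reset the per-inode compression level as well: ZSTD falls back to its default level, other algorithms to 0, and a non-zero mount-wide level wins when the chosen algorithm matches the mount option. The selection logic in isolation (the default-level constant here is an illustrative stand-in):

enum alg { ALG_LZO, ALG_LZ4, ALG_ZSTD };

#define ZSTD_DEFAULT_LEVEL 3	/* stand-in for F2FS_ZSTD_DEFAULT_CLEVEL */

int pick_level(enum alg chosen, enum alg mount_alg, int mount_level)
{
	int level = (chosen == ALG_ZSTD) ? ZSTD_DEFAULT_LEVEL : 0;

	if (chosen == mount_alg && mount_level)
		level = mount_level;	/* mount option overrides */
	return level;
}
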
-diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
-index ee2e1dd64f256..8b30f11f37b46 100644
---- a/fs/f2fs/node.c
-+++ b/fs/f2fs/node.c
-@@ -1467,7 +1467,8 @@ page_hit:
- ofs_of_node(page), cpver_of_node(page),
- next_blkaddr_of_node(page));
- set_sbi_flag(sbi, SBI_NEED_FSCK);
-- err = -EINVAL;
-+ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
-+ err = -EFSCORRUPTED;
- out_err:
- ClearPageUptodate(page);
- out_put_err:
-@@ -2389,7 +2390,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
- blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
-
- if (blk_addr == NEW_ADDR)
-- return -EINVAL;
-+ return -EFSCORRUPTED;
-
- if (blk_addr == NULL_ADDR) {
- add_free_nid(sbi, start_nid, true, true);
-@@ -2504,7 +2505,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
-
- if (ret) {
- f2fs_up_read(&nm_i->nat_tree_lock);
-- f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
-+
-+ if (ret == -EFSCORRUPTED) {
-+ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
-+ set_sbi_flag(sbi, SBI_NEED_FSCK);
-+ f2fs_handle_error(sbi,
-+ ERROR_INCONSISTENT_NAT);
-+ }
-+
- return ret;
- }
- }
-@@ -2743,7 +2751,9 @@ recover_xnid:
- f2fs_update_inode_page(inode);
-
- /* 3: update and set xattr node page dirty */
-- memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
-+ if (page)
-+ memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
-+ VALID_XATTR_BLOCK_SIZE);
-
- set_page_dirty(xpage);
- f2fs_put_page(xpage, 1);
-diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
-index a8c8232852bb1..bc303a0522155 100644
---- a/fs/f2fs/super.c
-+++ b/fs/f2fs/super.c
-@@ -547,6 +547,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
- }
-
- #ifdef CONFIG_F2FS_FS_COMPRESSION
-+static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
-+ const char *new_ext, bool is_ext)
-+{
-+ unsigned char (*ext)[F2FS_EXTENSION_LEN];
-+ int ext_cnt;
-+ int i;
-+
-+ if (is_ext) {
-+ ext = F2FS_OPTION(sbi).extensions;
-+ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
-+ } else {
-+ ext = F2FS_OPTION(sbi).noextensions;
-+ ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
-+ }
-+
-+ for (i = 0; i < ext_cnt; i++) {
-+ if (!strcasecmp(new_ext, ext[i]))
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
- /*
- * 1. The same extension name cannot appear in both the compress and non-compress extension
- * lists at the same time.
-@@ -1149,6 +1172,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
- return -EINVAL;
- }
-
-+ if (is_compress_extension_exist(sbi, name, true)) {
-+ kfree(name);
-+ break;
-+ }
-+
- strcpy(ext[ext_cnt], name);
- F2FS_OPTION(sbi).compress_ext_cnt++;
- kfree(name);
-@@ -1173,6 +1201,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
- return -EINVAL;
- }
-
-+ if (is_compress_extension_exist(sbi, name, false)) {
-+ kfree(name);
-+ break;
-+ }
-+
- strcpy(noext[noext_cnt], name);
- F2FS_OPTION(sbi).nocompress_ext_cnt++;
- kfree(name);
-@@ -1629,7 +1662,7 @@ static void f2fs_put_super(struct super_block *sb)
-
- f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
-
-- if (err) {
-+ if (err || f2fs_cp_error(sbi)) {
- truncate_inode_pages_final(NODE_MAPPING(sbi));
- truncate_inode_pages_final(META_MAPPING(sbi));
- }
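
The parse_options() hunks above silently skip a compress/nocompress extension that is already present in the respective list, compared case-insensitively, instead of appending a duplicate. The check itself is a short loop (the extension length below is illustrative):

#include <stdbool.h>
#include <strings.h>

bool ext_exists(const char *new_ext, const char (*ext)[8], int ext_cnt)
{
	for (int i = 0; i < ext_cnt; i++)
		if (!strcasecmp(new_ext, ext[i]))
			return true;	/* already configured: skip it */
	return false;
}
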
-diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
-index a657284faee30..465d145360de3 100644
---- a/fs/f2fs/xattr.c
-+++ b/fs/f2fs/xattr.c
-@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
-
- *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
- if (!*xe) {
-- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
-+ f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
- inode->i_ino);
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
-- err = -EFSCORRUPTED;
-+ err = -ENODATA;
- f2fs_handle_error(F2FS_I_SB(inode),
- ERROR_CORRUPTED_XATTR);
- goto out;
-@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
-
- if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
- (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
-- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
-+ f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
- inode->i_ino);
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
-- error = -EFSCORRUPTED;
- f2fs_handle_error(F2FS_I_SB(inode),
- ERROR_CORRUPTED_XATTR);
-- goto cleanup;
-+ break;
- }
-
- if (!prefix)
-@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
-
- if (size > MAX_VALUE_LEN(inode))
- return -E2BIG;
--
-+retry:
- error = read_all_xattrs(inode, ipage, &base_addr);
- if (error)
- return error;
-@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
- /* find entry with wanted name. */
- here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
- if (!here) {
-- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
-+ if (!F2FS_I(inode)->i_xattr_nid) {
-+ f2fs_notice(F2FS_I_SB(inode),
-+ "recover xattr in inode (%lu)", inode->i_ino);
-+ f2fs_recover_xattr_data(inode, NULL);
-+ kfree(base_addr);
-+ goto retry;
-+ }
-+ f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
- inode->i_ino);
- set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
- error = -EFSCORRUPTED;
-diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
-index c1af01b2c42d7..1767493dffda7 100644
---- a/fs/fs-writeback.c
-+++ b/fs/fs-writeback.c
-@@ -613,6 +613,24 @@ out_free:
- kfree(isw);
- }
-
-+static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
-+ struct list_head *list, int *nr)
-+{
-+ struct inode *inode;
-+
-+ list_for_each_entry(inode, list, i_io_list) {
-+ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
-+ continue;
-+
-+ isw->inodes[*nr] = inode;
-+ (*nr)++;
-+
-+ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
-+ return true;
-+ }
-+ return false;
-+}
-+
- /**
- * cleanup_offline_cgwb - detach associated inodes
- * @wb: target wb
-@@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
- {
- struct cgroup_subsys_state *memcg_css;
- struct inode_switch_wbs_context *isw;
-- struct inode *inode;
- int nr;
- bool restart = false;
-
-@@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
-
- nr = 0;
- spin_lock(&wb->list_lock);
-- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
-- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
-- continue;
--
-- isw->inodes[nr++] = inode;
--
-- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
-- restart = true;
-- break;
-- }
-- }
-+	/*
-+	 * In addition to the inodes that have completed writeback, also switch
-+	 * cgwbs for inodes that only have dirty timestamps. Otherwise those
-+	 * inodes won't be written back for a long time when lazytime is
-+	 * enabled, thus pinning the dying cgwbs. This doesn't break the
-+	 * bandwidth restrictions, as writeback of inode metadata is not
-+	 * accounted for.
-+	 */
-+ restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
-+ if (!restart)
-+ restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
- spin_unlock(&wb->list_lock);
-
- /* no attached inodes? bail out */
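
The fs-writeback.c hunk above factors the batch-collection loop into isw_prepare_wbs_switch() so it can run over both b_attached and, per the new comment, b_dirty_time, stopping as soon as the bounded batch fills. A userspace sketch of that "fill a batch from two lists, report when full" shape, with illustrative names:

#include <stdbool.h>
#include <stddef.h>

#define BATCH_MAX 16

struct item { struct item *next; };

static bool collect(struct item *list, struct item **batch, int *nr)
{
	for (struct item *it = list; it; it = it->next) {
		batch[(*nr)++] = it;
		if (*nr >= BATCH_MAX - 1)
			return true;	/* batch full: caller restarts later */
	}
	return false;
}

bool collect_two_lists(struct item *attached, struct item *dirty_time,
		       struct item **batch)
{
	int nr = 0;
	bool restart = collect(attached, batch, &nr);

	if (!restart)		/* second list only if there is room left */
		restart = collect(dirty_time, batch, &nr);
	return restart;
}
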
-diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
-index 0eac045079047..4e63fbb63151c 100644
---- a/fs/gfs2/inode.c
-+++ b/fs/gfs2/inode.c
-@@ -1866,16 +1866,24 @@ out:
- int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
- int mask)
- {
-+ int may_not_block = mask & MAY_NOT_BLOCK;
- struct gfs2_inode *ip;
- struct gfs2_holder i_gh;
-+ struct gfs2_glock *gl;
- int error;
-
- gfs2_holder_mark_uninitialized(&i_gh);
- ip = GFS2_I(inode);
-- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
-- if (mask & MAY_NOT_BLOCK)
-+ gl = rcu_dereference_check(ip->i_gl, !may_not_block);
-+ if (unlikely(!gl)) {
-+ /* inode is getting torn down, must be RCU mode */
-+ WARN_ON_ONCE(!may_not_block);
-+ return -ECHILD;
-+ }
-+ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
-+ if (may_not_block)
- return -ECHILD;
-- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
-+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
- if (error)
- return error;
- }
-diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
-index 33ca04733e933..dd64140ae6d7b 100644
---- a/fs/gfs2/ops_fstype.c
-+++ b/fs/gfs2/ops_fstype.c
-@@ -1281,10 +1281,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
-
- if (!sb_rdonly(sb)) {
- error = init_threads(sdp);
-- if (error) {
-- gfs2_withdraw_delayed(sdp);
-+ if (error)
- goto fail_per_node;
-- }
- }
-
- error = gfs2_freeze_lock_shared(sdp);
-diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
-index 171b2713d2e5e..41d0232532a03 100644
---- a/fs/gfs2/quota.c
-+++ b/fs/gfs2/quota.c
-@@ -457,6 +457,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
- (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
- return 0;
-
-+ /*
-+ * If qd_change is 0 it means a pending quota change was negated.
-+ * We should not sync it, but we still have a qd reference and slot
-+ * reference taken by gfs2_quota_change -> do_qc that need to be put.
-+ */
-+ if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
-+ slot_put(qd);
-+ qd_put(qd);
-+ return 0;
-+ }
-+
- if (!lockref_get_not_dead(&qd->qd_lockref))
- return 0;
-
-diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
-index 02d93da21b2b0..5f4ebe279aaae 100644
---- a/fs/gfs2/super.c
-+++ b/fs/gfs2/super.c
-@@ -602,13 +602,15 @@ restart:
- }
- spin_unlock(&sdp->sd_jindex_spin);
-
-- if (!sb_rdonly(sb)) {
-+ if (!sb_rdonly(sb))
- gfs2_make_fs_ro(sdp);
-- }
-- if (gfs2_withdrawn(sdp)) {
-- gfs2_destroy_threads(sdp);
-+ else {
-+ if (gfs2_withdrawn(sdp))
-+ gfs2_destroy_threads(sdp);
-+
- gfs2_quota_cleanup(sdp);
- }
-+
- WARN_ON(gfs2_withdrawing(sdp));
-
- /* At this point, we're through modifying the disk */
-@@ -1550,7 +1552,7 @@ out:
- wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
- gfs2_glock_add_to_lru(ip->i_gl);
- gfs2_glock_put_eventually(ip->i_gl);
-- ip->i_gl = NULL;
-+ rcu_assign_pointer(ip->i_gl, NULL);
- }
- }
-
-diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
-index 316c4cebd3f3d..60fce26ff9378 100644
---- a/fs/hugetlbfs/inode.c
-+++ b/fs/hugetlbfs/inode.c
-@@ -295,7 +295,7 @@ static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t byt
- size_t res = 0;
-
- /* First subpage to start the loop. */
-- page += offset / PAGE_SIZE;
-+ page = nth_page(page, offset / PAGE_SIZE);
- offset %= PAGE_SIZE;
- while (1) {
- if (is_raw_hwpoison_page_in_hugepage(page))
-@@ -309,7 +309,7 @@ static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t byt
- break;
- offset += n;
- if (offset == PAGE_SIZE) {
-- page++;
-+ page = nth_page(page, 1);
- offset = 0;
- }
- }
-diff --git a/fs/inode.c b/fs/inode.c
-index 84bc3c76e5ccb..ae1a6410b53d7 100644
---- a/fs/inode.c
-+++ b/fs/inode.c
-@@ -215,6 +215,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
- lockdep_set_class_and_name(&mapping->invalidate_lock,
- &sb->s_type->invalidate_lock_key,
- "mapping.invalidate_lock");
-+ if (sb->s_iflags & SB_I_STABLE_WRITES)
-+ mapping_set_stable_writes(mapping);
- inode->i_private = NULL;
- inode->i_mapping = mapping;
- INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
-diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
-index c269a7d29a465..5b771a3d8d9ae 100644
---- a/fs/jbd2/recovery.c
-+++ b/fs/jbd2/recovery.c
-@@ -289,6 +289,8 @@ int jbd2_journal_recover(journal_t *journal)
- journal_superblock_t * sb;
-
- struct recovery_info info;
-+ errseq_t wb_err;
-+ struct address_space *mapping;
-
- memset(&info, 0, sizeof(info));
- sb = journal->j_superblock;
-@@ -306,6 +308,9 @@ int jbd2_journal_recover(journal_t *journal)
- return 0;
- }
-
-+ wb_err = 0;
-+ mapping = journal->j_fs_dev->bd_inode->i_mapping;
-+ errseq_check_and_advance(&mapping->wb_err, &wb_err);
- err = do_one_pass(journal, &info, PASS_SCAN);
- if (!err)
- err = do_one_pass(journal, &info, PASS_REVOKE);
-@@ -327,6 +332,9 @@ int jbd2_journal_recover(journal_t *journal)
-
- jbd2_journal_clear_revoke(journal);
- err2 = sync_blockdev(journal->j_fs_dev);
-+ if (!err)
-+ err = err2;
-+ err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
- if (!err)
- err = err2;
- /* Make sure all replayed data is on permanent storage */
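
The jbd2 hunk above samples the journal device's writeback error sequence before replay and checks it again afterwards, so a write error that occurs during recovery fails the recovery instead of being silently absorbed by a later reader of the same errseq. A much-simplified userspace model of that sample-then-check pattern (a plain counter stands in for errseq_t; names are illustrative):

#include <stdio.h>

static unsigned long wb_err_seq;	/* bumped whenever a write error occurs */

static void report_error(void) { wb_err_seq++; }

static int check_and_advance(unsigned long *since)
{
	if (*since != wb_err_seq) {
		*since = wb_err_seq;	/* consume the error */
		return -5;		/* -EIO: something failed since the sample */
	}
	return 0;
}

int main(void)
{
	unsigned long seen = wb_err_seq;	/* sample before "replay" */

	report_error();				/* a write fails mid-replay */
	printf("%d\n", check_and_advance(&seen));	/* prints -5 */
	return 0;
}
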
-diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
-index 88afd108c2dd2..11c77757ead9e 100644
---- a/fs/jfs/jfs_dmap.c
-+++ b/fs/jfs/jfs_dmap.c
-@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
- static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
- static int dbFindBits(u32 word, int l2nb);
- static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
--static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
-+static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
- static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
- int nblocks);
- static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
-@@ -180,7 +180,8 @@ int dbMount(struct inode *ipbmap)
- bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
-
- bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
-- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
-+ if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
-+ bmp->db_l2nbperpage < 0) {
- err = -EINVAL;
- goto err_release_metapage;
- }
-@@ -194,6 +195,12 @@ int dbMount(struct inode *ipbmap)
- bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
- bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
- bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
-+ if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
-+ bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
-+ err = -EINVAL;
-+ goto err_release_metapage;
-+ }
-+
- bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
- bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
- bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
-@@ -1710,7 +1717,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
- * dbFindLeaf() returns the index of the leaf at which
- * free space was found.
- */
-- rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
-+ rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
-
- /* release the buffer.
- */
-@@ -1957,7 +1964,7 @@ dbAllocDmapLev(struct bmap * bmp,
- * free space. if sufficient free space is found, dbFindLeaf()
- * returns the index of the leaf at which free space was found.
- */
-- if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
-+ if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
- return -ENOSPC;
-
- if (leafidx < 0)
-@@ -2921,14 +2928,18 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
- * leafidx - return pointer to be set to the index of the leaf
- * describing at least l2nb free blocks if sufficient
- * free blocks are found.
-+ * is_ctl - determines if the tree is of type ctl
- *
- * RETURN VALUES:
- * 0 - success
- * -ENOSPC - insufficient free blocks.
- */
--static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
-+static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
- {
- int ti, n = 0, k, x = 0;
-+ int max_size;
-+
-+ max_size = is_ctl ? CTLTREESIZE : TREESIZE;
-
- /* first check the root of the tree to see if there is
- * sufficient free space.
-@@ -2949,6 +2960,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
- /* sufficient free space found. move to the next
- * level (or quit if this is the last level).
- */
-+ if (x + n > max_size)
-+ return -ENOSPC;
- if (l2nb <= tp->dmt_stree[x + n])
- break;
- }
-diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
-index 923a58422c461..1b267eec3f367 100644
---- a/fs/jfs/jfs_imap.c
-+++ b/fs/jfs/jfs_imap.c
-@@ -1320,7 +1320,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
- int diAlloc(struct inode *pip, bool dir, struct inode *ip)
- {
- int rc, ino, iagno, addext, extno, bitno, sword;
-- int nwords, rem, i, agno;
-+ int nwords, rem, i, agno, dn_numag;
- u32 mask, inosmap, extsmap;
- struct inode *ipimap;
- struct metapage *mp;
-@@ -1356,6 +1356,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
-
- /* get the ag number of this iag */
- agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
-+ dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
-+ if (agno < 0 || agno > dn_numag)
-+ return -EIO;
-
- if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
- /*
-diff --git a/fs/libfs.c b/fs/libfs.c
-index 37f2d34ee090b..189447cf4acf5 100644
---- a/fs/libfs.c
-+++ b/fs/libfs.c
-@@ -396,6 +396,8 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
- return -EINVAL;
- }
-
-+ /* In this case, ->private_data is protected by f_pos_lock */
-+ file->private_data = NULL;
- return vfs_setpos(file, offset, U32_MAX);
- }
-
-@@ -425,7 +427,7 @@ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
- inode->i_ino, fs_umode_to_dtype(inode->i_mode));
- }
-
--static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
-+static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
- {
- struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode);
- XA_STATE(xas, &so_ctx->xa, ctx->pos);
-@@ -434,7 +436,7 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
- while (true) {
- dentry = offset_find_next(&xas);
- if (!dentry)
-- break;
-+ return ERR_PTR(-ENOENT);
-
- if (!offset_dir_emit(ctx, dentry)) {
- dput(dentry);
-@@ -444,6 +446,7 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
- dput(dentry);
- ctx->pos = xas.xa_index + 1;
- }
-+ return NULL;
- }
-
- /**
-@@ -476,7 +479,12 @@ static int offset_readdir(struct file *file, struct dir_context *ctx)
- if (!dir_emit_dots(file, ctx))
- return 0;
-
-- offset_iterate_dir(d_inode(dir), ctx);
-+ /* In this case, ->private_data is protected by f_pos_lock */
-+ if (ctx->pos == 2)
-+ file->private_data = NULL;
-+ else if (file->private_data == ERR_PTR(-ENOENT))
-+ return 0;
-+ file->private_data = offset_iterate_dir(d_inode(dir), ctx);
- return 0;
- }
-
-diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index 5ee283eb9660b..0ff913b4e9e0b 100644
---- a/fs/nfs/nfs4proc.c
-+++ b/fs/nfs/nfs4proc.c
-@@ -5622,7 +5622,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
-
- msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
-- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
-+ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
- }
-
- static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
-@@ -5663,7 +5663,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
- data->res.server = server;
- msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
- nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
-- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
-+ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
-+ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
- }
-
- static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
-@@ -8934,6 +8935,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
-
- sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
-
-+try_again:
- /* Test connection for session trunking. Async exchange_id call */
- task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
- if (IS_ERR(task))
-@@ -8946,11 +8948,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
-
- if (status == 0)
- rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
-- else if (rpc_clnt_xprt_switch_has_addr(clnt,
-+ else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
- (struct sockaddr *)&xprt->addr))
- rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
-
- rpc_put_task(task);
-+ if (status == -NFS4ERR_DELAY) {
-+ ssleep(1);
-+ goto try_again;
-+ }
- }
- EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
-
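
The nfs4proc.c hunk above turns a DELAY reply into a retry: nfs4_test_session_trunk() now sleeps one second and reissues the exchange_id probe while the server returns NFS4ERR_DELAY, and only removes the transport on other failures. The retry shape in miniature (the probe stub and error constant are illustrative):

#include <unistd.h>

#define ERR_DELAY (-1)

static int attempts;
static int probe_once(void)	/* stub: transient twice, then success */
{
	return ++attempts < 3 ? ERR_DELAY : 0;
}

int main(void)
{
	int status;

	for (;;) {
		status = probe_once();
		if (status != ERR_DELAY)
			break;		/* success or a hard error: stop */
		sleep(1);		/* the server asked us to back off */
	}
	return status;
}
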
-diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
-index 929248c6ca84c..4cbe0434cbb8c 100644
---- a/fs/nfsd/cache.h
-+++ b/fs/nfsd/cache.h
-@@ -84,8 +84,8 @@ int nfsd_net_reply_cache_init(struct nfsd_net *nn);
- void nfsd_net_reply_cache_destroy(struct nfsd_net *nn);
- int nfsd_reply_cache_init(struct nfsd_net *);
- void nfsd_reply_cache_shutdown(struct nfsd_net *);
--int nfsd_cache_lookup(struct svc_rqst *rqstp,
-- struct nfsd_cacherep **cacherep);
-+int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
-+ unsigned int len, struct nfsd_cacherep **cacherep);
- void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
- int cachetype, __be32 *statp);
- int nfsd_reply_cache_stats_show(struct seq_file *m, void *v);
-diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
-index ee9c923192e08..07bf219f9ae48 100644
---- a/fs/nfsd/filecache.c
-+++ b/fs/nfsd/filecache.c
-@@ -989,22 +989,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
- unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
- struct net *net = SVC_NET(rqstp);
- struct nfsd_file *new, *nf;
-- const struct cred *cred;
-+ bool stale_retry = true;
- bool open_retry = true;
- struct inode *inode;
- __be32 status;
- int ret;
-
-+retry:
- status = fh_verify(rqstp, fhp, S_IFREG,
- may_flags|NFSD_MAY_OWNER_OVERRIDE);
- if (status != nfs_ok)
- return status;
- inode = d_inode(fhp->fh_dentry);
-- cred = get_current_cred();
-
--retry:
- rcu_read_lock();
-- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
-+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
- rcu_read_unlock();
-
- if (nf) {
-@@ -1026,7 +1025,7 @@ retry:
-
- rcu_read_lock();
- spin_lock(&inode->i_lock);
-- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
-+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
- if (unlikely(nf)) {
- spin_unlock(&inode->i_lock);
- rcu_read_unlock();
-@@ -1058,6 +1057,7 @@ wait_for_construction:
- goto construction_err;
- }
- open_retry = false;
-+ fh_put(fhp);
- goto retry;
- }
- this_cpu_inc(nfsd_file_cache_hits);
-@@ -1074,7 +1074,6 @@ out:
- nfsd_file_check_write_error(nf);
- *pnf = nf;
- }
-- put_cred(cred);
- trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
- return status;
-
-@@ -1088,8 +1087,20 @@ open_file:
- status = nfs_ok;
- trace_nfsd_file_opened(nf, status);
- } else {
-- status = nfsd_open_verified(rqstp, fhp, may_flags,
-- &nf->nf_file);
-+ ret = nfsd_open_verified(rqstp, fhp, may_flags,
-+ &nf->nf_file);
-+ if (ret == -EOPENSTALE && stale_retry) {
-+ stale_retry = false;
-+ nfsd_file_unhash(nf);
-+ clear_and_wake_up_bit(NFSD_FILE_PENDING,
-+ &nf->nf_flags);
-+ if (refcount_dec_and_test(&nf->nf_ref))
-+ nfsd_file_free(nf);
-+ nf = NULL;
-+ fh_put(fhp);
-+ goto retry;
-+ }
-+ status = nfserrno(ret);
- trace_nfsd_file_open(nf, status);
- }
- } else
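The filecache hunk above moves fh_verify() under the retry label and adds a one-shot retry when the underlying open fails with -EOPENSTALE, tearing down the half-constructed entry first. A compact sketch of that single-retry pattern, with hypothetical helpers standing in for the nfsd internals and the standard -ESTALE standing in for the kernel-internal -EOPENSTALE:

#include <errno.h>

static int verify_handle(void) { return 0; }    /* like fh_verify() */
static void drop_half_built(void) { }           /* unhash, wake waiters, put ref */

static int open_verified(void)                  /* like nfsd_open_verified() */
{
        static int calls;
        return ++calls == 1 ? -ESTALE : 0;      /* fail once to show the retry */
}

static int acquire(void)
{
        int stale_retry = 1;                    /* bool stale_retry = true */
        int err;

retry:
        err = verify_handle();                  /* re-verified on every pass */
        if (err)
                return err;

        err = open_verified();
        if (err == -ESTALE && stale_retry) {
                stale_retry = 0;                /* only one stale retry */
                drop_half_built();
                goto retry;
        }
        return err;
}

int main(void)
{
        return acquire();
}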
-diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 8534693eb6a49..529b3ed3b3177 100644
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -2797,7 +2797,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
-
- /* XXX: alternatively, we could get/drop in seq start/stop */
- drop_client(clp);
-- return 0;
-+ return seq_release(inode, file);
- }
-
- static const struct file_operations client_states_fops = {
-diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
-index 80621a7095107..6cd36af2f97e1 100644
---- a/fs/nfsd/nfscache.c
-+++ b/fs/nfsd/nfscache.c
-@@ -368,33 +368,52 @@ nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
- return freed;
- }
-
--/*
-- * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
-+/**
-+ * nfsd_cache_csum - Checksum incoming NFS Call arguments
-+ * @buf: buffer containing a whole RPC Call message
-+ * @start: starting byte of the NFS Call header
-+ * @remaining: size of the NFS Call header, in bytes
-+ *
-+ * Compute a weak checksum of the leading bytes of an NFS procedure
-+ * call header to help verify that a retransmitted Call matches an
-+ * entry in the duplicate reply cache.
-+ *
-+ * To avoid assumptions about how the RPC message is laid out in
-+ * @buf and what else it might contain (e.g., a GSS MIC suffix), the
-+ * caller passes us the exact location and length of the NFS Call
-+ * header.
-+ *
-+ * Returns a 32-bit checksum value, as defined in RFC 793.
- */
--static __wsum
--nfsd_cache_csum(struct svc_rqst *rqstp)
-+static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
-+ unsigned int remaining)
- {
-+ unsigned int base, len;
-+ struct xdr_buf subbuf;
-+ __wsum csum = 0;
-+ void *p;
- int idx;
-- unsigned int base;
-- __wsum csum;
-- struct xdr_buf *buf = &rqstp->rq_arg;
-- const unsigned char *p = buf->head[0].iov_base;
-- size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
-- RC_CSUMLEN);
-- size_t len = min(buf->head[0].iov_len, csum_len);
-+
-+ if (remaining > RC_CSUMLEN)
-+ remaining = RC_CSUMLEN;
-+ if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
-+ return csum;
-
- /* rq_arg.head first */
-- csum = csum_partial(p, len, 0);
-- csum_len -= len;
-+ if (subbuf.head[0].iov_len) {
-+ len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
-+ csum = csum_partial(subbuf.head[0].iov_base, len, csum);
-+ remaining -= len;
-+ }
-
- /* Continue into page array */
-- idx = buf->page_base / PAGE_SIZE;
-- base = buf->page_base & ~PAGE_MASK;
-- while (csum_len) {
-- p = page_address(buf->pages[idx]) + base;
-- len = min_t(size_t, PAGE_SIZE - base, csum_len);
-+ idx = subbuf.page_base / PAGE_SIZE;
-+ base = subbuf.page_base & ~PAGE_MASK;
-+ while (remaining) {
-+ p = page_address(subbuf.pages[idx]) + base;
-+ len = min_t(unsigned int, PAGE_SIZE - base, remaining);
- csum = csum_partial(p, len, csum);
-- csum_len -= len;
-+ remaining -= len;
- base = 0;
- ++idx;
- }
-@@ -465,6 +484,8 @@ out:
- /**
- * nfsd_cache_lookup - Find an entry in the duplicate reply cache
- * @rqstp: Incoming Call to find
-+ * @start: starting byte in @rqstp->rq_arg of the NFS Call header
-+ * @len: size of the NFS Call header, in bytes
- * @cacherep: OUT: DRC entry for this request
- *
- * Try to find an entry matching the current call in the cache. When none
-@@ -478,7 +499,8 @@ out:
- * %RC_REPLY: Reply from cache
- * %RC_DROPIT: Do not process the request further
- */
--int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
-+int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
-+ unsigned int len, struct nfsd_cacherep **cacherep)
- {
- struct nfsd_net *nn;
- struct nfsd_cacherep *rp, *found;
-@@ -494,7 +516,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
- goto out;
- }
-
-- csum = nfsd_cache_csum(rqstp);
-+ csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);
-
- /*
- * Since the common case is a cache miss followed by an insert,
-@@ -640,24 +662,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
- return;
- }
-
--/*
-- * Copy cached reply to current reply buffer. Should always fit.
-- * FIXME as reply is in a page, we should just attach the page, and
-- * keep a refcount....
-- */
- static int
- nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
- {
-- struct kvec *vec = &rqstp->rq_res.head[0];
--
-- if (vec->iov_len + data->iov_len > PAGE_SIZE) {
-- printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
-- data->iov_len);
-- return 0;
-- }
-- memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
-- vec->iov_len += data->iov_len;
-- return 1;
-+ __be32 *p;
-+
-+ p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
-+ if (unlikely(!p))
-+ return false;
-+ memcpy(p, data->iov_base, data->iov_len);
-+ xdr_commit_encode(&rqstp->rq_res_stream);
-+ return true;
- }
-
- /*
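For the checksum rework above: nfsd_cache_csum() now clamps the header length to RC_CSUMLEN, extracts a subsegment of the xdr_buf, and accumulates csum_partial() over the head kvec and then the page array. A toy user-space version of that split accumulation, using a plain 16-bit ones'-complement sum with the final fold RFC 793 defines; chunk sizes are kept even here, whereas the kernel's csum_partial() chains arbitrary splits.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
        for (; len >= 2; p += 2, len -= 2)
                sum += (uint32_t)p[0] << 8 | p[1];
        if (len)
                sum += (uint32_t)p[0] << 8;     /* pad trailing odd byte */
        return sum;
}

int main(void)
{
        uint8_t head[] = { 0x01, 0x02, 0x03, 0x04 };    /* like subbuf.head[0] */
        uint8_t page[] = { 0x05, 0x06 };                /* like subbuf.pages[] */
        uint32_t sum = 0;

        sum = csum_add(sum, head, sizeof(head));        /* head first */
        sum = csum_add(sum, page, sizeof(page));        /* then page array */
        while (sum >> 16)                               /* fold carries */
                sum = (sum & 0xffff) + (sum >> 16);
        printf("csum = 0x%04x\n", (unsigned)(uint16_t)~sum);
        return 0;
}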
-diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
-index c7af1095f6b54..a87e9ef613868 100644
---- a/fs/nfsd/nfssvc.c
-+++ b/fs/nfsd/nfssvc.c
-@@ -988,6 +988,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
- const struct svc_procedure *proc = rqstp->rq_procinfo;
- __be32 *statp = rqstp->rq_accept_statp;
- struct nfsd_cacherep *rp;
-+ unsigned int start, len;
-+ __be32 *nfs_reply;
-
- /*
- * Give the xdr decoder a chance to change this if it wants
-@@ -995,11 +997,18 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
- */
- rqstp->rq_cachetype = proc->pc_cachetype;
-
-+ /*
-+ * ->pc_decode advances the argument stream past the NFS
-+ * Call header, so grab the header's starting location and
-+ * size now for the call to nfsd_cache_lookup().
-+ */
-+ start = xdr_stream_pos(&rqstp->rq_arg_stream);
-+ len = xdr_stream_remaining(&rqstp->rq_arg_stream);
- if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
- goto out_decode_err;
-
- rp = NULL;
-- switch (nfsd_cache_lookup(rqstp, &rp)) {
-+ switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
- case RC_DOIT:
- break;
- case RC_REPLY:
-@@ -1008,6 +1017,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
- goto out_dropit;
- }
-
-+ nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
- *statp = proc->pc_func(rqstp);
- if (test_bit(RQ_DROPME, &rqstp->rq_flags))
- goto out_update_drop;
-@@ -1015,7 +1025,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
- if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
- goto out_encode_err;
-
-- nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
-+ nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, nfs_reply);
- out_cached_reply:
- return 1;
-
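nfsd_dispatch() above snapshots the argument stream's position and remaining length before ->pc_decode consumes the Call header, and likewise records the reply position with a zero-length xdr_inline_decode() before the procedure runs. The bookmark idea in miniature, with an illustrative cursor type rather than the kernel's xdr_stream:

#include <stdio.h>

struct stream { unsigned int pos, len; };       /* illustrative cursor */

static unsigned int stream_pos(struct stream *s)       { return s->pos; }
static unsigned int stream_remaining(struct stream *s) { return s->len - s->pos; }
static void decode_header(struct stream *s)            { s->pos += 8; }

int main(void)
{
        struct stream arg = { .pos = 4, .len = 32 };

        /* capture before decode, as nfsd_dispatch() now does */
        unsigned int start = stream_pos(&arg);
        unsigned int len = stream_remaining(&arg);

        decode_header(&arg);            /* like ->pc_decode() advancing */

        /* the DRC can still checksum the raw header at [start, start+len) */
        printf("header at %u, %u bytes\n", start, len);
        return 0;
}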
-diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 02f5fcaad03f3..b24462efa1781 100644
---- a/fs/nfsd/vfs.c
-+++ b/fs/nfsd/vfs.c
-@@ -823,7 +823,7 @@ int nfsd_open_break_lease(struct inode *inode, int access)
- * and additional flags.
- * N.B. After this call fhp needs an fh_put
- */
--static __be32
-+static int
- __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- int may_flags, struct file **filp)
- {
-@@ -831,14 +831,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- struct inode *inode;
- struct file *file;
- int flags = O_RDONLY|O_LARGEFILE;
-- __be32 err;
-- int host_err = 0;
-+ int host_err = -EPERM;
-
- path.mnt = fhp->fh_export->ex_path.mnt;
- path.dentry = fhp->fh_dentry;
- inode = d_inode(path.dentry);
-
-- err = nfserr_perm;
- if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
- goto out;
-
-@@ -847,7 +845,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
-
- host_err = nfsd_open_break_lease(inode, may_flags);
- if (host_err) /* NOMEM or WOULDBLOCK */
-- goto out_nfserr;
-+ goto out;
-
- if (may_flags & NFSD_MAY_WRITE) {
- if (may_flags & NFSD_MAY_READ)
-@@ -859,13 +857,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- file = dentry_open(&path, flags, current_cred());
- if (IS_ERR(file)) {
- host_err = PTR_ERR(file);
-- goto out_nfserr;
-+ goto out;
- }
-
- host_err = ima_file_check(file, may_flags);
- if (host_err) {
- fput(file);
-- goto out_nfserr;
-+ goto out;
- }
-
- if (may_flags & NFSD_MAY_64BIT_COOKIE)
-@@ -874,10 +872,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- file->f_mode |= FMODE_32BITHASH;
-
- *filp = file;
--out_nfserr:
-- err = nfserrno(host_err);
- out:
-- return err;
-+ return host_err;
- }
-
- __be32
-@@ -885,6 +881,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- int may_flags, struct file **filp)
- {
- __be32 err;
-+ int host_err;
- bool retried = false;
-
- validate_process_creds();
-@@ -904,12 +901,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
- retry:
- err = fh_verify(rqstp, fhp, type, may_flags);
- if (!err) {
-- err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
-- if (err == nfserr_stale && !retried) {
-+ host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
-+ if (host_err == -EOPENSTALE && !retried) {
- retried = true;
- fh_put(fhp);
- goto retry;
- }
-+ err = nfserrno(host_err);
- }
- validate_process_creds();
- return err;
-@@ -922,13 +920,13 @@ retry:
- * @may_flags: internal permission flags
- * @filp: OUT: open "struct file *"
- *
-- * Returns an nfsstat value in network byte order.
-+ * Returns zero on success, or a negative errno value.
- */
--__be32
-+int
- nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
- struct file **filp)
- {
-- __be32 err;
-+ int err;
-
- validate_process_creds();
- err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
-diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
-index a6890ea7b765b..e3c29596f4df1 100644
---- a/fs/nfsd/vfs.h
-+++ b/fs/nfsd/vfs.h
-@@ -104,8 +104,8 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
- int nfsd_open_break_lease(struct inode *, int);
- __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
- int, struct file **);
--__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *,
-- int, struct file **);
-+int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp,
-+ int may_flags, struct file **filp);
- __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct file *file, loff_t offset,
- unsigned long *count,
-diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
-index 83ef66644c213..fca29dba7b146 100644
---- a/fs/overlayfs/inode.c
-+++ b/fs/overlayfs/inode.c
-@@ -171,7 +171,7 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
-
- type = ovl_path_real(dentry, &realpath);
- old_cred = ovl_override_creds(dentry->d_sb);
-- err = vfs_getattr(&realpath, stat, request_mask, flags);
-+ err = ovl_do_getattr(&realpath, stat, request_mask, flags);
- if (err)
- goto out;
-
-@@ -196,8 +196,8 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
- (!is_dir ? STATX_NLINK : 0);
-
- ovl_path_lower(dentry, &realpath);
-- err = vfs_getattr(&realpath, &lowerstat,
-- lowermask, flags);
-+ err = ovl_do_getattr(&realpath, &lowerstat, lowermask,
-+ flags);
- if (err)
- goto out;
-
-@@ -249,8 +249,8 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
-
- ovl_path_lowerdata(dentry, &realpath);
- if (realpath.dentry) {
-- err = vfs_getattr(&realpath, &lowerdatastat,
-- lowermask, flags);
-+ err = ovl_do_getattr(&realpath, &lowerdatastat,
-+ lowermask, flags);
- if (err)
- goto out;
- } else {
-diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
-index 9817b2dcb132c..09ca82ed0f8ce 100644
---- a/fs/overlayfs/overlayfs.h
-+++ b/fs/overlayfs/overlayfs.h
-@@ -397,6 +397,14 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
- return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
- }
-
-+static inline int ovl_do_getattr(const struct path *path, struct kstat *stat,
-+ u32 request_mask, unsigned int flags)
-+{
-+ if (flags & AT_GETATTR_NOSEC)
-+ return vfs_getattr_nosec(path, stat, request_mask, flags);
-+ return vfs_getattr(path, stat, request_mask, flags);
-+}
-+
- /* util.c */
- int ovl_want_write(struct dentry *dentry);
- void ovl_drop_write(struct dentry *dentry);
-diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index 3fa2416264a4e..c71d185980c08 100644
---- a/fs/overlayfs/super.c
-+++ b/fs/overlayfs/super.c
-@@ -1489,7 +1489,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
- ovl_trusted_xattr_handlers;
- sb->s_fs_info = ofs;
- sb->s_flags |= SB_POSIXACL;
-- sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
-+ sb->s_iflags |= SB_I_SKIP_SYNC;
-
- err = -ENOMEM;
- root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
-diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
-index c88854df0b624..de484195f49fe 100644
---- a/fs/proc/proc_sysctl.c
-+++ b/fs/proc/proc_sysctl.c
-@@ -1576,7 +1576,6 @@ static const struct sysctl_alias sysctl_aliases[] = {
- {"hung_task_panic", "kernel.hung_task_panic" },
- {"numa_zonelist_order", "vm.numa_zonelist_order" },
- {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" },
-- {"softlockup_panic", "kernel.softlockup_panic" },
- { }
- };
-
-@@ -1592,6 +1591,13 @@ static const char *sysctl_find_alias(char *param)
- return NULL;
- }
-
-+bool sysctl_is_alias(char *param)
-+{
-+ const char *alias = sysctl_find_alias(param);
-+
-+ return alias != NULL;
-+}
-+
- /* Set sysctl value passed on kernel command line. */
- static int process_sysctl_arg(char *param, char *val,
- const char *unused, void *arg)
-diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
-index e5bca9a004ccc..03425928d2fb3 100644
---- a/fs/pstore/platform.c
-+++ b/fs/pstore/platform.c
-@@ -464,6 +464,8 @@ out:
- */
- int pstore_register(struct pstore_info *psi)
- {
-+ char *new_backend;
-+
- if (backend && strcmp(backend, psi->name)) {
- pr_warn("backend '%s' already in use: ignoring '%s'\n",
- backend, psi->name);
-@@ -484,11 +486,16 @@ int pstore_register(struct pstore_info *psi)
- return -EINVAL;
- }
-
-+ new_backend = kstrdup(psi->name, GFP_KERNEL);
-+ if (!new_backend)
-+ return -ENOMEM;
-+
- mutex_lock(&psinfo_lock);
- if (psinfo) {
- pr_warn("backend '%s' already loaded: ignoring '%s'\n",
- psinfo->name, psi->name);
- mutex_unlock(&psinfo_lock);
-+ kfree(new_backend);
- return -EBUSY;
- }
-
-@@ -521,7 +528,7 @@ int pstore_register(struct pstore_info *psi)
- * Update the module parameter backend, so it is visible
- * through /sys/module/pstore/parameters/backend
- */
-- backend = kstrdup(psi->name, GFP_KERNEL);
-+ backend = new_backend;
-
- pr_info("Registered %s as persistent store backend\n", psi->name);
-
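The pstore hunk above duplicates the backend name before taking psinfo_lock, frees the copy on the -EBUSY path, and only assigns it under the lock, so allocation failure is reported up front and no allocation happens while the mutex is held. The same shape in a small pthread sketch (names illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static char *registered;

static int register_backend(const char *name)
{
        char *copy = strdup(name);      /* like kstrdup(..., GFP_KERNEL) */
        if (!copy)
                return -1;              /* -ENOMEM, reported before locking */

        pthread_mutex_lock(&reg_lock);
        if (registered) {
                pthread_mutex_unlock(&reg_lock);
                free(copy);             /* -EBUSY path must not leak */
                return -2;
        }
        registered = copy;              /* publish under the lock */
        pthread_mutex_unlock(&reg_lock);
        return 0;
}

int main(void)
{
        return register_backend("efi_pstore") ? 1 : 0;
}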
-diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
-index 31e897ad5e6a7..023b91b4e1f0a 100644
---- a/fs/quota/dquot.c
-+++ b/fs/quota/dquot.c
-@@ -2351,6 +2351,20 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
- if (sb_has_quota_loaded(sb, type))
- return -EBUSY;
-
-+ /*
-+ * Quota files should never be encrypted. They should be thought of as
-+ * filesystem metadata, not user data. New-style internal quota files
-+ * cannot be encrypted by users anyway, but old-style external quota
-+ * files could potentially be incorrectly created in an encrypted
-+ * directory, hence this explicit check. Some reasons why encrypted
-+ * quota files don't work include: (1) some filesystems that support
-+ * encryption don't handle it in their quota_read and quota_write, and
-+ * (2) cleaning up encrypted quota files at unmount would need special
-+ * consideration, as quota files are cleaned up later than user files.
-+ */
-+ if (IS_ENCRYPTED(inode))
-+ return -EINVAL;
-+
- dqopt->files[type] = igrab(inode);
- if (!dqopt->files[type])
- return -EIO;
-diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
-index fe1bf5b6e0cb3..59f6b8e32cc97 100644
---- a/fs/smb/client/cached_dir.c
-+++ b/fs/smb/client/cached_dir.c
-@@ -32,7 +32,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
- * fully cached or it may be in the process of
- * being deleted due to a lease break.
- */
-- if (!cfid->has_lease) {
-+ if (!cfid->time || !cfid->has_lease) {
- spin_unlock(&cfids->cfid_list_lock);
- return NULL;
- }
-@@ -193,10 +193,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
- npath = path_no_prefix(cifs_sb, path);
- if (IS_ERR(npath)) {
- rc = PTR_ERR(npath);
-- kfree(utf16_path);
-- return rc;
-+ goto out;
- }
-
-+ if (!npath[0]) {
-+ dentry = dget(cifs_sb->root);
-+ } else {
-+ dentry = path_to_dentry(cifs_sb, npath);
-+ if (IS_ERR(dentry)) {
-+ rc = -ENOENT;
-+ goto out;
-+ }
-+ }
-+ cfid->dentry = dentry;
-+
- /*
- * We do not hold the lock for the open because in case
- * SMB2_open needs to reconnect.
-@@ -249,6 +259,15 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
-
- smb2_set_related(&rqst[1]);
-
-+ /*
-+ * Set @cfid->has_lease to true before sending out compounded request so
-+	 * Set @cfid->has_lease to true before sending out the compounded request so
-+ * potential lease break right after the request is sent or while @cfid
-+ * is still being cached. Concurrent processes won't be to use it yet
-+	 * is still being cached. Concurrent processes won't be able to use it yet
-+ */
-+ cfid->has_lease = true;
-+
- rc = compound_send_recv(xid, ses, server,
- flags, 2, rqst,
- resp_buftype, rsp_iov);
-@@ -263,6 +282,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
- cfid->tcon = tcon;
- cfid->is_open = true;
-
-+ spin_lock(&cfids->cfid_list_lock);
-+
- o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
- oparms.fid->persistent_fid = o_rsp->PersistentFileId;
- oparms.fid->volatile_fid = o_rsp->VolatileFileId;
-@@ -270,18 +291,25 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
- oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
- #endif /* CIFS_DEBUG2 */
-
-- if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
-+ rc = -EINVAL;
-+ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
-+ spin_unlock(&cfids->cfid_list_lock);
- goto oshr_free;
-+ }
-
- smb2_parse_contexts(server, o_rsp,
- &oparms.fid->epoch,
- oparms.fid->lease_key, &oplock,
- NULL, NULL);
-- if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
-+ if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
-+ spin_unlock(&cfids->cfid_list_lock);
- goto oshr_free;
-+ }
- qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
-- if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
-+ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
-+ spin_unlock(&cfids->cfid_list_lock);
- goto oshr_free;
-+ }
- if (!smb2_validate_and_copy_iov(
- le16_to_cpu(qi_rsp->OutputBufferOffset),
- sizeof(struct smb2_file_all_info),
-@@ -289,37 +317,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
- (char *)&cfid->file_all_info))
- cfid->file_all_info_is_valid = true;
-
-- if (!npath[0])
-- dentry = dget(cifs_sb->root);
-- else {
-- dentry = path_to_dentry(cifs_sb, npath);
-- if (IS_ERR(dentry)) {
-- rc = -ENOENT;
-- goto oshr_free;
-- }
-- }
-- spin_lock(&cfids->cfid_list_lock);
-- cfid->dentry = dentry;
- cfid->time = jiffies;
-- cfid->has_lease = true;
- spin_unlock(&cfids->cfid_list_lock);
-+ /* At this point the directory handle is fully cached */
-+ rc = 0;
-
- oshr_free:
-- kfree(utf16_path);
- SMB2_open_free(&rqst[0]);
- SMB2_query_info_free(&rqst[1]);
- free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
- free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
-- spin_lock(&cfids->cfid_list_lock);
-- if (!cfid->has_lease) {
-- if (rc) {
-- if (cfid->on_list) {
-- list_del(&cfid->entry);
-- cfid->on_list = false;
-- cfids->num_entries--;
-- }
-- rc = -ENOENT;
-- } else {
-+ if (rc) {
-+ spin_lock(&cfids->cfid_list_lock);
-+ if (cfid->on_list) {
-+ list_del(&cfid->entry);
-+ cfid->on_list = false;
-+ cfids->num_entries--;
-+ }
-+ if (cfid->has_lease) {
- /*
- * We are guaranteed to have two references at this
- * point. One for the caller and one for a potential
-@@ -327,25 +342,24 @@ oshr_free:
- * will be closed when the caller closes the cached
- * handle.
- */
-+ cfid->has_lease = false;
- spin_unlock(&cfids->cfid_list_lock);
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- goto out;
- }
-+ spin_unlock(&cfids->cfid_list_lock);
- }
-- spin_unlock(&cfids->cfid_list_lock);
-+out:
- if (rc) {
- if (cfid->is_open)
- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
- cfid->fid.volatile_fid);
- free_cached_dir(cfid);
-- cfid = NULL;
-- }
--out:
-- if (rc == 0) {
-+ } else {
- *ret_cfid = cfid;
- atomic_inc(&tcon->num_remote_opens);
- }
--
-+ kfree(utf16_path);
- return rc;
- }
-
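The cached_dir.c changes above publish @cfid in two steps: has_lease is set before the compounded open is sent, so a lease break racing with the request can drop the reference via cached_dir_lease_break(), while ordinary lookups keep rejecting the entry until @cfid->time becomes nonzero. A minimal sketch of that two-flag gate (fields and helper illustrative):

struct cfid {
        int has_lease;          /* break handler may release the lease ref */
        unsigned long time;     /* nonzero only once fully cached */
};

/* find_or_create_cached_dir(): usable only when both are set */
static int cfid_usable(const struct cfid *c)
{
        return c->time && c->has_lease;
}

int main(void)
{
        struct cfid c = { .has_lease = 1, .time = 0 };  /* open in flight */
        return cfid_usable(&c);         /* 0: lookups still fenced off */
}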
-diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
-index 76922fcc4bc6e..16282ecfe17a7 100644
---- a/fs/smb/client/cifs_debug.c
-+++ b/fs/smb/client/cifs_debug.c
-@@ -279,6 +279,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
- struct cifs_server_iface *iface;
-+ size_t iface_weight = 0, iface_min_speed = 0;
-+ struct cifs_server_iface *last_iface = NULL;
- int c, i, j;
-
- seq_puts(m,
-@@ -452,6 +454,11 @@ skip_rdma:
- seq_printf(m, "\n\n\tSessions: ");
- i = 0;
- list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-+ spin_lock(&ses->ses_lock);
-+ if (ses->ses_status == SES_EXITING) {
-+ spin_unlock(&ses->ses_lock);
-+ continue;
-+ }
- i++;
- if ((ses->serverDomain == NULL) ||
- (ses->serverOS == NULL) ||
-@@ -472,6 +479,7 @@ skip_rdma:
- ses->ses_count, ses->serverOS, ses->serverNOS,
- ses->capabilities, ses->ses_status);
- }
-+ spin_unlock(&ses->ses_lock);
-
- seq_printf(m, "\n\tSecurity type: %s ",
- get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
-@@ -536,11 +544,25 @@ skip_rdma:
- "\tLast updated: %lu seconds ago",
- ses->iface_count,
- (jiffies - ses->iface_last_update) / HZ);
-+
-+ last_iface = list_last_entry(&ses->iface_list,
-+ struct cifs_server_iface,
-+ iface_head);
-+ iface_min_speed = last_iface->speed;
-+
- j = 0;
- list_for_each_entry(iface, &ses->iface_list,
- iface_head) {
- seq_printf(m, "\n\t%d)", ++j);
- cifs_dump_iface(m, iface);
-+
-+ iface_weight = iface->speed / iface_min_speed;
-+ seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
-+ "\n\t\tAllocated channels: %u\n",
-+ iface->weight_fulfilled,
-+ iface_weight,
-+ iface->num_channels);
-+
- if (is_ses_using_iface(ses, iface))
- seq_puts(m, "\t\t[CONNECTED]\n");
- }
-diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
-index 332588e77c311..26327442e383b 100644
---- a/fs/smb/client/cifs_ioctl.h
-+++ b/fs/smb/client/cifs_ioctl.h
-@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
- __u64 cifs_posix_caps;
- } __packed;
-
-+struct smb_mnt_tcon_info {
-+ __u32 tid;
-+ __u64 session_id;
-+} __packed;
-+
- struct smb_snapshot_array {
- __u32 number_of_snapshots;
- __u32 number_of_snapshots_returned;
-@@ -108,6 +113,7 @@ struct smb3_notify_info {
- #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
- #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
- #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
-+#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
- #define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
-
- /*
-diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
-index 6f3285f1dfee5..af7849e5974ff 100644
---- a/fs/smb/client/cifs_spnego.c
-+++ b/fs/smb/client/cifs_spnego.c
-@@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = {
- * strlen(";sec=ntlmsspi") */
- #define MAX_MECH_STR_LEN 13
-
--/* strlen of "host=" */
--#define HOST_KEY_LEN 5
-+/* strlen of ";host=" */
-+#define HOST_KEY_LEN 6
-
- /* strlen of ";ip4=" or ";ip6=" */
- #define IP_KEY_LEN 5
-diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
-index 22869cda13565..ea3a7a668b45f 100644
---- a/fs/smb/client/cifsfs.c
-+++ b/fs/smb/client/cifsfs.c
-@@ -1191,6 +1191,7 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
-
- const struct inode_operations cifs_symlink_inode_ops = {
- .get_link = cifs_get_link,
-+ .setattr = cifs_setattr,
- .permission = cifs_permission,
- .listxattr = cifs_listxattr,
- };
-diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
-index 02082621d8e07..b8d1c19f67714 100644
---- a/fs/smb/client/cifsglob.h
-+++ b/fs/smb/client/cifsglob.h
-@@ -969,6 +969,8 @@ struct cifs_server_iface {
- struct list_head iface_head;
- struct kref refcount;
- size_t speed;
-+ size_t weight_fulfilled;
-+ unsigned int num_channels;
- unsigned int rdma_capable : 1;
- unsigned int rss_capable : 1;
- unsigned int is_active : 1; /* unset if non existent */
-@@ -2143,6 +2145,7 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
- unsigned int len, skip;
- unsigned int nents = 0;
- unsigned long addr;
-+ size_t data_size;
- int i, j;
-
- /*
-@@ -2158,17 +2161,21 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
- * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
- */
- for (i = 0; i < num_rqst; i++) {
-+ data_size = iov_iter_count(&rqst[i].rq_iter);
-+
- /* We really don't want a mixture of pinned and unpinned pages
- * in the sglist. It's hard to keep track of which is what.
- * Instead, we convert to a BVEC-type iterator higher up.
- */
-- if (WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
-+ if (data_size &&
-+ WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
- return -EIO;
-
- /* We also don't want to have any extra refs or pins to clean
- * up in the sglist.
- */
-- if (WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
-+ if (data_size &&
-+ WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
- return -EIO;
-
- for (j = 0; j < rqst[i].rq_nvec; j++) {
-@@ -2184,7 +2191,8 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
- }
- skip = 0;
- }
-- nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
-+ if (data_size)
-+ nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
- }
- nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
- return nents;
-diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
-index e17222fec9d29..a75220db5c1e1 100644
---- a/fs/smb/client/cifspdu.h
-+++ b/fs/smb/client/cifspdu.h
-@@ -2570,7 +2570,7 @@ typedef struct {
-
-
- struct win_dev {
-- unsigned char type[8]; /* IntxCHR or IntxBLK */
-+	unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
- __le64 major;
- __le64 minor;
- } __attribute__((packed));
-diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
-index 0c37eefa18a57..8e53abcfc5ec4 100644
---- a/fs/smb/client/cifsproto.h
-+++ b/fs/smb/client/cifsproto.h
-@@ -81,7 +81,7 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
- extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
- char *cifs_build_devname(char *nodename, const char *prepath);
- extern void delete_mid(struct mid_q_entry *mid);
--extern void release_mid(struct mid_q_entry *mid);
-+void __release_mid(struct kref *refcount);
- extern void cifs_wake_up_task(struct mid_q_entry *mid);
- extern int cifs_handle_standard(struct TCP_Server_Info *server,
- struct mid_q_entry *mid);
-@@ -610,7 +610,7 @@ void cifs_free_hash(struct shash_desc **sdesc);
-
- struct cifs_chan *
- cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
--int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses);
-+int cifs_try_adding_channels(struct cifs_ses *ses);
- bool is_server_using_iface(struct TCP_Server_Info *server,
- struct cifs_server_iface *iface);
- bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
-@@ -740,4 +740,9 @@ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
- return true;
- }
-
-+static inline void release_mid(struct mid_q_entry *mid)
-+{
-+ kref_put(&mid->refcount, __release_mid);
-+}
-+
- #endif /* _CIFSPROTO_H */
-diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
-index 7b923e36501b0..d517651d7bcea 100644
---- a/fs/smb/client/connect.c
-+++ b/fs/smb/client/connect.c
-@@ -156,13 +156,14 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
- /* If server is a channel, select the primary channel */
- pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
-
-- spin_lock(&pserver->srv_lock);
-+ /* if we need to signal just this channel */
- if (!all_channels) {
-- pserver->tcpStatus = CifsNeedReconnect;
-- spin_unlock(&pserver->srv_lock);
-+ spin_lock(&server->srv_lock);
-+ if (server->tcpStatus != CifsExiting)
-+ server->tcpStatus = CifsNeedReconnect;
-+ spin_unlock(&server->srv_lock);
- return;
- }
-- spin_unlock(&pserver->srv_lock);
-
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
-@@ -2033,6 +2034,12 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
- }
- }
-
-+ /* we now account for primary channel in iface->refcount */
-+ if (ses->chans[0].iface) {
-+ kref_put(&ses->chans[0].iface->refcount, release_iface);
-+ ses->chans[0].server = NULL;
-+ }
-+
- sesInfoFree(ses);
- cifs_put_tcp_session(server, 0);
- }
-@@ -3560,7 +3567,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
- ctx->prepath = NULL;
-
- out:
-- cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
-+ cifs_try_adding_channels(mnt_ctx.ses);
- rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
- if (rc)
- goto error;
-@@ -3849,8 +3856,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
- is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
- spin_unlock(&ses->chan_lock);
-
-- if (!is_binding)
-+ if (!is_binding) {
- ses->ses_status = SES_IN_SETUP;
-+
-+ /* force iface_list refresh */
-+ ses->iface_last_update = 0;
-+ }
- spin_unlock(&ses->ses_lock);
-
- /* update ses ip_addr only for primary chan */
-diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
-index 81b84151450d2..a8a1d386da656 100644
---- a/fs/smb/client/dfs.c
-+++ b/fs/smb/client/dfs.c
-@@ -263,15 +263,23 @@ out:
- return rc;
- }
-
--/* Resolve UNC hostname in @ctx->source and set ip addr in @ctx->dstaddr */
-+/*
-+ * If @ctx->dfs_automount is set, update @ctx->dstaddr earlier with the DFS root
-+ * server from where we'll start following any referrals. Otherwise rely on the
-+ * value provided by mount(2), as the user might not have the dns_resolver key
-+ * set up and the upcall to resolve the UNC hostname under @ctx->source would fail.
-+ */
- static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
- {
- struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
-- int rc;
-+ int rc = 0;
-
-- rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
-- if (!rc)
-- cifs_set_port(addr, ctx->port);
-+ if (!ctx->nodfs && ctx->dfs_automount) {
-+ rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
-+ if (!rc)
-+ cifs_set_port(addr, ctx->port);
-+ ctx->dfs_automount = false;
-+ }
- return rc;
- }
-
-diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
-index 9d8d34af02114..cf46916286d02 100644
---- a/fs/smb/client/fs_context.h
-+++ b/fs/smb/client/fs_context.h
-@@ -268,6 +268,7 @@ struct smb3_fs_context {
- bool witness:1; /* use witness protocol */
- char *leaf_fullpath;
- struct cifs_ses *dfs_root_ses;
-+ bool dfs_automount:1; /* set for dfs automount only */
- };
-
- extern const struct fs_parameter_spec smb3_fs_parameters[];
-diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
-index d7c302442c1ec..d6aa5e474d5e7 100644
---- a/fs/smb/client/inode.c
-+++ b/fs/smb/client/inode.c
-@@ -592,6 +592,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
- cifs_dbg(FYI, "Symlink\n");
- fattr->cf_mode |= S_IFLNK;
- fattr->cf_dtype = DT_LNK;
-+ } else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
-+ cifs_dbg(FYI, "FIFO\n");
-+ fattr->cf_mode |= S_IFIFO;
-+ fattr->cf_dtype = DT_FIFO;
- } else {
- fattr->cf_mode |= S_IFREG; /* file? */
- fattr->cf_dtype = DT_REG;
-@@ -744,7 +748,7 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
- case 0: /* SMB1 symlink */
- case IO_REPARSE_TAG_SYMLINK:
- case IO_REPARSE_TAG_NFS:
-- fattr->cf_mode = S_IFLNK;
-+ fattr->cf_mode = S_IFLNK | cifs_sb->ctx->file_mode;
- fattr->cf_dtype = DT_LNK;
- break;
- default:
-@@ -819,6 +823,8 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
-
- out_reparse:
- if (S_ISLNK(fattr->cf_mode)) {
-+ if (likely(data->symlink_target))
-+ fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
- fattr->cf_symlink_target = data->symlink_target;
- data->symlink_target = NULL;
- }
-diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
-index f7160003e0ed9..73ededa8eba5c 100644
---- a/fs/smb/client/ioctl.c
-+++ b/fs/smb/client/ioctl.c
-@@ -117,6 +117,20 @@ out_drop_write:
- return rc;
- }
-
-+static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
-+{
-+ int rc = 0;
-+ struct smb_mnt_tcon_info tcon_inf;
-+
-+ tcon_inf.tid = tcon->tid;
-+ tcon_inf.session_id = tcon->ses->Suid;
-+
-+ if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
-+ rc = -EFAULT;
-+
-+ return rc;
-+}
-+
- static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
- void __user *arg)
- {
-@@ -414,6 +428,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
- tcon = tlink_tcon(pSMBFile->tlink);
- rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
- break;
-+ case CIFS_IOC_GET_TCON_INFO:
-+ cifs_sb = CIFS_SB(inode->i_sb);
-+ tlink = cifs_sb_tlink(cifs_sb);
-+ if (IS_ERR(tlink)) {
-+ rc = PTR_ERR(tlink);
-+ break;
-+ }
-+ tcon = tlink_tcon(tlink);
-+ rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
-+ cifs_put_tlink(tlink);
-+ break;
- case CIFS_ENUMERATE_SNAPSHOTS:
- if (pSMBFile == NULL)
- break;
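A hedged user-space usage sketch for the new CIFS_IOC_GET_TCON_INFO ioctl, built from the cifs_ioctl.h hunk above. The struct layout and request number 12 come from that hunk; CIFS_IOCTL_MAGIC is 0xCF in the kernel's cifs_ioctl.h, but verify both against your installed headers rather than trusting this sketch.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* mirrors the definitions added to fs/smb/client/cifs_ioctl.h */
struct smb_mnt_tcon_info {
        uint32_t tid;
        uint64_t session_id;
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC 0xCF           /* assumption: check your headers */
#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)

int main(int argc, char **argv)
{
        struct smb_mnt_tcon_info info;
        int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

        if (fd < 0 || ioctl(fd, CIFS_IOC_GET_TCON_INFO, &info) < 0) {
                perror("CIFS_IOC_GET_TCON_INFO");
                return 1;
        }
        printf("tid=0x%x session_id=0x%llx\n",
               (unsigned)info.tid, (unsigned long long)info.session_id);
        close(fd);
        return 0;
}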
-diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
-index c8f5ed8a69f1c..a6968573b775e 100644
---- a/fs/smb/client/namespace.c
-+++ b/fs/smb/client/namespace.c
-@@ -117,6 +117,18 @@ cifs_build_devname(char *nodename, const char *prepath)
- return dev;
- }
-
-+static bool is_dfs_mount(struct dentry *dentry)
-+{
-+ struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
-+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
-+ bool ret;
-+
-+ spin_lock(&tcon->tc_lock);
-+ ret = !!tcon->origin_fullpath;
-+ spin_unlock(&tcon->tc_lock);
-+ return ret;
-+}
-+
- /* Return full path out of a dentry set for automount */
- static char *automount_fullpath(struct dentry *dentry, void *page)
- {
-@@ -212,8 +224,9 @@ static struct vfsmount *cifs_do_automount(struct path *path)
- ctx->source = NULL;
- goto out;
- }
-- cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s\n",
-- __func__, ctx->source, ctx->UNC, ctx->prepath);
-+ ctx->dfs_automount = is_dfs_mount(mntpt);
-+ cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
-+ __func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);
-
- mnt = fc_mount(fc);
- out:
-diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
-index 79f26c560edf8..80050e36f0451 100644
---- a/fs/smb/client/sess.c
-+++ b/fs/smb/client/sess.c
-@@ -24,7 +24,7 @@
- #include "fs_context.h"
-
- static int
--cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
-+cifs_ses_add_channel(struct cifs_ses *ses,
- struct cifs_server_iface *iface);
-
- bool
-@@ -157,14 +157,16 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
- }
-
- /* returns number of channels added */
--int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
-+int cifs_try_adding_channels(struct cifs_ses *ses)
- {
- struct TCP_Server_Info *server = ses->server;
- int old_chan_count, new_chan_count;
- int left;
- int rc = 0;
- int tries = 0;
-+ size_t iface_weight = 0, iface_min_speed = 0;
- struct cifs_server_iface *iface = NULL, *niface = NULL;
-+ struct cifs_server_iface *last_iface = NULL;
-
- spin_lock(&ses->chan_lock);
-
-@@ -186,28 +188,17 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
- }
-
- if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
-- ses->chan_max = 1;
- spin_unlock(&ses->chan_lock);
- cifs_server_dbg(VFS, "no multichannel support\n");
- return 0;
- }
- spin_unlock(&ses->chan_lock);
-
-- /*
-- * Keep connecting to same, fastest, iface for all channels as
-- * long as its RSS. Try next fastest one if not RSS or channel
-- * creation fails.
-- */
-- spin_lock(&ses->iface_lock);
-- iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
-- iface_head);
-- spin_unlock(&ses->iface_lock);
--
- while (left > 0) {
-
- tries++;
- if (tries > 3*ses->chan_max) {
-- cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
-+ cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
- left);
- break;
- }
-@@ -215,23 +206,41 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
- spin_lock(&ses->iface_lock);
- if (!ses->iface_count) {
- spin_unlock(&ses->iface_lock);
-+ cifs_dbg(VFS, "server %s does not advertise interfaces\n",
-+ ses->server->hostname);
- break;
- }
-
-+ if (!iface)
-+ iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
-+ iface_head);
-+ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
-+ iface_head);
-+ iface_min_speed = last_iface->speed;
-+
- list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
- iface_head) {
-+ /* do not mix rdma and non-rdma interfaces */
-+ if (iface->rdma_capable != ses->server->rdma)
-+ continue;
-+
- /* skip ifaces that are unusable */
- if (!iface->is_active ||
- (is_ses_using_iface(ses, iface) &&
-- !iface->rss_capable)) {
-+ !iface->rss_capable))
-+ continue;
-+
-+ /* check if we already allocated enough channels */
-+ iface_weight = iface->speed / iface_min_speed;
-+
-+ if (iface->weight_fulfilled >= iface_weight)
- continue;
-- }
-
- /* take ref before unlock */
- kref_get(&iface->refcount);
-
- spin_unlock(&ses->iface_lock);
-- rc = cifs_ses_add_channel(cifs_sb, ses, iface);
-+ rc = cifs_ses_add_channel(ses, iface);
- spin_lock(&ses->iface_lock);
-
- if (rc) {
-@@ -242,10 +251,21 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
- continue;
- }
-
-- cifs_dbg(FYI, "successfully opened new channel on iface:%pIS\n",
-+ iface->num_channels++;
-+ iface->weight_fulfilled++;
-+ cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
- &iface->sockaddr);
- break;
- }
-+
-+ /* reached end of list. reset weight_fulfilled and start over */
-+ if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
-+ list_for_each_entry(iface, &ses->iface_list, iface_head)
-+ iface->weight_fulfilled = 0;
-+ spin_unlock(&ses->iface_lock);
-+ iface = NULL;
-+ continue;
-+ }
- spin_unlock(&ses->iface_lock);
-
- left--;
-@@ -264,8 +284,11 @@ int
- cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
- {
- unsigned int chan_index;
-+ size_t iface_weight = 0, iface_min_speed = 0;
- struct cifs_server_iface *iface = NULL;
- struct cifs_server_iface *old_iface = NULL;
-+ struct cifs_server_iface *last_iface = NULL;
-+ struct sockaddr_storage ss;
- int rc = 0;
-
- spin_lock(&ses->chan_lock);
-@@ -284,14 +307,49 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
- }
- spin_unlock(&ses->chan_lock);
-
-+ spin_lock(&server->srv_lock);
-+ ss = server->dstaddr;
-+ spin_unlock(&server->srv_lock);
-+
- spin_lock(&ses->iface_lock);
-+ if (!ses->iface_count) {
-+ spin_unlock(&ses->iface_lock);
-+ cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
-+ return 0;
-+ }
-+
-+ last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
-+ iface_head);
-+ iface_min_speed = last_iface->speed;
-+
- /* then look for a new one */
- list_for_each_entry(iface, &ses->iface_list, iface_head) {
-+ if (!chan_index) {
-+ /* if we're trying to get the updated iface for primary channel */
-+ if (!cifs_match_ipaddr((struct sockaddr *) &ss,
-+ (struct sockaddr *) &iface->sockaddr))
-+ continue;
-+
-+ kref_get(&iface->refcount);
-+ break;
-+ }
-+
-+ /* do not mix rdma and non-rdma interfaces */
-+ if (iface->rdma_capable != server->rdma)
-+ continue;
-+
- if (!iface->is_active ||
- (is_ses_using_iface(ses, iface) &&
- !iface->rss_capable)) {
- continue;
- }
-+
-+ /* check if we already allocated enough channels */
-+ iface_weight = iface->speed / iface_min_speed;
-+
-+ if (iface->weight_fulfilled >= iface_weight)
-+ continue;
-+
- kref_get(&iface->refcount);
- break;
- }
-@@ -302,16 +360,41 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
- cifs_dbg(FYI, "unable to find a suitable iface\n");
- }
-
-+ if (!chan_index && !iface) {
-+ cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
-+ &ss);
-+ spin_unlock(&ses->iface_lock);
-+ return 0;
-+ }
-+
- /* now drop the ref to the current iface */
- if (old_iface && iface) {
- cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
- &old_iface->sockaddr,
- &iface->sockaddr);
-+
-+ old_iface->num_channels--;
-+ if (old_iface->weight_fulfilled)
-+ old_iface->weight_fulfilled--;
-+ iface->num_channels++;
-+ iface->weight_fulfilled++;
-+
- kref_put(&old_iface->refcount, release_iface);
- } else if (old_iface) {
- cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
- &old_iface->sockaddr);
-+
-+ old_iface->num_channels--;
-+ if (old_iface->weight_fulfilled)
-+ old_iface->weight_fulfilled--;
-+
- kref_put(&old_iface->refcount, release_iface);
-+ } else if (!chan_index) {
-+ /* special case: update interface for primary channel */
-+ cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
-+ &iface->sockaddr);
-+ iface->num_channels++;
-+ iface->weight_fulfilled++;
- } else {
- WARN_ON(!iface);
- cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
-@@ -355,7 +438,7 @@ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
- }
-
- static int
--cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
-+cifs_ses_add_channel(struct cifs_ses *ses,
- struct cifs_server_iface *iface)
- {
- struct TCP_Server_Info *chan_server;
-@@ -434,7 +517,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
- * This will be used for encoding/decoding user/domain/pw
- * during sess setup auth.
- */
-- ctx->local_nls = cifs_sb->local_nls;
-+ ctx->local_nls = ses->local_nls;
-
- /* Use RDMA if possible */
- ctx->rdma = iface->rdma_capable;
-@@ -480,7 +563,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
-
- rc = cifs_negotiate_protocol(xid, ses, chan->server);
- if (!rc)
-- rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
-+ rc = cifs_setup_session(xid, ses, chan->server, ses->local_nls);
-
- mutex_unlock(&ses->session_mutex);
-
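A worked example of the weighting introduced above: the interface list is ordered fastest first, the tail sets iface_min_speed, and each interface's weight is speed / iface_min_speed. With 10 Gbps, 10 Gbps and 1 Gbps interfaces the weights are 10, 10 and 1, so roughly ten channels land on each fast interface for every one on the slow interface, and weight_fulfilled resets once the scan falls off the end of the list. A toy simulation (ordering simplified relative to the kernel's resume-from-current-iface scan):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t speed[] = { 10000, 10000, 1000 };   /* Mbps, fastest first */
        size_t fulfilled[3] = { 0, 0, 0 };
        size_t min_speed = speed[2];               /* list tail is slowest */
        int chans = 7;                             /* channels left to open */

        while (chans > 0) {
                int progress = 0;
                for (size_t i = 0; i < 3 && chans > 0; i++) {
                        size_t weight = speed[i] / min_speed;
                        if (fulfilled[i] >= weight)
                                continue;          /* iface has its share */
                        fulfilled[i]++;            /* weight_fulfilled++ */
                        chans--;
                        progress = 1;
                        printf("channel -> iface %zu (%zu/%zu)\n",
                               i, fulfilled[i], weight);
                }
                if (!progress)                     /* walked off the end: */
                        for (size_t j = 0; j < 3; j++)
                                fulfilled[j] = 0;  /* reset and start over */
        }
        return 0;
}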
-diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
-index 25f7cd6f23d64..32dfa0f7a78c3 100644
---- a/fs/smb/client/smb2misc.c
-+++ b/fs/smb/client/smb2misc.c
-@@ -787,7 +787,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
- {
- struct close_cancelled_open *cancelled;
-
-- cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
-+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
- if (!cancelled)
- return -ENOMEM;
-
-diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
-index 9aeecee6b91b3..b2a60aa6564fd 100644
---- a/fs/smb/client/smb2ops.c
-+++ b/fs/smb/client/smb2ops.c
-@@ -756,6 +756,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
- unsigned int ret_data_len = 0;
- struct network_interface_info_ioctl_rsp *out_buf = NULL;
- struct cifs_ses *ses = tcon->ses;
-+ struct TCP_Server_Info *pserver;
-
- /* do not query too frequently */
- if (ses->iface_last_update &&
-@@ -780,6 +781,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
- if (rc)
- goto out;
-
-+ /* check if iface is still active */
-+ pserver = ses->chans[0].server;
-+ if (pserver && !cifs_chan_is_iface_active(ses, pserver))
-+ cifs_chan_update_iface(ses, pserver);
-+
- out:
- kfree(out_buf);
- return rc;
-@@ -3299,6 +3305,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
- struct inode *inode = file_inode(file);
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct cifsFileInfo *cfile = file->private_data;
-+ unsigned long long new_size;
- long rc;
- unsigned int xid;
- __le64 eof;
-@@ -3329,10 +3336,15 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
- /*
- * do we also need to change the size of the file?
- */
-- if (keep_size == false && i_size_read(inode) < offset + len) {
-- eof = cpu_to_le64(offset + len);
-+ new_size = offset + len;
-+ if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
-+ eof = cpu_to_le64(new_size);
- rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, cfile->pid, &eof);
-+ if (rc >= 0) {
-+ truncate_setsize(inode, new_size);
-+ fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
-+ }
- }
-
- zero_range_exit:
-@@ -3727,6 +3739,9 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
- if (rc < 0)
- goto out_2;
-
-+ truncate_setsize(inode, old_eof + len);
-+ fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
-+
- rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
- if (rc < 0)
- goto out_2;
-@@ -5087,7 +5102,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
- * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
- */
-
-- if (!S_ISCHR(mode) && !S_ISBLK(mode))
-+ if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
- return rc;
-
- cifs_dbg(FYI, "sfu compat create special file\n");
-@@ -5135,6 +5150,12 @@ smb2_make_node(unsigned int xid, struct inode *inode,
- pdev->minor = cpu_to_le64(MINOR(dev));
- rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
- &bytes_written, iov, 1);
-+ } else if (S_ISFIFO(mode)) {
-+ memcpy(pdev->type, "LnxFIFO", 8);
-+ pdev->major = 0;
-+ pdev->minor = 0;
-+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
-+ &bytes_written, iov, 1);
- }
- tcon->ses->server->ops->close(xid, tcon, &fid);
- d_drop(dentry);
-diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
-index 23c50ed7d4b59..a136fc4cc2b5f 100644
---- a/fs/smb/client/smb2transport.c
-+++ b/fs/smb/client/smb2transport.c
-@@ -452,6 +452,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
- ptriplet->encryption.context,
- ses->smb3encryptionkey,
- SMB3_ENC_DEC_KEY_SIZE);
-+ if (rc)
-+ return rc;
- rc = generate_key(ses, ptriplet->decryption.label,
- ptriplet->decryption.context,
- ses->smb3decryptionkey,
-@@ -460,9 +462,6 @@ generate_smb3signingkey(struct cifs_ses *ses,
- return rc;
- }
-
-- if (rc)
-- return rc;
--
- #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
- cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
- /*
-diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
-index 14710afdc2a36..d553b7a54621b 100644
---- a/fs/smb/client/transport.c
-+++ b/fs/smb/client/transport.c
-@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
- return temp;
- }
-
--static void __release_mid(struct kref *refcount)
-+void __release_mid(struct kref *refcount)
- {
- struct mid_q_entry *midEntry =
- container_of(refcount, struct mid_q_entry, refcount);
-@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
- mempool_free(midEntry, cifs_mid_poolp);
- }
-
--void release_mid(struct mid_q_entry *mid)
--{
-- struct TCP_Server_Info *server = mid->server;
--
-- spin_lock(&server->mid_lock);
-- kref_put(&mid->refcount, __release_mid);
-- spin_unlock(&server->mid_lock);
--}
--
- void
- delete_mid(struct mid_q_entry *mid)
- {
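release_mid() becomes a static inline kref_put() wrapper in cifsproto.h (shown earlier), and the transport.c hunk above deletes the old version that took server->mid_lock around the drop; the refcount already serializes the final put, and holding a spinlock across the release callback invites trouble. The pattern, reduced to C11 atomics (types and names illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct mid {
        atomic_int ref;
        /* ... payload ... */
};

static void release_last(struct mid *m)         /* like __release_mid() */
{
        free(m);
}

static inline void mid_put(struct mid *m)       /* like release_mid() */
{
        /* last ref frees; no spinlock needed around the decrement */
        if (atomic_fetch_sub(&m->ref, 1) == 1)
                release_last(m);
}

int main(void)
{
        struct mid *m = malloc(sizeof(*m));
        if (!m)
                return 1;
        atomic_init(&m->ref, 1);
        mid_put(m);             /* last ref: release_last() frees */
        return 0;
}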
-diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
-index 4ad5531686d81..c2bf829310bee 100644
---- a/fs/smb/client/xattr.c
-+++ b/fs/smb/client/xattr.c
-@@ -150,10 +150,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
- goto out;
-
-- if (pTcon->ses->server->ops->set_EA)
-+ if (pTcon->ses->server->ops->set_EA) {
- rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
- full_path, name, value, (__u16)size,
- cifs_sb->local_nls, cifs_sb);
-+ if (rc == 0)
-+ inode_set_ctime_current(inode);
-+ }
- break;
-
- case XATTR_CIFS_ACL:
-diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
-index 93262ca3f58a7..269fbfb3cd678 100644
---- a/fs/smb/server/smb2pdu.c
-+++ b/fs/smb/server/smb2pdu.c
-@@ -2380,7 +2380,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
- rc = 0;
- } else {
- rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
-- le16_to_cpu(eabuf->EaValueLength), 0);
-+ le16_to_cpu(eabuf->EaValueLength),
-+ 0, true);
- if (rc < 0) {
- ksmbd_debug(SMB,
- "ksmbd_vfs_setxattr is failed(%d)\n",
-@@ -2443,7 +2444,7 @@ static noinline int smb2_set_stream_name_xattr(const struct path *path,
- return -EBADF;
- }
-
-- rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0);
-+ rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0, false);
- if (rc < 0)
- pr_err("Failed to store XATTR stream name :%d\n", rc);
- return 0;
-@@ -2518,7 +2519,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
- da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
- XATTR_DOSINFO_ITIME;
-
-- rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da);
-+ rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false);
- if (rc)
- ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
- }
-@@ -2608,7 +2609,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
- sizeof(struct create_sd_buf_req))
- return -EINVAL;
- return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
-- le32_to_cpu(sd_buf->ccontext.DataLength), true);
-+ le32_to_cpu(sd_buf->ccontext.DataLength), true, false);
- }
-
- static void ksmbd_acls_fattr(struct smb_fattr *fattr,
-@@ -3152,7 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
- idmap,
- &path,
- pntsd,
-- pntsd_size);
-+ pntsd_size,
-+ false);
- kfree(pntsd);
- if (rc)
- pr_err("failed to store ntacl in xattr : %d\n",
-@@ -3228,12 +3230,6 @@ int smb2_open(struct ksmbd_work *work)
- if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
- ksmbd_fd_set_delete_on_close(fp, file_info);
-
-- if (need_truncate) {
-- rc = smb2_create_truncate(&path);
-- if (rc)
-- goto err_out;
-- }
--
- if (req->CreateContextsOffset) {
- struct create_alloc_size_req *az_req;
-
-@@ -3398,11 +3394,12 @@ int smb2_open(struct ksmbd_work *work)
- }
-
- err_out:
-- if (file_present || created) {
-- inode_unlock(d_inode(parent_path.dentry));
-- path_put(&path);
-- path_put(&parent_path);
-- }
-+ if (file_present || created)
-+ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
-+
-+ if (fp && need_truncate)
-+ rc = smb2_create_truncate(&fp->filp->f_path);
-+
- ksmbd_revert_fsids(work);
- err_out1:
- if (!rc) {
-@@ -5537,7 +5534,7 @@ static int smb2_rename(struct ksmbd_work *work,
- rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp),
- &fp->filp->f_path,
- xattr_stream_name,
-- NULL, 0, 0);
-+ NULL, 0, 0, true);
- if (rc < 0) {
- pr_err("failed to store stream name in xattr: %d\n",
- rc);
-@@ -5630,11 +5627,9 @@ static int smb2_create_link(struct ksmbd_work *work,
- if (rc)
- rc = -EINVAL;
- out:
-- if (file_present) {
-- inode_unlock(d_inode(parent_path.dentry));
-- path_put(&path);
-- path_put(&parent_path);
-- }
-+ if (file_present)
-+ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
-+
- if (!IS_ERR(link_name))
- kfree(link_name);
- kfree(pathname);
-@@ -5701,7 +5696,8 @@ static int set_file_basic_info(struct ksmbd_file *fp,
- da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
- XATTR_DOSINFO_ITIME;
-
-- rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da);
-+ rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da,
-+ true);
- if (rc)
- ksmbd_debug(SMB,
- "failed to restore file attribute in EA\n");
-@@ -6013,7 +6009,7 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
- fp->saccess |= FILE_SHARE_DELETE_LE;
-
- return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
-- buf_len, false);
-+ buf_len, false, true);
- }
-
- /**
-@@ -7582,7 +7578,8 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
-
- da.attr = le32_to_cpu(fp->f_ci->m_fattr);
- ret = ksmbd_vfs_set_dos_attrib_xattr(idmap,
-- &fp->filp->f_path, &da);
-+ &fp->filp->f_path,
-+ &da, true);
- if (ret)
- fp->f_ci->m_fattr = old_fattr;
- }
-diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
-index e6ba1e9b8589a..6691ae68af0c0 100644
---- a/fs/smb/server/smb_common.c
-+++ b/fs/smb/server/smb_common.c
-@@ -366,11 +366,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
- return 0;
- }
-
-+/**
-+ * set_smb1_rsp_status() - set error type in smb response header
-+ * @work: smb work containing smb response header
-+ * @err: error code to set in response
-+ */
-+static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
-+{
-+ work->send_no_response = 1;
-+}
-+
- static struct smb_version_ops smb1_server_ops = {
- .get_cmd_val = get_smb1_cmd_val,
- .init_rsp_hdr = init_smb1_rsp_hdr,
- .allocate_rsp_buf = smb1_allocate_rsp_buf,
- .check_user_session = smb1_check_user_session,
-+ .set_rsp_status = set_smb1_rsp_status,
- };
-
- static int smb1_negotiate(struct ksmbd_work *work)
-diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
-index 6c0305be895e5..1164365533f08 100644
---- a/fs/smb/server/smbacl.c
-+++ b/fs/smb/server/smbacl.c
-@@ -1107,6 +1107,7 @@ pass:
- struct smb_acl *pdacl;
- struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
- int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
-+ int pntsd_alloc_size;
-
- if (parent_pntsd->osidoffset) {
- powner_sid = (struct smb_sid *)((char *)parent_pntsd +
-@@ -1119,9 +1120,10 @@ pass:
- pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
- }
-
-- pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
-- pgroup_sid_size + sizeof(struct smb_acl) +
-- nt_size, GFP_KERNEL);
-+ pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
-+ pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
-+
-+ pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
- if (!pntsd) {
- rc = -ENOMEM;
- goto free_aces_base;
-@@ -1136,6 +1138,27 @@ pass:
- pntsd->gsidoffset = parent_pntsd->gsidoffset;
- pntsd->dacloffset = parent_pntsd->dacloffset;
-
-+ if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
-+ pntsd_alloc_size) {
-+ rc = -EINVAL;
-+ kfree(pntsd);
-+ goto free_aces_base;
-+ }
-+
-+ if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
-+ pntsd_alloc_size) {
-+ rc = -EINVAL;
-+ kfree(pntsd);
-+ goto free_aces_base;
-+ }
-+
-+ if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
-+ pntsd_alloc_size) {
-+ rc = -EINVAL;
-+ kfree(pntsd);
-+ goto free_aces_base;
-+ }
-+
- if (pntsd->osidoffset) {
- struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
- le32_to_cpu(pntsd->osidoffset));
-@@ -1162,7 +1185,7 @@ pass:
- pntsd_size += sizeof(struct smb_acl) + nt_size;
- }
-
-- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size);
-+ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size, false);
- kfree(pntsd);
- }
-
-@@ -1354,7 +1377,7 @@ err_out:
-
- int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
- const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
-- bool type_check)
-+ bool type_check, bool get_write)
- {
- int rc;
- struct smb_fattr fattr = {{0}};
-@@ -1414,7 +1437,8 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
- if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
- /* Update WinACL in xattr */
- ksmbd_vfs_remove_sd_xattrs(idmap, path);
-- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len);
-+ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len,
-+ get_write);
- }
-
- out:
-diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
-index 49a8c292bd2e8..2b52861707d8c 100644
---- a/fs/smb/server/smbacl.h
-+++ b/fs/smb/server/smbacl.h
-@@ -207,7 +207,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
- __le32 *pdaccess, int uid);
- int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
- const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
-- bool type_check);
-+ bool type_check, bool get_write);
- void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
- void ksmbd_init_domain(u32 *sub_auth);
-
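The smbacl.c hunks above harden inherited-descriptor parsing: each little-endian offset plus its payload must fit inside the freshly allocated buffer, and the sum is widened to u64 so the addition itself cannot wrap. A minimal standalone sketch of that validation pattern follows; the names are illustrative, not the ksmbd API.

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if [offset, offset + size) lies inside a buffer of
 * alloc_size bytes. Widening to uint64_t keeps the sum from wrapping
 * when offset comes from untrusted 32-bit wire data. */
static int range_fits(uint32_t offset, uint32_t size, uint32_t alloc_size)
{
	return (uint64_t)offset + size <= alloc_size;
}

int main(void)
{
	printf("%d\n", range_fits(16, 32, 64));         /* 1: fits */
	printf("%d\n", range_fits(0xffffffffu, 8, 64)); /* 0: sum would wrap 32 bits */
	return 0;
}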
-diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
-index b5a5e50fc9ca3..5a41c0b4e9335 100644
---- a/fs/smb/server/vfs.c
-+++ b/fs/smb/server/vfs.c
-@@ -97,6 +97,13 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
- return -ENOENT;
- }
-
-+ err = mnt_want_write(parent_path->mnt);
-+ if (err) {
-+ path_put(parent_path);
-+ putname(filename);
-+ return -ENOENT;
-+ }
-+
- inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
- d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
- if (IS_ERR(d))
-@@ -123,6 +130,7 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
-
- err_out:
- inode_unlock(d_inode(parent_path->dentry));
-+ mnt_drop_write(parent_path->mnt);
- path_put(parent_path);
- putname(filename);
- return -ENOENT;
-@@ -173,10 +181,6 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
- return err;
- }
-
-- err = mnt_want_write(path.mnt);
-- if (err)
-- goto out_err;
--
- mode |= S_IFREG;
- err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry),
- dentry, mode, true);
-@@ -186,9 +190,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
- } else {
- pr_err("File(%s): creation failed (err:%d)\n", name, err);
- }
-- mnt_drop_write(path.mnt);
-
--out_err:
- done_path_create(&path, dentry);
- return err;
- }
-@@ -219,10 +221,6 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
- return err;
- }
-
-- err = mnt_want_write(path.mnt);
-- if (err)
-- goto out_err2;
--
- idmap = mnt_idmap(path.mnt);
- mode |= S_IFDIR;
- err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
-@@ -233,21 +231,19 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
- dentry->d_name.len);
- if (IS_ERR(d)) {
- err = PTR_ERR(d);
-- goto out_err1;
-+ goto out_err;
- }
- if (unlikely(d_is_negative(d))) {
- dput(d);
- err = -ENOENT;
-- goto out_err1;
-+ goto out_err;
- }
-
- ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
- dput(d);
- }
-
--out_err1:
-- mnt_drop_write(path.mnt);
--out_err2:
-+out_err:
- done_path_create(&path, dentry);
- if (err)
- pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
-@@ -463,7 +459,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
- fp->stream.name,
- (void *)stream_buf,
- size,
-- 0);
-+ 0,
-+ true);
- if (err < 0)
- goto out;
-
-@@ -605,10 +602,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
- goto out_err;
- }
-
-- err = mnt_want_write(path->mnt);
-- if (err)
-- goto out_err;
--
- idmap = mnt_idmap(path->mnt);
- if (S_ISDIR(d_inode(path->dentry)->i_mode)) {
- err = vfs_rmdir(idmap, d_inode(parent), path->dentry);
-@@ -619,7 +612,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
- if (err)
- ksmbd_debug(VFS, "unlink failed, err %d\n", err);
- }
-- mnt_drop_write(path->mnt);
-
- out_err:
- ksmbd_revert_fsids(work);
-@@ -665,16 +657,11 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
- goto out3;
- }
-
-- err = mnt_want_write(newpath.mnt);
-- if (err)
-- goto out3;
--
- err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt),
- d_inode(newpath.dentry),
- dentry, NULL);
- if (err)
- ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
-- mnt_drop_write(newpath.mnt);
-
- out3:
- done_path_create(&newpath, dentry);
-@@ -924,18 +911,22 @@ ssize_t ksmbd_vfs_getxattr(struct mnt_idmap *idmap,
- * @attr_value: xattr value to set
- * @attr_size: size of xattr value
- * @flags: destination buffer length
-+ * @get_write: get write access to a mount
- *
- * Return: 0 on success, otherwise error
- */
- int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
- const struct path *path, const char *attr_name,
-- void *attr_value, size_t attr_size, int flags)
-+ void *attr_value, size_t attr_size, int flags,
-+ bool get_write)
- {
- int err;
-
-- err = mnt_want_write(path->mnt);
-- if (err)
-- return err;
-+ if (get_write == true) {
-+ err = mnt_want_write(path->mnt);
-+ if (err)
-+ return err;
-+ }
-
- err = vfs_setxattr(idmap,
- path->dentry,
-@@ -945,7 +936,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
- flags);
- if (err)
- ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
-- mnt_drop_write(path->mnt);
-+ if (get_write == true)
-+ mnt_drop_write(path->mnt);
- return err;
- }
-
-@@ -1268,6 +1260,13 @@ out1:
- }
-
- if (!err) {
-+ err = mnt_want_write(parent_path->mnt);
-+ if (err) {
-+ path_put(path);
-+ path_put(parent_path);
-+ return err;
-+ }
-+
- err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
- if (err) {
- path_put(path);
-@@ -1277,6 +1276,14 @@ out1:
- return err;
- }
-
-+void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
-+{
-+ inode_unlock(d_inode(parent_path->dentry));
-+ mnt_drop_write(parent_path->mnt);
-+ path_put(path);
-+ path_put(parent_path);
-+}
-+
- struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
- const char *name,
- unsigned int flags,
-@@ -1431,7 +1438,8 @@ out:
- int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
- struct mnt_idmap *idmap,
- const struct path *path,
-- struct smb_ntsd *pntsd, int len)
-+ struct smb_ntsd *pntsd, int len,
-+ bool get_write)
- {
- int rc;
- struct ndr sd_ndr = {0}, acl_ndr = {0};
-@@ -1491,7 +1499,7 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
-
- rc = ksmbd_vfs_setxattr(idmap, path,
- XATTR_NAME_SD, sd_ndr.data,
-- sd_ndr.offset, 0);
-+ sd_ndr.offset, 0, get_write);
- if (rc < 0)
- pr_err("Failed to store XATTR ntacl :%d\n", rc);
-
-@@ -1580,7 +1588,8 @@ free_n_data:
-
- int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
- const struct path *path,
-- struct xattr_dos_attrib *da)
-+ struct xattr_dos_attrib *da,
-+ bool get_write)
- {
- struct ndr n;
- int err;
-@@ -1590,7 +1599,7 @@ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
- return err;
-
- err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE,
-- (void *)n.data, n.offset, 0);
-+ (void *)n.data, n.offset, 0, get_write);
- if (err)
- ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
- kfree(n.data);
-@@ -1862,10 +1871,6 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
- }
- posix_state_to_acl(&acl_state, acls->a_entries);
-
-- rc = mnt_want_write(path->mnt);
-- if (rc)
-- goto out_err;
--
- rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
- if (rc < 0)
- ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
-@@ -1877,9 +1882,7 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
- ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
- rc);
- }
-- mnt_drop_write(path->mnt);
-
--out_err:
- free_acl_state(&acl_state);
- posix_acl_release(acls);
- return rc;
-@@ -1909,10 +1912,6 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
- }
- }
-
-- rc = mnt_want_write(path->mnt);
-- if (rc)
-- goto out_err;
--
- rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
- if (rc < 0)
- ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
-@@ -1924,9 +1923,7 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
- ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
- rc);
- }
-- mnt_drop_write(path->mnt);
-
--out_err:
- posix_acl_release(acls);
- return rc;
- }
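The vfs.c changes hoist mnt_want_write() out of the individual create/mkdir/unlink helpers into the shared path lookup, then thread a get_write flag through ksmbd_vfs_setxattr() so callers that already hold write access do not re-acquire it. A toy userspace sketch of that "caller may already hold it" convention, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

static int writers;                       /* stand-in for mnt_want_write() state */

static int want_write(void) { writers++; return 0; }
static void drop_write(void) { writers--; }

/* get_write = false tells the helper the caller already bracketed the
 * operation with want_write()/drop_write(), so it must not nest. */
static int set_attr(const char *name, bool get_write)
{
	if (get_write && want_write())
		return -1;
	printf("set %s (writers held: %d)\n", name, writers);
	if (get_write)
		drop_write();
	return 0;
}

int main(void)
{
	set_attr("user.demo", true);    /* helper acquires and drops itself */
	want_write();                   /* caller-managed bracket ... */
	set_attr("user.demo2", false);  /* ... helper skips re-acquiring */
	drop_write();
	return 0;
}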
-diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
-index 00968081856e3..cfe1c8092f230 100644
---- a/fs/smb/server/vfs.h
-+++ b/fs/smb/server/vfs.h
-@@ -109,7 +109,8 @@ ssize_t ksmbd_vfs_casexattr_len(struct mnt_idmap *idmap,
- int attr_name_len);
- int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
- const struct path *path, const char *attr_name,
-- void *attr_value, size_t attr_size, int flags);
-+ void *attr_value, size_t attr_size, int flags,
-+ bool get_write);
- int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
- size_t *xattr_stream_name_size, int s_type);
- int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
-@@ -117,6 +118,7 @@ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
- unsigned int flags, struct path *parent_path,
- struct path *path, bool caseless);
-+void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
- struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
- const char *name,
- unsigned int flags,
-@@ -144,14 +146,16 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
- int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
- struct mnt_idmap *idmap,
- const struct path *path,
-- struct smb_ntsd *pntsd, int len);
-+ struct smb_ntsd *pntsd, int len,
-+ bool get_write);
- int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
- struct mnt_idmap *idmap,
- struct dentry *dentry,
- struct smb_ntsd **pntsd);
- int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
- const struct path *path,
-- struct xattr_dos_attrib *da);
-+ struct xattr_dos_attrib *da,
-+ bool get_write);
- int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap,
- struct dentry *dentry,
- struct xattr_dos_attrib *da);
-diff --git a/fs/stat.c b/fs/stat.c
-index d43a5cc1bfa46..5375be5f97ccf 100644
---- a/fs/stat.c
-+++ b/fs/stat.c
-@@ -133,7 +133,8 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
- idmap = mnt_idmap(path->mnt);
- if (inode->i_op->getattr)
- return inode->i_op->getattr(idmap, path, stat,
-- request_mask, query_flags);
-+ request_mask,
-+ query_flags | AT_GETATTR_NOSEC);
-
- generic_fillattr(idmap, request_mask, inode, stat);
- return 0;
-@@ -166,6 +167,9 @@ int vfs_getattr(const struct path *path, struct kstat *stat,
- {
- int retval;
-
-+ if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
-+ return -EPERM;
-+
- retval = security_inode_getattr(path);
- if (retval)
- return retval;
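The fs/stat.c hunk reserves AT_GETATTR_NOSEC for internal use: vfs_getattr_nosec() ORs it into the flags handed to ->getattr(), while vfs_getattr() rejects any external caller that tries to pass it in. A compact sketch of that reserve-a-flag pattern, standalone C with made-up names rather than the VFS API:

#include <stdio.h>

#define QF_FORCE_SYNC 0x01  /* public flag */
#define QF_NOSEC      0x80  /* reserved: only the _nosec entry point sets it */

static int do_getattr(unsigned int flags)
{
	printf("getattr flags=0x%02x\n", flags);
	return 0;
}

static int getattr_nosec(unsigned int flags)
{
	return do_getattr(flags | QF_NOSEC);  /* internal path adds the flag */
}

static int getattr(unsigned int flags)
{
	if (flags & QF_NOSEC)                 /* external path refuses it */
		return -1;                    /* the kernel returns -EPERM here */
	return do_getattr(flags);
}

int main(void)
{
	getattr(QF_FORCE_SYNC);            /* accepted */
	getattr_nosec(QF_FORCE_SYNC);      /* accepted, flag added internally */
	printf("%d\n", getattr(QF_NOSEC)); /* -1: rejected */
	return 0;
}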
-diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
-index 8c8d64e76103e..efbdc47c74dcf 100644
---- a/fs/tracefs/event_inode.c
-+++ b/fs/tracefs/event_inode.c
-@@ -38,7 +38,10 @@ struct eventfs_inode {
- * @fop: file_operations for file or directory
- * @iop: inode_operations for file or directory
- * @data: something that the caller will want to get to later on
-+ * @is_freed: Flag set if the eventfs is on its way to be freed
- * @mode: the permission that the file or directory should have
-+ * @uid: saved uid if changed
-+ * @gid: saved gid if changed
- */
- struct eventfs_file {
- const char *name;
-@@ -50,22 +53,32 @@ struct eventfs_file {
- const struct inode_operations *iop;
- /*
- * Union - used for deletion
-- * @del_list: list of eventfs_file to delete
-+ * @llist: for calling dput() if needed after RCU
- * @rcu: eventfs_file to delete in RCU
-- * @is_freed: node is freed if one of the above is set
- */
- union {
-- struct list_head del_list;
-+ struct llist_node llist;
- struct rcu_head rcu;
-- unsigned long is_freed;
- };
- void *data;
-- umode_t mode;
-+ unsigned int is_freed:1;
-+ unsigned int mode:31;
-+ kuid_t uid;
-+ kgid_t gid;
- };
-
- static DEFINE_MUTEX(eventfs_mutex);
- DEFINE_STATIC_SRCU(eventfs_srcu);
-
-+/* Mode is unsigned short, use the upper bits for flags */
-+enum {
-+ EVENTFS_SAVE_MODE = BIT(16),
-+ EVENTFS_SAVE_UID = BIT(17),
-+ EVENTFS_SAVE_GID = BIT(18),
-+};
-+
-+#define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1)
-+
- static struct dentry *eventfs_root_lookup(struct inode *dir,
- struct dentry *dentry,
- unsigned int flags);
-@@ -73,8 +86,53 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
- static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
- static int eventfs_release(struct inode *inode, struct file *file);
-
-+static void update_attr(struct eventfs_file *ef, struct iattr *iattr)
-+{
-+ unsigned int ia_valid = iattr->ia_valid;
-+
-+ if (ia_valid & ATTR_MODE) {
-+ ef->mode = (ef->mode & ~EVENTFS_MODE_MASK) |
-+ (iattr->ia_mode & EVENTFS_MODE_MASK) |
-+ EVENTFS_SAVE_MODE;
-+ }
-+ if (ia_valid & ATTR_UID) {
-+ ef->mode |= EVENTFS_SAVE_UID;
-+ ef->uid = iattr->ia_uid;
-+ }
-+ if (ia_valid & ATTR_GID) {
-+ ef->mode |= EVENTFS_SAVE_GID;
-+ ef->gid = iattr->ia_gid;
-+ }
-+}
-+
-+static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
-+ struct iattr *iattr)
-+{
-+ struct eventfs_file *ef;
-+ int ret;
-+
-+ mutex_lock(&eventfs_mutex);
-+ ef = dentry->d_fsdata;
-+ if (ef && ef->is_freed) {
-+ /* Do not allow changes if the event is about to be removed. */
-+ mutex_unlock(&eventfs_mutex);
-+ return -ENODEV;
-+ }
-+
-+ ret = simple_setattr(idmap, dentry, iattr);
-+ if (!ret && ef)
-+ update_attr(ef, iattr);
-+ mutex_unlock(&eventfs_mutex);
-+ return ret;
-+}
-+
- static const struct inode_operations eventfs_root_dir_inode_operations = {
- .lookup = eventfs_root_lookup,
-+ .setattr = eventfs_set_attr,
-+};
-+
-+static const struct inode_operations eventfs_file_inode_operations = {
-+ .setattr = eventfs_set_attr,
- };
-
- static const struct file_operations eventfs_file_operations = {
-@@ -85,10 +143,20 @@ static const struct file_operations eventfs_file_operations = {
- .release = eventfs_release,
- };
-
-+static void update_inode_attr(struct inode *inode, struct eventfs_file *ef)
-+{
-+ inode->i_mode = ef->mode & EVENTFS_MODE_MASK;
-+
-+ if (ef->mode & EVENTFS_SAVE_UID)
-+ inode->i_uid = ef->uid;
-+
-+ if (ef->mode & EVENTFS_SAVE_GID)
-+ inode->i_gid = ef->gid;
-+}
-+
- /**
- * create_file - create a file in the tracefs filesystem
-- * @name: the name of the file to create.
-- * @mode: the permission that the file should have.
-+ * @ef: the eventfs_file
- * @parent: parent dentry for this file.
- * @data: something that the caller will want to get to later on.
- * @fop: struct file_operations that should be used for this file.
-@@ -104,7 +172,7 @@ static const struct file_operations eventfs_file_operations = {
- * If tracefs is not enabled in the kernel, the value -%ENODEV will be
- * returned.
- */
--static struct dentry *create_file(const char *name, umode_t mode,
-+static struct dentry *create_file(struct eventfs_file *ef,
- struct dentry *parent, void *data,
- const struct file_operations *fop)
- {
-@@ -112,13 +180,13 @@ static struct dentry *create_file(const char *name, umode_t mode,
- struct dentry *dentry;
- struct inode *inode;
-
-- if (!(mode & S_IFMT))
-- mode |= S_IFREG;
-+ if (!(ef->mode & S_IFMT))
-+ ef->mode |= S_IFREG;
-
-- if (WARN_ON_ONCE(!S_ISREG(mode)))
-+ if (WARN_ON_ONCE(!S_ISREG(ef->mode)))
- return NULL;
-
-- dentry = eventfs_start_creating(name, parent);
-+ dentry = eventfs_start_creating(ef->name, parent);
-
- if (IS_ERR(dentry))
- return dentry;
-@@ -127,7 +195,10 @@ static struct dentry *create_file(const char *name, umode_t mode,
- if (unlikely(!inode))
- return eventfs_failed_creating(dentry);
-
-- inode->i_mode = mode;
-+ /* If the user updated the directory's attributes, use them */
-+ update_inode_attr(inode, ef);
-+
-+ inode->i_op = &eventfs_file_inode_operations;
- inode->i_fop = fop;
- inode->i_private = data;
-
-@@ -140,7 +211,7 @@ static struct dentry *create_file(const char *name, umode_t mode,
-
- /**
- * create_dir - create a dir in the tracefs filesystem
-- * @name: the name of the file to create.
-+ * @ei: the eventfs_inode that represents the directory to create
- * @parent: parent dentry for this file.
- * @data: something that the caller will want to get to later on.
- *
-@@ -155,13 +226,14 @@ static struct dentry *create_file(const char *name, umode_t mode,
- * If tracefs is not enabled in the kernel, the value -%ENODEV will be
- * returned.
- */
--static struct dentry *create_dir(const char *name, struct dentry *parent, void *data)
-+static struct dentry *create_dir(struct eventfs_file *ef,
-+ struct dentry *parent, void *data)
- {
- struct tracefs_inode *ti;
- struct dentry *dentry;
- struct inode *inode;
-
-- dentry = eventfs_start_creating(name, parent);
-+ dentry = eventfs_start_creating(ef->name, parent);
- if (IS_ERR(dentry))
- return dentry;
-
-@@ -169,7 +241,8 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
- if (unlikely(!inode))
- return eventfs_failed_creating(dentry);
-
-- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
-+ update_inode_attr(inode, ef);
-+
- inode->i_op = &eventfs_root_dir_inode_operations;
- inode->i_fop = &eventfs_file_operations;
- inode->i_private = data;
-@@ -184,6 +257,13 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
- return eventfs_end_creating(dentry);
- }
-
-+static void free_ef(struct eventfs_file *ef)
-+{
-+ kfree(ef->name);
-+ kfree(ef->ei);
-+ kfree(ef);
-+}
-+
- /**
- * eventfs_set_ef_status_free - set the ef->status to free
- * @ti: the tracefs_inode of the dentry
-@@ -194,59 +274,37 @@ static struct dentry *create_dir(const char *name, struct dentry *parent, void *
- */
- void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
- {
-- struct tracefs_inode *ti_parent;
- struct eventfs_inode *ei;
-- struct eventfs_file *ef, *tmp;
-+ struct eventfs_file *ef;
-
- /* The top level events directory may be freed by this */
- if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
-- LIST_HEAD(ef_del_list);
--
- mutex_lock(&eventfs_mutex);
--
- ei = ti->private;
-
-- /* Record all the top level files */
-- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
-- lockdep_is_held(&eventfs_mutex)) {
-- list_add_tail(&ef->del_list, &ef_del_list);
-- }
--
- /* Nothing should access this, but just in case! */
- ti->private = NULL;
--
- mutex_unlock(&eventfs_mutex);
-
-- /* Now safely free the top level files and their children */
-- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
-- list_del(&ef->del_list);
-- eventfs_remove(ef);
-- }
--
-- kfree(ei);
-+ ef = dentry->d_fsdata;
-+ if (ef)
-+ free_ef(ef);
- return;
- }
-
- mutex_lock(&eventfs_mutex);
-
-- ti_parent = get_tracefs(dentry->d_parent->d_inode);
-- if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
-- goto out;
--
- ef = dentry->d_fsdata;
- if (!ef)
- goto out;
-
-- /*
-- * If ef was freed, then the LSB bit is set for d_fsdata.
-- * But this should not happen, as it should still have a
-- * ref count that prevents it. Warn in case it does.
-- */
-- if (WARN_ON_ONCE((unsigned long)ef & 1))
-- goto out;
-+ if (ef->is_freed) {
-+ free_ef(ef);
-+ } else {
-+ ef->dentry = NULL;
-+ }
-
- dentry->d_fsdata = NULL;
-- ef->dentry = NULL;
- out:
- mutex_unlock(&eventfs_mutex);
- }
-@@ -306,10 +364,9 @@ create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
- inode_lock(parent->d_inode);
-
- if (ef->ei)
-- dentry = create_dir(ef->name, parent, ef->data);
-+ dentry = create_dir(ef, parent, ef->data);
- else
-- dentry = create_file(ef->name, ef->mode, parent,
-- ef->data, ef->fop);
-+ dentry = create_file(ef, parent, ef->data, ef->fop);
-
- if (!lookup)
- inode_unlock(parent->d_inode);
-@@ -475,6 +532,7 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
- if (d) {
- struct dentry **tmp;
-
-+
- tmp = krealloc(dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
- if (!tmp)
- break;
-@@ -549,13 +607,14 @@ static struct eventfs_file *eventfs_prepare_ef(const char *name, umode_t mode,
- return ERR_PTR(-ENOMEM);
- }
- INIT_LIST_HEAD(&ef->ei->e_top_files);
-+ ef->mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
- } else {
- ef->ei = NULL;
-+ ef->mode = mode;
- }
-
- ef->iop = iop;
- ef->fop = fop;
-- ef->mode = mode;
- ef->data = data;
- return ef;
- }
-@@ -772,25 +831,64 @@ int eventfs_add_file(const char *name, umode_t mode,
- return 0;
- }
-
--static void free_ef(struct rcu_head *head)
-+static LLIST_HEAD(free_list);
-+
-+static void eventfs_workfn(struct work_struct *work)
-+{
-+ struct eventfs_file *ef, *tmp;
-+ struct llist_node *llnode;
-+
-+ llnode = llist_del_all(&free_list);
-+ llist_for_each_entry_safe(ef, tmp, llnode, llist) {
-+ /* This should only get here if it had a dentry */
-+ if (!WARN_ON_ONCE(!ef->dentry))
-+ dput(ef->dentry);
-+ }
-+}
-+
-+static DECLARE_WORK(eventfs_work, eventfs_workfn);
-+
-+static void free_rcu_ef(struct rcu_head *head)
- {
- struct eventfs_file *ef = container_of(head, struct eventfs_file, rcu);
-
-- kfree(ef->name);
-- kfree(ef->ei);
-- kfree(ef);
-+ if (ef->dentry) {
-+ /* Do not free the ef until all references of dentry are gone */
-+ if (llist_add(&ef->llist, &free_list))
-+ queue_work(system_unbound_wq, &eventfs_work);
-+ return;
-+ }
-+
-+ free_ef(ef);
-+}
-+
-+static void unhook_dentry(struct dentry *dentry)
-+{
-+ if (!dentry)
-+ return;
-+ /*
-+ * Need to add a reference to the dentry that is expected by
-+ * simple_recursive_removal(), which will include a dput().
-+ */
-+ dget(dentry);
-+
-+ /*
-+ * Also add a reference for the dput() in eventfs_workfn().
-+ * That is required as that dput() will free the ei after
-+ * the SRCU grace period is over.
-+ */
-+ dget(dentry);
- }
-
- /**
- * eventfs_remove_rec - remove eventfs dir or file from list
- * @ef: eventfs_file to be removed.
-- * @head: to create list of eventfs_file to be deleted
- * @level: to check recursion depth
- *
- * The helper function eventfs_remove_rec() is used to clean up and free the
- * associated data from eventfs for both of the added functions.
- */
--static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head, int level)
-+static void eventfs_remove_rec(struct eventfs_file *ef, int level)
- {
- struct eventfs_file *ef_child;
-
-@@ -810,12 +908,16 @@ static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head,
- /* search for nested folders or files */
- list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
- lockdep_is_held(&eventfs_mutex)) {
-- eventfs_remove_rec(ef_child, head, level + 1);
-+ eventfs_remove_rec(ef_child, level + 1);
- }
- }
-
-+ ef->is_freed = 1;
-+
-+ unhook_dentry(ef->dentry);
-+
- list_del_rcu(&ef->list);
-- list_add_tail(&ef->del_list, head);
-+ call_srcu(&eventfs_srcu, &ef->rcu, free_rcu_ef);
- }
-
- /**
-@@ -826,61 +928,22 @@ static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head,
- */
- void eventfs_remove(struct eventfs_file *ef)
- {
-- struct eventfs_file *tmp;
-- LIST_HEAD(ef_del_list);
-- struct dentry *dentry_list = NULL;
- struct dentry *dentry;
-
- if (!ef)
- return;
-
- mutex_lock(&eventfs_mutex);
-- eventfs_remove_rec(ef, &ef_del_list, 0);
-- list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
-- if (ef->dentry) {
-- unsigned long ptr = (unsigned long)dentry_list;
--
-- /* Keep the dentry from being freed yet */
-- dget(ef->dentry);
--
-- /*
-- * Paranoid: The dget() above should prevent the dentry
-- * from being freed and calling eventfs_set_ef_status_free().
-- * But just in case, set the link list LSB pointer to 1
-- * and have eventfs_set_ef_status_free() check that to
-- * make sure that if it does happen, it will not think
-- * the d_fsdata is an event_file.
-- *
-- * For this to work, no event_file should be allocated
-- * on a odd space, as the ef should always be allocated
-- * to be at least word aligned. Check for that too.
-- */
-- WARN_ON_ONCE(ptr & 1);
--
-- ef->dentry->d_fsdata = (void *)(ptr | 1);
-- dentry_list = ef->dentry;
-- ef->dentry = NULL;
-- }
-- call_srcu(&eventfs_srcu, &ef->rcu, free_ef);
-- }
-+ dentry = ef->dentry;
-+ eventfs_remove_rec(ef, 0);
- mutex_unlock(&eventfs_mutex);
-
-- while (dentry_list) {
-- unsigned long ptr;
--
-- dentry = dentry_list;
-- ptr = (unsigned long)dentry->d_fsdata & ~1UL;
-- dentry_list = (struct dentry *)ptr;
-- dentry->d_fsdata = NULL;
-- d_invalidate(dentry);
-- mutex_lock(&eventfs_mutex);
-- /* dentry should now have at least a single reference */
-- WARN_ONCE((int)d_count(dentry) < 1,
-- "dentry %p less than one reference (%d) after invalidate\n",
-- dentry, d_count(dentry));
-- mutex_unlock(&eventfs_mutex);
-- dput(dentry);
-- }
-+ /*
-+ * If any of the ei children has a dentry, then the ei itself
-+ * must have a dentry.
-+ */
-+ if (dentry)
-+ simple_recursive_removal(dentry, NULL);
- }
-
- /**
-@@ -891,6 +954,8 @@ void eventfs_remove(struct eventfs_file *ef)
- */
- void eventfs_remove_events_dir(struct dentry *dentry)
- {
-+ struct eventfs_file *ef_child;
-+ struct eventfs_inode *ei;
- struct tracefs_inode *ti;
-
- if (!dentry || !dentry->d_inode)
-@@ -900,6 +965,11 @@ void eventfs_remove_events_dir(struct dentry *dentry)
- if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
- return;
-
-- d_invalidate(dentry);
-- dput(dentry);
-+ mutex_lock(&eventfs_mutex);
-+ ei = ti->private;
-+ list_for_each_entry_srcu(ef_child, &ei->e_top_files, list,
-+ lockdep_is_held(&eventfs_mutex)) {
-+ eventfs_remove_rec(ef_child, 0);
-+ }
-+ mutex_unlock(&eventfs_mutex);
- }
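eventfs above shrinks mode to a 31-bit field and reuses bits 16-18, above the at-most-16-bit file mode, as the EVENTFS_SAVE_MODE/UID/GID markers recording whether the user overrode each attribute. A small standalone sketch of packing such "was it explicitly set" flags next to the value itself; names here are hypothetical:

#include <stdio.h>

#define MODE_MASK 0x0000ffffu  /* a umode_t fits in the low 16 bits */
#define SAVE_MODE (1u << 16)   /* flags live above MODE_MASK */

static unsigned int mode = 0644;

static void set_mode(unsigned int new_mode)
{
	mode = (mode & ~MODE_MASK) | (new_mode & MODE_MASK) | SAVE_MODE;
}

int main(void)
{
	printf("saved? %d mode=%o\n", !!(mode & SAVE_MODE), mode & MODE_MASK);
	set_mode(0600);
	printf("saved? %d mode=%o\n", !!(mode & SAVE_MODE), mode & MODE_MASK);
	return 0;
}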
-diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
-index 0e5dba2343ea1..e6609067ef261 100644
---- a/fs/xfs/xfs_inode_item_recover.c
-+++ b/fs/xfs/xfs_inode_item_recover.c
-@@ -369,24 +369,26 @@ xlog_recover_inode_commit_pass2(
- * superblock flag to determine whether we need to look at di_flushiter
- * to skip replay when the on disk inode is newer than the log one
- */
-- if (!xfs_has_v3inodes(mp) &&
-- ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
-- /*
-- * Deal with the wrap case, DI_MAX_FLUSH is less
-- * than smaller numbers
-- */
-- if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
-- ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
-- /* do nothing */
-- } else {
-- trace_xfs_log_recover_inode_skip(log, in_f);
-- error = 0;
-- goto out_release;
-+ if (!xfs_has_v3inodes(mp)) {
-+ if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
-+ /*
-+ * Deal with the wrap case, DI_MAX_FLUSH is less
-+ * than smaller numbers
-+ */
-+ if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
-+ ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
-+ /* do nothing */
-+ } else {
-+ trace_xfs_log_recover_inode_skip(log, in_f);
-+ error = 0;
-+ goto out_release;
-+ }
- }
-+
-+ /* Take the opportunity to reset the flush iteration count */
-+ ldip->di_flushiter = 0;
- }
-
-- /* Take the opportunity to reset the flush iteration count */
-- ldip->di_flushiter = 0;
-
- if (unlikely(S_ISREG(ldip->di_mode))) {
- if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
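The XFS hunk restructures the replay guard but keeps its wrap rule: an on-disk di_flushiter of DI_MAX_FLUSH counts as older than a small log value, because the 16-bit counter wraps. The decision in isolation, as a standalone sketch whose values mirror the hunk but which is not the XFS code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DI_MAX_FLUSH 0xffff

/* Replay the log copy over the on-disk copy when the log counter is
 * >= the disk counter, plus the wrap case: disk == DI_MAX_FLUSH with a
 * small log counter means the counter wrapped and the log is newer. */
static bool replay_inode(uint16_t log_iter, uint16_t disk_iter)
{
	if (log_iter >= disk_iter)
		return true;
	return disk_iter == DI_MAX_FLUSH && log_iter < (DI_MAX_FLUSH >> 1);
}

int main(void)
{
	printf("%d\n", replay_inode(7, 5));            /* 1: log newer */
	printf("%d\n", replay_inode(5, 7));            /* 0: disk newer, skip */
	printf("%d\n", replay_inode(3, DI_MAX_FLUSH)); /* 1: wrapped, log newer */
	return 0;
}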
-diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
-index 254685085c825..0b7eab0ef7d7f 100644
---- a/include/acpi/acpi_bus.h
-+++ b/include/acpi/acpi_bus.h
-@@ -539,6 +539,7 @@ int acpi_device_set_power(struct acpi_device *device, int state);
- int acpi_bus_init_power(struct acpi_device *device);
- int acpi_device_fix_up_power(struct acpi_device *device);
- void acpi_device_fix_up_power_extended(struct acpi_device *adev);
-+void acpi_device_fix_up_power_children(struct acpi_device *adev);
- int acpi_bus_update_power(acpi_handle handle, int *state_p);
- int acpi_device_update_power(struct acpi_device *device, int *state_p);
- bool acpi_bus_power_manageable(acpi_handle handle);
-diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
-index 3c8bba9f1114a..be1dd4c1a9174 100644
---- a/include/acpi/ghes.h
-+++ b/include/acpi/ghes.h
-@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
- void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
-
- struct list_head *ghes_get_devices(void);
-+
-+void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
- #else
- static inline struct list_head *ghes_get_devices(void) { return NULL; }
-+
-+static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
- #endif
-
- int ghes_estatus_pool_init(unsigned int num_ghes);
-diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
-index 05100e91ecb96..6fc9bb2979e45 100644
---- a/include/drm/bridge/samsung-dsim.h
-+++ b/include/drm/bridge/samsung-dsim.h
-@@ -53,6 +53,7 @@ struct samsung_dsim_driver_data {
- unsigned int plltmr_reg;
- unsigned int has_freqband:1;
- unsigned int has_clklane_stop:1;
-+ unsigned int has_broken_fifoctrl_emptyhdr:1;
- unsigned int num_clks;
- unsigned int min_freq;
- unsigned int max_freq;
-diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
-index 446394f846064..6ad02ad9c7b42 100644
---- a/include/linux/amd-pstate.h
-+++ b/include/linux/amd-pstate.h
-@@ -70,6 +70,10 @@ struct amd_cpudata {
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
-+ u32 min_limit_perf;
-+ u32 max_limit_perf;
-+ u32 min_limit_freq;
-+ u32 max_limit_freq;
-
- u32 max_freq;
- u32 min_freq;
-diff --git a/include/linux/bpf.h b/include/linux/bpf.h
-index 49f8b691496c4..392f581af2cee 100644
---- a/include/linux/bpf.h
-+++ b/include/linux/bpf.h
-@@ -903,10 +903,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
- aux->ctx_field_size = size;
- }
-
-+static bool bpf_is_ldimm64(const struct bpf_insn *insn)
-+{
-+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
-+}
-+
- static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
- {
-- return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
-- insn->src_reg == BPF_PSEUDO_FUNC;
-+ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
- }
-
- struct bpf_prog_ops {
-@@ -1029,6 +1033,11 @@ struct btf_func_model {
- */
- #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
-
-+/* Indicate that current trampoline is in a tail call context. Then, it has to
-+ * cache and restore tail_call_cnt to avoid infinite tail call loop.
-+ */
-+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
-+
- /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
- * bytes on x86.
- */
-diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
-index ec32ec58c59f7..ace3a4ce2fc98 100644
---- a/include/linux/clk-provider.h
-+++ b/include/linux/clk-provider.h
-@@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
- unsigned long parent_rate);
-
- /**
-- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
-+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
- *
- * @num: Numerator of the duty cycle ratio
- * @den: Denominator of the duty cycle ratio
-@@ -129,7 +129,7 @@ struct clk_duty {
- * @restore_context: Restore the context of the clock after a restoration
- * of power.
- *
-- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
-+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
- * parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call. If the
- * driver cannot figure out a rate for this clock, it must return
-@@ -456,7 +456,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
- * clock with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
-- * @parent_name: name of clock's parent
-+ * @parent_data: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- * @fixed_accuracy: non-adjustable clock accuracy
-@@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
- * the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
-- * @parent_name: name of clock's parent
-+ * @parent_data: name of clock's parent
- * @flags: framework-specific flags
- * @fixed_rate: non-adjustable clock rate
- */
-@@ -649,7 +649,7 @@ struct clk_div_table {
- * Clock with an adjustable divider affecting its output frequency. Implements
- * .recalc_rate, .set_rate and .round_rate
- *
-- * Flags:
-+ * @flags:
- * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
- * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
- * the raw value read from the register, with the value of zero considered
-@@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
- * @mwidth: width of the numerator bit field
- * @nshift: shift to the denominator bit field
- * @nwidth: width of the denominator bit field
-+ * @approximation: clk driver's callback for calculating the divider clock
- * @lock: register lock
- *
- * Clock with adjustable fractional divider affecting its output frequency.
- *
-- * Flags:
-+ * @flags:
- * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
- * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
- * is set then the numerator and denominator are both the value read
-@@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
- * Clock with an adjustable multiplier affecting its output frequency.
- * Implements .recalc_rate, .set_rate and .round_rate
- *
-- * Flags:
-+ * @flags:
- * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
- * from the register, with 0 being a valid value effectively
- * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
-diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
-index 068f7738be22a..28c1d3d77b70f 100644
---- a/include/linux/cpuhotplug.h
-+++ b/include/linux/cpuhotplug.h
-@@ -189,6 +189,7 @@ enum cpuhp_state {
- /* Must be the last timer callback */
- CPUHP_AP_DUMMY_TIMER_STARTING,
- CPUHP_AP_ARM_XEN_STARTING,
-+ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
- CPUHP_AP_ARM_CORESIGHT_STARTING,
- CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
- CPUHP_AP_ARM64_ISNDEP_STARTING,
-diff --git a/include/linux/damon.h b/include/linux/damon.h
-index ae2664d1d5f1d..c70cca8a839f7 100644
---- a/include/linux/damon.h
-+++ b/include/linux/damon.h
-@@ -642,6 +642,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
- return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
- }
-
-+static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
-+{
-+ /* {aggr,sample}_interval are unsigned long, hence could overflow */
-+ return min(attrs->aggr_interval / attrs->sample_interval,
-+ (unsigned long)UINT_MAX);
-+}
-+
-
- int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
- int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
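damon_max_nr_accesses() above divides two unsigned long intervals and clamps the quotient to UINT_MAX before it lands in an unsigned int counter. The same guard in isolation; min() here is a local macro standing in for the kernel's:

#include <limits.h>
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

static unsigned int max_nr_accesses(unsigned long aggr_us, unsigned long sample_us)
{
	/* The quotient of two unsigned longs can exceed UINT_MAX on 64-bit;
	 * clamp before the implicit narrowing in the return. */
	return min(aggr_us / sample_us, (unsigned long)UINT_MAX);
}

int main(void)
{
	printf("%u\n", max_nr_accesses(100000, 5000)); /* 20 */
	printf("%u\n", max_nr_accesses(ULONG_MAX, 1)); /* clamped to UINT_MAX */
	return 0;
}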
-diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
-index ebe78bd3d121d..b3772edca2e6e 100644
---- a/include/linux/dma-fence.h
-+++ b/include/linux/dma-fence.h
-@@ -498,6 +498,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
- return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
- }
-
-+/**
-+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
-+ * @f1: the first fence from the same context
-+ * @f2: the second fence from the same context
-+ *
-+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
-+ * fences must be from the same context, since a seqno is not re-used across
-+ * contexts.
-+ */
-+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
-+ struct dma_fence *f2)
-+{
-+ return f1 == f2 || dma_fence_is_later(f1, f2);
-+}
-+
- /**
- * dma_fence_later - return the chronologically later fence
- * @f1: the first fence from the same context
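dma_fence_is_later_or_same() above simply folds pointer equality into the existing ordering test, since two distinct fences in one context never share a seqno. The underlying "later" test for 32-bit seqnos is commonly the wrap-safe signed-difference comparison, sketched standalone here as an assumption about the general technique rather than a copy of __dma_fence_is_later():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Serial-number ordering for sequence numbers from one context:
 * a is later than b when the signed difference is positive. */
static bool seqno_later(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

static bool seqno_later_or_same(uint32_t a, uint32_t b)
{
	return a == b || seqno_later(a, b);
}

int main(void)
{
	printf("%d\n", seqno_later_or_same(5, 5));           /* 1: same */
	printf("%d\n", seqno_later_or_same(5, 7));           /* 0: earlier */
	printf("%d\n", seqno_later_or_same(2, 0xfffffffeu)); /* 1: wrapped */
	return 0;
}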
-diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
-index 62b61527bcc4f..1b523fd48586f 100644
---- a/include/linux/ethtool.h
-+++ b/include/linux/ethtool.h
-@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
-
- /**
- * ethtool_sprintf - Write formatted string to ethtool string data
-- * @data: Pointer to start of string to update
-+ * @data: Pointer to a pointer to the start of string to update
- * @fmt: Format of string to write
- *
-- * Write formatted string to data. Update data to point at start of
-+ * Write formatted string to *data. Update *data to point at start of
- * next string.
- */
- extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
-diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
-index a82a4bb6ce68b..cf1adceb02697 100644
---- a/include/linux/f2fs_fs.h
-+++ b/include/linux/f2fs_fs.h
-@@ -104,6 +104,7 @@ enum f2fs_error {
- ERROR_CORRUPTED_VERITY_XATTR,
- ERROR_CORRUPTED_XATTR,
- ERROR_INVALID_NODE_REFERENCE,
-+ ERROR_INCONSISTENT_NAT,
- ERROR_MAX,
- };
-
-diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
-index 107613f7d7920..f6cd0f909d9fb 100644
---- a/include/linux/generic-radix-tree.h
-+++ b/include/linux/generic-radix-tree.h
-@@ -38,6 +38,7 @@
-
- #include <asm/page.h>
- #include <linux/bug.h>
-+#include <linux/limits.h>
- #include <linux/log2.h>
- #include <linux/math.h>
- #include <linux/types.h>
-@@ -184,6 +185,12 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
- static inline void __genradix_iter_advance(struct genradix_iter *iter,
- size_t obj_size)
- {
-+ if (iter->offset + obj_size < iter->offset) {
-+ iter->offset = SIZE_MAX;
-+ iter->pos = SIZE_MAX;
-+ return;
-+ }
-+
- iter->offset += obj_size;
-
- if (!is_power_of_2(obj_size) &&
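__genradix_iter_advance() now saturates: if adding obj_size would wrap the size_t offset, both offset and pos pin to SIZE_MAX instead of wrapping back to small values and re-visiting entries. The overflow test on its own, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Advance a byte offset, saturating at SIZE_MAX instead of wrapping.
 * "a + b < a" is the standard unsigned-overflow test. */
static size_t advance(size_t offset, size_t obj_size)
{
	if (offset + obj_size < offset)
		return SIZE_MAX;
	return offset + obj_size;
}

int main(void)
{
	printf("%zu\n", advance(100, 16));                     /* 116 */
	printf("%d\n", advance(SIZE_MAX - 4, 16) == SIZE_MAX); /* 1: saturated */
	return 0;
}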
-diff --git a/include/linux/hid.h b/include/linux/hid.h
-index 964ca1f15e3f6..3b08a29572298 100644
---- a/include/linux/hid.h
-+++ b/include/linux/hid.h
-@@ -679,6 +679,7 @@ struct hid_device { /* device report descriptor */
- struct list_head debug_list;
- spinlock_t debug_list_lock;
- wait_queue_head_t debug_wait;
-+ struct kref ref;
-
- unsigned int id; /* system unique id */
-
-@@ -687,6 +688,8 @@ struct hid_device { /* device report descriptor */
- #endif /* CONFIG_BPF */
- };
-
-+void hiddev_free(struct kref *ref);
-+
- #define to_hid_device(pdev) \
- container_of(pdev, struct hid_device, dev)
-
-diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
-index 39fbfb4be944b..9da4f3f1e6d61 100644
---- a/include/linux/hisi_acc_qm.h
-+++ b/include/linux/hisi_acc_qm.h
-@@ -144,6 +144,13 @@ enum qm_vf_state {
- QM_NOT_READY,
- };
-
-+enum qm_misc_ctl_bits {
-+ QM_DRIVER_REMOVING = 0x0,
-+ QM_RST_SCHED,
-+ QM_RESETTING,
-+ QM_MODULE_PARAM,
-+};
-+
- enum qm_cap_bits {
- QM_SUPPORT_DB_ISOLATION = 0x0,
- QM_SUPPORT_FUNC_QOS,
-diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
-index 8a3115516a1ba..136e9842120e8 100644
---- a/include/linux/hw_random.h
-+++ b/include/linux/hw_random.h
-@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
- extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
-
- extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
-+extern long hwrng_yield(struct hwrng *rng);
-
- #endif /* LINUX_HWRANDOM_H_ */
-diff --git a/include/linux/idr.h b/include/linux/idr.h
-index a0dce14090a9e..da5f5fa4a3a6a 100644
---- a/include/linux/idr.h
-+++ b/include/linux/idr.h
-@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
- */
- #define idr_for_each_entry_ul(idr, entry, tmp, id) \
- for (tmp = 0, id = 0; \
-- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
-+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
- tmp = id, ++id)
-
- /**
-@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
- * @id: Entry ID.
- *
- * Continue to iterate over entries, continuing after the current position.
-+ * After normal termination @entry is left with the value NULL. This
-+ * is convenient for a "not found" value.
- */
- #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
- for (tmp = id; \
-- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
-+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
- tmp = id, ++id)
-
- /*
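The idr.h change moves the wrap guard inside the assignment, so when either loop macro terminates normally, entry is left NULL and doubles as a "not found" sentinel. A toy macro of the same shape over a plain array (the registry and names are hypothetical; the real guard protects against ULONG_MAX wrap rather than array bounds):

#include <stdio.h>

static const char *slots[] = { "a", "b", NULL, "c" };

/* entry ends up NULL after normal termination, so callers can test it. */
#define for_each_slot(entry, id)                                  \
	for ((id) = 0;                                            \
	     ((entry) = (id) < 4 ? slots[(id)] : NULL) != NULL;   \
	     ++(id))

int main(void)
{
	const char *entry;
	unsigned long id;

	for_each_slot(entry, id)
		printf("%lu: %s\n", id, entry);
	printf("after loop entry=%p\n", (void *)entry); /* NULL: not found */
	return 0;
}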
-diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
-index 13d19b9be9f4a..5fd664fb71c86 100644
---- a/include/linux/io_uring_types.h
-+++ b/include/linux/io_uring_types.h
-@@ -327,6 +327,9 @@ struct io_ring_ctx {
-
- struct list_head io_buffers_cache;
-
-+ /* deferred free list, protected by ->uring_lock */
-+ struct hlist_head io_buf_list;
-+
- /* Keep this last, we don't need it for the fast path */
- struct wait_queue_head poll_wq;
- struct io_restriction restrictions;
-diff --git a/include/linux/iommu.h b/include/linux/iommu.h
-index c50a769d569a6..0225cf7445de2 100644
---- a/include/linux/iommu.h
-+++ b/include/linux/iommu.h
-@@ -703,6 +703,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
- dev->iommu->priv = priv;
- }
-
-+extern struct mutex iommu_probe_device_lock;
- int iommu_probe_device(struct device *dev);
-
- int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
-diff --git a/include/linux/irq.h b/include/linux/irq.h
-index d8a6fdce93738..90081afa10ce5 100644
---- a/include/linux/irq.h
-+++ b/include/linux/irq.h
-@@ -215,8 +215,6 @@ struct irq_data {
- * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
- * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
- * IRQD_CAN_RESERVE - Can use reservation mode
-- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
-- * required
- * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
- * from actual interrupt context.
- * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
-@@ -247,11 +245,10 @@ enum {
- IRQD_SINGLE_TARGET = BIT(24),
- IRQD_DEFAULT_TRIGGER_SET = BIT(25),
- IRQD_CAN_RESERVE = BIT(26),
-- IRQD_MSI_NOMASK_QUIRK = BIT(27),
-- IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28),
-- IRQD_AFFINITY_ON_ACTIVATE = BIT(29),
-- IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30),
-- IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31),
-+ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
-+ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
-+ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
-+ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
- };
-
- #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
-@@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
- return __irqd_to_state(d) & IRQD_CAN_RESERVE;
- }
-
--static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
--{
-- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
--}
--
--static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
--{
-- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
--}
--
--static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
--{
-- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
--}
--
- static inline void irqd_set_affinity_on_activate(struct irq_data *d)
- {
- __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
-diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
-index ac962c4cb44b1..2b8d85aae0832 100644
---- a/include/linux/lsm_hook_defs.h
-+++ b/include/linux/lsm_hook_defs.h
-@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
- LSM_HOOK(int, 0, syslog, int type)
- LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
- const struct timezone *tz)
--LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
-+LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
- LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
- LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
- LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
-@@ -273,7 +273,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
- LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
- LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
- LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
--LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
-+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
- u32 *ctxlen)
-
- #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
-diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
-index 47e7a3a61ce69..e8bcad641d8c2 100644
---- a/include/linux/mfd/core.h
-+++ b/include/linux/mfd/core.h
-@@ -92,7 +92,7 @@ struct mfd_cell {
- * (above) when matching OF nodes with devices that have identical
- * compatible strings
- */
-- const u64 of_reg;
-+ u64 of_reg;
-
- /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
- bool use_of_reg;
-diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
-index daa2f40d9ce65..7b12eebc5586d 100644
---- a/include/linux/mmc/card.h
-+++ b/include/linux/mmc/card.h
-@@ -295,7 +295,9 @@ struct mmc_card {
- #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
- #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
- #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
-+#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
-
-+ bool written_flag; /* Indicates eMMC has been written since power on */
- bool reenable_cmdq; /* Re-enable Command Queue */
-
- unsigned int erase_size; /* erase size in sectors */
-diff --git a/include/linux/msi.h b/include/linux/msi.h
-index a50ea79522f85..ddace8c34dcf9 100644
---- a/include/linux/msi.h
-+++ b/include/linux/msi.h
-@@ -547,12 +547,6 @@ enum {
- MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
- /* Free MSI descriptors */
- MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
-- /*
-- * Quirk to handle MSI implementations which do not provide
-- * masking. Currently known to affect x86, but has to be partially
-- * handled in the core MSI code.
-- */
-- MSI_FLAG_NOMASK_QUIRK = (1 << 7),
-
- /* Mask for the generic functionality */
- MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 0896aaa91dd7b..b8e60a20416ba 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -1774,6 +1774,13 @@ enum netdev_ml_priv_type {
- ML_PRIV_CAN,
- };
-
-+enum netdev_stat_type {
-+ NETDEV_PCPU_STAT_NONE,
-+ NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
-+ NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
-+ NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
-+};
-+
- /**
- * struct net_device - The DEVICE structure.
- *
-@@ -1968,10 +1975,14 @@ enum netdev_ml_priv_type {
- *
- * @ml_priv: Mid-layer private
- * @ml_priv_type: Mid-layer private type
-- * @lstats: Loopback statistics
-- * @tstats: Tunnel statistics
-- * @dstats: Dummy statistics
-- * @vstats: Virtual ethernet statistics
-+ *
-+ * @pcpu_stat_type: Type of device statistics which the core should
-+ * allocate/free: none, lstats, tstats, dstats. none
-+ * means the driver is handling statistics allocation/
-+ * freeing internally.
-+ * @lstats: Loopback statistics: packets, bytes
-+ * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
-+ * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
- *
- * @garp_port: GARP
- * @mrp_port: MRP
-@@ -2328,6 +2339,7 @@ struct net_device {
- void *ml_priv;
- enum netdev_ml_priv_type ml_priv_type;
-
-+ enum netdev_stat_type pcpu_stat_type:8;
- union {
- struct pcpu_lstats __percpu *lstats;
- struct pcpu_sw_netstats __percpu *tstats;
-@@ -2725,6 +2737,16 @@ struct pcpu_sw_netstats {
- struct u64_stats_sync syncp;
- } __aligned(4 * sizeof(u64));
-
-+struct pcpu_dstats {
-+ u64 rx_packets;
-+ u64 rx_bytes;
-+ u64 rx_drops;
-+ u64 tx_packets;
-+ u64 tx_bytes;
-+ u64 tx_drops;
-+ struct u64_stats_sync syncp;
-+} __aligned(8 * sizeof(u64));
-+
- struct pcpu_lstats {
- u64_stats_t packets;
- u64_stats_t bytes;
-@@ -5214,5 +5236,6 @@ extern struct net_device *blackhole_netdev;
- #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
- #define DEV_STATS_ADD(DEV, FIELD, VAL) \
- atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
-+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
-
- #endif /* _LINUX_NETDEVICE_H */
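The netdevice.h hunk adds a pcpu_stat_type tag so the core, not each driver, can allocate and free the right per-CPU stats block for the existing lstats/tstats/dstats union. A reduced single-threaded sketch of that tag-driven allocation; the struct layouts and helper are stand-ins, not the netdev core:

#include <stdio.h>
#include <stdlib.h>

enum stat_type { STAT_NONE, STAT_LSTATS, STAT_TSTATS };

struct lstats { unsigned long packets, bytes; };
struct tstats { unsigned long rx_packets, tx_packets; };

struct device {
	enum stat_type stat_type;
	void *stats;           /* stand-in for the per-cpu pointer union */
};

/* The core picks the right stats block from the tag, so drivers stop
 * open-coding their own alloc/free. */
static int device_alloc_stats(struct device *d)
{
	switch (d->stat_type) {
	case STAT_LSTATS: d->stats = calloc(1, sizeof(struct lstats)); break;
	case STAT_TSTATS: d->stats = calloc(1, sizeof(struct tstats)); break;
	case STAT_NONE:   d->stats = NULL; return 0; /* driver-managed */
	}
	return d->stats ? 0 : -1;
}

int main(void)
{
	struct device d = { .stat_type = STAT_TSTATS };
	if (device_alloc_stats(&d) == 0)
		printf("allocated tstats at %p\n", d.stats);
	free(d.stats);
	return 0;
}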
-diff --git a/include/linux/numa.h b/include/linux/numa.h
-index 59df211d051fa..a904861de8000 100644
---- a/include/linux/numa.h
-+++ b/include/linux/numa.h
-@@ -12,6 +12,7 @@
- #define MAX_NUMNODES (1 << NODES_SHIFT)
-
- #define NUMA_NO_NODE (-1)
-+#define NUMA_NO_MEMBLK (-1)
-
- /* optionally keep NUMA memory info available post init */
- #ifdef CONFIG_NUMA_KEEP_MEMINFO
-@@ -25,7 +26,7 @@
- #include <asm/sparsemem.h>
-
- /* Generic implementation available */
--int numa_map_to_online_node(int node);
-+int numa_nearest_node(int node, unsigned int state);
-
- #ifndef memory_add_physaddr_to_nid
- static inline int memory_add_physaddr_to_nid(u64 start)
-@@ -43,11 +44,18 @@ static inline int phys_to_target_node(u64 start)
- return 0;
- }
- #endif
-+#ifndef numa_fill_memblks
-+static inline int __init numa_fill_memblks(u64 start, u64 end)
-+{
-+ return NUMA_NO_MEMBLK;
-+}
-+#endif
- #else /* !CONFIG_NUMA */
--static inline int numa_map_to_online_node(int node)
-+static inline int numa_nearest_node(int node, unsigned int state)
- {
- return NUMA_NO_NODE;
- }
-+
- static inline int memory_add_physaddr_to_nid(u64 start)
- {
- return 0;
-@@ -58,6 +66,8 @@ static inline int phys_to_target_node(u64 start)
- }
- #endif
-
-+#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
-+
- #ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
- extern const struct attribute_group arch_node_dev_group;
- #endif
-diff --git a/include/linux/objtool.h b/include/linux/objtool.h
-index 03f82c2c2ebf6..b5440e7da55bf 100644
---- a/include/linux/objtool.h
-+++ b/include/linux/objtool.h
-@@ -130,7 +130,8 @@
- * it will be ignored.
- */
- .macro VALIDATE_UNRET_BEGIN
--#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
-+#if defined(CONFIG_NOINSTR_VALIDATION) && \
-+ (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
- .Lhere_\@:
- .pushsection .discard.validate_unret
- .long .Lhere_\@ - .
-diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
-index 351c3b7f93a14..8c9608b217b00 100644
---- a/include/linux/pagemap.h
-+++ b/include/linux/pagemap.h
-@@ -204,6 +204,8 @@ enum mapping_flags {
- AS_NO_WRITEBACK_TAGS = 5,
- AS_LARGE_FOLIO_SUPPORT = 6,
- AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
-+ AS_STABLE_WRITES, /* must wait for writeback before modifying
-+ folio contents */
- };
-
- /**
-@@ -289,6 +291,21 @@ static inline void mapping_clear_release_always(struct address_space *mapping)
- clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
- }
-
-+static inline bool mapping_stable_writes(const struct address_space *mapping)
-+{
-+ return test_bit(AS_STABLE_WRITES, &mapping->flags);
-+}
-+
-+static inline void mapping_set_stable_writes(struct address_space *mapping)
-+{
-+ set_bit(AS_STABLE_WRITES, &mapping->flags);
-+}
-+
-+static inline void mapping_clear_stable_writes(struct address_space *mapping)
-+{
-+ clear_bit(AS_STABLE_WRITES, &mapping->flags);
-+}
-+
- static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
- {
- return mapping->gfp_mask;
-diff --git a/include/linux/pci.h b/include/linux/pci.h
-index 8c7c2c3c6c652..b56417276042d 100644
---- a/include/linux/pci.h
-+++ b/include/linux/pci.h
-@@ -1624,6 +1624,8 @@ struct msix_entry {
- u16 entry; /* Driver uses to specify entry, OS writes */
- };
-
-+struct msi_domain_template;
-+
- #ifdef CONFIG_PCI_MSI
- int pci_msi_vec_count(struct pci_dev *dev);
- void pci_disable_msi(struct pci_dev *dev);
-@@ -1656,6 +1658,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
- void pci_free_irq_vectors(struct pci_dev *dev);
- int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
- const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
-+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
-+ unsigned int hwsize, void *data);
-+struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
-+ const struct irq_affinity_desc *affdesc);
-+void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
-
- #else
- static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-@@ -1719,6 +1726,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
- {
- return cpu_possible_mask;
- }
-+
-+static inline bool pci_create_ims_domain(struct pci_dev *pdev,
-+ const struct msi_domain_template *template,
-+ unsigned int hwsize, void *data)
-+{ return false; }
-+
-+static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
-+ union msi_instance_cookie *icookie,
-+ const struct irq_affinity_desc *affdesc)
-+{
-+ struct msi_map map = { .index = -ENOSYS, };
-+
-+ return map;
-+}
-+
-+static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
-+{
-+}
-+
- #endif
-
- /**
-@@ -2616,14 +2642,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
- void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
- #endif
-
--struct msi_domain_template;
--
--bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
-- unsigned int hwsize, void *data);
--struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
-- const struct irq_affinity_desc *affdesc);
--void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
--
- #include <linux/dma-mapping.h>
-
- #define pci_printk(level, pdev, fmt, arg...) \
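The pci.h hunk moves the IMS prototypes under CONFIG_PCI_MSI and supplies static inline stubs for the disabled case, so callers compile unchanged and merely observe failure values (false, -ENOSYS). The classic pattern reduced to a standalone example; the feature macro and names are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* #define HAVE_FANCY_IRQ 1 */  /* flip on to compile the real path */

#ifdef HAVE_FANCY_IRQ
static bool fancy_irq_setup(int hw)
{
	printf("setting up hw irq %d\n", hw);
	return true;
}
#else
/* Stub keeps every caller compiling; it just reports "unsupported". */
static inline bool fancy_irq_setup(int hw)
{
	(void)hw;
	return false;
}
#endif

int main(void)
{
	if (!fancy_irq_setup(3))
		printf("IRQ feature compiled out\n");
	return 0;
}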
-diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
-index 5fb3d4c393a9e..fe4a3589bb3fd 100644
---- a/include/linux/pci_ids.h
-+++ b/include/linux/pci_ids.h
-@@ -180,6 +180,8 @@
- #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
- #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
-
-+#define PCI_VENDOR_ID_ITTIM 0x0b48
-+
- #define PCI_VENDOR_ID_COMPAQ 0x0e11
- #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
- #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
-@@ -579,6 +581,7 @@
- #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
- #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
- #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
-+#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
- #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
- #define PCI_DEVICE_ID_AMD_LANCE 0x2000
- #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
-diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 7b5406e3288d9..e846f87e2d099 100644
---- a/include/linux/perf_event.h
-+++ b/include/linux/perf_event.h
-@@ -843,11 +843,11 @@ struct perf_event {
- };
-
- /*
-- * ,-----------------------[1:n]----------------------.
-- * V V
-- * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
-- * ^ ^ | |
-- * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
-+ * ,-----------------------[1:n]------------------------.
-+ * V V
-+ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
-+ * | |
-+ * `--[n:1]-> pmu <-[1:n]--'
- *
- *
- * struct perf_event_pmu_context lifetime is refcount based and RCU freed
-@@ -865,6 +865,9 @@ struct perf_event {
- * ctx->mutex pinning the configuration. Since we hold a reference on
- * group_leader (through the filedesc) it can't go away, therefore it's
- * associated pmu_ctx must exist and cannot change due to ctx->mutex.
-+ *
-+ * perf_event holds a refcount on perf_event_context
-+ * perf_event holds a refcount on perf_event_pmu_context
- */
- struct perf_event_pmu_context {
- struct pmu *pmu;
-@@ -879,6 +882,7 @@ struct perf_event_pmu_context {
- unsigned int embedded : 1;
-
- unsigned int nr_events;
-+ unsigned int nr_cgroups;
-
- atomic_t refcount; /* event <-> epc */
- struct rcu_head rcu_head;
-diff --git a/include/linux/pm.h b/include/linux/pm.h
-index 1400c37b29c75..629c1633bbd00 100644
---- a/include/linux/pm.h
-+++ b/include/linux/pm.h
-@@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \
- RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
- }
-
--#ifdef CONFIG_PM
--#define _EXPORT_DEV_PM_OPS(name, license, ns) \
-+#define _EXPORT_PM_OPS(name, license, ns) \
- const struct dev_pm_ops name; \
- __EXPORT_SYMBOL(name, license, ns); \
- const struct dev_pm_ops name
--#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
--#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
--#else
--#define _EXPORT_DEV_PM_OPS(name, license, ns) \
-+
-+#define _DISCARD_PM_OPS(name, license, ns) \
- static __maybe_unused const struct dev_pm_ops __static_##name
-+
-+#ifdef CONFIG_PM
-+#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
-+#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
-+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
-+#else
-+#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
- #define EXPORT_PM_FN_GPL(name)
- #define EXPORT_PM_FN_NS_GPL(name, ns)
- #endif
-
--#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
--#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
--#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
--#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
-+#ifdef CONFIG_PM_SLEEP
-+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
-+#else
-+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
-+#endif
-+
-+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
-+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
-+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
-+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
-+
-+#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
-+#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
-+#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
-+#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
-
- /*
- * Use this if you want to use the same suspend and resume callbacks for suspend
-@@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \
- _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
-
- #define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-- EXPORT_DEV_PM_OPS(name) = { \
-+ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
- SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- }
- #define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-- EXPORT_GPL_DEV_PM_OPS(name) = { \
-+ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
- SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- }
- #define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
-- EXPORT_NS_DEV_PM_OPS(name, ns) = { \
-+ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
- SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- }
- #define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
-- EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
-+ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
- SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- }
-
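/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The pm.h hunk factors the export decision into _EXPORT_PM_OPS vs
 * _DISCARD_PM_OPS and keys the new *_SLEEP_* macros off CONFIG_PM_SLEEP,
 * so sleep-only dev_pm_ops are quietly discarded when only runtime PM is
 * built in. A freestanding model of the export-or-discard pattern; the
 * WITH_FEATURE switch and all demo_* names are invented for the demo.
 */
#include <stdio.h>

struct dev_pm_ops_demo { int (*suspend)(void); };

#ifdef WITH_FEATURE
#define EXPORT_OPS(name) const struct dev_pm_ops_demo name
#else
/* Renamed and made static: the object (and anything reachable only
 * through it) can be dropped, and 'unused' silences the warning. */
#define EXPORT_OPS(name) \
        static __attribute__((unused)) const struct dev_pm_ops_demo __static_##name
#endif

static int demo_suspend(void) { return 0; }

EXPORT_OPS(demo_pm_ops) = { .suspend = demo_suspend };

int main(void)
{
#ifdef WITH_FEATURE
        printf("ops exported at %p\n", (void *)&demo_pm_ops);
#else
        printf("ops compiled out and discarded at link time\n");
#endif
        return 0;
}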
-diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
-index a427f13c757f4..85b86768c0b91 100644
---- a/include/linux/power_supply.h
-+++ b/include/linux/power_supply.h
-@@ -767,7 +767,7 @@ struct power_supply_battery_info {
- int bti_resistance_tolerance;
- };
-
--extern struct atomic_notifier_head power_supply_notifier;
-+extern struct blocking_notifier_head power_supply_notifier;
- extern int power_supply_reg_notifier(struct notifier_block *nb);
- extern void power_supply_unreg_notifier(struct notifier_block *nb);
- #if IS_ENABLED(CONFIG_POWER_SUPPLY)
-diff --git a/include/linux/preempt.h b/include/linux/preempt.h
-index 1424670df161d..9aa6358a1a16b 100644
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
- return level;
- }
-
-+/*
-+ * These macro definitions avoid redundant invocations of preempt_count()
-+ * because such invocations would result in redundant loads given that
-+ * preempt_count() is commonly implemented with READ_ONCE().
-+ */
-+
- #define nmi_count() (preempt_count() & NMI_MASK)
- #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
- #ifdef CONFIG_PREEMPT_RT
- # define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
-+# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
- #else
- # define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-+# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
- #endif
--#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
-
- /*
- * Macros to retrieve the current execution context:
-@@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
- #define in_nmi() (nmi_count())
- #define in_hardirq() (hardirq_count())
- #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
--#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
-+#ifdef CONFIG_PREEMPT_RT
-+# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
-+#else
-+# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-+#endif
-
- /*
- * The following macros are deprecated and should not be used in new code:
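/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The preempt.h hunk folds irq_count()'s three masked reads of
 * preempt_count() into one load with a combined mask (split per config,
 * since PREEMPT_RT tracks softirq state outside preempt_count()). A
 * userspace before/after model; the mask values are assumed from the
 * kernel's usual preempt_count() layout, not shown in the hunk.
 */
#include <stdio.h>

#define NMI_MASK      0x00f00000u
#define HARDIRQ_MASK  0x000f0000u
#define SOFTIRQ_MASK  0x0000ff00u

static volatile unsigned int preempt_count_word;

/* a volatile load stands in for the kernel's READ_ONCE() */
static unsigned int read_once(void) { return preempt_count_word; }

/* old shape: three independent loads, one per helper macro */
static unsigned int irq_count_old(void)
{
        return (read_once() & NMI_MASK) |
               (read_once() & HARDIRQ_MASK) |
               (read_once() & SOFTIRQ_MASK);
}

/* new shape: a single load with the masks pre-combined */
static unsigned int irq_count_new(void)
{
        return read_once() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
}

int main(void)
{
        preempt_count_word = 0x00010100u; /* one hardirq + one softirq bit */
        printf("old=%#x new=%#x\n", irq_count_old(), irq_count_new());
        return 0;
}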
-diff --git a/include/linux/pwm.h b/include/linux/pwm.h
-index d2f9f690a9c14..fe0f38ce1bdee 100644
---- a/include/linux/pwm.h
-+++ b/include/linux/pwm.h
-@@ -41,8 +41,8 @@ struct pwm_args {
- };
-
- enum {
-- PWMF_REQUESTED = 1 << 0,
-- PWMF_EXPORTED = 1 << 1,
-+ PWMF_REQUESTED = 0,
-+ PWMF_EXPORTED = 1,
- };
-
- /*
-diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
-index 0ee96ea7a0e90..1b37fa8fc723d 100644
---- a/include/linux/sched/coredump.h
-+++ b/include/linux/sched/coredump.h
-@@ -91,4 +91,14 @@ static inline int get_dumpable(struct mm_struct *mm)
- MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
-
- #define MMF_VM_MERGE_ANY 29
-+#define MMF_HAS_MDWE_NO_INHERIT 30
-+
-+static inline unsigned long mmf_init_flags(unsigned long flags)
-+{
-+ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
-+ flags &= ~((1UL << MMF_HAS_MDWE) |
-+ (1UL << MMF_HAS_MDWE_NO_INHERIT));
-+ return flags & MMF_INIT_MASK;
-+}
-+
- #endif /* _LINUX_SCHED_COREDUMP_H */
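/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * mmf_init_flags() clears both MDWE bits at fork when the no-inherit
 * bit is set, so PR_MDWE_NO_INHERIT children start clean. The same
 * masking logic in userspace; MMF_HAS_MDWE's bit value (28) is assumed,
 * and the real helper additionally masks with MMF_INIT_MASK, omitted
 * here for brevity.
 */
#include <assert.h>

#define MMF_HAS_MDWE            28 /* assumed; not shown in the hunk */
#define MMF_HAS_MDWE_NO_INHERIT 30 /* from the hunk */

static unsigned long mmf_init_flags_demo(unsigned long flags)
{
        if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
                flags &= ~((1UL << MMF_HAS_MDWE) |
                           (1UL << MMF_HAS_MDWE_NO_INHERIT));
        return flags;
}

int main(void)
{
        /* a child of an MDWE + NO_INHERIT parent forks with a clean slate */
        assert(mmf_init_flags_demo((1UL << MMF_HAS_MDWE) |
                                   (1UL << MMF_HAS_MDWE_NO_INHERIT)) == 0);
        /* without NO_INHERIT, MDWE still carries across fork */
        assert(mmf_init_flags_demo(1UL << MMF_HAS_MDWE) ==
               (1UL << MMF_HAS_MDWE));
        return 0;
}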
-diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
-index c1637515a8a41..c953b8c0d2f43 100644
---- a/include/linux/skmsg.h
-+++ b/include/linux/skmsg.h
-@@ -106,6 +106,7 @@ struct sk_psock {
- struct mutex work_mutex;
- struct sk_psock_work_state work_state;
- struct delayed_work work;
-+ struct sock *sk_pair;
- struct rcu_work rwork;
- };
-
-diff --git a/include/linux/socket.h b/include/linux/socket.h
-index 39b74d83c7c4a..cfcb7e2c3813f 100644
---- a/include/linux/socket.h
-+++ b/include/linux/socket.h
-@@ -383,6 +383,7 @@ struct ucred {
- #define SOL_MPTCP 284
- #define SOL_MCTP 285
- #define SOL_SMC 286
-+#define SOL_VSOCK 287
-
- /* IPX options */
- #define IPX_TYPE 1
-diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
-index 7f8b478fdeb3d..8cc7a99927f95 100644
---- a/include/linux/spi/spi.h
-+++ b/include/linux/spi/spi.h
-@@ -566,6 +566,7 @@ struct spi_controller {
- #define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
- #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
- #define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
-+#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
-
- /* Flag indicating if the allocation of this struct is devres-managed */
- bool devm_allocated;
-diff --git a/include/linux/string.h b/include/linux/string.h
-index dbfc66400050f..5077776e995e0 100644
---- a/include/linux/string.h
-+++ b/include/linux/string.h
-@@ -5,7 +5,9 @@
- #include <linux/compiler.h> /* for inline */
- #include <linux/types.h> /* for size_t */
- #include <linux/stddef.h> /* for NULL */
-+#include <linux/err.h> /* for ERR_PTR() */
- #include <linux/errno.h> /* for E2BIG */
-+#include <linux/overflow.h> /* for check_mul_overflow() */
- #include <linux/stdarg.h>
- #include <uapi/linux/string.h>
-
-@@ -14,6 +16,44 @@ extern void *memdup_user(const void __user *, size_t);
- extern void *vmemdup_user(const void __user *, size_t);
- extern void *memdup_user_nul(const void __user *, size_t);
-
-+/**
-+ * memdup_array_user - duplicate array from user space
-+ * @src: source address in user space
-+ * @n: number of array members to copy
-+ * @size: size of one array member
-+ *
-+ * Return: an ERR_PTR() on failure. Result is physically
-+ * contiguous, to be freed by kfree().
-+ */
-+static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
-+{
-+ size_t nbytes;
-+
-+ if (check_mul_overflow(n, size, &nbytes))
-+ return ERR_PTR(-EOVERFLOW);
-+
-+ return memdup_user(src, nbytes);
-+}
-+
-+/**
-+ * vmemdup_array_user - duplicate array from user space
-+ * @src: source address in user space
-+ * @n: number of array members to copy
-+ * @size: size of one array member
-+ *
-+ * Return: an ERR_PTR() on failure. Result may be not
-+ * physically contiguous. Use kvfree() to free.
-+ */
-+static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
-+{
-+ size_t nbytes;
-+
-+ if (check_mul_overflow(n, size, &nbytes))
-+ return ERR_PTR(-EOVERFLOW);
-+
-+ return vmemdup_user(src, nbytes);
-+}
-+
- /*
- * Include machine specific inline routines
- */
-@@ -277,10 +317,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
- */
- #define strtomem_pad(dest, src, pad) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
-+ const size_t _src_len = __builtin_object_size(src, 1); \
- \
- BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
- _dest_len == (size_t)-1); \
-- memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
-+ memcpy_and_pad(dest, _dest_len, src, \
-+ strnlen(src, min(_src_len, _dest_len)), pad); \
- } while (0)
-
- /**
-@@ -298,10 +340,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
- */
- #define strtomem(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
-+ const size_t _src_len = __builtin_object_size(src, 1); \
- \
- BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
- _dest_len == (size_t)-1); \
-- memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
-+ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
- } while (0)
-
- /**
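/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The new memdup_array_user()/vmemdup_array_user() helpers guard the
 * n * size multiplication with check_mul_overflow() before copying, so
 * a huge element count cannot wrap into a short allocation. The same
 * pattern in userspace, using the GCC/Clang builtin that backs the
 * kernel macro; memdup_array_demo() is an invented name.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array_demo(const void *src, size_t n, size_t size)
{
        size_t nbytes;
        void *p;

        if (__builtin_mul_overflow(n, size, &nbytes)) {
                errno = EOVERFLOW; /* kernel returns ERR_PTR(-EOVERFLOW) */
                return NULL;
        }
        p = malloc(nbytes);
        if (p)
                memcpy(p, src, nbytes);
        return p;
}

int main(void)
{
        int vals[4] = { 1, 2, 3, 4 };
        void *copy = memdup_array_demo(vals, 4, sizeof(int));
        void *bad  = memdup_array_demo(vals, (size_t)-1, sizeof(int));

        printf("copy=%p bad=%p errno=%d\n", copy, bad, errno);
        free(copy);
        return 0;
}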
-diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
-index af7358277f1c3..e9d4377d03c6e 100644
---- a/include/linux/sunrpc/clnt.h
-+++ b/include/linux/sunrpc/clnt.h
-@@ -92,6 +92,7 @@ struct rpc_clnt {
- };
- const struct cred *cl_cred;
- unsigned int cl_max_connect; /* max number of transports not to the same IP */
-+ struct super_block *pipefs_sb;
- };
-
- /*
-diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
-index 09d7429d67c0e..61b40ea81f4d3 100644
---- a/include/linux/sysctl.h
-+++ b/include/linux/sysctl.h
-@@ -242,6 +242,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
- extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
-
- void do_sysctl_args(void);
-+bool sysctl_is_alias(char *param);
- int do_proc_douintvec(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos,
- int (*conv)(unsigned long *lvalp,
-@@ -287,6 +288,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
- static inline void do_sysctl_args(void)
- {
- }
-+
-+static inline bool sysctl_is_alias(char *param)
-+{
-+ return false;
-+}
- #endif /* CONFIG_SYSCTL */
-
- int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
-diff --git a/include/linux/topology.h b/include/linux/topology.h
-index fea32377f7c77..52f5850730b3e 100644
---- a/include/linux/topology.h
-+++ b/include/linux/topology.h
-@@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int
- #else
- static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
- {
-- return cpumask_nth(cpu, cpus);
-+ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
- }
-
- static inline const struct cpumask *
-diff --git a/include/linux/torture.h b/include/linux/torture.h
-index bb466eec01e42..017f0f710815a 100644
---- a/include/linux/torture.h
-+++ b/include/linux/torture.h
-@@ -81,7 +81,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
- }
-
- /* Definitions for high-resolution-timer sleeps. */
--int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
-+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
-+ struct torture_random_state *trsp);
- int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
- int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
- int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
-diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
-index 21ae37e49319a..cf9f0c61796e1 100644
---- a/include/linux/trace_events.h
-+++ b/include/linux/trace_events.h
-@@ -492,6 +492,7 @@ enum {
- EVENT_FILE_FL_TRIGGER_COND_BIT,
- EVENT_FILE_FL_PID_FILTER_BIT,
- EVENT_FILE_FL_WAS_ENABLED_BIT,
-+ EVENT_FILE_FL_FREED_BIT,
- };
-
- extern struct trace_event_file *trace_get_event_file(const char *instance,
-@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
- * TRIGGER_COND - When set, one or more triggers has an associated filter
- * PID_FILTER - When set, the event is filtered based on pid
- * WAS_ENABLED - Set when enabled to know to clear trace on module removal
-+ * FREED - File descriptor is freed, all fields should be considered invalid
- */
- enum {
- EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
-@@ -643,6 +645,7 @@ enum {
- EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
- EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
- EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
-+ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
- };
-
- struct trace_event_file {
-@@ -671,6 +674,7 @@ struct trace_event_file {
- * caching and such. Which is mostly OK ;-)
- */
- unsigned long flags;
-+ atomic_t ref; /* ref count for opened files */
- atomic_t sm_ref; /* soft-mode reference counter */
- atomic_t tm_ref; /* trigger-mode reference counter */
- };
-diff --git a/include/linux/udp.h b/include/linux/udp.h
-index 43c1fb2d2c21a..d04188714dca1 100644
---- a/include/linux/udp.h
-+++ b/include/linux/udp.h
-@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
- return (num + net_hash_mix(net)) & mask;
- }
-
-+enum {
-+ UDP_FLAGS_CORK, /* Cork is required */
-+ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
-+ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
-+ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
-+ UDP_FLAGS_ACCEPT_FRAGLIST,
-+ UDP_FLAGS_ACCEPT_L4,
-+ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
-+ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
-+ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
-+};
-+
- struct udp_sock {
- /* inet_sock has to be the first member */
- struct inet_sock inet;
- #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
- #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
- #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
-+
-+ unsigned long udp_flags;
-+
- int pending; /* Any pending frames ? */
-- unsigned int corkflag; /* Cork is required */
- __u8 encap_type; /* Is this an Encapsulation socket? */
-- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
-- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
-- encap_enabled:1, /* This socket enabled encap
-- * processing; UDP tunnels and
-- * different encapsulation layer set
-- * this
-- */
-- gro_enabled:1, /* Request GRO aggregation */
-- accept_udp_l4:1,
-- accept_udp_fraglist:1;
-+
- /*
- * Following member retains the information to create a UDP header
- * when the socket is uncorked.
-@@ -62,12 +67,6 @@ struct udp_sock {
- */
- __u16 pcslen;
- __u16 pcrlen;
--/* indicator bits used by pcflag: */
--#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
--#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
--#define UDPLITE_RECV_CC 0x4 /* set via udplite setsockopt */
-- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
-- __u8 unused[3];
- /*
- * For encapsulation sockets.
- */
-@@ -95,28 +94,39 @@ struct udp_sock {
- int forward_threshold;
- };
-
-+#define udp_test_bit(nr, sk) \
-+ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
-+#define udp_set_bit(nr, sk) \
-+ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
-+#define udp_test_and_set_bit(nr, sk) \
-+ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
-+#define udp_clear_bit(nr, sk) \
-+ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
-+#define udp_assign_bit(nr, sk, val) \
-+ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
-+
- #define UDP_MAX_SEGMENTS (1 << 6UL)
-
- #define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
-
- static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
- {
-- udp_sk(sk)->no_check6_tx = val;
-+ udp_assign_bit(NO_CHECK6_TX, sk, val);
- }
-
- static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
- {
-- udp_sk(sk)->no_check6_rx = val;
-+ udp_assign_bit(NO_CHECK6_RX, sk, val);
- }
-
--static inline bool udp_get_no_check6_tx(struct sock *sk)
-+static inline bool udp_get_no_check6_tx(const struct sock *sk)
- {
-- return udp_sk(sk)->no_check6_tx;
-+ return udp_test_bit(NO_CHECK6_TX, sk);
- }
-
--static inline bool udp_get_no_check6_rx(struct sock *sk)
-+static inline bool udp_get_no_check6_rx(const struct sock *sk)
- {
-- return udp_sk(sk)->no_check6_rx;
-+ return udp_test_bit(NO_CHECK6_RX, sk);
- }
-
- static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
-@@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
- if (!skb_is_gso(skb))
- return false;
-
-- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
-+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
-+ !udp_test_bit(ACCEPT_L4, sk))
- return true;
-
-- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
-+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
-+ !udp_test_bit(ACCEPT_FRAGLIST, sk))
- return true;
-
- return false;
-@@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
-
- static inline void udp_allow_gso(struct sock *sk)
- {
-- udp_sk(sk)->accept_udp_l4 = 1;
-- udp_sk(sk)->accept_udp_fraglist = 1;
-+ udp_set_bit(ACCEPT_L4, sk);
-+ udp_set_bit(ACCEPT_FRAGLIST, sk);
- }
-
- #define udp_portaddr_for_each_entry(__sk, list) \
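/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The udp_sock rework replaces non-atomic ':1' bitfields with
 * test_bit()/set_bit()-style helpers on one unsigned long, so racing
 * writers can no longer clobber neighbouring flags in the same word.
 * A minimal userspace model with GCC atomic builtins standing in for
 * the kernel's bitops; the FL_* names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

enum { FL_CORK, FL_NO_CHECK6_TX, FL_GRO_ENABLED }; /* flag bit numbers */

static unsigned long flags_word;

static void flag_set(int nr)
{
        __atomic_fetch_or(&flags_word, 1UL << nr, __ATOMIC_RELAXED);
}

static void flag_clear(int nr)
{
        __atomic_fetch_and(&flags_word, ~(1UL << nr), __ATOMIC_RELAXED);
}

static bool flag_test(int nr)
{
        return __atomic_load_n(&flags_word, __ATOMIC_RELAXED) & (1UL << nr);
}

int main(void)
{
        flag_set(FL_GRO_ENABLED); /* atomic RMW: no neighbour is harmed */
        flag_clear(FL_CORK);
        printf("gro=%d cork=%d\n", flag_test(FL_GRO_ENABLED),
               flag_test(FL_CORK));
        return 0;
}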
-diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
-index b513749582d77..e4de6bc1f69b6 100644
---- a/include/linux/usb/phy.h
-+++ b/include/linux/usb/phy.h
-@@ -144,10 +144,6 @@ struct usb_phy {
- */
- int (*set_wakeup)(struct usb_phy *x, bool enabled);
-
-- /* notify phy port status change */
-- int (*notify_port_status)(struct usb_phy *x, int port,
-- u16 portstatus, u16 portchange);
--
- /* notify phy connect status change */
- int (*notify_connect)(struct usb_phy *x,
- enum usb_device_speed speed);
-@@ -320,15 +316,6 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
- return 0;
- }
-
--static inline int
--usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange)
--{
-- if (x && x->notify_port_status)
-- return x->notify_port_status(x, port, portstatus, portchange);
-- else
-- return 0;
--}
--
- static inline int
- usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
- {
-diff --git a/include/linux/verification.h b/include/linux/verification.h
-index f34e50ebcf60a..cb2d47f280910 100644
---- a/include/linux/verification.h
-+++ b/include/linux/verification.h
-@@ -8,6 +8,7 @@
- #ifndef _LINUX_VERIFICATION_H
- #define _LINUX_VERIFICATION_H
-
-+#include <linux/errno.h>
- #include <linux/types.h>
-
- /*
-diff --git a/include/linux/vfio.h b/include/linux/vfio.h
-index 454e9295970c4..a65b2513f8cdc 100644
---- a/include/linux/vfio.h
-+++ b/include/linux/vfio.h
-@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
- /*
- * External user API
- */
--#if IS_ENABLED(CONFIG_VFIO_GROUP)
- struct iommu_group *vfio_file_iommu_group(struct file *file);
-+
-+#if IS_ENABLED(CONFIG_VFIO_GROUP)
- bool vfio_file_is_group(struct file *file);
- bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
- #else
--static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
--{
-- return NULL;
--}
--
- static inline bool vfio_file_is_group(struct file *file)
- {
- return false;
-diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
-index 1c1d06804d450..24b1e5070f4d4 100644
---- a/include/linux/workqueue.h
-+++ b/include/linux/workqueue.h
-@@ -274,18 +274,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
- * to generate better code.
- */
- #ifdef CONFIG_LOCKDEP
--#define __INIT_WORK(_work, _func, _onstack) \
-+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
- do { \
-- static struct lock_class_key __key; \
-- \
- __init_work((_work), _onstack); \
- (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
-- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
-+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
- INIT_LIST_HEAD(&(_work)->entry); \
- (_work)->func = (_func); \
- } while (0)
- #else
--#define __INIT_WORK(_work, _func, _onstack) \
-+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
- do { \
- __init_work((_work), _onstack); \
- (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
-@@ -294,12 +292,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
- } while (0)
- #endif
-
-+#define __INIT_WORK(_work, _func, _onstack) \
-+ do { \
-+ static __maybe_unused struct lock_class_key __key; \
-+ \
-+ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
-+ } while (0)
-+
- #define INIT_WORK(_work, _func) \
- __INIT_WORK((_work), (_func), 0)
-
- #define INIT_WORK_ONSTACK(_work, _func) \
- __INIT_WORK((_work), (_func), 1)
-
-+#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
-+ __INIT_WORK_KEY((_work), (_func), 1, _key)
-+
- #define __INIT_DELAYED_WORK(_work, _func, _tflags) \
- do { \
- INIT_WORK(&(_work)->work, (_func)); \
-@@ -693,8 +701,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
- return fn(arg);
- }
- #else
--long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
--long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
-+long work_on_cpu_key(int cpu, long (*fn)(void *),
-+ void *arg, struct lock_class_key *key);
-+/*
-+ * A new key is defined for each caller to make sure the work
-+ * associated with the function doesn't share its locking class.
-+ */
-+#define work_on_cpu(_cpu, _fn, _arg) \
-+({ \
-+ static struct lock_class_key __key; \
-+ \
-+ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
-+})
-+
-+long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
-+ void *arg, struct lock_class_key *key);
-+
-+/*
-+ * A new key is defined for each caller to make sure the work
-+ * associated with the function doesn't share its locking class.
-+ */
-+#define work_on_cpu_safe(_cpu, _fn, _arg) \
-+({ \
-+ static struct lock_class_key __key; \
-+ \
-+ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
-+})
- #endif /* CONFIG_SMP */
-
- #ifdef CONFIG_FREEZER
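/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * work_on_cpu()/work_on_cpu_safe() become macros so every call site gets
 * its own static lock_class_key, which stops lockdep from conflating
 * unrelated works into one locking class. The per-expansion-static trick
 * itself, using a GNU statement expression as the kernel macro does; all
 * *_demo names are invented.
 */
#include <stdio.h>

struct lock_class_key { int dummy; };

static void work_on_cpu_key_demo(const char *what, struct lock_class_key *key)
{
        printf("%s: key at %p\n", what, (void *)key);
}

/* each expansion of the macro creates a distinct static __key */
#define work_on_cpu_demo(what)                  \
({                                              \
        static struct lock_class_key __key;     \
        work_on_cpu_key_demo(what, &__key);     \
})

int main(void)
{
        work_on_cpu_demo("caller A"); /* its own key instance */
        work_on_cpu_demo("caller B"); /* a different key instance */
        return 0;
}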
-diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
-index bdc654a455216..783bda6d5cc3f 100644
---- a/include/media/ipu-bridge.h
-+++ b/include/media/ipu-bridge.h
-@@ -108,7 +108,7 @@ struct ipu_node_names {
- char ivsc_sensor_port[7];
- char ivsc_ipu_port[7];
- char endpoint[11];
-- char remote_port[7];
-+ char remote_port[9];
- char vcm[16];
- };
-
-diff --git a/include/net/af_unix.h b/include/net/af_unix.h
-index 824c258143a3a..49c4640027d8a 100644
---- a/include/net/af_unix.h
-+++ b/include/net/af_unix.h
-@@ -75,6 +75,7 @@ struct unix_sock {
- };
-
- #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
-+#define unix_peer(sk) (unix_sk(sk)->peer)
-
- #define peer_wait peer_wq.wait
-
-diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
-index 87d92accc26ea..bdee5d649cc61 100644
---- a/include/net/bluetooth/hci.h
-+++ b/include/net/bluetooth/hci.h
-@@ -1,6 +1,7 @@
- /*
- BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
-+ Copyright 2023 NXP
-
- Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
-
-@@ -673,6 +674,8 @@ enum {
- #define HCI_TX_POWER_INVALID 127
- #define HCI_RSSI_INVALID 127
-
-+#define HCI_SYNC_HANDLE_INVALID 0xffff
-+
- #define HCI_ROLE_MASTER 0x00
- #define HCI_ROLE_SLAVE 0x01
-
-diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
-index c33348ba1657e..7fa95b72e5c85 100644
---- a/include/net/bluetooth/hci_core.h
-+++ b/include/net/bluetooth/hci_core.h
-@@ -350,6 +350,8 @@ struct hci_dev {
- struct list_head list;
- struct mutex lock;
-
-+ struct ida unset_handle_ida;
-+
- const char *name;
- unsigned long flags;
- __u16 id;
-@@ -1314,7 +1316,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *
- }
-
- static inline struct hci_conn *
--hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
-+hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
- {
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
-@@ -1336,6 +1338,29 @@ hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
- return NULL;
- }
-
-+static inline struct hci_conn *
-+hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
-+{
-+ struct hci_conn_hash *h = &hdev->conn_hash;
-+ struct hci_conn *c;
-+
-+ rcu_read_lock();
-+
-+ list_for_each_entry_rcu(c, &h->list, list) {
-+ if (c->type != ISO_LINK ||
-+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
-+ continue;
-+
-+ if (c->sync_handle == sync_handle) {
-+ rcu_read_unlock();
-+ return c;
-+ }
-+ }
-+ rcu_read_unlock();
-+
-+ return NULL;
-+}
-+
- static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
- __u8 type, __u16 state)
- {
-@@ -1426,7 +1451,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev);
- int hci_conn_check_create_cis(struct hci_conn *conn);
-
- struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
-- u8 role);
-+ u8 role, u16 handle);
-+struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
-+ bdaddr_t *dst, u8 role);
- void hci_conn_del(struct hci_conn *conn);
- void hci_conn_hash_flush(struct hci_dev *hdev);
- void hci_conn_check_pending(struct hci_dev *hdev);
-diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
-index 7192346e4a22d..153a8c3e7213d 100644
---- a/include/net/cfg80211.h
-+++ b/include/net/cfg80211.h
-@@ -5826,6 +5826,16 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
- */
- void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
-
-+/**
-+ * wiphy_work_flush - flush previously queued work
-+ * @wiphy: the wiphy, for debug purposes
-+ * @work: the work to flush, this can be %NULL to flush all work
-+ *
-+ * Flush the work (i.e. run it if pending). This must be called
-+ * under the wiphy mutex acquired by wiphy_lock().
-+ */
-+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
-+
- struct wiphy_delayed_work {
- struct wiphy_work work;
- struct wiphy *wiphy;
-@@ -5869,6 +5879,17 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
- void wiphy_delayed_work_cancel(struct wiphy *wiphy,
- struct wiphy_delayed_work *dwork);
-
-+/**
-+ * wiphy_delayed_work_flush - flush previously queued delayed work
-+ * @wiphy: the wiphy, for debug purposes
-+ * @work: the work to flush
-+ *
-+ * Flush the work (i.e. run it if pending). This must be called
-+ * under the wiphy mutex acquired by wiphy_lock().
-+ */
-+void wiphy_delayed_work_flush(struct wiphy *wiphy,
-+ struct wiphy_delayed_work *dwork);
-+
- /**
- * struct wireless_dev - wireless device state
- *
-diff --git a/include/net/flow.h b/include/net/flow.h
-index 7f0adda3bf2fe..335bbc52171c1 100644
---- a/include/net/flow.h
-+++ b/include/net/flow.h
-@@ -40,8 +40,8 @@ struct flowi_common {
- #define FLOWI_FLAG_KNOWN_NH 0x02
- __u32 flowic_secid;
- kuid_t flowic_uid;
-- struct flowi_tunnel flowic_tun_key;
- __u32 flowic_multipath_hash;
-+ struct flowi_tunnel flowic_tun_key;
- };
-
- union flowi_uli {
-diff --git a/include/net/neighbour.h b/include/net/neighbour.h
-index 07022bb0d44d4..0d28172193fa6 100644
---- a/include/net/neighbour.h
-+++ b/include/net/neighbour.h
-@@ -162,7 +162,7 @@ struct neighbour {
- struct rcu_head rcu;
- struct net_device *dev;
- netdevice_tracker dev_tracker;
-- u8 primary_key[0];
-+ u8 primary_key[];
- } __randomize_layout;
-
- struct neigh_ops {
-diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
-index 078d3c52c03f9..e5f2f0b73a9a0 100644
---- a/include/net/netfilter/nf_conntrack_act_ct.h
-+++ b/include/net/netfilter/nf_conntrack_act_ct.h
-@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
- #endif
- }
-
--static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
-+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
-+ enum ip_conntrack_info ctinfo)
-+{
-+#if IS_ENABLED(CONFIG_NET_ACT_CT)
-+ struct nf_conn_act_ct_ext *act_ct_ext;
-+
-+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
-+ if (dev_net(skb->dev) == &init_net && act_ct_ext)
-+ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-+#endif
-+}
-+
-+static inline struct
-+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
-+ struct nf_conn *ct,
-+ enum ip_conntrack_info ctinfo)
- {
- #if IS_ENABLED(CONFIG_NET_ACT_CT)
- struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
-@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
- return act_ct;
-
- act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
-+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
- return act_ct;
- #else
- return NULL;
- #endif
- }
-
--static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
-- enum ip_conntrack_info ctinfo)
--{
--#if IS_ENABLED(CONFIG_NET_ACT_CT)
-- struct nf_conn_act_ct_ext *act_ct_ext;
--
-- act_ct_ext = nf_conn_act_ct_ext_find(ct);
-- if (dev_net(skb->dev) == &init_net && act_ct_ext)
-- act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
--#endif
--}
--
- #endif /* _NF_CONNTRACK_ACT_CT_H */
-diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
-index 7c816359d5a98..75972e211ba12 100644
---- a/include/net/netfilter/nf_tables.h
-+++ b/include/net/netfilter/nf_tables.h
-@@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
- return *(__force __be32 *)sreg;
- }
-
--static inline void nft_reg_store64(u32 *dreg, u64 val)
-+static inline void nft_reg_store64(u64 *dreg, u64 val)
- {
-- put_unaligned(val, (u64 *)dreg);
-+ put_unaligned(val, dreg);
- }
-
- static inline u64 nft_reg_load64(const u32 *sreg)
-diff --git a/include/net/sock.h b/include/net/sock.h
-index 92f7ea62a9159..7753354d59c0b 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -2006,21 +2006,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
- /* sk_tx_queue_mapping accepts only up to a 16-bit value */
- if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
- return;
-- sk->sk_tx_queue_mapping = tx_queue;
-+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
-+ * other WRITE_ONCE() because the socket lock might not be held.
-+ */
-+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
- }
-
- #define NO_QUEUE_MAPPING USHRT_MAX
-
- static inline void sk_tx_queue_clear(struct sock *sk)
- {
-- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
-+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
-+ * other WRITE_ONCE() because the socket lock might not be held.
-+ */
-+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
- }
-
- static inline int sk_tx_queue_get(const struct sock *sk)
- {
-- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
-- return sk->sk_tx_queue_mapping;
-+ if (sk) {
-+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
-+ * and sk_tx_queue_set().
-+ */
-+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
-
-+ if (val != NO_QUEUE_MAPPING)
-+ return val;
-+ }
- return -1;
- }
-
-@@ -2169,7 +2181,7 @@ static inline void __dst_negative_advice(struct sock *sk)
- if (ndst != dst) {
- rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_tx_queue_clear(sk);
-- sk->sk_dst_pending_confirm = 0;
-+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- }
- }
- }
-@@ -2186,7 +2198,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
- struct dst_entry *old_dst;
-
- sk_tx_queue_clear(sk);
-- sk->sk_dst_pending_confirm = 0;
-+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- old_dst = rcu_dereference_protected(sk->sk_dst_cache,
- lockdep_sock_is_held(sk));
- rcu_assign_pointer(sk->sk_dst_cache, dst);
-@@ -2199,7 +2211,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
- struct dst_entry *old_dst;
-
- sk_tx_queue_clear(sk);
-- sk->sk_dst_pending_confirm = 0;
-+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
- dst_release(old_dst);
- }
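/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * The sock.h hunks wrap every access to sk_tx_queue_mapping (and
 * sk_dst_pending_confirm) in READ_ONCE()/WRITE_ONCE() because the socket
 * lock may not be held; the getter also reads the field once into a local
 * so the test and the return use the same value. Modelled with the usual
 * volatile-cast definitions of those macros.
 */
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

#define NO_QUEUE_MAPPING ((unsigned short)~0)

static unsigned short tx_queue_mapping = NO_QUEUE_MAPPING;

static int tx_queue_get_demo(void)
{
        unsigned short val = READ_ONCE(tx_queue_mapping); /* single load */

        return val != NO_QUEUE_MAPPING ? val : -1;
}

int main(void)
{
        printf("unset: %d\n", tx_queue_get_demo());
        WRITE_ONCE(tx_queue_mapping, 3); /* paired with the READ_ONCE */
        printf("set:   %d\n", tx_queue_get_demo());
        return 0;
}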
-diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
-index b24ea2d9400ba..1dc2f827d0bcf 100644
---- a/include/net/tc_act/tc_ct.h
-+++ b/include/net/tc_act/tc_ct.h
-@@ -57,6 +57,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
- return to_ct_params(a)->nf_ft;
- }
-
-+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
-+{
-+ return to_ct_params(a)->helper;
-+}
-+
- #else
- static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
- static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
-@@ -64,6 +69,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
- {
- return NULL;
- }
-+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
-+{
-+ return NULL;
-+}
- #endif /* CONFIG_NF_CONNTRACK */
-
- #if IS_ENABLED(CONFIG_NET_ACT_CT)
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index 4b03ca7cb8a5e..0239e815edf71 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -801,7 +801,7 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
- }
-
- /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
--static inline u32 tcp_ns_to_ts(u64 ns)
-+static inline u64 tcp_ns_to_ts(u64 ns)
- {
- return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
- }
-diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
-index 0ca9b7a11baf5..29251c3519cf0 100644
---- a/include/net/udp_tunnel.h
-+++ b/include/net/udp_tunnel.h
-@@ -174,16 +174,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
- }
- #endif
-
--static inline void udp_tunnel_encap_enable(struct socket *sock)
-+static inline void udp_tunnel_encap_enable(struct sock *sk)
- {
-- struct udp_sock *up = udp_sk(sock->sk);
--
-- if (up->encap_enabled)
-+ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
- return;
-
-- up->encap_enabled = 1;
- #if IS_ENABLED(CONFIG_IPV6)
-- if (sock->sk->sk_family == PF_INET6)
-+ if (READ_ONCE(sk->sk_family) == PF_INET6)
- ipv6_stub->udpv6_encap_enable();
- #endif
- udp_encap_enable();
-diff --git a/include/net/udplite.h b/include/net/udplite.h
-index bd33ff2b8f426..786919d29f8de 100644
---- a/include/net/udplite.h
-+++ b/include/net/udplite.h
-@@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
- /* Fast-path computation of checksum. Socket may not be locked. */
- static inline __wsum udplite_csum(struct sk_buff *skb)
- {
-- const struct udp_sock *up = udp_sk(skb->sk);
- const int off = skb_transport_offset(skb);
-+ const struct sock *sk = skb->sk;
- int len = skb->len - off;
-
-- if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
-- if (0 < up->pcslen)
-- len = up->pcslen;
-- udp_hdr(skb)->len = htons(up->pcslen);
-+ if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
-+ u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
-+
-+ if (pcslen < len) {
-+ if (pcslen > 0)
-+ len = pcslen;
-+ udp_hdr(skb)->len = htons(pcslen);
-+ }
- }
- skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
-
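/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * udplite_csum() now snapshots pcslen with READ_ONCE() into a local, so
 * the length it compares and the length it writes into the UDP header
 * cannot disagree if a setsockopt() races in between. A simplified model
 * of the read-once-then-use-twice shape (semantics condensed from the
 * hunk; names invented).
 */
#include <stdio.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned short pcslen; /* updated concurrently via setsockopt() */

static int checksum_coverage(int len, unsigned short *hdr_len)
{
        unsigned short snap = READ_ONCE(pcslen); /* one snapshot */

        if (snap && snap < len) {
                *hdr_len = snap; /* header and limit use the same value */
                return snap;
        }
        *hdr_len = (unsigned short)len;
        return len;
}

int main(void)
{
        unsigned short hdr;

        pcslen = 8;
        printf("cover %d bytes, header says %u\n",
               checksum_coverage(100, &hdr), hdr);
        return 0;
}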
-diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
-index 65e49fae8da7a..8fa1153f37cbf 100644
---- a/include/scsi/scsi_device.h
-+++ b/include/scsi/scsi_device.h
-@@ -167,19 +167,25 @@ struct scsi_device {
- * power state for system suspend/resume (suspend to RAM and
- * hibernation) operations.
- */
-- bool manage_system_start_stop;
-+ unsigned manage_system_start_stop:1;
-
- /*
- * If true, let the high-level device driver (sd) manage the device
- * power state for runtime device suspend and resume operations.
- */
-- bool manage_runtime_start_stop;
-+ unsigned manage_runtime_start_stop:1;
-
- /*
- * If true, let the high-level device driver (sd) manage the device
- * power state for system shutdown (power off) operations.
- */
-- bool manage_shutdown;
-+ unsigned manage_shutdown:1;
-+
-+ /*
-+ * If set and if the device is runtime suspended, ask the high-level
-+ * device driver (sd) to force a runtime resume of the device.
-+ */
-+ unsigned force_runtime_start_on_system_start:1;
-
- unsigned removable:1;
- unsigned changed:1; /* Data invalid due to media change */
-diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
-index 5842e38bb2880..f5e4ac5b8cce8 100644
---- a/include/soc/tegra/bpmp.h
-+++ b/include/soc/tegra/bpmp.h
-@@ -102,8 +102,12 @@ struct tegra_bpmp {
- #ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_mirror;
- #endif
-+
-+ bool suspended;
- };
-
-+#define TEGRA_BPMP_MESSAGE_RESET BIT(0)
-+
- struct tegra_bpmp_message {
- unsigned int mrq;
-
-@@ -117,6 +121,8 @@ struct tegra_bpmp_message {
- size_t size;
- int ret;
- } rx;
-+
-+ unsigned long flags;
- };
-
- #if IS_ENABLED(CONFIG_TEGRA_BPMP)
-diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
-index 1bf757901d024..2fe8c6b0d4cf3 100644
---- a/include/sound/cs35l41.h
-+++ b/include/sound/cs35l41.h
-@@ -11,7 +11,6 @@
- #define __CS35L41_H
-
- #include <linux/regmap.h>
--#include <linux/completion.h>
- #include <linux/firmware/cirrus/cs_dsp.h>
-
- #define CS35L41_FIRSTREG 0x00000000
-@@ -902,7 +901,8 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
- int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
- struct cs35l41_hw_cfg *hw_cfg);
- bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
-+int cs35l41_mdsync_up(struct regmap *regmap);
- int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
-- int enable, struct completion *pll_lock, bool firmware_running);
-+ int enable, bool firmware_running);
-
- #endif /* __CS35L41_H */
-diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
-index 6d31d535e8f6d..23d6d6bfb0736 100644
---- a/include/sound/soc-acpi.h
-+++ b/include/sound/soc-acpi.h
-@@ -68,6 +68,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
- * @i2s_link_mask: I2S/TDM links enabled on the board
- * @num_dai_drivers: number of elements in @dai_drivers
- * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
-+ * @subsystem_vendor: optional PCI SSID vendor value
-+ * @subsystem_device: optional PCI SSID device value
-+ * @subsystem_id_set: true if a value has been written to
-+ * subsystem_vendor and subsystem_device.
- */
- struct snd_soc_acpi_mach_params {
- u32 acpi_ipc_irq_index;
-@@ -80,6 +84,9 @@ struct snd_soc_acpi_mach_params {
- u32 i2s_link_mask;
- u32 num_dai_drivers;
- struct snd_soc_dai_driver *dai_drivers;
-+ unsigned short subsystem_vendor;
-+ unsigned short subsystem_device;
-+ bool subsystem_id_set;
- };
-
- /**
-diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
-index fc94dfb0021fd..e8ff2e089cd00 100644
---- a/include/sound/soc-card.h
-+++ b/include/sound/soc-card.h
-@@ -59,6 +59,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
- void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link);
-
-+#ifdef CONFIG_PCI
-+static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
-+ unsigned short vendor,
-+ unsigned short device)
-+{
-+ card->pci_subsystem_vendor = vendor;
-+ card->pci_subsystem_device = device;
-+ card->pci_subsystem_set = true;
-+}
-+
-+static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
-+ unsigned short *vendor,
-+ unsigned short *device)
-+{
-+ if (!card->pci_subsystem_set)
-+ return -ENOENT;
-+
-+ *vendor = card->pci_subsystem_vendor;
-+ *device = card->pci_subsystem_device;
-+
-+ return 0;
-+}
-+#else /* !CONFIG_PCI */
-+static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
-+ unsigned short vendor,
-+ unsigned short device)
-+{
-+}
-+
-+static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
-+ unsigned short *vendor,
-+ unsigned short *device)
-+{
-+ return -ENOENT;
-+}
-+#endif /* CONFIG_PCI */
-+
- /* device driver data */
- static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
- void *data)
-diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
-index 5fcfba47d98cc..adcd8719d3435 100644
---- a/include/sound/soc-dai.h
-+++ b/include/sound/soc-dai.h
-@@ -370,6 +370,7 @@ struct snd_soc_dai_ops {
-
- /* bit field */
- unsigned int no_capture_mute:1;
-+ unsigned int mute_unmute_on_trigger:1;
- };
-
- struct snd_soc_cdai_ops {
-diff --git a/include/sound/soc.h b/include/sound/soc.h
-index 37f9d3fe302a6..49ec688eed606 100644
---- a/include/sound/soc.h
-+++ b/include/sound/soc.h
-@@ -932,6 +932,17 @@ struct snd_soc_card {
- #ifdef CONFIG_DMI
- char dmi_longname[80];
- #endif /* CONFIG_DMI */
-+
-+#ifdef CONFIG_PCI
-+ /*
-+ * PCI does not define 0 as invalid, so pci_subsystem_set indicates
-+ * whether a value has been written to these fields.
-+ */
-+ unsigned short pci_subsystem_vendor;
-+ unsigned short pci_subsystem_device;
-+ bool pci_subsystem_set;
-+#endif /* CONFIG_PCI */
-+
- char topology_shortname[32];
-
- struct device *dev;
-diff --git a/include/sound/sof.h b/include/sound/sof.h
-index d3c41f87ac319..51294f2ba302c 100644
---- a/include/sound/sof.h
-+++ b/include/sound/sof.h
-@@ -64,6 +64,14 @@ struct snd_sof_pdata {
- const char *name;
- const char *platform;
-
-+ /*
-+ * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
-+ * flag indicates that a value has been written to these members.
-+ */
-+ unsigned short subsystem_vendor;
-+ unsigned short subsystem_device;
-+ bool subsystem_id_set;
-+
- struct device *dev;
-
- /*
-diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
-index 4c53a5ef6257b..f7e537f64db45 100644
---- a/include/trace/events/rxrpc.h
-+++ b/include/trace/events/rxrpc.h
-@@ -328,7 +328,7 @@
- E_(rxrpc_rtt_tx_ping, "PING")
-
- #define rxrpc_rtt_rx_traces \
-- EM(rxrpc_rtt_rx_cancel, "CNCL") \
-+ EM(rxrpc_rtt_rx_other_ack, "OACK") \
- EM(rxrpc_rtt_rx_obsolete, "OBSL") \
- EM(rxrpc_rtt_rx_lost, "LOST") \
- EM(rxrpc_rtt_rx_ping_response, "PONG") \
-diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
-index 6c80f96049bd0..282e90aeb163c 100644
---- a/include/uapi/linux/fcntl.h
-+++ b/include/uapi/linux/fcntl.h
-@@ -116,5 +116,8 @@
- #define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
- compare object identity and may not
- be usable to open_by_handle_at(2) */
-+#if defined(__KERNEL__)
-+#define AT_GETATTR_NOSEC 0x80000000
-+#endif
-
- #endif /* _UAPI_LINUX_FCNTL_H */
-diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
-index 3c36aeade991e..370ed14b1ae09 100644
---- a/include/uapi/linux/prctl.h
-+++ b/include/uapi/linux/prctl.h
-@@ -283,7 +283,8 @@ struct prctl_mm_map {
-
- /* Memory deny write / execute */
- #define PR_SET_MDWE 65
--# define PR_MDWE_REFUSE_EXEC_GAIN 1
-+# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
-+# define PR_MDWE_NO_INHERIT (1UL << 1)
-
- #define PR_GET_MDWE 66
-
-diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
-index 5c6c4269f7efe..2ec6f35cda32e 100644
---- a/include/uapi/linux/stddef.h
-+++ b/include/uapi/linux/stddef.h
-@@ -27,7 +27,7 @@
- union { \
- struct { MEMBERS } ATTRS; \
- struct TAG { MEMBERS } ATTRS NAME; \
-- }
-+ } ATTRS
-
- #ifdef __cplusplus
- /* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
-diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
-index 4a195b68f28f6..b383c2fe0cf35 100644
---- a/include/uapi/linux/v4l2-subdev.h
-+++ b/include/uapi/linux/v4l2-subdev.h
-@@ -239,7 +239,7 @@ struct v4l2_subdev_routing {
- * set (which is the default), the 'stream' fields will be forced to 0 by the
- * kernel.
- */
-- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0)
-+ #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
-
- /**
- * struct v4l2_subdev_client_capability - Capabilities of the client accessing
-diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
-index c60ca33eac594..ed07181d4eff9 100644
---- a/include/uapi/linux/vm_sockets.h
-+++ b/include/uapi/linux/vm_sockets.h
-@@ -191,4 +191,21 @@ struct sockaddr_vm {
-
- #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
-
-+/* MSG_ZEROCOPY notifications are encoded in the standard error format,
-+ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
-+ * kernel source tree for more details.
-+ */
-+
-+/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
-+ * when MSG_ZEROCOPY flag is used on transmissions.
-+ */
-+
-+#define SOL_VSOCK 287
-+
-+/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
-+ * when MSG_ZEROCOPY flag is used on transmissions.
-+ */
-+
-+#define VSOCK_RECVERR 1
-+
- #endif /* _UAPI_VM_SOCKETS_H */
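/*
 * [Editor's note: hedged sketch, not part of the patch above.] With the
 * new SOL_VSOCK/VSOCK_RECVERR values, MSG_ZEROCOPY completions on a vsock
 * socket should be readable off the error queue the same way as for
 * TCP/UDP (see Documentation/networking/msg_zerocopy.rst). A userspace
 * fragment under that assumption; 'fd' is presumed to be a connected
 * vsock socket that previously sent with MSG_ZEROCOPY. Linux-only headers.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/errqueue.h>

#define DEMO_SOL_VSOCK    287 /* values from the hunk above */
#define DEMO_VSOCK_RECVERR  1

static int read_zerocopy_completions(int fd)
{
        char control[128];
        struct msghdr msg = {
                .msg_control    = control,
                .msg_controllen = sizeof(control),
        };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                return -errno;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                struct sock_extended_err serr;

                if (cm->cmsg_level != DEMO_SOL_VSOCK ||
                    cm->cmsg_type != DEMO_VSOCK_RECVERR)
                        continue;
                memcpy(&serr, CMSG_DATA(cm), sizeof(serr));
                /* ee_info..ee_data is the range of completed sends */
                printf("completed sends %u..%u\n", serr.ee_info, serr.ee_data);
        }
        return 0;
}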
-diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
-index 375718ba4ab62..e145bca5105c5 100644
---- a/include/uapi/xen/privcmd.h
-+++ b/include/uapi/xen/privcmd.h
-@@ -102,7 +102,7 @@ struct privcmd_mmap_resource {
- #define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
-
- struct privcmd_irqfd {
-- void __user *dm_op;
-+ __u64 dm_op;
- __u32 size; /* Size of structure pointed by dm_op */
- __u32 fd;
- __u32 flags;
-@@ -138,6 +138,6 @@ struct privcmd_irqfd {
- #define IOCTL_PRIVCMD_MMAP_RESOURCE \
- _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
- #define IOCTL_PRIVCMD_IRQFD \
-- _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd))
-+ _IOW('P', 8, struct privcmd_irqfd)
-
- #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
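/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * Replacing 'void __user *dm_op' with '__u64 dm_op' gives privcmd_irqfd
 * a single layout for 32- and 64-bit userspace (no compat translation),
 * and the _IOW() change encodes that fixed size into the ioctl number.
 * Callers pass pointers through the usual cast dance; the *_demo struct
 * below copies only the fields visible in the hunk, so its padding is
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct privcmd_irqfd_demo {
        uint64_t dm_op; /* pointer carried as a fixed-width integer */
        uint32_t size;
        uint32_t fd;
        uint32_t flags;
};

int main(void)
{
        unsigned char buf[64];
        struct privcmd_irqfd_demo irqfd = {
                /* via uintptr_t, so the conversion is well-defined on 32-bit */
                .dm_op = (uint64_t)(uintptr_t)buf,
                .size  = sizeof(buf),
        };

        printf("sizeof=%zu dm_op=%#llx\n", sizeof(irqfd),
               (unsigned long long)irqfd.dm_op);
        return 0;
}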
-diff --git a/include/video/sticore.h b/include/video/sticore.h
-index 945ad60463a18..012b5b46ad7d0 100644
---- a/include/video/sticore.h
-+++ b/include/video/sticore.h
-@@ -232,7 +232,7 @@ struct sti_rom_font {
- u8 height;
- u8 font_type; /* language type */
- u8 bytes_per_char;
-- u32 next_font;
-+ s32 next_font; /* note: signed int */
- u8 underline_height;
- u8 underline_pos;
- u8 res008[2];
-diff --git a/init/Makefile b/init/Makefile
-index ec557ada3c12e..cbac576c57d63 100644
---- a/init/Makefile
-+++ b/init/Makefile
-@@ -60,4 +60,5 @@ include/generated/utsversion.h: FORCE
- $(obj)/version-timestamp.o: include/generated/utsversion.h
- CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
- KASAN_SANITIZE_version-timestamp.o := n
-+KCSAN_SANITIZE_version-timestamp.o := n
- GCOV_PROFILE_version-timestamp.o := n
-diff --git a/init/main.c b/init/main.c
-index 436d73261810b..e24b0780fdff7 100644
---- a/init/main.c
-+++ b/init/main.c
-@@ -530,6 +530,10 @@ static int __init unknown_bootoption(char *param, char *val,
- {
- size_t len = strlen(param);
-
-+ /* Handle params aliased to sysctls */
-+ if (sysctl_is_alias(param))
-+ return 0;
-+
- repair_env_string(param, val);
-
- /* Handle obsolete-style parameters */
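/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * unknown_bootoption() now returns early for parameters that alias
 * sysctls, using the sysctl_is_alias() helper this patch adds to
 * <linux/sysctl.h> (with a 'return false' stub when CONFIG_SYSCTL is
 * off). A compile-anywhere model of that stub-plus-caller arrangement;
 * HAVE_SYSCTL and the *_demo names are invented for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef HAVE_SYSCTL
bool sysctl_is_alias(char *param); /* the real table lookup lives elsewhere */
#else
static inline bool sysctl_is_alias(char *param)
{
        (void)param;
        return false; /* no sysctl support: nothing can alias one */
}
#endif

static int unknown_bootoption_demo(char *param)
{
        if (sysctl_is_alias(param))
                return 0; /* consumed by sysctl, not "unknown" */
        printf("Unknown kernel command line parameter \"%s\"\n", param);
        return -1;
}

int main(void)
{
        char param[] = "hung_task_panic=1";

        return unknown_bootoption_demo(param) ? 1 : 0;
}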
-diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
-index f04a43044d917..976e9500f6518 100644
---- a/io_uring/fdinfo.c
-+++ b/io_uring/fdinfo.c
-@@ -145,13 +145,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
- if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- struct io_sq_data *sq = ctx->sq_data;
-
-- if (mutex_trylock(&sq->lock)) {
-- if (sq->thread) {
-- sq_pid = task_pid_nr(sq->thread);
-- sq_cpu = task_cpu(sq->thread);
-- }
-- mutex_unlock(&sq->lock);
-- }
-+ sq_pid = sq->task_pid;
-+ sq_cpu = sq->sq_cpu;
- }
-
- seq_printf(m, "SqThread:\t%d\n", sq_pid);
-diff --git a/io_uring/fs.c b/io_uring/fs.c
-index 08e3b175469c6..eccea851dd5a2 100644
---- a/io_uring/fs.c
-+++ b/io_uring/fs.c
-@@ -254,7 +254,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
- newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- lnk->flags = READ_ONCE(sqe->hardlink_flags);
-
-- lnk->oldpath = getname(oldf);
-+ lnk->oldpath = getname_uflags(oldf, lnk->flags);
- if (IS_ERR(lnk->oldpath))
- return PTR_ERR(lnk->oldpath);
-
-diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
-index 8d1bc6cdfe712..f09e3ee11229c 100644
---- a/io_uring/io_uring.c
-+++ b/io_uring/io_uring.c
-@@ -323,6 +323,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
- INIT_LIST_HEAD(&ctx->sqd_list);
- INIT_LIST_HEAD(&ctx->cq_overflow_list);
- INIT_LIST_HEAD(&ctx->io_buffers_cache);
-+ INIT_HLIST_HEAD(&ctx->io_buf_list);
- io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
- sizeof(struct io_rsrc_node));
- io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
-@@ -2659,7 +2660,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
- return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
- }
-
--static void io_mem_free(void *ptr)
-+void io_mem_free(void *ptr)
- {
- if (!ptr)
- return;
-@@ -2690,6 +2691,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
- {
- struct page **page_array;
- unsigned int nr_pages;
-+ void *page_addr;
- int ret, i;
-
- *npages = 0;
-@@ -2711,27 +2713,29 @@ err:
- io_pages_free(&page_array, ret > 0 ? ret : 0);
- return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
- }
-- /*
-- * Should be a single page. If the ring is small enough that we can
-- * use a normal page, that is fine. If we need multiple pages, then
-- * userspace should use a huge page. That's the only way to guarantee
-- * that we get contiguous memory, outside of just being lucky or
-- * (currently) having low memory fragmentation.
-- */
-- if (page_array[0] != page_array[ret - 1])
-- goto err;
-
-- /*
-- * Can't support mapping user allocated ring memory on 32-bit archs
-- * where it could potentially reside in highmem. Just fail those with
-- * -EINVAL, just like we did on kernels that didn't support this
-- * feature.
-- */
-+ page_addr = page_address(page_array[0]);
- for (i = 0; i < nr_pages; i++) {
-- if (PageHighMem(page_array[i])) {
-- ret = -EINVAL;
-+ ret = -EINVAL;
-+
-+ /*
-+ * Can't support mapping user allocated ring memory on 32-bit
-+ * archs where it could potentially reside in highmem. Just
-+ * fail those with -EINVAL, just like we did on kernels that
-+ * didn't support this feature.
-+ */
-+ if (PageHighMem(page_array[i]))
- goto err;
-- }
-+
-+ /*
-+ * No support for discontig pages for now, should either be a
-+ * single normal page, or a huge page. Later on we can add
-+ * support for remapping discontig pages, for now we will
-+ * just fail them with EINVAL.
-+ */
-+ if (page_address(page_array[i]) != page_addr)
-+ goto err;
-+ page_addr += PAGE_SIZE;
- }
-
- *pages = page_array;
-@@ -2768,7 +2772,7 @@ static void io_rings_free(struct io_ring_ctx *ctx)
- }
- }
-
--static void *io_mem_alloc(size_t size)
-+void *io_mem_alloc(size_t size)
- {
- gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
- void *ret;
-@@ -2939,6 +2943,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
- ctx->mm_account = NULL;
- }
- io_rings_free(ctx);
-+ io_kbuf_mmap_list_free(ctx);
-
- percpu_ref_exit(&ctx->refs);
- free_uid(ctx->user);
-@@ -3433,25 +3438,27 @@ static void *io_uring_validate_mmap_request(struct file *file,
- struct page *page;
- void *ptr;
-
-- /* Don't allow mmap if the ring was setup without it */
-- if (ctx->flags & IORING_SETUP_NO_MMAP)
-- return ERR_PTR(-EINVAL);
--
- switch (offset & IORING_OFF_MMAP_MASK) {
- case IORING_OFF_SQ_RING:
- case IORING_OFF_CQ_RING:
-+ /* Don't allow mmap if the ring was setup without it */
-+ if (ctx->flags & IORING_SETUP_NO_MMAP)
-+ return ERR_PTR(-EINVAL);
- ptr = ctx->rings;
- break;
- case IORING_OFF_SQES:
-+ /* Don't allow mmap if the ring was setup without it */
-+ if (ctx->flags & IORING_SETUP_NO_MMAP)
-+ return ERR_PTR(-EINVAL);
- ptr = ctx->sq_sqes;
- break;
- case IORING_OFF_PBUF_RING: {
- unsigned int bgid;
-
- bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
-- mutex_lock(&ctx->uring_lock);
-+ rcu_read_lock();
- ptr = io_pbuf_get_address(ctx, bgid);
-- mutex_unlock(&ctx->uring_lock);
-+ rcu_read_unlock();
- if (!ptr)
- return ERR_PTR(-EINVAL);
- break;
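/*
 * [Editor's note: illustrative sketch, not part of the patch above.]
 * __io_uaddr_map() now walks every pinned page and requires the whole
 * range to be physically contiguous (one normal page or one huge page),
 * instead of only comparing the first and last page. The shape of that
 * check, abstracted from struct page onto plain addresses.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* true iff addrs[] advances exactly one page per entry */
static bool pages_are_contiguous(const unsigned long *addrs, size_t nr)
{
        unsigned long expect = addrs[0];

        for (size_t i = 0; i < nr; i++, expect += DEMO_PAGE_SIZE) {
                if (addrs[i] != expect)
                        return false;
        }
        return true;
}

int main(void)
{
        unsigned long ok[]  = { 0x10000, 0x11000, 0x12000 };
        unsigned long bad[] = { 0x10000, 0x13000, 0x14000 }; /* gap */

        printf("ok=%d bad=%d\n",
               pages_are_contiguous(ok, 3), pages_are_contiguous(bad, 3));
        return 0;
}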
-diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
-index 0bc145614a6e6..d2bad1df347da 100644
---- a/io_uring/io_uring.h
-+++ b/io_uring/io_uring.h
-@@ -86,6 +86,9 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
- bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
- bool cancel_all);
-
-+void *io_mem_alloc(size_t size);
-+void io_mem_free(void *ptr);
-+
- #if defined(CONFIG_PROVE_LOCKING)
- static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
- {
-diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
-index 9123138aa9f48..012f622036049 100644
---- a/io_uring/kbuf.c
-+++ b/io_uring/kbuf.c
-@@ -19,28 +19,54 @@
-
- #define BGID_ARRAY 64
-
-+/* BIDs are addressed by a 16-bit field in a CQE */
-+#define MAX_BIDS_PER_BGID (1 << 16)
-+
- struct io_provide_buf {
- struct file *file;
- __u64 addr;
- __u32 len;
- __u32 bgid;
-- __u16 nbufs;
-+ __u32 nbufs;
- __u16 bid;
- };
-
-+static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
-+ struct io_buffer_list *bl,
-+ unsigned int bgid)
-+{
-+ if (bl && bgid < BGID_ARRAY)
-+ return &bl[bgid];
-+
-+ return xa_load(&ctx->io_bl_xa, bgid);
-+}
-+
-+struct io_buf_free {
-+ struct hlist_node list;
-+ void *mem;
-+ size_t size;
-+ int inuse;
-+};
-+
- static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
- unsigned int bgid)
- {
-- if (ctx->io_bl && bgid < BGID_ARRAY)
-- return &ctx->io_bl[bgid];
-+ lockdep_assert_held(&ctx->uring_lock);
-
-- return xa_load(&ctx->io_bl_xa, bgid);
-+ return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
- }
-
- static int io_buffer_add_list(struct io_ring_ctx *ctx,
- struct io_buffer_list *bl, unsigned int bgid)
- {
-+ /*
-+ * Store buffer group ID and finally mark the list as visible.
-+ * The normal lookup doesn't care about the visibility as we're
-+ * always under the ->uring_lock, but the RCU lookup from mmap does.
-+ */
- bl->bgid = bgid;
-+ smp_store_release(&bl->is_ready, 1);
-+
- if (bgid < BGID_ARRAY)
- return 0;
-
-@@ -191,21 +217,40 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-
- static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
- {
-+ struct io_buffer_list *bl;
- int i;
-
-- ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
-- GFP_KERNEL);
-- if (!ctx->io_bl)
-+ bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
-+ if (!bl)
- return -ENOMEM;
-
- for (i = 0; i < BGID_ARRAY; i++) {
-- INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
-- ctx->io_bl[i].bgid = i;
-+ INIT_LIST_HEAD(&bl[i].buf_list);
-+ bl[i].bgid = i;
- }
-
-+ smp_store_release(&ctx->io_bl, bl);
- return 0;
- }
-
-+/*
-+ * Mark the given mapped range as free for reuse
-+ */
-+static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
-+{
-+ struct io_buf_free *ibf;
-+
-+ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
-+ if (bl->buf_ring == ibf->mem) {
-+ ibf->inuse = 0;
-+ return;
-+ }
-+ }
-+
-+ /* can't happen... */
-+ WARN_ON_ONCE(1);
-+}
-+
- static int __io_remove_buffers(struct io_ring_ctx *ctx,
- struct io_buffer_list *bl, unsigned nbufs)
- {
-@@ -218,7 +263,11 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
- if (bl->is_mapped) {
- i = bl->buf_ring->tail - bl->head;
- if (bl->is_mmap) {
-- folio_put(virt_to_folio(bl->buf_ring));
-+ /*
-+ * io_kbuf_list_free() will free the page(s) at
-+ * ->release() time.
-+ */
-+ io_kbuf_mark_free(ctx, bl);
- bl->buf_ring = NULL;
- bl->is_mmap = 0;
- } else if (bl->buf_nr_pages) {
-@@ -267,7 +316,7 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
- xa_for_each(&ctx->io_bl_xa, index, bl) {
- xa_erase(&ctx->io_bl_xa, bl->bgid);
- __io_remove_buffers(ctx, bl, -1U);
-- kfree(bl);
-+ kfree_rcu(bl, rcu);
- }
-
- while (!list_empty(&ctx->io_buffers_pages)) {
-@@ -289,7 +338,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
- return -EINVAL;
-
- tmp = READ_ONCE(sqe->fd);
-- if (!tmp || tmp > USHRT_MAX)
-+ if (!tmp || tmp > MAX_BIDS_PER_BGID)
- return -EINVAL;
-
- memset(p, 0, sizeof(*p));
-@@ -332,7 +381,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
- return -EINVAL;
-
- tmp = READ_ONCE(sqe->fd);
-- if (!tmp || tmp > USHRT_MAX)
-+ if (!tmp || tmp > MAX_BIDS_PER_BGID)
- return -E2BIG;
- p->nbufs = tmp;
- p->addr = READ_ONCE(sqe->addr);
-@@ -352,7 +401,7 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
- tmp = READ_ONCE(sqe->off);
- if (tmp > USHRT_MAX)
- return -E2BIG;
-- if (tmp + p->nbufs >= USHRT_MAX)
-+ if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
- return -EINVAL;
- p->bid = tmp;
- return 0;
-@@ -452,7 +501,16 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
- INIT_LIST_HEAD(&bl->buf_list);
- ret = io_buffer_add_list(ctx, bl, p->bgid);
- if (ret) {
-- kfree(bl);
-+ /*
-+ * Doesn't need rcu free as it was never visible, but
-+ * let's keep it consistent throughout. Also, this can't
-+ * be a lower-indexed array group, since adding one after
-+ * a failed lookup cannot happen there.
-+ */
-+ if (p->bgid >= BGID_ARRAY)
-+ kfree_rcu(bl, rcu);
-+ else
-+ WARN_ON_ONCE(1);
- goto err;
- }
- }
-@@ -523,19 +581,63 @@ error_unpin:
- return -EINVAL;
- }
-
--static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
-+/*
-+ * See if we have a suitable region that we can reuse, rather than allocate
-+ * both a new io_buf_free and mem region again. We leave it on the list as
-+ * even a reused entry will need freeing at ring release.
-+ */
-+static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
-+ size_t ring_size)
-+{
-+ struct io_buf_free *ibf, *best = NULL;
-+ size_t best_dist;
-+
-+ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
-+ size_t dist;
-+
-+ if (ibf->inuse || ibf->size < ring_size)
-+ continue;
-+ dist = ibf->size - ring_size;
-+ if (!best || dist < best_dist) {
-+ best = ibf;
-+ if (!dist)
-+ break;
-+ best_dist = dist;
-+ }
-+ }
-+
-+ return best;
-+}
-+
-+static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
-+ struct io_uring_buf_reg *reg,
- struct io_buffer_list *bl)
- {
-- gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
-+ struct io_buf_free *ibf;
- size_t ring_size;
- void *ptr;
-
- ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
-- ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
-- if (!ptr)
-- return -ENOMEM;
-
-- bl->buf_ring = ptr;
-+ /* Reuse existing entry, if we can */
-+ ibf = io_lookup_buf_free_entry(ctx, ring_size);
-+ if (!ibf) {
-+ ptr = io_mem_alloc(ring_size);
-+ if (!ptr)
-+ return -ENOMEM;
-+
-+ /* Allocate and store deferred free entry */
-+ ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
-+ if (!ibf) {
-+ io_mem_free(ptr);
-+ return -ENOMEM;
-+ }
-+ ibf->mem = ptr;
-+ ibf->size = ring_size;
-+ hlist_add_head(&ibf->list, &ctx->io_buf_list);
-+ }
-+ ibf->inuse = 1;
-+ bl->buf_ring = ibf->mem;
- bl->is_mapped = 1;
- bl->is_mmap = 1;
- return 0;
-@@ -547,6 +649,8 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
- struct io_buffer_list *bl, *free_bl = NULL;
- int ret;
-
-+ lockdep_assert_held(&ctx->uring_lock);
-+
- if (copy_from_user(&reg, arg, sizeof(reg)))
- return -EFAULT;
-
-@@ -591,7 +695,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
- if (!(reg.flags & IOU_PBUF_RING_MMAP))
- ret = io_pin_pbuf_ring(&reg, bl);
- else
-- ret = io_alloc_pbuf_ring(&reg, bl);
-+ ret = io_alloc_pbuf_ring(ctx, &reg, bl);
-
- if (!ret) {
- bl->nr_entries = reg.ring_entries;
-@@ -601,7 +705,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
- return 0;
- }
-
-- kfree(free_bl);
-+ kfree_rcu(free_bl, rcu);
- return ret;
- }
-
-@@ -610,6 +714,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
- struct io_uring_buf_reg reg;
- struct io_buffer_list *bl;
-
-+ lockdep_assert_held(&ctx->uring_lock);
-+
- if (copy_from_user(&reg, arg, sizeof(reg)))
- return -EFAULT;
- if (reg.resv[0] || reg.resv[1] || reg.resv[2])
-@@ -626,7 +732,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
- __io_remove_buffers(ctx, bl, -1U);
- if (bl->bgid >= BGID_ARRAY) {
- xa_erase(&ctx->io_bl_xa, bl->bgid);
-- kfree(bl);
-+ kfree_rcu(bl, rcu);
- }
- return 0;
- }
-@@ -635,9 +741,33 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
- {
- struct io_buffer_list *bl;
-
-- bl = io_buffer_get_list(ctx, bgid);
-+ bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
-+
- if (!bl || !bl->is_mmap)
- return NULL;
-+ /*
-+ * Ensure the list is fully setup. Only strictly needed for RCU lookup
-+ * via mmap, and in that case only for the array indexed groups. For
-+ * the xarray lookups, it's either visible and ready, or not at all.
-+ */
-+ if (!smp_load_acquire(&bl->is_ready))
-+ return NULL;
-
- return bl->buf_ring;
- }
-+
-+/*
-+ * Called at or after ->release(), free the mmap'ed buffers that we used
-+ * for memory mapped provided buffer rings.
-+ */
-+void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
-+{
-+ struct io_buf_free *ibf;
-+ struct hlist_node *tmp;
-+
-+ hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
-+ hlist_del(&ibf->list);
-+ io_mem_free(ibf->mem);
-+ kfree(ibf);
-+ }
-+}
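The kbuf.c hunks above trade the mmap-side uring_lock for an RCU lookup, and the correctness of that trade rests on one ordering pair: the smp_store_release() of bl->is_ready in io_buffer_add_list() pairs with the smp_load_acquire() in io_pbuf_get_address(), so a reader that observes is_ready set is guaranteed to also observe the earlier bgid store. A minimal userspace sketch of the same publication pattern, using C11 atomics in place of the kernel primitives (all names here are illustrative, not from the patch):

    #include <stdatomic.h>

    struct buf_list {
            unsigned int bgid;      /* plain field, written before publish */
            atomic_int   is_ready;  /* publish flag */
    };

    static void publish(struct buf_list *bl, unsigned int bgid)
    {
            bl->bgid = bgid;                       /* initialise first... */
            atomic_store_explicit(&bl->is_ready, 1,
                                  memory_order_release);  /* ...then publish */
    }

    static int lookup_bgid(struct buf_list *bl, unsigned int *out)
    {
            /* the acquire load pairs with the release store above */
            if (!atomic_load_explicit(&bl->is_ready, memory_order_acquire))
                    return -1;
            *out = bl->bgid;  /* guaranteed to see the pre-publish store */
            return 0;
    }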
-diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
-index d14345ef61fc8..3d0cb6b8c1ed2 100644
---- a/io_uring/kbuf.h
-+++ b/io_uring/kbuf.h
-@@ -15,6 +15,7 @@ struct io_buffer_list {
- struct page **buf_pages;
- struct io_uring_buf_ring *buf_ring;
- };
-+ struct rcu_head rcu;
- };
- __u16 bgid;
-
-@@ -28,6 +29,8 @@ struct io_buffer_list {
- __u8 is_mapped;
- /* ring mapped provided buffers, but mmap'ed by application */
- __u8 is_mmap;
-+ /* bl is visible from an RCU point of view for lookup */
-+ __u8 is_ready;
- };
-
- struct io_buffer {
-@@ -51,6 +54,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
- int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
- int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
-
-+void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
-+
- unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
-
- void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
-diff --git a/io_uring/net.c b/io_uring/net.c
-index 7a8e298af81b3..75d494dad7e2c 100644
---- a/io_uring/net.c
-+++ b/io_uring/net.c
-@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
- int ret;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
-
-- if (connect->in_progress) {
-- struct socket *socket;
--
-- ret = -ENOTSOCK;
-- socket = sock_from_file(req->file);
-- if (socket)
-- ret = sock_error(socket->sk);
-- goto out;
-- }
--
- if (req_has_async_data(req)) {
- io = req->async_data;
- } else {
-@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
- && force_nonblock) {
- if (ret == -EINPROGRESS) {
- connect->in_progress = true;
-- return -EAGAIN;
-- }
-- if (ret == -ECONNABORTED) {
-+ } else if (ret == -ECONNABORTED) {
- if (connect->seen_econnaborted)
- goto out;
- connect->seen_econnaborted = true;
-@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
- memcpy(req->async_data, &__io, sizeof(__io));
- return -EAGAIN;
- }
-+ if (connect->in_progress) {
-+ /*
-+ * At least bluetooth will return -EBADFD on a re-connect
-+ * attempt, and it's (supposedly) also valid to get -EISCONN
-+ * which means the previous result is good. For both of these,
-+ * grab the sock_error() and use that for the completion.
-+ */
-+ if (ret == -EBADFD || ret == -EISCONN)
-+ ret = sock_error(sock_from_file(req->file)->sk);
-+ }
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- out:
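Restated outside the diff, the io_connect() rework above boils down to a small completion rule: once a connect has been marked in-flight, a re-issued attempt that returns -EBADFD or -EISCONN should complete with the socket's recorded error rather than with that return value. A hedged sketch of just that rule (function and parameter names are invented for illustration):

    #include <errno.h>

    /* in_progress: a previous attempt returned -EINPROGRESS.
     * ret:         result of the re-issued connect attempt.
     * sk_err:      the socket's recorded error, 0 on success. */
    static int connect_result(int in_progress, int ret, int sk_err)
    {
            if (in_progress && (ret == -EBADFD || ret == -EISCONN))
                    return -sk_err; /* 0 when the connect actually completed */
            return ret;
    }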
-diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
-index d9c853d105878..dde501abd7196 100644
---- a/io_uring/rsrc.c
-+++ b/io_uring/rsrc.c
-@@ -1261,7 +1261,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
- */
- const struct bio_vec *bvec = imu->bvec;
-
-- if (offset <= bvec->bv_len) {
-+ if (offset < bvec->bv_len) {
- /*
- * Note, huge page buffers consist of one large
- * bvec entry and should always go this way. The other
-diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
-index bd6c2c7959a5b..65b5dbe3c850e 100644
---- a/io_uring/sqpoll.c
-+++ b/io_uring/sqpoll.c
-@@ -214,6 +214,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
- did_sig = get_signal(&ksig);
- cond_resched();
- mutex_lock(&sqd->lock);
-+ sqd->sq_cpu = raw_smp_processor_id();
- }
- return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
- }
-@@ -229,10 +230,15 @@ static int io_sq_thread(void *data)
- snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
- set_task_comm(current, buf);
-
-- if (sqd->sq_cpu != -1)
-+ /* reset to our pid after we've set task_comm, for fdinfo */
-+ sqd->task_pid = current->pid;
-+
-+ if (sqd->sq_cpu != -1) {
- set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
-- else
-+ } else {
- set_cpus_allowed_ptr(current, cpu_online_mask);
-+ sqd->sq_cpu = raw_smp_processor_id();
-+ }
-
- mutex_lock(&sqd->lock);
- while (1) {
-@@ -261,6 +267,7 @@ static int io_sq_thread(void *data)
- mutex_unlock(&sqd->lock);
- cond_resched();
- mutex_lock(&sqd->lock);
-+ sqd->sq_cpu = raw_smp_processor_id();
- }
- continue;
- }
-@@ -294,6 +301,7 @@ static int io_sq_thread(void *data)
- mutex_unlock(&sqd->lock);
- schedule();
- mutex_lock(&sqd->lock);
-+ sqd->sq_cpu = raw_smp_processor_id();
- }
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- atomic_andnot(IORING_SQ_NEED_WAKEUP,
-diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
-index 65075f1e4ac8c..7a98cd176a127 100644
---- a/kernel/audit_watch.c
-+++ b/kernel/audit_watch.c
-@@ -527,11 +527,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
- unsigned long ino;
- dev_t dev;
-
-- exe_file = get_task_exe_file(tsk);
-+ /* only do exe filtering if we are recording @current events/records */
-+ if (tsk != current)
-+ return 0;
-+
-+ if (!current->mm)
-+ return 0;
-+ exe_file = get_mm_exe_file(current->mm);
- if (!exe_file)
- return 0;
- ino = file_inode(exe_file)->i_ino;
- dev = file_inode(exe_file)->i_sb->s_dev;
- fput(exe_file);
-+
- return audit_mark_compare(mark, ino, dev);
- }
-diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index 4e3ce0542e31f..64fcd81ad3da4 100644
---- a/kernel/bpf/core.c
-+++ b/kernel/bpf/core.c
-@@ -623,7 +623,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
-
- if (val < ksym->start)
- return -1;
-- if (val >= ksym->end)
-+ /* Ensure that we detect return addresses as part of the program when
-+ * the final instruction is a call in a program that is part of the
-+ * stack trace. Therefore, compare val > ksym->end rather than val >= ksym->end.
-+ */
-+ if (val > ksym->end)
- return 1;
-
- return 0;
-diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
-index a8c7e1c5abfac..fd8d4b0addfca 100644
---- a/kernel/bpf/hashtab.c
-+++ b/kernel/bpf/hashtab.c
-@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
- hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-
- preempt_disable();
-+ local_irq_save(flags);
- if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
- __this_cpu_dec(*(htab->map_locked[hash]));
-+ local_irq_restore(flags);
- preempt_enable();
- return -EBUSY;
- }
-
-- raw_spin_lock_irqsave(&b->raw_lock, flags);
-+ raw_spin_lock(&b->raw_lock);
- *pflags = flags;
-
- return 0;
-@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
- unsigned long flags)
- {
- hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-- raw_spin_unlock_irqrestore(&b->raw_lock, flags);
-+ raw_spin_unlock(&b->raw_lock);
- __this_cpu_dec(*(htab->map_locked[hash]));
-+ local_irq_restore(flags);
- preempt_enable();
- }
-
-diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
-index 8bd3812fb8df4..607be04db75b9 100644
---- a/kernel/bpf/helpers.c
-+++ b/kernel/bpf/helpers.c
-@@ -1176,13 +1176,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
- ret = -EBUSY;
- goto out;
- }
-- if (!atomic64_read(&map->usercnt)) {
-- /* maps with timers must be either held by user space
-- * or pinned in bpffs.
-- */
-- ret = -EPERM;
-- goto out;
-- }
- /* allocate hrtimer via map_kmalloc to use memcg accounting */
- t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
- if (!t) {
-@@ -1195,7 +1188,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
- rcu_assign_pointer(t->callback_fn, NULL);
- hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
- t->timer.function = bpf_timer_cb;
-- timer->timer = t;
-+ WRITE_ONCE(timer->timer, t);
-+ /* Guarantee the order between timer->timer and map->usercnt. So
-+ * when there are concurrent uref release and bpf timer init, either
-+ * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
-+ * timer or atomic64_read() below returns a zero usercnt.
-+ */
-+ smp_mb();
-+ if (!atomic64_read(&map->usercnt)) {
-+ /* maps with timers must be either held by user space
-+ * or pinned in bpffs.
-+ */
-+ WRITE_ONCE(timer->timer, NULL);
-+ kfree(t);
-+ ret = -EPERM;
-+ }
- out:
- __bpf_spin_unlock_irqrestore(&timer->lock);
- return ret;
-@@ -1370,7 +1377,7 @@ void bpf_timer_cancel_and_free(void *val)
- /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
- * this timer, since it won't be initialized.
- */
-- timer->timer = NULL;
-+ WRITE_ONCE(timer->timer, NULL);
- out:
- __bpf_spin_unlock_irqrestore(&timer->lock);
- if (!t)
-@@ -2197,7 +2204,12 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
- __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
- struct cgroup *ancestor)
- {
-- return task_under_cgroup_hierarchy(task, ancestor);
-+ long ret;
-+
-+ rcu_read_lock();
-+ ret = task_under_cgroup_hierarchy(task, ancestor);
-+ rcu_read_unlock();
-+ return ret;
- }
- #endif /* CONFIG_CGROUPS */
-
-diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
-index d93ddac283d40..956f80ee6f5c5 100644
---- a/kernel/bpf/memalloc.c
-+++ b/kernel/bpf/memalloc.c
-@@ -958,6 +958,8 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
- memcg = get_memcg(c);
- old_memcg = set_active_memcg(memcg);
- ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
-+ if (ret)
-+ *(struct bpf_mem_cache **)ret = c;
- set_active_memcg(old_memcg);
- mem_cgroup_put(memcg);
- }
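The one-line memalloc.c fix stores the owning cache pointer into the freshly allocated unit because, in this allocator, the free path recovers the cache from a hidden header at the start of the object; the direct-allocation path had been leaving that header unset. The general pattern, sketched in plain C (layout and names are illustrative, not the kernel's):

    #include <stdlib.h>

    struct cache;                                   /* opaque owner */
    struct unit_hdr { struct cache *owner; };

    /* Allocate a unit with a hidden header; return the payload pointer. */
    static void *unit_alloc(struct cache *c, size_t payload)
    {
            struct unit_hdr *h = malloc(sizeof(*h) + payload);

            if (!h)
                    return NULL;
            h->owner = c;           /* the line the fix adds, in spirit */
            return h + 1;
    }

    /* The free path finds its cache by stepping back to the header. */
    static struct cache *unit_owner(void *payload)
    {
            return ((struct unit_hdr *)payload - 1)->owner;
    }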
-diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
-index 53ff50cac61ea..e97aeda3a86b5 100644
---- a/kernel/bpf/trampoline.c
-+++ b/kernel/bpf/trampoline.c
-@@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
- goto out;
- }
-
-- /* clear all bits except SHARE_IPMODIFY */
-- tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
-+ /* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
-+ tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
-
- if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
- tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index 873ade146f3de..824531d4c262a 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -1515,7 +1515,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
- if (state->in_async_callback_fn)
- verbose(env, " async_cb");
- verbose(env, "\n");
-- mark_verifier_state_clean(env);
-+ if (!print_all)
-+ mark_verifier_state_clean(env);
- }
-
- static inline u32 vlog_alignment(u32 pos)
-@@ -3200,12 +3201,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
-
- /* Backtrack one insn at a time. If idx is not at the top of recorded
- * history then previous instruction came from straight line execution.
-+ * Return -ENOENT if we exhausted all instructions within given state.
-+ *
-+ * It's legal to have a bit of looping with the same starting and ending
-+ * insn index within the same state, e.g.: 3->4->5->3, so just because current
-+ * instruction index is the same as state's first_idx doesn't mean we are
-+ * done. If there is still some jump history left, we should keep going. We
-+ * need to take into account that we might have a jump history between given
-+ * state's parent and itself, due to checkpointing. In this case, we'll have
-+ * history entry recording a jump from last instruction of parent state and
-+ * first instruction of given state.
- */
- static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
- u32 *history)
- {
- u32 cnt = *history;
-
-+ if (i == st->first_insn_idx) {
-+ if (cnt == 0)
-+ return -ENOENT;
-+ if (cnt == 1 && st->jmp_history[0].idx == i)
-+ return -ENOENT;
-+ }
-+
- if (cnt && st->jmp_history[cnt - 1].idx == i) {
- i = st->jmp_history[cnt - 1].prev_idx;
- (*history)--;
-@@ -3426,7 +3444,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
- if (class == BPF_ALU || class == BPF_ALU64) {
- if (!bt_is_reg_set(bt, dreg))
- return 0;
-- if (opcode == BPF_MOV) {
-+ if (opcode == BPF_END || opcode == BPF_NEG) {
-+ /* sreg is reserved and unused
-+ * dreg still needs precision before this insn
-+ */
-+ return 0;
-+ } else if (opcode == BPF_MOV) {
- if (BPF_SRC(insn->code) == BPF_X) {
- /* dreg = sreg or dreg = (s8, s16, s32)sreg
- * dreg needs precision after this insn
-@@ -4080,10 +4103,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
- * Nothing to be tracked further in the parent state.
- */
- return 0;
-- if (i == first_idx)
-- break;
- subseq_idx = i;
- i = get_prev_insn_idx(st, i, &history);
-+ if (i == -ENOENT)
-+ break;
- if (i >= env->prog->len) {
- /* This can happen if backtracking reached insn 0
- * and there are still reg_mask or stack_mask
-@@ -4358,7 +4381,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
- insn->imm != 0 && env->bpf_capable) {
- struct bpf_reg_state fake_reg = {};
-
-- __mark_reg_known(&fake_reg, (u32)insn->imm);
-+ __mark_reg_known(&fake_reg, insn->imm);
- fake_reg.type = SCALAR_VALUE;
- save_register_state(state, spi, &fake_reg, size);
- } else if (reg && is_spillable_regtype(reg->type)) {
-@@ -11202,6 +11225,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
- break;
- }
- case KF_ARG_PTR_TO_CALLBACK:
-+ if (reg->type != PTR_TO_FUNC) {
-+ verbose(env, "arg%d expected pointer to func\n", i);
-+ return -EINVAL;
-+ }
- meta->subprogno = reg->subprogno;
- break;
- case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
-@@ -14135,6 +14162,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
- !sanitize_speculative_path(env, insn, *insn_idx + 1,
- *insn_idx))
- return -EFAULT;
-+ if (env->log.level & BPF_LOG_LEVEL)
-+ print_insn_state(env, this_branch->frame[this_branch->curframe]);
- *insn_idx += insn->off;
- return 0;
- } else if (pred == 0) {
-@@ -14147,6 +14176,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
- *insn_idx + insn->off + 1,
- *insn_idx))
- return -EFAULT;
-+ if (env->log.level & BPF_LOG_LEVEL)
-+ print_insn_state(env, this_branch->frame[this_branch->curframe]);
- return 0;
- }
-
-@@ -14725,8 +14756,7 @@ enum {
- * w - next instruction
- * e - edge
- */
--static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
-- bool loop_ok)
-+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
- {
- int *insn_stack = env->cfg.insn_stack;
- int *insn_state = env->cfg.insn_state;
-@@ -14758,7 +14788,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
- insn_stack[env->cfg.cur_stack++] = w;
- return KEEP_EXPLORING;
- } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
-- if (loop_ok && env->bpf_capable)
-+ if (env->bpf_capable)
- return DONE_EXPLORING;
- verbose_linfo(env, t, "%d: ", t);
- verbose_linfo(env, w, "%d: ", w);
-@@ -14778,24 +14808,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
- struct bpf_verifier_env *env,
- bool visit_callee)
- {
-- int ret;
-+ int ret, insn_sz;
-
-- ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
-+ insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
-+ ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
- if (ret)
- return ret;
-
-- mark_prune_point(env, t + 1);
-+ mark_prune_point(env, t + insn_sz);
- /* when we exit from subprog, we need to record non-linear history */
-- mark_jmp_point(env, t + 1);
-+ mark_jmp_point(env, t + insn_sz);
-
- if (visit_callee) {
- mark_prune_point(env, t);
-- ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
-- /* It's ok to allow recursion from CFG point of
-- * view. __check_func_call() will do the actual
-- * check.
-- */
-- bpf_pseudo_func(insns + t));
-+ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
- }
- return ret;
- }
-@@ -14808,15 +14834,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
- static int visit_insn(int t, struct bpf_verifier_env *env)
- {
- struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-- int ret, off;
-+ int ret, off, insn_sz;
-
- if (bpf_pseudo_func(insn))
- return visit_func_call_insn(t, insns, env, true);
-
- /* All non-branch instructions have a single fall-through edge. */
- if (BPF_CLASS(insn->code) != BPF_JMP &&
-- BPF_CLASS(insn->code) != BPF_JMP32)
-- return push_insn(t, t + 1, FALLTHROUGH, env, false);
-+ BPF_CLASS(insn->code) != BPF_JMP32) {
-+ insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
-+ return push_insn(t, t + insn_sz, FALLTHROUGH, env);
-+ }
-
- switch (BPF_OP(insn->code)) {
- case BPF_EXIT:
-@@ -14862,8 +14890,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
- off = insn->imm;
-
- /* unconditional jump with single edge */
-- ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
-- true);
-+ ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
- if (ret)
- return ret;
-
-@@ -14876,11 +14903,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
- /* conditional jump with two edges */
- mark_prune_point(env, t);
-
-- ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
-+ ret = push_insn(t, t + 1, FALLTHROUGH, env);
- if (ret)
- return ret;
-
-- return push_insn(t, t + insn->off + 1, BRANCH, env, true);
-+ return push_insn(t, t + insn->off + 1, BRANCH, env);
- }
- }
-
-@@ -14935,11 +14962,21 @@ static int check_cfg(struct bpf_verifier_env *env)
- }
-
- for (i = 0; i < insn_cnt; i++) {
-+ struct bpf_insn *insn = &env->prog->insnsi[i];
-+
- if (insn_state[i] != EXPLORED) {
- verbose(env, "unreachable insn %d\n", i);
- ret = -EINVAL;
- goto err_free;
- }
-+ if (bpf_is_ldimm64(insn)) {
-+ if (insn_state[i + 1] != 0) {
-+ verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
-+ ret = -EINVAL;
-+ goto err_free;
-+ }
-+ i++; /* skip second half of ldimm64 */
-+ }
- }
- ret = 0; /* cfg looks good */
-
-@@ -19641,6 +19678,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
- if (!tr)
- return -ENOMEM;
-
-+ if (tgt_prog && tgt_prog->aux->tail_call_reachable)
-+ tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
-+
- prog->aux->dst_trampoline = tr;
- return 0;
- }
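Several of the verifier hunks above exist because BPF_LD_IMM64 is the one instruction that occupies two 8-byte slots: the CFG walk must advance by two over it, and check_cfg() must reject jumps that land on the pseudo second slot. A toy walk over such a stream (a sketch with an invented instruction encoding):

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal model: an ldimm64 "first half" owns the following slot. */
    struct insn { bool is_ldimm64_first; };

    static int step(const struct insn *insns, int t)
    {
            return t + (insns[t].is_ldimm64_first ? 2 : 1);
    }

    int main(void)
    {
            struct insn prog[] = {
                    { false },
                    { true },
                    { false },  /* second half: never visited directly */
                    { false },
            };

            for (int t = 0; t < 4; t = step(prog, t))
                    printf("visit insn %d\n", t);  /* 0, 1, 3 */
            return 0;
    }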
-diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
-index 1fb7f562289d5..518725b57200c 100644
---- a/kernel/cgroup/cgroup.c
-+++ b/kernel/cgroup/cgroup.c
-@@ -3867,14 +3867,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
- return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
- }
-
--static int cgroup_pressure_open(struct kernfs_open_file *of)
--{
-- if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
-- return -EPERM;
--
-- return 0;
--}
--
- static void cgroup_pressure_release(struct kernfs_open_file *of)
- {
- struct cgroup_file_ctx *ctx = of->priv;
-@@ -5275,7 +5267,6 @@ static struct cftype cgroup_psi_files[] = {
- {
- .name = "io.pressure",
- .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
-- .open = cgroup_pressure_open,
- .seq_show = cgroup_io_pressure_show,
- .write = cgroup_io_pressure_write,
- .poll = cgroup_pressure_poll,
-@@ -5284,7 +5275,6 @@ static struct cftype cgroup_psi_files[] = {
- {
- .name = "memory.pressure",
- .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
-- .open = cgroup_pressure_open,
- .seq_show = cgroup_memory_pressure_show,
- .write = cgroup_memory_pressure_write,
- .poll = cgroup_pressure_poll,
-@@ -5293,7 +5283,6 @@ static struct cftype cgroup_psi_files[] = {
- {
- .name = "cpu.pressure",
- .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
-- .open = cgroup_pressure_open,
- .seq_show = cgroup_cpu_pressure_show,
- .write = cgroup_cpu_pressure_write,
- .poll = cgroup_pressure_poll,
-@@ -5303,7 +5292,6 @@ static struct cftype cgroup_psi_files[] = {
- {
- .name = "irq.pressure",
- .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
-- .open = cgroup_pressure_open,
- .seq_show = cgroup_irq_pressure_show,
- .write = cgroup_irq_pressure_write,
- .poll = cgroup_pressure_poll,
-diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index 58ec88efa4f82..4749e0c86c62c 100644
---- a/kernel/cgroup/cpuset.c
-+++ b/kernel/cgroup/cpuset.c
-@@ -1304,13 +1304,23 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
- *
- * Changing load balance flag will automatically call
- * rebuild_sched_domains_locked().
-+ * This function is for cgroup v2 only.
- */
- static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
- {
- int new_prs = cs->partition_root_state;
-- bool new_lb = (new_prs != PRS_ISOLATED);
- bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
-+ bool new_lb;
-
-+ /*
-+ * If cs is not a valid partition root, the load balance state
-+ * will follow its parent.
-+ */
-+ if (new_prs > 0) {
-+ new_lb = (new_prs != PRS_ISOLATED);
-+ } else {
-+ new_lb = is_sched_load_balance(parent_cs(cs));
-+ }
- if (new_lb != !!is_sched_load_balance(cs)) {
- rebuild_domains = true;
- if (new_lb)
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 6de7c6bb74eee..303cb0591b4b1 100644
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -659,11 +659,19 @@ static inline bool cpu_smt_thread_allowed(unsigned int cpu)
- #endif
- }
-
--static inline bool cpu_smt_allowed(unsigned int cpu)
-+static inline bool cpu_bootable(unsigned int cpu)
- {
- if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
- return true;
-
-+ /* All CPUs are bootable if controls are not configured */
-+ if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
-+ return true;
-+
-+ /* All CPUs are bootable if CPU is not SMT capable */
-+ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
-+ return true;
-+
- if (topology_is_primary_thread(cpu))
- return true;
-
-@@ -685,7 +693,7 @@ bool cpu_smt_possible(void)
- EXPORT_SYMBOL_GPL(cpu_smt_possible);
-
- #else
--static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
-+static inline bool cpu_bootable(unsigned int cpu) { return true; }
- #endif
-
- static inline enum cpuhp_state
-@@ -788,10 +796,10 @@ static int bringup_wait_for_ap_online(unsigned int cpu)
- * SMT soft disabling on X86 requires to bring the CPU out of the
- * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
- * CPU marked itself as booted_once in notify_cpu_starting() so the
-- * cpu_smt_allowed() check will now return false if this is not the
-+ * cpu_bootable() check will now return false if this is not the
- * primary sibling.
- */
-- if (!cpu_smt_allowed(cpu))
-+ if (!cpu_bootable(cpu))
- return -ECANCELED;
- return 0;
- }
-@@ -1515,11 +1523,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
- /*
- * Ensure that the control task does not run on the to be offlined
- * CPU to prevent a deadlock against cfs_b->period_timer.
-+ * Also keep at least one housekeeping cpu online to avoid generating
-+ * an empty sched_domain span.
- */
-- cpu = cpumask_any_but(cpu_online_mask, cpu);
-- if (cpu >= nr_cpu_ids)
-- return -EBUSY;
-- return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
-+ for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
-+ if (cpu != work.cpu)
-+ return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
-+ }
-+ return -EBUSY;
- }
-
- static int cpu_down(unsigned int cpu, enum cpuhp_state target)
-@@ -1741,7 +1752,7 @@ static int cpu_up(unsigned int cpu, enum cpuhp_state target)
- err = -EBUSY;
- goto out;
- }
-- if (!cpu_smt_allowed(cpu)) {
-+ if (!cpu_bootable(cpu)) {
- err = -EPERM;
- goto out;
- }
-diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
-index 621037a0aa870..ce1bb2301c061 100644
---- a/kernel/debug/debug_core.c
-+++ b/kernel/debug/debug_core.c
-@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
- if (panic_timeout)
- return;
-
-+ debug_locks_off();
-+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
-+
- if (dbg_kdb_mode)
- kdb_printf("PANIC: %s\n", msg);
-
-diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
-index dff067bd56b1e..2048194a03bed 100644
---- a/kernel/dma/swiotlb.c
-+++ b/kernel/dma/swiotlb.c
-@@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
- }
-
- for (i = 0; i < mem->nslabs; i++) {
-- mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
-+ mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
-+ mem->nslabs - i);
- mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
- mem->slots[i].alloc_size = 0;
- }
-@@ -558,29 +559,40 @@ void __init swiotlb_exit(void)
- * alloc_dma_pages() - allocate pages to be used for DMA
- * @gfp: GFP flags for the allocation.
- * @bytes: Size of the buffer.
-+ * @phys_limit: Maximum allowed physical address of the buffer.
- *
- * Allocate pages from the buddy allocator. If successful, make the allocated
- * pages decrypted so that they can be used for DMA.
- *
-- * Return: Decrypted pages, or %NULL on failure.
-+ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
-+ * if the allocated physical address was above @phys_limit.
- */
--static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
-+static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
- {
- unsigned int order = get_order(bytes);
- struct page *page;
-+ phys_addr_t paddr;
- void *vaddr;
-
- page = alloc_pages(gfp, order);
- if (!page)
- return NULL;
-
-- vaddr = page_address(page);
-+ paddr = page_to_phys(page);
-+ if (paddr + bytes - 1 > phys_limit) {
-+ __free_pages(page, order);
-+ return ERR_PTR(-EAGAIN);
-+ }
-+
-+ vaddr = phys_to_virt(paddr);
- if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
- goto error;
- return page;
-
- error:
-- __free_pages(page, order);
-+ /* Intentional leak if pages cannot be encrypted again. */
-+ if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
-+ __free_pages(page, order);
- return NULL;
- }
-
-@@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
- else if (phys_limit <= DMA_BIT_MASK(32))
- gfp |= __GFP_DMA32;
-
-- while ((page = alloc_dma_pages(gfp, bytes)) &&
-- page_to_phys(page) + bytes - 1 > phys_limit) {
-- /* allocated, but too high */
-- __free_pages(page, get_order(bytes));
--
-+ while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
- if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- phys_limit < DMA_BIT_MASK(64) &&
- !(gfp & (__GFP_DMA32 | __GFP_DMA)))
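The reworked alloc_dma_pages() above distinguishes two failure modes through the kernel's ERR_PTR convention: NULL means the allocator is exhausted (give up), while ERR_PTR(-EAGAIN) means the pages landed above phys_limit (free them and retry in a more restrictive zone). A userspace approximation of that contract, with virtual addresses standing in for physical ones purely for illustration:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline int IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-4095;
    }

    static void *alloc_below(size_t bytes, uintptr_t phys_limit)
    {
            void *p = malloc(bytes);

            if (!p)
                    return NULL;                    /* hard failure: stop */
            if ((uintptr_t)p + bytes - 1 > phys_limit) {
                    free(p);
                    return ERR_PTR(-EAGAIN);        /* soft failure: retry */
            }
            return p;
    }

The caller then loops while IS_ERR(p = alloc_below(...)), tightening the zone flags on each round, which is exactly the shape of the rewritten loop in swiotlb_alloc_tlb().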
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index a2f2a9525d72e..6dbb03c532375 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -375,6 +375,7 @@ enum event_type_t {
- EVENT_TIME = 0x4,
- /* see ctx_resched() for details */
- EVENT_CPU = 0x8,
-+ EVENT_CGROUP = 0x10,
- EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
- };
-
-@@ -684,20 +685,26 @@ do { \
- ___p; \
- })
-
--static void perf_ctx_disable(struct perf_event_context *ctx)
-+static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
- {
- struct perf_event_pmu_context *pmu_ctx;
-
-- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
-+ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-+ if (cgroup && !pmu_ctx->nr_cgroups)
-+ continue;
- perf_pmu_disable(pmu_ctx->pmu);
-+ }
- }
-
--static void perf_ctx_enable(struct perf_event_context *ctx)
-+static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
- {
- struct perf_event_pmu_context *pmu_ctx;
-
-- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
-+ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-+ if (cgroup && !pmu_ctx->nr_cgroups)
-+ continue;
- perf_pmu_enable(pmu_ctx->pmu);
-+ }
- }
-
- static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
-@@ -856,9 +863,9 @@ static void perf_cgroup_switch(struct task_struct *task)
- return;
-
- perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-- perf_ctx_disable(&cpuctx->ctx);
-+ perf_ctx_disable(&cpuctx->ctx, true);
-
-- ctx_sched_out(&cpuctx->ctx, EVENT_ALL);
-+ ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
- /*
- * must not be done before ctxswout due
- * to update_cgrp_time_from_cpuctx() in
-@@ -870,9 +877,9 @@ static void perf_cgroup_switch(struct task_struct *task)
- * perf_cgroup_set_timestamp() in ctx_sched_in()
- * to not have to pass task around
- */
-- ctx_sched_in(&cpuctx->ctx, EVENT_ALL);
-+ ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
-
-- perf_ctx_enable(&cpuctx->ctx);
-+ perf_ctx_enable(&cpuctx->ctx, true);
- perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
- }
-
-@@ -965,6 +972,8 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
- if (!is_cgroup_event(event))
- return;
-
-+ event->pmu_ctx->nr_cgroups++;
-+
- /*
- * Because cgroup events are always per-cpu events,
- * @ctx == &cpuctx->ctx.
-@@ -985,6 +994,8 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
- if (!is_cgroup_event(event))
- return;
-
-+ event->pmu_ctx->nr_cgroups--;
-+
- /*
- * Because cgroup events are always per-cpu events,
- * @ctx == &cpuctx->ctx.
-@@ -2679,9 +2690,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
-
- event_type &= EVENT_ALL;
-
-- perf_ctx_disable(&cpuctx->ctx);
-+ perf_ctx_disable(&cpuctx->ctx, false);
- if (task_ctx) {
-- perf_ctx_disable(task_ctx);
-+ perf_ctx_disable(task_ctx, false);
- task_ctx_sched_out(task_ctx, event_type);
- }
-
-@@ -2699,9 +2710,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
-
- perf_event_sched_in(cpuctx, task_ctx);
-
-- perf_ctx_enable(&cpuctx->ctx);
-+ perf_ctx_enable(&cpuctx->ctx, false);
- if (task_ctx)
-- perf_ctx_enable(task_ctx);
-+ perf_ctx_enable(task_ctx, false);
- }
-
- void perf_pmu_resched(struct pmu *pmu)
-@@ -3246,6 +3257,9 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
- struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
- struct perf_event_pmu_context *pmu_ctx;
- int is_active = ctx->is_active;
-+ bool cgroup = event_type & EVENT_CGROUP;
-+
-+ event_type &= ~EVENT_CGROUP;
-
- lockdep_assert_held(&ctx->lock);
-
-@@ -3292,8 +3306,11 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
-
- is_active ^= ctx->is_active; /* changed bits */
-
-- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry)
-+ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-+ if (cgroup && !pmu_ctx->nr_cgroups)
-+ continue;
- __pmu_ctx_sched_out(pmu_ctx, is_active);
-+ }
- }
-
- /*
-@@ -3484,7 +3501,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
- raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
- if (context_equiv(ctx, next_ctx)) {
-
-- perf_ctx_disable(ctx);
-+ perf_ctx_disable(ctx, false);
-
- /* PMIs are disabled; ctx->nr_pending is stable. */
- if (local_read(&ctx->nr_pending) ||
-@@ -3504,7 +3521,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
- perf_ctx_sched_task_cb(ctx, false);
- perf_event_swap_task_ctx_data(ctx, next_ctx);
-
-- perf_ctx_enable(ctx);
-+ perf_ctx_enable(ctx, false);
-
- /*
- * RCU_INIT_POINTER here is safe because we've not
-@@ -3528,13 +3545,13 @@ unlock:
-
- if (do_switch) {
- raw_spin_lock(&ctx->lock);
-- perf_ctx_disable(ctx);
-+ perf_ctx_disable(ctx, false);
-
- inside_switch:
- perf_ctx_sched_task_cb(ctx, false);
- task_ctx_sched_out(ctx, EVENT_ALL);
-
-- perf_ctx_enable(ctx);
-+ perf_ctx_enable(ctx, false);
- raw_spin_unlock(&ctx->lock);
- }
- }
-@@ -3820,47 +3837,32 @@ static int merge_sched_in(struct perf_event *event, void *data)
- return 0;
- }
-
--static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
-+static void pmu_groups_sched_in(struct perf_event_context *ctx,
-+ struct perf_event_groups *groups,
-+ struct pmu *pmu)
- {
-- struct perf_event_pmu_context *pmu_ctx;
- int can_add_hw = 1;
--
-- if (pmu) {
-- visit_groups_merge(ctx, &ctx->pinned_groups,
-- smp_processor_id(), pmu,
-- merge_sched_in, &can_add_hw);
-- } else {
-- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-- can_add_hw = 1;
-- visit_groups_merge(ctx, &ctx->pinned_groups,
-- smp_processor_id(), pmu_ctx->pmu,
-- merge_sched_in, &can_add_hw);
-- }
-- }
-+ visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
-+ merge_sched_in, &can_add_hw);
- }
-
--static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
-+static void ctx_groups_sched_in(struct perf_event_context *ctx,
-+ struct perf_event_groups *groups,
-+ bool cgroup)
- {
- struct perf_event_pmu_context *pmu_ctx;
-- int can_add_hw = 1;
-
-- if (pmu) {
-- visit_groups_merge(ctx, &ctx->flexible_groups,
-- smp_processor_id(), pmu,
-- merge_sched_in, &can_add_hw);
-- } else {
-- list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-- can_add_hw = 1;
-- visit_groups_merge(ctx, &ctx->flexible_groups,
-- smp_processor_id(), pmu_ctx->pmu,
-- merge_sched_in, &can_add_hw);
-- }
-+ list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-+ if (cgroup && !pmu_ctx->nr_cgroups)
-+ continue;
-+ pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
- }
- }
-
--static void __pmu_ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu)
-+static void __pmu_ctx_sched_in(struct perf_event_context *ctx,
-+ struct pmu *pmu)
- {
-- ctx_flexible_sched_in(ctx, pmu);
-+ pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu);
- }
-
- static void
-@@ -3868,6 +3870,9 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
- {
- struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
- int is_active = ctx->is_active;
-+ bool cgroup = event_type & EVENT_CGROUP;
-+
-+ event_type &= ~EVENT_CGROUP;
-
- lockdep_assert_held(&ctx->lock);
-
-@@ -3900,11 +3905,11 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
- * in order to give them the best chance of going on.
- */
- if (is_active & EVENT_PINNED)
-- ctx_pinned_sched_in(ctx, NULL);
-+ ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
-
- /* Then walk through the lower prio flexible groups */
- if (is_active & EVENT_FLEXIBLE)
-- ctx_flexible_sched_in(ctx, NULL);
-+ ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
- }
-
- static void perf_event_context_sched_in(struct task_struct *task)
-@@ -3919,11 +3924,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
-
- if (cpuctx->task_ctx == ctx) {
- perf_ctx_lock(cpuctx, ctx);
-- perf_ctx_disable(ctx);
-+ perf_ctx_disable(ctx, false);
-
- perf_ctx_sched_task_cb(ctx, true);
-
-- perf_ctx_enable(ctx);
-+ perf_ctx_enable(ctx, false);
- perf_ctx_unlock(cpuctx, ctx);
- goto rcu_unlock;
- }
-@@ -3936,7 +3941,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
- if (!ctx->nr_events)
- goto unlock;
-
-- perf_ctx_disable(ctx);
-+ perf_ctx_disable(ctx, false);
- /*
- * We want to keep the following priority order:
- * cpu pinned (that don't need to move), task pinned,
-@@ -3946,7 +3951,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
- * events, no need to flip the cpuctx's events around.
- */
- if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
-- perf_ctx_disable(&cpuctx->ctx);
-+ perf_ctx_disable(&cpuctx->ctx, false);
- ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
- }
-
-@@ -3955,9 +3960,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
- perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
-
- if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
-- perf_ctx_enable(&cpuctx->ctx);
-+ perf_ctx_enable(&cpuctx->ctx, false);
-
-- perf_ctx_enable(ctx);
-+ perf_ctx_enable(ctx, false);
-
- unlock:
- perf_ctx_unlock(cpuctx, ctx);
-@@ -4811,6 +4816,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
- void *task_ctx_data = NULL;
-
- if (!ctx->task) {
-+ /*
-+ * perf_pmu_migrate_context() / __perf_pmu_install_event()
-+ * relies on the fact that find_get_pmu_context() cannot fail
-+ * for CPU contexts.
-+ */
- struct perf_cpu_pmu_context *cpc;
-
- cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
-@@ -12872,6 +12882,9 @@ static void __perf_pmu_install_event(struct pmu *pmu,
- int cpu, struct perf_event *event)
- {
- struct perf_event_pmu_context *epc;
-+ struct perf_event_context *old_ctx = event->ctx;
-+
-+ get_ctx(ctx); /* normally find_get_context() */
-
- event->cpu = cpu;
- epc = find_get_pmu_context(pmu, ctx, event);
-@@ -12880,6 +12893,11 @@ static void __perf_pmu_install_event(struct pmu *pmu,
- if (event->state >= PERF_EVENT_STATE_OFF)
- event->state = PERF_EVENT_STATE_INACTIVE;
- perf_install_in_context(ctx, event, cpu);
-+
-+ /*
-+ * Now that event->ctx is updated and visible, put the old ctx.
-+ */
-+ put_ctx(old_ctx);
- }
-
- static void __perf_pmu_install(struct perf_event_context *ctx,
-@@ -12918,6 +12936,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
- struct perf_event_context *src_ctx, *dst_ctx;
- LIST_HEAD(events);
-
-+ /*
-+ * Since per-cpu context is persistent, no need to grab an extra
-+ * reference.
-+ */
- src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
- dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
-
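The thread running through all of the events/core.c hunks is a single optimisation: each perf_event_pmu_context now counts its cgroup events, and a cgroup-only context switch walks the PMU list but skips entries whose count is zero, instead of disabling and rescheduling every PMU on the CPU. Reduced to its skeleton (an illustrative model, not the kernel structures):

    #include <stdio.h>

    struct pmu_ctx { const char *name; int nr_cgroups; };

    static void ctx_disable(struct pmu_ctx *pcs, int n, int cgroup_only)
    {
            for (int i = 0; i < n; i++) {
                    if (cgroup_only && !pcs[i].nr_cgroups)
                            continue;       /* nothing cgroup-bound here */
                    printf("disable %s\n", pcs[i].name);
            }
    }

    int main(void)
    {
            struct pmu_ctx pcs[] = { { "cpu", 0 }, { "ibs", 2 } };

            ctx_disable(pcs, 2, 1);         /* only "ibs" is touched */
            return 0;
    }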
-diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
-index fb1e180b5f0af..e8d82c2f07d0e 100644
---- a/kernel/events/ring_buffer.c
-+++ b/kernel/events/ring_buffer.c
-@@ -700,6 +700,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
- watermark = 0;
- }
-
-+ /*
-+ * kcalloc_node() cannot allocate a buffer larger than
-+ * PAGE_SIZE << MAX_ORDER; bail out directly in this case.
-+ */
-+ if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
-+ return -ENOMEM;
- rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
- node);
- if (!rb->aux_pages)
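The guard added to rb_alloc_aux() reflects a buddy-allocator limit: a single kmalloc-family allocation cannot exceed PAGE_SIZE << MAX_ORDER, so an AUX page array larger than that must be rejected before kcalloc_node() is even attempted. A sketch of the size check (the constants are typical x86-64 values, assumed here for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */
    #define MAX_ORDER  10

    /* Smallest order such that (PAGE_SIZE << order) >= size; size > 0. */
    static int get_order(unsigned long size)
    {
            int order = 0;
            unsigned long n = (size - 1) >> PAGE_SHIFT;

            while (n) {
                    n >>= 1;
                    order++;
            }
            return order;
    }

    int main(void)
    {
            unsigned long nr_pages = 1UL << 23;  /* absurdly large AUX area */

            /* mirrors the new bail-out: order 14 > MAX_ORDER, so reject */
            printf("%s\n",
                   get_order(nr_pages * sizeof(void *)) > MAX_ORDER ?
                   "reject" : "try kcalloc");
            return 0;
    }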
-diff --git a/kernel/fork.c b/kernel/fork.c
-index 3b6d20dfb9a85..177ce7438db6b 100644
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -1288,7 +1288,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
- hugetlb_count_init(mm);
-
- if (current->mm) {
-- mm->flags = current->mm->flags & MMF_INIT_MASK;
-+ mm->flags = mmf_init_flags(current->mm->flags);
- mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
- } else {
- mm->flags = default_dump_filter;
-diff --git a/kernel/futex/core.c b/kernel/futex/core.c
-index f10587d1d4817..f30a93e50f65e 100644
---- a/kernel/futex/core.c
-+++ b/kernel/futex/core.c
-@@ -248,7 +248,17 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
- * but access_ok() should be faster than find_vma()
- */
- if (!fshared) {
-- key->private.mm = mm;
-+ /*
-+ * On no-MMU, shared futexes are treated as private, therefore
-+ * we must not include the current process in the key. Since
-+ * there is only one address space, the address is a unique key
-+ * on its own.
-+ */
-+ if (IS_ENABLED(CONFIG_MMU))
-+ key->private.mm = mm;
-+ else
-+ key->private.mm = NULL;
-+
- key->private.address = address;
- return 0;
- }
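The futex change encodes a simple invariant: a private futex key is the pair (mm, address) on MMU systems, but on !CONFIG_MMU there is only one address space, so the mm must be left out or the same futex touched from different processes would hash to different keys. As a sketch (names invented for illustration):

    #include <stddef.h>

    struct futex_key { void *mm; unsigned long address; };

    static void make_private_key(struct futex_key *key, void *mm,
                                 unsigned long addr, int has_mmu)
    {
            /* on no-MMU the address alone is already a unique key */
            key->mm = has_mmu ? mm : NULL;
            key->address = addr;
    }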
-diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
-index 5971a66be0347..aae0402507ed7 100644
---- a/kernel/irq/debugfs.c
-+++ b/kernel/irq/debugfs.c
-@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
- BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
- BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
- BIT_MASK_DESCR(IRQD_CAN_RESERVE),
-- BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
-
- BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
-
-diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
-index c653cd31548d0..5a452b94b6434 100644
---- a/kernel/irq/generic-chip.c
-+++ b/kernel/irq/generic-chip.c
-@@ -544,21 +544,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
- void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
- unsigned int clr, unsigned int set)
- {
-- unsigned int i = gc->irq_base;
-+ unsigned int i, virq;
-
- raw_spin_lock(&gc_lock);
- list_del(&gc->list);
- raw_spin_unlock(&gc_lock);
-
-- for (; msk; msk >>= 1, i++) {
-+ for (i = 0; msk; msk >>= 1, i++) {
- if (!(msk & 0x01))
- continue;
-
-+ /*
-+ * Interrupt domain based chips store the base hardware
-+ * interrupt number in gc::irq_base. Otherwise gc::irq_base
-+ * contains the base Linux interrupt number.
-+ */
-+ if (gc->domain) {
-+ virq = irq_find_mapping(gc->domain, gc->irq_base + i);
-+ if (!virq)
-+ continue;
-+ } else {
-+ virq = gc->irq_base + i;
-+ }
-+
- /* Remove handler first. That will mask the irq line */
-- irq_set_handler(i, NULL);
-- irq_set_chip(i, &no_irq_chip);
-- irq_set_chip_data(i, NULL);
-- irq_modify_status(i, clr, set);
-+ irq_set_handler(virq, NULL);
-+ irq_set_chip(virq, &no_irq_chip);
-+ irq_set_chip_data(virq, NULL);
-+ irq_modify_status(virq, clr, set);
- }
- }
- EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
-diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
-index 1698e77645acf..75d0ae490e29c 100644
---- a/kernel/irq/matrix.c
-+++ b/kernel/irq/matrix.c
-@@ -466,16 +466,16 @@ unsigned int irq_matrix_reserved(struct irq_matrix *m)
- }
-
- /**
-- * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
-+ * irq_matrix_allocated - Get the number of allocated non-managed irqs on the local CPU
- * @m: Pointer to the matrix to search
- *
-- * This returns number of allocated irqs
-+ * This returns number of allocated non-managed interrupts.
- */
- unsigned int irq_matrix_allocated(struct irq_matrix *m)
- {
- struct cpumap *cm = this_cpu_ptr(m->maps);
-
-- return cm->allocated;
-+ return cm->allocated - cm->managed_allocated;
- }
-
- #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
-diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
-index b4c31a5c11473..79b4a58ba9c3f 100644
---- a/kernel/irq/msi.c
-+++ b/kernel/irq/msi.c
-@@ -1204,7 +1204,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
-
- #define VIRQ_CAN_RESERVE 0x01
- #define VIRQ_ACTIVATE 0x02
--#define VIRQ_NOMASK_QUIRK 0x04
-
- static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
- {
-@@ -1213,8 +1212,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
-
- if (!(vflags & VIRQ_CAN_RESERVE)) {
- irqd_clr_can_reserve(irqd);
-- if (vflags & VIRQ_NOMASK_QUIRK)
-- irqd_set_msi_nomask_quirk(irqd);
-
- /*
- * If the interrupt is managed but no CPU is available to
-@@ -1275,15 +1272,8 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain
- * Interrupt can use a reserved vector and will not occupy
- * a real device vector until the interrupt is requested.
- */
-- if (msi_check_reservation_mode(domain, info, dev)) {
-+ if (msi_check_reservation_mode(domain, info, dev))
- vflags |= VIRQ_CAN_RESERVE;
-- /*
-- * MSI affinity setting requires a special quirk (X86) when
-- * reservation mode is active.
-- */
-- if (info->flags & MSI_FLAG_NOMASK_QUIRK)
-- vflags |= VIRQ_NOMASK_QUIRK;
-- }
-
- xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
- if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
-diff --git a/kernel/kexec.c b/kernel/kexec.c
-index 107f355eac101..8f35a5a42af85 100644
---- a/kernel/kexec.c
-+++ b/kernel/kexec.c
-@@ -247,7 +247,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
- ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
- return -EINVAL;
-
-- ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
-+ ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
- if (IS_ERR(ksegments))
- return PTR_ERR(ksegments);
-
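Switching kexec_load() from memdup_user() with an open-coded nr_segments * sizeof() to memdup_array_user() matters because the latter checks the multiplication for overflow before allocating. A userspace model of that behaviour, using the GCC/Clang overflow builtin (the helper name is invented; the caller must guarantee src holds n * size readable bytes):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static void *memdup_array(const void *src, size_t n, size_t size)
    {
            size_t bytes;
            void *p;

            /* reject n * size overflow before it can shrink the copy */
            if (__builtin_mul_overflow(n, size, &bytes)) {
                    errno = EOVERFLOW;
                    return NULL;
            }
            p = malloc(bytes);
            if (!p)
                    return NULL;
            return memcpy(p, src, bytes);
    }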
-diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
-index 61328328c474c..ecbc9b6aba3a1 100644
---- a/kernel/livepatch/core.c
-+++ b/kernel/livepatch/core.c
-@@ -243,7 +243,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
- * symbols are exported and normal relas can be used instead.
- */
- if (!sec_vmlinux && sym_vmlinux) {
-- pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
-+ pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
- sym_name);
- return -EINVAL;
- }
-diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
-index e85b5ad3e2069..151bd3de59363 100644
---- a/kernel/locking/lockdep.c
-+++ b/kernel/locking/lockdep.c
-@@ -3497,7 +3497,8 @@ static int alloc_chain_hlocks(int req)
- size = chain_block_size(curr);
- if (likely(size >= req)) {
- del_chain_block(0, size, chain_block_next(curr));
-- add_chain_block(curr + req, size - req);
-+ if (size > req)
-+ add_chain_block(curr + req, size - req);
- return curr;
- }
- }
-diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
-index 93cca6e698600..7c5a8f05497f2 100644
---- a/kernel/locking/test-ww_mutex.c
-+++ b/kernel/locking/test-ww_mutex.c
-@@ -466,7 +466,6 @@ retry:
- } while (!time_after(jiffies, stress->timeout));
-
- kfree(order);
-- kfree(stress);
- }
-
- struct reorder_lock {
-@@ -531,7 +530,6 @@ out:
- list_for_each_entry_safe(ll, ln, &locks, link)
- kfree(ll);
- kfree(order);
-- kfree(stress);
- }
-
- static void stress_one_work(struct work_struct *work)
-@@ -552,8 +550,6 @@ static void stress_one_work(struct work_struct *work)
- break;
- }
- } while (!time_after(jiffies, stress->timeout));
--
-- kfree(stress);
- }
-
- #define STRESS_INORDER BIT(0)
-@@ -564,15 +560,24 @@ static void stress_one_work(struct work_struct *work)
- static int stress(int nlocks, int nthreads, unsigned int flags)
- {
- struct ww_mutex *locks;
-- int n;
-+ struct stress *stress_array;
-+ int n, count;
-
- locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
- if (!locks)
- return -ENOMEM;
-
-+ stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
-+ GFP_KERNEL);
-+ if (!stress_array) {
-+ kfree(locks);
-+ return -ENOMEM;
-+ }
-+
- for (n = 0; n < nlocks; n++)
- ww_mutex_init(&locks[n], &ww_class);
-
-+ count = 0;
- for (n = 0; nthreads; n++) {
- struct stress *stress;
- void (*fn)(struct work_struct *work);
-@@ -596,9 +601,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
- if (!fn)
- continue;
-
-- stress = kmalloc(sizeof(*stress), GFP_KERNEL);
-- if (!stress)
-- break;
-+ stress = &stress_array[count++];
-
- INIT_WORK(&stress->work, fn);
- stress->locks = locks;
-@@ -613,6 +616,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
-
- for (n = 0; n < nlocks; n++)
- ww_mutex_destroy(&locks[n]);
-+ kfree(stress_array);
- kfree(locks);
-
- return 0;
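The test-ww_mutex change is an ownership fix: the per-worker stress structures used to be freed by the workers themselves, which could race with the spawner still flushing the work items; now one array is allocated up front and freed by the spawner only after every worker has completed. The same discipline in a runnable pthread sketch (illustrative names, build with -lpthread):

    #include <pthread.h>
    #include <stdlib.h>

    struct stress { int id; /* per-worker state */ };

    static void *worker(void *arg)
    {
            struct stress *st = arg;

            (void)st;       /* use the state; do NOT free it here */
            return NULL;
    }

    int main(void)
    {
            enum { N = 4 };
            pthread_t tid[N];
            struct stress *arr = calloc(N, sizeof(*arr));

            if (!arr)
                    return 1;
            for (int i = 0; i < N; i++) {
                    arr[i].id = i;
                    pthread_create(&tid[i], NULL, worker, &arr[i]);
            }
            for (int i = 0; i < N; i++)
                    pthread_join(tid[i], NULL);  /* "flush" all workers */
            free(arr);      /* single owner frees once, after the flush */
            return 0;
    }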
-diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
-index 87440f714c0ca..474e68f0f0634 100644
---- a/kernel/module/decompress.c
-+++ b/kernel/module/decompress.c
-@@ -100,7 +100,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
- s.next_in = buf + gzip_hdr_len;
- s.avail_in = size - gzip_hdr_len;
-
-- s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
-+ s.workspace = kvmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
- if (!s.workspace)
- return -ENOMEM;
-
-@@ -138,7 +138,7 @@ static ssize_t module_gzip_decompress(struct load_info *info,
- out_inflate_end:
- zlib_inflateEnd(&s);
- out:
-- kfree(s.workspace);
-+ kvfree(s.workspace);
- return retval;
- }
- #elif defined(CONFIG_MODULE_COMPRESS_XZ)
-@@ -241,7 +241,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
- }
-
- wksp_size = zstd_dstream_workspace_bound(header.windowSize);
-- wksp = vmalloc(wksp_size);
-+ wksp = kvmalloc(wksp_size, GFP_KERNEL);
- if (!wksp) {
- retval = -ENOMEM;
- goto out;
-@@ -284,7 +284,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
- retval = new_size;
-
- out:
-- vfree(wksp);
-+ kvfree(wksp);
- return retval;
- }
- #else
-diff --git a/kernel/padata.c b/kernel/padata.c
-index 222d60195de66..179fb1518070c 100644
---- a/kernel/padata.c
-+++ b/kernel/padata.c
-@@ -202,7 +202,7 @@ int padata_do_parallel(struct padata_shell *ps,
- *cb_cpu = cpu;
- }
-
-- err = -EBUSY;
-+ err = -EBUSY;
- if ((pinst->flags & PADATA_RESET))
- goto out;
-
-@@ -1102,12 +1102,16 @@ EXPORT_SYMBOL(padata_alloc_shell);
- */
- void padata_free_shell(struct padata_shell *ps)
- {
-+ struct parallel_data *pd;
-+
- if (!ps)
- return;
-
- mutex_lock(&ps->pinst->lock);
- list_del(&ps->list);
-- padata_free_pd(rcu_dereference_protected(ps->pd, 1));
-+ pd = rcu_dereference_protected(ps->pd, 1);
-+ if (refcount_dec_and_test(&pd->refcnt))
-+ padata_free_pd(pd);
- mutex_unlock(&ps->pinst->lock);
-
- kfree(ps);
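padata_free_shell() previously freed the parallel_data outright even though a late reorder path could still hold a reference; the fix turns the free into a plain put operation. The pattern, modelled with a C11 atomic counter approximating kernel refcount_t semantics:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct pdata { atomic_int refcnt; /* ... shared state ... */ };

    static void pdata_put(struct pdata *pd)
    {
            /* fetch_sub returns the old value: 1 means we were last */
            if (atomic_fetch_sub(&pd->refcnt, 1) == 1)
                    free(pd);
    }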
-diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 0f12e0a97e432..50a15408c3fca 100644
---- a/kernel/power/snapshot.c
-+++ b/kernel/power/snapshot.c
-@@ -2545,8 +2545,9 @@ static void *get_highmem_page_buffer(struct page *page,
- pbe->copy_page = tmp;
- } else {
- /* Copy of the page will be stored in normal memory */
-- kaddr = safe_pages_list;
-- safe_pages_list = safe_pages_list->next;
-+ kaddr = __get_safe_page(ca->gfp_mask);
-+ if (!kaddr)
-+ return ERR_PTR(-ENOMEM);
- pbe->copy_page = virt_to_page(kaddr);
- }
- pbe->next = highmem_pblist;
-@@ -2750,8 +2751,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
- return ERR_PTR(-ENOMEM);
- }
- pbe->orig_address = page_address(page);
-- pbe->address = safe_pages_list;
-- safe_pages_list = safe_pages_list->next;
-+ pbe->address = __get_safe_page(ca->gfp_mask);
-+ if (!pbe->address)
-+ return ERR_PTR(-ENOMEM);
- pbe->next = restore_pblist;
- restore_pblist = pbe;
- return pbe->address;
-@@ -2783,8 +2785,6 @@ next:
- if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
- return 0;
-
-- handle->sync_read = 1;
--
- if (!handle->cur) {
- if (!buffer)
- /* This makes the buffer be freed by swsusp_free() */
-@@ -2827,7 +2827,6 @@ next:
- memory_bm_position_reset(&zero_bm);
- restore_pblist = NULL;
- handle->buffer = get_buffer(&orig_bm, &ca);
-- handle->sync_read = 0;
- if (IS_ERR(handle->buffer))
- return PTR_ERR(handle->buffer);
- }
-@@ -2837,9 +2836,8 @@ next:
- handle->buffer = get_buffer(&orig_bm, &ca);
- if (IS_ERR(handle->buffer))
- return PTR_ERR(handle->buffer);
-- if (handle->buffer != buffer)
-- handle->sync_read = 0;
- }
-+ handle->sync_read = (handle->buffer == buffer);
- handle->cur++;
-
- /* Zero pages were not included in the image, memset it and move on. */
-diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
-index 20d7a238d675a..25285893e44e7 100644
---- a/kernel/rcu/srcutree.c
-+++ b/kernel/rcu/srcutree.c
-@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
- snp->grplo = cpu;
- snp->grphi = cpu;
- }
-- sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
-+ sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
- }
- smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
- return true;
-@@ -782,8 +782,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
- spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
-+ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
- spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
- WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
- WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
-@@ -833,7 +832,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
- int cpu;
-
- for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
-- if (!(mask & (1 << (cpu - snp->grplo))))
-+ if (!(mask & (1UL << (cpu - snp->grplo))))
- continue;
- srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
- }
-@@ -1242,10 +1241,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
- spin_lock_irqsave_sdp_contention(sdp, &flags);
- if (rhp)
- rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
-+ /*
-+ * The snapshot for acceleration must be taken _before_ the read of the
-+ * current gp sequence used for advancing, otherwise advancing may fail
-+ * and acceleration may then fail too.
-+ *
-+ * This could happen if:
-+ *
-+ * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
-+ * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
-+ *
-+ * 2) The grace period for RCU_WAIT_TAIL is seen as started but not
-+ * completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
-+ *
-+ * 3) This value is passed to rcu_segcblist_advance() which can't move
-+ * any segment forward and fails.
-+ *
-+ * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
-+ * But then the call to rcu_seq_snap() observes the grace period for the
-+ * RCU_WAIT_TAIL segment as completed and the subsequent one for the
-+ * RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
-+ * so it returns a snapshot of the next grace period, which is X + 12.
-+ *
-+ * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
-+ * freshly enqueued callback in RCU_NEXT_TAIL can't move to
-+ * RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
-+ * period (gp_num = X + 8). So acceleration fails.
-+ */
-+ s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-- s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
-- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
-+ WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
- if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
- sdp->srcu_gp_seq_needed = s;
- needgp = true;
-@@ -1692,6 +1718,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
- ssp = sdp->ssp;
- rcu_cblist_init(&ready_cbs);
- spin_lock_irq_rcu_node(sdp);
-+ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
- if (sdp->srcu_cblist_invoking ||
-@@ -1720,8 +1747,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
- */
- spin_lock_irq_rcu_node(sdp);
- rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
-- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
- sdp->srcu_cblist_invoking = false;
- more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
- spin_unlock_irq_rcu_node(sdp);
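Apart from the acceleration rework, the two 1UL shifts above matter on nodes
spanning more than 31 CPUs: shifting an int-typed 1 is undefined once the
shift count reaches 31, while the unsigned long form is defined up to
BITS_PER_LONG - 1. A sketch, assuming cpu - grplo may reach 32 or more:

    /* Per-CPU bit within a node's mask. */
    static inline unsigned long grp_bit(int cpu, int grplo)
    {
    	return 1UL << (cpu - grplo);	/* int-typed 1 << n breaks at n >= 31 */
    }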
-diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
-index cb1caefa8bd07..7b4517dc46579 100644
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -31,6 +31,7 @@
- #include <linux/bitops.h>
- #include <linux/export.h>
- #include <linux/completion.h>
-+#include <linux/kmemleak.h>
- #include <linux/moduleparam.h>
- #include <linux/panic.h>
- #include <linux/panic_notifier.h>
-@@ -1556,10 +1557,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
- */
- static void rcu_gp_fqs(bool first_time)
- {
-+ int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
- struct rcu_node *rnp = rcu_get_root();
-
- WRITE_ONCE(rcu_state.gp_activity, jiffies);
- WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
-+
-+ WARN_ON_ONCE(nr_fqs > 3);
-+ /* Only countdown nr_fqs for stall purposes if jiffies moves. */
-+ if (nr_fqs) {
-+ if (nr_fqs == 1) {
-+ WRITE_ONCE(rcu_state.jiffies_stall,
-+ jiffies + rcu_jiffies_till_stall_check());
-+ }
-+ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
-+ }
-+
- if (first_time) {
- /* Collect dyntick-idle snapshots. */
- force_qs_rnp(dyntick_save_progress_counter);
-@@ -3388,6 +3401,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
- success = true;
- }
-
-+ /*
-+ * The kvfree_rcu() caller considers the pointer freed at this point
-+ * and likely removes any references to it. Since the actual slab
-+ * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
-+ * this object (no scanning or false positives reporting).
-+	 * this object (no scanning or false-positive reporting).
-+ kmemleak_ignore(ptr);
-+
- // Set timer to drain after KFREE_DRAIN_JIFFIES.
- if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
- schedule_delayed_monitor_work(krcp);
-diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
-index 192536916f9a6..e9821a8422dbe 100644
---- a/kernel/rcu/tree.h
-+++ b/kernel/rcu/tree.h
-@@ -386,6 +386,10 @@ struct rcu_state {
- /* in jiffies. */
- unsigned long jiffies_stall; /* Time at which to check */
- /* for CPU stalls. */
-+ int nr_fqs_jiffies_stall; /* Number of fqs loops after
-+					 * which jiffies is read and
-+					 * jiffies_stall is set. Stall
-+ * warnings disabled if !0. */
- unsigned long jiffies_resched; /* Time at which to resched */
- /* a reluctant CPU. */
- unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
-diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
-index 6f06dc12904ad..e09f4f624261e 100644
---- a/kernel/rcu/tree_stall.h
-+++ b/kernel/rcu/tree_stall.h
-@@ -149,12 +149,17 @@ static void panic_on_rcu_stall(void)
- /**
- * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
- *
-+ * To perform the reset request from the caller, disable stall detection until
-+ * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
-+ * loaded. It should be safe to do from the fqs loop as enough timer
-+ * interrupts and context switches should have passed.
-+ *
- * The caller must disable hard irqs.
- */
- void rcu_cpu_stall_reset(void)
- {
-- WRITE_ONCE(rcu_state.jiffies_stall,
-- jiffies + rcu_jiffies_till_stall_check());
-+ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
-+ WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
- }
-
- //////////////////////////////////////////////////////////////////////////////
-@@ -170,6 +175,7 @@ static void record_gp_stall_check_time(void)
- WRITE_ONCE(rcu_state.gp_start, j);
- j1 = rcu_jiffies_till_stall_check();
- smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
-+ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
- WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
- rcu_state.jiffies_resched = j + j1 / 2;
- rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
-@@ -725,6 +731,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
- !rcu_gp_in_progress())
- return;
- rcu_stall_kick_kthreads();
-+
-+ /*
-+ * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
-+ * loop has to set jiffies to ensure a non-stale jiffies value. This
-+	 * is required to have a good jiffies value after coming out of long
-+	 * breaks in jiffies updates. Not doing so can cause false positives.
-+ */
-+ if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
-+ return;
-+
- j = jiffies;
-
- /*
-diff --git a/kernel/reboot.c b/kernel/reboot.c
-index 3bba88c7ffc6b..6ebef11c88760 100644
---- a/kernel/reboot.c
-+++ b/kernel/reboot.c
-@@ -74,6 +74,7 @@ void __weak (*pm_power_off)(void);
- void emergency_restart(void)
- {
- kmsg_dump(KMSG_DUMP_EMERG);
-+ system_state = SYSTEM_RESTART;
- machine_emergency_restart();
- }
- EXPORT_SYMBOL_GPL(emergency_restart);
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 802551e0009bf..a854b71836dd5 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -2664,9 +2664,11 @@ static int migration_cpu_stop(void *data)
- * it.
- */
- WARN_ON_ONCE(!pending->stop_pending);
-+ preempt_disable();
- task_rq_unlock(rq, p, &rf);
- stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
- &pending->arg, &pending->stop_work);
-+ preempt_enable();
- return 0;
- }
- out:
-@@ -2986,12 +2988,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
- complete = true;
- }
-
-+ preempt_disable();
- task_rq_unlock(rq, p, rf);
--
- if (push_task) {
- stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
- p, &rq->push_work);
- }
-+ preempt_enable();
-
- if (complete)
- complete_all(&pending->done);
-@@ -3057,12 +3060,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
- if (flags & SCA_MIGRATE_ENABLE)
- p->migration_flags &= ~MDF_PUSH;
-
-+ preempt_disable();
- task_rq_unlock(rq, p, rf);
--
- if (!stop_pending) {
- stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
- &pending->arg, &pending->stop_work);
- }
-+ preempt_enable();
-
- if (flags & SCA_MIGRATE_ENABLE)
- return 0;
-@@ -5374,8 +5378,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
- /* switch_mm_cid() requires the memory barriers above. */
- switch_mm_cid(rq, prev, next);
-
-- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
--
- prepare_lock_switch(rq, next, rf);
-
- /* Here we just switch the register state and the stack. */
-@@ -6615,6 +6617,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- /* Promote REQ to ACT */
- rq->clock_update_flags <<= 1;
- update_rq_clock(rq);
-+ rq->clock_update_flags = RQCF_UPDATED;
-
- switch_count = &prev->nivcsw;
-
-@@ -6694,8 +6697,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
- /* Also unlocks the rq: */
- rq = context_switch(rq, prev, next, &rf);
- } else {
-- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
--
- rq_unpin_lock(rq, &rf);
- __balance_callbacks(rq);
- raw_spin_rq_unlock_irq(rq);
-@@ -9505,9 +9506,11 @@ static void balance_push(struct rq *rq)
- * Temporarily drop rq->lock such that we can wake-up the stop task.
- * Both preemption and IRQs are still disabled.
- */
-+ preempt_disable();
- raw_spin_rq_unlock(rq);
- stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
- this_cpu_ptr(&push_work));
-+ preempt_enable();
- /*
- * At this point need_resched() is true and we'll take the loop in
- * schedule(). The next pick is obviously going to be the stop task
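The preempt_disable()/preempt_enable() pairs added here, and in the
deadline.c, rt.c and fair.c hunks below, all close the same window:
stop_one_cpu_nowait() wakes the stopper thread, and if that wakeup preempts
the caller right after the rq lock is dropped, the stopper can run against a
half-released context. The recurring shape, with the locking simplified:

    preempt_disable();			/* stopper cannot preempt us yet... */
    raw_spin_rq_unlock(rq);		/* ...even once the rq lock drops */
    stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
    			&pending->arg, &pending->stop_work);
    preempt_enable();			/* now the stopper may run */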
-diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 58b542bf28934..d78f2e8769fb4 100644
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -2449,9 +2449,11 @@ skip:
- double_unlock_balance(this_rq, src_rq);
-
- if (push_task) {
-+ preempt_disable();
- raw_spin_rq_unlock(this_rq);
- stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
- push_task, &src_rq->push_work);
-+ preempt_enable();
- raw_spin_rq_lock(this_rq);
- }
- }
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index df348aa55d3c7..fa9fff0f9620d 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -3626,41 +3626,140 @@ static inline void
- dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
- #endif
-
-+static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
-+ unsigned long weight)
-+{
-+ unsigned long old_weight = se->load.weight;
-+ u64 avruntime = avg_vruntime(cfs_rq);
-+ s64 vlag, vslice;
-+
-+ /*
-+ * VRUNTIME
-+ * ========
-+ *
-+ * COROLLARY #1: The virtual runtime of the entity needs to be
-+ * adjusted if re-weight at !0-lag point.
-+ *
-+ * Proof: For contradiction assume this is not true, so we can
-+ * re-weight without changing vruntime at !0-lag point.
-+ *
-+ * Weight VRuntime Avg-VRuntime
-+ * before w v V
-+ * after w' v' V'
-+ *
-+ * Since lag needs to be preserved through re-weight:
-+ *
-+ * lag = (V - v)*w = (V'- v')*w', where v = v'
-+ * ==> V' = (V - v)*w/w' + v (1)
-+ *
-+ * Let W be the total weight of the entities before reweight,
-+ * since V' is the new weighted average of entities:
-+ *
-+ * V' = (WV + w'v - wv) / (W + w' - w) (2)
-+ *
-+ * by using (1) & (2) we obtain:
-+ *
-+ * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
-+ * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
-+ * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
-+ * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
-+ *
-+ * Since we are doing at !0-lag point which means V != v, we
-+ * can simplify (3):
-+ *
-+ * ==> W / (W + w' - w) = w / w'
-+ * ==> Ww' = Ww + ww' - ww
-+ * ==> W * (w' - w) = w * (w' - w)
-+ * ==> W = w (re-weight indicates w' != w)
-+ *
-+ * So the cfs_rq contains only one entity, hence vruntime of
-+ * the entity @v should always equal to the cfs_rq's weighted
-+ * average vruntime @V, which means we will always re-weight
-+ * at 0-lag point, thus breach assumption. Proof completed.
-+ *
-+ *
-+ * COROLLARY #2: Re-weight does NOT affect weighted average
-+ * vruntime of all the entities.
-+ *
-+ * Proof: According to corollary #1, Eq. (1) should be:
-+ *
-+ * (V - v)*w = (V' - v')*w'
-+ * ==> v' = V' - (V - v)*w/w' (4)
-+ *
-+ * According to the weighted average formula, we have:
-+ *
-+ * V' = (WV - wv + w'v') / (W - w + w')
-+ * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
-+ * = (WV - wv + w'V' - Vw + wv) / (W - w + w')
-+ * = (WV + w'V' - Vw) / (W - w + w')
-+ *
-+ * ==> V'*(W - w + w') = WV + w'V' - Vw
-+ * ==> V' * (W - w) = (W - w) * V (5)
-+ *
-+ * If the entity is the only one in the cfs_rq, then reweight
-+ * always occurs at 0-lag point, so V won't change. Or else
-+ * there are other entities, hence W != w, then Eq. (5) turns
-+ * into V' = V. So V won't change in either case, proof done.
-+ *
-+ *
-+ * So according to corollary #1 & #2, the effect of re-weight
-+ * on vruntime should be:
-+ *
-+ * v' = V' - (V - v) * w / w' (4)
-+ * = V - (V - v) * w / w'
-+ * = V - vl * w / w'
-+ * = V - vl'
-+ */
-+ if (avruntime != se->vruntime) {
-+ vlag = (s64)(avruntime - se->vruntime);
-+ vlag = div_s64(vlag * old_weight, weight);
-+ se->vruntime = avruntime - vlag;
-+ }
-+
-+ /*
-+ * DEADLINE
-+ * ========
-+ *
-+ * When the weight changes, the virtual time slope changes and
-+ * we should adjust the relative virtual deadline accordingly.
-+ *
-+ * d' = v' + (d - v)*w/w'
-+ * = V' - (V - v)*w/w' + (d - v)*w/w'
-+ * = V - (V - v)*w/w' + (d - v)*w/w'
-+ * = V + (d - V)*w/w'
-+ */
-+ vslice = (s64)(se->deadline - avruntime);
-+ vslice = div_s64(vslice * old_weight, weight);
-+ se->deadline = avruntime + vslice;
-+}
-+
- static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- unsigned long weight)
- {
-- unsigned long old_weight = se->load.weight;
-+ bool curr = cfs_rq->curr == se;
-
- if (se->on_rq) {
- /* commit outstanding execution time */
-- if (cfs_rq->curr == se)
-+ if (curr)
- update_curr(cfs_rq);
- else
-- avg_vruntime_sub(cfs_rq, se);
-+ __dequeue_entity(cfs_rq, se);
- update_load_sub(&cfs_rq->load, se->load.weight);
- }
- dequeue_load_avg(cfs_rq, se);
-
-- update_load_set(&se->load, weight);
--
- if (!se->on_rq) {
- /*
- * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
- * we need to scale se->vlag when w_i changes.
- */
-- se->vlag = div_s64(se->vlag * old_weight, weight);
-+ se->vlag = div_s64(se->vlag * se->load.weight, weight);
- } else {
-- s64 deadline = se->deadline - se->vruntime;
-- /*
-- * When the weight changes, the virtual time slope changes and
-- * we should adjust the relative virtual deadline accordingly.
-- */
-- deadline = div_s64(deadline * old_weight, weight);
-- se->deadline = se->vruntime + deadline;
-- if (se != cfs_rq->curr)
-- min_deadline_cb_propagate(&se->run_node, NULL);
-+ reweight_eevdf(cfs_rq, se, weight);
- }
-
-+ update_load_set(&se->load, weight);
-+
- #ifdef CONFIG_SMP
- do {
- u32 divider = get_pelt_divider(&se->avg);
-@@ -3672,8 +3771,17 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- enqueue_load_avg(cfs_rq, se);
- if (se->on_rq) {
- update_load_add(&cfs_rq->load, se->load.weight);
-- if (cfs_rq->curr != se)
-- avg_vruntime_add(cfs_rq, se);
-+ if (!curr) {
-+ /*
-+ * The entity's vruntime has been adjusted, so let's check
-+			 * whether the rq-wide min_vruntime needs updating too. Since
-+ * the calculations above require stable min_vruntime rather
-+ * than up-to-date one, we do the update at the end of the
-+ * reweight process.
-+ */
-+ __enqueue_entity(cfs_rq, se);
-+ update_min_vruntime(cfs_rq);
-+ }
- }
- }
-
-@@ -3817,14 +3925,11 @@ static void update_cfs_group(struct sched_entity *se)
-
- #ifndef CONFIG_SMP
- shares = READ_ONCE(gcfs_rq->tg->shares);
--
-- if (likely(se->load.weight == shares))
-- return;
- #else
-- shares = calc_group_shares(gcfs_rq);
-+ shares = calc_group_shares(gcfs_rq);
- #endif
--
-- reweight_entity(cfs_rq_of(se), se, shares);
-+ if (unlikely(se->load.weight != shares))
-+ reweight_entity(cfs_rq_of(se), se, shares);
- }
-
- #else /* CONFIG_FAIR_GROUP_SCHED */
-@@ -4626,22 +4731,6 @@ static inline unsigned long task_util_est(struct task_struct *p)
- return max(task_util(p), _task_util_est(p));
- }
-
--#ifdef CONFIG_UCLAMP_TASK
--static inline unsigned long uclamp_task_util(struct task_struct *p,
-- unsigned long uclamp_min,
-- unsigned long uclamp_max)
--{
-- return clamp(task_util_est(p), uclamp_min, uclamp_max);
--}
--#else
--static inline unsigned long uclamp_task_util(struct task_struct *p,
-- unsigned long uclamp_min,
-- unsigned long uclamp_max)
--{
-- return task_util_est(p);
--}
--#endif
--
- static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
- struct task_struct *p)
- {
-@@ -4932,7 +5021,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
-
- static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
- {
-- return true;
-+ return !cfs_rq->nr_running;
- }
-
- #define UPDATE_TG 0x0
-@@ -7756,7 +7845,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- target = prev_cpu;
-
- sync_entity_load_avg(&p->se);
-- if (!uclamp_task_util(p, p_util_min, p_util_max))
-+ if (!task_util_est(p) && p_util_min == 0)
- goto unlock;
-
- eenv_task_busy_time(&eenv, p, prev_cpu);
-@@ -7764,11 +7853,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- for (; pd; pd = pd->next) {
- unsigned long util_min = p_util_min, util_max = p_util_max;
- unsigned long cpu_cap, cpu_thermal_cap, util;
-- unsigned long cur_delta, max_spare_cap = 0;
-+ long prev_spare_cap = -1, max_spare_cap = -1;
- unsigned long rq_util_min, rq_util_max;
-- unsigned long prev_spare_cap = 0;
-+ unsigned long cur_delta, base_energy;
- int max_spare_cap_cpu = -1;
-- unsigned long base_energy;
- int fits, max_fits = -1;
-
- cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
-@@ -7831,7 +7919,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- prev_spare_cap = cpu_cap;
- prev_fits = fits;
- } else if ((fits > max_fits) ||
-- ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
-+ ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
- /*
- * Find the CPU with the maximum spare capacity
- * among the remaining CPUs in the performance
-@@ -7843,7 +7931,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- }
- }
-
-- if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
-+ if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
- continue;
-
- eenv_pd_busy_time(&eenv, cpus, p);
-@@ -7851,7 +7939,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- base_energy = compute_energy(&eenv, pd, cpus, p, -1);
-
- /* Evaluate the energy impact of using prev_cpu. */
-- if (prev_spare_cap > 0) {
-+ if (prev_spare_cap > -1) {
- prev_delta = compute_energy(&eenv, pd, cpus, p,
- prev_cpu);
- /* CPU utilization has changed */
-@@ -11033,12 +11121,16 @@ static int should_we_balance(struct lb_env *env)
- continue;
- }
-
-- /* Are we the first idle CPU? */
-+ /*
-+ * Are we the first idle core in a non-SMT domain or higher,
-+		 * or the first idle CPU in an SMT domain?
-+ */
- return cpu == env->dst_cpu;
- }
-
-- if (idle_smt == env->dst_cpu)
-- return true;
-+ /* Are we the first idle CPU with busy siblings? */
-+ if (idle_smt != -1)
-+ return idle_smt == env->dst_cpu;
-
- /* Are we the first CPU of this group ? */
- return group_balance_cpu(sg) == env->dst_cpu;
-@@ -11251,13 +11343,15 @@ more_balance:
- busiest->push_cpu = this_cpu;
- active_balance = 1;
- }
-- raw_spin_rq_unlock_irqrestore(busiest, flags);
-
-+ preempt_disable();
-+ raw_spin_rq_unlock_irqrestore(busiest, flags);
- if (active_balance) {
- stop_one_cpu_nowait(cpu_of(busiest),
- active_load_balance_cpu_stop, busiest,
- &busiest->active_balance_work);
- }
-+ preempt_enable();
- }
- } else {
- sd->nr_balance_failed = 0;
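A quick numeric check of the reweight_eevdf() derivation above, assuming
V = 100, v = 80, w = 2 and w' = 4: vl = V - v = 20, so vl' = vl * w / w' = 10
and v' = V - vl' = 90. Lag is preserved, since (V - v) * w = 40 equals
(V' - v') * w' = (100 - 90) * 4 = 40 (corollary #2 gives V' = V). The deadline
transforms the same way: with d = 110, d' = V + (d - V) * w / w' = 105.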
-diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 0597ba0f85ff3..904dd85345973 100644
---- a/kernel/sched/rt.c
-+++ b/kernel/sched/rt.c
-@@ -2109,9 +2109,11 @@ retry:
- */
- push_task = get_push_task(rq);
- if (push_task) {
-+ preempt_disable();
- raw_spin_rq_unlock(rq);
- stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
- push_task, &rq->push_work);
-+ preempt_enable();
- raw_spin_rq_lock(rq);
- }
-
-@@ -2448,9 +2450,11 @@ skip:
- double_unlock_balance(this_rq, src_rq);
-
- if (push_task) {
-+ preempt_disable();
- raw_spin_rq_unlock(this_rq);
- stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
- push_task, &src_rq->push_work);
-+ preempt_enable();
- raw_spin_rq_lock(this_rq);
- }
- }
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 05a5bc678c089..423d08947962c 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -2122,12 +2122,16 @@ static int hop_cmp(const void *a, const void *b)
- */
- int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
- {
-- struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
-+ struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
- struct cpumask ***hop_masks;
- int hop, ret = nr_cpu_ids;
-
- rcu_read_lock();
-
-+ /* CPU-less node entries are uninitialized in sched_domains_numa_masks */
-+ node = numa_nearest_node(node, N_CPU);
-+ k.node = node;
-+
- k.masks = rcu_dereference(sched_domains_numa_masks);
- if (!k.masks)
- goto unlock;
-diff --git a/kernel/smp.c b/kernel/smp.c
-index 8455a53465af8..695eb13a276d2 100644
---- a/kernel/smp.c
-+++ b/kernel/smp.c
-@@ -170,6 +170,8 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
-
- static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
- module_param(csd_lock_timeout, ulong, 0444);
-+static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
-+module_param(panic_on_ipistall, int, 0444);
-
- static atomic_t csd_bug_count = ATOMIC_INIT(0);
-
-@@ -230,6 +232,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
- }
-
- ts2 = sched_clock();
-+	/* How long since we last checked for a stuck CSD lock. */
- ts_delta = ts2 - *ts1;
- if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
- return false;
-@@ -243,9 +246,17 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
- else
- cpux = cpu;
- cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
-+ /* How long since this CSD lock was stuck. */
-+ ts_delta = ts2 - ts0;
- pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
-- firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
-+ firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
- cpu, csd->func, csd->info);
-+ /*
-+ * If the CSD lock is still stuck after 5 minutes, it is unlikely
-+ * to become unstuck. Use a signed comparison to avoid triggering
-+ * on underflows when the TSC is out of sync between sockets.
-+ */
-+ BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
- if (cpu_cur_csd && csd != cpu_cur_csd) {
- pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
- *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
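Both knobs are 0444 module parameters on built-in code, so they are set on
the kernel command line under the smp. prefix; for example, to keep the 5 s
warning but panic after five minutes of a stuck CSD lock (values
illustrative):

    smp.csd_lock_timeout=5000 smp.panic_on_ipistall=300000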
-diff --git a/kernel/sys.c b/kernel/sys.c
-index 2410e3999ebe5..7a4ae6d5aecd5 100644
---- a/kernel/sys.c
-+++ b/kernel/sys.c
-@@ -2368,19 +2368,45 @@ static int prctl_set_vma(unsigned long opt, unsigned long start,
- }
- #endif /* CONFIG_ANON_VMA_NAME */
-
-+static inline unsigned long get_current_mdwe(void)
-+{
-+ unsigned long ret = 0;
-+
-+ if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
-+ ret |= PR_MDWE_REFUSE_EXEC_GAIN;
-+ if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags))
-+ ret |= PR_MDWE_NO_INHERIT;
-+
-+ return ret;
-+}
-+
- static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
- unsigned long arg4, unsigned long arg5)
- {
-+ unsigned long current_bits;
-+
- if (arg3 || arg4 || arg5)
- return -EINVAL;
-
-- if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN))
-+ if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
- return -EINVAL;
-
-+ /* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */
-+ if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
-+ return -EINVAL;
-+
-+ /* PARISC cannot allow mdwe as it needs writable stacks */
-+ if (IS_ENABLED(CONFIG_PARISC))
-+ return -EINVAL;
-+
-+ current_bits = get_current_mdwe();
-+ if (current_bits && current_bits != bits)
-+ return -EPERM; /* Cannot unset the flags */
-+
-+ if (bits & PR_MDWE_NO_INHERIT)
-+ set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags);
- if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
- set_bit(MMF_HAS_MDWE, &current->mm->flags);
-- else if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
-- return -EPERM; /* Cannot unset the flag */
-
- return 0;
- }
-@@ -2390,9 +2416,7 @@ static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
- {
- if (arg2 || arg3 || arg4 || arg5)
- return -EINVAL;
--
-- return test_bit(MMF_HAS_MDWE, &current->mm->flags) ?
-- PR_MDWE_REFUSE_EXEC_GAIN : 0;
-+ return get_current_mdwe();
- }
-
- static int prctl_get_auxv(void __user *addr, unsigned long len)
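From userspace the extended interface looks like the sketch below. The PR_*
values mirror the uapi definitions this patch relies on, guarded here in case
the installed libc headers predate them:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MDWE
    #define PR_SET_MDWE			65
    #define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)
    #endif
    #ifndef PR_MDWE_NO_INHERIT
    #define PR_MDWE_NO_INHERIT		(1UL << 1)
    #endif

    int main(void)
    {
    	/* Deny future W^X gains here, but let children start clean. */
    	if (prctl(PR_SET_MDWE,
    		  PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT, 0, 0, 0))
    		perror("PR_SET_MDWE");
    	return 0;
    }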
-diff --git a/kernel/torture.c b/kernel/torture.c
-index b28b05bbef027..c7b475883b9a8 100644
---- a/kernel/torture.c
-+++ b/kernel/torture.c
-@@ -87,14 +87,15 @@ EXPORT_SYMBOL_GPL(verbose_torout_sleep);
- * nanosecond random fuzz. This function and its friends desynchronize
- * testing from the timer wheel.
- */
--int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
-+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
-+ struct torture_random_state *trsp)
- {
- ktime_t hto = baset_ns;
-
- if (trsp)
- hto += torture_random(trsp) % fuzzt_ns;
- set_current_state(TASK_IDLE);
-- return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
-+ return schedule_hrtimeout(&hto, mode);
- }
- EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
-
-@@ -106,7 +107,7 @@ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state
- {
- ktime_t baset_ns = baset_us * NSEC_PER_USEC;
-
-- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
-+ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
- }
- EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
-
-@@ -123,7 +124,7 @@ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state
- fuzzt_ns = (u32)~0U;
- else
- fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
-- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
-+ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
- }
- EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
-
-@@ -136,7 +137,7 @@ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
- {
- ktime_t baset_ns = jiffies_to_nsecs(baset_j);
-
-- return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
-+ return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), HRTIMER_MODE_REL, trsp);
- }
- EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
-
-@@ -153,7 +154,7 @@ int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *
- fuzzt_ns = (u32)~0U;
- else
- fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
-- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
-+ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
- }
- EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
-
-@@ -720,7 +721,7 @@ static void torture_shutdown_cleanup(void)
- * suddenly applied to or removed from the system.
- */
- static struct task_struct *stutter_task;
--static int stutter_pause_test;
-+static ktime_t stutter_till_abs_time;
- static int stutter;
- static int stutter_gap;
-
-@@ -730,30 +731,16 @@ static int stutter_gap;
- */
- bool stutter_wait(const char *title)
- {
-- unsigned int i = 0;
- bool ret = false;
-- int spt;
-+ ktime_t till_ns;
-
- cond_resched_tasks_rcu_qs();
-- spt = READ_ONCE(stutter_pause_test);
-- for (; spt; spt = READ_ONCE(stutter_pause_test)) {
-- if (!ret && !rt_task(current)) {
-- sched_set_normal(current, MAX_NICE);
-- ret = true;
-- }
-- if (spt == 1) {
-- torture_hrtimeout_jiffies(1, NULL);
-- } else if (spt == 2) {
-- while (READ_ONCE(stutter_pause_test)) {
-- if (!(i++ & 0xffff))
-- torture_hrtimeout_us(10, 0, NULL);
-- cond_resched();
-- }
-- } else {
-- torture_hrtimeout_jiffies(round_jiffies_relative(HZ), NULL);
-- }
-- torture_shutdown_absorb(title);
-+ till_ns = READ_ONCE(stutter_till_abs_time);
-+ if (till_ns && ktime_before(ktime_get(), till_ns)) {
-+ torture_hrtimeout_ns(till_ns, 0, HRTIMER_MODE_ABS, NULL);
-+ ret = true;
- }
-+ torture_shutdown_absorb(title);
- return ret;
- }
- EXPORT_SYMBOL_GPL(stutter_wait);
-@@ -764,23 +751,16 @@ EXPORT_SYMBOL_GPL(stutter_wait);
- */
- static int torture_stutter(void *arg)
- {
-- DEFINE_TORTURE_RANDOM(rand);
-- int wtime;
-+ ktime_t till_ns;
-
- VERBOSE_TOROUT_STRING("torture_stutter task started");
- do {
- if (!torture_must_stop() && stutter > 1) {
-- wtime = stutter;
-- if (stutter > 2) {
-- WRITE_ONCE(stutter_pause_test, 1);
-- wtime = stutter - 3;
-- torture_hrtimeout_jiffies(wtime, &rand);
-- wtime = 2;
-- }
-- WRITE_ONCE(stutter_pause_test, 2);
-- torture_hrtimeout_jiffies(wtime, NULL);
-+ till_ns = ktime_add_ns(ktime_get(),
-+ jiffies_to_nsecs(stutter));
-+ WRITE_ONCE(stutter_till_abs_time, till_ns);
-+ torture_hrtimeout_jiffies(stutter - 1, NULL);
- }
-- WRITE_ONCE(stutter_pause_test, 0);
- if (!torture_must_stop())
- torture_hrtimeout_jiffies(stutter_gap, NULL);
- torture_shutdown_absorb("torture_stutter");
-diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index abaaf516fcae9..a40d6baf101f0 100644
---- a/kernel/trace/trace.c
-+++ b/kernel/trace/trace.c
-@@ -4986,6 +4986,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
- if (ret)
- return ret;
-
-+ mutex_lock(&event_mutex);
-+
-+ /* Fail if the file is marked for removal */
-+ if (file->flags & EVENT_FILE_FL_FREED) {
-+ trace_array_put(file->tr);
-+ ret = -ENODEV;
-+ } else {
-+ event_file_get(file);
-+ }
-+
-+ mutex_unlock(&event_mutex);
-+ if (ret)
-+ return ret;
-+
- filp->private_data = inode->i_private;
-
- return 0;
-@@ -4996,6 +5010,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
- struct trace_event_file *file = inode->i_private;
-
- trace_array_put(file->tr);
-+ event_file_put(file);
-
- return 0;
- }
-diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
-index 77debe53f07cf..d608f61287043 100644
---- a/kernel/trace/trace.h
-+++ b/kernel/trace/trace.h
-@@ -1664,6 +1664,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
- char *glob,
- struct event_trigger_data *trigger_data);
-
-+extern void event_file_get(struct trace_event_file *file);
-+extern void event_file_put(struct trace_event_file *file);
-+
- /**
- * struct event_trigger_ops - callbacks for trace event triggers
- *
-diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index f49d6ddb63425..82cb22ad6d617 100644
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -990,13 +990,35 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
- }
- }
-
-+void event_file_get(struct trace_event_file *file)
-+{
-+ atomic_inc(&file->ref);
-+}
-+
-+void event_file_put(struct trace_event_file *file)
-+{
-+ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
-+ if (file->flags & EVENT_FILE_FL_FREED)
-+ kmem_cache_free(file_cachep, file);
-+ return;
-+ }
-+
-+ if (atomic_dec_and_test(&file->ref)) {
-+ /* Count should only go to zero when it is freed */
-+ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
-+ return;
-+ kmem_cache_free(file_cachep, file);
-+ }
-+}
-+
- static void remove_event_file_dir(struct trace_event_file *file)
- {
- eventfs_remove(file->ef);
- list_del(&file->list);
- remove_subsystem(file->system);
- free_event_filter(file->filter);
-- kmem_cache_free(file_cachep, file);
-+ file->flags |= EVENT_FILE_FL_FREED;
-+ event_file_put(file);
- }
-
- /*
-@@ -1369,7 +1391,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
- flags = file->flags;
- mutex_unlock(&event_mutex);
-
-- if (!file)
-+ if (!file || flags & EVENT_FILE_FL_FREED)
- return -ENODEV;
-
- if (flags & EVENT_FILE_FL_ENABLED &&
-@@ -1407,7 +1429,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
- ret = -ENODEV;
- mutex_lock(&event_mutex);
- file = event_file_data(filp);
-- if (likely(file))
-+ if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
- ret = ftrace_event_enable_disable(file, val);
- mutex_unlock(&event_mutex);
- break;
-@@ -1681,7 +1703,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
-
- mutex_lock(&event_mutex);
- file = event_file_data(filp);
-- if (file)
-+ if (file && !(file->flags & EVENT_FILE_FL_FREED))
- print_event_filter(file, s);
- mutex_unlock(&event_mutex);
-
-@@ -2803,6 +2825,7 @@ trace_create_new_event(struct trace_event_call *call,
- atomic_set(&file->tm_ref, 0);
- INIT_LIST_HEAD(&file->triggers);
- list_add(&file->list, &tr->events);
-+ event_file_get(file);
-
- return file;
- }
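The resulting lifetime rules: trace_create_new_event() takes the initial
reference, tracing_open_file_tr() takes one per open file, and
remove_event_file_dir() sets EVENT_FILE_FL_FREED before dropping the initial
reference, so the cache object is returned only after the last reader calls
tracing_release_file_tr(). Accordingly, every path that dereferences an event
file under event_mutex now checks EVENT_FILE_FL_FREED first, as the
event_enable_read/write and event_filter_read hunks above and the
apply_event_filter hunk below do.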
-diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
-index 33264e510d161..0c611b281a5b5 100644
---- a/kernel/trace/trace_events_filter.c
-+++ b/kernel/trace/trace_events_filter.c
-@@ -2349,6 +2349,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
- struct event_filter *filter = NULL;
- int err;
-
-+ if (file->flags & EVENT_FILE_FL_FREED)
-+ return -ENODEV;
-+
- if (!strcmp(strstrip(filter_string), "0")) {
- filter_disable(file);
- filter = event_filter(file);
-diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
-index 14cb275a0bab0..846e02c0fb59a 100644
---- a/kernel/trace/trace_events_synth.c
-+++ b/kernel/trace/trace_events_synth.c
-@@ -452,7 +452,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
-
- #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
- if ((unsigned long)str_val < TASK_SIZE)
-- ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
-+ ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
- else
- #endif
- ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
-diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
-index 8bfe23af9c739..7d2ddbcfa377c 100644
---- a/kernel/trace/trace_fprobe.c
-+++ b/kernel/trace/trace_fprobe.c
-@@ -927,11 +927,12 @@ static int parse_symbol_and_return(int argc, const char *argv[],
- for (i = 2; i < argc; i++) {
- tmp = strstr(argv[i], "$retval");
- if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
-+ if (is_tracepoint) {
-+ trace_probe_log_set_index(i);
-+ trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
-+ return -EINVAL;
-+ }
- *is_return = true;
-- /*
-- * NOTE: Don't check is_tracepoint here, because it will
-- * be checked when the argument is parsed.
-- */
- break;
- }
- }
-diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
-index e834f149695b7..47812aa16bb57 100644
---- a/kernel/trace/trace_kprobe.c
-+++ b/kernel/trace/trace_kprobe.c
-@@ -1020,9 +1020,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
- /**
- * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
-+ * @kretprobe: Is this a return probe?
- * @name: The name of the kprobe event
- * @loc: The location of the kprobe event
-- * @kretprobe: Is this a return probe?
- * @...: Variable number of arg (pairs), one pair for each field
- *
- * NOTE: Users normally won't want to call this function directly, but
-diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
-index d0b6b390ee423..778b4056700ff 100644
---- a/kernel/watch_queue.c
-+++ b/kernel/watch_queue.c
-@@ -331,7 +331,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
- filter.__reserved != 0)
- return -EINVAL;
-
-- tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
-+ tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
- if (IS_ERR(tf))
- return PTR_ERR(tf);
-
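memdup_array_user() exists so callers stop open-coding nr * size, which can
wrap when the count is user-controlled. Its core is roughly the following
sketch (not a verbatim copy of the helper):

    #include <linux/err.h>
    #include <linux/overflow.h>
    #include <linux/string.h>

    static void *memdup_array_sketch(const void __user *src,
    				     size_t n, size_t size)
    {
    	size_t bytes;

    	/* Reject n * size if the multiplication would overflow. */
    	if (check_mul_overflow(n, size, &bytes))
    		return ERR_PTR(-EOVERFLOW);
    	return memdup_user(src, bytes);
    }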
-diff --git a/kernel/watchdog.c b/kernel/watchdog.c
-index d145305d95fe8..5cd6d4e269157 100644
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -283,6 +283,13 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
- static DEFINE_PER_CPU(bool, softlockup_touch_sync);
- static unsigned long soft_lockup_nmi_warn;
-
-+static int __init softlockup_panic_setup(char *str)
-+{
-+ softlockup_panic = simple_strtoul(str, NULL, 0);
-+ return 1;
-+}
-+__setup("softlockup_panic=", softlockup_panic_setup);
-+
- static int __init nowatchdog_setup(char *str)
- {
- watchdog_user_enabled = 0;
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index a3522b70218d3..0f682da96e1c5 100644
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -5622,50 +5622,54 @@ static void work_for_cpu_fn(struct work_struct *work)
- }
-
- /**
-- * work_on_cpu - run a function in thread context on a particular cpu
-+ * work_on_cpu_key - run a function in thread context on a particular cpu
- * @cpu: the cpu to run on
- * @fn: the function to run
- * @arg: the function arg
-+ * @key: The lock class key for lock debugging purposes
- *
- * It is up to the caller to ensure that the cpu doesn't go offline.
- * The caller must not hold any locks which would prevent @fn from completing.
- *
- * Return: The value @fn returns.
- */
--long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
-+long work_on_cpu_key(int cpu, long (*fn)(void *),
-+ void *arg, struct lock_class_key *key)
- {
- struct work_for_cpu wfc = { .fn = fn, .arg = arg };
-
-- INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
-+ INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
- schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
- destroy_work_on_stack(&wfc.work);
- return wfc.ret;
- }
--EXPORT_SYMBOL_GPL(work_on_cpu);
-+EXPORT_SYMBOL_GPL(work_on_cpu_key);
-
- /**
-- * work_on_cpu_safe - run a function in thread context on a particular cpu
-+ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
- * @cpu: the cpu to run on
- * @fn: the function to run
- * @arg: the function argument
-+ * @key: The lock class key for lock debugging purposes
- *
- * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
- * any locks which would prevent @fn from completing.
- *
- * Return: The value @fn returns.
- */
--long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
-+long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
-+ void *arg, struct lock_class_key *key)
- {
- long ret = -ENODEV;
-
- cpus_read_lock();
- if (cpu_online(cpu))
-- ret = work_on_cpu(cpu, fn, arg);
-+ ret = work_on_cpu_key(cpu, fn, arg, key);
- cpus_read_unlock();
- return ret;
- }
--EXPORT_SYMBOL_GPL(work_on_cpu_safe);
-+EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
- #endif /* CONFIG_SMP */
-
- #ifdef CONFIG_FREEZER
-diff --git a/lib/errname.c b/lib/errname.c
-index 67739b174a8cc..0c336b0f12f60 100644
---- a/lib/errname.c
-+++ b/lib/errname.c
-@@ -111,9 +111,6 @@ static const char *names_0[] = {
- E(ENOSPC),
- E(ENOSR),
- E(ENOSTR),
--#ifdef ENOSYM
-- E(ENOSYM),
--#endif
- E(ENOSYS),
- E(ENOTBLK),
- E(ENOTCONN),
-@@ -144,9 +141,6 @@ static const char *names_0[] = {
- #endif
- E(EREMOTE),
- E(EREMOTEIO),
--#ifdef EREMOTERELEASE
-- E(EREMOTERELEASE),
--#endif
- E(ERESTART),
- E(ERFKILL),
- E(EROFS),
-diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
-index f25eb111c0516..7dfa88282b006 100644
---- a/lib/generic-radix-tree.c
-+++ b/lib/generic-radix-tree.c
-@@ -166,6 +166,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
- struct genradix_root *r;
- struct genradix_node *n;
- unsigned level, i;
-+
-+ if (iter->offset == SIZE_MAX)
-+ return NULL;
-+
- restart:
- r = READ_ONCE(radix->root);
- if (!r)
-@@ -184,10 +188,17 @@ restart:
- (GENRADIX_ARY - 1);
-
- while (!n->children[i]) {
-+ size_t objs_per_ptr = genradix_depth_size(level);
-+
-+ if (iter->offset + objs_per_ptr < iter->offset) {
-+ iter->offset = SIZE_MAX;
-+ iter->pos = SIZE_MAX;
-+ return NULL;
-+ }
-+
- i++;
-- iter->offset = round_down(iter->offset +
-- genradix_depth_size(level),
-- genradix_depth_size(level));
-+ iter->offset = round_down(iter->offset + objs_per_ptr,
-+ objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) *
- objs_per_page;
- if (i == GENRADIX_ARY)
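The new guard is the standard unsigned wraparound test: for unsigned a and b,
a + b wraps iff the truncated sum is smaller than a. Once a wrap is detected
the iterator is parked at SIZE_MAX, so later peeks return NULL instead of
restarting from a bogus offset. The test in isolation:

    #include <linux/types.h>

    /* a + b wraps around iff the (mod 2^N) sum lands below a. */
    static inline bool add_wraps(size_t a, size_t b)
    {
    	return a + b < a;
    }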
-diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
-index a6348489d45fe..1236b3cd2fbb2 100644
---- a/lib/kunit/executor.c
-+++ b/lib/kunit/executor.c
-@@ -137,8 +137,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
- {
- struct kunit_suite * const *suites;
-
-- for (suites = suite_set.start; suites < suite_set.end; suites++)
-+ for (suites = suite_set.start; suites < suite_set.end; suites++) {
-+ kfree((*suites)->test_cases);
- kfree(*suites);
-+ }
- kfree(suite_set.start);
- }
-
-@@ -155,10 +157,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
- struct kunit_suite_set filtered = {NULL, NULL};
- struct kunit_glob_filter parsed_glob;
- struct kunit_attr_filter *parsed_filters = NULL;
-+ struct kunit_suite * const *suites;
-
- const size_t max = suite_set->end - suite_set->start;
-
-- copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
-+ copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL);
- if (!copy) { /* won't be able to run anything, return an empty set */
- return filtered;
- }
-@@ -193,7 +196,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
- parsed_glob.test_glob);
- if (IS_ERR(filtered_suite)) {
- *err = PTR_ERR(filtered_suite);
-- goto free_parsed_filters;
-+ goto free_filtered_suite;
- }
- }
- if (filter_count > 0 && parsed_filters != NULL) {
-@@ -210,11 +213,11 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
- filtered_suite = new_filtered_suite;
-
- if (*err)
-- goto free_parsed_filters;
-+ goto free_filtered_suite;
-
- if (IS_ERR(filtered_suite)) {
- *err = PTR_ERR(filtered_suite);
-- goto free_parsed_filters;
-+ goto free_filtered_suite;
- }
- if (!filtered_suite)
- break;
-@@ -229,6 +232,14 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
- filtered.start = copy_start;
- filtered.end = copy;
-
-+free_filtered_suite:
-+ if (*err) {
-+ for (suites = copy_start; suites < copy; suites++) {
-+ kfree((*suites)->test_cases);
-+ kfree(*suites);
-+ }
-+ }
-+
- free_parsed_filters:
- if (filter_count)
- kfree(parsed_filters);
-@@ -241,7 +252,7 @@ free_parsed_glob:
-
- free_copy:
- if (*err)
-- kfree(copy);
-+ kfree(copy_start);
-
- return filtered;
- }
-diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
-index b4f6f96b28445..22d4ee86dbedd 100644
---- a/lib/kunit/executor_test.c
-+++ b/lib/kunit/executor_test.c
-@@ -9,7 +9,7 @@
- #include <kunit/test.h>
- #include <kunit/attributes.h>
-
--static void kfree_at_end(struct kunit *test, const void *to_free);
-+static void free_suite_set_at_end(struct kunit *test, const void *to_free);
- static struct kunit_suite *alloc_fake_suite(struct kunit *test,
- const char *suite_name,
- struct kunit_case *test_cases);
-@@ -56,7 +56,7 @@ static void filter_suites_test(struct kunit *test)
- got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
- KUNIT_ASSERT_EQ(test, err, 0);
-- kfree_at_end(test, got.start);
-+ free_suite_set_at_end(test, &got);
-
- /* Validate we just have suite2 */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
-@@ -82,7 +82,7 @@ static void filter_suites_test_glob_test(struct kunit *test)
- got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
- KUNIT_ASSERT_EQ(test, err, 0);
-- kfree_at_end(test, got.start);
-+ free_suite_set_at_end(test, &got);
-
- /* Validate we just have suite2 */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
-@@ -109,7 +109,7 @@ static void filter_suites_to_empty_test(struct kunit *test)
-
- got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
- KUNIT_ASSERT_EQ(test, err, 0);
-- kfree_at_end(test, got.start); /* just in case */
-+ free_suite_set_at_end(test, &got); /* just in case */
-
- KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
- "should be empty to indicate no match");
-@@ -172,7 +172,7 @@ static void filter_attr_test(struct kunit *test)
- got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
- KUNIT_ASSERT_EQ(test, err, 0);
-- kfree_at_end(test, got.start);
-+ free_suite_set_at_end(test, &got);
-
- /* Validate we just have normal_suite */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
-@@ -200,7 +200,7 @@ static void filter_attr_empty_test(struct kunit *test)
-
- got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
- KUNIT_ASSERT_EQ(test, err, 0);
-- kfree_at_end(test, got.start); /* just in case */
-+ free_suite_set_at_end(test, &got); /* just in case */
-
- KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
- "should be empty to indicate no match");
-@@ -222,7 +222,7 @@ static void filter_attr_skip_test(struct kunit *test)
- got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
- KUNIT_ASSERT_EQ(test, err, 0);
-- kfree_at_end(test, got.start);
-+ free_suite_set_at_end(test, &got);
-
- /* Validate we have both the slow and normal test */
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
-@@ -256,18 +256,26 @@ kunit_test_suites(&executor_test_suite);
-
- /* Test helpers */
-
--/* Use the resource API to register a call to kfree(to_free).
-+static void free_suite_set(void *suite_set)
-+{
-+ kunit_free_suite_set(*(struct kunit_suite_set *)suite_set);
-+ kfree(suite_set);
-+}
-+
-+/* Use the resource API to register a call to free_suite_set.
- * Since we never actually use the resource, it's safe to use on const data.
- */
--static void kfree_at_end(struct kunit *test, const void *to_free)
-+static void free_suite_set_at_end(struct kunit *test, const void *to_free)
- {
-- /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
-- if (IS_ERR_OR_NULL(to_free))
-+ struct kunit_suite_set *free;
-+
-+ if (!((struct kunit_suite_set *)to_free)->start)
- return;
-
-- kunit_add_action(test,
-- (kunit_action_t *)kfree,
-- (void *)to_free);
-+ free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
-+ *free = *(struct kunit_suite_set *)to_free;
-+
-+ kunit_add_action(test, free_suite_set, (void *)free);
- }
-
- static struct kunit_suite *alloc_fake_suite(struct kunit *test,
-diff --git a/mm/cma.c b/mm/cma.c
-index da2967c6a2238..2b2494fd6b59a 100644
---- a/mm/cma.c
-+++ b/mm/cma.c
-@@ -505,7 +505,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
- */
- if (page) {
- for (i = 0; i < count; i++)
-- page_kasan_tag_reset(page + i);
-+ page_kasan_tag_reset(nth_page(page, i));
- }
-
- if (ret && !no_warn) {
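This hunk, like the hugetlb and memory_hotplug hunks further down, replaces
raw struct page pointer arithmetic with pfn-based arithmetic: with SPARSEMEM
and no VMEMMAP the memmap is not virtually contiguous, so page + i is only
valid within one memory section, whereas nth_page() always goes through the
pfn:

    #include <linux/mm.h>

    /* Safe on all memory models, unlike base + n. */
    static struct page *pick_nth(struct page *base, unsigned long n)
    {
    	return nth_page(base, n);	/* pfn_to_page(page_to_pfn(base) + n) */
    }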
-diff --git a/mm/damon/core.c b/mm/damon/core.c
-index bcd2bd9d6c104..fd5be73f699f4 100644
---- a/mm/damon/core.c
-+++ b/mm/damon/core.c
-@@ -476,20 +476,14 @@ static unsigned int damon_age_for_new_attrs(unsigned int age,
- static unsigned int damon_accesses_bp_to_nr_accesses(
- unsigned int accesses_bp, struct damon_attrs *attrs)
- {
-- unsigned int max_nr_accesses =
-- attrs->aggr_interval / attrs->sample_interval;
--
-- return accesses_bp * max_nr_accesses / 10000;
-+ return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
- }
-
- /* convert nr_accesses to access ratio in bp (per 10,000) */
- static unsigned int damon_nr_accesses_to_accesses_bp(
- unsigned int nr_accesses, struct damon_attrs *attrs)
- {
-- unsigned int max_nr_accesses =
-- attrs->aggr_interval / attrs->sample_interval;
--
-- return nr_accesses * 10000 / max_nr_accesses;
-+ return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
- }
-
- static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
-@@ -920,7 +914,7 @@ static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
- matched = true;
- break;
- default:
-- break;
-+ return false;
- }
-
- return matched == filter->matching;
-diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
-index 7b8fce2f67a8d..3071e08e8b8f8 100644
---- a/mm/damon/lru_sort.c
-+++ b/mm/damon/lru_sort.c
-@@ -193,9 +193,7 @@ static int damon_lru_sort_apply_parameters(void)
- if (err)
- return err;
-
-- /* aggr_interval / sample_interval is the maximum nr_accesses */
-- hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
-- damon_lru_sort_mon_attrs.sample_interval *
-+ hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
- hot_thres_access_freq / 1000;
- scheme = damon_lru_sort_new_hot_scheme(hot_thres);
- if (!scheme)
-diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
-index ac1c3fa80f984..d25d99cb5f2bb 100644
---- a/mm/damon/ops-common.c
-+++ b/mm/damon/ops-common.c
-@@ -73,7 +73,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
- int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
- struct damos *s)
- {
-- unsigned int max_nr_accesses;
- int freq_subscore;
- unsigned int age_in_sec;
- int age_in_log, age_subscore;
-@@ -81,8 +80,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
- unsigned int age_weight = s->quota.weight_age;
- int hotness;
-
-- max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
-- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
-+ freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
-+ damon_max_nr_accesses(&c->attrs);
-
- age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
- for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
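The DAMON hunks above fold the open-coded aggr_interval / sample_interval
quotient into one helper whose definition sits outside this excerpt. A
plausible reconstruction, assuming the clamping that the intervals' unsigned
long type makes necessary:

    #include <linux/damon.h>
    #include <linux/minmax.h>

    /* Hypothetical sketch; the real helper lives in the DAMON headers. */
    static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
    {
    	/* The intervals are unsigned long; clamp the quotient into uint. */
    	return min(attrs->aggr_interval / attrs->sample_interval,
    		   (unsigned long)UINT_MAX);
    }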
-diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
-index 527e7d17eb3b2..36dcd881a19c0 100644
---- a/mm/damon/sysfs-schemes.c
-+++ b/mm/damon/sysfs-schemes.c
-@@ -126,6 +126,9 @@ damon_sysfs_scheme_regions_alloc(void)
- struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
- GFP_KERNEL);
-
-+ if (!regions)
-+ return NULL;
-+
- regions->kobj = (struct kobject){};
- INIT_LIST_HEAD(&regions->regions_list);
- regions->nr_regions = 0;
-@@ -1752,6 +1755,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
- return 0;
-
- region = damon_sysfs_scheme_region_alloc(r);
-+ if (!region)
-+ return 0;
- list_add_tail(&region->list, &sysfs_regions->regions_list);
- sysfs_regions->nr_regions++;
- if (kobject_init_and_add(&region->kobj,
-diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
-index f60e56150feb6..faaef5098e264 100644
---- a/mm/damon/sysfs.c
-+++ b/mm/damon/sysfs.c
-@@ -1150,58 +1150,75 @@ destroy_targets_out:
- return err;
- }
-
--/*
-- * Search a target in a context that corresponds to the sysfs target input.
-- *
-- * Return: pointer to the target if found, NULL if not found, or negative
-- * error code if the search failed.
-- */
--static struct damon_target *damon_sysfs_existing_target(
-- struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
-+static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
- {
-- struct pid *pid;
-- struct damon_target *t;
-+ struct pid *pid_new;
-
-- if (!damon_target_has_pid(ctx)) {
-- /* Up to only one target for paddr could exist */
-- damon_for_each_target(t, ctx)
-- return t;
-- return NULL;
-+ pid_new = find_get_pid(pid);
-+ if (!pid_new)
-+ return -EINVAL;
-+
-+ if (pid_new == target->pid) {
-+ put_pid(pid_new);
-+ return 0;
- }
-
-- /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
-- pid = find_get_pid(sys_target->pid);
-- if (!pid)
-- return ERR_PTR(-EINVAL);
-- damon_for_each_target(t, ctx) {
-- if (t->pid == pid) {
-- put_pid(pid);
-- return t;
-- }
-+ put_pid(target->pid);
-+ target->pid = pid_new;
-+ return 0;
-+}
-+
-+static int damon_sysfs_update_target(struct damon_target *target,
-+ struct damon_ctx *ctx,
-+ struct damon_sysfs_target *sys_target)
-+{
-+	int err = 0;
-+
-+ if (damon_target_has_pid(ctx)) {
-+ err = damon_sysfs_update_target_pid(target, sys_target->pid);
-+ if (err)
-+ return err;
- }
-- put_pid(pid);
-- return NULL;
-+
-+ /*
-+ * Do monitoring target region boundary update only if one or more
-+ * regions are set by the user. This is for keeping current monitoring
-+	 * regions are set by the user. This keeps the current monitoring
-+	 * results and ranges intact, which matters especially for ops with
-+	 * dynamic monitoring target region updates such as 'vaddr'.
-+ if (sys_target->regions->nr)
-+ err = damon_sysfs_set_regions(target, sys_target->regions);
-+ return err;
- }
-
- static int damon_sysfs_set_targets(struct damon_ctx *ctx,
- struct damon_sysfs_targets *sysfs_targets)
- {
-- int i, err;
-+ struct damon_target *t, *next;
-+ int i = 0, err;
-
- /* Multiple physical address space monitoring targets makes no sense */
- if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
- return -EINVAL;
-
-- for (i = 0; i < sysfs_targets->nr; i++) {
-+ damon_for_each_target_safe(t, next, ctx) {
-+ if (i < sysfs_targets->nr) {
-+ err = damon_sysfs_update_target(t, ctx,
-+ sysfs_targets->targets_arr[i]);
-+ if (err)
-+ return err;
-+ } else {
-+ if (damon_target_has_pid(ctx))
-+ put_pid(t->pid);
-+ damon_destroy_target(t);
-+ }
-+ i++;
-+ }
-+
-+ for (; i < sysfs_targets->nr; i++) {
- struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
-- struct damon_target *t = damon_sysfs_existing_target(st, ctx);
--
-- if (IS_ERR(t))
-- return PTR_ERR(t);
-- if (!t)
-- err = damon_sysfs_add_target(st, ctx);
-- else
-- err = damon_sysfs_set_regions(t, st->regions);
-+
-+ err = damon_sysfs_add_target(st, ctx);
- if (err)
- return err;
- }
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 064fbd90822b4..874000f97bfc1 100644
---- a/mm/huge_memory.c
-+++ b/mm/huge_memory.c
-@@ -2737,13 +2737,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
- int nr = folio_nr_pages(folio);
-
- xas_split(&xas, folio, folio_order(folio));
-- if (folio_test_swapbacked(folio)) {
-- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
-- -nr);
-- } else {
-- __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
-- -nr);
-- filemap_nr_thps_dec(mapping);
-+ if (folio_test_pmd_mappable(folio)) {
-+ if (folio_test_swapbacked(folio)) {
-+ __lruvec_stat_mod_folio(folio,
-+ NR_SHMEM_THPS, -nr);
-+ } else {
-+ __lruvec_stat_mod_folio(folio,
-+ NR_FILE_THPS, -nr);
-+ filemap_nr_thps_dec(mapping);
-+ }
- }
- }
-
-diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 1301ba7b2c9a9..5f0adffeceb1d 100644
---- a/mm/hugetlb.c
-+++ b/mm/hugetlb.c
-@@ -6520,7 +6520,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- }
- }
-
-- page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
-+ page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
-
- /*
- * Note that page may be a sub-page, and with vmemmap
-diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 5b009b233ab89..8a881ab21f6cb 100644
---- a/mm/memcontrol.c
-+++ b/mm/memcontrol.c
-@@ -2864,7 +2864,8 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
- * Moreover, it should not come from DMA buffer and is not readily
- * reclaimable. So those GFP bits should be masked off.
- */
--#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
-+#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
-+ __GFP_ACCOUNT | __GFP_NOFAIL)
-
- /*
- * mod_objcg_mlstate() may be called with irq enabled, so
-diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
-index 1b03f4ec6fd21..3b301c4023ffc 100644
---- a/mm/memory_hotplug.c
-+++ b/mm/memory_hotplug.c
-@@ -1689,7 +1689,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
- */
- if (HPageMigratable(head))
- goto found;
-- skip = compound_nr(head) - (page - head);
-+ skip = compound_nr(head) - (pfn - page_to_pfn(head));
- pfn += skip - 1;
- }
- return -ENOENT;
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 29ebf1e7898cf..e52e3a0b8f2e6 100644
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -131,22 +131,26 @@ static struct mempolicy default_policy = {
- static struct mempolicy preferred_node_policy[MAX_NUMNODES];
-
- /**
-- * numa_map_to_online_node - Find closest online node
-+ * numa_nearest_node - Find nearest node by state
- * @node: Node id to start the search
-+ * @state: State to filter the search
- *
-- * Lookup the next closest node by distance if @nid is not online.
-+ * Lookup the closest node by distance if @nid is not in state.
- *
-- * Return: this @node if it is online, otherwise the closest node by distance
-+ * Return: this @node if it is in state, otherwise the closest node by distance
- */
--int numa_map_to_online_node(int node)
-+int numa_nearest_node(int node, unsigned int state)
- {
- int min_dist = INT_MAX, dist, n, min_node;
-
-- if (node == NUMA_NO_NODE || node_online(node))
-+ if (state >= NR_NODE_STATES)
-+ return -EINVAL;
-+
-+ if (node == NUMA_NO_NODE || node_state(node, state))
- return node;
-
- min_node = node;
-- for_each_online_node(n) {
-+ for_each_node_state(n, state) {
- dist = node_distance(node, n);
- if (dist < min_dist) {
- min_dist = dist;
-@@ -156,7 +160,7 @@ int numa_map_to_online_node(int node)
-
- return min_node;
- }
--EXPORT_SYMBOL_GPL(numa_map_to_online_node);
-+EXPORT_SYMBOL_GPL(numa_nearest_node);
-
- struct mempolicy *get_task_policy(struct task_struct *p)
- {
-diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index b8d3d7040a506..4656534b8f5cc 100644
---- a/mm/page-writeback.c
-+++ b/mm/page-writeback.c
-@@ -3110,7 +3110,7 @@ EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
- */
- void folio_wait_stable(struct folio *folio)
- {
-- if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
-+ if (mapping_stable_writes(folio_mapping(folio)))
- folio_wait_writeback(folio);
- }
- EXPORT_SYMBOL_GPL(folio_wait_stable);
-diff --git a/mm/readahead.c b/mm/readahead.c
-index e815c114de21e..6925e6959fd3f 100644
---- a/mm/readahead.c
-+++ b/mm/readahead.c
-@@ -735,7 +735,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
- */
- ret = -EINVAL;
- if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
-- !S_ISREG(file_inode(f.file)->i_mode))
-+ (!S_ISREG(file_inode(f.file)->i_mode) &&
-+ !S_ISBLK(file_inode(f.file)->i_mode)))
- goto out;
-
- ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
-diff --git a/mm/util.c b/mm/util.c
-index 8cbbfd3a3d598..be798981acc7d 100644
---- a/mm/util.c
-+++ b/mm/util.c
-@@ -414,6 +414,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
-
- static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
- {
-+#ifdef CONFIG_STACK_GROWSUP
-+ /*
-+ * For an upwards growing stack the calculation is much simpler.
-+ * Memory for the maximum stack size is reserved at the top of the
-+ * task. mmap_base starts directly below the stack and grows
-+ * downwards.
-+ */
-+ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
-+#else
- unsigned long gap = rlim_stack->rlim_cur;
- unsigned long pad = stack_guard_gap;
-
-@@ -431,6 +440,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(STACK_TOP - gap - rnd);
-+#endif
- }
-
- void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-diff --git a/net/9p/client.c b/net/9p/client.c
-index 86bbc7147fc14..e265a0ca6bddd 100644
---- a/net/9p/client.c
-+++ b/net/9p/client.c
-@@ -540,12 +540,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
- return 0;
-
- if (!p9_is_proto_dotl(c)) {
-- char *ename;
-+ char *ename = NULL;
-
- err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
- &ename, &ecode);
-- if (err)
-+ if (err) {
-+ kfree(ename);
- goto out_err;
-+ }
-
- if (p9_is_proto_dotu(c) && ecode < 512)
- err = -ecode;
-@@ -1979,7 +1981,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
- goto error;
- }
- p9_debug(P9_DEBUG_9P,
-- ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
-+ ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
- file_fid->fid, attr_fid->fid, attr_name);
-
- req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
-diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
-index c4015f30f9fa7..d0eb03ada704d 100644
---- a/net/9p/trans_fd.c
-+++ b/net/9p/trans_fd.c
-@@ -832,14 +832,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
- goto out_free_ts;
- if (!(ts->rd->f_mode & FMODE_READ))
- goto out_put_rd;
-- /* prevent workers from hanging on IO when fd is a pipe */
-- ts->rd->f_flags |= O_NONBLOCK;
-+ /* Prevent workers from hanging on IO when fd is a pipe.
-+ * It's technically possible for userspace or concurrent mounts to
-+ * modify this flag concurrently, which will likely result in a
-+ * broken filesystem. However, just having bad flags here should
-+ * not crash the kernel or cause any other sort of bug, so mark this
-+ * particular data race as intentional so that tooling (like KCSAN)
-+ * can allow it and detect further problems.
-+ */
-+ data_race(ts->rd->f_flags |= O_NONBLOCK);
- ts->wr = fget(wfd);
- if (!ts->wr)
- goto out_put_rd;
- if (!(ts->wr->f_mode & FMODE_WRITE))
- goto out_put_wr;
-- ts->wr->f_flags |= O_NONBLOCK;
-+ data_race(ts->wr->f_flags |= O_NONBLOCK);
-
- client->trans = ts;
- client->status = Connected;
-diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
-index 2134f92bd7ac2..5d698f19868c5 100644
---- a/net/bluetooth/amp.c
-+++ b/net/bluetooth/amp.c
-@@ -109,7 +109,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon;
- u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
-
-- hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
-+ hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
- if (!hcon)
- return NULL;
-
-@@ -117,7 +117,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
-
- hcon->state = BT_CONNECT;
- hcon->attempt++;
-- hcon->handle = __next_handle(mgr);
- hcon->remote_id = remote_id;
- hcon->amp_mgr = amp_mgr_get(mgr);
-
-diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
-index 73470cc3518a7..f3139c4c20fc0 100644
---- a/net/bluetooth/hci_conn.c
-+++ b/net/bluetooth/hci_conn.c
-@@ -153,6 +153,9 @@ static void hci_conn_cleanup(struct hci_conn *conn)
-
- hci_conn_hash_del(hdev, conn);
-
-+ if (HCI_CONN_HANDLE_UNSET(conn->handle))
-+ ida_free(&hdev->unset_handle_ida, conn->handle);
-+
- if (conn->cleanup)
- conn->cleanup(conn);
-
-@@ -169,13 +172,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
- hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
- }
-
-- hci_conn_del_sysfs(conn);
--
- debugfs_remove_recursive(conn->debugfs);
-
-- hci_dev_put(hdev);
-+ hci_conn_del_sysfs(conn);
-
-- hci_conn_put(conn);
-+ hci_dev_put(hdev);
- }
-
- static void hci_acl_create_connection(struct hci_conn *conn)
-@@ -928,31 +929,18 @@ static void cis_cleanup(struct hci_conn *conn)
- hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
- }
-
--static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
-+static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
- {
-- struct hci_conn_hash *h = &hdev->conn_hash;
-- struct hci_conn *c;
-- u16 handle = HCI_CONN_HANDLE_MAX + 1;
--
-- rcu_read_lock();
--
-- list_for_each_entry_rcu(c, &h->list, list) {
-- /* Find the first unused handle */
-- if (handle == 0xffff || c->handle != handle)
-- break;
-- handle++;
-- }
-- rcu_read_unlock();
--
-- return handle;
-+ return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
-+ U16_MAX, GFP_ATOMIC);
- }
-
- struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
-- u8 role)
-+ u8 role, u16 handle)
- {
- struct hci_conn *conn;
-
-- BT_DBG("%s dst %pMR", hdev->name, dst);
-+ bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
-
- conn = kzalloc(sizeof(*conn), GFP_KERNEL);
- if (!conn)
-@@ -960,7 +948,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
-
- bacpy(&conn->dst, dst);
- bacpy(&conn->src, &hdev->bdaddr);
-- conn->handle = hci_conn_hash_alloc_unset(hdev);
-+ conn->handle = handle;
- conn->hdev = hdev;
- conn->type = type;
- conn->role = role;
-@@ -973,6 +961,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- conn->rssi = HCI_RSSI_INVALID;
- conn->tx_power = HCI_TX_POWER_INVALID;
- conn->max_tx_power = HCI_TX_POWER_INVALID;
-+ conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
-
- set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-@@ -1044,6 +1033,20 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- return conn;
- }
-
-+struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
-+ bdaddr_t *dst, u8 role)
-+{
-+ int handle;
-+
-+ bt_dev_dbg(hdev, "dst %pMR", dst);
-+
-+ handle = hci_conn_hash_alloc_unset(hdev);
-+ if (unlikely(handle < 0))
-+ return NULL;
-+
-+ return hci_conn_add(hdev, type, dst, role, handle);
-+}
-+
- static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
- {
- if (!reason)
-@@ -1274,6 +1277,9 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
- if (conn->abort_reason)
- return conn->abort_reason;
-
-+ if (HCI_CONN_HANDLE_UNSET(conn->handle))
-+ ida_free(&hdev->unset_handle_ida, conn->handle);
-+
- conn->handle = handle;
-
- return 0;
-@@ -1381,7 +1387,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
- if (conn) {
- bacpy(&conn->dst, dst);
- } else {
-- conn = hci_conn_add(hdev, LE_LINK, dst, role);
-+ conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
- if (!conn)
- return ERR_PTR(-ENOMEM);
- hci_conn_hold(conn);
-@@ -1546,7 +1552,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
- memcmp(conn->le_per_adv_data, base, base_len)))
- return ERR_PTR(-EADDRINUSE);
-
-- conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
-+ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
- if (!conn)
- return ERR_PTR(-ENOMEM);
-
-@@ -1590,7 +1596,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
-
- BT_DBG("requesting refresh of dst_addr");
-
-- conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
-+ conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
- if (!conn)
- return ERR_PTR(-ENOMEM);
-
-@@ -1638,7 +1644,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
-
- acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (!acl) {
-- acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
-+ acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
- if (!acl)
- return ERR_PTR(-ENOMEM);
- }
-@@ -1698,7 +1704,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
-
- sco = hci_conn_hash_lookup_ba(hdev, type, dst);
- if (!sco) {
-- sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
-+ sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
- if (!sco) {
- hci_conn_drop(acl);
- return ERR_PTR(-ENOMEM);
-@@ -1890,7 +1896,7 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
- cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
- qos->ucast.cis);
- if (!cis) {
-- cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
-+ cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
- if (!cis)
- return ERR_PTR(-ENOMEM);
- cis->cleanup = cis_cleanup;
-diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
-index 195aea2198a96..65601aa52e0d8 100644
---- a/net/bluetooth/hci_core.c
-+++ b/net/bluetooth/hci_core.c
-@@ -2535,6 +2535,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
- mutex_init(&hdev->lock);
- mutex_init(&hdev->req_lock);
-
-+ ida_init(&hdev->unset_handle_ida);
-+
- INIT_LIST_HEAD(&hdev->mesh_pending);
- INIT_LIST_HEAD(&hdev->mgmt_pending);
- INIT_LIST_HEAD(&hdev->reject_list);
-@@ -2789,6 +2791,7 @@ void hci_release_dev(struct hci_dev *hdev)
- hci_codec_list_clear(&hdev->local_codecs);
- hci_dev_unlock(hdev);
-
-+ ida_destroy(&hdev->unset_handle_ida);
- ida_simple_remove(&hci_index_ida, hdev->id);
- kfree_skb(hdev->sent_cmd);
- kfree_skb(hdev->recv_event);
-diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
-index 1e1c9147356c3..f6d3150bcbb03 100644
---- a/net/bluetooth/hci_event.c
-+++ b/net/bluetooth/hci_event.c
-@@ -2335,8 +2335,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
- }
- } else {
- if (!conn) {
-- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
-- HCI_ROLE_MASTER);
-+ conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
-+ HCI_ROLE_MASTER);
- if (!conn)
- bt_dev_err(hdev, "no memory for new connection");
- }
-@@ -3151,8 +3151,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
- hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
- &ev->bdaddr,
- BDADDR_BREDR)) {
-- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
-- HCI_ROLE_SLAVE);
-+ conn = hci_conn_add_unset(hdev, ev->link_type,
-+ &ev->bdaddr, HCI_ROLE_SLAVE);
- if (!conn) {
- bt_dev_err(hdev, "no memory for new conn");
- goto unlock;
-@@ -3317,8 +3317,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
- conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
- &ev->bdaddr);
- if (!conn) {
-- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
-- HCI_ROLE_SLAVE);
-+ conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
-+ HCI_ROLE_SLAVE);
- if (!conn) {
- bt_dev_err(hdev, "no memory for new connection");
- goto unlock;
-@@ -5890,7 +5890,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
- if (status)
- goto unlock;
-
-- conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
-+ conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
- if (!conn) {
- bt_dev_err(hdev, "no memory for new connection");
- goto unlock;
-@@ -5952,17 +5952,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
-
- conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
-
-- if (handle > HCI_CONN_HANDLE_MAX) {
-- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
-- HCI_CONN_HANDLE_MAX);
-- status = HCI_ERROR_INVALID_PARAMETERS;
-- }
--
- /* All connection failure handling is taken care of by the
- * hci_conn_failed function which is triggered by the HCI
- * request completion callbacks used for connecting.
- */
-- if (status)
-+ if (status || hci_conn_set_handle(conn, handle))
- goto unlock;
-
- /* Drop the connection if it has been aborted */
-@@ -5986,7 +5980,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
- mgmt_device_connected(hdev, conn, NULL, 0);
-
- conn->sec_level = BT_SECURITY_LOW;
-- conn->handle = handle;
- conn->state = BT_CONFIG;
-
- /* Store current advertising instance as connection advertising instance
-@@ -6603,7 +6596,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
- struct hci_ev_le_pa_sync_established *ev = data;
- int mask = hdev->link_mode;
- __u8 flags = 0;
-- struct hci_conn *bis;
-+ struct hci_conn *pa_sync;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
-
-@@ -6620,20 +6613,19 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
- if (!(flags & HCI_PROTO_DEFER))
- goto unlock;
-
-- /* Add connection to indicate the PA sync event */
-- bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
-- HCI_ROLE_SLAVE);
-+ if (ev->status) {
-+ /* Add connection to indicate the failed PA sync event */
-+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
-+ HCI_ROLE_SLAVE);
-
-- if (!bis)
-- goto unlock;
-+ if (!pa_sync)
-+ goto unlock;
-
-- if (ev->status)
-- set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags);
-- else
-- set_bit(HCI_CONN_PA_SYNC, &bis->flags);
-+ set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
-
-- /* Notify connection to iso layer */
-- hci_connect_cfm(bis, ev->status);
-+ /* Notify iso layer */
-+ hci_connect_cfm(pa_sync, ev->status);
-+ }
-
- unlock:
- hci_dev_unlock(hdev);
-@@ -7020,12 +7012,12 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
-
- cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
- if (!cis) {
-- cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
-+ cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
-+ cis_handle);
- if (!cis) {
- hci_le_reject_cis(hdev, ev->cis_handle);
- goto unlock;
- }
-- cis->handle = cis_handle;
- }
-
- cis->iso_qos.ucast.cig = ev->cig_id;
-@@ -7125,7 +7117,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
- hci_dev_lock(hdev);
-
- if (!ev->status) {
-- pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle);
-+ pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
- if (pa_sync)
- /* Also mark the BIG sync established event on the
- * associated PA sync hcon
-@@ -7140,10 +7132,9 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
- bis = hci_conn_hash_lookup_handle(hdev, handle);
- if (!bis) {
- bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
-- HCI_ROLE_SLAVE);
-+ HCI_ROLE_SLAVE, handle);
- if (!bis)
- continue;
-- bis->handle = handle;
- }
-
- if (ev->status != 0x42)
-@@ -7186,15 +7177,42 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
- struct hci_evt_le_big_info_adv_report *ev = data;
- int mask = hdev->link_mode;
- __u8 flags = 0;
-+ struct hci_conn *pa_sync;
-
- bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
-
- hci_dev_lock(hdev);
-
- mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
-- if (!(mask & HCI_LM_ACCEPT))
-+ if (!(mask & HCI_LM_ACCEPT)) {
- hci_le_pa_term_sync(hdev, ev->sync_handle);
-+ goto unlock;
-+ }
-+
-+ if (!(flags & HCI_PROTO_DEFER))
-+ goto unlock;
-+
-+ pa_sync = hci_conn_hash_lookup_pa_sync_handle
-+ (hdev,
-+ le16_to_cpu(ev->sync_handle));
-+
-+ if (pa_sync)
-+ goto unlock;
-
-+ /* Add connection to indicate the PA sync event */
-+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
-+ HCI_ROLE_SLAVE);
-+
-+ if (!pa_sync)
-+ goto unlock;
-+
-+ pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
-+ set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
-+
-+ /* Notify iso layer */
-+ hci_connect_cfm(pa_sync, 0x00);
-+
-+unlock:
- hci_dev_unlock(hdev);
- }
-
-diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
-index a15ab0b874a9d..9e71362c04b48 100644
---- a/net/bluetooth/hci_sync.c
-+++ b/net/bluetooth/hci_sync.c
-@@ -152,7 +152,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
- struct sk_buff *skb;
- int err = 0;
-
-- bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
-+ bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
-
- hci_req_init(&req, hdev);
-
-@@ -248,7 +248,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
- skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
- if (IS_ERR(skb)) {
- if (!event)
-- bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
-+ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
- PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
-index 15b33579007cb..367e32fe30eb8 100644
---- a/net/bluetooth/hci_sysfs.c
-+++ b/net/bluetooth/hci_sysfs.c
-@@ -35,7 +35,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
- {
- struct hci_dev *hdev = conn->hdev;
-
-- BT_DBG("conn %p", conn);
-+ bt_dev_dbg(hdev, "conn %p", conn);
-
- conn->dev.type = &bt_link;
- conn->dev.class = &bt_class;
-@@ -48,27 +48,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
- {
- struct hci_dev *hdev = conn->hdev;
-
-- BT_DBG("conn %p", conn);
-+ bt_dev_dbg(hdev, "conn %p", conn);
-
- if (device_is_registered(&conn->dev))
- return;
-
- dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
-- if (device_add(&conn->dev) < 0) {
-+ if (device_add(&conn->dev) < 0)
- bt_dev_err(hdev, "failed to register connection device");
-- return;
-- }
--
-- hci_dev_hold(hdev);
- }
-
- void hci_conn_del_sysfs(struct hci_conn *conn)
- {
- struct hci_dev *hdev = conn->hdev;
-
-- if (!device_is_registered(&conn->dev))
-+ bt_dev_dbg(hdev, "conn %p", conn);
-+
-+ if (!device_is_registered(&conn->dev)) {
-+ /* If device_add() has *not* succeeded, use *only* put_device()
-+ * to drop the reference count.
-+ */
-+ put_device(&conn->dev);
- return;
-+ }
-
- while (1) {
- struct device *dev;
-@@ -80,9 +83,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
- put_device(dev);
- }
-
-- device_del(&conn->dev);
--
-- hci_dev_put(hdev);
-+ device_unregister(&conn->dev);
- }
-
- static void bt_host_release(struct device *dev)
-diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
-index 71248163ce9a5..2132a16be93cd 100644
---- a/net/bluetooth/iso.c
-+++ b/net/bluetooth/iso.c
-@@ -77,6 +77,7 @@ static struct bt_iso_qos default_qos;
- static bool check_ucast_qos(struct bt_iso_qos *qos);
- static bool check_bcast_qos(struct bt_iso_qos *qos);
- static bool iso_match_sid(struct sock *sk, void *data);
-+static bool iso_match_sync_handle(struct sock *sk, void *data);
- static void iso_sock_disconn(struct sock *sk);
-
- /* ---- ISO timers ---- */
-@@ -1202,7 +1203,6 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
- test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
- iso_conn_big_sync(sk);
- sk->sk_state = BT_LISTEN;
-- set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
- } else {
- iso_conn_defer_accept(pi->conn->hcon);
- sk->sk_state = BT_CONFIG;
-@@ -1579,6 +1579,7 @@ static void iso_conn_ready(struct iso_conn *conn)
- struct sock *sk = conn->sk;
- struct hci_ev_le_big_sync_estabilished *ev = NULL;
- struct hci_ev_le_pa_sync_established *ev2 = NULL;
-+ struct hci_evt_le_big_info_adv_report *ev3 = NULL;
- struct hci_conn *hcon;
-
- BT_DBG("conn %p", conn);
-@@ -1603,14 +1604,20 @@ static void iso_conn_ready(struct iso_conn *conn)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_big, ev);
-- } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) ||
-- test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
-+ } else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
- ev2 = hci_recv_event_data(hcon->hdev,
- HCI_EV_LE_PA_SYNC_ESTABLISHED);
- if (ev2)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_sid, ev2);
-+ } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
-+ ev3 = hci_recv_event_data(hcon->hdev,
-+ HCI_EVT_LE_BIG_INFO_ADV_REPORT);
-+ if (ev3)
-+ parent = iso_get_sock_listen(&hcon->src,
-+ &hcon->dst,
-+ iso_match_sync_handle, ev3);
- }
-
- if (!parent)
-@@ -1650,11 +1657,13 @@ static void iso_conn_ready(struct iso_conn *conn)
- hcon->sync_handle = iso_pi(parent)->sync_handle;
- }
-
-- if (ev2 && !ev2->status) {
-- iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
-+ if (ev3) {
- iso_pi(sk)->qos = iso_pi(parent)->qos;
-+ iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
-+ hcon->iso_qos = iso_pi(sk)->qos;
- iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
- memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
-+ set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
- }
-
- bacpy(&iso_pi(sk)->dst, &hcon->dst);
-diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
-index 71056ee847736..0fcf357ea7ad3 100644
---- a/net/bridge/netfilter/nf_conntrack_bridge.c
-+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
-@@ -37,7 +37,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
- ktime_t tstamp = skb->tstamp;
- struct ip_frag_state state;
- struct iphdr *iph;
-- int err;
-+ int err = 0;
-
- /* for offloaded checksums cleanup checksum before fragmentation */
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 9f3f8930c6914..9bf90b2a75b6a 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -10050,6 +10050,54 @@ void netif_tx_stop_all_queues(struct net_device *dev)
- }
- EXPORT_SYMBOL(netif_tx_stop_all_queues);
-
-+static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
-+{
-+ void __percpu *v;
-+
-+ /* Drivers implementing ndo_get_peer_dev must support tstat
-+ * accounting, so that skb_do_redirect() can bump the dev's
-+ * RX stats upon network namespace switch.
-+ */
-+ if (dev->netdev_ops->ndo_get_peer_dev &&
-+ dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
-+ return -EOPNOTSUPP;
-+
-+ switch (dev->pcpu_stat_type) {
-+ case NETDEV_PCPU_STAT_NONE:
-+ return 0;
-+ case NETDEV_PCPU_STAT_LSTATS:
-+ v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
-+ break;
-+ case NETDEV_PCPU_STAT_TSTATS:
-+ v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-+ break;
-+ case NETDEV_PCPU_STAT_DSTATS:
-+ v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return v ? 0 : -ENOMEM;
-+}
-+
-+static void netdev_do_free_pcpu_stats(struct net_device *dev)
-+{
-+ switch (dev->pcpu_stat_type) {
-+ case NETDEV_PCPU_STAT_NONE:
-+ return;
-+ case NETDEV_PCPU_STAT_LSTATS:
-+ free_percpu(dev->lstats);
-+ break;
-+ case NETDEV_PCPU_STAT_TSTATS:
-+ free_percpu(dev->tstats);
-+ break;
-+ case NETDEV_PCPU_STAT_DSTATS:
-+ free_percpu(dev->dstats);
-+ break;
-+ }
-+}
-+
- /**
- * register_netdevice() - register a network device
- * @dev: device to register
-@@ -10110,9 +10158,13 @@ int register_netdevice(struct net_device *dev)
- goto err_uninit;
- }
-
-+ ret = netdev_do_alloc_pcpu_stats(dev);
-+ if (ret)
-+ goto err_uninit;
-+
- ret = dev_index_reserve(net, dev->ifindex);
- if (ret < 0)
-- goto err_uninit;
-+ goto err_free_pcpu;
- dev->ifindex = ret;
-
- /* Transfer changeable features to wanted_features and enable
-@@ -10218,6 +10270,8 @@ err_uninit_notify:
- call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
- err_ifindex_release:
- dev_index_release(net, dev->ifindex);
-+err_free_pcpu:
-+ netdev_do_free_pcpu_stats(dev);
- err_uninit:
- if (dev->netdev_ops->ndo_uninit)
- dev->netdev_ops->ndo_uninit(dev);
-@@ -10470,6 +10524,7 @@ void netdev_run_todo(void)
- WARN_ON(rcu_access_pointer(dev->ip_ptr));
- WARN_ON(rcu_access_pointer(dev->ip6_ptr));
-
-+ netdev_do_free_pcpu_stats(dev);
- if (dev->priv_destructor)
- dev->priv_destructor(dev);
- if (dev->needs_free_netdev)
-diff --git a/net/core/filter.c b/net/core/filter.c
-index a094694899c99..b149a165c405c 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -2489,6 +2489,7 @@ int skb_do_redirect(struct sk_buff *skb)
- net_eq(net, dev_net(dev))))
- goto out_drop;
- skb->dev = dev;
-+ dev_sw_netstats_rx_add(dev, skb->len);
- return -EAGAIN;
- }
- return flags & BPF_F_NEIGH ?
-diff --git a/net/core/page_pool.c b/net/core/page_pool.c
-index 77cb75e63aca1..31f923e7b5c40 100644
---- a/net/core/page_pool.c
-+++ b/net/core/page_pool.c
-@@ -221,8 +221,12 @@ static int page_pool_init(struct page_pool *pool,
- return -ENOMEM;
- #endif
-
-- if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
-+ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
-+#ifdef CONFIG_PAGE_POOL_STATS
-+ free_percpu(pool->recycle_stats);
-+#endif
- return -ENOMEM;
-+ }
-
- atomic_set(&pool->pages_state_release_cnt, 0);
-
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 4eaf7ed0d1f44..97b4a42e6e347 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -4254,6 +4254,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
- unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config)
- {
-+ unsigned int patlen = config->ops->get_pattern_len(config);
- struct ts_state state;
- unsigned int ret;
-
-@@ -4265,7 +4266,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
-
- ret = textsearch_find(config, &state);
-- return (ret <= to - from ? ret : UINT_MAX);
-+ return (ret + patlen <= to - from ? ret : UINT_MAX);
- }
- EXPORT_SYMBOL(skb_find_text);
-
-diff --git a/net/core/skmsg.c b/net/core/skmsg.c
-index 6c31eefbd7778..93ecfceac1bc4 100644
---- a/net/core/skmsg.c
-+++ b/net/core/skmsg.c
-@@ -826,6 +826,8 @@ static void sk_psock_destroy(struct work_struct *work)
-
- if (psock->sk_redir)
- sock_put(psock->sk_redir);
-+ if (psock->sk_pair)
-+ sock_put(psock->sk_pair);
- sock_put(psock->sk);
- kfree(psock);
- }
-diff --git a/net/core/sock.c b/net/core/sock.c
-index 16584e2dd6481..bfaf47b3f3c7c 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -600,7 +600,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
- INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
- dst, cookie) == NULL) {
- sk_tx_queue_clear(sk);
-- sk->sk_dst_pending_confirm = 0;
-+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
- dst_release(dst);
- return NULL;
-diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
-index 69453b936bd55..524b7e581a036 100644
---- a/net/dccp/ipv4.c
-+++ b/net/dccp/ipv4.c
-@@ -629,9 +629,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
- if (dccp_parse_options(sk, dreq, skb))
- goto drop_and_free;
-
-- if (security_inet_conn_request(sk, skb, req))
-- goto drop_and_free;
--
- ireq = inet_rsk(req);
- sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
- sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-@@ -639,6 +636,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
- ireq->ireq_family = AF_INET;
- ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
-
-+ if (security_inet_conn_request(sk, skb, req))
-+ goto drop_and_free;
-+
- /*
- * Step 3: Process LISTEN state
- *
-diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
-index c693a570682fb..6f5a556f4f6d7 100644
---- a/net/dccp/ipv6.c
-+++ b/net/dccp/ipv6.c
-@@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
- if (dccp_parse_options(sk, dreq, skb))
- goto drop_and_free;
-
-- if (security_inet_conn_request(sk, skb, req))
-- goto drop_and_free;
--
- ireq = inet_rsk(req);
- ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
- ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- ireq->ireq_family = AF_INET6;
- ireq->ir_mark = inet_request_mark(sk, skb);
-
-+ if (security_inet_conn_request(sk, skb, req))
-+ goto drop_and_free;
-+
- if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
- np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
- np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
-index 3bbd5afb7b31c..fe3553f60bf39 100644
---- a/net/ethtool/netlink.c
-+++ b/net/ethtool/netlink.c
-@@ -505,6 +505,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
- ret = skb->len;
- break;
- }
-+ ret = 0;
- }
- rtnl_unlock();
-
-diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
-index b71dab630a873..80cdc6f6b34c9 100644
---- a/net/hsr/hsr_forward.c
-+++ b/net/hsr/hsr_forward.c
-@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
- skb = skb_copy_expand(frame->skb_std, 0,
- skb_tailroom(frame->skb_std) + HSR_HLEN,
- GFP_ATOMIC);
-- prp_fill_rct(skb, frame, port);
--
-- return skb;
-+ return prp_fill_rct(skb, frame, port);
- }
-
- static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
-diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
-index 418e5fb58fd3f..d515881d02a6f 100644
---- a/net/ipv4/igmp.c
-+++ b/net/ipv4/igmp.c
-@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
- int tv = get_random_u32_below(max_delay);
-
- im->tm_running = 1;
-- if (!mod_timer(&im->timer, jiffies+tv+2))
-- refcount_inc(&im->refcnt);
-+ if (refcount_inc_not_zero(&im->refcnt)) {
-+ if (mod_timer(&im->timer, jiffies + tv + 2))
-+ ip_ma_put(im);
-+ }
- }
-
- static void igmp_gq_start_timer(struct in_device *in_dev)
-diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
-index 598c1b114d2c2..a532f749e4778 100644
---- a/net/ipv4/inet_hashtables.c
-+++ b/net/ipv4/inet_hashtables.c
-@@ -751,12 +751,12 @@ int __inet_hash(struct sock *sk, struct sock *osk)
- if (err)
- goto unlock;
- }
-+ sock_set_flag(sk, SOCK_RCU_FREE);
- if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
- sk->sk_family == AF_INET6)
- __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
- else
- __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
-- sock_set_flag(sk, SOCK_RCU_FREE);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- unlock:
- spin_unlock(&ilb2->lock);
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index b214b5a2e045f..3bad9aa066db3 100644
---- a/net/ipv4/route.c
-+++ b/net/ipv4/route.c
-@@ -780,7 +780,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
- goto reject_redirect;
- }
-
-- n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
-+ n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
- if (!n)
- n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
- if (!IS_ERR(n)) {
-diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
-index dc478a0574cbe..3b4dafefb4b03 100644
---- a/net/ipv4/syncookies.c
-+++ b/net/ipv4/syncookies.c
-@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
- * requested/supported by the syn/synack exchange.
- */
- #define TSBITS 6
--#define TSMASK (((__u32)1 << TSBITS) - 1)
-
- static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
- u32 count, int c)
-@@ -62,27 +61,22 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
- */
- u64 cookie_init_timestamp(struct request_sock *req, u64 now)
- {
-- struct inet_request_sock *ireq;
-- u32 ts, ts_now = tcp_ns_to_ts(now);
-+ const struct inet_request_sock *ireq = inet_rsk(req);
-+ u64 ts, ts_now = tcp_ns_to_ts(now);
- u32 options = 0;
-
-- ireq = inet_rsk(req);
--
- options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
- if (ireq->sack_ok)
- options |= TS_OPT_SACK;
- if (ireq->ecn_ok)
- options |= TS_OPT_ECN;
-
-- ts = ts_now & ~TSMASK;
-+ ts = (ts_now >> TSBITS) << TSBITS;
- ts |= options;
-- if (ts > ts_now) {
-- ts >>= TSBITS;
-- ts--;
-- ts <<= TSBITS;
-- ts |= options;
-- }
-- return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
-+ if (ts > ts_now)
-+ ts -= (1UL << TSBITS);
-+
-+ return ts * (NSEC_PER_SEC / TCP_TS_HZ);
- }
-
-
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 804821d6bd4d4..1f9d1d445fb3b 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -6450,22 +6450,23 @@ reset_and_undo:
-
- static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
- {
-+ struct tcp_sock *tp = tcp_sk(sk);
- struct request_sock *req;
-
- /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
- * undo. If peer SACKs triggered fast recovery, we can't undo here.
- */
-- if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-- tcp_try_undo_loss(sk, false);
-+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
-+ tcp_try_undo_recovery(sk);
-
- /* Reset rtx states to prevent spurious retransmits_timed_out() */
-- tcp_sk(sk)->retrans_stamp = 0;
-+ tp->retrans_stamp = 0;
- inet_csk(sk)->icsk_retransmits = 0;
-
- /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
- * we no longer need req so release it.
- */
-- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
-+ req = rcu_dereference_protected(tp->fastopen_rsk,
- lockdep_sock_is_held(sk));
- reqsk_fastopen_remove(sk, req, false);
-
-diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
-index c196759f1d3bd..7aca12c59c184 100644
---- a/net/ipv4/tcp_metrics.c
-+++ b/net/ipv4/tcp_metrics.c
-@@ -470,11 +470,15 @@ void tcp_init_metrics(struct sock *sk)
- u32 val, crtt = 0; /* cached RTT scaled by 8 */
-
- sk_dst_confirm(sk);
-+ /* ssthresh may have been reduced unnecessarily during.
-+ * 3WHS. Restore it back to its initial default.
-+ */
-+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- if (!dst)
- goto reset;
-
- rcu_read_lock();
-- tm = tcp_get_metrics(sk, dst, true);
-+ tm = tcp_get_metrics(sk, dst, false);
- if (!tm) {
- rcu_read_unlock();
- goto reset;
-@@ -489,11 +493,6 @@ void tcp_init_metrics(struct sock *sk)
- tp->snd_ssthresh = val;
- if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
- tp->snd_ssthresh = tp->snd_cwnd_clamp;
-- } else {
-- /* ssthresh may have been reduced unnecessarily during.
-- * 3WHS. Restore it back to its initial default.
-- */
-- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- }
- val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
- if (val && tp->reordering != val)
-@@ -908,7 +907,7 @@ static void tcp_metrics_flush_all(struct net *net)
- match = net ? net_eq(tm_net(tm), net) :
- !refcount_read(&tm_net(tm)->ns.count);
- if (match) {
-- *pp = tm->tcpm_next;
-+ rcu_assign_pointer(*pp, tm->tcpm_next);
- kfree_rcu(tm, rcu_head);
- } else {
- pp = &tm->tcpm_next;
-@@ -949,7 +948,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
- if (addr_same(&tm->tcpm_daddr, &daddr) &&
- (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
- net_eq(tm_net(tm), net)) {
-- *pp = tm->tcpm_next;
-+ rcu_assign_pointer(*pp, tm->tcpm_next);
- kfree_rcu(tm, rcu_head);
- found = true;
- } else {
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index f0723460753c5..9ccfdc825004d 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -1331,7 +1331,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
- skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
-
-- skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
-+ skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
-
- /* Build TCP header and checksum it. */
- th = (struct tcphdr *)skb->data;
-diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index f39b9c8445808..c3ff984b63547 100644
---- a/net/ipv4/udp.c
-+++ b/net/ipv4/udp.c
-@@ -714,7 +714,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
- iph->saddr, uh->source, skb->dev->ifindex,
- inet_sdif(skb), udptable, NULL);
-
-- if (!sk || udp_sk(sk)->encap_type) {
-+ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
- /* No socket for error: try tunnels before discarding */
- if (static_branch_unlikely(&udp_encap_needed_key)) {
- sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
-@@ -1051,7 +1051,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
- u8 tos, scope;
- __be16 dport;
- int err, is_udplite = IS_UDPLITE(sk);
-- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
-+ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
- int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
- struct sk_buff *skb;
- struct ip_options_data opt_copy;
-@@ -1315,11 +1315,11 @@ void udp_splice_eof(struct socket *sock)
- struct sock *sk = sock->sk;
- struct udp_sock *up = udp_sk(sk);
-
-- if (!up->pending || READ_ONCE(up->corkflag))
-+ if (!up->pending || udp_test_bit(CORK, sk))
- return;
-
- lock_sock(sk);
-- if (up->pending && !READ_ONCE(up->corkflag))
-+ if (up->pending && !udp_test_bit(CORK, sk))
- udp_push_pending_frames(sk);
- release_sock(sk);
- }
-@@ -1868,7 +1868,7 @@ try_again:
- (struct sockaddr *)sin);
- }
-
-- if (udp_sk(sk)->gro_enabled)
-+ if (udp_test_bit(GRO_ENABLED, sk))
- udp_cmsg_recv(msg, sk, skb);
-
- if (inet_cmsg_flags(inet))
-@@ -2081,7 +2081,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
- }
- nf_reset_ct(skb);
-
-- if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
-+ if (static_branch_unlikely(&udp_encap_needed_key) &&
-+ READ_ONCE(up->encap_type)) {
- int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
-
- /*
-@@ -2119,7 +2120,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
- /*
- * UDP-Lite specific tests, ignored on UDP sockets
- */
-- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
-+ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
-+ u16 pcrlen = READ_ONCE(up->pcrlen);
-
- /*
- * MIB statistics other than incrementing the error count are
-@@ -2132,7 +2134,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
- * delivery of packets with coverage values less than a value
- * provided by the application."
- */
-- if (up->pcrlen == 0) { /* full coverage was set */
-+ if (pcrlen == 0) { /* full coverage was set */
- net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
- UDP_SKB_CB(skb)->cscov, skb->len);
- goto drop;
-@@ -2143,9 +2145,9 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
- * that it wants x while sender emits packets of smaller size y.
- * Therefore the above ...()->partial_cov statement is essential.
- */
-- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
-+ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
- net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
-- UDP_SKB_CB(skb)->cscov, up->pcrlen);
-+ UDP_SKB_CB(skb)->cscov, pcrlen);
- goto drop;
- }
- }
-@@ -2618,7 +2620,7 @@ void udp_destroy_sock(struct sock *sk)
- if (encap_destroy)
- encap_destroy(sk);
- }
-- if (up->encap_enabled)
-+ if (udp_test_bit(ENCAP_ENABLED, sk))
- static_branch_dec(&udp_encap_needed_key);
- }
- }
-@@ -2658,9 +2660,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- switch (optname) {
- case UDP_CORK:
- if (val != 0) {
-- WRITE_ONCE(up->corkflag, 1);
-+ udp_set_bit(CORK, sk);
- } else {
-- WRITE_ONCE(up->corkflag, 0);
-+ udp_clear_bit(CORK, sk);
- lock_sock(sk);
- push_pending_frames(sk);
- release_sock(sk);
-@@ -2675,17 +2677,17 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- case UDP_ENCAP_ESPINUDP_NON_IKE:
- #if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6)
-- up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv;
-+ WRITE_ONCE(up->encap_rcv,
-+ ipv6_stub->xfrm6_udp_encap_rcv);
- else
- #endif
-- up->encap_rcv = xfrm4_udp_encap_rcv;
-+ WRITE_ONCE(up->encap_rcv,
-+ xfrm4_udp_encap_rcv);
- #endif
- fallthrough;
- case UDP_ENCAP_L2TPINUDP:
-- up->encap_type = val;
-- lock_sock(sk);
-- udp_tunnel_encap_enable(sk->sk_socket);
-- release_sock(sk);
-+ WRITE_ONCE(up->encap_type, val);
-+ udp_tunnel_encap_enable(sk);
- break;
- default:
- err = -ENOPROTOOPT;
-@@ -2694,11 +2696,11 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- break;
-
- case UDP_NO_CHECK6_TX:
-- up->no_check6_tx = valbool;
-+ udp_set_no_check6_tx(sk, valbool);
- break;
-
- case UDP_NO_CHECK6_RX:
-- up->no_check6_rx = valbool;
-+ udp_set_no_check6_rx(sk, valbool);
- break;
-
- case UDP_SEGMENT:
-@@ -2708,14 +2710,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- break;
-
- case UDP_GRO:
-- lock_sock(sk);
-
- /* when enabling GRO, accept the related GSO packet type */
- if (valbool)
-- udp_tunnel_encap_enable(sk->sk_socket);
-- up->gro_enabled = valbool;
-- up->accept_udp_l4 = valbool;
-- release_sock(sk);
-+ udp_tunnel_encap_enable(sk);
-+ udp_assign_bit(GRO_ENABLED, sk, valbool);
-+ udp_assign_bit(ACCEPT_L4, sk, valbool);
- break;
-
- /*
-@@ -2730,8 +2730,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- val = 8;
- else if (val > USHRT_MAX)
- val = USHRT_MAX;
-- up->pcslen = val;
-- up->pcflag |= UDPLITE_SEND_CC;
-+ WRITE_ONCE(up->pcslen, val);
-+ udp_set_bit(UDPLITE_SEND_CC, sk);
- break;
-
- /* The receiver specifies a minimum checksum coverage value. To make
-@@ -2744,8 +2744,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- val = 8;
- else if (val > USHRT_MAX)
- val = USHRT_MAX;
-- up->pcrlen = val;
-- up->pcflag |= UDPLITE_RECV_CC;
-+ WRITE_ONCE(up->pcrlen, val);
-+ udp_set_bit(UDPLITE_RECV_CC, sk);
- break;
-
- default:
-@@ -2783,19 +2783,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
-
- switch (optname) {
- case UDP_CORK:
-- val = READ_ONCE(up->corkflag);
-+ val = udp_test_bit(CORK, sk);
- break;
-
- case UDP_ENCAP:
-- val = up->encap_type;
-+ val = READ_ONCE(up->encap_type);
- break;
-
- case UDP_NO_CHECK6_TX:
-- val = up->no_check6_tx;
-+ val = udp_get_no_check6_tx(sk);
- break;
-
- case UDP_NO_CHECK6_RX:
-- val = up->no_check6_rx;
-+ val = udp_get_no_check6_rx(sk);
- break;
-
- case UDP_SEGMENT:
-@@ -2803,17 +2803,17 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
- break;
-
- case UDP_GRO:
-- val = up->gro_enabled;
-+ val = udp_test_bit(GRO_ENABLED, sk);
- break;
-
- /* The following two cannot be changed on UDP sockets, the return is
- * always 0 (which corresponds to the full checksum coverage of UDP). */
- case UDPLITE_SEND_CSCOV:
-- val = up->pcslen;
-+ val = READ_ONCE(up->pcslen);
- break;
-
- case UDPLITE_RECV_CSCOV:
-- val = up->pcrlen;
-+ val = READ_ONCE(up->pcrlen);
- break;
-
- default:
-diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
-index 0f46b3c2e4ac5..6c95d28d0c4a7 100644
---- a/net/ipv4/udp_offload.c
-+++ b/net/ipv4/udp_offload.c
-@@ -557,10 +557,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
- NAPI_GRO_CB(skb)->is_flist = 0;
- if (!sk || !udp_sk(sk)->gro_receive) {
- if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
-- NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
-+ NAPI_GRO_CB(skb)->is_flist = sk ? !udp_test_bit(GRO_ENABLED, sk) : 1;
-
- if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
-- (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
-+ (sk && udp_test_bit(GRO_ENABLED, sk)) || NAPI_GRO_CB(skb)->is_flist)
- return call_gro_receive(udp_gro_receive_segment, head, skb);
-
- /* no GRO, be sure flush the current packet */
-diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
-index 9b18f371af0d4..1e7e4aecdc48a 100644
---- a/net/ipv4/udp_tunnel_core.c
-+++ b/net/ipv4/udp_tunnel_core.c
-@@ -78,7 +78,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
- udp_sk(sk)->gro_receive = cfg->gro_receive;
- udp_sk(sk)->gro_complete = cfg->gro_complete;
-
-- udp_tunnel_encap_enable(sock);
-+ udp_tunnel_encap_enable(sk);
- }
- EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
-
-diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
-index 39ecdad1b50ce..af37af3ab727b 100644
---- a/net/ipv4/udplite.c
-+++ b/net/ipv4/udplite.c
-@@ -21,7 +21,6 @@ EXPORT_SYMBOL(udplite_table);
- static int udplite_sk_init(struct sock *sk)
- {
- udp_init_sock(sk);
-- udp_sk(sk)->pcflag = UDPLITE_BIT;
- pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
- "please contact the netdev mailing list\n");
- return 0;
-diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
-index eac206a290d05..183f6dc372429 100644
---- a/net/ipv4/xfrm4_input.c
-+++ b/net/ipv4/xfrm4_input.c
-@@ -85,11 +85,11 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
- struct udphdr *uh;
- struct iphdr *iph;
- int iphlen, len;
--
- __u8 *udpdata;
- __be32 *udpdata32;
-- __u16 encap_type = up->encap_type;
-+ u16 encap_type;
-
-+ encap_type = READ_ONCE(up->encap_type);
- /* if this is not encapsulated socket, then just return now */
- if (!encap_type)
- return 1;
-diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
-index 54fc4c711f2c5..1121082901b99 100644
---- a/net/ipv6/ip6_output.c
-+++ b/net/ipv6/ip6_output.c
-@@ -162,7 +162,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
- int err;
-
- skb_mark_not_on_list(segs);
-- err = ip6_fragment(net, sk, segs, ip6_finish_output2);
-+ /* Last GSO segment can be smaller than gso_size (and MTU).
-+ * Adding a fragment header would produce an "atomic fragment",
-+ * which is considered harmful (RFC-8021). Avoid that.
-+ */
-+ err = segs->len > mtu ?
-+ ip6_fragment(net, sk, segs, ip6_finish_output2) :
-+ ip6_finish_output2(net, sk, segs);
- if (err && ret == 0)
- ret = err;
- }
-diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
-index 5014aa6634527..8698b49dfc8de 100644
---- a/net/ipv6/syncookies.c
-+++ b/net/ipv6/syncookies.c
-@@ -180,14 +180,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
- treq = tcp_rsk(req);
- treq->tfo_listener = false;
-
-- if (security_inet_conn_request(sk, skb, req))
-- goto out_free;
--
- req->mss = mss;
- ireq->ir_rmt_port = th->source;
- ireq->ir_num = ntohs(th->dest);
- ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
- ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
-+
-+ if (security_inet_conn_request(sk, skb, req))
-+ goto out_free;
-+
- if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
- np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
- np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index 86b5d509a4688..f60ba42954352 100644
---- a/net/ipv6/udp.c
-+++ b/net/ipv6/udp.c
-@@ -413,7 +413,7 @@ try_again:
- (struct sockaddr *)sin6);
- }
-
-- if (udp_sk(sk)->gro_enabled)
-+ if (udp_test_bit(GRO_ENABLED, sk))
- udp_cmsg_recv(msg, sk, skb);
-
- if (np->rxopt.all)
-@@ -571,7 +571,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
- inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
-
-- if (!sk || udp_sk(sk)->encap_type) {
-+ if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
- /* No socket for error: try tunnels before discarding */
- if (static_branch_unlikely(&udpv6_encap_needed_key)) {
- sk = __udp6_lib_err_encap(net, hdr, offset, uh,
-@@ -688,7 +688,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
- }
- nf_reset_ct(skb);
-
-- if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
-+ if (static_branch_unlikely(&udpv6_encap_needed_key) &&
-+ READ_ONCE(up->encap_type)) {
- int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
-
- /*
-@@ -726,16 +727,17 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
- /*
- * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
- */
-- if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
-+ if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
-+ u16 pcrlen = READ_ONCE(up->pcrlen);
-
-- if (up->pcrlen == 0) { /* full coverage was set */
-+ if (pcrlen == 0) { /* full coverage was set */
- net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
- UDP_SKB_CB(skb)->cscov, skb->len);
- goto drop;
- }
-- if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
-+ if (UDP_SKB_CB(skb)->cscov < pcrlen) {
- net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
-- UDP_SKB_CB(skb)->cscov, up->pcrlen);
-+ UDP_SKB_CB(skb)->cscov, pcrlen);
- goto drop;
- }
- }
-@@ -858,7 +860,7 @@ start_lookup:
- /* If zero checksum and no_check is not on for
- * the socket then skip it.
- */
-- if (!uh->check && !udp_sk(sk)->no_check6_rx)
-+ if (!uh->check && !udp_get_no_check6_rx(sk))
- continue;
- if (!first) {
- first = sk;
-@@ -980,7 +982,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
- if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
- udp6_sk_rx_dst_set(sk, dst);
-
-- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
-+ if (!uh->check && !udp_get_no_check6_rx(sk)) {
- if (refcounted)
- sock_put(sk);
- goto report_csum_error;
-@@ -1002,7 +1004,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
- /* Unicast */
- sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
- if (sk) {
-- if (!uh->check && !udp_sk(sk)->no_check6_rx)
-+ if (!uh->check && !udp_get_no_check6_rx(sk))
- goto report_csum_error;
- return udp6_unicast_rcv_skb(sk, skb, uh);
- }
-@@ -1241,7 +1243,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
- kfree_skb(skb);
- return -EINVAL;
- }
-- if (udp_sk(sk)->no_check6_tx) {
-+ if (udp_get_no_check6_tx(sk)) {
- kfree_skb(skb);
- return -EINVAL;
- }
-@@ -1262,7 +1264,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
-
- if (is_udplite)
- csum = udplite_csum(skb);
-- else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
-+ else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
- skb->ip_summed = CHECKSUM_NONE;
- goto send;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
-@@ -1332,7 +1334,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
- int addr_len = msg->msg_namelen;
- bool connected = false;
- int ulen = len;
-- int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
-+ int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
- int err;
- int is_udplite = IS_UDPLITE(sk);
- int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
-@@ -1644,11 +1646,11 @@ static void udpv6_splice_eof(struct socket *sock)
- struct sock *sk = sock->sk;
- struct udp_sock *up = udp_sk(sk);
-
-- if (!up->pending || READ_ONCE(up->corkflag))
-+ if (!up->pending || udp_test_bit(CORK, sk))
- return;
-
- lock_sock(sk);
-- if (up->pending && !READ_ONCE(up->corkflag))
-+ if (up->pending && !udp_test_bit(CORK, sk))
- udp_v6_push_pending_frames(sk);
- release_sock(sk);
- }
-@@ -1670,7 +1672,7 @@ void udpv6_destroy_sock(struct sock *sk)
- if (encap_destroy)
- encap_destroy(sk);
- }
-- if (up->encap_enabled) {
-+ if (udp_test_bit(ENCAP_ENABLED, sk)) {
- static_branch_dec(&udpv6_encap_needed_key);
- udp_encap_disable();
- }
-diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
-index 267d491e97075..a60bec9b14f14 100644
---- a/net/ipv6/udplite.c
-+++ b/net/ipv6/udplite.c
-@@ -17,7 +17,6 @@
- static int udplitev6_sk_init(struct sock *sk)
- {
- udpv6_init_sock(sk);
-- udp_sk(sk)->pcflag = UDPLITE_BIT;
- pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, "
- "please contact the netdev mailing list\n");
- return 0;
-diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
-index 4907ab241d6be..4156387248e40 100644
---- a/net/ipv6/xfrm6_input.c
-+++ b/net/ipv6/xfrm6_input.c
-@@ -81,14 +81,14 @@ int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
- struct ipv6hdr *ip6h;
- int len;
- int ip6hlen = sizeof(struct ipv6hdr);
--
- __u8 *udpdata;
- __be32 *udpdata32;
-- __u16 encap_type = up->encap_type;
-+ u16 encap_type;
-
- if (skb->protocol == htons(ETH_P_IP))
- return xfrm4_udp_encap_rcv(sk, skb);
-
-+ encap_type = READ_ONCE(up->encap_type);
- /* if this is not encapsulated socket, then just return now */
- if (!encap_type)
- return 1;
-diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
-index 03608d3ded4b8..8d21ff25f1602 100644
---- a/net/l2tp/l2tp_core.c
-+++ b/net/l2tp/l2tp_core.c
-@@ -1139,9 +1139,9 @@ static void l2tp_tunnel_destruct(struct sock *sk)
- switch (tunnel->encap) {
- case L2TP_ENCAPTYPE_UDP:
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
-- (udp_sk(sk))->encap_type = 0;
-- (udp_sk(sk))->encap_rcv = NULL;
-- (udp_sk(sk))->encap_destroy = NULL;
-+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
-+ udp_sk(sk)->encap_rcv = NULL;
-+ udp_sk(sk)->encap_destroy = NULL;
- break;
- case L2TP_ENCAPTYPE_IP:
- break;
-diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
-index 7cac441862e21..51bccfb00a9cd 100644
---- a/net/llc/llc_input.c
-+++ b/net/llc/llc_input.c
-@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
- skb->transport_header += llc_len;
- skb_pull(skb, llc_len);
- if (skb->protocol == htons(ETH_P_802_2)) {
-- __be16 pdulen = eth_hdr(skb)->h_proto;
-- s32 data_size = ntohs(pdulen) - llc_len;
-+ __be16 pdulen;
-+ s32 data_size;
-+
-+ if (skb->mac_len < ETH_HLEN)
-+ return 0;
-+
-+ pdulen = eth_hdr(skb)->h_proto;
-+ data_size = ntohs(pdulen) - llc_len;
-
- if (data_size < 0 ||
- !pskb_may_pull(skb, data_size))
-diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
-index 79d1cef8f15a9..06fb8e6944b06 100644
---- a/net/llc/llc_s_ac.c
-+++ b/net/llc/llc_s_ac.c
-@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
- int rc = 1;
- u32 data_size;
-
-+ if (skb->mac_len < ETH_HLEN)
-+ return 1;
-+
- llc_pdu_decode_sa(skb, mac_da);
- llc_pdu_decode_da(skb, mac_sa);
- llc_pdu_decode_ssap(skb, &dsap);
-diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
-index 05c6ae0920534..f506542925109 100644
---- a/net/llc/llc_station.c
-+++ b/net/llc/llc_station.c
-@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
- u32 data_size;
- struct sk_buff *nskb;
-
-+ if (skb->mac_len < ETH_HLEN)
-+ goto out;
-+
- /* The test request command is type U (llc_len = 3) */
- data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
- nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
-diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
-index 0e3a1753a51c6..715da615f0359 100644
---- a/net/mac80211/cfg.c
-+++ b/net/mac80211/cfg.c
-@@ -3121,6 +3121,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
- else
- *dbm = sdata->vif.bss_conf.txpower;
-
-+ /* INT_MIN indicates no power level was set yet */
-+ if (*dbm == INT_MIN)
-+ return -EINVAL;
-+
- return 0;
- }
-
-diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
-index 30cd0c905a24f..aa37a1410f377 100644
---- a/net/mac80211/driver-ops.c
-+++ b/net/mac80211/driver-ops.c
-@@ -510,10 +510,13 @@ int drv_change_vif_links(struct ieee80211_local *local,
- if (ret)
- return ret;
-
-- for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
-- link = rcu_access_pointer(sdata->link[link_id]);
-+ if (!local->in_reconfig) {
-+ for_each_set_bit(link_id, &links_to_add,
-+ IEEE80211_MLD_MAX_NUM_LINKS) {
-+ link = rcu_access_pointer(sdata->link[link_id]);
-
-- ieee80211_link_debugfs_drv_add(link);
-+ ieee80211_link_debugfs_drv_add(link);
-+ }
- }
-
- return 0;
-diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
-index c4505593ba7a6..2bc2fbe58f944 100644
---- a/net/mac80211/driver-ops.h
-+++ b/net/mac80211/driver-ops.h
-@@ -23,7 +23,7 @@
- static inline struct ieee80211_sub_if_data *
- get_bss_sdata(struct ieee80211_sub_if_data *sdata)
- {
-- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-+ if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
- u.ap);
-
-@@ -638,10 +638,13 @@ static inline void drv_flush(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- u32 queues, bool drop)
- {
-- struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
-+ struct ieee80211_vif *vif;
-
- might_sleep();
-
-+ sdata = get_bss_sdata(sdata);
-+ vif = sdata ? &sdata->vif : NULL;
-+
- if (sdata && !check_sdata_in_driver(sdata))
- return;
-
-@@ -657,6 +660,8 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
- {
- might_sleep();
-
-+ sdata = get_bss_sdata(sdata);
-+
- if (sdata && !check_sdata_in_driver(sdata))
- return;
-
-diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h
-index 49dc809cab290..1570fac8411f4 100644
---- a/net/mac80211/drop.h
-+++ b/net/mac80211/drop.h
-@@ -53,4 +53,7 @@ enum mac80211_drop_reason {
- #undef DEF
- };
-
-+#define RX_RES_IS_UNUSABLE(result) \
-+ (((__force u32)(result) & SKB_DROP_REASON_SUBSYS_MASK) == ___RX_DROP_UNUSABLE)
-+
- #endif /* MAC80211_DROP_H */
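
Note: RX_RES_IS_UNUSABLE() exists because mac80211 rx results now carry an skb drop-reason subsystem in their high bits and the specific reason in the low bits, so the old bitwise test (result & RX_DROP_UNUSABLE), still visible in the rx.c hunk further down, no longer matches; the macro compares only the subsystem field. A userspace toy of the encoding (every value here is invented for illustration):

        #include <stdint.h>
        #include <stdio.h>

        #define SUBSYS_SHIFT            16
        #define SUBSYS_MASK             0xffff0000u
        #define SUBSYS_UNUSABLE         (2u << SUBSYS_SHIFT)   /* made-up id */

        #define IS_UNUSABLE(res)        (((res) & SUBSYS_MASK) == SUBSYS_UNUSABLE)

        int main(void)
        {
                uint32_t res = SUBSYS_UNUSABLE | 7;     /* subsystem | reason */

                printf("%d\n", IS_UNUSABLE(res));       /* prints 1 */
                return 0;
        }
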
-diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
-index 98ef1fe1226e7..07beb72ddd25a 100644
---- a/net/mac80211/ieee80211_i.h
-+++ b/net/mac80211/ieee80211_i.h
-@@ -1406,7 +1406,7 @@ struct ieee80211_local {
- /* wowlan is enabled -- don't reconfig on resume */
- bool wowlan;
-
-- struct work_struct radar_detected_work;
-+ struct wiphy_work radar_detected_work;
-
- /* number of RX chains the hardware has */
- u8 rx_chains;
-@@ -1483,14 +1483,14 @@ struct ieee80211_local {
- int hw_scan_ies_bufsize;
- struct cfg80211_scan_info scan_info;
-
-- struct work_struct sched_scan_stopped_work;
-+ struct wiphy_work sched_scan_stopped_work;
- struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
- struct cfg80211_sched_scan_request __rcu *sched_scan_req;
- u8 scan_addr[ETH_ALEN];
-
- unsigned long leave_oper_channel_time;
- enum mac80211_scan_state next_scan_state;
-- struct delayed_work scan_work;
-+ struct wiphy_delayed_work scan_work;
- struct ieee80211_sub_if_data __rcu *scan_sdata;
- /* For backward compatibility only -- do not use */
- struct cfg80211_chan_def _oper_chandef;
-@@ -1583,9 +1583,9 @@ struct ieee80211_local {
- /*
- * Remain-on-channel support
- */
-- struct delayed_work roc_work;
-+ struct wiphy_delayed_work roc_work;
- struct list_head roc_list;
-- struct work_struct hw_roc_start, hw_roc_done;
-+ struct wiphy_work hw_roc_start, hw_roc_done;
- unsigned long hw_roc_start_time;
- u64 roc_cookie_counter;
-
-@@ -1929,7 +1929,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata,
- u64 *changed);
-
- /* scan/BSS handling */
--void ieee80211_scan_work(struct work_struct *work);
-+void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work);
- int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
- const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel **channels,
-@@ -1962,7 +1962,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_sched_scan_request *req);
- int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
- void ieee80211_sched_scan_end(struct ieee80211_local *local);
--void ieee80211_sched_scan_stopped_work(struct work_struct *work);
-+void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
-+ struct wiphy_work *work);
-
- /* off-channel/mgmt-tx */
- void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
-@@ -2566,7 +2567,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local);
-
- void ieee80211_dfs_cac_timer_work(struct work_struct *work);
- void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
--void ieee80211_dfs_radar_detected_work(struct work_struct *work);
-+void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
-+ struct wiphy_work *work);
- int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_csa_settings *csa_settings);
-
-diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
-index be586bc0b5b7d..6e3bfb46af44d 100644
---- a/net/mac80211/iface.c
-+++ b/net/mac80211/iface.c
-@@ -691,7 +691,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
- ieee80211_recalc_ps(local);
-
- if (cancel_scan)
-- flush_delayed_work(&local->scan_work);
-+ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
-
- if (local->open_count == 0) {
- ieee80211_stop_device(local);
-diff --git a/net/mac80211/link.c b/net/mac80211/link.c
-index 6148208b320e3..16cbaea93fc32 100644
---- a/net/mac80211/link.c
-+++ b/net/mac80211/link.c
-@@ -195,7 +195,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
-
- memset(to_free, 0, sizeof(links));
-
-- if (old_links == new_links)
-+ if (old_links == new_links && dormant_links == sdata->vif.dormant_links)
- return 0;
-
- /* if there were no old links, need to clear the pointers to deflink */
-diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index 24315d7b31263..4548f84451095 100644
---- a/net/mac80211/main.c
-+++ b/net/mac80211/main.c
-@@ -335,10 +335,7 @@ static void ieee80211_restart_work(struct work_struct *work)
- struct ieee80211_sub_if_data *sdata;
- int ret;
-
-- /* wait for scan work complete */
- flush_workqueue(local->workqueue);
-- flush_work(&local->sched_scan_stopped_work);
-- flush_work(&local->radar_detected_work);
-
- rtnl_lock();
- /* we might do interface manipulations, so need both */
-@@ -379,8 +376,8 @@ static void ieee80211_restart_work(struct work_struct *work)
- ieee80211_scan_cancel(local);
-
- /* make sure any new ROC will consider local->in_reconfig */
-- flush_delayed_work(&local->roc_work);
-- flush_work(&local->hw_roc_done);
-+ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work);
-+ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done);
-
- /* wait for all packet processing to be done */
- synchronize_net();
-@@ -809,12 +806,12 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
- INIT_LIST_HEAD(&local->chanctx_list);
- mutex_init(&local->chanctx_mtx);
-
-- INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
-+ wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work);
-
- INIT_WORK(&local->restart_work, ieee80211_restart_work);
-
-- INIT_WORK(&local->radar_detected_work,
-- ieee80211_dfs_radar_detected_work);
-+ wiphy_work_init(&local->radar_detected_work,
-+ ieee80211_dfs_radar_detected_work);
-
- INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
- local->smps_mode = IEEE80211_SMPS_OFF;
-@@ -825,8 +822,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
- ieee80211_dynamic_ps_disable_work);
- timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
-
-- INIT_WORK(&local->sched_scan_stopped_work,
-- ieee80211_sched_scan_stopped_work);
-+ wiphy_work_init(&local->sched_scan_stopped_work,
-+ ieee80211_sched_scan_stopped_work);
-
- spin_lock_init(&local->ack_status_lock);
- idr_init(&local->ack_status_frames);
-@@ -1482,13 +1479,15 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
- */
- ieee80211_remove_interfaces(local);
-
-+ wiphy_lock(local->hw.wiphy);
-+ wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work);
-+ wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work);
-+ wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work);
-+ wiphy_unlock(local->hw.wiphy);
- rtnl_unlock();
-
-- cancel_delayed_work_sync(&local->roc_work);
- cancel_work_sync(&local->restart_work);
- cancel_work_sync(&local->reconfig_filter);
-- flush_work(&local->sched_scan_stopped_work);
-- flush_work(&local->radar_detected_work);
-
- ieee80211_clear_tx_pending(local);
- rate_control_deinitialize(local);
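
Note: the mac80211 hunks above (ieee80211_i.h, iface.c, main.c) and below (offchannel.c, scan.c, util.c) are one mechanical conversion: work items whose callbacks need the wiphy mutex move from struct work_struct to struct wiphy_work, are queued with wiphy_work_queue() or wiphy_delayed_work_queue(), and their callbacks gain a struct wiphy * argument, invoked by the cfg80211 core with wiphy_lock() already held. The shape of the conversion reduced to one worker (a sketch against mac80211 internals, not a complete change):

        /* Before:
         *      INIT_WORK(&local->radar_detected_work, old_fn);
         *      schedule_work(&local->radar_detected_work);
         * and old_fn() had to take wiphy_lock() itself.
         */

        /* After: the core holds wiphy->mtx around the callback. */
        static void demo_radar_fn(struct wiphy *wiphy, struct wiphy_work *work)
        {
                /* safe to touch wiphy-protected state here */
        }

        static void demo_setup(struct ieee80211_local *local)
        {
                wiphy_work_init(&local->radar_detected_work, demo_radar_fn);
                wiphy_work_queue(local->hw.wiphy, &local->radar_detected_work);
        }
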
-diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
-index d32e304eeb4ba..3e52aaa57b1fc 100644
---- a/net/mac80211/mesh_pathtbl.c
-+++ b/net/mac80211/mesh_pathtbl.c
-@@ -648,7 +648,7 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
-
- cache = &sdata->u.mesh.tx_cache;
- spin_lock_bh(&cache->walk_lock);
-- entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
-+ entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
- if (entry)
- mesh_fast_tx_entry_free(cache, entry);
- spin_unlock_bh(&cache->walk_lock);
-diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
-index cdf991e74ab99..5bedd9cef414d 100644
---- a/net/mac80211/offchannel.c
-+++ b/net/mac80211/offchannel.c
-@@ -230,7 +230,7 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
- if (dur == LONG_MAX)
- return false;
-
-- mod_delayed_work(local->workqueue, &local->roc_work, dur);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, dur);
- return true;
- }
-
-@@ -258,7 +258,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
- roc->notified = true;
- }
-
--static void ieee80211_hw_roc_start(struct work_struct *work)
-+static void ieee80211_hw_roc_start(struct wiphy *wiphy, struct wiphy_work *work)
- {
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, hw_roc_start);
-@@ -285,7 +285,7 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
-
- trace_api_ready_on_channel(local);
-
-- ieee80211_queue_work(hw, &local->hw_roc_start);
-+ wiphy_work_queue(hw->wiphy, &local->hw_roc_start);
- }
- EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
-
-@@ -338,7 +338,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
- tmp->started = true;
- tmp->abort = true;
- }
-- ieee80211_queue_work(&local->hw, &local->hw_roc_done);
-+ wiphy_work_queue(local->hw.wiphy, &local->hw_roc_done);
- return;
- }
-
-@@ -368,8 +368,8 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
- ieee80211_hw_config(local, 0);
- }
-
-- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
-- msecs_to_jiffies(min_dur));
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
-+ msecs_to_jiffies(min_dur));
-
- /* tell userspace or send frame(s) */
- list_for_each_entry(tmp, &local->roc_list, list) {
-@@ -407,8 +407,8 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
- _ieee80211_start_next_roc(local);
- } else {
- /* delay it a bit */
-- ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
-- round_jiffies_relative(HZ/2));
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
-+ round_jiffies_relative(HZ / 2));
- }
- }
-
-@@ -451,7 +451,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
- }
- }
-
--static void ieee80211_roc_work(struct work_struct *work)
-+static void ieee80211_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
- {
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, roc_work.work);
-@@ -461,7 +461,7 @@ static void ieee80211_roc_work(struct work_struct *work)
- mutex_unlock(&local->mtx);
- }
-
--static void ieee80211_hw_roc_done(struct work_struct *work)
-+static void ieee80211_hw_roc_done(struct wiphy *wiphy, struct wiphy_work *work)
- {
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, hw_roc_done);
-@@ -482,7 +482,7 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
-
- trace_api_remain_on_channel_expired(local);
-
-- ieee80211_queue_work(hw, &local->hw_roc_done);
-+ wiphy_work_queue(hw->wiphy, &local->hw_roc_done);
- }
- EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
-
-@@ -586,8 +586,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
- /* if not HW assist, just queue & schedule work */
- if (!local->ops->remain_on_channel) {
- list_add_tail(&roc->list, &local->roc_list);
-- ieee80211_queue_delayed_work(&local->hw,
-- &local->roc_work, 0);
-+ wiphy_delayed_work_queue(local->hw.wiphy,
-+ &local->roc_work, 0);
- } else {
- /* otherwise actually kick it off here
- * (for error handling)
-@@ -695,7 +695,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
- if (!cookie)
- return -ENOENT;
-
-- flush_work(&local->hw_roc_start);
-+ wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start);
-
- mutex_lock(&local->mtx);
- list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
-@@ -745,7 +745,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
- } else {
- /* go through work struct to return to the operating channel */
- found->abort = true;
-- mod_delayed_work(local->workqueue, &local->roc_work, 0);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work, 0);
- }
-
- out_unlock:
-@@ -994,9 +994,9 @@ int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
-
- void ieee80211_roc_setup(struct ieee80211_local *local)
- {
-- INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
-- INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
-- INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
-+ wiphy_work_init(&local->hw_roc_start, ieee80211_hw_roc_start);
-+ wiphy_work_init(&local->hw_roc_done, ieee80211_hw_roc_done);
-+ wiphy_delayed_work_init(&local->roc_work, ieee80211_roc_work);
- INIT_LIST_HEAD(&local->roc_list);
- }
-
-diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index 8f6b6f56b65b4..26ca2f5dc52b2 100644
---- a/net/mac80211/rx.c
-+++ b/net/mac80211/rx.c
-@@ -2112,7 +2112,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
- /* either the frame has been decrypted or will be dropped */
- status->flag |= RX_FLAG_DECRYPTED;
-
-- if (unlikely(ieee80211_is_beacon(fc) && (result & RX_DROP_UNUSABLE) &&
-+ if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
- rx->sdata->dev))
- cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
- skb->data, skb->len);
-diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
-index 0805aa8603c61..68ec2124c3db5 100644
---- a/net/mac80211/scan.c
-+++ b/net/mac80211/scan.c
-@@ -274,8 +274,8 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
- * the beacon/proberesp rx gives us an opportunity to upgrade
- * to active scan
- */
-- set_bit(SCAN_BEACON_DONE, &local->scanning);
-- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
-+ set_bit(SCAN_BEACON_DONE, &local->scanning);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
- }
-
- if (ieee80211_is_probe_resp(mgmt->frame_control)) {
-@@ -505,7 +505,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
-
- memcpy(&local->scan_info, info, sizeof(*info));
-
-- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
- }
- EXPORT_SYMBOL(ieee80211_scan_completed);
-
-@@ -545,8 +545,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
- /* We need to set power level at maximum rate for scanning. */
- ieee80211_hw_config(local, 0);
-
-- ieee80211_queue_delayed_work(&local->hw,
-- &local->scan_work, 0);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
-
- return 0;
- }
-@@ -603,8 +602,8 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
- lockdep_is_held(&local->mtx))))
- return;
-
-- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
-- round_jiffies_relative(0));
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
-+ round_jiffies_relative(0));
- }
-
- static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
-@@ -795,8 +794,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
- }
-
- /* Now, just wait a bit and we are all done! */
-- ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
-- next_delay);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
-+ next_delay);
- return 0;
- } else {
- /* Do normal software scan */
-@@ -1043,7 +1042,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
- local->next_scan_state = SCAN_SET_CHANNEL;
- }
-
--void ieee80211_scan_work(struct work_struct *work)
-+void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work)
- {
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, scan_work.work);
-@@ -1137,7 +1136,8 @@ void ieee80211_scan_work(struct work_struct *work)
- }
- } while (next_delay == 0);
-
-- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
-+ next_delay);
- goto out;
-
- out_complete:
-@@ -1280,12 +1280,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
- goto out;
- }
-
-- /*
-- * If the work is currently running, it must be blocked on
-- * the mutex, but we'll set scan_sdata = NULL and it'll
-- * simply exit once it acquires the mutex.
-- */
-- cancel_delayed_work(&local->scan_work);
-+ wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work);
- /* and clean up */
- memset(&local->scan_info, 0, sizeof(local->scan_info));
- __ieee80211_scan_completed(&local->hw, true);
-@@ -1427,10 +1422,11 @@ void ieee80211_sched_scan_end(struct ieee80211_local *local)
-
- mutex_unlock(&local->mtx);
-
-- cfg80211_sched_scan_stopped(local->hw.wiphy, 0);
-+ cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
- }
-
--void ieee80211_sched_scan_stopped_work(struct work_struct *work)
-+void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy,
-+ struct wiphy_work *work)
- {
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local,
-@@ -1453,6 +1449,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
- if (local->in_reconfig)
- return;
-
-- schedule_work(&local->sched_scan_stopped_work);
-+ wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work);
- }
- EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
-diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
-index 7751f8ba960ee..0c5cc75857e4f 100644
---- a/net/mac80211/sta_info.c
-+++ b/net/mac80211/sta_info.c
-@@ -2990,7 +2990,7 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
- WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;
-
- if (val)
-- sta->sta.max_amsdu_subframes = 4 << val;
-+ sta->sta.max_amsdu_subframes = 4 << (4 - val);
- }
-
- #ifdef CONFIG_LOCKDEP
-diff --git a/net/mac80211/util.c b/net/mac80211/util.c
-index 8a6917cf63cf9..172173b2a9eb8 100644
---- a/net/mac80211/util.c
-+++ b/net/mac80211/util.c
-@@ -2340,8 +2340,8 @@ static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
- */
- if (aborted)
- set_bit(SCAN_ABORTED, &local->scanning);
-- ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
-- flush_delayed_work(&local->scan_work);
-+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
-+ wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work);
- }
- }
-
-@@ -4356,7 +4356,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
- mutex_unlock(&local->mtx);
- }
-
--void ieee80211_dfs_radar_detected_work(struct work_struct *work)
-+void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
-+ struct wiphy_work *work)
- {
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, radar_detected_work);
-@@ -4374,9 +4375,7 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
- }
- mutex_unlock(&local->chanctx_mtx);
-
-- wiphy_lock(local->hw.wiphy);
- ieee80211_dfs_cac_cancel(local);
-- wiphy_unlock(local->hw.wiphy);
-
- if (num_chanctx > 1)
- /* XXX: multi-channel is not supported yet */
-@@ -4391,7 +4390,7 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
-
- trace_api_radar_detected(local);
-
-- schedule_work(&local->radar_detected_work);
-+ wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
- }
- EXPORT_SYMBOL(ieee80211_radar_detected);
-
-diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
-index bceaab8dd8e46..74698582a2859 100644
---- a/net/mptcp/fastopen.c
-+++ b/net/mptcp/fastopen.c
-@@ -52,6 +52,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
-
- mptcp_set_owner_r(skb, sk);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
-+ mptcp_sk(sk)->bytes_received += skb->len;
-
- sk->sk_data_ready(sk);
-
-diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
-index 9661f38126826..3011bc378462b 100644
---- a/net/mptcp/pm_netlink.c
-+++ b/net/mptcp/pm_netlink.c
-@@ -1538,8 +1538,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
- struct mptcp_pm_addr_entry *entry;
-
- list_for_each_entry(entry, rm_list, list) {
-- remove_anno_list_by_saddr(msk, &entry->addr);
-- if (alist.nr < MPTCP_RM_IDS_MAX)
-+ if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
-+ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
-+ alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
- }
-
-diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
-index 886ab689a8aea..c1527f520dce3 100644
---- a/net/mptcp/protocol.c
-+++ b/net/mptcp/protocol.c
-@@ -1231,6 +1231,8 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
- mptcp_do_fallback(ssk);
- }
-
-+#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
-+
- static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
- struct mptcp_data_frag *dfrag,
- struct mptcp_sendmsg_info *info)
-@@ -1257,6 +1259,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
- return -EAGAIN;
-
- /* compute send limit */
-+ if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
-+ ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
- info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
- copy = info->size_goal;
-
-diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
-index 8260202c00669..7539b9c8c2fb4 100644
---- a/net/mptcp/sockopt.c
-+++ b/net/mptcp/sockopt.c
-@@ -737,8 +737,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
- val = inet_sk(sk)->tos;
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-+ bool slow;
-
-+ slow = lock_sock_fast(ssk);
- __ip_sock_set_tos(ssk, val);
-+ unlock_sock_fast(ssk, slow);
- }
- release_sock(sk);
-
-diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
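
Note: the sockopt.c hunk above wraps the per-subflow write in lock_sock_fast()/unlock_sock_fast(), the cheap owner-lock variant that stays on the spinlock path while the socket is uncontended. The idiom on its own (the function name is illustrative; kernel headers assumed):

        #include <net/ip.h>
        #include <net/sock.h>

        static void demo_set_subflow_tos(struct sock *ssk, int val)
        {
                bool slow;

                slow = lock_sock_fast(ssk);     /* spinlock unless owned */
                __ip_sock_set_tos(ssk, val);
                unlock_sock_fast(ssk, slow);    /* undoes whichever form */
        }
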
-index f8854bff286cb..62fb1031763d1 100644
---- a/net/ncsi/ncsi-aen.c
-+++ b/net/ncsi/ncsi-aen.c
-@@ -89,11 +89,6 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
- if ((had_link == has_link) || chained)
- return 0;
-
-- if (had_link)
-- netif_carrier_off(ndp->ndev.dev);
-- else
-- netif_carrier_on(ndp->ndev.dev);
--
- if (!ndp->multi_package && !nc->package->multi_channel) {
- if (had_link) {
- ndp->flags |= NCSI_DEV_RESHUFFLE;
-diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
-index 6616ba5d0b049..5b37487d9d11f 100644
---- a/net/netfilter/nf_nat_redirect.c
-+++ b/net/netfilter/nf_nat_redirect.c
-@@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
-
- static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
-
-+static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
-+{
-+ unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
-+
-+ if (ifa_addr_type & IPV6_ADDR_MAPPED)
-+ return false;
-+
-+ if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
-+ return false;
-+
-+ if (scope) {
-+ unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
-+
-+ if (!(scope & ifa_scope))
-+ return false;
-+ }
-+
-+ return true;
-+}
-+
- unsigned int
- nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
- unsigned int hooknum)
-@@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
- if (hooknum == NF_INET_LOCAL_OUT) {
- newdst.in6 = loopback_addr;
- } else {
-+ unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
- struct inet6_dev *idev;
-- struct inet6_ifaddr *ifa;
- bool addr = false;
-
- idev = __in6_dev_get(skb->dev);
- if (idev != NULL) {
-+ const struct inet6_ifaddr *ifa;
-+
- read_lock_bh(&idev->lock);
- list_for_each_entry(ifa, &idev->addr_list, if_list) {
-+ if (!nf_nat_redirect_ipv6_usable(ifa, scope))
-+ continue;
-+
- newdst.in6 = ifa->addr;
- addr = true;
- break;
-diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
-index 29c651804cb22..4a450f6d12a59 100644
---- a/net/netfilter/nf_tables_api.c
-+++ b/net/netfilter/nf_tables_api.c
-@@ -3465,10 +3465,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
- goto cont_skip;
- if (*idx < s_idx)
- goto cont;
-- if (*idx > s_idx) {
-- memset(&cb->args[1], 0,
-- sizeof(cb->args) - sizeof(cb->args[0]));
-- }
- if (prule)
- handle = prule->handle;
- else
-@@ -6468,6 +6464,12 @@ static int nft_setelem_deactivate(const struct net *net,
- return ret;
- }
-
-+static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
-+{
-+ list_del_rcu(&catchall->list);
-+ kfree_rcu(catchall, rcu);
-+}
-+
- static void nft_setelem_catchall_remove(const struct net *net,
- const struct nft_set *set,
- const struct nft_set_elem *elem)
-@@ -6476,8 +6478,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
-
- list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
- if (catchall->elem == elem->priv) {
-- list_del_rcu(&catchall->list);
-- kfree_rcu(catchall, rcu);
-+ nft_setelem_catchall_destroy(catchall);
- break;
- }
- }
-@@ -7209,10 +7210,11 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
-
- if (err < 0) {
- NL_SET_BAD_ATTR(extack, attr);
-- break;
-+ return err;
- }
- }
-- return err;
-+
-+ return 0;
- }
-
- /*
-@@ -9638,9 +9640,8 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
- call_rcu(&trans->rcu, nft_trans_gc_trans_free);
- }
-
--static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
-- unsigned int gc_seq,
-- bool sync)
-+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
-+ unsigned int gc_seq)
- {
- struct nft_set_elem_catchall *catchall;
- const struct nft_set *set = gc->set;
-@@ -9656,11 +9657,7 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
-
- nft_set_elem_dead(ext);
- dead_elem:
-- if (sync)
-- gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
-- else
-- gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
--
-+ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
- if (!gc)
- return NULL;
-
-@@ -9670,15 +9667,34 @@ dead_elem:
- return gc;
- }
-
--struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
-- unsigned int gc_seq)
--{
-- return nft_trans_gc_catchall(gc, gc_seq, false);
--}
--
- struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
- {
-- return nft_trans_gc_catchall(gc, 0, true);
-+ struct nft_set_elem_catchall *catchall, *next;
-+ const struct nft_set *set = gc->set;
-+ struct nft_set_elem elem;
-+ struct nft_set_ext *ext;
-+
-+ WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
-+
-+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
-+ ext = nft_set_elem_ext(set, catchall->elem);
-+
-+ if (!nft_set_elem_expired(ext))
-+ continue;
-+
-+ gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
-+ if (!gc)
-+ return NULL;
-+
-+ memset(&elem, 0, sizeof(elem));
-+ elem.priv = catchall->elem;
-+
-+ nft_setelem_data_deactivate(gc->net, gc->set, &elem);
-+ nft_setelem_catchall_destroy(catchall);
-+ nft_trans_gc_elem_add(gc, elem.priv);
-+ }
-+
-+ return gc;
- }
-
- static void nf_tables_module_autoload_cleanup(struct net *net)
-diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
-index e596d1a842f70..f6e791a681015 100644
---- a/net/netfilter/nft_byteorder.c
-+++ b/net/netfilter/nft_byteorder.c
-@@ -38,13 +38,14 @@ void nft_byteorder_eval(const struct nft_expr *expr,
-
- switch (priv->size) {
- case 8: {
-+ u64 *dst64 = (void *)dst;
- u64 src64;
-
- switch (priv->op) {
- case NFT_BYTEORDER_NTOH:
- for (i = 0; i < priv->len / 8; i++) {
- src64 = nft_reg_load64(&src[i]);
-- nft_reg_store64(&dst[i],
-+ nft_reg_store64(&dst64[i],
- be64_to_cpu((__force __be64)src64));
- }
- break;
-@@ -52,7 +53,7 @@ void nft_byteorder_eval(const struct nft_expr *expr,
- for (i = 0; i < priv->len / 8; i++) {
- src64 = (__force __u64)
- cpu_to_be64(nft_reg_load64(&src[i]));
-- nft_reg_store64(&dst[i], src64);
-+ nft_reg_store64(&dst64[i], src64);
- }
- break;
- }
-diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
-index f7da7c43333b5..ba0d3683a45d3 100644
---- a/net/netfilter/nft_meta.c
-+++ b/net/netfilter/nft_meta.c
-@@ -63,7 +63,7 @@ nft_meta_get_eval_time(enum nft_meta_keys key,
- {
- switch (key) {
- case NFT_META_TIME_NS:
-- nft_reg_store64(dest, ktime_get_real_ns());
-+ nft_reg_store64((u64 *)dest, ktime_get_real_ns());
- break;
- case NFT_META_TIME_DAY:
- nft_reg_store8(dest, nft_meta_weekday());
-diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
-index 7ddb9a78e3fc8..ef93e0d3bee04 100644
---- a/net/netfilter/xt_recent.c
-+++ b/net/netfilter/xt_recent.c
-@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
- {
- struct recent_table *t = pde_data(file_inode(file));
- struct recent_entry *e;
-- char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
-+ char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
- const char *c = buf;
- union nf_inet_addr addr = {};
- u_int16_t family;
-diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
-index 0b9a785dea459..3019a4406ca4f 100644
---- a/net/openvswitch/conntrack.c
-+++ b/net/openvswitch/conntrack.c
-@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
- if (err)
- return err;
-
-- nf_conn_act_ct_ext_add(ct);
-+ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
- } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
- labels_nonzero(&info->labels.mask)) {
- err = ovs_ct_set_labels(ct, key, &info->labels.value,
-diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
-index ac85d4644a3c3..df8a271948a1c 100644
---- a/net/rxrpc/conn_object.c
-+++ b/net/rxrpc/conn_object.c
-@@ -212,7 +212,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
- conn->idle_timestamp = jiffies;
- if (atomic_dec_and_test(&conn->active))
- rxrpc_set_service_reap_timer(conn->rxnet,
-- jiffies + rxrpc_connection_expiry);
-+ jiffies + rxrpc_connection_expiry * HZ);
- }
-
- rxrpc_put_call(call, rxrpc_call_put_io_thread);
-diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
-index 030d64f282f37..92495e73b8699 100644
---- a/net/rxrpc/input.c
-+++ b/net/rxrpc/input.c
-@@ -643,12 +643,8 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
- clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
- smp_mb(); /* Read data before setting avail bit */
- set_bit(i, &call->rtt_avail);
-- if (type != rxrpc_rtt_rx_cancel)
-- rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
-- sent_at, resp_time);
-- else
-- trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
-- orig_serial, acked_serial, 0, 0);
-+ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
-+ sent_at, resp_time);
- matched = true;
- }
-
-@@ -801,28 +797,21 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- summary.ack_reason, nr_acks);
- rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
-
-- switch (ack.reason) {
-- case RXRPC_ACK_PING_RESPONSE:
-- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
-- rxrpc_rtt_rx_ping_response);
-- break;
-- case RXRPC_ACK_REQUESTED:
-- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
-- rxrpc_rtt_rx_requested_ack);
-- break;
-- default:
-- if (acked_serial != 0)
-+ if (acked_serial != 0) {
-+ switch (ack.reason) {
-+ case RXRPC_ACK_PING_RESPONSE:
- rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
-- rxrpc_rtt_rx_cancel);
-- break;
-- }
--
-- if (ack.reason == RXRPC_ACK_PING) {
-- rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
-- rxrpc_propose_ack_respond_to_ping);
-- } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
-- rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
-- rxrpc_propose_ack_respond_to_ack);
-+ rxrpc_rtt_rx_ping_response);
-+ break;
-+ case RXRPC_ACK_REQUESTED:
-+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
-+ rxrpc_rtt_rx_requested_ack);
-+ break;
-+ default:
-+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
-+ rxrpc_rtt_rx_other_ack);
-+ break;
-+ }
- }
-
- /* If we get an EXCEEDS_WINDOW ACK from the server, it probably
-@@ -835,7 +824,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- rxrpc_is_client_call(call)) {
- rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
- 0, -ENETRESET);
-- return;
-+ goto send_response;
- }
-
- /* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
-@@ -849,7 +838,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- rxrpc_is_client_call(call)) {
- rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
- 0, -ENETRESET);
-- return;
-+ goto send_response;
- }
-
- /* Discard any out-of-order or duplicate ACKs (outside lock). */
-@@ -857,7 +846,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
- first_soft_ack, call->acks_first_seq,
- prev_pkt, call->acks_prev_seq);
-- return;
-+ goto send_response;
- }
-
- info.rxMTU = 0;
-@@ -897,7 +886,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- case RXRPC_CALL_SERVER_AWAIT_ACK:
- break;
- default:
-- return;
-+ goto send_response;
- }
-
- if (before(hard_ack, call->acks_hard_ack) ||
-@@ -909,7 +898,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- if (after(hard_ack, call->acks_hard_ack)) {
- if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
- rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
-- return;
-+ goto send_response;
- }
- }
-
-@@ -927,6 +916,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
- rxrpc_propose_ack_ping_for_lost_reply);
-
- rxrpc_congestion_management(call, skb, &summary, acked_serial);
-+
-+send_response:
-+ if (ack.reason == RXRPC_ACK_PING)
-+ rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
-+ rxrpc_propose_ack_respond_to_ping);
-+ else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
-+ rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
-+ rxrpc_propose_ack_respond_to_ack);
- }
-
- /*
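
Note: the input.c hunk above restructures rxrpc_input_ack() so every early return funnels through a send_response tail, meaning a PING ack still receives its PING_RESPONSE even when the incoming ack is discarded as stale or the call has already completed. The control-flow shape as a self-contained schematic (types and names invented):

        #include <stdbool.h>

        struct pkt { bool stale; bool wants_reply; };

        static void process(struct pkt *p) { (void)p; }
        static void send_reply(struct pkt *p) { (void)p; }

        static void input(struct pkt *p)
        {
                if (p->stale)
                        goto send_response;     /* formerly a bare return */

                process(p);

        send_response:
                if (p->wants_reply)
                        send_reply(p);
        }
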
-diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
-index 7d910aee4f8cb..c553a30e9c838 100644
---- a/net/rxrpc/local_object.c
-+++ b/net/rxrpc/local_object.c
-@@ -87,7 +87,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
- struct rxrpc_local *local =
- container_of(timer, struct rxrpc_local, client_conn_reap_timer);
-
-- if (local->kill_all_client_conns &&
-+ if (!local->kill_all_client_conns &&
- test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
- rxrpc_wake_up_io_thread(local);
- }
-diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
-index fb52d6f9aff93..6dcc4585576e8 100644
---- a/net/sched/act_ct.c
-+++ b/net/sched/act_ct.c
-@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
- entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
- }
-
-+static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
-+{
-+ struct nf_conn_act_ct_ext *act_ct_ext;
-+
-+ act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
-+ if (act_ct_ext) {
-+ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
-+ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
-+ }
-+}
-+
- static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
- struct nf_conn *ct,
- bool tcp, bool bidirectional)
-@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
- else
- ctinfo = IP_CT_ESTABLISHED_REPLY;
-
-+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
-+ tcf_ct_flow_ct_ext_ifidx_update(flow);
- flow_offload_refresh(nf_ft, flow, force_refresh);
- if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
- /* Process this flow in SW to allow promoting to ASSURED */
-@@ -1030,7 +1043,7 @@ do_nat:
- tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
-
- if (!nf_ct_is_confirmed(ct))
-- nf_conn_act_ct_ext_add(ct);
-+ nf_conn_act_ct_ext_add(skb, ct, ctinfo);
-
- /* This will take care of sending queued events
- * even if the connection is already confirmed.
-@@ -1522,6 +1535,9 @@ static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
- if (bind) {
- struct flow_action_entry *entry = entry_data;
-
-+ if (tcf_ct_helper(act))
-+ return -EOPNOTSUPP;
-+
- entry->id = FLOW_ACTION_CT;
- entry->ct.action = tcf_ct_action(act);
- entry->ct.zone = tcf_ct_zone(act);
-diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
-index 35ddebae88941..741339ac94833 100644
---- a/net/smc/af_smc.c
-+++ b/net/smc/af_smc.c
-@@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc)
-
- if (!smc->use_fallback) {
- rc = smc_close_active(smc);
-- sock_set_flag(sk, SOCK_DEAD);
-+ smc_sock_set_flag(sk, SOCK_DEAD);
- sk->sk_shutdown |= SHUTDOWN_MASK;
- } else {
- if (sk->sk_state != SMC_CLOSED) {
-@@ -598,8 +598,12 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
- struct smc_llc_qentry *qentry;
- int rc;
-
-- /* receive CONFIRM LINK request from server over RoCE fabric */
-- qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
-+ /* Receive CONFIRM LINK request from server over RoCE fabric.
-+ * Increasing the client's timeout by twice as much as the server's
-+ * timeout by default can temporarily avoid decline messages of
-+ * both sides crossing or colliding
-+ */
-+ qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
- SMC_LLC_CONFIRM_LINK);
- if (!qentry) {
- struct smc_clc_msg_decline dclc;
-@@ -1743,7 +1747,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
- if (new_clcsock)
- sock_release(new_clcsock);
- new_sk->sk_state = SMC_CLOSED;
-- sock_set_flag(new_sk, SOCK_DEAD);
-+ smc_sock_set_flag(new_sk, SOCK_DEAD);
- sock_put(new_sk); /* final */
- *new_smc = NULL;
- goto out;
-diff --git a/net/smc/smc.h b/net/smc/smc.h
-index 24745fde4ac26..e377980b84145 100644
---- a/net/smc/smc.h
-+++ b/net/smc/smc.h
-@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
- int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
- int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
-
-+static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
-+{
-+ set_bit(flag, &sk->sk_flags);
-+}
-+
- #endif /* __SMC_H */
-diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
-index 89105e95b4523..3c06625ceb200 100644
---- a/net/smc/smc_cdc.c
-+++ b/net/smc/smc_cdc.c
-@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
- {
- struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
- struct smc_connection *conn = cdcpend->conn;
-+ struct smc_buf_desc *sndbuf_desc;
- struct smc_sock *smc;
- int diff;
-
-+ sndbuf_desc = conn->sndbuf_desc;
- smc = container_of(conn, struct smc_sock, conn);
- bh_lock_sock(&smc->sk);
-- if (!wc_status) {
-- diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
-+ if (!wc_status && sndbuf_desc) {
-+ diff = smc_curs_diff(sndbuf_desc->len,
- &cdcpend->conn->tx_curs_fin,
- &cdcpend->cursor);
- /* sndbuf_space is decreased in smc_sendmsg */
-@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
- union smc_host_cursor cfed;
- int rc;
-
-- if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
-- return -ENOBUFS;
--
- smc_cdc_add_pending_send(conn, pend);
-
- conn->tx_cdc_seq++;
-@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
- smc->sk.sk_shutdown |= RCV_SHUTDOWN;
- if (smc->clcsock && smc->clcsock->sk)
- smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
-- sock_set_flag(&smc->sk, SOCK_DONE);
-+ smc_sock_set_flag(&smc->sk, SOCK_DONE);
- sock_hold(&smc->sk); /* sock_put in close_work */
- if (!queue_work(smc_close_wq, &conn->close_work))
- sock_put(&smc->sk);
-diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
-index dbdf03e8aa5b5..10219f55aad14 100644
---- a/net/smc/smc_close.c
-+++ b/net/smc/smc_close.c
-@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
- struct sock *sk = &smc->sk;
-
- release_sock(sk);
-- cancel_work_sync(&smc->conn.close_work);
-+ if (cancel_work_sync(&smc->conn.close_work))
-+ sock_put(sk);
- cancel_delayed_work_sync(&smc->conn.tx_work);
- lock_sock(sk);
- }
-@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
- break;
- }
-
-- sock_set_flag(sk, SOCK_DEAD);
-+ smc_sock_set_flag(sk, SOCK_DEAD);
- sk->sk_state_change(sk);
-
- if (release_clcsock) {
-diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 9c210273d06b7..339dfc5b92246 100644
---- a/net/sunrpc/clnt.c
-+++ b/net/sunrpc/clnt.c
-@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
-
- pipefs_sb = rpc_get_sb_net(net);
- if (pipefs_sb) {
-- __rpc_clnt_remove_pipedir(clnt);
-+ if (pipefs_sb == clnt->pipefs_sb)
-+ __rpc_clnt_remove_pipedir(clnt);
- rpc_put_sb_net(net);
- }
- }
-@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
- {
- struct dentry *dentry;
-
-+ clnt->pipefs_sb = pipefs_sb;
-+
- if (clnt->cl_program->pipe_dir_name != NULL) {
- dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
- if (IS_ERR(dentry))
-@@ -2171,6 +2174,7 @@ call_connect_status(struct rpc_task *task)
- task->tk_status = 0;
- switch (status) {
- case -ECONNREFUSED:
-+ case -ECONNRESET:
- /* A positive refusal suggests a rebind is needed. */
- if (RPC_IS_SOFTCONN(task))
- break;
-@@ -2179,7 +2183,6 @@ call_connect_status(struct rpc_task *task)
- goto out_retry;
- }
- fallthrough;
-- case -ECONNRESET:
- case -ECONNABORTED:
- case -ENETDOWN:
- case -ENETUNREACH:
-diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
-index 5988a5c5ff3f0..102c3818bc54d 100644
---- a/net/sunrpc/rpcb_clnt.c
-+++ b/net/sunrpc/rpcb_clnt.c
-@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
-
- child = rpcb_call_async(rpcb_clnt, map, proc);
- rpc_release_client(rpcb_clnt);
-+ if (IS_ERR(child)) {
-+ /* rpcb_map_release() has freed the arguments */
-+ return;
-+ }
-
- xprt->stat.bind_count++;
- rpc_put_task(child);
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-index 85c8bcaebb80f..3b05f90a3e50d 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-@@ -852,7 +852,8 @@ out_readfail:
- if (ret == -EINVAL)
- svc_rdma_send_error(rdma_xprt, ctxt, ret);
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
-- return ret;
-+ svc_xprt_deferred_close(xprt);
-+ return -ENOTCONN;
-
- out_backchannel:
- svc_rdma_handle_bc_reply(rqstp, ctxt);
-diff --git a/net/tipc/link.c b/net/tipc/link.c
-index e33b4f29f77cf..d0143823658d5 100644
---- a/net/tipc/link.c
-+++ b/net/tipc/link.c
-@@ -1446,7 +1446,7 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
- p = (struct tipc_gap_ack_blks *)msg_data(hdr);
- sz = ntohs(p->len);
- /* Sanity check */
-- if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
-+ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
- /* Good, check if the desired type exists */
- if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
- goto ok;
-@@ -1533,7 +1533,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
- __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
-
- /* Total len */
-- len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
-+ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
- ga->len = htons(len);
- return len;
- }
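
Note: both tipc/link.c hunks above, and the tls_sw.c hunk further down, swap open-coded additions inside struct_size() for size_add(), which saturates at SIZE_MAX instead of wrapping, so two attacker-influenced counts can only produce a failed allocation, never an undersized one. The idiom (demo_blocks is a made-up struct; kernel headers assumed):

        #include <linux/overflow.h>
        #include <linux/slab.h>

        struct demo_blocks {
                u16 len;
                u32 gacks[];            /* flexible array member */
        };

        static struct demo_blocks *demo_alloc(size_t a, size_t b)
        {
                struct demo_blocks *p;

                /* size_add() saturates; struct_size() checks the multiply;
                 * an overflowing a + b makes kmalloc() fail cleanly. */
                p = kmalloc(struct_size(p, gacks, size_add(a, b)), GFP_KERNEL);
                return p;
        }
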
-diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
-index e8fd257c0e688..1a9a5bdaccf4f 100644
---- a/net/tipc/netlink.c
-+++ b/net/tipc/netlink.c
-@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
-
- const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
- [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
-- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
-+ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
- .len = TIPC_MAX_LINK_NAME },
- [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
- [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
-@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
-
- const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
- [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
-- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
-+ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
- .len = TIPC_MAX_BEARER_NAME },
- [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
- [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
-diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
-index 5bc076f2fa74a..c763008a8adba 100644
---- a/net/tipc/netlink_compat.c
-+++ b/net/tipc/netlink_compat.c
-@@ -102,6 +102,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
- return -EMSGSIZE;
-
- skb_put(skb, TLV_SPACE(len));
-+ memset(tlv, 0, TLV_SPACE(len));
- tlv->tlv_type = htons(type);
- tlv->tlv_len = htons(TLV_LENGTH(len));
- if (len && data)
-diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
-index e9d1e83a859d1..779815b885e94 100644
---- a/net/tls/tls_sw.c
-+++ b/net/tls/tls_sw.c
-@@ -1232,11 +1232,14 @@ void tls_sw_splice_eof(struct socket *sock)
- lock_sock(sk);
-
- retry:
-+ /* same checks as in tls_sw_push_pending_record() */
- rec = ctx->open_rec;
- if (!rec)
- goto unlock;
-
- msg_pl = &rec->msg_plaintext;
-+ if (msg_pl->sg.size == 0)
-+ goto unlock;
-
- /* Check the BPF advisor and perform transmission. */
- ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
-@@ -1491,7 +1494,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
- */
- aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
- aead_size = ALIGN(aead_size, __alignof__(*dctx));
-- mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
-+ mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
- sk->sk_allocation);
- if (!mem) {
- err = -ENOMEM;
-diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
-index 3e8a04a136688..1e1a88bd4e688 100644
---- a/net/unix/af_unix.c
-+++ b/net/unix/af_unix.c
-@@ -212,8 +212,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
- }
- #endif /* CONFIG_SECURITY_NETWORK */
-
--#define unix_peer(sk) (unix_sk(sk)->peer)
--
- static inline int unix_our_peer(struct sock *sk, struct sock *osk)
- {
- return unix_peer(osk) == sk;
-@@ -2553,15 +2551,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
-
- if (!(state->flags & MSG_PEEK))
- WRITE_ONCE(u->oob_skb, NULL);
--
-+ else
-+ skb_get(oob_skb);
- unix_state_unlock(sk);
-
- chunk = state->recv_actor(oob_skb, 0, chunk, state);
-
-- if (!(state->flags & MSG_PEEK)) {
-+ if (!(state->flags & MSG_PEEK))
- UNIXCB(oob_skb).consumed += 1;
-- kfree_skb(oob_skb);
-- }
-+
-+ consume_skb(oob_skb);
-
- mutex_unlock(&u->iolock);
-
-diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
-index 2f9d8271c6ec7..7ea7c3a0d0d06 100644
---- a/net/unix/unix_bpf.c
-+++ b/net/unix/unix_bpf.c
-@@ -159,12 +159,17 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool re
-
- int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
- {
-+ struct sock *sk_pair;
-+
- if (restore) {
- sk->sk_write_space = psock->saved_write_space;
- sock_replace_proto(sk, psock->sk_proto);
- return 0;
- }
-
-+ sk_pair = unix_peer(sk);
-+ sock_hold(sk_pair);
-+ psock->sk_pair = sk_pair;
- unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
- sock_replace_proto(sk, &unix_stream_bpf_prot);
- return 0;
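
Note: the unix_bpf hunk above pins the peer with sock_hold() before caching it in psock->sk_pair, so the cached pointer cannot outlive its socket; the matching sock_put() runs when the psock is destroyed, outside this hunk. The take/cache/release pattern in general form (demo_psock is hypothetical; kernel headers assumed):

        #include <net/sock.h>

        struct demo_psock {
                struct sock *peer;
        };

        static void demo_cache_peer(struct demo_psock *ps, struct sock *peer)
        {
                sock_hold(peer);        /* pin before publishing */
                ps->peer = peer;
        }

        static void demo_drop_peer(struct demo_psock *ps)
        {
                if (ps->peer) {
                        sock_put(ps->peer);     /* exactly one put per hold */
                        ps->peer = NULL;
                }
        }
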
-diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
-index 020cf17ab7e47..ccd8cefeea7ba 100644
---- a/net/vmw_vsock/af_vsock.c
-+++ b/net/vmw_vsock/af_vsock.c
-@@ -89,6 +89,7 @@
- #include <linux/types.h>
- #include <linux/bitops.h>
- #include <linux/cred.h>
-+#include <linux/errqueue.h>
- #include <linux/init.h>
- #include <linux/io.h>
- #include <linux/kernel.h>
-@@ -110,6 +111,7 @@
- #include <linux/workqueue.h>
- #include <net/sock.h>
- #include <net/af_vsock.h>
-+#include <uapi/linux/vm_sockets.h>
-
- static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
- static void vsock_sk_destruct(struct sock *sk);
-@@ -2134,6 +2136,10 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int err;
-
- sk = sock->sk;
-+
-+ if (unlikely(flags & MSG_ERRQUEUE))
-+ return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
-+
- vsk = vsock_sk(sk);
- err = 0;
-
-diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
-index 352d042b130b5..8bc272b6003bb 100644
---- a/net/vmw_vsock/virtio_transport_common.c
-+++ b/net/vmw_vsock/virtio_transport_common.c
-@@ -68,6 +68,8 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
- hdr->dst_port = cpu_to_le32(dst_port);
- hdr->flags = cpu_to_le32(info->flags);
- hdr->len = cpu_to_le32(len);
-+ hdr->buf_alloc = cpu_to_le32(0);
-+ hdr->fwd_cnt = cpu_to_le32(0);
-
- if (info->msg && len > 0) {
- payload = skb_put(skb, len);
-@@ -1204,11 +1206,17 @@ virtio_transport_recv_connected(struct sock *sk,
- vsk->peer_shutdown |= RCV_SHUTDOWN;
- if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
- vsk->peer_shutdown |= SEND_SHUTDOWN;
-- if (vsk->peer_shutdown == SHUTDOWN_MASK &&
-- vsock_stream_has_data(vsk) <= 0 &&
-- !sock_flag(sk, SOCK_DONE)) {
-- (void)virtio_transport_reset(vsk, NULL);
-- virtio_transport_do_close(vsk, true);
-+ if (vsk->peer_shutdown == SHUTDOWN_MASK) {
-+ if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
-+ (void)virtio_transport_reset(vsk, NULL);
-+ virtio_transport_do_close(vsk, true);
-+ }
-+ /* Remove this socket anyway because the remote peer sent
-+ * the shutdown. This way a new connection will succeed
-+ * if the remote peer uses the same source port,
-+ * even if the old socket is still unreleased, but now disconnected.
-+ */
-+ vsock_remove_sock(vsk);
- }
- if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
- sk->sk_state_change(sk);
-diff --git a/net/wireless/core.c b/net/wireless/core.c
-index acec41c1809a8..563cfbe3237c9 100644
---- a/net/wireless/core.c
-+++ b/net/wireless/core.c
-@@ -1049,7 +1049,8 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
- }
- EXPORT_SYMBOL(wiphy_rfkill_start_polling);
-
--void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
-+void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
-+ struct wiphy_work *end)
- {
- unsigned int runaway_limit = 100;
- unsigned long flags;
-@@ -1068,6 +1069,10 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev)
- wk->func(&rdev->wiphy, wk);
-
- spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
-+
-+ if (wk == end)
-+ break;
-+
- if (WARN_ON(--runaway_limit == 0))
- INIT_LIST_HEAD(&rdev->wiphy_work_list);
- }
-@@ -1118,7 +1123,7 @@ void wiphy_unregister(struct wiphy *wiphy)
- #endif
-
- /* surely nothing is reachable now, clean up work */
-- cfg80211_process_wiphy_works(rdev);
-+ cfg80211_process_wiphy_works(rdev, NULL);
- wiphy_unlock(&rdev->wiphy);
- rtnl_unlock();
-
-@@ -1640,6 +1645,21 @@ void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work)
- }
- EXPORT_SYMBOL_GPL(wiphy_work_cancel);
-
-+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work)
-+{
-+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-+ unsigned long flags;
-+ bool run;
-+
-+ spin_lock_irqsave(&rdev->wiphy_work_lock, flags);
-+ run = !work || !list_empty(&work->entry);
-+ spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
-+
-+ if (run)
-+ cfg80211_process_wiphy_works(rdev, work);
-+}
-+EXPORT_SYMBOL_GPL(wiphy_work_flush);
-+
- void wiphy_delayed_work_timer(struct timer_list *t)
- {
- struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer);
-@@ -1672,6 +1692,16 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
- }
- EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel);
-
-+void wiphy_delayed_work_flush(struct wiphy *wiphy,
-+ struct wiphy_delayed_work *dwork)
-+{
-+ lockdep_assert_held(&wiphy->mtx);
-+
-+ del_timer_sync(&dwork->timer);
-+ wiphy_work_flush(wiphy, &dwork->work);
-+}
-+EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
-+
- static int __init cfg80211_init(void)
- {
- int err;
-diff --git a/net/wireless/core.h b/net/wireless/core.h
-index ba9c7170afa44..e536c0b615a09 100644
---- a/net/wireless/core.h
-+++ b/net/wireless/core.h
-@@ -464,7 +464,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
- struct net_device *dev, enum nl80211_iftype ntype,
- struct vif_params *params);
- void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
--void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev);
-+void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev,
-+ struct wiphy_work *end);
- void cfg80211_process_wdev_events(struct wireless_dev *wdev);
-
- bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
-diff --git a/net/wireless/scan.c b/net/wireless/scan.c
-index 8210a6090ac16..e4cc6209c7b9b 100644
---- a/net/wireless/scan.c
-+++ b/net/wireless/scan.c
-@@ -2358,8 +2358,8 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
-
- /* elem might be invalid after the memmove */
- next = (void *)(elem->data + elem->datalen);
--
- elem_datalen = elem->datalen;
-+
- if (elem->id == WLAN_EID_EXTENSION) {
- copied = elem->datalen - 1;
- if (copied > data_len)
-@@ -2380,7 +2380,7 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
-
- for (elem = next;
- elem->data < ies + ieslen &&
-- elem->data + elem->datalen < ies + ieslen;
-+ elem->data + elem->datalen <= ies + ieslen;
- elem = next) {
- /* elem might be invalid after the memmove */
- next = (void *)(elem->data + elem->datalen);
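
Note: the scan.c hunk above fixes an off-by-one in the fragment walk: an element whose data ends exactly at the buffer end is still valid, so the bound must be <=, not <. The containment predicate on its own (standalone sketch, names invented):

        #include <stdbool.h>
        #include <stddef.h>

        /* True iff [data, data + datalen) lies inside [buf, buf + len);
         * ending exactly at buf + len is still in bounds. */
        static bool elem_fits(const unsigned char *buf, size_t len,
                              const unsigned char *data, size_t datalen)
        {
                return data >= buf &&
                       (size_t)(data - buf) <= len &&
                       datalen <= len - (size_t)(data - buf);
        }
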
-diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
-index c629bac3f2983..565511a3f461e 100644
---- a/net/wireless/sysfs.c
-+++ b/net/wireless/sysfs.c
-@@ -105,14 +105,14 @@ static int wiphy_suspend(struct device *dev)
- cfg80211_leave_all(rdev);
- cfg80211_process_rdev_events(rdev);
- }
-- cfg80211_process_wiphy_works(rdev);
-+ cfg80211_process_wiphy_works(rdev, NULL);
- if (rdev->ops->suspend)
- ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
- if (ret == 1) {
- /* Driver refuse to configure wowlan */
- cfg80211_leave_all(rdev);
- cfg80211_process_rdev_events(rdev);
-- cfg80211_process_wiphy_works(rdev);
-+ cfg80211_process_wiphy_works(rdev, NULL);
- ret = rdev_suspend(rdev, NULL);
- }
- if (ret == 0)
-diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
-index 7a788bb837fc1..7a09ac74fac07 100644
---- a/samples/bpf/syscall_tp_user.c
-+++ b/samples/bpf/syscall_tp_user.c
-@@ -17,9 +17,9 @@
-
- static void usage(const char *cmd)
- {
-- printf("USAGE: %s [-i num_progs] [-h]\n", cmd);
-- printf(" -i num_progs # number of progs of the test\n");
-- printf(" -h # help\n");
-+ printf("USAGE: %s [-i nr_tests] [-h]\n", cmd);
-+ printf(" -i nr_tests # rounds of test to run\n");
-+ printf(" -h # help\n");
- }
-
- static void verify_map(int map_id)
-@@ -45,14 +45,14 @@ static void verify_map(int map_id)
- }
- }
-
--static int test(char *filename, int num_progs)
-+static int test(char *filename, int nr_tests)
- {
-- int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
-- struct bpf_link *links[num_progs * 4];
-- struct bpf_object *objs[num_progs];
-+ int map0_fds[nr_tests], map1_fds[nr_tests], fd, i, j = 0;
-+ struct bpf_link **links = NULL;
-+ struct bpf_object *objs[nr_tests];
- struct bpf_program *prog;
-
-- for (i = 0; i < num_progs; i++) {
-+ for (i = 0; i < nr_tests; i++) {
- objs[i] = bpf_object__open_file(filename, NULL);
- if (libbpf_get_error(objs[i])) {
- fprintf(stderr, "opening BPF object file failed\n");
-@@ -60,6 +60,19 @@ static int test(char *filename, int num_progs)
- goto cleanup;
- }
-
-+ /* One-time initialization */
-+ if (!links) {
-+ int nr_progs = 0;
-+
-+ bpf_object__for_each_program(prog, objs[i])
-+ nr_progs += 1;
-+
-+ links = calloc(nr_progs * nr_tests, sizeof(struct bpf_link *));
-+
-+ if (!links)
-+ goto cleanup;
-+ }
-+
- /* load BPF program */
- if (bpf_object__load(objs[i])) {
- fprintf(stderr, "loading BPF object file failed\n");
-@@ -101,14 +114,18 @@ static int test(char *filename, int num_progs)
- close(fd);
-
- /* verify the map */
-- for (i = 0; i < num_progs; i++) {
-+ for (i = 0; i < nr_tests; i++) {
- verify_map(map0_fds[i]);
- verify_map(map1_fds[i]);
- }
-
- cleanup:
-- for (j--; j >= 0; j--)
-- bpf_link__destroy(links[j]);
-+ if (links) {
-+ for (j--; j >= 0; j--)
-+ bpf_link__destroy(links[j]);
-+
-+ free(links);
-+ }
-
- for (i--; i >= 0; i--)
- bpf_object__close(objs[i]);
-@@ -117,13 +134,13 @@ cleanup:
-
- int main(int argc, char **argv)
- {
-- int opt, num_progs = 1;
-+ int opt, nr_tests = 1;
- char filename[256];
-
- while ((opt = getopt(argc, argv, "i:h")) != -1) {
- switch (opt) {
- case 'i':
-- num_progs = atoi(optarg);
-+ nr_tests = atoi(optarg);
- break;
- case 'h':
- default:
-@@ -134,5 +151,5 @@ int main(int argc, char **argv)
-
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-
-- return test(filename, num_progs);
-+ return test(filename, nr_tests);
- }
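The sample rewrite above has two parts: the `-i` option is renamed to reflect that it repeats the whole test rather than loading extra programs, and the fixed-size `links[num_progs * 4]` stack array, which silently assumed a specific number of programs per BPF object, is replaced by a heap array sized from the object's real program count. A sketch of that sizing pattern, with error paths trimmed:

    #include <stdlib.h>
    #include <bpf/libbpf.h>

    /* Size one bpf_link slot per program per test round, counting the
     * programs from the object itself instead of hard-coding a factor. */
    static struct bpf_link **alloc_links(struct bpf_object *obj, int nr_tests)
    {
            struct bpf_program *prog;
            int nr_progs = 0;

            bpf_object__for_each_program(prog, obj)
                    nr_progs++;

            return calloc(nr_progs * nr_tests, sizeof(struct bpf_link *));
    }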
-diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
-index 3cd6ca15f390d..c9f3e03124d7f 100644
---- a/scripts/Makefile.vmlinux
-+++ b/scripts/Makefile.vmlinux
-@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
-
- ifdef CONFIG_MODULES
- KASAN_SANITIZE_.vmlinux.export.o := n
-+KCSAN_SANITIZE_.vmlinux.export.o := n
- GCOV_PROFILE_.vmlinux.export.o := n
- targets += .vmlinux.export.o
- vmlinux: .vmlinux.export.o
-diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
-index 0edfdb40364b8..25b3b587d37c0 100644
---- a/scripts/Makefile.vmlinux_o
-+++ b/scripts/Makefile.vmlinux_o
-@@ -37,7 +37,8 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
-
- vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
- vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
--vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)
-+vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
-+ $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
-
- objtool-args = $(vmlinux-objtool-args-y) --link
-
-diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
-index 951b74ba1b242..910bd21d08f48 100644
---- a/scripts/gcc-plugins/randomize_layout_plugin.c
-+++ b/scripts/gcc-plugins/randomize_layout_plugin.c
-@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
-
- static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
- {
-- unsigned long i, x;
-+ unsigned long i, x, index;
- struct partition_group size_group[length];
- unsigned long num_groups = 0;
- unsigned long randnum;
-
- partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
-+
-+ /* FIXME: this group shuffle is currently a no-op. */
- for (i = num_groups - 1; i > 0; i--) {
- struct partition_group tmp;
- randnum = ranval(prng_state) % (i + 1);
-@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
- }
-
- for (x = 0; x < num_groups; x++) {
-- for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
-+ for (index = size_group[x].length - 1; index > 0; index--) {
- tree tmp;
-+
-+ i = size_group[x].start + index;
- if (DECL_BIT_FIELD_TYPE(newtree[i]))
- continue;
-- randnum = ranval(prng_state) % (i + 1);
-+ randnum = ranval(prng_state) % (index + 1);
-+ randnum += size_group[x].start;
- // we could handle this case differently if desired
- if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
- continue;
-@@ -273,8 +278,6 @@ static bool is_flexible_array(const_tree field)
- {
- const_tree fieldtype;
- const_tree typesize;
-- const_tree elemtype;
-- const_tree elemsize;
-
- fieldtype = TREE_TYPE(field);
- typesize = TYPE_SIZE(fieldtype);
-@@ -282,20 +285,12 @@ static bool is_flexible_array(const_tree field)
- if (TREE_CODE(fieldtype) != ARRAY_TYPE)
- return false;
-
-- elemtype = TREE_TYPE(fieldtype);
-- elemsize = TYPE_SIZE(elemtype);
--
- /* size of type is represented in bits */
-
- if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
- TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
- return true;
-
-- if (typesize != NULL_TREE &&
-- (TREE_CONSTANT(typesize) && (!tree_to_uhwi(typesize) ||
-- tree_to_uhwi(typesize) == tree_to_uhwi(elemsize))))
-- return true;
--
- return false;
- }
-
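The shuffle rework above is worth dwelling on: in the old loop, `i` was an absolute index into the whole field array but `randnum` was drawn from `[0, i]`, so a swap partner could land outside the current size group; drawing from the group-relative range `[0, index]` and then adding the group's start offset restores a proper per-group Fisher-Yates draw (the plugin additionally skips bit-field members). A standalone sketch of the corrected form, assuming a `rand_below()` helper in place of the plugin's `ranval()` PRNG:

    /* Fisher-Yates shuffle confined to arr[start .. start + len - 1]. */
    static void shuffle_group(void **arr, unsigned long start, unsigned long len,
                              unsigned long (*rand_below)(unsigned long))
    {
            for (unsigned long index = len - 1; index > 0; index--) {
                    unsigned long i = start + index;
                    unsigned long j = start + rand_below(index + 1);
                    void *tmp = arr[i];

                    arr[i] = arr[j];
                    arr[j] = tmp;
            }
    }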
-diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
-index e3517d4ab8ec9..e810e0c27ff18 100644
---- a/scripts/gdb/linux/constants.py.in
-+++ b/scripts/gdb/linux/constants.py.in
-@@ -66,10 +66,11 @@ LX_GDBPARSED(IRQD_LEVEL)
- LX_GDBPARSED(IRQ_HIDDEN)
-
- /* linux/module.h */
--LX_GDBPARSED(MOD_TEXT)
--LX_GDBPARSED(MOD_DATA)
--LX_GDBPARSED(MOD_RODATA)
--LX_GDBPARSED(MOD_RO_AFTER_INIT)
-+if IS_BUILTIN(CONFIG_MODULES):
-+ LX_GDBPARSED(MOD_TEXT)
-+ LX_GDBPARSED(MOD_DATA)
-+ LX_GDBPARSED(MOD_RODATA)
-+ LX_GDBPARSED(MOD_RO_AFTER_INIT)
-
- /* linux/mount.h */
- LX_VALUE(MNT_NOSUID)
-@@ -157,3 +158,4 @@ LX_CONFIG(CONFIG_STACKDEPOT)
- LX_CONFIG(CONFIG_PAGE_OWNER)
- LX_CONFIG(CONFIG_SLUB_DEBUG)
- LX_CONFIG(CONFIG_SLAB_FREELIST_HARDENED)
-+LX_CONFIG(CONFIG_MMU)
-diff --git a/scripts/gdb/linux/vmalloc.py b/scripts/gdb/linux/vmalloc.py
-index 48e4a4fae7bbf..d3c8a0274d1ed 100644
---- a/scripts/gdb/linux/vmalloc.py
-+++ b/scripts/gdb/linux/vmalloc.py
-@@ -10,8 +10,9 @@ import gdb
- import re
- from linux import lists, utils, stackdepot, constants, mm
-
--vmap_area_type = utils.CachedType('struct vmap_area')
--vmap_area_ptr_type = vmap_area_type.get_type().pointer()
-+if constants.LX_CONFIG_MMU:
-+ vmap_area_type = utils.CachedType('struct vmap_area')
-+ vmap_area_ptr_type = vmap_area_type.get_type().pointer()
-
- def is_vmalloc_addr(x):
- pg_ops = mm.page_ops().ops
-@@ -25,6 +26,9 @@ class LxVmallocInfo(gdb.Command):
- super(LxVmallocInfo, self).__init__("lx-vmallocinfo", gdb.COMMAND_DATA)
-
- def invoke(self, arg, from_tty):
-+ if not constants.LX_CONFIG_MMU:
-+ raise gdb.GdbError("Requires MMU support")
-+
- vmap_area_list = gdb.parse_and_eval('vmap_area_list')
- for vmap_area in lists.list_for_each_entry(vmap_area_list, vmap_area_ptr_type, "list"):
- if not vmap_area['vm']:
-diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
-index 7056751c29b1f..6583b36dbe694 100644
---- a/scripts/mod/file2alias.c
-+++ b/scripts/mod/file2alias.c
-@@ -1348,13 +1348,13 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
- /* Looks like: tee:uuid */
- static int do_tee_entry(const char *filename, void *symval, char *alias)
- {
-- DEF_FIELD(symval, tee_client_device_id, uuid);
-+ DEF_FIELD_ADDR(symval, tee_client_device_id, uuid);
-
- sprintf(alias, "tee:%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-- uuid.b[0], uuid.b[1], uuid.b[2], uuid.b[3], uuid.b[4],
-- uuid.b[5], uuid.b[6], uuid.b[7], uuid.b[8], uuid.b[9],
-- uuid.b[10], uuid.b[11], uuid.b[12], uuid.b[13], uuid.b[14],
-- uuid.b[15]);
-+ uuid->b[0], uuid->b[1], uuid->b[2], uuid->b[3], uuid->b[4],
-+ uuid->b[5], uuid->b[6], uuid->b[7], uuid->b[8], uuid->b[9],
-+ uuid->b[10], uuid->b[11], uuid->b[12], uuid->b[13], uuid->b[14],
-+ uuid->b[15]);
-
- add_wildcard(alias);
- return 1;
-@@ -1401,10 +1401,10 @@ static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
- /* Looks like: ishtp:{guid} */
- static int do_ishtp_entry(const char *filename, void *symval, char *alias)
- {
-- DEF_FIELD(symval, ishtp_device_id, guid);
-+ DEF_FIELD_ADDR(symval, ishtp_device_id, guid);
-
- strcpy(alias, ISHTP_MODULE_PREFIX "{");
-- add_guid(alias, guid);
-+ add_guid(alias, *guid);
- strcat(alias, "}");
-
- return 1;
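The `DEF_FIELD` to `DEF_FIELD_ADDR` swaps above follow the usual modpost rule for opaque byte arrays: `DEF_FIELD` reads the value through host/target endianness conversion, which is correct for scalar IDs but reorders the bytes of a `uuid`/`guid` when the build host and the target differ in endianness, while `DEF_FIELD_ADDR` yields a pointer to the bytes exactly as stored. A rough illustration of the difference (these are not modpost's actual macros):

    #include <stdint.h>
    #include <string.h>
    #include <byteswap.h>

    /* Endian-converting load: right for scalar device-table fields. */
    static uint32_t load_native(const void *p)
    {
            uint32_t v;

            memcpy(&v, p, sizeof(v));
            return bswap_32(v);     /* when host and target endianness differ */
    }

    /* Raw pointer access: right for byte arrays such as uuid.b[16],
     * whose byte order must never be touched. */
    static const uint8_t *field_addr(const void *p)
    {
            return p;
    }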
-diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
-index bd6a910f65282..261cef4c622fb 100644
---- a/security/apparmor/apparmorfs.c
-+++ b/security/apparmor/apparmorfs.c
-@@ -423,7 +423,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
- /* high level check about policy management - fine grained in
- * below after unpack
- */
-- error = aa_may_manage_policy(label, ns, mask);
-+ error = aa_may_manage_policy(current_cred(), label, ns, mask);
- if (error)
- goto end_section;
-
-@@ -486,7 +486,8 @@ static ssize_t profile_remove(struct file *f, const char __user *buf,
- /* high level check about policy management - fine grained in
- * below after unpack
- */
-- error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
-+ error = aa_may_manage_policy(current_cred(), label, ns,
-+ AA_MAY_REMOVE_POLICY);
- if (error)
- goto out;
-
-@@ -1805,7 +1806,8 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
- int error;
-
- label = begin_current_label_crit_section();
-- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
-+ error = aa_may_manage_policy(current_cred(), label, NULL,
-+ AA_MAY_LOAD_POLICY);
- end_current_label_crit_section(label);
- if (error)
- return error;
-@@ -1854,7 +1856,8 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
- int error;
-
- label = begin_current_label_crit_section();
-- error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
-+ error = aa_may_manage_policy(current_cred(), label, NULL,
-+ AA_MAY_LOAD_POLICY);
- end_current_label_crit_section(label);
- if (error)
- return error;
-diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
-index 5a7978aa4b19e..6933cb2f679b0 100644
---- a/security/apparmor/audit.c
-+++ b/security/apparmor/audit.c
-@@ -85,37 +85,36 @@ static const char *const aa_class_names[] = {
- /**
- * audit_pre() - core AppArmor function.
- * @ab: audit buffer to fill (NOT NULL)
-- * @ca: audit structure containing data to audit (NOT NULL)
-+ * @va: audit structure containing data to audit (NOT NULL)
- *
-- * Record common AppArmor audit data from @sa
-+ * Record common AppArmor audit data from @va
- */
--static void audit_pre(struct audit_buffer *ab, void *ca)
-+static void audit_pre(struct audit_buffer *ab, void *va)
- {
-- struct common_audit_data *sa = ca;
-+ struct apparmor_audit_data *ad = aad_of_va(va);
-
- if (aa_g_audit_header) {
- audit_log_format(ab, "apparmor=\"%s\"",
-- aa_audit_type[aad(sa)->type]);
-+ aa_audit_type[ad->type]);
- }
-
-- if (aad(sa)->op) {
-- audit_log_format(ab, " operation=\"%s\"", aad(sa)->op);
-- }
-+ if (ad->op)
-+ audit_log_format(ab, " operation=\"%s\"", ad->op);
-
-- if (aad(sa)->class)
-+ if (ad->class)
- audit_log_format(ab, " class=\"%s\"",
-- aad(sa)->class <= AA_CLASS_LAST ?
-- aa_class_names[aad(sa)->class] :
-+ ad->class <= AA_CLASS_LAST ?
-+ aa_class_names[ad->class] :
- "unknown");
-
-- if (aad(sa)->info) {
-- audit_log_format(ab, " info=\"%s\"", aad(sa)->info);
-- if (aad(sa)->error)
-- audit_log_format(ab, " error=%d", aad(sa)->error);
-+ if (ad->info) {
-+ audit_log_format(ab, " info=\"%s\"", ad->info);
-+ if (ad->error)
-+ audit_log_format(ab, " error=%d", ad->error);
- }
-
-- if (aad(sa)->label) {
-- struct aa_label *label = aad(sa)->label;
-+ if (ad->subj_label) {
-+ struct aa_label *label = ad->subj_label;
-
- if (label_isprofile(label)) {
- struct aa_profile *profile = labels_profile(label);
-@@ -134,42 +133,44 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
- }
- }
-
-- if (aad(sa)->name) {
-+ if (ad->name) {
- audit_log_format(ab, " name=");
-- audit_log_untrustedstring(ab, aad(sa)->name);
-+ audit_log_untrustedstring(ab, ad->name);
- }
- }
-
- /**
- * aa_audit_msg - Log a message to the audit subsystem
-- * @sa: audit event structure (NOT NULL)
-+ * @type: audit type for the message
-+ * @ad: audit event structure (NOT NULL)
- * @cb: optional callback fn for type specific fields (MAYBE NULL)
- */
--void aa_audit_msg(int type, struct common_audit_data *sa,
-+void aa_audit_msg(int type, struct apparmor_audit_data *ad,
- void (*cb) (struct audit_buffer *, void *))
- {
-- aad(sa)->type = type;
-- common_lsm_audit(sa, audit_pre, cb);
-+ ad->type = type;
-+ common_lsm_audit(&ad->common, audit_pre, cb);
- }
-
- /**
- * aa_audit - Log a profile based audit event to the audit subsystem
- * @type: audit type for the message
- * @profile: profile to check against (NOT NULL)
-- * @sa: audit event (NOT NULL)
-+ * @ad: audit event (NOT NULL)
- * @cb: optional callback fn for type specific fields (MAYBE NULL)
- *
- * Handle default message switching based off of audit mode flags
- *
- * Returns: error on failure
- */
--int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
-+int aa_audit(int type, struct aa_profile *profile,
-+ struct apparmor_audit_data *ad,
- void (*cb) (struct audit_buffer *, void *))
- {
- AA_BUG(!profile);
-
- if (type == AUDIT_APPARMOR_AUTO) {
-- if (likely(!aad(sa)->error)) {
-+ if (likely(!ad->error)) {
- if (AUDIT_MODE(profile) != AUDIT_ALL)
- return 0;
- type = AUDIT_APPARMOR_AUDIT;
-@@ -181,24 +182,24 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
- if (AUDIT_MODE(profile) == AUDIT_QUIET ||
- (type == AUDIT_APPARMOR_DENIED &&
- AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
-- return aad(sa)->error;
-+ return ad->error;
-
- if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
- type = AUDIT_APPARMOR_KILL;
-
-- aad(sa)->label = &profile->label;
-+ ad->subj_label = &profile->label;
-
-- aa_audit_msg(type, sa, cb);
-+ aa_audit_msg(type, ad, cb);
-
-- if (aad(sa)->type == AUDIT_APPARMOR_KILL)
-+ if (ad->type == AUDIT_APPARMOR_KILL)
- (void)send_sig_info(SIGKILL, NULL,
-- sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
-- sa->u.tsk : current);
-+ ad->common.type == LSM_AUDIT_DATA_TASK &&
-+ ad->common.u.tsk ? ad->common.u.tsk : current);
-
-- if (aad(sa)->type == AUDIT_APPARMOR_ALLOWED)
-- return complain_error(aad(sa)->error);
-+ if (ad->type == AUDIT_APPARMOR_ALLOWED)
-+ return complain_error(ad->error);
-
-- return aad(sa)->error;
-+ return ad->error;
- }
-
- struct aa_audit_rule {
-diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
-index 326a51838ef28..2fb6a2ea0b998 100644
---- a/security/apparmor/capability.c
-+++ b/security/apparmor/capability.c
-@@ -51,7 +51,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
-
- /**
- * audit_caps - audit a capability
-- * @sa: audit data
-+ * @ad: audit data
- * @profile: profile being tested for confinement (NOT NULL)
- * @cap: capability tested
- * @error: error code returned by test
-@@ -59,9 +59,9 @@ static void audit_cb(struct audit_buffer *ab, void *va)
- * Do auditing of capability and handle, audit/complain/kill modes switching
- * and duplicate message elimination.
- *
-- * Returns: 0 or sa->error on success, error code on failure
-+ * Returns: 0 or ad->error on success, error code on failure
- */
--static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
-+static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile,
- int cap, int error)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
-@@ -69,7 +69,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
- struct audit_cache *ent;
- int type = AUDIT_APPARMOR_AUTO;
-
-- aad(sa)->error = error;
-+ ad->error = error;
-
- if (likely(!error)) {
- /* test if auditing is being forced */
-@@ -101,7 +101,7 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
- }
- put_cpu_var(audit_cache);
-
-- return aa_audit(type, profile, sa, audit_cb);
-+ return aa_audit(type, profile, ad, audit_cb);
- }
-
- /**
-@@ -109,12 +109,12 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
- * @profile: profile being enforced (NOT NULL, NOT unconfined)
- * @cap: capability to test if allowed
- * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
-- * @sa: audit data (MAY BE NULL indicating no auditing)
-+ * @ad: audit data (MAY BE NULL indicating no auditing)
- *
- * Returns: 0 if allowed else -EPERM
- */
- static int profile_capable(struct aa_profile *profile, int cap,
-- unsigned int opts, struct common_audit_data *sa)
-+ unsigned int opts, struct apparmor_audit_data *ad)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
-@@ -132,14 +132,15 @@ static int profile_capable(struct aa_profile *profile, int cap,
- /* audit the cap request in complain mode but note that it
- * should be optional.
- */
-- aad(sa)->info = "optional: no audit";
-+ ad->info = "optional: no audit";
- }
-
-- return audit_caps(sa, profile, cap, error);
-+ return audit_caps(ad, profile, cap, error);
- }
-
- /**
- * aa_capable - test permission to use capability
-+ * @subj_cred: cred we are testing capability against
- * @label: label being tested for capability (NOT NULL)
- * @cap: capability to be tested
- * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
-@@ -148,15 +149,17 @@ static int profile_capable(struct aa_profile *profile, int cap,
- *
- * Returns: 0 on success, or else an error code.
- */
--int aa_capable(struct aa_label *label, int cap, unsigned int opts)
-+int aa_capable(const struct cred *subj_cred, struct aa_label *label,
-+ int cap, unsigned int opts)
- {
- struct aa_profile *profile;
- int error = 0;
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
-
-- sa.u.cap = cap;
-+ ad.subj_cred = subj_cred;
-+ ad.common.u.cap = cap;
- error = fn_for_each_confined(label, profile,
-- profile_capable(profile, cap, opts, &sa));
-+ profile_capable(profile, cap, opts, &ad));
-
- return error;
- }
-diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
-index f3715cda59c52..543105cf7e334 100644
---- a/security/apparmor/domain.c
-+++ b/security/apparmor/domain.c
-@@ -31,6 +31,7 @@
-
- /**
- * may_change_ptraced_domain - check if can change profile on ptraced task
-+ * @to_cred: cred of task changing domain
- * @to_label: profile to change to (NOT NULL)
- * @info: message if there is an error
- *
-@@ -39,28 +40,34 @@
- *
- * Returns: %0 or error if change not allowed
- */
--static int may_change_ptraced_domain(struct aa_label *to_label,
-+static int may_change_ptraced_domain(const struct cred *to_cred,
-+ struct aa_label *to_label,
- const char **info)
- {
- struct task_struct *tracer;
- struct aa_label *tracerl = NULL;
-+ const struct cred *tracer_cred = NULL;
-+
- int error = 0;
-
- rcu_read_lock();
- tracer = ptrace_parent(current);
-- if (tracer)
-+ if (tracer) {
- /* released below */
- tracerl = aa_get_task_label(tracer);
--
-+ tracer_cred = get_task_cred(tracer);
-+ }
- /* not ptraced */
- if (!tracer || unconfined(tracerl))
- goto out;
-
-- error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
-+ error = aa_may_ptrace(tracer_cred, tracerl, to_cred, to_label,
-+ PTRACE_MODE_ATTACH);
-
- out:
- rcu_read_unlock();
- aa_put_label(tracerl);
-+ put_cred(tracer_cred);
-
- if (error)
- *info = "ptrace prevents transition";
-@@ -619,7 +626,8 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
- return new;
- }
-
--static struct aa_label *profile_transition(struct aa_profile *profile,
-+static struct aa_label *profile_transition(const struct cred *subj_cred,
-+ struct aa_profile *profile,
- const struct linux_binprm *bprm,
- char *buffer, struct path_cond *cond,
- bool *secure_exec)
-@@ -709,7 +717,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
- }
-
- audit:
-- aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
-+ aa_audit_file(subj_cred, profile, &perms, OP_EXEC, MAY_EXEC, name,
-+ target, new,
- cond->uid, info, error);
- if (!new || nonewprivs) {
- aa_put_label(new);
-@@ -719,7 +728,8 @@ audit:
- return new;
- }
-
--static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
-+static int profile_onexec(const struct cred *subj_cred,
-+ struct aa_profile *profile, struct aa_label *onexec,
- bool stack, const struct linux_binprm *bprm,
- char *buffer, struct path_cond *cond,
- bool *secure_exec)
-@@ -787,13 +797,15 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
- }
-
- audit:
-- return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
-+ return aa_audit_file(subj_cred, profile, &perms, OP_EXEC,
-+ AA_MAY_ONEXEC, xname,
- NULL, onexec, cond->uid, info, error);
- }
-
- /* ensure none ns domain transitions are correctly applied with onexec */
-
--static struct aa_label *handle_onexec(struct aa_label *label,
-+static struct aa_label *handle_onexec(const struct cred *subj_cred,
-+ struct aa_label *label,
- struct aa_label *onexec, bool stack,
- const struct linux_binprm *bprm,
- char *buffer, struct path_cond *cond,
-@@ -810,26 +822,28 @@ static struct aa_label *handle_onexec(struct aa_label *label,
-
- if (!stack) {
- error = fn_for_each_in_ns(label, profile,
-- profile_onexec(profile, onexec, stack,
-+ profile_onexec(subj_cred, profile, onexec, stack,
- bprm, buffer, cond, unsafe));
- if (error)
- return ERR_PTR(error);
- new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
- aa_get_newest_label(onexec),
-- profile_transition(profile, bprm, buffer,
-+ profile_transition(subj_cred, profile, bprm,
-+ buffer,
- cond, unsafe));
-
- } else {
- /* TODO: determine how much we want to loosen this */
- error = fn_for_each_in_ns(label, profile,
-- profile_onexec(profile, onexec, stack, bprm,
-+ profile_onexec(subj_cred, profile, onexec, stack, bprm,
- buffer, cond, unsafe));
- if (error)
- return ERR_PTR(error);
- new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
- aa_label_merge(&profile->label, onexec,
- GFP_KERNEL),
-- profile_transition(profile, bprm, buffer,
-+ profile_transition(subj_cred, profile, bprm,
-+ buffer,
- cond, unsafe));
- }
-
-@@ -838,7 +852,8 @@ static struct aa_label *handle_onexec(struct aa_label *label,
-
- /* TODO: get rid of GLOBAL_ROOT_UID */
- error = fn_for_each_in_ns(label, profile,
-- aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
-+ aa_audit_file(subj_cred, profile, &nullperms,
-+ OP_CHANGE_ONEXEC,
- AA_MAY_ONEXEC, bprm->filename, NULL,
- onexec, GLOBAL_ROOT_UID,
- "failed to build target label", -ENOMEM));
-@@ -857,6 +872,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
- {
- struct aa_task_ctx *ctx;
- struct aa_label *label, *new = NULL;
-+ const struct cred *subj_cred;
- struct aa_profile *profile;
- char *buffer = NULL;
- const char *info = NULL;
-@@ -869,6 +885,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
- file_inode(bprm->file)->i_mode
- };
-
-+ subj_cred = current_cred();
- ctx = task_ctx(current);
- AA_BUG(!cred_label(bprm->cred));
- AA_BUG(!ctx);
-@@ -895,11 +912,12 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
-
- /* Test for onexec first as onexec override other x transitions. */
- if (ctx->onexec)
-- new = handle_onexec(label, ctx->onexec, ctx->token,
-+ new = handle_onexec(subj_cred, label, ctx->onexec, ctx->token,
- bprm, buffer, &cond, &unsafe);
- else
- new = fn_label_build(label, profile, GFP_KERNEL,
-- profile_transition(profile, bprm, buffer,
-+ profile_transition(subj_cred, profile, bprm,
-+ buffer,
- &cond, &unsafe));
-
- AA_BUG(!new);
-@@ -934,7 +952,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
-
- if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
- /* TODO: test needs to be profile of label to new */
-- error = may_change_ptraced_domain(new, &info);
-+ error = may_change_ptraced_domain(bprm->cred, new, &info);
- if (error)
- goto audit;
- }
-@@ -971,7 +989,8 @@ done:
-
- audit:
- error = fn_for_each(label, profile,
-- aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
-+ aa_audit_file(current_cred(), profile, &nullperms,
-+ OP_EXEC, MAY_EXEC,
- bprm->filename, NULL, new,
- vfsuid_into_kuid(vfsuid), info, error));
- aa_put_label(new);
-@@ -987,7 +1006,8 @@ audit:
- *
- * Returns: label for hat transition OR ERR_PTR. Does NOT return NULL
- */
--static struct aa_label *build_change_hat(struct aa_profile *profile,
-+static struct aa_label *build_change_hat(const struct cred *subj_cred,
-+ struct aa_profile *profile,
- const char *name, bool sibling)
- {
- struct aa_profile *root, *hat = NULL;
-@@ -1019,7 +1039,8 @@ static struct aa_label *build_change_hat(struct aa_profile *profile,
- aa_put_profile(root);
-
- audit:
-- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
-+ aa_audit_file(subj_cred, profile, &nullperms, OP_CHANGE_HAT,
-+ AA_MAY_CHANGEHAT,
- name, hat ? hat->base.hname : NULL,
- hat ? &hat->label : NULL, GLOBAL_ROOT_UID, info,
- error);
-@@ -1035,7 +1056,8 @@ audit:
- *
- * Returns: label for hat transition or ERR_PTR. Does not return NULL
- */
--static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
-+static struct aa_label *change_hat(const struct cred *subj_cred,
-+ struct aa_label *label, const char *hats[],
- int count, int flags)
- {
- struct aa_profile *profile, *root, *hat = NULL;
-@@ -1111,7 +1133,8 @@ fail:
- */
- /* TODO: get rid of GLOBAL_ROOT_UID */
- if (count > 1 || COMPLAIN_MODE(profile)) {
-- aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
-+ aa_audit_file(subj_cred, profile, &nullperms,
-+ OP_CHANGE_HAT,
- AA_MAY_CHANGEHAT, name, NULL, NULL,
- GLOBAL_ROOT_UID, info, error);
- }
-@@ -1120,7 +1143,8 @@ fail:
-
- build:
- new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
-- build_change_hat(profile, name, sibling),
-+ build_change_hat(subj_cred, profile, name,
-+ sibling),
- aa_get_label(&profile->label));
- if (!new) {
- info = "label build failed";
-@@ -1150,7 +1174,7 @@ build:
- */
- int aa_change_hat(const char *hats[], int count, u64 token, int flags)
- {
-- const struct cred *cred;
-+ const struct cred *subj_cred;
- struct aa_task_ctx *ctx = task_ctx(current);
- struct aa_label *label, *previous, *new = NULL, *target = NULL;
- struct aa_profile *profile;
-@@ -1159,8 +1183,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
- int error = 0;
-
- /* released below */
-- cred = get_current_cred();
-- label = aa_get_newest_cred_label(cred);
-+ subj_cred = get_current_cred();
-+ label = aa_get_newest_cred_label(subj_cred);
- previous = aa_get_newest_label(ctx->previous);
-
- /*
-@@ -1180,7 +1204,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
- }
-
- if (count) {
-- new = change_hat(label, hats, count, flags);
-+ new = change_hat(subj_cred, label, hats, count, flags);
- AA_BUG(!new);
- if (IS_ERR(new)) {
- error = PTR_ERR(new);
-@@ -1189,7 +1213,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
- goto out;
- }
-
-- error = may_change_ptraced_domain(new, &info);
-+ /* target cred is the same as current except new label */
-+ error = may_change_ptraced_domain(subj_cred, new, &info);
- if (error)
- goto fail;
-
-@@ -1242,7 +1267,7 @@ out:
- aa_put_label(new);
- aa_put_label(previous);
- aa_put_label(label);
-- put_cred(cred);
-+ put_cred(subj_cred);
-
- return error;
-
-@@ -1252,7 +1277,7 @@ kill:
-
- fail:
- fn_for_each_in_ns(label, profile,
-- aa_audit_file(profile, &perms, OP_CHANGE_HAT,
-+ aa_audit_file(subj_cred, profile, &perms, OP_CHANGE_HAT,
- AA_MAY_CHANGEHAT, NULL, NULL, target,
- GLOBAL_ROOT_UID, info, error));
-
-@@ -1261,6 +1286,7 @@ fail:
-
-
- static int change_profile_perms_wrapper(const char *op, const char *name,
-+ const struct cred *subj_cred,
- struct aa_profile *profile,
- struct aa_label *target, bool stack,
- u32 request, struct aa_perms *perms)
-@@ -1275,7 +1301,8 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
- rules->file.start[AA_CLASS_FILE],
- perms);
- if (error)
-- error = aa_audit_file(profile, perms, op, request, name,
-+ error = aa_audit_file(subj_cred, profile, perms, op, request,
-+ name,
- NULL, target, GLOBAL_ROOT_UID, info,
- error);
-
-@@ -1304,6 +1331,7 @@ int aa_change_profile(const char *fqname, int flags)
- const char *auditname = fqname; /* retain leading & if stack */
- bool stack = flags & AA_CHANGE_STACK;
- struct aa_task_ctx *ctx = task_ctx(current);
-+ const struct cred *subj_cred = get_current_cred();
- int error = 0;
- char *op;
- u32 request;
-@@ -1381,6 +1409,7 @@ int aa_change_profile(const char *fqname, int flags)
- */
- error = fn_for_each_in_ns(label, profile,
- change_profile_perms_wrapper(op, auditname,
-+ subj_cred,
- profile, target, stack,
- request, &perms));
- if (error)
-@@ -1391,7 +1420,7 @@ int aa_change_profile(const char *fqname, int flags)
-
- check:
- /* check if tracing task is allowed to trace target domain */
-- error = may_change_ptraced_domain(target, &info);
-+ error = may_change_ptraced_domain(subj_cred, target, &info);
- if (error && !fn_for_each_in_ns(label, profile,
- COMPLAIN_MODE(profile)))
- goto audit;
-@@ -1451,7 +1480,8 @@ check:
-
- audit:
- error = fn_for_each_in_ns(label, profile,
-- aa_audit_file(profile, &perms, op, request, auditname,
-+ aa_audit_file(subj_cred,
-+ profile, &perms, op, request, auditname,
- NULL, new ? new : target,
- GLOBAL_ROOT_UID, info, error));
-
-@@ -1459,6 +1489,7 @@ out:
- aa_put_label(new);
- aa_put_label(target);
- aa_put_label(label);
-+ put_cred(subj_cred);
-
- return error;
- }
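Most of the capability.c and domain.c churn above is one mechanical pattern: every permission path now carries the subject's `struct cred *` from the LSM hook down to the audit record, instead of re-reading `current_cred()` at audit time, and where a tracer's credentials are sampled under RCU a `get_task_cred()`/`put_cred()` pair pins them. A kernel-context sketch of that reference discipline, with `do_check()` as an assumed stand-in for the real permission test:

    #include <linux/cred.h>
    #include <linux/sched.h>

    static int check_against_tracer(struct task_struct *tracer)
    {
            const struct cred *tracer_cred;
            int error;

            tracer_cred = get_task_cred(tracer);    /* takes a reference */
            error = do_check(tracer_cred);          /* assumed helper */
            put_cred(tracer_cred);                  /* balances get_task_cred() */
            return error;
    }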
-diff --git a/security/apparmor/file.c b/security/apparmor/file.c
-index 698b124e649f6..6fd21324a097f 100644
---- a/security/apparmor/file.c
-+++ b/security/apparmor/file.c
-@@ -44,38 +44,40 @@ static u32 map_mask_to_chr_mask(u32 mask)
- static void file_audit_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-- kuid_t fsuid = current_fsuid();
-+ struct apparmor_audit_data *ad = aad(sa);
-+ kuid_t fsuid = ad->subj_cred ? ad->subj_cred->fsuid : current_fsuid();
- char str[10];
-
-- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
-+ if (ad->request & AA_AUDIT_FILE_MASK) {
- aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
-- map_mask_to_chr_mask(aad(sa)->request));
-+ map_mask_to_chr_mask(ad->request));
- audit_log_format(ab, " requested_mask=\"%s\"", str);
- }
-- if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
-+ if (ad->denied & AA_AUDIT_FILE_MASK) {
- aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
-- map_mask_to_chr_mask(aad(sa)->denied));
-+ map_mask_to_chr_mask(ad->denied));
- audit_log_format(ab, " denied_mask=\"%s\"", str);
- }
-- if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
-+ if (ad->request & AA_AUDIT_FILE_MASK) {
- audit_log_format(ab, " fsuid=%d",
- from_kuid(&init_user_ns, fsuid));
- audit_log_format(ab, " ouid=%d",
-- from_kuid(&init_user_ns, aad(sa)->fs.ouid));
-+ from_kuid(&init_user_ns, ad->fs.ouid));
- }
-
-- if (aad(sa)->peer) {
-+ if (ad->peer) {
- audit_log_format(ab, " target=");
-- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
-+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
- FLAG_VIEW_SUBNS, GFP_KERNEL);
-- } else if (aad(sa)->fs.target) {
-+ } else if (ad->fs.target) {
- audit_log_format(ab, " target=");
-- audit_log_untrustedstring(ab, aad(sa)->fs.target);
-+ audit_log_untrustedstring(ab, ad->fs.target);
- }
- }
-
- /**
- * aa_audit_file - handle the auditing of file operations
-+ * @subj_cred: cred of the subject
- * @profile: the profile being enforced (NOT NULL)
- * @perms: the permissions computed for the request (NOT NULL)
- * @op: operation being mediated
-@@ -89,59 +91,74 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
- *
- * Returns: %0 or error on failure
- */
--int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
-+int aa_audit_file(const struct cred *subj_cred,
-+ struct aa_profile *profile, struct aa_perms *perms,
- const char *op, u32 request, const char *name,
- const char *target, struct aa_label *tlabel,
- kuid_t ouid, const char *info, int error)
- {
- int type = AUDIT_APPARMOR_AUTO;
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
--
-- sa.u.tsk = NULL;
-- aad(&sa)->request = request;
-- aad(&sa)->name = name;
-- aad(&sa)->fs.target = target;
-- aad(&sa)->peer = tlabel;
-- aad(&sa)->fs.ouid = ouid;
-- aad(&sa)->info = info;
-- aad(&sa)->error = error;
-- sa.u.tsk = NULL;
--
-- if (likely(!aad(&sa)->error)) {
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
-+
-+ ad.subj_cred = subj_cred;
-+ ad.request = request;
-+ ad.name = name;
-+ ad.fs.target = target;
-+ ad.peer = tlabel;
-+ ad.fs.ouid = ouid;
-+ ad.info = info;
-+ ad.error = error;
-+ ad.common.u.tsk = NULL;
-+
-+ if (likely(!ad.error)) {
- u32 mask = perms->audit;
-
- if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
- mask = 0xffff;
-
- /* mask off perms that are not being force audited */
-- aad(&sa)->request &= mask;
-+ ad.request &= mask;
-
-- if (likely(!aad(&sa)->request))
-+ if (likely(!ad.request))
- return 0;
- type = AUDIT_APPARMOR_AUDIT;
- } else {
- /* only report permissions that were denied */
-- aad(&sa)->request = aad(&sa)->request & ~perms->allow;
-- AA_BUG(!aad(&sa)->request);
-+ ad.request = ad.request & ~perms->allow;
-+ AA_BUG(!ad.request);
-
-- if (aad(&sa)->request & perms->kill)
-+ if (ad.request & perms->kill)
- type = AUDIT_APPARMOR_KILL;
-
- /* quiet known rejects, assumes quiet and kill do not overlap */
-- if ((aad(&sa)->request & perms->quiet) &&
-+ if ((ad.request & perms->quiet) &&
- AUDIT_MODE(profile) != AUDIT_NOQUIET &&
- AUDIT_MODE(profile) != AUDIT_ALL)
-- aad(&sa)->request &= ~perms->quiet;
-+ ad.request &= ~perms->quiet;
-
-- if (!aad(&sa)->request)
-- return aad(&sa)->error;
-+ if (!ad.request)
-+ return ad.error;
- }
-
-- aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
-- return aa_audit(type, profile, &sa, file_audit_cb);
-+ ad.denied = ad.request & ~perms->allow;
-+ return aa_audit(type, profile, &ad, file_audit_cb);
- }
-
--static int path_name(const char *op, struct aa_label *label,
-+/**
-+ * is_deleted - test if a file has been completely unlinked
-+ * @dentry: dentry of file to test for deletion (NOT NULL)
-+ *
-+ * Returns: true if deleted else false
-+ */
-+static inline bool is_deleted(struct dentry *dentry)
-+{
-+ if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
-+ return true;
-+ return false;
-+}
-+
-+static int path_name(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label,
- const struct path *path, int flags, char *buffer,
- const char **name, struct path_cond *cond, u32 request)
- {
-@@ -153,7 +170,8 @@ static int path_name(const char *op, struct aa_label *label,
- labels_profile(label)->disconnected);
- if (error) {
- fn_for_each_confined(label, profile,
-- aa_audit_file(profile, &nullperms, op, request, *name,
-+ aa_audit_file(subj_cred,
-+ profile, &nullperms, op, request, *name,
- NULL, NULL, cond->uid, info, error));
- return error;
- }
-@@ -207,9 +225,9 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
- return state;
- }
-
--static int __aa_path_perm(const char *op, struct aa_profile *profile,
-- const char *name, u32 request,
-- struct path_cond *cond, int flags,
-+static int __aa_path_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_profile *profile, const char *name,
-+ u32 request, struct path_cond *cond, int flags,
- struct aa_perms *perms)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
-@@ -222,12 +240,14 @@ static int __aa_path_perm(const char *op, struct aa_profile *profile,
- name, cond, perms);
- if (request & ~perms->allow)
- e = -EACCES;
-- return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
-+ return aa_audit_file(subj_cred,
-+ profile, perms, op, request, name, NULL, NULL,
- cond->uid, NULL, e);
- }
-
-
--static int profile_path_perm(const char *op, struct aa_profile *profile,
-+static int profile_path_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_profile *profile,
- const struct path *path, char *buffer, u32 request,
- struct path_cond *cond, int flags,
- struct aa_perms *perms)
-@@ -238,18 +258,19 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
- if (profile_unconfined(profile))
- return 0;
-
-- error = path_name(op, &profile->label, path,
-+ error = path_name(op, subj_cred, &profile->label, path,
- flags | profile->path_flags, buffer, &name, cond,
- request);
- if (error)
- return error;
-- return __aa_path_perm(op, profile, name, request, cond, flags,
-- perms);
-+ return __aa_path_perm(op, subj_cred, profile, name, request, cond,
-+ flags, perms);
- }
-
- /**
- * aa_path_perm - do permissions check & audit for @path
- * @op: operation being checked
-+ * @subj_cred: subject cred
- * @label: profile being enforced (NOT NULL)
- * @path: path to check permissions of (NOT NULL)
- * @flags: any additional path flags beyond what the profile specifies
-@@ -258,7 +279,8 @@ static int profile_path_perm(const char *op, struct aa_profile *profile,
- *
- * Returns: %0 else error if access denied or other error
- */
--int aa_path_perm(const char *op, struct aa_label *label,
-+int aa_path_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label,
- const struct path *path, int flags, u32 request,
- struct path_cond *cond)
- {
-@@ -273,8 +295,8 @@ int aa_path_perm(const char *op, struct aa_label *label,
- if (!buffer)
- return -ENOMEM;
- error = fn_for_each_confined(label, profile,
-- profile_path_perm(op, profile, path, buffer, request,
-- cond, flags, &perms));
-+ profile_path_perm(op, subj_cred, profile, path, buffer,
-+ request, cond, flags, &perms));
-
- aa_put_buffer(buffer);
-
-@@ -301,7 +323,8 @@ static inline bool xindex_is_subset(u32 link, u32 target)
- return true;
- }
-
--static int profile_path_link(struct aa_profile *profile,
-+static int profile_path_link(const struct cred *subj_cred,
-+ struct aa_profile *profile,
- const struct path *link, char *buffer,
- const struct path *target, char *buffer2,
- struct path_cond *cond)
-@@ -315,13 +338,15 @@ static int profile_path_link(struct aa_profile *profile,
- aa_state_t state;
- int error;
-
-- error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
-+ error = path_name(OP_LINK, subj_cred, &profile->label, link,
-+ profile->path_flags,
- buffer, &lname, cond, AA_MAY_LINK);
- if (error)
- goto audit;
-
- /* buffer2 freed below, tname is pointer in buffer2 */
-- error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
-+ error = path_name(OP_LINK, subj_cred, &profile->label, target,
-+ profile->path_flags,
- buffer2, &tname, cond, AA_MAY_LINK);
- if (error)
- goto audit;
-@@ -381,12 +406,14 @@ done_tests:
- error = 0;
-
- audit:
-- return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
-+ return aa_audit_file(subj_cred,
-+ profile, &lperms, OP_LINK, request, lname, tname,
- NULL, cond->uid, info, error);
- }
-
- /**
- * aa_path_link - Handle hard link permission check
-+ * @subj_cred: subject cred
- * @label: the label being enforced (NOT NULL)
- * @old_dentry: the target dentry (NOT NULL)
- * @new_dir: directory the new link will be created in (NOT NULL)
-@@ -403,7 +430,8 @@ audit:
- *
- * Returns: %0 if allowed else error
- */
--int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
-+int aa_path_link(const struct cred *subj_cred,
-+ struct aa_label *label, struct dentry *old_dentry,
- const struct path *new_dir, struct dentry *new_dentry)
- {
- struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
-@@ -424,8 +452,8 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
- goto out;
-
- error = fn_for_each_confined(label, profile,
-- profile_path_link(profile, &link, buffer, &target,
-- buffer2, &cond));
-+ profile_path_link(subj_cred, profile, &link, buffer,
-+ &target, buffer2, &cond));
- out:
- aa_put_buffer(buffer);
- aa_put_buffer(buffer2);
-@@ -453,7 +481,8 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
- spin_unlock(&fctx->lock);
- }
-
--static int __file_path_perm(const char *op, struct aa_label *label,
-+static int __file_path_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label,
- struct aa_label *flabel, struct file *file,
- u32 request, u32 denied, bool in_atomic)
- {
-@@ -480,7 +509,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
-
- /* check every profile in task label not in current cache */
- error = fn_for_each_not_in_set(flabel, label, profile,
-- profile_path_perm(op, profile, &file->f_path, buffer,
-+ profile_path_perm(op, subj_cred, profile,
-+ &file->f_path, buffer,
- request, &cond, flags, &perms));
- if (denied && !error) {
- /*
-@@ -493,12 +523,14 @@ static int __file_path_perm(const char *op, struct aa_label *label,
- */
- if (label == flabel)
- error = fn_for_each(label, profile,
-- profile_path_perm(op, profile, &file->f_path,
-+ profile_path_perm(op, subj_cred,
-+ profile, &file->f_path,
- buffer, request, &cond, flags,
- &perms));
- else
- error = fn_for_each_not_in_set(label, flabel, profile,
-- profile_path_perm(op, profile, &file->f_path,
-+ profile_path_perm(op, subj_cred,
-+ profile, &file->f_path,
- buffer, request, &cond, flags,
- &perms));
- }
-@@ -510,7 +542,8 @@ static int __file_path_perm(const char *op, struct aa_label *label,
- return error;
- }
-
--static int __file_sock_perm(const char *op, struct aa_label *label,
-+static int __file_sock_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label,
- struct aa_label *flabel, struct file *file,
- u32 request, u32 denied)
- {
-@@ -524,11 +557,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
- return 0;
-
- /* TODO: improve to skip profiles cached in flabel */
-- error = aa_sock_file_perm(label, op, request, sock);
-+ error = aa_sock_file_perm(subj_cred, label, op, request, sock);
- if (denied) {
- /* TODO: improve to skip profiles checked above */
- /* check every profile in file label to is cached */
-- last_error(error, aa_sock_file_perm(flabel, op, request, sock));
-+ last_error(error, aa_sock_file_perm(subj_cred, flabel, op,
-+ request, sock));
- }
- if (!error)
- update_file_ctx(file_ctx(file), label, request);
-@@ -539,6 +573,7 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
- /**
- * aa_file_perm - do permission revalidation check & audit for @file
- * @op: operation being checked
-+ * @subj_cred: subject cred
- * @label: label being enforced (NOT NULL)
- * @file: file to revalidate access permissions on (NOT NULL)
- * @request: requested permissions
-@@ -546,7 +581,8 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
- *
- * Returns: %0 if access allowed else error
- */
--int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
-+int aa_file_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label, struct file *file,
- u32 request, bool in_atomic)
- {
- struct aa_file_ctx *fctx;
-@@ -582,19 +618,19 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
- /* TODO: label cross check */
-
- if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
-- error = __file_path_perm(op, label, flabel, file, request,
-- denied, in_atomic);
-+ error = __file_path_perm(op, subj_cred, label, flabel, file,
-+ request, denied, in_atomic);
-
- else if (S_ISSOCK(file_inode(file)->i_mode))
-- error = __file_sock_perm(op, label, flabel, file, request,
-- denied);
-+ error = __file_sock_perm(op, subj_cred, label, flabel, file,
-+ request, denied);
- aa_put_label(flabel);
-
- done:
- return error;
- }
-
--static void revalidate_tty(struct aa_label *label)
-+static void revalidate_tty(const struct cred *subj_cred, struct aa_label *label)
- {
- struct tty_struct *tty;
- int drop_tty = 0;
-@@ -612,8 +648,8 @@ static void revalidate_tty(struct aa_label *label)
- struct tty_file_private, list);
- file = file_priv->file;
-
-- if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
-- IN_ATOMIC))
-+ if (aa_file_perm(OP_INHERIT, subj_cred, label, file,
-+ MAY_READ | MAY_WRITE, IN_ATOMIC))
- drop_tty = 1;
- }
- spin_unlock(&tty->files_lock);
-@@ -623,12 +659,17 @@ static void revalidate_tty(struct aa_label *label)
- no_tty();
- }
-
-+struct cred_label {
-+ const struct cred *cred;
-+ struct aa_label *label;
-+};
-+
- static int match_file(const void *p, struct file *file, unsigned int fd)
- {
-- struct aa_label *label = (struct aa_label *)p;
-+ struct cred_label *cl = (struct cred_label *)p;
-
-- if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
-- IN_ATOMIC))
-+ if (aa_file_perm(OP_INHERIT, cl->cred, cl->label, file,
-+ aa_map_file_to_perms(file), IN_ATOMIC))
- return fd + 1;
- return 0;
- }
-@@ -638,13 +679,17 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
- void aa_inherit_files(const struct cred *cred, struct files_struct *files)
- {
- struct aa_label *label = aa_get_newest_cred_label(cred);
-+ struct cred_label cl = {
-+ .cred = cred,
-+ .label = label,
-+ };
- struct file *devnull = NULL;
- unsigned int n;
-
-- revalidate_tty(label);
-+ revalidate_tty(cred, label);
-
- /* Revalidate access to inherited open files. */
-- n = iterate_fd(files, 0, match_file, label);
-+ n = iterate_fd(files, 0, match_file, &cl);
- if (!n) /* none found? */
- goto out;
-
-@@ -654,7 +699,7 @@ void aa_inherit_files(const struct cred *cred, struct files_struct *files)
- /* replace all the matching ones with this */
- do {
- replace_fd(n - 1, devnull, 0);
-- } while ((n = iterate_fd(files, n, match_file, label)) != 0);
-+ } while ((n = iterate_fd(files, n, match_file, &cl)) != 0);
- if (devnull)
- fput(devnull);
- out:
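One small idiom in the file.c hunk above deserves a note: `iterate_fd()` forwards only a single `void *` of context to its callback, so once `match_file()` needs both the cred and the label, the two are bundled into an on-stack `struct cred_label` and passed by address. The same pattern in miniature, with `check_one()` as an assumed stand-in for the real permission test:

    /* iterate_fd() stops on a nonzero callback return, so fd + 1
     * reports which descriptor failed revalidation. */
    struct cred_label {
            const struct cred *cred;
            struct aa_label *label;
    };

    static int match_file(const void *p, struct file *file, unsigned int fd)
    {
            const struct cred_label *cl = p;

            return check_one(cl->cred, cl->label, file) ? fd + 1 : 0;
    }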
-diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
-index c328f07f11cd8..42d701fec5a6d 100644
---- a/security/apparmor/include/audit.h
-+++ b/security/apparmor/include/audit.h
-@@ -109,7 +109,8 @@ struct apparmor_audit_data {
- int type;
- u16 class;
- const char *op;
-- struct aa_label *label;
-+ const struct cred *subj_cred;
-+ struct aa_label *subj_label;
- const char *name;
- const char *info;
- u32 request;
-@@ -152,33 +153,35 @@ struct apparmor_audit_data {
- unsigned long flags;
- } mnt;
- };
-+
-+ struct common_audit_data common;
- };
-
- /* macros for dealing with apparmor_audit_data structure */
--#define aad(SA) ((SA)->apparmor_audit_data)
-+#define aad(SA) (container_of(SA, struct apparmor_audit_data, common))
-+#define aad_of_va(VA) aad((struct common_audit_data *)(VA))
-+
- #define DEFINE_AUDIT_DATA(NAME, T, C, X) \
- /* TODO: cleanup audit init so we don't need _aad = {0,} */ \
-- struct apparmor_audit_data NAME ## _aad = { \
-+ struct apparmor_audit_data NAME = { \
- .class = (C), \
- .op = (X), \
-- }; \
-- struct common_audit_data NAME = \
-- { \
-- .type = (T), \
-- .u.tsk = NULL, \
-- }; \
-- NAME.apparmor_audit_data = &(NAME ## _aad)
--
--void aa_audit_msg(int type, struct common_audit_data *sa,
-+ .common.type = (T), \
-+ .common.u.tsk = NULL, \
-+ .common.apparmor_audit_data = &NAME, \
-+ };
-+
-+void aa_audit_msg(int type, struct apparmor_audit_data *ad,
- void (*cb) (struct audit_buffer *, void *));
--int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
-+int aa_audit(int type, struct aa_profile *profile,
-+ struct apparmor_audit_data *ad,
- void (*cb) (struct audit_buffer *, void *));
-
--#define aa_audit_error(ERROR, SA, CB) \
-+#define aa_audit_error(ERROR, AD, CB) \
- ({ \
-- aad((SA))->error = (ERROR); \
-- aa_audit_msg(AUDIT_APPARMOR_ERROR, (SA), (CB)); \
-- aad((SA))->error; \
-+ (AD)->error = (ERROR); \
-+ aa_audit_msg(AUDIT_APPARMOR_ERROR, (AD), (CB)); \
-+ (AD)->error; \
- })
-
-
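The audit.h rewrite above is the keystone of the whole AppArmor portion of this patch: `struct common_audit_data` is now embedded in `struct apparmor_audit_data` as its `common` member, and `aad()` becomes a `container_of()` lookup instead of dereferencing a stored pointer, so a single object carries both the LSM-generic and the AppArmor-specific audit state (including the new `subj_cred`). The idiom in isolation, with simplified members and the userspace equivalent of `container_of()`:

    #include <stddef.h>

    struct common_audit_data {
            int type;
    };

    struct apparmor_audit_data {
            int error;
            struct common_audit_data common;    /* embedded, not a pointer */
    };

    /* Recover the outer structure from a pointer to its member. */
    #define aad(sa) \
            ((struct apparmor_audit_data *)((char *)(sa) - \
             offsetof(struct apparmor_audit_data, common)))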
-diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
-index d420e2d10b31b..d6dcc604ec0cc 100644
---- a/security/apparmor/include/capability.h
-+++ b/security/apparmor/include/capability.h
-@@ -36,7 +36,8 @@ struct aa_caps {
-
- extern struct aa_sfs_entry aa_sfs_entry_caps[];
-
--int aa_capable(struct aa_label *label, int cap, unsigned int opts);
-+int aa_capable(const struct cred *subj_cred, struct aa_label *label,
-+ int cap, unsigned int opts);
-
- static inline void aa_free_cap_rules(struct aa_caps *caps)
- {
-diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
-index 5be620af33ba0..64dc6d1a7a05c 100644
---- a/security/apparmor/include/file.h
-+++ b/security/apparmor/include/file.h
-@@ -108,7 +108,8 @@ struct path_cond {
-
- #define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
-
--int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
-+int aa_audit_file(const struct cred *cred,
-+ struct aa_profile *profile, struct aa_perms *perms,
- const char *op, u32 request, const char *name,
- const char *target, struct aa_label *tlabel, kuid_t ouid,
- const char *info, int error);
-@@ -119,14 +120,16 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
- const char *name, struct path_cond *cond,
- struct aa_perms *perms);
-
--int aa_path_perm(const char *op, struct aa_label *label,
-- const struct path *path, int flags, u32 request,
-- struct path_cond *cond);
-+int aa_path_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
-+ int flags, u32 request, struct path_cond *cond);
-
--int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
-- const struct path *new_dir, struct dentry *new_dentry);
-+int aa_path_link(const struct cred *subj_cred, struct aa_label *label,
-+ struct dentry *old_dentry, const struct path *new_dir,
-+ struct dentry *new_dentry);
-
--int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
-+int aa_file_perm(const char *op, const struct cred *subj_cred,
-+ struct aa_label *label, struct file *file,
- u32 request, bool in_atomic);
-
- void aa_inherit_files(const struct cred *cred, struct files_struct *files);
-diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
-index a1ac6ffb95e9c..74d17052f76bc 100644
---- a/security/apparmor/include/ipc.h
-+++ b/security/apparmor/include/ipc.h
-@@ -13,6 +13,8 @@
-
- #include <linux/sched.h>
-
--int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig);
-+int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
-+ const struct cred *target_cred, struct aa_label *target,
-+ int sig);
-
- #endif /* __AA_IPC_H */
-diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h
-index a710683b24965..46834f8281794 100644
---- a/security/apparmor/include/mount.h
-+++ b/security/apparmor/include/mount.h
-@@ -25,26 +25,36 @@
-
- #define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN)
-
--int aa_remount(struct aa_label *label, const struct path *path,
-+int aa_remount(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
- unsigned long flags, void *data);
-
--int aa_bind_mount(struct aa_label *label, const struct path *path,
-+int aa_bind_mount(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
- const char *old_name, unsigned long flags);
-
-
--int aa_mount_change_type(struct aa_label *label, const struct path *path,
-+int aa_mount_change_type(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
- unsigned long flags);
-
--int aa_move_mount(struct aa_label *label, const struct path *path,
-- const char *old_name);
-+int aa_move_mount_old(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
-+ const char *old_name);
-+int aa_move_mount(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *from_path,
-+ const struct path *to_path);
-
--int aa_new_mount(struct aa_label *label, const char *dev_name,
-+int aa_new_mount(const struct cred *subj_cred,
-+ struct aa_label *label, const char *dev_name,
- const struct path *path, const char *type, unsigned long flags,
- void *data);
-
--int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags);
-+int aa_umount(const struct cred *subj_cred,
-+ struct aa_label *label, struct vfsmount *mnt, int flags);
-
--int aa_pivotroot(struct aa_label *label, const struct path *old_path,
-+int aa_pivotroot(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *old_path,
- const struct path *new_path);
-
- #endif /* __AA_MOUNT_H */
-diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
-index 6fa440b5daed8..aa8515af677f0 100644
---- a/security/apparmor/include/net.h
-+++ b/security/apparmor/include/net.h
-@@ -61,9 +61,9 @@ struct aa_sk_ctx {
- LSM_AUDIT_DATA_NONE, \
- AA_CLASS_NET, \
- OP); \
-- NAME.u.net = &(NAME ## _net); \
-- aad(&NAME)->net.type = (T); \
-- aad(&NAME)->net.protocol = (P)
-+ NAME.common.u.net = &(NAME ## _net); \
-+ NAME.net.type = (T); \
-+ NAME.net.protocol = (P)
-
- #define DEFINE_AUDIT_SK(NAME, OP, SK) \
- DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
-@@ -90,21 +90,24 @@ struct aa_secmark {
- extern struct aa_sfs_entry aa_sfs_entry_network[];
-
- void audit_net_cb(struct audit_buffer *ab, void *va);
--int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
-+int aa_profile_af_perm(struct aa_profile *profile,
-+ struct apparmor_audit_data *ad,
- u32 request, u16 family, int type);
--int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
-+int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
-+ const char *op, u32 request, u16 family,
- int type, int protocol);
- static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
-- struct common_audit_data *sa,
-+ struct apparmor_audit_data *ad,
- u32 request,
- struct sock *sk)
- {
-- return aa_profile_af_perm(profile, sa, request, sk->sk_family,
-+ return aa_profile_af_perm(profile, ad, request, sk->sk_family,
- sk->sk_type);
- }
- int aa_sk_perm(const char *op, u32 request, struct sock *sk);
-
--int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
-+int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
-+ const char *op, u32 request,
- struct socket *sock);
-
- int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
-diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
-index 797a7a00644d2..83534df8939fd 100644
---- a/security/apparmor/include/perms.h
-+++ b/security/apparmor/include/perms.h
-@@ -212,8 +212,8 @@ void aa_profile_match_label(struct aa_profile *profile,
- int type, u32 request, struct aa_perms *perms);
- int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
- u32 request, int type, u32 *deny,
-- struct common_audit_data *sa);
-+ struct apparmor_audit_data *ad);
- int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
-- u32 request, struct common_audit_data *sa,
-+ u32 request, struct apparmor_audit_data *ad,
- void (*cb)(struct audit_buffer *, void *));
- #endif /* __AA_PERM_H */
-diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
-index 545f791cabdae..fa15a5c7febb8 100644
---- a/security/apparmor/include/policy.h
-+++ b/security/apparmor/include/policy.h
-@@ -370,9 +370,12 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
- return profile->audit;
- }
-
--bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns);
--bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns);
--int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
-+bool aa_policy_view_capable(const struct cred *subj_cred,
-+ struct aa_label *label, struct aa_ns *ns);
-+bool aa_policy_admin_capable(const struct cred *subj_cred,
-+ struct aa_label *label, struct aa_ns *ns);
-+int aa_may_manage_policy(const struct cred *subj_cred,
-+ struct aa_label *label, struct aa_ns *ns,
- u32 mask);
- bool aa_current_policy_view_capable(struct aa_ns *ns);
- bool aa_current_policy_admin_capable(struct aa_ns *ns);
-diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
-index 961d85d328ea9..ad2c0da8e64fc 100644
---- a/security/apparmor/include/resource.h
-+++ b/security/apparmor/include/resource.h
-@@ -33,7 +33,8 @@ struct aa_rlimit {
- extern struct aa_sfs_entry aa_sfs_entry_rlimit[];
-
- int aa_map_resource(int resource);
--int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
-+int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
-+ struct task_struct *task,
- unsigned int resource, struct rlimit *new_rlim);
-
- void __aa_transition_rlimits(struct aa_label *old, struct aa_label *new);
-diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
-index 13437d62c70f4..29ba55107b7d6 100644
---- a/security/apparmor/include/task.h
-+++ b/security/apparmor/include/task.h
-@@ -91,7 +91,8 @@ static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
- "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \
- "xcpu xfsz vtalrm prof winch io pwr sys emt lost"
-
--int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
-+int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
-+ const struct cred *tracee_cred, struct aa_label *tracee,
- u32 request);
-
-
-diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
-index 5acde746775f7..c0d0dbd7b4c4b 100644
---- a/security/apparmor/ipc.c
-+++ b/security/apparmor/ipc.c
-@@ -52,31 +52,33 @@ static const char *audit_signal_mask(u32 mask)
- static void audit_signal_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
-- if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
-+ if (ad->request & AA_SIGNAL_PERM_MASK) {
- audit_log_format(ab, " requested_mask=\"%s\"",
-- audit_signal_mask(aad(sa)->request));
-- if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
-+ audit_signal_mask(ad->request));
-+ if (ad->denied & AA_SIGNAL_PERM_MASK) {
- audit_log_format(ab, " denied_mask=\"%s\"",
-- audit_signal_mask(aad(sa)->denied));
-+ audit_signal_mask(ad->denied));
- }
- }
-- if (aad(sa)->signal == SIGUNKNOWN)
-+ if (ad->signal == SIGUNKNOWN)
- audit_log_format(ab, "signal=unknown(%d)",
-- aad(sa)->unmappedsig);
-- else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
-- audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
-+ ad->unmappedsig);
-+ else if (ad->signal < MAXMAPPED_SIGNAME)
-+ audit_log_format(ab, " signal=%s", sig_names[ad->signal]);
- else
- audit_log_format(ab, " signal=rtmin+%d",
-- aad(sa)->signal - SIGRT_BASE);
-+ ad->signal - SIGRT_BASE);
- audit_log_format(ab, " peer=");
-- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
-+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
-
--static int profile_signal_perm(struct aa_profile *profile,
-+static int profile_signal_perm(const struct cred *cred,
-+ struct aa_profile *profile,
- struct aa_label *peer, u32 request,
-- struct common_audit_data *sa)
-+ struct apparmor_audit_data *ad)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
-@@ -87,24 +89,29 @@ static int profile_signal_perm(struct aa_profile *profile,
- !ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL))
- return 0;
-
-- aad(sa)->peer = peer;
-+ ad->subj_cred = cred;
-+ ad->peer = peer;
- /* TODO: secondary cache check <profile, profile, perm> */
- state = aa_dfa_next(rules->policy.dfa,
- rules->policy.start[AA_CLASS_SIGNAL],
-- aad(sa)->signal);
-+ ad->signal);
- aa_label_match(profile, rules, peer, state, false, request, &perms);
- aa_apply_modes_to_perms(profile, &perms);
-- return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
-+ return aa_check_perms(profile, &perms, request, ad, audit_signal_cb);
- }
-
--int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
-+int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
-+ const struct cred *target_cred, struct aa_label *target,
-+ int sig)
- {
- struct aa_profile *profile;
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
-
-- aad(&sa)->signal = map_signal_num(sig);
-- aad(&sa)->unmappedsig = sig;
-+ ad.signal = map_signal_num(sig);
-+ ad.unmappedsig = sig;
- return xcheck_labels(sender, target, profile,
-- profile_signal_perm(profile, target, MAY_WRITE, &sa),
-- profile_signal_perm(profile, sender, MAY_READ, &sa));
-+ profile_signal_perm(subj_cred, profile, target,
-+ MAY_WRITE, &ad),
-+ profile_signal_perm(target_cred, profile, sender,
-+ MAY_READ, &ad));
- }
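[Editor's note] The ipc.c hunk above threads a subject cred and a target cred into the two-way signal check: the sender is tested for MAY_WRITE toward the target and the target for MAY_READ toward the sender, each with its own cred. Below is a minimal userspace sketch of that cross-check shape; the types and signal_perm() helper are simplified stand-ins, not the real AppArmor API.

#include <stdio.h>

struct cred  { int uid; };
struct label { const char *name; };

#define MAY_WRITE 0x2
#define MAY_READ  0x4

/* stand-in for profile_signal_perm(): 0 means allowed */
static int signal_perm(const struct cred *subj_cred, const struct label *peer,
		       unsigned int request)
{
	/* the real code walks the profile's signal DFA here */
	(void)subj_cred; (void)peer; (void)request;
	return 0;
}

static int may_signal(const struct cred *subj_cred, const struct label *sender,
		      const struct cred *target_cred, const struct label *target)
{
	/* cross check: both directions must pass, each with its own cred */
	int error = signal_perm(subj_cred, target, MAY_WRITE);

	if (error)
		return error;
	return signal_perm(target_cred, sender, MAY_READ);
}

int main(void)
{
	struct cred sc = { 1000 }, tc = { 1001 };
	struct label s = { "sender" }, t = { "target" };

	printf("signal allowed: %d\n", may_signal(&sc, &s, &tc, &t) == 0);
	return 0;
}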
-diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
-index a630c951bb3b8..c87bccafff446 100644
---- a/security/apparmor/lib.c
-+++ b/security/apparmor/lib.c
-@@ -27,7 +27,7 @@ struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
-
- /**
- * aa_free_str_table - free entries str table
-- * @str: the string table to free (MAYBE NULL)
-+ * @t: the string table to free (MAYBE NULL)
- */
- void aa_free_str_table(struct aa_str_table *t)
- {
-@@ -85,6 +85,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
- /**
- * skipn_spaces - Removes leading whitespace from @str.
- * @str: The string to be stripped.
-+ * @n: length of str to parse, will stop at \0 if encountered before n
- *
- * Returns a pointer to the first non-whitespace character in @str.
- * If @str is all whitespace, NULL is returned.
-@@ -143,10 +144,10 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
- void aa_info_message(const char *str)
- {
- if (audit_enabled) {
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
-
-- aad(&sa)->info = str;
-- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
-+ ad.info = str;
-+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, NULL);
- }
- printk(KERN_INFO "AppArmor: %s\n", str);
- }
-@@ -281,21 +282,22 @@ void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
-- if (aad(sa)->request) {
-+ if (ad->request) {
- audit_log_format(ab, " requested_mask=");
-- aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
-+ aa_audit_perm_mask(ab, ad->request, aa_file_perm_chrs,
- PERMS_CHRS_MASK, aa_file_perm_names,
- PERMS_NAMES_MASK);
- }
-- if (aad(sa)->denied) {
-+ if (ad->denied) {
- audit_log_format(ab, "denied_mask=");
-- aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
-+ aa_audit_perm_mask(ab, ad->denied, aa_file_perm_chrs,
- PERMS_CHRS_MASK, aa_file_perm_names,
- PERMS_NAMES_MASK);
- }
- audit_log_format(ab, " peer=");
-- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
-+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
-
-@@ -349,21 +351,20 @@ void aa_profile_match_label(struct aa_profile *profile,
- /* currently unused */
- int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
- u32 request, int type, u32 *deny,
-- struct common_audit_data *sa)
-+ struct apparmor_audit_data *ad)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
- struct aa_perms perms;
-
-- aad(sa)->label = &profile->label;
-- aad(sa)->peer = &target->label;
-- aad(sa)->request = request;
-+ ad->peer = &target->label;
-+ ad->request = request;
-
- aa_profile_match_label(profile, rules, &target->label, type, request,
- &perms);
- aa_apply_modes_to_perms(profile, &perms);
- *deny |= request & perms.deny;
-- return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
-+ return aa_check_perms(profile, &perms, request, ad, aa_audit_perms_cb);
- }
-
- /**
-@@ -371,8 +372,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
- * @profile: profile being checked
- * @perms: perms computed for the request
- * @request: requested perms
-- * @deny: Returns: explicit deny set
-- * @sa: initialized audit structure (MAY BE NULL if not auditing)
-+ * @ad: initialized audit structure (MAY BE NULL if not auditing)
- * @cb: callback fn for type specific fields (MAY BE NULL)
- *
- * Returns: 0 if permission else error code
-@@ -385,7 +385,7 @@ int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
- * with a positive value.
- */
- int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
-- u32 request, struct common_audit_data *sa,
-+ u32 request, struct apparmor_audit_data *ad,
- void (*cb)(struct audit_buffer *, void *))
- {
- int type, error;
-@@ -394,7 +394,7 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
- if (likely(!denied)) {
- /* mask off perms that are not being force audited */
- request &= perms->audit;
-- if (!request || !sa)
-+ if (!request || !ad)
- return 0;
-
- type = AUDIT_APPARMOR_AUDIT;
-@@ -413,16 +413,16 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
- error = -ENOENT;
-
- denied &= ~perms->quiet;
-- if (!sa || !denied)
-+ if (!ad || !denied)
- return error;
- }
-
-- if (sa) {
-- aad(sa)->label = &profile->label;
-- aad(sa)->request = request;
-- aad(sa)->denied = denied;
-- aad(sa)->error = error;
-- aa_audit_msg(type, sa, cb);
-+ if (ad) {
-+ ad->subj_label = &profile->label;
-+ ad->request = request;
-+ ad->denied = denied;
-+ ad->error = error;
-+ aa_audit_msg(type, ad, cb);
- }
-
- if (type == AUDIT_APPARMOR_ALLOWED)
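[Editor's note] Most of the lib.c churn is one mechanical pattern: instead of evaluating the aad() accessor on every field reference, each audit callback hoists it into a local apparmor_audit_data pointer once. A compilable sketch of the pattern follows; the two structures and the aad() macro are simplified stand-ins, not the kernel's real definitions.

#include <stdio.h>

struct apparmor_audit_data { unsigned int request, denied; };
struct common_audit_data   { struct apparmor_audit_data *apparmor_audit_data; };

#define aad(sa) ((sa)->apparmor_audit_data)

static void audit_cb(struct common_audit_data *sa)
{
	struct apparmor_audit_data *ad = aad(sa);	/* hoisted once */

	if (ad->request)
		printf(" requested_mask=%#x", ad->request);
	if (ad->denied)
		printf(" denied_mask=%#x", ad->denied);
	printf("\n");
}

int main(void)
{
	struct apparmor_audit_data ad = { .request = 0x4, .denied = 0x4 };
	struct common_audit_data sa = { .apparmor_audit_data = &ad };

	audit_cb(&sa);
	return 0;
}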
-diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
-index 108eccc5ada58..6fdab1b5ede5c 100644
---- a/security/apparmor/lsm.c
-+++ b/security/apparmor/lsm.c
-@@ -116,15 +116,17 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
- unsigned int mode)
- {
- struct aa_label *tracer, *tracee;
-+ const struct cred *cred;
- int error;
-
-+ cred = get_task_cred(child);
-+ tracee = cred_label(cred); /* ref count on cred */
- tracer = __begin_current_label_crit_section();
-- tracee = aa_get_task_label(child);
-- error = aa_may_ptrace(tracer, tracee,
-+ error = aa_may_ptrace(current_cred(), tracer, cred, tracee,
- (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
- : AA_PTRACE_TRACE);
-- aa_put_label(tracee);
- __end_current_label_crit_section(tracer);
-+ put_cred(cred);
-
- return error;
- }
-@@ -132,12 +134,15 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
- static int apparmor_ptrace_traceme(struct task_struct *parent)
- {
- struct aa_label *tracer, *tracee;
-+ const struct cred *cred;
- int error;
-
- tracee = __begin_current_label_crit_section();
-- tracer = aa_get_task_label(parent);
-- error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
-- aa_put_label(tracer);
-+ cred = get_task_cred(parent);
-+ tracer = cred_label(cred); /* ref count on cred */
-+ error = aa_may_ptrace(cred, tracer, current_cred(), tracee,
-+ AA_PTRACE_TRACE);
-+ put_cred(cred);
- __end_current_label_crit_section(tracee);
-
- return error;
-@@ -188,7 +193,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
-
- label = aa_get_newest_cred_label(cred);
- if (!unconfined(label))
-- error = aa_capable(label, cap, opts);
-+ error = aa_capable(cred, label, cap, opts);
- aa_put_label(label);
-
- return error;
-@@ -211,7 +216,8 @@ static int common_perm(const char *op, const struct path *path, u32 mask,
-
- label = __begin_current_label_crit_section();
- if (!unconfined(label))
-- error = aa_path_perm(op, label, path, 0, mask, cond);
-+ error = aa_path_perm(op, current_cred(), label, path, 0, mask,
-+ cond);
- __end_current_label_crit_section(label);
-
- return error;
-@@ -357,7 +363,8 @@ static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_
-
- label = begin_current_label_crit_section();
- if (!unconfined(label))
-- error = aa_path_link(label, old_dentry, new_dir, new_dentry);
-+ error = aa_path_link(current_cred(), label, old_dentry, new_dir,
-+ new_dentry);
- end_current_label_crit_section(label);
-
- return error;
-@@ -396,23 +403,27 @@ static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_d
- vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
- cond_exchange.uid = vfsuid_into_kuid(vfsuid);
-
-- error = aa_path_perm(OP_RENAME_SRC, label, &new_path, 0,
-+ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
-+ label, &new_path, 0,
- MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
- AA_MAY_SETATTR | AA_MAY_DELETE,
- &cond_exchange);
- if (!error)
-- error = aa_path_perm(OP_RENAME_DEST, label, &old_path,
-+ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
-+ label, &old_path,
- 0, MAY_WRITE | AA_MAY_SETATTR |
- AA_MAY_CREATE, &cond_exchange);
- }
-
- if (!error)
-- error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
-+ error = aa_path_perm(OP_RENAME_SRC, current_cred(),
-+ label, &old_path, 0,
- MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
- AA_MAY_SETATTR | AA_MAY_DELETE,
- &cond);
- if (!error)
-- error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
-+ error = aa_path_perm(OP_RENAME_DEST, current_cred(),
-+ label, &new_path,
- 0, MAY_WRITE | AA_MAY_SETATTR |
- AA_MAY_CREATE, &cond);
-
-@@ -467,7 +478,8 @@ static int apparmor_file_open(struct file *file)
- vfsuid = i_uid_into_vfsuid(idmap, inode);
- cond.uid = vfsuid_into_kuid(vfsuid);
-
-- error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
-+ error = aa_path_perm(OP_OPEN, file->f_cred,
-+ label, &file->f_path, 0,
- aa_map_file_to_perms(file), &cond);
- /* todo cache full allowed permissions set and state */
- fctx->allow = aa_map_file_to_perms(file);
-@@ -507,7 +519,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask,
- return -EACCES;
-
- label = __begin_current_label_crit_section();
-- error = aa_file_perm(op, label, file, mask, in_atomic);
-+ error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic);
- __end_current_label_crit_section(label);
-
- return error;
-@@ -585,23 +597,42 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
- label = __begin_current_label_crit_section();
- if (!unconfined(label)) {
- if (flags & MS_REMOUNT)
-- error = aa_remount(label, path, flags, data);
-+ error = aa_remount(current_cred(), label, path, flags,
-+ data);
- else if (flags & MS_BIND)
-- error = aa_bind_mount(label, path, dev_name, flags);
-+ error = aa_bind_mount(current_cred(), label, path,
-+ dev_name, flags);
- else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
- MS_UNBINDABLE))
-- error = aa_mount_change_type(label, path, flags);
-+ error = aa_mount_change_type(current_cred(), label,
-+ path, flags);
- else if (flags & MS_MOVE)
-- error = aa_move_mount(label, path, dev_name);
-+ error = aa_move_mount_old(current_cred(), label, path,
-+ dev_name);
- else
-- error = aa_new_mount(label, dev_name, path, type,
-- flags, data);
-+ error = aa_new_mount(current_cred(), label, dev_name,
-+ path, type, flags, data);
- }
- __end_current_label_crit_section(label);
-
- return error;
- }
-
-+static int apparmor_move_mount(const struct path *from_path,
-+ const struct path *to_path)
-+{
-+ struct aa_label *label;
-+ int error = 0;
-+
-+ label = __begin_current_label_crit_section();
-+ if (!unconfined(label))
-+ error = aa_move_mount(current_cred(), label, from_path,
-+ to_path);
-+ __end_current_label_crit_section(label);
-+
-+ return error;
-+}
-+
- static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
- {
- struct aa_label *label;
-@@ -609,7 +640,7 @@ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
-
- label = __begin_current_label_crit_section();
- if (!unconfined(label))
-- error = aa_umount(label, mnt, flags);
-+ error = aa_umount(current_cred(), label, mnt, flags);
- __end_current_label_crit_section(label);
-
- return error;
-@@ -623,7 +654,7 @@ static int apparmor_sb_pivotroot(const struct path *old_path,
-
- label = aa_get_current_label();
- if (!unconfined(label))
-- error = aa_pivotroot(label, old_path, new_path);
-+ error = aa_pivotroot(current_cred(), label, old_path, new_path);
- aa_put_label(label);
-
- return error;
-@@ -662,7 +693,7 @@ static int apparmor_setprocattr(const char *name, void *value,
- char *command, *largs = NULL, *args = value;
- size_t arg_size;
- int error;
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
- OP_SETPROCATTR);
-
- if (size == 0)
-@@ -722,11 +753,11 @@ out:
- return error;
-
- fail:
-- aad(&sa)->label = begin_current_label_crit_section();
-- aad(&sa)->info = name;
-- aad(&sa)->error = error = -EINVAL;
-- aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
-- end_current_label_crit_section(aad(&sa)->label);
-+ ad.subj_label = begin_current_label_crit_section();
-+ ad.info = name;
-+ ad.error = error = -EINVAL;
-+ aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, NULL);
-+ end_current_label_crit_section(ad.subj_label);
- goto out;
- }
-
-@@ -785,7 +816,8 @@ static int apparmor_task_setrlimit(struct task_struct *task,
- int error = 0;
-
- if (!unconfined(label))
-- error = aa_task_setrlimit(label, task, resource, new_rlim);
-+ error = aa_task_setrlimit(current_cred(), label, task,
-+ resource, new_rlim);
- __end_current_label_crit_section(label);
-
- return error;
-@@ -794,26 +826,27 @@ static int apparmor_task_setrlimit(struct task_struct *task,
- static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
- int sig, const struct cred *cred)
- {
-+ const struct cred *tc;
- struct aa_label *cl, *tl;
- int error;
-
-+ tc = get_task_cred(target);
-+ tl = aa_get_newest_cred_label(tc);
- if (cred) {
- /*
- * Dealing with USB IO specific behavior
- */
- cl = aa_get_newest_cred_label(cred);
-- tl = aa_get_task_label(target);
-- error = aa_may_signal(cl, tl, sig);
-+ error = aa_may_signal(cred, cl, tc, tl, sig);
- aa_put_label(cl);
-- aa_put_label(tl);
- return error;
-+ } else {
-+ cl = __begin_current_label_crit_section();
-+ error = aa_may_signal(current_cred(), cl, tc, tl, sig);
-+ __end_current_label_crit_section(cl);
- }
--
-- cl = __begin_current_label_crit_section();
-- tl = aa_get_task_label(target);
-- error = aa_may_signal(cl, tl, sig);
- aa_put_label(tl);
-- __end_current_label_crit_section(cl);
-+ put_cred(tc);
-
- return error;
- }
-@@ -879,7 +912,8 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
- if (!(kern || unconfined(label)))
- error = af_select(family,
- create_perm(label, family, type, protocol),
-- aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
-+ aa_af_perm(current_cred(), label,
-+ OP_CREATE, AA_MAY_CREATE,
- family, type, protocol));
- end_current_label_crit_section(label);
-
-@@ -1221,6 +1255,7 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
- LSM_HOOK_INIT(capget, apparmor_capget),
- LSM_HOOK_INIT(capable, apparmor_capable),
-
-+ LSM_HOOK_INIT(move_mount, apparmor_move_mount),
- LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
- LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
- LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
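[Editor's note] Besides the cred plumbing, the lsm.c hunk introduces a dedicated move_mount hook and registers it in the hook table. A toy sketch of that table-of-callbacks registration style is below; the struct hook here is an illustrative stand-in, not the kernel's security_hook_list machinery.

#include <stdio.h>

struct path { const char *name; };

typedef int (*move_mount_fn)(const struct path *from, const struct path *to);

struct hook { const char *hook_name; move_mount_fn fn; };

/* a policy denial would return -EACCES; 0 means allowed */
static int my_move_mount(const struct path *from, const struct path *to)
{
	printf("mediate move %s -> %s\n", from->name, to->name);
	return 0;
}

static const struct hook hooks[] = {
	{ "move_mount", my_move_mount },	/* the newly wired hook */
};

int main(void)
{
	struct path a = { "/mnt/src" }, b = { "/mnt/dst" };

	return hooks[0].fn(&a, &b);
}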
-diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
-index cdfa430ae2161..f2a114e540079 100644
---- a/security/apparmor/mount.c
-+++ b/security/apparmor/mount.c
-@@ -86,32 +86,34 @@ static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
- static void audit_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
-- if (aad(sa)->mnt.type) {
-+ if (ad->mnt.type) {
- audit_log_format(ab, " fstype=");
-- audit_log_untrustedstring(ab, aad(sa)->mnt.type);
-+ audit_log_untrustedstring(ab, ad->mnt.type);
- }
-- if (aad(sa)->mnt.src_name) {
-+ if (ad->mnt.src_name) {
- audit_log_format(ab, " srcname=");
-- audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
-+ audit_log_untrustedstring(ab, ad->mnt.src_name);
- }
-- if (aad(sa)->mnt.trans) {
-+ if (ad->mnt.trans) {
- audit_log_format(ab, " trans=");
-- audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
-+ audit_log_untrustedstring(ab, ad->mnt.trans);
- }
-- if (aad(sa)->mnt.flags) {
-+ if (ad->mnt.flags) {
- audit_log_format(ab, " flags=\"");
-- audit_mnt_flags(ab, aad(sa)->mnt.flags);
-+ audit_mnt_flags(ab, ad->mnt.flags);
- audit_log_format(ab, "\"");
- }
-- if (aad(sa)->mnt.data) {
-+ if (ad->mnt.data) {
- audit_log_format(ab, " options=");
-- audit_log_untrustedstring(ab, aad(sa)->mnt.data);
-+ audit_log_untrustedstring(ab, ad->mnt.data);
- }
- }
-
- /**
- * audit_mount - handle the auditing of mount operations
-+ * @subj_cred: cred of the subject
- * @profile: the profile being enforced (NOT NULL)
- * @op: operation being mediated (NOT NULL)
- * @name: name of object being mediated (MAYBE NULL)
-@@ -127,14 +129,15 @@ static void audit_cb(struct audit_buffer *ab, void *va)
- *
- * Returns: %0 or error on failure
- */
--static int audit_mount(struct aa_profile *profile, const char *op,
-+static int audit_mount(const struct cred *subj_cred,
-+ struct aa_profile *profile, const char *op,
- const char *name, const char *src_name,
- const char *type, const char *trans,
- unsigned long flags, const void *data, u32 request,
- struct aa_perms *perms, const char *info, int error)
- {
- int audit_type = AUDIT_APPARMOR_AUTO;
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
-
- if (likely(!error)) {
- u32 mask = perms->audit;
-@@ -165,17 +168,18 @@ static int audit_mount(struct aa_profile *profile, const char *op,
- return error;
- }
-
-- aad(&sa)->name = name;
-- aad(&sa)->mnt.src_name = src_name;
-- aad(&sa)->mnt.type = type;
-- aad(&sa)->mnt.trans = trans;
-- aad(&sa)->mnt.flags = flags;
-+ ad.subj_cred = subj_cred;
-+ ad.name = name;
-+ ad.mnt.src_name = src_name;
-+ ad.mnt.type = type;
-+ ad.mnt.trans = trans;
-+ ad.mnt.flags = flags;
- if (data && (perms->audit & AA_AUDIT_DATA))
-- aad(&sa)->mnt.data = data;
-- aad(&sa)->info = info;
-- aad(&sa)->error = error;
-+ ad.mnt.data = data;
-+ ad.info = info;
-+ ad.error = error;
-
-- return aa_audit(audit_type, profile, &sa, audit_cb);
-+ return aa_audit(audit_type, profile, &ad, audit_cb);
- }
-
- /**
-@@ -283,6 +287,7 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
-
- /**
- * match_mnt_path_str - handle path matching for mount
-+ * @subj_cred: cred of confined subject
- * @profile: the confining profile
- * @mntpath: for the mntpnt (NOT NULL)
- * @buffer: buffer to be used to lookup mntpath
-@@ -295,7 +300,8 @@ static int path_flags(struct aa_profile *profile, const struct path *path)
- *
- * Returns: 0 on success else error
- */
--static int match_mnt_path_str(struct aa_profile *profile,
-+static int match_mnt_path_str(const struct cred *subj_cred,
-+ struct aa_profile *profile,
- const struct path *mntpath, char *buffer,
- const char *devname, const char *type,
- unsigned long flags, void *data, bool binary,
-@@ -336,12 +342,14 @@ static int match_mnt_path_str(struct aa_profile *profile,
- error = 0;
-
- audit:
-- return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
-+ return audit_mount(subj_cred, profile, OP_MOUNT, mntpnt, devname,
-+ type, NULL,
- flags, data, AA_MAY_MOUNT, &perms, info, error);
- }
-
- /**
- * match_mnt - handle path matching for mount
-+ * @subj_cred: cred of the subject
- * @profile: the confining profile
- * @path: for the mntpnt (NOT NULL)
- * @buffer: buffer to be used to lookup mntpath
-@@ -354,7 +362,8 @@ audit:
- *
- * Returns: 0 on success else error
- */
--static int match_mnt(struct aa_profile *profile, const struct path *path,
-+static int match_mnt(const struct cred *subj_cred,
-+ struct aa_profile *profile, const struct path *path,
- char *buffer, const struct path *devpath, char *devbuffer,
- const char *type, unsigned long flags, void *data,
- bool binary)
-@@ -378,11 +387,12 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
- devname = ERR_PTR(error);
- }
-
-- return match_mnt_path_str(profile, path, buffer, devname, type, flags,
-- data, binary, info);
-+ return match_mnt_path_str(subj_cred, profile, path, buffer, devname,
-+ type, flags, data, binary, info);
- }
-
--int aa_remount(struct aa_label *label, const struct path *path,
-+int aa_remount(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
- unsigned long flags, void *data)
- {
- struct aa_profile *profile;
-@@ -399,14 +409,16 @@ int aa_remount(struct aa_label *label, const struct path *path,
- if (!buffer)
- return -ENOMEM;
- error = fn_for_each_confined(label, profile,
-- match_mnt(profile, path, buffer, NULL, NULL, NULL,
-+ match_mnt(subj_cred, profile, path, buffer, NULL,
-+ NULL, NULL,
- flags, data, binary));
- aa_put_buffer(buffer);
-
- return error;
- }
-
--int aa_bind_mount(struct aa_label *label, const struct path *path,
-+int aa_bind_mount(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
- const char *dev_name, unsigned long flags)
- {
- struct aa_profile *profile;
-@@ -433,8 +445,8 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
- goto out;
-
- error = fn_for_each_confined(label, profile,
-- match_mnt(profile, path, buffer, &old_path, old_buffer,
-- NULL, flags, NULL, false));
-+ match_mnt(subj_cred, profile, path, buffer, &old_path,
-+ old_buffer, NULL, flags, NULL, false));
- out:
- aa_put_buffer(buffer);
- aa_put_buffer(old_buffer);
-@@ -443,7 +455,8 @@ out:
- return error;
- }
-
--int aa_mount_change_type(struct aa_label *label, const struct path *path,
-+int aa_mount_change_type(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *path,
- unsigned long flags)
- {
- struct aa_profile *profile;
-@@ -461,50 +474,63 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
- if (!buffer)
- return -ENOMEM;
- error = fn_for_each_confined(label, profile,
-- match_mnt(profile, path, buffer, NULL, NULL, NULL,
-+ match_mnt(subj_cred, profile, path, buffer, NULL,
-+ NULL, NULL,
- flags, NULL, false));
- aa_put_buffer(buffer);
-
- return error;
- }
-
--int aa_move_mount(struct aa_label *label, const struct path *path,
-- const char *orig_name)
-+int aa_move_mount(const struct cred *subj_cred,
-+ struct aa_label *label, const struct path *from_path,
-+ const struct path *to_path)
- {
- struct aa_profile *profile;
-- char *buffer = NULL, *old_buffer = NULL;
-- struct path old_path;
-+ char *to_buffer = NULL, *from_buffer = NULL;
- int error;
-
- AA_BUG(!label);
-- AA_BUG(!path);
-+ AA_BUG(!from_path);
-+ AA_BUG(!to_path);
-+
-+ to_buffer = aa_get_buffer(false);
-+ from_buffer = aa_get_buffer(false);
-+ error = -ENOMEM;
-+ if (!to_buffer || !from_buffer)
-+ goto out;
-+ error = fn_for_each_confined(label, profile,
-+ match_mnt(subj_cred, profile, to_path, to_buffer,
-+ from_path, from_buffer,
-+ NULL, MS_MOVE, NULL, false));
-+out:
-+ aa_put_buffer(to_buffer);
-+ aa_put_buffer(from_buffer);
-+
-+ return error;
-+}
-+
-+int aa_move_mount_old(const struct cred *subj_cred, struct aa_label *label,
-+ const struct path *path, const char *orig_name)
-+{
-+ struct path old_path;
-+ int error;
-
- if (!orig_name || !*orig_name)
- return -EINVAL;
--
- error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
- if (error)
- return error;
-
-- buffer = aa_get_buffer(false);
-- old_buffer = aa_get_buffer(false);
-- error = -ENOMEM;
-- if (!buffer || !old_buffer)
-- goto out;
-- error = fn_for_each_confined(label, profile,
-- match_mnt(profile, path, buffer, &old_path, old_buffer,
-- NULL, MS_MOVE, NULL, false));
--out:
-- aa_put_buffer(buffer);
-- aa_put_buffer(old_buffer);
-+ error = aa_move_mount(subj_cred, label, &old_path, path);
- path_put(&old_path);
-
- return error;
- }
-
--int aa_new_mount(struct aa_label *label, const char *dev_name,
-- const struct path *path, const char *type, unsigned long flags,
-- void *data)
-+int aa_new_mount(const struct cred *subj_cred, struct aa_label *label,
-+ const char *dev_name, const struct path *path,
-+ const char *type, unsigned long flags, void *data)
- {
- struct aa_profile *profile;
- char *buffer = NULL, *dev_buffer = NULL;
-@@ -549,12 +575,14 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
- goto out;
- }
- error = fn_for_each_confined(label, profile,
-- match_mnt(profile, path, buffer, dev_path, dev_buffer,
-+ match_mnt(subj_cred, profile, path, buffer,
-+ dev_path, dev_buffer,
- type, flags, data, binary));
- } else {
- error = fn_for_each_confined(label, profile,
-- match_mnt_path_str(profile, path, buffer, dev_name,
-- type, flags, data, binary, NULL));
-+ match_mnt_path_str(subj_cred, profile, path,
-+ buffer, dev_name,
-+ type, flags, data, binary, NULL));
- }
-
- out:
-@@ -566,7 +594,8 @@ out:
- return error;
- }
-
--static int profile_umount(struct aa_profile *profile, const struct path *path,
-+static int profile_umount(const struct cred *subj_cred,
-+ struct aa_profile *profile, const struct path *path,
- char *buffer)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
-@@ -595,11 +624,13 @@ static int profile_umount(struct aa_profile *profile, const struct path *path,
- error = -EACCES;
-
- audit:
-- return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
-+ return audit_mount(subj_cred, profile, OP_UMOUNT, name, NULL, NULL,
-+ NULL, 0, NULL,
- AA_MAY_UMOUNT, &perms, info, error);
- }
-
--int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
-+int aa_umount(const struct cred *subj_cred, struct aa_label *label,
-+ struct vfsmount *mnt, int flags)
- {
- struct aa_profile *profile;
- char *buffer = NULL;
-@@ -614,7 +645,7 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
- return -ENOMEM;
-
- error = fn_for_each_confined(label, profile,
-- profile_umount(profile, &path, buffer));
-+ profile_umount(subj_cred, profile, &path, buffer));
- aa_put_buffer(buffer);
-
- return error;
-@@ -624,7 +655,8 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
- *
- * Returns: label for transition or ERR_PTR. Does not return NULL
- */
--static struct aa_label *build_pivotroot(struct aa_profile *profile,
-+static struct aa_label *build_pivotroot(const struct cred *subj_cred,
-+ struct aa_profile *profile,
- const struct path *new_path,
- char *new_buffer,
- const struct path *old_path,
-@@ -669,7 +701,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
- error = 0;
-
- audit:
-- error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
-+ error = audit_mount(subj_cred, profile, OP_PIVOTROOT, new_name,
-+ old_name,
- NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
- &perms, info, error);
- if (error)
-@@ -678,7 +711,8 @@ audit:
- return aa_get_newest_label(&profile->label);
- }
-
--int aa_pivotroot(struct aa_label *label, const struct path *old_path,
-+int aa_pivotroot(const struct cred *subj_cred, struct aa_label *label,
-+ const struct path *old_path,
- const struct path *new_path)
- {
- struct aa_profile *profile;
-@@ -696,7 +730,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
- if (!old_buffer || !new_buffer)
- goto out;
- target = fn_label_build(label, profile, GFP_KERNEL,
-- build_pivotroot(profile, new_path, new_buffer,
-+ build_pivotroot(subj_cred, profile, new_path,
-+ new_buffer,
- old_path, old_buffer));
- if (!target) {
- info = "label build failed";
-@@ -722,7 +757,8 @@ out:
- fail:
- /* TODO: add back in auditing of new_name and old_name */
- error = fn_for_each(label, profile,
-- audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
-+ audit_mount(subj_cred, profile, OP_PIVOTROOT,
-+ NULL /*new_name */,
- NULL /* old_name */,
- NULL, NULL,
- 0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
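[Editor's note] The mount.c rework splits the old name-based move-mount entry point into a thin wrapper: resolve the source string to a path, then delegate to the new path-pair helper. A rough userspace analogue follows, with realpath() standing in for kern_path() and a bare -1 standing in for the kernel's errno conventions.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* the path-pair helper; the real one matches both paths against policy */
static int move_mount_paths(const char *from, const char *to)
{
	printf("check move %s -> %s\n", from, to);
	return 0;
}

/* legacy entry point: resolve the name, then delegate */
static int move_mount_old(const char *orig_name, const char *to)
{
	char resolved[PATH_MAX];

	if (!orig_name || !*orig_name)
		return -1;			/* -EINVAL in the kernel */
	if (!realpath(orig_name, resolved))	/* kern_path() analogue */
		return -1;
	return move_mount_paths(resolved, to);
}

int main(void)
{
	return move_mount_old("/tmp", "/mnt");
}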
-diff --git a/security/apparmor/net.c b/security/apparmor/net.c
-index 788be1609a865..704c171232ab4 100644
---- a/security/apparmor/net.c
-+++ b/security/apparmor/net.c
-@@ -71,6 +71,7 @@ static const char * const net_mask_names[] = {
- void audit_net_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
- if (address_family_names[sa->u.net->family])
- audit_log_format(ab, " family=\"%s\"",
-@@ -78,35 +79,36 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
- else
- audit_log_format(ab, " family=\"unknown(%d)\"",
- sa->u.net->family);
-- if (sock_type_names[aad(sa)->net.type])
-+ if (sock_type_names[ad->net.type])
- audit_log_format(ab, " sock_type=\"%s\"",
-- sock_type_names[aad(sa)->net.type]);
-+ sock_type_names[ad->net.type]);
- else
- audit_log_format(ab, " sock_type=\"unknown(%d)\"",
-- aad(sa)->net.type);
-- audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
-+ ad->net.type);
-+ audit_log_format(ab, " protocol=%d", ad->net.protocol);
-
-- if (aad(sa)->request & NET_PERMS_MASK) {
-+ if (ad->request & NET_PERMS_MASK) {
- audit_log_format(ab, " requested_mask=");
-- aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
-+ aa_audit_perm_mask(ab, ad->request, NULL, 0,
- net_mask_names, NET_PERMS_MASK);
-
-- if (aad(sa)->denied & NET_PERMS_MASK) {
-+ if (ad->denied & NET_PERMS_MASK) {
- audit_log_format(ab, " denied_mask=");
-- aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
-+ aa_audit_perm_mask(ab, ad->denied, NULL, 0,
- net_mask_names, NET_PERMS_MASK);
- }
- }
-- if (aad(sa)->peer) {
-+ if (ad->peer) {
- audit_log_format(ab, " peer=");
-- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
-+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
- }
-
- /* Generic af perm */
--int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
-- u32 request, u16 family, int type)
-+int aa_profile_af_perm(struct aa_profile *profile,
-+ struct apparmor_audit_data *ad, u32 request, u16 family,
-+ int type)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
-@@ -130,21 +132,23 @@ int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
- perms = *aa_lookup_perms(&rules->policy, state);
- aa_apply_modes_to_perms(profile, &perms);
-
-- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
-+ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
- }
-
--int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
-- int type, int protocol)
-+int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
-+ const char *op, u32 request, u16 family, int type, int protocol)
- {
- struct aa_profile *profile;
-- DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
-+ DEFINE_AUDIT_NET(ad, op, NULL, family, type, protocol);
-
- return fn_for_each_confined(label, profile,
-- aa_profile_af_perm(profile, &sa, request, family,
-+ aa_profile_af_perm(profile, &ad, request, family,
- type));
- }
-
--static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
-+static int aa_label_sk_perm(const struct cred *subj_cred,
-+ struct aa_label *label,
-+ const char *op, u32 request,
- struct sock *sk)
- {
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-@@ -155,10 +159,11 @@ static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
-
- if (ctx->label != kernel_t && !unconfined(label)) {
- struct aa_profile *profile;
-- DEFINE_AUDIT_SK(sa, op, sk);
-+ DEFINE_AUDIT_SK(ad, op, sk);
-
-+ ad.subj_cred = subj_cred;
- error = fn_for_each_confined(label, profile,
-- aa_profile_af_sk_perm(profile, &sa, request, sk));
-+ aa_profile_af_sk_perm(profile, &ad, request, sk));
- }
-
- return error;
-@@ -174,21 +179,21 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk)
-
- /* TODO: switch to begin_current_label ???? */
- label = begin_current_label_crit_section();
-- error = aa_label_sk_perm(label, op, request, sk);
-+ error = aa_label_sk_perm(current_cred(), label, op, request, sk);
- end_current_label_crit_section(label);
-
- return error;
- }
-
-
--int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
-- struct socket *sock)
-+int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
-+ const char *op, u32 request, struct socket *sock)
- {
- AA_BUG(!label);
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
-
-- return aa_label_sk_perm(label, op, request, sock->sk);
-+ return aa_label_sk_perm(subj_cred, label, op, request, sock->sk);
- }
-
- #ifdef CONFIG_NETWORK_SECMARK
-@@ -214,7 +219,7 @@ static int apparmor_secmark_init(struct aa_secmark *secmark)
- }
-
- static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
-- struct common_audit_data *sa)
-+ struct apparmor_audit_data *ad)
- {
- int i, ret;
- struct aa_perms perms = { };
-@@ -245,17 +250,17 @@ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
-
- aa_apply_modes_to_perms(profile, &perms);
-
-- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
-+ return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
- }
-
- int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
- u32 secid, const struct sock *sk)
- {
- struct aa_profile *profile;
-- DEFINE_AUDIT_SK(sa, op, sk);
-+ DEFINE_AUDIT_SK(ad, op, sk);
-
- return fn_for_each_confined(label, profile,
- aa_secmark_perm(profile, request, secid,
-- &sa));
-+ &ad));
- }
- #endif
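[Editor's note] The net.c checks keep their shape through the rename: every profile attached to the label is evaluated and the first non-zero error wins. A compact sketch of that fn_for_each_confined()-style loop is below, over stub profile structures rather than real AppArmor labels.

#include <stdio.h>

struct profile { const char *name; int allows; };
struct label   { const struct profile *profiles; int count; };

static int profile_af_perm(const struct profile *p, unsigned int request)
{
	(void)request;
	return p->allows ? 0 : -13;	/* -EACCES */
}

/* first error wins, as with fn_for_each_confined() */
static int label_af_perm(const struct label *label, unsigned int request)
{
	int i, error = 0;

	for (i = 0; i < label->count && !error; i++)
		error = profile_af_perm(&label->profiles[i], request);
	return error;
}

int main(void)
{
	const struct profile ps[] = { { "p1", 1 }, { "p2", 0 } };
	struct label l = { ps, 2 };

	printf("error: %d\n", label_af_perm(&l, 0x4));
	return 0;
}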
-diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
-index b38f7b2a5e1d5..8a07793ce1032 100644
---- a/security/apparmor/policy.c
-+++ b/security/apparmor/policy.c
-@@ -255,6 +255,7 @@ void aa_free_profile(struct aa_profile *profile)
-
- aa_put_ns(profile->ns);
- kfree_sensitive(profile->rename);
-+ kfree_sensitive(profile->disconnected);
-
- free_attachment(&profile->attach);
-
-@@ -285,6 +286,7 @@ void aa_free_profile(struct aa_profile *profile)
- /**
- * aa_alloc_profile - allocate, initialize and return a new profile
- * @hname: name of the profile (NOT NULL)
-+ * @proxy: proxy to use, or NULL to allocate a new one
- * @gfp: allocation type
- *
- * Returns: refcounted profile or NULL on failure
-@@ -721,16 +723,17 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
- static void audit_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
-- if (aad(sa)->iface.ns) {
-+ if (ad->iface.ns) {
- audit_log_format(ab, " ns=");
-- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
-+ audit_log_untrustedstring(ab, ad->iface.ns);
- }
- }
-
- /**
- * audit_policy - Do auditing of policy changes
-- * @label: label to check if it can manage policy
-+ * @subj_label: label to check if it can manage policy
- * @op: policy operation being performed
- * @ns_name: name of namespace being manipulated
- * @name: name of profile being manipulated (NOT NULL)
-@@ -739,19 +742,19 @@ static void audit_cb(struct audit_buffer *ab, void *va)
- *
- * Returns: the error to be returned after audit is done
- */
--static int audit_policy(struct aa_label *label, const char *op,
-+static int audit_policy(struct aa_label *subj_label, const char *op,
- const char *ns_name, const char *name,
- const char *info, int error)
- {
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
-
-- aad(&sa)->iface.ns = ns_name;
-- aad(&sa)->name = name;
-- aad(&sa)->info = info;
-- aad(&sa)->error = error;
-- aad(&sa)->label = label;
-+ ad.iface.ns = ns_name;
-+ ad.name = name;
-+ ad.info = info;
-+ ad.error = error;
-+ ad.subj_label = subj_label;
-
-- aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
-+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &ad, audit_cb);
-
- return error;
- }
-@@ -759,31 +762,35 @@ static int audit_policy(struct aa_label *label, const char *op,
- /* don't call out to other LSMs in the stack for apparmor policy admin
- * permissions
- */
--static int policy_ns_capable(struct aa_label *label,
-+static int policy_ns_capable(const struct cred *subj_cred,
-+ struct aa_label *label,
- struct user_namespace *userns, int cap)
- {
- int err;
-
- /* check for MAC_ADMIN cap in cred */
-- err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
-+ err = cap_capable(subj_cred, userns, cap, CAP_OPT_NONE);
- if (!err)
-- err = aa_capable(label, cap, CAP_OPT_NONE);
-+ err = aa_capable(subj_cred, label, cap, CAP_OPT_NONE);
-
- return err;
- }
-
- /**
- * aa_policy_view_capable - check if viewing policy in at @ns is allowed
-- * label: label that is trying to view policy in ns
-- * ns: namespace being viewed by @label (may be NULL if @label's ns)
-+ * @subj_cred: cred of subject
-+ * @label: label that is trying to view policy in ns
-+ * @ns: namespace being viewed by @label (may be NULL if @label's ns)
-+ *
- * Returns: true if viewing policy is allowed
- *
- * If @ns is NULL then the namespace being viewed is assumed to be the
- * tasks current namespace.
- */
--bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
-+bool aa_policy_view_capable(const struct cred *subj_cred,
-+ struct aa_label *label, struct aa_ns *ns)
- {
-- struct user_namespace *user_ns = current_user_ns();
-+ struct user_namespace *user_ns = subj_cred->user_ns;
- struct aa_ns *view_ns = labels_view(label);
- bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
- in_egroup_p(make_kgid(user_ns, 0));
-@@ -800,15 +807,17 @@ bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
- return response;
- }
-
--bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
-+bool aa_policy_admin_capable(const struct cred *subj_cred,
-+ struct aa_label *label, struct aa_ns *ns)
- {
-- struct user_namespace *user_ns = current_user_ns();
-- bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
-+ struct user_namespace *user_ns = subj_cred->user_ns;
-+ bool capable = policy_ns_capable(subj_cred, label, user_ns,
-+ CAP_MAC_ADMIN) == 0;
-
- AA_DEBUG("cap_mac_admin? %d\n", capable);
- AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
-
-- return aa_policy_view_capable(label, ns) && capable &&
-+ return aa_policy_view_capable(subj_cred, label, ns) && capable &&
- !aa_g_lock_policy;
- }
-
-@@ -818,7 +827,7 @@ bool aa_current_policy_view_capable(struct aa_ns *ns)
- bool res;
-
- label = __begin_current_label_crit_section();
-- res = aa_policy_view_capable(label, ns);
-+ res = aa_policy_view_capable(current_cred(), label, ns);
- __end_current_label_crit_section(label);
-
- return res;
-@@ -830,7 +839,7 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
- bool res;
-
- label = __begin_current_label_crit_section();
-- res = aa_policy_admin_capable(label, ns);
-+ res = aa_policy_admin_capable(current_cred(), label, ns);
- __end_current_label_crit_section(label);
-
- return res;
-@@ -838,12 +847,15 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
-
- /**
- * aa_may_manage_policy - can the current task manage policy
-+ * @subj_cred: subject's cred
- * @label: label to check if it can manage policy
-+ * @ns: namespace being managed by @label (may be NULL if @label's ns)
- * @mask: contains the policy manipulation operation being done
- *
- * Returns: 0 if the task is allowed to manipulate policy else error
- */
--int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
-+int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label,
-+ struct aa_ns *ns, u32 mask)
- {
- const char *op;
-
-@@ -859,7 +871,7 @@ int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
- return audit_policy(label, op, NULL, NULL, "policy_locked",
- -EACCES);
-
-- if (!aa_policy_admin_capable(label, ns))
-+ if (!aa_policy_admin_capable(subj_cred, label, ns))
- return audit_policy(label, op, NULL, NULL, "not policy admin",
- -EACCES);
-
-@@ -950,11 +962,11 @@ static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
-
- /**
- * __lookup_replace - lookup replacement information for a profile
-- * @ns - namespace the lookup occurs in
-- * @hname - name of profile to lookup
-- * @noreplace - true if not replacing an existing profile
-- * @p - Returns: profile to be replaced
-- * @info - Returns: info string on why lookup failed
-+ * @ns: namespace the lookup occurs in
-+ * @hname: name of profile to lookup
-+ * @noreplace: true if not replacing an existing profile
-+ * @p: Returns - profile to be replaced
-+ * @info: Returns - info string on why lookup failed
- *
- * Returns: profile to replace (no ref) on success else ptr error
- */
-diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
-index 8b8846073e142..dbc83455d900e 100644
---- a/security/apparmor/policy_unpack.c
-+++ b/security/apparmor/policy_unpack.c
-@@ -34,17 +34,18 @@
- static void audit_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
-- if (aad(sa)->iface.ns) {
-+ if (ad->iface.ns) {
- audit_log_format(ab, " ns=");
-- audit_log_untrustedstring(ab, aad(sa)->iface.ns);
-+ audit_log_untrustedstring(ab, ad->iface.ns);
- }
-- if (aad(sa)->name) {
-+ if (ad->name) {
- audit_log_format(ab, " name=");
-- audit_log_untrustedstring(ab, aad(sa)->name);
-+ audit_log_untrustedstring(ab, ad->name);
- }
-- if (aad(sa)->iface.pos)
-- audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
-+ if (ad->iface.pos)
-+ audit_log_format(ab, " offset=%ld", ad->iface.pos);
- }
-
- /**
-@@ -63,18 +64,18 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
- int error)
- {
- struct aa_profile *profile = labels_profile(aa_current_raw_label());
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
- if (e)
-- aad(&sa)->iface.pos = e->pos - e->start;
-- aad(&sa)->iface.ns = ns_name;
-+ ad.iface.pos = e->pos - e->start;
-+ ad.iface.ns = ns_name;
- if (new)
-- aad(&sa)->name = new->base.hname;
-+ ad.name = new->base.hname;
- else
-- aad(&sa)->name = name;
-- aad(&sa)->info = info;
-- aad(&sa)->error = error;
-+ ad.name = name;
-+ ad.info = info;
-+ ad.error = error;
-
-- return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
-+ return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
- }
-
- void __aa_loaddata_update(struct aa_loaddata *data, long revision)
-@@ -807,7 +808,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
- const char *info = "failed to unpack profile";
- size_t ns_len;
- struct rhashtable_params params = { 0 };
-- char *key = NULL;
-+ char *key = NULL, *disconnected = NULL;
- struct aa_data *data;
- int error = -EPROTO;
- kernel_cap_t tmpcap;
-@@ -873,7 +874,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
- }
-
- /* disconnected attachment string is optional */
-- (void) aa_unpack_str(e, &profile->disconnected, "disconnected");
-+ (void) aa_unpack_strdup(e, &disconnected, "disconnected");
-+ profile->disconnected = disconnected;
-
- /* per profile debug flags (complain, audit) */
- if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
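[Editor's note] The policy.c and policy_unpack.c hunks work as a pair: unpacking now duplicates the optional "disconnected" attachment string so the profile owns its own copy, and aa_free_profile() frees it, rather than leaving the profile pointing into a raw policy blob with a different lifetime. A plain-C sketch of that ownership transfer, with strdup()/free() standing in for aa_unpack_strdup()/kfree_sensitive():

#include <stdlib.h>
#include <string.h>

struct profile { char *disconnected; };

/* duplicate the optional string so the profile owns its copy */
static int unpack_profile(struct profile *p, const char *blob_str)
{
	p->disconnected = blob_str ? strdup(blob_str) : NULL;
	return (blob_str && !p->disconnected) ? -1 : 0;	/* -ENOMEM */
}

static void free_profile(struct profile *p)
{
	free(p->disconnected);	/* kfree_sensitive() in the kernel */
	p->disconnected = NULL;
}

int main(void)
{
	struct profile p = { 0 };
	char *blob = strdup("/fallback");

	unpack_profile(&p, blob);
	free(blob);		/* the raw blob may now go away safely */
	free_profile(&p);
	return 0;
}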
-diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
-index e859481648962..dcc94c3153d51 100644
---- a/security/apparmor/resource.c
-+++ b/security/apparmor/resource.c
-@@ -30,18 +30,20 @@ struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
- static void audit_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
- audit_log_format(ab, " rlimit=%s value=%lu",
-- rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
-- if (aad(sa)->peer) {
-+ rlim_names[ad->rlim.rlim], ad->rlim.max);
-+ if (ad->peer) {
- audit_log_format(ab, " peer=");
-- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
-+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
- }
-
- /**
- * audit_resource - audit setting resource limit
-+ * @subj_cred: cred setting the resource
- * @profile: profile being enforced (NOT NULL)
- * @resource: rlimit being audited
- * @value: value being set
-@@ -49,22 +51,24 @@ static void audit_cb(struct audit_buffer *ab, void *va)
- * @info: info being audited
- * @error: error value
- *
-- * Returns: 0 or sa->error else other error code on failure
-+ * Returns: 0 or ad->error, else another error code on failure
- */
--static int audit_resource(struct aa_profile *profile, unsigned int resource,
-+static int audit_resource(const struct cred *subj_cred,
-+ struct aa_profile *profile, unsigned int resource,
- unsigned long value, struct aa_label *peer,
- const char *info, int error)
- {
-- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
-+ DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
- OP_SETRLIMIT);
-
-- aad(&sa)->rlim.rlim = resource;
-- aad(&sa)->rlim.max = value;
-- aad(&sa)->peer = peer;
-- aad(&sa)->info = info;
-- aad(&sa)->error = error;
-+ ad.subj_cred = subj_cred;
-+ ad.rlim.rlim = resource;
-+ ad.rlim.max = value;
-+ ad.peer = peer;
-+ ad.info = info;
-+ ad.error = error;
-
-- return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
-+ return aa_audit(AUDIT_APPARMOR_AUTO, profile, &ad, audit_cb);
- }
-
- /**
-@@ -81,7 +85,8 @@ int aa_map_resource(int resource)
- return rlim_map[resource];
- }
-
--static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
-+static int profile_setrlimit(const struct cred *subj_cred,
-+ struct aa_profile *profile, unsigned int resource,
- struct rlimit *new_rlim)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
-@@ -91,22 +96,24 @@ static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
- if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
- rules->rlimits.limits[resource].rlim_max)
- e = -EACCES;
-- return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
-- e);
-+ return audit_resource(subj_cred, profile, resource, new_rlim->rlim_max,
-+ NULL, NULL, e);
- }
-
- /**
- * aa_task_setrlimit - test permission to set an rlimit
-- * @label - label confining the task (NOT NULL)
-- * @task - task the resource is being set on
-- * @resource - the resource being set
-- * @new_rlim - the new resource limit (NOT NULL)
-+ * @subj_cred: cred setting the limit
-+ * @label: label confining the task (NOT NULL)
-+ * @task: task the resource is being set on
-+ * @resource: the resource being set
-+ * @new_rlim: the new resource limit (NOT NULL)
- *
- * Control raising the process's hard limit.
- *
- * Returns: 0 or error code if setting resource failed
- */
--int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
-+int aa_task_setrlimit(const struct cred *subj_cred, struct aa_label *label,
-+ struct task_struct *task,
- unsigned int resource, struct rlimit *new_rlim)
- {
- struct aa_profile *profile;
-@@ -125,14 +132,15 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
- */
-
- if (label != peer &&
-- aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
-+ aa_capable(subj_cred, label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
- error = fn_for_each(label, profile,
-- audit_resource(profile, resource,
-+ audit_resource(subj_cred, profile, resource,
- new_rlim->rlim_max, peer,
- "cap_sys_resource", -EACCES));
- else
- error = fn_for_each_confined(label, profile,
-- profile_setrlimit(profile, resource, new_rlim));
-+ profile_setrlimit(subj_cred, profile, resource,
-+ new_rlim));
- aa_put_label(peer);
-
- return error;
-diff --git a/security/apparmor/task.c b/security/apparmor/task.c
-index 84d16a29bfcbc..0d7af707cccdd 100644
---- a/security/apparmor/task.c
-+++ b/security/apparmor/task.c
-@@ -208,70 +208,75 @@ static const char *audit_ptrace_mask(u32 mask)
- static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
- {
- struct common_audit_data *sa = va;
-+ struct apparmor_audit_data *ad = aad(sa);
-
-- if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
-+ if (ad->request & AA_PTRACE_PERM_MASK) {
- audit_log_format(ab, " requested_mask=\"%s\"",
-- audit_ptrace_mask(aad(sa)->request));
-+ audit_ptrace_mask(ad->request));
-
-- if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
-+ if (ad->denied & AA_PTRACE_PERM_MASK) {
- audit_log_format(ab, " denied_mask=\"%s\"",
-- audit_ptrace_mask(aad(sa)->denied));
-+ audit_ptrace_mask(ad->denied));
- }
- }
- audit_log_format(ab, " peer=");
-- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
-+ aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
-
- /* assumes check for RULE_MEDIATES is already done */
- /* TODO: conditionals */
--static int profile_ptrace_perm(struct aa_profile *profile,
-- struct aa_label *peer, u32 request,
-- struct common_audit_data *sa)
-+static int profile_ptrace_perm(const struct cred *cred,
-+ struct aa_profile *profile,
-+ struct aa_label *peer, u32 request,
-+ struct apparmor_audit_data *ad)
- {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
- struct aa_perms perms = { };
-
-- aad(sa)->peer = peer;
-+ ad->subj_cred = cred;
-+ ad->peer = peer;
- aa_profile_match_label(profile, rules, peer, AA_CLASS_PTRACE, request,
- &perms);
- aa_apply_modes_to_perms(profile, &perms);
-- return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
-+ return aa_check_perms(profile, &perms, request, ad, audit_ptrace_cb);
- }
-
--static int profile_tracee_perm(struct aa_profile *tracee,
-+static int profile_tracee_perm(const struct cred *cred,
-+ struct aa_profile *tracee,
- struct aa_label *tracer, u32 request,
-- struct common_audit_data *sa)
-+ struct apparmor_audit_data *ad)
- {
- if (profile_unconfined(tracee) || unconfined(tracer) ||
- !ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE))
- return 0;
-
-- return profile_ptrace_perm(tracee, tracer, request, sa);
-+ return profile_ptrace_perm(cred, tracee, tracer, request, ad);
- }
-
--static int profile_tracer_perm(struct aa_profile *tracer,
-+static int profile_tracer_perm(const struct cred *cred,
-+ struct aa_profile *tracer,
- struct aa_label *tracee, u32 request,
-- struct common_audit_data *sa)
-+ struct apparmor_audit_data *ad)
- {
- if (profile_unconfined(tracer))
- return 0;
-
- if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE))
-- return profile_ptrace_perm(tracer, tracee, request, sa);
-+ return profile_ptrace_perm(cred, tracer, tracee, request, ad);
-
- /* profile uses the old style capability check for ptrace */
- if (&tracer->label == tracee)
- return 0;
-
-- aad(sa)->label = &tracer->label;
-- aad(sa)->peer = tracee;
-- aad(sa)->request = 0;
-- aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
-- CAP_OPT_NONE);
-+ ad->subj_label = &tracer->label;
-+ ad->peer = tracee;
-+ ad->request = 0;
-+ ad->error = aa_capable(cred, &tracer->label, CAP_SYS_PTRACE,
-+ CAP_OPT_NONE);
-
-- return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
-+ return aa_audit(AUDIT_APPARMOR_AUTO, tracer, ad, audit_ptrace_cb);
- }
-
- /**
-@@ -282,7 +287,8 @@ static int profile_tracer_perm(struct aa_profile *tracer,
- *
- * Returns: %0 else error code if permission denied or error
- */
--int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
-+int aa_may_ptrace(const struct cred *tracer_cred, struct aa_label *tracer,
-+ const struct cred *tracee_cred, struct aa_label *tracee,
- u32 request)
- {
- struct aa_profile *profile;
-@@ -290,6 +296,8 @@ int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
- DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_PTRACE, OP_PTRACE);
-
- return xcheck_labels(tracer, tracee, profile,
-- profile_tracer_perm(profile, tracee, request, &sa),
-- profile_tracee_perm(profile, tracer, xrequest, &sa));
-+ profile_tracer_perm(tracer_cred, profile, tracee,
-+ request, &sa),
-+ profile_tracee_perm(tracee_cred, profile, tracer,
-+ xrequest, &sa));
- }
-diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
-index 232191ee09e31..b6e074ac02273 100644
---- a/security/integrity/Kconfig
-+++ b/security/integrity/Kconfig
-@@ -68,8 +68,6 @@ config INTEGRITY_MACHINE_KEYRING
- depends on INTEGRITY_ASYMMETRIC_KEYS
- depends on SYSTEM_BLACKLIST_KEYRING
- depends on LOAD_UEFI_KEYS || LOAD_PPC_KEYS
-- select INTEGRITY_CA_MACHINE_KEYRING if LOAD_PPC_KEYS
-- select INTEGRITY_CA_MACHINE_KEYRING_MAX if LOAD_PPC_KEYS
- help
- If set, provide a keyring to which Machine Owner Keys (MOK) may
- be added. This keyring shall contain just MOK keys. Unlike keys
-diff --git a/security/integrity/iint.c b/security/integrity/iint.c
-index a462df827de2d..27ea19fb1f54c 100644
---- a/security/integrity/iint.c
-+++ b/security/integrity/iint.c
-@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
- return iint;
- }
-
--static void iint_free(struct integrity_iint_cache *iint)
-+#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
-+
-+/*
-+ * It is not clear that IMA should be nested at all, but as long as it measures
-+ * files both on overlayfs and on underlying fs, we need to annotate the iint
-+ * mutex to avoid lockdep false positives related to IMA + overlayfs.
-+ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
-+ */
-+static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
-+ struct inode *inode)
-+{
-+#ifdef CONFIG_LOCKDEP
-+ static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
-+
-+ int depth = inode->i_sb->s_stack_depth;
-+
-+ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
-+ depth = 0;
-+
-+ lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
-+#endif
-+}
-+
-+static void iint_init_always(struct integrity_iint_cache *iint,
-+ struct inode *inode)
- {
-- kfree(iint->ima_hash);
- iint->ima_hash = NULL;
- iint->version = 0;
- iint->flags = 0UL;
-@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
- iint->ima_creds_status = INTEGRITY_UNKNOWN;
- iint->evm_status = INTEGRITY_UNKNOWN;
- iint->measured_pcrs = 0;
-+ mutex_init(&iint->mutex);
-+ iint_lockdep_annotate(iint, inode);
-+}
-+
-+static void iint_free(struct integrity_iint_cache *iint)
-+{
-+ kfree(iint->ima_hash);
-+ mutex_destroy(&iint->mutex);
- kmem_cache_free(iint_cache, iint);
- }
-
-@@ -104,6 +135,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
- if (!iint)
- return NULL;
-
-+ iint_init_always(iint, inode);
-+
- write_lock(&integrity_iint_lock);
-
- p = &integrity_iint_tree.rb_node;
-@@ -153,25 +186,18 @@ void integrity_inode_free(struct inode *inode)
- iint_free(iint);
- }
-
--static void init_once(void *foo)
-+static void iint_init_once(void *foo)
- {
- struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
-
- memset(iint, 0, sizeof(*iint));
-- iint->ima_file_status = INTEGRITY_UNKNOWN;
-- iint->ima_mmap_status = INTEGRITY_UNKNOWN;
-- iint->ima_bprm_status = INTEGRITY_UNKNOWN;
-- iint->ima_read_status = INTEGRITY_UNKNOWN;
-- iint->ima_creds_status = INTEGRITY_UNKNOWN;
-- iint->evm_status = INTEGRITY_UNKNOWN;
-- mutex_init(&iint->mutex);
- }
-
- static int __init integrity_iintcache_init(void)
- {
- iint_cache =
- kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
-- 0, SLAB_PANIC, init_once);
-+ 0, SLAB_PANIC, iint_init_once);
- return 0;
- }
- DEFINE_LSM(integrity) = {
-diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
-index 452e80b541e54..597ea0c4d72f7 100644
---- a/security/integrity/ima/ima_api.c
-+++ b/security/integrity/ima/ima_api.c
-@@ -243,6 +243,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
- {
- const char *audit_cause = "failed";
- struct inode *inode = file_inode(file);
-+ struct inode *real_inode = d_real_inode(file_dentry(file));
- const char *filename = file->f_path.dentry->d_name.name;
- struct ima_max_digest_data hash;
- struct kstat stat;
-@@ -302,6 +303,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
- iint->ima_hash = tmpbuf;
- memcpy(iint->ima_hash, &hash, length);
- iint->version = i_version;
-+ if (real_inode != inode) {
-+ iint->real_ino = real_inode->i_ino;
-+ iint->real_dev = real_inode->i_sb->s_dev;
-+ }
-
- /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
- if (!result)
-diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
-index 365db0e43d7c2..cc1217ac2c6fa 100644
---- a/security/integrity/ima/ima_main.c
-+++ b/security/integrity/ima/ima_main.c
-@@ -25,6 +25,7 @@
- #include <linux/xattr.h>
- #include <linux/ima.h>
- #include <linux/fs.h>
-+#include <linux/iversion.h>
-
- #include "ima.h"
-
-@@ -207,7 +208,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
- u32 secid, char *buf, loff_t size, int mask,
- enum ima_hooks func)
- {
-- struct inode *inode = file_inode(file);
-+ struct inode *backing_inode, *inode = file_inode(file);
- struct integrity_iint_cache *iint = NULL;
- struct ima_template_desc *template_desc = NULL;
- char *pathbuf = NULL;
-@@ -284,6 +285,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
- iint->measured_pcrs = 0;
- }
-
-+ /* Detect and re-evaluate changes made to the backing file. */
-+ backing_inode = d_real_inode(file_dentry(file));
-+ if (backing_inode != inode &&
-+ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
-+ if (!IS_I_VERSION(backing_inode) ||
-+ backing_inode->i_sb->s_dev != iint->real_dev ||
-+ backing_inode->i_ino != iint->real_ino ||
-+ !inode_eq_iversion(backing_inode, iint->version)) {
-+ iint->flags &= ~IMA_DONE_MASK;
-+ iint->measured_pcrs = 0;
-+ }
-+ }
-+
- /* Determine if already appraised/measured based on bitmask
- * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
- * IMA_AUDIT, IMA_AUDITED)
-diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
-index d7553c93f5c0d..9561db7cf6b42 100644
---- a/security/integrity/integrity.h
-+++ b/security/integrity/integrity.h
-@@ -164,6 +164,8 @@ struct integrity_iint_cache {
- unsigned long flags;
- unsigned long measured_pcrs;
- unsigned long atomic_flags;
-+ unsigned long real_ino;
-+ dev_t real_dev;
- enum integrity_status ima_file_status:4;
- enum integrity_status ima_mmap_status:4;
- enum integrity_status ima_bprm_status:4;
-diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
-index 85fb5c22529a7..fee1ab2c734d3 100644
---- a/security/keys/trusted-keys/trusted_core.c
-+++ b/security/keys/trusted-keys/trusted_core.c
-@@ -358,17 +358,17 @@ static int __init init_trusted(void)
- if (!get_random)
- get_random = kernel_get_random;
-
-- static_call_update(trusted_key_seal,
-- trusted_key_sources[i].ops->seal);
-- static_call_update(trusted_key_unseal,
-- trusted_key_sources[i].ops->unseal);
-- static_call_update(trusted_key_get_random,
-- get_random);
-- trusted_key_exit = trusted_key_sources[i].ops->exit;
-- migratable = trusted_key_sources[i].ops->migratable;
--
- ret = trusted_key_sources[i].ops->init();
-- if (!ret)
-+ if (!ret) {
-+ static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
-+ static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
-+ static_call_update(trusted_key_get_random, get_random);
-+
-+ trusted_key_exit = trusted_key_sources[i].ops->exit;
-+ migratable = trusted_key_sources[i].ops->migratable;
-+ }
-+
-+ if (!ret || ret != -ENODEV)
- break;
- }
-
-diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
-index ac3e270ade69b..aa3d477de6db5 100644
---- a/security/keys/trusted-keys/trusted_tee.c
-+++ b/security/keys/trusted-keys/trusted_tee.c
-@@ -65,24 +65,16 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
- int ret;
- struct tee_ioctl_invoke_arg inv_arg;
- struct tee_param param[4];
-- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
-+ struct tee_shm *reg_shm = NULL;
-
- memset(&inv_arg, 0, sizeof(inv_arg));
- memset(&param, 0, sizeof(param));
-
-- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
-- p->key_len);
-- if (IS_ERR(reg_shm_in)) {
-- dev_err(pvt_data.dev, "key shm register failed\n");
-- return PTR_ERR(reg_shm_in);
-- }
--
-- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
-- sizeof(p->blob));
-- if (IS_ERR(reg_shm_out)) {
-- dev_err(pvt_data.dev, "blob shm register failed\n");
-- ret = PTR_ERR(reg_shm_out);
-- goto out;
-+ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
-+ sizeof(p->key) + sizeof(p->blob));
-+ if (IS_ERR(reg_shm)) {
-+ dev_err(pvt_data.dev, "shm register failed\n");
-+ return PTR_ERR(reg_shm);
- }
-
- inv_arg.func = TA_CMD_SEAL;
-@@ -90,13 +82,13 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
- inv_arg.num_params = 4;
-
- param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
-- param[0].u.memref.shm = reg_shm_in;
-+ param[0].u.memref.shm = reg_shm;
- param[0].u.memref.size = p->key_len;
- param[0].u.memref.shm_offs = 0;
- param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
-- param[1].u.memref.shm = reg_shm_out;
-+ param[1].u.memref.shm = reg_shm;
- param[1].u.memref.size = sizeof(p->blob);
-- param[1].u.memref.shm_offs = 0;
-+ param[1].u.memref.shm_offs = sizeof(p->key);
-
- ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
- if ((ret < 0) || (inv_arg.ret != 0)) {
-@@ -107,11 +99,7 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
- p->blob_len = param[1].u.memref.size;
- }
-
--out:
-- if (reg_shm_out)
-- tee_shm_free(reg_shm_out);
-- if (reg_shm_in)
-- tee_shm_free(reg_shm_in);
-+ tee_shm_free(reg_shm);
-
- return ret;
- }
-@@ -124,24 +112,16 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
- int ret;
- struct tee_ioctl_invoke_arg inv_arg;
- struct tee_param param[4];
-- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
-+ struct tee_shm *reg_shm = NULL;
-
- memset(&inv_arg, 0, sizeof(inv_arg));
- memset(&param, 0, sizeof(param));
-
-- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
-- p->blob_len);
-- if (IS_ERR(reg_shm_in)) {
-- dev_err(pvt_data.dev, "blob shm register failed\n");
-- return PTR_ERR(reg_shm_in);
-- }
--
-- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
-- sizeof(p->key));
-- if (IS_ERR(reg_shm_out)) {
-- dev_err(pvt_data.dev, "key shm register failed\n");
-- ret = PTR_ERR(reg_shm_out);
-- goto out;
-+ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
-+ sizeof(p->key) + sizeof(p->blob));
-+ if (IS_ERR(reg_shm)) {
-+ dev_err(pvt_data.dev, "shm register failed\n");
-+ return PTR_ERR(reg_shm);
- }
-
- inv_arg.func = TA_CMD_UNSEAL;
-@@ -149,11 +129,11 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
- inv_arg.num_params = 4;
-
- param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
-- param[0].u.memref.shm = reg_shm_in;
-+ param[0].u.memref.shm = reg_shm;
- param[0].u.memref.size = p->blob_len;
-- param[0].u.memref.shm_offs = 0;
-+ param[0].u.memref.shm_offs = sizeof(p->key);
- param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
-- param[1].u.memref.shm = reg_shm_out;
-+ param[1].u.memref.shm = reg_shm;
- param[1].u.memref.size = sizeof(p->key);
- param[1].u.memref.shm_offs = 0;
-
-@@ -166,11 +146,7 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
- p->key_len = param[1].u.memref.size;
- }
-
--out:
-- if (reg_shm_out)
-- tee_shm_free(reg_shm_out);
-- if (reg_shm_in)
-- tee_shm_free(reg_shm_in);
-+ tee_shm_free(reg_shm);
-
- return ret;
- }
-diff --git a/sound/core/info.c b/sound/core/info.c
-index 0b2f04dcb5897..e2f302e55bbb2 100644
---- a/sound/core/info.c
-+++ b/sound/core/info.c
-@@ -56,7 +56,7 @@ struct snd_info_private_data {
- };
-
- static int snd_info_version_init(void);
--static void snd_info_disconnect(struct snd_info_entry *entry);
-+static void snd_info_clear_entries(struct snd_info_entry *entry);
-
- /*
-
-@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
- {
- if (!card)
- return;
-- mutex_lock(&info_mutex);
-+
- proc_remove(card->proc_root_link);
-- card->proc_root_link = NULL;
- if (card->proc_root)
-- snd_info_disconnect(card->proc_root);
-+ proc_remove(card->proc_root->p);
-+
-+ mutex_lock(&info_mutex);
-+ if (card->proc_root)
-+ snd_info_clear_entries(card->proc_root);
-+ card->proc_root_link = NULL;
-+ card->proc_root = NULL;
- mutex_unlock(&info_mutex);
- }
-
-@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
- }
- EXPORT_SYMBOL(snd_info_create_card_entry);
-
--static void snd_info_disconnect(struct snd_info_entry *entry)
-+static void snd_info_clear_entries(struct snd_info_entry *entry)
- {
- struct snd_info_entry *p;
-
- if (!entry->p)
- return;
- list_for_each_entry(p, &entry->children, list)
-- snd_info_disconnect(p);
-- proc_remove(entry->p);
-+ snd_info_clear_entries(p);
- entry->p = NULL;
- }
-
-@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
- if (!entry)
- return;
- if (entry->p) {
-+ proc_remove(entry->p);
- mutex_lock(&info_mutex);
-- snd_info_disconnect(entry);
-+ snd_info_clear_entries(entry);
- mutex_unlock(&info_mutex);
- }
-
-diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
-index 2633a4bb1d85d..214a0680524b0 100644
---- a/sound/hda/hdac_stream.c
-+++ b/sound/hda/hdac_stream.c
-@@ -354,8 +354,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
- struct hdac_stream *res = NULL;
-
- /* make a non-zero unique key for the substream */
-- int key = (substream->pcm->device << 16) | (substream->number << 2) |
-- (substream->stream + 1);
-+ int key = (substream->number << 2) | (substream->stream + 1);
-+
-+ if (substream->pcm)
-+ key |= (substream->pcm->device << 16);
-
- spin_lock_irq(&bus->reg_lock);
- list_for_each_entry(azx_dev, &bus->stream_list, list) {
-diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
-index 24a948baf1bc0..756fa0aa69bba 100644
---- a/sound/hda/intel-dsp-config.c
-+++ b/sound/hda/intel-dsp-config.c
-@@ -336,6 +336,12 @@ static const struct config_entry config_table[] = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- }
- },
-+ {
-+ .ident = "Google firmware",
-+ .matches = {
-+ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
-+ }
-+ },
- {}
- }
- },
-diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
-index c6031f7440996..3c157b006a5a2 100644
---- a/sound/pci/hda/cs35l41_hda.c
-+++ b/sound/pci/hda/cs35l41_hda.c
-@@ -570,7 +570,7 @@ static void cs35l41_hda_play_done(struct device *dev)
-
- dev_dbg(dev, "Play (Complete)\n");
-
-- cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1, NULL,
-+ cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1,
- cs35l41->firmware_running);
- if (cs35l41->firmware_running) {
- regmap_multi_reg_write(reg, cs35l41_hda_unmute_dsp,
-@@ -589,7 +589,7 @@ static void cs35l41_hda_pause_start(struct device *dev)
- dev_dbg(dev, "Pause (Start)\n");
-
- regmap_multi_reg_write(reg, cs35l41_hda_mute, ARRAY_SIZE(cs35l41_hda_mute));
-- cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0, NULL,
-+ cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0,
- cs35l41->firmware_running);
- }
-
-@@ -1668,8 +1668,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
- ret = component_add(cs35l41->dev, &cs35l41_hda_comp_ops);
- if (ret) {
- dev_err(cs35l41->dev, "Register component failed: %d\n", ret);
-- pm_runtime_disable(cs35l41->dev);
-- goto err;
-+ goto err_pm;
- }
-
- dev_info(cs35l41->dev, "Cirrus Logic CS35L41 (%x), Revision: %02X\n", regid, reg_revid);
-@@ -1677,6 +1676,7 @@ int cs35l41_hda_probe(struct device *dev, const char *device_name, int id, int i
- return 0;
-
- err_pm:
-+ pm_runtime_dont_use_autosuspend(cs35l41->dev);
- pm_runtime_disable(cs35l41->dev);
- pm_runtime_put_noidle(cs35l41->dev);
-
-@@ -1695,6 +1695,7 @@ void cs35l41_hda_remove(struct device *dev)
- struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
-
- pm_runtime_get_sync(cs35l41->dev);
-+ pm_runtime_dont_use_autosuspend(cs35l41->dev);
- pm_runtime_disable(cs35l41->dev);
-
- if (cs35l41->halo_initialized)
-diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
-index ca765ac4765f4..75148485b7553 100644
---- a/sound/pci/hda/hda_intel.c
-+++ b/sound/pci/hda/hda_intel.c
-@@ -2218,6 +2218,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
- SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
- /* https://bugs.launchpad.net/bugs/1821663 */
- SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
-+ /* KONTRON SinglePC may cause a stall at runtime resume */
-+ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
- {}
- };
- #endif /* CONFIG_PM */
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 9677c09cf7a98..758abe9dffd6d 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -1986,6 +1986,7 @@ enum {
- ALC887_FIXUP_ASUS_AUDIO,
- ALC887_FIXUP_ASUS_HMIC,
- ALCS1200A_FIXUP_MIC_VREF,
-+ ALC888VD_FIXUP_MIC_100VREF,
- };
-
- static void alc889_fixup_coef(struct hda_codec *codec,
-@@ -2539,6 +2540,13 @@ static const struct hda_fixup alc882_fixups[] = {
- {}
- }
- },
-+ [ALC888VD_FIXUP_MIC_100VREF] = {
-+ .type = HDA_FIXUP_PINCTLS,
-+ .v.pins = (const struct hda_pintbl[]) {
-+ { 0x18, PIN_VREF100 }, /* headset mic */
-+ {}
-+ }
-+ },
- };
-
- static const struct snd_pci_quirk alc882_fixup_tbl[] = {
-@@ -2608,6 +2616,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
- SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
-
- SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
-+ SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
- SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
- SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
-@@ -3255,6 +3264,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
- case 0x10ec0230:
- case 0x10ec0236:
- case 0x10ec0256:
-+ case 0x10ec0257:
- case 0x19e58326:
- alc_write_coef_idx(codec, 0x48, 0x0);
- alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
-@@ -3284,6 +3294,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
- case 0x10ec0230:
- case 0x10ec0236:
- case 0x10ec0256:
-+ case 0x10ec0257:
- case 0x19e58326:
- alc_write_coef_idx(codec, 0x48, 0xd011);
- alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
-@@ -6495,6 +6506,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
- case 0x10ec0236:
- case 0x10ec0255:
- case 0x10ec0256:
-+ case 0x10ec0257:
- case 0x19e58326:
- alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
- alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
-@@ -7262,8 +7274,10 @@ enum {
- ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
- ALC299_FIXUP_PREDATOR_SPK,
- ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
-+ ALC289_FIXUP_DELL_SPK1,
- ALC289_FIXUP_DELL_SPK2,
- ALC289_FIXUP_DUAL_SPK,
-+ ALC289_FIXUP_RTK_AMP_DUAL_SPK,
- ALC294_FIXUP_SPK2_TO_DAC1,
- ALC294_FIXUP_ASUS_DUAL_SPK,
- ALC285_FIXUP_THINKPAD_X1_GEN7,
-@@ -7363,6 +7377,8 @@ enum {
- ALC287_FIXUP_THINKPAD_I2S_SPK,
- ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
- ALC2XX_FIXUP_HEADSET_MIC,
-+ ALC289_FIXUP_DELL_CS35L41_SPI_2,
-+ ALC294_FIXUP_CS35L41_I2C_2,
- };
-
- /* A special fixup for Lenovo C940 and Yoga Duet 7;
-@@ -8589,6 +8605,15 @@ static const struct hda_fixup alc269_fixups[] = {
- .chained = true,
- .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
- },
-+ [ALC289_FIXUP_DELL_SPK1] = {
-+ .type = HDA_FIXUP_PINS,
-+ .v.pins = (const struct hda_pintbl[]) {
-+ { 0x14, 0x90170140 },
-+ { }
-+ },
-+ .chained = true,
-+ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
-+ },
- [ALC289_FIXUP_DELL_SPK2] = {
- .type = HDA_FIXUP_PINS,
- .v.pins = (const struct hda_pintbl[]) {
-@@ -8604,6 +8629,12 @@ static const struct hda_fixup alc269_fixups[] = {
- .chained = true,
- .chain_id = ALC289_FIXUP_DELL_SPK2
- },
-+ [ALC289_FIXUP_RTK_AMP_DUAL_SPK] = {
-+ .type = HDA_FIXUP_FUNC,
-+ .v.func = alc285_fixup_speaker2_to_dac1,
-+ .chained = true,
-+ .chain_id = ALC289_FIXUP_DELL_SPK1
-+ },
- [ALC294_FIXUP_SPK2_TO_DAC1] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc285_fixup_speaker2_to_dac1,
-@@ -9471,6 +9502,16 @@ static const struct hda_fixup alc269_fixups[] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_headset_mic,
- },
-+ [ALC289_FIXUP_DELL_CS35L41_SPI_2] = {
-+ .type = HDA_FIXUP_FUNC,
-+ .v.func = cs35l41_fixup_spi_two,
-+ .chained = true,
-+ .chain_id = ALC289_FIXUP_DUAL_SPK
-+ },
-+ [ALC294_FIXUP_CS35L41_I2C_2] = {
-+ .type = HDA_FIXUP_FUNC,
-+ .v.func = cs35l41_fixup_i2c_two,
-+ },
- };
-
- static const struct snd_pci_quirk alc269_fixup_tbl[] = {
-@@ -9581,13 +9622,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
- SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
- SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
-- SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
-- SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
-- SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2),
-- SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
-- SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
-- SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
-- SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
-+ SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
- SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
-@@ -9720,6 +9763,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
-+ SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
- SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
-@@ -9755,6 +9799,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
-+ SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
- SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
-@@ -9788,12 +9833,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
-+ SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
-+ SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
-+ SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
- SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
- SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
- SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
- SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
- SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-+ SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
- SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-@@ -9832,12 +9881,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
- SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
-+ SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
- SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
- SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-+ SND_PCI_QUIRK(0x1043, 0x1c03, "ASUS UM3406HA", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-+ SND_PCI_QUIRK(0x1043, 0x1c33, "ASUS UX5304MA", ALC245_FIXUP_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
- SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
- SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
-@@ -9848,6 +9902,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
- SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
-+ SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
- SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
-@@ -10707,22 +10762,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
- {0x12, 0x90a60130},
- {0x17, 0x90170110},
- {0x21, 0x03211020}),
-- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
-- {0x14, 0x90170110},
-- {0x21, 0x04211020}),
-- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
-- {0x14, 0x90170110},
-- {0x21, 0x04211030}),
-- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-- ALC295_STANDARD_PINS,
-- {0x17, 0x21014020},
-- {0x18, 0x21a19030}),
-- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-- ALC295_STANDARD_PINS,
-- {0x17, 0x21014040},
-- {0x18, 0x21a19050}),
-- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-- ALC295_STANDARD_PINS),
- SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC298_STANDARD_PINS,
- {0x17, 0x90170110}),
-@@ -10766,6 +10805,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
- SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
- {0x19, 0x40000000},
- {0x1b, 0x40000000}),
-+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
-+ {0x19, 0x40000000},
-+ {0x1b, 0x40000000}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x19, 0x40000000},
- {0x1a, 0x40000000}),
-diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
-index 4ec306cd2f476..2ec5fdc875b13 100644
---- a/sound/soc/codecs/cs35l41-lib.c
-+++ b/sound/soc/codecs/cs35l41-lib.c
-@@ -1192,8 +1192,28 @@ bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type)
- }
- EXPORT_SYMBOL_GPL(cs35l41_safe_reset);
-
-+/*
-+ * Enabling the CS35L41_SHD_BOOST_ACTV and CS35L41_SHD_BOOST_PASS shared boosts
-+ * also requires a call to cs35l41_mdsync_up(), but not before getting the
-+ * PLL Lock signal.
-+ *
-+ * PLL Lock seems to be triggered soon after snd_pcm_start() is executed and
-+ * SNDRV_PCM_TRIGGER_START command is processed, which happens (long) after the
-+ * SND_SOC_DAPM_PRE_PMU event handler is invoked as part of snd_pcm_prepare().
-+ *
-+ * This event handler is where cs35l41_global_enable() is normally called from,
-+ * but waiting for PLL Lock here will time out. Increasing the wait duration
-+ * will not help, as the only consequence of it would be to add an unnecessary
-+ * delay in the invocation of snd_pcm_start().
-+ *
-+ * Trying to move the wait into the SNDRV_PCM_TRIGGER_START callback is not a
-+ * solution either, as the trigger is executed in an IRQ-off atomic context.
-+ *
-+ * The current approach is to invoke cs35l41_mdsync_up() right after receiving
-+ * the PLL Lock interrupt, in the IRQ handler.
-+ */
- int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
-- int enable, struct completion *pll_lock, bool firmware_running)
-+ int enable, bool firmware_running)
- {
- int ret;
- unsigned int gpio1_func, pad_control, pwr_ctrl1, pwr_ctrl3, int_status, pup_pdn_mask;
-@@ -1203,11 +1223,6 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
- {CS35L41_GPIO_PAD_CONTROL, 0},
- {CS35L41_PWR_CTRL1, 0, 3000},
- };
-- struct reg_sequence cs35l41_mdsync_up_seq[] = {
-- {CS35L41_PWR_CTRL3, 0},
-- {CS35L41_PWR_CTRL1, 0x00000000, 3000},
-- {CS35L41_PWR_CTRL1, 0x00000001, 3000},
-- };
-
- pup_pdn_mask = enable ? CS35L41_PUP_DONE_MASK : CS35L41_PDN_DONE_MASK;
-
-@@ -1241,24 +1256,12 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
- cs35l41_mdsync_down_seq[0].def = pwr_ctrl3;
- cs35l41_mdsync_down_seq[1].def = pad_control;
- cs35l41_mdsync_down_seq[2].def = pwr_ctrl1;
-+
- ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_down_seq,
- ARRAY_SIZE(cs35l41_mdsync_down_seq));
-- if (!enable)
-- break;
--
-- if (!pll_lock)
-- return -EINVAL;
--
-- ret = wait_for_completion_timeout(pll_lock, msecs_to_jiffies(1000));
-- if (ret == 0) {
-- ret = -ETIMEDOUT;
-- } else {
-- regmap_read(regmap, CS35L41_PWR_CTRL3, &pwr_ctrl3);
-- pwr_ctrl3 |= CS35L41_SYNC_EN_MASK;
-- cs35l41_mdsync_up_seq[0].def = pwr_ctrl3;
-- ret = regmap_multi_reg_write(regmap, cs35l41_mdsync_up_seq,
-- ARRAY_SIZE(cs35l41_mdsync_up_seq));
-- }
-+ /* Activation to be completed later via cs35l41_mdsync_up() */
-+ if (ret || enable)
-+ return ret;
-
- ret = regmap_read_poll_timeout(regmap, CS35L41_IRQ1_STATUS1,
- int_status, int_status & pup_pdn_mask,
-@@ -1266,7 +1269,7 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
- if (ret)
- dev_err(dev, "Enable(%d) failed: %d\n", enable, ret);
-
-- // Clear PUP/PDN status
-+ /* Clear PUP/PDN status */
- regmap_write(regmap, CS35L41_IRQ1_STATUS1, pup_pdn_mask);
- break;
- case CS35L41_INT_BOOST:
-@@ -1348,6 +1351,17 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
- }
- EXPORT_SYMBOL_GPL(cs35l41_global_enable);
-
-+/*
-+ * To be called after receiving the PLL Lock interrupt, in order to complete
-+ * any shared boost activation initiated by cs35l41_global_enable().
-+ */
-+int cs35l41_mdsync_up(struct regmap *regmap)
-+{
-+ return regmap_update_bits(regmap, CS35L41_PWR_CTRL3,
-+ CS35L41_SYNC_EN_MASK, CS35L41_SYNC_EN_MASK);
-+}
-+EXPORT_SYMBOL_GPL(cs35l41_mdsync_up);
-+
- int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg)
- {
- struct cs35l41_gpio_cfg *gpio1 = &hw_cfg->gpio1;
-diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
-index 722b69a6de26c..5456e6bfa242f 100644
---- a/sound/soc/codecs/cs35l41.c
-+++ b/sound/soc/codecs/cs35l41.c
-@@ -386,10 +386,18 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
- struct cs35l41_private *cs35l41 = data;
- unsigned int status[4] = { 0, 0, 0, 0 };
- unsigned int masks[4] = { 0, 0, 0, 0 };
-- int ret = IRQ_NONE;
- unsigned int i;
-+ int ret;
-
-- pm_runtime_get_sync(cs35l41->dev);
-+ ret = pm_runtime_resume_and_get(cs35l41->dev);
-+ if (ret < 0) {
-+ dev_err(cs35l41->dev,
-+ "pm_runtime_resume_and_get failed in %s: %d\n",
-+ __func__, ret);
-+ return IRQ_NONE;
-+ }
-+
-+ ret = IRQ_NONE;
-
- for (i = 0; i < ARRAY_SIZE(status); i++) {
- regmap_read(cs35l41->regmap,
-@@ -459,7 +467,19 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
-
- if (status[2] & CS35L41_PLL_LOCK) {
- regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS3, CS35L41_PLL_LOCK);
-- complete(&cs35l41->pll_lock);
-+
-+ if (cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_ACTV ||
-+ cs35l41->hw_cfg.bst_type == CS35L41_SHD_BOOST_PASS) {
-+ ret = cs35l41_mdsync_up(cs35l41->regmap);
-+ if (ret)
-+ dev_err(cs35l41->dev, "MDSYNC-up failed: %d\n", ret);
-+ else
-+ dev_dbg(cs35l41->dev, "MDSYNC-up done\n");
-+
-+ dev_dbg(cs35l41->dev, "PUP-done status: %d\n",
-+ !!(status[0] & CS35L41_PUP_DONE_MASK));
-+ }
-+
- ret = IRQ_HANDLED;
- }
-
-@@ -500,11 +520,11 @@ static int cs35l41_main_amp_event(struct snd_soc_dapm_widget *w,
- ARRAY_SIZE(cs35l41_pup_patch));
-
- ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
-- 1, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running);
-+ 1, cs35l41->dsp.cs_dsp.running);
- break;
- case SND_SOC_DAPM_POST_PMD:
- ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
-- 0, &cs35l41->pll_lock, cs35l41->dsp.cs_dsp.running);
-+ 0, cs35l41->dsp.cs_dsp.running);
-
- regmap_multi_reg_write_bypassed(cs35l41->regmap,
- cs35l41_pdn_patch,
-@@ -802,10 +822,6 @@ static const struct snd_pcm_hw_constraint_list cs35l41_constraints = {
- static int cs35l41_pcm_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
- {
-- struct cs35l41_private *cs35l41 = snd_soc_component_get_drvdata(dai->component);
--
-- reinit_completion(&cs35l41->pll_lock);
--
- if (substream->runtime)
- return snd_pcm_hw_constraint_list(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE,
-@@ -1295,8 +1311,6 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
- if (ret < 0)
- goto err;
-
-- init_completion(&cs35l41->pll_lock);
--
- pm_runtime_set_autosuspend_delay(cs35l41->dev, 3000);
- pm_runtime_use_autosuspend(cs35l41->dev);
- pm_runtime_mark_last_busy(cs35l41->dev);
-@@ -1320,6 +1334,7 @@ int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *
- return 0;
-
- err_pm:
-+ pm_runtime_dont_use_autosuspend(cs35l41->dev);
- pm_runtime_disable(cs35l41->dev);
- pm_runtime_put_noidle(cs35l41->dev);
-
-@@ -1336,6 +1351,7 @@ EXPORT_SYMBOL_GPL(cs35l41_probe);
- void cs35l41_remove(struct cs35l41_private *cs35l41)
- {
- pm_runtime_get_sync(cs35l41->dev);
-+ pm_runtime_dont_use_autosuspend(cs35l41->dev);
- pm_runtime_disable(cs35l41->dev);
-
- regmap_write(cs35l41->regmap, CS35L41_IRQ1_MASK1, 0xFFFFFFFF);
-diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h
-index 34d967d4372b2..c85cbc1dd333b 100644
---- a/sound/soc/codecs/cs35l41.h
-+++ b/sound/soc/codecs/cs35l41.h
-@@ -33,7 +33,6 @@ struct cs35l41_private {
- int irq;
- /* GPIO for /RST */
- struct gpio_desc *reset_gpio;
-- struct completion pll_lock;
- };
-
- int cs35l41_probe(struct cs35l41_private *cs35l41, const struct cs35l41_hw_cfg *hw_cfg);
-diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
-index f9059780b7a7b..32d4ab2cd6724 100644
---- a/sound/soc/codecs/cs35l56.c
-+++ b/sound/soc/codecs/cs35l56.c
-@@ -772,9 +772,20 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
- {
- struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
- struct dentry *debugfs_root = component->debugfs_root;
-+ unsigned short vendor, device;
-
- BUILD_BUG_ON(ARRAY_SIZE(cs35l56_tx_input_texts) != ARRAY_SIZE(cs35l56_tx_input_values));
-
-+ if (!cs35l56->dsp.system_name &&
-+ (snd_soc_card_get_pci_ssid(component->card, &vendor, &device) == 0)) {
-+ cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
-+ GFP_KERNEL,
-+ "%04x%04x",
-+ vendor, device);
-+ if (!cs35l56->dsp.system_name)
-+ return -ENOMEM;
-+ }
-+
- if (!wait_for_completion_timeout(&cs35l56->init_completion,
- msecs_to_jiffies(5000))) {
- dev_err(cs35l56->base.dev, "%s: init_completion timed out\n", __func__);
-diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
-index 09eef6042aad6..20da1eaa4f1c7 100644
---- a/sound/soc/codecs/hdmi-codec.c
-+++ b/sound/soc/codecs/hdmi-codec.c
-@@ -877,18 +877,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
- void *data)
- {
- struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-- int ret = -ENOTSUPP;
-
- if (hcp->hcd.ops->hook_plugged_cb) {
- hcp->jack = jack;
-- ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
-- hcp->hcd.data,
-- plugged_cb,
-- component->dev);
-- if (ret)
-- hcp->jack = NULL;
-+ return 0;
- }
-- return ret;
-+
-+ return -ENOTSUPP;
- }
-
- static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
-@@ -982,6 +977,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
- return ret;
- }
-
-+static int hdmi_probe(struct snd_soc_component *component)
-+{
-+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-+ int ret = 0;
-+
-+ if (hcp->hcd.ops->hook_plugged_cb) {
-+ ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
-+ hcp->hcd.data,
-+ plugged_cb,
-+ component->dev);
-+ }
-+
-+ return ret;
-+}
-+
- static void hdmi_remove(struct snd_soc_component *component)
- {
- struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-@@ -992,6 +1002,7 @@ static void hdmi_remove(struct snd_soc_component *component)
- }
-
- static const struct snd_soc_component_driver hdmi_driver = {
-+ .probe = hdmi_probe,
- .remove = hdmi_remove,
- .dapm_widgets = hdmi_widgets,
- .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
-diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
-index fff4a8b862a73..7e21cec3c2fb9 100644
---- a/sound/soc/codecs/lpass-wsa-macro.c
-+++ b/sound/soc/codecs/lpass-wsa-macro.c
-@@ -1685,6 +1685,9 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
- boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
- reg = CDC_WSA_RX1_RX_PATH_CTL;
- reg_mix = CDC_WSA_RX1_RX_PATH_MIX_CTL;
-+ } else {
-+ dev_warn(component->dev, "Incorrect widget name in the driver\n");
-+ return -EINVAL;
- }
-
- switch (event) {
-diff --git a/sound/soc/codecs/rt712-sdca.c b/sound/soc/codecs/rt712-sdca.c
-index 7077ff6ba1f4b..6954fbe7ec5f3 100644
---- a/sound/soc/codecs/rt712-sdca.c
-+++ b/sound/soc/codecs/rt712-sdca.c
-@@ -963,13 +963,6 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
- rt712_sdca_parse_dt(rt712, &rt712->slave->dev);
- rt712->component = component;
-
-- if (!rt712->first_hw_init)
-- return 0;
--
-- ret = pm_runtime_resume(component->dev);
-- if (ret < 0 && ret != -EACCES)
-- return ret;
--
- /* add SPK route */
- if (rt712->hw_id != RT712_DEV_ID_713) {
- snd_soc_add_component_controls(component,
-@@ -980,6 +973,13 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
- rt712_sdca_spk_dapm_routes, ARRAY_SIZE(rt712_sdca_spk_dapm_routes));
- }
-
-+ if (!rt712->first_hw_init)
-+ return 0;
-+
-+ ret = pm_runtime_resume(component->dev);
-+ if (ret < 0 && ret != -EACCES)
-+ return ret;
-+
- return 0;
- }
-
-diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
-index 197fae23762f5..cb83c569e18d6 100644
---- a/sound/soc/codecs/wsa883x.c
-+++ b/sound/soc/codecs/wsa883x.c
-@@ -1203,9 +1203,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
- break;
- }
-
-- snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
-- WSA883X_DRE_GAIN_EN_MASK,
-- WSA883X_DRE_GAIN_FROM_CSR);
- if (wsa883x->port_enable[WSA883X_PORT_COMP])
- snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
- WSA883X_DRE_OFFSET_MASK,
-@@ -1218,9 +1215,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
- snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
- WSA883X_PDM_EN_MASK,
- WSA883X_PDM_ENABLE);
-- snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
-- WSA883X_GLOBAL_PA_EN_MASK,
-- WSA883X_GLOBAL_PA_ENABLE);
-
- break;
- case SND_SOC_DAPM_PRE_PMD:
-@@ -1346,6 +1340,7 @@ static const struct snd_soc_dai_ops wsa883x_dai_ops = {
- .hw_free = wsa883x_hw_free,
- .mute_stream = wsa883x_digital_mute,
- .set_stream = wsa883x_set_sdw_stream,
-+ .mute_unmute_on_trigger = true,
- };
-
- static struct snd_soc_dai_driver wsa883x_dais[] = {
-diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
-index bab7d34cf585b..5f181b89838ac 100644
---- a/sound/soc/fsl/fsl-asoc-card.c
-+++ b/sound/soc/fsl/fsl-asoc-card.c
-@@ -41,6 +41,7 @@
-
- /**
- * struct codec_priv - CODEC private data
-+ * @mclk: Main clock of the CODEC
- * @mclk_freq: Clock rate of MCLK
- * @free_freq: Clock rate of MCLK for hw_free()
- * @mclk_id: MCLK (or main clock) id for set_sysclk()
-diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
-index ba62995c909ac..ec53bda46a467 100644
---- a/sound/soc/fsl/fsl_easrc.c
-+++ b/sound/soc/fsl/fsl_easrc.c
-@@ -1966,17 +1966,21 @@ static int fsl_easrc_probe(struct platform_device *pdev)
- &fsl_easrc_dai, 1);
- if (ret) {
- dev_err(dev, "failed to register ASoC DAI\n");
-- return ret;
-+ goto err_pm_disable;
- }
-
- ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
- NULL, 0);
- if (ret) {
- dev_err(&pdev->dev, "failed to register ASoC platform\n");
-- return ret;
-+ goto err_pm_disable;
- }
-
- return 0;
-+
-+err_pm_disable:
-+ pm_runtime_disable(&pdev->dev);
-+ return ret;
- }
-
- static void fsl_easrc_remove(struct platform_device *pdev)
-diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
-index 9014978100207..3f7ccae3f6b1a 100644
---- a/sound/soc/fsl/mpc5200_dma.c
-+++ b/sound/soc/fsl/mpc5200_dma.c
-@@ -100,6 +100,9 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
-
- /**
- * psc_dma_trigger: start and stop the DMA transfer.
-+ * @component: triggered component
-+ * @substream: triggered substream
-+ * @cmd: triggered command
- *
- * This function is called by ALSA to start, stop, pause, and resume the DMA
- * transfer of data.
-diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
-index 842649501e303..24e966a2ac2be 100644
---- a/sound/soc/intel/boards/sof_sdw.c
-+++ b/sound/soc/intel/boards/sof_sdw.c
-@@ -1374,7 +1374,7 @@ static int create_sdw_dailink(struct snd_soc_card *card, int *link_index,
- continue;
-
- /* j reset after loop, adr_index only applies to first link */
-- for (; j < adr_link_next->num_adr; j++) {
-+ for (; j < adr_link_next->num_adr && codec_dlc_index < codec_num; j++) {
- const struct snd_soc_acpi_endpoint *endpoints;
-
- endpoints = adr_link_next->adr_d[j].endpoints;
-@@ -1934,6 +1934,12 @@ static int mc_probe(struct platform_device *pdev)
- for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
- codec_info_list[i].amp_num = 0;
-
-+ if (mach->mach_params.subsystem_id_set) {
-+ snd_soc_card_set_pci_ssid(card,
-+ mach->mach_params.subsystem_vendor,
-+ mach->mach_params.subsystem_device);
-+ }
-+
- ret = sof_card_dai_links_create(card);
- if (ret < 0)
- return ret;
-diff --git a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
-index 623e3bebb8884..4360b9f5ff2c7 100644
---- a/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
-+++ b/sound/soc/intel/boards/sof_sdw_rt_sdca_jack_common.c
-@@ -58,6 +58,11 @@ static const struct snd_soc_dapm_route rt712_sdca_map[] = {
- { "rt712 MIC2", NULL, "Headset Mic" },
- };
-
-+static const struct snd_soc_dapm_route rt713_sdca_map[] = {
-+ { "Headphone", NULL, "rt713 HP" },
-+ { "rt713 MIC2", NULL, "Headset Mic" },
-+};
-+
- static const struct snd_kcontrol_new rt_sdca_jack_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
-@@ -109,6 +114,9 @@ static int rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd)
- } else if (strstr(component->name_prefix, "rt712")) {
- ret = snd_soc_dapm_add_routes(&card->dapm, rt712_sdca_map,
- ARRAY_SIZE(rt712_sdca_map));
-+ } else if (strstr(component->name_prefix, "rt713")) {
-+ ret = snd_soc_dapm_add_routes(&card->dapm, rt713_sdca_map,
-+ ARRAY_SIZE(rt713_sdca_map));
- } else {
- dev_err(card->dev, "%s is not supported\n", component->name_prefix);
- return -EINVAL;
-diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
-index cdcbf04b8832f..5e2ec60e2954b 100644
---- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
-+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
-@@ -75,6 +75,39 @@ static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
- return arg;
- }
-
-+/*
-+ * The Lenovo Yoga Tab 3 Pro YT3-X90, with Android factory OS, has a buggy DSDT
-+ * with the codec not being listed at all.
-+ */
-+static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
-+ {
-+ /* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
-+ },
-+ },
-+ { }
-+};
-+
-+static struct snd_soc_acpi_mach cht_lenovo_yoga_tab3_x90_mach = {
-+ .id = "10WM5102",
-+ .drv_name = "bytcr_wm5102",
-+ .fw_filename = "intel/fw_sst_22a8.bin",
-+ .board = "bytcr_wm5102",
-+ .sof_tplg_filename = "sof-cht-wm5102.tplg",
-+};
-+
-+static struct snd_soc_acpi_mach *lenovo_yt3_x90_quirk(void *arg)
-+{
-+ if (dmi_check_system(lenovo_yoga_tab3_x90))
-+ return &cht_lenovo_yoga_tab3_x90_mach;
-+
-+ /* Skip wildcard match snd_soc_acpi_intel_cherrytrail_machines[] entry */
-+ return NULL;
-+}
-+
- static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
- .num_codecs = 2,
- .codecs = { "10EC5640", "10EC3276" },
-@@ -175,6 +208,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
- .drv_name = "sof_pcm512x",
- .sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
- },
-+ /*
-+ * Special case for the Lenovo Yoga Tab 3 Pro YT3-X90 where the DSDT
-+ * misses the codec. Match on the SST id instead; lenovo_yt3_x90_quirk()
-+ * will return a YT3 specific mach or NULL when called on other hw,
-+ * skipping this entry.
-+ */
-+ {
-+ .id = "808622A8",
-+ .machine_quirk = lenovo_yt3_x90_quirk,
-+ },
-
- #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
-diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
-index 57ea815d3f041..b776c58dcf47a 100644
---- a/sound/soc/intel/skylake/skl-sst-utils.c
-+++ b/sound/soc/intel/skylake/skl-sst-utils.c
-@@ -299,6 +299,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
- module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
- if (!module->instance_id) {
- ret = -ENOMEM;
-+ kfree(module);
- goto free_uuid_list;
- }
-
-diff --git a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
-index 9c11016f032c2..9777ba89e956c 100644
---- a/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
-+++ b/sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
-@@ -1179,7 +1179,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
- playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
- if (!playback_codec) {
- ret = -EINVAL;
-- dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
-+ dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
- goto err_playback_codec;
- }
-
-@@ -1193,7 +1193,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
- for_each_card_prelinks(card, i, dai_link) {
- ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
- if (ret) {
-- dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
-+ dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
- dai_link->name);
- goto err_probe;
- }
-diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
-index 9017f48b6272b..f7e22abb75846 100644
---- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
-+++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
-@@ -246,6 +246,11 @@ static const struct snd_soc_dapm_widget mt8188_mt6359_widgets[] = {
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_SINK("HDMI"),
- SND_SOC_DAPM_SINK("DP"),
-+
-+ /* dynamic pinctrl */
-+ SND_SOC_DAPM_PINCTRL("ETDM_SPK_PIN", "aud_etdm_spk_on", "aud_etdm_spk_off"),
-+ SND_SOC_DAPM_PINCTRL("ETDM_HP_PIN", "aud_etdm_hp_on", "aud_etdm_hp_off"),
-+ SND_SOC_DAPM_PINCTRL("MTKAIF_PIN", "aud_mtkaif_on", "aud_mtkaif_off"),
- };
-
- static const struct snd_kcontrol_new mt8188_mt6359_controls[] = {
-@@ -267,6 +272,7 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
- snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
- struct snd_soc_component *cmpnt_codec =
- asoc_rtd_to_codec(rtd, 0)->component;
-+ struct snd_soc_dapm_widget *pin_w = NULL, *w;
- struct mtk_base_afe *afe;
- struct mt8188_afe_private *afe_priv;
- struct mtkaif_param *param;
-@@ -306,6 +312,18 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
- return 0;
- }
-
-+ for_each_card_widgets(rtd->card, w) {
-+ if (!strcmp(w->name, "MTKAIF_PIN")) {
-+ pin_w = w;
-+ break;
-+ }
-+ }
-+
-+ if (pin_w)
-+ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_PRE_PMU);
-+ else
-+ dev_dbg(afe->dev, "%s(), no pinmux widget, please check if default on\n", __func__);
-+
- pm_runtime_get_sync(afe->dev);
- mt6359_mtkaif_calibration_enable(cmpnt_codec);
-
-@@ -403,6 +421,9 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
- for (i = 0; i < MT8188_MTKAIF_MISO_NUM; i++)
- param->mtkaif_phase_cycle[i] = mtkaif_phase_cycle[i];
-
-+ if (pin_w)
-+ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_POST_PMD);
-+
- dev_dbg(afe->dev, "%s(), end, calibration ok %d\n",
- __func__, param->mtkaif_calibration_ok);
-
-diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
-index 3f33f0630ad8a..9a828e55c4f9e 100644
---- a/sound/soc/soc-dai.c
-+++ b/sound/soc/soc-dai.c
-@@ -658,6 +658,10 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
- ret = soc_dai_trigger(dai, substream, cmd);
- if (ret < 0)
- break;
-+
-+ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
-+ snd_soc_dai_digital_mute(dai, 0, substream->stream);
-+
- soc_dai_mark_push(dai, substream, trigger);
- }
- break;
-@@ -668,6 +672,9 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
- if (rollback && !soc_dai_mark_match(dai, substream, trigger))
- continue;
-
-+ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
-+ snd_soc_dai_digital_mute(dai, 1, substream->stream);
-+
- r = soc_dai_trigger(dai, substream, cmd);
- if (r < 0)
- ret = r; /* use last ret */
-diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
-index 312e555798315..85e3bbf7e5f0e 100644
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
- dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
- break;
- case snd_soc_dapm_clock_supply:
-- w->clk = devm_clk_get(dapm->dev, w->name);
-+ w->clk = devm_clk_get(dapm->dev, widget->name);
- if (IS_ERR(w->clk)) {
- ret = PTR_ERR(w->clk);
- goto request_failed;
-diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
-index 54704250c0a2c..511446a30c057 100644
---- a/sound/soc/soc-pcm.c
-+++ b/sound/soc/soc-pcm.c
-@@ -698,14 +698,12 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
-
- if (!rollback) {
- snd_soc_runtime_deactivate(rtd, substream->stream);
-- /* clear the corresponding DAIs parameters when going to be inactive */
-- for_each_rtd_dais(rtd, i, dai) {
-- if (snd_soc_dai_active(dai) == 0)
-- soc_pcm_set_dai_params(dai, NULL);
-
-- if (snd_soc_dai_stream_active(dai, substream->stream) == 0)
-- snd_soc_dai_digital_mute(dai, 1, substream->stream);
-- }
-+ /* Make sure DAI parameters cleared if the DAI becomes inactive */
-+ for_each_rtd_dais(rtd, i, dai)
-+ if (snd_soc_dai_active(dai) == 0 &&
-+ (dai->rate || dai->channels || dai->sample_bits))
-+ soc_pcm_set_dai_params(dai, NULL);
- }
-
- for_each_rtd_dais(rtd, i, dai)
-@@ -898,8 +896,10 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
- snd_soc_dapm_stream_event(rtd, substream->stream,
- SND_SOC_DAPM_STREAM_START);
-
-- for_each_rtd_dais(rtd, i, dai)
-- snd_soc_dai_digital_mute(dai, 0, substream->stream);
-+ for_each_rtd_dais(rtd, i, dai) {
-+ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
-+ snd_soc_dai_digital_mute(dai, 0, substream->stream);
-+ }
-
- out:
- return soc_pcm_ret(rtd, ret);
-@@ -936,6 +936,17 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
-
- snd_soc_dpcm_mutex_assert_held(rtd);
-
-+ /* clear the corresponding DAIs parameters when going to be inactive */
-+ for_each_rtd_dais(rtd, i, dai) {
-+ if (snd_soc_dai_active(dai) == 1)
-+ soc_pcm_set_dai_params(dai, NULL);
-+
-+ if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
-+ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
-+ snd_soc_dai_digital_mute(dai, 1, substream->stream);
-+ }
-+ }
-+
- /* run the stream event */
- snd_soc_dapm_stream_stop(rtd, substream->stream);
-
-diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
-index 2d1616b81485c..0938b259f7034 100644
---- a/sound/soc/sof/core.c
-+++ b/sound/soc/sof/core.c
-@@ -459,9 +459,10 @@ int snd_sof_device_remove(struct device *dev)
- struct snd_sof_dev *sdev = dev_get_drvdata(dev);
- struct snd_sof_pdata *pdata = sdev->pdata;
- int ret;
-+ bool aborted = false;
-
- if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
-- cancel_work_sync(&sdev->probe_work);
-+ aborted = cancel_work_sync(&sdev->probe_work);
-
- /*
- * Unregister any registered client device first before IPC and debugfs
-@@ -487,6 +488,9 @@ int snd_sof_device_remove(struct device *dev)
- snd_sof_free_debug(sdev);
- snd_sof_remove(sdev);
- sof_ops_free(sdev);
-+ } else if (aborted) {
-+ /* probe_work never ran */
-+ sof_ops_free(sdev);
- }
-
- /* release firmware */
-diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
-index 7cb63e6b24dc9..c9c1d2ec7af25 100644
---- a/sound/soc/sof/ipc4-topology.c
-+++ b/sound/soc/sof/ipc4-topology.c
-@@ -895,7 +895,8 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
- if (process->init_config == SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT) {
- struct sof_ipc4_base_module_cfg_ext *base_cfg_ext;
- u32 ext_size = struct_size(base_cfg_ext, pin_formats,
-- swidget->num_input_pins + swidget->num_output_pins);
-+ size_add(swidget->num_input_pins,
-+ swidget->num_output_pins));
-
- base_cfg_ext = kzalloc(ext_size, GFP_KERNEL);
- if (!base_cfg_ext) {
-diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
-index ab6eddd91bb77..1b09496733fb8 100644
---- a/sound/soc/sof/ipc4.c
-+++ b/sound/soc/sof/ipc4.c
-@@ -614,6 +614,9 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
- case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
- sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
- break;
-+ case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
-+ snd_sof_dsp_panic(sdev, 0, true);
-+ break;
- default:
- dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
- ipc4_msg->primary, ipc4_msg->extension);
-diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
-index e5405f854a910..563fe6f7789f7 100644
---- a/sound/soc/sof/sof-audio.c
-+++ b/sound/soc/sof/sof-audio.c
-@@ -1032,6 +1032,13 @@ int sof_machine_check(struct snd_sof_dev *sdev)
- mach = snd_sof_machine_select(sdev);
- if (mach) {
- sof_pdata->machine = mach;
-+
-+ if (sof_pdata->subsystem_id_set) {
-+ mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
-+ mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
-+ mach->mach_params.subsystem_id_set = true;
-+ }
-+
- snd_sof_set_mach_params(mach, sdev);
- return 0;
- }
-diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
-index f5ece43d0ec24..69a2352f2e1a0 100644
---- a/sound/soc/sof/sof-pci-dev.c
-+++ b/sound/soc/sof/sof-pci-dev.c
-@@ -145,6 +145,13 @@ static const struct dmi_system_id community_key_platforms[] = {
- DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"),
- }
- },
-+ {
-+ .ident = "Google firmware",
-+ .callback = chromebook_use_community_key,
-+ .matches = {
-+ DMI_MATCH(DMI_BIOS_VERSION, "Google"),
-+ }
-+ },
- {},
- };
-
-@@ -214,6 +221,14 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
- return ret;
-
- sof_pdata->name = pci_name(pci);
-+
-+ /* PCI defines a vendor ID of 0xFFFF as invalid. */
-+ if (pci->subsystem_vendor != 0xFFFF) {
-+ sof_pdata->subsystem_vendor = pci->subsystem_vendor;
-+ sof_pdata->subsystem_device = pci->subsystem_device;
-+ sof_pdata->subsystem_id_set = true;
-+ }
-+
- sof_pdata->desc = desc;
- sof_pdata->dev = dev;
-
-diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
-index 666057d50ea0d..dd3f59bb72faf 100644
---- a/sound/soc/ti/ams-delta.c
-+++ b/sound/soc/ti/ams-delta.c
-@@ -303,7 +303,7 @@ static int cx81801_open(struct tty_struct *tty)
- static void cx81801_close(struct tty_struct *tty)
- {
- struct snd_soc_component *component = tty->disc_data;
-- struct snd_soc_dapm_context *dapm = &component->card->dapm;
-+ struct snd_soc_dapm_context *dapm;
-
- del_timer_sync(&cx81801_timer);
-
-@@ -315,6 +315,8 @@ static void cx81801_close(struct tty_struct *tty)
-
- v253_ops.close(tty);
-
-+ dapm = &component->card->dapm;
-+
- /* Revert back to default audio input/output constellation */
- snd_soc_dapm_mutex_lock(dapm);
-
-diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
-index fdabed5133e83..b399d86f22777 100644
---- a/sound/soc/ti/omap-mcbsp.c
-+++ b/sound/soc/ti/omap-mcbsp.c
-@@ -74,14 +74,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
- return 0;
- }
-
-- pm_runtime_put_sync(mcbsp->dev);
-+ if (mcbsp->active)
-+ pm_runtime_put_sync(mcbsp->dev);
-
- r = clk_set_parent(mcbsp->fclk, fck_src);
- if (r)
- dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
- src);
-
-- pm_runtime_get_sync(mcbsp->dev);
-+ if (mcbsp->active)
-+ pm_runtime_get_sync(mcbsp->dev);
-
- clk_put(fck_src);
-
-diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
-index 4e64842245e19..ab2b938502ebe 100644
---- a/sound/usb/quirks.c
-+++ b/sound/usb/quirks.c
-@@ -2220,6 +2220,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
- QUIRK_FLAG_DSD_RAW),
- VENDOR_FLG(0x2ab6, /* T+A devices */
- QUIRK_FLAG_DSD_RAW),
-+ VENDOR_FLG(0x2afd, /* McIntosh Laboratory, Inc. */
-+ QUIRK_FLAG_DSD_RAW),
- VENDOR_FLG(0x2d87, /* Cayin device */
- QUIRK_FLAG_DSD_RAW),
- VENDOR_FLG(0x3336, /* HEM devices */
-diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
-index 87245c584784e..8d94739d75c67 100644
---- a/tools/arch/parisc/include/uapi/asm/errno.h
-+++ b/tools/arch/parisc/include/uapi/asm/errno.h
-@@ -75,7 +75,6 @@
-
- /* We now return you to your regularly scheduled HPUX. */
-
--#define ENOSYM 215 /* symbol does not exist in executable */
- #define ENOTSOCK 216 /* Socket operation on non-socket */
- #define EDESTADDRREQ 217 /* Destination address required */
- #define EMSGSIZE 218 /* Message too long */
-@@ -101,7 +100,6 @@
- #define ETIMEDOUT 238 /* Connection timed out */
- #define ECONNREFUSED 239 /* Connection refused */
- #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
--#define EREMOTERELEASE 240 /* Remote peer released connection */
- #define EHOSTDOWN 241 /* Host is down */
- #define EHOSTUNREACH 242 /* No route to host */
-
-diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c
-index 37e813175642f..a807df0f05974 100644
---- a/tools/crypto/ccp/dbc.c
-+++ b/tools/crypto/ccp/dbc.c
-@@ -8,6 +8,7 @@
- */
-
- #include <assert.h>
-+#include <errno.h>
- #include <string.h>
- #include <sys/ioctl.h>
-
-@@ -22,16 +23,14 @@ int get_nonce(int fd, void *nonce_out, void *signature)
- struct dbc_user_nonce tmp = {
- .auth_needed = !!signature,
- };
-- int ret;
-
- assert(nonce_out);
-
- if (signature)
- memcpy(tmp.signature, signature, sizeof(tmp.signature));
-
-- ret = ioctl(fd, DBCIOCNONCE, &tmp);
-- if (ret)
-- return ret;
-+ if (ioctl(fd, DBCIOCNONCE, &tmp))
-+ return errno;
- memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce));
-
- return 0;
-@@ -47,7 +46,9 @@ int set_uid(int fd, __u8 *uid, __u8 *signature)
- memcpy(tmp.uid, uid, sizeof(tmp.uid));
- memcpy(tmp.signature, signature, sizeof(tmp.signature));
-
-- return ioctl(fd, DBCIOCUID, &tmp);
-+ if (ioctl(fd, DBCIOCUID, &tmp))
-+ return errno;
-+ return 0;
- }
-
- int process_param(int fd, int msg_index, __u8 *signature, int *data)
-@@ -63,10 +64,10 @@ int process_param(int fd, int msg_index, __u8 *signature, int *data)
-
- memcpy(tmp.signature, signature, sizeof(tmp.signature));
-
-- ret = ioctl(fd, DBCIOCPARAM, &tmp);
-- if (ret)
-- return ret;
-+ if (ioctl(fd, DBCIOCPARAM, &tmp))
-+ return errno;
-
- *data = tmp.param;
-+ memcpy(signature, tmp.signature, sizeof(tmp.signature));
- return 0;
- }
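The dbc.c helpers above now share one failure convention: 0 on success, the positive errno otherwise, which the Python wrapper can pass straight to os.strerror(). Sketched minimally (do_request is a hypothetical name):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Normalise ioctl() failures to a positive errno return value. */
    static int do_request(int fd, unsigned long request, void *arg)
    {
            if (ioctl(fd, request, arg))
                    return errno;   /* e.g. 22 (EINVAL) for a malformed call */
            return 0;
    }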
-diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py
-index 3f6a825ffc9e4..2b91415b19407 100644
---- a/tools/crypto/ccp/dbc.py
-+++ b/tools/crypto/ccp/dbc.py
-@@ -27,8 +27,7 @@ lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL)
-
-
- def handle_error(code):
-- val = code * -1
-- raise OSError(val, os.strerror(val))
-+ raise OSError(code, os.strerror(code))
-
-
- def get_nonce(device, signature):
-@@ -58,7 +57,8 @@ def process_param(device, message, signature, data=None):
- if type(message) != tuple:
- raise ValueError("Expected message tuple")
- arg = ctypes.c_int(data if data else 0)
-- ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg))
-+ sig = ctypes.create_string_buffer(signature, len(signature))
-+ ret = lib.process_param(device.fileno(), message[0], ctypes.pointer(sig), ctypes.pointer(arg))
- if ret:
- handle_error(ret)
-- return arg, signature
-+ return arg.value, sig.value
-diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py
-index 998bb3e3cd040..79de3638a01ab 100755
---- a/tools/crypto/ccp/test_dbc.py
-+++ b/tools/crypto/ccp/test_dbc.py
-@@ -4,6 +4,12 @@ import unittest
- import os
- import time
- import glob
-+import fcntl
-+try:
-+ import ioctl_opt as ioctl
-+except ImportError:
-+ ioctl = None
-+ pass
- from dbc import *
-
- # Artificial delay between set commands
-@@ -27,8 +33,8 @@ def system_is_secured() -> bool:
- class DynamicBoostControlTest(unittest.TestCase):
- def __init__(self, data) -> None:
- self.d = None
-- self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
-- self.uid = "1111111111111111"
-+ self.signature = b"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
-+ self.uid = b"1111111111111111"
- super().__init__(data)
-
- def setUp(self) -> None:
-@@ -64,13 +70,16 @@ class TestInvalidIoctls(DynamicBoostControlTest):
- def setUp(self) -> None:
- if not os.path.exists(DEVICE_NODE):
- self.skipTest("system is unsupported")
-+ if not ioctl:
-+ self.skipTest("unable to test IOCTLs without ioctl_opt")
-+
- return super().setUp()
-
- def test_invalid_nonce_ioctl(self) -> None:
- """tries to call get_nonce ioctl with invalid data structures"""
-
- # 0x1 (get nonce), and invalid data
-- INVALID1 = IOWR(ord("D"), 0x01, invalid_param)
-+ INVALID1 = ioctl.IOWR(ord("D"), 0x01, invalid_param)
- with self.assertRaises(OSError) as error:
- fcntl.ioctl(self.d, INVALID1, self.data, True)
- self.assertEqual(error.exception.errno, 22)
-@@ -79,7 +88,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
- """tries to call set_uid ioctl with invalid data structures"""
-
- # 0x2 (set uid), and invalid data
-- INVALID2 = IOW(ord("D"), 0x02, invalid_param)
-+ INVALID2 = ioctl.IOW(ord("D"), 0x02, invalid_param)
- with self.assertRaises(OSError) as error:
- fcntl.ioctl(self.d, INVALID2, self.data, True)
- self.assertEqual(error.exception.errno, 22)
-@@ -88,7 +97,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
-        """tries to call set_uid ioctl as read-write with invalid data structures"""
-
- # 0x2 as RW (set uid), and invalid data
-- INVALID3 = IOWR(ord("D"), 0x02, invalid_param)
-+ INVALID3 = ioctl.IOWR(ord("D"), 0x02, invalid_param)
- with self.assertRaises(OSError) as error:
- fcntl.ioctl(self.d, INVALID3, self.data, True)
- self.assertEqual(error.exception.errno, 22)
-@@ -96,7 +105,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
- def test_invalid_param_ioctl(self) -> None:
- """tries to call param ioctl with invalid data structures"""
- # 0x3 (param), and invalid data
-- INVALID4 = IOWR(ord("D"), 0x03, invalid_param)
-+ INVALID4 = ioctl.IOWR(ord("D"), 0x03, invalid_param)
- with self.assertRaises(OSError) as error:
- fcntl.ioctl(self.d, INVALID4, self.data, True)
- self.assertEqual(error.exception.errno, 22)
-@@ -104,7 +113,7 @@ class TestInvalidIoctls(DynamicBoostControlTest):
- def test_invalid_call_ioctl(self) -> None:
- """tries to call the DBC ioctl with invalid data structures"""
- # 0x4, and invalid data
-- INVALID5 = IOWR(ord("D"), 0x04, invalid_param)
-+ INVALID5 = ioctl.IOWR(ord("D"), 0x04, invalid_param)
- with self.assertRaises(OSError) as error:
- fcntl.ioctl(self.d, INVALID5, self.data, True)
- self.assertEqual(error.exception.errno, 22)
-@@ -183,12 +192,12 @@ class TestUnFusedSystem(DynamicBoostControlTest):
- # SOC power
- soc_power_max = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature)
- soc_power_min = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature)
-- self.assertGreater(soc_power_max.parameter, soc_power_min.parameter)
-+ self.assertGreater(soc_power_max[0], soc_power_min[0])
-
- # fmax
- fmax_max = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature)
- fmax_min = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature)
-- self.assertGreater(fmax_max.parameter, fmax_min.parameter)
-+ self.assertGreater(fmax_max[0], fmax_min[0])
-
- # cap values
- keys = {
-@@ -199,7 +208,7 @@ class TestUnFusedSystem(DynamicBoostControlTest):
- }
- for k in keys:
- result = process_param(self.d, keys[k], self.signature)
-- self.assertGreater(result.parameter, 0)
-+ self.assertGreater(result[0], 0)
-
- def test_get_invalid_param(self) -> None:
- """fetch an invalid parameter"""
-@@ -217,17 +226,17 @@ class TestUnFusedSystem(DynamicBoostControlTest):
- original = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
-
- # set the fmax
-- target = original.parameter - 100
-+ target = original[0] - 100
- process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target)
- time.sleep(SET_DELAY)
- new = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
-- self.assertEqual(new.parameter, target)
-+ self.assertEqual(new[0], target)
-
- # revert back to current
-- process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.parameter)
-+ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original[0])
- time.sleep(SET_DELAY)
- cur = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
-- self.assertEqual(cur.parameter, original.parameter)
-+ self.assertEqual(cur[0], original[0])
-
- def test_set_power_cap(self) -> None:
- """get/set power cap limit"""
-@@ -235,17 +244,17 @@ class TestUnFusedSystem(DynamicBoostControlTest):
- original = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
-
-        # set the power cap
-- target = original.parameter - 10
-+ target = original[0] - 10
- process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target)
- time.sleep(SET_DELAY)
- new = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
-- self.assertEqual(new.parameter, target)
-+ self.assertEqual(new[0], target)
-
- # revert back to current
-- process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.parameter)
-+ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original[0])
- time.sleep(SET_DELAY)
- cur = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
-- self.assertEqual(cur.parameter, original.parameter)
-+ self.assertEqual(cur[0], original[0])
-
- def test_set_3d_graphics_mode(self) -> None:
- """set/get 3d graphics mode"""
-diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
-index 264eeb9c46a9f..318e2dad27e04 100644
---- a/tools/hv/hv_kvp_daemon.c
-+++ b/tools/hv/hv_kvp_daemon.c
-@@ -1421,7 +1421,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
- if (error)
- goto setval_error;
-
-- if (new_val->addr_family == ADDR_FAMILY_IPV6) {
-+ if (new_val->addr_family & ADDR_FAMILY_IPV6) {
- error = fprintf(nmfile, "\n[ipv6]\n");
- if (error < 0)
- goto setval_error;
-@@ -1455,14 +1455,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
- if (error < 0)
- goto setval_error;
-
-- error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
-- if (error < 0)
-- goto setval_error;
--
-- error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
-- if (error < 0)
-- goto setval_error;
-+	/* we do not want ipv4 addresses in the ipv6 section and vice versa */
-+ if (is_ipv6 != is_ipv4((char *)new_val->gate_way)) {
-+ error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
-+ if (error < 0)
-+ goto setval_error;
-+ }
-
-+ if (is_ipv6 != is_ipv4((char *)new_val->dns_addr)) {
-+ error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
-+ if (error < 0)
-+ goto setval_error;
-+ }
- fclose(nmfile);
- fclose(ifcfg_file);
-
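The is_ipv6 != is_ipv4(...) tests above write an address only into the section whose family it matches; the predicate as a standalone sketch (want_in_section is hypothetical):

    /* section_is_v6 | addr_is_v4 | written?
     *       1       |     1      |   no   (v4 address in [ipv6])
     *       1       |     0      |   yes
     *       0       |     1      |   yes
     *       0       |     0      |   no   (v6 address in [ipv4])
     */
    static int want_in_section(int section_is_v6, int addr_is_v4)
    {
            return section_is_v6 != addr_is_v4;
    }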
-diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
-index ae5a7a8249a20..440a91b35823b 100755
---- a/tools/hv/hv_set_ifconfig.sh
-+++ b/tools/hv/hv_set_ifconfig.sh
-@@ -53,7 +53,7 @@
- # or "manual" if no boot-time protocol should be used)
- #
- # address1=ipaddr1/plen
--# address=ipaddr2/plen
-+# address2=ipaddr2/plen
- #
- # gateway=gateway1;gateway2
- #
-@@ -61,7 +61,7 @@
- #
- # [ipv6]
- # address1=ipaddr1/plen
--# address2=ipaddr1/plen
-+# address2=ipaddr2/plen
- #
- # gateway=gateway1;gateway2
- #
-diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
-index 44bbf80f0cfdd..0d0a7a19d6f95 100644
---- a/tools/iio/iio_generic_buffer.c
-+++ b/tools/iio/iio_generic_buffer.c
-@@ -54,9 +54,12 @@ enum autochan {
- static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
- {
- unsigned int bytes = 0;
-- int i = 0;
-+ int i = 0, max = 0;
-+ unsigned int misalignment;
-
- while (i < num_channels) {
-+ if (channels[i].bytes > max)
-+ max = channels[i].bytes;
- if (bytes % channels[i].bytes == 0)
- channels[i].location = bytes;
- else
-@@ -66,6 +69,14 @@ static unsigned int size_from_channelarray(struct iio_channel_info *channels, in
- bytes = channels[i].location + channels[i].bytes;
- i++;
- }
-+ /*
-+	 * We want the data in the next sample to also be properly aligned,
-+	 * so we'll add padding at the end if needed. Adding padding only
-+	 * works for channel data whose size is 2^n bytes.
-+ */
-+ misalignment = bytes % max;
-+ if (misalignment)
-+ bytes += max - misalignment;
-
- return bytes;
- }
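A worked example of the padding above: an 8-byte channel followed by a 2-byte channel ends with bytes = 10, so misalignment = 10 % 8 = 2 and 8 - 2 = 6 bytes of tail padding round the sample up to 16, keeping the next sample's 8-byte channel aligned. The same arithmetic as a standalone sketch:

    /* Round the scan size up to a multiple of the largest channel size. */
    static unsigned int pad_sample(unsigned int bytes, unsigned int max)
    {
            unsigned int misalignment = bytes % max;    /* 10 % 8 == 2 */

            if (misalignment)
                    bytes += max - misalignment;        /* 10 -> 16 */
            return bytes;
    }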
-diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
-index 3c36aeade991e..370ed14b1ae09 100644
---- a/tools/include/uapi/linux/prctl.h
-+++ b/tools/include/uapi/linux/prctl.h
-@@ -283,7 +283,8 @@ struct prctl_mm_map {
-
- /* Memory deny write / execute */
- #define PR_SET_MDWE 65
--# define PR_MDWE_REFUSE_EXEC_GAIN 1
-+# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
-+# define PR_MDWE_NO_INHERIT (1UL << 1)
-
- #define PR_GET_MDWE 66
-
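With PR_MDWE_REFUSE_EXEC_GAIN redefined as bit 0, the MDWE argument is a genuine bitmask and the new PR_MDWE_NO_INHERIT flag can be OR'd in (the kernel accepts it only together with REFUSE_EXEC_GAIN). A minimal caller sketch, guarding the constants for older uapi headers:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MDWE
    #define PR_SET_MDWE 65
    #endif
    #ifndef PR_MDWE_REFUSE_EXEC_GAIN
    #define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
    #endif
    #ifndef PR_MDWE_NO_INHERIT
    #define PR_MDWE_NO_INHERIT (1UL << 1)
    #endif

    int main(void)
    {
            /* refuse write+execute gains here, but let children start clean */
            unsigned long flags = PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT;

            if (prctl(PR_SET_MDWE, flags, 0UL, 0UL, 0UL))
                    perror("PR_SET_MDWE");
            return 0;
    }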
-diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
-index 3803479dbe106..1c13f8e88833b 100644
---- a/tools/lib/bpf/bpf_tracing.h
-+++ b/tools/lib/bpf/bpf_tracing.h
-@@ -362,8 +362,6 @@ struct pt_regs___arm64 {
- #define __PT_PARM7_REG a6
- #define __PT_PARM8_REG a7
-
--/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
--#define PT_REGS_SYSCALL_REGS(ctx) ctx
- #define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
- #define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
- #define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
-diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
-index d5d771ccdc7b4..e88a6d8a0b0f9 100644
---- a/tools/lib/perf/include/internal/rc_check.h
-+++ b/tools/lib/perf/include/internal/rc_check.h
-@@ -9,8 +9,12 @@
- * Enable reference count checking implicitly with leak checking, which is
- * integrated into address sanitizer.
- */
--#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
-+#if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
- #define REFCNT_CHECKING 1
-+#elif defined(__has_feature)
-+#if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
-+#define REFCNT_CHECKING 1
-+#endif
- #endif
-
- /*
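The two-step test above exists because GCC advertises ASan by defining __SANITIZE_ADDRESS__, while clang only reports it through __has_feature(), a macro GCC does not define. Condensed into a standalone sketch (UNDER_SANITIZER is a hypothetical name):

    #if defined(__SANITIZE_ADDRESS__) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
    #define UNDER_SANITIZER 1               /* GCC, or an explicit -D */
    #elif defined(__has_feature)
    #if __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
    #define UNDER_SANITIZER 1               /* clang */
    #endif
    #endif

    #ifndef UNDER_SANITIZER
    #define UNDER_SANITIZER 0
    #endif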
-diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
-index c54f7235c5d94..f40febdd6e36a 100644
---- a/tools/objtool/objtool.c
-+++ b/tools/objtool/objtool.c
-@@ -146,7 +146,5 @@ int main(int argc, const char **argv)
- exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
- pager_init(UNUSED);
-
-- objtool_run(argc, argv);
--
-- return 0;
-+ return objtool_run(argc, argv);
- }
-diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
-index 3c36324712b6e..482d6c52e2edf 100644
---- a/tools/perf/Documentation/perf-kwork.txt
-+++ b/tools/perf/Documentation/perf-kwork.txt
-@@ -8,7 +8,7 @@ perf-kwork - Tool to trace/measure kernel work properties (latencies)
- SYNOPSIS
- --------
- [verse]
--'perf kwork' {record}
-+'perf kwork' {record|report|latency|timehist}
-
- DESCRIPTION
- -----------
-diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
-index 37af6df7b978d..86569f230e60d 100644
---- a/tools/perf/Makefile.perf
-+++ b/tools/perf/Makefile.perf
-@@ -69,6 +69,10 @@ include ../scripts/utilities.mak
- # Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
- # for dwarf backtrace post unwind.
- #
-+# Define NO_LIBTRACEEVENT=1 if you don't want libtraceevent to be linked,
-+# this will remove multiple features and tools, such as 'perf trace',
-+# that need it to read tracefs event format files, etc.
-+#
- # Define NO_PERF_READ_VDSO32 if you do not want to build perf-read-vdso32
- # for reading the 32-bit compatibility VDSO in 64-bit mode
- #
-diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
-index 14bf7a8429e76..de2fbb7c56c32 100644
---- a/tools/perf/builtin-kwork.c
-+++ b/tools/perf/builtin-kwork.c
-@@ -406,12 +406,14 @@ static int work_push_atom(struct perf_kwork *kwork,
-
- work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
- if (work == NULL) {
-- free(atom);
-+ atom_free(atom);
- return -1;
- }
-
-- if (!profile_event_match(kwork, work, sample))
-+ if (!profile_event_match(kwork, work, sample)) {
-+ atom_free(atom);
- return 0;
-+ }
-
- if (dst_type < KWORK_TRACE_MAX) {
- dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
-@@ -1692,9 +1694,10 @@ int cmd_kwork(int argc, const char **argv)
- static struct perf_kwork kwork = {
- .class_list = LIST_HEAD_INIT(kwork.class_list),
- .tool = {
-- .mmap = perf_event__process_mmap,
-- .mmap2 = perf_event__process_mmap2,
-- .sample = perf_kwork__process_tracepoint_sample,
-+ .mmap = perf_event__process_mmap,
-+ .mmap2 = perf_event__process_mmap2,
-+ .sample = perf_kwork__process_tracepoint_sample,
-+ .ordered_events = true,
- },
- .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
- .sort_list = LIST_HEAD_INIT(kwork.sort_list),
-diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
-index b141f21342740..0b4b4445c5207 100644
---- a/tools/perf/builtin-lock.c
-+++ b/tools/perf/builtin-lock.c
-@@ -524,6 +524,7 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
- struct map *kmap;
- struct symbol *sym;
- u64 ip;
-+ const char *arch = perf_env__arch(machine->env);
-
- if (list_empty(&callstack_filters))
- return true;
-@@ -531,7 +532,21 @@ bool match_callstack_filter(struct machine *machine, u64 *callstack)
- for (int i = 0; i < max_stack_depth; i++) {
- struct callstack_filter *filter;
-
-- if (!callstack || !callstack[i])
-+ /*
-+		 * On powerpc, the callchain saved by the kernel always includes
-+		 * the NIP (next instruction pointer), the LR (link register), and
-+		 * the contents of the LR save area in the second stack frame as
-+		 * its first three entries. In certain scenarios it is possible to
-+		 * have invalid kernel instruction addresses in either the LR or
-+		 * the second stack frame's LR; in that case the kernel stores
-+		 * that address as zero.
-+		 *
-+		 * The check below keeps walking the callstack in case the first
-+		 * or second callstack entry holds a zero address on powerpc.
-+ */
-+ if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
-+ (i != 1 && i != 2))))
- break;
-
- ip = callstack[i];
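Unpacking the new break condition: the walk stops at a zero entry unless the architecture is powerpc and the index is 1 or 2, where a zeroed LR or LR-save-area slot is expected and gets skipped instead. The same predicate as a standalone sketch (stop_walk is hypothetical):

    #include <string.h>

    static int stop_walk(const char *arch, const unsigned long long *callstack, int i)
    {
            if (!callstack)
                    return 1;
            if (callstack[i])
                    return 0;       /* a real address: keep walking */
            /* zero entry: stop unless it is powerpc index 1 or 2 */
            return strcmp(arch, "powerpc") || (i != 1 && i != 2);
    }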
-diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
-index 07b48f6df48eb..a3af805a1d572 100644
---- a/tools/perf/builtin-stat.c
-+++ b/tools/perf/builtin-stat.c
-@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
- * taking the highest cpu number to be the size of
- * the aggregation translate cpumap.
- */
-- if (evsel_list->core.user_requested_cpus)
-+ if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
- nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
- else
- nr = 0;
-diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
-index 1e7e8901a4450..e2848a9d48487 100644
---- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
-+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
-@@ -1,362 +1,384 @@
- [
- {
-+ "MetricName": "branch_miss_pred_rate",
- "MetricExpr": "BR_MIS_PRED / BR_PRED",
- "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
-- "MetricGroup": "Branch Prediction",
-- "MetricName": "Misprediction"
-+ "MetricGroup": "branch",
-+ "ScaleUnit": "100%"
- },
- {
-- "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
-- "BriefDescription": "Branch predictor misprediction rate",
-- "MetricGroup": "Branch Prediction",
-- "MetricName": "Misprediction (retired)"
-- },
-- {
-- "MetricExpr": "BUS_ACCESS / ( BUS_CYCLES * 1)",
-+ "MetricName": "bus_utilization",
-+ "MetricExpr": "((BUS_ACCESS / (BUS_CYCLES * 1)) * 100)",
- "BriefDescription": "Core-to-uncore bus utilization",
- "MetricGroup": "Bus",
-- "MetricName": "Bus utilization"
-+ "ScaleUnit": "1percent of bus cycles"
- },
- {
-- "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
-- "BriefDescription": "L1D cache miss rate",
-- "MetricGroup": "Cache",
-- "MetricName": "L1D cache miss"
-+ "MetricName": "l1d_cache_miss_ratio",
-+ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
-+ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
-+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
-+ "ScaleUnit": "1per cache access"
-+ },
-+ {
-+ "MetricName": "l1i_cache_miss_ratio",
-+ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
-+ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
-+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
-+ "ScaleUnit": "1per cache access"
- },
- {
-+ "MetricName": "Miss_Ratio;l1d_cache_read_miss",
- "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
- "BriefDescription": "L1D cache read miss rate",
- "MetricGroup": "Cache",
-- "MetricName": "L1D cache read miss"
-+ "ScaleUnit": "1per cache read access"
- },
- {
-- "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
-- "BriefDescription": "L1I cache miss rate",
-- "MetricGroup": "Cache",
-- "MetricName": "L1I cache miss"
-- },
-- {
-- "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
-- "BriefDescription": "L2 cache miss rate",
-- "MetricGroup": "Cache",
-- "MetricName": "L2 cache miss"
-+ "MetricName": "l2_cache_miss_ratio",
-+ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
-+ "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
-+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
-+ "ScaleUnit": "1per cache access"
- },
- {
-+ "MetricName": "l1i_cache_read_miss_rate",
- "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
- "BriefDescription": "L1I cache read miss rate",
- "MetricGroup": "Cache",
-- "MetricName": "L1I cache read miss"
-+ "ScaleUnit": "1per cache access"
- },
- {
-+ "MetricName": "l2d_cache_read_miss_rate",
- "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
- "BriefDescription": "L2 cache read miss rate",
- "MetricGroup": "Cache",
-- "MetricName": "L2 cache read miss"
-+ "ScaleUnit": "1per cache read access"
- },
- {
-- "MetricExpr": "(L1D_CACHE_LMISS_RD * 1000) / INST_RETIRED",
-+ "MetricName": "l1d_cache_miss_mpki",
-+ "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED",
- "BriefDescription": "Misses per thousand instructions (data)",
- "MetricGroup": "Cache",
-- "MetricName": "MPKI data"
-+ "ScaleUnit": "1MPKI"
- },
- {
-- "MetricExpr": "(L1I_CACHE_LMISS * 1000) / INST_RETIRED",
-+ "MetricName": "l1i_cache_miss_mpki",
-+ "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED",
- "BriefDescription": "Misses per thousand instructions (instruction)",
- "MetricGroup": "Cache",
-- "MetricName": "MPKI instruction"
-+ "ScaleUnit": "1MPKI"
- },
- {
-- "MetricExpr": "ASE_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "ASE mix"
-+ "MetricName": "simd_percentage",
-+ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
-+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "CRYPTO_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of crypto data processing operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "Crypto mix"
-+ "MetricName": "crypto_percentage",
-+ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
-+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "VFP_SPEC / (duration_time *1000000000)",
-+ "MetricName": "gflops",
-+ "MetricExpr": "VFP_SPEC / (duration_time * 1e9)",
- "BriefDescription": "Giga-floating point operations per second",
-- "MetricGroup": "Instruction",
-- "MetricName": "GFLOPS_ISSUED"
-+ "MetricGroup": "InstructionMix"
- },
- {
-- "MetricExpr": "DP_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of integer data processing operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "Integer mix"
-+ "MetricName": "integer_dp_percentage",
-+ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
-+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "INST_RETIRED / CPU_CYCLES",
-- "BriefDescription": "Instructions per cycle",
-- "MetricGroup": "Instruction",
-- "MetricName": "IPC"
-+ "MetricName": "ipc",
-+ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
-+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
-+ "MetricGroup": "General",
-+ "ScaleUnit": "1per cycle"
- },
- {
-- "MetricExpr": "LD_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of load operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "Load mix"
-+ "MetricName": "load_percentage",
-+ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
-+ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "LDST_SPEC/ OP_SPEC",
-- "BriefDescription": "Proportion of load & store operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "Load-store mix"
-+ "MetricName": "load_store_spec_rate",
-+ "MetricExpr": "((LDST_SPEC / INST_SPEC) * 100)",
-+        "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "INST_RETIRED / (duration_time * 1000000)",
-+ "MetricName": "retired_mips",
-+ "MetricExpr": "INST_RETIRED / (duration_time * 1e6)",
- "BriefDescription": "Millions of instructions per second",
-- "MetricGroup": "Instruction",
-- "MetricName": "MIPS_RETIRED"
-+ "MetricGroup": "InstructionMix"
- },
- {
-- "MetricExpr": "INST_SPEC / (duration_time * 1000000)",
-+ "MetricName": "spec_utilization_mips",
-+ "MetricExpr": "INST_SPEC / (duration_time * 1e6)",
- "BriefDescription": "Millions of instructions per second",
-- "MetricGroup": "Instruction",
-- "MetricName": "MIPS_UTILIZATION"
-- },
-- {
-- "MetricExpr": "PC_WRITE_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of software change of PC operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "PC write mix"
-+ "MetricGroup": "PEutilization"
- },
- {
-- "MetricExpr": "ST_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of store operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "Store mix"
-+ "MetricName": "pc_write_spec_rate",
-+ "MetricExpr": "((PC_WRITE_SPEC / INST_SPEC) * 100)",
-+        "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "VFP_SPEC / OP_SPEC",
-- "BriefDescription": "Proportion of FP operations",
-- "MetricGroup": "Instruction",
-- "MetricName": "VFP mix"
-+ "MetricName": "store_percentage",
-+ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
-+ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "1 - (OP_RETIRED/ (CPU_CYCLES * 4))",
-- "BriefDescription": "Proportion of slots lost",
-- "MetricGroup": "Speculation / TDA",
-- "MetricName": "CPU lost"
-+ "MetricName": "scalar_fp_percentage",
-+ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
-+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
-+ "MetricGroup": "Operation_Mix",
-+ "ScaleUnit": "1percent of operations"
- },
- {
-- "MetricExpr": "OP_RETIRED/ (CPU_CYCLES * 4)",
-- "BriefDescription": "Proportion of slots retiring",
-- "MetricGroup": "Speculation / TDA",
-- "MetricName": "CPU utilization"
-+ "MetricName": "retired_rate",
-+ "MetricExpr": "OP_RETIRED / OP_SPEC",
-+        "BriefDescription": "Of all the micro-operations issued, what percentage are retired (committed)",
-+ "MetricGroup": "General",
-+ "ScaleUnit": "100%"
- },
- {
-- "MetricExpr": "OP_RETIRED - OP_SPEC",
-- "BriefDescription": "Operations lost due to misspeculation",
-- "MetricGroup": "Speculation / TDA",
-- "MetricName": "Operations lost"
-+ "MetricName": "wasted",
-+ "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))",
-+ "BriefDescription": "Of all the micro-operations issued, what proportion are lost",
-+ "MetricGroup": "General",
-+ "ScaleUnit": "100%"
- },
- {
-- "MetricExpr": "1 - (OP_RETIRED / OP_SPEC)",
-- "BriefDescription": "Proportion of operations lost",
-- "MetricGroup": "Speculation / TDA",
-- "MetricName": "Operations lost (ratio)"
-+ "MetricName": "wasted_rate",
-+ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
-+        "BriefDescription": "Of all the micro-operations issued, what percentage are not retired (committed)",
-+ "MetricGroup": "General",
-+ "ScaleUnit": "100%"
- },
- {
-- "MetricExpr": "OP_RETIRED / OP_SPEC",
-- "BriefDescription": "Proportion of operations retired",
-- "MetricGroup": "Speculation / TDA",
-- "MetricName": "Operations retired"
-- },
-- {
-- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
-+ "MetricName": "stall_backend_cache_rate",
-+ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
- "MetricGroup": "Stall",
-- "MetricName": "Stall backend cache cycles"
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
-+ "MetricName": "stall_backend_resource_rate",
-+ "MetricExpr": "((STALL_BACKEND_RESOURCE / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
- "MetricGroup": "Stall",
-- "MetricName": "Stall backend resource cycles"
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
-+ "MetricName": "stall_backend_tlb_rate",
-+ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
- "MetricGroup": "Stall",
-- "MetricName": "Stall backend tlb cycles"
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
-+ "MetricName": "stall_frontend_cache_rate",
-+ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
- "MetricGroup": "Stall",
-- "MetricName": "Stall frontend cache cycles"
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
-+ "MetricName": "stall_frontend_tlb_rate",
-+ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
- "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
- "MetricGroup": "Stall",
-- "MetricName": "Stall frontend tlb cycles"
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "DTLB_WALK / L1D_TLB",
-- "BriefDescription": "D-side walk per d-side translation request",
-- "MetricGroup": "TLB",
-- "MetricName": "DTLB walks"
-+ "MetricName": "dtlb_walk_ratio",
-+ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
-+ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
-+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
-+ "ScaleUnit": "1per TLB access"
- },
- {
-- "MetricExpr": "ITLB_WALK / L1I_TLB",
-- "BriefDescription": "I-side walk per i-side translation request",
-- "MetricGroup": "TLB",
-- "MetricName": "ITLB walks"
-+ "MetricName": "itlb_walk_ratio",
-+ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
-+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
-+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
-+ "ScaleUnit": "1per TLB access"
- },
- {
-- "MetricExpr": "STALL_SLOT_BACKEND / (CPU_CYCLES * 4)",
-- "BriefDescription": "Fraction of slots backend bound",
-- "MetricGroup": "TopDownL1",
-- "MetricName": "backend"
-+ "ArchStdEvent": "backend_bound"
- },
- {
-- "MetricExpr": "1 - (retiring + lost + backend)",
-- "BriefDescription": "Fraction of slots frontend bound",
-- "MetricGroup": "TopDownL1",
-- "MetricName": "frontend"
-+ "ArchStdEvent": "frontend_bound",
-+ "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)"
- },
- {
-- "MetricExpr": "((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * 4))",
-+ "MetricName": "slots_lost_misspeculation_fraction",
-+ "MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))",
- "BriefDescription": "Fraction of slots lost due to misspeculation",
-- "MetricGroup": "TopDownL1",
-- "MetricName": "lost"
-+ "MetricGroup": "Default;TopdownL1",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "(OP_RETIRED / (CPU_CYCLES * 4))",
-+ "MetricName": "retired_fraction",
-+ "MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))",
- "BriefDescription": "Fraction of slots retiring, useful work",
-- "MetricGroup": "TopDownL1",
-- "MetricName": "retiring"
-+ "MetricGroup": "Default;TopdownL1",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "backend - backend_memory",
-+ "MetricName": "backend_core",
-+ "MetricExpr": "(backend_bound / 100) - backend_memory",
- "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "backend_core"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "100%"
- },
- {
-- "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE + STALL_BACKEND_MEM) / CPU_CYCLES ",
-+ "MetricName": "backend_memory",
-+ "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES",
- "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "backend_memory"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "100%"
- },
- {
-- "MetricExpr": " (BR_MIS_PRED_RETIRED / GPC_FLUSH) * lost",
-+ "MetricName": "branch_mispredict",
-+ "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction",
-        "BriefDescription": "Fraction of slots lost due to branch misprediction",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "branch_mispredict"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "frontend - frontend_latency",
-+ "MetricName": "frontend_bandwidth",
-+ "MetricExpr": "frontend_bound - frontend_latency",
- "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "frontend_bandwidth"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - (frontend * CPU_CYCLES * 4)) / 4)) / CPU_CYCLES",
-+ "MetricName": "frontend_latency",
-+ "MetricExpr": "((STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES) * 100",
- "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "frontend_latency"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "lost - branch_mispredict",
-+ "MetricName": "other_miss_pred",
-+ "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict",
- "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "other_clears"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "(IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6)",
-+ "MetricName": "pipe_utilization",
-+ "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))",
- "BriefDescription": "Fraction of execute slots utilized",
-- "MetricGroup": "TopDownL2",
-- "MetricName": "pipe_utilization"
-+ "MetricGroup": "TopdownL2",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
-+ "MetricName": "d_cache_l2_miss_rate",
-+ "MetricExpr": "((STALL_BACKEND_MEM / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "d_cache_l2_miss"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
-+ "MetricName": "d_cache_miss_rate",
-+ "MetricExpr": "((STALL_BACKEND_CACHE / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "d_cache_miss"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
-+ "MetricName": "d_tlb_miss_rate",
-+ "MetricExpr": "((STALL_BACKEND_TLB / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "d_tlb_miss"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
-+ "MetricName": "fsu_pipe_utilization",
-+ "MetricExpr": "((FSU_ISSUED / (CPU_CYCLES * 2)) * 100)",
- "BriefDescription": "Fraction of FSU execute slots utilized",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "fsu_pipe_utilization"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
-+ "MetricName": "i_cache_miss_rate",
-+ "MetricExpr": "((STALL_FRONTEND_CACHE / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "i_cache_miss"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": " STALL_FRONTEND_TLB / CPU_CYCLES ",
-+ "MetricName": "i_tlb_miss_rate",
-+ "MetricExpr": "((STALL_FRONTEND_TLB / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "i_tlb_miss"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES / 4)",
-+ "MetricName": "ixu_pipe_utilization",
-+ "MetricExpr": "((IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)) * 100)",
- "BriefDescription": "Fraction of IXU execute slots utilized",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "ixu_pipe_utilization"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
-+ "MetricName": "stall_recovery_rate",
-+ "MetricExpr": "((IDR_STALL_FLUSH / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "recovery"
-- },
-- {
-- "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
-- "BriefDescription": "Fraction of cycles the CPU was stalled due to core resource shortage",
-- "MetricGroup": "TopDownL3",
-- "MetricName": "resource"
-+ "MetricGroup": "TopdownL3",
-+ "ScaleUnit": "1percent of slots"
- },
- {
-- "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES ",
-+ "MetricName": "stall_fsu_sched_rate",
-+ "MetricExpr": "((IDR_STALL_FSU_SCHED / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
-- "MetricGroup": "TopDownL4",
-- "MetricName": "stall_fsu_sched"
-+ "MetricGroup": "TopdownL4",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES ",
-+ "MetricName": "stall_ixu_sched_rate",
-+ "MetricExpr": "((IDR_STALL_IXU_SCHED / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
-- "MetricGroup": "TopDownL4",
-- "MetricName": "stall_ixu_sched"
-+ "MetricGroup": "TopdownL4",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES ",
-+ "MetricName": "stall_lob_id_rate",
-+ "MetricExpr": "((IDR_STALL_LOB_ID / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
-- "MetricGroup": "TopDownL4",
-- "MetricName": "stall_lob_id"
-+ "MetricGroup": "TopdownL4",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
-+ "MetricName": "stall_rob_id_rate",
-+ "MetricExpr": "((IDR_STALL_ROB_ID / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
-- "MetricGroup": "TopDownL4",
-- "MetricName": "stall_rob_id"
-+ "MetricGroup": "TopdownL4",
-+ "ScaleUnit": "1percent of cycles"
- },
- {
-- "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES ",
-+ "MetricName": "stall_sob_id_rate",
-+ "MetricExpr": "((IDR_STALL_SOB_ID / CPU_CYCLES) * 100)",
- "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
-- "MetricGroup": "TopDownL4",
-- "MetricName": "stall_sob_id"
-+ "MetricGroup": "TopdownL4",
-+ "ScaleUnit": "1percent of cycles"
- }
- ]
-diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
-index c606ae03cd27d..0e0253d0e7577 100644
---- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
-+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
-@@ -195,7 +195,7 @@
- "BriefDescription": "Threshold counter exceeded a value of 128."
- },
- {
-- "EventCode": "0x400FA",
-+ "EventCode": "0x500FA",
- "EventName": "PM_RUN_INST_CMPL",
- "BriefDescription": "PowerPC instruction completed while the run latch is set."
- }
-diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
-index 8fc62b8f667d8..e1f55fcfa0d02 100644
---- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
-+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
-@@ -48,6 +48,12 @@
- "MetricName": "C7_Pkg_Residency",
- "ScaleUnit": "100%"
- },
-+ {
-+ "BriefDescription": "Uncore frequency per die [GHZ]",
-+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
-+ "MetricGroup": "SoC",
-+ "MetricName": "UNCORE_FREQ"
-+ },
- {
- "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
- "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
-@@ -652,7 +658,7 @@
- },
- {
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
-- "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
-+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
- "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
- "MetricName": "tma_info_system_dram_bw_use",
- "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
-@@ -690,6 +696,12 @@
- "MetricGroup": "SMT",
- "MetricName": "tma_info_system_smt_2t_utilization"
- },
-+ {
-+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
-+ "MetricExpr": "cbox_0@event\\=0x0@",
-+ "MetricGroup": "SoC",
-+ "MetricName": "tma_info_system_socket_clks"
-+ },
- {
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "tma_info_thread_clks / CPU_CLK_UNHALTED.REF_TSC",
-diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
-index 01f70b8e705a8..21f4d9ba023d9 100644
---- a/tools/perf/util/bpf_off_cpu.c
-+++ b/tools/perf/util/bpf_off_cpu.c
-@@ -98,7 +98,7 @@ static void off_cpu_finish(void *arg __maybe_unused)
- /* v5.18 kernel added prev_state arg, so it needs to check the signature */
- static void check_sched_switch_args(void)
- {
-- const struct btf *btf = bpf_object__btf(skel->obj);
-+ const struct btf *btf = btf__load_vmlinux_btf();
- const struct btf_type *t1, *t2, *t3;
- u32 type_id;
-
-@@ -116,7 +116,8 @@ static void check_sched_switch_args(void)
- return;
-
- t3 = btf__type_by_id(btf, t2->type);
-- if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
-+ /* btf_trace func proto has one more argument for the context */
-+ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
- /* new format: pass prev_state as 4th arg */
- skel->rodata->has_prev_state = true;
- }
-diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
-index 939ec769bf4a5..52c270330ae0d 100644
---- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
-+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
-@@ -153,7 +153,7 @@ static inline
- unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
- {
- unsigned int augmented_len = sizeof(*augmented_arg);
-- int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
-+ int string_len = bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg);
-
- augmented_arg->size = augmented_arg->err = 0;
- /*
-@@ -203,7 +203,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
- _Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
- socklen &= sizeof(augmented_args->saddr) - 1;
-
-- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
-+ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
-
- return augmented__output(args, augmented_args, len + socklen);
- }
-@@ -221,7 +221,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
-
- socklen &= sizeof(augmented_args->saddr) - 1;
-
-- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
-+ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
-
- return augmented__output(args, augmented_args, len + socklen);
- }
-@@ -311,7 +311,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
- if (augmented_args == NULL)
- goto failure;
-
-- if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
-+ if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
- goto failure;
-
- attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
-@@ -325,7 +325,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
- goto failure;
-
- // Now that we read attr->size and tested it against the size limits, read it completely
-- if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
-+ if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
- goto failure;
-
- return augmented__output(args, augmented_args, len + size);
-@@ -347,7 +347,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
- if (size > sizeof(augmented_args->__data))
- goto failure;
-
-- bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
-+ bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
-
- return augmented__output(args, augmented_args, len + size);
- failure:
-@@ -385,7 +385,7 @@ int sys_enter(struct syscall_enter_args *args)
- if (augmented_args == NULL)
- return 1;
-
-- bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
-+ bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args);
-
- /*
- * Jump to syscall specific augmenter, even if the default one,
-@@ -406,7 +406,7 @@ int sys_exit(struct syscall_exit_args *args)
- if (pid_filter__has(&pids_filtered, getpid()))
- return 0;
-
-- bpf_probe_read(&exit_args, sizeof(exit_args), args);
-+ bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
- /*
- * Jump to syscall specific return augmenter, even if the default one,
- * "!raw_syscalls:unaugmented" that will just return 1 to return the
-diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore
-new file mode 100644
-index 0000000000000..49502c04183a2
---- /dev/null
-+++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore
-@@ -0,0 +1 @@
-+!vmlinux.h
-diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
-index 7ef43f72098e0..c779b9f2e6220 100644
---- a/tools/perf/util/evlist.c
-+++ b/tools/perf/util/evlist.c
-@@ -251,6 +251,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
- .type = PERF_TYPE_SOFTWARE,
- .config = PERF_COUNT_SW_DUMMY,
- .size = sizeof(attr), /* to capture ABI version */
-+ /* Avoid frequency mode for dummy events to avoid associated timers. */
-+ .freq = 0,
-+ .sample_period = 1,
- };
-
- return evsel__new_idx(&attr, evlist->core.nr_entries);
-@@ -277,8 +280,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
- evsel->core.attr.exclude_kernel = 1;
- evsel->core.attr.exclude_guest = 1;
- evsel->core.attr.exclude_hv = 1;
-- evsel->core.attr.freq = 0;
-- evsel->core.attr.sample_period = 1;
- evsel->core.system_wide = system_wide;
- evsel->no_aux_samples = true;
- evsel->name = strdup("dummy:u");
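Setting freq = 0 together with sample_period = 1 opens the dummy event in plain period mode, so the kernel never arms the timer it uses to re-tune frequency-mode sampling periods. A minimal attr sketch mirroring the hunk (init_dummy_attr is hypothetical):

    #include <string.h>
    #include <linux/perf_event.h>

    static void init_dummy_attr(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type = PERF_TYPE_SOFTWARE;
            attr->config = PERF_COUNT_SW_DUMMY;
            attr->size = sizeof(*attr);     /* captures the ABI version */
            attr->freq = 0;                 /* period mode, no adjust timer */
            attr->sample_period = 1;
    }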
-diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
-index 3dc8a4968beb9..ac8c0ef48a7f3 100644
---- a/tools/perf/util/hist.c
-+++ b/tools/perf/util/hist.c
-@@ -2676,8 +2676,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
-
- /* If we have branch cycles always annotate them. */
- if (bs && bs->nr && entries[0].flags.cycles) {
-- int i;
--
- bi = sample__resolve_bstack(sample, al);
- if (bi) {
- struct addr_map_symbol *prev = NULL;
-@@ -2692,7 +2690,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
- * Note that perf stores branches reversed from
- * program order!
- */
-- for (i = bs->nr - 1; i >= 0; i--) {
-+ for (int i = bs->nr - 1; i >= 0; i--) {
- addr_map_symbol__account_cycles(&bi[i].from,
- nonany_branch_mode ? NULL : prev,
- bi[i].flags.cycles);
-@@ -2701,6 +2699,12 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
- if (total_cycles)
- *total_cycles += bi[i].flags.cycles;
- }
-+ for (unsigned int i = 0; i < bs->nr; i++) {
-+ map__put(bi[i].to.ms.map);
-+ maps__put(bi[i].to.ms.maps);
-+ map__put(bi[i].from.ms.map);
-+ maps__put(bi[i].from.ms.maps);
-+ }
- free(bi);
- }
- }
-diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
-index dbf0bc71a63be..f38893e0b0369 100644
---- a/tools/perf/util/intel-pt.c
-+++ b/tools/perf/util/intel-pt.c
-@@ -1512,9 +1512,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
- } else if (ptq->state->flags & INTEL_PT_ASYNC) {
- if (!ptq->state->to_ip)
- ptq->flags = PERF_IP_FLAG_BRANCH |
-+ PERF_IP_FLAG_ASYNC |
- PERF_IP_FLAG_TRACE_END;
- else if (ptq->state->from_nr && !ptq->state->to_nr)
- ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
-+ PERF_IP_FLAG_ASYNC |
- PERF_IP_FLAG_VMEXIT;
- else
- ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
-diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
-index 88f31b3a63acb..e6a8d758f6fe4 100644
---- a/tools/perf/util/machine.c
-+++ b/tools/perf/util/machine.c
-@@ -2624,16 +2624,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
- save_lbr_cursor_node(thread, cursor, i);
- }
-
-- /* Add LBR ip from first entries.to */
-- ip = entries[0].to;
-- flags = &entries[0].flags;
-- *branch_from = entries[0].from;
-- err = add_callchain_ip(thread, cursor, parent,
-- root_al, &cpumode, ip,
-- true, flags, NULL,
-- *branch_from);
-- if (err)
-- return err;
-+ if (lbr_nr > 0) {
-+ /* Add LBR ip from first entries.to */
-+ ip = entries[0].to;
-+ flags = &entries[0].flags;
-+ *branch_from = entries[0].from;
-+ err = add_callchain_ip(thread, cursor, parent,
-+ root_al, &cpumode, ip,
-+ true, flags, NULL,
-+ *branch_from);
-+ if (err)
-+ return err;
-+ }
-
- return 0;
- }
-diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
-index 39ffe8ceb3809..954b235e12e51 100644
---- a/tools/perf/util/mem-events.c
-+++ b/tools/perf/util/mem-events.c
-@@ -185,7 +185,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
- {
- int i = *argv_nr, k = 0;
- struct perf_mem_event *e;
-- struct perf_pmu *pmu;
-
- for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- e = perf_mem_events__ptr(j);
-@@ -202,6 +201,8 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j, NULL);
- } else {
-+ struct perf_pmu *pmu = NULL;
-+
- if (!e->supported) {
- perf_mem_events__print_unsupport_hybrid(e, j);
- return -1;
-diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
-index 21bfe7e0d9444..c3a86ef4b7cf3 100644
---- a/tools/perf/util/parse-events.y
-+++ b/tools/perf/util/parse-events.y
-@@ -79,7 +79,7 @@ static void free_list_evsel(struct list_head* list_evsel)
- %type <str> PE_MODIFIER_BP
- %type <str> PE_EVENT_NAME
- %type <str> PE_DRV_CFG_TERM
--%type <str> name_or_raw name_or_legacy
-+%type <str> name_or_raw
- %destructor { free ($$); } <str>
- %type <term> event_term
- %destructor { parse_events_term__delete ($$); } <term>
-@@ -104,6 +104,7 @@ static void free_list_evsel(struct list_head* list_evsel)
- %type <list_evsel> groups
- %destructor { free_list_evsel ($$); } <list_evsel>
- %type <tracepoint_name> tracepoint_name
-+%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
- %type <hardware_term> PE_TERM_HW
- %destructor { free ($$.str); } <hardware_term>
-
-@@ -679,8 +680,6 @@ event_term
-
- name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
-
--name_or_legacy: PE_NAME | PE_LEGACY_CACHE
--
- event_term:
- PE_RAW
- {
-@@ -695,7 +694,7 @@ PE_RAW
- $$ = term;
- }
- |
--name_or_raw '=' name_or_legacy
-+name_or_raw '=' name_or_raw
- {
- struct parse_events_term *term;
- int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
-@@ -775,7 +774,7 @@ PE_TERM_HW
- $$ = term;
- }
- |
--PE_TERM '=' name_or_legacy
-+PE_TERM '=' name_or_raw
- {
- struct parse_events_term *term;
- int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
-diff --git a/tools/power/cpupower/man/cpupower-powercap-info.1 b/tools/power/cpupower/man/cpupower-powercap-info.1
-index df3087000efb8..145d6f06fa72d 100644
---- a/tools/power/cpupower/man/cpupower-powercap-info.1
-+++ b/tools/power/cpupower/man/cpupower-powercap-info.1
-@@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores.
- .SH "DOCUMENTATION"
-
- kernel sources:
--Documentation/power/powercap/powercap.txt
-+Documentation/power/powercap/powercap.rst
-
-
- .SH "SEE ALSO"
-diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
-index 4a356a7067855..40ad221e88811 100755
---- a/tools/power/pm-graph/sleepgraph.py
-+++ b/tools/power/pm-graph/sleepgraph.py
-@@ -4151,7 +4151,7 @@ def parseKernelLog(data):
- elif(re.match('Enabling non-boot CPUs .*', msg)):
- # start of first cpu resume
- cpu_start = ktime
-- elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)) \
-+ elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) \
- or re.match('psci: CPU(?P<cpu>[0-9]*) killed.*', msg)):
- # end of a cpu suspend, start of the next
- m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
-diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
-index 9a10512e34078..785de89077de0 100644
---- a/tools/power/x86/turbostat/turbostat.c
-+++ b/tools/power/x86/turbostat/turbostat.c
-@@ -2180,7 +2180,7 @@ retry:
- if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
- if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
- return -7;
-- } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
-+ } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
- if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
- return -7;
- }
-@@ -5790,6 +5790,7 @@ void process_cpuid()
- rapl_probe(family, model);
- perf_limit_reasons_probe(family, model);
- automatic_cstate_conversion_probe(family, model);
-+ prewake_cstate_probe(family, model);
-
- check_tcc_offset(model_orig);
-
-diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
-index fb6ab9cef84f7..b885462999022 100644
---- a/tools/testing/cxl/test/cxl.c
-+++ b/tools/testing/cxl/test/cxl.c
-@@ -831,7 +831,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
- cxld->interleave_ways = 2;
- else
- cxld->interleave_ways = 1;
-- cxld->interleave_granularity = 256;
-+ cxld->interleave_granularity = 4096;
- cxld->hpa_range = (struct range) {
- .start = base,
- .end = base + size - 1,
-diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
-index 464fc39ed2776..68118c37f0b56 100644
---- a/tools/testing/cxl/test/mem.c
-+++ b/tools/testing/cxl/test/mem.c
-@@ -1450,11 +1450,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
- mdata->mes.mds = mds;
- cxl_mock_add_event_logs(&mdata->mes);
-
-- cxlmd = devm_cxl_add_memdev(cxlds);
-+ cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
- if (IS_ERR(cxlmd))
- return PTR_ERR(cxlmd);
-
-- rc = cxl_memdev_setup_fw_upload(mds);
-+ rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
- if (rc)
- return rc;
-
-diff --git a/tools/testing/selftests/arm64/fp/za-fork.c b/tools/testing/selftests/arm64/fp/za-fork.c
-index b86cb1049497f..587b946482226 100644
---- a/tools/testing/selftests/arm64/fp/za-fork.c
-+++ b/tools/testing/selftests/arm64/fp/za-fork.c
-@@ -85,7 +85,7 @@ int main(int argc, char **argv)
- */
- ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
- if (ret >= 0) {
-- ksft_test_result(fork_test(), "fork_test");
-+ ksft_test_result(fork_test(), "fork_test\n");
-
- } else {
- ksft_print_msg("SME not supported\n");
-diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
-index 18cf7b17463d9..98dde091d2825 100644
---- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
-+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
-@@ -94,14 +94,8 @@ static struct {
- { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
- { "incorrect_head_off1", "bpf_list_head not found at offset=25" },
- { "incorrect_head_off2", "bpf_list_head not found at offset=1" },
-- { "pop_front_off",
-- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
-- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
-- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
-- { "pop_back_off",
-- "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
-- "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
-- "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
-+ { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
-+ { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
- };
-
- static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
-diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
-index c7636e18b1ebd..aa9f67eb1c95b 100644
---- a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
-+++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c
-@@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
- int link_fd[2] = {};
- __s32 btf_id[2] = {};
-
-+ if (!env.has_testmod) {
-+ test__skip();
-+ return;
-+ }
-+
- LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
- .expected_attach_type = BPF_TRACE_FENTRY,
- );
-diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
-index 58fe2c586ed76..09c189761926c 100644
---- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
-+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
-@@ -271,11 +271,11 @@ static void test_tailcall_count(const char *which)
-
- data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
- if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
-- return;
-+ goto out;
-
- data_fd = bpf_map__fd(data_map);
-- if (CHECK_FAIL(map_fd < 0))
-- return;
-+ if (CHECK_FAIL(data_fd < 0))
-+ goto out;
-
- i = 0;
- err = bpf_map_lookup_elem(data_fd, &i, &val);
-@@ -352,11 +352,11 @@ static void test_tailcall_4(void)
-
- data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
- if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
-- return;
-+ goto out;
-
- data_fd = bpf_map__fd(data_map);
-- if (CHECK_FAIL(map_fd < 0))
-- return;
-+ if (CHECK_FAIL(data_fd < 0))
-+ goto out;
-
- for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
-@@ -442,11 +442,11 @@ static void test_tailcall_5(void)
-
- data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
- if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
-- return;
-+ goto out;
-
- data_fd = bpf_map__fd(data_map);
-- if (CHECK_FAIL(map_fd < 0))
-- return;
-+ if (CHECK_FAIL(data_fd < 0))
-+ goto out;
-
- for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
- snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
-@@ -631,11 +631,11 @@ static void test_tailcall_bpf2bpf_2(void)
-
- data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
- if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
-- return;
-+ goto out;
-
- data_fd = bpf_map__fd(data_map);
-- if (CHECK_FAIL(map_fd < 0))
-- return;
-+ if (CHECK_FAIL(data_fd < 0))
-+ goto out;
-
- i = 0;
- err = bpf_map_lookup_elem(data_fd, &i, &val);
-@@ -805,11 +805,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)
-
- data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
- if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
-- return;
-+ goto out;
-
- data_fd = bpf_map__fd(data_map);
-- if (CHECK_FAIL(map_fd < 0))
-- return;
-+ if (CHECK_FAIL(data_fd < 0))
-+ goto out;
-
- i = 0;
- val.noise = noise;
-@@ -872,7 +872,7 @@ static void test_tailcall_bpf2bpf_6(void)
- ASSERT_EQ(topts.retval, 0, "tailcall retval");
-
- data_fd = bpf_map__fd(obj->maps.bss);
-- if (!ASSERT_GE(map_fd, 0, "bss map fd"))
-+ if (!ASSERT_GE(data_fd, 0, "bss map fd"))
- goto out;
-
- i = 0;
-diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
-index 38a57a2e70dbe..799fff4995d87 100644
---- a/tools/testing/selftests/bpf/progs/bpf_misc.h
-+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
-@@ -99,6 +99,9 @@
- #elif defined(__TARGET_ARCH_arm64)
- #define SYSCALL_WRAPPER 1
- #define SYS_PREFIX "__arm64_"
-+#elif defined(__TARGET_ARCH_riscv)
-+#define SYSCALL_WRAPPER 1
-+#define SYS_PREFIX "__riscv_"
- #else
- #define SYSCALL_WRAPPER 0
- #define SYS_PREFIX "__se_"
-diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
-index f4c63daba2297..6438982b928bd 100644
---- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
-+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
-@@ -591,7 +591,9 @@ int pop_ptr_off(void *(*op)(void *head))
- n = op(&p->head);
- bpf_spin_unlock(&p->lock);
-
-- bpf_this_cpu_ptr(n);
-+ if (!n)
-+ return 0;
-+ bpf_spin_lock((void *)n);
- return 0;
- }
-
-diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
-index 5bc86af80a9ad..71735dbf33d4f 100644
---- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
-+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
-@@ -75,9 +75,10 @@ l0_%=: r0 += 1; \
- " ::: __clobber_all);
- }
-
--SEC("tracepoint")
-+SEC("socket")
- __description("bounded loop, start in the middle")
--__failure __msg("back-edge")
-+__success
-+__failure_unpriv __msg_unpriv("back-edge")
- __naked void loop_start_in_the_middle(void)
- {
- asm volatile (" \
-@@ -136,7 +137,9 @@ l0_%=: exit; \
-
- SEC("tracepoint")
- __description("bounded recursion")
--__failure __msg("back-edge")
-+__failure
-+/* verifier limitation in detecting max stack depth */
-+__msg("the call stack of 8 frames is too deep !")
- __naked void bounded_recursion(void)
- {
- asm volatile (" \
-diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
-index 77bd492c60248..2f9f6f250f171 100644
---- a/tools/testing/selftests/bpf/test_progs.h
-+++ b/tools/testing/selftests/bpf/test_progs.h
-@@ -417,6 +417,8 @@ int get_bpf_max_tramp_links(void);
- #define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
- #elif defined(__aarch64__)
- #define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep"
-+#elif defined(__riscv)
-+#define SYS_NANOSLEEP_KPROBE_NAME "__riscv_sys_nanosleep"
- #else
- #define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
- #endif
-diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
-index 1bdf2b43e49ea..3d5cd51071f04 100644
---- a/tools/testing/selftests/bpf/verifier/calls.c
-+++ b/tools/testing/selftests/bpf/verifier/calls.c
-@@ -442,7 +442,7 @@
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
-- .errstr = "back-edge from insn 0 to 0",
-+ .errstr = "the call stack of 9 frames is too deep",
- .result = REJECT,
- },
- {
-@@ -799,7 +799,7 @@
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
-- .errstr = "back-edge",
-+ .errstr = "the call stack of 9 frames is too deep",
- .result = REJECT,
- },
- {
-@@ -811,7 +811,7 @@
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
-- .errstr = "back-edge",
-+ .errstr = "the call stack of 9 frames is too deep",
- .result = REJECT,
- },
- {
-diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
-index f9297900cea6d..78f19c255f20b 100644
---- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
-+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
-@@ -9,8 +9,8 @@
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
-- .errstr = "invalid BPF_LD_IMM insn",
-- .errstr_unpriv = "R1 pointer comparison",
-+ .errstr = "jump into the middle of ldimm64 insn 1",
-+ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
- .result = REJECT,
- },
- {
-@@ -23,8 +23,8 @@
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
-- .errstr = "invalid BPF_LD_IMM insn",
-- .errstr_unpriv = "R1 pointer comparison",
-+ .errstr = "jump into the middle of ldimm64 insn 1",
-+ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
- .result = REJECT,
- },
- {
-diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
-index e60cf4da8fb07..1c61e3c022cb8 100644
---- a/tools/testing/selftests/clone3/clone3.c
-+++ b/tools/testing/selftests/clone3/clone3.c
-@@ -196,7 +196,12 @@ int main(int argc, char *argv[])
- CLONE3_ARGS_NO_TEST);
-
- /* Do a clone3() in a new time namespace */
-- test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
-+ if (access("/proc/self/ns/time", F_OK) == 0) {
-+ test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
-+ } else {
-+ ksft_print_msg("Time namespaces are not supported\n");
-+ ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n");
-+ }
-
- /* Do a clone3() with exit signal (SIGCHLD) in flags */
- test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
-diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
-index 9674a19396a32..7bc7af4eb2c17 100644
---- a/tools/testing/selftests/efivarfs/create-read.c
-+++ b/tools/testing/selftests/efivarfs/create-read.c
-@@ -32,8 +32,10 @@ int main(int argc, char **argv)
- rc = read(fd, buf, sizeof(buf));
- if (rc != 0) {
- fprintf(stderr, "Reading a new var should return EOF\n");
-+ close(fd);
- return EXIT_FAILURE;
- }
-
-+ close(fd);
- return EXIT_SUCCESS;
- }
-diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
-index 5d52f64dfb430..7afe05e8c4d79 100644
---- a/tools/testing/selftests/lkdtm/config
-+++ b/tools/testing/selftests/lkdtm/config
-@@ -9,7 +9,6 @@ CONFIG_INIT_ON_FREE_DEFAULT_ON=y
- CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
- CONFIG_UBSAN=y
- CONFIG_UBSAN_BOUNDS=y
--CONFIG_UBSAN_TRAP=y
- CONFIG_STACKPROTECTOR_STRONG=y
- CONFIG_SLUB_DEBUG=y
- CONFIG_SLUB_DEBUG_ON=y
-diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
-index 607b8d7e3ea34..2f3a1b96da6e3 100644
---- a/tools/testing/selftests/lkdtm/tests.txt
-+++ b/tools/testing/selftests/lkdtm/tests.txt
-@@ -7,7 +7,7 @@ EXCEPTION
- #EXHAUST_STACK Corrupts memory on failure
- #CORRUPT_STACK Crashes entire system on success
- #CORRUPT_STACK_STRONG Crashes entire system on success
--ARRAY_BOUNDS
-+ARRAY_BOUNDS call trace:|UBSAN: array-index-out-of-bounds
- CORRUPT_LIST_ADD list_add corruption
- CORRUPT_LIST_DEL list_del corruption
- STACK_GUARD_PAGE_LEADING
-diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
-index bc91bef5d254e..0c5e469ae38fa 100644
---- a/tools/testing/selftests/mm/mdwe_test.c
-+++ b/tools/testing/selftests/mm/mdwe_test.c
-@@ -168,13 +168,10 @@ TEST_F(mdwe, mmap_FIXED)
- self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
- ASSERT_NE(self->p, MAP_FAILED);
-
-- p = mmap(self->p + self->size, self->size, PROT_READ | PROT_EXEC,
-+ /* MAP_FIXED unmaps the existing page before mapping which is allowed */
-+ p = mmap(self->p, self->size, PROT_READ | PROT_EXEC,
- self->flags | MAP_FIXED, 0, 0);
-- if (variant->enabled) {
-- EXPECT_EQ(p, MAP_FAILED);
-- } else {
-- EXPECT_EQ(p, self->p);
-- }
-+ EXPECT_EQ(p, self->p);
- }
-
- TEST_F(mdwe, arm64_BTI)
-diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
-index 5b88f7129fea4..79a3dd75590e8 100644
---- a/tools/testing/selftests/net/af_unix/diag_uid.c
-+++ b/tools/testing/selftests/net/af_unix/diag_uid.c
-@@ -148,7 +148,6 @@ void receive_response(struct __test_metadata *_metadata,
- .msg_iov = &iov,
- .msg_iovlen = 1
- };
-- struct unix_diag_req *udr;
- struct nlmsghdr *nlh;
- int ret;
-
-diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
-index 24b21b15ed3fb..6ff3e732f449f 100644
---- a/tools/testing/selftests/net/cmsg_sender.c
-+++ b/tools/testing/selftests/net/cmsg_sender.c
-@@ -416,9 +416,9 @@ int main(int argc, char *argv[])
- {
- struct addrinfo hints, *ai;
- struct iovec iov[1];
-+ unsigned char *buf;
- struct msghdr msg;
- char cbuf[1024];
-- char *buf;
- int err;
- int fd;
-
-diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
-index 9a8229abfa026..be4a30a0d02ae 100644
---- a/tools/testing/selftests/net/ipsec.c
-+++ b/tools/testing/selftests/net/ipsec.c
-@@ -2263,7 +2263,7 @@ static int check_results(void)
-
- int main(int argc, char **argv)
- {
-- unsigned int nr_process = 1;
-+ long nr_process = 1;
- int route_sock = -1, ret = KSFT_SKIP;
- int test_desc_fd[2];
- uint32_t route_seq;
-@@ -2284,7 +2284,7 @@ int main(int argc, char **argv)
- exit_usage(argv);
- }
-
-- if (nr_process > MAX_PROCESSES || !nr_process) {
-+ if (nr_process > MAX_PROCESSES || nr_process < 1) {
- printk("nr_process should be between [1; %u]",
- MAX_PROCESSES);
- exit_usage(argv);
-diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
-index c7f9ebeebc2c5..d2043ec3bf6d6 100644
---- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
-+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
-@@ -18,6 +18,7 @@
-
- #include <sys/ioctl.h>
- #include <sys/poll.h>
-+#include <sys/random.h>
- #include <sys/sendfile.h>
- #include <sys/stat.h>
- #include <sys/socket.h>
-@@ -1125,15 +1126,11 @@ again:
-
- static void init_rng(void)
- {
-- int fd = open("/dev/urandom", O_RDONLY);
- unsigned int foo;
-
-- if (fd > 0) {
-- int ret = read(fd, &foo, sizeof(foo));
--
-- if (ret < 0)
-- srand(fd + foo);
-- close(fd);
-+ if (getrandom(&foo, sizeof(foo), 0) == -1) {
-+ perror("getrandom");
-+ exit(1);
- }
-
- srand(foo);
-diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
-index 8672d898f8cda..218aac4673212 100644
---- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
-+++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
-@@ -18,6 +18,7 @@
- #include <time.h>
-
- #include <sys/ioctl.h>
-+#include <sys/random.h>
- #include <sys/socket.h>
- #include <sys/types.h>
- #include <sys/wait.h>
-@@ -519,15 +520,11 @@ static int client(int unixfd)
-
- static void init_rng(void)
- {
-- int fd = open("/dev/urandom", O_RDONLY);
- unsigned int foo;
-
-- if (fd > 0) {
-- int ret = read(fd, &foo, sizeof(foo));
--
-- if (ret < 0)
-- srand(fd + foo);
-- close(fd);
-+ if (getrandom(&foo, sizeof(foo), 0) == -1) {
-+ perror("getrandom");
-+ exit(1);
- }
-
- srand(foo);
-diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
-index dc895b7b94e19..8eec7d2c1fc69 100755
---- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
-+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
-@@ -3237,7 +3237,7 @@ fastclose_tests()
- if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
- test_linkfail=1024 fastclose=server \
- run_tests $ns1 $ns2 10.0.1.1
-- chk_join_nr 0 0 0
-+ chk_join_nr 0 0 0 0 0 0 1
- chk_fclose_nr 1 1 invert
- chk_rst_nr 1 1
- fi
-@@ -3289,6 +3289,7 @@ userspace_pm_rm_sf_addr_ns1()
- local addr=$1
- local id=$2
- local tk sp da dp
-+ local cnt_addr cnt_sf
-
- tk=$(grep "type:1," "$evts_ns1" |
- sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
-@@ -3298,11 +3299,13 @@ userspace_pm_rm_sf_addr_ns1()
- sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
- dp=$(grep "type:10" "$evts_ns1" |
- sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
-+ cnt_addr=$(rm_addr_count ${ns1})
-+ cnt_sf=$(rm_sf_count ${ns1})
- ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
- ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
- lport $sp rip $da rport $dp token $tk
-- wait_rm_addr $ns1 1
-- wait_rm_sf $ns1 1
-+ wait_rm_addr $ns1 "${cnt_addr}"
-+ wait_rm_sf $ns1 "${cnt_sf}"
- }
-
- userspace_pm_add_sf()
-@@ -3324,17 +3327,20 @@ userspace_pm_rm_sf_addr_ns2()
- local addr=$1
- local id=$2
- local tk da dp sp
-+ local cnt_addr cnt_sf
-
- tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
- dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
- sp=$(grep "type:10" "$evts_ns2" |
- sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
-+ cnt_addr=$(rm_addr_count ${ns2})
-+ cnt_sf=$(rm_sf_count ${ns2})
- ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
- ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
- rip $da rport $dp token $tk
-- wait_rm_addr $ns2 1
-- wait_rm_sf $ns2 1
-+ wait_rm_addr $ns2 "${cnt_addr}"
-+ wait_rm_sf $ns2 "${cnt_sf}"
- }
-
- userspace_tests()
-@@ -3417,7 +3423,7 @@ userspace_tests()
- continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
- set_userspace_pm $ns1
- pm_nl_set_limits $ns2 1 1
-- speed=10 \
-+ speed=5 \
- run_tests $ns1 $ns2 10.0.1.1 &
- local tests_pid=$!
- wait_mpj $ns1
-@@ -3438,7 +3444,7 @@ userspace_tests()
- continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
- set_userspace_pm $ns2
- pm_nl_set_limits $ns1 0 1
-- speed=10 \
-+ speed=5 \
- run_tests $ns1 $ns2 10.0.1.1 &
- local tests_pid=$!
- wait_mpj $ns2
-diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
-index f838dd370f6af..b3b2dc5a630cf 100755
---- a/tools/testing/selftests/net/pmtu.sh
-+++ b/tools/testing/selftests/net/pmtu.sh
-@@ -2048,7 +2048,7 @@ run_test() {
- case $ret in
- 0)
- all_skipped=false
-- [ $exitcode=$ksft_skip ] && exitcode=0
-+ [ $exitcode -eq $ksft_skip ] && exitcode=0
- ;;
- $ksft_skip)
- [ $all_skipped = true ] && exitcode=$ksft_skip
-diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
-index ef90aca4cc96a..bced422b78f72 100644
---- a/tools/testing/selftests/netfilter/Makefile
-+++ b/tools/testing/selftests/netfilter/Makefile
-@@ -7,7 +7,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
- nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
- ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
- conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
-- conntrack_sctp_collision.sh
-+ conntrack_sctp_collision.sh xt_string.sh
-
- HOSTPKG_CONFIG := pkg-config
-
-diff --git a/tools/testing/selftests/netfilter/xt_string.sh b/tools/testing/selftests/netfilter/xt_string.sh
-new file mode 100755
-index 0000000000000..1802653a47287
---- /dev/null
-+++ b/tools/testing/selftests/netfilter/xt_string.sh
-@@ -0,0 +1,128 @@
-+#!/bin/bash
-+# SPDX-License-Identifier: GPL-2.0
-+
-+# return code to signal skipped test
-+ksft_skip=4
-+rc=0
-+
-+if ! iptables --version >/dev/null 2>&1; then
-+ echo "SKIP: Test needs iptables"
-+ exit $ksft_skip
-+fi
-+if ! ip -V >/dev/null 2>&1; then
-+ echo "SKIP: Test needs iproute2"
-+ exit $ksft_skip
-+fi
-+if ! nc -h >/dev/null 2>&1; then
-+ echo "SKIP: Test needs netcat"
-+ exit $ksft_skip
-+fi
-+
-+pattern="foo bar baz"
-+patlen=11
-+hdrlen=$((20 + 8)) # IPv4 + UDP
-+ns="ns-$(mktemp -u XXXXXXXX)"
-+trap 'ip netns del $ns' EXIT
-+ip netns add "$ns"
-+ip -net "$ns" link add d0 type dummy
-+ip -net "$ns" link set d0 up
-+ip -net "$ns" addr add 10.1.2.1/24 dev d0
-+
-+#ip netns exec "$ns" tcpdump -npXi d0 &
-+#tcpdump_pid=$!
-+#trap 'kill $tcpdump_pid; ip netns del $ns' EXIT
-+
-+add_rule() { # (alg, from, to)
-+ ip netns exec "$ns" \
-+ iptables -A OUTPUT -o d0 -m string \
-+ --string "$pattern" --algo $1 --from $2 --to $3
-+}
-+showrules() { # ()
-+ ip netns exec "$ns" iptables -v -S OUTPUT | grep '^-A'
-+}
-+zerorules() {
-+ ip netns exec "$ns" iptables -Z OUTPUT
-+}
-+countrule() { # (pattern)
-+ showrules | grep -c -- "$*"
-+}
-+send() { # (offset)
-+ ( for ((i = 0; i < $1 - $hdrlen; i++)); do
-+ printf " "
-+ done
-+ printf "$pattern"
-+ ) | ip netns exec "$ns" nc -w 1 -u 10.1.2.2 27374
-+}
-+
-+add_rule bm 1000 1500
-+add_rule bm 1400 1600
-+add_rule kmp 1000 1500
-+add_rule kmp 1400 1600
-+
-+zerorules
-+send 0
-+send $((1000 - $patlen))
-+if [ $(countrule -c 0 0) -ne 4 ]; then
-+ echo "FAIL: rules match data before --from"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send 1000
-+send $((1400 - $patlen))
-+if [ $(countrule -c 2) -ne 2 ]; then
-+ echo "FAIL: only two rules should match at low offset"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send $((1500 - $patlen))
-+if [ $(countrule -c 1) -ne 4 ]; then
-+ echo "FAIL: all rules should match at end of packet"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send 1495
-+if [ $(countrule -c 1) -ne 1 ]; then
-+ echo "FAIL: only kmp with proper --to should match pattern spanning fragments"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send 1500
-+if [ $(countrule -c 1) -ne 2 ]; then
-+ echo "FAIL: two rules should match pattern at start of second fragment"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send $((1600 - $patlen))
-+if [ $(countrule -c 1) -ne 2 ]; then
-+ echo "FAIL: two rules should match pattern at end of largest --to"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send $((1600 - $patlen + 1))
-+if [ $(countrule -c 1) -ne 0 ]; then
-+ echo "FAIL: no rules should match pattern extending largest --to"
-+ showrules
-+ ((rc--))
-+fi
-+
-+zerorules
-+send 1600
-+if [ $(countrule -c 1) -ne 0 ]; then
-+ echo "FAIL: no rule should match pattern past largest --to"
-+ showrules
-+ ((rc--))
-+fi
-+
-+exit $rc
-diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
-index 4e86f927880c3..01cc37bf611c3 100644
---- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
-+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
-@@ -62,7 +62,7 @@ static void error_report(struct error *err, const char *test_name)
- break;
-
- case PIDFD_PASS:
-- ksft_test_result_pass("%s test: Passed\n");
-+ ksft_test_result_pass("%s test: Passed\n", test_name);
- break;
-
- default:
-diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
-index 00a07e7c571cd..c081ae91313aa 100644
---- a/tools/testing/selftests/pidfd/pidfd_test.c
-+++ b/tools/testing/selftests/pidfd/pidfd_test.c
-@@ -381,13 +381,13 @@ static int test_pidfd_send_signal_syscall_support(void)
-
- static void *test_pidfd_poll_exec_thread(void *priv)
- {
-- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
-+ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
- getpid(), syscall(SYS_gettid));
- ksft_print_msg("Child Thread: doing exec of sleep\n");
-
- execl("/bin/sleep", "sleep", str(CHILD_THREAD_MIN_WAIT), (char *)NULL);
-
-- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n",
-+ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n",
- getpid(), syscall(SYS_gettid));
- return NULL;
- }
-@@ -427,7 +427,7 @@ static int child_poll_exec_test(void *args)
- {
- pthread_t t1;
-
-- ksft_print_msg("Child (pidfd): starting. pid %d tid %d\n", getpid(),
-+ ksft_print_msg("Child (pidfd): starting. pid %d tid %ld\n", getpid(),
- syscall(SYS_gettid));
- pthread_create(&t1, NULL, test_pidfd_poll_exec_thread, NULL);
- /*
-@@ -480,10 +480,10 @@ static void test_pidfd_poll_exec(int use_waitpid)
-
- static void *test_pidfd_poll_leader_exit_thread(void *priv)
- {
-- ksft_print_msg("Child Thread: starting. pid %d tid %d ; and sleeping\n",
-+ ksft_print_msg("Child Thread: starting. pid %d tid %ld ; and sleeping\n",
- getpid(), syscall(SYS_gettid));
- sleep(CHILD_THREAD_MIN_WAIT);
-- ksft_print_msg("Child Thread: DONE. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
-+ ksft_print_msg("Child Thread: DONE. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
- return NULL;
- }
-
-@@ -492,7 +492,7 @@ static int child_poll_leader_exit_test(void *args)
- {
- pthread_t t1, t2;
-
-- ksft_print_msg("Child: starting. pid %d tid %d\n", getpid(), syscall(SYS_gettid));
-+ ksft_print_msg("Child: starting. pid %d tid %ld\n", getpid(), syscall(SYS_gettid));
- pthread_create(&t1, NULL, test_pidfd_poll_leader_exit_thread, NULL);
- pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL);
-
-diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
-index 5073dbc961258..2deac2031de9e 100644
---- a/tools/testing/selftests/resctrl/Makefile
-+++ b/tools/testing/selftests/resctrl/Makefile
-@@ -1,6 +1,6 @@
- # SPDX-License-Identifier: GPL-2.0
-
--CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
-+CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
- CFLAGS += $(KHDR_INCLUDES)
-
- TEST_GEN_PROGS := resctrl_tests
-diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
-index d3cbb829ff6a7..a0318bd3a63d8 100644
---- a/tools/testing/selftests/resctrl/cache.c
-+++ b/tools/testing/selftests/resctrl/cache.c
-@@ -205,10 +205,11 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
- * cache_val: execute benchmark and measure LLC occupancy resctrl
- * and perf cache miss for the benchmark
- * @param: parameters passed to cache_val()
-+ * @span: buffer size for the benchmark
- *
- * Return: 0 on success. non-zero on failure.
- */
--int cat_val(struct resctrl_val_param *param)
-+int cat_val(struct resctrl_val_param *param, size_t span)
- {
- int memflush = 1, operation = 0, ret = 0;
- char *resctrl_val = param->resctrl_val;
-@@ -245,7 +246,7 @@ int cat_val(struct resctrl_val_param *param)
- if (ret)
- break;
-
-- if (run_fill_buf(param->span, memflush, operation, true)) {
-+ if (run_fill_buf(span, memflush, operation, true)) {
- fprintf(stderr, "Error-running fill buffer\n");
- ret = -1;
- goto pe_close;
-diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
-index 3848dfb46aba4..224ba8544d8af 100644
---- a/tools/testing/selftests/resctrl/cat_test.c
-+++ b/tools/testing/selftests/resctrl/cat_test.c
-@@ -41,7 +41,7 @@ static int cat_setup(struct resctrl_val_param *p)
- return ret;
- }
-
--static int check_results(struct resctrl_val_param *param)
-+static int check_results(struct resctrl_val_param *param, size_t span)
- {
- char *token_array[8], temp[512];
- unsigned long sum_llc_perf_miss = 0;
-@@ -76,7 +76,7 @@ static int check_results(struct resctrl_val_param *param)
- fclose(fp);
- no_of_bits = count_bits(param->mask);
-
-- return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
-+ return show_cache_info(sum_llc_perf_miss, no_of_bits, span / 64,
- MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
- get_vendor() == ARCH_INTEL, false);
- }
-@@ -96,6 +96,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
- char cbm_mask[256];
- int count_of_bits;
- char pipe_message;
-+ size_t span;
-
- /* Get default cbm mask for L3/L2 cache */
- ret = get_cbm_mask(cache_type, cbm_mask);
-@@ -140,7 +141,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
- /* Set param values for parent thread which will be allocated bitmask
- * with (max_bits - n) bits
- */
-- param.span = cache_size * (count_of_bits - n) / count_of_bits;
-+ span = cache_size * (count_of_bits - n) / count_of_bits;
- strcpy(param.ctrlgrp, "c2");
- strcpy(param.mongrp, "m2");
- strcpy(param.filename, RESULT_FILE_NAME2);
-@@ -162,23 +163,17 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
- param.mask = l_mask_1;
- strcpy(param.ctrlgrp, "c1");
- strcpy(param.mongrp, "m1");
-- param.span = cache_size * n / count_of_bits;
-+ span = cache_size * n / count_of_bits;
- strcpy(param.filename, RESULT_FILE_NAME1);
- param.num_of_runs = 0;
- param.cpu_no = sibling_cpu_no;
-- } else {
-- ret = signal_handler_register();
-- if (ret) {
-- kill(bm_pid, SIGKILL);
-- goto out;
-- }
- }
-
- remove(param.filename);
-
-- ret = cat_val(&param);
-+ ret = cat_val(&param, span);
- if (ret == 0)
-- ret = check_results(&param);
-+ ret = check_results(&param, span);
-
- if (bm_pid == 0) {
- /* Tell parent that child is ready */
-@@ -208,10 +203,8 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
- }
- close(pipefd[0]);
- kill(bm_pid, SIGKILL);
-- signal_handler_unregister();
- }
-
--out:
- cat_test_cleanup();
-
- return ret;
-diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
-index cb2197647c6cd..50bdbce9fba95 100644
---- a/tools/testing/selftests/resctrl/cmt_test.c
-+++ b/tools/testing/selftests/resctrl/cmt_test.c
-@@ -27,7 +27,7 @@ static int cmt_setup(struct resctrl_val_param *p)
- return 0;
- }
-
--static int check_results(struct resctrl_val_param *param, int no_of_bits)
-+static int check_results(struct resctrl_val_param *param, size_t span, int no_of_bits)
- {
- char *token_array[8], temp[512];
- unsigned long sum_llc_occu_resc = 0;
-@@ -58,7 +58,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
- }
- fclose(fp);
-
-- return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
-+ return show_cache_info(sum_llc_occu_resc, no_of_bits, span,
- MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
- true, true);
- }
-@@ -68,16 +68,17 @@ void cmt_test_cleanup(void)
- remove(RESULT_FILE_NAME);
- }
-
--int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
-+int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd)
- {
-+ const char * const *cmd = benchmark_cmd;
-+ const char *new_cmd[BENCHMARK_ARGS];
- unsigned long cache_size = 0;
- unsigned long long_mask;
-+ char *span_str = NULL;
- char cbm_mask[256];
- int count_of_bits;
-- int ret;
--
-- if (!validate_resctrl_feature_request(CMT_STR))
-- return -1;
-+ size_t span;
-+ int ret, i;
-
- ret = get_cbm_mask("L3", cbm_mask);
- if (ret)
-@@ -105,24 +106,36 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
- .cpu_no = cpu_no,
- .filename = RESULT_FILE_NAME,
- .mask = ~(long_mask << n) & long_mask,
-- .span = cache_size * n / count_of_bits,
- .num_of_runs = 0,
- .setup = cmt_setup,
- };
-
-- if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
-- sprintf(benchmark_cmd[1], "%zu", param.span);
-+ span = cache_size * n / count_of_bits;
-+
-+ if (strcmp(cmd[0], "fill_buf") == 0) {
-+ /* Duplicate the command to be able to replace span in it */
-+ for (i = 0; benchmark_cmd[i]; i++)
-+ new_cmd[i] = benchmark_cmd[i];
-+ new_cmd[i] = NULL;
-+
-+ ret = asprintf(&span_str, "%zu", span);
-+ if (ret < 0)
-+ return -1;
-+ new_cmd[1] = span_str;
-+ cmd = new_cmd;
-+ }
-
- remove(RESULT_FILE_NAME);
-
-- ret = resctrl_val(benchmark_cmd, &param);
-+ ret = resctrl_val(cmd, &param);
- if (ret)
- goto out;
-
-- ret = check_results(&param, n);
-+ ret = check_results(&param, span, n);
-
- out:
- cmt_test_cleanup();
-+ free(span_str);
-
- return ret;
- }
-diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
-index 4d2f145804b83..d3bf4368341ec 100644
---- a/tools/testing/selftests/resctrl/mba_test.c
-+++ b/tools/testing/selftests/resctrl/mba_test.c
-@@ -12,7 +12,7 @@
-
- #define RESULT_FILE_NAME "result_mba"
- #define NUM_OF_RUNS 5
--#define MAX_DIFF_PERCENT 5
-+#define MAX_DIFF_PERCENT 8
- #define ALLOCATION_MAX 100
- #define ALLOCATION_MIN 10
- #define ALLOCATION_STEP 10
-@@ -141,7 +141,7 @@ void mba_test_cleanup(void)
- remove(RESULT_FILE_NAME);
- }
-
--int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
-+int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd)
- {
- struct resctrl_val_param param = {
- .resctrl_val = MBA_STR,
-@@ -149,7 +149,7 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
- .mongrp = "m1",
- .cpu_no = cpu_no,
- .filename = RESULT_FILE_NAME,
-- .bw_report = bw_report,
-+ .bw_report = "reads",
- .setup = mba_setup
- };
- int ret;
-diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
-index c7de6f5977f69..d3c0d30c676a7 100644
---- a/tools/testing/selftests/resctrl/mbm_test.c
-+++ b/tools/testing/selftests/resctrl/mbm_test.c
-@@ -11,7 +11,7 @@
- #include "resctrl.h"
-
- #define RESULT_FILE_NAME "result_mbm"
--#define MAX_DIFF_PERCENT 5
-+#define MAX_DIFF_PERCENT 8
- #define NUM_OF_RUNS 5
-
- static int
-@@ -109,16 +109,15 @@ void mbm_test_cleanup(void)
- remove(RESULT_FILE_NAME);
- }
-
--int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
-+int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd)
- {
- struct resctrl_val_param param = {
- .resctrl_val = MBM_STR,
- .ctrlgrp = "c1",
- .mongrp = "m1",
-- .span = span,
- .cpu_no = cpu_no,
- .filename = RESULT_FILE_NAME,
-- .bw_report = bw_report,
-+ .bw_report = "reads",
- .setup = mbm_setup
- };
- int ret;
-@@ -129,7 +128,7 @@ int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd
- if (ret)
- goto out;
-
-- ret = check_results(span);
-+ ret = check_results(DEFAULT_SPAN);
-
- out:
- mbm_test_cleanup();
-diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
-index 838d1a438f335..8578a8b4e1459 100644
---- a/tools/testing/selftests/resctrl/resctrl.h
-+++ b/tools/testing/selftests/resctrl/resctrl.h
-@@ -1,5 +1,4 @@
- /* SPDX-License-Identifier: GPL-2.0 */
--#define _GNU_SOURCE
- #ifndef RESCTRL_H
- #define RESCTRL_H
- #include <stdio.h>
-@@ -28,16 +27,16 @@
- #define RESCTRL_PATH "/sys/fs/resctrl"
- #define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
- #define INFO_PATH "/sys/fs/resctrl/info"
--#define L3_PATH "/sys/fs/resctrl/info/L3"
--#define MB_PATH "/sys/fs/resctrl/info/MB"
--#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
--#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
-
- #define ARCH_INTEL 1
- #define ARCH_AMD 2
-
- #define END_OF_TESTS 1
-
-+#define BENCHMARK_ARGS 64
-+
-+#define DEFAULT_SPAN (250 * MB)
-+
- #define PARENT_EXIT(err_msg) \
- do { \
- perror(err_msg); \
-@@ -52,7 +51,6 @@
- * @ctrlgrp: Name of the control monitor group (con_mon grp)
- * @mongrp: Name of the monitor group (mon grp)
- * @cpu_no: CPU number to which the benchmark would be binded
-- * @span: Memory bytes accessed in each benchmark iteration
- * @filename: Name of file to which the o/p should be written
- * @bw_report: Bandwidth report type (reads vs writes)
- * @setup: Call back function to setup test environment
-@@ -62,7 +60,6 @@ struct resctrl_val_param {
- char ctrlgrp[64];
- char mongrp[64];
- int cpu_no;
-- size_t span;
- char filename[64];
- char *bw_report;
- unsigned long mask;
-@@ -86,7 +83,7 @@ int get_resource_id(int cpu_no, int *resource_id);
- int mount_resctrlfs(void);
- int umount_resctrlfs(void);
- int validate_bw_report_request(char *bw_report);
--bool validate_resctrl_feature_request(const char *resctrl_val);
-+bool validate_resctrl_feature_request(const char *resource, const char *feature);
- char *fgrep(FILE *inf, const char *str);
- int taskset_benchmark(pid_t bm_pid, int cpu_no);
- void run_benchmark(int signum, siginfo_t *info, void *ucontext);
-@@ -97,21 +94,21 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
- int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
- int group_fd, unsigned long flags);
- int run_fill_buf(size_t span, int memflush, int op, bool once);
--int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
--int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd);
-+int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param);
-+int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd);
- void tests_cleanup(void);
- void mbm_test_cleanup(void);
--int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
-+int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd);
- void mba_test_cleanup(void);
- int get_cbm_mask(char *cache_type, char *cbm_mask);
- int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
- void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
- int signal_handler_register(void);
- void signal_handler_unregister(void);
--int cat_val(struct resctrl_val_param *param);
-+int cat_val(struct resctrl_val_param *param, size_t span);
- void cat_test_cleanup(void);
- int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
--int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
-+int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd);
- unsigned int count_bits(unsigned long n);
- void cmt_test_cleanup(void);
- int get_core_sibling(int cpu_no);
-diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
-index d511daeb6851e..31373b69e675d 100644
---- a/tools/testing/selftests/resctrl/resctrl_tests.c
-+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
-@@ -10,9 +10,6 @@
- */
- #include "resctrl.h"
-
--#define BENCHMARK_ARGS 64
--#define BENCHMARK_ARG_SIZE 64
--
- static int detect_vendor(void)
- {
- FILE *inf = fopen("/proc/cpuinfo", "r");
-@@ -70,72 +67,98 @@ void tests_cleanup(void)
- cat_test_cleanup();
- }
-
--static void run_mbm_test(char **benchmark_cmd, size_t span,
-- int cpu_no, char *bw_report)
-+static int test_prepare(void)
- {
- int res;
-
-- ksft_print_msg("Starting MBM BW change ...\n");
-+ res = signal_handler_register();
-+ if (res) {
-+ ksft_print_msg("Failed to register signal handler\n");
-+ return res;
-+ }
-
- res = mount_resctrlfs();
- if (res) {
-- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
-+ signal_handler_unregister();
-+ ksft_print_msg("Failed to mount resctrl FS\n");
-+ return res;
-+ }
-+ return 0;
-+}
-+
-+static void test_cleanup(void)
-+{
-+ umount_resctrlfs();
-+ signal_handler_unregister();
-+}
-+
-+static void run_mbm_test(const char * const *benchmark_cmd, int cpu_no)
-+{
-+ int res;
-+
-+ ksft_print_msg("Starting MBM BW change ...\n");
-+
-+ if (test_prepare()) {
-+ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
- return;
- }
-
-- if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
-+ if (!validate_resctrl_feature_request("L3_MON", "mbm_total_bytes") ||
-+ !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
-+ (get_vendor() != ARCH_INTEL)) {
- ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
-- goto umount;
-+ goto cleanup;
- }
-
-- res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
-+ res = mbm_bw_change(cpu_no, benchmark_cmd);
- ksft_test_result(!res, "MBM: bw change\n");
- if ((get_vendor() == ARCH_INTEL) && res)
- ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
-
--umount:
-- umount_resctrlfs();
-+cleanup:
-+ test_cleanup();
- }
-
--static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
-+static void run_mba_test(const char * const *benchmark_cmd, int cpu_no)
- {
- int res;
-
- ksft_print_msg("Starting MBA Schemata change ...\n");
-
-- res = mount_resctrlfs();
-- if (res) {
-- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
-+ if (test_prepare()) {
-+ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
- return;
- }
-
-- if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
-+ if (!validate_resctrl_feature_request("MB", NULL) ||
-+ !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") ||
-+ (get_vendor() != ARCH_INTEL)) {
- ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
-- goto umount;
-+ goto cleanup;
- }
-
-- res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
-+ res = mba_schemata_change(cpu_no, benchmark_cmd);
- ksft_test_result(!res, "MBA: schemata change\n");
-
--umount:
-- umount_resctrlfs();
-+cleanup:
-+ test_cleanup();
- }
-
--static void run_cmt_test(char **benchmark_cmd, int cpu_no)
-+static void run_cmt_test(const char * const *benchmark_cmd, int cpu_no)
- {
- int res;
-
- ksft_print_msg("Starting CMT test ...\n");
-
-- res = mount_resctrlfs();
-- if (res) {
-- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
-+ if (test_prepare()) {
-+ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
- return;
- }
-
-- if (!validate_resctrl_feature_request(CMT_STR)) {
-+ if (!validate_resctrl_feature_request("L3_MON", "llc_occupancy") ||
-+ !validate_resctrl_feature_request("L3", NULL)) {
- ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
-- goto umount;
-+ goto cleanup;
- }
-
- res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
-@@ -143,8 +166,8 @@ static void run_cmt_test(char **benchmark_cmd, int cpu_no)
- if ((get_vendor() == ARCH_INTEL) && res)
- ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
-
--umount:
-- umount_resctrlfs();
-+cleanup:
-+ test_cleanup();
- }
-
- static void run_cat_test(int cpu_no, int no_of_bits)
-@@ -153,33 +176,32 @@ static void run_cat_test(int cpu_no, int no_of_bits)
-
- ksft_print_msg("Starting CAT test ...\n");
-
-- res = mount_resctrlfs();
-- if (res) {
-- ksft_exit_fail_msg("Failed to mount resctrl FS\n");
-+ if (test_prepare()) {
-+ ksft_exit_fail_msg("Abnormal failure when preparing for the test\n");
- return;
- }
-
-- if (!validate_resctrl_feature_request(CAT_STR)) {
-+ if (!validate_resctrl_feature_request("L3", NULL)) {
- ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
-- goto umount;
-+ goto cleanup;
- }
-
- res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
- ksft_test_result(!res, "CAT: test\n");
-
--umount:
-- umount_resctrlfs();
-+cleanup:
-+ test_cleanup();
- }
-
- int main(int argc, char **argv)
- {
- bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
-- char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
-- char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
- int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
-+ const char *benchmark_cmd[BENCHMARK_ARGS];
- int ben_ind, ben_count, tests = 0;
-- size_t span = 250 * MB;
-+ char *span_str = NULL;
- bool cat_test = true;
-+ int ret;
-
- for (i = 0; i < argc; i++) {
- if (strcmp(argv[i], "-b") == 0) {
-@@ -255,28 +277,26 @@ int main(int argc, char **argv)
- return ksft_exit_skip("Not running as root. Skipping...\n");
-
- if (has_ben) {
-+ if (argc - ben_ind >= BENCHMARK_ARGS)
-+ ksft_exit_fail_msg("Too long benchmark command.\n");
-+
- /* Extract benchmark command from command line. */
-- for (i = ben_ind; i < argc; i++) {
-- benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
-- sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
-- }
-+ for (i = 0; i < argc - ben_ind; i++)
-+ benchmark_cmd[i] = argv[i + ben_ind];
- benchmark_cmd[ben_count] = NULL;
- } else {
- /* If no benchmark is given by "-b" argument, use fill_buf. */
-- for (i = 0; i < 5; i++)
-- benchmark_cmd[i] = benchmark_cmd_area[i];
--
-- strcpy(benchmark_cmd[0], "fill_buf");
-- sprintf(benchmark_cmd[1], "%zu", span);
-- strcpy(benchmark_cmd[2], "1");
-- strcpy(benchmark_cmd[3], "0");
-- strcpy(benchmark_cmd[4], "false");
-+ benchmark_cmd[0] = "fill_buf";
-+ ret = asprintf(&span_str, "%u", DEFAULT_SPAN);
-+ if (ret < 0)
-+ ksft_exit_fail_msg("Out of memory!\n");
-+ benchmark_cmd[1] = span_str;
-+ benchmark_cmd[2] = "1";
-+ benchmark_cmd[3] = "0";
-+ benchmark_cmd[4] = "false";
- benchmark_cmd[5] = NULL;
- }
-
-- sprintf(bw_report, "reads");
-- sprintf(bm_type, "fill_buf");
--
- if (!check_resctrlfs_support())
- return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
-
-@@ -288,10 +308,10 @@ int main(int argc, char **argv)
- ksft_set_plan(tests ? : 4);
-
- if (mbm_test)
-- run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
-+ run_mbm_test(benchmark_cmd, cpu_no);
-
- if (mba_test)
-- run_mba_test(benchmark_cmd, cpu_no, bw_report);
-+ run_mba_test(benchmark_cmd, cpu_no);
-
- if (cmt_test)
- run_cmt_test(benchmark_cmd, cpu_no);
-@@ -299,5 +319,6 @@ int main(int argc, char **argv)
- if (cat_test)
- run_cat_test(cpu_no, no_of_bits);
-
-+ free(span_str);
- ksft_finished();
- }
-diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
-index f0f6c5f6e98b9..b8ca6fa40b3bf 100644
---- a/tools/testing/selftests/resctrl/resctrl_val.c
-+++ b/tools/testing/selftests/resctrl/resctrl_val.c
-@@ -468,7 +468,9 @@ pid_t bm_pid, ppid;
-
- void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
- {
-- kill(bm_pid, SIGKILL);
-+ /* Only kill child after bm_pid is set after fork() */
-+ if (bm_pid)
-+ kill(bm_pid, SIGKILL);
- umount_resctrlfs();
- tests_cleanup();
- ksft_print_msg("Ending\n\n");
-@@ -482,9 +484,11 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
- */
- int signal_handler_register(void)
- {
-- struct sigaction sigact;
-+ struct sigaction sigact = {};
- int ret = 0;
-
-+ bm_pid = 0;
-+
- sigact.sa_sigaction = ctrlc_handler;
- sigemptyset(&sigact.sa_mask);
- sigact.sa_flags = SA_SIGINFO;
-@@ -504,7 +508,7 @@ int signal_handler_register(void)
- */
- void signal_handler_unregister(void)
- {
-- struct sigaction sigact;
-+ struct sigaction sigact = {};
-
- sigact.sa_handler = SIG_DFL;
- sigemptyset(&sigact.sa_mask);
-@@ -629,7 +633,7 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
- *
- * Return: 0 on success. non-zero on failure.
- */
--int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
-+int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param)
- {
- char *resctrl_val = param->resctrl_val;
- unsigned long bw_resc_start = 0;
-@@ -706,28 +710,30 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
-
- ksft_print_msg("Benchmark PID: %d\n", bm_pid);
-
-- ret = signal_handler_register();
-- if (ret)
-- goto out;
--
-- value.sival_ptr = benchmark_cmd;
-+ /*
-+ * The cast removes constness but nothing mutates benchmark_cmd within
-+ * the context of this process. At the receiving process, it becomes
-+ * argv, which is mutable, on exec() but that's after fork() so it
-+ * doesn't matter for the process running the tests.
-+ */
-+ value.sival_ptr = (void *)benchmark_cmd;
-
- /* Taskset benchmark to specified cpu */
- ret = taskset_benchmark(bm_pid, param->cpu_no);
- if (ret)
-- goto unregister;
-+ goto out;
-
- /* Write benchmark to specified control&monitoring grp in resctrl FS */
- ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
- resctrl_val);
- if (ret)
-- goto unregister;
-+ goto out;
-
- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
- !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
- ret = initialize_mem_bw_imc();
- if (ret)
-- goto unregister;
-+ goto out;
-
- initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
- param->cpu_no, resctrl_val);
-@@ -742,7 +748,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
- sizeof(pipe_message)) {
- perror("# failed reading message from child process");
- close(pipefd[0]);
-- goto unregister;
-+ goto out;
- }
- }
- close(pipefd[0]);
-@@ -751,7 +757,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
- if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
- perror("# sigqueue SIGUSR1 to child");
- ret = errno;
-- goto unregister;
-+ goto out;
- }
-
- /* Give benchmark enough time to fully run */
-@@ -780,8 +786,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
- }
- }
-
--unregister:
-- signal_handler_unregister();
- out:
- kill(bm_pid, SIGKILL);
-
-diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
-index bd36ee2066020..3a8111362d262 100644
---- a/tools/testing/selftests/resctrl/resctrlfs.c
-+++ b/tools/testing/selftests/resctrl/resctrlfs.c
-@@ -8,6 +8,8 @@
- * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
- * Fenghua Yu <fenghua.yu@intel.com>
- */
-+#include <limits.h>
-+
- #include "resctrl.h"
-
- static int find_resctrl_mount(char *buffer)
-@@ -604,63 +606,46 @@ char *fgrep(FILE *inf, const char *str)
-
- /*
- * validate_resctrl_feature_request - Check if requested feature is valid.
-- * @resctrl_val: Requested feature
-+ * @resource: Required resource (e.g., MB, L3, L2, L3_MON, etc.)
-+ * @feature: Required monitor feature (in mon_features file). Can only be
-+ * set for L3_MON. Must be NULL for all other resources.
- *
-- * Return: True if the feature is supported, else false. False is also
-- * returned if resctrl FS is not mounted.
-+ * Return: True if the resource/feature is supported, else false. False is
-+ * also returned if resctrl FS is not mounted.
- */
--bool validate_resctrl_feature_request(const char *resctrl_val)
-+bool validate_resctrl_feature_request(const char *resource, const char *feature)
- {
-+ char res_path[PATH_MAX];
- struct stat statbuf;
-- bool found = false;
- char *res;
- FILE *inf;
- int ret;
-
-- if (!resctrl_val)
-+ if (!resource)
- return false;
-
- ret = find_resctrl_mount(NULL);
- if (ret)
- return false;
-
-- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
-- if (!stat(L3_PATH, &statbuf))
-- return true;
-- } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
-- if (!stat(MB_PATH, &statbuf))
-- return true;
-- } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
-- !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
-- if (!stat(L3_MON_PATH, &statbuf)) {
-- inf = fopen(L3_MON_FEATURES_PATH, "r");
-- if (!inf)
-- return false;
--
-- if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
-- res = fgrep(inf, "llc_occupancy");
-- if (res) {
-- found = true;
-- free(res);
-- }
-- }
--
-- if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
-- res = fgrep(inf, "mbm_total_bytes");
-- if (res) {
-- free(res);
-- res = fgrep(inf, "mbm_local_bytes");
-- if (res) {
-- found = true;
-- free(res);
-- }
-- }
-- }
-- fclose(inf);
-- }
-- }
-+ snprintf(res_path, sizeof(res_path), "%s/%s", INFO_PATH, resource);
-+
-+ if (stat(res_path, &statbuf))
-+ return false;
-+
-+ if (!feature)
-+ return true;
-+
-+ snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
-+ inf = fopen(res_path, "r");
-+ if (!inf)
-+ return false;
-+
-+ res = fgrep(inf, feature);
-+ free(res);
-+ fclose(inf);
-
-- return found;
-+ return !!res;
- }
-
- int filter_dmesg(void)
-diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c
-index eb0e46905bf9d..8f9b06d9ce039 100644
---- a/tools/testing/selftests/x86/lam.c
-+++ b/tools/testing/selftests/x86/lam.c
-@@ -573,7 +573,7 @@ int do_uring(unsigned long lam)
- char path[PATH_MAX] = {0};
-
- /* get current process path */
-- if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
-+ if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
- return 1;
-
- int file_fd = open(path, O_RDONLY);
-@@ -680,14 +680,14 @@ static int handle_execve(struct testcases *test)
- perror("Fork failed.");
- ret = 1;
- } else if (pid == 0) {
-- char path[PATH_MAX];
-+ char path[PATH_MAX] = {0};
-
- /* Set LAM mode in parent process */
- if (set_lam(lam) != 0)
- return 1;
-
- /* Get current binary's path and the binary was run by execve */
-- if (readlink("/proc/self/exe", path, PATH_MAX) <= 0)
-+ if (readlink("/proc/self/exe", path, PATH_MAX - 1) <= 0)
- exit(-1);
-
- /* run binary to get LAM mode and return to parent process */
-diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
-index 90718c2fd4ea9..5dc7767039f6f 100644
---- a/tools/testing/vsock/vsock_test.c
-+++ b/tools/testing/vsock/vsock_test.c
-@@ -392,11 +392,12 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
- }
-
- #define SOCK_BUF_SIZE (2 * 1024 * 1024)
--#define MAX_MSG_SIZE (32 * 1024)
-+#define MAX_MSG_PAGES 4
-
- static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
- {
- unsigned long curr_hash;
-+ size_t max_msg_size;
- int page_size;
- int msg_count;
- int fd;
-@@ -412,7 +413,8 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
-
- curr_hash = 0;
- page_size = getpagesize();
-- msg_count = SOCK_BUF_SIZE / MAX_MSG_SIZE;
-+ max_msg_size = MAX_MSG_PAGES * page_size;
-+ msg_count = SOCK_BUF_SIZE / max_msg_size;
-
- for (int i = 0; i < msg_count; i++) {
- ssize_t send_size;
-@@ -423,7 +425,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
- /* Use "small" buffers and "big" buffers. */
- if (i & 1)
- buf_size = page_size +
-- (rand() % (MAX_MSG_SIZE - page_size));
-+ (rand() % (max_msg_size - page_size));
- else
- buf_size = 1 + (rand() % page_size);
-
-@@ -479,7 +481,6 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
- unsigned long remote_hash;
- unsigned long curr_hash;
- int fd;
-- char buf[MAX_MSG_SIZE];
- struct msghdr msg = {0};
- struct iovec iov = {0};
-
-@@ -507,8 +508,13 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
- control_writeln("SRVREADY");
- /* Wait, until peer sends whole data. */
- control_expectln("SENDDONE");
-- iov.iov_base = buf;
-- iov.iov_len = sizeof(buf);
-+ iov.iov_len = MAX_MSG_PAGES * getpagesize();
-+ iov.iov_base = malloc(iov.iov_len);
-+ if (!iov.iov_base) {
-+ perror("malloc");
-+ exit(EXIT_FAILURE);
-+ }
-+
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
-
-@@ -533,6 +539,7 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
- curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
- }
-
-+ free(iov.iov_base);
- close(fd);
- remote_hash = control_readulong();
-
-diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
-index 623a38908ed5b..c769d7b3842c0 100644
---- a/tools/tracing/rtla/src/utils.c
-+++ b/tools/tracing/rtla/src/utils.c
-@@ -538,7 +538,7 @@ static const int find_mount(const char *fs, char *mp, int sizeof_mp)
- {
- char mount_point[MAX_PATH];
- char type[100];
-- int found;
-+ int found = 0;
- FILE *fp;
-
- fp = fopen("/proc/mounts", "r");
diff --git a/system/easy-kernel/0120-XATTR_USER_PREFIX.patch b/system/easy-kernel/0120-XATTR_USER_PREFIX.patch
deleted file mode 100644
index 50d3e3dae..000000000
--- a/system/easy-kernel/0120-XATTR_USER_PREFIX.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-diff -Naur xattr-a/include/uapi/linux/xattr.h xattr-b/include/uapi/linux/xattr.h
---- xattr-a/include/uapi/linux/xattr.h 2023-12-19 20:22:20.000000000 +1100
-+++ xattr-b/include/uapi/linux/xattr.h 2023-12-24 13:31:57.653254146 +1100
-@@ -81,5 +81,9 @@
- #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
- #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
-
-+/* User namespace */
-+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
-+#define XATTR_PAX_FLAGS_SUFFIX "flags"
-+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
-
- #endif /* _UAPI_LINUX_XATTR_H */
-diff -Naur xattr-a/mm/shmem.c xattr-b/mm/shmem.c
---- xattr-a/mm/shmem.c 2023-12-19 20:22:20.000000000 +1100
-+++ xattr-b/mm/shmem.c 2023-12-24 13:31:57.803254146 +1100
-@@ -3649,6 +3649,14 @@
- {
- struct shmem_inode_info *info = SHMEM_I(inode);
-
-+
-+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
-+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
-+ return -EOPNOTSUPP;
-+ if (size > 8)
-+ return -EINVAL;
-+ }
-+
- name = xattr_full_name(handler, name);
- return simple_xattr_get(&info->xattrs, name, buffer, size);
- }
diff --git a/system/easy-kernel/0204-amd-deserialised-MSR-access.patch b/system/easy-kernel/0204-amd-deserialised-MSR-access.patch
deleted file mode 100644
index cd8c4237a..000000000
--- a/system/easy-kernel/0204-amd-deserialised-MSR-access.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-From: Borislav Petkov <bp@alien8.de>
-Date: Thu, 22 Jun 2023 11:52:12 +0200
-Subject: [PATCH 07/16] x86/barrier: Do not serialize MSR accesses on AMD
-
-AMD does not have the requirement for a synchronization barrier when
-accessing a certain group of MSRs. Do not incur that unnecessary
-penalty there.
-
-While at it, move to processor.h to avoid include hell. Untangling that
-file properly is a matter for another day.
-
-Some notes on the performance aspect of why this is relevant, courtesy
-of Kishon VijayAbraham <Kishon.VijayAbraham@amd.com>:
-
-On a AMD Zen4 system with 96 cores, a modified ipi-bench[1] on a VM
-shows x2AVIC IPI rate is 3% to 4% lower than AVIC IPI rate. The
-ipi-bench is modified so that the IPIs are sent between two vCPUs in the
-same CCX. This also requires to pin the vCPU to a physical core to
-prevent any latencies. This simulates the use case of pinning vCPUs to
-the thread of a single CCX to avoid interrupt IPI latency.
-
-In order to avoid run-to-run variance (for both x2AVIC and AVIC), the
-below configurations are done:
-
- 1) Disable Power States in BIOS (to prevent the system from going to
- lower power state)
-
- 2) Run the system at fixed frequency 2500MHz (to prevent the system
- from increasing the frequency when the load is more)
-
-With the above configuration:
-
-*) Performance measured using ipi-bench for AVIC:
- Average Latency: 1124.98ns [Time to send IPI from one vCPU to another vCPU]
-
- Cumulative throughput: 42.6759M/s [Total number of IPIs sent in a second from
- 48 vCPUs simultaneously]
-
-*) Performance measured using ipi-bench for x2AVIC:
- Average Latency: 1172.42ns [Time to send IPI from one vCPU to another vCPU]
-
- Cumulative throughput: 40.9432M/s [Total number of IPIs sent in a second from
- 48 vCPUs simultaneously]
-
-From above, x2AVIC latency is ~4% more than AVIC. However, the expectation is
-x2AVIC performance to be better or equivalent to AVIC. Upon analyzing
-the perf captures, it is observed significant time is spent in
-weak_wrmsr_fence() invoked by x2apic_send_IPI().
-
-With the fix to skip weak_wrmsr_fence()
-
-*) Performance measured using ipi-bench for x2AVIC:
- Average Latency: 1117.44ns [Time to send IPI from one vCPU to another vCPU]
-
- Cumulative throughput: 42.9608M/s [Total number of IPIs sent in a second from
- 48 vCPUs simultaneously]
-
-Comparing the performance of x2AVIC with and without the fix, it can be seen
-the performance improves by ~4%.
-
-Performance captured using an unmodified ipi-bench using the 'mesh-ipi' option
-with and without weak_wrmsr_fence() on a Zen4 system also showed significant
-performance improvement without weak_wrmsr_fence(). The 'mesh-ipi' option ignores
-CCX or CCD and just picks random vCPU.
-
- Average throughput (10 iterations) with weak_wrmsr_fence(),
- Cumulative throughput: 4933374 IPI/s
-
- Average throughput (10 iterations) without weak_wrmsr_fence(),
- Cumulative throughput: 6355156 IPI/s
-
-[1] https://github.com/bytedance/kvm-utils/tree/master/microbenchmark/ipi-bench
-
-Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
----
- arch/x86/include/asm/barrier.h | 18 ------------------
- arch/x86/include/asm/processor.h | 19 +++++++++++++++++++
- 2 files changed, 19 insertions(+), 18 deletions(-)
-
-diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
-index 35389b2af..0216f63a3 100644
---- a/arch/x86/include/asm/barrier.h
-+++ b/arch/x86/include/asm/barrier.h
-@@ -81,22 +81,4 @@ do { \
-
- #include <asm-generic/barrier.h>
-
--/*
-- * Make previous memory operations globally visible before
-- * a WRMSR.
-- *
-- * MFENCE makes writes visible, but only affects load/store
-- * instructions. WRMSR is unfortunately not a load/store
-- * instruction and is unaffected by MFENCE. The LFENCE ensures
-- * that the WRMSR is not reordered.
-- *
-- * Most WRMSRs are full serializing instructions themselves and
-- * do not require this barrier. This is only required for the
-- * IA32_TSC_DEADLINE and X2APIC MSRs.
-- */
--static inline void weak_wrmsr_fence(void)
--{
-- asm volatile("mfence; lfence" : : : "memory");
--}
--
- #endif /* _ASM_X86_BARRIER_H */
-diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index a3669a777..3e175d554 100644
---- a/arch/x86/include/asm/processor.h
-+++ b/arch/x86/include/asm/processor.h
-@@ -734,4 +734,23 @@ bool arch_is_platform_page(u64 paddr);
-
- extern bool gds_ucode_mitigated(void);
-
-+/*
-+ * Make previous memory operations globally visible before
-+ * a WRMSR.
-+ *
-+ * MFENCE makes writes visible, but only affects load/store
-+ * instructions. WRMSR is unfortunately not a load/store
-+ * instruction and is unaffected by MFENCE. The LFENCE ensures
-+ * that the WRMSR is not reordered.
-+ *
-+ * Most WRMSRs are full serializing instructions themselves and
-+ * do not require this barrier. This is only required for the
-+ * IA32_TSC_DEADLINE and X2APIC MSRs.
-+ */
-+static inline void weak_wrmsr_fence(void)
-+{
-+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
-+ asm volatile("mfence; lfence" : : : "memory");
-+}
-+
- #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/system/easy-kernel/0204-sparc-warray-fix.patch b/system/easy-kernel/0204-sparc-warray-fix.patch
new file mode 100644
index 000000000..f93935552
--- /dev/null
+++ b/system/easy-kernel/0204-sparc-warray-fix.patch
@@ -0,0 +1,17 @@
+--- a/arch/sparc/mm/init_64.c 2022-05-24 16:48:40.749677491 -0400
++++ b/arch/sparc/mm/init_64.c 2022-05-24 16:55:15.511356945 -0400
+@@ -3052,11 +3052,11 @@ static inline resource_size_t compute_ke
+ static void __init kernel_lds_init(void)
+ {
+ code_resource.start = compute_kern_paddr(_text);
+- code_resource.end = compute_kern_paddr(_etext - 1);
++ code_resource.end = compute_kern_paddr(_etext) - 1;
+ data_resource.start = compute_kern_paddr(_etext);
+- data_resource.end = compute_kern_paddr(_edata - 1);
++ data_resource.end = compute_kern_paddr(_edata) - 1;
+ bss_resource.start = compute_kern_paddr(__bss_start);
+- bss_resource.end = compute_kern_paddr(_end - 1);
++ bss_resource.end = compute_kern_paddr(_end) - 1;
+ }
+
+ static int __init report_memory(void)
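+
+Why this silences -Warray-bounds: _etext, _edata and _end are linker
+symbols that the kernel declares as arrays, so `_etext - 1` forms a
+pointer before the start of the object, which newer GCC flags. Computing
+the physical address first and then subtracting 1 is plain integer
+arithmetic that yields the same value. A minimal sketch (the declaration
+and helper are illustrative stand-ins, not the real sparc definitions):
+
+```
+extern char _etext[];                  /* linker-provided end-of-text symbol */
+
+unsigned long paddr_of(const void *p); /* stand-in for compute_kern_paddr() */
+
+unsigned long text_end_old(void)
+{
+        return paddr_of(_etext - 1);   /* &_etext[-1]: -Warray-bounds fires */
+}
+
+unsigned long text_end_new(void)
+{
+        return paddr_of(_etext) - 1;   /* same value, no out-of-bounds pointer */
+}
+```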
diff --git a/system/easy-kernel/0208-gcc14-objtool-fix.patch b/system/easy-kernel/0208-gcc14-objtool-fix.patch
new file mode 100644
index 000000000..dca9494ab
--- /dev/null
+++ b/system/easy-kernel/0208-gcc14-objtool-fix.patch
@@ -0,0 +1,41 @@
+Subject: [gcc-14 PATCH] objtool: Fix calloc call for new -Walloc-size
+
+GCC 14 introduces a new -Walloc-size included in -Wextra which errors out
+like:
+```
+check.c: In function ‘cfi_alloc’:
+check.c:294:33: error: allocation of insufficient size ‘1’ for type ‘struct cfi_state’ with size ‘320’ [-Werror=alloc-size]
+ 294 | struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
+ | ^~~~~~
+```
+
+The calloc prototype is:
+```
+void *calloc(size_t nmemb, size_t size);
+```
+
+So, just swap the number of members and size arguments to match the prototype, as
+we're initialising 1 struct of size `sizeof(struct ...)`. GCC then sees we're not
+doing anything wrong.
+
+Link: https://lore.kernel.org/all/20231107205504.1470006-1-sam@gentoo.org/
+Signed-off-by: Sam James <sam@gentoo.org>
+---
+ tools/objtool/check.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index e94756e09ca9..548ec3cd7c00 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -291,7 +291,7 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state,
+
+ static struct cfi_state *cfi_alloc(void)
+ {
+- struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
++ struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
+ if (!cfi) {
+ WARN("calloc failed");
+ exit(1);
+--
+2.42.1
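+
+The rule generalises to any calloc() call: -Walloc-size keys off the
+second argument being the element size. A standalone sketch (the struct
+is a stand-in sized to match the error above):
+
+```
+#include <stdlib.h>
+
+struct cfi_state { char pad[320]; };
+
+int main(void)
+{
+        /* GCC 14 reads calloc(nmemb, size); with the arguments swapped it
+         * sees 320 one-byte elements assigned to a 320-byte struct pointer
+         * and emits -Walloc-size. */
+        struct cfi_state *bad  = calloc(sizeof(struct cfi_state), 1); /* warns */
+        struct cfi_state *good = calloc(1, sizeof(struct cfi_state)); /* clean */
+
+        free(bad);
+        free(good);
+        return 0;
+}
+```
+Both calls allocate the same 320 zeroed bytes; only the argument order
+differs, which is why the fix is behaviour-neutral.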
diff --git a/system/easy-kernel/0250-expose-per-process-ksm.patch b/system/easy-kernel/0250-expose-per-process-ksm.patch
index 0f28daca8..78562ce48 100644
--- a/system/easy-kernel/0250-expose-per-process-ksm.patch
+++ b/system/easy-kernel/0250-expose-per-process-ksm.patch
@@ -1,3 +1,4 @@
+From 60c8dc0a7b07004213fe162f9dd9453ae99a0568 Mon Sep 17 00:00:00 2001
From: Oleksandr Natalenko <oleksandr@natalenko.name>
Date: Mon, 8 May 2023 22:21:53 +0200
Subject: [PATCH] mm: expose per-process KSM control via syscalls
@@ -458,3 +459,6 @@ index e137c1385..2d9772d11 100644
COND_SYSCALL(remap_file_pages);
COND_SYSCALL(mbind);
COND_SYSCALL(get_mempolicy);
+--
+2.42.0
+
diff --git a/system/easy-kernel/0252-rectify-ksm-inheritance.patch b/system/easy-kernel/0252-rectify-ksm-inheritance.patch
new file mode 100644
index 000000000..6f0733fbb
--- /dev/null
+++ b/system/easy-kernel/0252-rectify-ksm-inheritance.patch
@@ -0,0 +1,1059 @@
+From fe014f52184ec1a059184ef9e0262a3e0670a90d Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Fri, 22 Sep 2023 14:11:40 -0700
+Subject: [PATCH 1/9] mm/ksm: support fork/exec for prctl
+
+Today we have two ways to enable KSM:
+
+1) madvise system call
+ This allows enabling KSM for a memory region for a long time.
+
+2) prctl system call
+ This is a recent addition to enable KSM for the complete process.
+ In addition, when a process is forked, the KSM setting is inherited.
+
+This change only affects the second case.
+
+One of the use cases for (2) was to support the ability to enable
+KSM for cgroups. This allows systemd to enable KSM for the seed
+process. By enabling it in the seed process all child processes inherit
+the setting.
+
+This works correctly when the process is forked. However, it doesn't
+support the fork/exec workflow.
+
+From the previous cover letter:
+
+....
+Use case 3:
+With the madvise call sharing opportunities are only enabled for the
+current process: it is a workload-local decision. A considerable number
+of sharing opportunities may exist across multiple workloads or jobs
+(if they are part of the same security domain). Only a higher level
+entity like a job scheduler or container can know for certain if it's
+running one or more instances of a job. That job scheduler however
+doesn't have the necessary internal workload knowledge to make targeted
+madvise calls.
+....
+
+In addition it can also be a bit surprising that fork keeps the KSM
+setting and fork/exec does not.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Fixes: d7597f59d1d3 ("mm: add new api to enable ksm per process")
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reported-by: Carl Klemm <carl@uvos.xyz>
+Tested-by: Carl Klemm <carl@uvos.xyz>
+---
+ include/linux/sched/coredump.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
+index 1b37fa8fc..32414e891 100644
+--- a/include/linux/sched/coredump.h
++++ b/include/linux/sched/coredump.h
+@@ -87,10 +87,13 @@ static inline int get_dumpable(struct mm_struct *mm)
+
+ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
+
++#define MMF_VM_MERGE_ANY 29
++#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY)
++
+ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+- MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
++ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
++ MMF_VM_MERGE_ANY_MASK)
+
+-#define MMF_VM_MERGE_ANY 29
+ #define MMF_HAS_MDWE_NO_INHERIT 30
+
+ static inline unsigned long mmf_init_flags(unsigned long flags)
+--
+2.43.0.rc2
+
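+A minimal user-space sketch of the behaviour this patch fixes (assumes
+CONFIG_KSM and the PR_SET_MEMORY_MERGE/PR_GET_MEMORY_MERGE values from
+<linux/prctl.h>; setting the flag may additionally require
+CAP_SYS_RESOURCE):
+
+```
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#ifndef PR_SET_MEMORY_MERGE             /* <linux/prctl.h>, kernels >= 6.4 */
+#define PR_SET_MEMORY_MERGE 67
+#define PR_GET_MEMORY_MERGE 68
+#endif
+
+int main(int argc, char *argv[])
+{
+        if (argc > 1) {         /* re-exec'd child: report inherited state */
+                printf("KSM after exec: %d\n",
+                       (int)prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));
+                return 0;
+        }
+
+        if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
+                perror("PR_SET_MEMORY_MERGE");
+
+        if (fork() == 0) {      /* fork has always kept the flag ... */
+                execl("/proc/self/exe", argv[0], "child", (char *)NULL);
+                _exit(1);       /* ... but execve used to drop it */
+        }
+        wait(NULL);
+        return 0;
+}
+```
+With this patch applied the child prints "KSM after exec: 1"; before it,
+the flag was lost across execve().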
+
+From 41a07b06dc7b70da6afa1b8340fcf33a5f02121d Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Wed, 27 Sep 2023 09:22:19 -0700
+Subject: [PATCH 2/9] mm/ksm: add "smart" page scanning mode
+
+This change adds a "smart" page scanning mode for KSM. So far all the
+candidate pages are continuously scanned to find candidates for
+de-duplication. There are a considerable number of pages that cannot be
+de-duplicated. This is costly in terms of CPU. By using smart scanning
+considerable CPU savings can be achieved.
+
+This change takes the history of scanning pages into account and skips
+the scanning of certain pages for a while if de-duplication for
+this page has not been successful in the past.
+
+To do this it introduces two new fields in the ksm_rmap_item structure:
+age and remaining_skips. age is the KSM age and remaining_skips
+determines how often scanning of this page is skipped. The age field is
+incremented each time the page is scanned and cannot be de-duplicated,
+and it is capped at U8_MAX.
+
+How often a page is skipped depends on how often de-duplication has
+been tried so far, and the number of skips is currently limited to 8.
+This value has shown to be effective with different workloads.
+
+The feature is enabled by default and can be disabled with the new
+smart_scan knob.
+
+The feature has been shown to be very effective: up to 25% of the page scans
+can be eliminated; the pages_to_scan rate can be reduced by 40-50% and
+a similar de-duplication rate can be maintained.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+---
+ mm/ksm.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 103 insertions(+)
+
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 981af9c72..c0a2e7759 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -56,6 +56,8 @@
+ #define DO_NUMA(x) do { } while (0)
+ #endif
+
++typedef u8 rmap_age_t;
++
+ /**
+ * DOC: Overview
+ *
+@@ -193,6 +195,8 @@ struct ksm_stable_node {
+ * @node: rb node of this rmap_item in the unstable tree
+ * @head: pointer to stable_node heading this list in the stable tree
+ * @hlist: link into hlist of rmap_items hanging off that stable_node
++ * @age: number of scan iterations since creation
++ * @remaining_skips: how many scans to skip
+ */
+ struct ksm_rmap_item {
+ struct ksm_rmap_item *rmap_list;
+@@ -205,6 +209,8 @@ struct ksm_rmap_item {
+ struct mm_struct *mm;
+ unsigned long address; /* + low bits used for flags below */
+ unsigned int oldchecksum; /* when unstable */
++ rmap_age_t age;
++ rmap_age_t remaining_skips;
+ union {
+ struct rb_node node; /* when node of unstable tree */
+ struct { /* when listed from stable tree */
+@@ -281,6 +287,9 @@ static unsigned int zero_checksum __read_mostly;
+ /* Whether to merge empty (zeroed) pages with actual zero pages */
+ static bool ksm_use_zero_pages __read_mostly;
+
++/* Skip pages that couldn't be de-duplicated previously */
++static bool ksm_smart_scan = true;
++
+ /* The number of zero pages which is placed by KSM */
+ unsigned long ksm_zero_pages;
+
+@@ -2305,6 +2314,73 @@ static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
+ return rmap_item;
+ }
+
++/*
++ * Calculate skip age for the ksm page age. The age determines how often
++ * de-duplication has already been tried unsuccessfully. If the age is
++ * smaller, the scanning of this page is skipped for fewer scans.
++ *
++ * @age: rmap_item age of page
++ */
++static unsigned int skip_age(rmap_age_t age)
++{
++ if (age <= 3)
++ return 1;
++ if (age <= 5)
++ return 2;
++ if (age <= 8)
++ return 4;
++
++ return 8;
++}
++
++/*
++ * Determines if a page should be skipped for the current scan.
++ *
++ * @page: page to check
++ * @rmap_item: associated rmap_item of page
++ */
++static bool should_skip_rmap_item(struct page *page,
++ struct ksm_rmap_item *rmap_item)
++{
++ rmap_age_t age;
++
++ if (!ksm_smart_scan)
++ return false;
++
++ /*
++ * Never skip pages that are already KSM; pages cmp_and_merge_page()
++ * will essentially ignore them, but we still have to process them
++ * properly.
++ */
++ if (PageKsm(page))
++ return false;
++
++ age = rmap_item->age;
++ if (age != U8_MAX)
++ rmap_item->age++;
++
++ /*
++ * Smaller ages are not skipped, they need to get a chance to go
++ * through the different phases of the KSM merging.
++ */
++ if (age < 3)
++ return false;
++
++ /*
++ * Are we still allowed to skip? If not, then don't skip it
++ * and determine how much more often we are allowed to skip next.
++ */
++ if (!rmap_item->remaining_skips) {
++ rmap_item->remaining_skips = skip_age(age);
++ return false;
++ }
++
++ /* Skip this page */
++ rmap_item->remaining_skips--;
++ remove_rmap_item_from_tree(rmap_item);
++ return true;
++}
++
+ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
+ {
+ struct mm_struct *mm;
+@@ -2409,6 +2485,10 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
+ if (rmap_item) {
+ ksm_scan.rmap_list =
+ &rmap_item->rmap_list;
++
++ if (should_skip_rmap_item(*page, rmap_item))
++ goto next_page;
++
+ ksm_scan.address += PAGE_SIZE;
+ } else
+ put_page(*page);
+@@ -3449,6 +3529,28 @@ static ssize_t full_scans_show(struct kobject *kobj,
+ }
+ KSM_ATTR_RO(full_scans);
+
++static ssize_t smart_scan_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%u\n", ksm_smart_scan);
++}
++
++static ssize_t smart_scan_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err;
++ bool value;
++
++ err = kstrtobool(buf, &value);
++ if (err)
++ return -EINVAL;
++
++ ksm_smart_scan = value;
++ return count;
++}
++KSM_ATTR(smart_scan);
++
+ static struct attribute *ksm_attrs[] = {
+ &sleep_millisecs_attr.attr,
+ &pages_to_scan_attr.attr,
+@@ -3469,6 +3571,7 @@ static struct attribute *ksm_attrs[] = {
+ &stable_node_chains_prune_millisecs_attr.attr,
+ &use_zero_pages_attr.attr,
+ &general_profit_attr.attr,
++ &smart_scan_attr.attr,
+ NULL,
+ };
+
+--
+2.43.0.rc2
+
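+To make the skip schedule concrete, here is a small host-side model of
+skip_age() from the hunk above (same thresholds; ages below 3 are never
+skipped, and each failed merge attempt at age >= 3 schedules skip_age(age)
+skipped scans before the page is tried again):
+
+```
+#include <stdio.h>
+
+static unsigned int skip_age(unsigned char age)  /* mirrors the patch */
+{
+        if (age <= 3)
+                return 1;
+        if (age <= 5)
+                return 2;
+        if (age <= 8)
+                return 4;
+        return 8;
+}
+
+int main(void)
+{
+        unsigned char age;
+
+        for (age = 3; age <= 10; age++)
+                printf("age %2u -> skip next %u scan(s)\n", age, skip_age(age));
+        return 0;
+}
+```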
+
+From ad6d220dab3aa4acc08457a201a72ec344076f00 Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Wed, 27 Sep 2023 09:22:20 -0700
+Subject: [PATCH 3/9] mm/ksm: add pages_skipped metric
+
+This change adds the "pages skipped" metric. To be able to evaluate how
+successful smart page scanning is, the pages skipped metric can be
+compared to the pages scanned metric.
+
+The pages skipped metric is a cumulative counter. The counter is stored
+under /sys/kernel/mm/ksm/pages_skipped.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+---
+ mm/ksm.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/mm/ksm.c b/mm/ksm.c
+index c0a2e7759..1df25a66f 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -293,6 +293,9 @@ static bool ksm_smart_scan = 1;
+ /* The number of zero pages which is placed by KSM */
+ unsigned long ksm_zero_pages;
+
++/* The number of pages that have been skipped due to "smart scanning" */
++static unsigned long ksm_pages_skipped;
++
+ #ifdef CONFIG_NUMA
+ /* Zeroed when merging across nodes is not allowed */
+ static unsigned int ksm_merge_across_nodes = 1;
+@@ -2376,6 +2379,7 @@ static bool should_skip_rmap_item(struct page *page,
+ }
+
+ /* Skip this page */
++ ksm_pages_skipped++;
+ rmap_item->remaining_skips--;
+ remove_rmap_item_from_tree(rmap_item);
+ return true;
+@@ -3463,6 +3467,13 @@ static ssize_t pages_volatile_show(struct kobject *kobj,
+ }
+ KSM_ATTR_RO(pages_volatile);
+
++static ssize_t pages_skipped_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
++}
++KSM_ATTR_RO(pages_skipped);
++
+ static ssize_t ksm_zero_pages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+ {
+@@ -3560,6 +3571,7 @@ static struct attribute *ksm_attrs[] = {
+ &pages_sharing_attr.attr,
+ &pages_unshared_attr.attr,
+ &pages_volatile_attr.attr,
++ &pages_skipped_attr.attr,
+ &ksm_zero_pages_attr.attr,
+ &full_scans_attr.attr,
+ #ifdef CONFIG_NUMA
+--
+2.43.0.rc2
+
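+The counter pairs naturally with pages_scanned (also exported in 6.6) to
+judge how effective smart scanning is; a user-space sketch, assuming the
+standard /sys/kernel/mm/ksm layout:
+
+```
+#include <stdio.h>
+
+static unsigned long ksm_read(const char *name)
+{
+        char path[128];
+        unsigned long val = 0;
+        FILE *f;
+
+        snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
+        f = fopen(path, "r");
+        if (f) {
+                if (fscanf(f, "%lu", &val) != 1)
+                        val = 0;
+                fclose(f);
+        }
+        return val;
+}
+
+int main(void)
+{
+        unsigned long scanned = ksm_read("pages_scanned");
+        unsigned long skipped = ksm_read("pages_skipped");
+
+        if (scanned + skipped)
+                printf("smart scan skipped %.1f%% of candidate visits\n",
+                       100.0 * skipped / (scanned + skipped));
+        return 0;
+}
+```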
+
+From 1c5c269d3fa05812a7da32bf5f83f6b9bdc8d6c4 Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Wed, 27 Sep 2023 09:22:21 -0700
+Subject: [PATCH 4/9] mm/ksm: document smart scan mode
+
+This adds documentation for the smart scan mode of KSM.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+---
+ Documentation/admin-guide/mm/ksm.rst | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst
+index 776f244bd..2b38a8bb0 100644
+--- a/Documentation/admin-guide/mm/ksm.rst
++++ b/Documentation/admin-guide/mm/ksm.rst
+@@ -155,6 +155,15 @@ stable_node_chains_prune_millisecs
+ scan. It's a noop if not a single KSM page hit the
+ ``max_page_sharing`` yet.
+
++smart_scan
++ Historically KSM checked every candidate page for each scan. It did
++ not take into account historic information. When smart scan is
++ enabled, pages that have previously not been de-duplicated get
++ skipped. How often these pages are skipped depends on how often
++ de-duplication has already been tried and failed. By default this
++ optimization is enabled. The ``pages_skipped`` metric shows how
++ effective the setting is.
++
+ The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``:
+
+ general_profit
+--
+2.43.0.rc2
+
+
+From 218c97d1ef7ad28a79f1def130257c59b3a2a7a1 Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Wed, 27 Sep 2023 09:22:22 -0700
+Subject: [PATCH 5/9] mm/ksm: document pages_skipped sysfs knob
+
+This adds documentation for the new metric pages_skipped.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+---
+ Documentation/admin-guide/mm/ksm.rst | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst
+index 2b38a8bb0..0cadde17a 100644
+--- a/Documentation/admin-guide/mm/ksm.rst
++++ b/Documentation/admin-guide/mm/ksm.rst
+@@ -178,6 +178,8 @@ pages_unshared
+ how many pages unique but repeatedly checked for merging
+ pages_volatile
+ how many pages changing too fast to be placed in a tree
++pages_skipped
++ how many pages did the "smart" page scanning algorithm skip
+ full_scans
+ how many times all mergeable areas have been scanned
+ stable_node_chains
+--
+2.43.0.rc2
+
+
+From 692c5e04efe16bc1f354376c156832d7fbd8a0c3 Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Mon, 18 Dec 2023 15:10:51 -0800
+Subject: [PATCH 6/9] mm/ksm: add ksm advisor
+
+This adds the ksm advisor. The ksm advisor automatically manages the
+pages_to_scan setting to achieve a target scan time. The target scan
+time defines how many seconds it should take to scan all the candidate
+KSM pages. In other words, the pages_to_scan rate is changed by the
+advisor to achieve the target scan time. The algorithm has a max and min
+value to:
+- guarantee responsiveness to changes
+- limit CPU resource consumption
+
+The respective parameters are:
+- ksm_advisor_target_scan_time (how many seconds a scan should take)
+- ksm_advisor_max_cpu (maximum value for cpu percent usage)
+
+- ksm_advisor_min_pages (minimum value for pages_to_scan per batch)
+- ksm_advisor_max_pages (maximum value for pages_to_scan per batch)
+
+The algorithm calculates the change value based on the target scan time
+and the previous scan time. To avoid perturbations, an exponentially
+weighted moving average is applied.
+
+The advisor is managed by two main parameters: the target scan time and
+the max cpu time for the ksmd background thread. These parameters determine
+how aggressively ksmd scans.
+
+In addition there are min and max values for the pages_to_scan parameter
+to make sure that its initial and max values are not set too low or too
+high. This ensures that it is able to react to changes quickly enough.
+
+The default values are:
+- target scan time: 200 secs
+- max cpu: 70%
+- min pages: 500
+- max pages: 30000
+
+By default the advisor is disabled. Currently there are two advisors:
+none and scan-time.
+
+Tests with various workloads have shown considerable CPU savings. Most
+of the workloads I have investigated have more candidate pages during
+startup; once the workload is stable in terms of memory, the number of
+candidate pages is reduced. Without the advisor, the pages_to_scan needs
+to be sized for the maximum number of candidate pages. So having this
+advisor definitely helps in reducing CPU consumption.
+
+For the instagram workload, the advisor achieves a 25% CPU reduction.
+Once the memory is stable, the pages_to_scan parameter gets reduced to
+about 40% of its max value.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Acked-by: David Hildenbrand <david@redhat.com>
+---
+ mm/ksm.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 157 insertions(+), 1 deletion(-)
+
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 1df25a66f..aef991e20 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -21,6 +21,7 @@
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/sched/coredump.h>
++#include <linux/sched/cputime.h>
+ #include <linux/rwsem.h>
+ #include <linux/pagemap.h>
+ #include <linux/rmap.h>
+@@ -248,6 +249,9 @@ static struct kmem_cache *rmap_item_cache;
+ static struct kmem_cache *stable_node_cache;
+ static struct kmem_cache *mm_slot_cache;
+
++/* Default number of pages to scan per batch */
++#define DEFAULT_PAGES_TO_SCAN 100
++
+ /* The number of pages scanned */
+ static unsigned long ksm_pages_scanned;
+
+@@ -276,7 +280,7 @@ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
+ static int ksm_max_page_sharing = 256;
+
+ /* Number of pages ksmd should scan in one batch */
+-static unsigned int ksm_thread_pages_to_scan = 100;
++static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
+
+ /* Milliseconds ksmd should sleep between batches */
+ static unsigned int ksm_thread_sleep_millisecs = 20;
+@@ -296,6 +300,152 @@ unsigned long ksm_zero_pages;
+ /* The number of pages that have been skipped due to "smart scanning" */
+ static unsigned long ksm_pages_skipped;
+
++/* Don't scan more than max pages per batch. */
++static unsigned long ksm_advisor_max_pages_to_scan = 30000;
++
++/* Min CPU for scanning pages per scan */
++#define KSM_ADVISOR_MIN_CPU 10
++
++/* Max CPU for scanning pages per scan */
++static unsigned int ksm_advisor_max_cpu = 70;
++
++/* Target scan time in seconds to analyze all KSM candidate pages. */
++static unsigned long ksm_advisor_target_scan_time = 200;
++
++/* Exponentially weighted moving average. */
++#define EWMA_WEIGHT 30
++
++/**
++ * struct advisor_ctx - metadata for KSM advisor
++ * @start_scan: start time of the current scan
++ * @scan_time: scan time of previous scan
++ * @change: change in percent to pages_to_scan parameter
++ * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
++ */
++struct advisor_ctx {
++ ktime_t start_scan;
++ unsigned long scan_time;
++ unsigned long change;
++ unsigned long long cpu_time;
++};
++static struct advisor_ctx advisor_ctx;
++
++/* Define different advisor's */
++enum ksm_advisor_type {
++ KSM_ADVISOR_NONE,
++ KSM_ADVISOR_SCAN_TIME,
++};
++static enum ksm_advisor_type ksm_advisor;
++
++static inline void advisor_start_scan(void)
++{
++ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
++ advisor_ctx.start_scan = ktime_get();
++}
++
++/*
++ * Use previous scan time if available, otherwise use current scan time as an
++ * approximation for the previous scan time.
++ */
++static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
++ unsigned long scan_time)
++{
++ return ctx->scan_time ? ctx->scan_time : scan_time;
++}
++
++/* Calculate exponential weighted moving average */
++static unsigned long ewma(unsigned long prev, unsigned long curr)
++{
++ return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
++}
++
++/*
++ * The scan time advisor is based on the current scan rate and the target
++ * scan rate.
++ *
++ * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
++ *
++ * To avoid perturbations it calculates a change factor of previous changes.
++ * A new change factor is calculated for each iteration and it uses an
++ * exponentially weighted moving average. The new pages_to_scan value is
++ * multiplied with that change factor:
++ *
++ * new_pages_to_scan *= change factor
++ *
++ * The new_pages_to_scan value is limited by the cpu min and max values. It
++ * calculates the cpu percent for the last scan and calculates the new
++ * estimated cpu percent cost for the next scan. That value is capped by the
++ * cpu min and max setting.
++ *
++ * In addition the new pages_to_scan value is capped by the max and min
++ * limits.
++ */
++static void scan_time_advisor(void)
++{
++ unsigned int cpu_percent;
++ unsigned long cpu_time;
++ unsigned long cpu_time_diff;
++ unsigned long cpu_time_diff_ms;
++ unsigned long pages;
++ unsigned long per_page_cost;
++ unsigned long factor;
++ unsigned long change;
++ unsigned long last_scan_time;
++ unsigned long scan_time;
++
++ /* Convert scan time to seconds */
++ scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
++ MSEC_PER_SEC);
++ scan_time = scan_time ? scan_time : 1;
++
++ /* Calculate CPU consumption of ksmd background thread */
++ cpu_time = task_sched_runtime(current);
++ cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
++ cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
++
++ cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
++ cpu_percent = cpu_percent ? cpu_percent : 1;
++ last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
++
++ /* Calculate scan time as percentage of target scan time */
++ factor = ksm_advisor_target_scan_time * 100 / scan_time;
++ factor = factor ? factor : 1;
++
++ /*
++ * Calculate scan time as percentage of last scan time and use
++ * exponentially weighted average to smooth it
++ */
++ change = scan_time * 100 / last_scan_time;
++ change = change ? change : 1;
++ change = ewma(advisor_ctx.change, change);
++
++ /* Calculate new scan rate based on target scan rate. */
++ pages = ksm_thread_pages_to_scan * 100 / factor;
++ /* Update pages_to_scan by weighted change percentage. */
++ pages = pages * change / 100;
++
++ /* Cap new pages_to_scan value */
++ per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
++ per_page_cost = per_page_cost ? per_page_cost : 1;
++
++ pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
++ pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
++ pages = min(pages, ksm_advisor_max_pages_to_scan);
++
++ /* Update advisor context */
++ advisor_ctx.change = change;
++ advisor_ctx.scan_time = scan_time;
++ advisor_ctx.cpu_time = cpu_time;
++
++ ksm_thread_pages_to_scan = pages;
++}
++
++static void advisor_stop_scan(void)
++{
++ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
++ scan_time_advisor();
++}
++
+ #ifdef CONFIG_NUMA
+ /* Zeroed when merging across nodes is not allowed */
+ static unsigned int ksm_merge_across_nodes = 1;
+@@ -2400,6 +2550,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
+
+ mm_slot = ksm_scan.mm_slot;
+ if (mm_slot == &ksm_mm_head) {
++ advisor_start_scan();
+ trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
+
+ /*
+@@ -2557,6 +2708,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
+ if (mm_slot != &ksm_mm_head)
+ goto next_mm;
+
++ advisor_stop_scan();
++
+ trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
+ ksm_scan.seqnr++;
+ return NULL;
+@@ -3243,6 +3396,9 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
+ unsigned int nr_pages;
+ int err;
+
++ if (ksm_advisor != KSM_ADVISOR_NONE)
++ return -EINVAL;
++
+ err = kstrtouint(buf, 10, &nr_pages);
+ if (err)
+ return -EINVAL;
+--
+2.43.0.rc2
+
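+A worked example of one scan_time_advisor() step, with made-up numbers and
+the CPU capping omitted: a 200 s target and a 400 s scan halve the factor,
+so pages_to_scan roughly doubles, damped by the EWMA:
+
+```
+#include <stdio.h>
+
+#define EWMA_WEIGHT 30  /* same weight as the patch */
+
+static unsigned long ewma(unsigned long prev, unsigned long curr)
+{
+        return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
+}
+
+int main(void)
+{
+        unsigned long target = 200, scan_time = 400, last_scan_time = 400;
+        unsigned long pages_to_scan = 1000, prev_change = 100;
+
+        unsigned long factor = target * 100 / scan_time;               /* 50 */
+        unsigned long change = ewma(prev_change,
+                                    scan_time * 100 / last_scan_time); /* 100 */
+        unsigned long pages = pages_to_scan * 100 / factor;            /* 2000 */
+
+        pages = pages * change / 100;
+        printf("pages_to_scan: %lu -> %lu\n", pages_to_scan, pages);
+        return 0;
+}
+```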
+
+From 3b9c233ed130557f396326ff74be98afd0819775 Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Mon, 18 Dec 2023 15:10:52 -0800
+Subject: [PATCH 7/9] mm/ksm: add sysfs knobs for advisor
+
+This adds five new knobs for the KSM advisor to influence its behaviour.
+
+The knobs are:
+- advisor_mode:
+ none: no advisor (default)
+ scan-time: scan time advisor
+- advisor_max_cpu: 70 (default, cpu usage percent)
+- advisor_min_pages_to_scan: 500 (default)
+- advisor_max_pages_to_scan: 30000 (default)
+- advisor_target_scan_time: 200 (default in seconds)
+
+The new values will take effect on the next scan round.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Acked-by: David Hildenbrand <david@redhat.com>
+---
+ mm/ksm.c | 148 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 148 insertions(+)
+
+diff --git a/mm/ksm.c b/mm/ksm.c
+index aef991e20..c3bc292b1 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -337,6 +337,25 @@ enum ksm_advisor_type {
+ };
+ static enum ksm_advisor_type ksm_advisor;
+
++#ifdef CONFIG_SYSFS
++/*
++ * Only called through the sysfs control interface:
++ */
++
++/* At least scan this many pages per batch. */
++static unsigned long ksm_advisor_min_pages_to_scan = 500;
++
++static void set_advisor_defaults(void)
++{
++ if (ksm_advisor == KSM_ADVISOR_NONE) {
++ ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
++ } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
++ advisor_ctx = (const struct advisor_ctx){ 0 };
++ ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
++ }
++}
++#endif /* CONFIG_SYSFS */
++
+ static inline void advisor_start_scan(void)
+ {
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+@@ -3718,6 +3737,130 @@ static ssize_t smart_scan_store(struct kobject *kobj,
+ }
+ KSM_ATTR(smart_scan);
+
++static ssize_t advisor_mode_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ const char *output;
++
++ if (ksm_advisor == KSM_ADVISOR_NONE)
++ output = "[none] scan-time";
++ else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
++ output = "none [scan-time]";
++
++ return sysfs_emit(buf, "%s\n", output);
++}
++
++static ssize_t advisor_mode_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf,
++ size_t count)
++{
++ enum ksm_advisor_type curr_advisor = ksm_advisor;
++
++ if (sysfs_streq("scan-time", buf))
++ ksm_advisor = KSM_ADVISOR_SCAN_TIME;
++ else if (sysfs_streq("none", buf))
++ ksm_advisor = KSM_ADVISOR_NONE;
++ else
++ return -EINVAL;
++
++ /* Set advisor default values */
++ if (curr_advisor != ksm_advisor)
++ set_advisor_defaults();
++
++ return count;
++}
++KSM_ATTR(advisor_mode);
++
++static ssize_t advisor_max_cpu_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
++}
++
++static ssize_t advisor_max_cpu_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err;
++ unsigned long value;
++
++ err = kstrtoul(buf, 10, &value);
++ if (err)
++ return -EINVAL;
++
++ ksm_advisor_max_cpu = value;
++ return count;
++}
++KSM_ATTR(advisor_max_cpu);
++
++static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
++}
++
++static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err;
++ unsigned long value;
++
++ err = kstrtoul(buf, 10, &value);
++ if (err)
++ return -EINVAL;
++
++ ksm_advisor_min_pages_to_scan = value;
++ return count;
++}
++KSM_ATTR(advisor_min_pages_to_scan);
++
++static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
++}
++
++static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err;
++ unsigned long value;
++
++ err = kstrtoul(buf, 10, &value);
++ if (err)
++ return -EINVAL;
++
++ ksm_advisor_max_pages_to_scan = value;
++ return count;
++}
++KSM_ATTR(advisor_max_pages_to_scan);
++
++static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
++}
++
++static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
++ struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int err;
++ unsigned long value;
++
++ err = kstrtoul(buf, 10, &value);
++ if (err)
++ return -EINVAL;
++ if (value < 1)
++ return -EINVAL;
++
++ ksm_advisor_target_scan_time = value;
++ return count;
++}
++KSM_ATTR(advisor_target_scan_time);
++
+ static struct attribute *ksm_attrs[] = {
+ &sleep_millisecs_attr.attr,
+ &pages_to_scan_attr.attr,
+@@ -3740,6 +3883,11 @@ static struct attribute *ksm_attrs[] = {
+ &use_zero_pages_attr.attr,
+ &general_profit_attr.attr,
+ &smart_scan_attr.attr,
++ &advisor_mode_attr.attr,
++ &advisor_max_cpu_attr.attr,
++ &advisor_min_pages_to_scan_attr.attr,
++ &advisor_max_pages_to_scan_attr.attr,
++ &advisor_target_scan_time_attr.attr,
+ NULL,
+ };
+
+--
+2.43.0.rc2
+
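+Typical use of the new knobs, sketched in C (needs root; the paths assume
+a kernel carrying this series). Note that while an advisor is active,
+writing pages_to_scan returns -EINVAL, as added in the previous patch:
+
+```
+#include <stdio.h>
+
+static int ksm_write(const char *name, const char *val)
+{
+        char path[128];
+        FILE *f;
+
+        snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
+        f = fopen(path, "w");
+        if (!f)
+                return -1;
+        fputs(val, f);
+        return fclose(f);
+}
+
+int main(void)
+{
+        if (ksm_write("advisor_mode", "scan-time") ||
+            ksm_write("advisor_target_scan_time", "120") ||
+            ksm_write("advisor_max_cpu", "50")) {
+                fprintf(stderr, "need root and the advisor sysfs knobs\n");
+                return 1;
+        }
+        return 0;
+}
+```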
+
+From bd5a62b2620729cbe4c6625341e5dcac471fc21c Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Mon, 18 Dec 2023 15:10:53 -0800
+Subject: [PATCH 8/9] mm/ksm: add tracepoint for ksm advisor
+
+This adds a new tracepoint for the ksm advisor. It reports the last scan
+time, the new setting of the pages_to_scan parameter and the average cpu
+percent usage of the ksmd background thread for the last scan.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Acked-by: David Hildenbrand <david@redhat.com>
+---
+ include/trace/events/ksm.h | 33 +++++++++++++++++++++++++++++++++
+ mm/ksm.c | 1 +
+ 2 files changed, 34 insertions(+)
+
+diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
+index b5ac35c1d..e728647b5 100644
+--- a/include/trace/events/ksm.h
++++ b/include/trace/events/ksm.h
+@@ -245,6 +245,39 @@ TRACE_EVENT(ksm_remove_rmap_item,
+ __entry->pfn, __entry->rmap_item, __entry->mm)
+ );
+
++/**
++ * ksm_advisor - called after the advisor has run
++ *
++ * @scan_time: scan time in seconds
++ * @pages_to_scan: new pages_to_scan value
++ * @cpu_percent: cpu usage in percent
++ *
++ * Allows tracing the ksm advisor.
++ */
++TRACE_EVENT(ksm_advisor,
++
++ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
++ unsigned int cpu_percent),
++
++ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
++
++ TP_STRUCT__entry(
++ __field(s64, scan_time)
++ __field(unsigned long, pages_to_scan)
++ __field(unsigned int, cpu_percent)
++ ),
++
++ TP_fast_assign(
++ __entry->scan_time = scan_time;
++ __entry->pages_to_scan = pages_to_scan;
++ __entry->cpu_percent = cpu_percent;
++ ),
++
++ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
++ __entry->scan_time, __entry->pages_to_scan,
++ __entry->cpu_percent)
++);
++
+ #endif /* _TRACE_KSM_H */
+
+ /* This part must be outside protection */
+diff --git a/mm/ksm.c b/mm/ksm.c
+index c3bc292b1..a1b5aa12a 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -457,6 +457,7 @@ static void scan_time_advisor(void)
+ advisor_ctx.cpu_time = cpu_time;
+
+ ksm_thread_pages_to_scan = pages;
++ trace_ksm_advisor(scan_time, pages, cpu_percent);
+ }
+
+ static void advisor_stop_scan(void)
+--
+2.43.0.rc2
+
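+The new tracepoint can be watched through tracefs (usually mounted at
+/sys/kernel/tracing; needs root). A sketch that enables it:
+
+```
+#include <stdio.h>
+
+int main(void)
+{
+        FILE *f = fopen("/sys/kernel/tracing/events/ksm/ksm_advisor/enable",
+                        "w");
+
+        if (!f) {
+                perror("tracefs");
+                return 1;
+        }
+        fputc('1', f);
+        fclose(f);
+
+        /* Entries then appear in /sys/kernel/tracing/trace, formatted per
+         * TP_printk above, e.g. (values illustrative):
+         *   ksm scan time 210 pages_to_scan 5000 cpu percent 42 */
+        return 0;
+}
+```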
+
+From 7a0a7aa00db82570f827e5caadbafa5874e047db Mon Sep 17 00:00:00 2001
+From: Stefan Roesch <shr@devkernel.io>
+Date: Mon, 18 Dec 2023 15:10:54 -0800
+Subject: [PATCH 9/9] mm/ksm: document ksm advisor and its sysfs knobs
+
+This documents the KSM advisor and its new knobs in /sys/kernel/mm.
+
+Signed-off-by: Stefan Roesch <shr@devkernel.io>
+Acked-by: David Hildenbrand <david@redhat.com>
+---
+ Documentation/admin-guide/mm/ksm.rst | 55 ++++++++++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+
+diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst
+index 0cadde17a..ad2bb8771 100644
+--- a/Documentation/admin-guide/mm/ksm.rst
++++ b/Documentation/admin-guide/mm/ksm.rst
+@@ -80,6 +80,9 @@ pages_to_scan
+ how many pages to scan before ksmd goes to sleep
+ e.g. ``echo 100 > /sys/kernel/mm/ksm/pages_to_scan``.
+
++ The pages_to_scan value cannot be changed if ``advisor_mode`` has
++ been set to scan-time.
++
+ Default: 100 (chosen for demonstration purposes)
+
+ sleep_millisecs
+@@ -164,6 +167,29 @@ smart_scan
+ optimization is enabled. The ``pages_skipped`` metric shows how
+ effective the setting is.
+
++advisor_mode
++ The ``advisor_mode`` selects the current advisor. Two modes are
++ supported: none and scan-time. The default is none. By setting
++ ``advisor_mode`` to scan-time, the scan time advisor is enabled.
++ The section about ``advisor`` explains in detail how the scan time
++ advisor works.
++
++advisor_max_cpu
++ specifies the upper limit of the cpu percent usage of the ksmd
++ background thread. The default is 70.
++
++advisor_target_scan_time
++ specifies the target scan time in seconds to scan all the candidate
++ pages. The default value is 200 seconds.
++
++advisor_min_pages_to_scan
++ specifies the lower limit of the ``pages_to_scan`` parameter of the
++ scan time advisor. The default is 500.
++
++advisor_max_pages_to_scan
++ specifies the upper limit of the ``pages_to_scan`` parameter of the
++ scan time advisor. The default is 30000.
++
+ The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``:
+
+ general_profit
+@@ -263,6 +289,35 @@ ksm_swpin_copy
+ note that KSM page might be copied when swapping in because do_swap_page()
+ cannot do all the locking needed to reconstitute a cross-anon_vma KSM page.
+
++Advisor
++=======
++
++The number of candidate pages for KSM is dynamic. It can often be observed
++that during the startup of an application more candidate pages need to be
++processed. Without an advisor the ``pages_to_scan`` parameter needs to be
++sized for the maximum number of candidate pages. The scan time advisor can
++change the ``pages_to_scan`` parameter based on demand.
++
++The advisor can be enabled, so KSM can automatically adapt to changes in the
++number of candidate pages to scan. Two advisors are implemented: none and
++scan-time. With none, no advisor is enabled. The default is none.
++
++The scan time advisor changes the ``pages_to_scan`` parameter based on the
++observed scan times. The possible values for the ``pages_to_scan`` parameter are
++limited by the ``advisor_max_cpu`` parameter. In addition there is also the
++``advisor_target_scan_time`` parameter. This parameter sets the target time to
++scan all the KSM candidate pages. The parameter ``advisor_target_scan_time``
++decides how aggressively the scan time advisor scans candidate pages. Lower
++values make the scan time advisor scan more aggressively. This is the most
++important parameter for the configuration of the scan time advisor.
++
++The initial value and the maximum value can be changed with
++``advisor_min_pages_to_scan`` and ``advisor_max_pages_to_scan``. The default
++values are sufficient for most workloads and use cases.
++
++The ``pages_to_scan`` parameter is re-calculated after a scan has been completed.
++
++
+ --
+ Izik Eidus,
+ Hugh Dickins, 17 Nov 2009
+--
+2.43.0.rc2
+
diff --git a/system/easy-kernel/0300-correct-max98388-includes.patch b/system/easy-kernel/0300-correct-max98388-includes.patch
new file mode 100644
index 000000000..c24d6bed5
--- /dev/null
+++ b/system/easy-kernel/0300-correct-max98388-includes.patch
@@ -0,0 +1,39 @@
+From 832beb640e425b5d1a92d8c2002e6b8e0af693eb Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Mon, 11 Sep 2023 10:23:51 +0200
+Subject: ASoC: max98388: Correct the includes
+
+The MAX98388 driver is using the modern GPIO descriptor API
+but uses legacy includes. Include the proper <linux/consumer.h>
+header instead.
+
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20230911-descriptors-asoc-max-v2-4-b9d793fb768e@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ sound/soc/codecs/max98388.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+(limited to 'sound/soc/codecs/max98388.c')
+
+diff --git a/sound/soc/codecs/max98388.c b/sound/soc/codecs/max98388.c
+index cde5e85946cb88..078adec29312d0 100644
+--- a/sound/soc/codecs/max98388.c
++++ b/sound/soc/codecs/max98388.c
+@@ -3,12 +3,11 @@
+
+ #include <linux/acpi.h>
+ #include <linux/delay.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/of.h>
+-#include <linux/of_gpio.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+--
+cgit 1.2.3-korg
+
diff --git a/system/easy-kernel/0300-tmp513-regression-fix.patch b/system/easy-kernel/0300-tmp513-regression-fix.patch
deleted file mode 100644
index 433568579..000000000
--- a/system/easy-kernel/0300-tmp513-regression-fix.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From dc328d75a6f37f4ff11a81ae16b1ec88c3197640 Mon Sep 17 00:00:00 2001
-From: Mike Pagano <mpagano@gentoo.org>
-Date: Mon, 23 Mar 2020 08:20:06 -0400
-Subject: [PATCH 1/1] This driver requires REGMAP_I2C to build. Select it by
- default in Kconfig. Reported at gentoo bugzilla:
- https://bugs.gentoo.org/710790
-Cc: mpagano@gentoo.org
-
-Reported-by: Phil Stracchino <phils@caerllewys.net>
-
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- drivers/hwmon/Kconfig | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
-index 47ac20aee06f..530b4f29ba85 100644
---- a/drivers/hwmon/Kconfig
-+++ b/drivers/hwmon/Kconfig
-@@ -1769,6 +1769,7 @@ config SENSORS_TMP421
- config SENSORS_TMP513
- tristate "Texas Instruments TMP513 and compatibles"
- depends on I2C
-+ select REGMAP_I2C
- help
- If you say yes here you get support for Texas Instruments TMP512,
- and TMP513 temperature and power supply sensor chips.
---
-2.24.1
-
diff --git a/system/easy-kernel/0302-i915-gcc14-fix.patch b/system/easy-kernel/0302-i915-gcc14-fix.patch
new file mode 100644
index 000000000..b6f6ab386
--- /dev/null
+++ b/system/easy-kernel/0302-i915-gcc14-fix.patch
@@ -0,0 +1,37 @@
+Subject: [gcc-14 PATCH] drm: i915: Adapt to -Walloc-size
+
+GCC 14 introduces a new -Walloc-size included in -Wextra which errors out
+like:
+```
+drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c: In function ‘eb_copy_relocations’:
+drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c:1681:24: error: allocation of insufficient size ‘1’ for type ‘struct drm_i915_gem_relocation_entry’ with size ‘32’ [-Werror=alloc-size]
+ 1681 | relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ | ^
+
+```
+
+So, just swap the number of members and size arguments to match the prototype, as
+we're initialising 1 element of size `size`. GCC then sees we're not
+doing anything wrong.
+
+Link: https://lore.kernel.org/intel-gfx/20231107215538.1891359-1-sam@gentoo.org/
+Signed-off-by: Sam James <sam@gentoo.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 683fd8d3151c..45b9d9e34b8b 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -1678,7 +1678,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
+ urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
+ size = nreloc * sizeof(*relocs);
+
+- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
++ relocs = kvmalloc_array(1, size, GFP_KERNEL);
+ if (!relocs) {
+ err = -ENOMEM;
+ goto err;
+--
+2.42.1
diff --git a/system/easy-kernel/0302-iwlwifi-rfkill-fix.patch b/system/easy-kernel/0302-iwlwifi-rfkill-fix.patch
deleted file mode 100644
index 02994243f..000000000
--- a/system/easy-kernel/0302-iwlwifi-rfkill-fix.patch
+++ /dev/null
@@ -1,170 +0,0 @@
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
-index 56def20374f3..abdb687e7274 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
-@@ -770,7 +770,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
- }
- }
-
--void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
-+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
-
- static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
- {
-@@ -817,7 +817,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
- return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
- }
-
--void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
-+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
- void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-
- #ifdef CONFIG_IWLWIFI_DEBUGFS
-@@ -853,7 +853,7 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
- int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd);
- void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
--void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool from_irq);
- void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset);
- int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
-index 146bc7bd14fb..a0d10df0c11a 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
-@@ -1783,7 +1783,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
- return inta;
- }
-
--void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
-+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
- {
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-@@ -1807,7 +1807,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
- isr_stats->rfkill++;
-
- if (prev != report)
-- iwl_trans_pcie_rf_kill(trans, report);
-+ iwl_trans_pcie_rf_kill(trans, report, from_irq);
- mutex_unlock(&trans_pcie->mutex);
-
- if (hw_rfkill) {
-@@ -1947,7 +1947,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
-- iwl_pcie_handle_rfkill_irq(trans);
-+ iwl_pcie_handle_rfkill_irq(trans, true);
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
-@@ -2370,7 +2370,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
-
- /* HW RF KILL switch toggled */
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
-- iwl_pcie_handle_rfkill_irq(trans);
-+ iwl_pcie_handle_rfkill_irq(trans, true);
-
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
- IWL_ERR(trans,
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
-index c9e5bda8f0b7..a3b90c6422b9 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
-@@ -130,7 +130,7 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
- trans_pcie->fw_reset_state = FW_RESET_IDLE;
- }
-
--void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
-+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool from_irq)
- {
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-@@ -221,7 +221,7 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
- mutex_lock(&trans_pcie->mutex);
- trans_pcie->opmode_down = true;
- was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
-- _iwl_trans_pcie_gen2_stop_device(trans);
-+ _iwl_trans_pcie_gen2_stop_device(trans, false);
- iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
- mutex_unlock(&trans_pcie->mutex);
- }
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-index a468e5efeecd..bffd2293a9b8 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-@@ -1082,7 +1082,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
- report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
-
- if (prev != report)
-- iwl_trans_pcie_rf_kill(trans, report);
-+ iwl_trans_pcie_rf_kill(trans, report, false);
-
- return hw_rfkill;
- }
-@@ -1237,7 +1237,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
- trans_pcie->hw_mask = trans_pcie->hw_init_mask;
- }
-
--static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
-+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
- {
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-@@ -1264,7 +1264,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
- if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- IWL_DEBUG_INFO(trans,
- "DEVICE_ENABLED bit was set and is now cleared\n");
-- iwl_pcie_synchronize_irqs(trans);
-+ if (!from_irq)
-+ iwl_pcie_synchronize_irqs(trans);
- iwl_pcie_rx_napi_sync(trans);
- iwl_pcie_tx_stop(trans);
- iwl_pcie_rx_stop(trans);
-@@ -1454,7 +1455,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
- clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
- }
- if (hw_rfkill != was_in_rfkill)
-- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-+ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
- }
-
- static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
-@@ -1469,12 +1470,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
- mutex_lock(&trans_pcie->mutex);
- trans_pcie->opmode_down = true;
- was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
-- _iwl_trans_pcie_stop_device(trans);
-+ _iwl_trans_pcie_stop_device(trans, false);
- iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
- mutex_unlock(&trans_pcie->mutex);
- }
-
--void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
-+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
- {
- struct iwl_trans_pcie __maybe_unused *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-@@ -1485,9 +1486,9 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
- state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->trans_cfg->gen2)
-- _iwl_trans_pcie_gen2_stop_device(trans);
-+ _iwl_trans_pcie_gen2_stop_device(trans, from_irq);
- else
-- _iwl_trans_pcie_stop_device(trans);
-+ _iwl_trans_pcie_stop_device(trans, from_irq);
- }
- }
-
-@@ -2887,7 +2888,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
- IWL_WARN(trans, "changing debug rfkill %d->%d\n",
- trans_pcie->debug_rfkill, new_value);
- trans_pcie->debug_rfkill = new_value;
-- iwl_pcie_handle_rfkill_irq(trans);
-+ iwl_pcie_handle_rfkill_irq(trans, false);
-
- return count;
- }
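The iwlwifi rfkill patch whose content is dropped above (superseded by the
upstream fix) threads a from_irq flag through the stop-device paths:
iwl_pcie_synchronize_irqs() waits for all IRQ handlers to finish, so calling
it from the rfkill interrupt path would have the handler wait on itself. A
minimal sketch of that pattern follows; device_stop() and sync_irqs() are
illustrative stand-ins, not the driver's own functions.

    /* Sketch of the from_irq pattern used by the patch above. */
    #include <stdbool.h>
    #include <stdio.h>

    static void sync_irqs(void)
    {
        /* Stands in for iwl_pcie_synchronize_irqs(): waits until every
         * IRQ handler has returned, so it is unsafe inside a handler. */
        puts("synchronizing irqs");
    }

    static void device_stop(bool from_irq)
    {
        if (!from_irq)      /* only synchronize from process context */
            sync_irqs();
        puts("device stopped");
    }

    int main(void)
    {
        device_stop(false); /* opmode stop path: full synchronization */
        device_stop(true);  /* rfkill IRQ path: skip the wait */
        return 0;
    }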
diff --git a/system/easy-kernel/0210-fix-powerbook6-5-audio.patch b/system/easy-kernel/0304-fix-powerbook6-5-audio.patch
index 0db865335..0db865335 100644
--- a/system/easy-kernel/0210-fix-powerbook6-5-audio.patch
+++ b/system/easy-kernel/0304-fix-powerbook6-5-audio.patch
diff --git a/system/easy-kernel/0502-gcc9-kcflags.patch b/system/easy-kernel/0502-gcc9-kcflags.patch
index 54c262899..596cade69 100644
--- a/system/easy-kernel/0502-gcc9-kcflags.patch
+++ b/system/easy-kernel/0502-gcc9-kcflags.patch
@@ -1,5 +1,6 @@
+From 71dd30c3e2ab2852b0290ae1f34ce1c7f8655040 Mon Sep 17 00:00:00 2001
From: graysky <therealgraysky@proton.me>
-Date: Thu, 5 Jan 2023 14:29:37 -0500
+Date: Wed, 21 Feb 2024 08:38:13 -0500
FEATURES
This patch adds additional CPU options to the Linux kernel accessible under:
@@ -12,9 +13,10 @@ offered which are good for supported Intel or AMD CPUs:
• x86-64-v3
• x86-64-v4
-Users of glibc 2.33 and above can see which level is supported by current
-hardware by running:
+Users of glibc 2.33 and above can see which level is supported by running:
/lib/ld-linux-x86-64.so.2 --help | grep supported
+Or
+ /lib64/ld-linux-x86-64.so.2 --help | grep supported
Alternatively, compare the flags from /proc/cpuinfo to this list.[1]
@@ -105,12 +107,12 @@ REFERENCES
3 files changed, 528 insertions(+), 17 deletions(-)
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 542377cd419d..f589971df2d3 100644
+index 87396575c..5ac6e8463 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -157,7 +157,7 @@ config MPENTIUM4
-
-
+
+
config MK6
- bool "K6/K6-II/K6-III"
+ bool "AMD K6/K6-II/K6-III"
@@ -119,7 +121,7 @@ index 542377cd419d..f589971df2d3 100644
Select this for an AMD K6-family processor. Enables use of
@@ -165,7 +165,7 @@ config MK6
flags to GCC.
-
+
config MK7
- bool "Athlon/Duron/K7"
+ bool "AMD Athlon/Duron/K7"
@@ -128,7 +130,7 @@ index 542377cd419d..f589971df2d3 100644
Select this for an AMD Athlon K7-family processor. Enables use of
@@ -173,12 +173,106 @@ config MK7
flags to GCC.
-
+
config MK8
- bool "Opteron/Athlon64/Hammer/K8"
+ bool "AMD Opteron/Athlon64/Hammer/K8"
@@ -136,7 +138,7 @@ index 542377cd419d..f589971df2d3 100644
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
Enables use of some extended instructions, and passes appropriate
optimization flags to GCC.
-
+
+config MK8SSE3
+ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
+ help
@@ -236,17 +238,17 @@ index 542377cd419d..f589971df2d3 100644
depends on X86_32
@@ -270,7 +364,7 @@ config MPSC
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
+
config MCORE2
- bool "Core 2/newer Xeon"
+ bool "Intel Core 2"
help
-
+
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
@@ -278,6 +372,8 @@ config MCORE2
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
(not a typo)
-
+
+ Enables -march=core2
+
config MATOM
@@ -255,7 +257,7 @@ index 542377cd419d..f589971df2d3 100644
@@ -287,6 +383,212 @@ config MATOM
accordingly optimized code. Use a recent GCC with specific Atom
support in order to fully benefit from selecting this option.
-
+
+config MNEHALEM
+ bool "Intel Nehalem"
+ select X86_P6_NOP
@@ -468,7 +470,7 @@ index 542377cd419d..f589971df2d3 100644
@@ -294,6 +596,50 @@ config GENERIC_CPU
Generic x86-64 CPU.
Run equally well on all x86-64 CPUs.
-
+
+config GENERIC_CPU2
+ bool "Generic-x86-64-v2"
+ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000)
@@ -514,7 +516,7 @@ index 542377cd419d..f589971df2d3 100644
+ Enables -march=native
+
endchoice
-
+
config X86_GENERIC
@@ -318,9 +664,17 @@ config X86_INTERNODE_CACHE_SHIFT
config X86_L1_CACHE_SHIFT
@@ -533,17 +535,17 @@ index 542377cd419d..f589971df2d3 100644
- default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \
+ || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-
+
config X86_F00F_BUG
def_bool y
@@ -332,15 +686,27 @@ config X86_INVD_BUG
-
+
config X86_ALIGNMENT_16
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \
+ || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
-
+
config X86_INTEL_USERCOPY
def_bool y
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
@@ -552,7 +554,7 @@ index 542377cd419d..f589971df2d3 100644
+ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
+ || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
+ || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
-
+
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
@@ -564,7 +566,7 @@ index 542377cd419d..f589971df2d3 100644
+ || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
+ || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
+ || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
-
+
#
# P6_NOPs are a relatively minor optimization that require a family >=
@@ -356,32 +722,63 @@ config X86_USE_PPRO_CHECKSUM
@@ -577,7 +579,7 @@ index 542377cd419d..f589971df2d3 100644
+ || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \
+ || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
+ || MNATIVE_INTEL)
-
+
config X86_TSC
def_bool y
- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
@@ -589,7 +591,7 @@ index 542377cd419d..f589971df2d3 100644
+ || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
+ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
+ || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
-
+
config X86_CMPXCHG64
def_bool y
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
@@ -600,7 +602,7 @@ index 542377cd419d..f589971df2d3 100644
+ || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
+ || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
+ || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
-
+
# this should be set for all -march=.. options where the compiler
# generates cmov.
config X86_CMOV
@@ -613,13 +615,13 @@ index 542377cd419d..f589971df2d3 100644
+ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
+ || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
+ || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD)
-
+
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
-- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
+- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
-+ || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 \
++ || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8 || MK8SSE3 \
+ || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
+ || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
+ || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
@@ -628,20 +630,20 @@ index 542377cd419d..f589971df2d3 100644
+ || MNATIVE_INTEL || MNATIVE_AMD)
default "5" if X86_32 && X86_CMPXCHG64
default "4"
-
+
config X86_DEBUGCTLMSR
def_bool y
- depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
+ depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \
+ || M486SX || M486) && !UML
-
+
config IA32_FEAT_CTL
def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 415a5d138de4..17b1e039d955 100644
+index 1a068de12..23b2ec69d 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
-@@ -151,8 +151,48 @@ else
+@@ -152,8 +152,48 @@ else
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += -march=k8
cflags-$(CONFIG_MPSC) += -march=nocona
@@ -691,9 +693,9 @@ index 415a5d138de4..17b1e039d955 100644
+ cflags-$(CONFIG_GENERIC_CPU4) += -march=x86-64-v4
cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic
KBUILD_CFLAGS += $(cflags-y)
-
+
diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..02c1386eb653 100644
+index 75884d2cd..02c1386eb 100644
--- a/arch/x86/include/asm/vermagic.h
+++ b/arch/x86/include/asm/vermagic.h
@@ -17,6 +17,54 @@
@@ -784,3 +786,5 @@ index 75884d2cdec3..02c1386eb653 100644
#elif defined CONFIG_MELAN
#define MODULE_PROC_FAMILY "ELAN "
#elif defined CONFIG_MCRUSOE
+--
+2.43.0.232.ge79552d197
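The updated kcflags patch above suggests querying the dynamic loader for the
highest x86-64 psABI level the machine supports. For reference, the same
probe can be written in C with GCC's __builtin_cpu_supports(), which accepts
the level names "x86-64-v2" through "x86-64-v4" on GCC 12 and newer; this is
a sketch for illustration, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        const char *level = "x86-64";   /* baseline level */

        __builtin_cpu_init();           /* populate CPU feature data */
        if (__builtin_cpu_supports("x86-64-v4"))
            level = "x86-64-v4";
        else if (__builtin_cpu_supports("x86-64-v3"))
            level = "x86-64-v3";
        else if (__builtin_cpu_supports("x86-64-v2"))
            level = "x86-64-v2";

        printf("highest supported level: %s\n", level);
        return 0;
    }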
diff --git a/system/easy-kernel/0504-update-zstd-to-v1_5_5.patch b/system/easy-kernel/0504-update-zstd-to-v1_5_6.patch
index d0fc9da32..c26c89d55 100644
--- a/system/easy-kernel/0504-update-zstd-to-v1_5_5.patch
+++ b/system/easy-kernel/0504-update-zstd-to-v1_5_6.patch
@@ -1,68 +1,69 @@
+From 7718f4fafd7338132433609ceac97694f0e2239d Mon Sep 17 00:00:00 2001
From: Piotr Gorski <lucjan.lucjanov@gmail.com>
-Date: Fri, 8 Dec 2023 16:36:05 +0100
-Subject: [PATCH 1/2] zstd-6.6: merge v1.5.5 into kernel tree
+Date: Wed, 27 Mar 2024 07:22:57 +0100
+Subject: [PATCH 1/2] zstd-6.6: merge v1.5.6 into kernel tree
Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
---
include/linux/zstd.h | 2 +-
include/linux/zstd_errors.h | 23 +-
- include/linux/zstd_lib.h | 697 +++++--
+ include/linux/zstd_lib.h | 850 +++++--
lib/zstd/Makefile | 2 +-
lib/zstd/common/allocations.h | 56 +
lib/zstd/common/bits.h | 149 ++
- lib/zstd/common/bitstream.h | 53 +-
- lib/zstd/common/compiler.h | 14 +-
+ lib/zstd/common/bitstream.h | 127 +-
+ lib/zstd/common/compiler.h | 134 +-
lib/zstd/common/cpu.h | 3 +-
- lib/zstd/common/debug.c | 3 +-
- lib/zstd/common/debug.h | 3 +-
+ lib/zstd/common/debug.c | 9 +-
+ lib/zstd/common/debug.h | 34 +-
lib/zstd/common/entropy_common.c | 42 +-
lib/zstd/common/error_private.c | 12 +-
- lib/zstd/common/error_private.h | 3 +-
- lib/zstd/common/fse.h | 89 +-
- lib/zstd/common/fse_decompress.c | 94 +-
- lib/zstd/common/huf.h | 222 +--
- lib/zstd/common/mem.h | 2 +-
- lib/zstd/common/portability_macros.h | 26 +-
+ lib/zstd/common/error_private.h | 84 +-
+ lib/zstd/common/fse.h | 94 +-
+ lib/zstd/common/fse_decompress.c | 130 +-
+ lib/zstd/common/huf.h | 237 +-
+ lib/zstd/common/mem.h | 3 +-
+ lib/zstd/common/portability_macros.h | 28 +-
lib/zstd/common/zstd_common.c | 38 +-
lib/zstd/common/zstd_deps.h | 16 +-
- lib/zstd/common/zstd_internal.h | 99 +-
+ lib/zstd/common/zstd_internal.h | 109 +-
lib/zstd/compress/clevels.h | 3 +-
- lib/zstd/compress/fse_compress.c | 59 +-
+ lib/zstd/compress/fse_compress.c | 74 +-
lib/zstd/compress/hist.c | 3 +-
lib/zstd/compress/hist.h | 3 +-
- lib/zstd/compress/huf_compress.c | 372 ++--
- lib/zstd/compress/zstd_compress.c | 1762 ++++++++++++-----
- lib/zstd/compress/zstd_compress_internal.h | 333 +++-
+ lib/zstd/compress/huf_compress.c | 441 ++--
+ lib/zstd/compress/zstd_compress.c | 2111 ++++++++++++-----
+ lib/zstd/compress/zstd_compress_internal.h | 359 ++-
lib/zstd/compress/zstd_compress_literals.c | 155 +-
lib/zstd/compress/zstd_compress_literals.h | 25 +-
lib/zstd/compress/zstd_compress_sequences.c | 7 +-
lib/zstd/compress/zstd_compress_sequences.h | 3 +-
- lib/zstd/compress/zstd_compress_superblock.c | 47 +-
+ lib/zstd/compress/zstd_compress_superblock.c | 376 ++-
lib/zstd/compress/zstd_compress_superblock.h | 3 +-
- lib/zstd/compress/zstd_cwksp.h | 149 +-
- lib/zstd/compress/zstd_double_fast.c | 129 +-
- lib/zstd/compress/zstd_double_fast.h | 6 +-
- lib/zstd/compress/zstd_fast.c | 582 ++++--
+ lib/zstd/compress/zstd_cwksp.h | 169 +-
+ lib/zstd/compress/zstd_double_fast.c | 143 +-
+ lib/zstd/compress/zstd_double_fast.h | 17 +-
+ lib/zstd/compress/zstd_fast.c | 596 +++--
lib/zstd/compress/zstd_fast.h | 6 +-
- lib/zstd/compress/zstd_lazy.c | 518 ++---
- lib/zstd/compress/zstd_lazy.h | 7 +-
- lib/zstd/compress/zstd_ldm.c | 11 +-
+ lib/zstd/compress/zstd_lazy.c | 732 +++---
+ lib/zstd/compress/zstd_lazy.h | 138 +-
+ lib/zstd/compress/zstd_ldm.c | 21 +-
lib/zstd/compress/zstd_ldm.h | 3 +-
lib/zstd/compress/zstd_ldm_geartab.h | 3 +-
- lib/zstd/compress/zstd_opt.c | 187 +-
- lib/zstd/compress/zstd_opt.h | 3 +-
- lib/zstd/decompress/huf_decompress.c | 770 ++++---
+ lib/zstd/compress/zstd_opt.c | 497 ++--
+ lib/zstd/compress/zstd_opt.h | 41 +-
+ lib/zstd/decompress/huf_decompress.c | 887 ++++---
lib/zstd/decompress/zstd_ddict.c | 9 +-
lib/zstd/decompress/zstd_ddict.h | 3 +-
- lib/zstd/decompress/zstd_decompress.c | 261 ++-
- lib/zstd/decompress/zstd_decompress_block.c | 283 ++-
- lib/zstd/decompress/zstd_decompress_block.h | 8 +-
- .../decompress/zstd_decompress_internal.h | 7 +-
+ lib/zstd/decompress/zstd_decompress.c | 356 ++-
+ lib/zstd/decompress/zstd_decompress_block.c | 708 +++---
+ lib/zstd/decompress/zstd_decompress_block.h | 10 +-
+ .../decompress/zstd_decompress_internal.h | 9 +-
lib/zstd/decompress_sources.h | 2 +-
lib/zstd/zstd_common_module.c | 5 +-
lib/zstd/zstd_compress_module.c | 2 +-
lib/zstd/zstd_decompress_module.c | 4 +-
- 58 files changed, 4787 insertions(+), 2594 deletions(-)
+ 58 files changed, 6576 insertions(+), 3530 deletions(-)
create mode 100644 lib/zstd/common/allocations.h
create mode 100644 lib/zstd/common/bits.h
@@ -145,7 +146,7 @@ index 58b6dd45a..6d5cf55f0 100644
} ZSTD_ErrorCode;
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
-index 79d55465d..8b4ffe649 100644
+index 79d55465d..6320fedcf 100644
--- a/include/linux/zstd_lib.h
+++ b/include/linux/zstd_lib.h
@@ -1,5 +1,6 @@
@@ -208,7 +209,7 @@ index 79d55465d..8b4ffe649 100644
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 5
-#define ZSTD_VERSION_RELEASE 2
-+#define ZSTD_VERSION_RELEASE 5
++#define ZSTD_VERSION_RELEASE 6
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/*! ZSTD_versionNumber() :
@@ -255,7 +256,7 @@ index 79d55465d..8b4ffe649 100644
+ * for example to size a static array on stack.
+ * Will produce constant value 0 if srcSize too large.
+ */
-+#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U)
++#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
+#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+/* ZSTD_isError() :
@@ -266,7 +267,90 @@ index 79d55465d..8b4ffe649 100644
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
-@@ -412,6 +457,9 @@ typedef enum {
+@@ -183,7 +228,7 @@ ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compres
+ /*= Compression context
+ * When compressing many times,
+ * it is recommended to allocate a context just once,
+- * and re-use it for each successive compression operation.
++ * and reuse it for each successive compression operation.
+ * This will make workload friendlier for system's memory.
+ * Note : re-using context is just a speed / resource optimization.
+ * It doesn't change the compression ratio, which remains identical.
+@@ -196,9 +241,9 @@ ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer *
+
+ /*! ZSTD_compressCCtx() :
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+- * Important : in order to behave similarly to `ZSTD_compress()`,
+- * this function compresses at requested compression level,
+- * __ignoring any other parameter__ .
++ * Important : in order to mirror `ZSTD_compress()` behavior,
++ * this function compresses at the requested compression level,
++ * __ignoring any other advanced parameter__ .
+ * If any advanced parameter was set using the advanced API,
+ * they will all be reset. Only `compressionLevel` remains.
+ */
+@@ -210,7 +255,7 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ /*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+- * and re-use it for each successive compression operation.
++ * and reuse it for each successive compression operation.
+ * This will make workload friendlier for system's memory.
+ * Use one context per thread for parallel execution. */
+ typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+@@ -220,7 +265,7 @@ ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer *
+ /*! ZSTD_decompressDCtx() :
+ * Same as ZSTD_decompress(),
+ * requires an allocated ZSTD_DCtx.
+- * Compatible with sticky parameters.
++ * Compatible with sticky parameters (see below).
+ */
+ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+@@ -236,12 +281,12 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
+ * using ZSTD_CCtx_set*() functions.
+ * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
+ * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+- * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
++ * __They do not apply to one-shot variants such as ZSTD_compressCCtx()__ .
+ *
+ * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ * This API supersedes all other "advanced" API entry points in the experimental section.
+- * In the future, we expect to remove from experimental API entry points which are redundant with this API.
++ * In the future, we expect to remove API entry points from experimental which are redundant with this API.
+ */
+
+
+@@ -324,6 +369,19 @@ typedef enum {
+ * The higher the value of selected strategy, the more complex it is,
+ * resulting in stronger and slower compression.
+ * Special: value 0 means "use default strategy". */
++
++ ZSTD_c_targetCBlockSize=130, /* v1.5.6+
++ * Attempts to fit compressed block size into approximately targetCBlockSize.
++ * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
++ * Note that it's not a guarantee, just a convergence target (default:0).
++ * No target when targetCBlockSize == 0.
++ * This is helpful in low bandwidth streaming environments to improve end-to-end latency,
++ * when a client can make use of partial documents (a prominent example being Chrome).
++ * Note: this parameter is stable since v1.5.6.
++ * It was present as an experimental parameter in earlier versions,
++ * but it's not recommended using it with earlier library versions
++ * due to massive performance regressions.
++ */
+ /* LDM mode parameters */
+ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+ * This parameter is designed to improve compression ratio
+@@ -403,7 +461,6 @@ typedef enum {
+ * ZSTD_c_forceMaxWindow
+ * ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
+- * ZSTD_c_targetCBlockSize
+ * ZSTD_c_srcSizeHint
+ * ZSTD_c_enableDedicatedDictSearch
+ * ZSTD_c_stableInBuffer
+@@ -412,6 +469,9 @@ typedef enum {
* ZSTD_c_validateSequences
* ZSTD_c_useBlockSplitter
* ZSTD_c_useRowMatchFinder
@@ -276,7 +360,16 @@ index 79d55465d..8b4ffe649 100644
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
-@@ -430,7 +478,11 @@ typedef enum {
+@@ -421,7 +481,7 @@ typedef enum {
+ ZSTD_c_experimentalParam3=1000,
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
+- ZSTD_c_experimentalParam6=1003,
++ /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */
+ ZSTD_c_experimentalParam7=1004,
+ ZSTD_c_experimentalParam8=1005,
+ ZSTD_c_experimentalParam9=1006,
+@@ -430,7 +490,11 @@ typedef enum {
ZSTD_c_experimentalParam12=1009,
ZSTD_c_experimentalParam13=1010,
ZSTD_c_experimentalParam14=1011,
@@ -289,7 +382,7 @@ index 79d55465d..8b4ffe649 100644
} ZSTD_cParameter;
typedef struct {
-@@ -493,7 +545,7 @@ typedef enum {
+@@ -493,7 +557,7 @@ typedef enum {
* They will be used to compress next frame.
* Resetting session never fails.
* - The parameters : changes all parameters back to "default".
@@ -298,7 +391,12 @@ index 79d55465d..8b4ffe649 100644
* Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
* otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
* - Both : similar to resetting the session, followed by resetting parameters.
-@@ -506,7 +558,8 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+@@ -502,11 +566,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
+
+ /*! ZSTD_compress2() :
+ * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
++ * (note that this entry point doesn't even expose a compression level parameter).
+ * ZSTD_compress2() always starts a new frame.
* Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
* - The function is always blocking, returns when compression is completed.
@@ -308,11 +406,12 @@ index 79d55465d..8b4ffe649 100644
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()).
*/
-@@ -543,13 +596,15 @@ typedef enum {
+@@ -543,13 +609,17 @@ typedef enum {
* ZSTD_d_stableOutBuffer
* ZSTD_d_forceIgnoreChecksum
* ZSTD_d_refMultipleDDicts
+ * ZSTD_d_disableHuffmanAssembly
++ * ZSTD_d_maxBlockSize
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly
*/
@@ -321,11 +420,41 @@ index 79d55465d..8b4ffe649 100644
ZSTD_d_experimentalParam3=1002,
- ZSTD_d_experimentalParam4=1003
+ ZSTD_d_experimentalParam4=1003,
-+ ZSTD_d_experimentalParam5=1004
++ ZSTD_d_experimentalParam5=1004,
++ ZSTD_d_experimentalParam6=1005
} ZSTD_dParameter;
-@@ -728,8 +783,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output
+@@ -604,14 +674,14 @@ typedef struct ZSTD_outBuffer_s {
+ * A ZSTD_CStream object is required to track streaming operation.
+ * Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+ * ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+-* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
++* It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+ *
+ * For parallel execution, use one separate ZSTD_CStream per thread.
+ *
+ * note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+ *
+ * Parameters are sticky : when starting a new compression on the same context,
+-* it will re-use the same sticky parameters as previous compression session.
++* it will reuse the same sticky parameters as previous compression session.
+ * When in doubt, it's recommended to fully initialize the context before usage.
+ * Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+ * ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+@@ -700,6 +770,11 @@ typedef enum {
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
++ * - note: if an operation ends with an error, it may leave @cctx in an undefined state.
++ * Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state.
++ * In order to be re-employed after an error, a state must be reset,
++ * which can be done explicitly (ZSTD_CCtx_reset()),
++ * or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())
+ */
+ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+@@ -728,8 +803,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output
* This following is a legacy streaming API, available since v1.0+ .
* It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
* It is redundant, but remains fully supported.
@@ -334,7 +463,7 @@ index 79d55465d..8b4ffe649 100644
******************************************************************************/
/*!
-@@ -738,6 +791,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output
+@@ -738,6 +811,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
@@ -344,7 +473,16 @@ index 79d55465d..8b4ffe649 100644
*/
ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
/*!
-@@ -788,13 +844,31 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer
+@@ -758,7 +834,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+ *
+ * A ZSTD_DStream object is required to track streaming operations.
+ * Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+-* ZSTD_DStream objects can be re-used multiple times.
++* ZSTD_DStream objects can be reused multiple times.
+ *
+ * Use ZSTD_initDStream() to start a new decompression operation.
+ * @return : recommended first input size
+@@ -788,13 +864,37 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer
/*===== Streaming decompression functions =====*/
@@ -373,11 +511,17 @@ index 79d55465d..8b4ffe649 100644
+ * @return : 0 when a frame is completely decoded and fully flushed,
+ * or an error code, which can be tested using ZSTD_isError(),
+ * or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
++ *
++ * Note: when an operation returns with an error code, the @zds state may be left in an undefined state.
++ * It's UB to invoke `ZSTD_decompressStream()` on such a state.
++ * In order to re-use such a state, it must be first reset,
++ * which can be done explicitly (`ZSTD_DCtx_reset()`),
++ * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)
+ */
ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
-@@ -913,7 +987,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+@@ -913,7 +1013,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
* If @return == 0, the dictID could not be decoded.
* This could for one of the following reasons :
* - The frame does not require a dictionary to be decoded (most common case).
@@ -386,7 +530,7 @@ index 79d55465d..8b4ffe649 100644
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
* - This is not a Zstandard frame.
-@@ -925,9 +999,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+@@ -925,9 +1025,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* Advanced dictionary and prefix API (Requires v1.4.0+)
*
* This API allows dictionaries to be used with ZSTD_compress2(),
@@ -394,14 +538,14 @@ index 79d55465d..8b4ffe649 100644
- * only reset with the context is reset with ZSTD_reset_parameters or
- * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ * ZSTD_compressStream2(), and ZSTD_decompressDCtx().
-+ * Dictionaries are sticky, they remain valid when same context is re-used,
++ * Dictionaries are sticky, they remain valid when same context is reused,
+ * they only reset when the context is reset
+ * with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
+ * In contrast, Prefixes are single-use.
******************************************************************************/
-@@ -937,8 +1013,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+@@ -937,8 +1039,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
* meaning "return to no-dictionary mode".
@@ -413,7 +557,7 @@ index 79d55465d..8b4ffe649 100644
* Note 2 : Loading a dictionary involves building tables.
* It's also a CPU consuming operation, with non-negligible impact on latency.
* Tables are dependent on compression parameters, and for this reason,
-@@ -947,11 +1024,15 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+@@ -947,11 +1050,15 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
* In such a case, dictionary buffer must outlive its users.
* Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
@@ -431,7 +575,7 @@ index 79d55465d..8b4ffe649 100644
* Note that compression parameters are enforced from within CDict,
* and supersede any compression parameter previously set within CCtx.
* The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
-@@ -970,6 +1051,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+@@ -970,6 +1077,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
* Decompression will need same prefix to properly regenerate data.
* Compressing with a prefix is similar in outcome as performing a diff and compressing it,
* but performs much faster, especially during decompression (compression speed is tunable with compression level).
@@ -439,7 +583,7 @@ index 79d55465d..8b4ffe649 100644
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
* Note 1 : Prefix buffer is referenced. It **must** outlive compression.
-@@ -986,9 +1068,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+@@ -986,9 +1094,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
const void* prefix, size_t prefixSize);
/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
@@ -452,7 +596,7 @@ index 79d55465d..8b4ffe649 100644
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
* meaning "return to no-dictionary mode".
-@@ -1012,9 +1094,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s
+@@ -1012,9 +1120,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s
* The memory for the table is allocated on the first call to refDDict, and can be
* freed with ZSTD_freeDCtx().
*
@@ -465,7 +609,7 @@ index 79d55465d..8b4ffe649 100644
* Special: referencing a NULL DDict means "return to no-dictionary mode".
* Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
*/
-@@ -1071,24 +1154,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+@@ -1071,24 +1180,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE
#endif
@@ -490,7 +634,7 @@ index 79d55465d..8b4ffe649 100644
/* **************************************************************************************
* experimental API (static linking only)
****************************************************************************************
-@@ -1123,6 +1188,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+@@ -1123,6 +1214,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
#define ZSTD_STRATEGY_MIN ZSTD_fast
#define ZSTD_STRATEGY_MAX ZSTD_btultra2
@@ -498,7 +642,16 @@ index 79d55465d..8b4ffe649 100644
#define ZSTD_OVERLAPLOG_MIN 0
-@@ -1303,7 +1369,7 @@ typedef enum {
+@@ -1146,7 +1238,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+ #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+ /* Advanced parameter bounds */
+-#define ZSTD_TARGETCBLOCKSIZE_MIN 64
++#define ZSTD_TARGETCBLOCKSIZE_MIN 1340 /* suitable to fit into an ethernet / wifi / 4G transport frame */
+ #define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
+ #define ZSTD_SRCSIZEHINT_MIN 0
+ #define ZSTD_SRCSIZEHINT_MAX INT_MAX
+@@ -1303,7 +1395,7 @@ typedef enum {
} ZSTD_paramSwitch_e;
/* *************************************
@@ -507,7 +660,7 @@ index 79d55465d..8b4ffe649 100644
***************************************/
/*! ZSTD_findDecompressedSize() :
-@@ -1350,29 +1416,109 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size
+@@ -1350,29 +1442,122 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size
* or an error code (if srcSize is too small) */
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
@@ -596,7 +749,23 @@ index 79d55465d..8b4ffe649 100644
+
/*! ZSTD_generateSequences() :
- * Generate sequences using ZSTD_compress2, given a source buffer.
++ * WARNING: This function is meant for debugging and informational purposes ONLY!
++ * Its implementation is flawed, and it will be deleted in a future version.
++ * It is not guaranteed to succeed, as there are several cases where it will give
++ * up and fail. You should NOT use this function in production code.
++ *
++ * This function is deprecated, and will be removed in a future version.
++ *
+ * Generate sequences using ZSTD_compress2(), given a source buffer.
++ *
++ * @param zc The compression context to be used for ZSTD_compress2(). Set any
++ * compression parameters you need on this context.
++ * @param outSeqs The output sequences buffer of size @p outSeqsSize
++ * @param outSeqsSize The size of the output sequences buffer.
++ * ZSTD_sequenceBound(srcSize) is an upper bound on the number
++ * of sequences that can be generated.
++ * @param src The source buffer to generate sequences from of size @p srcSize.
++ * @param srcSize The size of the source buffer.
*
* Each block will end with a dummy sequence
* with offset == 0, matchLength == 0, and litLength == length of last literals.
@@ -605,24 +774,26 @@ index 79d55465d..8b4ffe649 100644
*
- * zc can be used to insert custom compression params.
- * This function invokes ZSTD_compress2
-+ * @zc can be used to insert custom compression params.
-+ * This function invokes ZSTD_compress2().
- *
- * The output of this function can be fed into ZSTD_compressSequences() with CCtx
- * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
- * @return : number of sequences generated
+- *
+- * The output of this function can be fed into ZSTD_compressSequences() with CCtx
+- * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
+- * @return : number of sequences generated
++ * @returns The number of sequences generated, necessarily less than
++ * ZSTD_sequenceBound(srcSize), or an error code that can be checked
++ * with ZSTD_isError().
*/
-
+-
-ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
- size_t outSeqsSize, const void* src, size_t srcSize);
++ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
+ZSTDLIB_STATIC_API size_t
-+ZSTD_generateSequences( ZSTD_CCtx* zc,
-+ ZSTD_Sequence* outSeqs, size_t outSeqsSize,
-+ const void* src, size_t srcSize);
++ZSTD_generateSequences(ZSTD_CCtx* zc,
++ ZSTD_Sequence* outSeqs, size_t outSeqsSize,
++ const void* src, size_t srcSize);
/*! ZSTD_mergeBlockDelimiters() :
* Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
-@@ -1388,7 +1534,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o
+@@ -1388,7 +1573,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o
ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
/*! ZSTD_compressSequences() :
@@ -633,7 +804,7 @@ index 79d55465d..8b4ffe649 100644
* If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
* The entire source is compressed into a single frame.
*
-@@ -1413,11 +1561,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
+@@ -1413,11 +1600,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
* Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
* Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
* and cannot emit an RLE block that disagrees with the repcode history
@@ -650,34 +821,82 @@ index 79d55465d..8b4ffe649 100644
/*! ZSTD_writeSkippableFrame() :
-@@ -1481,8 +1630,11 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+@@ -1464,48 +1652,59 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+ /*! ZSTD_estimate*() :
+ * These functions make it possible to estimate memory usage
+ * of a future {D,C}Ctx, before its creation.
++ * This is useful in combination with ZSTD_initStatic(),
++ * which makes it possible to employ a static buffer for ZSTD_CCtx* state.
+ *
+ * ZSTD_estimateCCtxSize() will provide a memory budget large enough
+- * for any compression level up to selected one.
+- * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
+- * does not include space for a window buffer.
+- * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
++ * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()
++ * associated with any compression level up to max specified one.
+ * The estimate will assume the input may be arbitrarily large,
+ * which is the worst case.
+ *
++ * Note that the size estimation is specific for one-shot compression,
++ * it is not valid for streaming (see ZSTD_estimateCStreamSize*())
++ * nor other potential ways of using a ZSTD_CCtx* state.
++ *
+ * When srcSize can be bound by a known and rather "small" value,
+- * this fact can be used to provide a tighter estimation
+- * because the CCtx compression context will need less memory.
+- * This tighter estimation can be provided by more advanced functions
++ * this knowledge can be used to provide a tighter budget estimation
++ * because the ZSTD_CCtx* state will need less memory for small inputs.
++ * This tighter estimation can be provided by employing more advanced functions
+ * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
* and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
* Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
*
- * Note 2 : only single-threaded compression is supported.
+ * Note : only single-threaded compression is supported.
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
-+ *
-+ * Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
-+ * Size estimates assume that no external sequence producer is registered.
*/
- ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+-ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
++ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
-@@ -1501,7 +1653,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
+ ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
+
+ /*! ZSTD_estimateCStreamSize() :
+- * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
+- * It will also consider src size to be arbitrarily "large", which is worst case.
++ * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression
++ * using any compression level up to the max specified one.
++ * It will also consider src size to be arbitrarily "large", which is a worst case scenario.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note : CStream size estimation is only correct for single-threaded compression.
+- * ZSTD_DStream memory budget depends on window Size.
++ * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
++ * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
++ * Size estimates assume that no external sequence producer is registered.
++ *
++ * ZSTD_DStream memory budget depends on frame's window Size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize,
* or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
++ * Any frame requesting a window size larger than max specified one will be rejected.
* Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
* an internal ?Dict will be created, which additional size is not estimated here.
- * In this case, get total size by adding ZSTD_estimate?DictSize */
+-ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ * In this case, get total size by adding ZSTD_estimate?DictSize
-+ * Note 2 : only single-threaded compression is supported.
-+ * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
-+ * Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
-+ * Size estimates assume that no external sequence producer is registered.
+ */
- ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
++ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-@@ -1649,22 +1806,45 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+-ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
++ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize);
+ ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+ /*! ZSTD_estimate?DictSize() :
+@@ -1649,22 +1848,45 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
* This function never fails (wide contract) */
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
@@ -727,7 +946,19 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
-@@ -1808,13 +1988,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
+@@ -1737,11 +1959,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
+ */
+ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+-/* Tries to fit compressed block size to be around targetCBlockSize.
+- * No target when targetCBlockSize == 0.
+- * There is no guarantee on compressed block size (default:0) */
+-#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+-
+ /* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+@@ -1808,13 +2025,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* Experimental parameter.
* Default is 0 == disabled. Set to 1 to enable.
*
@@ -751,7 +982,7 @@ index 79d55465d..8b4ffe649 100644
*
* When this flag is enabled zstd won't allocate an input window buffer,
* because the user guarantees it can reference the ZSTD_inBuffer until
-@@ -1822,18 +2005,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
+@@ -1822,18 +2042,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
* avoid the memcpy() from the input buffer to the input window buffer.
*
@@ -775,7 +1006,7 @@ index 79d55465d..8b4ffe649 100644
*/
#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
-@@ -1878,7 +2058,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
+@@ -1878,7 +2095,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* Without validation, providing a sequence that does not conform to the zstd spec will cause
* undefined behavior, and may produce a corrupted block.
*
@@ -784,7 +1015,7 @@ index 79d55465d..8b4ffe649 100644
* specifics regarding offset/matchlength requirements) then the function will bail out and
* return an error.
*
-@@ -1928,6 +2108,79 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
+@@ -1928,6 +2145,79 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
*/
#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
@@ -864,7 +1095,7 @@ index 79d55465d..8b4ffe649 100644
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
-@@ -2084,7 +2337,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
+@@ -2084,7 +2374,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
* in the range [dst, dst + pos) MUST not be modified during decompression
* or you will get data corruption.
*
@@ -873,7 +1104,7 @@ index 79d55465d..8b4ffe649 100644
* it can write directly to the ZSTD_outBuffer, but it will still allocate
* an input buffer large enough to fit any compressed block. This will also
* avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
-@@ -2137,6 +2390,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
+@@ -2137,6 +2427,33 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
*/
#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
@@ -888,10 +1119,26 @@ index 79d55465d..8b4ffe649 100644
+ */
+#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5
+
++/* ZSTD_d_maxBlockSize
++ * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
++ * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
++ *
++ * Forces the decompressor to reject blocks whose content size is
++ * larger than the configured maxBlockSize. When maxBlockSize is
++ * larger than the windowSize, the windowSize is used instead.
++ * This saves memory on the decoder when you know all blocks are small.
++ *
++ * This option is typically used in conjunction with ZSTD_c_maxBlockSize.
++ *
++ * WARNING: This causes the decoder to reject otherwise valid frames
++ * that have block sizes larger than the configured maxBlockSize.
++ */
++#define ZSTD_d_maxBlockSize ZSTD_d_experimentalParam6
++
/*! ZSTD_DCtx_setFormat() :
* This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
-@@ -2145,6 +2409,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
+@@ -2145,6 +2462,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
* such ZSTD_f_zstd1_magicless for example.
* @return : 0, or an error code (which can be tested using ZSTD_isError()). */
ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
@@ -899,7 +1146,7 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
/*! ZSTD_decompressStream_simpleArgs() :
-@@ -2181,6 +2446,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
+@@ -2181,6 +2499,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
@@ -907,7 +1154,7 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
int compressionLevel,
unsigned long long pledgedSrcSize);
-@@ -2198,17 +2464,15 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
+@@ -2198,17 +2517,15 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
@@ -928,7 +1175,7 @@ index 79d55465d..8b4ffe649 100644
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
-@@ -2218,6 +2482,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
+@@ -2218,6 +2535,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
@@ -936,7 +1183,7 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params,
-@@ -2232,15 +2497,13 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+@@ -2232,15 +2550,13 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
@@ -955,7 +1202,7 @@ index 79d55465d..8b4ffe649 100644
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
-@@ -2250,6 +2513,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+@@ -2250,6 +2566,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
@@ -963,7 +1210,16 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
-@@ -2274,6 +2538,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+@@ -2264,7 +2581,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ * explicitly specified.
+ *
+ * start a new frame, using same parameters from previous frame.
+- * This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
++ * This is typically useful to skip dictionary loading stage, since it will reuse it in-place.
+ * Note that zcs must be init at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+@@ -2274,6 +2591,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
@@ -971,7 +1227,7 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
-@@ -2319,8 +2584,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
+@@ -2319,8 +2637,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
*
* note: no dictionary will be used if dict == NULL or dictSize < 8
@@ -981,7 +1237,7 @@ index 79d55465d..8b4ffe649 100644
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
/*!
-@@ -2330,8 +2595,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo
+@@ -2330,8 +2648,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo
* ZSTD_DCtx_refDDict(zds, ddict);
*
* note : ddict is referenced, it must outlive decompression session
@@ -991,11 +1247,13 @@ index 79d55465d..8b4ffe649 100644
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
/*!
-@@ -2340,17 +2605,185 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z
+@@ -2339,18 +2657,202 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z
+ *
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
*
- * re-use decompression parameters from previous init; saves dictionary loading
+- * re-use decompression parameters from previous init; saves dictionary loading
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
++ * reuse decompression parameters from previous init; saves dictionary loading
*/
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
@@ -1127,7 +1385,7 @@ index 79d55465d..8b4ffe649 100644
+
+#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1))
+
-+typedef size_t ZSTD_sequenceProducer_F (
++typedef size_t (*ZSTD_sequenceProducer_F) (
+ void* sequenceProducerState,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
+ const void* src, size_t srcSize,
@@ -1159,7 +1417,23 @@ index 79d55465d..8b4ffe649 100644
+ZSTD_registerSequenceProducer(
+ ZSTD_CCtx* cctx,
+ void* sequenceProducerState,
-+ ZSTD_sequenceProducer_F* sequenceProducer
++ ZSTD_sequenceProducer_F sequenceProducer
++);
++
++/*! ZSTD_CCtxParams_registerSequenceProducer() :
++ * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.
++ * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),
++ * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().
++ *
++ * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()
++ * is required, then this function is for you. Otherwise, you probably don't need it.
++ *
++ * See tests/zstreamtest.c for example usage. */
++ZSTDLIB_STATIC_API void
++ZSTD_CCtxParams_registerSequenceProducer(
++ ZSTD_CCtx_params* params,
++ void* sequenceProducerState,
++ ZSTD_sequenceProducer_F sequenceProducer
+);
+
+
@@ -1182,7 +1456,12 @@ index 79d55465d..8b4ffe649 100644
********************************************************************* */
/*
-@@ -2362,7 +2795,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+@@ -2358,11 +2860,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
+ A ZSTD_CCtx object is required to track streaming operations.
+ Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
+- ZSTD_CCtx object can be re-used multiple times within successive compression operations.
++ ZSTD_CCtx object can be reused multiple times within successive compression operations.
Start by initializing a context.
Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
@@ -1190,7 +1469,12 @@ index 79d55465d..8b4ffe649 100644
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
-@@ -2384,18 +2816,28 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+@@ -2380,36 +2881,46 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+ It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
++ `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.
*/
/*===== Buffer-less streaming compression functions =====*/
@@ -1220,7 +1504,13 @@ index 79d55465d..8b4ffe649 100644
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
/*
Buffer-less streaming decompression (synchronous mode)
-@@ -2408,8 +2850,8 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
+
+ A ZSTD_DCtx object is required to track streaming operations.
+ Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+- A ZSTD_DCtx object can be re-used multiple times.
++ A ZSTD_DCtx object can be reused multiple times.
+
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
@@ -1231,7 +1521,7 @@ index 79d55465d..8b4ffe649 100644
errorCode, which can be tested using ZSTD_isError().
It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
-@@ -2428,7 +2870,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
+@@ -2428,7 +2939,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
The most memory efficient way is to use a round buffer of sufficient size.
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
@@ -1240,7 +1530,7 @@ index 79d55465d..8b4ffe649 100644
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
-@@ -2448,7 +2890,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
+@@ -2448,7 +2959,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
@@ -1249,7 +1539,7 @@ index 79d55465d..8b4ffe649 100644
It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
-@@ -2471,27 +2913,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
+@@ -2471,27 +2982,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
*/
/*===== Buffer-less streaming decompression functions =====*/
@@ -1277,7 +1567,7 @@ index 79d55465d..8b4ffe649 100644
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
-@@ -2502,6 +2924,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+@@ -2502,6 +2993,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* misc */
@@ -1285,7 +1575,7 @@ index 79d55465d..8b4ffe649 100644
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
-@@ -2509,11 +2932,23 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+@@ -2509,11 +3001,23 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
@@ -1312,7 +1602,7 @@ index 79d55465d..8b4ffe649 100644
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
-@@ -2524,7 +2959,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+@@ -2524,7 +3028,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
- It is necessary to init context before starting
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
@@ -1320,7 +1610,7 @@ index 79d55465d..8b4ffe649 100644
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
-@@ -2541,11 +2975,14 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+@@ -2541,11 +3044,14 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
*/
/*===== Raw zstd block functions =====*/
@@ -1350,7 +1640,7 @@ index 20f08c644..464c410b2 100644
# This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/common/allocations.h b/lib/zstd/common/allocations.h
new file mode 100644
-index 000000000..05adbbecc
+index 000000000..16c3d08e8
--- /dev/null
+++ b/lib/zstd/common/allocations.h
@@ -0,0 +1,56 @@
@@ -1371,7 +1661,7 @@ index 000000000..05adbbecc
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+
-+#include "mem.h" /* MEM_STATIC */
++#include "compiler.h" /* MEM_STATIC */
+#define ZSTD_STATIC_LINKING_ONLY
+#include <linux/zstd.h> /* ZSTD_customMem */
+
@@ -1566,7 +1856,7 @@ index 000000000..aa3487ec4
+
+#endif /* ZSTD_BITS_H */
diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
-index feef3a1b1..444dc4f85 100644
+index feef3a1b1..6a13f1f0f 100644
--- a/lib/zstd/common/bitstream.h
+++ b/lib/zstd/common/bitstream.h
@@ -1,7 +1,8 @@
@@ -1587,7 +1877,43 @@ index feef3a1b1..444dc4f85 100644
/*=========================================
-@@ -122,33 +124,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+@@ -79,19 +81,20 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+ /*-********************************************
+ * bitStream decoding API (read backward)
+ **********************************************/
++typedef size_t BitContainerType;
+ typedef struct {
+- size_t bitContainer;
++ BitContainerType bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+ const char* limitPtr;
+ } BIT_DStream_t;
+
+-typedef enum { BIT_DStream_unfinished = 0,
+- BIT_DStream_endOfBuffer = 1,
+- BIT_DStream_completed = 2,
+- BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+- /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
++typedef enum { BIT_DStream_unfinished = 0, /* fully refilled */
++ BIT_DStream_endOfBuffer = 1, /* still some bits left in bitstream */
++ BIT_DStream_completed = 2, /* bitstream entirely consumed, bit-exact */
++ BIT_DStream_overflow = 3 /* user requested more bits than present in bitstream */
++ } BIT_DStream_status; /* result of BIT_reloadDStream() */
+
+ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+@@ -101,7 +104,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+ /* Start by invoking BIT_initDStream().
+ * A chunk of the bitStream is then stored into a local register.
+-* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
++* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (BitContainerType).
+ * You can then retrieve bitFields stored into the local register, **in reverse order**.
+ * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+ * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+@@ -122,33 +125,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
@@ -1621,11 +1947,11 @@ index feef3a1b1..444dc4f85 100644
/*===== Local Constants =====*/
static const unsigned BIT_mask[] = {
0, 1, 3, 7, 0xF, 0x1F,
-@@ -178,6 +153,12 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+@@ -178,6 +154,12 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
return 0;
}
-+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
++FORCE_INLINE_TEMPLATE size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
@@ -1634,7 +1960,7 @@ index feef3a1b1..444dc4f85 100644
/*! BIT_addBits() :
* can add up to 31 bits into `bitC`.
* Note : does not check for register overflow ! */
-@@ -187,7 +168,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+@@ -187,7 +169,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
assert(nbBits < BIT_MASK_SIZE);
assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
@@ -1643,7 +1969,7 @@ index feef3a1b1..444dc4f85 100644
bitC->bitPos += nbBits;
}
-@@ -266,7 +247,7 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
+@@ -266,35 +248,35 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
@@ -1652,7 +1978,33 @@ index feef3a1b1..444dc4f85 100644
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
} else {
bitD->ptr = bitD->start;
-@@ -294,7 +275,7 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
++ case 7: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ ZSTD_FALLTHROUGH;
+
+- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
++ case 6: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ ZSTD_FALLTHROUGH;
+
+- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
++ case 5: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ ZSTD_FALLTHROUGH;
+
+- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
++ case 4: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[3]) << 24;
+ ZSTD_FALLTHROUGH;
+
+- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
++ case 3: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[2]) << 16;
+ ZSTD_FALLTHROUGH;
+
+- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
++ case 2: bitD->bitContainer += (BitContainerType)(((const BYTE*)(srcBuffer))[1]) << 8;
+ ZSTD_FALLTHROUGH;
+
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
@@ -1661,7 +2013,22 @@ index feef3a1b1..444dc4f85 100644
if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
}
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
-@@ -325,12 +306,6 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c
+@@ -303,12 +285,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
+ return srcSize;
+ }
+
+-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
++FORCE_INLINE_TEMPLATE size_t BIT_getUpperBits(BitContainerType bitContainer, U32 const start)
+ {
+ return bitContainer >> start;
+ }
+
+-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
++FORCE_INLINE_TEMPLATE size_t BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits)
+ {
+ U32 const regMask = sizeof(bitContainer)*8 - 1;
+ /* if start > regMask, bitstream is corrupted, and result is undefined */
+@@ -325,19 +307,13 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c
#endif
}
@@ -1674,7 +2041,33 @@ index feef3a1b1..444dc4f85 100644
/*! BIT_lookBits() :
* Provides next n bits from local register.
* local register is not modified.
-@@ -377,7 +352,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n
+ * On 32-bits, maxNbBits==24.
+ * On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
++FORCE_INLINE_TEMPLATE size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+ {
+ /* arbitrate between double-shift and shift+mask */
+ #if 1
+@@ -360,7 +336,7 @@ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+ }
+
+-MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
++FORCE_INLINE_TEMPLATE void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+ {
+ bitD->bitsConsumed += nbBits;
+ }
+@@ -369,7 +345,7 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+ * Read (consume) next n bits from local register and update.
+ * Pay attention to not read more than nbBits contained into local register.
+ * @return : extracted value. */
+-MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
++FORCE_INLINE_TEMPLATE size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+ {
+ size_t const value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+@@ -377,7 +353,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n
}
/*! BIT_readBitsFast() :
@@ -1683,17 +2076,76 @@ index feef3a1b1..444dc4f85 100644
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
size_t const value = BIT_lookBitsFast(bitD, nbBits);
-@@ -408,7 +383,7 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
- * This function is safe, it guarantees it will not read beyond src buffer.
+@@ -386,6 +362,21 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+ return value;
+ }
+
++/*! BIT_reloadDStream_internal() :
++ * Simple variant of BIT_reloadDStream(), with two conditions:
++ * 1. bitstream is valid : bitsConsumed <= sizeof(bitD->bitContainer)*8
++ * 2. look window is valid after being shifted down : bitD->ptr >= bitD->start
++ */
++MEM_STATIC BIT_DStream_status BIT_reloadDStream_internal(BIT_DStream_t* bitD)
++{
++ assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
++ bitD->ptr -= bitD->bitsConsumed >> 3;
++ assert(bitD->ptr >= bitD->start);
++ bitD->bitsConsumed &= 7;
++ bitD->bitContainer = MEM_readLEST(bitD->ptr);
++ return BIT_DStream_unfinished;
++}
++
+ /*! BIT_reloadDStreamFast() :
+ * Similar to BIT_reloadDStream(), but with two differences:
+ * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
+@@ -396,31 +387,35 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
+ {
+ if (UNLIKELY(bitD->ptr < bitD->limitPtr))
+ return BIT_DStream_overflow;
+- assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
+- bitD->ptr -= bitD->bitsConsumed >> 3;
+- bitD->bitsConsumed &= 7;
+- bitD->bitContainer = MEM_readLEST(bitD->ptr);
+- return BIT_DStream_unfinished;
++ return BIT_reloadDStream_internal(bitD);
+ }
+
+ /*! BIT_reloadDStream() :
+ * Refill `bitD` from buffer previously set in BIT_initDStream() .
+- * This function is safe, it guarantees it will not read beyond src buffer.
++ * This function is safe, it guarantees it will never read beyond src buffer.
* @return : status of `BIT_DStream_t` internal register.
* when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
-MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
-+MEM_STATIC FORCE_INLINE_ATTR BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
++FORCE_INLINE_TEMPLATE BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
++ /* note : once in overflow mode, a bitstream remains in this mode until it's reset */
++ if (UNLIKELY(bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))) {
++ static const BitContainerType zeroFilled = 0;
++ bitD->ptr = (const char*)&zeroFilled; /* aliasing is allowed for char */
++ /* overflow detected, erroneous scenario or end of stream: no update */
return BIT_DStream_overflow;
++ }
++
++ assert(bitD->ptr >= bitD->start);
+
+ if (bitD->ptr >= bitD->limitPtr) {
+- return BIT_reloadDStreamFast(bitD);
++ return BIT_reloadDStream_internal(bitD);
+ }
+ if (bitD->ptr == bitD->start) {
++ /* reached end of bitStream => no update */
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+- /* start < ptr < limitPtr */
++ /* start < ptr < limitPtr => cautious update */
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
-index c42d39faf..c437e0975 100644
+index c42d39faf..508ee2553 100644
--- a/lib/zstd/common/compiler.h
+++ b/lib/zstd/common/compiler.h
@@ -1,5 +1,6 @@
@@ -1704,10 +2156,178 @@ index c42d39faf..c437e0975 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -179,6 +180,17 @@
+@@ -11,6 +12,8 @@
+ #ifndef ZSTD_COMPILER_H
+ #define ZSTD_COMPILER_H
+
++#include <linux/types.h>
++
+ #include "portability_macros.h"
+
+ /*-*******************************************************
+@@ -41,12 +44,15 @@
+ */
+ #define WIN_CDECL
+
++/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
++#define UNUSED_ATTR __attribute__((unused))
++
+ /*
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
+ * branches.
+ */
+-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
++#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR UNUSED_ATTR
+ /*
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compilers
+@@ -61,11 +67,21 @@
+ #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+ # define HINT_INLINE static INLINE_KEYWORD
+ #else
+-# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
++# define HINT_INLINE FORCE_INLINE_TEMPLATE
+ #endif
+
+-/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+-#define UNUSED_ATTR __attribute__((unused))
++/* "soft" inline :
++ * The compiler is free to select if it's a good idea to inline or not.
++ * The main objective is to silence compiler warnings
++ * when a defined function is included but not used.
++ *
++ * Note : this macro is prefixed `MEM_` because it used to be provided by `mem.h` unit.
++ * Updating the prefix is probably preferable, but requires a fairly large codemod,
++ * since this name is used everywhere.
++ */
++#ifndef MEM_STATIC /* already defined in Linux Kernel mem.h */
++#define MEM_STATIC static __inline UNUSED_ATTR
++#endif
+
+ /* force no inlining */
+ #define FORCE_NOINLINE static __attribute__((__noinline__))
+@@ -86,23 +102,24 @@
+ # define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+ # define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+ #elif defined(__aarch64__)
+-# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
+-# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
++# define PREFETCH_L1(ptr) do { __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))); } while (0)
++# define PREFETCH_L2(ptr) do { __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))); } while (0)
+ #else
+-# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
+-# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
++# define PREFETCH_L1(ptr) do { (void)(ptr); } while (0) /* disabled */
++# define PREFETCH_L2(ptr) do { (void)(ptr); } while (0) /* disabled */
+ #endif /* NO_PREFETCH */
+
+ #define CACHELINE_SIZE 64
+
+-#define PREFETCH_AREA(p, s) { \
+- const char* const _ptr = (const char*)(p); \
+- size_t const _size = (size_t)(s); \
+- size_t _pos; \
+- for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+- PREFETCH_L2(_ptr + _pos); \
+- } \
+-}
++#define PREFETCH_AREA(p, s) \
++ do { \
++ const char* const _ptr = (const char*)(p); \
++ size_t const _size = (size_t)(s); \
++ size_t _pos; \
++ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
++ PREFETCH_L2(_ptr + _pos); \
++ } \
++ } while (0)
+
+ /* vectorization
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
+@@ -126,9 +143,9 @@
+ #define UNLIKELY(x) (__builtin_expect((x), 0))
+
+ #if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
+-# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
++# define ZSTD_UNREACHABLE do { assert(0), __builtin_unreachable(); } while (0)
+ #else
+-# define ZSTD_UNREACHABLE { assert(0); }
++# define ZSTD_UNREACHABLE do { assert(0); } while (0)
+ #endif
+
+ /* disable warnings */
+@@ -179,6 +196,85 @@
* Sanitizer
*****************************************************************/
++/*
++ * Zstd relies on pointer overflow in its decompressor.
++ * We add this attribute to functions that rely on pointer overflow.
++ */
++#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++# if __has_attribute(no_sanitize)
++# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8
++ /* gcc < 8 only has signed-integer-overflow which triggers on pointer overflow */
++# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow")))
++# else
++ /* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */
++# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
++# endif
++# else
++# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++# endif
++#endif
++
++/*
++ * Helper function to perform a wrapped pointer difference without triggering
++ * UBSAN.
++ *
++ * @returns lhs - rhs with wrapping
++ */
++MEM_STATIC
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs)
++{
++ return lhs - rhs;
++}
++
++/*
++ * Helper function to perform a wrapped pointer add without triggering UBSAN.
++ *
++ * @return ptr + add with wrapping
++ */
++MEM_STATIC
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
++{
++ return ptr + add;
++}
++
++/*
++ * Helper function to perform a wrapped pointer subtraction without triggering
++ * UBSAN.
++ *
++ * @return ptr - sub with wrapping
++ */
++MEM_STATIC
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
++{
++ return ptr - sub;
++}
++
++/*
++ * Helper function to add to a pointer that works around C's undefined behavior
++ * of adding 0 to NULL.
++ *
++ * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
++ */
++MEM_STATIC
++unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
++{
++ return add > 0 ? ptr + add : ptr;
++}
++
+/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
+ * abundance of caution, disable our custom poisoning on mingw. */
+#ifdef __MINGW32__
@@ -1735,7 +2355,7 @@ index 0db7b4240..d8319a2be 100644
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
-index bb863c9ea..e56ff6464 100644
+index bb863c9ea..8eb6aa9a3 100644
--- a/lib/zstd/common/debug.c
+++ b/lib/zstd/common/debug.c
@@ -1,7 +1,8 @@
@@ -1748,8 +2368,19 @@ index bb863c9ea..e56ff6464 100644
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+@@ -21,4 +22,10 @@
+
+ #include "debug.h"
+
++#if (DEBUGLEVEL>=2)
++/* We only use this when DEBUGLEVEL>=2, but we get -Werror=pedantic errors if a
++ * translation unit is empty. So remove this from Linux kernel builds, but
++ * otherwise just leave it in.
++ */
+ int g_debuglevel = DEBUGLEVEL;
++#endif
diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
-index 6dd88d1fb..da0dbfc61 100644
+index 6dd88d1fb..226ba3c57 100644
--- a/lib/zstd/common/debug.h
+++ b/lib/zstd/common/debug.h
@@ -1,7 +1,8 @@
@@ -1762,6 +2393,45 @@ index 6dd88d1fb..da0dbfc61 100644
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+@@ -82,18 +83,27 @@ extern int g_debuglevel; /* the variable is only declared,
+ It's useful when enabling very verbose levels
+ on selective conditions (such as position in src) */
+
+-# define RAWLOG(l, ...) { \
+- if (l<=g_debuglevel) { \
+- ZSTD_DEBUG_PRINT(__VA_ARGS__); \
+- } }
+-# define DEBUGLOG(l, ...) { \
+- if (l<=g_debuglevel) { \
+- ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
+- ZSTD_DEBUG_PRINT(" \n"); \
+- } }
++# define RAWLOG(l, ...) \
++ do { \
++ if (l<=g_debuglevel) { \
++ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
++ } \
++ } while (0)
++
++#define STRINGIFY(x) #x
++#define TOSTRING(x) STRINGIFY(x)
++#define LINE_AS_STRING TOSTRING(__LINE__)
++
++# define DEBUGLOG(l, ...) \
++ do { \
++ if (l<=g_debuglevel) { \
++ ZSTD_DEBUG_PRINT(__FILE__ ":" LINE_AS_STRING ": " __VA_ARGS__); \
++ ZSTD_DEBUG_PRINT(" \n"); \
++ } \
++ } while (0)
+ #else
+-# define RAWLOG(l, ...) {} /* disabled */
+-# define DEBUGLOG(l, ...) {} /* disabled */
++# define RAWLOG(l, ...) do { } while (0) /* disabled */
++# define DEBUGLOG(l, ...) do { } while (0) /* disabled */
+ #endif
+
+
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
index fef67056f..6cdd82233 100644
--- a/lib/zstd/common/entropy_common.c
@@ -1929,7 +2599,7 @@ index 6d1135f8c..a4062d30d 100644
default: return notErrorCode;
}
diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
-index ca5101e54..9a4699a38 100644
+index ca5101e54..0410ca415 100644
--- a/lib/zstd/common/error_private.h
+++ b/lib/zstd/common/error_private.h
@@ -1,5 +1,6 @@
@@ -1940,8 +2610,122 @@ index ca5101e54..9a4699a38 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
+@@ -49,8 +50,13 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+ ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+ /* check and forward error code */
+-#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+-#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
++#define CHECK_V_F(e, f) \
++ size_t const e = f; \
++ do { \
++ if (ERR_isError(e)) \
++ return e; \
++ } while (0)
++#define CHECK_F(f) do { CHECK_V_F(_var_err__, f); } while (0)
+
+
+ /*-****************************************
+@@ -84,10 +90,12 @@ void _force_has_format_string(const char *format, ...) {
+ * We want to force this function invocation to be syntactically correct, but
+ * we don't want to force runtime evaluation of its arguments.
+ */
+-#define _FORCE_HAS_FORMAT_STRING(...) \
+- if (0) { \
+- _force_has_format_string(__VA_ARGS__); \
+- }
++#define _FORCE_HAS_FORMAT_STRING(...) \
++ do { \
++ if (0) { \
++ _force_has_format_string(__VA_ARGS__); \
++ } \
++ } while (0)
+
+ #define ERR_QUOTE(str) #str
+
+@@ -98,48 +106,50 @@ void _force_has_format_string(const char *format, ...) {
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+-#define RETURN_ERROR_IF(cond, err, ...) \
+- if (cond) { \
+- RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+- __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
+- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+- RAWLOG(3, ": " __VA_ARGS__); \
+- RAWLOG(3, "\n"); \
+- return ERROR(err); \
+- }
++#define RETURN_ERROR_IF(cond, err, ...) \
++ do { \
++ if (cond) { \
++ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
++ __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
++ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
++ RAWLOG(3, ": " __VA_ARGS__); \
++ RAWLOG(3, "\n"); \
++ return ERROR(err); \
++ } \
++ } while (0)
+
+ /*
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+-#define RETURN_ERROR(err, ...) \
+- do { \
+- RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+- __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
+- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+- RAWLOG(3, ": " __VA_ARGS__); \
+- RAWLOG(3, "\n"); \
+- return ERROR(err); \
+- } while(0);
++#define RETURN_ERROR(err, ...) \
++ do { \
++ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
++ __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
++ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
++ RAWLOG(3, ": " __VA_ARGS__); \
++ RAWLOG(3, "\n"); \
++ return ERROR(err); \
++ } while(0)
+
+ /*
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+-#define FORWARD_IF_ERROR(err, ...) \
+- do { \
+- size_t const err_code = (err); \
+- if (ERR_isError(err_code)) { \
+- RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+- __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
+- _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+- RAWLOG(3, ": " __VA_ARGS__); \
+- RAWLOG(3, "\n"); \
+- return err_code; \
+- } \
+- } while(0);
++#define FORWARD_IF_ERROR(err, ...) \
++ do { \
++ size_t const err_code = (err); \
++ if (ERR_isError(err_code)) { \
++ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
++ __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
++ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
++ RAWLOG(3, ": " __VA_ARGS__); \
++ RAWLOG(3, "\n"); \
++ return err_code; \
++ } \
++ } while(0)
+
+
+ #endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
-index 4507043b2..c4e25a219 100644
+index 4507043b2..2185a5786 100644
--- a/lib/zstd/common/fse.h
+++ b/lib/zstd/common/fse.h
@@ -1,7 +1,8 @@
@@ -2043,7 +2827,15 @@ index 4507043b2..c4e25a219 100644
/*!
Tutorial :
-@@ -317,16 +258,6 @@ If there is an error, the function will return an error code, which can be teste
+@@ -286,6 +227,7 @@ If there is an error, the function will return an error code, which can be teste
+
+ #endif /* FSE_H */
+
++
+ #if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+ #define FSE_H_FSE_STATIC_LINKING_ONLY
+
+@@ -317,16 +259,6 @@ If there is an error, the function will return an error code, which can be teste
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/*< same as FSE_optimalTableLog(), which used `minus==2` */
@@ -2060,7 +2852,7 @@ index 4507043b2..c4e25a219 100644
size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/*< build a fake FSE_CTable, designed to compress always the same symbolValue */
-@@ -344,19 +275,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
+@@ -344,19 +276,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
/*< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
@@ -2083,7 +2875,22 @@ index 4507043b2..c4e25a219 100644
typedef enum {
FSE_repeat_none, /*< Cannot use the previous table */
-@@ -552,7 +475,7 @@ MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePt
+@@ -539,20 +463,20 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* const stateTable = (const U16*)(statePtr->stateTable);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+- BIT_addBits(bitC, statePtr->value, nbBitsOut);
++ BIT_addBits(bitC, (size_t)statePtr->value, nbBitsOut);
+ statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+ }
+
+ MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+ {
+- BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
++ BIT_addBits(bitC, (size_t)statePtr->value, statePtr->stateLog);
+ BIT_flushBits(bitC);
+ }
+
/* FSE_getMaxNbBits() :
* Approximate maximum cost of a symbol, in bits.
@@ -2093,7 +2900,7 @@ index 4507043b2..c4e25a219 100644
* note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
-index 8dcb8ca39..99ce8fa54 100644
+index 8dcb8ca39..3a17e84f2 100644
--- a/lib/zstd/common/fse_decompress.c
+++ b/lib/zstd/common/fse_decompress.c
@@ -1,6 +1,7 @@
@@ -2105,15 +2912,18 @@ index 8dcb8ca39..99ce8fa54 100644
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-@@ -24,6 +25,7 @@
+@@ -22,8 +23,8 @@
+ #define FSE_STATIC_LINKING_ONLY
+ #include "fse.h"
#include "error_private.h"
- #define ZSTD_DEPS_NEED_MALLOC
- #include "zstd_deps.h"
+-#define ZSTD_DEPS_NEED_MALLOC
+-#include "zstd_deps.h"
++#include "zstd_deps.h" /* ZSTD_memcpy */
+#include "bits.h" /* ZSTD_highbit32 */
/* **************************************************************
-@@ -55,19 +57,6 @@
+@@ -55,19 +56,6 @@
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
@@ -2133,9 +2943,34 @@ index 8dcb8ca39..99ce8fa54 100644
static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
-@@ -127,10 +116,10 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
- }
- }
+@@ -96,7 +84,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+- symbolNext[s] = normalizedCounter[s];
++ symbolNext[s] = (U16)normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+@@ -111,8 +99,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+- {
+- U64 const add = 0x0101010101010101ull;
++ { U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+@@ -123,14 +110,13 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+- pos += n;
+- }
+- }
++ pos += (size_t)n;
++ } }
/* Now we spread those positions across the table.
- * The benefit of doing it in two stages is that we avoid the the
+ * The benefit of doing it in two stages is that we avoid the
@@ -2146,7 +2981,7 @@ index 8dcb8ca39..99ce8fa54 100644
*/
{
size_t position = 0;
-@@ -166,7 +155,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
+@@ -166,7 +152,7 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
U32 const nextState = symbolNext[symbol]++;
@@ -2155,7 +2990,7 @@ index 8dcb8ca39..99ce8fa54 100644
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
-@@ -184,49 +173,6 @@ size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsi
+@@ -184,49 +170,6 @@ size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsi
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
@@ -2205,10 +3040,13 @@ index 8dcb8ca39..99ce8fa54 100644
FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
-@@ -290,26 +236,6 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
- return op-ostart;
- }
+@@ -287,32 +230,12 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ break;
+ } }
+- return op-ostart;
+-}
+-
-
-size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
- const void* cSrc, size_t cSrcSize,
@@ -2227,12 +3065,39 @@ index 8dcb8ca39..99ce8fa54 100644
-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
-{
- return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
--}
--
++ assert(op >= ostart);
++ return (size_t)(op-ostart);
+ }
+
typedef struct {
short ncount[FSE_MAX_SYMBOL_VALUE + 1];
- FSE_DTable dtable[]; /* Dynamically sized */
-@@ -342,7 +268,8 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+- FSE_DTable dtable[]; /* Dynamically sized */
+ } FSE_DecompressWksp;
+
+
+@@ -327,13 +250,18 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
++ size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable);
++ FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos;
+
+- DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
++ FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+
++ /* correct offset to dtable depends on this property */
++ FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0);
++
+ /* normal FSE decoding mode */
+- {
+- size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
++ { size_t const NCountLength =
++ FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ assert(NCountLength <= cSrcSize);
+@@ -342,19 +270,20 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
}
if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
@@ -2241,8 +3106,24 @@ index 8dcb8ca39..99ce8fa54 100644
+ workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
- CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
-@@ -382,9 +309,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc,
+- CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
++ CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+ {
+- const void* ptr = wksp->dtable;
++ const void* ptr = dtable;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+- if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
+- return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
++ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
++ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
+ }
+ }
+
+@@ -382,9 +311,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc,
return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
@@ -2253,7 +3134,7 @@ index 8dcb8ca39..99ce8fa54 100644
-
#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
-index 5042ff870..8e7943092 100644
+index 5042ff870..57462466e 100644
--- a/lib/zstd/common/huf.h
+++ b/lib/zstd/common/huf.h
@@ -1,7 +1,8 @@
@@ -2329,12 +3210,12 @@ index 5042ff870..8e7943092 100644
/* Error Management */
-HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
-HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
+-
+unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
+const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
-
-/* *** Advanced function *** */
--
+
-/* HUF_compress2() :
- * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
@@ -2487,7 +3368,31 @@ index 5042ff870..8e7943092 100644
/* HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
-@@ -276,32 +225,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+@@ -246,9 +195,22 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
+
+ /* HUF_getNbBitsFromCTable() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+- * Note 1 : is not inlined, as HUF_CElt definition is private */
++ * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0
++ * Note 2 : is not inlined, as HUF_CElt definition is private
++ */
+ U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
+
++typedef struct {
++ BYTE tableLog;
++ BYTE maxSymbolValue;
++ BYTE unused[sizeof(size_t) - 2];
++} HUF_CTableHeader;
++
++/* HUF_readCTableHeader() :
++ * @returns The header from the CTable specifying the tableLog and the maxSymbolValue.
++ */
++HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable);
++
+ /*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+@@ -276,32 +238,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
@@ -2521,12 +3426,13 @@ index 5042ff870..8e7943092 100644
/* HUF_compress1X_repeat() :
* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
-@@ -312,47 +241,28 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+@@ -312,47 +254,28 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
- HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
--
++ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
+
-size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
@@ -2538,18 +3444,17 @@ index 5042ff870..8e7943092 100644
-size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
-#endif
--#ifndef HUF_FORCE_DECOMPRESS_X1
++size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
+ #ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
-#endif
-+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
-
+-
-size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of sing or double symbol decoder, based on DTable */
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-#endif
-+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
- #ifndef HUF_FORCE_DECOMPRESS_X1
+-#ifndef HUF_FORCE_DECOMPRESS_X1
-size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /*< double-symbols decoder */
#endif
@@ -2580,7 +3485,7 @@ index 5042ff870..8e7943092 100644
+#endif /* HUF_H_298734234 */
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
-index 1d9cc0392..a7231822b 100644
+index 1d9cc0392..2e91e7780 100644
--- a/lib/zstd/common/mem.h
+++ b/lib/zstd/common/mem.h
@@ -1,6 +1,6 @@
@@ -2591,8 +3496,16 @@ index 1d9cc0392..a7231822b 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
+@@ -24,6 +24,7 @@
+ /*-****************************************
+ * Compiler specifics
+ ******************************************/
++#undef MEM_STATIC /* may be already defined from common/compiler.h */
+ #define MEM_STATIC static inline
+
+ /*-**************************************************************
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
-index 0e3b2c0a5..7ede8cf1f 100644
+index 0e3b2c0a5..f08638cce 100644
--- a/lib/zstd/common/portability_macros.h
+++ b/lib/zstd/common/portability_macros.h
@@ -1,5 +1,6 @@
@@ -2612,7 +3525,16 @@ index 0e3b2c0a5..7ede8cf1f 100644
* This header is shared between C and ASM code, so it MUST only
* contain macro definitions. It MUST not contain any C code.
*
-@@ -65,7 +66,7 @@
+@@ -45,6 +46,8 @@
+ /* Mark the internal assembly functions as hidden */
+ #ifdef __ELF__
+ # define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
++#elif defined(__APPLE__)
++# define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func
+ #else
+ # define ZSTD_HIDE_ASM_FUNCTION(func)
+ #endif
+@@ -65,7 +68,7 @@
#endif
/*
@@ -2621,7 +3543,7 @@ index 0e3b2c0a5..7ede8cf1f 100644
* because other platforms may not support GAS assembly syntax.
*
* Only enable assembly for Linux / MacOS, other platforms may
-@@ -90,4 +91,23 @@
+@@ -90,4 +93,23 @@
*/
#define ZSTD_ENABLE_ASM_X86_64_BMI2 0
@@ -2734,7 +3656,7 @@ index 2c34e8a33..f931f7d0e 100644
+#endif /* ZSTD_DEPS_STDINT */
+#endif /* ZSTD_DEPS_NEED_STDINT */
diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
-index 93305d9b4..7f023e4d4 100644
+index 93305d9b4..11da1233e 100644
--- a/lib/zstd/common/zstd_internal.h
+++ b/lib/zstd/common/zstd_internal.h
@@ -1,5 +1,6 @@
@@ -2782,6 +3704,33 @@ index 93305d9b4..7f023e4d4 100644
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
+@@ -166,7 +169,7 @@ static void ZSTD_copy8(void* dst, const void* src) {
+ ZSTD_memcpy(dst, src, 8);
+ #endif
+ }
+-#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
++#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)
+
+ /* Need to use memmove here since the literal buffer can now be located within
+ the dst buffer. In circumstances where the op "catches up" to where the
+@@ -186,7 +189,7 @@ static void ZSTD_copy16(void* dst, const void* src) {
+ ZSTD_memcpy(dst, copy16_buf, 16);
+ #endif
+ }
+-#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
++#define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0)
+
+ #define WILDCOPY_OVERLENGTH 32
+ #define WILDCOPY_VECLEN 16
+@@ -215,7 +218,7 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
+ if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
+ /* Handle short offset copies. */
+ do {
+- COPY8(op, ip)
++ COPY8(op, ip);
+ } while (op < oend);
+ } else {
+ assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
@@ -225,12 +228,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
* one COPY16() in the first call. Then, do two calls per loop since
* at that point it is more likely to have a high trip count.
@@ -2919,6 +3868,22 @@ index 93305d9b4..7f023e4d4 100644
/* ZSTD_invalidateRepCodes() :
+@@ -420,13 +357,13 @@ typedef struct {
+
+ /*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+-/* Used by: decompress, fullbench (does not get its definition from here) */
++/* Used by: decompress, fullbench */
+ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+ /*! ZSTD_decodeSeqHeaders() :
+ * decode sequence header from src */
+-/* Used by: decompress, fullbench (does not get its definition from here) */
++/* Used by: zstd_decompress_block, fullbench */
+ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize);
+
diff --git a/lib/zstd/compress/clevels.h b/lib/zstd/compress/clevels.h
index d9a76112e..6ab8be653 100644
--- a/lib/zstd/compress/clevels.h
@@ -2932,7 +3897,7 @@ index d9a76112e..6ab8be653 100644
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
-index ec5b1ca6d..e46ca6621 100644
+index ec5b1ca6d..44a3c10be 100644
--- a/lib/zstd/compress/fse_compress.c
+++ b/lib/zstd/compress/fse_compress.c
@@ -1,6 +1,7 @@
@@ -2944,10 +3909,12 @@ index ec5b1ca6d..e46ca6621 100644
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
-@@ -26,6 +27,7 @@
+@@ -25,7 +26,8 @@
+ #include "../common/error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
- #include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
+-#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
++#include "../common/zstd_deps.h" /* ZSTD_memset */
+#include "../common/bits.h" /* ZSTD_highbit32 */
@@ -2970,7 +3937,55 @@ index ec5b1ca6d..e46ca6621 100644
U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
-@@ -342,21 +344,11 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+@@ -224,8 +226,8 @@ size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+ size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ + 4 /* bitCount initialized at 4 */
+ + 2 /* first two symbols may use one additional bit each */) / 8)
+- + 1 /* round up to whole nb bytes */
+- + 2 /* additional two bytes for bitstream flush */;
++ + 1 /* round up to whole nb bytes */
++ + 2 /* additional two bytes for bitstream flush */;
+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+ }
+
+@@ -254,7 +256,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ /* Init */
+ remaining = tableSize+1; /* +1 for extra accuracy */
+ threshold = tableSize;
+- nbBits = tableLog+1;
++ nbBits = (int)tableLog+1;
+
+ while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
+ if (previousIs0) {
+@@ -273,7 +275,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ }
+ while (symbol >= start+3) {
+ start+=3;
+- bitStream += 3 << bitCount;
++ bitStream += 3U << bitCount;
+ bitCount += 2;
+ }
+ bitStream += (symbol-start) << bitCount;
+@@ -293,7 +295,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ count++; /* +1 for extra accuracy */
+ if (count>=threshold)
+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+- bitStream += count << bitCount;
++ bitStream += (U32)count << bitCount;
+ bitCount += nbBits;
+ bitCount -= (count<max);
+ previousIs0 = (count==1);
+@@ -321,7 +323,8 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ out[1] = (BYTE)(bitStream>>8);
+ out+= (bitCount+7) /8;
+
+- return (out-ostart);
++ assert(out >= ostart);
++ return (size_t)(out-ostart);
+ }
+
+
+@@ -342,21 +345,11 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize,
* FSE Compression Code
****************************************************************/
@@ -2994,7 +4009,7 @@ index ec5b1ca6d..e46ca6621 100644
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1); /* Not supported, RLE should be used instead */
return minBits;
-@@ -364,7 +356,7 @@ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+@@ -364,7 +357,7 @@ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
@@ -3003,7 +4018,7 @@ index ec5b1ca6d..e46ca6621 100644
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
-@@ -532,40 +524,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+@@ -532,40 +525,6 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
return tableLog;
}
@@ -3044,7 +4059,7 @@ index ec5b1ca6d..e46ca6621 100644
/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
-@@ -664,5 +622,4 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+@@ -664,5 +623,4 @@ size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
@@ -3079,7 +4094,7 @@ index fc1830abc..f7687b0fc 100644
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
-index 74ef0db47..83241abaf 100644
+index 74ef0db47..0b229f5d2 100644
--- a/lib/zstd/compress/huf_compress.c
+++ b/lib/zstd/compress/huf_compress.c
@@ -1,6 +1,7 @@
@@ -3125,20 +4140,20 @@ index 74ef0db47..83241abaf 100644
+#if DEBUGLEVEL >= 2
+
+static size_t showU32(const U32* arr, size_t size)
-+{
+ {
+- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+ size_t u;
+ for (u=0; u<size; u++) {
+ RAWLOG(6, " %u", arr[u]); (void)arr;
+ }
+ RAWLOG(6, " \n");
+ return size;
-+}
-+
+ }
+
+static size_t HUF_getNbBits(HUF_CElt elt);
+
+static size_t showCTableBits(const HUF_CElt* ctable, size_t size)
- {
-- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
++{
+ size_t u;
+ for (u=0; u<size; u++) {
+ RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable;
@@ -3146,8 +4161,8 @@ index 74ef0db47..83241abaf 100644
+ RAWLOG(6, " \n");
+ return size;
+
- }
-
++}
++
+static size_t showHNodeSymbols(const nodeElt* hnode, size_t size)
+{
+ size_t u;
@@ -3194,16 +4209,45 @@ index 74ef0db47..83241abaf 100644
}
static size_t HUF_getValueFast(HUF_CElt elt)
-@@ -175,6 +233,8 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
+@@ -160,6 +218,25 @@ static void HUF_setValue(HUF_CElt* elt, size_t value)
+ }
+ }
+
++HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable)
++{
++ HUF_CTableHeader header;
++ ZSTD_memcpy(&header, ctable, sizeof(header));
++ return header;
++}
++
++static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue)
++{
++ HUF_CTableHeader header;
++ HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header));
++ ZSTD_memset(&header, 0, sizeof(header));
++ assert(tableLog < 256);
++ header.tableLog = (BYTE)tableLog;
++ assert(maxSymbolValue < 256);
++ header.maxSymbolValue = (BYTE)maxSymbolValue;
++ ZSTD_memcpy(ctable, &header, sizeof(header));
++}
++
+ typedef struct {
+ HUF_CompressWeightsWksp wksp;
+ BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
+@@ -175,6 +252,11 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
U32 n;
HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+ HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp));
+
++ assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
++ assert(HUF_readCTableHeader(CTable).tableLog == huffLog);
++
/* check conditions */
if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
-@@ -204,16 +264,6 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
+@@ -204,16 +286,6 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
return ((maxSymbolValue+1)/2) + 1;
}
@@ -3220,13 +4264,32 @@ index 74ef0db47..83241abaf 100644
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
-@@ -269,68 +319,64 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
+@@ -231,7 +303,9 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+
+- CTable[0] = tableLog;
++ *maxSymbolValuePtr = nbSymbols - 1;
++
++ HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);
+
+ /* Prepare base value per rank */
+ { U32 n, nextRankStart = 0;
+@@ -263,74 +337,71 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
+ { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
+ }
+
+- *maxSymbolValuePtr = nbSymbols - 1;
+ return readSize;
+ }
U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
- const HUF_CElt* ct = CTable + 1;
+ const HUF_CElt* const ct = CTable + 1;
assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
++ if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue)
++ return 0;
return (U32)HUF_getNbBits(ct[symbolValue]);
}
@@ -3316,7 +4379,7 @@ index 74ef0db47..83241abaf 100644
assert(totalCost > 0);
/* repay normalized cost */
-@@ -339,19 +385,19 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+@@ -339,19 +410,19 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
/* Get pos of last (smallest = lowest cum. count) symbol per rank */
ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
@@ -3340,7 +4403,7 @@ index 74ef0db47..83241abaf 100644
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
-@@ -391,7 +437,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+@@ -391,7 +462,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
rankLast[nBitsToDecrease] = noSymbol;
else {
rankLast[nBitsToDecrease]--;
@@ -3349,7 +4412,7 @@ index 74ef0db47..83241abaf 100644
rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
}
} /* while (totalCost > 0) */
-@@ -403,11 +449,11 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+@@ -403,11 +474,11 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
* TODO.
*/
while (totalCost < 0) { /* Sometimes, cost correction overshoot */
@@ -3364,7 +4427,7 @@ index 74ef0db47..83241abaf 100644
huffNode[n+1].nbBits--;
assert(n >= 0);
rankLast[1] = (U32)(n+1);
-@@ -421,7 +467,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+@@ -421,7 +492,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
} /* repay normalized cost */
} /* there are several too large elements (at least >= 2) */
@@ -3373,7 +4436,7 @@ index 74ef0db47..83241abaf 100644
}
typedef struct {
-@@ -429,7 +475,7 @@ typedef struct {
+@@ -429,7 +500,7 @@ typedef struct {
U16 curr;
} rankPos;
@@ -3382,7 +4445,7 @@ index 74ef0db47..83241abaf 100644
/* Number of buckets available for HUF_sort() */
#define RANK_POSITION_TABLE_SIZE 192
-@@ -448,8 +494,8 @@ typedef struct {
+@@ -448,8 +519,8 @@ typedef struct {
* Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
*/
#define RANK_POSITION_MAX_COUNT_LOG 32
@@ -3393,7 +4456,7 @@ index 74ef0db47..83241abaf 100644
/* Return the appropriate bucket index for a given count. See definition of
* RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
-@@ -457,7 +503,7 @@ typedef struct {
+@@ -457,7 +528,7 @@ typedef struct {
static U32 HUF_getIndex(U32 const count) {
return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
? count
@@ -3402,7 +4465,7 @@ index 74ef0db47..83241abaf 100644
}
/* Helper swap function for HUF_quickSortPartition() */
-@@ -580,7 +626,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
+@@ -580,7 +651,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
/* Sort each bucket. */
for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
@@ -3411,7 +4474,7 @@ index 74ef0db47..83241abaf 100644
U32 const bucketStartIdx = rankPosition[n].base;
if (bucketSize > 1) {
assert(bucketStartIdx < maxSymbolValue1);
-@@ -591,6 +637,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
+@@ -591,6 +662,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
assert(HUF_isSorted(huffNode, maxSymbolValue1));
}
@@ -3419,7 +4482,7 @@ index 74ef0db47..83241abaf 100644
/* HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
-@@ -611,6 +658,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
+@@ -611,6 +683,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
int lowS, lowN;
int nodeNb = STARTNODE;
int n, nodeRoot;
@@ -3427,7 +4490,7 @@ index 74ef0db47..83241abaf 100644
/* init for parents */
nonNullRank = (int)maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
-@@ -637,6 +685,8 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
+@@ -637,6 +710,8 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
for (n=0; n<=nonNullRank; n++)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
@@ -3436,8 +4499,13 @@ index 74ef0db47..83241abaf 100644
return nonNullRank;
}
-@@ -674,28 +724,36 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i
- CTable[0] = maxNbBits;
+@@ -671,31 +746,40 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i
+ HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
+ for (n=0; n<alphabetSize; n++)
+ HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */
+- CTable[0] = maxNbBits;
++
++ HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue);
}
-size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
@@ -3478,7 +4546,35 @@ index 74ef0db47..83241abaf 100644
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
-@@ -804,7 +862,7 @@ FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int id
+@@ -716,13 +800,20 @@ size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count,
+ }
+
+ int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+- HUF_CElt const* ct = CTable + 1;
+- int bad = 0;
+- int s;
+- for (s = 0; s <= (int)maxSymbolValue; ++s) {
+- bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
+- }
+- return !bad;
++ HUF_CTableHeader header = HUF_readCTableHeader(CTable);
++ HUF_CElt const* ct = CTable + 1;
++ int bad = 0;
++ int s;
++
++ assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX);
++
++ if (header.maxSymbolValue < maxSymbolValue)
++ return 0;
++
++ for (s = 0; s <= (int)maxSymbolValue; ++s) {
++ bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
++ }
++ return !bad;
+ }
+
+ size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+@@ -804,7 +895,7 @@ FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int id
#if DEBUGLEVEL >= 1
{
size_t const nbBits = HUF_getNbBits(elt);
@@ -3487,7 +4583,7 @@ index 74ef0db47..83241abaf 100644
(void)dirtyBits;
/* Middle bits are 0. */
assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
-@@ -884,7 +942,7 @@ static size_t HUF_closeCStream(HUF_CStream_t* bitC)
+@@ -884,7 +975,7 @@ static size_t HUF_closeCStream(HUF_CStream_t* bitC)
{
size_t const nbBits = bitC->bitPos[0] & 0xFF;
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
@@ -3496,7 +4592,28 @@ index 74ef0db47..83241abaf 100644
}
}
-@@ -1045,9 +1103,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+@@ -964,17 +1055,17 @@ HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+ {
+- U32 const tableLog = (U32)CTable[0];
++ U32 const tableLog = HUF_readCTableHeader(CTable).tableLog;
+ HUF_CElt const* ct = CTable + 1;
+ const BYTE* ip = (const BYTE*) src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+- BYTE* op = ostart;
+ HUF_CStream_t bitC;
+
+ /* init */
+ if (dstSize < 8) return 0; /* not enough space to compress */
+- { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
++ { BYTE* op = ostart;
++ size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
+ if (HUF_isError(initErr)) return 0; }
+
+ if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
+@@ -1045,9 +1136,9 @@ HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
@@ -3508,7 +4625,7 @@ index 74ef0db47..83241abaf 100644
return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
}
return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
-@@ -1058,28 +1116,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+@@ -1058,28 +1149,23 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
@@ -3523,13 +4640,13 @@ index 74ef0db47..83241abaf 100644
#endif
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
--{
++size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
+ {
- return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
-size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
-+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
- {
+-{
- return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
@@ -3542,7 +4659,7 @@ index 74ef0db47..83241abaf 100644
{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
-@@ -1093,7 +1146,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+@@ -1093,7 +1179,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
op += 6; /* jumpTable */
assert(op <= oend);
@@ -3551,7 +4668,7 @@ index 74ef0db47..83241abaf 100644
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
-@@ -1101,7 +1154,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+@@ -1101,7 +1187,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
ip += segmentSize;
assert(op <= oend);
@@ -3560,7 +4677,7 @@ index 74ef0db47..83241abaf 100644
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
-@@ -1109,7 +1162,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+@@ -1109,7 +1195,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
ip += segmentSize;
assert(op <= oend);
@@ -3569,7 +4686,7 @@ index 74ef0db47..83241abaf 100644
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
-@@ -1118,7 +1171,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+@@ -1118,7 +1204,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
ip += segmentSize;
assert(op <= oend);
assert(ip <= iend);
@@ -3578,24 +4695,24 @@ index 74ef0db47..83241abaf 100644
if (cSize == 0 || cSize > 65535) return 0;
op += cSize;
}
-@@ -1126,14 +1179,9 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+@@ -1126,14 +1212,9 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
return (size_t)(op-ostart);
}
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
-+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
- {
+-{
- return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
-size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
--{
++size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
+ {
- return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
-@@ -1141,11 +1189,11 @@ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
+@@ -1141,11 +1222,11 @@ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
@@ -3610,7 +4727,7 @@ index 74ef0db47..83241abaf 100644
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* uncompressible */
op += cSize;
-@@ -1168,6 +1216,79 @@ typedef struct {
+@@ -1168,6 +1249,81 @@ typedef struct {
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
@@ -3651,7 +4768,7 @@ index 74ef0db47..83241abaf 100644
+
+ { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
+ size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
-+ size_t maxBits, hSize, newSize;
++ size_t hSize, newSize;
+ const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
+ const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
+ size_t optSize = ((size_t) ~0) - 1;
@@ -3662,12 +4779,14 @@ index 74ef0db47..83241abaf 100644
+ /* Search until size increases */
+ for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
+ DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
-+ maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
-+ if (ERR_isError(maxBits)) continue;
+
-+ if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
++ { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
++ if (ERR_isError(maxBits)) continue;
++
++ if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
+
-+ hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
++ hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
++ }
+
+ if (ERR_isError(hSize)) continue;
+
@@ -3690,7 +4809,7 @@ index 74ef0db47..83241abaf 100644
/* HUF_compress_internal() :
* `workSpace_align4` must be aligned on 4-bytes boundaries,
* and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
-@@ -1177,14 +1298,14 @@ HUF_compress_internal (void* dst, size_t dstSize,
+@@ -1177,14 +1333,14 @@ HUF_compress_internal (void* dst, size_t dstSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
void* workSpace, size_t wkspSize,
@@ -3707,7 +4826,7 @@ index 74ef0db47..83241abaf 100644
HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
/* checks & inits */
-@@ -1198,16 +1319,17 @@ HUF_compress_internal (void* dst, size_t dstSize,
+@@ -1198,16 +1354,17 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
/* Heuristic : If old table is valid, use it for small inputs */
@@ -3728,7 +4847,7 @@ index 74ef0db47..83241abaf 100644
{ unsigned maxSymbolValueBegin = maxSymbolValue;
CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
largestTotal += largestBegin;
-@@ -1224,6 +1346,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
+@@ -1224,6 +1381,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
@@ -3736,7 +4855,7 @@ index 74ef0db47..83241abaf 100644
/* Check validity of previous table */
if ( repeat
-@@ -1232,19 +1355,20 @@ HUF_compress_internal (void* dst, size_t dstSize,
+@@ -1232,25 +1390,20 @@ HUF_compress_internal (void* dst, size_t dstSize,
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
@@ -3756,11 +4875,17 @@ index 74ef0db47..83241abaf 100644
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
+- }
+- /* Zero unused symbols in CTable, so we can check it for validity */
+- {
+- size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
+- size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
+- ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
+ DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
}
- /* Zero unused symbols in CTable, so we can check it for validity */
- {
-@@ -1263,7 +1387,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
+
+ /* Write table description header */
+@@ -1263,7 +1416,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
@@ -3769,7 +4894,7 @@ index 74ef0db47..83241abaf 100644
} }
/* Use the new huffman table */
-@@ -1275,46 +1399,20 @@ HUF_compress_internal (void* dst, size_t dstSize,
+@@ -1275,61 +1428,35 @@ HUF_compress_internal (void* dst, size_t dstSize,
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
@@ -3820,7 +4945,11 @@ index 74ef0db47..83241abaf 100644
}
/* HUF_compress4X_repeat():
-@@ -1325,11 +1423,11 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+ * compress input using 4 streams.
+ * consider skipping quickly
+- * re-use an existing huffman compression table */
++ * reuse an existing huffman compression table */
+ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
@@ -3836,7 +4965,7 @@ index 74ef0db47..83241abaf 100644
}
-
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
-index f620cafca..c1c316e9e 100644
+index f620cafca..0d139727c 100644
--- a/lib/zstd/compress/zstd_compress.c
+++ b/lib/zstd/compress/zstd_compress.c
@@ -1,5 +1,6 @@
@@ -3893,7 +5022,11 @@ index f620cafca..c1c316e9e 100644
}
-@@ -171,12 +176,9 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+@@ -168,15 +173,13 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
+
+ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+ {
++ DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx);
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
@@ -3908,7 +5041,7 @@ index f620cafca..c1c316e9e 100644
}
return 0;
}
-@@ -257,9 +259,9 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
+@@ -257,9 +260,9 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}
@@ -3920,7 +5053,7 @@ index f620cafca..c1c316e9e 100644
*/
static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
-@@ -267,6 +269,34 @@ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
+@@ -267,6 +270,34 @@ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
@@ -3955,7 +5088,7 @@ index f620cafca..c1c316e9e 100644
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
-@@ -284,6 +314,10 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+@@ -284,6 +315,10 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
}
cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
@@ -3966,7 +5099,7 @@ index f620cafca..c1c316e9e 100644
assert(!ZSTD_checkCParams(cParams));
return cctxParams;
}
-@@ -329,10 +363,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)
+@@ -329,10 +364,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)
#define ZSTD_NO_CLEVEL 0
/*
@@ -3982,7 +5115,7 @@ index f620cafca..c1c316e9e 100644
{
assert(!ZSTD_checkCParams(params->cParams));
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
-@@ -345,6 +382,9 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par
+@@ -345,6 +383,9 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par
cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
@@ -3992,7 +5125,7 @@ index f620cafca..c1c316e9e 100644
DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
}
-@@ -359,7 +399,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete
+@@ -359,7 +400,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete
/*
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
@@ -4001,7 +5134,7 @@ index f620cafca..c1c316e9e 100644
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
-@@ -455,8 +495,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+@@ -455,8 +496,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
return bounds;
case ZSTD_c_enableLongDistanceMatching:
@@ -4012,7 +5145,7 @@ index f620cafca..c1c316e9e 100644
return bounds;
case ZSTD_c_ldmHashLog:
-@@ -549,6 +589,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+@@ -549,6 +590,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = 1;
return bounds;
@@ -4039,7 +5172,23 @@ index f620cafca..c1c316e9e 100644
default:
bounds.error = ERROR(parameter_unsupported);
return bounds;
-@@ -613,6 +673,10 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+@@ -567,10 +628,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+ return 0;
+ }
+
+-#define BOUNDCHECK(cParam, val) { \
+- RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+- parameter_outOfBound, "Param out of bounds"); \
+-}
++#define BOUNDCHECK(cParam, val) \
++ do { \
++ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
++ parameter_outOfBound, "Param out of bounds"); \
++ } while (0)
+
+
+ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+@@ -613,6 +675,10 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_useBlockSplitter:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
@@ -4050,7 +5199,7 @@ index f620cafca..c1c316e9e 100644
default:
return 0;
}
-@@ -625,7 +689,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
+@@ -625,7 +691,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
} else {
@@ -4059,7 +5208,7 @@ index f620cafca..c1c316e9e 100644
} }
switch(param)
-@@ -668,6 +732,10 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
+@@ -668,6 +734,10 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_useBlockSplitter:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
@@ -4070,7 +5219,7 @@ index f620cafca..c1c316e9e 100644
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
-@@ -723,12 +791,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+@@ -723,12 +793,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_minMatch, value);
@@ -4085,7 +5234,7 @@ index f620cafca..c1c316e9e 100644
return CCtxParams->cParams.targetLength;
case ZSTD_c_strategy :
-@@ -741,12 +809,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+@@ -741,12 +811,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
/* Content size written in frame header _when known_ (default:1) */
DEBUGLOG(4, "set content size flag = %u", (value!=0));
CCtxParams->fParams.contentSizeFlag = value != 0;
@@ -4100,7 +5249,7 @@ index f620cafca..c1c316e9e 100644
case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
-@@ -755,18 +823,18 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+@@ -755,18 +825,18 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_forceMaxWindow :
CCtxParams->forceWindow = (value != 0);
@@ -4122,7 +5271,7 @@ index f620cafca..c1c316e9e 100644
CCtxParams->literalCompressionMode = lcm;
return CCtxParams->literalCompressionMode;
}
-@@ -789,47 +857,48 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+@@ -789,47 +859,50 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_enableDedicatedDictSearch :
CCtxParams->enableDedicatedDictSearch = (value!=0);
@@ -4163,9 +5312,12 @@ index f620cafca..c1c316e9e 100644
return CCtxParams->ldmParams.hashRateLog;
case ZSTD_c_targetCBlockSize :
- if (value!=0) /* 0 ==> default */
+- if (value!=0) /* 0 ==> default */
++ if (value!=0) { /* 0 ==> default */
++ value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN);
BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
- CCtxParams->targetCBlockSize = value;
++ }
+ CCtxParams->targetCBlockSize = (U32)value;
return CCtxParams->targetCBlockSize;
@@ -4178,10 +5330,22 @@ index f620cafca..c1c316e9e 100644
case ZSTD_c_stableInBuffer:
BOUNDCHECK(ZSTD_c_stableInBuffer, value);
-@@ -866,6 +935,27 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
- CCtxParams->deterministicRefPrefix = !!value;
- return CCtxParams->deterministicRefPrefix;
+@@ -849,7 +922,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ case ZSTD_c_validateSequences:
+ BOUNDCHECK(ZSTD_c_validateSequences, value);
+ CCtxParams->validateSequences = value;
+- return CCtxParams->validateSequences;
++ return (size_t)CCtxParams->validateSequences;
+ case ZSTD_c_useBlockSplitter:
+ BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
+@@ -864,7 +937,28 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ case ZSTD_c_deterministicRefPrefix:
+ BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
+ CCtxParams->deterministicRefPrefix = !!value;
+- return CCtxParams->deterministicRefPrefix;
++ return (size_t)CCtxParams->deterministicRefPrefix;
++
+ case ZSTD_c_prefetchCDictTables:
+ BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
+ CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value;
@@ -4190,7 +5354,7 @@ index f620cafca..c1c316e9e 100644
+ case ZSTD_c_enableSeqProducerFallback:
+ BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
+ CCtxParams->enableMatchFinderFallback = value;
-+ return CCtxParams->enableMatchFinderFallback;
++ return (size_t)CCtxParams->enableMatchFinderFallback;
+
+ case ZSTD_c_maxBlockSize:
+ if (value!=0) /* 0 ==> default */
@@ -4202,11 +5366,10 @@ index f620cafca..c1c316e9e 100644
+ BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value);
+ CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->searchForExternalRepcodes;
-+
+
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
- }
-@@ -980,6 +1070,18 @@ size_t ZSTD_CCtxParams_getParameter(
+@@ -980,6 +1074,18 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_deterministicRefPrefix:
*value = (int)CCtxParams->deterministicRefPrefix;
break;
@@ -4225,7 +5388,7 @@ index f620cafca..c1c316e9e 100644
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return 0;
-@@ -1006,9 +1108,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+@@ -1006,9 +1112,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
return 0;
}
@@ -4274,7 +5437,7 @@ index f620cafca..c1c316e9e 100644
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't set pledgedSrcSize when not in init stage.");
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
-@@ -1024,9 +1164,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams(
+@@ -1024,9 +1168,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams(
ZSTD_compressionParameters* cParams);
/*
@@ -4287,7 +5450,7 @@ index f620cafca..c1c316e9e 100644
*/
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
-@@ -1039,8 +1179,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+@@ -1039,8 +1183,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
return 0;
}
if (dl->cdict != NULL) {
@@ -4297,7 +5460,7 @@ index f620cafca..c1c316e9e 100644
return 0;
}
assert(dl->dictSize > 0);
-@@ -1060,26 +1200,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+@@ -1060,26 +1204,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
}
size_t ZSTD_CCtx_loadDictionary_advanced(
@@ -4338,18 +5501,34 @@ index f620cafca..c1c316e9e 100644
}
cctx->localDict.dictSize = dictSize;
cctx->localDict.dictContentType = dictContentType;
-@@ -1149,8 +1293,9 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
+@@ -1149,7 +1297,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
- "Can't reset parameters only when not in init stage.");
+ "Reset parameters is only possible during init stage.");
ZSTD_clearAllDicts(cctx);
-+ ZSTD_memset(&cctx->externalMatchCtx, 0, sizeof(cctx->externalMatchCtx));
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
- return 0;
-@@ -1247,7 +1392,8 @@ static ZSTD_compressionParameters
+@@ -1178,11 +1326,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+ static ZSTD_compressionParameters
+ ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+ {
+-# define CLAMP_TYPE(cParam, val, type) { \
+- ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+- if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+- else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+- }
++# define CLAMP_TYPE(cParam, val, type) \
++ do { \
++ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
++ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
++ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
++ } while (0)
+ # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
+ CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+ CLAMP(ZSTD_c_chainLog, cParams.chainLog);
+@@ -1247,12 +1396,55 @@ static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize,
@@ -4359,7 +5538,54 @@ index f620cafca..c1c316e9e 100644
{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
-@@ -1281,8 +1427,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+ assert(ZSTD_checkCParams(cPar)==0);
+
++ /* Cascade the selected strategy down to the next-highest one built into
++ * this binary. */
++#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_btultra2) {
++ cPar.strategy = ZSTD_btultra;
++ }
++ if (cPar.strategy == ZSTD_btultra) {
++ cPar.strategy = ZSTD_btopt;
++ }
++#endif
++#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_btopt) {
++ cPar.strategy = ZSTD_btlazy2;
++ }
++#endif
++#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_btlazy2) {
++ cPar.strategy = ZSTD_lazy2;
++ }
++#endif
++#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_lazy2) {
++ cPar.strategy = ZSTD_lazy;
++ }
++#endif
++#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_lazy) {
++ cPar.strategy = ZSTD_greedy;
++ }
++#endif
++#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_greedy) {
++ cPar.strategy = ZSTD_dfast;
++ }
++#endif
++#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
++ if (cPar.strategy == ZSTD_dfast) {
++ cPar.strategy = ZSTD_fast;
++ cPar.targetLength = 0;
++ }
++#endif
++
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+@@ -1281,8 +1473,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
}
/* resize windowLog if input is small enough, to use less memory */
@@ -4370,7 +5596,7 @@ index f620cafca..c1c316e9e 100644
U32 const tSize = (U32)(srcSize + dictSize);
static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
-@@ -1300,6 +1446,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+@@ -1300,6 +1492,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
@@ -4413,7 +5639,7 @@ index f620cafca..c1c316e9e 100644
return cPar;
}
-@@ -1310,7 +1492,7 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+@@ -1310,7 +1538,7 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
@@ -4422,7 +5648,7 @@ index f620cafca..c1c316e9e 100644
}
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
-@@ -1341,7 +1523,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+@@ -1341,7 +1569,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
assert(!ZSTD_checkCParams(cParams));
/* srcSizeHint == 0 means 0 */
@@ -4431,16 +5657,21 @@ index f620cafca..c1c316e9e 100644
}
static size_t
-@@ -1370,7 +1552,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
- + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
- + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+@@ -1367,10 +1595,10 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+- + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+- + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
++ + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
++ + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
- ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
+ ? ZSTD_cwksp_aligned_alloc_size(hSize)
: 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
-@@ -1386,6 +1568,13 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+@@ -1386,6 +1614,13 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}
@@ -4454,7 +5685,7 @@ index f620cafca..c1c316e9e 100644
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
-@@ -1393,12 +1582,13 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+@@ -1393,12 +1628,13 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_paramSwitch_e useRowMatchFinder,
const size_t buffInSize,
const size_t buffOutSize,
@@ -4472,7 +5703,7 @@ index f620cafca..c1c316e9e 100644
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
-@@ -1417,6 +1607,11 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+@@ -1417,6 +1653,11 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
@@ -4484,7 +5715,7 @@ index f620cafca..c1c316e9e 100644
size_t const neededSpace =
cctxSpace +
entropySpace +
-@@ -1425,7 +1620,8 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+@@ -1425,7 +1666,8 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
ldmSeqSpace +
matchStateSize +
tokenSpace +
@@ -4494,16 +5725,16 @@ index f620cafca..c1c316e9e 100644
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
-@@ -1443,7 +1639,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+@@ -1443,7 +1685,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
* be needed. However, we still allocate two 0-sized buffers, which can
* take space under ASAN. */
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
- &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
-+ &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize);
++ &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
-@@ -1493,7 +1689,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+@@ -1493,7 +1735,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
@@ -4512,16 +5743,16 @@ index f620cafca..c1c316e9e 100644
size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
? ((size_t)1 << cParams.windowLog) + blockSize
: 0;
-@@ -1504,7 +1700,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+@@ -1504,7 +1746,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
- ZSTD_CONTENTSIZE_UNKNOWN);
-+ ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize);
++ ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
}
}
-@@ -1637,6 +1833,19 @@ typedef enum {
+@@ -1637,6 +1879,19 @@ typedef enum {
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
@@ -4541,7 +5772,7 @@ index f620cafca..c1c316e9e 100644
static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
-@@ -1664,6 +1873,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+@@ -1664,6 +1919,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
}
ms->hashLog3 = hashLog3;
@@ -4549,11 +5780,26 @@ index f620cafca..c1c316e9e 100644
ZSTD_invalidateMatchState(ms);
-@@ -1685,6 +1895,27 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+@@ -1685,22 +1941,19 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp_clean_tables(ws);
}
-+ if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+- /* opt parser space */
+- if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
+- DEBUGLOG(4, "reserving optimal parser space");
+- ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
+- ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
+- ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
+- ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
+- ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
+- ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+- }
+-
+ if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+- { /* Row match finder needs an additional table of hashes ("tags") */
+- size_t const tagTableSize = hSize*sizeof(U16);
+- ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
+- if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ /* Row match finder needs an additional table of hashes ("tags") */
+ size_t const tagTableSize = hSize;
+ /* We want to generate a new salt in case we reset a Cctx, but we always want to use
@@ -4566,38 +5812,28 @@ index f620cafca..c1c316e9e 100644
+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ms->hashSalt = 0;
-+ }
-+ { /* Switch to 32-entry rows if searchLog is 5 (or more) */
-+ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
-+ assert(cParams->hashLog >= rowLog);
-+ ms->rowHashLog = cParams->hashLog - rowLog;
-+ }
-+ }
-+
- /* opt parser space */
- if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
- DEBUGLOG(4, "reserving optimal parser space");
-@@ -1696,19 +1927,6 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
- ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ }
+ { /* Switch to 32-entry rows if searchLog is 5 (or more) */
+ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
+@@ -1709,6 +1962,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ }
}
-- if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
-- { /* Row match finder needs an additional table of hashes ("tags") */
-- size_t const tagTableSize = hSize*sizeof(U16);
-- ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
-- if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
-- }
-- { /* Switch to 32-entry rows if searchLog is 5 (or more) */
-- U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
-- assert(cParams->hashLog >= rowLog);
-- ms->rowHashLog = cParams->hashLog - rowLog;
-- }
-- }
--
++ /* opt parser space */
++ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
++ DEBUGLOG(4, "reserving optimal parser space");
++ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
++ ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
++ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
++ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
++ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
++ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
++ }
++
ms->cParams = *cParams;
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
-@@ -1768,6 +1986,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+@@ -1768,6 +2032,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
assert(params->useRowMatchFinder != ZSTD_ps_auto);
assert(params->useBlockSplitter != ZSTD_ps_auto);
assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
@@ -4605,7 +5841,7 @@ index f620cafca..c1c316e9e 100644
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* Adjust long distance matching parameters */
ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
-@@ -1776,9 +1995,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+@@ -1776,9 +2041,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
}
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
@@ -4613,20 +5849,30 @@ index f620cafca..c1c316e9e 100644
- U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
- size_t const maxNbSeq = blockSize / divider;
+ size_t const blockSize = MIN(params->maxBlockSize, windowSize);
-+ size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, params->useSequenceProducer);
++ size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params));
size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
-@@ -1795,7 +2013,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+@@ -1795,8 +2059,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
size_t const neededSpace =
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
- buffInSize, buffOutSize, pledgedSrcSize);
-+ buffInSize, buffOutSize, pledgedSrcSize, params->useSequenceProducer, params->maxBlockSize);
- int resizeWorkspace;
+- int resizeWorkspace;
++ buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
-@@ -1838,6 +2056,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+
+@@ -1805,7 +2068,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+ { /* Check if workspace is large enough, alloc a new one if needed */
+ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
+ int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
+- resizeWorkspace = workspaceTooSmall || workspaceWasteful;
++ int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+ DEBUGLOG(4, "Need %zu B workspace", neededSpace);
+ DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+@@ -1838,6 +2101,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
/* init params */
zc->blockState.matchState.cParams = params->cParams;
@@ -4634,7 +5880,7 @@ index f620cafca..c1c316e9e 100644
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
-@@ -1854,13 +2073,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+@@ -1854,13 +2118,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
@@ -4663,10 +5909,10 @@ index f620cafca..c1c316e9e 100644
+ }
+
+ /* reserve space for block-level external sequences */
-+ if (params->useSequenceProducer) {
++ if (ZSTD_hasExtSeqProd(params)) {
+ size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
-+ zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq;
-+ zc->externalMatchCtx.seqBuffer =
++ zc->extSeqBufCapacity = maxNbExternalSeq;
++ zc->extSeqBuf =
+ (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
+ }
+
@@ -4682,7 +5928,7 @@ index f620cafca..c1c316e9e 100644
zc->bufferedPolicy = zbuff;
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
-@@ -1883,32 +2135,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+@@ -1883,32 +2180,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
@@ -4716,7 +5962,7 @@ index f620cafca..c1c316e9e 100644
zc->initialized = 1;
-@@ -1980,7 +2209,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+@@ -1980,7 +2254,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
}
params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
@@ -4726,7 +5972,7 @@ index f620cafca..c1c316e9e 100644
params.cParams.windowLog = windowLog;
params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
-@@ -2019,6 +2249,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+@@ -2019,6 +2294,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
return 0;
}
@@ -4749,7 +5995,7 @@ index f620cafca..c1c316e9e 100644
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
-@@ -2054,21 +2300,23 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
+@@ -2054,21 +2345,23 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
: 0;
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
@@ -4782,7 +6028,7 @@ index f620cafca..c1c316e9e 100644
}
}
-@@ -2147,6 +2395,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+@@ -2147,6 +2440,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
params.ldmParams = srcCCtx->appliedParams.ldmParams;
params.fParams = fParams;
@@ -4790,7 +6036,7 @@ index f620cafca..c1c316e9e 100644
ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff);
-@@ -2294,7 +2543,7 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par
+@@ -2294,7 +2588,7 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par
/* See doc/zstd_compression_format.md for detailed format description */
@@ -4799,7 +6045,7 @@ index f620cafca..c1c316e9e 100644
{
const seqDef* const sequences = seqStorePtr->sequencesStart;
BYTE* const llCodeTable = seqStorePtr->llCode;
-@@ -2302,18 +2551,24 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+@@ -2302,18 +2596,24 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
BYTE* const mlCodeTable = seqStorePtr->mlCode;
U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
U32 u;
@@ -4825,7 +6071,7 @@ index f620cafca..c1c316e9e 100644
}
/* ZSTD_useTargetCBlockSize():
-@@ -2347,6 +2602,7 @@ typedef struct {
+@@ -2347,6 +2647,7 @@ typedef struct {
U32 MLtype;
size_t size;
size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
@@ -4833,7 +6079,7 @@ index f620cafca..c1c316e9e 100644
} ZSTD_symbolEncodingTypeStats_t;
/* ZSTD_buildSequencesStatistics():
-@@ -2357,11 +2613,13 @@ typedef struct {
+@@ -2357,11 +2658,13 @@ typedef struct {
* entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
*/
static ZSTD_symbolEncodingTypeStats_t
@@ -4852,7 +6098,7 @@ index f620cafca..c1c316e9e 100644
BYTE* const ostart = dst;
const BYTE* const oend = dstEnd;
BYTE* op = ostart;
-@@ -2375,7 +2633,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
+@@ -2375,7 +2678,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
stats.lastCountSize = 0;
/* convert length/distances into codes */
@@ -4861,7 +6107,7 @@ index f620cafca..c1c316e9e 100644
assert(op <= oend);
assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
/* build CTable for Literal Lengths */
-@@ -2480,22 +2738,22 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
+@@ -2480,22 +2783,22 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
*/
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
@@ -4893,7 +6139,7 @@ index f620cafca..c1c316e9e 100644
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
-@@ -2503,29 +2761,31 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
+@@ -2503,29 +2806,31 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t lastCountSize;
@@ -4932,7 +6178,7 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
assert(cSize <= dstCapacity);
op += cSize;
-@@ -2551,11 +2811,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
+@@ -2551,11 +2856,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
}
@@ -4947,7 +6193,7 @@ index f620cafca..c1c316e9e 100644
&prevEntropy->fse, &nextEntropy->fse,
op, oend,
strategy, count,
-@@ -2564,6 +2823,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
+@@ -2564,6 +2868,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
*seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
lastCountSize = stats.lastCountSize;
op += stats.size;
@@ -4955,7 +6201,7 @@ index f620cafca..c1c316e9e 100644
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
-@@ -2598,14 +2858,15 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
+@@ -2598,14 +2903,15 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
}
MEM_STATIC size_t
@@ -4979,7 +6225,7 @@ index f620cafca..c1c316e9e 100644
{
size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
-@@ -2615,15 +2876,21 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
+@@ -2615,15 +2921,21 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
*/
@@ -5003,7 +6249,117 @@ index f620cafca..c1c316e9e 100644
return cSize;
}
-@@ -2718,6 +2985,72 @@ void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+@@ -2635,40 +2947,43 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
+ static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ { ZSTD_compressBlock_fast /* default for 0 */,
+ ZSTD_compressBlock_fast,
+- ZSTD_compressBlock_doubleFast,
+- ZSTD_compressBlock_greedy,
+- ZSTD_compressBlock_lazy,
+- ZSTD_compressBlock_lazy2,
+- ZSTD_compressBlock_btlazy2,
+- ZSTD_compressBlock_btopt,
+- ZSTD_compressBlock_btultra,
+- ZSTD_compressBlock_btultra2 },
++ ZSTD_COMPRESSBLOCK_DOUBLEFAST,
++ ZSTD_COMPRESSBLOCK_GREEDY,
++ ZSTD_COMPRESSBLOCK_LAZY,
++ ZSTD_COMPRESSBLOCK_LAZY2,
++ ZSTD_COMPRESSBLOCK_BTLAZY2,
++ ZSTD_COMPRESSBLOCK_BTOPT,
++ ZSTD_COMPRESSBLOCK_BTULTRA,
++ ZSTD_COMPRESSBLOCK_BTULTRA2
++ },
+ { ZSTD_compressBlock_fast_extDict /* default for 0 */,
+ ZSTD_compressBlock_fast_extDict,
+- ZSTD_compressBlock_doubleFast_extDict,
+- ZSTD_compressBlock_greedy_extDict,
+- ZSTD_compressBlock_lazy_extDict,
+- ZSTD_compressBlock_lazy2_extDict,
+- ZSTD_compressBlock_btlazy2_extDict,
+- ZSTD_compressBlock_btopt_extDict,
+- ZSTD_compressBlock_btultra_extDict,
+- ZSTD_compressBlock_btultra_extDict },
++ ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT,
++ ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT,
++ ZSTD_COMPRESSBLOCK_LAZY_EXTDICT,
++ ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT,
++ ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT,
++ ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT,
++ ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT,
++ ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT
++ },
+ { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
+ ZSTD_compressBlock_fast_dictMatchState,
+- ZSTD_compressBlock_doubleFast_dictMatchState,
+- ZSTD_compressBlock_greedy_dictMatchState,
+- ZSTD_compressBlock_lazy_dictMatchState,
+- ZSTD_compressBlock_lazy2_dictMatchState,
+- ZSTD_compressBlock_btlazy2_dictMatchState,
+- ZSTD_compressBlock_btopt_dictMatchState,
+- ZSTD_compressBlock_btultra_dictMatchState,
+- ZSTD_compressBlock_btultra_dictMatchState },
++ ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE,
++ ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE
++ },
+ { NULL /* default for 0 */,
+ NULL,
+ NULL,
+- ZSTD_compressBlock_greedy_dedicatedDictSearch,
+- ZSTD_compressBlock_lazy_dedicatedDictSearch,
+- ZSTD_compressBlock_lazy2_dedicatedDictSearch,
++ ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH,
++ ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH,
++ ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH,
+ NULL,
+ NULL,
+ NULL,
+@@ -2681,18 +2996,26 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
+ DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
+ static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
+- { ZSTD_compressBlock_greedy_row,
+- ZSTD_compressBlock_lazy_row,
+- ZSTD_compressBlock_lazy2_row },
+- { ZSTD_compressBlock_greedy_extDict_row,
+- ZSTD_compressBlock_lazy_extDict_row,
+- ZSTD_compressBlock_lazy2_extDict_row },
+- { ZSTD_compressBlock_greedy_dictMatchState_row,
+- ZSTD_compressBlock_lazy_dictMatchState_row,
+- ZSTD_compressBlock_lazy2_dictMatchState_row },
+- { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
+- ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
+- ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
++ {
++ ZSTD_COMPRESSBLOCK_GREEDY_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY2_ROW
++ },
++ {
++ ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW
++ },
++ {
++ ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW
++ },
++ {
++ ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW,
++ ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW
++ }
+ };
+ DEBUGLOG(4, "Selecting a row-based matchfinder");
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+@@ -2718,6 +3041,72 @@ void ZSTD_resetSeqStore(seqStore_t* ssPtr)
ssPtr->longLengthType = ZSTD_llt_none;
}
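[Annotation: the strategy tables above now go through ZSTD_COMPRESSBLOCK_* macros instead of naming the ZSTD_compressBlock_* functions directly. Together with the ZSTD_EXCLUDE_*_BLOCK_COMPRESSOR guards added to the dictionary-loading hunks further down, this lets unused matchfinders be compiled out of the kernel build. A sketch of the indirection this implies; the actual definitions live in the matchfinder headers, outside this excerpt, and are only presumed to take this shape:

    /* Presumed shape of the per-strategy macros: resolve to the real
     * function, or to NULL when the strategy is excluded at build time. */
    #ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
    #  define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
    #else
    #  define ZSTD_COMPRESSBLOCK_BTOPT NULL  /* strategy compiled out */
    #endif
]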
@@ -5076,7 +6432,7 @@ index f620cafca..c1c316e9e 100644
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
-@@ -2727,7 +3060,9 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+@@ -2727,7 +3116,9 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
@@ -5087,7 +6443,7 @@ index f620cafca..c1c316e9e 100644
if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
} else {
-@@ -2763,6 +3098,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+@@ -2763,6 +3154,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
}
if (zc->externSeqStore.pos < zc->externSeqStore.size) {
assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
@@ -5095,7 +6451,7 @@ index f620cafca..c1c316e9e 100644
+ /* External matchfinder + LDM is technically possible, just not implemented yet.
+ * We need to revisit soon and implement it. */
+ RETURN_ERROR_IF(
-+ zc->appliedParams.useSequenceProducer,
++ ZSTD_hasExtSeqProd(&zc->appliedParams),
+ parameter_combination_unsupported,
+ "Long-distance matching with external sequence producer enabled is not currently supported."
+ );
@@ -5103,14 +6459,14 @@ index f620cafca..c1c316e9e 100644
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&zc->externSeqStore,
-@@ -2774,6 +3118,14 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+@@ -2774,6 +3174,14 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
} else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+ /* External matchfinder + LDM is technically possible, just not implemented yet.
+ * We need to revisit soon and implement it. */
+ RETURN_ERROR_IF(
-+ zc->appliedParams.useSequenceProducer,
++ ZSTD_hasExtSeqProd(&zc->appliedParams),
+ parameter_combination_unsupported,
+ "Long-distance matching with external sequence producer enabled is not currently supported."
+ );
@@ -5118,23 +6474,26 @@ index f620cafca..c1c316e9e 100644
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
/* Updates ldmSeqStore.size */
-@@ -2788,7 +3140,68 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+@@ -2788,10 +3196,74 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
- } else { /* not long range mode */
-+ } else if (zc->appliedParams.useSequenceProducer) {
+- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
+- zc->appliedParams.useRowMatchFinder,
+- dictMode);
++ } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) {
+ assert(
-+ zc->externalMatchCtx.seqBufferCapacity >= ZSTD_sequenceBound(srcSize)
++ zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)
+ );
-+ assert(zc->externalMatchCtx.mFinder != NULL);
++ assert(zc->appliedParams.extSeqProdFunc != NULL);
+
+ { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog;
+
-+ size_t const nbExternalSeqs = (zc->externalMatchCtx.mFinder)(
-+ zc->externalMatchCtx.mState,
-+ zc->externalMatchCtx.seqBuffer,
-+ zc->externalMatchCtx.seqBufferCapacity,
++ size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)(
++ zc->appliedParams.extSeqProdState,
++ zc->extSeqBuf,
++ zc->extSeqBufCapacity,
+ src, srcSize,
+ NULL, 0, /* dict and dictSize, currently not supported */
+ zc->appliedParams.compressionLevel,
@@ -5142,21 +6501,21 @@ index f620cafca..c1c316e9e 100644
+ );
+
+ size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(
-+ zc->externalMatchCtx.seqBuffer,
++ zc->extSeqBuf,
+ nbExternalSeqs,
-+ zc->externalMatchCtx.seqBufferCapacity,
++ zc->extSeqBufCapacity,
+ srcSize
+ );
+
+ /* Return early if there is no error, since we don't need to worry about last literals */
+ if (!ZSTD_isError(nbPostProcessedSeqs)) {
+ ZSTD_sequencePosition seqPos = {0,0,0};
-+ size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs);
++ size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs);
+ RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
+ FORWARD_IF_ERROR(
+ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(
+ zc, &seqPos,
-+ zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs,
++ zc->extSeqBuf, nbPostProcessedSeqs,
+ src, srcSize,
+ zc->appliedParams.searchForExternalRepcodes
+ ),
@@ -5173,9 +6532,11 @@ index f620cafca..c1c316e9e 100644
+ }
+
+ /* Fallback to software matchfinder */
-+ { ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
-+ zc->appliedParams.useRowMatchFinder,
-+ dictMode);
++ { ZSTD_blockCompressor const blockCompressor =
++ ZSTD_selectBlockCompressor(
++ zc->appliedParams.cParams.strategy,
++ zc->appliedParams.useRowMatchFinder,
++ dictMode);
+ ms->ldmSeqStore = NULL;
+ DEBUGLOG(
+ 5,
@@ -5185,30 +6546,177 @@ index f620cafca..c1c316e9e 100644
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ } }
+ } else { /* not long range mode and no external matchfinder */
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
- zc->appliedParams.useRowMatchFinder,
- dictMode);
-@@ -2849,7 +3262,7 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
- /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
- so we provide seqStoreSeqs[i].offset - 1 */
- ZSTD_updateRep(updatedRepcodes.rep,
-- seqStoreSeqs[i].offBase - 1,
-+ seqStoreSeqs[i].offBase,
- seqStoreSeqs[i].litLength == 0);
- literalsRead += outSeqs[i].litLength;
- }
-@@ -2865,6 +3278,10 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
- zc->seqCollector.seqIndex += seqStoreSeqSize;
++ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(
++ zc->appliedParams.cParams.strategy,
++ zc->appliedParams.useRowMatchFinder,
++ dictMode);
+ ms->ldmSeqStore = NULL;
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ }
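[Annotation: ZSTD_buildSeqStore() above now gates the external path on ZSTD_hasExtSeqProd() and invokes the producer through appliedParams.extSeqProdFunc / extSeqProdState, with the fallback branch covering producer failure. The call shape matches zstd's external sequence producer API; a hedged sketch of a producer and its registration, where nullProducer is a hypothetical stand-in that always defers to the internal matchfinder:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Hypothetical no-op producer: reporting failure makes compression
     * fall back to the internal matchfinder (the fallback branch above),
     * provided fallback is enabled on the cctx. */
    static size_t nullProducer(void* state,
                               ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                               const void* src, size_t srcSize,
                               const void* dict, size_t dictSize,
                               int compressionLevel, size_t windowSize)
    {
        (void)state; (void)outSeqs; (void)outSeqsCapacity; (void)src;
        (void)srcSize; (void)dict; (void)dictSize;
        (void)compressionLevel; (void)windowSize;
        return ZSTD_SEQUENCE_PRODUCER_ERROR;
    }

    /* Registration, per zstd's experimental API:
     *   ZSTD_registerSequenceProducer(cctx, NULL, nullProducer);
     *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
     */
]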
+@@ -2801,29 +3273,38 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+ return ZSTDbss_compress;
}
-+size_t ZSTD_sequenceBound(size_t srcSize) {
-+ return (srcSize / ZSTD_MINMATCH_MIN) + 1;
+-static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
++static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const seqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])
+ {
+- const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
+- const seqDef* seqStoreSeqs = seqStore->sequencesStart;
+- size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
+- size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
+- size_t literalsRead = 0;
+- size_t lastLLSize;
++ const seqDef* inSeqs = seqStore->sequencesStart;
++ const size_t nbInSequences = seqStore->sequences - inSeqs;
++ const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart);
+
+- ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
++ ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex;
++ const size_t nbOutSequences = nbInSequences + 1;
++ size_t nbOutLiterals = 0;
++ repcodes_t repcodes;
+ size_t i;
+- repcodes_t updatedRepcodes;
+
+- assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
+- /* Ensure we have enough space for last literals "sequence" */
+- assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
+- ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+- for (i = 0; i < seqStoreSeqSize; ++i) {
+- U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
+- outSeqs[i].litLength = seqStoreSeqs[i].litLength;
+- outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
++ /* Bounds check that we have enough space for every input sequence
++ * and the block delimiter
++ */
++ assert(seqCollector->seqIndex <= seqCollector->maxSequences);
++ RETURN_ERROR_IF(
++ nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex),
++ dstSize_tooSmall,
++ "Not enough space to copy sequences");
++
++ ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes));
++ for (i = 0; i < nbInSequences; ++i) {
++ U32 rawOffset;
++ outSeqs[i].litLength = inSeqs[i].litLength;
++ outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH;
+ outSeqs[i].rep = 0;
+
++ /* Handle the possible single length >= 64K
++ * There can only be one because we add MINMATCH to every match length,
++ * and blocks are at most 128K.
++ */
+ if (i == seqStore->longLengthPos) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ outSeqs[i].litLength += 0x10000;
+@@ -2832,37 +3313,55 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+ }
+ }
+
+- if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
+- /* Derive the correct offset corresponding to a repcode */
+- outSeqs[i].rep = seqStoreSeqs[i].offBase;
++ /* Determine the raw offset given the offBase, which may be a repcode. */
++ if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) {
++ const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase);
++ assert(repcode > 0);
++ outSeqs[i].rep = repcode;
+ if (outSeqs[i].litLength != 0) {
+- rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
++ rawOffset = repcodes.rep[repcode - 1];
+ } else {
+- if (outSeqs[i].rep == 3) {
+- rawOffset = updatedRepcodes.rep[0] - 1;
++ if (repcode == 3) {
++ assert(repcodes.rep[0] > 1);
++ rawOffset = repcodes.rep[0] - 1;
+ } else {
+- rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
++ rawOffset = repcodes.rep[repcode];
+ }
+ }
++ } else {
++ rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase);
+ }
+ outSeqs[i].offset = rawOffset;
+- /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
+- so we provide seqStoreSeqs[i].offset - 1 */
+- ZSTD_updateRep(updatedRepcodes.rep,
+- seqStoreSeqs[i].offBase - 1,
+- seqStoreSeqs[i].litLength == 0);
+- literalsRead += outSeqs[i].litLength;
++
++ /* Update repcode history for the sequence */
++ ZSTD_updateRep(repcodes.rep,
++ inSeqs[i].offBase,
++ inSeqs[i].litLength == 0);
++
++ nbOutLiterals += outSeqs[i].litLength;
+ }
+ /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
+ * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
+ * for the block boundary, according to the API.
+ */
+- assert(seqStoreLiteralsSize >= literalsRead);
+- lastLLSize = seqStoreLiteralsSize - literalsRead;
+- outSeqs[i].litLength = (U32)lastLLSize;
+- outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
+- seqStoreSeqSize++;
+- zc->seqCollector.seqIndex += seqStoreSeqSize;
++ assert(nbInLiterals >= nbOutLiterals);
++ {
++ const size_t lastLLSize = nbInLiterals - nbOutLiterals;
++ outSeqs[nbInSequences].litLength = (U32)lastLLSize;
++ outSeqs[nbInSequences].matchLength = 0;
++ outSeqs[nbInSequences].offset = 0;
++ assert(nbOutSequences == nbInSequences + 1);
++ }
++ seqCollector->seqIndex += nbOutSequences;
++ assert(seqCollector->seqIndex <= seqCollector->maxSequences);
++
++ return 0;
+}
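[Annotation: the rewritten ZSTD_copyBlockSequences() above resolves offBase values through the OFFBASE_* macros and materialises raw offsets via zstd's repcode convention. A standalone restatement of that rule in plain C, independent of zstd's internal types; resolve_repcode is illustrative only:

    #include <stdint.h>

    /* Repcode -> raw offset, as applied in the loop above: with a
     * preceding literal run, repcode N selects rep[N-1]; with
     * litLength == 0 the meaning shifts, and repcode 3 stands for
     * "rep[0] - 1". rep[] holds the three most recent offsets. */
    static uint32_t resolve_repcode(const uint32_t rep[3], uint32_t repcode,
                                    uint32_t litLength)
    {
        if (litLength != 0)
            return rep[repcode - 1];
        return (repcode == 3) ? rep[0] - 1 : rep[repcode];
    }
]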
+
++size_t ZSTD_sequenceBound(size_t srcSize) {
++ const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1;
++ const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1;
++ return maxNbSeq + maxNbDelims;
+ }
+
size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
- size_t outSeqsSize, const void* src, size_t srcSize)
- {
-@@ -2910,19 +3327,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
+@@ -2871,6 +3370,16 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ const size_t dstCapacity = ZSTD_compressBound(srcSize);
+ void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ SeqCollector seqCollector;
++ {
++ int targetCBlockSize;
++ FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), "");
++ RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0");
++ }
++ {
++ int nbWorkers;
++ FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), "");
++ RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0");
++ }
+
+ RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
+
+@@ -2880,8 +3389,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ seqCollector.maxSequences = outSeqsSize;
+ zc->seqCollector = seqCollector;
+
+- ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+- ZSTD_customFree(dst, ZSTD_defaultCMem);
++ {
++ const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
++ ZSTD_customFree(dst, ZSTD_defaultCMem);
++ FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed");
++ }
++ assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize));
+ return zc->seqCollector.seqIndex;
+ }
+
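[Annotation: with the hunk above, ZSTD_generateSequences() rejects nbWorkers != 0 and targetCBlockSize != 0 up front, and a failed internal ZSTD_compress2() is now forwarded instead of silently discarded, so callers must check the result. A small usage sketch; collect_sequences is a hypothetical wrapper:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Hypothetical wrapper: returns the sequence count, or 0 on error. */
    static size_t collect_sequences(ZSTD_CCtx* cctx,
                                    ZSTD_Sequence* seqs, size_t capacity,
                                    const void* src, size_t srcSize)
    {
        size_t const ret = ZSTD_generateSequences(cctx, seqs, capacity,
                                                  src, srcSize);
        return ZSTD_isError(ret) ? 0 : ret;
    }
]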
+@@ -2910,19 +3423,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
const size_t unrollMask = unrollSize - 1;
const size_t prefixLength = length & unrollMask;
size_t i;
@@ -5230,7 +6738,7 @@ index f620cafca..c1c316e9e 100644
return 1;
}
-@@ -2938,7 +3353,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+@@ -2938,7 +3449,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore)
return nbSeqs < 4 && nbLits < 10;
}
@@ -5240,7 +6748,7 @@ index f620cafca..c1c316e9e 100644
{
ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
bs->prevCBlock = bs->nextCBlock;
-@@ -2946,7 +3362,9 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c
+@@ -2946,7 +3458,9 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c
}
/* Writes the block header */
@@ -5251,7 +6759,7 @@ index f620cafca..c1c316e9e 100644
U32 const cBlockHeader = cSize == 1 ?
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
-@@ -2959,13 +3377,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB
+@@ -2959,13 +3473,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB
* Stores literals block type (raw, rle, compressed, repeat) and
* huffman description table to hufMetadata.
* Requires ENTROPY_WORKSPACE_SIZE workspace
@@ -5275,7 +6783,7 @@ index f620cafca..c1c316e9e 100644
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
-@@ -2973,9 +3394,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
+@@ -2973,9 +3490,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
BYTE* const nodeWksp = countWkspStart + countWkspSize;
@@ -5287,7 +6795,7 @@ index f620cafca..c1c316e9e 100644
HUF_repeat repeat = prevHuf->repeatMode;
DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
-@@ -2990,73 +3411,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
+@@ -2990,73 +3507,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
/* small ? don't even attempt compression (speed opt) */
#ifndef COMPRESS_LITERALS_SIZE_MIN
@@ -5399,7 +6907,7 @@ index f620cafca..c1c316e9e 100644
}
}
-@@ -3066,8 +3491,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
+@@ -3066,8 +3587,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi
* and updates nextEntropy to the appropriate repeatMode.
*/
static ZSTD_symbolEncodingTypeStats_t
@@ -5411,7 +6919,7 @@ index f620cafca..c1c316e9e 100644
nextEntropy->litlength_repeatMode = FSE_repeat_none;
nextEntropy->offcode_repeatMode = FSE_repeat_none;
nextEntropy->matchlength_repeatMode = FSE_repeat_none;
-@@ -3078,16 +3504,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
+@@ -3078,16 +3600,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
* Builds entropy for the sequences.
* Stores symbol compression modes and fse table to fseMetadata.
* Requires ENTROPY_WORKSPACE_SIZE wksp.
@@ -5438,7 +6946,7 @@ index f620cafca..c1c316e9e 100644
BYTE* const ostart = fseMetadata->fseTablesBuffer;
BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
BYTE* op = ostart;
-@@ -3114,23 +3542,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
+@@ -3114,23 +3638,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
/* ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
* Requires workspace size ENTROPY_WORKSPACE_SIZE
@@ -5478,7 +6986,7 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
entropyMetadata->fseMetadata.fseTablesSize =
ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
-@@ -3143,11 +3576,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+@@ -3143,11 +3672,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
}
/* Returns the size estimate for the literals section (header + content) of a block */
@@ -5496,7 +7004,7 @@ index f620cafca..c1c316e9e 100644
{
unsigned* const countWksp = (unsigned*)workspace;
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
-@@ -3169,12 +3603,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz
+@@ -3169,12 +3699,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz
}
/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
@@ -5516,7 +7024,7 @@ index f620cafca..c1c316e9e 100644
{
unsigned* const countWksp = (unsigned*)workspace;
const BYTE* ctp = codeTable;
-@@ -3206,99 +3641,107 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
+@@ -3206,99 +3737,107 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
}
/* Returns the size estimate for the sequences section (header + content) of a block */
@@ -5669,7 +7177,7 @@ index f620cafca..c1c316e9e 100644
return matchBytes;
}
-@@ -3307,15 +3750,12 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
+@@ -3307,15 +3846,12 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
*/
static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
const seqStore_t* originalSeqStore,
@@ -5688,7 +7196,7 @@ index f620cafca..c1c316e9e 100644
}
/* Move longLengthPos into the correct position if necessary */
-@@ -3328,13 +3768,12 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
+@@ -3328,13 +3864,12 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
}
resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
@@ -5705,7 +7213,7 @@ index f620cafca..c1c316e9e 100644
}
resultSeqStore->llCode += startIdx;
resultSeqStore->mlCode += startIdx;
-@@ -3342,20 +3781,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
+@@ -3342,20 +3877,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
}
/*
@@ -5742,7 +7250,7 @@ index f620cafca..c1c316e9e 100644
}
/*
-@@ -3371,30 +3816,33 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c
+@@ -3371,30 +3912,33 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c
* 1-3 : repcode 1-3
* 4+ : real_offset+3
*/
@@ -5787,7 +7295,7 @@ index f620cafca..c1c316e9e 100644
}
}
-@@ -3404,10 +3852,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
+@@ -3404,10 +3948,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
* Returns the total size of that block (including header) or a ZSTD error code.
*/
static size_t
@@ -5801,7 +7309,18 @@ index f620cafca..c1c316e9e 100644
U32 lastBlock, U32 isPartition)
{
const U32 rleMaxLength = 25;
-@@ -3481,45 +3930,49 @@ typedef struct {
+@@ -3442,8 +3987,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
+ cSeqsSize = 1;
+ }
+
++ /* Sequence collection not supported when block splitting */
+ if (zc->seqCollector.collectSequences) {
+- ZSTD_copyBlockSequences(zc);
++ FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed");
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+@@ -3481,45 +4027,49 @@ typedef struct {
/* Helper function to perform the recursive search for block splits.
* Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
@@ -5861,7 +7380,7 @@ index f620cafca..c1c316e9e 100644
ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
splits->splitLocations[splits->idx] = (U32)midIdx;
splits->idx++;
-@@ -3527,14 +3980,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end
+@@ -3527,14 +4077,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end
}
}
@@ -5885,7 +7404,7 @@ index f620cafca..c1c316e9e 100644
/* Refuse to try and split anything with less than 4 sequences */
return 0;
}
-@@ -3550,18 +4007,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
+@@ -3550,18 +4104,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
* Returns combined size of all blocks (which includes headers), or a ZSTD error code.
*/
static size_t
@@ -5912,7 +7431,7 @@ index f620cafca..c1c316e9e 100644
/* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
* may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
-@@ -3583,30 +4042,31 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
+@@ -3583,30 +4139,31 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
@@ -5953,7 +7472,7 @@ index f620cafca..c1c316e9e 100644
srcBytesTotal += srcBytes;
if (lastPartition) {
/* This is the final partition, need to account for possible last literals */
-@@ -3621,7 +4081,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
+@@ -3621,7 +4178,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
op, dstCapacity,
ip, srcBytes,
lastBlockEntireSrc, 1 /* isPartition */);
@@ -5963,7 +7482,7 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
ip += srcBytes;
-@@ -3629,10 +4090,10 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
+@@ -3629,10 +4187,10 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac
dstCapacity -= cSizeChunk;
cSize += cSizeChunk;
*currSeqStore = *nextSeqStore;
@@ -5977,7 +7496,7 @@ index f620cafca..c1c316e9e 100644
*/
ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
return cSize;
-@@ -3643,8 +4104,6 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+@@ -3643,8 +4201,6 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 lastBlock)
{
@@ -5986,16 +7505,17 @@ index f620cafca..c1c316e9e 100644
U32 nbSeq;
size_t cSize;
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
-@@ -3655,7 +4114,7 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+@@ -3655,7 +4211,8 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
if (bss == ZSTDbss_noCompress) {
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
- cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
++ RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
+ cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
return cSize;
-@@ -3673,9 +4132,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+@@ -3673,9 +4230,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 frame)
{
@@ -6008,7 +7528,25 @@ index f620cafca..c1c316e9e 100644
*/
const U32 rleMaxLength = 25;
size_t cSize;
-@@ -3767,10 +4226,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
+@@ -3687,11 +4244,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+- if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
++ if (bss == ZSTDbss_noCompress) {
++ RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
++ cSize = 0;
++ goto out;
++ }
+ }
+
+ if (zc->seqCollector.collectSequences) {
+- ZSTD_copyBlockSequences(zc);
++ FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed");
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+@@ -3767,10 +4328,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
* * cSize >= blockBound(srcSize): We have expanded the block too much so
* emit an uncompressed block.
*/
@@ -6023,7 +7561,7 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
-@@ -3778,7 +4238,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
+@@ -3778,7 +4340,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
}
}
}
@@ -6032,7 +7570,7 @@ index f620cafca..c1c316e9e 100644
DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
/* Superblock compression failed, attempt to emit a single no compress block.
-@@ -3836,7 +4296,7 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+@@ -3836,7 +4398,7 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
* All blocks will be terminated, all input will be consumed.
* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
* Frame is supposed already started (header already produced)
@@ -6041,7 +7579,7 @@ index f620cafca..c1c316e9e 100644
*/
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
-@@ -3860,7 +4320,9 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
+@@ -3860,7 +4422,9 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
@@ -6052,7 +7590,7 @@ index f620cafca..c1c316e9e 100644
dstSize_tooSmall,
"not enough space to store compressed block");
if (remaining < blockSize) blockSize = remaining;
-@@ -3899,7 +4361,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
+@@ -3899,7 +4463,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
MEM_writeLE24(op, cBlockHeader);
cSize += ZSTD_blockHeaderSize;
}
@@ -6061,7 +7599,30 @@ index f620cafca..c1c316e9e 100644
ip += blockSize;
-@@ -4078,31 +4540,51 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+@@ -4001,19 +4565,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+ }
+ }
+
+-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
++void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+ {
+- RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
+- "wrong cctx stage");
+- RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
+- parameter_unsupported,
+- "incompatible with ldm");
++ assert(cctx->stage == ZSTDcs_init);
++ assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);
+ cctx->externSeqStore.seq = seq;
+ cctx->externSeqStore.size = nbSeq;
+ cctx->externSeqStore.capacity = nbSeq;
+ cctx->externSeqStore.pos = 0;
+ cctx->externSeqStore.posInSequence = 0;
+- return 0;
+ }
+
+
+@@ -4078,31 +4638,51 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
}
}
@@ -6120,7 +7681,7 @@ index f620cafca..c1c316e9e 100644
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
-@@ -4111,25 +4593,36 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+@@ -4111,25 +4691,36 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* src, size_t srcSize,
@@ -6165,7 +7726,7 @@ index f620cafca..c1c316e9e 100644
/* If the dictionary is too large, only load the suffix of the dictionary. */
if (srcSize > maxDictSize) {
ip = iend - maxDictSize;
-@@ -4138,30 +4631,46 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+@@ -4138,35 +4729,58 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
}
}
@@ -6186,8 +7747,8 @@ index f620cafca..c1c316e9e 100644
ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+ ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
-+ }
-+
+ }
+
+ /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
+ if (params->cParams.strategy < ZSTD_btultra) {
+ U32 maxDictSize = 8U << MIN(MAX(params->cParams.hashLog, params->cParams.chainLog), 28);
@@ -6196,8 +7757,8 @@ index f620cafca..c1c316e9e 100644
+ src = ip;
+ srcSize = maxDictSize;
+ }
- }
-
++ }
++
+ ms->nextToUpdate = (U32)(ip - ms->window.base);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ ms->forceNonContiguous = params->deterministicRefPrefix;
@@ -6217,11 +7778,23 @@ index f620cafca..c1c316e9e 100644
break;
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, iend, dtlm);
++#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
++#else
++ assert(0); /* shouldn't be called: cparams should've been adjusted. */
++#endif
break;
case ZSTD_greedy:
-@@ -4174,7 +4683,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
++#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR)
+ assert(srcSize >= HASH_READ_SIZE);
+ if (ms->dedicatedDictSearch) {
+ assert(ms->chainTable != NULL);
+@@ -4174,7 +4788,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
} else {
assert(params->useRowMatchFinder != ZSTD_ps_auto);
if (params->useRowMatchFinder == ZSTD_ps_enable) {
@@ -6230,7 +7803,44 @@ index f620cafca..c1c316e9e 100644
ZSTD_memset(ms->tagTable, 0, tagTableSize);
ZSTD_row_update(ms, iend-HASH_READ_SIZE);
DEBUGLOG(4, "Using row-based hash table for lazy dict");
-@@ -4327,6 +4836,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+@@ -4183,14 +4797,23 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ DEBUGLOG(4, "Using chain-based hash table for lazy dict");
+ }
+ }
++#else
++ assert(0); /* shouldn't be called: cparams should've been adjusted. */
++#endif
+ break;
+
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
++#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
+ assert(srcSize >= HASH_READ_SIZE);
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
++#else
++ assert(0); /* shouldn't be called: cparams should've been adjusted. */
++#endif
+ break;
+
+ default:
+@@ -4237,11 +4860,10 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+
+ /* We only set the loaded table as valid if it contains all non-zero
+ * weights. Otherwise, we set it to check */
+- if (!hasZeroWeights)
++ if (!hasZeroWeights && maxSymbolValue == 255)
+ bs->entropy.huf.repeatMode = HUF_repeat_valid;
+
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
+- RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
+ dictPtr += hufHeaderSize;
+ }
+
+@@ -4327,6 +4949,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_CCtx_params const* params,
const void* dict, size_t dictSize,
ZSTD_dictTableLoadMethod_e dtlm,
@@ -6238,7 +7848,7 @@ index f620cafca..c1c316e9e 100644
void* workspace)
{
const BYTE* dictPtr = (const BYTE*)dict;
-@@ -4345,7 +4855,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+@@ -4345,7 +4968,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
{
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
@@ -6247,7 +7857,7 @@ index f620cafca..c1c316e9e 100644
}
return dictID;
}
-@@ -4361,6 +4871,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+@@ -4361,6 +4984,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
@@ -6255,7 +7865,7 @@ index f620cafca..c1c316e9e 100644
void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
-@@ -4373,13 +4884,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+@@ -4373,13 +4997,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
@@ -6271,7 +7881,7 @@ index f620cafca..c1c316e9e 100644
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
assert(0); /* impossible */
-@@ -4387,13 +4898,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+@@ -4387,13 +5011,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
/* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
@@ -6287,7 +7897,7 @@ index f620cafca..c1c316e9e 100644
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
-@@ -4426,11 +4938,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+@@ -4426,11 +5051,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, cdict->dictContentType, dtlm,
@@ -6301,7 +7911,7 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
-@@ -4471,11 +4983,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+@@ -4471,11 +5096,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
&cctxParams, pledgedSrcSize);
}
@@ -6316,7 +7926,7 @@ index f620cafca..c1c316e9e 100644
ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
}
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
-@@ -4483,9 +4995,15 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
+@@ -4483,9 +5108,15 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
@@ -6333,7 +7943,35 @@ index f620cafca..c1c316e9e 100644
}
-@@ -4537,9 +5055,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
+@@ -4496,14 +5127,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+ {
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+- size_t fhSize = 0;
+
+ DEBUGLOG(4, "ZSTD_writeEpilogue");
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
+- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
++ size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ dstCapacity -= fhSize;
+ op += fhSize;
+@@ -4513,8 +5143,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+- RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
+- MEM_writeLE32(op, cBlockHeader24);
++ ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3);
++ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue");
++ MEM_writeLE24(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
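[Annotation: ZSTD_writeEpilogue() above now static-asserts that block headers are 3 bytes and writes the empty raw last block with MEM_writeLE24(), dropping the stale 4-byte space check. The header layout it relies on, matching the cBlockHeader formula earlier in this patch, restated as a standalone sketch; write_block_header is illustrative only:

    #include <stdint.h>

    /* zstd block header: bit 0 = last-block flag, bits 1-2 = block type
     * (raw/rle/compressed), remaining bits = block size; stored as three
     * little-endian bytes. The empty raw last block encodes as
     * 1 + (bt_raw << 1) + (0 << 3) = 1, i.e. bytes 01 00 00. */
    static void write_block_header(uint8_t* op, uint32_t lastBlock,
                                   uint32_t blockType, uint32_t blockSize)
    {
        uint32_t const header = lastBlock + (blockType << 1) + (blockSize << 3);
        op[0] = (uint8_t)(header);
        op[1] = (uint8_t)(header >> 8);
        op[2] = (uint8_t)(header >> 16);
    }
]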
+@@ -4537,9 +5168,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
(void)extraCSize;
}
@@ -6346,7 +7984,7 @@ index f620cafca..c1c316e9e 100644
{
size_t endResult;
size_t const cSize = ZSTD_compressContinue_internal(cctx,
-@@ -4563,6 +5081,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+@@ -4563,6 +5194,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
return cSize + endResult;
}
@@ -6361,7 +7999,7 @@ index f620cafca..c1c316e9e 100644
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
-@@ -4591,7 +5117,7 @@ size_t ZSTD_compress_advanced_internal(
+@@ -4591,7 +5230,7 @@ size_t ZSTD_compress_advanced_internal(
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
@@ -6370,7 +8008,7 @@ index f620cafca..c1c316e9e 100644
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
-@@ -4709,7 +5235,7 @@ static size_t ZSTD_initCDict_internal(
+@@ -4709,7 +5348,7 @@ static size_t ZSTD_initCDict_internal(
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
&params, cdict->dictContent, cdict->dictContentSize,
@@ -6379,7 +8017,16 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
-@@ -4906,6 +5432,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
+@@ -4811,7 +5450,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
+ cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
+ customMem);
+
+- if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
++ if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ cctxParams) )) {
+@@ -4906,6 +5545,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
params.cParams = cParams;
params.useRowMatchFinder = useRowMatchFinder;
cdict->useRowMatchFinder = useRowMatchFinder;
@@ -6387,7 +8034,7 @@ index f620cafca..c1c316e9e 100644
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
-@@ -4985,12 +5512,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
+@@ -4985,12 +5625,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
/* ZSTD_compressBegin_usingCDict() :
* cdict must be != NULL */
@@ -6406,7 +8053,7 @@ index f620cafca..c1c316e9e 100644
/*! ZSTD_compress_usingCDict_internal():
* Implementation of various ZSTD_compress_usingCDict* functions.
*/
-@@ -5000,7 +5532,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
+@@ -5000,7 +5645,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
@@ -6415,7 +8062,7 @@ index f620cafca..c1c316e9e 100644
}
/*! ZSTD_compress_usingCDict_advanced():
-@@ -5197,30 +5729,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+@@ -5197,30 +5842,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
@@ -6463,13 +8110,13 @@ index f620cafca..c1c316e9e 100644
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ assert(input->pos >= zcs->stableIn_notConsumed);
+ input->pos -= zcs->stableIn_notConsumed;
-+ ip -= zcs->stableIn_notConsumed;
++ if (ip) ip -= zcs->stableIn_notConsumed;
+ zcs->stableIn_notConsumed = 0;
+ }
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
-@@ -5229,8 +5772,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5229,8 +5885,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
assert(zcs->outBuff != NULL);
assert(zcs->outBuffSize > 0);
}
@@ -6481,7 +8128,7 @@ index f620cafca..c1c316e9e 100644
assert((U32)flushMode <= (U32)ZSTD_e_end);
while (someMoreWork) {
-@@ -5245,7 +5790,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5245,7 +5903,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
|| zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
@@ -6490,7 +8137,7 @@ index f620cafca..c1c316e9e 100644
op, oend-op, ip, iend-ip);
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
-@@ -5262,8 +5807,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5262,8 +5920,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
zcs->inBuff + zcs->inBuffPos, toLoad,
ip, iend-ip);
zcs->inBuffPos += loaded;
@@ -6500,7 +8147,7 @@ index f620cafca..c1c316e9e 100644
if ( (flushMode == ZSTD_e_continue)
&& (zcs->inBuffPos < zcs->inBuffTarget) ) {
/* not enough input to fill full block : stop here */
-@@ -5274,6 +5818,20 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5274,6 +5931,20 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
/* empty */
someMoreWork = 0; break;
}
@@ -6521,7 +8168,7 @@ index f620cafca..c1c316e9e 100644
}
/* compress current block (note : this stage cannot be stopped in the middle) */
DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
-@@ -5281,9 +5839,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5281,9 +5952,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
void* cDst;
size_t cSize;
size_t oSize = oend-op;
@@ -6533,7 +8180,7 @@ index f620cafca..c1c316e9e 100644
if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
cDst = op; /* compress into output buffer, to skip flush stage */
else
-@@ -5291,9 +5848,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5291,9 +5961,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (inputBuffered) {
unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
cSize = lastBlock ?
@@ -6545,7 +8192,7 @@ index f620cafca..c1c316e9e 100644
zcs->inBuff + zcs->inToCompress, iSize);
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
-@@ -5306,19 +5863,16 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+@@ -5306,19 +5976,16 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
@@ -6571,7 +8218,7 @@ index f620cafca..c1c316e9e 100644
}
if (cDst == op) { /* no need to flush */
op += cSize;
-@@ -5388,8 +5942,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf
+@@ -5388,8 +6055,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf
/* After a compression call set the expected input/output buffer.
* This is validated at the start of the next compression call.
*/
@@ -6583,7 +8230,7 @@ index f620cafca..c1c316e9e 100644
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
cctx->expectedInBuffer = *input;
}
-@@ -5408,22 +5964,22 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
+@@ -5408,22 +6077,22 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
{
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
ZSTD_inBuffer const expect = cctx->expectedInBuffer;
@@ -6612,7 +8259,7 @@ index f620cafca..c1c316e9e 100644
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
-@@ -5437,9 +5993,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+@@ -5437,9 +6106,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
params.compressionLevel = cctx->cdict->compressionLevel;
}
DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
@@ -6625,7 +8272,7 @@ index f620cafca..c1c316e9e 100644
? prefixDict.dictSize
: (cctx->cdict ? cctx->cdict->dictContentSize : 0);
ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
-@@ -5451,6 +6007,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+@@ -5451,6 +6120,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
@@ -6635,7 +8282,7 @@ index f620cafca..c1c316e9e 100644
{ U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
-@@ -5477,6 +6036,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+@@ -5477,6 +6149,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
return 0;
}
@@ -6644,7 +8291,7 @@ index f620cafca..c1c316e9e 100644
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
-@@ -5491,8 +6052,27 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+@@ -5491,8 +6165,27 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
@@ -6674,7 +8321,7 @@ index f620cafca..c1c316e9e 100644
}
/* end of transparent initialization stage */
-@@ -5510,13 +6090,20 @@ size_t ZSTD_compressStream2_simpleArgs (
+@@ -5510,13 +6203,20 @@ size_t ZSTD_compressStream2_simpleArgs (
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp)
{
@@ -6701,7 +8348,7 @@ index f620cafca..c1c316e9e 100644
}
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
-@@ -5539,6 +6126,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+@@ -5539,6 +6239,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
/* Reset to the original values. */
cctx->requestedParams.inBufferMode = originalInBufferMode;
cctx->requestedParams.outBufferMode = originalOutBufferMode;
@@ -6709,7 +8356,7 @@ index f620cafca..c1c316e9e 100644
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
-@@ -5549,64 +6137,61 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+@@ -5549,64 +6250,61 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
}
}
@@ -6794,7 +8441,7 @@ index f620cafca..c1c316e9e 100644
if (cctx->cdict) {
dictSize = (U32)cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
-@@ -5615,25 +6200,55 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+@@ -5615,25 +6313,55 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
dictSize = 0;
}
ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
@@ -6806,8 +8453,7 @@ index f620cafca..c1c316e9e 100644
- U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
- ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ U32 offBase;
-
-- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
++
+ if (externalRepSearch == ZSTD_ps_disable) {
+ offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset);
+ } else {
@@ -6815,14 +8461,15 @@ index f620cafca..c1c316e9e 100644
+ offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
+ }
-+
+
+- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
- FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
- cctx->appliedParams.cParams.windowLog, dictSize),
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
-+ cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer),
++ cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
- RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
@@ -6859,7 +8506,7 @@ index f620cafca..c1c316e9e 100644
ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
if (inSeqs[idx].litLength) {
-@@ -5642,26 +6257,15 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+@@ -5642,26 +6370,15 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
ip += inSeqs[idx].litLength;
seqPos->posInSrc += inSeqs[idx].litLength;
}
@@ -6889,7 +8536,7 @@ index f620cafca..c1c316e9e 100644
{
U32 idx = seqPos->idx;
U32 startPosInSequence = seqPos->posInSequence;
-@@ -5673,6 +6277,9 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
+@@ -5673,6 +6390,9 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
U32 bytesAdjustment = 0;
U32 finalMatchSplit = 0;
@@ -6899,7 +8546,7 @@ index f620cafca..c1c316e9e 100644
if (cctx->cdict) {
dictSize = cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
-@@ -5680,7 +6287,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
+@@ -5680,7 +6400,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
} else {
dictSize = 0;
}
@@ -6908,7 +8555,7 @@ index f620cafca..c1c316e9e 100644
DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
-@@ -5688,7 +6295,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
+@@ -5688,7 +6408,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
U32 litLength = currSeq.litLength;
U32 matchLength = currSeq.matchLength;
U32 const rawOffset = currSeq.offset;
@@ -6917,7 +8564,7 @@ index f620cafca..c1c316e9e 100644
/* Modify the sequence depending on where endPosInSequence lies */
if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
-@@ -5702,7 +6309,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
+@@ -5702,7 +6422,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
/* Move to the next sequence */
endPosInSequence -= currSeq.litLength + currSeq.matchLength;
startPosInSequence = 0;
@@ -6925,7 +8572,7 @@ index f620cafca..c1c316e9e 100644
} else {
/* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
does not reach the end of the match. So, we have to split the sequence */
-@@ -5742,21 +6348,23 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
+@@ -5742,21 +6461,23 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
}
/* Check if this offset can be represented with a repcode */
{ U32 const ll0 = (litLength == 0);
@@ -6940,7 +8587,7 @@ index f620cafca..c1c316e9e 100644
- FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
- cctx->appliedParams.cParams.windowLog, dictSize),
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
-+ cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer),
++ cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
"Sequence validation failed");
}
- DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
@@ -6956,7 +8603,7 @@ index f620cafca..c1c316e9e 100644
}
DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
-@@ -5779,7 +6387,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
+@@ -5779,7 +6500,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
@@ -6965,7 +8612,7 @@ index f620cafca..c1c316e9e 100644
static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
{
ZSTD_sequenceCopier sequenceCopier = NULL;
-@@ -5793,6 +6401,57 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+@@ -5793,6 +6514,57 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
return sequenceCopier;
}
@@ -7023,7 +8670,7 @@ index f620cafca..c1c316e9e 100644
/* Compress, block-by-block, all of the sequences given.
*
* Returns the cumulative size of all compressed blocks (including their headers),
-@@ -5805,9 +6464,6 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5805,9 +6577,6 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
const void* src, size_t srcSize)
{
size_t cSize = 0;
@@ -7033,7 +8680,7 @@ index f620cafca..c1c316e9e 100644
size_t remaining = srcSize;
ZSTD_sequencePosition seqPos = {0, 0, 0};
-@@ -5827,22 +6483,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5827,22 +6596,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
}
while (remaining) {
@@ -7069,7 +8716,7 @@ index f620cafca..c1c316e9e 100644
cSize += cBlockSize;
ip += blockSize;
op += cBlockSize;
-@@ -5851,6 +6514,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5851,6 +6627,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
continue;
}
@@ -7077,7 +8724,7 @@ index f620cafca..c1c316e9e 100644
compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
&cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
&cctx->appliedParams,
-@@ -5859,11 +6523,11 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5859,11 +6636,11 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
cctx->bmi2);
FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
@@ -7091,7 +8738,7 @@ index f620cafca..c1c316e9e 100644
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input error."
* This is only an issue for zstd <= v1.4.3
-@@ -5874,12 +6538,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5874,12 +6651,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
if (compressedSeqsSize == 0) {
/* ZSTD_noCompressBlock writes the block header as well */
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
@@ -7108,7 +8755,7 @@ index f620cafca..c1c316e9e 100644
} else {
U32 cBlockHeader;
/* Error checking and repcodes update */
-@@ -5891,11 +6555,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5891,11 +6668,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
MEM_writeLE24(op, cBlockHeader);
cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
@@ -7121,7 +8768,7 @@ index f620cafca..c1c316e9e 100644
if (lastBlock) {
break;
-@@ -5906,12 +6569,15 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+@@ -5906,12 +6682,15 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
dstCapacity -= cBlockSize;
cctx->isFirstBlock = 0;
}
@@ -7138,7 +8785,7 @@ index f620cafca..c1c316e9e 100644
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize)
{
-@@ -5921,7 +6587,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci
+@@ -5921,7 +6700,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci
size_t frameHeaderSize = 0;
/* Transparent initialization stage, same as compressStream2() */
@@ -7147,7 +8794,7 @@ index f620cafca..c1c316e9e 100644
assert(cctx != NULL);
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
/* Begin writing output, starting with frame header */
-@@ -5949,26 +6615,34 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci
+@@ -5949,26 +6728,34 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci
cSize += 4;
}
@@ -7186,7 +8833,7 @@ index f620cafca..c1c316e9e 100644
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
-@@ -6090,7 +6764,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,
+@@ -6090,7 +6877,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,
cp.targetLength = (unsigned)(-clampedCompressionLevel);
}
/* refine parameters based on srcSize & dictSize */
@@ -7195,30 +8842,38 @@ index f620cafca..c1c316e9e 100644
}
}
-@@ -6125,3 +6799,21 @@ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeH
+@@ -6125,3 +6912,29 @@ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeH
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
+
+void ZSTD_registerSequenceProducer(
-+ ZSTD_CCtx* zc, void* mState,
-+ ZSTD_sequenceProducer_F* mFinder
++ ZSTD_CCtx* zc,
++ void* extSeqProdState,
++ ZSTD_sequenceProducer_F extSeqProdFunc
++) {
++ assert(zc != NULL);
++ ZSTD_CCtxParams_registerSequenceProducer(
++ &zc->requestedParams, extSeqProdState, extSeqProdFunc
++ );
++}
++
++void ZSTD_CCtxParams_registerSequenceProducer(
++ ZSTD_CCtx_params* params,
++ void* extSeqProdState,
++ ZSTD_sequenceProducer_F extSeqProdFunc
+) {
-+ if (mFinder != NULL) {
-+ ZSTD_externalMatchCtx emctx;
-+ emctx.mState = mState;
-+ emctx.mFinder = mFinder;
-+ emctx.seqBuffer = NULL;
-+ emctx.seqBufferCapacity = 0;
-+ zc->externalMatchCtx = emctx;
-+ zc->requestedParams.useSequenceProducer = 1;
++ assert(params != NULL);
++ if (extSeqProdFunc != NULL) {
++ params->extSeqProdFunc = extSeqProdFunc;
++ params->extSeqProdState = extSeqProdState;
+ } else {
-+ ZSTD_memset(&zc->externalMatchCtx, 0, sizeof(zc->externalMatchCtx));
-+ zc->requestedParams.useSequenceProducer = 0;
++ params->extSeqProdFunc = NULL;
++ params->extSeqProdState = NULL;
+ }
+}
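[Editor's note, not part of the patch: the two registration functions above are the only way user code attaches an external sequence producer; the cctx-level entry point forwards to the params-level one, and passing a NULL function pointer unregisters. A minimal sketch of the callback contract follows. The signature, the ZSTD_Sequence layout, and the ZSTD_SEQUENCE_PRODUCER_ERROR sentinel follow upstream zstd v1.5.6 and are assumptions about what this kernel port exposes.]

```c
/* A "null" producer sketch: it finds no matches and hands the whole block
 * back as literals, closed by the explicit block delimiter (offset == 0,
 * matchLength == 0) that ZSTD_copySequencesToSeqStoreExplicitBlockDelim
 * expects as the final entry. */
static size_t nullSeqProducer(void* seqProducerState,
                              ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize,
                              int compressionLevel, size_t windowSize)
{
    (void)seqProducerState; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (outSeqsCapacity < 1)
        return ZSTD_SEQUENCE_PRODUCER_ERROR;   /* (size_t)(-1) upstream */
    outSeqs[0].offset      = 0;                /* delimiter: no match */
    outSeqs[0].litLength   = (unsigned)srcSize;
    outSeqs[0].matchLength = 0;
    outSeqs[0].rep         = 0;
    return 1;                                  /* one sequence emitted */
}

static void attachNullProducer(ZSTD_CCtx* cctx)
{
    /* Passing NULL for the function pointer instead would unregister,
     * per the patched ZSTD_CCtxParams_registerSequenceProducer() above. */
    ZSTD_registerSequenceProducer(cctx, NULL /* no state */, nullSeqProducer);
}
```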
diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
-index 71697a11a..899f5e2de 100644
+index 71697a11a..53cb582a8 100644
--- a/lib/zstd/compress/zstd_compress_internal.h
+++ b/lib/zstd/compress/zstd_compress_internal.h
@@ -1,5 +1,6 @@
@@ -7237,6 +8892,15 @@ index 71697a11a..899f5e2de 100644
/*-*************************************
+@@ -32,7 +34,7 @@
+ It's not a big deal though : candidate will just be sorted again.
+ Additionally, candidate position 1 will be lost.
+ But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+- The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
++ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy.
+ This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
+
+
@@ -111,12 +113,13 @@ typedef struct {
/* ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
@@ -7257,7 +8921,7 @@ index 71697a11a..899f5e2de 100644
/* *******************************
* Compression internals structs *
-@@ -142,6 +145,12 @@ typedef struct {
+@@ -142,26 +145,33 @@ typedef struct {
size_t capacity; /* The capacity starting from `seq` pointer */
} rawSeqStore_t;
@@ -7270,19 +8934,47 @@ index 71697a11a..899f5e2de 100644
UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
typedef struct {
-@@ -212,8 +221,10 @@ struct ZSTD_matchState_t {
+- int price;
+- U32 off;
+- U32 mlen;
+- U32 litlen;
+- U32 rep[ZSTD_REP_NUM];
++ int price; /* price from beginning of segment to this position */
++ U32 off; /* offset of previous match */
++ U32 mlen; /* length of previous match */
++ U32 litlen; /* nb of literals since previous match */
++ U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */
+ } ZSTD_optimal_t;
+
+ typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
++#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3)
+ typedef struct {
+ /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+- ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
+- ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
++ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */
++ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */
+
+ U32 litSum; /* nb of literals */
+ U32 litLengthSum; /* nb of litLength codes */
+@@ -212,8 +222,10 @@ struct ZSTD_matchState_t {
U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
- U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+ BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
-+ U64 hashSalt; /* For row-based matchFinder: salts the hash for re-use of tag table */
++ U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */
+ U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */
U32* hashTable;
U32* hashTable3;
-@@ -228,6 +239,18 @@ struct ZSTD_matchState_t {
+@@ -228,6 +240,18 @@ struct ZSTD_matchState_t {
const ZSTD_matchState_t* dictMatchState;
ZSTD_compressionParameters cParams;
const rawSeqStore_t* ldmSeqStore;
@@ -7301,7 +8993,7 @@ index 71697a11a..899f5e2de 100644
};
typedef struct {
-@@ -324,6 +347,24 @@ struct ZSTD_CCtx_params_s {
+@@ -324,6 +348,25 @@ struct ZSTD_CCtx_params_s {
/* Internal use, for createCCtxParams() and freeCCtxParams() only */
ZSTD_customMem customMem;
@@ -7313,10 +9005,11 @@ index 71697a11a..899f5e2de 100644
+ * if the external matchfinder returns an error code. */
+ int enableMatchFinderFallback;
+
-+ /* Indicates whether an external matchfinder has been referenced.
-+ * Users can't set this externally.
-+ * It is set internally in ZSTD_registerSequenceProducer(). */
-+ int useSequenceProducer;
++ /* Parameters for the external sequence producer API.
++ * Users set these parameters through ZSTD_registerSequenceProducer().
++ * It is not possible to set these parameters individually through the public API. */
++ void* extSeqProdState;
++ ZSTD_sequenceProducer_F extSeqProdFunc;
+
+ /* Adjust the max block size*/
+ size_t maxBlockSize;
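[Editor's note, not part of the patch: because the producer is now carried by ZSTD_CCtx_params rather than a dedicated context struct, it survives parameter resets and can be staged on a params object before any cctx exists. A hedged sketch follows; ZSTD_createCCtxParams(), ZSTD_CCtxParams_setParameter(), and the ZSTD_c_enableSeqProducerFallback parameter name follow upstream zstd, whether this kernel port exposes them is an assumption, and myState/myProducer are hypothetical.]

```c
ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();

/* Stage the producer on the params object (same effect as the
 * cctx-level ZSTD_registerSequenceProducer() added earlier in this patch;
 * myState and myProducer are hypothetical user-supplied names). */
ZSTD_CCtxParams_registerSequenceProducer(params, myState, myProducer);

/* Fall back to the internal match finders when the producer returns its
 * error sentinel, instead of failing the compression call (assumed
 * upstream parameter name). */
ZSTD_CCtxParams_setParameter(params, ZSTD_c_enableSeqProducerFallback, 1);
```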
@@ -7326,22 +9019,7 @@ index 71697a11a..899f5e2de 100644
}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
-@@ -355,6 +396,14 @@ typedef struct {
- ZSTD_entropyCTablesMetadata_t entropyMetadata;
- } ZSTD_blockSplitCtx;
-
-+/* Context for block-level external matchfinder API */
-+typedef struct {
-+ void* mState;
-+ ZSTD_sequenceProducer_F* mFinder;
-+ ZSTD_Sequence* seqBuffer;
-+ size_t seqBufferCapacity;
-+} ZSTD_externalMatchCtx;
-+
- struct ZSTD_CCtx_s {
- ZSTD_compressionStage_e stage;
- int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
-@@ -404,6 +453,7 @@ struct ZSTD_CCtx_s {
+@@ -404,6 +447,7 @@ struct ZSTD_CCtx_s {
/* Stable in/out buffer verification */
ZSTD_inBuffer expectedInBuffer;
@@ -7349,13 +9027,14 @@ index 71697a11a..899f5e2de 100644
size_t expectedOutBufferSize;
/* Dictionary */
-@@ -417,9 +467,13 @@ struct ZSTD_CCtx_s {
+@@ -417,9 +461,14 @@ struct ZSTD_CCtx_s {
/* Workspace for block splitter */
ZSTD_blockSplitCtx blockSplitCtx;
+
-+ /* Workspace for external matchfinder */
-+ ZSTD_externalMatchCtx externalMatchCtx;
++ /* Buffer for output from external sequence producer */
++ ZSTD_Sequence* extSeqBuf;
++ size_t extSeqBufCapacity;
};
typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
@@ -7363,7 +9042,7 @@ index 71697a11a..899f5e2de 100644
typedef enum {
ZSTD_noDict = 0,
-@@ -441,7 +495,7 @@ typedef enum {
+@@ -441,7 +490,7 @@ typedef enum {
* In this mode we take both the source size and the dictionary size
* into account when selecting and adjusting the parameters.
*/
@@ -7372,7 +9051,7 @@ index 71697a11a..899f5e2de 100644
* We don't know what these parameters are for. We default to the legacy
* behavior of taking both the source size and the dict size into account
* when selecting and adjusting parameters.
-@@ -500,9 +554,11 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+@@ -500,9 +549,11 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
/* ZSTD_noCompressBlock() :
* Writes uncompressed block to dst buffer from given src.
* Returns the size of the block */
@@ -7385,7 +9064,7 @@ index 71697a11a..899f5e2de 100644
RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
dstSize_tooSmall, "dst buf too small for uncompressed block");
MEM_writeLE24(dst, cBlockHeader24);
-@@ -510,7 +566,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi
+@@ -510,7 +561,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi
return ZSTD_blockHeaderSize + srcSize;
}
@@ -7395,7 +9074,7 @@ index 71697a11a..899f5e2de 100644
{
BYTE* const op = (BYTE*)dst;
U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
-@@ -529,7 +586,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+@@ -529,7 +581,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
{
U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
@@ -7404,7 +9083,7 @@ index 71697a11a..899f5e2de 100644
return (srcSize >> minlog) + 2;
}
-@@ -565,29 +622,27 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
+@@ -565,29 +617,27 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
while (ip < iend) *op++ = *ip++;
}
@@ -7448,7 +9127,7 @@ index 71697a11a..899f5e2de 100644
size_t matchLength)
{
BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
-@@ -596,8 +651,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
+@@ -596,8 +646,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
static const BYTE* g_start = NULL;
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
{ U32 const pos = (U32)((const BYTE*)literals - g_start);
@@ -7459,7 +9138,7 @@ index 71697a11a..899f5e2de 100644
}
#endif
assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
-@@ -607,9 +662,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
+@@ -607,9 +657,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
assert(literals + litLength <= litLimit);
if (litEnd <= litLimit_w) {
/* Common case we can use wildcopy.
@@ -7472,7 +9151,7 @@ index 71697a11a..899f5e2de 100644
ZSTD_copy16(seqStorePtr->lit, literals);
if (litLength > 16) {
ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
-@@ -628,7 +683,7 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
+@@ -628,7 +678,7 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
seqStorePtr->sequences[0].litLength = (U16)litLength;
/* match offset */
@@ -7481,7 +9160,7 @@ index 71697a11a..899f5e2de 100644
/* match Length */
assert(matchLength >= MINMATCH);
-@@ -646,17 +701,17 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
+@@ -646,17 +696,17 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
/* ZSTD_updateRep() :
* updates in-place @rep (array of repeat offsets)
@@ -7504,7 +9183,7 @@ index 71697a11a..899f5e2de 100644
if (repCode > 0) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
rep[2] = (repCode >= 2) ? rep[1] : rep[2];
-@@ -673,11 +728,11 @@ typedef struct repcodes_s {
+@@ -673,11 +723,11 @@ typedef struct repcodes_s {
} repcodes_t;
MEM_STATIC repcodes_t
@@ -7518,7 +9197,7 @@ index 71697a11a..899f5e2de 100644
return newReps;
}
-@@ -685,59 +740,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0
+@@ -685,59 +735,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0
/*-*************************************
* Match length counter
***************************************/
@@ -7578,7 +9257,7 @@ index 71697a11a..899f5e2de 100644
MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
const BYTE* const pStart = pIn;
-@@ -783,32 +785,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+@@ -783,32 +780,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
* Hashes
***************************************/
static const U32 prime3bytes = 506832829U;
@@ -7634,7 +9313,7 @@ index 71697a11a..899f5e2de 100644
switch(mls)
{
default:
-@@ -820,6 +833,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+@@ -820,6 +828,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
}
}
@@ -7659,7 +9338,18 @@ index 71697a11a..899f5e2de 100644
/* ZSTD_ipow() :
* Return base^exponent.
*/
-@@ -1167,10 +1198,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
+@@ -1011,7 +1037,9 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ * The least significant cycleLog bits of the indices must remain the same,
+ * which may be 0. Every index up to maxDist in the past must be valid.
+ */
+-MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
++MEM_STATIC
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+ U32 maxDist, void const* src)
+ {
+ /* preemptive overflow correction:
+@@ -1167,10 +1195,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
(unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
assert(blockEndIdx >= loadedDictEnd);
@@ -7676,7 +9366,18 @@ index 71697a11a..899f5e2de 100644
*/
DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
*loadedDictEndPtr = 0;
-@@ -1302,6 +1338,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+@@ -1199,7 +1232,9 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
+ * forget about the extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+-MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
++MEM_STATIC
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_window_update(ZSTD_window_t* window,
+ void const* src, size_t srcSize,
+ int forceNonContiguous)
+ {
+@@ -1302,6 +1337,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
#endif
@@ -7719,7 +9420,20 @@ index 71697a11a..899f5e2de 100644
/* ===============================================================
-@@ -1396,4 +1468,51 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
+@@ -1381,11 +1452,10 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+- * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
++void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+
+ /* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+@@ -1396,4 +1466,55 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
*/
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
@@ -7750,6 +9464,10 @@ index 71697a11a..899f5e2de 100644
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
+
++/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */
++MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) {
++ return params->extSeqProdFunc != NULL;
++}
+
+/* ===============================================================
+ * Deprecated definitions that are still used internally to avoid
@@ -8100,7 +9818,7 @@ index 7991364c2..7fe6f4ff5 100644
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
-index 17d836cc8..dbacbaf72 100644
+index 17d836cc8..41f6521b2 100644
--- a/lib/zstd/compress/zstd_compress_superblock.c
+++ b/lib/zstd/compress/zstd_compress_superblock.c
@@ -1,5 +1,6 @@
@@ -8149,24 +9867,63 @@ index 17d836cc8..dbacbaf72 100644
- { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
- : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+ { int const flags = bmi2 ? HUF_flags_bmi2 : 0;
-+ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags)
-+ : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags);
++ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags)
++ : HUF_compress4X_usingCTable(op, (size_t)(oend-op), literals, litSize, hufTable, flags);
op += cSize;
cLitSize += cSize;
if (cSize == 0 || ERR_isError(cSize)) {
-@@ -126,7 +126,11 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
- return op-ostart;
+@@ -103,7 +103,7 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+- { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
++ { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
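[Editor's note, not part of the patch: the added (U32) cast only silences an implicit-promotion warning; the 3-byte literals header keeps its 2-2-10-10 layout. Read back with MEM_readLE24(), the fields unpack as in this illustrative snippet.]

```c
static void unpackLiteralsHeader3(unsigned lhc)  /* lhc = MEM_readLE24(header) */
{
    unsigned const hType      =  lhc        & 3;      /* 2 bits: literals block type */
    unsigned const sizeFormat = (lhc >> 2)  & 3;      /* 2 bits: bit 2 set => 4 streams */
    unsigned const litSize    = (lhc >> 4)  & 0x3FF;  /* 10 bits: regenerated size <= 1023 */
    unsigned const cLitSize   = (lhc >> 14) & 0x3FF;  /* 10 bits: compressed size <= 1023 */
    (void)hType; (void)sizeFormat; (void)litSize; (void)cLitSize;
}
```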
+@@ -123,26 +123,30 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ }
+ *entropyWritten = 1;
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
+- return op-ostart;
++ return (size_t)(op-ostart);
}
-static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
+- const seqDef* const sstart = sequences;
+- const seqDef* const send = sequences + nbSeq;
+- const seqDef* sp = sstart;
+static size_t
+ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
-+ const seqDef* sequences, size_t nbSeq,
-+ size_t litSize, int lastSequence)
++ const seqDef* sequences, size_t nbSeqs,
++ size_t litSize, int lastSubBlock)
+{
- const seqDef* const sstart = sequences;
- const seqDef* const send = sequences + nbSeq;
- const seqDef* sp = sstart;
+ size_t matchLengthSum = 0;
+ size_t litLengthSum = 0;
+- (void)(litLengthSum); /* suppress unused variable warning on some environments */
+- while (send-sp > 0) {
+- ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
++ size_t n;
++ for (n=0; n<nbSeqs; n++) {
++ const ZSTD_sequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
+ litLengthSum += seqLen.litLength;
+ matchLengthSum += seqLen.matchLength;
+- sp++;
+ }
+- assert(litLengthSum <= litSize);
+- if (!lastSequence) {
++ DEBUGLOG(5, "ZSTD_seqDecompressedSize: %u sequences from %p: %u literals + %u matchlength",
++ (unsigned)nbSeqs, (const void*)sequences,
++ (unsigned)litLengthSum, (unsigned)matchLengthSum);
++ if (!lastSubBlock)
+ assert(litLengthSum == litSize);
+- }
++ else
++ assert(litLengthSum <= litSize);
++ (void)litLengthSum;
+ return matchLengthSum + litSize;
+ }
+
@@ -156,13 +160,14 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
* @return : compressed size of sequences section of a sub-block
* Or 0 if it is unable to compress
@@ -8189,7 +9946,408 @@ index 17d836cc8..dbacbaf72 100644
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
BYTE* const ostart = (BYTE*)dst;
-@@ -539,7 +544,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+@@ -176,14 +181,14 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "");
+- if (nbSeq < 0x7F)
++ if (nbSeq < 128)
+ *op++ = (BYTE)nbSeq;
+ else if (nbSeq < LONGNBSEQ)
+ op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+ else
+ op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+ if (nbSeq==0) {
+- return op - ostart;
++ return (size_t)(op - ostart);
+ }
+
+ /* seqHead : flags for FSE encoding type */
+@@ -205,7 +210,7 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
+ }
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+- op, oend - op,
++ op, (size_t)(oend - op),
+ fseTables->matchlengthCTable, mlCode,
+ fseTables->offcodeCTable, ofCode,
+ fseTables->litlengthCTable, llCode,
+@@ -249,7 +254,7 @@ static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables
+ #endif
+
+ *entropyWritten = 1;
+- return op - ostart;
++ return (size_t)(op - ostart);
+ }
+
+ /* ZSTD_compressSubBlock() :
+@@ -275,7 +280,8 @@ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
+ litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
+ { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
+ &entropyMetadata->hufMetadata, literals, litSize,
+- op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
++ op, (size_t)(oend-op),
++ bmi2, writeLitEntropy, litEntropyWritten);
+ FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
+ if (cLitSize == 0) return 0;
+ op += cLitSize;
+@@ -285,18 +291,18 @@ static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
+ sequences, nbSeq,
+ llCode, mlCode, ofCode,
+ cctxParams,
+- op, oend-op,
++ op, (size_t)(oend-op),
+ bmi2, writeSeqEntropy, seqEntropyWritten);
+ FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
+ if (cSeqSize == 0) return 0;
+ op += cSeqSize;
+ }
+ /* Write block header */
+- { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
++ { size_t cSize = (size_t)(op-ostart) - ZSTD_blockHeaderSize;
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(ostart, cBlockHeader24);
+ }
+- return op-ostart;
++ return (size_t)(op-ostart);
+ }
+
+ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
+@@ -385,7 +391,11 @@ static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+ }
+
+-static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
++typedef struct {
++ size_t estLitSize;
++ size_t estBlockSize;
++} EstimatedBlockSize;
++static EstimatedBlockSize ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+@@ -393,15 +403,17 @@ static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+- int writeLitEntropy, int writeSeqEntropy) {
+- size_t cSizeEstimate = 0;
+- cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
+- &entropy->huf, &entropyMetadata->hufMetadata,
+- workspace, wkspSize, writeLitEntropy);
+- cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
++ int writeLitEntropy, int writeSeqEntropy)
++{
++ EstimatedBlockSize ebs;
++ ebs.estLitSize = ZSTD_estimateSubBlockSize_literal(literals, litSize,
++ &entropy->huf, &entropyMetadata->hufMetadata,
++ workspace, wkspSize, writeLitEntropy);
++ ebs.estBlockSize = ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+- return cSizeEstimate + ZSTD_blockHeaderSize;
++ ebs.estBlockSize += ebs.estLitSize + ZSTD_blockHeaderSize;
++ return ebs;
+ }
+
+ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
+@@ -415,13 +427,56 @@ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMe
+ return 0;
+ }
+
++static size_t countLiterals(seqStore_t const* seqStore, const seqDef* sp, size_t seqCount)
++{
++ size_t n, total = 0;
++ assert(sp != NULL);
++ for (n=0; n<seqCount; n++) {
++ total += ZSTD_getSequenceLength(seqStore, sp+n).litLength;
++ }
++ DEBUGLOG(6, "countLiterals for %zu sequences from %p => %zu bytes", seqCount, (const void*)sp, total);
++ return total;
++}
++
++#define BYTESCALE 256
++
++static size_t sizeBlockSequences(const seqDef* sp, size_t nbSeqs,
++ size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
++ int firstSubBlock)
++{
++ size_t n, budget = 0, inSize=0;
++ /* entropy headers */
++ size_t const headerSize = (size_t)firstSubBlock * 120 * BYTESCALE; /* generous estimate */
++ assert(firstSubBlock==0 || firstSubBlock==1);
++ budget += headerSize;
++
++ /* first sequence => at least one sequence*/
++ budget += sp[0].litLength * avgLitCost + avgSeqCost;
++ if (budget > targetBudget) return 1;
++ inSize = sp[0].litLength + (sp[0].mlBase+MINMATCH);
++
++ /* loop over sequences */
++ for (n=1; n<nbSeqs; n++) {
++ size_t currentCost = sp[n].litLength * avgLitCost + avgSeqCost;
++ budget += currentCost;
++ inSize += sp[n].litLength + (sp[n].mlBase+MINMATCH);
++ /* stop when sub-block budget is reached */
++ if ( (budget > targetBudget)
++ /* though continue to expand until the sub-block is deemed compressible */
++ && (budget < inSize * BYTESCALE) )
++ break;
++ }
++
++ return n;
++}
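[Editor's note, not part of the patch: sizeBlockSequences() accounts in 1/256ths of a byte (BYTESCALE). The caller prices literals and sequences from whole-block averages, and the function greedily takes sequences until the scaled budget is spent, though it keeps expanding while the running sub-block still looks incompressible (budget < inSize * BYTESCALE). A standalone worked example of that arithmetic with made-up numbers, illustration only.]

```c
#include <assert.h>
#include <stddef.h>

#define SCALE 256   /* mirrors BYTESCALE above */

int main(void)
{
    /* Hypothetical whole-block estimate: 10000 compressed bytes, 6000 of
     * them literals, over 1000 literals and 100 sequences. */
    size_t const estBlockSize = 10000, estLitSize = 6000;
    size_t const nbLiterals = 1000, nbSeqs = 100;
    size_t const target = 1300;   /* targetCBlockSize */

    size_t const avgLitCost = estLitSize * SCALE / nbLiterals;              /* 1536, ~6.0 B/lit */
    size_t const avgSeqCost = (estBlockSize - estLitSize) * SCALE / nbSeqs; /* 10240, ~40 B/seq */

    /* Same rounding as the nbSubBlocks computation in the
     * ZSTD_compressSubBlock_multi() hunk below: nearest, floor of 1. */
    size_t nbSubBlocks = (estBlockSize + target / 2) / target;
    if (nbSubBlocks == 0) nbSubBlocks = 1;

    assert(avgLitCost == 1536 && avgSeqCost == 10240);
    assert(nbSubBlocks == 8);
    assert(estBlockSize * SCALE / nbSubBlocks == 320000);  /* 1250-byte budget each */
    return 0;
}
```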
++
+ /* ZSTD_compressSubBlock_multi() :
+ * Breaks super-block into multiple sub-blocks and compresses them.
+- * Entropy will be written to the first block.
+- * The following blocks will use repeat mode to compress.
+- * All sub-blocks are compressed blocks (no raw or rle blocks).
+- * @return : compressed size of the super block (which is multiple ZSTD blocks)
+- * Or 0 if it failed to compress. */
++ * Entropy will be written into the first block.
++ * The following blocks use repeat_mode to compress.
++ * Sub-blocks are all compressed, except the last one when beneficial.
++ * @return : compressed size of the super block (which features multiple ZSTD blocks)
++ * or 0 if it failed to compress. */
+ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ const ZSTD_compressedBlockState_t* prevCBlock,
+ ZSTD_compressedBlockState_t* nextCBlock,
+@@ -434,10 +489,12 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ {
+ const seqDef* const sstart = seqStorePtr->sequencesStart;
+ const seqDef* const send = seqStorePtr->sequences;
+- const seqDef* sp = sstart;
++ const seqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
++ size_t const nbSeqs = (size_t)(send - sstart);
+ const BYTE* const lstart = seqStorePtr->litStart;
+ const BYTE* const lend = seqStorePtr->lit;
+ const BYTE* lp = lstart;
++ size_t const nbLiterals = (size_t)(lend - lstart);
+ BYTE const* ip = (BYTE const*)src;
+ BYTE const* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*)dst;
+@@ -446,112 +503,171 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ const BYTE* llCodePtr = seqStorePtr->llCode;
+ const BYTE* mlCodePtr = seqStorePtr->mlCode;
+ const BYTE* ofCodePtr = seqStorePtr->ofCode;
+- size_t targetCBlockSize = cctxParams->targetCBlockSize;
+- size_t litSize, seqCount;
+- int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
++ size_t const minTarget = ZSTD_TARGETCBLOCKSIZE_MIN; /* enforce minimum size, to reduce undesirable side effects */
++ size_t const targetCBlockSize = MAX(minTarget, cctxParams->targetCBlockSize);
++ int writeLitEntropy = (entropyMetadata->hufMetadata.hType == set_compressed);
+ int writeSeqEntropy = 1;
+- int lastSequence = 0;
+-
+- DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
+- (unsigned)(lend-lp), (unsigned)(send-sstart));
+-
+- litSize = 0;
+- seqCount = 0;
+- do {
+- size_t cBlockSizeEstimate = 0;
+- if (sstart == send) {
+- lastSequence = 1;
+- } else {
+- const seqDef* const sequence = sp + seqCount;
+- lastSequence = sequence == send - 1;
+- litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
+- seqCount++;
+- }
+- if (lastSequence) {
+- assert(lp <= lend);
+- assert(litSize <= (size_t)(lend - lp));
+- litSize = (size_t)(lend - lp);
++
++ DEBUGLOG(5, "ZSTD_compressSubBlock_multi (srcSize=%u, litSize=%u, nbSeq=%u)",
++ (unsigned)srcSize, (unsigned)(lend-lstart), (unsigned)(send-sstart));
++
++ /* let's start by a general estimation for the full block */
++ if (nbSeqs > 0) {
++ EstimatedBlockSize const ebs =
++ ZSTD_estimateSubBlockSize(lp, nbLiterals,
++ ofCodePtr, llCodePtr, mlCodePtr, nbSeqs,
++ &nextCBlock->entropy, entropyMetadata,
++ workspace, wkspSize,
++ writeLitEntropy, writeSeqEntropy);
++ /* quick estimation */
++ size_t const avgLitCost = nbLiterals ? (ebs.estLitSize * BYTESCALE) / nbLiterals : BYTESCALE;
++ size_t const avgSeqCost = ((ebs.estBlockSize - ebs.estLitSize) * BYTESCALE) / nbSeqs;
++ const size_t nbSubBlocks = MAX((ebs.estBlockSize + (targetCBlockSize/2)) / targetCBlockSize, 1);
++ size_t n, avgBlockBudget, blockBudgetSupp=0;
++ avgBlockBudget = (ebs.estBlockSize * BYTESCALE) / nbSubBlocks;
++ DEBUGLOG(5, "estimated fullblock size=%u bytes ; avgLitCost=%.2f ; avgSeqCost=%.2f ; targetCBlockSize=%u, nbSubBlocks=%u ; avgBlockBudget=%.0f bytes",
++ (unsigned)ebs.estBlockSize, (double)avgLitCost/BYTESCALE, (double)avgSeqCost/BYTESCALE,
++ (unsigned)targetCBlockSize, (unsigned)nbSubBlocks, (double)avgBlockBudget/BYTESCALE);
++ /* simplification: if estimates states that the full superblock doesn't compress, just bail out immediately
++ * this will result in the production of a single uncompressed block covering @srcSize.*/
++ if (ebs.estBlockSize > srcSize) return 0;
++
++ /* compress and write sub-blocks */
++ assert(nbSubBlocks>0);
++ for (n=0; n < nbSubBlocks-1; n++) {
++ /* determine nb of sequences for current sub-block + nbLiterals from next sequence */
++ size_t const seqCount = sizeBlockSequences(sp, (size_t)(send-sp),
++ avgBlockBudget + blockBudgetSupp, avgLitCost, avgSeqCost, n==0);
++ /* if reached last sequence : break to last sub-block (simplification) */
++ assert(seqCount <= (size_t)(send-sp));
++ if (sp + seqCount == send) break;
++ assert(seqCount > 0);
++ /* compress sub-block */
++ { int litEntropyWritten = 0;
++ int seqEntropyWritten = 0;
++ size_t litSize = countLiterals(seqStorePtr, sp, seqCount);
++ const size_t decompressedSize =
++ ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 0);
++ size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
++ sp, seqCount,
++ lp, litSize,
++ llCodePtr, mlCodePtr, ofCodePtr,
++ cctxParams,
++ op, (size_t)(oend-op),
++ bmi2, writeLitEntropy, writeSeqEntropy,
++ &litEntropyWritten, &seqEntropyWritten,
++ 0);
++ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
++
++ /* check compressibility, update state components */
++ if (cSize > 0 && cSize < decompressedSize) {
++ DEBUGLOG(5, "Committed sub-block compressing %u bytes => %u bytes",
++ (unsigned)decompressedSize, (unsigned)cSize);
++ assert(ip + decompressedSize <= iend);
++ ip += decompressedSize;
++ lp += litSize;
++ op += cSize;
++ llCodePtr += seqCount;
++ mlCodePtr += seqCount;
++ ofCodePtr += seqCount;
++ /* Entropy only needs to be written once */
++ if (litEntropyWritten) {
++ writeLitEntropy = 0;
++ }
++ if (seqEntropyWritten) {
++ writeSeqEntropy = 0;
++ }
++ sp += seqCount;
++ blockBudgetSupp = 0;
++ } }
++ /* otherwise : do not compress yet, coalesce current sub-block with following one */
+ }
+- /* I think there is an optimization opportunity here.
+- * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
+- * since it recalculates estimate from scratch.
+- * For example, it would recount literal distribution and symbol codes every time.
+- */
+- cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
+- &nextCBlock->entropy, entropyMetadata,
+- workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
+- if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
+- int litEntropyWritten = 0;
+- int seqEntropyWritten = 0;
+- const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
+- const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+- sp, seqCount,
+- lp, litSize,
+- llCodePtr, mlCodePtr, ofCodePtr,
+- cctxParams,
+- op, oend-op,
+- bmi2, writeLitEntropy, writeSeqEntropy,
+- &litEntropyWritten, &seqEntropyWritten,
+- lastBlock && lastSequence);
+- FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+- if (cSize > 0 && cSize < decompressedSize) {
+- DEBUGLOG(5, "Committed the sub-block");
+- assert(ip + decompressedSize <= iend);
+- ip += decompressedSize;
+- sp += seqCount;
+- lp += litSize;
+- op += cSize;
+- llCodePtr += seqCount;
+- mlCodePtr += seqCount;
+- ofCodePtr += seqCount;
+- litSize = 0;
+- seqCount = 0;
+- /* Entropy only needs to be written once */
+- if (litEntropyWritten) {
+- writeLitEntropy = 0;
+- }
+- if (seqEntropyWritten) {
+- writeSeqEntropy = 0;
+- }
++ } /* if (nbSeqs > 0) */
++
++ /* write last block */
++ DEBUGLOG(5, "Generate last sub-block: %u sequences remaining", (unsigned)(send - sp));
++ { int litEntropyWritten = 0;
++ int seqEntropyWritten = 0;
++ size_t litSize = (size_t)(lend - lp);
++ size_t seqCount = (size_t)(send - sp);
++ const size_t decompressedSize =
++ ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, 1);
++ size_t const cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
++ sp, seqCount,
++ lp, litSize,
++ llCodePtr, mlCodePtr, ofCodePtr,
++ cctxParams,
++ op, (size_t)(oend-op),
++ bmi2, writeLitEntropy, writeSeqEntropy,
++ &litEntropyWritten, &seqEntropyWritten,
++ lastBlock);
++ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
++
++ /* update pointers, the nb of literals borrowed from next sequence must be preserved */
++ if (cSize > 0 && cSize < decompressedSize) {
++ DEBUGLOG(5, "Last sub-block compressed %u bytes => %u bytes",
++ (unsigned)decompressedSize, (unsigned)cSize);
++ assert(ip + decompressedSize <= iend);
++ ip += decompressedSize;
++ lp += litSize;
++ op += cSize;
++ llCodePtr += seqCount;
++ mlCodePtr += seqCount;
++ ofCodePtr += seqCount;
++ /* Entropy only needs to be written once */
++ if (litEntropyWritten) {
++ writeLitEntropy = 0;
+ }
++ if (seqEntropyWritten) {
++ writeSeqEntropy = 0;
++ }
++ sp += seqCount;
+ }
+- } while (!lastSequence);
++ }
++
++
+ if (writeLitEntropy) {
+- DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
++ DEBUGLOG(5, "Literal entropy tables were never written");
+ ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
+ }
+ if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
+ /* If we haven't written our entropy tables, then we've violated our contract and
+ * must emit an uncompressed block.
+ */
+- DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
++ DEBUGLOG(5, "Sequence entropy tables were never written => cancel, emit an uncompressed block");
+ return 0;
+ }
++
+ if (ip < iend) {
+- size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
+- DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
++ /* some data left : last part of the block sent uncompressed */
++ size_t const rSize = (size_t)((iend - ip));
++ size_t const cSize = ZSTD_noCompressBlock(op, (size_t)(oend - op), ip, rSize, lastBlock);
++ DEBUGLOG(5, "Generate last uncompressed sub-block of %u bytes", (unsigned)(rSize));
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ assert(cSize != 0);
+ op += cSize;
+ /* We have to regenerate the repcodes because we've skipped some sequences */
+ if (sp < send) {
+- seqDef const* seq;
++ const seqDef* seq;
repcodes_t rep;
ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
@@ -8198,6 +10356,25 @@ index 17d836cc8..dbacbaf72 100644
}
ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
}
+ }
+- DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
+- return op-ostart;
++
++ DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed all subBlocks: total compressed size = %u",
++ (unsigned)(op-ostart));
++ return (size_t)(op-ostart);
+ }
+
+ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+- void const* src, size_t srcSize,
+- unsigned lastBlock) {
++ const void* src, size_t srcSize,
++ unsigned lastBlock)
++{
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
index 224ece795..826bbc9e0 100644
--- a/lib/zstd/compress/zstd_compress_superblock.h
@@ -8211,7 +10388,7 @@ index 224ece795..826bbc9e0 100644
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
-index 349fc923c..65ea53b62 100644
+index 349fc923c..86bc3c2c2 100644
--- a/lib/zstd/compress/zstd_cwksp.h
+++ b/lib/zstd/compress/zstd_cwksp.h
@@ -1,5 +1,6 @@
@@ -8440,7 +10617,13 @@ index 349fc923c..65ea53b62 100644
/*
* Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
*/
-@@ -361,13 +401,17 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+@@ -356,18 +396,22 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+
+ /*
+ * Aligned on 64 bytes. These buffers have the special property that
+- * their values remain constrained, allowing us to re-use them without
++ * their values remain constrained, allowing us to reuse them without
+ * memset()-ing them.
*/
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
@@ -8470,7 +10653,7 @@ index 349fc923c..65ea53b62 100644
}
ZSTD_cwksp_mark_tables_clean(ws);
}
-@@ -478,10 +522,10 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+@@ -478,14 +522,23 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
ws->tableEnd = ws->objectEnd;
@@ -8484,7 +10667,20 @@ index 349fc923c..65ea53b62 100644
}
ZSTD_cwksp_assert_internal_consistency(ws);
}
-@@ -498,6 +542,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c
+
++MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
++ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
++}
++
++MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
++ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
++ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
++}
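[Editor's note, not part of the patch: moving ZSTD_cwksp_sizeof()/ZSTD_cwksp_used() up here is a pure relocation; their old definitions are deleted by a later hunk below. The used() formula reflects the workspace's two-ended layout: objects and tables grow upward from the start, aligned buffers grow downward from the end, leaving the free region in the middle. A small standalone sketch of that accounting with hypothetical offsets.]

```c
#include <assert.h>

int main(void)
{
    char base[1000];
    /* Hypothetical snapshot of the two-ended allocator: */
    char* const tableEnd   = base + 300;   /* objects + tables grew up to here */
    char* const allocStart = base + 800;   /* buffers grew down to here */
    char* const wkspEnd    = base + 1000;

    unsigned long const used = (unsigned long)(tableEnd - base)
                             + (unsigned long)(wkspEnd - allocStart);
    assert(used == 500);                    /* 300 low end + 200 high end */
    assert(allocStart - tableEnd == 500);   /* free gap in the middle */
    return 0;
}
```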
++
+ /*
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+@@ -498,6 +551,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c
ws->workspaceEnd = (BYTE*)start + size;
ws->objectEnd = ws->workspace;
ws->tableValidEnd = ws->objectEnd;
@@ -8492,6 +10688,22 @@ index 349fc923c..65ea53b62 100644
ws->phase = ZSTD_cwksp_alloc_objects;
ws->isStatic = isStatic;
ZSTD_cwksp_clear(ws);
+@@ -529,15 +583,6 @@ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
+ }
+
+-MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+- return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+-}
+-
+-MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+- return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+- + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+-}
+-
+ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+ return ws->allocFailed;
+ }
@@ -550,17 +595,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
* Returns if the estimated space needed for a wksp is within an acceptable limit of the
* actual amount of space used.
@@ -8516,7 +10728,7 @@ index 349fc923c..65ea53b62 100644
diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
-index 76933dea2..ab9440a99 100644
+index 76933dea2..5ff54f17d 100644
--- a/lib/zstd/compress/zstd_double_fast.c
+++ b/lib/zstd/compress/zstd_double_fast.c
@@ -1,5 +1,6 @@
@@ -8527,11 +10739,16 @@ index 76933dea2..ab9440a99 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -11,8 +12,43 @@
+@@ -11,8 +12,49 @@
#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"
-+static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
++#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+
+-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -8544,8 +10761,7 @@ index 76933dea2..ab9440a99 100644
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
-
--void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
++
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
@@ -8568,11 +10784,13 @@ index 76933dea2..ab9440a99 100644
+ } }
+}
+
-+static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
-@@ -43,7 +79,19 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+@@ -43,11 +85,24 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
@@ -8593,7 +10811,12 @@ index 76933dea2..ab9440a99 100644
}
-@@ -67,7 +115,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls /* template */)
+@@ -67,7 +122,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
@@ -8602,7 +10825,7 @@ index 76933dea2..ab9440a99 100644
size_t mLength;
U32 offset;
-@@ -100,8 +148,8 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+@@ -100,8 +155,8 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
U32 const current = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
U32 const maxRep = current - windowLow;
@@ -8613,7 +10836,7 @@ index 76933dea2..ab9440a99 100644
}
/* Outer Loop: one iteration per match found and stored */
-@@ -131,7 +179,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+@@ -131,7 +186,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
@@ -8622,7 +10845,7 @@ index 76933dea2..ab9440a99 100644
goto _match_stored;
}
-@@ -175,9 +223,13 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+@@ -175,9 +230,13 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
} while (ip1 <= ilimit);
_cleanup:
@@ -8638,7 +10861,7 @@ index 76933dea2..ab9440a99 100644
/* Return the last literals size */
return (size_t)(iend - anchor);
-@@ -217,7 +269,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+@@ -217,7 +276,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
hashLong[hl1] = (U32)(ip1 - base);
}
@@ -8647,7 +10870,7 @@ index 76933dea2..ab9440a99 100644
_match_stored:
/* match found */
-@@ -243,7 +295,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+@@ -243,7 +302,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
@@ -8656,7 +10879,15 @@ index 76933dea2..ab9440a99 100644
ip += rLength;
anchor = ip;
continue; /* faster when present ... (?) */
-@@ -275,7 +327,6 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -254,6 +313,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+
+
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+@@ -275,7 +335,6 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
@@ -8664,7 +10895,7 @@ index 76933dea2..ab9440a99 100644
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
-@@ -286,8 +337,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -286,8 +345,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
@@ -8675,21 +10906,21 @@ index 76933dea2..ab9440a99 100644
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
-@@ -295,6 +346,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -295,6 +354,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
/* if a dictionary is attached, it must be within window range */
assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+ if (ms->prefetchCDictTables) {
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
+ size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
-+ PREFETCH_AREA(dictHashLong, hashTableBytes)
-+ PREFETCH_AREA(dictHashSmall, chainTableBytes)
++ PREFETCH_AREA(dictHashLong, hashTableBytes);
++ PREFETCH_AREA(dictHashSmall, chainTableBytes);
+ }
+
/* init */
ip += (dictAndPrefixLength == 0);
-@@ -309,8 +367,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -309,8 +375,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
U32 offset;
size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
@@ -8704,7 +10935,7 @@ index 76933dea2..ab9440a99 100644
U32 const curr = (U32)(ip-base);
U32 const matchIndexL = hashLong[h2];
U32 matchIndexS = hashSmall[h];
-@@ -328,7 +390,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -328,7 +398,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
ip++;
@@ -8713,7 +10944,7 @@ index 76933dea2..ab9440a99 100644
goto _match_stored;
}
-@@ -340,9 +402,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -340,9 +410,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
goto _match_found;
}
@@ -8725,7 +10956,7 @@ index 76933dea2..ab9440a99 100644
const BYTE* dictMatchL = dictBase + dictMatchIndexL;
assert(dictMatchL < dictEnd);
-@@ -358,9 +420,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -358,9 +428,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
if (MEM_read32(match) == MEM_read32(ip)) {
goto _search_next_long;
}
@@ -8737,7 +10968,7 @@ index 76933dea2..ab9440a99 100644
match = dictBase + dictMatchIndexS;
matchIndexS = dictMatchIndexS + dictIndexDelta;
-@@ -375,10 +437,11 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -375,10 +445,11 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
continue;
_search_next_long:
@@ -8751,7 +10982,7 @@ index 76933dea2..ab9440a99 100644
const BYTE* matchL3 = base + matchIndexL3;
hashLong[hl3] = curr + 1;
-@@ -391,9 +454,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -391,9 +462,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
goto _match_found;
}
@@ -8763,7 +10994,7 @@ index 76933dea2..ab9440a99 100644
const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
assert(dictMatchL3 < dictEnd);
if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
-@@ -419,7 +482,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -419,7 +490,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
offset_2 = offset_1;
offset_1 = offset;
@@ -8772,7 +11003,7 @@ index 76933dea2..ab9440a99 100644
_match_stored:
/* match found */
-@@ -448,7 +511,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -448,7 +519,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
@@ -8781,7 +11012,7 @@ index 76933dea2..ab9440a99 100644
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
-@@ -461,8 +524,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+@@ -461,8 +532,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
} /* while (ip < ilimit) */
/* save reps for next block */
@@ -8792,7 +11023,18 @@ index 76933dea2..ab9440a99 100644
/* Return the last literals size */
return (size_t)(iend - anchor);
-@@ -585,7 +648,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+@@ -527,7 +598,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ }
+
+
+-static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+@@ -585,7 +658,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
@@ -8801,7 +11043,7 @@ index 76933dea2..ab9440a99 100644
} else {
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
-@@ -596,7 +659,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+@@ -596,7 +669,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
@@ -8810,7 +11052,7 @@ index 76933dea2..ab9440a99 100644
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
-@@ -621,7 +684,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+@@ -621,7 +694,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
}
offset_2 = offset_1;
offset_1 = offset;
@@ -8819,7 +11061,7 @@ index 76933dea2..ab9440a99 100644
} else {
ip += ((ip-anchor) >> kSearchStrength) + 1;
-@@ -653,7 +716,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+@@ -653,7 +726,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
@@ -8828,8 +11070,14 @@ index 76933dea2..ab9440a99 100644
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
+@@ -694,3 +767,5 @@ size_t ZSTD_compressBlock_doubleFast_extDict(
+ return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
+ }
+ }
++
++#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
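
The double-fast hunks above do three things: wrap the whole file in ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR so the strategy can be compiled out, annotate the hot search templates with ZSTD_ALLOW_POINTER_OVERFLOW_ATTR, and prefetch an attached dictionary's hash/chain tables when ms->prefetchCDictTables is set (note the added trailing semicolons on the PREFETCH_AREA calls). A minimal sketch of the area-prefetch idea, assuming a GCC-style __builtin_prefetch; the macro names and the 64-byte line size are illustrative, not the kernel's:

    #include <stddef.h>

    #define CACHE_LINE_BYTES 64   /* assumption: typical x86-64 line size */
    #define PREFETCH_READ(p) __builtin_prefetch((p), 0 /* read */, 2 /* moderate locality */)

    /* Touch every cache line of a table once, so the match loop that
     * follows does not stall on cold dictionary memory. */
    static void prefetch_area(const void* p, size_t len)
    {
        const char* const base = (const char*)p;
        size_t off;
        for (off = 0; off < len; off += CACHE_LINE_BYTES)
            PREFETCH_READ(base + off);
    }

The tables are sized as (1 << log) * sizeof(U32), which is exactly what the hunk computes into hashTableBytes and chainTableBytes before the two PREFETCH_AREA calls.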
diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
-index 6822bde65..0204f12e4 100644
+index 6822bde65..b7ddc714f 100644
--- a/lib/zstd/compress/zstd_double_fast.h
+++ b/lib/zstd/compress/zstd_double_fast.h
@@ -1,5 +1,6 @@
@@ -8840,18 +11088,37 @@ index 6822bde65..0204f12e4 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -16,7 +17,8 @@
+@@ -15,8 +16,12 @@
+ #include "../common/mem.h" /* U32 */
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
++#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
++
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
- void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm,
+ ZSTD_tableFillPurpose_e tfp);
++
size_t ZSTD_compressBlock_doubleFast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
+@@ -27,6 +32,14 @@ size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
++#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast
++#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState
++#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict
++#else
++#define ZSTD_COMPRESSBLOCK_DOUBLEFAST NULL
++#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL
++#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */
+
+
+ #endif /* ZSTD_DOUBLE_FAST_H */
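
The header now pairs the exclusion guard with ZSTD_COMPRESSBLOCK_DOUBLEFAST* macros that resolve either to the real entry points or to NULL. That lets the strategy-selection table elsewhere in the compressor keep a fixed shape while excluded strategies compile down to empty slots. A sketch of the pattern, with illustrative names (a real build must also guarantee the NULL slots are never selected):

    #include <stddef.h>

    typedef size_t (*blockCompressor)(const void* src, size_t srcSize);

    #ifndef EXCLUDE_DFAST_BLOCK_COMPRESSOR
    static size_t compress_dfast(const void* src, size_t srcSize)
    {
        (void)src;                 /* placeholder for the real double-fast search */
        return srcSize;
    }
    #define COMPRESSBLOCK_DFAST compress_dfast
    #else
    #define COMPRESSBLOCK_DFAST NULL   /* excluded at build time */
    #endif

    /* Strategy-indexed dispatch table keeps its layout either way;
     * parameter validation must steer callers away from NULL holes. */
    static const blockCompressor blockCompressors[] = {
        COMPRESSBLOCK_DFAST,
    };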
diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
-index a752e6bea..3399b39c5 100644
+index a752e6bea..b7a63ba4c 100644
--- a/lib/zstd/compress/zstd_fast.c
+++ b/lib/zstd/compress/zstd_fast.c
@@ -1,5 +1,6 @@
@@ -8862,11 +11129,13 @@ index a752e6bea..3399b39c5 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -11,8 +12,42 @@
+@@ -11,8 +12,46 @@
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"
-+static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
@@ -8902,11 +11171,13 @@ index a752e6bea..3399b39c5 100644
+ } } } }
+}
+
-+static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
-@@ -25,6 +60,10 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+@@ -25,6 +64,10 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
@@ -8917,7 +11188,7 @@ index a752e6bea..3399b39c5 100644
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
-@@ -42,6 +81,18 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+@@ -42,6 +85,18 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
} } } }
}
@@ -8936,7 +11207,19 @@ index a752e6bea..3399b39c5 100644
/*
* If you squint hard enough (and ignore repcodes), the search operation at any
-@@ -117,7 +168,7 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -89,8 +144,9 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ *
+ * This is also the work we do at the beginning to enter the loop initially.
+ */
+-FORCE_INLINE_TEMPLATE size_t
+-ZSTD_compressBlock_fast_noDict_generic(
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_compressBlock_fast_noDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls, U32 const hasStep)
+@@ -117,7 +173,7 @@ ZSTD_compressBlock_fast_noDict_generic(
U32 rep_offset1 = rep[0];
U32 rep_offset2 = rep[1];
@@ -8945,7 +11228,7 @@ index a752e6bea..3399b39c5 100644
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
-@@ -141,8 +192,8 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -141,8 +197,8 @@ ZSTD_compressBlock_fast_noDict_generic(
{ U32 const curr = (U32)(ip0 - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
U32 const maxRep = curr - windowLow;
@@ -8956,7 +11239,7 @@ index a752e6bea..3399b39c5 100644
}
/* start each op */
-@@ -180,8 +231,14 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -180,8 +236,14 @@ ZSTD_compressBlock_fast_noDict_generic(
mLength = ip0[-1] == match0[-1];
ip0 -= mLength;
match0 -= mLength;
@@ -8972,7 +11255,7 @@ index a752e6bea..3399b39c5 100644
goto _match;
}
-@@ -195,6 +252,12 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -195,6 +257,12 @@ ZSTD_compressBlock_fast_noDict_generic(
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
@@ -8985,7 +11268,7 @@ index a752e6bea..3399b39c5 100644
goto _offset;
}
-@@ -224,6 +287,21 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -224,6 +292,21 @@ ZSTD_compressBlock_fast_noDict_generic(
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
@@ -9007,7 +11290,7 @@ index a752e6bea..3399b39c5 100644
goto _offset;
}
-@@ -254,9 +332,24 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -254,9 +337,24 @@ ZSTD_compressBlock_fast_noDict_generic(
* However, it seems to be a meaningful performance hit to try to search
* them. So let's not. */
@@ -9034,7 +11317,7 @@ index a752e6bea..3399b39c5 100644
/* Return the last literals size */
return (size_t)(iend - anchor);
-@@ -267,7 +360,7 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -267,7 +365,7 @@ ZSTD_compressBlock_fast_noDict_generic(
match0 = base + idx;
rep_offset2 = rep_offset1;
rep_offset1 = (U32)(ip0-match0);
@@ -9043,7 +11326,7 @@ index a752e6bea..3399b39c5 100644
mLength = 4;
/* Count the backwards match length. */
-@@ -287,11 +380,6 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -287,11 +385,6 @@ ZSTD_compressBlock_fast_noDict_generic(
ip0 += mLength;
anchor = ip0;
@@ -9055,7 +11338,7 @@ index a752e6bea..3399b39c5 100644
/* Fill table and check for immediate repcode. */
if (ip0 <= ilimit) {
/* Fill Table */
-@@ -306,7 +394,7 @@ ZSTD_compressBlock_fast_noDict_generic(
+@@ -306,7 +399,7 @@ ZSTD_compressBlock_fast_noDict_generic(
{ U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
@@ -9064,7 +11347,15 @@ index a752e6bea..3399b39c5 100644
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
} } }
-@@ -380,14 +468,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+@@ -369,6 +462,7 @@ size_t ZSTD_compressBlock_fast(
+ }
+
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
+@@ -380,14 +474,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
@@ -9081,7 +11372,7 @@ index a752e6bea..3399b39c5 100644
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
-@@ -397,13 +485,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+@@ -397,13 +491,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
@@ -9098,13 +11389,13 @@ index a752e6bea..3399b39c5 100644
assert(endIndex - prefixStartIndex <= maxDistance);
(void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
-@@ -413,106 +501,155 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+@@ -413,106 +507,155 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
* when translating a dict index into a local index */
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+ if (ms->prefetchCDictTables) {
+ size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
-+ PREFETCH_AREA(dictHashTable, hashTableBytes)
++ PREFETCH_AREA(dictHashTable, hashTableBytes);
+ }
+
/* init */
@@ -9319,7 +11610,18 @@ index a752e6bea..3399b39c5 100644
/* Return the last literals size */
return (size_t)(iend - anchor);
-@@ -553,11 +690,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
+@@ -545,7 +688,9 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
+ }
+
+
+-static size_t ZSTD_compressBlock_fast_extDict_generic(
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
+ {
+@@ -553,11 +698,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
@@ -9332,7 +11634,7 @@ index a752e6bea..3399b39c5 100644
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
-@@ -570,6 +706,28 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
+@@ -570,6 +714,28 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
@@ -9361,7 +11663,7 @@ index a752e6bea..3399b39c5 100644
(void)hasStep; /* not currently specialized on whether it's accelerated */
-@@ -579,75 +737,202 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
+@@ -579,75 +745,202 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
@@ -9625,7 +11927,7 @@ index a752e6bea..3399b39c5 100644
}
ZSTD_GEN_FAST_FN(extDict, 4, 0)
-@@ -660,6 +945,7 @@ size_t ZSTD_compressBlock_fast_extDict(
+@@ -660,6 +953,7 @@ size_t ZSTD_compressBlock_fast_extDict(
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
@@ -9656,7 +11958,7 @@ index fddc2f532..e64d9e1b2 100644
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
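
zstd_fast.c gets the same treatment, plus a split of the table filler: ZSTD_fillHashTable() becomes a dispatcher that picks ZSTD_fillHashTableForCDict() or ZSTD_fillHashTableForCCtx() based on a ZSTD_tableFillPurpose_e argument, since a table built once for a reusable CDict can afford different per-entry work than one rebuilt per compression (the CCtx path, as the retained comment says, only inserts every fastHashFillStep positions). A stripped-down sketch of that dispatch shape; the fill bodies here are placeholders, not zstd's hashing:

    #include <stddef.h>

    typedef enum { tfp_forCCtx, tfp_forCDict } tableFillPurpose_e;

    /* Per-compression fill: sparse, keeps setup cheap. */
    static void fill_for_cctx(unsigned* table, size_t tableSize)
    {
        size_t pos;
        for (pos = 0; pos < tableSize; pos += 3)
            table[pos] = (unsigned)pos;
    }

    /* Dictionary fill: the table is reused many times, so extra
     * per-entry work (e.g. packing tag bits) can pay for itself. */
    static void fill_for_cdict(unsigned* table, size_t tableSize)
    {
        size_t pos;
        for (pos = 0; pos < tableSize; pos++)
            table[pos] = (unsigned)pos;
    }

    /* Single public entry point, mirroring the dispatcher's shape. */
    static void fill_hash_table(unsigned* table, size_t tableSize, tableFillPurpose_e tfp)
    {
        if (tfp == tfp_forCDict) fill_for_cdict(table, tableSize);
        else                     fill_for_cctx(table, tableSize);
    }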
diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
-index 0298a01a7..f6b4978ce 100644
+index 0298a01a7..3e88d8a1a 100644
--- a/lib/zstd/compress/zstd_lazy.c
+++ b/lib/zstd/compress/zstd_lazy.c
@@ -1,5 +1,6 @@
@@ -9667,17 +11969,57 @@ index 0298a01a7..f6b4978ce 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -10,6 +11,9 @@
+@@ -10,14 +11,23 @@
#include "zstd_compress_internal.h"
#include "zstd_lazy.h"
+#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */
+
++#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
++
+#define kLazySkippingStep 8
/*-*************************************
-@@ -197,8 +201,8 @@ ZSTD_DUBT_findBetterDictMatch (
+ * Binary Tree search
+ ***************************************/
+
+-static void
+-ZSTD_updateDUBT(ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iend,
+ U32 mls)
+ {
+@@ -60,8 +70,9 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ * sort one already inserted but unsorted position
+ * assumption : curr >= btlow == (curr - btmask)
+ * doesn't fail */
+-static void
+-ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+ U32 curr, const BYTE* inputEnd,
+ U32 nbCompares, U32 btLow,
+ const ZSTD_dictMode_e dictMode)
+@@ -149,8 +160,9 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+ }
+
+
+-static size_t
+-ZSTD_DUBT_findBetterDictMatch (
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_DUBT_findBetterDictMatch (
+ const ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+@@ -197,8 +209,8 @@ ZSTD_DUBT_findBetterDictMatch (
U32 matchIndex = dictMatchIndex + dictIndexDelta;
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
@@ -9688,7 +12030,7 @@ index 0298a01a7..f6b4978ce 100644
}
if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
break; /* drop, to guarantee consistency (miss a little bit of compression) */
-@@ -218,7 +222,7 @@ ZSTD_DUBT_findBetterDictMatch (
+@@ -218,7 +230,7 @@ ZSTD_DUBT_findBetterDictMatch (
}
if (bestLength >= MINMATCH) {
@@ -9697,16 +12039,22 @@ index 0298a01a7..f6b4978ce 100644
DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
-@@ -230,7 +234,7 @@ ZSTD_DUBT_findBetterDictMatch (
- static size_t
- ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+@@ -227,10 +239,11 @@ ZSTD_DUBT_findBetterDictMatch (
+ }
+
+
+-static size_t
+-ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
- size_t* offsetPtr,
+ size_t* offBasePtr,
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
-@@ -327,8 +331,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+@@ -327,8 +340,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
if (matchLength > bestLength) {
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
@@ -9717,7 +12065,7 @@ index 0298a01a7..f6b4978ce 100644
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
if (dictMode == ZSTD_dictMatchState) {
nbCompares = 0; /* in addition to avoiding checking any
-@@ -361,16 +365,16 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+@@ -361,16 +374,16 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
if (dictMode == ZSTD_dictMatchState && nbCompares) {
bestLength = ZSTD_DUBT_findBetterDictMatch(
ms, ip, iend,
@@ -9737,9 +12085,15 @@ index 0298a01a7..f6b4978ce 100644
}
return bestLength;
}
-@@ -381,14 +385,14 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
- FORCE_INLINE_TEMPLATE size_t
- ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+@@ -378,17 +391,18 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+
+
+ /* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+-FORCE_INLINE_TEMPLATE size_t
+-ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
+ size_t* offBasePtr,
@@ -9754,7 +12108,7 @@ index 0298a01a7..f6b4978ce 100644
}
/* *********************************
-@@ -561,7 +565,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
+@@ -561,7 +575,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
@@ -9763,7 +12117,7 @@ index 0298a01a7..f6b4978ce 100644
if (ip+currentMl == iLimit) {
/* best possible, avoids read overflow on next attempt */
return ml;
-@@ -598,7 +602,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
+@@ -598,7 +612,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
@@ -9772,8 +12126,14 @@ index 0298a01a7..f6b4978ce 100644
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
-@@ -617,7 +621,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
- FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+@@ -614,10 +628,12 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
+
+ /* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+-FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_insertAndFindFirstIndex_internal(
ZSTD_matchState_t* ms,
const ZSTD_compressionParameters* const cParams,
- const BYTE* ip, U32 const mls)
@@ -9781,7 +12141,7 @@ index 0298a01a7..f6b4978ce 100644
{
U32* const hashTable = ms->hashTable;
const U32 hashLog = cParams->hashLog;
-@@ -632,6 +636,9 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+@@ -632,6 +648,9 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
hashTable[h] = idx;
idx++;
@@ -9791,7 +12151,7 @@ index 0298a01a7..f6b4978ce 100644
}
ms->nextToUpdate = target;
-@@ -640,7 +647,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+@@ -640,11 +659,12 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -9800,7 +12160,12 @@ index 0298a01a7..f6b4978ce 100644
}
/* inlining is important to hardwire a hot branch (template emulation) */
-@@ -684,14 +691,15 @@ size_t ZSTD_HcFindBestMatch(
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_HcFindBestMatch(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+@@ -684,14 +704,15 @@ size_t ZSTD_HcFindBestMatch(
}
/* HC4 match finder */
@@ -9818,7 +12183,7 @@ index 0298a01a7..f6b4978ce 100644
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
-@@ -703,7 +711,7 @@ size_t ZSTD_HcFindBestMatch(
+@@ -703,7 +724,7 @@ size_t ZSTD_HcFindBestMatch(
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
@@ -9827,7 +12192,7 @@ index 0298a01a7..f6b4978ce 100644
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
-@@ -739,7 +747,7 @@ size_t ZSTD_HcFindBestMatch(
+@@ -739,7 +760,7 @@ size_t ZSTD_HcFindBestMatch(
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
@@ -9836,7 +12201,7 @@ index 0298a01a7..f6b4978ce 100644
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
-@@ -756,8 +764,6 @@ size_t ZSTD_HcFindBestMatch(
+@@ -756,8 +777,6 @@ size_t ZSTD_HcFindBestMatch(
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
@@ -9845,7 +12210,7 @@ index 0298a01a7..f6b4978ce 100644
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
-@@ -769,64 +775,19 @@ typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 repr
+@@ -769,64 +788,19 @@ typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 repr
* Starting from the LSB, returns the idx of the next non-zero bit.
* Basically counting the nb of trailing zeroes.
*/
@@ -9917,7 +12282,7 @@ index 0298a01a7..f6b4978ce 100644
}
/* ZSTD_isAligned():
-@@ -840,7 +801,7 @@ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
+@@ -840,7 +814,7 @@ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
/* ZSTD_row_prefetch():
* Performs prefetching for the hashTable and tagTable at a given row.
*/
@@ -9926,7 +12291,15 @@ index 0298a01a7..f6b4978ce 100644
PREFETCH_L1(hashTable + relRow);
if (rowLog >= 5) {
PREFETCH_L1(hashTable + relRow + 16);
-@@ -864,13 +825,13 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
+@@ -859,18 +833,20 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* ta
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
+ * but not beyond iLimit.
+ */
+-FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+ U32 const rowLog, U32 const mls,
U32 idx, const BYTE* const iLimit)
{
U32 const* const hashTable = ms->hashTable;
@@ -9942,11 +12315,15 @@ index 0298a01a7..f6b4978ce 100644
U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
-@@ -886,11 +847,12 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
+@@ -885,12 +861,15 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
+ * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
* base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
*/
- FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+-FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
- U16 const* tagTable, BYTE const* base,
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ BYTE const* tagTable, BYTE const* base,
U32 idx, U32 const hashLog,
- U32 const rowLog, U32 const mls)
@@ -9958,8 +12335,20 @@ index 0298a01a7..f6b4978ce 100644
U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
{ U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
-@@ -908,22 +870,21 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
- U32 const rowMask, U32 const useCache)
+@@ -902,28 +881,29 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab
+ /* ZSTD_row_update_internalImpl():
+ * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
+ */
+-FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+- U32 updateStartIdx, U32 const updateEndIdx,
+- U32 const mls, U32 const rowLog,
+- U32 const rowMask, U32 const useCache)
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
++ U32 updateStartIdx, U32 const updateEndIdx,
++ U32 const mls, U32 const rowLog,
++ U32 const rowMask, U32 const useCache)
{
U32* const hashTable = ms->hashTable;
- U16* const tagTable = ms->tagTable;
@@ -9987,7 +12376,22 @@ index 0298a01a7..f6b4978ce 100644
row[pos] = updateStartIdx;
}
}
-@@ -971,7 +932,35 @@ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+@@ -932,9 +912,11 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+ * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
+ * Skips sections of long matches as is necessary.
+ */
+-FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+- U32 const mls, U32 const rowLog,
+- U32 const rowMask, U32 const useCache)
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
++ U32 const mls, U32 const rowLog,
++ U32 const rowMask, U32 const useCache)
+ {
+ U32 idx = ms->nextToUpdate;
+ const BYTE* const base = ms->window.base;
+@@ -971,7 +953,35 @@ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
@@ -10024,7 +12428,7 @@ index 0298a01a7..f6b4978ce 100644
}
#if defined(ZSTD_ARCH_X86_SSE2)
-@@ -994,71 +983,82 @@ ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U
+@@ -994,71 +1004,82 @@ ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U
}
#endif
@@ -10156,7 +12560,7 @@ index 0298a01a7..f6b4978ce 100644
const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
const size_t xFF = ~((size_t)0);
const size_t x01 = xFF / 0xFF;
-@@ -1091,11 +1091,11 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head,
+@@ -1091,11 +1112,11 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head,
}
matches = ~matches;
if (rowEntries == 16) {
@@ -10171,7 +12575,38 @@ index 0298a01a7..f6b4978ce 100644
}
}
#endif
-@@ -1125,7 +1125,7 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1103,20 +1124,21 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head,
+
+ /* The high-level approach of the SIMD row based match finder is as follows:
+ * - Figure out where to insert the new entry:
+- * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
+- * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
++ * - Generate a hash for the current input position and split it into one byte of tag and `rowHashLog` bits of index.
++ * - The hash is salted by a value that changes on every context reset, so when the same table is used
++ * we will avoid collisions that would otherwise slow us down by introducing phantom matches.
++ * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines
+ * which row to insert into.
+- * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
+- * be considered as a circular buffer with a "head" index that resides in the tagTable.
+- * - Also insert the "tag" into the equivalent row and position in the tagTable.
+- * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
+- * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
+- * for alignment/performance reasons, leaving some bytes unused.
+- * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
++ * - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can
++ * be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes
++ * per row).
++ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and
+ * generate a bitfield that we can cycle through to check the collisions in the hash table.
+ * - Pick the longest match.
++ * - Insert the tag into the equivalent row and position in the tagTable.
+ */
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_RowFindBestMatch(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+@@ -1125,7 +1147,7 @@ size_t ZSTD_RowFindBestMatch(
const U32 rowLog)
{
U32* const hashTable = ms->hashTable;
@@ -10180,7 +12615,7 @@ index 0298a01a7..f6b4978ce 100644
U32* const hashCache = ms->hashCache;
const U32 hashLog = ms->rowHashLog;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
-@@ -1143,8 +1143,11 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1143,8 +1165,11 @@ size_t ZSTD_RowFindBestMatch(
const U32 rowEntries = (1U << rowLog);
const U32 rowMask = rowEntries - 1;
const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
@@ -10192,7 +12627,7 @@ index 0298a01a7..f6b4978ce 100644
/* DMS/DDS variables that may be referenced laster */
const ZSTD_matchState_t* const dms = ms->dictMatchState;
-@@ -1168,7 +1171,7 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1168,7 +1193,7 @@ size_t ZSTD_RowFindBestMatch(
if (dictMode == ZSTD_dictMatchState) {
/* Prefetch DMS rows */
U32* const dmsHashTable = dms->hashTable;
@@ -10201,7 +12636,7 @@ index 0298a01a7..f6b4978ce 100644
U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
-@@ -1178,23 +1181,34 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1178,23 +1203,34 @@ size_t ZSTD_RowFindBestMatch(
}
/* Update the hashTable and tagTable up to (but not including) ip */
@@ -10242,7 +12677,7 @@ index 0298a01a7..f6b4978ce 100644
assert(numMatches < rowEntries);
if (matchIndex < lowLimit)
break;
-@@ -1204,13 +1218,14 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1204,13 +1240,14 @@ size_t ZSTD_RowFindBestMatch(
PREFETCH_L1(dictBase + matchIndex);
}
matchBuffer[numMatches++] = matchIndex;
@@ -10258,7 +12693,7 @@ index 0298a01a7..f6b4978ce 100644
row[pos] = ms->nextToUpdate++;
}
-@@ -1224,7 +1239,8 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1224,7 +1261,8 @@ size_t ZSTD_RowFindBestMatch(
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
@@ -10268,7 +12703,7 @@ index 0298a01a7..f6b4978ce 100644
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
-@@ -1236,7 +1252,7 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1236,7 +1274,7 @@ size_t ZSTD_RowFindBestMatch(
/* Save best solution */
if (currentMl > ml) {
ml = currentMl;
@@ -10277,7 +12712,7 @@ index 0298a01a7..f6b4978ce 100644
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
-@@ -1254,19 +1270,21 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1254,19 +1292,21 @@ size_t ZSTD_RowFindBestMatch(
const U32 dmsSize = (U32)(dmsEnd - dmsBase);
const U32 dmsIndexDelta = dictLimit - dmsSize;
@@ -10303,7 +12738,7 @@ index 0298a01a7..f6b4978ce 100644
}
/* Return the longest match */
-@@ -1285,7 +1303,7 @@ size_t ZSTD_RowFindBestMatch(
+@@ -1285,7 +1325,7 @@ size_t ZSTD_RowFindBestMatch(
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
@@ -10312,7 +12747,19 @@ index 0298a01a7..f6b4978ce 100644
if (ip+currentMl == iLimit) break;
}
}
-@@ -1491,7 +1509,8 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1472,8 +1512,9 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
+ * Common parser - lazy strategy
+ *********************************/
+
+-FORCE_INLINE_TEMPLATE size_t
+-ZSTD_compressBlock_lazy_generic(
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_compressBlock_lazy_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+@@ -1491,7 +1532,8 @@ ZSTD_compressBlock_lazy_generic(
const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
@@ -10322,7 +12769,7 @@ index 0298a01a7..f6b4978ce 100644
const int isDMS = dictMode == ZSTD_dictMatchState;
const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
-@@ -1512,8 +1531,8 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1512,8 +1554,8 @@ ZSTD_compressBlock_lazy_generic(
U32 const curr = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
U32 const maxRep = curr - windowLow;
@@ -10333,7 +12780,7 @@ index 0298a01a7..f6b4978ce 100644
}
if (isDxS) {
/* dictMatchState repCode checks don't currently handle repCode == 0
-@@ -1522,10 +1541,11 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1522,10 +1564,11 @@ ZSTD_compressBlock_lazy_generic(
assert(offset_2 <= dictAndPrefixLength);
}
@@ -10348,7 +12795,7 @@ index 0298a01a7..f6b4978ce 100644
}
/* Match Loop */
-@@ -1537,7 +1557,7 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1537,7 +1580,7 @@ ZSTD_compressBlock_lazy_generic(
#endif
while (ip < ilimit) {
size_t matchLength=0;
@@ -10357,7 +12804,7 @@ index 0298a01a7..f6b4978ce 100644
const BYTE* start=ip+1;
DEBUGLOG(7, "search baseline (depth 0)");
-@@ -1562,14 +1582,23 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1562,14 +1605,23 @@ ZSTD_compressBlock_lazy_generic(
}
/* first search (depth 0) */
@@ -10385,7 +12832,7 @@ index 0298a01a7..f6b4978ce 100644
continue;
}
-@@ -1579,12 +1608,12 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1579,12 +1631,12 @@ ZSTD_compressBlock_lazy_generic(
DEBUGLOG(7, "search depth 1");
ip ++;
if ( (dictMode == ZSTD_noDict)
@@ -10401,7 +12848,7 @@ index 0298a01a7..f6b4978ce 100644
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
-@@ -1596,17 +1625,17 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1596,17 +1648,17 @@ ZSTD_compressBlock_lazy_generic(
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 3);
@@ -10426,7 +12873,7 @@ index 0298a01a7..f6b4978ce 100644
continue; /* search a better one */
} }
-@@ -1615,12 +1644,12 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1615,12 +1667,12 @@ ZSTD_compressBlock_lazy_generic(
DEBUGLOG(7, "search depth 2");
ip ++;
if ( (dictMode == ZSTD_noDict)
@@ -10442,7 +12889,7 @@ index 0298a01a7..f6b4978ce 100644
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
-@@ -1632,17 +1661,17 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1632,17 +1684,17 @@ ZSTD_compressBlock_lazy_generic(
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 4);
@@ -10467,7 +12914,7 @@ index 0298a01a7..f6b4978ce 100644
continue;
} } }
break; /* nothing found : store previous solution */
-@@ -1653,26 +1682,33 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1653,26 +1705,33 @@ ZSTD_compressBlock_lazy_generic(
* notably if `value` is unsigned, resulting in a large positive `-value`.
*/
/* catch up */
@@ -10507,7 +12954,7 @@ index 0298a01a7..f6b4978ce 100644
/* check immediate repcode */
if (isDxS) {
-@@ -1686,8 +1722,8 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1686,8 +1745,8 @@ ZSTD_compressBlock_lazy_generic(
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
@@ -10518,7 +12965,7 @@ index 0298a01a7..f6b4978ce 100644
ip += matchLength;
anchor = ip;
continue;
-@@ -1701,16 +1737,20 @@ ZSTD_compressBlock_lazy_generic(
+@@ -1701,166 +1760,181 @@ ZSTD_compressBlock_lazy_generic(
&& (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
/* store sequence */
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
@@ -10544,7 +12991,209 @@ index 0298a01a7..f6b4978ce 100644
/* Return the last literals size */
return (size_t)(iend - anchor);
-@@ -1886,12 +1926,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+ }
++#endif /* build exclusions */
+
+
+-size_t ZSTD_compressBlock_btlazy2(
++#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_lazy2(
++size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+ }
+
+-size_t ZSTD_compressBlock_lazy(
++size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+ }
+
+-size_t ZSTD_compressBlock_greedy(
++size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
++size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+ }
+
+-size_t ZSTD_compressBlock_lazy2_dictMatchState(
++size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+ }
++#endif
+
+-size_t ZSTD_compressBlock_lazy_dictMatchState(
++#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_greedy_dictMatchState(
++size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+ }
+
+-
+-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
++size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+ }
+
+-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
++size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
++size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+ }
+
+-/* Row-based matchfinder */
+-size_t ZSTD_compressBlock_lazy2_row(
++size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+ }
++#endif
+
+-size_t ZSTD_compressBlock_lazy_row(
++#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_greedy_row(
++size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+ }
+
+-size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
++size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+ }
+
+-size_t ZSTD_compressBlock_lazy_dictMatchState_row(
++size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_greedy_dictMatchState_row(
++size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+ }
+
+-
+ size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
+ }
++#endif
+
+-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
++#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+ }
+
+-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
++size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
++ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+ }
++#endif
+
++#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_compressBlock_lazy_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+@@ -1886,12 +1960,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
@@ -10561,7 +13210,7 @@ index 0298a01a7..f6b4978ce 100644
}
/* Match Loop */
-@@ -1903,7 +1944,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -1903,7 +1978,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
#endif
while (ip < ilimit) {
size_t matchLength=0;
@@ -10570,7 +13219,7 @@ index 0298a01a7..f6b4978ce 100644
const BYTE* start=ip+1;
U32 curr = (U32)(ip-base);
-@@ -1922,14 +1963,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -1922,14 +1997,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
} }
/* first search (depth 0) */
@@ -10598,7 +13247,7 @@ index 0298a01a7..f6b4978ce 100644
continue;
}
-@@ -1939,7 +1989,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -1939,7 +2023,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
ip ++;
curr++;
/* check repCode */
@@ -10607,7 +13256,7 @@ index 0298a01a7..f6b4978ce 100644
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-@@ -1951,18 +2001,18 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -1951,18 +2035,18 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 3);
@@ -10633,7 +13282,7 @@ index 0298a01a7..f6b4978ce 100644
continue; /* search a better one */
} }
-@@ -1971,7 +2021,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -1971,7 +2055,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
ip ++;
curr++;
/* check repCode */
@@ -10642,7 +13291,7 @@ index 0298a01a7..f6b4978ce 100644
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-@@ -1983,38 +2033,45 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -1983,38 +2067,45 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 4);
@@ -10699,7 +13348,7 @@ index 0298a01a7..f6b4978ce 100644
/* check immediate repcode */
while (ip <= ilimit) {
-@@ -2029,8 +2086,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+@@ -2029,8 +2120,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
@@ -10710,16 +13359,89 @@ index 0298a01a7..f6b4978ce 100644
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
-@@ -2096,7 +2153,6 @@ size_t ZSTD_compressBlock_lazy_extDict_row(
- size_t ZSTD_compressBlock_lazy2_extDict_row(
+@@ -2045,8 +2136,9 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+ }
++#endif /* build exclusions */
+
+-
++#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+@@ -2054,49 +2146,55 @@ size_t ZSTD_compressBlock_greedy_extDict(
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
+ }
+
+-size_t ZSTD_compressBlock_lazy_extDict(
++size_t ZSTD_compressBlock_greedy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
++ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
}
++#endif
+
+-size_t ZSTD_compressBlock_lazy2_extDict(
++#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+ {
+- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
++ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+ }
+
+-size_t ZSTD_compressBlock_btlazy2_extDict(
++size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+ {
+- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
++ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+ }
++#endif
+
+-size_t ZSTD_compressBlock_greedy_extDict_row(
++#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
++
+ {
+- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
++ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+ }
+
+-size_t ZSTD_compressBlock_lazy_extDict_row(
++size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+-
+ {
+- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
++ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+ }
++#endif
+
+-size_t ZSTD_compressBlock_lazy2_extDict_row(
++#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+
+ {
+- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
++ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+ }
++#endif
diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
-index e5bdf4df8..9505bed93 100644
+index e5bdf4df8..22c9201f4 100644
--- a/lib/zstd/compress/zstd_lazy.h
+++ b/lib/zstd/compress/zstd_lazy.h
@@ -1,5 +1,6 @@
@@ -10730,26 +13452,213 @@ index e5bdf4df8..9505bed93 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -22,6 +23,8 @@
+@@ -22,98 +23,175 @@
*/
#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
+#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
+
++#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
-@@ -113,7 +116,7 @@ size_t ZSTD_compressBlock_lazy2_extDict_row(
+ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+
+ void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
++#endif
+
+-size_t ZSTD_compressBlock_btlazy2(
++#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy2(
++size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy(
++size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy(
++size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy2_row(
++size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_row(
++size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy_row(
++size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-
+-size_t ZSTD_compressBlock_btlazy2_dictMatchState(
++size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy2_dictMatchState(
++
++#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy
++#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row
++#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState
++#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW ZSTD_compressBlock_greedy_dictMatchState_row
++#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH ZSTD_compressBlock_greedy_dedicatedDictSearch
++#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_greedy_dedicatedDictSearch_row
++#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT ZSTD_compressBlock_greedy_extDict
++#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW ZSTD_compressBlock_greedy_extDict_row
++#else
++#define ZSTD_COMPRESSBLOCK_GREEDY NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_ROW NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT NULL
++#define ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW NULL
++#endif
++
++#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_dictMatchState(
++size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy_dictMatchState(
++size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
++size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_dictMatchState_row(
++size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy_dictMatchState_row(
++size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-
+-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
++size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
++size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
++
++#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy
++#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row
++#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE ZSTD_compressBlock_lazy_dictMatchState
++#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy_dictMatchState_row
++#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy_dedicatedDictSearch
++#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy_dedicatedDictSearch_row
++#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT ZSTD_compressBlock_lazy_extDict
++#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW ZSTD_compressBlock_lazy_extDict_row
++#else
++#define ZSTD_COMPRESSBLOCK_LAZY NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_ROW NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT NULL
++#define ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW NULL
++#endif
++
++#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
++size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
++size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
++size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-
+-size_t ZSTD_compressBlock_greedy_extDict(
++size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_extDict(
++size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+ size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_greedy_extDict_row(
++size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy_extDict_row(
++
++#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2
++#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row
++#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE ZSTD_compressBlock_lazy2_dictMatchState
++#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW ZSTD_compressBlock_lazy2_dictMatchState_row
++#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH ZSTD_compressBlock_lazy2_dedicatedDictSearch
++#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW ZSTD_compressBlock_lazy2_dedicatedDictSearch_row
++#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT ZSTD_compressBlock_lazy2_extDict
++#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW ZSTD_compressBlock_lazy2_extDict_row
++#else
++#define ZSTD_COMPRESSBLOCK_LAZY2 NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_ROW NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT NULL
++#define ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW NULL
++#endif
++
++#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_lazy2_extDict_row(
++size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+
++#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2
++#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState
++#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict
++#else
++#define ZSTD_COMPRESSBLOCK_BTLAZY2 NULL
++#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL
++#endif
++
#endif /* ZSTD_LAZY_H */
diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
-index dd86fc83e..b7da76b0d 100644
+index dd86fc83e..07f3bc643 100644
--- a/lib/zstd/compress/zstd_ldm.c
+++ b/lib/zstd/compress/zstd_ldm.c
@@ -1,5 +1,6 @@
@@ -10760,7 +13669,7 @@ index dd86fc83e..b7da76b0d 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -242,11 +243,11 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+@@ -242,11 +243,15 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
switch(ms->cParams.strategy)
{
case ZSTD_fast:
@@ -10770,11 +13679,26 @@ index dd86fc83e..b7da76b0d 100644
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
++#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
++#else
++ assert(0); /* shouldn't be called: cparams should've been adjusted. */
++#endif
break;
case ZSTD_greedy:
-@@ -549,7 +550,7 @@ size_t ZSTD_ldm_generateSequences(
+@@ -318,7 +323,9 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+ }
+ }
+
+-static size_t ZSTD_ldm_generateSequences_internal(
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+ {
+@@ -549,7 +556,7 @@ size_t ZSTD_ldm_generateSequences(
* the window through early invalidation.
* TODO: * Test the chunk size.
* * Try invalidation after the sequence generation and test the
@@ -10783,7 +13707,23 @@ index dd86fc83e..b7da76b0d 100644
*
* NOTE: Because of dictionaries + sequence splitting we MUST make sure
* that any offset used is valid at the END of the sequence, since it may
-@@ -711,7 +712,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+@@ -689,7 +696,6 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ /* maybeSplitSequence updates rawSeqStore->pos */
+ rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+ (U32)(iend - ip), minMatch);
+- int i;
+ /* End signal */
+ if (sequence.offset == 0)
+ break;
+@@ -702,6 +708,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ /* Run the block compressor */
+ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
+ {
++ int i;
+ size_t const newLitLength =
+ blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
+ ip += sequence.litLength;
+@@ -711,7 +718,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
@@ -10817,7 +13757,7 @@ index 647f865be..cfccfc46f 100644
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
-index fd82acfda..1e41cb04f 100644
+index fd82acfda..a87b66ac8 100644
--- a/lib/zstd/compress/zstd_opt.c
+++ b/lib/zstd/compress/zstd_opt.c
@@ -1,5 +1,6 @@
@@ -10828,7 +13768,14 @@ index fd82acfda..1e41cb04f 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -16,7 +17,7 @@
+@@ -12,11 +13,14 @@
+ #include "hist.h"
+ #include "zstd_opt.h"
+
++#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
+
#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_MAX_PRICE (1<<30)
@@ -10837,7 +13784,7 @@ index fd82acfda..1e41cb04f 100644
/*-*************************************
-@@ -26,27 +27,35 @@
+@@ -26,27 +30,35 @@
#if 0 /* approximation at bit level (for tests) */
# define BITCOST_ACCURACY 0
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
@@ -10876,7 +13823,7 @@ index fd82acfda..1e41cb04f 100644
U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
U32 const weight = BWeight + FWeight;
assert(hb + BITCOST_ACCURACY < 31);
-@@ -57,7 +66,7 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+@@ -57,7 +69,7 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
/* debugging function,
* @return price in bytes as fractional value
* for debug messages only */
@@ -10885,7 +13832,7 @@ index fd82acfda..1e41cb04f 100644
{
return (double)price / (BITCOST_MULTIPLIER*8);
}
-@@ -88,20 +97,26 @@ static U32 sum_u32(const unsigned table[], size_t nbElts)
+@@ -88,20 +100,26 @@ static U32 sum_u32(const unsigned table[], size_t nbElts)
return total;
}
@@ -10917,7 +13864,7 @@ index fd82acfda..1e41cb04f 100644
* return the resulting sum of elements */
static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
{
-@@ -110,7 +125,7 @@ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
+@@ -110,7 +128,7 @@ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
assert(logTarget < 30);
if (factor <= 1) return prevsum;
@@ -10926,7 +13873,7 @@ index fd82acfda..1e41cb04f 100644
}
/* ZSTD_rescaleFreqs() :
-@@ -129,18 +144,22 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
+@@ -129,18 +147,22 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
optPtr->priceType = zop_dynamic;
@@ -10953,7 +13900,7 @@ index fd82acfda..1e41cb04f 100644
unsigned lit;
assert(optPtr->litFreq != NULL);
optPtr->litSum = 0;
-@@ -188,13 +207,14 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
+@@ -188,13 +210,14 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
optPtr->offCodeSum += optPtr->offCodeFreq[of];
} }
@@ -10970,7 +13917,7 @@ index fd82acfda..1e41cb04f 100644
}
{ unsigned const baseLLfreqs[MaxLL+1] = {
-@@ -224,10 +244,9 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
+@@ -224,10 +247,9 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
}
@@ -10982,7 +13929,15 @@ index fd82acfda..1e41cb04f 100644
if (compressedLiterals)
optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
-@@ -255,11 +274,14 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+@@ -246,6 +268,7 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+ const optState_t* const optPtr,
+ int optLevel)
+ {
++ DEBUGLOG(8, "ZSTD_rawLiteralsCost (%u literals)", litLength);
+ if (litLength == 0) return 0;
+
+ if (!ZSTD_compressedLiterals(optPtr))
+@@ -255,11 +278,14 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
/* dynamic statistics */
@@ -11000,7 +13955,7 @@ index fd82acfda..1e41cb04f 100644
}
return price;
}
-@@ -272,10 +294,11 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
+@@ -272,10 +298,11 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
assert(litLength <= ZSTD_BLOCKSIZE_MAX);
if (optPtr->priceType == zop_predef)
return WEIGHT(litLength, optLevel);
@@ -11016,7 +13971,7 @@ index fd82acfda..1e41cb04f 100644
*/
if (litLength == ZSTD_BLOCKSIZE_MAX)
return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
-@@ -289,24 +312,25 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
+@@ -289,24 +316,25 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
}
/* ZSTD_getMatchPrice() :
@@ -11048,7 +14003,7 @@ index fd82acfda..1e41cb04f 100644
/* dynamic statistics */
price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
-@@ -325,10 +349,10 @@ ZSTD_getMatchPrice(U32 const offcode,
+@@ -325,10 +353,10 @@ ZSTD_getMatchPrice(U32 const offcode,
}
/* ZSTD_updateStats() :
@@ -11061,7 +14016,7 @@ index fd82acfda..1e41cb04f 100644
{
/* literals */
if (ZSTD_compressedLiterals(optPtr)) {
-@@ -344,8 +368,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
+@@ -344,8 +372,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
optPtr->litLengthSum++;
}
@@ -11072,11 +14027,53 @@ index fd82acfda..1e41cb04f 100644
assert(offCode <= MaxOff);
optPtr->offCodeFreq[offCode]++;
optPtr->offCodeSum++;
-@@ -552,16 +576,17 @@ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
- ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
+@@ -379,9 +407,11 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+
+ /* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+-static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
+- U32* nextToUpdate3,
+- const BYTE* const ip)
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
++ U32* nextToUpdate3,
++ const BYTE* const ip)
+ {
+ U32* const hashTable3 = ms->hashTable3;
+ U32 const hashLog3 = ms->hashLog3;
+@@ -408,7 +438,9 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
+ * @param ip assumed <= iend-8 .
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
+ * @return : nb of positions added */
+-static U32 ZSTD_insertBt1(
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_insertBt1(
+ const ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ U32 const target,
+@@ -527,6 +559,7 @@ static U32 ZSTD_insertBt1(
}
--FORCE_INLINE_TEMPLATE
+ FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_updateTree_internal(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+@@ -535,7 +568,7 @@ void ZSTD_updateTree_internal(
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+- DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
++ DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ idx, target, dictMode);
+
+ while(idx < target) {
+@@ -553,15 +586,18 @@ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+ }
+
+ FORCE_INLINE_TEMPLATE
-U32 ZSTD_insertBtAndGetAllMatches (
- ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
- ZSTD_matchState_t* ms,
@@ -11086,7 +14083,8 @@ index fd82acfda..1e41cb04f 100644
- U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
- const U32 lengthToBeat,
- U32 const mls /* template */)
-+FORCE_INLINE_TEMPLATE U32
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32
+ZSTD_insertBtAndGetAllMatches (
+ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
+ ZSTD_matchState_t* ms,
@@ -11100,7 +14098,7 @@ index fd82acfda..1e41cb04f 100644
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
-@@ -644,7 +669,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
+@@ -644,7 +680,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
repCode, ll0, repOffset, repLen);
bestLength = repLen;
@@ -11109,7 +14107,7 @@ index fd82acfda..1e41cb04f 100644
matches[mnum].len = (U32)repLen;
mnum++;
if ( (repLen > sufficient_len)
-@@ -673,7 +698,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
+@@ -673,7 +709,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
bestLength = mlen;
assert(curr > matchIndex3);
assert(mnum==0); /* no prior solution */
@@ -11118,7 +14116,7 @@ index fd82acfda..1e41cb04f 100644
matches[0].len = (U32)mlen;
mnum = 1;
if ( (mlen > sufficient_len) |
-@@ -706,13 +731,13 @@ U32 ZSTD_insertBtAndGetAllMatches (
+@@ -706,13 +742,13 @@ U32 ZSTD_insertBtAndGetAllMatches (
}
if (matchLength > bestLength) {
@@ -11135,7 +14133,7 @@ index fd82acfda..1e41cb04f 100644
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
-@@ -754,12 +779,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
+@@ -754,12 +790,12 @@ U32 ZSTD_insertBtAndGetAllMatches (
if (matchLength > bestLength) {
matchIndex = dictMatchIndex + dmsIndexDelta;
@@ -11151,7 +14149,18 @@ index fd82acfda..1e41cb04f 100644
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
-@@ -960,7 +985,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+@@ -792,7 +828,9 @@ typedef U32 (*ZSTD_getAllMatchesFn)(
+ U32 const ll0,
+ U32 const lengthToBeat);
+
+-FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++U32 ZSTD_btGetAllMatches_internal(
+ ZSTD_match_t* matches,
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+@@ -960,7 +998,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
{
U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
@@ -11160,7 +14169,7 @@ index fd82acfda..1e41cb04f 100644
U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
/* Ensure that current block position is not outside of the match */
-@@ -971,11 +996,11 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+@@ -971,11 +1009,11 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
}
if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
@@ -11176,16 +14185,84 @@ index fd82acfda..1e41cb04f 100644
(*nbMatches)++;
}
}
-@@ -1062,6 +1087,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
- ZSTD_optimal_t lastSequence;
+@@ -1011,11 +1049,6 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
+ * Optimal parser
+ *********************************/
+
+-static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
+-{
+- return sol.litlen + sol.mlen;
+-}
+-
+ #if 0 /* debug */
+
+ static void
+@@ -1033,7 +1066,13 @@ listStats(const U32* table, int lastEltID)
+
+ #endif
+
+-FORCE_INLINE_TEMPLATE size_t
++#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel)
++#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel)
++#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1))
++
++FORCE_INLINE_TEMPLATE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t
+ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+@@ -1059,9 +1098,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+
+ ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+ ZSTD_match_t* const matches = optStatePtr->matchTable;
+- ZSTD_optimal_t lastSequence;
++ ZSTD_optimal_t lastStretch;
ZSTD_optLdm_t optLdm;
-+ ZSTD_memset(&lastSequence, 0, sizeof(ZSTD_optimal_t));
++ ZSTD_memset(&lastStretch, 0, sizeof(ZSTD_optimal_t));
+
optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
-@@ -1098,14 +1125,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+@@ -1082,103 +1123,139 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ U32 const ll0 = !litlen;
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+- (U32)(ip-istart), (U32)(iend - ip));
+- if (!nbMatches) { ip++; continue; }
++ (U32)(ip-istart), (U32)(iend-ip));
++ if (!nbMatches) {
++ DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart));
++ ip++;
++ continue;
++ }
++
++ /* Match found: let's store this solution, and eventually find more candidates.
++ * During this forward pass, @opt is used to store stretches,
++ * defined as "a match followed by N literals".
++ * Note how this is different from a Sequence, which is "N literals followed by a match".
++ * Storing stretches allows us to store different match predecessors
++ * for each literal position part of a literals run. */
+
+ /* initialize opt[0] */
+- { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+- opt[0].mlen = 0; /* means is_a_literal */
++ opt[0].mlen = 0; /* there are only literals so far */
+ opt[0].litlen = litlen;
+- /* We don't need to include the actual price of the literals because
+- * it is static for the duration of the forward pass, and is included
+- * in every price. We include the literal length to avoid negative
+- * prices when we subtract the previous literal length.
++ /* No need to include the actual price of the literals before the first match
++ * because it is static for the duration of the forward pass, and is included
++ * in every subsequent price. But, we include the literal length because
++ * the cost variation of litlen depends on the value of litlen.
+ */
+- opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
++ opt[0].price = LL_PRICE(litlen);
++ ZSTD_STATIC_ASSERT(sizeof(opt[0].rep[0]) == sizeof(rep[0]));
++ ZSTD_memcpy(&opt[0].rep, rep, sizeof(opt[0].rep));
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
@@ -11197,15 +14274,32 @@ index fd82acfda..1e41cb04f 100644
+ nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
- lastSequence.litlen = litlen;
- lastSequence.mlen = maxML;
+- lastSequence.litlen = litlen;
+- lastSequence.mlen = maxML;
- lastSequence.off = maxOffcode;
-+ lastSequence.off = maxOffBase;
- DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+- DEBUGLOG(6, "large match (%u>%u), immediate encoding",
++ lastStretch.litlen = 0;
++ lastStretch.mlen = maxML;
++ lastStretch.off = maxOffBase;
++ DEBUGLOG(6, "large match (%u>%u) => immediate encoding",
maxML, sufficient_len);
cur = 0;
-@@ -1122,15 +1149,15 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
- opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+- last_pos = ZSTD_totalLen(lastSequence);
++ last_pos = maxML;
+ goto _shortestPath;
+ } }
+
+ /* set prices for first matches starting position == 0 */
+ assert(opt[0].price >= 0);
+- { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+- U32 pos;
++ { U32 pos;
+ U32 matchNb;
+ for (pos = 1; pos < minMatch; pos++) {
+- opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
++ opt[pos].price = ZSTD_MAX_PRICE;
++ opt[pos].mlen = 0;
++ opt[pos].litlen = litlen + pos;
}
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
- U32 const offcode = matches[matchNb].off;
@@ -11213,27 +14307,295 @@ index fd82acfda..1e41cb04f 100644
U32 const end = matches[matchNb].len;
for ( ; pos <= end ; pos++ ) {
- U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
-+ U32 const matchPrice = ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel);
- U32 const sequencePrice = literalsPrice + matchPrice;
+- U32 const sequencePrice = literalsPrice + matchPrice;
++ int const matchPrice = (int)ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel);
++ int const sequencePrice = opt[0].price + matchPrice;
DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
-- pos, ZSTD_fCost(sequencePrice));
-+ pos, ZSTD_fCost((int)sequencePrice));
+ pos, ZSTD_fCost(sequencePrice));
opt[pos].mlen = pos;
- opt[pos].off = offcode;
+- opt[pos].litlen = litlen;
+- opt[pos].price = (int)sequencePrice;
+- } }
+ opt[pos].off = offBase;
- opt[pos].litlen = litlen;
- opt[pos].price = (int)sequencePrice;
++ opt[pos].litlen = 0; /* end of match */
++ opt[pos].price = sequencePrice + LL_PRICE(0);
++ }
++ }
+ last_pos = pos-1;
++ opt[pos].price = ZSTD_MAX_PRICE;
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ const BYTE* const inr = ip + cur;
+- assert(cur < ZSTD_OPT_NUM);
+- DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
++ assert(cur <= ZSTD_OPT_NUM);
++ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur);
+
+ /* Fix current position with one literal if cheaper */
+- { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
++ { U32 const litlen = opt[cur-1].litlen + 1;
+ int const price = opt[cur-1].price
+- + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+- + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+- - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
++ + LIT_PRICE(ip+cur-1)
++ + LL_INCPRICE(litlen);
+ assert(price < 1000000000); /* overflow check */
+ if (price <= opt[cur].price) {
++ ZSTD_optimal_t const prevMatch = opt[cur];
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+- opt[cur].mlen = 0;
+- opt[cur].off = 0;
++ opt[cur] = opt[cur-1];
+ opt[cur].litlen = litlen;
+ opt[cur].price = price;
++ if ( (optLevel >= 1) /* additional check only for higher modes */
++ && (prevMatch.litlen == 0) /* replace a match */
++ && (LL_INCPRICE(1) < 0) /* ll1 is cheaper than ll0 */
++ && LIKELY(ip + cur < iend)
++ ) {
++ /* check next position, in case it would be cheaper */
++ int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1);
++ int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1);
++ DEBUGLOG(7, "then at next rPos %u : match+1lit %.2f vs %ulits %.2f",
++ cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals));
++ if ( (with1literal < withMoreLiterals)
++ && (with1literal < opt[cur+1].price) ) {
++ /* update offset history - before it disappears */
++ U32 const prev = cur - prevMatch.mlen;
++ repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0);
++ assert(cur >= prevMatch.mlen);
++ DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !",
++ ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals),
++ newReps.rep[0], newReps.rep[1], newReps.rep[2] );
++ opt[cur+1] = prevMatch; /* mlen & offbase */
++ ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(repcodes_t));
++ opt[cur+1].litlen = 1;
++ opt[cur+1].price = with1literal;
++ if (last_pos < cur+1) last_pos = cur+1;
++ }
++ }
+ } else {
+- DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+- opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
++ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f)",
++ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
+ }
+ }
+
+- /* Set the repcodes of the current position. We must do it here
+- * because we rely on the repcodes of the 2nd to last sequence being
+- * correct to set the next chunks repcodes during the backward
+- * traversal.
++ /* Offset history is not updated during match comparison.
++ * Do it here, now that the match is selected and confirmed.
+ */
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ assert(cur >= opt[cur].mlen);
+- if (opt[cur].mlen != 0) {
++ if (opt[cur].litlen == 0) {
++ /* just finished a match => alter offset history */
+ U32 const prev = cur - opt[cur].mlen;
+- repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
++ repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+- } else {
+- ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ }
+
+ /* last match must start at a minimum distance of 8 from oend */
+@@ -1188,15 +1265,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+
+ if ( (optLevel==0) /*static_test*/
+ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+- DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
++ DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1);
+ continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ }
+
+ assert(opt[cur].price >= 0);
+- { U32 const ll0 = (opt[cur].mlen != 0);
+- U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+- U32 const previousPrice = (U32)opt[cur].price;
+- U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
++ { U32 const ll0 = (opt[cur].litlen == 0);
++ int const previousPrice = opt[cur].price;
++ int const basePrice = previousPrice + LL_PRICE(0);
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
+ U32 matchNb;
+
+@@ -1208,18 +1284,17 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ continue;
+ }
+
+- { U32 const maxML = matches[nbMatches-1].len;
+- DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+- inr-istart, cur, nbMatches, maxML);
+-
+- if ( (maxML > sufficient_len)
+- || (cur + maxML >= ZSTD_OPT_NUM) ) {
+- lastSequence.mlen = maxML;
+- lastSequence.off = matches[nbMatches-1].off;
+- lastSequence.litlen = litlen;
+- cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
+- last_pos = cur + ZSTD_totalLen(lastSequence);
+- if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
++ { U32 const longestML = matches[nbMatches-1].len;
++ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of longest ML=%u",
++ inr-istart, cur, nbMatches, longestML);
++
++ if ( (longestML > sufficient_len)
++ || (cur + longestML >= ZSTD_OPT_NUM)
++ || (ip + cur + longestML >= iend) ) {
++ lastStretch.mlen = longestML;
++ lastStretch.off = matches[nbMatches-1].off;
++ lastStretch.litlen = 0;
++ last_pos = cur + longestML;
+ goto _shortestPath;
} }
-@@ -1230,7 +1257,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+
+@@ -1230,20 +1305,25 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
U32 mlen;
- DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
+- matchNb, matches[matchNb].off, lastML, litlen);
+ DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u",
- matchNb, matches[matchNb].off, lastML, litlen);
++ matchNb, matches[matchNb].off, lastML, opt[cur].litlen);
for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
-@@ -1296,7 +1323,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ U32 const pos = cur + mlen;
+- int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
++ int const price = basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+
+ if ((pos > last_pos) || (price < opt[pos].price)) {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+- while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
++ while (last_pos < pos) {
++ /* fill empty positions, for future comparisons */
++ last_pos++;
++ opt[last_pos].price = ZSTD_MAX_PRICE;
++ opt[last_pos].litlen = !0; /* just needs to be != 0, to mean "not an end of match" */
++ }
+ opt[pos].mlen = mlen;
+ opt[pos].off = offset;
+- opt[pos].litlen = litlen;
++ opt[pos].litlen = 0;
+ opt[pos].price = price;
+ } else {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+@@ -1251,52 +1331,86 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
+ }
+ } } }
++ opt[last_pos+1].price = ZSTD_MAX_PRICE;
+ } /* for (cur = 1; cur <= last_pos; cur++) */
+
+- lastSequence = opt[last_pos];
+- cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
+- assert(cur < ZSTD_OPT_NUM); /* control overflow*/
++ lastStretch = opt[last_pos];
++ assert(cur >= lastStretch.mlen);
++ cur = last_pos - lastStretch.mlen;
+
+ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
+ assert(opt[0].mlen == 0);
++ assert(last_pos >= lastStretch.mlen);
++ assert(cur == last_pos - lastStretch.mlen);
+
+- /* Set the next chunk's repcodes based on the repcodes of the beginning
+- * of the last match, and the last sequence. This avoids us having to
+- * update them while traversing the sequences.
+- */
+- if (lastSequence.mlen != 0) {
+- repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
+- ZSTD_memcpy(rep, &reps, sizeof(reps));
++ if (lastStretch.mlen==0) {
++ /* no solution : all matches have been converted into literals */
++ assert(lastStretch.litlen == (ip - anchor) + last_pos);
++ ip += last_pos;
++ continue;
++ }
++ assert(lastStretch.off > 0);
++
++ /* Update offset history */
++ if (lastStretch.litlen == 0) {
++ /* finishing on a match : update offset history */
++ repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
++ ZSTD_memcpy(rep, &reps, sizeof(repcodes_t));
+ } else {
+- ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
++ ZSTD_memcpy(rep, lastStretch.rep, sizeof(repcodes_t));
++ assert(cur >= lastStretch.litlen);
++ cur -= lastStretch.litlen;
+ }
+
+- { U32 const storeEnd = cur + 1;
++ /* Let's write the shortest path solution.
++ * It is stored in @opt in reverse order,
++ * starting from @storeEnd (==cur+2),
++ * effectively partially @opt overwriting.
++ * Content is changed too:
++ * - So far, @opt stored stretches, aka a match followed by literals
++ * - Now, it will store sequences, aka literals followed by a match
++ */
++ { U32 const storeEnd = cur + 2;
+ U32 storeStart = storeEnd;
+- U32 seqPos = cur;
++ U32 stretchPos = cur;
+
+ DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+ last_pos, cur); (void)last_pos;
+- assert(storeEnd < ZSTD_OPT_NUM);
+- DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+- storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+- opt[storeEnd] = lastSequence;
+- while (seqPos > 0) {
+- U32 const backDist = ZSTD_totalLen(opt[seqPos]);
++ assert(storeEnd < ZSTD_OPT_SIZE);
++ DEBUGLOG(6, "last stretch copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
++ storeEnd, lastStretch.litlen, lastStretch.mlen, lastStretch.off);
++ if (lastStretch.litlen > 0) {
++ /* last "sequence" is unfinished: just a bunch of literals */
++ opt[storeEnd].litlen = lastStretch.litlen;
++ opt[storeEnd].mlen = 0;
++ storeStart = storeEnd-1;
++ opt[storeStart] = lastStretch;
++ } {
++ opt[storeEnd] = lastStretch; /* note: litlen will be fixed */
++ storeStart = storeEnd;
++ }
++ while (1) {
++ ZSTD_optimal_t nextStretch = opt[stretchPos];
++ opt[storeStart].litlen = nextStretch.litlen;
++ DEBUGLOG(6, "selected sequence (llen=%u,mlen=%u,ofc=%u)",
++ opt[storeStart].litlen, opt[storeStart].mlen, opt[storeStart].off);
++ if (nextStretch.mlen == 0) {
++ /* reaching beginning of segment */
++ break;
++ }
+ storeStart--;
+- DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+- seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+- opt[storeStart] = opt[seqPos];
+- seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
++ opt[storeStart] = nextStretch; /* note: litlen will be fixed */
++ assert(nextStretch.litlen + nextStretch.mlen <= stretchPos);
++ stretchPos -= nextStretch.litlen + nextStretch.mlen;
+ }
+
+ /* save sequences */
+- DEBUGLOG(6, "sending selected sequences into seqStore")
++ DEBUGLOG(6, "sending selected sequences into seqStore");
+ { U32 storePos;
for (storePos=storeStart; storePos <= storeEnd; storePos++) {
U32 const llen = opt[storePos].litlen;
U32 const mlen = opt[storePos].mlen;
@@ -11242,7 +14604,7 @@ index fd82acfda..1e41cb04f 100644
U32 const advance = llen + mlen;
DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
anchor - istart, (unsigned)llen, (unsigned)mlen);
-@@ -1308,8 +1335,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+@@ -1308,11 +1422,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
}
assert(anchor + llen <= iend);
@@ -11253,16 +14615,71 @@ index fd82acfda..1e41cb04f 100644
anchor += advance;
ip = anchor;
} }
-@@ -1349,7 +1376,7 @@ size_t ZSTD_compressBlock_btopt(
++ DEBUGLOG(7, "new offset history : %u, %u, %u", rep[0], rep[1], rep[2]);
++
++ /* update all costs */
+ ZSTD_setBasePrices(optStatePtr, optLevel);
+ }
+ } /* while (ip < ilimit) */
+@@ -1320,21 +1437,27 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+ }
++#endif /* build exclusions */
+
++#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+ static size_t ZSTD_compressBlock_opt0(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+ {
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
+ }
++#endif
+
++#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+ static size_t ZSTD_compressBlock_opt2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+ {
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
+ }
++#endif
+
++#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+@@ -1342,20 +1465,23 @@ size_t ZSTD_compressBlock_btopt(
+ DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+ }
++#endif
+
+
+
+
++#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
/* ZSTD_initStats_ultra():
* make a first compression pass, just to seed stats with more accurate starting values.
* only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its contract must be respected.
+ * this function cannot error out, its narrow contract must be respected.
*/
- static void
- ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
-@@ -1368,7 +1395,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+-static void
+-ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+- seqStore_t* seqStore,
+- U32 rep[ZSTD_REP_NUM],
+- const void* src, size_t srcSize)
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++void ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
++ seqStore_t* seqStore,
++ U32 rep[ZSTD_REP_NUM],
++ const void* src, size_t srcSize)
+ {
+ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
+ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
+@@ -1368,7 +1494,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/
@@ -11271,7 +14688,7 @@ index fd82acfda..1e41cb04f 100644
ZSTD_resetSeqStore(seqStore);
ms->window.base -= srcSize;
ms->window.dictLimit += (U32)srcSize;
-@@ -1392,20 +1419,20 @@ size_t ZSTD_compressBlock_btultra2(
+@@ -1392,10 +1518,10 @@ size_t ZSTD_compressBlock_btultra2(
U32 const curr = (U32)((const BYTE*)src - ms->window.base);
DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
@@ -11285,9 +14702,7 @@ index fd82acfda..1e41cb04f 100644
* Consequently, this can only work if no data has been previously loaded in tables,
* aka, no dictionary, no prefix, no ldm preprocessing.
* The compression ratio gain is generally small (~0.5% on first block),
-- * the cost is 2x cpu time on first block. */
-+ ** the cost is 2x cpu time on first block. */
- assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+@@ -1404,15 +1530,17 @@ size_t ZSTD_compressBlock_btultra2(
if ( (ms->opt.litLengthSum==0) /* first block */
&& (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
&& (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
@@ -11298,8 +14713,50 @@ index fd82acfda..1e41cb04f 100644
) {
ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
}
+
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+ }
++#endif
+
++#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+@@ -1420,18 +1548,20 @@ size_t ZSTD_compressBlock_btopt_dictMatchState(
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+ }
+
+-size_t ZSTD_compressBlock_btultra_dictMatchState(
++size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
++ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+ }
++#endif
+
+-size_t ZSTD_compressBlock_btopt_extDict(
++#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+ {
+- return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
++ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+ }
+
+ size_t ZSTD_compressBlock_btultra_extDict(
+@@ -1440,6 +1570,7 @@ size_t ZSTD_compressBlock_btultra_extDict(
+ {
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+ }
++#endif
+
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
-index 22b862858..faa73ff4b 100644
+index 22b862858..ac1b743d2 100644
--- a/lib/zstd/compress/zstd_opt.h
+++ b/lib/zstd/compress/zstd_opt.h
@@ -1,5 +1,6 @@
@@ -11310,8 +14767,77 @@ index 22b862858..faa73ff4b 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
+@@ -14,30 +15,40 @@
+
+ #include "zstd_compress_internal.h"
+
++#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
++ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
+ /* used in ZSTD_loadDictionaryContent() */
+ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
++#endif
+
++#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+ size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_btultra(
++size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-size_t ZSTD_compressBlock_btultra2(
++size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
++#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt
++#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE ZSTD_compressBlock_btopt_dictMatchState
++#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT ZSTD_compressBlock_btopt_extDict
++#else
++#define ZSTD_COMPRESSBLOCK_BTOPT NULL
++#define ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT NULL
++#endif
+
+-size_t ZSTD_compressBlock_btopt_dictMatchState(
++#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
++size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+ size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+-
+-size_t ZSTD_compressBlock_btopt_extDict(
+- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+- void const* src, size_t srcSize);
+ size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+@@ -45,6 +56,20 @@ size_t ZSTD_compressBlock_btultra_extDict(
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and is only specific for the first block (no prefix) */
++size_t ZSTD_compressBlock_btultra2(
++ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
++ void const* src, size_t srcSize);
++
++#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra
++#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState
++#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict
++#define ZSTD_COMPRESSBLOCK_BTULTRA2 ZSTD_compressBlock_btultra2
++#else
++#define ZSTD_COMPRESSBLOCK_BTULTRA NULL
++#define ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE NULL
++#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT NULL
++#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL
++#endif
+
+
+ #endif /* ZSTD_OPT_H */
diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
-index 60958afeb..db670d71f 100644
+index 60958afeb..ac8b87f48 100644
--- a/lib/zstd/decompress/huf_decompress.c
+++ b/lib/zstd/decompress/huf_decompress.c
@@ -1,7 +1,8 @@
@@ -11421,7 +14947,7 @@ index 60958afeb..db670d71f 100644
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
}
-@@ -134,15 +144,28 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+@@ -134,43 +144,66 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
return dtd;
}
@@ -11446,16 +14972,19 @@ index 60958afeb..db670d71f 100644
+ * op [in/out] - The output pointers, must be updated to reflect what is written.
+ * bits [in/out] - The bitstream containers, must be updated to reflect the current state.
+ * dt [in] - The decoding table.
-+ * ilimit [in] - The input limit, stop when any input pointer is below ilimit.
++ * ilowest [in] - The beginning of the valid range of the input. Decoders may read
++ * down to this pointer. It may be below iend[0].
+ * oend [in] - The end of the output stream. op[3] must not cross oend.
+ * iend [in] - The end of each input stream. ip[i] may cross iend[i],
-+ * as long as it is above ilimit, but that indicates corruption.
++ * as long as it is above ilowest, but that indicates corruption.
+ */
typedef struct {
BYTE const* ip[4];
BYTE* op[4];
-@@ -151,15 +174,17 @@ typedef struct {
- BYTE const* ilimit;
+ U64 bits[4];
+ void const* dt;
+- BYTE const* ilimit;
++ BYTE const* ilowest;
BYTE* oend;
BYTE const* iend[4];
-} HUF_DecompressAsmArgs;
@@ -11477,9 +15006,12 @@ index 60958afeb..db670d71f 100644
{
void const* dt = DTable + 1;
U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
-@@ -168,9 +193,11 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
- BYTE* const oend = (BYTE*)dst + dstSize;
+- const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
++ const BYTE* const istart = (const BYTE*)src;
+
+- BYTE* const oend = (BYTE*)dst + dstSize;
++ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
- /* The following condition is false on x32 platform,
- * but HUF_asm is not compatible with this ABI */
@@ -11489,10 +15021,15 @@ index 60958afeb..db670d71f 100644
+ */
+ if (!MEM_isLittleEndian() || MEM_32bits())
+ return 0;
++
++ /* Avoid nullptr addition */
++ if (dstSize == 0)
++ return 0;
++ assert(dst != NULL);
/* strict minimum : jump table + 1 byte per stream */
if (srcSize < 10)
-@@ -181,7 +208,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
+@@ -181,11 +214,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
* On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
*/
if (dtLog != HUF_DECODER_FAST_TABLELOG)
@@ -11501,23 +15038,28 @@ index 60958afeb..db670d71f 100644
/* Read the jump table. */
{
-@@ -195,13 +222,13 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
+- const BYTE* const istart = (const BYTE*)src;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+@@ -195,13 +227,11 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
args->iend[2] = args->iend[1] + length2;
args->iend[3] = args->iend[2] + length3;
- /* HUF_initDStream() requires this, and this small of an input
+ /* HUF_initFastDStream() requires this, and this small of an input
* won't benefit from the ASM loop anyways.
- * length1 must be >= 16 so that ip[0] >= ilimit before the loop
- * starts.
+- * length1 must be >= 16 so that ip[0] >= ilimit before the loop
+- * starts.
*/
- if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
+- if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
- return 1;
++ if (length1 < 8 || length2 < 8 || length3 < 8 || length4 < 8)
+ return 0;
if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
}
/* ip[] contains the position that is currently loaded into bits[]. */
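For reference, the jump table being read above is the standard 4-stream Huffman framing: three little-endian 16-bit lengths, then the first three streams, with the fourth stream taking the remainder of the input. A minimal sketch of that layout (function and variable names are invented for the example; a little-endian host is assumed for brevity):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative parse of the 6-byte jump table preceding the four
 * Huffman bitstreams; the fourth length is implied by the total size. */
static int parse_jump_table(const uint8_t* src, size_t srcSize, size_t len[4])
{
    uint16_t l1, l2, l3;
    if (srcSize < 10) return -1;          /* jump table + 1 byte per stream */
    memcpy(&l1, src + 0, 2);
    memcpy(&l2, src + 2, 2);
    memcpy(&l3, src + 4, 2);
    len[0] = l1; len[1] = l2; len[2] = l3;
    if ((size_t)l1 + l2 + l3 + 6 > srcSize) return -1;  /* overflow/corruption */
    len[3] = srcSize - 6 - l1 - l2 - l3;
    return 0;
}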
-@@ -218,7 +245,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
+@@ -218,7 +248,7 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
/* No point to call the ASM loop for tiny outputs. */
if (args->op[3] >= oend)
@@ -11526,7 +15068,7 @@ index 60958afeb..db670d71f 100644
/* bits[] is the bit container.
* It is read from the MSB down to the LSB.
-@@ -227,10 +254,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
+@@ -227,24 +257,25 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
* set, so that CountTrailingZeros(bits[]) can be used
* to count how many bits we've consumed.
*/
@@ -11534,14 +15076,24 @@ index 60958afeb..db670d71f 100644
- args->bits[1] = HUF_initDStream(args->ip[1]);
- args->bits[2] = HUF_initDStream(args->ip[2]);
- args->bits[3] = HUF_initDStream(args->ip[3]);
+-
+- /* If ip[] >= ilimit, it is guaranteed to be safe to
+- * reload bits[]. It may be beyond its section, but is
+- * guaranteed to be valid (>= istart).
+- */
+- args->ilimit = ilimit;
+ args->bits[0] = HUF_initFastDStream(args->ip[0]);
+ args->bits[1] = HUF_initFastDStream(args->ip[1]);
+ args->bits[2] = HUF_initFastDStream(args->ip[2]);
+ args->bits[3] = HUF_initFastDStream(args->ip[3]);
++
++ /* The decoders must be sure to never read beyond ilowest.
++ * This is lower than iend[0], but allowing decoders to read
++ * down to ilowest can allow an extra iteration or two in the
++ * fast loop.
++ */
++ args->ilowest = istart;
- /* If ip[] >= ilimit, it is guaranteed to be safe to
- * reload bits[]. It may be beyond its section, but is
-@@ -241,10 +268,10 @@ static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst,
args->oend = oend;
args->dt = dt;
@@ -11554,16 +15106,17 @@ index 60958afeb..db670d71f 100644
{
/* Validate that we haven't overwritten. */
if (args->op[stream] > segmentEnd)
-@@ -258,15 +285,33 @@ static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs
+@@ -258,15 +289,33 @@ static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs
return ERROR(corruption_detected);
/* Construct the BIT_DStream_t. */
- bit->bitContainer = MEM_readLE64(args->ip[stream]);
- bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]);
+- bit->start = (const char*)args->iend[0];
+ assert(sizeof(size_t) == 8);
+ bit->bitContainer = MEM_readLEST(args->ip[stream]);
+ bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]);
- bit->start = (const char*)args->iend[0];
++ bit->start = (const char*)args->ilowest;
bit->limitPtr = bit->start + sizeof(size_t);
bit->ptr = (const char*)args->ip[stream];
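The bit containers above keep a sentinel 1 bit just below the unconsumed data (the "| 1" in the reload path), so a trailing-zero count recovers how many bits have been consumed: the container is consumed MSB-first by shifting left, which pushes the sentinel up by exactly the number of consumed bits. A standalone illustration of the trick, using a GCC/Clang builtin where the real code uses ZSTD_countTrailingZeros64:

#include <stdint.h>

/* Illustration: after bits = value | 1, consuming k bits as bits <<= k
 * leaves exactly k trailing zeros below the sentinel. */
static int bits_consumed(uint64_t bits)
{
    return __builtin_ctzll(bits); /* never 0: the sentinel 1 survives */
}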
@@ -11573,25 +15126,25 @@ index 60958afeb..db670d71f 100644
+
+/* Calls X(N) for each stream 0, 1, 2, 3. */
+#define HUF_4X_FOR_EACH_STREAM(X) \
-+ { \
-+ X(0) \
-+ X(1) \
-+ X(2) \
-+ X(3) \
-+ }
++ do { \
++ X(0); \
++ X(1); \
++ X(2); \
++ X(3); \
++ } while (0)
+
+/* Calls X(N, var) for each stream 0, 1, 2, 3. */
+#define HUF_4X_FOR_EACH_STREAM_WITH_VAR(X, var) \
-+ { \
-+ X(0, (var)) \
-+ X(1, (var)) \
-+ X(2, (var)) \
-+ X(3, (var)) \
-+ }
++ do { \
++ X(0, (var)); \
++ X(1, (var)); \
++ X(2, (var)); \
++ X(3, (var)); \
++ } while (0)
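The do { ... } while (0) wrappers added throughout this hunk are the usual macro-hygiene idiom: they turn a multi-statement macro into a single statement, so a trailing semicolon and if/else nesting behave as expected. A minimal illustration (names invented for the example):

#define BAD_PAIR(x)  { (x)++; (x)++; }            /* breaks inside if/else */
#define GOOD_PAIR(x) do { (x)++; (x)++; } while (0)

static int demo(int v, int flag)
{
    if (flag)
        GOOD_PAIR(v);   /* expands to one statement; the ';' belongs to it */
    else
        v = 0;          /* with BAD_PAIR(v); this else would not compile */
    return v;
}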
#ifndef HUF_FORCE_DECOMPRESS_X2
-@@ -283,10 +328,11 @@ typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decodi
+@@ -283,10 +332,11 @@ typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decodi
static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
U64 D4;
if (MEM_isLittleEndian()) {
@@ -11605,7 +15158,7 @@ index 60958afeb..db670d71f 100644
D4 *= 0x0001000100010001ULL;
return D4;
}
-@@ -329,13 +375,7 @@ typedef struct {
+@@ -329,13 +379,7 @@ typedef struct {
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
} HUF_ReadDTableX1_Workspace;
@@ -11620,7 +15173,7 @@ index 60958afeb..db670d71f 100644
{
U32 tableLog = 0;
U32 nbSymbols = 0;
-@@ -350,7 +390,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
+@@ -350,7 +394,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
@@ -11629,7 +15182,7 @@ index 60958afeb..db670d71f 100644
if (HUF_isError(iSize)) return iSize;
-@@ -377,9 +417,8 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
+@@ -377,9 +421,8 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
* rankStart[0] is not filled because there are no entries in the table for
* weight 0.
*/
@@ -11641,7 +15194,7 @@ index 60958afeb..db670d71f 100644
int const unroll = 4;
int const nLimit = (int)nbSymbols - unroll + 1;
for (n=0; n<(int)tableLog+1; n++) {
-@@ -406,10 +445,9 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
+@@ -406,10 +449,9 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
* We can switch based on the length to a different inner loop which is
* optimized for that particular case.
*/
@@ -11655,7 +15208,34 @@ index 60958afeb..db670d71f 100644
for (w=1; w<tableLog+1; ++w) {
int const symbolCount = wksp->rankVal[w];
int const length = (1 << w) >> 1;
-@@ -519,7 +557,7 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons
+@@ -483,15 +525,19 @@ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog
+ }
+
+ #define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+- *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
++ do { *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog); } while (0)
+
+-#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+- if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+- HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
++#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
++ do { \
++ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
++ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
++ } while (0)
+
+-#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+- if (MEM_64bits()) \
+- HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
++#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
++ do { \
++ if (MEM_64bits()) \
++ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr); \
++ } while (0)
+
+ HINT_INLINE size_t
+ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
+@@ -519,7 +565,7 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons
while (p < pEnd)
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
@@ -11664,7 +15244,16 @@ index 60958afeb..db670d71f 100644
}
FORCE_INLINE_TEMPLATE size_t
-@@ -545,6 +583,10 @@ HUF_decompress1X1_usingDTable_internal_body(
+@@ -529,7 +575,7 @@ HUF_decompress1X1_usingDTable_internal_body(
+ const HUF_DTable* DTable)
+ {
+ BYTE* op = (BYTE*)dst;
+- BYTE* const oend = op + dstSize;
++ BYTE* const oend = ZSTD_maybeNullPtrAdd(op, dstSize);
+ const void* dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+ BIT_DStream_t bitD;
+@@ -545,6 +591,10 @@ HUF_decompress1X1_usingDTable_internal_body(
return dstSize;
}
@@ -11675,15 +15264,23 @@ index 60958afeb..db670d71f 100644
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X1_usingDTable_internal_body(
void* dst, size_t dstSize,
-@@ -588,6 +630,7 @@ HUF_decompress4X1_usingDTable_internal_body(
+@@ -553,6 +603,7 @@ HUF_decompress4X1_usingDTable_internal_body(
+ {
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
++ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+@@ -588,6 +639,7 @@ HUF_decompress4X1_usingDTable_internal_body(
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
-+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
++ assert(dstSize >= 6); /* validated above */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
-@@ -650,38 +693,156 @@ size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo
+@@ -650,52 +702,173 @@ size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo
}
#endif
@@ -11710,7 +15307,7 @@ index 60958afeb..db670d71f 100644
+ BYTE* op[4];
+ U16 const* const dtable = (U16 const*)args->dt;
+ BYTE* const oend = args->oend;
-+ BYTE const* const ilimit = args->ilimit;
++ BYTE const* const ilowest = args->ilowest;
+
+ /* Copy the arguments to local variables */
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
@@ -11728,7 +15325,7 @@ index 60958afeb..db670d71f 100644
+#ifndef NDEBUG
+ for (stream = 0; stream < 4; ++stream) {
+ assert(op[stream] <= (stream == 3 ? oend : op[stream + 1]));
-+ assert(ip[stream] >= ilimit);
++ assert(ip[stream] >= ilowest);
+ }
+#endif
+ /* Compute olimit */
@@ -11738,7 +15335,7 @@ index 60958afeb..db670d71f 100644
+ /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes
+ * per stream.
+ */
-+ size_t const iiters = (size_t)(ip[0] - ilimit) / 7;
++ size_t const iiters = (size_t)(ip[0] - ilowest) / 7;
+ /* We can safely run iters iterations before running bounds checks */
+ size_t const iters = MIN(oiters, iiters);
+ size_t const symbols = iters * 5;
@@ -11749,8 +15346,8 @@ index 60958afeb..db670d71f 100644
+ */
+ olimit = op[3] + symbols;
+
-+ /* Exit fast decoding loop once we get close to the end. */
-+ if (op[3] + 20 > olimit)
++ /* Exit fast decoding loop once we reach the end. */
++ if (op[3] == olimit)
+ break;
+
+ /* Exit the decoding loop if any input pointer has crossed the
@@ -11770,15 +15367,15 @@ index 60958afeb..db670d71f 100644
+#endif
+
+#define HUF_4X1_DECODE_SYMBOL(_stream, _symbol) \
-+ { \
++ do { \
+ int const index = (int)(bits[(_stream)] >> 53); \
+ int const entry = (int)dtable[index]; \
+ bits[(_stream)] <<= (entry & 0x3F); \
+ op[(_stream)][(_symbol)] = (BYTE)((entry >> 8) & 0xFF); \
-+ }
++ } while (0)
+
+#define HUF_4X1_RELOAD_STREAM(_stream) \
-+ { \
++ do { \
+ int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
+ int const nbBits = ctz & 7; \
+ int const nbBytes = ctz >> 3; \
@@ -11786,30 +15383,30 @@ index 60958afeb..db670d71f 100644
+ ip[(_stream)] -= nbBytes; \
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
+ bits[(_stream)] <<= nbBits; \
-+ }
++ } while (0)
+
+ /* Manually unroll the loop because compilers don't consistently
+ * unroll the inner loops, which destroys performance.
+ */
+ do {
+ /* Decode 5 symbols in each of the 4 streams */
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4)
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 0);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 1);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 2);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 3);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X1_DECODE_SYMBOL, 4);
+
+ /* Reload each of the 4 bitstreams */
-+ HUF_4X_FOR_EACH_STREAM(HUF_4X1_RELOAD_STREAM)
++ HUF_4X_FOR_EACH_STREAM(HUF_4X1_RELOAD_STREAM);
+ } while (op[3] < olimit);
+
+#undef HUF_4X1_DECODE_SYMBOL
+#undef HUF_4X1_RELOAD_STREAM
+ }
-+
-+_out:
-static HUF_ASM_X86_64_BMI2_ATTRS
++_out:
++
+ /* Save the final values of each of the state variables back to args. */
+ ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
+ ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip));
@@ -11832,14 +15429,16 @@ index 60958afeb..db670d71f 100644
+ HUF_DecompressFastLoopFn loopFn)
{
void const* dt = DTable + 1;
- const BYTE* const iend = (const BYTE*)cSrc + 6;
- BYTE* const oend = (BYTE*)dst + dstSize;
+- const BYTE* const iend = (const BYTE*)cSrc + 6;
+- BYTE* const oend = (BYTE*)dst + dstSize;
- HUF_DecompressAsmArgs args;
- {
- size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
- FORWARD_IF_ERROR(ret, "Failed to init asm args");
- if (ret != 0)
- return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
++ BYTE const* const ilowest = (BYTE const*)cSrc;
++ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+ HUF_DecompressFastArgs args;
+ { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
@@ -11847,14 +15446,30 @@ index 60958afeb..db670d71f 100644
+ return 0;
}
- assert(args.ip[0] >= args.ilimit);
+- assert(args.ip[0] >= args.ilimit);
- HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args);
++ assert(args.ip[0] >= args.ilowest);
+ loopFn(&args);
- /* Our loop guarantees that ip[] >= ilimit and that we haven't
+- /* Our loop guarantees that ip[] >= ilimit and that we haven't
++ /* Our loop guarantees that ip[] >= ilowest and that we haven't
* overwritten any op[].
-@@ -694,8 +855,7 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm(
- (void)iend;
+ */
+- assert(args.ip[0] >= iend);
+- assert(args.ip[1] >= iend);
+- assert(args.ip[2] >= iend);
+- assert(args.ip[3] >= iend);
++ assert(args.ip[0] >= ilowest);
++ assert(args.ip[0] >= ilowest);
++ assert(args.ip[1] >= ilowest);
++ assert(args.ip[2] >= ilowest);
++ assert(args.ip[3] >= ilowest);
+ assert(args.op[3] <= oend);
+- (void)iend;
++
++ assert(ilowest == args.ilowest);
++ assert(ilowest + 6 == args.iend[0]);
++ (void)ilowest;
/* finish bit streams one by one. */
- {
@@ -11863,7 +15478,7 @@ index 60958afeb..db670d71f 100644
BYTE* segmentEnd = (BYTE*)dst;
int i;
for (i = 0; i < 4; ++i) {
-@@ -712,97 +872,59 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm(
+@@ -712,97 +885,59 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm(
}
/* decoded size */
@@ -11925,13 +15540,13 @@ index 60958afeb..db670d71f 100644
- if (dtd.tableType != 0) return ERROR(GENERIC);
- return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
--
+
-size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- const BYTE* ip = (const BYTE*) cSrc;
-
+-
- size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
- if (HUF_isError(hSize)) return hSize;
- if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
@@ -11986,7 +15601,7 @@ index 60958afeb..db670d71f 100644
#endif /* HUF_FORCE_DECOMPRESS_X2 */
-@@ -985,7 +1107,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32
+@@ -985,7 +1120,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList,
@@ -11995,7 +15610,7 @@ index 60958afeb..db670d71f 100644
const U32 nbBitsBaseline)
{
U32* const rankVal = rankValOrigin[0];
-@@ -1040,14 +1162,7 @@ typedef struct {
+@@ -1040,14 +1175,7 @@ typedef struct {
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
const void* src, size_t srcSize,
@@ -12011,7 +15626,7 @@ index 60958afeb..db670d71f 100644
{
U32 tableLog, maxW, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
-@@ -1069,7 +1184,7 @@ size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
+@@ -1069,7 +1197,7 @@ size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
@@ -12020,7 +15635,43 @@ index 60958afeb..db670d71f 100644
if (HUF_isError(iSize)) return iSize;
/* check result */
-@@ -1240,6 +1355,11 @@ HUF_decompress1X2_usingDTable_internal_body(
+@@ -1159,15 +1287,19 @@ HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, c
+ }
+
+ #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
++ do { ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); } while (0)
+
+-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+- if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
++#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
++ do { \
++ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
++ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
++ } while (0)
+
+-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+- if (MEM_64bits()) \
+- ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
++#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
++ do { \
++ if (MEM_64bits()) \
++ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog); \
++ } while (0)
+
+ HINT_INLINE size_t
+ HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+@@ -1227,7 +1359,7 @@ HUF_decompress1X2_usingDTable_internal_body(
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+- BYTE* const oend = ostart + dstSize;
++ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, dstSize);
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+@@ -1240,6 +1372,11 @@ HUF_decompress1X2_usingDTable_internal_body(
/* decoded size */
return dstSize;
}
@@ -12032,7 +15683,15 @@ index 60958afeb..db670d71f 100644
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X2_usingDTable_internal_body(
void* dst, size_t dstSize,
-@@ -1280,8 +1400,9 @@ HUF_decompress4X2_usingDTable_internal_body(
+@@ -1247,6 +1384,7 @@ HUF_decompress4X2_usingDTable_internal_body(
+ const HUF_DTable* DTable)
+ {
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
++ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+@@ -1280,8 +1418,9 @@ HUF_decompress4X2_usingDTable_internal_body(
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
@@ -12040,11 +15699,11 @@ index 60958afeb..db670d71f 100644
- if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
-+ if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
++ assert(dstSize >= 6 /* validated above */);
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
-@@ -1366,36 +1487,178 @@ size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo
+@@ -1366,44 +1505,191 @@ size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, vo
}
#endif
@@ -12071,7 +15730,7 @@ index 60958afeb..db670d71f 100644
+ BYTE* op[4];
+ BYTE* oend[4];
+ HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt;
-+ BYTE const* const ilimit = args->ilimit;
++ BYTE const* const ilowest = args->ilowest;
+
+ /* Copy the arguments to local registers. */
+ ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
@@ -12094,7 +15753,7 @@ index 60958afeb..db670d71f 100644
+#ifndef NDEBUG
+ for (stream = 0; stream < 4; ++stream) {
+ assert(op[stream] <= oend[stream]);
-+ assert(ip[stream] >= ilimit);
++ assert(ip[stream] >= ilowest);
+ }
+#endif
+ /* Compute olimit */
@@ -12107,7 +15766,7 @@ index 60958afeb..db670d71f 100644
+ * We also know that each input pointer is >= ip[0]. So we can run
+ * iters loops before running out of input.
+ */
-+ size_t iters = (size_t)(ip[0] - ilimit) / 7;
++ size_t iters = (size_t)(ip[0] - ilowest) / 7;
+ /* Each iteration can produce up to 10 bytes of output per stream.
+ * Each output stream may advance at different rates. So take the
+ * minimum number of safe iterations among all the output streams.
@@ -12125,8 +15784,8 @@ index 60958afeb..db670d71f 100644
+ */
+ olimit = op[3] + (iters * 5);
+
-+ /* Exit the fast decoding loop if we are too close to the end. */
-+ if (op[3] + 10 > olimit)
++ /* Exit the fast decoding loop once we reach the end. */
++ if (op[3] == olimit)
+ break;
+
+ /* Exit the decoding loop if any input pointer has crossed the
@@ -12144,19 +15803,23 @@ index 60958afeb..db670d71f 100644
+ assert(ip[stream] >= ip[stream - 1]);
+ }
+#endif
-+
-+#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \
-+ if ((_decode3) || (_stream) != 3) { \
-+ int const index = (int)(bits[(_stream)] >> 53); \
-+ HUF_DEltX2 const entry = dtable[index]; \
-+ MEM_write16(op[(_stream)], entry.sequence); \
-+ bits[(_stream)] <<= (entry.nbBits) & 0x3F; \
-+ op[(_stream)] += (entry.length); \
-+ }
+
+-static HUF_ASM_X86_64_BMI2_ATTRS size_t
+-HUF_decompress4X2_usingDTable_internal_bmi2_asm(
++#define HUF_4X2_DECODE_SYMBOL(_stream, _decode3) \
++ do { \
++ if ((_decode3) || (_stream) != 3) { \
++ int const index = (int)(bits[(_stream)] >> 53); \
++ HUF_DEltX2 const entry = dtable[index]; \
++ MEM_write16(op[(_stream)], entry.sequence); \
++ bits[(_stream)] <<= (entry.nbBits) & 0x3F; \
++ op[(_stream)] += (entry.length); \
++ } \
++ } while (0)
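The double-symbol decode above works because each HUF_DEltX2 entry packs up to two decoded bytes in its 16-bit sequence field: the macro always stores 16 bits, then advances op by entry.length (1 or 2), so a one-symbol entry simply has its second byte overwritten by the next store. A sketch of that access pattern, assuming the upstream HUF_DEltX2 layout (the helper itself is illustrative):

#include <stdint.h>
#include <string.h>

typedef struct { uint16_t sequence; uint8_t nbBits; uint8_t length; } DEltX2;

/* Illustrative single decode step: unconditional 2-byte store, but only
 * entry.length bytes are claimed; the extra byte is refilled next step. */
static uint8_t* decode_one(uint8_t* op, uint64_t* bits, const DEltX2* dtable)
{
    DEltX2 const entry = dtable[*bits >> 53];  /* top 11 bits index the table */
    memcpy(op, &entry.sequence, 2);            /* always write both bytes */
    *bits <<= (entry.nbBits & 0x3F);           /* consume this entry's bits */
    return op + entry.length;                  /* advance by 1 or 2 symbols */
}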
+
+#define HUF_4X2_RELOAD_STREAM(_stream) \
-+ { \
-+ HUF_4X2_DECODE_SYMBOL(3, 1) \
++ do { \
++ HUF_4X2_DECODE_SYMBOL(3, 1); \
+ { \
+ int const ctz = ZSTD_countTrailingZeros64(bits[(_stream)]); \
+ int const nbBits = ctz & 7; \
@@ -12165,7 +15828,7 @@ index 60958afeb..db670d71f 100644
+ bits[(_stream)] = MEM_read64(ip[(_stream)]) | 1; \
+ bits[(_stream)] <<= nbBits; \
+ } \
-+ }
++ } while (0)
+
+ /* Manually unroll the loop because compilers don't consistently
+ * unroll the inner loops, which destroys performance.
@@ -12175,25 +15838,23 @@ index 60958afeb..db670d71f 100644
+ * The final stream will be decoded during the reload phase
+ * to reduce register pressure.
+ */
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0)
-+ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0)
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
++ HUF_4X_FOR_EACH_STREAM_WITH_VAR(HUF_4X2_DECODE_SYMBOL, 0);
+
+ /* Decode one symbol from the final stream */
-+ HUF_4X2_DECODE_SYMBOL(3, 1)
++ HUF_4X2_DECODE_SYMBOL(3, 1);
+
+ /* Decode 4 symbols from the final stream & reload bitstreams.
+ * The final stream is reloaded last, meaning that all 5 symbols
+ * are decoded from the final stream before it is reloaded.
+ */
-+ HUF_4X_FOR_EACH_STREAM(HUF_4X2_RELOAD_STREAM)
++ HUF_4X_FOR_EACH_STREAM(HUF_4X2_RELOAD_STREAM);
+ } while (op[3] < olimit);
+ }
-
--static HUF_ASM_X86_64_BMI2_ATTRS size_t
--HUF_decompress4X2_usingDTable_internal_bmi2_asm(
++
+#undef HUF_4X2_DECODE_SYMBOL
+#undef HUF_4X2_RELOAD_STREAM
+
@@ -12214,9 +15875,11 @@ index 60958afeb..db670d71f 100644
+ const HUF_DTable* DTable,
+ HUF_DecompressFastLoopFn loopFn) {
void const* dt = DTable + 1;
- const BYTE* const iend = (const BYTE*)cSrc + 6;
- BYTE* const oend = (BYTE*)dst + dstSize;
+- const BYTE* const iend = (const BYTE*)cSrc + 6;
+- BYTE* const oend = (BYTE*)dst + dstSize;
- HUF_DecompressAsmArgs args;
++ const BYTE* const ilowest = (const BYTE*)cSrc;
++ BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
+ HUF_DecompressFastArgs args;
{
- size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
@@ -12228,13 +15891,30 @@ index 60958afeb..db670d71f 100644
+ return 0;
}
- assert(args.ip[0] >= args.ilimit);
+- assert(args.ip[0] >= args.ilimit);
- HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args);
++ assert(args.ip[0] >= args.ilowest);
+ loopFn(&args);
/* note : op4 already verified within main loop */
- assert(args.ip[0] >= iend);
-@@ -1426,91 +1689,72 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm(
+- assert(args.ip[0] >= iend);
+- assert(args.ip[1] >= iend);
+- assert(args.ip[2] >= iend);
+- assert(args.ip[3] >= iend);
++ assert(args.ip[0] >= ilowest);
++ assert(args.ip[1] >= ilowest);
++ assert(args.ip[2] >= ilowest);
++ assert(args.ip[3] >= ilowest);
+ assert(args.op[3] <= oend);
+- (void)iend;
++
++ assert(ilowest == args.ilowest);
++ assert(ilowest + 6 == args.iend[0]);
++ (void)ilowest;
+
+ /* finish bitStreams one by one */
+ {
+@@ -1426,91 +1712,72 @@ HUF_decompress4X2_usingDTable_internal_bmi2_asm(
/* decoded size */
return dstSize;
}
@@ -12354,7 +16034,7 @@ index 60958afeb..db670d71f 100644
#endif /* HUF_FORCE_DECOMPRESS_X1 */
-@@ -1518,44 +1762,6 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+@@ -1518,44 +1785,6 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
/* Universal decompression selectors */
/* ***********************************/
@@ -12399,7 +16079,7 @@ index 60958afeb..db670d71f 100644
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
-@@ -1610,36 +1816,9 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+@@ -1610,36 +1839,9 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
#endif
}
@@ -12437,7 +16117,7 @@ index 60958afeb..db670d71f 100644
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
-@@ -1652,71 +1831,71 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+@@ -1652,71 +1854,71 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
@@ -12527,7 +16207,7 @@ index 60958afeb..db670d71f 100644
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
-@@ -1726,15 +1905,14 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
+@@ -1726,15 +1928,14 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
@@ -12602,7 +16282,7 @@ index 8c1a79d66..de459a0da 100644
*
* This source code is licensed under both the BSD-style license (found in the
diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
-index 6b3177c94..03dbdf391 100644
+index 6b3177c94..b2bfa2b16 100644
--- a/lib/zstd/decompress/zstd_decompress.c
+++ b/lib/zstd/decompress/zstd_decompress.c
@@ -1,5 +1,6 @@
@@ -12613,27 +16293,25 @@ index 6b3177c94..03dbdf391 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
-@@ -52,17 +53,18 @@
- /*-*******************************************************
+@@ -53,13 +54,15 @@
* Dependencies
*********************************************************/
-+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
++#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
++#include "../common/error_private.h"
++#include "../common/zstd_internal.h" /* blockProperties_t */
#include "../common/mem.h" /* low level memory routines */
++#include "../common/bits.h" /* ZSTD_highbit32 */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
- #include "../common/zstd_internal.h" /* blockProperties_t */
+-#include "../common/zstd_internal.h" /* blockProperties_t */
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
-+#include "../common/bits.h" /* ZSTD_highbit32 */
-
-
-
-@@ -72,11 +74,11 @@
+@@ -72,11 +75,11 @@
*************************************/
#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
@@ -12650,15 +16328,24 @@ index 6b3177c94..03dbdf391 100644
#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2
-@@ -237,6 +239,7 @@ static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
+@@ -237,6 +240,8 @@ static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
dctx->outBufferMode = ZSTD_bm_buffered;
dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+ dctx->disableHufAsm = 0;
++ dctx->maxBlockSizeParam = 0;
}
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
-@@ -421,16 +424,40 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+@@ -253,6 +258,7 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ dctx->oversizedDuration = 0;
++ dctx->isFrameDecompression = 1;
+ #if DYNAMIC_BMI2
+ dctx->bmi2 = ZSTD_cpuSupportsBmi2();
+ #endif
+@@ -421,16 +427,40 @@ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
* note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
@@ -12703,7 +16390,7 @@ index 6b3177c94..03dbdf391 100644
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
-@@ -540,49 +567,52 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+@@ -540,49 +570,52 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported, "");
@@ -12778,7 +16465,7 @@ index 6b3177c94..03dbdf391 100644
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long totalDstSize = 0;
-@@ -592,9 +622,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+@@ -592,9 +625,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
@@ -12789,7 +16476,7 @@ index 6b3177c94..03dbdf391 100644
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
-@@ -602,17 +630,17 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+@@ -602,17 +633,17 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
continue;
}
@@ -12815,7 +16502,32 @@ index 6b3177c94..03dbdf391 100644
src = (const BYTE *)src + frameSrcSize;
srcSize -= frameSrcSize;
-@@ -730,10 +758,11 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
+@@ -676,13 +707,13 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+ return frameSizeInfo;
+ }
+
+-static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
++static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format)
+ {
+ ZSTD_frameSizeInfo frameSizeInfo;
+ ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
+
+- if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
++ if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+ assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
+@@ -696,7 +727,7 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
+ ZSTD_frameHeader zfh;
+
+ /* Extract Frame Header */
+- { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
++ { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format);
+ if (ZSTD_isError(ret))
+ return ZSTD_errorFrameSizeInfo(ret);
+ if (ret > 0)
+@@ -730,23 +761,26 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
ip += 4;
}
@@ -12828,7 +16540,37 @@ index 6b3177c94..03dbdf391 100644
return frameSizeInfo;
}
}
-@@ -773,6 +802,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+
++static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) {
++ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format);
++ return frameSizeInfo.compressedSize;
++}
++
+ /* ZSTD_findFrameCompressedSize() :
+- * compatible with legacy mode
+- * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+- * `srcSize` must be at least as large as the frame contained
+- * @return : the compressed size of the frame starting at `src` */
++ * See docs in zstd.h
++ * Note: compatible with legacy mode */
+ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+ {
+- ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+- return frameSizeInfo.compressedSize;
++ return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1);
+ }
+
+ /* ZSTD_decompressBound() :
+@@ -760,7 +794,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+ unsigned long long bound = 0;
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+- ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
++ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+@@ -773,6 +807,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
return bound;
}
@@ -12839,7 +16581,7 @@ index 6b3177c94..03dbdf391 100644
+
+ /* Iterate over each frame */
+ while (srcSize > 0) {
-+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
++ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ ZSTD_frameHeader zfh;
@@ -12877,7 +16619,47 @@ index 6b3177c94..03dbdf391 100644
/*-*************************************************************
* Frame decoding
-@@ -930,6 +1001,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+@@ -856,6 +932,10 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
+ }
+
++ /* Shrink the blockSizeMax if enabled */
++ if (dctx->maxBlockSizeParam != 0)
++ dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam);
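The clamp above is driven by the new ZSTD_d_maxBlockSize parameter, which lets a caller cap the block size a frame may use and thereby shrink the decoder's working buffers; frames needing larger blocks then fail to decode. A hedged usage sketch against the advanced API added by this patch (upstream-style names for brevity; in-kernel callers normally go through the zstd_* wrappers, and error handling is elided):

/* Sketch: cap the largest accepted block at 64 KiB, which also shrinks
 * the streaming buffers sized from fParams.blockSizeMax. 64 KiB lies
 * inside [ZSTD_BLOCKSIZE_MAX_MIN, ZSTD_BLOCKSIZE_MAX]. */
static ZSTD_DCtx* make_capped_dctx(void)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx != NULL)
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_maxBlockSize, 64 * 1024);
    return dctx;
}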
++
+ /* Loop on each block */
+ while (1) {
+ BYTE* oBlockEnd = oend;
+@@ -888,7 +968,8 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+- decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
++ assert(dctx->isFrameDecompression == 1);
++ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming);
+ break;
+ case bt_raw :
+ /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
+@@ -901,12 +982,14 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+-
+- if (ZSTD_isError(decodedSize)) return decodedSize;
+- if (dctx->validateChecksum)
++ FORWARD_IF_ERROR(decodedSize, "Block decompression failure");
++ DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize);
++ if (dctx->validateChecksum) {
+ xxh64_update(&dctx->xxhState, op, decodedSize);
+- if (decodedSize != 0)
++ }
++ if (decodedSize) /* support dst = NULL,0 */ {
+ op += decodedSize;
++ }
+ assert(ip != NULL);
+ ip += cBlockSize;
+ remainingSrcSize -= cBlockSize;
+@@ -930,12 +1013,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
/* Allow caller to get size read */
@@ -12885,14 +16667,23 @@ index 6b3177c94..03dbdf391 100644
*srcPtr = ip;
*srcSizePtr = remainingSrcSize;
return (size_t)(op-ostart);
-@@ -955,17 +1027,18 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ }
+
+-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
++static
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+@@ -955,17 +1041,18 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
- { U32 const magicNumber = MEM_readLE32(src);
- DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
- (unsigned)magicNumber, ZSTD_MAGICNUMBER);
-+ if (srcSize >= 4) {
++ if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) {
+ U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
@@ -12909,7 +16700,7 @@ index 6b3177c94..03dbdf391 100644
} }
if (ddict) {
-@@ -1061,8 +1134,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
+@@ -1061,8 +1148,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
/*
@@ -12920,7 +16711,25 @@ index 6b3177c94..03dbdf391 100644
* be streamed.
*
* For blocks that can be streamed, this allows us to reduce the latency until we produce
-@@ -1262,7 +1335,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
+@@ -1181,7 +1268,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
+ {
+ case bt_compressed:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+- rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
++ assert(dctx->isFrameDecompression == 1);
++ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_raw :
+@@ -1250,6 +1338,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
+ case ZSTDds_decodeSkippableHeader:
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
++ assert(dctx->format != ZSTD_f_zstd1_magicless);
+ ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+@@ -1262,7 +1351,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
default:
assert(0); /* impossible */
@@ -12929,7 +16738,7 @@ index 6b3177c94..03dbdf391 100644
}
}
-@@ -1303,11 +1376,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+@@ -1303,11 +1392,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
/* in minimal huffman, we always use X1 variants */
size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
dictPtr, dictEnd - dictPtr,
@@ -12943,7 +16752,7 @@ index 6b3177c94..03dbdf391 100644
#endif
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
dictPtr += hSize;
-@@ -1403,7 +1476,7 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+@@ -1403,10 +1492,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
dctx->prefixStart = NULL;
dctx->virtualStart = NULL;
dctx->dictEnd = NULL;
@@ -12952,7 +16761,11 @@ index 6b3177c94..03dbdf391 100644
dctx->litEntropy = dctx->fseEntropy = 0;
dctx->dictID = 0;
dctx->bType = bt_reserved;
-@@ -1465,7 +1538,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
++ dctx->isFrameDecompression = 1;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+@@ -1465,7 +1555,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
* This could for one of the following reasons :
* - The frame does not require a dictionary (most common case).
* - The frame was built with dictID intentionally removed.
@@ -12961,7 +16774,7 @@ index 6b3177c94..03dbdf391 100644
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, frame header could not be decoded.
* Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
-@@ -1474,7 +1547,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+@@ -1474,7 +1564,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
@@ -12970,7 +16783,7 @@ index 6b3177c94..03dbdf391 100644
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
-@@ -1581,7 +1654,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
+@@ -1581,7 +1671,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
DEBUGLOG(4, "ZSTD_initDStream");
@@ -12981,7 +16794,7 @@ index 6b3177c94..03dbdf391 100644
}
/* ZSTD_initDStream_usingDDict() :
-@@ -1589,6 +1664,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
+@@ -1589,6 +1681,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
@@ -12989,7 +16802,7 @@ index 6b3177c94..03dbdf391 100644
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
return ZSTD_startingInputLength(dctx->format);
-@@ -1599,6 +1675,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+@@ -1599,6 +1692,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
@@ -12997,7 +16810,7 @@ index 6b3177c94..03dbdf391 100644
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
return ZSTD_startingInputLength(dctx->format);
}
-@@ -1670,6 +1747,11 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
+@@ -1670,6 +1764,15 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
return bounds;
@@ -13005,21 +16818,28 @@ index 6b3177c94..03dbdf391 100644
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
++ case ZSTD_d_maxBlockSize:
++ bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
++ bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
++ return bounds;
+
default:;
}
bounds.error = ERROR(parameter_unsupported);
-@@ -1710,6 +1792,9 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value
+@@ -1710,6 +1813,12 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value
case ZSTD_d_refMultipleDDicts:
*value = (int)dctx->refMultipleDDicts;
return 0;
+ case ZSTD_d_disableHuffmanAssembly:
+ *value = (int)dctx->disableHufAsm;
+ return 0;
++ case ZSTD_d_maxBlockSize:
++ *value = dctx->maxBlockSizeParam;
++ return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
-@@ -1743,6 +1828,10 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
+@@ -1743,6 +1852,14 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
}
dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
return 0;
@@ -13027,10 +16847,56 @@ index 6b3177c94..03dbdf391 100644
+ CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value);
+ dctx->disableHufAsm = value != 0;
+ return 0;
++ case ZSTD_d_maxBlockSize:
++ if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value);
++ dctx->maxBlockSizeParam = value;
++ return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
-@@ -1918,7 +2007,6 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -1754,6 +1871,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
++ dctx->isFrameDecompression = 1;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+@@ -1770,11 +1888,17 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+ return ZSTD_sizeof_DCtx(dctx);
+ }
+
+-size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
++static size_t ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t blockSizeMax)
+ {
+- size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+- /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/
+- unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
++ size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax);
++ /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block
++ * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing
++ * the block at the beginning of the output buffer, and maintain a full window.
++ *
++ * We need another blockSize worth of buffer so that we can store split
++ * literals at the end of the block without overwriting the extDict window.
++ */
++ unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+@@ -1782,6 +1906,11 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
+ return minRBSize;
+ }
+
++size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
++{
++ return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX);
++}
++
+ size_t ZSTD_estimateDStreamSize(size_t windowSize)
+ {
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+@@ -1918,7 +2047,6 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->refMultipleDDicts && zds->ddictSet) {
ZSTD_DCtx_selectFrameDDict(zds);
}
@@ -13038,7 +16904,7 @@ index 6b3177c94..03dbdf391 100644
if (ZSTD_isError(hSize)) {
return hSize; /* error */
}
-@@ -1932,6 +2020,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -1932,6 +2060,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->lhSize += remainingInput;
}
input->pos = input->size;
@@ -13050,10 +16916,18 @@ index 6b3177c94..03dbdf391 100644
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
-@@ -1949,8 +2042,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -1943,14 +2076,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+- size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
++ size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format);
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
- DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+- DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
++ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
+ assert(istart != NULL);
ip = istart + cSize;
- op += decompressedSize;
@@ -13061,7 +16935,32 @@ index 6b3177c94..03dbdf391 100644
zds->expected = 0;
zds->streamStage = zdss_init;
someMoreWork = 0;
-@@ -2034,6 +2128,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -1969,7 +2103,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+ DEBUGLOG(4, "Consume header");
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
+
+- if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
++ if (zds->format == ZSTD_f_zstd1
++ && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
+ zds->stage = ZSTDds_skipFrame;
+ } else {
+@@ -1985,11 +2120,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+ RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+ frameParameter_windowTooLarge, "");
++ if (zds->maxBlockSizeParam != 0)
++ zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam);
+
+ /* Adapt buffer sizes to frame header instructions */
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
+- ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
++ ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax)
+ : 0;
+
+ ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
+@@ -2034,6 +2171,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
@@ -13069,7 +16968,7 @@ index 6b3177c94..03dbdf391 100644
ip += neededInSize;
/* Function modifies the stage so we must break */
break;
-@@ -2048,7 +2143,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -2048,7 +2186,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
@@ -13078,7 +16977,7 @@ index 6b3177c94..03dbdf391 100644
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
-@@ -2057,8 +2152,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -2057,8 +2195,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
}
@@ -13092,7 +16991,7 @@ index 6b3177c94..03dbdf391 100644
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
-@@ -2068,14 +2166,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -2068,14 +2209,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
break;
}
case zdss_flush:
@@ -13113,7 +17012,7 @@ index 6b3177c94..03dbdf391 100644
DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
(int)(zds->outBuffSize - zds->outStart),
(U32)zds->fParams.blockSizeMax);
-@@ -2089,7 +2190,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -2089,7 +2233,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
default:
assert(0); /* impossible */
@@ -13122,7 +17021,7 @@ index 6b3177c94..03dbdf391 100644
} }
/* result */
-@@ -2102,8 +2203,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
+@@ -2102,8 +2246,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
@@ -13133,7 +17032,7 @@ index 6b3177c94..03dbdf391 100644
assert(0);
}
} else {
-@@ -2140,11 +2241,17 @@ size_t ZSTD_decompressStream_simpleArgs (
+@@ -2140,11 +2284,17 @@ size_t ZSTD_decompressStream_simpleArgs (
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos)
{
@@ -13159,7 +17058,7 @@ index 6b3177c94..03dbdf391 100644
+ }
}
diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
-index c1913b8e7..9f5577e5b 100644
+index c1913b8e7..9fe9a12c8 100644
--- a/lib/zstd/decompress/zstd_decompress_block.c
+++ b/lib/zstd/decompress/zstd_decompress_block.c
@@ -1,5 +1,6 @@
@@ -13184,16 +17083,110 @@ index c1913b8e7..9f5577e5b 100644
/*_*******************************************************
* Macros
-@@ -89,7 +90,7 @@ static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const
+@@ -51,6 +52,13 @@ static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
+ * Block decoding
+ ***************************************************************/
+
++static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx)
++{
++ size_t const blockSizeMax = dctx->isFrameDecompression ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX;
++ assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX);
++ return blockSizeMax;
++}
++
+ /*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+@@ -73,41 +81,49 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
+ const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
+ {
+- if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH)
+- {
+- /* room for litbuffer to fit without read faulting */
+- dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH;
++ size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
++ assert(litSize <= blockSizeMax);
++ assert(dctx->isFrameDecompression || streaming == not_streaming);
++ assert(expectedWriteSize <= blockSizeMax);
++ if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) {
++ /* If we aren't streaming, we can just put the literals after the output
++ * of the current block. We don't need to worry about overwriting the
++ * extDict of our window, because it doesn't exist.
++ * So if we have space after the end of the block, just put it there.
++ */
++ dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_in_dst;
+- }
+- else if (litSize > ZSTD_LITBUFFEREXTRASIZE)
+- {
+- /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
++ } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) {
++ /* Literals fit entirely within the extra buffer, put them there to avoid
++ * having to split the literals.
++ */
++ dctx->litBuffer = dctx->litExtraBuffer;
++ dctx->litBufferEnd = dctx->litBuffer + litSize;
++ dctx->litBufferLocation = ZSTD_not_in_dst;
++ } else {
++ assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE);
++ /* Literals must be split between the output block and the extra lit
++ * buffer. We fill the extra lit buffer with the tail of the literals,
++ * and put the rest of the literals at the end of the block, with
++ * WILDCOPY_OVERLENGTH of buffer room to allow for overreads.
++ * This MUST not write more than our maxBlockSize beyond dst, because in
++ * streaming mode, that could overwrite part of our extDict window.
++ */
+ if (splitImmediately) {
+ /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
- }
- else {
+- }
+- else {
- /* initially this will be stored entirely in dst during huffman decoding, it will partially shifted to litExtraBuffer after */
++ } else {
+ /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
}
-@@ -134,13 +135,16 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ dctx->litBufferLocation = ZSTD_split;
+- }
+- else
+- {
+- /* fits entirely within litExtraBuffer, so no split is necessary */
+- dctx->litBuffer = dctx->litExtraBuffer;
+- dctx->litBufferEnd = dctx->litBuffer + litSize;
+- dctx->litBufferLocation = ZSTD_not_in_dst;
++ assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize);
+ }
+ }
+
+-/* Hidden declaration for fullbench */
+-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+- const void* src, size_t srcSize,
+- void* dst, size_t dstCapacity, const streaming_operation streaming);
+ /*! ZSTD_decodeLiteralsBlock() :
+ * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
+ * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
+@@ -116,7 +132,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ *
+ * @return : nb of bytes read from src (< srcSize )
+ * note : symbol not declared but exposed for fullbench */
+-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
++static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
+ void* dst, size_t dstCapacity, const streaming_operation streaming)
+ {
+@@ -125,6 +141,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+
+ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
++ size_t const blockSizeMax = ZSTD_blockSizeMax(dctx);
+
+ switch(litEncType)
+ {
+@@ -134,13 +151,16 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
ZSTD_FALLTHROUGH;
case set_compressed:
@@ -13204,17 +17197,20 @@ index c1913b8e7..9f5577e5b 100644
U32 const lhlCode = (istart[0] >> 2) & 3;
U32 const lhc = MEM_readLE32(istart);
size_t hufSuccess;
- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
++ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
+ int const flags = 0
+ | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0)
+ | (dctx->disableHufAsm ? HUF_flags_disableAsm : 0);
switch(lhlCode)
{
case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */
-@@ -165,6 +169,10 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+@@ -164,7 +184,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
++ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ if (!singleStream)
+ RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong,
+ "Not enough literals (%zu) for the 4-streams mode (min %u)",
@@ -13222,7 +17218,7 @@ index c1913b8e7..9f5577e5b 100644
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0);
-@@ -176,13 +184,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+@@ -176,13 +200,14 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
if (litEncType==set_repeat) {
if (singleStream) {
@@ -13241,7 +17237,7 @@ index c1913b8e7..9f5577e5b 100644
}
} else {
if (singleStream) {
-@@ -190,18 +199,18 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+@@ -190,26 +215,28 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
hufSuccess = HUF_decompress1X_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
@@ -13265,7 +17261,26 @@ index c1913b8e7..9f5577e5b 100644
}
}
if (dctx->litBufferLocation == ZSTD_split)
-@@ -237,6 +246,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ {
++ assert(litSize > ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
++ assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax);
+ }
+
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
+@@ -224,7 +251,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ case set_basic:
+ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
++ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
+@@ -237,11 +264,13 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
case 3:
lhSize = 3;
@@ -13273,7 +17288,22 @@ index c1913b8e7..9f5577e5b 100644
litSize = MEM_readLE24(istart) >> 4;
break;
}
-@@ -279,12 +289,13 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
++ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+@@ -270,7 +299,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ case set_rle:
+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+- size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
++ size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity);
+ switch(lhlCode)
+ {
+ case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */
+@@ -279,16 +308,17 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
break;
case 1:
lhSize = 2;
@@ -13288,7 +17318,31 @@ index c1913b8e7..9f5577e5b 100644
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
-@@ -506,14 +517,15 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
+- RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
++ RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (dctx->litBufferLocation == ZSTD_split)
+@@ -310,6 +340,18 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ }
+ }
+
++/* Hidden declaration for fullbench */
++size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
++ const void* src, size_t srcSize,
++ void* dst, size_t dstCapacity);
++size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx,
++ const void* src, size_t srcSize,
++ void* dst, size_t dstCapacity)
++{
++ dctx->isFrameDecompression = 0;
++ return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming);
++}
++
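++/* Why the wrapper above exists (sketch): ZSTD_decodeLiteralsBlock() is now
++ * static, so the fullbench harness links against this shim instead; it
++ * pins isFrameDecompression = 0 so that ZSTD_blockSizeMax() falls back to
++ * ZSTD_BLOCKSIZE_MAX for standalone blocks. A hypothetical harness call:
++ *   size_t const r = ZSTD_decodeLiteralsBlock_wrapper(dctx, src, srcSize, dst, dstCapacity);
++ */
++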
+ /* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
+@@ -506,14 +548,15 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
@@ -13307,7 +17361,7 @@ index c1913b8e7..9f5577e5b 100644
*/
{
size_t position = 0;
-@@ -540,7 +552,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
+@@ -540,7 +583,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
for (i=0; i<n; i++) {
tableDecode[position].baseValue = s;
position = (position + step) & tableMask;
@@ -13316,7 +17370,7 @@ index c1913b8e7..9f5577e5b 100644
} }
assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
-@@ -551,7 +563,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
+@@ -551,7 +594,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
for (u=0; u<tableSize; u++) {
U32 const symbol = tableDecode[u].baseValue;
U32 const nextState = symbolNext[symbol]++;
@@ -13325,7 +17379,69 @@ index c1913b8e7..9f5577e5b 100644
tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
assert(nbAdditionalBits[symbol] < 255);
tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
-@@ -964,6 +976,11 @@ size_t ZSTD_execSequence(BYTE* op,
+@@ -664,11 +707,6 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+
+ /* SeqHead */
+ nbSeq = *ip++;
+- if (!nbSeq) {
+- *nbSeqPtr=0;
+- RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
+- return 1;
+- }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
+@@ -681,8 +719,16 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ }
+ *nbSeqPtr = nbSeq;
+
++ if (nbSeq == 0) {
++ /* No sequence : section ends immediately */
++ RETURN_ERROR_IF(ip != iend, corruption_detected,
++ "extraneous data present in the Sequences section");
++ return (size_t)(ip - istart);
++ }
++
+ /* FSE table descriptors */
+ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
++ RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */
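++ /* Header-byte layout assumed here (per the zstd format spec, RFC 8878):
++  *   bits 7-6 Literals_Lengths_Mode | bits 5-4 Offsets_Mode
++  *   bits 3-2 Match_Lengths_Mode    | bits 1-0 Reserved (must be zero)
++  * The shifts below ((*ip >> 6), (*ip >> 4) & 3, (*ip >> 2) & 3) extract
++  * the three modes; the new `*ip & 3` check rejects a nonzero Reserved field. */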
+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+@@ -829,7 +875,7 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt
+ /* ZSTD_safecopyDstBeforeSrc():
+ * This version allows overlap with dst before src, or handles the non-overlap case with dst after src
+ * Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
+-static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) {
++static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+@@ -858,6 +904,7 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length
+ * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
+ */
+ FORCE_NOINLINE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_execSequenceEnd(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+@@ -905,6 +952,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
+ * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case.
+ */
+ FORCE_NOINLINE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+@@ -950,6 +998,7 @@ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
+ }
+
+ HINT_INLINE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+@@ -964,6 +1013,11 @@ size_t ZSTD_execSequence(BYTE* op,
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
@@ -13337,7 +17453,15 @@ index c1913b8e7..9f5577e5b 100644
/* Handle edge cases in a slow path:
* - Read beyond end of literals
* - Match end is within WILDCOPY_OVERLIMIT of oend
-@@ -1154,7 +1171,7 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16
+@@ -1043,6 +1097,7 @@ size_t ZSTD_execSequence(BYTE* op,
+ }
+
+ HINT_INLINE
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+@@ -1154,7 +1209,7 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16
}
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
@@ -13346,14 +17470,25 @@ index c1913b8e7..9f5577e5b 100644
* bits before reloading. This value is the maximum number of bytes we read
* after reloading when we are decoding long offsets.
*/
-@@ -1169,9 +1186,27 @@ FORCE_INLINE_TEMPLATE seq_t
- ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+@@ -1165,13 +1220,37 @@ ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16
+
+ typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
++/*
++ * ZSTD_decodeSequence():
++ * @p longOffsets : tells the decoder to reload more bits while decoding large offsets

++ * only used in 32-bit mode
++ * @return : Sequence (litL + matchL + offset)
++ */
+ FORCE_INLINE_TEMPLATE seq_t
+-ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
++ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq)
{
seq_t seq;
+ /*
-+ * ZSTD_seqSymbol is a structure with a total of 64 bits wide. So it can be
-+ * loaded in one operation and extracted its fields by simply shifting or
-+ * bit-extracting on aarch64.
++ * ZSTD_seqSymbol is a 64 bits wide structure.
++ * It can be loaded in one operation
++ * and its fields extracted by simply shifting or bit-extracting on aarch64.
+ * GCC doesn't recognize this and generates more unnecessary ldr/ldrb/ldrh
+ * operations that cause performance drop. This can be avoided by using this
+ * ZSTD_memcpy hack.
@@ -13374,7 +17509,7 @@ index c1913b8e7..9f5577e5b 100644
seq.matchLength = mlDInfo->baseValue;
seq.litLength = llDInfo->baseValue;
{ U32 const ofBase = ofDInfo->baseValue;
-@@ -1186,28 +1221,31 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+@@ -1186,28 +1265,31 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
U32 const llnbBits = llDInfo->nbBits;
U32 const mlnbBits = mlDInfo->nbBits;
U32 const ofnbBits = ofDInfo->nbBits;
@@ -13415,7 +17550,16 @@ index c1913b8e7..9f5577e5b 100644
} else {
offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
-@@ -1232,11 +1270,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+@@ -1224,7 +1306,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+ } else {
+ offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
+ { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+- temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
++ temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
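++ /* Worked example of the branchless guard above (values hypothetical):
++  *   corrupt input, temp == 0 : temp -= !temp  =>  (size_t)-1, later
++  *     rejected as an impossible offset by ZSTD_execSequence();
++  *   valid input,  temp == 7 : temp -= !temp  =>  7, unchanged.
++  * The previous `temp += !temp` mapped the invalid 0 to a plausible 1. */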
+@@ -1232,11 +1314,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
seq.offset = offset;
}
@@ -13427,7 +17571,7 @@ index c1913b8e7..9f5577e5b 100644
seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
-@@ -1246,11 +1280,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+@@ -1246,11 +1324,7 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
/* Ensure there are enough bits to read the rest of data in 64-bit mode. */
ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
@@ -13439,16 +17583,656 @@ index c1913b8e7..9f5577e5b 100644
seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
if (MEM_32bits())
-@@ -1552,7 +1582,7 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+@@ -1259,17 +1333,22 @@ ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+- ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
+- ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
+- if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+- ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
++ if (!isLastSeq) {
++ /* don't update FSE state for last Sequence */
++ ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
++ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
++ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
++ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
++ BIT_reloadDStream(&seqState->DStream);
++ }
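++ /* Rationale (sketch): after the final sequence of a block no further
++  * symbols are decoded, so refreshing the three FSE states would only
++  * burn bits; the decode loops pass isLastSeq = (nbSeq == 1) to elide it. */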
+ }
+
+ return seq;
+ }
+
+-#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+-MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
++#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
++#if DEBUGLEVEL >= 1
++static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+ {
+ size_t const windowSize = dctx->fParams.windowSize;
+ /* No dictionary used. */
+@@ -1283,30 +1362,33 @@ MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefix
+ /* Dictionary is active. */
+ return 1;
+ }
++#endif
+
+-MEM_STATIC void ZSTD_assertValidSequence(
++static void ZSTD_assertValidSequence(
+ ZSTD_DCtx const* dctx,
+ BYTE const* op, BYTE const* oend,
+ seq_t const seq,
+ BYTE const* prefixStart, BYTE const* virtualStart)
+ {
+ #if DEBUGLEVEL >= 1
+- size_t const windowSize = dctx->fParams.windowSize;
+- size_t const sequenceSize = seq.litLength + seq.matchLength;
+- BYTE const* const oLitEnd = op + seq.litLength;
+- DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
+- (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+- assert(op <= oend);
+- assert((size_t)(oend - op) >= sequenceSize);
+- assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
+- if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
+- size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
+- /* Offset must be within the dictionary. */
+- assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
+- assert(seq.offset <= windowSize + dictSize);
+- } else {
+- /* Offset must be within our window. */
+- assert(seq.offset <= windowSize);
++ if (dctx->isFrameDecompression) {
++ size_t const windowSize = dctx->fParams.windowSize;
++ size_t const sequenceSize = seq.litLength + seq.matchLength;
++ BYTE const* const oLitEnd = op + seq.litLength;
++ DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
++ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
++ assert(op <= oend);
++ assert((size_t)(oend - op) >= sequenceSize);
++ assert(sequenceSize <= ZSTD_blockSizeMax(dctx));
++ if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
++ size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
++ /* Offset must be within the dictionary. */
++ assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
++ assert(seq.offset <= windowSize + dictSize);
++ } else {
++ /* Offset must be within our window. */
++ assert(seq.offset <= windowSize);
++ }
+ }
+ #else
+ (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
+@@ -1322,23 +1404,21 @@ DONT_VECTORIZE
+ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+- BYTE* const oend = ostart + maxDstSize;
++ BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
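++ /* ZSTD_maybeNullPtrAdd() guards the dst == NULL case: forming
++  * NULL + maxDstSize is undefined behaviour in C, so the helper only
++  * performs the addition for a non-NULL base (sketch of the intent). */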
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+- DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer");
+- (void)frame;
++ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq);
+
+- /* Regen sequences */
++ /* Literals are split between internal buffer & output buffer */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+@@ -1357,8 +1437,7 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ BIT_DStream_completed < BIT_DStream_overflow);
+
+ /* decompress without overrunning litPtr begins */
+- {
+- seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
++ { seq_t sequence = {0,0,0}; /* some static analyzers believe that @sequence is not initialized (it necessarily is, since the for(;;) loop has at least one iteration) */
+ /* Align the decompression loop to 32 + 16 bytes.
+ *
+ * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
+@@ -1420,27 +1499,26 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ #endif
+
+ /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
+- for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) {
+- size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
++ for ( ; nbSeq; nbSeq--) {
++ sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
++ if (litPtr + sequence.litLength > dctx->litBufferEnd) break;
++ { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+- assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
++ assert(!ZSTD_isError(oneSeqSize));
++ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ #endif
+- if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+- return oneSeqSize;
+- DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+- op += oneSeqSize;
+- if (UNLIKELY(!--nbSeq))
+- break;
+- BIT_reloadDStream(&(seqState.DStream));
+- sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+- }
++ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
++ return oneSeqSize;
++ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
++ op += oneSeqSize;
++ } }
++ DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)");
+
+ /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
+ if (nbSeq > 0) {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+- if (leftoverLit)
+- {
++ DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength);
++ if (leftoverLit) {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence.litLength -= leftoverLit;
+@@ -1449,24 +1527,22 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+- {
+- size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
++ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
++ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ #endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+- if (--nbSeq)
+- BIT_reloadDStream(&(seqState.DStream));
+ }
++ nbSeq--;
+ }
+ }
+
+- if (nbSeq > 0) /* there is remaining lit from extra buffer */
+- {
++ if (nbSeq > 0) {
++ /* there is remaining lit from extra buffer */
+
+ #if defined(__x86_64__)
+ __asm__(".p2align 6");
+@@ -1485,35 +1561,34 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ # endif
+ #endif
+
+- for (; ; ) {
+- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
++ for ( ; nbSeq ; nbSeq--) {
++ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
++ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ #endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+- if (UNLIKELY(!--nbSeq))
+- break;
+- BIT_reloadDStream(&(seqState.DStream));
+ }
+ }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
+ RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+- RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
++ DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed);
++ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+- if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
+- {
+- size_t const lastLLSize = litBufferEnd - litPtr;
++ if (dctx->litBufferLocation == ZSTD_split) {
++ /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
++ size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
++ DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+@@ -1523,15 +1598,17 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ }
+- { size_t const lastLLSize = litBufferEnd - litPtr;
++ /* copy last literals from internal buffer */
++ { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr);
++ DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+- }
+- }
++ } }
+
+- return op-ostart;
++ DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
++ return (size_t)(op - ostart);
+ }
+
+ FORCE_INLINE_TEMPLATE size_t
+@@ -1539,21 +1616,19 @@ DONT_VECTORIZE
+ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+- BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer;
++ BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
- DEBUGLOG(5, "ZSTD_decompressSequences_body");
+- (void)frame;
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq);
- (void)frame;
/* Regen sequences */
-@@ -1945,34 +1975,79 @@ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ if (nbSeq) {
+@@ -1568,11 +1643,6 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+- ZSTD_STATIC_ASSERT(
+- BIT_DStream_unfinished < BIT_DStream_completed &&
+- BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+- BIT_DStream_completed < BIT_DStream_overflow);
+-
+ #if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+@@ -1587,73 +1657,70 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+ # endif
+ #endif
+
+- for ( ; ; ) {
+- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
++ for ( ; nbSeq ; nbSeq--) {
++ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
++ ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+ #endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+- if (UNLIKELY(!--nbSeq))
+- break;
+- BIT_reloadDStream(&(seqState.DStream));
+ }
+
+ /* check if reached exact end */
+- DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+- RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+- RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
++ assert(nbSeq == 0);
++ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
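++ /* BIT_endOfDStream() tightens the old reload-based test: the bit
++  * stream must now be exactly consumed, so a block carrying trailing
++  * unread bits is reported as corruption instead of being accepted. */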
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+- { size_t const lastLLSize = litEnd - litPtr;
++ { size_t const lastLLSize = (size_t)(litEnd - litPtr);
++ DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+- }
+- }
++ } }
+
+- return op-ostart;
++ DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart));
++ return (size_t)(op - ostart);
+ }
+
+ static size_t
+ ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+
+ static size_t
+ ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+- return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+
+-FORCE_INLINE_TEMPLATE size_t
+-ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
++FORCE_INLINE_TEMPLATE
++size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
+ const BYTE* const prefixStart, const BYTE* const dictEnd)
+ {
+ prefetchPos += sequence.litLength;
+ { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
+- const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
+- * No consequence though : memory address is only used for prefetching, not for dereferencing */
++ /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
++ * No consequence though : memory address is only used for prefetching, not for dereferencing */
++ const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset);
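++ /* The wrapped-pointer helpers pair with the ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
++  * annotations added elsewhere in this patch: the overflow is deliberate and
++  * harmless (the address is only prefetched, never dereferenced), and is now
++  * expressed where sanitizers are told to permit it. */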
+ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ return prefetchPos + sequence.matchLength;
+@@ -1668,20 +1735,18 @@ ZSTD_decompressSequencesLong_body(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+- BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize;
++ BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+- (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+@@ -1706,20 +1771,17 @@ ZSTD_decompressSequencesLong_body(
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ /* prepare in advance */
+- for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+- seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
++ for (seqNb=0; seqNb<seqAdvance; seqNb++) {
++ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1);
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb] = sequence;
+ }
+- RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
+
+ /* decompress without stomping litBuffer */
+- for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) {
+- seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+- size_t oneSeqSize;
++ for (; seqNb < nbSeq; seqNb++) {
++ seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1);
+
+- if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd)
+- {
++ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) {
+ /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+@@ -1732,26 +1794,26 @@ ZSTD_decompressSequencesLong_body(
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+- oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
++ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+- assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
++ assert(!ZSTD_isError(oneSeqSize));
++ ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+ #endif
+- if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
++ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+- prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+- sequences[seqNb & STORED_SEQS_MASK] = sequence;
+- op += oneSeqSize;
+- }
++ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
++ sequences[seqNb & STORED_SEQS_MASK] = sequence;
++ op += oneSeqSize;
++ } }
+ else
+ {
+ /* lit buffer is either wholly contained in first or second split, or not split at all*/
+- oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
++ size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
++ ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+ #endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+@@ -1760,17 +1822,15 @@ ZSTD_decompressSequencesLong_body(
+ op += oneSeqSize;
+ }
+ }
+- RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
++ RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, "");
+
+ /* finish queue */
+ seqNb -= seqAdvance;
+ for ( ; seqNb<nbSeq ; seqNb++) {
+ seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
+- if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd)
+- {
++ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+- if (leftoverLit)
+- {
++ if (leftoverLit) {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence->litLength -= leftoverLit;
+@@ -1779,11 +1839,10 @@ ZSTD_decompressSequencesLong_body(
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+- {
+- size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
++ { size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
++ ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+ #endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+@@ -1796,7 +1855,7 @@ ZSTD_decompressSequencesLong_body(
+ ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+ #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+- if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
++ ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+ #endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+@@ -1808,8 +1867,7 @@ ZSTD_decompressSequencesLong_body(
+ }
+
+ /* last literal segment */
+- if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */
+- {
++ if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */
+ size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+@@ -1827,17 +1885,16 @@ ZSTD_decompressSequencesLong_body(
+ }
+ }
+
+- return op-ostart;
++ return (size_t)(op - ostart);
+ }
+
+ static size_t
+ ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+@@ -1851,20 +1908,18 @@ DONT_VECTORIZE
+ ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+- return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ static BMI2_TARGET_ATTRIBUTE size_t
+ DONT_VECTORIZE
+ ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+- return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+@@ -1873,10 +1928,9 @@ static BMI2_TARGET_ATTRIBUTE size_t
+ ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+- return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+@@ -1886,37 +1940,34 @@ typedef size_t (*ZSTD_decompressSequences_t)(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame);
++ const ZSTD_longOffset_e isLongOffset);
+
+ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+ static size_t
+ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+ DEBUGLOG(5, "ZSTD_decompressSequences");
+ #if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+- return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif
+- return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ static size_t
+ ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+ DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
+ #if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+- return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif
+- return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+@@ -1931,69 +1982,114 @@ static size_t
+ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+- const ZSTD_longOffset_e isLongOffset,
+- const int frame)
++ const ZSTD_longOffset_e isLongOffset)
+ {
+ DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+ #if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+- return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
+ #endif
+- return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+ }
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
@@ -13548,8 +18332,9 @@ index c1913b8e7..9f5577e5b 100644
size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
-@@ -1980,20 +2055,21 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
- const void* src, size_t srcSize, const int frame, const streaming_operation streaming)
+ void* dst, size_t dstCapacity,
+- const void* src, size_t srcSize, const int frame, const streaming_operation streaming)
++ const void* src, size_t srcSize, const streaming_operation streaming)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
- /* isLongOffset must be true if there are long offsets.
@@ -13559,18 +18344,20 @@ index c1913b8e7..9f5577e5b 100644
- * (note: but it could be evaluated from current-lowLimit)
- */
- ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
- DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
-
+- DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+-
- RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
++ DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize);
++
+ /* Note : the wording of the specification
-+ * allows compressed block to be sized exactly ZSTD_BLOCKSIZE_MAX.
++ * allows compressed block to be sized exactly ZSTD_blockSizeMax(dctx).
+ * This generally does not happen, as it makes little sense,
+ * since an uncompressed block would feature same size and have no decompression cost.
+ * Also, note that decoder from reference libzstd before < v1.5.4
+ * would consider this edge case as an error.
-+ * As a consequence, avoid generating compressed blocks of size ZSTD_BLOCKSIZE_MAX
++ * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx)
+ * for broader compatibility with the deployed ecosystem of zstd decoders */
-+ RETURN_ERROR_IF(srcSize > ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
++ RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, "");
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
@@ -13579,15 +18366,15 @@ index c1913b8e7..9f5577e5b 100644
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
-@@ -2001,6 +2077,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+@@ -2001,6 +2097,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
/* Build Decoding Tables */
{
+ /* Compute the maximum block size, which must also work when !frame and fParams are unset.
+ * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
+ */
-+ size_t const blockSizeMax = MIN(dstCapacity, (frame ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX));
-+ size_t const totalHistorySize = ZSTD_totalHistorySize((BYTE*)dst + blockSizeMax, (BYTE const*)dctx->virtualStart);
++ size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx));
++ size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart);
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than ZSTD_maxShortOffset().
+ * We don't expect that to be the case in 64-bit mode.
@@ -13603,7 +18390,7 @@ index c1913b8e7..9f5577e5b 100644
/* These macros control at build-time which decompressor implementation
* we use. If neither is defined, we do some inspection and dispatch at
* runtime.
-@@ -2008,6 +2101,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+@@ -2008,6 +2121,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
int usePrefetchDecoder = dctx->ddictIsCold;
@@ -13615,7 +18402,7 @@ index c1913b8e7..9f5577e5b 100644
#endif
int nbSeq;
size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
-@@ -2015,28 +2113,42 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+@@ -2015,40 +2133,55 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ip += seqHSize;
srcSize -= seqHSize;
@@ -13663,13 +18450,29 @@ index c1913b8e7..9f5577e5b 100644
+ {
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+- return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
#endif
+ }
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
/* else */
-@@ -2060,9 +2172,9 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
+ if (dctx->litBufferLocation == ZSTD_split)
+- return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+ else
+- return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
++ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+ #endif
+ }
+ }
+
+
++ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
+ {
+ if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
+@@ -2060,13 +2193,24 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
}
@@ -13681,8 +18484,11 @@ index c1913b8e7..9f5577e5b 100644
+ const void* src, size_t srcSize)
{
size_t dSize;
++ dctx->isFrameDecompression = 0;
ZSTD_checkContinuity(dctx, dst, dstCapacity);
-@@ -2070,3 +2182,12 @@ size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+- dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming);
++ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming);
++ FORWARD_IF_ERROR(dSize, "");
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
}
@@ -13696,7 +18502,7 @@ index c1913b8e7..9f5577e5b 100644
+ return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize);
+}
diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
-index 3d2d57a5d..5888e6cc7 100644
+index 3d2d57a5d..becffbd89 100644
--- a/lib/zstd/decompress/zstd_decompress_block.h
+++ b/lib/zstd/decompress/zstd_decompress_block.h
@@ -1,5 +1,6 @@
@@ -13707,6 +18513,15 @@ index 3d2d57a5d..5888e6cc7 100644
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
+@@ -47,7 +48,7 @@ typedef enum {
+ */
+ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+- const void* src, size_t srcSize, const int frame, const streaming_operation streaming);
++ const void* src, size_t srcSize, const streaming_operation streaming);
+
+ /* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
@@ -64,5 +65,10 @@ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
unsigned tableLog, void* wksp, size_t wkspSize,
int bmi2);
@@ -13719,7 +18534,7 @@ index 3d2d57a5d..5888e6cc7 100644
#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
-index 98102edb6..32f79fb28 100644
+index 98102edb6..0f02526be 100644
--- a/lib/zstd/decompress/zstd_decompress_internal.h
+++ b/lib/zstd/decompress/zstd_decompress_internal.h
@@ -1,5 +1,6 @@
@@ -13745,11 +18560,20 @@ index 98102edb6..32f79fb28 100644
U32 rep[ZSTD_REP_NUM];
U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
} ZSTD_entropyDTables_t;
-@@ -164,6 +166,7 @@ struct ZSTD_DCtx_s
+@@ -152,6 +154,7 @@ struct ZSTD_DCtx_s
+ size_t litSize;
+ size_t rleSize;
+ size_t staticSize;
++ int isFrameDecompression;
+ #if DYNAMIC_BMI2 != 0
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+ #endif
+@@ -164,6 +167,8 @@ struct ZSTD_DCtx_s
ZSTD_dictUses_e dictUses;
ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
+ int disableHufAsm;
++ int maxBlockSizeParam;
/* streaming */
ZSTD_dStreamStage streamStage;
@@ -13820,3 +18644,70 @@ index f4ed952ed..7d31518e9 100644
}
EXPORT_SYMBOL(zstd_reset_dstream);
+--
+2.43.0.232.ge79552d197
+
+
+From 0fb615f8227aebe53e7aec5f9949a47ecb3d10f5 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Mon, 22 Jan 2024 16:27:56 -0800
+Subject: [PATCH 2/2] lib: zstd: Refactor intentional wrap-around test
+
+In an effort to separate intentional arithmetic wrap-around from
+unexpected wrap-around, we need to refactor places that depend on this
+kind of math. One of the most common code patterns of this is:
+
+ VAR + value < VAR
+
+Notably, this is considered "undefined behavior" for signed and pointer
+types, which the kernel works around by using the -fno-strict-overflow
+option in the build[1] (which used to just be -fwrapv). Regardless, we
+want to get the kernel source to the position where we can meaningfully
+instrument arithmetic wrap-around conditions and catch them when they
+are unexpected, regardless of whether they are signed[2], unsigned[3],
+or pointer[4] types.
+
+Switch to a more regular type for a 64-bit value and refactor the
+open-coded wrap-around addition test to use subtraction from the type max
+(since add_would_overflow() may not be defined in early boot code). This
+paves the way to enabling the wrap-around sanitizers in the future.
+
+Link: https://git.kernel.org/linus/68df3755e383e6fecf2354a67b08f92f18536594 [1]
+Link: https://github.com/KSPP/linux/issues/26 [2]
+Link: https://github.com/KSPP/linux/issues/27 [3]
+Link: https://github.com/KSPP/linux/issues/344 [4]
+Cc: Nick Terrell <terrelln@fb.com>
+Cc: Paul Jones <paul@pauljones.id.au>
+Cc: Sedat Dilek <sedat.dilek@gmail.com>
+Cc: Oleksandr Natalenko <oleksandr@natalenko.name>
+Cc: Xin Gao <gaoxin@cdjrlc.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+---
+ lib/zstd/decompress/zstd_decompress.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
+index b2bfa2b16..c9cbc45f6 100644
+--- a/lib/zstd/decompress/zstd_decompress.c
++++ b/lib/zstd/decompress/zstd_decompress.c
+@@ -618,7 +618,7 @@ size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
+ * @return : decompressed size of the frames contained */
+ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+ {
+- unsigned long long totalDstSize = 0;
++ U64 totalDstSize = 0;
+
+ while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
+ U32 const magicNumber = MEM_readLE32(src);
+@@ -636,7 +636,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+ { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize);
+ if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs;
+
+- if (totalDstSize + fcs < totalDstSize)
++ if (U64_MAX - totalDstSize < fcs)
+ return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */
+ totalDstSize += fcs;
+ }
+--
+2.43.0.232.ge79552d197
+
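For reference, a minimal standalone sketch in plain C of the wrap-around refactor the patch above describes. This is illustrative only, not part of the patch: the U64 typedef and U64_MAX macro stand in for the kernel's mem.h definitions, and the function names are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t U64;
    #define U64_MAX UINT64_MAX

    /* Old form: performs the addition and checks whether the result
     * wrapped. Well-defined for unsigned types, but it is exactly the
     * pattern a wrap-around sanitizer would have to flag. */
    static int add_overflows_old(U64 total, U64 add)
    {
        return total + add < total;
    }

    /* New form: checks the remaining headroom before adding, so the
     * arithmetic never wraps and sanitizers can stay enabled. */
    static int add_overflows_new(U64 total, U64 add)
    {
        return U64_MAX - total < add;
    }

    int main(void)
    {
        U64 total = U64_MAX - 5;
        /* Both checks report 1 here: adding 10 would exceed U64_MAX. */
        printf("old: %d, new: %d\n",
               add_overflows_old(total, 10),
               add_overflows_new(total, 10));
        return 0;
    }

This mirrors the hunk above, where `totalDstSize + fcs < totalDstSize` becomes `U64_MAX - totalDstSize < fcs`: the result is identical for unsigned values, but the rewritten test contains no wrap-around for instrumentation to trip over.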
diff --git a/system/easy-kernel/1000-version.patch b/system/easy-kernel/1000-version.patch
index 9e24d3e19..85e45653d 100644
--- a/system/easy-kernel/1000-version.patch
+++ b/system/easy-kernel/1000-version.patch
@@ -4,10 +4,10 @@ diff -Naur linux-6.6/Makefile linux-6.6-branded/Makefile
@@ -2,8 +2,8 @@
VERSION = 6
PATCHLEVEL = 6
- SUBLEVEL = 6
+ SUBLEVEL = 58
-EXTRAVERSION =
--NAME = Hurr durr I'ma ninja sloth
-+EXTRAVERSION = -mc1
+-NAME = Pinguïn Aangedreven
++EXTRAVERSION = -mc2
+NAME = Hard Rock Hallelujah
# *DOCUMENTATION*
diff --git a/system/easy-kernel/APKBUILD b/system/easy-kernel/APKBUILD
index 0811d21ed..2dd8e2a67 100644
--- a/system/easy-kernel/APKBUILD
+++ b/system/easy-kernel/APKBUILD
@@ -2,9 +2,9 @@
# Maintainer: Adelie Platform Group <adelie-devel@lists.adelielinux.org>
# KEEP THIS IN SYNC with the other easy-kernel packages.
_kflavour=""
-_patchver=1 # must match 1000-version.patch
+_patchver=2 # must match 1000-version.patch
_pkgname=easy-kernel$_kflavour
-pkgver=6.6.6
+pkgver=6.6.58
pkgrel=0
pkgname=$_pkgname-$pkgver-mc$_patchver
pkgdesc="The Linux kernel, packaged for your convenience"
@@ -32,26 +32,27 @@ source="https://cdn.kernel.org/pub/linux/kernel/v${_pkgmajver}.x/linux-${_pkgmin
config-x86_64
kernel.h
- 0100-linux-6.6.6.patch
- 0120-XATTR_USER_PREFIX.patch
+ 0100-linux-6.6.58.patch
0122-link-security-restrictions.patch
0124-bluetooth-keysize-check.patch
0126-sign-file-libressl.patch
0200-x86-compile.patch
0202-parisc-disable-prctl.patch
- 0204-amd-deserialised-MSR-access.patch
- 0210-fix-powerbook6-5-audio.patch
+ 0204-sparc-warray-fix.patch
+ 0208-gcc14-objtool-fix.patch
0250-expose-per-process-ksm.patch
+ 0252-rectify-ksm-inheritance.patch
0260-reduce-swappiness.patch
0262-boot-order.patch
- 0300-tmp513-regression-fix.patch
- 0302-iwlwifi-rfkill-fix.patch
+ 0300-correct-max98388-includes.patch
+ 0302-i915-gcc14-fix.patch
+ 0304-fix-powerbook6-5-audio.patch
0400-reduce-pageblock-size-nonhugetlb.patch
0402-mm-optimise-slub.patch
0404-page-cache-not-found.patch
0500-print-fw-info.patch
0502-gcc9-kcflags.patch
- 0504-update-zstd-to-v1_5_5.patch
+ 0504-update-zstd-to-v1_5_6.patch
1000-version.patch
no-require-gnu-tar.patch
@@ -162,37 +163,38 @@ src() {
}
sha512sums="458b2c34d46206f9b4ccbac54cc57aeca1eaecaf831bc441e59701bac6eadffc17f6ce24af6eadd0454964e843186539ac0d63295ad2cc32d112b60360c39a35 linux-6.6.tar.xz
-ee0aa6868315bb41fd16418c4a9d1786eb6b9f946e77ed392f81e4cbe6d4c25ee60cbc345b9ba552c19a66eecc3ca1517014d1301f94cbad863c7e4e24e50ee0 config-aarch64
-84ade351c7fb6ff8772ee3f1179760cf79e2be450c5443cec30b86246f7c76bda59846484f29d0db0177a1db97ee8adda3e616708cbf949e04cd224ef358dadd config-armv7
-586357146131af4537141ae71cbb34d5511174dfd977fdfa31170b4ad19fe77dfc2e9da0301fc53871c84ab9766fc902cfdfd3cc5fc09a671cb70a41173b0fbf config-m68k
-9f65ddeea1bd843af4ec970e507c635d121be1d556d60963668aa61bb0bcc7041dccbff1304dcfc2d2b911dd6db4914770127a7358d15b01fa0c423a5fb964ea config-pmmx
-e196f2641984cc0305f80d9285a35dc47674983d756851b11af58e1a2bb488750016364766aedd46befc07697b3bb04a571ee960332864d54b39ca5cd0c2c568 config-ppc
-01727a368518ee61502fba15a468e6c3985fdd09848c8117e55d9f86c1f4df28bc84191af11e72e023cddb73c4915f6ec7cabd3a9c8f313ae88da29052e6401e config-ppc64
-da70ba6236b233d5b917ccc1e4f428056c45c3e0c7ce6953742b3b2886a0209c4a3b76699f15b1629a5d11a7c1c320bc9aef08ee7bfc5a5d3445c56078a38379 config-sparc64
-fafe1704c04db8ee85c2641e8aa1f32596d02e061fa9c57035957deaa95c409fd820f0edc840f7818ee2752503fd049d8115b202b068544324ac70c09764c779 config-x86_64
+c16e726450602b271802b74bcfced1e2373c21b7cea8108bb722f9b4abcad44ae6e135a26296d76ad869c554360d1e043d163934592e5899e4c9514f179ac094 config-aarch64
+237b7eff4ecab68600497761eb3716c158f1f3fc6e55f70559189cd15ecc381017cb0017a44c766eb016b50d091410e590312d5eaebebb2e6e13495f5602281b config-armv7
+af1495f11e7252ee65af2ce00b551b4715a9d663e1bf8cff1a1235ba922e7d0b9e90dcdacd469db1d7831d8580071f0f7fef24c89362afccde63058393d115f5 config-m68k
+6f8423960598667edbfc67fda03957018d5fa4842a73497d6e57f9af7c7c0ce1e890307e4663347f0864b4522f3c3b22c2de24a5b6840ec029d149d786626a67 config-pmmx
+a4a2e4579a33d2720fb141d83f5a6f1b1b8be701cfee12a479a9b63021974159df7d5984a730dfa8da2e8e761d506ab6f5cce1d72b99ee577582bac7d2348cb0 config-ppc
+60116d4f5c454539d3c86538611bbcd683e46352419bcea1bb0c3f4e08e0915dff3053f80f600b54656eb12fa1f6ca818424ef91f59f0301ce71a0dc4b49a86f config-ppc64
+17a07b7563acba1f5b99b9055198f5f93e7c65432ec46c35fa6c2b5307107ad9d4e8ffea9800f50cf8a8445b69561b9a95ba1ac8cd5bb2b6814dab540edb17d4 config-sparc64
+b2136813ddc4cd7e45ddcce5c5842baaa74918557f1a39621818f2f74392a273ebdc9dbdf79b8e401346731f50c857e24bad9448e1a164f8fd4b71464232bbfb config-x86_64
1ca1bc094a8106b33176ab6cfb55c9514e7d69f1ed30c580eee419a3c422a2e6625d71c4581702e911f36c4bbac175010f46b7123bb81fda3303d4decdc57287 kernel.h
-12236457a3b9d938fdf150b6d62ad4a4655e48e30493ea7c4d8c89b0aabb3a7045d25bd4f0fcf7c19f8ed13f6ad357e0a958a6fb6a7a4a0ff39c7cc61e34f048 0100-linux-6.6.6.patch
-1777c174839d9191ec8530c95ff990505473374723cabd00652c9be478a08c430ecaadab52c4cd0588c42e6991dcb26c5de246b3a0df3253935077476ffbdae6 0120-XATTR_USER_PREFIX.patch
+d0a0498c0200ff65e1feecf04dfad915fe4cc96bedaa6ed41b920d3385a67998649abc85254fd092d59bac756d67e93eabed4e909694fff13b12a1b7da7dddfd 0100-linux-6.6.58.patch
d333494e1a261175ab11d84ace49ad3dcb010614e61d0bfe1d39d7c330d1c0e0311699423fbec5841c9c6ff514f4f5b1e63072f0289450ac2883f1d3a80c2961 0122-link-security-restrictions.patch
dc47b18749d95a456f8bc47fd6a0618c286b646b38466c3d950dfbeb25adf3fc1a794e95552e4da1abb58e49f0bd841f7222e71c4d04cb0264ca23476ca9caef 0124-bluetooth-keysize-check.patch
79eaf814d76402a445efc961666a7c7c74207e552b0cb32d93d5cb828da580f7dbe93509dc9f53321c7844663205a8dce4e518ba047e4c57fc55f5c3498088ec 0126-sign-file-libressl.patch
8ee913a4187740457a2de64708edf757666c6a8a7f8ef30aaa8eee22290a30fa5d636d10de1fad334a30b4acdb733ffe556fb046d5d1769bde3b4e85906189d5 0200-x86-compile.patch
aa5636e778b053a65b739056f300bbc2056dabc215dc110ac35bf3ce0f1cd35d9eafb4db87e804993200fa2de4e84e6410d7d77794abbb446ef7fc83c22d3296 0202-parisc-disable-prctl.patch
-403d9145d858483fb55d2d6a7276170e808fb6936f0db46e63ae01a3ef2e4e6425b2db6d459415c15e15de495f37cec1be73ee98b070a49d9543bb864062e006 0204-amd-deserialised-MSR-access.patch
-05376bf6307dc26d64f6cf7ee809deb81caf8b3040cb80170844505e3d5b10220d51879f047304bea4bde7c001c1924ae52dfc01f292890dab7f06dfb10264d9 0210-fix-powerbook6-5-audio.patch
-dd8dcd068000c6946d4d077a3ca3fc7a18c9b2e579f36b4b7c3e54ad8cb37fa19dfff6d950379c93a70c6e19411882e5d2b496f560f331e0ba5f1f63c880ae37 0250-expose-per-process-ksm.patch
+c68d72ba638f04a35d484b996a0beb08fc4166ad6c5d261138097e5ed4851663fa013ec65995b59b0045e7c9db66c9efe8f168de4025e9f913b8d216134242cf 0204-sparc-warray-fix.patch
+8f3120486ec8bbe2c82efc56888a08f73c99573c420f2b6d6f13fe71981418164eddb63b7f60d551b72da64a7854ee738138fb0e5010d3d1b265c3c4b088d22b 0208-gcc14-objtool-fix.patch
+e74b043c4e5a590cef274e8b869c19159c7b0eeec7e2e382776526f042e7f13715a2b42c29305c299be22d2e81e373ef82c7ada3b6057d56df3d20580106cd2e 0250-expose-per-process-ksm.patch
+341f249092877d573a9f2dd32cc347138e9f684283a085ee413a9d5391640de3b8d3ff0f61aebb78f63a54217d28393fb3761c8a94abd4d0718458b102e06cac 0252-rectify-ksm-inheritance.patch
9450d34f0a0efa6b11c5c84b4f8aed6a9095e8737673cdc731fd4c6ff9afa0b3e75e3d38a2542f72279a8be16aba8e8d4fd47535ca947302ed68a98e724c71b6 0260-reduce-swappiness.patch
7796055bbbaf5eb421991cd2b9e7ea030e1da0484855a2405c0c71070246a83d04165f9b61e1a9cc45aea84bfe150da3441d7cd241a5dd3a06ea03dada1cfc37 0262-boot-order.patch
-4e637935c2f37cc18f347293e3c94b18f90e2caccca726304a95c4891257a5b2bb3093aee7a97571038b29c0c987cc60a9a80aefd0d4c9a063b33d102f03579e 0300-tmp513-regression-fix.patch
-bb70170e6c23cfee7e3abeecf3a238f9f6237b07ed3a1c44d83cdf812046c1ab56d15b79fd793a0c08eb93ffff9578f0bf33d482d98cd3b605eb6e3b33b81b94 0302-iwlwifi-rfkill-fix.patch
+70d8c777a9975e2677ace0a18452a63eb1117f24f738f905e2d0a3ba4a394046a45758d7f5509ef7de13a2ffc22aa1c65c21ff010ad6b550978044c76912a3f8 0300-correct-max98388-includes.patch
+865b463092bb824e4eb6dd38821f063d2906bafdafe807f1d4af17567f82681903975c35c732e6526230f64ce99562243baaba30c52b019c73bbae1b98e6359f 0302-i915-gcc14-fix.patch
+05376bf6307dc26d64f6cf7ee809deb81caf8b3040cb80170844505e3d5b10220d51879f047304bea4bde7c001c1924ae52dfc01f292890dab7f06dfb10264d9 0304-fix-powerbook6-5-audio.patch
34f7fc726caedabdf0cee350c8eb9fb84041388d537bb3cfcd143dcf89dca8b8a75dd2611896987d4ce31aa2935598b860ea93fc8bbd65ce47385f3468f7f650 0400-reduce-pageblock-size-nonhugetlb.patch
1d4391cf8f34e6898da303803961364474aa175b0a95dd13b284e88a9590c657083db78efb65d9fa9d40c22e0948da0bdc1063ee84ae0a699292a4d953d6bf65 0402-mm-optimise-slub.patch
882cc2a0692a3b4841a5814a1b552db6dab10e49528050f265d04b6d34fe517e2f3f6436de82f889b5471192f87ecfdff9ea142c651154622821579429519b4c 0404-page-cache-not-found.patch
dfd940af1d14e8a2cc1fcffbfa0fa8db52accf67cad3a7f435fc84047c945ba64dc3c45eb8dbbcc82715d8b8f2fbafa1e22b123158029016c5506cc134a7842c 0500-print-fw-info.patch
-fb5162bc01913278fe1eacb43bfbea7b6f1dbc186d40a9630d26666fb16720a411142edabefb8dd0918fdeb1a47486870b890c7347252773d6ec1109e0b01cb6 0502-gcc9-kcflags.patch
-a232b89428dbc367c6f8df90b294da4c9b040843947d4914f617c90d323dcceaa86522c148634de6795a7539f20bb5c310e52488e4ec0b9b40c46630a714ce0b 0504-update-zstd-to-v1_5_5.patch
-ed613bfbd99d74614882741db6b5d97db7032d67e53449f7132903240e4ae4a18876060a2cddd7406b5fe2cabfcfcbd26a95636a1aa941ed63c9fe2dc6fe0f78 1000-version.patch
-47540083d16e58bbceaf5a01349c0db982ca9f752980b40261eba4ec9ee3e33a3e73777d9a9a3e4c11cd614e54be6c66956e625e209f796d316d84d6afc03326 no-require-gnu-tar.patch
+13377ff9df08f93a9d1be62081e8724010b423c7b21a48fc540e963df393c0b9238ff552aa67fbec604b30ec5dc9e78e3b5d480662d5acb2a9cb5d9dec337c7d 0502-gcc9-kcflags.patch
+ad92d5e21e61811a1f27cc9595f9870ca6d3d22d008b78238a0223ac504aeb0f5c8574a1d38951cefde331ed18fe70c4b086078db55ae15a5a285f733c66235e 0504-update-zstd-to-v1_5_6.patch
+5e71f65d2f96fd3f7f531a8c8c37e5ffbaf921bbc54236eeb8b8d76f425cb53418e5d55e7c9567754d17eff4c40bb85b0b559c8d6ce0b45837bdf744be44880b 1000-version.patch
+5a60551ce859f591ffd772b37cb46be3194ac6ba7e08e2dddc6df967add96c5cf51d6a92aed3a7b6805dde691da5f7670bcbcc68ce1f51763f59f725cbc28f9d no-require-gnu-tar.patch
d3b9e580db6006d25297b2dc17c4dc97be35992f9a02dd3bc37afa7d8054f8828a5c2060a5ffbd8e540e9d8babdca369b2af4106961e74652687e53d5bc7887b no-require-lilo.patch
7bb07eb22002cc48caf0cd55d17ce4097aa583e0ca4048c11c92e1519761b2ae982ffe98311543d4b0dfc991c8bc411b2e1c7be9488b6c6f19ffaa08e69e2f47 no-autoload-fb.conf
b0e0634c84440a480be208da5157b03cb914790faab08fd3fdc2faeceed2c0a03d52c0e029084190708190f80028648923c4fd6feb11ec68ab4f740488161b0a mkimage-missing-not-fatal.patch"
diff --git a/system/easy-kernel/config-aarch64 b/system/easy-kernel/config-aarch64
index 862cbc2c0..33ffd2370 100644
--- a/system/easy-kernel/config-aarch64
+++ b/system/easy-kernel/config-aarch64
@@ -1,18 +1,21 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm64 6.6.6-mc1 Kernel Configuration
+# Linux/arm64 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
+CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
CONFIG_PAHOLE_VERSION=0
@@ -161,7 +164,8 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_CC_HAS_INT128=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_ARCH_SUPPORTS_INT128=y
CONFIG_NUMA_BALANCING=y
CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
@@ -302,6 +306,7 @@ CONFIG_FIX_EARLYCON_MEM=y
CONFIG_PGTABLE_LEVELS=4
CONFIG_ARCH_SUPPORTS_UPROBES=y
CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_BROKEN_GAS_INST=y
CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
#
@@ -364,7 +369,6 @@ CONFIG_ARM64_ERRATUM_834220=y
CONFIG_ARM64_ERRATUM_1742098=y
CONFIG_ARM64_ERRATUM_845719=y
CONFIG_ARM64_ERRATUM_843419=y
-CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
CONFIG_ARM64_ERRATUM_1024718=y
CONFIG_ARM64_ERRATUM_1418040=y
CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y
@@ -386,7 +390,10 @@ CONFIG_ARM64_ERRATUM_2067961=y
CONFIG_ARM64_ERRATUM_2441009=y
CONFIG_ARM64_ERRATUM_2457168=y
CONFIG_ARM64_ERRATUM_2645198=y
+CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y
CONFIG_ARM64_ERRATUM_2966298=y
+CONFIG_ARM64_ERRATUM_3117295=y
+CONFIG_ARM64_ERRATUM_3194386=y
CONFIG_CAVIUM_ERRATUM_22375=y
CONFIG_CAVIUM_ERRATUM_23144=y
CONFIG_CAVIUM_ERRATUM_23154=y
@@ -460,16 +467,12 @@ CONFIG_CP15_BARRIER_EMULATION=y
#
CONFIG_ARM64_HW_AFDBM=y
CONFIG_ARM64_PAN=y
-CONFIG_AS_HAS_LSE_ATOMICS=y
-CONFIG_ARM64_LSE_ATOMICS=y
CONFIG_ARM64_USE_LSE_ATOMICS=y
# end of ARMv8.1 architectural features
#
# ARMv8.2 architectural features
#
-CONFIG_AS_HAS_ARMV8_2=y
-CONFIG_AS_HAS_SHA3=y
# CONFIG_ARM64_PMEM is not set
CONFIG_ARM64_RAS_EXTN=y
CONFIG_ARM64_CNP=y
@@ -479,28 +482,20 @@ CONFIG_ARM64_CNP=y
# ARMv8.3 architectural features
#
# CONFIG_ARM64_PTR_AUTH is not set
-CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y
-CONFIG_AS_HAS_ARMV8_3=y
CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y
-CONFIG_AS_HAS_LDAPR=y
# end of ARMv8.3 architectural features
#
# ARMv8.4 architectural features
#
CONFIG_ARM64_AMU_EXTN=y
-CONFIG_AS_HAS_ARMV8_4=y
-CONFIG_ARM64_TLB_RANGE=y
# end of ARMv8.4 architectural features
#
# ARMv8.5 architectural features
#
-CONFIG_AS_HAS_ARMV8_5=y
CONFIG_ARM64_BTI=y
CONFIG_ARM64_E0PD=y
-CONFIG_ARM64_AS_HAS_MTE=y
-CONFIG_ARM64_MTE=y
# end of ARMv8.5 architectural features
#
@@ -685,11 +680,11 @@ CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
# CONFIG_NVHE_EL2_DEBUG is not set
+CONFIG_CPU_MITIGATIONS=y
#
# General architecture-dependent options
#
-CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y
CONFIG_HOTPLUG_CORE_SYNC=y
CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
CONFIG_KPROBES=y
@@ -786,6 +781,7 @@ CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
CONFIG_ARCH_USE_MEMREMAP_PROT=y
# CONFIG_LOCK_EVENT_COUNTS is not set
CONFIG_ARCH_HAS_RELR=y
+# CONFIG_RELR is not set
CONFIG_HAVE_PREEMPT_DYNAMIC=y
CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y
CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
@@ -949,11 +945,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -1001,6 +997,7 @@ CONFIG_MIGRATION=y
CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
CONFIG_ARCH_ENABLE_THP_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
@@ -1032,8 +1029,6 @@ CONFIG_ARCH_HAS_PTE_DEVMAP=y
CONFIG_ARCH_HAS_ZONE_DMA_SET=y
CONFIG_ZONE_DMA=y
CONFIG_ZONE_DMA32=y
-CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
-CONFIG_ARCH_USES_PG_ARCH_X=y
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_PERCPU_STATS is not set
# CONFIG_GUP_TEST is not set
@@ -1661,7 +1656,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -2773,6 +2767,7 @@ CONFIG_NET_VENDOR_MICROCHIP=y
# CONFIG_VCAP is not set
CONFIG_NET_VENDOR_MICROSEMI=y
CONFIG_NET_VENDOR_MICROSOFT=y
+# CONFIG_MICROSOFT_MANA is not set
CONFIG_NET_VENDOR_MYRI=y
# CONFIG_MYRI10GE is not set
# CONFIG_FEALNX is not set
@@ -2856,7 +2851,6 @@ CONFIG_DWMAC_SUNXI=m
CONFIG_DWMAC_SUN8I=m
# CONFIG_DWMAC_INTEL_PLAT is not set
# CONFIG_DWMAC_TEGRA is not set
-# CONFIG_DWMAC_LOONGSON is not set
# CONFIG_STMMAC_PCI is not set
CONFIG_NET_VENDOR_SUN=y
# CONFIG_HAPPYMEAL is not set
@@ -4413,7 +4407,6 @@ CONFIG_MTK_THERMAL=y
# end of Mediatek thermal drivers
CONFIG_AMLOGIC_THERMAL=y
-# CONFIG_TI_SOC_THERMAL is not set
#
# Samsung thermal drivers
@@ -4597,7 +4590,6 @@ CONFIG_MFD_SEC_CORE=y
# CONFIG_MFD_STMPE is not set
CONFIG_MFD_SUN6I_PRCM=y
CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_LP8788 is not set
# CONFIG_MFD_TI_LMU is not set
@@ -5430,6 +5422,7 @@ CONFIG_MEDIA_TUNER_XC5000=m
# Graphics support
#
CONFIG_APERTURE_HELPERS=y
+CONFIG_SCREEN_INFO=y
CONFIG_VIDEO_CMDLINE=y
CONFIG_VIDEO_NOMODESET=y
# CONFIG_AUXDISPLAY is not set
@@ -5811,6 +5804,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_DMAMEM_HELPERS=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -6505,6 +6499,7 @@ CONFIG_HID_WIIMOTE=m
# CONFIG_HID_ZYDACRON is not set
# CONFIG_HID_SENSOR_HUB is not set
# CONFIG_HID_ALPS is not set
+# CONFIG_HID_MCP2200 is not set
# CONFIG_HID_MCP2221 is not set
# end of Special HID drivers
@@ -6886,9 +6881,6 @@ CONFIG_MMC_CQHCI=y
CONFIG_MMC_TOSHIBA_PCI=m
CONFIG_MMC_MTK=m
CONFIG_MMC_SDHCI_XENON=y
-CONFIG_MMC_SDHCI_OMAP=m
-CONFIG_MMC_SDHCI_AM654=m
-CONFIG_MMC_SDHCI_EXTERNAL_DMA=y
CONFIG_SCSI_UFSHCD=m
# CONFIG_SCSI_UFS_BSG is not set
# CONFIG_SCSI_UFS_HWMON is not set
@@ -7959,6 +7951,7 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
# CONFIG_AD7923 is not set
# CONFIG_AD7949 is not set
# CONFIG_AD799X is not set
+# CONFIG_AD9467 is not set
# CONFIG_ADI_AXI_ADC is not set
# CONFIG_AXP20X_ADC is not set
# CONFIG_AXP288_ADC is not set
@@ -8971,6 +8964,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -8998,9 +8994,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -9031,6 +9034,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=m
CONFIG_CRYPTO_SKCIPHER2=y
@@ -9440,7 +9444,6 @@ CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
CONFIG_CHECK_SIGNATURE=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -9529,6 +9532,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
# end of Generic Kernel Debugging Instruments
#
@@ -9570,7 +9575,6 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_PER_CPU_MAPS is not set
CONFIG_HAVE_ARCH_KASAN=y
CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y
-CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y
CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
CONFIG_CC_HAS_KASAN_GENERIC=y
CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
@@ -9642,8 +9646,6 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
@@ -9694,6 +9696,7 @@ CONFIG_FUNCTION_ERROR_INJECTION=y
# CONFIG_FAULT_INJECTION is not set
CONFIG_ARCH_HAS_KCOV=y
CONFIG_CC_HAS_SANCOV_TRACE_PC=y
+# CONFIG_KCOV is not set
CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_TEST_DHRY is not set
# CONFIG_LKDTM is not set
diff --git a/system/easy-kernel/config-armv7 b/system/easy-kernel/config-armv7
index 8c82f3915..1d1a18ff3 100644
--- a/system/easy-kernel/config-armv7
+++ b/system/easy-kernel/config-armv7
@@ -1,16 +1,18 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm 6.6.6-mc1 Kernel Configuration
+# Linux/arm 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -159,7 +161,8 @@ CONFIG_GENERIC_SCHED_CLOCK=y
# end of Scheduler features
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
@@ -727,6 +730,8 @@ CONFIG_ARM_CPU_SUSPEND=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
# end of Power management options
+CONFIG_CPU_MITIGATIONS=y
+
#
# General architecture-dependent options
#
@@ -956,11 +961,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -997,6 +1002,7 @@ CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_BOUNCE=y
CONFIG_MMU_NOTIFIER=y
@@ -1652,7 +1658,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -2681,7 +2686,6 @@ CONFIG_DWMAC_SUNXI=y
CONFIG_DWMAC_SUN8I=y
CONFIG_DWMAC_IMX8=y
# CONFIG_DWMAC_INTEL_PLAT is not set
-# CONFIG_DWMAC_LOONGSON is not set
# CONFIG_STMMAC_PCI is not set
CONFIG_NET_VENDOR_SUN=y
# CONFIG_HAPPYMEAL is not set
@@ -5116,6 +5120,7 @@ CONFIG_DVB_SP2=m
# Graphics support
#
CONFIG_APERTURE_HELPERS=y
+CONFIG_SCREEN_INFO=y
CONFIG_VIDEO_CMDLINE=y
CONFIG_VIDEO_NOMODESET=y
# CONFIG_AUXDISPLAY is not set
@@ -5474,6 +5479,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_DMAMEM_HELPERS=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -6064,6 +6070,7 @@ CONFIG_HID_GENERIC=y
# CONFIG_HID_ZYDACRON is not set
# CONFIG_HID_SENSOR_HUB is not set
# CONFIG_HID_ALPS is not set
+# CONFIG_HID_MCP2200 is not set
# CONFIG_HID_MCP2221 is not set
# end of Special HID drivers
@@ -6448,7 +6455,6 @@ CONFIG_MMC_BCM2835=y
# CONFIG_MMC_MTK is not set
# CONFIG_MMC_SDHCI_XENON is not set
CONFIG_MMC_SDHCI_OMAP=y
-# CONFIG_MMC_SDHCI_AM654 is not set
CONFIG_MMC_SDHCI_EXTERNAL_DMA=y
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_MEMSTICK is not set
@@ -7284,6 +7290,7 @@ CONFIG_IIO_SW_TRIGGER=y
# CONFIG_AD7923 is not set
# CONFIG_AD7949 is not set
# CONFIG_AD799X is not set
+# CONFIG_AD9467 is not set
# CONFIG_ADI_AXI_ADC is not set
# CONFIG_AXP20X_ADC is not set
# CONFIG_AXP288_ADC is not set
@@ -8214,6 +8221,9 @@ CONFIG_KEYS=y
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_KEY_NOTIFICATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -8240,9 +8250,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -8271,6 +8288,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -8637,7 +8655,6 @@ CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -8720,6 +8737,7 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
+CONFIG_HAVE_KCSAN_COMPILER=y
# end of Generic Kernel Debugging Instruments
#
@@ -8823,8 +8841,6 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/config-m68k b/system/easy-kernel/config-m68k
index 6b4cb5055..be7848297 100644
--- a/system/easy-kernel/config-m68k
+++ b/system/easy-kernel/config-m68k
@@ -1,16 +1,18 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/m68k 6.6.6-mc1 Kernel Configuration
+# Linux/m68k 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -108,7 +110,8 @@ CONFIG_LOG_BUF_SHIFT=14
# end of Scheduler features
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
@@ -294,6 +297,8 @@ CONFIG_AMIGA_BUILTIN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
# end of Character devices
+CONFIG_CPU_MITIGATIONS=y
+
#
# General architecture-dependent options
#
@@ -464,11 +469,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -498,6 +503,7 @@ CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_NEED_PER_CPU_KM=y
@@ -1112,7 +1118,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
# CONFIG_BT_MSFTEXT is not set
@@ -2258,7 +2263,6 @@ CONFIG_BCMA_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SKY81452 is not set
# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_TI_LMU is not set
# CONFIG_TPS6105X is not set
@@ -2395,6 +2399,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y
# CONFIG_FB_FOREIGN_ENDIAN is not set
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -3105,6 +3110,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -3130,9 +3138,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -3161,6 +3176,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=m
CONFIG_CRYPTO_SKCIPHER2=y
@@ -3451,6 +3467,7 @@ CONFIG_DMA_DIRECT_REMAP=y
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
+CONFIG_FORCE_NR_CPUS=y
CONFIG_DQL=y
CONFIG_GLOB=y
# CONFIG_GLOB_SELFTEST is not set
@@ -3523,6 +3540,7 @@ CONFIG_DEBUG_FS_ALLOW_ALL=y
# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
# CONFIG_DEBUG_FS_ALLOW_NONE is not set
# CONFIG_UBSAN is not set
+CONFIG_HAVE_KCSAN_COMPILER=y
# end of Generic Kernel Debugging Instruments
#
@@ -3601,8 +3619,6 @@ CONFIG_HAVE_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/config-pmmx b/system/easy-kernel/config-pmmx
index 2f2bdfc4d..f836b9124 100644
--- a/system/easy-kernel/config-pmmx
+++ b/system/easy-kernel/config-pmmx
@@ -1,16 +1,18 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.6.6-mc1 Kernel Configuration
+# Linux/x86 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -170,7 +172,8 @@ CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
@@ -362,6 +365,8 @@ CONFIG_M586MMX=y
# CONFIG_MEXCAVATOR is not set
# CONFIG_MZEN is not set
# CONFIG_MZEN2 is not set
+# CONFIG_MZEN3 is not set
+# CONFIG_MZEN4 is not set
# CONFIG_MCRUSOE is not set
# CONFIG_MEFFICEON is not set
# CONFIG_MWINCHIPC6 is not set
@@ -388,6 +393,14 @@ CONFIG_M586MMX=y
# CONFIG_MCANNONLAKE is not set
# CONFIG_MICELAKE is not set
# CONFIG_MCASCADELAKE is not set
+# CONFIG_MCOOPERLAKE is not set
+# CONFIG_MTIGERLAKE is not set
+# CONFIG_MSAPPHIRERAPIDS is not set
+# CONFIG_MROCKETLAKE is not set
+# CONFIG_MALDERLAKE is not set
+# CONFIG_MRAPTORLAKE is not set
+# CONFIG_MMETEORLAKE is not set
+# CONFIG_MEMERALDRAPIDS is not set
# CONFIG_MNATIVE_INTEL is not set
# CONFIG_MNATIVE_AMD is not set
CONFIG_X86_GENERIC=y
@@ -494,6 +507,7 @@ CONFIG_HZ_100=y
CONFIG_HZ=100
CONFIG_SCHED_HRTICK=y
CONFIG_ARCH_SUPPORTS_KEXEC=y
+CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y
CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y
CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y
CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y
@@ -515,9 +529,11 @@ CONFIG_MODIFY_LDT_SYSCALL=y
CONFIG_CC_HAS_ENTRY_PADDING=y
CONFIG_FUNCTION_PADDING_CFI=11
CONFIG_FUNCTION_PADDING_BYTES=16
-CONFIG_SPECULATION_MITIGATIONS=y
+CONFIG_CPU_MITIGATIONS=y
CONFIG_RETPOLINE=y
# CONFIG_GDS_FORCE_MITIGATION is not set
+CONFIG_MITIGATION_RFDS=y
+CONFIG_MITIGATION_SPECTRE_BHI=y
#
# Power management and ACPI options
@@ -735,6 +751,7 @@ CONFIG_KVM_AMD=m
CONFIG_KVM_SMM=y
# CONFIG_KVM_XEN is not set
# CONFIG_KVM_PROVE_MMU is not set
+CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y
#
# General architecture-dependent options
@@ -1001,11 +1018,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -1043,6 +1060,7 @@ CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_BOUNCE=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
@@ -1684,7 +1702,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -2918,7 +2935,6 @@ CONFIG_STMMAC_PLATFORM=m
CONFIG_DWMAC_GENERIC=m
# CONFIG_DWMAC_INTEL_PLAT is not set
CONFIG_DWMAC_INTEL=m
-# CONFIG_DWMAC_LOONGSON is not set
CONFIG_STMMAC_PCI=m
CONFIG_NET_VENDOR_SUN=y
CONFIG_HAPPYMEAL=m
@@ -4526,7 +4542,6 @@ CONFIG_MFD_CORE=m
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SKY81452 is not set
CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_TI_LMU is not set
# CONFIG_TPS6105X is not set
@@ -5517,6 +5532,7 @@ CONFIG_DVB_DUMMY_FE=m
# Graphics support
#
CONFIG_APERTURE_HELPERS=y
+CONFIG_SCREEN_INFO=y
CONFIG_VIDEO_CMDLINE=y
CONFIG_VIDEO_NOMODESET=y
# CONFIG_AUXDISPLAY is not set
@@ -5886,6 +5902,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_DMAMEM_HELPERS=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -6325,6 +6342,7 @@ CONFIG_HID_ZYDACRON=m
CONFIG_HID_SENSOR_HUB=m
CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2200 is not set
# CONFIG_HID_MCP2221 is not set
# end of Special HID drivers
@@ -6681,8 +6699,6 @@ CONFIG_MMC_CQHCI=m
CONFIG_MMC_TOSHIBA_PCI=m
CONFIG_MMC_MTK=m
CONFIG_MMC_SDHCI_XENON=m
-# CONFIG_MMC_SDHCI_OMAP is not set
-# CONFIG_MMC_SDHCI_AM654 is not set
CONFIG_SCSI_UFSHCD=m
# CONFIG_SCSI_UFS_BSG is not set
# CONFIG_SCSI_UFS_HWMON is not set
@@ -7653,6 +7669,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -7681,9 +7700,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -7712,6 +7738,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -8027,7 +8054,6 @@ CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
CONFIG_CHECK_SIGNATURE=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -8091,6 +8117,7 @@ CONFIG_FRAME_WARN=2048
# CONFIG_HEADERS_INSTALL is not set
# CONFIG_DEBUG_SECTION_MISMATCH is not set
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
CONFIG_FRAME_POINTER=y
# CONFIG_VMLINUX_MAP is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
@@ -8110,6 +8137,7 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
+CONFIG_HAVE_KCSAN_COMPILER=y
# end of Generic Kernel Debugging Instruments
#
@@ -8222,8 +8250,6 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/config-ppc b/system/easy-kernel/config-ppc
index 579156985..86965b861 100644
--- a/system/easy-kernel/config-ppc
+++ b/system/easy-kernel/config-ppc
@@ -1,16 +1,18 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 6.6.6-mc1 Kernel Configuration
+# Linux/powerpc 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -150,7 +152,8 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
# end of Scheduler features
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
@@ -444,6 +447,7 @@ CONFIG_HOTPLUG_CPU=y
# CONFIG_PPC_QUEUED_SPINLOCKS is not set
CONFIG_ARCH_CPU_PROBE_RELEASE=y
CONFIG_ARCH_SUPPORTS_KEXEC=y
+CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y
CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y
# CONFIG_IRQ_ALL_CPUS is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -511,6 +515,7 @@ CONFIG_TASK_SIZE=0xb0000000
CONFIG_VIRTUALIZATION=y
CONFIG_HAVE_LIVEPATCH=y
+CONFIG_CPU_MITIGATIONS=y
#
# General architecture-dependent options
@@ -750,11 +755,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -790,6 +795,7 @@ CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_BOUNCE=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -1417,7 +1423,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -3698,7 +3703,6 @@ CONFIG_BCMA_DRIVER_GMAC_CMN=y
# CONFIG_MFD_SKY81452 is not set
# CONFIG_MFD_STMPE is not set
# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_LP8788 is not set
# CONFIG_MFD_TI_LMU is not set
@@ -4923,6 +4927,7 @@ CONFIG_FB_BOTH_ENDIAN=y
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_DMAMEM_HELPERS=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -5285,6 +5290,7 @@ CONFIG_HID_WACOM=m
# CONFIG_HID_ZYDACRON is not set
# CONFIG_HID_SENSOR_HUB is not set
# CONFIG_HID_ALPS is not set
+# CONFIG_HID_MCP2200 is not set
# CONFIG_HID_MCP2221 is not set
# end of Special HID drivers
@@ -5549,8 +5555,6 @@ CONFIG_MMC_USDHI6ROL0=m
CONFIG_MMC_TOSHIBA_PCI=m
# CONFIG_MMC_MTK is not set
# CONFIG_MMC_SDHCI_XENON is not set
-# CONFIG_MMC_SDHCI_OMAP is not set
-# CONFIG_MMC_SDHCI_AM654 is not set
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -6330,6 +6334,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -6356,9 +6363,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -6387,6 +6401,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -6681,7 +6696,6 @@ CONFIG_ARCH_DMA_DEFAULT_COHERENT=y
# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
CONFIG_CHECK_SIGNATURE=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -6779,6 +6793,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
# end of Generic Kernel Debugging Instruments
#
@@ -6887,8 +6903,6 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/config-ppc64 b/system/easy-kernel/config-ppc64
index 961caff58..aa02a42fc 100644
--- a/system/easy-kernel/config-ppc64
+++ b/system/easy-kernel/config-ppc64
@@ -1,18 +1,20 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 6.6.6-mc1 Kernel Configuration
+# Linux/powerpc 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -154,7 +156,8 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_CC_HAS_INT128=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
# CONFIG_NUMA_BALANCING is not set
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
@@ -286,7 +289,7 @@ CONFIG_POWERPC64_CPU=y
# CONFIG_TOOLCHAIN_DEFAULT_CPU is not set
CONFIG_TARGET_CPU_BOOL=y
CONFIG_TARGET_CPU="power4"
-CONFIG_TUNE_CPU="-mtune=power9"
+CONFIG_TUNE_CPU="-mtune=power10"
CONFIG_PPC_BOOK3S=y
CONFIG_PPC_FPU_REGS=y
CONFIG_PPC_FPU=y
@@ -313,6 +316,8 @@ CONFIG_CPU_BIG_ENDIAN=y
# CONFIG_CPU_LITTLE_ENDIAN is not set
CONFIG_PPC64_ELF_ABI_V2=y
CONFIG_CC_HAS_ELFV2=y
+CONFIG_CC_HAS_PREFIXED=y
+CONFIG_CC_HAS_PCREL=y
CONFIG_64BIT=y
CONFIG_MMU=y
CONFIG_ARCH_MMAP_RND_BITS_MAX=33
@@ -608,6 +613,7 @@ CONFIG_KVM_BOOK3S_64_PR=m
# CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND is not set
CONFIG_KVM_XICS=y
CONFIG_KVM_XIVE=y
+CONFIG_CPU_MITIGATIONS=y
#
# General architecture-dependent options
@@ -862,11 +868,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -919,6 +925,7 @@ CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
CONFIG_ARCH_ENABLE_THP_MIGRATION=y
CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
@@ -1565,7 +1572,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -3708,7 +3714,6 @@ CONFIG_BCMA_DRIVER_PCI=y
# CONFIG_MFD_SKY81452 is not set
# CONFIG_MFD_STMPE is not set
CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_LP8788 is not set
# CONFIG_MFD_TI_LMU is not set
@@ -4697,6 +4702,7 @@ CONFIG_FB_BOTH_ENDIAN=y
# CONFIG_FB_LITTLE_ENDIAN is not set
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -5298,8 +5304,6 @@ CONFIG_MMC_SDHCI_OF_HLWD=m
# CONFIG_MMC_HSQ is not set
# CONFIG_MMC_TOSHIBA_PCI is not set
# CONFIG_MMC_SDHCI_XENON is not set
-# CONFIG_MMC_SDHCI_OMAP is not set
-# CONFIG_MMC_SDHCI_AM654 is not set
CONFIG_SCSI_UFSHCD=m
# CONFIG_SCSI_UFS_BSG is not set
# CONFIG_SCSI_UFS_CRYPTO is not set
@@ -6097,6 +6101,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -6123,12 +6130,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
-# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set
-# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set
-# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -6159,6 +6170,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -6464,7 +6476,6 @@ CONFIG_SWIOTLB=y
CONFIG_SGL_ALLOC=y
CONFIG_IOMMU_HELPER=y
CONFIG_CHECK_SIGNATURE=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -6552,6 +6563,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
# end of Generic Kernel Debugging Instruments
#
@@ -6659,8 +6672,6 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/config-sparc64 b/system/easy-kernel/config-sparc64
index 2aaf69cd8..d164de11b 100644
--- a/system/easy-kernel/config-sparc64
+++ b/system/easy-kernel/config-sparc64
@@ -1,18 +1,20 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/sparc64 6.6.6-mc1 Kernel Configuration
+# Linux/sparc64 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -135,7 +137,8 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
CONFIG_CC_HAS_INT128=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
@@ -347,6 +350,8 @@ CONFIG_ENVCTRL=m
CONFIG_ORACLE_DAX=m
# end of Misc Linux/SPARC drivers
+CONFIG_CPU_MITIGATIONS=y
+
#
# General architecture-dependent options
#
@@ -544,11 +549,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -582,6 +587,7 @@ CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -1211,7 +1217,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -3448,7 +3453,6 @@ CONFIG_BCMA_DRIVER_GMAC_CMN=y
# CONFIG_MFD_SKY81452 is not set
# CONFIG_MFD_STMPE is not set
# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_LP8788 is not set
# CONFIG_MFD_TI_LMU is not set
@@ -4674,6 +4678,7 @@ CONFIG_FB_BOTH_ENDIAN=y
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_DMAMEM_HELPERS=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -5013,6 +5018,7 @@ CONFIG_HID_WACOM=m
# CONFIG_HID_ZYDACRON is not set
# CONFIG_HID_SENSOR_HUB is not set
# CONFIG_HID_ALPS is not set
+# CONFIG_HID_MCP2200 is not set
# CONFIG_HID_MCP2221 is not set
# end of Special HID drivers
@@ -5267,8 +5273,6 @@ CONFIG_MMC_USDHI6ROL0=m
CONFIG_MMC_TOSHIBA_PCI=m
# CONFIG_MMC_MTK is not set
# CONFIG_MMC_SDHCI_XENON is not set
-# CONFIG_MMC_SDHCI_OMAP is not set
-# CONFIG_MMC_SDHCI_AM654 is not set
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -6048,6 +6052,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -6073,9 +6080,16 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -6104,6 +6118,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -6405,7 +6420,6 @@ CONFIG_ARCH_DMA_ADDR_T_64BIT=y
CONFIG_SGL_ALLOC=y
CONFIG_IOMMU_HELPER=y
CONFIG_CHECK_SIGNATURE=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -6490,6 +6504,7 @@ CONFIG_DEBUG_FS_ALLOW_ALL=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
# CONFIG_UBSAN is not set
+CONFIG_HAVE_KCSAN_COMPILER=y
# end of Generic Kernel Debugging Instruments
#
@@ -6585,8 +6600,6 @@ CONFIG_HAVE_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/config-x86_64 b/system/easy-kernel/config-x86_64
index e3a4203fb..0148aba77 100644
--- a/system/easy-kernel/config-x86_64
+++ b/system/easy-kernel/config-x86_64
@@ -1,18 +1,20 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.6.6-mc1 Kernel Configuration
+# Linux/x86_64 6.6.58-mc2 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (Adelie 8.5.0) 8.5.0"
+CONFIG_CC_VERSION_TEXT="gcc (Adelie 13.3.0) 13.3.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=80500
+CONFIG_GCC_VERSION=130300
CONFIG_CLANG_VERSION=0
CONFIG_AS_IS_GNU=y
-CONFIG_AS_VERSION=24100
+CONFIG_AS_VERSION=24200
CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=24100
+CONFIG_LD_VERSION=24200
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
CONFIG_TOOLS_SUPPORT_RELR=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
@@ -178,7 +180,8 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_CC_HAS_INT128=y
CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
-CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
CONFIG_ARCH_SUPPORTS_INT128=y
# CONFIG_NUMA_BALANCING is not set
CONFIG_CGROUPS=y
@@ -383,6 +386,8 @@ CONFIG_INTEL_TDX_GUEST=y
# CONFIG_MEXCAVATOR is not set
# CONFIG_MZEN is not set
# CONFIG_MZEN2 is not set
+# CONFIG_MZEN3 is not set
+# CONFIG_MZEN4 is not set
# CONFIG_MPSC is not set
# CONFIG_MCORE2 is not set
# CONFIG_MATOM is not set
@@ -400,7 +405,18 @@ CONFIG_INTEL_TDX_GUEST=y
# CONFIG_MCANNONLAKE is not set
# CONFIG_MICELAKE is not set
# CONFIG_MCASCADELAKE is not set
+# CONFIG_MCOOPERLAKE is not set
+# CONFIG_MTIGERLAKE is not set
+# CONFIG_MSAPPHIRERAPIDS is not set
+# CONFIG_MROCKETLAKE is not set
+# CONFIG_MALDERLAKE is not set
+# CONFIG_MRAPTORLAKE is not set
+# CONFIG_MMETEORLAKE is not set
+# CONFIG_MEMERALDRAPIDS is not set
CONFIG_GENERIC_CPU=y
+# CONFIG_GENERIC_CPU2 is not set
+# CONFIG_GENERIC_CPU3 is not set
+# CONFIG_GENERIC_CPU4 is not set
# CONFIG_MNATIVE_INTEL is not set
# CONFIG_MNATIVE_AMD is not set
CONFIG_X86_INTERNODE_CACHE_SHIFT=6
@@ -534,12 +550,14 @@ CONFIG_HAVE_LIVEPATCH=y
CONFIG_CC_HAS_ENTRY_PADDING=y
CONFIG_FUNCTION_PADDING_CFI=11
CONFIG_FUNCTION_PADDING_BYTES=16
-CONFIG_SPECULATION_MITIGATIONS=y
+CONFIG_CPU_MITIGATIONS=y
CONFIG_PAGE_TABLE_ISOLATION=y
CONFIG_RETPOLINE=y
CONFIG_CPU_IBPB_ENTRY=y
CONFIG_CPU_IBRS_ENTRY=y
# CONFIG_GDS_FORCE_MITIGATION is not set
+CONFIG_MITIGATION_RFDS=y
+CONFIG_MITIGATION_SPECTRE_BHI=y
CONFIG_ARCH_HAS_ADD_PAGES=y
#
@@ -741,6 +759,7 @@ CONFIG_KVM_AMD=m
CONFIG_KVM_SMM=y
# CONFIG_KVM_XEN is not set
# CONFIG_KVM_PROVE_MMU is not set
+CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y
#
# General architecture-dependent options
@@ -1034,11 +1053,11 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
-# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set
# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
CONFIG_ZBUD=y
-CONFIG_Z3FOLD=m
+# CONFIG_Z3FOLD_DEPRECATED is not set
CONFIG_ZSMALLOC=m
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_ZSMALLOC_CHAIN_SIZE=8
@@ -1088,6 +1107,7 @@ CONFIG_MIGRATION=y
CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
CONFIG_ARCH_ENABLE_THP_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
@@ -1749,7 +1769,6 @@ CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_LE_L2CAP_ECRED=y
CONFIG_BT_LEDS=y
@@ -3032,7 +3051,6 @@ CONFIG_STMMAC_PLATFORM=m
CONFIG_DWMAC_GENERIC=m
# CONFIG_DWMAC_INTEL_PLAT is not set
CONFIG_DWMAC_INTEL=m
-# CONFIG_DWMAC_LOONGSON is not set
CONFIG_STMMAC_PCI=m
CONFIG_NET_VENDOR_SUN=y
CONFIG_HAPPYMEAL=m
@@ -4790,7 +4808,6 @@ CONFIG_MFD_INTEL_LPSS_PCI=m
# CONFIG_MFD_SKY81452 is not set
# CONFIG_MFD_STMPE is not set
CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_TI_LMU is not set
# CONFIG_TPS6105X is not set
@@ -5832,6 +5849,7 @@ CONFIG_DVB_DUMMY_FE=m
# Graphics support
#
CONFIG_APERTURE_HELPERS=y
+CONFIG_SCREEN_INFO=y
CONFIG_VIDEO_CMDLINE=y
CONFIG_VIDEO_NOMODESET=y
# CONFIG_AUXDISPLAY is not set
@@ -6225,6 +6243,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y
CONFIG_FB_SYS_FOPS=y
CONFIG_FB_DEFERRED_IO=y
CONFIG_FB_DMAMEM_HELPERS=y
+CONFIG_FB_IOMEM_FOPS=y
CONFIG_FB_IOMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS=y
CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
@@ -6639,6 +6658,7 @@ CONFIG_HID_ZYDACRON=m
CONFIG_HID_SENSOR_HUB=m
CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2200 is not set
CONFIG_HID_MCP2221=m
# end of Special HID drivers
@@ -7009,8 +7029,6 @@ CONFIG_MMC_CQHCI=m
CONFIG_MMC_TOSHIBA_PCI=m
CONFIG_MMC_MTK=m
CONFIG_MMC_SDHCI_XENON=m
-# CONFIG_MMC_SDHCI_OMAP is not set
-# CONFIG_MMC_SDHCI_AM654 is not set
CONFIG_SCSI_UFSHCD=m
# CONFIG_SCSI_UFS_BSG is not set
# CONFIG_SCSI_UFS_HWMON is not set
@@ -8163,6 +8181,9 @@ CONFIG_KEYS=y
# CONFIG_ENCRYPTED_KEYS is not set
# CONFIG_KEY_DH_OPERATIONS is not set
# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
@@ -8191,13 +8212,17 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity"
#
# Memory initialization
#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
CONFIG_INIT_STACK_NONE=y
-# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set
-# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set
-# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
# CONFIG_GCC_PLUGIN_STACKLEAK is not set
# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
# end of Memory initialization
#
@@ -8228,6 +8253,7 @@ CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
CONFIG_CRYPTO_SIG2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -8583,7 +8609,6 @@ CONFIG_CMA_ALIGNMENT=8
CONFIG_SGL_ALLOC=y
CONFIG_IOMMU_HELPER=y
CONFIG_CHECK_SIGNATURE=y
-# CONFIG_FORCE_NR_CPUS is not set
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
CONFIG_GLOB=y
@@ -8651,6 +8676,7 @@ CONFIG_FRAME_WARN=2048
# CONFIG_DEBUG_SECTION_MISMATCH is not set
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
CONFIG_FRAME_POINTER=y
CONFIG_OBJTOOL=y
# CONFIG_VMLINUX_MAP is not set
@@ -8672,6 +8698,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
# end of Generic Kernel Debugging Instruments
#
@@ -8788,8 +8816,6 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_MAPLE_TREE is not set
# end of Debug kernel data structures
-# CONFIG_DEBUG_CREDENTIALS is not set
-
#
# RCU Debugging
#
diff --git a/system/easy-kernel/no-require-gnu-tar.patch b/system/easy-kernel/no-require-gnu-tar.patch
index 4d771a202..52a53a43b 100644
--- a/system/easy-kernel/no-require-gnu-tar.patch
+++ b/system/easy-kernel/no-require-gnu-tar.patch
@@ -1,9 +1,9 @@
--- a/kernel/gen_kheaders.sh 2021-10-31 20:53:10.000000000 +0000
+++ b/kernel/gen_kheaders.sh 2022-01-06 19:01:21.940000000 +0000
-@@ -85,6 +85,5 @@
+@@ -90,6 +90,5 @@
# Create archive and try to normalize metadata for reproducibility.
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
-- --owner=0 --group=0 --sort=name --numeric-owner \
+- --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
-I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
echo $headers_md5 > kernel/kheaders.md5